Diffstat (limited to 'gfx/skia')
-rw-r--r--gfx/skia/LICENSE27
-rw-r--r--gfx/skia/README3
-rw-r--r--gfx/skia/README_COMMITTING10
-rw-r--r--gfx/skia/README_MOZILLA12
-rw-r--r--gfx/skia/dump_mozbuild.py92
-rwxr-xr-xgfx/skia/generate_mozbuild.py502
-rwxr-xr-xgfx/skia/gyp_mozbuild13
-rw-r--r--gfx/skia/moz.build758
-rw-r--r--gfx/skia/patches/README2
-rw-r--r--gfx/skia/patches/archive/0001-Bug-687189-Implement-SkPaint-getPosTextPath.patch66
-rw-r--r--gfx/skia/patches/archive/0001-Bug-777614-Re-add-our-SkUserConfig.h-r-nrc.patch34
-rw-r--r--gfx/skia/patches/archive/0001-Bug-803063-Skia-cross-compilation-for-Windows-fails-.patch26
-rw-r--r--gfx/skia/patches/archive/0001-Bug-895086-Remove-unused-find_from_uniqueID-function.patch38
-rw-r--r--gfx/skia/patches/archive/0002-Bug-688366-Dont-invalidate-all-radial-gradients.patch30
-rw-r--r--gfx/skia/patches/archive/0002-Bug-848491-Re-apply-Bug-795549-Move-TileProc-functio.patch50
-rw-r--r--gfx/skia/patches/archive/0003-SkUserConfig-for-Mozilla.patch39
-rw-r--r--gfx/skia/patches/archive/0004-Bug-722011-Fix-trailing-commas-in-enums.patch280
-rw-r--r--gfx/skia/patches/archive/0004-Bug-777614-Re-apply-bug-719872-Fix-crash-on-Android-.patch684
-rw-r--r--gfx/skia/patches/archive/0005-Bug-731384-Fix-clang-SK_OVERRIDE.patch36
-rw-r--r--gfx/skia/patches/archive/0005-Bug-736276-Add-a-new-SkFontHost-that-takes-a-cairo_s.patch472
-rw-r--r--gfx/skia/patches/archive/0005-Bug-777614-Re-apply-bug-687188-Expand-the-gradient-c.patch198
-rw-r--r--gfx/skia/patches/archive/0006-Bug-751814-ARM-EDSP-ARMv6-Skia-fixes.patch147
-rw-r--r--gfx/skia/patches/archive/0006-Bug-848491-Re-apply-Bug-777614-Add-our-SkUserConfig..patch27
-rw-r--r--gfx/skia/patches/archive/0007-Bug-719872-Old-Android-FontHost.patch702
-rw-r--r--gfx/skia/patches/archive/0007-Bug-848491-Re-apply-bug-687188-Expand-the-gradient-c.patch168
-rw-r--r--gfx/skia/patches/archive/0008-Bug-687188-Skia-radial-gradients.patch173
-rw-r--r--gfx/skia/patches/archive/0008-Bug-848491-Re-apply-759683-Handle-compilers-that-don.patch35
-rw-r--r--gfx/skia/patches/archive/0009-Bug-755869-FreeBSD-Hurd.patch28
-rw-r--r--gfx/skia/patches/archive/0009-Bug-777614-Re-apply-759683-Handle-compilers-that-don.patch40
-rw-r--r--gfx/skia/patches/archive/0009-Bug-848491-Re-apply-bug-751418-Add-our-own-GrUserCon.patch23
-rw-r--r--gfx/skia/patches/archive/0010-Bug-689069-ARM-Opts.patch36
-rw-r--r--gfx/skia/patches/archive/0010-Bug-836892-Add-new-blending-modes-to-SkXfermode.patch698
-rw-r--r--gfx/skia/patches/archive/0010-Bug-848491-Re-apply-bug-817356-Patch-Skia-to-recogni.patch22
-rw-r--r--gfx/skia/patches/archive/0011-Bug-719575-Fix-clang-build.patch28
-rw-r--r--gfx/skia/patches/archive/0011-Bug-839347-no-anon-namespace-around-SkNO_RETURN_HINT.patch31
-rw-r--r--gfx/skia/patches/archive/0012-Bug-751418-Add-our-own-GrUserConfig-r-mattwoodrow.patch29
-rw-r--r--gfx/skia/patches/archive/0012-Bug-759683-make-ssse3-conditional.patch22
-rw-r--r--gfx/skia/patches/archive/0013-Bug-751418-Fix-compile-error-on-gcc-in-Skia-GL-r-mat.patch26
-rw-r--r--gfx/skia/patches/archive/0013-Bug-761890-fonts.patch162
-rw-r--r--gfx/skia/patches/archive/0014-Bug-765038-Fix-clang-build.patch29
-rw-r--r--gfx/skia/patches/archive/0015-Bug-766017-warnings.patch865
-rw-r--r--gfx/skia/patches/archive/0016-Bug-718849-Radial-gradients.patch400
-rw-r--r--gfx/skia/patches/archive/0017-Bug-740194-SkMemory-mozalloc.patch73
-rw-r--r--gfx/skia/patches/archive/0018-Bug-817356-PPC-defines.patch14
-rw-r--r--gfx/skia/patches/archive/0022-Bug-848491-Re-apply-bug-795538-Ensure-we-use-the-cor.patch39
-rw-r--r--gfx/skia/patches/archive/0023-Bug-890539-Fix-SK_COMPILE_ASSERT-build-warning.patch39
-rw-r--r--gfx/skia/patches/archive/0024-Bug-887318-fix-bgra-readback.patch217
-rw-r--r--gfx/skia/patches/archive/0025-Bug-896049-Add-default-Value-SK_OVERRIDE.patch26
-rw-r--r--gfx/skia/patches/archive/0026-Bug-901208-Fix-ARM-v4t.patch83
-rw-r--r--gfx/skia/patches/archive/0030-Bug-939629-Add-missing-include-guards.patch94
-rw-r--r--gfx/skia/patches/archive/0031-Bug-945588-Add-include-guard.patch39
-rw-r--r--gfx/skia/patches/archive/0032-Bug-974900-More-missing-include-guards.patch148
-rw-r--r--gfx/skia/patches/archive/0033-Bug-974900-undef-interface-windows.patch27
-rw-r--r--gfx/skia/patches/archive/SkPostConfig.patch32
-rw-r--r--gfx/skia/patches/archive/arm-fixes.patch191
-rw-r--r--gfx/skia/patches/archive/arm-opts.patch41
-rw-r--r--gfx/skia/patches/archive/fix-comma-end-enum-list.patch380
-rw-r--r--gfx/skia/patches/archive/fix-gradient-clamp.patch211
-rw-r--r--gfx/skia/patches/archive/getpostextpath.patch70
-rw-r--r--gfx/skia/patches/archive/mingw-fix.patch57
-rw-r--r--gfx/skia/patches/archive/new-aa.patch22
-rw-r--r--gfx/skia/patches/archive/old-android-fonthost.patch530
-rw-r--r--gfx/skia/patches/archive/radial-gradients.patch25
-rw-r--r--gfx/skia/patches/archive/skia_restrict_problem.patch461
-rw-r--r--gfx/skia/patches/archive/uninitialized-margin.patch22
-rw-r--r--gfx/skia/patches/archive/user-config.patch40
-rw-r--r--gfx/skia/skia/include/android/SkBRDAllocator.h29
-rw-r--r--gfx/skia/skia/include/android/SkBitmapRegionDecoder.h88
-rw-r--r--gfx/skia/skia/include/animator/SkAnimator.h501
-rw-r--r--gfx/skia/skia/include/animator/SkAnimatorView.h39
-rw-r--r--gfx/skia/skia/include/c/sk_canvas.h159
-rw-r--r--gfx/skia/skia/include/c/sk_data.h70
-rw-r--r--gfx/skia/skia/include/c/sk_image.h71
-rw-r--r--gfx/skia/skia/include/c/sk_maskfilter.h47
-rw-r--r--gfx/skia/skia/include/c/sk_matrix.h49
-rw-r--r--gfx/skia/skia/include/c/sk_paint.h145
-rw-r--r--gfx/skia/skia/include/c/sk_path.h84
-rw-r--r--gfx/skia/skia/include/c/sk_picture.h70
-rw-r--r--gfx/skia/skia/include/c/sk_shader.h143
-rw-r--r--gfx/skia/skia/include/c/sk_surface.h73
-rw-r--r--gfx/skia/skia/include/c/sk_types.h217
-rw-r--r--gfx/skia/skia/include/codec/SkAndroidCodec.h265
-rw-r--r--gfx/skia/skia/include/codec/SkCodec.h798
-rw-r--r--gfx/skia/skia/include/codec/SkEncodedFormat.h28
-rw-r--r--gfx/skia/skia/include/codec/SkEncodedInfo.h199
-rw-r--r--gfx/skia/skia/include/config/SkUserConfig.h174
-rw-r--r--gfx/skia/skia/include/core/SkAnnotation.h50
-rw-r--r--gfx/skia/skia/include/core/SkBBHFactory.h31
-rw-r--r--gfx/skia/skia/include/core/SkBitmap.h830
-rw-r--r--gfx/skia/skia/include/core/SkBitmapDevice.h181
-rw-r--r--gfx/skia/skia/include/core/SkBlendMode.h51
-rw-r--r--gfx/skia/skia/include/core/SkBlitRow.h91
-rw-r--r--gfx/skia/skia/include/core/SkBlurTypes.h29
-rw-r--r--gfx/skia/skia/include/core/SkCanvas.h1774
-rw-r--r--gfx/skia/skia/include/core/SkChunkAlloc.h89
-rw-r--r--gfx/skia/skia/include/core/SkClipOp.h26
-rw-r--r--gfx/skia/skia/include/core/SkClipStack.h520
-rw-r--r--gfx/skia/skia/include/core/SkColor.h198
-rw-r--r--gfx/skia/skia/include/core/SkColorFilter.h193
-rw-r--r--gfx/skia/skia/include/core/SkColorPriv.h1098
-rw-r--r--gfx/skia/skia/include/core/SkColorSpace.h106
-rw-r--r--gfx/skia/skia/include/core/SkColorTable.h86
-rw-r--r--gfx/skia/skia/include/core/SkData.h191
-rw-r--r--gfx/skia/skia/include/core/SkDataTable.h174
-rw-r--r--gfx/skia/skia/include/core/SkDeque.h138
-rw-r--r--gfx/skia/skia/include/core/SkDevice.h374
-rw-r--r--gfx/skia/skia/include/core/SkDocument.h218
-rw-r--r--gfx/skia/skia/include/core/SkDraw.h163
-rw-r--r--gfx/skia/skia/include/core/SkDrawFilter.h55
-rw-r--r--gfx/skia/skia/include/core/SkDrawLooper.h119
-rw-r--r--gfx/skia/skia/include/core/SkDrawable.h81
-rw-r--r--gfx/skia/skia/include/core/SkError.h91
-rw-r--r--gfx/skia/skia/include/core/SkFilterQuality.h26
-rw-r--r--gfx/skia/skia/include/core/SkFlattenable.h134
-rw-r--r--gfx/skia/skia/include/core/SkFlattenableSerialization.h19
-rw-r--r--gfx/skia/skia/include/core/SkFont.h176
-rw-r--r--gfx/skia/skia/include/core/SkFontLCDConfig.h58
-rw-r--r--gfx/skia/skia/include/core/SkFontStyle.h71
-rw-r--r--gfx/skia/skia/include/core/SkGraphics.h180
-rw-r--r--gfx/skia/skia/include/core/SkImage.h483
-rw-r--r--gfx/skia/skia/include/core/SkImageDeserializer.h36
-rw-r--r--gfx/skia/skia/include/core/SkImageEncoder.h119
-rw-r--r--gfx/skia/skia/include/core/SkImageFilter.h433
-rw-r--r--gfx/skia/skia/include/core/SkImageGenerator.h295
-rw-r--r--gfx/skia/skia/include/core/SkImageInfo.h359
-rw-r--r--gfx/skia/skia/include/core/SkLights.h199
-rw-r--r--gfx/skia/skia/include/core/SkMallocPixelRef.h128
-rw-r--r--gfx/skia/skia/include/core/SkMask.h150
-rw-r--r--gfx/skia/skia/include/core/SkMaskFilter.h243
-rw-r--r--gfx/skia/skia/include/core/SkMath.h144
-rw-r--r--gfx/skia/skia/include/core/SkMatrix.h850
-rw-r--r--gfx/skia/skia/include/core/SkMatrix44.h497
-rw-r--r--gfx/skia/skia/include/core/SkMetaData.h175
-rw-r--r--gfx/skia/skia/include/core/SkMilestone.h9
-rw-r--r--gfx/skia/skia/include/core/SkMultiPictureDraw.h75
-rw-r--r--gfx/skia/skia/include/core/SkOSFile.h149
-rw-r--r--gfx/skia/skia/include/core/SkPaint.h1226
-rw-r--r--gfx/skia/skia/include/core/SkPath.h1198
-rw-r--r--gfx/skia/skia/include/core/SkPathEffect.h273
-rw-r--r--gfx/skia/skia/include/core/SkPathMeasure.h123
-rw-r--r--gfx/skia/skia/include/core/SkPathRef.h552
-rw-r--r--gfx/skia/skia/include/core/SkPicture.h259
-rw-r--r--gfx/skia/skia/include/core/SkPictureAnalyzer.h66
-rw-r--r--gfx/skia/skia/include/core/SkPictureRecorder.h138
-rw-r--r--gfx/skia/skia/include/core/SkPixelRef.h405
-rw-r--r--gfx/skia/skia/include/core/SkPixelSerializer.h50
-rw-r--r--gfx/skia/skia/include/core/SkPixmap.h255
-rw-r--r--gfx/skia/skia/include/core/SkPngChunkReader.h45
-rw-r--r--gfx/skia/skia/include/core/SkPoint.h556
-rw-r--r--gfx/skia/skia/include/core/SkPoint3.h124
-rw-r--r--gfx/skia/skia/include/core/SkPostConfig.h371
-rw-r--r--gfx/skia/skia/include/core/SkPreConfig.h272
-rw-r--r--gfx/skia/skia/include/core/SkRRect.h353
-rw-r--r--gfx/skia/skia/include/core/SkRSXform.h67
-rw-r--r--gfx/skia/skia/include/core/SkRWBuffer.h107
-rw-r--r--gfx/skia/skia/include/core/SkRasterizer.h41
-rw-r--r--gfx/skia/skia/include/core/SkRect.h908
-rw-r--r--gfx/skia/skia/include/core/SkRefCnt.h466
-rw-r--r--gfx/skia/skia/include/core/SkRegion.h460
-rw-r--r--gfx/skia/skia/include/core/SkScalar.h268
-rw-r--r--gfx/skia/skia/include/core/SkShader.h549
-rw-r--r--gfx/skia/skia/include/core/SkSize.h110
-rw-r--r--gfx/skia/skia/include/core/SkStream.h467
-rw-r--r--gfx/skia/skia/include/core/SkString.h289
-rw-r--r--gfx/skia/skia/include/core/SkStrokeRec.h151
-rw-r--r--gfx/skia/skia/include/core/SkSurface.h402
-rw-r--r--gfx/skia/skia/include/core/SkSurfaceProps.h82
-rw-r--r--gfx/skia/skia/include/core/SkSwizzle.h19
-rw-r--r--gfx/skia/skia/include/core/SkTLazy.h173
-rw-r--r--gfx/skia/skia/include/core/SkTRegistry.h55
-rw-r--r--gfx/skia/skia/include/core/SkTextBlob.h245
-rw-r--r--gfx/skia/skia/include/core/SkTime.h61
-rw-r--r--gfx/skia/skia/include/core/SkTraceMemoryDump.h80
-rw-r--r--gfx/skia/skia/include/core/SkTypeface.h433
-rw-r--r--gfx/skia/skia/include/core/SkTypes.h730
-rw-r--r--gfx/skia/skia/include/core/SkUnPreMultiply.h58
-rw-r--r--gfx/skia/skia/include/core/SkWriteBuffer.h157
-rw-r--r--gfx/skia/skia/include/core/SkWriter32.h276
-rw-r--r--gfx/skia/skia/include/core/SkXfermode.h331
-rw-r--r--gfx/skia/skia/include/core/SkYUVSizeInfo.h34
-rw-r--r--gfx/skia/skia/include/effects/Sk1DPathEffect.h90
-rw-r--r--gfx/skia/skia/include/effects/Sk2DPathEffect.h106
-rw-r--r--gfx/skia/skia/include/effects/SkAlphaThresholdFilter.h39
-rw-r--r--gfx/skia/skia/include/effects/SkArcToPathEffect.h40
-rw-r--r--gfx/skia/skia/include/effects/SkArithmeticMode.h44
-rw-r--r--gfx/skia/skia/include/effects/SkBlurDrawLooper.h94
-rw-r--r--gfx/skia/skia/include/effects/SkBlurImageFilter.h31
-rw-r--r--gfx/skia/skia/include/effects/SkBlurMaskFilter.h104
-rw-r--r--gfx/skia/skia/include/effects/SkColorCubeFilter.h81
-rw-r--r--gfx/skia/skia/include/effects/SkColorFilterImageFilter.h52
-rw-r--r--gfx/skia/skia/include/effects/SkColorMatrix.h70
-rw-r--r--gfx/skia/skia/include/effects/SkColorMatrixFilter.h37
-rw-r--r--gfx/skia/skia/include/effects/SkComposeImageFilter.h43
-rw-r--r--gfx/skia/skia/include/effects/SkCornerPathEffect.h55
-rw-r--r--gfx/skia/skia/include/effects/SkDashPathEffect.h81
-rw-r--r--gfx/skia/skia/include/effects/SkDiscretePathEffect.h65
-rw-r--r--gfx/skia/skia/include/effects/SkDisplacementMapEffect.h74
-rw-r--r--gfx/skia/skia/include/effects/SkDropShadowImageFilter.h63
-rw-r--r--gfx/skia/skia/include/effects/SkEmbossMaskFilter.h55
-rw-r--r--gfx/skia/skia/include/effects/SkGammaColorFilter.h48
-rw-r--r--gfx/skia/skia/include/effects/SkGaussianEdgeShader.h27
-rw-r--r--gfx/skia/skia/include/effects/SkGradientShader.h258
-rw-r--r--gfx/skia/skia/include/effects/SkImageSource.h59
-rw-r--r--gfx/skia/skia/include/effects/SkLayerDrawLooper.h159
-rw-r--r--gfx/skia/skia/include/effects/SkLayerRasterizer.h96
-rw-r--r--gfx/skia/skia/include/effects/SkLightingImageFilter.h104
-rw-r--r--gfx/skia/skia/include/effects/SkLumaColorFilter.h51
-rw-r--r--gfx/skia/skia/include/effects/SkMagnifierImageFilter.h47
-rw-r--r--gfx/skia/skia/include/effects/SkMatrixConvolutionImageFilter.h133
-rw-r--r--gfx/skia/skia/include/effects/SkMergeImageFilter.h72
-rw-r--r--gfx/skia/skia/include/effects/SkMorphologyImageFilter.h118
-rw-r--r--gfx/skia/skia/include/effects/SkOffsetImageFilter.h46
-rw-r--r--gfx/skia/skia/include/effects/SkPaintFlagsDrawFilter.h24
-rw-r--r--gfx/skia/skia/include/effects/SkPaintImageFilter.h51
-rw-r--r--gfx/skia/skia/include/effects/SkPerlinNoiseShader.h133
-rw-r--r--gfx/skia/skia/include/effects/SkPictureImageFilter.h94
-rw-r--r--gfx/skia/skia/include/effects/SkRRectsGaussianEdgeShader.h37
-rw-r--r--gfx/skia/skia/include/effects/SkTableColorFilter.h54
-rw-r--r--gfx/skia/skia/include/effects/SkTableMaskFilter.h66
-rw-r--r--gfx/skia/skia/include/effects/SkTileImageFilter.h53
-rw-r--r--gfx/skia/skia/include/effects/SkXfermodeImageFilter.h82
-rw-r--r--gfx/skia/skia/include/gpu/GrBlend.h228
-rw-r--r--gfx/skia/skia/include/gpu/GrBuffer.h136
-rw-r--r--gfx/skia/skia/include/gpu/GrBufferAccess.h55
-rw-r--r--gfx/skia/skia/include/gpu/GrCaps.h359
-rw-r--r--gfx/skia/skia/include/gpu/GrClip.h146
-rw-r--r--gfx/skia/skia/include/gpu/GrColor.h309
-rw-r--r--gfx/skia/skia/include/gpu/GrColorSpaceXform.h45
-rw-r--r--gfx/skia/skia/include/gpu/GrConfig.h183
-rw-r--r--gfx/skia/skia/include/gpu/GrContext.h479
-rw-r--r--gfx/skia/skia/include/gpu/GrContextOptions.h87
-rw-r--r--gfx/skia/skia/include/gpu/GrCoordTransform.h116
-rw-r--r--gfx/skia/skia/include/gpu/GrDrawContext.h438
-rw-r--r--gfx/skia/skia/include/gpu/GrFragmentProcessor.h273
-rw-r--r--gfx/skia/skia/include/gpu/GrGpuResource.h309
-rw-r--r--gfx/skia/skia/include/gpu/GrGpuResourceRef.h213
-rw-r--r--gfx/skia/skia/include/gpu/GrInvariantOutput.h342
-rw-r--r--gfx/skia/skia/include/gpu/GrPaint.h200
-rw-r--r--gfx/skia/skia/include/gpu/GrProcessor.h183
-rw-r--r--gfx/skia/skia/include/gpu/GrProcessorUnitTest.h153
-rw-r--r--gfx/skia/skia/include/gpu/GrProgramElement.h148
-rw-r--r--gfx/skia/skia/include/gpu/GrRenderTarget.h166
-rw-r--r--gfx/skia/skia/include/gpu/GrResourceKey.h322
-rw-r--r--gfx/skia/skia/include/gpu/GrShaderVar.h186
-rw-r--r--gfx/skia/skia/include/gpu/GrSurface.h171
-rw-r--r--gfx/skia/skia/include/gpu/GrTestUtils.h137
-rw-r--r--gfx/skia/skia/include/gpu/GrTexture.h74
-rw-r--r--gfx/skia/skia/include/gpu/GrTextureAccess.h71
-rw-r--r--gfx/skia/skia/include/gpu/GrTextureParams.h110
-rw-r--r--gfx/skia/skia/include/gpu/GrTextureProvider.h173
-rw-r--r--gfx/skia/skia/include/gpu/GrTypes.h668
-rw-r--r--gfx/skia/skia/include/gpu/GrTypesPriv.h490
-rw-r--r--gfx/skia/skia/include/gpu/GrXferProcessor.h388
-rw-r--r--gfx/skia/skia/include/gpu/SkGr.h94
-rw-r--r--gfx/skia/skia/include/gpu/effects/GrConstColorProcessor.h66
-rw-r--r--gfx/skia/skia/include/gpu/effects/GrCoverageSetOpXP.h54
-rw-r--r--gfx/skia/skia/include/gpu/effects/GrCustomXfermode.h24
-rw-r--r--gfx/skia/skia/include/gpu/effects/GrPorterDuffXferProcessor.h80
-rw-r--r--gfx/skia/skia/include/gpu/effects/GrXfermodeFragmentProcessor.h34
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLAssembleInterface.h30
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLConfig.h129
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLConfig_chrome.h36
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLExtensions.h74
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLFunctions.h374
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLInterface.h481
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLSLPrettyPrint.h19
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLTypes.h117
-rw-r--r--gfx/skia/skia/include/gpu/vk/GrVkBackendContext.h62
-rw-r--r--gfx/skia/skia/include/gpu/vk/GrVkDefines.h26
-rw-r--r--gfx/skia/skia/include/gpu/vk/GrVkInterface.h206
-rw-r--r--gfx/skia/skia/include/gpu/vk/GrVkTypes.h64
-rw-r--r--gfx/skia/skia/include/images/SkForceLinking.h22
-rw-r--r--gfx/skia/skia/include/images/SkMovie.h80
-rw-r--r--gfx/skia/skia/include/pathops/SkPathOps.h97
-rw-r--r--gfx/skia/skia/include/ports/SkFontConfigInterface.h118
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr.h205
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_FontConfigInterface.h20
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_android.h52
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_custom.h21
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_fontconfig.h22
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_indirect.h105
-rw-r--r--gfx/skia/skia/include/ports/SkRemotableFontMgr.h148
-rw-r--r--gfx/skia/skia/include/ports/SkTypeface_cairo.h18
-rw-r--r--gfx/skia/skia/include/ports/SkTypeface_mac.h49
-rw-r--r--gfx/skia/skia/include/ports/SkTypeface_win.h82
-rw-r--r--gfx/skia/skia/include/private/GrAuditTrail.h186
-rw-r--r--gfx/skia/skia/include/private/GrInstancedPipelineInfo.h49
-rw-r--r--gfx/skia/skia/include/private/GrRenderTargetProxy.h97
-rw-r--r--gfx/skia/skia/include/private/GrSingleOwner.h55
-rw-r--r--gfx/skia/skia/include/private/GrSurfaceProxy.h92
-rw-r--r--gfx/skia/skia/include/private/GrTextureProxy.h46
-rw-r--r--gfx/skia/skia/include/private/GrTextureStripAtlas.h189
-rw-r--r--gfx/skia/skia/include/private/SkAtomics.h159
-rw-r--r--gfx/skia/skia/include/private/SkBitmaskEnum.h34
-rw-r--r--gfx/skia/skia/include/private/SkChecksum.h70
-rw-r--r--gfx/skia/skia/include/private/SkFixed.h159
-rw-r--r--gfx/skia/skia/include/private/SkFloatBits.h104
-rw-r--r--gfx/skia/skia/include/private/SkFloatingPoint.h157
-rw-r--r--gfx/skia/skia/include/private/SkLeanWindows.h34
-rw-r--r--gfx/skia/skia/include/private/SkMiniRecorder.h56
-rw-r--r--gfx/skia/skia/include/private/SkMutex.h93
-rw-r--r--gfx/skia/skia/include/private/SkOnce.h50
-rw-r--r--gfx/skia/skia/include/private/SkRecords.h358
-rw-r--r--gfx/skia/skia/include/private/SkSemaphore.h83
-rw-r--r--gfx/skia/skia/include/private/SkShadowParams.h48
-rw-r--r--gfx/skia/skia/include/private/SkSpinlock.h37
-rw-r--r--gfx/skia/skia/include/private/SkTArray.h560
-rw-r--r--gfx/skia/skia/include/private/SkTDArray.h381
-rw-r--r--gfx/skia/skia/include/private/SkTDict.h146
-rw-r--r--gfx/skia/skia/include/private/SkTFitsIn.h227
-rw-r--r--gfx/skia/skia/include/private/SkTHash.h301
-rw-r--r--gfx/skia/skia/include/private/SkTLogic.h105
-rw-r--r--gfx/skia/skia/include/private/SkTSearch.h146
-rw-r--r--gfx/skia/skia/include/private/SkTemplates.h492
-rw-r--r--gfx/skia/skia/include/private/SkThreadID.h19
-rw-r--r--gfx/skia/skia/include/private/SkWeakRefCnt.h175
-rw-r--r--gfx/skia/skia/include/svg/SkSVGCanvas.h31
-rw-r--r--gfx/skia/skia/include/utils/SkBoundaryPatch.h66
-rw-r--r--gfx/skia/skia/include/utils/SkCamera.h153
-rw-r--r--gfx/skia/skia/include/utils/SkCanvasStateUtils.h78
-rw-r--r--gfx/skia/skia/include/utils/SkDumpCanvas.h168
-rw-r--r--gfx/skia/skia/include/utils/SkEventTracer.h70
-rw-r--r--gfx/skia/skia/include/utils/SkFrontBufferedStream.h37
-rw-r--r--gfx/skia/skia/include/utils/SkInterpolator.h137
-rw-r--r--gfx/skia/skia/include/utils/SkLayer.h130
-rw-r--r--gfx/skia/skia/include/utils/SkLua.h75
-rw-r--r--gfx/skia/skia/include/utils/SkLuaCanvas.h82
-rw-r--r--gfx/skia/skia/include/utils/SkMeshUtils.h50
-rw-r--r--gfx/skia/skia/include/utils/SkNWayCanvas.h94
-rw-r--r--gfx/skia/skia/include/utils/SkNoSaveLayerCanvas.h33
-rw-r--r--gfx/skia/skia/include/utils/SkNullCanvas.h20
-rw-r--r--gfx/skia/skia/include/utils/SkPaintFilterCanvas.h111
-rw-r--r--gfx/skia/skia/include/utils/SkParse.h36
-rw-r--r--gfx/skia/skia/include/utils/SkParsePath.h23
-rw-r--r--gfx/skia/skia/include/utils/SkPictureUtils.h28
-rw-r--r--gfx/skia/skia/include/utils/SkRandom.h184
-rw-r--r--gfx/skia/skia/include/utils/SkTextBox.h87
-rw-r--r--gfx/skia/skia/include/utils/mac/SkCGUtils.h76
-rw-r--r--gfx/skia/skia/include/views/SkApplication.h30
-rw-r--r--gfx/skia/skia/include/views/SkEvent.h293
-rw-r--r--gfx/skia/skia/include/views/SkEventSink.h112
-rw-r--r--gfx/skia/skia/include/views/SkKey.h62
-rw-r--r--gfx/skia/skia/include/views/SkOSMenu.h182
-rw-r--r--gfx/skia/skia/include/views/SkOSWindow_Mac.h60
-rw-r--r--gfx/skia/skia/include/views/SkOSWindow_SDL.h54
-rw-r--r--gfx/skia/skia/include/views/SkOSWindow_Unix.h86
-rw-r--r--gfx/skia/skia/include/views/SkOSWindow_Win.h135
-rw-r--r--gfx/skia/skia/include/views/SkOSWindow_iOS.h50
-rw-r--r--gfx/skia/skia/include/views/SkSystemEventTypes.h25
-rw-r--r--gfx/skia/skia/include/views/SkTouchGesture.h83
-rw-r--r--gfx/skia/skia/include/views/SkView.h405
-rw-r--r--gfx/skia/skia/include/views/SkWindow.h139
-rw-r--r--gfx/skia/skia/include/xml/SkDOM.h99
-rw-r--r--gfx/skia/skia/include/xml/SkXMLParser.h87
-rw-r--r--gfx/skia/skia/include/xml/SkXMLWriter.h95
-rw-r--r--gfx/skia/skia/src/android/SkBitmapRegionCodec.cpp142
-rw-r--r--gfx/skia/skia/src/android/SkBitmapRegionCodec.h37
-rw-r--r--gfx/skia/skia/src/android/SkBitmapRegionDecoder.cpp48
-rw-r--r--gfx/skia/skia/src/android/SkBitmapRegionDecoderPriv.h59
-rw-r--r--gfx/skia/skia/src/animator/SkADrawable.cpp24
-rw-r--r--gfx/skia/skia/src/animator/SkADrawable.h28
-rw-r--r--gfx/skia/skia/src/animator/SkAnimate.h34
-rw-r--r--gfx/skia/skia/src/animator/SkAnimate3DSchema.xsd39
-rw-r--r--gfx/skia/skia/src/animator/SkAnimate3DSchema.xsx3
-rw-r--r--gfx/skia/skia/src/animator/SkAnimateActive.cpp504
-rw-r--r--gfx/skia/skia/src/animator/SkAnimateActive.h79
-rw-r--r--gfx/skia/skia/src/animator/SkAnimateBase.cpp235
-rw-r--r--gfx/skia/skia/src/animator/SkAnimateBase.h83
-rw-r--r--gfx/skia/skia/src/animator/SkAnimateField.cpp111
-rw-r--r--gfx/skia/skia/src/animator/SkAnimateMaker.cpp372
-rw-r--r--gfx/skia/skia/src/animator/SkAnimateMaker.h160
-rw-r--r--gfx/skia/skia/src/animator/SkAnimateProperties.h21
-rw-r--r--gfx/skia/skia/src/animator/SkAnimateSchema.xsd2787
-rw-r--r--gfx/skia/skia/src/animator/SkAnimateSchema.xsx3
-rw-r--r--gfx/skia/skia/src/animator/SkAnimateSet.cpp87
-rw-r--r--gfx/skia/skia/src/animator/SkAnimateSet.h27
-rw-r--r--gfx/skia/skia/src/animator/SkAnimator.cpp704
-rw-r--r--gfx/skia/skia/src/animator/SkAnimatorScript.cpp594
-rw-r--r--gfx/skia/skia/src/animator/SkAnimatorScript.h75
-rw-r--r--gfx/skia/skia/src/animator/SkAnimatorScript2.cpp622
-rw-r--r--gfx/skia/skia/src/animator/SkAnimatorScript2.h50
-rw-r--r--gfx/skia/skia/src/animator/SkBoundable.cpp55
-rw-r--r--gfx/skia/skia/src/animator/SkBoundable.h41
-rw-r--r--gfx/skia/skia/src/animator/SkBuildCondensedInfo.cpp282
-rw-r--r--gfx/skia/skia/src/animator/SkCondensedDebug.inc1387
-rw-r--r--gfx/skia/skia/src/animator/SkCondensedRelease.inc1365
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayAdd.cpp245
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayAdd.h71
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayApply.cpp804
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayApply.h106
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayBounds.cpp43
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayBounds.h24
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayEvent.cpp252
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayEvent.h66
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayEvents.cpp113
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayEvents.h42
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayInclude.cpp52
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayInclude.h25
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayInput.cpp55
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayInput.h33
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayList.cpp158
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayList.h68
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayMath.cpp229
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayMath.h31
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayMovie.cpp128
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayMovie.h51
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayNumber.cpp70
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayNumber.h22
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayPost.cpp298
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayPost.h59
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayRandom.cpp65
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayRandom.h40
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayScreenplay.cpp20
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayScreenplay.h21
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayType.cpp761
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayType.h206
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayTypes.cpp214
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayTypes.h104
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayXMLParser.cpp316
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayXMLParser.h91
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayable.cpp540
-rw-r--r--gfx/skia/skia/src/animator/SkDisplayable.h112
-rw-r--r--gfx/skia/skia/src/animator/SkDraw3D.cpp106
-rw-r--r--gfx/skia/skia/src/animator/SkDraw3D.h50
-rw-r--r--gfx/skia/skia/src/animator/SkDrawBitmap.cpp201
-rw-r--r--gfx/skia/skia/src/animator/SkDrawBitmap.h73
-rw-r--r--gfx/skia/skia/src/animator/SkDrawBlur.cpp33
-rw-r--r--gfx/skia/skia/src/animator/SkDrawBlur.h25
-rw-r--r--gfx/skia/skia/src/animator/SkDrawClip.cpp39
-rw-r--r--gfx/skia/skia/src/animator/SkDrawClip.h29
-rw-r--r--gfx/skia/skia/src/animator/SkDrawColor.cpp265
-rw-r--r--gfx/skia/skia/src/animator/SkDrawColor.h40
-rw-r--r--gfx/skia/skia/src/animator/SkDrawDash.cpp35
-rw-r--r--gfx/skia/skia/src/animator/SkDrawDash.h24
-rw-r--r--gfx/skia/skia/src/animator/SkDrawDiscrete.cpp34
-rw-r--r--gfx/skia/skia/src/animator/SkDrawDiscrete.h22
-rw-r--r--gfx/skia/skia/src/animator/SkDrawEmboss.cpp34
-rw-r--r--gfx/skia/skia/src/animator/SkDrawEmboss.h28
-rw-r--r--gfx/skia/skia/src/animator/SkDrawExtraPathEffect.cpp522
-rw-r--r--gfx/skia/skia/src/animator/SkDrawExtraPathEffect.h15
-rw-r--r--gfx/skia/skia/src/animator/SkDrawFull.cpp18
-rw-r--r--gfx/skia/skia/src/animator/SkDrawFull.h22
-rw-r--r--gfx/skia/skia/src/animator/SkDrawGradient.cpp168
-rw-r--r--gfx/skia/skia/src/animator/SkDrawGradient.h64
-rw-r--r--gfx/skia/skia/src/animator/SkDrawGroup.cpp321
-rw-r--r--gfx/skia/skia/src/animator/SkDrawGroup.h72
-rw-r--r--gfx/skia/skia/src/animator/SkDrawLine.cpp35
-rw-r--r--gfx/skia/skia/src/animator/SkDrawLine.h28
-rw-r--r--gfx/skia/skia/src/animator/SkDrawMatrix.cpp268
-rw-r--r--gfx/skia/skia/src/animator/SkDrawMatrix.h74
-rw-r--r--gfx/skia/skia/src/animator/SkDrawOval.cpp28
-rw-r--r--gfx/skia/skia/src/animator/SkDrawOval.h22
-rw-r--r--gfx/skia/skia/src/animator/SkDrawPaint.cpp267
-rw-r--r--gfx/skia/skia/src/animator/SkDrawPaint.h79
-rw-r--r--gfx/skia/skia/src/animator/SkDrawPath.cpp220
-rw-r--r--gfx/skia/skia/src/animator/SkDrawPath.h69
-rw-r--r--gfx/skia/skia/src/animator/SkDrawPoint.cpp44
-rw-r--r--gfx/skia/skia/src/animator/SkDrawPoint.h33
-rw-r--r--gfx/skia/skia/src/animator/SkDrawRectangle.cpp142
-rw-r--r--gfx/skia/skia/src/animator/SkDrawRectangle.h55
-rw-r--r--gfx/skia/skia/src/animator/SkDrawSaveLayer.cpp76
-rw-r--r--gfx/skia/skia/src/animator/SkDrawSaveLayer.h36
-rw-r--r--gfx/skia/skia/src/animator/SkDrawShader.cpp78
-rw-r--r--gfx/skia/skia/src/animator/SkDrawShader.h28
-rw-r--r--gfx/skia/skia/src/animator/SkDrawText.cpp55
-rw-r--r--gfx/skia/skia/src/animator/SkDrawText.h36
-rw-r--r--gfx/skia/skia/src/animator/SkDrawTextBox.cpp80
-rw-r--r--gfx/skia/skia/src/animator/SkDrawTextBox.h38
-rw-r--r--gfx/skia/skia/src/animator/SkDrawTo.cpp55
-rw-r--r--gfx/skia/skia/src/animator/SkDrawTo.h34
-rw-r--r--gfx/skia/skia/src/animator/SkDump.cpp150
-rw-r--r--gfx/skia/skia/src/animator/SkDump.h42
-rw-r--r--gfx/skia/skia/src/animator/SkExtraPathEffects.xsd33
-rw-r--r--gfx/skia/skia/src/animator/SkExtras.h34
-rw-r--r--gfx/skia/skia/src/animator/SkGetCondensedInfo.cpp121
-rw-r--r--gfx/skia/skia/src/animator/SkHitClear.cpp32
-rw-r--r--gfx/skia/skia/src/animator/SkHitClear.h25
-rw-r--r--gfx/skia/skia/src/animator/SkHitTest.cpp74
-rw-r--r--gfx/skia/skia/src/animator/SkHitTest.h30
-rw-r--r--gfx/skia/skia/src/animator/SkIntArray.h55
-rw-r--r--gfx/skia/skia/src/animator/SkMatrixParts.cpp292
-rw-r--r--gfx/skia/skia/src/animator/SkMatrixParts.h119
-rw-r--r--gfx/skia/skia/src/animator/SkMemberInfo.cpp559
-rw-r--r--gfx/skia/skia/src/animator/SkMemberInfo.h276
-rw-r--r--gfx/skia/skia/src/animator/SkOpArray.cpp23
-rw-r--r--gfx/skia/skia/src/animator/SkOpArray.h29
-rw-r--r--gfx/skia/skia/src/animator/SkOperand.h46
-rw-r--r--gfx/skia/skia/src/animator/SkOperand2.h54
-rw-r--r--gfx/skia/skia/src/animator/SkOperandInterpolator.h47
-rw-r--r--gfx/skia/skia/src/animator/SkOperandIterpolator.cpp147
-rw-r--r--gfx/skia/skia/src/animator/SkPaintPart.cpp99
-rw-r--r--gfx/skia/skia/src/animator/SkPaintPart.h72
-rw-r--r--gfx/skia/skia/src/animator/SkParseSVGPath.cpp234
-rw-r--r--gfx/skia/skia/src/animator/SkPathParts.cpp318
-rw-r--r--gfx/skia/skia/src/animator/SkPathParts.h164
-rw-r--r--gfx/skia/skia/src/animator/SkPostParts.cpp56
-rw-r--r--gfx/skia/skia/src/animator/SkPostParts.h31
-rw-r--r--gfx/skia/skia/src/animator/SkScript.cpp1890
-rw-r--r--gfx/skia/skia/src/animator/SkScript.h264
-rw-r--r--gfx/skia/skia/src/animator/SkScript2.h293
-rw-r--r--gfx/skia/skia/src/animator/SkScriptCallBack.h67
-rw-r--r--gfx/skia/skia/src/animator/SkScriptDecompile.cpp211
-rw-r--r--gfx/skia/skia/src/animator/SkScriptRuntime.cpp351
-rw-r--r--gfx/skia/skia/src/animator/SkScriptRuntime.h50
-rw-r--r--gfx/skia/skia/src/animator/SkScriptTokenizer.cpp1506
-rw-r--r--gfx/skia/skia/src/animator/SkSnapshot.cpp67
-rw-r--r--gfx/skia/skia/src/animator/SkSnapshot.h29
-rw-r--r--gfx/skia/skia/src/animator/SkTDArray_Experimental.h142
-rw-r--r--gfx/skia/skia/src/animator/SkTDStack.h110
-rw-r--r--gfx/skia/skia/src/animator/SkTextOnPath.cpp39
-rw-r--r--gfx/skia/skia/src/animator/SkTextOnPath.h30
-rw-r--r--gfx/skia/skia/src/animator/SkTextToPath.cpp47
-rw-r--r--gfx/skia/skia/src/animator/SkTextToPath.h31
-rw-r--r--gfx/skia/skia/src/animator/SkTypedArray.cpp179
-rw-r--r--gfx/skia/skia/src/animator/SkTypedArray.h31
-rw-r--r--gfx/skia/skia/src/animator/SkXMLAnimatorWriter.cpp84
-rw-r--r--gfx/skia/skia/src/animator/SkXMLAnimatorWriter.h36
-rw-r--r--gfx/skia/skia/src/animator/thingstodo.txt21
-rw-r--r--gfx/skia/skia/src/c/sk_c_from_to.h34
-rw-r--r--gfx/skia/skia/src/c/sk_paint.cpp173
-rw-r--r--gfx/skia/skia/src/c/sk_surface.cpp707
-rw-r--r--gfx/skia/skia/src/c/sk_types_priv.h41
-rw-r--r--gfx/skia/skia/src/codec/SkAndroidCodec.cpp197
-rw-r--r--gfx/skia/skia/src/codec/SkBmpCodec.cpp629
-rw-r--r--gfx/skia/skia/src/codec/SkBmpCodec.h148
-rw-r--r--gfx/skia/skia/src/codec/SkBmpMaskCodec.cpp94
-rw-r--r--gfx/skia/skia/src/codec/SkBmpMaskCodec.h60
-rw-r--r--gfx/skia/skia/src/codec/SkBmpRLECodec.cpp557
-rw-r--r--gfx/skia/skia/src/codec/SkBmpRLECodec.h116
-rw-r--r--gfx/skia/skia/src/codec/SkBmpStandardCodec.cpp312
-rw-r--r--gfx/skia/skia/src/codec/SkBmpStandardCodec.h99
-rw-r--r--gfx/skia/skia/src/codec/SkCodec.cpp485
-rw-r--r--gfx/skia/skia/src/codec/SkCodecImageGenerator.cpp71
-rw-r--r--gfx/skia/skia/src/codec/SkCodecImageGenerator.h43
-rw-r--r--gfx/skia/skia/src/codec/SkCodecPriv.h402
-rw-r--r--gfx/skia/skia/src/codec/SkGifCodec.cpp607
-rw-r--r--gfx/skia/skia/src/codec/SkGifCodec.h205
-rw-r--r--gfx/skia/skia/src/codec/SkIcoCodec.cpp391
-rw-r--r--gfx/skia/skia/src/codec/SkIcoCodec.h101
-rw-r--r--gfx/skia/skia/src/codec/SkJpegCodec.cpp934
-rw-r--r--gfx/skia/skia/src/codec/SkJpegCodec.h147
-rw-r--r--gfx/skia/skia/src/codec/SkJpegDecoderMgr.cpp88
-rw-r--r--gfx/skia/skia/src/codec/SkJpegDecoderMgr.h74
-rw-r--r--gfx/skia/skia/src/codec/SkJpegUtility.cpp95
-rw-r--r--gfx/skia/skia/src/codec/SkJpegUtility.h50
-rw-r--r--gfx/skia/skia/src/codec/SkMaskSwizzler.cpp568
-rw-r--r--gfx/skia/skia/src/codec/SkMaskSwizzler.h71
-rw-r--r--gfx/skia/skia/src/codec/SkMasks.cpp161
-rw-r--r--gfx/skia/skia/src/codec/SkMasks.h86
-rw-r--r--gfx/skia/skia/src/codec/SkPngCodec.cpp1309
-rw-r--r--gfx/skia/skia/src/codec/SkPngCodec.h141
-rw-r--r--gfx/skia/skia/src/codec/SkRawAdapterCodec.cpp30
-rw-r--r--gfx/skia/skia/src/codec/SkRawAdapterCodec.h42
-rw-r--r--gfx/skia/skia/src/codec/SkRawCodec.cpp782
-rw-r--r--gfx/skia/skia/src/codec/SkRawCodec.h61
-rw-r--r--gfx/skia/skia/src/codec/SkSampledCodec.cpp358
-rw-r--r--gfx/skia/skia/src/codec/SkSampledCodec.h60
-rw-r--r--gfx/skia/skia/src/codec/SkSampler.cpp96
-rw-r--r--gfx/skia/skia/src/codec/SkSampler.h93
-rw-r--r--gfx/skia/skia/src/codec/SkSwizzler.cpp1045
-rw-r--r--gfx/skia/skia/src/codec/SkSwizzler.h210
-rw-r--r--gfx/skia/skia/src/codec/SkWbmpCodec.cpp217
-rw-r--r--gfx/skia/skia/src/codec/SkWbmpCodec.h64
-rw-r--r--gfx/skia/skia/src/codec/SkWebpAdapterCodec.cpp45
-rw-r--r--gfx/skia/skia/src/codec/SkWebpAdapterCodec.h37
-rw-r--r--gfx/skia/skia/src/codec/SkWebpCodec.cpp334
-rw-r--r--gfx/skia/skia/src/codec/SkWebpCodec.h52
-rw-r--r--gfx/skia/skia/src/core/Sk4px.h241
-rw-r--r--gfx/skia/skia/src/core/Sk4x4f.h153
-rw-r--r--gfx/skia/skia/src/core/SkAAClip.cpp2220
-rw-r--r--gfx/skia/skia/src/core/SkAAClip.h136
-rw-r--r--gfx/skia/skia/src/core/SkAdvancedTypefaceMetrics.h103
-rw-r--r--gfx/skia/skia/src/core/SkAlphaRuns.cpp78
-rw-r--r--gfx/skia/skia/src/core/SkAnnotation.cpp48
-rw-r--r--gfx/skia/skia/src/core/SkAnnotationKeys.h33
-rw-r--r--gfx/skia/skia/src/core/SkAntiRun.h190
-rw-r--r--gfx/skia/skia/src/core/SkAutoKern.h51
-rw-r--r--gfx/skia/skia/src/core/SkAutoPixmapStorage.cpp58
-rw-r--r--gfx/skia/skia/src/core/SkAutoPixmapStorage.h78
-rw-r--r--gfx/skia/skia/src/core/SkBBHFactory.cpp16
-rw-r--r--gfx/skia/skia/src/core/SkBBoxHierarchy.h43
-rw-r--r--gfx/skia/skia/src/core/SkBigPicture.cpp94
-rw-r--r--gfx/skia/skia/src/core/SkBigPicture.h84
-rw-r--r--gfx/skia/skia/src/core/SkBitmap.cpp1261
-rw-r--r--gfx/skia/skia/src/core/SkBitmapCache.cpp311
-rw-r--r--gfx/skia/skia/src/core/SkBitmapCache.h81
-rw-r--r--gfx/skia/skia/src/core/SkBitmapController.cpp232
-rw-r--r--gfx/skia/skia/src/core/SkBitmapController.h70
-rw-r--r--gfx/skia/skia/src/core/SkBitmapDevice.cpp454
-rw-r--r--gfx/skia/skia/src/core/SkBitmapFilter.h209
-rw-r--r--gfx/skia/skia/src/core/SkBitmapProcShader.cpp244
-rw-r--r--gfx/skia/skia/src/core/SkBitmapProcShader.h26
-rw-r--r--gfx/skia/skia/src/core/SkBitmapProcState.cpp828
-rw-r--r--gfx/skia/skia/src/core/SkBitmapProcState.h269
-rw-r--r--gfx/skia/skia/src/core/SkBitmapProcState_filter.h124
-rw-r--r--gfx/skia/skia/src/core/SkBitmapProcState_matrix.h159
-rw-r--r--gfx/skia/skia/src/core/SkBitmapProcState_matrixProcs.cpp520
-rw-r--r--gfx/skia/skia/src/core/SkBitmapProcState_matrix_template.h118
-rw-r--r--gfx/skia/skia/src/core/SkBitmapProcState_procs.h251
-rw-r--r--gfx/skia/skia/src/core/SkBitmapProcState_sample.h228
-rw-r--r--gfx/skia/skia/src/core/SkBitmapProcState_shaderproc.h89
-rw-r--r--gfx/skia/skia/src/core/SkBitmapProcState_utils.h40
-rw-r--r--gfx/skia/skia/src/core/SkBitmapProvider.cpp83
-rw-r--r--gfx/skia/skia/src/core/SkBitmapProvider.h45
-rw-r--r--gfx/skia/skia/src/core/SkBitmapScaler.cpp268
-rw-r--r--gfx/skia/skia/src/core/SkBitmapScaler.h52
-rw-r--r--gfx/skia/skia/src/core/SkBlendModePriv.h19
-rw-r--r--gfx/skia/skia/src/core/SkBlitBWMaskTemplate.h127
-rw-r--r--gfx/skia/skia/src/core/SkBlitMask.h78
-rw-r--r--gfx/skia/skia/src/core/SkBlitMask_D32.cpp289
-rw-r--r--gfx/skia/skia/src/core/SkBlitRow_D16.cpp281
-rw-r--r--gfx/skia/skia/src/core/SkBlitRow_D32.cpp119
-rw-r--r--gfx/skia/skia/src/core/SkBlitter.cpp1018
-rw-r--r--gfx/skia/skia/src/core/SkBlitter.h258
-rw-r--r--gfx/skia/skia/src/core/SkBlitter_A8.cpp430
-rw-r--r--gfx/skia/skia/src/core/SkBlitter_ARGB32.cpp697
-rw-r--r--gfx/skia/skia/src/core/SkBlitter_PM4f.cpp436
-rw-r--r--gfx/skia/skia/src/core/SkBlitter_RGB16.cpp928
-rw-r--r--gfx/skia/skia/src/core/SkBlitter_Sprite.cpp189
-rw-r--r--gfx/skia/skia/src/core/SkBlurImageFilter.cpp299
-rw-r--r--gfx/skia/skia/src/core/SkBuffer.cpp137
-rw-r--r--gfx/skia/skia/src/core/SkBuffer.h170
-rw-r--r--gfx/skia/skia/src/core/SkCachedData.cpp198
-rw-r--r--gfx/skia/skia/src/core/SkCachedData.h112
-rw-r--r--gfx/skia/skia/src/core/SkCanvas.cpp3406
-rw-r--r--gfx/skia/skia/src/core/SkCanvasPriv.h23
-rw-r--r--gfx/skia/skia/src/core/SkChunkAlloc.cpp235
-rw-r--r--gfx/skia/skia/src/core/SkClipStack.cpp990
-rw-r--r--gfx/skia/skia/src/core/SkColor.cpp183
-rw-r--r--gfx/skia/skia/src/core/SkColorFilter.cpp195
-rw-r--r--gfx/skia/skia/src/core/SkColorFilterShader.cpp140
-rw-r--r--gfx/skia/skia/src/core/SkColorFilterShader.h59
-rw-r--r--gfx/skia/skia/src/core/SkColorMatrixFilterRowMajor255.cpp438
-rw-r--r--gfx/skia/skia/src/core/SkColorMatrixFilterRowMajor255.h48
-rw-r--r--gfx/skia/skia/src/core/SkColorShader.cpp303
-rw-r--r--gfx/skia/skia/src/core/SkColorShader.h129
-rw-r--r--gfx/skia/skia/src/core/SkColorSpace.cpp472
-rw-r--r--gfx/skia/skia/src/core/SkColorSpacePriv.h12
-rw-r--r--gfx/skia/skia/src/core/SkColorSpaceXform.cpp1440
-rw-r--r--gfx/skia/skia/src/core/SkColorSpaceXform.h102
-rw-r--r--gfx/skia/skia/src/core/SkColorSpace_Base.h240
-rw-r--r--gfx/skia/skia/src/core/SkColorSpace_ICC.cpp1338
-rw-r--r--gfx/skia/skia/src/core/SkColorTable.cpp110
-rw-r--r--gfx/skia/skia/src/core/SkComposeShader.cpp251
-rw-r--r--gfx/skia/skia/src/core/SkComposeShader.h86
-rw-r--r--gfx/skia/skia/src/core/SkConfig8888.cpp369
-rw-r--r--gfx/skia/skia/src/core/SkConfig8888.h48
-rw-r--r--gfx/skia/skia/src/core/SkConvolver.cpp486
-rw-r--r--gfx/skia/skia/src/core/SkConvolver.h207
-rw-r--r--gfx/skia/skia/src/core/SkCoreBlitters.h217
-rw-r--r--gfx/skia/skia/src/core/SkCpu.cpp114
-rw-r--r--gfx/skia/skia/src/core/SkCpu.h96
-rw-r--r--gfx/skia/skia/src/core/SkCubicClipper.cpp153
-rw-r--r--gfx/skia/skia/src/core/SkCubicClipper.h34
-rw-r--r--gfx/skia/skia/src/core/SkData.cpp198
-rw-r--r--gfx/skia/skia/src/core/SkDataTable.cpp181
-rw-r--r--gfx/skia/skia/src/core/SkDebug.cpp14
-rw-r--r--gfx/skia/skia/src/core/SkDeduper.h39
-rw-r--r--gfx/skia/skia/src/core/SkDeque.cpp307
-rw-r--r--gfx/skia/skia/src/core/SkDescriptor.h176
-rw-r--r--gfx/skia/skia/src/core/SkDevice.cpp577
-rw-r--r--gfx/skia/skia/src/core/SkDeviceLooper.cpp127
-rw-r--r--gfx/skia/skia/src/core/SkDeviceLooper.h95
-rw-r--r--gfx/skia/skia/src/core/SkDeviceProfile.cpp77
-rw-r--r--gfx/skia/skia/src/core/SkDeviceProfile.h98
-rw-r--r--gfx/skia/skia/src/core/SkDiscardableMemory.h65
-rwxr-xr-xgfx/skia/skia/src/core/SkDistanceFieldGen.cpp521
-rw-r--r--gfx/skia/skia/src/core/SkDistanceFieldGen.h62
-rw-r--r--gfx/skia/skia/src/core/SkDither.cpp54
-rw-r--r--gfx/skia/skia/src/core/SkDither.h197
-rw-r--r--gfx/skia/skia/src/core/SkDocument.cpp95
-rw-r--r--gfx/skia/skia/src/core/SkDraw.cpp2120
-rw-r--r--gfx/skia/skia/src/core/SkDrawLooper.cpp68
-rw-r--r--gfx/skia/skia/src/core/SkDrawProcs.h66
-rw-r--r--gfx/skia/skia/src/core/SkDrawable.cpp85
-rw-r--r--gfx/skia/skia/src/core/SkEdge.cpp479
-rw-r--r--gfx/skia/skia/src/core/SkEdge.h134
-rw-r--r--gfx/skia/skia/src/core/SkEdgeBuilder.cpp337
-rw-r--r--gfx/skia/skia/src/core/SkEdgeBuilder.h60
-rw-r--r--gfx/skia/skia/src/core/SkEdgeClipper.cpp507
-rw-r--r--gfx/skia/skia/src/core/SkEdgeClipper.h55
-rw-r--r--gfx/skia/skia/src/core/SkEmptyShader.h47
-rw-r--r--gfx/skia/skia/src/core/SkEndian.h194
-rw-r--r--gfx/skia/skia/src/core/SkError.cpp130
-rw-r--r--gfx/skia/skia/src/core/SkErrorInternals.h26
-rw-r--r--gfx/skia/skia/src/core/SkExchange.h25
-rw-r--r--gfx/skia/skia/src/core/SkFDot6.h78
-rw-r--r--gfx/skia/skia/src/core/SkFilterProc.cpp293
-rw-r--r--gfx/skia/skia/src/core/SkFilterProc.h134
-rw-r--r--gfx/skia/skia/src/core/SkFindAndPlaceGlyph.h736
-rw-r--r--gfx/skia/skia/src/core/SkFlattenable.cpp125
-rw-r--r--gfx/skia/skia/src/core/SkFlattenableSerialization.cpp27
-rw-r--r--gfx/skia/skia/src/core/SkFont.cpp151
-rw-r--r--gfx/skia/skia/src/core/SkFontDescriptor.cpp149
-rw-r--r--gfx/skia/skia/src/core/SkFontDescriptor.h83
-rw-r--r--gfx/skia/skia/src/core/SkFontLCDConfig.cpp27
-rw-r--r--gfx/skia/skia/src/core/SkFontMgr.cpp295
-rw-r--r--gfx/skia/skia/src/core/SkFontStream.cpp210
-rw-r--r--gfx/skia/skia/src/core/SkFontStream.h49
-rw-r--r--gfx/skia/skia/src/core/SkFontStyle.cpp32
-rw-r--r--gfx/skia/skia/src/core/SkForceCPlusPlusLinking.cpp20
-rw-r--r--gfx/skia/skia/src/core/SkFuzzLogging.h23
-rw-r--r--gfx/skia/skia/src/core/SkGeometry.cpp1421
-rw-r--r--gfx/skia/skia/src/core/SkGeometry.h409
-rw-r--r--gfx/skia/skia/src/core/SkGlobalInitialization_core.cpp64
-rw-r--r--gfx/skia/skia/src/core/SkGlyph.h204
-rw-r--r--gfx/skia/skia/src/core/SkGlyphCache.cpp859
-rw-r--r--gfx/skia/skia/src/core/SkGlyphCache.h312
-rw-r--r--gfx/skia/skia/src/core/SkGlyphCache_Globals.h87
-rw-r--r--gfx/skia/skia/src/core/SkGpuBlurUtils.cpp383
-rw-r--r--gfx/skia/skia/src/core/SkGpuBlurUtils.h46
-rw-r--r--gfx/skia/skia/src/core/SkGraphics.cpp112
-rw-r--r--gfx/skia/skia/src/core/SkHalf.cpp97
-rw-r--r--gfx/skia/skia/src/core/SkHalf.h88
-rw-r--r--gfx/skia/skia/src/core/SkImageCacherator.cpp356
-rw-r--r--gfx/skia/skia/src/core/SkImageCacherator.h105
-rw-r--r--gfx/skia/skia/src/core/SkImageFilter.cpp471
-rw-r--r--gfx/skia/skia/src/core/SkImageFilterCache.cpp134
-rw-r--r--gfx/skia/skia/src/core/SkImageFilterCache.h64
-rw-r--r--gfx/skia/skia/src/core/SkImageGenerator.cpp219
-rw-r--r--gfx/skia/skia/src/core/SkImageGeneratorPriv.h40
-rw-r--r--gfx/skia/skia/src/core/SkImageInfo.cpp137
-rw-r--r--gfx/skia/skia/src/core/SkImagePriv.h103
-rw-r--r--gfx/skia/skia/src/core/SkLatticeIter.cpp288
-rw-r--r--gfx/skia/skia/src/core/SkLatticeIter.h62
-rw-r--r--gfx/skia/skia/src/core/SkLightingShader.cpp516
-rw-r--r--gfx/skia/skia/src/core/SkLightingShader.h39
-rw-r--r--gfx/skia/skia/src/core/SkLights.cpp88
-rw-r--r--gfx/skia/skia/src/core/SkLineClipper.cpp261
-rw-r--r--gfx/skia/skia/src/core/SkLineClipper.h45
-rw-r--r--gfx/skia/skia/src/core/SkLinearBitmapPipeline.cpp743
-rw-r--r--gfx/skia/skia/src/core/SkLinearBitmapPipeline.h181
-rw-r--r--gfx/skia/skia/src/core/SkLinearBitmapPipeline_core.h250
-rw-r--r--gfx/skia/skia/src/core/SkLinearBitmapPipeline_matrix.h118
-rw-r--r--gfx/skia/skia/src/core/SkLinearBitmapPipeline_sample.h1036
-rw-r--r--gfx/skia/skia/src/core/SkLinearBitmapPipeline_tile.h423
-rw-r--r--gfx/skia/skia/src/core/SkLiteDL.cpp815
-rw-r--r--gfx/skia/skia/src/core/SkLiteDL.h109
-rw-r--r--gfx/skia/skia/src/core/SkLiteRecorder.cpp209
-rw-r--r--gfx/skia/skia/src/core/SkLiteRecorder.h96
-rw-r--r--gfx/skia/skia/src/core/SkLocalMatrixImageFilter.cpp63
-rw-r--r--gfx/skia/skia/src/core/SkLocalMatrixImageFilter.h44
-rw-r--r--gfx/skia/skia/src/core/SkLocalMatrixShader.cpp83
-rw-r--r--gfx/skia/skia/src/core/SkLocalMatrixShader.h66
-rw-r--r--gfx/skia/skia/src/core/SkMD5.cpp258
-rw-r--r--gfx/skia/skia/src/core/SkMD5.h41
-rw-r--r--gfx/skia/skia/src/core/SkMSAN.h40
-rw-r--r--gfx/skia/skia/src/core/SkMakeUnique.h23
-rw-r--r--gfx/skia/skia/src/core/SkMallocPixelRef.cpp222
-rw-r--r--gfx/skia/skia/src/core/SkMask.cpp86
-rw-r--r--gfx/skia/skia/src/core/SkMaskCache.cpp190
-rw-r--r--gfx/skia/skia/src/core/SkMaskCache.h44
-rw-r--r--gfx/skia/skia/src/core/SkMaskFilter.cpp359
-rw-r--r--gfx/skia/skia/src/core/SkMaskGamma.cpp124
-rw-r--r--gfx/skia/skia/src/core/SkMaskGamma.h230
-rw-r--r--gfx/skia/skia/src/core/SkMath.cpp86
-rw-r--r--gfx/skia/skia/src/core/SkMathPriv.h161
-rw-r--r--gfx/skia/skia/src/core/SkMatrix.cpp1908
-rw-r--r--gfx/skia/skia/src/core/SkMatrix44.cpp1018
-rw-r--r--gfx/skia/skia/src/core/SkMatrixImageFilter.cpp148
-rw-r--r--gfx/skia/skia/src/core/SkMatrixImageFilter.h61
-rw-r--r--gfx/skia/skia/src/core/SkMatrixPriv.h102
-rw-r--r--gfx/skia/skia/src/core/SkMatrixUtils.h37
-rw-r--r--gfx/skia/skia/src/core/SkMessageBus.h110
-rw-r--r--gfx/skia/skia/src/core/SkMetaData.cpp335
-rw-r--r--gfx/skia/skia/src/core/SkMiniRecorder.cpp126
-rw-r--r--gfx/skia/skia/src/core/SkMipMap.cpp636
-rw-r--r--gfx/skia/skia/src/core/SkMipMap.h82
-rw-r--r--gfx/skia/skia/src/core/SkModeColorFilter.cpp192
-rw-r--r--gfx/skia/skia/src/core/SkModeColorFilter.h66
-rw-r--r--gfx/skia/skia/src/core/SkMultiPictureDraw.cpp110
-rw-r--r--gfx/skia/skia/src/core/SkNextID.h21
-rw-r--r--gfx/skia/skia/src/core/SkNormalBevelSource.cpp310
-rw-r--r--gfx/skia/skia/src/core/SkNormalBevelSource.h57
-rw-r--r--gfx/skia/skia/src/core/SkNormalFlatSource.cpp107
-rw-r--r--gfx/skia/skia/src/core/SkNormalFlatSource.h48
-rw-r--r--gfx/skia/skia/src/core/SkNormalMapSource.cpp267
-rw-r--r--gfx/skia/skia/src/core/SkNormalMapSource.h62
-rw-r--r--gfx/skia/skia/src/core/SkNormalSource.cpp24
-rw-r--r--gfx/skia/skia/src/core/SkNormalSource.h127
-rw-r--r--gfx/skia/skia/src/core/SkNormalSourcePriv.h57
-rw-r--r--gfx/skia/skia/src/core/SkNx.h365
-rw-r--r--gfx/skia/skia/src/core/SkOpts.cpp223
-rw-r--r--gfx/skia/skia/src/core/SkOpts.h80
-rw-r--r--gfx/skia/skia/src/core/SkOrderedReadBuffer.h9
-rw-r--r--gfx/skia/skia/src/core/SkPM4f.h74
-rw-r--r--gfx/skia/skia/src/core/SkPM4fPriv.h74
-rw-r--r--gfx/skia/skia/src/core/SkPaint.cpp2406
-rw-r--r--gfx/skia/skia/src/core/SkPaintDefaults.h35
-rw-r--r--gfx/skia/skia/src/core/SkPaintPriv.cpp55
-rw-r--r--gfx/skia/skia/src/core/SkPaintPriv.h50
-rw-r--r--gfx/skia/skia/src/core/SkPath.cpp3388
-rw-r--r--gfx/skia/skia/src/core/SkPathEffect.cpp108
-rw-r--r--gfx/skia/skia/src/core/SkPathMeasure.cpp710
-rw-r--r--gfx/skia/skia/src/core/SkPathMeasurePriv.h29
-rw-r--r--gfx/skia/skia/src/core/SkPathPriv.h126
-rw-r--r--gfx/skia/skia/src/core/SkPathRef.cpp770
-rw-r--r--gfx/skia/skia/src/core/SkPerspIter.h48
-rw-r--r--gfx/skia/skia/src/core/SkPicture.cpp280
-rw-r--r--gfx/skia/skia/src/core/SkPictureAnalyzer.cpp65
-rw-r--r--gfx/skia/skia/src/core/SkPictureCommon.h140
-rw-r--r--gfx/skia/skia/src/core/SkPictureContentInfo.cpp175
-rw-r--r--gfx/skia/skia/src/core/SkPictureContentInfo.h89
-rw-r--r--gfx/skia/skia/src/core/SkPictureData.cpp658
-rw-r--r--gfx/skia/skia/src/core/SkPictureData.h206
-rw-r--r--gfx/skia/skia/src/core/SkPictureFlat.cpp58
-rw-r--r--gfx/skia/skia/src/core/SkPictureFlat.h184
-rw-r--r--gfx/skia/skia/src/core/SkPictureImageGenerator.cpp159
-rw-r--r--gfx/skia/skia/src/core/SkPicturePlayback.cpp682
-rw-r--r--gfx/skia/skia/src/core/SkPicturePlayback.h65
-rw-r--r--gfx/skia/skia/src/core/SkPictureRecord.cpp996
-rw-r--r--gfx/skia/skia/src/core/SkPictureRecord.h285
-rw-r--r--gfx/skia/skia/src/core/SkPictureRecorder.cpp147
-rw-r--r--gfx/skia/skia/src/core/SkPictureShader.cpp335
-rw-r--r--gfx/skia/skia/src/core/SkPictureShader.h76
-rw-r--r--gfx/skia/skia/src/core/SkPipe.h92
-rw-r--r--gfx/skia/skia/src/core/SkPixelRef.cpp348
-rw-r--r--gfx/skia/skia/src/core/SkPixmap.cpp276
-rw-r--r--gfx/skia/skia/src/core/SkPoint.cpp264
-rw-r--r--gfx/skia/skia/src/core/SkPoint3.cpp80
-rw-r--r--gfx/skia/skia/src/core/SkPtrRecorder.cpp73
-rw-r--r--gfx/skia/skia/src/core/SkPtrRecorder.h171
-rw-r--r--gfx/skia/skia/src/core/SkQuadClipper.cpp114
-rw-r--r--gfx/skia/skia/src/core/SkQuadClipper.h69
-rw-r--r--gfx/skia/skia/src/core/SkRRect.cpp585
-rw-r--r--gfx/skia/skia/src/core/SkRTree.cpp188
-rw-r--r--gfx/skia/skia/src/core/SkRTree.h97
-rw-r--r--gfx/skia/skia/src/core/SkRWBuffer.cpp360
-rw-r--r--gfx/skia/skia/src/core/SkRadialShadowMapShader.cpp431
-rw-r--r--gfx/skia/skia/src/core/SkRadialShadowMapShader.h31
-rw-r--r--gfx/skia/skia/src/core/SkRasterClip.cpp483
-rw-r--r--gfx/skia/skia/src/core/SkRasterClip.h195
-rw-r--r--gfx/skia/skia/src/core/SkRasterPipeline.cpp59
-rw-r--r--gfx/skia/skia/src/core/SkRasterPipeline.h157
-rw-r--r--gfx/skia/skia/src/core/SkRasterPipelineBlitter.cpp215
-rw-r--r--gfx/skia/skia/src/core/SkRasterizer.cpp46
-rw-r--r--gfx/skia/skia/src/core/SkReadBuffer.cpp399
-rw-r--r--gfx/skia/skia/src/core/SkReadBuffer.h275
-rw-r--r--gfx/skia/skia/src/core/SkReader32.h168
-rw-r--r--gfx/skia/skia/src/core/SkRecord.cpp42
-rw-r--r--gfx/skia/skia/src/core/SkRecord.h198
-rw-r--r--gfx/skia/skia/src/core/SkRecordDraw.cpp633
-rw-r--r--gfx/skia/skia/src/core/SkRecordDraw.h82
-rw-r--r--gfx/skia/skia/src/core/SkRecordOpts.cpp309
-rw-r--r--gfx/skia/skia/src/core/SkRecordOpts.h30
-rw-r--r--gfx/skia/skia/src/core/SkRecordPattern.h175
-rw-r--r--gfx/skia/skia/src/core/SkRecordedDrawable.cpp94
-rw-r--r--gfx/skia/skia/src/core/SkRecordedDrawable.h41
-rw-r--r--gfx/skia/skia/src/core/SkRecorder.cpp431
-rw-r--r--gfx/skia/skia/src/core/SkRecorder.h181
-rw-r--r--gfx/skia/skia/src/core/SkRecords.cpp23
-rw-r--r--gfx/skia/skia/src/core/SkRect.cpp176
-rw-r--r--gfx/skia/skia/src/core/SkRefDict.cpp88
-rw-r--r--gfx/skia/skia/src/core/SkRefDict.h53
-rw-r--r--gfx/skia/skia/src/core/SkRegion.cpp1479
-rw-r--r--gfx/skia/skia/src/core/SkRegionPriv.h235
-rw-r--r--gfx/skia/skia/src/core/SkRegion_path.cpp540
-rw-r--r--gfx/skia/skia/src/core/SkResourceCache.cpp699
-rw-r--r--gfx/skia/skia/src/core/SkResourceCache.h290
-rw-r--r--gfx/skia/skia/src/core/SkSRGB.cpp75
-rw-r--r--gfx/skia/skia/src/core/SkSRGB.h84
-rw-r--r--gfx/skia/skia/src/core/SkScalar.cpp35
-rw-r--r--gfx/skia/skia/src/core/SkScaleToSides.h68
-rw-r--r--gfx/skia/skia/src/core/SkScalerContext.cpp871
-rw-r--r--gfx/skia/skia/src/core/SkScalerContext.h390
-rw-r--r--gfx/skia/skia/src/core/SkScan.cpp108
-rw-r--r--gfx/skia/skia/src/core/SkScan.h125
-rw-r--r--gfx/skia/skia/src/core/SkScanPriv.h39
-rw-r--r--gfx/skia/skia/src/core/SkScan_AntiPath.cpp767
-rw-r--r--gfx/skia/skia/src/core/SkScan_Antihair.cpp1006
-rw-r--r--gfx/skia/skia/src/core/SkScan_Hairline.cpp705
-rw-r--r--gfx/skia/skia/src/core/SkScan_Path.cpp808
-rw-r--r--gfx/skia/skia/src/core/SkSemaphore.cpp73
-rw-r--r--gfx/skia/skia/src/core/SkShader.cpp303
-rw-r--r--gfx/skia/skia/src/core/SkShadowShader.cpp957
-rw-r--r--gfx/skia/skia/src/core/SkShadowShader.h40
-rw-r--r--gfx/skia/skia/src/core/SkSharedMutex.cpp354
-rw-r--r--gfx/skia/skia/src/core/SkSharedMutex.h82
-rw-r--r--gfx/skia/skia/src/core/SkSinglyLinkedList.h97
-rw-r--r--gfx/skia/skia/src/core/SkSmallAllocator.h145
-rw-r--r--gfx/skia/skia/src/core/SkSpanProcs.cpp95
-rw-r--r--gfx/skia/skia/src/core/SkSpanProcs.h24
-rw-r--r--gfx/skia/skia/src/core/SkSpecialImage.cpp490
-rw-r--r--gfx/skia/skia/src/core/SkSpecialImage.h154
-rw-r--r--gfx/skia/skia/src/core/SkSpecialSurface.cpp176
-rw-r--r--gfx/skia/skia/src/core/SkSpecialSurface.h94
-rw-r--r--gfx/skia/skia/src/core/SkSpinlock.cpp15
-rw-r--r--gfx/skia/skia/src/core/SkSpriteBlitter.h51
-rw-r--r--gfx/skia/skia/src/core/SkSpriteBlitter4f.cpp129
-rw-r--r--gfx/skia/skia/src/core/SkSpriteBlitterTemplate.h76
-rw-r--r--gfx/skia/skia/src/core/SkSpriteBlitter_ARGB32.cpp296
-rw-r--r--gfx/skia/skia/src/core/SkSpriteBlitter_RGB16.cpp373
-rw-r--r--gfx/skia/skia/src/core/SkStream.cpp910
-rw-r--r--gfx/skia/skia/src/core/SkStreamPriv.h36
-rw-r--r--gfx/skia/skia/src/core/SkString.cpp680
-rw-r--r--gfx/skia/skia/src/core/SkStringUtils.cpp63
-rw-r--r--gfx/skia/skia/src/core/SkStringUtils.h41
-rw-r--r--gfx/skia/skia/src/core/SkStroke.cpp1554
-rw-r--r--gfx/skia/skia/src/core/SkStroke.h78
-rw-r--r--gfx/skia/skia/src/core/SkStrokeRec.cpp164
-rw-r--r--gfx/skia/skia/src/core/SkStrokerPriv.cpp250
-rw-r--r--gfx/skia/skia/src/core/SkStrokerPriv.h40
-rw-r--r--gfx/skia/skia/src/core/SkSurfacePriv.h25
-rw-r--r--gfx/skia/skia/src/core/SkSwizzle.cpp14
-rw-r--r--gfx/skia/skia/src/core/SkTDPQueue.h195
-rw-r--r--gfx/skia/skia/src/core/SkTDynamicHash.h289
-rw-r--r--gfx/skia/skia/src/core/SkTInternalLList.h272
-rw-r--r--gfx/skia/skia/src/core/SkTLList.h348
-rwxr-xr-xgfx/skia/skia/src/core/SkTLS.cpp130
-rw-r--r--gfx/skia/skia/src/core/SkTLS.h83
-rw-r--r--gfx/skia/skia/src/core/SkTMultiMap.h167
-rw-r--r--gfx/skia/skia/src/core/SkTSearch.cpp113
-rw-r--r--gfx/skia/skia/src/core/SkTSort.h209
-rw-r--r--gfx/skia/skia/src/core/SkTTopoSort.h112
-rw-r--r--gfx/skia/skia/src/core/SkTaskGroup.cpp210
-rw-r--r--gfx/skia/skia/src/core/SkTaskGroup.h42
-rw-r--r--gfx/skia/skia/src/core/SkTextBlob.cpp779
-rw-r--r--gfx/skia/skia/src/core/SkTextBlobRunIterator.h44
-rw-r--r--gfx/skia/skia/src/core/SkTextFormatParams.h38
-rw-r--r--gfx/skia/skia/src/core/SkTextMapStateProc.h77
-rw-r--r--gfx/skia/skia/src/core/SkTextToPathIter.h103
-rw-r--r--gfx/skia/skia/src/core/SkThreadID.cpp16
-rw-r--r--gfx/skia/skia/src/core/SkTime.cpp82
-rw-r--r--gfx/skia/skia/src/core/SkTraceEvent.h534
-rw-r--r--gfx/skia/skia/src/core/SkTraceEventCommon.h1039
-rw-r--r--gfx/skia/skia/src/core/SkTypeface.cpp358
-rw-r--r--gfx/skia/skia/src/core/SkTypefaceCache.cpp102
-rw-r--r--gfx/skia/skia/src/core/SkTypefaceCache.h76
-rw-r--r--gfx/skia/skia/src/core/SkTypefacePriv.h38
-rw-r--r--gfx/skia/skia/src/core/SkUnPreMultiply.cpp88
-rw-r--r--gfx/skia/skia/src/core/SkUtils.cpp279
-rw-r--r--gfx/skia/skia/src/core/SkUtils.h103
-rw-r--r--gfx/skia/skia/src/core/SkUtilsArm.cpp8
-rw-r--r--gfx/skia/skia/src/core/SkUtilsArm.h21
-rw-r--r--gfx/skia/skia/src/core/SkValidatingReadBuffer.cpp297
-rw-r--r--gfx/skia/skia/src/core/SkValidatingReadBuffer.h86
-rw-r--r--gfx/skia/skia/src/core/SkValidationUtils.h41
-rw-r--r--gfx/skia/skia/src/core/SkVarAlloc.cpp56
-rw-r--r--gfx/skia/skia/src/core/SkVarAlloc.h55
-rw-r--r--gfx/skia/skia/src/core/SkVertState.cpp106
-rw-r--r--gfx/skia/skia/src/core/SkVertState.h58
-rw-r--r--gfx/skia/skia/src/core/SkWriteBuffer.cpp304
-rw-r--r--gfx/skia/skia/src/core/SkWriter32.cpp81
-rw-r--r--gfx/skia/skia/src/core/SkXfermode.cpp1535
-rw-r--r--gfx/skia/skia/src/core/SkXfermode4f.cpp474
-rw-r--r--gfx/skia/skia/src/core/SkXfermodeF16.cpp178
-rw-r--r--gfx/skia/skia/src/core/SkXfermodeInterpretation.cpp44
-rw-r--r--gfx/skia/skia/src/core/SkXfermodeInterpretation.h30
-rw-r--r--gfx/skia/skia/src/core/SkXfermode_proccoeff.h75
-rw-r--r--gfx/skia/skia/src/core/SkYUVPlanesCache.cpp89
-rw-r--r--gfx/skia/skia/src/core/SkYUVPlanesCache.h45
-rw-r--r--gfx/skia/skia/src/effects/GrAlphaThresholdFragmentProcessor.cpp198
-rw-r--r--gfx/skia/skia/src/effects/GrAlphaThresholdFragmentProcessor.h68
-rw-r--r--gfx/skia/skia/src/effects/GrCircleBlurFragmentProcessor.cpp358
-rw-r--r--gfx/skia/skia/src/effects/GrCircleBlurFragmentProcessor.h75
-rw-r--r--gfx/skia/skia/src/effects/Sk1DPathEffect.cpp212
-rw-r--r--gfx/skia/skia/src/effects/Sk2DPathEffect.cpp163
-rw-r--r--gfx/skia/skia/src/effects/SkAlphaThresholdFilter.cpp268
-rw-r--r--gfx/skia/skia/src/effects/SkArcToPathEffect.cpp78
-rw-r--r--gfx/skia/skia/src/effects/SkArithmeticMode.cpp161
-rw-r--r--gfx/skia/skia/src/effects/SkArithmeticModePriv.h47
-rw-r--r--gfx/skia/skia/src/effects/SkArithmeticMode_gpu.cpp296
-rw-r--r--gfx/skia/skia/src/effects/SkArithmeticMode_gpu.h121
-rw-r--r--gfx/skia/skia/src/effects/SkBlurDrawLooper.cpp178
-rw-r--r--gfx/skia/skia/src/effects/SkBlurMask.cpp993
-rw-r--r--gfx/skia/skia/src/effects/SkBlurMask.h84
-rw-r--r--gfx/skia/skia/src/effects/SkBlurMaskFilter.cpp1572
-rw-r--r--gfx/skia/skia/src/effects/SkColorCubeFilter.cpp329
-rw-r--r--gfx/skia/skia/src/effects/SkColorFilterImageFilter.cpp149
-rw-r--r--gfx/skia/skia/src/effects/SkColorMatrix.cpp194
-rw-r--r--gfx/skia/skia/src/effects/SkColorMatrixFilter.cpp37
-rw-r--r--gfx/skia/skia/src/effects/SkComposeImageFilter.cpp92
-rw-r--r--gfx/skia/skia/src/effects/SkCornerPathEffect.cpp155
-rw-r--r--gfx/skia/skia/src/effects/SkDashPathEffect.cpp401
-rw-r--r--gfx/skia/skia/src/effects/SkDiscretePathEffect.cpp150
-rw-r--r--gfx/skia/skia/src/effects/SkDisplacementMapEffect.cpp641
-rw-r--r--gfx/skia/skia/src/effects/SkDropShadowImageFilter.cpp170
-rw-r--r--gfx/skia/skia/src/effects/SkEmbossMask.cpp163
-rw-r--r--gfx/skia/skia/src/effects/SkEmbossMask.h19
-rw-r--r--gfx/skia/skia/src/effects/SkEmbossMaskFilter.cpp153
-rw-r--r--gfx/skia/skia/src/effects/SkEmbossMask_Table.h1037
-rw-r--r--gfx/skia/skia/src/effects/SkGammaColorFilter.cpp56
-rw-r--r--gfx/skia/skia/src/effects/SkGaussianEdgeShader.cpp169
-rw-r--r--gfx/skia/skia/src/effects/SkImageSource.cpp139
-rw-r--r--gfx/skia/skia/src/effects/SkLayerDrawLooper.cpp356
-rw-r--r--gfx/skia/skia/src/effects/SkLayerRasterizer.cpp227
-rw-r--r--gfx/skia/skia/src/effects/SkLightingImageFilter.cpp2183
-rw-r--r--gfx/skia/skia/src/effects/SkLumaColorFilter.cpp117
-rw-r--r--gfx/skia/skia/src/effects/SkMagnifierImageFilter.cpp423
-rw-r--r--gfx/skia/skia/src/effects/SkMatrixConvolutionImageFilter.cpp422
-rwxr-xr-xgfx/skia/skia/src/effects/SkMergeImageFilter.cpp194
-rw-r--r--gfx/skia/skia/src/effects/SkMorphologyImageFilter.cpp642
-rw-r--r--gfx/skia/skia/src/effects/SkOffsetImageFilter.cpp122
-rw-r--r--gfx/skia/skia/src/effects/SkPackBits.cpp107
-rw-r--r--gfx/skia/skia/src/effects/SkPackBits.h45
-rw-r--r--gfx/skia/skia/src/effects/SkPaintFlagsDrawFilter.cpp20
-rw-r--r--gfx/skia/skia/src/effects/SkPaintImageFilter.cpp81
-rw-r--r--gfx/skia/skia/src/effects/SkPerlinNoiseShader.cpp983
-rw-r--r--gfx/skia/skia/src/effects/SkPictureImageFilter.cpp213
-rw-r--r--gfx/skia/skia/src/effects/SkRRectsGaussianEdgeShader.cpp433
-rw-r--r--gfx/skia/skia/src/effects/SkTableColorFilter.cpp604
-rw-r--r--gfx/skia/skia/src/effects/SkTableMaskFilter.cpp145
-rw-r--r--gfx/skia/skia/src/effects/SkTileImageFilter.cpp163
-rw-r--r--gfx/skia/skia/src/effects/SkXfermodeImageFilter.cpp544
-rw-r--r--gfx/skia/skia/src/effects/gradients/Sk4fGradientBase.cpp444
-rw-r--r--gfx/skia/skia/src/effects/gradients/Sk4fGradientBase.h75
-rw-r--r--gfx/skia/skia/src/effects/gradients/Sk4fGradientPriv.h195
-rw-r--r--gfx/skia/skia/src/effects/gradients/Sk4fLinearGradient.cpp488
-rw-r--r--gfx/skia/skia/src/effects/gradients/Sk4fLinearGradient.h51
-rw-r--r--gfx/skia/skia/src/effects/gradients/SkClampRange.cpp158
-rw-r--r--gfx/skia/skia/src/effects/gradients/SkClampRange.h51
-rw-r--r--gfx/skia/skia/src/effects/gradients/SkGradientBitmapCache.cpp152
-rw-r--r--gfx/skia/skia/src/effects/gradients/SkGradientBitmapCache.h48
-rw-r--r--gfx/skia/skia/src/effects/gradients/SkGradientShader.cpp1750
-rw-r--r--gfx/skia/skia/src/effects/gradients/SkGradientShaderPriv.h517
-rw-r--r--gfx/skia/skia/src/effects/gradients/SkLinearGradient.cpp769
-rw-r--r--gfx/skia/skia/src/effects/gradients/SkLinearGradient.h81
-rw-r--r--gfx/skia/skia/src/effects/gradients/SkRadialGradient.cpp386
-rw-r--r--gfx/skia/skia/src/effects/gradients/SkRadialGradient.h49
-rw-r--r--gfx/skia/skia/src/effects/gradients/SkSweepGradient.cpp276
-rw-r--r--gfx/skia/skia/src/effects/gradients/SkSweepGradient.h48
-rw-r--r--gfx/skia/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp398
-rw-r--r--gfx/skia/skia/src/effects/gradients/SkTwoPointConicalGradient.h92
-rw-r--r--gfx/skia/skia/src/effects/gradients/SkTwoPointConicalGradient_gpu.cpp1343
-rw-r--r--gfx/skia/skia/src/effects/gradients/SkTwoPointConicalGradient_gpu.h24
-rw-r--r--gfx/skia/skia/src/fonts/SkFontMgr_indirect.cpp201
-rw-r--r--gfx/skia/skia/src/fonts/SkGScalerContext.cpp257
-rw-r--r--gfx/skia/skia/src/fonts/SkGScalerContext.h49
-rw-r--r--gfx/skia/skia/src/fonts/SkRandomScalerContext.cpp255
-rw-r--r--gfx/skia/skia/src/fonts/SkRandomScalerContext.h55
-rw-r--r--gfx/skia/skia/src/fonts/SkRemotableFontMgr.cpp23
-rw-r--r--gfx/skia/skia/src/fonts/SkTestScalerContext.cpp301
-rw-r--r--gfx/skia/skia/src/fonts/SkTestScalerContext.h114
-rw-r--r--gfx/skia/skia/src/gpu/GrAllocator.h398
-rw-r--r--gfx/skia/skia/src/gpu/GrAppliedClip.h75
-rw-r--r--gfx/skia/skia/src/gpu/GrAuditTrail.cpp298
-rw-r--r--gfx/skia/skia/src/gpu/GrAutoLocaleSetter.h86
-rw-r--r--gfx/skia/skia/src/gpu/GrBatchAtlas.cpp249
-rw-r--r--gfx/skia/skia/src/gpu/GrBatchAtlas.h262
-rw-r--r--gfx/skia/skia/src/gpu/GrBatchFlushState.cpp30
-rw-r--r--gfx/skia/skia/src/gpu/GrBatchFlushState.h210
-rw-r--r--gfx/skia/skia/src/gpu/GrBatchTest.cpp59
-rw-r--r--gfx/skia/skia/src/gpu/GrBatchTest.h39
-rw-r--r--gfx/skia/skia/src/gpu/GrBlend.cpp124
-rw-r--r--gfx/skia/skia/src/gpu/GrBlurUtils.cpp295
-rw-r--r--gfx/skia/skia/src/gpu/GrBlurUtils.h58
-rw-r--r--gfx/skia/skia/src/gpu/GrBuffer.cpp72
-rw-r--r--gfx/skia/skia/src/gpu/GrBufferAllocPool.cpp371
-rw-r--r--gfx/skia/skia/src/gpu/GrBufferAllocPool.h197
-rw-r--r--gfx/skia/skia/src/gpu/GrCaps.cpp300
-rw-r--r--gfx/skia/skia/src/gpu/GrClipStackClip.cpp487
-rw-r--r--gfx/skia/skia/src/gpu/GrClipStackClip.h71
-rw-r--r--gfx/skia/skia/src/gpu/GrColorSpaceXform.cpp78
-rw-r--r--gfx/skia/skia/src/gpu/GrContext.cpp843
-rw-r--r--gfx/skia/skia/src/gpu/GrContextPriv.h59
-rw-r--r--gfx/skia/skia/src/gpu/GrCoordTransform.cpp60
-rw-r--r--gfx/skia/skia/src/gpu/GrDefaultGeoProcFactory.cpp317
-rw-r--r--gfx/skia/skia/src/gpu/GrDefaultGeoProcFactory.h136
-rw-r--r--gfx/skia/skia/src/gpu/GrDrawContext.cpp1435
-rw-r--r--gfx/skia/skia/src/gpu/GrDrawContextPriv.h86
-rw-r--r--gfx/skia/skia/src/gpu/GrDrawTarget.cpp626
-rw-r--r--gfx/skia/skia/src/gpu/GrDrawTarget.h249
-rw-r--r--gfx/skia/skia/src/gpu/GrDrawingManager.cpp251
-rw-r--r--gfx/skia/skia/src/gpu/GrDrawingManager.h114
-rw-r--r--gfx/skia/skia/src/gpu/GrFixedClip.cpp72
-rw-r--r--gfx/skia/skia/src/gpu/GrFixedClip.h55
-rw-r--r--gfx/skia/skia/src/gpu/GrFragmentProcessor.cpp406
-rw-r--r--gfx/skia/skia/src/gpu/GrGeometryProcessor.h96
-rw-r--r--gfx/skia/skia/src/gpu/GrGlyph.h104
-rw-r--r--gfx/skia/skia/src/gpu/GrGpu.cpp505
-rw-r--r--gfx/skia/skia/src/gpu/GrGpu.h611
-rw-r--r--gfx/skia/skia/src/gpu/GrGpuCommandBuffer.cpp49
-rw-r--r--gfx/skia/skia/src/gpu/GrGpuCommandBuffer.h98
-rw-r--r--gfx/skia/skia/src/gpu/GrGpuFactory.cpp37
-rw-r--r--gfx/skia/skia/src/gpu/GrGpuFactory.h24
-rw-r--r--gfx/skia/skia/src/gpu/GrGpuResource.cpp204
-rw-r--r--gfx/skia/skia/src/gpu/GrGpuResourceCacheAccess.h98
-rw-r--r--gfx/skia/skia/src/gpu/GrGpuResourcePriv.h90
-rw-r--r--gfx/skia/skia/src/gpu/GrGpuResourceRef.cpp117
-rw-r--r--gfx/skia/skia/src/gpu/GrImageIDTextureAdjuster.cpp114
-rw-r--r--gfx/skia/skia/src/gpu/GrImageIDTextureAdjuster.h70
-rw-r--r--gfx/skia/skia/src/gpu/GrInvariantOutput.cpp31
-rw-r--r--gfx/skia/skia/src/gpu/GrMemoryPool.cpp185
-rw-r--r--gfx/skia/skia/src/gpu/GrMemoryPool.h101
-rw-r--r--gfx/skia/skia/src/gpu/GrMesh.h179
-rw-r--r--gfx/skia/skia/src/gpu/GrNonAtomicRef.h58
-rw-r--r--gfx/skia/skia/src/gpu/GrOvalRenderer.cpp2171
-rw-r--r--gfx/skia/skia/src/gpu/GrOvalRenderer.h47
-rw-r--r--gfx/skia/skia/src/gpu/GrPLSGeometryProcessor.h35
-rw-r--r--gfx/skia/skia/src/gpu/GrPaint.cpp75
-rw-r--r--gfx/skia/skia/src/gpu/GrPath.cpp55
-rw-r--r--gfx/skia/skia/src/gpu/GrPath.h57
-rw-r--r--gfx/skia/skia/src/gpu/GrPathProcessor.cpp141
-rw-r--r--gfx/skia/skia/src/gpu/GrPathProcessor.h57
-rw-r--r--gfx/skia/skia/src/gpu/GrPathRange.cpp55
-rw-r--r--gfx/skia/skia/src/gpu/GrPathRange.h153
-rw-r--r--gfx/skia/skia/src/gpu/GrPathRenderer.cpp23
-rw-r--r--gfx/skia/skia/src/gpu/GrPathRenderer.h294
-rw-r--r--gfx/skia/skia/src/gpu/GrPathRendererChain.cpp107
-rw-r--r--gfx/skia/skia/src/gpu/GrPathRendererChain.h64
-rw-r--r--gfx/skia/skia/src/gpu/GrPathRendering.cpp116
-rw-r--r--gfx/skia/skia/src/gpu/GrPathRendering.h215
-rw-r--r--gfx/skia/skia/src/gpu/GrPathRenderingDrawContext.cpp77
-rw-r--r--gfx/skia/skia/src/gpu/GrPathRenderingDrawContext.h42
-rw-r--r--gfx/skia/skia/src/gpu/GrPathUtils.cpp826
-rw-r--r--gfx/skia/skia/src/gpu/GrPathUtils.h180
-rw-r--r--gfx/skia/skia/src/gpu/GrPendingProgramElement.h56
-rw-r--r--gfx/skia/skia/src/gpu/GrPipeline.cpp248
-rw-r--r--gfx/skia/skia/src/gpu/GrPipeline.h239
-rw-r--r--gfx/skia/skia/src/gpu/GrPipelineBuilder.cpp83
-rw-r--r--gfx/skia/skia/src/gpu/GrPipelineBuilder.h317
-rw-r--r--gfx/skia/skia/src/gpu/GrPrimitiveProcessor.cpp64
-rw-r--r--gfx/skia/skia/src/gpu/GrPrimitiveProcessor.h235
-rw-r--r--gfx/skia/skia/src/gpu/GrProcOptInfo.cpp60
-rw-r--r--gfx/skia/skia/src/gpu/GrProcOptInfo.h85
-rw-r--r--gfx/skia/skia/src/gpu/GrProcessor.cpp149
-rw-r--r--gfx/skia/skia/src/gpu/GrProcessorUnitTest.cpp23
-rw-r--r--gfx/skia/skia/src/gpu/GrProgramDesc.cpp187
-rw-r--r--gfx/skia/skia/src/gpu/GrProgramDesc.h168
-rw-r--r--gfx/skia/skia/src/gpu/GrProgramElement.cpp37
-rw-r--r--gfx/skia/skia/src/gpu/GrQuad.h65
-rw-r--r--gfx/skia/skia/src/gpu/GrRect.h57
-rw-r--r--gfx/skia/skia/src/gpu/GrRectanizer.h44
-rw-r--r--gfx/skia/skia/src/gpu/GrRectanizer_pow2.cpp59
-rw-r--r--gfx/skia/skia/src/gpu/GrRectanizer_pow2.h80
-rw-r--r--gfx/skia/skia/src/gpu/GrRectanizer_skyline.cpp121
-rw-r--r--gfx/skia/skia/src/gpu/GrRectanizer_skyline.h62
-rw-r--r--gfx/skia/skia/src/gpu/GrReducedClip.cpp849
-rw-r--r--gfx/skia/skia/src/gpu/GrReducedClip.h91
-rw-r--r--gfx/skia/skia/src/gpu/GrRenderTarget.cpp134
-rw-r--r--gfx/skia/skia/src/gpu/GrRenderTargetPriv.h63
-rw-r--r--gfx/skia/skia/src/gpu/GrRenderTargetProxy.cpp98
-rw-r--r--gfx/skia/skia/src/gpu/GrResourceCache.cpp763
-rw-r--r--gfx/skia/skia/src/gpu/GrResourceCache.h412
-rw-r--r--gfx/skia/skia/src/gpu/GrResourceHandle.h36
-rw-r--r--gfx/skia/skia/src/gpu/GrResourceProvider.cpp217
-rw-r--r--gfx/skia/skia/src/gpu/GrResourceProvider.h182
-rw-r--r--gfx/skia/skia/src/gpu/GrSWMaskHelper.cpp201
-rw-r--r--gfx/skia/skia/src/gpu/GrSWMaskHelper.h111
-rw-r--r--gfx/skia/skia/src/gpu/GrScissorState.h40
-rw-r--r--gfx/skia/skia/src/gpu/GrShape.cpp540
-rw-r--r--gfx/skia/skia/src/gpu/GrShape.h463
-rw-r--r--gfx/skia/skia/src/gpu/GrSoftwarePathRenderer.cpp228
-rw-r--r--gfx/skia/skia/src/gpu/GrSoftwarePathRenderer.h55
-rw-r--r--gfx/skia/skia/src/gpu/GrStencilAttachment.cpp18
-rw-r--r--gfx/skia/skia/src/gpu/GrStencilAttachment.h79
-rw-r--r--gfx/skia/skia/src/gpu/GrStencilSettings.cpp489
-rw-r--r--gfx/skia/skia/src/gpu/GrStencilSettings.h121
-rw-r--r--gfx/skia/skia/src/gpu/GrStyle.cpp199
-rw-r--r--gfx/skia/skia/src/gpu/GrStyle.h213
-rw-r--r--gfx/skia/skia/src/gpu/GrSurface.cpp197
-rw-r--r--gfx/skia/skia/src/gpu/GrSurfacePriv.h66
-rw-r--r--gfx/skia/skia/src/gpu/GrSurfaceProxy.cpp9
-rw-r--r--gfx/skia/skia/src/gpu/GrSwizzle.h152
-rw-r--r--gfx/skia/skia/src/gpu/GrTRecorder.h390
-rw-r--r--gfx/skia/skia/src/gpu/GrTessellator.cpp1813
-rw-r--r--gfx/skia/skia/src/gpu/GrTessellator.h53
-rw-r--r--gfx/skia/skia/src/gpu/GrTestUtils.cpp326
-rw-r--r--gfx/skia/skia/src/gpu/GrTexture.cpp128
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureAccess.cpp42
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureParamsAdjuster.cpp522
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureParamsAdjuster.h226
-rw-r--r--gfx/skia/skia/src/gpu/GrTexturePriv.h79
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureProvider.cpp203
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureProxy.cpp51
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureToYUVPlanes.cpp233
-rw-r--r--gfx/skia/skia/src/gpu/GrTextureToYUVPlanes.h19
-rw-r--r--gfx/skia/skia/src/gpu/GrTraceMarker.cpp102
-rw-r--r--gfx/skia/skia/src/gpu/GrTraceMarker.h98
-rw-r--r--gfx/skia/skia/src/gpu/GrTracing.h120
-rw-r--r--gfx/skia/skia/src/gpu/GrUserStencilSettings.h239
-rw-r--r--gfx/skia/skia/src/gpu/GrWindowRectangles.h101
-rw-r--r--gfx/skia/skia/src/gpu/GrWindowRectsState.h60
-rw-r--r--gfx/skia/skia/src/gpu/GrXferProcessor.cpp222
-rw-r--r--gfx/skia/skia/src/gpu/GrYUVProvider.cpp152
-rw-r--r--gfx/skia/skia/src/gpu/GrYUVProvider.h67
-rw-r--r--gfx/skia/skia/src/gpu/SkGpuDevice.cpp1823
-rw-r--r--gfx/skia/skia/src/gpu/SkGpuDevice.h254
-rw-r--r--gfx/skia/skia/src/gpu/SkGpuDevice_drawTexture.cpp251
-rw-r--r--gfx/skia/skia/src/gpu/SkGr.cpp813
-rw-r--r--gfx/skia/skia/src/gpu/SkGrPriv.h162
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrAAConvexPathRenderer.cpp1019
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrAAConvexPathRenderer.h23
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrAAConvexTessellator.cpp1103
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrAAConvexTessellator.h291
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp621
-rwxr-xr-xgfx/skia/skia/src/gpu/batches/GrAADistanceFieldPathRenderer.h103
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrAAFillRectBatch.cpp408
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrAAFillRectBatch.h41
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrAAHairLinePathRenderer.cpp993
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrAAHairLinePathRenderer.h30
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp393
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrAALinearizingConvexPathRenderer.h23
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrAAStrokeRectBatch.cpp622
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrAAStrokeRectBatch.h34
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrAnalyticRectBatch.cpp409
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrAnalyticRectBatch.h36
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrAtlasTextBatch.cpp313
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrAtlasTextBatch.h222
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrBatch.cpp63
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrBatch.h235
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrClearBatch.h108
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrClearStencilClipBatch.h64
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrCopySurfaceBatch.cpp74
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrCopySurfaceBatch.h86
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrDashLinePathRenderer.cpp56
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrDashLinePathRenderer.h28
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrDefaultPathRenderer.cpp638
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrDefaultPathRenderer.h47
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrDiscardBatch.h55
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrDrawAtlasBatch.cpp262
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrDrawAtlasBatch.h67
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrDrawBatch.cpp35
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrDrawBatch.h148
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrDrawPathBatch.cpp221
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrDrawPathBatch.h201
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrDrawVerticesBatch.cpp326
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrDrawVerticesBatch.h71
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrMSAAPathRenderer.cpp716
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrMSAAPathRenderer.h35
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrNinePatch.cpp188
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrNinePatch.h26
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrNonAAFillRectBatch.cpp224
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrNonAAFillRectBatch.h33
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp270
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrNonAAStrokeRectBatch.cpp206
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrNonAAStrokeRectBatch.h30
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrPLSPathRenderer.cpp959
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrPLSPathRenderer.h48
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrPathStencilSettings.h160
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrRectBatchFactory.cpp35
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrRectBatchFactory.h85
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrRegionBatch.cpp167
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrRegionBatch.h25
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrStencilAndCoverPathRenderer.cpp175
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrStencilAndCoverPathRenderer.h44
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrStencilPathBatch.h85
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrTessellatingPathRenderer.cpp393
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrTessellatingPathRenderer.h33
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrTestBatch.h68
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrVertexBatch.cpp112
-rw-r--r--gfx/skia/skia/src/gpu/batches/GrVertexBatch.h91
-rw-r--r--gfx/skia/skia/src/gpu/effects/Gr1DKernelEffect.h61
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrBezierEffect.cpp712
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrBezierEffect.h285
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrBicubicEffect.cpp243
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrBicubicEffect.h123
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrBitmapTextGeoProc.cpp191
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrBitmapTextGeoProc.h67
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrConfigConversionEffect.cpp285
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrConfigConversionEffect.h75
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrConstColorProcessor.cpp137
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrConvexPolyEffect.cpp377
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrConvexPolyEffect.h93
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrConvolutionEffect.cpp237
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrConvolutionEffect.h102
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrCoverageSetOpXP.cpp344
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrCustomXfermode.cpp400
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrDashingEffect.cpp1281
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrDashingEffect.h36
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrDisableColorXP.cpp109
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrDisableColorXP.h48
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrDistanceFieldGeoProc.cpp850
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrDistanceFieldGeoProc.h237
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrDitherEffect.cpp98
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrDitherEffect.h24
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrGammaEffect.cpp148
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrGammaEffect.h47
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrMatrixConvolutionEffect.cpp268
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrMatrixConvolutionEffect.h93
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrOvalEffect.cpp411
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrOvalEffect.h25
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrPorterDuffXferProcessor.cpp923
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrRRectEffect.cpp779
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrRRectEffect.h27
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrSimpleTextureEffect.cpp90
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrSimpleTextureEffect.h82
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrSingleTextureEffect.cpp43
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrSingleTextureEffect.h69
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrTextureDomain.cpp393
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrTextureDomain.h246
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrTextureStripAtlas.cpp356
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrXfermodeFragmentProcessor.cpp312
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrYUVEffect.cpp407
-rw-r--r--gfx/skia/skia/src/gpu/effects/GrYUVEffect.h50
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLAssembleInterface.cpp938
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLBuffer.cpp288
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLBuffer.h67
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLCaps.cpp1972
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLCaps.h498
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLContext.cpp76
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLContext.h85
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLCreateNativeInterface_none.cpp12
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLCreateNullInterface.cpp822
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLDefaultInterface_native.cpp12
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLDefaultInterface_none.cpp12
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLDefines.h989
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLExtensions.cpp157
-rwxr-xr-xgfx/skia/skia/src/gpu/gl/GrGLGLSL.cpp52
-rwxr-xr-xgfx/skia/skia/src/gpu/gl/GrGLGLSL.h25
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLGpu.cpp4712
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLGpu.h651
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLGpuCommandBuffer.h58
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLGpuProgramCache.cpp222
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLIRect.h96
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLInterface.cpp825
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLPath.cpp345
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLPath.h56
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLPathRange.cpp110
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLPathRange.h66
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLPathRendering.cpp339
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLPathRendering.h132
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLProgram.cpp174
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLProgram.h152
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLProgramDataManager.cpp325
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLProgramDataManager.h117
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLRenderTarget.cpp239
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLRenderTarget.h116
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLSampler.h45
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLStencilAttachment.cpp44
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLStencilAttachment.h67
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLTestInterface.cpp325
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLTestInterface.h341
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLTexture.cpp103
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLTexture.h86
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLTextureRenderTarget.cpp52
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLTextureRenderTarget.h80
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLUniformHandler.cpp120
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLUniformHandler.h75
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLUtil.cpp365
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLUtil.h213
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLVaryingHandler.cpp37
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLVaryingHandler.h36
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLVertexArray.cpp133
-rw-r--r--gfx/skia/skia/src/gpu/gl/GrGLVertexArray.h118
-rw-r--r--gfx/skia/skia/src/gpu/gl/android/GrGLCreateNativeInterface_android.cpp237
-rw-r--r--gfx/skia/skia/src/gpu/gl/builders/GrGLProgramBuilder.cpp240
-rw-r--r--gfx/skia/skia/src/gpu/gl/builders/GrGLProgramBuilder.h72
-rw-r--r--gfx/skia/skia/src/gpu/gl/builders/GrGLSLPrettyPrint.cpp204
-rw-r--r--gfx/skia/skia/src/gpu/gl/builders/GrGLShaderStringBuilder.cpp105
-rw-r--r--gfx/skia/skia/src/gpu/gl/builders/GrGLShaderStringBuilder.h24
-rw-r--r--gfx/skia/skia/src/gpu/gl/egl/GrGLCreateNativeInterface_egl.cpp29
-rw-r--r--gfx/skia/skia/src/gpu/gl/glfw/GrGLCreateNativeInterface_glfw.cpp27
-rw-r--r--gfx/skia/skia/src/gpu/gl/glx/GrGLCreateNativeInterface_glx.cpp33
-rw-r--r--gfx/skia/skia/src/gpu/gl/iOS/GrGLCreateNativeInterface_iOS.cpp55
-rw-r--r--gfx/skia/skia/src/gpu/gl/mac/GrGLCreateNativeInterface_mac.cpp61
-rw-r--r--gfx/skia/skia/src/gpu/gl/mesa/osmesa_wrapper.h15
-rw-r--r--gfx/skia/skia/src/gpu/gl/win/GrGLCreateNativeInterface_win.cpp90
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSL.cpp58
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSL.h390
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLBlend.cpp481
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLBlend.h28
-rwxr-xr-xgfx/skia/skia/src/gpu/glsl/GrGLSLCaps.cpp154
-rwxr-xr-xgfx/skia/skia/src/gpu/glsl/GrGLSLCaps.h238
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLColorSpaceXformHelper.h40
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentProcessor.cpp80
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentProcessor.h198
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentShaderBuilder.cpp397
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentShaderBuilder.h255
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryProcessor.cpp119
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryProcessor.h89
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryShaderBuilder.cpp19
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryShaderBuilder.h27
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLPLSPathRendering.h12
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLPrimitiveProcessor.cpp57
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLPrimitiveProcessor.h135
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLProgramBuilder.cpp427
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLProgramBuilder.h177
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLProgramDataManager.cpp33
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLProgramDataManager.h67
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLSampler.h45
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLShaderBuilder.cpp249
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLShaderBuilder.h263
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLShaderVar.h250
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLUniformHandler.h94
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLUtil.cpp52
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLUtil.h19
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLVarying.cpp159
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLVarying.h186
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLVertexShaderBuilder.cpp57
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLVertexShaderBuilder.h34
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLXferProcessor.cpp122
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSLXferProcessor.h117
-rw-r--r--gfx/skia/skia/src/gpu/glsl/GrGLSL_impl.h175
-rw-r--r--gfx/skia/skia/src/gpu/instanced/GLInstancedRendering.cpp324
-rw-r--r--gfx/skia/skia/src/gpu/instanced/GLInstancedRendering.h66
-rw-r--r--gfx/skia/skia/src/gpu/instanced/InstanceProcessor.cpp2123
-rw-r--r--gfx/skia/skia/src/gpu/instanced/InstanceProcessor.h70
-rw-r--r--gfx/skia/skia/src/gpu/instanced/InstancedRendering.cpp496
-rw-r--r--gfx/skia/skia/src/gpu/instanced/InstancedRendering.h185
-rw-r--r--gfx/skia/skia/src/gpu/instanced/InstancedRenderingTypes.h192
-rw-r--r--gfx/skia/skia/src/gpu/text/GrAtlasTextBlob.cpp568
-rw-r--r--gfx/skia/skia/src/gpu/text/GrAtlasTextBlob.h567
-rw-r--r--gfx/skia/skia/src/gpu/text/GrAtlasTextBlob_regenInBatch.cpp314
-rw-r--r--gfx/skia/skia/src/gpu/text/GrAtlasTextContext.cpp436
-rw-r--r--gfx/skia/skia/src/gpu/text/GrAtlasTextContext.h98
-rw-r--r--gfx/skia/skia/src/gpu/text/GrBatchFontCache.cpp390
-rw-r--r--gfx/skia/skia/src/gpu/text/GrBatchFontCache.h241
-rw-r--r--gfx/skia/skia/src/gpu/text/GrDistanceFieldAdjustTable.cpp98
-rw-r--r--gfx/skia/skia/src/gpu/text/GrDistanceFieldAdjustTable.h35
-rw-r--r--gfx/skia/skia/src/gpu/text/GrStencilAndCoverTextContext.cpp739
-rw-r--r--gfx/skia/skia/src/gpu/text/GrStencilAndCoverTextContext.h160
-rw-r--r--gfx/skia/skia/src/gpu/text/GrTextBlobCache.cpp26
-rw-r--r--gfx/skia/skia/src/gpu/text/GrTextBlobCache.h144
-rw-r--r--gfx/skia/skia/src/gpu/text/GrTextUtils.cpp570
-rw-r--r--gfx/skia/skia/src/gpu/text/GrTextUtils.h106
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkBackendContext.cpp275
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkBuffer.cpp224
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkBuffer.h121
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkCaps.cpp272
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkCaps.h151
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkCommandBuffer.cpp695
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkCommandBuffer.h352
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkCopyManager.cpp405
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkCopyManager.h55
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkCopyPipeline.cpp190
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkCopyPipeline.h43
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkDescriptorPool.cpp51
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkDescriptorPool.h51
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkDescriptorSet.cpp34
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkDescriptorSet.h44
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkDescriptorSetManager.cpp315
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkDescriptorSetManager.h95
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkExtensions.cpp259
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkExtensions.h46
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkFramebuffer.cpp57
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkFramebuffer.h50
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkGLSLSampler.h49
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkGpu.cpp1918
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkGpu.h290
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkGpuCommandBuffer.cpp450
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkGpuCommandBuffer.h71
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkImage.cpp161
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkImage.h141
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkImageView.cpp45
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkImageView.h48
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkIndexBuffer.cpp74
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkIndexBuffer.h38
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkInterface.cpp318
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkMemory.cpp642
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkMemory.h167
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkPipeline.cpp546
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkPipeline.h57
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkPipelineState.cpp516
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkPipelineState.h236
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkPipelineStateBuilder.cpp169
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkPipelineStateBuilder.h74
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkPipelineStateCache.cpp147
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkPipelineStateDataManager.cpp286
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkPipelineStateDataManager.h83
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkRenderPass.cpp266
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkRenderPass.h146
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkRenderTarget.cpp386
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkRenderTarget.h150
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkResource.h213
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkResourceProvider.cpp481
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkResourceProvider.h258
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkSampler.cpp97
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkSampler.h49
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkStencilAttachment.cpp99
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkStencilAttachment.h57
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkTexture.cpp233
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkTexture.h59
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkTextureRenderTarget.cpp186
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkTextureRenderTarget.h119
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkTransferBuffer.cpp61
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkTransferBuffer.h56
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkUniformBuffer.cpp103
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkUniformBuffer.h58
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkUniformHandler.cpp228
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkUniformHandler.h98
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkUtil.cpp359
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkUtil.h53
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkVaryingHandler.cpp83
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkVaryingHandler.h27
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkVertexBuffer.cpp73
-rw-r--r--gfx/skia/skia/src/gpu/vk/GrVkVertexBuffer.h37
-rw-r--r--gfx/skia/skia/src/image/SkImage.cpp524
-rw-r--r--gfx/skia/skia/src/image/SkImageShader.cpp266
-rw-r--r--gfx/skia/skia/src/image/SkImageShader.h50
-rw-r--r--gfx/skia/skia/src/image/SkImage_Base.h98
-rw-r--r--gfx/skia/skia/src/image/SkImage_Generator.cpp109
-rw-r--r--gfx/skia/skia/src/image/SkImage_Gpu.cpp705
-rw-r--r--gfx/skia/skia/src/image/SkImage_Gpu.h65
-rw-r--r--gfx/skia/skia/src/image/SkImage_Raster.cpp377
-rw-r--r--gfx/skia/skia/src/image/SkReadPixelsRec.h41
-rw-r--r--gfx/skia/skia/src/image/SkSurface.cpp248
-rw-r--r--gfx/skia/skia/src/image/SkSurface_Base.h136
-rw-r--r--gfx/skia/skia/src/image/SkSurface_Gpu.cpp267
-rw-r--r--gfx/skia/skia/src/image/SkSurface_Gpu.h44
-rw-r--r--gfx/skia/skia/src/image/SkSurface_Raster.cpp223
-rw-r--r--gfx/skia/skia/src/images/SkForceLinking.cpp41
-rw-r--r--gfx/skia/skia/src/images/SkGIFMovie.cpp451
-rw-r--r--gfx/skia/skia/src/images/SkImageEncoder.cpp88
-rw-r--r--gfx/skia/skia/src/images/SkImageEncoder_Factory.cpp22
-rw-r--r--gfx/skia/skia/src/images/SkJPEGImageEncoder.cpp184
-rw-r--r--gfx/skia/skia/src/images/SkJPEGWriteUtility.cpp64
-rw-r--r--gfx/skia/skia/src/images/SkJPEGWriteUtility.h46
-rw-r--r--gfx/skia/skia/src/images/SkKTXImageEncoder.cpp97
-rw-r--r--gfx/skia/skia/src/images/SkMovie.cpp94
-rw-r--r--gfx/skia/skia/src/images/SkMovie_FactoryDefault.cpp26
-rw-r--r--gfx/skia/skia/src/images/SkPNGImageEncoder.cpp362
-rw-r--r--gfx/skia/skia/src/images/SkWEBPImageEncoder.cpp247
-rw-r--r--gfx/skia/skia/src/images/transform_scanline.h188
-rw-r--r--gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.cpp258
-rw-r--r--gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.h69
-rw-r--r--gfx/skia/skia/src/lazy/SkDiscardablePixelRef.cpp155
-rw-r--r--gfx/skia/skia/src/lazy/SkDiscardablePixelRef.h81
-rw-r--r--gfx/skia/skia/src/opts/Sk4px_NEON.h99
-rw-r--r--gfx/skia/skia/src/opts/Sk4px_SSE2.h104
-rw-r--r--gfx/skia/skia/src/opts/Sk4px_none.h109
-rw-r--r--gfx/skia/skia/src/opts/SkBitmapFilter_opts_SSE2.cpp500
-rw-r--r--gfx/skia/skia/src/opts/SkBitmapFilter_opts_SSE2.h30
-rw-r--r--gfx/skia/skia/src/opts/SkBitmapProcState_arm_neon.cpp499
-rw-r--r--gfx/skia/skia/src/opts/SkBitmapProcState_filter_neon.h94
-rw-r--r--gfx/skia/skia/src/opts/SkBitmapProcState_matrixProcs_neon.cpp234
-rw-r--r--gfx/skia/skia/src/opts/SkBitmapProcState_matrix_neon.h500
-rw-r--r--gfx/skia/skia/src/opts/SkBitmapProcState_opts_SSE2.cpp621
-rw-r--r--gfx/skia/skia/src/opts/SkBitmapProcState_opts_SSE2.h28
-rw-r--r--gfx/skia/skia/src/opts/SkBitmapProcState_opts_SSSE3.cpp761
-rw-r--r--gfx/skia/skia/src/opts/SkBitmapProcState_opts_SSSE3.h25
-rw-r--r--gfx/skia/skia/src/opts/SkBitmapProcState_opts_arm.cpp30
-rw-r--r--gfx/skia/skia/src/opts/SkBitmapProcState_opts_mips_dsp.cpp261
-rw-r--r--gfx/skia/skia/src/opts/SkBitmapProcState_opts_none.cpp27
-rw-r--r--gfx/skia/skia/src/opts/SkBlend_opts.h187
-rw-r--r--gfx/skia/skia/src/opts/SkBlitMask_opts.h206
-rw-r--r--gfx/skia/skia/src/opts/SkBlitMask_opts_arm.cpp26
-rw-r--r--gfx/skia/skia/src/opts/SkBlitMask_opts_arm_neon.cpp211
-rw-r--r--gfx/skia/skia/src/opts/SkBlitMask_opts_arm_neon.h21
-rw-r--r--gfx/skia/skia/src/opts/SkBlitMask_opts_none.cpp18
-rw-r--r--gfx/skia/skia/src/opts/SkBlitRow_opts.h235
-rw-r--r--gfx/skia/skia/src/opts/SkBlitRow_opts_SSE2.cpp990
-rw-r--r--gfx/skia/skia/src/opts/SkBlitRow_opts_SSE2.h41
-rw-r--r--gfx/skia/skia/src/opts/SkBlitRow_opts_arm.cpp35
-rw-r--r--gfx/skia/skia/src/opts/SkBlitRow_opts_arm_neon.cpp1323
-rw-r--r--gfx/skia/skia/src/opts/SkBlitRow_opts_arm_neon.h16
-rw-r--r--gfx/skia/skia/src/opts/SkBlitRow_opts_mips_dsp.cpp958
-rw-r--r--gfx/skia/skia/src/opts/SkBlitRow_opts_none.cpp22
-rw-r--r--gfx/skia/skia/src/opts/SkBlurImageFilter_opts.h323
-rw-r--r--gfx/skia/skia/src/opts/SkChecksum_opts.h216
-rw-r--r--gfx/skia/skia/src/opts/SkColorCubeFilter_opts.h84
-rw-r--r--gfx/skia/skia/src/opts/SkColor_opts_SSE2.h305
-rw-r--r--gfx/skia/skia/src/opts/SkColor_opts_neon.h121
-rw-r--r--gfx/skia/skia/src/opts/SkMorphologyImageFilter_opts.h137
-rw-r--r--gfx/skia/skia/src/opts/SkNx_neon.h556
-rw-r--r--gfx/skia/skia/src/opts/SkNx_sse.h506
-rw-r--r--gfx/skia/skia/src/opts/SkOpts_avx.cpp14
-rw-r--r--gfx/skia/skia/src/opts/SkOpts_crc32.cpp17
-rw-r--r--gfx/skia/skia/src/opts/SkOpts_hsw.cpp15
-rw-r--r--gfx/skia/skia/src/opts/SkOpts_neon.cpp54
-rw-r--r--gfx/skia/skia/src/opts/SkOpts_sse41.cpp81
-rw-r--r--gfx/skia/skia/src/opts/SkOpts_sse42.cpp18
-rw-r--r--gfx/skia/skia/src/opts/SkOpts_ssse3.cpp32
-rw-r--r--gfx/skia/skia/src/opts/SkRasterPipeline_opts.h358
-rw-r--r--gfx/skia/skia/src/opts/SkSwizzler_opts.h846
-rw-r--r--gfx/skia/skia/src/opts/SkTextureCompressor_opts.h266
-rw-r--r--gfx/skia/skia/src/opts/SkXfermode_opts.h360
-rw-r--r--gfx/skia/skia/src/opts/opts_check_x86.cpp163
-rw-r--r--gfx/skia/skia/src/pathops/SkAddIntersections.cpp564
-rw-r--r--gfx/skia/skia/src/pathops/SkAddIntersections.h17
-rw-r--r--gfx/skia/skia/src/pathops/SkDConicLineIntersection.cpp384
-rw-r--r--gfx/skia/skia/src/pathops/SkDCubicLineIntersection.cpp454
-rw-r--r--gfx/skia/skia/src/pathops/SkDCubicToQuads.cpp44
-rw-r--r--gfx/skia/skia/src/pathops/SkDLineIntersection.cpp333
-rw-r--r--gfx/skia/skia/src/pathops/SkDQuadLineIntersection.cpp470
-rw-r--r--gfx/skia/skia/src/pathops/SkIntersectionHelper.h113
-rw-r--r--gfx/skia/skia/src/pathops/SkIntersections.cpp160
-rw-r--r--gfx/skia/skia/src/pathops/SkIntersections.h329
-rw-r--r--gfx/skia/skia/src/pathops/SkLineParameters.h181
-rw-r--r--gfx/skia/skia/src/pathops/SkOpAngle.cpp995
-rw-r--r--gfx/skia/skia/src/pathops/SkOpAngle.h137
-rw-r--r--gfx/skia/skia/src/pathops/SkOpBuilder.cpp186
-rwxr-xr-xgfx/skia/skia/src/pathops/SkOpCoincidence.cpp1363
-rw-r--r--gfx/skia/skia/src/pathops/SkOpCoincidence.h303
-rw-r--r--gfx/skia/skia/src/pathops/SkOpContour.cpp70
-rw-r--r--gfx/skia/skia/src/pathops/SkOpContour.h431
-rw-r--r--gfx/skia/skia/src/pathops/SkOpCubicHull.cpp150
-rw-r--r--gfx/skia/skia/src/pathops/SkOpEdgeBuilder.cpp305
-rw-r--r--gfx/skia/skia/src/pathops/SkOpEdgeBuilder.h71
-rw-r--r--gfx/skia/skia/src/pathops/SkOpSegment.cpp1695
-rw-r--r--gfx/skia/skia/src/pathops/SkOpSegment.h458
-rwxr-xr-xgfx/skia/skia/src/pathops/SkOpSpan.cpp475
-rw-r--r--gfx/skia/skia/src/pathops/SkOpSpan.h570
-rw-r--r--gfx/skia/skia/src/pathops/SkOpTAllocator.h33
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsBounds.h65
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsCommon.cpp335
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsCommon.h33
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsConic.cpp169
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsConic.h123
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsCubic.cpp706
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsCubic.h150
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsCurve.cpp145
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsCurve.h415
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsDebug.cpp2913
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsDebug.h382
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsLine.cpp149
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsLine.h39
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsOp.cpp472
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsPoint.cpp12
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsPoint.h271
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsQuad.cpp390
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsQuad.h113
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsRect.cpp62
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsRect.h69
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsSimplify.cpp222
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsTSect.cpp62
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsTSect.h2365
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsTightBounds.cpp83
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsTypes.cpp251
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsTypes.h618
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsWinding.cpp416
-rw-r--r--gfx/skia/skia/src/pathops/SkPathWriter.cpp362
-rw-r--r--gfx/skia/skia/src/pathops/SkPathWriter.h54
-rw-r--r--gfx/skia/skia/src/pathops/SkReduceOrder.cpp283
-rw-r--r--gfx/skia/skia/src/pathops/SkReduceOrder.h35
-rw-r--r--gfx/skia/skia/src/pdf/SkBitmapKey.h77
-rw-r--r--gfx/skia/skia/src/pdf/SkDeflate.cpp121
-rw-r--r--gfx/skia/skia/src/pdf/SkDeflate.h53
-rw-r--r--gfx/skia/skia/src/pdf/SkDocument_PDF_None.cpp17
-rw-r--r--gfx/skia/skia/src/pdf/SkJpegInfo.cpp119
-rw-r--r--gfx/skia/skia/src/pdf/SkJpegInfo.h31
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFBitmap.cpp524
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFBitmap.h24
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFCanon.cpp125
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFCanon.h108
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFCanvas.cpp97
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFCanvas.h56
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFConvertType1FontStream.cpp205
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFConvertType1FontStream.h28
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFDevice.cpp2325
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFDevice.h303
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFDocument.cpp463
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFDocument.h89
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFFont.cpp736
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFFont.h136
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFFormXObject.cpp39
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFFormXObject.h25
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFGraphicState.cpp205
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFGraphicState.h80
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.cpp262
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.h23
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.cpp225
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.h29
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFMetadata.cpp329
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFMetadata.h30
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFResourceDict.cpp100
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFResourceDict.h60
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFShader.cpp1362
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFShader.h86
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFTypes.cpp613
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFTypes.h396
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFUtils.cpp481
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFUtils.h107
-rw-r--r--gfx/skia/skia/src/pdf/SkScopeExit.h50
-rw-r--r--gfx/skia/skia/src/pipe/SkPipeCanvas.cpp1069
-rw-r--r--gfx/skia/skia/src/pipe/SkPipeCanvas.h170
-rw-r--r--gfx/skia/skia/src/pipe/SkPipeFormat.h220
-rw-r--r--gfx/skia/skia/src/pipe/SkPipeReader.cpp962
-rw-r--r--gfx/skia/skia/src/pipe/SkRefSet.h40
-rw-r--r--gfx/skia/skia/src/ports/SkDebug_android.cpp36
-rw-r--r--gfx/skia/skia/src/ports/SkDebug_stdio.cpp20
-rw-r--r--gfx/skia/skia/src/ports/SkDebug_win.cpp34
-rw-r--r--gfx/skia/skia/src/ports/SkDiscardableMemory_none.cpp14
-rw-r--r--gfx/skia/skia/src/ports/SkFontConfigInterface.cpp30
-rw-r--r--gfx/skia/skia/src/ports/SkFontConfigInterface_direct.cpp735
-rw-r--r--gfx/skia/skia/src/ports/SkFontConfigInterface_direct.h38
-rw-r--r--gfx/skia/skia/src/ports/SkFontConfigInterface_direct_factory.cpp16
-rw-r--r--gfx/skia/skia/src/ports/SkFontConfigTypeface.h67
-rw-r--r--gfx/skia/skia/src/ports/SkFontHost_FreeType.cpp1776
-rw-r--r--gfx/skia/skia/src/ports/SkFontHost_FreeType_common.cpp635
-rw-r--r--gfx/skia/skia/src/ports/SkFontHost_FreeType_common.h99
-rw-r--r--gfx/skia/skia/src/ports/SkFontHost_cairo.cpp846
-rw-r--r--gfx/skia/skia/src/ports/SkFontHost_mac.cpp2621
-rw-r--r--gfx/skia/skia/src/ports/SkFontHost_win.cpp2503
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface.cpp302
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface_factory.cpp18
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_android.cpp550
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_android_factory.cpp48
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_android_parser.cpp801
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_android_parser.h213
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_custom.cpp522
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_custom_directory_factory.cpp17
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_custom_embedded_factory.cpp17
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_custom_empty_factory.cpp13
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_empty_factory.cpp13
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_fontconfig.cpp958
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_fontconfig_factory.cpp14
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_win_dw.cpp1095
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_win_dw_factory.cpp18
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_win_gdi_factory.cpp18
-rw-r--r--gfx/skia/skia/src/ports/SkGlobalInitialization_default.cpp45
-rw-r--r--gfx/skia/skia/src/ports/SkImageEncoder_CG.cpp151
-rw-r--r--gfx/skia/skia/src/ports/SkImageEncoder_WIC.cpp240
-rw-r--r--gfx/skia/skia/src/ports/SkImageEncoder_none.cpp76
-rw-r--r--gfx/skia/skia/src/ports/SkImageGeneratorCG.cpp122
-rw-r--r--gfx/skia/skia/src/ports/SkImageGeneratorCG.h43
-rw-r--r--gfx/skia/skia/src/ports/SkImageGeneratorWIC.cpp175
-rw-r--r--gfx/skia/skia/src/ports/SkImageGeneratorWIC.h63
-rw-r--r--gfx/skia/skia/src/ports/SkImageGenerator_none.cpp12
-rw-r--r--gfx/skia/skia/src/ports/SkImageGenerator_skia.cpp13
-rw-r--r--gfx/skia/skia/src/ports/SkMemory_malloc.cpp75
-rw-r--r--gfx/skia/skia/src/ports/SkMemory_mozalloc.cpp53
-rw-r--r--gfx/skia/skia/src/ports/SkOSFile_posix.cpp181
-rw-r--r--gfx/skia/skia/src/ports/SkOSFile_stdio.cpp215
-rw-r--r--gfx/skia/skia/src/ports/SkOSFile_win.cpp247
-rw-r--r--gfx/skia/skia/src/ports/SkOSLibrary.h14
-rw-r--r--gfx/skia/skia/src/ports/SkOSLibrary_posix.cpp25
-rw-r--r--gfx/skia/skia/src/ports/SkOSLibrary_win.cpp21
-rw-r--r--gfx/skia/skia/src/ports/SkRemotableFontMgr_win_dw.cpp490
-rw-r--r--gfx/skia/skia/src/ports/SkScalerContext_win_dw.cpp938
-rw-r--r--gfx/skia/skia/src/ports/SkScalerContext_win_dw.h85
-rw-r--r--gfx/skia/skia/src/ports/SkTLS_none.cpp18
-rw-r--r--gfx/skia/skia/src/ports/SkTLS_pthread.cpp25
-rw-r--r--gfx/skia/skia/src/ports/SkTLS_win.cpp80
-rw-r--r--gfx/skia/skia/src/ports/SkTypeface_win_dw.cpp431
-rw-r--r--gfx/skia/skia/src/ports/SkTypeface_win_dw.h137
-rw-r--r--gfx/skia/skia/src/sfnt/SkIBMFamilyClass.h142
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTableTypes.h62
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_EBDT.h108
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_EBLC.h150
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_EBSC.h41
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2.h52
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V0.h146
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V1.h515
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V2.h538
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V3.h547
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V4.h582
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2_VA.h141
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_gasp.h72
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_glyf.h213
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_head.h146
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_hhea.h54
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_loca.h31
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_maxp.h34
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_maxp_CFF.h30
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_maxp_TT.h48
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_name.cpp534
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_name.h575
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_post.h50
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTUtils.cpp203
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTUtils.h91
-rw-r--r--gfx/skia/skia/src/sfnt/SkPanose.h527
-rw-r--r--gfx/skia/skia/src/sfnt/SkSFNTHeader.h70
-rw-r--r--gfx/skia/skia/src/sfnt/SkTTCFHeader.h56
-rw-r--r--gfx/skia/skia/src/sksl/GLSL.std.450.h131
-rw-r--r--gfx/skia/skia/src/sksl/SkSLCodeGenerator.h30
-rw-r--r--gfx/skia/skia/src/sksl/SkSLCompiler.cpp269
-rw-r--r--gfx/skia/skia/src/sksl/SkSLCompiler.h66
-rw-r--r--gfx/skia/skia/src/sksl/SkSLContext.h227
-rw-r--r--gfx/skia/skia/src/sksl/SkSLErrorReporter.h27
-rw-r--r--gfx/skia/skia/src/sksl/SkSLGLSLCodeGenerator.cpp480
-rw-r--r--gfx/skia/skia/src/sksl/SkSLGLSLCodeGenerator.h177
-rw-r--r--gfx/skia/skia/src/sksl/SkSLIRGenerator.cpp1260
-rw-r--r--gfx/skia/skia/src/sksl/SkSLIRGenerator.h123
-rw-r--r--gfx/skia/skia/src/sksl/SkSLMain.cpp48
-rw-r--r--gfx/skia/skia/src/sksl/SkSLParser.cpp1407
-rw-r--r--gfx/skia/skia/src/sksl/SkSLParser.h209
-rw-r--r--gfx/skia/skia/src/sksl/SkSLPosition.h38
-rw-r--r--gfx/skia/skia/src/sksl/SkSLSPIRVCodeGenerator.cpp2638
-rw-r--r--gfx/skia/skia/src/sksl/SkSLSPIRVCodeGenerator.h267
-rw-r--r--gfx/skia/skia/src/sksl/SkSLToken.h162
-rw-r--r--gfx/skia/skia/src/sksl/SkSLUtil.cpp33
-rw-r--r--gfx/skia/skia/src/sksl/SkSLUtil.h57
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTBinaryExpression.h42
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTBlock.h40
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTBoolLiteral.h34
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTBreakStatement.h31
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTCallSuffix.h44
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTContinueStatement.h31
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTDeclaration.h37
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTDiscardStatement.h31
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTDoStatement.h37
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTExpression.h41
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTExpressionStatement.h34
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTExtension.h34
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTFieldSuffix.h35
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTFloatLiteral.h34
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTForStatement.h56
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTFunction.h57
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTIdentifier.h34
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTIfStatement.h47
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTIndexSuffix.h35
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTIntLiteral.h35
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTInterfaceBlock.h58
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTLayout.h74
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTModifiers.h78
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTNode.h28
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTParameter.h48
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTPositionNode.h28
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTPrefixExpression.h37
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTReturnStatement.h39
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTStatement.h46
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTSuffix.h51
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTSuffixExpression.h37
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTTernaryExpression.h41
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTType.h40
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTVarDeclaration.h88
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTVarDeclarationStatement.h35
-rw-r--r--gfx/skia/skia/src/sksl/ast/SkSLASTWhileStatement.h37
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLBinaryExpression.h41
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLBlock.h44
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLBoolLiteral.h39
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLBreakStatement.h32
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructor.h52
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLContinueStatement.h32
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLDiscardStatement.h32
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLDoStatement.h38
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLExpression.h55
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLExpressionStatement.h35
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLExtension.h34
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLField.h41
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFieldAccess.h47
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFloatLiteral.h39
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLForStatement.h59
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFunctionCall.h46
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFunctionDeclaration.h66
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFunctionDefinition.h39
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFunctionReference.h38
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLIRNode.h32
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLIfStatement.h44
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLIndexExpression.h65
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLIntLiteral.h40
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLInterfaceBlock.h51
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLLayout.h92
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLModifiers.h90
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLPostfixExpression.h36
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLPrefixExpression.h36
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLProgram.h42
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLProgramElement.h37
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLReturnStatement.h42
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLStatement.h45
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSwizzle.h87
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSymbol.h40
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.cpp100
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.h56
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLTernaryExpression.h43
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLType.cpp135
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLType.h345
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLTypeReference.h37
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLUnresolvedFunction.h40
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLVarDeclaration.h83
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLVarDeclarationStatement.h35
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLVariable.h55
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLVariableReference.h38
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLWhileStatement.h38
-rw-r--r--gfx/skia/skia/src/sksl/lex.sksl.c2505
-rw-r--r--gfx/skia/skia/src/sksl/sksl.flex191
-rw-r--r--gfx/skia/skia/src/sksl/sksl.include544
-rw-r--r--gfx/skia/skia/src/sksl/sksl_frag.include8
-rw-r--r--gfx/skia/skia/src/sksl/sksl_vert.include11
-rw-r--r--gfx/skia/skia/src/sksl/spirv.h870
-rw-r--r--gfx/skia/skia/src/svg/SkSVGCanvas.cpp17
-rw-r--r--gfx/skia/skia/src/svg/SkSVGDevice.cpp813
-rw-r--r--gfx/skia/skia/src/svg/SkSVGDevice.h73
-rw-r--r--gfx/skia/skia/src/utils/SkBase64.cpp163
-rw-r--r--gfx/skia/skia/src/utils/SkBase64.h39
-rw-r--r--gfx/skia/skia/src/utils/SkBitSet.h82
-rw-r--r--gfx/skia/skia/src/utils/SkBitmapSourceDeserializer.cpp31
-rw-r--r--gfx/skia/skia/src/utils/SkBitmapSourceDeserializer.h21
-rw-r--r--gfx/skia/skia/src/utils/SkBoundaryPatch.cpp78
-rw-r--r--gfx/skia/skia/src/utils/SkCamera.cpp373
-rw-r--r--gfx/skia/skia/src/utils/SkCanvasStack.cpp104
-rw-r--r--gfx/skia/skia/src/utils/SkCanvasStack.h52
-rw-r--r--gfx/skia/skia/src/utils/SkCanvasStateUtils.cpp355
-rw-r--r--gfx/skia/skia/src/utils/SkCurveMeasure.cpp310
-rw-r--r--gfx/skia/skia/src/utils/SkCurveMeasure.h76
-rw-r--r--gfx/skia/skia/src/utils/SkDashPath.cpp345
-rw-r--r--gfx/skia/skia/src/utils/SkDashPathPriv.h52
-rw-r--r--gfx/skia/skia/src/utils/SkDeferredCanvas.cpp570
-rw-r--r--gfx/skia/skia/src/utils/SkDeferredCanvas.h156
-rw-r--r--gfx/skia/skia/src/utils/SkDumpCanvas.cpp571
-rw-r--r--gfx/skia/skia/src/utils/SkEventTracer.cpp60
-rw-r--r--gfx/skia/skia/src/utils/SkFloatUtils.h173
-rw-r--r--gfx/skia/skia/src/utils/SkFrontBufferedStream.cpp212
-rw-r--r--gfx/skia/skia/src/utils/SkInterpolator.cpp270
-rw-r--r--gfx/skia/skia/src/utils/SkLayer.cpp229
-rw-r--r--gfx/skia/skia/src/utils/SkLua.cpp2151
-rw-r--r--gfx/skia/skia/src/utils/SkLuaCanvas.cpp317
-rw-r--r--gfx/skia/skia/src/utils/SkMatrix22.cpp40
-rw-r--r--gfx/skia/skia/src/utils/SkMatrix22.h31
-rw-r--r--gfx/skia/skia/src/utils/SkMeshUtils.cpp101
-rw-r--r--gfx/skia/skia/src/utils/SkMultiPictureDocument.cpp91
-rw-r--r--gfx/skia/skia/src/utils/SkMultiPictureDocument.h48
-rw-r--r--gfx/skia/skia/src/utils/SkMultiPictureDocumentPriv.h32
-rw-r--r--gfx/skia/skia/src/utils/SkMultiPictureDocumentReader.cpp93
-rw-r--r--gfx/skia/skia/src/utils/SkMultiPictureDocumentReader.h46
-rw-r--r--gfx/skia/skia/src/utils/SkNWayCanvas.cpp325
-rw-r--r--gfx/skia/skia/src/utils/SkNullCanvas.cpp18
-rw-r--r--gfx/skia/skia/src/utils/SkOSFile.cpp44
-rw-r--r--gfx/skia/skia/src/utils/SkPaintFilterCanvas.cpp230
-rw-r--r--gfx/skia/skia/src/utils/SkParse.cpp296
-rw-r--r--gfx/skia/skia/src/utils/SkParseColor.cpp538
-rw-r--r--gfx/skia/skia/src/utils/SkParsePath.cpp269
-rw-r--r--gfx/skia/skia/src/utils/SkPatchGrid.cpp189
-rw-r--r--gfx/skia/skia/src/utils/SkPatchGrid.h144
-rw-r--r--gfx/skia/skia/src/utils/SkPatchUtils.cpp311
-rw-r--r--gfx/skia/skia/src/utils/SkPatchUtils.h121
-rw-r--r--gfx/skia/skia/src/utils/SkRGBAToYUV.cpp58
-rw-r--r--gfx/skia/skia/src/utils/SkRGBAToYUV.h21
-rw-r--r--gfx/skia/skia/src/utils/SkShadowPaintFilterCanvas.cpp307
-rw-r--r--gfx/skia/skia/src/utils/SkShadowPaintFilterCanvas.h117
-rw-r--r--gfx/skia/skia/src/utils/SkTextBox.cpp302
-rw-r--r--gfx/skia/skia/src/utils/SkTextureCompressor.cpp231
-rw-r--r--gfx/skia/skia/src/utils/SkTextureCompressor.h110
-rw-r--r--gfx/skia/skia/src/utils/SkTextureCompressor_ASTC.cpp2101
-rw-r--r--gfx/skia/skia/src/utils/SkTextureCompressor_ASTC.h27
-rw-r--r--gfx/skia/skia/src/utils/SkTextureCompressor_Blitter.h733
-rw-r--r--gfx/skia/skia/src/utils/SkTextureCompressor_LATC.cpp519
-rw-r--r--gfx/skia/skia/src/utils/SkTextureCompressor_LATC.h26
-rw-r--r--gfx/skia/skia/src/utils/SkTextureCompressor_R11EAC.cpp670
-rw-r--r--gfx/skia/skia/src/utils/SkTextureCompressor_R11EAC.h26
-rwxr-xr-xgfx/skia/skia/src/utils/SkTextureCompressor_Utils.h68
-rw-r--r--gfx/skia/skia/src/utils/SkThreadUtils.h39
-rw-r--r--gfx/skia/skia/src/utils/SkThreadUtils_pthread.cpp117
-rw-r--r--gfx/skia/skia/src/utils/SkThreadUtils_pthread.h43
-rw-r--r--gfx/skia/skia/src/utils/SkThreadUtils_win.cpp101
-rw-r--r--gfx/skia/skia/src/utils/SkThreadUtils_win.h27
-rw-r--r--gfx/skia/skia/src/utils/SkWhitelistChecksums.inc50
-rw-r--r--gfx/skia/skia/src/utils/SkWhitelistTypefaces.cpp273
-rw-r--r--gfx/skia/skia/src/utils/mac/SkCreateCGImageRef.cpp248
-rw-r--r--gfx/skia/skia/src/utils/mac/SkStream_mac.cpp81
-rw-r--r--gfx/skia/skia/src/utils/win/SkAutoCoInitialize.cpp32
-rw-r--r--gfx/skia/skia/src/utils/win/SkAutoCoInitialize.h31
-rw-r--r--gfx/skia/skia/src/utils/win/SkDWrite.cpp128
-rw-r--r--gfx/skia/skia/src/utils/win/SkDWrite.h107
-rw-r--r--gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.cpp235
-rw-r--r--gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.h79
-rw-r--r--gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.cpp149
-rw-r--r--gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.h46
-rw-r--r--gfx/skia/skia/src/utils/win/SkHRESULT.cpp40
-rw-r--r--gfx/skia/skia/src/utils/win/SkHRESULT.h62
-rw-r--r--gfx/skia/skia/src/utils/win/SkIStream.cpp276
-rw-r--r--gfx/skia/skia/src/utils/win/SkIStream.h135
-rw-r--r--gfx/skia/skia/src/utils/win/SkTScopedComPtr.h81
-rw-r--r--gfx/skia/skia/src/utils/win/SkWGL.h162
-rw-r--r--gfx/skia/skia/src/utils/win/SkWGL_win.cpp469
-rw-r--r--gfx/skia/skia/src/views/SkEvent.cpp512
-rw-r--r--gfx/skia/skia/src/views/SkEventSink.cpp300
-rw-r--r--gfx/skia/skia/src/views/SkOSMenu.cpp263
-rw-r--r--gfx/skia/skia/src/views/SkTagList.cpp61
-rw-r--r--gfx/skia/skia/src/views/SkTagList.h42
-rw-r--r--gfx/skia/skia/src/views/SkTouchGesture.cpp353
-rw-r--r--gfx/skia/skia/src/views/SkView.cpp810
-rw-r--r--gfx/skia/skia/src/views/SkViewPriv.cpp102
-rw-r--r--gfx/skia/skia/src/views/SkViewPriv.h43
-rw-r--r--gfx/skia/skia/src/views/SkWindow.cpp361
-rwxr-xr-xgfx/skia/skia/src/views/ios/SkOSWindow_iOS.mm72
-rw-r--r--gfx/skia/skia/src/views/mac/SkEventNotifier.h13
-rw-r--r--gfx/skia/skia/src/views/mac/SkEventNotifier.mm68
-rw-r--r--gfx/skia/skia/src/views/mac/SkNSView.h56
-rw-r--r--gfx/skia/skia/src/views/mac/SkNSView.mm430
-rw-r--r--gfx/skia/skia/src/views/mac/SkOSWindow_Mac.mm95
-rw-r--r--gfx/skia/skia/src/views/mac/SkOptionsTableView.h39
-rw-r--r--gfx/skia/skia/src/views/mac/SkOptionsTableView.mm297
-rw-r--r--gfx/skia/skia/src/views/mac/SkSampleNSView.h11
-rw-r--r--gfx/skia/skia/src/views/mac/SkSampleNSView.mm31
-rw-r--r--gfx/skia/skia/src/views/mac/SkTextFieldCell.h14
-rw-r--r--gfx/skia/skia/src/views/mac/SkTextFieldCell.m56
-rw-r--r--gfx/skia/skia/src/views/mac/skia_mac.mm126
-rw-r--r--gfx/skia/skia/src/views/sdl/SkOSWindow_SDL.cpp401
-rw-r--r--gfx/skia/skia/src/views/unix/SkOSWindow_Unix.cpp519
-rw-r--r--gfx/skia/skia/src/views/unix/XkeysToSkKeys.h66
-rw-r--r--gfx/skia/skia/src/views/unix/keysym2ucs.c848
-rw-r--r--gfx/skia/skia/src/views/unix/keysym2ucs.h14
-rw-r--r--gfx/skia/skia/src/views/unix/skia_unix.cpp28
-rw-r--r--gfx/skia/skia/src/views/win/SkOSWindow_win.cpp772
-rw-r--r--gfx/skia/skia/src/views/win/skia_win.cpp135
-rw-r--r--gfx/skia/skia/src/xml/SkDOM.cpp477
-rw-r--r--gfx/skia/skia/src/xml/SkXMLParser.cpp214
-rw-r--r--gfx/skia/skia/src/xml/SkXMLWriter.cpp361
-rw-r--r--gfx/skia/skia/src/xps/SkDocument_XPS.cpp82
-rw-r--r--gfx/skia/skia/src/xps/SkDocument_XPS_None.cpp17
-rw-r--r--gfx/skia/skia/src/xps/SkXPSDevice.cpp2288
-rw-r--r--gfx/skia/skia/src/xps/SkXPSDevice.h324
1974 files changed, 455705 insertions, 0 deletions
diff --git a/gfx/skia/LICENSE b/gfx/skia/LICENSE
new file mode 100644
index 000000000..e74c256cb
--- /dev/null
+++ b/gfx/skia/LICENSE
@@ -0,0 +1,27 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/gfx/skia/README b/gfx/skia/README
new file mode 100644
index 000000000..84e4ecc90
--- /dev/null
+++ b/gfx/skia/README
@@ -0,0 +1,3 @@
+Skia is a complete 2D graphics library for drawing text, geometries, and images.
+
+See full details, and build instructions, at http://code.google.com/p/skia/wiki/DocRoot
diff --git a/gfx/skia/README_COMMITTING b/gfx/skia/README_COMMITTING
new file mode 100644
index 000000000..4014ea3c7
--- /dev/null
+++ b/gfx/skia/README_COMMITTING
@@ -0,0 +1,10 @@
+Any change to Skia needs, at a minimum, a Mozilla bug tagged with the [skia-upstream]
+whiteboard tag as well as an upstream bug and review request. Any patch that ultimately lands
+in mozilla-central must be reviewed by a Skia submodule peer.
+
+See https://wiki.mozilla.org/Modules/Core#Graphics for current peers.
+
+In most cases the patch will need to have an r+ from upstream before it is eligible to land here.
+
+For information on submitting upstream, see:
+https://sites.google.com/site/skiadocs/developer-documentation/contributing-code/how-to-submit-a-patch
diff --git a/gfx/skia/README_MOZILLA b/gfx/skia/README_MOZILLA
new file mode 100644
index 000000000..bf5989d0d
--- /dev/null
+++ b/gfx/skia/README_MOZILLA
@@ -0,0 +1,12 @@
+This is an import of Skia. See skia/include/core/SkMilestone.h for the milestone number.
+
+To update to a new version of Skia:
+
+- Clone Skia from upstream using the instructions here: https://sites.google.com/site/skiadocs/user-documentation/downloading
+- Copy the entire source tree from a Skia clone to mozilla-central/gfx/skia/skia
+- cd gfx/skia && ./gyp_mozbuild
+
+Once that's done, use git status to view the files that have changed. Keep an eye on GrUserConfig.h
+and SkUserConfig.h, as those should generally not be overwritten by the upstream versions.
+
+This process will be made more automatic in the future.
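The update steps above amount to replacing the vendored tree and rerunning the generator. A minimal sketch of that flow in Python, assuming an upstream clone path and a mozilla-central checkout (both paths are assumptions, not part of the tree layout):

import os
import shutil
import subprocess

def update_skia(upstream_clone, mozilla_central):
    # Replace the vendored copy with the new upstream source tree.
    dest = os.path.join(mozilla_central, 'gfx', 'skia', 'skia')
    if os.path.exists(dest):
        shutil.rmtree(dest)
    shutil.copytree(upstream_clone, dest)
    # Regenerate moz.build; remember to restore GrUserConfig.h and
    # SkUserConfig.h afterwards, per the note above.
    subprocess.check_call(['./gyp_mozbuild'],
                          cwd=os.path.join(mozilla_central, 'gfx', 'skia'))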
diff --git a/gfx/skia/dump_mozbuild.py b/gfx/skia/dump_mozbuild.py
new file mode 100644
index 000000000..b5d40a65d
--- /dev/null
+++ b/gfx/skia/dump_mozbuild.py
@@ -0,0 +1,92 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import os
+import gyp
+import gyp.common
+import gyp.msvs_emulation
+import json
+import sys
+
+generator_supports_multiple_toolsets = True
+
+generator_wants_static_library_dependencies_adjusted = False
+
+generator_default_variables = {
+}
+for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
+ 'LIB_DIR', 'SHARED_LIB_DIR']:
+ # Some gyp steps fail if these are empty(!).
+ generator_default_variables[dirname] = 'dir'
+for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
+ 'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
+ 'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
+ 'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
+ 'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
+ 'CONFIGURATION_NAME']:
+ generator_default_variables[unused] = ''
+
+
+def CalculateVariables(default_variables, params):
+ generator_flags = params.get('generator_flags', {})
+ for key, val in generator_flags.items():
+ default_variables.setdefault(key, val)
+ default_variables.setdefault('OS', gyp.common.GetFlavor(params))
+
+ flavor = gyp.common.GetFlavor(params)
+ if flavor =='win':
+ # Copy additional generator configuration data from VS, which is shared
+ # by the Windows Ninja generator.
+ import gyp.generator.msvs as msvs_generator
+ generator_additional_non_configuration_keys = getattr(msvs_generator,
+ 'generator_additional_non_configuration_keys', [])
+ generator_additional_path_sections = getattr(msvs_generator,
+ 'generator_additional_path_sections', [])
+
+ gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
+
+
+def CalculateGeneratorInputInfo(params):
+ """Calculate the generator specific info that gets fed to input (called by
+ gyp)."""
+ generator_flags = params.get('generator_flags', {})
+ if generator_flags.get('adjust_static_libraries', False):
+ global generator_wants_static_library_dependencies_adjusted
+ generator_wants_static_library_dependencies_adjusted = True
+
+def GetOS(params):
+ for d in params['defines']:
+ pass
+
+def GenerateOutput(target_list, target_dicts, data, params):
+ # Map of target -> list of targets it depends on.
+ edges = {}
+
+ # Queue of targets to visit.
+ targets_to_visit = target_list[:]
+
+ sources = [];
+
+ while len(targets_to_visit) > 0:
+ target = targets_to_visit.pop()
+ if target in edges:
+ continue
+ edges[target] = []
+
+ target_sources = target_dicts[target].get('sources')
+ if target_sources:
+ for source in target_sources:
+ if source.endswith('.cpp'):
+ sources.append(source)
+
+ for dep in target_dicts[target].get('dependencies', []):
+ edges[target].append(dep)
+ targets_to_visit.append(dep)
+
+ skia_os = data['gyp/core.gyp']['variables']['skia_os%']
+
+ f = open('sources.json', 'w')
+ json.dump(sources, f)
+ f.close() \ No newline at end of file
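The generator above dumps sources.json as a flat JSON list of the .cpp files it visited, with gyp-relative paths such as "../src/core/SkPath.cpp". generate_mozbuild.py, added next, reads that file and normalizes the paths to be tree-relative; a minimal sketch of that consumption step:

import json

def load_sources(path='skia/sources.json'):
    # Gyp-relative paths ("../src/...") become tree-relative ("skia/src/...").
    with open(path) as f:
        return set(v.replace('../', 'skia/') for v in json.load(f))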
diff --git a/gfx/skia/generate_mozbuild.py b/gfx/skia/generate_mozbuild.py
new file mode 100755
index 000000000..bb069870a
--- /dev/null
+++ b/gfx/skia/generate_mozbuild.py
@@ -0,0 +1,502 @@
+#!/usr/bin/env python
+
+import os
+
+import locale
+from collections import defaultdict
+locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
+
+header = """
+#
+# ##### ####### # # # # # #
+# ## # # # # # # # # # # # #
+# ## # # # # # # # # # # #
+# ## #### # # # # # # # # # #
+# ## # # # ####### # # # ####### # ###
+# ## # # # # # # # # # # # ###
+# # ##### ####### # # ## ## # # # ###
+#
+# Seriously. You shouldn't even be looking at this file unless you're
+# debugging generate_mozbuild.py.
+#
+# DO NOT MODIFY THIS FILE IT IS AUTOGENERATED.
+#
+
+skia_opt_flags = []
+
+if CONFIG['MOZ_OPTIMIZE']:
+ if CONFIG['_MSC_VER']:
+ skia_opt_flags += ['-O2']
+ elif CONFIG['GNU_CC']:
+ skia_opt_flags += ['-O3']
+
+"""
+
+footer = """
+
+# We allow warnings for third-party code that can be updated from upstream.
+ALLOW_COMPILER_WARNINGS = True
+
+FINAL_LIBRARY = 'gkmedias'
+LOCAL_INCLUDES += [
+ 'skia/include/c',
+ 'skia/include/config',
+ 'skia/include/core',
+ 'skia/include/effects',
+ 'skia/include/gpu',
+ 'skia/include/images',
+ 'skia/include/pathops',
+ 'skia/include/ports',
+ 'skia/include/private',
+ 'skia/include/utils',
+ 'skia/include/utils/mac',
+ 'skia/include/views',
+ 'skia/src/core',
+ 'skia/src/gpu',
+ 'skia/src/gpu/effects',
+ 'skia/src/gpu/gl',
+ 'skia/src/image',
+ 'skia/src/lazy',
+ 'skia/src/opts',
+ 'skia/src/sfnt',
+ 'skia/src/utils',
+ 'skia/src/utils/mac',
+ 'skia/src/utils/win',
+]
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] in ('android'):
+ DEFINES['SK_FONTHOST_CAIRO_STANDALONE'] = 0
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] in {
+ 'android',
+ 'cocoa',
+ 'gtk2',
+ 'gtk3',
+ 'uikit',
+ }:
+ DEFINES['SK_FONTHOST_DOES_NOT_USE_FONTMGR'] = 1
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':
+ DEFINES['UNICODE'] = True
+ DEFINES['_UNICODE'] = True
+ UNIFIED_SOURCES += [
+ 'skia/src/fonts/SkFontMgr_indirect.cpp',
+ 'skia/src/fonts/SkRemotableFontMgr.cpp',
+ ]
+
+# We should autogenerate these SSE related flags.
+
+if CONFIG['_MSC_VER']:
+ # MSVC doesn't need special compiler flags, but Skia needs to be told that these files should
+ # be built with the required SSE level or it will simply compile in stubs and cause runtime crashes
+ SOURCES['skia/src/opts/SkBitmapFilter_opts_SSE2.cpp'].flags += ['-DSK_CPU_SSE_LEVEL=20']
+ SOURCES['skia/src/opts/SkBitmapProcState_opts_SSE2.cpp'].flags += ['-DSK_CPU_SSE_LEVEL=20']
+ SOURCES['skia/src/opts/SkBitmapProcState_opts_SSSE3.cpp'].flags += ['-DSK_CPU_SSE_LEVEL=31']
+ SOURCES['skia/src/opts/SkBlitRow_opts_SSE2.cpp'].flags += ['-DSK_CPU_SSE_LEVEL=20']
+ SOURCES['skia/src/opts/SkOpts_ssse3.cpp'].flags += ['-DSK_CPU_SSE_LEVEL=31']
+ SOURCES['skia/src/opts/SkOpts_sse41.cpp'].flags += ['-DSK_CPU_SSE_LEVEL=41']
+ SOURCES['skia/src/opts/SkOpts_sse42.cpp'].flags += ['-DSK_CPU_SSE_LEVEL=42']
+ SOURCES['skia/src/opts/SkOpts_avx.cpp'].flags += ['-DSK_CPU_SSE_LEVEL=51']
+if CONFIG['INTEL_ARCHITECTURE'] and (CONFIG['GNU_CC'] or CONFIG['CLANG_CL']):
+ SOURCES['skia/src/opts/SkBitmapFilter_opts_SSE2.cpp'].flags += CONFIG['SSE2_FLAGS']
+ SOURCES['skia/src/opts/SkBitmapProcState_opts_SSE2.cpp'].flags += CONFIG['SSE2_FLAGS']
+ SOURCES['skia/src/opts/SkBitmapProcState_opts_SSSE3.cpp'].flags += ['-mssse3']
+ SOURCES['skia/src/opts/SkBlitRow_opts_SSE2.cpp'].flags += CONFIG['SSE2_FLAGS']
+ SOURCES['skia/src/opts/SkOpts_ssse3.cpp'].flags += ['-mssse3']
+ SOURCES['skia/src/opts/SkOpts_sse41.cpp'].flags += ['-msse4.1']
+ SOURCES['skia/src/opts/SkOpts_sse42.cpp'].flags += ['-msse4.2']
+ SOURCES['skia/src/opts/SkOpts_avx.cpp'].flags += ['-mavx']
+elif CONFIG['CPU_ARCH'] == 'arm' and CONFIG['GNU_CC'] and CONFIG['BUILD_ARM_NEON']:
+ DEFINES['SK_ARM_HAS_OPTIONAL_NEON'] = 1
+
+DEFINES['SKIA_IMPLEMENTATION'] = 1
+
+if not CONFIG['MOZ_ENABLE_SKIA_GPU']:
+ DEFINES['SK_SUPPORT_GPU'] = 0
+
+if CONFIG['MOZ_TREE_FREETYPE']:
+ DEFINES['SK_CAN_USE_DLOPEN'] = 0
+
+# Suppress warnings in third-party code.
+if CONFIG['GNU_CXX'] or CONFIG['CLANG_CL']:
+ CXXFLAGS += [
+ '-Wno-deprecated-declarations',
+ '-Wno-overloaded-virtual',
+ '-Wno-shadow',
+ '-Wno-sign-compare',
+ '-Wno-unreachable-code',
+ '-Wno-unused-function',
+ ]
+if CONFIG['GNU_CXX'] and not CONFIG['CLANG_CXX'] and not CONFIG['CLANG_CL']:
+ CXXFLAGS += [
+ '-Wno-logical-op',
+ '-Wno-maybe-uninitialized',
+ ]
+if CONFIG['CLANG_CXX'] or CONFIG['CLANG_CL']:
+ CXXFLAGS += [
+ '-Wno-implicit-fallthrough',
+ '-Wno-inconsistent-missing-override',
+ '-Wno-macro-redefined',
+ '-Wno-unused-private-field',
+ ]
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] in ('gtk2', 'gtk3', 'android'):
+ CXXFLAGS += CONFIG['MOZ_CAIRO_CFLAGS']
+ CXXFLAGS += CONFIG['CAIRO_FT_CFLAGS']
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] in ('gtk2', 'gtk3'):
+ CXXFLAGS += CONFIG['MOZ_PANGO_CFLAGS']
+"""
+
+import json
+
+platforms = ['linux', 'mac', 'android', 'win']
+
+def generate_opt_sources():
+ opt_sources = {'opts': {''}}
+ for root, dirs, files in os.walk('skia/src/opts'):
+ for name in files:
+ if name.endswith('.cpp'):
+ opt_sources['opts'].add(os.path.join(root, name))
+
+ return opt_sources
+
+def generate_platform_sources():
+ sources = {}
+
+ for plat in platforms:
+ if os.system("cd skia && GYP_GENERATORS=dump_mozbuild ./gyp_skia -D OS=%s -D host_os=linux gyp/skia_lib.gyp" % plat) != 0:
+ print 'Failed to generate sources for ' + plat
+ continue
+
+
+ f = open('skia/sources.json');
+ sources[plat] = set(v.replace('../', 'skia/') for v in json.load(f));
+ f.close()
+
+ return dict(sources.items() + generate_opt_sources().items())
+
+
+def generate_separated_sources(platform_sources):
+ blacklist = [
+ 'experimental',
+ 'SkXML',
+ 'GrGLCreateNativeInterface',
+ 'GrGLCreateNullInterface',
+ 'GrGLAssembleInterface',
+ 'GrGLTestInterface',
+ 'fontconfig',
+ 'FontConfig',
+ 'SkThreadUtils_pthread_',
+ 'SkFontMgr_android',
+ 'SkFontMgr_custom',
+ 'SkFontHost_FreeType.cpp',
+ 'Movie',
+ 'ImageEncoder',
+ 'skia/src/c/',
+ 'skia/src/effects/Gr',
+ 'skia/src/effects/Sk',
+ 'skia/src/fonts/',
+ 'skia/src/images/',
+ 'skia/src/ports/SkImageGenerator',
+ 'skia/src/sksl/',
+ 'skia/src/gpu/vk/',
+ 'SkBitmapRegion',
+ 'SkLight',
+ 'SkRadialShadow',
+ 'SkShadow',
+ 'SkNormal',
+ 'SkLite',
+ 'codec',
+ 'SkWGL',
+ 'SkMemory_malloc',
+ 'SkOpts_',
+ 'opts_check_x86',
+ 'third_party',
+ # unused in skia/src/utils
+ 'SkBoundaryPatch',
+ 'SkCamera',
+ 'SkCanvasStack',
+ 'SkCanvasStateUtils',
+ 'SkCurveMeasure',
+ 'SkDeferredCanvas',
+ 'SkDumpCanvas',
+ 'SkFrontBufferedStream',
+ 'SkInterpolator',
+ 'SkLayer',
+ 'SkMeshUtils',
+ 'SkMD5',
+ 'SkMultiPictureDocument',
+ 'SkNinePatch',
+ 'SkNullCanvas',
+ 'SkNWayCanvas',
+ 'SkPaintFilterCanvas',
+ 'SkParseColor',
+ 'SkPatchGrid',
+ 'SkRTConf',
+ 'SkTextBox',
+ 'SkWhitelistTypefaces',
+ ]
+
+ def isblacklisted(value):
+ for item in blacklist:
+ if value.find(item) >= 0:
+ return True
+
+ return False
+
+ separated = defaultdict(set, {
+ 'common': {
+ 'skia/src/core/SkBlurImageFilter.cpp',
+ 'skia/src/core/SkGpuBlurUtils.cpp',
+ 'skia/src/effects/SkDashPathEffect.cpp',
+ 'skia/src/effects/SkImageSource.cpp',
+ 'skia/src/effects/SkLayerRasterizer.cpp',
+ 'skia/src/gpu/gl/GrGLCreateNativeInterface_none.cpp',
+ 'skia/src/ports/SkDiscardableMemory_none.cpp',
+ 'skia/src/ports/SkMemory_mozalloc.cpp',
+ 'skia/src/ports/SkImageEncoder_none.cpp',
+ 'skia/src/ports/SkImageGenerator_none.cpp',
+ },
+ 'android': {
+ # 'skia/src/ports/SkDebug_android.cpp',
+ 'skia/src/ports/SkFontHost_cairo.cpp',
+ # 'skia/src/ports/SkFontHost_FreeType.cpp',
+ # 'skia/src/ports/SkFontHost_FreeType_common.cpp',
+ # 'skia/src/ports/SkTime_Unix.cpp',
+ # 'skia/src/utils/SkThreadUtils_pthread.cpp',
+ },
+ 'linux': {
+ 'skia/src/ports/SkFontHost_cairo.cpp',
+ },
+ 'intel': {
+ # There is currently no x86-specific opt for SkTextureCompression
+ 'skia/src/opts/opts_check_x86.cpp',
+ 'skia/src/opts/SkOpts_ssse3.cpp',
+ 'skia/src/opts/SkOpts_sse41.cpp',
+ 'skia/src/opts/SkOpts_sse42.cpp',
+ 'skia/src/opts/SkOpts_avx.cpp',
+ 'skia/src/opts/SkOpts_hsw.cpp',
+ },
+ 'arm': {
+ 'skia/src/core/SkUtilsArm.cpp',
+ },
+ 'neon': {
+ 'skia/src/opts/SkOpts_neon.cpp',
+ },
+ 'none': set(),
+ 'pdf': set(),
+ 'gpu': set()
+ })
+
+ for plat in platform_sources.keys():
+ for value in platform_sources[plat]:
+ if isblacklisted(value):
+ continue
+
+ if value in separated['common']:
+ continue
+
+ key = plat
+
+ if '_SSE' in value or '_SSSE' in value:
+ key = 'intel'
+ elif '_neon' in value:
+ key = 'neon'
+ elif '_arm' in value:
+ key = 'arm'
+ elif '_none' in value:
+ key = 'none'
+ elif 'gpu' in value or 'Gpu' in value:
+ key = 'gpu'
+ elif all(value in platform_sources.get(p, {})
+ for p in platforms if p != plat):
+ key = 'common'
+
+ separated[key].add(value)
+
+ if os.system("cd skia && GYP_GENERATORS=dump_mozbuild ./gyp_skia -D OS=linux -D host_os=linux -R pdf gyp/pdf.gyp") != 0:
+ print 'Failed to generate sources for Skia PDF'
+ else:
+ f = open('skia/sources.json');
+ separated['pdf'].add('skia/src/core/SkMD5.cpp');
+ separated['pdf'].update(filter(lambda x: 'pdf' in x, set(v.replace('../', 'skia/') for v in json.load(f))));
+ f.close()
+
+ return separated
+
+def uniq(seq):
+ seen = set()
+ seen_add = seen.add
+ return [ x for x in seq if x not in seen and not seen_add(x)]
+
+def write_cflags(f, values, subsearch, cflag, indent):
+ def write_indent(indent):
+ for _ in range(indent):
+ f.write(' ')
+
+ if isinstance(subsearch, str):
+ subsearch = [ subsearch ]
+
+ def iswhitelisted(value):
+ for item in subsearch:
+ if value.find(item) >= 0:
+ return True
+
+ return False
+
+ val_list = uniq(sorted(values, key=lambda x: x.lower()))
+
+ if len(val_list) == 0:
+ return
+
+ for val in val_list:
+ if iswhitelisted(val):
+ write_indent(indent)
+ f.write("SOURCES[\'" + val + "\'].flags += " + cflag + "\n")
+
+opt_whitelist = [
+ 'skia/src/opts/Sk',
+ 'SkOpts',
+ 'SkBitmapProcState',
+ 'SkBlitMask',
+ 'SkBlitRow',
+ 'SkBlitter',
+ 'SkSpriteBlitter',
+ 'SkMatrix.cpp',
+]
+
+# Unfortunately for now the gpu and pathops directories are
+# non-unifiable. Keep track of this and fix it.
+unified_blacklist = [
+ 'FontHost',
+ 'SkAdvancedTypefaceMetrics',
+ 'SkBitmapProcState_matrixProcs.cpp',
+ 'SkBlitter_A8.cpp',
+ 'SkBlitter_ARGB32.cpp',
+ 'SkBlitter_RGB16.cpp',
+ 'SkBlitter_Sprite.cpp',
+ 'SkScan_Antihair.cpp',
+ 'SkParse.cpp',
+ 'SkPDFFont.cpp',
+ 'SkPictureData.cpp',
+ 'GrDrawContext',
+ 'GrResourceCache',
+ 'GrAA',
+ 'GrGL',
+ 'GrBatchAtlas.cpp',
+ 'GrMSAAPathRenderer.cpp',
+ 'GrNonAAFillRect',
+ 'SkColorSpace',
+ 'SkImage_Gpu.cpp',
+ 'SkPathOpsDebug.cpp',
+ 'SkParsePath.cpp',
+ 'SkRecorder.cpp',
+ 'SkMiniRecorder.cpp',
+ 'SkXfermode',
+ 'SkMatrix44.cpp',
+] + opt_whitelist
+
+def write_sources(f, values, indent):
+ def isblacklisted(value):
+ for item in unified_blacklist:
+ if value.find(item) >= 0:
+ return True
+
+ return False
+
+ sources = {}
+ sources['nonunified'] = set()
+ sources['unified'] = set()
+
+ for item in values:
+ if isblacklisted(item):
+ sources['nonunified'].add(item)
+ else:
+ sources['unified'].add(item)
+
+ write_list(f, "UNIFIED_SOURCES", sources['unified'], indent)
+ write_list(f, "SOURCES", sources['nonunified'], indent)
+
+def write_list(f, name, values, indent):
+ def write_indent(indent):
+ for _ in range(indent):
+ f.write(' ')
+
+ val_list = uniq(sorted(values, key=lambda x: x.lower()))
+
+ if len(val_list) == 0:
+ return
+
+ write_indent(indent)
+ f.write(name + ' += [\n')
+ for val in val_list:
+ write_indent(indent + 4)
+ f.write('\'' + val + '\',\n')
+
+ write_indent(indent)
+ f.write(']\n')
+
+def write_mozbuild(sources):
+ filename = 'moz.build'
+ f = open(filename, 'w')
+
+ f.write(header)
+
+ write_sources(f, sources['common'], 0)
+ write_cflags(f, sources['common'], opt_whitelist, 'skia_opt_flags', 0)
+
+ f.write("if CONFIG['MOZ_ENABLE_SKIA_PDF']:\n")
+ write_sources(f, sources['pdf'], 4)
+
+ f.write("if CONFIG['MOZ_ENABLE_SKIA_GPU']:\n")
+ write_sources(f, sources['gpu'], 4)
+
+ f.write("if CONFIG['MOZ_WIDGET_TOOLKIT'] in ('android'):\n")
+ write_sources(f, sources['android'], 4)
+
+ f.write("if CONFIG['MOZ_WIDGET_TOOLKIT'] in {'cocoa', 'uikit'}:\n")
+ write_sources(f, sources['mac'], 4)
+
+ f.write("if 'gtk' in CONFIG['MOZ_WIDGET_TOOLKIT']:\n")
+ write_sources(f, sources['linux'], 4)
+
+ f.write("if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':\n")
+ # Windows-specific files don't get unification because of nasty headers.
+ # Luckily there are not many files in this.
+ write_list(f, "SOURCES", sources['win'], 4)
+
+ f.write("if CONFIG['INTEL_ARCHITECTURE']:\n")
+ write_sources(f, sources['intel'], 4)
+ write_cflags(f, sources['intel'], opt_whitelist, 'skia_opt_flags', 4)
+
+ f.write("elif CONFIG['CPU_ARCH'] in ('arm', 'aarch64') and CONFIG['GNU_CC']:\n")
+ write_sources(f, sources['arm'], 4)
+ write_cflags(f, sources['arm'], opt_whitelist, 'skia_opt_flags', 4)
+
+ f.write(" if CONFIG['CPU_ARCH'] == 'aarch64':\n")
+ write_sources(f, sources['neon'], 8)
+ f.write(" elif CONFIG['BUILD_ARM_NEON']:\n")
+ write_list(f, 'SOURCES', sources['neon'], 8)
+ write_cflags(f, sources['neon'], 'neon', "CONFIG['NEON_FLAGS']", 8)
+
+ f.write(" if CONFIG['CPU_ARCH'] == 'aarch64' or CONFIG['BUILD_ARM_NEON']:\n")
+ write_cflags(f, sources['neon'], opt_whitelist, 'skia_opt_flags', 8)
+
+ f.write("else:\n")
+ write_sources(f, sources['none'], 4)
+
+ f.write(footer)
+
+ f.close()
+
+ print 'Wrote ' + filename
+
+def main():
+ platform_sources = generate_platform_sources()
+ separated_sources = generate_separated_sources(platform_sources)
+ write_mozbuild(separated_sources)
+
+
+if __name__ == '__main__':
+ main()
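The core of generate_separated_sources() is the per-file bucketing by naming convention, with anything present on every platform collapsing into 'common'. Pulled out as a standalone sketch for reference (the classify() name is mine, not the script's):

def classify(value, plat, platform_sources, platforms):
    # Mirrors the key selection loop in generate_separated_sources() above.
    if '_SSE' in value or '_SSSE' in value:
        return 'intel'
    if '_neon' in value:
        return 'neon'
    if '_arm' in value:
        return 'arm'
    if '_none' in value:
        return 'none'
    if 'gpu' in value or 'Gpu' in value:
        return 'gpu'
    # A file that every other platform also ships is platform-independent.
    if all(value in platform_sources.get(p, {}) for p in platforms if p != plat):
        return 'common'
    return plat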
diff --git a/gfx/skia/gyp_mozbuild b/gfx/skia/gyp_mozbuild
new file mode 100755
index 000000000..0cec5f9a6
--- /dev/null
+++ b/gfx/skia/gyp_mozbuild
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+# Install our generator
+cp dump_mozbuild.py skia/third_party/externals/gyp/pylib/gyp/generator
+
+# pushd skia
+# for OS in win linux mac; do
+# GYP_GENERATORS=dump_mozbuild ./gyp_skia -D OS=$OS -D arm_neon=0 gyp/effects.gyp
+# done
+# popd
+
+./generate_mozbuild.py
+
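gyp resolves its output backend by module name under pylib/gyp/generator, which is why the copy step above has to happen before GYP_GENERATORS=dump_mozbuild (set by generate_mozbuild.py) can take effect. A sketch of a single per-platform invocation, equivalent to the os.system() call in generate_platform_sources(); 'linux' stands in for each entry in the platforms list:

import os

os.environ['GYP_GENERATORS'] = 'dump_mozbuild'
os.system('cd skia && ./gyp_skia -D OS=linux -D host_os=linux gyp/skia_lib.gyp')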
diff --git a/gfx/skia/moz.build b/gfx/skia/moz.build
new file mode 100644
index 000000000..7a3ef6549
--- /dev/null
+++ b/gfx/skia/moz.build
@@ -0,0 +1,758 @@
+
+#
+# ##### ####### # # # # # #
+# ## # # # # # # # # # # # #
+# ## # # # # # # # # # # #
+# ## #### # # # # # # # # # #
+# ## # # # ####### # # # ####### # ###
+# ## # # # # # # # # # # # ###
+# # ##### ####### # # ## ## # # # ###
+#
+# Seriously. You shouldn't even be looking at this file unless you're
+# debugging generate_mozbuild.py.
+#
+# DO NOT MODIFY THIS FILE IT IS AUTOGENERATED.
+#
+
+skia_opt_flags = []
+
+if CONFIG['MOZ_OPTIMIZE']:
+ if CONFIG['_MSC_VER']:
+ skia_opt_flags += ['-O2']
+ elif CONFIG['GNU_CC']:
+ skia_opt_flags += ['-O3']
+
+UNIFIED_SOURCES += [
+ 'skia/src/core/SkAAClip.cpp',
+ 'skia/src/core/SkAlphaRuns.cpp',
+ 'skia/src/core/SkAnnotation.cpp',
+ 'skia/src/core/SkAutoPixmapStorage.cpp',
+ 'skia/src/core/SkBBHFactory.cpp',
+ 'skia/src/core/SkBigPicture.cpp',
+ 'skia/src/core/SkBitmap.cpp',
+ 'skia/src/core/SkBitmapCache.cpp',
+ 'skia/src/core/SkBitmapController.cpp',
+ 'skia/src/core/SkBitmapDevice.cpp',
+ 'skia/src/core/SkBitmapProcShader.cpp',
+ 'skia/src/core/SkBitmapProvider.cpp',
+ 'skia/src/core/SkBitmapScaler.cpp',
+ 'skia/src/core/SkBlurImageFilter.cpp',
+ 'skia/src/core/SkBuffer.cpp',
+ 'skia/src/core/SkCachedData.cpp',
+ 'skia/src/core/SkCanvas.cpp',
+ 'skia/src/core/SkChunkAlloc.cpp',
+ 'skia/src/core/SkClipStack.cpp',
+ 'skia/src/core/SkColor.cpp',
+ 'skia/src/core/SkColorFilter.cpp',
+ 'skia/src/core/SkColorFilterShader.cpp',
+ 'skia/src/core/SkColorMatrixFilterRowMajor255.cpp',
+ 'skia/src/core/SkColorShader.cpp',
+ 'skia/src/core/SkColorTable.cpp',
+ 'skia/src/core/SkComposeShader.cpp',
+ 'skia/src/core/SkConfig8888.cpp',
+ 'skia/src/core/SkConvolver.cpp',
+ 'skia/src/core/SkCpu.cpp',
+ 'skia/src/core/SkCubicClipper.cpp',
+ 'skia/src/core/SkData.cpp',
+ 'skia/src/core/SkDataTable.cpp',
+ 'skia/src/core/SkDebug.cpp',
+ 'skia/src/core/SkDeque.cpp',
+ 'skia/src/core/SkDevice.cpp',
+ 'skia/src/core/SkDeviceLooper.cpp',
+ 'skia/src/core/SkDeviceProfile.cpp',
+ 'skia/src/core/SkDistanceFieldGen.cpp',
+ 'skia/src/core/SkDither.cpp',
+ 'skia/src/core/SkDocument.cpp',
+ 'skia/src/core/SkDraw.cpp',
+ 'skia/src/core/SkDrawable.cpp',
+ 'skia/src/core/SkDrawLooper.cpp',
+ 'skia/src/core/SkEdge.cpp',
+ 'skia/src/core/SkEdgeBuilder.cpp',
+ 'skia/src/core/SkEdgeClipper.cpp',
+ 'skia/src/core/SkError.cpp',
+ 'skia/src/core/SkFilterProc.cpp',
+ 'skia/src/core/SkFlattenable.cpp',
+ 'skia/src/core/SkFlattenableSerialization.cpp',
+ 'skia/src/core/SkFont.cpp',
+ 'skia/src/core/SkFontDescriptor.cpp',
+ 'skia/src/core/SkFontLCDConfig.cpp',
+ 'skia/src/core/SkFontMgr.cpp',
+ 'skia/src/core/SkFontStream.cpp',
+ 'skia/src/core/SkFontStyle.cpp',
+ 'skia/src/core/SkForceCPlusPlusLinking.cpp',
+ 'skia/src/core/SkGeometry.cpp',
+ 'skia/src/core/SkGlobalInitialization_core.cpp',
+ 'skia/src/core/SkGlyphCache.cpp',
+ 'skia/src/core/SkGpuBlurUtils.cpp',
+ 'skia/src/core/SkGraphics.cpp',
+ 'skia/src/core/SkHalf.cpp',
+ 'skia/src/core/SkImageCacherator.cpp',
+ 'skia/src/core/SkImageFilter.cpp',
+ 'skia/src/core/SkImageFilterCache.cpp',
+ 'skia/src/core/SkImageGenerator.cpp',
+ 'skia/src/core/SkImageInfo.cpp',
+ 'skia/src/core/SkLatticeIter.cpp',
+ 'skia/src/core/SkLinearBitmapPipeline.cpp',
+ 'skia/src/core/SkLineClipper.cpp',
+ 'skia/src/core/SkLocalMatrixImageFilter.cpp',
+ 'skia/src/core/SkLocalMatrixShader.cpp',
+ 'skia/src/core/SkMallocPixelRef.cpp',
+ 'skia/src/core/SkMask.cpp',
+ 'skia/src/core/SkMaskCache.cpp',
+ 'skia/src/core/SkMaskFilter.cpp',
+ 'skia/src/core/SkMaskGamma.cpp',
+ 'skia/src/core/SkMath.cpp',
+ 'skia/src/core/SkMatrixImageFilter.cpp',
+ 'skia/src/core/SkMetaData.cpp',
+ 'skia/src/core/SkMipMap.cpp',
+ 'skia/src/core/SkModeColorFilter.cpp',
+ 'skia/src/core/SkMultiPictureDraw.cpp',
+ 'skia/src/core/SkPaint.cpp',
+ 'skia/src/core/SkPaintPriv.cpp',
+ 'skia/src/core/SkPath.cpp',
+ 'skia/src/core/SkPathEffect.cpp',
+ 'skia/src/core/SkPathMeasure.cpp',
+ 'skia/src/core/SkPathRef.cpp',
+ 'skia/src/core/SkPicture.cpp',
+ 'skia/src/core/SkPictureAnalyzer.cpp',
+ 'skia/src/core/SkPictureContentInfo.cpp',
+ 'skia/src/core/SkPictureFlat.cpp',
+ 'skia/src/core/SkPictureImageGenerator.cpp',
+ 'skia/src/core/SkPicturePlayback.cpp',
+ 'skia/src/core/SkPictureRecord.cpp',
+ 'skia/src/core/SkPictureRecorder.cpp',
+ 'skia/src/core/SkPictureShader.cpp',
+ 'skia/src/core/SkPixelRef.cpp',
+ 'skia/src/core/SkPixmap.cpp',
+ 'skia/src/core/SkPoint.cpp',
+ 'skia/src/core/SkPoint3.cpp',
+ 'skia/src/core/SkPtrRecorder.cpp',
+ 'skia/src/core/SkQuadClipper.cpp',
+ 'skia/src/core/SkRasterClip.cpp',
+ 'skia/src/core/SkRasterizer.cpp',
+ 'skia/src/core/SkRasterPipeline.cpp',
+ 'skia/src/core/SkRasterPipelineBlitter.cpp',
+ 'skia/src/core/SkReadBuffer.cpp',
+ 'skia/src/core/SkRecord.cpp',
+ 'skia/src/core/SkRecordDraw.cpp',
+ 'skia/src/core/SkRecordedDrawable.cpp',
+ 'skia/src/core/SkRecordOpts.cpp',
+ 'skia/src/core/SkRecords.cpp',
+ 'skia/src/core/SkRect.cpp',
+ 'skia/src/core/SkRefDict.cpp',
+ 'skia/src/core/SkRegion.cpp',
+ 'skia/src/core/SkRegion_path.cpp',
+ 'skia/src/core/SkResourceCache.cpp',
+ 'skia/src/core/SkRRect.cpp',
+ 'skia/src/core/SkRTree.cpp',
+ 'skia/src/core/SkRWBuffer.cpp',
+ 'skia/src/core/SkScalar.cpp',
+ 'skia/src/core/SkScalerContext.cpp',
+ 'skia/src/core/SkScan.cpp',
+ 'skia/src/core/SkScan_AntiPath.cpp',
+ 'skia/src/core/SkScan_Hairline.cpp',
+ 'skia/src/core/SkScan_Path.cpp',
+ 'skia/src/core/SkSemaphore.cpp',
+ 'skia/src/core/SkShader.cpp',
+ 'skia/src/core/SkSharedMutex.cpp',
+ 'skia/src/core/SkSpanProcs.cpp',
+ 'skia/src/core/SkSpecialImage.cpp',
+ 'skia/src/core/SkSpecialSurface.cpp',
+ 'skia/src/core/SkSpinlock.cpp',
+ 'skia/src/core/SkSRGB.cpp',
+ 'skia/src/core/SkStream.cpp',
+ 'skia/src/core/SkString.cpp',
+ 'skia/src/core/SkStringUtils.cpp',
+ 'skia/src/core/SkStroke.cpp',
+ 'skia/src/core/SkStrokeRec.cpp',
+ 'skia/src/core/SkStrokerPriv.cpp',
+ 'skia/src/core/SkSwizzle.cpp',
+ 'skia/src/core/SkTaskGroup.cpp',
+ 'skia/src/core/SkTextBlob.cpp',
+ 'skia/src/core/SkThreadID.cpp',
+ 'skia/src/core/SkTime.cpp',
+ 'skia/src/core/SkTLS.cpp',
+ 'skia/src/core/SkTSearch.cpp',
+ 'skia/src/core/SkTypeface.cpp',
+ 'skia/src/core/SkTypefaceCache.cpp',
+ 'skia/src/core/SkUnPreMultiply.cpp',
+ 'skia/src/core/SkUtils.cpp',
+ 'skia/src/core/SkValidatingReadBuffer.cpp',
+ 'skia/src/core/SkVarAlloc.cpp',
+ 'skia/src/core/SkVertState.cpp',
+ 'skia/src/core/SkWriteBuffer.cpp',
+ 'skia/src/core/SkWriter32.cpp',
+ 'skia/src/core/SkYUVPlanesCache.cpp',
+ 'skia/src/effects/gradients/Sk4fGradientBase.cpp',
+ 'skia/src/effects/gradients/Sk4fLinearGradient.cpp',
+ 'skia/src/effects/gradients/SkClampRange.cpp',
+ 'skia/src/effects/gradients/SkGradientBitmapCache.cpp',
+ 'skia/src/effects/gradients/SkGradientShader.cpp',
+ 'skia/src/effects/gradients/SkLinearGradient.cpp',
+ 'skia/src/effects/gradients/SkRadialGradient.cpp',
+ 'skia/src/effects/gradients/SkSweepGradient.cpp',
+ 'skia/src/effects/gradients/SkTwoPointConicalGradient.cpp',
+ 'skia/src/effects/SkDashPathEffect.cpp',
+ 'skia/src/effects/SkImageSource.cpp',
+ 'skia/src/effects/SkLayerRasterizer.cpp',
+ 'skia/src/image/SkImage.cpp',
+ 'skia/src/image/SkImage_Generator.cpp',
+ 'skia/src/image/SkImage_Raster.cpp',
+ 'skia/src/image/SkImageShader.cpp',
+ 'skia/src/image/SkSurface.cpp',
+ 'skia/src/image/SkSurface_Raster.cpp',
+ 'skia/src/lazy/SkDiscardableMemoryPool.cpp',
+ 'skia/src/lazy/SkDiscardablePixelRef.cpp',
+ 'skia/src/pathops/SkAddIntersections.cpp',
+ 'skia/src/pathops/SkDConicLineIntersection.cpp',
+ 'skia/src/pathops/SkDCubicLineIntersection.cpp',
+ 'skia/src/pathops/SkDCubicToQuads.cpp',
+ 'skia/src/pathops/SkDLineIntersection.cpp',
+ 'skia/src/pathops/SkDQuadLineIntersection.cpp',
+ 'skia/src/pathops/SkIntersections.cpp',
+ 'skia/src/pathops/SkOpAngle.cpp',
+ 'skia/src/pathops/SkOpBuilder.cpp',
+ 'skia/src/pathops/SkOpCoincidence.cpp',
+ 'skia/src/pathops/SkOpContour.cpp',
+ 'skia/src/pathops/SkOpCubicHull.cpp',
+ 'skia/src/pathops/SkOpEdgeBuilder.cpp',
+ 'skia/src/pathops/SkOpSegment.cpp',
+ 'skia/src/pathops/SkOpSpan.cpp',
+ 'skia/src/pathops/SkPathOpsCommon.cpp',
+ 'skia/src/pathops/SkPathOpsConic.cpp',
+ 'skia/src/pathops/SkPathOpsCubic.cpp',
+ 'skia/src/pathops/SkPathOpsCurve.cpp',
+ 'skia/src/pathops/SkPathOpsLine.cpp',
+ 'skia/src/pathops/SkPathOpsOp.cpp',
+ 'skia/src/pathops/SkPathOpsPoint.cpp',
+ 'skia/src/pathops/SkPathOpsQuad.cpp',
+ 'skia/src/pathops/SkPathOpsRect.cpp',
+ 'skia/src/pathops/SkPathOpsSimplify.cpp',
+ 'skia/src/pathops/SkPathOpsTightBounds.cpp',
+ 'skia/src/pathops/SkPathOpsTSect.cpp',
+ 'skia/src/pathops/SkPathOpsTypes.cpp',
+ 'skia/src/pathops/SkPathOpsWinding.cpp',
+ 'skia/src/pathops/SkPathWriter.cpp',
+ 'skia/src/pathops/SkReduceOrder.cpp',
+ 'skia/src/pipe/SkPipeCanvas.cpp',
+ 'skia/src/pipe/SkPipeReader.cpp',
+ 'skia/src/ports/SkDiscardableMemory_none.cpp',
+ 'skia/src/ports/SkGlobalInitialization_default.cpp',
+ 'skia/src/ports/SkImageEncoder_none.cpp',
+ 'skia/src/ports/SkImageGenerator_none.cpp',
+ 'skia/src/ports/SkMemory_mozalloc.cpp',
+ 'skia/src/ports/SkOSFile_stdio.cpp',
+ 'skia/src/sfnt/SkOTTable_name.cpp',
+ 'skia/src/sfnt/SkOTUtils.cpp',
+ 'skia/src/utils/SkBase64.cpp',
+ 'skia/src/utils/SkBitmapSourceDeserializer.cpp',
+ 'skia/src/utils/SkDashPath.cpp',
+ 'skia/src/utils/SkEventTracer.cpp',
+ 'skia/src/utils/SkMatrix22.cpp',
+ 'skia/src/utils/SkOSFile.cpp',
+ 'skia/src/utils/SkPatchUtils.cpp',
+ 'skia/src/utils/SkRGBAToYUV.cpp',
+ 'skia/src/utils/SkTextureCompressor.cpp',
+ 'skia/src/utils/SkTextureCompressor_ASTC.cpp',
+ 'skia/src/utils/SkTextureCompressor_LATC.cpp',
+ 'skia/src/utils/SkTextureCompressor_R11EAC.cpp',
+]
+SOURCES += [
+ 'skia/src/core/SkBitmapProcState.cpp',
+ 'skia/src/core/SkBitmapProcState_matrixProcs.cpp',
+ 'skia/src/core/SkBlitMask_D32.cpp',
+ 'skia/src/core/SkBlitRow_D16.cpp',
+ 'skia/src/core/SkBlitRow_D32.cpp',
+ 'skia/src/core/SkBlitter.cpp',
+ 'skia/src/core/SkBlitter_A8.cpp',
+ 'skia/src/core/SkBlitter_ARGB32.cpp',
+ 'skia/src/core/SkBlitter_PM4f.cpp',
+ 'skia/src/core/SkBlitter_RGB16.cpp',
+ 'skia/src/core/SkBlitter_Sprite.cpp',
+ 'skia/src/core/SkColorSpace.cpp',
+ 'skia/src/core/SkColorSpace_ICC.cpp',
+ 'skia/src/core/SkColorSpaceXform.cpp',
+ 'skia/src/core/SkMatrix.cpp',
+ 'skia/src/core/SkMatrix44.cpp',
+ 'skia/src/core/SkMiniRecorder.cpp',
+ 'skia/src/core/SkOpts.cpp',
+ 'skia/src/core/SkPictureData.cpp',
+ 'skia/src/core/SkRecorder.cpp',
+ 'skia/src/core/SkScan_Antihair.cpp',
+ 'skia/src/core/SkSpriteBlitter4f.cpp',
+ 'skia/src/core/SkSpriteBlitter_ARGB32.cpp',
+ 'skia/src/core/SkSpriteBlitter_RGB16.cpp',
+ 'skia/src/core/SkXfermode.cpp',
+ 'skia/src/core/SkXfermode4f.cpp',
+ 'skia/src/core/SkXfermodeF16.cpp',
+ 'skia/src/core/SkXfermodeInterpretation.cpp',
+ 'skia/src/gpu/gl/GrGLCreateNativeInterface_none.cpp',
+ 'skia/src/pathops/SkPathOpsDebug.cpp',
+ 'skia/src/utils/SkParse.cpp',
+ 'skia/src/utils/SkParsePath.cpp',
+]
+SOURCES['skia/src/core/SkBitmapProcState.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBitmapProcState_matrixProcs.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBlitMask_D32.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBlitRow_D16.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBlitRow_D32.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBlitter.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBlitter_A8.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBlitter_ARGB32.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBlitter_PM4f.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBlitter_RGB16.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBlitter_Sprite.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkMatrix.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkOpts.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkSpriteBlitter4f.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkSpriteBlitter_ARGB32.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkSpriteBlitter_RGB16.cpp'].flags += skia_opt_flags
+if CONFIG['MOZ_ENABLE_SKIA_PDF']:
+ UNIFIED_SOURCES += [
+ 'skia/src/core/SkMD5.cpp',
+ 'skia/src/pdf/SkDeflate.cpp',
+ 'skia/src/pdf/SkJpegInfo.cpp',
+ 'skia/src/pdf/SkPDFBitmap.cpp',
+ 'skia/src/pdf/SkPDFCanon.cpp',
+ 'skia/src/pdf/SkPDFCanvas.cpp',
+ 'skia/src/pdf/SkPDFConvertType1FontStream.cpp',
+ 'skia/src/pdf/SkPDFDevice.cpp',
+ 'skia/src/pdf/SkPDFDocument.cpp',
+ 'skia/src/pdf/SkPDFFormXObject.cpp',
+ 'skia/src/pdf/SkPDFGraphicState.cpp',
+ 'skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.cpp',
+ 'skia/src/pdf/SkPDFMakeToUnicodeCmap.cpp',
+ 'skia/src/pdf/SkPDFMetadata.cpp',
+ 'skia/src/pdf/SkPDFResourceDict.cpp',
+ 'skia/src/pdf/SkPDFShader.cpp',
+ 'skia/src/pdf/SkPDFTypes.cpp',
+ 'skia/src/pdf/SkPDFUtils.cpp',
+ ]
+ SOURCES += [
+ 'skia/src/pdf/SkPDFFont.cpp',
+ ]
+if CONFIG['MOZ_ENABLE_SKIA_GPU']:
+ UNIFIED_SOURCES += [
+ 'skia/src/effects/gradients/SkTwoPointConicalGradient_gpu.cpp',
+ 'skia/src/gpu/batches/GrAnalyticRectBatch.cpp',
+ 'skia/src/gpu/batches/GrAtlasTextBatch.cpp',
+ 'skia/src/gpu/batches/GrBatch.cpp',
+ 'skia/src/gpu/batches/GrCopySurfaceBatch.cpp',
+ 'skia/src/gpu/batches/GrDashLinePathRenderer.cpp',
+ 'skia/src/gpu/batches/GrDefaultPathRenderer.cpp',
+ 'skia/src/gpu/batches/GrDrawAtlasBatch.cpp',
+ 'skia/src/gpu/batches/GrDrawBatch.cpp',
+ 'skia/src/gpu/batches/GrDrawPathBatch.cpp',
+ 'skia/src/gpu/batches/GrDrawVerticesBatch.cpp',
+ 'skia/src/gpu/batches/GrNinePatch.cpp',
+ 'skia/src/gpu/batches/GrNonAAStrokeRectBatch.cpp',
+ 'skia/src/gpu/batches/GrPLSPathRenderer.cpp',
+ 'skia/src/gpu/batches/GrRectBatchFactory.cpp',
+ 'skia/src/gpu/batches/GrRegionBatch.cpp',
+ 'skia/src/gpu/batches/GrStencilAndCoverPathRenderer.cpp',
+ 'skia/src/gpu/batches/GrTessellatingPathRenderer.cpp',
+ 'skia/src/gpu/batches/GrVertexBatch.cpp',
+ 'skia/src/gpu/effects/GrBezierEffect.cpp',
+ 'skia/src/gpu/effects/GrBicubicEffect.cpp',
+ 'skia/src/gpu/effects/GrBitmapTextGeoProc.cpp',
+ 'skia/src/gpu/effects/GrConfigConversionEffect.cpp',
+ 'skia/src/gpu/effects/GrConstColorProcessor.cpp',
+ 'skia/src/gpu/effects/GrConvexPolyEffect.cpp',
+ 'skia/src/gpu/effects/GrConvolutionEffect.cpp',
+ 'skia/src/gpu/effects/GrCoverageSetOpXP.cpp',
+ 'skia/src/gpu/effects/GrCustomXfermode.cpp',
+ 'skia/src/gpu/effects/GrDashingEffect.cpp',
+ 'skia/src/gpu/effects/GrDisableColorXP.cpp',
+ 'skia/src/gpu/effects/GrDistanceFieldGeoProc.cpp',
+ 'skia/src/gpu/effects/GrDitherEffect.cpp',
+ 'skia/src/gpu/effects/GrGammaEffect.cpp',
+ 'skia/src/gpu/effects/GrMatrixConvolutionEffect.cpp',
+ 'skia/src/gpu/effects/GrOvalEffect.cpp',
+ 'skia/src/gpu/effects/GrPorterDuffXferProcessor.cpp',
+ 'skia/src/gpu/effects/GrRRectEffect.cpp',
+ 'skia/src/gpu/effects/GrSimpleTextureEffect.cpp',
+ 'skia/src/gpu/effects/GrSingleTextureEffect.cpp',
+ 'skia/src/gpu/effects/GrTextureDomain.cpp',
+ 'skia/src/gpu/effects/GrTextureStripAtlas.cpp',
+ 'skia/src/gpu/effects/GrXfermodeFragmentProcessor.cpp',
+ 'skia/src/gpu/effects/GrYUVEffect.cpp',
+ 'skia/src/gpu/GrAuditTrail.cpp',
+ 'skia/src/gpu/GrBatchFlushState.cpp',
+ 'skia/src/gpu/GrBatchTest.cpp',
+ 'skia/src/gpu/GrBlend.cpp',
+ 'skia/src/gpu/GrBlurUtils.cpp',
+ 'skia/src/gpu/GrBuffer.cpp',
+ 'skia/src/gpu/GrBufferAllocPool.cpp',
+ 'skia/src/gpu/GrCaps.cpp',
+ 'skia/src/gpu/GrClipStackClip.cpp',
+ 'skia/src/gpu/GrColorSpaceXform.cpp',
+ 'skia/src/gpu/GrContext.cpp',
+ 'skia/src/gpu/GrCoordTransform.cpp',
+ 'skia/src/gpu/GrDefaultGeoProcFactory.cpp',
+ 'skia/src/gpu/GrDrawingManager.cpp',
+ 'skia/src/gpu/GrDrawTarget.cpp',
+ 'skia/src/gpu/GrFixedClip.cpp',
+ 'skia/src/gpu/GrFragmentProcessor.cpp',
+ 'skia/src/gpu/GrGpu.cpp',
+ 'skia/src/gpu/GrGpuCommandBuffer.cpp',
+ 'skia/src/gpu/GrGpuFactory.cpp',
+ 'skia/src/gpu/GrGpuResource.cpp',
+ 'skia/src/gpu/GrGpuResourceRef.cpp',
+ 'skia/src/gpu/GrImageIDTextureAdjuster.cpp',
+ 'skia/src/gpu/GrInvariantOutput.cpp',
+ 'skia/src/gpu/GrMemoryPool.cpp',
+ 'skia/src/gpu/GrOvalRenderer.cpp',
+ 'skia/src/gpu/GrPaint.cpp',
+ 'skia/src/gpu/GrPath.cpp',
+ 'skia/src/gpu/GrPathProcessor.cpp',
+ 'skia/src/gpu/GrPathRange.cpp',
+ 'skia/src/gpu/GrPathRenderer.cpp',
+ 'skia/src/gpu/GrPathRendererChain.cpp',
+ 'skia/src/gpu/GrPathRendering.cpp',
+ 'skia/src/gpu/GrPathRenderingDrawContext.cpp',
+ 'skia/src/gpu/GrPathUtils.cpp',
+ 'skia/src/gpu/GrPipeline.cpp',
+ 'skia/src/gpu/GrPipelineBuilder.cpp',
+ 'skia/src/gpu/GrPrimitiveProcessor.cpp',
+ 'skia/src/gpu/GrProcessor.cpp',
+ 'skia/src/gpu/GrProcessorUnitTest.cpp',
+ 'skia/src/gpu/GrProcOptInfo.cpp',
+ 'skia/src/gpu/GrProgramDesc.cpp',
+ 'skia/src/gpu/GrProgramElement.cpp',
+ 'skia/src/gpu/GrRectanizer_pow2.cpp',
+ 'skia/src/gpu/GrRectanizer_skyline.cpp',
+ 'skia/src/gpu/GrReducedClip.cpp',
+ 'skia/src/gpu/GrRenderTarget.cpp',
+ 'skia/src/gpu/GrRenderTargetProxy.cpp',
+ 'skia/src/gpu/GrResourceProvider.cpp',
+ 'skia/src/gpu/GrShape.cpp',
+ 'skia/src/gpu/GrSoftwarePathRenderer.cpp',
+ 'skia/src/gpu/GrStencilAttachment.cpp',
+ 'skia/src/gpu/GrStencilSettings.cpp',
+ 'skia/src/gpu/GrStyle.cpp',
+ 'skia/src/gpu/GrSurface.cpp',
+ 'skia/src/gpu/GrSurfaceProxy.cpp',
+ 'skia/src/gpu/GrSWMaskHelper.cpp',
+ 'skia/src/gpu/GrTessellator.cpp',
+ 'skia/src/gpu/GrTestUtils.cpp',
+ 'skia/src/gpu/GrTexture.cpp',
+ 'skia/src/gpu/GrTextureAccess.cpp',
+ 'skia/src/gpu/GrTextureParamsAdjuster.cpp',
+ 'skia/src/gpu/GrTextureProvider.cpp',
+ 'skia/src/gpu/GrTextureProxy.cpp',
+ 'skia/src/gpu/GrTextureToYUVPlanes.cpp',
+ 'skia/src/gpu/GrTraceMarker.cpp',
+ 'skia/src/gpu/GrXferProcessor.cpp',
+ 'skia/src/gpu/GrYUVProvider.cpp',
+ 'skia/src/gpu/instanced/GLInstancedRendering.cpp',
+ 'skia/src/gpu/instanced/InstancedRendering.cpp',
+ 'skia/src/gpu/instanced/InstanceProcessor.cpp',
+ 'skia/src/gpu/SkGpuDevice.cpp',
+ 'skia/src/gpu/SkGpuDevice_drawTexture.cpp',
+ 'skia/src/gpu/SkGr.cpp',
+ 'skia/src/gpu/text/GrAtlasTextBlob.cpp',
+ 'skia/src/gpu/text/GrAtlasTextBlob_regenInBatch.cpp',
+ 'skia/src/gpu/text/GrAtlasTextContext.cpp',
+ 'skia/src/gpu/text/GrBatchFontCache.cpp',
+ 'skia/src/gpu/text/GrDistanceFieldAdjustTable.cpp',
+ 'skia/src/gpu/text/GrStencilAndCoverTextContext.cpp',
+ 'skia/src/gpu/text/GrTextBlobCache.cpp',
+ 'skia/src/gpu/text/GrTextUtils.cpp',
+ 'skia/src/image/SkSurface_Gpu.cpp',
+ ]
+ SOURCES += [
+ 'skia/src/gpu/batches/GrAAConvexPathRenderer.cpp',
+ 'skia/src/gpu/batches/GrAAConvexTessellator.cpp',
+ 'skia/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp',
+ 'skia/src/gpu/batches/GrAAFillRectBatch.cpp',
+ 'skia/src/gpu/batches/GrAAHairLinePathRenderer.cpp',
+ 'skia/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp',
+ 'skia/src/gpu/batches/GrAAStrokeRectBatch.cpp',
+ 'skia/src/gpu/batches/GrMSAAPathRenderer.cpp',
+ 'skia/src/gpu/batches/GrNonAAFillRectBatch.cpp',
+ 'skia/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp',
+ 'skia/src/gpu/gl/builders/GrGLProgramBuilder.cpp',
+ 'skia/src/gpu/gl/builders/GrGLShaderStringBuilder.cpp',
+ 'skia/src/gpu/gl/builders/GrGLSLPrettyPrint.cpp',
+ 'skia/src/gpu/gl/GrGLBuffer.cpp',
+ 'skia/src/gpu/gl/GrGLCaps.cpp',
+ 'skia/src/gpu/gl/GrGLContext.cpp',
+ 'skia/src/gpu/gl/GrGLDefaultInterface_native.cpp',
+ 'skia/src/gpu/gl/GrGLExtensions.cpp',
+ 'skia/src/gpu/gl/GrGLGLSL.cpp',
+ 'skia/src/gpu/gl/GrGLGpu.cpp',
+ 'skia/src/gpu/gl/GrGLGpuProgramCache.cpp',
+ 'skia/src/gpu/gl/GrGLInterface.cpp',
+ 'skia/src/gpu/gl/GrGLPath.cpp',
+ 'skia/src/gpu/gl/GrGLPathRange.cpp',
+ 'skia/src/gpu/gl/GrGLPathRendering.cpp',
+ 'skia/src/gpu/gl/GrGLProgram.cpp',
+ 'skia/src/gpu/gl/GrGLProgramDataManager.cpp',
+ 'skia/src/gpu/gl/GrGLRenderTarget.cpp',
+ 'skia/src/gpu/gl/GrGLStencilAttachment.cpp',
+ 'skia/src/gpu/gl/GrGLTexture.cpp',
+ 'skia/src/gpu/gl/GrGLTextureRenderTarget.cpp',
+ 'skia/src/gpu/gl/GrGLUniformHandler.cpp',
+ 'skia/src/gpu/gl/GrGLUtil.cpp',
+ 'skia/src/gpu/gl/GrGLVaryingHandler.cpp',
+ 'skia/src/gpu/gl/GrGLVertexArray.cpp',
+ 'skia/src/gpu/glsl/GrGLSL.cpp',
+ 'skia/src/gpu/glsl/GrGLSLBlend.cpp',
+ 'skia/src/gpu/glsl/GrGLSLCaps.cpp',
+ 'skia/src/gpu/glsl/GrGLSLFragmentProcessor.cpp',
+ 'skia/src/gpu/glsl/GrGLSLFragmentShaderBuilder.cpp',
+ 'skia/src/gpu/glsl/GrGLSLGeometryProcessor.cpp',
+ 'skia/src/gpu/glsl/GrGLSLGeometryShaderBuilder.cpp',
+ 'skia/src/gpu/glsl/GrGLSLPrimitiveProcessor.cpp',
+ 'skia/src/gpu/glsl/GrGLSLProgramBuilder.cpp',
+ 'skia/src/gpu/glsl/GrGLSLProgramDataManager.cpp',
+ 'skia/src/gpu/glsl/GrGLSLShaderBuilder.cpp',
+ 'skia/src/gpu/glsl/GrGLSLUtil.cpp',
+ 'skia/src/gpu/glsl/GrGLSLVarying.cpp',
+ 'skia/src/gpu/glsl/GrGLSLVertexShaderBuilder.cpp',
+ 'skia/src/gpu/glsl/GrGLSLXferProcessor.cpp',
+ 'skia/src/gpu/GrBatchAtlas.cpp',
+ 'skia/src/gpu/GrDrawContext.cpp',
+ 'skia/src/gpu/GrResourceCache.cpp',
+ 'skia/src/image/SkImage_Gpu.cpp',
+ ]
+if CONFIG['MOZ_WIDGET_TOOLKIT'] in ('android'):
+ UNIFIED_SOURCES += [
+ 'skia/src/ports/SkDebug_android.cpp',
+ 'skia/src/ports/SkOSFile_posix.cpp',
+ 'skia/src/ports/SkOSLibrary_posix.cpp',
+ 'skia/src/ports/SkTLS_pthread.cpp',
+ 'skia/src/utils/SkThreadUtils_pthread.cpp',
+ ]
+ SOURCES += [
+ 'skia/src/ports/SkFontHost_cairo.cpp',
+ 'skia/src/ports/SkFontHost_FreeType_common.cpp',
+ ]
+if CONFIG['MOZ_WIDGET_TOOLKIT'] in {'cocoa', 'uikit'}:
+ UNIFIED_SOURCES += [
+ 'skia/src/ports/SkDebug_stdio.cpp',
+ 'skia/src/ports/SkOSFile_posix.cpp',
+ 'skia/src/ports/SkOSLibrary_posix.cpp',
+ 'skia/src/ports/SkTLS_pthread.cpp',
+ 'skia/src/utils/mac/SkCreateCGImageRef.cpp',
+ 'skia/src/utils/mac/SkStream_mac.cpp',
+ 'skia/src/utils/SkThreadUtils_pthread.cpp',
+ ]
+ SOURCES += [
+ 'skia/src/ports/SkFontHost_mac.cpp',
+ ]
+if 'gtk' in CONFIG['MOZ_WIDGET_TOOLKIT']:
+ UNIFIED_SOURCES += [
+ 'skia/src/ports/SkDebug_stdio.cpp',
+ 'skia/src/ports/SkOSFile_posix.cpp',
+ 'skia/src/ports/SkOSLibrary_posix.cpp',
+ 'skia/src/ports/SkTLS_pthread.cpp',
+ 'skia/src/utils/SkThreadUtils_pthread.cpp',
+ ]
+ SOURCES += [
+ 'skia/src/ports/SkFontHost_cairo.cpp',
+ 'skia/src/ports/SkFontHost_FreeType_common.cpp',
+ ]
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':
+ SOURCES += [
+ 'skia/src/ports/SkDebug_win.cpp',
+ 'skia/src/ports/SkFontHost_win.cpp',
+ 'skia/src/ports/SkFontMgr_win_dw.cpp',
+ 'skia/src/ports/SkFontMgr_win_dw_factory.cpp',
+ 'skia/src/ports/SkOSFile_win.cpp',
+ 'skia/src/ports/SkOSLibrary_win.cpp',
+ 'skia/src/ports/SkRemotableFontMgr_win_dw.cpp',
+ 'skia/src/ports/SkScalerContext_win_dw.cpp',
+ 'skia/src/ports/SkTLS_win.cpp',
+ 'skia/src/ports/SkTypeface_win_dw.cpp',
+ 'skia/src/utils/SkThreadUtils_win.cpp',
+ 'skia/src/utils/win/SkAutoCoInitialize.cpp',
+ 'skia/src/utils/win/SkDWrite.cpp',
+ 'skia/src/utils/win/SkDWriteFontFileStream.cpp',
+ 'skia/src/utils/win/SkDWriteGeometrySink.cpp',
+ 'skia/src/utils/win/SkHRESULT.cpp',
+ 'skia/src/utils/win/SkIStream.cpp',
+ ]
+if CONFIG['INTEL_ARCHITECTURE']:
+ UNIFIED_SOURCES += [
+ 'skia/src/opts/opts_check_x86.cpp',
+ ]
+ SOURCES += [
+ 'skia/src/opts/SkBitmapFilter_opts_SSE2.cpp',
+ 'skia/src/opts/SkBitmapProcState_opts_SSE2.cpp',
+ 'skia/src/opts/SkBitmapProcState_opts_SSSE3.cpp',
+ 'skia/src/opts/SkBlitRow_opts_SSE2.cpp',
+ 'skia/src/opts/SkOpts_avx.cpp',
+ 'skia/src/opts/SkOpts_hsw.cpp',
+ 'skia/src/opts/SkOpts_sse41.cpp',
+ 'skia/src/opts/SkOpts_sse42.cpp',
+ 'skia/src/opts/SkOpts_ssse3.cpp',
+ ]
+ SOURCES['skia/src/opts/SkBitmapFilter_opts_SSE2.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkBitmapProcState_opts_SSE2.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkBitmapProcState_opts_SSSE3.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkBlitRow_opts_SSE2.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkOpts_avx.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkOpts_hsw.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkOpts_sse41.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkOpts_sse42.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkOpts_ssse3.cpp'].flags += skia_opt_flags
+elif CONFIG['CPU_ARCH'] in ('arm', 'aarch64') and CONFIG['GNU_CC']:
+ UNIFIED_SOURCES += [
+ 'skia/src/core/SkUtilsArm.cpp',
+ ]
+ SOURCES += [
+ 'skia/src/opts/SkBitmapProcState_opts_arm.cpp',
+ 'skia/src/opts/SkBlitMask_opts_arm.cpp',
+ 'skia/src/opts/SkBlitRow_opts_arm.cpp',
+ ]
+ SOURCES['skia/src/opts/SkBitmapProcState_opts_arm.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkBlitMask_opts_arm.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkBlitRow_opts_arm.cpp'].flags += skia_opt_flags
+ if CONFIG['CPU_ARCH'] == 'aarch64':
+ SOURCES += [
+ 'skia/src/opts/SkBitmapProcState_arm_neon.cpp',
+ 'skia/src/opts/SkBitmapProcState_matrixProcs_neon.cpp',
+ 'skia/src/opts/SkBlitMask_opts_arm_neon.cpp',
+ 'skia/src/opts/SkBlitRow_opts_arm_neon.cpp',
+ 'skia/src/opts/SkOpts_neon.cpp',
+ ]
+ elif CONFIG['BUILD_ARM_NEON']:
+ SOURCES += [
+ 'skia/src/opts/SkBitmapProcState_arm_neon.cpp',
+ 'skia/src/opts/SkBitmapProcState_matrixProcs_neon.cpp',
+ 'skia/src/opts/SkBlitMask_opts_arm_neon.cpp',
+ 'skia/src/opts/SkBlitRow_opts_arm_neon.cpp',
+ 'skia/src/opts/SkOpts_neon.cpp',
+ ]
+ SOURCES['skia/src/opts/SkBitmapProcState_arm_neon.cpp'].flags += CONFIG['NEON_FLAGS']
+ SOURCES['skia/src/opts/SkBitmapProcState_matrixProcs_neon.cpp'].flags += CONFIG['NEON_FLAGS']
+ SOURCES['skia/src/opts/SkBlitMask_opts_arm_neon.cpp'].flags += CONFIG['NEON_FLAGS']
+ SOURCES['skia/src/opts/SkBlitRow_opts_arm_neon.cpp'].flags += CONFIG['NEON_FLAGS']
+ SOURCES['skia/src/opts/SkOpts_neon.cpp'].flags += CONFIG['NEON_FLAGS']
+ if CONFIG['CPU_ARCH'] == 'aarch64' or CONFIG['BUILD_ARM_NEON']:
+ SOURCES['skia/src/opts/SkBitmapProcState_arm_neon.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkBitmapProcState_matrixProcs_neon.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkBlitMask_opts_arm_neon.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkBlitRow_opts_arm_neon.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkOpts_neon.cpp'].flags += skia_opt_flags
+else:
+ SOURCES += [
+ 'skia/src/opts/SkBitmapProcState_opts_none.cpp',
+ 'skia/src/opts/SkBlitMask_opts_none.cpp',
+ 'skia/src/opts/SkBlitRow_opts_none.cpp',
+ ]
+
+
+# We allow warnings for third-party code that can be updated from upstream.
+ALLOW_COMPILER_WARNINGS = True
+
+FINAL_LIBRARY = 'gkmedias'
+LOCAL_INCLUDES += [
+ 'skia/include/c',
+ 'skia/include/config',
+ 'skia/include/core',
+ 'skia/include/effects',
+ 'skia/include/gpu',
+ 'skia/include/images',
+ 'skia/include/pathops',
+ 'skia/include/ports',
+ 'skia/include/private',
+ 'skia/include/utils',
+ 'skia/include/utils/mac',
+ 'skia/include/views',
+ 'skia/src/core',
+ 'skia/src/gpu',
+ 'skia/src/gpu/effects',
+ 'skia/src/gpu/gl',
+ 'skia/src/image',
+ 'skia/src/lazy',
+ 'skia/src/opts',
+ 'skia/src/sfnt',
+ 'skia/src/utils',
+ 'skia/src/utils/mac',
+ 'skia/src/utils/win',
+]
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] in ('android'):
+ DEFINES['SK_FONTHOST_CAIRO_STANDALONE'] = 0
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] in {
+ 'android',
+ 'cocoa',
+ 'gtk2',
+ 'gtk3',
+ 'uikit',
+ }:
+ DEFINES['SK_FONTHOST_DOES_NOT_USE_FONTMGR'] = 1
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':
+ DEFINES['UNICODE'] = True
+ DEFINES['_UNICODE'] = True
+ UNIFIED_SOURCES += [
+ 'skia/src/fonts/SkFontMgr_indirect.cpp',
+ 'skia/src/fonts/SkRemotableFontMgr.cpp',
+ ]
+
+# We should autogenerate these SSE related flags.
+
+if CONFIG['_MSC_VER']:
+ # MSVC doesn't need special compiler flags, but Skia needs to be told that these files should
+ # be built with the required SSE level or it will simply compile in stubs and cause runtime crashes
+ SOURCES['skia/src/opts/SkBitmapFilter_opts_SSE2.cpp'].flags += ['-DSK_CPU_SSE_LEVEL=20']
+ SOURCES['skia/src/opts/SkBitmapProcState_opts_SSE2.cpp'].flags += ['-DSK_CPU_SSE_LEVEL=20']
+ SOURCES['skia/src/opts/SkBitmapProcState_opts_SSSE3.cpp'].flags += ['-DSK_CPU_SSE_LEVEL=31']
+ SOURCES['skia/src/opts/SkBlitRow_opts_SSE2.cpp'].flags += ['-DSK_CPU_SSE_LEVEL=20']
+ SOURCES['skia/src/opts/SkOpts_ssse3.cpp'].flags += ['-DSK_CPU_SSE_LEVEL=31']
+ SOURCES['skia/src/opts/SkOpts_sse41.cpp'].flags += ['-DSK_CPU_SSE_LEVEL=41']
+ SOURCES['skia/src/opts/SkOpts_sse42.cpp'].flags += ['-DSK_CPU_SSE_LEVEL=42']
+ SOURCES['skia/src/opts/SkOpts_avx.cpp'].flags += ['-DSK_CPU_SSE_LEVEL=51']
+if CONFIG['INTEL_ARCHITECTURE'] and (CONFIG['GNU_CC'] or CONFIG['CLANG_CL']):
+ SOURCES['skia/src/opts/SkBitmapFilter_opts_SSE2.cpp'].flags += CONFIG['SSE2_FLAGS']
+ SOURCES['skia/src/opts/SkBitmapProcState_opts_SSE2.cpp'].flags += CONFIG['SSE2_FLAGS']
+ SOURCES['skia/src/opts/SkBitmapProcState_opts_SSSE3.cpp'].flags += ['-mssse3']
+ SOURCES['skia/src/opts/SkBlitRow_opts_SSE2.cpp'].flags += CONFIG['SSE2_FLAGS']
+ SOURCES['skia/src/opts/SkOpts_ssse3.cpp'].flags += ['-mssse3']
+ SOURCES['skia/src/opts/SkOpts_sse41.cpp'].flags += ['-msse4.1']
+ SOURCES['skia/src/opts/SkOpts_sse42.cpp'].flags += ['-msse4.2']
+ SOURCES['skia/src/opts/SkOpts_avx.cpp'].flags += ['-mavx']
+elif CONFIG['CPU_ARCH'] == 'arm' and CONFIG['GNU_CC'] and CONFIG['BUILD_ARM_NEON']:
+ DEFINES['SK_ARM_HAS_OPTIONAL_NEON'] = 1
+
+DEFINES['SKIA_IMPLEMENTATION'] = 1
+
+if not CONFIG['MOZ_ENABLE_SKIA_GPU']:
+ DEFINES['SK_SUPPORT_GPU'] = 0
+
+if CONFIG['MOZ_TREE_FREETYPE']:
+ DEFINES['SK_CAN_USE_DLOPEN'] = 0
+
+# Suppress warnings in third-party code.
+if CONFIG['GNU_CXX'] or CONFIG['CLANG_CL']:
+ CXXFLAGS += [
+ '-Wno-deprecated-declarations',
+ '-Wno-overloaded-virtual',
+ '-Wno-shadow',
+ '-Wno-sign-compare',
+ '-Wno-unreachable-code',
+ '-Wno-unused-function',
+ ]
+if CONFIG['GNU_CXX'] and not CONFIG['CLANG_CXX'] and not CONFIG['CLANG_CL']:
+ CXXFLAGS += [
+ '-Wno-logical-op',
+ '-Wno-maybe-uninitialized',
+ ]
+if CONFIG['CLANG_CXX'] or CONFIG['CLANG_CL']:
+ CXXFLAGS += [
+ '-Wno-implicit-fallthrough',
+ '-Wno-inconsistent-missing-override',
+ '-Wno-macro-redefined',
+ '-Wno-unused-private-field',
+ ]
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] in ('gtk2', 'gtk3', 'android'):
+ CXXFLAGS += CONFIG['MOZ_CAIRO_CFLAGS']
+ CXXFLAGS += CONFIG['CAIRO_FT_CFLAGS']
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] in ('gtk2', 'gtk3'):
+ CXXFLAGS += CONFIG['MOZ_PANGO_CFLAGS']
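Since the file above is autogenerated, changes should go through generate_mozbuild.py rather than hand edits. A hypothetical spot-check that regenerates moz.build and compares it with the checked-in copy (the file names and flow here are illustrative, not part of the tree):

import filecmp
import shutil
import subprocess

# Keep a copy of the checked-in moz.build, regenerate it, then compare.
shutil.copyfile('moz.build', 'moz.build.orig')
subprocess.check_call(['python', 'generate_mozbuild.py'])
if not filecmp.cmp('moz.build', 'moz.build.orig', shallow=False):
    raise SystemExit('moz.build differs from generator output')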
diff --git a/gfx/skia/patches/README b/gfx/skia/patches/README
new file mode 100644
index 000000000..8fd2c5396
--- /dev/null
+++ b/gfx/skia/patches/README
@@ -0,0 +1,2 @@
+We no longer keep a local patch queue of patches against upstream. The protocol now
+is to upstream all patches before they are landed in mozilla-central.
diff --git a/gfx/skia/patches/archive/0001-Bug-687189-Implement-SkPaint-getPosTextPath.patch b/gfx/skia/patches/archive/0001-Bug-687189-Implement-SkPaint-getPosTextPath.patch
new file mode 100644
index 000000000..f8e76dbb9
--- /dev/null
+++ b/gfx/skia/patches/archive/0001-Bug-687189-Implement-SkPaint-getPosTextPath.patch
@@ -0,0 +1,66 @@
+From 27a914815e757ed12523edf968c9da134dabeaf8 Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:10:44 -0400
+Subject: [PATCH 01/10] Bug 755869 - [4] Re-apply bug 687189 - Implement
+ SkPaint::getPosTextPath r=mattwoodrow
+
+---
+ gfx/skia/include/core/SkPaint.h | 3 +++
+ gfx/skia/src/core/SkPaint.cpp | 27 +++++++++++++++++++++++++++
+ 2 files changed, 30 insertions(+), 0 deletions(-)
+
+diff --git a/gfx/skia/include/core/SkPaint.h b/gfx/skia/include/core/SkPaint.h
+index 1930db1..ff37d77 100644
+--- a/gfx/skia/include/core/SkPaint.h
++++ b/gfx/skia/include/core/SkPaint.h
+@@ -813,6 +813,9 @@ public:
+ void getTextPath(const void* text, size_t length, SkScalar x, SkScalar y,
+ SkPath* path) const;
+
++ void getPosTextPath(const void* text, size_t length,
++ const SkPoint pos[], SkPath* path) const;
++
+ #ifdef SK_BUILD_FOR_ANDROID
+ const SkGlyph& getUnicharMetrics(SkUnichar);
+ const SkGlyph& getGlyphMetrics(uint16_t);
+diff --git a/gfx/skia/src/core/SkPaint.cpp b/gfx/skia/src/core/SkPaint.cpp
+index 1b74fa1..4c119aa 100644
+--- a/gfx/skia/src/core/SkPaint.cpp
++++ b/gfx/skia/src/core/SkPaint.cpp
+@@ -1355,6 +1355,33 @@ void SkPaint::getTextPath(const void* textData, size_t length,
+ }
+ }
+
++void SkPaint::getPosTextPath(const void* textData, size_t length,
++ const SkPoint pos[], SkPath* path) const {
++ SkASSERT(length == 0 || textData != NULL);
++
++ const char* text = (const char*)textData;
++ if (text == NULL || length == 0 || path == NULL) {
++ return;
++ }
++
++ SkTextToPathIter iter(text, length, *this, false);
++ SkMatrix matrix;
++ SkPoint prevPos;
++ prevPos.set(0, 0);
++
++ matrix.setScale(iter.getPathScale(), iter.getPathScale());
++ path->reset();
++
++ unsigned int i = 0;
++ const SkPath* iterPath;
++ while ((iterPath = iter.next(NULL)) != NULL) {
++ matrix.postTranslate(pos[i].fX - prevPos.fX, pos[i].fY - prevPos.fY);
++ path->addPath(*iterPath, matrix);
++ prevPos = pos[i];
++ i++;
++ }
++}
++
+ static void add_flattenable(SkDescriptor* desc, uint32_t tag,
+ SkFlattenableWriteBuffer* buffer) {
+ buffer->flatten(desc->addEntry(tag, buffer->size(), NULL));
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0001-Bug-777614-Re-add-our-SkUserConfig.h-r-nrc.patch b/gfx/skia/patches/archive/0001-Bug-777614-Re-add-our-SkUserConfig.h-r-nrc.patch
new file mode 100644
index 000000000..8fe0135fb
--- /dev/null
+++ b/gfx/skia/patches/archive/0001-Bug-777614-Re-add-our-SkUserConfig.h-r-nrc.patch
@@ -0,0 +1,34 @@
+From 2dd8c789fc4ad3b5323c2c29f3e982d185f5b5d9 Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 13 Sep 2012 22:33:38 -0400
+Subject: [PATCH 1/9] Bug 777614 - Re-add our SkUserConfig.h r=nrc
+
+---
+ gfx/skia/include/config/SkUserConfig.h | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/gfx/skia/include/config/SkUserConfig.h b/gfx/skia/include/config/SkUserConfig.h
+index 353272c..fbfbfe0 100644
+--- a/gfx/skia/include/config/SkUserConfig.h
++++ b/gfx/skia/include/config/SkUserConfig.h
+@@ -184,5 +184,16 @@
+ directories from your include search path when you're not building the GPU
+ backend. Defaults to 1 (build the GPU code).
+ */
+-//#define SK_SUPPORT_GPU 1
++#define SK_SUPPORT_GPU 0
++
++/* Don't dither 32bit gradients, to match what the canvas test suite expects.
++ */
++#define SK_DISABLE_DITHER_32BIT_GRADIENT
++
++/* Don't include stdint.h on windows as it conflicts with our build system.
++ */
++#ifdef SK_BUILD_FOR_WIN32
++ #define SK_IGNORE_STDINT_DOT_H
++#endif
++
+ #endif
+--
+1.7.11.4
+
diff --git a/gfx/skia/patches/archive/0001-Bug-803063-Skia-cross-compilation-for-Windows-fails-.patch b/gfx/skia/patches/archive/0001-Bug-803063-Skia-cross-compilation-for-Windows-fails-.patch
new file mode 100644
index 000000000..20155977e
--- /dev/null
+++ b/gfx/skia/patches/archive/0001-Bug-803063-Skia-cross-compilation-for-Windows-fails-.patch
@@ -0,0 +1,26 @@
+From 81ff1a8f5c2a7cc9e8b853101b995433a0c0fa37 Mon Sep 17 00:00:00 2001
+From: Jacek Caban <jacek@codeweavers.com>
+Date: Thu, 18 Oct 2012 15:25:08 +0200
+Subject: [PATCH] Bug 803063 - Skia cross compilation for Windows fails on
+ case sensitive OS
+
+---
+ gfx/skia/src/core/SkAdvancedTypefaceMetrics.cpp | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/gfx/skia/src/core/SkAdvancedTypefaceMetrics.cpp b/gfx/skia/src/core/SkAdvancedTypefaceMetrics.cpp
+index 370616e..b647ada 100644
+--- a/gfx/skia/src/core/SkAdvancedTypefaceMetrics.cpp
++++ b/gfx/skia/src/core/SkAdvancedTypefaceMetrics.cpp
+@@ -13,7 +13,7 @@
+ SK_DEFINE_INST_COUNT(SkAdvancedTypefaceMetrics)
+
+ #if defined(SK_BUILD_FOR_WIN)
+-#include <DWrite.h>
++#include <dwrite.h>
+ #endif
+
+ #if defined(SK_BUILD_FOR_UNIX) || defined(SK_BUILD_FOR_ANDROID)
+--
+1.7.8.6
+
diff --git a/gfx/skia/patches/archive/0001-Bug-895086-Remove-unused-find_from_uniqueID-function.patch b/gfx/skia/patches/archive/0001-Bug-895086-Remove-unused-find_from_uniqueID-function.patch
new file mode 100644
index 000000000..aa1fadb43
--- /dev/null
+++ b/gfx/skia/patches/archive/0001-Bug-895086-Remove-unused-find_from_uniqueID-function.patch
@@ -0,0 +1,38 @@
+From 58861c38751adf1f4ef3f67f8e85f5c36f1c43a5 Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Wed, 17 Jul 2013 16:28:07 -0400
+Subject: [PATCH] Bug 895086 - Remove unused find_from_uniqueID() function from
+ SkFontHost_linux
+
+---
+ gfx/skia/src/ports/SkFontHost_linux.cpp | 14 --------------
+ 1 file changed, 14 deletions(-)
+
+diff --git a/gfx/skia/src/ports/SkFontHost_linux.cpp b/gfx/skia/src/ports/SkFontHost_linux.cpp
+index df21014..05b73dc 100644
+--- a/gfx/skia/src/ports/SkFontHost_linux.cpp
++++ b/gfx/skia/src/ports/SkFontHost_linux.cpp
+@@ -117,20 +117,6 @@ static FamilyRec* find_family(const SkTypeface* member) {
+ return NULL;
+ }
+
+-static SkTypeface* find_from_uniqueID(uint32_t uniqueID) {
+- FamilyRec* curr = gFamilyHead;
+- while (curr != NULL) {
+- for (int i = 0; i < 4; i++) {
+- SkTypeface* face = curr->fFaces[i];
+- if (face != NULL && face->uniqueID() == uniqueID) {
+- return face;
+- }
+- }
+- curr = curr->fNext;
+- }
+- return NULL;
+-}
+-
+ /* Remove reference to this face from its family. If the resulting family
+ is empty (has no faces), return that family, otherwise return NULL
+ */
+--
+1.8.3.1
+
diff --git a/gfx/skia/patches/archive/0002-Bug-688366-Dont-invalidate-all-radial-gradients.patch b/gfx/skia/patches/archive/0002-Bug-688366-Dont-invalidate-all-radial-gradients.patch
new file mode 100644
index 000000000..d396b4ed1
--- /dev/null
+++ b/gfx/skia/patches/archive/0002-Bug-688366-Dont-invalidate-all-radial-gradients.patch
@@ -0,0 +1,30 @@
+From f310d7e8b8d9cf6870c739650324bb585b591c0c Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:11:32 -0400
+Subject: [PATCH 02/10] Bug 755869 - [5] Re-apply bug 688366 - Fix Skia
+ marking radial gradients with the same radius as
+ invalid. r=mattwoodrow
+
+---
+ gfx/skia/src/effects/SkGradientShader.cpp | 5 ++++-
+ 1 files changed, 4 insertions(+), 1 deletions(-)
+
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+index 6de820b..59ba48c 100644
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -1911,7 +1911,10 @@ public:
+ SkPMColor* SK_RESTRICT dstC = dstCParam;
+
+ // Zero difference between radii: fill with transparent black.
+- if (fDiffRadius == 0) {
++ // TODO: Is removing this actually correct? Two circles with the
++    // same radius but different centers don't sound like they
++ // should be cleared
++ if (fDiffRadius == 0 && fCenter1 == fCenter2) {
+ sk_bzero(dstC, count * sizeof(*dstC));
+ return;
+ }
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0002-Bug-848491-Re-apply-Bug-795549-Move-TileProc-functio.patch b/gfx/skia/patches/archive/0002-Bug-848491-Re-apply-Bug-795549-Move-TileProc-functio.patch
new file mode 100644
index 000000000..6ac2c9179
--- /dev/null
+++ b/gfx/skia/patches/archive/0002-Bug-848491-Re-apply-Bug-795549-Move-TileProc-functio.patch
@@ -0,0 +1,50 @@
+From: George Wright <george@mozilla.com>
+Date: Mon, 14 Jan 2013 17:59:09 -0500
+Subject: Bug 848491 - Re-apply Bug 795549 - Move TileProc functions into their own file to ensure they only exist once in a library
+
+
+diff --git a/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h b/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
+index b9dbf1b..729ce4e 100644
+--- a/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
++++ b/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
+@@ -37,34 +37,9 @@ static inline void sk_memset32_dither(uint32_t dst[], uint32_t v0, uint32_t v1,
+ }
+ }
+
+-// Clamp
+-
+-static inline SkFixed clamp_tileproc(SkFixed x) {
+- return SkClampMax(x, 0xFFFF);
+-}
+-
+-// Repeat
+-
+-static inline SkFixed repeat_tileproc(SkFixed x) {
+- return x & 0xFFFF;
+-}
+-
+-// Mirror
+-
+-// Visual Studio 2010 (MSC_VER=1600) optimizes bit-shift code incorrectly.
+-// See http://code.google.com/p/skia/issues/detail?id=472
+-#if defined(_MSC_VER) && (_MSC_VER >= 1600)
+-#pragma optimize("", off)
+-#endif
+-
+-static inline SkFixed mirror_tileproc(SkFixed x) {
+- int s = x << 15 >> 31;
+- return (x ^ s) & 0xFFFF;
+-}
+-
+-#if defined(_MSC_VER) && (_MSC_VER >= 1600)
+-#pragma optimize("", on)
+-#endif
++SkFixed clamp_tileproc(SkFixed x);
++SkFixed repeat_tileproc(SkFixed x);
++SkFixed mirror_tileproc(SkFixed x);
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0003-SkUserConfig-for-Mozilla.patch b/gfx/skia/patches/archive/0003-SkUserConfig-for-Mozilla.patch
new file mode 100644
index 000000000..dc52a8d3d
--- /dev/null
+++ b/gfx/skia/patches/archive/0003-SkUserConfig-for-Mozilla.patch
@@ -0,0 +1,39 @@
+From ef53776c06cffc7607c3777702f93e04c0852981 Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:13:49 -0400
+Subject: [PATCH 03/10] Bug 755869 - [6] Re-apply SkUserConfig (no
+ original bug) r=mattwoodrow
+
+---
+ gfx/skia/include/config/SkUserConfig.h | 10 ++++++++++
+ 1 files changed, 10 insertions(+), 0 deletions(-)
+
+diff --git a/gfx/skia/include/config/SkUserConfig.h b/gfx/skia/include/config/SkUserConfig.h
+index 9fdbd0a..f98ba85 100644
+--- a/gfx/skia/include/config/SkUserConfig.h
++++ b/gfx/skia/include/config/SkUserConfig.h
+@@ -156,6 +156,10 @@
+ //#define SK_SUPPORT_UNITTEST
+ #endif
+
++/* Don't dither 32bit gradients, to match what the canvas test suite expects.
++ */
++#define SK_DISABLE_DITHER_32BIT_GRADIENT
++
+ /* If your system embeds skia and has complex event logging, define this
+ symbol to name a file that maps the following macros to your system's
+ equivalents:
+@@ -177,4 +181,10 @@
+ #define SK_A32_SHIFT 24
+ #endif
+
++/* Don't include stdint.h on windows as it conflicts with our build system.
++ */
++#ifdef SK_BUILD_FOR_WIN32
++ #define SK_IGNORE_STDINT_DOT_H
++#endif
++
+ #endif
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0004-Bug-722011-Fix-trailing-commas-in-enums.patch b/gfx/skia/patches/archive/0004-Bug-722011-Fix-trailing-commas-in-enums.patch
new file mode 100644
index 000000000..179aeded5
--- /dev/null
+++ b/gfx/skia/patches/archive/0004-Bug-722011-Fix-trailing-commas-in-enums.patch
@@ -0,0 +1,280 @@
+From 81d61682a94d47be5b47fb7882ea7e7c7e6c3351 Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:15:28 -0400
+Subject: [PATCH 04/10] Bug 755869 - [7] Re-apply bug 722011 - Fix
+ trailing commas at end of enum lists r=mattwoodrow
+
+---
+ gfx/skia/include/core/SkAdvancedTypefaceMetrics.h | 8 ++++----
+ gfx/skia/include/core/SkBlitRow.h | 2 +-
+ gfx/skia/include/core/SkCanvas.h | 2 +-
+ gfx/skia/include/core/SkDevice.h | 2 +-
+ gfx/skia/include/core/SkDeviceProfile.h | 4 ++--
+ gfx/skia/include/core/SkFlattenable.h | 2 +-
+ gfx/skia/include/core/SkFontHost.h | 4 ++--
+ gfx/skia/include/core/SkMaskFilter.h | 2 +-
+ gfx/skia/include/core/SkPaint.h | 4 ++--
+ gfx/skia/include/core/SkScalerContext.h | 9 +++++----
+ gfx/skia/include/core/SkTypes.h | 2 +-
+ gfx/skia/include/effects/SkLayerDrawLooper.h | 2 +-
+ gfx/skia/src/core/SkBitmap.cpp | 2 +-
+ gfx/skia/src/core/SkGlyphCache.cpp | 2 +-
+ 14 files changed, 24 insertions(+), 23 deletions(-)
+
+diff --git a/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h b/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h
+index 09fc9a9..5ffdb45 100644
+--- a/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h
++++ b/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h
+@@ -34,7 +34,7 @@ public:
+ kCFF_Font,
+ kTrueType_Font,
+ kOther_Font,
+- kNotEmbeddable_Font,
++ kNotEmbeddable_Font
+ };
+ // The type of the underlying font program. This field determines which
+ // of the following fields are valid. If it is kOther_Font or
+@@ -56,7 +56,7 @@ public:
+ kItalic_Style = 0x00040,
+ kAllCaps_Style = 0x10000,
+ kSmallCaps_Style = 0x20000,
+- kForceBold_Style = 0x40000,
++ kForceBold_Style = 0x40000
+ };
+ uint16_t fStyle; // Font style characteristics.
+ int16_t fItalicAngle; // Counterclockwise degrees from vertical of the
+@@ -75,7 +75,7 @@ public:
+ kHAdvance_PerGlyphInfo = 0x1, // Populate horizontal advance data.
+ kVAdvance_PerGlyphInfo = 0x2, // Populate vertical advance data.
+ kGlyphNames_PerGlyphInfo = 0x4, // Populate glyph names (Type 1 only).
+- kToUnicode_PerGlyphInfo = 0x8, // Populate ToUnicode table, ignored
++ kToUnicode_PerGlyphInfo = 0x8 // Populate ToUnicode table, ignored
+ // for Type 1 fonts
+ };
+
+@@ -84,7 +84,7 @@ public:
+ enum MetricType {
+ kDefault, // Default advance: fAdvance.count = 1
+ kRange, // Advances for a range: fAdvance.count = fEndID-fStartID
+- kRun, // fStartID-fEndID have same advance: fAdvance.count = 1
++ kRun // fStartID-fEndID have same advance: fAdvance.count = 1
+ };
+ MetricType fType;
+ uint16_t fStartId;
+diff --git a/gfx/skia/include/core/SkBlitRow.h b/gfx/skia/include/core/SkBlitRow.h
+index 973ab4c..febc405 100644
+--- a/gfx/skia/include/core/SkBlitRow.h
++++ b/gfx/skia/include/core/SkBlitRow.h
+@@ -42,7 +42,7 @@ public:
+
+ enum Flags32 {
+ kGlobalAlpha_Flag32 = 1 << 0,
+- kSrcPixelAlpha_Flag32 = 1 << 1,
++ kSrcPixelAlpha_Flag32 = 1 << 1
+ };
+
+ /** Function pointer that blends 32bit colors onto a 32bit destination.
+diff --git a/gfx/skia/include/core/SkCanvas.h b/gfx/skia/include/core/SkCanvas.h
+index 25cc94a..d942783 100644
+--- a/gfx/skia/include/core/SkCanvas.h
++++ b/gfx/skia/include/core/SkCanvas.h
+@@ -148,7 +148,7 @@ public:
+ * low byte to high byte: R, G, B, A.
+ */
+ kRGBA_Premul_Config8888,
+- kRGBA_Unpremul_Config8888,
++ kRGBA_Unpremul_Config8888
+ };
+
+ /**
+diff --git a/gfx/skia/include/core/SkDevice.h b/gfx/skia/include/core/SkDevice.h
+index 1e4e0a3..b4d44bf 100644
+--- a/gfx/skia/include/core/SkDevice.h
++++ b/gfx/skia/include/core/SkDevice.h
+@@ -139,7 +139,7 @@ public:
+ protected:
+ enum Usage {
+ kGeneral_Usage,
+- kSaveLayer_Usage, // <! internal use only
++ kSaveLayer_Usage // <! internal use only
+ };
+
+ struct TextFlags {
+diff --git a/gfx/skia/include/core/SkDeviceProfile.h b/gfx/skia/include/core/SkDeviceProfile.h
+index 46b9781..f6a0bca 100644
+--- a/gfx/skia/include/core/SkDeviceProfile.h
++++ b/gfx/skia/include/core/SkDeviceProfile.h
+@@ -17,7 +17,7 @@ public:
+ kRGB_Horizontal_LCDConfig,
+ kBGR_Horizontal_LCDConfig,
+ kRGB_Vertical_LCDConfig,
+- kBGR_Vertical_LCDConfig,
++ kBGR_Vertical_LCDConfig
+ };
+
+ enum FontHintLevel {
+@@ -25,7 +25,7 @@ public:
+ kSlight_FontHintLevel,
+ kNormal_FontHintLevel,
+ kFull_FontHintLevel,
+- kAuto_FontHintLevel,
++ kAuto_FontHintLevel
+ };
+
+ /**
+diff --git a/gfx/skia/include/core/SkFlattenable.h b/gfx/skia/include/core/SkFlattenable.h
+index 5714f9d..dc115fc 100644
+--- a/gfx/skia/include/core/SkFlattenable.h
++++ b/gfx/skia/include/core/SkFlattenable.h
+@@ -272,7 +272,7 @@ public:
+ * Instructs the writer to inline Factory names as there are seen the
+ * first time (after that we store an index). The pipe code uses this.
+ */
+- kInlineFactoryNames_Flag = 0x02,
++ kInlineFactoryNames_Flag = 0x02
+ };
+ Flags getFlags() const { return (Flags)fFlags; }
+ void setFlags(Flags flags) { fFlags = flags; }
+diff --git a/gfx/skia/include/core/SkFontHost.h b/gfx/skia/include/core/SkFontHost.h
+index 732de5c..10f9bdf 100644
+--- a/gfx/skia/include/core/SkFontHost.h
++++ b/gfx/skia/include/core/SkFontHost.h
+@@ -240,7 +240,7 @@ public:
+ */
+ enum LCDOrientation {
+ kHorizontal_LCDOrientation = 0, //!< this is the default
+- kVertical_LCDOrientation = 1,
++ kVertical_LCDOrientation = 1
+ };
+
+ static void SetSubpixelOrientation(LCDOrientation orientation);
+@@ -259,7 +259,7 @@ public:
+ enum LCDOrder {
+ kRGB_LCDOrder = 0, //!< this is the default
+ kBGR_LCDOrder = 1,
+- kNONE_LCDOrder = 2,
++ kNONE_LCDOrder = 2
+ };
+
+ static void SetSubpixelOrder(LCDOrder order);
+diff --git a/gfx/skia/include/core/SkMaskFilter.h b/gfx/skia/include/core/SkMaskFilter.h
+index 9a470a4..3422e27 100644
+--- a/gfx/skia/include/core/SkMaskFilter.h
++++ b/gfx/skia/include/core/SkMaskFilter.h
+@@ -61,7 +61,7 @@ public:
+ kNormal_BlurType, //!< fuzzy inside and outside
+ kSolid_BlurType, //!< solid inside, fuzzy outside
+ kOuter_BlurType, //!< nothing inside, fuzzy outside
+- kInner_BlurType, //!< fuzzy inside, nothing outside
++ kInner_BlurType //!< fuzzy inside, nothing outside
+ };
+
+ struct BlurInfo {
+diff --git a/gfx/skia/include/core/SkPaint.h b/gfx/skia/include/core/SkPaint.h
+index ff37d77..7c96e193 100644
+--- a/gfx/skia/include/core/SkPaint.h
++++ b/gfx/skia/include/core/SkPaint.h
+@@ -76,7 +76,7 @@ public:
+ kNo_Hinting = 0,
+ kSlight_Hinting = 1,
+ kNormal_Hinting = 2, //!< this is the default
+- kFull_Hinting = 3,
++ kFull_Hinting = 3
+ };
+
+ Hinting getHinting() const {
+@@ -289,7 +289,7 @@ public:
+ kStroke_Style, //!< stroke the geometry
+ kStrokeAndFill_Style, //!< fill and stroke the geometry
+
+- kStyleCount,
++ kStyleCount
+ };
+
+ /** Return the paint's style, used for controlling how primitives'
+diff --git a/gfx/skia/include/core/SkScalerContext.h b/gfx/skia/include/core/SkScalerContext.h
+index 2cb171b..3dbce27 100644
+--- a/gfx/skia/include/core/SkScalerContext.h
++++ b/gfx/skia/include/core/SkScalerContext.h
+@@ -182,21 +182,22 @@ public:
+ kGenA8FromLCD_Flag = 0x0800,
+
+ #ifdef SK_USE_COLOR_LUMINANCE
+- kLuminance_Bits = 3,
++ kLuminance_Bits = 3
+ #else
+ // luminance : 0 for black text, kLuminance_Max for white text
+ kLuminance_Shift = 13, // shift to land in the high 3-bits of Flags
+- kLuminance_Bits = 3, // ensure Flags doesn't exceed 16bits
++ kLuminance_Bits = 3 // ensure Flags doesn't exceed 16bits
+ #endif
+ };
+
+ // computed values
+ enum {
+- kHinting_Mask = kHintingBit1_Flag | kHintingBit2_Flag,
+ #ifdef SK_USE_COLOR_LUMINANCE
++ kHinting_Mask = kHintingBit1_Flag | kHintingBit2_Flag
+ #else
++ kHinting_Mask = kHintingBit1_Flag | kHintingBit2_Flag,
+ kLuminance_Max = (1 << kLuminance_Bits) - 1,
+- kLuminance_Mask = kLuminance_Max << kLuminance_Shift,
++ kLuminance_Mask = kLuminance_Max << kLuminance_Shift
+ #endif
+ };
+
+diff --git a/gfx/skia/include/core/SkTypes.h b/gfx/skia/include/core/SkTypes.h
+index 7963a7d..0c5c2d7 100644
+--- a/gfx/skia/include/core/SkTypes.h
++++ b/gfx/skia/include/core/SkTypes.h
+@@ -438,7 +438,7 @@ public:
+ * current block is dynamically allocated, just return the old
+ * block.
+ */
+- kReuse_OnShrink,
++ kReuse_OnShrink
+ };
+
+ /**
+diff --git a/gfx/skia/include/effects/SkLayerDrawLooper.h b/gfx/skia/include/effects/SkLayerDrawLooper.h
+index 0bc4af2..6cb8ef6 100644
+--- a/gfx/skia/include/effects/SkLayerDrawLooper.h
++++ b/gfx/skia/include/effects/SkLayerDrawLooper.h
+@@ -41,7 +41,7 @@ public:
+ * - Flags and Color are always computed using the LayerInfo's
+ * fFlagsMask and fColorMode.
+ */
+- kEntirePaint_Bits = -1,
++ kEntirePaint_Bits = -1
+
+ };
+ typedef int32_t BitFlags;
+diff --git a/gfx/skia/src/core/SkBitmap.cpp b/gfx/skia/src/core/SkBitmap.cpp
+index 6b99145..aff52fd 100644
+--- a/gfx/skia/src/core/SkBitmap.cpp
++++ b/gfx/skia/src/core/SkBitmap.cpp
+@@ -1376,7 +1376,7 @@ enum {
+ SERIALIZE_PIXELTYPE_RAW_WITH_CTABLE,
+ SERIALIZE_PIXELTYPE_RAW_NO_CTABLE,
+ SERIALIZE_PIXELTYPE_REF_DATA,
+- SERIALIZE_PIXELTYPE_REF_PTR,
++ SERIALIZE_PIXELTYPE_REF_PTR
+ };
+
+ /*
+diff --git a/gfx/skia/src/core/SkGlyphCache.cpp b/gfx/skia/src/core/SkGlyphCache.cpp
+index f3363cd..1fddc9d 100644
+--- a/gfx/skia/src/core/SkGlyphCache.cpp
++++ b/gfx/skia/src/core/SkGlyphCache.cpp
+@@ -417,7 +417,7 @@ class SkGlyphCache_Globals {
+ public:
+ enum UseMutex {
+ kNo_UseMutex, // thread-local cache
+- kYes_UseMutex, // shared cache
++ kYes_UseMutex // shared cache
+ };
+
+ SkGlyphCache_Globals(UseMutex um) {
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0004-Bug-777614-Re-apply-bug-719872-Fix-crash-on-Android-.patch b/gfx/skia/patches/archive/0004-Bug-777614-Re-apply-bug-719872-Fix-crash-on-Android-.patch
new file mode 100644
index 000000000..ad6e18127
--- /dev/null
+++ b/gfx/skia/patches/archive/0004-Bug-777614-Re-apply-bug-719872-Fix-crash-on-Android-.patch
@@ -0,0 +1,684 @@
+From 0d730a94e9f6676d5cde45f955fe025a4549817e Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 23 Aug 2012 16:45:38 -0400
+Subject: [PATCH 4/9] Bug 777614 - Re-apply bug 719872 - Fix crash on Android
+ by reverting to older FontHost r=nrc
+
+---
+ gfx/skia/src/ports/SkFontHost_android_old.cpp | 664 ++++++++++++++++++++++++++
+ 1 file changed, 664 insertions(+)
+ create mode 100644 gfx/skia/src/ports/SkFontHost_android_old.cpp
+
+diff --git a/gfx/skia/src/ports/SkFontHost_android_old.cpp b/gfx/skia/src/ports/SkFontHost_android_old.cpp
+new file mode 100644
+index 0000000..b5c4f3c
+--- /dev/null
++++ b/gfx/skia/src/ports/SkFontHost_android_old.cpp
+@@ -0,0 +1,664 @@
++
++/*
++ * Copyright 2006 The Android Open Source Project
++ *
++ * Use of this source code is governed by a BSD-style license that can be
++ * found in the LICENSE file.
++ */
++
++
++#include "SkFontHost.h"
++#include "SkDescriptor.h"
++#include "SkMMapStream.h"
++#include "SkPaint.h"
++#include "SkString.h"
++#include "SkStream.h"
++#include "SkThread.h"
++#include "SkTSearch.h"
++#include <stdio.h>
++
++#define FONT_CACHE_MEMORY_BUDGET (768 * 1024)
++
++#ifndef SK_FONT_FILE_PREFIX
++ #define SK_FONT_FILE_PREFIX "/fonts/"
++#endif
++
++bool find_name_and_attributes(SkStream* stream, SkString* name, SkTypeface::Style* style,
++ bool* isFixedWidth);
++
++static void GetFullPathForSysFonts(SkString* full, const char name[]) {
++ full->set(getenv("ANDROID_ROOT"));
++ full->append(SK_FONT_FILE_PREFIX);
++ full->append(name);
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++struct FamilyRec;
++
++/* This guy holds a mapping of a name -> family, used for looking up fonts.
++ Since it is stored in a stretchy array that doesn't preserve object
++ semantics, we don't use constructor/destructors, but just have explicit
++ helpers to manage our internal bookkeeping.
++*/
++struct NameFamilyPair {
++ const char* fName; // we own this
++ FamilyRec* fFamily; // we don't own this, we just reference it
++
++ void construct(const char name[], FamilyRec* family) {
++ fName = strdup(name);
++        fFamily = family;   // we don't own this, so just record the reference
++ }
++
++ void destruct() {
++ free((char*)fName);
++ // we don't own family, so just ignore our reference
++ }
++};
++
++// we use atomic_inc to grow this for each typeface we create
++static int32_t gUniqueFontID;
++
++// this is the mutex that protects these globals
++static SkMutex gFamilyMutex;
++static FamilyRec* gFamilyHead;
++static SkTDArray<NameFamilyPair> gNameList;
++
++struct FamilyRec {
++ FamilyRec* fNext;
++ SkTypeface* fFaces[4];
++
++ FamilyRec()
++ {
++ fNext = gFamilyHead;
++ memset(fFaces, 0, sizeof(fFaces));
++ gFamilyHead = this;
++ }
++};
++
++static SkTypeface* find_best_face(const FamilyRec* family,
++ SkTypeface::Style style) {
++ SkTypeface* const* faces = family->fFaces;
++
++ if (faces[style] != NULL) { // exact match
++ return faces[style];
++ }
++ // look for a matching bold
++ style = (SkTypeface::Style)(style ^ SkTypeface::kItalic);
++ if (faces[style] != NULL) {
++ return faces[style];
++ }
++ // look for the plain
++ if (faces[SkTypeface::kNormal] != NULL) {
++ return faces[SkTypeface::kNormal];
++ }
++ // look for anything
++ for (int i = 0; i < 4; i++) {
++ if (faces[i] != NULL) {
++ return faces[i];
++ }
++ }
++ // should never get here, since the faces list should not be empty
++ SkASSERT(!"faces list is empty");
++ return NULL;
++}
++
++static FamilyRec* find_family(const SkTypeface* member) {
++ FamilyRec* curr = gFamilyHead;
++ while (curr != NULL) {
++ for (int i = 0; i < 4; i++) {
++ if (curr->fFaces[i] == member) {
++ return curr;
++ }
++ }
++ curr = curr->fNext;
++ }
++ return NULL;
++}
++
++/* Returns the matching typeface, or NULL. If a typeface is found, its refcnt
++ is not modified.
++ */
++static SkTypeface* find_from_uniqueID(uint32_t uniqueID) {
++ FamilyRec* curr = gFamilyHead;
++ while (curr != NULL) {
++ for (int i = 0; i < 4; i++) {
++ SkTypeface* face = curr->fFaces[i];
++ if (face != NULL && face->uniqueID() == uniqueID) {
++ return face;
++ }
++ }
++ curr = curr->fNext;
++ }
++ return NULL;
++}
++
++/* Remove reference to this face from its family. If the resulting family
++ is empty (has no faces), return that family, otherwise return NULL
++*/
++static FamilyRec* remove_from_family(const SkTypeface* face) {
++ FamilyRec* family = find_family(face);
++ SkASSERT(family->fFaces[face->style()] == face);
++ family->fFaces[face->style()] = NULL;
++
++ for (int i = 0; i < 4; i++) {
++ if (family->fFaces[i] != NULL) { // family is non-empty
++ return NULL;
++ }
++ }
++ return family; // return the empty family
++}
++
++// maybe we should make FamilyRec be doubly-linked
++static void detach_and_delete_family(FamilyRec* family) {
++ FamilyRec* curr = gFamilyHead;
++ FamilyRec* prev = NULL;
++
++ while (curr != NULL) {
++ FamilyRec* next = curr->fNext;
++ if (curr == family) {
++ if (prev == NULL) {
++ gFamilyHead = next;
++ } else {
++ prev->fNext = next;
++ }
++ SkDELETE(family);
++ return;
++ }
++ prev = curr;
++ curr = next;
++ }
++ SkASSERT(!"Yikes, couldn't find family in our list to remove/delete");
++}
++
++static SkTypeface* find_typeface(const char name[], SkTypeface::Style style) {
++ NameFamilyPair* list = gNameList.begin();
++ int count = gNameList.count();
++
++ int index = SkStrLCSearch(&list[0].fName, count, name, sizeof(list[0]));
++
++ if (index >= 0) {
++ return find_best_face(list[index].fFamily, style);
++ }
++ return NULL;
++}
++
++static SkTypeface* find_typeface(const SkTypeface* familyMember,
++ SkTypeface::Style style) {
++ const FamilyRec* family = find_family(familyMember);
++ return family ? find_best_face(family, style) : NULL;
++}
++
++static void add_name(const char name[], FamilyRec* family) {
++ SkAutoAsciiToLC tolc(name);
++ name = tolc.lc();
++
++ NameFamilyPair* list = gNameList.begin();
++ int count = gNameList.count();
++
++ int index = SkStrLCSearch(&list[0].fName, count, name, sizeof(list[0]));
++
++ if (index < 0) {
++ list = gNameList.insert(~index);
++ list->construct(name, family);
++ }
++}
++
++static void remove_from_names(FamilyRec* emptyFamily)
++{
++#ifdef SK_DEBUG
++ for (int i = 0; i < 4; i++) {
++ SkASSERT(emptyFamily->fFaces[i] == NULL);
++ }
++#endif
++
++ SkTDArray<NameFamilyPair>& list = gNameList;
++
++ // must go backwards when removing
++ for (int i = list.count() - 1; i >= 0; --i) {
++ NameFamilyPair* pair = &list[i];
++ if (pair->fFamily == emptyFamily) {
++ pair->destruct();
++ list.remove(i);
++ }
++ }
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++class FamilyTypeface : public SkTypeface {
++public:
++ FamilyTypeface(Style style, bool sysFont, SkTypeface* familyMember,
++ bool isFixedWidth)
++ : SkTypeface(style, sk_atomic_inc(&gUniqueFontID) + 1, isFixedWidth) {
++ fIsSysFont = sysFont;
++
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ FamilyRec* rec = NULL;
++ if (familyMember) {
++ rec = find_family(familyMember);
++ SkASSERT(rec);
++ } else {
++ rec = SkNEW(FamilyRec);
++ }
++ rec->fFaces[style] = this;
++ }
++
++ virtual ~FamilyTypeface() {
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ // remove us from our family. If the family is now empty, we return
++ // that and then remove that family from the name list
++ FamilyRec* family = remove_from_family(this);
++ if (NULL != family) {
++ remove_from_names(family);
++ detach_and_delete_family(family);
++ }
++ }
++
++ bool isSysFont() const { return fIsSysFont; }
++
++ virtual SkStream* openStream() = 0;
++ virtual const char* getUniqueString() const = 0;
++ virtual const char* getFilePath() const = 0;
++
++private:
++ bool fIsSysFont;
++
++ typedef SkTypeface INHERITED;
++};
++
++///////////////////////////////////////////////////////////////////////////////
++
++class StreamTypeface : public FamilyTypeface {
++public:
++ StreamTypeface(Style style, bool sysFont, SkTypeface* familyMember,
++ SkStream* stream, bool isFixedWidth)
++ : INHERITED(style, sysFont, familyMember, isFixedWidth) {
++ SkASSERT(stream);
++ stream->ref();
++ fStream = stream;
++ }
++ virtual ~StreamTypeface() {
++ fStream->unref();
++ }
++
++ // overrides
++ virtual SkStream* openStream() {
++ // we just ref our existing stream, since the caller will call unref()
++ // when they are through
++ fStream->ref();
++ // must rewind each time, since the caller assumes a "new" stream
++ fStream->rewind();
++ return fStream;
++ }
++ virtual const char* getUniqueString() const { return NULL; }
++ virtual const char* getFilePath() const { return NULL; }
++
++private:
++ SkStream* fStream;
++
++ typedef FamilyTypeface INHERITED;
++};
++
++class FileTypeface : public FamilyTypeface {
++public:
++ FileTypeface(Style style, bool sysFont, SkTypeface* familyMember,
++ const char path[], bool isFixedWidth)
++ : INHERITED(style, sysFont, familyMember, isFixedWidth) {
++ SkString fullpath;
++
++ if (sysFont) {
++ GetFullPathForSysFonts(&fullpath, path);
++ path = fullpath.c_str();
++ }
++ fPath.set(path);
++ }
++
++ // overrides
++ virtual SkStream* openStream() {
++ SkStream* stream = SkNEW_ARGS(SkMMAPStream, (fPath.c_str()));
++
++ // check for failure
++ if (stream->getLength() <= 0) {
++ SkDELETE(stream);
++ // maybe MMAP isn't supported. try FILE
++ stream = SkNEW_ARGS(SkFILEStream, (fPath.c_str()));
++ if (stream->getLength() <= 0) {
++ SkDELETE(stream);
++ stream = NULL;
++ }
++ }
++ return stream;
++ }
++ virtual const char* getUniqueString() const {
++ const char* str = strrchr(fPath.c_str(), '/');
++ if (str) {
++ str += 1; // skip the '/'
++ }
++ return str;
++ }
++ virtual const char* getFilePath() const {
++ return fPath.c_str();
++ }
++
++private:
++ SkString fPath;
++
++ typedef FamilyTypeface INHERITED;
++};
++
++///////////////////////////////////////////////////////////////////////////////
++///////////////////////////////////////////////////////////////////////////////
++
++static bool get_name_and_style(const char path[], SkString* name,
++ SkTypeface::Style* style,
++ bool* isFixedWidth, bool isExpected) {
++ SkString fullpath;
++ GetFullPathForSysFonts(&fullpath, path);
++
++ SkMMAPStream stream(fullpath.c_str());
++ if (stream.getLength() > 0) {
++ find_name_and_attributes(&stream, name, style, isFixedWidth);
++ return true;
++ }
++ else {
++ SkFILEStream stream(fullpath.c_str());
++ if (stream.getLength() > 0) {
++ find_name_and_attributes(&stream, name, style, isFixedWidth);
++ return true;
++ }
++ }
++
++ if (isExpected) {
++ SkDebugf("---- failed to open <%s> as a font\n", fullpath.c_str());
++ }
++ return false;
++}
++
++// used to record our notion of the pre-existing fonts
++struct FontInitRec {
++ const char* fFileName;
++ const char* const* fNames; // null-terminated list
++};
++
++static const char* gSansNames[] = {
++ "sans-serif", "arial", "helvetica", "tahoma", "verdana", NULL
++};
++
++static const char* gSerifNames[] = {
++ "serif", "times", "times new roman", "palatino", "georgia", "baskerville",
++ "goudy", "fantasy", "cursive", "ITC Stone Serif", NULL
++};
++
++static const char* gMonoNames[] = {
++ "monospace", "courier", "courier new", "monaco", NULL
++};
++
++// deliberately empty, but we use the address to identify fallback fonts
++static const char* gFBNames[] = { NULL };
++
++/* Fonts must be grouped by family, with the first font in a family having the
++ list of names (even if that list is empty), and the following members having
++ null for the list. The names list must be NULL-terminated
++*/
++static const FontInitRec gSystemFonts[] = {
++ { "DroidSans.ttf", gSansNames },
++ { "DroidSans-Bold.ttf", NULL },
++ { "DroidSerif-Regular.ttf", gSerifNames },
++ { "DroidSerif-Bold.ttf", NULL },
++ { "DroidSerif-Italic.ttf", NULL },
++ { "DroidSerif-BoldItalic.ttf", NULL },
++ { "DroidSansMono.ttf", gMonoNames },
++ /* These are optional, and can be ignored if not found in the file system.
++ These are appended to gFallbackFonts[] as they are seen, so we list
++ them in the order we want them to be accessed by NextLogicalFont().
++ */
++ { "DroidSansArabic.ttf", gFBNames },
++ { "DroidSansHebrew.ttf", gFBNames },
++ { "DroidSansThai.ttf", gFBNames },
++ { "MTLmr3m.ttf", gFBNames }, // Motoya Japanese Font
++ { "MTLc3m.ttf", gFBNames }, // Motoya Japanese Font
++ { "DroidSansJapanese.ttf", gFBNames },
++ { "DroidSansFallback.ttf", gFBNames }
++};
++
++#define DEFAULT_NAMES gSansNames
++
++// these globals are assigned (once) by load_system_fonts()
++static FamilyRec* gDefaultFamily;
++static SkTypeface* gDefaultNormal;
++
++/* This is sized conservatively, assuming that it will never be a size issue.
++ It will be initialized in load_system_fonts(), and will be filled with the
++ fontIDs that can be used for fallback consideration, in sorted order (sorted
++ meaning element[0] should be used first, then element[1], etc. When we hit
++ a fontID==0 in the array, the list is done, hence our allocation size is
++ +1 the total number of possible system fonts. Also see NextLogicalFont().
++ */
++static uint32_t gFallbackFonts[SK_ARRAY_COUNT(gSystemFonts)+1];
++
++/* Called once (ensured by the sentinel check at the beginning of our body).
++   Initializes all the globals, and registers the system fonts.
++ */
++static void load_system_fonts() {
++    // check if we've already been called
++ if (NULL != gDefaultNormal) {
++ return;
++ }
++
++ const FontInitRec* rec = gSystemFonts;
++ SkTypeface* firstInFamily = NULL;
++ int fallbackCount = 0;
++
++ for (size_t i = 0; i < SK_ARRAY_COUNT(gSystemFonts); i++) {
++ // if we're the first in a new family, clear firstInFamily
++ if (rec[i].fNames != NULL) {
++ firstInFamily = NULL;
++ }
++
++ bool isFixedWidth;
++ SkString name;
++ SkTypeface::Style style;
++
++ // we expect all the fonts, except the "fallback" fonts
++ bool isExpected = (rec[i].fNames != gFBNames);
++ if (!get_name_and_style(rec[i].fFileName, &name, &style,
++ &isFixedWidth, isExpected)) {
++ continue;
++ }
++
++ SkTypeface* tf = SkNEW_ARGS(FileTypeface,
++ (style,
++ true, // system-font (cannot delete)
++ firstInFamily, // what family to join
++ rec[i].fFileName,
++ isFixedWidth) // filename
++ );
++
++ if (rec[i].fNames != NULL) {
++ // see if this is one of our fallback fonts
++ if (rec[i].fNames == gFBNames) {
++ // SkDebugf("---- adding %s as fallback[%d] fontID %d\n",
++ // rec[i].fFileName, fallbackCount, tf->uniqueID());
++ gFallbackFonts[fallbackCount++] = tf->uniqueID();
++ }
++
++ firstInFamily = tf;
++ FamilyRec* family = find_family(tf);
++ const char* const* names = rec[i].fNames;
++
++ // record the default family if this is it
++ if (names == DEFAULT_NAMES) {
++ gDefaultFamily = family;
++ }
++ // add the names to map to this family
++ while (*names) {
++ add_name(*names, family);
++ names += 1;
++ }
++ }
++ }
++
++ // do this after all fonts are loaded. This is our default font, and it
++ // acts as a sentinel so we only execute load_system_fonts() once
++ gDefaultNormal = find_best_face(gDefaultFamily, SkTypeface::kNormal);
++ // now terminate our fallback list with the sentinel value
++ gFallbackFonts[fallbackCount] = 0;
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++void SkFontHost::Serialize(const SkTypeface* face, SkWStream* stream) {
++ const char* name = ((FamilyTypeface*)face)->getUniqueString();
++
++ stream->write8((uint8_t)face->style());
++
++ if (NULL == name || 0 == *name) {
++ stream->writePackedUInt(0);
++// SkDebugf("--- fonthost serialize null\n");
++ } else {
++ uint32_t len = strlen(name);
++ stream->writePackedUInt(len);
++ stream->write(name, len);
++// SkDebugf("--- fonthost serialize <%s> %d\n", name, face->style());
++ }
++}
++
++SkTypeface* SkFontHost::Deserialize(SkStream* stream) {
++ load_system_fonts();
++
++ int style = stream->readU8();
++
++ int len = stream->readPackedUInt();
++ if (len > 0) {
++ SkString str;
++ str.resize(len);
++ stream->read(str.writable_str(), len);
++
++ const FontInitRec* rec = gSystemFonts;
++ for (size_t i = 0; i < SK_ARRAY_COUNT(gSystemFonts); i++) {
++ if (strcmp(rec[i].fFileName, str.c_str()) == 0) {
++ // backup until we hit the fNames
++ for (int j = i; j >= 0; --j) {
++ if (rec[j].fNames != NULL) {
++ return SkFontHost::CreateTypeface(NULL,
++ rec[j].fNames[0], (SkTypeface::Style)style);
++ }
++ }
++ }
++ }
++ }
++ return NULL;
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++SkTypeface* SkFontHost::CreateTypeface(const SkTypeface* familyFace,
++ const char familyName[],
++ SkTypeface::Style style) {
++ load_system_fonts();
++
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ // clip to legal style bits
++ style = (SkTypeface::Style)(style & SkTypeface::kBoldItalic);
++
++ SkTypeface* tf = NULL;
++
++ if (NULL != familyFace) {
++ tf = find_typeface(familyFace, style);
++ } else if (NULL != familyName) {
++// SkDebugf("======= familyName <%s>\n", familyName);
++ tf = find_typeface(familyName, style);
++ }
++
++ if (NULL == tf) {
++ tf = find_best_face(gDefaultFamily, style);
++ }
++
++    // we ref(), since the semantic is to return a new instance
++ tf->ref();
++ return tf;
++}
++
++SkStream* SkFontHost::OpenStream(uint32_t fontID) {
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ FamilyTypeface* tf = (FamilyTypeface*)find_from_uniqueID(fontID);
++ SkStream* stream = tf ? tf->openStream() : NULL;
++
++ if (stream && stream->getLength() == 0) {
++ stream->unref();
++ stream = NULL;
++ }
++ return stream;
++}
++
++size_t SkFontHost::GetFileName(SkFontID fontID, char path[], size_t length,
++ int32_t* index) {
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ FamilyTypeface* tf = (FamilyTypeface*)find_from_uniqueID(fontID);
++ const char* src = tf ? tf->getFilePath() : NULL;
++
++ if (src) {
++ size_t size = strlen(src);
++ if (path) {
++ memcpy(path, src, SkMin32(size, length));
++ }
++ if (index) {
++ *index = 0; // we don't have collections (yet)
++ }
++ return size;
++ } else {
++ return 0;
++ }
++}
++
++SkFontID SkFontHost::NextLogicalFont(SkFontID currFontID, SkFontID origFontID) {
++ load_system_fonts();
++
++ /* First see if fontID is already one of our fallbacks. If so, return
++ its successor. If fontID is not in our list, then return the first one
++ in our list. Note: list is zero-terminated, and returning zero means
++ we have no more fonts to use for fallbacks.
++ */
++ const uint32_t* list = gFallbackFonts;
++ for (int i = 0; list[i] != 0; i++) {
++ if (list[i] == currFontID) {
++ return list[i+1];
++ }
++ }
++ return list[0];
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++SkTypeface* SkFontHost::CreateTypefaceFromStream(SkStream* stream) {
++ if (NULL == stream || stream->getLength() <= 0) {
++ return NULL;
++ }
++
++ bool isFixedWidth;
++ SkString name;
++ SkTypeface::Style style;
++ find_name_and_attributes(stream, &name, &style, &isFixedWidth);
++
++ if (!name.isEmpty()) {
++ return SkNEW_ARGS(StreamTypeface, (style, false, NULL, stream, isFixedWidth));
++ } else {
++ return NULL;
++ }
++}
++
++SkTypeface* SkFontHost::CreateTypefaceFromFile(const char path[]) {
++ SkStream* stream = SkNEW_ARGS(SkMMAPStream, (path));
++ SkTypeface* face = SkFontHost::CreateTypefaceFromStream(stream);
++ // since we created the stream, we let go of our ref() here
++ stream->unref();
++ return face;
++}
++
++///////////////////////////////////////////////////////////////////////////////
+--
+1.7.11.4
+
diff --git a/gfx/skia/patches/archive/0005-Bug-731384-Fix-clang-SK_OVERRIDE.patch b/gfx/skia/patches/archive/0005-Bug-731384-Fix-clang-SK_OVERRIDE.patch
new file mode 100644
index 000000000..e8b5df635
--- /dev/null
+++ b/gfx/skia/patches/archive/0005-Bug-731384-Fix-clang-SK_OVERRIDE.patch
@@ -0,0 +1,36 @@
+From 80350275c72921ed5ac405c029ae33727467d7c5 Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:15:50 -0400
+Subject: [PATCH 05/10] Bug 755869 - [8] Re-apply bug 731384 - Fix compile
+ errors on older versions of clang r=mattwoodrow
+
+---
+ gfx/skia/include/core/SkPostConfig.h | 9 +++++++++
+ 1 files changed, 9 insertions(+), 0 deletions(-)
+
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+index 8316f7a..041fe2a 100644
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -288,9 +288,18 @@
+ #if defined(_MSC_VER)
+ #define SK_OVERRIDE override
+ #elif defined(__clang__)
++#if __has_feature(cxx_override_control)
+ // Some documentation suggests we should be using __attribute__((override)),
+ // but it doesn't work.
+ #define SK_OVERRIDE override
++#elif defined(__has_extension)
++#if __has_extension(cxx_override_control)
++#define SK_OVERRIDE override
++#endif
++#endif
++#ifndef SK_OVERRIDE
++#define SK_OVERRIDE
++#endif
+ #else
+ // Linux GCC ignores "__attribute__((override))" and rejects "override".
+ #define SK_OVERRIDE
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0005-Bug-736276-Add-a-new-SkFontHost-that-takes-a-cairo_s.patch b/gfx/skia/patches/archive/0005-Bug-736276-Add-a-new-SkFontHost-that-takes-a-cairo_s.patch
new file mode 100644
index 000000000..4b76fcea1
--- /dev/null
+++ b/gfx/skia/patches/archive/0005-Bug-736276-Add-a-new-SkFontHost-that-takes-a-cairo_s.patch
@@ -0,0 +1,472 @@
+From: George Wright <george@mozilla.com>
+Date: Wed, 1 Aug 2012 16:43:15 -0400
+Subject: Bug 736276 - Add a new SkFontHost that takes a cairo_scaled_font_t r=karl
+
+
+diff --git a/gfx/skia/Makefile.in b/gfx/skia/Makefile.in
+index 5ebbd2e..7c8cdbf 100644
+--- a/gfx/skia/Makefile.in
++++ b/gfx/skia/Makefile.in
+@@ -60,15 +60,15 @@ VPATH += \
+ $(NULL)
+
+ ifeq (android,$(MOZ_WIDGET_TOOLKIT))
+-OS_CXXFLAGS += $(CAIRO_FT_CFLAGS)
++OS_CXXFLAGS += $(MOZ_CAIRO_CFLAGS) $(CAIRO_FT_CFLAGS)
+ endif
+
+ ifeq (gtk2,$(MOZ_WIDGET_TOOLKIT))
+-OS_CXXFLAGS += $(MOZ_PANGO_CFLAGS)
++OS_CXXFLAGS += $(MOZ_CAIRO_CFLAGS) $(MOZ_PANGO_CFLAGS) $(CAIRO_FT_CFLAGS)
+ endif
+
+ ifeq (qt,$(MOZ_WIDGET_TOOLKIT))
+-OS_CXXFLAGS += $(MOZ_PANGO_CFLAGS)
++OS_CXXFLAGS += $(MOZ_CAIRO_CFLAGS) $(MOZ_PANGO_CFLAGS) $(CAIRO_FT_CFLAGS)
+ ifeq (Linux,$(OS_TARGET))
+ DEFINES += -DSK_USE_POSIX_THREADS=1
+ endif
+diff --git a/gfx/skia/include/ports/SkTypeface_cairo.h b/gfx/skia/include/ports/SkTypeface_cairo.h
+new file mode 100644
+index 0000000..7e44f04
+--- /dev/null
++++ b/gfx/skia/include/ports/SkTypeface_cairo.h
+@@ -0,0 +1,11 @@
++#ifndef SkTypeface_cairo_DEFINED
++#define SkTypeface_cairo_DEFINED
++
++#include <cairo.h>
++
++#include "SkTypeface.h"
++
++SK_API extern SkTypeface* SkCreateTypefaceFromCairoFont(cairo_font_face_t* fontFace, SkTypeface::Style style, bool isFixedWidth);
++
++#endif
++
+diff --git a/gfx/skia/moz.build b/gfx/skia/moz.build
+index 9ceba59..66efd52 100644
+--- a/gfx/skia/moz.build
++++ b/gfx/skia/moz.build
+@@ -171,10 +171,12 @@ elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':
+ 'SkTime_win.cpp',
+ ]
+ elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gtk2':
++ EXPORTS.skia += [
++ 'include/ports/SkTypeface_cairo.h',
++ ]
+ CPP_SOURCES += [
+- 'SkFontHost_FreeType.cpp',
++ 'SkFontHost_cairo.cpp',
+ 'SkFontHost_FreeType_common.cpp',
+- 'SkFontHost_linux.cpp',
+ 'SkThread_pthread.cpp',
+ 'SkThreadUtils_pthread.cpp',
+ 'SkThreadUtils_pthread_linux.cpp',
+@@ -183,14 +185,15 @@ elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gtk2':
+ ]
+ elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'qt':
+ CPP_SOURCES += [
+- 'SkFontHost_FreeType.cpp',
++ 'SkFontHost_cairo.cpp',
+ 'SkFontHost_FreeType_common.cpp',
+ 'SkOSFile.cpp',
+ ]
+ if CONFIG['OS_TARGET'] == 'Linux':
++ EXPORTS.skia += [
++ 'include/ports/SkTypeface_cairo.h',
++ ]
+ CPP_SOURCES += [
+- 'SkFontHost_linux.cpp',
+- 'SkFontHost_tables.cpp',
+ 'SkThread_pthread.cpp',
+ 'SkThreadUtils_pthread.cpp',
+ 'SkThreadUtils_pthread_linux.cpp',
+@@ -204,11 +207,13 @@ elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gonk':
+ # Separate 'if' from above, since the else below applies to all != 'android'
+ # toolkits.
+ if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'android':
++ EXPORTS.skia += [
++ 'include/ports/SkTypeface_cairo.h',
++ ]
+ CPP_SOURCES += [
+ 'ashmem.cpp',
+ 'SkDebug_android.cpp',
+- 'SkFontHost_android_old.cpp',
+- 'SkFontHost_FreeType.cpp',
++ 'SkFontHost_cairo.cpp',
+ 'SkFontHost_FreeType_common.cpp',
+ 'SkImageRef_ashmem.cpp',
+ 'SkTime_Unix.cpp',
+diff --git a/gfx/skia/src/ports/SkFontHost_cairo.cpp b/gfx/skia/src/ports/SkFontHost_cairo.cpp
+new file mode 100644
+index 0000000..bb5b778
+--- /dev/null
++++ b/gfx/skia/src/ports/SkFontHost_cairo.cpp
+@@ -0,0 +1,364 @@
++
++/*
++ * Copyright 2012 Mozilla Foundation
++ *
++ * Use of this source code is governed by a BSD-style license that can be
++ * found in the LICENSE file.
++ */
++
++#include "cairo.h"
++#include "cairo-ft.h"
++
++#include "SkFontHost_FreeType_common.h"
++
++#include "SkAdvancedTypefaceMetrics.h"
++#include "SkFontHost.h"
++#include "SkPath.h"
++#include "SkScalerContext.h"
++#include "SkTypefaceCache.h"
++
++#include <ft2build.h>
++#include FT_FREETYPE_H
++
++static cairo_user_data_key_t kSkTypefaceKey;
++
++class SkScalerContext_CairoFT : public SkScalerContext_FreeType_Base {
++public:
++ SkScalerContext_CairoFT(SkTypeface* typeface, const SkDescriptor* desc);
++ virtual ~SkScalerContext_CairoFT();
++
++protected:
++ virtual unsigned generateGlyphCount() SK_OVERRIDE;
++ virtual uint16_t generateCharToGlyph(SkUnichar uniChar) SK_OVERRIDE;
++ virtual void generateAdvance(SkGlyph* glyph) SK_OVERRIDE;
++ virtual void generateMetrics(SkGlyph* glyph) SK_OVERRIDE;
++ virtual void generateImage(const SkGlyph& glyph) SK_OVERRIDE;
++ virtual void generatePath(const SkGlyph& glyph, SkPath* path) SK_OVERRIDE;
++ virtual void generateFontMetrics(SkPaint::FontMetrics* mx,
++ SkPaint::FontMetrics* my) SK_OVERRIDE;
++ virtual SkUnichar generateGlyphToChar(uint16_t glyph) SK_OVERRIDE;
++private:
++ cairo_scaled_font_t* fScaledFont;
++ uint32_t fLoadGlyphFlags;
++};
++
++class CairoLockedFTFace {
++public:
++ CairoLockedFTFace(cairo_scaled_font_t* scaledFont)
++ : fScaledFont(scaledFont)
++ , fFace(cairo_ft_scaled_font_lock_face(scaledFont))
++ {}
++
++ ~CairoLockedFTFace()
++ {
++ cairo_ft_scaled_font_unlock_face(fScaledFont);
++ }
++
++ FT_Face getFace()
++ {
++ return fFace;
++ }
++
++private:
++ cairo_scaled_font_t* fScaledFont;
++ FT_Face fFace;
++};
++
++class SkCairoFTTypeface : public SkTypeface {
++public:
++ static SkTypeface* CreateTypeface(cairo_font_face_t* fontFace, SkTypeface::Style style, bool isFixedWidth) {
++ SkASSERT(fontFace != NULL);
++ SkASSERT(cairo_font_face_get_type(fontFace) == CAIRO_FONT_TYPE_FT);
++
++ SkFontID newId = SkTypefaceCache::NewFontID();
++
++ return SkNEW_ARGS(SkCairoFTTypeface, (fontFace, style, newId, isFixedWidth));
++ }
++
++ cairo_font_face_t* getFontFace() {
++ return fFontFace;
++ }
++
++ virtual SkStream* onOpenStream(int*) const SK_OVERRIDE { return NULL; }
++
++ virtual SkAdvancedTypefaceMetrics*
++ onGetAdvancedTypefaceMetrics(SkAdvancedTypefaceMetrics::PerGlyphInfo,
++ const uint32_t*, uint32_t) const SK_OVERRIDE
++ {
++ SkDEBUGCODE(SkDebugf("SkCairoFTTypeface::onGetAdvancedTypefaceMetrics unimplemented\n"));
++ return NULL;
++ }
++
++ virtual SkScalerContext* onCreateScalerContext(const SkDescriptor* desc) const SK_OVERRIDE
++ {
++ return SkNEW_ARGS(SkScalerContext_CairoFT, (const_cast<SkCairoFTTypeface*>(this), desc));
++ }
++
++ virtual void onFilterRec(SkScalerContextRec*) const SK_OVERRIDE
++ {
++ SkDEBUGCODE(SkDebugf("SkCairoFTTypeface::onFilterRec unimplemented\n"));
++ }
++
++ virtual void onGetFontDescriptor(SkFontDescriptor*, bool*) const SK_OVERRIDE
++ {
++ SkDEBUGCODE(SkDebugf("SkCairoFTTypeface::onGetFontDescriptor unimplemented\n"));
++ }
++
++
++private:
++
++ SkCairoFTTypeface(cairo_font_face_t* fontFace, SkTypeface::Style style, SkFontID id, bool isFixedWidth)
++ : SkTypeface(style, id, isFixedWidth)
++ , fFontFace(fontFace)
++ {
++ cairo_font_face_set_user_data(fFontFace, &kSkTypefaceKey, this, NULL);
++ cairo_font_face_reference(fFontFace);
++ }
++
++ ~SkCairoFTTypeface()
++ {
++ cairo_font_face_set_user_data(fFontFace, &kSkTypefaceKey, NULL, NULL);
++ cairo_font_face_destroy(fFontFace);
++ }
++
++ cairo_font_face_t* fFontFace;
++};
++
++SkTypeface* SkCreateTypefaceFromCairoFont(cairo_font_face_t* fontFace, SkTypeface::Style style, bool isFixedWidth)
++{
++ SkTypeface* typeface = reinterpret_cast<SkTypeface*>(cairo_font_face_get_user_data(fontFace, &kSkTypefaceKey));
++
++ if (typeface) {
++ typeface->ref();
++ } else {
++ typeface = SkCairoFTTypeface::CreateTypeface(fontFace, style, isFixedWidth);
++ SkTypefaceCache::Add(typeface, style);
++ }
++
++ return typeface;
++}
++
++SkTypeface* SkFontHost::CreateTypeface(const SkTypeface* familyFace,
++ const char famillyName[],
++ SkTypeface::Style style)
++{
++ SkDEBUGFAIL("SkFontHost::FindTypeface unimplemented");
++ return NULL;
++}
++
++SkTypeface* SkFontHost::CreateTypefaceFromStream(SkStream*)
++{
++ SkDEBUGFAIL("SkFontHost::CreateTypeface unimplemented");
++ return NULL;
++}
++
++SkTypeface* SkFontHost::CreateTypefaceFromFile(char const*)
++{
++ SkDEBUGFAIL("SkFontHost::CreateTypefaceFromFile unimplemented");
++ return NULL;
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++static bool isLCD(const SkScalerContext::Rec& rec) {
++ switch (rec.fMaskFormat) {
++ case SkMask::kLCD16_Format:
++ case SkMask::kLCD32_Format:
++ return true;
++ default:
++ return false;
++ }
++}
++
++///////////////////////////////////////////////////////////////////////////////
++SkScalerContext_CairoFT::SkScalerContext_CairoFT(SkTypeface* typeface, const SkDescriptor* desc)
++ : SkScalerContext_FreeType_Base(typeface, desc)
++{
++ SkMatrix matrix;
++ fRec.getSingleMatrix(&matrix);
++
++ cairo_font_face_t* fontFace = static_cast<SkCairoFTTypeface*>(typeface)->getFontFace();
++
++ cairo_matrix_t fontMatrix, ctMatrix;
++ cairo_matrix_init(&fontMatrix, matrix.getScaleX(), matrix.getSkewY(), matrix.getSkewX(), matrix.getScaleY(), 0.0, 0.0);
++ cairo_matrix_init_scale(&ctMatrix, 1.0, 1.0);
++
++ // We need to ensure that the font options match for hinting, as generateMetrics()
++ // uses the fScaledFont which uses these font options
++ cairo_font_options_t *fontOptions = cairo_font_options_create();
++
++ FT_Int32 loadFlags = FT_LOAD_DEFAULT;
++
++ if (SkMask::kBW_Format == fRec.fMaskFormat) {
++ // See http://code.google.com/p/chromium/issues/detail?id=43252#c24
++ loadFlags = FT_LOAD_TARGET_MONO;
++ if (fRec.getHinting() == SkPaint::kNo_Hinting) {
++ cairo_font_options_set_hint_style(fontOptions, CAIRO_HINT_STYLE_NONE);
++ loadFlags = FT_LOAD_NO_HINTING;
++ }
++ } else {
++ switch (fRec.getHinting()) {
++ case SkPaint::kNo_Hinting:
++ loadFlags = FT_LOAD_NO_HINTING;
++ cairo_font_options_set_hint_style(fontOptions, CAIRO_HINT_STYLE_NONE);
++ break;
++ case SkPaint::kSlight_Hinting:
++ loadFlags = FT_LOAD_TARGET_LIGHT; // This implies FORCE_AUTOHINT
++ cairo_font_options_set_hint_style(fontOptions, CAIRO_HINT_STYLE_SLIGHT);
++ break;
++ case SkPaint::kNormal_Hinting:
++ cairo_font_options_set_hint_style(fontOptions, CAIRO_HINT_STYLE_MEDIUM);
++ if (fRec.fFlags & SkScalerContext::kAutohinting_Flag) {
++ loadFlags = FT_LOAD_FORCE_AUTOHINT;
++ }
++ break;
++ case SkPaint::kFull_Hinting:
++ cairo_font_options_set_hint_style(fontOptions, CAIRO_HINT_STYLE_FULL);
++ if (fRec.fFlags & SkScalerContext::kAutohinting_Flag) {
++ loadFlags = FT_LOAD_FORCE_AUTOHINT;
++ }
++ if (isLCD(fRec)) {
++ if (SkToBool(fRec.fFlags & SkScalerContext::kLCD_Vertical_Flag)) {
++ loadFlags = FT_LOAD_TARGET_LCD_V;
++ } else {
++ loadFlags = FT_LOAD_TARGET_LCD;
++ }
++ }
++ break;
++ default:
++ SkDebugf("---------- UNKNOWN hinting %d\n", fRec.getHinting());
++ break;
++ }
++ }
++
++ fScaledFont = cairo_scaled_font_create(fontFace, &fontMatrix, &ctMatrix, fontOptions);
++
++ if ((fRec.fFlags & SkScalerContext::kEmbeddedBitmapText_Flag) == 0) {
++ loadFlags |= FT_LOAD_NO_BITMAP;
++ }
++
++ // Always using FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH to get correct
++ // advances, as fontconfig and cairo do.
++ // See http://code.google.com/p/skia/issues/detail?id=222.
++ loadFlags |= FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH;
++
++ fLoadGlyphFlags = loadFlags;
++}
++
++SkScalerContext_CairoFT::~SkScalerContext_CairoFT()
++{
++ cairo_scaled_font_destroy(fScaledFont);
++}
++
++unsigned SkScalerContext_CairoFT::generateGlyphCount()
++{
++ CairoLockedFTFace faceLock(fScaledFont);
++ return faceLock.getFace()->num_glyphs;
++}
++
++uint16_t SkScalerContext_CairoFT::generateCharToGlyph(SkUnichar uniChar)
++{
++ CairoLockedFTFace faceLock(fScaledFont);
++ return SkToU16(FT_Get_Char_Index(faceLock.getFace(), uniChar));
++}
++
++void SkScalerContext_CairoFT::generateAdvance(SkGlyph* glyph)
++{
++ generateMetrics(glyph);
++}
++
++void SkScalerContext_CairoFT::generateMetrics(SkGlyph* glyph)
++{
++ SkASSERT(fScaledFont != NULL);
++ cairo_text_extents_t extents;
++ cairo_glyph_t cairoGlyph = { glyph->getGlyphID(fBaseGlyphCount), 0.0, 0.0 };
++ cairo_scaled_font_glyph_extents(fScaledFont, &cairoGlyph, 1, &extents);
++
++ glyph->fAdvanceX = SkDoubleToFixed(extents.x_advance);
++ glyph->fAdvanceY = SkDoubleToFixed(extents.y_advance);
++ glyph->fWidth = SkToU16(SkScalarCeil(extents.width));
++ glyph->fHeight = SkToU16(SkScalarCeil(extents.height));
++ glyph->fLeft = SkToS16(SkScalarCeil(extents.x_bearing));
++ glyph->fTop = SkToS16(SkScalarCeil(extents.y_bearing));
++ glyph->fLsbDelta = 0;
++ glyph->fRsbDelta = 0;
++}
++
++void SkScalerContext_CairoFT::generateImage(const SkGlyph& glyph)
++{
++ SkASSERT(fScaledFont != NULL);
++ CairoLockedFTFace faceLock(fScaledFont);
++ FT_Face face = faceLock.getFace();
++
++ FT_Error err = FT_Load_Glyph(face, glyph.getGlyphID(fBaseGlyphCount), fLoadGlyphFlags);
++
++ if (err != 0) {
++ memset(glyph.fImage, 0, glyph.rowBytes() * glyph.fHeight);
++ return;
++ }
++
++ generateGlyphImage(face, glyph);
++}
++
++void SkScalerContext_CairoFT::generatePath(const SkGlyph& glyph, SkPath* path)
++{
++ SkASSERT(fScaledFont != NULL);
++ CairoLockedFTFace faceLock(fScaledFont);
++ FT_Face face = faceLock.getFace();
++
++ SkASSERT(&glyph && path);
++
++ uint32_t flags = fLoadGlyphFlags;
++ flags |= FT_LOAD_NO_BITMAP; // ignore embedded bitmaps so we're sure to get the outline
++ flags &= ~FT_LOAD_RENDER; // don't scan convert (we just want the outline)
++
++ FT_Error err = FT_Load_Glyph(face, glyph.getGlyphID(fBaseGlyphCount), flags);
++
++ if (err != 0) {
++ path->reset();
++ return;
++ }
++
++ generateGlyphPath(face, path);
++}
++
++void SkScalerContext_CairoFT::generateFontMetrics(SkPaint::FontMetrics* mx,
++ SkPaint::FontMetrics* my)
++{
++ SkDEBUGCODE(SkDebugf("SkScalerContext_CairoFT::generateFontMetrics unimplemented\n"));
++}
++
++SkUnichar SkScalerContext_CairoFT::generateGlyphToChar(uint16_t glyph)
++{
++ SkASSERT(fScaledFont != NULL);
++ CairoLockedFTFace faceLock(fScaledFont);
++ FT_Face face = faceLock.getFace();
++
++ FT_UInt glyphIndex;
++ SkUnichar charCode = FT_Get_First_Char(face, &glyphIndex);
++ while (glyphIndex != 0) {
++ if (glyphIndex == glyph) {
++ return charCode;
++ }
++ charCode = FT_Get_Next_Char(face, charCode, &glyphIndex);
++ }
++
++ return 0;
++}
++
++#ifdef SK_BUILD_FOR_ANDROID
++SkTypeface* SkAndroidNextLogicalTypeface(SkFontID currFontID,
++ SkFontID origFontID) {
++ return NULL;
++}
++#endif
++
++///////////////////////////////////////////////////////////////////////////////
++
++#include "SkFontMgr.h"
++
++SkFontMgr* SkFontMgr::Factory() {
++ // todo
++ return NULL;
++}
++
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0005-Bug-777614-Re-apply-bug-687188-Expand-the-gradient-c.patch b/gfx/skia/patches/archive/0005-Bug-777614-Re-apply-bug-687188-Expand-the-gradient-c.patch
new file mode 100644
index 000000000..cfcb40b9d
--- /dev/null
+++ b/gfx/skia/patches/archive/0005-Bug-777614-Re-apply-bug-687188-Expand-the-gradient-c.patch
@@ -0,0 +1,198 @@
+From 1ab13a923399aa638388231baca784ba89f2c82b Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Wed, 12 Sep 2012 12:30:29 -0400
+Subject: [PATCH 5/9] Bug 777614 - Re-apply bug 687188 - Expand the gradient
+ cache by 2 to store 0/1 colour stop values for
+ clamping. r=nrc
+
+---
+ .../src/effects/gradients/SkGradientShader.cpp | 22 +++++++++++----
+ .../src/effects/gradients/SkGradientShaderPriv.h | 5 +++-
+ .../src/effects/gradients/SkLinearGradient.cpp | 32 ++++++++++++++++------
+ .../gradients/SkTwoPointConicalGradient.cpp | 11 ++++++--
+ .../effects/gradients/SkTwoPointRadialGradient.cpp | 11 ++++++--
+ 5 files changed, 61 insertions(+), 20 deletions(-)
+
+diff --git a/gfx/skia/src/effects/gradients/SkGradientShader.cpp b/gfx/skia/src/effects/gradients/SkGradientShader.cpp
+index f0dac4d..79e7202 100644
+--- a/gfx/skia/src/effects/gradients/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/gradients/SkGradientShader.cpp
+@@ -426,15 +426,15 @@ static void complete_32bit_cache(SkPMColor* cache, int stride) {
+
+ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ if (fCache32 == NULL) {
+- // double the count for dither entries
+- const int entryCount = kCache32Count * 2;
++ // double the count for dither entries, and have an extra two entries for clamp values
++ const int entryCount = kCache32Count * 2 + 2;
+ const size_t allocSize = sizeof(SkPMColor) * entryCount;
+
+ if (NULL == fCache32PixelRef) {
+ fCache32PixelRef = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ }
+- fCache32 = (SkPMColor*)fCache32PixelRef->getAddr();
++ fCache32 = (SkPMColor*)fCache32PixelRef->getAddr() + 1;
+ if (fColorCount == 2) {
+ Build32bitCache(fCache32, fOrigColors[0], fOrigColors[1],
+ kGradient32Length, fCacheAlpha);
+@@ -458,7 +458,7 @@ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ SkMallocPixelRef* newPR = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ SkPMColor* linear = fCache32; // just computed linear data
+- SkPMColor* mapped = (SkPMColor*)newPR->getAddr(); // storage for mapped data
++ SkPMColor* mapped = (SkPMColor*)newPR->getAddr() + 1; // storage for mapped data
+ SkUnitMapper* map = fMapper;
+ for (int i = 0; i < kGradient32Length; i++) {
+ int index = map->mapUnit16((i << 8) | i) >> 8;
+@@ -467,10 +467,22 @@ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ }
+ fCache32PixelRef->unref();
+ fCache32PixelRef = newPR;
+- fCache32 = (SkPMColor*)newPR->getAddr();
++ fCache32 = (SkPMColor*)newPR->getAddr() + 1;
+ }
+ complete_32bit_cache(fCache32, kCache32Count);
+ }
++
++ // Write the clamp colours into the first and last entries of fCache32
++ fCache32[kCache32ClampLower] = SkPackARGB32(fCacheAlpha,
++ SkColorGetR(fOrigColors[0]),
++ SkColorGetG(fOrigColors[0]),
++ SkColorGetB(fOrigColors[0]));
++
++ fCache32[kCache32ClampUpper] = SkPackARGB32(fCacheAlpha,
++ SkColorGetR(fOrigColors[fColorCount - 1]),
++ SkColorGetG(fOrigColors[fColorCount - 1]),
++ SkColorGetB(fOrigColors[fColorCount - 1]));
++
+ return fCache32;
+ }
+
+diff --git a/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h b/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
+index 0e7c2fc..7427935 100644
+--- a/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
++++ b/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
+@@ -133,7 +133,10 @@ public:
+ kDitherStride32 = 0,
+ #endif
+ kDitherStride16 = kCache16Count,
+- kLerpRemainderMask32 = (1 << (16 - kCache32Bits)) - 1
++ kLerpRemainderMask32 = (1 << (16 - kCache32Bits)) - 1,
++
++ kCache32ClampLower = -1,
++ kCache32ClampUpper = kCache32Count * 2
+ };
+
+
+diff --git a/gfx/skia/src/effects/gradients/SkLinearGradient.cpp b/gfx/skia/src/effects/gradients/SkLinearGradient.cpp
+index bcebc26..d400b4d 100644
+--- a/gfx/skia/src/effects/gradients/SkLinearGradient.cpp
++++ b/gfx/skia/src/effects/gradients/SkLinearGradient.cpp
+@@ -126,6 +126,17 @@ void shadeSpan_linear_vertical(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
++ if (proc == clamp_tileproc) {
++ // No need to lerp or dither for clamp values
++ if (fx < 0) {
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampLower], count);
++ return;
++ } else if (fx > 0xffff) {
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampUpper], count);
++ return;
++ }
++ }
++
+ // We're a vertical gradient, so no change in a span.
+ // If colors change sharply across the gradient, dithering is
+ // insufficient (it subsamples the color space) and we need to lerp.
+@@ -144,6 +155,17 @@ void shadeSpan_linear_vertical_lerp(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
++ if (proc == clamp_tileproc) {
++ // No need to lerp or dither for clamp values
++ if (fx < 0) {
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampLower], count);
++ return;
++ } else if (fx > 0xffff) {
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampUpper], count);
++ return;
++ }
++ }
++
+ // We're a vertical gradient, so no change in a span.
+ // If colors change sharply across the gradient, dithering is
+ // insufficient (it subsamples the color space) and we need to lerp.
+@@ -169,10 +191,7 @@ void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx,
+ range.init(fx, dx, count, 0, SkGradientShaderBase::kGradient32Length);
+
+ if ((count = range.fCount0) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV0],
+- cache[(toggle ^ SkGradientShaderBase::kDitherStride32) + range.fV0],
+- count);
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampLower], count);
+ dstC += count;
+ }
+ if ((count = range.fCount1) > 0) {
+@@ -191,10 +210,7 @@ void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx,
+ }
+ }
+ if ((count = range.fCount2) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV1],
+- cache[(toggle ^ SkGradientShaderBase::kDitherStride32) + range.fV1],
+- count);
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampUpper], count);
+ }
+ }
+
+diff --git a/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp b/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp
+index 3466d2c..764a444 100644
+--- a/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp
++++ b/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp
+@@ -123,9 +123,14 @@ static void twopoint_clamp(TwoPtRadial* rec, SkPMColor* SK_RESTRICT dstC,
+ if (TwoPtRadial::DontDrawT(t)) {
+ *dstC++ = 0;
+ } else {
+- SkFixed index = SkClampMax(t, 0xFFFF);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> SkGradientShaderBase::kCache32Shift];
++ if (t < 0) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampLower];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampUpper];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> SkGradientShaderBase::kCache32Shift];
++ }
+ }
+ }
+ }
+diff --git a/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp b/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp
+index 9362ded..22b028e 100644
+--- a/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp
++++ b/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp
+@@ -120,9 +120,14 @@ void shadeSpan_twopoint_clamp(SkScalar fx, SkScalar dx,
+ for (; count > 0; --count) {
+ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+ fOneOverTwoA, posRoot);
+- SkFixed index = SkClampMax(t, 0xFFFF);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> SkGradientShaderBase::kCache32Shift];
++ if (t < 0) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampLower];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampUpper];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> SkGradientShaderBase::kCache32Shift];
++ }
+ fx += dx;
+ fy += dy;
+ b += db;
+--
+1.7.11.4
+
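For reference, a minimal standalone C++ sketch of the clamp-slot layout this patch introduces (the type, constant, and member names below are illustrative, not Skia's): the cache pointer is advanced one entry past the start of the allocation so that index -1 and index kCount * 2 hold the premultiplied 0.0 and 1.0 stop colours, letting the clamp tile mode skip lerping and dithering entirely.

    #include <cstdint>
    #include <vector>

    using PMColor = uint32_t;

    struct ClampedGradientCache {
        static const int kCount = 256;                 // hypothetical cache size

        ClampedGradientCache()
            : fStorage(kCount * 2 + 2, 0)              // two dither rows + two clamp slots
            , fCache(fStorage.data() + 1) {}           // advance by one so index -1 is valid

        void setClampColors(PMColor lower, PMColor upper) {
            fCache[-1]         = lower;                // plays the role of kCache32ClampLower
            fCache[kCount * 2] = upper;                // plays the role of kCache32ClampUpper
        }

        // t is a 16.16 gradient position; any overshoot maps straight to a clamp slot.
        PMColor lookup(int32_t t) const {
            if (t < 0)      return fCache[-1];
            if (t > 0xFFFF) return fCache[kCount * 2];
            return fCache[t >> 8];                     // 0..255: ordinary cache entry
        }

    private:
        std::vector<PMColor> fStorage;
        PMColor*             fCache;
    };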
diff --git a/gfx/skia/patches/archive/0006-Bug-751814-ARM-EDSP-ARMv6-Skia-fixes.patch b/gfx/skia/patches/archive/0006-Bug-751814-ARM-EDSP-ARMv6-Skia-fixes.patch
new file mode 100644
index 000000000..eb75691ad
--- /dev/null
+++ b/gfx/skia/patches/archive/0006-Bug-751814-ARM-EDSP-ARMv6-Skia-fixes.patch
@@ -0,0 +1,147 @@
+From 94916fbbc7865c6fe23a57d6edc48c6daf93dda8 Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:16:08 -0400
+Subject: [PATCH 06/10] Bug 755869 - [9] Re-apply bug 751814 - Various
+ Skia fixes for ARM without EDSP and ARMv6+
+ r=mattwoodrow
+
+---
+ gfx/skia/include/core/SkMath.h | 5 +--
+ gfx/skia/include/core/SkPostConfig.h | 45 ++++++++++++++++++++++
+ gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp | 6 +-
+ gfx/skia/src/opts/SkBlitRow_opts_arm.cpp | 9 ++++
+ 4 files changed, 58 insertions(+), 7 deletions(-)
+
+diff --git a/gfx/skia/include/core/SkMath.h b/gfx/skia/include/core/SkMath.h
+index 5889103..7a4b707 100644
+--- a/gfx/skia/include/core/SkMath.h
++++ b/gfx/skia/include/core/SkMath.h
+@@ -153,10 +153,7 @@ static inline bool SkIsPow2(int value) {
+ With this requirement, we can generate faster instructions on some
+ architectures.
+ */
+-#if defined(__arm__) \
+- && !defined(__thumb__) \
+- && !defined(__ARM_ARCH_4T__) \
+- && !defined(__ARM_ARCH_5T__)
++#ifdef SK_ARM_HAS_EDSP
+ static inline int32_t SkMulS16(S16CPU x, S16CPU y) {
+ SkASSERT((int16_t)x == x);
+ SkASSERT((int16_t)y == y);
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+index 041fe2a..03105e4 100644
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -311,3 +311,48 @@
+ #ifndef SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
+ #define SK_ALLOW_STATIC_GLOBAL_INITIALIZERS 1
+ #endif
++
++//////////////////////////////////////////////////////////////////////
++// ARM defines
++
++#if defined(__GNUC__) && defined(__arm__)
++
++# define SK_ARM_ARCH 3
++
++# if defined(__ARM_ARCH_4__) || defined(__ARM_ARCH_4T__) \
++ || defined(_ARM_ARCH_4)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 4
++# endif
++
++# if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
++ || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
++ || defined(__ARM_ARCH_5TEJ__) || defined(_ARM_ARCH_5)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 5
++# endif
++
++# if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
++ || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
++ || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \
++ || defined(__ARM_ARCH_6M__) || defined(_ARM_ARCH_6)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 6
++# endif
++
++# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
++ || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
++ || defined(__ARM_ARCH_7EM__) || defined(_ARM_ARCH_7)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 7
++# endif
++
++# undef SK_ARM_HAS_EDSP
++# if defined(__thumb2__) && (SK_ARM_ARCH >= 6) \
++ || !defined(__thumb__) \
++ && ((SK_ARM_ARCH > 5) || defined(__ARM_ARCH_5E__) \
++ || defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__))
++# define SK_ARM_HAS_EDSP 1
++# endif
++
++#endif
+diff --git a/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp b/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp
+index 20d62e1..deb1bfe 100644
+--- a/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp
++++ b/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp
+@@ -11,7 +11,7 @@
+ #include "SkColorPriv.h"
+ #include "SkUtils.h"
+
+-#if __ARM_ARCH__ >= 6 && !defined(SK_CPU_BENDIAN)
++#if SK_ARM_ARCH >= 6 && !defined(SK_CPU_BENDIAN)
+ void SI8_D16_nofilter_DX_arm(
+ const SkBitmapProcState& s,
+ const uint32_t* SK_RESTRICT xy,
+@@ -182,7 +182,7 @@ void SI8_opaque_D32_nofilter_DX_arm(const SkBitmapProcState& s,
+
+ s.fBitmap->getColorTable()->unlockColors(false);
+ }
+-#endif //__ARM_ARCH__ >= 6 && !defined(SK_CPU_BENDIAN)
++#endif // SK_ARM_ARCH >= 6 && !defined(SK_CPU_BENDIAN)
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+@@ -200,7 +200,7 @@ void SkBitmapProcState::platformProcs() {
+
+ switch (fBitmap->config()) {
+ case SkBitmap::kIndex8_Config:
+-#if __ARM_ARCH__ >= 6 && !defined(SK_CPU_BENDIAN)
++#if SK_ARM_ARCH >= 6 && !defined(SK_CPU_BENDIAN)
+ if (justDx && !doFilter) {
+ #if 0 /* crashing on android device */
+ fSampleProc16 = SI8_D16_nofilter_DX_arm;
+diff --git a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+index 2490371..c928888 100644
+--- a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
++++ b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+@@ -675,8 +675,13 @@ static void __attribute((noinline,optimize("-fomit-frame-pointer"))) S32A_Blend_
+ /* dst1_scale and dst2_scale*/
+ "lsr r9, r5, #24 \n\t" /* src >> 24 */
+ "lsr r10, r6, #24 \n\t" /* src >> 24 */
++#ifdef SK_ARM_HAS_EDSP
+ "smulbb r9, r9, %[alpha] \n\t" /* r9 = SkMulS16 r9 with src_scale */
+ "smulbb r10, r10, %[alpha] \n\t" /* r10 = SkMulS16 r10 with src_scale */
++#else
++ "mul r9, r9, %[alpha] \n\t" /* r9 = SkMulS16 r9 with src_scale */
++ "mul r10, r10, %[alpha] \n\t" /* r10 = SkMulS16 r10 with src_scale */
++#endif
+ "lsr r9, r9, #8 \n\t" /* r9 >> 8 */
+ "lsr r10, r10, #8 \n\t" /* r10 >> 8 */
+ "rsb r9, r9, #256 \n\t" /* dst1_scale = r9 = 255 - r9 + 1 */
+@@ -745,7 +750,11 @@ static void __attribute((noinline,optimize("-fomit-frame-pointer"))) S32A_Blend_
+
+ "lsr r6, r5, #24 \n\t" /* src >> 24 */
+ "and r8, r12, r5, lsr #8 \n\t" /* ag = r8 = r5 masked by r12 lsr by #8 */
++#ifdef SK_ARM_HAS_EDSP
+ "smulbb r6, r6, %[alpha] \n\t" /* r6 = SkMulS16 with src_scale */
++#else
++ "mul r6, r6, %[alpha] \n\t" /* r6 = SkMulS16 with src_scale */
++#endif
+ "and r9, r12, r5 \n\t" /* rb = r9 = r5 masked by r12 */
+ "lsr r6, r6, #8 \n\t" /* r6 >> 8 */
+ "mul r8, r8, %[alpha] \n\t" /* ag = r8 times scale */
+--
+1.7.5.4
+
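A minimal sketch of how code downstream of this patch can key off the SK_ARM_HAS_EDSP define it adds; the helper name is this example's, the asm path mirrors the guarded smulbb use above, and the fallback is plain, portable C++.

    #include <cstdint>

    static inline int32_t mul_s16_sketch(int16_t x, int16_t y) {
    #if defined(SK_ARM_HAS_EDSP)
        int32_t result;
        __asm__("smulbb %0, %1, %2" : "=r"(result) : "r"(x), "r"(y));
        return result;
    #else
        return static_cast<int32_t>(x) * y;    // works on any architecture
    #endif
    }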
diff --git a/gfx/skia/patches/archive/0006-Bug-848491-Re-apply-Bug-777614-Add-our-SkUserConfig..patch b/gfx/skia/patches/archive/0006-Bug-848491-Re-apply-Bug-777614-Add-our-SkUserConfig..patch
new file mode 100644
index 000000000..2850000ac
--- /dev/null
+++ b/gfx/skia/patches/archive/0006-Bug-848491-Re-apply-Bug-777614-Add-our-SkUserConfig..patch
@@ -0,0 +1,27 @@
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 25 Apr 2013 20:40:12 -0400
+Subject: Bug 848491 - Re-apply Bug 777614 - Add our SkUserConfig.h
+
+
+diff --git a/gfx/skia/include/config/SkUserConfig.h b/gfx/skia/include/config/SkUserConfig.h
+index 63fc90d..c965e91 100644
+--- a/gfx/skia/include/config/SkUserConfig.h
++++ b/gfx/skia/include/config/SkUserConfig.h
+@@ -201,4 +201,14 @@
+ */
+ //#define SK_SUPPORT_GPU 1
+
++/* Don't dither 32bit gradients, to match what the canvas test suite expects.
++ */
++#define SK_DISABLE_DITHER_32BIT_GRADIENT
++
++/* Don't include stdint.h on windows as it conflicts with our build system.
++ */
++#ifdef SK_BUILD_FOR_WIN32
++ #define SK_IGNORE_STDINT_DOT_H
++#endif
++
+ #endif
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0007-Bug-719872-Old-Android-FontHost.patch b/gfx/skia/patches/archive/0007-Bug-719872-Old-Android-FontHost.patch
new file mode 100644
index 000000000..ca34e1a45
--- /dev/null
+++ b/gfx/skia/patches/archive/0007-Bug-719872-Old-Android-FontHost.patch
@@ -0,0 +1,702 @@
+From 6982ad469adcdfa2b7bdbf8bbd843bc22d3832fc Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:52:40 -0400
+Subject: [PATCH 07/10] Bug 755869 - [10] Re-apply bug 719872 - Fix crash
+ on Android by reverting to older FontHost impl
+ r=mattwoodrow
+
+---
+ gfx/skia/Makefile.in | 5 +-
+ gfx/skia/src/ports/SkFontHost_android_old.cpp | 664 +++++++++++++++++++++++++
+ 2 files changed, 668 insertions(+), 1 deletions(-)
+ create mode 100644 gfx/skia/src/ports/SkFontHost_android_old.cpp
+
+diff --git a/gfx/skia/Makefile.in b/gfx/skia/Makefile.in
+index 9da098a..8184f1c 100644
+--- a/gfx/skia/Makefile.in
++++ b/gfx/skia/Makefile.in
+@@ -327,7 +327,10 @@ endif
+ ifeq (android,$(MOZ_WIDGET_TOOLKIT))
+ CPPSRCS += \
+ SkDebug_android.cpp \
+- SkFontHost_none.cpp \
++ SkFontHost_android_old.cpp \
++ SkFontHost_gamma.cpp \
++ SkFontHost_FreeType.cpp \
++ SkFontHost_tables.cpp \
+ SkMMapStream.cpp \
+ SkTime_Unix.cpp \
+ SkThread_pthread.cpp \
+diff --git a/gfx/skia/src/ports/SkFontHost_android_old.cpp b/gfx/skia/src/ports/SkFontHost_android_old.cpp
+new file mode 100644
+index 0000000..b5c4f3c
+--- /dev/null
++++ b/gfx/skia/src/ports/SkFontHost_android_old.cpp
+@@ -0,0 +1,664 @@
++
++/*
++ * Copyright 2006 The Android Open Source Project
++ *
++ * Use of this source code is governed by a BSD-style license that can be
++ * found in the LICENSE file.
++ */
++
++
++#include "SkFontHost.h"
++#include "SkDescriptor.h"
++#include "SkMMapStream.h"
++#include "SkPaint.h"
++#include "SkString.h"
++#include "SkStream.h"
++#include "SkThread.h"
++#include "SkTSearch.h"
++#include <stdio.h>
++
++#define FONT_CACHE_MEMORY_BUDGET (768 * 1024)
++
++#ifndef SK_FONT_FILE_PREFIX
++ #define SK_FONT_FILE_PREFIX "/fonts/"
++#endif
++
++bool find_name_and_attributes(SkStream* stream, SkString* name, SkTypeface::Style* style,
++ bool* isFixedWidth);
++
++static void GetFullPathForSysFonts(SkString* full, const char name[]) {
++ full->set(getenv("ANDROID_ROOT"));
++ full->append(SK_FONT_FILE_PREFIX);
++ full->append(name);
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++struct FamilyRec;
++
++/* This guy holds a mapping of a name -> family, used for looking up fonts.
++ Since it is stored in a stretchy array that doesn't preserve object
++ semantics, we don't use constructor/destructors, but just have explicit
++ helpers to manage our internal bookkeeping.
++*/
++struct NameFamilyPair {
++ const char* fName; // we own this
++ FamilyRec* fFamily; // we don't own this, we just reference it
++
++ void construct(const char name[], FamilyRec* family) {
++ fName = strdup(name);
++        fFamily = family; // we don't own this, so just record the reference
++ }
++
++ void destruct() {
++ free((char*)fName);
++ // we don't own family, so just ignore our reference
++ }
++};
++
++// we use atomic_inc to grow this for each typeface we create
++static int32_t gUniqueFontID;
++
++// this is the mutex that protects these globals
++static SkMutex gFamilyMutex;
++static FamilyRec* gFamilyHead;
++static SkTDArray<NameFamilyPair> gNameList;
++
++struct FamilyRec {
++ FamilyRec* fNext;
++ SkTypeface* fFaces[4];
++
++ FamilyRec()
++ {
++ fNext = gFamilyHead;
++ memset(fFaces, 0, sizeof(fFaces));
++ gFamilyHead = this;
++ }
++};
++
++static SkTypeface* find_best_face(const FamilyRec* family,
++ SkTypeface::Style style) {
++ SkTypeface* const* faces = family->fFaces;
++
++ if (faces[style] != NULL) { // exact match
++ return faces[style];
++ }
++ // look for a matching bold
++ style = (SkTypeface::Style)(style ^ SkTypeface::kItalic);
++ if (faces[style] != NULL) {
++ return faces[style];
++ }
++ // look for the plain
++ if (faces[SkTypeface::kNormal] != NULL) {
++ return faces[SkTypeface::kNormal];
++ }
++ // look for anything
++ for (int i = 0; i < 4; i++) {
++ if (faces[i] != NULL) {
++ return faces[i];
++ }
++ }
++ // should never get here, since the faces list should not be empty
++ SkASSERT(!"faces list is empty");
++ return NULL;
++}
++
++static FamilyRec* find_family(const SkTypeface* member) {
++ FamilyRec* curr = gFamilyHead;
++ while (curr != NULL) {
++ for (int i = 0; i < 4; i++) {
++ if (curr->fFaces[i] == member) {
++ return curr;
++ }
++ }
++ curr = curr->fNext;
++ }
++ return NULL;
++}
++
++/* Returns the matching typeface, or NULL. If a typeface is found, its refcnt
++ is not modified.
++ */
++static SkTypeface* find_from_uniqueID(uint32_t uniqueID) {
++ FamilyRec* curr = gFamilyHead;
++ while (curr != NULL) {
++ for (int i = 0; i < 4; i++) {
++ SkTypeface* face = curr->fFaces[i];
++ if (face != NULL && face->uniqueID() == uniqueID) {
++ return face;
++ }
++ }
++ curr = curr->fNext;
++ }
++ return NULL;
++}
++
++/* Remove reference to this face from its family. If the resulting family
++ is empty (has no faces), return that family, otherwise return NULL
++*/
++static FamilyRec* remove_from_family(const SkTypeface* face) {
++ FamilyRec* family = find_family(face);
++ SkASSERT(family->fFaces[face->style()] == face);
++ family->fFaces[face->style()] = NULL;
++
++ for (int i = 0; i < 4; i++) {
++ if (family->fFaces[i] != NULL) { // family is non-empty
++ return NULL;
++ }
++ }
++ return family; // return the empty family
++}
++
++// maybe we should make FamilyRec be doubly-linked
++static void detach_and_delete_family(FamilyRec* family) {
++ FamilyRec* curr = gFamilyHead;
++ FamilyRec* prev = NULL;
++
++ while (curr != NULL) {
++ FamilyRec* next = curr->fNext;
++ if (curr == family) {
++ if (prev == NULL) {
++ gFamilyHead = next;
++ } else {
++ prev->fNext = next;
++ }
++ SkDELETE(family);
++ return;
++ }
++ prev = curr;
++ curr = next;
++ }
++ SkASSERT(!"Yikes, couldn't find family in our list to remove/delete");
++}
++
++static SkTypeface* find_typeface(const char name[], SkTypeface::Style style) {
++ NameFamilyPair* list = gNameList.begin();
++ int count = gNameList.count();
++
++ int index = SkStrLCSearch(&list[0].fName, count, name, sizeof(list[0]));
++
++ if (index >= 0) {
++ return find_best_face(list[index].fFamily, style);
++ }
++ return NULL;
++}
++
++static SkTypeface* find_typeface(const SkTypeface* familyMember,
++ SkTypeface::Style style) {
++ const FamilyRec* family = find_family(familyMember);
++ return family ? find_best_face(family, style) : NULL;
++}
++
++static void add_name(const char name[], FamilyRec* family) {
++ SkAutoAsciiToLC tolc(name);
++ name = tolc.lc();
++
++ NameFamilyPair* list = gNameList.begin();
++ int count = gNameList.count();
++
++ int index = SkStrLCSearch(&list[0].fName, count, name, sizeof(list[0]));
++
++ if (index < 0) {
++ list = gNameList.insert(~index);
++ list->construct(name, family);
++ }
++}
++
++static void remove_from_names(FamilyRec* emptyFamily)
++{
++#ifdef SK_DEBUG
++ for (int i = 0; i < 4; i++) {
++ SkASSERT(emptyFamily->fFaces[i] == NULL);
++ }
++#endif
++
++ SkTDArray<NameFamilyPair>& list = gNameList;
++
++ // must go backwards when removing
++ for (int i = list.count() - 1; i >= 0; --i) {
++ NameFamilyPair* pair = &list[i];
++ if (pair->fFamily == emptyFamily) {
++ pair->destruct();
++ list.remove(i);
++ }
++ }
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++class FamilyTypeface : public SkTypeface {
++public:
++ FamilyTypeface(Style style, bool sysFont, SkTypeface* familyMember,
++ bool isFixedWidth)
++ : SkTypeface(style, sk_atomic_inc(&gUniqueFontID) + 1, isFixedWidth) {
++ fIsSysFont = sysFont;
++
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ FamilyRec* rec = NULL;
++ if (familyMember) {
++ rec = find_family(familyMember);
++ SkASSERT(rec);
++ } else {
++ rec = SkNEW(FamilyRec);
++ }
++ rec->fFaces[style] = this;
++ }
++
++ virtual ~FamilyTypeface() {
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ // remove us from our family. If the family is now empty, we return
++ // that and then remove that family from the name list
++ FamilyRec* family = remove_from_family(this);
++ if (NULL != family) {
++ remove_from_names(family);
++ detach_and_delete_family(family);
++ }
++ }
++
++ bool isSysFont() const { return fIsSysFont; }
++
++ virtual SkStream* openStream() = 0;
++ virtual const char* getUniqueString() const = 0;
++ virtual const char* getFilePath() const = 0;
++
++private:
++ bool fIsSysFont;
++
++ typedef SkTypeface INHERITED;
++};
++
++///////////////////////////////////////////////////////////////////////////////
++
++class StreamTypeface : public FamilyTypeface {
++public:
++ StreamTypeface(Style style, bool sysFont, SkTypeface* familyMember,
++ SkStream* stream, bool isFixedWidth)
++ : INHERITED(style, sysFont, familyMember, isFixedWidth) {
++ SkASSERT(stream);
++ stream->ref();
++ fStream = stream;
++ }
++ virtual ~StreamTypeface() {
++ fStream->unref();
++ }
++
++ // overrides
++ virtual SkStream* openStream() {
++ // we just ref our existing stream, since the caller will call unref()
++ // when they are through
++ fStream->ref();
++ // must rewind each time, since the caller assumes a "new" stream
++ fStream->rewind();
++ return fStream;
++ }
++ virtual const char* getUniqueString() const { return NULL; }
++ virtual const char* getFilePath() const { return NULL; }
++
++private:
++ SkStream* fStream;
++
++ typedef FamilyTypeface INHERITED;
++};
++
++class FileTypeface : public FamilyTypeface {
++public:
++ FileTypeface(Style style, bool sysFont, SkTypeface* familyMember,
++ const char path[], bool isFixedWidth)
++ : INHERITED(style, sysFont, familyMember, isFixedWidth) {
++ SkString fullpath;
++
++ if (sysFont) {
++ GetFullPathForSysFonts(&fullpath, path);
++ path = fullpath.c_str();
++ }
++ fPath.set(path);
++ }
++
++ // overrides
++ virtual SkStream* openStream() {
++ SkStream* stream = SkNEW_ARGS(SkMMAPStream, (fPath.c_str()));
++
++ // check for failure
++ if (stream->getLength() <= 0) {
++ SkDELETE(stream);
++ // maybe MMAP isn't supported. try FILE
++ stream = SkNEW_ARGS(SkFILEStream, (fPath.c_str()));
++ if (stream->getLength() <= 0) {
++ SkDELETE(stream);
++ stream = NULL;
++ }
++ }
++ return stream;
++ }
++ virtual const char* getUniqueString() const {
++ const char* str = strrchr(fPath.c_str(), '/');
++ if (str) {
++ str += 1; // skip the '/'
++ }
++ return str;
++ }
++ virtual const char* getFilePath() const {
++ return fPath.c_str();
++ }
++
++private:
++ SkString fPath;
++
++ typedef FamilyTypeface INHERITED;
++};
++
++///////////////////////////////////////////////////////////////////////////////
++///////////////////////////////////////////////////////////////////////////////
++
++static bool get_name_and_style(const char path[], SkString* name,
++ SkTypeface::Style* style,
++ bool* isFixedWidth, bool isExpected) {
++ SkString fullpath;
++ GetFullPathForSysFonts(&fullpath, path);
++
++ SkMMAPStream stream(fullpath.c_str());
++ if (stream.getLength() > 0) {
++ find_name_and_attributes(&stream, name, style, isFixedWidth);
++ return true;
++ }
++ else {
++ SkFILEStream stream(fullpath.c_str());
++ if (stream.getLength() > 0) {
++ find_name_and_attributes(&stream, name, style, isFixedWidth);
++ return true;
++ }
++ }
++
++ if (isExpected) {
++ SkDebugf("---- failed to open <%s> as a font\n", fullpath.c_str());
++ }
++ return false;
++}
++
++// used to record our notion of the pre-existing fonts
++struct FontInitRec {
++ const char* fFileName;
++ const char* const* fNames; // null-terminated list
++};
++
++static const char* gSansNames[] = {
++ "sans-serif", "arial", "helvetica", "tahoma", "verdana", NULL
++};
++
++static const char* gSerifNames[] = {
++ "serif", "times", "times new roman", "palatino", "georgia", "baskerville",
++ "goudy", "fantasy", "cursive", "ITC Stone Serif", NULL
++};
++
++static const char* gMonoNames[] = {
++ "monospace", "courier", "courier new", "monaco", NULL
++};
++
++// deliberately empty, but we use the address to identify fallback fonts
++static const char* gFBNames[] = { NULL };
++
++/* Fonts must be grouped by family, with the first font in a family having the
++ list of names (even if that list is empty), and the following members having
++ null for the list. The names list must be NULL-terminated
++*/
++static const FontInitRec gSystemFonts[] = {
++ { "DroidSans.ttf", gSansNames },
++ { "DroidSans-Bold.ttf", NULL },
++ { "DroidSerif-Regular.ttf", gSerifNames },
++ { "DroidSerif-Bold.ttf", NULL },
++ { "DroidSerif-Italic.ttf", NULL },
++ { "DroidSerif-BoldItalic.ttf", NULL },
++ { "DroidSansMono.ttf", gMonoNames },
++ /* These are optional, and can be ignored if not found in the file system.
++ These are appended to gFallbackFonts[] as they are seen, so we list
++ them in the order we want them to be accessed by NextLogicalFont().
++ */
++ { "DroidSansArabic.ttf", gFBNames },
++ { "DroidSansHebrew.ttf", gFBNames },
++ { "DroidSansThai.ttf", gFBNames },
++ { "MTLmr3m.ttf", gFBNames }, // Motoya Japanese Font
++ { "MTLc3m.ttf", gFBNames }, // Motoya Japanese Font
++ { "DroidSansJapanese.ttf", gFBNames },
++ { "DroidSansFallback.ttf", gFBNames }
++};
++
++#define DEFAULT_NAMES gSansNames
++
++// these globals are assigned (once) by load_system_fonts()
++static FamilyRec* gDefaultFamily;
++static SkTypeface* gDefaultNormal;
++
++/* This is sized conservatively, assuming that it will never be a size issue.
++ It will be initialized in load_system_fonts(), and will be filled with the
++ fontIDs that can be used for fallback consideration, in sorted order (sorted
++ meaning element[0] should be used first, then element[1], etc. When we hit
++ a fontID==0 in the array, the list is done, hence our allocation size is
++ +1 the total number of possible system fonts. Also see NextLogicalFont().
++ */
++static uint32_t gFallbackFonts[SK_ARRAY_COUNT(gSystemFonts)+1];
++
++/* Called once (ensured by the sentinel check at the beginning of our body).
++ Initializes all the globals, and register the system fonts.
++ */
++static void load_system_fonts() {
++    // check if we've already been called
++ if (NULL != gDefaultNormal) {
++ return;
++ }
++
++ const FontInitRec* rec = gSystemFonts;
++ SkTypeface* firstInFamily = NULL;
++ int fallbackCount = 0;
++
++ for (size_t i = 0; i < SK_ARRAY_COUNT(gSystemFonts); i++) {
++ // if we're the first in a new family, clear firstInFamily
++ if (rec[i].fNames != NULL) {
++ firstInFamily = NULL;
++ }
++
++ bool isFixedWidth;
++ SkString name;
++ SkTypeface::Style style;
++
++ // we expect all the fonts, except the "fallback" fonts
++ bool isExpected = (rec[i].fNames != gFBNames);
++ if (!get_name_and_style(rec[i].fFileName, &name, &style,
++ &isFixedWidth, isExpected)) {
++ continue;
++ }
++
++ SkTypeface* tf = SkNEW_ARGS(FileTypeface,
++ (style,
++ true, // system-font (cannot delete)
++ firstInFamily, // what family to join
++ rec[i].fFileName,
++ isFixedWidth) // filename
++ );
++
++ if (rec[i].fNames != NULL) {
++ // see if this is one of our fallback fonts
++ if (rec[i].fNames == gFBNames) {
++ // SkDebugf("---- adding %s as fallback[%d] fontID %d\n",
++ // rec[i].fFileName, fallbackCount, tf->uniqueID());
++ gFallbackFonts[fallbackCount++] = tf->uniqueID();
++ }
++
++ firstInFamily = tf;
++ FamilyRec* family = find_family(tf);
++ const char* const* names = rec[i].fNames;
++
++ // record the default family if this is it
++ if (names == DEFAULT_NAMES) {
++ gDefaultFamily = family;
++ }
++ // add the names to map to this family
++ while (*names) {
++ add_name(*names, family);
++ names += 1;
++ }
++ }
++ }
++
++ // do this after all fonts are loaded. This is our default font, and it
++ // acts as a sentinel so we only execute load_system_fonts() once
++ gDefaultNormal = find_best_face(gDefaultFamily, SkTypeface::kNormal);
++ // now terminate our fallback list with the sentinel value
++ gFallbackFonts[fallbackCount] = 0;
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++void SkFontHost::Serialize(const SkTypeface* face, SkWStream* stream) {
++ const char* name = ((FamilyTypeface*)face)->getUniqueString();
++
++ stream->write8((uint8_t)face->style());
++
++ if (NULL == name || 0 == *name) {
++ stream->writePackedUInt(0);
++// SkDebugf("--- fonthost serialize null\n");
++ } else {
++ uint32_t len = strlen(name);
++ stream->writePackedUInt(len);
++ stream->write(name, len);
++// SkDebugf("--- fonthost serialize <%s> %d\n", name, face->style());
++ }
++}
++
++SkTypeface* SkFontHost::Deserialize(SkStream* stream) {
++ load_system_fonts();
++
++ int style = stream->readU8();
++
++ int len = stream->readPackedUInt();
++ if (len > 0) {
++ SkString str;
++ str.resize(len);
++ stream->read(str.writable_str(), len);
++
++ const FontInitRec* rec = gSystemFonts;
++ for (size_t i = 0; i < SK_ARRAY_COUNT(gSystemFonts); i++) {
++ if (strcmp(rec[i].fFileName, str.c_str()) == 0) {
++ // backup until we hit the fNames
++ for (int j = i; j >= 0; --j) {
++ if (rec[j].fNames != NULL) {
++ return SkFontHost::CreateTypeface(NULL,
++ rec[j].fNames[0], (SkTypeface::Style)style);
++ }
++ }
++ }
++ }
++ }
++ return NULL;
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++SkTypeface* SkFontHost::CreateTypeface(const SkTypeface* familyFace,
++ const char familyName[],
++ SkTypeface::Style style) {
++ load_system_fonts();
++
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ // clip to legal style bits
++ style = (SkTypeface::Style)(style & SkTypeface::kBoldItalic);
++
++ SkTypeface* tf = NULL;
++
++ if (NULL != familyFace) {
++ tf = find_typeface(familyFace, style);
++ } else if (NULL != familyName) {
++// SkDebugf("======= familyName <%s>\n", familyName);
++ tf = find_typeface(familyName, style);
++ }
++
++ if (NULL == tf) {
++ tf = find_best_face(gDefaultFamily, style);
++ }
++
++    // we ref(), since the semantic is to return a new instance
++ tf->ref();
++ return tf;
++}
++
++SkStream* SkFontHost::OpenStream(uint32_t fontID) {
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ FamilyTypeface* tf = (FamilyTypeface*)find_from_uniqueID(fontID);
++ SkStream* stream = tf ? tf->openStream() : NULL;
++
++ if (stream && stream->getLength() == 0) {
++ stream->unref();
++ stream = NULL;
++ }
++ return stream;
++}
++
++size_t SkFontHost::GetFileName(SkFontID fontID, char path[], size_t length,
++ int32_t* index) {
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ FamilyTypeface* tf = (FamilyTypeface*)find_from_uniqueID(fontID);
++ const char* src = tf ? tf->getFilePath() : NULL;
++
++ if (src) {
++ size_t size = strlen(src);
++ if (path) {
++ memcpy(path, src, SkMin32(size, length));
++ }
++ if (index) {
++ *index = 0; // we don't have collections (yet)
++ }
++ return size;
++ } else {
++ return 0;
++ }
++}
++
++SkFontID SkFontHost::NextLogicalFont(SkFontID currFontID, SkFontID origFontID) {
++ load_system_fonts();
++
++ /* First see if fontID is already one of our fallbacks. If so, return
++ its successor. If fontID is not in our list, then return the first one
++ in our list. Note: list is zero-terminated, and returning zero means
++ we have no more fonts to use for fallbacks.
++ */
++ const uint32_t* list = gFallbackFonts;
++ for (int i = 0; list[i] != 0; i++) {
++ if (list[i] == currFontID) {
++ return list[i+1];
++ }
++ }
++ return list[0];
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++SkTypeface* SkFontHost::CreateTypefaceFromStream(SkStream* stream) {
++ if (NULL == stream || stream->getLength() <= 0) {
++ return NULL;
++ }
++
++ bool isFixedWidth;
++ SkString name;
++ SkTypeface::Style style;
++ find_name_and_attributes(stream, &name, &style, &isFixedWidth);
++
++ if (!name.isEmpty()) {
++ return SkNEW_ARGS(StreamTypeface, (style, false, NULL, stream, isFixedWidth));
++ } else {
++ return NULL;
++ }
++}
++
++SkTypeface* SkFontHost::CreateTypefaceFromFile(const char path[]) {
++ SkStream* stream = SkNEW_ARGS(SkMMAPStream, (path));
++ SkTypeface* face = SkFontHost::CreateTypefaceFromStream(stream);
++ // since we created the stream, we let go of our ref() here
++ stream->unref();
++ return face;
++}
++
++///////////////////////////////////////////////////////////////////////////////
+--
+1.7.5.4
+
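A small standalone sketch (the IDs and names are placeholders, not Skia's) of the zero-terminated fallback-list walk that NextLogicalFont() performs in the restored FontHost: return the successor of the current fallback ID, fall back to the head of the list if the ID is not a fallback, and treat 0 as "no more fonts".

    #include <cstdint>

    static const uint32_t kFallbackIDs[] = { 17, 18, 19, 0 };   // placeholder font IDs

    static uint32_t next_logical_font_sketch(uint32_t currFontID) {
        for (int i = 0; kFallbackIDs[i] != 0; ++i) {
            if (kFallbackIDs[i] == currFontID) {
                return kFallbackIDs[i + 1];    // may be 0: list exhausted
            }
        }
        return kFallbackIDs[0];                // not a fallback yet, start at the head
    }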
diff --git a/gfx/skia/patches/archive/0007-Bug-848491-Re-apply-bug-687188-Expand-the-gradient-c.patch b/gfx/skia/patches/archive/0007-Bug-848491-Re-apply-bug-687188-Expand-the-gradient-c.patch
new file mode 100644
index 000000000..73bca9a48
--- /dev/null
+++ b/gfx/skia/patches/archive/0007-Bug-848491-Re-apply-bug-687188-Expand-the-gradient-c.patch
@@ -0,0 +1,168 @@
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 25 Apr 2013 20:47:06 -0400
+Subject: Bug 848491 - Re-apply bug 687188 - Expand the gradient cache by 2 to store 0/1 colour stop values for clamping.
+
+
+diff --git a/gfx/skia/src/effects/gradients/SkGradientShader.cpp b/gfx/skia/src/effects/gradients/SkGradientShader.cpp
+index 684355d..27a9c46 100644
+--- a/gfx/skia/src/effects/gradients/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/gradients/SkGradientShader.cpp
+@@ -453,15 +453,15 @@ const uint16_t* SkGradientShaderBase::getCache16() const {
+
+ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ if (fCache32 == NULL) {
+- // double the count for dither entries
+- const int entryCount = kCache32Count * 4;
++ // double the count for dither entries, and have an extra two entries for clamp values
++ const int entryCount = kCache32Count * 4 + 2;
+ const size_t allocSize = sizeof(SkPMColor) * entryCount;
+
+ if (NULL == fCache32PixelRef) {
+ fCache32PixelRef = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ }
+- fCache32 = (SkPMColor*)fCache32PixelRef->getAddr();
++ fCache32 = (SkPMColor*)fCache32PixelRef->getAddr() + 1;
+ if (fColorCount == 2) {
+ Build32bitCache(fCache32, fOrigColors[0], fOrigColors[1],
+ kCache32Count, fCacheAlpha);
+@@ -484,7 +484,7 @@ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ SkMallocPixelRef* newPR = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ SkPMColor* linear = fCache32; // just computed linear data
+- SkPMColor* mapped = (SkPMColor*)newPR->getAddr(); // storage for mapped data
++ SkPMColor* mapped = (SkPMColor*)newPR->getAddr() + 1; // storage for mapped data
+ SkUnitMapper* map = fMapper;
+ for (int i = 0; i < kCache32Count; i++) {
+ int index = map->mapUnit16((i << 8) | i) >> 8;
+@@ -495,9 +495,21 @@ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ }
+ fCache32PixelRef->unref();
+ fCache32PixelRef = newPR;
+- fCache32 = (SkPMColor*)newPR->getAddr();
++ fCache32 = (SkPMColor*)newPR->getAddr() + 1;
+ }
+ }
++
++ // Write the clamp colours into the first and last entries of fCache32
++ fCache32[kCache32ClampLower] = SkPackARGB32(fCacheAlpha,
++ SkColorGetR(fOrigColors[0]),
++ SkColorGetG(fOrigColors[0]),
++ SkColorGetB(fOrigColors[0]));
++
++ fCache32[kCache32ClampUpper] = SkPackARGB32(fCacheAlpha,
++ SkColorGetR(fOrigColors[fColorCount - 1]),
++ SkColorGetG(fOrigColors[fColorCount - 1]),
++ SkColorGetB(fOrigColors[fColorCount - 1]));
++
+ return fCache32;
+ }
+
+diff --git a/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h b/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
+index 729ce4e..2cb6a9d 100644
+--- a/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
++++ b/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
+@@ -86,6 +86,9 @@ public:
+ /// if dithering is disabled.
+ kDitherStride32 = kCache32Count,
+ kDitherStride16 = kCache16Count,
++
++ kCache32ClampLower = -1,
++ kCache32ClampUpper = kCache32Count * 4
+ };
+
+
+diff --git a/gfx/skia/src/effects/gradients/SkLinearGradient.cpp b/gfx/skia/src/effects/gradients/SkLinearGradient.cpp
+index e0f216c..40ab918 100644
+--- a/gfx/skia/src/effects/gradients/SkLinearGradient.cpp
++++ b/gfx/skia/src/effects/gradients/SkLinearGradient.cpp
+@@ -127,6 +127,17 @@ void shadeSpan_linear_vertical_lerp(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
++ if (proc == clamp_tileproc) {
++ // No need to lerp or dither for clamp values
++ if (fx < 0) {
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampLower], count);
++ return;
++ } else if (fx > 0xffff) {
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampUpper], count);
++ return;
++ }
++ }
++
+ // We're a vertical gradient, so no change in a span.
+ // If colors change sharply across the gradient, dithering is
+ // insufficient (it subsamples the color space) and we need to lerp.
+@@ -154,10 +165,7 @@ void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx,
+ range.init(fx, dx, count, 0, SkGradientShaderBase::kCache32Count - 1);
+
+ if ((count = range.fCount0) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV0],
+- cache[next_dither_toggle(toggle) + range.fV0],
+- count);
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampLower], count);
+ dstC += count;
+ }
+ if ((count = range.fCount1) > 0) {
+@@ -176,10 +184,7 @@ void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx,
+ }
+ }
+ if ((count = range.fCount2) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV1],
+- cache[next_dither_toggle(toggle) + range.fV1],
+- count);
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampUpper], count);
+ }
+ }
+
+diff --git a/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp b/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp
+index abd974b..601fff4 100644
+--- a/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp
++++ b/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp
+@@ -124,10 +124,14 @@ static void twopoint_clamp(TwoPtRadial* rec, SkPMColor* SK_RESTRICT dstC,
+ if (TwoPtRadial::DontDrawT(t)) {
+ *dstC++ = 0;
+ } else {
+- SkFixed index = SkClampMax(t, 0xFFFF);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[toggle +
+- (index >> SkGradientShaderBase::kCache32Shift)];
++ if (t < 0) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampLower];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampUpper];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> SkGradientShaderBase::kCache32Shift];
++ }
+ }
+ toggle = next_dither_toggle(toggle);
+ }
+diff --git a/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp b/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp
+index f70b67d..ec2ae75 100644
+--- a/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp
++++ b/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp
+@@ -120,9 +120,14 @@ void shadeSpan_twopoint_clamp(SkScalar fx, SkScalar dx,
+ for (; count > 0; --count) {
+ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+ fOneOverTwoA, posRoot);
+- SkFixed index = SkClampMax(t, 0xFFFF);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> SkGradientShaderBase::kCache32Shift];
++ if (t < 0) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampLower];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampUpper];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> SkGradientShaderBase::kCache32Shift];
++ }
+ fx += dx;
+ fy += dy;
+ b += db;
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0008-Bug-687188-Skia-radial-gradients.patch b/gfx/skia/patches/archive/0008-Bug-687188-Skia-radial-gradients.patch
new file mode 100644
index 000000000..0f60dbd8e
--- /dev/null
+++ b/gfx/skia/patches/archive/0008-Bug-687188-Skia-radial-gradients.patch
@@ -0,0 +1,173 @@
+From f941ea32e44a2436d235e83ef1a434289a9d9c1e Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Wed, 23 May 2012 11:40:25 -0400
+Subject: [PATCH 08/10] Bug 755869 - [11] Re-apply bug 687188 - Skia
+ radial gradients should use the 0/1 color stop values
+ for clamping. r=mattwoodrow
+
+---
+ gfx/skia/src/effects/SkGradientShader.cpp | 76 +++++++++++++++++++++++------
+ 1 files changed, 61 insertions(+), 15 deletions(-)
+
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+index 59ba48c..ea05a39 100644
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -204,6 +204,7 @@ private:
+ mutable SkMallocPixelRef* fCache32PixelRef;
+ mutable unsigned fCacheAlpha; // the alpha value we used when we computed the cache. larger than 8bits so we can store uninitialized value
+
++ static SkPMColor PremultiplyColor(SkColor c0, U8CPU alpha);
+ static void Build16bitCache(uint16_t[], SkColor c0, SkColor c1, int count);
+ static void Build32bitCache(SkPMColor[], SkColor c0, SkColor c1, int count,
+ U8CPU alpha);
+@@ -507,6 +508,21 @@ static inline U8CPU dither_ceil_fixed_to_8(SkFixed n) {
+ return ((n << 1) - (n | (n >> 8))) >> 8;
+ }
+
++SkPMColor Gradient_Shader::PremultiplyColor(SkColor c0, U8CPU paintAlpha)
++{
++ SkFixed a = SkMulDiv255Round(SkColorGetA(c0), paintAlpha);
++ SkFixed r = SkColorGetR(c0);
++ SkFixed g = SkColorGetG(c0);
++ SkFixed b = SkColorGetB(c0);
++
++ a = SkIntToFixed(a) + 0x8000;
++ r = SkIntToFixed(r) + 0x8000;
++ g = SkIntToFixed(g) + 0x8000;
++ b = SkIntToFixed(b) + 0x8000;
++
++ return SkPremultiplyARGBInline(a >> 16, r >> 16, g >> 16, b >> 16);
++}
++
+ void Gradient_Shader::Build32bitCache(SkPMColor cache[], SkColor c0, SkColor c1,
+ int count, U8CPU paintAlpha) {
+ SkASSERT(count > 1);
+@@ -628,14 +644,14 @@ static void complete_32bit_cache(SkPMColor* cache, int stride) {
+ const SkPMColor* Gradient_Shader::getCache32() const {
+ if (fCache32 == NULL) {
+ // double the count for dither entries
+- const int entryCount = kCache32Count * 2;
++ const int entryCount = kCache32Count * 2 + 2;
+ const size_t allocSize = sizeof(SkPMColor) * entryCount;
+
+ if (NULL == fCache32PixelRef) {
+ fCache32PixelRef = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ }
+- fCache32 = (SkPMColor*)fCache32PixelRef->getAddr();
++ fCache32 = (SkPMColor*)fCache32PixelRef->getAddr() + 1;
+ if (fColorCount == 2) {
+ Build32bitCache(fCache32, fOrigColors[0], fOrigColors[1],
+ kGradient32Length, fCacheAlpha);
+@@ -659,7 +675,7 @@ const SkPMColor* Gradient_Shader::getCache32() const {
+ SkMallocPixelRef* newPR = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ SkPMColor* linear = fCache32; // just computed linear data
+- SkPMColor* mapped = (SkPMColor*)newPR->getAddr(); // storage for mapped data
++ SkPMColor* mapped = (SkPMColor*)newPR->getAddr() + 1; // storage for mapped data
+ SkUnitMapper* map = fMapper;
+ for (int i = 0; i < kGradient32Length; i++) {
+ int index = map->mapUnit16((i << 8) | i) >> 8;
+@@ -668,10 +684,13 @@ const SkPMColor* Gradient_Shader::getCache32() const {
+ }
+ fCache32PixelRef->unref();
+ fCache32PixelRef = newPR;
+- fCache32 = (SkPMColor*)newPR->getAddr();
++ fCache32 = (SkPMColor*)newPR->getAddr() + 1;
+ }
+ complete_32bit_cache(fCache32, kCache32Count);
+ }
++ //Write the clamp colours into the first and last entries of fCache32
++ fCache32[-1] = PremultiplyColor(fOrigColors[0], fCacheAlpha);
++ fCache32[kCache32Count * 2] = PremultiplyColor(fOrigColors[fColorCount - 1], fCacheAlpha);
+ return fCache32;
+ }
+
+@@ -857,6 +876,18 @@ void shadeSpan_linear_vertical(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
++ if (proc == clamp_tileproc) {
++ // Read out clamp values from beginning/end of the cache. No need to lerp
++ // or dither
++ if (fx < 0) {
++ sk_memset32(dstC, cache[-1], count);
++ return;
++ } else if (fx > 0xFFFF) {
++ sk_memset32(dstC, cache[Gradient_Shader::kCache32Count * 2], count);
++ return;
++ }
++ }
++
+ // We're a vertical gradient, so no change in a span.
+ // If colors change sharply across the gradient, dithering is
+ // insufficient (it subsamples the color space) and we need to lerp.
+@@ -875,6 +906,18 @@ void shadeSpan_linear_vertical_lerp(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
++ if (proc == clamp_tileproc) {
++ // Read out clamp values from beginning/end of the cache. No need to lerp
++ // or dither
++ if (fx < 0) {
++ sk_memset32(dstC, cache[-1], count);
++ return;
++ } else if (fx > 0xFFFF) {
++ sk_memset32(dstC, cache[Gradient_Shader::kCache32Count * 2], count);
++ return;
++ }
++ }
++
+ // We're a vertical gradient, so no change in a span.
+ // If colors change sharply across the gradient, dithering is
+ // insufficient (it subsamples the color space) and we need to lerp.
+@@ -900,10 +943,8 @@ void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx,
+ range.init(fx, dx, count, 0, Gradient_Shader::kGradient32Length);
+
+ if ((count = range.fCount0) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV0],
+- cache[(toggle ^ Gradient_Shader::kDitherStride32) + range.fV0],
+- count);
++ // Shouldn't be any need to dither for clamping?
++ sk_memset32(dstC, cache[-1], count);
+ dstC += count;
+ }
+ if ((count = range.fCount1) > 0) {
+@@ -922,10 +963,8 @@ void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx,
+ }
+ }
+ if ((count = range.fCount2) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV1],
+- cache[(toggle ^ Gradient_Shader::kDitherStride32) + range.fV1],
+- count);
++ // Shouldn't be any need to dither for clamping?
++ sk_memset32(dstC, cache[Gradient_Shader::kCache32Count * 2], count);
+ }
+ }
+
+@@ -1796,9 +1835,16 @@ void shadeSpan_twopoint_clamp(SkScalar fx, SkScalar dx,
+ for (; count > 0; --count) {
+ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+ fOneOverTwoA, posRoot);
+- SkFixed index = SkClampMax(t, 0xFFFF);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> Gradient_Shader::kCache32Shift];
++
++ if (t < 0) {
++ *dstC++ = cache[-1];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[Gradient_Shader::kCache32Count * 2];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> Gradient_Shader::kCache32Shift];
++ }
++
+ fx += dx;
+ fy += dy;
+ b += db;
+--
+1.7.5.4
+
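A hedged sketch of the premultiply-with-rounding step this patch adds for the clamp slots: scale the colour's alpha by the paint alpha with round-to-nearest division by 255, then premultiply each channel. mul_div_255_round and the packed A|R|G|B layout are this example's assumptions, not Skia definitions.

    #include <cstdint>

    // round(a * b / 255) for 8-bit inputs, computed without a division
    static inline uint32_t mul_div_255_round(uint32_t a, uint32_t b) {
        uint32_t p = a * b + 128;
        return (p + (p >> 8)) >> 8;
    }

    static uint32_t premultiply_clamp_color(uint32_t a, uint32_t r, uint32_t g,
                                            uint32_t b, uint32_t paintAlpha) {
        uint32_t outA = mul_div_255_round(a, paintAlpha);
        return (outA << 24)
             | (mul_div_255_round(r, outA) << 16)
             | (mul_div_255_round(g, outA) << 8)
             |  mul_div_255_round(b, outA);
    }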
diff --git a/gfx/skia/patches/archive/0008-Bug-848491-Re-apply-759683-Handle-compilers-that-don.patch b/gfx/skia/patches/archive/0008-Bug-848491-Re-apply-759683-Handle-compilers-that-don.patch
new file mode 100644
index 000000000..58961d6e0
--- /dev/null
+++ b/gfx/skia/patches/archive/0008-Bug-848491-Re-apply-759683-Handle-compilers-that-don.patch
@@ -0,0 +1,35 @@
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 25 Apr 2013 20:49:45 -0400
+Subject: Bug 848491 - Re-apply 759683 - Handle compilers that don't support SSSE3 intrinsics
+
+
+diff --git a/gfx/skia/src/opts/opts_check_SSE2.cpp b/gfx/skia/src/opts/opts_check_SSE2.cpp
+index 6370058..18f68d6 100644
+--- a/gfx/skia/src/opts/opts_check_SSE2.cpp
++++ b/gfx/skia/src/opts/opts_check_SSE2.cpp
+@@ -86,9 +86,13 @@ static inline bool hasSSSE3() {
+ #else
+
+ static inline bool hasSSSE3() {
++#if defined(SK_BUILD_SSSE3)
+ int cpu_info[4] = { 0 };
+ getcpuid(1, cpu_info);
+ return (cpu_info[2] & 0x200) != 0;
++#else
++ return false;
++#endif
+ }
+ #endif
+
+@@ -104,7 +108,7 @@ static bool cachedHasSSSE3() {
+
+ void SkBitmapProcState::platformProcs() {
+ if (cachedHasSSSE3()) {
+-#if !defined(SK_BUILD_FOR_ANDROID)
++#if !defined(SK_BUILD_FOR_ANDROID) && defined(SK_BUILD_SSSE3)
+ // Disable SSSE3 optimization for Android x86
+ if (fSampleProc32 == S32_opaque_D32_filter_DX) {
+ fSampleProc32 = S32_opaque_D32_filter_DX_SSSE3;
+--
+1.7.11.7
+
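A sketch of the pattern this patch enforces: only consult cpuid for SSSE3 when the build itself was compiled with SSSE3 support, otherwise report false so no SSSE3 path can be selected. SK_BUILD_SSSE3 comes from the patch; the use of GCC's __get_cpuid and the function name are this example's.

    #include <cpuid.h>

    static bool has_ssse3_sketch() {
    #if defined(SK_BUILD_SSSE3)
        unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
            return false;
        }
        return (ecx & 0x200) != 0;    // CPUID.1:ECX bit 9 = SSSE3
    #else
        return false;                 // compiler cannot emit SSSE3 code anyway
    #endif
    }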
diff --git a/gfx/skia/patches/archive/0009-Bug-755869-FreeBSD-Hurd.patch b/gfx/skia/patches/archive/0009-Bug-755869-FreeBSD-Hurd.patch
new file mode 100644
index 000000000..1e9a93f20
--- /dev/null
+++ b/gfx/skia/patches/archive/0009-Bug-755869-FreeBSD-Hurd.patch
@@ -0,0 +1,28 @@
+From df3be24040f7cb2f9c7ed86ad3e47206630e885f Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Wed, 23 May 2012 14:49:57 -0400
+Subject: [PATCH 09/10] Bug 755869 - [12] Re-apply bug 749533 - Add
+ support for GNU/kFreeBSD and Hurd in Skia.
+ r=mattwoodrow
+
+---
+ gfx/skia/include/core/SkPreConfig.h | 3 ++-
+ 1 files changed, 2 insertions(+), 1 deletions(-)
+
+diff --git a/gfx/skia/include/core/SkPreConfig.h b/gfx/skia/include/core/SkPreConfig.h
+index 46c6929..16c4d6c 100644
+--- a/gfx/skia/include/core/SkPreConfig.h
++++ b/gfx/skia/include/core/SkPreConfig.h
+@@ -35,7 +35,8 @@
+ #elif defined(ANDROID)
+ #define SK_BUILD_FOR_ANDROID
+ #elif defined(linux) || defined(__FreeBSD__) || defined(__OpenBSD__) || \
+- defined(__sun) || defined(__NetBSD__) || defined(__DragonFly__)
++ defined(__sun) || defined(__NetBSD__) || defined(__DragonFly__) || \
++ defined(__GLIBC__) || defined(__GNU__)
+ #define SK_BUILD_FOR_UNIX
+ #elif TARGET_OS_IPHONE || TARGET_IPHONE_SIMULATOR
+ #define SK_BUILD_FOR_IOS
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0009-Bug-777614-Re-apply-759683-Handle-compilers-that-don.patch b/gfx/skia/patches/archive/0009-Bug-777614-Re-apply-759683-Handle-compilers-that-don.patch
new file mode 100644
index 000000000..1da208ed1
--- /dev/null
+++ b/gfx/skia/patches/archive/0009-Bug-777614-Re-apply-759683-Handle-compilers-that-don.patch
@@ -0,0 +1,40 @@
+From 2c5a8cebc806ed287ce7c3723ea64a233266cd9e Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 13 Sep 2012 14:55:33 -0400
+Subject: [PATCH 9/9] Bug 777614 - Re-apply 759683 - Handle compilers that
+ don't support SSSE3 intrinsics r=nrc
+
+---
+ gfx/skia/src/opts/opts_check_SSE2.cpp | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/gfx/skia/src/opts/opts_check_SSE2.cpp b/gfx/skia/src/opts/opts_check_SSE2.cpp
+index 96d0dea..add6d5f 100644
+--- a/gfx/skia/src/opts/opts_check_SSE2.cpp
++++ b/gfx/skia/src/opts/opts_check_SSE2.cpp
+@@ -86,9 +86,13 @@ static inline bool hasSSSE3() {
+ #else
+
+ static inline bool hasSSSE3() {
++#if defined(SK_BUILD_SSSE3)
+ int cpu_info[4] = { 0 };
+ getcpuid(1, cpu_info);
+ return (cpu_info[2] & 0x200) != 0;
++#else
++ return false;
++#endif
+ }
+ #endif
+
+@@ -104,7 +108,7 @@ static bool cachedHasSSSE3() {
+
+ void SkBitmapProcState::platformProcs() {
+ if (cachedHasSSSE3()) {
+-#if !defined(SK_BUILD_FOR_ANDROID)
++#if !defined(SK_BUILD_FOR_ANDROID) && defined(SK_BUILD_SSSE3)
+ // Disable SSSE3 optimization for Android x86
+ if (fSampleProc32 == S32_opaque_D32_filter_DX) {
+ fSampleProc32 = S32_opaque_D32_filter_DX_SSSE3;
+--
+1.7.11.4
+
diff --git a/gfx/skia/patches/archive/0009-Bug-848491-Re-apply-bug-751418-Add-our-own-GrUserCon.patch b/gfx/skia/patches/archive/0009-Bug-848491-Re-apply-bug-751418-Add-our-own-GrUserCon.patch
new file mode 100644
index 000000000..9778015c4
--- /dev/null
+++ b/gfx/skia/patches/archive/0009-Bug-848491-Re-apply-bug-751418-Add-our-own-GrUserCon.patch
@@ -0,0 +1,23 @@
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 25 Apr 2013 20:52:32 -0400
+Subject: Bug 848491 - Re-apply bug 751418 - Add our own GrUserConfig
+
+
+diff --git a/gfx/skia/include/gpu/GrUserConfig.h b/gfx/skia/include/gpu/GrUserConfig.h
+index 11d4feb..77ab850 100644
+--- a/gfx/skia/include/gpu/GrUserConfig.h
++++ b/gfx/skia/include/gpu/GrUserConfig.h
+@@ -43,6 +43,10 @@
+ */
+ //#define GR_DEFAULT_TEXTURE_CACHE_MB_LIMIT 96
+
++/*
++ * This allows us to set a callback to be called before each GL call to ensure
++ * that our context is set correctly
++ */
+ #define GR_GL_PER_GL_FUNC_CALLBACK 1
+
+ #endif
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0010-Bug-689069-ARM-Opts.patch b/gfx/skia/patches/archive/0010-Bug-689069-ARM-Opts.patch
new file mode 100644
index 000000000..bd6604b4b
--- /dev/null
+++ b/gfx/skia/patches/archive/0010-Bug-689069-ARM-Opts.patch
@@ -0,0 +1,36 @@
+From dc1292fc8c2b9da900ebcac953120eaffd0d329e Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Wed, 23 May 2012 14:52:36 -0400
+Subject: [PATCH 10/10] Bug 755869 - [13] Re-apply bug 750733 - Use
+ handles in API object hooks where possible
+ r=mattwoodrow
+
+---
+ gfx/skia/src/xml/SkJS.cpp | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gfx/skia/src/xml/SkJS.cpp b/gfx/skia/src/xml/SkJS.cpp
+index f2e7a83..b2717d7 100644
+--- a/gfx/skia/src/xml/SkJS.cpp
++++ b/gfx/skia/src/xml/SkJS.cpp
+@@ -74,7 +74,7 @@ extern "C" {
+ #endif
+
+ static bool
+-global_enumerate(JSContext *cx, JSObject *obj)
++global_enumerate(JSContext *cx, JSHandleObject *obj)
+ {
+ #ifdef LAZY_STANDARD_CLASSES
+ return JS_EnumerateStandardClasses(cx, obj);
+@@ -84,7 +84,7 @@ global_enumerate(JSContext *cx, JSObject *obj)
+ }
+
+ static bool
+-global_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags, JSObject **objp)
++global_resolve(JSContext *cx, JSHandleObject obj, JSHandleId id, unsigned flags, JSObject **objp)
+ {
+ #ifdef LAZY_STANDARD_CLASSES
+ if ((flags & JSRESOLVE_ASSIGNING) == 0) {
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0010-Bug-836892-Add-new-blending-modes-to-SkXfermode.patch b/gfx/skia/patches/archive/0010-Bug-836892-Add-new-blending-modes-to-SkXfermode.patch
new file mode 100644
index 000000000..a446037de
--- /dev/null
+++ b/gfx/skia/patches/archive/0010-Bug-836892-Add-new-blending-modes-to-SkXfermode.patch
@@ -0,0 +1,698 @@
+# HG changeset patch
+# User Rik Cabanier <cabanier@adobe.com>
+# Date 1360273929 -46800
+# Node ID 3ac8edca3a03b3d22240b5a5b95ae3b5ada9877d
+# Parent cbb67fe70b864b36165061e1fd3b083cd09af087
+Bug 836892 - Add new blending modes to SkXfermode. r=gw280
+
+diff --git a/gfx/skia/include/core/SkXfermode.h b/gfx/skia/include/core/SkXfermode.h
+--- a/gfx/skia/include/core/SkXfermode.h
++++ b/gfx/skia/include/core/SkXfermode.h
+@@ -96,33 +96,37 @@ public:
+ kDstOut_Mode, //!< [Da * (1 - Sa), Dc * (1 - Sa)]
+ kSrcATop_Mode, //!< [Da, Sc * Da + (1 - Sa) * Dc]
+ kDstATop_Mode, //!< [Sa, Sa * Dc + Sc * (1 - Da)]
+ kXor_Mode, //!< [Sa + Da - 2 * Sa * Da, Sc * (1 - Da) + (1 - Sa) * Dc]
+
+ // all remaining modes are defined in the SVG Compositing standard
+ // http://www.w3.org/TR/2009/WD-SVGCompositing-20090430/
+ kPlus_Mode,
+- kMultiply_Mode,
+
+ // all above modes can be expressed as pair of src/dst Coeffs
+ kCoeffModesCnt,
+
+- kScreen_Mode = kCoeffModesCnt,
++ kMultiply_Mode = kCoeffModesCnt,
++ kScreen_Mode,
+ kOverlay_Mode,
+ kDarken_Mode,
+ kLighten_Mode,
+ kColorDodge_Mode,
+ kColorBurn_Mode,
+ kHardLight_Mode,
+ kSoftLight_Mode,
+ kDifference_Mode,
+ kExclusion_Mode,
++ kHue_Mode,
++ kSaturation_Mode,
++ kColor_Mode,
++ kLuminosity_Mode,
+
+- kLastMode = kExclusion_Mode
++ kLastMode = kLuminosity_Mode
+ };
+
+ /**
+ * If the xfermode is one of the modes in the Mode enum, then asMode()
+ * returns true and sets (if not null) mode accordingly. Otherwise it
+ * returns false and ignores the mode parameter.
+ */
+ virtual bool asMode(Mode* mode);
+diff --git a/gfx/skia/src/core/SkXfermode.cpp b/gfx/skia/src/core/SkXfermode.cpp
+--- a/gfx/skia/src/core/SkXfermode.cpp
++++ b/gfx/skia/src/core/SkXfermode.cpp
+@@ -7,16 +7,18 @@
+ */
+
+
+ #include "SkXfermode.h"
+ #include "SkColorPriv.h"
+ #include "SkFlattenableBuffers.h"
+ #include "SkMathPriv.h"
+
++#include <algorithm>
++
+ SK_DEFINE_INST_COUNT(SkXfermode)
+
+ #define SkAlphaMulAlpha(a, b) SkMulDiv255Round(a, b)
+
+ #if 0
+ // idea for higher precision blends in xfer procs (and slightly faster)
+ // see DstATop as a probable caller
+ static U8CPU mulmuldiv255round(U8CPU a, U8CPU b, U8CPU c, U8CPU d) {
+@@ -176,244 +178,439 @@ static SkPMColor xor_modeproc(SkPMColor
+ static SkPMColor plus_modeproc(SkPMColor src, SkPMColor dst) {
+ unsigned b = saturated_add(SkGetPackedB32(src), SkGetPackedB32(dst));
+ unsigned g = saturated_add(SkGetPackedG32(src), SkGetPackedG32(dst));
+ unsigned r = saturated_add(SkGetPackedR32(src), SkGetPackedR32(dst));
+ unsigned a = saturated_add(SkGetPackedA32(src), SkGetPackedA32(dst));
+ return SkPackARGB32(a, r, g, b);
+ }
+
++static inline int srcover_byte(int a, int b) {
++ return a + b - SkAlphaMulAlpha(a, b);
++}
++
++#define blendfunc_byte(sc, dc, sa, da, blendfunc) \
++ clamp_div255round(sc * (255 - da) + dc * (255 - sa) + blendfunc(sc, dc, sa, da))
++
+ // kMultiply_Mode
++static inline int multiply_byte(int sc, int dc, int sa, int da) {
++ return sc * dc;
++}
+ static SkPMColor multiply_modeproc(SkPMColor src, SkPMColor dst) {
+- int a = SkAlphaMulAlpha(SkGetPackedA32(src), SkGetPackedA32(dst));
+- int r = SkAlphaMulAlpha(SkGetPackedR32(src), SkGetPackedR32(dst));
+- int g = SkAlphaMulAlpha(SkGetPackedG32(src), SkGetPackedG32(dst));
+- int b = SkAlphaMulAlpha(SkGetPackedB32(src), SkGetPackedB32(dst));
++ int sa = SkGetPackedA32(src);
++ int da = SkGetPackedA32(dst);
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, multiply_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, multiply_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, multiply_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kScreen_Mode
+-static inline int srcover_byte(int a, int b) {
+- return a + b - SkAlphaMulAlpha(a, b);
++static inline int screen_byte(int sc, int dc, int sa, int da) {
++ return sc * da + sa * dc - sc * dc;
+ }
+ static SkPMColor screen_modeproc(SkPMColor src, SkPMColor dst) {
+- int a = srcover_byte(SkGetPackedA32(src), SkGetPackedA32(dst));
+- int r = srcover_byte(SkGetPackedR32(src), SkGetPackedR32(dst));
+- int g = srcover_byte(SkGetPackedG32(src), SkGetPackedG32(dst));
+- int b = srcover_byte(SkGetPackedB32(src), SkGetPackedB32(dst));
++ int sa = SkGetPackedA32(src);
++ int da = SkGetPackedA32(dst);
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, screen_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, screen_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, screen_byte);
++ return SkPackARGB32(a, r, g, b);
++}
++
++// kHardLight_Mode
++static inline int hardlight_byte(int sc, int dc, int sa, int da) {
++ if(!sa || !da)
++ return sc * da;
++ float Sc = (float)sc/sa;
++ float Dc = (float)dc/da;
++ if(Sc <= 0.5)
++ Sc *= 2 * Dc;
++ else
++ Sc = -1 + 2 * Sc + 2 * Dc - 2 * Sc * Dc;
++
++ return Sc * sa * da;
++}
++static SkPMColor hardlight_modeproc(SkPMColor src, SkPMColor dst) {
++ int sa = SkGetPackedA32(src);
++ int da = SkGetPackedA32(dst);
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, hardlight_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, hardlight_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, hardlight_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kOverlay_Mode
+ static inline int overlay_byte(int sc, int dc, int sa, int da) {
+- int tmp = sc * (255 - da) + dc * (255 - sa);
+- int rc;
+- if (2 * dc <= da) {
+- rc = 2 * sc * dc;
+- } else {
+- rc = sa * da - 2 * (da - dc) * (sa - sc);
+- }
+- return clamp_div255round(rc + tmp);
++ return hardlight_byte(dc, sc, da, sa);
+ }
+ static SkPMColor overlay_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = overlay_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = overlay_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = overlay_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, overlay_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, overlay_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, overlay_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kDarken_Mode
+ static inline int darken_byte(int sc, int dc, int sa, int da) {
+- int sd = sc * da;
+- int ds = dc * sa;
+- if (sd < ds) {
+- // srcover
+- return sc + dc - SkDiv255Round(ds);
+- } else {
+- // dstover
+- return dc + sc - SkDiv255Round(sd);
+- }
++ return SkMin32(sc * da, sa * dc);
+ }
+ static SkPMColor darken_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = darken_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = darken_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = darken_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, darken_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, darken_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, darken_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kLighten_Mode
+ static inline int lighten_byte(int sc, int dc, int sa, int da) {
+- int sd = sc * da;
+- int ds = dc * sa;
+- if (sd > ds) {
+- // srcover
+- return sc + dc - SkDiv255Round(ds);
+- } else {
+- // dstover
+- return dc + sc - SkDiv255Round(sd);
+- }
++ return SkMax32(sc * da, sa * dc);
+ }
+ static SkPMColor lighten_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = lighten_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = lighten_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = lighten_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, lighten_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, lighten_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, lighten_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kColorDodge_Mode
+ static inline int colordodge_byte(int sc, int dc, int sa, int da) {
+- int diff = sa - sc;
+- int rc;
+- if (0 == diff) {
+- rc = sa * da + sc * (255 - da) + dc * (255 - sa);
+- rc = SkDiv255Round(rc);
+- } else {
+- int tmp = (dc * sa << 15) / (da * diff);
+- rc = SkDiv255Round(sa * da) * tmp >> 15;
+- // don't clamp here, since we'll do it in our modeproc
+- }
+- return rc;
++ if (dc == 0)
++ return 0;
++ // Avoid division by 0
++ if (sc == sa)
++ return da * sa;
++
++ return SkMin32(sa * da, sa * sa * dc / (sa - sc));
+ }
+ static SkPMColor colordodge_modeproc(SkPMColor src, SkPMColor dst) {
+- // added to avoid div-by-zero in colordodge_byte
+- if (0 == dst) {
+- return src;
+- }
+-
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = colordodge_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = colordodge_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = colordodge_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+- r = clamp_max(r, a);
+- g = clamp_max(g, a);
+- b = clamp_max(b, a);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, colordodge_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, colordodge_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, colordodge_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kColorBurn_Mode
+ static inline int colorburn_byte(int sc, int dc, int sa, int da) {
+- int rc;
+- if (dc == da && 0 == sc) {
+- rc = sa * da + dc * (255 - sa);
+- } else if (0 == sc) {
+- return SkAlphaMulAlpha(dc, 255 - sa);
+- } else {
+- int tmp = (sa * (da - dc) * 256) / (sc * da);
+- if (tmp > 256) {
+- tmp = 256;
+- }
+- int tmp2 = sa * da;
+- rc = tmp2 - (tmp2 * tmp >> 8) + sc * (255 - da) + dc * (255 - sa);
+- }
+- return SkDiv255Round(rc);
++ if(dc == da)
++ return sa * da;
++ // Avoid division by 0
++ if(sc == 0)
++ return 0;
++
++ return sa * da - SkMin32(sa * da, sa * sa * (da - dc) / sc);
+ }
+ static SkPMColor colorburn_modeproc(SkPMColor src, SkPMColor dst) {
+- // added to avoid div-by-zero in colorburn_byte
+- if (0 == dst) {
+- return src;
+- }
+-
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = colorburn_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = colorburn_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = colorburn_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+- return SkPackARGB32(a, r, g, b);
+-}
+-
+-// kHardLight_Mode
+-static inline int hardlight_byte(int sc, int dc, int sa, int da) {
+- int rc;
+- if (2 * sc <= sa) {
+- rc = 2 * sc * dc;
+- } else {
+- rc = sa * da - 2 * (da - dc) * (sa - sc);
+- }
+- return clamp_div255round(rc + sc * (255 - da) + dc * (255 - sa));
+-}
+-static SkPMColor hardlight_modeproc(SkPMColor src, SkPMColor dst) {
+- int sa = SkGetPackedA32(src);
+- int da = SkGetPackedA32(dst);
+- int a = srcover_byte(sa, da);
+- int r = hardlight_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = hardlight_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = hardlight_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, colorburn_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, colorburn_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, colorburn_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // returns 255 * sqrt(n/255)
+ static U8CPU sqrt_unit_byte(U8CPU n) {
+ return SkSqrtBits(n, 15+4);
+ }
+
+ // kSoftLight_Mode
+ static inline int softlight_byte(int sc, int dc, int sa, int da) {
+ int m = da ? dc * 256 / da : 0;
+ int rc;
+- if (2 * sc <= sa) {
+- rc = dc * (sa + ((2 * sc - sa) * (256 - m) >> 8));
+- } else if (4 * dc <= da) {
++ if (2 * sc <= sa)
++ return dc * (sa + ((2 * sc - sa) * (256 - m) >> 8));
++
++ if (4 * dc <= da) {
+ int tmp = (4 * m * (4 * m + 256) * (m - 256) >> 16) + 7 * m;
+- rc = dc * sa + (da * (2 * sc - sa) * tmp >> 8);
+- } else {
+- int tmp = sqrt_unit_byte(m) - m;
+- rc = dc * sa + (da * (2 * sc - sa) * tmp >> 8);
++ return dc * sa + (da * (2 * sc - sa) * tmp >> 8);
+ }
+- return clamp_div255round(rc + sc * (255 - da) + dc * (255 - sa));
++ int tmp = sqrt_unit_byte(m) - m;
++ return rc = dc * sa + (da * (2 * sc - sa) * tmp >> 8);
+ }
+ static SkPMColor softlight_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = softlight_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = softlight_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = softlight_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, softlight_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, softlight_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, softlight_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kDifference_Mode
+ static inline int difference_byte(int sc, int dc, int sa, int da) {
+- int tmp = SkMin32(sc * da, dc * sa);
+- return clamp_signed_byte(sc + dc - 2 * SkDiv255Round(tmp));
++ int tmp = dc * sa - sc * da;
++ if(tmp<0)
++ return - tmp;
++
++ return tmp;
+ }
+ static SkPMColor difference_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = difference_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = difference_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = difference_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, difference_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, difference_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, difference_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kExclusion_Mode
+ static inline int exclusion_byte(int sc, int dc, int sa, int da) {
+- // this equations is wacky, wait for SVG to confirm it
+- int r = sc * da + dc * sa - 2 * sc * dc + sc * (255 - da) + dc * (255 - sa);
+- return clamp_div255round(r);
++ return sc * da + dc * sa - 2 * dc * sc;
+ }
+ static SkPMColor exclusion_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = exclusion_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = exclusion_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = exclusion_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, exclusion_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, exclusion_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, exclusion_byte);
++ return SkPackARGB32(a, r, g, b);
++}
++
++///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
++struct BlendColor {
++ float r;
++ float g;
++ float b;
++
++ BlendColor(): r(0), g(0), b(0)
++ {}
++};
++
++static inline float Lum(BlendColor C)
++{
++ return C.r * 0.3 + C.g * 0.59 + C.b * 0.11;
++}
++
++static inline float SkMinFloat(float a, float b)
++{
++ if (a > b)
++ a = b;
++ return a;
++}
++
++static inline float SkMaxFloat(float a, float b)
++{
++ if (a < b)
++ a = b;
++ return a;
++}
++
++#define minimum(C) SkMinFloat(SkMinFloat(C.r, C.g), C.b)
++#define maximum(C) SkMaxFloat(SkMaxFloat(C.r, C.g), C.b)
++
++static inline float Sat(BlendColor c) {
++ return maximum(c) - minimum(c);
++}
++
++static inline void setSaturationComponents(float& Cmin, float& Cmid, float& Cmax, float s) {
++ if(Cmax > Cmin) {
++ Cmid = (((Cmid - Cmin) * s ) / (Cmax - Cmin));
++ Cmax = s;
++ } else {
++ Cmax = 0;
++ Cmid = 0;
++ }
++ Cmin = 0;
++}
++
++static inline BlendColor SetSat(BlendColor C, float s) {
++ if(C.r <= C.g) {
++ if(C.g <= C.b)
++ setSaturationComponents(C.r, C.g, C.b, s);
++ else
++ if(C.r <= C.b)
++ setSaturationComponents(C.r, C.b, C.g, s);
++ else
++ setSaturationComponents(C.b, C.r, C.g, s);
++ } else if(C.r <= C.b)
++ setSaturationComponents(C.g, C.r, C.b, s);
++ else
++ if(C.g <= C.b)
++ setSaturationComponents(C.g, C.b, C.r, s);
++ else
++ setSaturationComponents(C.b, C.g, C.r, s);
++
++ return C;
++}
++
++static inline BlendColor clipColor(BlendColor C) {
++ float L = Lum(C);
++ float n = minimum(C);
++ float x = maximum(C);
++ if(n < 0) {
++ C.r = L + (((C.r - L) * L) / (L - n));
++ C.g = L + (((C.g - L) * L) / (L - n));
++ C.b = L + (((C.b - L) * L) / (L - n));
++ }
++
++ if(x > 1) {
++ C.r = L + (((C.r - L) * (1 - L)) / (x - L));
++ C.g = L + (((C.g - L) * (1 - L)) / (x - L));
++ C.b = L + (((C.b - L) * (1 - L)) / (x - L));
++ }
++ return C;
++}
++
++static inline BlendColor SetLum(BlendColor C, float l) {
++ float d = l - Lum(C);
++ C.r += d;
++ C.g += d;
++ C.b += d;
++
++ return clipColor(C);
++}
++
++#define blendfunc_nonsep_byte(sc, dc, sa, da, blendval) \
++ clamp_div255round(sc * (255 - da) + dc * (255 - sa) + (int)(sa * da * blendval))
++
++static SkPMColor hue_modeproc(SkPMColor src, SkPMColor dst) {
++ int sr = SkGetPackedR32(src);
++ int sg = SkGetPackedG32(src);
++ int sb = SkGetPackedB32(src);
++ int sa = SkGetPackedA32(src);
++
++ int dr = SkGetPackedR32(dst);
++ int dg = SkGetPackedG32(dst);
++ int db = SkGetPackedB32(dst);
++ int da = SkGetPackedA32(dst);
++
++ BlendColor Cs;
++ if(sa) {
++ Cs.r = (float)sr / sa;
++ Cs.g = (float)sg / sa;
++ Cs.b = (float)sb / sa;
++ BlendColor Cd;
++ if(da) {
++ Cd.r = (float)dr / da;
++ Cd.g = (float)dg / da;
++ Cd.b = (float)db / da;
++ Cs = SetLum(SetSat(Cs, Sat(Cd)), Lum(Cd));
++ }
++ }
++
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_nonsep_byte(sr, dr, sa, da, Cs.r);
++ int g = blendfunc_nonsep_byte(sg, dg, sa, da, Cs.g);
++ int b = blendfunc_nonsep_byte(sb, db, sa, da, Cs.b);
++ return SkPackARGB32(a, r, g, b);
++}
++
++static SkPMColor saturation_modeproc(SkPMColor src, SkPMColor dst) {
++ int sr = SkGetPackedR32(src);
++ int sg = SkGetPackedG32(src);
++ int sb = SkGetPackedB32(src);
++ int sa = SkGetPackedA32(src);
++
++ int dr = SkGetPackedR32(dst);
++ int dg = SkGetPackedG32(dst);
++ int db = SkGetPackedB32(dst);
++ int da = SkGetPackedA32(dst);
++
++ BlendColor Cs;
++ if(sa) {
++ Cs.r = (float)sr / sa;
++ Cs.g = (float)sg / sa;
++ Cs.b = (float)sb / sa;
++ BlendColor Cd;
++ if(da) {
++ Cd.r = (float)dr / da;
++ Cd.g = (float)dg / da;
++ Cd.b = (float)db / da;
++ Cs = SetLum(SetSat(Cd, Sat(Cs)), Lum(Cd));
++ }
++ }
++
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_nonsep_byte(sr, dr, sa, da, Cs.r);
++ int g = blendfunc_nonsep_byte(sg, dg, sa, da, Cs.g);
++ int b = blendfunc_nonsep_byte(sb, db, sa, da, Cs.b);
++ return SkPackARGB32(a, r, g, b);
++}
++
++static SkPMColor color_modeproc(SkPMColor src, SkPMColor dst) {
++ int sr = SkGetPackedR32(src);
++ int sg = SkGetPackedG32(src);
++ int sb = SkGetPackedB32(src);
++ int sa = SkGetPackedA32(src);
++
++ int dr = SkGetPackedR32(dst);
++ int dg = SkGetPackedG32(dst);
++ int db = SkGetPackedB32(dst);
++ int da = SkGetPackedA32(dst);
++
++ BlendColor Cs;
++ if(sa) {
++ Cs.r = (float)sr / sa;
++ Cs.g = (float)sg / sa;
++ Cs.b = (float)sb / sa;
++ BlendColor Cd;
++ if(da) {
++ Cd.r = (float)dr / da;
++ Cd.g = (float)dg / da;
++ Cd.b = (float)db / da;
++ Cs = SetLum(Cs, Lum(Cd));
++ }
++ }
++
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_nonsep_byte(sr, dr, sa, da, Cs.r);
++ int g = blendfunc_nonsep_byte(sg, dg, sa, da, Cs.g);
++ int b = blendfunc_nonsep_byte(sb, db, sa, da, Cs.b);
++ return SkPackARGB32(a, r, g, b);
++}
++
++static SkPMColor luminosity_modeproc(SkPMColor src, SkPMColor dst) {
++ int sr = SkGetPackedR32(src);
++ int sg = SkGetPackedG32(src);
++ int sb = SkGetPackedB32(src);
++ int sa = SkGetPackedA32(src);
++
++ int dr = SkGetPackedR32(dst);
++ int dg = SkGetPackedG32(dst);
++ int db = SkGetPackedB32(dst);
++ int da = SkGetPackedA32(dst);
++
++ BlendColor Cs;
++ if(sa) {
++ Cs.r = (float)sr / sa;
++ Cs.g = (float)sg / sa;
++ Cs.b = (float)sb / sa;
++ BlendColor Cd;
++ if(da) {
++ Cd.r = (float)dr / da;
++ Cd.g = (float)dg / da;
++ Cd.b = (float)db / da;
++ Cs = SetLum(Cd, Lum(Cs));
++ }
++ }
++
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_nonsep_byte(sr, dr, sa, da, Cs.r);
++ int g = blendfunc_nonsep_byte(sg, dg, sa, da, Cs.g);
++ int b = blendfunc_nonsep_byte(sb, db, sa, da, Cs.b);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ struct ProcCoeff {
+ SkXfermodeProc fProc;
+ SkXfermode::Coeff fSC;
+ SkXfermode::Coeff fDC;
+ };
+@@ -430,27 +627,31 @@ static const ProcCoeff gProcCoeffs[] = {
+ { dstin_modeproc, SkXfermode::kZero_Coeff, SkXfermode::kSA_Coeff },
+ { srcout_modeproc, SkXfermode::kIDA_Coeff, SkXfermode::kZero_Coeff },
+ { dstout_modeproc, SkXfermode::kZero_Coeff, SkXfermode::kISA_Coeff },
+ { srcatop_modeproc, SkXfermode::kDA_Coeff, SkXfermode::kISA_Coeff },
+ { dstatop_modeproc, SkXfermode::kIDA_Coeff, SkXfermode::kSA_Coeff },
+ { xor_modeproc, SkXfermode::kIDA_Coeff, SkXfermode::kISA_Coeff },
+
+ { plus_modeproc, SkXfermode::kOne_Coeff, SkXfermode::kOne_Coeff },
+- { multiply_modeproc,SkXfermode::kZero_Coeff, SkXfermode::kSC_Coeff },
++ { multiply_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { screen_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { overlay_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { darken_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { lighten_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { colordodge_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { colorburn_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { hardlight_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { softlight_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { difference_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { exclusion_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
++ { hue_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
++ { saturation_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
++ { color_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
++ { luminosity_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ };
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ bool SkXfermode::asCoeff(Coeff* src, Coeff* dst) {
+ return false;
+ }
+
+@@ -1172,16 +1373,20 @@ static const Proc16Rec gModeProcs16[] =
+ { darken_modeproc16_0, darken_modeproc16_255, NULL }, // darken
+ { lighten_modeproc16_0, lighten_modeproc16_255, NULL }, // lighten
+ { NULL, NULL, NULL }, // colordodge
+ { NULL, NULL, NULL }, // colorburn
+ { NULL, NULL, NULL }, // hardlight
+ { NULL, NULL, NULL }, // softlight
+ { NULL, NULL, NULL }, // difference
+ { NULL, NULL, NULL }, // exclusion
++ { NULL, NULL, NULL }, // hue
++ { NULL, NULL, NULL }, // saturation
++ { NULL, NULL, NULL }, // color
++ { NULL, NULL, NULL }, // luminosity
+ };
+
+ SkXfermodeProc16 SkXfermode::GetProc16(Mode mode, SkColor srcColor) {
+ SkXfermodeProc16 proc16 = NULL;
+ if ((unsigned)mode < kModeCount) {
+ const Proc16Rec& rec = gModeProcs16[mode];
+ unsigned a = SkColorGetA(srcColor);
+
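
All of the separable modes in the patch above funnel through the blendfunc_byte macro, which composes a per-mode B(Sc, Dc, Sa, Da) term with the source-over terms Sc*(255 - Da) + Dc*(255 - Sa) on premultiplied 0..255 channels. A rough standalone sketch of that composition (plain C++, independent of Skia and of this patch; clamp_div255_round, blend_channel and multiply_term are invented names):

    #include <algorithm>
    #include <cstdio>

    // Approximate rounding division by 255, clamped to a byte; plays the role
    // of clamp_div255round for the composed result.
    static int clamp_div255_round(int prod) {
        int v = (prod + 127) / 255;
        return std::max(0, std::min(255, v));
    }

    // B(Sc, Dc) for kMultiply_Mode on premultiplied 0..255 channels.
    static int multiply_term(int sc, int dc, int /*sa*/, int /*da*/) {
        return sc * dc;
    }

    // Shared separable-mode composition:
    // result = Sc*(255 - Da) + Dc*(255 - Sa) + B(Sc, Dc, Sa, Da), then /255 with clamp.
    template <typename BlendTerm>
    static int blend_channel(int sc, int dc, int sa, int da, BlendTerm term) {
        return clamp_div255_round(sc * (255 - da) + dc * (255 - sa) + term(sc, dc, sa, da));
    }

    int main() {
        // Opaque mid-grey multiplied over opaque mid-grey: 0.5 * 0.5 is about 0.25.
        std::printf("%d\n", blend_channel(128, 128, 255, 255, multiply_term)); // prints 64
        return 0;
    }
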
diff --git a/gfx/skia/patches/archive/0010-Bug-848491-Re-apply-bug-817356-Patch-Skia-to-recogni.patch b/gfx/skia/patches/archive/0010-Bug-848491-Re-apply-bug-817356-Patch-Skia-to-recogni.patch
new file mode 100644
index 000000000..0d44b008d
--- /dev/null
+++ b/gfx/skia/patches/archive/0010-Bug-848491-Re-apply-bug-817356-Patch-Skia-to-recogni.patch
@@ -0,0 +1,22 @@
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 25 Apr 2013 20:55:02 -0400
+Subject: Bug 848491 - Re-apply bug 817356 - Patch Skia to recognize uppercase PPC/PPC64.
+
+
+diff --git a/gfx/skia/include/core/SkPreConfig.h b/gfx/skia/include/core/SkPreConfig.h
+index 11cb223..7e95b99 100644
+--- a/gfx/skia/include/core/SkPreConfig.h
++++ b/gfx/skia/include/core/SkPreConfig.h
+@@ -99,7 +99,8 @@
+ //////////////////////////////////////////////////////////////////////
+
+ #if !defined(SK_CPU_BENDIAN) && !defined(SK_CPU_LENDIAN)
+- #if defined (__ppc__) || defined(__ppc64__)
++ #if defined (__ppc__) || defined(__PPC__) || defined(__ppc64__) \
++ || defined(__PPC64__)
+ #define SK_CPU_BENDIAN
+ #else
+ #define SK_CPU_LENDIAN
+--
+1.7.11.7
+
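
For reference, an endianness define of the kind this patch extends can be sanity-checked against the hardware with a few lines of standalone C++ (SKETCH_CPU_BENDIAN is a made-up stand-in for SK_CPU_BENDIAN, not Skia code):

    #include <cstdint>
    #include <cstdio>

    // Same compiler-macro pattern as the patch, under a sketch-local name.
    #if defined(__ppc__) || defined(__PPC__) || defined(__ppc64__) || defined(__PPC64__)
    #define SKETCH_CPU_BENDIAN
    #endif

    int main() {
        const uint32_t probe = 0x01020304;
        // On big-endian hardware the first byte in memory is 0x01, on little-endian 0x04.
        const uint8_t firstByte = *reinterpret_cast<const uint8_t*>(&probe);
    #ifdef SKETCH_CPU_BENDIAN
        std::printf("compile-time: big-endian, first byte is 0x%02x\n", firstByte);
    #else
        std::printf("compile-time: little-endian/unknown, first byte is 0x%02x\n", firstByte);
    #endif
        return 0;
    }
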
diff --git a/gfx/skia/patches/archive/0011-Bug-719575-Fix-clang-build.patch b/gfx/skia/patches/archive/0011-Bug-719575-Fix-clang-build.patch
new file mode 100644
index 000000000..95cb08a36
--- /dev/null
+++ b/gfx/skia/patches/archive/0011-Bug-719575-Fix-clang-build.patch
@@ -0,0 +1,28 @@
+From cf855f31194ff071f2c787a7413d70a43f15f204 Mon Sep 17 00:00:00 2001
+From: Ehsan Akhgari <ehsan@mozilla.com>
+Date: Tue, 29 May 2012 15:39:55 -0400
+Subject: [PATCH] Bug 755869 - Re-apply patch from bug 719575 to fix clang
+ builds for the new Skia r=gw280
+
+---
+ gfx/skia/src/ports/SkFontHost_mac_coretext.cpp | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp b/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp
+index c43d1a6..ce5f409 100644
+--- a/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp
++++ b/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp
+@@ -807,8 +807,8 @@ CGRGBPixel* Offscreen::getCG(const SkScalerContext_Mac& context, const SkGlyph&
+ void SkScalerContext_Mac::getVerticalOffset(CGGlyph glyphID, SkIPoint* offset) const {
+ CGSize vertOffset;
+ CTFontGetVerticalTranslationsForGlyphs(fCTVerticalFont, &glyphID, &vertOffset, 1);
+- const SkPoint trans = {SkFloatToScalar(vertOffset.width),
+- SkFloatToScalar(vertOffset.height)};
++ const SkPoint trans = {SkScalar(SkFloatToScalar(vertOffset.width)),
++ SkScalar(SkFloatToScalar(vertOffset.height))};
+ SkPoint floatOffset;
+ fVerticalMatrix.mapPoints(&floatOffset, &trans, 1);
+ if (!isSnowLeopard()) {
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0011-Bug-839347-no-anon-namespace-around-SkNO_RETURN_HINT.patch b/gfx/skia/patches/archive/0011-Bug-839347-no-anon-namespace-around-SkNO_RETURN_HINT.patch
new file mode 100644
index 000000000..854f0b1af
--- /dev/null
+++ b/gfx/skia/patches/archive/0011-Bug-839347-no-anon-namespace-around-SkNO_RETURN_HINT.patch
@@ -0,0 +1,31 @@
+# HG changeset patch
+# Parent 2c6da9f02606f7a02f635d99ef8cf669d3bc5c4b
+# User Daniel Holbert <dholbert@cs.stanford.edu>
+Bug 839347: Move SkNO_RETURN_HINT out of anonymous namespace so that clang won't warn about it being unused. r=mattwoodrow
+
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -63,20 +63,18 @@
+ * The clang static analyzer likes to know that when the program is not
+ * expected to continue (crash, assertion failure, etc). It will notice that
+ * some combination of parameters lead to a function call that does not return.
+ * It can then make appropriate assumptions about the parameters in code
+ * executed only if the non-returning function was *not* called.
+ */
+ #if !defined(SkNO_RETURN_HINT)
+ #if SK_HAS_COMPILER_FEATURE(attribute_analyzer_noreturn)
+- namespace {
+- inline void SkNO_RETURN_HINT() __attribute__((analyzer_noreturn));
+- inline void SkNO_RETURN_HINT() {}
+- }
++ inline void SkNO_RETURN_HINT() __attribute__((analyzer_noreturn));
++ inline void SkNO_RETURN_HINT() {}
+ #else
+ #define SkNO_RETURN_HINT() do {} while (false)
+ #endif
+ #endif
+
+ #if defined(SK_ZLIB_INCLUDE) && defined(SK_SYSTEM_ZLIB)
+ #error "cannot define both SK_ZLIB_INCLUDE and SK_SYSTEM_ZLIB"
+ #elif defined(SK_ZLIB_INCLUDE) || defined(SK_SYSTEM_ZLIB)
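
The comment block above explains the purpose of the hint; a self-contained sketch of the same technique, guarded so non-clang compilers still build (sketch_assert_failed and SKETCH_ASSERT are invented names, not Skia's):

    #include <cstdio>

    #ifndef __has_feature
    #define __has_feature(x) 0   // non-clang compilers: treat every feature as absent
    #endif

    #if __has_feature(attribute_analyzer_noreturn)
    static void sketch_assert_failed() __attribute__((analyzer_noreturn));
    #endif
    // Does nothing at runtime; the attribute only tells the clang static analyzer
    // that control does not continue past a failed check.
    static void sketch_assert_failed() {}

    #define SKETCH_ASSERT(cond) do { if (!(cond)) sketch_assert_failed(); } while (false)

    int main() {
        int* p = nullptr;
        SKETCH_ASSERT(p == nullptr);
        std::puts("check passed");
        return 0;
    }
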
diff --git a/gfx/skia/patches/archive/0012-Bug-751418-Add-our-own-GrUserConfig-r-mattwoodrow.patch b/gfx/skia/patches/archive/0012-Bug-751418-Add-our-own-GrUserConfig-r-mattwoodrow.patch
new file mode 100644
index 000000000..cde294095
--- /dev/null
+++ b/gfx/skia/patches/archive/0012-Bug-751418-Add-our-own-GrUserConfig-r-mattwoodrow.patch
@@ -0,0 +1,29 @@
+From 4c25387e6e6cdb55f19e51631a78c3fa9b4a3c73 Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 1 Nov 2012 17:29:50 -0400
+Subject: [PATCH 2/8] Bug 751418 - Add our own GrUserConfig r=mattwoodrow
+
+---
+ gfx/skia/include/gpu/GrUserConfig.h | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/gfx/skia/include/gpu/GrUserConfig.h b/gfx/skia/include/gpu/GrUserConfig.h
+index d514486..b729ab3 100644
+--- a/gfx/skia/include/gpu/GrUserConfig.h
++++ b/gfx/skia/include/gpu/GrUserConfig.h
+@@ -64,6 +64,12 @@
+ #define GR_TEXT_SCALAR_IS_FIXED 0
+ #define GR_TEXT_SCALAR_IS_FLOAT 1
+
++/*
++ * This allows us to set a callback to be called before each GL call to ensure
++ * that our context is set correctly
++ */
++#define GR_GL_PER_GL_FUNC_CALLBACK 1
++
+ #endif
+
+
+--
+1.7.11.7
+
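
GR_GL_PER_GL_FUNC_CALLBACK only turns the hook on; as a generic sketch of the per-call-callback pattern it enables (plain C++, not the actual Ganesh interface; every name below is invented), a callback run before each wrapped call can make the right context current:

    #include <cstdio>

    typedef void (*PreCallHook)(const char* fnName);
    static PreCallHook gPreCallHook = nullptr;

    // Every GL entry point goes through this macro so the hook runs first.
    #define SKETCH_GL_CALL(fn, ...)              \
        do {                                     \
            if (gPreCallHook) gPreCallHook(#fn); \
            fn(__VA_ARGS__);                     \
        } while (false)

    static void fakeGlClear(unsigned mask) { std::printf("glClear(0x%x)\n", mask); }

    static void ensureContextCurrent(const char* fnName) {
        // A real hook would make the GL context current (eglMakeCurrent or similar) here.
        std::printf("making context current before %s\n", fnName);
    }

    int main() {
        gPreCallHook = ensureContextCurrent;
        SKETCH_GL_CALL(fakeGlClear, 0x4000u);
        return 0;
    }
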
diff --git a/gfx/skia/patches/archive/0012-Bug-759683-make-ssse3-conditional.patch b/gfx/skia/patches/archive/0012-Bug-759683-make-ssse3-conditional.patch
new file mode 100644
index 000000000..dc780c5ec
--- /dev/null
+++ b/gfx/skia/patches/archive/0012-Bug-759683-make-ssse3-conditional.patch
@@ -0,0 +1,22 @@
+diff --git a/gfx/skia/src/opts/opts_check_SSE2.cpp b/gfx/skia/src/opts/opts_check_SSE2.cpp
+--- a/gfx/skia/src/opts/opts_check_SSE2.cpp
++++ b/gfx/skia/src/opts/opts_check_SSE2.cpp
+@@ -91,17 +91,17 @@ static bool cachedHasSSE2() {
+
+ static bool cachedHasSSSE3() {
+ static bool gHasSSSE3 = hasSSSE3();
+ return gHasSSSE3;
+ }
+
+ void SkBitmapProcState::platformProcs() {
+ if (cachedHasSSSE3()) {
+-#if !defined(SK_BUILD_FOR_ANDROID)
++#if defined(SK_BUILD_SSSE3)
+ // Disable SSSE3 optimization for Android x86
+ if (fSampleProc32 == S32_opaque_D32_filter_DX) {
+ fSampleProc32 = S32_opaque_D32_filter_DX_SSSE3;
+ } else if (fSampleProc32 == S32_alpha_D32_filter_DX) {
+ fSampleProc32 = S32_alpha_D32_filter_DX_SSSE3;
+ }
+
+ if (fSampleProc32 == S32_opaque_D32_filter_DXDY) {
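
The change above gates the SSSE3 procs on a build-time define in addition to the existing runtime check. A standalone sketch of that two-level gating (plain C++, not Skia; SKETCH_BUILD_SSSE3 stands in for the real SK_BUILD_SSSE3 define, and the CPU probe uses the GCC/clang builtin):

    #include <cstdio>

    static bool cpuHasSSSE3() {
    #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
        // GCC (>= 4.8) / clang builtin for runtime CPU feature detection.
        return __builtin_cpu_supports("ssse3") != 0;
    #else
        return false;  // non-x86 or unknown compiler: assume unsupported
    #endif
    }

    int main() {
    #if defined(SKETCH_BUILD_SSSE3)
        if (cpuHasSSSE3()) {               // runtime check, only if compiled in
            std::puts("SSSE3 fast path");
            return 0;
        }
    #endif
        std::puts("portable path");
        return 0;
    }
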
diff --git a/gfx/skia/patches/archive/0013-Bug-751418-Fix-compile-error-on-gcc-in-Skia-GL-r-mat.patch b/gfx/skia/patches/archive/0013-Bug-751418-Fix-compile-error-on-gcc-in-Skia-GL-r-mat.patch
new file mode 100644
index 000000000..167e22184
--- /dev/null
+++ b/gfx/skia/patches/archive/0013-Bug-751418-Fix-compile-error-on-gcc-in-Skia-GL-r-mat.patch
@@ -0,0 +1,26 @@
+From 3d786b1f0c040205ad9ef6d4216ce06b41f7359f Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Mon, 5 Nov 2012 15:49:42 +0000
+Subject: [PATCH 3/8] Bug 751418 - Fix compile error on gcc in Skia/GL
+ r=mattwoodrow
+
+---
+ gfx/skia/src/gpu/gl/GrGLProgram.cpp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gfx/skia/src/gpu/gl/GrGLProgram.cpp b/gfx/skia/src/gpu/gl/GrGLProgram.cpp
+index 2703110..40cadc3 100644
+--- a/gfx/skia/src/gpu/gl/GrGLProgram.cpp
++++ b/gfx/skia/src/gpu/gl/GrGLProgram.cpp
+@@ -575,7 +575,7 @@ bool GrGLProgram::genProgram(const GrCustomStage** customStages) {
+ POS_ATTR_NAME);
+
+ builder.fVSCode.appendf("void main() {\n"
+- "\tvec3 pos3 = %s * vec3("POS_ATTR_NAME", 1);\n"
++ "\tvec3 pos3 = %s * vec3(" POS_ATTR_NAME ", 1);\n"
+ "\tgl_Position = vec4(pos3.xy, 0, pos3.z);\n",
+ viewMName);
+
+--
+1.7.11.7
+
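
The added spaces matter because of C++11 user-defined literals: a string literal immediately followed by an identifier is parsed as a literal suffix and is no longer macro-expanded. A minimal illustration (POS_ATTR is a stand-in macro, not Skia's POS_ATTR_NAME):

    #include <cstdio>

    #define POS_ATTR "aPosition"

    int main() {
        // Adjacent string literals still concatenate when separated by whitespace.
        std::printf("vec3(" POS_ATTR ", 1)\n");
        // With -std=c++11, "vec3("POS_ATTR would instead be treated as a
        // user-defined string literal with suffix POS_ATTR and fail to compile.
        return 0;
    }
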
diff --git a/gfx/skia/patches/archive/0013-Bug-761890-fonts.patch b/gfx/skia/patches/archive/0013-Bug-761890-fonts.patch
new file mode 100644
index 000000000..f20293d4c
--- /dev/null
+++ b/gfx/skia/patches/archive/0013-Bug-761890-fonts.patch
@@ -0,0 +1,162 @@
+# HG changeset patch
+# User Nicholas Cameron <ncameron@mozilla.com>
+# Date 1337146927 -43200
+# Node ID 310209abef2c2667e5de41dd2a1f071e8cd42821
+# Parent 93f3ca4d5707b2aae9c6ae52d5d29c2c802e7ef8
+Bug 746883; changes to the Skia library. r=gw280
+
+diff --git a/gfx/skia/include/core/SkDraw.h b/gfx/skia/include/core/SkDraw.h
+--- a/gfx/skia/include/core/SkDraw.h
++++ b/gfx/skia/include/core/SkDraw.h
+@@ -125,23 +125,24 @@ public:
+ #endif
+ };
+
+ class SkGlyphCache;
+
+ class SkTextToPathIter {
+ public:
+ SkTextToPathIter(const char text[], size_t length, const SkPaint& paint,
+- bool applyStrokeAndPathEffects);
++ bool applyStrokeAndPathEffects, bool useCanonicalTextSize = true);
+ ~SkTextToPathIter();
+
+ const SkPaint& getPaint() const { return fPaint; }
+ SkScalar getPathScale() const { return fScale; }
+
+ const SkPath* next(SkScalar* xpos); //!< returns nil when there are no more paths
++ bool nextWithWhitespace(const SkPath** path, SkScalar* xpos); //!< returns false when there are no more paths
+
+ private:
+ SkGlyphCache* fCache;
+ SkPaint fPaint;
+ SkScalar fScale;
+ SkFixed fPrevAdvance;
+ const char* fText;
+ const char* fStop;
+diff --git a/gfx/skia/src/core/SkPaint.cpp b/gfx/skia/src/core/SkPaint.cpp
+--- a/gfx/skia/src/core/SkPaint.cpp
++++ b/gfx/skia/src/core/SkPaint.cpp
+@@ -1359,30 +1359,32 @@ void SkPaint::getPosTextPath(const void*
+ const SkPoint pos[], SkPath* path) const {
+ SkASSERT(length == 0 || textData != NULL);
+
+ const char* text = (const char*)textData;
+ if (text == NULL || length == 0 || path == NULL) {
+ return;
+ }
+
+- SkTextToPathIter iter(text, length, *this, false);
++ SkTextToPathIter iter(text, length, *this, false, false);
+ SkMatrix matrix;
+ SkPoint prevPos;
+ prevPos.set(0, 0);
+
+ matrix.setScale(iter.getPathScale(), iter.getPathScale());
+ path->reset();
+
+ unsigned int i = 0;
+ const SkPath* iterPath;
+- while ((iterPath = iter.next(NULL)) != NULL) {
+- matrix.postTranslate(pos[i].fX - prevPos.fX, pos[i].fY - prevPos.fY);
+- path->addPath(*iterPath, matrix);
+- prevPos = pos[i];
++ while (iter.nextWithWhitespace(&iterPath, NULL)) {
++ if (iterPath) {
++ matrix.postTranslate(pos[i].fX - prevPos.fX, pos[i].fY - prevPos.fY);
++ path->addPath(*iterPath, matrix);
++ prevPos = pos[i];
++ }
+ i++;
+ }
+ }
+
+ static void add_flattenable(SkDescriptor* desc, uint32_t tag,
+ SkFlattenableWriteBuffer* buffer) {
+ buffer->flatten(desc->addEntry(tag, buffer->size(), NULL));
+ }
+@@ -2118,30 +2120,31 @@ const SkRect& SkPaint::doComputeFastBoun
+
+ static bool has_thick_frame(const SkPaint& paint) {
+ return paint.getStrokeWidth() > 0 &&
+ paint.getStyle() != SkPaint::kFill_Style;
+ }
+
+ SkTextToPathIter::SkTextToPathIter( const char text[], size_t length,
+ const SkPaint& paint,
+- bool applyStrokeAndPathEffects)
++ bool applyStrokeAndPathEffects,
++ bool useCanonicalTextSize)
+ : fPaint(paint) {
+ fGlyphCacheProc = paint.getMeasureCacheProc(SkPaint::kForward_TextBufferDirection,
+ true);
+
+ fPaint.setLinearText(true);
+ fPaint.setMaskFilter(NULL); // don't want this affecting our path-cache lookup
+
+ if (fPaint.getPathEffect() == NULL && !has_thick_frame(fPaint)) {
+ applyStrokeAndPathEffects = false;
+ }
+
+ // can't use our canonical size if we need to apply patheffects
+- if (fPaint.getPathEffect() == NULL) {
++ if (useCanonicalTextSize && fPaint.getPathEffect() == NULL) {
+ fPaint.setTextSize(SkIntToScalar(SkPaint::kCanonicalTextSizeForPaths));
+ fScale = paint.getTextSize() / SkPaint::kCanonicalTextSizeForPaths;
+ if (has_thick_frame(fPaint)) {
+ fPaint.setStrokeWidth(SkScalarDiv(fPaint.getStrokeWidth(), fScale));
+ }
+ } else {
+ fScale = SK_Scalar1;
+ }
+@@ -2185,30 +2188,47 @@ SkTextToPathIter::SkTextToPathIter( cons
+ fXYIndex = paint.isVerticalText() ? 1 : 0;
+ }
+
+ SkTextToPathIter::~SkTextToPathIter() {
+ SkGlyphCache::AttachCache(fCache);
+ }
+
+ const SkPath* SkTextToPathIter::next(SkScalar* xpos) {
+- while (fText < fStop) {
++ const SkPath* result;
++ while (nextWithWhitespace(&result, xpos)) {
++ if (result) {
++ if (xpos) {
++ *xpos = fXPos;
++ }
++ return result;
++ }
++ }
++ return NULL;
++}
++
++bool SkTextToPathIter::nextWithWhitespace(const SkPath** path, SkScalar* xpos) {
++ if (fText < fStop) {
+ const SkGlyph& glyph = fGlyphCacheProc(fCache, &fText);
+
+ fXPos += SkScalarMul(SkFixedToScalar(fPrevAdvance + fAutoKern.adjust(glyph)), fScale);
+ fPrevAdvance = advance(glyph, fXYIndex); // + fPaint.getTextTracking();
+
+ if (glyph.fWidth) {
+ if (xpos) {
+ *xpos = fXPos;
+ }
+- return fCache->findPath(glyph);
++ *path = fCache->findPath(glyph);
++ return true;
++ } else {
++ *path = NULL;
++ return true;
+ }
+ }
+- return NULL;
++ return false;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ bool SkPaint::nothingToDraw() const {
+ if (fLooper) {
+ return false;
+ }
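
The new nextWithWhitespace contract reports every glyph slot, even zero-width whitespace, so the caller's index into the pos[] array stays aligned with the glyphs it iterates. A generic sketch of that pattern outside Skia (all names below are invented):

    #include <cstdio>
    #include <vector>

    struct Glyph { const char* path; };  // path == nullptr for whitespace glyphs

    // Returns false only when the glyphs are exhausted; whitespace slots are
    // still reported (with a null path) so the caller can advance its index.
    static bool nextWithWhitespace(const std::vector<Glyph>& glyphs, size_t& i, const char** path) {
        if (i >= glyphs.size()) return false;
        *path = glyphs[i].path;
        ++i;
        return true;
    }

    int main() {
        std::vector<Glyph> glyphs = {{"A"}, {nullptr}, {"B"}};
        const float xPositions[] = {0.f, 10.f, 20.f};
        size_t i = 0, posIndex = 0;
        const char* path = nullptr;
        while (nextWithWhitespace(glyphs, i, &path)) {
            if (path) std::printf("draw %s at x=%.1f\n", path, xPositions[posIndex]);
            ++posIndex;  // advances for whitespace too, keeping positions in sync
        }
        return 0;
    }
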
diff --git a/gfx/skia/patches/archive/0014-Bug-765038-Fix-clang-build.patch b/gfx/skia/patches/archive/0014-Bug-765038-Fix-clang-build.patch
new file mode 100644
index 000000000..6cc74914d
--- /dev/null
+++ b/gfx/skia/patches/archive/0014-Bug-765038-Fix-clang-build.patch
@@ -0,0 +1,29 @@
+# HG changeset patch
+# Parent 9ded7a9f94a863dfa1f3227d3013367f51b8b522
+# User Nicholas Cameron <ncameron@mozilla.com>
+Bug 765038; fix a Clang compilation bug in Skia; r=jwatt
+
+diff --git a/gfx/skia/src/sfnt/SkOTTable_head.h b/gfx/skia/src/sfnt/SkOTTable_head.h
+--- a/gfx/skia/src/sfnt/SkOTTable_head.h
++++ b/gfx/skia/src/sfnt/SkOTTable_head.h
+@@ -109,18 +109,18 @@ struct SkOTTableHead {
+ } raw;
+ } macStyle;
+ SK_OT_USHORT lowestRecPPEM;
+ struct FontDirectionHint {
+ SK_TYPED_ENUM(Value, SK_OT_SHORT,
+ ((FullyMixedDirectionalGlyphs, SkTEndian_SwapBE16(0)))
+ ((OnlyStronglyLTR, SkTEndian_SwapBE16(1)))
+ ((StronglyLTR, SkTEndian_SwapBE16(2)))
+- ((OnlyStronglyRTL, static_cast<SK_OT_SHORT>(SkTEndian_SwapBE16(-1))))
+- ((StronglyRTL, static_cast<SK_OT_SHORT>(SkTEndian_SwapBE16(-2))))
++ ((OnlyStronglyRTL, static_cast<SK_OT_SHORT>(SkTEndian_SwapBE16(static_cast<SK_OT_USHORT>(-1)))))
++ ((StronglyRTL, static_cast<SK_OT_SHORT>(SkTEndian_SwapBE16(static_cast<SK_OT_USHORT>(-2)))))
+ SK_SEQ_END,
+ (value)SK_SEQ_END)
+ } fontDirectionHint;
+ struct IndexToLocFormat {
+ SK_TYPED_ENUM(Value, SK_OT_SHORT,
+ ((ShortOffsets, SkTEndian_SwapBE16(0)))
+ ((LongOffsets, SkTEndian_SwapBE16(1)))
+ SK_SEQ_END,
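
The extra cast through SK_OT_USHORT gives the swap macro a well-defined unsigned bit pattern before the result is reinterpreted as signed, which is presumably what clang objected to. A small standalone illustration of the same pattern (swap16 is a stand-in for SkTEndian_SwapBE16, not Skia code):

    #include <cstdint>
    #include <cstdio>

    // Stand-in for SkTEndian_SwapBE16: swaps the two bytes of a 16-bit value.
    static inline uint16_t swap16(uint16_t v) {
        return static_cast<uint16_t>((v << 8) | (v >> 8));
    }

    int main() {
        // Convert the negative constant to its unsigned 16-bit pattern first,
        // swap, then reinterpret as signed, in the order the patch enforces.
        const int16_t onlyStronglyRTL =
            static_cast<int16_t>(swap16(static_cast<uint16_t>(-1)));
        std::printf("%d\n", onlyStronglyRTL);  // prints -1 (0xFFFF swaps to 0xFFFF)
        return 0;
    }
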
diff --git a/gfx/skia/patches/archive/0015-Bug-766017-warnings.patch b/gfx/skia/patches/archive/0015-Bug-766017-warnings.patch
new file mode 100644
index 000000000..174dcb9bc
--- /dev/null
+++ b/gfx/skia/patches/archive/0015-Bug-766017-warnings.patch
@@ -0,0 +1,865 @@
+From: David Zbarsky <dzbarsky@gmail.com>
+Bug 766017 - Fix some skia warnings r=gw280
+
+diff --git a/gfx/skia/include/utils/mac/SkCGUtils.h b/gfx/skia/include/utils/mac/SkCGUtils.h
+--- a/gfx/skia/include/utils/mac/SkCGUtils.h
++++ b/gfx/skia/include/utils/mac/SkCGUtils.h
+@@ -39,18 +39,16 @@ static inline CGImageRef SkCreateCGImage
+ /**
+ * Draw the bitmap into the specified CG context. The bitmap will be converted
+ * to a CGImage using the generic RGB colorspace. (x,y) specifies the position
+ * of the top-left corner of the bitmap. The bitmap is converted using the
+ * colorspace returned by CGColorSpaceCreateDeviceRGB()
+ */
+ void SkCGDrawBitmap(CGContextRef, const SkBitmap&, float x, float y);
+
+-bool SkPDFDocumentToBitmap(SkStream* stream, SkBitmap* output);
+-
+ /**
+ * Return a provider that wraps the specified stream. It will become an
+ * owner of the stream, so the caller must still manage its ownership.
+ *
+ * To hand-off ownership of the stream to the provider, the caller must do
+ * something like the following:
+ *
+ * SkStream* stream = new ...;
+diff --git a/gfx/skia/src/core/SkAAClip.cpp b/gfx/skia/src/core/SkAAClip.cpp
+--- a/gfx/skia/src/core/SkAAClip.cpp
++++ b/gfx/skia/src/core/SkAAClip.cpp
+@@ -246,17 +246,17 @@ static void count_left_right_zeros(const
+ zeros = 0;
+ }
+ row += 2;
+ width -= n;
+ }
+ *riteZ = zeros;
+ }
+
+-#ifdef SK_DEBUG
++#if 0
+ static void test_count_left_right_zeros() {
+ static bool gOnce;
+ if (gOnce) {
+ return;
+ }
+ gOnce = true;
+
+ const uint8_t data0[] = { 0, 0, 10, 0xFF };
+@@ -1319,22 +1319,16 @@ bool SkAAClip::setPath(const SkPath& pat
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ typedef void (*RowProc)(SkAAClip::Builder&, int bottom,
+ const uint8_t* rowA, const SkIRect& rectA,
+ const uint8_t* rowB, const SkIRect& rectB);
+
+-static void sectRowProc(SkAAClip::Builder& builder, int bottom,
+- const uint8_t* rowA, const SkIRect& rectA,
+- const uint8_t* rowB, const SkIRect& rectB) {
+-
+-}
+-
+ typedef U8CPU (*AlphaProc)(U8CPU alphaA, U8CPU alphaB);
+
+ static U8CPU sectAlphaProc(U8CPU alphaA, U8CPU alphaB) {
+ // Multiply
+ return SkMulDiv255Round(alphaA, alphaB);
+ }
+
+ static U8CPU unionAlphaProc(U8CPU alphaA, U8CPU alphaB) {
+@@ -1429,31 +1423,16 @@ private:
+ static void adjust_row(RowIter& iter, int& leftA, int& riteA, int rite) {
+ if (rite == riteA) {
+ iter.next();
+ leftA = iter.left();
+ riteA = iter.right();
+ }
+ }
+
+-static bool intersect(int& min, int& max, int boundsMin, int boundsMax) {
+- SkASSERT(min < max);
+- SkASSERT(boundsMin < boundsMax);
+- if (min >= boundsMax || max <= boundsMin) {
+- return false;
+- }
+- if (min < boundsMin) {
+- min = boundsMin;
+- }
+- if (max > boundsMax) {
+- max = boundsMax;
+- }
+- return true;
+-}
+-
+ static void operatorX(SkAAClip::Builder& builder, int lastY,
+ RowIter& iterA, RowIter& iterB,
+ AlphaProc proc, const SkIRect& bounds) {
+ int leftA = iterA.left();
+ int riteA = iterA.right();
+ int leftB = iterB.left();
+ int riteB = iterB.right();
+
+@@ -1970,34 +1949,33 @@ static void small_bzero(void* dst, size_
+ static inline uint8_t mergeOne(uint8_t value, unsigned alpha) {
+ return SkMulDiv255Round(value, alpha);
+ }
+ static inline uint16_t mergeOne(uint16_t value, unsigned alpha) {
+ unsigned r = SkGetPackedR16(value);
+ unsigned g = SkGetPackedG16(value);
+ unsigned b = SkGetPackedB16(value);
+ return SkPackRGB16(SkMulDiv255Round(r, alpha),
+- SkMulDiv255Round(r, alpha),
+- SkMulDiv255Round(r, alpha));
++ SkMulDiv255Round(g, alpha),
++ SkMulDiv255Round(b, alpha));
+ }
+ static inline SkPMColor mergeOne(SkPMColor value, unsigned alpha) {
+ unsigned a = SkGetPackedA32(value);
+ unsigned r = SkGetPackedR32(value);
+ unsigned g = SkGetPackedG32(value);
+ unsigned b = SkGetPackedB32(value);
+ return SkPackARGB32(SkMulDiv255Round(a, alpha),
+ SkMulDiv255Round(r, alpha),
+ SkMulDiv255Round(g, alpha),
+ SkMulDiv255Round(b, alpha));
+ }
+
+ template <typename T> void mergeT(const T* SK_RESTRICT src, int srcN,
+ const uint8_t* SK_RESTRICT row, int rowN,
+ T* SK_RESTRICT dst) {
+- SkDEBUGCODE(int accumulated = 0;)
+ for (;;) {
+ SkASSERT(rowN > 0);
+ SkASSERT(srcN > 0);
+
+ int n = SkMin32(rowN, srcN);
+ unsigned rowA = row[1];
+ if (0xFF == rowA) {
+ small_memcpy(dst, src, n * sizeof(T));
+diff --git a/gfx/skia/src/core/SkBlitMask_D32.cpp b/gfx/skia/src/core/SkBlitMask_D32.cpp
+--- a/gfx/skia/src/core/SkBlitMask_D32.cpp
++++ b/gfx/skia/src/core/SkBlitMask_D32.cpp
+@@ -268,107 +268,49 @@ bool SkBlitMask::BlitColor(const SkBitma
+ return true;
+ }
+ return false;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+ ///////////////////////////////////////////////////////////////////////////////
+
+-static void BW_RowProc_Blend(SkPMColor* SK_RESTRICT dst,
+- const uint8_t* SK_RESTRICT mask,
+- const SkPMColor* SK_RESTRICT src, int count) {
+- int i, octuple = (count + 7) >> 3;
+- for (i = 0; i < octuple; ++i) {
+- int m = *mask++;
+- if (m & 0x80) { dst[0] = SkPMSrcOver(src[0], dst[0]); }
+- if (m & 0x40) { dst[1] = SkPMSrcOver(src[1], dst[1]); }
+- if (m & 0x20) { dst[2] = SkPMSrcOver(src[2], dst[2]); }
+- if (m & 0x10) { dst[3] = SkPMSrcOver(src[3], dst[3]); }
+- if (m & 0x08) { dst[4] = SkPMSrcOver(src[4], dst[4]); }
+- if (m & 0x04) { dst[5] = SkPMSrcOver(src[5], dst[5]); }
+- if (m & 0x02) { dst[6] = SkPMSrcOver(src[6], dst[6]); }
+- if (m & 0x01) { dst[7] = SkPMSrcOver(src[7], dst[7]); }
+- src += 8;
+- dst += 8;
+- }
+- count &= 7;
+- if (count > 0) {
+- int m = *mask;
+- do {
+- if (m & 0x80) { dst[0] = SkPMSrcOver(src[0], dst[0]); }
+- m <<= 1;
+- src += 1;
+- dst += 1;
+- } while (--count > 0);
+- }
+-}
+-
+-static void BW_RowProc_Opaque(SkPMColor* SK_RESTRICT dst,
+- const uint8_t* SK_RESTRICT mask,
+- const SkPMColor* SK_RESTRICT src, int count) {
+- int i, octuple = (count + 7) >> 3;
+- for (i = 0; i < octuple; ++i) {
+- int m = *mask++;
+- if (m & 0x80) { dst[0] = src[0]; }
+- if (m & 0x40) { dst[1] = src[1]; }
+- if (m & 0x20) { dst[2] = src[2]; }
+- if (m & 0x10) { dst[3] = src[3]; }
+- if (m & 0x08) { dst[4] = src[4]; }
+- if (m & 0x04) { dst[5] = src[5]; }
+- if (m & 0x02) { dst[6] = src[6]; }
+- if (m & 0x01) { dst[7] = src[7]; }
+- src += 8;
+- dst += 8;
+- }
+- count &= 7;
+- if (count > 0) {
+- int m = *mask;
+- do {
+- if (m & 0x80) { dst[0] = SkPMSrcOver(src[0], dst[0]); }
+- m <<= 1;
+- src += 1;
+- dst += 1;
+- } while (--count > 0);
+- }
+-}
+-
+ static void A8_RowProc_Blend(SkPMColor* SK_RESTRICT dst,
+ const uint8_t* SK_RESTRICT mask,
+ const SkPMColor* SK_RESTRICT src, int count) {
+ for (int i = 0; i < count; ++i) {
+ if (mask[i]) {
+ dst[i] = SkBlendARGB32(src[i], dst[i], mask[i]);
+ }
+ }
+ }
+
+ // expand the steps that SkAlphaMulQ performs, but this way we can
+-// exand.. add.. combine
++// expand.. add.. combine
+ // instead of
+ // expand..combine add expand..combine
+ //
+ #define EXPAND0(v, m, s) ((v) & (m)) * (s)
+ #define EXPAND1(v, m, s) (((v) >> 8) & (m)) * (s)
+ #define COMBINE(e0, e1, m) ((((e0) >> 8) & (m)) | ((e1) & ~(m)))
+
+ static void A8_RowProc_Opaque(SkPMColor* SK_RESTRICT dst,
+ const uint8_t* SK_RESTRICT mask,
+ const SkPMColor* SK_RESTRICT src, int count) {
+- const uint32_t rbmask = gMask_00FF00FF;
+ for (int i = 0; i < count; ++i) {
+ int m = mask[i];
+ if (m) {
+ m += (m >> 7);
+ #if 1
+ // this is slightly slower than the expand/combine version, but it
+ // is much closer to the old results, so we use it for now to reduce
+ // rebaselining.
+ dst[i] = SkAlphaMulQ(src[i], m) + SkAlphaMulQ(dst[i], 256 - m);
+ #else
++ const uint32_t rbmask = gMask_00FF00FF;
+ uint32_t v = src[i];
+ uint32_t s0 = EXPAND0(v, rbmask, m);
+ uint32_t s1 = EXPAND1(v, rbmask, m);
+ v = dst[i];
+ uint32_t d0 = EXPAND0(v, rbmask, m);
+ uint32_t d1 = EXPAND1(v, rbmask, m);
+ dst[i] = COMBINE(s0 + d0, s1 + d1, rbmask);
+ #endif
+@@ -559,17 +501,17 @@ SkBlitMask::RowProc SkBlitMask::RowFacto
+ // make this opt-in until chrome can rebaseline
+ RowProc proc = PlatformRowProcs(config, format, flags);
+ if (proc) {
+ return proc;
+ }
+
+ static const RowProc gProcs[] = {
+ // need X coordinate to handle BW
+- NULL, NULL, //(RowProc)BW_RowProc_Blend, (RowProc)BW_RowProc_Opaque,
++ NULL, NULL,
+ (RowProc)A8_RowProc_Blend, (RowProc)A8_RowProc_Opaque,
+ (RowProc)LCD16_RowProc_Blend, (RowProc)LCD16_RowProc_Opaque,
+ (RowProc)LCD32_RowProc_Blend, (RowProc)LCD32_RowProc_Opaque,
+ };
+
+ int index;
+ switch (config) {
+ case SkBitmap::kARGB_8888_Config:
+diff --git a/gfx/skia/src/core/SkConcaveToTriangles.cpp b/gfx/skia/src/core/SkConcaveToTriangles.cpp
+--- a/gfx/skia/src/core/SkConcaveToTriangles.cpp
++++ b/gfx/skia/src/core/SkConcaveToTriangles.cpp
+@@ -37,17 +37,16 @@
+ #include "SkTDArray.h"
+ #include "SkGeometry.h"
+ #include "SkTSort.h"
+
+ // This is used to prevent runaway code bugs, and can probably be removed after
+ // the code has been proven robust.
+ #define kMaxCount 1000
+
+-#define DEBUG
+ #ifdef DEBUG
+ //------------------------------------------------------------------------------
+ // Debugging support
+ //------------------------------------------------------------------------------
+
+ #include <cstdio>
+ #include <stdarg.h>
+
+diff --git a/gfx/skia/src/core/SkPath.cpp b/gfx/skia/src/core/SkPath.cpp
+--- a/gfx/skia/src/core/SkPath.cpp
++++ b/gfx/skia/src/core/SkPath.cpp
+@@ -469,17 +469,16 @@ void SkPath::incReserve(U16CPU inc) {
+ fPts.setReserve(fPts.count() + inc);
+
+ SkDEBUGCODE(this->validate();)
+ }
+
+ void SkPath::moveTo(SkScalar x, SkScalar y) {
+ SkDEBUGCODE(this->validate();)
+
+- int vc = fVerbs.count();
+ SkPoint* pt;
+
+ // remember our index
+ fLastMoveToIndex = fPts.count();
+
+ pt = fPts.append();
+ *fVerbs.append() = kMove_Verb;
+ pt->set(x, y);
+@@ -1163,17 +1162,16 @@ void SkPath::reversePathTo(const SkPath&
+ }
+ pts -= gPtsInVerb[verbs[i]];
+ }
+ }
+
+ void SkPath::reverseAddPath(const SkPath& src) {
+ this->incReserve(src.fPts.count());
+
+- const SkPoint* startPts = src.fPts.begin();
+ const SkPoint* pts = src.fPts.end();
+ const uint8_t* startVerbs = src.fVerbs.begin();
+ const uint8_t* verbs = src.fVerbs.end();
+
+ fIsOval = false;
+
+ bool needMove = true;
+ bool needClose = false;
+diff --git a/gfx/skia/src/core/SkRegion.cpp b/gfx/skia/src/core/SkRegion.cpp
+--- a/gfx/skia/src/core/SkRegion.cpp
++++ b/gfx/skia/src/core/SkRegion.cpp
+@@ -920,20 +920,16 @@ static int operate(const SkRegion::RunTy
+ /* Given count RunTypes in a complex region, return the worst case number of
+ logical intervals that represents (i.e. number of rects that would be
+ returned from the iterator).
+
+ We could just return count/2, since there must be at least 2 values per
+ interval, but we can first trim off the const overhead of the initial TOP
+ value, plus the final BOTTOM + 2 sentinels.
+ */
+-static int count_to_intervals(int count) {
+- SkASSERT(count >= 6); // a single rect is 6 values
+- return (count - 4) >> 1;
+-}
+
+ /* Given a number of intervals, what is the worst case representation of that
+ many intervals?
+
+ Worst case (from a storage perspective), is a vertical stack of single
+ intervals: TOP + N * (BOTTOM INTERVALCOUNT LEFT RIGHT SENTINEL) + SENTINEL
+ */
+ static int intervals_to_count(int intervals) {
+diff --git a/gfx/skia/src/core/SkScalerContext.cpp b/gfx/skia/src/core/SkScalerContext.cpp
+--- a/gfx/skia/src/core/SkScalerContext.cpp
++++ b/gfx/skia/src/core/SkScalerContext.cpp
+@@ -336,44 +336,16 @@ SK_ERROR:
+ glyph->fTop = 0;
+ glyph->fWidth = 0;
+ glyph->fHeight = 0;
+ // put a valid value here, in case it was earlier set to
+ // MASK_FORMAT_JUST_ADVANCE
+ glyph->fMaskFormat = fRec.fMaskFormat;
+ }
+
+-static bool isLCD(const SkScalerContext::Rec& rec) {
+- return SkMask::kLCD16_Format == rec.fMaskFormat ||
+- SkMask::kLCD32_Format == rec.fMaskFormat;
+-}
+-
+-static uint16_t a8_to_rgb565(unsigned a8) {
+- return SkPackRGB16(a8 >> 3, a8 >> 2, a8 >> 3);
+-}
+-
+-static void copyToLCD16(const SkBitmap& src, const SkMask& dst) {
+- SkASSERT(SkBitmap::kA8_Config == src.config());
+- SkASSERT(SkMask::kLCD16_Format == dst.fFormat);
+-
+- const int width = dst.fBounds.width();
+- const int height = dst.fBounds.height();
+- const uint8_t* srcP = src.getAddr8(0, 0);
+- size_t srcRB = src.rowBytes();
+- uint16_t* dstP = (uint16_t*)dst.fImage;
+- size_t dstRB = dst.fRowBytes;
+- for (int y = 0; y < height; ++y) {
+- for (int x = 0; x < width; ++x) {
+- dstP[x] = a8_to_rgb565(srcP[x]);
+- }
+- srcP += srcRB;
+- dstP = (uint16_t*)((char*)dstP + dstRB);
+- }
+-}
+-
+ #define SK_FREETYPE_LCD_LERP 160
+
+ static int lerp(int start, int end) {
+ SkASSERT((unsigned)SK_FREETYPE_LCD_LERP <= 256);
+ return start + ((end - start) * (SK_FREETYPE_LCD_LERP) >> 8);
+ }
+
+ static uint16_t packLCD16(unsigned r, unsigned g, unsigned b) {
+diff --git a/gfx/skia/src/core/SkScan_AntiPath.cpp b/gfx/skia/src/core/SkScan_AntiPath.cpp
+--- a/gfx/skia/src/core/SkScan_AntiPath.cpp
++++ b/gfx/skia/src/core/SkScan_AntiPath.cpp
+@@ -230,52 +230,16 @@ void SuperBlitter::blitH(int x, int y, i
+ fOffsetX);
+
+ #ifdef SK_DEBUG
+ fRuns.assertValid(y & MASK, (1 << (8 - SHIFT)));
+ fCurrX = x + width;
+ #endif
+ }
+
+-static void set_left_rite_runs(SkAlphaRuns& runs, int ileft, U8CPU leftA,
+- int n, U8CPU riteA) {
+- SkASSERT(leftA <= 0xFF);
+- SkASSERT(riteA <= 0xFF);
+-
+- int16_t* run = runs.fRuns;
+- uint8_t* aa = runs.fAlpha;
+-
+- if (ileft > 0) {
+- run[0] = ileft;
+- aa[0] = 0;
+- run += ileft;
+- aa += ileft;
+- }
+-
+- SkASSERT(leftA < 0xFF);
+- if (leftA > 0) {
+- *run++ = 1;
+- *aa++ = leftA;
+- }
+-
+- if (n > 0) {
+- run[0] = n;
+- aa[0] = 0xFF;
+- run += n;
+- aa += n;
+- }
+-
+- SkASSERT(riteA < 0xFF);
+- if (riteA > 0) {
+- *run++ = 1;
+- *aa++ = riteA;
+- }
+- run[0] = 0;
+-}
+-
+ void SuperBlitter::blitRect(int x, int y, int width, int height) {
+ SkASSERT(width > 0);
+ SkASSERT(height > 0);
+
+ // blit leading rows
+ while ((y & MASK)) {
+ this->blitH(x, y++, width);
+ if (--height <= 0) {
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -865,45 +865,16 @@ bool Linear_Gradient::setContext(const S
+ } while (0)
+
+ namespace {
+
+ typedef void (*LinearShadeProc)(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* dstC, const SkPMColor* cache,
+ int toggle, int count);
+
+-// This function is deprecated, and will be replaced by
+-// shadeSpan_linear_vertical_lerp() once Chrome has been weaned off of it.
+-void shadeSpan_linear_vertical(TileProc proc, SkFixed dx, SkFixed fx,
+- SkPMColor* SK_RESTRICT dstC,
+- const SkPMColor* SK_RESTRICT cache,
+- int toggle, int count) {
+- if (proc == clamp_tileproc) {
+- // Read out clamp values from beginning/end of the cache. No need to lerp
+- // or dither
+- if (fx < 0) {
+- sk_memset32(dstC, cache[-1], count);
+- return;
+- } else if (fx > 0xFFFF) {
+- sk_memset32(dstC, cache[Gradient_Shader::kCache32Count * 2], count);
+- return;
+- }
+- }
+-
+- // We're a vertical gradient, so no change in a span.
+- // If colors change sharply across the gradient, dithering is
+- // insufficient (it subsamples the color space) and we need to lerp.
+- unsigned fullIndex = proc(fx);
+- unsigned fi = fullIndex >> (16 - Gradient_Shader::kCache32Bits);
+- sk_memset32_dither(dstC,
+- cache[toggle + fi],
+- cache[(toggle ^ Gradient_Shader::kDitherStride32) + fi],
+- count);
+-}
+-
+ // Linear interpolation (lerp) is unnecessary if there are no sharp
+ // discontinuities in the gradient - which must be true if there are
+ // only 2 colors - but it's cheap.
+ void shadeSpan_linear_vertical_lerp(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
+ if (proc == clamp_tileproc) {
+@@ -2131,16 +2102,18 @@ protected:
+ buffer.writePoint(fCenter);
+ }
+
+ private:
+ typedef Gradient_Shader INHERITED;
+ const SkPoint fCenter;
+ };
+
++#ifndef SK_SCALAR_IS_FLOAT
++
+ #ifdef COMPUTE_SWEEP_TABLE
+ #define PI 3.14159265
+ static bool gSweepTableReady;
+ static uint8_t gSweepTable[65];
+
+ /* Our table stores precomputed values for atan: [0...1] -> [0..PI/4]
+ We scale the results to [0..32]
+ */
+@@ -2168,20 +2141,23 @@ static const uint8_t gSweepTable[] = {
+ 10, 11, 11, 12, 12, 13, 13, 14, 15, 15, 16, 16, 17, 17, 18, 18,
+ 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 25, 26,
+ 26, 27, 27, 27, 28, 28, 29, 29, 29, 30, 30, 30, 31, 31, 31, 32,
+ 32
+ };
+ static const uint8_t* build_sweep_table() { return gSweepTable; }
+ #endif
+
++#endif
++
+ // divide numer/denom, with a bias of 6bits. Assumes numer <= denom
+ // and denom != 0. Since our table is 6bits big (+1), this is a nice fit.
+ // Same as (but faster than) SkFixedDiv(numer, denom) >> 10
+
++#ifndef SK_SCALAR_IS_FLOAT
+ //unsigned div_64(int numer, int denom);
+ static unsigned div_64(int numer, int denom) {
+ SkASSERT(numer <= denom);
+ SkASSERT(numer > 0);
+ SkASSERT(denom > 0);
+
+ int nbits = SkCLZ(numer);
+ int dbits = SkCLZ(denom);
+@@ -2294,16 +2270,17 @@ static unsigned atan_0_90(SkFixed y, SkF
+ result = 64 - result;
+ // pin to 63
+ result -= result >> 6;
+ }
+
+ SkASSERT(result <= 63);
+ return result;
+ }
++#endif
+
+ // returns angle in a circle [0..2PI) -> [0..255]
+ #ifdef SK_SCALAR_IS_FLOAT
+ static unsigned SkATan2_255(float y, float x) {
+ // static const float g255Over2PI = 255 / (2 * SK_ScalarPI);
+ static const float g255Over2PI = 40.584510488433314f;
+
+ float result = sk_float_atan2(y, x);
+diff --git a/gfx/skia/src/opts/SkBlitRect_opts_SSE2.cpp b/gfx/skia/src/opts/SkBlitRect_opts_SSE2.cpp
+--- a/gfx/skia/src/opts/SkBlitRect_opts_SSE2.cpp
++++ b/gfx/skia/src/opts/SkBlitRect_opts_SSE2.cpp
+@@ -112,17 +112,17 @@ void BlitRect32_OpaqueWide_SSE2(SkPMColo
+ }
+
+ void ColorRect32_SSE2(SkPMColor* destination,
+ int width, int height,
+ size_t rowBytes, uint32_t color) {
+ if (0 == height || 0 == width || 0 == color) {
+ return;
+ }
+- unsigned colorA = SkGetPackedA32(color);
++ //unsigned colorA = SkGetPackedA32(color);
+ //if (255 == colorA) {
+ //if (width < 31) {
+ //BlitRect32_OpaqueNarrow_SSE2(destination, width, height,
+ //rowBytes, color);
+ //} else {
+ //BlitRect32_OpaqueWide_SSE2(destination, width, height,
+ //rowBytes, color);
+ //}
+diff --git a/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp b/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp
+--- a/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp
++++ b/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp
+@@ -75,20 +75,16 @@ static CGFloat CGRectGetMinY_inline(cons
+ static CGFloat CGRectGetMaxY_inline(const CGRect& rect) {
+ return rect.origin.y + rect.size.height;
+ }
+
+ static CGFloat CGRectGetWidth_inline(const CGRect& rect) {
+ return rect.size.width;
+ }
+
+-static CGFloat CGRectGetHeight(const CGRect& rect) {
+- return rect.size.height;
+-}
+-
+ ///////////////////////////////////////////////////////////////////////////////
+
+ static void sk_memset_rect32(uint32_t* ptr, uint32_t value, size_t width,
+ size_t height, size_t rowBytes) {
+ SkASSERT(width);
+ SkASSERT(width * sizeof(uint32_t) <= rowBytes);
+
+ if (width >= 32) {
+@@ -125,28 +121,30 @@ static void sk_memset_rect32(uint32_t* p
+ *ptr++ = value;
+ } while (--w > 0);
+ ptr = (uint32_t*)((char*)ptr + rowBytes);
+ height -= 1;
+ }
+ }
+ }
+
++#if 0
+ // Potentially this should be made (1) public (2) optimized when width is small.
+ // Also might want 16 and 32 bit version
+ //
+ static void sk_memset_rect(void* ptr, U8CPU byte, size_t width, size_t height,
+ size_t rowBytes) {
+ uint8_t* dst = (uint8_t*)ptr;
+ while (height) {
+ memset(dst, byte, width);
+ dst += rowBytes;
+ height -= 1;
+ }
+ }
++#endif
+
+ #include <sys/utsname.h>
+
+ typedef uint32_t CGRGBPixel;
+
+ static unsigned CGRGBPixel_getAlpha(CGRGBPixel pixel) {
+ return pixel & 0xFF;
+ }
+@@ -250,23 +248,16 @@ static CGAffineTransform MatrixToCGAffin
+ return CGAffineTransformMake(ScalarToCG(matrix[SkMatrix::kMScaleX]) * sx,
+ -ScalarToCG(matrix[SkMatrix::kMSkewY]) * sy,
+ -ScalarToCG(matrix[SkMatrix::kMSkewX]) * sx,
+ ScalarToCG(matrix[SkMatrix::kMScaleY]) * sy,
+ ScalarToCG(matrix[SkMatrix::kMTransX]) * sx,
+ ScalarToCG(matrix[SkMatrix::kMTransY]) * sy);
+ }
+
+-static void CGAffineTransformToMatrix(const CGAffineTransform& xform, SkMatrix* matrix) {
+- matrix->setAll(
+- CGToScalar(xform.a), CGToScalar(xform.c), CGToScalar(xform.tx),
+- CGToScalar(xform.b), CGToScalar(xform.d), CGToScalar(xform.ty),
+- 0, 0, SK_Scalar1);
+-}
+-
+ static SkScalar getFontScale(CGFontRef cgFont) {
+ int unitsPerEm = CGFontGetUnitsPerEm(cgFont);
+ return SkScalarInvert(SkIntToScalar(unitsPerEm));
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ #define BITMAP_INFO_RGB (kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Host)
+@@ -1075,16 +1066,17 @@ static const uint8_t* getInverseTable(bo
+ if (!gInited) {
+ build_power_table(gWhiteTable, 1.5f);
+ build_power_table(gTable, 2.2f);
+ gInited = true;
+ }
+ return isWhite ? gWhiteTable : gTable;
+ }
+
++#ifdef SK_USE_COLOR_LUMINANCE
+ static const uint8_t* getGammaTable(U8CPU luminance) {
+ static uint8_t gGammaTables[4][256];
+ static bool gInited;
+ if (!gInited) {
+ #if 1
+ float start = 1.1;
+ float stop = 2.1;
+ for (int i = 0; i < 4; ++i) {
+@@ -1097,45 +1089,49 @@ static const uint8_t* getGammaTable(U8CP
+ build_power_table(gGammaTables[2], 1);
+ build_power_table(gGammaTables[3], 1);
+ #endif
+ gInited = true;
+ }
+ SkASSERT(0 == (luminance >> 8));
+ return gGammaTables[luminance >> 6];
+ }
++#endif
+
++#ifndef SK_USE_COLOR_LUMINANCE
+ static void invertGammaMask(bool isWhite, CGRGBPixel rgb[], int width,
+ int height, size_t rb) {
+ const uint8_t* table = getInverseTable(isWhite);
+ for (int y = 0; y < height; ++y) {
+ for (int x = 0; x < width; ++x) {
+ uint32_t c = rgb[x];
+ int r = (c >> 16) & 0xFF;
+ int g = (c >> 8) & 0xFF;
+ int b = (c >> 0) & 0xFF;
+ rgb[x] = (table[r] << 16) | (table[g] << 8) | table[b];
+ }
+ rgb = (CGRGBPixel*)((char*)rgb + rb);
+ }
+ }
++#endif
+
+ static void cgpixels_to_bits(uint8_t dst[], const CGRGBPixel src[], int count) {
+ while (count > 0) {
+ uint8_t mask = 0;
+ for (int i = 7; i >= 0; --i) {
+ mask |= (CGRGBPixel_getAlpha(*src++) >> 7) << i;
+ if (0 == --count) {
+ break;
+ }
+ }
+ *dst++ = mask;
+ }
+ }
+
++#ifdef SK_USE_COLOR_LUMINANCE
+ static int lerpScale(int dst, int src, int scale) {
+ return dst + (scale * (src - dst) >> 23);
+ }
+
+ static CGRGBPixel lerpPixel(CGRGBPixel dst, CGRGBPixel src,
+ int scaleR, int scaleG, int scaleB) {
+ int sr = (src >> 16) & 0xFF;
+ int sg = (src >> 8) & 0xFF;
+@@ -1147,37 +1143,31 @@ static CGRGBPixel lerpPixel(CGRGBPixel d
+ int rr = lerpScale(dr, sr, scaleR);
+ int rg = lerpScale(dg, sg, scaleG);
+ int rb = lerpScale(db, sb, scaleB);
+ return (rr << 16) | (rg << 8) | rb;
+ }
+
+ static void lerpPixels(CGRGBPixel dst[], const CGRGBPixel src[], int width,
+ int height, int rowBytes, int lumBits) {
+-#ifdef SK_USE_COLOR_LUMINANCE
+ int scaleR = (1 << 23) * SkColorGetR(lumBits) / 0xFF;
+ int scaleG = (1 << 23) * SkColorGetG(lumBits) / 0xFF;
+ int scaleB = (1 << 23) * SkColorGetB(lumBits) / 0xFF;
+-#else
+- int scale = (1 << 23) * lumBits / SkScalerContext::kLuminance_Max;
+- int scaleR = scale;
+- int scaleG = scale;
+- int scaleB = scale;
+-#endif
+
+ for (int y = 0; y < height; ++y) {
+ for (int x = 0; x < width; ++x) {
+ // bit-not the src, since it was drawn from black, so we need the
+ // compliment of those bits
+ dst[x] = lerpPixel(dst[x], ~src[x], scaleR, scaleG, scaleB);
+ }
+ src = (CGRGBPixel*)((char*)src + rowBytes);
+ dst = (CGRGBPixel*)((char*)dst + rowBytes);
+ }
+ }
++#endif
+
+ #if 1
+ static inline int r32_to_16(int x) { return SkR32ToR16(x); }
+ static inline int g32_to_16(int x) { return SkG32ToG16(x); }
+ static inline int b32_to_16(int x) { return SkB32ToB16(x); }
+ #else
+ static inline int round8to5(int x) {
+ return (x + 3 - (x >> 5) + (x >> 7)) >> 3;
+@@ -1212,22 +1202,21 @@ static inline uint32_t rgb_to_lcd32(CGRG
+ return SkPackARGB32(0xFF, r, g, b);
+ }
+
+ #define BLACK_LUMINANCE_LIMIT 0x40
+ #define WHITE_LUMINANCE_LIMIT 0xA0
+
+ void SkScalerContext_Mac::generateImage(const SkGlyph& glyph) {
+ CGGlyph cgGlyph = (CGGlyph) glyph.getGlyphID(fBaseGlyphCount);
+-
+ const bool isLCD = isLCDFormat(glyph.fMaskFormat);
++#ifdef SK_USE_COLOR_LUMINANCE
+ const bool isBW = SkMask::kBW_Format == glyph.fMaskFormat;
+ const bool isA8 = !isLCD && !isBW;
+-
+-#ifdef SK_USE_COLOR_LUMINANCE
++
+ unsigned lumBits = fRec.getLuminanceColor();
+ uint32_t xorMask = 0;
+
+ if (isA8) {
+ // for A8, we just want a component (they're all the same)
+ lumBits = SkColorGetR(lumBits);
+ }
+ #else
+diff --git a/gfx/skia/src/utils/mac/SkCreateCGImageRef.cpp b/gfx/skia/src/utils/mac/SkCreateCGImageRef.cpp
+--- a/gfx/skia/src/utils/mac/SkCreateCGImageRef.cpp
++++ b/gfx/skia/src/utils/mac/SkCreateCGImageRef.cpp
+@@ -163,59 +163,8 @@ private:
+ CGPDFDocumentRef fDoc;
+ };
+
+ static void CGDataProviderReleaseData_FromMalloc(void*, const void* data,
+ size_t size) {
+ sk_free((void*)data);
+ }
+
+-bool SkPDFDocumentToBitmap(SkStream* stream, SkBitmap* output) {
+- size_t size = stream->getLength();
+- void* ptr = sk_malloc_throw(size);
+- stream->read(ptr, size);
+- CGDataProviderRef data = CGDataProviderCreateWithData(NULL, ptr, size,
+- CGDataProviderReleaseData_FromMalloc);
+- if (NULL == data) {
+- return false;
+- }
+-
+- CGPDFDocumentRef pdf = CGPDFDocumentCreateWithProvider(data);
+- CGDataProviderRelease(data);
+- if (NULL == pdf) {
+- return false;
+- }
+- SkAutoPDFRelease releaseMe(pdf);
+-
+- CGPDFPageRef page = CGPDFDocumentGetPage(pdf, 1);
+- if (NULL == page) {
+- return false;
+- }
+-
+- CGRect bounds = CGPDFPageGetBoxRect(page, kCGPDFMediaBox);
+-
+- int w = (int)CGRectGetWidth(bounds);
+- int h = (int)CGRectGetHeight(bounds);
+-
+- SkBitmap bitmap;
+- bitmap.setConfig(SkBitmap::kARGB_8888_Config, w, h);
+- bitmap.allocPixels();
+- bitmap.eraseColor(SK_ColorWHITE);
+-
+- size_t bitsPerComponent;
+- CGBitmapInfo info;
+- getBitmapInfo(bitmap, &bitsPerComponent, &info, NULL);
+-
+- CGColorSpaceRef cs = CGColorSpaceCreateDeviceRGB();
+- CGContextRef ctx = CGBitmapContextCreate(bitmap.getPixels(), w, h,
+- bitsPerComponent, bitmap.rowBytes(),
+- cs, info);
+- CGColorSpaceRelease(cs);
+-
+- if (ctx) {
+- CGContextDrawPDFPage(ctx, page);
+- CGContextRelease(ctx);
+- }
+-
+- output->swap(bitmap);
+- return true;
+-}
+-
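
The hunks above wrap Skia's Mac LCD-text gamma helpers in SK_USE_COLOR_LUMINANCE guards. Two pieces of arithmetic carry the whole scheme: a 256-entry power table built from pow(i/255, gamma), and a 23-bit fixed-point per-channel lerp (lerpScale/lerpPixel). Below is a minimal standalone sketch of both, using illustrative names rather than the patched Skia functions:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Fill a 256-entry table with pow(i/255, gamma) scaled back to 0..255,
    // the same job build_power_table() does in the patch above.
    static void build_gamma_table(uint8_t table[256], float gamma) {
        for (int i = 0; i < 256; ++i) {
            float x = i / 255.0f;
            table[i] = (uint8_t)(std::pow(x, gamma) * 255.0f + 0.5f);
        }
    }

    // 23-bit fixed-point lerp: scale == 0 keeps dst, scale == 1 << 23 picks src.
    // Same shape as lerpScale() above; lerpPixel() applies it per channel.
    static int lerp_scale(int dst, int src, int scale) {
        return dst + ((scale * (src - dst)) >> 23);
    }

    int main() {
        uint8_t gamma22[256];
        build_gamma_table(gamma22, 2.2f);

        int scale = (1 << 23) * 0x80 / 0xFF;   // roughly 50% luminance weight
        std::printf("gamma22[128] = %d\n", gamma22[128]);
        std::printf("lerp(32, 224) = %d\n", lerp_scale(32, 224, scale));
        return 0;
    }
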
diff --git a/gfx/skia/patches/archive/0016-Bug-718849-Radial-gradients.patch b/gfx/skia/patches/archive/0016-Bug-718849-Radial-gradients.patch
new file mode 100644
index 000000000..e00fd8602
--- /dev/null
+++ b/gfx/skia/patches/archive/0016-Bug-718849-Radial-gradients.patch
@@ -0,0 +1,400 @@
+# HG changeset patch
+# User Matt Woodrow <mwoodrow@mozilla.com>
+# Date 1339988782 -43200
+# Node ID 1e9dae659ee6c992f719fd4136efbcc5410ded37
+# Parent 946750f6d95febd199fb7b748e9d2c48fd01c8a6
+[mq]: skia-windows-gradients
+
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -847,16 +847,19 @@ bool Linear_Gradient::setContext(const S
+ fFlags |= SkShader::kConstInY32_Flag;
+ if ((fFlags & SkShader::kHasSpan16_Flag) && !paint.isDither()) {
+ // only claim this if we do have a 16bit mode (i.e. none of our
+ // colors have alpha), and if we are not dithering (which obviously
+ // is not const in Y).
+ fFlags |= SkShader::kConstInY16_Flag;
+ }
+ }
++ if (fStart == fEnd) {
++ fFlags &= ~kOpaqueAlpha_Flag;
++ }
+ return true;
+ }
+
+ #define NO_CHECK_ITER \
+ do { \
+ unsigned fi = fx >> Gradient_Shader::kCache32Shift; \
+ SkASSERT(fi <= 0xFF); \
+ fx += dx; \
+@@ -976,16 +979,21 @@ void Linear_Gradient::shadeSpan(int x, i
+ TileProc proc = fTileProc;
+ const SkPMColor* SK_RESTRICT cache = this->getCache32();
+ #ifdef USE_DITHER_32BIT_GRADIENT
+ int toggle = ((x ^ y) & 1) * kDitherStride32;
+ #else
+ int toggle = 0;
+ #endif
+
++ if (fStart == fEnd) {
++ sk_bzero(dstC, count * sizeof(*dstC));
++ return;
++ }
++
+ if (fDstToIndexClass != kPerspective_MatrixClass) {
+ dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
+ SkFixed dx, fx = SkScalarToFixed(srcPt.fX);
+
+ if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
+ SkFixed dxStorage[1];
+ (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), dxStorage, NULL);
+@@ -1169,16 +1177,21 @@ void Linear_Gradient::shadeSpan16(int x,
+ SkASSERT(count > 0);
+
+ SkPoint srcPt;
+ SkMatrix::MapXYProc dstProc = fDstToIndexProc;
+ TileProc proc = fTileProc;
+ const uint16_t* SK_RESTRICT cache = this->getCache16();
+ int toggle = ((x ^ y) & 1) * kDitherStride16;
+
++ if (fStart == fEnd) {
++ sk_bzero(dstC, count * sizeof(*dstC));
++ return;
++ }
++
+ if (fDstToIndexClass != kPerspective_MatrixClass) {
+ dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
+ SkFixed dx, fx = SkScalarToFixed(srcPt.fX);
+
+ if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
+ SkFixed dxStorage[1];
+ (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), dxStorage, NULL);
+@@ -1739,21 +1752,25 @@ void Radial_Gradient::shadeSpan(int x, i
+ possible circles on which the point may fall. Solving for t yields
+ the gradient value to use.
+
+ If a<0, the start circle is entirely contained in the
+ end circle, and one of the roots will be <0 or >1 (off the line
+ segment). If a>0, the start circle falls at least partially
+ outside the end circle (or vice versa), and the gradient
+ defines a "tube" where a point may be on one circle (on the
+- inside of the tube) or the other (outside of the tube). We choose
+- one arbitrarily.
++ inside of the tube) or the other (outside of the tube). We choose
++ the one with the highest t value, as long as the radius that it
++ corresponds to is >=0. In the case where neither root has a positive
++ radius, we don't draw anything.
+
++ XXXmattwoodrow: I've removed this for now since it breaks
++ down when Dr == 0. Is there something else we can do instead?
+ In order to keep the math to within the limits of fixed point,
+- we divide the entire quadratic by Dr^2, and replace
++ we divide the entire quadratic by Dr, and replace
+ (x - Sx)/Dr with x' and (y - Sy)/Dr with y', giving
+
+ [Dx^2 / Dr^2 + Dy^2 / Dr^2 - 1)] * t^2
+ + 2 * [x' * Dx / Dr + y' * Dy / Dr - Sr / Dr] * t
+ + [x'^2 + y'^2 - Sr^2/Dr^2] = 0
+
+ (x' and y' are computed by appending the subtract and scale to the
+ fDstToIndex matrix in the constructor).
+@@ -1763,99 +1780,122 @@ void Radial_Gradient::shadeSpan(int x, i
+ x' and y', if x and y are linear in the span, 'B' can be computed
+ incrementally with a simple delta (db below). If it is not (e.g.,
+ a perspective projection), it must be computed in the loop.
+
+ */
+
+ namespace {
+
+-inline SkFixed two_point_radial(SkScalar b, SkScalar fx, SkScalar fy,
+- SkScalar sr2d2, SkScalar foura,
+- SkScalar oneOverTwoA, bool posRoot) {
++inline bool two_point_radial(SkScalar b, SkScalar fx, SkScalar fy,
++ SkScalar sr2d2, SkScalar foura,
++ SkScalar oneOverTwoA, SkScalar diffRadius,
++ SkScalar startRadius, SkFixed& t) {
+ SkScalar c = SkScalarSquare(fx) + SkScalarSquare(fy) - sr2d2;
+ if (0 == foura) {
+- return SkScalarToFixed(SkScalarDiv(-c, b));
++ SkScalar result = SkScalarDiv(-c, b);
++ if (result * diffRadius + startRadius >= 0) {
++ t = SkScalarToFixed(result);
++ return true;
++ }
++ return false;
+ }
+
+ SkScalar discrim = SkScalarSquare(b) - SkScalarMul(foura, c);
+ if (discrim < 0) {
+- discrim = -discrim;
++ return false;
+ }
+ SkScalar rootDiscrim = SkScalarSqrt(discrim);
+- SkScalar result;
+- if (posRoot) {
+- result = SkScalarMul(-b + rootDiscrim, oneOverTwoA);
+- } else {
+- result = SkScalarMul(-b - rootDiscrim, oneOverTwoA);
++
++    // Make sure the result corresponds to a positive radius.
++ SkScalar result = SkScalarMul(-b + rootDiscrim, oneOverTwoA);
++ if (result * diffRadius + startRadius >= 0) {
++ t = SkScalarToFixed(result);
++ return true;
+ }
+- return SkScalarToFixed(result);
++ result = SkScalarMul(-b - rootDiscrim, oneOverTwoA);
++ if (result * diffRadius + startRadius >= 0) {
++ t = SkScalarToFixed(result);
++ return true;
++ }
++
++ return false;
+ }
+
+ typedef void (* TwoPointRadialShadeProc)(SkScalar fx, SkScalar dx,
+ SkScalar fy, SkScalar dy,
+ SkScalar b, SkScalar db,
+- SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA, bool posRoot,
++ SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA,
++ SkScalar fDiffRadius, SkScalar fRadius1,
+ SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
+ int count);
+
+ void shadeSpan_twopoint_clamp(SkScalar fx, SkScalar dx,
+ SkScalar fy, SkScalar dy,
+ SkScalar b, SkScalar db,
+- SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA, bool posRoot,
++ SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA,
++ SkScalar fDiffRadius, SkScalar fRadius1,
+ SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
+ int count) {
+ for (; count > 0; --count) {
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+- fOneOverTwoA, posRoot);
+-
+- if (t < 0) {
++ SkFixed t;
++ if (!two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, fDiffRadius, fRadius1, t)) {
++ *(dstC++) = 0;
++ } else if (t < 0) {
+ *dstC++ = cache[-1];
+ } else if (t > 0xFFFF) {
+ *dstC++ = cache[Gradient_Shader::kCache32Count * 2];
+ } else {
+ SkASSERT(t <= 0xFFFF);
+ *dstC++ = cache[t >> Gradient_Shader::kCache32Shift];
+ }
+
+ fx += dx;
+ fy += dy;
+ b += db;
+ }
+ }
+ void shadeSpan_twopoint_mirror(SkScalar fx, SkScalar dx,
+ SkScalar fy, SkScalar dy,
+ SkScalar b, SkScalar db,
+- SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA, bool posRoot,
++ SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA,
++ SkScalar fDiffRadius, SkScalar fRadius1,
+ SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
+ int count) {
+ for (; count > 0; --count) {
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+- fOneOverTwoA, posRoot);
+- SkFixed index = mirror_tileproc(t);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> Gradient_Shader::kCache32Shift];
++ SkFixed t;
++ if (!two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, fDiffRadius, fRadius1, t)) {
++ *(dstC++) = 0;
++ } else {
++ SkFixed index = mirror_tileproc(t);
++ SkASSERT(index <= 0xFFFF);
++ *dstC++ = cache[index >> (16 - Gradient_Shader::kCache32Shift)];
++ }
+ fx += dx;
+ fy += dy;
+ b += db;
+ }
+ }
+
+ void shadeSpan_twopoint_repeat(SkScalar fx, SkScalar dx,
+ SkScalar fy, SkScalar dy,
+ SkScalar b, SkScalar db,
+- SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA, bool posRoot,
++ SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA,
++ SkScalar fDiffRadius, SkScalar fRadius1,
+ SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
+ int count) {
+ for (; count > 0; --count) {
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+- fOneOverTwoA, posRoot);
+- SkFixed index = repeat_tileproc(t);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> Gradient_Shader::kCache32Shift];
++ SkFixed t;
++ if (!two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, fDiffRadius, fRadius1, t)) {
++ *(dstC++) = 0;
++ } else {
++ SkFixed index = repeat_tileproc(t);
++ SkASSERT(index <= 0xFFFF);
++ *dstC++ = cache[index >> (16 - Gradient_Shader::kCache32Shift)];
++ }
+ fx += dx;
+ fy += dy;
+ b += db;
+ }
+ }
+
+
+
+@@ -1935,17 +1975,16 @@ public:
+ sk_bzero(dstC, count * sizeof(*dstC));
+ return;
+ }
+ SkMatrix::MapXYProc dstProc = fDstToIndexProc;
+ TileProc proc = fTileProc;
+ const SkPMColor* SK_RESTRICT cache = this->getCache32();
+
+ SkScalar foura = fA * 4;
+- bool posRoot = fDiffRadius < 0;
+ if (fDstToIndexClass != kPerspective_MatrixClass) {
+ SkPoint srcPt;
+ dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
+ SkScalar dx, fx = srcPt.fX;
+ SkScalar dy, fy = srcPt.fY;
+
+ if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
+@@ -1954,60 +1993,69 @@ public:
+ dx = SkFixedToScalar(fixedX);
+ dy = SkFixedToScalar(fixedY);
+ } else {
+ SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
+ dx = fDstToIndex.getScaleX();
+ dy = fDstToIndex.getSkewY();
+ }
+ SkScalar b = (SkScalarMul(fDiff.fX, fx) +
+- SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
++ SkScalarMul(fDiff.fY, fy) - fStartRadius * fDiffRadius) * 2;
+ SkScalar db = (SkScalarMul(fDiff.fX, dx) +
+ SkScalarMul(fDiff.fY, dy)) * 2;
+
+ TwoPointRadialShadeProc shadeProc = shadeSpan_twopoint_repeat;
+ if (proc == clamp_tileproc) {
+ shadeProc = shadeSpan_twopoint_clamp;
+ } else if (proc == mirror_tileproc) {
+ shadeProc = shadeSpan_twopoint_mirror;
+ } else {
+ SkASSERT(proc == repeat_tileproc);
+ }
+ (*shadeProc)(fx, dx, fy, dy, b, db,
+- fSr2D2, foura, fOneOverTwoA, posRoot,
++ fSr2D2, foura, fOneOverTwoA, fDiffRadius, fRadius1,
+ dstC, cache, count);
+ } else { // perspective case
+ SkScalar dstX = SkIntToScalar(x);
+ SkScalar dstY = SkIntToScalar(y);
+ for (; count > 0; --count) {
+ SkPoint srcPt;
+ dstProc(fDstToIndex, dstX, dstY, &srcPt);
+ SkScalar fx = srcPt.fX;
+ SkScalar fy = srcPt.fY;
+ SkScalar b = (SkScalarMul(fDiff.fX, fx) +
+ SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+- fOneOverTwoA, posRoot);
+- SkFixed index = proc(t);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> Gradient_Shader::kCache32Shift];
++ SkFixed t;
++ if (!two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, fDiffRadius, fRadius1, t)) {
++ *(dstC++) = 0;
++ } else {
++ SkFixed index = proc(t);
++ SkASSERT(index <= 0xFFFF);
++ *dstC++ = cache[index >> (16 - kCache32Bits)];
++ }
+ dstX += SK_Scalar1;
+ }
+ }
+ }
+
+ virtual bool setContext(const SkBitmap& device,
+ const SkPaint& paint,
+ const SkMatrix& matrix) SK_OVERRIDE {
+ if (!this->INHERITED::setContext(device, paint, matrix)) {
+ return false;
+ }
+
+ // we don't have a span16 proc
+ fFlags &= ~kHasSpan16_Flag;
++
++ // If we might end up wanting to draw nothing as part of the gradient
++ // then we should mark ourselves as not being opaque.
++ if (fA >= 0 || (fDiffRadius == 0 && fCenter1 == fCenter2)) {
++ fFlags &= ~kOpaqueAlpha_Flag;
++ }
+ return true;
+ }
+
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(Two_Point_Radial_Gradient)
+
+ protected:
+ Two_Point_Radial_Gradient(SkFlattenableReadBuffer& buffer)
+ : INHERITED(buffer),
+@@ -2033,26 +2081,22 @@ private:
+ const SkScalar fRadius1;
+ const SkScalar fRadius2;
+ SkPoint fDiff;
+ SkScalar fStartRadius, fDiffRadius, fSr2D2, fA, fOneOverTwoA;
+
+ void init() {
+ fDiff = fCenter1 - fCenter2;
+ fDiffRadius = fRadius2 - fRadius1;
+- SkScalar inv = SkScalarInvert(fDiffRadius);
+- fDiff.fX = SkScalarMul(fDiff.fX, inv);
+- fDiff.fY = SkScalarMul(fDiff.fY, inv);
+- fStartRadius = SkScalarMul(fRadius1, inv);
++ fStartRadius = fRadius1;
+ fSr2D2 = SkScalarSquare(fStartRadius);
+- fA = SkScalarSquare(fDiff.fX) + SkScalarSquare(fDiff.fY) - SK_Scalar1;
++ fA = SkScalarSquare(fDiff.fX) + SkScalarSquare(fDiff.fY) - SkScalarSquare(fDiffRadius);
+ fOneOverTwoA = fA ? SkScalarInvert(fA * 2) : 0;
+
+ fPtsToUnit.setTranslate(-fCenter1.fX, -fCenter1.fY);
+- fPtsToUnit.postScale(inv, inv);
+ }
+ };
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ class Sweep_Gradient : public Gradient_Shader {
+ public:
+ Sweep_Gradient(SkScalar cx, SkScalar cy, const SkColor colors[],
+@@ -2488,16 +2532,20 @@ SkShader* SkGradientShader::CreateTwoPoi
+ int colorCount,
+ SkShader::TileMode mode,
+ SkUnitMapper* mapper) {
+ if (startRadius < 0 || endRadius < 0 || NULL == colors || colorCount < 1) {
+ return NULL;
+ }
+ EXPAND_1_COLOR(colorCount);
+
++ if (start == end && startRadius == 0) {
++ return CreateRadial(start, endRadius, colors, pos, colorCount, mode, mapper);
++ }
++
+ return SkNEW_ARGS(Two_Point_Radial_Gradient,
+ (start, startRadius, end, endRadius, colors, pos,
+ colorCount, mode, mapper));
+ }
+
+ SkShader* SkGradientShader::CreateSweep(SkScalar cx, SkScalar cy,
+ const SkColor colors[],
+ const SkScalar pos[],
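
The long comment and the reworked two_point_radial() above boil down to: solve the quadratic in t, accept a root only if the radius it implies, startRadius + t * diffRadius, is non-negative, try the (-b + sqrt(discriminant)) root first, and draw nothing when neither qualifies. A float-based sketch of that selection policy (illustrative helper names, not the SkScalar fixed-point code in the patch):

    #include <cmath>
    #include <cstdio>

    // Solve a*t^2 + b*t + c = 0 and accept a root only when the interpolated
    // radius r1 + t*dr is non-negative, trying the (-b + sqrt(discrim)) root
    // first, mirroring the bool-returning two_point_radial() in the patch above.
    // Returns false when no acceptable root exists, i.e. nothing is drawn.
    static bool pick_gradient_t(float a, float b, float c,
                                float r1, float dr, float* t) {
        if (a == 0.0f) {                       // degenerate: the equation is linear
            if (b == 0.0f) {
                return false;
            }
            float result = -c / b;
            if (result * dr + r1 >= 0) { *t = result; return true; }
            return false;
        }
        float discrim = b * b - 4.0f * a * c;
        if (discrim < 0.0f) {
            return false;                      // no real root: point is outside the gradient
        }
        float root = std::sqrt(discrim);
        float first  = (-b + root) / (2.0f * a);
        float second = (-b - root) / (2.0f * a);
        if (first * dr + r1 >= 0)  { *t = first;  return true; }
        if (second * dr + r1 >= 0) { *t = second; return true; }
        return false;
    }

    int main() {
        float t;
        // Start radius 10, end radius 60, so dr = 50.
        if (pick_gradient_t(-1.0f, 4.0f, -3.0f, 10.0f, 50.0f, &t)) {
            std::printf("t = %g\n", t);
        } else {
            std::printf("no valid root, draw nothing\n");
        }
        return 0;
    }
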
diff --git a/gfx/skia/patches/archive/0017-Bug-740194-SkMemory-mozalloc.patch b/gfx/skia/patches/archive/0017-Bug-740194-SkMemory-mozalloc.patch
new file mode 100644
index 000000000..719fda165
--- /dev/null
+++ b/gfx/skia/patches/archive/0017-Bug-740194-SkMemory-mozalloc.patch
@@ -0,0 +1,73 @@
+commit 5786f516119bcb677510f3c9256b870c3b5616c8
+Author: George Wright <gwright@mozilla.com>
+Date: Wed Aug 15 23:51:34 2012 -0400
+
+ Bug 740194 - [Skia] Implement a version of SkMemory for Mozilla that uses the infallible mozalloc allocators r=cjones
+
+diff --git a/gfx/skia/include/config/SkUserConfig.h b/gfx/skia/include/config/SkUserConfig.h
+index f98ba85..17be191 100644
+--- a/gfx/skia/include/config/SkUserConfig.h
++++ b/gfx/skia/include/config/SkUserConfig.h
+@@ -35,6 +35,16 @@
+ commented out, so including it will have no effect.
+ */
+
++/*
++ Override new/delete with Mozilla's allocator, mozalloc
++
++ Ideally we shouldn't need to do this here, but until
++ http://code.google.com/p/skia/issues/detail?id=598 is fixed
++ we need to include this here to override operator new and delete
++*/
++
++#include "mozilla/mozalloc.h"
++
+ ///////////////////////////////////////////////////////////////////////////////
+
+ /* Scalars (the fractional value type in skia) can be implemented either as
+diff --git a/gfx/skia/src/ports/SkMemory_mozalloc.cpp b/gfx/skia/src/ports/SkMemory_mozalloc.cpp
+new file mode 100644
+index 0000000..1f16ee5
+--- /dev/null
++++ b/gfx/skia/src/ports/SkMemory_mozalloc.cpp
+@@ -0,0 +1,40 @@
++/*
++ * Copyright 2011 Google Inc.
++ * Copyright 2012 Mozilla Foundation
++ *
++ * Use of this source code is governed by a BSD-style license that can be
++ * found in the LICENSE file.
++ */
++
++#include "SkTypes.h"
++
++#include "mozilla/mozalloc.h"
++#include "mozilla/mozalloc_abort.h"
++#include "mozilla/mozalloc_oom.h"
++
++void sk_throw() {
++ SkDEBUGFAIL("sk_throw");
++ mozalloc_abort("Abort from sk_throw");
++}
++
++void sk_out_of_memory(void) {
++ SkDEBUGFAIL("sk_out_of_memory");
++ mozalloc_handle_oom(0);
++}
++
++void* sk_malloc_throw(size_t size) {
++ return sk_malloc_flags(size, SK_MALLOC_THROW);
++}
++
++void* sk_realloc_throw(void* addr, size_t size) {
++ return moz_xrealloc(addr, size);
++}
++
++void sk_free(void* p) {
++ free(p);
++}
++
++void* sk_malloc_flags(size_t size, unsigned flags) {
++ return (flags & SK_MALLOC_THROW) ? moz_xmalloc(size) : malloc(size);
++}
++
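
The port above routes Skia's allocation entry points to mozalloc: SK_MALLOC_THROW requests go to the infallible moz_xmalloc, everything else to plain malloc. A generic sketch of the same flag dispatch against the standard library only, with an aborting helper standing in for the infallible allocator (names and the flag value are illustrative):

    #include <cstdio>
    #include <cstdlib>

    static const unsigned SK_MALLOC_THROW_FLAG = 0x01;  // illustrative flag value

    // Infallible path: never returns NULL, aborts on failure instead,
    // which is the contract moz_xmalloc provides in the patch above.
    static void* malloc_infallible(size_t size) {
        void* p = std::malloc(size);
        if (!p) {
            std::fprintf(stderr, "out of memory allocating %zu bytes\n", size);
            std::abort();
        }
        return p;
    }

    // Flag dispatch mirroring sk_malloc_flags(): THROW means "may not fail".
    static void* sk_malloc_flags_sketch(size_t size, unsigned flags) {
        return (flags & SK_MALLOC_THROW_FLAG) ? malloc_infallible(size)
                                              : std::malloc(size);
    }

    int main() {
        void* p = sk_malloc_flags_sketch(64, SK_MALLOC_THROW_FLAG);
        std::printf("got %p\n", p);
        std::free(p);
        return 0;
    }
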
diff --git a/gfx/skia/patches/archive/0018-Bug-817356-PPC-defines.patch b/gfx/skia/patches/archive/0018-Bug-817356-PPC-defines.patch
new file mode 100644
index 000000000..d16ec4b3b
--- /dev/null
+++ b/gfx/skia/patches/archive/0018-Bug-817356-PPC-defines.patch
@@ -0,0 +1,14 @@
+Index: gfx/skia/include/core/SkPreConfig.h
+===================================================================
+--- gfx/skia/include/core/SkPreConfig.h (revision 6724)
++++ gfx/skia/include/core/SkPreConfig.h (working copy)
+@@ -94,7 +94,8 @@
+ //////////////////////////////////////////////////////////////////////
+
+ #if !defined(SK_CPU_BENDIAN) && !defined(SK_CPU_LENDIAN)
+- #if defined (__ppc__) || defined(__ppc64__)
++ #if defined (__ppc__) || defined(__PPC__) || defined(__ppc64__) \
++ || defined(__PPC64__)
+ #define SK_CPU_BENDIAN
+ #else
+ #define SK_CPU_LENDIAN
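
The PPC patch above only widens the compile-time check that picks SK_CPU_BENDIAN over SK_CPU_LENDIAN to cover the __PPC__/__PPC64__ macro spellings. For reference, a tiny runtime probe that confirms what the preprocessor decided (a sketch, not part of Skia):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Runtime counterpart of the SK_CPU_BENDIAN / SK_CPU_LENDIAN choice made
    // by the preprocessor above: inspect the byte layout of a known integer.
    static bool is_big_endian() {
        const uint32_t probe = 0x01020304;
        uint8_t bytes[4];
        std::memcpy(bytes, &probe, sizeof(bytes));
        return bytes[0] == 0x01;   // most significant byte stored first
    }

    int main() {
        std::printf("big endian: %s\n", is_big_endian() ? "yes" : "no");
        return 0;
    }
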
diff --git a/gfx/skia/patches/archive/0022-Bug-848491-Re-apply-bug-795538-Ensure-we-use-the-cor.patch b/gfx/skia/patches/archive/0022-Bug-848491-Re-apply-bug-795538-Ensure-we-use-the-cor.patch
new file mode 100644
index 000000000..97404c431
--- /dev/null
+++ b/gfx/skia/patches/archive/0022-Bug-848491-Re-apply-bug-795538-Ensure-we-use-the-cor.patch
@@ -0,0 +1,39 @@
+From: George Wright <gwright@mozilla.com>
+Date: Thu, 20 Jun 2013 09:21:21 -0400
+Subject: Bug 848491 - Re-apply bug 795538 - Ensure we use the correct colour (and alpha) for the clamp values r=mattwoodrow
+
+
+diff --git a/gfx/skia/src/effects/gradients/SkGradientShader.cpp b/gfx/skia/src/effects/gradients/SkGradientShader.cpp
+index 27a9c46..ce077b5 100644
+--- a/gfx/skia/src/effects/gradients/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/gradients/SkGradientShader.cpp
+@@ -500,15 +500,17 @@ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ }
+
+ // Write the clamp colours into the first and last entries of fCache32
+- fCache32[kCache32ClampLower] = SkPackARGB32(fCacheAlpha,
+- SkColorGetR(fOrigColors[0]),
+- SkColorGetG(fOrigColors[0]),
+- SkColorGetB(fOrigColors[0]));
+-
+- fCache32[kCache32ClampUpper] = SkPackARGB32(fCacheAlpha,
+- SkColorGetR(fOrigColors[fColorCount - 1]),
+- SkColorGetG(fOrigColors[fColorCount - 1]),
+- SkColorGetB(fOrigColors[fColorCount - 1]));
++ fCache32[kCache32ClampLower] = SkPremultiplyARGBInline(SkMulDiv255Round(SkColorGetA(fOrigColors[0]),
++ fCacheAlpha),
++ SkColorGetR(fOrigColors[0]),
++ SkColorGetG(fOrigColors[0]),
++ SkColorGetB(fOrigColors[0]));
++
++ fCache32[kCache32ClampUpper] = SkPremultiplyARGBInline(SkMulDiv255Round(SkColorGetA(fOrigColors[fColorCount - 1]),
++ fCacheAlpha),
++ SkColorGetR(fOrigColors[fColorCount - 1]),
++ SkColorGetG(fOrigColors[fColorCount - 1]),
++ SkColorGetB(fOrigColors[fColorCount - 1]));
+
+ return fCache32;
+ }
+--
+1.7.11.7
+
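
The fix above builds the clamp-cache entries with SkPremultiplyARGBInline, folding the stop colour's own alpha (scaled by the cache alpha via SkMulDiv255Round) into the premultiplied value instead of assuming the stop is opaque. A standalone sketch of that premultiply-with-modulated-alpha step in plain integer math (illustrative helpers, not the Skia macros themselves):

    #include <cstdint>
    #include <cstdio>

    // Rounded a*b/255, the same job SkMulDiv255Round does in the patch above.
    static uint8_t mul_div_255_round(uint8_t a, uint8_t b) {
        unsigned prod = unsigned(a) * b + 128;
        return uint8_t((prod + (prod >> 8)) >> 8);
    }

    // Premultiply r, g, b by alpha and pack as ARGB, like SkPremultiplyARGBInline.
    static uint32_t premultiply_argb(uint8_t a, uint8_t r, uint8_t g, uint8_t b) {
        return (uint32_t(a) << 24) |
               (uint32_t(mul_div_255_round(r, a)) << 16) |
               (uint32_t(mul_div_255_round(g, a)) << 8) |
                uint32_t(mul_div_255_round(b, a));
    }

    int main() {
        // A half-transparent red stop, further modulated by a paint alpha of 0x80.
        uint8_t stopAlpha = 0x80, paintAlpha = 0x80;
        uint8_t a = mul_div_255_round(stopAlpha, paintAlpha);
        std::printf("clamp entry = 0x%08x\n", premultiply_argb(a, 0xFF, 0x00, 0x00));
        return 0;
    }
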
diff --git a/gfx/skia/patches/archive/0023-Bug-890539-Fix-SK_COMPILE_ASSERT-build-warning.patch b/gfx/skia/patches/archive/0023-Bug-890539-Fix-SK_COMPILE_ASSERT-build-warning.patch
new file mode 100644
index 000000000..9bc7ddec4
--- /dev/null
+++ b/gfx/skia/patches/archive/0023-Bug-890539-Fix-SK_COMPILE_ASSERT-build-warning.patch
@@ -0,0 +1,39 @@
+# HG changeset patch
+# Parent e378875000890099fffcdb4cbc4ab12828ac34ee
+# User Daniel Holbert <dholbert@cs.stanford.edu>
+Bug 890539: Annotate SK_COMPILE_ASSERT's typedef as permissibly unused, to fix GCC 4.8 build warning. r=gw280
+
+diff --git a/gfx/skia/include/core/SkTypes.h b/gfx/skia/include/core/SkTypes.h
+--- a/gfx/skia/include/core/SkTypes.h
++++ b/gfx/skia/include/core/SkTypes.h
+@@ -121,18 +121,29 @@ inline void operator delete(void* p) {
+ #define SkDEVCODE(code)
+ #define SK_DEVELOPER_TO_STRING()
+ #endif
+
+ template <bool>
+ struct SkCompileAssert {
+ };
+
++/*
++ * The SK_COMPILE_ASSERT definition creates an otherwise-unused typedef. This
++ * triggers compiler warnings with some versions of gcc, so mark the typedef
++ * as permissibly-unused to disable the warnings.
++ */
++# if defined(__GNUC__)
++# define SK_COMPILE_ASSERT_UNUSED_ATTRIBUTE __attribute__((unused))
++# else
++# define SK_COMPILE_ASSERT_UNUSED_ATTRIBUTE /* nothing */
++# endif
++
+ #define SK_COMPILE_ASSERT(expr, msg) \
+- typedef SkCompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1]
++ typedef SkCompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1] SK_COMPILE_ASSERT_UNUSED_ATTRIBUTE
+
+ /*
+ * Usage: SK_MACRO_CONCAT(a, b) to construct the symbol ab
+ *
+ * SK_MACRO_CONCAT_IMPL_PRIV just exists to make this work. Do not use directly
+ *
+ */
+ #define SK_MACRO_CONCAT(X, Y) SK_MACRO_CONCAT_IMPL_PRIV(X, Y)
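
The macro patched above expands to a typedef of an array whose size becomes -1 when the asserted condition is false, which is what makes the compile fail; when the condition is true, GCC 4.8 warns that the typedef is unused, hence the __attribute__((unused)) annotation. A minimal reproduction of the pattern with illustrative macro names:

    #include <cstdio>

    // Pre-C++11 compile-time assert in the style of SK_COMPILE_ASSERT above:
    // a false condition yields an array typedef of negative size and the
    // build breaks. The unused attribute silences GCC's
    // -Wunused-local-typedefs warning when the assert passes.
    #if defined(__GNUC__)
    #  define MY_UNUSED_TYPEDEF __attribute__((unused))
    #else
    #  define MY_UNUSED_TYPEDEF
    #endif

    #define MY_COMPILE_ASSERT(expr, msg) \
        typedef char msg[(expr) ? 1 : -1] MY_UNUSED_TYPEDEF

    MY_COMPILE_ASSERT(sizeof(int) >= 4, int_is_at_least_32_bits);

    int main() {
        std::printf("compile-time assert passed\n");
        return 0;
    }
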
diff --git a/gfx/skia/patches/archive/0024-Bug-887318-fix-bgra-readback.patch b/gfx/skia/patches/archive/0024-Bug-887318-fix-bgra-readback.patch
new file mode 100644
index 000000000..864a0af7a
--- /dev/null
+++ b/gfx/skia/patches/archive/0024-Bug-887318-fix-bgra-readback.patch
@@ -0,0 +1,217 @@
+diff --git a/gfx/gl/GLContextSkia.cpp b/gfx/gl/GLContextSkia.cpp
+--- a/gfx/gl/GLContextSkia.cpp
++++ b/gfx/gl/GLContextSkia.cpp
+@@ -303,39 +303,47 @@ const GLubyte* glGetString_mozilla(GrGLe
+ if (name == LOCAL_GL_VERSION) {
+ if (sGLContext.get()->IsGLES2()) {
+ return reinterpret_cast<const GLubyte*>("OpenGL ES 2.0");
+ } else {
+ return reinterpret_cast<const GLubyte*>("2.0");
+ }
+ } else if (name == LOCAL_GL_EXTENSIONS) {
+ // Only expose the bare minimum extensions we want to support to ensure a functional Ganesh
+ // as GLContext only exposes certain extensions
+ static bool extensionsStringBuilt = false;
+- static char extensionsString[120];
++ static char extensionsString[256];
+
+ if (!extensionsStringBuilt) {
+ if (sGLContext.get()->IsExtensionSupported(GLContext::EXT_texture_format_BGRA8888)) {
+ strcpy(extensionsString, "GL_EXT_texture_format_BGRA8888 ");
+ }
+
+ if (sGLContext.get()->IsExtensionSupported(GLContext::OES_packed_depth_stencil)) {
+ strcat(extensionsString, "GL_OES_packed_depth_stencil ");
+ }
+
+ if (sGLContext.get()->IsExtensionSupported(GLContext::EXT_packed_depth_stencil)) {
+ strcat(extensionsString, "GL_EXT_packed_depth_stencil ");
+ }
+
+ if (sGLContext.get()->IsExtensionSupported(GLContext::OES_rgb8_rgba8)) {
+ strcat(extensionsString, "GL_OES_rgb8_rgba8 ");
+ }
+
++ if (sGLContext.get()->IsExtensionSupported(GLContext::EXT_bgra)) {
++ strcat(extensionsString, "GL_EXT_bgra ");
++ }
++
++ if (sGLContext.get()->IsExtensionSupported(GLContext::EXT_read_format_bgra)) {
++ strcat(extensionsString, "GL_EXT_read_format_bgra ");
++ }
++
+ extensionsStringBuilt = true;
+ }
+
+ return reinterpret_cast<const GLubyte*>(extensionsString);
+
+ } else if (name == LOCAL_GL_SHADING_LANGUAGE_VERSION) {
+ if (sGLContext.get()->IsGLES2()) {
+ return reinterpret_cast<const GLubyte*>("OpenGL ES GLSL ES 1.0");
+ } else {
+ return reinterpret_cast<const GLubyte*>("1.10");
+diff --git a/gfx/skia/src/gpu/gl/GrGpuGL.cpp b/gfx/skia/src/gpu/gl/GrGpuGL.cpp
+--- a/gfx/skia/src/gpu/gl/GrGpuGL.cpp
++++ b/gfx/skia/src/gpu/gl/GrGpuGL.cpp
+@@ -1,18 +1,18 @@
+ /*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+-
++#include <algorithm>
+ #include "GrGpuGL.h"
+ #include "GrGLStencilBuffer.h"
+ #include "GrGLPath.h"
+ #include "GrGLShaderBuilder.h"
+ #include "GrTemplates.h"
+ #include "GrTypes.h"
+ #include "SkTemplates.h"
+
+ static const GrGLuint GR_MAX_GLUINT = ~0U;
+ static const GrGLint GR_INVAL_GLINT = ~0;
+@@ -1381,29 +1381,67 @@ bool GrGpuGL::readPixelsWillPayForYFlip(
+ // Note the rowBytes might be tight to the passed in data, but if data
+ // gets clipped in x to the target the rowBytes will no longer be tight.
+ if (left >= 0 && (left + width) < renderTarget->width()) {
+ return 0 == rowBytes ||
+ GrBytesPerPixel(config) * width == rowBytes;
+ } else {
+ return false;
+ }
+ }
+
++static void swizzleRow(void* buffer, int byteLen) {
++ uint8_t* src = (uint8_t*)buffer;
++ uint8_t* end = src + byteLen;
++
++ GrAssert((end - src) % 4 == 0);
++
++ for (; src != end; src += 4) {
++ std::swap(src[0], src[2]);
++ }
++}
++
++bool GrGpuGL::canReadBGRA() const
++{
++ if (kDesktop_GrGLBinding == this->glBinding() ||
++ this->hasExtension("GL_EXT_bgra"))
++ return true;
++
++ if (this->hasExtension("GL_EXT_read_format_bgra")) {
++ GrGLint readFormat = 0;
++ GrGLint readType = 0;
++
++ GL_CALL(GetIntegerv(GR_GL_IMPLEMENTATION_COLOR_READ_FORMAT, &readFormat));
++ GL_CALL(GetIntegerv(GR_GL_IMPLEMENTATION_COLOR_READ_TYPE, &readType));
++
++ return readFormat == GR_GL_BGRA && readType == GR_GL_UNSIGNED_BYTE;
++ }
++
++ return false;
++}
++
+ bool GrGpuGL::onReadPixels(GrRenderTarget* target,
+ int left, int top,
+ int width, int height,
+ GrPixelConfig config,
+ void* buffer,
+ size_t rowBytes) {
+ GrGLenum format;
+ GrGLenum type;
+ bool flipY = kBottomLeft_GrSurfaceOrigin == target->origin();
++ bool needSwizzle = false;
++
++ if (kBGRA_8888_GrPixelConfig == config && !this->canReadBGRA()) {
++ // Read RGBA and swizzle after
++ config = kRGBA_8888_GrPixelConfig;
++ needSwizzle = true;
++ }
++
+ if (!this->configToGLFormats(config, false, NULL, &format, &type)) {
+ return false;
+ }
+ size_t bpp = GrBytesPerPixel(config);
+ if (!adjust_pixel_ops_params(target->width(), target->height(), bpp,
+ &left, &top, &width, &height,
+ const_cast<const void**>(&buffer),
+ &rowBytes)) {
+ return false;
+ }
+@@ -1478,35 +1516,46 @@ bool GrGpuGL::onReadPixels(GrRenderTarge
+ scratch.reset(tightRowBytes);
+ void* tmpRow = scratch.get();
+ // flip y in-place by rows
+ const int halfY = height >> 1;
+ char* top = reinterpret_cast<char*>(buffer);
+ char* bottom = top + (height - 1) * rowBytes;
+ for (int y = 0; y < halfY; y++) {
+ memcpy(tmpRow, top, tightRowBytes);
+ memcpy(top, bottom, tightRowBytes);
+ memcpy(bottom, tmpRow, tightRowBytes);
++
++ if (needSwizzle) {
++ swizzleRow(top, tightRowBytes);
++ swizzleRow(bottom, tightRowBytes);
++ }
++
+ top += rowBytes;
+ bottom -= rowBytes;
+ }
+ }
+ } else {
+- GrAssert(readDst != buffer); GrAssert(rowBytes != tightRowBytes);
++ GrAssert(readDst != buffer);
++ GrAssert(rowBytes != tightRowBytes);
+ // copy from readDst to buffer while flipping y
+ // const int halfY = height >> 1;
+ const char* src = reinterpret_cast<const char*>(readDst);
+ char* dst = reinterpret_cast<char*>(buffer);
+ if (flipY) {
+ dst += (height-1) * rowBytes;
+ }
+ for (int y = 0; y < height; y++) {
+ memcpy(dst, src, tightRowBytes);
++ if (needSwizzle) {
++ swizzleRow(dst, tightRowBytes);
++ }
++
+ src += readDstRowBytes;
+ if (!flipY) {
+ dst += rowBytes;
+ } else {
+ dst -= rowBytes;
+ }
+ }
+ }
+ return true;
+ }
+diff --git a/gfx/skia/src/gpu/gl/GrGpuGL.h b/gfx/skia/src/gpu/gl/GrGpuGL.h
+--- a/gfx/skia/src/gpu/gl/GrGpuGL.h
++++ b/gfx/skia/src/gpu/gl/GrGpuGL.h
+@@ -243,20 +243,22 @@ private:
+ GrPixelConfig dataConfig,
+ const void* data,
+ size_t rowBytes);
+
+ bool createRenderTargetObjects(int width, int height,
+ GrGLuint texID,
+ GrGLRenderTarget::Desc* desc);
+
+ void fillInConfigRenderableTable();
+
++ bool canReadBGRA() const;
++
+ GrGLContext fGLContext;
+
+ // GL program-related state
+ ProgramCache* fProgramCache;
+ SkAutoTUnref<GrGLProgram> fCurrentProgram;
+
+ ///////////////////////////////////////////////////////////////////////////
+ ///@name Caching of GL State
+ ///@{
+ int fHWActiveTextureUnitIdx;
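
When the GL implementation cannot read back BGRA directly, the patch above reads RGBA instead and swaps the red and blue bytes of each row in place. A standalone sketch of that row swizzle and its effect on a small buffer (not the GrGpuGL code itself):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Swap bytes 0 and 2 of every 4-byte pixel, converting RGBA rows to BGRA
    // (or back) in place - the same loop swizzleRow() runs in the patch above.
    static void swizzle_row(uint8_t* row, int byteLen) {
        for (int i = 0; i + 3 < byteLen; i += 4) {
            std::swap(row[i], row[i + 2]);
        }
    }

    int main() {
        // One row of two RGBA pixels: opaque red, then opaque blue.
        uint8_t row[8] = { 0xFF, 0x00, 0x00, 0xFF,
                           0x00, 0x00, 0xFF, 0xFF };
        swizzle_row(row, sizeof(row));
        for (int i = 0; i < 8; ++i) {
            std::printf("%02x ", row[i]);   // prints the BGRA byte order
        }
        std::printf("\n");
        return 0;
    }
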
diff --git a/gfx/skia/patches/archive/0025-Bug-896049-Add-default-Value-SK_OVERRIDE.patch b/gfx/skia/patches/archive/0025-Bug-896049-Add-default-Value-SK_OVERRIDE.patch
new file mode 100644
index 000000000..aff99f75f
--- /dev/null
+++ b/gfx/skia/patches/archive/0025-Bug-896049-Add-default-Value-SK_OVERRIDE.patch
@@ -0,0 +1,26 @@
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -325,19 +325,19 @@
+ // Some documentation suggests we should be using __attribute__((override)),
+ // but it doesn't work.
+ #define SK_OVERRIDE override
+ #elif defined(__has_extension)
+ #if __has_extension(cxx_override_control)
+ #define SK_OVERRIDE override
+ #endif
+ #endif
+- #else
+- // Linux GCC ignores "__attribute__((override))" and rejects "override".
+- #define SK_OVERRIDE
++ #endif
++ #ifndef SK_OVERRIDE
++ #define SK_OVERRIDE
+ #endif
+ #endif
+
+ //////////////////////////////////////////////////////////////////////
+
+ #ifndef SK_PRINTF_LIKE
+ #if defined(__clang__) || defined(__GNUC__)
+ #define SK_PRINTF_LIKE(A, B) __attribute__((format(printf, (A), (B))))
diff --git a/gfx/skia/patches/archive/0026-Bug-901208-Fix-ARM-v4t.patch b/gfx/skia/patches/archive/0026-Bug-901208-Fix-ARM-v4t.patch
new file mode 100644
index 000000000..5c95b5401
--- /dev/null
+++ b/gfx/skia/patches/archive/0026-Bug-901208-Fix-ARM-v4t.patch
@@ -0,0 +1,83 @@
+diff --git a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+--- a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
++++ b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+@@ -26,66 +26,78 @@ static void S32A_D565_Opaque(uint16_t* S
+ asm volatile (
+ "1: \n\t"
+ "ldr r3, [%[src]], #4 \n\t"
+ "cmp r3, #0xff000000 \n\t"
+ "blo 2f \n\t"
+ "and r4, r3, #0x0000f8 \n\t"
+ "and r5, r3, #0x00fc00 \n\t"
+ "and r6, r3, #0xf80000 \n\t"
++#ifdef SK_ARM_HAS_EDSP
+ "pld [r1, #32] \n\t"
++#endif
+ "lsl r3, r4, #8 \n\t"
+ "orr r3, r3, r5, lsr #5 \n\t"
+ "orr r3, r3, r6, lsr #19 \n\t"
+ "subs %[count], %[count], #1 \n\t"
+ "strh r3, [%[dst]], #2 \n\t"
+ "bne 1b \n\t"
+ "b 4f \n\t"
+ "2: \n\t"
+ "lsrs r7, r3, #24 \n\t"
+ "beq 3f \n\t"
+ "ldrh r4, [%[dst]] \n\t"
+ "rsb r7, r7, #255 \n\t"
+ "and r6, r4, #0x001f \n\t"
+-#if SK_ARM_ARCH == 6
++#if SK_ARM_ARCH <= 6
+ "lsl r5, r4, #21 \n\t"
+ "lsr r5, r5, #26 \n\t"
+ #else
+ "ubfx r5, r4, #5, #6 \n\t"
+ #endif
++#ifdef SK_ARM_HAS_EDSP
+ "pld [r0, #16] \n\t"
++#endif
+ "lsr r4, r4, #11 \n\t"
+ #ifdef SK_ARM_HAS_EDSP
+ "smulbb r6, r6, r7 \n\t"
+ "smulbb r5, r5, r7 \n\t"
+ "smulbb r4, r4, r7 \n\t"
+ #else
+ "mul r6, r6, r7 \n\t"
+ "mul r5, r5, r7 \n\t"
+ "mul r4, r4, r7 \n\t"
+ #endif
++#if SK_ARM_ARCH >= 6
+ "uxtb r7, r3, ROR #16 \n\t"
+ "uxtb ip, r3, ROR #8 \n\t"
++#else
++ "mov ip, #0xff \n\t"
++ "and r7, ip, r3, ROR #16 \n\t"
++ "and ip, ip, r3, ROR #8 \n\t"
++#endif
+ "and r3, r3, #0xff \n\t"
+ "add r6, r6, #16 \n\t"
+ "add r5, r5, #32 \n\t"
+ "add r4, r4, #16 \n\t"
+ "add r6, r6, r6, lsr #5 \n\t"
+ "add r5, r5, r5, lsr #6 \n\t"
+ "add r4, r4, r4, lsr #5 \n\t"
+ "add r6, r7, r6, lsr #5 \n\t"
+ "add r5, ip, r5, lsr #6 \n\t"
+ "add r4, r3, r4, lsr #5 \n\t"
+ "lsr r6, r6, #3 \n\t"
+ "and r5, r5, #0xfc \n\t"
+ "and r4, r4, #0xf8 \n\t"
+ "orr r6, r6, r5, lsl #3 \n\t"
+ "orr r4, r6, r4, lsl #8 \n\t"
+ "strh r4, [%[dst]], #2 \n\t"
++#ifdef SK_ARM_HAS_EDSP
+ "pld [r1, #32] \n\t"
++#endif
+ "subs %[count], %[count], #1 \n\t"
+ "bne 1b \n\t"
+ "b 4f \n\t"
+ "3: \n\t"
+ "subs %[count], %[count], #1 \n\t"
+ "add %[dst], %[dst], #2 \n\t"
+ "bne 1b \n\t"
+ "4: \n\t"
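
The ARMv4T fix above only gates pld, smulbb and uxtb behind architecture checks; the pixel math the assembly implements is an over-blend of a premultiplied ARGB source onto an RGB565 destination. A simplified scalar sketch of that blend for one pixel (it skips the rounding terms the asm applies, and the helper name is illustrative):

    #include <cstdint>
    #include <cstdio>

    // Blend one premultiplied ARGB8888 source pixel over an RGB565 destination,
    // a scalar approximation of the per-pixel asm path patched above.
    static uint16_t blend_argb_over_565(uint32_t src, uint16_t dst) {
        unsigned a = src >> 24;
        if (a == 0xFF) {                       // opaque: straight 888 -> 565 pack
            return uint16_t(((src >> 8) & 0xF800) |
                            ((src >> 5) & 0x07E0) |
                            ((src >> 3) & 0x001F));
        }
        unsigned invA = 255 - a;
        // Expand the 565 destination, scale it by invA/256, add the source channels.
        unsigned dr = ((dst >> 11) & 0x1F) * invA >> 8;
        unsigned dg = ((dst >> 5) & 0x3F) * invA >> 8;
        unsigned db = (dst & 0x1F) * invA >> 8;
        unsigned r = ((src >> 19) & 0x1F) + dr;   // src red reduced to 5 bits
        unsigned g = ((src >> 10) & 0x3F) + dg;   // src green reduced to 6 bits
        unsigned b = ((src >> 3) & 0x1F) + db;    // src blue reduced to 5 bits
        return uint16_t((r << 11) | (g << 5) | b);
    }

    int main() {
        uint32_t halfRed = 0x80800000;   // premultiplied 50% red
        uint16_t white565 = 0xFFFF;
        std::printf("blended = 0x%04x\n", blend_argb_over_565(halfRed, white565));
        return 0;
    }
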
diff --git a/gfx/skia/patches/archive/0030-Bug-939629-Add-missing-include-guards.patch b/gfx/skia/patches/archive/0030-Bug-939629-Add-missing-include-guards.patch
new file mode 100644
index 000000000..c92bf2aae
--- /dev/null
+++ b/gfx/skia/patches/archive/0030-Bug-939629-Add-missing-include-guards.patch
@@ -0,0 +1,94 @@
+# HG changeset patch
+# Parent 979e60d9c09f22eb139643da6de7568b603e1aa1
+
+diff --git a/gfx/skia/include/images/SkImages.h b/gfx/skia/include/images/SkImages.h
+--- a/gfx/skia/include/images/SkImages.h
++++ b/gfx/skia/include/images/SkImages.h
+@@ -1,14 +1,19 @@
+ /*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
++#ifndef SkImages_DEFINED
++#define SkImages_DEFINED
++
+ class SkImages {
+ public:
+ /**
+ * Initializes flattenables in the images project.
+ */
+ static void InitializeFlattenables();
+ };
++
++#endif
+diff --git a/gfx/skia/src/gpu/GrAAConvexPathRenderer.h b/gfx/skia/src/gpu/GrAAConvexPathRenderer.h
+--- a/gfx/skia/src/gpu/GrAAConvexPathRenderer.h
++++ b/gfx/skia/src/gpu/GrAAConvexPathRenderer.h
+@@ -1,16 +1,19 @@
+
+ /*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
++#ifndef GrAAConvexPathRenderer_DEFINED
++#define GrAAConvexPathRenderer_DEFINED
++
+ #include "GrPathRenderer.h"
+
+
+ class GrAAConvexPathRenderer : public GrPathRenderer {
+ public:
+ GrAAConvexPathRenderer();
+
+ virtual bool canDrawPath(const SkPath& path,
+@@ -19,8 +22,10 @@ public:
+ bool antiAlias) const SK_OVERRIDE;
+
+ protected:
+ virtual bool onDrawPath(const SkPath& path,
+ const SkStrokeRec& stroke,
+ GrDrawTarget* target,
+ bool antiAlias) SK_OVERRIDE;
+ };
++
++#endif
+diff --git a/gfx/skia/src/gpu/GrReducedClip.h b/gfx/skia/src/gpu/GrReducedClip.h
+--- a/gfx/skia/src/gpu/GrReducedClip.h
++++ b/gfx/skia/src/gpu/GrReducedClip.h
+@@ -1,16 +1,19 @@
+
+ /*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
++#ifndef GrReducedClip_DEFINED
++#define GrReducedClip_DEFINED
++
+ #include "SkClipStack.h"
+ #include "SkTLList.h"
+
+ namespace GrReducedClip {
+
+ typedef SkTLList<SkClipStack::Element> ElementList;
+
+ enum InitialState {
+@@ -33,8 +36,10 @@ enum InitialState {
+ void ReduceClipStack(const SkClipStack& stack,
+ const SkIRect& queryBounds,
+ ElementList* result,
+ InitialState* initialState,
+ SkIRect* tighterBounds = NULL,
+ bool* requiresAA = NULL);
+
+ } // namespace GrReducedClip
++
++#endif
diff --git a/gfx/skia/patches/archive/0031-Bug-945588-Add-include-guard.patch b/gfx/skia/patches/archive/0031-Bug-945588-Add-include-guard.patch
new file mode 100644
index 000000000..f58e7e165
--- /dev/null
+++ b/gfx/skia/patches/archive/0031-Bug-945588-Add-include-guard.patch
@@ -0,0 +1,39 @@
+# HG changeset patch
+# User Ehsan Akhgari <ehsan@mozilla.com>
+
+Bug 945588 - Add include guards to SkConfig8888.h
+
+diff --git a/gfx/skia/src/core/SkConfig8888.h b/gfx/skia/src/core/SkConfig8888.h
+index 96eaef2..36bc9b4 100644
+--- a/gfx/skia/src/core/SkConfig8888.h
++++ b/gfx/skia/src/core/SkConfig8888.h
+@@ -1,16 +1,18 @@
+
+ /*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
++#ifndef SkConfig8888_DEFINED
++#define SkConfig8888_DEFINED
+
+ #include "SkCanvas.h"
+ #include "SkColorPriv.h"
+
+ /**
+ * Converts pixels from one Config8888 to another Config8888
+ */
+ void SkConvertConfig8888Pixels(uint32_t* dstPixels,
+@@ -69,8 +71,10 @@ static inline void SkCopyConfig8888ToBitmap(const SkBitmap& dstBmp,
+ int h = dstBmp.height();
+ size_t dstRowBytes = dstBmp.rowBytes();
+ uint32_t* dstPixels = reinterpret_cast<uint32_t*>(dstBmp.getPixels());
+
+ SkConvertConfig8888Pixels(dstPixels, dstRowBytes, SkCanvas::kNative_Premul_Config8888, srcPixels, srcRowBytes, srcConfig8888, w, h);
+ }
+
+ }
++
++#endif
diff --git a/gfx/skia/patches/archive/0032-Bug-974900-More-missing-include-guards.patch b/gfx/skia/patches/archive/0032-Bug-974900-More-missing-include-guards.patch
new file mode 100644
index 000000000..b6b846121
--- /dev/null
+++ b/gfx/skia/patches/archive/0032-Bug-974900-More-missing-include-guards.patch
@@ -0,0 +1,148 @@
+# HG changeset patch
+# Parent c8288d0c7a1544a590a0cac9c39397ac10c8a45b
+Bug 974900 - Add missing include guards to Skia headers - r=gw280
+
+diff --git a/gfx/skia/trunk/include/images/SkImages.h b/gfx/skia/trunk/include/images/SkImages.h
+--- a/gfx/skia/trunk/include/images/SkImages.h
++++ b/gfx/skia/trunk/include/images/SkImages.h
+@@ -1,14 +1,19 @@
+ /*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
++#ifndef SkImages_DEFINED
++#define SkImages_DEFINED
++
+ class SkImages {
+ public:
+ /**
+ * Initializes flattenables in the images project.
+ */
+ static void InitializeFlattenables();
+ };
++
++#endif
+diff --git a/gfx/skia/trunk/src/core/SkConvolver.h b/gfx/skia/trunk/src/core/SkConvolver.h
+--- a/gfx/skia/trunk/src/core/SkConvolver.h
++++ b/gfx/skia/trunk/src/core/SkConvolver.h
+@@ -8,16 +8,18 @@
+ #include "SkSize.h"
+ #include "SkTypes.h"
+ #include "SkTArray.h"
+
+ // avoid confusion with Mac OS X's math library (Carbon)
+ #if defined(__APPLE__)
+ #undef FloatToConvolutionFixed
+ #undef ConvolutionFixedToFloat
++#undef FloatToFixed
++#undef FixedToFloat
+ #endif
+
+ // Represents a filter in one dimension. Each output pixel has one entry in this
+ // object for the filter values contributing to it. You build up the filter
+ // list by calling AddFilter for each output pixel (in order).
+ //
+ // We do 2-dimensional convolution by first convolving each row by one
+ // SkConvolutionFilter1D, then convolving each column by another one.
+diff --git a/gfx/skia/trunk/src/gpu/GrAAConvexPathRenderer.h b/gfx/skia/trunk/src/gpu/GrAAConvexPathRenderer.h
+--- a/gfx/skia/trunk/src/gpu/GrAAConvexPathRenderer.h
++++ b/gfx/skia/trunk/src/gpu/GrAAConvexPathRenderer.h
+@@ -3,24 +3,28 @@
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+ #include "GrPathRenderer.h"
+
++#ifndef GrAAConvexPathRenderer_DEFINED
++#define GrAAConvexPathRenderer_DEFINED
+
+ class GrAAConvexPathRenderer : public GrPathRenderer {
+ public:
+ GrAAConvexPathRenderer();
+
+ virtual bool canDrawPath(const SkPath& path,
+ const SkStrokeRec& stroke,
+ const GrDrawTarget* target,
+ bool antiAlias) const SK_OVERRIDE;
+
+ protected:
+ virtual bool onDrawPath(const SkPath& path,
+ const SkStrokeRec& stroke,
+ GrDrawTarget* target,
+ bool antiAlias) SK_OVERRIDE;
+ };
++
++#endif
+diff --git a/gfx/skia/trunk/src/gpu/GrReducedClip.h b/gfx/skia/trunk/src/gpu/GrReducedClip.h
+--- a/gfx/skia/trunk/src/gpu/GrReducedClip.h
++++ b/gfx/skia/trunk/src/gpu/GrReducedClip.h
+@@ -1,16 +1,19 @@
+
+ /*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
++#ifndef GrReducedClip_DEFINED
++#define GrReducedClip_DEFINED
++
+ #include "SkClipStack.h"
+ #include "SkTLList.h"
+
+ namespace GrReducedClip {
+
+ typedef SkTLList<SkClipStack::Element> ElementList;
+
+ enum InitialState {
+@@ -36,8 +39,10 @@ SK_API void ReduceClipStack(const SkClip
+ const SkIRect& queryBounds,
+ ElementList* result,
+ int32_t* resultGenID,
+ InitialState* initialState,
+ SkIRect* tighterBounds = NULL,
+ bool* requiresAA = NULL);
+
+ } // namespace GrReducedClip
++
++#endif
+diff --git a/gfx/skia/trunk/src/pathops/SkLineParameters.h b/gfx/skia/trunk/src/pathops/SkLineParameters.h
+--- a/gfx/skia/trunk/src/pathops/SkLineParameters.h
++++ b/gfx/skia/trunk/src/pathops/SkLineParameters.h
+@@ -1,14 +1,18 @@
+ /*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
++
++#ifndef SkLineParameters_DEFINED
++#define SkLineParameters_DEFINED
++
+ #include "SkPathOpsCubic.h"
+ #include "SkPathOpsLine.h"
+ #include "SkPathOpsQuad.h"
+
+ // Sources
+ // computer-aided design - volume 22 number 9 november 1990 pp 538 - 549
+ // online at http://cagd.cs.byu.edu/~tom/papers/bezclip.pdf
+
+@@ -164,8 +168,10 @@ public:
+ return -a;
+ }
+
+ private:
+ double a;
+ double b;
+ double c;
+ };
++
++#endif
diff --git a/gfx/skia/patches/archive/0033-Bug-974900-undef-interface-windows.patch b/gfx/skia/patches/archive/0033-Bug-974900-undef-interface-windows.patch
new file mode 100644
index 000000000..05f17000a
--- /dev/null
+++ b/gfx/skia/patches/archive/0033-Bug-974900-undef-interface-windows.patch
@@ -0,0 +1,27 @@
+# HG changeset patch
+# Parent b12f9a408740aa5fd93c296a7d41e1b5f54c1b20
+Bug 974900 - #undef interface defined by windows headers - r=gw280
+
+diff --git a/gfx/skia/trunk/src/gpu/gl/GrGLCaps.h b/gfx/skia/trunk/src/gpu/gl/GrGLCaps.h
+--- a/gfx/skia/trunk/src/gpu/gl/GrGLCaps.h
++++ b/gfx/skia/trunk/src/gpu/gl/GrGLCaps.h
+@@ -9,16 +9,19 @@
+ #ifndef GrGLCaps_DEFINED
+ #define GrGLCaps_DEFINED
+
+ #include "GrDrawTargetCaps.h"
+ #include "GrGLStencilBuffer.h"
+ #include "SkTArray.h"
+ #include "SkTDArray.h"
+
++// defined in Windows headers
++#undef interface
++
+ class GrGLContextInfo;
+
+ /**
+ * Stores some capabilities of a GL context. Most are determined by the GL
+ * version and the extensions string. It also tracks formats that have passed
+ * the FBO completeness test.
+ */
+ class GrGLCaps : public GrDrawTargetCaps {
diff --git a/gfx/skia/patches/archive/SkPostConfig.patch b/gfx/skia/patches/archive/SkPostConfig.patch
new file mode 100644
index 000000000..d32341f4e
--- /dev/null
+++ b/gfx/skia/patches/archive/SkPostConfig.patch
@@ -0,0 +1,32 @@
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -277,19 +277,28 @@
+ #endif
+
+ //////////////////////////////////////////////////////////////////////
+
+ #ifndef SK_OVERRIDE
+ #if defined(_MSC_VER)
+ #define SK_OVERRIDE override
+ #elif defined(__clang__)
++#if __has_feature(cxx_override_control)
+ // Some documentation suggests we should be using __attribute__((override)),
+ // but it doesn't work.
+ #define SK_OVERRIDE override
++#elif defined(__has_extension)
++#if __has_extension(cxx_override_control)
++#define SK_OVERRIDE override
++#endif
++#endif
++#ifndef SK_OVERRIDE
++#define SK_OVERRIDE
++#endif
+ #else
+ // Linux GCC ignores "__attribute__((override))" and rejects "override".
+ #define SK_OVERRIDE
+ #endif
+ #endif
+
+ //////////////////////////////////////////////////////////////////////
+
diff --git a/gfx/skia/patches/archive/arm-fixes.patch b/gfx/skia/patches/archive/arm-fixes.patch
new file mode 100644
index 000000000..d9fa430df
--- /dev/null
+++ b/gfx/skia/patches/archive/arm-fixes.patch
@@ -0,0 +1,191 @@
+diff --git a/gfx/skia/include/core/SkMath.h b/gfx/skia/include/core/SkMath.h
+--- a/gfx/skia/include/core/SkMath.h
++++ b/gfx/skia/include/core/SkMath.h
+@@ -148,20 +148,17 @@ static inline bool SkIsPow2(int value) {
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ /** SkMulS16(a, b) multiplies a * b, but requires that a and b are both int16_t.
+ With this requirement, we can generate faster instructions on some
+ architectures.
+ */
+-#if defined(__arm__) \
+- && !defined(__thumb__) \
+- && !defined(__ARM_ARCH_4T__) \
+- && !defined(__ARM_ARCH_5T__)
++#ifdef SK_ARM_HAS_EDSP
+ static inline int32_t SkMulS16(S16CPU x, S16CPU y) {
+ SkASSERT((int16_t)x == x);
+ SkASSERT((int16_t)y == y);
+ int32_t product;
+ asm("smulbb %0, %1, %2 \n"
+ : "=r"(product)
+ : "r"(x), "r"(y)
+ );
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -300,8 +300,53 @@
+ #endif
+ #endif
+
+ //////////////////////////////////////////////////////////////////////
+
+ #ifndef SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
+ #define SK_ALLOW_STATIC_GLOBAL_INITIALIZERS 1
+ #endif
++
++//////////////////////////////////////////////////////////////////////
++// ARM defines
++
++#if defined(__GNUC__) && defined(__arm__)
++
++# define SK_ARM_ARCH 3
++
++# if defined(__ARM_ARCH_4__) || defined(__ARM_ARCH_4T__) \
++ || defined(_ARM_ARCH_4)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 4
++# endif
++
++# if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
++ || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
++ || defined(__ARM_ARCH_5TEJ__) || defined(_ARM_ARCH_5)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 5
++# endif
++
++# if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
++ || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
++ || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \
++ || defined(__ARM_ARCH_6M__) || defined(_ARM_ARCH_6)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 6
++# endif
++
++# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
++ || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
++ || defined(__ARM_ARCH_7EM__) || defined(_ARM_ARCH_7)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 7
++# endif
++
++# undef SK_ARM_HAS_EDSP
++# if defined(__thumb2__) && (SK_ARM_ARCH >= 6) \
++ || !defined(__thumb__) \
++ && ((SK_ARM_ARCH > 5) || defined(__ARM_ARCH_5E__) \
++ || defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__))
++# define SK_ARM_HAS_EDSP 1
++# endif
++
++#endif
+diff --git a/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp b/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp
+--- a/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp
++++ b/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp
+@@ -6,17 +6,17 @@
+ * found in the LICENSE file.
+ */
+
+
+ #include "SkBitmapProcState.h"
+ #include "SkColorPriv.h"
+ #include "SkUtils.h"
+
+-#if __ARM_ARCH__ >= 6 && !defined(SK_CPU_BENDIAN)
++#if SK_ARM_ARCH >= 6 && !defined(SK_CPU_BENDIAN)
+ void SI8_D16_nofilter_DX_arm(
+ const SkBitmapProcState& s,
+ const uint32_t* SK_RESTRICT xy,
+ int count,
+ uint16_t* SK_RESTRICT colors) __attribute__((optimize("O1")));
+
+ void SI8_D16_nofilter_DX_arm(const SkBitmapProcState& s,
+ const uint32_t* SK_RESTRICT xy,
+@@ -177,17 +177,17 @@ void SI8_opaque_D32_nofilter_DX_arm(cons
+ : [xx] "+r" (xx), [count] "+r" (count), [colors] "+r" (colors)
+ : [table] "r" (table), [srcAddr] "r" (srcAddr)
+ : "memory", "cc", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11"
+ );
+ }
+
+ s.fBitmap->getColorTable()->unlockColors(false);
+ }
+-#endif //__ARM_ARCH__ >= 6 && !defined(SK_CPU_BENDIAN)
++#endif // SK_ARM_ARCH >= 6 && !defined(SK_CPU_BENDIAN)
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ /* If we replace a sampleproc, then we null-out the associated shaderproc,
+ otherwise the shader won't even look at the matrix/sampler
+ */
+ void SkBitmapProcState::platformProcs() {
+ bool doFilter = fDoFilter;
+@@ -195,17 +195,17 @@ void SkBitmapProcState::platformProcs()
+ bool justDx = false;
+
+ if (fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask)) {
+ justDx = true;
+ }
+
+ switch (fBitmap->config()) {
+ case SkBitmap::kIndex8_Config:
+-#if __ARM_ARCH__ >= 6 && !defined(SK_CPU_BENDIAN)
++#if SK_ARM_ARCH >= 6 && !defined(SK_CPU_BENDIAN)
+ if (justDx && !doFilter) {
+ #if 0 /* crashing on android device */
+ fSampleProc16 = SI8_D16_nofilter_DX_arm;
+ fShaderProc16 = NULL;
+ #endif
+ if (isOpaque) {
+ // this one is only very slighty faster than the C version
+ fSampleProc32 = SI8_opaque_D32_nofilter_DX_arm;
+diff --git a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+--- a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
++++ b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+@@ -669,18 +669,23 @@ static void __attribute((noinline,optimi
+ /* Double Loop */
+ "1: \n\t" /* <double loop> */
+ "ldm %[src]!, {r5, r6} \n\t" /* loading src pointers into r5 and r6 */
+ "ldm %[dst], {r7, r8} \n\t" /* loading dst pointers into r7 and r8 */
+
+ /* dst1_scale and dst2_scale*/
+ "lsr r9, r5, #24 \n\t" /* src >> 24 */
+ "lsr r10, r6, #24 \n\t" /* src >> 24 */
++#ifdef SK_ARM_HAS_EDSP
+ "smulbb r9, r9, %[alpha] \n\t" /* r9 = SkMulS16 r9 with src_scale */
+ "smulbb r10, r10, %[alpha] \n\t" /* r10 = SkMulS16 r10 with src_scale */
++#else
++ "mul r9, r9, %[alpha] \n\t" /* r9 = SkMulS16 r9 with src_scale */
++ "mul r10, r10, %[alpha] \n\t" /* r10 = SkMulS16 r10 with src_scale */
++#endif
+ "lsr r9, r9, #8 \n\t" /* r9 >> 8 */
+ "lsr r10, r10, #8 \n\t" /* r10 >> 8 */
+ "rsb r9, r9, #256 \n\t" /* dst1_scale = r9 = 255 - r9 + 1 */
+ "rsb r10, r10, #256 \n\t" /* dst2_scale = r10 = 255 - r10 + 1 */
+
+ /* ---------------------- */
+
+ /* src1, src1_scale */
+@@ -739,17 +744,21 @@ static void __attribute((noinline,optimi
+ /* else get into the single loop */
+ /* Single Loop */
+ "2: \n\t" /* <single loop> */
+ "ldr r5, [%[src]], #4 \n\t" /* loading src pointer into r5: r5=src */
+ "ldr r7, [%[dst]] \n\t" /* loading dst pointer into r7: r7=dst */
+
+ "lsr r6, r5, #24 \n\t" /* src >> 24 */
+ "and r8, r12, r5, lsr #8 \n\t" /* ag = r8 = r5 masked by r12 lsr by #8 */
++#ifdef SK_ARM_HAS_EDSP
+ "smulbb r6, r6, %[alpha] \n\t" /* r6 = SkMulS16 with src_scale */
++#else
++ "mul r6, r6, %[alpha] \n\t" /* r6 = SkMulS16 with src_scale */
++#endif
+ "and r9, r12, r5 \n\t" /* rb = r9 = r5 masked by r12 */
+ "lsr r6, r6, #8 \n\t" /* r6 >> 8 */
+ "mul r8, r8, %[alpha] \n\t" /* ag = r8 times scale */
+ "rsb r6, r6, #256 \n\t" /* r6 = 255 - r6 + 1 */
+
+ /* src, src_scale */
+ "mul r9, r9, %[alpha] \n\t" /* rb = r9 times scale */
+ "and r8, r8, r12, lsl #8 \n\t" /* ag masked by reverse mask (r12) */
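
Besides introducing the SK_ARM_ARCH ladder and SK_ARM_HAS_EDSP in SkPostConfig.h, the patch above keeps SkMulS16 usable on cores without EDSP by falling back to an ordinary multiply. The contract is simply "both operands must fit in int16_t"; a portable sketch of that fallback with the precondition made explicit:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Portable equivalent of SkMulS16: multiply two values that are required
    // to fit in int16_t. On EDSP-capable ARM the header above emits smulbb.
    static int32_t mul_s16(int32_t x, int32_t y) {
        assert(int16_t(x) == x && int16_t(y) == y);   // the macro's precondition
        return x * y;
    }

    int main() {
        std::printf("%d\n", mul_s16(-1234, 5678));    // prints -7006652
        return 0;
    }
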
diff --git a/gfx/skia/patches/archive/arm-opts.patch b/gfx/skia/patches/archive/arm-opts.patch
new file mode 100644
index 000000000..02ad85c9a
--- /dev/null
+++ b/gfx/skia/patches/archive/arm-opts.patch
@@ -0,0 +1,41 @@
+diff --git a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+--- a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
++++ b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+@@ -549,17 +549,17 @@ static void S32A_Opaque_BlitRow32_neon(S
+ #define S32A_Opaque_BlitRow32_PROC S32A_Opaque_BlitRow32_neon
+
+ #else
+
+ #ifdef TEST_SRC_ALPHA
+ #error The ARM asm version of S32A_Opaque_BlitRow32 does not support TEST_SRC_ALPHA
+ #endif
+
+-static void S32A_Opaque_BlitRow32_arm(SkPMColor* SK_RESTRICT dst,
++static void __attribute((noinline,optimize("-fomit-frame-pointer"))) S32A_Opaque_BlitRow32_arm(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha) {
+
+ SkASSERT(255 == alpha);
+
+ /* Does not support the TEST_SRC_ALPHA case */
+ asm volatile (
+ "cmp %[count], #0 \n\t" /* comparing count with 0 */
+@@ -646,17 +646,17 @@ static void S32A_Opaque_BlitRow32_arm(Sk
+ );
+ }
+ #define S32A_Opaque_BlitRow32_PROC S32A_Opaque_BlitRow32_arm
+ #endif
+
+ /*
+ * ARM asm version of S32A_Blend_BlitRow32
+ */
+-static void S32A_Blend_BlitRow32_arm(SkPMColor* SK_RESTRICT dst,
++static void __attribute((noinline,optimize("-fomit-frame-pointer"))) S32A_Blend_BlitRow32_arm(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha) {
+ asm volatile (
+ "cmp %[count], #0 \n\t" /* comparing count with 0 */
+ "beq 3f \n\t" /* if zero exit */
+
+ "mov r12, #0xff \n\t" /* load the 0xff mask in r12 */
+ "orr r12, r12, r12, lsl #16 \n\t" /* convert it to 0xff00ff in r12 */
diff --git a/gfx/skia/patches/archive/fix-comma-end-enum-list.patch b/gfx/skia/patches/archive/fix-comma-end-enum-list.patch
new file mode 100644
index 000000000..dea36377e
--- /dev/null
+++ b/gfx/skia/patches/archive/fix-comma-end-enum-list.patch
@@ -0,0 +1,380 @@
+diff --git a/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h b/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h
+--- a/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h
++++ b/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h
+@@ -29,17 +29,17 @@ public:
+ SkString fFontName;
+
+ enum FontType {
+ kType1_Font,
+ kType1CID_Font,
+ kCFF_Font,
+ kTrueType_Font,
+ kOther_Font,
+- kNotEmbeddable_Font,
++ kNotEmbeddable_Font
+ };
+ // The type of the underlying font program. This field determines which
+ // of the following fields are valid. If it is kOther_Font or
+ // kNotEmbeddable_Font, the per glyph information will never be populated.
+ FontType fType;
+
+ // fMultiMaster may be true for Type1_Font or CFF_Font.
+ bool fMultiMaster;
+@@ -51,17 +51,17 @@ public:
+ kFixedPitch_Style = 0x00001,
+ kSerif_Style = 0x00002,
+ kSymbolic_Style = 0x00004,
+ kScript_Style = 0x00008,
+ kNonsymbolic_Style = 0x00020,
+ kItalic_Style = 0x00040,
+ kAllCaps_Style = 0x10000,
+ kSmallCaps_Style = 0x20000,
+- kForceBold_Style = 0x40000,
++ kForceBold_Style = 0x40000
+ };
+ uint16_t fStyle; // Font style characteristics.
+ int16_t fItalicAngle; // Counterclockwise degrees from vertical of the
+ // dominant vertical stroke for an Italic face.
+ // The following fields are all in font units.
+ int16_t fAscent; // Max height above baseline, not including accents.
+ int16_t fDescent; // Max depth below baseline (negative).
+ int16_t fStemV; // Thickness of dominant vertical stem.
+@@ -70,26 +70,26 @@ public:
+ SkIRect fBBox; // The bounding box of all glyphs (in font units).
+
+ // The type of advance data wanted.
+ enum PerGlyphInfo {
+ kNo_PerGlyphInfo = 0x0, // Don't populate any per glyph info.
+ kHAdvance_PerGlyphInfo = 0x1, // Populate horizontal advance data.
+ kVAdvance_PerGlyphInfo = 0x2, // Populate vertical advance data.
+ kGlyphNames_PerGlyphInfo = 0x4, // Populate glyph names (Type 1 only).
+- kToUnicode_PerGlyphInfo = 0x8, // Populate ToUnicode table, ignored
++ kToUnicode_PerGlyphInfo = 0x8 // Populate ToUnicode table, ignored
+ // for Type 1 fonts
+ };
+
+ template <typename Data>
+ struct AdvanceMetric {
+ enum MetricType {
+ kDefault, // Default advance: fAdvance.count = 1
+ kRange, // Advances for a range: fAdvance.count = fEndID-fStartID
+- kRun, // fStartID-fEndID have same advance: fAdvance.count = 1
++ kRun // fStartID-fEndID have same advance: fAdvance.count = 1
+ };
+ MetricType fType;
+ uint16_t fStartId;
+ uint16_t fEndId;
+ SkTDArray<Data> fAdvance;
+ SkTScopedPtr<AdvanceMetric<Data> > fNext;
+ };
+
+diff --git a/gfx/skia/include/core/SkBlitRow.h b/gfx/skia/include/core/SkBlitRow.h
+--- a/gfx/skia/include/core/SkBlitRow.h
++++ b/gfx/skia/include/core/SkBlitRow.h
+@@ -44,17 +44,17 @@ public:
+
+ //! Public entry-point to return a blit function ptr
+ static Proc Factory(unsigned flags, SkBitmap::Config);
+
+ ///////////// D32 version
+
+ enum Flags32 {
+ kGlobalAlpha_Flag32 = 1 << 0,
+- kSrcPixelAlpha_Flag32 = 1 << 1,
++ kSrcPixelAlpha_Flag32 = 1 << 1
+ };
+
+ /** Function pointer that blends 32bit colors onto a 32bit destination.
+ @param dst array of dst 32bit colors
+ @param src array of src 32bit colors (w/ or w/o alpha)
+ @param count number of colors to blend
+ @param alpha global alpha to be applied to all src colors
+ */
+diff --git a/gfx/skia/include/core/SkCanvas.h b/gfx/skia/include/core/SkCanvas.h
+--- a/gfx/skia/include/core/SkCanvas.h
++++ b/gfx/skia/include/core/SkCanvas.h
+@@ -132,17 +132,17 @@ public:
+ * low byte to high byte: B, G, R, A.
+ */
+ kBGRA_Premul_Config8888,
+ kBGRA_Unpremul_Config8888,
+ /**
+ * low byte to high byte: R, G, B, A.
+ */
+ kRGBA_Premul_Config8888,
+- kRGBA_Unpremul_Config8888,
++ kRGBA_Unpremul_Config8888
+ };
+
+ /**
+ * On success (returns true), copy the canvas pixels into the bitmap.
+ * On failure, the bitmap parameter is left unchanged and false is
+ * returned.
+ *
+ * The canvas' pixels are converted to the bitmap's config. The only
+diff --git a/gfx/skia/include/core/SkDevice.h b/gfx/skia/include/core/SkDevice.h
+--- a/gfx/skia/include/core/SkDevice.h
++++ b/gfx/skia/include/core/SkDevice.h
+@@ -134,17 +134,17 @@ public:
+ * Return the device's origin: its offset in device coordinates from
+ * the default origin in its canvas' matrix/clip
+ */
+ const SkIPoint& getOrigin() const { return fOrigin; }
+
+ protected:
+ enum Usage {
+ kGeneral_Usage,
+- kSaveLayer_Usage, // <! internal use only
++ kSaveLayer_Usage // <! internal use only
+ };
+
+ struct TextFlags {
+ uint32_t fFlags; // SkPaint::getFlags()
+ SkPaint::Hinting fHinting;
+ };
+
+ /**
+diff --git a/gfx/skia/include/core/SkFlattenable.h b/gfx/skia/include/core/SkFlattenable.h
+--- a/gfx/skia/include/core/SkFlattenable.h
++++ b/gfx/skia/include/core/SkFlattenable.h
+@@ -216,17 +216,17 @@ public:
+ SkFactorySet* setFactoryRecorder(SkFactorySet*);
+
+ enum Flags {
+ kCrossProcess_Flag = 0x01,
+ /**
+ * Instructs the writer to inline Factory names as there are seen the
+ * first time (after that we store an index). The pipe code uses this.
+ */
+- kInlineFactoryNames_Flag = 0x02,
++ kInlineFactoryNames_Flag = 0x02
+ };
+ Flags getFlags() const { return (Flags)fFlags; }
+ void setFlags(Flags flags) { fFlags = flags; }
+
+ bool isCrossProcess() const {
+ return SkToBool(fFlags & kCrossProcess_Flag);
+ }
+ bool inlineFactoryNames() const {
+diff --git a/gfx/skia/include/core/SkFontHost.h b/gfx/skia/include/core/SkFontHost.h
+--- a/gfx/skia/include/core/SkFontHost.h
++++ b/gfx/skia/include/core/SkFontHost.h
+@@ -245,17 +245,17 @@ public:
+ vertically. When rendering subpixel glyphs we need to know which way
+ round they are.
+
+ Note, if you change this after startup, you'll need to flush the glyph
+ cache because it'll have the wrong type of masks cached.
+ */
+ enum LCDOrientation {
+ kHorizontal_LCDOrientation = 0, //!< this is the default
+- kVertical_LCDOrientation = 1,
++ kVertical_LCDOrientation = 1
+ };
+
+ static void SetSubpixelOrientation(LCDOrientation orientation);
+ static LCDOrientation GetSubpixelOrientation();
+
+ /** LCD color elements can vary in order. For subpixel text we need to know
+ the order which the LCDs uses so that the color fringes are in the
+ correct place.
+@@ -264,17 +264,17 @@ public:
+ cache because it'll have the wrong type of masks cached.
+
+ kNONE_LCDOrder means that the subpixel elements are not spatially
+ separated in any usable fashion.
+ */
+ enum LCDOrder {
+ kRGB_LCDOrder = 0, //!< this is the default
+ kBGR_LCDOrder = 1,
+- kNONE_LCDOrder = 2,
++ kNONE_LCDOrder = 2
+ };
+
+ static void SetSubpixelOrder(LCDOrder order);
+ static LCDOrder GetSubpixelOrder();
+
+ #ifdef SK_BUILD_FOR_ANDROID
+ ///////////////////////////////////////////////////////////////////////////
+
+diff --git a/gfx/skia/include/core/SkMaskFilter.h b/gfx/skia/include/core/SkMaskFilter.h
+--- a/gfx/skia/include/core/SkMaskFilter.h
++++ b/gfx/skia/include/core/SkMaskFilter.h
+@@ -57,17 +57,17 @@ public:
+
+ virtual void flatten(SkFlattenableWriteBuffer& ) {}
+
+ enum BlurType {
+ kNone_BlurType, //!< this maskfilter is not a blur
+ kNormal_BlurType, //!< fuzzy inside and outside
+ kSolid_BlurType, //!< solid inside, fuzzy outside
+ kOuter_BlurType, //!< nothing inside, fuzzy outside
+- kInner_BlurType, //!< fuzzy inside, nothing outside
++ kInner_BlurType //!< fuzzy inside, nothing outside
+ };
+
+ struct BlurInfo {
+ SkScalar fRadius;
+ bool fIgnoreTransform;
+ bool fHighQuality;
+ };
+
+diff --git a/gfx/skia/include/core/SkPaint.h b/gfx/skia/include/core/SkPaint.h
+--- a/gfx/skia/include/core/SkPaint.h
++++ b/gfx/skia/include/core/SkPaint.h
+@@ -70,17 +70,17 @@ public:
+ kFull_Hinting -> <same as kNormalHinting, unless we are rendering
+ subpixel glyphs, in which case TARGET_LCD or
+ TARGET_LCD_V is used>
+ */
+ enum Hinting {
+ kNo_Hinting = 0,
+ kSlight_Hinting = 1,
+ kNormal_Hinting = 2, //!< this is the default
+- kFull_Hinting = 3,
++ kFull_Hinting = 3
+ };
+
+ Hinting getHinting() const {
+ return static_cast<Hinting>(fHinting);
+ }
+
+ void setHinting(Hinting hintingLevel);
+
+@@ -282,17 +282,17 @@ public:
+ results may not appear the same as if it was drawn twice, filled and
+ then stroked.
+ */
+ enum Style {
+ kFill_Style, //!< fill the geometry
+ kStroke_Style, //!< stroke the geometry
+ kStrokeAndFill_Style, //!< fill and stroke the geometry
+
+- kStyleCount,
++ kStyleCount
+ };
+
+ /** Return the paint's style, used for controlling how primitives'
+ geometries are interpreted (except for drawBitmap, which always assumes
+ kFill_Style).
+ @return the paint's Style
+ */
+ Style getStyle() const { return (Style)fStyle; }
+diff --git a/gfx/skia/include/core/SkScalerContext.h b/gfx/skia/include/core/SkScalerContext.h
+--- a/gfx/skia/include/core/SkScalerContext.h
++++ b/gfx/skia/include/core/SkScalerContext.h
+@@ -172,24 +172,24 @@ public:
+ kHintingBit2_Flag = 0x0100,
+
+ // these should only ever be set if fMaskFormat is LCD16 or LCD32
+ kLCD_Vertical_Flag = 0x0200, // else Horizontal
+ kLCD_BGROrder_Flag = 0x0400, // else RGB order
+
+ // luminance : 0 for black text, kLuminance_Max for white text
+ kLuminance_Shift = 11, // to shift into the other flags above
+- kLuminance_Bits = 3, // ensure Flags doesn't exceed 16bits
++ kLuminance_Bits = 3 // ensure Flags doesn't exceed 16bits
+ };
+
+ // computed values
+ enum {
+ kHinting_Mask = kHintingBit1_Flag | kHintingBit2_Flag,
+ kLuminance_Max = (1 << kLuminance_Bits) - 1,
+- kLuminance_Mask = kLuminance_Max << kLuminance_Shift,
++ kLuminance_Mask = kLuminance_Max << kLuminance_Shift
+ };
+
+ struct Rec {
+ uint32_t fOrigFontID;
+ uint32_t fFontID;
+ SkScalar fTextSize, fPreScaleX, fPreSkewX;
+ SkScalar fPost2x2[2][2];
+ SkScalar fFrameWidth, fMiterLimit;
+diff --git a/gfx/skia/include/core/SkTypes.h b/gfx/skia/include/core/SkTypes.h
+--- a/gfx/skia/include/core/SkTypes.h
++++ b/gfx/skia/include/core/SkTypes.h
+@@ -433,17 +433,17 @@ public:
+ */
+ kAlloc_OnShrink,
+
+ /**
+ * If the requested size is smaller than the current size, and the
+ * current block is dynamically allocated, just return the old
+ * block.
+ */
+- kReuse_OnShrink,
++ kReuse_OnShrink
+ };
+
+ /**
+ * Reallocates the block to a new size. The ptr may or may not change.
+ */
+ void* reset(size_t size, OnShrink shrink = kAlloc_OnShrink) {
+ if (size == fSize || (kReuse_OnShrink == shrink && size < fSize)) {
+ return fPtr;
+diff --git a/gfx/skia/include/effects/SkLayerDrawLooper.h b/gfx/skia/include/effects/SkLayerDrawLooper.h
+--- a/gfx/skia/include/effects/SkLayerDrawLooper.h
++++ b/gfx/skia/include/effects/SkLayerDrawLooper.h
+@@ -36,17 +36,17 @@ public:
+
+ /**
+ * Use the layer's paint entirely, with these exceptions:
+ * - We never override the draw's paint's text_encoding, since that is
+ * used to interpret the text/len parameters in draw[Pos]Text.
+ * - Flags and Color are always computed using the LayerInfo's
+ * fFlagsMask and fColorMode.
+ */
+- kEntirePaint_Bits = -1,
++ kEntirePaint_Bits = -1
+
+ };
+ typedef int32_t BitFlags;
+
+ /**
+ * Info for how to apply the layer's paint and offset.
+ *
+ * fFlagsMask selects which flags in the layer's paint should be applied.
+diff --git a/gfx/skia/src/core/SkBitmap.cpp b/gfx/skia/src/core/SkBitmap.cpp
+--- a/gfx/skia/src/core/SkBitmap.cpp
++++ b/gfx/skia/src/core/SkBitmap.cpp
+@@ -1357,17 +1357,17 @@ bool SkBitmap::extractAlpha(SkBitmap* ds
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ enum {
+ SERIALIZE_PIXELTYPE_NONE,
+ SERIALIZE_PIXELTYPE_RAW_WITH_CTABLE,
+ SERIALIZE_PIXELTYPE_RAW_NO_CTABLE,
+ SERIALIZE_PIXELTYPE_REF_DATA,
+- SERIALIZE_PIXELTYPE_REF_PTR,
++ SERIALIZE_PIXELTYPE_REF_PTR
+ };
+
+ static void writeString(SkFlattenableWriteBuffer& buffer, const char str[]) {
+ size_t len = strlen(str);
+ buffer.write32(len);
+ buffer.writePad(str, len);
+ }
+
+diff --git a/gfx/skia/src/core/SkMatrix.cpp b/gfx/skia/src/core/SkMatrix.cpp
+--- a/gfx/skia/src/core/SkMatrix.cpp
++++ b/gfx/skia/src/core/SkMatrix.cpp
+@@ -1715,17 +1715,17 @@ SkScalar SkMatrix::getMaxStretch() const
+ const SkMatrix& SkMatrix::I() {
+ static SkMatrix gIdentity;
+ static bool gOnce;
+ if (!gOnce) {
+ gIdentity.reset();
+ gOnce = true;
+ }
+ return gIdentity;
+-};
++}
+
+ const SkMatrix& SkMatrix::InvalidMatrix() {
+ static SkMatrix gInvalid;
+ static bool gOnce;
+ if (!gOnce) {
+ gInvalid.setAll(SK_ScalarMax, SK_ScalarMax, SK_ScalarMax,
+ SK_ScalarMax, SK_ScalarMax, SK_ScalarMax,
+ SK_ScalarMax, SK_ScalarMax, SK_ScalarMax);
diff --git a/gfx/skia/patches/archive/fix-gradient-clamp.patch b/gfx/skia/patches/archive/fix-gradient-clamp.patch
new file mode 100644
index 000000000..91481c2c1
--- /dev/null
+++ b/gfx/skia/patches/archive/fix-gradient-clamp.patch
@@ -0,0 +1,211 @@
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -167,16 +167,17 @@ private:
+
+ mutable uint16_t* fCache16; // working ptr. If this is NULL, we need to recompute the cache values
+ mutable SkPMColor* fCache32; // working ptr. If this is NULL, we need to recompute the cache values
+
+ mutable uint16_t* fCache16Storage; // storage for fCache16, allocated on demand
+ mutable SkMallocPixelRef* fCache32PixelRef;
+ mutable unsigned fCacheAlpha; // the alpha value we used when we computed the cache. larger than 8bits so we can store uninitialized value
+
++ static SkPMColor PremultiplyColor(SkColor c0, U8CPU alpha);
+ static void Build16bitCache(uint16_t[], SkColor c0, SkColor c1, int count);
+ static void Build32bitCache(SkPMColor[], SkColor c0, SkColor c1, int count,
+ U8CPU alpha);
+ void setCacheAlpha(U8CPU alpha) const;
+ void initCommon();
+
+ typedef SkShader INHERITED;
+ };
+@@ -512,16 +513,31 @@ static inline U8CPU dither_fixed_to_8(Sk
+ * For dithering with premultiply, we want to ceiling the alpha component,
+ * to ensure that it is always >= any color component.
+ */
+ static inline U8CPU dither_ceil_fixed_to_8(SkFixed n) {
+ n >>= 8;
+ return ((n << 1) - (n | (n >> 8))) >> 8;
+ }
+
++SkPMColor Gradient_Shader::PremultiplyColor(SkColor c0, U8CPU paintAlpha)
++{
++ SkFixed a = SkMulDiv255Round(SkColorGetA(c0), paintAlpha);
++ SkFixed r = SkColorGetR(c0);
++ SkFixed g = SkColorGetG(c0);
++ SkFixed b = SkColorGetB(c0);
++
++ a = SkIntToFixed(a) + 0x8000;
++ r = SkIntToFixed(r) + 0x8000;
++ g = SkIntToFixed(g) + 0x8000;
++ b = SkIntToFixed(b) + 0x8000;
++
++ return SkPremultiplyARGBInline(a >> 16, r >> 16, g >> 16, b >> 16);
++}
++
+ void Gradient_Shader::Build32bitCache(SkPMColor cache[], SkColor c0, SkColor c1,
+ int count, U8CPU paintAlpha) {
+ SkASSERT(count > 1);
+
+ // need to apply paintAlpha to our two endpoints
+ SkFixed a = SkMulDiv255Round(SkColorGetA(c0), paintAlpha);
+ SkFixed da;
+ {
+@@ -613,24 +629,24 @@ const uint16_t* Gradient_Shader::getCach
+ }
+ }
+ return fCache16;
+ }
+
+ const SkPMColor* Gradient_Shader::getCache32() const {
+ if (fCache32 == NULL) {
+ // double the count for dither entries
+- const int entryCount = kCache32Count * 2;
++ const int entryCount = kCache32Count * 2 + 2;
+ const size_t allocSize = sizeof(SkPMColor) * entryCount;
+
+ if (NULL == fCache32PixelRef) {
+ fCache32PixelRef = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ }
+- fCache32 = (SkPMColor*)fCache32PixelRef->getAddr();
++ fCache32 = (SkPMColor*)fCache32PixelRef->getAddr() + 1;
+ if (fColorCount == 2) {
+ Build32bitCache(fCache32, fOrigColors[0], fOrigColors[1],
+ kCache32Count, fCacheAlpha);
+ } else {
+ Rec* rec = fRecs;
+ int prevIndex = 0;
+ for (int i = 1; i < fColorCount; i++) {
+ int nextIndex = SkFixedToFFFF(rec[i].fPos) >> (16 - kCache32Bits);
+@@ -644,28 +660,31 @@ const SkPMColor* Gradient_Shader::getCac
+ }
+ SkASSERT(prevIndex == kCache32Count - 1);
+ }
+
+ if (fMapper) {
+ SkMallocPixelRef* newPR = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ SkPMColor* linear = fCache32; // just computed linear data
+- SkPMColor* mapped = (SkPMColor*)newPR->getAddr(); // storage for mapped data
++ SkPMColor* mapped = (SkPMColor*)newPR->getAddr() + 1; // storage for mapped data
+ SkUnitMapper* map = fMapper;
+ for (int i = 0; i < kCache32Count; i++) {
+ int index = map->mapUnit16((i << 8) | i) >> 8;
+ mapped[i] = linear[index];
+ mapped[i + kCache32Count] = linear[index + kCache32Count];
+ }
+ fCache32PixelRef->unref();
+ fCache32PixelRef = newPR;
+- fCache32 = (SkPMColor*)newPR->getAddr();
++ fCache32 = (SkPMColor*)newPR->getAddr() + 1;
+ }
+ }
++ //Write the clamp colours into the first and last entries of fCache32
++ fCache32[-1] = PremultiplyColor(fOrigColors[0], fCacheAlpha);
++ fCache32[kCache32Count * 2] = PremultiplyColor(fOrigColors[fColorCount - 1], fCacheAlpha);
+ return fCache32;
+ }
+
+ /*
+ * Because our caller might rebuild the same (logically the same) gradient
+ * over and over, we'd like to return exactly the same "bitmap" if possible,
+ * allowing the client to utilize a cache of our bitmap (e.g. with a GPU).
+ * To do that, we maintain a private cache of built-bitmaps, based on our
+@@ -875,28 +894,38 @@ void Linear_Gradient::shadeSpan(int x, i
+ dx = dxStorage[0];
+ } else {
+ SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
+ dx = SkScalarToFixed(fDstToIndex.getScaleX());
+ }
+
+ if (SkFixedNearlyZero(dx)) {
+ // we're a vertical gradient, so no change in a span
+- unsigned fi = proc(fx) >> (16 - kCache32Bits);
+- sk_memset32_dither(dstC, cache[toggle + fi],
+- cache[(toggle ^ TOGGLE_MASK) + fi], count);
++ if (proc == clamp_tileproc) {
++ if (fx < 0) {
++ sk_memset32(dstC, cache[-1], count);
++ } else if (fx > 0xFFFF) {
++ sk_memset32(dstC, cache[kCache32Count * 2], count);
++ } else {
++ unsigned fi = proc(fx) >> (16 - kCache32Bits);
++ sk_memset32_dither(dstC, cache[toggle + fi],
++ cache[(toggle ^ TOGGLE_MASK) + fi], count);
++ }
++ } else {
++ unsigned fi = proc(fx) >> (16 - kCache32Bits);
++ sk_memset32_dither(dstC, cache[toggle + fi],
++ cache[(toggle ^ TOGGLE_MASK) + fi], count);
++ }
+ } else if (proc == clamp_tileproc) {
+ SkClampRange range;
+- range.init(fx, dx, count, 0, 0xFF);
++ range.init(fx, dx, count, cache[-1], cache[kCache32Count * 2]);
+
+ if ((count = range.fCount0) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV0],
+- cache[(toggle ^ TOGGLE_MASK) + range.fV0],
+- count);
++ // Do we really want to dither the clamp values?
++ sk_memset32(dstC, range.fV0, count);
+ dstC += count;
+ }
+ if ((count = range.fCount1) > 0) {
+ int unroll = count >> 3;
+ fx = range.fFx1;
+ for (int i = 0; i < unroll; i++) {
+ NO_CHECK_ITER; NO_CHECK_ITER;
+ NO_CHECK_ITER; NO_CHECK_ITER;
+@@ -905,20 +934,17 @@ void Linear_Gradient::shadeSpan(int x, i
+ }
+ if ((count &= 7) > 0) {
+ do {
+ NO_CHECK_ITER;
+ } while (--count != 0);
+ }
+ }
+ if ((count = range.fCount2) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV1],
+- cache[(toggle ^ TOGGLE_MASK) + range.fV1],
+- count);
++ sk_memset32(dstC, range.fV1, count);
+ }
+ } else if (proc == mirror_tileproc) {
+ do {
+ unsigned fi = mirror_8bits(fx >> 8);
+ SkASSERT(fi <= 0xFF);
+ fx += dx;
+ *dstC++ = cache[toggle + fi];
+ toggle ^= TOGGLE_MASK;
+@@ -1670,19 +1699,24 @@ public:
+ }
+ SkScalar b = (SkScalarMul(fDiff.fX, fx) +
+ SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
+ SkScalar db = (SkScalarMul(fDiff.fX, dx) +
+ SkScalarMul(fDiff.fY, dy)) * 2;
+ if (proc == clamp_tileproc) {
+ for (; count > 0; --count) {
+ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
+- SkFixed index = SkClampMax(t, 0xFFFF);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> (16 - kCache32Bits)];
++ if (t < 0) {
++ *dstC++ = cache[-1];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[kCache32Count * 2];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> (16 - kCache32Bits)];
++ }
+ fx += dx;
+ fy += dy;
+ b += db;
+ }
+ } else if (proc == mirror_tileproc) {
+ for (; count > 0; --count) {
+ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
+ SkFixed index = mirror_tileproc(t);
diff --git a/gfx/skia/patches/archive/getpostextpath.patch b/gfx/skia/patches/archive/getpostextpath.patch
new file mode 100644
index 000000000..7181411ec
--- /dev/null
+++ b/gfx/skia/patches/archive/getpostextpath.patch
@@ -0,0 +1,70 @@
+diff --git a/gfx/skia/include/core/SkPaint.h b/gfx/skia/include/core/SkPaint.h
+--- a/gfx/skia/include/core/SkPaint.h
++++ b/gfx/skia/include/core/SkPaint.h
+@@ -836,16 +836,19 @@ public:
+
+ /** Return the path (outline) for the specified text.
+ Note: just like SkCanvas::drawText, this will respect the Align setting
+ in the paint.
+ */
+ void getTextPath(const void* text, size_t length, SkScalar x, SkScalar y,
+ SkPath* path) const;
+
++ void getPosTextPath(const void* text, size_t length,
++ const SkPoint pos[], SkPath* path) const;
++
+ #ifdef SK_BUILD_FOR_ANDROID
+ const SkGlyph& getUnicharMetrics(SkUnichar);
+ const void* findImage(const SkGlyph&);
+
+ uint32_t getGenerationID() const;
+ #endif
+
+ // returns true if the paint's settings (e.g. xfermode + alpha) resolve to
+diff --git a/gfx/skia/src/core/SkPaint.cpp b/gfx/skia/src/core/SkPaint.cpp
+--- a/gfx/skia/src/core/SkPaint.cpp
++++ b/gfx/skia/src/core/SkPaint.cpp
+@@ -1242,16 +1242,43 @@ void SkPaint::getTextPath(const void* te
+ const SkPath* iterPath;
+ while ((iterPath = iter.next(&xpos)) != NULL) {
+ matrix.postTranslate(xpos - prevXPos, 0);
+ path->addPath(*iterPath, matrix);
+ prevXPos = xpos;
+ }
+ }
+
++void SkPaint::getPosTextPath(const void* textData, size_t length,
++ const SkPoint pos[], SkPath* path) const {
++ SkASSERT(length == 0 || textData != NULL);
++
++ const char* text = (const char*)textData;
++ if (text == NULL || length == 0 || path == NULL) {
++ return;
++ }
++
++ SkTextToPathIter iter(text, length, *this, false, true);
++ SkMatrix matrix;
++ SkPoint prevPos;
++ prevPos.set(0, 0);
++
++ matrix.setScale(iter.getPathScale(), iter.getPathScale());
++ path->reset();
++
++ unsigned int i = 0;
++ const SkPath* iterPath;
++ while ((iterPath = iter.next(NULL)) != NULL) {
++ matrix.postTranslate(pos[i].fX - prevPos.fX, pos[i].fY - prevPos.fY);
++ path->addPath(*iterPath, matrix);
++ prevPos = pos[i];
++ i++;
++ }
++}
++
+ static void add_flattenable(SkDescriptor* desc, uint32_t tag,
+ SkFlattenableWriteBuffer* buffer) {
+ buffer->flatten(desc->addEntry(tag, buffer->size(), NULL));
+ }
+
+ // SkFontHost can override this choice in FilterRec()
+ static SkMask::Format computeMaskFormat(const SkPaint& paint) {
+ uint32_t flags = paint.getFlags();
diff --git a/gfx/skia/patches/archive/mingw-fix.patch b/gfx/skia/patches/archive/mingw-fix.patch
new file mode 100644
index 000000000..d91a16aa7
--- /dev/null
+++ b/gfx/skia/patches/archive/mingw-fix.patch
@@ -0,0 +1,57 @@
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+index 0135b85..bb108f8 100644
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -253,7 +253,7 @@
+ //////////////////////////////////////////////////////////////////////
+
+ #ifndef SK_OVERRIDE
+-#if defined(SK_BUILD_FOR_WIN)
++#if defined(_MSC_VER)
+ #define SK_OVERRIDE override
+ #elif defined(__clang__)
+ // Some documentation suggests we should be using __attribute__((override)),
+diff --git a/gfx/skia/src/ports/SkFontHost_win.cpp b/gfx/skia/src/ports/SkFontHost_win.cpp
+index dd9c5dc..ca2c3dc 100644
+--- a/gfx/skia/src/ports/SkFontHost_win.cpp
++++ b/gfx/skia/src/ports/SkFontHost_win.cpp
+@@ -22,7 +22,7 @@
+ #ifdef WIN32
+ #include "windows.h"
+ #include "tchar.h"
+-#include "Usp10.h"
++#include "usp10.h"
+
+ // always packed xxRRGGBB
+ typedef uint32_t SkGdiRGB;
+@@ -1033,6 +1033,10 @@ SkAdvancedTypefaceMetrics* SkFontHost::GetAdvancedTypefaceMetrics(
+ HFONT savefont = (HFONT)SelectObject(hdc, font);
+ HFONT designFont = NULL;
+
++ const char stem_chars[] = {'i', 'I', '!', '1'};
++ int16_t min_width;
++ unsigned glyphCount;
++
+ // To request design units, create a logical font whose height is specified
+ // as unitsPerEm.
+ OUTLINETEXTMETRIC otm;
+@@ -1046,7 +1050,7 @@ SkAdvancedTypefaceMetrics* SkFontHost::GetAdvancedTypefaceMetrics(
+ if (!GetOutlineTextMetrics(hdc, sizeof(otm), &otm)) {
+ goto Error;
+ }
+- const unsigned glyphCount = calculateGlyphCount(hdc);
++ glyphCount = calculateGlyphCount(hdc);
+
+ info = new SkAdvancedTypefaceMetrics;
+ info->fEmSize = otm.otmEMSquare;
+@@ -1115,9 +1119,8 @@ SkAdvancedTypefaceMetrics* SkFontHost::GetAdvancedTypefaceMetrics(
+
+ // Figure out a good guess for StemV - Min width of i, I, !, 1.
+ // This probably isn't very good with an italic font.
+- int16_t min_width = SHRT_MAX;
++ min_width = SHRT_MAX;
+ info->fStemV = 0;
+- char stem_chars[] = {'i', 'I', '!', '1'};
+ for (size_t i = 0; i < SK_ARRAY_COUNT(stem_chars); i++) {
+ ABC abcWidths;
+ if (GetCharABCWidths(hdc, stem_chars[i], stem_chars[i], &abcWidths)) {
diff --git a/gfx/skia/patches/archive/new-aa.patch b/gfx/skia/patches/archive/new-aa.patch
new file mode 100644
index 000000000..d5e6fbf73
--- /dev/null
+++ b/gfx/skia/patches/archive/new-aa.patch
@@ -0,0 +1,22 @@
+diff --git a/gfx/skia/src/core/SkScan_AntiPath.cpp b/gfx/skia/src/core/SkScan_AntiPath.cpp
+--- a/gfx/skia/src/core/SkScan_AntiPath.cpp
++++ b/gfx/skia/src/core/SkScan_AntiPath.cpp
+@@ -31,17 +31,17 @@
+ - supersampled coordinates, scale equal to the output * SCALE
+
+ NEW_AA is a set of code-changes to try to make both paths produce identical
+ results. Its not quite there yet, though the remaining differences may be
+ in the subsequent blits, and not in the different masks/runs...
+ */
+ //#define FORCE_SUPERMASK
+ //#define FORCE_RLE
+-//#define SK_SUPPORT_NEW_AA
++#define SK_SUPPORT_NEW_AA
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ /// Base class for a single-pass supersampled blitter.
+ class BaseSuperBlitter : public SkBlitter {
+ public:
+ BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
+ const SkRegion& clip);
diff --git a/gfx/skia/patches/archive/old-android-fonthost.patch b/gfx/skia/patches/archive/old-android-fonthost.patch
new file mode 100644
index 000000000..1c64ace7d
--- /dev/null
+++ b/gfx/skia/patches/archive/old-android-fonthost.patch
@@ -0,0 +1,530 @@
+# HG changeset patch
+# Parent 9ee29e4aace683ddf6cf8ddb2893cd34fcfc772c
+# User James Willcox <jwillcox@mozilla.com>
+diff --git a/gfx/skia/Makefile.in b/gfx/skia/Makefile.in
+--- a/gfx/skia/Makefile.in
++++ b/gfx/skia/Makefile.in
+@@ -305,21 +305,20 @@ CPPSRCS += \
+ SkFontHost_mac_coretext.cpp \
+ SkTime_Unix.cpp \
+ $(NULL)
+ endif
+
+ ifeq (android,$(MOZ_WIDGET_TOOLKIT))
+ CPPSRCS += \
+ SkFontHost_FreeType.cpp \
+ SkFontHost_android.cpp \
+ SkFontHost_gamma.cpp \
+- FontHostConfiguration_android.cpp \
+ SkMMapStream.cpp \
+ SkTime_Unix.cpp \
+ $(NULL)
+
+ DEFINES += -DSK_BUILD_FOR_ANDROID_NDK
+ OS_CXXFLAGS += $(CAIRO_FT_CFLAGS)
+ endif
+
+ ifeq (gtk2,$(MOZ_WIDGET_TOOLKIT))
+ CPPSRCS += \
+diff --git a/gfx/skia/src/ports/SkFontHost_android.cpp b/gfx/skia/src/ports/SkFontHost_android.cpp
+--- a/gfx/skia/src/ports/SkFontHost_android.cpp
++++ b/gfx/skia/src/ports/SkFontHost_android.cpp
+@@ -1,38 +1,31 @@
++
+ /*
+-**
+-** Copyright 2006, The Android Open Source Project
+-**
+-** Licensed under the Apache License, Version 2.0 (the "License");
+-** you may not use this file except in compliance with the License.
+-** You may obtain a copy of the License at
+-**
+-** http://www.apache.org/licenses/LICENSE-2.0
+-**
+-** Unless required by applicable law or agreed to in writing, software
+-** distributed under the License is distributed on an "AS IS" BASIS,
+-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-** See the License for the specific language governing permissions and
+-** limitations under the License.
+-*/
++ * Copyright 2006 The Android Open Source Project
++ *
++ * Use of this source code is governed by a BSD-style license that can be
++ * found in the LICENSE file.
++ */
++
+
+ #include "SkFontHost.h"
+ #include "SkDescriptor.h"
+ #include "SkMMapStream.h"
+ #include "SkPaint.h"
+ #include "SkString.h"
+ #include "SkStream.h"
+ #include "SkThread.h"
+ #include "SkTSearch.h"
+-#include "FontHostConfiguration_android.h"
+ #include <stdio.h>
+
++#define FONT_CACHE_MEMORY_BUDGET (768 * 1024)
++
+ #ifndef SK_FONT_FILE_PREFIX
+ #define SK_FONT_FILE_PREFIX "/fonts/"
+ #endif
+
+ SkTypeface::Style find_name_and_attributes(SkStream* stream, SkString* name,
+ bool* isFixedWidth);
+
+ static void GetFullPathForSysFonts(SkString* full, const char name[]) {
+ full->set(getenv("ANDROID_ROOT"));
+ full->append(SK_FONT_FILE_PREFIX);
+@@ -99,21 +92,21 @@ static SkTypeface* find_best_face(const
+ if (faces[SkTypeface::kNormal] != NULL) {
+ return faces[SkTypeface::kNormal];
+ }
+ // look for anything
+ for (int i = 0; i < 4; i++) {
+ if (faces[i] != NULL) {
+ return faces[i];
+ }
+ }
+ // should never get here, since the faces list should not be empty
+- SkDEBUGFAIL("faces list is empty");
++ SkASSERT(!"faces list is empty");
+ return NULL;
+ }
+
+ static FamilyRec* find_family(const SkTypeface* member) {
+ FamilyRec* curr = gFamilyHead;
+ while (curr != NULL) {
+ for (int i = 0; i < 4; i++) {
+ if (curr->fFaces[i] == member) {
+ return curr;
+ }
+@@ -138,31 +131,27 @@ static SkTypeface* find_from_uniqueID(ui
+ curr = curr->fNext;
+ }
+ return NULL;
+ }
+
+ /* Remove reference to this face from its family. If the resulting family
+ is empty (has no faces), return that family, otherwise return NULL
+ */
+ static FamilyRec* remove_from_family(const SkTypeface* face) {
+ FamilyRec* family = find_family(face);
+- if (family) {
+- SkASSERT(family->fFaces[face->style()] == face);
+- family->fFaces[face->style()] = NULL;
++ SkASSERT(family->fFaces[face->style()] == face);
++ family->fFaces[face->style()] = NULL;
+
+- for (int i = 0; i < 4; i++) {
+- if (family->fFaces[i] != NULL) { // family is non-empty
+- return NULL;
+- }
++ for (int i = 0; i < 4; i++) {
++ if (family->fFaces[i] != NULL) { // family is non-empty
++ return NULL;
+ }
+- } else {
+-// SkDebugf("remove_from_family(%p) face not found", face);
+ }
+ return family; // return the empty family
+ }
+
+ // maybe we should make FamilyRec be doubly-linked
+ static void detach_and_delete_family(FamilyRec* family) {
+ FamilyRec* curr = gFamilyHead;
+ FamilyRec* prev = NULL;
+
+ while (curr != NULL) {
+@@ -172,21 +161,21 @@ static void detach_and_delete_family(Fam
+ gFamilyHead = next;
+ } else {
+ prev->fNext = next;
+ }
+ SkDELETE(family);
+ return;
+ }
+ prev = curr;
+ curr = next;
+ }
+- SkDEBUGFAIL("Yikes, couldn't find family in our list to remove/delete");
++ SkASSERT(!"Yikes, couldn't find family in our list to remove/delete");
+ }
+
+ static SkTypeface* find_typeface(const char name[], SkTypeface::Style style) {
+ NameFamilyPair* list = gNameList.begin();
+ int count = gNameList.count();
+
+ int index = SkStrLCSearch(&list[0].fName, count, name, sizeof(list[0]));
+
+ if (index >= 0) {
+ return find_best_face(list[index].fFamily, style);
+@@ -387,111 +376,90 @@ static bool get_name_and_style(const cha
+ }
+ return false;
+ }
+
+ // used to record our notion of the pre-existing fonts
+ struct FontInitRec {
+ const char* fFileName;
+ const char* const* fNames; // null-terminated list
+ };
+
++static const char* gSansNames[] = {
++ "sans-serif", "arial", "helvetica", "tahoma", "verdana", NULL
++};
++
++static const char* gSerifNames[] = {
++ "serif", "times", "times new roman", "palatino", "georgia", "baskerville",
++ "goudy", "fantasy", "cursive", "ITC Stone Serif", NULL
++};
++
++static const char* gMonoNames[] = {
++ "monospace", "courier", "courier new", "monaco", NULL
++};
++
+ // deliberately empty, but we use the address to identify fallback fonts
+ static const char* gFBNames[] = { NULL };
+
++/* Fonts must be grouped by family, with the first font in a family having the
++ list of names (even if that list is empty), and the following members having
++ null for the list. The names list must be NULL-terminated
++*/
++static const FontInitRec gSystemFonts[] = {
++ { "DroidSans.ttf", gSansNames },
++ { "DroidSans-Bold.ttf", NULL },
++ { "DroidSerif-Regular.ttf", gSerifNames },
++ { "DroidSerif-Bold.ttf", NULL },
++ { "DroidSerif-Italic.ttf", NULL },
++ { "DroidSerif-BoldItalic.ttf", NULL },
++ { "DroidSansMono.ttf", gMonoNames },
++ /* These are optional, and can be ignored if not found in the file system.
++ These are appended to gFallbackFonts[] as they are seen, so we list
++ them in the order we want them to be accessed by NextLogicalFont().
++ */
++ { "DroidSansArabic.ttf", gFBNames },
++ { "DroidSansHebrew.ttf", gFBNames },
++ { "DroidSansThai.ttf", gFBNames },
++ { "MTLmr3m.ttf", gFBNames }, // Motoya Japanese Font
++ { "MTLc3m.ttf", gFBNames }, // Motoya Japanese Font
++ { "DroidSansJapanese.ttf", gFBNames },
++ { "DroidSansFallback.ttf", gFBNames }
++};
+
+-/* Fonts are grouped by family, with the first font in a family having the
+- list of names (even if that list is empty), and the following members having
+- null for the list. The names list must be NULL-terminated.
+-*/
+-static FontInitRec *gSystemFonts;
+-static size_t gNumSystemFonts = 0;
+-
+-#define SYSTEM_FONTS_FILE "/system/etc/system_fonts.cfg"
++#define DEFAULT_NAMES gSansNames
+
+ // these globals are assigned (once) by load_system_fonts()
+ static FamilyRec* gDefaultFamily;
+ static SkTypeface* gDefaultNormal;
+-static char** gDefaultNames = NULL;
+-static uint32_t *gFallbackFonts;
+
+-/* Load info from a configuration file that populates the system/fallback font structures
+-*/
+-static void load_font_info() {
+-// load_font_info_xml("/system/etc/system_fonts.xml");
+- SkTDArray<FontFamily*> fontFamilies;
+- getFontFamilies(fontFamilies);
+-
+- SkTDArray<FontInitRec> fontInfo;
+- bool firstInFamily = false;
+- for (int i = 0; i < fontFamilies.count(); ++i) {
+- FontFamily *family = fontFamilies[i];
+- firstInFamily = true;
+- for (int j = 0; j < family->fFileNames.count(); ++j) {
+- FontInitRec fontInfoRecord;
+- fontInfoRecord.fFileName = family->fFileNames[j];
+- if (j == 0) {
+- if (family->fNames.count() == 0) {
+- // Fallback font
+- fontInfoRecord.fNames = (char **)gFBNames;
+- } else {
+- SkTDArray<const char*> names = family->fNames;
+- const char **nameList = (const char**)
+- malloc((names.count() + 1) * sizeof(char*));
+- if (nameList == NULL) {
+- // shouldn't get here
+- break;
+- }
+- if (gDefaultNames == NULL) {
+- gDefaultNames = (char**) nameList;
+- }
+- for (int i = 0; i < names.count(); ++i) {
+- nameList[i] = names[i];
+- }
+- nameList[names.count()] = NULL;
+- fontInfoRecord.fNames = nameList;
+- }
+- } else {
+- fontInfoRecord.fNames = NULL;
+- }
+- *fontInfo.append() = fontInfoRecord;
+- }
+- }
+- gNumSystemFonts = fontInfo.count();
+- gSystemFonts = (FontInitRec*) malloc(gNumSystemFonts * sizeof(FontInitRec));
+- gFallbackFonts = (uint32_t*) malloc((gNumSystemFonts + 1) * sizeof(uint32_t));
+- if (gSystemFonts == NULL) {
+- // shouldn't get here
+- gNumSystemFonts = 0;
+- }
+- for (size_t i = 0; i < gNumSystemFonts; ++i) {
+- gSystemFonts[i].fFileName = fontInfo[i].fFileName;
+- gSystemFonts[i].fNames = fontInfo[i].fNames;
+- }
+- fontFamilies.deleteAll();
+-}
++/* This is sized conservatively, assuming that it will never be a size issue.
++ It will be initialized in load_system_fonts(), and will be filled with the
++ fontIDs that can be used for fallback consideration, in sorted order (sorted
++ meaning element[0] should be used first, then element[1], etc. When we hit
++ a fontID==0 in the array, the list is done, hence our allocation size is
++ +1 the total number of possible system fonts. Also see NextLogicalFont().
++ */
++static uint32_t gFallbackFonts[SK_ARRAY_COUNT(gSystemFonts)+1];
+
+ /* Called once (ensured by the sentinel check at the beginning of our body).
+ Initializes all the globals, and register the system fonts.
+ */
+ static void load_system_fonts() {
+ // check if we've already be called
+ if (NULL != gDefaultNormal) {
+ return;
+ }
+
+- load_font_info();
+-
+ const FontInitRec* rec = gSystemFonts;
+ SkTypeface* firstInFamily = NULL;
+ int fallbackCount = 0;
+
+- for (size_t i = 0; i < gNumSystemFonts; i++) {
++ for (size_t i = 0; i < SK_ARRAY_COUNT(gSystemFonts); i++) {
+ // if we're the first in a new family, clear firstInFamily
+ if (rec[i].fNames != NULL) {
+ firstInFamily = NULL;
+ }
+
+ bool isFixedWidth;
+ SkString name;
+ SkTypeface::Style style;
+
+ // we expect all the fonts, except the "fallback" fonts
+@@ -515,120 +483,75 @@ static void load_system_fonts() {
+ // SkDebugf("---- adding %s as fallback[%d] fontID %d\n",
+ // rec[i].fFileName, fallbackCount, tf->uniqueID());
+ gFallbackFonts[fallbackCount++] = tf->uniqueID();
+ }
+
+ firstInFamily = tf;
+ FamilyRec* family = find_family(tf);
+ const char* const* names = rec[i].fNames;
+
+ // record the default family if this is it
+- if (names == gDefaultNames) {
++ if (names == DEFAULT_NAMES) {
+ gDefaultFamily = family;
+ }
+ // add the names to map to this family
+ while (*names) {
+ add_name(*names, family);
+ names += 1;
+ }
+ }
+ }
+
+ // do this after all fonts are loaded. This is our default font, and it
+ // acts as a sentinel so we only execute load_system_fonts() once
+ gDefaultNormal = find_best_face(gDefaultFamily, SkTypeface::kNormal);
+ // now terminate our fallback list with the sentinel value
+ gFallbackFonts[fallbackCount] = 0;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ void SkFontHost::Serialize(const SkTypeface* face, SkWStream* stream) {
+- // lookup and record if the font is custom (i.e. not a system font)
+- bool isCustomFont = !((FamilyTypeface*)face)->isSysFont();
+- stream->writeBool(isCustomFont);
++ const char* name = ((FamilyTypeface*)face)->getUniqueString();
+
+- if (isCustomFont) {
+- SkStream* fontStream = ((FamilyTypeface*)face)->openStream();
++ stream->write8((uint8_t)face->style());
+
+- // store the length of the custom font
+- uint32_t len = fontStream->getLength();
+- stream->write32(len);
+-
+- // store the entire font in the serialized stream
+- void* fontData = malloc(len);
+-
+- fontStream->read(fontData, len);
+- stream->write(fontData, len);
+-
+- fontStream->unref();
+- free(fontData);
+-// SkDebugf("--- fonthost custom serialize %d %d\n", face->style(), len);
+-
++ if (NULL == name || 0 == *name) {
++ stream->writePackedUInt(0);
++// SkDebugf("--- fonthost serialize null\n");
+ } else {
+- const char* name = ((FamilyTypeface*)face)->getUniqueString();
+-
+- stream->write8((uint8_t)face->style());
+-
+- if (NULL == name || 0 == *name) {
+- stream->writePackedUInt(0);
+-// SkDebugf("--- fonthost serialize null\n");
+- } else {
+- uint32_t len = strlen(name);
+- stream->writePackedUInt(len);
+- stream->write(name, len);
+-// SkDebugf("--- fonthost serialize <%s> %d\n", name, face->style());
+- }
++ uint32_t len = strlen(name);
++ stream->writePackedUInt(len);
++ stream->write(name, len);
++// SkDebugf("--- fonthost serialize <%s> %d\n", name, face->style());
+ }
+ }
+
+ SkTypeface* SkFontHost::Deserialize(SkStream* stream) {
+ load_system_fonts();
+
+- // check if the font is a custom or system font
+- bool isCustomFont = stream->readBool();
++ int style = stream->readU8();
+
+- if (isCustomFont) {
++ int len = stream->readPackedUInt();
++ if (len > 0) {
++ SkString str;
++ str.resize(len);
++ stream->read(str.writable_str(), len);
+
+- // read the length of the custom font from the stream
+- uint32_t len = stream->readU32();
+-
+- // generate a new stream to store the custom typeface
+- SkMemoryStream* fontStream = new SkMemoryStream(len);
+- stream->read((void*)fontStream->getMemoryBase(), len);
+-
+- SkTypeface* face = CreateTypefaceFromStream(fontStream);
+-
+- fontStream->unref();
+-
+-// SkDebugf("--- fonthost custom deserialize %d %d\n", face->style(), len);
+- return face;
+-
+- } else {
+- int style = stream->readU8();
+-
+- int len = stream->readPackedUInt();
+- if (len > 0) {
+- SkString str;
+- str.resize(len);
+- stream->read(str.writable_str(), len);
+-
+- const FontInitRec* rec = gSystemFonts;
+- for (size_t i = 0; i < gNumSystemFonts; i++) {
+- if (strcmp(rec[i].fFileName, str.c_str()) == 0) {
+- // backup until we hit the fNames
+- for (int j = i; j >= 0; --j) {
+- if (rec[j].fNames != NULL) {
+- return SkFontHost::CreateTypeface(NULL,
+- rec[j].fNames[0], NULL, 0,
+- (SkTypeface::Style)style);
+- }
++ const FontInitRec* rec = gSystemFonts;
++ for (size_t i = 0; i < SK_ARRAY_COUNT(gSystemFonts); i++) {
++ if (strcmp(rec[i].fFileName, str.c_str()) == 0) {
++ // backup until we hit the fNames
++ for (int j = i; j >= 0; --j) {
++ if (rec[j].fNames != NULL) {
++ return SkFontHost::CreateTypeface(NULL,
++ rec[j].fNames[0], NULL, 0, (SkTypeface::Style)style);
+ }
+ }
+ }
+ }
+ }
+ return NULL;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+@@ -697,49 +620,32 @@ size_t SkFontHost::GetFileName(SkFontID
+ }
+ return size;
+ } else {
+ return 0;
+ }
+ }
+
+ SkFontID SkFontHost::NextLogicalFont(SkFontID currFontID, SkFontID origFontID) {
+ load_system_fonts();
+
+- const SkTypeface* origTypeface = find_from_uniqueID(origFontID);
+- const SkTypeface* currTypeface = find_from_uniqueID(currFontID);
+-
+- SkASSERT(origTypeface != 0);
+- SkASSERT(currTypeface != 0);
+-
+- // Our fallback list always stores the id of the plain in each fallback
+- // family, so we transform currFontID to its plain equivalent.
+- currFontID = find_typeface(currTypeface, SkTypeface::kNormal)->uniqueID();
+-
+ /* First see if fontID is already one of our fallbacks. If so, return
+ its successor. If fontID is not in our list, then return the first one
+ in our list. Note: list is zero-terminated, and returning zero means
+ we have no more fonts to use for fallbacks.
+ */
+ const uint32_t* list = gFallbackFonts;
+ for (int i = 0; list[i] != 0; i++) {
+ if (list[i] == currFontID) {
+- if (list[i+1] == 0)
+- return 0;
+- const SkTypeface* nextTypeface = find_from_uniqueID(list[i+1]);
+- return find_typeface(nextTypeface, origTypeface->style())->uniqueID();
++ return list[i+1];
+ }
+ }
+-
+- // If we get here, currFontID was not a fallback, so we start at the
+- // beginning of our list.
+- const SkTypeface* firstTypeface = find_from_uniqueID(list[0]);
+- return find_typeface(firstTypeface, origTypeface->style())->uniqueID();
++ return list[0];
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ SkTypeface* SkFontHost::CreateTypefaceFromStream(SkStream* stream) {
+ if (NULL == stream || stream->getLength() <= 0) {
+ return NULL;
+ }
+
+ bool isFixedWidth;
+@@ -754,10 +660,11 @@ SkTypeface* SkFontHost::CreateTypefaceFr
+ }
+
+ SkTypeface* SkFontHost::CreateTypefaceFromFile(const char path[]) {
+ SkStream* stream = SkNEW_ARGS(SkMMAPStream, (path));
+ SkTypeface* face = SkFontHost::CreateTypefaceFromStream(stream);
+ // since we created the stream, we let go of our ref() here
+ stream->unref();
+ return face;
+ }
+
++///////////////////////////////////////////////////////////////////////////////
diff --git a/gfx/skia/patches/archive/radial-gradients.patch b/gfx/skia/patches/archive/radial-gradients.patch
new file mode 100644
index 000000000..183923e83
--- /dev/null
+++ b/gfx/skia/patches/archive/radial-gradients.patch
@@ -0,0 +1,25 @@
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -1665,17 +1665,20 @@ public:
+ }
+ return kRadial2_GradientType;
+ }
+
+ virtual void shadeSpan(int x, int y, SkPMColor* SK_RESTRICT dstC, int count) SK_OVERRIDE {
+ SkASSERT(count > 0);
+
+ // Zero difference between radii: fill with transparent black.
+- if (fDiffRadius == 0) {
++ // TODO: Is removing this actually correct? Two circles with the
++ // same radius, but different centers doesn't sound like it
++ // should be cleared
++ if (fDiffRadius == 0 && fCenter1 == fCenter2) {
+ sk_bzero(dstC, count * sizeof(*dstC));
+ return;
+ }
+ SkMatrix::MapXYProc dstProc = fDstToIndexProc;
+ TileProc proc = fTileProc;
+ const SkPMColor* SK_RESTRICT cache = this->getCache32();
+
+ SkScalar foura = fA * 4;
diff --git a/gfx/skia/patches/archive/skia_restrict_problem.patch b/gfx/skia/patches/archive/skia_restrict_problem.patch
new file mode 100644
index 000000000..c7639ca2c
--- /dev/null
+++ b/gfx/skia/patches/archive/skia_restrict_problem.patch
@@ -0,0 +1,461 @@
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -1184,116 +1184,17 @@ public:
+ {
+ // make sure our table is insync with our current #define for kSQRT_TABLE_SIZE
+ SkASSERT(sizeof(gSqrt8Table) == kSQRT_TABLE_SIZE);
+
+ rad_to_unit_matrix(center, radius, &fPtsToUnit);
+ }
+
+ virtual void shadeSpan(int x, int y, SkPMColor* dstC, int count) SK_OVERRIDE;
+- virtual void shadeSpan16(int x, int y, uint16_t* SK_RESTRICT dstC, int count) SK_OVERRIDE {
+- SkASSERT(count > 0);
+-
+- SkPoint srcPt;
+- SkMatrix::MapXYProc dstProc = fDstToIndexProc;
+- TileProc proc = fTileProc;
+- const uint16_t* SK_RESTRICT cache = this->getCache16();
+- int toggle = ((x ^ y) & 1) << kCache16Bits;
+-
+- if (fDstToIndexClass != kPerspective_MatrixClass) {
+- dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
+- SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
+- SkFixed dx, fx = SkScalarToFixed(srcPt.fX);
+- SkFixed dy, fy = SkScalarToFixed(srcPt.fY);
+-
+- if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
+- SkFixed storage[2];
+- (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), &storage[0], &storage[1]);
+- dx = storage[0];
+- dy = storage[1];
+- } else {
+- SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
+- dx = SkScalarToFixed(fDstToIndex.getScaleX());
+- dy = SkScalarToFixed(fDstToIndex.getSkewY());
+- }
+-
+- if (proc == clamp_tileproc) {
+- const uint8_t* SK_RESTRICT sqrt_table = gSqrt8Table;
+-
+- /* knock these down so we can pin against +- 0x7FFF, which is an immediate load,
+- rather than 0xFFFF which is slower. This is a compromise, since it reduces our
+- precision, but that appears to be visually OK. If we decide this is OK for
+- all of our cases, we could (it seems) put this scale-down into fDstToIndex,
+- to avoid having to do these extra shifts each time.
+- */
+- fx >>= 1;
+- dx >>= 1;
+- fy >>= 1;
+- dy >>= 1;
+- if (dy == 0) { // might perform this check for the other modes, but the win will be a smaller % of the total
+- fy = SkPin32(fy, -0xFFFF >> 1, 0xFFFF >> 1);
+- fy *= fy;
+- do {
+- unsigned xx = SkPin32(fx, -0xFFFF >> 1, 0xFFFF >> 1);
+- unsigned fi = (xx * xx + fy) >> (14 + 16 - kSQRT_TABLE_BITS);
+- fi = SkFastMin32(fi, 0xFFFF >> (16 - kSQRT_TABLE_BITS));
+- fx += dx;
+- *dstC++ = cache[toggle + (sqrt_table[fi] >> (8 - kCache16Bits))];
+- toggle ^= (1 << kCache16Bits);
+- } while (--count != 0);
+- } else {
+- do {
+- unsigned xx = SkPin32(fx, -0xFFFF >> 1, 0xFFFF >> 1);
+- unsigned fi = SkPin32(fy, -0xFFFF >> 1, 0xFFFF >> 1);
+- fi = (xx * xx + fi * fi) >> (14 + 16 - kSQRT_TABLE_BITS);
+- fi = SkFastMin32(fi, 0xFFFF >> (16 - kSQRT_TABLE_BITS));
+- fx += dx;
+- fy += dy;
+- *dstC++ = cache[toggle + (sqrt_table[fi] >> (8 - kCache16Bits))];
+- toggle ^= (1 << kCache16Bits);
+- } while (--count != 0);
+- }
+- } else if (proc == mirror_tileproc) {
+- do {
+- SkFixed dist = SkFixedSqrt(SkFixedSquare(fx) + SkFixedSquare(fy));
+- unsigned fi = mirror_tileproc(dist);
+- SkASSERT(fi <= 0xFFFF);
+- fx += dx;
+- fy += dy;
+- *dstC++ = cache[toggle + (fi >> (16 - kCache16Bits))];
+- toggle ^= (1 << kCache16Bits);
+- } while (--count != 0);
+- } else {
+- SkASSERT(proc == repeat_tileproc);
+- do {
+- SkFixed dist = SkFixedSqrt(SkFixedSquare(fx) + SkFixedSquare(fy));
+- unsigned fi = repeat_tileproc(dist);
+- SkASSERT(fi <= 0xFFFF);
+- fx += dx;
+- fy += dy;
+- *dstC++ = cache[toggle + (fi >> (16 - kCache16Bits))];
+- toggle ^= (1 << kCache16Bits);
+- } while (--count != 0);
+- }
+- } else { // perspective case
+- SkScalar dstX = SkIntToScalar(x);
+- SkScalar dstY = SkIntToScalar(y);
+- do {
+- dstProc(fDstToIndex, dstX, dstY, &srcPt);
+- unsigned fi = proc(SkScalarToFixed(srcPt.length()));
+- SkASSERT(fi <= 0xFFFF);
+-
+- int index = fi >> (16 - kCache16Bits);
+- *dstC++ = cache[toggle + index];
+- toggle ^= (1 << kCache16Bits);
+-
+- dstX += SK_Scalar1;
+- } while (--count != 0);
+- }
+- }
++ virtual void shadeSpan16(int x, int y, uint16_t* dstC, int count) SK_OVERRIDE;
+
+ virtual BitmapType asABitmap(SkBitmap* bitmap,
+ SkMatrix* matrix,
+ TileMode* xy,
+ SkScalar* twoPointRadialParams) const SK_OVERRIDE {
+ if (bitmap) {
+ this->commonAsABitmap(bitmap);
+ }
+@@ -1507,16 +1408,117 @@ void Radial_Gradient::shadeSpan(int x, i
+ unsigned fi = proc(SkScalarToFixed(srcPt.length()));
+ SkASSERT(fi <= 0xFFFF);
+ *dstC++ = cache[fi >> (16 - kCache32Bits)];
+ dstX += SK_Scalar1;
+ } while (--count != 0);
+ }
+ }
+
++void Radial_Gradient::shadeSpan16(int x, int y, uint16_t* SK_RESTRICT dstC, int count) {
++ SkASSERT(count > 0);
++
++ SkPoint srcPt;
++ SkMatrix::MapXYProc dstProc = fDstToIndexProc;
++ TileProc proc = fTileProc;
++ const uint16_t* SK_RESTRICT cache = this->getCache16();
++ int toggle = ((x ^ y) & 1) << kCache16Bits;
++
++ if (fDstToIndexClass != kPerspective_MatrixClass) {
++ dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
++ SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
++ SkFixed dx, fx = SkScalarToFixed(srcPt.fX);
++ SkFixed dy, fy = SkScalarToFixed(srcPt.fY);
++
++ if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
++ SkFixed storage[2];
++ (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), &storage[0], &storage[1]);
++ dx = storage[0];
++ dy = storage[1];
++ } else {
++ SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
++ dx = SkScalarToFixed(fDstToIndex.getScaleX());
++ dy = SkScalarToFixed(fDstToIndex.getSkewY());
++ }
++
++ if (proc == clamp_tileproc) {
++ const uint8_t* SK_RESTRICT sqrt_table = gSqrt8Table;
++
++ /* knock these down so we can pin against +- 0x7FFF, which is an immediate load,
++ rather than 0xFFFF which is slower. This is a compromise, since it reduces our
++ precision, but that appears to be visually OK. If we decide this is OK for
++ all of our cases, we could (it seems) put this scale-down into fDstToIndex,
++ to avoid having to do these extra shifts each time.
++ */
++ fx >>= 1;
++ dx >>= 1;
++ fy >>= 1;
++ dy >>= 1;
++ if (dy == 0) { // might perform this check for the other modes, but the win will be a smaller % of the total
++ fy = SkPin32(fy, -0xFFFF >> 1, 0xFFFF >> 1);
++ fy *= fy;
++ do {
++ unsigned xx = SkPin32(fx, -0xFFFF >> 1, 0xFFFF >> 1);
++ unsigned fi = (xx * xx + fy) >> (14 + 16 - kSQRT_TABLE_BITS);
++ fi = SkFastMin32(fi, 0xFFFF >> (16 - kSQRT_TABLE_BITS));
++ fx += dx;
++ *dstC++ = cache[toggle + (sqrt_table[fi] >> (8 - kCache16Bits))];
++ toggle ^= (1 << kCache16Bits);
++ } while (--count != 0);
++ } else {
++ do {
++ unsigned xx = SkPin32(fx, -0xFFFF >> 1, 0xFFFF >> 1);
++ unsigned fi = SkPin32(fy, -0xFFFF >> 1, 0xFFFF >> 1);
++ fi = (xx * xx + fi * fi) >> (14 + 16 - kSQRT_TABLE_BITS);
++ fi = SkFastMin32(fi, 0xFFFF >> (16 - kSQRT_TABLE_BITS));
++ fx += dx;
++ fy += dy;
++ *dstC++ = cache[toggle + (sqrt_table[fi] >> (8 - kCache16Bits))];
++ toggle ^= (1 << kCache16Bits);
++ } while (--count != 0);
++ }
++ } else if (proc == mirror_tileproc) {
++ do {
++ SkFixed dist = SkFixedSqrt(SkFixedSquare(fx) + SkFixedSquare(fy));
++ unsigned fi = mirror_tileproc(dist);
++ SkASSERT(fi <= 0xFFFF);
++ fx += dx;
++ fy += dy;
++ *dstC++ = cache[toggle + (fi >> (16 - kCache16Bits))];
++ toggle ^= (1 << kCache16Bits);
++ } while (--count != 0);
++ } else {
++ SkASSERT(proc == repeat_tileproc);
++ do {
++ SkFixed dist = SkFixedSqrt(SkFixedSquare(fx) + SkFixedSquare(fy));
++ unsigned fi = repeat_tileproc(dist);
++ SkASSERT(fi <= 0xFFFF);
++ fx += dx;
++ fy += dy;
++ *dstC++ = cache[toggle + (fi >> (16 - kCache16Bits))];
++ toggle ^= (1 << kCache16Bits);
++ } while (--count != 0);
++ }
++ } else { // perspective case
++ SkScalar dstX = SkIntToScalar(x);
++ SkScalar dstY = SkIntToScalar(y);
++ do {
++ dstProc(fDstToIndex, dstX, dstY, &srcPt);
++ unsigned fi = proc(SkScalarToFixed(srcPt.length()));
++ SkASSERT(fi <= 0xFFFF);
++
++ int index = fi >> (16 - kCache16Bits);
++ *dstC++ = cache[toggle + index];
++ toggle ^= (1 << kCache16Bits);
++
++ dstX += SK_Scalar1;
++ } while (--count != 0);
++ }
++}
++
+ /* Two-point radial gradients are specified by two circles, each with a center
+ point and radius. The gradient can be considered to be a series of
+ concentric circles, with the color interpolated from the start circle
+ (at t=0) to the end circle (at t=1).
+
+ For each point (x, y) in the span, we want to find the
+ interpolated circle that intersects that point. The center
+ of the desired circle (Cx, Cy) falls at some distance t
+@@ -1661,109 +1663,17 @@ public:
+ info->fPoint[0] = fCenter1;
+ info->fPoint[1] = fCenter2;
+ info->fRadius[0] = fRadius1;
+ info->fRadius[1] = fRadius2;
+ }
+ return kRadial2_GradientType;
+ }
+
+- virtual void shadeSpan(int x, int y, SkPMColor* SK_RESTRICT dstC, int count) SK_OVERRIDE {
+- SkASSERT(count > 0);
+-
+- // Zero difference between radii: fill with transparent black.
+- // TODO: Is removing this actually correct? Two circles with the
+- // same radius, but different centers doesn't sound like it
+- // should be cleared
+- if (fDiffRadius == 0 && fCenter1 == fCenter2) {
+- sk_bzero(dstC, count * sizeof(*dstC));
+- return;
+- }
+- SkMatrix::MapXYProc dstProc = fDstToIndexProc;
+- TileProc proc = fTileProc;
+- const SkPMColor* SK_RESTRICT cache = this->getCache32();
+-
+- SkScalar foura = fA * 4;
+- bool posRoot = fDiffRadius < 0;
+- if (fDstToIndexClass != kPerspective_MatrixClass) {
+- SkPoint srcPt;
+- dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
+- SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
+- SkScalar dx, fx = srcPt.fX;
+- SkScalar dy, fy = srcPt.fY;
+-
+- if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
+- SkFixed fixedX, fixedY;
+- (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), &fixedX, &fixedY);
+- dx = SkFixedToScalar(fixedX);
+- dy = SkFixedToScalar(fixedY);
+- } else {
+- SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
+- dx = fDstToIndex.getScaleX();
+- dy = fDstToIndex.getSkewY();
+- }
+- SkScalar b = (SkScalarMul(fDiff.fX, fx) +
+- SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
+- SkScalar db = (SkScalarMul(fDiff.fX, dx) +
+- SkScalarMul(fDiff.fY, dy)) * 2;
+- if (proc == clamp_tileproc) {
+- for (; count > 0; --count) {
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
+- if (t < 0) {
+- *dstC++ = cache[-1];
+- } else if (t > 0xFFFF) {
+- *dstC++ = cache[kCache32Count * 2];
+- } else {
+- SkASSERT(t <= 0xFFFF);
+- *dstC++ = cache[t >> (16 - kCache32Bits)];
+- }
+- fx += dx;
+- fy += dy;
+- b += db;
+- }
+- } else if (proc == mirror_tileproc) {
+- for (; count > 0; --count) {
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
+- SkFixed index = mirror_tileproc(t);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> (16 - kCache32Bits)];
+- fx += dx;
+- fy += dy;
+- b += db;
+- }
+- } else {
+- SkASSERT(proc == repeat_tileproc);
+- for (; count > 0; --count) {
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
+- SkFixed index = repeat_tileproc(t);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> (16 - kCache32Bits)];
+- fx += dx;
+- fy += dy;
+- b += db;
+- }
+- }
+- } else { // perspective case
+- SkScalar dstX = SkIntToScalar(x);
+- SkScalar dstY = SkIntToScalar(y);
+- for (; count > 0; --count) {
+- SkPoint srcPt;
+- dstProc(fDstToIndex, dstX, dstY, &srcPt);
+- SkScalar fx = srcPt.fX;
+- SkScalar fy = srcPt.fY;
+- SkScalar b = (SkScalarMul(fDiff.fX, fx) +
+- SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
+- SkFixed index = proc(t);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> (16 - kCache32Bits)];
+- dstX += SK_Scalar1;
+- }
+- }
+- }
++ virtual void shadeSpan(int x, int y, SkPMColor* dstC, int count) SK_OVERRIDE;
+
+ virtual bool setContext(const SkBitmap& device,
+ const SkPaint& paint,
+ const SkMatrix& matrix) SK_OVERRIDE {
+ if (!this->INHERITED::setContext(device, paint, matrix)) {
+ return false;
+ }
+
+@@ -1817,16 +1727,110 @@ private:
+ fA = SkScalarSquare(fDiff.fX) + SkScalarSquare(fDiff.fY) - SK_Scalar1;
+ fOneOverTwoA = fA ? SkScalarInvert(fA * 2) : 0;
+
+ fPtsToUnit.setTranslate(-fCenter1.fX, -fCenter1.fY);
+ fPtsToUnit.postScale(inv, inv);
+ }
+ };
+
++void Two_Point_Radial_Gradient::shadeSpan(int x, int y, SkPMColor* SK_RESTRICT dstC, int count) {
++ SkASSERT(count > 0);
++
++ // Zero difference between radii: fill with transparent black.
++ // TODO: Is removing this actually correct? Two circles with the
++ // same radius, but different centers doesn't sound like it
++ // should be cleared
++ if (fDiffRadius == 0 && fCenter1 == fCenter2) {
++ sk_bzero(dstC, count * sizeof(*dstC));
++ return;
++ }
++ SkMatrix::MapXYProc dstProc = fDstToIndexProc;
++ TileProc proc = fTileProc;
++ const SkPMColor* SK_RESTRICT cache = this->getCache32();
++
++ SkScalar foura = fA * 4;
++ bool posRoot = fDiffRadius < 0;
++ if (fDstToIndexClass != kPerspective_MatrixClass) {
++ SkPoint srcPt;
++ dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
++ SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
++ SkScalar dx, fx = srcPt.fX;
++ SkScalar dy, fy = srcPt.fY;
++
++ if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
++ SkFixed fixedX, fixedY;
++ (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), &fixedX, &fixedY);
++ dx = SkFixedToScalar(fixedX);
++ dy = SkFixedToScalar(fixedY);
++ } else {
++ SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
++ dx = fDstToIndex.getScaleX();
++ dy = fDstToIndex.getSkewY();
++ }
++ SkScalar b = (SkScalarMul(fDiff.fX, fx) +
++ SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
++ SkScalar db = (SkScalarMul(fDiff.fX, dx) +
++ SkScalarMul(fDiff.fY, dy)) * 2;
++ if (proc == clamp_tileproc) {
++ for (; count > 0; --count) {
++ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
++ if (t < 0) {
++ *dstC++ = cache[-1];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[kCache32Count * 2];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> (16 - kCache32Bits)];
++ }
++ fx += dx;
++ fy += dy;
++ b += db;
++ }
++ } else if (proc == mirror_tileproc) {
++ for (; count > 0; --count) {
++ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
++ SkFixed index = mirror_tileproc(t);
++ SkASSERT(index <= 0xFFFF);
++ *dstC++ = cache[index >> (16 - kCache32Bits)];
++ fx += dx;
++ fy += dy;
++ b += db;
++ }
++ } else {
++ SkASSERT(proc == repeat_tileproc);
++ for (; count > 0; --count) {
++ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
++ SkFixed index = repeat_tileproc(t);
++ SkASSERT(index <= 0xFFFF);
++ *dstC++ = cache[index >> (16 - kCache32Bits)];
++ fx += dx;
++ fy += dy;
++ b += db;
++ }
++ }
++ } else { // perspective case
++ SkScalar dstX = SkIntToScalar(x);
++ SkScalar dstY = SkIntToScalar(y);
++ for (; count > 0; --count) {
++ SkPoint srcPt;
++ dstProc(fDstToIndex, dstX, dstY, &srcPt);
++ SkScalar fx = srcPt.fX;
++ SkScalar fy = srcPt.fY;
++ SkScalar b = (SkScalarMul(fDiff.fX, fx) +
++ SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
++ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
++ SkFixed index = proc(t);
++ SkASSERT(index <= 0xFFFF);
++ *dstC++ = cache[index >> (16 - kCache32Bits)];
++ dstX += SK_Scalar1;
++ }
++ }
++}
++
+ ///////////////////////////////////////////////////////////////////////////////
+
+ class Sweep_Gradient : public Gradient_Shader {
+ public:
+ Sweep_Gradient(SkScalar cx, SkScalar cy, const SkColor colors[],
+ const SkScalar pos[], int count, SkUnitMapper* mapper)
+ : Gradient_Shader(colors, pos, count, SkShader::kClamp_TileMode, mapper),
+ fCenter(SkPoint::Make(cx, cy))
diff --git a/gfx/skia/patches/archive/uninitialized-margin.patch b/gfx/skia/patches/archive/uninitialized-margin.patch
new file mode 100644
index 000000000..b8ab213e7
--- /dev/null
+++ b/gfx/skia/patches/archive/uninitialized-margin.patch
@@ -0,0 +1,22 @@
+diff --git a/gfx/skia/src/core/SkDraw.cpp b/gfx/skia/src/core/SkDraw.cpp
+--- a/gfx/skia/src/core/SkDraw.cpp
++++ b/gfx/skia/src/core/SkDraw.cpp
+@@ -2529,17 +2529,17 @@ static bool compute_bounds(const SkPath&
+
+ // init our bounds from the path
+ {
+ SkRect pathBounds = devPath.getBounds();
+ pathBounds.inset(-SK_ScalarHalf, -SK_ScalarHalf);
+ pathBounds.roundOut(bounds);
+ }
+
+- SkIPoint margin;
++ SkIPoint margin = SkIPoint::Make(0, 0);
+ if (filter) {
+ SkASSERT(filterMatrix);
+
+ SkMask srcM, dstM;
+
+ srcM.fBounds = *bounds;
+ srcM.fFormat = SkMask::kA8_Format;
+ srcM.fImage = NULL;
diff --git a/gfx/skia/patches/archive/user-config.patch b/gfx/skia/patches/archive/user-config.patch
new file mode 100644
index 000000000..11c6f1f63
--- /dev/null
+++ b/gfx/skia/patches/archive/user-config.patch
@@ -0,0 +1,40 @@
+diff --git a/gfx/skia/include/config/SkUserConfig.h b/gfx/skia/include/config/SkUserConfig.h
+--- a/gfx/skia/include/config/SkUserConfig.h
++++ b/gfx/skia/include/config/SkUserConfig.h
+@@ -140,16 +140,20 @@
+ /* If SK_DEBUG is defined, then you can optionally define SK_SUPPORT_UNITTEST
+ which will run additional self-tests at startup. These can take a long time,
+ so this flag is optional.
+ */
+ #ifdef SK_DEBUG
+ //#define SK_SUPPORT_UNITTEST
+ #endif
+
++/* Don't dither 32bit gradients, to match what the canvas test suite expects.
++ */
++#define SK_DISABLE_DITHER_32BIT_GRADIENT
++
+ /* If your system embeds skia and has complex event logging, define this
+ symbol to name a file that maps the following macros to your system's
+ equivalents:
+ SK_TRACE_EVENT0(event)
+ SK_TRACE_EVENT1(event, name1, value1)
+ SK_TRACE_EVENT2(event, name1, value1, name2, value2)
+ src/utils/SkDebugTrace.h has a trivial implementation that writes to
+ the debug output stream. If SK_USER_TRACE_INCLUDE_FILE is not defined,
+@@ -161,9 +165,15 @@
+ */
+ #ifdef SK_SAMPLES_FOR_X
+ #define SK_R32_SHIFT 16
+ #define SK_G32_SHIFT 8
+ #define SK_B32_SHIFT 0
+ #define SK_A32_SHIFT 24
+ #endif
+
++/* Don't include stdint.h on windows as it conflicts with our build system.
++ */
++#ifdef SK_BUILD_FOR_WIN32
++ #define SK_IGNORE_STDINT_DOT_H
++#endif
++
+ #endif
diff --git a/gfx/skia/skia/include/android/SkBRDAllocator.h b/gfx/skia/skia/include/android/SkBRDAllocator.h
new file mode 100644
index 000000000..3ca30c9b4
--- /dev/null
+++ b/gfx/skia/skia/include/android/SkBRDAllocator.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBRDAllocator_DEFINED
+#define SkBRDAllocator_DEFINED
+
+#include "SkBitmap.h"
+#include "SkCodec.h"
+
+/**
+ * Abstract subclass of SkBitmap's allocator.
+ * Allows the allocator to indicate if the memory it allocates
+ * is zero initialized.
+ */
+class SkBRDAllocator : public SkBitmap::Allocator {
+public:
+
+ /**
+ * Indicates if the memory allocated by this allocator is
+ * zero initialized.
+ */
+ virtual SkCodec::ZeroInitialized zeroInit() const = 0;
+};
+
+#endif // SkBRDAllocator_DEFINED
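As a rough sketch of how the zeroInit() hint above can be consumed, the helper below clears a bitmap's pixels only when the allocator does not guarantee zero-initialized memory. The enumerator SkCodec::kYes_ZeroInitialized and the sk_bzero()/getSize() calls are taken from the contemporaneous Skia headers and should be treated as assumptions here.

#include "SkBRDAllocator.h"
#include "SkBitmap.h"

// Illustrative only: skip the explicit clear when the allocator already
// reports zero-initialized memory.
static void clear_unless_zero_init(SkBitmap* bitmap, SkBRDAllocator* allocator) {
    if (allocator->zeroInit() != SkCodec::kYes_ZeroInitialized) {
        sk_bzero(bitmap->getPixels(), bitmap->getSize());
    }
}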
diff --git a/gfx/skia/skia/include/android/SkBitmapRegionDecoder.h b/gfx/skia/skia/include/android/SkBitmapRegionDecoder.h
new file mode 100644
index 000000000..b8922d469
--- /dev/null
+++ b/gfx/skia/skia/include/android/SkBitmapRegionDecoder.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapRegionDecoder_DEFINED
+#define SkBitmapRegionDecoder_DEFINED
+
+#include "SkBitmap.h"
+#include "SkBRDAllocator.h"
+#include "SkEncodedFormat.h"
+#include "SkStream.h"
+
+/*
+ * This class aims to provide an interface to test multiple implementations of
+ * SkBitmapRegionDecoder.
+ */
+class SkBitmapRegionDecoder {
+public:
+
+ enum Strategy {
+ kAndroidCodec_Strategy, // Uses SkAndroidCodec for scaling and subsetting
+ };
+
+ /*
+ * @param data Refs the data while this object exists, unrefs on destruction
+ * @param strategy Strategy used for scaling and subsetting
+ * @return Tries to create an SkBitmapRegionDecoder, returns NULL on failure
+ */
+ static SkBitmapRegionDecoder* Create(sk_sp<SkData>, Strategy strategy);
+
+ /*
+ * @param stream Takes ownership of the stream
+ * @param strategy Strategy used for scaling and subsetting
+ * @return Tries to create an SkBitmapRegionDecoder, returns NULL on failure
+ */
+ static SkBitmapRegionDecoder* Create(
+ SkStreamRewindable* stream, Strategy strategy);
+
+ /*
+ * Decode a scaled region of the encoded image stream
+ *
+ * @param bitmap Container for decoded pixels. It is assumed that the pixels
+ * are initially unallocated and will be allocated by this function.
+ * @param allocator Allocator for the pixels. If this is NULL, the default
+ * allocator (HeapAllocator) will be used.
+ * @param desiredSubset Subset of the original image to decode.
+ * @param sampleSize An integer downscaling factor for the decode.
+ * @param colorType Preferred output colorType.
+ * New implementations should return NULL if they do not support
+ * decoding to this color type.
+ * The old kOriginal_Strategy will decode to a default color type
+ * if this color type is unsupported.
+ * @param requireUnpremul If the image is not opaque, we will use this to determine the
+ * alpha type to use.
+ *
+ */
+ virtual bool decodeRegion(SkBitmap* bitmap, SkBRDAllocator* allocator,
+ const SkIRect& desiredSubset, int sampleSize,
+ SkColorType colorType, bool requireUnpremul) = 0;
+ /*
+     * @param colorType Requested destination color type
+ * @return true if we support the requested color type and false otherwise
+ */
+ virtual bool conversionSupported(SkColorType colorType) = 0;
+
+ virtual SkEncodedFormat getEncodedFormat() = 0;
+
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+
+ virtual ~SkBitmapRegionDecoder() {}
+
+protected:
+
+ SkBitmapRegionDecoder(int width, int height)
+ : fWidth(width)
+ , fHeight(height)
+ {}
+
+private:
+ const int fWidth;
+ const int fHeight;
+};
+
+#endif
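A minimal usage sketch for the interface above: build a decoder from encoded data and decode a subregion at half resolution. SkData::MakeFromFileName(), SkIRect::MakeWH(), and kN32_SkColorType come from the wider Skia headers and are assumptions of this sketch, not part of this file.

#include "SkBitmapRegionDecoder.h"
#include "SkData.h"

// Illustrative only: decode the top-left 100x100 region at sampleSize 2.
static bool decode_corner(const char* path, SkBitmap* out) {
    sk_sp<SkData> data = SkData::MakeFromFileName(path);
    if (!data) {
        return false;
    }
    SkBitmapRegionDecoder* brd = SkBitmapRegionDecoder::Create(
            data, SkBitmapRegionDecoder::kAndroidCodec_Strategy);
    if (!brd) {
        return false;
    }
    bool ok = brd->decodeRegion(out, nullptr /* default HeapAllocator */,
                                SkIRect::MakeWH(100, 100), 2 /* sampleSize */,
                                kN32_SkColorType, false /* requireUnpremul */);
    delete brd;
    return ok;
}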
diff --git a/gfx/skia/skia/include/animator/SkAnimator.h b/gfx/skia/skia/include/animator/SkAnimator.h
new file mode 100644
index 000000000..0fe787c52
--- /dev/null
+++ b/gfx/skia/skia/include/animator/SkAnimator.h
@@ -0,0 +1,501 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkAnimator_DEFINED
+#define SkAnimator_DEFINED
+
+#include "SkScalar.h"
+#include "SkKey.h"
+#include "SkEventSink.h"
+
+class SkAnimateMaker;
+class SkCanvas;
+class SkDisplayable;
+class SkEvent;
+class SkExtras;
+struct SkMemberInfo;
+class SkPaint;
+struct SkRect;
+class SkStream;
+class SkTypedArray;
+class SkXMLParserError;
+class SkDOM;
+struct SkDOMNode;
+
+/** SkElementType is the type of element: a rectangle, a color, an animator, and so on.
+ This enum is incomplete and will be fleshed out in a future release */
+enum SkElementType {
+ kElementDummyType
+};
+/** SkFieldType is the type of field: a scalar, a string, an integer, a boolean, and so on.
+ This enum is incomplete and will be fleshed out in a future release */
+enum SkFieldType {
+ kFieldDummyType
+};
+
+/** \class SkAnimator
+
+ The SkAnimator class decodes an XML stream into a display list. The
+    display list can be drawn statically as a picture, or can draw
+ different elements at different times to form a moving animation.
+
+ SkAnimator does not read the system time on its own; it relies on the
+ caller to pass the current time. The caller can pause, speed up, or
+ reverse the animation by varying the time passed in.
+
+ The XML describing the display list must conform to the schema
+ described by SkAnimateSchema.xsd.
+
+ The XML must contain an <event> element to draw. Usually, it contains
+ an <event kind="onload" /> block to add some drawing elements to the
+ display list when the document is first decoded.
+
+    Here's a "Hello World" XML sample:
+
+ <screenplay>
+ <event kind="onload" >
+ <text text="Hello World" y="20" />
+ </event>
+ </screenplay>
+
+ To read and draw this sample:
+
+ // choose one of these two
+ SkAnimator animator; // declare an animator instance on the stack
+ // SkAnimator* animator = new SkAnimator() // or one could instantiate the class
+
+ // choose one of these three
+ animator.decodeMemory(buffer, size); // to read from RAM
+ animator.decodeStream(stream); // to read from a user-defined stream (e.g., a zip file)
+ animator.decodeURI(filename); // to read from a web location, or from a local text file
+
+ // to draw to the current window:
+ SkCanvas canvas(getBitmap()); // create a canvas
+ animator.draw(canvas, &paint, 0); // draw the scene
+*/
+class SkAnimator : public SkEventSink {
+public:
+ SkAnimator();
+ virtual ~SkAnimator();
+
+ /** Add a drawable extension to the graphics engine. Experimental.
+ @param extras A derived class that implements methods that identify and instantiate the class
+ */
+ void addExtras(SkExtras* extras);
+
+ /** Read in XML from a stream, and append it to the current
+ animator. Returns false if an error was encountered.
+ Error diagnostics are stored in fErrorCode and fLineNumber.
+ @param stream The stream to append.
+ @return true if the XML was parsed successfully.
+ */
+ bool appendStream(SkStream* stream);
+
+ /** Read in XML from memory. Returns true if the file can be
+ read without error. Returns false if an error was encountered.
+ Error diagnostics are stored in fErrorCode and fLineNumber.
+ @param buffer The XML text as UTF-8 characters.
+ @param size The XML text length in bytes.
+ @return true if the XML was parsed successfully.
+ */
+ bool decodeMemory(const void* buffer, size_t size);
+
+ /** Read in XML from a stream. Returns true if the file can be
+ read without error. Returns false if an error was encountered.
+ Error diagnostics are stored in fErrorCode and fLineNumber.
+        @param stream The stream containing the XML text as UTF-8 characters.
+ @return true if the XML was parsed successfully.
+ */
+ virtual bool decodeStream(SkStream* stream);
+
+ /** Parse the DOM tree starting at the specified node. Returns true if it can be
+ parsed without error. Returns false if an error was encountered.
+ Error diagnostics are stored in fErrorCode and fLineNumber.
+ @return true if the DOM was parsed successfully.
+ */
+ virtual bool decodeDOM(const SkDOM&, const SkDOMNode*);
+
+ /** Read in XML from a URI. Returns true if the file can be
+ read without error. Returns false if an error was encountered.
+ Error diagnostics are stored in fErrorCode and fLineNumber.
+ @param uri The complete url path to be read (either ftp, http or https).
+ @return true if the XML was parsed successfully.
+ */
+ bool decodeURI(const char uri[]);
+
+ /** Pass a char event, usually a keyboard symbol, to the animator.
+ This triggers events of the form <event kind="keyChar" key="... />
+ @param ch The character to match against <event> element "key"
+ attributes.
+ @return true if the event was dispatched successfully.
+ */
+ bool doCharEvent(SkUnichar ch);
+
+ /** Experimental:
+ Pass a mouse click event along with the mouse coordinates to
+ the animator. This triggers events of the form <event kind="mouseDown" ... />
+ and other mouse events.
+ @param state The mouse state, described by SkView::Click::State : values are
+ down == 0, moved == 1, up == 2
+ @param x The x-position of the mouse
+ @param y The y-position of the mouse
+ @return true if the event was dispatched successfully.
+ */
+ bool doClickEvent(int state, SkScalar x, SkScalar y);
+
+    /** Pass a meta-key event, such as an arrow key, to the animator.
+ This triggers events of the form <event kind="keyPress" code="... />
+ @param code The key to match against <event> element "code"
+ attributes.
+ @return true if the event was dispatched successfully.
+ */
+ bool doKeyEvent(SkKey code);
+ bool doKeyUpEvent(SkKey code);
+
+ /** Send an event to the animator. The animator's clock is set
+ relative to the current time.
+ @return true if the event was dispatched successfully.
+ */
+ bool doUserEvent(const SkEvent& evt);
+
+ /** The possible results from the draw function.
+ */
+ enum DifferenceType {
+ kNotDifferent,
+ kDifferent,
+ kPartiallyDifferent
+ };
+ /** Draws one frame of the animation. The first call to draw always
+ draws the initial frame of the animation. Subsequent calls draw
+ the offset into the animation by
+ subtracting the initial time from the current time.
+ @param canvas The canvas to draw into.
+ @param paint The paint to draw with.
+ @param time The offset into the current animation.
+ @return kNotDifferent if there are no active animations; kDifferent if there are active animations; and
+ kPartiallyDifferent if the document contains an active <bounds> element that specifies a minimal
+ redraw area.
+ */
+ DifferenceType draw(SkCanvas* canvas, SkPaint* paint, SkMSec time);
+
+ /** Draws one frame of the animation, using a new Paint each time.
+ The first call to draw always
+ draws the initial frame of the animation. Subsequent calls draw
+ the offset into the animation by
+ subtracting the initial time from the current time.
+ @param canvas The canvas to draw into.
+ @param time The offset into the current animation.
+ @return kNotDifferent if there are no active animations; kDifferent if there are active animations; and
+ kPartiallyDifferent if the document contains an active <bounds> element that specifies a minimal
+ redraw area.
+ */
+ DifferenceType draw(SkCanvas* canvas, SkMSec time);
+
+ /** Experimental:
+ Helper to choose whether to return a SkView::Click handler.
+ @param x ignored
+ @param y ignored
+ @return true if a mouseDown event handler is enabled.
+ */
+ bool findClickEvent(SkScalar x, SkScalar y);
+
+
+ /** Get the nested animator associated with this element, if any.
+ Use this to access a movie's event sink, to send events to movies.
+ @param element the value returned by getElement
+ @return the internal animator.
+ */
+ const SkAnimator* getAnimator(const SkDisplayable* element) const;
+
+    /** Returns the integer value of the specified element's attribute[index]
+ @param element the value returned by getElement
+ @param field the value returned by getField
+ @param index the array entry
+ @return the integer value to retrieve, or SK_NaN32 if unsuccessful
+ */
+ int32_t getArrayInt(const SkDisplayable* element, const SkMemberInfo* field, int index);
+
+    /** Returns the integer value of the specified element's attribute[index]
+ @param elementID is the value of the id attribute in the XML of this element
+ @param fieldName specifies the name of the attribute
+ @param index the array entry
+ @return the integer value to retrieve, or SK_NaN32 if unsuccessful
+ */
+ int32_t getArrayInt(const char* elementID, const char* fieldName, int index);
+
+ /** Returns the scalar value of the specified element's attribute[index]
+ @param element the value returned by getElement
+ @param field the value returned by getField
+ @param index the array entry
+ @return the scalar value to retrieve, or SK_ScalarNaN if unsuccessful
+ */
+ SkScalar getArrayScalar(const SkDisplayable* element, const SkMemberInfo* field, int index);
+
+ /** Returns the scalar value of the specified element's attribute[index]
+ @param elementID is the value of the id attribute in the XML of this element
+ @param fieldName specifies the name of the attribute
+ @param index the array entry
+ @return the scalar value to retrieve, or SK_ScalarNaN if unsuccessful
+ */
+ SkScalar getArrayScalar(const char* elementID, const char* fieldName, int index);
+
+ /** Returns the string value of the specified element's attribute[index]
+ @param element is a value returned by getElement
+ @param field is a value returned by getField
+ @param index the array entry
+ @return the string value to retrieve, or null if unsuccessful
+ */
+ const char* getArrayString(const SkDisplayable* element, const SkMemberInfo* field, int index);
+
+ /** Returns the string value of the specified element's attribute[index]
+ @param elementID is the value of the id attribute in the XML of this element
+ @param fieldName specifies the name of the attribute
+ @param index the array entry
+ @return the string value to retrieve, or null if unsuccessful
+ */
+ const char* getArrayString(const char* elementID, const char* fieldName, int index);
+
+ /** Returns the XML element corresponding to the given ID.
+ @param elementID is the value of the id attribute in the XML of this element
+ @return the element matching the ID, or null if the element can't be found
+ */
+ const SkDisplayable* getElement(const char* elementID);
+
+ /** Returns the element type corresponding to the XML element.
+ The element type matches the element name; for instance, <line> returns kElement_LineType
+ @param element is a value returned by getElement
+ @return element type, or 0 if the element can't be found
+ */
+ SkElementType getElementType(const SkDisplayable* element);
+
+ /** Returns the element type corresponding to the given ID.
+ @param elementID is the value of the id attribute in the XML of this element
+ @return element type, or 0 if the element can't be found
+ */
+ SkElementType getElementType(const char* elementID);
+
+ /** Returns the XML field of the named attribute in the XML element.
+ @param element is a value returned by getElement
+ @param fieldName is the attribute to return
+ @return the attribute matching the fieldName, or null if the element can't be found
+ */
+ const SkMemberInfo* getField(const SkDisplayable* element, const char* fieldName);
+
+ /** Returns the XML field of the named attribute in the XML element matching the elementID.
+ @param elementID is the value of the id attribute in the XML of this element
+ @param fieldName is the attribute to return
+ @return the attribute matching the fieldName, or null if the element can't be found
+ */
+ const SkMemberInfo* getField(const char* elementID, const char* fieldName);
+
+    /** Returns the value type corresponding to the element's attribute.
+        The value type matches the XML schema and may be kField_BooleanType, kField_ScalarType, etc.
+ @param field is a value returned by getField
+ @return the attribute type, or 0 if the element can't be found
+ */
+ SkFieldType getFieldType(const SkMemberInfo* field);
+
+    /** Returns the value type corresponding to the element's attribute.
+ @param elementID is the value of the id attribute in the XML of this element
+ @param fieldName specifies the name of the attribute
+ @return the attribute type, or 0 if the element can't be found
+ */
+ SkFieldType getFieldType(const char* elementID, const char* fieldName);
+
+ /** Returns the recommended animation interval. Returns zero if no
+ interval is specified.
+ */
+ SkMSec getInterval();
+
+ /** Returns the partial rectangle to invalidate after drawing. Call after draw() returns
+        kPartiallyDifferent to do a minimal inval(). */
+ void getInvalBounds(SkRect* inval);
+
+ /** Returns the details of any error encountered while parsing the XML.
+ */
+ const SkXMLParserError* getParserError();
+
+    /** Returns the details of any error encountered while parsing the XML as a string.
+ */
+ const char* getParserErrorString();
+
+    /** Returns the integer value of the specified element's attribute
+ @param element is a value returned by getElement
+ @param field is a value returned by getField
+ @return the integer value to retrieve, or SK_NaN32 if not found
+ */
+ int32_t getInt(const SkDisplayable* element, const SkMemberInfo* field);
+
+    /** Returns the integer value of the specified element's attribute
+ @param elementID is the value of the id attribute in the XML of this element
+ @param fieldName specifies the name of the attribute
+ @return the integer value to retrieve, or SK_NaN32 if not found
+ */
+ int32_t getInt(const char* elementID, const char* fieldName);
+
+ /** Returns the scalar value of the specified element's attribute
+ @param element is a value returned by getElement
+ @param field is a value returned by getField
+ @return the scalar value to retrieve, or SK_ScalarNaN if not found
+ */
+ SkScalar getScalar(const SkDisplayable* element, const SkMemberInfo* field);
+
+ /** Returns the scalar value of the specified element's attribute
+ @param elementID is the value of the id attribute in the XML of this element
+ @param fieldName specifies the name of the attribute
+ @return the scalar value to retrieve, or SK_ScalarNaN if not found
+ */
+ SkScalar getScalar(const char* elementID, const char* fieldName);
+
+ /** Returns the string value of the specified element's attribute
+ @param element is a value returned by getElement
+ @param field is a value returned by getField
+ @return the string value to retrieve, or null if not found
+ */
+ const char* getString(const SkDisplayable* element, const SkMemberInfo* field);
+
+ /** Returns the string value of the specified element's attribute
+ @param elementID is the value of the id attribute in the XML of this element
+ @param fieldName specifies the name of the attribute
+ @return the string value to retrieve, or null if not found
+ */
+ const char* getString(const char* elementID, const char* fieldName);
+
+    /** Gets the default directory of the URL base path, either set explicitly or taken from the last URL read. */
+ const char* getURIBase();
+
+ /** Resets the animator to a newly created state with no animation data. */
+ void initialize();
+
+ /** Experimental. Resets any active animations so that the next time passed is treated as
+ time zero. */
+ void reset();
+
+    /** Sets the integer array value of the specified element's attribute
+ @param elementID is the value of the id attribute in the XML of this element
+ @param fieldName specifies the name of the attribute
+ @param array is the c-style array of integers
+ @param count is the length of the array
+ @return true if the value was set successfully
+ */
+ bool setArrayInt(const char* elementID, const char* fieldName, const int* array, int count);
+
+    /** Sets the string array value of the specified element's attribute
+ @param elementID is the value of the id attribute in the XML of this element
+ @param fieldName specifies the name of the attribute
+ @param array is the c-style array of strings
+ @param count is the length of the array
+ @return true if the value was set successfully
+ */
+ bool setArrayString(const char* elementID, const char* fieldName, const char** array, int count);
+
+    /** Sets the integer value of the specified element's attribute
+ @param elementID is the value of the id attribute in the XML of this element
+ @param fieldName specifies the name of the attribute
+ @param data the integer value to set
+ @return true if the value was set successfully
+ */
+ bool setInt(const char* elementID, const char* fieldName, int32_t data);
+
+ /** Sets the scalar value of the specified element's attribute
+ @param elementID is the value of the id attribute in the XML of this element
+ @param fieldName specifies the name of the attribute
+ @param data the scalar value to set
+ @return true if the value was set successfully
+ */
+ bool setScalar(const char* elementID, const char* fieldName, SkScalar data);
+
+ /** Sets the string value of the specified element's attribute
+ @param elementID is the value of the id attribute in the XML of this element
+ @param fieldName specifies the name of the attribute
+ @param data the string value to set
+ @return true if the value was set successfully
+ */
+ bool setString(const char* elementID, const char* fieldName, const char* data);
+
+ /** Sets the file default directory of the URL base path
+ @param path the directory path
+ */
+ void setURIBase(const char* path);
+
+ typedef void* Handler;
+ // This guy needs to be exported to java, so don't make it virtual
+ void setHostHandler(Handler handler) {
+ this->onSetHostHandler(handler);
+ }
+
+ /** \class Timeline
+        Returns the current time to the animator. To supply a custom timeline, create a child
+ class and override the getMSecs method.
+ */
+ class Timeline {
+ public:
+ virtual ~Timeline() {}
+
+ /** Returns the current time in milliseconds */
+ virtual SkMSec getMSecs() const = 0;
+ };
+
+ /** Sets a user class to return the current time to the animator.
+ Optional; if not called, the system clock will be used by calling
+ SkEvent::GetMSecsSinceStartup instead.
+ @param callBack the time function
+ */
+ void setTimeline(const Timeline& );
+
+ static void Init(bool runUnitTests);
+ static void Term();
+
+    /** The event sink that events generated by the animation are posted to.
+ Screenplay also posts an inval event to this event sink after processing an
+ event to force a redraw.
+ @param target the event sink id
+ */
+ void setHostEventSinkID(SkEventSinkID hostID);
+ SkEventSinkID getHostEventSinkID() const;
+
+ // helper
+ void setHostEventSink(SkEventSink* sink) {
+ this->setHostEventSinkID(sink ? sink->getSinkID() : 0);
+ }
+
+ virtual void setJavaOwner(Handler owner);
+
+#ifdef SK_DEBUG
+ virtual void eventDone(const SkEvent& evt);
+ virtual bool isTrackingEvents();
+ static bool NoLeaks();
+#endif
+
+protected:
+ virtual void onSetHostHandler(Handler handler);
+ virtual void onEventPost(SkEvent*, SkEventSinkID);
+ virtual void onEventPostTime(SkEvent*, SkEventSinkID, SkMSec time);
+
+private:
+// helper functions for setters
+ bool setArray(SkDisplayable* element, const SkMemberInfo* field, SkTypedArray array);
+ bool setArray(const char* elementID, const char* fieldName, SkTypedArray array);
+ bool setInt(SkDisplayable* element, const SkMemberInfo* field, int32_t data);
+ bool setScalar(SkDisplayable* element, const SkMemberInfo* field, SkScalar data);
+ bool setString(SkDisplayable* element, const SkMemberInfo* field, const char* data);
+
+ virtual bool onEvent(const SkEvent&);
+ SkAnimateMaker* fMaker;
+ friend class SkAnimateMaker;
+ friend class SkAnimatorScript;
+ friend class SkAnimatorScript2;
+ friend class SkApply;
+ friend class SkDisplayMovie;
+ friend class SkDisplayType;
+ friend class SkPost;
+ friend class SkXMLAnimatorWriter;
+};
+
+#endif
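A small sketch of the Timeline hook documented above, feeding the animator a caller-controlled clock instead of SkEvent::GetMSecsSinceStartup(); the class and variable names are illustrative only.

#include "SkAnimator.h"

// Illustrative only: a timeline the caller advances explicitly.
class ManualTimeline : public SkAnimator::Timeline {
public:
    explicit ManualTimeline(SkMSec now) : fNow(now) {}
    virtual SkMSec getMSecs() const { return fNow; }
    void advance(SkMSec delta) { fNow += delta; }
private:
    SkMSec fNow;
};

// Typical use (sketch):
//   ManualTimeline timeline(0);
//   animator.setTimeline(timeline);    // animator reads timeline.getMSecs()
//   animator.draw(canvas, &paint, timeline.getMSecs());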
diff --git a/gfx/skia/skia/include/animator/SkAnimatorView.h b/gfx/skia/skia/include/animator/SkAnimatorView.h
new file mode 100644
index 000000000..2b2c61b5d
--- /dev/null
+++ b/gfx/skia/skia/include/animator/SkAnimatorView.h
@@ -0,0 +1,39 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkAnimatorView_DEFINED
+#define SkAnimatorView_DEFINED
+
+#include "SkView.h"
+#include "SkAnimator.h"
+
+class SkAnimatorView : public SkView {
+public:
+ SkAnimatorView();
+ virtual ~SkAnimatorView();
+
+ SkAnimator* getAnimator() const { return fAnimator; }
+
+ bool decodeFile(const char path[]);
+ bool decodeMemory(const void* buffer, size_t size);
+ bool decodeStream(SkStream* stream);
+
+protected:
+ // overrides
+ virtual bool onEvent(const SkEvent&);
+ virtual void onDraw(SkCanvas*);
+ virtual void onInflate(const SkDOM&, const SkDOM::Node*);
+
+private:
+ SkAnimator* fAnimator;
+
+ typedef SkView INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/c/sk_canvas.h b/gfx/skia/skia/include/c/sk_canvas.h
new file mode 100644
index 000000000..1e1dd24f9
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_canvas.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_canvas_DEFINED
+#define sk_canvas_DEFINED
+
+#include "sk_types.h"
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+/**
+ Save the current matrix and clip on the canvas. When the
+ balancing call to sk_canvas_restore() is made, the previous matrix
+ and clip are restored.
+*/
+SK_API void sk_canvas_save(sk_canvas_t*);
+/**
+ This behaves the same as sk_canvas_save(), but in addition it
+ allocates an offscreen surface. All drawing calls are directed
+ there, and only when the balancing call to sk_canvas_restore() is
+    made is that offscreen transferred to the canvas (or the previous
+ layer).
+
+ @param sk_rect_t* (may be null) This rect, if non-null, is used as
+ a hint to limit the size of the offscreen, and
+ thus drawing may be clipped to it, though that
+ clipping is not guaranteed to happen. If exact
+ clipping is desired, use sk_canvas_clip_rect().
+ @param sk_paint_t* (may be null) The paint is copied, and is applied
+ to the offscreen when sk_canvas_restore() is
+ called.
+*/
+SK_API void sk_canvas_save_layer(sk_canvas_t*, const sk_rect_t*, const sk_paint_t*);
+/**
+ This call balances a previous call to sk_canvas_save() or
+ sk_canvas_save_layer(), and is used to remove all modifications to
+ the matrix and clip state since the last save call. It is an
+ error to call sk_canvas_restore() more times than save and
+ save_layer were called.
+*/
+SK_API void sk_canvas_restore(sk_canvas_t*);
+
+/**
+ Preconcat the current coordinate transformation matrix with the
+ specified translation.
+*/
+SK_API void sk_canvas_translate(sk_canvas_t*, float dx, float dy);
+/**
+ Preconcat the current coordinate transformation matrix with the
+ specified scale.
+*/
+SK_API void sk_canvas_scale(sk_canvas_t*, float sx, float sy);
+/**
+ Preconcat the current coordinate transformation matrix with the
+ specified rotation in degrees.
+*/
+SK_API void sk_canvas_rotate_degrees(sk_canvas_t*, float degrees);
+/**
+ Preconcat the current coordinate transformation matrix with the
+ specified rotation in radians.
+*/
+SK_API void sk_canvas_rotate_radians(sk_canvas_t*, float radians);
+/**
+ Preconcat the current coordinate transformation matrix with the
+ specified skew.
+*/
+SK_API void sk_canvas_skew(sk_canvas_t*, float sx, float sy);
+/**
+ Preconcat the current coordinate transformation matrix with the
+ specified matrix.
+*/
+SK_API void sk_canvas_concat(sk_canvas_t*, const sk_matrix_t*);
+
+/**
+ Modify the current clip with the specified rectangle. The new
+ current clip will be the intersection of the old clip and the
+    rectangle.
+*/
+SK_API void sk_canvas_clip_rect(sk_canvas_t*, const sk_rect_t*);
+/**
+ Modify the current clip with the specified path. The new
+ current clip will be the intersection of the old clip and the
+ path.
+*/
+SK_API void sk_canvas_clip_path(sk_canvas_t*, const sk_path_t*);
+
+/**
+ Fill the entire canvas (restricted to the current clip) with the
+ specified paint.
+*/
+SK_API void sk_canvas_draw_paint(sk_canvas_t*, const sk_paint_t*);
+/**
+ Draw the specified rectangle using the specified paint. The
+ rectangle will be filled or stroked based on the style in the
+ paint.
+*/
+SK_API void sk_canvas_draw_rect(sk_canvas_t*, const sk_rect_t*, const sk_paint_t*);
+/**
+ * Draw the circle centered at (cx, cy) with radius rad using the specified paint.
+ * The circle will be filled or framed based on the style in the paint
+ */
+SK_API void sk_canvas_draw_circle(sk_canvas_t*, float cx, float cy, float rad, const sk_paint_t*);
+/**
+ Draw the specified oval using the specified paint. The oval will be
+ filled or framed based on the style in the paint
+*/
+SK_API void sk_canvas_draw_oval(sk_canvas_t*, const sk_rect_t*, const sk_paint_t*);
+/**
+ Draw the specified path using the specified paint. The path will be
+ filled or framed based on the style in the paint
+*/
+SK_API void sk_canvas_draw_path(sk_canvas_t*, const sk_path_t*, const sk_paint_t*);
+/**
+ Draw the specified image, with its top/left corner at (x,y), using
+ the specified paint, transformed by the current matrix.
+
+ @param sk_paint_t* (may be NULL) the paint used to draw the image.
+*/
+SK_API void sk_canvas_draw_image(sk_canvas_t*, const sk_image_t*,
+ float x, float y, const sk_paint_t*);
+/**
+ Draw the specified image, scaling and translating so that it fills
+ the specified dst rect. If the src rect is non-null, only that
+ subset of the image is transformed and drawn.
+
+ @param sk_paint_t* (may be NULL) The paint used to draw the image.
+*/
+SK_API void sk_canvas_draw_image_rect(sk_canvas_t*, const sk_image_t*,
+ const sk_rect_t* src,
+ const sk_rect_t* dst, const sk_paint_t*);
+
+/**
+    Draw the picture into this canvas (replay the picture's drawing commands).
+
+ @param sk_matrix_t* If non-null, apply that matrix to the CTM when
+ drawing this picture. This is logically
+ equivalent to: save, concat, draw_picture,
+ restore.
+
+ @param sk_paint_t* If non-null, draw the picture into a temporary
+ buffer, and then apply the paint's alpha,
+ colorfilter, imagefilter, and xfermode to that
+ buffer as it is drawn to the canvas. This is
+ logically equivalent to save_layer(paint),
+ draw_picture, restore.
+*/
+SK_API void sk_canvas_draw_picture(sk_canvas_t*, const sk_picture_t*,
+ const sk_matrix_t*, const sk_paint_t*);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
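A short C sketch tying the calls above together with the usual save/transform/draw/restore pattern; the paint helpers come from sk_paint.h further below and the numeric values are arbitrary.

#include "sk_canvas.h"
#include "sk_paint.h"

/* Illustrative only: fill a translated, rotated rectangle. */
static void draw_badge(sk_canvas_t* canvas) {
    sk_paint_t* paint = sk_paint_new();
    sk_paint_set_color(paint, sk_color_set_argb(0xFF, 0x40, 0x80, 0xC0));

    sk_rect_t rect = { 0.0f, 0.0f, 64.0f, 32.0f };  /* left, top, right, bottom */

    sk_canvas_save(canvas);
    sk_canvas_translate(canvas, 16.0f, 16.0f);
    sk_canvas_rotate_degrees(canvas, 15.0f);
    sk_canvas_draw_rect(canvas, &rect, paint);
    sk_canvas_restore(canvas);

    sk_paint_delete(paint);
}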
diff --git a/gfx/skia/skia/include/c/sk_data.h b/gfx/skia/skia/include/c/sk_data.h
new file mode 100644
index 000000000..90333bba5
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_data.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_data_DEFINED
+#define sk_data_DEFINED
+
+#include "sk_types.h"
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+/**
+ Returns a new empty sk_data_t. This call must be balanced with a call to
+ sk_data_unref().
+*/
+SK_API sk_data_t* sk_data_new_empty();
+/**
+ Returns a new sk_data_t by copying the specified source data.
+ This call must be balanced with a call to sk_data_unref().
+*/
+SK_API sk_data_t* sk_data_new_with_copy(const void* src, size_t length);
+/**
+ Pass ownership of the given memory to a new sk_data_t, which will
+    call free() when the reference count of the data goes to zero. For
+ example:
+ size_t length = 1024;
+ void* buffer = malloc(length);
+ memset(buffer, 'X', length);
+ sk_data_t* data = sk_data_new_from_malloc(buffer, length);
+ This call must be balanced with a call to sk_data_unref().
+*/
+SK_API sk_data_t* sk_data_new_from_malloc(const void* memory, size_t length);
+/**
+ Returns a new sk_data_t using a subset of the data in the
+ specified source sk_data_t. This call must be balanced with a
+ call to sk_data_unref().
+*/
+SK_API sk_data_t* sk_data_new_subset(const sk_data_t* src, size_t offset, size_t length);
+
+/**
+ Increment the reference count on the given sk_data_t. Must be
+ balanced by a call to sk_data_unref().
+*/
+SK_API void sk_data_ref(const sk_data_t*);
+/**
+ Decrement the reference count. If the reference count is 1 before
+ the decrement, then release both the memory holding the sk_data_t
+ and the memory it is managing. New sk_data_t are created with a
+ reference count of 1.
+*/
+SK_API void sk_data_unref(const sk_data_t*);
+
+/**
+ Returns the number of bytes stored.
+*/
+SK_API size_t sk_data_get_size(const sk_data_t*);
+/**
+ Returns the pointer to the data.
+ */
+SK_API const void* sk_data_get_data(const sk_data_t*);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
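A small C sketch of the ownership rules above: every sk_data_new_* result is balanced by sk_data_unref().

#include <string.h>
#include "sk_data.h"

/* Illustrative only: copy a C string into an sk_data_t and take a subset. */
static void data_example(void) {
    const char msg[] = "hello, skia";
    sk_data_t* whole = sk_data_new_with_copy(msg, strlen(msg));
    sk_data_t* word  = sk_data_new_subset(whole, 7, 4);   /* "skia" */

    size_t size       = sk_data_get_size(word);           /* 4 */
    const void* bytes = sk_data_get_data(word);           /* points into the copy */
    (void)size; (void)bytes;

    sk_data_unref(word);
    sk_data_unref(whole);
}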
diff --git a/gfx/skia/skia/include/c/sk_image.h b/gfx/skia/skia/include/c/sk_image.h
new file mode 100644
index 000000000..e90649d75
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_image.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_image_DEFINED
+#define sk_image_DEFINED
+
+#include "sk_types.h"
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+/**
+ * Return a new image that has made a copy of the provided pixels, or NULL on failure.
+ * Balance with a call to sk_image_unref().
+ */
+SK_API sk_image_t* sk_image_new_raster_copy(const sk_imageinfo_t*, const void* pixels, size_t rowBytes);
+
+/**
+ * If the specified data can be interpreted as a compressed image (e.g. PNG or JPEG) then this
+ * returns an image. If the encoded data is not supported, returns NULL.
+ *
+ * On success, the encoded data may be processed immediately, or it may be ref()'d for later
+ * use.
+ */
+SK_API sk_image_t* sk_image_new_from_encoded(const sk_data_t* encoded, const sk_irect_t* subset);
+
+/**
+ * Encode the image's pixels and return the result as a new PNG in a
+ * sk_data_t, which the caller must manage: call sk_data_unref() when
+ * they are done.
+ *
+ * If the image type cannot be encoded, this will return NULL.
+ */
+SK_API sk_data_t* sk_image_encode(const sk_image_t*);
+
+/**
+ * Increment the reference count on the given sk_image_t. Must be
+ * balanced by a call to sk_image_unref().
+*/
+SK_API void sk_image_ref(const sk_image_t*);
+/**
+ * Decrement the reference count. If the reference count is 1 before
+ * the decrement, then release both the memory holding the sk_image_t
+ * and the memory it is managing. New sk_image_t are created with a
+ * reference count of 1.
+*/
+SK_API void sk_image_unref(const sk_image_t*);
+
+/**
+ * Return the width of the sk_image_t.
+ */
+SK_API int sk_image_get_width(const sk_image_t*);
+/**
+ * Return the height of the sk_image_t.
+ */
+SK_API int sk_image_get_height(const sk_image_t*);
+
+/**
+ * Returns a non-zero value unique among all images.
+ */
+SK_API uint32_t sk_image_get_unique_id(const sk_image_t*);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
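A C sketch of the decode/re-encode round trip described above; error handling is kept to the minimum the comments call out.

#include "sk_data.h"
#include "sk_image.h"

/* Illustrative only: decode an encoded blob, query its size, re-encode as PNG. */
static sk_data_t* reencode(const sk_data_t* encoded) {
    sk_image_t* image = sk_image_new_from_encoded(encoded, NULL /* whole image */);
    if (!image) {
        return NULL;   /* unsupported or corrupt data */
    }
    int w = sk_image_get_width(image);
    int h = sk_image_get_height(image);
    (void)w; (void)h;

    sk_data_t* png = sk_image_encode(image);   /* may be NULL if encoding fails */
    sk_image_unref(image);
    return png;        /* caller must sk_data_unref() the result */
}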
diff --git a/gfx/skia/skia/include/c/sk_maskfilter.h b/gfx/skia/skia/include/c/sk_maskfilter.h
new file mode 100644
index 000000000..5c22a0639
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_maskfilter.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_maskfilter_DEFINED
+#define sk_maskfilter_DEFINED
+
+#include "sk_types.h"
+
+typedef enum {
+ NORMAL_SK_BLUR_STYLE, //!< fuzzy inside and outside
+ SOLID_SK_BLUR_STYLE, //!< solid inside, fuzzy outside
+ OUTER_SK_BLUR_STYLE, //!< nothing inside, fuzzy outside
+ INNER_SK_BLUR_STYLE, //!< fuzzy inside, nothing outside
+} sk_blurstyle_t;
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+/**
+ Increment the reference count on the given sk_maskfilter_t. Must be
+ balanced by a call to sk_maskfilter_unref().
+*/
+void sk_maskfilter_ref(sk_maskfilter_t*);
+/**
+ Decrement the reference count. If the reference count is 1 before
+ the decrement, then release both the memory holding the
+ sk_maskfilter_t and any other associated resources. New
+ sk_maskfilter_t are created with a reference count of 1.
+*/
+void sk_maskfilter_unref(sk_maskfilter_t*);
+
+/**
+ Create a blur maskfilter.
+ @param sk_blurstyle_t The SkBlurStyle to use
+ @param sigma Standard deviation of the Gaussian blur to apply. Must be > 0.
+*/
+sk_maskfilter_t* sk_maskfilter_new_blur(sk_blurstyle_t, float sigma);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
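A C sketch combining the blur factory above with a paint; sk_paint_set_maskfilter() (declared in sk_paint.h below) takes its own reference, so the local one is released afterwards.

#include "sk_maskfilter.h"
#include "sk_paint.h"

/* Illustrative only: give a paint a soft, normal-style blur. */
static void make_blurry(sk_paint_t* paint) {
    sk_maskfilter_t* blur = sk_maskfilter_new_blur(NORMAL_SK_BLUR_STYLE, 3.0f);
    sk_paint_set_maskfilter(paint, blur);   /* paint refs the filter */
    sk_maskfilter_unref(blur);              /* drop our reference */
}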
diff --git a/gfx/skia/skia/include/c/sk_matrix.h b/gfx/skia/skia/include/c/sk_matrix.h
new file mode 100644
index 000000000..83f0122b0
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_matrix.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_matrix_DEFINED
+#define sk_matrix_DEFINED
+
+#include "sk_types.h"
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+/** Set the matrix to identity */
+void sk_matrix_set_identity(sk_matrix_t*);
+
+/** Set the matrix to translate by (tx, ty). */
+void sk_matrix_set_translate(sk_matrix_t*, float tx, float ty);
+/**
+ Preconcats the matrix with the specified translation.
+ M' = M * T(dx, dy)
+*/
+void sk_matrix_pre_translate(sk_matrix_t*, float tx, float ty);
+/**
+ Postconcats the matrix with the specified translation.
+ M' = T(dx, dy) * M
+*/
+void sk_matrix_post_translate(sk_matrix_t*, float tx, float ty);
+
+/** Set the matrix to scale by sx and sy. */
+void sk_matrix_set_scale(sk_matrix_t*, float sx, float sy);
+/**
+ Preconcats the matrix with the specified scale.
+ M' = M * S(sx, sy)
+*/
+void sk_matrix_pre_scale(sk_matrix_t*, float sx, float sy);
+/**
+ Postconcats the matrix with the specified scale.
+ M' = S(sx, sy) * M
+*/
+void sk_matrix_post_scale(sk_matrix_t*, float sx, float sy);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
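A C sketch of the pre-concat ordering documented above (M' = M * T): building T(cx, cy) * S(s, s) * T(-cx, -cy) scales about an arbitrary point.

#include "sk_matrix.h"

/* Illustrative only: scale about (cx, cy) using pre-concats. */
static void scale_about(sk_matrix_t* m, float cx, float cy, float s) {
    sk_matrix_set_identity(m);
    sk_matrix_pre_translate(m, cx, cy);     /* M = T(cx, cy)                        */
    sk_matrix_pre_scale(m, s, s);           /* M = T(cx, cy) * S(s, s)              */
    sk_matrix_pre_translate(m, -cx, -cy);   /* M = T(cx, cy) * S(s, s) * T(-cx,-cy) */
}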
diff --git a/gfx/skia/skia/include/c/sk_paint.h b/gfx/skia/skia/include/c/sk_paint.h
new file mode 100644
index 000000000..e0886ad34
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_paint.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_paint_DEFINED
+#define sk_paint_DEFINED
+
+#include "sk_types.h"
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+/**
+ Create a new paint with default settings:
+ antialias : false
+ stroke : false
+ stroke width : 0.0f (hairline)
+ stroke miter : 4.0f
+ stroke cap : BUTT_SK_STROKE_CAP
+ stroke join : MITER_SK_STROKE_JOIN
+ color : opaque black
+ shader : NULL
+ maskfilter : NULL
+ xfermode_mode : SRCOVER_SK_XFERMODE_MODE
+*/
+SK_API sk_paint_t* sk_paint_new();
+/**
+ Release the memory storing the sk_paint_t and unref() all
+ associated objects.
+*/
+SK_API void sk_paint_delete(sk_paint_t*);
+
+/**
+ Return true iff the paint has antialiasing enabled.
+*/
+SK_API bool sk_paint_is_antialias(const sk_paint_t*);
+/**
+ Set to true to enable antialiasing, false to disable it on this
+ sk_paint_t.
+*/
+SK_API void sk_paint_set_antialias(sk_paint_t*, bool);
+
+/**
+    Return the paint's current drawing color.
+*/
+SK_API sk_color_t sk_paint_get_color(const sk_paint_t*);
+/**
+    Set the paint's current drawing color.
+*/
+SK_API void sk_paint_set_color(sk_paint_t*, sk_color_t);
+
+/* stroke settings */
+
+/**
+ Return true iff stroking is enabled rather than filling on this
+ sk_paint_t.
+*/
+SK_API bool sk_paint_is_stroke(const sk_paint_t*);
+/**
+ Set to true to enable stroking rather than filling with this
+ sk_paint_t.
+*/
+SK_API void sk_paint_set_stroke(sk_paint_t*, bool);
+
+/**
+ Return the width for stroking. A value of 0 strokes in hairline mode.
+ */
+SK_API float sk_paint_get_stroke_width(const sk_paint_t*);
+/**
+ Set the width for stroking. A value of 0 strokes in hairline mode
+ (always draw 1-pixel wide, regardless of the matrix).
+ */
+SK_API void sk_paint_set_stroke_width(sk_paint_t*, float width);
+
+/**
+ Return the paint's stroke miter value. This is used to control the
+    behavior of miter joins when the join's angle is sharp.
+*/
+SK_API float sk_paint_get_stroke_miter(const sk_paint_t*);
+/**
+ Set the paint's stroke miter value. This is used to control the
+    behavior of miter joins when the join's angle is sharp. This value
+ must be >= 0.
+*/
+SK_API void sk_paint_set_stroke_miter(sk_paint_t*, float miter);
+
+typedef enum {
+ BUTT_SK_STROKE_CAP,
+ ROUND_SK_STROKE_CAP,
+ SQUARE_SK_STROKE_CAP
+} sk_stroke_cap_t;
+
+/**
+ Return the paint's stroke cap type, controlling how the start and
+ end of stroked lines and paths are treated.
+*/
+SK_API sk_stroke_cap_t sk_paint_get_stroke_cap(const sk_paint_t*);
+/**
+ Set the paint's stroke cap type, controlling how the start and
+ end of stroked lines and paths are treated.
+*/
+SK_API void sk_paint_set_stroke_cap(sk_paint_t*, sk_stroke_cap_t);
+
+typedef enum {
+ MITER_SK_STROKE_JOIN,
+ ROUND_SK_STROKE_JOIN,
+ BEVEL_SK_STROKE_JOIN
+} sk_stroke_join_t;
+
+/**
+    Return the paint's stroke join type, which specifies the treatment that
+    is applied to corners in paths and rectangles.
+ */
+SK_API sk_stroke_join_t sk_paint_get_stroke_join(const sk_paint_t*);
+/**
+    Set the paint's stroke join type, which specifies the treatment that
+    is applied to corners in paths and rectangles.
+ */
+SK_API void sk_paint_set_stroke_join(sk_paint_t*, sk_stroke_join_t);
+
+/**
+ * Set the paint's shader to the specified parameter. This will automatically call unref() on
+ * any previous value, and call ref() on the new value.
+ */
+SK_API void sk_paint_set_shader(sk_paint_t*, sk_shader_t*);
+
+/**
+ * Set the paint's maskfilter to the specified parameter. This will automatically call unref() on
+ * any previous value, and call ref() on the new value.
+ */
+SK_API void sk_paint_set_maskfilter(sk_paint_t*, sk_maskfilter_t*);
+
+/**
+ * Set the paint's xfermode to the specified parameter.
+ */
+SK_API void sk_paint_set_xfermode_mode(sk_paint_t*, sk_xfermode_mode_t);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
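A C sketch pulling the stroke-related setters above into one paint; the concrete values are arbitrary.

#include "sk_paint.h"
#include "sk_types.h"

/* Illustrative only: an antialiased, round-capped stroking paint. */
static sk_paint_t* make_stroke_paint(void) {
    sk_paint_t* paint = sk_paint_new();
    sk_paint_set_antialias(paint, true);
    sk_paint_set_color(paint, sk_color_set_argb(0xFF, 0x00, 0x00, 0x00));
    sk_paint_set_stroke(paint, true);
    sk_paint_set_stroke_width(paint, 2.5f);
    sk_paint_set_stroke_cap(paint, ROUND_SK_STROKE_CAP);
    sk_paint_set_stroke_join(paint, ROUND_SK_STROKE_JOIN);
    return paint;   /* caller releases with sk_paint_delete() */
}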
diff --git a/gfx/skia/skia/include/c/sk_path.h b/gfx/skia/skia/include/c/sk_path.h
new file mode 100644
index 000000000..6b4e83d3b
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_path.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_path_DEFINED
+#define sk_path_DEFINED
+
+#include "sk_types.h"
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+typedef enum {
+ CW_SK_PATH_DIRECTION,
+ CCW_SK_PATH_DIRECTION,
+} sk_path_direction_t;
+
+/** Create a new, empty path. */
+SK_API sk_path_t* sk_path_new();
+/** Release the memory used by a sk_path_t. */
+SK_API void sk_path_delete(sk_path_t*);
+
+/** Set the beginning of the next contour to the point (x,y). */
+SK_API void sk_path_move_to(sk_path_t*, float x, float y);
+/**
+ Add a line from the last point to the specified point (x,y). If no
+ sk_path_move_to() call has been made for this contour, the first
+ point is automatically set to (0,0).
+*/
+SK_API void sk_path_line_to(sk_path_t*, float x, float y);
+/**
+ Add a quadratic bezier from the last point, approaching control
+ point (x0,y0), and ending at (x1,y1). If no sk_path_move_to() call
+ has been made for this contour, the first point is automatically
+ set to (0,0).
+*/
+SK_API void sk_path_quad_to(sk_path_t*, float x0, float y0, float x1, float y1);
+/**
+ Add a conic curve from the last point, approaching control point
+    (x0,y0), and ending at (x1,y1) with weight w. If no
+ sk_path_move_to() call has been made for this contour, the first
+ point is automatically set to (0,0).
+*/
+SK_API void sk_path_conic_to(sk_path_t*, float x0, float y0, float x1, float y1, float w);
+/**
+ Add a cubic bezier from the last point, approaching control points
+ (x0,y0) and (x1,y1), and ending at (x2,y2). If no
+ sk_path_move_to() call has been made for this contour, the first
+ point is automatically set to (0,0).
+*/
+SK_API void sk_path_cubic_to(sk_path_t*,
+ float x0, float y0,
+ float x1, float y1,
+ float x2, float y2);
+/**
+ Close the current contour. If the current point is not equal to the
+ first point of the contour, a line segment is automatically added.
+*/
+SK_API void sk_path_close(sk_path_t*);
+
+/**
+ Add a closed rectangle contour to the path.
+*/
+SK_API void sk_path_add_rect(sk_path_t*, const sk_rect_t*, sk_path_direction_t);
+/**
+ Add a closed oval contour to the path
+*/
+SK_API void sk_path_add_oval(sk_path_t*, const sk_rect_t*, sk_path_direction_t);
+
+/**
+ * If the path is empty, return false and set the rect parameter to [0, 0, 0, 0].
+ * Otherwise return true and set the rect parameter to the bounds of the control points
+ * of the path.
+ */
+SK_API bool sk_path_get_bounds(const sk_path_t*, sk_rect_t*);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
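A C sketch of building a closed contour with the calls above and querying its control-point bounds.

#include "sk_path.h"

/* Illustrative only: a closed triangle and its bounds. */
static void triangle_bounds(sk_rect_t* bounds) {
    sk_path_t* path = sk_path_new();
    sk_path_move_to(path, 10.0f, 10.0f);
    sk_path_line_to(path, 90.0f, 10.0f);
    sk_path_line_to(path, 50.0f, 80.0f);
    sk_path_close(path);

    sk_path_get_bounds(path, bounds);   /* {10, 10, 90, 80} */
    sk_path_delete(path);
}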
diff --git a/gfx/skia/skia/include/c/sk_picture.h b/gfx/skia/skia/include/c/sk_picture.h
new file mode 100644
index 000000000..338b7d906
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_picture.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_picture_DEFINED
+#define sk_picture_DEFINED
+
+#include "sk_types.h"
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+/**
+ Create a new sk_picture_recorder_t. Its resources should be
+ released with a call to sk_picture_recorder_delete().
+*/
+sk_picture_recorder_t* sk_picture_recorder_new();
+/**
+ Release the memory and other resources used by this
+ sk_picture_recorder_t.
+*/
+void sk_picture_recorder_delete(sk_picture_recorder_t*);
+
+/**
+ Returns the canvas that records the drawing commands
+
+ @param sk_rect_t* the cull rect used when recording this
+ picture. Any drawing the falls outside of this
+                      picture. Any drawing that falls outside of this
+                      rect is undefined, and may or may not be drawn.
+sk_canvas_t* sk_picture_recorder_begin_recording(sk_picture_recorder_t*, const sk_rect_t*);
+/**
+ Signal that the caller is done recording. This invalidates the
+ canvas returned by begin_recording. Ownership of the sk_picture_t
+ is passed to the caller, who must call sk_picture_unref() when
+ they are done using it. The returned picture is immutable.
+*/
+sk_picture_t* sk_picture_recorder_end_recording(sk_picture_recorder_t*);
+
+/**
+ Increment the reference count on the given sk_picture_t. Must be
+ balanced by a call to sk_picture_unref().
+*/
+void sk_picture_ref(sk_picture_t*);
+/**
+ Decrement the reference count. If the reference count is 1 before
+ the decrement, then release both the memory holding the
+    sk_picture_t and any resources it may be managing. New
+ sk_picture_t are created with a reference count of 1.
+*/
+void sk_picture_unref(sk_picture_t*);
+
+/**
+ Returns a non-zero value unique among all pictures.
+ */
+uint32_t sk_picture_get_unique_id(sk_picture_t*);
+
+/**
+ Return the cull rect specified when this picture was recorded.
+*/
+sk_rect_t sk_picture_get_bounds(sk_picture_t*);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
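A C sketch of the record-then-replay flow above: record into a cull rect, finish, replay onto another canvas, and drop the caller's reference.

#include "sk_canvas.h"
#include "sk_picture.h"

/* Illustrative only: record one filled rect and replay it onto a canvas. */
static void record_and_replay(sk_canvas_t* target, const sk_paint_t* paint) {
    sk_picture_recorder_t* recorder = sk_picture_recorder_new();

    sk_rect_t cull = { 0.0f, 0.0f, 100.0f, 100.0f };
    sk_canvas_t* rec = sk_picture_recorder_begin_recording(recorder, &cull);

    sk_rect_t box = { 10.0f, 10.0f, 90.0f, 90.0f };
    sk_canvas_draw_rect(rec, &box, paint);

    sk_picture_t* picture = sk_picture_recorder_end_recording(recorder);
    sk_picture_recorder_delete(recorder);

    sk_canvas_draw_picture(target, picture, NULL /* matrix */, NULL /* paint */);
    sk_picture_unref(picture);
}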
diff --git a/gfx/skia/skia/include/c/sk_shader.h b/gfx/skia/skia/include/c/sk_shader.h
new file mode 100644
index 000000000..702cda7fd
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_shader.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_shader_DEFINED
+#define sk_shader_DEFINED
+
+#include "sk_types.h"
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+void sk_shader_ref(sk_shader_t*);
+void sk_shader_unref(sk_shader_t*);
+
+typedef enum {
+ CLAMP_SK_SHADER_TILEMODE,
+ REPEAT_SK_SHADER_TILEMODE,
+ MIRROR_SK_SHADER_TILEMODE,
+} sk_shader_tilemode_t;
+
+/**
+ Returns a shader that generates a linear gradient between the two
+ specified points.
+
+ @param points The start and end points for the gradient.
+ @param colors The array[count] of colors, to be distributed between
+ the two points
+ @param colorPos May be NULL. array[count] of SkScalars, or NULL, of
+ the relative position of each corresponding color
+                    in the colors array. If this is NULL, the
+ colors are distributed evenly between the start
+ and end point. If this is not null, the values
+ must begin with 0, end with 1.0, and intermediate
+ values must be strictly increasing.
+ @param colorCount Must be >=2. The number of colors (and pos if not
+ NULL) entries.
+ @param mode The tiling mode
+*/
+sk_shader_t* sk_shader_new_linear_gradient(const sk_point_t points[2],
+ const sk_color_t colors[],
+ const float colorPos[],
+ int colorCount,
+ sk_shader_tilemode_t tileMode,
+ const sk_matrix_t* localMatrix);
+
+
+/**
+ Returns a shader that generates a radial gradient given the center
+ and radius.
+
+ @param center The center of the circle for this gradient
+ @param radius Must be positive. The radius of the circle for this
+ gradient
+ @param colors The array[count] of colors, to be distributed
+ between the center and edge of the circle
+ @param colorPos May be NULL. The array[count] of the relative
+ position of each corresponding color in the colors
+                    array. If this is NULL, the colors are
+ distributed evenly between the center and edge of
+ the circle. If this is not null, the values must
+ begin with 0, end with 1.0, and intermediate
+ values must be strictly increasing.
+ @param count Must be >= 2. The number of colors (and pos if not
+ NULL) entries
+ @param tileMode The tiling mode
+ @param localMatrix May be NULL
+*/
+sk_shader_t* sk_shader_new_radial_gradient(const sk_point_t* center,
+ float radius,
+ const sk_color_t colors[],
+ const float colorPos[],
+ int colorCount,
+ sk_shader_tilemode_t tileMode,
+ const sk_matrix_t* localMatrix);
+
+/**
+ Returns a shader that generates a sweep gradient given a center.
+
+ @param center The coordinates of the center of the sweep
+ @param colors The array[count] of colors, to be distributed around
+ the center.
+ @param colorPos May be NULL. The array[count] of the relative
+ position of each corresponding color in the colors
+                    array. If this is NULL, the colors are
+                    distributed evenly around the center. If this
+                    is not null, the values must
+ begin with 0, end with 1.0, and intermediate
+ values must be strictly increasing.
+ @param colorCount Must be >= 2. The number of colors (and pos if
+ not NULL) entries
+ @param localMatrix May be NULL
+*/
+sk_shader_t* sk_shader_new_sweep_gradient(const sk_point_t* center,
+ const sk_color_t colors[],
+ const float colorPos[],
+ int colorCount,
+ const sk_matrix_t* localMatrix);
+
+/**
+ Returns a shader that generates a conical gradient given two circles, or
+ returns NULL if the inputs are invalid. The gradient interprets the
+ two circles according to the following HTML spec.
+ http://dev.w3.org/html5/2dcontext/#dom-context-2d-createradialgradient
+
+ @param start, startRadius Defines the first circle.
+    @param end, endRadius Defines the second circle.
+ @param colors The array[count] of colors, to be distributed between
+ the two circles.
+ @param colorPos May be NULL. The array[count] of the relative
+ position of each corresponding color in the colors
+                    array. If this is NULL, the colors are
+ distributed evenly between the two circles. If
+ this is not null, the values must begin with 0,
+ end with 1.0, and intermediate values must be
+ strictly increasing.
+ @param colorCount Must be >= 2. The number of colors (and pos if
+ not NULL) entries
+ @param tileMode The tiling mode
+ @param localMatrix May be NULL
+
+*/
+sk_shader_t* sk_shader_new_two_point_conical_gradient(
+ const sk_point_t* start,
+ float startRadius,
+ const sk_point_t* end,
+ float endRadius,
+ const sk_color_t colors[],
+ const float colorPos[],
+ int colorCount,
+ sk_shader_tilemode_t tileMode,
+ const sk_matrix_t* localMatrix);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
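A C sketch of the linear-gradient factory above attached to a paint; because sk_paint_set_shader() takes its own reference, the local one is released afterwards.

#include "sk_paint.h"
#include "sk_shader.h"

/* Illustrative only: a black-to-white horizontal gradient on a paint. */
static void set_gradient(sk_paint_t* paint) {
    sk_point_t points[2] = { { 0.0f, 0.0f }, { 256.0f, 0.0f } };
    sk_color_t colors[2] = {
        sk_color_set_argb(0xFF, 0x00, 0x00, 0x00),
        sk_color_set_argb(0xFF, 0xFF, 0xFF, 0xFF),
    };
    sk_shader_t* shader = sk_shader_new_linear_gradient(
            points, colors, NULL /* evenly spaced */, 2,
            CLAMP_SK_SHADER_TILEMODE, NULL /* no local matrix */);

    sk_paint_set_shader(paint, shader);   /* paint refs the shader */
    sk_shader_unref(shader);
}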
diff --git a/gfx/skia/skia/include/c/sk_surface.h b/gfx/skia/skia/include/c/sk_surface.h
new file mode 100644
index 000000000..d634185ee
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_surface.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_surface_DEFINED
+#define sk_surface_DEFINED
+
+#include "sk_types.h"
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+/**
+ Return a new surface, with the memory for the pixels automatically
+ allocated. If the requested surface cannot be created, or the
+ request is not a supported configuration, NULL will be returned.
+
+ @param sk_imageinfo_t* Specify the width, height, color type, and
+ alpha type for the surface.
+
+ @param sk_surfaceprops_t* If not NULL, specify additional non-default
+ properties of the surface.
+*/
+SK_API sk_surface_t* sk_surface_new_raster(const sk_imageinfo_t*, const sk_surfaceprops_t*);
+
+/**
+ Create a new surface which will draw into the specified pixels
+ with the specified rowbytes. If the requested surface cannot be
+ created, or the request is not a supported configuration, NULL
+ will be returned.
+
+ @param sk_imageinfo_t* Specify the width, height, color type, and
+ alpha type for the surface.
+ @param void* pixels Specify the location in memory where the
+ destination pixels are. This memory must
+ outlast this surface.
+ @param size_t rowBytes Specify the difference, in bytes, between
+ each adjacent row. Should be at least
+ (width * sizeof(one pixel)).
+ @param sk_surfaceprops_t* If not NULL, specify additional non-default
+ properties of the surface.
+*/
+SK_API sk_surface_t* sk_surface_new_raster_direct(const sk_imageinfo_t*,
+ void* pixels, size_t rowBytes,
+ const sk_surfaceprops_t* props);
+
+/**
+ Decrement the reference count. If the reference count is 1 before
+ the decrement, then release both the memory holding the
+ sk_surface_t and any pixel memory it may be managing. New
+ sk_surface_t are created with a reference count of 1.
+*/
+SK_API void sk_surface_unref(sk_surface_t*);
+
+/**
+ * Return the canvas associated with this surface. Note: the canvas is owned by the surface,
+ * so the returned object is only valid while the owning surface is valid.
+ */
+SK_API sk_canvas_t* sk_surface_get_canvas(sk_surface_t*);
+
+/**
+ * Call sk_image_unref() when the returned image is no longer used.
+ */
+SK_API sk_image_t* sk_surface_new_image_snapshot(sk_surface_t*);
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
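
A hedged sketch of the lifecycle described above: create a raster surface, fetch its canvas, snapshot it, and release everything. sk_image_unref() is the release call mentioned in the last comment; its declaration is assumed to live in sk_image.h, which is not part of this excerpt.

    #include "sk_image.h"     /* assumed home of sk_image_unref() */
    #include "sk_surface.h"
    #include "sk_types.h"

    int main(void) {
        sk_imageinfo_t info;
        info.width = 256;
        info.height = 256;
        info.colorType = sk_colortype_get_default_8888();  /* platform-preferred 8888 order */
        info.alphaType = PREMUL_SK_ALPHATYPE;

        /* NULL props: accept the default surface properties. */
        sk_surface_t* surface = sk_surface_new_raster(&info, NULL);
        if (!surface) {
            return 1;   /* unsupported configuration */
        }

        /* The canvas is owned by the surface and must not outlive it. */
        sk_canvas_t* canvas = sk_surface_get_canvas(surface);
        (void)canvas;   /* drawing calls (declared in sk_canvas.h) would go here */

        /* Snapshot the current pixels as an immutable image, then release both. */
        sk_image_t* snapshot = sk_surface_new_image_snapshot(surface);
        sk_image_unref(snapshot);
        sk_surface_unref(surface);
        return 0;
    }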
diff --git a/gfx/skia/skia/include/c/sk_types.h b/gfx/skia/skia/include/c/sk_types.h
new file mode 100644
index 000000000..baa3ac9ce
--- /dev/null
+++ b/gfx/skia/skia/include/c/sk_types.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL
+// DO NOT USE -- FOR INTERNAL TESTING ONLY
+
+#ifndef sk_types_DEFINED
+#define sk_types_DEFINED
+
+#include <stdint.h>
+#include <stddef.h>
+
+#ifdef __cplusplus
+ #define SK_C_PLUS_PLUS_BEGIN_GUARD extern "C" {
+ #define SK_C_PLUS_PLUS_END_GUARD }
+#else
+ #include <stdbool.h>
+ #define SK_C_PLUS_PLUS_BEGIN_GUARD
+ #define SK_C_PLUS_PLUS_END_GUARD
+#endif
+
+#if !defined(SK_API)
+ #if defined(SKIA_DLL)
+ #if defined(_MSC_VER)
+ #if SKIA_IMPLEMENTATION
+ #define SK_API __declspec(dllexport)
+ #else
+ #define SK_API __declspec(dllimport)
+ #endif
+ #else
+ #define SK_API __attribute__((visibility("default")))
+ #endif
+ #else
+ #define SK_API
+ #endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////
+
+SK_C_PLUS_PLUS_BEGIN_GUARD
+
+typedef uint32_t sk_color_t;
+
+/* This macro assumes all arguments are >=0 and <=255. */
+#define sk_color_set_argb(a, r, g, b) (((a) << 24) | ((r) << 16) | ((g) << 8) | (b))
+#define sk_color_get_a(c) (((c) >> 24) & 0xFF)
+#define sk_color_get_r(c) (((c) >> 16) & 0xFF)
+#define sk_color_get_g(c) (((c) >> 8) & 0xFF)
+#define sk_color_get_b(c) (((c) >> 0) & 0xFF)
+
+typedef enum {
+ UNKNOWN_SK_COLORTYPE,
+ RGBA_8888_SK_COLORTYPE,
+ BGRA_8888_SK_COLORTYPE,
+ ALPHA_8_SK_COLORTYPE,
+} sk_colortype_t;
+
+typedef enum {
+ OPAQUE_SK_ALPHATYPE,
+ PREMUL_SK_ALPHATYPE,
+ UNPREMUL_SK_ALPHATYPE,
+} sk_alphatype_t;
+
+typedef enum {
+ INTERSECT_SK_CLIPTYPE,
+ DIFFERENCE_SK_CLIPTYPE,
+} sk_cliptype_t;
+
+typedef enum {
+ UNKNOWN_SK_PIXELGEOMETRY,
+ RGB_H_SK_PIXELGEOMETRY,
+ BGR_H_SK_PIXELGEOMETRY,
+ RGB_V_SK_PIXELGEOMETRY,
+ BGR_V_SK_PIXELGEOMETRY,
+} sk_pixelgeometry_t;
+
+/**
+ Return the default sk_colortype_t; this is operating-system dependent.
+*/
+SK_API sk_colortype_t sk_colortype_get_default_8888();
+
+typedef struct {
+ int32_t width;
+ int32_t height;
+ sk_colortype_t colorType;
+ sk_alphatype_t alphaType;
+} sk_imageinfo_t;
+
+typedef struct {
+ sk_pixelgeometry_t pixelGeometry;
+} sk_surfaceprops_t;
+
+typedef struct {
+ float x;
+ float y;
+} sk_point_t;
+
+typedef struct {
+ int32_t left;
+ int32_t top;
+ int32_t right;
+ int32_t bottom;
+} sk_irect_t;
+
+typedef struct {
+ float left;
+ float top;
+ float right;
+ float bottom;
+} sk_rect_t;
+
+typedef struct {
+ float mat[9];
+} sk_matrix_t;
+
+/**
+ A sk_canvas_t encapsulates all of the state about drawing into a
+ destination. This includes a reference to the destination itself,
+ and a stack of matrix/clip values.
+*/
+typedef struct sk_canvas_t sk_canvas_t;
+/**
+ A sk_data_t holds an immutable data buffer.
+*/
+typedef struct sk_data_t sk_data_t;
+/**
+ A sk_image_t is an abstraction for drawing a rectangle of pixels.
+ The content of the image is always immutable, though the actual
+ storage may change, if for example that image can be re-created via
+ encoded data or other means.
+*/
+typedef struct sk_image_t sk_image_t;
+/**
+ A sk_maskfilter_t is an object that performs transformations on an
+ alpha-channel mask before drawing it; it may be installed into a
+ sk_paint_t. Each time a primitive is drawn, it is first
+ scan-converted into an alpha mask, which is handed to the
+ maskfilter, which may create a new mask to render into the
+ destination.
+ */
+typedef struct sk_maskfilter_t sk_maskfilter_t;
+/**
+ A sk_paint_t holds the style and color information about how to
+ draw geometries, text and bitmaps.
+*/
+typedef struct sk_paint_t sk_paint_t;
+/**
+ A sk_path_t encapsulates compound (multiple contour) geometric
+ paths consisting of straight line segments, quadratic curves, and
+ cubic curves.
+*/
+typedef struct sk_path_t sk_path_t;
+/**
+ A sk_picture_t holds recorded canvas drawing commands to be played
+ back at a later time.
+*/
+typedef struct sk_picture_t sk_picture_t;
+/**
+ A sk_picture_recorder_t holds a sk_canvas_t that records commands
+ to create a sk_picture_t.
+*/
+typedef struct sk_picture_recorder_t sk_picture_recorder_t;
+/**
+ A sk_shader_t specifies the source color(s) for what is being drawn. If a
+ paint has no shader, then the paint's color is used. If the paint
+ has a shader, then the shader's color(s) are used instead, but they
+ are modulated by the paint's alpha.
+*/
+typedef struct sk_shader_t sk_shader_t;
+/**
+ A sk_surface_t holds the destination for drawing to a canvas. For
+ raster drawing, the destination is an array of pixels in memory.
+ For GPU drawing, the destination is a texture or a framebuffer.
+*/
+typedef struct sk_surface_t sk_surface_t;
+
+typedef enum {
+ CLEAR_SK_XFERMODE_MODE,
+ SRC_SK_XFERMODE_MODE,
+ DST_SK_XFERMODE_MODE,
+ SRCOVER_SK_XFERMODE_MODE,
+ DSTOVER_SK_XFERMODE_MODE,
+ SRCIN_SK_XFERMODE_MODE,
+ DSTIN_SK_XFERMODE_MODE,
+ SRCOUT_SK_XFERMODE_MODE,
+ DSTOUT_SK_XFERMODE_MODE,
+ SRCATOP_SK_XFERMODE_MODE,
+ DSTATOP_SK_XFERMODE_MODE,
+ XOR_SK_XFERMODE_MODE,
+ PLUS_SK_XFERMODE_MODE,
+ MODULATE_SK_XFERMODE_MODE,
+ SCREEN_SK_XFERMODE_MODE,
+ OVERLAY_SK_XFERMODE_MODE,
+ DARKEN_SK_XFERMODE_MODE,
+ LIGHTEN_SK_XFERMODE_MODE,
+ COLORDODGE_SK_XFERMODE_MODE,
+ COLORBURN_SK_XFERMODE_MODE,
+ HARDLIGHT_SK_XFERMODE_MODE,
+ SOFTLIGHT_SK_XFERMODE_MODE,
+ DIFFERENCE_SK_XFERMODE_MODE,
+ EXCLUSION_SK_XFERMODE_MODE,
+ MULTIPLY_SK_XFERMODE_MODE,
+ HUE_SK_XFERMODE_MODE,
+ SATURATION_SK_XFERMODE_MODE,
+ COLOR_SK_XFERMODE_MODE,
+ LUMINOSITY_SK_XFERMODE_MODE,
+} sk_xfermode_mode_t;
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+SK_C_PLUS_PLUS_END_GUARD
+
+#endif
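
A quick sanity check of the color-packing macros above (a standalone sketch, not part of the header):

    #include "sk_types.h"
    #include <assert.h>

    int main(void) {
        /* Pack half-transparent orange; all arguments must be in [0, 255]. */
        sk_color_t c = sk_color_set_argb(0x7F, 0xFF, 0xA5, 0x00);
        assert(sk_color_get_a(c) == 0x7F);
        assert(sk_color_get_r(c) == 0xFF);
        assert(sk_color_get_g(c) == 0xA5);
        assert(sk_color_get_b(c) == 0x00);
        return 0;
    }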
diff --git a/gfx/skia/skia/include/codec/SkAndroidCodec.h b/gfx/skia/skia/include/codec/SkAndroidCodec.h
new file mode 100644
index 000000000..c7587b62e
--- /dev/null
+++ b/gfx/skia/skia/include/codec/SkAndroidCodec.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAndroidCodec_DEFINED
+#define SkAndroidCodec_DEFINED
+
+#include "SkCodec.h"
+#include "SkEncodedFormat.h"
+#include "SkStream.h"
+#include "SkTypes.h"
+
+/**
+ * Abstract interface defining image codec functionality that is necessary for
+ * Android.
+ */
+class SkAndroidCodec : SkNoncopyable {
+public:
+ /**
+ * If this stream represents an encoded image that we know how to decode,
+ * return an SkAndroidCodec that can decode it. Otherwise return NULL.
+ *
+ * The SkPngChunkReader handles unknown chunks in PNGs.
+ * See SkCodec.h for more details.
+ *
+ * If NULL is returned, the stream is deleted immediately. Otherwise, the
+ * SkCodec takes ownership of it, and will delete it when done with it.
+ */
+ static SkAndroidCodec* NewFromStream(SkStream*, SkPngChunkReader* = NULL);
+
+ /**
+ * If this data represents an encoded image that we know how to decode,
+ * return an SkAndroidCodec that can decode it. Otherwise return NULL.
+ *
+ * The SkPngChunkReader handles unknown chunks in PNGs.
+ * See SkCodec.h for more details.
+ */
+ static SkAndroidCodec* NewFromData(sk_sp<SkData>, SkPngChunkReader* = NULL);
+ static SkAndroidCodec* NewFromData(SkData* data, SkPngChunkReader* reader) {
+ return NewFromData(sk_ref_sp(data), reader);
+ }
+
+ virtual ~SkAndroidCodec() {}
+
+
+ const SkImageInfo& getInfo() const { return fInfo; }
+
+ /**
+ * Format of the encoded data.
+ */
+ SkEncodedFormat getEncodedFormat() const { return fCodec->getEncodedFormat(); }
+
+ /**
+ * @param requestedColorType Color type requested by the client
+ *
+ * If it is possible to decode to requestedColorType, this returns
+ * requestedColorType. Otherwise, this returns whichever color type
+ * is suggested by the codec as the best match for the encoded data.
+ */
+ SkColorType computeOutputColorType(SkColorType requestedColorType);
+
+ /**
+ * @param requestedUnpremul Indicates if the client requested
+ * unpremultiplied output
+ *
+ * Returns the appropriate alpha type to decode to. If the image
+ * has alpha, the value of requestedUnpremul will be honored.
+ */
+ SkAlphaType computeOutputAlphaType(bool requestedUnpremul);
+
+ /**
+ * Returns the dimensions of the scaled output image, for an input
+ * sampleSize.
+ *
+ * When the sample size divides evenly into the original dimensions, the
+ * scaled output dimensions will simply be equal to the original
+ * dimensions divided by the sample size.
+ *
+ * When the sample size does not divide evenly into the original
+ * dimensions, the codec may round up or down, depending on what is most
+ * efficient to decode.
+ *
+ * Finally, the codec will always recommend a non-zero output, so the output
+ * dimension will always be one if the sampleSize is greater than the
+ * original dimension.
+ */
+ SkISize getSampledDimensions(int sampleSize) const;
+
+ /**
+ * Return (via desiredSubset) a subset which can be decoded from this codec,
+ * or false if the input subset is invalid.
+ *
+ * @param desiredSubset in/out parameter
+ * As input, a desired subset of the original bounds
+ * (as specified by getInfo).
+ * As output, if true is returned, desiredSubset may
+ * have been modified to a subset which is
+ * supported. Although a particular change may have
+ * been made to desiredSubset to create something
+ * supported, it is possible other changes could
+ * result in a valid subset. If false is returned,
+ * desiredSubset's value is undefined.
+ * @return true If the input desiredSubset is valid.
+ * desiredSubset may be modified to a subset
+ * supported by the codec.
+ * false If desiredSubset is invalid (NULL or not fully
+ * contained within the image).
+ */
+ bool getSupportedSubset(SkIRect* desiredSubset) const;
+ // TODO: Rename SkCodec::getValidSubset() to getSupportedSubset()
+
+ /**
+ * Returns the dimensions of the scaled, partial output image, for an
+ * input sampleSize and subset.
+ *
+ * @param sampleSize Factor to scale down by.
+ * @param subset Must be a valid subset of the original image
+ * dimensions and a subset supported by SkAndroidCodec.
+ * getSupportedSubset() can be used to obtain a subset supported
+ * by SkAndroidCodec.
+ * @return Size of the scaled partial image. Or zero size
+ * if either of the inputs is invalid.
+ */
+ SkISize getSampledSubsetDimensions(int sampleSize, const SkIRect& subset) const;
+
+ /**
+ * Additional options to pass to getAndroidPixels().
+ */
+ // FIXME: It's a bit redundant to name these AndroidOptions when this class is already
+ // called SkAndroidCodec. On the other hand, it may be a bit confusing to call
+ // these Options when SkCodec has a slightly different set of Options. Maybe these
+ // should be DecodeOptions or SamplingOptions?
+ struct AndroidOptions {
+ AndroidOptions()
+ : fZeroInitialized(SkCodec::kNo_ZeroInitialized)
+ , fSubset(nullptr)
+ , fColorPtr(nullptr)
+ , fColorCount(nullptr)
+ , fSampleSize(1)
+ {}
+
+ /**
+ * Indicates if destination pixel memory is zero initialized.
+ *
+ * The default is SkCodec::kNo_ZeroInitialized.
+ */
+ SkCodec::ZeroInitialized fZeroInitialized;
+
+ /**
+ * If not NULL, represents a subset of the original image to decode.
+ *
+ * Must be within the bounds returned by getInfo().
+ *
+ * If the EncodedFormat is kWEBP_SkEncodedFormat, the top and left
+ * values must be even.
+ *
+ * The default is NULL, meaning a decode of the entire image.
+ */
+ SkIRect* fSubset;
+
+ /**
+ * If the client has requested a decode to kIndex8_SkColorType
+ * (specified in the SkImageInfo), then the caller must provide
+ * storage for up to 256 SkPMColor values in fColorPtr. On success,
+ * the codec must copy N colors into that storage, (where N is the
+ * logical number of table entries) and set fColorCount to N.
+ *
+ * If the client does not request kIndex8_SkColorType, then the last
+ * two parameters may be NULL. If fColorCount is not null, it will be
+ * set to 0.
+ *
+ * The default is NULL for both pointers.
+ */
+ SkPMColor* fColorPtr;
+ int* fColorCount;
+
+ /**
+ * The client may provide an integer downscale factor for the decode.
+ * The codec may implement this downscaling by sampling or another
+ * method if it is more efficient.
+ *
+ * The default is 1, representing no downscaling.
+ */
+ int fSampleSize;
+ };
+
+ /**
+ * Decode into the given pixels, a block of memory of size at
+ * least (info.fHeight - 1) * rowBytes + (info.fWidth *
+ * bytesPerPixel)
+ *
+ * Repeated calls to this function should give the same results,
+ * allowing the PixelRef to be immutable.
+ *
+ * @param info A description of the format (config, size)
+ * expected by the caller. This can simply be identical
+ * to the info returned by getInfo().
+ *
+ * This contract also allows the caller to specify
+ * different output-configs, which the implementation can
+ * decide to support or not.
+ *
+ * A size that does not match getInfo() implies a request
+ * to scale or subset. If the codec cannot perform this
+ * scaling or subsetting, it will return an error code.
+ *
+ * If info is kIndex8_SkColorType, then the caller must provide storage for up to 256
+ * SkPMColor values in options->fColorPtr. On success the codec must copy N colors into
+ * that storage, (where N is the logical number of table entries) and set
+ * options->fColorCount to N.
+ *
+ * If info is not kIndex8_SkColorType, options->fColorPtr and options->fColorCount may
+ * be nullptr.
+ *
+ * The AndroidOptions object is also used to specify any requested scaling or subsetting
+ * using options->fSampleSize and options->fSubset. If NULL, the defaults (as specified above
+ * for AndroidOptions) are used.
+ *
+ * @return Result kSuccess, or another value explaining the type of failure.
+ */
+ // FIXME: It's a bit redundant to name this getAndroidPixels() when this class is already
+ // called SkAndroidCodec. On the other hand, it may be a bit confusing to call
+ // this getPixels() when it is a slightly different API than SkCodec's getPixels().
+ // Maybe this should be decode() or decodeSubset()?
+ SkCodec::Result getAndroidPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const AndroidOptions* options);
+
+ /**
+ * Simplified version of getAndroidPixels() where we supply the default AndroidOptions as
+ * specified above for AndroidOptions.
+ *
+ * This will return an error if the info is kIndex_8_SkColorType, and it will not perform
+ * any scaling or subsetting.
+ */
+ SkCodec::Result getAndroidPixels(const SkImageInfo& info, void* pixels, size_t rowBytes);
+
+ SkCodec::Result getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes) {
+ return this->getAndroidPixels(info, pixels, rowBytes);
+ }
+
+protected:
+
+ SkAndroidCodec(SkCodec*);
+
+ SkCodec* codec() const { return fCodec.get(); }
+
+ virtual SkISize onGetSampledDimensions(int sampleSize) const = 0;
+
+ virtual bool onGetSupportedSubset(SkIRect* desiredSubset) const = 0;
+
+ virtual SkCodec::Result onGetAndroidPixels(const SkImageInfo& info, void* pixels,
+ size_t rowBytes, const AndroidOptions& options) = 0;
+
+private:
+
+ // This will always be a reference to the info that is contained by the
+ // embedded SkCodec.
+ const SkImageInfo& fInfo;
+
+ SkAutoTDelete<SkCodec> fCodec;
+};
+#endif // SkAndroidCodec_DEFINED
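
A rough sketch of the sampled-decode flow described above, assuming the encoded bytes are already held in an SkData and using std::unique_ptr/std::vector for memory management (choices of this sketch, not requirements of the API):

    #include "SkAndroidCodec.h"
    #include "SkData.h"
    #include "SkImageInfo.h"

    #include <memory>
    #include <vector>

    static bool decode_sampled(sk_sp<SkData> encoded, int sampleSize, std::vector<uint8_t>* out) {
        std::unique_ptr<SkAndroidCodec> codec(SkAndroidCodec::NewFromData(std::move(encoded)));
        if (!codec) {
            return false;   // unrecognized or unsupported format
        }

        // Let the codec suggest the closest color type to N32 and request premultiplied output.
        SkColorType colorType = codec->computeOutputColorType(kN32_SkColorType);
        SkAlphaType alphaType = codec->computeOutputAlphaType(false /*requestedUnpremul*/);

        SkISize size = codec->getSampledDimensions(sampleSize);
        SkImageInfo info = SkImageInfo::Make(size.width(), size.height(), colorType, alphaType);

        size_t rowBytes = info.minRowBytes();
        out->resize(rowBytes * size.height());

        SkAndroidCodec::AndroidOptions options;
        options.fSampleSize = sampleSize;   // integer downscale factor; 1 means full size

        return SkCodec::kSuccess ==
               codec->getAndroidPixels(info, out->data(), rowBytes, &options);
    }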
diff --git a/gfx/skia/skia/include/codec/SkCodec.h b/gfx/skia/skia/include/codec/SkCodec.h
new file mode 100644
index 000000000..363347dd7
--- /dev/null
+++ b/gfx/skia/skia/include/codec/SkCodec.h
@@ -0,0 +1,798 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCodec_DEFINED
+#define SkCodec_DEFINED
+
+#include "../private/SkTemplates.h"
+#include "SkColor.h"
+#include "SkEncodedFormat.h"
+#include "SkEncodedInfo.h"
+#include "SkImageInfo.h"
+#include "SkSize.h"
+#include "SkStream.h"
+#include "SkTypes.h"
+#include "SkYUVSizeInfo.h"
+
+class SkColorSpace;
+class SkData;
+class SkPngChunkReader;
+class SkSampler;
+
+namespace DM {
+class CodecSrc;
+class ColorCodecSrc;
+}
+class ColorCodecBench;
+
+/**
+ * Abstraction layer directly on top of an image codec.
+ */
+class SkCodec : SkNoncopyable {
+public:
+ /**
+ * Minimum number of bytes that must be buffered in SkStream input.
+ *
+ * An SkStream passed to NewFromStream must be able to use this many
+ * bytes to determine the image type. Then the same SkStream must be
+ * passed to the correct decoder to read from the beginning.
+ *
+ * This can be accomplished by implementing peek() to support peeking
+ * this many bytes, or by implementing rewind() to be able to rewind()
+ * after reading this many bytes.
+ */
+ static size_t MinBufferedBytesNeeded();
+
+ /**
+ * If this stream represents an encoded image that we know how to decode,
+ * return an SkCodec that can decode it. Otherwise return NULL.
+ *
+ * As stated above, this call must be able to peek or read
+ * MinBufferedBytesNeeded to determine the correct format, and then start
+ * reading from the beginning. First it will attempt to peek, and it
+ * assumes that if less than MinBufferedBytesNeeded bytes (but more than
+ * zero) are returned, this is because the stream is shorter than this,
+ * so falling back to reading would not provide more data. If peek()
+ * returns zero bytes, this call will instead attempt to read(). This
+ * will require that the stream can be rewind()ed.
+ *
+ * If SkPngChunkReader is not NULL, take a ref and pass it to libpng if
+ * the image is a png.
+ *
+ * If the SkPngChunkReader is not NULL then:
+ * If the image is not a PNG, the SkPngChunkReader will be ignored.
+ * If the image is a PNG, the SkPngChunkReader will be reffed.
+ * If the PNG has unknown chunks, the SkPngChunkReader will be used
+ * to handle these chunks. SkPngChunkReader will be called to read
+ * any unknown chunk at any point during the creation of the codec
+ * or the decode. Note that if SkPngChunkReader fails to read a
+ * chunk, this could result in a failure to create the codec or a
+ * failure to decode the image.
+ * If the PNG does not contain unknown chunks, the SkPngChunkReader
+ * will not be used or modified.
+ *
+ * If NULL is returned, the stream is deleted immediately. Otherwise, the
+ * SkCodec takes ownership of it, and will delete it when done with it.
+ */
+ static SkCodec* NewFromStream(SkStream*, SkPngChunkReader* = NULL);
+
+ /**
+ * If this data represents an encoded image that we know how to decode,
+ * return an SkCodec that can decode it. Otherwise return NULL.
+ *
+ * If the SkPngChunkReader is not NULL then:
+ * If the image is not a PNG, the SkPngChunkReader will be ignored.
+ * If the image is a PNG, the SkPngChunkReader will be reffed.
+ * If the PNG has unknown chunks, the SkPngChunkReader will be used
+ * to handle these chunks. SkPngChunkReader will be called to read
+ * any unknown chunk at any point during the creation of the codec
+ * or the decode. Note that if SkPngChunkReader fails to read a
+ * chunk, this could result in a failure to create the codec or a
+ * failure to decode the image.
+ * If the PNG does not contain unknown chunks, the SkPngChunkReader
+ * will not be used or modified.
+ */
+ static SkCodec* NewFromData(sk_sp<SkData>, SkPngChunkReader* = NULL);
+ static SkCodec* NewFromData(SkData* data, SkPngChunkReader* reader) {
+ return NewFromData(sk_ref_sp(data), reader);
+ }
+
+ virtual ~SkCodec();
+
+ /**
+ * Return the ImageInfo associated with this codec.
+ */
+ const SkImageInfo& getInfo() const { return fSrcInfo; }
+
+ const SkEncodedInfo& getEncodedInfo() const { return fEncodedInfo; }
+
+ enum Origin {
+ kTopLeft_Origin = 1, // Default
+ kTopRight_Origin = 2, // Reflected across y-axis
+ kBottomRight_Origin = 3, // Rotated 180
+ kBottomLeft_Origin = 4, // Reflected across x-axis
+ kLeftTop_Origin = 5, // Reflected across x-axis, Rotated 90 CCW
+ kRightTop_Origin = 6, // Rotated 90 CW
+ kRightBottom_Origin = 7, // Reflected across x-axis, Rotated 90 CW
+ kLeftBottom_Origin = 8, // Rotated 90 CCW
+ kDefault_Origin = kTopLeft_Origin,
+ kLast_Origin = kLeftBottom_Origin,
+ };
+
+ /**
+ * Returns the image orientation stored in the EXIF data.
+ * If there is no EXIF data, or if we cannot read the EXIF data, returns kTopLeft.
+ */
+ Origin getOrigin() const { return fOrigin; }
+
+ /**
+ * Return a size that approximately supports the desired scale factor.
+ * The codec may not be able to scale efficiently to the exact scale
+ * factor requested, so return a size that approximates that scale.
+ * The returned value is the codec's suggestion for the closest valid
+ * scale that it can natively support.
+ */
+ SkISize getScaledDimensions(float desiredScale) const {
+ // Negative and zero scales are errors.
+ SkASSERT(desiredScale > 0.0f);
+ if (desiredScale <= 0.0f) {
+ return SkISize::Make(0, 0);
+ }
+
+ // Upscaling is not supported. Return the original size if the client
+ // requests an upscale.
+ if (desiredScale >= 1.0f) {
+ return this->getInfo().dimensions();
+ }
+ return this->onGetScaledDimensions(desiredScale);
+ }
+
+ /**
+ * Return (via desiredSubset) a subset which can be decoded from this codec,
+ * or false if this codec cannot decode subsets or anything similar to
+ * desiredSubset.
+ *
+ * @param desiredSubset In/out parameter. As input, a desired subset of
+ * the original bounds (as specified by getInfo). If true is returned,
+ * desiredSubset may have been modified to a subset which is
+ * supported. Although a particular change may have been made to
+ * desiredSubset to create something supported, it is possible other
+ * changes could result in a valid subset.
+ * If false is returned, desiredSubset's value is undefined.
+ * @return true if this codec supports decoding desiredSubset (as
+ * returned, potentially modified)
+ */
+ bool getValidSubset(SkIRect* desiredSubset) const {
+ return this->onGetValidSubset(desiredSubset);
+ }
+
+ /**
+ * Format of the encoded data.
+ */
+ SkEncodedFormat getEncodedFormat() const { return this->onGetEncodedFormat(); }
+
+ /**
+ * Used to describe the result of a call to getPixels().
+ *
+ * Result is the union of possible results from subclasses.
+ */
+ enum Result {
+ /**
+ * General return value for success.
+ */
+ kSuccess,
+ /**
+ * The input is incomplete. A partial image was generated.
+ */
+ kIncompleteInput,
+ /**
+ * The generator cannot convert to match the request, ignoring
+ * dimensions.
+ */
+ kInvalidConversion,
+ /**
+ * The generator cannot scale to requested size.
+ */
+ kInvalidScale,
+ /**
+ * Parameters (besides info) are invalid. e.g. NULL pixels, rowBytes
+ * too small, etc.
+ */
+ kInvalidParameters,
+ /**
+ * The input did not contain a valid image.
+ */
+ kInvalidInput,
+ /**
+ * Fulfilling this request requires rewinding the input, which is not
+ * supported for this input.
+ */
+ kCouldNotRewind,
+ /**
+ * This method is not implemented by this codec.
+ * FIXME: Perhaps this should be kUnsupported?
+ */
+ kUnimplemented,
+ };
+
+ /**
+ * Whether or not the memory passed to getPixels is zero initialized.
+ */
+ enum ZeroInitialized {
+ /**
+ * The memory passed to getPixels is zero initialized. The SkCodec
+ * may take advantage of this by skipping writing zeroes.
+ */
+ kYes_ZeroInitialized,
+ /**
+ * The memory passed to getPixels has not been initialized to zero,
+ * so the SkCodec must write all zeroes to memory.
+ *
+ * This is the default. It will be used if no Options struct is used.
+ */
+ kNo_ZeroInitialized,
+ };
+
+ /**
+ * Additional options to pass to getPixels.
+ */
+ struct Options {
+ Options()
+ : fZeroInitialized(kNo_ZeroInitialized)
+ , fSubset(NULL)
+ {}
+
+ ZeroInitialized fZeroInitialized;
+ /**
+ * If not NULL, represents a subset of the original image to decode.
+ * Must be within the bounds returned by getInfo().
+ * If the EncodedFormat is kWEBP_SkEncodedFormat (the only one which
+ * currently supports subsets), the top and left values must be even.
+ *
+ * In getPixels and incremental decode, we will attempt to decode the
+ * exact rectangular subset specified by fSubset.
+ *
+ * In a scanline decode, it does not make sense to specify a subset
+ * top or subset height, since the client already controls which rows
+ * to get and which rows to skip. During scanline decodes, we will
+ * require that the subset top be zero and the subset height be equal
+ * to the full height. We will, however, use the values of
+ * subset left and subset width to decode partial scanlines on calls
+ * to getScanlines().
+ */
+ SkIRect* fSubset;
+ };
+
+ /**
+ * Decode into the given pixels, a block of memory of size at
+ * least (info.fHeight - 1) * rowBytes + (info.fWidth *
+ * bytesPerPixel)
+ *
+ * Repeated calls to this function should give the same results,
+ * allowing the PixelRef to be immutable.
+ *
+ * @param info A description of the format (config, size)
+ * expected by the caller. This can simply be identical
+ * to the info returned by getInfo().
+ *
+ * This contract also allows the caller to specify
+ * different output-configs, which the implementation can
+ * decide to support or not.
+ *
+ * A size that does not match getInfo() implies a request
+ * to scale. If the generator cannot perform this scale,
+ * it will return kInvalidScale.
+ *
+ * If the info contains a non-null SkColorSpace, the codec
+ * will perform the appropriate color space transformation.
+ * If the caller passes in the same color space that was
+ * reported by the codec, the color space transformation is
+ * a no-op.
+ *
+ * If info is kIndex8_SkColorType, then the caller must provide storage for up to 256
+ * SkPMColor values in ctable. On success the generator must copy N colors into that storage,
+ * (where N is the logical number of table entries) and set ctableCount to N.
+ *
+ * If info is not kIndex8_SkColorType, then the last two parameters may be NULL. If ctableCount
+ * is not null, it will be set to 0.
+ *
+ * If a scanline decode is in progress, scanline mode will end, requiring the client to call
+ * startScanlineDecode() in order to return to decoding scanlines.
+ *
+ * @return Result kSuccess, or another value explaining the type of failure.
+ */
+ Result getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes, const Options*,
+ SkPMColor ctable[], int* ctableCount);
+
+ /**
+ * Simplified version of getPixels() that asserts that info is NOT kIndex8_SkColorType and
+ * uses the default Options.
+ */
+ Result getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes);
+
+ /**
+ * If decoding to YUV is supported, this returns true. Otherwise, this
+ * returns false and does not modify any of the parameters.
+ *
+ * @param sizeInfo Output parameter indicating the sizes and required
+ * allocation widths of the Y, U, and V planes.
+ * @param colorSpace Output parameter. If non-NULL this is set to kJPEG,
+ * otherwise this is ignored.
+ */
+ bool queryYUV8(SkYUVSizeInfo* sizeInfo, SkYUVColorSpace* colorSpace) const {
+ if (nullptr == sizeInfo) {
+ return false;
+ }
+
+ return this->onQueryYUV8(sizeInfo, colorSpace);
+ }
+
+ /**
+ * Returns kSuccess, or another value explaining the type of failure.
+ * This always attempts to perform a full decode. If the client only
+ * wants size, it should call queryYUV8().
+ *
+ * @param sizeInfo Needs to exactly match the values returned by the
+ * query, except the WidthBytes may be larger than the
+ * recommendation (but not smaller).
+ * @param planes Memory for each of the Y, U, and V planes.
+ */
+ Result getYUV8Planes(const SkYUVSizeInfo& sizeInfo, void* planes[3]) {
+ if (nullptr == planes || nullptr == planes[0] || nullptr == planes[1] ||
+ nullptr == planes[2]) {
+ return kInvalidInput;
+ }
+
+ if (!this->rewindIfNeeded()) {
+ return kCouldNotRewind;
+ }
+
+ return this->onGetYUV8Planes(sizeInfo, planes);
+ }
+
+ /**
+ * Prepare for an incremental decode with the specified options.
+ *
+ * This may require a rewind.
+ *
+ * @param dstInfo Info of the destination. If the dimensions do not match
+ * those of getInfo, this implies a scale.
+ * @param dst Memory to write to. Needs to be large enough to hold the subset,
+ * if present, or the full image as described in dstInfo.
+ * @param options Contains decoding options, including if memory is zero
+ * initialized and whether to decode a subset.
+ * @param ctable A pointer to a color table. When dstInfo.colorType() is
+ * kIndex8, this should be non-NULL and have enough storage for 256
+ * colors. The color table will be populated after decoding the palette.
+ * @param ctableCount A pointer to the size of the color table. When
+ * dstInfo.colorType() is kIndex8, this should be non-NULL. It will
+ * be modified to the true size of the color table (<= 256) after
+ * decoding the palette.
+ * @return Enum representing success or reason for failure.
+ */
+ Result startIncrementalDecode(const SkImageInfo& dstInfo, void* dst, size_t rowBytes,
+ const SkCodec::Options*, SkPMColor* ctable, int* ctableCount);
+
+ Result startIncrementalDecode(const SkImageInfo& dstInfo, void* dst, size_t rowBytes,
+ const SkCodec::Options* options) {
+ return this->startIncrementalDecode(dstInfo, dst, rowBytes, options, nullptr, nullptr);
+ }
+
+ Result startIncrementalDecode(const SkImageInfo& dstInfo, void* dst, size_t rowBytes) {
+ return this->startIncrementalDecode(dstInfo, dst, rowBytes, nullptr, nullptr, nullptr);
+ }
+
+ /**
+ * Start/continue the incremental decode.
+ *
+ * Not valid to call before calling startIncrementalDecode().
+ *
+ * After the first call, should only be called again if more data has been
+ * provided to the source SkStream.
+ *
+ * Unlike getPixels and getScanlines, this does not do any filling. This is
+ * left up to the caller, since they may be skipping lines or continuing the
+ * decode later. In the latter case, they may choose to initialize all lines
+ * first, or only initialize the remaining lines after the first call.
+ *
+ * @param rowsDecoded Optional output variable returning the total number of
+ * lines initialized. Only meaningful if this method returns kIncompleteInput.
+ * Otherwise the implementation may not set it.
+ * Note that some implementations may have initialized this many rows, but
+ * not necessarily finished those rows (e.g. interlaced PNG). This may be
+ * useful for determining what rows the client needs to initialize.
+ * @return kSuccess if all lines requested in startIncrementalDecode have
+ * been completely decoded. kIncompleteInput otherwise.
+ */
+ Result incrementalDecode(int* rowsDecoded = nullptr) {
+ if (!fStartedIncrementalDecode) {
+ return kInvalidParameters;
+ }
+ return this->onIncrementalDecode(rowsDecoded);
+ }
+
+ /**
+ * The remaining functions revolve around decoding scanlines.
+ */
+
+ /**
+ * Prepare for a scanline decode with the specified options.
+ *
+ * After this call, this class will be ready to decode the first scanline.
+ *
+ * This must be called in order to call getScanlines or skipScanlines.
+ *
+ * This may require rewinding the stream.
+ *
+ * Not all SkCodecs support this.
+ *
+ * @param dstInfo Info of the destination. If the dimensions do not match
+ * those of getInfo, this implies a scale.
+ * @param options Contains decoding options, including if memory is zero
+ * initialized.
+ * @param ctable A pointer to a color table. When dstInfo.colorType() is
+ * kIndex8, this should be non-NULL and have enough storage for 256
+ * colors. The color table will be populated after decoding the palette.
+ * @param ctableCount A pointer to the size of the color table. When
+ * dstInfo.colorType() is kIndex8, this should be non-NULL. It will
+ * be modified to the true size of the color table (<= 256) after
+ * decoding the palette.
+ * @return Enum representing success or reason for failure.
+ */
+ Result startScanlineDecode(const SkImageInfo& dstInfo, const SkCodec::Options* options,
+ SkPMColor ctable[], int* ctableCount);
+
+ /**
+ * Simplified version of startScanlineDecode() that asserts that info is NOT
+ * kIndex8_SkColorType and uses the default Options.
+ */
+ Result startScanlineDecode(const SkImageInfo& dstInfo);
+
+ /**
+ * Write the next countLines scanlines into dst.
+ *
+ * Not valid to call before calling startScanlineDecode().
+ *
+ * @param dst Must be non-null, and large enough to hold countLines
+ * scanlines of size rowBytes.
+ * @param countLines Number of lines to write.
+ * @param rowBytes Number of bytes per row. Must be large enough to hold
+ * a scanline based on the SkImageInfo used to create this object.
+ * @return the number of lines successfully decoded. If this value is
+ * less than countLines, this will fill the remaining lines with a
+ * default value.
+ */
+ int getScanlines(void* dst, int countLines, size_t rowBytes);
+
+ /**
+ * Skip count scanlines.
+ *
+ * Not valid to call before calling startScanlineDecode().
+ *
+ * The default version just calls onGetScanlines and discards the dst.
+ * NOTE: If skipped lines are the only lines with alpha, this default
+ * will make reallyHasAlpha return true, when it could have returned
+ * false.
+ *
+ * @return true if the scanlines were successfully skipped
+ * false on failure, possible reasons for failure include:
+ * An incomplete input image stream.
+ * Calling this function before calling startScanlineDecode().
+ * If countLines is less than zero or so large that it moves
+ * the current scanline past the end of the image.
+ */
+ bool skipScanlines(int countLines);
+
+ /**
+ * The order in which rows are output from the scanline decoder is not the
+ * same for all variations of all image types. This explains the possible
+ * output row orderings.
+ */
+ enum SkScanlineOrder {
+ /*
+ * By far the most common, this indicates that the image can be decoded
+ * reliably using the scanline decoder, and that rows will be output in
+ * the logical order.
+ */
+ kTopDown_SkScanlineOrder,
+
+ /*
+ * This indicates that the scanline decoder reliably outputs rows, but
+ * they will be returned in reverse order. If the scanline format is
+ * kBottomUp, the nextScanline() API can be used to determine the actual
+ * y-coordinate of the next output row, but the client is not forced
+ * to take advantage of this, given that it's not too tough to keep
+ * track independently.
+ *
+ * For full image decodes, it is safe to get all of the scanlines at
+ * once, since the decoder will handle inverting the rows as it
+ * decodes.
+ *
+ * For subset decodes and sampling, it is simplest to get and skip
+ * scanlines one at a time, using the nextScanline() API. It is
+ * possible to ask for larger chunks at a time, but this should be used
+ * with caution. As with full image decodes, the decoder will handle
+ * inverting the requested rows, but rows will still be delivered
+ * starting from the bottom of the image.
+ *
+ * Upside down bmps are an example.
+ */
+ kBottomUp_SkScanlineOrder,
+
+ /*
+ * This indicates that the scanline decoder reliably outputs rows, but
+ * they will not be in logical order. If the scanline format is
+ * kOutOfOrder, the nextScanline() API should be used to determine the
+ * actual y-coordinate of the next output row.
+ *
+ * For this scanline ordering, it is advisable to get and skip
+ * scanlines one at a time.
+ *
+ * Interlaced gifs are an example.
+ */
+ kOutOfOrder_SkScanlineOrder,
+ };
+
+ /**
+ * An enum representing the order in which scanlines will be returned by
+ * the scanline decoder.
+ *
+ * This is undefined before startScanlineDecode() is called.
+ */
+ SkScanlineOrder getScanlineOrder() const { return this->onGetScanlineOrder(); }
+
+ /**
+ * Returns the y-coordinate of the next row to be returned by the scanline
+ * decoder.
+ *
+ * This will equal fCurrScanline, except in the case of strangely
+ * encoded image types (bottom-up bmps, interlaced gifs).
+ *
+ * Results are undefined when not in scanline decoding mode.
+ */
+ int nextScanline() const { return this->outputScanline(fCurrScanline); }
+
+ /**
+ * Returns the output y-coordinate of the row that corresponds to an input
+ * y-coordinate. The input y-coordinate represents where the scanline
+ * is located in the encoded data.
+ *
+ * This will equal inputScanline, except in the case of strangely
+ * encoded image types (bottom-up bmps, interlaced gifs).
+ */
+ int outputScanline(int inputScanline) const;
+
+protected:
+ /**
+ * Takes ownership of SkStream*
+ */
+ SkCodec(int width,
+ int height,
+ const SkEncodedInfo&,
+ SkStream*,
+ sk_sp<SkColorSpace> = nullptr,
+ Origin = kTopLeft_Origin);
+
+ /**
+ * Takes ownership of SkStream*
+ * Allows the subclass to set the recommended SkImageInfo
+ */
+ SkCodec(const SkEncodedInfo&,
+ const SkImageInfo&,
+ SkStream*,
+ Origin = kTopLeft_Origin);
+
+ virtual SkISize onGetScaledDimensions(float /*desiredScale*/) const {
+ // By default, scaling is not supported.
+ return this->getInfo().dimensions();
+ }
+
+ // FIXME: What to do about subsets??
+ /**
+ * Subclasses should override if they support dimensions other than the
+ * srcInfo's.
+ */
+ virtual bool onDimensionsSupported(const SkISize&) {
+ return false;
+ }
+
+ virtual SkEncodedFormat onGetEncodedFormat() const = 0;
+
+ /**
+ * @param rowsDecoded When the encoded image stream is incomplete, this function
+ * will return kIncompleteInput and rowsDecoded will be set to
+ * the number of scanlines that were successfully decoded.
+ * This will allow getPixels() to fill the uninitialized memory.
+ */
+ virtual Result onGetPixels(const SkImageInfo& info,
+ void* pixels, size_t rowBytes, const Options&,
+ SkPMColor ctable[], int* ctableCount,
+ int* rowsDecoded) = 0;
+
+ virtual bool onQueryYUV8(SkYUVSizeInfo*, SkYUVColorSpace*) const {
+ return false;
+ }
+
+ virtual Result onGetYUV8Planes(const SkYUVSizeInfo&, void*[3] /*planes*/) {
+ return kUnimplemented;
+ }
+
+ virtual bool onGetValidSubset(SkIRect* /*desiredSubset*/) const {
+ // By default, subsets are not supported.
+ return false;
+ }
+
+ /**
+ * If the stream was previously read, attempt to rewind.
+ *
+ * If the stream needed to be rewound, call onRewind.
+ * @returns true if the codec is at the right position and can be used.
+ * false if there was a failure to rewind.
+ *
+ * This is called by getPixels() and start(). Subclasses may call if they
+ * need to rewind at another time.
+ */
+ bool SK_WARN_UNUSED_RESULT rewindIfNeeded();
+
+ /**
+ * Called by rewindIfNeeded, if the stream needed to be rewound.
+ *
+ * Subclasses should do any set up needed after a rewind.
+ */
+ virtual bool onRewind() {
+ return true;
+ }
+
+ /**
+ * On an incomplete input, getPixels() and getScanlines() will fill any uninitialized
+ * scanlines. This allows the subclass to indicate what value to fill with.
+ *
+ * @param dstInfo Describes the destination.
+ * @return The value with which to fill uninitialized pixels.
+ *
+ * Note that we can interpret the return value as a 64-bit Float16 color, a SkPMColor,
+ * a 16-bit 565 color, an 8-bit gray color, or an 8-bit index into a color table,
+ * depending on the color type.
+ */
+ uint64_t getFillValue(const SkImageInfo& dstInfo) const {
+ return this->onGetFillValue(dstInfo);
+ }
+
+ /**
+ * Some subclasses will override this function, but this is a useful default for the color
+ * types that we support. Note that for color types that do not use the full 64-bits,
+ * we will simply take the low bits of the fill value.
+ *
+ * The defaults are:
+ * kRGBA_F16_SkColorType: Transparent or Black, depending on the src alpha type
+ * kN32_SkColorType: Transparent or Black, depending on the src alpha type
+ * kRGB_565_SkColorType: Black
+ * kGray_8_SkColorType: Black
+ * kIndex_8_SkColorType: First color in color table
+ */
+ virtual uint64_t onGetFillValue(const SkImageInfo& dstInfo) const;
+
+ /**
+ * Get method for the input stream
+ */
+ SkStream* stream() {
+ return fStream.get();
+ }
+
+ /**
+ * The remaining functions revolve around decoding scanlines.
+ */
+
+ /**
+ * Most image types will be kTopDown and will not need to override this function.
+ */
+ virtual SkScanlineOrder onGetScanlineOrder() const { return kTopDown_SkScanlineOrder; }
+
+ const SkImageInfo& dstInfo() const { return fDstInfo; }
+
+ const SkCodec::Options& options() const { return fOptions; }
+
+ /**
+ * Returns the number of scanlines that have been decoded so far.
+ * This is unaffected by the SkScanlineOrder.
+ *
+ * Returns -1 if we have not started a scanline decode.
+ */
+ int currScanline() const { return fCurrScanline; }
+
+ virtual int onOutputScanline(int inputScanline) const;
+
+ /**
+ * Used for testing with qcms.
+ * FIXME: Remove this when we are done comparing with qcms.
+ */
+ virtual sk_sp<SkData> getICCData() const { return nullptr; }
+private:
+ const SkEncodedInfo fEncodedInfo;
+ const SkImageInfo fSrcInfo;
+ SkAutoTDelete<SkStream> fStream;
+ bool fNeedsRewind;
+ const Origin fOrigin;
+
+ SkImageInfo fDstInfo;
+ SkCodec::Options fOptions;
+
+ // Only meaningful during scanline decodes.
+ int fCurrScanline;
+
+ bool fStartedIncrementalDecode;
+
+ /**
+ * Return whether these dimensions are supported as a scale.
+ *
+ * The codec may choose to cache the information about scale and subset.
+ * Either way, the same information will be passed to onGetPixels/onStart
+ * on success.
+ *
+ * This must return true for a size returned from getScaledDimensions.
+ */
+ bool dimensionsSupported(const SkISize& dim) {
+ return dim == fSrcInfo.dimensions() || this->onDimensionsSupported(dim);
+ }
+
+ // Methods for scanline decoding.
+ virtual SkCodec::Result onStartScanlineDecode(const SkImageInfo& /*dstInfo*/,
+ const SkCodec::Options& /*options*/, SkPMColor* /*ctable*/, int* /*ctableCount*/) {
+ return kUnimplemented;
+ }
+
+ virtual Result onStartIncrementalDecode(const SkImageInfo& /*dstInfo*/, void*, size_t,
+ const SkCodec::Options&, SkPMColor*, int*) {
+ return kUnimplemented;
+ }
+
+ virtual Result onIncrementalDecode(int*) {
+ return kUnimplemented;
+ }
+
+
+ virtual bool onSkipScanlines(int /*countLines*/) { return false; }
+
+ virtual int onGetScanlines(void* /*dst*/, int /*countLines*/, size_t /*rowBytes*/) { return 0; }
+
+ /**
+ * On an incomplete decode, getPixels() and getScanlines() will call this function
+ * to fill any uinitialized memory.
+ *
+ * @param dstInfo Contains the destination color type
+ * Contains the destination alpha type
+ * Contains the destination width
+ * The height stored in this info is unused
+ * @param dst Pointer to the start of destination pixel memory
+ * @param rowBytes Stride length in destination pixel memory
+ * @param zeroInit Indicates if memory is zero initialized
+ * @param linesRequested Number of lines that the client requested
+ * @param linesDecoded Number of lines that were successfully decoded
+ */
+ void fillIncompleteImage(const SkImageInfo& dstInfo, void* dst, size_t rowBytes,
+ ZeroInitialized zeroInit, int linesRequested, int linesDecoded);
+
+ /**
+ * Return an object which will allow forcing scanline decodes to sample in X.
+ *
+ * May create a sampler, if one is not currently being used. Otherwise, does
+ * not affect ownership.
+ *
+ * Only valid during scanline decoding.
+ */
+ virtual SkSampler* getSampler(bool /*createIfNecessary*/) { return nullptr; }
+
+ // For testing with qcms
+ // FIXME: Remove these when we are done comparing with qcms.
+ friend class DM::ColorCodecSrc;
+ friend class ColorCodecBench;
+
+ friend class DM::CodecSrc; // for fillIncompleteImage
+ friend class SkSampledCodec;
+ friend class SkIcoCodec;
+};
+#endif // SkCodec_DEFINED
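
A minimal sketch of the simplest path through the API above: a full-image decode to N32/premul via the convenience getPixels() overload. Scanline and incremental decoding use the same setup but go through startScanlineDecode()/getScanlines() or startIncrementalDecode()/incrementalDecode() instead. The buffer sizing and smart-pointer choices here are assumptions of the sketch.

    #include "SkCodec.h"
    #include "SkImageInfo.h"
    #include "SkStream.h"

    #include <memory>
    #include <vector>

    // kIncompleteInput still leaves a usable image: the undecoded rows are filled in.
    static bool decode_to_n32(std::unique_ptr<SkStream> stream, std::vector<uint8_t>* pixels,
                              SkImageInfo* decodedInfo) {
        // NewFromStream takes ownership of the stream, so release it here.
        std::unique_ptr<SkCodec> codec(SkCodec::NewFromStream(stream.release()));
        if (!codec) {
            return false;
        }

        SkImageInfo info = codec->getInfo().makeColorType(kN32_SkColorType)
                                           .makeAlphaType(kPremul_SkAlphaType);
        size_t rowBytes = info.minRowBytes();
        pixels->resize(rowBytes * info.height());

        SkCodec::Result result = codec->getPixels(info, pixels->data(), rowBytes);
        *decodedInfo = info;
        return SkCodec::kSuccess == result || SkCodec::kIncompleteInput == result;
    }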
diff --git a/gfx/skia/skia/include/codec/SkEncodedFormat.h b/gfx/skia/skia/include/codec/SkEncodedFormat.h
new file mode 100644
index 000000000..c097e088f
--- /dev/null
+++ b/gfx/skia/skia/include/codec/SkEncodedFormat.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEncodedFormat_DEFINED
+#define SkEncodedFormat_DEFINED
+
+/**
+ * Enum describing format of encoded data.
+ */
+enum SkEncodedFormat {
+ kUnknown_SkEncodedFormat,
+ kBMP_SkEncodedFormat,
+ kGIF_SkEncodedFormat,
+ kICO_SkEncodedFormat,
+ kJPEG_SkEncodedFormat,
+ kPNG_SkEncodedFormat,
+ kWBMP_SkEncodedFormat,
+ kWEBP_SkEncodedFormat,
+ kPKM_SkEncodedFormat,
+ kKTX_SkEncodedFormat,
+ kASTC_SkEncodedFormat,
+ kDNG_SkEncodedFormat,
+};
+#endif // SkEncodedFormat_DEFINED
diff --git a/gfx/skia/skia/include/codec/SkEncodedInfo.h b/gfx/skia/skia/include/codec/SkEncodedInfo.h
new file mode 100644
index 000000000..eb8c147a3
--- /dev/null
+++ b/gfx/skia/skia/include/codec/SkEncodedInfo.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEncodedInfo_DEFINED
+#define SkEncodedInfo_DEFINED
+
+#include "SkImageInfo.h"
+
+class SkColorSpace;
+
+struct SkEncodedInfo {
+public:
+
+ enum Alpha {
+ kOpaque_Alpha,
+ kUnpremul_Alpha,
+
+ // Each pixel is either fully opaque or fully transparent.
+ // There is no difference between requesting kPremul or kUnpremul.
+ kBinary_Alpha,
+ };
+
+ /*
+ * We strive to make the number of components per pixel obvious through
+ * our naming conventions.
+ * Ex: kRGB has 3 components. kRGBA has 4 components.
+ *
+ * This sometimes results in redundant Alpha and Color information.
+ * Ex: kRGB images must also be kOpaque.
+ */
+ enum Color {
+ // PNG, WBMP
+ kGray_Color,
+
+ // PNG
+ kGrayAlpha_Color,
+
+ // PNG, GIF, BMP
+ kPalette_Color,
+
+ // PNG, RAW
+ kRGB_Color,
+ kRGBA_Color,
+
+ // BMP
+ kBGR_Color,
+ kBGRX_Color,
+ kBGRA_Color,
+
+ // JPEG, WEBP
+ kYUV_Color,
+
+ // WEBP
+ kYUVA_Color,
+
+ // JPEG
+ // Photoshop actually writes inverted CMYK data into JPEGs, where zero
+ // represents 100% ink coverage. For this reason, we treat CMYK JPEGs
+ // as having inverted CMYK. libjpeg-turbo warns that this may break
+ // other applications, but the CMYK JPEGs we see on the web expect to
+ // be treated as inverted CMYK.
+ kInvertedCMYK_Color,
+ kYCCK_Color,
+ };
+
+ static SkEncodedInfo Make(Color color, Alpha alpha, int bitsPerComponent) {
+ SkASSERT(1 == bitsPerComponent ||
+ 2 == bitsPerComponent ||
+ 4 == bitsPerComponent ||
+ 8 == bitsPerComponent ||
+ 16 == bitsPerComponent);
+
+ switch (color) {
+ case kGray_Color:
+ SkASSERT(kOpaque_Alpha == alpha);
+ break;
+ case kGrayAlpha_Color:
+ SkASSERT(kOpaque_Alpha != alpha);
+ break;
+ case kPalette_Color:
+ SkASSERT(16 != bitsPerComponent);
+ break;
+ case kRGB_Color:
+ case kBGR_Color:
+ case kBGRX_Color:
+ SkASSERT(kOpaque_Alpha == alpha);
+ SkASSERT(bitsPerComponent >= 8);
+ break;
+ case kYUV_Color:
+ case kInvertedCMYK_Color:
+ case kYCCK_Color:
+ SkASSERT(kOpaque_Alpha == alpha);
+ SkASSERT(8 == bitsPerComponent);
+ break;
+ case kRGBA_Color:
+ SkASSERT(kOpaque_Alpha != alpha);
+ SkASSERT(bitsPerComponent >= 8);
+ break;
+ case kBGRA_Color:
+ case kYUVA_Color:
+ SkASSERT(kOpaque_Alpha != alpha);
+ SkASSERT(8 == bitsPerComponent);
+ break;
+ default:
+ SkASSERT(false);
+ break;
+ }
+
+ return SkEncodedInfo(color, alpha, bitsPerComponent);
+ }
+
+ /*
+ * Returns an SkImageInfo with Skia color and alpha types that are the
+ * closest possible match to the encoded info.
+ */
+ SkImageInfo makeImageInfo(int width, int height, sk_sp<SkColorSpace> colorSpace) const {
+ switch (fColor) {
+ case kGray_Color:
+ SkASSERT(kOpaque_Alpha == fAlpha);
+ return SkImageInfo::Make(width, height, kGray_8_SkColorType,
+ kOpaque_SkAlphaType, colorSpace);
+ case kGrayAlpha_Color:
+ SkASSERT(kOpaque_Alpha != fAlpha);
+ return SkImageInfo::Make(width, height, kN32_SkColorType,
+ kUnpremul_SkAlphaType, colorSpace);
+ case kPalette_Color: {
+ SkAlphaType alphaType = (kOpaque_Alpha == fAlpha) ? kOpaque_SkAlphaType :
+ kUnpremul_SkAlphaType;
+ return SkImageInfo::Make(width, height, kIndex_8_SkColorType,
+ alphaType, colorSpace);
+ }
+ case kRGB_Color:
+ case kBGR_Color:
+ case kBGRX_Color:
+ case kYUV_Color:
+ case kInvertedCMYK_Color:
+ case kYCCK_Color:
+ SkASSERT(kOpaque_Alpha == fAlpha);
+ return SkImageInfo::Make(width, height, kN32_SkColorType,
+ kOpaque_SkAlphaType, colorSpace);
+ case kRGBA_Color:
+ case kBGRA_Color:
+ case kYUVA_Color:
+ SkASSERT(kOpaque_Alpha != fAlpha);
+ return SkImageInfo::Make(width, height, kN32_SkColorType,
+ kUnpremul_SkAlphaType, std::move(colorSpace));
+ default:
+ SkASSERT(false);
+ return SkImageInfo::MakeUnknown();
+ }
+ }
+
+ Color color() const { return fColor; }
+ Alpha alpha() const { return fAlpha; }
+ uint8_t bitsPerComponent() const { return fBitsPerComponent; }
+
+ uint8_t bitsPerPixel() const {
+ switch (fColor) {
+ case kGray_Color:
+ return fBitsPerComponent;
+ case kGrayAlpha_Color:
+ return 2 * fBitsPerComponent;
+ case kPalette_Color:
+ return fBitsPerComponent;
+ case kRGB_Color:
+ case kBGR_Color:
+ case kYUV_Color:
+ return 3 * fBitsPerComponent;
+ case kRGBA_Color:
+ case kBGRA_Color:
+ case kBGRX_Color:
+ case kYUVA_Color:
+ case kInvertedCMYK_Color:
+ case kYCCK_Color:
+ return 4 * fBitsPerComponent;
+ default:
+ SkASSERT(false);
+ return 0;
+ }
+ }
+
+private:
+
+ SkEncodedInfo(Color color, Alpha alpha, uint8_t bitsPerComponent)
+ : fColor(color)
+ , fAlpha(alpha)
+ , fBitsPerComponent(bitsPerComponent)
+ {}
+
+ Color fColor;
+ Alpha fAlpha;
+ uint8_t fBitsPerComponent;
+};
+
+#endif
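
For example, a decoder describing plain 8-bit RGBA data could derive its recommended SkImageInfo like this (a sketch; width and height are whatever the container reports, and no embedded color space is assumed):

    #include "SkEncodedInfo.h"

    static SkImageInfo recommended_rgba8_info(int width, int height) {
        SkEncodedInfo encoded = SkEncodedInfo::Make(SkEncodedInfo::kRGBA_Color,
                                                    SkEncodedInfo::kUnpremul_Alpha,
                                                    8 /*bitsPerComponent*/);
        SkASSERT(32 == encoded.bitsPerPixel());   // 4 components * 8 bits each
        return encoded.makeImageInfo(width, height, nullptr /*no color space*/);
    }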
diff --git a/gfx/skia/skia/include/config/SkUserConfig.h b/gfx/skia/skia/include/config/SkUserConfig.h
new file mode 100644
index 000000000..18e77ada5
--- /dev/null
+++ b/gfx/skia/skia/include/config/SkUserConfig.h
@@ -0,0 +1,174 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkUserConfig_DEFINED
+#define SkUserConfig_DEFINED
+
+/* SkTypes.h, the root of the public header files, does the following trick:
+
+ #include "SkPreConfig.h"
+ #include "SkUserConfig.h"
+ #include "SkPostConfig.h"
+
+ SkPreConfig.h runs first, and it is responsible for initializing certain
+ skia defines.
+
+ SkPostConfig.h runs last, and its job is to just check that the final
+ defines are consistent (i.e. that we don't have mutually conflicting
+ defines).
+
+ SkUserConfig.h (this file) runs in the middle. It gets to change or augment
+ the list of flags initially set in preconfig, and then postconfig checks
+ that everything still makes sense.
+
+ Below are optional defines that add, subtract, or change default behavior
+ in Skia. Your port can locally edit this file to enable/disable flags as
+ you choose, or these can be declared on your command line (e.g. -Dfoo).
+
+ By default, this include file has all of the flags commented out, so
+ including it will have no effect.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* Skia has lots of debug-only code. Often this is just null checks or other
+ parameter checking, but sometimes it can be quite intrusive (e.g. check that
+ each 32bit pixel is in premultiplied form). This code can be very useful
+ during development, but will slow things down in a shipping product.
+
+ By default, these mutually exclusive flags are defined in SkPreConfig.h,
+ based on the presence or absence of NDEBUG, but that decision can be changed
+ here.
+ */
+//#define SK_DEBUG
+//#define SK_RELEASE
+
+/* Skia has certain debug-only code that is extremely intensive even for debug
+ builds. This code is useful for diagnosing specific issues, but is not
+ generally applicable, therefore it must be explicitly enabled to avoid
+ the performance impact. By default these flags are undefined, but can be
+ enabled by uncommenting them below.
+ */
+//#define SK_DEBUG_GLYPH_CACHE
+//#define SK_DEBUG_PATH
+
+/* preconfig will have attempted to determine the endianness of the system,
+ but you can change these mutually exclusive flags here.
+ */
+//#define SK_CPU_BENDIAN
+//#define SK_CPU_LENDIAN
+
+/* Most compilers use the same bit endianness for bit flags in a byte as the
+ system byte endianness, and this is the default. If for some reason this
+ needs to be overridden, specify which of the mutually exclusive flags to
+ use. For example, some atom processors in certain configurations have big
+ endian byte order but little endian bit orders.
+*/
+//#define SK_UINT8_BITFIELD_BENDIAN
+//#define SK_UINT8_BITFIELD_LENDIAN
+
+
+/* To write debug messages to a console, skia will call SkDebugf(...) following
+ printf conventions (e.g. const char* format, ...). If you want to redirect
+ this to something other than printf, define yours here
+ */
+//#define SkDebugf(...) MyFunction(__VA_ARGS__)
+
+/*
+ * To specify a different default font cache limit, define this. If this is
+ * undefined, skia will use a built-in value.
+ */
+//#define SK_DEFAULT_FONT_CACHE_LIMIT (1024 * 1024)
+
+/*
+ * To specify the default size of the image cache, define this to the desired
+ * value (in bytes). SkGraphics.h has a runtime API to set this value as
+ * well. If this is undefined, a built-in value will be used.
+ */
+//#define SK_DEFAULT_IMAGE_CACHE_LIMIT (1024 * 1024)
+
+/* Define this to provide font subsetter in PDF generation.
+ */
+//#define SK_SFNTLY_SUBSETTER "sample/chromium/font_subsetter.h"
+
+/* Define this to set the upper limit for text to support LCD. Values that
+ are very large increase the cost in the font cache and draw slower, without
+ improving readability. If this is undefined, Skia will use its default
+ value (e.g. 48)
+ */
+//#define SK_MAX_SIZE_FOR_LCDTEXT 48
+
+/* If SK_DEBUG is defined, then you can optionally define SK_SUPPORT_UNITTEST
+ which will run additional self-tests at startup. These can take a long time,
+ so this flag is optional.
+ */
+#ifdef SK_DEBUG
+//#define SK_SUPPORT_UNITTEST
+#endif
+
+/* Change the ordering to work in X windows.
+ */
+//#ifdef SK_SAMPLES_FOR_X
+// #define SK_R32_SHIFT 16
+// #define SK_G32_SHIFT 8
+// #define SK_B32_SHIFT 0
+// #define SK_A32_SHIFT 24
+//#endif
+
+
+/* Determines whether to build code that supports the GPU backend. Some classes
+ that are not GPU-specific, such as SkShader subclasses, have optional code
+ that allows them to interact with the GPU backend. If you'd like to omit
+ this code, set SK_SUPPORT_GPU to 0. This also allows you to omit the gpu
+ directories from your include search path when you're not building the GPU
+ backend. Defaults to 1 (build the GPU code).
+ */
+//#define SK_SUPPORT_GPU 1
+
+/* Skia makes use of histogram logging macros to trace the frequency of
+ * events. By default, Skia provides no-op versions of these macros.
+ * Skia consumers can provide their own definitions of these macros to
+ * integrate with their histogram collection backend.
+ */
+//#define SK_HISTOGRAM_BOOLEAN(name, value)
+//#define SK_HISTOGRAM_ENUMERATION(name, value, boundary_value)
+
+// On all platforms we have this byte order
+#define SK_A32_SHIFT 24
+#define SK_R32_SHIFT 16
+#define SK_G32_SHIFT 8
+#define SK_B32_SHIFT 0
+
+#define SK_ALLOW_STATIC_GLOBAL_INITIALIZERS 0
+
+#define SK_SUPPORT_LEGACY_GETDEVICE
+#define SK_SUPPORT_LEGACY_GETTOPDEVICE
+
+#define SK_IGNORE_ETC1_SUPPORT
+
+// Don't use __stdcall with SkiaGLGlue - bug 1320644
+#define GR_GL_FUNCTION_TYPE
+
+#define SK_RASTERIZE_EVEN_ROUNDING
+
+#define SK_DISABLE_SCREENSPACE_TESS_AA_PATH_RENDERER
+
+#define SK_DISABLE_SLOW_DEBUG_VALIDATION 1
+
+#define MOZ_SKIA 1
+
+#ifndef MOZ_IMPLICIT
+# ifdef MOZ_CLANG_PLUGIN
+# define MOZ_IMPLICIT __attribute__((annotate("moz_implicit")))
+# else
+# define MOZ_IMPLICIT
+# endif
+#endif
+
+#endif
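
As an illustration of the SkDebugf hook described near the top of this file, a port could route Skia's debug output through its own logger; MyLogFunction here is a hypothetical printf-style function, not something defined by Skia or this tree:

    /* Hypothetical override in a port's SkUserConfig.h: send SkDebugf through a
       printf-style logger provided elsewhere by the port. */
    void MyLogFunction(const char* format, ...);
    #define SkDebugf(...) MyLogFunction(__VA_ARGS__)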
diff --git a/gfx/skia/skia/include/core/SkAnnotation.h b/gfx/skia/skia/include/core/SkAnnotation.h
new file mode 100644
index 000000000..35cc2b5d0
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkAnnotation.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAnnotation_DEFINED
+#define SkAnnotation_DEFINED
+
+#include "SkTypes.h"
+
+class SkData;
+struct SkPoint;
+struct SkRect;
+class SkCanvas;
+
+/**
+ * Annotate the canvas by associating the specified URL with the
+ * specified rectangle (in local coordinates, just like drawRect).
+ *
+ * If the backend of this canvas does not support annotations, this call is
+ * safely ignored.
+ *
+ * The caller is responsible for managing its ownership of the SkData.
+ */
+SK_API void SkAnnotateRectWithURL(SkCanvas*, const SkRect&, SkData*);
+
+/**
+ * Annotate the canvas by associating a name with the specified point.
+ *
+ * If the backend of this canvas does not support annotations, this call is
+ * safely ignored.
+ *
+ * The caller is responsible for managing its ownership of the SkData.
+ */
+SK_API void SkAnnotateNamedDestination(SkCanvas*, const SkPoint&, SkData*);
+
+/**
+ * Annotate the canvas by making the specified rectangle link to a named
+ * destination.
+ *
+ * If the backend of this canvas does not support annotations, this call is
+ * safely ignored.
+ *
+ * The caller is responsible for managing its ownership of the SkData.
+ */
+SK_API void SkAnnotateLinkToDestination(SkCanvas*, const SkRect&, SkData*);
+
+#endif
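
A brief usage sketch for the declarations above, assuming a canvas whose backend records annotations (e.g. the PDF backend); other backends safely ignore the call. SkData::MakeWithCString comes from SkData.h, which is not shown in this hunk, and the URL is an arbitrary example:

    #include "SkAnnotation.h"
    #include "SkCanvas.h"
    #include "SkData.h"
    #include "SkRect.h"

    // Attach a URL to a rectangle in local coordinates (just like drawRect).
    static void AnnotateLink(SkCanvas* canvas) {
        sk_sp<SkData> url = SkData::MakeWithCString("https://example.org/");
        SkRect rect = SkRect::MakeXYWH(10, 10, 200, 20);
        SkAnnotateRectWithURL(canvas, rect, url.get());  // canvas does not take ownership
    }
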
diff --git a/gfx/skia/skia/include/core/SkBBHFactory.h b/gfx/skia/skia/include/core/SkBBHFactory.h
new file mode 100644
index 000000000..58bd754b2
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkBBHFactory.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBBHFactory_DEFINED
+#define SkBBHFactory_DEFINED
+
+#include "SkTypes.h"
+class SkBBoxHierarchy;
+struct SkRect;
+
+class SK_API SkBBHFactory {
+public:
+ /**
+ * Allocate a new SkBBoxHierarchy. Return NULL on failure.
+ */
+ virtual SkBBoxHierarchy* operator()(const SkRect& bounds) const = 0;
+ virtual ~SkBBHFactory() {}
+};
+
+class SK_API SkRTreeFactory : public SkBBHFactory {
+public:
+ SkBBoxHierarchy* operator()(const SkRect& bounds) const override;
+private:
+ typedef SkBBHFactory INHERITED;
+};
+
+#endif
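
A usage sketch for SkRTreeFactory, assuming the SkPictureRecorder API from include/core (not shown in this hunk): the factory is handed to beginRecording(), which invokes it to build the bounding-box hierarchy used to cull draw ops at playback time.

    #include "SkBBHFactory.h"
    #include "SkCanvas.h"
    #include "SkPictureRecorder.h"

    // Record a picture with an R-tree so playback can skip off-clip draw ops.
    static sk_sp<SkPicture> RecordWithRTree() {
        SkRTreeFactory factory;
        SkPictureRecorder recorder;
        SkCanvas* canvas = recorder.beginRecording(SkRect::MakeWH(1024, 768), &factory);
        canvas->drawColor(SK_ColorWHITE);  // ...record draws here...
        return recorder.finishRecordingAsPicture();
    }
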
diff --git a/gfx/skia/skia/include/core/SkBitmap.h b/gfx/skia/skia/include/core/SkBitmap.h
new file mode 100644
index 000000000..ce1b56e7b
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkBitmap.h
@@ -0,0 +1,830 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmap_DEFINED
+#define SkBitmap_DEFINED
+
+#include "SkColor.h"
+#include "SkColorTable.h"
+#include "SkImageInfo.h"
+#include "SkPixmap.h"
+#include "SkPoint.h"
+#include "SkRefCnt.h"
+
+struct SkMask;
+struct SkIRect;
+struct SkRect;
+class SkPaint;
+class SkPixelRef;
+class SkPixelRefFactory;
+class SkRegion;
+class SkString;
+
+/** \class SkBitmap
+
+ The SkBitmap class specifies a raster bitmap. A bitmap has an integer width
+ and height, a format (colortype), and a pointer to the actual pixels.
+ Bitmaps can be drawn into an SkCanvas, but they are also used to specify the
+ target of an SkCanvas' drawing operations.
+ A const SkBitmap exposes getAddr(), which lets a caller write its pixels;
+ the constness is considered to apply to the bitmap's configuration, not
+ its contents.
+*/
+class SK_API SkBitmap {
+public:
+ class SK_API Allocator;
+
+ /**
+ * The default constructor creates a bitmap with zero width and height, and no pixels.
+ * Its colortype is set to kUnknown_SkColorType.
+ */
+ SkBitmap();
+
+ /**
+ * Copy the settings from the src into this bitmap. If the src has pixels
+ * allocated, they will be shared, not copied, so that the two bitmaps will
+ * reference the same memory for the pixels. If a deep copy is needed,
+ * where the new bitmap has its own separate copy of the pixels, use
+ * deepCopyTo().
+ */
+ SkBitmap(const SkBitmap& src);
+
+ /**
+ * Copy the settings from the src into this bitmap. If the src has pixels
+ * allocated, ownership of the pixels will be taken.
+ */
+ SkBitmap(SkBitmap&& src);
+
+ ~SkBitmap();
+
+ /** Copies the src bitmap into this bitmap. Ownership of the src
+ bitmap's pixels is shared with the src bitmap.
+ */
+ SkBitmap& operator=(const SkBitmap& src);
+
+ /** Copies the src bitmap into this bitmap. Takes ownership of the src
+ bitmap's pixels.
+ */
+ SkBitmap& operator=(SkBitmap&& src);
+
+ /** Swap the fields of the two bitmaps. This routine is guaranteed to never fail or throw.
+ */
+ // This method is not exported to java.
+ void swap(SkBitmap& other);
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ const SkImageInfo& info() const { return fInfo; }
+
+ int width() const { return fInfo.width(); }
+ int height() const { return fInfo.height(); }
+ SkColorType colorType() const { return fInfo.colorType(); }
+ SkAlphaType alphaType() const { return fInfo.alphaType(); }
+ SkColorSpace* colorSpace() const { return fInfo.colorSpace(); }
+
+ /**
+ * Return the number of bytes per pixel based on the colortype. If the colortype is
+ * kUnknown_SkColorType, then 0 is returned.
+ */
+ int bytesPerPixel() const { return fInfo.bytesPerPixel(); }
+
+ /**
+ * Return the rowbytes expressed as a number of pixels (like width and height).
+ * If the colortype is kUnknown_SkColorType, then 0 is returned.
+ */
+ int rowBytesAsPixels() const {
+ return fRowBytes >> this->shiftPerPixel();
+ }
+
+ /**
+ * Return the shift amount per pixel (i.e. 0 for 1-byte per pixel, 1 for 2-bytes per pixel
+ * colortypes, 2 for 4-bytes per pixel colortypes). Return 0 for kUnknown_SkColorType.
+ */
+ int shiftPerPixel() const { return this->fInfo.shiftPerPixel(); }
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ /** Return true iff the bitmap has empty dimensions.
+ * Note: before you use this, consider whether you really want drawsNothing() instead.
+ */
+ bool empty() const { return fInfo.isEmpty(); }
+
+ /** Return true iff the bitmap has no pixelref. Note: this can return true even if the
+ * dimensions of the bitmap are > 0 (see empty()).
+ * Note: before you use this, consider whether you really want drawsNothing() instead.
+ */
+ bool isNull() const { return NULL == fPixelRef; }
+
+ /** Return true iff drawing this bitmap has no effect.
+ */
+ bool drawsNothing() const { return this->empty() || this->isNull(); }
+
+ /** Return the number of bytes between subsequent rows of the bitmap. */
+ size_t rowBytes() const { return fRowBytes; }
+
+ /**
+ * Set the bitmap's alphaType, returning true on success. If false is
+ * returned, then the specified new alphaType is incompatible with the
+ * colortype, and the current alphaType is unchanged.
+ *
+ * Note: this changes the alphatype for the underlying pixels, which means
+ * that all bitmaps that might be sharing (subsets of) the pixels will
+ * be affected.
+ */
+ bool setAlphaType(SkAlphaType);
+
+ /** Return the address of the pixels for this SkBitmap.
+ */
+ void* getPixels() const { return fPixels; }
+
+ /** Return the byte size of the pixels, based on the height and rowBytes.
+ Note this truncates the result to 32 bits. Call computeSize64() to detect
+ if the real size exceeds 32 bits.
+ */
+ size_t getSize() const { return fInfo.height() * fRowBytes; }
+
+ /** Return the number of bytes from the pointer returned by getPixels()
+ to the end of the allocated space in the buffer. Required in
+ cases where extractSubset has been called.
+ */
+ size_t getSafeSize() const { return fInfo.getSafeSize(fRowBytes); }
+
+ /**
+ * Return the full size of the bitmap, in bytes.
+ */
+ int64_t computeSize64() const {
+ return sk_64_mul(fInfo.height(), fRowBytes);
+ }
+
+ /**
+ * Return the number of bytes from the pointer returned by getPixels()
+ * to the end of the allocated space in the buffer. This may be smaller
+ * than computeSize64() if there is any rowbytes padding beyond the width.
+ */
+ int64_t computeSafeSize64() const {
+ return fInfo.getSafeSize64(fRowBytes);
+ }
+
+ /** Returns true if this bitmap is marked as immutable, meaning that the
+ contents of its pixels will not change for the lifetime of the bitmap.
+ */
+ bool isImmutable() const;
+
+ /** Marks this bitmap as immutable, meaning that the contents of its
+ pixels will not change for the lifetime of the bitmap and of the
+ underlying pixelref. This state can be set, but it cannot be
+ cleared once it is set. This state propagates to all other bitmaps
+ that share the same pixelref.
+ */
+ void setImmutable();
+
+ /** Returns true if the bitmap is opaque (has no translucent/transparent pixels).
+ */
+ bool isOpaque() const {
+ return SkAlphaTypeIsOpaque(this->alphaType());
+ }
+
+ /** Returns true if the bitmap is volatile (i.e. should not be cached by devices.)
+ */
+ bool isVolatile() const;
+
+ /** Specify whether this bitmap is volatile. Bitmaps are not volatile by
+ default. Temporary bitmaps that are discarded after use should be
+ marked as volatile. This provides a hint to the device that the bitmap
+ should not be cached. Providing this hint when appropriate can
+ improve performance by avoiding unnecessary overhead and resource
+ consumption on the device.
+ */
+ void setIsVolatile(bool);
+
+ /** Reset the bitmap to its initial state (see default constructor). If we are a (shared)
+ owner of the pixels, that ownership is decremented.
+ */
+ void reset();
+
+ /**
+ * This will brute-force return true if all of the pixels in the bitmap
+ * are opaque. If it fails to read the pixels, or encounters an error,
+ * it will return false.
+ *
+ * Since this can be an expensive operation, the bitmap stores a flag for
+ * this (isOpaque). Only call this if you need to compute this value from
+ * "unknown" pixels.
+ */
+ static bool ComputeIsOpaque(const SkBitmap&);
+
+ /**
+ * Return the bitmap's bounds [0, 0, width, height] as an SkRect
+ */
+ void getBounds(SkRect* bounds) const;
+ void getBounds(SkIRect* bounds) const;
+
+ SkIRect bounds() const { return fInfo.bounds(); }
+ SkISize dimensions() const { return fInfo.dimensions(); }
+ // Returns the bounds of this bitmap, offset by its pixelref origin.
+ SkIRect getSubset() const {
+ return SkIRect::MakeXYWH(fPixelRefOrigin.x(), fPixelRefOrigin.y(),
+ fInfo.width(), fInfo.height());
+ }
+
+ bool setInfo(const SkImageInfo&, size_t rowBytes = 0);
+
+ /**
+ * Allocate the bitmap's pixels to match the requested image info. If the Factory
+ * is non-null, call it to allocate the pixelref. If the ImageInfo requires
+ * a colortable, then ColorTable must be non-null, and will be ref'd.
+ * On failure, the bitmap will be set to empty and return false.
+ */
+ bool SK_WARN_UNUSED_RESULT tryAllocPixels(const SkImageInfo&, SkPixelRefFactory*, SkColorTable*);
+
+ void allocPixels(const SkImageInfo& info, SkPixelRefFactory* factory, SkColorTable* ctable) {
+ if (!this->tryAllocPixels(info, factory, ctable)) {
+ sk_throw();
+ }
+ }
+
+ /**
+ * Allocate the bitmap's pixels to match the requested image info and
+ * rowBytes. If the request cannot be met (e.g. the info is invalid, or
+ * the requested rowBytes is not compatible with the info because
+ * rowBytes < info.minRowBytes() or rowBytes is not aligned with the
+ * pixel size specified by info.colorType()), then false is returned
+ * and the bitmap is set to empty.
+ */
+ bool SK_WARN_UNUSED_RESULT tryAllocPixels(const SkImageInfo& info, size_t rowBytes);
+
+ void allocPixels(const SkImageInfo& info, size_t rowBytes) {
+ if (!this->tryAllocPixels(info, rowBytes)) {
+ sk_throw();
+ }
+ }
+
+ bool SK_WARN_UNUSED_RESULT tryAllocPixels(const SkImageInfo& info) {
+ return this->tryAllocPixels(info, info.minRowBytes());
+ }
+
+ void allocPixels(const SkImageInfo& info) {
+ this->allocPixels(info, info.minRowBytes());
+ }
+
+ bool SK_WARN_UNUSED_RESULT tryAllocN32Pixels(int width, int height, bool isOpaque = false) {
+ SkImageInfo info = SkImageInfo::MakeN32(width, height,
+ isOpaque ? kOpaque_SkAlphaType : kPremul_SkAlphaType);
+ return this->tryAllocPixels(info);
+ }
+
+ void allocN32Pixels(int width, int height, bool isOpaque = false) {
+ SkImageInfo info = SkImageInfo::MakeN32(width, height,
+ isOpaque ? kOpaque_SkAlphaType : kPremul_SkAlphaType);
+ this->allocPixels(info);
+ }
+
+ /**
+ * Install a pixelref that wraps the specified pixels and rowBytes, and
+ * optional ReleaseProc and context. When the pixels are no longer
+ * referenced, if releaseProc is not null, it will be called with the
+ * pixels and context as parameters.
+ * On failure, the bitmap will be set to empty and return false.
+ *
+ * If specified, the releaseProc will always be called, even on failure. It is also possible
+ * for the call to succeed but for the releaseProc to be called immediately
+ * (e.g. a valid Info but NULL pixels).
+ */
+ bool installPixels(const SkImageInfo&, void* pixels, size_t rowBytes, SkColorTable*,
+ void (*releaseProc)(void* addr, void* context), void* context);
+
+ /**
+ * Call installPixels with no ReleaseProc specified. This means that the
+ * caller must ensure that the specified pixels are valid for the lifetime
+ * of the created bitmap (and its pixelRef).
+ */
+ bool installPixels(const SkImageInfo& info, void* pixels, size_t rowBytes) {
+ return this->installPixels(info, pixels, rowBytes, NULL, NULL, NULL);
+ }
+
+ /**
+ * Call installPixels with no ReleaseProc specified. This means
+ * that the caller must ensure that the specified pixels and
+ * colortable are valid for the lifetime of the created bitmap
+ * (and its pixelRef).
+ */
+ bool installPixels(const SkPixmap&);
+
+ /**
+ * Calls installPixels() with the value in the SkMask. The caller must
+ * ensure that the specified mask pixels are valid for the lifetime
+ * of the created bitmap (and its pixelRef).
+ */
+ bool installMaskPixels(const SkMask&);
+
+ /** Use this to assign a new pixel address for an existing bitmap. This
+ will automatically release any pixelref previously installed. Only call
+ this if you are handling ownership/lifetime of the pixel memory.
+
+ If the bitmap retains a reference to the colortable (assuming it is
+ not null) it will take care of incrementing the reference count.
+
+ @param pixels Address for the pixels, managed by the caller.
+ @param ctable ColorTable (or null) that matches the specified pixels
+ */
+ void setPixels(void* p, SkColorTable* ctable = NULL);
+
+ /** Copies the bitmap's pixels to the location pointed at by dst and returns
+ true if possible, returns false otherwise.
+
+ In the case when the dstRowBytes matches the bitmap's rowBytes, the copy
+ may be made faster by copying over the dst's per-row padding (for all
+ rows but the last). By setting preserveDstPad to true the caller can
+ disable this optimization and ensure that pixels in the padding are not
+ overwritten.
+
+ Always returns false for RLE formats.
+
+ @param dst Location of destination buffer.
+ @param dstSize Size of destination buffer. Must be large enough to hold
+ pixels using indicated stride.
+ @param dstRowBytes Stride (in bytes) of each row in the buffer. If 0, uses
+ bitmap's internal stride.
+ @param preserveDstPad Must we preserve padding in the dst
+ */
+ bool copyPixelsTo(void* const dst, size_t dstSize, size_t dstRowBytes = 0,
+ bool preserveDstPad = false) const;
+
+ /** Use the standard HeapAllocator to create the pixelref that manages the
+ pixel memory. It will be sized based on the current ImageInfo.
+ If this is called multiple times, a new pixelref object will be created
+ each time.
+
+ If the bitmap retains a reference to the colortable (assuming it is
+ not null) it will take care of incrementing the reference count.
+
+ @param ctable ColorTable (or null) to use with the pixels that will
+ be allocated. Only used if colortype == kIndex_8_SkColorType
+ @return true if the allocation succeeds. If not the pixelref field of
+ the bitmap will be unchanged.
+ */
+ bool SK_WARN_UNUSED_RESULT tryAllocPixels(SkColorTable* ctable = NULL) {
+ return this->tryAllocPixels(NULL, ctable);
+ }
+
+ void allocPixels(SkColorTable* ctable = NULL) {
+ this->allocPixels(NULL, ctable);
+ }
+
+ /** Use the specified Allocator to create the pixelref that manages the
+ pixel memory. It will be sized based on the current ImageInfo.
+ If this is called multiple times, a new pixelref object will be created
+ each time.
+
+ If the bitmap retains a reference to the colortable (assuming it is
+ not null) it will take care of incrementing the reference count.
+
+ @param allocator The Allocator to use to create a pixelref that can
+ manage the pixel memory for the current ImageInfo.
+ If allocator is NULL, the standard HeapAllocator will be used.
+ @param ctable ColorTable (or null) to use with the pixels that will
+ be allocated. Only used if colortype == kIndex_8_SkColorType.
+ If it is non-null and the colortype is not indexed, it will
+ be ignored.
+ @return true if the allocation succeeds. If not the pixelref field of
+ the bitmap will be unchanged.
+ */
+ bool SK_WARN_UNUSED_RESULT tryAllocPixels(Allocator* allocator, SkColorTable* ctable);
+
+ void allocPixels(Allocator* allocator, SkColorTable* ctable) {
+ if (!this->tryAllocPixels(allocator, ctable)) {
+ sk_throw();
+ }
+ }
+
+ /**
+ * Return the current pixelref object or NULL if there is none. This does
+ * not affect the refcount of the pixelref.
+ */
+ SkPixelRef* pixelRef() const { return fPixelRef; }
+
+ /**
+ * A bitmap can reference a subset of a pixelref's pixels. That means the
+ * bitmap's width/height can be <= the dimensions of the pixelref. The
+ * pixelref origin is the x,y location within the pixelref's pixels for
+ * the bitmap's top/left corner. To be valid the following must be true:
+ *
+ * origin_x + bitmap_width <= pixelref_width
+ * origin_y + bitmap_height <= pixelref_height
+ *
+ * pixelRefOrigin() returns this origin, or (0,0) if there is no pixelRef.
+ */
+ SkIPoint pixelRefOrigin() const { return fPixelRefOrigin; }
+
+ /**
+ * Assign a pixelref and origin to the bitmap. Pixelrefs are reference counted,
+ * so the existing one (if any) will be unref'd and the new one will be
+ * ref'd. (x,y) specify the offset within the pixelref's pixels for the
+ * top/left corner of the bitmap. For a bitmap that encompasses the entire
+ * pixels of the pixelref, these will be (0,0).
+ */
+ SkPixelRef* setPixelRef(SkPixelRef* pr, int dx, int dy);
+
+ SkPixelRef* setPixelRef(SkPixelRef* pr, const SkIPoint& origin) {
+ return this->setPixelRef(pr, origin.fX, origin.fY);
+ }
+
+ SkPixelRef* setPixelRef(SkPixelRef* pr) {
+ return this->setPixelRef(pr, 0, 0);
+ }
+
+ /** Call this to ensure that the bitmap points to the current pixel address
+ in the pixelref. Balance it with a call to unlockPixels(). These calls
+ are harmless if there is no pixelref.
+ */
+ void lockPixels() const;
+ /** When you are finished accessing the pixel memory, call this to balance a
+ previous call to lockPixels(). This allows pixelrefs that implement
+ cached/deferred image decoding to know when there are active clients of
+ a given image.
+ */
+ void unlockPixels() const;
+
+ /**
+ * Some bitmaps can return a copy of their pixels for lockPixels(), but
+ * that copy, if modified, will not be pushed back. These bitmaps should
+ * not be used as targets for a raster device/canvas (since all pixel
+ * modifications will be lost when unlockPixels() is called).
+ */
+ // DEPRECATED
+ bool lockPixelsAreWritable() const;
+
+ bool requestLock(SkAutoPixmapUnlock* result) const;
+
+ /** Call this to be sure that the bitmap is valid enough to be drawn (i.e.
+ it has non-null pixels, and if required by its colortype, it has a
+ non-null colortable. Returns true if all of the above are met.
+ */
+ bool readyToDraw() const {
+ return this->getPixels() != NULL &&
+ (this->colorType() != kIndex_8_SkColorType || fColorTable);
+ }
+
+ /** Return the bitmap's colortable, if it uses one (i.e. colorType is
+ Index_8) and the pixels are locked.
+ Otherwise returns NULL. Does not affect the colortable's
+ reference count.
+ */
+ SkColorTable* getColorTable() const { return fColorTable; }
+
+ /** Returns a non-zero, unique value corresponding to the pixels in our
+ pixelref. Each time the pixels are changed (and notifyPixelsChanged
+ is called), a different generation ID will be returned. Finally, if
+ there is no pixelRef then zero is returned.
+ */
+ uint32_t getGenerationID() const;
+
+ /** Call this if you have changed the contents of the pixels. This will in
+ turn cause a different generation ID value to be returned from
+ getGenerationID().
+ */
+ void notifyPixelsChanged() const;
+
+ /**
+ * Fill the entire bitmap with the specified color.
+ * If the bitmap's colortype does not support alpha (e.g. 565) then the alpha
+ * of the color is ignored (treated as opaque). If the colortype only supports
+ * alpha (e.g. A1 or A8) then the color's r,g,b components are ignored.
+ */
+ void eraseColor(SkColor c) const;
+
+ /**
+ * Fill the entire bitmap with the specified color.
+ * If the bitmap's colortype does not support alpha (e.g. 565) then the alpha
+ * of the color is ignored (treated as opaque). If the colortype only supports
+ * alpha (e.g. A1 or A8) then the color's r,g,b components are ignored.
+ */
+ void eraseARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b) const {
+ this->eraseColor(SkColorSetARGB(a, r, g, b));
+ }
+
+ SK_ATTR_DEPRECATED("use eraseARGB or eraseColor")
+ void eraseRGB(U8CPU r, U8CPU g, U8CPU b) const {
+ this->eraseARGB(0xFF, r, g, b);
+ }
+
+ /**
+ * Fill the specified area of this bitmap with the specified color.
+ * If the bitmap's colortype does not support alpha (e.g. 565) then the alpha
+ * of the color is ignored (treated as opaque). If the colortype only supports
+ * alpha (e.g. A1 or A8) then the color's r,g,b components are ignored.
+ */
+ void erase(SkColor c, const SkIRect& area) const;
+
+ // DEPRECATED
+ void eraseArea(const SkIRect& area, SkColor c) const {
+ this->erase(c, area);
+ }
+
+ /**
+ * Return the SkColor of the specified pixel. In most cases this will
+ * require un-premultiplying the color. Alpha only colortypes (e.g. kAlpha_8_SkColorType)
+ * return black with the appropriate alpha set. The value is undefined
+ * for kUnknown_SkColorType or if x or y are out of bounds, or if the bitmap
+ * does not have any pixels (or has not been locked with lockPixels()).
+ */
+ SkColor getColor(int x, int y) const;
+
+ /** Returns the address of the specified pixel. This performs a runtime
+ check to know the size of the pixels, and will return the same answer
+ as the corresponding size-specific method (e.g. getAddr16). Since the
+ check happens at runtime, it is much slower than using a size-specific
+ version. Unlike the size-specific methods, this routine also checks if
+ getPixels() returns null, and returns NULL in that case. The size-specific routines
+ perform a debugging assert that getPixels() is not null, but they do
+ not do any runtime checks.
+ */
+ void* getAddr(int x, int y) const;
+
+ /** Returns the address of the pixel specified by x,y for 32bit pixels.
+ * In debug build, this asserts that the pixels are allocated and locked,
+ * and that the colortype is 32-bit, however none of these checks are performed
+ * in the release build.
+ */
+ inline uint32_t* getAddr32(int x, int y) const;
+
+ /** Returns the address of the pixel specified by x,y for 16bit pixels.
+ * In debug build, this asserts that the pixels are allocated and locked,
+ * and that the colortype is 16-bit, however none of these checks are performed
+ * in the release build.
+ */
+ inline uint16_t* getAddr16(int x, int y) const;
+
+ /** Returns the address of the pixel specified by x,y for 8bit pixels.
+ * In debug build, this asserts that the pixels are allocated and locked,
+ * and that the colortype is 8-bit, however none of these checks are performed
+ * in the release build.
+ */
+ inline uint8_t* getAddr8(int x, int y) const;
+
+ /** Returns the color corresponding to the pixel specified by x,y for
+ * colortable based bitmaps.
+ * In debug build, this asserts that the pixels are allocated and locked,
+ * that the colortype is indexed, and that the colortable is allocated,
+ * however none of these checks are performed in the release build.
+ */
+ inline SkPMColor getIndex8Color(int x, int y) const;
+
+ /** Set dst to be a subset of this bitmap. If possible, it will share the
+ pixel memory, and just point into a subset of it. However, if the colortype
+ does not support this, a local copy will be made and associated with
+ the dst bitmap. If the subset rectangle, intersected with the bitmap's
+ dimensions, is empty, or if the colortype is unsupported, false will be
+ returned and dst will be untouched.
+ @param dst The bitmap that will be set to a subset of this bitmap
+ @param subset The rectangle of pixels in this bitmap that dst will
+ reference.
+ @return true if the subset copy was successfully made.
+ */
+ bool extractSubset(SkBitmap* dst, const SkIRect& subset) const;
+
+ /** Makes a deep copy of this bitmap, respecting the requested colorType,
+ * and allocating the dst pixels on the cpu.
+ * Returns false if either there is an error (i.e. the src does not have
+ * pixels) or the request cannot be satisfied (e.g. the src has per-pixel
+ * alpha, and the requested colortype does not support alpha).
+ * @param dst The bitmap to be sized and allocated
+ * @param ct The desired colorType for dst
+ * @param allocator Allocator used to allocate the pixelref for the dst
+ * bitmap. If this is null, the standard HeapAllocator
+ * will be used.
+ * @return true if the copy was made.
+ */
+ bool copyTo(SkBitmap* dst, SkColorType ct, Allocator* = NULL) const;
+
+ bool copyTo(SkBitmap* dst, Allocator* allocator = NULL) const {
+ return this->copyTo(dst, this->colorType(), allocator);
+ }
+
+ /**
+ * Copy the bitmap's pixels into the specified buffer (pixels + rowBytes),
+ * converting them into the requested format (SkImageInfo). The src pixels are read
+ * starting at the specified (srcX,srcY) offset, relative to the top-left corner.
+ *
+ * The specified ImageInfo and (srcX,srcY) offset specifies a source rectangle
+ *
+ * srcR.setXYWH(srcX, srcY, dstInfo.width(), dstInfo.height());
+ *
+ * srcR is intersected with the bounds of the bitmap. If this intersection is not empty,
+ * then we have two sets of pixels (of equal size). Replace the dst pixels with the
+ * corresponding src pixels, performing any colortype/alphatype transformations needed
+ * (in the case where the src and dst have different colortypes or alphatypes).
+ *
+ * This call can fail, returning false, for several reasons:
+ * - If srcR does not intersect the bitmap bounds.
+ * - If the requested colortype/alphatype cannot be converted from the src's types.
+ * - If the src pixels are not available.
+ */
+ bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY) const;
+
+ /**
+ * Returns true if this bitmap's pixels can be converted into the requested
+ * colorType, such that copyTo() could succeed.
+ */
+ bool canCopyTo(SkColorType colorType) const;
+
+ /** Makes a deep copy of this bitmap, keeping the copied pixels
+ * in the same domain as the source: If the src pixels are allocated for
+ * the cpu, then so will the dst. If the src pixels are allocated on the
+ * gpu (typically as a texture), then it will do the same for the dst.
+ * If the request cannot be fulfilled, returns false and dst is unmodified.
+ */
+ bool deepCopyTo(SkBitmap* dst) const;
+
+#ifdef SK_BUILD_FOR_ANDROID
+ bool hasHardwareMipMap() const {
+ return (fFlags & kHasHardwareMipMap_Flag) != 0;
+ }
+
+ void setHasHardwareMipMap(bool hasHardwareMipMap) {
+ if (hasHardwareMipMap) {
+ fFlags |= kHasHardwareMipMap_Flag;
+ } else {
+ fFlags &= ~kHasHardwareMipMap_Flag;
+ }
+ }
+#endif
+
+ bool extractAlpha(SkBitmap* dst) const {
+ return this->extractAlpha(dst, NULL, NULL, NULL);
+ }
+
+ bool extractAlpha(SkBitmap* dst, const SkPaint* paint,
+ SkIPoint* offset) const {
+ return this->extractAlpha(dst, paint, NULL, offset);
+ }
+
+ /** Set dst to contain the alpha layer of this bitmap. If the destination
+ bitmap fails to be initialized, e.g. because the allocator can't allocate
+ pixels for it, dst will not be modified and false will be returned.
+
+ @param dst The bitmap to be filled with alpha layer
+ @param paint The paint to draw with
+ @param allocator Allocator used to allocate the pixelref for the dst
+ bitmap. If this is null, the standard HeapAllocator
+ will be used.
+ @param offset If not null, it is set to top-left coordinate to position
+ the returned bitmap so that it visually lines up with the
+ original
+ */
+ bool extractAlpha(SkBitmap* dst, const SkPaint* paint, Allocator* allocator,
+ SkIPoint* offset) const;
+
+ /**
+ * If the pixels are available from this bitmap (w/o locking) return true, and fill out the
+ * specified pixmap (if not null). If the pixels are not available (either because there are
+ * none, or because accessing them would require locking or other machinery) return false and
+ * ignore the pixmap parameter.
+ *
+ * Note: if this returns true, the results (in the pixmap) are only valid until the bitmap
+ * is changed in any way, at which point the results become invalid.
+ */
+ bool peekPixels(SkPixmap*) const;
+
+ SkDEBUGCODE(void validate() const;)
+
+ class Allocator : public SkRefCnt {
+ public:
+ /** Allocate the pixel memory for the bitmap, given its dimensions and
+ colortype. Return true on success, where success means either setPixels
+ or setPixelRef was called. The pixels need not be locked when this
+ returns. If the colortype requires a colortable, it also must be
+ installed via setColorTable. If false is returned, the bitmap and
+ colortable should be left unchanged.
+ */
+ virtual bool allocPixelRef(SkBitmap*, SkColorTable*) = 0;
+ private:
+ typedef SkRefCnt INHERITED;
+ };
+
+ /** Subclass of Allocator that returns a pixelref that allocates its pixel
+ memory from the heap. This is the default Allocator invoked by
+ allocPixels().
+ */
+ class HeapAllocator : public Allocator {
+ public:
+ bool allocPixelRef(SkBitmap*, SkColorTable*) override;
+ };
+
+ class RLEPixels {
+ public:
+ RLEPixels(int width, int height);
+ virtual ~RLEPixels();
+
+ uint8_t* packedAtY(int y) const {
+ SkASSERT((unsigned)y < (unsigned)fHeight);
+ return fYPtrs[y];
+ }
+
+ // called by subclasses during creation
+ void setPackedAtY(int y, uint8_t* addr) {
+ SkASSERT((unsigned)y < (unsigned)fHeight);
+ fYPtrs[y] = addr;
+ }
+
+ private:
+ uint8_t** fYPtrs;
+ int fHeight;
+ };
+
+ SK_TO_STRING_NONVIRT()
+
+private:
+ mutable SkPixelRef* fPixelRef;
+ mutable int fPixelLockCount;
+ // These are just caches from the locked pixelref
+ mutable void* fPixels;
+ mutable SkColorTable* fColorTable; // only meaningful for kIndex8
+
+ SkIPoint fPixelRefOrigin;
+
+ enum Flags {
+ kImageIsVolatile_Flag = 0x02,
+#ifdef SK_BUILD_FOR_ANDROID
+ /* A hint for the renderer responsible for drawing this bitmap
+ * indicating that it should attempt to use mipmaps when this bitmap
+ * is drawn scaled down.
+ */
+ kHasHardwareMipMap_Flag = 0x08,
+#endif
+ };
+
+ SkImageInfo fInfo;
+ uint32_t fRowBytes;
+ uint8_t fFlags;
+
+ /* Unreference any pixelrefs or colortables
+ */
+ void freePixels();
+ void updatePixelsFromRef() const;
+
+ static void WriteRawPixels(SkWriteBuffer*, const SkBitmap&);
+ static bool ReadRawPixels(SkReadBuffer*, SkBitmap*);
+
+ friend class SkReadBuffer; // unflatten, rawpixels
+ friend class SkBinaryWriteBuffer; // rawpixels
+ friend struct SkBitmapProcState;
+};
+
+class SkAutoLockPixels : SkNoncopyable {
+public:
+ SkAutoLockPixels(const SkBitmap& bm, bool doLock = true) : fBitmap(bm) {
+ fDidLock = doLock;
+ if (doLock) {
+ bm.lockPixels();
+ }
+ }
+ ~SkAutoLockPixels() {
+ if (fDidLock) {
+ fBitmap.unlockPixels();
+ }
+ }
+
+private:
+ const SkBitmap& fBitmap;
+ bool fDidLock;
+};
+//TODO(mtklein): uncomment when 71713004 lands and Chromium's fixed.
+//#define SkAutoLockPixels(...) SK_REQUIRE_LOCAL_VAR(SkAutoLockPixels)
+
+///////////////////////////////////////////////////////////////////////////////
+
+inline uint32_t* SkBitmap::getAddr32(int x, int y) const {
+ SkASSERT(fPixels);
+ SkASSERT(4 == this->bytesPerPixel());
+ SkASSERT((unsigned)x < (unsigned)this->width() && (unsigned)y < (unsigned)this->height());
+ return (uint32_t*)((char*)fPixels + y * fRowBytes + (x << 2));
+}
+
+inline uint16_t* SkBitmap::getAddr16(int x, int y) const {
+ SkASSERT(fPixels);
+ SkASSERT(2 == this->bytesPerPixel());
+ SkASSERT((unsigned)x < (unsigned)this->width() && (unsigned)y < (unsigned)this->height());
+ return (uint16_t*)((char*)fPixels + y * fRowBytes + (x << 1));
+}
+
+inline uint8_t* SkBitmap::getAddr8(int x, int y) const {
+ SkASSERT(fPixels);
+ SkASSERT(1 == this->bytesPerPixel());
+ SkASSERT((unsigned)x < (unsigned)this->width() && (unsigned)y < (unsigned)this->height());
+ return (uint8_t*)fPixels + y * fRowBytes + x;
+}
+
+inline SkPMColor SkBitmap::getIndex8Color(int x, int y) const {
+ SkASSERT(fPixels);
+ SkASSERT(kIndex_8_SkColorType == this->colorType());
+ SkASSERT((unsigned)x < (unsigned)this->width() && (unsigned)y < (unsigned)this->height());
+ SkASSERT(fColorTable);
+ return (*fColorTable)[*((const uint8_t*)fPixels + y * fRowBytes + x)];
+}
+
+#endif
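
A small usage sketch built only from the APIs declared in the header above: allocate an N32 bitmap, fill it, and read back one pixel under a lock.

    #include "SkBitmap.h"
    #include "SkColor.h"

    // Allocate, fill, and verify one pixel of a 16x16 premultiplied N32 bitmap.
    static bool FillAndCheck() {
        SkBitmap bm;
        if (!bm.tryAllocN32Pixels(16, 16)) {
            return false;                          // allocation failed; bitmap is left empty
        }
        bm.eraseColor(SK_ColorRED);                // fill every pixel
        SkAutoLockPixels lock(bm);                 // balances lockPixels()/unlockPixels()
        return bm.getColor(0, 0) == SK_ColorRED;   // getColor() un-premultiplies for us
    }
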
diff --git a/gfx/skia/skia/include/core/SkBitmapDevice.h b/gfx/skia/skia/include/core/SkBitmapDevice.h
new file mode 100644
index 000000000..31c0aa3a3
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkBitmapDevice.h
@@ -0,0 +1,181 @@
+
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapDevice_DEFINED
+#define SkBitmapDevice_DEFINED
+
+#include "SkBitmap.h"
+#include "SkCanvas.h"
+#include "SkColor.h"
+#include "SkDevice.h"
+#include "SkImageInfo.h"
+#include "SkRect.h"
+#include "SkScalar.h"
+#include "SkSize.h"
+#include "SkSurfaceProps.h"
+#include "SkTypes.h"
+
+class SkDraw;
+class SkImageFilterCache;
+class SkMatrix;
+class SkPaint;
+class SkPath;
+class SkPixelRef;
+class SkPixmap;
+class SkRRect;
+class SkSurface;
+class SkXfermode;
+struct SkPoint;
+
+///////////////////////////////////////////////////////////////////////////////
+class SK_API SkBitmapDevice : public SkBaseDevice {
+public:
+ /**
+ * Construct a new device with the specified bitmap as its backend. It is
+ * valid for the bitmap to have no pixels associated with it. In that case,
+ * any drawing to this device will have no effect.
+ */
+ SkBitmapDevice(const SkBitmap& bitmap);
+
+ /**
+ * Create a new device along with its requisite pixel memory using
+ * default SkSurfaceProps (i.e., kLegacyFontHost_InitType-style).
+ * Note: this entry point is slated for removal - no one should call it.
+ */
+ static SkBitmapDevice* Create(const SkImageInfo& info);
+
+ /**
+ * Construct a new device with the specified bitmap as its backend. It is
+ * valid for the bitmap to have no pixels associated with it. In that case,
+ * any drawing to this device will have no effect.
+ */
+ SkBitmapDevice(const SkBitmap& bitmap, const SkSurfaceProps& surfaceProps);
+
+ static SkBitmapDevice* Create(const SkImageInfo&, const SkSurfaceProps&);
+
+protected:
+ bool onShouldDisableLCD(const SkPaint&) const override;
+
+ /** These are called inside the per-device-layer loop for each draw call.
+ When these are called, we have already applied any saveLayer operations,
+ and are handling any looping from the paint, and any effects from the
+ DrawFilter.
+ */
+ void drawPaint(const SkDraw&, const SkPaint& paint) override;
+ virtual void drawPoints(const SkDraw&, SkCanvas::PointMode mode, size_t count,
+ const SkPoint[], const SkPaint& paint) override;
+ virtual void drawRect(const SkDraw&, const SkRect& r,
+ const SkPaint& paint) override;
+ virtual void drawOval(const SkDraw&, const SkRect& oval,
+ const SkPaint& paint) override;
+ virtual void drawRRect(const SkDraw&, const SkRRect& rr,
+ const SkPaint& paint) override;
+
+ /**
+ * If pathIsMutable, then the implementation is allowed to cast path to a
+ * non-const pointer and modify it in place (as an optimization). Canvas
+ * may do this to implement helpers such as drawOval, by placing a temp
+ * path on the stack to hold the representation of the oval.
+ *
+ * If prePathMatrix is not null, it should logically be applied before any
+ * stroking or other effects. If there are no effects on the paint that
+ * affect the geometry/rasterization, then the pre matrix can just be
+ * pre-concated with the current matrix.
+ */
+ virtual void drawPath(const SkDraw&, const SkPath& path,
+ const SkPaint& paint,
+ const SkMatrix* prePathMatrix = NULL,
+ bool pathIsMutable = false) override;
+ virtual void drawBitmap(const SkDraw&, const SkBitmap& bitmap,
+ const SkMatrix& matrix, const SkPaint& paint) override;
+ virtual void drawSprite(const SkDraw&, const SkBitmap& bitmap,
+ int x, int y, const SkPaint& paint) override;
+
+ /**
+ * The default impl. will create a bitmap-shader from the bitmap,
+ * and call drawRect with it.
+ */
+ void drawBitmapRect(const SkDraw&, const SkBitmap&, const SkRect*, const SkRect&,
+ const SkPaint&, SkCanvas::SrcRectConstraint) override;
+
+ /**
+ * Does not handle text decoration.
+ * Decorations (underline and strike-through) will be handled by SkCanvas.
+ */
+ virtual void drawText(const SkDraw&, const void* text, size_t len,
+ SkScalar x, SkScalar y, const SkPaint& paint) override;
+ virtual void drawPosText(const SkDraw&, const void* text, size_t len,
+ const SkScalar pos[], int scalarsPerPos,
+ const SkPoint& offset, const SkPaint& paint) override;
+ virtual void drawVertices(const SkDraw&, SkCanvas::VertexMode, int vertexCount,
+ const SkPoint verts[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint) override;
+ virtual void drawDevice(const SkDraw&, SkBaseDevice*, int x, int y, const SkPaint&) override;
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ void drawSpecial(const SkDraw&, SkSpecialImage*, int x, int y, const SkPaint&) override;
+ sk_sp<SkSpecialImage> makeSpecial(const SkBitmap&) override;
+ sk_sp<SkSpecialImage> makeSpecial(const SkImage*) override;
+ sk_sp<SkSpecialImage> snapSpecial() override;
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ /** Update the pixels in the bitmap as needed, so that the caller can
+ access the pixels directly. Note: only the pixels field should be
+ altered. The config/width/height/rowbytes must remain unchanged.
+ @return the device contents as a bitmap
+ */
+#ifdef SK_SUPPORT_LEGACY_ACCESSBITMAP
+ const SkBitmap& onAccessBitmap() override;
+#else
+ const SkBitmap& onAccessBitmap();
+#endif
+
+ SkPixelRef* getPixelRef() const { return fBitmap.pixelRef(); }
+ // just for subclasses, to assign a custom pixelref
+ SkPixelRef* setPixelRef(SkPixelRef* pr) {
+ fBitmap.setPixelRef(pr);
+ return pr;
+ }
+
+ bool onReadPixels(const SkImageInfo&, void*, size_t, int x, int y) override;
+ bool onWritePixels(const SkImageInfo&, const void*, size_t, int, int) override;
+ bool onPeekPixels(SkPixmap*) override;
+ bool onAccessPixels(SkPixmap*) override;
+
+private:
+ friend class SkCanvas;
+ friend struct DeviceCM; //for setMatrixClip
+ friend class SkDraw;
+ friend class SkDrawIter;
+ friend class SkDeviceFilteredPaint;
+
+ friend class SkSurface_Raster;
+
+ // used to change the backend's pixels (and possibly config/rowbytes)
+ // but cannot change the width/height, so there should be no change to
+ // any clip information.
+ void replaceBitmapBackendForRasterSurface(const SkBitmap&) override;
+
+ SkBaseDevice* onCreateDevice(const CreateInfo&, const SkPaint*) override;
+
+ sk_sp<SkSurface> makeSurface(const SkImageInfo&, const SkSurfaceProps&) override;
+
+ SkImageFilterCache* getImageFilterCache() override;
+
+ SkBitmap fBitmap;
+
+ void setNewSize(const SkISize&); // Used by SkCanvas for resetForNextPicture().
+
+ typedef SkBaseDevice INHERITED;
+};
+
+#endif // SkBitmapDevice_DEFINED
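
A sketch of the device/canvas relationship, for orientation only; in normal use the SkCanvas(bitmap) constructor or SkSurface creates the raster device internally, so constructing SkBitmapDevice directly is rarely needed. drawColor() is part of SkCanvas but not shown in this excerpt.

    #include "SkBitmap.h"
    #include "SkBitmapDevice.h"
    #include "SkCanvas.h"

    // Back a canvas with an explicit raster device; draws land in bm's pixels.
    static void DrawViaBitmapDevice() {
        SkBitmap bm;
        bm.allocN32Pixels(64, 64);
        sk_sp<SkBitmapDevice> device(new SkBitmapDevice(bm));
        SkCanvas canvas(device.get());   // the canvas refs the device for its lifetime
        canvas.drawColor(SK_ColorBLUE);
    }
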
diff --git a/gfx/skia/skia/include/core/SkBlendMode.h b/gfx/skia/skia/include/core/SkBlendMode.h
new file mode 100644
index 000000000..eb3469f25
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkBlendMode.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlendMode_DEFINED
+#define SkBlendMode_DEFINED
+
+enum class SkBlendMode {
+ kClear, //!< [0, 0]
+ kSrc, //!< [Sa, Sc]
+ kDst, //!< [Da, Dc]
+ kSrcOver, //!< [Sa + Da * (1 - Sa), Sc + Dc * (1 - Sa)]
+ kDstOver, //!< [Da + Sa * (1 - Da), Dc + Sc * (1 - Da)]
+ kSrcIn, //!< [Sa * Da, Sc * Da]
+ kDstIn, //!< [Da * Sa, Dc * Sa]
+ kSrcOut, //!< [Sa * (1 - Da), Sc * (1 - Da)]
+ kDstOut, //!< [Da * (1 - Sa), Dc * (1 - Sa)]
+ kSrcATop, //!< [Da, Sc * Da + Dc * (1 - Sa)]
+ kDstATop, //!< [Sa, Dc * Sa + Sc * (1 - Da)]
+ kXor, //!< [Sa + Da - 2 * Sa * Da, Sc * (1 - Da) + Dc * (1 - Sa)]
+ kPlus, //!< [Sa + Da, Sc + Dc]
+ kModulate, // multiplies all components (= alpha and color)
+
+ // Following blend modes are defined in the CSS Compositing standard:
+ // https://dvcs.w3.org/hg/FXTF/rawfile/tip/compositing/index.html#blending
+ kScreen,
+ kLastCoeffMode = kScreen,
+
+ kOverlay,
+ kDarken,
+ kLighten,
+ kColorDodge,
+ kColorBurn,
+ kHardLight,
+ kSoftLight,
+ kDifference,
+ kExclusion,
+ kMultiply,
+ kLastSeparableMode = kMultiply,
+
+ kHue,
+ kSaturation,
+ kColor,
+ kLuminosity,
+ kLastMode = kLuminosity
+};
+
+#endif
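
The bracketed comments above are Porter-Duff formulas over premultiplied [alpha, color]. As an illustration (not Skia's optimized blitter code), the kSrcOver row spelled out for one component pair:

    // One premultiplied alpha/color pair; illustrates the kSrcOver row above.
    struct PremulF { float a, c; };

    static PremulF SrcOver(PremulF src, PremulF dst) {
        PremulF out;
        out.a = src.a + dst.a * (1.0f - src.a);   // Sa + Da * (1 - Sa)
        out.c = src.c + dst.c * (1.0f - src.a);   // Sc + Dc * (1 - Sa)
        return out;
    }
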
diff --git a/gfx/skia/skia/include/core/SkBlitRow.h b/gfx/skia/skia/include/core/SkBlitRow.h
new file mode 100644
index 000000000..56121eba7
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkBlitRow.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlitRow_DEFINED
+#define SkBlitRow_DEFINED
+
+#include "SkBitmap.h"
+#include "SkColor.h"
+
+class SkBlitRow {
+public:
+ enum Flags16 {
+ //! If set, the alpha parameter will be != 255
+ kGlobalAlpha_Flag = 0x01,
+ //! If set, the src colors may have alpha != 255
+ kSrcPixelAlpha_Flag = 0x02,
+ //! If set, the resulting 16bit colors should be dithered
+ kDither_Flag = 0x04
+ };
+
+ /** Function pointer that reads a scanline of src SkPMColors, and writes
+ a corresponding scanline of 16bit colors (specific format based on the
+ config passed to the Factory.
+
+ The x,y params provide the dithering phase for the start of the scanline
+
+ @param alpha A global alpha to be applied to all of the src colors
+ @param x The x coordinate of the beginning of the scanline
+ @param y The y coordinate of the scanline
+ */
+ typedef void (*Proc16)(uint16_t dst[], const SkPMColor src[], int count,
+ U8CPU alpha, int x, int y);
+
+ static Proc16 Factory16(unsigned flags);
+
+ /**
+ * Function pointer that blends a single src color onto a scanline of dst colors.
+ *
+ * The x,y params provide the dithering phase for the start of the scanline
+ */
+ typedef void (*ColorProc16)(uint16_t dst[], SkPMColor src, int count, int x, int y);
+
+ // Note : we ignore the kGlobalAlpha_Flag setting, but do respect kSrcPixelAlpha_Flag
+ static ColorProc16 ColorFactory16(unsigned flags);
+
+ ///////////// D32 version
+
+ enum Flags32 {
+ kGlobalAlpha_Flag32 = 1 << 0,
+ kSrcPixelAlpha_Flag32 = 1 << 1
+ };
+
+ /** Function pointer that blends 32bit colors onto a 32bit destination.
+ @param dst array of dst 32bit colors
+ @param src array of src 32bit colors (w/ or w/o alpha)
+ @param count number of colors to blend
+ @param alpha global alpha to be applied to all src colors
+ */
+ typedef void (*Proc32)(uint32_t dst[], const SkPMColor src[], int count, U8CPU alpha);
+
+ static Proc32 Factory32(unsigned flags32);
+
+ /** Blend a single color onto a row of S32 pixels, writing the result
+ into a row of D32 pixels. src and dst may be the same memory, but
+ if they are not, they may not overlap.
+ */
+ static void Color32(SkPMColor dst[], const SkPMColor src[], int count, SkPMColor color);
+
+ /** These static functions are called by the Factory and Factory32
+ functions, and should return either NULL, or a
+ platform-specific function-ptr to be used in place of the
+ system default.
+ */
+
+ static Proc32 PlatformProcs32(unsigned flags);
+
+ static Proc16 PlatformFactory565(unsigned flags);
+ static ColorProc16 PlatformColorFactory565(unsigned flags);
+
+private:
+ enum {
+ kFlags16_Mask = 7,
+ kFlags32_Mask = 3
+ };
+};
+
+#endif
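
A usage sketch built from the declarations above: ask the factory for a (possibly platform-optimized) Proc32 and blend one row of premultiplied pixels with a global alpha.

    #include "SkBlitRow.h"

    // Blend a row of src pixels over dst with a global alpha.
    static void BlendRow(uint32_t dst[], const SkPMColor src[], int count, U8CPU alpha) {
        unsigned flags = SkBlitRow::kGlobalAlpha_Flag32 | SkBlitRow::kSrcPixelAlpha_Flag32;
        SkBlitRow::Proc32 proc = SkBlitRow::Factory32(flags);
        proc(dst, src, count, alpha);   // writes the blended result into dst
    }
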
diff --git a/gfx/skia/skia/include/core/SkBlurTypes.h b/gfx/skia/skia/include/core/SkBlurTypes.h
new file mode 100644
index 000000000..afbec19b6
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkBlurTypes.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlurTypes_DEFINED
+#define SkBlurTypes_DEFINED
+
+#include "SkTypes.h"
+
+enum SkBlurStyle {
+ kNormal_SkBlurStyle, //!< fuzzy inside and outside
+ kSolid_SkBlurStyle, //!< solid inside, fuzzy outside
+ kOuter_SkBlurStyle, //!< nothing inside, fuzzy outside
+ kInner_SkBlurStyle, //!< fuzzy inside, nothing outside
+
+ kLastEnum_SkBlurStyle = kInner_SkBlurStyle
+};
+
+enum SkBlurQuality {
+ kLow_SkBlurQuality, //!< e.g. box filter
+ kHigh_SkBlurQuality, //!< e.g. 3-pass similar to gaussian
+
+ kLastEnum_SkBlurQuality
+};
+
+#endif
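
These styles are consumed by mask filters. A hedged sketch, assuming the SkBlurMaskFilter factory from include/effects (not part of this hunk):

    #include "SkBlurMaskFilter.h"
    #include "SkPaint.h"

    // kOuter_SkBlurStyle: nothing inside the shape, a fuzzy halo outside it.
    static void UseOuterBlur(SkPaint* paint) {
        paint->setMaskFilter(SkBlurMaskFilter::Make(kOuter_SkBlurStyle, 3.0f));
    }
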
diff --git a/gfx/skia/skia/include/core/SkCanvas.h b/gfx/skia/skia/include/core/SkCanvas.h
new file mode 100644
index 000000000..5078f4255
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkCanvas.h
@@ -0,0 +1,1774 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCanvas_DEFINED
+#define SkCanvas_DEFINED
+
+#include "SkTypes.h"
+#include "SkBlendMode.h"
+#include "SkBitmap.h"
+#include "SkClipOp.h"
+#include "SkDeque.h"
+#include "SkImage.h"
+#include "SkPaint.h"
+#include "SkRefCnt.h"
+#include "SkRegion.h"
+#include "SkSurfaceProps.h"
+#include "SkXfermode.h"
+#include "SkLights.h"
+#include "../private/SkShadowParams.h"
+
+class GrContext;
+class GrDrawContext;
+class SkBaseDevice;
+class SkCanvasClipVisitor;
+class SkClipStack;
+class SkData;
+class SkDraw;
+class SkDrawable;
+class SkDrawFilter;
+class SkImageFilter;
+class SkMetaData;
+class SkPath;
+class SkPicture;
+class SkPixmap;
+class SkRasterClip;
+class SkRRect;
+struct SkRSXform;
+class SkSurface;
+class SkSurface_Base;
+class SkTextBlob;
+
+//#define SK_SUPPORT_LEGACY_CLIP_REGIONOPS
+
+/** \class SkCanvas
+
+ A Canvas encapsulates all of the state about drawing into a device (bitmap).
+ This includes a reference to the device itself, and a stack of matrix/clip
+ values. For any given draw call (e.g. drawRect), the geometry of the object
+ being drawn is transformed by the concatenation of all the matrices in the
+ stack. The transformed geometry is clipped by the intersection of all of
+ the clips in the stack.
+
+ While the Canvas holds the state of the drawing device, the state (style)
+ of the object being drawn is held by the Paint, which is provided as a
+ parameter to each of the draw() methods. The Paint holds attributes such as
+ color, typeface, textSize, strokeWidth, shader (e.g. gradients, patterns),
+ etc.
+*/
+class SK_API SkCanvas : public SkRefCnt {
+ enum PrivateSaveLayerFlags {
+ kDontClipToLayer_PrivateSaveLayerFlag = 1U << 31,
+ };
+
+public:
+#ifdef SK_SUPPORT_LEGACY_CLIP_REGIONOPS
+ typedef SkRegion::Op ClipOp;
+
+ static const ClipOp kDifference_Op = SkRegion::kDifference_Op;
+ static const ClipOp kIntersect_Op = SkRegion::kIntersect_Op;
+ static const ClipOp kUnion_Op = SkRegion::kUnion_Op;
+ static const ClipOp kXOR_Op = SkRegion::kXOR_Op;
+ static const ClipOp kReverseDifference_Op = SkRegion::kReverseDifference_Op;
+ static const ClipOp kReplace_Op = SkRegion::kReplace_Op;
+#else
+ typedef SkClipOp ClipOp;
+
+ static const ClipOp kDifference_Op = kDifference_SkClipOp;
+ static const ClipOp kIntersect_Op = kIntersect_SkClipOp;
+ static const ClipOp kUnion_Op = kUnion_SkClipOp;
+ static const ClipOp kXOR_Op = kXOR_SkClipOp;
+ static const ClipOp kReverseDifference_Op = kReverseDifference_SkClipOp;
+ static const ClipOp kReplace_Op = kReplace_SkClipOp;
+#endif
+ /**
+ * Attempt to allocate a raster canvas, matching the ImageInfo, that will draw directly into the
+ * specified pixels. To access the pixels after drawing to them, the caller should call
+ * flush() or call peekPixels(...).
+ *
+ * On failure, return NULL. This can fail for several reasons:
+ * 1. invalid ImageInfo (e.g. negative dimensions)
+ * 2. unsupported ImageInfo for a canvas
+ * - kUnknown_SkColorType, kIndex_8_SkColorType
+ * - kUnknown_SkAlphaType
+ * - this list is not complete, so others may also be unsupported
+ *
+ * Note: it is valid to request a supported ImageInfo, but with zero
+ * dimensions.
+ */
+ static SkCanvas* NewRasterDirect(const SkImageInfo&, void*, size_t);
+
+ static SkCanvas* NewRasterDirectN32(int width, int height, SkPMColor* pixels, size_t rowBytes) {
+ return NewRasterDirect(SkImageInfo::MakeN32Premul(width, height), pixels, rowBytes);
+ }
+
+ /**
+ * Creates an empty canvas with no backing device/pixels, and zero
+ * dimensions.
+ */
+ SkCanvas();
+
+ /**
+ * Creates a canvas of the specified dimensions, but explicitly not backed
+ * by any device/pixels. Typically this is used by subclasses that handle
+ * the draw calls in some other way.
+ */
+ SkCanvas(int width, int height, const SkSurfaceProps* = NULL);
+
+ /** Construct a canvas with the specified device to draw into.
+
+ @param device Specifies a device for the canvas to draw into.
+ */
+ explicit SkCanvas(SkBaseDevice* device);
+
+ /** Construct a canvas with the specified bitmap to draw into.
+ @param bitmap Specifies a bitmap for the canvas to draw into. Its
+ structure is copied to the canvas.
+ */
+ explicit SkCanvas(const SkBitmap& bitmap);
+
+ /** Construct a canvas with the specified bitmap to draw into.
+ @param bitmap Specifies a bitmap for the canvas to draw into. Its
+ structure is copied to the canvas.
+ @param props New canvas surface properties.
+ */
+ SkCanvas(const SkBitmap& bitmap, const SkSurfaceProps& props);
+
+ virtual ~SkCanvas();
+
+ SkMetaData& getMetaData();
+
+ /**
+ * Return ImageInfo for this canvas. If the canvas is not backed by pixels
+ * (cpu or gpu), then the info's ColorType will be kUnknown_SkColorType.
+ */
+ SkImageInfo imageInfo() const;
+
+ /**
+ * If the canvas is backed by pixels (cpu or gpu), this writes a copy of the SurfaceProps
+ * for the canvas to the location supplied by the caller, and returns true. Otherwise,
+ * return false and leave the supplied props unchanged.
+ */
+ bool getProps(SkSurfaceProps*) const;
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ /**
+ * Trigger the immediate execution of all pending draw operations. For the GPU
+ * backend this will resolve all rendering to the GPU surface backing the
+ * SkSurface that owns this canvas.
+ */
+ void flush();
+
+ /**
+ * Gets the size of the base or root layer in global canvas coordinates. The
+ * origin of the base layer is always (0,0). The current drawable area may be
+ * smaller (due to clipping or saveLayer).
+ */
+ virtual SkISize getBaseLayerSize() const;
+
+ /**
+ * DEPRECATED: call getBaseLayerSize
+ */
+ SkISize getDeviceSize() const { return this->getBaseLayerSize(); }
+
+ /**
+ * DEPRECATED.
+ * Return the canvas' device object, which may be null. The device holds
+ * the bitmap of the pixels that the canvas draws into. The reference count
+ * of the returned device is not changed by this call.
+ */
+#ifndef SK_SUPPORT_LEGACY_GETDEVICE
+protected: // Can we make this private?
+#endif
+ SkBaseDevice* getDevice() const;
+public:
+ SkBaseDevice* getDevice_just_for_deprecated_compatibility_testing() const {
+ return this->getDevice();
+ }
+
+ /**
+ * saveLayer() can create another device (which is later drawn onto
+ * the previous device). getTopDevice() returns the top-most device currently
+ * installed. Note that this can change on other calls like save/restore,
+ * so do not access this device after subsequent canvas calls.
+ * The reference count of the device is not changed.
+ *
+ * @param updateMatrixClip If this is true, then before the device is
+ * returned, we ensure that it has been notified about the current
+ * matrix and clip. Note: this happens automatically when the device
+ * is drawn to, but is optional here, as there is a small perf hit
+ * sometimes.
+ */
+#ifndef SK_SUPPORT_LEGACY_GETTOPDEVICE
+private:
+#endif
+ SkBaseDevice* getTopDevice(bool updateMatrixClip = false) const;
+public:
+
+ /**
+ * Create a new surface matching the specified info, one that attempts to
+ * be maximally compatible when used with this canvas. If there is no matching Surface type,
+ * NULL is returned.
+ *
+ * If surfaceprops is specified, those are passed to the new surface, otherwise the new surface
+ * inherits the properties of the surface that owns this canvas. If this canvas has no parent
+ * surface, then the new surface is created with default properties.
+ */
+ sk_sp<SkSurface> makeSurface(const SkImageInfo&, const SkSurfaceProps* = nullptr);
+#ifdef SK_SUPPORT_LEGACY_NEW_SURFACE_API
+ SkSurface* newSurface(const SkImageInfo& info, const SkSurfaceProps* props = NULL);
+#endif
+
+ /**
+ * Return the GPU context of the device that is associated with the canvas.
+ * For a canvas with non-GPU device, NULL is returned.
+ */
+ GrContext* getGrContext();
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ /**
+ * If the canvas has writable pixels in its top layer (and is not recording to a picture
+ * or other non-raster target) and has direct access to its pixels (i.e. they are in
+ * local RAM) return the address of those pixels, and if not null,
+ * return the ImageInfo, rowBytes and origin. The returned address is only valid
+ * while the canvas object is in scope and unchanged. Any API calls made on
+ * canvas (or its parent surface if any) will invalidate the
+ * returned address (and associated information).
+ *
+ * On failure, returns NULL and the info, rowBytes, and origin parameters are ignored.
+ */
+ void* accessTopLayerPixels(SkImageInfo* info, size_t* rowBytes, SkIPoint* origin = NULL);
+
+ /**
+ * If the canvas has readable pixels in its base layer (and is not recording to a picture
+ * or other non-raster target) and has direct access to its pixels (i.e. they are in
+ * local RAM) return true, and if not null, return in the pixmap parameter information about
+ * the pixels. The pixmap's pixel address is only valid
+ * while the canvas object is in scope and unchanged. Any API calls made on
+ * canvas (or its parent surface if any) will invalidate the pixel address
+ * (and associated information).
+ *
+ * On failure, returns false and the pixmap parameter will be ignored.
+ */
+ bool peekPixels(SkPixmap*);
+
+#ifdef SK_SUPPORT_LEGACY_PEEKPIXELS_PARMS
+ const void* peekPixels(SkImageInfo* info, size_t* rowBytes);
+#endif
+
+ /**
+ * Copy the pixels from the base-layer into the specified buffer (pixels + rowBytes),
+ * converting them into the requested format (SkImageInfo). The base-layer pixels are read
+ * starting at the specified (srcX,srcY) location in the coordinate system of the base-layer.
+ *
+ * The specified ImageInfo and (srcX,srcY) offset specifies a source rectangle
+ *
+ * srcR.setXYWH(srcX, srcY, dstInfo.width(), dstInfo.height());
+ *
+ * srcR is intersected with the bounds of the base-layer. If this intersection is not empty,
+ * then we have two sets of pixels (of equal size). Replace the dst pixels with the
+ * corresponding src pixels, performing any colortype/alphatype transformations needed
+ * (in the case where the src and dst have different colortypes or alphatypes).
+ *
+ * This call can fail, returning false, for several reasons:
+ * - If srcR does not intersect the base-layer bounds.
+ * - If the requested colortype/alphatype cannot be converted from the base-layer's types.
+ * - If this canvas is not backed by pixels (e.g. picture or PDF)
+ */
+ bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY);
+
+ /**
+ * Helper for calling readPixels(info, ...). This call checks whether the bitmap has been
+ * allocated. If not, it attempts to call allocPixels(); if that fails, it returns false.
+ * Otherwise, it calls through to readPixels(info, ...) and returns its result.
+ */
+ bool readPixels(SkBitmap* bitmap, int srcX, int srcY);
+
+ /**
+ * Helper for allocating pixels and then calling readPixels(info, ...). The bitmap is resized
+ * to the intersection of srcRect and the base-layer bounds. On success, pixels will be
+ * allocated in bitmap and true returned. On failure, false is returned and bitmap will be
+ * set to empty.
+ */
+ bool readPixels(const SkIRect& srcRect, SkBitmap* bitmap);
+
+ /**
+ * This method affects the pixels in the base-layer, and operates in pixel coordinates,
+ * ignoring the matrix and clip.
+ *
+ * The specified ImageInfo and (x,y) offset specify a rectangle: target.
+ *
+ * target.setXYWH(x, y, info.width(), info.height());
+ *
+ * Target is intersected with the bounds of the base-layer. If this intersection is not empty,
+ * then we have two sets of pixels (of equal size), the "src" specified by info+pixels+rowBytes
+ * and the "dst" by the canvas' backend. Replace the dst pixels with the corresponding src
+ * pixels, performing any colortype/alphatype transformations needed (in the case where the
+ * src and dst have different colortypes or alphatypes).
+ *
+ * This call can fail, returning false, for several reasons:
+ * - If the src colortype/alphatype cannot be converted to the canvas' types
+ * - If this canvas is not backed by pixels (e.g. picture or PDF)
+ */
+ bool writePixels(const SkImageInfo&, const void* pixels, size_t rowBytes, int x, int y);
+
+ /**
+ * Helper for calling writePixels(info, ...) by passing its pixels and rowbytes. If the bitmap
+ * is just wrapping a texture, returns false and does nothing.
+ */
+ bool writePixels(const SkBitmap& bitmap, int x, int y);
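+
+ /* Illustrative sketch of writing raster pixels at (10,10); assumes a pixel-backed "canvas":
+ *
+ *   SkBitmap src;
+ *   src.allocN32Pixels(8, 8);
+ *   src.eraseColor(SK_ColorGREEN);
+ *   canvas->writePixels(src, 10, 10);   // ignores the matrix and clip
+ */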
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ /** This call saves the current matrix, clip, and drawFilter, and pushes a
+ copy onto a private stack. Subsequent calls to translate, scale,
+ rotate, skew, concat or clipRect, clipPath, and setDrawFilter all
+ operate on this copy.
+ When the balancing call to restore() is made, the previous matrix, clip,
+ and drawFilter are restored.
+
+ @return The value to pass to restoreToCount() to balance this save()
+ */
+ int save();
+
+ /** This behaves the same as save(), but in addition it allocates an
+ offscreen bitmap. All drawing calls are directed there, and only when
+ the balancing call to restore() is made is that offscreen transferred to
+ the canvas (or the previous layer).
+ @param bounds (may be null) This rect, if non-null, is used as a hint to
+ limit the size of the offscreen, and thus drawing may be
+ clipped to it, though that clipping is not guaranteed to
+ happen. If exact clipping is desired, use clipRect().
+ @param paint (may be null) This is copied, and is applied to the
+ offscreen when restore() is called
+ @return The value to pass to restoreToCount() to balance this save()
+ */
+ int saveLayer(const SkRect* bounds, const SkPaint* paint);
+ int saveLayer(const SkRect& bounds, const SkPaint* paint) {
+ return this->saveLayer(&bounds, paint);
+ }
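+
+ /* Illustrative sketch of compositing through a half-transparent layer; "canvas" is assumed:
+ *
+ *   SkRect bounds = SkRect::MakeWH(100, 100);
+ *   SkPaint layerPaint;
+ *   layerPaint.setAlpha(0x80);
+ *   canvas->saveLayer(&bounds, &layerPaint);
+ *   SkPaint red;
+ *   red.setColor(SK_ColorRED);
+ *   canvas->drawCircle(50, 50, 40, red);
+ *   canvas->restore();   // the offscreen is drawn back at 50% alpha
+ */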
+
+ /**
+ * Temporary name.
+ * Will allow any requests for LCD text to be respected, so the caller must be careful to
+ * only draw on top of opaque sections of the layer to get good results.
+ */
+ int saveLayerPreserveLCDTextRequests(const SkRect* bounds, const SkPaint* paint);
+
+ /** This behaves the same as save(), but in addition it allocates an
+ offscreen bitmap. All drawing calls are directed there, and only when
+ the balancing call to restore() is made is that offscreen transferred to
+ the canvas (or the previous layer).
+ @param bounds (may be null) This rect, if non-null, is used as a hint to
+ limit the size of the offscreen, and thus drawing may be
+ clipped to it, though that clipping is not guaranteed to
+ happen. If exact clipping is desired, use clipRect().
+ @param alpha This is applied to the offscreen when restore() is called.
+ @return The value to pass to restoreToCount() to balance this save()
+ */
+ int saveLayerAlpha(const SkRect* bounds, U8CPU alpha);
+
+ enum {
+ kIsOpaque_SaveLayerFlag = 1 << 0,
+ kPreserveLCDText_SaveLayerFlag = 1 << 1,
+
+#ifdef SK_SUPPORT_LEGACY_CLIPTOLAYERFLAG
+ kDontClipToLayer_Legacy_SaveLayerFlag = kDontClipToLayer_PrivateSaveLayerFlag,
+#endif
+ };
+ typedef uint32_t SaveLayerFlags;
+
+ struct SaveLayerRec {
+ SaveLayerRec()
+ : fBounds(nullptr), fPaint(nullptr), fBackdrop(nullptr), fSaveLayerFlags(0)
+ {}
+ SaveLayerRec(const SkRect* bounds, const SkPaint* paint, SaveLayerFlags saveLayerFlags = 0)
+ : fBounds(bounds)
+ , fPaint(paint)
+ , fBackdrop(nullptr)
+ , fSaveLayerFlags(saveLayerFlags)
+ {}
+ SaveLayerRec(const SkRect* bounds, const SkPaint* paint, const SkImageFilter* backdrop,
+ SaveLayerFlags saveLayerFlags)
+ : fBounds(bounds)
+ , fPaint(paint)
+ , fBackdrop(backdrop)
+ , fSaveLayerFlags(saveLayerFlags)
+ {}
+
+ const SkRect* fBounds; // optional
+ const SkPaint* fPaint; // optional
+ const SkImageFilter* fBackdrop; // optional
+ SaveLayerFlags fSaveLayerFlags;
+ };
+
+ int saveLayer(const SaveLayerRec&);
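+
+ /* Illustrative sketch of the SaveLayerRec form; "canvas" is assumed:
+ *
+ *   SkPaint layerPaint;
+ *   layerPaint.setAlpha(0x80);
+ *   SkCanvas::SaveLayerRec rec(nullptr, &layerPaint,
+ *                              SkCanvas::kPreserveLCDText_SaveLayerFlag);
+ *   canvas->saveLayer(rec);
+ *   // ... draw opaque content suitable for LCD text ...
+ *   canvas->restore();
+ */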
+
+ /** This call balances a previous call to save(), and is used to remove all
+ modifications to the matrix/clip/drawFilter state since the last save
+ call.
+ It is an error to call restore() more times than save() was called.
+ */
+ void restore();
+
+ /** Returns the number of matrix/clip states on the SkCanvas' private stack.
+ This will equal # save() calls - # restore() calls + 1. The save count on
+ a new canvas is 1.
+ */
+ int getSaveCount() const;
+
+ /** Efficient way to pop any calls to save() that happened after the save
+ count reached saveCount. It is an error for saveCount to be greater than
+ getSaveCount(). To pop all the way back to the initial matrix/clip context
+ pass saveCount == 1.
+ @param saveCount The number of save() levels to restore from
+ */
+ void restoreToCount(int saveCount);
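+
+ /* Illustrative sketch of unwinding nested saves in one call; "canvas" is assumed:
+ *
+ *   int count = canvas->getSaveCount();
+ *   canvas->save();
+ *   canvas->save();
+ *   // ... temporary matrix/clip changes ...
+ *   canvas->restoreToCount(count);   // pops both saves
+ */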
+
+ /** Preconcat the current matrix with the specified translation
+ @param dx The distance to translate in X
+ @param dy The distance to translate in Y
+ */
+ void translate(SkScalar dx, SkScalar dy);
+
+ /** Preconcat the current matrix with the specified scale.
+ @param sx The amount to scale in X
+ @param sy The amount to scale in Y
+ */
+ void scale(SkScalar sx, SkScalar sy);
+
+ /** Preconcat the current matrix with the specified rotation about the origin.
+ @param degrees The amount to rotate, in degrees
+ */
+ void rotate(SkScalar degrees);
+
+ /** Preconcat the current matrix with the specified rotation about a given point.
+ @param degrees The amount to rotate, in degrees
+ @param px The x coordinate of the point to rotate about.
+ @param py The y coordinate of the point to rotate about.
+ */
+ void rotate(SkScalar degrees, SkScalar px, SkScalar py);
+
+ /** Preconcat the current matrix with the specified skew.
+ @param sx The amount to skew in X
+ @param sy The amount to skew in Y
+ */
+ void skew(SkScalar sx, SkScalar sy);
+
+ /** Preconcat the current matrix with the specified matrix.
+ @param matrix The matrix to preconcatenate with the current matrix
+ */
+ void concat(const SkMatrix& matrix);
+
+ /** Replace the current matrix with a copy of the specified matrix.
+ @param matrix The matrix that will be copied into the current matrix.
+ */
+ void setMatrix(const SkMatrix& matrix);
+
+ /** Helper for setMatrix(identity). Sets the current matrix to identity.
+ */
+ void resetMatrix();
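+
+ /* Illustrative sketch of composing matrix operations (each preconcatenates the CTM);
+ * "canvas" is assumed:
+ *
+ *   SkPaint blue;
+ *   blue.setColor(SK_ColorBLUE);
+ *   canvas->save();
+ *   canvas->translate(100, 100);   // move the origin
+ *   canvas->rotate(45);            // rotate about the new origin
+ *   canvas->scale(2, 2);
+ *   canvas->drawRect(SkRect::MakeWH(10, 10), blue);
+ *   canvas->restore();
+ */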
+
+#ifdef SK_EXPERIMENTAL_SHADOWING
+ /** Add the specified translation to the current draw depth of the canvas.
+ @param z The distance to translate in Z.
+ Negative into screen, positive out of screen.
+ Without translation, the draw depth defaults to 0.
+ */
+ void translateZ(SkScalar z);
+
+ /** Set the current set of lights in the canvas.
+ @param lights The lights that we want the canvas to have.
+ */
+ void setLights(sk_sp<SkLights> lights);
+
+ /** Returns the current set of lights the canvas uses
+ */
+ sk_sp<SkLights> getLights() const;
+#endif
+
+ /**
+ * Modify the current clip with the specified rectangle.
+ * @param rect The rect to combine with the current clip
+ * @param op The region op to apply to the current clip
+ * @param doAntiAlias true if the clip should be antialiased
+ */
+ void clipRect(const SkRect& rect, ClipOp, bool doAntiAlias);
+ void clipRect(const SkRect& rect, ClipOp op) {
+ this->clipRect(rect, op, false);
+ }
+ void clipRect(const SkRect& rect, bool doAntiAlias = false) {
+ this->clipRect(rect, kIntersect_Op, doAntiAlias);
+ }
+
+ /**
+ * Modify the current clip with the specified SkRRect.
+ * @param rrect The rrect to combine with the current clip
+ * @param op The region op to apply to the current clip
+ * @param doAntiAlias true if the clip should be antialiased
+ */
+ void clipRRect(const SkRRect& rrect, ClipOp op, bool doAntiAlias);
+ void clipRRect(const SkRRect& rrect, ClipOp op) {
+ this->clipRRect(rrect, op, false);
+ }
+ void clipRRect(const SkRRect& rrect, bool doAntiAlias = false) {
+ this->clipRRect(rrect, kIntersect_Op, doAntiAlias);
+ }
+
+ /**
+ * Modify the current clip with the specified path.
+ * @param path The path to combine with the current clip
+ * @param op The region op to apply to the current clip
+ * @param doAntiAlias true if the clip should be antialiased
+ */
+ void clipPath(const SkPath& path, ClipOp op, bool doAntiAlias);
+ void clipPath(const SkPath& path, ClipOp op) {
+ this->clipPath(path, op, false);
+ }
+ void clipPath(const SkPath& path, bool doAntiAlias = false) {
+ this->clipPath(path, kIntersect_Op, doAntiAlias);
+ }
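+
+ /* Illustrative sketch of combining clips (both calls intersect with the current clip);
+ * "canvas" is assumed:
+ *
+ *   canvas->save();
+ *   canvas->clipRect(SkRect::MakeWH(200, 200), true);   // antialiased
+ *   SkPath circle;
+ *   circle.addCircle(100, 100, 80);
+ *   canvas->clipPath(circle, true);
+ *   // ... draws here are limited to the rect intersected with the circle ...
+ *   canvas->restore();
+ */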
+
+ /** EXPERIMENTAL -- only used for testing
+ Set to simplify clip stack using path ops.
+ */
+ void setAllowSimplifyClip(bool allow) {
+ fAllowSimplifyClip = allow;
+ }
+
+ /** Modify the current clip with the specified region. Note that unlike
+ clipRect() and clipPath() which transform their arguments by the current
+ matrix, clipRegion() assumes its argument is already in device
+ coordinates, and so no transformation is performed.
+ @param deviceRgn The region to apply to the current clip
+ @param op The region op to apply to the current clip
+ */
+ void clipRegion(const SkRegion& deviceRgn, ClipOp op = kIntersect_Op);
+
+ /** Return true if the specified rectangle, after being transformed by the
+ current matrix, would lie completely outside of the current clip. Call
+ this to check if an area you intend to draw into is clipped out (and
+ therefore you can skip making the draw calls).
+ @param rect the rect to compare with the current clip
+ @return true if the rect (transformed by the canvas' matrix) does not
+ intersect with the canvas' clip
+ */
+ bool quickReject(const SkRect& rect) const;
+
+ /** Return true if the specified path, after being transformed by the
+ current matrix, would lie completely outside of the current clip. Call
+ this to check if an area you intend to draw into is clipped out (and
+ therefore you can skip making the draw calls). Note: for speed, it may
+ return false even if the path itself does not intersect the clip
+ (i.e. the bounds of the path intersect the clip, but the path itself does not).
+ @param path The path to compare with the current clip
+ @return true if the path (transformed by the canvas' matrix) does not
+ intersect with the canvas' clip
+ */
+ bool quickReject(const SkPath& path) const;
+
+ /** Return the bounds of the current clip (in local coordinates) in the
+ bounds parameter, and return true if it is non-empty. This can be useful
+ in a way similar to quickReject, in that it tells you that drawing
+ outside of these bounds will be clipped out.
+ */
+ virtual bool getClipBounds(SkRect* bounds) const;
+
+ /** Return the bounds of the current clip, in device coordinates; returns
+ true if non-empty. May be faster than getting the clip explicitly and
+ then taking its bounds.
+ */
+ virtual bool getClipDeviceBounds(SkIRect* bounds) const;
+
+
+ /** Fill the entire canvas' bitmap (restricted to the current clip) with the
+ specified ARGB color, using the specified mode.
+ @param a the alpha component (0..255) of the color to fill the canvas
+ @param r the red component (0..255) of the color to fill the canvas
+ @param g the green component (0..255) of the color to fill the canvas
+ @param b the blue component (0..255) of the color to fill the canvas
+ @param mode the mode to apply the color in (defaults to SrcOver)
+ */
+ void drawARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b, SkBlendMode mode = SkBlendMode::kSrcOver);
+#ifdef SK_SUPPORT_LEGACY_XFERMODE_OBJECT
+ void drawARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b, SkXfermode::Mode mode) {
+ this->drawARGB(a, r, g, b, (SkBlendMode)mode);
+ }
+#endif
+
+ /** Fill the entire canvas' bitmap (restricted to the current clip) with the
+ specified color and mode.
+ @param color the color to draw with
+ @param mode the mode to apply the color in (defaults to SrcOver)
+ */
+ void drawColor(SkColor color, SkBlendMode mode = SkBlendMode::kSrcOver);
+#ifdef SK_SUPPORT_LEGACY_XFERMODE_OBJECT
+ void drawColor(SkColor color, SkXfermode::Mode mode) {
+ this->drawColor(color, (SkBlendMode)mode);
+ }
+#endif
+
+ /**
+ * Helper method for drawing a color in SRC mode, completely replacing all the pixels
+ * in the current clip with this color.
+ */
+ void clear(SkColor color) {
+ this->drawColor(color, SkBlendMode::kSrc);
+ }
+
+ /**
+ * This makes the contents of the canvas undefined. Subsequent calls that
+ * require reading the canvas contents will produce undefined results. Examples
+ * include blending and readPixels. The actual implementation is backend-
+ * dependent and one legal implementation is to do nothing. This method
+ * ignores the current clip.
+ *
+ * This function should only be called if the caller intends to subsequently
+ * draw to the canvas. The canvas may do real work at discard() time in order
+ * to optimize performance on subsequent draws. Thus, if you call this and then
+ * never draw to the canvas, you may pay a performance penalty.
+ */
+ void discard() { this->onDiscard(); }
+
+ /**
+ * Fill the entire canvas (restricted to the current clip) with the
+ * specified paint.
+ * @param paint The paint used to fill the canvas
+ */
+ void drawPaint(const SkPaint& paint);
+
+ enum PointMode {
+ /** drawPoints draws each point separately */
+ kPoints_PointMode,
+ /** drawPoints draws each pair of points as a line segment */
+ kLines_PointMode,
+ /** drawPoints draws the array of points as a polygon */
+ kPolygon_PointMode
+ };
+
+ /** Draw a series of points, interpreted based on the PointMode mode. For
+ all modes, the count parameter is interpreted as the total number of
+ points. For kLine mode, count/2 line segments are drawn.
+ For kPoint mode, each point is drawn centered at its coordinate, and its
+ size is specified by the paint's stroke-width. It draws as a square,
+ unless the paint's cap-type is round, in which case the points are drawn as
+ circles.
+ For kLine mode, each pair of points is drawn as a line segment,
+ respecting the paint's settings for cap/join/width.
+ For kPolygon mode, the entire array is drawn as a series of connected
+ line segments.
+ Note that, while similar, kLine and kPolygon modes draw slightly
+ differently than the equivalent path built with a series of moveto,
+ lineto calls, in that the path will draw all of its contours at once,
+ with no interactions if contours intersect each other (think XOR
+ xfermode). drawPoints always draws each element one at a time.
+ @param mode PointMode specifying how to draw the array of points.
+ @param count The number of points in the array
+ @param pts Array of points to draw
+ @param paint The paint used to draw the points
+ */
+ void drawPoints(PointMode mode, size_t count, const SkPoint pts[], const SkPaint& paint);
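+
+ /* Illustrative sketch of the point modes; "canvas" is assumed:
+ *
+ *   SkPoint pts[] = { {10, 10}, {50, 80}, {90, 10} };
+ *   SkPaint p;
+ *   p.setStrokeWidth(6);
+ *   p.setStrokeCap(SkPaint::kRound_Cap);
+ *   canvas->drawPoints(SkCanvas::kPoints_PointMode, 3, pts, p);    // three round dots
+ *   canvas->drawPoints(SkCanvas::kPolygon_PointMode, 3, pts, p);   // connected segments
+ */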
+
+ /** Helper method for drawing a single point. See drawPoints() for more
+ details.
+ */
+ void drawPoint(SkScalar x, SkScalar y, const SkPaint& paint);
+
+ /** Draws a single pixel in the specified color.
+ @param x The X coordinate of which pixel to draw
+ @param y The Y coordinate of which pixel to draw
+ @param color The color to draw
+ */
+ void drawPoint(SkScalar x, SkScalar y, SkColor color);
+
+ /** Draw a line segment with the specified start and stop x,y coordinates,
+ using the specified paint. NOTE: since a line is always "framed", the
+ paint's Style is ignored.
+ @param x0 The x-coordinate of the start point of the line
+ @param y0 The y-coordinate of the start point of the line
+ @param x1 The x-coordinate of the end point of the line
+ @param y1 The y-coordinate of the end point of the line
+ @param paint The paint used to draw the line
+ */
+ void drawLine(SkScalar x0, SkScalar y0, SkScalar x1, SkScalar y1,
+ const SkPaint& paint);
+
+ /** Draw the specified rectangle using the specified paint. The rectangle
+ will be filled or stroked based on the Style in the paint.
+ @param rect The rect to be drawn
+ @param paint The paint used to draw the rect
+ */
+ void drawRect(const SkRect& rect, const SkPaint& paint);
+
+ /** Draw the specified rectangle using the specified paint. The rectangle
+ will be filled or framed based on the Style in the paint.
+ @param rect The rect to be drawn
+ @param paint The paint used to draw the rect
+ */
+ void drawIRect(const SkIRect& rect, const SkPaint& paint) {
+ SkRect r;
+ r.set(rect); // promotes the ints to scalars
+ this->drawRect(r, paint);
+ }
+
+ /** Draw the specified rectangle using the specified paint. The rectangle
+ will be filled or framed based on the Style in the paint.
+ @param left The left side of the rectangle to be drawn
+ @param top The top side of the rectangle to be drawn
+ @param right The right side of the rectangle to be drawn
+ @param bottom The bottom side of the rectangle to be drawn
+ @param paint The paint used to draw the rect
+ */
+ void drawRectCoords(SkScalar left, SkScalar top, SkScalar right,
+ SkScalar bottom, const SkPaint& paint);
+
+ /** Draw the outline of the specified region using the specified paint.
+ @param region The region to be drawn
+ @param paint The paint used to draw the region
+ */
+ void drawRegion(const SkRegion& region, const SkPaint& paint);
+
+ /** Draw the specified oval using the specified paint. The oval will be
+ filled or framed based on the Style in the paint.
+ @param oval The rectangle bounds of the oval to be drawn
+ @param paint The paint used to draw the oval
+ */
+ void drawOval(const SkRect& oval, const SkPaint&);
+
+ /**
+ * Draw the specified RRect using the specified paint. The rrect will be filled or stroked
+ * based on the Style in the paint.
+ *
+ * @param rrect The round-rect to draw
+ * @param paint The paint used to draw the round-rect
+ */
+ void drawRRect(const SkRRect& rrect, const SkPaint& paint);
+
+ /**
+ * Draw the annulus formed by the outer and inner rrects. The results
+ * are undefined if the outer does not contain the inner.
+ */
+ void drawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint&);
+
+ /** Draw the specified circle using the specified paint. If radius is <= 0,
+ then nothing will be drawn. The circle will be filled
+ or framed based on the Style in the paint.
+ @param cx The x-coordinate of the center of the circle to be drawn
+ @param cy The y-coordinate of the center of the circle to be drawn
+ @param radius The radius of the circle to be drawn
+ @param paint The paint used to draw the circle
+ */
+ void drawCircle(SkScalar cx, SkScalar cy, SkScalar radius,
+ const SkPaint& paint);
+
+ /** Draw the specified arc, which will be scaled to fit inside the
+ specified oval. Sweep angles are not treated as modulo 360 and thus can
+ exceed a full sweep of the oval. Note that this differs slightly from
+ SkPath::arcTo, which treats the sweep angle mod 360. If the oval is empty
+ or the sweep angle is zero nothing is drawn. If useCenter is true the oval
+ center is inserted into the implied path before the arc and the path is
+ closed back to the center, forming a wedge. Otherwise, the implied path
+ contains just the arc and is not closed.
+ @param oval The bounds of oval used to define the shape of the arc.
+ @param startAngle Starting angle (in degrees) where the arc begins
+ @param sweepAngle Sweep angle (in degrees) measured clockwise.
+ @param useCenter true means include the center of the oval.
+ @param paint The paint used to draw the arc
+ */
+ void drawArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint);
+
+ /** Draw the specified round-rect using the specified paint. The round-rect
+ will be filled or framed based on the Style in the paint.
+ @param rect The rectangular bounds of the roundRect to be drawn
+ @param rx The x-radius of the oval used to round the corners
+ @param ry The y-radius of the oval used to round the corners
+ @param paint The paint used to draw the roundRect
+ */
+ void drawRoundRect(const SkRect& rect, SkScalar rx, SkScalar ry,
+ const SkPaint& paint);
+
+ /** Draw the specified path using the specified paint. The path will be
+ filled or framed based on the Style in the paint.
+ @param path The path to be drawn
+ @param paint The paint used to draw the path
+ */
+ void drawPath(const SkPath& path, const SkPaint& paint);
+
+ /** Draw the specified image, with its top/left corner at (x,y), using the
+ specified paint, transformed by the current matrix.
+
+ @param image The image to be drawn
+ @param left The position of the left side of the image being drawn
+ @param top The position of the top side of the image being drawn
+ @param paint The paint used to draw the image, or NULL
+ */
+ void drawImage(const SkImage* image, SkScalar left, SkScalar top, const SkPaint* paint = NULL);
+ void drawImage(const sk_sp<SkImage>& image, SkScalar left, SkScalar top,
+ const SkPaint* paint = NULL) {
+ this->drawImage(image.get(), left, top, paint);
+ }
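+
+ /* Illustrative sketch; the source image here is snapped from a raster surface, and
+ * "canvas" is assumed:
+ *
+ *   sk_sp<SkSurface> scratch = SkSurface::MakeRasterN32Premul(32, 32);
+ *   scratch->getCanvas()->clear(SK_ColorRED);
+ *   sk_sp<SkImage> image = scratch->makeImageSnapshot();
+ *   canvas->drawImage(image, 10, 10);
+ */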
+
+ /**
+ * Controls the behavior at the edge of the src-rect, when specified in drawImageRect,
+ * trading off speed for exactness.
+ *
+ * When filtering is enabled (in the Paint), skia may need to sample in a neighborhood around
+ * the pixels in the image. If there is a src-rect specified, it is intended to restrict the
+ * pixels that will be read. However, for performance reasons, some implementations may slow
+ * down if they cannot read 1-pixel past the src-rect boundary at times.
+ *
+ * This enum allows the caller to specify if such a 1-pixel "slop" will be visually acceptable.
+ * If it is, the caller should pass kFast, and it may result in a faster draw. If the src-rect
+ * must be strictly respected, the caller should pass kStrict.
+ */
+ enum SrcRectConstraint {
+ /**
+ * If kStrict is specified, the implementation must respect the src-rect
+ * (if specified) strictly, and will never sample outside of those bounds during sampling
+ * even when filtering. This may be slower than kFast.
+ */
+ kStrict_SrcRectConstraint,
+
+ /**
+ * If kFast is specified, the implementation may sample outside of the src-rect
+ * (if specified) by half the width of filter. This allows greater flexibility
+ * to the implementation and can make the draw much faster.
+ */
+ kFast_SrcRectConstraint,
+ };
+
+ /** Draw the specified image, scaling and translating so that it fills the specified
+ * dst rect. If the src rect is non-null, only that subset of the image is transformed
+ * and drawn.
+ *
+ * @param image The image to be drawn
+ * @param src Optional: specify the subset of the image to be drawn
+ * @param dst The destination rectangle where the scaled/translated
+ * image will be drawn
+ * @param paint The paint used to draw the image, or NULL
+ * @param constraint Control the tradeoff between speed and exactness w.r.t. the src-rect.
+ */
+ void drawImageRect(const SkImage* image, const SkRect& src, const SkRect& dst,
+ const SkPaint* paint,
+ SrcRectConstraint constraint = kStrict_SrcRectConstraint);
+ // variant that takes src SkIRect
+ void drawImageRect(const SkImage* image, const SkIRect& isrc, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint = kStrict_SrcRectConstraint);
+ // variant that assumes src == image-bounds
+ void drawImageRect(const SkImage* image, const SkRect& dst, const SkPaint* paint,
+ SrcRectConstraint = kStrict_SrcRectConstraint);
+
+ void drawImageRect(const sk_sp<SkImage>& image, const SkRect& src, const SkRect& dst,
+ const SkPaint* paint,
+ SrcRectConstraint constraint = kStrict_SrcRectConstraint) {
+ this->drawImageRect(image.get(), src, dst, paint, constraint);
+ }
+ void drawImageRect(const sk_sp<SkImage>& image, const SkIRect& isrc, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint cons = kStrict_SrcRectConstraint) {
+ this->drawImageRect(image.get(), isrc, dst, paint, cons);
+ }
+ void drawImageRect(const sk_sp<SkImage>& image, const SkRect& dst, const SkPaint* paint,
+ SrcRectConstraint cons = kStrict_SrcRectConstraint) {
+ this->drawImageRect(image.get(), dst, paint, cons);
+ }
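+
+ /* Illustrative sketch of scaling a sub-rect of an image; "canvas" and "image" (a 32x32
+ * sk_sp<SkImage>) are assumed:
+ *
+ *   SkRect src = SkRect::MakeWH(16, 16);              // top-left quarter
+ *   SkRect dst = SkRect::MakeXYWH(0, 0, 128, 128);
+ *   canvas->drawImageRect(image, src, dst, nullptr,
+ *                         SkCanvas::kFast_SrcRectConstraint);
+ */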
+
+ /**
+ * Draw the image stretched differentially to fit into dst.
+ * center is a rect within the image, and logically divides the image
+ * into 9 sections (3x3). For example, if the middle pixel of a [5x5]
+ * image is the "center", then the center-rect should be [2, 2, 3, 3].
+ *
+ * If the dst is >= the image size, then...
+ * - The 4 corners are not stretched at all.
+ * - The sides are stretched in only one axis.
+ * - The center is stretched in both axes.
+ * Else, for each axis where dst < image,
+ * - The corners shrink proportionally
+ * - The sides (along the shrink axis) and center are not drawn
+ */
+ void drawImageNine(const SkImage*, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint = nullptr);
+ void drawImageNine(const sk_sp<SkImage>& image, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint = nullptr) {
+ this->drawImageNine(image.get(), center, dst, paint);
+ }
+
+ /** Draw the specified bitmap, with its top/left corner at (x,y), using the
+ specified paint, transformed by the current matrix. Note: if the paint
+ contains a maskfilter that generates a mask which extends beyond the
+ bitmap's original width/height, then the bitmap will be drawn as if it
+ were in a Shader with CLAMP mode. Thus the color outside of the original
+ width/height will be the edge color replicated.
+
+ If a shader is present on the paint it will be ignored, except in the
+ case where the bitmap is kAlpha_8_SkColorType. In that case, the color is
+ generated by the shader.
+
+ @param bitmap The bitmap to be drawn
+ @param left The position of the left side of the bitmap being drawn
+ @param top The position of the top side of the bitmap being drawn
+ @param paint The paint used to draw the bitmap, or NULL
+ */
+ void drawBitmap(const SkBitmap& bitmap, SkScalar left, SkScalar top,
+ const SkPaint* paint = NULL);
+
+ /** Draw the specified bitmap, scaling and translating so that it fills the specified
+ * dst rect. If the src rect is non-null, only that subset of the bitmap is transformed
+ * and drawn.
+ *
+ * @param bitmap The bitmap to be drawn
+ * @param src Optional: specify the subset of the bitmap to be drawn
+ * @param dst The destination rectangle where the scaled/translated
+ * bitmap will be drawn
+ * @param paint The paint used to draw the bitmap, or NULL
+ * @param constraint Control the tradeoff between speed and exactness w.r.t. the src-rect.
+ */
+ void drawBitmapRect(const SkBitmap& bitmap, const SkRect& src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint = kStrict_SrcRectConstraint);
+ // variant where src is SkIRect
+ void drawBitmapRect(const SkBitmap& bitmap, const SkIRect& isrc, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint = kStrict_SrcRectConstraint);
+ void drawBitmapRect(const SkBitmap& bitmap, const SkRect& dst, const SkPaint* paint,
+ SrcRectConstraint = kStrict_SrcRectConstraint);
+
+ /**
+ * Draw the bitmap stretched or shrunk differentially to fit into dst.
+ * center is a rect within the bitmap, and logically divides the bitmap
+ * into 9 sections (3x3). For example, if the middle pixel of a [5x5]
+ * bitmap is the "center", then the center-rect should be [2, 2, 3, 3].
+ *
+ * If the dst is >= the bitmap size, then...
+ * - The 4 corners are not stretched at all.
+ * - The sides are stretched in only one axis.
+ * - The center is stretched in both axes.
+ * Else, for each axis where dst < bitmap,
+ * - The corners shrink proportionally
+ * - The sides (along the shrink axis) and center are not drawn
+ */
+ void drawBitmapNine(const SkBitmap& bitmap, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint = NULL);
+
+ /**
+ * Specifies coordinates to divide a bitmap into (xCount*yCount) rects.
+ *
+ * If the lattice divs or bounds are invalid, the entire lattice
+ * struct will be ignored on the draw call.
+ */
+ struct Lattice {
+ enum Flags : uint8_t {
+ // If set, indicates that we should not draw corresponding rect.
+ kTransparent_Flags = 1 << 0,
+ };
+
+ // An array of x-coordinates that divide the bitmap vertically.
+ // These must be unique, increasing, and in the set [fBounds.fLeft, fBounds.fRight).
+ // Does not have ownership.
+ const int* fXDivs;
+
+ // An array of y-coordinates that divide the bitmap horizontally.
+ // These must be unique, increasing, and in the set [fBounds.fTop, fBounds.fBottom).
+ // Does not have ownership.
+ const int* fYDivs;
+
+ // If non-null, the length of this array must be equal to
+ // (fXCount + 1) * (fYCount + 1). Note that we allow the first rect
+ // in each direction to be empty (ex: fXDivs[0] = fBounds.fLeft).
+ // In this case, the caller still must specify a flag (as a placeholder)
+ // for these empty rects.
+ // The flags correspond to the rects in the lattice, first moving
+ // left to right and then top to bottom.
+ const Flags* fFlags;
+
+ // The number of fXDivs.
+ int fXCount;
+
+ // The number of fYDivs.
+ int fYCount;
+
+ // The bound to draw from. Must be contained by the src that is being drawn,
+ // non-empty, and non-inverted.
+ // If nullptr, the bounds are the entire src.
+ const SkIRect* fBounds;
+ };
+
+ /**
+ * Draw the bitmap stretched or shrunk differentially to fit into dst.
+ *
+ * Moving horizontally across the bitmap, alternating rects will be "scalable"
+ * (in the x-dimension) to fit into dst or must be left "fixed". The first rect
+ * is treated as "fixed", but it's possible to specify an empty first rect by
+ * making lattice.fXDivs[0] = 0.
+ *
+ * The scale factor for all "scalable" rects will be the same, and may be greater
+ * than or less than 1 (meaning we can stretch or shrink). If the number of
+ * "fixed" pixels is greater than the width of the dst, we will collapse all of
+ * the "scalable" regions and appropriately downscale the "fixed" regions.
+ *
+ * The same interpretation also applies to the y-dimension.
+ */
+ void drawBitmapLattice(const SkBitmap& bitmap, const Lattice& lattice, const SkRect& dst,
+ const SkPaint* paint = nullptr);
+ void drawImageLattice(const SkImage* image, const Lattice& lattice, const SkRect& dst,
+ const SkPaint* paint = nullptr);
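+
+ /* Illustrative sketch of a 3x3 lattice (nine-patch style); "canvas" and "image" (a 32x32
+ * sk_sp<SkImage>) are assumed:
+ *
+ *   const int xDivs[] = { 8, 24 };
+ *   const int yDivs[] = { 8, 24 };
+ *   SkCanvas::Lattice lattice = { xDivs, yDivs, nullptr, 2, 2, nullptr };
+ *   canvas->drawImageLattice(image.get(), lattice, SkRect::MakeWH(200, 100));
+ */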
+
+ /** Draw the text, with origin at (x,y), using the specified paint.
+ The origin is interpreted based on the Align setting in the paint.
+ @param text The text to be drawn
+ @param byteLength The number of bytes to read from the text parameter
+ @param x The x-coordinate of the origin of the text being drawn
+ @param y The y-coordinate of the origin of the text being drawn
+ @param paint The paint used for the text (e.g. color, size, style)
+ */
+ void drawText(const void* text, size_t byteLength, SkScalar x, SkScalar y,
+ const SkPaint& paint);
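+
+ /* Illustrative sketch; "canvas" is assumed, and the text is UTF-8 (the paint's default
+ * text encoding):
+ *
+ *   SkPaint textPaint;
+ *   textPaint.setAntiAlias(true);
+ *   textPaint.setTextSize(32);
+ *   canvas->drawText("Skia", 4, 20, 60, textPaint);   // byteLength == 4
+ */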
+
+ /** Draw the text, with each character/glyph origin specified by the pos[]
+ array. The origin is interpreted by the Align setting in the paint.
+ @param text The text to be drawn
+ @param byteLength The number of bytes to read from the text parameter
+ @param pos Array of positions, used to position each character
+ @param paint The paint used for the text (e.g. color, size, style)
+ */
+ void drawPosText(const void* text, size_t byteLength, const SkPoint pos[],
+ const SkPaint& paint);
+
+ /** Draw the text, with each character/glyph origin specified by the x
+ coordinate taken from the xpos[] array, and the y from the constY param.
+ The origin is interpreted by the Align setting in the paint.
+ @param text The text to be drawn
+ @param byteLength The number of bytes to read from the text parameter
+ @param xpos Array of x-positions, used to position each character
+ @param constY The shared Y coordinate for all of the positions
+ @param paint The paint used for the text (e.g. color, size, style)
+ */
+ void drawPosTextH(const void* text, size_t byteLength, const SkScalar xpos[], SkScalar constY,
+ const SkPaint& paint);
+
+ /** Draw the text, with origin at (x,y), using the specified paint, along
+ the specified path. The paint's Align setting determines where along the
+ path to start the text.
+ @param text The text to be drawn
+ @param byteLength The number of bytes to read from the text parameter
+ @param path The path the text should follow for its baseline
+ @param hOffset The distance along the path to add to the text's
+ starting position
+ @param vOffset The distance above(-) or below(+) the path to
+ position the text
+ @param paint The paint used for the text
+ */
+ void drawTextOnPathHV(const void* text, size_t byteLength, const SkPath& path, SkScalar hOffset,
+ SkScalar vOffset, const SkPaint& paint);
+
+ /** Draw the text, with origin at (x,y), using the specified paint, along
+ the specified path. The paint's Align setting determines where along the
+ path to start the text.
+ @param text The text to be drawn
+ @param byteLength The number of bytes to read from the text parameter
+ @param path The path the text should follow for its baseline
+ @param matrix (may be null) Applied to the text before it is
+ mapped onto the path
+ @param paint The paint used for the text
+ */
+ void drawTextOnPath(const void* text, size_t byteLength, const SkPath& path,
+ const SkMatrix* matrix, const SkPaint& paint);
+
+ /**
+ * Draw the text with each character/glyph individually transformed by its xform.
+ * If cullRect is not null, it is a conservative bounds of what will be drawn
+ * taking into account the xforms and the paint, and will be used to accelerate culling.
+ */
+ void drawTextRSXform(const void* text, size_t byteLength, const SkRSXform[],
+ const SkRect* cullRect, const SkPaint& paint);
+
+ /** Draw the text blob, offset by (x,y), using the specified paint.
+ @param blob The text blob to be drawn
+ @param x The x-offset of the text being drawn
+ @param y The y-offset of the text being drawn
+ @param paint The paint used for the text (e.g. color, size, style)
+ */
+ void drawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y, const SkPaint& paint);
+ void drawTextBlob(const sk_sp<SkTextBlob>& blob, SkScalar x, SkScalar y, const SkPaint& paint) {
+ this->drawTextBlob(blob.get(), x, y, paint);
+ }
+
+ /** Draw the picture into this canvas. This method effectively brackets the
+ playback of the picture's draw calls with save/restore, so the state
+ of this canvas will be unchanged after this call.
+ @param picture The recorded drawing commands to playback into this
+ canvas.
+ */
+ void drawPicture(const SkPicture* picture) {
+ this->drawPicture(picture, NULL, NULL);
+ }
+ void drawPicture(const sk_sp<SkPicture>& picture) {
+ this->drawPicture(picture.get());
+ }
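+
+ /* Illustrative sketch of recording and replaying a picture (requires SkPictureRecorder.h);
+ * "canvas" is assumed:
+ *
+ *   SkPictureRecorder recorder;
+ *   SkCanvas* rec = recorder.beginRecording(SkRect::MakeWH(100, 100));
+ *   rec->drawColor(SK_ColorBLUE);
+ *   sk_sp<SkPicture> picture = recorder.finishRecordingAsPicture();
+ *   canvas->drawPicture(picture);
+ */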
+
+ /**
+ * Draw the picture into this canvas.
+ *
+ * If matrix is non-null, apply that matrix to the CTM when drawing this picture. This is
+ * logically equivalent to
+ * save/concat/drawPicture/restore
+ *
+ * If paint is non-null, draw the picture into a temporary buffer, and then apply the paint's
+ * alpha/colorfilter/imagefilter/xfermode to that buffer as it is drawn to the canvas.
+ * This is logically equivalent to
+ * saveLayer(paint)/drawPicture/restore
+ */
+ void drawPicture(const SkPicture*, const SkMatrix* matrix, const SkPaint* paint);
+ void drawPicture(const sk_sp<SkPicture>& picture, const SkMatrix* matrix, const SkPaint* paint) {
+ this->drawPicture(picture.get(), matrix, paint);
+ }
+
+#ifdef SK_EXPERIMENTAL_SHADOWING
+ /**
+ * Draw the picture into this canvas, with shadows!
+ *
+ * We will use the canvas's lights along with the picture information (draw depths of
+ * objects, etc) to first create a set of shadowmaps for the light-picture pairs, and
+ * then use that set of shadowmaps to render the scene with shadows.
+ *
+ * If matrix is non-null, apply that matrix to the CTM when drawing this picture. This is
+ * logically equivalent to
+ * save/concat/drawPicture/restore
+ *
+ * If paint is non-null, draw the picture into a temporary buffer, and then apply the paint's
+ * alpha/colorfilter/imagefilter/xfermode to that buffer as it is drawn to the canvas.
+ * This is logically equivalent to
+ * saveLayer(paint)/drawPicture/restore
+ *
+ * We also support using variance shadow maps for blurred shadows; the user can specify
+ * what shadow mapping algorithm to use with params.
+ * - Variance Shadow Mapping works by storing both the depth and depth^2 in the shadow map.
+ * - Then, the shadow map can be blurred, and when reading from it, the fragment shader
+ * can calculate the variance of the depth at a position by doing E(x^2) - E(x)^2.
+ * - We can then use the depth variance and depth at a fragment to arrive at an upper bound
+ * of the probability that the current surface is shadowed by using Chebyshev's
+ * inequality, and then use that to shade the fragment.
+ *
+ * - There are a few problems with VSM.
+ * * Light Bleeding | Areas with high variance, such as near the edges of high up rects,
+ * will cause their shadow penumbras to overwrite otherwise solid
+ * shadows.
+ * * Shape Distortion | We can combat Light Bleeding by biasing the shadow (setting
+ * mostly shaded fragments to completely shaded) and increasing
+ * the minimum allowed variance. However, this warps and rounds
+ * out the shape of the shadow.
+ */
+ void drawShadowedPicture(const SkPicture*,
+ const SkMatrix* matrix,
+ const SkPaint* paint,
+ const SkShadowParams& params);
+ void drawShadowedPicture(const sk_sp<SkPicture>& picture,
+ const SkMatrix* matrix,
+ const SkPaint* paint,
+ const SkShadowParams& params) {
+ this->drawShadowedPicture(picture.get(), matrix, paint, params);
+ }
+#endif
+
+ enum VertexMode {
+ kTriangles_VertexMode,
+ kTriangleStrip_VertexMode,
+ kTriangleFan_VertexMode
+ };
+
+ /** Draw the array of vertices, interpreted as triangles (based on mode).
+
+ If both textures and vertex-colors are NULL, it strokes hairlines with
+ the paint's color. This behavior is a useful debugging mode to visualize
+ the mesh.
+
+ @param vmode How to interpret the array of vertices
+ @param vertexCount The number of points in the vertices array (and
+ corresponding texs and colors arrays if non-null)
+ @param vertices Array of vertices for the mesh
+ @param texs May be null. If not null, specifies the coordinate
+ in _texture_ space (not uv space) for each vertex.
+ @param colors May be null. If not null, specifies a color for each
+ vertex, to be interpolated across the triangle.
+ @param xmode Used if both texs and colors are present. In this
+ case the colors are combined with the texture using mode,
+ before being drawn using the paint. If mode is null, then
+ kModulate_Mode is used.
+ @param indices If not null, array of indices to reference into the
+ vertex (texs, colors) array.
+ @param indexCount number of entries in the indices array (if not null)
+ @param paint Specifies the shader/texture if present.
+ */
+ void drawVertices(VertexMode vmode, int vertexCount,
+ const SkPoint vertices[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint);
+ void drawVertices(VertexMode vmode, int vertexCount,
+ const SkPoint vertices[], const SkPoint texs[],
+ const SkColor colors[], const sk_sp<SkXfermode>& xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint) {
+ this->drawVertices(vmode, vertexCount, vertices, texs, colors, xmode.get(),
+ indices, indexCount, paint);
+ }
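+
+ /* Illustrative sketch of a single color-interpolated triangle (no texture, no xfermode,
+ * no indices); "canvas" is assumed:
+ *
+ *   SkPoint verts[]  = { {0, 0}, {100, 0}, {50, 100} };
+ *   SkColor colors[] = { SK_ColorRED, SK_ColorGREEN, SK_ColorBLUE };
+ *   SkPaint p;
+ *   canvas->drawVertices(SkCanvas::kTriangles_VertexMode, 3, verts,
+ *                        nullptr, colors, nullptr, nullptr, 0, p);
+ */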
+
+ /**
+ Draw a cubic coons patch
+
+ @param cubics specifies the 4 bounding cubic bezier curves of a patch, in clockwise order
+ starting at the top left corner.
+ @param colors specifies the colors for the corners, which will be bilinearly interpolated
+ across the patch; their order is clockwise starting at the top left corner.
+ @param texCoords specifies the texture coordinates that will be bilinearly interpolated
+ across the patch; their order is the same as the colors.
+ @param xmode specifies how the colors and the texture are combined if both of them are
+ present.
+ @param paint Specifies the shader/texture if present.
+ */
+ void drawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkXfermode* xmode, const SkPaint& paint);
+ void drawPatch(const SkPoint cubics[12], const SkColor colors[4], const SkPoint texCoords[4],
+ const sk_sp<SkXfermode>& xmode, const SkPaint& paint) {
+ this->drawPatch(cubics, colors, texCoords, xmode.get(), paint);
+ }
+
+ /**
+ * Draw a set of sprites from the atlas. Each is specified by a tex rectangle in the
+ * coordinate space of the atlas, and a corresponding xform which transforms the tex rectangle
+ * into a quad.
+ *
+ * xform maps [0, 0, tex.width, tex.height] -> quad
+ *
+ * The color array is optional. When specified, each color modulates the pixels in its
+ * corresponding quad (via the specified SkXfermode::Mode).
+ *
+ * The cullRect is optional. When specified, it must be a conservative bounds of all of the
+ * resulting transformed quads, allowing the canvas to skip drawing if the cullRect does not
+ * intersect the current clip.
+ *
+ * The paint is optional. If specified, its antialiasing, alpha, color-filter, image-filter
+ * and xfermode are used to affect each of the quads.
+ */
+ void drawAtlas(const SkImage* atlas, const SkRSXform xform[], const SkRect tex[],
+ const SkColor colors[], int count, SkXfermode::Mode, const SkRect* cullRect,
+ const SkPaint* paint);
+
+ void drawAtlas(const SkImage* atlas, const SkRSXform xform[], const SkRect tex[], int count,
+ const SkRect* cullRect, const SkPaint* paint) {
+ this->drawAtlas(atlas, xform, tex, NULL, count, SkXfermode::kDst_Mode, cullRect, paint);
+ }
+
+ void drawAtlas(const sk_sp<SkImage>& atlas, const SkRSXform xform[], const SkRect tex[],
+ const SkColor colors[], int count, SkXfermode::Mode mode, const SkRect* cull,
+ const SkPaint* paint) {
+ this->drawAtlas(atlas.get(), xform, tex, colors, count, mode, cull, paint);
+ }
+ void drawAtlas(const sk_sp<SkImage>& atlas, const SkRSXform xform[], const SkRect tex[],
+ int count, const SkRect* cullRect, const SkPaint* paint) {
+ this->drawAtlas(atlas.get(), xform, tex, nullptr, count, SkXfermode::kDst_Mode,
+ cullRect, paint);
+ }
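+
+ /* Illustrative sketch of drawing two sprites from an atlas; "canvas" and "atlas" (an
+ * sk_sp<SkImage> holding the sprite sheet) are assumed:
+ *
+ *   SkRSXform xforms[] = { SkRSXform::Make(1, 0,  0, 0),     // identity at (0,0)
+ *                          SkRSXform::Make(0, 1, 64, 0) };   // rotated 90deg at (64,0)
+ *   SkRect tex[] = { SkRect::MakeWH(16, 16),
+ *                    SkRect::MakeXYWH(16, 0, 16, 16) };
+ *   canvas->drawAtlas(atlas.get(), xforms, tex, 2, nullptr, nullptr);
+ */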
+
+ /**
+ * Draw the contents of this drawable into the canvas. If the canvas is async
+ * (e.g. it is recording into a picture) then the drawable will be referenced instead,
+ * to have its draw() method called when the picture is finalized.
+ *
+ * If the intent is to force the contents of the drawable into this canvas immediately,
+ * then drawable->draw(canvas) may be called.
+ */
+ void drawDrawable(SkDrawable* drawable, const SkMatrix* = NULL);
+ void drawDrawable(SkDrawable*, SkScalar x, SkScalar y);
+
+ /**
+ * Send an "annotation" to the canvas. The annotation is a key/value pair, where the key is
+ * a null-terminated utf8 string, and the value is a blob of data stored in an SkData
+ * (which may be null). The annotation is associated with the specified rectangle.
+ *
+ * The caller still retains its ownership of the data (if any).
+ *
+ * Note: on many canvas types, this information is ignored, but some canvases (e.g. recording
+ * a picture or drawing to a PDF document) will pass on this information.
+ */
+ void drawAnnotation(const SkRect&, const char key[], SkData* value);
+ void drawAnnotation(const SkRect& rect, const char key[], const sk_sp<SkData>& value) {
+ this->drawAnnotation(rect, key, value.get());
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+#ifdef SK_INTERNAL
+#ifndef SK_SUPPORT_LEGACY_DRAWFILTER
+ #define SK_SUPPORT_LEGACY_DRAWFILTER
+#endif
+#endif
+
+#ifdef SK_SUPPORT_LEGACY_DRAWFILTER
+ /** Get the current filter object. The filter's reference count is not
+ affected. The filter is saved/restored, just like the matrix and clip.
+ @return the canvas' filter (or NULL).
+ */
+ SK_ATTR_EXTERNALLY_DEPRECATED("getDrawFilter use is deprecated")
+ SkDrawFilter* getDrawFilter() const;
+
+ /** Set the new filter (or NULL). Pass NULL to clear any existing filter.
+ As a convenience, the parameter is returned. If an existing filter
+ exists, its refcnt is decremented. If the new filter is not null, its
+ refcnt is incremented. The filter is saved/restored, just like the
+ matrix and clip.
+ @param filter the new filter (or NULL)
+ @return the new filter
+ */
+ SK_ATTR_EXTERNALLY_DEPRECATED("setDrawFilter use is deprecated")
+ virtual SkDrawFilter* setDrawFilter(SkDrawFilter* filter);
+#endif
+ //////////////////////////////////////////////////////////////////////////
+
+ /**
+ * Return true if the current clip is empty (i.e. nothing will draw).
+ * Note: this is not always a free call, so it should not be used
+ * more often than necessary. However, once the canvas has computed this
+ * result, subsequent calls will be cheap (until the clip state changes,
+ * which can happen on any clip..() or restore() call).
+ */
+ virtual bool isClipEmpty() const;
+
+ /**
+ * Returns true if the current clip is just a (non-empty) rectangle.
+ * Returns false if the clip is empty, or if it is complex.
+ */
+ virtual bool isClipRect() const;
+
+ /** Return the current matrix on the canvas.
+ This does not account for the translate in any of the devices.
+ @return The current matrix on the canvas.
+ */
+ const SkMatrix& getTotalMatrix() const;
+
+ /** Return the clip stack. The clip stack stores all the individual
+ * clips organized by the save/restore frame in which they were
+ * added.
+ * @return the current clip stack ("list" of individual clip elements)
+ */
+ const SkClipStack* getClipStack() const {
+ return fClipStack;
+ }
+
+ typedef SkCanvasClipVisitor ClipVisitor;
+ /**
+ * Replays the clip operations, back to front, that have been applied to
+ * the canvas, calling the appropriate method on the visitor for each
+ * clip. All clips have already been transformed into device space.
+ */
+ void replayClips(ClipVisitor*) const;
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ // don't call
+ GrDrawContext* internal_private_accessTopLayerDrawContext();
+
+ // don't call
+ static void Internal_Private_SetIgnoreSaveLayerBounds(bool);
+ static bool Internal_Private_GetIgnoreSaveLayerBounds();
+ static void Internal_Private_SetTreatSpriteAsBitmap(bool);
+ static bool Internal_Private_GetTreatSpriteAsBitmap();
+
+ // TEMP helpers until we switch virtual over to const& for src-rect
+ void legacy_drawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint,
+ SrcRectConstraint constraint = kStrict_SrcRectConstraint);
+ void legacy_drawBitmapRect(const SkBitmap& bitmap, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint,
+ SrcRectConstraint constraint = kStrict_SrcRectConstraint);
+
+ // expose minimum amount of information necessary for transitional refactoring
+ /**
+ * Returns CTM and clip bounds, translated from canvas coordinates to top layer coordinates.
+ */
+ void temporary_internal_describeTopLayer(SkMatrix* matrix, SkIRect* clip_bounds);
+
+protected:
+#ifdef SK_EXPERIMENTAL_SHADOWING
+ /** Returns the current (cumulative) draw depth of the canvas.
+ */
+ SkScalar getZ() const;
+
+ sk_sp<SkLights> fLights;
+#endif
+
+ // default impl defers to getDevice()->newSurface(info)
+ virtual sk_sp<SkSurface> onNewSurface(const SkImageInfo&, const SkSurfaceProps&);
+
+ // default impl defers to its device
+ virtual bool onPeekPixels(SkPixmap*);
+ virtual bool onAccessTopLayerPixels(SkPixmap*);
+ virtual SkImageInfo onImageInfo() const;
+ virtual bool onGetProps(SkSurfaceProps*) const;
+ virtual void onFlush();
+
+ // Subclass save/restore notifiers.
+ // Overriders should call the corresponding INHERITED method up the inheritance chain.
+ // getSaveLayerStrategy()'s return value may suppress full layer allocation.
+ enum SaveLayerStrategy {
+ kFullLayer_SaveLayerStrategy,
+ kNoLayer_SaveLayerStrategy,
+ };
+
+ virtual void willSave() {}
+ // Overriders should call the corresponding INHERITED method up the inheritance chain.
+ virtual SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec&) {
+ return kFullLayer_SaveLayerStrategy;
+ }
+ virtual void willRestore() {}
+ virtual void didRestore() {}
+ virtual void didConcat(const SkMatrix&) {}
+ virtual void didSetMatrix(const SkMatrix&) {}
+ virtual void didTranslate(SkScalar dx, SkScalar dy) {
+ this->didConcat(SkMatrix::MakeTrans(dx, dy));
+ }
+
+#ifdef SK_EXPERIMENTAL_SHADOWING
+ virtual void didTranslateZ(SkScalar) {}
+#endif
+
+ virtual void onDrawAnnotation(const SkRect&, const char key[], SkData* value);
+ virtual void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&);
+
+ virtual void onDrawText(const void* text, size_t byteLength, SkScalar x,
+ SkScalar y, const SkPaint& paint);
+
+ virtual void onDrawPosText(const void* text, size_t byteLength,
+ const SkPoint pos[], const SkPaint& paint);
+
+ virtual void onDrawPosTextH(const void* text, size_t byteLength,
+ const SkScalar xpos[], SkScalar constY,
+ const SkPaint& paint);
+
+ virtual void onDrawTextOnPath(const void* text, size_t byteLength,
+ const SkPath& path, const SkMatrix* matrix,
+ const SkPaint& paint);
+ virtual void onDrawTextRSXform(const void* text, size_t byteLength, const SkRSXform[],
+ const SkRect* cullRect, const SkPaint& paint);
+
+ virtual void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint);
+
+ virtual void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkXfermode* xmode, const SkPaint& paint);
+
+ virtual void onDrawDrawable(SkDrawable*, const SkMatrix*);
+
+ virtual void onDrawPaint(const SkPaint&);
+ virtual void onDrawRect(const SkRect&, const SkPaint&);
+ virtual void onDrawRegion(const SkRegion& region, const SkPaint& paint);
+ virtual void onDrawOval(const SkRect&, const SkPaint&);
+ virtual void onDrawArc(const SkRect&, SkScalar startAngle, SkScalar sweepAngle, bool useCenter,
+ const SkPaint&);
+ virtual void onDrawRRect(const SkRRect&, const SkPaint&);
+ virtual void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&);
+ virtual void onDrawVertices(VertexMode, int vertexCount, const SkPoint vertices[],
+ const SkPoint texs[], const SkColor colors[], SkXfermode*,
+ const uint16_t indices[], int indexCount, const SkPaint&);
+
+ virtual void onDrawAtlas(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[],
+ int count, SkXfermode::Mode, const SkRect* cull, const SkPaint*);
+ virtual void onDrawPath(const SkPath&, const SkPaint&);
+ virtual void onDrawImage(const SkImage*, SkScalar dx, SkScalar dy, const SkPaint*);
+ virtual void onDrawImageRect(const SkImage*, const SkRect*, const SkRect&, const SkPaint*,
+ SrcRectConstraint);
+ virtual void onDrawImageNine(const SkImage*, const SkIRect& center, const SkRect& dst,
+ const SkPaint*);
+ virtual void onDrawImageLattice(const SkImage*, const Lattice& lattice, const SkRect& dst,
+ const SkPaint*);
+
+ virtual void onDrawBitmap(const SkBitmap&, SkScalar dx, SkScalar dy, const SkPaint*);
+ virtual void onDrawBitmapRect(const SkBitmap&, const SkRect*, const SkRect&, const SkPaint*,
+ SrcRectConstraint);
+ virtual void onDrawBitmapNine(const SkBitmap&, const SkIRect& center, const SkRect& dst,
+ const SkPaint*);
+ virtual void onDrawBitmapLattice(const SkBitmap&, const Lattice& lattice, const SkRect& dst,
+ const SkPaint*);
+
+ enum ClipEdgeStyle {
+ kHard_ClipEdgeStyle,
+ kSoft_ClipEdgeStyle
+ };
+
+ virtual void onClipRect(const SkRect& rect, ClipOp, ClipEdgeStyle);
+ virtual void onClipRRect(const SkRRect& rrect, ClipOp, ClipEdgeStyle);
+ virtual void onClipPath(const SkPath& path, ClipOp, ClipEdgeStyle);
+ virtual void onClipRegion(const SkRegion& deviceRgn, ClipOp);
+
+ virtual void onDiscard();
+
+ virtual void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*);
+
+#ifdef SK_EXPERIMENTAL_SHADOWING
+ virtual void onDrawShadowedPicture(const SkPicture*,
+ const SkMatrix*,
+ const SkPaint*,
+ const SkShadowParams& params);
+#endif
+
+ // Returns the canvas to be used by DrawIter. Default implementation
+ // returns this. Subclasses that encapsulate an indirect canvas may
+ // need to override this method. The impl must keep track of this, as it
+ // is not released or deleted by the caller.
+ virtual SkCanvas* canvasForDrawIter();
+
+ // Clip rectangle bounds. Called internally by saveLayer.
+ // Returns false if the rectangle is entirely clipped out.
+ // If non-NULL, the imageFilter parameter will be used to expand the clip
+ // and offscreen bounds for any margin required by the filter DAG.
+ bool clipRectBounds(const SkRect* bounds, SaveLayerFlags, SkIRect* intersection,
+ const SkImageFilter* imageFilter = NULL);
+
+private:
+ /** After calling saveLayer(), there can be any number of devices that make
+ up the top-most drawing area. LayerIter can be used to iterate through
+ those devices. Note that the iterator is only valid until the next API
+ call made on the canvas. Ownership of all pointers in the iterator stays
+ with the canvas, so none of them should be modified or deleted.
+ */
+ class LayerIter /*: SkNoncopyable*/ {
+ public:
+ /** Initialize iterator with canvas, and set values for 1st device */
+ LayerIter(SkCanvas*);
+ ~LayerIter();
+
+ /** Return true if the iterator is done */
+ bool done() const { return fDone; }
+ /** Cycle to the next device */
+ void next();
+
+ // These reflect the current device in the iterator
+
+ SkBaseDevice* device() const;
+ const SkMatrix& matrix() const;
+ const SkRasterClip& clip() const;
+ const SkPaint& paint() const;
+ int x() const;
+ int y() const;
+
+ private:
+ // used to embed the SkDrawIter object directly in our instance, w/o
+ // having to expose that class def to the public. There is an assert
+ // in our constructor to ensure that fStorage is large enough
+ // (though it should be a compile-time assert!). We use intptr_t to work
+ // safely with 32 and 64 bit machines (to ensure the storage is enough)
+ intptr_t fStorage[32];
+ class SkDrawIter* fImpl; // this points at fStorage
+ SkPaint fDefaultPaint;
+ bool fDone;
+ };
+
+ static bool BoundsAffectsClip(SaveLayerFlags);
+ static SaveLayerFlags LegacySaveFlagsToSaveLayerFlags(uint32_t legacySaveFlags);
+
+ static void DrawDeviceWithFilter(SkBaseDevice* src, const SkImageFilter* filter,
+ SkBaseDevice* dst, const SkMatrix& ctm,
+ const SkClipStack* clipStack);
+
+ enum ShaderOverrideOpacity {
+ kNone_ShaderOverrideOpacity, //!< there is no overriding shader (bitmap or image)
+ kOpaque_ShaderOverrideOpacity, //!< the overriding shader is opaque
+ kNotOpaque_ShaderOverrideOpacity, //!< the overriding shader may not be opaque
+ };
+
+ // notify our surface (if we have one) that we are about to draw, so it
+ // can perform copy-on-write or invalidate any cached images
+ void predrawNotify(bool willOverwritesEntireSurface = false);
+ void predrawNotify(const SkRect* rect, const SkPaint* paint, ShaderOverrideOpacity);
+ void predrawNotify(const SkRect* rect, const SkPaint* paint, bool shaderOverrideIsOpaque) {
+ this->predrawNotify(rect, paint, shaderOverrideIsOpaque ? kOpaque_ShaderOverrideOpacity
+ : kNotOpaque_ShaderOverrideOpacity);
+ }
+
+ class MCRec;
+
+ SkAutoTUnref<SkClipStack> fClipStack;
+ SkDeque fMCStack;
+ // points to top of stack
+ MCRec* fMCRec;
+ // the first N recs that can fit here mean we won't call malloc
+ enum {
+ kMCRecSize = 128, // most recent measurement
+ kMCRecCount = 32, // common depth for save/restores
+ kDeviceCMSize = 176, // most recent measurement
+ };
+ intptr_t fMCRecStorage[kMCRecSize * kMCRecCount / sizeof(intptr_t)];
+ intptr_t fDeviceCMStorage[kDeviceCMSize / sizeof(intptr_t)];
+
+ const SkSurfaceProps fProps;
+
+ int fSaveCount; // value returned by getSaveCount()
+
+ SkMetaData* fMetaData;
+
+ SkSurface_Base* fSurfaceBase;
+ SkSurface_Base* getSurfaceBase() const { return fSurfaceBase; }
+ void setSurfaceBase(SkSurface_Base* sb) {
+ fSurfaceBase = sb;
+ }
+ friend class SkSurface_Base;
+ friend class SkSurface_Gpu;
+
+ bool fDeviceCMDirty; // cleared by updateDeviceCMCache()
+ void updateDeviceCMCache();
+
+ void doSave();
+ void checkForDeferredSave();
+ void internalSetMatrix(const SkMatrix&);
+
+ friend class SkDrawIter; // needs setupDrawForLayerDevice()
+ friend class AutoDrawLooper;
+ friend class SkLua; // needs top layer size and offset
+ friend class SkDebugCanvas; // needs experimental fAllowSimplifyClip
+ friend class SkSurface_Raster; // needs getDevice()
+ friend class SkRecorder; // InitFlags
+ friend class SkLiteRecorder; // InitFlags
+ friend class SkNoSaveLayerCanvas; // InitFlags
+ friend class SkPictureImageFilter; // SkCanvas(SkBaseDevice*, SkSurfaceProps*, InitFlags)
+ friend class SkPictureRecord; // predrawNotify (why does it need it? <reed>)
+ friend class SkPicturePlayback; // SaveFlagsToSaveLayerFlags
+
+ enum InitFlags {
+ kDefault_InitFlags = 0,
+ kConservativeRasterClip_InitFlag = 1 << 0,
+ };
+ SkCanvas(const SkIRect& bounds, InitFlags);
+ SkCanvas(SkBaseDevice* device, InitFlags);
+
+ void resetForNextPicture(const SkIRect& bounds);
+
+ // needs gettotalclip()
+ friend class SkCanvasStateUtils;
+
+ // call this each time we attach ourselves to a device
+ // - constructor
+ // - internalSaveLayer
+ void setupDevice(SkBaseDevice*);
+
+ SkBaseDevice* init(SkBaseDevice*, InitFlags);
+
+ /**
+ * Gets the bounds of the top level layer in global canvas coordinates. We don't want this
+ * to be public because it exposes decisions about layer sizes that are internal to the canvas.
+ */
+ SkIRect getTopLayerBounds() const;
+
+ void internalDrawBitmapRect(const SkBitmap& bitmap, const SkRect* src,
+ const SkRect& dst, const SkPaint* paint,
+ SrcRectConstraint);
+ void internalDrawPaint(const SkPaint& paint);
+ void internalSaveLayer(const SaveLayerRec&, SaveLayerStrategy);
+ void internalDrawDevice(SkBaseDevice*, int x, int y, const SkPaint*);
+
+ // shared by save() and saveLayer()
+ void internalSave();
+ void internalRestore();
+ static void DrawRect(const SkDraw& draw, const SkPaint& paint,
+ const SkRect& r, SkScalar textSize);
+ static void DrawTextDecorations(const SkDraw& draw, const SkPaint& paint,
+ const char text[], size_t byteLength,
+ SkScalar x, SkScalar y);
+
+ // only for canvasutils
+ const SkRegion& internal_private_getTotalClip() const;
+
+ /*
+ * Returns true if drawing the specified rect (or all if it is null) with the specified
+ * paint (or default if null) would overwrite the entire root device of the canvas
+ * (i.e. the canvas' surface if it had one).
+ */
+ bool wouldOverwriteEntireSurface(const SkRect*, const SkPaint*, ShaderOverrideOpacity) const;
+
+ /**
+     * Returns true if the paint's imagefilter can be invoked directly, without needing a layer.
+ */
+ bool canDrawBitmapAsSprite(SkScalar x, SkScalar y, int w, int h, const SkPaint&);
+
+
+ /**
+     * Keep track of the device clip bounds and whether the matrix is scale-translate. This allows
+     * us to do a fast quick reject in the common case.
+ */
+ bool fIsScaleTranslate;
+ SkRect fDeviceClipBounds;
+
+ bool fAllowSoftClip;
+ bool fAllowSimplifyClip;
+ const bool fConservativeRasterClip;
+
+ class AutoValidateClip : ::SkNoncopyable {
+ public:
+ explicit AutoValidateClip(SkCanvas* canvas) : fCanvas(canvas) {
+ fCanvas->validateClip();
+ }
+ ~AutoValidateClip() { fCanvas->validateClip(); }
+
+ private:
+ const SkCanvas* fCanvas;
+ };
+
+#ifdef SK_DEBUG
+ void validateClip() const;
+#else
+ void validateClip() const {}
+#endif
+
+ typedef SkRefCnt INHERITED;
+};
+
+/** Stack helper class to automatically call restoreToCount() on the canvas
+ when this object goes out of scope. Use this to guarantee that the canvas
+ is restored to a known state.
+*/
+class SkAutoCanvasRestore : SkNoncopyable {
+public:
+ SkAutoCanvasRestore(SkCanvas* canvas, bool doSave) : fCanvas(canvas), fSaveCount(0) {
+ if (fCanvas) {
+ fSaveCount = canvas->getSaveCount();
+ if (doSave) {
+ canvas->save();
+ }
+ }
+ }
+ ~SkAutoCanvasRestore() {
+ if (fCanvas) {
+ fCanvas->restoreToCount(fSaveCount);
+ }
+ }
+
+ /**
+ * Perform the restore now, instead of waiting for the destructor. Will
+ * only do this once.
+ */
+ void restore() {
+ if (fCanvas) {
+ fCanvas->restoreToCount(fSaveCount);
+ fCanvas = NULL;
+ }
+ }
+
+private:
+ SkCanvas* fCanvas;
+ int fSaveCount;
+};
+#define SkAutoCanvasRestore(...) SK_REQUIRE_LOCAL_VAR(SkAutoCanvasRestore)
+
+class SkCanvasClipVisitor {
+public:
+ virtual ~SkCanvasClipVisitor();
+ virtual void clipRect(const SkRect&, SkCanvas::ClipOp, bool antialias) = 0;
+ virtual void clipRRect(const SkRRect&, SkCanvas::ClipOp, bool antialias) = 0;
+ virtual void clipPath(const SkPath&, SkCanvas::ClipOp, bool antialias) = 0;
+};
+
+#endif
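
As a quick orientation for the SkAutoCanvasRestore helper declared above, here is a minimal editorial sketch (not part of the patch); the function name drawShifted and the drawing calls are illustrative, assuming the public SkCanvas drawing API declared earlier in this header:

    #include "SkCanvas.h"
    #include "SkPaint.h"
    #include "SkRect.h"

    static void drawShifted(SkCanvas* canvas, const SkPaint& paint) {
        // doSave == true: save() is called here, restoreToCount() runs in the
        // destructor, so the translate below cannot leak out of this function.
        SkAutoCanvasRestore acr(canvas, true);
        canvas->translate(10, 10);
        canvas->drawRect(SkRect::MakeWH(50, 50), paint);
    }

Note that the SkAutoCanvasRestore(...) macro above exists to reject unnamed temporaries, so the guard must be bound to a named local variable as shown.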
diff --git a/gfx/skia/skia/include/core/SkChunkAlloc.h b/gfx/skia/skia/include/core/SkChunkAlloc.h
new file mode 100644
index 000000000..bb4ec8fae
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkChunkAlloc.h
@@ -0,0 +1,89 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkChunkAlloc_DEFINED
+#define SkChunkAlloc_DEFINED
+
+#include "SkTypes.h"
+
+class SkChunkAlloc : SkNoncopyable {
+public:
+ SkChunkAlloc(size_t minSize);
+ ~SkChunkAlloc();
+
+ /**
+ * Free up all allocated blocks. This invalidates all returned
+ * pointers.
+ */
+ void reset();
+ /**
+ * Reset to 0 used bytes preserving as much memory as possible.
+ * This invalidates all returned pointers.
+ */
+ void rewind();
+
+ enum AllocFailType {
+ kReturnNil_AllocFailType,
+ kThrow_AllocFailType
+ };
+
+ /**
+ * Allocates a memory block of size bytes.
+ * On success: returns a pointer to beginning of memory block that is
+ * 8 byte aligned. The content of allocated block is not initialized.
+ * On failure: calls abort() if called with kThrow_AllocFailType,
+ * otherwise returns NULL pointer.
+ */
+ void* alloc(size_t bytes, AllocFailType);
+
+ /**
+ * Shortcut for calling alloc with kThrow_AllocFailType.
+ */
+ void* allocThrow(size_t bytes) {
+ return this->alloc(bytes, kThrow_AllocFailType);
+ }
+
+    /** Call this to unalloc the pointer most recently returned by alloc(). On
+        success, the number of bytes freed is returned, or 0 if the block could
+        not be unallocated. This is a hint to the underlying allocator that
+        the previous allocation may be reused, but the implementation is free
+        to ignore this call (and return 0).
+ */
+ size_t unalloc(void* ptr);
+
+ size_t totalCapacity() const { return fTotalCapacity; }
+ size_t totalUsed() const { return fTotalUsed; }
+ SkDEBUGCODE(int blockCount() const { return fBlockCount; })
+ SkDEBUGCODE(size_t totalLost() const { return fTotalLost; })
+
+ /**
+ * Returns true if the specified address is within one of the chunks, and
+     *  has at least 1 byte following the address (i.e. if addr points to the
+ * end of a chunk, then contains() will return false).
+ */
+ bool contains(const void* addr) const;
+
+private:
+ struct Block;
+
+ Block* fBlock;
+ size_t fMinSize;
+ size_t fChunkSize;
+ size_t fTotalCapacity;
+ size_t fTotalUsed; // will be <= fTotalCapacity
+ SkDEBUGCODE(int fBlockCount;)
+ SkDEBUGCODE(size_t fTotalLost;) // will be <= fTotalCapacity
+
+ Block* newBlock(size_t bytes, AllocFailType ftype);
+ Block* addBlockIfNecessary(size_t bytes, AllocFailType ftype);
+
+ SkDEBUGCODE(void validate();)
+};
+
+#endif
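
A small editorial sketch (not from the patch) of how SkChunkAlloc can back a simple pool allocator; the Node type and buildList helper are made up for illustration:

    #include <new>
    #include "SkChunkAlloc.h"

    struct Node { int fValue; Node* fNext; };

    static Node* buildList(SkChunkAlloc* pool, int count) {
        Node* head = nullptr;
        for (int i = 0; i < count; ++i) {
            // allocThrow() aborts on failure; the returned block is 8-byte aligned.
            void* mem = pool->allocThrow(sizeof(Node));
            head = new (mem) Node { i, head };   // placement-new into the chunk
        }
        return head;
    }

    // SkChunkAlloc pool(4096);            // minimum block size
    // Node* list = buildList(&pool, 100);
    // pool.reset();                       // frees every node at once; all pointers become invalid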
diff --git a/gfx/skia/skia/include/core/SkClipOp.h b/gfx/skia/skia/include/core/SkClipOp.h
new file mode 100644
index 000000000..2e4fbbf86
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkClipOp.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkClipOp_DEFINED
+#define SkClipOp_DEFINED
+
+#include "SkTypes.h"
+
+// these are kept in SkRegion::Op order for now ...
+enum SkClipOp {
+ kDifference_SkClipOp = 0,
+ kIntersect_SkClipOp = 1,
+
+ // Goal: remove these, since they can grow the current clip
+
+ kUnion_SkClipOp = 2,
+ kXOR_SkClipOp = 3,
+ kReverseDifference_SkClipOp = 4,
+ kReplace_SkClipOp = 5,
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkClipStack.h b/gfx/skia/skia/include/core/SkClipStack.h
new file mode 100644
index 000000000..7a8eb5ca8
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkClipStack.h
@@ -0,0 +1,520 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkClipStack_DEFINED
+#define SkClipStack_DEFINED
+
+#include "SkCanvas.h"
+#include "SkDeque.h"
+#include "SkPath.h"
+#include "SkRect.h"
+#include "SkRRect.h"
+#include "SkRegion.h"
+#include "SkTLazy.h"
+
+class SkCanvasClipVisitor;
+
+// Because a single save/restore state can have multiple clips, this class
+// stores the stack depth (fSaveCount) and clips (fDeque) separately.
+// Each clip in fDeque stores the stack state to which it belongs
+// (i.e., the fSaveCount in force when it was added). Restores are thus
+// implemented by removing clips from fDeque that have an fSaveCount larger
+// than the freshly decremented count.
+class SK_API SkClipStack : public SkNVRefCnt<SkClipStack> {
+public:
+ enum BoundsType {
+ // The bounding box contains all the pixels that can be written to
+ kNormal_BoundsType,
+ // The bounding box contains all the pixels that cannot be written to.
+ // The real bound extends out to infinity and all the pixels outside
+ // of the bound can be written to. Note that some of the pixels inside
+ // the bound may also be writeable but all pixels that cannot be
+ // written to are guaranteed to be inside.
+ kInsideOut_BoundsType
+ };
+
+ class Element {
+ public:
+ enum Type {
+ //!< This element makes the clip empty (regardless of previous elements).
+ kEmpty_Type,
+ //!< This element combines a rect with the current clip using a set operation
+ kRect_Type,
+ //!< This element combines a round-rect with the current clip using a set operation
+ kRRect_Type,
+ //!< This element combines a path with the current clip using a set operation
+ kPath_Type,
+
+ kLastType = kPath_Type
+ };
+ static const int kTypeCnt = kLastType + 1;
+
+ Element() {
+ this->initCommon(0, SkCanvas::kReplace_Op, false);
+ this->setEmpty();
+ }
+
+ Element(const Element&);
+
+ Element(const SkRect& rect, SkCanvas::ClipOp op, bool doAA) {
+ this->initRect(0, rect, op, doAA);
+ }
+
+ Element(const SkRRect& rrect, SkCanvas::ClipOp op, bool doAA) {
+ this->initRRect(0, rrect, op, doAA);
+ }
+
+ Element(const SkPath& path, SkCanvas::ClipOp op, bool doAA) {
+ this->initPath(0, path, op, doAA);
+ }
+
+ bool operator== (const Element& element) const;
+ bool operator!= (const Element& element) const { return !(*this == element); }
+
+ //!< Call to get the type of the clip element.
+ Type getType() const { return fType; }
+
+ //!< Call to get the save count associated with this clip element.
+ int getSaveCount() const { return fSaveCount; }
+
+ //!< Call if getType() is kPath to get the path.
+ const SkPath& getPath() const { SkASSERT(kPath_Type == fType); return *fPath.get(); }
+
+ //!< Call if getType() is kRRect to get the round-rect.
+ const SkRRect& getRRect() const { SkASSERT(kRRect_Type == fType); return fRRect; }
+
+ //!< Call if getType() is kRect to get the rect.
+ const SkRect& getRect() const {
+ SkASSERT(kRect_Type == fType && (fRRect.isRect() || fRRect.isEmpty()));
+ return fRRect.getBounds();
+ }
+
+ //!< Call if getType() is not kEmpty to get the set operation used to combine this element.
+ SkCanvas::ClipOp getOp() const { return fOp; }
+
+ //!< Call to get the element as a path, regardless of its type.
+ void asPath(SkPath* path) const;
+
+ //!< Call if getType() is not kPath to get the element as a round rect.
+ const SkRRect& asRRect() const { SkASSERT(kPath_Type != fType); return fRRect; }
+
+ /** If getType() is not kEmpty this indicates whether the clip shape should be anti-aliased
+ when it is rasterized. */
+ bool isAA() const { return fDoAA; }
+
+ //!< Inverts the fill of the clip shape. Note that a kEmpty element remains kEmpty.
+ void invertShapeFillType();
+
+ //!< Sets the set operation represented by the element.
+ void setOp(SkCanvas::ClipOp op) { fOp = op; }
+
+ /** The GenID can be used by clip stack clients to cache representations of the clip. The
+ ID corresponds to the set of clip elements up to and including this element within the
+            stack, not to the element itself. That is, the same clip path in different stacks will
+            have different IDs, since the elements produce different clip results in the context of
+            their stacks. */
+ int32_t getGenID() const { SkASSERT(kInvalidGenID != fGenID); return fGenID; }
+
+ /**
+ * Gets the bounds of the clip element, either the rect or path bounds. (Whether the shape
+ * is inverse filled is not considered.)
+ */
+ const SkRect& getBounds() const {
+ static const SkRect kEmpty = { 0, 0, 0, 0 };
+ switch (fType) {
+ case kRect_Type: // fallthrough
+ case kRRect_Type:
+ return fRRect.getBounds();
+ case kPath_Type:
+ return fPath.get()->getBounds();
+ case kEmpty_Type:
+ return kEmpty;
+ default:
+ SkDEBUGFAIL("Unexpected type.");
+ return kEmpty;
+ }
+ }
+
+ /**
+ * Conservatively checks whether the clip shape contains the rect param. (Whether the shape
+ * is inverse filled is not considered.)
+ */
+ bool contains(const SkRect& rect) const {
+ switch (fType) {
+ case kRect_Type:
+ return this->getRect().contains(rect);
+ case kRRect_Type:
+ return fRRect.contains(rect);
+ case kPath_Type:
+ return fPath.get()->conservativelyContainsRect(rect);
+ case kEmpty_Type:
+ return false;
+ default:
+ SkDEBUGFAIL("Unexpected type.");
+ return false;
+ }
+ }
+
+ bool contains(const SkRRect& rrect) const {
+ switch (fType) {
+ case kRect_Type:
+ return this->getRect().contains(rrect.getBounds());
+ case kRRect_Type:
+ // We don't currently have a generalized rrect-rrect containment.
+ return fRRect.contains(rrect.getBounds()) || rrect == fRRect;
+ case kPath_Type:
+ return fPath.get()->conservativelyContainsRect(rrect.getBounds());
+ case kEmpty_Type:
+ return false;
+ default:
+ SkDEBUGFAIL("Unexpected type.");
+ return false;
+ }
+ }
+
+ /**
+ * Is the clip shape inverse filled.
+ */
+ bool isInverseFilled() const {
+ return kPath_Type == fType && fPath.get()->isInverseFillType();
+ }
+
+ /**
+ * Replay this clip into the visitor.
+ */
+ void replay(SkCanvasClipVisitor*) const;
+
+#ifdef SK_DEBUG
+ /**
+ * Dumps the element to SkDebugf. This is intended for Skia development debugging
+ * Don't rely on the existence of this function or the formatting of its output.
+ */
+ void dump() const;
+#endif
+
+ private:
+ friend class SkClipStack;
+
+ SkTLazy<SkPath> fPath;
+ SkRRect fRRect;
+ int fSaveCount; // save count of stack when this element was added.
+ SkCanvas::ClipOp fOp;
+ Type fType;
+ bool fDoAA;
+
+ /* fFiniteBoundType and fFiniteBound are used to incrementally update the clip stack's
+ bound. When fFiniteBoundType is kNormal_BoundsType, fFiniteBound represents the
+ conservative bounding box of the pixels that aren't clipped (i.e., any pixels that can be
+ drawn to are inside the bound). When fFiniteBoundType is kInsideOut_BoundsType (which
+ occurs when a clip is inverse filled), fFiniteBound represents the conservative bounding
+ box of the pixels that _are_ clipped (i.e., any pixels that cannot be drawn to are inside
+ the bound). When fFiniteBoundType is kInsideOut_BoundsType the actual bound is the
+ infinite plane. This behavior of fFiniteBoundType and fFiniteBound is required so that we
+ can capture the cancelling out of the extensions to infinity when two inverse filled
+ clips are Booleaned together. */
+ SkClipStack::BoundsType fFiniteBoundType;
+ SkRect fFiniteBound;
+
+        // When this element is applied to the previous elements in the stack, is the result
+        // known to be equivalent to a single rect intersection? In other words, is the clip
+        // effectively a rectangle?
+ bool fIsIntersectionOfRects;
+
+ int fGenID;
+
+ Element(int saveCount) {
+ this->initCommon(saveCount, SkCanvas::kReplace_Op, false);
+ this->setEmpty();
+ }
+
+ Element(int saveCount, const SkRRect& rrect, SkCanvas::ClipOp op, bool doAA) {
+ this->initRRect(saveCount, rrect, op, doAA);
+ }
+
+ Element(int saveCount, const SkRect& rect, SkCanvas::ClipOp op, bool doAA) {
+ this->initRect(saveCount, rect, op, doAA);
+ }
+
+ Element(int saveCount, const SkPath& path, SkCanvas::ClipOp op, bool doAA) {
+ this->initPath(saveCount, path, op, doAA);
+ }
+
+ void initCommon(int saveCount, SkCanvas::ClipOp op, bool doAA) {
+ fSaveCount = saveCount;
+ fOp = op;
+ fDoAA = doAA;
+ // A default of inside-out and empty bounds means the bounds are effectively void as it
+ // indicates that nothing is known to be outside the clip.
+ fFiniteBoundType = kInsideOut_BoundsType;
+ fFiniteBound.setEmpty();
+ fIsIntersectionOfRects = false;
+ fGenID = kInvalidGenID;
+ }
+
+ void initRect(int saveCount, const SkRect& rect, SkCanvas::ClipOp op, bool doAA) {
+ fRRect.setRect(rect);
+ fType = kRect_Type;
+ this->initCommon(saveCount, op, doAA);
+ }
+
+ void initRRect(int saveCount, const SkRRect& rrect, SkCanvas::ClipOp op, bool doAA) {
+ SkRRect::Type type = rrect.getType();
+ fRRect = rrect;
+ if (SkRRect::kRect_Type == type || SkRRect::kEmpty_Type == type) {
+ fType = kRect_Type;
+ } else {
+ fType = kRRect_Type;
+ }
+ this->initCommon(saveCount, op, doAA);
+ }
+
+ void initPath(int saveCount, const SkPath& path, SkCanvas::ClipOp op, bool doAA);
+
+ void setEmpty();
+
+ // All Element methods below are only used within SkClipStack.cpp
+ inline void checkEmpty() const;
+ inline bool canBeIntersectedInPlace(int saveCount, SkCanvas::ClipOp op) const;
+ /* This method checks to see if two rect clips can be safely merged into one. The issue here
+ is that to be strictly correct all the edges of the resulting rect must have the same
+ anti-aliasing. */
+ bool rectRectIntersectAllowed(const SkRect& newR, bool newAA) const;
+ /** Determines possible finite bounds for the Element given the previous element of the
+ stack */
+ void updateBoundAndGenID(const Element* prior);
+ // The different combination of fill & inverse fill when combining bounding boxes
+ enum FillCombo {
+ kPrev_Cur_FillCombo,
+ kPrev_InvCur_FillCombo,
+ kInvPrev_Cur_FillCombo,
+ kInvPrev_InvCur_FillCombo
+ };
+ // per-set operation functions used by updateBoundAndGenID().
+ inline void combineBoundsDiff(FillCombo combination, const SkRect& prevFinite);
+ inline void combineBoundsXOR(int combination, const SkRect& prevFinite);
+ inline void combineBoundsUnion(int combination, const SkRect& prevFinite);
+ inline void combineBoundsIntersection(int combination, const SkRect& prevFinite);
+ inline void combineBoundsRevDiff(int combination, const SkRect& prevFinite);
+ };
+
+ SkClipStack();
+ SkClipStack(const SkClipStack& b);
+ ~SkClipStack();
+
+ SkClipStack& operator=(const SkClipStack& b);
+ bool operator==(const SkClipStack& b) const;
+ bool operator!=(const SkClipStack& b) const { return !(*this == b); }
+
+ void reset();
+
+ int getSaveCount() const { return fSaveCount; }
+ void save();
+ void restore();
+
+ /**
+ * getBounds places the current finite bound in its first parameter. In its
+ * second, it indicates which kind of bound is being returned. If
+ * 'canvFiniteBound' is a normal bounding box then it encloses all writeable
+ * pixels. If 'canvFiniteBound' is an inside out bounding box then it
+ * encloses all the un-writeable pixels and the true/normal bound is the
+ * infinite plane. isIntersectionOfRects is an optional parameter
+ * that is true if 'canvFiniteBound' resulted from an intersection of rects.
+ */
+ void getBounds(SkRect* canvFiniteBound,
+ BoundsType* boundType,
+ bool* isIntersectionOfRects = NULL) const;
+
+ /**
+ * Returns true if the input (r)rect in device space is entirely contained
+ * by the clip. A return value of false does not guarantee that the (r)rect
+ * is not contained by the clip.
+ */
+ bool quickContains(const SkRect& devRect) const {
+ return this->isWideOpen() || this->internalQuickContains(devRect);
+ }
+
+ bool quickContains(const SkRRect& devRRect) const {
+ return this->isWideOpen() || this->internalQuickContains(devRRect);
+ }
+
+ /**
+ * Flattens the clip stack into a single SkPath. Returns true if any of
+ * the clip stack components requires anti-aliasing.
+ */
+ bool asPath(SkPath* path) const;
+
+ void clipDevRect(const SkIRect& ir, SkCanvas::ClipOp op) {
+ SkRect r;
+ r.set(ir);
+ this->clipRect(r, SkMatrix::I(), op, false);
+ }
+ void clipRect(const SkRect&, const SkMatrix& matrix, SkCanvas::ClipOp, bool doAA);
+ void clipRRect(const SkRRect&, const SkMatrix& matrix, SkCanvas::ClipOp, bool doAA);
+ void clipPath(const SkPath&, const SkMatrix& matrix, SkCanvas::ClipOp, bool doAA);
+ // An optimized version of clipDevRect(emptyRect, kIntersect, ...)
+ void clipEmpty();
+
+ /**
+ * isWideOpen returns true if the clip state corresponds to the infinite
+ * plane (i.e., draws are not limited at all)
+ */
+ bool isWideOpen() const { return this->getTopmostGenID() == kWideOpenGenID; }
+
+ /**
+ * This method quickly and conservatively determines whether the entire stack is equivalent to
+ * intersection with a rrect given a bounds, where the rrect must not contain the entire bounds.
+ *
+ * @param bounds A bounds on what will be drawn through the clip. The clip only need be
+     *               equivalent to an intersection with a rrect for draws within the bounds. The
+ * returned rrect must intersect the bounds but need not be contained by the
+ * bounds.
+ * @param rrect If return is true rrect will contain the rrect equivalent to the stack.
+ * @param aa If return is true aa will indicate whether the equivalent rrect clip is
+ * antialiased.
+ * @return true if the stack is equivalent to a single rrect intersect clip, false otherwise.
+ */
+ bool isRRect(const SkRect& bounds, SkRRect* rrect, bool* aa) const;
+
+ /**
+ * The generation ID has three reserved values to indicate special
+ * (potentially ignorable) cases
+ */
+ static const int32_t kInvalidGenID = 0; //!< Invalid id that is never returned by
+ //!< SkClipStack. Useful when caching clips
+ //!< based on GenID.
+ static const int32_t kEmptyGenID = 1; // no pixels writeable
+ static const int32_t kWideOpenGenID = 2; // all pixels writeable
+
+ int32_t getTopmostGenID() const;
+
+#ifdef SK_DEBUG
+ /**
+ * Dumps the contents of the clip stack to SkDebugf. This is intended for Skia development
+ * debugging. Don't rely on the existence of this function or the formatting of its output.
+ */
+ void dump() const;
+#endif
+
+public:
+ class Iter {
+ public:
+ enum IterStart {
+ kBottom_IterStart = SkDeque::Iter::kFront_IterStart,
+ kTop_IterStart = SkDeque::Iter::kBack_IterStart
+ };
+
+ /**
+         * Creates an uninitialized iterator. Must be reset() before use.
+ */
+ Iter();
+
+ Iter(const SkClipStack& stack, IterStart startLoc);
+
+ /**
+ * Return the clip element for this iterator. If next()/prev() returns NULL, then the
+ * iterator is done.
+ */
+ const Element* next();
+ const Element* prev();
+
+ /**
+         * Moves the iterator to the topmost element with the specified ClipOp and returns that
+ * element. If no clip element with that op is found, the first element is returned.
+ */
+ const Element* skipToTopmost(SkCanvas::ClipOp op);
+
+ /**
+ * Restarts the iterator on a clip stack.
+ */
+ void reset(const SkClipStack& stack, IterStart startLoc);
+
+ private:
+ const SkClipStack* fStack;
+ SkDeque::Iter fIter;
+ };
+
+ /**
+ * The B2TIter iterates from the bottom of the stack to the top.
+ * It inherits privately from Iter to prevent access to reverse iteration.
+ */
+ class B2TIter : private Iter {
+ public:
+ B2TIter() {}
+
+ /**
+ * Wrap Iter's 2 parameter ctor to force initialization to the
+ * beginning of the deque/bottom of the stack
+ */
+ B2TIter(const SkClipStack& stack)
+ : INHERITED(stack, kBottom_IterStart) {
+ }
+
+ using Iter::next;
+
+ /**
+ * Wrap Iter::reset to force initialization to the
+ * beginning of the deque/bottom of the stack
+ */
+ void reset(const SkClipStack& stack) {
+ this->INHERITED::reset(stack, kBottom_IterStart);
+ }
+
+ private:
+
+ typedef Iter INHERITED;
+ };
+
+ /**
+ * GetConservativeBounds returns a conservative bound of the current clip.
+ * Since this could be the infinite plane (if inverse fills were involved) the
+ * maxWidth and maxHeight parameters can be used to limit the returned bound
+ * to the expected drawing area. Similarly, the offsetX and offsetY parameters
+ * allow the caller to offset the returned bound to account for translated
+ * drawing areas (i.e., those resulting from a saveLayer). For finite bounds,
+ * the translation (+offsetX, +offsetY) is applied before the clamp to the
+ * maximum rectangle: [0,maxWidth) x [0,maxHeight).
+ * isIntersectionOfRects is an optional parameter that is true when
+ * 'devBounds' is the result of an intersection of rects. In this case
+ * 'devBounds' is the exact answer/clip.
+ */
+ void getConservativeBounds(int offsetX,
+ int offsetY,
+ int maxWidth,
+ int maxHeight,
+ SkRect* devBounds,
+ bool* isIntersectionOfRects = NULL) const;
+
+private:
+ friend class Iter;
+
+ SkDeque fDeque;
+ int fSaveCount;
+
+ // Generation ID for the clip stack. This is incremented for each
+ // clipDevRect and clipDevPath call. 0 is reserved to indicate an
+ // invalid ID.
+ static int32_t gGenID;
+
+ bool internalQuickContains(const SkRect& devRect) const;
+ bool internalQuickContains(const SkRRect& devRRect) const;
+
+ /**
+ * Helper for clipDevPath, etc.
+ */
+ void pushElement(const Element& element);
+
+ /**
+ * Restore the stack back to the specified save count.
+ */
+ void restoreTo(int saveCount);
+
+ /**
+ * Return the next unique generation ID.
+ */
+ static int32_t GetNextGenID();
+};
+
+#endif
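
To make the Element/Iter machinery above concrete, here is a brief editorial sketch (not part of the patch) that intersects two rect clips and then walks the stack bottom-to-top. The helper name clipAllowsRect is illustrative, and SkCanvas::kIntersect_Op is assumed to be available as one of this revision's SkCanvas::ClipOp constants:

    #include "SkClipStack.h"
    #include "SkMatrix.h"

    static bool clipAllowsRect(const SkRect& query) {
        SkClipStack stack;
        stack.clipRect(SkRect::MakeWH(100, 100), SkMatrix::I(),
                       SkCanvas::kIntersect_Op, true  /*doAA*/);
        stack.clipRect(SkRect::MakeXYWH(25, 25, 50, 50), SkMatrix::I(),
                       SkCanvas::kIntersect_Op, false /*doAA*/);

        // Every element pushed above is a rect, added at save count 0.
        SkClipStack::B2TIter iter(stack);
        while (const SkClipStack::Element* element = iter.next()) {
            SkASSERT(SkClipStack::Element::kRect_Type == element->getType());
        }

        // Conservative query: true means the rect is definitely inside the clip.
        return stack.quickContains(query);
    }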
diff --git a/gfx/skia/skia/include/core/SkColor.h b/gfx/skia/skia/include/core/SkColor.h
new file mode 100644
index 000000000..8f2776da6
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkColor.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColor_DEFINED
+#define SkColor_DEFINED
+
+#include "SkScalar.h"
+#include "SkPoint3.h"
+#include "SkTypes.h"
+
+/** \file SkColor.h
+
+ Types and macros for colors
+*/
+
+/** 8-bit type for an alpha value. 0xFF is 100% opaque, 0x00 is 100% transparent.
+*/
+typedef uint8_t SkAlpha;
+/** 32 bit ARGB color value, not premultiplied. The color components are always in
+ a known order. This is different from SkPMColor, which has its bytes in a configuration
+ dependent order, to match the format of kARGB32 bitmaps. SkColor is the type used to
+ specify colors in SkPaint and in gradients.
+*/
+typedef uint32_t SkColor;
+
+/** Return a SkColor value from 8 bit component values
+*/
+static inline SkColor SkColorSetARGBInline(U8CPU a, U8CPU r, U8CPU g, U8CPU b)
+{
+ SkASSERT(a <= 255 && r <= 255 && g <= 255 && b <= 255);
+
+ return (a << 24) | (r << 16) | (g << 8) | (b << 0);
+}
+
+#define SkColorSetARGBMacro(a, r, g, b) \
+ static_cast<SkColor>( \
+ (static_cast<U8CPU>(a) << 24) | \
+ (static_cast<U8CPU>(r) << 16) | \
+ (static_cast<U8CPU>(g) << 8) | \
+ (static_cast<U8CPU>(b) << 0))
+
+/** gcc will generate static initializers for code of this form:
+ * static const SkColor kMyColor = SkColorSetARGB(0xFF, 0x01, 0x02, 0x03)
+ * if SkColorSetARGB() is a static inline, but not if it's a macro.
+ */
+#if defined(NDEBUG)
+#define SkColorSetARGB(a, r, g, b) SkColorSetARGBMacro(a, r, g, b)
+#else
+#define SkColorSetARGB(a, r, g, b) SkColorSetARGBInline(a, r, g, b)
+#endif
+
+/** Return a SkColor value from 8 bit component values, with an implied value
+ of 0xFF for alpha (fully opaque)
+*/
+#define SkColorSetRGB(r, g, b) SkColorSetARGB(0xFF, r, g, b)
+
+/** return the alpha byte from a SkColor value */
+#define SkColorGetA(color) (((color) >> 24) & 0xFF)
+/** return the red byte from a SkColor value */
+#define SkColorGetR(color) (((color) >> 16) & 0xFF)
+/** return the green byte from a SkColor value */
+#define SkColorGetG(color) (((color) >> 8) & 0xFF)
+/** return the blue byte from a SkColor value */
+#define SkColorGetB(color) (((color) >> 0) & 0xFF)
+
+static inline SkColor SkColorSetA(SkColor c, U8CPU a) {
+ return (c & 0x00FFFFFF) | (a << 24);
+}
+
+// common colors
+
+#define SK_AlphaTRANSPARENT 0x00 //!< transparent SkAlpha value
+#define SK_AlphaOPAQUE 0xFF //!< opaque SkAlpha value
+
+#define SK_ColorTRANSPARENT 0x00000000 //!< transparent SkColor value
+
+#define SK_ColorBLACK 0xFF000000 //!< black SkColor value
+#define SK_ColorDKGRAY 0xFF444444 //!< dark gray SkColor value
+#define SK_ColorGRAY 0xFF888888 //!< gray SkColor value
+#define SK_ColorLTGRAY 0xFFCCCCCC //!< light gray SkColor value
+#define SK_ColorWHITE 0xFFFFFFFF //!< white SkColor value
+
+#define SK_ColorRED 0xFFFF0000 //!< red SkColor value
+#define SK_ColorGREEN 0xFF00FF00 //!< green SkColor value
+#define SK_ColorBLUE 0xFF0000FF //!< blue SkColor value
+#define SK_ColorYELLOW 0xFFFFFF00 //!< yellow SkColor value
+#define SK_ColorCYAN 0xFF00FFFF //!< cyan SkColor value
+#define SK_ColorMAGENTA 0xFFFF00FF //!< magenta SkColor value
+
+////////////////////////////////////////////////////////////////////////
+
+/** Convert RGB components to HSV.
+ hsv[0] is Hue [0 .. 360)
+ hsv[1] is Saturation [0...1]
+ hsv[2] is Value [0...1]
+ @param red red component value [0..255]
+ @param green green component value [0..255]
+ @param blue blue component value [0..255]
+ @param hsv 3 element array which holds the resulting HSV components.
+*/
+SK_API void SkRGBToHSV(U8CPU red, U8CPU green, U8CPU blue, SkScalar hsv[3]);
+
+/** Convert the argb color to its HSV components.
+ hsv[0] is Hue [0 .. 360)
+ hsv[1] is Saturation [0...1]
+ hsv[2] is Value [0...1]
+ @param color the argb color to convert. Note: the alpha component is ignored.
+ @param hsv 3 element array which holds the resulting HSV components.
+*/
+static inline void SkColorToHSV(SkColor color, SkScalar hsv[3]) {
+ SkRGBToHSV(SkColorGetR(color), SkColorGetG(color), SkColorGetB(color), hsv);
+}
+
+/** Convert HSV components to an ARGB color. The alpha component is passed through unchanged.
+ hsv[0] is Hue [0 .. 360)
+ hsv[1] is Saturation [0...1]
+ hsv[2] is Value [0...1]
+ If hsv values are out of range, they are pinned.
+ @param alpha the alpha component of the returned argb color.
+ @param hsv 3 element array which holds the input HSV components.
+ @return the resulting argb color
+*/
+SK_API SkColor SkHSVToColor(U8CPU alpha, const SkScalar hsv[3]);
+
+/** Convert HSV components to an ARGB color. The alpha component is set to 0xFF.
+ hsv[0] is Hue [0 .. 360)
+ hsv[1] is Saturation [0...1]
+ hsv[2] is Value [0...1]
+ If hsv values are out of range, they are pinned.
+ @param hsv 3 element array which holds the input HSV components.
+ @return the resulting argb color
+*/
+static inline SkColor SkHSVToColor(const SkScalar hsv[3]) {
+ return SkHSVToColor(0xFF, hsv);
+}
+
+////////////////////////////////////////////////////////////////////////
+
+/** 32 bit ARGB color value, premultiplied. The byte order for this value is
+ configuration dependent, matching the format of kARGB32 bitmaps. This is different
+ from SkColor, which is nonpremultiplied, and is always in the same byte order.
+*/
+typedef uint32_t SkPMColor;
+
+/** Return a SkPMColor value from unpremultiplied 8 bit component values
+*/
+SK_API SkPMColor SkPreMultiplyARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b);
+/** Return a SkPMColor value from a SkColor value. This is done by multiplying the color
+ components by the color's alpha, and by arranging the bytes in a configuration
+ dependent order, to match the format of kARGB32 bitmaps.
+*/
+SK_API SkPMColor SkPreMultiplyColor(SkColor c);
+
+/** Define a function pointer type for combining two premultiplied colors
+*/
+typedef SkPMColor (*SkXfermodeProc)(SkPMColor src, SkPMColor dst);
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+struct SkPM4f;
+
+/*
+ * The float values are 0...1 unpremultiplied
+ */
+struct SkColor4f {
+ float fR;
+ float fG;
+ float fB;
+ float fA;
+
+ bool operator==(const SkColor4f& other) const {
+ return fA == other.fA && fR == other.fR && fG == other.fG && fB == other.fB;
+ }
+ bool operator!=(const SkColor4f& other) const {
+ return !(*this == other);
+ }
+
+ const float* vec() const { return &fR; }
+ float* vec() { return &fR; }
+
+ static SkColor4f Pin(float r, float g, float b, float a);
+ static SkColor4f FromColor(SkColor);
+ static SkColor4f FromColor3f(SkColor3f, float a);
+
+ SkColor toSkColor() const;
+
+ SkColor4f pin() const {
+ return Pin(fR, fG, fB, fA);
+ }
+
+ SkPM4f premul() const;
+};
+
+#endif
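
A short editorial sketch (not in the patch) exercising the packing macros and HSV helpers above; colorBasics is an illustrative name:

    #include "SkColor.h"

    static void colorBasics() {
        // Pack an opaque orange and read the components back.
        SkColor c = SkColorSetARGB(0xFF, 0xFF, 0x80, 0x00);
        SkASSERT(SkColorGetA(c) == 0xFF && SkColorGetR(c) == 0xFF);
        SkASSERT(SkColorGetG(c) == 0x80 && SkColorGetB(c) == 0x00);

        // Hue is in [0, 360); saturation and value are in [0, 1].
        SkScalar hsv[3];
        SkColorToHSV(c, hsv);
        SkColor back = SkHSVToColor(SkColorGetA(c), hsv);

        // Premultiply when an SkPMColor is required downstream.
        SkPMColor pm = SkPreMultiplyColor(back);
        (void)pm;
    }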
diff --git a/gfx/skia/skia/include/core/SkColorFilter.h b/gfx/skia/skia/include/core/SkColorFilter.h
new file mode 100644
index 000000000..5a23a343a
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkColorFilter.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorFilter_DEFINED
+#define SkColorFilter_DEFINED
+
+#include "SkColor.h"
+#include "SkFlattenable.h"
+#include "SkRefCnt.h"
+#include "SkXfermode.h"
+
+class GrContext;
+class GrFragmentProcessor;
+class SkBitmap;
+class SkRasterPipeline;
+
+/**
+ * ColorFilters are optional objects in the drawing pipeline. When present in
+ * a paint, they are called with the "src" colors, and return new colors, which
+ * are then passed on to the next stage (either ImageFilter or Xfermode).
+ *
+ * All subclasses are required to be reentrant-safe: it must be legal to share
+ * the same instance between several threads.
+ */
+class SK_API SkColorFilter : public SkFlattenable {
+public:
+ /**
+ * If the filter can be represented by a source color plus Mode, this
+ * returns true, and sets (if not NULL) the color and mode appropriately.
+ * If not, this returns false and ignores the parameters.
+ */
+ virtual bool asColorMode(SkColor* color, SkXfermode::Mode* mode) const;
+
+ /**
+ * If the filter can be represented by a 5x4 matrix, this
+ * returns true, and sets the matrix appropriately.
+ * If not, this returns false and ignores the parameter.
+ */
+ virtual bool asColorMatrix(SkScalar matrix[20]) const;
+
+ /**
+     * If the filter can be represented by a per-component table, return true,
+     * and if table is not null, copy the bitmap containing the table into it.
+     *
+     * The table bitmap will be in SkBitmap::kA8_Config. Each row corresponds
+     * to one component, in ARGB order. e.g. row[0] == alpha, row[1] == red,
+ * etc. To transform a color, you (logically) perform the following:
+ *
+ * a' = *table.getAddr8(a, 0);
+ * r' = *table.getAddr8(r, 1);
+ * g' = *table.getAddr8(g, 2);
+ * b' = *table.getAddr8(b, 3);
+ *
+ * The original component value is the horizontal index for a given row,
+ * and the stored value at that index is the new value for that component.
+ */
+ virtual bool asComponentTable(SkBitmap* table) const;
+
+ /** Called with a scanline of colors, as if there was a shader installed.
+ The implementation writes out its filtered version into result[].
+        Note: src and result may be the same buffer.
+ @param src array of colors, possibly generated by a shader
+ @param count the number of entries in the src[] and result[] arrays
+ @param result written by the filter
+ */
+ virtual void filterSpan(const SkPMColor src[], int count, SkPMColor result[]) const = 0;
+
+ virtual void filterSpan4f(const SkPM4f src[], int count, SkPM4f result[]) const;
+
+ bool appendStages(SkRasterPipeline*) const;
+
+ enum Flags {
+ /** If set the filter methods will not change the alpha channel of the colors.
+ */
+ kAlphaUnchanged_Flag = 1 << 0,
+ };
+
+ /** Returns the flags for this filter. Override in subclasses to return custom flags.
+ */
+ virtual uint32_t getFlags() const { return 0; }
+
+ /**
+     * If this subclass can optimally create a composition with the inner filter, return it as
+ * a new filter (which the caller must unref() when it is done). If no such optimization
+ * is known, return NULL.
+ *
+ * e.g. result(color) == this_filter(inner(color))
+ */
+ virtual sk_sp<SkColorFilter> makeComposed(sk_sp<SkColorFilter>) const { return nullptr; }
+
+ /**
+ * Apply this colorfilter to the specified SkColor. This routine handles
+ * converting to SkPMColor, calling the filter, and then converting back
+ * to SkColor. This method is not virtual, but will call filterSpan()
+ * which is virtual.
+ */
+ SkColor filterColor(SkColor) const;
+
+ /**
+ * Filters a single color.
+ */
+ SkColor4f filterColor4f(const SkColor4f&) const;
+
+ /** Create a colorfilter that uses the specified color and mode.
+ If the Mode is DST, this function will return NULL (since that
+ mode will have no effect on the result).
+ @param c The source color used with the specified mode
+ @param mode The xfermode mode that is applied to each color in
+ the colorfilter's filterSpan[16,32] methods
+ @return colorfilter object that applies the src color and mode,
+ or NULL if the mode will have no effect.
+ */
+ static sk_sp<SkColorFilter> MakeModeFilter(SkColor c, SkXfermode::Mode mode);
+ static sk_sp<SkColorFilter> MakeModeFilter(SkColor c, SkBlendMode mode) {
+ return MakeModeFilter(c, (SkXfermode::Mode)mode);
+ }
+
+ /** Construct a colorfilter whose effect is to first apply the inner filter and then apply
+     *  the outer filter to the result of the inner filter.
+ * The reference counts for outer and inner are incremented.
+ *
+ * Due to internal limits, it is possible that this will return NULL, so the caller must
+ * always check.
+ */
+ static sk_sp<SkColorFilter> MakeComposeFilter(sk_sp<SkColorFilter> outer,
+ sk_sp<SkColorFilter> inner);
+
+ /** Construct a color filter that transforms a color by a 4x5 matrix. The matrix is in row-
+ * major order and the translation column is specified in unnormalized, 0...255, space.
+ */
+ static sk_sp<SkColorFilter> MakeMatrixFilterRowMajor255(const SkScalar array[20]);
+
+#ifdef SK_SUPPORT_LEGACY_COLORFILTER_PTR
+ static SkColorFilter* CreateModeFilter(SkColor c, SkXfermode::Mode mode) {
+ return MakeModeFilter(c, mode).release();
+ }
+ static SkColorFilter* CreateComposeFilter(SkColorFilter* outer, SkColorFilter* inner) {
+ return MakeComposeFilter(sk_ref_sp(outer), sk_ref_sp(inner)).release();
+ }
+ static SkColorFilter* CreateMatrixFilterRowMajor255(const SkScalar array[20]) {
+ return MakeMatrixFilterRowMajor255(array).release();
+ }
+ virtual SkColorFilter* newComposed(const SkColorFilter* inner) const {
+ return this->makeComposed(sk_ref_sp(const_cast<SkColorFilter*>(inner))).release();
+ }
+#endif
+
+#if SK_SUPPORT_GPU
+ /**
+ * A subclass may implement this factory function to work with the GPU backend. It returns
+     * a GrFragmentProcessor that implements the color filter in GPU shader code.
+ *
+ * The fragment processor receives a premultiplied input color and produces a premultiplied
+ * output color.
+ *
+ * A null return indicates that the color filter isn't implemented for the GPU backend.
+ */
+ virtual sk_sp<GrFragmentProcessor> asFragmentProcessor(GrContext*) const;
+#endif
+
+ bool affectsTransparentBlack() const {
+ return this->filterColor(0) != 0;
+ }
+
+ SK_TO_STRING_PUREVIRT()
+
+ SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP()
+ SK_DEFINE_FLATTENABLE_TYPE(SkColorFilter)
+
+protected:
+ SkColorFilter() {}
+
+ virtual bool onAppendStages(SkRasterPipeline*) const;
+
+private:
+ /*
+ * Returns 1 if this is a single filter (not a composition of other filters), otherwise it
+     * returns the number of leaf-node filters in a composition. This should be the same value
+     * as the number of GrFragmentProcessors returned by asFragmentProcessor's array parameter.
+ *
+ * e.g. compose(filter, compose(compose(filter, filter), filter)) --> 4
+ */
+ virtual int privateComposedFilterCount() const { return 1; }
+ friend class SkComposeColorFilter;
+
+ typedef SkFlattenable INHERITED;
+};
+
+#endif
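
For context, a minimal editorial sketch (not part of the patch) of the mode-filter factory and filterColor() declared above; tintThroughFilter is an illustrative name:

    #include "SkColorFilter.h"

    static SkColor tintThroughFilter(SkColor input) {
        // Blend a constant red over each source color.
        sk_sp<SkColorFilter> tint =
            SkColorFilter::MakeModeFilter(SK_ColorRED, SkXfermode::kSrcOver_Mode);
        if (!tint) {
            return input;   // a null filter means the mode has no effect (e.g. Dst)
        }
        // filterColor() wraps the SkColor <-> SkPMColor conversion around filterSpan().
        return tint->filterColor(input);
    }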
diff --git a/gfx/skia/skia/include/core/SkColorPriv.h b/gfx/skia/skia/include/core/SkColorPriv.h
new file mode 100644
index 000000000..694d32472
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkColorPriv.h
@@ -0,0 +1,1098 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorPriv_DEFINED
+#define SkColorPriv_DEFINED
+
+// turn this on for extra debug checking when blending onto 565
+#ifdef SK_DEBUG
+ #define CHECK_FOR_565_OVERFLOW
+#endif
+
+#include "SkColor.h"
+#include "SkMath.h"
+
+//////////////////////////////////////////////////////////////////////////////
+
+#define SkASSERT_IS_BYTE(x) SkASSERT(0 == ((x) & ~0xFF))
+
+/*
+ * Skia's 32bit backend only supports 1 swizzle order at a time (compile-time).
+ * This is specified by 4 defines SK_A32_SHIFT, SK_R32_SHIFT, ... for G and B.
+ *
+ * For easier compatibility with Skia's GPU backend, we further restrict these
+ * to either (in memory-byte-order) RGBA or BGRA. Note that this "order" does
+ * not directly correspond to the same shift-order, since we have to take endianness
+ * into account.
+ *
+ * Here we enforce this constraint.
+ */
+
+#ifdef SK_CPU_BENDIAN
+ #define SK_RGBA_R32_SHIFT 24
+ #define SK_RGBA_G32_SHIFT 16
+ #define SK_RGBA_B32_SHIFT 8
+ #define SK_RGBA_A32_SHIFT 0
+
+ #define SK_BGRA_B32_SHIFT 24
+ #define SK_BGRA_G32_SHIFT 16
+ #define SK_BGRA_R32_SHIFT 8
+ #define SK_BGRA_A32_SHIFT 0
+#else
+ #define SK_RGBA_R32_SHIFT 0
+ #define SK_RGBA_G32_SHIFT 8
+ #define SK_RGBA_B32_SHIFT 16
+ #define SK_RGBA_A32_SHIFT 24
+
+ #define SK_BGRA_B32_SHIFT 0
+ #define SK_BGRA_G32_SHIFT 8
+ #define SK_BGRA_R32_SHIFT 16
+ #define SK_BGRA_A32_SHIFT 24
+#endif
+
+#if defined(SK_PMCOLOR_IS_RGBA) && defined(SK_PMCOLOR_IS_BGRA)
+ #error "can't define PMCOLOR to be RGBA and BGRA"
+#endif
+
+#define LOCAL_PMCOLOR_SHIFTS_EQUIVALENT_TO_RGBA \
+ (SK_A32_SHIFT == SK_RGBA_A32_SHIFT && \
+ SK_R32_SHIFT == SK_RGBA_R32_SHIFT && \
+ SK_G32_SHIFT == SK_RGBA_G32_SHIFT && \
+ SK_B32_SHIFT == SK_RGBA_B32_SHIFT)
+
+#define LOCAL_PMCOLOR_SHIFTS_EQUIVALENT_TO_BGRA \
+ (SK_A32_SHIFT == SK_BGRA_A32_SHIFT && \
+ SK_R32_SHIFT == SK_BGRA_R32_SHIFT && \
+ SK_G32_SHIFT == SK_BGRA_G32_SHIFT && \
+ SK_B32_SHIFT == SK_BGRA_B32_SHIFT)
+
+
+#define SK_A_INDEX (SK_A32_SHIFT/8)
+#define SK_R_INDEX (SK_R32_SHIFT/8)
+#define SK_G_INDEX (SK_G32_SHIFT/8)
+#define SK_B_INDEX (SK_B32_SHIFT/8)
+
+#if defined(SK_PMCOLOR_IS_RGBA) && !LOCAL_PMCOLOR_SHIFTS_EQUIVALENT_TO_RGBA
+ #error "SK_PMCOLOR_IS_RGBA does not match SK_*32_SHIFT values"
+#endif
+
+#if defined(SK_PMCOLOR_IS_BGRA) && !LOCAL_PMCOLOR_SHIFTS_EQUIVALENT_TO_BGRA
+ #error "SK_PMCOLOR_IS_BGRA does not match SK_*32_SHIFT values"
+#endif
+
+#if !defined(SK_PMCOLOR_IS_RGBA) && !defined(SK_PMCOLOR_IS_BGRA)
+ // deduce which to define from the _SHIFT defines
+
+ #if LOCAL_PMCOLOR_SHIFTS_EQUIVALENT_TO_RGBA
+ #define SK_PMCOLOR_IS_RGBA
+ #elif LOCAL_PMCOLOR_SHIFTS_EQUIVALENT_TO_BGRA
+ #define SK_PMCOLOR_IS_BGRA
+ #else
+ #error "need 32bit packing to be either RGBA or BGRA"
+ #endif
+#endif
+
+// hide these now that we're done
+#undef LOCAL_PMCOLOR_SHIFTS_EQUIVALENT_TO_RGBA
+#undef LOCAL_PMCOLOR_SHIFTS_EQUIVALENT_TO_BGRA
+
+//////////////////////////////////////////////////////////////////////////////
+
+// Reverse the bytes corresponding to RED and BLUE in a packed pixel. Note that
+// the pair of them occupy the same 2 slots in both RGBA and BGRA, thus there is
+// no need to pass in the colortype to this function.
+static inline uint32_t SkSwizzle_RB(uint32_t c) {
+ static const uint32_t kRBMask = (0xFF << SK_R32_SHIFT) | (0xFF << SK_B32_SHIFT);
+
+ unsigned c0 = (c >> SK_R32_SHIFT) & 0xFF;
+ unsigned c1 = (c >> SK_B32_SHIFT) & 0xFF;
+ return (c & ~kRBMask) | (c0 << SK_B32_SHIFT) | (c1 << SK_R32_SHIFT);
+}
+
+static inline uint32_t SkPackARGB_as_RGBA(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ SkASSERT_IS_BYTE(a);
+ SkASSERT_IS_BYTE(r);
+ SkASSERT_IS_BYTE(g);
+ SkASSERT_IS_BYTE(b);
+ return (a << SK_RGBA_A32_SHIFT) | (r << SK_RGBA_R32_SHIFT) |
+ (g << SK_RGBA_G32_SHIFT) | (b << SK_RGBA_B32_SHIFT);
+}
+
+static inline uint32_t SkPackARGB_as_BGRA(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ SkASSERT_IS_BYTE(a);
+ SkASSERT_IS_BYTE(r);
+ SkASSERT_IS_BYTE(g);
+ SkASSERT_IS_BYTE(b);
+ return (a << SK_BGRA_A32_SHIFT) | (r << SK_BGRA_R32_SHIFT) |
+ (g << SK_BGRA_G32_SHIFT) | (b << SK_BGRA_B32_SHIFT);
+}
+
+static inline SkPMColor SkSwizzle_RGBA_to_PMColor(uint32_t c) {
+#ifdef SK_PMCOLOR_IS_RGBA
+ return c;
+#else
+ return SkSwizzle_RB(c);
+#endif
+}
+
+static inline SkPMColor SkSwizzle_BGRA_to_PMColor(uint32_t c) {
+#ifdef SK_PMCOLOR_IS_BGRA
+ return c;
+#else
+ return SkSwizzle_RB(c);
+#endif
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+///@{
+/** See ITU-R Recommendation BT.709 at http://www.itu.int/rec/R-REC-BT.709/ .*/
+#define SK_ITU_BT709_LUM_COEFF_R (0.2126f)
+#define SK_ITU_BT709_LUM_COEFF_G (0.7152f)
+#define SK_ITU_BT709_LUM_COEFF_B (0.0722f)
+///@}
+
+///@{
+/** A float value which specifies this channel's contribution to luminance. */
+#define SK_LUM_COEFF_R SK_ITU_BT709_LUM_COEFF_R
+#define SK_LUM_COEFF_G SK_ITU_BT709_LUM_COEFF_G
+#define SK_LUM_COEFF_B SK_ITU_BT709_LUM_COEFF_B
+///@}
+
+/** Computes the luminance from the given r, g, and b in accordance with
+ SK_LUM_COEFF_X. For correct results, r, g, and b should be in linear space.
+*/
+static inline U8CPU SkComputeLuminance(U8CPU r, U8CPU g, U8CPU b) {
+ //The following is
+ //r * SK_LUM_COEFF_R + g * SK_LUM_COEFF_G + b * SK_LUM_COEFF_B
+ //with SK_LUM_COEFF_X in 1.8 fixed point (rounding adjusted to sum to 256).
+ return (r * 54 + g * 183 + b * 19) >> 8;
+}
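
A brief editorial check (not in the header; checkLuminanceWeights is an illustrative name) that the fixed-point weights above behave as the comment claims:

    static inline void checkLuminanceWeights() {
        SkASSERT(54 + 183 + 19 == 256);                      // weights sum to 1.0 in 1.8 fixed point
        SkASSERT(SkComputeLuminance(255, 255, 255) == 255);  // white maps to white
        SkASSERT(SkComputeLuminance(0, 0, 0) == 0);          // black maps to black
    }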
+
+/** Turn 0..255 into 0..256 by adding 1 at the half-way point. Used to turn a
+ byte into a scale value, so that we can say scale * value >> 8 instead of
+ alpha * value / 255.
+
+ In debugging, asserts that alpha is 0..255
+*/
+static inline unsigned SkAlpha255To256(U8CPU alpha) {
+ SkASSERT(SkToU8(alpha) == alpha);
+    // this one assumes that blending on top of an opaque dst keeps it that way
+ // even though it is less accurate than a+(a>>7) for non-opaque dsts
+ return alpha + 1;
+}
+
+/**
+ * Turn a 0..255 value into a 0..256 value, rounding up if the value is >= 0x80.
+ * This is slightly more accurate than SkAlpha255To256.
+ */
+static inline unsigned Sk255To256(U8CPU value) {
+ SkASSERT(SkToU8(value) == value);
+ return value + (value >> 7);
+}
+
+/** Multiply value by 0..256, and shift the result down 8
+ (i.e. return (value * alpha256) >> 8)
+ */
+#define SkAlphaMul(value, alpha256) (((value) * (alpha256)) >> 8)
+
+/** Calculates 256 - (value * alpha256) / 255 in range [0,256],
+ * for [0,255] value and [0,256] alpha256.
+ */
+static inline U16CPU SkAlphaMulInv256(U16CPU value, U16CPU alpha256) {
+#ifdef SK_SUPPORT_LEGACY_BROKEN_LERP
+ return SkAlpha255To256(255 - SkAlphaMul(value, alpha256));
+#else
+ unsigned prod = 0xFFFF - value * alpha256;
+ return (prod + (prod >> 8)) >> 8;
+#endif
+}
+
+// The caller may want negative values, so keep all params signed (int)
+// so we don't accidentally slip into unsigned math and lose the sign
+// extension when we shift (in SkAlphaMul)
+static inline int SkAlphaBlend(int src, int dst, int scale256) {
+ SkASSERT((unsigned)scale256 <= 256);
+ return dst + SkAlphaMul(src - dst, scale256);
+}
+
+/**
+ * Returns (src * alpha + dst * (255 - alpha)) / 255
+ *
+ * This is more accurate than SkAlphaBlend, but slightly slower
+ */
+static inline int SkAlphaBlend255(S16CPU src, S16CPU dst, U8CPU alpha) {
+ SkASSERT((int16_t)src == src);
+ SkASSERT((int16_t)dst == dst);
+ SkASSERT((uint8_t)alpha == alpha);
+
+ int prod = (src - dst) * alpha + 128;
+ prod = (prod + (prod >> 8)) >> 8;
+ return dst + prod;
+}
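
An editorial sketch (not in the header; blendEndpoints is an illustrative name) showing that the two blend helpers above agree at their endpoints but take their weight on different scales:

    static inline void blendEndpoints() {
        // SkAlphaBlend takes a [0, 256] scale: 256 returns src exactly, 0 returns dst.
        SkASSERT(SkAlphaBlend(200, 40, 256) == 200);
        SkASSERT(SkAlphaBlend(200, 40, 0)   == 40);

        // SkAlphaBlend255 takes a [0, 255] alpha and divides by 255 with rounding.
        SkASSERT(SkAlphaBlend255(200, 40, 255) == 200);
        SkASSERT(SkAlphaBlend255(200, 40, 0)   == 40);
    }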
+
+static inline U8CPU SkUnitScalarClampToByte(SkScalar x) {
+ return static_cast<U8CPU>(SkScalarPin(x, 0, 1) * 255 + 0.5);
+}
+
+#define SK_R16_BITS 5
+#define SK_G16_BITS 6
+#define SK_B16_BITS 5
+
+#define SK_R16_SHIFT (SK_B16_BITS + SK_G16_BITS)
+#define SK_G16_SHIFT (SK_B16_BITS)
+#define SK_B16_SHIFT 0
+
+#define SK_R16_MASK ((1 << SK_R16_BITS) - 1)
+#define SK_G16_MASK ((1 << SK_G16_BITS) - 1)
+#define SK_B16_MASK ((1 << SK_B16_BITS) - 1)
+
+#define SkGetPackedR16(color) (((unsigned)(color) >> SK_R16_SHIFT) & SK_R16_MASK)
+#define SkGetPackedG16(color) (((unsigned)(color) >> SK_G16_SHIFT) & SK_G16_MASK)
+#define SkGetPackedB16(color) (((unsigned)(color) >> SK_B16_SHIFT) & SK_B16_MASK)
+
+#define SkR16Assert(r) SkASSERT((unsigned)(r) <= SK_R16_MASK)
+#define SkG16Assert(g) SkASSERT((unsigned)(g) <= SK_G16_MASK)
+#define SkB16Assert(b) SkASSERT((unsigned)(b) <= SK_B16_MASK)
+
+static inline uint16_t SkPackRGB16(unsigned r, unsigned g, unsigned b) {
+ SkASSERT(r <= SK_R16_MASK);
+ SkASSERT(g <= SK_G16_MASK);
+ SkASSERT(b <= SK_B16_MASK);
+
+ return SkToU16((r << SK_R16_SHIFT) | (g << SK_G16_SHIFT) | (b << SK_B16_SHIFT));
+}
+
+#define SK_R16_MASK_IN_PLACE (SK_R16_MASK << SK_R16_SHIFT)
+#define SK_G16_MASK_IN_PLACE (SK_G16_MASK << SK_G16_SHIFT)
+#define SK_B16_MASK_IN_PLACE (SK_B16_MASK << SK_B16_SHIFT)
+
+/** Expand the 16bit color into a 32bit value that can be scaled all at once
+ by a value up to 32. Used in conjunction with SkCompact_rgb_16.
+*/
+static inline uint32_t SkExpand_rgb_16(U16CPU c) {
+ SkASSERT(c == (uint16_t)c);
+
+ return ((c & SK_G16_MASK_IN_PLACE) << 16) | (c & ~SK_G16_MASK_IN_PLACE);
+}
+
+/** Compress an expanded value (from SkExpand_rgb_16) back down to a 16bit
+ color value. The computation yields only 16bits of valid data, but we claim
+ to return 32bits, so that the compiler won't generate extra instructions to
+ "clean" the top 16bits. However, the top 16 can contain garbage, so it is
+ up to the caller to safely ignore them.
+*/
+static inline U16CPU SkCompact_rgb_16(uint32_t c) {
+ return ((c >> 16) & SK_G16_MASK_IN_PLACE) | (c & ~SK_G16_MASK_IN_PLACE);
+}
+
+/** Scale the 16bit color value by the 0..256 scale parameter.
+ The computation yields only 16bits of valid data, but we claim
+ to return 32bits, so that the compiler won't generate extra instructions to
+ "clean" the top 16bits.
+*/
+static inline U16CPU SkAlphaMulRGB16(U16CPU c, unsigned scale) {
+ return SkCompact_rgb_16(SkExpand_rgb_16(c) * (scale >> 3) >> 5);
+}
+
+// this helper explicitly returns a clean 16bit value (but slower)
+#define SkAlphaMulRGB16_ToU16(c, s) (uint16_t)SkAlphaMulRGB16(c, s)
+
+/** Blend pre-expanded RGB32 with 16bit color value by the 0..32 scale parameter.
+ The computation yields only 16bits of valid data, but we claim to return
+ 32bits, so that the compiler won't generate extra instructions to "clean"
+ the top 16bits.
+*/
+static inline U16CPU SkBlend32_RGB16(uint32_t src_expand, uint16_t dst, unsigned scale) {
+ uint32_t dst_expand = SkExpand_rgb_16(dst) * scale;
+ return SkCompact_rgb_16((src_expand + dst_expand) >> 5);
+}
+
+/** Blend src and dst 16bit colors by the 0..256 scale parameter.
+ The computation yields only 16bits of valid data, but we claim
+ to return 32bits, so that the compiler won't generate extra instructions to
+ "clean" the top 16bits.
+*/
+static inline U16CPU SkBlendRGB16(U16CPU src, U16CPU dst, int srcScale) {
+ SkASSERT((unsigned)srcScale <= 256);
+
+ srcScale >>= 3;
+
+ uint32_t src32 = SkExpand_rgb_16(src);
+ uint32_t dst32 = SkExpand_rgb_16(dst);
+ return SkCompact_rgb_16(dst32 + ((src32 - dst32) * srcScale >> 5));
+}
+
+static inline void SkBlendRGB16(const uint16_t src[], uint16_t dst[],
+ int srcScale, int count) {
+ SkASSERT(count > 0);
+ SkASSERT((unsigned)srcScale <= 256);
+
+ srcScale >>= 3;
+
+ do {
+ uint32_t src32 = SkExpand_rgb_16(*src++);
+ uint32_t dst32 = SkExpand_rgb_16(*dst);
+ *dst++ = static_cast<uint16_t>(
+ SkCompact_rgb_16(dst32 + ((src32 - dst32) * srcScale >> 5)));
+ } while (--count > 0);
+}
+
+#ifdef SK_DEBUG
+ static inline U16CPU SkRGB16Add(U16CPU a, U16CPU b) {
+ SkASSERT(SkGetPackedR16(a) + SkGetPackedR16(b) <= SK_R16_MASK);
+ SkASSERT(SkGetPackedG16(a) + SkGetPackedG16(b) <= SK_G16_MASK);
+ SkASSERT(SkGetPackedB16(a) + SkGetPackedB16(b) <= SK_B16_MASK);
+
+ return a + b;
+ }
+#else
+ #define SkRGB16Add(a, b) ((a) + (b))
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define SK_A32_BITS 8
+#define SK_R32_BITS 8
+#define SK_G32_BITS 8
+#define SK_B32_BITS 8
+
+#define SK_A32_MASK ((1 << SK_A32_BITS) - 1)
+#define SK_R32_MASK ((1 << SK_R32_BITS) - 1)
+#define SK_G32_MASK ((1 << SK_G32_BITS) - 1)
+#define SK_B32_MASK ((1 << SK_B32_BITS) - 1)
+
+#define SkGetPackedA32(packed) ((uint32_t)((packed) << (24 - SK_A32_SHIFT)) >> 24)
+#define SkGetPackedR32(packed) ((uint32_t)((packed) << (24 - SK_R32_SHIFT)) >> 24)
+#define SkGetPackedG32(packed) ((uint32_t)((packed) << (24 - SK_G32_SHIFT)) >> 24)
+#define SkGetPackedB32(packed) ((uint32_t)((packed) << (24 - SK_B32_SHIFT)) >> 24)
+
+#define SkA32Assert(a) SkASSERT((unsigned)(a) <= SK_A32_MASK)
+#define SkR32Assert(r) SkASSERT((unsigned)(r) <= SK_R32_MASK)
+#define SkG32Assert(g) SkASSERT((unsigned)(g) <= SK_G32_MASK)
+#define SkB32Assert(b) SkASSERT((unsigned)(b) <= SK_B32_MASK)
+
+#ifdef SK_DEBUG
+ #define SkPMColorAssert(color_value) \
+ do { \
+ SkPMColor pm_color_value = (color_value); \
+ uint32_t alpha_color_value = SkGetPackedA32(pm_color_value); \
+ SkA32Assert(alpha_color_value); \
+ SkASSERT(SkGetPackedR32(pm_color_value) <= alpha_color_value); \
+ SkASSERT(SkGetPackedG32(pm_color_value) <= alpha_color_value); \
+ SkASSERT(SkGetPackedB32(pm_color_value) <= alpha_color_value); \
+ } while (false)
+#else
+ #define SkPMColorAssert(c)
+#endif
+
+static inline bool SkPMColorValid(SkPMColor c) {
+ auto a = SkGetPackedA32(c);
+ bool valid = a <= SK_A32_MASK
+ && SkGetPackedR32(c) <= a
+ && SkGetPackedG32(c) <= a
+ && SkGetPackedB32(c) <= a;
+ if (valid) {
+ SkPMColorAssert(c); // Make sure we're consistent when it counts.
+ }
+ return valid;
+}
+
+/**
+ * Pack the components into a SkPMColor, checking (in the debug version) that
+ * the components are 0..255, and are already premultiplied (i.e. alpha >= color)
+ */
+static inline SkPMColor SkPackARGB32(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ SkA32Assert(a);
+ SkASSERT(r <= a);
+ SkASSERT(g <= a);
+ SkASSERT(b <= a);
+
+ return (a << SK_A32_SHIFT) | (r << SK_R32_SHIFT) |
+ (g << SK_G32_SHIFT) | (b << SK_B32_SHIFT);
+}
+
+static inline uint32_t SkPackPMColor_as_RGBA(SkPMColor c) {
+ return SkPackARGB_as_RGBA(SkGetPackedA32(c), SkGetPackedR32(c),
+ SkGetPackedG32(c), SkGetPackedB32(c));
+}
+
+static inline uint32_t SkPackPMColor_as_BGRA(SkPMColor c) {
+ return SkPackARGB_as_BGRA(SkGetPackedA32(c), SkGetPackedR32(c),
+ SkGetPackedG32(c), SkGetPackedB32(c));
+}
+
+/**
+ * Abstract 4-byte interpolation, implemented on top of SkPMColor
+ * utility functions. Third parameter controls blending of the first two:
+ *      (src, dst, 0) returns dst
+ *      (src, dst, 256) returns src
+ * scale is [0..256], unlike SkFourByteInterp which takes srcWeight in [0..255]
+ */
+static inline SkPMColor SkFourByteInterp256(SkPMColor src, SkPMColor dst,
+ unsigned scale) {
+ unsigned a = SkAlphaBlend(SkGetPackedA32(src), SkGetPackedA32(dst), scale);
+ unsigned r = SkAlphaBlend(SkGetPackedR32(src), SkGetPackedR32(dst), scale);
+ unsigned g = SkAlphaBlend(SkGetPackedG32(src), SkGetPackedG32(dst), scale);
+ unsigned b = SkAlphaBlend(SkGetPackedB32(src), SkGetPackedB32(dst), scale);
+
+ return SkPackARGB32(a, r, g, b);
+}
+
+/**
+ * Abstract 4-byte interpolation, implemented on top of SkPMColor
+ * utility functions. Third parameter controls blending of the first two:
+ * (src, dst, 0) returns dst
+ * (src, dst, 0xFF) returns src
+ */
+static inline SkPMColor SkFourByteInterp(SkPMColor src, SkPMColor dst,
+ U8CPU srcWeight) {
+ unsigned scale = SkAlpha255To256(srcWeight);
+ return SkFourByteInterp256(src, dst, scale);
+}
+
+/**
+ * 0xAARRGGBB -> 0x00AA00GG, 0x00RR00BB
+ */
+static inline void SkSplay(uint32_t color, uint32_t* ag, uint32_t* rb) {
+ const uint32_t mask = 0x00FF00FF;
+ *ag = (color >> 8) & mask;
+ *rb = color & mask;
+}
+
+/**
+ * 0xAARRGGBB -> 0x00AA00GG00RR00BB
+ * (note, ARGB -> AGRB)
+ */
+static inline uint64_t SkSplay(uint32_t color) {
+ const uint32_t mask = 0x00FF00FF;
+ uint64_t agrb = (color >> 8) & mask; // 0x0000000000AA00GG
+ agrb <<= 32; // 0x00AA00GG00000000
+ agrb |= color & mask; // 0x00AA00GG00RR00BB
+ return agrb;
+}
+
+/**
+ * 0xAAxxGGxx, 0xRRxxBBxx-> 0xAARRGGBB
+ */
+static inline uint32_t SkUnsplay(uint32_t ag, uint32_t rb) {
+ const uint32_t mask = 0xFF00FF00;
+ return (ag & mask) | ((rb & mask) >> 8);
+}
+
+/**
+ * 0xAAxxGGxxRRxxBBxx -> 0xAARRGGBB
+ * (note, AGRB -> ARGB)
+ */
+static inline uint32_t SkUnsplay(uint64_t agrb) {
+ const uint32_t mask = 0xFF00FF00;
+ return SkPMColor(
+ ((agrb & mask) >> 8) | // 0x00RR00BB
+ ((agrb >> 32) & mask)); // 0xAARRGGBB
+}
+
+static inline SkPMColor SkFastFourByteInterp256_32(SkPMColor src, SkPMColor dst, unsigned scale) {
+ SkASSERT(scale <= 256);
+
+ // Two 8-bit blends per two 32-bit registers, with space to make sure the math doesn't collide.
+ uint32_t src_ag, src_rb, dst_ag, dst_rb;
+ SkSplay(src, &src_ag, &src_rb);
+ SkSplay(dst, &dst_ag, &dst_rb);
+
+ const uint32_t ret_ag = src_ag * scale + (256 - scale) * dst_ag;
+ const uint32_t ret_rb = src_rb * scale + (256 - scale) * dst_rb;
+
+ return SkUnsplay(ret_ag, ret_rb);
+}
+
+static inline SkPMColor SkFastFourByteInterp256_64(SkPMColor src, SkPMColor dst, unsigned scale) {
+ SkASSERT(scale <= 256);
+ // Four 8-bit blends in one 64-bit register, with space to make sure the math doesn't collide.
+ return SkUnsplay(SkSplay(src) * scale + (256-scale) * SkSplay(dst));
+}
+
+// TODO(mtklein): Replace slow versions with fast versions, using scale + (scale>>7) everywhere.
+
+/**
+ * Same as SkFourByteInterp256, but faster.
+ */
+static inline SkPMColor SkFastFourByteInterp256(SkPMColor src, SkPMColor dst, unsigned scale) {
+ // On a 64-bit machine, _64 is about 10% faster than _32, but ~40% slower on a 32-bit machine.
+ if (sizeof(void*) == 4) {
+ return SkFastFourByteInterp256_32(src, dst, scale);
+ } else {
+ return SkFastFourByteInterp256_64(src, dst, scale);
+ }
+}
+
+/**
+ * Nearly the same as SkFourByteInterp, but faster and a touch more accurate, due to better
+ * srcWeight scaling to [0, 256].
+ */
+static inline SkPMColor SkFastFourByteInterp(SkPMColor src,
+ SkPMColor dst,
+ U8CPU srcWeight) {
+ SkASSERT(srcWeight <= 255);
+ // scale = srcWeight + (srcWeight >> 7) is more accurate than
+ // scale = srcWeight + 1, but 7% slower
+ return SkFastFourByteInterp256(src, dst, srcWeight + (srcWeight >> 7));
+}
+
+/**
+ * Same as SkPackARGB32, but this version is guaranteed not to check, even in
+ * the debug build, that the values are premultiplied.
+ */
+static inline SkPMColor SkPackARGB32NoCheck(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ return (a << SK_A32_SHIFT) | (r << SK_R32_SHIFT) |
+ (g << SK_G32_SHIFT) | (b << SK_B32_SHIFT);
+}
+
+static inline
+SkPMColor SkPremultiplyARGBInline(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ SkA32Assert(a);
+ SkR32Assert(r);
+ SkG32Assert(g);
+ SkB32Assert(b);
+
+ if (a != 255) {
+ r = SkMulDiv255Round(r, a);
+ g = SkMulDiv255Round(g, a);
+ b = SkMulDiv255Round(b, a);
+ }
+ return SkPackARGB32(a, r, g, b);
+}
+
+// When Android is compiled optimizing for size, SkAlphaMulQ doesn't get
+// inlined; forcing inlining significantly improves performance.
+static SK_ALWAYS_INLINE uint32_t SkAlphaMulQ(uint32_t c, unsigned scale) {
+ uint32_t mask = 0xFF00FF;
+
+ uint32_t rb = ((c & mask) * scale) >> 8;
+ uint32_t ag = ((c >> 8) & mask) * scale;
+ return (rb & mask) | (ag & ~mask);
+}
+
+static inline SkPMColor SkPMSrcOver(SkPMColor src, SkPMColor dst) {
+ return src + SkAlphaMulQ(dst, SkAlpha255To256(255 - SkGetPackedA32(src)));
+}
+
+/**
+ * Interpolates between colors src and dst using [0,256] scale.
+ */
+static inline SkPMColor SkPMLerp(SkPMColor src, SkPMColor dst, unsigned scale) {
+#ifdef SK_SUPPORT_LEGACY_BROKEN_LERP
+ return SkAlphaMulQ(src, scale) + SkAlphaMulQ(dst, 256 - scale);
+#else
+ return SkFastFourByteInterp256(src, dst, scale);
+#endif
+}
+
+static inline SkPMColor SkBlendARGB32(SkPMColor src, SkPMColor dst, U8CPU aa) {
+ SkASSERT((unsigned)aa <= 255);
+
+ unsigned src_scale = SkAlpha255To256(aa);
+#ifdef SK_SUPPORT_LEGACY_BROKEN_LERP
+ unsigned dst_scale = SkAlpha255To256(255 - SkAlphaMul(SkGetPackedA32(src), src_scale));
+
+ return SkAlphaMulQ(src, src_scale) + SkAlphaMulQ(dst, dst_scale);
+#else
+ unsigned dst_scale = SkAlphaMulInv256(SkGetPackedA32(src), src_scale);
+
+ const uint32_t mask = 0xFF00FF;
+
+ uint32_t src_rb = (src & mask) * src_scale;
+ uint32_t src_ag = ((src >> 8) & mask) * src_scale;
+
+ uint32_t dst_rb = (dst & mask) * dst_scale;
+ uint32_t dst_ag = ((dst >> 8) & mask) * dst_scale;
+
+ return (((src_rb + dst_rb) >> 8) & mask) | ((src_ag + dst_ag) & ~mask);
+#endif
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// Convert a 32bit pixel to a 16bit pixel (no dither)
+
+#define SkR32ToR16_MACRO(r) ((unsigned)(r) >> (SK_R32_BITS - SK_R16_BITS))
+#define SkG32ToG16_MACRO(g) ((unsigned)(g) >> (SK_G32_BITS - SK_G16_BITS))
+#define SkB32ToB16_MACRO(b) ((unsigned)(b) >> (SK_B32_BITS - SK_B16_BITS))
+
+#ifdef SK_DEBUG
+ static inline unsigned SkR32ToR16(unsigned r) {
+ SkR32Assert(r);
+ return SkR32ToR16_MACRO(r);
+ }
+ static inline unsigned SkG32ToG16(unsigned g) {
+ SkG32Assert(g);
+ return SkG32ToG16_MACRO(g);
+ }
+ static inline unsigned SkB32ToB16(unsigned b) {
+ SkB32Assert(b);
+ return SkB32ToB16_MACRO(b);
+ }
+#else
+ #define SkR32ToR16(r) SkR32ToR16_MACRO(r)
+ #define SkG32ToG16(g) SkG32ToG16_MACRO(g)
+ #define SkB32ToB16(b) SkB32ToB16_MACRO(b)
+#endif
+
+#define SkPacked32ToR16(c) (((unsigned)(c) >> (SK_R32_SHIFT + SK_R32_BITS - SK_R16_BITS)) & SK_R16_MASK)
+#define SkPacked32ToG16(c) (((unsigned)(c) >> (SK_G32_SHIFT + SK_G32_BITS - SK_G16_BITS)) & SK_G16_MASK)
+#define SkPacked32ToB16(c) (((unsigned)(c) >> (SK_B32_SHIFT + SK_B32_BITS - SK_B16_BITS)) & SK_B16_MASK)
+
+static inline U16CPU SkPixel32ToPixel16(SkPMColor c) {
+ unsigned r = ((c >> (SK_R32_SHIFT + (8 - SK_R16_BITS))) & SK_R16_MASK) << SK_R16_SHIFT;
+ unsigned g = ((c >> (SK_G32_SHIFT + (8 - SK_G16_BITS))) & SK_G16_MASK) << SK_G16_SHIFT;
+ unsigned b = ((c >> (SK_B32_SHIFT + (8 - SK_B16_BITS))) & SK_B16_MASK) << SK_B16_SHIFT;
+ return r | g | b;
+}
+
+static inline U16CPU SkPack888ToRGB16(U8CPU r, U8CPU g, U8CPU b) {
+ return (SkR32ToR16(r) << SK_R16_SHIFT) |
+ (SkG32ToG16(g) << SK_G16_SHIFT) |
+ (SkB32ToB16(b) << SK_B16_SHIFT);
+}
+
+#define SkPixel32ToPixel16_ToU16(src) SkToU16(SkPixel32ToPixel16(src))
+
+/////////////////////////////////////////////////////////////////////////////////////////
+// Fast dither from 32->16
+
+#define SkShouldDitherXY(x, y) (((x) ^ (y)) & 1)
+
+static inline uint16_t SkDitherPack888ToRGB16(U8CPU r, U8CPU g, U8CPU b) {
+ r = ((r << 1) - ((r >> (8 - SK_R16_BITS) << (8 - SK_R16_BITS)) | (r >> SK_R16_BITS))) >> (8 - SK_R16_BITS);
+ g = ((g << 1) - ((g >> (8 - SK_G16_BITS) << (8 - SK_G16_BITS)) | (g >> SK_G16_BITS))) >> (8 - SK_G16_BITS);
+ b = ((b << 1) - ((b >> (8 - SK_B16_BITS) << (8 - SK_B16_BITS)) | (b >> SK_B16_BITS))) >> (8 - SK_B16_BITS);
+
+ return SkPackRGB16(r, g, b);
+}
+
+static inline uint16_t SkDitherPixel32ToPixel16(SkPMColor c) {
+ return SkDitherPack888ToRGB16(SkGetPackedR32(c), SkGetPackedG32(c), SkGetPackedB32(c));
+}
+
+/* Return c in expanded_rgb_16 format, but also scaled up by 32 (5 bits)
+ It is now suitable for combining with a scaled expanded_rgb_16 color
+ as in SkSrcOver32To16().
+ We must do this 565 high-bit replication, in order for the subsequent add
+ to saturate properly (and not overflow). If we take the 8 bits as is, it is
+ possible to overflow.
+*/
+static inline uint32_t SkPMColorToExpanded16x5(SkPMColor c) {
+ unsigned sr = SkPacked32ToR16(c);
+ unsigned sg = SkPacked32ToG16(c);
+ unsigned sb = SkPacked32ToB16(c);
+
+ sr = (sr << 5) | sr;
+ sg = (sg << 5) | (sg >> 1);
+ sb = (sb << 5) | sb;
+ return (sr << 11) | (sg << 21) | (sb << 0);
+}
+
+/* SrcOver the 32bit src color with the 16bit dst, returning a 16bit value
+ (with dirt in the high 16bits, so caller beware).
+*/
+static inline U16CPU SkSrcOver32To16(SkPMColor src, uint16_t dst) {
+ unsigned sr = SkGetPackedR32(src);
+ unsigned sg = SkGetPackedG32(src);
+ unsigned sb = SkGetPackedB32(src);
+
+ unsigned dr = SkGetPackedR16(dst);
+ unsigned dg = SkGetPackedG16(dst);
+ unsigned db = SkGetPackedB16(dst);
+
+ unsigned isa = 255 - SkGetPackedA32(src);
+
+ dr = (sr + SkMul16ShiftRound(dr, isa, SK_R16_BITS)) >> (8 - SK_R16_BITS);
+ dg = (sg + SkMul16ShiftRound(dg, isa, SK_G16_BITS)) >> (8 - SK_G16_BITS);
+ db = (sb + SkMul16ShiftRound(db, isa, SK_B16_BITS)) >> (8 - SK_B16_BITS);
+
+ return SkPackRGB16(dr, dg, db);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// Convert a 16bit pixel to a 32bit pixel
+
+static inline unsigned SkR16ToR32(unsigned r) {
+ return (r << (8 - SK_R16_BITS)) | (r >> (2 * SK_R16_BITS - 8));
+}
+
+static inline unsigned SkG16ToG32(unsigned g) {
+ return (g << (8 - SK_G16_BITS)) | (g >> (2 * SK_G16_BITS - 8));
+}
+
+static inline unsigned SkB16ToB32(unsigned b) {
+ return (b << (8 - SK_B16_BITS)) | (b >> (2 * SK_B16_BITS - 8));
+}
+
+#define SkPacked16ToR32(c) SkR16ToR32(SkGetPackedR16(c))
+#define SkPacked16ToG32(c) SkG16ToG32(SkGetPackedG16(c))
+#define SkPacked16ToB32(c) SkB16ToB32(SkGetPackedB16(c))
+
+static inline SkPMColor SkPixel16ToPixel32(U16CPU src) {
+ SkASSERT(src == SkToU16(src));
+
+ unsigned r = SkPacked16ToR32(src);
+ unsigned g = SkPacked16ToG32(src);
+ unsigned b = SkPacked16ToB32(src);
+
+ SkASSERT((r >> (8 - SK_R16_BITS)) == SkGetPackedR16(src));
+ SkASSERT((g >> (8 - SK_G16_BITS)) == SkGetPackedG16(src));
+ SkASSERT((b >> (8 - SK_B16_BITS)) == SkGetPackedB16(src));
+
+ return SkPackARGB32(0xFF, r, g, b);
+}
+
+// similar to SkPixel16ToPixel32, but returns SkColor instead of SkPMColor
+static inline SkColor SkPixel16ToColor(U16CPU src) {
+ SkASSERT(src == SkToU16(src));
+
+ unsigned r = SkPacked16ToR32(src);
+ unsigned g = SkPacked16ToG32(src);
+ unsigned b = SkPacked16ToB32(src);
+
+ SkASSERT((r >> (8 - SK_R16_BITS)) == SkGetPackedR16(src));
+ SkASSERT((g >> (8 - SK_G16_BITS)) == SkGetPackedG16(src));
+ SkASSERT((b >> (8 - SK_B16_BITS)) == SkGetPackedB16(src));
+
+ return SkColorSetRGB(r, g, b);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef uint16_t SkPMColor16;
+
+// Put in OpenGL order (r g b a)
+#define SK_A4444_SHIFT 0
+#define SK_R4444_SHIFT 12
+#define SK_G4444_SHIFT 8
+#define SK_B4444_SHIFT 4
+
+#define SkA32To4444(a) ((unsigned)(a) >> 4)
+#define SkR32To4444(r) ((unsigned)(r) >> 4)
+#define SkG32To4444(g) ((unsigned)(g) >> 4)
+#define SkB32To4444(b) ((unsigned)(b) >> 4)
+
+static inline U8CPU SkReplicateNibble(unsigned nib) {
+ SkASSERT(nib <= 0xF);
+ return (nib << 4) | nib;
+}
+
+#define SkA4444ToA32(a) SkReplicateNibble(a)
+#define SkR4444ToR32(r) SkReplicateNibble(r)
+#define SkG4444ToG32(g) SkReplicateNibble(g)
+#define SkB4444ToB32(b) SkReplicateNibble(b)
+
+#define SkGetPackedA4444(c) (((unsigned)(c) >> SK_A4444_SHIFT) & 0xF)
+#define SkGetPackedR4444(c) (((unsigned)(c) >> SK_R4444_SHIFT) & 0xF)
+#define SkGetPackedG4444(c) (((unsigned)(c) >> SK_G4444_SHIFT) & 0xF)
+#define SkGetPackedB4444(c) (((unsigned)(c) >> SK_B4444_SHIFT) & 0xF)
+
+#define SkPacked4444ToA32(c) SkReplicateNibble(SkGetPackedA4444(c))
+#define SkPacked4444ToR32(c) SkReplicateNibble(SkGetPackedR4444(c))
+#define SkPacked4444ToG32(c) SkReplicateNibble(SkGetPackedG4444(c))
+#define SkPacked4444ToB32(c) SkReplicateNibble(SkGetPackedB4444(c))
+
+#ifdef SK_DEBUG
+static inline void SkPMColor16Assert(U16CPU c) {
+ unsigned a = SkGetPackedA4444(c);
+ unsigned r = SkGetPackedR4444(c);
+ unsigned g = SkGetPackedG4444(c);
+ unsigned b = SkGetPackedB4444(c);
+
+ SkASSERT(a <= 0xF);
+ SkASSERT(r <= a);
+ SkASSERT(g <= a);
+ SkASSERT(b <= a);
+}
+#else
+#define SkPMColor16Assert(c)
+#endif
+
+static inline unsigned SkAlpha15To16(unsigned a) {
+ SkASSERT(a <= 0xF);
+ return a + (a >> 3);
+}
+
+#ifdef SK_DEBUG
+ static inline int SkAlphaMul4(int value, int scale) {
+ SkASSERT((unsigned)scale <= 0x10);
+ return value * scale >> 4;
+ }
+#else
+ #define SkAlphaMul4(value, scale) ((value) * (scale) >> 4)
+#endif
+
+static inline unsigned SkR4444ToR565(unsigned r) {
+ SkASSERT(r <= 0xF);
+ return (r << (SK_R16_BITS - 4)) | (r >> (8 - SK_R16_BITS));
+}
+
+static inline unsigned SkG4444ToG565(unsigned g) {
+ SkASSERT(g <= 0xF);
+ return (g << (SK_G16_BITS - 4)) | (g >> (8 - SK_G16_BITS));
+}
+
+static inline unsigned SkB4444ToB565(unsigned b) {
+ SkASSERT(b <= 0xF);
+ return (b << (SK_B16_BITS - 4)) | (b >> (8 - SK_B16_BITS));
+}
+
+static inline SkPMColor16 SkPackARGB4444(unsigned a, unsigned r,
+ unsigned g, unsigned b) {
+ SkASSERT(a <= 0xF);
+ SkASSERT(r <= a);
+ SkASSERT(g <= a);
+ SkASSERT(b <= a);
+
+ return (SkPMColor16)((a << SK_A4444_SHIFT) | (r << SK_R4444_SHIFT) |
+ (g << SK_G4444_SHIFT) | (b << SK_B4444_SHIFT));
+}
+
+static inline SkPMColor16 SkAlphaMulQ4(SkPMColor16 c, int scale) {
+ SkASSERT(scale <= 16);
+
+ const unsigned mask = 0xF0F; //gMask_0F0F;
+
+#if 0
+ unsigned rb = ((c & mask) * scale) >> 4;
+ unsigned ag = ((c >> 4) & mask) * scale;
+ return (rb & mask) | (ag & ~mask);
+#else
+ unsigned expanded_c = (c & mask) | ((c & (mask << 4)) << 12);
+ unsigned scaled_c = (expanded_c * scale) >> 4;
+ return (scaled_c & mask) | ((scaled_c >> 12) & (mask << 4));
+#endif
+}
+
+/** Expand the SkPMColor16 color into a 32bit value that can be scaled all at
+ once by a value up to 16.
+*/
+static inline uint32_t SkExpand_4444(U16CPU c) {
+ SkASSERT(c == (uint16_t)c);
+
+ const unsigned mask = 0xF0F; //gMask_0F0F;
+ return (c & mask) | ((c & ~mask) << 12);
+}
+
+static inline uint16_t SkSrcOver4444To16(SkPMColor16 s, uint16_t d) {
+ unsigned sa = SkGetPackedA4444(s);
+ unsigned sr = SkR4444ToR565(SkGetPackedR4444(s));
+ unsigned sg = SkG4444ToG565(SkGetPackedG4444(s));
+ unsigned sb = SkB4444ToB565(SkGetPackedB4444(s));
+
+ // To avoid overflow, we have to clear the low bit of the synthetic sg
+ // if the src alpha is <= 7.
+ // To see why, try blending 0x4444 on top of 565 white and watch green
+ // overflow (sum == 64).
+ sg &= ~(~(sa >> 3) & 1);
+
+ unsigned scale = SkAlpha15To16(15 - sa);
+ unsigned dr = SkAlphaMul4(SkGetPackedR16(d), scale);
+ unsigned dg = SkAlphaMul4(SkGetPackedG16(d), scale);
+ unsigned db = SkAlphaMul4(SkGetPackedB16(d), scale);
+
+#if 0
+ if (sg + dg > 63) {
+ SkDebugf("---- SkSrcOver4444To16 src=%x dst=%x scale=%d, sg=%d dg=%d\n", s, d, scale, sg, dg);
+ }
+#endif
+ return SkPackRGB16(sr + dr, sg + dg, sb + db);
+}
+
+static inline uint16_t SkBlend4444To16(SkPMColor16 src, uint16_t dst, int scale16) {
+ SkASSERT((unsigned)scale16 <= 16);
+
+ return SkSrcOver4444To16(SkAlphaMulQ4(src, scale16), dst);
+}
+
+static inline SkPMColor SkPixel4444ToPixel32(U16CPU c) {
+ uint32_t d = (SkGetPackedA4444(c) << SK_A32_SHIFT) |
+ (SkGetPackedR4444(c) << SK_R32_SHIFT) |
+ (SkGetPackedG4444(c) << SK_G32_SHIFT) |
+ (SkGetPackedB4444(c) << SK_B32_SHIFT);
+ return d | (d << 4);
+}
+
+static inline SkPMColor16 SkPixel32ToPixel4444(SkPMColor c) {
+ return (((c >> (SK_A32_SHIFT + 4)) & 0xF) << SK_A4444_SHIFT) |
+ (((c >> (SK_R32_SHIFT + 4)) & 0xF) << SK_R4444_SHIFT) |
+ (((c >> (SK_G32_SHIFT + 4)) & 0xF) << SK_G4444_SHIFT) |
+ (((c >> (SK_B32_SHIFT + 4)) & 0xF) << SK_B4444_SHIFT);
+}
+
+// cheap 2x2 dither
+static inline SkPMColor16 SkDitherARGB32To4444(U8CPU a, U8CPU r,
+ U8CPU g, U8CPU b) {
+ // To ensure that we stay a legal premultiplied color, we take the max()
+ // of the truncated and dithered alpha values. If we didn't, cases like
+ // SkDitherARGB32To4444(0x31, 0x2E, ...) would generate SkPackARGB4444(2, 3, ...)
+ // which is not legal premultiplied, since a < color
+ unsigned dithered_a = ((a << 1) - ((a >> 4 << 4) | (a >> 4))) >> 4;
+ a = SkMax32(a >> 4, dithered_a);
+ // these we just dither in place
+ r = ((r << 1) - ((r >> 4 << 4) | (r >> 4))) >> 4;
+ g = ((g << 1) - ((g >> 4 << 4) | (g >> 4))) >> 4;
+ b = ((b << 1) - ((b >> 4 << 4) | (b >> 4))) >> 4;
+
+ return SkPackARGB4444(a, r, g, b);
+}
+
+static inline SkPMColor16 SkDitherPixel32To4444(SkPMColor c) {
+ return SkDitherARGB32To4444(SkGetPackedA32(c), SkGetPackedR32(c),
+ SkGetPackedG32(c), SkGetPackedB32(c));
+}
+
+/* Assumes 16bit is in standard RGBA order.
+ Transforms a normal ARGB_8888 into the same byte order as
+ expanded ARGB_4444, but keeps each component 8bits
+*/
+static inline uint32_t SkExpand_8888(SkPMColor c) {
+ return (((c >> SK_R32_SHIFT) & 0xFF) << 24) |
+ (((c >> SK_G32_SHIFT) & 0xFF) << 8) |
+ (((c >> SK_B32_SHIFT) & 0xFF) << 16) |
+ (((c >> SK_A32_SHIFT) & 0xFF) << 0);
+}
+
+/* Undo the operation of SkExpand_8888, turning the argument back into
+ a SkPMColor.
+*/
+static inline SkPMColor SkCompact_8888(uint32_t c) {
+ return (((c >> 24) & 0xFF) << SK_R32_SHIFT) |
+ (((c >> 8) & 0xFF) << SK_G32_SHIFT) |
+ (((c >> 16) & 0xFF) << SK_B32_SHIFT) |
+ (((c >> 0) & 0xFF) << SK_A32_SHIFT);
+}
+
+/* Like SkExpand_8888, this transforms a pmcolor into the expanded 4444 format,
+ but this routine just keeps the high 4bits of each component in the low
+ 4bits of the result (just like a newly expanded PMColor16).
+*/
+static inline uint32_t SkExpand32_4444(SkPMColor c) {
+ return (((c >> (SK_R32_SHIFT + 4)) & 0xF) << 24) |
+ (((c >> (SK_G32_SHIFT + 4)) & 0xF) << 8) |
+ (((c >> (SK_B32_SHIFT + 4)) & 0xF) << 16) |
+ (((c >> (SK_A32_SHIFT + 4)) & 0xF) << 0);
+}
+
+// takes two values and alternates them as part of a memset16
+// used for cheap 2x2 dithering when the colors are opaque
+void sk_dither_memset16(uint16_t dst[], uint16_t value, uint16_t other, int n);
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline int SkUpscale31To32(int value) {
+ SkASSERT((unsigned)value <= 31);
+ return value + (value >> 4);
+}
+
+static inline int SkBlend32(int src, int dst, int scale) {
+ SkASSERT((unsigned)src <= 0xFF);
+ SkASSERT((unsigned)dst <= 0xFF);
+ SkASSERT((unsigned)scale <= 32);
+ return dst + ((src - dst) * scale >> 5);
+}
+
+static inline SkPMColor SkBlendLCD16(int srcA, int srcR, int srcG, int srcB,
+ SkPMColor dst, uint16_t mask) {
+ if (mask == 0) {
+ return dst;
+ }
+
+ /* We want all of these in 5bits, hence the shifts in case one of them
+ * (green) is 6bits.
+ */
+ int maskR = SkGetPackedR16(mask) >> (SK_R16_BITS - 5);
+ int maskG = SkGetPackedG16(mask) >> (SK_G16_BITS - 5);
+ int maskB = SkGetPackedB16(mask) >> (SK_B16_BITS - 5);
+
+ // Now upscale them to 0..32, so we can use blend32
+ maskR = SkUpscale31To32(maskR);
+ maskG = SkUpscale31To32(maskG);
+ maskB = SkUpscale31To32(maskB);
+
+ // srcA has been upscaled to 256 before passed into this function
+ maskR = maskR * srcA >> 8;
+ maskG = maskG * srcA >> 8;
+ maskB = maskB * srcA >> 8;
+
+ int dstR = SkGetPackedR32(dst);
+ int dstG = SkGetPackedG32(dst);
+ int dstB = SkGetPackedB32(dst);
+
+ // LCD blitting is only supported if the dst is known/required
+ // to be opaque
+ return SkPackARGB32(0xFF,
+ SkBlend32(srcR, dstR, maskR),
+ SkBlend32(srcG, dstG, maskG),
+ SkBlend32(srcB, dstB, maskB));
+}
+
+static inline SkPMColor SkBlendLCD16Opaque(int srcR, int srcG, int srcB,
+ SkPMColor dst, uint16_t mask,
+ SkPMColor opaqueDst) {
+ if (mask == 0) {
+ return dst;
+ }
+
+ if (0xFFFF == mask) {
+ return opaqueDst;
+ }
+
+ /* We want all of these in 5bits, hence the shifts in case one of them
+ * (green) is 6bits.
+ */
+ int maskR = SkGetPackedR16(mask) >> (SK_R16_BITS - 5);
+ int maskG = SkGetPackedG16(mask) >> (SK_G16_BITS - 5);
+ int maskB = SkGetPackedB16(mask) >> (SK_B16_BITS - 5);
+
+ // Now upscale them to 0..32, so we can use blend32
+ maskR = SkUpscale31To32(maskR);
+ maskG = SkUpscale31To32(maskG);
+ maskB = SkUpscale31To32(maskB);
+
+ int dstR = SkGetPackedR32(dst);
+ int dstG = SkGetPackedG32(dst);
+ int dstB = SkGetPackedB32(dst);
+
+ // LCD blitting is only supported if the dst is known/required
+ // to be opaque
+ return SkPackARGB32(0xFF,
+ SkBlend32(srcR, dstR, maskR),
+ SkBlend32(srcG, dstG, maskG),
+ SkBlend32(srcB, dstB, maskB));
+}
+
+static inline void SkBlitLCD16Row(SkPMColor dst[], const uint16_t mask[],
+ SkColor src, int width, SkPMColor) {
+ int srcA = SkColorGetA(src);
+ int srcR = SkColorGetR(src);
+ int srcG = SkColorGetG(src);
+ int srcB = SkColorGetB(src);
+
+ srcA = SkAlpha255To256(srcA);
+
+ for (int i = 0; i < width; i++) {
+ dst[i] = SkBlendLCD16(srcA, srcR, srcG, srcB, dst[i], mask[i]);
+ }
+}
+
+static inline void SkBlitLCD16OpaqueRow(SkPMColor dst[], const uint16_t mask[],
+ SkColor src, int width,
+ SkPMColor opaqueDst) {
+ int srcR = SkColorGetR(src);
+ int srcG = SkColorGetG(src);
+ int srcB = SkColorGetB(src);
+
+ for (int i = 0; i < width; i++) {
+ dst[i] = SkBlendLCD16Opaque(srcR, srcG, srcB, dst[i], mask[i],
+ opaqueDst);
+ }
+}
+
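+// Illustrative usage sketch of the packing, blending, and 565 helpers above;
+// not part of the Skia API itself. The color values are arbitrary examples and
+// only the call pattern is meaningful.
+static inline SkPMColor SkColorPrivUsageSketch() {
+ SkPMColor src = SkPremultiplyARGBInline(0x80, 0xFF, 0x00, 0x00); // 50% translucent red
+ SkPMColor dst = SkPackARGB32(0xFF, 0x20, 0x40, 0x60); // opaque destination
+
+ SkPMColor over = SkPMSrcOver(src, dst); // Porter-Duff src-over
+ SkPMColor mid = SkFourByteInterp(over, dst, 0x80); // ~50/50 interpolation
+ SkPMColorAssert(mid);
+
+ uint16_t rgb565 = SkPixel32ToPixel16_ToU16(mid); // 8888 -> 565
+ return SkPixel16ToPixel32(rgb565); // 565 -> opaque 8888
+}
+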
+#endif
diff --git a/gfx/skia/skia/include/core/SkColorSpace.h b/gfx/skia/skia/include/core/SkColorSpace.h
new file mode 100644
index 000000000..a96f62209
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkColorSpace.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorSpace_DEFINED
+#define SkColorSpace_DEFINED
+
+#include "SkMatrix44.h"
+#include "SkRefCnt.h"
+
+class SkData;
+
+class SK_API SkColorSpace : public SkRefCnt {
+public:
+
+ /**
+ * Common, named profiles that we can recognize.
+ */
+ enum Named : uint8_t {
+ /**
+ * By far the most common color space.
+ * This is the default space for images, unmarked content, and monitors.
+ */
+ kSRGB_Named,
+
+ /**
+ * Very common wide gamut color space.
+ * Often used by images and monitors.
+ */
+ kAdobeRGB_Named,
+
+ /**
+ * Colorspace with the sRGB primaries, but a linear (1.0) gamma. Commonly used for
+ * half-float surfaces, and high precision individual colors (gradient stops, etc...)
+ */
+ kSRGBLinear_Named,
+ };
+
+ enum RenderTargetGamma : uint8_t {
+ kLinear_RenderTargetGamma,
+
+ /**
+ * Transfer function is the canonical sRGB curve, which has a short linear segment
+ * followed by a 2.4f exponential.
+ */
+ kSRGB_RenderTargetGamma,
+ };
+
+ /**
+ * Create an SkColorSpace from a transfer function and a color gamut transform to D50 XYZ.
+ */
+ static sk_sp<SkColorSpace> NewRGB(RenderTargetGamma gamma, const SkMatrix44& toXYZD50);
+
+ /**
+ * Create a common, named SkColorSpace.
+ */
+ static sk_sp<SkColorSpace> NewNamed(Named);
+
+ /**
+ * Create an SkColorSpace from an ICC profile.
+ */
+ static sk_sp<SkColorSpace> NewICC(const void*, size_t);
+
+ /**
+ * Create an SkColorSpace with the same gamut as this color space, but with linear gamma.
+ */
+ sk_sp<SkColorSpace> makeLinearGamma();
+
+ /**
+ * Returns true if the color space gamma is near enough to be approximated as sRGB.
+ */
+ bool gammaCloseToSRGB() const;
+
+ /**
+ * Returns true if the color space gamma is linear.
+ */
+ bool gammaIsLinear() const;
+
+ /**
+ * Returns nullptr on failure. Fails when we fall back to serializing ICC data and
+ * the data is too large to serialize.
+ */
+ sk_sp<SkData> serialize() const;
+
+ /**
+ * If |memory| is nullptr, returns the size required to serialize.
+ * Otherwise, serializes into |memory| and returns the size.
+ */
+ size_t writeToMemory(void* memory) const;
+
+ static sk_sp<SkColorSpace> Deserialize(const void* data, size_t length);
+
+ /**
+ * If both are null, we return true. If one is null and the other is not, we return false.
+ * If both are non-null, we do a deeper compare.
+ */
+ static bool Equals(const SkColorSpace* src, const SkColorSpace* dst);
+
+protected:
+ SkColorSpace() {}
+};
+
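+// Illustrative sketch of typical SkColorSpace queries; not part of the Skia
+// API itself. Assumes, per the documentation above, that NewNamed(kSRGB_Named)
+// yields a space whose transfer curve is close to sRGB.
+static inline bool SkColorSpaceUsageSketch() {
+ sk_sp<SkColorSpace> srgb = SkColorSpace::NewNamed(SkColorSpace::kSRGB_Named);
+ if (!srgb) {
+ return false;
+ }
+ sk_sp<SkColorSpace> linear = srgb->makeLinearGamma();
+
+ // Same gamut, different transfer curve, so the two spaces compare unequal.
+ return srgb->gammaCloseToSRGB()
+ && linear->gammaIsLinear()
+ && !SkColorSpace::Equals(srgb.get(), linear.get());
+}
+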
+#endif
diff --git a/gfx/skia/skia/include/core/SkColorTable.h b/gfx/skia/skia/include/core/SkColorTable.h
new file mode 100644
index 000000000..07dfd675b
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkColorTable.h
@@ -0,0 +1,86 @@
+
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkColorTable_DEFINED
+#define SkColorTable_DEFINED
+
+#include "../private/SkOnce.h"
+#include "SkColor.h"
+#include "SkFlattenable.h"
+#include "SkImageInfo.h"
+
+/** \class SkColorTable
+
+ SkColorTable holds an array of SkPMColors (premultiplied 32-bit colors) used by
+ 8-bit bitmaps, where the bitmap bytes are interpreted as indices into the colortable.
+
+ SkColorTable is thread-safe.
+*/
+class SK_API SkColorTable : public SkRefCnt {
+public:
+ /** Copy up to 256 colors into a new SkColorTable.
+ */
+ SkColorTable(const SkPMColor colors[], int count);
+ virtual ~SkColorTable();
+
+ /** Returns the number of colors in the table.
+ */
+ int count() const { return fCount; }
+
+ /** Returns the specified color from the table. In the debug build, this asserts that
+ * the index is in range (0 <= index < count).
+ */
+ SkPMColor operator[](int index) const {
+ SkASSERT(fColors != NULL && (unsigned)index < (unsigned)fCount);
+ return fColors[index];
+ }
+
+ /** Return the array of colors for reading.
+ */
+ const SkPMColor* readColors() const { return fColors; }
+
+ /** read16BitCache() returns the array of RGB16 colors that mirror the 32bit colors.
+ */
+ const uint16_t* read16BitCache() const;
+
+ void writeToBuffer(SkWriteBuffer&) const;
+
+ // may return null
+ static SkColorTable* Create(SkReadBuffer&);
+
+private:
+ enum AllocatedWithMalloc {
+ kAllocatedWithMalloc
+ };
+ // assumes ownership of colors (assumes it was allocated w/ malloc)
+ SkColorTable(SkPMColor* colors, int count, AllocatedWithMalloc);
+
+ SkPMColor* fColors;
+ mutable uint16_t* f16BitCache = nullptr;
+ mutable SkOnce f16BitCacheOnce;
+ int fCount;
+
+ void init(const SkPMColor* colors, int count);
+
+ friend class SkImageGenerator;
+ friend class SkBitmapRegionCodec;
+ // Only call if no other thread or cache has seen this table.
+ void dangerous_overwriteColors(const SkPMColor newColors[], int count) {
+ if (count < 0 || count > fCount) {
+ sk_throw();
+ }
+ // assumes that f16BitCache has NOT been initialized yet, so we don't try to update it
+ memcpy(fColors, newColors, count * sizeof(SkPMColor));
+ fCount = count; // update fCount, in case count is smaller
+ }
+
+ typedef SkRefCnt INHERITED;
+};
+
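+// Illustrative sketch, not part of the Skia API: building a small palette for
+// an index-8 bitmap and reading an entry back. The colors are arbitrary.
+static inline SkPMColor SkColorTableUsageSketch() {
+ const SkPMColor palette[] = {
+ SkPreMultiplyColor(SK_ColorBLACK),
+ SkPreMultiplyColor(SK_ColorWHITE),
+ SkPreMultiplyColor(SkColorSetARGB(0x80, 0xFF, 0x00, 0x00)), // 50% red
+ };
+ sk_sp<SkColorTable> table(new SkColorTable(palette, SK_ARRAY_COUNT(palette)));
+
+ SkASSERT(3 == table->count());
+ return (*table)[2]; // operator[] asserts the index range in debug builds
+}
+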
+#endif
diff --git a/gfx/skia/skia/include/core/SkData.h b/gfx/skia/skia/include/core/SkData.h
new file mode 100644
index 000000000..76c9c9ebe
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkData.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkData_DEFINED
+#define SkData_DEFINED
+
+#include <stdio.h>
+
+#include "SkRefCnt.h"
+
+class SkStream;
+
+/**
+ * SkData holds an immutable data buffer. Not only is the data immutable,
+ * but the actual ptr that is returned (by data() or bytes()) is guaranteed
+ * to always be the same for the life of this instance.
+ */
+class SK_API SkData final : public SkNVRefCnt<SkData> {
+public:
+ /**
+ * Returns the number of bytes stored.
+ */
+ size_t size() const { return fSize; }
+
+ bool isEmpty() const { return 0 == fSize; }
+
+ /**
+ * Returns the ptr to the data.
+ */
+ const void* data() const { return fPtr; }
+
+ /**
+ * Like data(), returns a read-only ptr into the data, but in this case
+ * it is cast to uint8_t*, to make it easy to add an offset to it.
+ */
+ const uint8_t* bytes() const {
+ return reinterpret_cast<const uint8_t*>(fPtr);
+ }
+
+ /**
+ * USE WITH CAUTION.
+ * This call will assert that the refcnt is 1, as a precaution against modifying the
+ * contents when another client/thread has access to the data.
+ */
+ void* writable_data() {
+ if (fSize) {
+ // only assert we're unique if we're not empty
+ SkASSERT(this->unique());
+ }
+ return fPtr;
+ }
+
+ /**
+ * Helper to copy a range of the data into a caller-provided buffer.
+ * Returns the actual number of bytes copied, after clamping offset and
+ * length to the size of the data. If buffer is NULL, it is ignored, and
+ * only the computed number of bytes is returned.
+ */
+ size_t copyRange(size_t offset, size_t length, void* buffer) const;
+
+ /**
+ * Returns true if these two objects have the same length and contents,
+ * effectively returning 0 == memcmp(...)
+ */
+ bool equals(const SkData* other) const;
+
+ /**
+ * Function that, if provided, will be called when the SkData goes out
+ * of scope, allowing for custom allocation/freeing of the data's contents.
+ */
+ typedef void (*ReleaseProc)(const void* ptr, void* context);
+
+ /**
+ * Create a new dataref by copying the specified data
+ */
+ static sk_sp<SkData> MakeWithCopy(const void* data, size_t length);
+
+
+ /**
+ * Create a new data with uninitialized contents. The caller should call writable_data()
+ * to write into the buffer, but this must be done before another ref() is made.
+ */
+ static sk_sp<SkData> MakeUninitialized(size_t length);
+
+ /**
+ * Create a new dataref by copying the specified c-string
+ * (a null-terminated array of bytes). The returned SkData will have size()
+ * equal to strlen(cstr) + 1. If cstr is NULL, it will be treated the same
+ * as "".
+ */
+ static sk_sp<SkData> MakeWithCString(const char cstr[]);
+
+ /**
+ * Create a new dataref, taking the ptr as is, and using the
+ * releaseproc to free it. The proc may be NULL.
+ */
+ static sk_sp<SkData> MakeWithProc(const void* ptr, size_t length, ReleaseProc proc, void* ctx);
+
+ /**
+ * Call this when the data parameter is already const and will outlive the lifetime of the
+ * SkData. Suitable for use with const globals.
+ */
+ static sk_sp<SkData> MakeWithoutCopy(const void* data, size_t length) {
+ return MakeWithProc(data, length, DummyReleaseProc, nullptr);
+ }
+
+ /**
+ * Create a new dataref from a pointer allocated by malloc. The Data object
+ * takes ownership of that allocation, and will handle calling sk_free.
+ */
+ static sk_sp<SkData> MakeFromMalloc(const void* data, size_t length);
+
+ /**
+ * Create a new dataref from the file with the specified path.
+ * If the file cannot be opened, this returns NULL.
+ */
+ static sk_sp<SkData> MakeFromFileName(const char path[]);
+
+ /**
+ * Create a new dataref from a stdio FILE.
+ * This does not take ownership of the FILE, nor close it.
+ * The caller is free to close the FILE at its convenience.
+ * The FILE must be open for reading only.
+ * Returns NULL on failure.
+ */
+ static sk_sp<SkData> MakeFromFILE(FILE* f);
+
+ /**
+ * Create a new dataref from a file descriptor.
+ * This does not take ownership of the file descriptor, nor close it.
+ * The caller is free to close the file descriptor at its convenience.
+ * The file descriptor must be open for reading only.
+ * Returns NULL on failure.
+ */
+ static sk_sp<SkData> MakeFromFD(int fd);
+
+ /**
+ * Attempt to read size bytes into a SkData. If the read succeeds, return the data,
+ * else return NULL. Either way the stream's cursor may have been changed as a result
+ * of calling read().
+ */
+ static sk_sp<SkData> MakeFromStream(SkStream*, size_t size);
+
+ /**
+ * Create a new dataref using a subset of the data in the specified
+ * src dataref.
+ */
+ static sk_sp<SkData> MakeSubset(const SkData* src, size_t offset, size_t length);
+
+ /**
+ * Returns a new empty dataref (or a reference to a shared empty dataref).
+ * New or shared, the caller must see that unref() is eventually called.
+ */
+ static sk_sp<SkData> MakeEmpty();
+
+private:
+ friend class SkNVRefCnt<SkData>;
+ ReleaseProc fReleaseProc;
+ void* fReleaseProcContext;
+ void* fPtr;
+ size_t fSize;
+
+ SkData(const void* ptr, size_t size, ReleaseProc, void* context);
+ explicit SkData(size_t size); // inplace new/delete
+ ~SkData();
+
+
+ // Objects of this type are sometimes created in a custom fashion using sk_malloc_throw and
+ // therefore must be sk_freed. We overload new to also call sk_malloc_throw so that memory
+ // can be unconditionally released using sk_free in an overloaded delete. Overloading regular
+ // new means we must also overload placement new.
+ void* operator new(size_t size) { return sk_malloc_throw(size); }
+ void* operator new(size_t, void* p) { return p; }
+ void operator delete(void* p) { sk_free(p); }
+
+ // Called the first time someone calls NewEmpty to initialize the singleton.
+ friend SkData* sk_new_empty_data();
+
+ // shared internal factory
+ static sk_sp<SkData> PrivateNewWithCopy(const void* srcOrNull, size_t length);
+
+ static void DummyReleaseProc(const void*, void*); // {}
+
+ typedef SkRefCnt INHERITED;
+};
+
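+// Illustrative sketch, not part of the Skia API: the copying and non-copying
+// factories side by side. The payload is an arbitrary example string.
+static inline bool SkDataUsageSketch() {
+ static const char kPayload[] = "hello skia";
+
+ // Copying factory: the SkData owns an independent copy of the bytes.
+ sk_sp<SkData> copied = SkData::MakeWithCopy(kPayload, sizeof(kPayload));
+
+ // Non-copying factory: legal only because kPayload outlives the SkData.
+ sk_sp<SkData> wrapped = SkData::MakeWithoutCopy(kPayload, sizeof(kPayload));
+
+ return copied->size() == sizeof(kPayload) && copied->equals(wrapped.get());
+}
+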
+#endif
diff --git a/gfx/skia/skia/include/core/SkDataTable.h b/gfx/skia/skia/include/core/SkDataTable.h
new file mode 100644
index 000000000..2ec2d0f2e
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkDataTable.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDataTable_DEFINED
+#define SkDataTable_DEFINED
+
+#include "../private/SkTDArray.h"
+#include "SkChunkAlloc.h"
+#include "SkData.h"
+#include "SkString.h"
+
+/**
+ * Like SkData, SkDataTable holds an immutable data buffer. The data buffer is
+ * organized into a table of entries, each with a length, so the entries are
+ * not required to all be the same size.
+ */
+class SK_API SkDataTable : public SkRefCnt {
+public:
+ /**
+ * Returns true if the table is empty (i.e. has no entries).
+ */
+ bool isEmpty() const { return 0 == fCount; }
+
+ /**
+ * Return the number of entries in the table. 0 for an empty table
+ */
+ int count() const { return fCount; }
+
+ /**
+ * Return the size of the index'th entry in the table. The caller must
+ * ensure that index is valid for this table.
+ */
+ size_t atSize(int index) const;
+
+ /**
+ * Return a pointer to the data of the index'th entry in the table.
+ * The caller must ensure that index is valid for this table.
+ *
+ * @param size If non-null, this returns the byte size of this entry. This
+ * will be the same value that atSize(index) would return.
+ */
+ const void* at(int index, size_t* size = NULL) const;
+
+ template <typename T>
+ const T* atT(int index, size_t* size = NULL) const {
+ return reinterpret_cast<const T*>(this->at(index, size));
+ }
+
+ /**
+ * Returns the index'th entry as a c-string, and assumes that the trailing
+ * null byte has been copied into the table as well.
+ */
+ const char* atStr(int index) const {
+ size_t size;
+ const char* str = this->atT<const char>(index, &size);
+ SkASSERT(strlen(str) + 1 == size);
+ return str;
+ }
+
+ typedef void (*FreeProc)(void* context);
+
+ static sk_sp<SkDataTable> MakeEmpty();
+
+ /**
+ * Return a new DataTable that contains a copy of the data stored in each
+ * "array".
+ *
+ * @param ptrs array of pointers to each element to be copied into the table.
+ * @param sizes array of byte-lengths for each entry in the corresponding
+ * ptrs[] array.
+ * @param count the number of array elements in ptrs[] and sizes[] to copy.
+ */
+ static sk_sp<SkDataTable> MakeCopyArrays(const void * const * ptrs,
+ const size_t sizes[], int count);
+
+ /**
+ * Return a new table that contains a copy of the data in array.
+ *
+ * @param array contiguous array of data for all elements to be copied.
+ * @param elemSize byte-length for a given element.
+ * @param count the number of entries to be copied out of array. The number
+ * of bytes that will be copied is count * elemSize.
+ */
+ static sk_sp<SkDataTable> MakeCopyArray(const void* array, size_t elemSize, int count);
+
+ static sk_sp<SkDataTable> MakeArrayProc(const void* array, size_t elemSize, int count,
+ FreeProc proc, void* context);
+
+private:
+ struct Dir {
+ const void* fPtr;
+ uintptr_t fSize;
+ };
+
+ int fCount;
+ size_t fElemSize;
+ union {
+ const Dir* fDir;
+ const char* fElems;
+ } fU;
+
+ FreeProc fFreeProc;
+ void* fFreeProcContext;
+
+ SkDataTable();
+ SkDataTable(const void* array, size_t elemSize, int count,
+ FreeProc, void* context);
+ SkDataTable(const Dir*, int count, FreeProc, void* context);
+ virtual ~SkDataTable();
+
+ friend class SkDataTableBuilder; // access to Dir
+
+ typedef SkRefCnt INHERITED;
+};
+
+/**
+ * Helper class that allows for incrementally building up the data needed to
+ * create a SkDataTable.
+ */
+class SK_API SkDataTableBuilder : SkNoncopyable {
+public:
+ SkDataTableBuilder(size_t minChunkSize);
+ ~SkDataTableBuilder();
+
+ int count() const { return fDir.count(); }
+ size_t minChunkSize() const { return fMinChunkSize; }
+
+ /**
+ * Forget any previously appended entries, setting count() back to 0.
+ */
+ void reset(size_t minChunkSize);
+ void reset() {
+ this->reset(fMinChunkSize);
+ }
+
+ /**
+ * Copy size-bytes from data, and append it to the growing SkDataTable.
+ */
+ void append(const void* data, size_t size);
+
+ /**
+ * Helper version of append() passes strlen() + 1 for the size,
+ * so the trailing-zero will be copied as well.
+ */
+ void appendStr(const char str[]) {
+ this->append(str, strlen(str) + 1);
+ }
+
+ /**
+ * Helper version of append() passes string.size() + 1 for the size,
+ * so the trailing-zero will be copied as well.
+ */
+ void appendString(const SkString& string) {
+ this->append(string.c_str(), string.size() + 1);
+ }
+
+ /**
+ * Return an SkDataTable from the accumulated entries that were added by
+ * calls to append(). This call also clears any accumulated entries from
+ * this builder, so its count() will be 0 after this call.
+ */
+ sk_sp<SkDataTable> detachDataTable();
+
+private:
+ SkTDArray<SkDataTable::Dir> fDir;
+ SkChunkAlloc* fHeap;
+ size_t fMinChunkSize;
+};
+
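+// Illustrative sketch, not part of the Skia API: accumulating c-strings with
+// SkDataTableBuilder and reading them back. The 64-byte chunk size is an
+// arbitrary example.
+static inline sk_sp<SkDataTable> SkDataTableUsageSketch() {
+ SkDataTableBuilder builder(64);
+ builder.appendStr("serif");
+ builder.appendStr("sans-serif");
+
+ sk_sp<SkDataTable> table = builder.detachDataTable();
+ SkASSERT(2 == table->count());
+ SkASSERT(sizeof("serif") == table->atSize(0)); // entry includes the trailing '\0'
+ return table;
+}
+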
+#endif
diff --git a/gfx/skia/skia/include/core/SkDeque.h b/gfx/skia/skia/include/core/SkDeque.h
new file mode 100644
index 000000000..0b3e37f3d
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkDeque.h
@@ -0,0 +1,138 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDeque_DEFINED
+#define SkDeque_DEFINED
+
+#include "SkTypes.h"
+
+/*
+ * The deque class works by blindly creating memory space of a specified element
+ * size. It manages the memory as a doubly linked list of blocks each of which
+ * can contain multiple elements. Pushes and pops add/remove blocks from the
+ * beginning/end of the list as necessary while each block tracks the used
+ * portion of its memory.
+ * One behavior to be aware of is that the pops do not immediately remove an
+ * empty block from the beginning/end of the list (Presumably so push/pop pairs
+ * on the block boundaries don't cause thrashing). This can result in the first/
+ * last element not residing in the first/last block.
+ */
+class SK_API SkDeque : SkNoncopyable {
+public:
+ /**
+ * elemSize specifies the size of each individual element in the deque
+ * allocCount specifies how many elements are to be allocated as a block
+ */
+ explicit SkDeque(size_t elemSize, int allocCount = 1);
+ SkDeque(size_t elemSize, void* storage, size_t storageSize, int allocCount = 1);
+ ~SkDeque();
+
+ bool empty() const { return 0 == fCount; }
+ int count() const { return fCount; }
+ size_t elemSize() const { return fElemSize; }
+
+ const void* front() const { return fFront; }
+ const void* back() const { return fBack; }
+
+ void* front() {
+ return (void*)((const SkDeque*)this)->front();
+ }
+
+ void* back() {
+ return (void*)((const SkDeque*)this)->back();
+ }
+
+ /**
+ * push_front and push_back return a pointer to the memory space
+ * for the new element
+ */
+ void* push_front();
+ void* push_back();
+
+ void pop_front();
+ void pop_back();
+
+private:
+ struct Block;
+
+public:
+ class Iter {
+ public:
+ enum IterStart {
+ kFront_IterStart,
+ kBack_IterStart
+ };
+
+ /**
+ * Creates an uninitialized iterator. Must be reset()
+ */
+ Iter();
+
+ Iter(const SkDeque& d, IterStart startLoc);
+ void* next();
+ void* prev();
+
+ void reset(const SkDeque& d, IterStart startLoc);
+
+ private:
+ SkDeque::Block* fCurBlock;
+ char* fPos;
+ size_t fElemSize;
+ };
+
+ // Inherit privately from Iter to prevent access to reverse iteration
+ class F2BIter : private Iter {
+ public:
+ F2BIter() {}
+
+ /**
+ * Wrap Iter's 2 parameter ctor to force initialization to the
+ * beginning of the deque
+ */
+ F2BIter(const SkDeque& d) : INHERITED(d, kFront_IterStart) {}
+
+ using Iter::next;
+
+ /**
+ * Wrap Iter::reset to force initialization to the beginning of the
+ * deque
+ */
+ void reset(const SkDeque& d) {
+ this->INHERITED::reset(d, kFront_IterStart);
+ }
+
+ private:
+ typedef Iter INHERITED;
+ };
+
+private:
+ // allow unit test to call numBlocksAllocated
+ friend class DequeUnitTestHelper;
+
+ void* fFront;
+ void* fBack;
+
+ Block* fFrontBlock;
+ Block* fBackBlock;
+ size_t fElemSize;
+ void* fInitialStorage;
+ int fCount; // number of elements in the deque
+ int fAllocCount; // number of elements to allocate per block
+
+ Block* allocateBlock(int allocCount);
+ void freeBlock(Block* block);
+
+ /**
+ * This returns the number of chunk blocks allocated by the deque. It
+ * can be used to gauge the effectiveness of the selected allocCount.
+ */
+ int numBlocksAllocated() const;
+};
+
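+// Illustrative sketch, not part of the Skia API: a deque of ints. push_back()
+// returns raw element storage, so the caller writes the value in place. The
+// element values and block size are arbitrary examples.
+static inline int SkDequeUsageSketch() {
+ SkDeque deque(sizeof(int), 4); // allocate 4 ints per block
+ for (int i = 0; i < 10; ++i) {
+ *static_cast<int*>(deque.push_back()) = i;
+ }
+ SkASSERT(10 == deque.count());
+
+ int sum = 0;
+ SkDeque::F2BIter iter(deque);
+ while (void* ptr = iter.next()) { // front-to-back traversal
+ sum += *static_cast<int*>(ptr);
+ }
+
+ deque.pop_front(); // drops the element holding 0
+ return sum; // 0 + 1 + ... + 9 == 45
+}
+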
+#endif
diff --git a/gfx/skia/skia/include/core/SkDevice.h b/gfx/skia/skia/include/core/SkDevice.h
new file mode 100644
index 000000000..c29a65d77
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkDevice.h
@@ -0,0 +1,374 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDevice_DEFINED
+#define SkDevice_DEFINED
+
+#include "SkRefCnt.h"
+#include "SkCanvas.h"
+#include "SkColor.h"
+#include "SkSurfaceProps.h"
+
+class SkBitmap;
+class SkClipStack;
+class SkDraw;
+class SkDrawFilter;
+class SkImageFilterCache;
+struct SkIRect;
+class SkMatrix;
+class SkMetaData;
+class SkRegion;
+class SkSpecialImage;
+class GrRenderTarget;
+
+class SK_API SkBaseDevice : public SkRefCnt {
+public:
+ /**
+ * Construct a new device.
+ */
+ explicit SkBaseDevice(const SkImageInfo&, const SkSurfaceProps&);
+ virtual ~SkBaseDevice();
+
+ SkMetaData& getMetaData();
+
+ /**
+ * Return ImageInfo for this device. If the canvas is not backed by pixels
+ * (cpu or gpu), then the info's ColorType will be kUnknown_SkColorType.
+ */
+ const SkImageInfo& imageInfo() const { return fInfo; }
+
+ /**
+ * Return SurfaceProps for this device.
+ */
+ const SkSurfaceProps& surfaceProps() const {
+ return fSurfaceProps;
+ }
+
+ /**
+ * Return the bounds of the device in the coordinate space of the root
+ * canvas. The root device will have its top-left at 0,0, but other devices
+ * such as those associated with saveLayer may have a non-zero origin.
+ */
+ void getGlobalBounds(SkIRect* bounds) const {
+ SkASSERT(bounds);
+ const SkIPoint& origin = this->getOrigin();
+ bounds->setXYWH(origin.x(), origin.y(), this->width(), this->height());
+ }
+
+ SkIRect getGlobalBounds() const {
+ SkIRect bounds;
+ this->getGlobalBounds(&bounds);
+ return bounds;
+ }
+
+ int width() const {
+ return this->imageInfo().width();
+ }
+
+ int height() const {
+ return this->imageInfo().height();
+ }
+
+ bool isOpaque() const {
+ return this->imageInfo().isOpaque();
+ }
+
+#ifdef SK_SUPPORT_LEGACY_ACCESSBITMAP
+ /** Return the bitmap associated with this device. Call this each time you need
+ to access the bitmap, as it notifies the subclass to perform any flushing
+ etc. before you examine the pixels.
+ @param changePixels set to true if the caller plans to change the pixels
+ @return the device's bitmap
+ */
+ const SkBitmap& accessBitmap(bool changePixels);
+#endif
+
+ bool writePixels(const SkImageInfo&, const void*, size_t rowBytes, int x, int y);
+
+ /**
+ * Try to get write-access to the pixels behind the device. If successful, this returns true
+ * and fills-out the pixmap parameter. On success it also bumps the genID of the underlying
+ * bitmap.
+ *
+ * On failure, returns false and ignores the pixmap parameter.
+ */
+ bool accessPixels(SkPixmap* pmap);
+
+ /**
+ * Try to get read-only-access to the pixels behind the device. If successful, this returns
+ * true and fills-out the pixmap parameter.
+ *
+ * On failure, returns false and ignores the pixmap parameter.
+ */
+ bool peekPixels(SkPixmap*);
+
+ /**
+ * Return the device's origin: its offset in device coordinates from
+ * the default origin in its canvas' matrix/clip
+ */
+ const SkIPoint& getOrigin() const { return fOrigin; }
+
+protected:
+ enum TileUsage {
+ kPossible_TileUsage, //!< the created device may be drawn tiled
+ kNever_TileUsage, //!< the created device will never be drawn tiled
+ };
+
+ struct TextFlags {
+ uint32_t fFlags; // SkPaint::getFlags()
+ };
+
+ /**
+ * Returns the text-related flags, possibly modified based on the state of the
+ * device (e.g. support for LCD).
+ */
+ uint32_t filterTextFlags(const SkPaint&) const;
+
+ virtual bool onShouldDisableLCD(const SkPaint&) const { return false; }
+
+ /** These are called inside the per-device-layer loop for each draw call.
+ When these are called, we have already applied any saveLayer operations,
+ and are handling any looping from the paint, and any effects from the
+ DrawFilter.
+ */
+ virtual void drawPaint(const SkDraw&, const SkPaint& paint) = 0;
+ virtual void drawPoints(const SkDraw&, SkCanvas::PointMode mode, size_t count,
+ const SkPoint[], const SkPaint& paint) = 0;
+ virtual void drawRect(const SkDraw&, const SkRect& r,
+ const SkPaint& paint) = 0;
+ virtual void drawRegion(const SkDraw&, const SkRegion& r,
+ const SkPaint& paint);
+ virtual void drawOval(const SkDraw&, const SkRect& oval,
+ const SkPaint& paint) = 0;
+ /** By the time this is called we know that abs(sweepAngle) is in the range [0, 360). */
+ virtual void drawArc(const SkDraw&, const SkRect& oval, SkScalar startAngle,
+ SkScalar sweepAngle, bool useCenter, const SkPaint& paint);
+ virtual void drawRRect(const SkDraw&, const SkRRect& rr,
+ const SkPaint& paint) = 0;
+
+ // Default impl calls drawPath()
+ virtual void drawDRRect(const SkDraw&, const SkRRect& outer,
+ const SkRRect& inner, const SkPaint&);
+
+ /**
+ * If pathIsMutable, then the implementation is allowed to cast path to a
+ * non-const pointer and modify it in place (as an optimization). Canvas
+ * may do this to implement helpers such as drawOval, by placing a temp
+ * path on the stack to hold the representation of the oval.
+ *
+ * If prePathMatrix is not null, it should logically be applied before any
+ * stroking or other effects. If there are no effects on the paint that
+ * affect the geometry/rasterization, then the pre matrix can just be
+ * pre-concated with the current matrix.
+ */
+ virtual void drawPath(const SkDraw&, const SkPath& path,
+ const SkPaint& paint,
+ const SkMatrix* prePathMatrix = NULL,
+ bool pathIsMutable = false) = 0;
+ virtual void drawBitmap(const SkDraw&, const SkBitmap& bitmap,
+ const SkMatrix& matrix, const SkPaint& paint) = 0;
+ virtual void drawSprite(const SkDraw&, const SkBitmap& bitmap,
+ int x, int y, const SkPaint& paint) = 0;
+
+ /**
+ * The default impl. will create a bitmap-shader from the bitmap,
+ * and call drawRect with it.
+ */
+ virtual void drawBitmapRect(const SkDraw&, const SkBitmap&,
+ const SkRect* srcOrNull, const SkRect& dst,
+ const SkPaint& paint,
+ SkCanvas::SrcRectConstraint) = 0;
+ virtual void drawBitmapNine(const SkDraw&, const SkBitmap&, const SkIRect& center,
+ const SkRect& dst, const SkPaint&);
+ virtual void drawBitmapLattice(const SkDraw&, const SkBitmap&, const SkCanvas::Lattice&,
+ const SkRect& dst, const SkPaint&);
+
+ virtual void drawImage(const SkDraw&, const SkImage*, SkScalar x, SkScalar y, const SkPaint&);
+ virtual void drawImageRect(const SkDraw&, const SkImage*, const SkRect* src, const SkRect& dst,
+ const SkPaint&, SkCanvas::SrcRectConstraint);
+ virtual void drawImageNine(const SkDraw&, const SkImage*, const SkIRect& center,
+ const SkRect& dst, const SkPaint&);
+ virtual void drawImageLattice(const SkDraw&, const SkImage*, const SkCanvas::Lattice&,
+ const SkRect& dst, const SkPaint&);
+
+ /**
+ * Does not handle text decoration.
+ * Decorations (underline and strike-thru) will be handled by SkCanvas.
+ */
+ virtual void drawText(const SkDraw&, const void* text, size_t len,
+ SkScalar x, SkScalar y, const SkPaint& paint) = 0;
+ virtual void drawPosText(const SkDraw&, const void* text, size_t len,
+ const SkScalar pos[], int scalarsPerPos,
+ const SkPoint& offset, const SkPaint& paint) = 0;
+ virtual void drawVertices(const SkDraw&, SkCanvas::VertexMode, int vertexCount,
+ const SkPoint verts[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint) = 0;
+ // default implementation unrolls the blob runs.
+ virtual void drawTextBlob(const SkDraw&, const SkTextBlob*, SkScalar x, SkScalar y,
+ const SkPaint& paint, SkDrawFilter* drawFilter);
+ // default implementation calls drawVertices
+ virtual void drawPatch(const SkDraw&, const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkXfermode* xmode, const SkPaint& paint);
+
+ // default implementation calls drawPath
+ virtual void drawAtlas(const SkDraw&, const SkImage* atlas, const SkRSXform[], const SkRect[],
+ const SkColor[], int count, SkXfermode::Mode, const SkPaint&);
+
+ virtual void drawAnnotation(const SkDraw&, const SkRect&, const char[], SkData*) {}
+
+ /** The SkDevice passed will be an SkDevice which was returned by a call to
+ onCreateDevice on this device with kNever_TileUsage.
+ */
+ virtual void drawDevice(const SkDraw&, SkBaseDevice*, int x, int y,
+ const SkPaint&) = 0;
+
+ virtual void drawTextOnPath(const SkDraw&, const void* text, size_t len, const SkPath&,
+ const SkMatrix*, const SkPaint&);
+ virtual void drawTextRSXform(const SkDraw&, const void* text, size_t len, const SkRSXform[],
+ const SkPaint&);
+
+ virtual void drawSpecial(const SkDraw&, SkSpecialImage*, int x, int y, const SkPaint&);
+ virtual sk_sp<SkSpecialImage> makeSpecial(const SkBitmap&);
+ virtual sk_sp<SkSpecialImage> makeSpecial(const SkImage*);
+ virtual sk_sp<SkSpecialImage> snapSpecial();
+
+ bool readPixels(const SkImageInfo&, void* dst, size_t rowBytes, int x, int y);
+
+ ///////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_SUPPORT_LEGACY_ACCESSBITMAP
+ /** Update as needed the pixel value in the bitmap, so that the caller can
+ access the pixels directly.
+ @return The device contents as a bitmap
+ */
+ virtual const SkBitmap& onAccessBitmap() {
+ SkASSERT(0);
+ return fLegacyBitmap;
+ }
+#endif
+
+ virtual GrContext* context() const { return nullptr; }
+
+protected:
+ virtual sk_sp<SkSurface> makeSurface(const SkImageInfo&, const SkSurfaceProps&);
+ virtual bool onPeekPixels(SkPixmap*) { return false; }
+
+ /**
+ * The caller is responsible for "pre-clipping" the dst. The impl can assume that the dst
+ * image at the specified x,y offset will fit within the device's bounds.
+ *
+ * This is explicitly asserted in readPixels(), the public way to call this.
+ */
+ virtual bool onReadPixels(const SkImageInfo&, void*, size_t, int x, int y);
+
+ /**
+ * The caller is responsible for "pre-clipping" the src. The impl can assume that the src
+ * image at the specified x,y offset will fit within the device's bounds.
+ *
+ * This is explicitly asserted in writePixelsDirect(), the public way to call this.
+ */
+ virtual bool onWritePixels(const SkImageInfo&, const void*, size_t, int x, int y);
+
+ virtual bool onAccessPixels(SkPixmap*) { return false; }
+
+ struct CreateInfo {
+ static SkPixelGeometry AdjustGeometry(const SkImageInfo&, TileUsage, SkPixelGeometry,
+ bool preserveLCDText);
+
+ // The constructor may change the pixel geometry based on other parameters.
+ CreateInfo(const SkImageInfo& info,
+ TileUsage tileUsage,
+ SkPixelGeometry geo)
+ : fInfo(info)
+ , fTileUsage(tileUsage)
+ , fPixelGeometry(AdjustGeometry(info, tileUsage, geo, false))
+ {}
+
+ CreateInfo(const SkImageInfo& info,
+ TileUsage tileUsage,
+ SkPixelGeometry geo,
+ bool preserveLCDText)
+ : fInfo(info)
+ , fTileUsage(tileUsage)
+ , fPixelGeometry(AdjustGeometry(info, tileUsage, geo, preserveLCDText))
+ {}
+
+ const SkImageInfo fInfo;
+ const TileUsage fTileUsage;
+ const SkPixelGeometry fPixelGeometry;
+ };
+
+ /**
+ * Create a new device based on CreateInfo. If the paint is not null, then it represents a
+ * preview of how the new device will be composed with its creator device (this).
+ *
+ * The subclass may be handed this device in drawDevice(), so it must always return
+ * a device that it knows how to draw, and that it knows how to identify if it is not of the
+ * same subclass (since drawDevice is passed a SkBaseDevice*). If the subclass cannot fulfill
+ * that contract (e.g. PDF cannot support some settings on the paint) it should return NULL,
+ * and the caller may then decide to explicitly create a bitmapdevice, knowing that later
+ * it could not call drawDevice with it (but it could call drawSprite or drawBitmap).
+ */
+ virtual SkBaseDevice* onCreateDevice(const CreateInfo&, const SkPaint*) {
+ return NULL;
+ }
+
+ // A helper function used by derived classes to log the scale factor of a bitmap or image draw.
+ static void LogDrawScaleFactor(const SkMatrix&, SkFilterQuality);
+
+private:
+ friend class SkCanvas;
+ friend struct DeviceCM; //for setMatrixClip
+ friend class SkDraw;
+ friend class SkDrawIter;
+ friend class SkDeviceFilteredPaint;
+ friend class SkNoPixelsBitmapDevice;
+ friend class SkSurface_Raster;
+ friend class DeviceTestingAccess;
+
+ // used to change the backend's pixels (and possibly config/rowbytes)
+ // but cannot change the width/height, so there should be no change to
+ // any clip information.
+ // TODO: move to SkBitmapDevice
+ virtual void replaceBitmapBackendForRasterSurface(const SkBitmap&) {}
+
+ virtual bool forceConservativeRasterClip() const { return false; }
+
+ /**
+ * Don't call this!
+ */
+ virtual GrDrawContext* accessDrawContext() { return nullptr; }
+
+ // just called by SkCanvas when built as a layer
+ void setOrigin(int x, int y) { fOrigin.set(x, y); }
+
+ /** Causes any deferred drawing to the device to be completed.
+ */
+ virtual void flush() {}
+
+ virtual SkImageFilterCache* getImageFilterCache() { return NULL; }
+
+ friend class SkBitmapDevice;
+ void privateResize(int w, int h) {
+ *const_cast<SkImageInfo*>(&fInfo) = fInfo.makeWH(w, h);
+ }
+
+ SkIPoint fOrigin;
+ SkMetaData* fMetaData;
+ const SkImageInfo fInfo;
+ const SkSurfaceProps fSurfaceProps;
+
+#ifdef SK_SUPPORT_LEGACY_ACCESSBITMAP
+ SkBitmap fLegacyBitmap;
+#endif
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkDocument.h b/gfx/skia/skia/include/core/SkDocument.h
new file mode 100644
index 000000000..418a83743
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkDocument.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDocument_DEFINED
+#define SkDocument_DEFINED
+
+#include "SkBitmap.h"
+#include "SkPicture.h"
+#include "SkPixelSerializer.h"
+#include "SkRect.h"
+#include "SkRefCnt.h"
+#include "SkString.h"
+#include "SkTime.h"
+
+class SkCanvas;
+class SkWStream;
+
+/** SK_ScalarDefaultRasterDPI is 72 DPI.
+*/
+#define SK_ScalarDefaultRasterDPI 72.0f
+
+/**
+ * High-level API for creating a document-based canvas. To use (see the sketch below):
+ *
+ * 1. Create a document, specifying a stream to store the output.
+ * 2. For each "page" of content:
+ * a. canvas = doc->beginPage(...)
+ * b. draw_my_content(canvas);
+ * c. doc->endPage();
+ * 3. Close the document with doc->close().
+ */
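+/**
+ * A minimal sketch of that sequence, targeting the PDF backend. The stream,
+ * draw_my_content() and the US-Letter page size are placeholders rather than
+ * requirements of the API:
+ *
+ *     sk_sp<SkDocument> doc = SkDocument::MakePDF(&pdfStream);  // default 72 DPI
+ *     SkCanvas* page = doc->beginPage(612, 792);                // width/height in points
+ *     draw_my_content(page);
+ *     doc->endPage();
+ *     doc->close();                                             // flushes the stream
+ */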
+class SK_API SkDocument : public SkRefCnt {
+public:
+ struct OptionalTimestamp {
+ SkTime::DateTime fDateTime;
+ bool fEnabled;
+ OptionalTimestamp() : fEnabled(false) {}
+ };
+
+ /**
+ * Optional metadata to be passed into the PDF factory function.
+ */
+ struct PDFMetadata {
+ /**
+ * The document’s title.
+ */
+ SkString fTitle;
+ /**
+ * The name of the person who created the document.
+ */
+ SkString fAuthor;
+ /**
+ * The subject of the document.
+ */
+ SkString fSubject;
+ /**
+ * Keywords associated with the document. Commas may be used
+ * to delineate keywords within the string.
+ */
+ SkString fKeywords;
+ /**
+ * If the document was converted to PDF from another format,
+ * the name of the conforming product that created the
+ * original document from which it was converted.
+ */
+ SkString fCreator;
+ /**
+ * The product that is converting this document to PDF.
+ *
+ * Leave fProducer empty to get the default, correct value.
+ */
+ SkString fProducer;
+ /**
+ * The date and time the document was created.
+ */
+ OptionalTimestamp fCreation;
+ /**
+ * The date and time the document was most recently modified.
+ */
+ OptionalTimestamp fModified;
+ };
+
+ /**
+ * Create a PDF-backed document, writing the results into a
+ * SkWStream.
+ *
+ * PDF pages are sized in point units. 1 pt == 1/72 inch ==
+ * 127/360 mm.
+ *
+ * @param stream A PDF document will be written to this
+ * stream. The document may write to the stream at
+ * anytime during its lifetime, until either close() is
+ * called or the document is deleted.
+ * @param dpi The DPI (pixels-per-inch) at which features without
+ * native PDF support will be rasterized (e.g. draw image
+ *        with perspective, draw text with perspective, ...). A
+ * larger DPI would create a PDF that reflects the
+ * original intent with better fidelity, but it can make
+ * for larger PDF files too, which would use more memory
+ *        while rendering, and it would be slower to process
+ *        or to send online or to a printer.
+ * @param metadata a PDFMetadata object. Any fields may be left
+ * empty.
+ * @param jpegEncoder For PDF documents, if a jpegEncoder is set,
+ * use it to encode SkImages and SkBitmaps as [JFIF]JPEGs.
+ * This feature is deprecated and is only supplied for
+ *        backwards compatibility.
+ *        The preferred method to create PDFs with JPEG images is
+ * to use SkImage::NewFromEncoded() and not jpegEncoder.
+ * Chromium uses NewFromEncoded.
+ * If the encoder is unset, or if jpegEncoder->onEncode()
+ * returns NULL, fall back on encoding images losslessly
+ * with Deflate.
+ * @param pdfa Iff true, include XMP metadata, a document UUID,
+ * and sRGB output intent information. This adds length
+ *        to the document and makes it non-reproducible, but these
+ *        are necessary features for PDF/A-2b conformance.
+ *
+ * @returns NULL if there is an error, otherwise a newly created
+ * PDF-backed SkDocument.
+ */
+ static sk_sp<SkDocument> MakePDF(SkWStream* stream,
+ SkScalar dpi,
+ const SkDocument::PDFMetadata& metadata,
+ sk_sp<SkPixelSerializer> jpegEncoder,
+ bool pdfa);
+
+ static sk_sp<SkDocument> MakePDF(SkWStream* stream,
+ SkScalar dpi = SK_ScalarDefaultRasterDPI) {
+ return SkDocument::MakePDF(stream, dpi, SkDocument::PDFMetadata(),
+ nullptr, false);
+ }
+
+ /**
+ * Create a PDF-backed document, writing the results into a file.
+ */
+ static sk_sp<SkDocument> MakePDF(const char outputFilePath[],
+ SkScalar dpi = SK_ScalarDefaultRasterDPI);
+
+ /**
+ * Create a XPS-backed document, writing the results into the stream.
+ * Returns NULL if XPS is not supported.
+ */
+ static sk_sp<SkDocument> MakeXPS(SkWStream* stream,
+ SkScalar dpi = SK_ScalarDefaultRasterDPI);
+
+ /**
+ * Create a XPS-backed document, writing the results into a file.
+ * Returns NULL if XPS is not supported.
+ */
+ static sk_sp<SkDocument> MakeXPS(const char path[],
+ SkScalar dpi = SK_ScalarDefaultRasterDPI);
+
+ /**
+ * Begin a new page for the document, returning the canvas that will draw
+ * into the page. The document owns this canvas, and it will go out of
+ * scope when endPage() or close() is called, or the document is deleted.
+ */
+ SkCanvas* beginPage(SkScalar width, SkScalar height,
+ const SkRect* content = NULL);
+
+ /**
+ * Call endPage() when the content for the current page has been drawn
+ * (into the canvas returned by beginPage()). After this call the canvas
+ * returned by beginPage() will be out-of-scope.
+ */
+ void endPage();
+
+ /**
+ * Call close() when all pages have been drawn. This will close the file
+ * or stream holding the document's contents. After close() the document
+ * can no longer add new pages. Deleting the document will automatically
+ * call close() if need be.
+ */
+ void close();
+
+ /**
+ * Call abort() to stop producing the document immediately.
+ * The stream output must be ignored, and should not be trusted.
+ */
+ void abort();
+
+protected:
+ SkDocument(SkWStream*, void (*)(SkWStream*, bool aborted));
+
+ // note: subclasses must call close() in their destructor, as the base class
+ // cannot do this for them.
+ virtual ~SkDocument();
+
+ virtual SkCanvas* onBeginPage(SkScalar width, SkScalar height,
+ const SkRect& content) = 0;
+ virtual void onEndPage() = 0;
+ virtual void onClose(SkWStream*) = 0;
+ virtual void onAbort() = 0;
+
+ // Allows subclasses to write to the stream as pages are written.
+ SkWStream* getStream() { return fStream; }
+
+ enum State {
+ kBetweenPages_State,
+ kInPage_State,
+ kClosed_State
+ };
+ State getState() const { return fState; }
+
+private:
+ SkWStream* fStream;
+ void (*fDoneProc)(SkWStream*, bool aborted);
+ State fState;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
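
Usage note: a minimal caller-side sketch of the page loop described in the SkDocument comment above. The draw_my_content() helper is an illustrative placeholder, not part of this header.

    #include "SkDocument.h"

    static void draw_my_content(SkCanvas*) { /* real page drawing goes here */ }

    static bool write_two_page_pdf(SkWStream* stream) {
        sk_sp<SkDocument> doc = SkDocument::MakePDF(stream);    // default 72 DPI
        if (!doc) {
            return false;                     // PDF backend unavailable
        }
        for (int page = 0; page < 2; ++page) {
            SkCanvas* canvas = doc->beginPage(612, 792);         // US Letter, in points
            draw_my_content(canvas);
            doc->endPage();
        }
        doc->close();                         // flushes the document to the stream
        return true;
    }
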
diff --git a/gfx/skia/skia/include/core/SkDraw.h b/gfx/skia/skia/include/core/SkDraw.h
new file mode 100644
index 000000000..d7068fd3f
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkDraw.h
@@ -0,0 +1,163 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDraw_DEFINED
+#define SkDraw_DEFINED
+
+#include "SkCanvas.h"
+#include "SkMask.h"
+#include "SkPaint.h"
+#include "SkStrokeRec.h"
+
+class SkBitmap;
+class SkClipStack;
+class SkBaseDevice;
+class SkBlitter;
+class SkMatrix;
+class SkPath;
+class SkRegion;
+class SkRasterClip;
+struct SkDrawProcs;
+struct SkRect;
+class SkRRect;
+
+class SkDraw {
+public:
+ SkDraw();
+
+ void drawPaint(const SkPaint&) const;
+ void drawPoints(SkCanvas::PointMode, size_t count, const SkPoint[],
+ const SkPaint&, bool forceUseDevice = false) const;
+ void drawRect(const SkRect& prePaintRect, const SkPaint&, const SkMatrix* paintMatrix,
+ const SkRect* postPaintRect) const;
+ void drawRect(const SkRect& rect, const SkPaint& paint) const {
+ this->drawRect(rect, paint, NULL, NULL);
+ }
+ void drawRRect(const SkRRect&, const SkPaint&) const;
+ /**
+ * To save on mallocs, we allow a flag that tells us that srcPath is
+ * mutable, so that we don't have to make copies of it as we transform it.
+ *
+ * If prePathMatrix is not null, it should logically be applied before any
+ * stroking or other effects. If there are no effects on the paint that
+ * affect the geometry/rasterization, then the pre matrix can just be
+ * pre-concated with the current matrix.
+ */
+ void drawPath(const SkPath& path, const SkPaint& paint,
+ const SkMatrix* prePathMatrix, bool pathIsMutable) const {
+ this->drawPath(path, paint, prePathMatrix, pathIsMutable, false);
+ }
+
+ void drawPath(const SkPath& path, const SkPaint& paint,
+ SkBlitter* customBlitter = NULL) const {
+ this->drawPath(path, paint, NULL, false, false, customBlitter);
+ }
+
+ /* If dstOrNull is null, computes a dst by mapping the bitmap's bounds through the matrix. */
+ void drawBitmap(const SkBitmap&, const SkMatrix&, const SkRect* dstOrNull,
+ const SkPaint&) const;
+ void drawSprite(const SkBitmap&, int x, int y, const SkPaint&) const;
+ void drawText(const char text[], size_t byteLength, SkScalar x,
+ SkScalar y, const SkPaint& paint) const;
+ void drawPosText(const char text[], size_t byteLength,
+ const SkScalar pos[], int scalarsPerPosition,
+ const SkPoint& offset, const SkPaint& paint) const;
+ void drawVertices(SkCanvas::VertexMode mode, int count,
+ const SkPoint vertices[], const SkPoint textures[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int ptCount,
+ const SkPaint& paint) const;
+
+ /**
+ * Overwrite the target with the path's coverage (i.e. its mask).
+ * Will overwrite the entire device, so it need not be zero'd first.
+ *
+ * Only device A8 is supported right now.
+ */
+ void drawPathCoverage(const SkPath& src, const SkPaint& paint,
+ SkBlitter* customBlitter = NULL) const {
+ this->drawPath(src, paint, NULL, false, true, customBlitter);
+ }
+
+ /** Helper function that creates a mask from a path and an optional maskfilter.
+ Note however, that the resulting mask will not have been actually filtered,
+ that must be done afterwards (by calling filterMask). The maskfilter is provided
+ solely to assist in computing the mask's bounds (if the mode requests that).
+ */
+ static bool DrawToMask(const SkPath& devPath, const SkIRect* clipBounds,
+ const SkMaskFilter*, const SkMatrix* filterMatrix,
+ SkMask* mask, SkMask::CreateMode mode,
+ SkStrokeRec::InitStyle style);
+
+ enum RectType {
+ kHair_RectType,
+ kFill_RectType,
+ kStroke_RectType,
+ kPath_RectType
+ };
+
+ /**
+ * Based on the paint's style, strokeWidth, and the matrix, classify how
+ * to draw the rect. If no special-case is available, returns
+ * kPath_RectType.
+ *
+ * Iff RectType == kStroke_RectType, then strokeSize is set to the device
+ * width and height of the stroke.
+ */
+ static RectType ComputeRectType(const SkPaint&, const SkMatrix&,
+ SkPoint* strokeSize);
+
+ static bool ShouldDrawTextAsPaths(const SkPaint&, const SkMatrix&);
+ void drawText_asPaths(const char text[], size_t byteLength,
+ SkScalar x, SkScalar y, const SkPaint&) const;
+ void drawPosText_asPaths(const char text[], size_t byteLength,
+ const SkScalar pos[], int scalarsPerPosition,
+ const SkPoint& offset, const SkPaint&) const;
+ static SkScalar ComputeResScaleForStroking(const SkMatrix& );
+private:
+ void drawDevMask(const SkMask& mask, const SkPaint&) const;
+ void drawBitmapAsMask(const SkBitmap&, const SkPaint&) const;
+
+ void drawPath(const SkPath&, const SkPaint&, const SkMatrix* preMatrix,
+ bool pathIsMutable, bool drawCoverage,
+ SkBlitter* customBlitter = NULL) const;
+
+ void drawLine(const SkPoint[2], const SkPaint&) const;
+ void drawDevPath(const SkPath& devPath, const SkPaint& paint, bool drawCoverage,
+ SkBlitter* customBlitter, bool doFill) const;
+ /**
+ * Return the current clip bounds, in local coordinates, with slop to account
+ * for antialiasing or hairlines (i.e. device-bounds outset by 1, and then
+ * run through the inverse of the matrix).
+ *
+ * If the matrix cannot be inverted, or the current clip is empty, return
+ * false and ignore bounds parameter.
+ */
+ bool SK_WARN_UNUSED_RESULT
+ computeConservativeLocalClipBounds(SkRect* bounds) const;
+
+ /** Returns the current setting for using fake gamma and contrast. */
+ uint32_t SK_WARN_UNUSED_RESULT scalerContextFlags() const;
+
+public:
+ SkPixmap fDst;
+ const SkMatrix* fMatrix; // required
+ const SkRasterClip* fRC; // required
+
+ const SkClipStack* fClipStack; // optional, may be null
+ SkBaseDevice* fDevice; // optional, may be null
+
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+};
+
+#endif
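
Usage note: SkDraw is a plain bundle of required pointers. A rough sketch of wiring one up to fill a raster destination, assuming SkRasterClip (a private Skia header, not shown here) plus the SkPixmap and SkMatrix APIs:

    #include "SkDraw.h"
    #include "SkMatrix.h"
    #include "SkPixmap.h"
    #include "SkRasterClip.h"   // assumed private header providing SkRasterClip

    static void fill_pixmap(const SkPixmap& dst, const SkPaint& paint) {
        SkMatrix matrix = SkMatrix::I();
        SkRasterClip rc(SkIRect::MakeWH(dst.width(), dst.height()));

        SkDraw draw;
        draw.fDst = dst;         // destination pixels
        draw.fMatrix = &matrix;  // required
        draw.fRC = &rc;          // required
        draw.drawPaint(paint);   // flood-fills the clip with the paint
    }
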
diff --git a/gfx/skia/skia/include/core/SkDrawFilter.h b/gfx/skia/skia/include/core/SkDrawFilter.h
new file mode 100644
index 000000000..2812017b7
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkDrawFilter.h
@@ -0,0 +1,55 @@
+
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawFilter_DEFINED
+#define SkDrawFilter_DEFINED
+
+#include "SkRefCnt.h"
+
+class SkCanvas;
+class SkPaint;
+
+/**
+ * DEPRECATED - use SkPaintFilterCanvas instead.
+ *
+ * Right before something is drawn, filter() is called with the
+ * paint. The filter may modify the paint as it wishes, which will then be
+ * used for the actual drawing. Note: this modification only lasts for the
+ * current draw, as a temporary copy of the paint is used.
+ */
+class SK_API SkDrawFilter : public SkRefCnt {
+public:
+ enum Type {
+ kPaint_Type,
+ kPoint_Type,
+ kLine_Type,
+ kBitmap_Type,
+ kRect_Type,
+ kRRect_Type,
+ kOval_Type,
+ kPath_Type,
+ kText_Type,
+ };
+
+ enum {
+ kTypeCount = kText_Type + 1
+ };
+
+ /**
+ * Called with the paint that will be used to draw the specified type.
+ * The implementation may modify the paint as they wish. If filter()
+ * returns false, the draw will be skipped.
+ */
+ virtual bool filter(SkPaint*, Type) = 0;
+
+private:
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
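
Usage note: a small sketch of the deprecated filter hook above. This filter forces antialiasing on every draw; it would be installed with SkCanvas::setDrawFilter() (declared in SkCanvas.h, not shown here).

    #include "SkDrawFilter.h"
    #include "SkPaint.h"

    class ForceAAFilter : public SkDrawFilter {
    public:
        bool filter(SkPaint* paint, Type) override {
            paint->setAntiAlias(true);   // change lasts only for the current draw
            return true;                 // returning false would skip the draw
        }
    };
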
diff --git a/gfx/skia/skia/include/core/SkDrawLooper.h b/gfx/skia/skia/include/core/SkDrawLooper.h
new file mode 100644
index 000000000..28d7d8bee
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkDrawLooper.h
@@ -0,0 +1,119 @@
+
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawLooper_DEFINED
+#define SkDrawLooper_DEFINED
+
+#include "SkBlurTypes.h"
+#include "SkFlattenable.h"
+#include "SkPoint.h"
+#include "SkColor.h"
+
+class SkCanvas;
+class SkPaint;
+struct SkRect;
+class SkString;
+
+/** \class SkDrawLooper
+ Subclasses of SkDrawLooper can be attached to a SkPaint. When they are,
+ and something is drawn to a canvas with that paint, the looper subclass will
+ be called, allowing it to modify the canvas and/or paint for that draw call.
+ More than that, via the next() method, the looper can cause the draw to be
+ invoked multiple times (hence the name loop-er), allowing it to perform effects
+ like shadows or frame/fills that require more than one pass.
+*/
+class SK_API SkDrawLooper : public SkFlattenable {
+public:
+ /**
+ * Holds state during a draw. Users call next() until it returns false.
+ *
+ * Subclasses of SkDrawLooper should create a subclass of this object to
+ * hold state specific to their subclass.
+ */
+ class SK_API Context : ::SkNoncopyable {
+ public:
+ Context() {}
+ virtual ~Context() {}
+
+ /**
+ * Called in a loop on objects returned by SkDrawLooper::createContext().
+ * Each time true is returned, the object is drawn (possibly with a modified
+ * canvas and/or paint). When false is finally returned, drawing for the object
+ * stops.
+ *
+ * On each call, the paint will be in its original state, but the
+ * canvas will be as it was following the previous call to next() or
+ * createContext().
+ *
+ * The implementation must ensure that, when next() finally returns
+ * false, the canvas has been restored to the state it was
+ * initially, before createContext() was first called.
+ */
+ virtual bool next(SkCanvas* canvas, SkPaint* paint) = 0;
+ };
+
+ /**
+ * Called right before something is being drawn. Returns a Context
+ * whose next() method should be called until it returns false.
+ * The caller has to ensure that the storage pointer provides enough
+ * memory for the Context. The required size can be queried by calling
+ * contextSize(). It is also the caller's responsibility to destroy the
+ * object after use.
+ */
+ virtual Context* createContext(SkCanvas*, void* storage) const = 0;
+
+ /**
+ * Returns the number of bytes needed to store subclasses of Context (belonging to the
+ * corresponding SkDrawLooper subclass).
+ */
+ virtual size_t contextSize() const = 0;
+
+
+ /**
+ * The fast bounds functions are used to enable the paint to be culled early
+ * in the drawing pipeline. If a subclass can support this feature it must
+ * return true for the canComputeFastBounds() function. If that function
+ * returns false then the computeFastBounds() behavior is undefined; otherwise it
+ * is expected to behave as follows. Given the parent paint and
+ * the parent's bounding rect, the subclass must fill in and return the
+ * storage rect, where the storage rect is the union of the src rect
+ * and the looper's bounding rect.
+ */
+ bool canComputeFastBounds(const SkPaint& paint) const;
+ void computeFastBounds(const SkPaint& paint, const SkRect& src, SkRect* dst) const;
+
+ struct BlurShadowRec {
+ SkScalar fSigma;
+ SkVector fOffset;
+ SkColor fColor;
+ SkBlurStyle fStyle;
+ SkBlurQuality fQuality;
+ };
+ /**
+ * If this looper can be interpreted as having two layers, such that
+ * 1. The first layer (bottom most) just has a blur and translate
+ * 2. The second layer has no modifications to either paint or canvas
+ * 3. No other layers.
+ * then return true, and if not null, fill out the BlurShadowRec.
+ *
+ * If any of the above are not met, return false and ignore the BlurShadowRec parameter.
+ */
+ virtual bool asABlurShadow(BlurShadowRec*) const;
+
+ SK_TO_STRING_PUREVIRT()
+ SK_DEFINE_FLATTENABLE_TYPE(SkDrawLooper)
+
+protected:
+ SkDrawLooper() {}
+
+private:
+ typedef SkFlattenable INHERITED;
+};
+
+#endif
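
Usage note: the caller-side contract for createContext()/next() described above, sketched for a single rect draw. The sk_malloc_throw/sk_free helpers from SkTypes.h are assumed.

    #include "SkCanvas.h"
    #include "SkDrawLooper.h"
    #include "SkPaint.h"

    static void draw_with_looper(SkCanvas* canvas, const SkPaint& origPaint,
                                 const SkDrawLooper* looper, const SkRect& rect) {
        void* storage = sk_malloc_throw(looper->contextSize());
        SkDrawLooper::Context* ctx = looper->createContext(canvas, storage);
        for (;;) {
            SkPaint paint(origPaint);          // each pass starts from the original paint
            if (!ctx->next(canvas, &paint)) {
                break;                         // canvas has been restored by the looper
            }
            canvas->drawRect(rect, paint);     // one pass of the loop
        }
        ctx->~Context();                       // caller destroys the context...
        sk_free(storage);                      // ...and frees its storage
    }
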
diff --git a/gfx/skia/skia/include/core/SkDrawable.h b/gfx/skia/skia/include/core/SkDrawable.h
new file mode 100644
index 000000000..6fec3fcf9
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkDrawable.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDrawable_DEFINED
+#define SkDrawable_DEFINED
+
+#include "SkFlattenable.h"
+
+class SkCanvas;
+class SkMatrix;
+class SkPicture;
+struct SkRect;
+
+/**
+ * Base-class for objects that draw into SkCanvas.
+ *
+ * The object has a generation ID, which is guaranteed to be unique across all drawables. To
+ * allow for clients of the drawable that may want to cache the results, the drawable must
+ * change its generation ID whenever its internal state changes such that it will draw differently.
+ */
+class SkDrawable : public SkFlattenable {
+public:
+ SkDrawable();
+
+ /**
+ * Draws into the specified canvas. The drawing sequence will be balanced upon return
+ * (i.e. the saveLevel() on the canvas will match what it was when draw() was called,
+ * and the current matrix and clip settings will not be changed).
+ */
+ void draw(SkCanvas*, const SkMatrix* = NULL);
+ void draw(SkCanvas*, SkScalar x, SkScalar y);
+
+ SkPicture* newPictureSnapshot();
+
+ /**
+ * Return a unique value for this instance. If two calls to this return the same value,
+ * it is presumed that calling the draw() method will render the same thing as well.
+ *
+ * Subclasses that change their state should call notifyDrawingChanged() to ensure that
+ * a new value will be returned the next time it is called.
+ */
+ uint32_t getGenerationID();
+
+ /**
+ * Return the (conservative) bounds of what the drawable will draw. If the drawable can
+ * change what it draws (e.g. animation or in response to some external change), then this
+ * must return a bounds that is always valid for all possible states.
+ */
+ SkRect getBounds();
+
+ /**
+ * Calling this invalidates the previous generation ID, and causes a new one to be computed
+ * the next time getGenerationID() is called. Typically this is called by the object itself,
+ * in response to its internal state changing.
+ */
+ void notifyDrawingChanged();
+
+ SK_DEFINE_FLATTENABLE_TYPE(SkDrawable)
+ Factory getFactory() const override { return nullptr; }
+
+protected:
+ virtual SkRect onGetBounds() = 0;
+ virtual void onDraw(SkCanvas*) = 0;
+
+ /**
+ * Default implementation calls onDraw() with a canvas that records into a picture. Subclasses
+ * may override if they have a more efficient way to return a picture for the current state
+ * of their drawable. Note: this picture must draw the same as what would be drawn from
+ * onDraw().
+ */
+ virtual SkPicture* onNewPictureSnapshot();
+
+private:
+ int32_t fGenerationID;
+};
+
+#endif
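
Usage note: a minimal SkDrawable subclass. Only onGetBounds() and onDraw() are required; getFactory() already has a default in the base class above. A client would call drawable->draw(canvas) and call notifyDrawingChanged() whenever the internal state (here, fRect) changes.

    #include "SkCanvas.h"
    #include "SkDrawable.h"
    #include "SkPaint.h"
    #include "SkRect.h"

    class RectDrawable : public SkDrawable {
    public:
        explicit RectDrawable(const SkRect& r) : fRect(r) {}

    protected:
        SkRect onGetBounds() override { return fRect; }

        void onDraw(SkCanvas* canvas) override {
            SkPaint paint;
            paint.setColor(SK_ColorBLUE);
            canvas->drawRect(fRect, paint);
        }

    private:
        SkRect fRect;
    };
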
diff --git a/gfx/skia/skia/include/core/SkError.h b/gfx/skia/skia/include/core/SkError.h
new file mode 100644
index 000000000..678c91025
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkError.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkError_DEFINED
+#define SkError_DEFINED
+
+
+/** \file SkError.h
+*/
+
+enum SkError {
+ /** All is well
+ */
+ kNoError_SkError=0,
+
+ /** User argument passed to Skia function was invalid: NULL when that’s
+ * not allowed, out of numeric range, bad enum, or violating some
+ * other general precondition.
+ */
+ kInvalidArgument_SkError,
+
+ /** User tried to perform some operation in a state when the operation
+ * was not legal, or the operands make no sense (e.g., asking for
+ * pixels from an SkPictureCanvas). Other examples might be
+ * inset()’ing a rectangle to make it degenerate (negative width/height).
+ */
+ kInvalidOperation_SkError,
+
+ /** Probably not needed right now, but in the future we could have opaque
+ * handles for SkPictures floating around, and it would be a good idea
+ * to anticipate this kind of issue.
+ */
+ kInvalidHandle_SkError,
+
+ /** This is probably not possible because paint surely has defaults for
+ * everything, but perhaps a paint can get into a bad state somehow.
+ */
+ kInvalidPaint_SkError,
+
+ /** Skia was unable to allocate memory to perform some task.
+ */
+ kOutOfMemory_SkError,
+
+ /** Skia failed while trying to consume some external resource.
+ */
+ kParseError_SkError,
+
+ /** Something went wrong internally; could be resource exhaustion but
+ * will often be a bug.
+ */
+ kInternalError_SkError
+};
+
+/** Return the current per-thread error code. Error codes are "sticky"; they
+ * are not reset by subsequent successful operations.
+ */
+SkError SkGetLastError();
+
+/** Clear the current per-thread error code back to kNoError_SkError.
+ */
+void SkClearLastError();
+
+/** Type for callback functions to be invoked whenever an error is registered.
+ * Callback functions take the error code being set, as well as a context
+ * argument that is provided when the callback is registered.
+ */
+typedef void (*SkErrorCallbackFunction)(SkError, void *);
+
+/** Set the current per-thread error callback.
+ *
+ * @param cb The callback function to be invoked. Passing NULL
+ * for cb will revert to the default error callback which
+ * does nothing on release builds, but on debug builds will
+ * print an informative error message to the screen.
+ * @param context An arbitrary pointer that will be passed to
+ * the provided callback function.
+ */
+void SkSetErrorCallback(SkErrorCallbackFunction cb, void *context);
+
+/** Get a human-readable description of the last (per-thread) error that
+ * occurred. The returned error message will include not only a human
+ * readable version of the error code, but also information about the
+ * conditions that led to the error itself.
+ */
+const char *SkGetLastErrorString();
+
+#endif /* SkError_DEFINED */
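
Usage note: a sketch of the per-thread error API above; the callback body is a placeholder.

    #include "SkError.h"

    static void note_skia_error(SkError, void* /*context*/) {
        // A real handler might log SkGetLastErrorString() here.
    }

    static void check_for_errors() {
        SkSetErrorCallback(note_skia_error, nullptr);

        // ... perform Skia calls ...

        if (SkGetLastError() != kNoError_SkError) {
            const char* msg = SkGetLastErrorString();  // human-readable description
            (void)msg;
            SkClearLastError();                        // errors are sticky until cleared
        }
    }
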
diff --git a/gfx/skia/skia/include/core/SkFilterQuality.h b/gfx/skia/skia/include/core/SkFilterQuality.h
new file mode 100644
index 000000000..54fae51fe
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFilterQuality.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFilterQuality_DEFINED
+#define SkFilterQuality_DEFINED
+
+#include "SkTypes.h"
+
+/**
+ * Controls how much filtering is done when scaling/transforming complex colors,
+ * e.g. images.
+ */
+enum SkFilterQuality {
+ kNone_SkFilterQuality, //!< fastest but lowest quality, typically nearest-neighbor
+ kLow_SkFilterQuality, //!< typically bilerp
+ kMedium_SkFilterQuality, //!< typically bilerp + mipmaps for down-scaling
+ kHigh_SkFilterQuality, //!< slowest but highest quality, typically bicubic or better
+
+ kLast_SkFilterQuality = kHigh_SkFilterQuality
+};
+
+#endif
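
Usage note: the quality level is normally carried on an SkPaint via SkPaint::setFilterQuality() (declared in SkPaint.h, not shown here); a short sketch:

    #include "SkPaint.h"

    static void request_bilerp(SkPaint* paint) {
        // kLow trades a little quality for speed; kHigh enables bicubic (or better) resampling.
        paint->setFilterQuality(kLow_SkFilterQuality);
    }
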
diff --git a/gfx/skia/skia/include/core/SkFlattenable.h b/gfx/skia/skia/include/core/SkFlattenable.h
new file mode 100644
index 000000000..88aeb7ee3
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFlattenable.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFlattenable_DEFINED
+#define SkFlattenable_DEFINED
+
+#include "SkRefCnt.h"
+
+class SkReadBuffer;
+class SkWriteBuffer;
+
+class SkPrivateEffectInitializer;
+
+/*
+ * Flattening is straight-forward:
+ * 1. call getFactory() so we have a function-ptr to recreate the subclass
+ * 2. call flatten(buffer) to write out enough data for the factory to read
+ *
+ * Unflattening is easy for the caller: new_instance = factory(buffer)
+ *
+ * The complexity of supporting this is as follows.
+ *
+ * If your subclass wants to control unflattening, use this macro in your declaration:
+ * SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS
+ * This will provide a getFactory(), and require that the subclass implements CreateProc.
+ *
+ * For older buffers (before the DEEPFLATTENING change), the macros below declare
+ * a thin factory DeepCreateProc. It checks the version of the buffer, and if it is pre-deep,
+ * then it calls through to a (usually protected) constructor, passing the buffer.
+ * If the buffer is newer, then it directly calls the "real" factory: CreateProc.
+ */
+
+#define SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP() static void InitializeFlattenables();
+
+#define SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(flattenable) \
+ void flattenable::InitializeFlattenables() {
+
+#define SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END \
+ }
+
+#define SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(flattenable) \
+ SkFlattenable::Register(#flattenable, flattenable::CreateProc, \
+ flattenable::GetFlattenableType());
+
+#define SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(flattenable) \
+ private: \
+ static sk_sp<SkFlattenable> CreateProc(SkReadBuffer&); \
+ friend class SkFlattenable::PrivateInitializer; \
+ public: \
+ Factory getFactory() const override { return CreateProc; }
+
+/** For SkFlattenable derived objects with a valid type
+ This macro should only be used in base class objects in core
+ */
+#define SK_DEFINE_FLATTENABLE_TYPE(flattenable) \
+ static Type GetFlattenableType() { \
+ return k##flattenable##_Type; \
+ }
+
+/** \class SkFlattenable
+
+ SkFlattenable is the base class for objects that need to be flattened
+ into a data stream for either transport or as part of the key to the
+ font cache.
+ */
+class SK_API SkFlattenable : public SkRefCnt {
+public:
+ enum Type {
+ kSkColorFilter_Type,
+ kSkDrawable_Type,
+ kSkDrawLooper_Type,
+ kSkImageFilter_Type,
+ kSkMaskFilter_Type,
+ kSkPathEffect_Type,
+ kSkPixelRef_Type,
+ kSkRasterizer_Type,
+ kSkShader_Type,
+ kSkUnused_Type, // used to be SkUnitMapper
+ kSkXfermode_Type,
+ kSkNormalSource_Type,
+ };
+
+ typedef sk_sp<SkFlattenable> (*Factory)(SkReadBuffer&);
+
+ SkFlattenable() {}
+
+ /** Implement this to return a factory function pointer that can be called
+ to recreate your class given a buffer (previously written to by your
+        override of flatten()).
+ */
+ virtual Factory getFactory() const = 0;
+
+ /**
+ * Returns the name of the object's class.
+ *
+ * Subclasses should override this function if they intend to provide
+ * support for flattening without using the global registry.
+ *
+ * If the flattenable is registered, there is no need to override.
+ */
+ virtual const char* getTypeName() const { return FactoryToName(getFactory()); }
+
+ static Factory NameToFactory(const char name[]);
+ static const char* FactoryToName(Factory);
+ static bool NameToType(const char name[], Type* type);
+
+ static void Register(const char name[], Factory, Type);
+
+ /**
+ * Override this if your subclass needs to record data that it will need to recreate itself
+ * from its CreateProc (returned by getFactory()).
+ */
+ virtual void flatten(SkWriteBuffer&) const {}
+
+protected:
+ class PrivateInitializer {
+ public:
+ static void InitCore();
+ static void InitEffects();
+ };
+
+private:
+ static void InitializeFlattenablesIfNeeded();
+
+ friend class SkGraphics;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkFlattenableSerialization.h b/gfx/skia/skia/include/core/SkFlattenableSerialization.h
new file mode 100644
index 000000000..ffb1b5ae9
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFlattenableSerialization.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFlattenableSerialization_DEFINED
+#define SkFlattenableSerialization_DEFINED
+
+#include "SkFlattenable.h"
+
+class SkData;
+
+SK_API SkData* SkValidatingSerializeFlattenable(SkFlattenable*);
+SK_API SkFlattenable* SkValidatingDeserializeFlattenable(const void* data, size_t size,
+ SkFlattenable::Type type);
+
+#endif
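
Usage note: a sketch of round-tripping a flattenable through the validating helpers above, using SkCornerPathEffect (declared elsewhere in Skia) purely as a convenient example; both returned objects are caller-owned.

    #include "SkCornerPathEffect.h"   // example flattenable, declared elsewhere in Skia
    #include "SkData.h"
    #include "SkFlattenableSerialization.h"

    static void round_trip_effect() {
        sk_sp<SkPathEffect> effect = SkCornerPathEffect::Make(8.0f);

        SkData* data = SkValidatingSerializeFlattenable(effect.get());
        if (!data) {
            return;
        }
        SkFlattenable* copy = SkValidatingDeserializeFlattenable(
                data->data(), data->size(), SkFlattenable::kSkPathEffect_Type);

        SkSafeUnref(copy);   // deserialized object is caller-owned
        data->unref();       // as is the serialized data
    }
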
diff --git a/gfx/skia/skia/include/core/SkFont.h b/gfx/skia/skia/include/core/SkFont.h
new file mode 100644
index 000000000..e50909aae
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFont.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFont_DEFINED
+#define SkFont_DEFINED
+
+#include "SkRefCnt.h"
+#include "SkScalar.h"
+
+class SkPaint;
+class SkTypeface;
+
+enum SkTextEncoding {
+ kUTF8_SkTextEncoding,
+ kUTF16_SkTextEncoding,
+ kUTF32_SkTextEncoding,
+ kGlyphID_SkTextEncoding,
+};
+
+/*
+ 1. The Hinting enum in SkPaint is gone entirely, absorbed into SkFont's flags.
+
+ 2. SkPaint Flags look like this today
+
+ enum Flags {
+ kAntiAlias_Flag = 0x01, //!< mask to enable antialiasing
+ kDither_Flag = 0x04, //!< mask to enable dithering
+ kUnderlineText_Flag = 0x08, //!< mask to enable underline text
+ kStrikeThruText_Flag = 0x10, //!< mask to enable strike-thru text
+ kFakeBoldText_Flag = 0x20, //!< mask to enable fake-bold text
+ kLinearText_Flag = 0x40, //!< mask to enable linear-text
+ kSubpixelText_Flag = 0x80, //!< mask to enable subpixel text positioning
+ kDevKernText_Flag = 0x100, //!< mask to enable device kerning text
+ kLCDRenderText_Flag = 0x200, //!< mask to enable subpixel glyph renderering
+ kEmbeddedBitmapText_Flag = 0x400, //!< mask to enable embedded bitmap strikes
+ kAutoHinting_Flag = 0x800, //!< mask to force Freetype's autohinter
+ kVerticalText_Flag = 0x1000,
+ kGenA8FromLCD_Flag = 0x2000, // hack for GDI -- do not use if you can help it
+ };
+
+ SkFont would absorb these:
+
+ kFakeBoldText_Flag = 0x20, //!< mask to enable fake-bold text
+ kLinearText_Flag = 0x40, //!< mask to enable linear-text
+ kSubpixelText_Flag = 0x80, //!< mask to enable subpixel text positioning
+ kDevKernText_Flag = 0x100, //!< mask to enable device kerning text
+ kLCDRenderText_Flag = 0x200, //!< mask to enable subpixel glyph renderering
+ kEmbeddedBitmapText_Flag = 0x400, //!< mask to enable embedded bitmap strikes
+ kAutoHinting_Flag = 0x800, //!< mask to force Freetype's autohinter
+ kVerticalText_Flag = 0x1000,
+ kGenA8FromLCD_Flag = 0x2000, // hack for GDI -- do not use if you can help it
+
+ leaving these still in paint
+
+ kAntiAlias_Flag = 0x01, //!< mask to enable antialiasing
+ kDither_Flag = 0x04, //!< mask to enable dithering
+ kUnderlineText_Flag = 0x08, //!< mask to enable underline text
+ kStrikeThruText_Flag = 0x10, //!< mask to enable strike-thru text
+
+ 3. Antialiasing
+
+ SkFont has a mask-type: BW, AA, LCD
+ SkPaint has antialias boolean
+
+ What to do if the font's mask-type disagrees with the paint?
+
+ */
+
+class SkFont : public SkRefCnt {
+public:
+ enum Flags {
+ /**
+ * Use the system's automatic hinting mechanism to hint the typeface.
+ * This is a last resort hinting method applied only if other hinting methods do not apply.
+ * TODO: where to put auto-normal vs auto-light?
+ */
+ kEnableAutoHints_Flag = 1 << 0,
+
+ /**
+ * If the typeface contains explicit bytecodes for hinting, use them.
+ * If both bytecode and auto hints are specified, attempt to use the bytecodes first;
+ * if that fails (e.g. there are no codes), then attempt to autohint.
+ */
+ kEnableByteCodeHints_Flag = 1 << 1,
+
+ /**
+ * If the typeface contains explicit bitmaps for hinting, use them.
+ * If both bytecode and auto hints are also specified, attempt to use the bitmaps first;
+ * if that fails (e.g. there are no bitmaps), then attempt to bytecode or autohint.
+ */
+ kEmbeddedBitmaps_Flag = 1 << 2,
+
+ /**
+ * Use rounded metric values (e.g. advance).
+ * If either auto or bytecode hinting was used, apply those results to the metrics of the
+ * glyphs as well. If no hinting was applied, the metrics will just be rounded to the
+ * nearest integer.
+ *
+ * This applies to calls that return metrics (e.g. measureText) and to drawing the glyphs
+ * (see SkCanvas drawText and drawPosText).
+ */
+ kUseNonlinearMetrics_Flag = 1 << 3,
+
+ kVertical_Flag = 1 << 4,
+ kGenA8FromLCD_Flag = 1 << 5,
+ kEmbolden_Flag = 1 << 6,
+ kDevKern_Flag = 1 << 7, // ifdef ANDROID ?
+ };
+
+ enum MaskType {
+ kBW_MaskType,
+ kA8_MaskType,
+ kLCD_MaskType,
+ };
+
+ static sk_sp<SkFont> Make(sk_sp<SkTypeface>, SkScalar size, MaskType, uint32_t flags);
+ static sk_sp<SkFont> Make(sk_sp<SkTypeface>, SkScalar size, SkScalar scaleX, SkScalar skewX,
+ MaskType, uint32_t flags);
+
+ /**
+ * Return a font with the same attributes of this font, but with the specified size.
+ * If size is not supported (e.g. <= 0 or non-finite) NULL will be returned.
+ */
+ sk_sp<SkFont> makeWithSize(SkScalar size) const;
+ /**
+ * Return a font with the same attributes of this font, but with the flags.
+ */
+ sk_sp<SkFont> makeWithFlags(uint32_t newFlags) const;
+
+ SkTypeface* getTypeface() const { return fTypeface.get(); }
+ SkScalar getSize() const { return fSize; }
+ SkScalar getScaleX() const { return fScaleX; }
+ SkScalar getSkewX() const { return fSkewX; }
+ uint32_t getFlags() const { return fFlags; }
+ MaskType getMaskType() const { return (MaskType)fMaskType; }
+
+ bool isVertical() const { return SkToBool(fFlags & kVertical_Flag); }
+ bool isEmbolden() const { return SkToBool(fFlags & kEmbolden_Flag); }
+ bool isEnableAutoHints() const { return SkToBool(fFlags & kEnableAutoHints_Flag); }
+ bool isEnableByteCodeHints() const { return SkToBool(fFlags & kEnableByteCodeHints_Flag); }
+ bool isUseNonLinearMetrics() const { return SkToBool(fFlags & kUseNonlinearMetrics_Flag); }
+ bool isDevKern() const { return SkToBool(fFlags & kDevKern_Flag); }
+
+ int textToGlyphs(const void* text, size_t byteLength, SkTextEncoding,
+ SkGlyphID glyphs[], int maxGlyphCount) const;
+
+ int countText(const void* text, size_t byteLength, SkTextEncoding encoding) {
+ return this->textToGlyphs(text, byteLength, encoding, nullptr, 0);
+ }
+
+ SkScalar measureText(const void* text, size_t byteLength, SkTextEncoding) const;
+
+ static sk_sp<SkFont> Testing_CreateFromPaint(const SkPaint&);
+
+private:
+ enum {
+ kAllFlags = 0xFF,
+ };
+
+ SkFont(sk_sp<SkTypeface>, SkScalar size, SkScalar scaleX, SkScalar skewX, MaskType,
+ uint32_t flags);
+
+ sk_sp<SkTypeface> fTypeface;
+ SkScalar fSize;
+ SkScalar fScaleX;
+ SkScalar fSkewX;
+ uint16_t fFlags;
+ uint8_t fMaskType;
+// uint8_t fPad;
+};
+
+#endif
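
Usage note: a sketch of constructing an SkFont and measuring text with it, assuming SkTypeface::MakeDefault() (declared in SkTypeface.h, not shown here) for the typeface.

    #include <string.h>

    #include "SkFont.h"
    #include "SkTypeface.h"   // assumed to provide SkTypeface::MakeDefault()

    static void measure_hello() {
        sk_sp<SkFont> font = SkFont::Make(SkTypeface::MakeDefault(), 12.0f,
                                          SkFont::kA8_MaskType, 0 /* flags */);
        if (!font) {
            return;   // e.g. the size was rejected
        }
        static const char text[] = "Hello";
        int glyphs = font->countText(text, strlen(text), kUTF8_SkTextEncoding);
        SkScalar width = font->measureText(text, strlen(text), kUTF8_SkTextEncoding);
        (void)glyphs;
        (void)width;
    }
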
diff --git a/gfx/skia/skia/include/core/SkFontLCDConfig.h b/gfx/skia/skia/include/core/SkFontLCDConfig.h
new file mode 100644
index 000000000..58b5a82b9
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFontLCDConfig.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontLCDConfig_DEFINED
+#define SkFontLCDConfig_DEFINED
+
+#include "SkTypes.h"
+
+class SK_API SkFontLCDConfig {
+public:
+ /** LCDs either have their color elements arranged horizontally or
+ vertically. When rendering subpixel glyphs we need to know which way
+ round they are.
+
+ Note, if you change this after startup, you'll need to flush the glyph
+ cache because it'll have the wrong type of masks cached.
+
+ @deprecated use SkPixelGeometry instead.
+ */
+ enum LCDOrientation {
+ kHorizontal_LCDOrientation = 0, //!< this is the default
+ kVertical_LCDOrientation = 1
+ };
+
+ /** @deprecated set on Device creation. */
+ static void SetSubpixelOrientation(LCDOrientation orientation);
+ /** @deprecated get from Device. */
+ static LCDOrientation GetSubpixelOrientation();
+
+ /** LCD color elements can vary in order. For subpixel text we need to know
+ the order which the LCDs uses so that the color fringes are in the
+ correct place.
+
+ Note, if you change this after startup, you'll need to flush the glyph
+ cache because it'll have the wrong type of masks cached.
+
+ kNONE_LCDOrder means that the subpixel elements are not spatially
+ separated in any usable fashion.
+
+ @deprecated use SkPixelGeometry instead.
+ */
+ enum LCDOrder {
+ kRGB_LCDOrder = 0, //!< this is the default
+ kBGR_LCDOrder = 1,
+ kNONE_LCDOrder = 2
+ };
+
+ /** @deprecated set on Device creation. */
+ static void SetSubpixelOrder(LCDOrder order);
+ /** @deprecated get from Device. */
+ static LCDOrder GetSubpixelOrder();
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkFontStyle.h b/gfx/skia/skia/include/core/SkFontStyle.h
new file mode 100644
index 000000000..7dd25910a
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFontStyle.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontStyle_DEFINED
+#define SkFontStyle_DEFINED
+
+#include "SkTypes.h"
+
+class SK_API SkFontStyle {
+public:
+ enum Weight {
+ kInvisible_Weight = 0,
+ kThin_Weight = 100,
+ kExtraLight_Weight = 200,
+ kLight_Weight = 300,
+ kNormal_Weight = 400,
+ kMedium_Weight = 500,
+ kSemiBold_Weight = 600,
+ kBold_Weight = 700,
+ kExtraBold_Weight = 800,
+ kBlack_Weight = 900,
+ kExtraBlack_Weight = 1000,
+ };
+
+ enum Width {
+ kUltraCondensed_Width = 1,
+ kExtraCondensed_Width = 2,
+ kCondensed_Width = 3,
+ kSemiCondensed_Width = 4,
+ kNormal_Width = 5,
+ kSemiExpanded_Width = 6,
+ kExpanded_Width = 7,
+ kExtraExpanded_Width = 8,
+ kUltraExpanded_Width = 9,
+ };
+
+ enum Slant {
+ kUpright_Slant,
+ kItalic_Slant,
+ kOblique_Slant,
+ };
+
+ SkFontStyle();
+ SkFontStyle(int weight, int width, Slant);
+
+ static SkFontStyle FromOldStyle(unsigned oldStyle);
+
+ bool operator==(const SkFontStyle& rhs) const {
+ return fUnion.fU32 == rhs.fUnion.fU32;
+ }
+
+ int weight() const { return fUnion.fR.fWeight; }
+ int width() const { return fUnion.fR.fWidth; }
+ Slant slant() const { return (Slant)fUnion.fR.fSlant; }
+
+private:
+ union {
+ struct {
+ uint16_t fWeight; // 100 .. 900
+ uint8_t fWidth; // 1 .. 9
+ uint8_t fSlant; // 0 .. 2
+ } fR;
+ uint32_t fU32;
+ } fUnion;
+};
+
+#endif
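
Usage note: a short sketch of building a style and reading its components back.

    #include "SkFontStyle.h"

    static void describe_bold_italic() {
        SkFontStyle style(SkFontStyle::kBold_Weight,
                          SkFontStyle::kNormal_Width,
                          SkFontStyle::kItalic_Slant);

        int weight = style.weight();                // 700
        int width = style.width();                  // 5
        SkFontStyle::Slant slant = style.slant();   // kItalic_Slant
        (void)weight; (void)width; (void)slant;
    }
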
diff --git a/gfx/skia/skia/include/core/SkGraphics.h b/gfx/skia/skia/include/core/SkGraphics.h
new file mode 100644
index 000000000..d5a730d9e
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkGraphics.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGraphics_DEFINED
+#define SkGraphics_DEFINED
+
+#include "SkTypes.h"
+
+class SkData;
+class SkImageGenerator;
+class SkTraceMemoryDump;
+
+class SK_API SkGraphics {
+public:
+ /**
+ * Call this at process initialization time if your environment does not
+ * permit static global initializers that execute code.
+ * Init() is thread-safe and idempotent.
+ */
+ static void Init();
+
+ // We're in the middle of cleaning this up.
+ static void Term() {}
+
+ /**
+ * Return the version numbers for the library. If a parameter is not
+ * null, it is set to the corresponding version number.
+ */
+ static void GetVersion(int32_t* major, int32_t* minor, int32_t* patch);
+
+ /**
+ * Return the max number of bytes that should be used by the font cache.
+ * If the cache needs to allocate more, it will purge previous entries.
+ * This max can be changed by calling SetFontCacheLimit().
+ */
+ static size_t GetFontCacheLimit();
+
+ /**
+ * Specify the max number of bytes that should be used by the font cache.
+ * If the cache needs to allocate more, it will purge previous entries.
+ *
+ * This function returns the previous setting, as if GetFontCacheLimit()
+ * had been called before the new limit was set.
+ */
+ static size_t SetFontCacheLimit(size_t bytes);
+
+ /**
+ * Return the number of bytes currently used by the font cache.
+ */
+ static size_t GetFontCacheUsed();
+
+ /**
+ * Return the number of entries in the font cache.
+ * A cache "entry" is associated with each typeface + pointSize + matrix.
+ */
+ static int GetFontCacheCountUsed();
+
+ /**
+ * Return the current limit to the number of entries in the font cache.
+ * A cache "entry" is associated with each typeface + pointSize + matrix.
+ */
+ static int GetFontCacheCountLimit();
+
+ /**
+ * Set the limit to the number of entries in the font cache, and return
+ * the previous value. If this new value is lower than the previous,
+ * it will automatically try to purge entries to meet the new limit.
+ */
+ static int SetFontCacheCountLimit(int count);
+
+ /**
+ * For debugging purposes, this will attempt to purge the font cache. It
+ * does not change the limit, but will cause subsequent font measures and
+ * draws to be recreated, since they will no longer be in the cache.
+ */
+ static void PurgeFontCache();
+
+ /**
+ * Scaling bitmaps with the kHigh_SkFilterQuality setting is
+ * expensive, so the result is saved in the global Scaled Image
+ * Cache.
+ *
+ * This function returns the memory usage of the Scaled Image Cache.
+ */
+ static size_t GetResourceCacheTotalBytesUsed();
+
+ /**
+ * These functions get/set the memory usage limit for the resource cache, used for temporary
+ * bitmaps and other resources. Entries are purged from the cache when the memory usage
+ * exceeds this limit.
+ */
+ static size_t GetResourceCacheTotalByteLimit();
+ static size_t SetResourceCacheTotalByteLimit(size_t newLimit);
+
+ /**
+ * For debugging purposes, this will attempt to purge the resource cache. It
+ * does not change the limit.
+ */
+ static void PurgeResourceCache();
+
+ /**
+ * When the cacheable entry is very large (e.g. a large scaled bitmap), adding it to the cache
+ * can cause most/all of the existing entries to be purged. To avoid this, the client can set
+ * a limit for a single allocation. If a cacheable entry would have been cached, but its size
+ * exceeds this limit, then we do not attempt to cache it at all.
+ *
+ * Zero is the default value, meaning we always attempt to cache entries.
+ */
+ static size_t GetResourceCacheSingleAllocationByteLimit();
+ static size_t SetResourceCacheSingleAllocationByteLimit(size_t newLimit);
+
+ /**
+ * Dumps memory usage of caches using the SkTraceMemoryDump interface. See SkTraceMemoryDump
+ * for usage of this method.
+ */
+ static void DumpMemoryStatistics(SkTraceMemoryDump* dump);
+
+ /**
+ * Free as much globally cached memory as possible. This will purge all private caches in Skia,
+ * including font and image caches.
+ *
+ * If there are caches associated with GPU context, those will not be affected by this call.
+ */
+ static void PurgeAllCaches();
+
+ /**
+ * Applications with command line options may pass optional state, such
+ * as cache sizes, here, for instance:
+ * font-cache-limit=12345678
+ *
+ * The flags format is name=value[;name=value...] with no spaces.
+ * This format is subject to change.
+ */
+ static void SetFlags(const char* flags);
+
+ /**
+ * Return the max number of bytes that should be used by the thread-local
+ * font cache.
+ * If the cache needs to allocate more, it will purge previous entries.
+ * This max can be changed by calling SetFontCacheLimit().
+ *
+ * If this thread has never called SetTLSFontCacheLimit, or has called it
+ * with 0, then this thread is using the shared font cache. In that case,
+ * this function will always return 0, and the caller may want to call
+ * GetFontCacheLimit.
+ */
+ static size_t GetTLSFontCacheLimit();
+
+ /**
+ * Specify the max number of bytes that should be used by the thread-local
+ * font cache. If this value is 0, then this thread will use the shared
+ * global font cache.
+ */
+ static void SetTLSFontCacheLimit(size_t bytes);
+
+ typedef SkImageGenerator* (*ImageGeneratorFromEncodedFactory)(SkData*);
+
+ /**
+ * To instantiate images from encoded data, Skia first looks at this runtime function-ptr. If it
+ * exists, it is called to create an SkImageGenerator from SkData. If there is no function-ptr
+ * or there is, but it returns NULL, then skia will call its internal default implementation.
+ *
+ * Returns the previous factory (which could be NULL).
+ */
+ static ImageGeneratorFromEncodedFactory
+ SetImageGeneratorFromEncodedFactory(ImageGeneratorFromEncodedFactory);
+};
+
+class SkAutoGraphics {
+public:
+ SkAutoGraphics() {
+ SkGraphics::Init();
+ }
+};
+
+#endif
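
Usage note: a startup-time sketch of the init and cache-limit calls above; the byte limits are arbitrary illustrative values.

    #include "SkGraphics.h"

    static void configure_skia() {
        SkGraphics::Init();   // thread-safe and idempotent

        // Both setters return the previous limit.
        size_t oldFontLimit = SkGraphics::SetFontCacheLimit(4 * 1024 * 1024);
        size_t oldResourceLimit = SkGraphics::SetResourceCacheTotalByteLimit(64 * 1024 * 1024);
        (void)oldFontLimit;
        (void)oldResourceLimit;

        // Later, e.g. on memory pressure:
        SkGraphics::PurgeAllCaches();
    }
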
diff --git a/gfx/skia/skia/include/core/SkImage.h b/gfx/skia/skia/include/core/SkImage.h
new file mode 100644
index 000000000..f55b679c0
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkImage.h
@@ -0,0 +1,483 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImage_DEFINED
+#define SkImage_DEFINED
+
+#include "SkFilterQuality.h"
+#include "SkImageInfo.h"
+#include "SkImageEncoder.h"
+#include "SkRefCnt.h"
+#include "SkScalar.h"
+#include "SkShader.h"
+
+class SkData;
+class SkCanvas;
+class SkColorTable;
+class SkImageGenerator;
+class SkPaint;
+class SkPicture;
+class SkPixelSerializer;
+class SkString;
+class SkSurface;
+class GrContext;
+class GrContextThreadSafeProxy;
+class GrTexture;
+
+/**
+ * SkImage is an abstraction for drawing a rectangle of pixels, though the
+ * particular type of image could be actually storing its data on the GPU, or
+ * as drawing commands (picture or PDF or otherwise), ready to be played back
+ * into another canvas.
+ *
+ * The content of SkImage is always immutable, though the actual storage may
+ * change, if for example that image can be re-created via encoded data or
+ * other means.
+ *
+ * SkImage always has non-zero dimensions. If there is a request to create a new image, either
+ * directly or via SkSurface, and either of the requested dimensions are zero, then NULL will be
+ * returned.
+ */
+class SK_API SkImage : public SkRefCnt {
+public:
+ typedef SkImageInfo Info;
+ typedef void* ReleaseContext;
+
+ static sk_sp<SkImage> MakeRasterCopy(const SkPixmap&);
+ static sk_sp<SkImage> MakeRasterData(const Info&, sk_sp<SkData> pixels, size_t rowBytes);
+
+ typedef void (*RasterReleaseProc)(const void* pixels, ReleaseContext);
+
+ /**
+ * Return a new Image referencing the specified pixels. These must remain valid and unchanged
+ * until the specified release-proc is called, indicating that Skia no longer has a reference
+ * to the pixels.
+ *
+ * Returns NULL if the requested pixmap info is unsupported.
+ */
+ static sk_sp<SkImage> MakeFromRaster(const SkPixmap&, RasterReleaseProc, ReleaseContext);
+
+ /**
+ * Construct a new image from the specified bitmap. If the bitmap is marked immutable, and
+ * its pixel memory is shareable, it may be shared instead of copied.
+ */
+ static sk_sp<SkImage> MakeFromBitmap(const SkBitmap&);
+
+ /**
+ * Construct a new SkImage based on the given ImageGenerator. Returns NULL on error.
+ * This function will always take ownership of the passed generator.
+ *
+ * If a subset is specified, it must be contained within the generator's bounds.
+ */
+ static sk_sp<SkImage> MakeFromGenerator(SkImageGenerator*, const SkIRect* subset = nullptr);
+
+ /**
+ * Construct a new SkImage based on the specified encoded data. Returns NULL on failure,
+ * which can mean that the format of the encoded data was not recognized/supported.
+ *
+ * If a subset is specified, it must be contained within the encoded data's bounds.
+ */
+ static sk_sp<SkImage> MakeFromEncoded(sk_sp<SkData> encoded, const SkIRect* subset = nullptr);
+
+ /**
+ * Create a new image from the specified descriptor. Note - the caller is responsible for
+ * managing the lifetime of the underlying platform texture.
+ *
+ * Will return NULL if the specified descriptor is unsupported.
+ */
+ static sk_sp<SkImage> MakeFromTexture(GrContext* ctx, const GrBackendTextureDesc& desc) {
+ return MakeFromTexture(ctx, desc, kPremul_SkAlphaType, nullptr, nullptr, nullptr);
+ }
+
+ static sk_sp<SkImage> MakeFromTexture(GrContext* ctx, const GrBackendTextureDesc& de,
+ SkAlphaType at) {
+ return MakeFromTexture(ctx, de, at, nullptr, nullptr, nullptr);
+ }
+
+ typedef void (*TextureReleaseProc)(ReleaseContext);
+
+ /**
+ * Create a new image from the specified descriptor. The underlying platform texture must stay
+ * valid and unaltered until the specified release-proc is invoked, indicating that Skia
+ * no longer is holding a reference to it.
+ *
+ * Will return NULL if the specified descriptor is unsupported.
+ */
+ static sk_sp<SkImage> MakeFromTexture(GrContext* ctx, const GrBackendTextureDesc& desc,
+ SkAlphaType at, TextureReleaseProc trp,
+ ReleaseContext rc) {
+ return MakeFromTexture(ctx, desc, at, nullptr, trp, rc);
+ }
+
+ /**
+ * Create a new image from the specified descriptor. The underlying platform texture must stay
+ * valid and unaltered until the specified release-proc is invoked, indicating that Skia
+ * no longer is holding a reference to it.
+ *
+ * Will return NULL if the specified descriptor is unsupported.
+ */
+ static sk_sp<SkImage> MakeFromTexture(GrContext*, const GrBackendTextureDesc&, SkAlphaType,
+ sk_sp<SkColorSpace>, TextureReleaseProc, ReleaseContext);
+
+ /**
+ * Create a new image from the specified descriptor. Note - Skia will delete or recycle the
+ * texture when the image is released.
+ *
+ * Will return NULL if the specified descriptor is unsupported.
+ */
+ static sk_sp<SkImage> MakeFromAdoptedTexture(GrContext*, const GrBackendTextureDesc&,
+ SkAlphaType = kPremul_SkAlphaType,
+ sk_sp<SkColorSpace> = nullptr);
+
+ /**
+ * Create a new image by copying the pixels from the specified y, u, v textures. The data
+ * from the textures is immediately ingested into the image and the textures can be modified or
+ * deleted after the function returns. The image will have the dimensions of the y texture.
+ */
+ static sk_sp<SkImage> MakeFromYUVTexturesCopy(GrContext*, SkYUVColorSpace,
+ const GrBackendObject yuvTextureHandles[3],
+ const SkISize yuvSizes[3],
+ GrSurfaceOrigin,
+ sk_sp<SkColorSpace> = nullptr);
+
+ /**
+ * Create a new image by copying the pixels from the specified y and uv textures. The data
+ * from the textures is immediately ingested into the image and the textures can be modified or
+ * deleted after the function returns. The image will have the dimensions of the y texture.
+ */
+ static sk_sp<SkImage> MakeFromNV12TexturesCopy(GrContext*, SkYUVColorSpace,
+ const GrBackendObject nv12TextureHandles[2],
+ const SkISize nv12Sizes[2], GrSurfaceOrigin,
+ sk_sp<SkColorSpace> = nullptr);
+
+ static sk_sp<SkImage> MakeFromPicture(sk_sp<SkPicture>, const SkISize& dimensions,
+ const SkMatrix*, const SkPaint*);
+
+ static sk_sp<SkImage> MakeTextureFromPixmap(GrContext*, const SkPixmap&, SkBudgeted budgeted);
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////
+
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+ SkISize dimensions() const { return SkISize::Make(fWidth, fHeight); }
+ SkIRect bounds() const { return SkIRect::MakeWH(fWidth, fHeight); }
+ uint32_t uniqueID() const { return fUniqueID; }
+ SkAlphaType alphaType() const;
+ bool isOpaque() const { return SkAlphaTypeIsOpaque(this->alphaType()); }
+
+ /**
+ * Extracts YUV planes from the SkImage and stores them in client-provided memory. The sizes
+ * planes and rowBytes arrays are ordered [y, u, v].
+ */
+ bool readYUV8Planes(const SkISize[3], void* const planes[3], const size_t rowBytes[3],
+ SkYUVColorSpace) const;
+
+#ifdef SK_SUPPORT_LEGACY_CREATESHADER_PTR
+ SkShader* newShader(SkShader::TileMode, SkShader::TileMode,
+ const SkMatrix* localMatrix = nullptr) const;
+#endif
+
+ sk_sp<SkShader> makeShader(SkShader::TileMode, SkShader::TileMode,
+ const SkMatrix* localMatrix = nullptr) const;
+
+ /**
+ * If the image has direct access to its pixels (i.e. they are in local RAM)
+ * return true, and if not null, return in the pixmap parameter the info about the
+ * image's pixels.
+ *
+ * On failure, return false and ignore the pixmap parameter.
+ */
+ bool peekPixels(SkPixmap* pixmap) const;
+
+#ifdef SK_SUPPORT_LEGACY_PEEKPIXELS_PARMS
+ /**
+ * If the image has direct access to its pixels (i.e. they are in local
+ * RAM) return the (const) address of those pixels, and if not null, return
+ * the ImageInfo and rowBytes. The returned address is only valid while
+ * the image object is in scope.
+ *
+ * On failure, returns NULL and the info and rowBytes parameters are
+ * ignored.
+ *
+ * DEPRECATED -- use the SkPixmap variant instead
+ */
+ const void* peekPixels(SkImageInfo* info, size_t* rowBytes) const;
+#endif
+
+ /**
+ * Some images have to perform preliminary work in preparation for drawing. This can be
+ * decoding, uploading to a GPU, or other tasks. These happen automatically when an image
+ * is drawn, and often they are cached so that the cost is only paid the first time.
+ *
+ * Preroll() can be called before drawing to try to perform this preparatory work ahead of time.
+ * For images that have no such work, this returns instantly. Others may do something to
+ * prepare their cache and then return.
+ *
+ * If the image will be drawn to a GPU-backed canvas or surface, pass the associated GrContext.
+ * If the image will be drawn to any other type of canvas or surface, pass null.
+ */
+ void preroll(GrContext* = nullptr) const;
+
+ // DEPRECATED - currently used by Canvas2DLayerBridge in Chromium.
+ GrTexture* getTexture() const;
+
+ /**
+ * Returns true if the image is texture backed.
+ */
+ bool isTextureBacked() const;
+
+ /**
+ * Retrieves the backend API handle of the texture. If flushPendingGrContextIO then the
+ * GrContext will issue to the backend API any deferred IO operations on the texture before
+ * returning.
+ */
+ GrBackendObject getTextureHandle(bool flushPendingGrContextIO) const;
+
+ /**
+ * Hints to image calls where the system might cache computed intermediates (e.g. the results
+ * of decoding or a read-back from the GPU). Passing kAllow signals that the system's default
+ * behavior is fine. Passing kDisallow signals that caching should be avoided.
+ */
+ enum CachingHint {
+ kAllow_CachingHint,
+ kDisallow_CachingHint,
+ };
+
+ /**
+ * Copy the pixels from the image into the specified buffer (pixels + rowBytes),
+ * converting them into the requested format (dstInfo). The image pixels are read
+ * starting at the specified (srcX,srcY) location.
+ *
+ * The specified ImageInfo and (srcX,srcY) offset specify a source rectangle
+ *
+ * srcR.setXYWH(srcX, srcY, dstInfo.width(), dstInfo.height());
+ *
+ * srcR is intersected with the bounds of the image. If this intersection is not empty,
+ * then we have two sets of pixels (of equal size). Replace the dst pixels with the
+ * corresponding src pixels, performing any colortype/alphatype transformations needed
+ * (in the case where the src and dst have different colortypes or alphatypes).
+ *
+ * This call can fail, returning false, for several reasons:
+ * - If srcR does not intersect the image bounds.
+ * - If the requested colortype/alphatype cannot be converted from the image's types.
+ */
+ bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY, CachingHint = kAllow_CachingHint) const;
+
+ bool readPixels(const SkPixmap& dst, int srcX, int srcY,
+ CachingHint = kAllow_CachingHint) const;
+
+ /**
+ * Copy the pixels from this image into the dst pixmap, converting as needed into dst's
+ * colortype/alphatype. If the conversion cannot be performed, false is returned.
+ *
+ * If dst's dimensions differ from the src dimension, the image will be scaled, applying the
+ * specified filter-quality.
+ */
+ bool scalePixels(const SkPixmap& dst, SkFilterQuality, CachingHint = kAllow_CachingHint) const;
+
+ /**
+ * Encode the image's pixels and return the result as a new SkData, which
+ * the caller must manage (i.e. call unref() when they are done).
+ *
+ * If the image type cannot be encoded, or the requested encoder type is
+ * not supported, this will return NULL.
+ *
+ * Note: this will attempt to encode the image's pixels in the specified format,
+ * even if the image returns an SkData from refEncoded(). That data will be ignored.
+ */
+ SkData* encode(SkImageEncoder::Type, int quality) const;
+
+ /**
+ * Encode the image and return the result as a caller-managed SkData. This will
+ * attempt to reuse existing encoded data (as returned by refEncoded).
+ *
+ * We defer to the SkPixelSerializer both for vetting existing encoded data
+ * (useEncodedData) and for encoding the image (encode) when no such data is
+ * present or is rejected by the serializer.
+ *
+ * If not specified, we use a default serializer which 1) always accepts existing data
+ * (in any format) and 2) encodes to PNG.
+ *
+ * If no compatible encoded data exists and encoding fails, this method will also
+ * fail (return NULL).
+ */
+ SkData* encode(SkPixelSerializer* = nullptr) const;
+
+ /**
+ * If the image already has its contents in encoded form (e.g. PNG or JPEG), return a ref
+ * to that data; the caller is responsible for calling unref() on it when done.
+ *
+ * If the image does not already have its contents in encoded form, return NULL.
+ *
+ * Note: to force the image to return its contents as encoded data, try calling encode(...).
+ */
+ SkData* refEncoded() const;
+
+ const char* toString(SkString*) const;
+
+ /**
+ * Return a new image that is a subset of this image. The underlying implementation may
+ * share the pixels, or it may make a copy.
+ *
+ * If subset does not intersect the bounds of this image, or the copy/share cannot be made,
+ * NULL will be returned.
+ */
+ sk_sp<SkImage> makeSubset(const SkIRect& subset) const;
+
+ /**
+ * Ensures that an image is backed by a texture (when GrContext is non-null). If no
+ * transformation is required, the returned image may be the same as this image. If this
+ * image is from a different GrContext, this will fail.
+ */
+ sk_sp<SkImage> makeTextureImage(GrContext*) const;
+
+ /**
+ * If the image is texture-backed, this returns a raster copy of it (or nullptr if reading back
+ * the pixels fails). Otherwise, it returns the original image.
+ */
+ sk_sp<SkImage> makeNonTextureImage() const;
+ /**
+ * Apply a given image filter to this image, and return the filtered result.
+ *
+ * The subset represents the active portion of this image. The return value is similarly an
+ * SkImage, with an active subset (outSubset). This is usually used with texture-backed
+ * images, where the backing texture may be an approximate match and thus larger than the required size.
+ *
+ * clipBounds constrains the device-space extent of the image which may be produced to the
+ * given rect.
+ *
+ * offset is the amount to translate the resulting image relative to the src when it is drawn.
+ * This is an out-param.
+ *
+ * If the result image cannot be created, or the result would be transparent black, null
+ * is returned, in which case the offset and outSubset parameters should be ignored by the
+ * caller.
+ */
+ sk_sp<SkImage> makeWithFilter(const SkImageFilter* filter, const SkIRect& subset,
+ const SkIRect& clipBounds, SkIRect* outSubset,
+ SkIPoint* offset) const;
+
+ /** Drawing params for which a deferred texture image data should be optimized. */
+ struct DeferredTextureImageUsageParams {
+ DeferredTextureImageUsageParams(const SkMatrix matrix, const SkFilterQuality quality,
+ int preScaleMipLevel)
+ : fMatrix(matrix), fQuality(quality), fPreScaleMipLevel(preScaleMipLevel) {}
+ SkMatrix fMatrix;
+ SkFilterQuality fQuality;
+ int fPreScaleMipLevel;
+ };
+
+ /**
+ * This method allows clients to capture the data necessary to turn an SkImage into a texture-
+ * backed image. If the original image is codec-backed, this will decode into a format optimized
+ * for the context represented by the proxy. This method is thread safe with respect to the
+ * GrContext from which the proxy came. Clients allocate and manage the storage of the deferred
+ * texture data and control its lifetime. No cleanup is required; it is therefore safe to simply
+ * free the memory out from under the data.
+ *
+ * The same method is used both for getting the size necessary for pre-uploaded texture data
+ * and for retrieving the data. The params array represents the set of draws over which to
+ * optimize the pre-upload data.
+ *
+ * When called with a null buffer, this returns the size that the client must allocate in order
+ * to create deferred texture data for this image (or zero if this is an inappropriate
+ * candidate). The buffer allocated by the client should be 8-byte aligned.
+ *
+ * When buffer is not null, this fills in the deferred texture data for this image in the
+ * provided buffer (assuming this is an appropriate candidate image and the buffer is
+ * appropriately aligned). Upon success the size written is returned; otherwise 0.
+ */
+ size_t getDeferredTextureImageData(const GrContextThreadSafeProxy&,
+ const DeferredTextureImageUsageParams[],
+ int paramCnt,
+ void* buffer,
+ SkSourceGammaTreatment treatment =
+ SkSourceGammaTreatment::kIgnore) const;
+
+ /**
+ * Returns a texture-backed image from data produced in SkImage::getDeferredTextureImageData.
+ * The context must be the context that provided the proxy passed to
+ * getDeferredTextureImageData.
+ */
+ static sk_sp<SkImage> MakeFromDeferredTextureImageData(GrContext*, const void*, SkBudgeted);
+
+ // Helper functions to convert to SkBitmap
+
+ enum LegacyBitmapMode {
+ kRO_LegacyBitmapMode,
+ kRW_LegacyBitmapMode,
+ };
+
+ /**
+ * Attempt to create a bitmap with the same pixels as the image. The result will always be
+ * a raster-backed bitmap (texture-backed bitmaps are DEPRECATED, and not supported here).
+ *
+ * If the mode is kRO (read-only), the resulting bitmap will be marked as immutable.
+ *
+ * On success, returns true. On failure, returns false and the bitmap parameter will be reset
+ * to empty.
+ */
+ bool asLegacyBitmap(SkBitmap*, LegacyBitmapMode) const;
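+
+ // Editor's note: a minimal sketch, not part of the original header, of handing an image to a
+ // bitmap-based legacy API; `image` is assumed to be a valid sk_sp<SkImage>.
+ //
+ //   SkBitmap bitmap;
+ //   if (image->asLegacyBitmap(&bitmap, SkImage::kRO_LegacyBitmapMode)) {
+ //       // bitmap is raster-backed and marked immutable
+ //   }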
+
+ /**
+ * Returns true if the image is backed by an image-generator or other src that creates
+ * (and caches) its pixels / texture on-demand.
+ */
+ bool isLazyGenerated() const;
+
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFACTORY
+ static SkImage* NewRasterCopy(const Info&, const void* pixels, size_t rowBytes,
+ SkColorTable* ctable = nullptr);
+ static SkImage* NewRasterData(const Info&, SkData* pixels, size_t rowBytes);
+ static SkImage* NewFromRaster(const Info&, const void* pixels, size_t rowBytes,
+ RasterReleaseProc, ReleaseContext);
+ static SkImage* NewFromBitmap(const SkBitmap&);
+ static SkImage* NewFromGenerator(SkImageGenerator*, const SkIRect* subset = nullptr);
+ static SkImage* NewFromEncoded(SkData* encoded, const SkIRect* subset = nullptr);
+ static SkImage* NewFromTexture(GrContext* ctx, const GrBackendTextureDesc& desc) {
+ return NewFromTexture(ctx, desc, kPremul_SkAlphaType, nullptr, nullptr);
+ }
+
+ static SkImage* NewFromTexture(GrContext* ctx, const GrBackendTextureDesc& de, SkAlphaType at) {
+ return NewFromTexture(ctx, de, at, nullptr, nullptr);
+ }
+ static SkImage* NewFromTexture(GrContext*, const GrBackendTextureDesc&, SkAlphaType,
+ TextureReleaseProc, ReleaseContext);
+ static SkImage* NewFromAdoptedTexture(GrContext*, const GrBackendTextureDesc&,
+ SkAlphaType = kPremul_SkAlphaType);
+ static SkImage* NewFromYUVTexturesCopy(GrContext*, SkYUVColorSpace,
+ const GrBackendObject yuvTextureHandles[3],
+ const SkISize yuvSizes[3],
+ GrSurfaceOrigin);
+ static SkImage* NewFromPicture(const SkPicture*, const SkISize& dimensions,
+ const SkMatrix*, const SkPaint*);
+ static SkImage* NewTextureFromPixmap(GrContext*, const SkPixmap&, SkBudgeted budgeted);
+ static SkImage* NewFromDeferredTextureImageData(GrContext*, const void*, SkBudgeted);
+
+ SkImage* newSubset(const SkIRect& subset) const { return this->makeSubset(subset).release(); }
+ SkImage* newTextureImage(GrContext* ctx) const { return this->makeTextureImage(ctx).release(); }
+#endif
+
+protected:
+ SkImage(int width, int height, uint32_t uniqueID);
+
+private:
+ static sk_sp<SkImage> MakeTextureFromMipMap(GrContext*, const SkImageInfo&,
+ const GrMipLevel* texels, int mipLevelCount,
+ SkBudgeted, SkSourceGammaTreatment);
+
+ const int fWidth;
+ const int fHeight;
+ const uint32_t fUniqueID;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkImageDeserializer.h b/gfx/skia/skia/include/core/SkImageDeserializer.h
new file mode 100644
index 000000000..ba1422647
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkImageDeserializer.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageDeserializer_DEFINED
+#define SkImageDeserializer_DEFINED
+
+#include "SkRefCnt.h"
+
+struct SkIRect;
+class SkData;
+class SkImage;
+
+class SK_API SkImageDeserializer {
+public:
+ virtual ~SkImageDeserializer() {}
+
+ /**
+ * Given data containing serialized content, return an SkImage from it.
+ *
+ * @param data The data containing the encoded image. The subclass may ref this for later
+ * decoding, or read it and process it immediately.
+ * @param subset Optional rectangle representing the subset of the encoded data that is being
+ * requested to be turned into an image.
+ * @return The new image, or nullptr on failure.
+ *
+ * The default implementation is to call SkImage::MakeFromEncoded(...)
+ */
+ virtual sk_sp<SkImage> makeFromData(SkData*, const SkIRect* subset);
+ virtual sk_sp<SkImage> makeFromMemory(const void* data, size_t length, const SkIRect* subset);
+};
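+
+// Editor's note: an illustrative sketch, not part of the original header. The default
+// deserializer simply forwards to SkImage::MakeFromEncoded(); the file name is hypothetical.
+//
+//   SkImageDeserializer deserializer;
+//   sk_sp<SkData> data = SkData::MakeFromFileName("photo.png");
+//   sk_sp<SkImage> image = deserializer.makeFromData(data.get(), nullptr);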
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkImageEncoder.h b/gfx/skia/skia/include/core/SkImageEncoder.h
new file mode 100644
index 000000000..7d1525091
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkImageEncoder.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageEncoder_DEFINED
+#define SkImageEncoder_DEFINED
+
+#include "SkImageInfo.h"
+#include "SkTRegistry.h"
+
+class SkBitmap;
+class SkPixelSerializer;
+class SkPixmap;
+class SkData;
+class SkWStream;
+
+class SkImageEncoder {
+public:
+ // TODO (scroggo): Merge with SkEncodedFormat.
+ enum Type {
+ kUnknown_Type,
+ kBMP_Type,
+ kGIF_Type,
+ kICO_Type,
+ kJPEG_Type,
+ kPNG_Type,
+ kWBMP_Type,
+ kWEBP_Type,
+ kKTX_Type,
+ };
+ static SkImageEncoder* Create(Type);
+
+ virtual ~SkImageEncoder();
+
+ /* Quality ranges from 0..100 */
+ enum {
+ kDefaultQuality = 80
+ };
+
+ /**
+ * Encode bitmap 'bm', returning the results in an SkData, at quality level
+ * 'quality' (which can be in range 0-100). If the bitmap cannot be
+ * encoded, return null. On success, the caller is responsible for
+ * calling unref() on the data when they are finished.
+ */
+ SkData* encodeData(const SkBitmap&, int quality);
+
+ /**
+ * Encode bitmap 'bm' in the desired format, writing results to
+ * file 'file', at quality level 'quality' (which can be in range
+ * 0-100). Returns false on failure.
+ */
+ bool encodeFile(const char file[], const SkBitmap& bm, int quality);
+
+ /**
+ * Encode bitmap 'bm' in the desired format, writing results to
+ * stream 'stream', at quality level 'quality' (which can be in
+ * range 0-100). Returns false on failure.
+ */
+ bool encodeStream(SkWStream* stream, const SkBitmap& bm, int quality);
+
+ static SkData* EncodeData(const SkImageInfo&, const void* pixels, size_t rowBytes,
+ Type, int quality);
+ static SkData* EncodeData(const SkBitmap&, Type, int quality);
+
+ static SkData* EncodeData(const SkPixmap&, Type, int quality);
+
+ static bool EncodeFile(const char file[], const SkBitmap&, Type,
+ int quality);
+ static bool EncodeStream(SkWStream*, const SkBitmap&, Type,
+ int quality);
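+
+ // Editor's note: an illustrative sketch, not part of the original header, of the static
+ // one-shot helpers; `bitmap` is assumed to be a valid, pixel-backed SkBitmap.
+ //
+ //   sk_sp<SkData> png(SkImageEncoder::EncodeData(bitmap, SkImageEncoder::kPNG_Type, 100));
+ //   bool ok = SkImageEncoder::EncodeFile("out.jpg", bitmap, SkImageEncoder::kJPEG_Type,
+ //                                        SkImageEncoder::kDefaultQuality);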
+
+ /** Uses SkImageEncoder to serialize images that are not already
+ encoded as SkImageEncoder::kPNG_Type images. */
+ static SkPixelSerializer* CreatePixelSerializer();
+
+protected:
+ /**
+ * Encode bitmap 'bm' in the desired format, writing results to
+ * stream 'stream', at quality level 'quality' (which can be in
+ * range 0-100).
+ *
+ * This must be overridden by each SkImageEncoder implementation.
+ */
+ virtual bool onEncode(SkWStream* stream, const SkBitmap& bm, int quality) = 0;
+};
+
+// This macro declares a global (i.e., non-class owned) creation entry point
+// for each encoder (e.g., CreateJPEGImageEncoder)
+#define DECLARE_ENCODER_CREATOR(codec) \
+ SK_API SkImageEncoder *Create ## codec ();
+
+// This macro defines the global creation entry point for each encoder. Each
+// encoder implementation that registers with the encoder factory must call it.
+#define DEFINE_ENCODER_CREATOR(codec) \
+ SkImageEncoder* Create##codec() { return new Sk##codec; }
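+
+// Editor's note, not part of the original header: a hypothetical encoder class named
+// SkFooImageEncoder would use these macros as follows (the name is purely illustrative).
+//
+//   DECLARE_ENCODER_CREATOR(FooImageEncoder);  // in a header: declares CreateFooImageEncoder()
+//   DEFINE_ENCODER_CREATOR(FooImageEncoder);   // in the .cpp: returns new SkFooImageEncoder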
+
+// All the encoders known by Skia. Note that, depending on the compiler settings,
+// not all of these will be available.
+DECLARE_ENCODER_CREATOR(JPEGImageEncoder);
+DECLARE_ENCODER_CREATOR(PNGImageEncoder);
+DECLARE_ENCODER_CREATOR(KTXImageEncoder);
+DECLARE_ENCODER_CREATOR(WEBPImageEncoder);
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+SkImageEncoder* CreateImageEncoder_CG(SkImageEncoder::Type type);
+#endif
+
+#if defined(SK_BUILD_FOR_WIN)
+SkImageEncoder* CreateImageEncoder_WIC(SkImageEncoder::Type type);
+#endif
+
+// Typedef to make registering encoder callback easier
+// This has to be defined outside SkImageEncoder. :(
+typedef SkTRegistry<SkImageEncoder*(*)(SkImageEncoder::Type)> SkImageEncoder_EncodeReg;
+#endif
diff --git a/gfx/skia/skia/include/core/SkImageFilter.h b/gfx/skia/skia/include/core/SkImageFilter.h
new file mode 100644
index 000000000..9188a89e2
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkImageFilter.h
@@ -0,0 +1,433 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageFilter_DEFINED
+#define SkImageFilter_DEFINED
+
+#include "../private/SkTArray.h"
+#include "../private/SkTemplates.h"
+#include "../private/SkMutex.h"
+#include "SkColorSpace.h"
+#include "SkFilterQuality.h"
+#include "SkFlattenable.h"
+#include "SkMatrix.h"
+#include "SkRect.h"
+
+class GrContext;
+class GrFragmentProcessor;
+class SkColorFilter;
+struct SkIPoint;
+class SkSpecialImage;
+class SkImageFilterCache;
+struct SkImageFilterCacheKey;
+
+/**
+ * Base class for image filters. If one is installed in the paint, then
+ * all drawing occurs as usual, but it is as if the drawing happened into an
+ * offscreen (before the xfermode is applied). This offscreen bitmap will
+ * then be handed to the image filter, which in turn creates a new bitmap
+ * that is finally drawn to the device (using the original xfermode).
+ */
+class SK_API SkImageFilter : public SkFlattenable {
+public:
+ // Extra information about the output of a filter DAG. For now, this is just the color space
+ // (of the original requesting device). This is used when constructing intermediate rendering
+ // surfaces, so that we ensure we land in a surface that's similar/compatible to the final
+ // consumer of the DAG's output.
+ class OutputProperties {
+ public:
+ explicit OutputProperties(SkColorSpace* colorSpace) : fColorSpace(colorSpace) {}
+
+ SkColorSpace* colorSpace() const { return fColorSpace; }
+
+ private:
+ // This will be a pointer to the device's color space, and our lifetime is bounded by
+ // the device, so we can store a bare pointer.
+ SkColorSpace* fColorSpace;
+ };
+
+ class Context {
+ public:
+ Context(const SkMatrix& ctm, const SkIRect& clipBounds, SkImageFilterCache* cache,
+ const OutputProperties& outputProperties)
+ : fCTM(ctm)
+ , fClipBounds(clipBounds)
+ , fCache(cache)
+ , fOutputProperties(outputProperties)
+ {}
+
+ const SkMatrix& ctm() const { return fCTM; }
+ const SkIRect& clipBounds() const { return fClipBounds; }
+ SkImageFilterCache* cache() const { return fCache; }
+ const OutputProperties& outputProperties() const { return fOutputProperties; }
+
+ private:
+ SkMatrix fCTM;
+ SkIRect fClipBounds;
+ SkImageFilterCache* fCache;
+ OutputProperties fOutputProperties;
+ };
+
+ class CropRect {
+ public:
+ enum CropEdge {
+ kHasLeft_CropEdge = 0x01,
+ kHasTop_CropEdge = 0x02,
+ kHasWidth_CropEdge = 0x04,
+ kHasHeight_CropEdge = 0x08,
+ kHasAll_CropEdge = 0x0F,
+ };
+ CropRect() {}
+ explicit CropRect(const SkRect& rect, uint32_t flags = kHasAll_CropEdge)
+ : fRect(rect), fFlags(flags) {}
+ uint32_t flags() const { return fFlags; }
+ const SkRect& rect() const { return fRect; }
+#ifndef SK_IGNORE_TO_STRING
+ void toString(SkString* str) const;
+#endif
+
+ /**
+ * Apply this cropRect to the imageBounds. If a given edge of the cropRect is not
+ * set, then the corresponding edge from imageBounds will be used. If "embiggen"
+ * is true, the crop rect is allowed to enlarge the size of the rect, otherwise
+ * it may only reduce the rect. Filters that can affect transparent black should
+ * pass "true", while all other filters should pass "false".
+ *
+ * Note: imageBounds is in "device" space, as the output cropped rectangle will be,
+ * so the matrix is ignored for those. It is only applied to the crop rect's bounds.
+ */
+ void applyTo(const SkIRect& imageBounds, const SkMatrix&, bool embiggen,
+ SkIRect* cropped) const;
+
+ private:
+ SkRect fRect;
+ uint32_t fFlags;
+ };
+
+ enum TileUsage {
+ kPossible_TileUsage, //!< the created device may be drawn tiled
+ kNever_TileUsage, //!< the created device will never be drawn tiled
+ };
+
+ /**
+ * Request a new filtered image to be created from the src image.
+ *
+ * The context contains the environment in which the filter is occurring.
+ * It includes the clip bounds, CTM and cache.
+ *
+ * Offset is the amount to translate the resulting image relative to the
+ * src when it is drawn. This is an out-param.
+ *
+ * If the result image cannot be created, or the result would be
+ * transparent black, return null, in which case the offset parameter
+ * should be ignored by the caller.
+ *
+ * TODO: Right now the imagefilters sometimes return empty result bitmaps/
+ * specialimages. That doesn't seem quite right.
+ */
+ sk_sp<SkSpecialImage> filterImage(SkSpecialImage* src, const Context&, SkIPoint* offset) const;
+
+ enum MapDirection {
+ kForward_MapDirection,
+ kReverse_MapDirection
+ };
+ /**
+ * Map a device-space rect recursively forward or backward through the
+ * filter DAG. kForward_MapDirection is used to determine which pixels of
+ * the destination canvas a source image rect would touch after filtering.
+ * kReverse_MapDirection is used to determine which rect of the source
+ * image would be required to fill the given rect (typically, clip bounds).
+ * Used for clipping and temp-buffer allocations, so the result need not
+ * be exact, but should never be smaller than the real answer. The default
+ * implementation recursively unions all input bounds, or returns the
+ * source rect if no inputs.
+ */
+ SkIRect filterBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection = kReverse_MapDirection) const;
+
+#if SK_SUPPORT_GPU
+ static sk_sp<SkSpecialImage> DrawWithFP(GrContext* context,
+ sk_sp<GrFragmentProcessor> fp,
+ const SkIRect& bounds,
+ const OutputProperties& outputProperties);
+#endif
+
+ /**
+ * Returns whether this image filter is a color filter and puts the color filter into the
+ * "filterPtr" parameter if it can. Does nothing otherwise.
+ * If this returns false, then the filterPtr is unchanged.
+ * If this returns true, then if filterPtr is not null, it must be set to a ref'd colorfilter
+ * (i.e. it may not be set to NULL).
+ */
+ bool isColorFilterNode(SkColorFilter** filterPtr) const {
+ return this->onIsColorFilterNode(filterPtr);
+ }
+
+ // DEPRECATED : use isColorFilterNode() instead
+ bool asColorFilter(SkColorFilter** filterPtr) const {
+ return this->isColorFilterNode(filterPtr);
+ }
+
+ static sk_sp<SkImageFilter> MakeBlur(SkScalar sigmaX, SkScalar sigmaY,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect = nullptr);
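+
+ // Editor's note: an illustrative sketch, not part of the original header. It installs a blur
+ // filter in a paint so subsequent draws are filtered; `canvas` is an assumed SkCanvas*.
+ //
+ //   SkPaint paint;
+ //   paint.setImageFilter(SkImageFilter::MakeBlur(4.0f, 4.0f, nullptr));
+ //   canvas->drawRect(SkRect::MakeWH(100, 100), paint);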
+
+ /**
+ * Returns true (and optionally returns a ref'd filter) if this imagefilter can be completely
+ * replaced by the returned colorfilter. i.e. the two effects will affect drawing in the
+ * same way.
+ */
+ bool asAColorFilter(SkColorFilter** filterPtr) const;
+
+ /**
+ * Returns the number of inputs this filter will accept (some inputs can
+ * be NULL).
+ */
+ int countInputs() const { return fInputs.count(); }
+
+ /**
+ * Returns the input filter at a given index, or NULL if no input is
+ * connected. The indices used are filter-specific.
+ */
+ SkImageFilter* getInput(int i) const {
+ SkASSERT(i < fInputs.count());
+ return fInputs[i].get();
+ }
+
+ /**
+ * Returns whether any edges of the crop rect have been set. The crop
+ * rect is set at construction time, and determines which pixels from the
+ * input image will be processed, and which pixels in the output image will be allowed.
+ * The size of the crop rect should be
+ * used as the size of the destination image. The origin of this rect
+ * should be used to offset access to the input images, and should also
+ * be added to the "offset" parameter in onFilterImage.
+ */
+ bool cropRectIsSet() const { return fCropRect.flags() != 0x0; }
+
+ CropRect getCropRect() const { return fCropRect; }
+
+ // Default impl returns union of all input bounds.
+ virtual SkRect computeFastBounds(const SkRect&) const;
+
+ // Can this filter DAG compute the resulting bounds of an object-space rectangle?
+ bool canComputeFastBounds() const;
+
+ /**
+ * If this filter can be represented by another filter + a localMatrix, return that filter,
+ * else return null.
+ */
+ sk_sp<SkImageFilter> makeWithLocalMatrix(const SkMatrix&) const;
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ SkImageFilter* newWithLocalMatrix(const SkMatrix& matrix) const {
+ return this->makeWithLocalMatrix(matrix).release();
+ }
+#endif
+
+ /**
+ * ImageFilters can natively handle scaling and translate components in the CTM. Only some of
+ * them can handle affine (or more complex) matrices. This call returns true iff the filter
+ * and all of its (non-null) inputs can handle these more complex matrices.
+ */
+ bool canHandleComplexCTM() const;
+
+ /**
+ * Return an imagefilter which transforms its input by the given matrix.
+ */
+ static sk_sp<SkImageFilter> MakeMatrixFilter(const SkMatrix& matrix,
+ SkFilterQuality quality,
+ sk_sp<SkImageFilter> input);
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* CreateMatrixFilter(const SkMatrix& matrix,
+ SkFilterQuality filterQuality,
+ SkImageFilter* input = nullptr) {
+ return MakeMatrixFilter(matrix, filterQuality, sk_ref_sp<SkImageFilter>(input)).release();
+ }
+#endif
+
+ SK_TO_STRING_PUREVIRT()
+ SK_DEFINE_FLATTENABLE_TYPE(SkImageFilter)
+ SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP()
+
+protected:
+ class Common {
+ public:
+ /**
+ * Attempt to unflatten the cropRect and the expected number of input filters.
+ * If any number of input filters is valid, pass -1.
+ * If this fails (i.e. corrupt buffer or contents) then return false and common will
+ * be left uninitialized.
+ * If this returns true, then inputCount() is the number of found input filters, each
+ * of which may be NULL or a valid imagefilter.
+ */
+ bool unflatten(SkReadBuffer&, int expectedInputs);
+
+ const CropRect& cropRect() const { return fCropRect; }
+ int inputCount() const { return fInputs.count(); }
+ sk_sp<SkImageFilter>* inputs() const { return fInputs.get(); }
+
+ sk_sp<SkImageFilter> getInput(int index) const { return fInputs[index]; }
+
+ private:
+ CropRect fCropRect;
+ // most filters accept at most 2 input-filters
+ SkAutoSTArray<2, sk_sp<SkImageFilter>> fInputs;
+
+ void allocInputs(int count);
+ };
+
+ SkImageFilter(sk_sp<SkImageFilter>* inputs, int inputCount, const CropRect* cropRect);
+
+ virtual ~SkImageFilter();
+
+ /**
+ * Constructs a new SkImageFilter read from an SkReadBuffer object.
+ *
+ * @param inputCount The exact number of inputs expected for this SkImageFilter object.
+ * -1 can be used if the filter accepts any number of inputs.
+ * @param rb SkReadBuffer object from which the SkImageFilter is read.
+ */
+ explicit SkImageFilter(int inputCount, SkReadBuffer& rb);
+
+ void flatten(SkWriteBuffer&) const override;
+
+ /**
+ * This is the virtual which should be overridden by the derived class
+ * to perform image filtering.
+ *
+ * src is the original primitive bitmap. If the filter has a connected
+ * input, it should recurse on that input and use that in place of src.
+ *
+ * The matrix is the current matrix on the canvas.
+ *
+ * Offset is the amount to translate the resulting image relative to the
+ * src when it is drawn. This is an out-param.
+ *
+ * If the result image cannot be created (either because of error or if, say, the result
+ * is entirely clipped out), this should return nullptr.
+ * Callers that affect transparent black should explicitly handle nullptr
+ * results and press on. In the error case this behavior will produce a better result
+ * than nothing and is necessary for the clipped out case.
+ * If the return value is nullptr then offset should be ignored.
+ */
+ virtual sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* src, const Context&,
+ SkIPoint* offset) const = 0;
+
+ /**
+ * This function recurses into its inputs with the given rect (first
+ * argument), calls filterBounds() with the given map direction on each,
+ * and returns the union of those results. If a derived class has special
+ * recursion requirements (e.g., it has an input which does not participate
+ * in bounds computation), it can be overridden here.
+ *
+ * Note that this function is *not* responsible for mapping the rect for
+ * this node's filter bounds requirements (i.e., calling
+ * onFilterNodeBounds()); that is handled by filterBounds().
+ */
+ virtual SkIRect onFilterBounds(const SkIRect&, const SkMatrix&, MapDirection) const;
+
+ /**
+ * Performs a forwards or reverse mapping of the given rect to accommodate
+ * this filter's margin requirements. kForward_MapDirection is used to
+ * determine the destination pixels which would be touched by filtering
+ * the given source rect (e.g., given source bitmap bounds,
+ * determine the optimal bounds of the filtered offscreen bitmap).
+ * kReverse_MapDirection is used to determine which pixels of the
+ * input(s) would be required to fill the given destination rect
+ * (e.g., clip bounds). NOTE: these operations may not be the
+ * inverse of each other. For example, blurring expands the given rect
+ * in both forward and reverse directions. Unlike
+ * onFilterBounds(), this function is non-recursive.
+ */
+ virtual SkIRect onFilterNodeBounds(const SkIRect&, const SkMatrix&, MapDirection) const;
+
+ // Helper function which invokes filter processing on the input at the
+ // specified "index". If the input is null, it returns "src" and leaves
+ // "offset" untouched. If the input is non-null, it
+ // calls filterImage() on that input, and returns the result.
+ sk_sp<SkSpecialImage> filterInput(int index,
+ SkSpecialImage* src,
+ const Context&,
+ SkIPoint* offset) const;
+
+ /**
+ * Return true (and return a ref'd colorfilter) if this node in the DAG is just a
+ * colorfilter w/o CropRect constraints.
+ */
+ virtual bool onIsColorFilterNode(SkColorFilter** /*filterPtr*/) const {
+ return false;
+ }
+
+ /**
+ * Override this to describe the behavior of your subclass as a leaf node. The caller will
+ * take care of calling your inputs (and return false if any of them could not handle it).
+ */
+ virtual bool onCanHandleComplexCTM() const { return false; }
+
+ /** Given a "srcBounds" rect, computes destination bounds for this filter.
+ * "dstBounds" are computed by transforming the crop rect by the context's
+ * CTM, applying it to the initial bounds, and intersecting the result with
+ * the context's clip bounds. "srcBounds" (if non-null) are computed by
+ * intersecting the initial bounds with "dstBounds", to ensure that we never
+ * sample outside of the crop rect (this restriction may be relaxed in the
+ * future).
+ */
+ bool applyCropRect(const Context&, const SkIRect& srcBounds, SkIRect* dstBounds) const;
+
+ /** A variant of the above call which takes the original source bitmap and
+ * source offset. If the resulting crop rect is not entirely contained by
+ * the source bitmap's bounds, it creates a new bitmap in "result" and
+ * pads the edges with transparent black. In that case, the srcOffset is
+ * modified to be the same as the bounds, since no further adjustment is
+ * needed by the caller. This version should only be used by filters
+ * which are not capable of processing a smaller source bitmap into a
+ * larger destination.
+ */
+ sk_sp<SkSpecialImage> applyCropRect(const Context&, SkSpecialImage* src, SkIPoint* srcOffset,
+ SkIRect* bounds) const;
+
+ /**
+ * Creates a modified Context for use when recursing up the image filter DAG.
+ * The clip bounds are adjusted to accommodate any margins that this
+ * filter requires by calling this node's
+ * onFilterNodeBounds(..., kReverse_MapDirection).
+ */
+ Context mapContext(const Context& ctx) const;
+
+private:
+ friend class SkGraphics;
+ static void PurgeCache();
+
+ void init(sk_sp<SkImageFilter>* inputs, int inputCount, const CropRect* cropRect);
+
+ bool usesSrcInput() const { return fUsesSrcInput; }
+ virtual bool affectsTransparentBlack() const { return false; }
+
+ SkAutoSTArray<2, sk_sp<SkImageFilter>> fInputs;
+
+ bool fUsesSrcInput;
+ CropRect fCropRect;
+ uint32_t fUniqueID; // Globally unique
+ mutable SkTArray<SkImageFilterCacheKey> fCacheKeys;
+ mutable SkMutex fMutex;
+ typedef SkFlattenable INHERITED;
+};
+
+/**
+ * Helper to unflatten the common data, and return NULL if we fail.
+ */
+#define SK_IMAGEFILTER_UNFLATTEN_COMMON(localVar, expectedCount) \
+ Common localVar; \
+ do { \
+ if (!localVar.unflatten(buffer, expectedCount)) { \
+ return NULL; \
+ } \
+ } while (0)
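+
+// Editor's note, not part of the original header: a hypothetical subclass's CreateProc would
+// typically use the macro like this; SkFooImageFilter and its Make() factory are illustrative.
+//
+//   sk_sp<SkFlattenable> SkFooImageFilter::CreateProc(SkReadBuffer& buffer) {
+//       SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);  // expects exactly one input filter
+//       return Make(common.getInput(0), &common.cropRect());
+//   }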
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkImageGenerator.h b/gfx/skia/skia/include/core/SkImageGenerator.h
new file mode 100644
index 000000000..3712a924a
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkImageGenerator.h
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageGenerator_DEFINED
+#define SkImageGenerator_DEFINED
+
+#include "SkBitmap.h"
+#include "SkColor.h"
+#include "SkImageInfo.h"
+#include "SkYUVSizeInfo.h"
+
+class GrContext;
+class GrTexture;
+class GrTextureParams;
+class SkBitmap;
+class SkData;
+class SkImageGenerator;
+class SkMatrix;
+class SkPaint;
+class SkPicture;
+
+#ifdef SK_SUPPORT_LEGACY_REFENCODEDDATA_NOCTX
+ #define SK_REFENCODEDDATA_CTXPARAM
+#else
+ #define SK_REFENCODEDDATA_CTXPARAM GrContext* ctx
+#endif
+
+/**
+ * Takes ownership of SkImageGenerator. If this method fails for
+ * whatever reason, it will return false and immediately delete
+ * the generator. If it succeeds, it will modify destination
+ * bitmap.
+ *
+ * If generator is NULL, will safely return false.
+ *
+ * If this fails or when the SkDiscardablePixelRef that is
+ * installed into destination is destroyed, it will
+ * delete the generator. Therefore, generator should be
+ * allocated with new.
+ *
+ * @param destination Upon success, this bitmap will be
+ * configured and have a pixelref installed.
+ *
+ * @return true iff successful.
+ */
+SK_API bool SkDEPRECATED_InstallDiscardablePixelRef(SkImageGenerator*, SkBitmap* destination);
+
+/**
+ * On success, installs a discardable pixelref into destination, based on encoded data.
+ * Regardless of success or failure, the caller must still balance their ownership of encoded.
+ */
+SK_API bool SkDEPRECATED_InstallDiscardablePixelRef(SkData* encoded, SkBitmap* destination);
+
+/**
+ * An interface that allows a purgeable PixelRef (such as a
+ * SkDiscardablePixelRef) to decode and re-decode an image as needed.
+ */
+class SK_API SkImageGenerator : public SkNoncopyable {
+public:
+ /**
+ * The PixelRef which takes ownership of this SkImageGenerator
+ * will call the image generator's destructor.
+ */
+ virtual ~SkImageGenerator() { }
+
+ uint32_t uniqueID() const { return fUniqueID; }
+
+ /**
+ * Return a ref to the encoded (i.e. compressed) representation
+ * of this data. If the GrContext is non-null, then the caller is only interested in
+ * gpu-specific formats, so the impl may return null even if it has encoded data,
+ * if it knows that data is not suitable for the gpu.
+ *
+ * If non-NULL is returned, the caller is responsible for calling
+ * unref() on the data when it is finished.
+ */
+ SkData* refEncodedData(GrContext* ctx = nullptr) {
+#ifdef SK_SUPPORT_LEGACY_REFENCODEDDATA_NOCTX
+ return this->onRefEncodedData();
+#else
+ return this->onRefEncodedData(ctx);
+#endif
+ }
+
+ /**
+ * Return the ImageInfo associated with this generator.
+ */
+ const SkImageInfo& getInfo() const { return fInfo; }
+
+ /**
+ * Decode into the given pixels, a block of memory of size at
+ * least (info.fHeight - 1) * rowBytes + (info.fWidth *
+ * bytesPerPixel)
+ *
+ * Repeated calls to this function should give the same results,
+ * allowing the PixelRef to be immutable.
+ *
+ * @param info A description of the format (config, size)
+ * expected by the caller. This can simply be identical
+ * to the info returned by getInfo().
+ *
+ * This contract also allows the caller to specify
+ * different output-configs, which the implementation can
+ * decide to support or not.
+ *
+ * A size that does not match getInfo() implies a request
+ * to scale. If the generator cannot perform this scale,
+ * it will fail (return false).
+ *
+ * If info is kIndex8_SkColorType, then the caller must provide storage for up to 256
+ * SkPMColor values in ctable. On success the generator must copy N colors into that storage,
+ * (where N is the logical number of table entries) and set ctableCount to N.
+ *
+ * If info is not kIndex8_SkColorType, then the last two parameters may be NULL. If ctableCount
+ * is not null, it will be set to 0.
+ *
+ * @return true on success.
+ */
+ bool getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ SkPMColor ctable[], int* ctableCount);
+
+ /**
+ * Simplified version of getPixels() that asserts that info is NOT kIndex8_SkColorType and
+ * uses the default Options.
+ */
+ bool getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes);
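+
+ // Editor's note: a minimal sketch, not part of the original header, of decoding a generator's
+ // pixels at their native size; `gen` is assumed to be a valid SkImageGenerator*.
+ //
+ //   const SkImageInfo& info = gen->getInfo();
+ //   SkBitmap bm;
+ //   if (bm.tryAllocPixels(info) && gen->getPixels(info, bm.getPixels(), bm.rowBytes())) {
+ //       // bm now holds the decoded pixels
+ //   }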
+
+ /**
+ * If decoding to YUV is supported, this returns true. Otherwise, this
+ * returns false and does not modify any of the parameters.
+ *
+ * @param sizeInfo Output parameter indicating the sizes and required
+ * allocation widths of the Y, U, and V planes.
+ * @param colorSpace Output parameter.
+ */
+ bool queryYUV8(SkYUVSizeInfo* sizeInfo, SkYUVColorSpace* colorSpace) const;
+
+ /**
+ * Returns true on success and false on failure.
+ * This always attempts to perform a full decode. If the client only
+ * wants size, it should call queryYUV8().
+ *
+ * @param sizeInfo Needs to exactly match the values returned by the
+ * query, except the WidthBytes may be larger than the
+ * recommendation (but not smaller).
+ * @param planes Memory for each of the Y, U, and V planes.
+ */
+ bool getYUV8Planes(const SkYUVSizeInfo& sizeInfo, void* planes[3]);
+
+ /**
+ * If the generator can natively/efficiently return its pixels as a GPU image (backed by a
+ * texture) this will return that image. If not, this will return NULL.
+ *
+ * Regarding the GrContext parameter:
+ *
+ * The caller may pass NULL for the context. In that case the generator may assume that its
+ * internal context is current. If it has no internal context, then it should just return
+ * null.
+ *
+ * If the caller passes a non-null context, then the generator should only succeed if:
+ * - it has no intrinsic context, and will use the caller's
+ * - its internal context is the same
+ * - it can somehow convert its texture into one that is valid for the provided context.
+ *
+ * Regarding the GrTextureParams parameter:
+ *
+ * If the context (the provided one or the generator's intrinsic one) determines that, to
+ * support the specified usage, it must return a texture of a different size, it may do so.
+ * The caller must therefore inspect the texture's width/height and compare them to the
+ * generator's getInfo() width/height. For readback usage, use GrTextureParams::ClampNoFilter().
+ */
+ GrTexture* generateTexture(GrContext*, const SkIRect* subset = nullptr);
+
+ struct SupportedSizes {
+ SkISize fSizes[2];
+ };
+
+ /**
+ * Some generators can efficiently scale their contents. If this is supported, the generator
+ * may only support certain scaled dimensions. Call this with the desired scale factor,
+ * and it will return true if scaling is supported, and in supportedSizes[] it will return
+ * the nearest supported dimensions.
+ *
+ * If no native scaling is supported, or the scale is invalid (e.g. scale <= 0 || scale > 1),
+ * this will return false, and supportedSizes will be undefined.
+ */
+ bool computeScaledDimensions(SkScalar scale, SupportedSizes*);
+
+ /**
+ * Scale the generator's pixels to fit into scaledSize.
+ * This routine also supports retrieving only a subset of the pixels. That subset is specified
+ * by the following rectangle (in the scaled space):
+ *
+ * subset = SkIRect::MakeXYWH(subsetOrigin.x(), subsetOrigin.y(),
+ * subsetPixels.width(), subsetPixels.height())
+ *
+ * If subset is not contained inside the scaledSize, this returns false.
+ *
+ * whole = SkIRect::MakeWH(scaledSize.width(), scaledSize.height())
+ * if (!whole.contains(subset)) {
+ * return false;
+ * }
+ *
+ * If the requested colortype/alphatype in pixels is not supported,
+ * or the requested scaledSize is not supported, or the generator encounters an error,
+ * this returns false.
+ */
+ bool generateScaledPixels(const SkISize& scaledSize, const SkIPoint& subsetOrigin,
+ const SkPixmap& subsetPixels);
+
+ bool generateScaledPixels(const SkPixmap& scaledPixels) {
+ return this->generateScaledPixels(SkISize::Make(scaledPixels.width(),
+ scaledPixels.height()),
+ SkIPoint::Make(0, 0), scaledPixels);
+ }
+
+ /**
+ * If the default image decoder system can interpret the specified (encoded) data, then
+ * this returns a new ImageGenerator for it. Otherwise this returns NULL. Either way
+ * the caller is still responsible for managing their ownership of the data.
+ */
+ static SkImageGenerator* NewFromEncoded(SkData*);
+
+ /** Return a new image generator backed by the specified picture. If the size is empty or
+ * the picture is NULL, this returns NULL.
+ * The optional matrix and paint arguments are passed to drawPicture() at rasterization
+ * time.
+ */
+ static SkImageGenerator* NewFromPicture(const SkISize&, const SkPicture*, const SkMatrix*,
+ const SkPaint*);
+
+ bool tryGenerateBitmap(SkBitmap* bm) {
+ return this->tryGenerateBitmap(bm, nullptr, nullptr);
+ }
+ bool tryGenerateBitmap(SkBitmap* bm, const SkImageInfo& info, SkBitmap::Allocator* allocator) {
+ return this->tryGenerateBitmap(bm, &info, allocator);
+ }
+ void generateBitmap(SkBitmap* bm) {
+ if (!this->tryGenerateBitmap(bm, nullptr, nullptr)) {
+ sk_throw();
+ }
+ }
+ void generateBitmap(SkBitmap* bm, const SkImageInfo& info) {
+ if (!this->tryGenerateBitmap(bm, &info, nullptr)) {
+ sk_throw();
+ }
+ }
+
+protected:
+ enum {
+ kNeedNewImageUniqueID = 0
+ };
+
+ SkImageGenerator(const SkImageInfo& info, uint32_t uniqueId = kNeedNewImageUniqueID);
+
+ virtual SkData* onRefEncodedData(SK_REFENCODEDDATA_CTXPARAM);
+
+ virtual bool onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ SkPMColor ctable[], int* ctableCount);
+
+ virtual bool onQueryYUV8(SkYUVSizeInfo*, SkYUVColorSpace*) const {
+ return false;
+ }
+ virtual bool onGetYUV8Planes(const SkYUVSizeInfo&, void*[3] /*planes*/) {
+ return false;
+ }
+
+ virtual GrTexture* onGenerateTexture(GrContext*, const SkIRect*) {
+ return nullptr;
+ }
+
+ virtual bool onComputeScaledDimensions(SkScalar, SupportedSizes*) {
+ return false;
+ }
+ virtual bool onGenerateScaledPixels(const SkISize&, const SkIPoint&, const SkPixmap&) {
+ return false;
+ }
+
+ bool tryGenerateBitmap(SkBitmap* bm, const SkImageInfo* optionalInfo, SkBitmap::Allocator*);
+
+private:
+ const SkImageInfo fInfo;
+ const uint32_t fUniqueID;
+
+ // This is our default impl, which may be different on different platforms.
+ // It is called from NewFromEncoded() after it has checked for any runtime factory.
+ // The SkData will never be NULL, as that will have been checked by NewFromEncoded.
+ static SkImageGenerator* NewFromEncodedImpl(SkData*);
+};
+
+#endif // SkImageGenerator_DEFINED
diff --git a/gfx/skia/skia/include/core/SkImageInfo.h b/gfx/skia/skia/include/core/SkImageInfo.h
new file mode 100644
index 000000000..cf50acaca
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkImageInfo.h
@@ -0,0 +1,359 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageInfo_DEFINED
+#define SkImageInfo_DEFINED
+
+#include "SkColorSpace.h"
+#include "SkMath.h"
+#include "SkRect.h"
+#include "SkSize.h"
+
+class SkReadBuffer;
+class SkWriteBuffer;
+
+/**
+ * Describes how to interpret the alpha component of a pixel.
+ */
+enum SkAlphaType {
+ kUnknown_SkAlphaType,
+
+ /**
+ * All pixels are stored as opaque. This differs slightly from kIgnore in
+ * that kOpaque has correct "opaque" values stored in the pixels, while
+ * kIgnore may not, but in both cases the caller should treat the pixels
+ * as opaque.
+ */
+ kOpaque_SkAlphaType,
+
+ /**
+ * All pixels have their alpha premultiplied in their color components.
+ * This is the natural format for the rendering target pixels.
+ */
+ kPremul_SkAlphaType,
+
+ /**
+ * All pixels have their color components stored without any regard to the
+ * alpha. e.g. this is the default configuration for PNG images.
+ *
+ * This alpha-type is ONLY supported for input images. Rendering cannot
+ * generate this on output.
+ */
+ kUnpremul_SkAlphaType,
+
+ kLastEnum_SkAlphaType = kUnpremul_SkAlphaType
+};
+
+static inline bool SkAlphaTypeIsOpaque(SkAlphaType at) {
+ return kOpaque_SkAlphaType == at;
+}
+
+static inline bool SkAlphaTypeIsValid(unsigned value) {
+ return value <= kLastEnum_SkAlphaType;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Describes how to interpret the components of a pixel.
+ *
+ * kN32_SkColorType is an alias for whichever 32bit ARGB format is the "native"
+ * form for skia's blitters. Use this if you don't have a swizzle preference
+ * for 32bit pixels.
+ */
+enum SkColorType {
+ kUnknown_SkColorType,
+ kAlpha_8_SkColorType,
+ kRGB_565_SkColorType,
+ kARGB_4444_SkColorType,
+ kRGBA_8888_SkColorType,
+ kBGRA_8888_SkColorType,
+ kIndex_8_SkColorType,
+ kGray_8_SkColorType,
+ kRGBA_F16_SkColorType,
+
+ kLastEnum_SkColorType = kRGBA_F16_SkColorType,
+
+#if SK_PMCOLOR_BYTE_ORDER(B,G,R,A)
+ kN32_SkColorType = kBGRA_8888_SkColorType,
+#elif SK_PMCOLOR_BYTE_ORDER(R,G,B,A)
+ kN32_SkColorType = kRGBA_8888_SkColorType,
+#else
+ #error "SK_*32_SHIFT values must correspond to BGRA or RGBA byte order"
+#endif
+};
+
+static int SkColorTypeBytesPerPixel(SkColorType ct) {
+ static const uint8_t gSize[] = {
+ 0, // Unknown
+ 1, // Alpha_8
+ 2, // RGB_565
+ 2, // ARGB_4444
+ 4, // RGBA_8888
+ 4, // BGRA_8888
+ 1, // kIndex_8
+ 1, // kGray_8
+ 8, // kRGBA_F16
+ };
+ static_assert(SK_ARRAY_COUNT(gSize) == (size_t)(kLastEnum_SkColorType + 1),
+ "size_mismatch_with_SkColorType_enum");
+
+ SkASSERT((size_t)ct < SK_ARRAY_COUNT(gSize));
+ return gSize[ct];
+}
+
+static int SkColorTypeShiftPerPixel(SkColorType ct) {
+ static const uint8_t gShift[] = {
+ 0, // Unknown
+ 0, // Alpha_8
+ 1, // RGB_565
+ 1, // ARGB_4444
+ 2, // RGBA_8888
+ 2, // BGRA_8888
+ 0, // kIndex_8
+ 0, // kGray_8
+ 3, // kRGBA_F16
+ };
+ static_assert(SK_ARRAY_COUNT(gShift) == (size_t)(kLastEnum_SkColorType + 1),
+ "size_mismatch_with_SkColorType_enum");
+
+ SkASSERT((size_t)ct < SK_ARRAY_COUNT(gShift));
+ return gShift[ct];
+}
+
+static inline size_t SkColorTypeMinRowBytes(SkColorType ct, int width) {
+ return width * SkColorTypeBytesPerPixel(ct);
+}
+
+static inline bool SkColorTypeIsValid(unsigned value) {
+ return value <= kLastEnum_SkColorType;
+}
+
+static inline size_t SkColorTypeComputeOffset(SkColorType ct, int x, int y, size_t rowBytes) {
+ if (kUnknown_SkColorType == ct) {
+ return 0;
+ }
+ return y * rowBytes + (x << SkColorTypeShiftPerPixel(ct));
+}
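+
+// Editor's note, not part of the original header: for example, with kRGBA_8888_SkColorType
+// (shift of 2), x = 3, y = 2, and rowBytes = 256, the offset is 2 * 256 + (3 << 2) = 524 bytes.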
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Return true if alphaType is supported by colorType. If there is a canonical
+ * alphaType for this colorType, return it in canonical.
+ */
+bool SkColorTypeValidateAlphaType(SkColorType colorType, SkAlphaType alphaType,
+ SkAlphaType* canonical = NULL);
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Describes the color space of a YUV pixel.
+ */
+enum SkYUVColorSpace {
+ /** Standard JPEG color space. */
+ kJPEG_SkYUVColorSpace,
+ /** SDTV standard Rec. 601 color space. Uses "studio swing" [16, 235] color
+ range. See http://en.wikipedia.org/wiki/Rec._601 for details. */
+ kRec601_SkYUVColorSpace,
+ /** HDTV standard Rec. 709 color space. Uses "studio swing" [16, 235] color
+ range. See http://en.wikipedia.org/wiki/Rec._709 for details. */
+ kRec709_SkYUVColorSpace,
+
+ kLastEnum_SkYUVColorSpace = kRec709_SkYUVColorSpace
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+enum class SkSourceGammaTreatment {
+ kRespect,
+ kIgnore,
+};
+
+/**
+ * Describe an image's dimensions and pixel type.
+ * Used for both src images and render-targets (surfaces).
+ */
+struct SK_API SkImageInfo {
+public:
+ SkImageInfo()
+ : fColorSpace(nullptr)
+ , fWidth(0)
+ , fHeight(0)
+ , fColorType(kUnknown_SkColorType)
+ , fAlphaType(kUnknown_SkAlphaType)
+ {}
+
+ static SkImageInfo Make(int width, int height, SkColorType ct, SkAlphaType at,
+ sk_sp<SkColorSpace> cs = nullptr) {
+ return SkImageInfo(width, height, ct, at, std::move(cs));
+ }
+
+ /**
+ * Creates an ImageInfo with the native ARGB32 colortype.
+ */
+ static SkImageInfo MakeN32(int width, int height, SkAlphaType at,
+ sk_sp<SkColorSpace> cs = nullptr) {
+ return Make(width, height, kN32_SkColorType, at, cs);
+ }
+
+ /**
+ * Create an ImageInfo marked as SRGB with N32 swizzle.
+ */
+ static SkImageInfo MakeS32(int width, int height, SkAlphaType at);
+
+ /**
+ * Creates an ImageInfo with the native ARGB32 colortype and a premul alphatype.
+ */
+ static SkImageInfo MakeN32Premul(int width, int height, sk_sp<SkColorSpace> cs = nullptr) {
+ return Make(width, height, kN32_SkColorType, kPremul_SkAlphaType, cs);
+ }
+
+ static SkImageInfo MakeN32Premul(const SkISize& size) {
+ return MakeN32Premul(size.width(), size.height());
+ }
+
+ static SkImageInfo MakeA8(int width, int height) {
+ return Make(width, height, kAlpha_8_SkColorType, kPremul_SkAlphaType, nullptr);
+ }
+
+ static SkImageInfo MakeUnknown(int width, int height) {
+ return Make(width, height, kUnknown_SkColorType, kUnknown_SkAlphaType, nullptr);
+ }
+
+ static SkImageInfo MakeUnknown() {
+ return MakeUnknown(0, 0);
+ }
+
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+ SkColorType colorType() const { return fColorType; }
+ SkAlphaType alphaType() const { return fAlphaType; }
+ SkColorSpace* colorSpace() const { return fColorSpace.get(); }
+
+ bool isEmpty() const { return fWidth <= 0 || fHeight <= 0; }
+
+ bool isOpaque() const {
+ return SkAlphaTypeIsOpaque(fAlphaType);
+ }
+
+ SkISize dimensions() const { return SkISize::Make(fWidth, fHeight); }
+ SkIRect bounds() const { return SkIRect::MakeWH(fWidth, fHeight); }
+
+ bool gammaCloseToSRGB() const {
+ return fColorSpace && fColorSpace->gammaCloseToSRGB();
+ }
+
+ /**
+ * Return a new ImageInfo with the same colortype and alphatype as this info,
+ * but with the specified width and height.
+ */
+ SkImageInfo makeWH(int newWidth, int newHeight) const {
+ return Make(newWidth, newHeight, fColorType, fAlphaType, fColorSpace);
+ }
+
+ SkImageInfo makeAlphaType(SkAlphaType newAlphaType) const {
+ return Make(fWidth, fHeight, fColorType, newAlphaType, fColorSpace);
+ }
+
+ SkImageInfo makeColorType(SkColorType newColorType) const {
+ return Make(fWidth, fHeight, newColorType, fAlphaType, fColorSpace);
+ }
+
+ SkImageInfo makeColorSpace(sk_sp<SkColorSpace> cs) const {
+ return Make(fWidth, fHeight, fColorType, fAlphaType, std::move(cs));
+ }
+
+ int bytesPerPixel() const { return SkColorTypeBytesPerPixel(fColorType); }
+
+ int shiftPerPixel() const { return SkColorTypeShiftPerPixel(fColorType); }
+
+ uint64_t minRowBytes64() const {
+ return sk_64_mul(fWidth, this->bytesPerPixel());
+ }
+
+ size_t minRowBytes() const {
+ return (size_t)this->minRowBytes64();
+ }
+
+ size_t computeOffset(int x, int y, size_t rowBytes) const {
+ SkASSERT((unsigned)x < (unsigned)fWidth);
+ SkASSERT((unsigned)y < (unsigned)fHeight);
+ return SkColorTypeComputeOffset(fColorType, x, y, rowBytes);
+ }
+
+ bool operator==(const SkImageInfo& other) const {
+ return fWidth == other.fWidth && fHeight == other.fHeight &&
+ fColorType == other.fColorType && fAlphaType == other.fAlphaType &&
+ SkColorSpace::Equals(fColorSpace.get(), other.fColorSpace.get());
+ }
+ bool operator!=(const SkImageInfo& other) const {
+ return !(*this == other);
+ }
+
+ void unflatten(SkReadBuffer&);
+ void flatten(SkWriteBuffer&) const;
+
+ int64_t getSafeSize64(size_t rowBytes) const {
+ if (0 == fHeight) {
+ return 0;
+ }
+ return sk_64_mul(fHeight - 1, rowBytes) + fWidth * this->bytesPerPixel();
+ }
+
+ size_t getSafeSize(size_t rowBytes) const {
+ int64_t size = this->getSafeSize64(rowBytes);
+ if (!sk_64_isS32(size)) {
+ return 0;
+ }
+ return sk_64_asS32(size);
+ }
+
+ bool validRowBytes(size_t rowBytes) const {
+ uint64_t rb = sk_64_mul(fWidth, this->bytesPerPixel());
+ return rowBytes >= rb;
+ }
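+
+ // Editor's note: a minimal sketch, not part of the original header, of the usual sizing math
+ // when allocating pixel storage for an ImageInfo.
+ //
+ //   SkImageInfo info = SkImageInfo::MakeN32Premul(640, 480);
+ //   size_t rowBytes = info.minRowBytes();          // 640 * 4 = 2560 bytes
+ //   size_t byteSize = info.getSafeSize(rowBytes);  // (480 - 1) * 2560 + 640 * 4 = 1228800
+ //   SkASSERT(info.validRowBytes(rowBytes));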
+
+ void reset() {
+ fColorSpace = nullptr;
+ fWidth = 0;
+ fHeight = 0;
+ fColorType = kUnknown_SkColorType;
+ fAlphaType = kUnknown_SkAlphaType;
+ }
+
+ SkDEBUGCODE(void validate() const;)
+
+private:
+ sk_sp<SkColorSpace> fColorSpace;
+ int fWidth;
+ int fHeight;
+ SkColorType fColorType;
+ SkAlphaType fAlphaType;
+
+ SkImageInfo(int width, int height, SkColorType ct, SkAlphaType at, sk_sp<SkColorSpace> cs)
+ : fColorSpace(std::move(cs))
+ , fWidth(width)
+ , fHeight(height)
+ , fColorType(ct)
+ , fAlphaType(at)
+ {}
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline bool SkColorAndColorSpaceAreGammaCorrect(SkColorType ct, SkColorSpace* cs) {
+ // Anything with a color-space attached is gamma-correct, as is F16.
+ // To get legacy behavior, you need to ask for non-F16, with a nullptr color space.
+ return (cs != nullptr) || kRGBA_F16_SkColorType == ct;
+}
+
+static inline bool SkImageInfoIsGammaCorrect(const SkImageInfo& info) {
+ return SkColorAndColorSpaceAreGammaCorrect(info.colorType(), info.colorSpace());
+}
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkLights.h b/gfx/skia/skia/include/core/SkLights.h
new file mode 100644
index 000000000..954168de4
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkLights.h
@@ -0,0 +1,199 @@
+
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLights_DEFINED
+#define SkLights_DEFINED
+
+#include "../private/SkTArray.h"
+#include "SkPoint3.h"
+#include "SkRefCnt.h"
+
+class SkReadBuffer;
+class SkWriteBuffer;
+class SkImage;
+
+class SK_API SkLights : public SkRefCnt {
+public:
+ class Light {
+ public:
+ enum LightType {
+ kDirectional_LightType,
+ kPoint_LightType
+ };
+
+ Light(const Light& other)
+ : fType(other.fType)
+ , fColor(other.fColor)
+ , fDirOrPos(other.fDirOrPos)
+ , fIntensity(other.fIntensity)
+ , fShadowMap(other.fShadowMap)
+ , fIsRadial(other.fIsRadial) {
+ }
+
+ Light(Light&& other)
+ : fType(other.fType)
+ , fColor(other.fColor)
+ , fDirOrPos(other.fDirOrPos)
+ , fIntensity(other.fIntensity)
+ , fShadowMap(std::move(other.fShadowMap))
+ , fIsRadial(other.fIsRadial) {
+ }
+
+ static Light MakeDirectional(const SkColor3f& color, const SkVector3& dir,
+ bool isRadial = false) {
+ Light light(kDirectional_LightType, color, dir, isRadial);
+ if (!light.fDirOrPos.normalize()) {
+ light.fDirOrPos.set(0.0f, 0.0f, 1.0f);
+ }
+ return light;
+ }
+
+ static Light MakePoint(const SkColor3f& color, const SkPoint3& pos, SkScalar intensity,
+ bool isRadial = false) {
+ return Light(kPoint_LightType, color, pos, intensity, isRadial);
+ }
+
+ LightType type() const { return fType; }
+ const SkColor3f& color() const { return fColor; }
+ const SkVector3& dir() const {
+ SkASSERT(kDirectional_LightType == fType);
+ return fDirOrPos;
+ }
+ const SkPoint3& pos() const {
+ SkASSERT(kPoint_LightType == fType);
+ return fDirOrPos;
+ }
+ SkScalar intensity() const {
+ SkASSERT(kPoint_LightType == fType);
+ return fIntensity;
+ }
+
+ void setShadowMap(sk_sp<SkImage> shadowMap) {
+ fShadowMap = std::move(shadowMap);
+ }
+
+ SkImage* getShadowMap() const {
+ return fShadowMap.get();
+ }
+
+ bool isRadial() const { return fIsRadial; }
+
+ Light& operator= (const Light& b) {
+ if (this == &b) {
+ return *this;
+ }
+
+ fColor = b.fColor;
+ fType = b.fType;
+ fDirOrPos = b.fDirOrPos;
+ fIntensity = b.fIntensity;
+ fShadowMap = b.fShadowMap;
+ fIsRadial = b.fIsRadial;
+ return *this;
+ }
+
+ bool operator== (const Light& b) {
+ if (this == &b) {
+ return true;
+ }
+
+ return (fColor == b.fColor) &&
+ (fType == b.fType) &&
+ (fDirOrPos == b.fDirOrPos) &&
+ (fShadowMap == b.fShadowMap) &&
+ (fIntensity == b.fIntensity) &&
+ (fIsRadial == b.fIsRadial);
+ }
+
+ bool operator!= (const Light& b) { return !(this->operator==(b)); }
+
+ private:
+ LightType fType;
+ SkColor3f fColor; // linear (unpremul) color. Range is 0..1 in each channel.
+
+ SkVector3 fDirOrPos; // For directional lights, holds the direction towards the
+ // light (+Z is out of the screen).
+ // If degenerate, it will be replaced with (0, 0, 1).
+ // For point lights, holds location of point light
+
+ SkScalar fIntensity; // For point lights, dictates the light intensity.
+ // Simply a multiplier to the final light output value.
+ sk_sp<SkImage> fShadowMap;
+ bool fIsRadial; // Whether the light is radial or not. Radial lights will
+ // cast shadows and lights radially outwards.
+
+ Light(LightType type, const SkColor3f& color, const SkVector3& dirOrPos,
+ SkScalar intensity = 0.0f, bool isRadial = false) {
+ fType = type;
+ fColor = color;
+ fDirOrPos = dirOrPos;
+ fIntensity = intensity;
+ fIsRadial = isRadial;
+ }
+ };
+
+ class Builder {
+ public:
+ Builder() : fLights(new SkLights) {}
+
+ void add(const Light& light) {
+ if (fLights) {
+ fLights->fLights.push_back(light);
+ }
+ }
+
+ void add(Light&& light) {
+ if (fLights) {
+ fLights->fLights.push_back(std::move(light));
+ }
+ }
+
+ void setAmbientLightColor(const SkColor3f& color) {
+ if (fLights) {
+ fLights->fAmbientLightColor = color;
+ }
+ }
+
+ sk_sp<SkLights> finish() {
+ return std::move(fLights);
+ }
+
+ private:
+ sk_sp<SkLights> fLights;
+ };
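+
+ // Editor's note: an illustrative sketch, not part of the original header, of assembling a
+ // light set with the Builder. SkColor3f and SkVector3 are assumed to be the SkPoint3 aliases
+ // from SkColor.h/SkPoint3.h.
+ //
+ //   SkLights::Builder builder;
+ //   builder.add(SkLights::Light::MakeDirectional(SkColor3f::Make(1.0f, 1.0f, 1.0f),
+ //                                                SkVector3::Make(0.0f, 0.0f, 1.0f)));
+ //   builder.setAmbientLightColor(SkColor3f::Make(0.2f, 0.2f, 0.2f));
+ //   sk_sp<SkLights> lights = builder.finish();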
+
+ int numLights() const {
+ return fLights.count();
+ }
+
+ const Light& light(int index) const {
+ return fLights[index];
+ }
+
+ Light& light(int index) {
+ return fLights[index];
+ }
+
+ const SkColor3f& ambientLightColor() const {
+ return fAmbientLightColor;
+ }
+
+ static sk_sp<SkLights> MakeFromBuffer(SkReadBuffer& buf);
+
+ void flatten(SkWriteBuffer& buf) const;
+
+private:
+ SkLights() {
+ fAmbientLightColor.set(0.0f, 0.0f, 0.0f);
+ }
+ SkTArray<Light> fLights;
+ SkColor3f fAmbientLightColor;
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkMallocPixelRef.h b/gfx/skia/skia/include/core/SkMallocPixelRef.h
new file mode 100644
index 000000000..ab337b924
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMallocPixelRef.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkMallocPixelRef_DEFINED
+#define SkMallocPixelRef_DEFINED
+
+#include "SkPixelRef.h"
+
+/** We explicitly use the same allocator for our pixels that SkMask does,
+ so that we can freely assign memory allocated by one class to the other.
+*/
+class SK_API SkMallocPixelRef : public SkPixelRef {
+public:
+ /**
+ * Return a new SkMallocPixelRef with the provided pixel storage, rowBytes,
+ * and optional colortable. The caller is responsible for managing the
+ * lifetime of the pixel storage buffer, as this pixelref will not try
+ * to delete it.
+ *
+ * The pixelref will ref() the colortable (if not NULL).
+ *
+ * Returns NULL on failure.
+ */
+ static SkMallocPixelRef* NewDirect(const SkImageInfo&, void* addr,
+ size_t rowBytes, SkColorTable*);
+
+ /**
+ * Return a new SkMallocPixelRef, automatically allocating storage for the
+ * pixels. If rowBytes is 0, an optimal value will be chosen automatically.
+ * If rowBytes is > 0, then it will be respected, or NULL will be returned
+ * if rowBytes is invalid for the specified info.
+ *
+ * This pixelref will ref() the specified colortable (if not NULL).
+ *
+ * Returns NULL on failure.
+ */
+ static SkMallocPixelRef* NewAllocate(const SkImageInfo& info,
+ size_t rowBytes, SkColorTable*);
+
+ /**
+ * Identical to NewAllocate, except all pixel bytes are zeroed.
+ */
+ static SkMallocPixelRef* NewZeroed(const SkImageInfo& info,
+ size_t rowBytes, SkColorTable*);
+
+ /**
+ * Return a new SkMallocPixelRef with the provided pixel storage,
+ * rowBytes, and optional colortable. On destruction, ReleaseProc
+ * will be called.
+ *
+ * This pixelref will ref() the specified colortable (if not NULL).
+ *
+ * If ReleaseProc is NULL, the pixels will never be released. This
+ * can be useful if the pixels were stack allocated. However, such an
+ * SkMallocPixelRef must not live beyond its pixels (e.g. by copying
+ * an SkBitmap pointing to it, or drawing to an SkPicture).
+ *
+ * Returns NULL on failure.
+ */
+ typedef void (*ReleaseProc)(void* addr, void* context);
+ static SkMallocPixelRef* NewWithProc(const SkImageInfo& info,
+ size_t rowBytes, SkColorTable*,
+ void* addr, ReleaseProc proc,
+ void* context);
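+
+    // Illustrative sketch, not part of the upstream header: handing caller-allocated
+    // storage to the pixelref together with a ReleaseProc that frees it. Assumes the
+    // storage was obtained with sk_malloc_throw()/sk_free() from SkTypes.h.
+    //
+    //   static void release_storage(void* addr, void* /*context*/) { sk_free(addr); }
+    //   ...
+    //   SkMallocPixelRef* pr = SkMallocPixelRef::NewWithProc(info, rowBytes, nullptr,
+    //                                                        addr, release_storage, nullptr);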
+
+ /**
+ * Return a new SkMallocPixelRef that will use the provided
+ * SkData, rowBytes, and optional colortable as pixel storage.
+     *  The SkData will be ref()ed and, on destruction of the pixelref,
+ * the SkData will be unref()ed.
+ *
+ * This pixelref will ref() the specified colortable (if not NULL).
+ *
+ * Returns NULL on failure.
+ */
+ static SkMallocPixelRef* NewWithData(const SkImageInfo& info,
+ size_t rowBytes,
+ SkColorTable* ctable,
+ SkData* data);
+
+ void* getAddr() const { return fStorage; }
+
+ class PRFactory : public SkPixelRefFactory {
+ public:
+ SkPixelRef* create(const SkImageInfo&, size_t rowBytes, SkColorTable*) override;
+ };
+
+ class ZeroedPRFactory : public SkPixelRefFactory {
+ public:
+ SkPixelRef* create(const SkImageInfo&, size_t rowBytes, SkColorTable*) override;
+ };
+
+protected:
+ // The ownPixels version of this constructor is deprecated.
+ SkMallocPixelRef(const SkImageInfo&, void* addr, size_t rb, SkColorTable*,
+ bool ownPixels);
+ virtual ~SkMallocPixelRef();
+
+ bool onNewLockPixels(LockRec*) override;
+ void onUnlockPixels() override;
+ size_t getAllocatedSizeInBytes() const override;
+
+private:
+ // Uses alloc to implement NewAllocate or NewZeroed.
+ static SkMallocPixelRef* NewUsing(void*(*alloc)(size_t),
+ const SkImageInfo&,
+ size_t rowBytes,
+ SkColorTable*);
+
+ void* fStorage;
+ SkColorTable* fCTable;
+ size_t fRB;
+ ReleaseProc fReleaseProc;
+ void* fReleaseProcContext;
+
+ SkMallocPixelRef(const SkImageInfo&, void* addr, size_t rb, SkColorTable*,
+ ReleaseProc proc, void* context);
+
+ typedef SkPixelRef INHERITED;
+};
+
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkMask.h b/gfx/skia/skia/include/core/SkMask.h
new file mode 100644
index 000000000..a6d560647
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMask.h
@@ -0,0 +1,150 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkMask_DEFINED
+#define SkMask_DEFINED
+
+#include "SkRect.h"
+
+/** \class SkMask
+ SkMask is used to describe alpha bitmaps, either 1bit, 8bit, or
+ the 3-channel 3D format. These are passed to SkMaskFilter objects.
+*/
+struct SkMask {
+ SkMask() : fImage(nullptr) {}
+
+ enum Format {
+ kBW_Format, //!< 1bit per pixel mask (e.g. monochrome)
+ kA8_Format, //!< 8bits per pixel mask (e.g. antialiasing)
+        k3D_Format, //!< 3 8bit per pixel planes: alpha, mul, add
+ kARGB32_Format, //!< SkPMColor
+ kLCD16_Format, //!< 565 alpha for r/g/b
+ };
+
+ enum {
+ kCountMaskFormats = kLCD16_Format + 1
+ };
+
+ uint8_t* fImage;
+ SkIRect fBounds;
+ uint32_t fRowBytes;
+ Format fFormat;
+
+ /** Returns true if the mask is empty: i.e. it has an empty bounds.
+ */
+ bool isEmpty() const { return fBounds.isEmpty(); }
+
+ /** Return the byte size of the mask, assuming only 1 plane.
+ Does not account for k3D_Format. For that, use computeTotalImageSize().
+ If there is an overflow of 32bits, then returns 0.
+ */
+ size_t computeImageSize() const;
+
+ /** Return the byte size of the mask, taking into account
+ any extra planes (e.g. k3D_Format).
+ If there is an overflow of 32bits, then returns 0.
+ */
+ size_t computeTotalImageSize() const;
+
+ /** Returns the address of the byte that holds the specified bit.
+ Asserts that the mask is kBW_Format, and that x,y are in range.
+        x,y are in the same coordinate space as fBounds.
+ */
+ uint8_t* getAddr1(int x, int y) const {
+ SkASSERT(kBW_Format == fFormat);
+ SkASSERT(fBounds.contains(x, y));
+ SkASSERT(fImage != NULL);
+ return fImage + ((x - fBounds.fLeft) >> 3) + (y - fBounds.fTop) * fRowBytes;
+ }
+
+ /** Returns the address of the specified byte.
+ Asserts that the mask is kA8_Format, and that x,y are in range.
+        x,y are in the same coordinate space as fBounds.
+ */
+ uint8_t* getAddr8(int x, int y) const {
+ SkASSERT(kA8_Format == fFormat);
+ SkASSERT(fBounds.contains(x, y));
+ SkASSERT(fImage != NULL);
+ return fImage + x - fBounds.fLeft + (y - fBounds.fTop) * fRowBytes;
+ }
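+
+    // Illustrative sketch, not part of the upstream header: summing the coverage of
+    // an A8 mask row by row using fBounds, fRowBytes and getAddr8().
+    //
+    //   unsigned total = 0;
+    //   for (int y = mask.fBounds.fTop; y < mask.fBounds.fBottom; ++y) {
+    //       const uint8_t* row = mask.getAddr8(mask.fBounds.fLeft, y);
+    //       for (int x = 0; x < mask.fBounds.width(); ++x) {
+    //           total += row[x];
+    //       }
+    //   }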
+
+ /**
+ * Return the address of the specified 16bit mask. In the debug build,
+ * this asserts that the mask's format is kLCD16_Format, and that (x,y)
+ * are contained in the mask's fBounds.
+ */
+ uint16_t* getAddrLCD16(int x, int y) const {
+ SkASSERT(kLCD16_Format == fFormat);
+ SkASSERT(fBounds.contains(x, y));
+ SkASSERT(fImage != NULL);
+ uint16_t* row = (uint16_t*)(fImage + (y - fBounds.fTop) * fRowBytes);
+ return row + (x - fBounds.fLeft);
+ }
+
+ /**
+ * Return the address of the specified 32bit mask. In the debug build,
+ * this asserts that the mask's format is 32bits, and that (x,y)
+ * are contained in the mask's fBounds.
+ */
+ uint32_t* getAddr32(int x, int y) const {
+ SkASSERT(kARGB32_Format == fFormat);
+ SkASSERT(fBounds.contains(x, y));
+ SkASSERT(fImage != NULL);
+ uint32_t* row = (uint32_t*)(fImage + (y - fBounds.fTop) * fRowBytes);
+ return row + (x - fBounds.fLeft);
+ }
+
+ /**
+ * Returns the address of the specified pixel, computing the pixel-size
+ * at runtime based on the mask format. This will be slightly slower than
+ * using one of the routines where the format is implied by the name
+ * e.g. getAddr8 or getAddr32.
+ *
+ * x,y must be contained by the mask's bounds (this is asserted in the
+ * debug build, but not checked in the release build.)
+ *
+ * This should not be called with kBW_Format, as it will give unspecified
+ * results (and assert in the debug build).
+ */
+ void* getAddr(int x, int y) const;
+
+ static uint8_t* AllocImage(size_t bytes);
+ static void FreeImage(void* image);
+
+ enum CreateMode {
+ kJustComputeBounds_CreateMode, //!< compute bounds and return
+        kJustRenderImage_CreateMode,        //!< render into preallocated mask
+ kComputeBoundsAndRenderImage_CreateMode //!< compute bounds, alloc image and render into it
+ };
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ *  \class SkAutoMaskFreeImage
+ *
+ * Stack class used to manage the fImage buffer in a SkMask.
+ *  When this object goes out of scope, the buffer is freed with SkMask::FreeImage().
+ */
+class SkAutoMaskFreeImage {
+public:
+ SkAutoMaskFreeImage(uint8_t* maskImage) {
+ fImage = maskImage;
+ }
+
+ ~SkAutoMaskFreeImage() {
+ SkMask::FreeImage(fImage);
+ }
+
+private:
+ uint8_t* fImage;
+};
+#define SkAutoMaskFreeImage(...) SK_REQUIRE_LOCAL_VAR(SkAutoMaskFreeImage)
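+
+// Illustrative sketch, not part of the upstream header: allocating a mask image and
+// letting the stack helper release it when the scope ends.
+//
+//   mask.fImage = SkMask::AllocImage(mask.computeImageSize());
+//   SkAutoMaskFreeImage autoFree(mask.fImage);   // destructor calls SkMask::FreeImage()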
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkMaskFilter.h b/gfx/skia/skia/include/core/SkMaskFilter.h
new file mode 100644
index 000000000..95a663d56
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMaskFilter.h
@@ -0,0 +1,243 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkMaskFilter_DEFINED
+#define SkMaskFilter_DEFINED
+
+#include "SkBlurTypes.h"
+#include "SkFlattenable.h"
+#include "SkMask.h"
+#include "SkPaint.h"
+#include "SkStrokeRec.h"
+
+class GrClip;
+class GrContext;
+class GrDrawContext;
+class GrPaint;
+class GrRenderTarget;
+class GrTextureProvider;
+class SkBitmap;
+class SkBlitter;
+class SkCachedData;
+class SkMatrix;
+class SkPath;
+class SkRasterClip;
+class SkRRect;
+
+/** \class SkMaskFilter
+
+    SkMaskFilter is the base class for objects that perform transformations on
+ an alpha-channel mask before drawing it. A subclass of SkMaskFilter may be
+ installed into a SkPaint. Once there, each time a primitive is drawn, it
+ is first scan converted into a SkMask::kA8_Format mask, and handed to the
+ filter, calling its filterMask() method. If this returns true, then the
+ new mask is used to render into the device.
+
+ Blur and emboss are implemented as subclasses of SkMaskFilter.
+*/
+class SK_API SkMaskFilter : public SkFlattenable {
+public:
+ /** Returns the format of the resulting mask that this subclass will return
+ when its filterMask() method is called.
+ */
+ virtual SkMask::Format getFormat() const = 0;
+
+    /** Create a new mask by filtering the src mask.
+ If src.fImage == null, then do not allocate or create the dst image
+ but do fill out the other fields in dstMask.
+ If you do allocate a dst image, use SkMask::AllocImage()
+ If this returns false, dst mask is ignored.
+ @param dst the result of the filter. If src.fImage == null, dst should not allocate its image
+ @param src the original image to be filtered.
+ @param matrix the CTM
+        @param margin if not null, return the extra dx/dy margin needed when calculating the effect. Used when
+ drawing a clipped object to know how much larger to allocate the src before
+ applying the filter. If returning false, ignore this parameter.
+ @return true if the dst mask was correctly created.
+ */
+ virtual bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix&,
+ SkIPoint* margin) const;
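+
+    // Illustrative sketch, not part of the upstream header: the shape of an override
+    // that honors the two-phase contract above. MyFilter, dx and dy are placeholders,
+    // not real Skia API.
+    //
+    //   bool MyFilter::filterMask(SkMask* dst, const SkMask& src,
+    //                             const SkMatrix& ctm, SkIPoint* margin) const {
+    //       dst->fBounds   = ...;                  // always fill out the geometry
+    //       dst->fRowBytes = ...;
+    //       dst->fFormat   = SkMask::kA8_Format;
+    //       if (margin) { margin->set(dx, dy); }
+    //       if (nullptr == src.fImage) {
+    //           return true;                       // bounds-only query: do not allocate
+    //       }
+    //       dst->fImage = SkMask::AllocImage(dst->computeImageSize());
+    //       // ... write filtered coverage into dst->fImage ...
+    //       return true;
+    //   }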
+
+#if SK_SUPPORT_GPU
+ /**
+     * Returns true if the filter can be expressed as a single-pass GrProcessor without requiring an
+     * explicit input mask. Per-pixel, the effect receives the incoming mask's coverage as
+     * the input color and outputs the filtered coverage value. This means that each pixel's
+ * filtered coverage must only depend on the unfiltered mask value for that pixel and not on
+ * surrounding values.
+ *
+ * If effect is non-NULL, a new GrProcessor instance is stored in it. The caller assumes
+ * ownership of the effect and must unref it.
+ */
+ virtual bool asFragmentProcessor(GrFragmentProcessor**, GrTexture*, const SkMatrix& ctm) const;
+
+ /**
+ * If asFragmentProcessor() fails the filter may be implemented on the GPU by a subclass
+ * overriding filterMaskGPU (declared below). That code path requires constructing a
+ * src mask as input. Since that is a potentially expensive operation, the subclass must also
+     * override this function to indicate whether filterMaskGPU would succeed if the mask
+ * were to be created.
+ *
+ * 'maskRect' returns the device space portion of the mask that the filter needs. The mask
+ * passed into 'filterMaskGPU' should have the same extent as 'maskRect' but be
+ * translated to the upper-left corner of the mask (i.e., (maskRect.fLeft, maskRect.fTop)
+ * appears at (0, 0) in the mask).
+ *
+ * Logically, how this works is:
+ * canFilterMaskGPU is called
+ * if (it returns true)
+ * the returned mask rect is used for quick rejecting
+ * either directFilterMaskGPU or directFilterRRectMaskGPU is then called
+ * if (neither of them handle the blur)
+ * the mask rect is used to generate the mask
+ * filterMaskGPU is called to filter the mask
+ *
+ * TODO: this should work as:
+ * if (canFilterMaskGPU(devShape, ...)) // rect, rrect, drrect, path
+ * filterMaskGPU(devShape, ...)
+ * this would hide the RRect special case and the mask generation
+ */
+ virtual bool canFilterMaskGPU(const SkRRect& devRRect,
+ const SkIRect& clipBounds,
+ const SkMatrix& ctm,
+ SkRect* maskRect) const;
+
+ /**
+ * Try to directly render the mask filter into the target. Returns
+ * true if drawing was successful.
+ */
+ virtual bool directFilterMaskGPU(GrTextureProvider* texProvider,
+ GrDrawContext* drawContext,
+ GrPaint* grp,
+ const GrClip&,
+ const SkMatrix& viewMatrix,
+ const SkStrokeRec& strokeRec,
+ const SkPath& path) const;
+ /**
+ * Try to directly render a rounded rect mask filter into the target. Returns
+ * true if drawing was successful.
+ */
+ virtual bool directFilterRRectMaskGPU(GrContext*,
+ GrDrawContext* drawContext,
+ GrPaint* grp,
+ const GrClip&,
+ const SkMatrix& viewMatrix,
+ const SkStrokeRec& strokeRec,
+ const SkRRect& rrect,
+ const SkRRect& devRRect) const;
+
+ /**
+ * This function is used to implement filters that require an explicit src mask. It should only
+ * be called if canFilterMaskGPU returned true and the maskRect param should be the output from
+ * that call.
+ * Implementations are free to get the GrContext from the src texture in order to create
+ * additional textures and perform multiple passes.
+ */
+ virtual bool filterMaskGPU(GrTexture* src,
+ const SkMatrix& ctm,
+ const SkIRect& maskRect,
+ GrTexture** result) const;
+#endif
+
+ /**
+ * The fast bounds function is used to enable the paint to be culled early
+ * in the drawing pipeline. This function accepts the current bounds of the
+     * paint as its src param and the filter adjusts those bounds using its
+ * current mask and returns the result using the dest param. Callers are
+ * allowed to provide the same struct for both src and dest so each
+     * implementation must accommodate that behavior.
+ *
+ * The default impl calls filterMask with the src mask having no image,
+ * but subclasses may override this if they can compute the rect faster.
+ */
+ virtual void computeFastBounds(const SkRect& src, SkRect* dest) const;
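+
+    // Illustrative sketch, not part of the upstream header: a blur-like override that
+    // simply outsets the bounds. Writing through 'dest' in one step keeps it correct
+    // when callers pass the same SkRect for src and dest. fSigma is a hypothetical
+    // member of the subclass.
+    //
+    //   void MyBlurFilter::computeFastBounds(const SkRect& src, SkRect* dest) const {
+    //       SkScalar pad = 3 * fSigma;
+    //       *dest = src.makeOutset(pad, pad);
+    //   }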
+
+ struct BlurRec {
+ SkScalar fSigma;
+ SkBlurStyle fStyle;
+ SkBlurQuality fQuality;
+ };
+ /**
+ * If this filter can be represented by a BlurRec, return true and (if not null) fill in the
+ * provided BlurRec parameter. If this effect cannot be represented as a BlurRec, return false
+ * and ignore the BlurRec parameter.
+ */
+ virtual bool asABlur(BlurRec*) const;
+
+ SK_TO_STRING_PUREVIRT()
+ SK_DEFINE_FLATTENABLE_TYPE(SkMaskFilter)
+
+protected:
+ SkMaskFilter() {}
+
+ enum FilterReturn {
+ kFalse_FilterReturn,
+ kTrue_FilterReturn,
+ kUnimplemented_FilterReturn
+ };
+
+ class NinePatch : ::SkNoncopyable {
+ public:
+ NinePatch() : fCache(nullptr) { }
+ ~NinePatch();
+
+ SkMask fMask; // fBounds must have [0,0] in its top-left
+ SkIRect fOuterRect; // width/height must be >= fMask.fBounds'
+ SkIPoint fCenter; // identifies center row/col for stretching
+ SkCachedData* fCache;
+ };
+
+ /**
+ * Override if your subclass can filter a rect, and return the answer as
+ * a ninepatch mask to be stretched over the returned outerRect. On success
+ * return kTrue_FilterReturn. On failure (e.g. out of memory) return
+ * kFalse_FilterReturn. If the normal filterMask() entry-point should be
+ * called (the default) return kUnimplemented_FilterReturn.
+ *
+     *  By convention, the caller will take the center row/col from the returned
+ * mask as the slice it can replicate horizontally and vertically as we
+ * stretch the mask to fit inside outerRect. It is an error for outerRect
+ * to be smaller than the mask's bounds. This would imply that the width
+ * and height of the mask should be odd. This is not required, just that
+ * the caller will call mask.fBounds.centerX() and centerY() to find the
+ * strips that will be replicated.
+ */
+ virtual FilterReturn filterRectsToNine(const SkRect[], int count,
+ const SkMatrix&,
+ const SkIRect& clipBounds,
+ NinePatch*) const;
+ /**
+ * Similar to filterRectsToNine, except it performs the work on a round rect.
+ */
+ virtual FilterReturn filterRRectToNine(const SkRRect&, const SkMatrix&,
+ const SkIRect& clipBounds,
+ NinePatch*) const;
+
+private:
+ friend class SkDraw;
+
+ /** Helper method that, given a path in device space, will rasterize it into a kA8_Format mask
+ and then call filterMask(). If this returns true, the specified blitter will be called
+ to render that mask. Returns false if filterMask() returned false.
+ This method is not exported to java.
+ */
+ bool filterPath(const SkPath& devPath, const SkMatrix& ctm, const SkRasterClip&, SkBlitter*,
+ SkStrokeRec::InitStyle) const;
+
+ /** Helper method that, given a roundRect in device space, will rasterize it into a kA8_Format
+ mask and then call filterMask(). If this returns true, the specified blitter will be called
+ to render that mask. Returns false if filterMask() returned false.
+ */
+ bool filterRRect(const SkRRect& devRRect, const SkMatrix& ctm, const SkRasterClip&,
+ SkBlitter*) const;
+
+ typedef SkFlattenable INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkMath.h b/gfx/skia/skia/include/core/SkMath.h
new file mode 100644
index 000000000..6e252306d
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMath.h
@@ -0,0 +1,144 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkMath_DEFINED
+#define SkMath_DEFINED
+
+#include "SkTypes.h"
+
+// 64bit -> 32bit utilities
+
+/**
+ * Return true iff the 64bit value can exactly be represented in signed 32bits
+ */
+static inline bool sk_64_isS32(int64_t value) {
+ return (int32_t)value == value;
+}
+
+/**
+ * Return the 64bit argument as signed 32bits, asserting in debug that the arg
+ *  exactly fits in signed 32bits. In the release build, no checks are performed
+ *  and the return value is undefined if the arg does not fit.
+ */
+static inline int32_t sk_64_asS32(int64_t value) {
+ SkASSERT(sk_64_isS32(value));
+ return (int32_t)value;
+}
+
+// Handy util that can be passed two ints, and will automatically promote to
+// 64bits before the multiply, so the caller doesn't have to remember to cast
+// e.g. (int64_t)a * b;
+static inline int64_t sk_64_mul(int64_t a, int64_t b) {
+ return a * b;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ *  Computes numer1 * numer2 / denom in full 64-bit intermediate precision.
+ * It is an error for denom to be 0. There is no special handling if
+ * the result overflows 32bits.
+ */
+static inline int32_t SkMulDiv(int32_t numer1, int32_t numer2, int32_t denom) {
+ SkASSERT(denom);
+
+ int64_t tmp = sk_64_mul(numer1, numer2) / denom;
+ return sk_64_asS32(tmp);
+}
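+
+// Worked example, not part of the upstream header:
+// SkMulDiv(300, 200, 7) computes (300 * 200) / 7 = 8571 entirely in 64-bit
+// intermediates. If the quotient itself no longer fits in 32 bits (e.g.
+// SkMulDiv(100000, 100000, 3)), sk_64_asS32 asserts in debug builds and the
+// release-build result is undefined, matching the comment above.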
+
+/**
+ * Return the integer square root of value, with a bias of bitBias
+ */
+int32_t SkSqrtBits(int32_t value, int bitBias);
+
+/** Return the integer square root of n, treated as a SkFixed (16.16)
+ */
+#define SkSqrt32(n) SkSqrtBits(n, 15)
+
+/**
+ * Returns (value < 0 ? 0 : value) efficiently (i.e. no compares or branches)
+ */
+static inline int SkClampPos(int value) {
+ return value & ~(value >> 31);
+}
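+
+// Worked example, not part of the upstream header: with an arithmetic right shift,
+// (value >> 31) is all ones for negative value, so the complement masks the result
+// to 0; for non-negative value the shift is 0 and the value passes through.
+// E.g. SkClampPos(-5) == 0 and SkClampPos(7) == 7.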
+
+/** Given an integer and a positive (max) integer, return the value
+ * pinned against 0 and max, inclusive.
+ * @param value The value we want returned pinned between [0...max]
+ * @param max The positive max value
+ * @return 0 if value < 0, max if value > max, else value
+ */
+static inline int SkClampMax(int value, int max) {
+ // ensure that max is positive
+ SkASSERT(max >= 0);
+ if (value < 0) {
+ value = 0;
+ }
+ if (value > max) {
+ value = max;
+ }
+ return value;
+}
+
+/**
+ * Returns true if value is a power of 2. Does not explicitly check for
+ * value <= 0.
+ */
+template <typename T> constexpr inline bool SkIsPow2(T value) {
+ return (value & (value - 1)) == 0;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Return a*b/((1 << shift) - 1), rounding any fractional bits.
+ * Only valid if a and b are unsigned and <= 32767 and shift is > 0 and <= 8
+ */
+static inline unsigned SkMul16ShiftRound(U16CPU a, U16CPU b, int shift) {
+ SkASSERT(a <= 32767);
+ SkASSERT(b <= 32767);
+ SkASSERT(shift > 0 && shift <= 8);
+ unsigned prod = a*b + (1 << (shift - 1));
+ return (prod + (prod >> shift)) >> shift;
+}
+
+/**
+ * Return a*b/255, rounding any fractional bits.
+ * Only valid if a and b are unsigned and <= 32767.
+ */
+static inline U8CPU SkMulDiv255Round(U16CPU a, U16CPU b) {
+ SkASSERT(a <= 32767);
+ SkASSERT(b <= 32767);
+ unsigned prod = a*b + 128;
+ return (prod + (prod >> 8)) >> 8;
+}
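+
+// Worked example, not part of the upstream header:
+// SkMulDiv255Round(128, 255): prod = 128*255 + 128 = 32768, and
+// (32768 + (32768 >> 8)) >> 8 = 32896 >> 8 = 128, so multiplying by a fully opaque
+// alpha of 255 leaves the value unchanged, with rounding instead of truncation.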
+
+/**
+ * Stores numer/denom and numer%denom into div and mod respectively.
+ */
+template <typename In, typename Out>
+inline void SkTDivMod(In numer, In denom, Out* div, Out* mod) {
+#ifdef SK_CPU_ARM32
+ // If we wrote this as in the else branch, GCC won't fuse the two into one
+ // divmod call, but rather a div call followed by a divmod. Silly! This
+ // version is just as fast as calling __aeabi_[u]idivmod manually, but with
+ // prettier code.
+ //
+ // This benches as around 2x faster than the code in the else branch.
+ const In d = numer/denom;
+ *div = static_cast<Out>(d);
+ *mod = static_cast<Out>(numer-d*denom);
+#else
+ // On x86 this will just be a single idiv.
+ *div = static_cast<Out>(numer/denom);
+ *mod = static_cast<Out>(numer%denom);
+#endif
+}
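+
+// Illustrative sketch, not part of the upstream header:
+//
+//   int div, mod;
+//   SkTDivMod(17, 5, &div, &mod);   // div == 3, mod == 2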
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkMatrix.h b/gfx/skia/skia/include/core/SkMatrix.h
new file mode 100644
index 000000000..f565a537b
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMatrix.h
@@ -0,0 +1,850 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkMatrix_DEFINED
+#define SkMatrix_DEFINED
+
+#include "SkRect.h"
+
+struct SkRSXform;
+class SkString;
+
+/** \class SkMatrix
+
+ The SkMatrix class holds a 3x3 matrix for transforming coordinates.
+ SkMatrix does not have a constructor, so it must be explicitly initialized
+ using either reset() - to construct an identity matrix, or one of the set
+ functions (e.g. setTranslate, setRotate, etc.).
+*/
+SK_BEGIN_REQUIRE_DENSE
+class SK_API SkMatrix {
+public:
+ static SkMatrix SK_WARN_UNUSED_RESULT MakeScale(SkScalar sx, SkScalar sy) {
+ SkMatrix m;
+ m.setScale(sx, sy);
+ return m;
+ }
+
+ static SkMatrix SK_WARN_UNUSED_RESULT MakeScale(SkScalar scale) {
+ SkMatrix m;
+ m.setScale(scale, scale);
+ return m;
+ }
+
+ static SkMatrix SK_WARN_UNUSED_RESULT MakeTrans(SkScalar dx, SkScalar dy) {
+ SkMatrix m;
+ m.setTranslate(dx, dy);
+ return m;
+ }
+
+ /** Enum of bit fields for the mask return by getType().
+ Use this to identify the complexity of the matrix.
+ */
+ enum TypeMask {
+ kIdentity_Mask = 0,
+ kTranslate_Mask = 0x01, //!< set if the matrix has translation
+ kScale_Mask = 0x02, //!< set if the matrix has X or Y scale
+ kAffine_Mask = 0x04, //!< set if the matrix skews or rotates
+ kPerspective_Mask = 0x08 //!< set if the matrix is in perspective
+ };
+
+ /** Returns a bitfield describing the transformations the matrix may
+ perform. The bitfield is computed conservatively, so it may include
+ false positives. For example, when kPerspective_Mask is true, all
+ other bits may be set to true even in the case of a pure perspective
+ transform.
+ */
+ TypeMask getType() const {
+ if (fTypeMask & kUnknown_Mask) {
+ fTypeMask = this->computeTypeMask();
+ }
+ // only return the public masks
+ return (TypeMask)(fTypeMask & 0xF);
+ }
+
+ /** Returns true if the matrix is identity.
+ */
+ bool isIdentity() const {
+ return this->getType() == 0;
+ }
+
+ bool isScaleTranslate() const {
+ return !(this->getType() & ~(kScale_Mask | kTranslate_Mask));
+ }
+
+    /** Returns true if the matrix will map a rectangle to another rectangle. This can be
+ true if the matrix is identity, scale-only, or rotates a multiple of
+ 90 degrees, or mirrors in x or y.
+ */
+ bool rectStaysRect() const {
+ if (fTypeMask & kUnknown_Mask) {
+ fTypeMask = this->computeTypeMask();
+ }
+ return (fTypeMask & kRectStaysRect_Mask) != 0;
+ }
+ // alias for rectStaysRect()
+ bool preservesAxisAlignment() const { return this->rectStaysRect(); }
+
+ /**
+ * Returns true if the matrix contains perspective elements.
+ */
+ bool hasPerspective() const {
+ return SkToBool(this->getPerspectiveTypeMaskOnly() &
+ kPerspective_Mask);
+ }
+
+ /** Returns true if the matrix contains only translation, rotation/reflection or uniform scale
+        Returns false if other transformation types are included or the matrix is degenerate.
+ */
+ bool isSimilarity(SkScalar tol = SK_ScalarNearlyZero) const;
+
+ /** Returns true if the matrix contains only translation, rotation/reflection or scale
+ (non-uniform scale is allowed).
+        Returns false if other transformation types are included or the matrix is degenerate.
+ */
+ bool preservesRightAngles(SkScalar tol = SK_ScalarNearlyZero) const;
+
+ enum {
+ kMScaleX,
+ kMSkewX,
+ kMTransX,
+ kMSkewY,
+ kMScaleY,
+ kMTransY,
+ kMPersp0,
+ kMPersp1,
+ kMPersp2
+ };
+
+ /** Affine arrays are in column major order
+ because that's how PDF and XPS like it.
+ */
+ enum {
+ kAScaleX,
+ kASkewY,
+ kASkewX,
+ kAScaleY,
+ kATransX,
+ kATransY
+ };
+
+ SkScalar operator[](int index) const {
+ SkASSERT((unsigned)index < 9);
+ return fMat[index];
+ }
+
+ SkScalar get(int index) const {
+ SkASSERT((unsigned)index < 9);
+ return fMat[index];
+ }
+
+ SkScalar getScaleX() const { return fMat[kMScaleX]; }
+ SkScalar getScaleY() const { return fMat[kMScaleY]; }
+ SkScalar getSkewY() const { return fMat[kMSkewY]; }
+ SkScalar getSkewX() const { return fMat[kMSkewX]; }
+ SkScalar getTranslateX() const { return fMat[kMTransX]; }
+ SkScalar getTranslateY() const { return fMat[kMTransY]; }
+ SkScalar getPerspX() const { return fMat[kMPersp0]; }
+ SkScalar getPerspY() const { return fMat[kMPersp1]; }
+
+ SkScalar& operator[](int index) {
+ SkASSERT((unsigned)index < 9);
+ this->setTypeMask(kUnknown_Mask);
+ return fMat[index];
+ }
+
+ void set(int index, SkScalar value) {
+ SkASSERT((unsigned)index < 9);
+ fMat[index] = value;
+ this->setTypeMask(kUnknown_Mask);
+ }
+
+ void setScaleX(SkScalar v) { this->set(kMScaleX, v); }
+ void setScaleY(SkScalar v) { this->set(kMScaleY, v); }
+ void setSkewY(SkScalar v) { this->set(kMSkewY, v); }
+ void setSkewX(SkScalar v) { this->set(kMSkewX, v); }
+ void setTranslateX(SkScalar v) { this->set(kMTransX, v); }
+ void setTranslateY(SkScalar v) { this->set(kMTransY, v); }
+ void setPerspX(SkScalar v) { this->set(kMPersp0, v); }
+ void setPerspY(SkScalar v) { this->set(kMPersp1, v); }
+
+ void setAll(SkScalar scaleX, SkScalar skewX, SkScalar transX,
+ SkScalar skewY, SkScalar scaleY, SkScalar transY,
+ SkScalar persp0, SkScalar persp1, SkScalar persp2) {
+ fMat[kMScaleX] = scaleX;
+ fMat[kMSkewX] = skewX;
+ fMat[kMTransX] = transX;
+ fMat[kMSkewY] = skewY;
+ fMat[kMScaleY] = scaleY;
+ fMat[kMTransY] = transY;
+ fMat[kMPersp0] = persp0;
+ fMat[kMPersp1] = persp1;
+ fMat[kMPersp2] = persp2;
+ this->setTypeMask(kUnknown_Mask);
+ }
+
+ /**
+ * Copy the 9 scalars for this matrix into buffer, in the same order as the kMScaleX
+ * enum... scalex, skewx, transx, skewy, scaley, transy, persp0, persp1, persp2
+ */
+ void get9(SkScalar buffer[9]) const {
+ memcpy(buffer, fMat, 9 * sizeof(SkScalar));
+ }
+
+ /**
+ * Set this matrix to the 9 scalars from the buffer, in the same order as the kMScaleX
+ * enum... scalex, skewx, transx, skewy, scaley, transy, persp0, persp1, persp2
+ *
+ * Note: calling set9 followed by get9 may not return the exact same values. Since the matrix
+ * is used to map non-homogeneous coordinates, it is free to rescale the 9 values as needed.
+ */
+ void set9(const SkScalar buffer[9]);
+
+ /** Set the matrix to identity
+ */
+ void reset();
+ // alias for reset()
+ void setIdentity() { this->reset(); }
+
+ /** Set the matrix to translate by (dx, dy).
+ */
+ void setTranslate(SkScalar dx, SkScalar dy);
+ void setTranslate(const SkVector& v) { this->setTranslate(v.fX, v.fY); }
+
+ /** Set the matrix to scale by sx and sy, with a pivot point at (px, py).
+ The pivot point is the coordinate that should remain unchanged by the
+ specified transformation.
+ */
+ void setScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py);
+ /** Set the matrix to scale by sx and sy.
+ */
+ void setScale(SkScalar sx, SkScalar sy);
+ /** Set the matrix to scale by 1/divx and 1/divy. Returns false and doesn't
+ touch the matrix if either divx or divy is zero.
+ */
+ bool setIDiv(int divx, int divy);
+ /** Set the matrix to rotate by the specified number of degrees, with a
+ pivot point at (px, py). The pivot point is the coordinate that should
+ remain unchanged by the specified transformation.
+ */
+ void setRotate(SkScalar degrees, SkScalar px, SkScalar py);
+ /** Set the matrix to rotate about (0,0) by the specified number of degrees.
+ */
+ void setRotate(SkScalar degrees);
+ /** Set the matrix to rotate by the specified sine and cosine values, with
+ a pivot point at (px, py). The pivot point is the coordinate that
+ should remain unchanged by the specified transformation.
+ */
+ void setSinCos(SkScalar sinValue, SkScalar cosValue,
+ SkScalar px, SkScalar py);
+ /** Set the matrix to rotate by the specified sine and cosine values.
+ */
+ void setSinCos(SkScalar sinValue, SkScalar cosValue);
+
+ SkMatrix& setRSXform(const SkRSXform&);
+
+    /** Set the matrix to skew by kx and ky, with a pivot point at (px, py).
+ The pivot point is the coordinate that should remain unchanged by the
+ specified transformation.
+ */
+ void setSkew(SkScalar kx, SkScalar ky, SkScalar px, SkScalar py);
+    /** Set the matrix to skew by kx and ky.
+ */
+ void setSkew(SkScalar kx, SkScalar ky);
+ /** Set the matrix to the concatenation of the two specified matrices.
+ Either of the two matrices may also be the target matrix.
+ *this = a * b;
+ */
+ void setConcat(const SkMatrix& a, const SkMatrix& b);
+
+ /** Preconcats the matrix with the specified translation.
+ M' = M * T(dx, dy)
+ */
+ void preTranslate(SkScalar dx, SkScalar dy);
+ /** Preconcats the matrix with the specified scale.
+ M' = M * S(sx, sy, px, py)
+ */
+ void preScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py);
+ /** Preconcats the matrix with the specified scale.
+ M' = M * S(sx, sy)
+ */
+ void preScale(SkScalar sx, SkScalar sy);
+ /** Preconcats the matrix with the specified rotation.
+ M' = M * R(degrees, px, py)
+ */
+ void preRotate(SkScalar degrees, SkScalar px, SkScalar py);
+ /** Preconcats the matrix with the specified rotation.
+ M' = M * R(degrees)
+ */
+ void preRotate(SkScalar degrees);
+ /** Preconcats the matrix with the specified skew.
+ M' = M * K(kx, ky, px, py)
+ */
+ void preSkew(SkScalar kx, SkScalar ky, SkScalar px, SkScalar py);
+ /** Preconcats the matrix with the specified skew.
+ M' = M * K(kx, ky)
+ */
+ void preSkew(SkScalar kx, SkScalar ky);
+ /** Preconcats the matrix with the specified matrix.
+ M' = M * other
+ */
+ void preConcat(const SkMatrix& other);
+
+ /** Postconcats the matrix with the specified translation.
+ M' = T(dx, dy) * M
+ */
+ void postTranslate(SkScalar dx, SkScalar dy);
+ /** Postconcats the matrix with the specified scale.
+ M' = S(sx, sy, px, py) * M
+ */
+ void postScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py);
+ /** Postconcats the matrix with the specified scale.
+ M' = S(sx, sy) * M
+ */
+ void postScale(SkScalar sx, SkScalar sy);
+ /** Postconcats the matrix by dividing it by the specified integers.
+ M' = S(1/divx, 1/divy, 0, 0) * M
+ */
+ bool postIDiv(int divx, int divy);
+ /** Postconcats the matrix with the specified rotation.
+ M' = R(degrees, px, py) * M
+ */
+ void postRotate(SkScalar degrees, SkScalar px, SkScalar py);
+ /** Postconcats the matrix with the specified rotation.
+ M' = R(degrees) * M
+ */
+ void postRotate(SkScalar degrees);
+ /** Postconcats the matrix with the specified skew.
+ M' = K(kx, ky, px, py) * M
+ */
+ void postSkew(SkScalar kx, SkScalar ky, SkScalar px, SkScalar py);
+ /** Postconcats the matrix with the specified skew.
+ M' = K(kx, ky) * M
+ */
+ void postSkew(SkScalar kx, SkScalar ky);
+ /** Postconcats the matrix with the specified matrix.
+ M' = other * M
+ */
+ void postConcat(const SkMatrix& other);
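+
+    // Illustrative note, not part of the upstream header: since points are mapped as
+    // M * pt, a pre-op is applied to points before the existing matrix and a post-op
+    // after it.
+    //
+    //   SkMatrix m;
+    //   m.setScale(2, 2);
+    //   m.preTranslate(10, 0);    // M' = S(2,2) * T(10,0): points are translated, then scaled
+    //   m.postTranslate(5, 0);    // M'' = T(5,0) * M': the previous result is then translated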
+
+ enum ScaleToFit {
+ /**
+ * Scale in X and Y independently, so that src matches dst exactly.
+ * This may change the aspect ratio of the src.
+ */
+ kFill_ScaleToFit,
+ /**
+ * Compute a scale that will maintain the original src aspect ratio,
+ * but will also ensure that src fits entirely inside dst. At least one
+ * axis (X or Y) will fit exactly. kStart aligns the result to the
+ * left and top edges of dst.
+ */
+ kStart_ScaleToFit,
+ /**
+ * Compute a scale that will maintain the original src aspect ratio,
+ * but will also ensure that src fits entirely inside dst. At least one
+ * axis (X or Y) will fit exactly. The result is centered inside dst.
+ */
+ kCenter_ScaleToFit,
+ /**
+ * Compute a scale that will maintain the original src aspect ratio,
+ * but will also ensure that src fits entirely inside dst. At least one
+ * axis (X or Y) will fit exactly. kEnd aligns the result to the
+ * right and bottom edges of dst.
+ */
+ kEnd_ScaleToFit
+ };
+
+ /** Set the matrix to the scale and translate values that map the source
+        rectangle to the destination rectangle, returning true if the result
+ can be represented.
+ @param src the source rectangle to map from.
+ @param dst the destination rectangle to map to.
+ @param stf the ScaleToFit option
+ @return true if the matrix can be represented by the rectangle mapping.
+ */
+ bool setRectToRect(const SkRect& src, const SkRect& dst, ScaleToFit stf);
+ static SkMatrix MakeRectToRect(const SkRect& src, const SkRect& dst, ScaleToFit stf) {
+ SkMatrix m;
+ m.setRectToRect(src, dst, stf);
+ return m;
+ }
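+
+    // Illustrative sketch, not part of the upstream header: fitting content into a
+    // destination rect while preserving its aspect ratio and centering it.
+    //
+    //   SkMatrix m = SkMatrix::MakeRectToRect(srcBounds, dstBounds,
+    //                                         SkMatrix::kCenter_ScaleToFit);
+    //   SkRect mapped;
+    //   m.mapRect(&mapped, srcBounds);   // mapped is centered inside dstBounds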
+
+ /** Set the matrix such that the specified src points would map to the
+ specified dst points. count must be within [0..4].
+ @param src The array of src points
+ @param dst The array of dst points
+ @param count The number of points to use for the transformation
+ @return true if the matrix was set to the specified transformation
+ */
+ bool setPolyToPoly(const SkPoint src[], const SkPoint dst[], int count);
+
+ /** If this matrix can be inverted, return true and if inverse is not null,
+ set inverse to be the inverse of this matrix. If this matrix cannot be
+ inverted, ignore inverse and return false
+ */
+ bool SK_WARN_UNUSED_RESULT invert(SkMatrix* inverse) const {
+ // Allow the trivial case to be inlined.
+ if (this->isIdentity()) {
+ if (inverse) {
+ inverse->reset();
+ }
+ return true;
+ }
+ return this->invertNonIdentity(inverse);
+ }
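+
+    // Illustrative sketch, not part of the upstream header: mapping a device-space
+    // point back to local space, guarding against a non-invertible matrix.
+    //
+    //   SkMatrix inverse;
+    //   if (matrix.invert(&inverse)) {
+    //       SkPoint local = inverse.mapXY(deviceX, deviceY);
+    //   }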
+
+ /** Fills the passed array with affine identity values
+ in column major order.
+ @param affine The array to fill with affine identity values.
+ Must not be NULL.
+ */
+ static void SetAffineIdentity(SkScalar affine[6]);
+
+ /** Fills the passed array with the affine values in column major order.
+ If the matrix is a perspective transform, returns false
+ and does not change the passed array.
+ @param affine The array to fill with affine values. Ignored if NULL.
+ */
+ bool SK_WARN_UNUSED_RESULT asAffine(SkScalar affine[6]) const;
+
+ /** Set the matrix to the specified affine values.
+ * Note: these are passed in column major order.
+ */
+ void setAffine(const SkScalar affine[6]);
+
+ /** Apply this matrix to the array of points specified by src, and write
+ the transformed points into the array of points specified by dst.
+ dst[] = M * src[]
+ @param dst Where the transformed coordinates are written. It must
+ contain at least count entries
+ @param src The original coordinates that are to be transformed. It
+ must contain at least count entries
+ @param count The number of points in src to read, and then transform
+ into dst.
+ */
+ void mapPoints(SkPoint dst[], const SkPoint src[], int count) const {
+ SkASSERT((dst && src && count > 0) || 0 == count);
+ // no partial overlap
+ SkASSERT(src == dst || &dst[count] <= &src[0] || &src[count] <= &dst[0]);
+ this->getMapPtsProc()(*this, dst, src, count);
+ }
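+
+    // Illustrative sketch, not part of the upstream header: transforming a small
+    // array in place with the overload declared below (w and h are placeholders for
+    // a rect's width and height).
+    //
+    //   SkPoint quad[4] = {{0, 0}, {w, 0}, {w, h}, {0, h}};
+    //   matrix.mapPoints(quad, 4);   // quad[] now holds M * quad[]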
+
+ /** Apply this matrix to the array of points, overwriting it with the
+ transformed values.
+ dst[] = M * pts[]
+ @param pts The points to be transformed. It must contain at least
+ count entries
+ @param count The number of points in pts.
+ */
+ void mapPoints(SkPoint pts[], int count) const {
+ this->mapPoints(pts, pts, count);
+ }
+
+ /** Like mapPoints but with custom byte stride between the points. Stride
+ * should be a multiple of sizeof(SkScalar).
+ */
+ void mapPointsWithStride(SkPoint pts[], size_t stride, int count) const {
+ SkASSERT(stride >= sizeof(SkPoint));
+ SkASSERT(0 == stride % sizeof(SkScalar));
+ for (int i = 0; i < count; ++i) {
+ this->mapPoints(pts, pts, 1);
+ pts = (SkPoint*)((intptr_t)pts + stride);
+ }
+ }
+
+ /** Like mapPoints but with custom byte stride between the points.
+ */
+ void mapPointsWithStride(SkPoint dst[], SkPoint src[],
+ size_t stride, int count) const {
+ SkASSERT(stride >= sizeof(SkPoint));
+ SkASSERT(0 == stride % sizeof(SkScalar));
+ for (int i = 0; i < count; ++i) {
+ this->mapPoints(dst, src, 1);
+ src = (SkPoint*)((intptr_t)src + stride);
+ dst = (SkPoint*)((intptr_t)dst + stride);
+ }
+ }
+
+ /** Apply this matrix to the array of homogeneous points, specified by src,
+ where a homogeneous point is defined by 3 contiguous scalar values,
+ and write the transformed points into the array of scalars specified by dst.
+ dst[] = M * src[]
+ @param dst Where the transformed coordinates are written. It must
+ contain at least 3 * count entries
+ @param src The original coordinates that are to be transformed. It
+ must contain at least 3 * count entries
+ @param count The number of triples (homogeneous points) in src to read,
+ and then transform into dst.
+ */
+ void mapHomogeneousPoints(SkScalar dst[], const SkScalar src[], int count) const;
+
+ void mapXY(SkScalar x, SkScalar y, SkPoint* result) const {
+ SkASSERT(result);
+ this->getMapXYProc()(*this, x, y, result);
+ }
+
+ SkPoint mapXY(SkScalar x, SkScalar y) const {
+ SkPoint result;
+ this->getMapXYProc()(*this, x, y, &result);
+ return result;
+ }
+
+ /** Apply this matrix to the array of vectors specified by src, and write
+ the transformed vectors into the array of vectors specified by dst.
+ This is similar to mapPoints, but ignores any translation in the matrix.
+ @param dst Where the transformed coordinates are written. It must
+ contain at least count entries
+ @param src The original coordinates that are to be transformed. It
+ must contain at least count entries
+ @param count The number of vectors in src to read, and then transform
+ into dst.
+ */
+ void mapVectors(SkVector dst[], const SkVector src[], int count) const;
+
+ /** Apply this matrix to the array of vectors specified by src, and write
+ the transformed vectors into the array of vectors specified by dst.
+ This is similar to mapPoints, but ignores any translation in the matrix.
+ @param vecs The vectors to be transformed. It must contain at least
+ count entries
+ @param count The number of vectors in vecs.
+ */
+ void mapVectors(SkVector vecs[], int count) const {
+ this->mapVectors(vecs, vecs, count);
+ }
+
+ void mapVector(SkScalar dx, SkScalar dy, SkVector* result) const {
+ SkVector vec = { dx, dy };
+ this->mapVectors(result, &vec, 1);
+ }
+
+ SkVector mapVector(SkScalar dx, SkScalar dy) const {
+ SkVector vec = { dx, dy };
+ this->mapVectors(&vec, &vec, 1);
+ return vec;
+ }
+
+ /** Apply this matrix to the src rectangle, and write the transformed
+ rectangle into dst. This is accomplished by transforming the 4 corners
+ of src, and then setting dst to the bounds of those points.
+ @param dst Where the transformed rectangle is written.
+ @param src The original rectangle to be transformed.
+ @return the result of calling rectStaysRect()
+ */
+ bool mapRect(SkRect* dst, const SkRect& src) const;
+
+ /** Apply this matrix to the rectangle, and write the transformed rectangle
+ back into it. This is accomplished by transforming the 4 corners of
+ rect, and then setting it to the bounds of those points
+ @param rect The rectangle to transform.
+ @return the result of calling rectStaysRect()
+ */
+ bool mapRect(SkRect* rect) const {
+ return this->mapRect(rect, *rect);
+ }
+
+ /** Apply this matrix to the src rectangle, and write the four transformed
+ points into dst. The points written to dst will be the original top-left, top-right,
+ bottom-right, and bottom-left points transformed by the matrix.
+ @param dst Where the transformed quad is written.
+ @param rect The original rectangle to be transformed.
+ */
+ void mapRectToQuad(SkPoint dst[4], const SkRect& rect) const {
+ // This could potentially be faster if we only transformed each x and y of the rect once.
+ rect.toQuad(dst);
+ this->mapPoints(dst, 4);
+ }
+
+ /**
+ * Maps a rect to another rect, asserting (in debug mode) that the matrix only contains
+ * scale and translate elements. If it contains other elements, the results are undefined.
+ */
+ void mapRectScaleTranslate(SkRect* dst, const SkRect& src) const;
+
+ /** Return the mean radius of a circle after it has been mapped by
+ this matrix. NOTE: in perspective this value assumes the circle
+ has its center at the origin.
+ */
+ SkScalar mapRadius(SkScalar radius) const;
+
+ typedef void (*MapXYProc)(const SkMatrix& mat, SkScalar x, SkScalar y,
+ SkPoint* result);
+
+ static MapXYProc GetMapXYProc(TypeMask mask) {
+ SkASSERT((mask & ~kAllMasks) == 0);
+ return gMapXYProcs[mask & kAllMasks];
+ }
+
+ MapXYProc getMapXYProc() const {
+ return GetMapXYProc(this->getType());
+ }
+
+ typedef void (*MapPtsProc)(const SkMatrix& mat, SkPoint dst[],
+ const SkPoint src[], int count);
+
+ static MapPtsProc GetMapPtsProc(TypeMask mask) {
+ SkASSERT((mask & ~kAllMasks) == 0);
+ return gMapPtsProcs[mask & kAllMasks];
+ }
+
+ MapPtsProc getMapPtsProc() const {
+ return GetMapPtsProc(this->getType());
+ }
+
+ /** Returns true if the matrix can be stepped in X (not complex
+ perspective).
+ */
+ bool isFixedStepInX() const;
+
+ /** If the matrix can be stepped in X (not complex perspective)
+ then return the step value.
+ If it cannot, behavior is undefined.
+ */
+ SkVector fixedStepInX(SkScalar y) const;
+
+ /** Efficient comparison of two matrices. It distinguishes between zero and
+ * negative zero. It will return false when the sign of zero values is the
+ * only difference between the two matrices. It considers NaN values to be
+ * equal to themselves. So a matrix full of NaNs is "cheap equal" to
+ * another matrix full of NaNs iff the NaN values are bitwise identical
+     * whereas according to the strict == test a matrix with a NaN value
+ * is equal to nothing, including itself.
+ */
+ bool cheapEqualTo(const SkMatrix& m) const {
+ return 0 == memcmp(fMat, m.fMat, sizeof(fMat));
+ }
+
+ friend SK_API bool operator==(const SkMatrix& a, const SkMatrix& b);
+ friend SK_API bool operator!=(const SkMatrix& a, const SkMatrix& b) {
+ return !(a == b);
+ }
+
+ enum {
+ // writeTo/readFromMemory will never return a value larger than this
+ kMaxFlattenSize = 9 * sizeof(SkScalar) + sizeof(uint32_t)
+ };
+ // return the number of bytes written, whether or not buffer is null
+ size_t writeToMemory(void* buffer) const;
+ /**
+ * Reads data from the buffer parameter
+ *
+ * @param buffer Memory to read from
+ * @param length Amount of memory available in the buffer
+ * @return number of bytes read (must be a multiple of 4) or
+ * 0 if there was not enough memory available
+ */
+ size_t readFromMemory(const void* buffer, size_t length);
+
+ void dump() const;
+ void toString(SkString*) const;
+
+ /**
+ * Calculates the minimum scaling factor of the matrix as computed from the SVD of the upper
+     * left 2x2. If the min scale factor cannot be computed (for example overflow or perspective)
+ * -1 is returned.
+ *
+ * @return minimum scale factor
+ */
+ SkScalar getMinScale() const;
+
+ /**
+ * Calculates the maximum scaling factor of the matrix as computed from the SVD of the upper
+ * left 2x2. If the max scale factor cannot be computed (for example overflow or perspective)
+ * -1 is returned.
+ *
+ * @return maximum scale factor
+ */
+ SkScalar getMaxScale() const;
+
+ /**
+ * Gets both the min and max scale factors. The min scale factor is scaleFactors[0] and the max
+ * is scaleFactors[1]. If the min/max scale factors cannot be computed false is returned and the
+ * values of scaleFactors[] are undefined.
+ */
+ bool SK_WARN_UNUSED_RESULT getMinMaxScales(SkScalar scaleFactors[2]) const;
+
+ /**
+ * Attempt to decompose this matrix into a scale-only component and whatever remains, where
+ * the scale component is to be applied first.
+ *
+ * M -> Remaining * Scale
+ *
+ * On success, return true and assign the scale and remaining components (assuming their
+ * respective parameters are not null). On failure return false and ignore the parameters.
+ *
+ * Possible reasons to fail: perspective, one or more scale factors are zero.
+ */
+ bool decomposeScale(SkSize* scale, SkMatrix* remaining = NULL) const;
+
+ /**
+ * Return a reference to a const identity matrix
+ */
+ static const SkMatrix& I();
+
+ /**
+ * Return a reference to a const matrix that is "invalid", one that could
+ * never be used.
+ */
+ static const SkMatrix& InvalidMatrix();
+
+ /**
+ * Return the concatenation of two matrices, a * b.
+ */
+ static SkMatrix Concat(const SkMatrix& a, const SkMatrix& b) {
+ SkMatrix result;
+ result.setConcat(a, b);
+ return result;
+ }
+
+ /**
+ * Testing routine; the matrix's type cache should never need to be
+ * manually invalidated during normal use.
+ */
+ void dirtyMatrixTypeCache() {
+ this->setTypeMask(kUnknown_Mask);
+ }
+
+ /**
+ * Initialize the matrix to be scale + post-translate.
+ */
+ void setScaleTranslate(SkScalar sx, SkScalar sy, SkScalar tx, SkScalar ty) {
+ fMat[kMScaleX] = sx;
+ fMat[kMSkewX] = 0;
+ fMat[kMTransX] = tx;
+
+ fMat[kMSkewY] = 0;
+ fMat[kMScaleY] = sy;
+ fMat[kMTransY] = ty;
+
+ fMat[kMPersp0] = 0;
+ fMat[kMPersp1] = 0;
+ fMat[kMPersp2] = 1;
+
+ unsigned mask = 0;
+ if (sx != 1 || sy != 1) {
+ mask |= kScale_Mask;
+ }
+ if (tx || ty) {
+ mask |= kTranslate_Mask;
+ }
+ this->setTypeMask(mask | kRectStaysRect_Mask);
+ }
+
+ /**
+ * Are all elements of the matrix finite?
+ */
+ bool isFinite() const { return SkScalarsAreFinite(fMat, 9); }
+
+private:
+ enum {
+ /** Set if the matrix will map a rectangle to another rectangle. This
+ can be true if the matrix is scale-only, or rotates a multiple of
+ 90 degrees.
+
+ This bit will be set on identity matrices
+ */
+ kRectStaysRect_Mask = 0x10,
+
+ /** Set if the perspective bit is valid even though the rest of
+ the matrix is Unknown.
+ */
+ kOnlyPerspectiveValid_Mask = 0x40,
+
+ kUnknown_Mask = 0x80,
+
+ kORableMasks = kTranslate_Mask |
+ kScale_Mask |
+ kAffine_Mask |
+ kPerspective_Mask,
+
+ kAllMasks = kTranslate_Mask |
+ kScale_Mask |
+ kAffine_Mask |
+ kPerspective_Mask |
+ kRectStaysRect_Mask
+ };
+
+ SkScalar fMat[9];
+ mutable uint32_t fTypeMask;
+
+ static void ComputeInv(SkScalar dst[9], const SkScalar src[9], double invDet, bool isPersp);
+
+ uint8_t computeTypeMask() const;
+ uint8_t computePerspectiveTypeMask() const;
+
+ void setTypeMask(int mask) {
+ // allow kUnknown or a valid mask
+ SkASSERT(kUnknown_Mask == mask || (mask & kAllMasks) == mask ||
+ ((kUnknown_Mask | kOnlyPerspectiveValid_Mask) & mask)
+ == (kUnknown_Mask | kOnlyPerspectiveValid_Mask));
+ fTypeMask = SkToU8(mask);
+ }
+
+ void orTypeMask(int mask) {
+ SkASSERT((mask & kORableMasks) == mask);
+ fTypeMask = SkToU8(fTypeMask | mask);
+ }
+
+ void clearTypeMask(int mask) {
+ // only allow a valid mask
+ SkASSERT((mask & kAllMasks) == mask);
+ fTypeMask = fTypeMask & ~mask;
+ }
+
+ TypeMask getPerspectiveTypeMaskOnly() const {
+ if ((fTypeMask & kUnknown_Mask) &&
+ !(fTypeMask & kOnlyPerspectiveValid_Mask)) {
+ fTypeMask = this->computePerspectiveTypeMask();
+ }
+ return (TypeMask)(fTypeMask & 0xF);
+ }
+
+ /** Returns true if we already know that the matrix is identity;
+ false otherwise.
+ */
+ bool isTriviallyIdentity() const {
+ if (fTypeMask & kUnknown_Mask) {
+ return false;
+ }
+ return ((fTypeMask & 0xF) == 0);
+ }
+
+ bool SK_WARN_UNUSED_RESULT invertNonIdentity(SkMatrix* inverse) const;
+
+ static bool Poly2Proc(const SkPoint[], SkMatrix*, const SkPoint& scale);
+ static bool Poly3Proc(const SkPoint[], SkMatrix*, const SkPoint& scale);
+ static bool Poly4Proc(const SkPoint[], SkMatrix*, const SkPoint& scale);
+
+ static void Identity_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+ static void Trans_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+ static void Scale_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+ static void ScaleTrans_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+ static void Rot_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+ static void RotTrans_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+ static void Persp_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+
+ static const MapXYProc gMapXYProcs[];
+
+ static void Identity_pts(const SkMatrix&, SkPoint[], const SkPoint[], int);
+ static void Trans_pts(const SkMatrix&, SkPoint dst[], const SkPoint[], int);
+ static void Scale_pts(const SkMatrix&, SkPoint dst[], const SkPoint[], int);
+ static void ScaleTrans_pts(const SkMatrix&, SkPoint dst[], const SkPoint[],
+ int count);
+ static void Persp_pts(const SkMatrix&, SkPoint dst[], const SkPoint[], int);
+
+ static void Affine_vpts(const SkMatrix&, SkPoint dst[], const SkPoint[], int);
+
+ static const MapPtsProc gMapPtsProcs[];
+
+ friend class SkPerspIter;
+ friend class SkMatrixPriv;
+};
+SK_END_REQUIRE_DENSE
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkMatrix44.h b/gfx/skia/skia/include/core/SkMatrix44.h
new file mode 100644
index 000000000..9820ee58c
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMatrix44.h
@@ -0,0 +1,497 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMatrix44_DEFINED
+#define SkMatrix44_DEFINED
+
+#include "SkMatrix.h"
+#include "SkScalar.h"
+
+#ifdef SK_MSCALAR_IS_DOUBLE
+#ifdef SK_MSCALAR_IS_FLOAT
+ #error "can't define MSCALAR both as DOUBLE and FLOAT"
+#endif
+ typedef double SkMScalar;
+
+ static inline double SkFloatToMScalar(float x) {
+ return static_cast<double>(x);
+ }
+ static inline float SkMScalarToFloat(double x) {
+ return static_cast<float>(x);
+ }
+ static inline double SkDoubleToMScalar(double x) {
+ return x;
+ }
+ static inline double SkMScalarToDouble(double x) {
+ return x;
+ }
+ static inline double SkMScalarAbs(double x) {
+ return fabs(x);
+ }
+ static const SkMScalar SK_MScalarPI = 3.141592653589793;
+
+ #define SkMScalarFloor(x) sk_double_floor(x)
+ #define SkMScalarCeil(x) sk_double_ceil(x)
+ #define SkMScalarRound(x) sk_double_round(x)
+
+ #define SkMScalarFloorToInt(x) sk_double_floor2int(x)
+ #define SkMScalarCeilToInt(x) sk_double_ceil2int(x)
+ #define SkMScalarRoundToInt(x) sk_double_round2int(x)
+
+
+#elif defined SK_MSCALAR_IS_FLOAT
+#ifdef SK_MSCALAR_IS_DOUBLE
+ #error "can't define MSCALAR both as DOUBLE and FLOAT"
+#endif
+ typedef float SkMScalar;
+
+ static inline float SkFloatToMScalar(float x) {
+ return x;
+ }
+ static inline float SkMScalarToFloat(float x) {
+ return x;
+ }
+ static inline float SkDoubleToMScalar(double x) {
+ return static_cast<float>(x);
+ }
+ static inline double SkMScalarToDouble(float x) {
+ return static_cast<double>(x);
+ }
+ static inline float SkMScalarAbs(float x) {
+ return sk_float_abs(x);
+ }
+ static const SkMScalar SK_MScalarPI = 3.14159265f;
+
+ #define SkMScalarFloor(x) sk_float_floor(x)
+ #define SkMScalarCeil(x) sk_float_ceil(x)
+ #define SkMScalarRound(x) sk_float_round(x)
+
+ #define SkMScalarFloorToInt(x) sk_float_floor2int(x)
+ #define SkMScalarCeilToInt(x) sk_float_ceil2int(x)
+ #define SkMScalarRoundToInt(x) sk_float_round2int(x)
+
+#endif
+
+#define SkIntToMScalar(n) static_cast<SkMScalar>(n)
+
+#define SkMScalarToScalar(x) SkMScalarToFloat(x)
+#define SkScalarToMScalar(x) SkFloatToMScalar(x)
+
+static const SkMScalar SK_MScalar1 = 1;
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct SkVector4 {
+ SkScalar fData[4];
+
+ SkVector4() {
+ this->set(0, 0, 0, 1);
+ }
+ SkVector4(const SkVector4& src) {
+ memcpy(fData, src.fData, sizeof(fData));
+ }
+ SkVector4(SkScalar x, SkScalar y, SkScalar z, SkScalar w = SK_Scalar1) {
+ fData[0] = x;
+ fData[1] = y;
+ fData[2] = z;
+ fData[3] = w;
+ }
+
+ SkVector4& operator=(const SkVector4& src) {
+ memcpy(fData, src.fData, sizeof(fData));
+ return *this;
+ }
+
+ bool operator==(const SkVector4& v) {
+ return fData[0] == v.fData[0] && fData[1] == v.fData[1] &&
+ fData[2] == v.fData[2] && fData[3] == v.fData[3];
+ }
+ bool operator!=(const SkVector4& v) {
+ return !(*this == v);
+ }
+ bool equals(SkScalar x, SkScalar y, SkScalar z, SkScalar w = SK_Scalar1) {
+ return fData[0] == x && fData[1] == y &&
+ fData[2] == z && fData[3] == w;
+ }
+
+ void set(SkScalar x, SkScalar y, SkScalar z, SkScalar w = SK_Scalar1) {
+ fData[0] = x;
+ fData[1] = y;
+ fData[2] = z;
+ fData[3] = w;
+ }
+};
+
+class SK_API SkMatrix44 {
+public:
+
+ enum Uninitialized_Constructor {
+ kUninitialized_Constructor
+ };
+ enum Identity_Constructor {
+ kIdentity_Constructor
+ };
+
+ SkMatrix44(Uninitialized_Constructor) {}
+
+ constexpr SkMatrix44(Identity_Constructor)
+ : fMat{{ 1, 0, 0, 0, },
+ { 0, 1, 0, 0, },
+ { 0, 0, 1, 0, },
+ { 0, 0, 0, 1, }}
+ , fTypeMask(kIdentity_Mask)
+ {}
+
+ SK_ATTR_DEPRECATED("use the constructors that take an enum")
+ SkMatrix44() { this->setIdentity(); }
+
+ SkMatrix44(const SkMatrix44& src) {
+ memcpy(fMat, src.fMat, sizeof(fMat));
+ fTypeMask = src.fTypeMask;
+ }
+
+ SkMatrix44(const SkMatrix44& a, const SkMatrix44& b) {
+ this->setConcat(a, b);
+ }
+
+ SkMatrix44& operator=(const SkMatrix44& src) {
+ if (&src != this) {
+ memcpy(fMat, src.fMat, sizeof(fMat));
+ fTypeMask = src.fTypeMask;
+ }
+ return *this;
+ }
+
+ bool operator==(const SkMatrix44& other) const;
+ bool operator!=(const SkMatrix44& other) const {
+ return !(other == *this);
+ }
+
+ /* When converting from SkMatrix44 to SkMatrix, the third row and
+     * column are dropped. When converting from SkMatrix to SkMatrix44
+ * the third row and column remain as identity:
+ * [ a b c ] [ a b 0 c ]
+ * [ d e f ] -> [ d e 0 f ]
+ * [ g h i ] [ 0 0 1 0 ]
+ * [ g h 0 i ]
+ */
+ SkMatrix44(const SkMatrix&);
+ SkMatrix44& operator=(const SkMatrix& src);
+ operator SkMatrix() const;
+
+ /**
+ * Return a reference to a const identity matrix
+ */
+ static const SkMatrix44& I();
+
+ enum TypeMask {
+ kIdentity_Mask = 0,
+ kTranslate_Mask = 0x01, //!< set if the matrix has translation
+ kScale_Mask = 0x02, //!< set if the matrix has any scale != 1
+ kAffine_Mask = 0x04, //!< set if the matrix skews or rotates
+ kPerspective_Mask = 0x08 //!< set if the matrix is in perspective
+ };
+
+ /**
+ * Returns a bitfield describing the transformations the matrix may
+ * perform. The bitfield is computed conservatively, so it may include
+ * false positives. For example, when kPerspective_Mask is true, all
+ * other bits may be set to true even in the case of a pure perspective
+ * transform.
+ */
+ inline TypeMask getType() const {
+ if (fTypeMask & kUnknown_Mask) {
+ fTypeMask = this->computeTypeMask();
+ }
+ SkASSERT(!(fTypeMask & kUnknown_Mask));
+ return (TypeMask)fTypeMask;
+ }
+
+ /**
+ * Return true if the matrix is identity.
+ */
+ inline bool isIdentity() const {
+ return kIdentity_Mask == this->getType();
+ }
+
+ /**
+ * Return true if the matrix contains translate or is identity.
+ */
+ inline bool isTranslate() const {
+ return !(this->getType() & ~kTranslate_Mask);
+ }
+
+ /**
+ * Return true if the matrix only contains scale or translate or is identity.
+ */
+ inline bool isScaleTranslate() const {
+ return !(this->getType() & ~(kScale_Mask | kTranslate_Mask));
+ }
+
+ /**
+ * Returns true if the matrix only contains scale or is identity.
+ */
+ inline bool isScale() const {
+ return !(this->getType() & ~kScale_Mask);
+ }
+
+ inline bool hasPerspective() const {
+ return SkToBool(this->getType() & kPerspective_Mask);
+ }
+
+ void setIdentity();
+ inline void reset() { this->setIdentity();}
+
+ /**
+ * get a value from the matrix. The row,col parameters work as follows:
+ * (0, 0) scale-x
+ * (0, 3) translate-x
+ * (3, 0) perspective-x
+ */
+ inline SkMScalar get(int row, int col) const {
+ SkASSERT((unsigned)row <= 3);
+ SkASSERT((unsigned)col <= 3);
+ return fMat[col][row];
+ }
+
+ /**
+ * set a value in the matrix. The row,col parameters work as follows:
+ * (0, 0) scale-x
+ * (0, 3) translate-x
+ * (3, 0) perspective-x
+ */
+ inline void set(int row, int col, SkMScalar value) {
+ SkASSERT((unsigned)row <= 3);
+ SkASSERT((unsigned)col <= 3);
+ fMat[col][row] = value;
+ this->dirtyTypeMask();
+ }
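+
+    // Illustrative note, not part of the upstream header: with the (row, col)
+    // convention above, a translation lives in the last column even though storage
+    // is fMat[col][row]. tx/ty/tz are placeholders.
+    //
+    //   SkMatrix44 m(SkMatrix44::kIdentity_Constructor);
+    //   m.set(0, 3, tx);   // translate-x (stored at fMat[3][0])
+    //   m.set(1, 3, ty);   // translate-y (stored at fMat[3][1])
+    //   m.set(2, 3, tz);   // translate-z (stored at fMat[3][2])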
+
+ inline double getDouble(int row, int col) const {
+ return SkMScalarToDouble(this->get(row, col));
+ }
+ inline void setDouble(int row, int col, double value) {
+ this->set(row, col, SkDoubleToMScalar(value));
+ }
+ inline float getFloat(int row, int col) const {
+ return SkMScalarToFloat(this->get(row, col));
+ }
+ inline void setFloat(int row, int col, float value) {
+ this->set(row, col, SkFloatToMScalar(value));
+ }
+
+ /** These methods allow one to efficiently read matrix entries into an
+ * array. The given array must have room for exactly 16 entries. Whenever
+ * possible, they will try to use memcpy rather than an entry-by-entry
+ * copy.
+ *
+ * Col major indicates that consecutive elements of columns will be stored
+ * contiguously in memory. Row major indicates that consecutive elements
+ * of rows will be stored contiguously in memory.
+ */
+ void asColMajorf(float[]) const;
+ void asColMajord(double[]) const;
+ void asRowMajorf(float[]) const;
+ void asRowMajord(double[]) const;
+
+ /** These methods allow one to efficiently set all matrix entries from an
+ * array. The given array must have room for exactly 16 entries. Whenever
+ * possible, they will try to use memcpy rather than an entry-by-entry
+ * copy.
+ *
+ * Col major indicates that input memory will be treated as if consecutive
+ * elements of columns are stored contiguously in memory. Row major
+ * indicates that input memory will be treated as if consecutive elements
+ * of rows are stored contiguously in memory.
+ */
+ void setColMajorf(const float[]);
+ void setColMajord(const double[]);
+ void setRowMajorf(const float[]);
+ void setRowMajord(const double[]);
+
+#ifdef SK_MSCALAR_IS_FLOAT
+ void setColMajor(const SkMScalar data[]) { this->setColMajorf(data); }
+ void setRowMajor(const SkMScalar data[]) { this->setRowMajorf(data); }
+#else
+ void setColMajor(const SkMScalar data[]) { this->setColMajord(data); }
+ void setRowMajor(const SkMScalar data[]) { this->setRowMajord(data); }
+#endif
+
+ /* This sets the top-left of the matrix and clears the translation and
+ * perspective components (with [3][3] set to 1). mXY is interpreted
+ * as the matrix entry at col = X, row = Y. */
+ void set3x3(SkMScalar m00, SkMScalar m01, SkMScalar m02,
+ SkMScalar m10, SkMScalar m11, SkMScalar m12,
+ SkMScalar m20, SkMScalar m21, SkMScalar m22);
+ void set3x3RowMajorf(const float[]);
+
+ void setTranslate(SkMScalar dx, SkMScalar dy, SkMScalar dz);
+ void preTranslate(SkMScalar dx, SkMScalar dy, SkMScalar dz);
+ void postTranslate(SkMScalar dx, SkMScalar dy, SkMScalar dz);
+
+ void setScale(SkMScalar sx, SkMScalar sy, SkMScalar sz);
+ void preScale(SkMScalar sx, SkMScalar sy, SkMScalar sz);
+ void postScale(SkMScalar sx, SkMScalar sy, SkMScalar sz);
+
+ inline void setScale(SkMScalar scale) {
+ this->setScale(scale, scale, scale);
+ }
+ inline void preScale(SkMScalar scale) {
+ this->preScale(scale, scale, scale);
+ }
+ inline void postScale(SkMScalar scale) {
+ this->postScale(scale, scale, scale);
+ }
+
+ void setRotateDegreesAbout(SkMScalar x, SkMScalar y, SkMScalar z,
+ SkMScalar degrees) {
+ this->setRotateAbout(x, y, z, degrees * SK_MScalarPI / 180);
+ }
+
+ /** Rotate about the vector [x,y,z]. If that vector is not unit-length,
+ it will be automatically resized.
+ */
+ void setRotateAbout(SkMScalar x, SkMScalar y, SkMScalar z,
+ SkMScalar radians);
+ /** Rotate about the vector [x,y,z]. Does not check the length of the
+ vector, assuming it is unit-length.
+ */
+ void setRotateAboutUnit(SkMScalar x, SkMScalar y, SkMScalar z,
+ SkMScalar radians);
+
+ void setConcat(const SkMatrix44& a, const SkMatrix44& b);
+ inline void preConcat(const SkMatrix44& m) {
+ this->setConcat(*this, m);
+ }
+ inline void postConcat(const SkMatrix44& m) {
+ this->setConcat(m, *this);
+ }
+
+ friend SkMatrix44 operator*(const SkMatrix44& a, const SkMatrix44& b) {
+ return SkMatrix44(a, b);
+ }
+
+ /** If this is invertible, return that in inverse and return true. If it is
+ not invertible, return false and leave the inverse parameter in an
+ unspecified state.
+ */
+ bool invert(SkMatrix44* inverse) const;
+
+ /** Transpose this matrix in place. */
+ void transpose();
+
+ /** Apply the matrix to the src vector, returning the new vector in dst.
+ It is legal for src and dst to point to the same memory.
+ */
+ void mapScalars(const SkScalar src[4], SkScalar dst[4]) const;
+ inline void mapScalars(SkScalar vec[4]) const {
+ this->mapScalars(vec, vec);
+ }
+
+ SK_ATTR_DEPRECATED("use mapScalars")
+ void map(const SkScalar src[4], SkScalar dst[4]) const {
+ this->mapScalars(src, dst);
+ }
+
+ SK_ATTR_DEPRECATED("use mapScalars")
+ void map(SkScalar vec[4]) const {
+ this->mapScalars(vec, vec);
+ }
+
+#ifdef SK_MSCALAR_IS_DOUBLE
+ void mapMScalars(const SkMScalar src[4], SkMScalar dst[4]) const;
+#elif defined SK_MSCALAR_IS_FLOAT
+ inline void mapMScalars(const SkMScalar src[4], SkMScalar dst[4]) const {
+ this->mapScalars(src, dst);
+ }
+#endif
+ inline void mapMScalars(SkMScalar vec[4]) const {
+ this->mapMScalars(vec, vec);
+ }
+
+ friend SkVector4 operator*(const SkMatrix44& m, const SkVector4& src) {
+ SkVector4 dst;
+ m.mapScalars(src.fData, dst.fData);
+ return dst;
+ }
+
+ /**
+ * map an array of [x, y, 0, 1] through the matrix, returning an array
+ * of [x', y', z', w'].
+ *
+ * @param src2 array of [x, y] pairs, with implied z=0 and w=1
+ * @param count number of [x, y] pairs in src2
+ * @param dst4 array of [x', y', z', w'] quads as the output.
+ */
+ void map2(const float src2[], int count, float dst4[]) const;
+ void map2(const double src2[], int count, double dst4[]) const;
+
+ /** Returns true if transforming an axis-aligned square in 2d by this matrix
+ will produce another 2d axis-aligned square; typically means the matrix
+ is a scale with perhaps a 90-degree rotation. A 3d rotation through 90
+ degrees into a perpendicular plane collapses a square to a line, but
+ is still considered to be axis-aligned.
+
+ By default, tolerates very slight error due to float imprecisions;
+ a 90-degree rotation can still end up with 10^-17 of
+ "non-axis-aligned" result.
+ */
+ bool preserves2dAxisAlignment(SkMScalar epsilon = SK_ScalarNearlyZero) const;
+
+ void dump() const;
+
+ double determinant() const;
+
+private:
+ /* This is indexed by [col][row]. */
+ SkMScalar fMat[4][4];
+ mutable unsigned fTypeMask;
+
+ enum {
+ kUnknown_Mask = 0x80,
+
+ kAllPublic_Masks = 0xF
+ };
+
+ void as3x4RowMajorf(float[]) const;
+ void set3x4RowMajorf(const float[]);
+
+ SkMScalar transX() const { return fMat[3][0]; }
+ SkMScalar transY() const { return fMat[3][1]; }
+ SkMScalar transZ() const { return fMat[3][2]; }
+
+ SkMScalar scaleX() const { return fMat[0][0]; }
+ SkMScalar scaleY() const { return fMat[1][1]; }
+ SkMScalar scaleZ() const { return fMat[2][2]; }
+
+ SkMScalar perspX() const { return fMat[0][3]; }
+ SkMScalar perspY() const { return fMat[1][3]; }
+ SkMScalar perspZ() const { return fMat[2][3]; }
+
+ int computeTypeMask() const;
+
+ inline void dirtyTypeMask() {
+ fTypeMask = kUnknown_Mask;
+ }
+
+ inline void setTypeMask(int mask) {
+ SkASSERT(0 == (~(kAllPublic_Masks | kUnknown_Mask) & mask));
+ fTypeMask = mask;
+ }
+
+ /**
+ * Does not take the time to 'compute' the typemask. Only returns true if
+ * we already know that this matrix is identity.
+ */
+ inline bool isTriviallyIdentity() const {
+ return 0 == fTypeMask;
+ }
+
+ friend class SkColorSpace;
+};
+
+#endif
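
A minimal usage sketch for the SkMatrix44 API declared above; it uses only members visible in this header (I(), setTranslate, setScale, the concatenating constructor, get, mapScalars, invert). The function name and the numeric values are illustrative, not part of Skia.

#include "SkMatrix44.h"

// Build translate * scale, read back an entry, map a point, and invert.
static void sketchMatrix44() {
    SkMatrix44 t = SkMatrix44::I();      // start from the identity, then edit
    t.setTranslate(10, 20, 0);

    SkMatrix44 s = SkMatrix44::I();
    s.setScale(2, 2, 1);

    SkMatrix44 ts(t, s);                 // uses setConcat(a, b): ts = t * s

    SkMScalar tx = ts.get(0, 3);         // translate-x lives at (row 0, col 3)
    (void)tx;

    SkScalar v[4] = { 1, 1, 0, 1 };      // [x, y, z, w]
    ts.mapScalars(v);                    // transform the vector in place

    SkMatrix44 inverse = SkMatrix44::I();
    if (ts.invert(&inverse)) {
        // inverse now maps transformed points back to their originals
    }
}
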
diff --git a/gfx/skia/skia/include/core/SkMetaData.h b/gfx/skia/skia/include/core/SkMetaData.h
new file mode 100644
index 000000000..c8ca7f141
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMetaData.h
@@ -0,0 +1,175 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkMetaData_DEFINED
+#define SkMetaData_DEFINED
+
+#include "SkScalar.h"
+
+class SkRefCnt;
+
+class SK_API SkMetaData {
+public:
+ /**
+ * Used to manage the life-cycle of a ptr in the metadata. This is optional
+ * in setPtr, and is only invoked when either copying one metadata to
+ * another, or when the metadata is destroyed.
+ *
+ * setPtr(name, ptr, proc) {
+ * fPtr = proc(ptr, true);
+ * }
+ *
+ * copy: A = B {
+ * A.fPtr = B.fProc(B.fPtr, true);
+ * }
+ *
+ * ~SkMetaData {
+ * fProc(fPtr, false);
+ * }
+ */
+ typedef void* (*PtrProc)(void* ptr, bool doRef);
+
+ /**
+ * Implements PtrProc for SkRefCnt pointers
+ */
+ static void* RefCntProc(void* ptr, bool doRef);
+
+ SkMetaData();
+ SkMetaData(const SkMetaData& src);
+ ~SkMetaData();
+
+ SkMetaData& operator=(const SkMetaData& src);
+
+ void reset();
+
+ bool findS32(const char name[], int32_t* value = NULL) const;
+ bool findScalar(const char name[], SkScalar* value = NULL) const;
+ const SkScalar* findScalars(const char name[], int* count,
+ SkScalar values[] = NULL) const;
+ const char* findString(const char name[]) const;
+ bool findPtr(const char name[], void** value = NULL, PtrProc* = NULL) const;
+ bool findBool(const char name[], bool* value = NULL) const;
+ const void* findData(const char name[], size_t* byteCount = NULL) const;
+
+ bool hasS32(const char name[], int32_t value) const {
+ int32_t v;
+ return this->findS32(name, &v) && v == value;
+ }
+ bool hasScalar(const char name[], SkScalar value) const {
+ SkScalar v;
+ return this->findScalar(name, &v) && v == value;
+ }
+ bool hasString(const char name[], const char value[]) const {
+ const char* v = this->findString(name);
+ return (v == NULL && value == NULL) ||
+ (v != NULL && value != NULL && !strcmp(v, value));
+ }
+ bool hasPtr(const char name[], void* value) const {
+ void* v;
+ return this->findPtr(name, &v) && v == value;
+ }
+ bool hasBool(const char name[], bool value) const {
+ bool v;
+ return this->findBool(name, &v) && v == value;
+ }
+ bool hasData(const char name[], const void* data, size_t byteCount) const {
+ size_t len;
+ const void* ptr = this->findData(name, &len);
+ return ptr && len == byteCount && !memcmp(ptr, data, len);
+ }
+
+ void setS32(const char name[], int32_t value);
+ void setScalar(const char name[], SkScalar value);
+ SkScalar* setScalars(const char name[], int count, const SkScalar values[] = NULL);
+ void setString(const char name[], const char value[]);
+ void setPtr(const char name[], void* value, PtrProc proc = NULL);
+ void setBool(const char name[], bool value);
+ // the data is copied from the input pointer.
+ void setData(const char name[], const void* data, size_t byteCount);
+
+ bool removeS32(const char name[]);
+ bool removeScalar(const char name[]);
+ bool removeString(const char name[]);
+ bool removePtr(const char name[]);
+ bool removeBool(const char name[]);
+ bool removeData(const char name[]);
+
+ // helpers for SkRefCnt
+ bool findRefCnt(const char name[], SkRefCnt** ptr = NULL) {
+ return this->findPtr(name, reinterpret_cast<void**>(ptr));
+ }
+ bool hasRefCnt(const char name[], SkRefCnt* ptr) {
+ return this->hasPtr(name, ptr);
+ }
+ void setRefCnt(const char name[], SkRefCnt* ptr) {
+ this->setPtr(name, ptr, RefCntProc);
+ }
+ bool removeRefCnt(const char name[]) {
+ return this->removePtr(name);
+ }
+
+ enum Type {
+ kS32_Type,
+ kScalar_Type,
+ kString_Type,
+ kPtr_Type,
+ kBool_Type,
+ kData_Type,
+
+ kTypeCount
+ };
+
+ struct Rec;
+ class Iter;
+ friend class Iter;
+
+ class Iter {
+ public:
+ Iter() : fRec(NULL) {}
+ Iter(const SkMetaData&);
+
+ /** Reset the iterator, so that calling next() will return the first
+ data element. This is done implicitly in the constructor.
+ */
+ void reset(const SkMetaData&);
+
+ /** Each time next is called, it returns the name of the next data element,
+ or null when there are no more elements. If non-null is returned, then the
+ element's type is returned (if not null), and the number of data values
+ is returned in count (if not null).
+ */
+ const char* next(Type*, int* count);
+
+ private:
+ Rec* fRec;
+ };
+
+public:
+ struct Rec {
+ Rec* fNext;
+ uint16_t fDataCount; // number of elements
+ uint8_t fDataLen; // sizeof a single element
+ uint8_t fType;
+
+ const void* data() const { return (this + 1); }
+ void* data() { return (this + 1); }
+ const char* name() const { return (const char*)this->data() + fDataLen * fDataCount; }
+ char* name() { return (char*)this->data() + fDataLen * fDataCount; }
+
+ static Rec* Alloc(size_t);
+ static void Free(Rec*);
+ };
+ Rec* fRec;
+
+ const Rec* find(const char name[], Type) const;
+ void* set(const char name[], const void* data, size_t len, Type, int count);
+ bool remove(const char name[], Type);
+};
+
+#endif
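
A short sketch of the SkMetaData API above: store a few named values, query them, iterate over the entries, and remove one. The key names ("frame", "opacity", "label") and values are illustrative only.

#include "SkMetaData.h"

static void sketchMetaData() {
    SkMetaData md;
    md.setS32("frame", 12);
    md.setScalar("opacity", 0.5f);
    md.setString("label", "thumbnail");

    int32_t frame;
    if (md.findS32("frame", &frame) && md.hasScalar("opacity", 0.5f)) {
        // both entries exist with the expected values
    }

    SkMetaData::Iter iter(md);
    SkMetaData::Type type;
    int count;
    while (const char* name = iter.next(&type, &count)) {
        // name/type/count describe each stored entry in turn
    }

    md.removeS32("frame");   // true if an S32 named "frame" was present
}
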
diff --git a/gfx/skia/skia/include/core/SkMilestone.h b/gfx/skia/skia/include/core/SkMilestone.h
new file mode 100644
index 000000000..4c7988ecd
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMilestone.h
@@ -0,0 +1,9 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SK_MILESTONE
+#define SK_MILESTONE 55
+#endif
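
A sketch of how code embedding this copy of Skia might gate on the bundled milestone defined above; the threshold and error text are illustrative.

#include "SkMilestone.h"

// Fail the build early if the in-tree Skia is older than this integration expects.
#if SK_MILESTONE < 55
#  error "expected Skia milestone 55 or newer"
#endif
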
diff --git a/gfx/skia/skia/include/core/SkMultiPictureDraw.h b/gfx/skia/skia/include/core/SkMultiPictureDraw.h
new file mode 100644
index 000000000..cd46a303a
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMultiPictureDraw.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMultiPictureDraw_DEFINED
+#define SkMultiPictureDraw_DEFINED
+
+#include "../private/SkTDArray.h"
+#include "SkMatrix.h"
+
+class SkCanvas;
+class SkPaint;
+class SkPicture;
+
+/** \class SkMultiPictureDraw
+
+ The MultiPictureDraw object accepts several picture/canvas pairs and
+ then attempts to optimally draw the pictures into the canvases, sharing
+ as many resources as possible.
+*/
+class SK_API SkMultiPictureDraw {
+public:
+ /**
+ * Create an object to optimize the drawing of multiple pictures.
+ * @param reserve Hint for the number of add calls expected to be issued
+ */
+ SkMultiPictureDraw(int reserve = 0);
+ ~SkMultiPictureDraw() { this->reset(); }
+
+ /**
+ * Add a canvas/picture pair for later rendering.
+ * @param canvas the canvas in which to draw picture
+ * @param picture the picture to draw into canvas
+ * @param matrix if non-NULL, applied to the CTM when drawing
+ * @param paint if non-NULL, draw picture to a temporary buffer
+ * and then apply the paint when the result is drawn
+ */
+ void add(SkCanvas* canvas,
+ const SkPicture* picture,
+ const SkMatrix* matrix = NULL,
+ const SkPaint* paint = NULL);
+
+ /**
+ * Perform all the previously added draws. This will reset the state
+ * of this object. If flush is true, all canvases are flushed after
+ * draw.
+ */
+ void draw(bool flush = false);
+
+ /**
+ * Abandon all buffered draws and reset to the initial state.
+ */
+ void reset();
+
+private:
+ struct DrawData {
+ SkCanvas* fCanvas; // reffed
+ const SkPicture* fPicture; // reffed
+ SkMatrix fMatrix;
+ SkPaint* fPaint; // owned
+
+ void init(SkCanvas*, const SkPicture*, const SkMatrix*, const SkPaint*);
+ void draw();
+
+ static void Reset(SkTDArray<DrawData>&);
+ };
+
+ SkTDArray<DrawData> fThreadSafeDrawData;
+ SkTDArray<DrawData> fGPUDrawData;
+};
+
+#endif
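
A sketch of SkMultiPictureDraw as declared above: queue two canvas/picture pairs, offset the second one, and render both in a single draw() call. The canvases and pictures are assumed to come from the caller; only pointers are needed, so the forward declarations in the header suffice.

#include "SkMatrix.h"
#include "SkMultiPictureDraw.h"

static void sketchMultiPictureDraw(SkCanvas* canvasA, const SkPicture* pictureA,
                                   SkCanvas* canvasB, const SkPicture* pictureB) {
    SkMultiPictureDraw mpd(2);            // reserve hint: two add() calls follow

    mpd.add(canvasA, pictureA);           // draw pictureA into canvasA as-is

    SkMatrix shift;
    shift.setTranslate(100, 0);           // offset the second picture
    mpd.add(canvasB, pictureB, &shift);

    mpd.draw(true);                       // render everything, flushing both canvases
}
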
diff --git a/gfx/skia/skia/include/core/SkOSFile.h b/gfx/skia/skia/include/core/SkOSFile.h
new file mode 100644
index 000000000..f977327e2
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkOSFile.h
@@ -0,0 +1,149 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+// TODO: add unittests for all these operations
+
+#ifndef SkOSFile_DEFINED
+#define SkOSFile_DEFINED
+
+#include <stdio.h>
+
+#include "SkString.h"
+
+enum SkFILE_Flags {
+ kRead_SkFILE_Flag = 0x01,
+ kWrite_SkFILE_Flag = 0x02
+};
+
+#ifdef _WIN32
+const static char SkPATH_SEPARATOR = '\\';
+#else
+const static char SkPATH_SEPARATOR = '/';
+#endif
+
+FILE* sk_fopen(const char path[], SkFILE_Flags);
+void sk_fclose(FILE*);
+
+size_t sk_fgetsize(FILE*);
+/** Return true if the file could seek back to the beginning
+*/
+bool sk_frewind(FILE*);
+
+size_t sk_fread(void* buffer, size_t byteCount, FILE*);
+size_t sk_fwrite(const void* buffer, size_t byteCount, FILE*);
+
+char* sk_fgets(char* str, int size, FILE* f);
+
+void sk_fflush(FILE*);
+void sk_fsync(FILE*);
+
+bool sk_fseek(FILE*, size_t);
+bool sk_fmove(FILE*, long);
+size_t sk_ftell(FILE*);
+
+/** Maps a file into memory. Returns the address and length on success, NULL otherwise.
+ * The mapping is read only.
+ * When finished with the mapping, free the returned pointer with sk_fmunmap.
+ */
+void* sk_fmmap(FILE* f, size_t* length);
+
+/** Maps a file descriptor into memory. Returns the address and length on success, NULL otherwise.
+ * The mapping is read only.
+ * When finished with the mapping, free the returned pointer with sk_fmunmap.
+ */
+void* sk_fdmmap(int fd, size_t* length);
+
+/** Unmaps a file previously mapped by sk_fmmap or sk_fdmmap.
+ * The length parameter must be the same as returned from sk_fmmap.
+ */
+void sk_fmunmap(const void* addr, size_t length);
+
+/** Returns true if the two FILEs point at the exact same filesystem object. */
+bool sk_fidentical(FILE* a, FILE* b);
+
+/** Returns the underlying file descriptor for the given file.
+ * The return value will be < 0 on failure.
+ */
+int sk_fileno(FILE* f);
+
+/** Returns true if something (file, directory, ???) exists at this path,
+ * and has the specified access flags.
+ */
+bool sk_exists(const char *path, SkFILE_Flags = (SkFILE_Flags)0);
+
+// Returns true if a directory exists at this path.
+bool sk_isdir(const char *path);
+
+// Have we reached the end of the file?
+int sk_feof(FILE *);
+
+
+// Create a new directory at this path; returns true if successful.
+// If the directory already existed, this will return true.
+// Description of the error, if any, will be written to stderr.
+bool sk_mkdir(const char* path);
+
+class SkOSFile {
+public:
+ class Iter {
+ public:
+ Iter();
+ Iter(const char path[], const char suffix[] = NULL);
+ ~Iter();
+
+ void reset(const char path[], const char suffix[] = NULL);
+ /** If getDir is true, only returns directories.
+ Results are undefined if true and false calls are
+ interleaved on a single iterator.
+ */
+ bool next(SkString* name, bool getDir = false);
+
+ static const size_t kStorageSize = 40;
+ private:
+ SkAlignedSStorage<kStorageSize> fSelf;
+ };
+};
+
+/**
+ * Functions for modifying SkStrings which represent paths on the filesystem.
+ */
+class SkOSPath {
+public:
+ /**
+ * Assembles rootPath and relativePath into a single path, like this:
+ * rootPath/relativePath.
+ * It is okay to call with a NULL rootPath and/or relativePath. A path
+ * separator will still be inserted.
+ *
+ * Uses SkPATH_SEPARATOR so that it works on all platforms.
+ */
+ static SkString Join(const char* rootPath, const char* relativePath);
+
+ /**
+ * Return the name of the file, ignoring the directory structure.
+ * Behaves like python's os.path.basename. If the fullPath is
+ * /dir/subdir/, an empty string is returned.
+ * @param fullPath Full path to the file.
+ * @return SkString The basename of the file - anything beyond the
+ * final slash, or the full name if there is no slash.
+ */
+ static SkString Basename(const char* fullPath);
+
+ /**
+ * Given a qualified file name returns the directory.
+ * Behaves like python's os.path.dirname. If the fullPath is
+ * /dir/subdir/ the return will be /dir/subdir/
+ * @param fullPath Full path to the file.
+ * @return SkString The dir containing the file - anything preceding the
+ * final slash, or the full name if ending in a slash.
+ */
+ static SkString Dirname(const char* fullPath);
+};
+
+#endif
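
A sketch of the sk_f* and SkOSPath/SkOSFile::Iter APIs above: build a path, read a small chunk from the file, and enumerate a directory. The "/tmp" paths and ".bin" suffix are illustrative.

#include "SkOSFile.h"

static void sketchOSFile() {
    SkString path = SkOSPath::Join("/tmp", "example.bin");

    FILE* f = sk_fopen(path.c_str(), kRead_SkFILE_Flag);
    if (f) {
        char buffer[64];
        size_t bytesRead = sk_fread(buffer, sizeof(buffer), f);
        sk_fclose(f);
        (void)bytesRead;
    }

    SkOSFile::Iter iter("/tmp", ".bin");   // only entries whose names end in ".bin"
    SkString name;
    while (iter.next(&name)) {
        SkString full = SkOSPath::Join("/tmp", name.c_str());
        // full is the complete path of each matching entry
    }
}
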
diff --git a/gfx/skia/skia/include/core/SkPaint.h b/gfx/skia/skia/include/core/SkPaint.h
new file mode 100644
index 000000000..ddc90ae19
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPaint.h
@@ -0,0 +1,1226 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPaint_DEFINED
+#define SkPaint_DEFINED
+
+#include "SkBlendMode.h"
+#include "SkColor.h"
+#include "SkFilterQuality.h"
+#include "SkMatrix.h"
+#include "SkXfermode.h"
+
+//#define SK_SUPPORT_LEGACY_XFERMODE_OBJECT
+
+class SkAutoDescriptor;
+class SkAutoGlyphCache;
+class SkColorFilter;
+class SkData;
+class SkDescriptor;
+class SkDrawLooper;
+class SkReadBuffer;
+class SkWriteBuffer;
+class SkGlyph;
+struct SkRect;
+class SkGlyphCache;
+class SkImageFilter;
+class SkMaskFilter;
+class SkPath;
+class SkPathEffect;
+struct SkPoint;
+class SkRasterizer;
+struct SkScalerContextEffects;
+class SkShader;
+class SkSurfaceProps;
+class SkTextBlob;
+class SkTypeface;
+
+#define kBicubicFilterBitmap_Flag kHighQualityFilterBitmap_Flag
+
+/** \class SkPaint
+
+ The SkPaint class holds the style and color information about how to draw
+ geometries, text and bitmaps.
+*/
+class SK_API SkPaint {
+public:
+ SkPaint();
+ SkPaint(const SkPaint& paint);
+ SkPaint(SkPaint&& paint);
+ ~SkPaint();
+
+ SkPaint& operator=(const SkPaint&);
+ SkPaint& operator=(SkPaint&&);
+
+ /** operator== may give false negatives: two paints that draw equivalently
+ may return false. It will never give false positives: two paints that
+ are not equivalent always return false.
+ */
+ SK_API friend bool operator==(const SkPaint& a, const SkPaint& b);
+ friend bool operator!=(const SkPaint& a, const SkPaint& b) {
+ return !(a == b);
+ }
+
+ /** getHash() is a shallow hash, with the same limitations as operator==.
+ * If operator== returns true for two paints, getHash() returns the same value for each.
+ */
+ uint32_t getHash() const;
+
+ void flatten(SkWriteBuffer&) const;
+ void unflatten(SkReadBuffer&);
+
+ /** Restores the paint to its initial settings.
+ */
+ void reset();
+
+ /** Specifies the level of hinting to be performed. These names are taken
+ from the Gnome/Cairo names for the same concepts. They are translated into
+ Freetype concepts the same as in cairo-ft-font.c:
+ kNo_Hinting -> FT_LOAD_NO_HINTING
+ kSlight_Hinting -> FT_LOAD_TARGET_LIGHT
+ kNormal_Hinting -> <default, no option>
+ kFull_Hinting -> <same as kNormalHinting, unless we are rendering
+ subpixel glyphs, in which case TARGET_LCD or
+ TARGET_LCD_V is used>
+ */
+ enum Hinting {
+ kNo_Hinting = 0,
+ kSlight_Hinting = 1,
+ kNormal_Hinting = 2, //!< this is the default
+ kFull_Hinting = 3
+ };
+
+ Hinting getHinting() const {
+ return static_cast<Hinting>(fBitfields.fHinting);
+ }
+
+ void setHinting(Hinting hintingLevel);
+
+ /** Specifies the bit values that are stored in the paint's flags.
+ */
+ enum Flags {
+ kAntiAlias_Flag = 0x01, //!< mask to enable antialiasing
+ kDither_Flag = 0x04, //!< mask to enable dithering
+ kUnderlineText_Flag = 0x08, //!< mask to enable underline text
+ kStrikeThruText_Flag = 0x10, //!< mask to enable strike-thru text
+ kFakeBoldText_Flag = 0x20, //!< mask to enable fake-bold text
+ kLinearText_Flag = 0x40, //!< mask to enable linear-text
+ kSubpixelText_Flag = 0x80, //!< mask to enable subpixel text positioning
+ kDevKernText_Flag = 0x100, //!< mask to enable device kerning text
+ kLCDRenderText_Flag = 0x200, //!< mask to enable subpixel glyph rendering
+ kEmbeddedBitmapText_Flag = 0x400, //!< mask to enable embedded bitmap strikes
+ kAutoHinting_Flag = 0x800, //!< mask to force Freetype's autohinter
+ kVerticalText_Flag = 0x1000,
+ kGenA8FromLCD_Flag = 0x2000, // hack for GDI -- do not use if you can help it
+ // when adding extra flags, note that the fFlags member is specified
+ // with a bit-width and you'll have to expand it.
+
+ kAllFlags = 0xFFFF
+ };
+
+ /** Return the paint's flags. Use the Flag enum to test flag values.
+ @return the paint's flags (see enums ending in _Flag for bit masks)
+ */
+ uint32_t getFlags() const { return fBitfields.fFlags; }
+
+ /** Set the paint's flags. Use the Flag enum to specific flag values.
+ @param flags The new flag bits for the paint (see Flags enum)
+ */
+ void setFlags(uint32_t flags);
+
+ /** Helper for getFlags(), returning true if kAntiAlias_Flag bit is set
+ @return true if the antialias bit is set in the paint's flags.
+ */
+ bool isAntiAlias() const {
+ return SkToBool(this->getFlags() & kAntiAlias_Flag);
+ }
+
+ /** Helper for setFlags(), setting or clearing the kAntiAlias_Flag bit
+ @param aa true to enable antialiasing, false to disable it
+ */
+ void setAntiAlias(bool aa);
+
+ /** Helper for getFlags(), returning true if kDither_Flag bit is set
+ @return true if the dithering bit is set in the paint's flags.
+ */
+ bool isDither() const {
+ return SkToBool(this->getFlags() & kDither_Flag);
+ }
+
+ /** Helper for setFlags(), setting or clearing the kDither_Flag bit
+ @param dither true to enable dithering, false to disable it
+ */
+ void setDither(bool dither);
+
+ /** Helper for getFlags(), returning true if kLinearText_Flag bit is set
+ @return true if the lineartext bit is set in the paint's flags
+ */
+ bool isLinearText() const {
+ return SkToBool(this->getFlags() & kLinearText_Flag);
+ }
+
+ /** Helper for setFlags(), setting or clearing the kLinearText_Flag bit
+ @param linearText true to set the linearText bit in the paint's flags,
+ false to clear it.
+ */
+ void setLinearText(bool linearText);
+
+ /** Helper for getFlags(), returning true if kSubpixelText_Flag bit is set
+ @return true if the subpixel text bit is set in the paint's flags
+ */
+ bool isSubpixelText() const {
+ return SkToBool(this->getFlags() & kSubpixelText_Flag);
+ }
+
+ /**
+ * Helper for setFlags(), setting or clearing the kSubpixelText_Flag.
+ * @param subpixelText true to set the subpixelText bit in the paint's
+ * flags, false to clear it.
+ */
+ void setSubpixelText(bool subpixelText);
+
+ bool isLCDRenderText() const {
+ return SkToBool(this->getFlags() & kLCDRenderText_Flag);
+ }
+
+ /**
+ * Helper for setFlags(), setting or clearing the kLCDRenderText_Flag.
+ * Note: antialiasing must also be on for lcd rendering
+ * @param lcdText true to set the LCDRenderText bit in the paint's flags,
+ * false to clear it.
+ */
+ void setLCDRenderText(bool lcdText);
+
+ bool isEmbeddedBitmapText() const {
+ return SkToBool(this->getFlags() & kEmbeddedBitmapText_Flag);
+ }
+
+ /** Helper for setFlags(), setting or clearing the kEmbeddedBitmapText_Flag bit
+ @param useEmbeddedBitmapText true to set the kEmbeddedBitmapText bit in the paint's flags,
+ false to clear it.
+ */
+ void setEmbeddedBitmapText(bool useEmbeddedBitmapText);
+
+ bool isAutohinted() const {
+ return SkToBool(this->getFlags() & kAutoHinting_Flag);
+ }
+
+ /** Helper for setFlags(), setting or clearing the kAutoHinting_Flag bit
+ @param useAutohinter true to set the kAutoHinting_Flag bit in the
+ paint's flags,
+ false to clear it.
+ */
+ void setAutohinted(bool useAutohinter);
+
+ bool isVerticalText() const {
+ return SkToBool(this->getFlags() & kVerticalText_Flag);
+ }
+
+ /**
+ * Helper for setting or clearing the kVerticalText_Flag bit in
+ * setFlags(...).
+ *
+ * If this bit is set, then advances are treated as Y values rather than
+ * X values, and drawText will place its glyphs vertically rather than
+ * horizontally.
+ */
+ void setVerticalText(bool);
+
+ /** Helper for getFlags(), returning true if kUnderlineText_Flag bit is set
+ @return true if the underlineText bit is set in the paint's flags.
+ */
+ bool isUnderlineText() const {
+ return SkToBool(this->getFlags() & kUnderlineText_Flag);
+ }
+
+ /** Helper for setFlags(), setting or clearing the kUnderlineText_Flag bit
+ @param underlineText true to set the underlineText bit in the paint's
+ flags, false to clear it.
+ */
+ void setUnderlineText(bool underlineText);
+
+ /** Helper for getFlags(), returns true if kStrikeThruText_Flag bit is set
+ @return true if the strikeThruText bit is set in the paint's flags.
+ */
+ bool isStrikeThruText() const {
+ return SkToBool(this->getFlags() & kStrikeThruText_Flag);
+ }
+
+ /** Helper for setFlags(), setting or clearing the kStrikeThruText_Flag bit
+ @param strikeThruText true to set the strikeThruText bit in the
+ paint's flags, false to clear it.
+ */
+ void setStrikeThruText(bool strikeThruText);
+
+ /** Helper for getFlags(), returns true if kFakeBoldText_Flag bit is set
+ @return true if the kFakeBoldText_Flag bit is set in the paint's flags.
+ */
+ bool isFakeBoldText() const {
+ return SkToBool(this->getFlags() & kFakeBoldText_Flag);
+ }
+
+ /** Helper for setFlags(), setting or clearing the kFakeBoldText_Flag bit
+ @param fakeBoldText true to set the kFakeBoldText_Flag bit in the paint's
+ flags, false to clear it.
+ */
+ void setFakeBoldText(bool fakeBoldText);
+
+ /** Helper for getFlags(), returns true if kDevKernText_Flag bit is set
+ @return true if the kernText bit is set in the paint's flags.
+ */
+ bool isDevKernText() const {
+ return SkToBool(this->getFlags() & kDevKernText_Flag);
+ }
+
+ /** Helper for setFlags(), setting or clearing the kDevKernText_Flag bit
+ @param devKernText true to set the kDevKernText_Flag bit in the paint's
+ flags, false to clear it.
+ */
+ void setDevKernText(bool devKernText);
+
+ /**
+ * Return the filter level. This affects the quality (and performance) of
+ * drawing scaled images.
+ */
+ SkFilterQuality getFilterQuality() const {
+ return (SkFilterQuality)fBitfields.fFilterQuality;
+ }
+
+ /**
+ * Set the filter quality. This affects the quality (and performance) of
+ * drawing scaled images.
+ */
+ void setFilterQuality(SkFilterQuality quality);
+
+ /** Styles apply to rect, oval, path, and text.
+ Bitmaps are always drawn in "fill", and lines are always drawn in
+ "stroke".
+
+ Note: strokeandfill implicitly draws the result with
+ SkPath::kWinding_FillType, so if the original path is even-odd, the
+ results may not appear the same as if it was drawn twice, filled and
+ then stroked.
+ */
+ enum Style {
+ kFill_Style, //!< fill the geometry
+ kStroke_Style, //!< stroke the geometry
+ kStrokeAndFill_Style, //!< fill and stroke the geometry
+ };
+ enum {
+ kStyleCount = kStrokeAndFill_Style + 1
+ };
+
+ /** Return the paint's style, used for controlling how primitives'
+ geometries are interpreted (except for drawBitmap, which always assumes
+ kFill_Style).
+ @return the paint's Style
+ */
+ Style getStyle() const { return (Style)fBitfields.fStyle; }
+
+ /** Set the paint's style, used for controlling how primitives'
+ geometries are interpreted (except for drawBitmap, which always assumes
+ Fill).
+ @param style The new style to set in the paint
+ */
+ void setStyle(Style style);
+
+ /** Return the paint's color. Note that the color is a 32bit value
+ containing alpha as well as r,g,b. This 32bit value is not
+ premultiplied, meaning that its alpha can be any value, regardless of
+ the values of r,g,b.
+ @return the paint's color (and alpha).
+ */
+ SkColor getColor() const { return fColor; }
+
+ /** Set the paint's color. Note that the color is a 32bit value containing
+ alpha as well as r,g,b. This 32bit value is not premultiplied, meaning
+ that its alpha can be any value, regardless of the values of r,g,b.
+ @param color The new color (including alpha) to set in the paint.
+ */
+ void setColor(SkColor color);
+
+ /** Helper to getColor() that just returns the color's alpha value.
+ @return the alpha component of the paint's color.
+ */
+ uint8_t getAlpha() const { return SkToU8(SkColorGetA(fColor)); }
+
+ /** Helper to setColor(), that only assigns the color's alpha value,
+ leaving its r,g,b values unchanged.
+ @param a set the alpha component (0..255) of the paint's color.
+ */
+ void setAlpha(U8CPU a);
+
+ /** Helper to setColor(), that takes a,r,g,b and constructs the color value
+ using SkColorSetARGB()
+ @param a The new alpha component (0..255) of the paint's color.
+ @param r The new red component (0..255) of the paint's color.
+ @param g The new green component (0..255) of the paint's color.
+ @param b The new blue component (0..255) of the paint's color.
+ */
+ void setARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b);
+
+ /** Return the width for stroking.
+ <p />
+ A value of 0 strokes in hairline mode.
+ Hairlines always draw 1-pixel wide, regardless of the matrix.
+ @return the paint's stroke width, used whenever the paint's style is
+ Stroke or StrokeAndFill.
+ */
+ SkScalar getStrokeWidth() const { return fWidth; }
+
+ /** Set the width for stroking.
+ Pass 0 to stroke in hairline mode.
+ Hairlines always draw 1-pixel wide, regardless of the matrix.
+ @param width set the paint's stroke width, used whenever the paint's
+ style is Stroke or StrokeAndFill.
+ */
+ void setStrokeWidth(SkScalar width);
+
+ /** Return the paint's stroke miter value. This is used to control the
+ behavior of miter joins when the joins angle is sharp.
+ @return the paint's miter limit, used whenever the paint's style is
+ Stroke or StrokeAndFill.
+ */
+ SkScalar getStrokeMiter() const { return fMiterLimit; }
+
+ /** Set the paint's stroke miter value. This is used to control the
+ behavior of miter joins when the joins angle is sharp. This value must
+ be >= 0.
+ @param miter set the miter limit on the paint, used whenever the
+ paint's style is Stroke or StrokeAndFill.
+ */
+ void setStrokeMiter(SkScalar miter);
+
+ /** Cap enum specifies the settings for the paint's strokecap. This is the
+ treatment that is applied to the beginning and end of each non-closed
+ contour (e.g. lines).
+
+ If the cap is round or square, the caps are drawn when the contour has
+ a zero length. Zero length contours can be created by following moveTo
+ with a lineTo at the same point, or a moveTo followed by a close.
+
+ A dash with an on interval of zero also creates a zero length contour.
+
+ The zero length contour draws the square cap without rotation, since
+ no direction can be inferred.
+ */
+ enum Cap {
+ kButt_Cap, //!< begin/end contours with no extension
+ kRound_Cap, //!< begin/end contours with a semi-circle extension
+ kSquare_Cap, //!< begin/end contours with a half square extension
+
+ kLast_Cap = kSquare_Cap,
+ kDefault_Cap = kButt_Cap
+ };
+ static constexpr int kCapCount = kLast_Cap + 1;
+
+ /** Join enum specifies the settings for the paint's strokejoin. This is
+ the treatment that is applied to corners in paths and rectangles.
+ */
+ enum Join {
+ kMiter_Join, //!< connect path segments with a sharp join
+ kRound_Join, //!< connect path segments with a round join
+ kBevel_Join, //!< connect path segments with a flat bevel join
+
+ kLast_Join = kBevel_Join,
+ kDefault_Join = kMiter_Join
+ };
+ static constexpr int kJoinCount = kLast_Join + 1;
+
+ /** Return the paint's stroke cap type, controlling how the start and end
+ of stroked lines and paths are treated.
+ @return the line cap style for the paint, used whenever the paint's
+ style is Stroke or StrokeAndFill.
+ */
+ Cap getStrokeCap() const { return (Cap)fBitfields.fCapType; }
+
+ /** Set the paint's stroke cap type.
+ @param cap set the paint's line cap style, used whenever the paint's
+ style is Stroke or StrokeAndFill.
+ */
+ void setStrokeCap(Cap cap);
+
+ /** Return the paint's stroke join type.
+ @return the paint's line join style, used whenever the paint's style is
+ Stroke or StrokeAndFill.
+ */
+ Join getStrokeJoin() const { return (Join)fBitfields.fJoinType; }
+
+ /** Set the paint's stroke join type.
+ @param join set the paint's line join style, used whenever the paint's
+ style is Stroke or StrokeAndFill.
+ */
+ void setStrokeJoin(Join join);
+
+ /**
+ * Applies any/all effects (patheffect, stroking) to src, returning the
+ * result in dst. The result is that drawing src with this paint will be
+ * the same as drawing dst with a default paint (at least from the
+ * geometric perspective).
+ *
+ * @param src input path
+ * @param dst output path (may be the same as src)
+ * @param cullRect If not null, the dst path may be culled to this rect.
+ * @param resScale If > 1, increase precision, else if (0 < resScale < 1) reduce precision
+ * in favor of speed/size.
+ * @return true if the path should be filled, or false if it should be
+ * drawn with a hairline (width == 0)
+ */
+ bool getFillPath(const SkPath& src, SkPath* dst, const SkRect* cullRect,
+ SkScalar resScale = 1) const;
+
+ bool getFillPath(const SkPath& src, SkPath* dst) const {
+ return this->getFillPath(src, dst, NULL, 1);
+ }
+
+ /** Get the paint's shader object.
+ <p />
+ The shader's reference count is not affected.
+ @return the paint's shader (or NULL)
+ */
+ SkShader* getShader() const { return fShader.get(); }
+
+ /** Set or clear the shader object.
+ * Shaders specify the source color(s) for what is being drawn. If a paint
+ * has no shader, then the paint's color is used. If the paint has a
+ * shader, then the shader's color(s) are used instead, but they are
+ * modulated by the paint's alpha. This makes it easy to create a shader
+ * once (e.g. bitmap tiling or gradient) and then change its transparency
+ * w/o having to modify the original shader... only the paint's alpha needs
+ * to be modified.
+ *
+ * There is an exception to this only-respect-paint's-alpha rule: If the shader only generates
+ * alpha (e.g. SkShader::CreateBitmapShader(bitmap, ...) where bitmap's colortype is kAlpha_8)
+ * then the shader will use the paint's entire color to "colorize" its output (modulating the
+ * bitmap's alpha with the paint's color+alpha).
+ *
+ * Pass NULL to clear any previous shader.
+ * As a convenience, the parameter passed is also returned.
+ * If a previous shader exists, its reference count is decremented.
+ * If shader is not NULL, its reference count is incremented.
+ * @param shader May be NULL. The shader to be installed in the paint
+ * @return shader
+ */
+ void setShader(sk_sp<SkShader>);
+#ifdef SK_SUPPORT_LEGACY_CREATESHADER_PTR
+ SkShader* setShader(SkShader* shader);
+#endif
+
+ /** Get the paint's colorfilter. If there is a colorfilter, its reference
+ count is not changed.
+ @return the paint's colorfilter (or NULL)
+ */
+ SkColorFilter* getColorFilter() const { return fColorFilter.get(); }
+
+ /** Set or clear the paint's colorfilter, returning the parameter.
+ <p />
+ If the paint already has a filter, its reference count is decremented.
+ If filter is not NULL, its reference count is incremented.
+ @param filter May be NULL. The filter to be installed in the paint
+ @return filter
+ */
+#ifdef SK_SUPPORT_LEGACY_COLORFILTER_PTR
+ SkColorFilter* setColorFilter(SkColorFilter* filter);
+#endif
+ void setColorFilter(sk_sp<SkColorFilter>);
+
+#ifdef SK_SUPPORT_LEGACY_XFERMODE_OBJECT
+ /** Get the paint's xfermode object.
+ <p />
+ The xfermode's reference count is not affected.
+ @return the paint's xfermode (or NULL)
+ */
+ SkXfermode* getXfermode() const;
+
+ /** Set or clear the xfermode object.
+ <p />
+ Pass NULL to clear any previous xfermode.
+ As a convenience, the parameter passed is also returned.
+ If a previous xfermode exists, its reference count is decremented.
+ If xfermode is not NULL, its reference count is incremented.
+ @param xfermode May be NULL. The new xfermode to be installed in the
+ paint
+ @return xfermode
+ */
+ void setXfermode(sk_sp<SkXfermode>);
+#ifdef SK_SUPPORT_LEGACY_XFERMODE_PTR
+ SkXfermode* setXfermode(SkXfermode* xfermode);
+#endif
+
+ /** Create an xfermode based on the specified Mode, and assign it into the
+ paint, returning the mode that was set. If the Mode is SrcOver, then
+ the paint's xfermode is set to null.
+ */
+ SkXfermode* setXfermodeMode(SkXfermode::Mode);
+#endif
+
+ SkBlendMode getBlendMode() const { return (SkBlendMode)fBlendMode; }
+ bool isSrcOver() const { return (SkBlendMode)fBlendMode == SkBlendMode::kSrcOver; }
+ void setBlendMode(SkBlendMode mode) { fBlendMode = (unsigned)mode; }
+
+ /** Get the paint's patheffect object.
+ <p />
+ The patheffect reference count is not affected.
+ @return the paint's patheffect (or NULL)
+ */
+ SkPathEffect* getPathEffect() const { return fPathEffect.get(); }
+
+ /** Set or clear the patheffect object.
+ <p />
+ Pass NULL to clear any previous patheffect.
+ As a convenience, the parameter passed is also returned.
+ If a previous patheffect exists, its reference count is decremented.
+ If patheffect is not NULL, its reference count is incremented.
+ @param effect May be NULL. The new patheffect to be installed in the
+ paint
+ @return effect
+ */
+ void setPathEffect(sk_sp<SkPathEffect>);
+#ifdef SK_SUPPORT_LEGACY_PATHEFFECT_PTR
+ SkPathEffect* setPathEffect(SkPathEffect* effect);
+#endif
+
+ /** Get the paint's maskfilter object.
+ <p />
+ The maskfilter reference count is not affected.
+ @return the paint's maskfilter (or NULL)
+ */
+ SkMaskFilter* getMaskFilter() const { return fMaskFilter.get(); }
+
+ /** Set or clear the maskfilter object.
+ <p />
+ Pass NULL to clear any previous maskfilter.
+ As a convenience, the parameter passed is also returned.
+ If a previous maskfilter exists, its reference count is decremented.
+ If maskfilter is not NULL, its reference count is incremented.
+ @param maskfilter May be NULL. The new maskfilter to be installed in
+ the paint
+ @return maskfilter
+ */
+#ifdef SK_SUPPORT_LEGACY_MASKFILTER_PTR
+ SkMaskFilter* setMaskFilter(SkMaskFilter* maskfilter);
+#endif
+ void setMaskFilter(sk_sp<SkMaskFilter>);
+
+ // These attributes are for text/fonts
+
+ /** Get the paint's typeface object.
+ <p />
+ The typeface object identifies which font to use when drawing or
+ measuring text. The typeface reference count is not affected.
+ @return the paint's typeface (or NULL)
+ */
+ SkTypeface* getTypeface() const { return fTypeface.get(); }
+
+ /** Set or clear the typeface object.
+ <p />
+ Pass NULL to clear any previous typeface.
+ As a convenience, the parameter passed is also returned.
+ If a previous typeface exists, its reference count is decremented.
+ If typeface is not NULL, its reference count is incremented.
+ @param typeface May be NULL. The new typeface to be installed in the
+ paint
+ @return typeface
+ */
+ void setTypeface(sk_sp<SkTypeface>);
+#ifdef SK_SUPPORT_LEGACY_TYPEFACE_PTR
+ SkTypeface* setTypeface(SkTypeface* typeface);
+#endif
+
+ /** Get the paint's rasterizer (or NULL).
+ <p />
+ The rasterizer controls how paths/text are turned into alpha masks.
+ @return the paint's rasterizer (or NULL)
+ */
+ SkRasterizer* getRasterizer() const { return fRasterizer.get(); }
+
+ /** Set or clear the rasterizer object.
+ <p />
+ Pass NULL to clear any previous rasterizer.
+ As a convenience, the parameter passed is also returned.
+ If a previous rasterizer exists in the paint, its reference count is
+ decremented. If rasterizer is not NULL, its reference count is
+ incremented.
+ @param rasterizer May be NULL. The new rasterizer to be installed in
+ the paint.
+ @return rasterizer
+ */
+#ifdef SK_SUPPORT_LEGACY_MINOR_EFFECT_PTR
+ SkRasterizer* setRasterizer(SkRasterizer* rasterizer);
+#endif
+ void setRasterizer(sk_sp<SkRasterizer>);
+
+ SkImageFilter* getImageFilter() const { return fImageFilter.get(); }
+ SkImageFilter* setImageFilter(SkImageFilter*);
+ void setImageFilter(sk_sp<SkImageFilter>);
+
+ /**
+ * Return the paint's SkDrawLooper (if any). Does not affect the looper's
+ * reference count.
+ */
+ SkDrawLooper* getDrawLooper() const { return fDrawLooper.get(); }
+ SkDrawLooper* getLooper() const { return fDrawLooper.get(); }
+ /**
+ * Set or clear the looper object.
+ * <p />
+ * Pass NULL to clear any previous looper.
+ * If a previous looper exists in the paint, its reference count is
+ * decremented. If looper is not NULL, its reference count is
+ * incremented.
+ * @param looper May be NULL. The new looper to be installed in the paint.
+ */
+ void setDrawLooper(sk_sp<SkDrawLooper>);
+#ifdef SK_SUPPORT_LEGACY_MINOR_EFFECT_PTR
+ SkDrawLooper* setLooper(SkDrawLooper* looper);
+#endif
+ void setLooper(sk_sp<SkDrawLooper>);
+
+ enum Align {
+ kLeft_Align,
+ kCenter_Align,
+ kRight_Align,
+ };
+ enum {
+ kAlignCount = 3
+ };
+
+ /** Return the paint's Align value for drawing text.
+ @return the paint's Align value for drawing text.
+ */
+ Align getTextAlign() const { return (Align)fBitfields.fTextAlign; }
+
+ /** Set the paint's text alignment.
+ @param align set the paint's Align value for drawing text.
+ */
+ void setTextAlign(Align align);
+
+ /** Return the paint's text size.
+ @return the paint's text size.
+ */
+ SkScalar getTextSize() const { return fTextSize; }
+
+ /** Set the paint's text size. This value must be > 0
+ @param textSize set the paint's text size.
+ */
+ void setTextSize(SkScalar textSize);
+
+ /** Return the paint's horizontal scale factor for text. The default value
+ is 1.0.
+ @return the paint's scale factor in X for drawing/measuring text
+ */
+ SkScalar getTextScaleX() const { return fTextScaleX; }
+
+ /** Set the paint's horizontal scale factor for text. The default value
+ is 1.0. Values > 1.0 will stretch the text wider. Values < 1.0 will
+ stretch the text narrower.
+ @param scaleX set the paint's scale factor in X for drawing/measuring
+ text.
+ */
+ void setTextScaleX(SkScalar scaleX);
+
+ /** Return the paint's horizontal skew factor for text. The default value
+ is 0.
+ @return the paint's skew factor in X for drawing text.
+ */
+ SkScalar getTextSkewX() const { return fTextSkewX; }
+
+ /** Set the paint's horizontal skew factor for text. The default value
+ is 0. For approximating oblique text, use values around -0.25.
+ @param skewX set the paint's skew factor in X for drawing text.
+ */
+ void setTextSkewX(SkScalar skewX);
+
+ /** Describes how to interpret the text parameters that are passed to paint
+ methods like measureText() and getTextWidths().
+ */
+ enum TextEncoding {
+ kUTF8_TextEncoding, //!< the text parameters are UTF8
+ kUTF16_TextEncoding, //!< the text parameters are UTF16
+ kUTF32_TextEncoding, //!< the text parameters are UTF32
+ kGlyphID_TextEncoding //!< the text parameters are glyph indices
+ };
+
+ TextEncoding getTextEncoding() const {
+ return (TextEncoding)fBitfields.fTextEncoding;
+ }
+
+ void setTextEncoding(TextEncoding encoding);
+
+ struct FontMetrics {
+ /** Flags which indicate the confidence level of various metrics.
+ A set flag indicates that the metric may be trusted.
+ */
+ enum FontMetricsFlags {
+ kUnderlineThinknessIsValid_Flag = 1 << 0,
+ kUnderlinePositionIsValid_Flag = 1 << 1,
+ };
+
+ uint32_t fFlags; //!< Bit field to identify which values are unknown
+ SkScalar fTop; //!< The greatest distance above the baseline for any glyph (will be <= 0)
+ SkScalar fAscent; //!< The recommended distance above the baseline (will be <= 0)
+ SkScalar fDescent; //!< The recommended distance below the baseline (will be >= 0)
+ SkScalar fBottom; //!< The greatest distance below the baseline for any glyph (will be >= 0)
+ SkScalar fLeading; //!< The recommended distance to add between lines of text (will be >= 0)
+ SkScalar fAvgCharWidth; //!< the average character width (>= 0)
+ SkScalar fMaxCharWidth; //!< the max character width (>= 0)
+ SkScalar fXMin; //!< The minimum bounding box x value for all glyphs
+ SkScalar fXMax; //!< The maximum bounding box x value for all glyphs
+ SkScalar fXHeight; //!< The height of an 'x' in px, or 0 if no 'x' in face
+ SkScalar fCapHeight; //!< The cap height (> 0), or 0 if cannot be determined.
+ SkScalar fUnderlineThickness; //!< underline thickness, or 0 if cannot be determined
+
+ /** Underline Position - position of the top of the Underline stroke
+ relative to the baseline. This can have the following values:
+ - Negative - means underline should be drawn above baseline.
+ - Positive - means below baseline.
+ - Zero - means underline should be drawn on baseline.
+ */
+ SkScalar fUnderlinePosition; //!< underline position, or 0 if cannot be determined
+
+ /** If the font metrics have a valid underline thickness, return true, and set the
+ thickness param to that value. If they don't, return false and ignore the
+ thickness param.
+ */
+ bool hasUnderlineThickness(SkScalar* thickness) const {
+ if (SkToBool(fFlags & kUnderlineThinknessIsValid_Flag)) {
+ *thickness = fUnderlineThickness;
+ return true;
+ }
+ return false;
+ }
+
+ /** If the font metrics have a valid underline position, return true, and set the
+ position param to that value. If they don't, return false and ignore the
+ position param.
+ */
+ bool hasUnderlinePosition(SkScalar* position) const {
+ if (SkToBool(fFlags & kUnderlinePositionIsValid_Flag)) {
+ *position = fUnderlinePosition;
+ return true;
+ }
+ return false;
+ }
+
+ };
+
+ /** Return the recommended spacing between lines (which will be
+ fDescent - fAscent + fLeading).
+ If metrics is not null, return in it the font metrics for the
+ typeface/pointsize/etc. currently set in the paint.
+ @param metrics If not null, returns the font metrics for the
+ current typeface/pointsize/etc setting in this
+ paint.
+ @param scale If not 0, return width as if the canvas were scaled
+ by this value
+ @return the recommended spacing between lines
+ */
+ SkScalar getFontMetrics(FontMetrics* metrics, SkScalar scale = 0) const;
+
+ /** Return the recommended line spacing. This will be
+ fDescent - fAscent + fLeading
+ */
+ SkScalar getFontSpacing() const { return this->getFontMetrics(NULL, 0); }
+
+ /** Convert the specified text into glyph IDs, returning the number of
+ glyph IDs written. If glyphs is NULL, it is ignored and only the count
+ is returned.
+ */
+ int textToGlyphs(const void* text, size_t byteLength,
+ SkGlyphID glyphs[]) const;
+
+ /** Return true if all of the specified text has a corresponding non-zero
+ glyph ID. If any of the code-points in the text are not supported in
+ the typeface (i.e. the glyph ID would be zero), then return false.
+
+ If the text encoding for the paint is kGlyph_TextEncoding, then this
+ returns true if all of the specified glyph IDs are non-zero.
+ */
+ bool containsText(const void* text, size_t byteLength) const;
+
+ /** Convert the glyph array into Unichars. Unconvertible glyphs are mapped
+ to zero. Note: this does not look at the text-encoding setting in the
+ paint, only at the typeface.
+ */
+ void glyphsToUnichars(const SkGlyphID glyphs[], int count, SkUnichar text[]) const;
+
+ /** Return the number of drawable units in the specified text buffer.
+ This looks at the current TextEncoding field of the paint. If you also
+ want to have the text converted into glyph IDs, call textToGlyphs
+ instead.
+ */
+ int countText(const void* text, size_t byteLength) const {
+ return this->textToGlyphs(text, byteLength, NULL);
+ }
+
+ /** Return the width of the text. This will return the vertical measure
+ * if isVerticalText() is true, in which case the returned value should
+ * be treated as a height instead of a width.
+ *
+ * @param text The text to be measured
+ * @param length Number of bytes of text to measure
+ * @param bounds If not NULL, returns the bounds of the text,
+ * relative to (0, 0).
+ * @return The advance width of the text
+ */
+ SkScalar measureText(const void* text, size_t length, SkRect* bounds) const;
+
+ /** Return the width of the text. This will return the vertical measure
+ * if isVerticalText() is true, in which case the returned value should
+ * be treated as a height instead of a width.
+ *
+ * @param text Address of the text
+ * @param length Number of bytes of text to measure
+ * @return The advance width of the text
+ */
+ SkScalar measureText(const void* text, size_t length) const {
+ return this->measureText(text, length, NULL);
+ }
+
+ /** Return the number of bytes of text that were measured. If
+ * isVerticalText() is true, then the vertical advances are used for
+ * the measurement.
+ *
+ * @param text The text to be measured
+ * @param length Number of bytes of text to measure
+ * @param maxWidth Maximum width. Only the subset of text whose accumulated
+ * widths are <= maxWidth are measured.
+ * @param measuredWidth Optional. If non-null, this returns the actual
+ * width of the measured text.
+ * @return The number of bytes of text that were measured. Will be
+ * <= length.
+ */
+ size_t breakText(const void* text, size_t length, SkScalar maxWidth,
+ SkScalar* measuredWidth = NULL) const;
+
+ /** Return the advances for the text. These will be vertical advances if
+ * isVerticalText() returns true.
+ *
+ * @param text the text
+ * @param byteLength number of bytes of text
+ * @param widths If not null, returns the array of advances for
+ * the glyphs. If not NULL, must be at least as large
+ * as the number of unichars in the specified text.
+ * @param bounds If not null, returns the bounds for each
+ * character, relative to (0, 0)
+ * @return the number of unichars in the specified text.
+ */
+ int getTextWidths(const void* text, size_t byteLength, SkScalar widths[],
+ SkRect bounds[] = NULL) const;
+
+ /** Return the path (outline) for the specified text.
+ * Note: just like SkCanvas::drawText, this will respect the Align setting
+ * in the paint.
+ *
+ * @param text the text
+ * @param length number of bytes of text
+ * @param x The x-coordinate of the origin of the text.
+ * @param y The y-coordinate of the origin of the text.
+ * @param path The outline of the text.
+ */
+ void getTextPath(const void* text, size_t length, SkScalar x, SkScalar y,
+ SkPath* path) const;
+
+ /** Return the path (outline) for the specified text.
+ * Note: just like SkCanvas::drawText, this will respect the Align setting
+ * in the paint.
+ *
+ * @param text the text
+ * @param length number of bytes of text
+ * @param pos array of positions, used to position each character
+ * @param path The outline of the text.
+ */
+ void getPosTextPath(const void* text, size_t length,
+ const SkPoint pos[], SkPath* path) const;
+
+ /** Return the number of intervals that intersect the intercept along the axis of the advance.
+ * The return count is zero or a multiple of two, and is at most the number of glyphs * 2 in
+ * the string. The caller may pass nullptr for intervals to determine the size of the interval
+ * array, or may conservatively pre-allocate an array with length * 2 entries. The computed
+ * intervals are cached by glyph to improve performance for multiple calls.
+ * This permits constructing an underline that skips the descenders.
+ *
+ * @param text the text
+ * @param length number of bytes of text
+ * @param x The x-coordinate of the origin of the text.
+ * @param y The y-coordinate of the origin of the text.
+ * @param bounds The lower and upper line parallel to the advance.
+ * @param intervals If not null, the found intersections.
+ *
+ * @return The number of intersections, which may be zero.
+ */
+ int getTextIntercepts(const void* text, size_t length, SkScalar x, SkScalar y,
+ const SkScalar bounds[2], SkScalar* intervals) const;
+
+ /** Return the number of intervals that intersect the intercept along the axis of the advance.
+ * The return count is zero or a multiple of two, and is at most the number of glyphs * 2 in
+ * the string. The caller may pass nullptr for intervals to determine the size of the interval
+ * array, or may conservatively pre-allocate an array with length * 2 entries. The computed
+ * intervals are cached by glyph to improve performance for multiple calls.
+ * This permits constructing an underline that skips the descenders.
+ *
+ * @param text the text
+ * @param length number of bytes of text
+ * @param pos array of positions, used to position each character
+ * @param bounds The lower and upper line parallel to the advance.
+ * @param intervals If not null, the glyph bounds contained by the advance parallel lines.
+ *
+ * @return The number of intersections, which may be zero.
+ */
+ int getPosTextIntercepts(const void* text, size_t length, const SkPoint pos[],
+ const SkScalar bounds[2], SkScalar* intervals) const;
+
+ /** Return the number of intervals that intersect the intercept along the axis of the advance.
+ * The return count is zero or a multiple of two, and is at most the number of glyphs * 2 in
+ * the string. The caller may pass nullptr for intervals to determine the size of the interval
+ * array, or may conservatively pre-allocate an array with length * 2 entries. The computed
+ * intervals are cached by glyph to improve performance for multiple calls.
+ * This permits constructing an underline that skips the descenders.
+ *
+ * @param text The text.
+ * @param length Number of bytes of text.
+ * @param xpos Array of x-positions, used to position each character.
+ * @param constY The shared Y coordinate for all of the positions.
+ * @param bounds The lower and upper line parallel to the advance.
+ * @param intervals If not null, the glyph bounds contained by the advance parallel lines.
+ *
+ * @return The number of intersections, which may be zero.
+ */
+ int getPosTextHIntercepts(const void* text, size_t length, const SkScalar xpos[],
+ SkScalar constY, const SkScalar bounds[2], SkScalar* intervals) const;
+
+ /** Return the number of intervals that intersect the intercept along the axis of the advance.
+ * The return count is zero or a multiple of two, and is at most the number of glyphs * 2 in
+ * the text blob. The caller may pass nullptr for intervals to determine the size of the interval
+ * array. The computed intervals are cached by glyph to improve performance for multiple calls.
+ * This permits constructing an underline that skips the descenders.
+ *
+ * @param blob The text blob.
+ * @param bounds The lower and upper line parallel to the advance.
+ * @param intervals If not null, receives the glyph bounds contained by the advance parallel lines.
+ *
+ * @return The number of intersections, which may be zero.
+ */
+ int getTextBlobIntercepts(const SkTextBlob* blob, const SkScalar bounds[2],
+ SkScalar* intervals) const;
+
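A minimal sketch of the two-pass intercept pattern described above (editor's addition; the text, origin, and bounds values are hypothetical): call once with a null intervals pointer to size the result, then again with a caller-owned buffer.

    SkPaint paint;
    const char text[] = "jumping";
    const SkScalar bounds[2] = { 1.0f, 3.0f };  // hypothetical underline band, relative to the baseline
    int count = paint.getTextIntercepts(text, sizeof(text) - 1, 50, 100, bounds, nullptr);
    std::vector<SkScalar> intervals(count);     // requires <vector>
    paint.getTextIntercepts(text, sizeof(text) - 1, 50, 100, bounds, intervals.data());
    // Each (intervals[2*i], intervals[2*i + 1]) pair is an x-range the underline should skip.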
+ /**
+ * Return a rectangle that represents the union of the bounds of all
+ * of the glyphs, but each one positioned at (0,0). This may be conservatively large, and
+ * will not take into account any hinting, but will respect any text-scale-x or text-skew-x
+ * on this paint.
+ */
+ SkRect getFontBounds() const;
+
+ // returns true if the paint's settings (e.g. xfermode + alpha) resolve to
+ // mean that we need not draw at all (e.g. SrcOver + 0-alpha)
+ bool nothingToDraw() const;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // would prefer to make these private...
+
+ /** Returns true if the current paint settings allow for fast computation of
+ bounds (i.e. there is nothing complex like a patheffect that would make
+ the bounds computation expensive).
+ */
+ bool canComputeFastBounds() const;
+
+ /** Only call this if canComputeFastBounds() returned true. This takes a
+ raw rectangle (the raw bounds of a shape), and adjusts it for stylistic
+ effects in the paint (e.g. stroking). If needed, it uses the storage
+ rect parameter. It returns the adjusted bounds that can then be used
+ for quickReject tests.
+
+ The returned rect will either be orig or storage, thus the caller
+ should not rely on storage being set to the result, but should always
+ use the returned value. It is legal for orig and storage to be the same
+ rect.
+
+ e.g.
+ if (paint.canComputeFastBounds()) {
+ SkRect r, storage;
+ path.computeBounds(&r, SkPath::kFast_BoundsType);
+ const SkRect& fastR = paint.computeFastBounds(r, &storage);
+ if (canvas->quickReject(fastR, ...)) {
+ // don't draw the path
+ }
+ }
+ */
+ const SkRect& computeFastBounds(const SkRect& orig, SkRect* storage) const {
+ SkPaint::Style style = this->getStyle();
+ // ultra fast-case: filling with no effects that affect geometry
+ if (kFill_Style == style) {
+ uintptr_t effects = reinterpret_cast<uintptr_t>(this->getLooper());
+ effects |= reinterpret_cast<uintptr_t>(this->getMaskFilter());
+ effects |= reinterpret_cast<uintptr_t>(this->getPathEffect());
+ effects |= reinterpret_cast<uintptr_t>(this->getImageFilter());
+ if (!effects) {
+ return orig;
+ }
+ }
+
+ return this->doComputeFastBounds(orig, storage, style);
+ }
+
+ const SkRect& computeFastStrokeBounds(const SkRect& orig,
+ SkRect* storage) const {
+ return this->doComputeFastBounds(orig, storage, kStroke_Style);
+ }
+
+ // Take the style explicitly, so the caller can force us to be stroked
+ // without having to make a copy of the paint just to change that field.
+ const SkRect& doComputeFastBounds(const SkRect& orig, SkRect* storage,
+ Style) const;
+
+ /**
+ * Return a matrix that applies the paint's text values: size, scale, skew
+ */
+ static SkMatrix* SetTextMatrix(SkMatrix* matrix, SkScalar size,
+ SkScalar scaleX, SkScalar skewX) {
+ matrix->setScale(size * scaleX, size);
+ if (skewX) {
+ matrix->postSkew(skewX, 0);
+ }
+ return matrix;
+ }
+
+ SkMatrix* setTextMatrix(SkMatrix* matrix) const {
+ return SetTextMatrix(matrix, fTextSize, fTextScaleX, fTextSkewX);
+ }
+
+ typedef const SkGlyph& (*GlyphCacheProc)(SkGlyphCache*, const char**);
+
+ SK_TO_STRING_NONVIRT()
+
+private:
+ sk_sp<SkTypeface> fTypeface;
+ sk_sp<SkPathEffect> fPathEffect;
+ sk_sp<SkShader> fShader;
+ sk_sp<SkMaskFilter> fMaskFilter;
+ sk_sp<SkColorFilter> fColorFilter;
+ sk_sp<SkRasterizer> fRasterizer;
+ sk_sp<SkDrawLooper> fDrawLooper;
+ sk_sp<SkImageFilter> fImageFilter;
+
+ SkScalar fTextSize;
+ SkScalar fTextScaleX;
+ SkScalar fTextSkewX;
+ SkColor fColor;
+ SkScalar fWidth;
+ SkScalar fMiterLimit;
+ uint32_t fBlendMode; // just need 5-6 bits for SkXfermode::Mode
+ union {
+ struct {
+ // all of these bitfields should add up to 32
+ unsigned fFlags : 16;
+ unsigned fTextAlign : 2;
+ unsigned fCapType : 2;
+ unsigned fJoinType : 2;
+ unsigned fStyle : 2;
+ unsigned fTextEncoding : 2; // 3 values
+ unsigned fHinting : 2;
+ unsigned fFilterQuality : 2;
+ //unsigned fFreeBits : 2;
+ } fBitfields;
+ uint32_t fBitfieldsUInt;
+ };
+
+ static GlyphCacheProc GetGlyphCacheProc(TextEncoding encoding,
+ bool isDevKern,
+ bool needFullMetrics);
+
+ SkScalar measure_text(SkGlyphCache*, const char* text, size_t length,
+ int* count, SkRect* bounds) const;
+
+ enum ScalerContextFlags : uint32_t {
+ kNone_ScalerContextFlags = 0,
+
+ kFakeGamma_ScalerContextFlag = 1 << 0,
+ kBoostContrast_ScalerContextFlag = 1 << 1,
+
+ kFakeGammaAndBoostContrast_ScalerContextFlags =
+ kFakeGamma_ScalerContextFlag | kBoostContrast_ScalerContextFlag,
+ };
+
+ /*
+ * Allocates an SkDescriptor on the heap and returns it to the caller as a ref-counted
+ * SkData. The caller is responsible for managing the lifetime of this object.
+ */
+ void getScalerContextDescriptor(SkScalerContextEffects*, SkAutoDescriptor*,
+ const SkSurfaceProps& surfaceProps,
+ uint32_t scalerContextFlags, const SkMatrix*) const;
+
+ SkGlyphCache* detachCache(const SkSurfaceProps* surfaceProps, uint32_t scalerContextFlags,
+ const SkMatrix*) const;
+
+ void descriptorProc(const SkSurfaceProps* surfaceProps, uint32_t scalerContextFlags,
+ const SkMatrix* deviceMatrix,
+ void (*proc)(SkTypeface*, const SkScalerContextEffects&,
+ const SkDescriptor*, void*),
+ void* context) const;
+
+ /*
+ * The luminance color is used to determine which Gamma Canonical color to map to. This is
+ * really only used by backends which want to cache glyph masks, and need some way to know if
+ * they need to generate new masks based off a given color.
+ */
+ SkColor computeLuminanceColor() const;
+
+ enum {
+ /* This is the size we use when we ask for a glyph's path. We then
+ * post-transform it as we draw to match the request.
+ * This is done to try to re-use cache entries for the path.
+ *
+ * This value is somewhat arbitrary. In theory, it could be 1, since
+ * we store paths as floats. However, we get the path from the font
+ * scaler, and it may represent its paths as fixed-point (or 26.6),
+ * so we shouldn't ask for something too big (might overflow 16.16)
+ * or too small (underflow 26.6).
+ *
+ * This value could track kMaxSizeForGlyphCache, assuming the above
+ * constraints, but since we ask for unhinted paths, the two values
+ * need not match per-se.
+ */
+ kCanonicalTextSizeForPaths = 64,
+
+ /*
+ * Above this size (taking into account CTM and textSize), we never use
+ * the cache for bits or metrics (we might overflow), so we just ask
+ * for a canonical size and post-transform that.
+ */
+ kMaxSizeForGlyphCache = 256,
+ };
+
+ static bool TooBigToUseCache(const SkMatrix& ctm, const SkMatrix& textM);
+
+ // Set flags/hinting/textSize up to use for drawing text as paths.
+ // Returns the scale factor needed to restore the original textSize, since we will
+ // have changed it to kCanonicalTextSizeForPaths.
+ SkScalar setupForAsPaths();
+
+ static SkScalar MaxCacheSize2() {
+ static const SkScalar kMaxSize = SkIntToScalar(kMaxSizeForGlyphCache);
+ static const SkScalar kMag2Max = kMaxSize * kMaxSize;
+ return kMag2Max;
+ }
+
+ friend class SkAutoGlyphCache;
+ friend class SkAutoGlyphCacheNoGamma;
+ friend class SkCanvas;
+ friend class SkDraw;
+ friend class SkPDFDevice;
+ friend class GrAtlasTextBlob;
+ friend class GrAtlasTextContext;
+ friend class GrStencilAndCoverTextContext;
+ friend class GrPathRendering;
+ friend class GrTextUtils;
+ friend class GrGLPathRendering;
+ friend class SkScalerContext;
+ friend class SkTextBaseIter;
+ friend class SkCanonicalizePaint;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPath.h b/gfx/skia/skia/include/core/SkPath.h
new file mode 100644
index 000000000..d1af4f31b
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPath.h
@@ -0,0 +1,1198 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPath_DEFINED
+#define SkPath_DEFINED
+
+#include "SkMatrix.h"
+#include "SkPathRef.h"
+#include "SkRefCnt.h"
+
+class SkReader32;
+class SkWriter32;
+class SkAutoPathBoundsUpdate;
+class SkString;
+class SkRRect;
+class SkWStream;
+
+/** \class SkPath
+
+ The SkPath class encapsulates compound (multiple contour) geometric paths
+ consisting of straight line segments, quadratic curves, and cubic curves.
+*/
+class SK_API SkPath {
+public:
+ enum Direction {
+ /** clockwise direction for adding closed contours */
+ kCW_Direction,
+ /** counter-clockwise direction for adding closed contours */
+ kCCW_Direction,
+ };
+
+ SkPath();
+ SkPath(const SkPath&);
+ ~SkPath();
+
+ SkPath& operator=(const SkPath&);
+ friend SK_API bool operator==(const SkPath&, const SkPath&);
+ friend bool operator!=(const SkPath& a, const SkPath& b) {
+ return !(a == b);
+ }
+
+ /** Return true if the paths contain an equal array of verbs and weights. Paths
+ * with equal verb counts can be readily interpolated. If the paths contain one
+ * or more conics, the conics' weights must also match.
+ *
+ * @param compare The path to compare.
+ *
+ * @return true if the paths have the same verbs and weights.
+ */
+ bool isInterpolatable(const SkPath& compare) const;
+
+ /** Interpolate between two paths with same-sized point arrays.
+ * The out path contains the verbs and weights of this path.
+ * The out points are a weighted average of this path and the ending path.
+ *
+ * @param ending The path to interpolate between.
+ * @param weight The weight, from 0 to 1. The output points are set to
+ * (this->points * weight) + ending->points * (1 - weight).
+ * @return true if the paths could be interpolated.
+ */
+ bool interpolate(const SkPath& ending, SkScalar weight, SkPath* out) const;
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ /** Returns true if the caller is the only owner of the underlying path data */
+ bool unique() const { return fPathRef->unique(); }
+#endif
+
+ enum FillType {
+ /** Specifies that "inside" is computed by a non-zero sum of signed
+ edge crossings
+ */
+ kWinding_FillType,
+ /** Specifies that "inside" is computed by an odd number of edge
+ crossings
+ */
+ kEvenOdd_FillType,
+ /** Same as Winding, but draws outside of the path, rather than inside
+ */
+ kInverseWinding_FillType,
+ /** Same as EvenOdd, but draws outside of the path, rather than inside
+ */
+ kInverseEvenOdd_FillType
+ };
+
+ /** Return the path's fill type. This is used to define how "inside" is
+ computed. The default value is kWinding_FillType.
+
+ @return the path's fill type
+ */
+ FillType getFillType() const { return (FillType)fFillType; }
+
+ /** Set the path's fill type. This is used to define how "inside" is
+ computed. The default value is kWinding_FillType.
+
+ @param ft The new fill type for this path
+ */
+ void setFillType(FillType ft) {
+ fFillType = SkToU8(ft);
+ }
+
+ /** Returns true if the filltype is one of the Inverse variants */
+ bool isInverseFillType() const { return IsInverseFillType((FillType)fFillType); }
+
+ /**
+ * Toggle between inverse and normal filltypes. This reverses the return
+ * value of isInverseFillType()
+ */
+ void toggleInverseFillType() {
+ fFillType ^= 2;
+ }
+
+ enum Convexity {
+ kUnknown_Convexity,
+ kConvex_Convexity,
+ kConcave_Convexity
+ };
+
+ /**
+ * Return the path's convexity, as stored in the path. If it is currently unknown,
+ * then this function will attempt to compute the convexity (and cache the result).
+ */
+ Convexity getConvexity() const {
+ if (kUnknown_Convexity != fConvexity) {
+ return static_cast<Convexity>(fConvexity);
+ } else {
+ return this->internalGetConvexity();
+ }
+ }
+
+ /**
+ * Return the currently cached value for convexity, even if that is set to
+ * kUnknown_Convexity. Note: getConvexity() will automatically call
+ * ComputeConvexity and cache its return value if the current setting is
+ * kUnknown.
+ */
+ Convexity getConvexityOrUnknown() const { return (Convexity)fConvexity; }
+
+ /**
+ * Store a convexity setting in the path. There is no automatic check to
+ * see if this value actually agrees with the return value that would be
+ * computed by getConvexity().
+ *
+ * Note: even if this is set to a "known" value, if the path is later
+ * changed (e.g. lineTo(), addRect(), etc.) then the cached value will be
+ * reset to kUnknown_Convexity.
+ */
+ void setConvexity(Convexity);
+
+ /**
+ * Returns true if the path is flagged as being convex. This is not
+ * confirmed by any analysis; it is just the value set earlier.
+ */
+ bool isConvex() const {
+ return kConvex_Convexity == this->getConvexity();
+ }
+
+ /**
+ * Set the isConvex flag to true or false. Convex paths may draw faster if
+ * this flag is set, though setting this to true on a path that is in fact
+ * not convex can give undefined results when drawn. Paths default to
+ * isConvex == false
+ */
+ SK_ATTR_DEPRECATED("use setConvexity")
+ void setIsConvex(bool isConvex) {
+ this->setConvexity(isConvex ? kConvex_Convexity : kConcave_Convexity);
+ }
+
+ /** Returns true if the path is an oval.
+ *
+ * @param rect returns the bounding rect of this oval. It's a circle
+ * if the height and width are the same.
+ * @param dir is the oval CCW (or CW if false).
+ * @param start indicates where the contour starts on the oval (see
+ * SkPath::addOval for interpretation of the index).
+ * @return true if this path is an oval.
+ * Tracking whether a path is an oval is considered an
+ * optimization for performance and so some paths that are in
+ * fact ovals can report false.
+ */
+ bool isOval(SkRect* rect, Direction* dir = nullptr,
+ unsigned* start = nullptr) const {
+ bool isCCW = false;
+ bool result = fPathRef->isOval(rect, &isCCW, start);
+ if (dir && result) {
+ *dir = isCCW ? kCCW_Direction : kCW_Direction;
+ }
+ return result;
+ }
+
+ /** Returns true if the path is a round rect.
+ *
+ * @param rrect Returns the bounding rect and radii of this round rect.
+ * @param dir is the rrect CCW (or CW if false).
+ * @param start indicates where the contour starts on the rrect (see
+ * SkPath::addRRect for interpretation of the index).
+ *
+ * @return true if this path is a round rect.
+ * Tracking whether a path is a round rect is considered an
+ * optimization for performance and so some paths that are in
+ * fact round rects can report false.
+ */
+ bool isRRect(SkRRect* rrect, Direction* dir = nullptr,
+ unsigned* start = nullptr) const {
+ bool isCCW = false;
+ bool result = fPathRef->isRRect(rrect, &isCCW, start);
+ if (dir && result) {
+ *dir = isCCW ? kCCW_Direction : kCW_Direction;
+ }
+ return result;
+ }
+
+ /** Clear any lines and curves from the path, making it empty. This frees up
+ internal storage associated with those segments.
+ On Android, does not change fSourcePath.
+ */
+ void reset();
+
+ /** Similar to reset(), in that all lines and curves are removed from the
+ path. However, any internal storage for those lines/curves is retained,
+ making reuse of the path potentially faster.
+ On Android, does not change fSourcePath.
+ */
+ void rewind();
+
+ /** Returns true if the path is empty (contains no lines or curves)
+
+ @return true if the path is empty (contains no lines or curves)
+ */
+ bool isEmpty() const {
+ SkDEBUGCODE(this->validate();)
+ return 0 == fPathRef->countVerbs();
+ }
+
+ /** Return true if the last contour of this path ends with a close verb.
+ */
+ bool isLastContourClosed() const;
+
+ /**
+ * Returns true if all of the points in this path are finite, meaning there
+ * are no infinities and no NaNs.
+ */
+ bool isFinite() const {
+ SkDEBUGCODE(this->validate();)
+ return fPathRef->isFinite();
+ }
+
+ /** Returns true if the path is volatile (i.e. should not be cached by devices.)
+ */
+ bool isVolatile() const {
+ return SkToBool(fIsVolatile);
+ }
+
+ /** Specify whether this path is volatile. Paths are not volatile by
+ default. Temporary paths that are discarded or modified after use should be
+ marked as volatile. This provides a hint to the device that the path
+ should not be cached. Providing this hint when appropriate can
+ improve performance by avoiding unnecessary overhead and resource
+ consumption on the device.
+ */
+ void setIsVolatile(bool isVolatile) {
+ fIsVolatile = isVolatile;
+ }
+
+ /** Test a line for zero length
+
+ @return true if the line is of zero length; otherwise false.
+ */
+ static bool IsLineDegenerate(const SkPoint& p1, const SkPoint& p2, bool exact) {
+ return exact ? p1 == p2 : p1.equalsWithinTolerance(p2);
+ }
+
+ /** Test a quad for zero length
+
+ @return true if the quad is of zero length; otherwise false.
+ */
+ static bool IsQuadDegenerate(const SkPoint& p1, const SkPoint& p2,
+ const SkPoint& p3, bool exact) {
+ return exact ? p1 == p2 && p2 == p3 : p1.equalsWithinTolerance(p2) &&
+ p2.equalsWithinTolerance(p3);
+ }
+
+ /** Test a cubic curve for zero length
+
+ @return true if the cubic is of zero length; otherwise false.
+ */
+ static bool IsCubicDegenerate(const SkPoint& p1, const SkPoint& p2,
+ const SkPoint& p3, const SkPoint& p4, bool exact) {
+ return exact ? p1 == p2 && p2 == p3 && p3 == p4 : p1.equalsWithinTolerance(p2) &&
+ p2.equalsWithinTolerance(p3) &&
+ p3.equalsWithinTolerance(p4);
+ }
+
+ /**
+ * Returns true if the path specifies a single line (i.e. it contains just
+ * a moveTo and a lineTo). If so, and line[] is not null, it sets the 2
+ * points in line[] to the end-points of the line. If the path is not a
+ * line, returns false and ignores line[].
+ */
+ bool isLine(SkPoint line[2]) const;
+
+ /** Return the number of points in the path
+ */
+ int countPoints() const;
+
+ /** Return the point at the specified index. If the index is out of range
+ (i.e. is not 0 <= index < countPoints()) then the returned coordinates
+ will be (0,0)
+ */
+ SkPoint getPoint(int index) const;
+
+ /** Returns the number of points in the path. Up to max points are copied.
+
+ @param points If not null, receives up to max points
+ @param max The maximum number of points to copy into points
+ @return the actual number of points in the path
+ */
+ int getPoints(SkPoint points[], int max) const;
+
+ /** Return the number of verbs in the path
+ */
+ int countVerbs() const;
+
+ /** Returns the number of verbs in the path. Up to max verbs are copied. The
+ verbs are copied as one byte per verb.
+
+ @param verbs If not null, receives up to max verbs
+ @param max The maximum number of verbs to copy into verbs
+ @return the actual number of verbs in the path
+ */
+ int getVerbs(uint8_t verbs[], int max) const;
+
+ //! Swap contents of this and other. Guaranteed not to throw
+ void swap(SkPath& other);
+
+ /**
+ * Returns the bounds of the path's points. If the path contains zero points/verbs, this
+ * will return the "empty" rect [0, 0, 0, 0].
+ * Note: these bounds may be larger than the actual shape, since curves
+ * do not extend as far as their control points. Additionally this bound encompasses all points,
+ * even isolated moveTos either preceding or following the last non-degenerate contour.
+ */
+ const SkRect& getBounds() const {
+ return fPathRef->getBounds();
+ }
+
+ /** Calling this will, if the internal cache of the bounds is out of date,
+ update it so that subsequent calls to getBounds will be instantaneous.
+ This also means that any copies or simple transformations of the path
+ will inherit the cached bounds.
+ */
+ void updateBoundsCache() const {
+ // for now, just calling getBounds() is sufficient
+ this->getBounds();
+ }
+
+ /**
+ * Does a conservative test to see whether a rectangle is inside a path. Currently it only
+ * will ever return true for single convex contour paths. The empty-status of the rect is not
+ * considered (e.g. a rect that is a point can be inside a path). Points or line segments where
+ * the rect edge touches the path border are not considered containment violations.
+ */
+ bool conservativelyContainsRect(const SkRect& rect) const;
+
+ // Construction methods
+
+ /** Hint to the path to prepare for adding more points. This can allow the
+ path to more efficiently grow its storage.
+
+ @param extraPtCount The number of extra points the path should
+ preallocate for.
+ */
+ void incReserve(unsigned extraPtCount);
+
+ /** Set the beginning of the next contour to the point (x,y).
+
+ @param x The x-coordinate of the start of a new contour
+ @param y The y-coordinate of the start of a new contour
+ */
+ void moveTo(SkScalar x, SkScalar y);
+
+ /** Set the beginning of the next contour to the point
+
+ @param p The start of a new contour
+ */
+ void moveTo(const SkPoint& p) {
+ this->moveTo(p.fX, p.fY);
+ }
+
+ /** Set the beginning of the next contour relative to the last point on the
+ previous contour. If there is no previous contour, this is treated the
+ same as moveTo().
+
+ @param dx The amount to add to the x-coordinate of the end of the
+ previous contour, to specify the start of a new contour
+ @param dy The amount to add to the y-coordinate of the end of the
+ previous contour, to specify the start of a new contour
+ */
+ void rMoveTo(SkScalar dx, SkScalar dy);
+
+ /** Add a line from the last point to the specified point (x,y). If no
+ moveTo() call has been made for this contour, the first point is
+ automatically set to (0,0).
+
+ @param x The x-coordinate of the end of a line
+ @param y The y-coordinate of the end of a line
+ */
+ void lineTo(SkScalar x, SkScalar y);
+
+ /** Add a line from the last point to the specified point. If no moveTo()
+ call has been made for this contour, the first point is automatically
+ set to (0,0).
+
+ @param p The end of a line
+ */
+ void lineTo(const SkPoint& p) {
+ this->lineTo(p.fX, p.fY);
+ }
+
+ /** Same as lineTo, but the coordinates are considered relative to the last
+ point on this contour. If there is no previous point, then a moveTo(0,0)
+ is inserted automatically.
+
+ @param dx The amount to add to the x-coordinate of the previous point
+ on this contour, to specify a line
+ @param dy The amount to add to the y-coordinate of the previous point
+ on this contour, to specify a line
+ */
+ void rLineTo(SkScalar dx, SkScalar dy);
+
+ /** Add a quadratic bezier from the last point, approaching control point
+ (x1,y1), and ending at (x2,y2). If no moveTo() call has been made for
+ this contour, the first point is automatically set to (0,0).
+
+ @param x1 The x-coordinate of the control point on a quadratic curve
+ @param y1 The y-coordinate of the control point on a quadratic curve
+ @param x2 The x-coordinate of the end point on a quadratic curve
+ @param y2 The y-coordinate of the end point on a quadratic curve
+ */
+ void quadTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2);
+
+ /** Add a quadratic bezier from the last point, approaching control point
+ p1, and ending at p2. If no moveTo() call has been made for this
+ contour, the first point is automatically set to (0,0).
+
+ @param p1 The control point on a quadratic curve
+ @param p2 The end point on a quadratic curve
+ */
+ void quadTo(const SkPoint& p1, const SkPoint& p2) {
+ this->quadTo(p1.fX, p1.fY, p2.fX, p2.fY);
+ }
+
+ /** Same as quadTo, but the coordinates are considered relative to the last
+ point on this contour. If there is no previous point, then a moveTo(0,0)
+ is inserted automatically.
+
+ @param dx1 The amount to add to the x-coordinate of the last point on
+ this contour, to specify the control point of a quadratic curve
+ @param dy1 The amount to add to the y-coordinate of the last point on
+ this contour, to specify the control point of a quadratic curve
+ @param dx2 The amount to add to the x-coordinate of the last point on
+ this contour, to specify the end point of a quadratic curve
+ @param dy2 The amount to add to the y-coordinate of the last point on
+ this contour, to specify the end point of a quadratic curve
+ */
+ void rQuadTo(SkScalar dx1, SkScalar dy1, SkScalar dx2, SkScalar dy2);
+
+ void conicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2,
+ SkScalar w);
+ void conicTo(const SkPoint& p1, const SkPoint& p2, SkScalar w) {
+ this->conicTo(p1.fX, p1.fY, p2.fX, p2.fY, w);
+ }
+ void rConicTo(SkScalar dx1, SkScalar dy1, SkScalar dx2, SkScalar dy2,
+ SkScalar w);
+
+ /** Add a cubic bezier from the last point, approaching control points
+ (x1,y1) and (x2,y2), and ending at (x3,y3). If no moveTo() call has been
+ made for this contour, the first point is automatically set to (0,0).
+
+ @param x1 The x-coordinate of the 1st control point on a cubic curve
+ @param y1 The y-coordinate of the 1st control point on a cubic curve
+ @param x2 The x-coordinate of the 2nd control point on a cubic curve
+ @param y2 The y-coordinate of the 2nd control point on a cubic curve
+ @param x3 The x-coordinate of the end point on a cubic curve
+ @param y3 The y-coordinate of the end point on a cubic curve
+ */
+ void cubicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2,
+ SkScalar x3, SkScalar y3);
+
+ /** Add a cubic bezier from the last point, approaching control points p1
+ and p2, and ending at p3. If no moveTo() call has been made for this
+ contour, the first point is automatically set to (0,0).
+
+ @param p1 The 1st control point on a cubic curve
+ @param p2 The 2nd control point on a cubic curve
+ @param p3 The end point on a cubic curve
+ */
+ void cubicTo(const SkPoint& p1, const SkPoint& p2, const SkPoint& p3) {
+ this->cubicTo(p1.fX, p1.fY, p2.fX, p2.fY, p3.fX, p3.fY);
+ }
+
+ /** Same as cubicTo, but the coordinates are considered relative to the
+ current point on this contour. If there is no previous point, then a
+ moveTo(0,0) is inserted automatically.
+
+ @param dx1 The amount to add to the x-coordinate of the last point on
+ this contour, to specify the 1st control point of a cubic curve
+ @param dy1 The amount to add to the y-coordinate of the last point on
+ this contour, to specify the 1st control point of a cubic curve
+ @param dx2 The amount to add to the x-coordinate of the last point on
+ this contour, to specify the 2nd control point of a cubic curve
+ @param dy2 The amount to add to the y-coordinate of the last point on
+ this contour, to specify the 2nd control point of a cubic curve
+ @param dx3 The amount to add to the x-coordinate of the last point on
+ this contour, to specify the end point of a cubic curve
+ @param dy3 The amount to add to the y-coordinate of the last point on
+ this contour, to specify the end point of a cubic curve
+ */
+ void rCubicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2,
+ SkScalar x3, SkScalar y3);
+
+ /**
+ * Append the specified arc to the path. If the start of the arc is different from the path's
+ * current last point, then an automatic lineTo() is added to connect the current contour
+ * to the start of the arc. However, if the path is empty, then we call moveTo() with
+ * the first point of the arc. The sweep angle is treated mod 360.
+ *
+ * @param oval The bounding oval defining the shape and size of the arc
+ * @param startAngle Starting angle (in degrees) where the arc begins
+ * @param sweepAngle Sweep angle (in degrees) measured clockwise. This is treated mod 360.
+ * @param forceMoveTo If true, always begin a new contour with the arc
+ */
+ void arcTo(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle, bool forceMoveTo);
+
+ /**
+ * Append a line and arc to the current path. This is the same as the PostScript call "arct".
+ */
+ void arcTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2, SkScalar radius);
+
+ /** Append a line and arc to the current path. This is the same as the
+ PostScript call "arct".
+ */
+ void arcTo(const SkPoint p1, const SkPoint p2, SkScalar radius) {
+ this->arcTo(p1.fX, p1.fY, p2.fX, p2.fY, radius);
+ }
+
+ enum ArcSize {
+ /** the smaller of the two possible SVG arcs. */
+ kSmall_ArcSize,
+ /** the larger of the two possible SVG arcs. */
+ kLarge_ArcSize,
+ };
+
+ /**
+ * Append an elliptical arc from the current point in the format used by SVG.
+ * The center of the ellipse is computed to satisfy the constraints below.
+ *
+ * @param rx,ry The radii in the x and y directions respectively.
+ * @param xAxisRotate The angle in degrees relative to the x-axis.
+ * @param largeArc Determines whether the smallest or largest arc possible
+ * is drawn.
+ * @param sweep Determines if the arc should be swept in an anti-clockwise or
+ * clockwise direction. Note that this enum value is opposite the SVG
+ * arc sweep value.
+ * @param x,y The destination coordinates.
+ */
+ void arcTo(SkScalar rx, SkScalar ry, SkScalar xAxisRotate, ArcSize largeArc,
+ Direction sweep, SkScalar x, SkScalar y);
+
+ void arcTo(const SkPoint r, SkScalar xAxisRotate, ArcSize largeArc, Direction sweep,
+ const SkPoint xy) {
+ this->arcTo(r.fX, r.fY, xAxisRotate, largeArc, sweep, xy.fX, xy.fY);
+ }
+
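As a sketch (editor's addition), the elliptical overload above can reproduce an SVG arc command; the mapping of SVG's sweep flag 1 to kCW_Direction follows the note above that the enum value is opposite the SVG value.

    // Roughly equivalent to the SVG path "M 10 80 A 45 45 0 0 1 90 80".
    SkPath p;
    p.moveTo(10, 80);
    p.arcTo(45, 45,                  // rx, ry
            0,                       // xAxisRotate, in degrees
            SkPath::kSmall_ArcSize,  // SVG large-arc-flag = 0
            SkPath::kCW_Direction,   // SVG sweep-flag = 1 (enum value is inverted vs. SVG)
            90, 80);                 // destination point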
+ /** Same as arcTo format used by SVG, but the destination coordinate is relative to the
+ * last point on this contour. If there is no previous point, then a
+ * moveTo(0,0) is inserted automatically.
+ *
+ * @param rx,ry The radii in the x and y directions respectively.
+ * @param xAxisRotate The angle in degrees relative to the x-axis.
+ * @param largeArc Determines whether the smallest or largest arc possible
+ * is drawn.
+ * @param sweep Determines if the arc should be swept in an anti-clockwise or
+ * clockwise direction. Note that this enum value is opposite the SVG
+ * arc sweep value.
+ * @param dx,dy The destination coordinates relative to the last point.
+ */
+ void rArcTo(SkScalar rx, SkScalar ry, SkScalar xAxisRotate, ArcSize largeArc,
+ Direction sweep, SkScalar dx, SkScalar dy);
+
+ /** Close the current contour. If the current point is not equal to the
+ first point of the contour, a line segment is automatically added.
+ */
+ void close();
+
+ /**
+ * Returns whether or not a fill type is inverted
+ *
+ * kWinding_FillType -> false
+ * kEvenOdd_FillType -> false
+ * kInverseWinding_FillType -> true
+ * kInverseEvenOdd_FillType -> true
+ */
+ static bool IsInverseFillType(FillType fill) {
+ static_assert(0 == kWinding_FillType, "fill_type_mismatch");
+ static_assert(1 == kEvenOdd_FillType, "fill_type_mismatch");
+ static_assert(2 == kInverseWinding_FillType, "fill_type_mismatch");
+ static_assert(3 == kInverseEvenOdd_FillType, "fill_type_mismatch");
+ return (fill & 2) != 0;
+ }
+
+ /**
+ * Returns the equivalent non-inverted fill type to the given fill type
+ *
+ * kWinding_FillType -> kWinding_FillType
+ * kEvenOdd_FillType -> kEvenOdd_FillType
+ * kInverseWinding_FillType -> kWinding_FillType
+ * kInverseEvenOdd_FillType -> kEvenOdd_FillType
+ */
+ static FillType ConvertToNonInverseFillType(FillType fill) {
+ static_assert(0 == kWinding_FillType, "fill_type_mismatch");
+ static_assert(1 == kEvenOdd_FillType, "fill_type_mismatch");
+ static_assert(2 == kInverseWinding_FillType, "fill_type_mismatch");
+ static_assert(3 == kInverseEvenOdd_FillType, "fill_type_mismatch");
+ return (FillType)(fill & 1);
+ }
+
+ /**
+ * Chop a conic into N quads, stored contiguously in pts[], where
+ * N = 1 << pow2. The amount of storage needed is (1 + 2 * N) points.
+ */
+ static int ConvertConicToQuads(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2,
+ SkScalar w, SkPoint pts[], int pow2);
+
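A minimal sketch of the storage contract just described (editor's addition; the control points and weight are hypothetical): for a given pow2, the caller provides 1 + 2 * (1 << pow2) points.

    const int pow2 = 1;                // split into N = 1 << pow2 = 2 quads
    const int N = 1 << pow2;
    SkPoint pts[1 + 2 * N];            // exactly the storage the comment above calls for
    const SkPoint p0 = {0, 0}, p1 = {50, 100}, p2 = {100, 0};
    int quads = SkPath::ConvertConicToQuads(p0, p1, p2, SK_ScalarRoot2Over2, pts, pow2);
    // Quad i uses pts[2*i], pts[2*i + 1], pts[2*i + 2]; adjacent quads share an end point.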
+ /**
+ * Returns true if the path specifies a rectangle.
+ *
+ * If this returns false, then all output parameters are ignored and left
+ * unchanged. If this returns true, then each output parameter is checked
+ * for NULL, and each non-NULL output parameter is set accordingly.
+ *
+ * @param rect If not null, set to the bounds of the rectangle.
+ * Note : this bounds may be smaller than the path's bounds, since it is just
+ * the bounds of the "drawable" parts of the path. e.g. a trailing MoveTo would
+ * be ignored in this rect, but not by the path's bounds
+ * @param isClosed If not null, set to true if the path is closed
+ * @param direction If not null, set to the rectangle's direction
+ * @return true if the path specifies a rectangle
+ */
+ bool isRect(SkRect* rect, bool* isClosed = NULL, Direction* direction = NULL) const;
+
+ /** Returns true if the path specifies a pair of nested rectangles, or would draw a
+ pair of nested rectangles when filled. If so, and if
+ rect is not null, set rect[0] to the outer rectangle and rect[1] to the inner
+ rectangle. If so, and dirs is not null, set dirs[0] to the direction of
+ the outer rectangle and dirs[1] to the direction of the inner rectangle. If
+ the path does not specify a pair of nested rectangles, return
+ false and ignore rect and dirs.
+
+ @param rect If not null, returns the path as a pair of nested rectangles
+ @param dirs If not null, returns the direction of the rects
+ @return true if the path describes a pair of nested rectangles
+ */
+ bool isNestedFillRects(SkRect rect[2], Direction dirs[2] = NULL) const;
+
+ /**
+ * Add a closed rectangle contour to the path
+ * @param rect The rectangle to add as a closed contour to the path
+ * @param dir The direction to wind the rectangle's contour.
+ *
+ * Note: the contour initial point index is 0 (as defined below).
+ */
+ void addRect(const SkRect& rect, Direction dir = kCW_Direction);
+
+ /**
+ * Add a closed rectangle contour to the path
+ * @param rect The rectangle to add as a closed contour to the path
+ * @param dir The direction to wind the rectangle's contour.
+ * @param start Initial point of the contour (initial moveTo), expressed as
+ * a corner index, starting in the upper-left position, clock-wise:
+ *
+ * 0 1
+ * *-------*
+ * | |
+ * *-------*
+ * 3 2
+ */
+ void addRect(const SkRect& rect, Direction dir, unsigned start);
+
+ /**
+ * Add a closed rectangle contour to the path
+ *
+ * @param left The left side of a rectangle to add as a closed contour
+ * to the path
+ * @param top The top of a rectangle to add as a closed contour to the
+ * path
+ * @param right The right side of a rectangle to add as a closed contour
+ * to the path
+ * @param bottom The bottom of a rectangle to add as a closed contour to
+ * the path
+ * @param dir The direction to wind the rectangle's contour.
+ *
+ * Note: the contour initial point index is 0 (as defined above).
+ */
+ void addRect(SkScalar left, SkScalar top, SkScalar right, SkScalar bottom,
+ Direction dir = kCW_Direction);
+
+ /**
+ * Add a closed oval contour to the path
+ *
+ * @param oval The bounding oval to add as a closed contour to the path
+ * @param dir The direction to wind the oval's contour.
+ *
+ * Note: the contour initial point index is 1 (as defined below).
+ */
+ void addOval(const SkRect& oval, Direction dir = kCW_Direction);
+
+ /**
+ * Add a closed oval contour to the path
+ *
+ * @param oval The bounding oval to add as a closed contour to the path
+ * @param dir The direction to wind the oval's contour.
+ * @param start Initial point of the contour (initial moveTo), expressed
+ * as an ellipse vertex index, starting at the top, clock-wise
+ * (90/0/270/180deg order):
+ *
+ * 0
+ * -*-
+ * | |
+ * 3 * * 1
+ * | |
+ * -*-
+ * 2
+ */
+ void addOval(const SkRect& oval, Direction dir, unsigned start);
+
+ /**
+ * Add a closed circle contour to the path. The circle contour begins at
+ * the right-most point (as though 1 were passed to addOval's 'start' param).
+ *
+ * @param x The x-coordinate of the center of a circle to add as a
+ * closed contour to the path
+ * @param y The y-coordinate of the center of a circle to add as a
+ * closed contour to the path
+ * @param radius The radius of a circle to add as a closed contour to the
+ * path
+ * @param dir The direction to wind the circle's contour.
+ */
+ void addCircle(SkScalar x, SkScalar y, SkScalar radius,
+ Direction dir = kCW_Direction);
+
+ /** Add the specified arc to the path as a new contour.
+
+ @param oval The bounds of oval used to define the size of the arc
+ @param startAngle Starting angle (in degrees) where the arc begins
+ @param sweepAngle Sweep angle (in degrees) measured clockwise
+ */
+ void addArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle);
+
+ /**
+ * Add a closed round-rectangle contour to the path
+ * @param rect The bounds of a round-rectangle to add as a closed contour
+ * @param rx The x-radius of the rounded corners on the round-rectangle
+ * @param ry The y-radius of the rounded corners on the round-rectangle
+ * @param dir The direction to wind the rectangle's contour.
+ */
+ void addRoundRect(const SkRect& rect, SkScalar rx, SkScalar ry,
+ Direction dir = kCW_Direction);
+
+ /**
+ * Add a closed round-rectangle contour to the path. Each corner receives
+ * two radius values [X, Y]. The corners are ordered top-left, top-right,
+ * bottom-right, bottom-left.
+ * @param rect The bounds of a round-rectangle to add as a closed contour
+ * @param radii Array of 8 scalars, 4 [X,Y] pairs for each corner
+ * @param dir The direction to wind the rectangle's contour.
+ * Note: The radii here now go through the same constraint handling as the
+ * SkRRect radii (i.e., either radii at a corner being 0 implies a
+ * square corner and oversized radii are proportionally scaled down).
+ */
+ void addRoundRect(const SkRect& rect, const SkScalar radii[],
+ Direction dir = kCW_Direction);
+
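A short sketch (editor's addition) of the radii ordering described above: four [X, Y] pairs, top-left first, proceeding clockwise; the rectangle size and radii are hypothetical.

    SkPath p;
    const SkScalar radii[8] = { 12, 12,    // top-left     [rx, ry]
                                12, 12,    // top-right
                                 0,  0,    // bottom-right (square)
                                 0,  0 };  // bottom-left  (square)
    p.addRoundRect(SkRect::MakeWH(100, 60), radii, SkPath::kCW_Direction);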
+ /**
+ * Add an SkRRect contour to the path
+ * @param rrect The rounded rect to add as a closed contour
+ * @param dir The winding direction for the new contour.
+ *
+ * Note: the contour initial point index is either 6 (for dir == kCW_Direction)
+ * or 7 (for dir == kCCW_Direction), as defined below.
+ *
+ */
+ void addRRect(const SkRRect& rrect, Direction dir = kCW_Direction);
+
+ /**
+ * Add an SkRRect contour to the path
+ * @param rrect The rounded rect to add as a closed contour
+ * @param dir The winding direction for the new contour.
+ * @param start Initial point of the contour (initial moveTo), expressed as
+ * an index of the radii minor/major points, ordered clock-wise:
+ *
+ * 0 1
+ * *----*
+ * 7 * * 2
+ * | |
+ * 6 * * 3
+ * *----*
+ * 5 4
+ */
+ void addRRect(const SkRRect& rrect, Direction dir, unsigned start);
+
+ /**
+ * Add a new contour made of just lines. This is just a fast version of
+ * the following:
+ * this->moveTo(pts[0]);
+ * for (int i = 1; i < count; ++i) {
+ * this->lineTo(pts[i]);
+ * }
+ * if (close) {
+ * this->close();
+ * }
+ */
+ void addPoly(const SkPoint pts[], int count, bool close);
+
+ enum AddPathMode {
+ /** Source path contours are added as new contours.
+ */
+ kAppend_AddPathMode,
+ /** Path is added by extending the last contour of the destination path
+ with the first contour of the source path. If the last contour of
+ the destination path is closed, then it will not be extended.
+ Instead, the start of the source path will be extended by a straight
+ line to the end point of the destination path.
+ */
+ kExtend_AddPathMode
+ };
+
+ /** Add a copy of src to the path, offset by (dx,dy)
+ @param src The path to add as a new contour
+ @param dx The amount to translate the path in X as it is added
+ @param dy The amount to translate the path in Y as it is added
+ */
+ void addPath(const SkPath& src, SkScalar dx, SkScalar dy,
+ AddPathMode mode = kAppend_AddPathMode);
+
+ /** Add a copy of src to the path
+ */
+ void addPath(const SkPath& src, AddPathMode mode = kAppend_AddPathMode) {
+ SkMatrix m;
+ m.reset();
+ this->addPath(src, m, mode);
+ }
+
+ /** Add a copy of src to the path, transformed by matrix
+ @param src The path to add as a new contour
+ @param matrix Transform applied to src
+ @param mode Determines how path is added
+ */
+ void addPath(const SkPath& src, const SkMatrix& matrix, AddPathMode mode = kAppend_AddPathMode);
+
+ /**
+ * Same as addPath(), but reverses the src input
+ */
+ void reverseAddPath(const SkPath& src);
+
+ /** Offset the path by (dx,dy), returning true on success
+
+ @param dx The amount in the X direction to offset the entire path
+ @param dy The amount in the Y direction to offset the entire path
+ @param dst The translated path is written here
+ */
+ void offset(SkScalar dx, SkScalar dy, SkPath* dst) const;
+
+ /** Offset the path by (dx,dy), returning true on success
+
+ @param dx The amount in the X direction to offset the entire path
+ @param dy The amount in the Y direction to offset the entire path
+ */
+ void offset(SkScalar dx, SkScalar dy) {
+ this->offset(dx, dy, this);
+ }
+
+ /** Transform the points in this path by matrix, and write the answer into
+ dst.
+
+ @param matrix The matrix to apply to the path
+ @param dst The transformed path is written here
+ */
+ void transform(const SkMatrix& matrix, SkPath* dst) const;
+
+ /** Transform the points in this path by matrix
+
+ @param matrix The matrix to apply to the path
+ */
+ void transform(const SkMatrix& matrix) {
+ this->transform(matrix, this);
+ }
+
+ /** Return the last point on the path. If no points have been added, (0,0)
+ is returned. If there are no points, this returns false, otherwise it
+ returns true.
+
+ @param lastPt The last point on the path is returned here
+ */
+ bool getLastPt(SkPoint* lastPt) const;
+
+ /** Set the last point on the path. If no points have been added,
+ moveTo(x,y) is automatically called.
+
+ @param x The new x-coordinate for the last point
+ @param y The new y-coordinate for the last point
+ */
+ void setLastPt(SkScalar x, SkScalar y);
+
+ /** Set the last point on the path. If no points have been added, moveTo(p)
+ is automatically called.
+
+ @param p The new location for the last point
+ */
+ void setLastPt(const SkPoint& p) {
+ this->setLastPt(p.fX, p.fY);
+ }
+
+ enum SegmentMask {
+ kLine_SegmentMask = 1 << 0,
+ kQuad_SegmentMask = 1 << 1,
+ kConic_SegmentMask = 1 << 2,
+ kCubic_SegmentMask = 1 << 3,
+ };
+
+ /**
+ * Returns a mask, where each bit corresponding to a SegmentMask is
+ * set if the path contains 1 or more segments of that type.
+ * Returns 0 for an empty path (no segments).
+ */
+ uint32_t getSegmentMasks() const { return fPathRef->getSegmentMasks(); }
+
+ enum Verb {
+ kMove_Verb, //!< iter.next returns 1 point
+ kLine_Verb, //!< iter.next returns 2 points
+ kQuad_Verb, //!< iter.next returns 3 points
+ kConic_Verb, //!< iter.next returns 3 points + iter.conicWeight()
+ kCubic_Verb, //!< iter.next returns 4 points
+ kClose_Verb, //!< iter.next returns 0 points
+ kDone_Verb, //!< iter.next returns 0 points
+ };
+
+ /** Iterate through all of the segments (lines, quadratics, cubics) of
+ each contour in a path.
+
+ The iterator cleans up the segments along the way, removing degenerate
+ segments and adding close verbs where necessary. When the forceClose
+ argument is provided, each contour (as defined by a new starting
+ move command) will be completed with a close verb regardless of the
+ contour's contents.
+ */
+ class SK_API Iter {
+ public:
+ Iter();
+ Iter(const SkPath&, bool forceClose);
+
+ void setPath(const SkPath&, bool forceClose);
+
+ /** Return the next verb in this iteration of the path. When all
+ segments have been visited, return kDone_Verb.
+
+ @param pts The points representing the current verb and/or segment
+ @param doConsumeDegerates If true, first scan for segments that are
+ deemed degenerate (too short) and skip those.
+ @param exact if doConsumeDegenerates is true and exact is true, skip only
+ degenerate elements with lengths exactly equal to zero. If exact
+ is false, skip degenerate elements with lengths close to zero. If
+ doConsumeDegenerates is false, exact has no effect.
+ @return The verb for the current segment
+ */
+ Verb next(SkPoint pts[4], bool doConsumeDegerates = true, bool exact = false) {
+ if (doConsumeDegerates) {
+ this->consumeDegenerateSegments(exact);
+ }
+ return this->doNext(pts);
+ }
+
+ /**
+ * Return the weight for the current conic. Only valid if the current
+ * segment returned by next() was a conic.
+ */
+ SkScalar conicWeight() const { return *fConicWeights; }
+
+ /** If next() returns kLine_Verb, then this query returns true if the
+ line was the result of a close() command (i.e. the end point is the
+ initial moveto for this contour). If next() returned a different
+ verb, this returns an undefined value.
+
+ @return If the last call to next() returned kLine_Verb, return true
+ if it was the result of an explicit close command.
+ */
+ bool isCloseLine() const { return SkToBool(fCloseLine); }
+
+ /** Returns true if the current contour is closed (has a kClose_Verb)
+ @return true if the current contour is closed (has a kClose_Verb)
+ */
+ bool isClosedContour() const;
+
+ private:
+ const SkPoint* fPts;
+ const uint8_t* fVerbs;
+ const uint8_t* fVerbStop;
+ const SkScalar* fConicWeights;
+ SkPoint fMoveTo;
+ SkPoint fLastPt;
+ SkBool8 fForceClose;
+ SkBool8 fNeedClose;
+ SkBool8 fCloseLine;
+ SkBool8 fSegmentState;
+
+ inline const SkPoint& cons_moveTo();
+ Verb autoClose(SkPoint pts[2]);
+ void consumeDegenerateSegments(bool exact);
+ Verb doNext(SkPoint pts[4]);
+ };
+
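A typical iteration loop over the class above (editor's addition; 'path' stands for any SkPath in scope):

    SkPath::Iter iter(path, /*forceClose=*/false);
    SkPoint pts[4];
    for (SkPath::Verb verb = iter.next(pts); verb != SkPath::kDone_Verb; verb = iter.next(pts)) {
        switch (verb) {
            case SkPath::kMove_Verb:  /* pts[0] starts a new contour */                          break;
            case SkPath::kLine_Verb:  /* line from pts[0] to pts[1] */                           break;
            case SkPath::kQuad_Verb:  /* quad through pts[0..2] */                               break;
            case SkPath::kConic_Verb: /* conic through pts[0..2], weight = iter.conicWeight() */ break;
            case SkPath::kCubic_Verb: /* cubic through pts[0..3] */                              break;
            default:                  /* kClose_Verb: the contour was closed */                  break;
        }
    }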
+ /** Iterate through the verbs in the path, providing the associated points.
+ */
+ class SK_API RawIter {
+ public:
+ RawIter() {}
+ RawIter(const SkPath& path) {
+ setPath(path);
+ }
+
+ void setPath(const SkPath& path) {
+ fRawIter.setPathRef(*path.fPathRef.get());
+ }
+
+ /** Return the next verb in this iteration of the path. When all
+ segments have been visited, return kDone_Verb.
+
+ @param pts The points representing the current verb and/or segment
+ This must not be NULL.
+ @return The verb for the current segment
+ */
+ Verb next(SkPoint pts[4]) {
+ return (Verb) fRawIter.next(pts);
+ }
+
+ /** Return what the next verb will be, but do not visit the next segment.
+
+ @return The verb for the next segment
+ */
+ Verb peek() const {
+ return (Verb) fRawIter.peek();
+ }
+
+ SkScalar conicWeight() const {
+ return fRawIter.conicWeight();
+ }
+
+ private:
+ SkPathRef::Iter fRawIter;
+ friend class SkPath;
+ };
+
+ /**
+ * Returns true if the point { x, y } is contained by the path, taking into
+ * account the FillType.
+ */
+ bool contains(SkScalar x, SkScalar y) const;
+
+ void dump(SkWStream* , bool forceClose, bool dumpAsHex) const;
+ void dump() const;
+ void dumpHex() const;
+
+ /**
+ * Write the path to the buffer, and return the number of bytes written.
+ * If buffer is NULL, it still returns the number of bytes.
+ */
+ size_t writeToMemory(void* buffer) const;
+ /**
+ * Initializes the path from the buffer
+ *
+ * @param buffer Memory to read from
+ * @param length Amount of memory available in the buffer
+ * @return number of bytes read (must be a multiple of 4) or
+ * 0 if there was not enough memory available
+ */
+ size_t readFromMemory(const void* buffer, size_t length);
+
+ /** Returns a non-zero, globally unique value corresponding to the set of verbs
+ and points in the path (but not the fill type [except on Android skbug.com/1762]).
+ Each time the path is modified, a different generation ID will be returned.
+ */
+ uint32_t getGenerationID() const;
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ static const int kPathRefGenIDBitCnt = 30; // leave room for the fill type (skbug.com/1762)
+#else
+ static const int kPathRefGenIDBitCnt = 32;
+#endif
+
+ SkDEBUGCODE(void validate() const;)
+ SkDEBUGCODE(void experimentalValidateRef() const { fPathRef->validate(); } )
+
+private:
+ enum SerializationOffsets {
+ // 1 free bit at 29
+ kUnused1_SerializationShift = 28, // 1 free bit
+ kDirection_SerializationShift = 26, // requires 2 bits
+ kIsVolatile_SerializationShift = 25, // requires 1 bit
+ // 1 free bit at 24
+ kConvexity_SerializationShift = 16, // requires 8 bits
+ kFillType_SerializationShift = 8, // requires 8 bits
+ // low-8-bits are version
+ };
+
+ enum SerializationVersions {
+ kPathPrivFirstDirection_Version = 1,
+ kPathPrivLastMoveToIndex_Version = 2,
+ kCurrent_Version = 2
+ };
+
+ SkAutoTUnref<SkPathRef> fPathRef;
+ int fLastMoveToIndex;
+ uint8_t fFillType;
+ mutable uint8_t fConvexity;
+ mutable SkAtomic<uint8_t, sk_memory_order_relaxed> fFirstDirection;// SkPathPriv::FirstDirection
+ mutable SkBool8 fIsVolatile;
+
+ /** Resets all fields other than fPathRef to their initial 'empty' values.
+ * Assumes the caller has already emptied fPathRef.
+ * On Android increments fGenerationID without resetting it.
+ */
+ void resetFields();
+
+ /** Sets all fields other than fPathRef to the values in 'that'.
+ * Assumes the caller has already set fPathRef.
+ * Doesn't change fGenerationID or fSourcePath on Android.
+ */
+ void copyFields(const SkPath& that);
+
+ friend class Iter;
+ friend class SkPathPriv;
+ friend class SkPathStroker;
+
+ /* Append, in reverse order, the first contour of path, ignoring path's
+ last point. If no moveTo() call has been made for this contour, the
+ first point is automatically set to (0,0).
+ */
+ void reversePathTo(const SkPath&);
+
+ // called before we add points for lineTo, quadTo, cubicTo, checking to see
+ // if we need to inject a leading moveTo first
+ //
+ // SkPath path; path.lineTo(...); <--- need a leading moveTo(0, 0)
+ // SkPath path; ... path.close(); path.lineTo(...) <-- need a moveTo(previous moveTo)
+ //
+ inline void injectMoveToIfNeeded();
+
+ inline bool hasOnlyMoveTos() const;
+
+ Convexity internalGetConvexity() const;
+
+ bool isRectContour(bool allowPartial, int* currVerb, const SkPoint** pts,
+ bool* isClosed, Direction* direction) const;
+
+ // called by stroker to see if all points are equal and worthy of a cap
+ // equivalent to a short-circuit version of getBounds().isEmpty()
+ bool isZeroLength() const;
+
+ /** Returns whether the path can return a bound at no cost (true) or will have to
+ perform some computation (false).
+ */
+ bool hasComputedBounds() const {
+ SkDEBUGCODE(this->validate();)
+ return fPathRef->hasComputedBounds();
+ }
+
+
+ // 'rect' needs to be sorted
+ void setBounds(const SkRect& rect) {
+ SkPathRef::Editor ed(&fPathRef);
+
+ ed.setBounds(rect);
+ }
+
+ void setPt(int index, SkScalar x, SkScalar y);
+
+ friend class SkAutoPathBoundsUpdate;
+ friend class SkAutoDisableOvalCheck;
+ friend class SkAutoDisableDirectionCheck;
+ friend class SkBench_AddPathTest; // perf test reversePathTo
+ friend class PathTest_Private; // unit test reversePathTo
+ friend class ForceIsRRect_Private; // unit test isRRect
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPathEffect.h b/gfx/skia/skia/include/core/SkPathEffect.h
new file mode 100644
index 000000000..f5ca9183a
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPathEffect.h
@@ -0,0 +1,273 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPathEffect_DEFINED
+#define SkPathEffect_DEFINED
+
+#include "SkFlattenable.h"
+#include "SkPath.h"
+#include "SkPoint.h"
+#include "SkRect.h"
+
+class SkPath;
+class SkStrokeRec;
+
+/** \class SkPathEffect
+
+ SkPathEffect is the base class for objects in the SkPaint that affect
+ the geometry of a drawing primitive before it is transformed by the
+ canvas' matrix and drawn.
+
+ Dashing is implemented as a subclass of SkPathEffect.
+*/
+class SK_API SkPathEffect : public SkFlattenable {
+public:
+ /**
+ * Given a src path (input) and a stroke-rec (input and output), apply
+ * this effect to the src path, returning the new path in dst, and return
+ * true. If this effect cannot be applied, return false and ignore dst
+ * and stroke-rec.
+ *
+ * The stroke-rec specifies the initial request for stroking (if any).
+ * The effect can treat this as input only, or it can choose to change
+ * the rec as well. For example, the effect can decide to change the
+ * stroke's width or join, or the effect can change the rec from stroke
+ * to fill (or fill to stroke) in addition to returning a new (dst) path.
+ *
+ * If this method returns true, the caller will apply (as needed) the
+ * resulting stroke-rec to dst and then draw.
+ */
+ virtual bool filterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec*, const SkRect* cullR) const = 0;
+
+ /**
+ * Compute a conservative bounds for its effect, given the src bounds.
+ * The baseline implementation just assigns src to dst.
+ */
+ virtual void computeFastBounds(SkRect* dst, const SkRect& src) const;
+
+ /** \class PointData
+
+ PointData aggregates all the information needed to draw the point
+ primitives returned by an 'asPoints' call.
+ */
+ class PointData {
+ public:
+ PointData()
+ : fFlags(0)
+ , fPoints(NULL)
+ , fNumPoints(0) {
+ fSize.set(SK_Scalar1, SK_Scalar1);
+ // 'asPoints' needs to initialize/fill-in 'fClipRect' if it sets
+ // the kUseClip flag
+ }
+ ~PointData() {
+ delete [] fPoints;
+ }
+
+ // TODO: consider using passed-in flags to limit the work asPoints does.
+ // For example, a kNoPath flag could indicate don't bother generating
+ // stamped solutions.
+
+ // Currently none of these flags are supported.
+ enum PointFlags {
+ kCircles_PointFlag = 0x01, // draw points as circles (instead of rects)
+ kUsePath_PointFlag = 0x02, // draw points as stamps of the returned path
+ kUseClip_PointFlag = 0x04, // apply 'fClipRect' before drawing the points
+ };
+
+ uint32_t fFlags; // flags that impact the drawing of the points
+ SkPoint* fPoints; // the center point of each generated point
+ int fNumPoints; // number of points in fPoints
+ SkVector fSize; // the size to draw the points
+ SkRect fClipRect; // clip required to draw the points (if kUseClip is set)
+ SkPath fPath; // 'stamp' to be used at each point (if kUsePath is set)
+
+ SkPath fFirst; // If not empty, contains geometry for first point
+ SkPath fLast; // If not empty, contains geometry for last point
+ };
+
+ /**
+ * Does applying this path effect to 'src' yield a set of points? If so,
+ * optionally return the points in 'results'.
+ */
+ virtual bool asPoints(PointData* results, const SkPath& src,
+ const SkStrokeRec&, const SkMatrix&,
+ const SkRect* cullR) const;
+
+ /**
+ * If the PathEffect can be represented as a dash pattern, asADash will return kDash_DashType,
+ * and kNone_DashType otherwise. If a non-NULL info is passed in, the DashInfo fields will be
+ * filled in if the PathEffect can be a dash pattern. If the passed-in info has an fCount equal
+ * to or greater than that of the effect, the dash intervals will be memcpy'd into the info.
+ * Thus the general approach is to call asADash once with a default info to get the DashType
+ * and fCount; if the effect can be represented as a dash pattern, allocate space for the
+ * intervals in info, then call asADash again with the same info and the intervals will be copied in.
+ */
+
+ enum DashType {
+ kNone_DashType, //!< ignores the info parameter
+ kDash_DashType, //!< fills in all of the info parameter
+ };
+
+ struct DashInfo {
+ DashInfo() : fIntervals(NULL), fCount(0), fPhase(0) {}
+ DashInfo(SkScalar* intervals, int32_t count, SkScalar phase)
+ : fIntervals(intervals), fCount(count), fPhase(phase) {}
+
+ SkScalar* fIntervals; //!< Length of on/off intervals for dashed lines
+ // Even indices are "on" lengths, odd indices are "off" lengths
+ int32_t fCount; //!< Number of intervals in the dash. Should be an even number
+ SkScalar fPhase; //!< Offset into the dashed interval pattern
+ // mod the sum of all intervals
+ };
+
+ virtual DashType asADash(DashInfo* info) const;
+
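A sketch of the two-call protocol described above (editor's addition; 'effect' is any SkPathEffect pointer in scope):

    SkPathEffect::DashInfo info;
    if (effect->asADash(&info) == SkPathEffect::kDash_DashType) {
        std::vector<SkScalar> storage(info.fCount);  // requires <vector>
        info.fIntervals = storage.data();
        effect->asADash(&info);                      // second call copies the intervals
        // storage[] now alternates on/off lengths; info.fPhase is the offset into the pattern.
    }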
+ SK_TO_STRING_PUREVIRT()
+ SK_DEFINE_FLATTENABLE_TYPE(SkPathEffect)
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ /// Override for subclasses as appropriate.
+ virtual bool exposedInAndroidJavaAPI() const { return false; }
+#endif
+
+protected:
+ SkPathEffect() {}
+
+private:
+ // illegal
+ SkPathEffect(const SkPathEffect&);
+ SkPathEffect& operator=(const SkPathEffect&);
+
+ typedef SkFlattenable INHERITED;
+};
+
+/** \class SkPairPathEffect
+
+ Common baseclass for Compose and Sum. This subclass manages two pathEffects,
+ including flattening them. It does nothing in filterPath, and is only useful
+ for managing the lifetimes of its two arguments.
+*/
+class SK_API SkPairPathEffect : public SkPathEffect {
+protected:
+ SkPairPathEffect(sk_sp<SkPathEffect> pe0, sk_sp<SkPathEffect> pe1);
+
+ void flatten(SkWriteBuffer&) const override;
+
+ // these are visible to our subclasses
+ sk_sp<SkPathEffect> fPE0;
+ sk_sp<SkPathEffect> fPE1;
+
+ SK_TO_STRING_OVERRIDE()
+
+private:
+ typedef SkPathEffect INHERITED;
+};
+
+/** \class SkComposePathEffect
+
+ This subclass of SkPathEffect composes its two arguments, to create
+ a compound pathEffect.
+*/
+class SK_API SkComposePathEffect : public SkPairPathEffect {
+public:
+ /** Construct a pathEffect whose effect is to apply first the inner pathEffect
+ and then the outer pathEffect (e.g. outer(inner(path))).
+ The reference counts for outer and inner are both incremented in the constructor,
+ and decremented in the destructor.
+ */
+ static sk_sp<SkPathEffect> Make(sk_sp<SkPathEffect> outer, sk_sp<SkPathEffect> inner) {
+ if (!outer) {
+ return inner;
+ }
+ if (!inner) {
+ return outer;
+ }
+ return sk_sp<SkPathEffect>(new SkComposePathEffect(outer, inner));
+ }
+
+#ifdef SK_SUPPORT_LEGACY_PATHEFFECT_PTR
+ static SkPathEffect* Create(SkPathEffect* outer, SkPathEffect* inner) {
+ return Make(sk_ref_sp(outer), sk_ref_sp(inner)).release();
+ }
+#endif
+
+ virtual bool filterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec*, const SkRect*) const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkComposePathEffect)
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ bool exposedInAndroidJavaAPI() const override { return true; }
+#endif
+
+protected:
+ SkComposePathEffect(sk_sp<SkPathEffect> outer, sk_sp<SkPathEffect> inner)
+ : INHERITED(outer, inner) {}
+
+private:
+ // illegal
+ SkComposePathEffect(const SkComposePathEffect&);
+ SkComposePathEffect& operator=(const SkComposePathEffect&);
+
+ typedef SkPairPathEffect INHERITED;
+};
+
+/** \class SkSumPathEffect
+
+ This subclass of SkPathEffect applies two pathEffects, one after the other.
+ Its filterPath() returns true if either of the effects succeeded.
+*/
+class SK_API SkSumPathEffect : public SkPairPathEffect {
+public:
+ /** Construct a pathEffect whose effect is to apply two effects, in sequence.
+ (e.g. first(path) + second(path))
+ The reference counts for first and second are both incremented in the constructor,
+ and decremented in the destructor.
+ */
+ static sk_sp<SkPathEffect> Make(sk_sp<SkPathEffect> first, sk_sp<SkPathEffect> second) {
+ if (!first) {
+ return second;
+ }
+ if (!second) {
+ return first;
+ }
+ return sk_sp<SkPathEffect>(new SkSumPathEffect(first, second));
+ }
+
+#ifdef SK_SUPPORT_LEGACY_PATHEFFECT_PTR
+ static SkPathEffect* Create(SkPathEffect* first, SkPathEffect* second) {
+ return Make(sk_ref_sp(first), sk_ref_sp(second)).release();
+ }
+#endif
+ virtual bool filterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec*, const SkRect*) const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkSumPathEffect)
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ bool exposedInAndroidJavaAPI() const override { return true; }
+#endif
+
+protected:
+ SkSumPathEffect(sk_sp<SkPathEffect> first, sk_sp<SkPathEffect> second)
+ : INHERITED(first, second) {}
+
+private:
+ // illegal
+ SkSumPathEffect(const SkSumPathEffect&);
+ SkSumPathEffect& operator=(const SkSumPathEffect&);
+
+ typedef SkPairPathEffect INHERITED;
+};
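+
+// A minimal sketch of combining two effects; 'dashEffect' and 'cornerEffect' are
+// hypothetical sk_sp<SkPathEffect> values created elsewhere.
+//
+//   sk_sp<SkPathEffect> composed = SkComposePathEffect::Make(dashEffect, cornerEffect);
+//   // composed applies dashEffect(cornerEffect(path))
+//   sk_sp<SkPathEffect> summed = SkSumPathEffect::Make(dashEffect, cornerEffect);
+//   // summed applies dashEffect(path) + cornerEffect(path)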
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPathMeasure.h b/gfx/skia/skia/include/core/SkPathMeasure.h
new file mode 100644
index 000000000..1044f7eeb
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPathMeasure.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathMeasure_DEFINED
+#define SkPathMeasure_DEFINED
+
+#include "../private/SkTDArray.h"
+#include "SkPath.h"
+
+struct SkConic;
+
+class SK_API SkPathMeasure : SkNoncopyable {
+public:
+ SkPathMeasure();
+ /** Initialize the pathmeasure with the specified path. The path must remain valid
+ for the lifetime of the measure object, or until setPath() is called with
+ a different path (or null), since the measure object keeps a pointer to the
+ path object (does not copy its data).
+
+ resScale controls the precision of the measure. Values > 1 increase the
+ precision (and possibly slow down the computation).
+ */
+ SkPathMeasure(const SkPath& path, bool forceClosed, SkScalar resScale = 1);
+ ~SkPathMeasure();
+
+ /** Reset the pathmeasure with the specified path. The path must remain valid
+ for the lifetime of the measure object, or until setPath() is called with
+ a different path (or null), since the measure object keeps a pointer to the
+ path object (does not copy its data).
+ */
+ void setPath(const SkPath*, bool forceClosed);
+
+ /** Return the total length of the current contour, or 0 if no path
+ is associated (e.g. setPath(NULL)).
+ */
+ SkScalar getLength();
+
+ /** Pins distance to 0 <= distance <= getLength(), and then computes
+ the corresponding position and tangent.
+ Returns false if there is no path, or a zero-length path was specified, in which case
+ position and tangent are unchanged.
+ */
+ bool SK_WARN_UNUSED_RESULT getPosTan(SkScalar distance, SkPoint* position,
+ SkVector* tangent);
+
+ enum MatrixFlags {
+ kGetPosition_MatrixFlag = 0x01,
+ kGetTangent_MatrixFlag = 0x02,
+ kGetPosAndTan_MatrixFlag = kGetPosition_MatrixFlag | kGetTangent_MatrixFlag
+ };
+
+ /** Pins distance to 0 <= distance <= getLength(), and then computes
+ the corresponding matrix (by calling getPosTan).
+ Returns false if there is no path, or a zero-length path was specified, in which case
+ matrix is unchanged.
+ */
+ bool SK_WARN_UNUSED_RESULT getMatrix(SkScalar distance, SkMatrix* matrix,
+ MatrixFlags flags = kGetPosAndTan_MatrixFlag);
+
+ /** Given a start and stop distance, return in dst the intervening segment(s).
+ If the segment is zero-length, return false, else return true.
+ startD and stopD are pinned to legal values (0..getLength()). If startD > stopD
+ then return false (and leave dst untouched).
+ Begin the segment with a moveTo if startWithMoveTo is true
+ */
+ bool getSegment(SkScalar startD, SkScalar stopD, SkPath* dst, bool startWithMoveTo);
+
+ /** Return true if the current contour is closed.
+ */
+ bool isClosed();
+
+ /** Move to the next contour in the path. Return true if one exists, or false if
+ we're done with the path.
+ */
+ bool nextContour();
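+
+ // A minimal usage sketch: walk every contour of 'path' (assumed to exist) and
+ // sample the position and tangent at the midpoint of each.
+ //
+ //   SkPathMeasure meas(path, false);
+ //   do {
+ //       SkPoint pos;
+ //       SkVector tan;
+ //       if (meas.getPosTan(meas.getLength() * 0.5f, &pos, &tan)) {
+ //           // use pos and tan here
+ //       }
+ //   } while (meas.nextContour());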
+
+#ifdef SK_DEBUG
+ void dump();
+#endif
+
+private:
+ SkPath::Iter fIter;
+ const SkPath* fPath;
+ SkScalar fTolerance;
+ SkScalar fLength; // relative to the current contour
+ int fFirstPtIndex; // relative to the current contour
+ bool fIsClosed; // relative to the current contour
+ bool fForceClosed;
+
+ struct Segment {
+ SkScalar fDistance; // total distance up to this point
+ unsigned fPtIndex; // index into the fPts array
+ unsigned fTValue : 30;
+ unsigned fType : 2; // actually the enum SkSegType
+ // See SkPathMeasurePriv.h
+
+ SkScalar getScalarT() const;
+ };
+ SkTDArray<Segment> fSegments;
+ SkTDArray<SkPoint> fPts; // Points used to define the segments
+
+ static const Segment* NextSegment(const Segment*);
+
+ void buildSegments();
+ SkScalar compute_quad_segs(const SkPoint pts[3], SkScalar distance,
+ int mint, int maxt, int ptIndex);
+ SkScalar compute_conic_segs(const SkConic&, SkScalar distance,
+ int mint, const SkPoint& minPt,
+ int maxt, const SkPoint& maxPt, int ptIndex);
+ SkScalar compute_cubic_segs(const SkPoint pts[4], SkScalar distance,
+ int mint, int maxt, int ptIndex);
+ const Segment* distanceToSegment(SkScalar distance, SkScalar* t);
+ bool quad_too_curvy(const SkPoint pts[3]);
+ bool conic_too_curvy(const SkPoint& firstPt, const SkPoint& midTPt,const SkPoint& lastPt);
+ bool cheap_dist_exceeds_limit(const SkPoint& pt, SkScalar x, SkScalar y);
+ bool cubic_too_curvy(const SkPoint pts[4]);
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPathRef.h b/gfx/skia/skia/include/core/SkPathRef.h
new file mode 100644
index 000000000..0c5cc1aed
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPathRef.h
@@ -0,0 +1,552 @@
+
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathRef_DEFINED
+#define SkPathRef_DEFINED
+
+#include "../private/SkAtomics.h"
+#include "../private/SkTDArray.h"
+#include "SkMatrix.h"
+#include "SkPoint.h"
+#include "SkRRect.h"
+#include "SkRect.h"
+#include "SkRefCnt.h"
+#include "../private/SkTemplates.h"
+
+class SkRBuffer;
+class SkWBuffer;
+
+/**
+ * Holds the path verbs and points. It is versioned by a generation ID. None of its public methods
+ * modify the contents. To modify or append to the verbs/points wrap the SkPathRef in an
+ * SkPathRef::Editor object. Installing the editor resets the generation ID. It also performs
+ * copy-on-write if the SkPathRef is shared by multiple SkPaths. The caller passes the Editor's
+ * constructor a SkAutoTUnref, which may be updated to point to a new SkPathRef after the editor's
+ * constructor returns.
+ *
+ * The points and verbs are stored in a single allocation. The points are at the beginning of the
+ * allocation while the verbs are stored at the end of the allocation, in reverse order. Thus the
+ * points and verbs both grow into the middle of the allocation until they meet. To access verb i
+ * in the verb array use ref.verbs()[~i] (because verbs() returns a pointer just beyond the first
+ * logical verb, which is the last verb in memory).
+ */
+
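+// For example, a read-only walk over an existing SkPathRef 'ref' using the
+// reversed verb indexing described above (sketch only):
+//
+//   for (int i = 0; i < ref.countVerbs(); ++i) {
+//       uint8_t verb = ref.verbs()[~i];   // equivalent to ref.atVerb(i); dispatch on it
+//   }
+//   for (int i = 0; i < ref.countPoints(); ++i) {
+//       const SkPoint& pt = ref.atPoint(i);   // use pt here
+//   }
+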
+class SK_API SkPathRef final : public SkNVRefCnt<SkPathRef> {
+public:
+ class Editor {
+ public:
+ Editor(SkAutoTUnref<SkPathRef>* pathRef,
+ int incReserveVerbs = 0,
+ int incReservePoints = 0);
+
+ ~Editor() { SkDEBUGCODE(sk_atomic_dec(&fPathRef->fEditorsAttached);) }
+
+ /**
+ * Returns the array of points.
+ */
+ SkPoint* points() { return fPathRef->getPoints(); }
+ const SkPoint* points() const { return fPathRef->points(); }
+
+ /**
+ * Gets the ith point. Shortcut for this->points() + i
+ */
+ SkPoint* atPoint(int i) {
+ SkASSERT((unsigned) i < (unsigned) fPathRef->fPointCnt);
+ return this->points() + i;
+ }
+ const SkPoint* atPoint(int i) const {
+ SkASSERT((unsigned) i < (unsigned) fPathRef->fPointCnt);
+ return this->points() + i;
+ }
+
+ /**
+ * Adds the verb and allocates space for the number of points indicated by the verb. The
+ * return value is a pointer to where the points for the verb should be written.
+ * 'weight' is only used if 'verb' is kConic_Verb
+ */
+ SkPoint* growForVerb(int /*SkPath::Verb*/ verb, SkScalar weight = 0) {
+ SkDEBUGCODE(fPathRef->validate();)
+ return fPathRef->growForVerb(verb, weight);
+ }
+
+ /**
+ * Allocates space for multiple instances of a particular verb and the
+ * requisite points & weights.
+ * The return pointer points at the first new point (indexed normally [<i>]).
+ * If 'verb' is kConic_Verb, 'weights' will return a pointer to the
+ * space for the conic weights (indexed normally).
+ */
+ SkPoint* growForRepeatedVerb(int /*SkPath::Verb*/ verb,
+ int numVbs,
+ SkScalar** weights = NULL) {
+ return fPathRef->growForRepeatedVerb(verb, numVbs, weights);
+ }
+
+ /**
+ * Resets the path ref to a new verb and point count. The new verbs and points are
+ * uninitialized.
+ */
+ void resetToSize(int newVerbCnt, int newPointCnt, int newConicCount) {
+ fPathRef->resetToSize(newVerbCnt, newPointCnt, newConicCount);
+ }
+
+ /**
+ * Gets the path ref that is wrapped in the Editor.
+ */
+ SkPathRef* pathRef() { return fPathRef; }
+
+ void setIsOval(bool isOval, bool isCCW, unsigned start) {
+ fPathRef->setIsOval(isOval, isCCW, start);
+ }
+
+ void setIsRRect(bool isRRect, bool isCCW, unsigned start) {
+ fPathRef->setIsRRect(isRRect, isCCW, start);
+ }
+
+ void setBounds(const SkRect& rect) { fPathRef->setBounds(rect); }
+
+ private:
+ SkPathRef* fPathRef;
+ };
+
+ class SK_API Iter {
+ public:
+ Iter();
+ Iter(const SkPathRef&);
+
+ void setPathRef(const SkPathRef&);
+
+ /** Return the next verb in this iteration of the path. When all
+ segments have been visited, return kDone_Verb.
+
+ @param pts The points representing the current verb and/or segment
+ This must not be NULL.
+ @return The verb for the current segment
+ */
+ uint8_t next(SkPoint pts[4]);
+ uint8_t peek() const;
+
+ SkScalar conicWeight() const { return *fConicWeights; }
+
+ private:
+ const SkPoint* fPts;
+ const uint8_t* fVerbs;
+ const uint8_t* fVerbStop;
+ const SkScalar* fConicWeights;
+ };
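+
+ // A minimal sketch of iterating an existing SkPathRef 'ref' with Iter:
+ //
+ //   SkPathRef::Iter iter(ref);
+ //   SkPoint pts[4];
+ //   uint8_t verb;
+ //   while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+ //       if (SkPath::kConic_Verb == verb) {
+ //           SkScalar w = iter.conicWeight();   // weight for the conic in pts
+ //       }
+ //   }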
+
+public:
+ /**
+ * Gets a path ref with no verbs or points.
+ */
+ static SkPathRef* CreateEmpty();
+
+ /**
+ * Returns true if all of the points in this path are finite, meaning there
+ * are no infinities and no NaNs.
+ */
+ bool isFinite() const {
+ if (fBoundsIsDirty) {
+ this->computeBounds();
+ }
+ return SkToBool(fIsFinite);
+ }
+
+ /**
+ * Returns a mask, where each bit corresponding to a SegmentMask is
+ * set if the path contains 1 or more segments of that type.
+ * Returns 0 for an empty path (no segments).
+ */
+ uint32_t getSegmentMasks() const { return fSegmentMask; }
+
+ /** Returns true if the path is an oval.
+ *
+ * @param rect returns the bounding rect of this oval. It's a circle
+ * if the height and width are the same.
+ * @param isCCW is the oval CCW (or CW if false).
+ * @param start indicates where the contour starts on the oval (see
+ * SkPath::addOval for interpretation of the index).
+ *
+ * @return true if this path is an oval.
+ * Tracking whether a path is an oval is considered an
+ * optimization for performance and so some paths that are in
+ * fact ovals can report false.
+ */
+ bool isOval(SkRect* rect, bool* isCCW, unsigned* start) const {
+ if (fIsOval) {
+ if (rect) {
+ *rect = this->getBounds();
+ }
+ if (isCCW) {
+ *isCCW = SkToBool(fRRectOrOvalIsCCW);
+ }
+ if (start) {
+ *start = fRRectOrOvalStartIdx;
+ }
+ }
+
+ return SkToBool(fIsOval);
+ }
+
+ bool isRRect(SkRRect* rrect, bool* isCCW, unsigned* start) const {
+ if (fIsRRect) {
+ if (rrect) {
+ *rrect = this->getRRect();
+ }
+ if (isCCW) {
+ *isCCW = SkToBool(fRRectOrOvalIsCCW);
+ }
+ if (start) {
+ *start = fRRectOrOvalStartIdx;
+ }
+ }
+ return SkToBool(fIsRRect);
+ }
+
+
+ bool hasComputedBounds() const {
+ return !fBoundsIsDirty;
+ }
+
+ /** Returns the bounds of the path's points. If the path contains 0 or 1
+ points, the bounds is set to (0,0,0,0), and isEmpty() will return true.
+ Note: this bounds may be larger than the actual shape, since curves
+ do not extend as far as their control points.
+ */
+ const SkRect& getBounds() const {
+ if (fBoundsIsDirty) {
+ this->computeBounds();
+ }
+ return fBounds;
+ }
+
+ SkRRect getRRect() const;
+
+ /**
+ * Transforms a path ref by a matrix, allocating a new one only if necessary.
+ */
+ static void CreateTransformedCopy(SkAutoTUnref<SkPathRef>* dst,
+ const SkPathRef& src,
+ const SkMatrix& matrix);
+
+ static SkPathRef* CreateFromBuffer(SkRBuffer* buffer);
+
+ /**
+ * Rolls back a path ref to zero verbs and points with the assumption that the path ref will be
+ * repopulated with approximately the same number of verbs and points. A new path ref is created
+ * only if necessary.
+ */
+ static void Rewind(SkAutoTUnref<SkPathRef>* pathRef);
+
+ ~SkPathRef();
+ int countPoints() const { SkDEBUGCODE(this->validate();) return fPointCnt; }
+ int countVerbs() const { SkDEBUGCODE(this->validate();) return fVerbCnt; }
+ int countWeights() const { SkDEBUGCODE(this->validate();) return fConicWeights.count(); }
+
+ /**
+ * Returns a pointer one beyond the first logical verb (last verb in memory order).
+ */
+ const uint8_t* verbs() const { SkDEBUGCODE(this->validate();) return fVerbs; }
+
+ /**
+ * Returns a const pointer to the first verb in memory (which is the last logical verb).
+ */
+ const uint8_t* verbsMemBegin() const { return this->verbs() - fVerbCnt; }
+
+ /**
+ * Returns a const pointer to the first point.
+ */
+ const SkPoint* points() const { SkDEBUGCODE(this->validate();) return fPoints; }
+
+ /**
+ * Shortcut for this->points() + this->countPoints()
+ */
+ const SkPoint* pointsEnd() const { return this->points() + this->countPoints(); }
+
+ const SkScalar* conicWeights() const { SkDEBUGCODE(this->validate();) return fConicWeights.begin(); }
+ const SkScalar* conicWeightsEnd() const { SkDEBUGCODE(this->validate();) return fConicWeights.end(); }
+
+ /**
+ * Convenience methods for getting to a verb or point by index.
+ */
+ uint8_t atVerb(int index) const {
+ SkASSERT((unsigned) index < (unsigned) fVerbCnt);
+ return this->verbs()[~index];
+ }
+ const SkPoint& atPoint(int index) const {
+ SkASSERT((unsigned) index < (unsigned) fPointCnt);
+ return this->points()[index];
+ }
+
+ bool operator== (const SkPathRef& ref) const;
+
+ /**
+ * Writes the path points and verbs to a buffer.
+ */
+ void writeToBuffer(SkWBuffer* buffer) const;
+
+ /**
+ * Gets the number of bytes that would be written by writeToBuffer().
+ */
+ uint32_t writeSize() const;
+
+ void interpolate(const SkPathRef& ending, SkScalar weight, SkPathRef* out) const;
+
+ /**
+ * Gets an ID that uniquely identifies the contents of the path ref. If two path refs have the
+ * same ID then they have the same verbs and points. However, two path refs may have the same
+ * contents but different genIDs.
+ */
+ uint32_t genID() const;
+
+ struct GenIDChangeListener {
+ virtual ~GenIDChangeListener() {}
+ virtual void onChange() = 0;
+ };
+
+ void addGenIDChangeListener(GenIDChangeListener* listener);
+
+ SkDEBUGCODE(void validate() const;)
+
+private:
+ enum SerializationOffsets {
+ kRRectOrOvalStartIdx_SerializationShift = 28, // requires 3 bits
+ kRRectOrOvalIsCCW_SerializationShift = 27, // requires 1 bit
+ kIsRRect_SerializationShift = 26, // requires 1 bit
+ kIsFinite_SerializationShift = 25, // requires 1 bit
+ kIsOval_SerializationShift = 24, // requires 1 bit
+ kSegmentMask_SerializationShift = 0 // requires 4 bits
+ };
+
+ SkPathRef() {
+ fBoundsIsDirty = true; // this also invalidates fIsFinite
+ fPointCnt = 0;
+ fVerbCnt = 0;
+ fVerbs = NULL;
+ fPoints = NULL;
+ fFreeSpace = 0;
+ fGenerationID = kEmptyGenID;
+ fSegmentMask = 0;
+ fIsOval = false;
+ fIsRRect = false;
+ // The next two values don't matter unless fIsOval or fIsRRect are true.
+ fRRectOrOvalIsCCW = false;
+ fRRectOrOvalStartIdx = 0xAC;
+ SkDEBUGCODE(fEditorsAttached = 0;)
+ SkDEBUGCODE(this->validate();)
+ }
+
+ void copy(const SkPathRef& ref, int additionalReserveVerbs, int additionalReservePoints);
+
+ // Return true if the computed bounds are finite.
+ static bool ComputePtBounds(SkRect* bounds, const SkPathRef& ref) {
+ return bounds->setBoundsCheck(ref.points(), ref.countPoints());
+ }
+
+ // called, if dirty, by getBounds()
+ void computeBounds() const {
+ SkDEBUGCODE(this->validate();)
+ // TODO(mtklein): remove fBoundsIsDirty and fIsFinite,
+ // using an inverted rect instead of fBoundsIsDirty and always recalculating fIsFinite.
+ SkASSERT(fBoundsIsDirty);
+
+ fIsFinite = ComputePtBounds(&fBounds, *this);
+ fBoundsIsDirty = false;
+ }
+
+ void setBounds(const SkRect& rect) {
+ SkASSERT(rect.fLeft <= rect.fRight && rect.fTop <= rect.fBottom);
+ fBounds = rect;
+ fBoundsIsDirty = false;
+ fIsFinite = fBounds.isFinite();
+ }
+
+ /** Makes additional room but does not change the counts or change the genID */
+ void incReserve(int additionalVerbs, int additionalPoints) {
+ SkDEBUGCODE(this->validate();)
+ size_t space = additionalVerbs * sizeof(uint8_t) + additionalPoints * sizeof (SkPoint);
+ this->makeSpace(space);
+ SkDEBUGCODE(this->validate();)
+ }
+
+ /** Resets the path ref with verbCount verbs and pointCount points, all uninitialized. Also
+ * allocates space for reserveVerb additional verbs and reservePoints additional points.*/
+ void resetToSize(int verbCount, int pointCount, int conicCount,
+ int reserveVerbs = 0, int reservePoints = 0) {
+ SkDEBUGCODE(this->validate();)
+ fBoundsIsDirty = true; // this also invalidates fIsFinite
+ fGenerationID = 0;
+
+ fSegmentMask = 0;
+ fIsOval = false;
+ fIsRRect = false;
+
+ size_t newSize = sizeof(uint8_t) * verbCount + sizeof(SkPoint) * pointCount;
+ size_t newReserve = sizeof(uint8_t) * reserveVerbs + sizeof(SkPoint) * reservePoints;
+ size_t minSize = newSize + newReserve;
+
+ ptrdiff_t sizeDelta = this->currSize() - minSize;
+
+ if (sizeDelta < 0 || static_cast<size_t>(sizeDelta) >= 3 * minSize) {
+ sk_free(fPoints);
+ fPoints = NULL;
+ fVerbs = NULL;
+ fFreeSpace = 0;
+ fVerbCnt = 0;
+ fPointCnt = 0;
+ this->makeSpace(minSize);
+ fVerbCnt = verbCount;
+ fPointCnt = pointCount;
+ fFreeSpace -= newSize;
+ } else {
+ fPointCnt = pointCount;
+ fVerbCnt = verbCount;
+ fFreeSpace = this->currSize() - minSize;
+ }
+ fConicWeights.setCount(conicCount);
+ SkDEBUGCODE(this->validate();)
+ }
+
+ /**
+ * Increases the verb count by numVbs and point count by the required amount.
+ * The new points are uninitialized. All the new verbs are set to the specified
+ * verb. If 'verb' is kConic_Verb, 'weights' will return a pointer to the
+ * uninitialized conic weights.
+ */
+ SkPoint* growForRepeatedVerb(int /*SkPath::Verb*/ verb, int numVbs, SkScalar** weights);
+
+ /**
+ * Increases the verb count 1, records the new verb, and creates room for the requisite number
+ * of additional points. A pointer to the first point is returned. Any new points are
+ * uninitialized.
+ */
+ SkPoint* growForVerb(int /*SkPath::Verb*/ verb, SkScalar weight);
+
+ /**
+ * Ensures that the free space available in the path ref is >= size. The verb and point counts
+ * are not changed.
+ */
+ void makeSpace(size_t size) {
+ SkDEBUGCODE(this->validate();)
+ if (size <= fFreeSpace) {
+ return;
+ }
+ size_t growSize = size - fFreeSpace;
+ size_t oldSize = this->currSize();
+ // round to next multiple of 8 bytes
+ growSize = (growSize + 7) & ~static_cast<size_t>(7);
+ // we always at least double the allocation
+ if (growSize < oldSize) {
+ growSize = oldSize;
+ }
+ if (growSize < kMinSize) {
+ growSize = kMinSize;
+ }
+ constexpr size_t maxSize = std::numeric_limits<size_t>::max();
+ size_t newSize;
+ if (growSize <= maxSize - oldSize) {
+ newSize = oldSize + growSize;
+ } else {
+ SK_ABORT("Path too big.");
+ }
+ // Note that realloc could memcpy more than we need. It seems to be a win anyway. TODO:
+ // encapsulate this.
+ fPoints = reinterpret_cast<SkPoint*>(sk_realloc_throw(fPoints, newSize));
+ size_t oldVerbSize = fVerbCnt * sizeof(uint8_t);
+ void* newVerbsDst = SkTAddOffset<void>(fPoints, newSize - oldVerbSize);
+ void* oldVerbsSrc = SkTAddOffset<void>(fPoints, oldSize - oldVerbSize);
+ memmove(newVerbsDst, oldVerbsSrc, oldVerbSize);
+ fVerbs = SkTAddOffset<uint8_t>(fPoints, newSize);
+ fFreeSpace += growSize;
+ SkDEBUGCODE(this->validate();)
+ }
+
+ /**
+ * Private, non-const-ptr version of the public function verbsMemBegin().
+ */
+ uint8_t* verbsMemWritable() {
+ SkDEBUGCODE(this->validate();)
+ return fVerbs - fVerbCnt;
+ }
+
+ /**
+ * Gets the total amount of space allocated for verbs, points, and reserve.
+ */
+ size_t currSize() const {
+ return reinterpret_cast<intptr_t>(fVerbs) - reinterpret_cast<intptr_t>(fPoints);
+ }
+
+ /**
+ * Called the first time someone calls CreateEmpty to actually create the singleton.
+ */
+ friend SkPathRef* sk_create_empty_pathref();
+
+ void setIsOval(bool isOval, bool isCCW, unsigned start) {
+ fIsOval = isOval;
+ fRRectOrOvalIsCCW = isCCW;
+ fRRectOrOvalStartIdx = start;
+ }
+
+ void setIsRRect(bool isRRect, bool isCCW, unsigned start) {
+ fIsRRect = isRRect;
+ fRRectOrOvalIsCCW = isCCW;
+ fRRectOrOvalStartIdx = start;
+ }
+
+ // called only by the editor. Note that this is not a const function.
+ SkPoint* getPoints() {
+ SkDEBUGCODE(this->validate();)
+ fIsOval = false;
+ fIsRRect = false;
+ return fPoints;
+ }
+
+ const SkPoint* getPoints() const {
+ SkDEBUGCODE(this->validate();)
+ return fPoints;
+ }
+
+ void callGenIDChangeListeners();
+
+ enum {
+ kMinSize = 256,
+ };
+
+ mutable SkRect fBounds;
+
+ SkPoint* fPoints; // points to beginning of the allocation
+ uint8_t* fVerbs; // points just past the end of the allocation (verbs grow backwards)
+ int fVerbCnt;
+ int fPointCnt;
+ size_t fFreeSpace; // redundant but saves computation
+ SkTDArray<SkScalar> fConicWeights;
+
+ enum {
+ kEmptyGenID = 1, // GenID reserved for path ref with zero points and zero verbs.
+ };
+ mutable uint32_t fGenerationID;
+ SkDEBUGCODE(int32_t fEditorsAttached;) // assert that only one editor in use at any time.
+
+ SkTDArray<GenIDChangeListener*> fGenIDChangeListeners; // pointers are owned
+
+ mutable uint8_t fBoundsIsDirty;
+ mutable SkBool8 fIsFinite; // only meaningful if bounds are valid
+
+ SkBool8 fIsOval;
+ SkBool8 fIsRRect;
+ // Both the circle and rrect special cases have a notion of direction and starting point
+ // The next two variables store that information for either.
+ SkBool8 fRRectOrOvalIsCCW;
+ uint8_t fRRectOrOvalStartIdx;
+ uint8_t fSegmentMask;
+
+ friend class PathRefTest_Private;
+ friend class ForceIsRRect_Private; // unit test isRRect
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPicture.h b/gfx/skia/skia/include/core/SkPicture.h
new file mode 100644
index 000000000..c2d05f9c4
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPicture.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPicture_DEFINED
+#define SkPicture_DEFINED
+
+#include "SkRefCnt.h"
+#include "SkRect.h"
+#include "SkTypes.h"
+
+class GrContext;
+class SkBigPicture;
+class SkBitmap;
+class SkCanvas;
+class SkData;
+class SkImage;
+class SkImageDeserializer;
+class SkPath;
+class SkPictureData;
+class SkPixelSerializer;
+class SkReadBuffer;
+class SkRefCntSet;
+class SkStream;
+class SkTypefacePlayback;
+class SkWStream;
+class SkWriteBuffer;
+struct SkPictInfo;
+
+/** \class SkPicture
+
+ An SkPicture records drawing commands made to a canvas to be played back at a later time.
+ This base class handles serialization and a few other miscellany.
+*/
+class SK_API SkPicture : public SkRefCnt {
+public:
+ virtual ~SkPicture();
+
+ /**
+ * Function signature defining a function that sets up an SkBitmap from encoded data. On
+ * success, the SkBitmap should have its Config, width, height, rowBytes and pixelref set.
+ * If the installed pixelref has decoded the data into pixels, then the src buffer need not be
+ * copied. If the pixelref defers the actual decode until its lockPixels() is called, then it
+ * must make a copy of the src buffer.
+ * @param src Encoded data.
+ * @param length Size of the encoded data, in bytes.
+ * @param dst SkBitmap to install the pixel ref on.
+ * @return Whether or not a pixel ref was successfully installed.
+ */
+ typedef bool (*InstallPixelRefProc)(const void* src, size_t length, SkBitmap* dst);
+
+#ifdef SK_SUPPORT_LEGACY_PICTUREINSTALLPIXELREF
+ /**
+ * Recreate a picture that was serialized into a stream.
+ * @param SkStream Serialized picture data. Ownership is unchanged by this call.
+ * @param proc Function pointer for installing pixelrefs on SkBitmaps representing the
+ * encoded bitmap data from the stream.
+ * @return A new SkPicture representing the serialized data, or NULL if the stream is
+ * invalid.
+ */
+ static sk_sp<SkPicture> MakeFromStream(SkStream*, InstallPixelRefProc proc);
+ static sk_sp<SkPicture> MakeFromStream(SkStream* stream, std::nullptr_t) {
+ return MakeFromStream(stream);
+ }
+#endif
+
+ /**
+ * Recreate a picture that was serialized into a stream.
+ *
+ * Any serialized images in the stream will be passed to the image-deserializer, or if that is
+ * null, to the default deserializer that will call SkImage::MakeFromEncoded().
+ */
+ static sk_sp<SkPicture> MakeFromStream(SkStream*, SkImageDeserializer*);
+ static sk_sp<SkPicture> MakeFromStream(SkStream*);
+ static sk_sp<SkPicture> MakeFromData(const void* data, size_t size,
+ SkImageDeserializer* = nullptr);
+ static sk_sp<SkPicture> MakeFromData(const SkData* data, SkImageDeserializer* = nullptr);
+
+ /**
+ * Recreate a picture that was serialized into a buffer. If the creation requires bitmap
+ * decoding, the decoder must be set on the SkReadBuffer parameter by calling
+ * SkReadBuffer::setBitmapDecoder() before calling SkPicture::CreateFromBuffer().
+ * @param SkReadBuffer Serialized picture data.
+ * @return A new SkPicture representing the serialized data, or NULL if the buffer is
+ * invalid.
+ */
+ static sk_sp<SkPicture> MakeFromBuffer(SkReadBuffer&);
+
+ /**
+ * Subclasses of this can be passed to playback(). During the playback
+ * of the picture, this callback will periodically be invoked. If its
+ * abort() returns true, then picture playback will be interrupted.
+ *
+ * The resulting drawing is undefined, as there is no guarantee how often the
+ * callback will be invoked. If the abort happens inside some level of nested
+ * calls to save(), restore will automatically be called to return the state
+ * to the same level it was before the playback call was made.
+ */
+ class SK_API AbortCallback {
+ public:
+ AbortCallback() {}
+ virtual ~AbortCallback() {}
+ virtual bool abort() = 0;
+ };
+
+ /** Replays the drawing commands on the specified canvas. Note that
+ this has the effect of unfurling this picture into the destination
+ canvas. Using the SkCanvas::drawPicture entry point gives the destination
+ canvas the option of just taking a ref.
+ @param canvas the canvas receiving the drawing commands.
+ @param callback a callback that allows interruption of playback
+ */
+ virtual void playback(SkCanvas*, AbortCallback* = NULL) const = 0;
+
+ /** Return a cull rect for this picture.
+ Ops recorded into this picture that attempt to draw outside the cull might not be drawn.
+ */
+ virtual SkRect cullRect() const = 0;
+
+ /** Returns a non-zero value unique among all pictures. */
+ uint32_t uniqueID() const;
+
+ /**
+ * Serialize the picture to SkData. If the pixel-serializer is non-null, it will be used to
+ * customize how images referenced by the picture are serialized/compressed.
+ */
+ sk_sp<SkData> serialize(SkPixelSerializer* = nullptr) const;
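+
+ // A minimal round-trip sketch; 'pic' is an existing sk_sp<SkPicture>.
+ //
+ //   sk_sp<SkData> data = pic->serialize();
+ //   sk_sp<SkPicture> copy = SkPicture::MakeFromData(data.get());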
+
+ /**
+ * Serialize to a stream. If the pixel-serializer is non-null, it will be used to
+ * customize how images referenced by the picture are serialized/compressed.
+ */
+ void serialize(SkWStream*, SkPixelSerializer* = nullptr) const;
+
+ /**
+ * Serialize to a buffer.
+ */
+ void flatten(SkWriteBuffer&) const;
+
+ /**
+ * Returns true if any bitmaps may be produced when this SkPicture
+ * is replayed.
+ */
+ virtual bool willPlayBackBitmaps() const = 0;
+
+ /** Return the approximate number of operations in this picture. This
+ * number may be greater or less than the number of SkCanvas calls
+ * recorded: some calls may be recorded as more than one operation, or some
+ * calls may be optimized away.
+ */
+ virtual int approximateOpCount() const = 0;
+
+ /** Returns the approximate byte size of this picture, not including large ref'd objects. */
+ virtual size_t approximateBytesUsed() const = 0;
+
+ /** Return true if the SkStream/Buffer represents a serialized picture, and
+ fills out SkPictInfo. After this function returns, the data source is not
+ rewound so it will have to be manually reset before passing to
+ CreateFromStream or CreateFromBuffer. Note, CreateFromStream and
+ CreateFromBuffer perform this check internally so these entry points are
+ intended for standalone tools.
+ If false is returned, SkPictInfo is unmodified.
+ */
+ static bool InternalOnly_StreamIsSKP(SkStream*, SkPictInfo*);
+ static bool InternalOnly_BufferIsSKP(SkReadBuffer*, SkPictInfo*);
+
+#ifdef SK_SUPPORT_LEGACY_PICTURE_GPUVETO
+ /** Return true if the picture is suitable for rendering on the GPU. */
+ bool suitableForGpuRasterization(GrContext*, const char** whyNot = NULL) const;
+#endif
+
+ // Sent via SkMessageBus from destructor.
+ struct DeletionMessage { int32_t fUniqueID; }; // TODO: -> uint32_t?
+
+ // Returns NULL if this is not an SkBigPicture.
+ virtual const SkBigPicture* asSkBigPicture() const { return NULL; }
+
+ // Global setting to enable or disable security precautions for serialization.
+ static void SetPictureIOSecurityPrecautionsEnabled_Dangerous(bool set);
+ static bool PictureIOSecurityPrecautionsEnabled();
+
+#ifdef SK_SUPPORT_LEGACY_PICTURE_PTR
+ static SkPicture* CreateFromStream(SkStream* stream, InstallPixelRefProc proc) {
+ return MakeFromStream(stream, proc).release();
+ }
+ static SkPicture* CreateFromStream(SkStream* stream) {
+ return MakeFromStream(stream).release();
+ }
+ static SkPicture* CreateFromBuffer(SkReadBuffer& rbuf) {
+ return MakeFromBuffer(rbuf).release();
+ }
+#endif
+
+private:
+ // Subclass whitelist.
+ SkPicture();
+ friend class SkBigPicture;
+ friend class SkEmptyPicture;
+ template <typename> friend class SkMiniPicture;
+
+ void serialize(SkWStream*, SkPixelSerializer*, SkRefCntSet* typefaces) const;
+ static sk_sp<SkPicture> MakeFromStream(SkStream*, SkImageDeserializer*, SkTypefacePlayback*);
+ friend class SkPictureData;
+
+ virtual int numSlowPaths() const = 0;
+ friend class SkPictureGpuAnalyzer;
+ friend struct SkPathCounter;
+
+ // V35: Store SkRect (rather than width & height) in header
+ // V36: Remove (obsolete) alphatype from SkColorTable
+ // V37: Added shadow only option to SkDropShadowImageFilter (last version to record CLEAR)
+ // V38: Added PictureResolution option to SkPictureImageFilter
+ // V39: Added FilterLevel option to SkPictureImageFilter
+ // V40: Remove UniqueID serialization from SkImageFilter.
+ // V41: Added serialization of SkBitmapSource's filterQuality parameter
+ // V42: Added a bool to SkPictureShader serialization to indicate did-we-serialize-a-picture?
+ // V43: Added DRAW_IMAGE and DRAW_IMAGE_RECT opt codes to serialized data
+ // V44: Move annotations from paint to drawAnnotation
+ // V45: Add invNormRotation to SkLightingShader.
+ // V46: Add drawTextRSXform
+ // V47: Add occluder rect to SkBlurMaskFilter
+ // V48: Read and write extended SkTextBlobs.
+ // V49: Gradients serialized as SkColor4f + SkColorSpace
+ // V50: SkXfermode -> SkBlendMode
+
+ // Only SKPs within the min/current picture version range (inclusive) can be read.
+ static const uint32_t MIN_PICTURE_VERSION = 35; // Produced by Chrome M39.
+ static const uint32_t CURRENT_PICTURE_VERSION = 50;
+
+ static_assert(MIN_PICTURE_VERSION <= 41,
+ "Remove kFontFileName and related code from SkFontDescriptor.cpp.");
+
+ static_assert(MIN_PICTURE_VERSION <= 42,
+ "Remove COMMENT API handlers from SkPicturePlayback.cpp");
+
+ static_assert(MIN_PICTURE_VERSION <= 43,
+ "Remove SkBitmapSourceDeserializer.");
+
+ static_assert(MIN_PICTURE_VERSION <= 45,
+ "Remove decoding of old SkTypeface::Style from SkFontDescriptor.cpp.");
+
+ static_assert(MIN_PICTURE_VERSION <= 48,
+ "Remove legacy gradient deserialization code from SkGradientShader.cpp.");
+
+ static bool IsValidPictInfo(const SkPictInfo& info);
+ static sk_sp<SkPicture> Forwardport(const SkPictInfo&,
+ const SkPictureData*,
+ SkReadBuffer* buffer);
+
+ SkPictInfo createHeader() const;
+ SkPictureData* backport() const;
+
+ mutable uint32_t fUniqueID;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPictureAnalyzer.h b/gfx/skia/skia/include/core/SkPictureAnalyzer.h
new file mode 100644
index 000000000..62dac30f0
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPictureAnalyzer.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPictureAnalyzer_DEFINED
+#define SkPictureAnalyzer_DEFINED
+
+#include "SkCanvas.h"
+#include "SkRefCnt.h"
+#include "SkRegion.h"
+#include "SkTypes.h"
+
+#if SK_SUPPORT_GPU
+#include "GrContext.h"
+
+class SkPath;
+class SkPicture;
+
+/** \class SkPictureGpuAnalyzer
+
+ Gathers GPU-related statistics for one or more SkPictures.
+*/
+class SK_API SkPictureGpuAnalyzer final : public SkNoncopyable {
+public:
+ explicit SkPictureGpuAnalyzer(sk_sp<GrContextThreadSafeProxy> = nullptr);
+ explicit SkPictureGpuAnalyzer(const sk_sp<SkPicture>& picture,
+ sk_sp<GrContextThreadSafeProxy> = nullptr);
+
+ /**
+ * Process the given picture and accumulate its stats.
+ */
+ void analyzePicture(const SkPicture*);
+
+ /**
+ * Process an explicit clipPath op.
+ */
+ void analyzeClipPath(const SkPath&, SkCanvas::ClipOp, bool doAntiAlias);
+
+ /**
+ * Reset all accumulated stats.
+ */
+ void reset();
+
+ /**
+ * Returns true if the analyzed pictures are suitable for rendering on the GPU.
+ */
+ bool suitableForGpuRasterization(const char** whyNot = nullptr) const;
+
+ /**
+ * Returns the number of commands which are slow to draw on the GPU, capped at the predicate
+ * max.
+ */
+ uint32_t numSlowGpuCommands() { return fNumSlowPaths; }
+
+private:
+ uint32_t fNumSlowPaths;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+#endif // SK_SUPPORT_GPU
+
+#endif // SkPictureAnalyzer_DEFINED
diff --git a/gfx/skia/skia/include/core/SkPictureRecorder.h b/gfx/skia/skia/include/core/SkPictureRecorder.h
new file mode 100644
index 000000000..c82418930
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPictureRecorder.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPictureRecorder_DEFINED
+#define SkPictureRecorder_DEFINED
+
+#include "../private/SkMiniRecorder.h"
+#include "SkBBHFactory.h"
+#include "SkPicture.h"
+#include "SkRefCnt.h"
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+namespace android {
+ class Picture;
+};
+#endif
+
+class GrContext;
+class SkCanvas;
+class SkDrawable;
+class SkPictureRecord;
+class SkRecord;
+class SkRecorder;
+
+class SK_API SkPictureRecorder : SkNoncopyable {
+public:
+ SkPictureRecorder();
+ ~SkPictureRecorder();
+
+ enum RecordFlags {
+ // If you call drawPicture() or drawDrawable() on the recording canvas, this flag forces
+ // that object to playback its contents immediately rather than reffing the object.
+ kPlaybackDrawPicture_RecordFlag = 1 << 0,
+ };
+
+ enum FinishFlags {
+ kReturnNullForEmpty_FinishFlag = 1 << 0, // if the recording contains no draw-ops, return nullptr
+ };
+
+ /** Returns the canvas that records the drawing commands.
+ @param bounds the cull rect used when recording this picture. Any drawing that falls outside
+ of this rect is undefined, and may be drawn or it may not.
+ @param bbhFactory factory to create desired acceleration structure
+ @param recordFlags optional flags that control recording.
+ @return the canvas.
+ */
+ SkCanvas* beginRecording(const SkRect& bounds,
+ SkBBHFactory* bbhFactory = NULL,
+ uint32_t recordFlags = 0);
+
+ SkCanvas* beginRecording(SkScalar width, SkScalar height,
+ SkBBHFactory* bbhFactory = NULL,
+ uint32_t recordFlags = 0) {
+ return this->beginRecording(SkRect::MakeWH(width, height), bbhFactory, recordFlags);
+ }
+
+ /** Returns the recording canvas if one is active, or NULL if recording is
+ not active. This does not alter the refcnt on the canvas (if present).
+ */
+ SkCanvas* getRecordingCanvas();
+
+ /**
+ * Signal that the caller is done recording. This invalidates the canvas returned by
+ * beginRecording/getRecordingCanvas. Ownership of the object is passed to the caller, who
+ * must call unref() when they are done using it.
+ *
+ * The returned picture is immutable. If during recording drawables were added to the canvas,
+ * these will have been "drawn" into a recording canvas, so that this resulting picture will
+ * reflect their current state, but will not contain a live reference to the drawables
+ * themselves.
+ */
+ sk_sp<SkPicture> finishRecordingAsPicture(uint32_t endFlags = 0);
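+
+ // A minimal recording sketch (illustrative names only):
+ //
+ //   SkPictureRecorder recorder;
+ //   SkCanvas* canvas = recorder.beginRecording(SkRect::MakeWH(100, 100));
+ //   canvas->drawColor(SK_ColorWHITE);   // any sequence of draw calls
+ //   sk_sp<SkPicture> picture = recorder.finishRecordingAsPicture();
+ //   // later: targetCanvas->drawPicture(picture.get());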
+
+ /**
+ * Signal that the caller is done recording, and update the cull rect to use for bounding
+ * box hierarchy (BBH) generation. The behavior is the same as calling
+ * endRecordingAsPicture(), except that this method updates the cull rect initially passed
+ * finishRecordingAsPicture(), except that this method updates the cull rect initially passed
+ * @param cullRect the new culling rectangle to use as the overall bound for BBH generation
+ * and subsequent culling operations.
+ * @return the picture containing the recorded content.
+ */
+ sk_sp<SkPicture> finishRecordingAsPictureWithCull(const SkRect& cullRect,
+ uint32_t endFlags = 0);
+
+ /**
+ * Signal that the caller is done recording. This invalidates the canvas returned by
+ * beginRecording/getRecordingCanvas. Ownership of the object is passed to the caller, who
+ * must call unref() when they are done using it.
+ *
+ * Unlike finishRecordingAsPicture(), which returns an immutable picture, the returned drawable
+ * may contain live references to other drawables (if they were added to the recording canvas)
+ * and therefore this drawable will reflect the current state of those nested drawables anytime
+ * it is drawn or a new picture is snapped from it (by calling drawable->newPictureSnapshot()).
+ */
+ sk_sp<SkDrawable> finishRecordingAsDrawable(uint32_t endFlags = 0);
+
+#ifdef SK_SUPPORT_LEGACY_PICTURE_PTR
+ SkPicture* SK_WARN_UNUSED_RESULT endRecordingAsPicture() {
+ return this->finishRecordingAsPicture().release();
+ }
+ SkPicture* SK_WARN_UNUSED_RESULT endRecordingAsPicture(const SkRect& cullRect) {
+ return this->finishRecordingAsPictureWithCull(cullRect).release();
+ }
+ SkDrawable* SK_WARN_UNUSED_RESULT endRecordingAsDrawable() {
+ return this->finishRecordingAsDrawable().release();
+ }
+ SkPicture* SK_WARN_UNUSED_RESULT endRecording() { return this->endRecordingAsPicture(); }
+#endif
+
+private:
+ void reset();
+
+ /** Replay the current (partially recorded) operation stream into
+ canvas. This call doesn't close the current recording.
+ */
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ friend class android::Picture;
+#endif
+ friend class SkPictureRecorderReplayTester; // for unit testing
+ void partialReplay(SkCanvas* canvas) const;
+
+ bool fActivelyRecording;
+ uint32_t fFlags;
+ SkRect fCullRect;
+ SkAutoTUnref<SkBBoxHierarchy> fBBH;
+ SkAutoTUnref<SkRecorder> fRecorder;
+ SkAutoTUnref<SkRecord> fRecord;
+ SkMiniRecorder fMiniRecorder;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPixelRef.h b/gfx/skia/skia/include/core/SkPixelRef.h
new file mode 100644
index 000000000..2677e5f22
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPixelRef.h
@@ -0,0 +1,405 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPixelRef_DEFINED
+#define SkPixelRef_DEFINED
+
+#include "../private/SkAtomics.h"
+#include "../private/SkMutex.h"
+#include "../private/SkTDArray.h"
+#include "SkBitmap.h"
+#include "SkFilterQuality.h"
+#include "SkImageInfo.h"
+#include "SkPixmap.h"
+#include "SkRefCnt.h"
+#include "SkSize.h"
+#include "SkString.h"
+#include "SkYUVSizeInfo.h"
+
+class SkColorTable;
+class SkData;
+struct SkIRect;
+
+class GrTexture;
+class SkDiscardableMemory;
+
+/** \class SkPixelRef
+
+ This class is the smart container for pixel memory, and is used with
+ SkBitmap. A pixelref is installed into a bitmap, and then the bitmap can
+ access the actual pixel memory by calling lockPixels/unlockPixels.
+
+ This class can be shared/accessed between multiple threads.
+*/
+class SK_API SkPixelRef : public SkRefCnt {
+public:
+ explicit SkPixelRef(const SkImageInfo&);
+ virtual ~SkPixelRef();
+
+ const SkImageInfo& info() const {
+ return fInfo;
+ }
+
+ /** Return the pixel memory returned from lockPixels, or null if the
+ lockCount is 0.
+ */
+ void* pixels() const { return fRec.fPixels; }
+
+ /** Return the current colorTable (if any) if pixels are locked, or null.
+ */
+ SkColorTable* colorTable() const { return fRec.fColorTable; }
+
+ size_t rowBytes() const { return fRec.fRowBytes; }
+
+ /**
+ * To access the actual pixels of a pixelref, it must be "locked".
+ * On success, lockPixels(LockRec*) fills out a LockRec struct describing them.
+ */
+ struct LockRec {
+ LockRec() : fPixels(NULL), fColorTable(NULL) {}
+
+ void* fPixels;
+ SkColorTable* fColorTable;
+ size_t fRowBytes;
+
+ void zero() { sk_bzero(this, sizeof(*this)); }
+
+ bool isZero() const {
+ return NULL == fPixels && NULL == fColorTable && 0 == fRowBytes;
+ }
+ };
+
+ SkDEBUGCODE(bool isLocked() const { return fLockCount > 0; })
+ SkDEBUGCODE(int getLockCount() const { return fLockCount; })
+
+ /**
+ * Call to access the pixel memory. Return true on success. Balance this
+ * with a call to unlockPixels().
+ */
+ bool lockPixels();
+
+ /**
+ * Call to access the pixel memory. On success, return true and fill out
+ * the specified rec. On failure, return false and ignore the rec parameter.
+ * Balance this with a call to unlockPixels().
+ */
+ bool lockPixels(LockRec* rec);
+
+ /** Call to balance a previous call to lockPixels(). Returns the pixels
+ (or null) after the unlock. NOTE: lock calls can be nested, but the
+ matching number of unlock calls must be made in order to free the
+ memory (if the subclass implements caching/deferred-decoding.)
+ */
+ void unlockPixels();
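+
+ // A minimal sketch of the lock/unlock protocol; 'pr' is an existing SkPixelRef*.
+ //
+ //   SkPixelRef::LockRec rec;
+ //   if (pr->lockPixels(&rec)) {
+ //       // rec.fPixels, rec.fRowBytes and rec.fColorTable are valid here
+ //       pr->unlockPixels();
+ //   }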
+
+ /**
+ * Some bitmaps can return a copy of their pixels for lockPixels(), but
+ * that copy, if modified, will not be pushed back. These bitmaps should
+ * not be used as targets for a raster device/canvas (since all pixels
+ * modifications will be lost when unlockPixels() is called.)
+ */
+ bool lockPixelsAreWritable() const;
+
+ /** Returns a non-zero, unique value corresponding to the pixels in this
+ pixelref. Each time the pixels are changed (and notifyPixelsChanged is
+ called), a different generation ID will be returned.
+ */
+ uint32_t getGenerationID() const;
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ /** Returns a non-zero, unique value corresponding to this SkPixelRef.
+ Unlike the generation ID, this ID remains the same even when the pixels
+ are changed. IDs are not reused (until uint32_t wraps), so it is safe
+ to consider this ID unique even after this SkPixelRef is deleted.
+
+ Can be used as a key which uniquely identifies this SkPixelRef
+ regardless of changes to its pixels or deletion of this object.
+ */
+ uint32_t getStableID() const { return fStableID; }
+#endif
+
+ /**
+ * Call this if you have changed the contents of the pixels. This will in
+ * turn cause a different generation ID value to be returned from
+ * getGenerationID().
+ */
+ void notifyPixelsChanged();
+
+ /**
+ * Change the info's AlphaType. Note that this does not automatically
+ * invalidate the generation ID. If the pixel values themselves have
+ * changed, then you must explicitly call notifyPixelsChanged() as well.
+ */
+ void changeAlphaType(SkAlphaType at);
+
+ /** Returns true if this pixelref is marked as immutable, meaning that the
+ contents of its pixels will not change for the lifetime of the pixelref.
+ */
+ bool isImmutable() const { return fMutability != kMutable; }
+
+ /** Marks this pixelref as immutable, meaning that the contents of its
+ pixels will not change for the lifetime of the pixelref. This state can
+ be set on a pixelref, but it cannot be cleared once it is set.
+ */
+ void setImmutable();
+
+ /** Return the optional URI string associated with this pixelref. May be
+ null.
+ */
+ const char* getURI() const { return fURI.size() ? fURI.c_str() : NULL; }
+
+ /** Copy a URI string to this pixelref, or clear the URI if the uri is null
+ */
+ void setURI(const char uri[]) {
+ fURI.set(uri);
+ }
+
+ /** Copy a URI string to this pixelref
+ */
+ void setURI(const char uri[], size_t len) {
+ fURI.set(uri, len);
+ }
+
+ /** Assign a URI string to this pixelref.
+ */
+ void setURI(const SkString& uri) { fURI = uri; }
+
+ /**
+ * If the pixelRef has an encoded (i.e. compressed) representation,
+ * return a ref to its data. If the pixelRef
+ * is uncompressed or otherwise does not have this form, return NULL.
+ *
+ * If non-null is returned, the caller is responsible for calling unref()
+ * on the data when it is finished.
+ */
+ SkData* refEncodedData() {
+ return this->onRefEncodedData();
+ }
+
+ struct LockRequest {
+ SkISize fSize;
+ SkFilterQuality fQuality;
+ };
+
+ struct LockResult {
+ LockResult() : fPixels(NULL), fCTable(NULL) {}
+
+ void (*fUnlockProc)(void* ctx);
+ void* fUnlockContext;
+
+ const void* fPixels;
+ SkColorTable* fCTable; // should be NULL unless colortype is kIndex8
+ size_t fRowBytes;
+ SkISize fSize;
+
+ void unlock() {
+ if (fUnlockProc) {
+ fUnlockProc(fUnlockContext);
+ fUnlockProc = NULL; // can't unlock twice!
+ }
+ }
+ };
+
+ bool requestLock(const LockRequest&, LockResult*);
+
+ /**
+ * If this can efficiently return YUV data, this should return true.
+ * Otherwise this returns false and does not modify any of the parameters.
+ *
+ * @param sizeInfo Output parameter indicating the sizes and required
+ * allocation widths of the Y, U, and V planes.
+ * @param colorSpace Output parameter.
+ */
+ bool queryYUV8(SkYUVSizeInfo* sizeInfo, SkYUVColorSpace* colorSpace) const {
+ return this->onQueryYUV8(sizeInfo, colorSpace);
+ }
+
+ /**
+ * Returns true on success and false on failure.
+ * Copies YUV data into the provided YUV planes.
+ *
+ * @param sizeInfo Needs to exactly match the values returned by the
+ * query, except the WidthBytes may be larger than the
+ * recommendation (but not smaller).
+ * @param planes Memory for each of the Y, U, and V planes.
+ */
+ bool getYUV8Planes(const SkYUVSizeInfo& sizeInfo, void* planes[3]) {
+ return this->onGetYUV8Planes(sizeInfo, planes);
+ }
+
+ /** Populates dst with the pixels of this pixelRef, converting them to colorType. */
+ bool readPixels(SkBitmap* dst, SkColorType colorType, const SkIRect* subset = NULL);
+
+ // Register a listener that may be called the next time our generation ID changes.
+ //
+ // We'll only call the listener if we're confident that we are the only SkPixelRef with this
+ // generation ID. If our generation ID changes and we decide not to call the listener, we'll
+ // never call it: you must add a new listener for each generation ID change. We also won't call
+ // the listener when we're certain no one knows what our generation ID is.
+ //
+ // This can be used to invalidate caches keyed by SkPixelRef generation ID.
+ struct GenIDChangeListener {
+ virtual ~GenIDChangeListener() {}
+ virtual void onChange() = 0;
+ };
+
+ // Takes ownership of listener.
+ void addGenIDChangeListener(GenIDChangeListener* listener);
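+
+ // A minimal listener sketch; the pixelref takes ownership of the pointer passed in.
+ //
+ //   struct Invalidator : SkPixelRef::GenIDChangeListener {
+ //       void onChange() override { /* purge any cache entry keyed on the old gen ID */ }
+ //   };
+ //   pr->addGenIDChangeListener(new Invalidator);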
+
+ // Call when this pixelref is part of the key to a resourcecache entry. This allows the cache
+ // to know automatically those entries can be purged when this pixelref is changed or deleted.
+ void notifyAddedToCache() {
+ fAddedToCache.store(true);
+ }
+
+ virtual SkDiscardableMemory* diagnostic_only_getDiscardable() const { return NULL; }
+
+ /**
+ * Returns true if the pixels are generated on-the-fly (when required).
+ */
+ bool isLazyGenerated() const { return this->onIsLazyGenerated(); }
+
+protected:
+ /**
+ * On success, returns true and fills out the LockRec for the pixels. On
+ * failure returns false and ignores the LockRec parameter.
+ *
+ * The caller will have already acquired a mutex for thread safety, so this
+ * method need not do that.
+ */
+ virtual bool onNewLockPixels(LockRec*) = 0;
+
+ /**
+ * Balancing the previous successful call to onNewLockPixels. The locked
+ * pixel address will no longer be referenced, so the subclass is free to
+ * move or discard that memory.
+ *
+ * The caller will have already acquired a mutex for thread safety, so this
+ * method need not do that.
+ */
+ virtual void onUnlockPixels() = 0;
+
+ /** Default impl returns true */
+ virtual bool onLockPixelsAreWritable() const;
+
+ /**
+ * For pixelrefs that don't have access to their raw pixels, they may be
+ * able to make a copy of them (e.g. if the pixels are on the GPU).
+ *
+ * The base class implementation returns false.
+ */
+ virtual bool onReadPixels(SkBitmap* dst, SkColorType colorType, const SkIRect* subsetOrNull);
+
+ // default impl returns NULL.
+ virtual SkData* onRefEncodedData();
+
+ // default impl does nothing.
+ virtual void onNotifyPixelsChanged();
+
+ virtual bool onQueryYUV8(SkYUVSizeInfo*, SkYUVColorSpace*) const {
+ return false;
+ }
+ virtual bool onGetYUV8Planes(const SkYUVSizeInfo&, void*[3] /*planes*/) {
+ return false;
+ }
+
+ /**
+ * Returns the size (in bytes) of the internally allocated memory.
+ * This should be implemented in all serializable SkPixelRef derived classes.
+ * SkBitmap::fPixelRefOffset + SkBitmap::getSafeSize() should never overflow this value,
+ * otherwise the rendering code may attempt to read memory out of bounds.
+ *
+ * @return default impl returns 0.
+ */
+ virtual size_t getAllocatedSizeInBytes() const;
+
+ virtual bool onRequestLock(const LockRequest&, LockResult*);
+
+ virtual bool onIsLazyGenerated() const { return false; }
+
+ /** Return the mutex associated with this pixelref. This value is assigned
+ in the constructor, and cannot change during the lifetime of the object.
+ */
+ SkBaseMutex* mutex() const { return &fMutex; }
+
+ // only call from constructor. Flags this to always be locked, removing
+ // the need to grab the mutex and call onLockPixels/onUnlockPixels.
+ // Performance tweak to avoid those calls (esp. in multi-thread use case).
+ void setPreLocked(void*, size_t rowBytes, SkColorTable*);
+
+private:
+ mutable SkMutex fMutex;
+
+ // mostly const. fInfo.fAlphaType can be changed at runtime.
+ const SkImageInfo fInfo;
+
+ // LockRec is only valid if we're in a locked state (isLocked())
+ LockRec fRec;
+ int fLockCount;
+
+ bool lockPixelsInsideMutex();
+
+ // Bottom bit indicates the Gen ID is unique.
+ bool genIDIsUnique() const { return SkToBool(fTaggedGenID.load() & 1); }
+ mutable SkAtomic<uint32_t> fTaggedGenID;
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ const uint32_t fStableID;
+#endif
+
+ SkTDArray<GenIDChangeListener*> fGenIDChangeListeners; // pointers are owned
+
+ SkString fURI;
+
+ // Set true by caches when they cache content that's derived from the current pixels.
+ SkAtomic<bool> fAddedToCache;
+
+ enum {
+ kMutable, // PixelRefs begin mutable.
+ kTemporarilyImmutable, // Considered immutable, but can revert to mutable.
+ kImmutable, // Once set to this state, it never leaves.
+ } fMutability : 8; // easily fits inside a byte
+
+ // only ever set in constructor, const after that
+ bool fPreLocked;
+
+ void needsNewGenID();
+ void callGenIDChangeListeners();
+
+ void setTemporarilyImmutable();
+ void restoreMutability();
+ friend class SkSurface_Raster; // For the two methods above.
+
+ bool isPreLocked() const { return fPreLocked; }
+ friend class SkImage_Raster;
+ friend class SkSpecialImage_Raster;
+
+ // When copying a bitmap to another with the same shape and config, we can safely
+ // clone the pixelref generation ID too, which makes them equivalent under caching.
+ friend class SkBitmap; // only for cloneGenID
+ void cloneGenID(const SkPixelRef&);
+
+ void setImmutableWithID(uint32_t genID);
+ friend class SkImage_Gpu;
+ friend class SkImageCacherator;
+ friend class SkSpecialImage_Gpu;
+
+ typedef SkRefCnt INHERITED;
+};
+
+class SkPixelRefFactory : public SkRefCnt {
+public:
+ /**
+ * Allocate a new pixelref matching the specified ImageInfo, allocating
+ * the memory for the pixels. If the ImageInfo requires a ColorTable,
+ * the pixelref will ref() the colortable.
+ * On failure return NULL.
+ */
+ virtual SkPixelRef* create(const SkImageInfo&, size_t rowBytes, SkColorTable*) = 0;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPixelSerializer.h b/gfx/skia/skia/include/core/SkPixelSerializer.h
new file mode 100644
index 000000000..b168f79dd
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPixelSerializer.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPixelSerializer_DEFINED
+#define SkPixelSerializer_DEFINED
+
+#include "SkRefCnt.h"
+#include "SkPixmap.h"
+
+class SkData;
+
+/**
+ * Interface for serializing pixels, e.g. SkBitmaps in an SkPicture.
+ */
+class SkPixelSerializer : public SkRefCnt {
+public:
+ virtual ~SkPixelSerializer() {}
+
+ /**
+ * Call to determine if the client wants to serialize the encoded data. If
+ * false, serialize another version (e.g. the result of encodePixels).
+ */
+ bool useEncodedData(const void* data, size_t len) {
+ return this->onUseEncodedData(data, len);
+ }
+
+ /**
+ * Call to get the client's version of encoding these pixels. If it
+ * returns NULL, serialize the raw pixels.
+ */
+ SkData* encode(const SkPixmap& pixmap) { return this->onEncode(pixmap); }
+
+protected:
+ /**
+ * Return true if you want to serialize the encoded data, false if you want
+ * another version serialized (e.g. the result of this->encode()).
+ */
+ virtual bool onUseEncodedData(const void* data, size_t len) = 0;
+
+ /**
+ * If you want to encode these pixels, return the encoded data as an SkData
+ * Return null if you want to serialize the raw pixels.
+ */
+ virtual SkData* onEncode(const SkPixmap&) = 0;
+};
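+
+// A minimal subclass sketch that rejects pre-encoded data and falls back to
+// serializing raw pixels (illustrative only):
+//
+//   class RawOnlySerializer : public SkPixelSerializer {
+//   protected:
+//       bool onUseEncodedData(const void*, size_t) override { return false; }
+//       SkData* onEncode(const SkPixmap&) override { return nullptr; }   // use raw pixels
+//   };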
+#endif // SkPixelSerializer_DEFINED
diff --git a/gfx/skia/skia/include/core/SkPixmap.h b/gfx/skia/skia/include/core/SkPixmap.h
new file mode 100644
index 000000000..699ddb4d4
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPixmap.h
@@ -0,0 +1,255 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPixmap_DEFINED
+#define SkPixmap_DEFINED
+
+#include "SkColor.h"
+#include "SkFilterQuality.h"
+#include "SkImageInfo.h"
+
+class SkColorTable;
+class SkData;
+struct SkMask;
+
+/**
+ * Pairs SkImageInfo with actual pixels and rowbytes. This class does not try to manage the
+ * lifetime of the pixel memory (nor the colortable if provided).
+ */
+class SK_API SkPixmap {
+public:
+ SkPixmap()
+ : fPixels(NULL), fCTable(NULL), fRowBytes(0), fInfo(SkImageInfo::MakeUnknown(0, 0))
+ {}
+
+ SkPixmap(const SkImageInfo& info, const void* addr, size_t rowBytes,
+ SkColorTable* ctable = NULL)
+ : fPixels(addr), fCTable(ctable), fRowBytes(rowBytes), fInfo(info)
+ {
+ if (kIndex_8_SkColorType == info.colorType()) {
+ SkASSERT(ctable);
+ } else {
+ SkASSERT(NULL == ctable);
+ }
+ }
+
+ void reset();
+ void reset(const SkImageInfo& info, const void* addr, size_t rowBytes,
+ SkColorTable* ctable = NULL);
+ void reset(const SkImageInfo& info) {
+ this->reset(info, NULL, 0, NULL);
+ }
+
+ // overrides the colorspace in the SkImageInfo of the pixmap
+ void setColorSpace(sk_sp<SkColorSpace>);
+
+ /**
+ * If supported, set this pixmap to point to the pixels in the specified mask and return true.
+ * On failure, return false and set this pixmap to empty.
+ */
+ bool SK_WARN_UNUSED_RESULT reset(const SkMask&);
+
+ /**
+ * Computes the intersection of area and this pixmap. If that intersection is non-empty,
+ * set subset to that intersection and return true.
+ *
+ * On failure, return false and ignore the subset parameter.
+ */
+ bool SK_WARN_UNUSED_RESULT extractSubset(SkPixmap* subset, const SkIRect& area) const;
+
+ const SkImageInfo& info() const { return fInfo; }
+ size_t rowBytes() const { return fRowBytes; }
+ const void* addr() const { return fPixels; }
+ SkColorTable* ctable() const { return fCTable; }
+
+ int width() const { return fInfo.width(); }
+ int height() const { return fInfo.height(); }
+ SkColorType colorType() const { return fInfo.colorType(); }
+ SkAlphaType alphaType() const { return fInfo.alphaType(); }
+ bool isOpaque() const { return fInfo.isOpaque(); }
+
+ SkIRect bounds() const { return SkIRect::MakeWH(this->width(), this->height()); }
+
+ /**
+ * Return the rowbytes expressed as a number of pixels (like width and height).
+ */
+ int rowBytesAsPixels() const { return int(fRowBytes >> this->shiftPerPixel()); }
+
+ /**
+ * Return the shift amount per pixel (i.e. 0 for 1-byte per pixel, 1 for 2-bytes per pixel
+ * colortypes, 2 for 4-bytes per pixel colortypes). Return 0 for kUnknown_SkColorType.
+ */
+ int shiftPerPixel() const { return fInfo.shiftPerPixel(); }
+
+ uint64_t getSize64() const { return sk_64_mul(fInfo.height(), fRowBytes); }
+ uint64_t getSafeSize64() const { return fInfo.getSafeSize64(fRowBytes); }
+ size_t getSafeSize() const { return fInfo.getSafeSize(fRowBytes); }
+
+ const void* addr(int x, int y) const {
+ return (const char*)fPixels + fInfo.computeOffset(x, y, fRowBytes);
+ }
+ const uint8_t* addr8() const {
+ SkASSERT(1 == SkColorTypeBytesPerPixel(fInfo.colorType()));
+ return reinterpret_cast<const uint8_t*>(fPixels);
+ }
+ const uint16_t* addr16() const {
+ SkASSERT(2 == SkColorTypeBytesPerPixel(fInfo.colorType()));
+ return reinterpret_cast<const uint16_t*>(fPixels);
+ }
+ const uint32_t* addr32() const {
+ SkASSERT(4 == SkColorTypeBytesPerPixel(fInfo.colorType()));
+ return reinterpret_cast<const uint32_t*>(fPixels);
+ }
+ const uint64_t* addr64() const {
+ SkASSERT(8 == SkColorTypeBytesPerPixel(fInfo.colorType()));
+ return reinterpret_cast<const uint64_t*>(fPixels);
+ }
+ const uint16_t* addrF16() const {
+ SkASSERT(8 == SkColorTypeBytesPerPixel(fInfo.colorType()));
+ SkASSERT(kRGBA_F16_SkColorType == fInfo.colorType());
+ return reinterpret_cast<const uint16_t*>(fPixels);
+ }
+
+ // Offset by the specified x,y coordinates
+
+ const uint8_t* addr8(int x, int y) const {
+ SkASSERT((unsigned)x < (unsigned)fInfo.width());
+ SkASSERT((unsigned)y < (unsigned)fInfo.height());
+ return (const uint8_t*)((const char*)this->addr8() + y * fRowBytes + (x << 0));
+ }
+ const uint16_t* addr16(int x, int y) const {
+ SkASSERT((unsigned)x < (unsigned)fInfo.width());
+ SkASSERT((unsigned)y < (unsigned)fInfo.height());
+ return (const uint16_t*)((const char*)this->addr16() + y * fRowBytes + (x << 1));
+ }
+ const uint32_t* addr32(int x, int y) const {
+ SkASSERT((unsigned)x < (unsigned)fInfo.width());
+ SkASSERT((unsigned)y < (unsigned)fInfo.height());
+ return (const uint32_t*)((const char*)this->addr32() + y * fRowBytes + (x << 2));
+ }
+ const uint64_t* addr64(int x, int y) const {
+ SkASSERT((unsigned)x < (unsigned)fInfo.width());
+ SkASSERT((unsigned)y < (unsigned)fInfo.height());
+ return (const uint64_t*)((const char*)this->addr64() + y * fRowBytes + (x << 3));
+ }
+ const uint16_t* addrF16(int x, int y) const {
+ SkASSERT(kRGBA_F16_SkColorType == fInfo.colorType());
+ return reinterpret_cast<const uint16_t*>(this->addr64(x, y));
+ }
+
+ // Writable versions
+
+ void* writable_addr() const { return const_cast<void*>(fPixels); }
+ void* writable_addr(int x, int y) const {
+ return const_cast<void*>(this->addr(x, y));
+ }
+ uint8_t* writable_addr8(int x, int y) const {
+ return const_cast<uint8_t*>(this->addr8(x, y));
+ }
+ uint16_t* writable_addr16(int x, int y) const {
+ return const_cast<uint16_t*>(this->addr16(x, y));
+ }
+ uint32_t* writable_addr32(int x, int y) const {
+ return const_cast<uint32_t*>(this->addr32(x, y));
+ }
+ uint64_t* writable_addr64(int x, int y) const {
+ return const_cast<uint64_t*>(this->addr64(x, y));
+ }
+ uint16_t* writable_addrF16(int x, int y) const {
+ return reinterpret_cast<uint16_t*>(writable_addr64(x, y));
+ }
+
+ // copy methods
+
+ bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY) const;
+ bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes) const {
+ return this->readPixels(dstInfo, dstPixels, dstRowBytes, 0, 0);
+ }
+ bool readPixels(const SkPixmap& dst, int srcX, int srcY) const {
+ return this->readPixels(dst.info(), dst.writable_addr(), dst.rowBytes(), srcX, srcY);
+ }
+ bool readPixels(const SkPixmap& dst) const {
+ return this->readPixels(dst.info(), dst.writable_addr(), dst.rowBytes(), 0, 0);
+ }
+
+ /**
+ * Copy the pixels from this pixmap into the dst pixmap, converting as needed into dst's
+ * colortype/alphatype. If the conversion cannot be performed, false is returned.
+ *
+ * If dst's dimensions differ from the src dimension, the image will be scaled, applying the
+ * specified filter-quality.
+ */
+ bool scalePixels(const SkPixmap& dst, SkFilterQuality) const;
+
+ /**
+ * Returns true if pixels were written to (e.g. if colorType is kUnknown_SkColorType, this
+ * will return false). If subset does not intersect the bounds of this pixmap, returns false.
+ */
+ bool erase(SkColor, const SkIRect& subset) const;
+
+ bool erase(SkColor color) const { return this->erase(color, this->bounds()); }
+ bool erase(const SkColor4f&, const SkIRect* subset = nullptr) const;
+
+private:
+ const void* fPixels;
+ SkColorTable* fCTable;
+ size_t fRowBytes;
+ SkImageInfo fInfo;
+};
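Because the pixmap does not own its memory, a typical pattern is to wrap a caller-managed buffer and let the class do the addressing and conversion. A small sketch; the function name and buffer layout are assumptions of the example, and the caller must supply a buffer of at least height * rowBytes bytes.

    #include "SkPixmap.h"

    void fill_red(void* buffer, int w, int h, size_t rowBytes) {
        SkImageInfo info = SkImageInfo::MakeN32Premul(w, h);
        SkPixmap pm(info, buffer, rowBytes);
        // erase() converts the color to the pixmap's colortype; it returns false
        // for kUnknown_SkColorType or an empty intersection with the bounds.
        pm.erase(SK_ColorRED);
    }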
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+
+class SK_API SkAutoPixmapUnlock : ::SkNoncopyable {
+public:
+ SkAutoPixmapUnlock() : fUnlockProc(NULL), fIsLocked(false) {}
+ SkAutoPixmapUnlock(const SkPixmap& pm, void (*unlock)(void*), void* ctx)
+ : fUnlockProc(unlock), fUnlockContext(ctx), fPixmap(pm), fIsLocked(true)
+ {}
+ ~SkAutoPixmapUnlock() { this->unlock(); }
+
+ /**
+ * Return the currently locked pixmap. Undefined if it has been unlocked.
+ */
+ const SkPixmap& pixmap() const {
+ SkASSERT(this->isLocked());
+ return fPixmap;
+ }
+
+ bool isLocked() const { return fIsLocked; }
+
+ /**
+ * Unlocks the pixmap. Can safely be called more than once as it will only call the underlying
+ * unlock-proc once.
+ */
+ void unlock() {
+ if (fUnlockProc) {
+ SkASSERT(fIsLocked);
+ fUnlockProc(fUnlockContext);
+ fUnlockProc = NULL;
+ fIsLocked = false;
+ }
+ }
+
+ /**
+ * If there is a currently locked pixmap, unlock it, then copy the specified pixmap
+ * and (optional) unlock proc/context.
+ */
+ void reset(const SkPixmap& pm, void (*unlock)(void*), void* ctx);
+
+private:
+ void (*fUnlockProc)(void*);
+ void* fUnlockContext;
+ SkPixmap fPixmap;
+ bool fIsLocked;
+
+ friend class SkBitmap;
+};
+
+#endif
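SkAutoPixmapUnlock is the carrier that SkBitmap::requestLock() fills in (note the friend declaration above). The sketch below shows how a caller would typically consume one, assuming requestLock() is available with this signature in this snapshot.

    #include "SkBitmap.h"
    #include "SkPixmap.h"

    bool read_top_left(const SkBitmap& bm, uint32_t* out) {
        SkAutoPixmapUnlock locker;
        if (!bm.requestLock(&locker)) {
            return false;                         // pixels unavailable
        }
        const SkPixmap& pm = locker.pixmap();     // valid while locker holds the lock
        if (pm.colorType() != kN32_SkColorType) {
            return false;
        }
        *out = *pm.addr32(0, 0);                  // premultiplied 32-bit pixel
        return true;
    }                                             // ~SkAutoPixmapUnlock unlocks here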
diff --git a/gfx/skia/skia/include/core/SkPngChunkReader.h b/gfx/skia/skia/include/core/SkPngChunkReader.h
new file mode 100644
index 000000000..0cd6634bc
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPngChunkReader.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPngChunkReader_DEFINED
+#define SkPngChunkReader_DEFINED
+
+#include "SkTypes.h"
+#include "SkRefCnt.h"
+
+/**
+ * SkPngChunkReader
+ *
+ * Base class for optional callbacks to retrieve meta/chunk data out of a PNG
+ * encoded image as it is being decoded.
+ * Used by SkCodec.
+ */
+class SkPngChunkReader : public SkRefCnt {
+public:
+ /**
+ * This will be called by the decoder when it sees an unknown chunk.
+ *
+     *  Usage by SkCodec:
+ * Depending on the location of the unknown chunks, this callback may be
+ * called by
+ * - the factory (NewFromStream/NewFromData)
+ * - getPixels
+ * - startScanlineDecode
+ * - the first call to getScanlines/skipScanlines
+ * The callback may be called from a different thread (e.g. if the SkCodec
+ * is passed to another thread), and it may be called multiple times, if
+ * the SkCodec is used multiple times.
+ *
+ * @param tag Name for this type of chunk.
+ * @param data Data to be interpreted by the subclass.
+ * @param length Number of bytes of data in the chunk.
+ * @return true to continue decoding, or false to indicate an error, which
+ * will cause the decoder to not return the image.
+ */
+ virtual bool readChunk(const char tag[], const void* data, size_t length) = 0;
+};
+#endif // SkPngChunkReader_DEFINED
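A minimal reader honoring the readChunk() contract above (return true to keep decoding); it could be handed to the SkCodec PNG factory, assuming that factory accepts an SkPngChunkReader in this snapshot. The class name is illustrative.

    #include "SkPngChunkReader.h"

    class ChunkCounter : public SkPngChunkReader {
    public:
        bool readChunk(const char tag[], const void* data, size_t length) override {
            (void)tag; (void)data; (void)length;   // payload ignored in this sketch
            ++fCount;
            return true;                           // keep decoding
        }
        int count() const { return fCount; }
    private:
        int fCount = 0;
    };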
diff --git a/gfx/skia/skia/include/core/SkPoint.h b/gfx/skia/skia/include/core/SkPoint.h
new file mode 100644
index 000000000..f5ecbab78
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPoint.h
@@ -0,0 +1,556 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPoint_DEFINED
+#define SkPoint_DEFINED
+
+#include "SkMath.h"
+#include "SkScalar.h"
+
+/** \struct SkIPoint16
+
+    SkIPoint16 holds two 16 bit integer coordinates
+*/
+struct SkIPoint16 {
+ int16_t fX, fY;
+
+ static SkIPoint16 Make(int x, int y) {
+ SkIPoint16 pt;
+ pt.set(x, y);
+ return pt;
+ }
+
+ int16_t x() const { return fX; }
+ int16_t y() const { return fY; }
+
+ void set(int x, int y) {
+ fX = SkToS16(x);
+ fY = SkToS16(y);
+ }
+};
+
+/** \struct SkIPoint
+
+ SkIPoint holds two 32 bit integer coordinates
+*/
+struct SkIPoint {
+ int32_t fX, fY;
+
+ static SkIPoint Make(int32_t x, int32_t y) {
+ SkIPoint pt;
+ pt.set(x, y);
+ return pt;
+ }
+
+ int32_t x() const { return fX; }
+ int32_t y() const { return fY; }
+ void setX(int32_t x) { fX = x; }
+ void setY(int32_t y) { fY = y; }
+
+ /**
+ * Returns true iff fX and fY are both zero.
+ */
+ bool isZero() const { return (fX | fY) == 0; }
+
+ /**
+ * Set both fX and fY to zero. Same as set(0, 0)
+ */
+ void setZero() { fX = fY = 0; }
+
+ /** Set the x and y values of the point. */
+ void set(int32_t x, int32_t y) { fX = x; fY = y; }
+
+ /** Rotate the point clockwise, writing the new point into dst
+ It is legal for dst == this
+ */
+ void rotateCW(SkIPoint* dst) const;
+
+ /** Rotate the point clockwise, writing the new point back into the point
+ */
+
+ void rotateCW() { this->rotateCW(this); }
+
+ /** Rotate the point counter-clockwise, writing the new point into dst.
+ It is legal for dst == this
+ */
+ void rotateCCW(SkIPoint* dst) const;
+
+ /** Rotate the point counter-clockwise, writing the new point back into
+ the point
+ */
+ void rotateCCW() { this->rotateCCW(this); }
+
+ /** Negate the X and Y coordinates of the point.
+ */
+ void negate() { fX = -fX; fY = -fY; }
+
+ /** Return a new point whose X and Y coordinates are the negative of the
+ original point's
+ */
+ SkIPoint operator-() const {
+ SkIPoint neg;
+ neg.fX = -fX;
+ neg.fY = -fY;
+ return neg;
+ }
+
+ /** Add v's coordinates to this point's */
+ void operator+=(const SkIPoint& v) {
+ fX += v.fX;
+ fY += v.fY;
+ }
+
+ /** Subtract v's coordinates from this point's */
+ void operator-=(const SkIPoint& v) {
+ fX -= v.fX;
+ fY -= v.fY;
+ }
+
+ /** Returns true if the point's coordinates equal (x,y) */
+ bool equals(int32_t x, int32_t y) const {
+ return fX == x && fY == y;
+ }
+
+ friend bool operator==(const SkIPoint& a, const SkIPoint& b) {
+ return a.fX == b.fX && a.fY == b.fY;
+ }
+
+ friend bool operator!=(const SkIPoint& a, const SkIPoint& b) {
+ return a.fX != b.fX || a.fY != b.fY;
+ }
+
+ /** Returns a new point whose coordinates are the difference between
+ a and b (i.e. a - b)
+ */
+ friend SkIPoint operator-(const SkIPoint& a, const SkIPoint& b) {
+ SkIPoint v;
+ v.set(a.fX - b.fX, a.fY - b.fY);
+ return v;
+ }
+
+ /** Returns a new point whose coordinates are the sum of a and b (a + b)
+ */
+ friend SkIPoint operator+(const SkIPoint& a, const SkIPoint& b) {
+ SkIPoint v;
+ v.set(a.fX + b.fX, a.fY + b.fY);
+ return v;
+ }
+
+ /** Returns the dot product of a and b, treating them as 2D vectors
+ */
+ static int32_t DotProduct(const SkIPoint& a, const SkIPoint& b) {
+ return a.fX * b.fX + a.fY * b.fY;
+ }
+
+ /** Returns the cross product of a and b, treating them as 2D vectors
+ */
+ static int32_t CrossProduct(const SkIPoint& a, const SkIPoint& b) {
+ return a.fX * b.fY - a.fY * b.fX;
+ }
+};
+
+struct SK_API SkPoint {
+ SkScalar fX, fY;
+
+ static SkPoint Make(SkScalar x, SkScalar y) {
+ SkPoint pt;
+ pt.set(x, y);
+ return pt;
+ }
+
+ SkScalar x() const { return fX; }
+ SkScalar y() const { return fY; }
+
+ /**
+ * Returns true iff fX and fY are both zero.
+ */
+ bool isZero() const { return (0 == fX) & (0 == fY); }
+
+ /** Set the point's X and Y coordinates */
+ void set(SkScalar x, SkScalar y) { fX = x; fY = y; }
+
+ /** Set the point's X and Y coordinates by automatically promoting (x,y) to
+ SkScalar values.
+ */
+ void iset(int32_t x, int32_t y) {
+ fX = SkIntToScalar(x);
+ fY = SkIntToScalar(y);
+ }
+
+ /** Set the point's X and Y coordinates by automatically promoting p's
+ coordinates to SkScalar values.
+ */
+ void iset(const SkIPoint& p) {
+ fX = SkIntToScalar(p.fX);
+ fY = SkIntToScalar(p.fY);
+ }
+
+ void setAbs(const SkPoint& pt) {
+ fX = SkScalarAbs(pt.fX);
+ fY = SkScalarAbs(pt.fY);
+ }
+
+ // counter-clockwise fan
+ void setIRectFan(int l, int t, int r, int b) {
+ SkPoint* v = this;
+ v[0].set(SkIntToScalar(l), SkIntToScalar(t));
+ v[1].set(SkIntToScalar(l), SkIntToScalar(b));
+ v[2].set(SkIntToScalar(r), SkIntToScalar(b));
+ v[3].set(SkIntToScalar(r), SkIntToScalar(t));
+ }
+ void setIRectFan(int l, int t, int r, int b, size_t stride);
+
+ // counter-clockwise fan
+ void setRectFan(SkScalar l, SkScalar t, SkScalar r, SkScalar b) {
+ SkPoint* v = this;
+ v[0].set(l, t);
+ v[1].set(l, b);
+ v[2].set(r, b);
+ v[3].set(r, t);
+ }
+
+ void setRectFan(SkScalar l, SkScalar t, SkScalar r, SkScalar b, size_t stride) {
+ SkASSERT(stride >= sizeof(SkPoint));
+
+ ((SkPoint*)((intptr_t)this + 0 * stride))->set(l, t);
+ ((SkPoint*)((intptr_t)this + 1 * stride))->set(l, b);
+ ((SkPoint*)((intptr_t)this + 2 * stride))->set(r, b);
+ ((SkPoint*)((intptr_t)this + 3 * stride))->set(r, t);
+ }
+
+
+ static void Offset(SkPoint points[], int count, const SkPoint& offset) {
+ Offset(points, count, offset.fX, offset.fY);
+ }
+
+ static void Offset(SkPoint points[], int count, SkScalar dx, SkScalar dy) {
+ for (int i = 0; i < count; ++i) {
+ points[i].offset(dx, dy);
+ }
+ }
+
+ void offset(SkScalar dx, SkScalar dy) {
+ fX += dx;
+ fY += dy;
+ }
+
+    /** Return the Euclidean distance from (0,0) to the point
+ */
+ SkScalar length() const { return SkPoint::Length(fX, fY); }
+ SkScalar distanceToOrigin() const { return this->length(); }
+
+ /**
+ * Return true if the computed length of the vector is >= the internal
+ * tolerance (used to avoid dividing by tiny values).
+ */
+ static bool CanNormalize(SkScalar dx, SkScalar dy) {
+ // Simple enough (and performance critical sometimes) so we inline it.
+ return (dx*dx + dy*dy) > (SK_ScalarNearlyZero * SK_ScalarNearlyZero);
+ }
+
+ bool canNormalize() const {
+ return CanNormalize(fX, fY);
+ }
+
+ /** Set the point (vector) to be unit-length in the same direction as it
+ already points. If the point has a degenerate length (i.e. nearly 0)
+ then set it to (0,0) and return false; otherwise return true.
+ */
+ bool normalize();
+
+ /** Set the point (vector) to be unit-length in the same direction as the
+ x,y params. If the vector (x,y) has a degenerate length (i.e. nearly 0)
+ then set it to (0,0) and return false, otherwise return true.
+ */
+ bool setNormalize(SkScalar x, SkScalar y);
+
+ /** Scale the point (vector) to have the specified length, and return that
+ length. If the original length is degenerately small (nearly zero),
+ set it to (0,0) and return false, otherwise return true.
+ */
+ bool setLength(SkScalar length);
+
+ /** Set the point (vector) to have the specified length in the same
+ direction as (x,y). If the vector (x,y) has a degenerate length
+ (i.e. nearly 0) then set it to (0,0) and return false, otherwise return true.
+ */
+ bool setLength(SkScalar x, SkScalar y, SkScalar length);
+
+ /** Same as setLength, but favoring speed over accuracy.
+ */
+ bool setLengthFast(SkScalar length);
+
+ /** Same as setLength, but favoring speed over accuracy.
+ */
+ bool setLengthFast(SkScalar x, SkScalar y, SkScalar length);
+
+ /** Scale the point's coordinates by scale, writing the answer into dst.
+ It is legal for dst == this.
+ */
+ void scale(SkScalar scale, SkPoint* dst) const;
+
+ /** Scale the point's coordinates by scale, writing the answer back into
+ the point.
+ */
+ void scale(SkScalar value) { this->scale(value, this); }
+
+ /** Rotate the point clockwise by 90 degrees, writing the answer into dst.
+ It is legal for dst == this.
+ */
+ void rotateCW(SkPoint* dst) const;
+
+ /** Rotate the point clockwise by 90 degrees, writing the answer back into
+ the point.
+ */
+ void rotateCW() { this->rotateCW(this); }
+
+ /** Rotate the point counter-clockwise by 90 degrees, writing the answer
+ into dst. It is legal for dst == this.
+ */
+ void rotateCCW(SkPoint* dst) const;
+
+ /** Rotate the point counter-clockwise by 90 degrees, writing the answer
+ back into the point.
+ */
+ void rotateCCW() { this->rotateCCW(this); }
+
+ /** Negate the point's coordinates
+ */
+ void negate() {
+ fX = -fX;
+ fY = -fY;
+ }
+
+ /** Returns a new point whose coordinates are the negative of the point's
+ */
+ SkPoint operator-() const {
+ SkPoint neg;
+ neg.fX = -fX;
+ neg.fY = -fY;
+ return neg;
+ }
+
+ /** Add v's coordinates to the point's
+ */
+ void operator+=(const SkPoint& v) {
+ fX += v.fX;
+ fY += v.fY;
+ }
+
+ /** Subtract v's coordinates from the point's
+ */
+ void operator-=(const SkPoint& v) {
+ fX -= v.fX;
+ fY -= v.fY;
+ }
+
+ SkPoint operator*(SkScalar scale) const {
+ return Make(fX * scale, fY * scale);
+ }
+
+ SkPoint& operator*=(SkScalar scale) {
+ fX *= scale;
+ fY *= scale;
+ return *this;
+ }
+
+ /**
+ * Returns true if both X and Y are finite (not infinity or NaN)
+ */
+ bool isFinite() const {
+ SkScalar accum = 0;
+ accum *= fX;
+ accum *= fY;
+
+ // accum is either NaN or it is finite (zero).
+ SkASSERT(0 == accum || SkScalarIsNaN(accum));
+
+ // value==value will be true iff value is not NaN
+ // TODO: is it faster to say !accum or accum==accum?
+ return !SkScalarIsNaN(accum);
+ }
+
+ /**
+ * Returns true if the point's coordinates equal (x,y)
+ */
+ bool equals(SkScalar x, SkScalar y) const {
+ return fX == x && fY == y;
+ }
+
+ friend bool operator==(const SkPoint& a, const SkPoint& b) {
+ return a.fX == b.fX && a.fY == b.fY;
+ }
+
+ friend bool operator!=(const SkPoint& a, const SkPoint& b) {
+ return a.fX != b.fX || a.fY != b.fY;
+ }
+
+ /** Return true if this point and the given point are far enough apart
+ such that a vector between them would be non-degenerate.
+
+ WARNING: Unlike the explicit tolerance version,
+ this method does not use componentwise comparison. Instead, it
+ uses a comparison designed to match judgments elsewhere regarding
+ degeneracy ("points A and B are so close that the vector between them
+ is essentially zero").
+ */
+ bool equalsWithinTolerance(const SkPoint& p) const {
+ return !CanNormalize(fX - p.fX, fY - p.fY);
+ }
+
+ /** WARNING: There is no guarantee that the result will reflect judgments
+ elsewhere regarding degeneracy ("points A and B are so close that the
+ vector between them is essentially zero").
+ */
+ bool equalsWithinTolerance(const SkPoint& p, SkScalar tol) const {
+ return SkScalarNearlyZero(fX - p.fX, tol)
+ && SkScalarNearlyZero(fY - p.fY, tol);
+ }
+
+ /** Returns a new point whose coordinates are the difference between
+ a's and b's (a - b)
+ */
+ friend SkPoint operator-(const SkPoint& a, const SkPoint& b) {
+ SkPoint v;
+ v.set(a.fX - b.fX, a.fY - b.fY);
+ return v;
+ }
+
+ /** Returns a new point whose coordinates are the sum of a's and b's (a + b)
+ */
+ friend SkPoint operator+(const SkPoint& a, const SkPoint& b) {
+ SkPoint v;
+ v.set(a.fX + b.fX, a.fY + b.fY);
+ return v;
+ }
+
+    /** Returns the Euclidean distance from (0,0) to (x,y)
+ */
+ static SkScalar Length(SkScalar x, SkScalar y);
+
+ /** Normalize pt, returning its previous length. If the prev length is too
+ small (degenerate), set pt to (0,0) and return 0. This uses the same
+ tolerance as CanNormalize.
+
+ Note that this method may be significantly more expensive than
+ the non-static normalize(), because it has to return the previous length
+ of the point. If you don't need the previous length, call the
+ non-static normalize() method instead.
+ */
+ static SkScalar Normalize(SkPoint* pt);
+
+    /** Returns the Euclidean distance between a and b
+ */
+ static SkScalar Distance(const SkPoint& a, const SkPoint& b) {
+ return Length(a.fX - b.fX, a.fY - b.fY);
+ }
+
+ /** Returns the dot product of a and b, treating them as 2D vectors
+ */
+ static SkScalar DotProduct(const SkPoint& a, const SkPoint& b) {
+ return a.fX * b.fX + a.fY * b.fY;
+ }
+
+ /** Returns the cross product of a and b, treating them as 2D vectors
+ */
+ static SkScalar CrossProduct(const SkPoint& a, const SkPoint& b) {
+ return a.fX * b.fY - a.fY * b.fX;
+ }
+
+ SkScalar cross(const SkPoint& vec) const {
+ return CrossProduct(*this, vec);
+ }
+
+ SkScalar dot(const SkPoint& vec) const {
+ return DotProduct(*this, vec);
+ }
+
+ SkScalar lengthSqd() const {
+ return DotProduct(*this, *this);
+ }
+
+ SkScalar distanceToSqd(const SkPoint& pt) const {
+ SkScalar dx = fX - pt.fX;
+ SkScalar dy = fY - pt.fY;
+ return dx * dx + dy * dy;
+ }
+
+ /**
+ * The side of a point relative to a line. If the line is from a to b then
+ * the values are consistent with the sign of (b-a) cross (pt-a)
+ */
+ enum Side {
+ kLeft_Side = -1,
+ kOn_Side = 0,
+ kRight_Side = 1
+ };
+
+ /**
+ * Returns the squared distance to the infinite line between two pts. Also
+ * optionally returns the side of the line that the pt falls on (looking
+ * along line from a to b)
+ */
+ SkScalar distanceToLineBetweenSqd(const SkPoint& a,
+ const SkPoint& b,
+ Side* side = NULL) const;
+
+ /**
+ * Returns the distance to the infinite line between two pts. Also
+ * optionally returns the side of the line that the pt falls on (looking
+ * along the line from a to b)
+ */
+ SkScalar distanceToLineBetween(const SkPoint& a,
+ const SkPoint& b,
+ Side* side = NULL) const {
+ return SkScalarSqrt(this->distanceToLineBetweenSqd(a, b, side));
+ }
+
+ /**
+ * Returns the squared distance to the line segment between pts a and b
+ */
+ SkScalar distanceToLineSegmentBetweenSqd(const SkPoint& a,
+ const SkPoint& b) const;
+
+ /**
+ * Returns the distance to the line segment between pts a and b.
+ */
+ SkScalar distanceToLineSegmentBetween(const SkPoint& a,
+ const SkPoint& b) const {
+ return SkScalarSqrt(this->distanceToLineSegmentBetweenSqd(a, b));
+ }
+
+ /**
+ * Make this vector be orthogonal to vec. Looking down vec the
+ * new vector will point in direction indicated by side (which
+ * must be kLeft_Side or kRight_Side).
+ */
+ void setOrthog(const SkPoint& vec, Side side = kLeft_Side) {
+ // vec could be this
+ SkScalar tmp = vec.fX;
+ if (kRight_Side == side) {
+ fX = -vec.fY;
+ fY = tmp;
+ } else {
+ SkASSERT(kLeft_Side == side);
+ fX = vec.fY;
+ fY = -tmp;
+ }
+ }
+
+ /**
+ * cast-safe way to treat the point as an array of (2) SkScalars.
+ */
+ const SkScalar* asScalars() const { return &fX; }
+};
+
+typedef SkPoint SkVector;
+
+static inline bool SkPointsAreFinite(const SkPoint array[], int count) {
+ return SkScalarsAreFinite(&array[0].fX, count << 1);
+}
+
+#endif
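A short sketch tying together the vector helpers declared above (length, Normalize, dot and cross products); the numbers are arbitrary.

    #include "SkPoint.h"

    void vector_basics() {
        SkVector v = SkPoint::Make(3.0f, 4.0f);
        SkScalar len  = v.length();                // 5
        SkScalar prev = SkPoint::Normalize(&v);    // also 5; v is now (0.6, 0.8)
        SkVector w = SkPoint::Make(-4.0f, 3.0f);
        SkScalar d = v.dot(w);                     // 0: the vectors are perpendicular
        SkScalar c = v.cross(w);                   // 5 = |v| * |w| * sin(90 degrees)
        (void)len; (void)prev; (void)d; (void)c;
    }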
diff --git a/gfx/skia/skia/include/core/SkPoint3.h b/gfx/skia/skia/include/core/SkPoint3.h
new file mode 100644
index 000000000..af24a8df5
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPoint3.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPoint3_DEFINED
+#define SkPoint3_DEFINED
+
+#include "SkScalar.h"
+
+struct SK_API SkPoint3 {
+ SkScalar fX, fY, fZ;
+
+ static SkPoint3 Make(SkScalar x, SkScalar y, SkScalar z) {
+ SkPoint3 pt;
+ pt.set(x, y, z);
+ return pt;
+ }
+
+ SkScalar x() const { return fX; }
+ SkScalar y() const { return fY; }
+ SkScalar z() const { return fZ; }
+
+ void set(SkScalar x, SkScalar y, SkScalar z) { fX = x; fY = y; fZ = z; }
+
+ friend bool operator==(const SkPoint3& a, const SkPoint3& b) {
+ return a.fX == b.fX && a.fY == b.fY && a.fZ == b.fZ;
+ }
+
+ friend bool operator!=(const SkPoint3& a, const SkPoint3& b) {
+ return !(a == b);
+ }
+
+    /** Returns the Euclidean distance from (0,0,0) to (x,y,z)
+ */
+ static SkScalar Length(SkScalar x, SkScalar y, SkScalar z);
+
+    /** Return the Euclidean distance from (0,0,0) to the point
+ */
+ SkScalar length() const { return SkPoint3::Length(fX, fY, fZ); }
+
+ /** Set the point (vector) to be unit-length in the same direction as it
+ already points. If the point has a degenerate length (i.e., nearly 0)
+ then set it to (0,0,0) and return false; otherwise return true.
+ */
+ bool normalize();
+
+ /** Return a new point whose X, Y and Z coordinates are scaled.
+ */
+ SkPoint3 makeScale(SkScalar scale) const {
+ SkPoint3 p;
+ p.set(scale * fX, scale * fY, scale * fZ);
+ return p;
+ }
+
+ /** Scale the point's coordinates by scale.
+ */
+ void scale(SkScalar value) {
+ fX *= value;
+ fY *= value;
+ fZ *= value;
+ }
+
+ /** Return a new point whose X, Y and Z coordinates are the negative of the
+ original point's
+ */
+ SkPoint3 operator-() const {
+ SkPoint3 neg;
+ neg.fX = -fX;
+ neg.fY = -fY;
+ neg.fZ = -fZ;
+ return neg;
+ }
+
+ /** Returns a new point whose coordinates are the difference between
+ a and b (i.e., a - b)
+ */
+ friend SkPoint3 operator-(const SkPoint3& a, const SkPoint3& b) {
+ SkPoint3 v;
+ v.set(a.fX - b.fX, a.fY - b.fY, a.fZ - b.fZ);
+ return v;
+ }
+
+ /** Returns a new point whose coordinates are the sum of a and b (a + b)
+ */
+ friend SkPoint3 operator+(const SkPoint3& a, const SkPoint3& b) {
+ SkPoint3 v;
+ v.set(a.fX + b.fX, a.fY + b.fY, a.fZ + b.fZ);
+ return v;
+ }
+
+ /** Add v's coordinates to the point's
+ */
+ void operator+=(const SkPoint3& v) {
+ fX += v.fX;
+ fY += v.fY;
+ fZ += v.fZ;
+ }
+
+ /** Subtract v's coordinates from the point's
+ */
+ void operator-=(const SkPoint3& v) {
+ fX -= v.fX;
+ fY -= v.fY;
+ fZ -= v.fZ;
+ }
+
+ /** Returns the dot product of a and b, treating them as 3D vectors
+ */
+ static SkScalar DotProduct(const SkPoint3& a, const SkPoint3& b) {
+ return a.fX * b.fX + a.fY * b.fY + a.fZ * b.fZ;
+ }
+
+ SkScalar dot(const SkPoint3& vec) const {
+ return DotProduct(*this, vec);
+ }
+};
+
+typedef SkPoint3 SkVector3;
+typedef SkPoint3 SkColor3f;
+
+#endif
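Since SkPoint3 doubles as SkVector3, a common pattern is normalizing a direction and dotting it with a surface normal, e.g. a simple Lambert lighting term. The function name and usage are illustrative only.

    #include "SkPoint3.h"

    SkScalar lambert(SkPoint3 lightDir, const SkPoint3& unitNormal) {
        if (!lightDir.normalize()) {
            return 0;                    // degenerate light direction
        }
        SkScalar d = lightDir.dot(unitNormal);
        return d > 0 ? d : 0;            // clamp back-facing contributions to zero
    }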
diff --git a/gfx/skia/skia/include/core/SkPostConfig.h b/gfx/skia/skia/include/core/SkPostConfig.h
new file mode 100644
index 000000000..1b1cb3e75
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPostConfig.h
@@ -0,0 +1,371 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// IWYU pragma: private, include "SkTypes.h"
+
+#ifndef SkPostConfig_DEFINED
+#define SkPostConfig_DEFINED
+
+#if defined(SK_BUILD_FOR_WIN32)
+# define SK_BUILD_FOR_WIN
+#endif
+
+#if !defined(SK_DEBUG) && !defined(SK_RELEASE)
+ #ifdef NDEBUG
+ #define SK_RELEASE
+ #else
+ #define SK_DEBUG
+ #endif
+#endif
+
+#if defined(SK_DEBUG) && defined(SK_RELEASE)
+# error "cannot define both SK_DEBUG and SK_RELEASE"
+#elif !defined(SK_DEBUG) && !defined(SK_RELEASE)
+# error "must define either SK_DEBUG or SK_RELEASE"
+#endif
+
+#if defined(SK_SUPPORT_UNITTEST) && !defined(SK_DEBUG)
+# error "can't have unittests without debug"
+#endif
+
+/**
+ * Matrix calculations may be float or double.
+ * The default is float, as that's what Chromium's using.
+ */
+#if defined(SK_MSCALAR_IS_DOUBLE) && defined(SK_MSCALAR_IS_FLOAT)
+# error "cannot define both SK_MSCALAR_IS_DOUBLE and SK_MSCALAR_IS_FLOAT"
+#elif !defined(SK_MSCALAR_IS_DOUBLE) && !defined(SK_MSCALAR_IS_FLOAT)
+# define SK_MSCALAR_IS_FLOAT
+#endif
+
+#if defined(SK_CPU_LENDIAN) && defined(SK_CPU_BENDIAN)
+# error "cannot define both SK_CPU_LENDIAN and SK_CPU_BENDIAN"
+#elif !defined(SK_CPU_LENDIAN) && !defined(SK_CPU_BENDIAN)
+# error "must define either SK_CPU_LENDIAN or SK_CPU_BENDIAN"
+#endif
+
+/**
+ * Ensure the port has defined all of SK_X32_SHIFT, or none of them.
+ */
+#ifdef SK_A32_SHIFT
+# if !defined(SK_R32_SHIFT) || !defined(SK_G32_SHIFT) || !defined(SK_B32_SHIFT)
+# error "all or none of the 32bit SHIFT amounts must be defined"
+# endif
+#else
+# if defined(SK_R32_SHIFT) || defined(SK_G32_SHIFT) || defined(SK_B32_SHIFT)
+# error "all or none of the 32bit SHIFT amounts must be defined"
+# endif
+#endif
+
+#if !defined(SK_HAS_COMPILER_FEATURE)
+# if defined(__has_feature)
+# define SK_HAS_COMPILER_FEATURE(x) __has_feature(x)
+# else
+# define SK_HAS_COMPILER_FEATURE(x) 0
+# endif
+#endif
+
+#if !defined(SK_ATTRIBUTE)
+# if defined(__clang__) || defined(__GNUC__)
+# define SK_ATTRIBUTE(attr) __attribute__((attr))
+# else
+# define SK_ATTRIBUTE(attr)
+# endif
+#endif
+
+// As usual, there are two ways to increase alignment... the MSVC way and the everyone-else way.
+#ifndef SK_STRUCT_ALIGN
+ #ifdef _MSC_VER
+ #define SK_STRUCT_ALIGN(N) __declspec(align(N))
+ #else
+ #define SK_STRUCT_ALIGN(N) __attribute__((aligned(N)))
+ #endif
+#endif
+
+#if defined(_MSC_VER) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #define SK_VECTORCALL __vectorcall
+#elif defined(SK_CPU_ARM32) && defined(SK_ARM_HAS_NEON)
+ #define SK_VECTORCALL __attribute__((pcs("aapcs-vfp")))
+#else
+ #define SK_VECTORCALL
+#endif
+
+#if !defined(SK_SUPPORT_GPU)
+# define SK_SUPPORT_GPU 1
+#endif
+
+/**
+ * The clang static analyzer likes to know that when the program is not
+ * The clang static analyzer likes to know when the program is not expected
+ * to continue (crash, assertion failure, etc.). It will notice that some
+ * combination of parameters leads to a function call that does not return.
+ * executed only if the non-returning function was *not* called.
+ */
+#if !defined(SkNO_RETURN_HINT)
+# if SK_HAS_COMPILER_FEATURE(attribute_analyzer_noreturn)
+ static inline void SkNO_RETURN_HINT() __attribute__((analyzer_noreturn));
+ static inline void SkNO_RETURN_HINT() {}
+# else
+# define SkNO_RETURN_HINT() do {} while (false)
+# endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+// TODO(mdempsky): Move elsewhere as appropriate.
+#include <new>
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_BUILD_FOR_WIN
+# ifndef SK_A32_SHIFT
+# define SK_A32_SHIFT 24
+# define SK_R32_SHIFT 16
+# define SK_G32_SHIFT 8
+# define SK_B32_SHIFT 0
+# endif
+#
+#endif
+
+#if defined(GOOGLE3)
+ void SkDebugfForDumpStackTrace(const char* data, void* unused);
+ void DumpStackTrace(int skip_count, void w(const char*, void*), void* arg);
+# define SK_DUMP_GOOGLE3_STACK() DumpStackTrace(0, SkDebugfForDumpStackTrace, nullptr)
+#else
+# define SK_DUMP_GOOGLE3_STACK()
+#endif
+
+#ifndef SK_ABORT
+# define SK_ABORT(message) \
+ do { \
+ SkNO_RETURN_HINT(); \
+ SkDebugf("%s:%d: fatal error: \"%s\"\n", __FILE__, __LINE__, message); \
+ SK_DUMP_GOOGLE3_STACK(); \
+ sk_abort_no_print(); \
+ } while (false)
+#endif
+
+/**
+ * We check to see if the SHIFT values have already been defined.
+ * If not, we define them ourselves with some default values. We default to
+ * OpenGL order (in memory: r,g,b,a).
+ */
+#ifndef SK_A32_SHIFT
+# ifdef SK_CPU_BENDIAN
+# define SK_R32_SHIFT 24
+# define SK_G32_SHIFT 16
+# define SK_B32_SHIFT 8
+# define SK_A32_SHIFT 0
+# else
+# define SK_R32_SHIFT 0
+# define SK_G32_SHIFT 8
+# define SK_B32_SHIFT 16
+# define SK_A32_SHIFT 24
+# endif
+#endif
+
+/**
+ * SkColor has well defined shift values, but SkPMColor is configurable. This
+ * macro is a convenience that returns true if the shift values are equal while
+ * ignoring the machine's endianness.
+ */
+#define SK_COLOR_MATCHES_PMCOLOR_BYTE_ORDER \
+ (SK_A32_SHIFT == 24 && SK_R32_SHIFT == 16 && SK_G32_SHIFT == 8 && SK_B32_SHIFT == 0)
+
+/**
+ * SK_PMCOLOR_BYTE_ORDER can be used to query the byte order of SkPMColor at compile time. The
+ * relationship between the byte order and shift values depends on machine endianness. If the shift
+ * order is R=0, G=8, B=16, A=24 then ((char*)&pmcolor)[0] will produce the R channel on a little
+ * endian machine and the A channel on a big endian machine. Thus, given those shifts values,
+ * SK_PMCOLOR_BYTE_ORDER(R,G,B,A) will be true on a little endian machine and
+ * SK_PMCOLOR_BYTE_ORDER(A,B,G,R) will be true on a big endian machine.
+ */
+#ifdef SK_CPU_BENDIAN
+# define SK_PMCOLOR_BYTE_ORDER(C0, C1, C2, C3) \
+ (SK_ ## C3 ## 32_SHIFT == 0 && \
+ SK_ ## C2 ## 32_SHIFT == 8 && \
+ SK_ ## C1 ## 32_SHIFT == 16 && \
+ SK_ ## C0 ## 32_SHIFT == 24)
+#else
+# define SK_PMCOLOR_BYTE_ORDER(C0, C1, C2, C3) \
+ (SK_ ## C0 ## 32_SHIFT == 0 && \
+ SK_ ## C1 ## 32_SHIFT == 8 && \
+ SK_ ## C2 ## 32_SHIFT == 16 && \
+ SK_ ## C3 ## 32_SHIFT == 24)
+#endif
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+
+#if defined SK_DEBUG && defined SK_BUILD_FOR_WIN32
+# ifdef free
+# undef free
+# endif
+# include <crtdbg.h>
+# undef free
+#
+# ifdef SK_DEBUGx
+# if defined(SK_SIMULATE_FAILED_MALLOC) && defined(__cplusplus)
+ void * operator new(
+ size_t cb,
+ int nBlockUse,
+ const char * szFileName,
+ int nLine,
+ int foo
+ );
+ void * operator new[](
+ size_t cb,
+ int nBlockUse,
+ const char * szFileName,
+ int nLine,
+ int foo
+ );
+ void operator delete(
+ void *pUserData,
+ int, const char*, int, int
+ );
+ void operator delete(
+ void *pUserData
+ );
+ void operator delete[]( void * p );
+# define DEBUG_CLIENTBLOCK new( _CLIENT_BLOCK, __FILE__, __LINE__, 0)
+# else
+# define DEBUG_CLIENTBLOCK new( _CLIENT_BLOCK, __FILE__, __LINE__)
+# endif
+# define new DEBUG_CLIENTBLOCK
+# else
+# define DEBUG_CLIENTBLOCK
+# endif
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#if !defined(SK_UNUSED)
+# if defined(_MSC_VER)
+# define SK_UNUSED __pragma(warning(suppress:4189))
+# else
+# define SK_UNUSED SK_ATTRIBUTE(unused)
+# endif
+#endif
+
+#if !defined(SK_ATTR_DEPRECATED)
+ // FIXME: we ignore msg for now...
+# define SK_ATTR_DEPRECATED(msg) SK_ATTRIBUTE(deprecated)
+#endif
+
+#if !defined(SK_ATTR_EXTERNALLY_DEPRECATED)
+# if !defined(SK_INTERNAL)
+# define SK_ATTR_EXTERNALLY_DEPRECATED(msg) SK_ATTR_DEPRECATED(msg)
+# else
+# define SK_ATTR_EXTERNALLY_DEPRECATED(msg)
+# endif
+#endif
+
+/**
+ * If your judgment is better than the compiler's (i.e. you've profiled it),
+ * you can use SK_ALWAYS_INLINE to force inlining. E.g.
+ * inline void someMethod() { ... } // may not be inlined
+ * SK_ALWAYS_INLINE void someMethod() { ... } // should always be inlined
+ */
+#if !defined(SK_ALWAYS_INLINE)
+# if defined(SK_BUILD_FOR_WIN)
+# define SK_ALWAYS_INLINE __forceinline
+# else
+# define SK_ALWAYS_INLINE SK_ATTRIBUTE(always_inline) inline
+# endif
+#endif
+
+/**
+ * If your judgment is better than the compiler's (i.e. you've profiled it),
+ * you can use SK_NEVER_INLINE to prevent inlining.
+ */
+#if !defined(SK_NEVER_INLINE)
+# if defined(SK_BUILD_FOR_WIN)
+# define SK_NEVER_INLINE __declspec(noinline)
+# else
+# define SK_NEVER_INLINE SK_ATTRIBUTE(noinline)
+# endif
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
+ #define SK_PREFETCH(ptr) _mm_prefetch(reinterpret_cast<const char*>(ptr), _MM_HINT_T0)
+ #define SK_WRITE_PREFETCH(ptr) _mm_prefetch(reinterpret_cast<const char*>(ptr), _MM_HINT_T0)
+#elif defined(__GNUC__)
+ #define SK_PREFETCH(ptr) __builtin_prefetch(ptr)
+ #define SK_WRITE_PREFETCH(ptr) __builtin_prefetch(ptr, 1)
+#else
+ #define SK_PREFETCH(ptr)
+ #define SK_WRITE_PREFETCH(ptr)
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#ifndef SK_PRINTF_LIKE
+# if defined(__clang__) || defined(__GNUC__)
+# define SK_PRINTF_LIKE(A, B) __attribute__((format(printf, (A), (B))))
+# else
+# define SK_PRINTF_LIKE(A, B)
+# endif
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#ifndef SK_SIZE_T_SPECIFIER
+# if defined(_MSC_VER)
+# define SK_SIZE_T_SPECIFIER "%Iu"
+# else
+# define SK_SIZE_T_SPECIFIER "%zu"
+# endif
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#ifndef SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
+# define SK_ALLOW_STATIC_GLOBAL_INITIALIZERS 1
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#ifndef SK_EGL
+# if defined(SK_BUILD_FOR_ANDROID)
+# define SK_EGL 1
+# else
+# define SK_EGL 0
+# endif
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#if !defined(SK_GAMMA_EXPONENT)
+ #define SK_GAMMA_EXPONENT (0.0f) // SRGB
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#ifndef GR_TEST_UTILS
+# define GR_TEST_UTILS 1
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#if defined(SK_HISTOGRAM_ENUMERATION) && defined(SK_HISTOGRAM_BOOLEAN)
+# define SK_HISTOGRAMS_ENABLED 1
+#else
+# define SK_HISTOGRAMS_ENABLED 0
+#endif
+
+#ifndef SK_HISTOGRAM_BOOLEAN
+# define SK_HISTOGRAM_BOOLEAN(name, value)
+#endif
+
+#ifndef SK_HISTOGRAM_ENUMERATION
+# define SK_HISTOGRAM_ENUMERATION(name, value, boundary_value)
+#endif
+
+#endif // SkPostConfig_DEFINED
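The configuration macros above are normally consumed by downstream code rather than set by it. A hedged sketch of two common patterns: SK_SIZE_T_SPECIFIER inside an SkDebugf format string, and SK_PMCOLOR_BYTE_ORDER in a preprocessor check. The function itself is illustrative.

    #include "SkTypes.h"   // pulls in SkPreConfig.h / SkPostConfig.h

    static void report(size_t byteSize) {
        SkDebugf("buffer is " SK_SIZE_T_SPECIFIER " bytes\n", byteSize);
    #if SK_PMCOLOR_BYTE_ORDER(B, G, R, A)
        SkDebugf("SkPMColor is laid out BGRA in memory\n");
    #elif SK_PMCOLOR_BYTE_ORDER(R, G, B, A)
        SkDebugf("SkPMColor is laid out RGBA in memory\n");
    #endif
    }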
diff --git a/gfx/skia/skia/include/core/SkPreConfig.h b/gfx/skia/skia/include/core/SkPreConfig.h
new file mode 100644
index 000000000..cb7eb3f3e
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPreConfig.h
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// IWYU pragma: private, include "SkTypes.h"
+
+#ifndef SkPreConfig_DEFINED
+#define SkPreConfig_DEFINED
+
+// Allows embedders that want to disable macros that take arguments to just
+// define that symbol to be one of these
+#define SK_NOTHING_ARG1(arg1)
+#define SK_NOTHING_ARG2(arg1, arg2)
+#define SK_NOTHING_ARG3(arg1, arg2, arg3)
+
+//////////////////////////////////////////////////////////////////////
+
+#if !defined(SK_BUILD_FOR_ANDROID) && !defined(SK_BUILD_FOR_IOS) && !defined(SK_BUILD_FOR_WIN32) && !defined(SK_BUILD_FOR_UNIX) && !defined(SK_BUILD_FOR_MAC)
+
+ #ifdef __APPLE__
+ #include "TargetConditionals.h"
+ #endif
+
+ #if defined(_WIN32) || defined(__SYMBIAN32__)
+ #define SK_BUILD_FOR_WIN32
+ #elif defined(ANDROID) || defined(__ANDROID__)
+ #define SK_BUILD_FOR_ANDROID
+ #elif defined(linux) || defined(__linux) || defined(__FreeBSD__) || \
+ defined(__OpenBSD__) || defined(__sun) || defined(__NetBSD__) || \
+ defined(__DragonFly__) || defined(__Fuchsia__) || \
+ defined(__GLIBC__) || defined(__GNU__) || defined(__unix__)
+ #define SK_BUILD_FOR_UNIX
+ #elif TARGET_OS_IPHONE || TARGET_IPHONE_SIMULATOR
+ #define SK_BUILD_FOR_IOS
+ #else
+ #define SK_BUILD_FOR_MAC
+ #endif
+
+#endif
+
+/* Even if the user only defined the framework variant, we still need to build
+ * the default (NDK-compliant) Android code. Therefore, when attempting to
+ * include/exclude something from the framework variant, first check that we
+ * are building for Android and then check the status of the framework define.
+ */
+#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) && !defined(SK_BUILD_FOR_ANDROID)
+ #define SK_BUILD_FOR_ANDROID
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#ifdef SK_BUILD_FOR_WIN32
+ #if !defined(SK_RESTRICT)
+ #define SK_RESTRICT __restrict
+ #endif
+ #if !defined(SK_WARN_UNUSED_RESULT)
+ #define SK_WARN_UNUSED_RESULT
+ #endif
+#endif
+
+#if !defined(SK_RESTRICT)
+ #define SK_RESTRICT __restrict__
+#endif
+
+#if !defined(SK_WARN_UNUSED_RESULT)
+ #define SK_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#if !defined(SK_CPU_BENDIAN) && !defined(SK_CPU_LENDIAN)
+ #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+ #define SK_CPU_BENDIAN
+ #elif defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+ #define SK_CPU_LENDIAN
+ #elif defined(__sparc) || defined(__sparc__) || \
+ defined(_POWER) || defined(__powerpc__) || \
+ defined(__ppc__) || defined(__hppa) || \
+ defined(__PPC__) || defined(__PPC64__) || \
+ defined(_MIPSEB) || defined(__ARMEB__) || \
+ defined(__s390__) || \
+ (defined(__sh__) && defined(__BIG_ENDIAN__)) || \
+ (defined(__ia64) && defined(__BIG_ENDIAN__))
+ #define SK_CPU_BENDIAN
+ #else
+ #define SK_CPU_LENDIAN
+ #endif
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
+ #define SK_CPU_X86 1
+#endif
+
+/**
+ * SK_CPU_SSE_LEVEL
+ *
+ * If defined, SK_CPU_SSE_LEVEL should be set to the highest supported level.
+ * On non-Intel CPUs this should be undefined.
+ */
+
+#define SK_CPU_SSE_LEVEL_SSE1 10
+#define SK_CPU_SSE_LEVEL_SSE2 20
+#define SK_CPU_SSE_LEVEL_SSE3 30
+#define SK_CPU_SSE_LEVEL_SSSE3 31
+#define SK_CPU_SSE_LEVEL_SSE41 41
+#define SK_CPU_SSE_LEVEL_SSE42 42
+#define SK_CPU_SSE_LEVEL_AVX 51
+#define SK_CPU_SSE_LEVEL_AVX2 52
+
+// When targeting iOS and using gyp to generate the build files, it is not
+// possible to select files to build depending on the architecture (i.e. it
+// is not possible to use hand-optimized assembly implementations). In that
+// configuration SK_BUILD_NO_OPTS is defined, so the optimizations are disabled.
+#ifdef SK_BUILD_NO_OPTS
+ #define SK_CPU_SSE_LEVEL 0
+#endif
+
+// Are we in GCC?
+#ifndef SK_CPU_SSE_LEVEL
+ // These checks must be done in descending order to ensure we set the highest
+ // available SSE level.
+ #if defined(__AVX2__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX2
+ #elif defined(__AVX__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX
+ #elif defined(__SSE4_2__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE42
+ #elif defined(__SSE4_1__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE41
+ #elif defined(__SSSE3__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSSE3
+ #elif defined(__SSE3__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE3
+ #elif defined(__SSE2__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE2
+ #endif
+#endif
+
+// Are we in VisualStudio?
+#ifndef SK_CPU_SSE_LEVEL
+ // These checks must be done in descending order to ensure we set the highest
+ // available SSE level. 64-bit intel guarantees at least SSE2 support.
+ #if defined(__AVX2__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX2
+ #elif defined(__AVX__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX
+ #elif defined(_M_X64) || defined(_M_AMD64)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE2
+ #elif defined(_M_IX86_FP)
+ #if _M_IX86_FP >= 2
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE2
+ #elif _M_IX86_FP == 1
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE1
+ #endif
+ #endif
+#endif
+
+//////////////////////////////////////////////////////////////////////
+// ARM defines
+
+#if defined(__arm__) && (!defined(__APPLE__) || !TARGET_IPHONE_SIMULATOR)
+ #define SK_CPU_ARM32
+
+ #if defined(__GNUC__)
+ #if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
+ || defined(__ARM_ARCH_7EM__) || defined(_ARM_ARCH_7)
+ #define SK_ARM_ARCH 7
+ #elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
+ || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
+ || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \
+ || defined(__ARM_ARCH_6M__) || defined(_ARM_ARCH_6)
+ #define SK_ARM_ARCH 6
+ #elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
+ || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
+ || defined(__ARM_ARCH_5TEJ__) || defined(_ARM_ARCH_5)
+ #define SK_ARM_ARCH 5
+ #elif defined(__ARM_ARCH_4__) || defined(__ARM_ARCH_4T__) || defined(_ARM_ARCH_4)
+ #define SK_ARM_ARCH 4
+ #else
+ #define SK_ARM_ARCH 3
+ #endif
+ #endif
+#endif
+
+#if defined(__aarch64__) && !defined(SK_BUILD_NO_OPTS)
+ #define SK_CPU_ARM64
+#endif
+
+// All 64-bit ARM chips have NEON. Many 32-bit ARM chips do too.
+#if !defined(SK_ARM_HAS_NEON) && !defined(SK_BUILD_NO_OPTS) && (defined(__ARM_NEON__) || defined(__ARM_NEON))
+ #define SK_ARM_HAS_NEON
+#endif
+
+// Really this __APPLE__ check shouldn't be necessary, but it seems that Apple's Clang defines
+// __ARM_FEATURE_CRC32 for -arch arm64, even though their chips don't support those instructions!
+#if defined(__ARM_FEATURE_CRC32) && !defined(__APPLE__)
+ #define SK_ARM_HAS_CRC32
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+#if !defined(SKIA_IMPLEMENTATION)
+ #define SKIA_IMPLEMENTATION 0
+#endif
+
+#if !defined(SK_API)
+ #if defined(SKIA_DLL)
+ #if defined(_MSC_VER)
+ #if SKIA_IMPLEMENTATION
+ #define SK_API __declspec(dllexport)
+ #else
+ #define SK_API __declspec(dllimport)
+ #endif
+ #else
+ #define SK_API __attribute__((visibility("default")))
+ #endif
+ #else
+ #define SK_API
+ #endif
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+/**
+ * Use SK_PURE_FUNC as an attribute to indicate that a function's
+ * return value only depends on the value of its parameters. This
+ * can help the compiler optimize out successive calls.
+ *
+ * Usage:
+ * void function(int params) SK_PURE_FUNC;
+ */
+#if defined(__GNUC__)
+# define SK_PURE_FUNC __attribute__((pure))
+#else
+# define SK_PURE_FUNC /* nothing */
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+/**
+ * SK_HAS_ATTRIBUTE(<name>) should return true iff the compiler
+ * supports __attribute__((<name>)). Mostly important because
+ * Clang doesn't support all of GCC's attributes.
+ */
+#if defined(__has_attribute)
+# define SK_HAS_ATTRIBUTE(x) __has_attribute(x)
+#elif defined(__GNUC__)
+# define SK_HAS_ATTRIBUTE(x) 1
+#else
+# define SK_HAS_ATTRIBUTE(x) 0
+#endif
+
+/**
+ * SK_ATTRIBUTE_OPTIMIZE_O1 can be used as a function attribute
+ * to specify individual optimization level of -O1, if the compiler
+ * supports it.
+ *
+ * NOTE: Clang/ARM (r161757) does not support the 'optimize' attribute.
+ */
+#if SK_HAS_ATTRIBUTE(optimize)
+# define SK_ATTRIBUTE_OPTIMIZE_O1 __attribute__((optimize("O1")))
+#else
+# define SK_ATTRIBUTE_OPTIMIZE_O1 /* nothing */
+#endif
+
+#endif
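Downstream code keys off these detection macros rather than raw compiler defines; for example, guarding an SSSE3 code path. This is a sketch only, with both branches elided.

    #include "SkTypes.h"   // pulls in SkPreConfig.h / SkPostConfig.h

    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
        #include <tmmintrin.h>
        // ... SSSE3 implementation would go here ...
    #else
        // ... portable fallback would go here ...
    #endif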
diff --git a/gfx/skia/skia/include/core/SkRRect.h b/gfx/skia/skia/include/core/SkRRect.h
new file mode 100644
index 000000000..3b691aab1
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkRRect.h
@@ -0,0 +1,353 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRRect_DEFINED
+#define SkRRect_DEFINED
+
+#include "SkRect.h"
+#include "SkPoint.h"
+
+class SkPath;
+class SkMatrix;
+
+// Path forward:
+// core work
+// add contains(SkRect&) - for clip stack
+// add contains(SkRRect&) - for clip stack
+// add heart rect computation (max rect inside RR)
+// add 9patch rect computation
+// add growToInclude(SkPath&)
+// analysis
+// use growToInclude to fit skp round rects & generate stats (RRs vs. real paths)
+// check on # of rectorus's the RRs could handle
+// rendering work
+// update SkPath.addRRect() to only use quads
+// add GM and bench
+// further out
+// detect and triangulate RRectorii rather than falling back to SW in Ganesh
+//
+
+/** \class SkRRect
+
+ The SkRRect class represents a rounded rect with a potentially different
+ radii for each corner. It does not have a constructor so must be
+ initialized with one of the initialization functions (e.g., setEmpty,
+ setRectRadii, etc.)
+
+ This class is intended to roughly match CSS' border-*-*-radius capabilities.
+ This means:
+    If either of a corner's radii is 0, the corner will be square.
+ Negative radii are not allowed (they are clamped to zero).
+ If the corner curves overlap they will be proportionally reduced to fit.
+*/
+class SK_API SkRRect {
+public:
+    SkRRect() { /* uninitialized */ }
+ SkRRect(const SkRRect&) = default;
+ SkRRect& operator=(const SkRRect&) = default;
+
+ /**
+ * Enum to capture the various possible subtypes of RR. Accessed
+ * by type(). The subtypes become progressively less restrictive.
+ */
+ enum Type {
+        //!< The RR is empty
+ kEmpty_Type,
+
+ //!< The RR is actually a (non-empty) rect (i.e., at least one radius
+ //!< at each corner is zero)
+ kRect_Type,
+
+ //!< The RR is actually a (non-empty) oval (i.e., all x radii are equal
+        //!< and >= width/2 and all the y radii are equal and >= height/2)
+ kOval_Type,
+
+ //!< The RR is non-empty and all the x radii are equal & all y radii
+ //!< are equal but it is not an oval (i.e., there are lines between
+ //!< the curves) nor a rect (i.e., both radii are non-zero)
+ kSimple_Type,
+
+ //!< The RR is non-empty and the two left x radii are equal, the two top
+ //!< y radii are equal, and the same for the right and bottom but it is
+        //!< neither a rect, an oval, nor a simple RR. It is called "nine patch"
+        //!< because the centers of the corner ellipses form an axis aligned
+        //!< rect with edges that divide the RR into 9 rectangular patches:
+ //!< an interior patch, four edge patches, and four corner patches.
+ kNinePatch_Type,
+
+ //!< A fully general (non-empty) RR. Some of the x and/or y radii are
+ //!< different from the others and there must be one corner where
+ //!< both radii are non-zero.
+ kComplex_Type,
+ };
+
+ /**
+ * Returns the RR's sub type.
+ */
+ Type getType() const {
+ SkASSERT(this->isValid());
+ return static_cast<Type>(fType);
+ }
+
+ Type type() const { return this->getType(); }
+
+ inline bool isEmpty() const { return kEmpty_Type == this->getType(); }
+ inline bool isRect() const { return kRect_Type == this->getType(); }
+ inline bool isOval() const { return kOval_Type == this->getType(); }
+ inline bool isSimple() const { return kSimple_Type == this->getType(); }
+ // TODO: should isSimpleCircular & isCircle take a tolerance? This could help
+ // instances where the mapping to device space is noisy.
+ inline bool isSimpleCircular() const {
+ return this->isSimple() && SkScalarNearlyEqual(fRadii[0].fX, fRadii[0].fY);
+ }
+ inline bool isCircle() const {
+ return this->isOval() && SkScalarNearlyEqual(fRadii[0].fX, fRadii[0].fY);
+ }
+ inline bool isNinePatch() const { return kNinePatch_Type == this->getType(); }
+ inline bool isComplex() const { return kComplex_Type == this->getType(); }
+
+ bool allCornersCircular() const;
+
+ SkScalar width() const { return fRect.width(); }
+ SkScalar height() const { return fRect.height(); }
+
+ /**
+ * Set this RR to the empty rectangle (0,0,0,0) with 0 x & y radii.
+ */
+ void setEmpty() {
+ fRect.setEmpty();
+ memset(fRadii, 0, sizeof(fRadii));
+ fType = kEmpty_Type;
+
+ SkASSERT(this->isValid());
+ }
+
+ /**
+ * Set this RR to match the supplied rect. All radii will be 0.
+ */
+ void setRect(const SkRect& rect) {
+ fRect = rect;
+ fRect.sort();
+
+ if (fRect.isEmpty()) {
+ this->setEmpty();
+ return;
+ }
+
+ memset(fRadii, 0, sizeof(fRadii));
+ fType = kRect_Type;
+
+ SkASSERT(this->isValid());
+ }
+
+ static SkRRect MakeEmpty() {
+ SkRRect rr;
+ rr.setEmpty();
+ return rr;
+ }
+
+ static SkRRect MakeRect(const SkRect& r) {
+ SkRRect rr;
+ rr.setRect(r);
+ return rr;
+ }
+
+ static SkRRect MakeOval(const SkRect& oval) {
+ SkRRect rr;
+ rr.setOval(oval);
+ return rr;
+ }
+
+ static SkRRect MakeRectXY(const SkRect& rect, SkScalar xRad, SkScalar yRad) {
+ SkRRect rr;
+ rr.setRectXY(rect, xRad, yRad);
+ return rr;
+ }
+
+ /**
+ * Set this RR to match the supplied oval. All x radii will equal half the
+ * width and all y radii will equal half the height.
+ */
+ void setOval(const SkRect& oval) {
+ fRect = oval;
+ fRect.sort();
+
+ if (fRect.isEmpty()) {
+ this->setEmpty();
+ return;
+ }
+
+ SkScalar xRad = SkScalarHalf(fRect.width());
+ SkScalar yRad = SkScalarHalf(fRect.height());
+
+ for (int i = 0; i < 4; ++i) {
+ fRadii[i].set(xRad, yRad);
+ }
+ fType = kOval_Type;
+
+ SkASSERT(this->isValid());
+ }
+
+ /**
+ * Initialize the RR with the same radii for all four corners.
+ */
+ void setRectXY(const SkRect& rect, SkScalar xRad, SkScalar yRad);
+
+ /**
+ * Initialize the rr with one radius per-side.
+ */
+ void setNinePatch(const SkRect& rect, SkScalar leftRad, SkScalar topRad,
+ SkScalar rightRad, SkScalar bottomRad);
+
+ /**
+ * Initialize the RR with potentially different radii for all four corners.
+ */
+ void setRectRadii(const SkRect& rect, const SkVector radii[4]);
+
+ // The radii are stored in UL, UR, LR, LL order.
+ enum Corner {
+ kUpperLeft_Corner,
+ kUpperRight_Corner,
+ kLowerRight_Corner,
+ kLowerLeft_Corner
+ };
+
+ const SkRect& rect() const { return fRect; }
+ const SkVector& radii(Corner corner) const { return fRadii[corner]; }
+ const SkRect& getBounds() const { return fRect; }
+
+ /**
+ * When a rrect is simple, all of its radii are equal. This returns one
+ * of those radii. This call requires the rrect to be non-complex.
+ */
+ const SkVector& getSimpleRadii() const {
+ SkASSERT(!this->isComplex());
+ return fRadii[0];
+ }
+
+ friend bool operator==(const SkRRect& a, const SkRRect& b) {
+ return a.fRect == b.fRect &&
+ SkScalarsEqual(a.fRadii[0].asScalars(),
+ b.fRadii[0].asScalars(), 8);
+ }
+
+ friend bool operator!=(const SkRRect& a, const SkRRect& b) {
+ return a.fRect != b.fRect ||
+ !SkScalarsEqual(a.fRadii[0].asScalars(),
+ b.fRadii[0].asScalars(), 8);
+ }
+
+ /**
+ * Call inset on the bounds, and adjust the radii to reflect what happens
+ * in stroking: If the corner is sharp (no curvature), leave it alone,
+ * otherwise we grow/shrink the radii by the amount of the inset. If a
+ * given radius becomes negative, it is pinned to 0.
+ *
+ * It is valid for dst == this.
+ */
+ void inset(SkScalar dx, SkScalar dy, SkRRect* dst) const;
+
+ void inset(SkScalar dx, SkScalar dy) {
+ this->inset(dx, dy, this);
+ }
+
+ /**
+ * Call outset on the bounds, and adjust the radii to reflect what happens
+ * in stroking: If the corner is sharp (no curvature), leave it alone,
+     * otherwise we grow/shrink the radii by the amount of the outset. If a
+ * given radius becomes negative, it is pinned to 0.
+ *
+ * It is valid for dst == this.
+ */
+ void outset(SkScalar dx, SkScalar dy, SkRRect* dst) const {
+ this->inset(-dx, -dy, dst);
+ }
+ void outset(SkScalar dx, SkScalar dy) {
+ this->inset(-dx, -dy, this);
+ }
+
+ /**
+ * Translate the rrect by (dx, dy).
+ */
+ void offset(SkScalar dx, SkScalar dy) {
+ fRect.offset(dx, dy);
+ }
+
+ SkRRect SK_WARN_UNUSED_RESULT makeOffset(SkScalar dx, SkScalar dy) const {
+ return SkRRect(fRect.makeOffset(dx, dy), fRadii, fType);
+ }
+
+ /**
+     * Returns true if 'rect' is wholly inside the RR, and both
+ * are not empty.
+ */
+ bool contains(const SkRect& rect) const;
+
+ bool isValid() const;
+
+ enum {
+ kSizeInMemory = 12 * sizeof(SkScalar)
+ };
+
+ /**
+ * Write the rrect into the specified buffer. This is guaranteed to always
+ * write kSizeInMemory bytes, and that value is guaranteed to always be
+ * a multiple of 4. Return kSizeInMemory.
+ */
+ size_t writeToMemory(void* buffer) const;
+
+ /**
+ * Reads the rrect from the specified buffer
+ *
+ * If the specified buffer is large enough, this will read kSizeInMemory bytes,
+ * and that value is guaranteed to always be a multiple of 4.
+ *
+ * @param buffer Memory to read from
+ * @param length Amount of memory available in the buffer
+ * @return number of bytes read (must be a multiple of 4) or
+ * 0 if there was not enough memory available
+ */
+ size_t readFromMemory(const void* buffer, size_t length);
+
+ /**
+ * Transform by the specified matrix, and put the result in dst.
+ *
+ * @param matrix SkMatrix specifying the transform. Must only contain
+ * scale and/or translate, or this call will fail.
+     *  @param dst SkRRect to store the result. It is an error to pass this
+     *              rrect as dst, which would make this function no longer const.
+ * @return true on success, false on failure. If false, dst is unmodified.
+ */
+ bool transform(const SkMatrix& matrix, SkRRect* dst) const;
+
+ void dump(bool asHex) const;
+ void dump() const { this->dump(false); }
+ void dumpHex() const { this->dump(true); }
+
+private:
+ SkRRect(const SkRect& rect, const SkVector radii[4], int32_t type)
+ : fRect(rect)
+ , fRadii{radii[0], radii[1], radii[2], radii[3]}
+ , fType(type) {}
+
+ SkRect fRect;
+ // Radii order is UL, UR, LR, LL. Use Corner enum to index into fRadii[]
+ SkVector fRadii[4];
+ // use an explicitly sized type so we're sure the class is dense (no uninitialized bytes)
+ int32_t fType;
+ // TODO: add padding so we can use memcpy for flattening and not copy
+ // uninitialized data
+
+ void computeType();
+ bool checkCornerContainment(SkScalar x, SkScalar y) const;
+ void scaleRadii();
+
+ // to access fRadii directly
+ friend class SkPath;
+};
+
+#endif
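
A minimal usage sketch for the SkRRect API above, assuming the headers from this
import are on the include path and the Skia library is linked (names here are
illustrative only):

    #include "SkRRect.h"

    static void rrect_sketch() {
        // A 100x50 rect with 10x6 radii at every corner.
        SkRRect rr = SkRRect::MakeRectXY(SkRect::MakeWH(100, 50), 10, 6);

        // inset() shrinks the bounds and adjusts the radii by the same amount;
        // a radius that would go negative is pinned to 0 (see comment above).
        SkRRect shrunk;
        rr.inset(4, 4, &shrunk);

        // writeToMemory() always writes exactly kSizeInMemory bytes (12 scalars).
        char storage[SkRRect::kSizeInMemory];
        size_t written = rr.writeToMemory(storage);
        SkASSERT(SkRRect::kSizeInMemory == written);
        (void)written;
        (void)shrunk;
    }
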
diff --git a/gfx/skia/skia/include/core/SkRSXform.h b/gfx/skia/skia/include/core/SkRSXform.h
new file mode 100644
index 000000000..7af6e67c1
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkRSXform.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRSXform_DEFINED
+#define SkRSXform_DEFINED
+
+#include "SkScalar.h"
+
+/**
+ * A compressed form of a rotation+scale matrix.
+ *
+ * [ fSCos -fSSin fTx ]
+ * [ fSSin fSCos fTy ]
+ * [ 0 0 1 ]
+ */
+struct SkRSXform {
+ static SkRSXform Make(SkScalar scos, SkScalar ssin, SkScalar tx, SkScalar ty) {
+ SkRSXform xform = { scos, ssin, tx, ty };
+ return xform;
+ }
+
+ /*
+ * Initialize a new xform based on the scale, rotation (in radians), final tx,ty location
+ * and anchor-point ax,ay within the src quad.
+ *
+ * Note: the anchor point is not normalized (e.g. 0...1) but is in pixels of the src image.
+ */
+ static SkRSXform MakeFromRadians(SkScalar scale, SkScalar radians, SkScalar tx, SkScalar ty,
+ SkScalar ax, SkScalar ay) {
+ const SkScalar s = SkScalarSin(radians) * scale;
+ const SkScalar c = SkScalarCos(radians) * scale;
+ return Make(c, s, tx + -c * ax + s * ay, ty + -s * ax - c * ay);
+ }
+
+ SkScalar fSCos;
+ SkScalar fSSin;
+ SkScalar fTx;
+ SkScalar fTy;
+
+ bool rectStaysRect() const {
+ return 0 == fSCos || 0 == fSSin;
+ }
+
+ void setIdentity() {
+ fSCos = 1;
+ fSSin = fTx = fTy = 0;
+ }
+
+ void set(SkScalar scos, SkScalar ssin, SkScalar tx, SkScalar ty) {
+ fSCos = scos;
+ fSSin = ssin;
+ fTx = tx;
+ fTy = ty;
+ }
+
+ void toQuad(SkScalar width, SkScalar height, SkPoint quad[4]) const;
+ void toQuad(const SkSize& size, SkPoint quad[4]) const {
+ this->toQuad(size.width(), size.height(), quad);
+ }
+};
+
+#endif
+
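
The translation terms in MakeFromRadians() are chosen so that the anchor point
(ax, ay) of the source maps exactly to (tx, ty): substituting into the matrix
above gives x' = c*ax - s*ay + (tx - c*ax + s*ay) = tx, and likewise y' = ty.
A short sketch of that use, assuming the Skia library is linked (the function
name is illustrative):

    #include "SkPoint.h"
    #include "SkRSXform.h"

    static void rsxform_sketch() {
        // Rotate a 32x32 sprite by 90 degrees about its center (anchor 16,16)
        // and land that center at device position (100, 100).
        SkRSXform xf = SkRSXform::MakeFromRadians(1.0f, SK_ScalarPI / 2, 100, 100, 16, 16);

        // Expand the xform into the four device-space corners of the source rect.
        SkPoint quad[4];
        xf.toQuad(32, 32, quad);

        // rectStaysRect() is an exact test (fSCos == 0 or fSSin == 0), so it only
        // holds for transforms that are exactly axis-aligned.
        (void)xf.rectStaysRect();
    }
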
diff --git a/gfx/skia/skia/include/core/SkRWBuffer.h b/gfx/skia/skia/include/core/SkRWBuffer.h
new file mode 100644
index 000000000..451933f35
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkRWBuffer.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRWBuffer_DEFINED
+#define SkRWBuffer_DEFINED
+
+#include "SkRefCnt.h"
+
+struct SkBufferBlock;
+struct SkBufferHead;
+class SkRWBuffer;
+class SkStreamAsset;
+
+/**
+ * Contains a read-only, thread-sharable block of memory. To access the memory, the caller must
+ * instantiate a local iterator, as the memory is stored in 1 or more contiguous blocks.
+ */
+class SK_API SkROBuffer : public SkRefCnt {
+public:
+ /**
+ * Return the logical length of the data owned/shared by this buffer. It may be stored in
+ * multiple contiguous blocks, accessible via the iterator.
+ */
+ size_t size() const { return fAvailable; }
+
+ class SK_API Iter {
+ public:
+ Iter(const SkROBuffer*);
+
+ void reset(const SkROBuffer*);
+
+ /**
+         *  Return the current contiguous block of memory, or nullptr if the iterator is exhausted.
+ */
+ const void* data() const;
+
+ /**
+         *  Returns the number of bytes in the current contiguous block of memory, or 0 if the
+ * iterator is exhausted.
+ */
+ size_t size() const;
+
+ /**
+ * Advance to the next contiguous block of memory, returning true if there is another
+ * block, or false if the iterator is exhausted.
+ */
+ bool next();
+
+ private:
+ const SkBufferBlock* fBlock;
+ size_t fRemaining;
+ const SkROBuffer* fBuffer;
+ };
+
+private:
+ SkROBuffer(const SkBufferHead* head, size_t available, const SkBufferBlock* fTail);
+ virtual ~SkROBuffer();
+
+ const SkBufferHead* fHead;
+ const size_t fAvailable;
+ const SkBufferBlock* fTail;
+
+ friend class SkRWBuffer;
+};
+
+/**
+ * Accumulates bytes of memory that are "appended" to it, growing internal storage as needed.
+ * The growth is done such that at any time in the writer's thread, an RBuffer or StreamAsset
+ * can be snapped off (and safely passed to another thread). The RBuffer/StreamAsset snapshot
+ * can see the previously stored bytes, but will be unaware of any future writes.
+ */
+class SK_API SkRWBuffer {
+public:
+ SkRWBuffer(size_t initialCapacity = 0);
+ ~SkRWBuffer();
+
+ size_t size() const { return fTotalUsed; }
+
+ /**
+ * Append |length| bytes from |buffer|.
+ *
+ * If the caller knows in advance how much more data they are going to append, they can
+ * pass a |reserve| hint (representing the number of upcoming bytes *in addition* to the
+ * current append), to minimize the number of internal allocations.
+ */
+ void append(const void* buffer, size_t length, size_t reserve = 0);
+
+ SkROBuffer* newRBufferSnapshot() const;
+ SkStreamAsset* newStreamSnapshot() const;
+
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+private:
+ SkBufferHead* fHead;
+ SkBufferBlock* fTail;
+ size_t fTotalUsed;
+};
+
+#endif
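
A short sketch of the append/snapshot flow described above, assuming the Skia
library is linked; the snapshot is ref-counted, so the caller unrefs it when done:

    #include "SkRWBuffer.h"

    static void rwbuffer_sketch() {
        SkRWBuffer writer(/*initialCapacity=*/4096);
        static const char kChunk[] = "hello";
        writer.append(kChunk, sizeof(kChunk) - 1);

        // Snap off a read-only view; later append() calls on `writer` are not
        // visible through this snapshot.
        SkROBuffer* snapshot = writer.newRBufferSnapshot();

        // Walk the one-or-more contiguous blocks behind the snapshot.
        size_t total = 0;
        SkROBuffer::Iter iter(snapshot);
        do {
            total += iter.size();   // 0 once the iterator is exhausted
        } while (iter.next());
        SkASSERT(total == snapshot->size());
        (void)total;

        snapshot->unref();          // newRBufferSnapshot() returns an owned ref
    }
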
diff --git a/gfx/skia/skia/include/core/SkRasterizer.h b/gfx/skia/skia/include/core/SkRasterizer.h
new file mode 100644
index 000000000..1881ccef2
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkRasterizer.h
@@ -0,0 +1,41 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkRasterizer_DEFINED
+#define SkRasterizer_DEFINED
+
+#include "SkFlattenable.h"
+#include "SkMask.h"
+
+class SkMaskFilter;
+class SkMatrix;
+class SkPath;
+struct SkIRect;
+
+class SK_API SkRasterizer : public SkFlattenable {
+public:
+ /** Turn the path into a mask, respecting the specified local->device matrix.
+ */
+ bool rasterize(const SkPath& path, const SkMatrix& matrix,
+ const SkIRect* clipBounds, SkMaskFilter* filter,
+ SkMask* mask, SkMask::CreateMode mode) const;
+
+ SK_DEFINE_FLATTENABLE_TYPE(SkRasterizer)
+
+protected:
+ SkRasterizer() {}
+ virtual bool onRasterize(const SkPath& path, const SkMatrix& matrix,
+ const SkIRect* clipBounds,
+ SkMask* mask, SkMask::CreateMode mode) const;
+
+private:
+ typedef SkFlattenable INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkRect.h b/gfx/skia/skia/include/core/SkRect.h
new file mode 100644
index 000000000..27a648fee
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkRect.h
@@ -0,0 +1,908 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRect_DEFINED
+#define SkRect_DEFINED
+
+#include "SkPoint.h"
+#include "SkSize.h"
+
+struct SkRect;
+
+/** \struct SkIRect
+
+ SkIRect holds four 32 bit integer coordinates for a rectangle
+*/
+struct SK_API SkIRect {
+ int32_t fLeft, fTop, fRight, fBottom;
+
+ static SkIRect SK_WARN_UNUSED_RESULT MakeEmpty() {
+ SkIRect r;
+ r.setEmpty();
+ return r;
+ }
+
+ static SkIRect SK_WARN_UNUSED_RESULT MakeLargest() {
+ SkIRect r;
+ r.setLargest();
+ return r;
+ }
+
+ static SkIRect SK_WARN_UNUSED_RESULT MakeWH(int32_t w, int32_t h) {
+ SkIRect r;
+ r.set(0, 0, w, h);
+ return r;
+ }
+
+ static SkIRect SK_WARN_UNUSED_RESULT MakeSize(const SkISize& size) {
+ SkIRect r;
+ r.set(0, 0, size.width(), size.height());
+ return r;
+ }
+
+ static SkIRect SK_WARN_UNUSED_RESULT MakeLTRB(int32_t l, int32_t t, int32_t r, int32_t b) {
+ SkIRect rect;
+ rect.set(l, t, r, b);
+ return rect;
+ }
+
+ static SkIRect SK_WARN_UNUSED_RESULT MakeXYWH(int32_t x, int32_t y, int32_t w, int32_t h) {
+ SkIRect r;
+ r.set(x, y, x + w, y + h);
+ return r;
+ }
+
+ int left() const { return fLeft; }
+ int top() const { return fTop; }
+ int right() const { return fRight; }
+ int bottom() const { return fBottom; }
+
+ /** return the left edge of the rect */
+ int x() const { return fLeft; }
+ /** return the top edge of the rect */
+ int y() const { return fTop; }
+ /**
+ * Returns the rectangle's width. This does not check for a valid rect
+ * (i.e. left <= right) so the result may be negative.
+ */
+ int width() const { return fRight - fLeft; }
+
+ /**
+ * Returns the rectangle's height. This does not check for a valid rect
+ * (i.e. top <= bottom) so the result may be negative.
+ */
+ int height() const { return fBottom - fTop; }
+
+ SkISize size() const { return SkISize::Make(this->width(), this->height()); }
+
+ /**
+     * Since the center of an integer rect may fall on a fractional value, this
+ * method is defined to return (right + left) >> 1.
+ *
+ * This is a specific "truncation" of the average, which is different than
+ * (right + left) / 2 when the sum is negative.
+ */
+ int centerX() const { return (fRight + fLeft) >> 1; }
+
+ /**
+     * Since the center of an integer rect may fall on a fractional value, this
+ * method is defined to return (bottom + top) >> 1
+ *
+ * This is a specific "truncation" of the average, which is different than
+ * (bottom + top) / 2 when the sum is negative.
+ */
+ int centerY() const { return (fBottom + fTop) >> 1; }
+
+ /**
+ * Return true if the rectangle's width or height are <= 0
+ */
+ bool isEmpty() const { return fLeft >= fRight || fTop >= fBottom; }
+
+ bool isLargest() const { return SK_MinS32 == fLeft &&
+ SK_MinS32 == fTop &&
+ SK_MaxS32 == fRight &&
+ SK_MaxS32 == fBottom; }
+
+ friend bool operator==(const SkIRect& a, const SkIRect& b) {
+ return !memcmp(&a, &b, sizeof(a));
+ }
+
+ friend bool operator!=(const SkIRect& a, const SkIRect& b) {
+ return !(a == b);
+ }
+
+ bool is16Bit() const {
+ return SkIsS16(fLeft) && SkIsS16(fTop) &&
+ SkIsS16(fRight) && SkIsS16(fBottom);
+ }
+
+ /** Set the rectangle to (0,0,0,0)
+ */
+ void setEmpty() { memset(this, 0, sizeof(*this)); }
+
+ void set(int32_t left, int32_t top, int32_t right, int32_t bottom) {
+ fLeft = left;
+ fTop = top;
+ fRight = right;
+ fBottom = bottom;
+ }
+ // alias for set(l, t, r, b)
+ void setLTRB(int32_t left, int32_t top, int32_t right, int32_t bottom) {
+ this->set(left, top, right, bottom);
+ }
+
+ void setXYWH(int32_t x, int32_t y, int32_t width, int32_t height) {
+ fLeft = x;
+ fTop = y;
+ fRight = x + width;
+ fBottom = y + height;
+ }
+
+ /**
+ * Make the largest representable rectangle
+ */
+ void setLargest() {
+ fLeft = fTop = SK_MinS32;
+ fRight = fBottom = SK_MaxS32;
+ }
+
+ /**
+ * Make the largest representable rectangle, but inverted (e.g. fLeft will
+ * be max 32bit and right will be min 32bit).
+ */
+ void setLargestInverted() {
+ fLeft = fTop = SK_MaxS32;
+ fRight = fBottom = SK_MinS32;
+ }
+
+ /**
+ * Return a new IRect, built as an offset of this rect.
+ */
+ SkIRect makeOffset(int32_t dx, int32_t dy) const {
+ return MakeLTRB(fLeft + dx, fTop + dy, fRight + dx, fBottom + dy);
+ }
+
+ /**
+ * Return a new IRect, built as an inset of this rect.
+ */
+ SkIRect makeInset(int32_t dx, int32_t dy) const {
+ return MakeLTRB(fLeft + dx, fTop + dy, fRight - dx, fBottom - dy);
+ }
+
+ /**
+ * Return a new Rect, built as an outset of this rect.
+ */
+ SkIRect makeOutset(int32_t dx, int32_t dy) const {
+ return MakeLTRB(fLeft - dx, fTop - dy, fRight + dx, fBottom + dy);
+ }
+
+    /** Offset the rectangle by adding dx to its left and right,
+ and adding dy to its top and bottom.
+ */
+ void offset(int32_t dx, int32_t dy) {
+ fLeft += dx;
+ fTop += dy;
+ fRight += dx;
+ fBottom += dy;
+ }
+
+ void offset(const SkIPoint& delta) {
+ this->offset(delta.fX, delta.fY);
+ }
+
+ /**
+     *  Offset this rect such that its new x() and y() will equal newX and newY.
+ */
+ void offsetTo(int32_t newX, int32_t newY) {
+ fRight += newX - fLeft;
+ fBottom += newY - fTop;
+ fLeft = newX;
+ fTop = newY;
+ }
+
+ /** Inset the rectangle by (dx,dy). If dx is positive, then the sides are moved inwards,
+ making the rectangle narrower. If dx is negative, then the sides are moved outwards,
+ making the rectangle wider. The same holds true for dy and the top and bottom.
+ */
+ void inset(int32_t dx, int32_t dy) {
+ fLeft += dx;
+ fTop += dy;
+ fRight -= dx;
+ fBottom -= dy;
+ }
+
+ /** Outset the rectangle by (dx,dy). If dx is positive, then the sides are
+ moved outwards, making the rectangle wider. If dx is negative, then the
+ sides are moved inwards, making the rectangle narrower. The same holds
+ true for dy and the top and bottom.
+ */
+ void outset(int32_t dx, int32_t dy) { this->inset(-dx, -dy); }
+
+ bool quickReject(int l, int t, int r, int b) const {
+ return l >= fRight || fLeft >= r || t >= fBottom || fTop >= b;
+ }
+
+ /** Returns true if (x,y) is inside the rectangle and the rectangle is not
+ empty. The left and top are considered to be inside, while the right
+ and bottom are not. Thus for the rectangle (0, 0, 5, 10), the
+ points (0,0) and (0,9) are inside, while (-1,0) and (5,9) are not.
+ */
+ bool contains(int32_t x, int32_t y) const {
+ return (unsigned)(x - fLeft) < (unsigned)(fRight - fLeft) &&
+ (unsigned)(y - fTop) < (unsigned)(fBottom - fTop);
+ }
+
+ /** Returns true if the 4 specified sides of a rectangle are inside or equal to this rectangle.
+ If either rectangle is empty, contains() returns false.
+ */
+ bool contains(int32_t left, int32_t top, int32_t right, int32_t bottom) const {
+ return left < right && top < bottom && !this->isEmpty() && // check for empties
+ fLeft <= left && fTop <= top &&
+ fRight >= right && fBottom >= bottom;
+ }
+
+ /** Returns true if the specified rectangle r is inside or equal to this rectangle.
+ */
+ bool contains(const SkIRect& r) const {
+ return !r.isEmpty() && !this->isEmpty() && // check for empties
+ fLeft <= r.fLeft && fTop <= r.fTop &&
+ fRight >= r.fRight && fBottom >= r.fBottom;
+ }
+
+ /** Returns true if the specified rectangle r is inside or equal to this rectangle.
+ */
+ bool contains(const SkRect& r) const;
+
+ /** Return true if this rectangle contains the specified rectangle.
+ For speed, this method does not check if either this or the specified
+ rectangles are empty, and if either is, its return value is undefined.
+        In the debug build, however, we assert that both this and the
+ specified rectangles are non-empty.
+ */
+ bool containsNoEmptyCheck(int32_t left, int32_t top,
+ int32_t right, int32_t bottom) const {
+ SkASSERT(fLeft < fRight && fTop < fBottom);
+ SkASSERT(left < right && top < bottom);
+
+ return fLeft <= left && fTop <= top &&
+ fRight >= right && fBottom >= bottom;
+ }
+
+ bool containsNoEmptyCheck(const SkIRect& r) const {
+ return containsNoEmptyCheck(r.fLeft, r.fTop, r.fRight, r.fBottom);
+ }
+
+ /** If r intersects this rectangle, return true and set this rectangle to that
+ intersection, otherwise return false and do not change this rectangle.
+ If either rectangle is empty, do nothing and return false.
+ */
+ bool SK_WARN_UNUSED_RESULT intersect(const SkIRect& r) {
+ return this->intersect(r.fLeft, r.fTop, r.fRight, r.fBottom);
+ }
+
+ /** If rectangles a and b intersect, return true and set this rectangle to
+ that intersection, otherwise return false and do not change this
+ rectangle. If either rectangle is empty, do nothing and return false.
+ */
+ bool SK_WARN_UNUSED_RESULT intersect(const SkIRect& a, const SkIRect& b) {
+
+ if (!a.isEmpty() && !b.isEmpty() &&
+ a.fLeft < b.fRight && b.fLeft < a.fRight &&
+ a.fTop < b.fBottom && b.fTop < a.fBottom) {
+ fLeft = SkMax32(a.fLeft, b.fLeft);
+ fTop = SkMax32(a.fTop, b.fTop);
+ fRight = SkMin32(a.fRight, b.fRight);
+ fBottom = SkMin32(a.fBottom, b.fBottom);
+ return true;
+ }
+ return false;
+ }
+
+ /** If rectangles a and b intersect, return true and set this rectangle to
+ that intersection, otherwise return false and do not change this
+ rectangle. For speed, no check to see if a or b are empty is performed.
+ If either is, then the return result is undefined. In the debug build,
+ we assert that both rectangles are non-empty.
+ */
+ bool SK_WARN_UNUSED_RESULT intersectNoEmptyCheck(const SkIRect& a, const SkIRect& b) {
+ SkASSERT(!a.isEmpty() && !b.isEmpty());
+
+ if (a.fLeft < b.fRight && b.fLeft < a.fRight &&
+ a.fTop < b.fBottom && b.fTop < a.fBottom) {
+ fLeft = SkMax32(a.fLeft, b.fLeft);
+ fTop = SkMax32(a.fTop, b.fTop);
+ fRight = SkMin32(a.fRight, b.fRight);
+ fBottom = SkMin32(a.fBottom, b.fBottom);
+ return true;
+ }
+ return false;
+ }
+
+ /** If the rectangle specified by left,top,right,bottom intersects this rectangle,
+ return true and set this rectangle to that intersection,
+ otherwise return false and do not change this rectangle.
+ If either rectangle is empty, do nothing and return false.
+ */
+ bool SK_WARN_UNUSED_RESULT intersect(int32_t left, int32_t top,
+ int32_t right, int32_t bottom) {
+ if (left < right && top < bottom && !this->isEmpty() &&
+ fLeft < right && left < fRight && fTop < bottom && top < fBottom) {
+ if (fLeft < left) fLeft = left;
+ if (fTop < top) fTop = top;
+ if (fRight > right) fRight = right;
+ if (fBottom > bottom) fBottom = bottom;
+ return true;
+ }
+ return false;
+ }
+
+ /** Returns true if a and b are not empty, and they intersect
+ */
+ static bool Intersects(const SkIRect& a, const SkIRect& b) {
+ return !a.isEmpty() && !b.isEmpty() && // check for empties
+ a.fLeft < b.fRight && b.fLeft < a.fRight &&
+ a.fTop < b.fBottom && b.fTop < a.fBottom;
+ }
+
+ /**
+     *  Returns true if a and b intersect. Debug-asserts that neither is empty.
+ */
+ static bool IntersectsNoEmptyCheck(const SkIRect& a, const SkIRect& b) {
+ SkASSERT(!a.isEmpty());
+ SkASSERT(!b.isEmpty());
+ return a.fLeft < b.fRight && b.fLeft < a.fRight &&
+ a.fTop < b.fBottom && b.fTop < a.fBottom;
+ }
+
+ /** Update this rectangle to enclose itself and the specified rectangle.
+ If this rectangle is empty, just set it to the specified rectangle. If the specified
+ rectangle is empty, do nothing.
+ */
+ void join(int32_t left, int32_t top, int32_t right, int32_t bottom);
+
+ /** Update this rectangle to enclose itself and the specified rectangle.
+ If this rectangle is empty, just set it to the specified rectangle. If the specified
+ rectangle is empty, do nothing.
+ */
+ void join(const SkIRect& r) {
+ this->join(r.fLeft, r.fTop, r.fRight, r.fBottom);
+ }
+
+    /** Swap top/bottom or left/right if they are flipped.
+ This can be called if the edges are computed separately,
+ and may have crossed over each other.
+ When this returns, left <= right && top <= bottom
+ */
+ void sort();
+
+ static const SkIRect& SK_WARN_UNUSED_RESULT EmptyIRect() {
+ static const SkIRect gEmpty = { 0, 0, 0, 0 };
+ return gEmpty;
+ }
+};
+
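
A brief sketch of the SkIRect contracts spelled out above -- the half-open
contains() test and the must-check intersect() result (illustrative code only):

    #include "SkRect.h"

    static void irect_sketch() {
        SkIRect r = SkIRect::MakeLTRB(0, 0, 5, 10);

        // contains() is half-open: left/top edges are inside, right/bottom are not.
        SkASSERT(r.contains(0, 0));
        SkASSERT(!r.contains(5, 9));

        // intersect() only mutates the rect when the two actually overlap, which
        // is why it is marked SK_WARN_UNUSED_RESULT.
        SkIRect clip = SkIRect::MakeXYWH(3, 3, 10, 10);
        if (r.intersect(clip)) {
            SkASSERT(r == SkIRect::MakeLTRB(3, 3, 5, 10));
        }
    }
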
+/** \struct SkRect
+*/
+struct SK_API SkRect {
+ SkScalar fLeft, fTop, fRight, fBottom;
+
+ static constexpr SkRect SK_WARN_UNUSED_RESULT MakeEmpty() {
+ return SkRect{0, 0, 0, 0};
+ }
+
+ static SkRect SK_WARN_UNUSED_RESULT MakeLargest() {
+ SkRect r;
+ r.setLargest();
+ return r;
+ }
+
+ static SkRect SK_WARN_UNUSED_RESULT MakeWH(SkScalar w, SkScalar h) {
+ SkRect r;
+ r.set(0, 0, w, h);
+ return r;
+ }
+
+ static SkRect SK_WARN_UNUSED_RESULT MakeIWH(int w, int h) {
+ SkRect r;
+ r.set(0, 0, SkIntToScalar(w), SkIntToScalar(h));
+ return r;
+ }
+
+ static SkRect SK_WARN_UNUSED_RESULT MakeSize(const SkSize& size) {
+ SkRect r;
+ r.set(0, 0, size.width(), size.height());
+ return r;
+ }
+
+ static constexpr SkRect SK_WARN_UNUSED_RESULT MakeLTRB(SkScalar l, SkScalar t, SkScalar r,
+ SkScalar b) {
+ return SkRect {l, t, r, b};
+ }
+
+ static SkRect SK_WARN_UNUSED_RESULT MakeXYWH(SkScalar x, SkScalar y, SkScalar w, SkScalar h) {
+ SkRect r;
+ r.set(x, y, x + w, y + h);
+ return r;
+ }
+
+ SK_ATTR_DEPRECATED("use Make()")
+ static SkRect SK_WARN_UNUSED_RESULT MakeFromIRect(const SkIRect& irect) {
+ SkRect r;
+ r.set(SkIntToScalar(irect.fLeft),
+ SkIntToScalar(irect.fTop),
+ SkIntToScalar(irect.fRight),
+ SkIntToScalar(irect.fBottom));
+ return r;
+ }
+
+ static SkRect Make(const SkISize& size) {
+ return MakeIWH(size.width(), size.height());
+ }
+
+ static SkRect SK_WARN_UNUSED_RESULT Make(const SkIRect& irect) {
+ SkRect r;
+ r.set(SkIntToScalar(irect.fLeft),
+ SkIntToScalar(irect.fTop),
+ SkIntToScalar(irect.fRight),
+ SkIntToScalar(irect.fBottom));
+ return r;
+ }
+
+ /**
+ * Return true if the rectangle's width or height are <= 0
+ */
+ bool isEmpty() const { return fLeft >= fRight || fTop >= fBottom; }
+
+ bool isLargest() const { return SK_ScalarMin == fLeft &&
+ SK_ScalarMin == fTop &&
+ SK_ScalarMax == fRight &&
+ SK_ScalarMax == fBottom; }
+
+ /**
+ * Returns true iff all values in the rect are finite. If any are
+ * infinite or NaN then this returns false.
+ */
+ bool isFinite() const {
+ float accum = 0;
+ accum *= fLeft;
+ accum *= fTop;
+ accum *= fRight;
+ accum *= fBottom;
+
+ // accum is either NaN or it is finite (zero).
+ SkASSERT(0 == accum || SkScalarIsNaN(accum));
+
+ // value==value will be true iff value is not NaN
+ // TODO: is it faster to say !accum or accum==accum?
+ return !SkScalarIsNaN(accum);
+ }
+
+ SkScalar x() const { return fLeft; }
+ SkScalar y() const { return fTop; }
+ SkScalar left() const { return fLeft; }
+ SkScalar top() const { return fTop; }
+ SkScalar right() const { return fRight; }
+ SkScalar bottom() const { return fBottom; }
+ SkScalar width() const { return fRight - fLeft; }
+ SkScalar height() const { return fBottom - fTop; }
+ SkScalar centerX() const { return SkScalarHalf(fLeft + fRight); }
+ SkScalar centerY() const { return SkScalarHalf(fTop + fBottom); }
+
+ friend bool operator==(const SkRect& a, const SkRect& b) {
+ return SkScalarsEqual((SkScalar*)&a, (SkScalar*)&b, 4);
+ }
+
+ friend bool operator!=(const SkRect& a, const SkRect& b) {
+ return !SkScalarsEqual((SkScalar*)&a, (SkScalar*)&b, 4);
+ }
+
+ /** return the 4 points that enclose the rectangle (top-left, top-right, bottom-right,
+ bottom-left). TODO: Consider adding param to control whether quad is CW or CCW.
+ */
+ void toQuad(SkPoint quad[4]) const;
+
+ /** Set this rectangle to the empty rectangle (0,0,0,0)
+ */
+ void setEmpty() { *this = MakeEmpty(); }
+
+ void set(const SkIRect& src) {
+ fLeft = SkIntToScalar(src.fLeft);
+ fTop = SkIntToScalar(src.fTop);
+ fRight = SkIntToScalar(src.fRight);
+ fBottom = SkIntToScalar(src.fBottom);
+ }
+
+ void set(SkScalar left, SkScalar top, SkScalar right, SkScalar bottom) {
+ fLeft = left;
+ fTop = top;
+ fRight = right;
+ fBottom = bottom;
+ }
+ // alias for set(l, t, r, b)
+ void setLTRB(SkScalar left, SkScalar top, SkScalar right, SkScalar bottom) {
+ this->set(left, top, right, bottom);
+ }
+
+ /** Initialize the rect with the 4 specified integers. The routine handles
+ converting them to scalars (by calling SkIntToScalar)
+ */
+ void iset(int left, int top, int right, int bottom) {
+ fLeft = SkIntToScalar(left);
+ fTop = SkIntToScalar(top);
+ fRight = SkIntToScalar(right);
+ fBottom = SkIntToScalar(bottom);
+ }
+
+ /**
+ * Set this rectangle to be left/top at 0,0, and have the specified width
+ * and height (automatically converted to SkScalar).
+ */
+ void isetWH(int width, int height) {
+ fLeft = fTop = 0;
+ fRight = SkIntToScalar(width);
+ fBottom = SkIntToScalar(height);
+ }
+
+ /** Set this rectangle to be the bounds of the array of points.
+ If the array is empty (count == 0), then set this rectangle
+ to the empty rectangle (0,0,0,0)
+ */
+ void set(const SkPoint pts[], int count) {
+ // set() had been checking for non-finite values, so keep that behavior
+ // for now. Now that we have setBoundsCheck(), we may decide to make
+ // set() be simpler/faster, and not check for those.
+ (void)this->setBoundsCheck(pts, count);
+ }
+
+ // alias for set(pts, count)
+ void setBounds(const SkPoint pts[], int count) {
+ (void)this->setBoundsCheck(pts, count);
+ }
+
+ /**
+ * Compute the bounds of the array of points, and set this rect to that
+ * bounds and return true... unless a non-finite value is encountered,
+ * in which case this rect is set to empty and false is returned.
+ */
+ bool setBoundsCheck(const SkPoint pts[], int count);
+
+ void set(const SkPoint& p0, const SkPoint& p1) {
+ fLeft = SkMinScalar(p0.fX, p1.fX);
+ fRight = SkMaxScalar(p0.fX, p1.fX);
+ fTop = SkMinScalar(p0.fY, p1.fY);
+ fBottom = SkMaxScalar(p0.fY, p1.fY);
+ }
+
+ void setXYWH(SkScalar x, SkScalar y, SkScalar width, SkScalar height) {
+ fLeft = x;
+ fTop = y;
+ fRight = x + width;
+ fBottom = y + height;
+ }
+
+ void setWH(SkScalar width, SkScalar height) {
+ fLeft = 0;
+ fTop = 0;
+ fRight = width;
+ fBottom = height;
+ }
+
+ /**
+ * Make the largest representable rectangle
+ */
+ void setLargest() {
+ fLeft = fTop = SK_ScalarMin;
+ fRight = fBottom = SK_ScalarMax;
+ }
+
+ /**
+ * Make the largest representable rectangle, but inverted (e.g. fLeft will
+ * be max and right will be min).
+ */
+ void setLargestInverted() {
+ fLeft = fTop = SK_ScalarMax;
+ fRight = fBottom = SK_ScalarMin;
+ }
+
+ /**
+ * Return a new Rect, built as an offset of this rect.
+ */
+ SkRect makeOffset(SkScalar dx, SkScalar dy) const {
+ return MakeLTRB(fLeft + dx, fTop + dy, fRight + dx, fBottom + dy);
+ }
+
+ /**
+ * Return a new Rect, built as an inset of this rect.
+ */
+ SkRect makeInset(SkScalar dx, SkScalar dy) const {
+ return MakeLTRB(fLeft + dx, fTop + dy, fRight - dx, fBottom - dy);
+ }
+
+ /**
+ * Return a new Rect, built as an outset of this rect.
+ */
+ SkRect makeOutset(SkScalar dx, SkScalar dy) const {
+ return MakeLTRB(fLeft - dx, fTop - dy, fRight + dx, fBottom + dy);
+ }
+
+    /** Offset the rectangle by adding dx to its left and right,
+ and adding dy to its top and bottom.
+ */
+ void offset(SkScalar dx, SkScalar dy) {
+ fLeft += dx;
+ fTop += dy;
+ fRight += dx;
+ fBottom += dy;
+ }
+
+ void offset(const SkPoint& delta) {
+ this->offset(delta.fX, delta.fY);
+ }
+
+ /**
+     *  Offset this rect such that its new x() and y() will equal newX and newY.
+ */
+ void offsetTo(SkScalar newX, SkScalar newY) {
+ fRight += newX - fLeft;
+ fBottom += newY - fTop;
+ fLeft = newX;
+ fTop = newY;
+ }
+
+ /** Inset the rectangle by (dx,dy). If dx is positive, then the sides are
+ moved inwards, making the rectangle narrower. If dx is negative, then
+ the sides are moved outwards, making the rectangle wider. The same holds
+ true for dy and the top and bottom.
+ */
+ void inset(SkScalar dx, SkScalar dy) {
+ fLeft += dx;
+ fTop += dy;
+ fRight -= dx;
+ fBottom -= dy;
+ }
+
+ /** Outset the rectangle by (dx,dy). If dx is positive, then the sides are
+ moved outwards, making the rectangle wider. If dx is negative, then the
+ sides are moved inwards, making the rectangle narrower. The same holds
+ true for dy and the top and bottom.
+ */
+ void outset(SkScalar dx, SkScalar dy) { this->inset(-dx, -dy); }
+
+ /** If this rectangle intersects r, return true and set this rectangle to that
+ intersection, otherwise return false and do not change this rectangle.
+ If either rectangle is empty, do nothing and return false.
+ */
+ bool SK_WARN_UNUSED_RESULT intersect(const SkRect& r);
+
+ /** If this rectangle intersects the rectangle specified by left, top, right, bottom,
+ return true and set this rectangle to that intersection, otherwise return false
+ and do not change this rectangle.
+ If either rectangle is empty, do nothing and return false.
+ */
+ bool SK_WARN_UNUSED_RESULT intersect(SkScalar left, SkScalar top,
+ SkScalar right, SkScalar bottom);
+
+ /**
+ * If rectangles a and b intersect, return true and set this rectangle to
+ * that intersection, otherwise return false and do not change this
+ * rectangle. If either rectangle is empty, do nothing and return false.
+ */
+ bool SK_WARN_UNUSED_RESULT intersect(const SkRect& a, const SkRect& b);
+
+
+private:
+ static bool Intersects(SkScalar al, SkScalar at, SkScalar ar, SkScalar ab,
+ SkScalar bl, SkScalar bt, SkScalar br, SkScalar bb) {
+ SkScalar L = SkMaxScalar(al, bl);
+ SkScalar R = SkMinScalar(ar, br);
+ SkScalar T = SkMaxScalar(at, bt);
+ SkScalar B = SkMinScalar(ab, bb);
+ return L < R && T < B;
+ }
+
+public:
+ /**
+     *  Return true if this rectangle is not empty, the rectangle described by
+     *  the specified sides is not empty, and the two rectangles intersect.
+ */
+ bool intersects(SkScalar left, SkScalar top, SkScalar right, SkScalar bottom) const {
+ return Intersects(fLeft, fTop, fRight, fBottom, left, top, right, bottom);
+ }
+
+ bool intersects(const SkRect& r) const {
+ return Intersects(fLeft, fTop, fRight, fBottom,
+ r.fLeft, r.fTop, r.fRight, r.fBottom);
+ }
+
+ /**
+ * Return true if rectangles a and b are not empty and intersect.
+ */
+ static bool Intersects(const SkRect& a, const SkRect& b) {
+ return Intersects(a.fLeft, a.fTop, a.fRight, a.fBottom,
+ b.fLeft, b.fTop, b.fRight, b.fBottom);
+ }
+
+ /**
+ * Update this rectangle to enclose itself and the specified rectangle.
+ * If this rectangle is empty, just set it to the specified rectangle.
+ * If the specified rectangle is empty, do nothing.
+ */
+ void join(SkScalar left, SkScalar top, SkScalar right, SkScalar bottom);
+
+ /** Update this rectangle to enclose itself and the specified rectangle.
+ If this rectangle is empty, just set it to the specified rectangle. If the specified
+ rectangle is empty, do nothing.
+ */
+ void join(const SkRect& r) {
+ this->join(r.fLeft, r.fTop, r.fRight, r.fBottom);
+ }
+
+ void joinNonEmptyArg(const SkRect& r) {
+ SkASSERT(!r.isEmpty());
+ // if we are empty, just assign
+ if (fLeft >= fRight || fTop >= fBottom) {
+ *this = r;
+ } else {
+ this->joinPossiblyEmptyRect(r);
+ }
+ }
+
+ /**
+     * Joins the rectangle with another without checking if either is empty (may produce unexpected
+ * results if either rect is inverted).
+ */
+ void joinPossiblyEmptyRect(const SkRect& r) {
+ fLeft = SkMinScalar(fLeft, r.left());
+ fTop = SkMinScalar(fTop, r.top());
+ fRight = SkMaxScalar(fRight, r.right());
+ fBottom = SkMaxScalar(fBottom, r.bottom());
+ }
+
+ /**
+ * Grow the rect to include the specified (x,y). After this call, the
+ * following will be true: fLeft <= x <= fRight && fTop <= y <= fBottom.
+ *
+ * This is close, but not quite the same contract as contains(), since
+     *  contains() treats the left and top differently from the right and bottom.
+ * contains(x,y) -> fLeft <= x < fRight && fTop <= y < fBottom. Also note
+ * that contains(x,y) always returns false if the rect is empty.
+ */
+ void growToInclude(SkScalar x, SkScalar y) {
+ fLeft = SkMinScalar(x, fLeft);
+ fRight = SkMaxScalar(x, fRight);
+ fTop = SkMinScalar(y, fTop);
+ fBottom = SkMaxScalar(y, fBottom);
+ }
+
+ /** Bulk version of growToInclude */
+ void growToInclude(const SkPoint pts[], int count) {
+ this->growToInclude(pts, sizeof(SkPoint), count);
+ }
+
+ /** Bulk version of growToInclude with stride. */
+ void growToInclude(const SkPoint pts[], size_t stride, int count) {
+ SkASSERT(count >= 0);
+ SkASSERT(stride >= sizeof(SkPoint));
+ const SkPoint* end = (const SkPoint*)((intptr_t)pts + count * stride);
+ for (; pts < end; pts = (const SkPoint*)((intptr_t)pts + stride)) {
+ this->growToInclude(pts->fX, pts->fY);
+ }
+ }
+
+ /**
+ * Return true if this rectangle contains r, and if both rectangles are
+ * not empty.
+ */
+ bool contains(const SkRect& r) const {
+ // todo: can we eliminate the this->isEmpty check?
+ return !r.isEmpty() && !this->isEmpty() &&
+ fLeft <= r.fLeft && fTop <= r.fTop &&
+ fRight >= r.fRight && fBottom >= r.fBottom;
+ }
+
+ /**
+ * Returns true if the specified rectangle r is inside or equal to this rectangle.
+ */
+ bool contains(const SkIRect& r) const {
+ // todo: can we eliminate the this->isEmpty check?
+ return !r.isEmpty() && !this->isEmpty() &&
+ fLeft <= SkIntToScalar(r.fLeft) && fTop <= SkIntToScalar(r.fTop) &&
+ fRight >= SkIntToScalar(r.fRight) && fBottom >= SkIntToScalar(r.fBottom);
+ }
+
+ /**
+ * Set the dst rectangle by rounding this rectangle's coordinates to their
+ * nearest integer values using SkScalarRoundToInt.
+ */
+ void round(SkIRect* dst) const {
+ SkASSERT(dst);
+ dst->set(SkScalarRoundToInt(fLeft), SkScalarRoundToInt(fTop),
+ SkScalarRoundToInt(fRight), SkScalarRoundToInt(fBottom));
+ }
+
+ /**
+ * Set the dst rectangle by rounding "out" this rectangle, choosing the
+ * SkScalarFloor of top and left, and the SkScalarCeil of right and bottom.
+ */
+ void roundOut(SkIRect* dst) const {
+ SkASSERT(dst);
+ dst->set(SkScalarFloorToInt(fLeft), SkScalarFloorToInt(fTop),
+ SkScalarCeilToInt(fRight), SkScalarCeilToInt(fBottom));
+ }
+
+ /**
+ * Set the dst rectangle by rounding "out" this rectangle, choosing the
+ * SkScalarFloorToScalar of top and left, and the SkScalarCeilToScalar of right and bottom.
+ *
+ * It is safe for this == dst
+ */
+ void roundOut(SkRect* dst) const {
+ dst->set(SkScalarFloorToScalar(fLeft),
+ SkScalarFloorToScalar(fTop),
+ SkScalarCeilToScalar(fRight),
+ SkScalarCeilToScalar(fBottom));
+ }
+
+ /**
+ * Set the dst rectangle by rounding "in" this rectangle, choosing the
+ * ceil of top and left, and the floor of right and bottom. This does *not*
+ * call sort(), so it is possible that the resulting rect is inverted...
+ * e.g. left >= right or top >= bottom. Call isEmpty() to detect that.
+ */
+ void roundIn(SkIRect* dst) const {
+ SkASSERT(dst);
+ dst->set(SkScalarCeilToInt(fLeft), SkScalarCeilToInt(fTop),
+ SkScalarFloorToInt(fRight), SkScalarFloorToInt(fBottom));
+ }
+
+ //! Returns the result of calling round(&dst)
+ SkIRect round() const {
+ SkIRect ir;
+ this->round(&ir);
+ return ir;
+ }
+
+ //! Returns the result of calling roundOut(&dst)
+ SkIRect roundOut() const {
+ SkIRect ir;
+ this->roundOut(&ir);
+ return ir;
+ }
+
+ /**
+     *  Swap top/bottom or left/right if they are flipped (i.e. if width()
+     *  or height() would return a negative value). This should be called
+ * if the edges are computed separately, and may have crossed over each
+ * other. When this returns, left <= right && top <= bottom
+ */
+ void sort() {
+ if (fLeft > fRight) {
+ SkTSwap<SkScalar>(fLeft, fRight);
+ }
+
+ if (fTop > fBottom) {
+ SkTSwap<SkScalar>(fTop, fBottom);
+ }
+ }
+
+ /**
+ * cast-safe way to treat the rect as an array of (4) SkScalars.
+ */
+ const SkScalar* asScalars() const { return &fLeft; }
+
+ void dump(bool asHex) const;
+ void dump() const { this->dump(false); }
+ void dumpHex() const { this->dump(true); }
+};
+
+inline bool SkIRect::contains(const SkRect& r) const {
+ return !r.isEmpty() && !this->isEmpty() && // check for empties
+ (SkScalar)fLeft <= r.fLeft && (SkScalar)fTop <= r.fTop &&
+ (SkScalar)fRight >= r.fRight && (SkScalar)fBottom >= r.fBottom;
+}
+
+#endif
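
A small sketch contrasting the three rounding flavors documented above (nearest,
outward, inward), assuming the Skia headers are available:

    #include "SkRect.h"

    static void rect_rounding_sketch() {
        SkRect r = SkRect::MakeLTRB(0.4f, 0.6f, 10.4f, 10.6f);

        // round(): nearest integer on each edge.
        SkASSERT(r.round() == SkIRect::MakeLTRB(0, 1, 10, 11));

        // roundOut(): floor left/top, ceil right/bottom -- never loses coverage.
        SkASSERT(r.roundOut() == SkIRect::MakeLTRB(0, 0, 11, 11));

        // roundIn(): ceil left/top, floor right/bottom -- may yield an inverted
        // (empty) rect, which is why the comment above says to check isEmpty().
        SkIRect in;
        r.roundIn(&in);
        SkASSERT(in == SkIRect::MakeLTRB(1, 1, 10, 10));
    }
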
diff --git a/gfx/skia/skia/include/core/SkRefCnt.h b/gfx/skia/skia/include/core/SkRefCnt.h
new file mode 100644
index 000000000..7e39125b7
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkRefCnt.h
@@ -0,0 +1,466 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRefCnt_DEFINED
+#define SkRefCnt_DEFINED
+
+#include "../private/SkTLogic.h"
+#include "SkTypes.h"
+#include <atomic>
+#include <functional>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#define SK_SUPPORT_TRANSITION_TO_SP_INTERFACES
+
+/** \class SkRefCntBase
+
+ SkRefCntBase is the base class for objects that may be shared by multiple
+ objects. When an existing owner wants to share a reference, it calls ref().
+ When an owner wants to release its reference, it calls unref(). When the
+ shared object's reference count goes to zero as the result of an unref()
+ call, its (virtual) destructor is called. It is an error for the
+ destructor to be called explicitly (or via the object going out of scope on
+ the stack or calling delete) if getRefCnt() > 1.
+*/
+class SK_API SkRefCntBase : SkNoncopyable {
+public:
+ /** Default construct, initializing the reference count to 1.
+ */
+ SkRefCntBase() : fRefCnt(1) {}
+
+ /** Destruct, asserting that the reference count is 1.
+ */
+ virtual ~SkRefCntBase() {
+#ifdef SK_DEBUG
+ SkASSERTF(getRefCnt() == 1, "fRefCnt was %d", getRefCnt());
+ // illegal value, to catch us if we reuse after delete
+ fRefCnt.store(0, std::memory_order_relaxed);
+#endif
+ }
+
+ /** Return the reference count. Use only for debugging. */
+ int32_t getRefCnt() const {
+ return fRefCnt.load(std::memory_order_relaxed);
+ }
+
+#ifdef SK_DEBUG
+ void validate() const {
+ SkASSERT(getRefCnt() > 0);
+ }
+#endif
+
+ /** May return true if the caller is the only owner.
+     *  Ensures that all previous owners' actions are complete.
+ */
+ bool unique() const {
+ if (1 == fRefCnt.load(std::memory_order_acquire)) {
+ // The acquire barrier is only really needed if we return true. It
+ // prevents code conditioned on the result of unique() from running
+ // until previous owners are all totally done calling unref().
+ return true;
+ }
+ return false;
+ }
+
+ /** Increment the reference count. Must be balanced by a call to unref().
+ */
+ void ref() const {
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ // Android employs some special subclasses that enable the fRefCnt to
+ // go to zero, but not below, prior to reusing the object. This breaks
+ // the use of unique() on such objects and as such should be removed
+ // once the Android code is fixed.
+ SkASSERT(getRefCnt() >= 0);
+#else
+ SkASSERT(getRefCnt() > 0);
+#endif
+ // No barrier required.
+ (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
+ }
+
+ /** Decrement the reference count. If the reference count is 1 before the
+ decrement, then delete the object. Note that if this is the case, then
+ the object needs to have been allocated via new, and not on the stack.
+ */
+ void unref() const {
+ SkASSERT(getRefCnt() > 0);
+ // A release here acts in place of all releases we "should" have been doing in ref().
+ if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
+ // Like unique(), the acquire is only needed on success, to make sure
+ // code in internal_dispose() doesn't happen before the decrement.
+ this->internal_dispose();
+ }
+ }
+
+protected:
+ /**
+ * Allow subclasses to call this if they've overridden internal_dispose
+ * so they can reset fRefCnt before the destructor is called or if they
+ * choose not to call the destructor (e.g. using a free list).
+ */
+ void internal_dispose_restore_refcnt_to_1() const {
+ SkASSERT(0 == getRefCnt());
+ fRefCnt.store(1, std::memory_order_relaxed);
+ }
+
+private:
+ /**
+ * Called when the ref count goes to 0.
+ */
+ virtual void internal_dispose() const {
+ this->internal_dispose_restore_refcnt_to_1();
+ delete this;
+ }
+
+ // The following friends are those which override internal_dispose()
+ // and conditionally call SkRefCnt::internal_dispose().
+ friend class SkWeakRefCnt;
+
+ mutable std::atomic<int32_t> fRefCnt;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+#ifdef SK_REF_CNT_MIXIN_INCLUDE
+// It is the responsibility of the following include to define the type SkRefCnt.
+// This SkRefCnt should normally derive from SkRefCntBase.
+#include SK_REF_CNT_MIXIN_INCLUDE
+#else
+class SK_API SkRefCnt : public SkRefCntBase {
+ // "#include SK_REF_CNT_MIXIN_INCLUDE" doesn't work with this build system.
+ #if defined(GOOGLE3)
+ public:
+ void deref() const { this->unref(); }
+ #endif
+};
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** Helper macro to safely assign one SkRefCnt[TS]* to another, checking for
+    null on each side of the assignment, and ensuring that ref() is called
+ before unref(), in case the two pointers point to the same object.
+ */
+#define SkRefCnt_SafeAssign(dst, src) \
+ do { \
+ if (src) src->ref(); \
+ if (dst) dst->unref(); \
+ dst = src; \
+ } while (0)
+
+
+/** Call obj->ref() and return obj. The obj must not be nullptr.
+ */
+template <typename T> static inline T* SkRef(T* obj) {
+ SkASSERT(obj);
+ obj->ref();
+ return obj;
+}
+
+/** Check if the argument is non-null, and if so, call obj->ref() and return obj.
+ */
+template <typename T> static inline T* SkSafeRef(T* obj) {
+ if (obj) {
+ obj->ref();
+ }
+ return obj;
+}
+
+/** Check if the argument is non-null, and if so, call obj->unref()
+ */
+template <typename T> static inline void SkSafeUnref(T* obj) {
+ if (obj) {
+ obj->unref();
+ }
+}
+
+template<typename T> static inline void SkSafeSetNull(T*& obj) {
+ if (obj) {
+ obj->unref();
+ obj = nullptr;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+template <typename T> struct SkTUnref {
+ void operator()(T* t) { t->unref(); }
+};
+
+/**
+ * Utility class that simply unref's its argument in the destructor.
+ */
+template <typename T> class SkAutoTUnref : public std::unique_ptr<T, SkTUnref<T>> {
+public:
+ explicit SkAutoTUnref(T* obj = nullptr) : std::unique_ptr<T, SkTUnref<T>>(obj) {}
+
+ operator T*() const { return this->get(); }
+
+#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
+ // Need to update graphics/Shader.cpp.
+ T* detach() { return this->release(); }
+#endif
+};
+// Can't use the #define trick below to guard a bare SkAutoTUnref(...) because it's templated. :(
+
+class SkAutoUnref : public SkAutoTUnref<SkRefCnt> {
+public:
+ SkAutoUnref(SkRefCnt* obj) : SkAutoTUnref<SkRefCnt>(obj) {}
+};
+#define SkAutoUnref(...) SK_REQUIRE_LOCAL_VAR(SkAutoUnref)
+
+// This is a variant of SkRefCnt that's Not Virtual, so weighs 4 bytes instead of 8 or 16.
+// There's only benefit to using this if the deriving class does not otherwise need a vtable.
+template <typename Derived>
+class SkNVRefCnt : SkNoncopyable {
+public:
+ SkNVRefCnt() : fRefCnt(1) {}
+ ~SkNVRefCnt() { SkASSERTF(1 == getRefCnt(), "NVRefCnt was %d", getRefCnt()); }
+
+ // Implementation is pretty much the same as SkRefCntBase. All required barriers are the same:
+ // - unique() needs acquire when it returns true, and no barrier if it returns false;
+ // - ref() doesn't need any barrier;
+ // - unref() needs a release barrier, and an acquire if it's going to call delete.
+
+ bool unique() const { return 1 == fRefCnt.load(std::memory_order_acquire); }
+ void ref() const { (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed); }
+ void unref() const {
+ if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
+ // restore the 1 for our destructor's assert
+ SkDEBUGCODE(fRefCnt.store(1, std::memory_order_relaxed));
+ delete (const Derived*)this;
+ }
+ }
+ void deref() const { this->unref(); }
+
+private:
+ mutable std::atomic<int32_t> fRefCnt;
+ int32_t getRefCnt() const {
+ return fRefCnt.load(std::memory_order_relaxed);
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Shared pointer class to wrap classes that support a ref()/unref() interface.
+ *
+ * This can be used for classes inheriting from SkRefCnt, but it also works for other
+ * classes that match the interface, but have different internal choices: e.g. the hosted class
+ * may have its ref/unref be thread-safe, but that is not assumed/imposed by sk_sp.
+ */
+template <typename T> class sk_sp {
+ /** Supports safe bool idiom. Obsolete with explicit operator bool. */
+ using unspecified_bool_type = T* sk_sp::*;
+public:
+ using element_type = T;
+
+ constexpr sk_sp() : fPtr(nullptr) {}
+ constexpr sk_sp(std::nullptr_t) : fPtr(nullptr) {}
+
+ /**
+     *  Shares the underlying object by calling ref(), so that the argument and the newly
+     *  created sk_sp both have a reference to it.
+ */
+ sk_sp(const sk_sp<T>& that) : fPtr(SkSafeRef(that.get())) {}
+ template <typename U, typename = skstd::enable_if_t<std::is_convertible<U*, T*>::value>>
+ sk_sp(const sk_sp<U>& that) : fPtr(SkSafeRef(that.get())) {}
+
+ /**
+ * Move the underlying object from the argument to the newly created sk_sp. Afterwards only
+ * the new sk_sp will have a reference to the object, and the argument will point to null.
+ * No call to ref() or unref() will be made.
+ */
+ sk_sp(sk_sp<T>&& that) : fPtr(that.release()) {}
+ template <typename U, typename = skstd::enable_if_t<std::is_convertible<U*, T*>::value>>
+ sk_sp(sk_sp<U>&& that) : fPtr(that.release()) {}
+
+ /**
+ * Adopt the bare pointer into the newly created sk_sp.
+ * No call to ref() or unref() will be made.
+ */
+ explicit sk_sp(T* obj) : fPtr(obj) {}
+
+ /**
+ * Calls unref() on the underlying object pointer.
+ */
+ ~sk_sp() {
+ SkSafeUnref(fPtr);
+ SkDEBUGCODE(fPtr = nullptr);
+ }
+
+ sk_sp<T>& operator=(std::nullptr_t) { this->reset(); return *this; }
+
+ /**
+ * Shares the underlying object referenced by the argument by calling ref() on it. If this
+ * sk_sp previously had a reference to an object (i.e. not null) it will call unref() on that
+ * object.
+ */
+ sk_sp<T>& operator=(const sk_sp<T>& that) {
+ this->reset(SkSafeRef(that.get()));
+ return *this;
+ }
+ template <typename U, typename = skstd::enable_if_t<std::is_convertible<U*, T*>::value>>
+ sk_sp<T>& operator=(const sk_sp<U>& that) {
+ this->reset(SkSafeRef(that.get()));
+ return *this;
+ }
+
+ /**
+ * Move the underlying object from the argument to the sk_sp. If the sk_sp previously held
+ * a reference to another object, unref() will be called on that object. No call to ref()
+ * will be made.
+ */
+ sk_sp<T>& operator=(sk_sp<T>&& that) {
+ this->reset(that.release());
+ return *this;
+ }
+ template <typename U, typename = skstd::enable_if_t<std::is_convertible<U*, T*>::value>>
+ sk_sp<T>& operator=(sk_sp<U>&& that) {
+ this->reset(that.release());
+ return *this;
+ }
+
+ T& operator*() const {
+ SkASSERT(this->get() != nullptr);
+ return *this->get();
+ }
+
+ // MSVC 2013 does not work correctly with explicit operator bool.
+ // https://chromium-cpp.appspot.com/#core-blacklist
+ // When explicit operator bool can be used, remove operator! and operator unspecified_bool_type.
+ //explicit operator bool() const { return this->get() != nullptr; }
+ operator unspecified_bool_type() const { return this->get() ? &sk_sp::fPtr : nullptr; }
+ bool operator!() const { return this->get() == nullptr; }
+
+ T* get() const { return fPtr; }
+ T* operator->() const { return fPtr; }
+
+ /**
+ * Adopt the new bare pointer, and call unref() on any previously held object (if not null).
+ * No call to ref() will be made.
+ */
+ void reset(T* ptr = nullptr) {
+ // Calling fPtr->unref() may call this->~() or this->reset(T*).
+ // http://wg21.cmeerw.net/lwg/issue998
+ // http://wg21.cmeerw.net/lwg/issue2262
+ T* oldPtr = fPtr;
+ fPtr = ptr;
+ SkSafeUnref(oldPtr);
+ }
+
+ /**
+ * Return the bare pointer, and set the internal object pointer to nullptr.
+ * The caller must assume ownership of the object, and manage its reference count directly.
+ * No call to unref() will be made.
+ */
+ T* SK_WARN_UNUSED_RESULT release() {
+ T* ptr = fPtr;
+ fPtr = nullptr;
+ return ptr;
+ }
+
+ void swap(sk_sp<T>& that) /*noexcept*/ {
+ using std::swap;
+ swap(fPtr, that.fPtr);
+ }
+
+private:
+ T* fPtr;
+};
+
+template <typename T> inline void swap(sk_sp<T>& a, sk_sp<T>& b) /*noexcept*/ {
+ a.swap(b);
+}
+
+template <typename T, typename U> inline bool operator==(const sk_sp<T>& a, const sk_sp<U>& b) {
+ return a.get() == b.get();
+}
+template <typename T> inline bool operator==(const sk_sp<T>& a, std::nullptr_t) /*noexcept*/ {
+ return !a;
+}
+template <typename T> inline bool operator==(std::nullptr_t, const sk_sp<T>& b) /*noexcept*/ {
+ return !b;
+}
+
+template <typename T, typename U> inline bool operator!=(const sk_sp<T>& a, const sk_sp<U>& b) {
+ return a.get() != b.get();
+}
+template <typename T> inline bool operator!=(const sk_sp<T>& a, std::nullptr_t) /*noexcept*/ {
+ return static_cast<bool>(a);
+}
+template <typename T> inline bool operator!=(std::nullptr_t, const sk_sp<T>& b) /*noexcept*/ {
+ return static_cast<bool>(b);
+}
+
+template <typename T, typename U> inline bool operator<(const sk_sp<T>& a, const sk_sp<U>& b) {
+ // Provide defined total order on sk_sp.
+ // http://wg21.cmeerw.net/lwg/issue1297
+ // http://wg21.cmeerw.net/lwg/issue1401 .
+ return std::less<skstd::common_type_t<T*, U*>>()(a.get(), b.get());
+}
+template <typename T> inline bool operator<(const sk_sp<T>& a, std::nullptr_t) {
+ return std::less<T*>()(a.get(), nullptr);
+}
+template <typename T> inline bool operator<(std::nullptr_t, const sk_sp<T>& b) {
+ return std::less<T*>()(nullptr, b.get());
+}
+
+template <typename T, typename U> inline bool operator<=(const sk_sp<T>& a, const sk_sp<U>& b) {
+ return !(b < a);
+}
+template <typename T> inline bool operator<=(const sk_sp<T>& a, std::nullptr_t) {
+ return !(nullptr < a);
+}
+template <typename T> inline bool operator<=(std::nullptr_t, const sk_sp<T>& b) {
+ return !(b < nullptr);
+}
+
+template <typename T, typename U> inline bool operator>(const sk_sp<T>& a, const sk_sp<U>& b) {
+ return b < a;
+}
+template <typename T> inline bool operator>(const sk_sp<T>& a, std::nullptr_t) {
+ return nullptr < a;
+}
+template <typename T> inline bool operator>(std::nullptr_t, const sk_sp<T>& b) {
+ return b < nullptr;
+}
+
+template <typename T, typename U> inline bool operator>=(const sk_sp<T>& a, const sk_sp<U>& b) {
+ return !(a < b);
+}
+template <typename T> inline bool operator>=(const sk_sp<T>& a, std::nullptr_t) {
+ return !(a < nullptr);
+}
+template <typename T> inline bool operator>=(std::nullptr_t, const sk_sp<T>& b) {
+ return !(nullptr < b);
+}
+
+template <typename T, typename... Args>
+sk_sp<T> sk_make_sp(Args&&... args) {
+ return sk_sp<T>(new T(std::forward<Args>(args)...));
+}
+
+#ifdef SK_SUPPORT_TRANSITION_TO_SP_INTERFACES
+
+/*
+ * Returns a sk_sp wrapping the provided ptr AND calls ref on it (if not null).
+ *
+ * This is different than the semantics of the constructor for sk_sp, which just wraps the ptr,
+ * effectively "adopting" it.
+ *
+ * This function may be helpful while we convert callers from ptr-based to sk_sp-based parameters.
+ */
+template <typename T> sk_sp<T> sk_ref_sp(T* obj) {
+ return sk_sp<T>(SkSafeRef(obj));
+}
+
+#endif
+
+#endif
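
A short sketch of the ownership rules above -- sk_make_sp() and the sk_sp(T*)
constructor adopt a reference, while sk_ref_sp() shares one -- using a
hypothetical SkRefCnt subclass:

    #include "SkRefCnt.h"
    #include <utility>

    // Hypothetical ref-counted class, for illustration only.
    class Thing : public SkRefCnt {
    public:
        explicit Thing(int v) : fValue(v) {}
        int value() const { return fValue; }
    private:
        int fValue;
    };

    static void sk_sp_sketch() {
        // sk_make_sp() news the object and adopts its initial reference.
        sk_sp<Thing> a = sk_make_sp<Thing>(42);

        // The sk_sp(T*) constructor also adopts: no ref() is called here.
        sk_sp<Thing> b(new Thing(7));

        // sk_ref_sp() shares: it calls ref() on the bare pointer.
        sk_sp<Thing> c = sk_ref_sp(b.get());
        SkASSERT(!b->unique());             // b and c now co-own the object

        // Moving transfers ownership without touching the ref count.
        sk_sp<Thing> d = std::move(a);
        SkASSERT(nullptr == a.get());
        SkASSERT(42 == d->value());
    }
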
diff --git a/gfx/skia/skia/include/core/SkRegion.h b/gfx/skia/skia/include/core/SkRegion.h
new file mode 100644
index 000000000..a0f0e4ad3
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkRegion.h
@@ -0,0 +1,460 @@
+
+/*
+ * Copyright 2005 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkRegion_DEFINED
+#define SkRegion_DEFINED
+
+#include "SkRect.h"
+
+class SkPath;
+class SkRgnBuilder;
+
+namespace android {
+ class Region;
+}
+
+#define SkRegion_gEmptyRunHeadPtr ((SkRegion::RunHead*)-1)
+#define SkRegion_gRectRunHeadPtr 0
+
+/** \class SkRegion
+
+ The SkRegion class encapsulates the geometric region used to specify
+ clipping areas for drawing.
+*/
+class SK_API SkRegion {
+public:
+ typedef int32_t RunType;
+ enum {
+ kRunTypeSentinel = 0x7FFFFFFF
+ };
+
+ SkRegion();
+ SkRegion(const SkRegion&);
+ explicit SkRegion(const SkIRect&);
+ ~SkRegion();
+
+ SkRegion& operator=(const SkRegion&);
+
+ /**
+     *  Return true if the two regions are equal, i.e. they enclose exactly
+     *  the same area.
+ */
+ bool operator==(const SkRegion& other) const;
+
+ /**
+ * Return true if the two regions are not equal.
+ */
+ bool operator!=(const SkRegion& other) const {
+ return !(*this == other);
+ }
+
+ /**
+ * Replace this region with the specified region, and return true if the
+ * resulting region is non-empty.
+ */
+ bool set(const SkRegion& src) {
+ *this = src;
+ return !this->isEmpty();
+ }
+
+ /**
+ * Swap the contents of this and the specified region. This operation
+     *  is guaranteed to never fail.
+ */
+ void swap(SkRegion&);
+
+ /** Return true if this region is empty */
+ bool isEmpty() const { return fRunHead == SkRegion_gEmptyRunHeadPtr; }
+
+ /** Return true if this region is a single, non-empty rectangle */
+ bool isRect() const { return fRunHead == SkRegion_gRectRunHeadPtr; }
+
+ /** Return true if this region consists of more than 1 rectangular area */
+ bool isComplex() const { return !this->isEmpty() && !this->isRect(); }
+
+ /**
+ * Return the bounds of this region. If the region is empty, returns an
+ * empty rectangle.
+ */
+ const SkIRect& getBounds() const { return fBounds; }
+
+ /**
+ * Returns a value that grows approximately linearly with the number of
+     *  intervals that make up the region. An empty region will return 0, a rect
+     *  will return 1, and a complex region will return a value > 1.
+ *
+ * Use this to compare two regions, where the larger count likely
+ * indicates a more complex region.
+ */
+ int computeRegionComplexity() const;
+
+ /**
+ * Returns true if the region is non-empty, and if so, appends the
+ * boundary(s) of the region to the specified path.
+ * If the region is empty, returns false, and path is left unmodified.
+ */
+ bool getBoundaryPath(SkPath* path) const;
+
+ /**
+ * Set the region to be empty, and return false, since the resulting
+ * region is empty
+ */
+ bool setEmpty();
+
+ /**
+ * If rect is non-empty, set this region to that rectangle and return true,
+ * otherwise set this region to empty and return false.
+ */
+ bool setRect(const SkIRect&);
+
+ /**
+ * If left < right and top < bottom, set this region to that rectangle and
+ * return true, otherwise set this region to empty and return false.
+ */
+ bool setRect(int32_t left, int32_t top, int32_t right, int32_t bottom);
+
+ /**
+ * Set this region to the union of an array of rects. This is generally
+ * faster than calling region.op(rect, kUnion_Op) in a loop. If count is
+ * 0, then this region is set to the empty region.
+ * @return true if the resulting region is non-empty
+ */
+ bool setRects(const SkIRect rects[], int count);
+
+ /**
+ * Set this region to the specified region, and return true if it is
+ * non-empty.
+ */
+ bool setRegion(const SkRegion&);
+
+ /**
+ * Set this region to the area described by the path, clipped.
+ * Return true if the resulting region is non-empty.
+ * This produces a region that is identical to the pixels that would be
+ * drawn by the path (with no antialiasing) with the specified clip.
+ */
+ bool setPath(const SkPath&, const SkRegion& clip);
+
+ /**
+ * Returns true if the specified rectangle has a non-empty intersection
+ * with this region.
+ */
+ bool intersects(const SkIRect&) const;
+
+ /**
+ * Returns true if the specified region has a non-empty intersection
+ * with this region.
+ */
+ bool intersects(const SkRegion&) const;
+
+ /**
+ * Return true if the specified x,y coordinate is inside the region.
+ */
+ bool contains(int32_t x, int32_t y) const;
+
+ /**
+ * Return true if the specified rectangle is completely inside the region.
+ * This works for simple (rectangular) and complex regions, and always
+ * returns the correct result. Note: if either this region or the rectangle
+ * is empty, contains() returns false.
+ */
+ bool contains(const SkIRect&) const;
+
+ /**
+ * Return true if the specified region is completely inside the region.
+ * This works for simple (rectangular) and complex regions, and always
+ * returns the correct result. Note: if either region is empty, contains()
+ * returns false.
+ */
+ bool contains(const SkRegion&) const;
+
+ /**
+ * Return true if this region is a single rectangle (not complex) and the
+ * specified rectangle is contained by this region. Returning false is not
+ * a guarantee that the rectangle is not contained by this region, but
+ * returning true is a guarantee that the rectangle is contained by this region.
+ */
+ bool quickContains(const SkIRect& r) const {
+ return this->quickContains(r.fLeft, r.fTop, r.fRight, r.fBottom);
+ }
+
+ /**
+ * Return true if this region is a single rectangle (not complex) and the
+ * specified rectangle is contained by this region. Returning false is not
+ * a guarantee that the rectangle is not contained by this region, but
+ * returning true is a guarantee that the rectangle is contained by this
+ * region.
+ */
+ bool quickContains(int32_t left, int32_t top, int32_t right,
+ int32_t bottom) const {
+ SkASSERT(this->isEmpty() == fBounds.isEmpty()); // valid region
+
+ return left < right && top < bottom &&
+ fRunHead == SkRegion_gRectRunHeadPtr && // this->isRect()
+ /* fBounds.contains(left, top, right, bottom); */
+ fBounds.fLeft <= left && fBounds.fTop <= top &&
+ fBounds.fRight >= right && fBounds.fBottom >= bottom;
+ }
+
+ /**
+ * Return true if this region is empty, or if the specified rectangle does
+ * not intersect the region. Returning false is not a guarantee that they
+ * intersect, but returning true is a guarantee that they do not.
+ */
+ bool quickReject(const SkIRect& rect) const {
+ return this->isEmpty() || rect.isEmpty() ||
+ !SkIRect::Intersects(fBounds, rect);
+ }
+
+ /**
+ * Return true if this region, or rgn, is empty, or if their bounds do not
+ * intersect. Returning false is not a guarantee that they intersect, but
+ * returning true is a guarantee that they do not.
+ */
+ bool quickReject(const SkRegion& rgn) const {
+ return this->isEmpty() || rgn.isEmpty() ||
+ !SkIRect::Intersects(fBounds, rgn.fBounds);
+ }
+
+ /** Translate the region by the specified (dx, dy) amount. */
+ void translate(int dx, int dy) { this->translate(dx, dy, this); }
+
+ /**
+ * Translate the region by the specified (dx, dy) amount, writing the
+ * resulting region into dst. Note: it is legal to pass this region as the
+ * dst parameter, effectively translating the region in place. If dst is
+ * null, nothing happens.
+ */
+ void translate(int dx, int dy, SkRegion* dst) const;
+
+ /**
+ * The logical operations that can be performed when combining two regions.
+ */
+ enum Op {
+ kDifference_Op, //!< subtract the op region from the first region
+ kIntersect_Op, //!< intersect the two regions
+ kUnion_Op, //!< union (inclusive-or) the two regions
+ kXOR_Op, //!< exclusive-or the two regions
+ /** subtract the first region from the op region */
+ kReverseDifference_Op,
+ kReplace_Op, //!< replace the dst region with the op region
+
+ kLastOp = kReplace_Op
+ };
+
+ static const int kOpCnt = kLastOp + 1;
+
+ /**
+ * Set this region to the result of applying the Op to this region and the
+ * specified rectangle: this = (this op rect).
+ * Return true if the resulting region is non-empty.
+ */
+ bool op(const SkIRect& rect, Op op) {
+ if (this->isRect() && kIntersect_Op == op) {
+ if (!fBounds.intersect(rect)) {
+ return this->setEmpty();
+ }
+ return true;
+ }
+ return this->op(*this, rect, op);
+ }
+
+ /**
+ * Set this region to the result of applying the Op to this region and the
+ * specified rectangle: this = (this op rect).
+ * Return true if the resulting region is non-empty.
+ */
+ bool op(int left, int top, int right, int bottom, Op op) {
+ SkIRect rect;
+ rect.set(left, top, right, bottom);
+ return this->op(*this, rect, op);
+ }
+
+ /**
+ * Set this region to the result of applying the Op to this region and the
+ * specified region: this = (this op rgn).
+ * Return true if the resulting region is non-empty.
+ */
+ bool op(const SkRegion& rgn, Op op) { return this->op(*this, rgn, op); }
+
+ /**
+ * Set this region to the result of applying the Op to the specified
+ * rectangle and region: this = (rect op rgn).
+ * Return true if the resulting region is non-empty.
+ */
+ bool op(const SkIRect& rect, const SkRegion& rgn, Op);
+
+ /**
+ * Set this region to the result of applying the Op to the specified
+ * region and rectangle: this = (rgn op rect).
+ * Return true if the resulting region is non-empty.
+ */
+ bool op(const SkRegion& rgn, const SkIRect& rect, Op);
+
+ /**
+ * Set this region to the result of applying the Op to the specified
+ * regions: this = (rgna op rgnb).
+ * Return true if the resulting region is non-empty.
+ */
+ bool op(const SkRegion& rgna, const SkRegion& rgnb, Op op);
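+
+ /**
+ * Example (illustrative sketch of the Op overloads above; the rectangle
+ * values are arbitrary):
+ *
+ *    SkRegion a, b;
+ *    a.setRect(SkIRect::MakeLTRB(0, 0, 100, 100));
+ *    b.setRect(SkIRect::MakeLTRB(50, 50, 150, 150));
+ *
+ *    SkRegion u;
+ *    u.op(a, b, SkRegion::kUnion_Op);   // union bounds are (0,0,150,150)
+ *    a.op(b, SkRegion::kIntersect_Op);  // a is now the rect (50,50,100,100)
+ *    a.op(SkIRect::MakeLTRB(60, 60, 70, 70), SkRegion::kDifference_Op);
+ */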
+
+#ifdef SK_BUILD_FOR_ANDROID
+ /** Returns a new char* containing the list of rectangles in this region
+ */
+ char* toString();
+#endif
+
+ /**
+ * Returns the sequence of rectangles, sorted in Y and X, that make up
+ * this region.
+ */
+ class SK_API Iterator {
+ public:
+ Iterator() : fRgn(NULL), fDone(true) {}
+ Iterator(const SkRegion&);
+ // if we have a region, reset to it and return true, else return false
+ bool rewind();
+ // reset the iterator, using the new region
+ void reset(const SkRegion&);
+ bool done() const { return fDone; }
+ void next();
+ const SkIRect& rect() const { return fRect; }
+ // may return null
+ const SkRegion* rgn() const { return fRgn; }
+
+ private:
+ const SkRegion* fRgn;
+ const RunType* fRuns;
+ SkIRect fRect;
+ bool fDone;
+ };
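+
+ /**
+ * Example (illustrative sketch): visit every rectangle in a region named
+ * rgn, which is assumed to already exist.
+ *
+ *    SkRegion::Iterator iter(rgn);
+ *    while (!iter.done()) {
+ *        const SkIRect& r = iter.rect();
+ *        // ... use r ...
+ *        iter.next();
+ *    }
+ */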
+
+ /**
+ * Returns the sequence of rectangles, sorted in Y and X, that make up
+ * this region intersected with the specified clip rectangle.
+ */
+ class SK_API Cliperator {
+ public:
+ Cliperator(const SkRegion&, const SkIRect& clip);
+ bool done() { return fDone; }
+ void next();
+ const SkIRect& rect() const { return fRect; }
+
+ private:
+ Iterator fIter;
+ SkIRect fClip;
+ SkIRect fRect;
+ bool fDone;
+ };
+
+ /**
+ * Returns the sequence of runs that make up this region for the specified
+ * Y scanline, clipped to the specified left and right X values.
+ */
+ class Spanerator {
+ public:
+ Spanerator(const SkRegion&, int y, int left, int right);
+ bool next(int* left, int* right);
+
+ private:
+ const SkRegion::RunType* fRuns;
+ int fLeft, fRight;
+ bool fDone;
+ };
+
+ /**
+ * Write the region to the buffer, and return the number of bytes written.
+ * If buffer is NULL, it still returns the number of bytes that would be written.
+ */
+ size_t writeToMemory(void* buffer) const;
+ /**
+ * Initializes the region from the buffer
+ *
+ * @param buffer Memory to read from
+ * @param length Amount of memory available in the buffer
+ * @return number of bytes read (must be a multiple of 4) or
+ * 0 if there was not enough memory available
+ */
+ size_t readFromMemory(const void* buffer, size_t length);
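+
+ /**
+ * Example (illustrative sketch of the two calls above; assumes <vector> is
+ * available and src is an existing SkRegion):
+ *
+ *    size_t size = src.writeToMemory(nullptr);  // query the required size
+ *    std::vector<char> buffer(size);
+ *    src.writeToMemory(buffer.data());
+ *    SkRegion dst;
+ *    size_t bytesRead = dst.readFromMemory(buffer.data(), buffer.size());
+ *    SkASSERT(bytesRead == size);
+ */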
+
+ /**
+ * Returns a reference to a global empty region. Just a convenience for
+ * callers that need a const empty region.
+ */
+ static const SkRegion& GetEmptyRegion();
+
+ SkDEBUGCODE(void dump() const;)
+ SkDEBUGCODE(void validate() const;)
+ SkDEBUGCODE(static void UnitTest();)
+
+ // expose this to allow for regression test on complex regions
+ SkDEBUGCODE(bool debugSetRuns(const RunType runs[], int count);)
+
+private:
+ enum {
+ kOpCount = kReplace_Op + 1
+ };
+
+ enum {
+ // T
+ // [B N L R S]
+ // S
+ kRectRegionRuns = 7
+ };
+
+ friend class android::Region; // needed for marshalling efficiently
+
+ struct RunHead;
+
+ // allocate space for count runs
+ void allocateRuns(int count);
+ void allocateRuns(int count, int ySpanCount, int intervalCount);
+ void allocateRuns(const RunHead& src);
+
+ SkIRect fBounds;
+ RunHead* fRunHead;
+
+ void freeRuns();
+
+ /**
+ * Return the runs from this region, consing up fake runs if the region
+ * is empty or a rect. In those 2 cases, we use tmpStorage to hold the
+ * run data.
+ */
+ const RunType* getRuns(RunType tmpStorage[], int* intervals) const;
+
+ // This is called with runs[] that do not yet have their interval-count
+ // field set on each scanline. That is computed as part of this call
+ // (inside ComputeRunBounds).
+ bool setRuns(RunType runs[], int count);
+
+ int count_runtype_values(int* itop, int* ibot) const;
+
+ static void BuildRectRuns(const SkIRect& bounds,
+ RunType runs[kRectRegionRuns]);
+
+ // If the runs define a simple rect, return true and set bounds to that
+ // rect. If not, return false and ignore bounds.
+ static bool RunsAreARect(const SkRegion::RunType runs[], int count,
+ SkIRect* bounds);
+
+ /**
+ * If the last arg is null, just return if the result is non-empty,
+ * else store the result in the last arg.
+ */
+ static bool Oper(const SkRegion&, const SkRegion&, SkRegion::Op, SkRegion*);
+
+ friend struct RunHead;
+ friend class Iterator;
+ friend class Spanerator;
+ friend class SkRgnBuilder;
+ friend class SkFlatRegion;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkScalar.h b/gfx/skia/skia/include/core/SkScalar.h
new file mode 100644
index 000000000..922840fd1
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkScalar.h
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkScalar_DEFINED
+#define SkScalar_DEFINED
+
+#include "../private/SkFloatingPoint.h"
+
+// TODO: move this sort of check into SkPostConfig.h
+#define SK_SCALAR_IS_DOUBLE 0
+#undef SK_SCALAR_IS_FLOAT
+#define SK_SCALAR_IS_FLOAT 1
+
+
+#if SK_SCALAR_IS_FLOAT
+
+typedef float SkScalar;
+
+#define SK_Scalar1 1.0f
+#define SK_ScalarHalf 0.5f
+#define SK_ScalarSqrt2 1.41421356f
+#define SK_ScalarPI 3.14159265f
+#define SK_ScalarTanPIOver8 0.414213562f
+#define SK_ScalarRoot2Over2 0.707106781f
+#define SK_ScalarMax 3.402823466e+38f
+#define SK_ScalarInfinity SK_FloatInfinity
+#define SK_ScalarNegativeInfinity SK_FloatNegativeInfinity
+#define SK_ScalarNaN SK_FloatNaN
+
+#define SkScalarFloorToScalar(x) sk_float_floor(x)
+#define SkScalarCeilToScalar(x) sk_float_ceil(x)
+#define SkScalarRoundToScalar(x) sk_float_floor((x) + 0.5f)
+#define SkScalarTruncToScalar(x) sk_float_trunc(x)
+
+#define SkScalarFloorToInt(x) sk_float_floor2int(x)
+#define SkScalarCeilToInt(x) sk_float_ceil2int(x)
+#define SkScalarRoundToInt(x) sk_float_round2int(x)
+
+#define SkScalarAbs(x) sk_float_abs(x)
+#define SkScalarCopySign(x, y) sk_float_copysign(x, y)
+#define SkScalarMod(x, y) sk_float_mod(x,y)
+#define SkScalarSqrt(x) sk_float_sqrt(x)
+#define SkScalarPow(b, e) sk_float_pow(b, e)
+
+#define SkScalarSin(radians) (float)sk_float_sin(radians)
+#define SkScalarCos(radians) (float)sk_float_cos(radians)
+#define SkScalarTan(radians) (float)sk_float_tan(radians)
+#define SkScalarASin(val) (float)sk_float_asin(val)
+#define SkScalarACos(val) (float)sk_float_acos(val)
+#define SkScalarATan2(y, x) (float)sk_float_atan2(y,x)
+#define SkScalarExp(x) (float)sk_float_exp(x)
+#define SkScalarLog(x) (float)sk_float_log(x)
+#define SkScalarLog2(x) (float)sk_float_log2(x)
+
+#else // SK_SCALAR_IS_DOUBLE
+
+typedef double SkScalar;
+
+#define SK_Scalar1 1.0
+#define SK_ScalarHalf 0.5
+#define SK_ScalarSqrt2 1.414213562373095
+#define SK_ScalarPI 3.141592653589793
+#define SK_ScalarTanPIOver8 0.4142135623731
+#define SK_ScalarRoot2Over2 0.70710678118655
+#define SK_ScalarMax 1.7976931348623157e+308
+#define SK_ScalarInfinity SK_DoubleInfinity
+#define SK_ScalarNegativeInfinity SK_DoubleNegativeInfinity
+#define SK_ScalarNaN SK_DoubleNaN
+
+#define SkScalarFloorToScalar(x) floor(x)
+#define SkScalarCeilToScalar(x) ceil(x)
+#define SkScalarRoundToScalar(x) floor((x) + 0.5)
+#define SkScalarTruncToScalar(x) trunc(x)
+
+#define SkScalarFloorToInt(x) (int)floor(x)
+#define SkScalarCeilToInt(x) (int)ceil(x)
+#define SkScalarRoundToInt(x) (int)floor((x) + 0.5)
+
+#define SkScalarAbs(x) abs(x)
+#define SkScalarCopySign(x, y) copysign(x, y)
+#define SkScalarMod(x, y) fmod(x,y)
+#define SkScalarSqrt(x) sqrt(x)
+#define SkScalarPow(b, e) pow(b, e)
+
+#define SkScalarSin(radians) sin(radians)
+#define SkScalarCos(radians) cos(radians)
+#define SkScalarTan(radians) tan(radians)
+#define SkScalarASin(val) asin(val)
+#define SkScalarACos(val) acos(val)
+#define SkScalarATan2(y, x) atan2(y,x)
+#define SkScalarExp(x) exp(x)
+#define SkScalarLog(x) log(x)
+#define SkScalarLog2(x) log2(x)
+
+#endif
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+#define SkIntToScalar(x) static_cast<SkScalar>(x)
+#define SkIntToFloat(x) static_cast<float>(x)
+#define SkScalarTruncToInt(x) static_cast<int>(x)
+
+#define SkScalarToFloat(x) static_cast<float>(x)
+#define SkFloatToScalar(x) static_cast<SkScalar>(x)
+#define SkScalarToDouble(x) static_cast<double>(x)
+#define SkDoubleToScalar(x) static_cast<SkScalar>(x)
+
+#define SK_ScalarMin (-SK_ScalarMax)
+
+static inline bool SkScalarIsNaN(SkScalar x) { return x != x; }
+
+/** Returns true if x is not NaN and not infinite
+ */
+static inline bool SkScalarIsFinite(SkScalar x) {
+ // We rely on the following behavior of infinities and nans
+ // 0 * finite --> 0
+ // 0 * infinity --> NaN
+ // 0 * NaN --> NaN
+ SkScalar prod = x * 0;
+ // At this point, prod will either be NaN or 0
+ return !SkScalarIsNaN(prod);
+}
+
+static inline bool SkScalarsAreFinite(SkScalar a, SkScalar b) {
+ SkScalar prod = 0;
+ prod *= a;
+ prod *= b;
+ // At this point, prod will either be NaN or 0
+ return !SkScalarIsNaN(prod);
+}
+
+static inline bool SkScalarsAreFinite(const SkScalar array[], int count) {
+ SkScalar prod = 0;
+ for (int i = 0; i < count; ++i) {
+ prod *= array[i];
+ }
+ // At this point, prod will either be NaN or 0
+ return !SkScalarIsNaN(prod);
+}
+
+/**
+ * Variant of SkScalarRoundToInt, that performs the rounding step (adding 0.5) explicitly using
+ * double, to avoid possibly losing the low bit(s) of the answer before calling floor().
+ *
+ * This routine will likely be slower than SkScalarRoundToInt(), and should only be used when the
+ * extra precision is known to be valuable.
+ *
+ * In particular, this catches the following case:
+ * SkScalar x = 0.49999997;
+ * int ix = SkScalarRoundToInt(x);
+ * SkASSERT(0 == ix); // <--- fails
+ * ix = SkDScalarRoundToInt(x);
+ * SkASSERT(0 == ix); // <--- succeeds
+ */
+static inline int SkDScalarRoundToInt(SkScalar x) {
+ double xx = x;
+ xx += 0.5;
+ return (int)floor(xx);
+}
+
+/** Returns the fractional part of the scalar. */
+static inline SkScalar SkScalarFraction(SkScalar x) {
+ return x - SkScalarTruncToScalar(x);
+}
+
+static inline SkScalar SkScalarClampMax(SkScalar x, SkScalar max) {
+ x = SkTMin(x, max);
+ x = SkTMax<SkScalar>(x, 0);
+ return x;
+}
+
+static inline SkScalar SkScalarPin(SkScalar x, SkScalar min, SkScalar max) {
+ return SkTPin(x, min, max);
+}
+
+SkScalar SkScalarSinCos(SkScalar radians, SkScalar* cosValue);
+
+static inline SkScalar SkScalarSquare(SkScalar x) { return x * x; }
+
+#define SkScalarMul(a, b) ((SkScalar)(a) * (b))
+#define SkScalarMulAdd(a, b, c) ((SkScalar)(a) * (b) + (c))
+#define SkScalarMulDiv(a, b, c) ((SkScalar)(a) * (b) / (c))
+#define SkScalarInvert(x) (SK_Scalar1 / (x))
+#define SkScalarFastInvert(x) (SK_Scalar1 / (x))
+#define SkScalarAve(a, b) (((a) + (b)) * SK_ScalarHalf)
+#define SkScalarHalf(a) ((a) * SK_ScalarHalf)
+
+#define SkDegreesToRadians(degrees) ((degrees) * (SK_ScalarPI / 180))
+#define SkRadiansToDegrees(radians) ((radians) * (180 / SK_ScalarPI))
+
+static inline SkScalar SkMaxScalar(SkScalar a, SkScalar b) { return a > b ? a : b; }
+static inline SkScalar SkMinScalar(SkScalar a, SkScalar b) { return a < b ? a : b; }
+
+static inline bool SkScalarIsInt(SkScalar x) {
+ return x == (SkScalar)(int)x;
+}
+
+/**
+ * Returns -1, 0, or 1 depending on the sign of x:
+ * -1 if x < 0
+ * 0 if x == 0
+ * 1 if x > 0
+ */
+static inline int SkScalarSignAsInt(SkScalar x) {
+ return x < 0 ? -1 : (x > 0);
+}
+
+// Scalar result version of above
+static inline SkScalar SkScalarSignAsScalar(SkScalar x) {
+ return x < 0 ? -SK_Scalar1 : ((x > 0) ? SK_Scalar1 : 0);
+}
+
+#define SK_ScalarNearlyZero (SK_Scalar1 / (1 << 12))
+
+static inline bool SkScalarNearlyZero(SkScalar x,
+ SkScalar tolerance = SK_ScalarNearlyZero) {
+ SkASSERT(tolerance >= 0);
+ return SkScalarAbs(x) <= tolerance;
+}
+
+static inline bool SkScalarNearlyEqual(SkScalar x, SkScalar y,
+ SkScalar tolerance = SK_ScalarNearlyZero) {
+ SkASSERT(tolerance >= 0);
+ return SkScalarAbs(x-y) <= tolerance;
+}
+
+/** Linearly interpolate between A and B, based on t.
+ If t is 0, return A
+ If t is 1, return B
+ else interpolate.
+ t must be [0..SK_Scalar1]
+*/
+static inline SkScalar SkScalarInterp(SkScalar A, SkScalar B, SkScalar t) {
+ SkASSERT(t >= 0 && t <= SK_Scalar1);
+ return A + (B - A) * t;
+}
+
+/** Interpolate along the function described by (keys[length], values[length])
+ for the passed searchKey. SearchKeys outside the range keys[0]-keys[length-1]
+ clamp to the min or max value. This function was inspired by a desire
+ to change the multiplier for thickness in fakeBold; therefore it assumes
+ the number of pairs (length) will be small, and a linear search is used.
+ Repeated keys are allowed for discontinuous functions (so long as keys is
+ monotonically increasing), and if key is the value of a repeated scalar in
+ keys, the first one will be used. However, that may change if a binary
+ search is used.
+*/
+SkScalar SkScalarInterpFunc(SkScalar searchKey, const SkScalar keys[],
+ const SkScalar values[], int length);
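+
+/**
+ * Example (illustrative sketch): a piecewise-linear lookup over three
+ * key/value pairs.
+ *
+ *    const SkScalar keys[]   = { 0.0f, 0.5f, 1.0f };
+ *    const SkScalar values[] = { 1.0f, 2.0f, 4.0f };
+ *    SkScalar v = SkScalarInterpFunc(0.75f, keys, values, 3);  // v == 3.0f
+ */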
+
+/*
+ * Helper to compare an array of scalars.
+ */
+static inline bool SkScalarsEqual(const SkScalar a[], const SkScalar b[], int n) {
+ SkASSERT(n >= 0);
+ for (int i = 0; i < n; ++i) {
+ if (a[i] != b[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkShader.h b/gfx/skia/skia/include/core/SkShader.h
new file mode 100644
index 000000000..efd9aa075
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkShader.h
@@ -0,0 +1,549 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkShader_DEFINED
+#define SkShader_DEFINED
+
+#include "SkBitmap.h"
+#include "SkFlattenable.h"
+#include "SkImageInfo.h"
+#include "SkMask.h"
+#include "SkMatrix.h"
+#include "SkPaint.h"
+#include "../gpu/GrColor.h"
+
+class SkColorFilter;
+class SkColorSpace;
+class SkImage;
+class SkPath;
+class SkPicture;
+class SkXfermode;
+class GrContext;
+class GrFragmentProcessor;
+
+/** \class SkShader
+ *
+ * Shaders specify the source color(s) for what is being drawn. If a paint
+ * has no shader, then the paint's color is used. If the paint has a
+ * shader, then the shader's color(s) are use instead, but they are
+ * modulated by the paint's alpha. This makes it easy to create a shader
+ * once (e.g. bitmap tiling or gradient) and then change its transparency
+ * w/o having to modify the original shader... only the paint's alpha needs
+ * to be modified.
+ */
+class SK_API SkShader : public SkFlattenable {
+public:
+ SkShader(const SkMatrix* localMatrix = NULL);
+ virtual ~SkShader();
+
+ /**
+ * Returns the local matrix.
+ *
+ * FIXME: This can be incorrect for a Shader with its own local matrix
+ * that is also wrapped via CreateLocalMatrixShader.
+ */
+ const SkMatrix& getLocalMatrix() const { return fLocalMatrix; }
+
+ enum TileMode {
+ /** replicate the edge color if the shader draws outside of its
+ * original bounds
+ */
+ kClamp_TileMode,
+
+ /** repeat the shader's image horizontally and vertically */
+ kRepeat_TileMode,
+
+ /** repeat the shader's image horizontally and vertically, alternating
+ * mirror images so that adjacent images always seam
+ */
+ kMirror_TileMode,
+
+#if 0
+ /** only draw within the original domain, return 0 everywhere else */
+ kDecal_TileMode,
+#endif
+ };
+
+ enum {
+ kTileModeCount = kMirror_TileMode + 1
+ };
+
+ // override these in your subclass
+
+ enum Flags {
+ //!< set if all of the colors will be opaque
+ kOpaqueAlpha_Flag = 1 << 0,
+
+ /** set if the spans only vary in X (const in Y).
+ e.g. an Nx1 bitmap that is being tiled in Y, or a linear-gradient
+ that varies from left-to-right. This flag applies to the output of
+ shadeSpan().
+ */
+ kConstInY32_Flag = 1 << 1,
+
+ /** hint for the blitter that 4f is the preferred shading mode.
+ */
+ kPrefers4f_Flag = 1 << 2,
+ };
+
+ /**
+ * Returns true if the shader is guaranteed to produce only opaque
+ * colors, subject to the SkPaint using the shader to apply an opaque
+ * alpha value. Subclasses should override this to allow some
+ * optimizations.
+ */
+ virtual bool isOpaque() const { return false; }
+
+ /**
+ * ContextRec acts as a parameter bundle for creating Contexts.
+ */
+ struct ContextRec {
+ enum DstType {
+ kPMColor_DstType, // clients prefer shading into PMColor dest
+ kPM4f_DstType, // clients prefer shading into PM4f dest
+ };
+
+ ContextRec(const SkPaint& paint, const SkMatrix& matrix, const SkMatrix* localM,
+ DstType dstType)
+ : fPaint(&paint)
+ , fMatrix(&matrix)
+ , fLocalMatrix(localM)
+ , fPreferredDstType(dstType) {}
+
+ const SkPaint* fPaint; // the current paint associated with the draw
+ const SkMatrix* fMatrix; // the current matrix in the canvas
+ const SkMatrix* fLocalMatrix; // optional local matrix
+ const DstType fPreferredDstType; // the "natural" client dest type
+ };
+
+ class Context : public ::SkNoncopyable {
+ public:
+ Context(const SkShader& shader, const ContextRec&);
+
+ virtual ~Context();
+
+ /**
+ * Called sometimes before drawing with this shader. Return the type of
+ * alpha your shader will return. The default implementation returns 0.
+ * Your subclass should override if it can (even sometimes) report a
+ * non-zero value, since that will enable various blitters to perform
+ * faster.
+ */
+ virtual uint32_t getFlags() const { return 0; }
+
+ /**
+ * Called for each span of the object being drawn. Your subclass should
+ * set the appropriate colors (with premultiplied alpha) that correspond
+ * to the specified device coordinates.
+ */
+ virtual void shadeSpan(int x, int y, SkPMColor[], int count) = 0;
+
+ virtual void shadeSpan4f(int x, int y, SkPM4f[], int count);
+
+ struct BlitState;
+ typedef void (*BlitBW)(BlitState*,
+ int x, int y, const SkPixmap&, int count);
+ typedef void (*BlitAA)(BlitState*,
+ int x, int y, const SkPixmap&, int count, const SkAlpha[]);
+
+ struct BlitState {
+ // inputs
+ Context* fCtx;
+ SkXfermode* fXfer;
+
+ // outputs
+ enum { N = 2 };
+ void* fStorage[N];
+ BlitBW fBlitBW;
+ BlitAA fBlitAA;
+ };
+
+ // Returns true if one or more of the blitprocs are set in the BlitState
+ bool chooseBlitProcs(const SkImageInfo& info, BlitState* state) {
+ state->fBlitBW = nullptr;
+ state->fBlitAA = nullptr;
+ if (this->onChooseBlitProcs(info, state)) {
+ SkASSERT(state->fBlitBW || state->fBlitAA);
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * The const void* ctx is only const because all the implementations are const.
+ * This can be changed to non-const if a new shade proc needs to change the ctx.
+ */
+ typedef void (*ShadeProc)(const void* ctx, int x, int y, SkPMColor[], int count);
+ virtual ShadeProc asAShadeProc(void** ctx);
+
+ /**
+ * Similar to shadeSpan, but only returns the alpha-channel for a span.
+ * The default implementation calls shadeSpan() and then extracts the alpha
+ * values from the returned colors.
+ */
+ virtual void shadeSpanAlpha(int x, int y, uint8_t alpha[], int count);
+
+ // Notification from blitter::blitMask in case we need to see the non-alpha channels
+ virtual void set3DMask(const SkMask*) {}
+
+ protected:
+ // Reference to shader, so we don't have to dupe information.
+ const SkShader& fShader;
+
+ enum MatrixClass {
+ kLinear_MatrixClass, // no perspective
+ kFixedStepInX_MatrixClass, // fast perspective, need to call fixedStepInX() each
+ // scanline
+ kPerspective_MatrixClass // slow perspective, need to map points for each pixel
+ };
+ static MatrixClass ComputeMatrixClass(const SkMatrix&);
+
+ uint8_t getPaintAlpha() const { return fPaintAlpha; }
+ const SkMatrix& getTotalInverse() const { return fTotalInverse; }
+ MatrixClass getInverseClass() const { return (MatrixClass)fTotalInverseClass; }
+ const SkMatrix& getCTM() const { return fCTM; }
+
+ virtual bool onChooseBlitProcs(const SkImageInfo&, BlitState*) { return false; }
+
+ private:
+ SkMatrix fCTM;
+ SkMatrix fTotalInverse;
+ uint8_t fPaintAlpha;
+ uint8_t fTotalInverseClass;
+
+ typedef SkNoncopyable INHERITED;
+ };
+
+ /**
+ * Create the actual object that does the shading.
+ * Size of storage must be >= contextSize.
+ */
+ Context* createContext(const ContextRec&, void* storage) const;
+
+ /**
+ * Return the size of a Context returned by createContext.
+ */
+ size_t contextSize(const ContextRec&) const;
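+
+ /**
+ * Example (illustrative sketch of contextSize/createContext; shader, paint,
+ * ctm, x, y, span, and count are assumed to already exist, and the in-place
+ * destruction shown here reflects how callers typically manage the
+ * caller-provided storage):
+ *
+ *    SkShader::ContextRec rec(paint, ctm, nullptr,
+ *                             SkShader::ContextRec::kPMColor_DstType);
+ *    void* storage = sk_malloc_throw(shader->contextSize(rec));
+ *    SkShader::Context* ctx = shader->createContext(rec, storage);
+ *    if (ctx) {
+ *        ctx->shadeSpan(x, y, span, count);
+ *        ctx->~Context();  // destroy in place; storage stays caller-owned
+ *    }
+ *    sk_free(storage);
+ */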
+
+#ifdef SK_SUPPORT_LEGACY_SHADER_ISABITMAP
+ /**
+ * Returns true if this shader is just a bitmap, and if not null, returns the bitmap,
+ * localMatrix, and tilemodes. If this is not a bitmap, returns false and ignores the
+ * out-parameters.
+ */
+ bool isABitmap(SkBitmap* outTexture, SkMatrix* outMatrix, TileMode xy[2]) const {
+ return this->onIsABitmap(outTexture, outMatrix, xy);
+ }
+
+ bool isABitmap() const {
+ return this->isABitmap(nullptr, nullptr, nullptr);
+ }
+#endif
+
+ /**
+ * Iff this shader is backed by a single SkImage, return its ptr (the caller must ref this
+ * if they want to keep it longer than the lifetime of the shader). If not, return nullptr.
+ */
+ SkImage* isAImage(SkMatrix* localMatrix, TileMode xy[2]) const {
+ return this->onIsAImage(localMatrix, xy);
+ }
+
+ bool isAImage() const {
+ return this->isAImage(nullptr, nullptr) != nullptr;
+ }
+
+ /**
+ * If the shader subclass can be represented as a gradient, asAGradient
+ * returns the matching GradientType enum (or kNone_GradientType if it
+ * cannot). Also, if info is not null, asAGradient populates info with
+ * the relevant (see below) parameters for the gradient. fColorCount
+ * is both an input and output parameter. On input, it indicates how
+ * many entries in fColors and fColorOffsets can be used, if they are
+ * non-NULL. After asAGradient has run, fColorCount indicates how
+ * many color-offset pairs there are in the gradient. If there is
+ * insufficient space to store all of the color-offset pairs, fColors
+ * and fColorOffsets will not be altered. fColorOffsets specifies
+ * where on the range of 0 to 1 to transition to the given color.
+ * The meaning of fPoint and fRadius is dependent on the type of gradient.
+ *
+ * None:
+ * info is ignored.
+ * Color:
+ * fColorOffsets[0] is meaningless.
+ * Linear:
+ * fPoint[0] and fPoint[1] are the end-points of the gradient
+ * Radial:
+ * fPoint[0] and fRadius[0] are the center and radius
+ * Conical:
+ * fPoint[0] and fRadius[0] are the center and radius of the 1st circle
+ * fPoint[1] and fRadius[1] are the center and radius of the 2nd circle
+ * Sweep:
+ * fPoint[0] is the center of the sweep.
+ */
+
+ enum GradientType {
+ kNone_GradientType,
+ kColor_GradientType,
+ kLinear_GradientType,
+ kRadial_GradientType,
+ kSweep_GradientType,
+ kConical_GradientType,
+ kLast_GradientType = kConical_GradientType
+ };
+
+ struct GradientInfo {
+ int fColorCount; //!< In-out parameter, specifies passed size
+ // of fColors/fColorOffsets on input, and
+ // actual number of colors/offsets on
+ // output.
+ SkColor* fColors; //!< The colors in the gradient.
+ SkScalar* fColorOffsets; //!< The unit offset for color transitions.
+ SkPoint fPoint[2]; //!< Type specific, see above.
+ SkScalar fRadius[2]; //!< Type specific, see above.
+ TileMode fTileMode; //!< The tile mode used.
+ uint32_t fGradientFlags; //!< see SkGradientShader::Flags
+ };
+
+ virtual GradientType asAGradient(GradientInfo* info) const;
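+
+ /**
+ * Example (illustrative sketch of the two-pass fColorCount protocol
+ * described above; assumes <vector> and an existing shader):
+ *
+ *    SkShader::GradientInfo info;
+ *    info.fColors = nullptr;        // first pass: just ask for the count
+ *    info.fColorOffsets = nullptr;
+ *    info.fColorCount = 0;
+ *    if (shader->asAGradient(&info) != SkShader::kNone_GradientType) {
+ *        std::vector<SkColor>  colors(info.fColorCount);
+ *        std::vector<SkScalar> offsets(info.fColorCount);
+ *        info.fColors = colors.data();
+ *        info.fColorOffsets = offsets.data();
+ *        shader->asAGradient(&info);  // second pass: fill the arrays
+ *    }
+ */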
+
+ /**
+ * If the shader subclass is composed of two shaders, return true, and if rec is not NULL,
+ * fill it out with info about the shader.
+ *
+ * These are bare pointers; the ownership and reference count are unchanged.
+ */
+
+ struct ComposeRec {
+ const SkShader* fShaderA;
+ const SkShader* fShaderB;
+ const SkXfermode* fMode;
+ };
+
+ virtual bool asACompose(ComposeRec*) const { return false; }
+
+#if SK_SUPPORT_GPU
+ struct AsFPArgs {
+ AsFPArgs(GrContext* context,
+ const SkMatrix* viewMatrix,
+ const SkMatrix* localMatrix,
+ SkFilterQuality filterQuality,
+ SkColorSpace* dstColorSpace,
+ SkSourceGammaTreatment gammaTreatment)
+ : fContext(context)
+ , fViewMatrix(viewMatrix)
+ , fLocalMatrix(localMatrix)
+ , fFilterQuality(filterQuality)
+ , fDstColorSpace(dstColorSpace)
+ , fGammaTreatment(gammaTreatment) {}
+
+ GrContext* fContext;
+ const SkMatrix* fViewMatrix;
+ const SkMatrix* fLocalMatrix;
+ SkFilterQuality fFilterQuality;
+ SkColorSpace* fDstColorSpace;
+ SkSourceGammaTreatment fGammaTreatment;
+ };
+
+ /**
+ * Returns a GrFragmentProcessor that implements the shader for the GPU backend. NULL is
+ * returned if there is no GPU implementation.
+ *
+ * The GPU device does not call SkShader::createContext(); instead, the view matrix,
+ * local matrix, and filter quality are passed in directly.
+ *
+ * The GrContext may be used by the shader to create textures that are required by the
+ * returned processor.
+ *
+ * The returned GrFragmentProcessor should expect an unpremultiplied input color and
+ * produce a premultiplied output.
+ */
+ virtual sk_sp<GrFragmentProcessor> asFragmentProcessor(const AsFPArgs&) const;
+#endif
+
+ /**
+ * If the shader can represent its "average" luminance in a single color, return true and
+ * if color is not NULL, return that color. If it cannot, return false and ignore the color
+ * parameter.
+ *
+ * Note: if this returns true, the returned color will always be opaque, as only the RGB
+ * components are used to compute luminance.
+ */
+ bool asLuminanceColor(SkColor*) const;
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ /**
+ * If the shader is a custom shader which has data the caller might want, call this function
+ * to get that data.
+ */
+ virtual bool asACustomShader(void** /* customData */) const { return false; }
+#endif
+
+ //////////////////////////////////////////////////////////////////////////
+ // Methods to create combinations or variants of shaders
+
+ /**
+ * Return a shader that will apply the specified localMatrix to this shader.
+ * The specified matrix will be applied before any matrix associated with this shader.
+ */
+ sk_sp<SkShader> makeWithLocalMatrix(const SkMatrix&) const;
+
+ /**
+ * Create a new shader that produces the same colors as invoking this shader and then applying
+ * the colorfilter.
+ */
+ sk_sp<SkShader> makeWithColorFilter(sk_sp<SkColorFilter>) const;
+
+ //////////////////////////////////////////////////////////////////////////
+ // Factory methods for stock shaders
+
+ /**
+ * Call this to create a new "empty" shader, that will not draw anything.
+ */
+ static sk_sp<SkShader> MakeEmptyShader();
+
+ /**
+ * Call this to create a new shader that just draws the specified color. This should always
+ * draw the same as a paint with this color (and no shader).
+ */
+ static sk_sp<SkShader> MakeColorShader(SkColor);
+
+ /**
+ * Create a shader that draws the specified color (in the specified colorspace).
+ *
+ * This works around the limitation that SkPaint::setColor() only takes byte values, and does
+ * not support specific colorspaces.
+ */
+ static sk_sp<SkShader> MakeColorShader(const SkColor4f&, sk_sp<SkColorSpace>);
+
+ static sk_sp<SkShader> MakeComposeShader(sk_sp<SkShader> dst, sk_sp<SkShader> src,
+ SkXfermode::Mode);
+
+#ifdef SK_SUPPORT_LEGACY_CREATESHADER_PTR
+ static SkShader* CreateEmptyShader() { return MakeEmptyShader().release(); }
+ static SkShader* CreateColorShader(SkColor c) { return MakeColorShader(c).release(); }
+ static SkShader* CreateBitmapShader(const SkBitmap& src, TileMode tmx, TileMode tmy,
+ const SkMatrix* localMatrix = nullptr) {
+ return MakeBitmapShader(src, tmx, tmy, localMatrix).release();
+ }
+ static SkShader* CreateComposeShader(SkShader* dst, SkShader* src, SkXfermode::Mode mode);
+ static SkShader* CreateComposeShader(SkShader* dst, SkShader* src, SkXfermode* xfer);
+ static SkShader* CreatePictureShader(const SkPicture* src, TileMode tmx, TileMode tmy,
+ const SkMatrix* localMatrix, const SkRect* tile);
+
+ SkShader* newWithLocalMatrix(const SkMatrix& matrix) const {
+ return this->makeWithLocalMatrix(matrix).release();
+ }
+ SkShader* newWithColorFilter(SkColorFilter* filter) const;
+#endif
+
+ /**
+ * Create a new compose shader, given shaders dst, src, and a combining xfermode mode.
+ * The xfermode is called with the output of the two shaders, and its output is returned.
+ * If xfer is null, SkXfermode::kSrcOver_Mode is assumed.
+ *
+ * The caller is responsible for managing its reference-count for the xfer (if not null).
+ */
+ static sk_sp<SkShader> MakeComposeShader(sk_sp<SkShader> dst, sk_sp<SkShader> src,
+ sk_sp<SkXfermode> xfer);
+#ifdef SK_SUPPORT_LEGACY_XFERMODE_PTR
+ static sk_sp<SkShader> MakeComposeShader(sk_sp<SkShader> dst, sk_sp<SkShader> src,
+ SkXfermode* xfer);
+#endif
+
+ /** Call this to create a new shader that will draw with the specified bitmap.
+ *
+ * If the bitmap cannot be used (e.g. has no pixels, or its dimensions
+ * exceed implementation limits (currently at 64K - 1)) then SkEmptyShader
+ * may be returned.
+ *
+ * If the src is kA8_Config then that mask will be colorized using the color on
+ * the paint.
+ *
+ * @param src The bitmap to use inside the shader
+ * @param tmx The tiling mode to use when sampling the bitmap in the x-direction.
+ * @param tmy The tiling mode to use when sampling the bitmap in the y-direction.
+ * @return Returns a new shader object. Note: this function never returns null.
+ */
+ static sk_sp<SkShader> MakeBitmapShader(const SkBitmap& src, TileMode tmx, TileMode tmy,
+ const SkMatrix* localMatrix = nullptr);
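+
+ /**
+ * Example (illustrative sketch; bitmap and canvas are assumed to already
+ * exist):
+ *
+ *    SkPaint paint;
+ *    paint.setShader(SkShader::MakeBitmapShader(bitmap,
+ *                                               SkShader::kRepeat_TileMode,
+ *                                               SkShader::kRepeat_TileMode));
+ *    canvas->drawRect(SkRect::MakeWH(256, 256), paint);
+ */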
+
+ // NOTE: You can create an SkImage Shader with SkImage::newShader().
+
+ /** Call this to create a new shader that will draw with the specified picture.
+ *
+ * @param src The picture to use inside the shader (if not NULL, its ref count
+ * is incremented). The SkPicture must not be changed after
+ * successfully creating a picture shader.
+ * @param tmx The tiling mode to use when sampling the bitmap in the x-direction.
+ * @param tmy The tiling mode to use when sampling the bitmap in the y-direction.
+ * @param tile The tile rectangle in picture coordinates: this represents the subset
+ * (or superset) of the picture used when building a tile. It is not
+ * affected by localMatrix and does not imply scaling (only translation
+ * and cropping). If null, the tile rect is considered equal to the picture
+ * bounds.
+ * @return Returns a new shader object. Note: this function never returns null.
+ */
+ static sk_sp<SkShader> MakePictureShader(sk_sp<SkPicture> src, TileMode tmx, TileMode tmy,
+ const SkMatrix* localMatrix, const SkRect* tile);
+
+ /**
+ * If this shader can be represented by another shader + a localMatrix, return that shader
+ * and, if not NULL, the localMatrix. If not, return NULL and ignore the localMatrix parameter.
+ *
+ * Note: the returned shader (if not NULL) will have been ref'd, and it is the responsibility
+ * of the caller to balance that with unref() when they are done.
+ */
+ virtual SkShader* refAsALocalMatrixShader(SkMatrix* localMatrix) const;
+
+ SK_TO_STRING_VIRT()
+ SK_DEFINE_FLATTENABLE_TYPE(SkShader)
+ SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP()
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+ bool computeTotalInverse(const ContextRec&, SkMatrix* totalInverse) const;
+
+ /**
+ * Your subclass must also override contextSize() if it overrides onCreateContext().
+ * Base class impl returns NULL.
+ */
+ virtual Context* onCreateContext(const ContextRec&, void* storage) const;
+
+ /**
+ * Override this if your subclass overrides createContext, to return the correct size of
+ * your subclass' context.
+ */
+ virtual size_t onContextSize(const ContextRec&) const;
+
+ virtual bool onAsLuminanceColor(SkColor*) const {
+ return false;
+ }
+
+#ifdef SK_SUPPORT_LEGACY_SHADER_ISABITMAP
+ virtual bool onIsABitmap(SkBitmap*, SkMatrix*, TileMode[2]) const {
+ return false;
+ }
+#endif
+
+ virtual SkImage* onIsAImage(SkMatrix*, TileMode[2]) const {
+ return nullptr;
+ }
+
+private:
+ // This is essentially const, but not officially so it can be modified in
+ // constructors.
+ SkMatrix fLocalMatrix;
+
+ // So the SkLocalMatrixShader can whack fLocalMatrix in its SkReadBuffer constructor.
+ friend class SkLocalMatrixShader;
+ friend class SkBitmapProcLegacyShader; // for computeTotalInverse()
+
+ typedef SkFlattenable INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkSize.h b/gfx/skia/skia/include/core/SkSize.h
new file mode 100644
index 000000000..7bc8c7165
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkSize.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSize_DEFINED
+#define SkSize_DEFINED
+
+#include "SkScalar.h"
+
+template <typename T> struct SkTSize {
+ T fWidth;
+ T fHeight;
+
+ static SkTSize Make(T w, T h) {
+ SkTSize s;
+ s.fWidth = w;
+ s.fHeight = h;
+ return s;
+ }
+
+ void set(T w, T h) {
+ fWidth = w;
+ fHeight = h;
+ }
+
+ /** Returns true iff fWidth == 0 && fHeight == 0
+ */
+ bool isZero() const {
+ return 0 == fWidth && 0 == fHeight;
+ }
+
+ /** Returns true if either width or height is <= 0 */
+ bool isEmpty() const {
+ return fWidth <= 0 || fHeight <= 0;
+ }
+
+ /** Set the width and height to 0 */
+ void setEmpty() {
+ fWidth = fHeight = 0;
+ }
+
+ T width() const { return fWidth; }
+ T height() const { return fHeight; }
+
+ /** If width or height is < 0, it is set to 0 */
+ void clampNegToZero() {
+ if (fWidth < 0) {
+ fWidth = 0;
+ }
+ if (fHeight < 0) {
+ fHeight = 0;
+ }
+ }
+
+ bool equals(T w, T h) const {
+ return fWidth == w && fHeight == h;
+ }
+};
+
+template <typename T>
+static inline bool operator==(const SkTSize<T>& a, const SkTSize<T>& b) {
+ return a.fWidth == b.fWidth && a.fHeight == b.fHeight;
+}
+
+template <typename T>
+static inline bool operator!=(const SkTSize<T>& a, const SkTSize<T>& b) {
+ return !(a == b);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef SkTSize<int32_t> SkISize;
+
+struct SkSize : public SkTSize<SkScalar> {
+ static SkSize Make(SkScalar w, SkScalar h) {
+ SkSize s;
+ s.fWidth = w;
+ s.fHeight = h;
+ return s;
+ }
+
+
+ SkSize& operator=(const SkISize& src) {
+ this->set(SkIntToScalar(src.fWidth), SkIntToScalar(src.fHeight));
+ return *this;
+ }
+
+ SkISize toRound() const {
+ SkISize s;
+ s.set(SkScalarRoundToInt(fWidth), SkScalarRoundToInt(fHeight));
+ return s;
+ }
+
+ SkISize toCeil() const {
+ SkISize s;
+ s.set(SkScalarCeilToInt(fWidth), SkScalarCeilToInt(fHeight));
+ return s;
+ }
+
+ SkISize toFloor() const {
+ SkISize s;
+ s.set(SkScalarFloorToInt(fWidth), SkScalarFloorToInt(fHeight));
+ return s;
+ }
+};
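+
+/**
+ * Example (illustrative sketch): converting a scalar size to integer sizes.
+ *
+ *    SkSize  size  = SkSize::Make(12.6f, 3.2f);
+ *    SkISize round = size.toRound();  // {13, 3}
+ *    SkISize ceil  = size.toCeil();   // {13, 4}
+ *    SkISize floor = size.toFloor();  // {12, 3}
+ */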
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkStream.h b/gfx/skia/skia/include/core/SkStream.h
new file mode 100644
index 000000000..7afce7124
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkStream.h
@@ -0,0 +1,467 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStream_DEFINED
+#define SkStream_DEFINED
+
+#include "SkData.h"
+#include "SkRefCnt.h"
+#include "SkScalar.h"
+
+class SkStream;
+class SkStreamRewindable;
+class SkStreamSeekable;
+class SkStreamAsset;
+class SkStreamMemory;
+
+/**
+ * SkStream -- abstraction for a source of bytes. Subclasses can be backed by
+ * memory, or a file, or something else.
+ *
+ * NOTE:
+ *
+ * Classic "streams" APIs are sort of async, in that on a request for N
+ * bytes, they may return fewer than N bytes on a given call, in which case
+ * the caller can "try again" to get more bytes, eventually (modulo an error)
+ * receiving their total N bytes.
+ *
+ * Skia streams behave differently. They are effectively synchronous, and will
+ * always return all N bytes of the request if possible. If they return fewer
+ * (the read() call returns the number of bytes read) then that means there is
+ * no more data (at EOF or hit an error). The caller should *not* call again
+ * in hopes of fulfilling more of the request.
+ */
+class SK_API SkStream : public SkNoncopyable {
+public:
+ virtual ~SkStream() {}
+
+ /**
+ * Attempts to open the specified file as a stream, returns nullptr on failure.
+ */
+ static std::unique_ptr<SkStreamAsset> MakeFromFile(const char path[]);
+
+ /** Reads or skips size number of bytes.
+ * If buffer == NULL, skip size bytes, return how many were skipped.
+ * If buffer != NULL, copy size bytes into buffer, return how many were copied.
+ * @param buffer when NULL skip size bytes, otherwise copy size bytes into buffer
+ * @param size the number of bytes to skip or copy
+ * @return the number of bytes actually read.
+ */
+ virtual size_t read(void* buffer, size_t size) = 0;
+
+ /** Skip size number of bytes.
+ * @return the actual number of bytes that could be skipped.
+ */
+ size_t skip(size_t size) {
+ return this->read(NULL, size);
+ }
+
+ /**
+ * Attempt to peek at size bytes.
+ * If this stream supports peeking, copy min(size, peekable bytes) into
+ * buffer, and return the number of bytes copied.
+ * If the stream does not support peeking, or cannot peek any bytes,
+ * return 0 and leave buffer unchanged.
+ * The stream is guaranteed to be in the same visible state after this
+ * call, regardless of success or failure.
+ * @param buffer Must not be NULL, and must be at least size bytes. Destination
+ * to copy bytes.
+ * @param size Number of bytes to copy.
+ * @return The number of bytes peeked/copied.
+ */
+ virtual size_t peek(void* /*buffer*/, size_t /*size*/) const { return 0; }
+
+ /** Returns true when all the bytes in the stream have been read.
+ * This may return true early (when there are no more bytes to be read)
+ * or late (after the first unsuccessful read).
+ */
+ virtual bool isAtEnd() const = 0;
+
+ int8_t readS8();
+ int16_t readS16();
+ int32_t readS32();
+
+ uint8_t readU8() { return (uint8_t)this->readS8(); }
+ uint16_t readU16() { return (uint16_t)this->readS16(); }
+ uint32_t readU32() { return (uint32_t)this->readS32(); }
+
+ bool readBool() { return this->readU8() != 0; }
+ SkScalar readScalar();
+ size_t readPackedUInt();
+
+//SkStreamRewindable
+ /** Rewinds to the beginning of the stream. Returns true if the stream is known
+ * to be at the beginning after this call returns.
+ */
+ virtual bool rewind() { return false; }
+
+ /** Duplicates this stream. If this cannot be done, returns NULL.
+ * The returned stream will be positioned at the beginning of its data.
+ */
+ virtual SkStreamRewindable* duplicate() const { return NULL; }
+
+//SkStreamSeekable
+ /** Returns true if this stream can report its current position. */
+ virtual bool hasPosition() const { return false; }
+ /** Returns the current position in the stream. If this cannot be done, returns 0. */
+ virtual size_t getPosition() const { return 0; }
+
+ /** Seeks to an absolute position in the stream. If this cannot be done, returns false.
+ * If an attempt is made to seek past the end of the stream, the position will be set
+ * to the end of the stream.
+ */
+ virtual bool seek(size_t /*position*/) { return false; }
+
+ /** Seeks to a relative offset in the stream. If this cannot be done, returns false.
+ * If an attempt is made to move to a position outside the stream, the position will be set
+ * to the closest point within the stream (beginning or end).
+ */
+ virtual bool move(long /*offset*/) { return false; }
+
+ /** Duplicates this stream. If this cannot be done, returns NULL.
+ * The returned stream will be positioned the same as this stream.
+ */
+ virtual SkStreamSeekable* fork() const { return NULL; }
+
+//SkStreamAsset
+ /** Returns true if this stream can report its total length. */
+ virtual bool hasLength() const { return false; }
+ /** Returns the total length of the stream. If this cannot be done, returns 0. */
+ virtual size_t getLength() const { return 0; }
+
+//SkStreamMemory
+ /** Returns the starting address for the data. If this cannot be done, returns NULL. */
+ //TODO: replace with virtual const SkData* getData()
+ virtual const void* getMemoryBase() { return NULL; }
+};
+
+/** SkStreamRewindable is a SkStream for which rewind and duplicate are required. */
+class SK_API SkStreamRewindable : public SkStream {
+public:
+ bool rewind() override = 0;
+ SkStreamRewindable* duplicate() const override = 0;
+};
+
+/** SkStreamSeekable is a SkStreamRewindable for which position, seek, move, and fork are required. */
+class SK_API SkStreamSeekable : public SkStreamRewindable {
+public:
+ SkStreamSeekable* duplicate() const override = 0;
+
+ bool hasPosition() const override { return true; }
+ size_t getPosition() const override = 0;
+ bool seek(size_t position) override = 0;
+ bool move(long offset) override = 0;
+ SkStreamSeekable* fork() const override = 0;
+};
+
+/** SkStreamAsset is a SkStreamSeekable for which getLength is required. */
+class SK_API SkStreamAsset : public SkStreamSeekable {
+public:
+ SkStreamAsset* duplicate() const override = 0;
+ SkStreamAsset* fork() const override = 0;
+
+ bool hasLength() const override { return true; }
+ size_t getLength() const override = 0;
+};
+
+/** SkStreamMemory is a SkStreamAsset for which getMemoryBase is required. */
+class SK_API SkStreamMemory : public SkStreamAsset {
+public:
+ SkStreamMemory* duplicate() const override = 0;
+ SkStreamMemory* fork() const override = 0;
+
+ const void* getMemoryBase() override = 0;
+};
+
+class SK_API SkWStream : SkNoncopyable {
+public:
+ virtual ~SkWStream();
+
+ /** Called to write bytes to a SkWStream. Returns true on success
+ @param buffer the address of at least size bytes to be written to the stream
+ @param size The number of bytes in buffer to write to the stream
+ @return true on success
+ */
+ virtual bool write(const void* buffer, size_t size) = 0;
+ virtual void newline();
+ virtual void flush();
+
+ virtual size_t bytesWritten() const = 0;
+
+ // helpers
+
+ bool write8(U8CPU);
+ bool write16(U16CPU);
+ bool write32(uint32_t);
+
+ bool writeText(const char text[]) {
+ SkASSERT(text);
+ return this->write(text, strlen(text));
+ }
+ bool writeDecAsText(int32_t);
+ bool writeBigDecAsText(int64_t, int minDigits = 0);
+ bool writeHexAsText(uint32_t, int minDigits = 0);
+ bool writeScalarAsText(SkScalar);
+
+ bool writeBool(bool v) { return this->write8(v); }
+ bool writeScalar(SkScalar);
+ bool writePackedUInt(size_t);
+
+ bool writeStream(SkStream* input, size_t length);
+
+ /**
+ * This returns the number of bytes in the stream required to store
+ * 'value'.
+ */
+ static int SizeOfPackedUInt(size_t value);
+};
+
+////////////////////////////////////////////////////////////////////////////////////////
+
+#include "SkString.h"
+#include <stdio.h>
+
+/** A stream that wraps a C FILE* file stream. */
+class SK_API SkFILEStream : public SkStreamAsset {
+public:
+ /** Initialize the stream by calling sk_fopen on the specified path.
+ * This internal stream will be closed in the destructor.
+ */
+ explicit SkFILEStream(const char path[] = NULL);
+
+ enum Ownership {
+ kCallerPasses_Ownership,
+ kCallerRetains_Ownership
+ };
+ /** Initialize the stream with an existing C file stream.
+ * While this stream exists, it assumes exclusive access to the C file stream.
+ * The C file stream will be closed in the destructor unless the caller specifies
+ * kCallerRetains_Ownership.
+ */
+ explicit SkFILEStream(FILE* file, Ownership ownership = kCallerPasses_Ownership);
+
+ virtual ~SkFILEStream();
+
+ /** Returns true if the current path could be opened. */
+ bool isValid() const { return fFILE != NULL; }
+
+ /** Close the current file, and open a new file with the specified path.
+ * If path is NULL, just close the current file.
+ */
+ void setPath(const char path[]);
+
+ size_t read(void* buffer, size_t size) override;
+ bool isAtEnd() const override;
+
+ bool rewind() override;
+ SkStreamAsset* duplicate() const override;
+
+ size_t getPosition() const override;
+ bool seek(size_t position) override;
+ bool move(long offset) override;
+ SkStreamAsset* fork() const override;
+
+ size_t getLength() const override;
+
+ const void* getMemoryBase() override;
+
+private:
+ FILE* fFILE;
+ SkString fName;
+ Ownership fOwnership;
+ // fData is lazily initialized when needed.
+ mutable sk_sp<SkData> fData;
+
+ typedef SkStreamAsset INHERITED;
+};
+
+class SK_API SkMemoryStream : public SkStreamMemory {
+public:
+ SkMemoryStream();
+
+ /** We allocate (and free) the memory. Write to it via getMemoryBase() */
+ SkMemoryStream(size_t length);
+
+ /** If copyData is true, the stream makes a private copy of the data. */
+ SkMemoryStream(const void* data, size_t length, bool copyData = false);
+
+#ifdef SK_SUPPORT_LEGACY_STREAM_DATA
+ /** Use the specified data as the memory for this stream.
+ * The stream will call ref() on the data (assuming it is not NULL).
+ * DEPRECATED
+ */
+ SkMemoryStream(SkData*);
+#endif
+
+ /** Creates the stream to read from the specified data */
+ SkMemoryStream(sk_sp<SkData>);
+
+ /** Resets the stream to the specified data and length,
+ just like the constructor.
+ If copyData is true, the stream makes a private copy of the data.
+ */
+ virtual void setMemory(const void* data, size_t length,
+ bool copyData = false);
+ /** Replace any memory buffer with the specified buffer. The caller
+ must have allocated data with sk_malloc or sk_realloc, since it
+ will be freed with sk_free.
+ */
+ void setMemoryOwned(const void* data, size_t length);
+
+ sk_sp<SkData> asData() const { return fData; }
+ void setData(sk_sp<SkData>);
+#ifdef SK_SUPPORT_LEGACY_STREAM_DATA
+ /** Return the stream's data in a SkData.
+ * The caller must call unref() when it is finished using the data.
+ */
+ SkData* copyToData() const { return asData().release(); }
+
+ /**
+ * Use the specified data as the memory for this stream.
+ * The stream will call ref() on the data (assuming it is not NULL).
+ * The function returns the data parameter as a convenience.
+ */
+ SkData* setData(SkData* data) {
+ this->setData(sk_ref_sp(data));
+ return data;
+ }
+#endif
+
+ void skipToAlign4();
+ const void* getAtPos();
+
+ size_t read(void* buffer, size_t size) override;
+ bool isAtEnd() const override;
+
+ size_t peek(void* buffer, size_t size) const override;
+
+ bool rewind() override;
+ SkMemoryStream* duplicate() const override;
+
+ size_t getPosition() const override;
+ bool seek(size_t position) override;
+ bool move(long offset) override;
+ SkMemoryStream* fork() const override;
+
+ size_t getLength() const override;
+
+ const void* getMemoryBase() override;
+
+private:
+ sk_sp<SkData> fData;
+ size_t fOffset;
+
+ typedef SkStreamMemory INHERITED;
+};
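+
+/**
+ * Example (illustrative sketch): reading typed values out of a fixed buffer
+ * without copying it.
+ *
+ *    const uint8_t bytes[] = { 1, 2, 3, 4 };
+ *    SkMemoryStream stream(bytes, sizeof(bytes), false);  // no private copy
+ *    uint8_t  a = stream.readU8();
+ *    uint16_t b = stream.readU16();  // value depends on endianness
+ *    SkASSERT(3 == stream.getPosition());
+ *    stream.rewind();
+ */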
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+
+class SK_API SkFILEWStream : public SkWStream {
+public:
+ SkFILEWStream(const char path[]);
+ virtual ~SkFILEWStream();
+
+ /** Returns true if the current path could be opened.
+ */
+ bool isValid() const { return fFILE != NULL; }
+
+ bool write(const void* buffer, size_t size) override;
+ void flush() override;
+ void fsync();
+ size_t bytesWritten() const override;
+
+private:
+ FILE* fFILE;
+
+ typedef SkWStream INHERITED;
+};
+
+class SK_API SkMemoryWStream : public SkWStream {
+public:
+ SkMemoryWStream(void* buffer, size_t size);
+ bool write(const void* buffer, size_t size) override;
+ size_t bytesWritten() const override { return fBytesWritten; }
+
+private:
+ char* fBuffer;
+ size_t fMaxLength;
+ size_t fBytesWritten;
+
+ typedef SkWStream INHERITED;
+};
+
+class SK_API SkDynamicMemoryWStream : public SkWStream {
+public:
+ SkDynamicMemoryWStream();
+ virtual ~SkDynamicMemoryWStream();
+
+ bool write(const void* buffer, size_t size) override;
+ size_t bytesWritten() const override { return fBytesWritten; }
+ // random access write
+ // modifies stream and returns true if offset + size is less than or equal to getOffset()
+ bool write(const void* buffer, size_t offset, size_t size);
+ bool read(void* buffer, size_t offset, size_t size);
+ size_t getOffset() const { return fBytesWritten; }
+
+ // copy what has been written to the stream into dst
+ void copyTo(void* dst) const;
+ void writeToStream(SkWStream* dst) const;
+
+ sk_sp<SkData> snapshotAsData() const;
+ // Return the contents as SkData, and then reset the stream.
+ sk_sp<SkData> detachAsData();
+#ifdef SK_SUPPORT_LEGACY_STREAM_DATA
+ /**
+ * Return a copy of the data written so far. The caller is responsible for
+ * calling unref() when they are finished with the data.
+ */
+ SkData* copyToData() const {
+ return snapshotAsData().release();
+ }
+#endif
+
+ /** Reset, returning a reader stream with the current content. */
+ SkStreamAsset* detachAsStream();
+
+ /** Reset the stream to its original, empty, state. */
+ void reset();
+ void padToAlign4();
+private:
+ struct Block;
+ Block* fHead;
+ Block* fTail;
+ size_t fBytesWritten;
+ mutable sk_sp<SkData> fCopy; // is invalidated if we write after it is created
+
+ void invalidateCopy();
+
+ // For access to the Block type.
+ friend class SkBlockMemoryStream;
+ friend class SkBlockMemoryRefCnt;
+
+ typedef SkWStream INHERITED;
+};
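+
+/**
+ * Example (illustrative sketch): accumulate bytes in memory, then detach the
+ * result as SkData.
+ *
+ *    SkDynamicMemoryWStream writer;
+ *    writer.writeText("hello");
+ *    writer.write32(42);
+ *    SkASSERT(writer.bytesWritten() == 5 + 4);
+ *    sk_sp<SkData> data = writer.detachAsData();  // stream is reset afterwards
+ */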
+
+
+class SK_API SkDebugWStream : public SkWStream {
+public:
+ SkDebugWStream() : fBytesWritten(0) {}
+
+ // overrides
+ bool write(const void* buffer, size_t size) override;
+ void newline() override;
+ size_t bytesWritten() const override { return fBytesWritten; }
+
+private:
+ size_t fBytesWritten;
+ typedef SkWStream INHERITED;
+};
+
+// for now
+typedef SkFILEStream SkURLStream;
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkString.h b/gfx/skia/skia/include/core/SkString.h
new file mode 100644
index 000000000..4a2d91f2d
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkString.h
@@ -0,0 +1,289 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkString_DEFINED
+#define SkString_DEFINED
+
+#include "../private/SkTArray.h"
+#include "SkScalar.h"
+
+#include <stdarg.h>
+
+/* Some helper functions for C strings
+*/
+
+static bool SkStrStartsWith(const char string[], const char prefixStr[]) {
+ SkASSERT(string);
+ SkASSERT(prefixStr);
+ return !strncmp(string, prefixStr, strlen(prefixStr));
+}
+static bool SkStrStartsWith(const char string[], const char prefixChar) {
+ SkASSERT(string);
+ return (prefixChar == *string);
+}
+
+bool SkStrEndsWith(const char string[], const char suffixStr[]);
+bool SkStrEndsWith(const char string[], const char suffixChar);
+
+int SkStrStartsWithOneOf(const char string[], const char prefixes[]);
+
+static int SkStrFind(const char string[], const char substring[]) {
+ const char *first = strstr(string, substring);
+ if (NULL == first) return -1;
+ return SkToInt(first - &string[0]);
+}
+
+static int SkStrFindLastOf(const char string[], const char subchar) {
+ const char* last = strrchr(string, subchar);
+ if (NULL == last) return -1;
+ return SkToInt(last - &string[0]);
+}
+
+static bool SkStrContains(const char string[], const char substring[]) {
+ SkASSERT(string);
+ SkASSERT(substring);
+ return (-1 != SkStrFind(string, substring));
+}
+static bool SkStrContains(const char string[], const char subchar) {
+ SkASSERT(string);
+ char tmp[2];
+ tmp[0] = subchar;
+ tmp[1] = '\0';
+ return (-1 != SkStrFind(string, tmp));
+}
+
+static inline char *SkStrDup(const char string[]) {
+ char *ret = (char *) sk_malloc_throw(strlen(string)+1);
+ memcpy(ret,string,strlen(string)+1);
+ return ret;
+}
+
+/*
+ * The SkStrAppend... methods will write into the provided buffer, assuming it is large enough.
+ * Each method has an associated const (e.g. SkStrAppendU32_MaxSize) which will be the largest
+ * value needed for that method's buffer.
+ *
+ * char storage[SkStrAppendU32_MaxSize];
+ * SkStrAppendU32(storage, value);
+ *
+ * Note : none of the SkStrAppend... methods write a terminating 0 to their buffers. Instead,
+ * the methods return the ptr to the end of the written part of the buffer. This can be used
+ * to compute the length, and/or know where to write a 0 if that is desired.
+ *
+ * char storage[SkStrAppendU32_MaxSize + 1];
+ * char* stop = SkStrAppendU32(storage, value);
+ * size_t len = stop - storage;
+ * *stop = 0; // valid, since storage was 1 byte larger than the max.
+ */
+
+#define SkStrAppendU32_MaxSize 10
+char* SkStrAppendU32(char buffer[], uint32_t);
+#define SkStrAppendU64_MaxSize 20
+char* SkStrAppendU64(char buffer[], uint64_t, int minDigits);
+
+#define SkStrAppendS32_MaxSize (SkStrAppendU32_MaxSize + 1)
+char* SkStrAppendS32(char buffer[], int32_t);
+#define SkStrAppendS64_MaxSize (SkStrAppendU64_MaxSize + 1)
+char* SkStrAppendS64(char buffer[], int64_t, int minDigits);
+
+/**
+ * Floats have at most 8 significant digits, so we limit our %g to that.
+ * However, the total string could be 15 characters: -1.2345678e-005
+ *
+ * In theory we should only expect up to 2 digits for the exponent, but on
+ * some platforms we have seen 3 (as in the example above).
+ */
+#define SkStrAppendScalar_MaxSize 15
+
+/**
+ *  Write the scalar in decimal format into buffer, and return a pointer to
+ * the next char after the last one written. Note: a terminating 0 is not
+ * written into buffer, which must be at least SkStrAppendScalar_MaxSize.
+ * Thus if the caller wants to add a 0 at the end, buffer must be at least
+ * SkStrAppendScalar_MaxSize + 1 bytes large.
+ */
+#define SkStrAppendScalar SkStrAppendFloat
+
+char* SkStrAppendFloat(char buffer[], float);
+
+/** \class SkString
+
+ Light weight class for managing strings. Uses reference
+ counting to make string assignments and copies very fast
+ with no extra RAM cost. Assumes UTF8 encoding.
+*/
+class SK_API SkString {
+public:
+ SkString();
+ explicit SkString(size_t len);
+ explicit SkString(const char text[]);
+ SkString(const char text[], size_t len);
+ SkString(const SkString&);
+ SkString(SkString&&);
+ ~SkString();
+
+ bool isEmpty() const { return 0 == fRec->fLength; }
+ size_t size() const { return (size_t) fRec->fLength; }
+ const char* c_str() const { return fRec->data(); }
+ char operator[](size_t n) const { return this->c_str()[n]; }
+
+ bool equals(const SkString&) const;
+ bool equals(const char text[]) const;
+ bool equals(const char text[], size_t len) const;
+
+ bool startsWith(const char prefixStr[]) const {
+ return SkStrStartsWith(fRec->data(), prefixStr);
+ }
+ bool startsWith(const char prefixChar) const {
+ return SkStrStartsWith(fRec->data(), prefixChar);
+ }
+ bool endsWith(const char suffixStr[]) const {
+ return SkStrEndsWith(fRec->data(), suffixStr);
+ }
+ bool endsWith(const char suffixChar) const {
+ return SkStrEndsWith(fRec->data(), suffixChar);
+ }
+ bool contains(const char substring[]) const {
+ return SkStrContains(fRec->data(), substring);
+ }
+ bool contains(const char subchar) const {
+ return SkStrContains(fRec->data(), subchar);
+ }
+ int find(const char substring[]) const {
+ return SkStrFind(fRec->data(), substring);
+ }
+ int findLastOf(const char subchar) const {
+ return SkStrFindLastOf(fRec->data(), subchar);
+ }
+
+ friend bool operator==(const SkString& a, const SkString& b) {
+ return a.equals(b);
+ }
+ friend bool operator!=(const SkString& a, const SkString& b) {
+ return !a.equals(b);
+ }
+
+ // these methods edit the string
+
+ SkString& operator=(const SkString&);
+ SkString& operator=(SkString&&);
+ SkString& operator=(const char text[]);
+
+ char* writable_str();
+ char& operator[](size_t n) { return this->writable_str()[n]; }
+
+ void reset();
+ /** Destructive resize, does not preserve contents. */
+ void resize(size_t len) { this->set(NULL, len); }
+ void set(const SkString& src) { *this = src; }
+ void set(const char text[]);
+ void set(const char text[], size_t len);
+ void setUTF16(const uint16_t[]);
+ void setUTF16(const uint16_t[], size_t len);
+
+ void insert(size_t offset, const SkString& src) { this->insert(offset, src.c_str(), src.size()); }
+ void insert(size_t offset, const char text[]);
+ void insert(size_t offset, const char text[], size_t len);
+ void insertUnichar(size_t offset, SkUnichar);
+ void insertS32(size_t offset, int32_t value);
+ void insertS64(size_t offset, int64_t value, int minDigits = 0);
+ void insertU32(size_t offset, uint32_t value);
+ void insertU64(size_t offset, uint64_t value, int minDigits = 0);
+ void insertHex(size_t offset, uint32_t value, int minDigits = 0);
+ void insertScalar(size_t offset, SkScalar);
+
+ void append(const SkString& str) { this->insert((size_t)-1, str); }
+ void append(const char text[]) { this->insert((size_t)-1, text); }
+ void append(const char text[], size_t len) { this->insert((size_t)-1, text, len); }
+ void appendUnichar(SkUnichar uni) { this->insertUnichar((size_t)-1, uni); }
+ void appendS32(int32_t value) { this->insertS32((size_t)-1, value); }
+ void appendS64(int64_t value, int minDigits = 0) { this->insertS64((size_t)-1, value, minDigits); }
+ void appendU32(uint32_t value) { this->insertU32((size_t)-1, value); }
+ void appendU64(uint64_t value, int minDigits = 0) { this->insertU64((size_t)-1, value, minDigits); }
+ void appendHex(uint32_t value, int minDigits = 0) { this->insertHex((size_t)-1, value, minDigits); }
+ void appendScalar(SkScalar value) { this->insertScalar((size_t)-1, value); }
+
+ void prepend(const SkString& str) { this->insert(0, str); }
+ void prepend(const char text[]) { this->insert(0, text); }
+ void prepend(const char text[], size_t len) { this->insert(0, text, len); }
+ void prependUnichar(SkUnichar uni) { this->insertUnichar(0, uni); }
+ void prependS32(int32_t value) { this->insertS32(0, value); }
+    void prependS64(int64_t value, int minDigits = 0) { this->insertS64(0, value, minDigits); }
+ void prependHex(uint32_t value, int minDigits = 0) { this->insertHex(0, value, minDigits); }
+    void prependScalar(SkScalar value) { this->insertScalar(0, value); }
+
+ void printf(const char format[], ...) SK_PRINTF_LIKE(2, 3);
+ void appendf(const char format[], ...) SK_PRINTF_LIKE(2, 3);
+ void appendVAList(const char format[], va_list);
+ void prependf(const char format[], ...) SK_PRINTF_LIKE(2, 3);
+ void prependVAList(const char format[], va_list);
+
+ void remove(size_t offset, size_t length);
+
+ SkString& operator+=(const SkString& s) { this->append(s); return *this; }
+ SkString& operator+=(const char text[]) { this->append(text); return *this; }
+ SkString& operator+=(const char c) { this->append(&c, 1); return *this; }
+
+ /**
+ * Swap contents between this and other. This function is guaranteed
+ * to never fail or throw.
+ */
+ void swap(SkString& other);
+
+private:
+ struct Rec {
+ public:
+ uint32_t fLength; // logically size_t, but we want it to stay 32bits
+ int32_t fRefCnt;
+ char fBeginningOfData;
+
+ char* data() { return &fBeginningOfData; }
+ const char* data() const { return &fBeginningOfData; }
+ };
+ Rec* fRec;
+
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+ static const Rec gEmptyRec;
+ static Rec* AllocRec(const char text[], size_t len);
+ static Rec* RefRec(Rec*);
+};
+
+/// Creates a new string and writes into it using a printf()-style format.
+SkString SkStringPrintf(const char* format, ...);
+
+// Specialized to take advantage of SkString's fast swap path. The unspecialized function is
+// declared in SkTypes.h and called by SkTSort.
+template <> inline void SkTSwap(SkString& a, SkString& b) {
+ a.swap(b);
+}
+
+enum SkStrSplitMode {
+ // Strictly return all results. If the input is ",," and the separator is ',' this will return
+ // an array of three empty strings.
+ kStrict_SkStrSplitMode,
+
+ // Only nonempty results will be added to the results. Multiple separators will be
+ // coalesced. Separators at the beginning and end of the input will be ignored. If the input is
+ // ",," and the separator is ',', this will return an empty vector.
+ kCoalesce_SkStrSplitMode
+};
+
+// Split str on any characters in delimiters into out. (Think, strtok with a sane API.)
+void SkStrSplit(const char* str, const char* delimiters, SkStrSplitMode splitMode,
+ SkTArray<SkString>* out);
+inline void SkStrSplit(const char* str, const char* delimiters, SkTArray<SkString>* out) {
+ SkStrSplit(str, delimiters, kCoalesce_SkStrSplitMode, out);
+}
+
+#endif
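
A hypothetical usage sketch for the SkString helpers above (illustrative only; the values are made up and SkDebugf is the usual Skia logging call):

    SkString s("render");
    s.appendf(" %d x %d", 1920, 1080);                         // "render 1920 x 1080"
    if (s.startsWith("render") && s.contains('x')) {
        SkDebugf("%s (%d bytes)\n", s.c_str(), (int)s.size());
    }

    SkTArray<SkString> parts;
    SkStrSplit("a,,b", ",", kStrict_SkStrSplitMode, &parts);   // yields "a", "", "b"
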
diff --git a/gfx/skia/skia/include/core/SkStrokeRec.h b/gfx/skia/skia/include/core/SkStrokeRec.h
new file mode 100644
index 000000000..9a49a3da7
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkStrokeRec.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStrokeRec_DEFINED
+#define SkStrokeRec_DEFINED
+
+#include "SkPaint.h"
+
+class SkPath;
+
+SK_BEGIN_REQUIRE_DENSE
+class SkStrokeRec {
+public:
+ enum InitStyle {
+ kHairline_InitStyle,
+ kFill_InitStyle
+ };
+ SkStrokeRec(InitStyle style);
+ SkStrokeRec(const SkPaint&, SkPaint::Style, SkScalar resScale = 1);
+ explicit SkStrokeRec(const SkPaint&, SkScalar resScale = 1);
+
+ enum Style {
+ kHairline_Style,
+ kFill_Style,
+ kStroke_Style,
+ kStrokeAndFill_Style
+ };
+ enum {
+ kStyleCount = kStrokeAndFill_Style + 1
+ };
+
+ Style getStyle() const;
+ SkScalar getWidth() const { return fWidth; }
+ SkScalar getMiter() const { return fMiterLimit; }
+ SkPaint::Cap getCap() const { return (SkPaint::Cap)fCap; }
+ SkPaint::Join getJoin() const { return (SkPaint::Join)fJoin; }
+
+ bool isHairlineStyle() const {
+ return kHairline_Style == this->getStyle();
+ }
+
+ bool isFillStyle() const {
+ return kFill_Style == this->getStyle();
+ }
+
+ void setFillStyle();
+ void setHairlineStyle();
+ /**
+     * Specify the stroke width, and optionally whether you want stroke + fill.
+ * Note, if width==0, then this request is taken to mean:
+ * strokeAndFill==true -> new style will be Fill
+ * strokeAndFill==false -> new style will be Hairline
+ */
+ void setStrokeStyle(SkScalar width, bool strokeAndFill = false);
+
+ void setStrokeParams(SkPaint::Cap cap, SkPaint::Join join, SkScalar miterLimit) {
+ fCap = cap;
+ fJoin = join;
+ fMiterLimit = miterLimit;
+ }
+
+ SkScalar getResScale() const {
+ return fResScale;
+ }
+
+ void setResScale(SkScalar rs) {
+ SkASSERT(rs > 0 && SkScalarIsFinite(rs));
+ fResScale = rs;
+ }
+
+ /**
+     * Returns true if this specifies any thick stroking, i.e. applyToPath()
+ * will return true.
+ */
+ bool needToApply() const {
+ Style style = this->getStyle();
+ return (kStroke_Style == style) || (kStrokeAndFill_Style == style);
+ }
+
+ /**
+ * Apply these stroke parameters to the src path, returning the result
+ * in dst.
+ *
+ * If there was no change (i.e. style == hairline or fill) this returns
+ * false and dst is unchanged. Otherwise returns true and the result is
+ * stored in dst.
+ *
+ * src and dst may be the same path.
+ */
+ bool applyToPath(SkPath* dst, const SkPath& src) const;
+
+ /**
+ * Apply these stroke parameters to a paint.
+ */
+ void applyToPaint(SkPaint* paint) const;
+
+ /**
+ * Gives a conservative value for the outset that should applied to a
+ * geometries bounds to account for any inflation due to applying this
+ * strokeRec to the geometry.
+ */
+ SkScalar getInflationRadius() const;
+
+ /**
+ * Equivalent to:
+ * SkStrokeRec rec(paint, style);
+ * rec.getInflationRadius();
+ * This does not account for other effects on the paint (i.e. path
+ * effect).
+ */
+ static SkScalar GetInflationRadius(const SkPaint&, SkPaint::Style);
+
+ /**
+ * Compare if two SkStrokeRecs have an equal effect on a path.
+ * Equal SkStrokeRecs produce equal paths. Equality of produced
+ * paths does not take the ResScale parameter into account.
+ */
+ bool hasEqualEffect(const SkStrokeRec& other) const {
+ if (!this->needToApply()) {
+ return this->getStyle() == other.getStyle();
+ }
+ return fWidth == other.fWidth &&
+ fMiterLimit == other.fMiterLimit &&
+ fCap == other.fCap &&
+ fJoin == other.fJoin &&
+ fStrokeAndFill == other.fStrokeAndFill;
+ }
+
+private:
+ void init(const SkPaint&, SkPaint::Style, SkScalar resScale);
+
+ SkScalar fResScale;
+ SkScalar fWidth;
+ SkScalar fMiterLimit;
+ // The following three members are packed together into a single u32.
+ // This is to avoid unnecessary padding and ensure binary equality for
+ // hashing (because the padded areas might contain garbage values).
+ //
+ // fCap and fJoin are larger than needed to avoid having to initialize
+ // any pad values
+ uint32_t fCap : 16; // SkPaint::Cap
+ uint32_t fJoin : 15; // SkPaint::Join
+ uint32_t fStrokeAndFill : 1; // bool
+};
+SK_END_REQUIRE_DENSE
+
+#endif
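
A hypothetical sketch of capturing a paint's stroke parameters and applying them to a path (illustrative only; it assumes the usual SkPaint setters and an existing SkPath named 'shape'):

    SkPaint paint;
    paint.setStyle(SkPaint::kStroke_Style);
    paint.setStrokeWidth(4.0f);

    SkStrokeRec rec(paint);        // captures width, cap, join and miter from the paint
    SkPath stroked;
    if (rec.needToApply()) {       // true for kStroke_Style / kStrokeAndFill_Style
        rec.applyToPath(&stroked, shape);
    }
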
diff --git a/gfx/skia/skia/include/core/SkSurface.h b/gfx/skia/skia/include/core/SkSurface.h
new file mode 100644
index 000000000..8e7e148cb
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkSurface.h
@@ -0,0 +1,402 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSurface_DEFINED
+#define SkSurface_DEFINED
+
+#include "SkRefCnt.h"
+#include "SkImage.h"
+#include "SkSurfaceProps.h"
+
+class SkCanvas;
+class SkPaint;
+class GrContext;
+class GrRenderTarget;
+
+/**
+ * SkSurface represents the backend/results of drawing to a canvas. For raster
+ * drawing, the surface will be pixels, but (for example) when drawing into
+ * a PDF or Picture canvas, the surface stores the recorded commands.
+ *
+ * To draw into a canvas, first create the appropriate type of Surface, and
+ * then request the canvas from the surface.
+ *
+ * SkSurface always has non-zero dimensions. If there is a request for a new surface, and either
+ *  of the requested dimensions is zero, then NULL will be returned.
+ */
+class SK_API SkSurface : public SkRefCnt {
+public:
+ /**
+ * Create a new surface, using the specified pixels/rowbytes as its
+ * backend.
+ *
+ * If the requested surface cannot be created, or the request is not a
+ * supported configuration, NULL will be returned.
+ *
+     *  Callers are responsible for initializing the surface pixels.
+ */
+ static sk_sp<SkSurface> MakeRasterDirect(const SkImageInfo&, void* pixels, size_t rowBytes,
+ const SkSurfaceProps* = nullptr);
+
+ /**
+ * The same as NewRasterDirect, but also accepts a call-back routine, which is invoked
+ * when the surface is deleted, and is passed the pixel memory and the specified context.
+ */
+ static sk_sp<SkSurface> MakeRasterDirectReleaseProc(const SkImageInfo&, void* pixels, size_t rowBytes,
+ void (*releaseProc)(void* pixels, void* context),
+ void* context, const SkSurfaceProps* = nullptr);
+
+ /**
+ * Return a new surface, with the memory for the pixels automatically allocated but respecting
+ * the specified rowBytes. If rowBytes==0, then a default value will be chosen. If a non-zero
+ * rowBytes is specified, then any images snapped off of this surface (via makeImageSnapshot())
+ * are guaranteed to have the same rowBytes.
+ *
+ * If the requested alpha type is not opaque, then the surface's pixel memory will be
+ * zero-initialized. If it is opaque, then it will be left uninitialized, and the caller is
+ * responsible for initially clearing the surface.
+ *
+ * If the requested surface cannot be created, or the request is not a
+ * supported configuration, NULL will be returned.
+ */
+ static sk_sp<SkSurface> MakeRaster(const SkImageInfo&, size_t rowBytes, const SkSurfaceProps*);
+
+ /**
+ * Allocate a new surface, automatically computing the rowBytes.
+ */
+ static sk_sp<SkSurface> MakeRaster(const SkImageInfo& info,
+ const SkSurfaceProps* props = nullptr) {
+ return MakeRaster(info, 0, props);
+ }
+
+ /**
+ * Helper version of NewRaster. It creates a SkImageInfo with the
+ * specified width and height, and populates the rest of info to match
+ * pixels in SkPMColor format.
+ */
+ static sk_sp<SkSurface> MakeRasterN32Premul(int width, int height,
+ const SkSurfaceProps* props = nullptr) {
+ return MakeRaster(SkImageInfo::MakeN32Premul(width, height), props);
+ }
+
+ /**
+ * Used to wrap a pre-existing backend 3D API texture as a SkSurface. The kRenderTarget flag
+ * must be set on GrBackendTextureDesc for this to succeed. Skia will not assume ownership
+ * of the texture and the client must ensure the texture is valid for the lifetime of the
+ * SkSurface.
+ */
+ static sk_sp<SkSurface> MakeFromBackendTexture(GrContext*, const GrBackendTextureDesc&,
+ sk_sp<SkColorSpace>, const SkSurfaceProps*);
+
+ /**
+ * Used to wrap a pre-existing 3D API rendering target as a SkSurface. Skia will not assume
+ * ownership of the render target and the client must ensure the render target is valid for the
+ * lifetime of the SkSurface.
+ */
+ static sk_sp<SkSurface> MakeFromBackendRenderTarget(GrContext*,
+ const GrBackendRenderTargetDesc&,
+ sk_sp<SkColorSpace>,
+ const SkSurfaceProps*);
+
+ /**
+ * Used to wrap a pre-existing 3D API texture as a SkSurface. Skia will treat the texture as
+ * a rendering target only, but unlike NewFromBackendRenderTarget, Skia will manage and own
+ * the associated render target objects (but not the provided texture). The kRenderTarget flag
+ * must be set on GrBackendTextureDesc for this to succeed. Skia will not assume ownership
+ * of the texture and the client must ensure the texture is valid for the lifetime of the
+ * SkSurface.
+ */
+ static sk_sp<SkSurface> MakeFromBackendTextureAsRenderTarget(
+ GrContext*, const GrBackendTextureDesc&, sk_sp<SkColorSpace>, const SkSurfaceProps*);
+
+ /**
+ * Legacy versions of the above factories, without color space support. These create "legacy"
+ * surfaces that operate without gamma correction or color management.
+ */
+ static sk_sp<SkSurface> MakeFromBackendTexture(GrContext* ctx, const GrBackendTextureDesc& desc,
+ const SkSurfaceProps* props) {
+ return MakeFromBackendTexture(ctx, desc, nullptr, props);
+ }
+
+ static sk_sp<SkSurface> MakeFromBackendRenderTarget(GrContext* ctx,
+ const GrBackendRenderTargetDesc& desc,
+ const SkSurfaceProps* props) {
+ return MakeFromBackendRenderTarget(ctx, desc, nullptr, props);
+ }
+
+ static sk_sp<SkSurface> MakeFromBackendTextureAsRenderTarget(
+ GrContext* ctx, const GrBackendTextureDesc& desc, const SkSurfaceProps* props) {
+ return MakeFromBackendTextureAsRenderTarget(ctx, desc, nullptr, props);
+ }
+
+
+ /**
+ * Return a new surface whose contents will be drawn to an offscreen
+ * render target, allocated by the surface.
+ */
+ static sk_sp<SkSurface> MakeRenderTarget(GrContext*, SkBudgeted, const SkImageInfo&,
+ int sampleCount, GrSurfaceOrigin,
+ const SkSurfaceProps*);
+
+ static sk_sp<SkSurface> MakeRenderTarget(GrContext* context, SkBudgeted budgeted,
+ const SkImageInfo& info, int sampleCount,
+ const SkSurfaceProps* props) {
+ return MakeRenderTarget(context, budgeted, info, sampleCount,
+ kBottomLeft_GrSurfaceOrigin, props);
+ }
+
+ static sk_sp<SkSurface> MakeRenderTarget(GrContext* gr, SkBudgeted b, const SkImageInfo& info) {
+ return MakeRenderTarget(gr, b, info, 0, kBottomLeft_GrSurfaceOrigin, nullptr);
+ }
+
+#ifdef SK_SUPPORT_LEGACY_NEW_SURFACE_API
+ static SkSurface* NewRasterDirect(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const SkSurfaceProps* props = NULL) {
+ return MakeRasterDirect(info, pixels, rowBytes, props).release();
+ }
+ static SkSurface* NewRasterDirectReleaseProc(const SkImageInfo& info, void* pixels,
+ size_t rowBytes,
+ void (*releaseProc)(void* pixels, void* context),
+ void* context, const SkSurfaceProps* props = NULL){
+ return MakeRasterDirectReleaseProc(info, pixels, rowBytes, releaseProc, context,
+ props).release();
+ }
+ static SkSurface* NewRaster(const SkImageInfo& info, size_t rowBytes,
+ const SkSurfaceProps* props) {
+ return MakeRaster(info, rowBytes, props).release();
+ }
+ static SkSurface* NewRaster(const SkImageInfo& info, const SkSurfaceProps* props = NULL) {
+ return MakeRaster(info, props).release();
+ }
+ static SkSurface* NewRasterN32Premul(int width, int height,
+ const SkSurfaceProps* props = NULL) {
+ return NewRaster(SkImageInfo::MakeN32Premul(width, height), props);
+ }
+ static SkSurface* NewFromBackendTexture(GrContext* ctx, const GrBackendTextureDesc& desc,
+ const SkSurfaceProps* props) {
+ return MakeFromBackendTexture(ctx, desc, props).release();
+ }
+ // Legacy alias
+ static SkSurface* NewWrappedRenderTarget(GrContext* ctx, const GrBackendTextureDesc& desc,
+ const SkSurfaceProps* props) {
+ return NewFromBackendTexture(ctx, desc, props);
+ }
+ static SkSurface* NewFromBackendRenderTarget(GrContext* ctx, const GrBackendRenderTargetDesc& d,
+ const SkSurfaceProps* props) {
+ return MakeFromBackendRenderTarget(ctx, d, props).release();
+ }
+ static SkSurface* NewFromBackendTextureAsRenderTarget(GrContext* ctx,
+ const GrBackendTextureDesc& desc,
+ const SkSurfaceProps* props) {
+ return MakeFromBackendTextureAsRenderTarget(ctx, desc, props).release();
+ }
+ static SkSurface* NewRenderTarget(GrContext* ctx, SkBudgeted b, const SkImageInfo& info,
+ int sampleCount, const SkSurfaceProps* props = NULL) {
+ return MakeRenderTarget(ctx, b, info, sampleCount, props).release();
+ }
+ static SkSurface* NewRenderTarget(GrContext* gr, SkBudgeted b, const SkImageInfo& info) {
+ return NewRenderTarget(gr, b, info, 0);
+ }
+ SkSurface* newSurface(const SkImageInfo& info) { return this->makeSurface(info).release(); }
+#endif
+
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+
+ /**
+     *  Returns a non-zero, unique value identifying the content of this
+     *  surface. Each time the content is changed, either by drawing into
+     *  this surface or by explicitly calling notifyContentChanged(), this
+     *  method will return a new value.
+     *
+     *  If this surface is empty (i.e. has a zero dimension), this will return
+ * 0.
+ */
+ uint32_t generationID();
+
+ /**
+ * Modes that can be passed to notifyContentWillChange
+ */
+ enum ContentChangeMode {
+ /**
+ * Use this mode if it is known that the upcoming content changes will
+ * clear or overwrite prior contents, thus making them discardable.
+ */
+ kDiscard_ContentChangeMode,
+ /**
+ * Use this mode if prior surface contents need to be preserved or
+ * if in doubt.
+ */
+ kRetain_ContentChangeMode,
+ };
+
+ /**
+ * Call this if the contents are about to change. This will (lazily) force a new
+ * value to be returned from generationID() when it is called next.
+ *
+ * CAN WE DEPRECATE THIS?
+ */
+ void notifyContentWillChange(ContentChangeMode mode);
+
+ enum BackendHandleAccess {
+ kFlushRead_BackendHandleAccess, //!< caller may read from the backend object
+ kFlushWrite_BackendHandleAccess, //!< caller may write to the backend object
+ kDiscardWrite_BackendHandleAccess, //!< caller must over-write the entire backend object
+ };
+
+ /*
+ * These are legacy aliases which will be removed soon
+ */
+ static const BackendHandleAccess kFlushRead_TextureHandleAccess =
+ kFlushRead_BackendHandleAccess;
+ static const BackendHandleAccess kFlushWrite_TextureHandleAccess =
+ kFlushWrite_BackendHandleAccess;
+ static const BackendHandleAccess kDiscardWrite_TextureHandleAccess =
+ kDiscardWrite_BackendHandleAccess;
+
+
+ /**
+ * Retrieves the backend API handle of the texture used by this surface, or 0 if the surface
+ * is not backed by a GPU texture.
+ *
+ * The returned texture-handle is only valid until the next draw-call into the surface,
+ * or the surface is deleted.
+ */
+ GrBackendObject getTextureHandle(BackendHandleAccess);
+
+ /**
+ * Retrieves the backend API handle of the RenderTarget backing this surface. Callers must
+     *  ensure this function returns 'true', or else the GrBackendObject will be invalid.
+ *
+ * In OpenGL this will return the FramebufferObject ID.
+ */
+ bool getRenderTargetHandle(GrBackendObject*, BackendHandleAccess);
+
+ /**
+ * Return a canvas that will draw into this surface. This will always
+     *  return the same canvas for a given surface, and is managed/owned by the
+ * surface. It should not be used when its parent surface has gone out of
+ * scope.
+ */
+ SkCanvas* getCanvas();
+
+ /**
+ * Return a new surface that is "compatible" with this one, in that it will
+ * efficiently be able to be drawn into this surface. Typical calling
+ * pattern:
+ *
+     *  sk_sp<SkSurface> surfaceA = SkSurface::Make...();
+     *  SkCanvas* canvasA = surfaceA->getCanvas();
+     *  ...
+     *  sk_sp<SkSurface> surfaceB = surfaceA->makeSurface(...);
+     *  SkCanvas* canvasB = surfaceB->getCanvas();
+     *  ... // draw using canvasB
+     *  surfaceB->draw(canvasA, 0, 0, nullptr); // <--- this will always be optimal!
+ */
+ sk_sp<SkSurface> makeSurface(const SkImageInfo&);
+
+ /**
+ * Returns an image of the current state of the surface pixels up to this
+ * point. Subsequent changes to the surface (by drawing into its canvas)
+ * will not be reflected in this image. If a copy must be made the Budgeted
+ * parameter controls whether it counts against the resource budget
+ * (currently for the gpu backend only).
+ */
+ sk_sp<SkImage> makeImageSnapshot(SkBudgeted = SkBudgeted::kYes);
+
+ /**
+ * In rare instances a client may want a unique copy of the SkSurface's contents in an image
+ * snapshot. This enum can be used to enforce that the image snapshot's backing store is not
+ * shared with another image snapshot or the surface's backing store. This is generally more
+ * expensive. This was added for Chromium bug 585250.
+ */
+ enum ForceUnique {
+ kNo_ForceUnique,
+ kYes_ForceUnique
+ };
+ sk_sp<SkImage> makeImageSnapshot(SkBudgeted, ForceUnique);
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFACTORY
+ SkImage* newImageSnapshot(SkBudgeted budgeted = SkBudgeted::kYes) {
+ return this->makeImageSnapshot(budgeted).release();
+ }
+ SkImage* newImageSnapshot(SkBudgeted budgeted, ForceUnique force) {
+ return this->makeImageSnapshot(budgeted, force).release();
+ }
+#endif
+
+ /**
+     *  Draws the current contents of this surface into the given canvas at (x, y).
+     *  The caller could take a snapshot image explicitly and draw that instead,
+     *  but drawing the surface directly can be more efficient, since the implicit
+     *  "snapshot" need only live until it has been handed off to the canvas.
+ */
+ void draw(SkCanvas*, SkScalar x, SkScalar y, const SkPaint*);
+
+ /**
+ * If the surface has direct access to its pixels (i.e. they are in local
+ * RAM) return true, and if not null, set the pixmap parameter to point to the information
+ * about the surface's pixels. The pixel address in the pixmap is only valid while
+ * the surface object is in scope, and no API call is made on the surface
+ * or its canvas.
+ *
+ * On failure, returns false and the pixmap parameter is ignored.
+ */
+ bool peekPixels(SkPixmap*);
+
+#ifdef SK_SUPPORT_LEGACY_PEEKPIXELS_PARMS
+ const void* peekPixels(SkImageInfo* info, size_t* rowBytes);
+#endif
+
+ /**
+ * Copy the pixels from the surface into the specified buffer (pixels + rowBytes),
+ * converting them into the requested format (dstInfo). The surface pixels are read
+ * starting at the specified (srcX,srcY) location.
+ *
+     *  The specified ImageInfo and (srcX,srcY) offset specify a source rectangle
+ *
+ * srcR.setXYWH(srcX, srcY, dstInfo.width(), dstInfo.height());
+ *
+ * srcR is intersected with the bounds of the base-layer. If this intersection is not empty,
+ * then we have two sets of pixels (of equal size). Replace the dst pixels with the
+ * corresponding src pixels, performing any colortype/alphatype transformations needed
+ * (in the case where the src and dst have different colortypes or alphatypes).
+ *
+ * This call can fail, returning false, for several reasons:
+ * - If srcR does not intersect the surface bounds.
+ * - If the requested colortype/alphatype cannot be converted from the surface's types.
+ */
+ bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY);
+
+ const SkSurfaceProps& props() const { return fProps; }
+
+ /**
+ * Issue any pending surface IO to the current backend 3D API and resolve any surface MSAA.
+ */
+ void prepareForExternalIO();
+
+protected:
+ SkSurface(int width, int height, const SkSurfaceProps*);
+ SkSurface(const SkImageInfo&, const SkSurfaceProps*);
+
+ // called by subclass if their contents have changed
+ void dirtyGenerationID() {
+ fGenerationID = 0;
+ }
+
+private:
+ const SkSurfaceProps fProps;
+ const int fWidth;
+ const int fHeight;
+ uint32_t fGenerationID;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
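
A hypothetical raster-surface sketch tying the factories above to getCanvas() and makeImageSnapshot() (illustrative only; clear() and SK_ColorWHITE are the usual SkCanvas/SkColor API):

    sk_sp<SkSurface> surface = SkSurface::MakeRasterN32Premul(256, 256);
    if (surface) {
        SkCanvas* canvas = surface->getCanvas();   // owned and managed by the surface
        canvas->clear(SK_ColorWHITE);
        // ... draw into 'canvas' ...
        sk_sp<SkImage> snapshot = surface->makeImageSnapshot();
    }
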
diff --git a/gfx/skia/skia/include/core/SkSurfaceProps.h b/gfx/skia/skia/include/core/SkSurfaceProps.h
new file mode 100644
index 000000000..da04d1fe9
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkSurfaceProps.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSurfaceProps_DEFINED
+#define SkSurfaceProps_DEFINED
+
+#include "SkTypes.h"
+
+/**
+ * Description of how the LCD strips are arranged for each pixel. If this is unknown, or the
+ * pixels are meant to be "portable" and/or transformed before showing (e.g. rotated, scaled)
+ * then use kUnknown_SkPixelGeometry.
+ */
+enum SkPixelGeometry {
+ kUnknown_SkPixelGeometry,
+ kRGB_H_SkPixelGeometry,
+ kBGR_H_SkPixelGeometry,
+ kRGB_V_SkPixelGeometry,
+ kBGR_V_SkPixelGeometry,
+};
+
+// Returns true iff geo is a known geometry and is RGB.
+static inline bool SkPixelGeometryIsRGB(SkPixelGeometry geo) {
+ return kRGB_H_SkPixelGeometry == geo || kRGB_V_SkPixelGeometry == geo;
+}
+
+// Returns true iff geo is a known geometry and is BGR.
+static inline bool SkPixelGeometryIsBGR(SkPixelGeometry geo) {
+ return kBGR_H_SkPixelGeometry == geo || kBGR_V_SkPixelGeometry == geo;
+}
+
+// Returns true iff geo is a known geometry and is horizontal.
+static inline bool SkPixelGeometryIsH(SkPixelGeometry geo) {
+ return kRGB_H_SkPixelGeometry == geo || kBGR_H_SkPixelGeometry == geo;
+}
+
+// Returns true iff geo is a known geometry and is vertical.
+static inline bool SkPixelGeometryIsV(SkPixelGeometry geo) {
+ return kRGB_V_SkPixelGeometry == geo || kBGR_V_SkPixelGeometry == geo;
+}
+
+/**
+ * Describes properties and constraints of a given SkSurface. The rendering engine can parse these
+ * during drawing, and can sometimes optimize its performance (e.g. disabling an expensive
+ * feature).
+ */
+class SK_API SkSurfaceProps {
+public:
+ enum Flags {
+ kUseDeviceIndependentFonts_Flag = 1 << 0,
+ };
+ /** Deprecated alias used by Chromium. Will be removed. */
+ static const Flags kUseDistanceFieldFonts_Flag = kUseDeviceIndependentFonts_Flag;
+
+ SkSurfaceProps(uint32_t flags, SkPixelGeometry);
+
+ enum InitType {
+ kLegacyFontHost_InitType
+ };
+ SkSurfaceProps(InitType);
+ SkSurfaceProps(uint32_t flags, InitType);
+ SkSurfaceProps(const SkSurfaceProps& other);
+
+ uint32_t flags() const { return fFlags; }
+ SkPixelGeometry pixelGeometry() const { return fPixelGeometry; }
+
+ bool isUseDeviceIndependentFonts() const {
+ return SkToBool(fFlags & kUseDeviceIndependentFonts_Flag);
+ }
+
+private:
+ SkSurfaceProps();
+
+ uint32_t fFlags;
+ SkPixelGeometry fPixelGeometry;
+};
+
+#endif
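
A hypothetical sketch of the pixel-geometry helpers above (illustrative only):

    SkSurfaceProps props(0, kRGB_H_SkPixelGeometry);
    if (SkPixelGeometryIsH(props.pixelGeometry()) &&
        SkPixelGeometryIsRGB(props.pixelGeometry())) {
        // LCD text may be rendered with horizontal RGB subpixel striping.
    }
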
diff --git a/gfx/skia/skia/include/core/SkSwizzle.h b/gfx/skia/skia/include/core/SkSwizzle.h
new file mode 100644
index 000000000..253f4e39a
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkSwizzle.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSwizzle_DEFINED
+#define SkSwizzle_DEFINED
+
+#include "SkTypes.h"
+
+/**
+ Swizzles byte order of |count| 32-bit pixels, swapping R and B.
+ (RGBA <-> BGRA)
+*/
+SK_API void SkSwapRB(uint32_t* dest, const uint32_t* src, int count);
+
+#endif
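
A hypothetical call to the swizzle helper above (illustrative pixel values only):

    uint32_t bgra[2] = { 0xFF336699, 0xFF000000 };
    uint32_t rgba[2];
    SkSwapRB(rgba, bgra, 2);   // swaps the R and B channel of each 32-bit pixel
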
diff --git a/gfx/skia/skia/include/core/SkTLazy.h b/gfx/skia/skia/include/core/SkTLazy.h
new file mode 100644
index 000000000..cb08387bb
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkTLazy.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTLazy_DEFINED
+#define SkTLazy_DEFINED
+
+#include "../private/SkTemplates.h"
+#include "SkTypes.h"
+#include <new>
+#include <utility>
+
+/**
+ * Efficient way to defer allocating/initializing a class until it is needed
+ * (if ever).
+ */
+template <typename T> class SkTLazy {
+public:
+ SkTLazy() : fPtr(nullptr) {}
+
+ explicit SkTLazy(const T* src)
+ : fPtr(src ? new (fStorage.get()) T(*src) : nullptr) {}
+
+ SkTLazy(const SkTLazy& src) : fPtr(nullptr) { *this = src; }
+
+ ~SkTLazy() {
+ if (this->isValid()) {
+ fPtr->~T();
+ }
+ }
+
+ SkTLazy& operator=(const SkTLazy& src) {
+ if (src.isValid()) {
+ this->set(*src.get());
+ } else {
+ this->reset();
+ }
+ return *this;
+ }
+
+ /**
+ * Return a pointer to an instance of the class initialized with 'args'.
+ * If a previous instance had been initialized (either from init() or
+ * set()) it will first be destroyed, so that a freshly initialized
+ * instance is always returned.
+ */
+ template <typename... Args> T* init(Args&&... args) {
+ if (this->isValid()) {
+ fPtr->~T();
+ }
+ fPtr = new (SkTCast<T*>(fStorage.get())) T(std::forward<Args>(args)...);
+ return fPtr;
+ }
+
+ /**
+ * Copy src into this, and return a pointer to a copy of it. Note this
+ * will always return the same pointer, so if it is called on a lazy that
+ * has already been initialized, then this will copy over the previous
+ * contents.
+ */
+ T* set(const T& src) {
+ if (this->isValid()) {
+ *fPtr = src;
+ } else {
+ fPtr = new (SkTCast<T*>(fStorage.get())) T(src);
+ }
+ return fPtr;
+ }
+
+ /**
+ * Destroy the lazy object (if it was created via init() or set())
+ */
+ void reset() {
+ if (this->isValid()) {
+ fPtr->~T();
+ fPtr = nullptr;
+ }
+ }
+
+ /**
+ * Returns true if a valid object has been initialized in the SkTLazy,
+ * false otherwise.
+ */
+ bool isValid() const { return SkToBool(fPtr); }
+
+ /**
+ * Returns the object. This version should only be called when the caller
+ * knows that the object has been initialized.
+ */
+ T* get() const { SkASSERT(this->isValid()); return fPtr; }
+
+ /**
+ * Like above but doesn't assert if object isn't initialized (in which case
+ * nullptr is returned).
+ */
+ T* getMaybeNull() const { return fPtr; }
+
+private:
+ SkAlignedSTStorage<1, T> fStorage;
+ T* fPtr; // nullptr or fStorage
+};
+
+/**
+ * A helper built on top of SkTLazy to do copy-on-first-write. The object is initialized
+ * with a const pointer but provides a non-const pointer accessor. The first time the
+ * accessor is called (if ever) the object is cloned.
+ *
+ * In the following example at most one copy of constThing is made:
+ *
+ * SkTCopyOnFirstWrite<Thing> thing(&constThing);
+ * ...
+ * function_that_takes_a_const_thing_ptr(thing); // constThing is passed
+ * ...
+ * if (need_to_modify_thing()) {
+ * thing.writable()->modifyMe(); // makes a copy of constThing
+ * }
+ * ...
+ * x = thing->readSomething();
+ * ...
+ * if (need_to_modify_thing_now()) {
+ * thing.writable()->changeMe(); // makes a copy of constThing if we didn't call modifyMe()
+ * }
+ *
+ * consume_a_thing(thing); // could be constThing or a modified copy.
+ */
+template <typename T>
+class SkTCopyOnFirstWrite {
+public:
+ SkTCopyOnFirstWrite(const T& initial) : fObj(&initial) {}
+
+ SkTCopyOnFirstWrite(const T* initial) : fObj(initial) {}
+
+ // Constructor for delayed initialization.
+ SkTCopyOnFirstWrite() : fObj(nullptr) {}
+
+ // Should only be called once, and only if the default constructor was used.
+ void init(const T& initial) {
+ SkASSERT(nullptr == fObj);
+ SkASSERT(!fLazy.isValid());
+ fObj = &initial;
+ }
+
+ /**
+ * Returns a writable T*. The first time this is called the initial object is cloned.
+ */
+ T* writable() {
+ SkASSERT(fObj);
+ if (!fLazy.isValid()) {
+ fLazy.set(*fObj);
+ fObj = fLazy.get();
+ }
+ return const_cast<T*>(fObj);
+ }
+
+ /**
+ * Operators for treating this as though it were a const pointer.
+ */
+
+ const T *operator->() const { return fObj; }
+
+ operator const T*() const { return fObj; }
+
+ const T& operator *() const { return *fObj; }
+
+private:
+ const T* fObj;
+ SkTLazy<T> fLazy;
+};
+
+#endif
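
A hypothetical sketch of deferring construction with SkTLazy (illustrative only; SkPaint and setAntiAlias() are the usual Skia paint API):

    SkTLazy<SkPaint> lazyPaint;            // no SkPaint constructed yet
    if (!lazyPaint.isValid()) {
        SkPaint* p = lazyPaint.init();     // constructs the SkPaint in-place on first use
        p->setAntiAlias(true);
    }
    lazyPaint.reset();                     // destroys the instance again
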
diff --git a/gfx/skia/skia/include/core/SkTRegistry.h b/gfx/skia/skia/include/core/SkTRegistry.h
new file mode 100644
index 000000000..0994c990d
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkTRegistry.h
@@ -0,0 +1,55 @@
+
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTRegistry_DEFINED
+#define SkTRegistry_DEFINED
+
+#include "SkTypes.h"
+
+/** Template class that registers itself (in the constructor) into a linked-list
+ and provides a function-pointer. This can be used to auto-register a set of
+ services, e.g. a set of image codecs.
+ */
+template <typename T> class SkTRegistry : SkNoncopyable {
+public:
+ typedef T Factory;
+
+ explicit SkTRegistry(T fact) : fFact(fact) {
+#ifdef SK_BUILD_FOR_ANDROID
+ // work-around for double-initialization bug
+ {
+ SkTRegistry* reg = gHead;
+ while (reg) {
+ if (reg == this) {
+ return;
+ }
+ reg = reg->fChain;
+ }
+ }
+#endif
+ fChain = gHead;
+ gHead = this;
+ }
+
+ static const SkTRegistry* Head() { return gHead; }
+
+ const SkTRegistry* next() const { return fChain; }
+ const Factory& factory() const { return fFact; }
+
+private:
+ Factory fFact;
+ SkTRegistry* fChain;
+
+ static SkTRegistry* gHead;
+};
+
+// The caller still needs to declare an instance of this somewhere
+template <typename T> SkTRegistry<T>* SkTRegistry<T>::gHead;
+
+#endif
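
A hypothetical self-registration sketch built on SkTRegistry (illustrative only; the factory type and MakeAnswer are invented for the example):

    typedef int (*AnswerFactory)();
    static int MakeAnswer() { return 42; }
    static SkTRegistry<AnswerFactory> gAnswerReg(MakeAnswer);  // links itself onto the list

    // Later, walk everything that registered itself:
    for (const SkTRegistry<AnswerFactory>* r = SkTRegistry<AnswerFactory>::Head();
         r; r = r->next()) {
        SkDebugf("%d\n", r->factory()());                      // prints 42
    }
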
diff --git a/gfx/skia/skia/include/core/SkTextBlob.h b/gfx/skia/skia/include/core/SkTextBlob.h
new file mode 100644
index 000000000..35d5dc417
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkTextBlob.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTextBlob_DEFINED
+#define SkTextBlob_DEFINED
+
+#include "../private/SkTemplates.h"
+#include "SkPaint.h"
+#include "SkString.h"
+#include "SkRefCnt.h"
+
+class SkReadBuffer;
+class SkWriteBuffer;
+
+/** \class SkTextBlob
+
+ SkTextBlob combines multiple text runs into an immutable, ref-counted structure.
+*/
+class SK_API SkTextBlob final : public SkNVRefCnt<SkTextBlob> {
+public:
+ /**
+ * Returns a conservative blob bounding box.
+ */
+ const SkRect& bounds() const { return fBounds; }
+
+ /**
+ * Return a non-zero, unique value representing the text blob.
+ */
+ uint32_t uniqueID() const { return fUniqueID; }
+
+ /**
+ * Serialize to a buffer.
+ */
+ void flatten(SkWriteBuffer&) const;
+
+ /**
+ * Recreate an SkTextBlob that was serialized into a buffer.
+ *
+ * @param SkReadBuffer Serialized blob data.
+ * @return A new SkTextBlob representing the serialized data, or NULL if the buffer is
+ * invalid.
+ */
+ static sk_sp<SkTextBlob> MakeFromBuffer(SkReadBuffer&);
+
+ static const SkTextBlob* CreateFromBuffer(SkReadBuffer& buffer) {
+ return MakeFromBuffer(buffer).release();
+ }
+
+ enum GlyphPositioning : uint8_t {
+ kDefault_Positioning = 0, // Default glyph advances -- zero scalars per glyph.
+ kHorizontal_Positioning = 1, // Horizontal positioning -- one scalar per glyph.
+ kFull_Positioning = 2 // Point positioning -- two scalars per glyph.
+ };
+
+private:
+ friend class SkNVRefCnt<SkTextBlob>;
+ class RunRecord;
+
+ SkTextBlob(int runCount, const SkRect& bounds);
+
+ ~SkTextBlob();
+
+ // Memory for objects of this class is created with sk_malloc rather than operator new and must
+ // be freed with sk_free.
+ void operator delete(void* p) { sk_free(p); }
+ void* operator new(size_t) {
+ SkFAIL("All blobs are created by placement new.");
+ return sk_malloc_throw(0);
+ }
+ void* operator new(size_t, void* p) { return p; }
+
+ static unsigned ScalarsPerGlyph(GlyphPositioning pos);
+
+ friend class SkTextBlobBuilder;
+ friend class SkTextBlobRunIterator;
+
+ const int fRunCount;
+ const SkRect fBounds;
+ const uint32_t fUniqueID;
+
+ SkDEBUGCODE(size_t fStorageSize;)
+
+ // The actual payload resides in externally-managed storage, following the object.
+ // (see the .cpp for more details)
+
+ typedef SkRefCnt INHERITED;
+};
+
+/** \class SkTextBlobBuilder
+
+ Helper class for constructing SkTextBlobs.
+ */
+class SK_API SkTextBlobBuilder {
+public:
+ SkTextBlobBuilder();
+
+ ~SkTextBlobBuilder();
+
+ /**
+ * Returns an immutable SkTextBlob for the current runs/glyphs. The builder is reset and
+ * can be reused.
+ */
+ sk_sp<SkTextBlob> make();
+
+#ifdef SK_SUPPORT_LEGACY_TEXTBLOB_BUILDER
+ const SkTextBlob* build() {
+ return this->make().release();
+ }
+#endif
+
+ /**
+ * Glyph and position buffers associated with a run.
+ *
+ * A run is a sequence of glyphs sharing the same font metrics
+ * and positioning mode.
+ *
+ * If textByteCount is 0, utf8text and clusters will be NULL (no
+ * character information will be associated with the glyphs).
+ *
+ * utf8text will point to a buffer of size textByteCount bytes.
+ *
+ * clusters (if not NULL) will point to an array of size count.
+ * For each glyph, give the byte-offset into the text for the
+ * first byte in the first character in that glyph's cluster.
+ * Each value in the array should be an integer less than
+ * textByteCount. Values in the array should either be
+ * monotonically increasing (left-to-right text) or monotonically
+ * decreasing (right-to-left text). This definiton is conviently
+ * the same as used by Harfbuzz's hb_glyph_info_t::cluster field,
+ * except that Harfbuzz interleaves glyphs and clusters.
+ */
+ struct RunBuffer {
+ SkGlyphID* glyphs;
+ SkScalar* pos;
+ char* utf8text;
+ uint32_t* clusters;
+ };
+
+ /**
+ * Allocates a new default-positioned run and returns its writable glyph buffer
+ * for direct manipulation.
+ *
+ * @param font The font to be used for this run.
+ * @param count Number of glyphs.
+ * @param x,y Position within the blob.
+ * @param textByteCount length of the original UTF-8 text that
+ * corresponds to this sequence of glyphs. If 0,
+ * text will not be included in the textblob.
+ * @param lang Language code, currently unimplemented.
+ * @param bounds Optional run bounding box. If known in advance (!= NULL), it will
+ * be used when computing the blob bounds, to avoid re-measuring.
+ *
+ * @return A writable glyph buffer, valid until the next allocRun() or
+ * build() call. The buffer is guaranteed to hold @count@ glyphs.
+ */
+ const RunBuffer& allocRunText(const SkPaint& font,
+ int count,
+ SkScalar x,
+ SkScalar y,
+ int textByteCount,
+ SkString lang,
+ const SkRect* bounds = NULL);
+ const RunBuffer& allocRun(const SkPaint& font, int count, SkScalar x, SkScalar y,
+ const SkRect* bounds = NULL) {
+ return this->allocRunText(font, count, x, y, 0, SkString(), bounds);
+ }
+
+ /**
+ * Allocates a new horizontally-positioned run and returns its writable glyph and position
+ * buffers for direct manipulation.
+ *
+ * @param font The font to be used for this run.
+ * @param count Number of glyphs.
+ * @param y Vertical offset within the blob.
+ * @param textByteCount length of the original UTF-8 text that
+ * corresponds to this sequence of glyphs. If 0,
+ * text will not be included in the textblob.
+ * @param lang Language code, currently unimplemented.
+ * @param bounds Optional run bounding box. If known in advance (!= NULL), it will
+ * be used when computing the blob bounds, to avoid re-measuring.
+ *
+ * @return Writable glyph and position buffers, valid until the next allocRun()
+ * or build() call. The buffers are guaranteed to hold @count@ elements.
+ */
+ const RunBuffer& allocRunTextPosH(const SkPaint& font, int count, SkScalar y,
+ int textByteCount, SkString lang,
+ const SkRect* bounds = NULL);
+ const RunBuffer& allocRunPosH(const SkPaint& font, int count, SkScalar y,
+ const SkRect* bounds = NULL) {
+ return this->allocRunTextPosH(font, count, y, 0, SkString(), bounds);
+ }
+
+ /**
+ * Allocates a new fully-positioned run and returns its writable glyph and position
+ * buffers for direct manipulation.
+ *
+ * @param font The font to be used for this run.
+ * @param count Number of glyphs.
+ * @param textByteCount length of the original UTF-8 text that
+ * corresponds to this sequence of glyphs. If 0,
+ * text will not be included in the textblob.
+ * @param lang Language code, currently unimplemented.
+ * @param bounds Optional run bounding box. If known in advance (!= NULL), it will
+ * be used when computing the blob bounds, to avoid re-measuring.
+ *
+ * @return Writable glyph and position buffers, valid until the next allocRun()
+ * or build() call. The glyph buffer and position buffer are
+ * guaranteed to hold @count@ and 2 * @count@ elements, respectively.
+ */
+ const RunBuffer& allocRunTextPos(const SkPaint& font, int count,
+ int textByteCount, SkString lang,
+ const SkRect* bounds = NULL);
+ const RunBuffer& allocRunPos(const SkPaint& font, int count,
+ const SkRect* bounds = NULL) {
+ return this->allocRunTextPos(font, count, 0, SkString(), bounds);
+ }
+
+private:
+ void reserve(size_t size);
+ void allocInternal(const SkPaint& font, SkTextBlob::GlyphPositioning positioning,
+ int count, int textBytes, SkPoint offset, const SkRect* bounds);
+ bool mergeRun(const SkPaint& font, SkTextBlob::GlyphPositioning positioning,
+ int count, SkPoint offset);
+ void updateDeferredBounds();
+
+ static SkRect ConservativeRunBounds(const SkTextBlob::RunRecord&);
+ static SkRect TightRunBounds(const SkTextBlob::RunRecord&);
+
+ SkAutoTMalloc<uint8_t> fStorage;
+ size_t fStorageSize;
+ size_t fStorageUsed;
+
+ SkRect fBounds;
+ int fRunCount;
+ bool fDeferredBounds;
+ size_t fLastRun; // index into fStorage
+
+ RunBuffer fCurrentRunBuffer;
+};
+
+#endif // SkTextBlob_DEFINED
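
A hypothetical sketch of building a blob with the default-positioned run API above (illustrative only; the glyph IDs are made up, and the paint is assumed to be configured for glyph-ID text, which the builder expects):

    SkPaint font;
    font.setTextEncoding(SkPaint::kGlyphID_TextEncoding);   // assumed prerequisite
    font.setTextSize(14);

    SkTextBlobBuilder builder;
    const SkTextBlobBuilder::RunBuffer& run = builder.allocRun(font, 3, /*x=*/0, /*y=*/14);
    run.glyphs[0] = 40; run.glyphs[1] = 41; run.glyphs[2] = 42;

    sk_sp<SkTextBlob> blob = builder.make();                 // builder resets and can be reused
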
diff --git a/gfx/skia/skia/include/core/SkTime.h b/gfx/skia/skia/include/core/SkTime.h
new file mode 100644
index 000000000..e9a894812
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkTime.h
@@ -0,0 +1,61 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTime_DEFINED
+#define SkTime_DEFINED
+
+#include "SkTypes.h"
+
+class SkString;
+
+/** \class SkTime
+ Platform-implemented utilities to return time of day, and millisecond counter.
+*/
+class SK_API SkTime {
+public:
+ struct DateTime {
+ int16_t fTimeZoneMinutes; // The number of minutes that GetDateTime()
+ // is ahead of or behind UTC.
+ uint16_t fYear; //!< e.g. 2005
+ uint8_t fMonth; //!< 1..12
+ uint8_t fDayOfWeek; //!< 0..6, 0==Sunday
+ uint8_t fDay; //!< 1..31
+ uint8_t fHour; //!< 0..23
+ uint8_t fMinute; //!< 0..59
+ uint8_t fSecond; //!< 0..59
+
+ void toISO8601(SkString* dst) const;
+ };
+ static void GetDateTime(DateTime*);
+
+ static double GetSecs() { return GetNSecs() * 1e-9; }
+ static double GetMSecs() { return GetNSecs() * 1e-6; }
+ static double GetNSecs();
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkAutoTime {
+public:
+ // The label is not deep-copied, so its address must remain valid for the
+ // lifetime of this object
+ SkAutoTime(const char* label = nullptr)
+ : fLabel(label)
+ , fNow(SkTime::GetMSecs()) {}
+ ~SkAutoTime() {
+ uint64_t dur = static_cast<uint64_t>(SkTime::GetMSecs() - fNow);
+        SkDebugf("%s %llu\n", fLabel ? fLabel : "", static_cast<unsigned long long>(dur));
+ }
+private:
+ const char* fLabel;
+ double fNow;
+};
+#define SkAutoTime(...) SK_REQUIRE_LOCAL_VAR(SkAutoTime)
+
+#endif
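
A hypothetical sketch of the clock helpers above (illustrative only; it assumes SkString.h is also included for toISO8601()):

    SkTime::DateTime now;
    SkTime::GetDateTime(&now);
    SkString iso;
    now.toISO8601(&iso);                       // e.g. "2016-09-01T12:34:56..."

    double t0 = SkTime::GetMSecs();
    // ... timed work ...
    SkDebugf("took %f ms\n", SkTime::GetMSecs() - t0);
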
diff --git a/gfx/skia/skia/include/core/SkTraceMemoryDump.h b/gfx/skia/skia/include/core/SkTraceMemoryDump.h
new file mode 100644
index 000000000..8383190cc
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkTraceMemoryDump.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTraceMemoryDump_DEFINED
+#define SkTraceMemoryDump_DEFINED
+
+#include "SkTypes.h"
+
+class SkDiscardableMemory;
+
+/**
+ * Interface for memory tracing.
+ * This interface is meant to be passed as argument to the memory dump methods of Skia objects.
+ * The implementation of this interface is provided by the embedder.
+ */
+class SK_API SkTraceMemoryDump {
+public:
+ /**
+ * Enum to specify the level of the requested details for the dump from the Skia objects.
+ */
+ enum LevelOfDetail {
+ // Dump only the minimal details to get the total memory usage (Usually just the totals).
+ kLight_LevelOfDetail,
+
+ // Dump the detailed breakdown of the objects in the caches.
+ kObjectsBreakdowns_LevelOfDetail
+ };
+
+ /**
+ * Appends a new memory dump (i.e. a row) to the trace memory infrastructure.
+ * If dumpName does not exist yet, a new one is created. Otherwise, a new column is appended to
+ * the previously created dump.
+ * Arguments:
+ * dumpName: an absolute, slash-separated, name for the item being dumped
+ * e.g., "skia/CacheX/EntryY".
+ * valueName: a string indicating the name of the column.
+ * e.g., "size", "active_size", "number_of_objects".
+ * This string is supposed to be long lived and is NOT copied.
+ * units: a string indicating the units for the value.
+ * e.g., "bytes", "objects".
+ * This string is supposed to be long lived and is NOT copied.
+ * value: the actual value being dumped.
+ */
+ virtual void dumpNumericValue(const char* dumpName,
+ const char* valueName,
+ const char* units,
+ uint64_t value) = 0;
+
+ /**
+ * Sets the memory backing for an existing dump.
+ * backingType and backingObjectId are used by the embedder to associate the memory dumped via
+ * dumpNumericValue with the corresponding dump that backs the memory.
+ */
+ virtual void setMemoryBacking(const char* dumpName,
+ const char* backingType,
+ const char* backingObjectId) = 0;
+
+ /**
+ * Specialization for memory backed by discardable memory.
+ */
+ virtual void setDiscardableMemoryBacking(
+ const char* dumpName,
+ const SkDiscardableMemory& discardableMemoryObject) = 0;
+
+ /**
+ * Returns the type of details requested in the dump. The granularity of the dump is supposed to
+ * match the LevelOfDetail argument. The level of detail must not affect the total size
+     *  reported, but only the granularity of the child entries.
+ */
+ virtual LevelOfDetail getRequestedDetails() const = 0;
+
+protected:
+ virtual ~SkTraceMemoryDump() { }
+};
+
+#endif
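
A minimal, hypothetical embedder-side implementation of the interface above that just logs each numeric value (illustrative only):

    class LoggingMemoryDump : public SkTraceMemoryDump {
    public:
        void dumpNumericValue(const char* dumpName, const char* valueName,
                              const char* units, uint64_t value) override {
            SkDebugf("%s/%s = %llu %s\n", dumpName, valueName,
                     static_cast<unsigned long long>(value), units);
        }
        void setMemoryBacking(const char*, const char*, const char*) override {}
        void setDiscardableMemoryBacking(const char*,
                                         const SkDiscardableMemory&) override {}
        LevelOfDetail getRequestedDetails() const override {
            return kLight_LevelOfDetail;
        }
    };
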
diff --git a/gfx/skia/skia/include/core/SkTypeface.h b/gfx/skia/skia/include/core/SkTypeface.h
new file mode 100644
index 000000000..c25552a2d
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkTypeface.h
@@ -0,0 +1,433 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTypeface_DEFINED
+#define SkTypeface_DEFINED
+
+#include "../private/SkBitmaskEnum.h"
+#include "../private/SkOnce.h"
+#include "../private/SkWeakRefCnt.h"
+#include "SkFontStyle.h"
+#include "SkRect.h"
+#include "SkString.h"
+
+class SkDescriptor;
+class SkFontData;
+class SkFontDescriptor;
+class SkScalerContext;
+struct SkScalerContextRec;
+struct SkScalerContextEffects;
+class SkStream;
+class SkStreamAsset;
+class SkAdvancedTypefaceMetrics;
+class SkWStream;
+
+typedef uint32_t SkFontID;
+/** Machine endian. */
+typedef uint32_t SkFontTableTag;
+
+/** \class SkTypeface
+
+ The SkTypeface class specifies the typeface and intrinsic style of a font.
+ This is used in the paint, along with optionally algorithmic settings like
+ textSize, textSkewX, textScaleX, kFakeBoldText_Mask, to specify
+ how text appears when drawn (and measured).
+
+ Typeface objects are immutable, and so they can be shared between threads.
+*/
+class SK_API SkTypeface : public SkWeakRefCnt {
+public:
+ /** Style specifies the intrinsic style attributes of a given typeface
+ */
+ enum Style {
+ kNormal = 0,
+ kBold = 0x01,
+ kItalic = 0x02,
+
+ // helpers
+ kBoldItalic = 0x03
+ };
+
+ /** Returns the typeface's intrinsic style attributes. */
+ SkFontStyle fontStyle() const {
+ return fStyle;
+ }
+
+ /** Returns the typeface's intrinsic style attributes.
+ * @deprecated use fontStyle() instead.
+ */
+ Style style() const {
+ return static_cast<Style>(
+ (fStyle.weight() >= SkFontStyle::kSemiBold_Weight ? kBold : kNormal) |
+ (fStyle.slant() != SkFontStyle::kUpright_Slant ? kItalic : kNormal));
+ }
+
+ /** Returns true if style() has the kBold bit set. */
+ bool isBold() const { return fStyle.weight() >= SkFontStyle::kSemiBold_Weight; }
+
+ /** Returns true if style() has the kItalic bit set. */
+ bool isItalic() const { return fStyle.slant() != SkFontStyle::kUpright_Slant; }
+
+ /** Returns true if the typeface claims to be fixed-pitch.
+ * This is a style bit, advance widths may vary even if this returns true.
+ */
+ bool isFixedPitch() const { return fIsFixedPitch; }
+
+ /** Return a 32bit value for this typeface, unique for the underlying font
+ data. Will never return 0.
+ */
+ SkFontID uniqueID() const { return fUniqueID; }
+
+ /** Return the uniqueID for the specified typeface. If the face is null,
+ resolve it to the default font and return its uniqueID. Will never
+ return 0.
+ */
+ static SkFontID UniqueID(const SkTypeface* face);
+
+ /** Returns true if the two typefaces reference the same underlying font,
+ handling either being null (treating null as the default font)
+ */
+ static bool Equal(const SkTypeface* facea, const SkTypeface* faceb);
+
+ /** Returns the default typeface, which is never nullptr. */
+ static sk_sp<SkTypeface> MakeDefault(Style style = SkTypeface::kNormal);
+#ifdef SK_SUPPORT_LEGACY_TYPEFACE_PTR
+ static SkTypeface* RefDefault(Style style = SkTypeface::kNormal) {
+ return MakeDefault(style).release();
+ }
+#endif
+
+ /** Creates a new reference to the typeface that most closely matches the
+ requested familyName and fontStyle. This method allows extended font
+ face specifiers as in the SkFontStyle type. Will never return null.
+
+ @param familyName May be NULL. The name of the font family.
+ @param fontStyle The style of the typeface.
+        @return reference to the closest-matching typeface. The caller must call
+                unref() when done with it.
+ */
+ static sk_sp<SkTypeface> MakeFromName(const char familyName[], SkFontStyle fontStyle);
+
+#ifdef SK_SUPPORT_LEGACY_TYPEFACE_PTR
+ static SkTypeface* CreateFromName(const char familyName[], Style style) {
+ return MakeFromName(familyName, SkFontStyle::FromOldStyle(style)).release();
+ }
+#endif
+
+ /** Return the typeface that most closely matches the requested typeface and style.
+ Use this to pick a new style from the same family of the existing typeface.
+ If family is nullptr, this selects from the default font's family.
+
+ @param family May be NULL. The name of the existing type face.
+ @param s The style (normal, bold, italic) of the type face.
+ @return the closest-matching typeface.
+ */
+ static sk_sp<SkTypeface> MakeFromTypeface(SkTypeface* family, Style);
+
+ /** Return a new typeface given a file. If the file does not exist, or is
+ not a valid font file, returns nullptr.
+ */
+ static sk_sp<SkTypeface> MakeFromFile(const char path[], int index = 0);
+#ifdef SK_SUPPORT_LEGACY_TYPEFACE_PTR
+ static SkTypeface* CreateFromFile(const char path[], int index = 0) {
+ return MakeFromFile(path, index).release();
+ }
+#endif
+
+ /** Return a new typeface given a stream. If the stream is
+ not a valid font file, returns nullptr. Ownership of the stream is
+ transferred, so the caller must not reference it again.
+ */
+ static sk_sp<SkTypeface> MakeFromStream(SkStreamAsset* stream, int index = 0);
+#ifdef SK_SUPPORT_LEGACY_TYPEFACE_PTR
+ static SkTypeface* CreateFromStream(SkStreamAsset* stream, int index = 0) {
+ return MakeFromStream(stream, index).release();
+ }
+#endif
+
+ /** Return a new typeface given font data and configuration. If the data
+ is not valid font data, returns nullptr.
+ */
+ static sk_sp<SkTypeface> MakeFromFontData(std::unique_ptr<SkFontData>);
+
+ /** Write a unique signature to a stream, sufficient to reconstruct a
+ typeface referencing the same font when Deserialize is called.
+ */
+ void serialize(SkWStream*) const;
+
+ /** Given the data previously written by serialize(), return a new instance
+ of a typeface referring to the same font. If that font is not available,
+ return nullptr.
+ Does not affect ownership of SkStream.
+ */
+ static sk_sp<SkTypeface> MakeDeserialize(SkStream*);
+
+ enum Encoding {
+ kUTF8_Encoding,
+ kUTF16_Encoding,
+ kUTF32_Encoding
+ };
+
+ /**
+ * Given an array of character codes, of the specified encoding,
+ * optionally return their corresponding glyph IDs (if glyphs is not NULL).
+ *
+ * @param chars pointer to the array of character codes
+ * @param encoding how the characters are encoded
+ * @param glyphs (optional) returns the corresponding glyph IDs for each
+ * character code, up to glyphCount values. If a character code is
+ * not found in the typeface, the corresponding glyph ID will be 0.
+ * @param glyphCount number of code points in 'chars' to process. If glyphs
+     *                    is not NULL, then it must point to sufficient memory to write
+ * glyphCount values into it.
+ * @return the number of number of continuous non-zero glyph IDs computed
+ * from the beginning of chars. This value is valid, even if the
+ * glyphs parameter is NULL.
+ */
+ int charsToGlyphs(const void* chars, Encoding encoding, SkGlyphID glyphs[],
+ int glyphCount) const;
+
+ /**
+ * Return the number of glyphs in the typeface.
+ */
+ int countGlyphs() const;
+
+ // Table getters -- may fail if the underlying font format is not organized
+ // as 4-byte tables.
+
+ /** Return the number of tables in the font. */
+ int countTables() const;
+
+ /** Copy into tags[] (allocated by the caller) the list of table tags in
+     *  the font, and return the number. This will be the same as countTables()
+     *  or 0 if an error occurred. If tags == NULL, this only returns the count
+     *  (the same as calling countTables()).
+ */
+ int getTableTags(SkFontTableTag tags[]) const;
+
+ /** Given a table tag, return the size of its contents, or 0 if not present
+ */
+ size_t getTableSize(SkFontTableTag) const;
+
+ /** Copy the contents of a table into data (allocated by the caller). Note
+ * that the contents of the table will be in their native endian order
+ * (which for most truetype tables is big endian). If the table tag is
+ * not found, or there is an error copying the data, then 0 is returned.
+     *  If this happens, it is possible that some or all of the memory pointed
+     *  to by data may have been written to, even though an error has occurred.
+     *
+     *  @param tag  The table tag whose contents are to be copied
+ * @param offset The offset in bytes into the table's contents where the
+ * copy should start from.
+ * @param length The number of bytes, starting at offset, of table data
+ * to copy.
+ * @param data storage address where the table contents are copied to
+ * @return the number of bytes actually copied into data. If offset+length
+ * exceeds the table's size, then only the bytes up to the table's
+ * size are actually copied, and this is the value returned. If
+ * offset > the table's size, or tag is not a valid table,
+ * then 0 is returned.
+ */
+ size_t getTableData(SkFontTableTag tag, size_t offset, size_t length,
+ void* data) const;
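+
+    // Illustrative usage sketch (editor's addition): copying the 'name' table;
+    // 'face' is assumed to be a valid SkTypeface*.
+    //   SkFontTableTag tag = SkSetFourByteTag('n', 'a', 'm', 'e');
+    //   size_t size = face->getTableSize(tag);
+    //   std::vector<char> table(size);
+    //   size_t copied = face->getTableData(tag, 0, size, table.data());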
+
+ /**
+ * Return the units-per-em value for this typeface, or zero if there is an
+ * error.
+ */
+ int getUnitsPerEm() const;
+
+ /**
+ * Given a run of glyphs, return the associated horizontal adjustments.
+ * Adjustments are in "design units", which are integers relative to the
+ * typeface's units per em (see getUnitsPerEm).
+ *
+     *  Some typefaces are known to never support kerning. Calling this method
+     *  with all zeros (e.g. getKerningPairAdjustments(NULL, 0, NULL)) returns
+     *  a boolean indicating if the typeface might support kerning. If it
+     *  returns false, then it will always return false (no kerning) for all
+     *  possible glyph runs. If it returns true, then it *may* return true for
+     *  some glyph runs.
+ *
+ * If count is non-zero, then the glyphs parameter must point to at least
+ * [count] valid glyph IDs, and the adjustments parameter must be
+ * sized to at least [count - 1] entries. If the method returns true, then
+ * [count-1] entries in the adjustments array will be set. If the method
+ * returns false, then no kerning should be applied, and the adjustments
+ * array will be in an undefined state (possibly some values may have been
+ * written, but none of them should be interpreted as valid values).
+ */
+ bool getKerningPairAdjustments(const SkGlyphID glyphs[], int count,
+ int32_t adjustments[]) const;
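+
+    // Illustrative usage sketch (editor's addition): probe for kerning support
+    // before requesting adjustments for a glyph run of length 'count'.
+    //   if (face->getKerningPairAdjustments(nullptr, 0, nullptr)) {
+    //       std::vector<int32_t> adjustments(count - 1);
+    //       face->getKerningPairAdjustments(glyphs, count, adjustments.data());
+    //   }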
+
+ struct LocalizedString {
+ SkString fString;
+ SkString fLanguage;
+ };
+ class LocalizedStrings : ::SkNoncopyable {
+ public:
+ virtual ~LocalizedStrings() { }
+ virtual bool next(LocalizedString* localizedString) = 0;
+ void unref() { delete this; }
+ };
+ /**
+ * Returns an iterator which will attempt to enumerate all of the
+ * family names specified by the font.
+ * It is the caller's responsibility to unref() the returned pointer.
+ */
+ LocalizedStrings* createFamilyNameIterator() const;
+
+ /**
+ * Return the family name for this typeface. It will always be returned
+ * encoded as UTF8, but the language of the name is whatever the host
+ * platform chooses.
+ */
+ void getFamilyName(SkString* name) const;
+
+ /**
+ * Return a stream for the contents of the font data, or NULL on failure.
+ * If ttcIndex is not null, it is set to the TrueTypeCollection index
+ * of this typeface within the stream, or 0 if the stream is not a
+ * collection.
+ * The caller is responsible for deleting the stream.
+ */
+ SkStreamAsset* openStream(int* ttcIndex) const;
+
+ /**
+ * Return the font data, or nullptr on failure.
+ */
+ std::unique_ptr<SkFontData> makeFontData() const;
+
+ /**
+ * Return a scalercontext for the given descriptor. If this fails, then
+ * if allowFailure is true, this returns NULL, else it returns a
+ * dummy scalercontext that will not crash, but will draw nothing.
+ */
+ SkScalerContext* createScalerContext(const SkScalerContextEffects&, const SkDescriptor*,
+ bool allowFailure = false) const;
+
+ /**
+ * Return a rectangle (scaled to 1-pt) that represents the union of the bounds of all
+     *  of the glyphs, but each one positioned at (0, 0). This may be conservatively large, and
+ * will not take into account any hinting or other size-specific adjustments.
+ */
+ SkRect getBounds() const;
+
+    /**
+     *  Returns whether this typeface has color glyphs (e.g. emoji) and therefore
+     *  cannot be rendered as a path.
+ */
+ virtual bool hasColorGlyphs() const { return false; }
+
+ // PRIVATE / EXPERIMENTAL -- do not call
+ void filterRec(SkScalerContextRec* rec) const {
+ this->onFilterRec(rec);
+ }
+ // PRIVATE / EXPERIMENTAL -- do not call
+ void getFontDescriptor(SkFontDescriptor* desc, bool* isLocal) const {
+ this->onGetFontDescriptor(desc, isLocal);
+ }
+
+protected:
+ // The type of advance data wanted.
+ enum PerGlyphInfo {
+ kNo_PerGlyphInfo = 0x0, // Don't populate any per glyph info.
+ kGlyphNames_PerGlyphInfo = 0x1, // Populate glyph names (Type 1 only).
+ kToUnicode_PerGlyphInfo = 0x2 // Populate ToUnicode table, ignored
+ // for Type 1 fonts
+ };
+
+ /** uniqueID must be unique and non-zero
+ */
+ SkTypeface(const SkFontStyle& style, bool isFixedPitch = false);
+ virtual ~SkTypeface();
+
+ /** Sets the fixedPitch bit. If used, must be called in the constructor. */
+ void setIsFixedPitch(bool isFixedPitch) { fIsFixedPitch = isFixedPitch; }
+ /** Sets the font style. If used, must be called in the constructor. */
+ void setFontStyle(SkFontStyle style) { fStyle = style; }
+
+ friend class SkScalerContext;
+ static SkTypeface* GetDefaultTypeface(Style style = SkTypeface::kNormal);
+
+ virtual SkScalerContext* onCreateScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor*) const = 0;
+ virtual void onFilterRec(SkScalerContextRec*) const = 0;
+ virtual SkAdvancedTypefaceMetrics* onGetAdvancedTypefaceMetrics(
+ PerGlyphInfo,
+ const uint32_t* glyphIDs,
+ uint32_t glyphIDsCount) const = 0;
+
+ virtual SkStreamAsset* onOpenStream(int* ttcIndex) const = 0;
+ // TODO: make pure virtual.
+ virtual std::unique_ptr<SkFontData> onMakeFontData() const;
+
+ virtual void onGetFontDescriptor(SkFontDescriptor*, bool* isLocal) const = 0;
+
+ virtual int onCharsToGlyphs(const void* chars, Encoding, SkGlyphID glyphs[],
+ int glyphCount) const = 0;
+ virtual int onCountGlyphs() const = 0;
+
+ virtual int onGetUPEM() const = 0;
+ virtual bool onGetKerningPairAdjustments(const SkGlyphID glyphs[], int count,
+ int32_t adjustments[]) const;
+
+ /** Returns the family name of the typeface as known by its font manager.
+ * This name may or may not be produced by the family name iterator.
+ */
+ virtual void onGetFamilyName(SkString* familyName) const = 0;
+
+ /** Returns an iterator over the family names in the font. */
+ virtual LocalizedStrings* onCreateFamilyNameIterator() const = 0;
+
+ virtual int onGetTableTags(SkFontTableTag tags[]) const = 0;
+ virtual size_t onGetTableData(SkFontTableTag, size_t offset,
+ size_t length, void* data) const = 0;
+
+ virtual bool onComputeBounds(SkRect*) const;
+
+private:
+ friend class SkGTypeface;
+ friend class SkRandomTypeface;
+ friend class SkPDFFont;
+ friend class GrPathRendering;
+ friend class GrGLPathRendering;
+
+ /** Retrieve detailed typeface metrics. Used by the PDF backend.
+ @param perGlyphInfo Indicate what glyph specific information (advances,
+ names, etc.) should be populated.
+ @param glyphIDs For per-glyph info, specify subset of the font by
+ giving glyph ids. Each integer represents a glyph
+ id. Passing NULL means all glyphs in the font.
+        @param glyphIDsCount Number of elements in glyphIDs. Ignored if
+ glyphIDs is NULL.
+ @return The returned object has already been referenced.
+ */
+ SkAdvancedTypefaceMetrics* getAdvancedTypefaceMetrics(
+ PerGlyphInfo,
+ const uint32_t* glyphIDs = NULL,
+ uint32_t glyphIDsCount = 0) const;
+
+private:
+ SkFontID fUniqueID;
+ SkFontStyle fStyle;
+ mutable SkRect fBounds;
+ mutable SkOnce fBoundsOnce;
+ bool fIsFixedPitch;
+
+ friend class SkPaint;
+ friend class SkGlyphCache; // GetDefaultTypeface
+
+ typedef SkWeakRefCnt INHERITED;
+};
+
+namespace skstd {
+template <> struct is_bitmask_enum<SkTypeface::PerGlyphInfo> : std::true_type {};
+}
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkTypes.h b/gfx/skia/skia/include/core/SkTypes.h
new file mode 100644
index 000000000..0cef8a125
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkTypes.h
@@ -0,0 +1,730 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTypes_DEFINED
+#define SkTypes_DEFINED
+
+// IWYU pragma: begin_exports
+
+// In at least two known scenarios when using GCC with libc++:
+// * GCC 4.8 targeting ARMv7 with NEON
+// * GCC 4.9 targeting ARMv8 64 bit
+// we need to typedef float float32_t (or include <arm_neon.h> which does that)
+// before #including <memory>. This makes no sense. I'm not very interested in
+// understanding why... these are old, bizarre platform configurations that we
+// should just let die.
+// See https://llvm.org/bugs/show_bug.cgi?id=25608 .
+#include <ciso646> // Include something innocuous to define _LIBCPP_VERSION if it's libc++.
+#if defined(__GNUC__) && __GNUC__ == 4 \
+ && ((defined(__arm__) && (defined(__ARM_NEON__) || defined(__ARM_NEON))) || defined(__aarch64__)) \
+ && defined(_LIBCPP_VERSION)
+ typedef float float32_t;
+ #include <memory>
+#endif
+
+#include "SkPreConfig.h"
+#include "SkUserConfig.h"
+#include "SkPostConfig.h"
+#include <stddef.h>
+#include <stdint.h>
+// IWYU pragma: end_exports
+
+#include <string.h>
+
+/**
+ * sk_careful_memcpy() is just like memcpy(), but guards against undefined behavior.
+ *
+ * It is undefined behavior to call memcpy() with null dst or src, even if len is 0.
+ * If an optimizer is "smart" enough, it can exploit this to do unexpected things.
+ * memcpy(dst, src, 0);
+ * if (src) {
+ * printf("%x\n", *src);
+ * }
+ * In this code the compiler can assume src is not null and omit the if (src) {...} check,
+ * unconditionally running the printf, crashing the program if src really is null.
+ *  Of the compilers we pay attention to, only GCC performs this optimization in practice.
+ */
+static inline void* sk_careful_memcpy(void* dst, const void* src, size_t len) {
+ // When we pass >0 len we had better already be passing valid pointers.
+ // So we just need to skip calling memcpy when len == 0.
+ if (len) {
+ memcpy(dst,src,len);
+ }
+ return dst;
+}
+
+/** \file SkTypes.h
+*/
+
+/** See SkGraphics::GetVersion() to retrieve these at runtime
+ */
+#define SKIA_VERSION_MAJOR 1
+#define SKIA_VERSION_MINOR 0
+#define SKIA_VERSION_PATCH 0
+
+/*
+ memory wrappers to be implemented by the porting layer (platform)
+*/
+
+/** Called internally if we run out of memory. The platform implementation must
+ not return, but should either throw an exception or otherwise exit.
+*/
+SK_API extern void sk_out_of_memory(void);
+/** Called internally if we hit an unrecoverable error.
+ The platform implementation must not return, but should either throw
+ an exception or otherwise exit.
+*/
+SK_API extern void sk_abort_no_print(void);
+
+enum {
+ SK_MALLOC_TEMP = 0x01, //!< hint to sk_malloc that the requested memory will be freed in the scope of the stack frame
+ SK_MALLOC_THROW = 0x02 //!< instructs sk_malloc to call sk_throw if the memory cannot be allocated.
+};
+/** Return a block of memory (at least 4-byte aligned) of at least the
+    specified size. If the requested memory cannot be returned, either
+    return null (if the SK_MALLOC_THROW bit is clear) or throw an exception
+    (if the SK_MALLOC_THROW bit is set). To free the memory, call sk_free().
+*/
+SK_API extern void* sk_malloc_flags(size_t size, unsigned flags);
+/** Same as sk_malloc(), but hard coded to pass SK_MALLOC_THROW as the flag
+*/
+SK_API extern void* sk_malloc_throw(size_t size);
+/** Same as standard realloc(), but this one never returns null on failure. It will throw
+ an exception if it fails.
+*/
+SK_API extern void* sk_realloc_throw(void* buffer, size_t size);
+/** Free memory returned by sk_malloc(). It is safe to pass null.
+*/
+SK_API extern void sk_free(void*);
+
+/** Much like calloc: returns a pointer to at least size zero bytes, or NULL on failure.
+ */
+SK_API extern void* sk_calloc(size_t size);
+
+/** Same as sk_calloc, but throws an exception instead of returning NULL on failure.
+ */
+SK_API extern void* sk_calloc_throw(size_t size);
+
+// bzero is safer than memset, but we can't rely on it, so... sk_bzero()
+static inline void sk_bzero(void* buffer, size_t size) {
+ // Please c.f. sk_careful_memcpy. It's undefined behavior to call memset(null, 0, 0).
+ if (size) {
+ memset(buffer, 0, size);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef override_GLOBAL_NEW
+#include <new>
+
+inline void* operator new(size_t size) {
+ return sk_malloc_throw(size);
+}
+
+inline void operator delete(void* p) {
+ sk_free(p);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define SK_INIT_TO_AVOID_WARNING = 0
+
+#ifndef SkDebugf
+ SK_API void SkDebugf(const char format[], ...);
+#endif
+
+#define SkREQUIRE_SEMICOLON_AFTER(code) do { code } while (false)
+
+#define SkASSERT_RELEASE(cond) \
+ SkREQUIRE_SEMICOLON_AFTER(if (!(cond)) { SK_ABORT(#cond); } )
+
+#ifdef SK_DEBUG
+ #define SkASSERT(cond) \
+ SkREQUIRE_SEMICOLON_AFTER(if (!(cond)) { SK_ABORT("assert(" #cond ")"); })
+ #define SkASSERTF(cond, fmt, ...) \
+ SkREQUIRE_SEMICOLON_AFTER(if (!(cond)) { \
+ SkDebugf(fmt"\n", __VA_ARGS__); \
+ SK_ABORT("assert(" #cond ")"); \
+ })
+ #define SkDEBUGFAIL(message) SK_ABORT(message)
+ #define SkDEBUGFAILF(fmt, ...) SkASSERTF(false, fmt, ##__VA_ARGS__)
+ #define SkDEBUGCODE(...) __VA_ARGS__
+ #define SkDECLAREPARAM(type, var) , type var
+ #define SkPARAM(var) , var
+ #define SkDEBUGF(args ) SkDebugf args
+ #define SkAssertResult(cond) SkASSERT(cond)
+#else
+ #define SkASSERT(cond)
+ #define SkASSERTF(cond, fmt, ...)
+ #define SkDEBUGFAIL(message)
+ #define SkDEBUGFAILF(fmt, ...)
+ #define SkDEBUGCODE(...)
+ #define SkDEBUGF(args)
+ #define SkDECLAREPARAM(type, var)
+ #define SkPARAM(var)
+
+    // Unlike SkASSERT, this macro evaluates its condition even in non-debug builds.
+    // The if is present so that this can be used with functions marked SK_WARN_UNUSED_RESULT.
+ #define SkAssertResult(cond) if (cond) {} do {} while(false)
+#endif
+
+// Legacy macro names for SK_ABORT
+#define SkFAIL(message) SK_ABORT(message)
+#define sk_throw() SK_ABORT("sk_throw")
+
+#ifdef SK_IGNORE_TO_STRING
+ #define SK_TO_STRING_NONVIRT()
+ #define SK_TO_STRING_VIRT()
+ #define SK_TO_STRING_PUREVIRT()
+ #define SK_TO_STRING_OVERRIDE()
+#else
+ class SkString;
+ // the 'toString' helper functions convert Sk* objects to human-readable
+ // form in developer mode
+ #define SK_TO_STRING_NONVIRT() void toString(SkString* str) const;
+ #define SK_TO_STRING_VIRT() virtual void toString(SkString* str) const;
+ #define SK_TO_STRING_PUREVIRT() virtual void toString(SkString* str) const = 0;
+ #define SK_TO_STRING_OVERRIDE() void toString(SkString* str) const override;
+#endif
+
+/*
+ * Usage: SK_MACRO_CONCAT(a, b) to construct the symbol ab
+ *
+ * SK_MACRO_CONCAT_IMPL_PRIV just exists to make this work. Do not use directly
+ *
+ */
+#define SK_MACRO_CONCAT(X, Y) SK_MACRO_CONCAT_IMPL_PRIV(X, Y)
+#define SK_MACRO_CONCAT_IMPL_PRIV(X, Y) X ## Y
+
+/*
+ * Usage: SK_MACRO_APPEND_LINE(foo) to make foo123, where 123 is the current
+ * line number. Easy way to construct
+ * unique names for local functions or
+ * variables.
+ */
+#define SK_MACRO_APPEND_LINE(name) SK_MACRO_CONCAT(name, __LINE__)
+
+/**
+ * For some classes, it's almost always an error to instantiate one without a name, e.g.
+ * {
+ * SkAutoMutexAcquire(&mutex);
+ * <some code>
+ * }
+ * In this case, the writer meant to hold mutex while the rest of the code in the block runs,
+ * but instead the mutex is acquired and then immediately released. The correct usage is
+ * {
+ * SkAutoMutexAcquire lock(&mutex);
+ * <some code>
+ * }
+ *
+ * To prevent callers from instantiating your class without a name, use SK_REQUIRE_LOCAL_VAR
+ * like this:
+ * class classname {
+ * <your class>
+ * };
+ * #define classname(...) SK_REQUIRE_LOCAL_VAR(classname)
+ *
+ * This won't work with templates, and you must inline the class' constructors and destructors.
+ * Take a look at SkAutoFree and SkAutoMalloc in this file for examples.
+ */
+#define SK_REQUIRE_LOCAL_VAR(classname) \
+ static_assert(false, "missing name for " #classname)
+
+///////////////////////////////////////////////////////////////////////
+
+/**
+ * Fast type for signed 8 bits. Use for parameter passing and local variables,
+ * not for storage.
+ */
+typedef int S8CPU;
+
+/**
+ * Fast type for unsigned 8 bits. Use for parameter passing and local
+ * variables, not for storage
+ */
+typedef unsigned U8CPU;
+
+/**
+ * Fast type for signed 16 bits. Use for parameter passing and local variables,
+ * not for storage
+ */
+typedef int S16CPU;
+
+/**
+ * Fast type for unsigned 16 bits. Use for parameter passing and local
+ * variables, not for storage
+ */
+typedef unsigned U16CPU;
+
+/**
+ * Meant to be a small version of bool, for storage purposes. Will be 0 or 1
+ */
+typedef uint8_t SkBool8;
+
+#include "../private/SkTFitsIn.h"
+template <typename D, typename S> D SkTo(S s) {
+ SkASSERT(SkTFitsIn<D>(s));
+ return static_cast<D>(s);
+}
+#define SkToS8(x) SkTo<int8_t>(x)
+#define SkToU8(x) SkTo<uint8_t>(x)
+#define SkToS16(x) SkTo<int16_t>(x)
+#define SkToU16(x) SkTo<uint16_t>(x)
+#define SkToS32(x) SkTo<int32_t>(x)
+#define SkToU32(x) SkTo<uint32_t>(x)
+#define SkToInt(x) SkTo<int>(x)
+#define SkToUInt(x) SkTo<unsigned>(x)
+#define SkToSizeT(x) SkTo<size_t>(x)
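+
+// Illustrative sketch (editor's addition): SkTo performs a checked narrowing
+// conversion; in debug builds it asserts that the value fits the target type.
+//   size_t n = buffer.size();      // 'buffer' is hypothetical
+//   int32_t count = SkToS32(n);    // asserts in debug if n > INT32_MAX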
+
+/** Returns 0 or 1 based on the condition
+*/
+#define SkToBool(cond) ((cond) != 0)
+
+#define SK_MaxS16 32767
+#define SK_MinS16 -32767
+#define SK_MaxU16 0xFFFF
+#define SK_MinU16 0
+#define SK_MaxS32 0x7FFFFFFF
+#define SK_MinS32 -SK_MaxS32
+#define SK_MaxU32 0xFFFFFFFF
+#define SK_MinU32 0
+#define SK_NaN32 ((int) (1U << 31))
+
+/** Returns true if the value can be represented with signed 16bits
+ */
+static inline bool SkIsS16(long x) {
+ return (int16_t)x == x;
+}
+
+/** Returns true if the value can be represented with unsigned 16bits
+ */
+static inline bool SkIsU16(long x) {
+ return (uint16_t)x == x;
+}
+
+static inline int32_t SkLeftShift(int32_t value, int32_t shift) {
+ return (int32_t) ((uint32_t) value << shift);
+}
+
+static inline int64_t SkLeftShift(int64_t value, int32_t shift) {
+ return (int64_t) ((uint64_t) value << shift);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+/** Returns the number of entries in an array (not a pointer) */
+template <typename T, size_t N> char (&SkArrayCountHelper(T (&array)[N]))[N];
+#define SK_ARRAY_COUNT(array) (sizeof(SkArrayCountHelper(array)))
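+
+// Illustrative sketch (editor's addition): SK_ARRAY_COUNT only accepts real
+// arrays, so passing a pointer fails to compile instead of giving a bogus size.
+//   static const int kPrimes[] = { 2, 3, 5, 7 };
+//   size_t n = SK_ARRAY_COUNT(kPrimes);   // == 4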
+
+// Can be used to bracket data types that must be dense, e.g. hash keys.
+#if defined(__clang__) // This should work on GCC too, but GCC diagnostic pop didn't seem to work!
+ #define SK_BEGIN_REQUIRE_DENSE _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic error \"-Wpadded\"")
+ #define SK_END_REQUIRE_DENSE _Pragma("GCC diagnostic pop")
+#else
+ #define SK_BEGIN_REQUIRE_DENSE
+ #define SK_END_REQUIRE_DENSE
+#endif
+
+#define SkAlign2(x) (((x) + 1) >> 1 << 1)
+#define SkIsAlign2(x) (0 == ((x) & 1))
+
+#define SkAlign4(x) (((x) + 3) >> 2 << 2)
+#define SkIsAlign4(x) (0 == ((x) & 3))
+
+#define SkAlign8(x) (((x) + 7) >> 3 << 3)
+#define SkIsAlign8(x) (0 == ((x) & 7))
+
+#define SkAlign16(x) (((x) + 15) >> 4 << 4)
+#define SkIsAlign16(x) (0 == ((x) & 15))
+
+#define SkAlignPtr(x) (sizeof(void*) == 8 ? SkAlign8(x) : SkAlign4(x))
+#define SkIsAlignPtr(x) (sizeof(void*) == 8 ? SkIsAlign8(x) : SkIsAlign4(x))
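+
+// Illustrative sketch (editor's addition): the align macros round up to the
+// next multiple of the given power of two.
+//   SkAlign4(10) == 12    SkIsAlign4(12) == true
+//   SkAlign8(17) == 24    SkIsAlign8(17) == false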
+
+typedef uint32_t SkFourByteTag;
+#define SkSetFourByteTag(a, b, c, d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
+
+/** 32 bit integer to hold a unicode value
+*/
+typedef int32_t SkUnichar;
+
+/** 16 bit unsigned integer to hold a glyph index
+*/
+typedef uint16_t SkGlyphID;
+
+/** 32 bit value to hold a millisecond duration
+ * Note that SK_MSecMax is about 25 days.
+ */
+typedef uint32_t SkMSec;
+/** 1 second measured in milliseconds
+*/
+#define SK_MSec1 1000
+/** maximum representable milliseconds; 24d 20h 31m 23.647s.
+*/
+#define SK_MSecMax 0x7FFFFFFF
+/** Returns a < b for milliseconds, correctly handling wrap-around from 0xFFFFFFFF to 0
+*/
+#define SkMSec_LT(a, b) ((int32_t)(a) - (int32_t)(b) < 0)
+/** Returns a <= b for milliseconds, correctly handling wrap-around from 0xFFFFFFFF to 0
+*/
+#define SkMSec_LE(a, b) ((int32_t)(a) - (int32_t)(b) <= 0)
+
+/** The generation IDs in Skia reserve 0 as an invalid marker.
+ */
+#define SK_InvalidGenID     0
+/** The unique IDs in Skia reserve 0 as an invalid marker.
+ */
+#define SK_InvalidUniqueID  0
+
+/****************************************************************************
+ The rest of these only build with C++
+*/
+#ifdef __cplusplus
+
+/** Faster than SkToBool for integral conditions. Returns 0 or 1
+*/
+static inline constexpr int Sk32ToBool(uint32_t n) {
+ return (n | (0-n)) >> 31;
+}
+
+/** Generic swap function. Classes with efficient swaps should specialize this function to take
+ their fast path. This function is used by SkTSort. */
+template <typename T> inline void SkTSwap(T& a, T& b) {
+ T c(a);
+ a = b;
+ b = c;
+}
+
+static inline int32_t SkAbs32(int32_t value) {
+ SkASSERT(value != SK_NaN32); // The most negative int32_t can't be negated.
+ if (value < 0) {
+ value = -value;
+ }
+ return value;
+}
+
+template <typename T> inline T SkTAbs(T value) {
+ if (value < 0) {
+ value = -value;
+ }
+ return value;
+}
+
+static inline int32_t SkMax32(int32_t a, int32_t b) {
+ if (a < b)
+ a = b;
+ return a;
+}
+
+static inline int32_t SkMin32(int32_t a, int32_t b) {
+ if (a > b)
+ a = b;
+ return a;
+}
+
+template <typename T> constexpr const T& SkTMin(const T& a, const T& b) {
+ return (a < b) ? a : b;
+}
+
+template <typename T> constexpr const T& SkTMax(const T& a, const T& b) {
+ return (b < a) ? a : b;
+}
+
+static inline int32_t SkSign32(int32_t a) {
+ return (a >> 31) | ((unsigned) -a >> 31);
+}
+
+static inline int32_t SkFastMin32(int32_t value, int32_t max) {
+ if (value > max) {
+ value = max;
+ }
+ return value;
+}
+
+/** Returns value pinned between min and max, inclusively. */
+template <typename T> static constexpr const T& SkTPin(const T& value, const T& min, const T& max) {
+ return SkTMax(SkTMin(value, max), min);
+}
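+
+// Illustrative sketch (editor's addition): SkTPin clamps a value to [min, max]
+// and works for any type that defines operator<.
+//   SkTPin(5, 0, 10)        == 5
+//   SkTPin(-3, 0, 10)       == 0
+//   SkTPin(1.5f, 0.f, 1.f)  == 1.f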
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Indicates whether an allocation should count against a cache budget.
+ */
+enum class SkBudgeted : bool {
+ kNo = false,
+ kYes = true
+};
+
+/**
+ * Indicates whether a backing store needs to be an exact match or can be larger
+ * than is strictly necessary
+ */
+enum class SkBackingFit {
+ kApprox,
+ kExact
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** Use to combine multiple bits in a bitmask in a type safe way.
+ */
+template <typename T>
+T SkTBitOr(T a, T b) {
+ return (T)(a | b);
+}
+
+/**
+ *  Use to cast a pointer to a different type without violating strict-aliasing rules.
+ */
+template <typename Dst> Dst SkTCast(const void* ptr) {
+ union {
+ const void* src;
+ Dst dst;
+ } data;
+ data.src = ptr;
+ return data.dst;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+/** \class SkNoncopyable
+
+SkNoncopyable is the base class for objects that do not want to
+be copied. It hides its copy-constructor and its assignment-operator.
+*/
+class SK_API SkNoncopyable {
+public:
+ SkNoncopyable() {}
+
+private:
+ SkNoncopyable(const SkNoncopyable&);
+ SkNoncopyable& operator=(const SkNoncopyable&);
+};
+
+class SkAutoFree : SkNoncopyable {
+public:
+ SkAutoFree() : fPtr(NULL) {}
+ explicit SkAutoFree(void* ptr) : fPtr(ptr) {}
+ ~SkAutoFree() { sk_free(fPtr); }
+
+    /** Return the currently allocated buffer, or null.
+ */
+ void* get() const { return fPtr; }
+
+ /** Assign a new ptr allocated with sk_malloc (or null), and return the
+ previous ptr. Note it is the caller's responsibility to sk_free the
+ returned ptr.
+ */
+ void* set(void* ptr) {
+ void* prev = fPtr;
+ fPtr = ptr;
+ return prev;
+ }
+
+ /** Transfer ownership of the current ptr to the caller, setting the
+        internal reference to null. Note the caller is responsible for calling
+ sk_free on the returned address.
+ */
+ void* release() { return this->set(NULL); }
+
+ /** Free the current buffer, and set the internal reference to NULL. Same
+ as calling sk_free(release())
+ */
+ void reset() {
+ sk_free(fPtr);
+ fPtr = NULL;
+ }
+
+private:
+ void* fPtr;
+ // illegal
+ SkAutoFree(const SkAutoFree&);
+ SkAutoFree& operator=(const SkAutoFree&);
+};
+#define SkAutoFree(...) SK_REQUIRE_LOCAL_VAR(SkAutoFree)
+
+/**
+ * Manage an allocated block of heap memory. This object is the sole manager of
+ * the lifetime of the block, so the caller must not call sk_free() or delete
+ * on the block, unless release() was called.
+ */
+class SkAutoMalloc : SkNoncopyable {
+public:
+ explicit SkAutoMalloc(size_t size = 0) {
+ fPtr = size ? sk_malloc_throw(size) : NULL;
+ fSize = size;
+ }
+
+ ~SkAutoMalloc() {
+ sk_free(fPtr);
+ }
+
+ /**
+ * Passed to reset to specify what happens if the requested size is smaller
+ * than the current size (and the current block was dynamically allocated).
+ */
+ enum OnShrink {
+ /**
+ * If the requested size is smaller than the current size, and the
+ * current block is dynamically allocated, free the old block and
+ * malloc a new block of the smaller size.
+ */
+ kAlloc_OnShrink,
+
+ /**
+ * If the requested size is smaller than the current size, and the
+ * current block is dynamically allocated, just return the old
+ * block.
+ */
+ kReuse_OnShrink
+ };
+
+ /**
+ * Reallocates the block to a new size. The ptr may or may not change.
+ */
+ void* reset(size_t size = 0, OnShrink shrink = kAlloc_OnShrink, bool* didChangeAlloc = NULL) {
+ if (size == fSize || (kReuse_OnShrink == shrink && size < fSize)) {
+ if (didChangeAlloc) {
+ *didChangeAlloc = false;
+ }
+ return fPtr;
+ }
+
+ sk_free(fPtr);
+ fPtr = size ? sk_malloc_throw(size) : NULL;
+ fSize = size;
+ if (didChangeAlloc) {
+ *didChangeAlloc = true;
+ }
+
+ return fPtr;
+ }
+
+ /**
+ * Return the allocated block.
+ */
+ void* get() { return fPtr; }
+ const void* get() const { return fPtr; }
+
+ /** Transfer ownership of the current ptr to the caller, setting the
+        internal reference to null. Note the caller is responsible for calling
+ sk_free on the returned address.
+ */
+ void* release() {
+ void* ptr = fPtr;
+ fPtr = NULL;
+ fSize = 0;
+ return ptr;
+ }
+
+private:
+ void* fPtr;
+ size_t fSize; // can be larger than the requested size (see kReuse)
+};
+#define SkAutoMalloc(...) SK_REQUIRE_LOCAL_VAR(SkAutoMalloc)
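+
+// Illustrative usage sketch (editor's addition): reuse one SkAutoMalloc across
+// iterations; kReuse_OnShrink keeps the current block when the next request is
+// smaller. 'sizes' is hypothetical.
+//   SkAutoMalloc storage(1024);
+//   for (size_t size : sizes) {
+//       void* p = storage.reset(size, SkAutoMalloc::kReuse_OnShrink);
+//       // ... fill p with up to 'size' bytes ...
+//   }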
+
+/**
+ * Manage an allocated block of memory. If the requested size is <= kSizeRequested (or slightly
+ * more), then the allocation will come from the stack rather than the heap. This object is the
+ * sole manager of the lifetime of the block, so the caller must not call sk_free() or delete on
+ * the block.
+ */
+template <size_t kSizeRequested> class SkAutoSMalloc : SkNoncopyable {
+public:
+ /**
+ * Creates initially empty storage. get() returns a ptr, but it is to a zero-byte allocation.
+ * Must call reset(size) to return an allocated block.
+ */
+ SkAutoSMalloc() {
+ fPtr = fStorage;
+ fSize = kSize;
+ }
+
+ /**
+ * Allocate a block of the specified size. If size <= kSizeRequested (or slightly more), then
+ * the allocation will come from the stack, otherwise it will be dynamically allocated.
+ */
+ explicit SkAutoSMalloc(size_t size) {
+ fPtr = fStorage;
+ fSize = kSize;
+ this->reset(size);
+ }
+
+ /**
+ * Free the allocated block (if any). If the block was small enough to have been allocated on
+ * the stack, then this does nothing.
+ */
+ ~SkAutoSMalloc() {
+ if (fPtr != (void*)fStorage) {
+ sk_free(fPtr);
+ }
+ }
+
+ /**
+ * Return the allocated block. May return non-null even if the block is of zero size. Since
+ * this may be on the stack or dynamically allocated, the caller must not call sk_free() on it,
+ * but must rely on SkAutoSMalloc to manage it.
+ */
+ void* get() const { return fPtr; }
+
+ /**
+ * Return a new block of the requested size, freeing (as necessary) any previously allocated
+ * block. As with the constructor, if size <= kSizeRequested (or slightly more) then the return
+ * block may be allocated locally, rather than from the heap.
+ */
+ void* reset(size_t size,
+ SkAutoMalloc::OnShrink shrink = SkAutoMalloc::kAlloc_OnShrink,
+ bool* didChangeAlloc = NULL) {
+ size = (size < kSize) ? kSize : size;
+ bool alloc = size != fSize && (SkAutoMalloc::kAlloc_OnShrink == shrink || size > fSize);
+ if (didChangeAlloc) {
+ *didChangeAlloc = alloc;
+ }
+ if (alloc) {
+ if (fPtr != (void*)fStorage) {
+ sk_free(fPtr);
+ }
+
+ if (size == kSize) {
+ SkASSERT(fPtr != fStorage); // otherwise we lied when setting didChangeAlloc.
+ fPtr = fStorage;
+ } else {
+ fPtr = sk_malloc_flags(size, SK_MALLOC_THROW | SK_MALLOC_TEMP);
+ }
+
+ fSize = size;
+ }
+ SkASSERT(fSize >= size && fSize >= kSize);
+ SkASSERT((fPtr == fStorage) || fSize > kSize);
+ return fPtr;
+ }
+
+private:
+ // Align up to 32 bits.
+ static const size_t kSizeAlign4 = SkAlign4(kSizeRequested);
+#if defined(GOOGLE3)
+ // Stack frame size is limited for GOOGLE3. 4k is less than the actual max, but some functions
+ // have multiple large stack allocations.
+ static const size_t kMaxBytes = 4 * 1024;
+ static const size_t kSize = kSizeRequested > kMaxBytes ? kMaxBytes : kSizeAlign4;
+#else
+ static const size_t kSize = kSizeAlign4;
+#endif
+
+ void* fPtr;
+ size_t fSize; // can be larger than the requested size (see kReuse)
+ uint32_t fStorage[kSize >> 2];
+};
+// Can't guard the constructor because it's a template class.
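+
+// Illustrative usage sketch (editor's addition): SkAutoSMalloc keeps small
+// requests on the stack and only falls back to the heap for larger ones.
+//   SkAutoSMalloc<1024> storage(byteCount);  // heap-allocates only if byteCount > 1024
+//   void* scratch = storage.get();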
+
+#endif /* C++ */
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkUnPreMultiply.h b/gfx/skia/skia/include/core/SkUnPreMultiply.h
new file mode 100644
index 000000000..16181ce4e
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkUnPreMultiply.h
@@ -0,0 +1,58 @@
+
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+
+
+#ifndef SkUnPreMultiply_DEFINED
+#define SkUnPreMultiply_DEFINED
+
+#include "SkColor.h"
+
+class SK_API SkUnPreMultiply {
+public:
+ typedef uint32_t Scale;
+
+ // index this table with alpha [0..255]
+ static const Scale* GetScaleTable() {
+ return gTable;
+ }
+
+ static Scale GetScale(U8CPU alpha) {
+ SkASSERT(alpha <= 255);
+ return gTable[alpha];
+ }
+
+ /** Usage:
+
+ const Scale* table = SkUnPreMultiply::GetScaleTable();
+
+ for (...) {
+ unsigned a = ...
+ SkUnPreMultiply::Scale scale = table[a];
+
+ red = SkUnPreMultiply::ApplyScale(scale, red);
+ ...
+ // now red is unpremultiplied
+ }
+ */
+ static U8CPU ApplyScale(Scale scale, U8CPU component) {
+ SkASSERT(component <= 255);
+ return (scale * component + (1 << 23)) >> 24;
+ }
+
+ static SkColor PMColorToColor(SkPMColor c);
+
+ static uint32_t UnPreMultiplyPreservingByteOrder(SkPMColor c);
+
+private:
+ static const uint32_t gTable[256];
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkWriteBuffer.h b/gfx/skia/skia/include/core/SkWriteBuffer.h
new file mode 100644
index 000000000..29f923fed
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkWriteBuffer.h
@@ -0,0 +1,157 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkWriteBuffer_DEFINED
+#define SkWriteBuffer_DEFINED
+
+#include "SkData.h"
+#include "SkImage.h"
+#include "SkPath.h"
+#include "SkPicture.h"
+#include "SkPixelSerializer.h"
+#include "SkRefCnt.h"
+#include "SkWriter32.h"
+#include "../private/SkTHash.h"
+
+class SkBitmap;
+class SkDeduper;
+class SkFactorySet;
+class SkFlattenable;
+class SkRefCntSet;
+
+class SkWriteBuffer {
+public:
+ SkWriteBuffer() {}
+ virtual ~SkWriteBuffer() {}
+
+ virtual bool isCrossProcess() const = 0;
+
+ virtual void writeByteArray(const void* data, size_t size) = 0;
+ void writeDataAsByteArray(SkData* data) {
+ this->writeByteArray(data->data(), data->size());
+ }
+ virtual void writeBool(bool value) = 0;
+ virtual void writeScalar(SkScalar value) = 0;
+ virtual void writeScalarArray(const SkScalar* value, uint32_t count) = 0;
+ virtual void writeInt(int32_t value) = 0;
+ virtual void writeIntArray(const int32_t* value, uint32_t count) = 0;
+ virtual void writeUInt(uint32_t value) = 0;
+ void write32(int32_t value) {
+ this->writeInt(value);
+ }
+ virtual void writeString(const char* value) = 0;
+
+ virtual void writeFlattenable(const SkFlattenable* flattenable) = 0;
+ virtual void writeColor(SkColor color) = 0;
+ virtual void writeColorArray(const SkColor* color, uint32_t count) = 0;
+ virtual void writeColor4f(const SkColor4f& color) = 0;
+ virtual void writeColor4fArray(const SkColor4f* color, uint32_t count) = 0;
+ virtual void writePoint(const SkPoint& point) = 0;
+ virtual void writePointArray(const SkPoint* point, uint32_t count) = 0;
+ virtual void writeMatrix(const SkMatrix& matrix) = 0;
+ virtual void writeIRect(const SkIRect& rect) = 0;
+ virtual void writeRect(const SkRect& rect) = 0;
+ virtual void writeRegion(const SkRegion& region) = 0;
+ virtual void writePath(const SkPath& path) = 0;
+ virtual size_t writeStream(SkStream* stream, size_t length) = 0;
+ virtual void writeBitmap(const SkBitmap& bitmap) = 0;
+ virtual void writeImage(const SkImage*) = 0;
+ virtual void writeTypeface(SkTypeface* typeface) = 0;
+ virtual void writePaint(const SkPaint& paint) = 0;
+
+ void setDeduper(SkDeduper* deduper) { fDeduper = deduper; }
+
+protected:
+ SkDeduper* fDeduper = nullptr;
+};
+
+/**
+ * Concrete implementation that serializes to a flat binary blob.
+ */
+class SkBinaryWriteBuffer : public SkWriteBuffer {
+public:
+ enum Flags {
+ kCrossProcess_Flag = 1 << 0,
+ };
+
+ SkBinaryWriteBuffer(uint32_t flags = 0);
+ SkBinaryWriteBuffer(void* initialStorage, size_t storageSize, uint32_t flags = 0);
+ ~SkBinaryWriteBuffer();
+
+ bool isCrossProcess() const override {
+ return SkToBool(fFlags & kCrossProcess_Flag);
+ }
+
+ void write(const void* buffer, size_t bytes) {
+ fWriter.write(buffer, bytes);
+ }
+
+ void reset(void* storage = NULL, size_t storageSize = 0) {
+ fWriter.reset(storage, storageSize);
+ }
+
+ size_t bytesWritten() const { return fWriter.bytesWritten(); }
+
+ void writeByteArray(const void* data, size_t size) override;
+ void writeBool(bool value) override;
+ void writeScalar(SkScalar value) override;
+ void writeScalarArray(const SkScalar* value, uint32_t count) override;
+ void writeInt(int32_t value) override;
+ void writeIntArray(const int32_t* value, uint32_t count) override;
+ void writeUInt(uint32_t value) override;
+ void writeString(const char* value) override;
+
+ void writeFlattenable(const SkFlattenable* flattenable) override;
+ void writeColor(SkColor color) override;
+ void writeColorArray(const SkColor* color, uint32_t count) override;
+ void writeColor4f(const SkColor4f& color) override;
+ void writeColor4fArray(const SkColor4f* color, uint32_t count) override;
+ void writePoint(const SkPoint& point) override;
+ void writePointArray(const SkPoint* point, uint32_t count) override;
+ void writeMatrix(const SkMatrix& matrix) override;
+ void writeIRect(const SkIRect& rect) override;
+ void writeRect(const SkRect& rect) override;
+ void writeRegion(const SkRegion& region) override;
+ void writePath(const SkPath& path) override;
+ size_t writeStream(SkStream* stream, size_t length) override;
+ void writeBitmap(const SkBitmap& bitmap) override;
+ void writeImage(const SkImage*) override;
+ void writeTypeface(SkTypeface* typeface) override;
+ void writePaint(const SkPaint& paint) override;
+
+ bool writeToStream(SkWStream*);
+ void writeToMemory(void* dst) { fWriter.flatten(dst); }
+
+ SkFactorySet* setFactoryRecorder(SkFactorySet*);
+ SkRefCntSet* setTypefaceRecorder(SkRefCntSet*);
+
+ /**
+ * Set an SkPixelSerializer to store an encoded representation of pixels,
+ * e.g. SkBitmaps.
+ *
+ * Calls ref() on the serializer.
+ *
+ * TODO: Encode SkImage pixels as well.
+ */
+ void setPixelSerializer(SkPixelSerializer*);
+ SkPixelSerializer* getPixelSerializer() const { return fPixelSerializer; }
+
+private:
+ const uint32_t fFlags;
+ SkFactorySet* fFactorySet;
+ SkWriter32 fWriter;
+
+ SkRefCntSet* fTFSet;
+
+ SkAutoTUnref<SkPixelSerializer> fPixelSerializer;
+
+ // Only used if we do not have an fFactorySet
+ SkTHashMap<SkString, uint32_t> fFlattenableDict;
+};
+
+#endif // SkWriteBuffer_DEFINED
diff --git a/gfx/skia/skia/include/core/SkWriter32.h b/gfx/skia/skia/include/core/SkWriter32.h
new file mode 100644
index 000000000..a5ecb3f24
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkWriter32.h
@@ -0,0 +1,276 @@
+
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkWriter32_DEFINED
+#define SkWriter32_DEFINED
+
+#include "../private/SkTemplates.h"
+#include "SkData.h"
+#include "SkMatrix.h"
+#include "SkPath.h"
+#include "SkPoint.h"
+#include "SkRRect.h"
+#include "SkRect.h"
+#include "SkRegion.h"
+#include "SkScalar.h"
+#include "SkStream.h"
+#include "SkTypes.h"
+
+class SK_API SkWriter32 : SkNoncopyable {
+public:
+ /**
+ * The caller can specify an initial block of storage, which the caller manages.
+ *
+     *  SkWriter32 will try to back reserve() and write() calls with this external storage until the
+     *  first time an allocation doesn't fit. From then on it will use dynamically allocated storage.
+     *  This used to be optional behavior, but pipe now relies on it.
+ */
+ SkWriter32(void* external = NULL, size_t externalBytes = 0) {
+ this->reset(external, externalBytes);
+ }
+
+ // return the current offset (will always be a multiple of 4)
+ size_t bytesWritten() const { return fUsed; }
+
+ SK_ATTR_DEPRECATED("use bytesWritten")
+ size_t size() const { return this->bytesWritten(); }
+
+ void reset(void* external = NULL, size_t externalBytes = 0) {
+ SkASSERT(SkIsAlign4((uintptr_t)external));
+ SkASSERT(SkIsAlign4(externalBytes));
+
+ fData = (uint8_t*)external;
+ fCapacity = externalBytes;
+ fUsed = 0;
+ fExternal = external;
+ }
+
+ // size MUST be multiple of 4
+ uint32_t* reserve(size_t size) {
+ SkASSERT(SkAlign4(size) == size);
+ size_t offset = fUsed;
+ size_t totalRequired = fUsed + size;
+ if (totalRequired > fCapacity) {
+ this->growToAtLeast(totalRequired);
+ }
+ fUsed = totalRequired;
+ return (uint32_t*)(fData + offset);
+ }
+
+ /**
+ * Read a T record at offset, which must be a multiple of 4. Only legal if the record
+ * was written atomically using the write methods below.
+ */
+ template<typename T>
+ const T& readTAt(size_t offset) const {
+ SkASSERT(SkAlign4(offset) == offset);
+ SkASSERT(offset < fUsed);
+ return *(T*)(fData + offset);
+ }
+
+ /**
+ * Overwrite a T record at offset, which must be a multiple of 4. Only legal if the record
+ * was written atomically using the write methods below.
+ */
+ template<typename T>
+ void overwriteTAt(size_t offset, const T& value) {
+ SkASSERT(SkAlign4(offset) == offset);
+ SkASSERT(offset < fUsed);
+ *(T*)(fData + offset) = value;
+ }
+
+ bool writeBool(bool value) {
+ this->write32(value);
+ return value;
+ }
+
+ void writeInt(int32_t value) {
+ this->write32(value);
+ }
+
+ void write8(int32_t value) {
+ *(int32_t*)this->reserve(sizeof(value)) = value & 0xFF;
+ }
+
+ void write16(int32_t value) {
+ *(int32_t*)this->reserve(sizeof(value)) = value & 0xFFFF;
+ }
+
+ void write32(int32_t value) {
+ *(int32_t*)this->reserve(sizeof(value)) = value;
+ }
+
+ void writePtr(void* value) {
+ *(void**)this->reserve(sizeof(value)) = value;
+ }
+
+ void writeScalar(SkScalar value) {
+ *(SkScalar*)this->reserve(sizeof(value)) = value;
+ }
+
+ void writePoint(const SkPoint& pt) {
+ *(SkPoint*)this->reserve(sizeof(pt)) = pt;
+ }
+
+ void writeRect(const SkRect& rect) {
+ *(SkRect*)this->reserve(sizeof(rect)) = rect;
+ }
+
+ void writeIRect(const SkIRect& rect) {
+ *(SkIRect*)this->reserve(sizeof(rect)) = rect;
+ }
+
+ void writeRRect(const SkRRect& rrect) {
+ rrect.writeToMemory(this->reserve(SkRRect::kSizeInMemory));
+ }
+
+ void writePath(const SkPath& path) {
+ size_t size = path.writeToMemory(NULL);
+ SkASSERT(SkAlign4(size) == size);
+ path.writeToMemory(this->reserve(size));
+ }
+
+ void writeMatrix(const SkMatrix& matrix) {
+ size_t size = matrix.writeToMemory(NULL);
+ SkASSERT(SkAlign4(size) == size);
+ matrix.writeToMemory(this->reserve(size));
+ }
+
+ void writeRegion(const SkRegion& rgn) {
+ size_t size = rgn.writeToMemory(NULL);
+ SkASSERT(SkAlign4(size) == size);
+ rgn.writeToMemory(this->reserve(size));
+ }
+
+ // write count bytes (must be a multiple of 4)
+ void writeMul4(const void* values, size_t size) {
+ this->write(values, size);
+ }
+
+ /**
+ * Write size bytes from values. size must be a multiple of 4, though
+ * values need not be 4-byte aligned.
+ */
+ void write(const void* values, size_t size) {
+ SkASSERT(SkAlign4(size) == size);
+ sk_careful_memcpy(this->reserve(size), values, size);
+ }
+
+ /**
+ * Reserve size bytes. Does not need to be 4 byte aligned. The remaining space (if any) will be
+ * filled in with zeroes.
+ */
+ uint32_t* reservePad(size_t size) {
+ size_t alignedSize = SkAlign4(size);
+ uint32_t* p = this->reserve(alignedSize);
+ if (alignedSize != size) {
+ SkASSERT(alignedSize >= 4);
+ p[alignedSize / 4 - 1] = 0;
+ }
+ return p;
+ }
+
+ /**
+ * Write size bytes from src, and pad to 4 byte alignment with zeroes.
+ */
+ void writePad(const void* src, size_t size) {
+ sk_careful_memcpy(this->reservePad(size), src, size);
+ }
+
+ /**
+ * Writes a string to the writer, which can be retrieved with
+ * SkReader32::readString().
+ * The length can be specified, or if -1 is passed, it will be computed by
+ * calling strlen(). The length must be < max size_t.
+ *
+ * If you write NULL, it will be read as "".
+ */
+ void writeString(const char* str, size_t len = (size_t)-1);
+
+ /**
+     *  Computes the size (aligned to a multiple of 4) needed to write the string
+     *  in a call to writeString(). If the length is not specified, it will be
+     *  computed by calling strlen().
+ */
+ static size_t WriteStringSize(const char* str, size_t len = (size_t)-1);
+
+ void writeData(const SkData* data) {
+ uint32_t len = data ? SkToU32(data->size()) : 0;
+ this->write32(len);
+ if (data) {
+ this->writePad(data->data(), len);
+ }
+ }
+
+ static size_t WriteDataSize(const SkData* data) {
+ return 4 + SkAlign4(data ? data->size() : 0);
+ }
+
+ /**
+ * Move the cursor back to offset bytes from the beginning.
+     *  offset must be a multiple of 4 that is no greater than size().
+ */
+ void rewindToOffset(size_t offset) {
+ SkASSERT(SkAlign4(offset) == offset);
+ SkASSERT(offset <= bytesWritten());
+ fUsed = offset;
+ }
+
+ // copy into a single buffer (allocated by caller). Must be at least size()
+ void flatten(void* dst) const {
+ memcpy(dst, fData, fUsed);
+ }
+
+ bool writeToStream(SkWStream* stream) const {
+ return stream->write(fData, fUsed);
+ }
+
+ // read from the stream, and write up to length bytes. Return the actual
+ // number of bytes written.
+ size_t readFromStream(SkStream* stream, size_t length) {
+ return stream->read(this->reservePad(length), length);
+ }
+
+ /**
+ * Captures a snapshot of the data as it is right now, and return it.
+ */
+ sk_sp<SkData> snapshotAsData() const;
+private:
+ void growToAtLeast(size_t size);
+
+ uint8_t* fData; // Points to either fInternal or fExternal.
+ size_t fCapacity; // Number of bytes we can write to fData.
+ size_t fUsed; // Number of bytes written.
+ void* fExternal; // Unmanaged memory block.
+ SkAutoTMalloc<uint8_t> fInternal; // Managed memory block.
+};
+
+/**
+ *  Helper class to allocate SIZE bytes as part of the writer, and to provide
+ * that storage to the constructor as its initial storage buffer.
+ *
+ * This wrapper ensures proper alignment rules are met for the storage.
+ */
+template <size_t SIZE> class SkSWriter32 : public SkWriter32 {
+public:
+ SkSWriter32() { this->reset(); }
+
+    void reset() { this->INHERITED::reset(fData.fStorage, SIZE); }
+
+private:
+ union {
+ void* fPtrAlignment;
+ double fDoubleAlignment;
+ char fStorage[SIZE];
+ } fData;
+
+ typedef SkWriter32 INHERITED;
+};
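+
+// Illustrative usage sketch (editor's addition): an SkSWriter32 writes into its
+// own stack buffer first and grows onto the heap transparently if it overflows.
+//   SkSWriter32<256> writer;
+//   writer.write32(42);
+//   writer.writeScalar(SK_Scalar1);
+//   sk_sp<SkData> blob = writer.snapshotAsData();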
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkXfermode.h b/gfx/skia/skia/include/core/SkXfermode.h
new file mode 100644
index 000000000..253ee1b40
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkXfermode.h
@@ -0,0 +1,331 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkXfermode_DEFINED
+#define SkXfermode_DEFINED
+
+#include "SkBlendMode.h"
+#include "SkColor.h"
+#include "SkFlattenable.h"
+
+class GrFragmentProcessor;
+class GrTexture;
+class GrXPFactory;
+class SkRasterPipeline;
+class SkString;
+
+struct SkArithmeticParams;
+
+struct SkPM4f;
+typedef SkPM4f (*SkXfermodeProc4f)(const SkPM4f& src, const SkPM4f& dst);
+
+/** \class SkXfermode
+ *
+ *  SkXfermode is the base class for objects that are called to implement custom
+ *  "transfer-modes" in the drawing pipeline. The static function Make(Mode)
+ *  can be called to return an instance of any of the predefined subclasses as
+ *  specified in the Mode enum. When an SkXfermode is assigned to an SkPaint,
+ * then objects drawn with that paint have the xfermode applied.
+ *
+ *  All subclasses are required to be reentrant-safe: it must be legal to share
+ * the same instance between several threads.
+ */
+class SK_API SkXfermode : public SkFlattenable {
+public:
+ virtual void xfer32(SkPMColor dst[], const SkPMColor src[], int count,
+ const SkAlpha aa[]) const;
+ virtual void xfer16(uint16_t dst[], const SkPMColor src[], int count,
+ const SkAlpha aa[]) const;
+ virtual void xferA8(SkAlpha dst[], const SkPMColor src[], int count,
+ const SkAlpha aa[]) const;
+
+ /** Enum of possible coefficients to describe some xfermodes
+ */
+ enum Coeff {
+ kZero_Coeff, /** 0 */
+ kOne_Coeff, /** 1 */
+ kSC_Coeff, /** src color */
+ kISC_Coeff, /** inverse src color (i.e. 1 - sc) */
+ kDC_Coeff, /** dst color */
+ kIDC_Coeff, /** inverse dst color (i.e. 1 - dc) */
+ kSA_Coeff, /** src alpha */
+ kISA_Coeff, /** inverse src alpha (i.e. 1 - sa) */
+ kDA_Coeff, /** dst alpha */
+ kIDA_Coeff, /** inverse dst alpha (i.e. 1 - da) */
+
+ kCoeffCount
+ };
+
+ /** List of predefined xfermodes.
+ The algebra for the modes uses the following symbols:
+ Sa, Sc - source alpha and color
+ Da, Dc - destination alpha and color (before compositing)
+ [a, c] - Resulting (alpha, color) values
+ For these equations, the colors are in premultiplied state.
+ If no xfermode is specified, kSrcOver is assumed.
+ The modes are ordered by those that can be expressed as a pair of Coeffs, followed by those
+ that aren't Coeffs but have separable r,g,b computations, and finally
+ those that are not separable.
+ */
+ enum Mode {
+ kClear_Mode, //!< [0, 0]
+ kSrc_Mode, //!< [Sa, Sc]
+ kDst_Mode, //!< [Da, Dc]
+ kSrcOver_Mode, //!< [Sa + Da * (1 - Sa), Sc + Dc * (1 - Sa)]
+ kDstOver_Mode, //!< [Da + Sa * (1 - Da), Dc + Sc * (1 - Da)]
+ kSrcIn_Mode, //!< [Sa * Da, Sc * Da]
+ kDstIn_Mode, //!< [Da * Sa, Dc * Sa]
+ kSrcOut_Mode, //!< [Sa * (1 - Da), Sc * (1 - Da)]
+ kDstOut_Mode, //!< [Da * (1 - Sa), Dc * (1 - Sa)]
+ kSrcATop_Mode, //!< [Da, Sc * Da + Dc * (1 - Sa)]
+ kDstATop_Mode, //!< [Sa, Dc * Sa + Sc * (1 - Da)]
+ kXor_Mode, //!< [Sa + Da - 2 * Sa * Da, Sc * (1 - Da) + Dc * (1 - Sa)]
+ kPlus_Mode, //!< [Sa + Da, Sc + Dc]
+ kModulate_Mode, // multiplies all components (= alpha and color)
+
+ // Following blend modes are defined in the CSS Compositing standard:
+ // https://dvcs.w3.org/hg/FXTF/rawfile/tip/compositing/index.html#blending
+ kScreen_Mode,
+ kLastCoeffMode = kScreen_Mode,
+
+ kOverlay_Mode,
+ kDarken_Mode,
+ kLighten_Mode,
+ kColorDodge_Mode,
+ kColorBurn_Mode,
+ kHardLight_Mode,
+ kSoftLight_Mode,
+ kDifference_Mode,
+ kExclusion_Mode,
+ kMultiply_Mode,
+ kLastSeparableMode = kMultiply_Mode,
+
+ kHue_Mode,
+ kSaturation_Mode,
+ kColor_Mode,
+ kLuminosity_Mode,
+ kLastMode = kLuminosity_Mode
+ };
+
+ /**
+ * Gets the name of the Mode as a string.
+ */
+ static const char* ModeName(Mode);
+ static const char* ModeName(SkBlendMode mode) {
+ return ModeName(Mode(mode));
+ }
+
+ /**
+ * If the xfermode is one of the modes in the Mode enum, then asMode()
+ * returns true and sets (if not null) mode accordingly. Otherwise it
+ * returns false and ignores the mode parameter.
+ */
+ virtual bool asMode(Mode* mode) const;
+
+ /**
+ * The same as calling xfermode->asMode(mode), except that this also checks
+ * if the xfermode is NULL, and if so, treats it as kSrcOver_Mode.
+ */
+ static bool AsMode(const SkXfermode*, Mode* mode);
+ static bool AsMode(const sk_sp<SkXfermode>& xfer, Mode* mode) {
+ return AsMode(xfer.get(), mode);
+ }
+
+ /**
+ * Returns true if the xfermode claims to be the specified Mode. This works
+ * correctly even if the xfermode is NULL (which equates to kSrcOver.) Thus
+ * you can say this without checking for a null...
+ *
+ * If (SkXfermode::IsMode(paint.getXfermode(),
+ * SkXfermode::kDstOver_Mode)) {
+ * ...
+ * }
+ */
+ static bool IsMode(const SkXfermode* xfer, Mode mode);
+ static bool IsMode(const sk_sp<SkXfermode>& xfer, Mode mode) {
+ return IsMode(xfer.get(), mode);
+ }
+
+ /** Return an SkXfermode object for the specified mode.
+ */
+ static sk_sp<SkXfermode> Make(Mode);
+#ifdef SK_SUPPORT_LEGACY_XFERMODE_PTR
+ static SkXfermode* Create(Mode mode) {
+ return Make(mode).release();
+ }
+ SK_ATTR_DEPRECATED("use AsMode(...)")
+ static bool IsMode(const SkXfermode* xfer, Mode* mode) {
+ return AsMode(xfer, mode);
+ }
+#endif
+
+ /**
+ * Skia maintains global xfermode objects corresponding to each BlendMode. This returns a
+ * ptr to that global xfermode (or null if the mode is srcover). Thus the caller may use
+ * the returned ptr, but it should leave its refcnt untouched.
+ */
+ static SkXfermode* Peek(SkBlendMode mode) {
+ sk_sp<SkXfermode> xfer = Make(mode);
+ if (!xfer) {
+ SkASSERT(SkBlendMode::kSrcOver == mode);
+ return nullptr;
+ }
+ SkASSERT(!xfer->unique());
+ return xfer.get();
+ }
+
+ static sk_sp<SkXfermode> Make(SkBlendMode bm) {
+ return Make((Mode)bm);
+ }
+
+ SkBlendMode blend() const {
+ Mode mode;
+ SkAssertResult(this->asMode(&mode));
+ return (SkBlendMode)mode;
+ }
+
+ /** Return a function pointer to a routine that applies the specified
+ porter-duff transfer mode.
+ */
+ static SkXfermodeProc GetProc(Mode mode);
+ static SkXfermodeProc4f GetProc4f(Mode);
+
+ virtual SkXfermodeProc4f getProc4f() const;
+
+ bool appendStages(SkRasterPipeline*) const;
+
+ /**
+ * If the specified mode can be represented by a pair of Coeff, then return
+ * true and set (if not NULL) the corresponding coeffs. If the mode is
+ * not representable as a pair of Coeffs, return false and ignore the
+ * src and dst parameters.
+ */
+ static bool ModeAsCoeff(Mode mode, Coeff* src, Coeff* dst);
+
+ /**
+ * Returns whether or not the xfer mode can support treating coverage as alpha
+ */
+ virtual bool supportsCoverageAsAlpha() const;
+
+ /**
+ * The same as calling xfermode->supportsCoverageAsAlpha(), except that this also checks if
+ * the xfermode is NULL, and if so, treats it as kSrcOver_Mode.
+ */
+ static bool SupportsCoverageAsAlpha(const SkXfermode* xfer);
+ static bool SupportsCoverageAsAlpha(const sk_sp<SkXfermode>& xfer) {
+ return SupportsCoverageAsAlpha(xfer.get());
+ }
+
+ enum SrcColorOpacity {
+ // The src color is known to be opaque (alpha == 255)
+ kOpaque_SrcColorOpacity = 0,
+ // The src color is known to be fully transparent (color == 0)
+ kTransparentBlack_SrcColorOpacity = 1,
+ // The src alpha is known to be fully transparent (alpha == 0)
+ kTransparentAlpha_SrcColorOpacity = 2,
+ // The src color opacity is unknown
+ kUnknown_SrcColorOpacity = 3
+ };
+
+ /**
+ * Returns whether or not the result of the draw with the xfer mode will be opaque or not. The
+ * input to this call is an enum describing known information about the opacity of the src color
+ * that will be given to the xfer mode.
+ */
+ virtual bool isOpaque(SrcColorOpacity opacityType) const;
+
+ /**
+ * The same as calling xfermode->isOpaque(...), except that this also checks if
+ * the xfermode is NULL, and if so, treats it as kSrcOver_Mode.
+ */
+ static bool IsOpaque(const SkXfermode* xfer, SrcColorOpacity opacityType);
+ static bool IsOpaque(const sk_sp<SkXfermode>& xfer, SrcColorOpacity opacityType) {
+ return IsOpaque(xfer.get(), opacityType);
+ }
+ static bool IsOpaque(SkBlendMode, SrcColorOpacity);
+
+#if SK_SUPPORT_GPU
+ /** Used by the SkXfermodeImageFilter to blend two colors via a GrFragmentProcessor.
+ The input to the returned FP is the src color. The dst color is
+ provided by the dst param which becomes a child FP of the returned FP.
+ It is legal for the function to return a null output. This indicates that
+ the output of the blend is simply the src color.
+ */
+ virtual sk_sp<GrFragmentProcessor> makeFragmentProcessorForImageFilter(
+ sk_sp<GrFragmentProcessor> dst) const;
+
+ /** A subclass must implement this factory function to work with the GPU backend.
+ The xfermode will return a factory for which the caller will get a ref. It is up
+ to the caller to install it. XferProcessors cannot use a background texture.
+ */
+ virtual sk_sp<GrXPFactory> asXPFactory() const;
+#endif
+
+ SK_TO_STRING_PUREVIRT()
+ SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP()
+ SK_DEFINE_FLATTENABLE_TYPE(SkXfermode)
+
+ enum D32Flags {
+ kSrcIsOpaque_D32Flag = 1 << 0,
+ kSrcIsSingle_D32Flag = 1 << 1,
+ kDstIsSRGB_D32Flag = 1 << 2,
+ };
+ typedef void (*D32Proc)(const SkXfermode*, uint32_t dst[], const SkPM4f src[],
+ int count, const SkAlpha coverage[]);
+ static D32Proc GetD32Proc(SkXfermode*, uint32_t flags);
+ static D32Proc GetD32Proc(const sk_sp<SkXfermode>& xfer, uint32_t flags) {
+ return GetD32Proc(xfer.get(), flags);
+ }
+
+ enum F16Flags {
+ kSrcIsOpaque_F16Flag = 1 << 0,
+ kSrcIsSingle_F16Flag = 1 << 1,
+ };
+ typedef void (*F16Proc)(const SkXfermode*, uint64_t dst[], const SkPM4f src[], int count,
+ const SkAlpha coverage[]);
+ static F16Proc GetF16Proc(SkXfermode*, uint32_t flags);
+ static F16Proc GetF16Proc(const sk_sp<SkXfermode>& xfer, uint32_t flags) {
+ return GetF16Proc(xfer.get(), flags);
+ }
+
+ enum LCDFlags {
+ kSrcIsOpaque_LCDFlag = 1 << 0, // else src(s) may have alpha < 1
+ kSrcIsSingle_LCDFlag = 1 << 1, // else src[count]
+ kDstIsSRGB_LCDFlag = 1 << 2, // else l32 or f16
+ };
+ typedef void (*LCD32Proc)(uint32_t* dst, const SkPM4f* src, int count, const uint16_t lcd[]);
+ typedef void (*LCDF16Proc)(uint64_t* dst, const SkPM4f* src, int count, const uint16_t lcd[]);
+ static LCD32Proc GetLCD32Proc(uint32_t flags);
+ static LCDF16Proc GetLCDF16Proc(uint32_t) { return nullptr; }
+
+ virtual bool isArithmetic(SkArithmeticParams*) const { return false; }
+
+protected:
+ SkXfermode() {}
+ /** The default implementation of xfer32/xfer16/xferA8 in turn call this
+ method, 1 color at a time (upscaled to a SkPMColor). The default
+ implementation of this method just returns dst. If performance is
+ important, your subclass should override xfer32/xfer16/xferA8 directly.
+
+ This method will not be called directly by the client, so it need not
+ be implemented if your subclass has overridden xfer32/xfer16/xferA8
+ */
+ virtual SkPMColor xferColor(SkPMColor src, SkPMColor dst) const;
+
+ virtual D32Proc onGetD32Proc(uint32_t flags) const;
+ virtual F16Proc onGetF16Proc(uint32_t flags) const;
+ virtual bool onAppendStages(SkRasterPipeline*) const;
+
+private:
+ enum {
+ kModeCount = kLastMode + 1
+ };
+
+ typedef SkFlattenable INHERITED;
+};
+
+#endif
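
As a minimal sketch (not part of the patch) of the opacity query declared above: a null xfermode is documented to behave as kSrcOver_Mode, and kUnknown_SrcColorOpacity is the conservative choice when nothing is known about the source color.

    #include "SkXfermode.h"

    // Returns true only if the blend is guaranteed to leave the destination
    // opaque even when the source alpha is unknown.
    static bool blendKeepsDstOpaque(const sk_sp<SkXfermode>& xfer) {
        return SkXfermode::IsOpaque(xfer, SkXfermode::kUnknown_SrcColorOpacity);
    }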
diff --git a/gfx/skia/skia/include/core/SkYUVSizeInfo.h b/gfx/skia/skia/include/core/SkYUVSizeInfo.h
new file mode 100644
index 000000000..2c5a51d79
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkYUVSizeInfo.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkYUVSizeInfo_DEFINED
+#define SkYUVSizeInfo_DEFINED
+
+struct SkYUVSizeInfo {
+ enum {
+ kY = 0,
+ kU = 1,
+ kV = 2,
+ };
+ SkISize fSizes[3];
+
+ /**
+ * While the widths of the Y, U, and V planes are not restricted, the
+ * implementation often requires that the width of the memory allocated
+ * for each plane be a multiple of 8.
+ *
+     * This struct allows us to inform the client how many "widthBytes"
+     * we need.  Note that we use the term "widthBytes" because it is
+     * distinct from "rowBytes" (used elsewhere in Skia).  "rowBytes"
+     * allows the last row of the allocation to omit any extra padding,
+     * while, in this case, every single row of the allocation must be at
+     * least "widthBytes".
+ */
+ size_t fWidthBytes[3];
+};
+
+#endif // SkYUVSizeInfo_DEFINED
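
A small sketch of how a decoder client might size one contiguous allocation from this struct; it assumes SkISize (from SkSize.h) provides height(), which is standard Skia but not shown in this patch.

    #include "SkSize.h"
    #include "SkYUVSizeInfo.h"

    // Every row of plane i occupies fWidthBytes[i] bytes, so each plane needs
    // fWidthBytes[i] * height bytes even though the last row could be shorter
    // under the usual "rowBytes" convention.
    static size_t totalYUVBytes(const SkYUVSizeInfo& info) {
        size_t total = 0;
        for (int i = SkYUVSizeInfo::kY; i <= SkYUVSizeInfo::kV; ++i) {
            total += info.fWidthBytes[i] * (size_t)info.fSizes[i].height();
        }
        return total;
    }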
diff --git a/gfx/skia/skia/include/effects/Sk1DPathEffect.h b/gfx/skia/skia/include/effects/Sk1DPathEffect.h
new file mode 100644
index 000000000..d5315a873
--- /dev/null
+++ b/gfx/skia/skia/include/effects/Sk1DPathEffect.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk1DPathEffect_DEFINED
+#define Sk1DPathEffect_DEFINED
+
+#include "SkPathEffect.h"
+#include "SkPath.h"
+
+class SkPathMeasure;
+
+// This class is not exported to java.
+class SK_API Sk1DPathEffect : public SkPathEffect {
+public:
+ virtual bool filterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec*, const SkRect*) const override;
+
+protected:
+ /** Called at the start of each contour, returns the initial offset
+ into that contour.
+ */
+ virtual SkScalar begin(SkScalar contourLength) const = 0;
+ /** Called with the current distance along the path, with the current matrix
+ for the point/tangent at the specified distance.
+ Return the distance to travel for the next call. If return <= 0, then that
+ contour is done.
+ */
+ virtual SkScalar next(SkPath* dst, SkScalar dist, SkPathMeasure&) const = 0;
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ bool exposedInAndroidJavaAPI() const override { return true; }
+#endif
+
+private:
+ typedef SkPathEffect INHERITED;
+};
+
+class SK_API SkPath1DPathEffect : public Sk1DPathEffect {
+public:
+ enum Style {
+ kTranslate_Style, // translate the shape to each position
+ kRotate_Style, // rotate the shape about its center
+ kMorph_Style, // transform each point, and turn lines into curves
+
+ kLastEnum_Style = kMorph_Style,
+ };
+
+ /** Dash by replicating the specified path.
+ @param path The path to replicate (dash)
+ @param advance The space between instances of path
+ @param phase distance (mod advance) along path for its initial position
+ @param style how to transform path at each point (based on the current
+ position and tangent)
+ */
+ static sk_sp<SkPathEffect> Make(const SkPath& path, SkScalar advance, SkScalar phase, Style);
+
+#ifdef SK_SUPPORT_LEGACY_PATHEFFECT_PTR
+ static SkPathEffect* Create(const SkPath& path, SkScalar advance, SkScalar phase, Style s) {
+ return Make(path, advance, phase, s).release();
+ }
+#endif
+
+ virtual bool filterPath(SkPath*, const SkPath&,
+ SkStrokeRec*, const SkRect*) const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkPath1DPathEffect)
+
+protected:
+ SkPath1DPathEffect(const SkPath& path, SkScalar advance, SkScalar phase, Style);
+ void flatten(SkWriteBuffer&) const override;
+
+ // overrides from Sk1DPathEffect
+ SkScalar begin(SkScalar contourLength) const override;
+ SkScalar next(SkPath*, SkScalar, SkPathMeasure&) const override;
+
+private:
+ SkPath fPath; // copied from constructor
+ SkScalar fAdvance; // copied from constructor
+ SkScalar fInitialOffset; // computed from phase
+ Style fStyle; // copied from constructor
+
+ typedef Sk1DPathEffect INHERITED;
+};
+
+#endif
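
A hedged usage sketch for the factory above: stamp a small triangle along a stroked contour, rotating it to follow the tangent. SkPaint::setPathEffect (from SkPaint.h) is assumed standard Skia API and is not part of this patch.

    #include "Sk1DPathEffect.h"
    #include "SkPaint.h"
    #include "SkPath.h"

    static void dashWithTriangles(SkPaint* paint) {
        SkPath stamp;
        stamp.moveTo(0, -3);
        stamp.lineTo(6, 0);
        stamp.lineTo(0, 3);
        stamp.close();
        // Replicate the stamp every 20 units, rotated to the path tangent.
        paint->setPathEffect(SkPath1DPathEffect::Make(
                stamp, /*advance=*/20, /*phase=*/0, SkPath1DPathEffect::kRotate_Style));
    }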
diff --git a/gfx/skia/skia/include/effects/Sk2DPathEffect.h b/gfx/skia/skia/include/effects/Sk2DPathEffect.h
new file mode 100644
index 000000000..823a6ad9d
--- /dev/null
+++ b/gfx/skia/skia/include/effects/Sk2DPathEffect.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk2DPathEffect_DEFINED
+#define Sk2DPathEffect_DEFINED
+
+#include "SkPath.h"
+#include "SkPathEffect.h"
+#include "SkMatrix.h"
+
+class SK_API Sk2DPathEffect : public SkPathEffect {
+public:
+ bool filterPath(SkPath*, const SkPath&, SkStrokeRec*, const SkRect*) const override;
+
+protected:
+ /** New virtual, to be overridden by subclasses.
+ This is called once from filterPath, and provides the
+ uv parameter bounds for the path. Subsequent calls to
+ next() will receive u and v values within these bounds,
+ and then a call to end() will signal the end of processing.
+ */
+ virtual void begin(const SkIRect& uvBounds, SkPath* dst) const;
+ virtual void next(const SkPoint& loc, int u, int v, SkPath* dst) const;
+ virtual void end(SkPath* dst) const;
+
+ /** Low-level virtual called per span of locations in the u-direction.
+ The default implementation calls next() repeatedly with each
+ location.
+ */
+ virtual void nextSpan(int u, int v, int ucount, SkPath* dst) const;
+
+ const SkMatrix& getMatrix() const { return fMatrix; }
+
+ // protected so that subclasses can call this during unflattening
+ explicit Sk2DPathEffect(const SkMatrix& mat);
+ void flatten(SkWriteBuffer&) const override;
+
+ SK_TO_STRING_OVERRIDE()
+
+private:
+ SkMatrix fMatrix, fInverse;
+ bool fMatrixIsInvertible;
+
+ // illegal
+ Sk2DPathEffect(const Sk2DPathEffect&);
+ Sk2DPathEffect& operator=(const Sk2DPathEffect&);
+
+ friend class Sk2DPathEffectBlitter;
+ typedef SkPathEffect INHERITED;
+};
+
+class SK_API SkLine2DPathEffect : public Sk2DPathEffect {
+public:
+ static sk_sp<SkPathEffect> Make(SkScalar width, const SkMatrix& matrix) {
+ return sk_sp<SkPathEffect>(new SkLine2DPathEffect(width, matrix));
+ }
+
+ virtual bool filterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec*, const SkRect*) const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkLine2DPathEffect)
+
+protected:
+ SkLine2DPathEffect(SkScalar width, const SkMatrix& matrix)
+ : Sk2DPathEffect(matrix), fWidth(width) {}
+ void flatten(SkWriteBuffer&) const override;
+
+ void nextSpan(int u, int v, int ucount, SkPath*) const override;
+
+private:
+ SkScalar fWidth;
+
+ typedef Sk2DPathEffect INHERITED;
+};
+
+class SK_API SkPath2DPathEffect : public Sk2DPathEffect {
+public:
+ /**
+ * Stamp the specified path to fill the shape, using the matrix to define
+     * the lattice.
+ */
+ static sk_sp<SkPathEffect> Make(const SkMatrix& matrix, const SkPath& path) {
+ return sk_sp<SkPathEffect>(new SkPath2DPathEffect(matrix, path));
+ }
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkPath2DPathEffect)
+
+protected:
+ SkPath2DPathEffect(const SkMatrix&, const SkPath&);
+ void flatten(SkWriteBuffer&) const override;
+
+ void next(const SkPoint&, int u, int v, SkPath*) const override;
+
+private:
+ SkPath fPath;
+
+ typedef Sk2DPathEffect INHERITED;
+};
+
+#endif
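
A minimal sketch of SkPath2DPathEffect::Make: fill a shape by stamping a dot on a regular lattice. SkMatrix::MakeScale and SkPaint::setPathEffect are assumed standard Skia API, not introduced by this patch.

    #include "Sk2DPathEffect.h"
    #include "SkMatrix.h"
    #include "SkPaint.h"
    #include "SkPath.h"

    static void fillWithDots(SkPaint* paint) {
        SkMatrix lattice = SkMatrix::MakeScale(12, 12);  // 12-unit cell spacing
        SkPath dot;
        dot.addCircle(0, 0, 3);
        paint->setPathEffect(SkPath2DPathEffect::Make(lattice, dot));
    }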
diff --git a/gfx/skia/skia/include/effects/SkAlphaThresholdFilter.h b/gfx/skia/skia/include/effects/SkAlphaThresholdFilter.h
new file mode 100644
index 000000000..18b760fc9
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkAlphaThresholdFilter.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAlphaThresholdFilter_DEFINED
+#define SkAlphaThresholdFilter_DEFINED
+
+#include "SkImageFilter.h"
+
+class SkRegion;
+
+class SK_API SkAlphaThresholdFilter {
+public:
+ /**
+ * Creates an image filter that samples a region. If the sample is inside the
+ * region the alpha of the image is boosted up to a threshold value. If it is
+ * outside the region then the alpha is decreased to the threshold value.
+ * The 0,0 point of the region corresponds to the upper left corner of the
+ * source image.
+ */
+ static sk_sp<SkImageFilter> Make(const SkRegion& region, SkScalar innerMin,
+ SkScalar outerMax, sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect = nullptr);
+
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* Create(const SkRegion& region, SkScalar innerMin,
+ SkScalar outerMax, SkImageFilter* input = nullptr) {
+ return Make(region, innerMin, outerMax, sk_ref_sp<SkImageFilter>(input)).release();
+ }
+#endif
+
+ SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP();
+};
+
+#endif
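
A small sketch of the factory above, assuming SkRegion's SkIRect constructor from SkRegion.h: samples inside 'keep' get their alpha boosted to at least innerMin, while samples outside have it reduced to at most outerMax.

    #include "SkAlphaThresholdFilter.h"
    #include "SkRegion.h"

    static sk_sp<SkImageFilter> makeAlphaThreshold(const SkIRect& keep) {
        SkRegion region(keep);
        return SkAlphaThresholdFilter::Make(region, /*innerMin=*/0.9f,
                                            /*outerMax=*/0.1f, /*input=*/nullptr);
    }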
diff --git a/gfx/skia/skia/include/effects/SkArcToPathEffect.h b/gfx/skia/skia/include/effects/SkArcToPathEffect.h
new file mode 100644
index 000000000..fcf4a3a5d
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkArcToPathEffect.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkArcToPathEffect_DEFINED
+#define SkArcToPathEffect_DEFINED
+
+#include "SkPathEffect.h"
+
+class SK_API SkArcToPathEffect : public SkPathEffect {
+public:
+ /** radius must be > 0 to have an effect. It specifies the distance from each corner
+ that should be "rounded".
+ */
+ static sk_sp<SkPathEffect> Make(SkScalar radius) {
+ if (radius <= 0) {
+ return NULL;
+ }
+ return sk_sp<SkPathEffect>(new SkArcToPathEffect(radius));
+ }
+
+ bool filterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*) const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkArcToPathEffect)
+
+protected:
+ explicit SkArcToPathEffect(SkScalar radius);
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ SkScalar fRadius;
+
+ typedef SkPathEffect INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkArithmeticMode.h b/gfx/skia/skia/include/effects/SkArithmeticMode.h
new file mode 100644
index 000000000..81b9f8539
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkArithmeticMode.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkArithmeticMode_DEFINED
+#define SkArithmeticMode_DEFINED
+
+#include "SkFlattenable.h"
+#include "SkScalar.h"
+#include "SkXfermode.h"
+
+#ifdef SK_SUPPORT_LEGACY_ARITHMETICMODE
+
+class SK_API SkArithmeticMode {
+public:
+ /**
+ * result = clamp[k1 * src * dst + k2 * src + k3 * dst + k4]
+ *
+ * k1=k2=k3=0, k4=1.0 results in returning opaque white
+ * k1=k3=k4=0, k2=1.0 results in returning the src
+ * k1=k2=k4=0, k3=1.0 results in returning the dst
+ */
+ static sk_sp<SkXfermode> Make(SkScalar k1, SkScalar k2, SkScalar k3, SkScalar k4,
+ bool enforcePMColor = true);
+#ifdef SK_SUPPORT_LEGACY_XFERMODE_PTR
+ static SkXfermode* Create(SkScalar k1, SkScalar k2,
+ SkScalar k3, SkScalar k4,
+ bool enforcePMColor = true) {
+ return Make(k1, k2, k3, k4, enforcePMColor).release();
+ }
+#endif
+
+ SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP();
+
+private:
+ SkArithmeticMode(); // can't be instantiated
+};
+
+#endif
+
+#endif
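
The formula documented above (result = clamp[k1*src*dst + k2*src + k3*dst + k4]) makes a 50/50 average easy to express; this sketch assumes SK_SUPPORT_LEGACY_ARITHMETICMODE is defined so the class is compiled in.

    #include "SkArithmeticMode.h"

    // k2 = k3 = 0.5 and k1 = k4 = 0 averages the source and destination.
    static sk_sp<SkXfermode> makeAverageMode() {
        return SkArithmeticMode::Make(0.0f, 0.5f, 0.5f, 0.0f);
    }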
diff --git a/gfx/skia/skia/include/effects/SkBlurDrawLooper.h b/gfx/skia/skia/include/effects/SkBlurDrawLooper.h
new file mode 100644
index 000000000..9b87683f0
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkBlurDrawLooper.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkBlurDrawLooper_DEFINED
+#define SkBlurDrawLooper_DEFINED
+
+#include "SkDrawLooper.h"
+#include "SkColor.h"
+
+class SkMaskFilter;
+class SkColorFilter;
+
+/** \class SkBlurDrawLooper
+ This class draws a shadow of the object (possibly offset), and then draws
+ the original object in its original position.
+ should there be an option to just draw the shadow/blur layer? webkit?
+*/
+class SK_API SkBlurDrawLooper : public SkDrawLooper {
+public:
+ enum BlurFlags {
+ kNone_BlurFlag = 0x00,
+ /**
+ The blur layer's dx/dy/radius aren't affected by the canvas
+ transform.
+ */
+ kIgnoreTransform_BlurFlag = 0x01,
+ kOverrideColor_BlurFlag = 0x02,
+ kHighQuality_BlurFlag = 0x04,
+ /** mask for all blur flags */
+ kAll_BlurFlag = 0x07
+ };
+
+ static sk_sp<SkDrawLooper> Make(SkColor color, SkScalar sigma, SkScalar dx, SkScalar dy,
+ uint32_t flags = kNone_BlurFlag) {
+ return sk_sp<SkDrawLooper>(new SkBlurDrawLooper(color, sigma, dx, dy, flags));
+ }
+#ifdef SK_SUPPORT_LEGACY_MINOR_EFFECT_PTR
+ static SkDrawLooper* Create(SkColor color, SkScalar sigma, SkScalar dx, SkScalar dy,
+ uint32_t flags = kNone_BlurFlag) {
+ return Make(color, sigma, dx, dy, flags).release();
+ }
+#endif
+
+ SkDrawLooper::Context* createContext(SkCanvas*, void* storage) const override;
+
+ size_t contextSize() const override { return sizeof(BlurDrawLooperContext); }
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkBlurDrawLooper)
+
+protected:
+ SkBlurDrawLooper(SkColor color, SkScalar sigma, SkScalar dx, SkScalar dy,
+ uint32_t flags);
+
+ void flatten(SkWriteBuffer&) const override;
+
+ bool asABlurShadow(BlurShadowRec*) const override;
+
+private:
+ sk_sp<SkMaskFilter> fBlur;
+ sk_sp<SkColorFilter> fColorFilter;
+ SkScalar fDx, fDy, fSigma;
+ SkColor fBlurColor;
+ uint32_t fBlurFlags;
+
+ enum State {
+ kBeforeEdge,
+ kAfterEdge,
+ kDone
+ };
+
+ class BlurDrawLooperContext : public SkDrawLooper::Context {
+ public:
+ explicit BlurDrawLooperContext(const SkBlurDrawLooper* looper);
+
+ bool next(SkCanvas* canvas, SkPaint* paint) override;
+
+ private:
+ const SkBlurDrawLooper* fLooper;
+ State fState;
+ };
+
+ void init(SkScalar sigma, SkScalar dx, SkScalar dy, SkColor color, uint32_t flags);
+ void initEffects();
+
+ typedef SkDrawLooper INHERITED;
+};
+
+#endif
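
A usage sketch for the looper above: draw a blurred black shadow offset by (3, 3), then the original geometry in place. SkPaint::setLooper is assumed from SkPaint.h.

    #include "SkBlurDrawLooper.h"
    #include "SkPaint.h"

    static void addDropShadow(SkPaint* paint) {
        paint->setLooper(SkBlurDrawLooper::Make(SK_ColorBLACK, /*sigma=*/2.0f,
                                                /*dx=*/3.0f, /*dy=*/3.0f));
    }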
diff --git a/gfx/skia/skia/include/effects/SkBlurImageFilter.h b/gfx/skia/skia/include/effects/SkBlurImageFilter.h
new file mode 100644
index 000000000..e2109d056
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkBlurImageFilter.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlurImageFilter_DEFINED
+#define SkBlurImageFilter_DEFINED
+
+#include "SkImageFilter.h"
+
+class SK_API SkBlurImageFilter {
+public:
+ static sk_sp<SkImageFilter> Make(SkScalar sigmaX, SkScalar sigmaY,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect = nullptr) {
+ return SkImageFilter::MakeBlur(sigmaX, sigmaY, input, cropRect);
+ }
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* Create(SkScalar sigmaX, SkScalar sigmaY,
+ SkImageFilter * input = nullptr,
+ const SkImageFilter::CropRect* cropRect = nullptr) {
+ return SkImageFilter::MakeBlur(sigmaX, sigmaY, sk_ref_sp<SkImageFilter>(input),
+ cropRect).release();
+ }
+#endif
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkBlurMaskFilter.h b/gfx/skia/skia/include/effects/SkBlurMaskFilter.h
new file mode 100644
index 000000000..dfbae6b68
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkBlurMaskFilter.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlurMaskFilter_DEFINED
+#define SkBlurMaskFilter_DEFINED
+
+// we include this since our callers will need to at least be able to ref/unref
+#include "SkMaskFilter.h"
+#include "SkScalar.h"
+#include "SkBlurTypes.h"
+
+class SK_API SkBlurMaskFilter {
+public:
+ /**
+ * If radius > 0, return the corresponding sigma, else return 0. Use this to convert from the
+     * (legacy) idea of specifying the blur "radius" to the standard notion of specifying its sigma.
+ */
+ static SkScalar ConvertRadiusToSigma(SkScalar radius);
+
+ enum BlurFlags {
+ kNone_BlurFlag = 0x00,
+ /** The blur layer's radius is not affected by transforms */
+ kIgnoreTransform_BlurFlag = 0x01,
+        /** Use a smoother, higher quality blur algorithm */
+ kHighQuality_BlurFlag = 0x02,
+ /** mask for all blur flags */
+ kAll_BlurFlag = 0x03
+ };
+
+ /** Create a blur maskfilter.
+ * @param style The SkBlurStyle to use
+ * @param sigma Standard deviation of the Gaussian blur to apply. Must be > 0.
+     *  @param occluder The rect for which no pixels need be drawn (because it will be overdrawn
+     *                  with some opaque object). This is just a hint, which backends are free to
+     *                  ignore.
+ * @param flags Flags to use - defaults to none
+ * @return The new blur maskfilter
+ */
+ static sk_sp<SkMaskFilter> Make(SkBlurStyle style, SkScalar sigma,
+ const SkRect& occluder, uint32_t flags = kNone_BlurFlag);
+
+ static sk_sp<SkMaskFilter> Make(SkBlurStyle style, SkScalar sigma,
+ uint32_t flags = kNone_BlurFlag) {
+ return Make(style, sigma, SkRect::MakeEmpty(), flags);
+ }
+
+ /** Create an emboss maskfilter
+ @param blurSigma standard deviation of the Gaussian blur to apply
+ before applying lighting (e.g. 3)
+ @param direction array of 3 scalars [x, y, z] specifying the direction of the light source
+ @param ambient 0...1 amount of ambient light
+ @param specular coefficient for specular highlights (e.g. 8)
+ @return the emboss maskfilter
+ */
+ static sk_sp<SkMaskFilter> MakeEmboss(SkScalar blurSigma, const SkScalar direction[3],
+ SkScalar ambient, SkScalar specular);
+
+#ifdef SK_SUPPORT_LEGACY_MASKFILTER_PTR
+ static SkMaskFilter* Create(SkBlurStyle style, SkScalar sigma, uint32_t flags = kNone_BlurFlag){
+ return Make(style, sigma, flags).release();
+ }
+ static SkMaskFilter* CreateEmboss(SkScalar blurSigma, const SkScalar direction[3],
+ SkScalar ambient, SkScalar specular) {
+ return MakeEmboss(blurSigma, direction, ambient, specular).release();
+ }
+ SK_ATTR_DEPRECATED("use sigma version")
+ static SkMaskFilter* CreateEmboss(const SkScalar direction[3],
+ SkScalar ambient, SkScalar specular,
+ SkScalar blurRadius);
+#endif
+
+ static const int kMaxDivisions = 6;
+
+ // This method computes all the parameters for drawing a partially occluded nine-patched
+ // blurred rrect mask:
+ // rrectToDraw - the integerized rrect to draw in the mask
+ // widthHeight - how large to make the mask (rrectToDraw will be centered in this coord sys)
+ // rectXs, rectYs - the x & y coordinates of the covering geometry lattice
+ // texXs, texYs - the texture coordinate at each point in rectXs & rectYs
+ // numXs, numYs - number of coordinates in the x & y directions
+ // skipMask - bit mask that contains a 1-bit whenever one of the cells is occluded
+ // It returns true if 'devRRect' is nine-patchable
+ static bool ComputeBlurredRRectParams(const SkRRect& srcRRect, const SkRRect& devRRect,
+ const SkRect& occluder,
+ SkScalar sigma, SkScalar xformedSigma,
+ SkRRect* rrectToDraw,
+ SkISize* widthHeight,
+ SkScalar rectXs[kMaxDivisions],
+ SkScalar rectYs[kMaxDivisions],
+ SkScalar texXs[kMaxDivisions],
+ SkScalar texYs[kMaxDivisions],
+ int* numXs, int* numYs, uint32_t* skipMask);
+
+ SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP()
+
+private:
+ SkBlurMaskFilter(); // can't be instantiated
+};
+
+#endif
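
A sketch combining the two helpers above: convert a legacy blur "radius" to the sigma the factory expects and install a normal-style blur. kNormal_SkBlurStyle comes from SkBlurTypes.h (already included by this header); SkPaint::setMaskFilter is assumed.

    #include "SkBlurMaskFilter.h"
    #include "SkPaint.h"

    static void blurFromLegacyRadius(SkPaint* paint, SkScalar legacyRadius) {
        SkScalar sigma = SkBlurMaskFilter::ConvertRadiusToSigma(legacyRadius);
        paint->setMaskFilter(SkBlurMaskFilter::Make(kNormal_SkBlurStyle, sigma));
    }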
diff --git a/gfx/skia/skia/include/effects/SkColorCubeFilter.h b/gfx/skia/skia/include/effects/SkColorCubeFilter.h
new file mode 100644
index 000000000..fbfe698ba
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkColorCubeFilter.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorCubeFilter_DEFINED
+#define SkColorCubeFilter_DEFINED
+
+#include "SkColorFilter.h"
+#include "SkData.h"
+#include "../private/SkOnce.h"
+#include "../private/SkTemplates.h"
+
+class SK_API SkColorCubeFilter : public SkColorFilter {
+public:
+    /** cubeData must contain 3D data in the form of a cube of size:
+     *  cubeDimension * cubeDimension * cubeDimension * sizeof(SkColor)
+     *  This cube contains a transform where (x,y,z) maps to (r,g,b).
+ * The alpha components of the colors must be 0xFF.
+ */
+ static sk_sp<SkColorFilter> Make(sk_sp<SkData> cubeData, int cubeDimension);
+
+#ifdef SK_SUPPORT_LEGACY_COLORFILTER_PTR
+ static SkColorFilter* Create(SkData* cubeData, int cubeDimension);
+#endif
+
+ void filterSpan(const SkPMColor src[], int count, SkPMColor[]) const override;
+ uint32_t getFlags() const override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(GrContext*) const override;
+#endif
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkColorCubeFilter)
+
+protected:
+ SkColorCubeFilter(sk_sp<SkData> cubeData, int cubeDimension);
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ /** The cache is initialized on-demand when getProcessingLuts is called.
+ */
+ class ColorCubeProcesingCache {
+ public:
+ ColorCubeProcesingCache(int cubeDimension);
+
+ void getProcessingLuts(const int* (*colorToIndex)[2],
+ const SkScalar* (*colorToFactors)[2],
+ const SkScalar** colorToScalar);
+
+ int cubeDimension() const { return fCubeDimension; }
+
+ private:
+ // Working pointers. If any of these is NULL,
+ // we need to recompute the corresponding cache values.
+ int* fColorToIndex[2];
+ SkScalar* fColorToFactors[2];
+ SkScalar* fColorToScalar;
+
+ SkAutoTMalloc<uint8_t> fLutStorage;
+
+ const int fCubeDimension;
+
+ // Make sure we only initialize the caches once.
+ SkOnce fLutsInitOnce;
+
+ static void initProcessingLuts(ColorCubeProcesingCache* cache);
+ };
+
+ sk_sp<SkData> fCubeData;
+ int32_t fUniqueID;
+
+ mutable ColorCubeProcesingCache fCache;
+
+ typedef SkColorFilter INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkColorFilterImageFilter.h b/gfx/skia/skia/include/effects/SkColorFilterImageFilter.h
new file mode 100644
index 000000000..4d438e351
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkColorFilterImageFilter.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorFilterImageFilter_DEFINED
+#define SkColorFilterImageFilter_DEFINED
+
+#include "SkImageFilter.h"
+
+class SkColorFilter;
+
+class SK_API SkColorFilterImageFilter : public SkImageFilter {
+public:
+ static sk_sp<SkImageFilter> Make(sk_sp<SkColorFilter> cf,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect = NULL);
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkColorFilterImageFilter)
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* Create(SkColorFilter* cf,
+ SkImageFilter* input = NULL,
+ const CropRect* cropRect = NULL) {
+ return Make(sk_ref_sp<SkColorFilter>(cf),
+ sk_ref_sp<SkImageFilter>(input),
+ cropRect).release();
+ }
+#endif
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* source, const Context&,
+ SkIPoint* offset) const override;
+ bool onIsColorFilterNode(SkColorFilter**) const override;
+ bool onCanHandleComplexCTM() const override { return true; }
+ bool affectsTransparentBlack() const override;
+
+private:
+ SkColorFilterImageFilter(sk_sp<SkColorFilter> cf,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect);
+
+ sk_sp<SkColorFilter> fColorFilter;
+
+ typedef SkImageFilter INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkColorMatrix.h b/gfx/skia/skia/include/effects/SkColorMatrix.h
new file mode 100644
index 000000000..a38585c12
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkColorMatrix.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorMatrix_DEFINED
+#define SkColorMatrix_DEFINED
+
+#include "SkScalar.h"
+
+class SK_API SkColorMatrix {
+public:
+ enum {
+ kCount = 20
+ };
+ SkScalar fMat[kCount];
+
+ enum Elem {
+ kR_Scale = 0,
+ kG_Scale = 6,
+ kB_Scale = 12,
+ kA_Scale = 18,
+
+ kR_Trans = 4,
+ kG_Trans = 9,
+ kB_Trans = 14,
+ kA_Trans = 19,
+ };
+
+ void setIdentity();
+ void setScale(SkScalar rScale, SkScalar gScale, SkScalar bScale,
+ SkScalar aScale = SK_Scalar1);
+ void preScale(SkScalar rScale, SkScalar gScale, SkScalar bScale,
+ SkScalar aScale = SK_Scalar1);
+ void postScale(SkScalar rScale, SkScalar gScale, SkScalar bScale,
+ SkScalar aScale = SK_Scalar1);
+ void postTranslate(SkScalar rTrans, SkScalar gTrans, SkScalar bTrans,
+ SkScalar aTrans = 0);
+
+ enum Axis {
+ kR_Axis = 0,
+ kG_Axis = 1,
+ kB_Axis = 2
+ };
+ void setRotate(Axis, SkScalar degrees);
+ void setSinCos(Axis, SkScalar sine, SkScalar cosine);
+ void preRotate(Axis, SkScalar degrees);
+ void postRotate(Axis, SkScalar degrees);
+
+ void setConcat(const SkColorMatrix& a, const SkColorMatrix& b);
+ void preConcat(const SkColorMatrix& mat) { this->setConcat(*this, mat); }
+ void postConcat(const SkColorMatrix& mat) { this->setConcat(mat, *this); }
+
+ void setSaturation(SkScalar sat);
+ void setRGB2YUV();
+ void setYUV2RGB();
+
+ bool operator==(const SkColorMatrix& other) const {
+ return 0 == memcmp(fMat, other.fMat, sizeof(fMat));
+ }
+
+ bool operator!=(const SkColorMatrix& other) const { return !((*this) == other); }
+
+ static bool NeedsClamping(const SkScalar[20]);
+ static void SetConcat(SkScalar result[20], const SkScalar outer[20], const SkScalar inner[20]);
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkColorMatrixFilter.h b/gfx/skia/skia/include/effects/SkColorMatrixFilter.h
new file mode 100644
index 000000000..6e74bee31
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkColorMatrixFilter.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorMatrixFilter_DEFINED
+#define SkColorMatrixFilter_DEFINED
+
+#include "SkColorFilter.h"
+#include "SkColorMatrix.h"
+
+class SK_API SkColorMatrixFilter : public SkColorFilter {
+public:
+ /**
+ * Create a colorfilter that multiplies the RGB channels by one color, and
+ * then adds a second color, pinning the result for each component to
+ * [0..255]. The alpha components of the mul and add arguments
+ * are ignored.
+ */
+ static sk_sp<SkColorFilter> MakeLightingFilter(SkColor mul, SkColor add);
+
+#ifdef SK_SUPPORT_LEGACY_COLORFILTER_PTR
+ static SkColorFilter* Create(const SkColorMatrix& cm) {
+ return SkColorFilter::MakeMatrixFilterRowMajor255(cm.fMat).release();
+ }
+ static SkColorFilter* Create(const SkScalar array[20]) {
+ return SkColorFilter::MakeMatrixFilterRowMajor255(array).release();
+ }
+ static SkColorFilter* CreateLightingFilter(SkColor mul, SkColor add) {
+ return MakeLightingFilter(mul, add).release();
+ }
+#endif
+};
+
+#endif
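
Two small sketches of the color-filter factories referenced above: the lighting helper declared here, and SkColorFilter::MakeMatrixFilterRowMajor255 (the factory used by the legacy Create() wrappers) combined with SkColorMatrix::setSaturation, which fills in all 20 entries.

    #include "SkColorFilter.h"
    #include "SkColorMatrix.h"
    #include "SkColorMatrixFilter.h"

    // Scale RGB by 'mul', then add 'add'; the alpha components are ignored.
    static sk_sp<SkColorFilter> makeWarmTint() {
        return SkColorMatrixFilter::MakeLightingFilter(/*mul=*/0xFFFFE0C0,
                                                       /*add=*/0xFF201000);
    }

    // 0 = grayscale, 1 = identity, values in between blend toward gray.
    static sk_sp<SkColorFilter> makeDesaturate(SkScalar saturation) {
        SkColorMatrix m;
        m.setSaturation(saturation);
        return SkColorFilter::MakeMatrixFilterRowMajor255(m.fMat);
    }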
diff --git a/gfx/skia/skia/include/effects/SkComposeImageFilter.h b/gfx/skia/skia/include/effects/SkComposeImageFilter.h
new file mode 100644
index 000000000..378b90471
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkComposeImageFilter.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkComposeImageFilter_DEFINED
+#define SkComposeImageFilter_DEFINED
+
+#include "SkImageFilter.h"
+
+class SK_API SkComposeImageFilter : public SkImageFilter {
+public:
+ static sk_sp<SkImageFilter> Make(sk_sp<SkImageFilter> outer, sk_sp<SkImageFilter> inner);
+
+ SkRect computeFastBounds(const SkRect& src) const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkComposeImageFilter)
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* Create(SkImageFilter* outer, SkImageFilter* inner) {
+ return Make(sk_ref_sp<SkImageFilter>(outer),
+ sk_ref_sp<SkImageFilter>(inner)).release();
+ }
+#endif
+
+protected:
+ explicit SkComposeImageFilter(sk_sp<SkImageFilter> inputs[2]) : INHERITED(inputs, 2, nullptr) {
+ SkASSERT(inputs[0].get());
+ SkASSERT(inputs[1].get());
+ }
+ sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* source, const Context&,
+ SkIPoint* offset) const override;
+ SkIRect onFilterBounds(const SkIRect&, const SkMatrix&, MapDirection) const override;
+ bool onCanHandleComplexCTM() const override { return true; }
+
+private:
+ typedef SkImageFilter INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkCornerPathEffect.h b/gfx/skia/skia/include/effects/SkCornerPathEffect.h
new file mode 100644
index 000000000..cf0346353
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkCornerPathEffect.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCornerPathEffect_DEFINED
+#define SkCornerPathEffect_DEFINED
+
+#include "SkPathEffect.h"
+
+/** \class SkCornerPathEffect
+
+ SkCornerPathEffect is a subclass of SkPathEffect that can turn sharp corners
+ into various treatments (e.g. rounded corners)
+*/
+class SK_API SkCornerPathEffect : public SkPathEffect {
+public:
+ /** radius must be > 0 to have an effect. It specifies the distance from each corner
+ that should be "rounded".
+ */
+ static sk_sp<SkPathEffect> Make(SkScalar radius) {
+ return sk_sp<SkPathEffect>(new SkCornerPathEffect(radius));
+ }
+
+#ifdef SK_SUPPORT_LEGACY_PATHEFFECT_PTR
+ static SkPathEffect* Create(SkScalar radius) {
+ return Make(radius).release();
+ }
+#endif
+
+ virtual bool filterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec*, const SkRect*) const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkCornerPathEffect)
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ bool exposedInAndroidJavaAPI() const override { return true; }
+#endif
+
+protected:
+ virtual ~SkCornerPathEffect();
+
+ explicit SkCornerPathEffect(SkScalar radius);
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ SkScalar fRadius;
+
+ typedef SkPathEffect INHERITED;
+};
+
+#endif
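
A one-line usage sketch for the factory above; SkPaint::setPathEffect is assumed from SkPaint.h.

    #include "SkCornerPathEffect.h"
    #include "SkPaint.h"

    // Round every corner of the drawn path within 8 units of the corner.
    static void roundCorners(SkPaint* paint) {
        paint->setPathEffect(SkCornerPathEffect::Make(8));
    }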
diff --git a/gfx/skia/skia/include/effects/SkDashPathEffect.h b/gfx/skia/skia/include/effects/SkDashPathEffect.h
new file mode 100644
index 000000000..ccb1a4e44
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkDashPathEffect.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDashPathEffect_DEFINED
+#define SkDashPathEffect_DEFINED
+
+#include "SkPathEffect.h"
+
+/** \class SkDashPathEffect
+
+ SkDashPathEffect is a subclass of SkPathEffect that implements dashing
+*/
+class SK_API SkDashPathEffect : public SkPathEffect {
+public:
+ /** intervals: array containing an even number of entries (>=2), with
+ the even indices specifying the length of "on" intervals, and the odd
+ indices specifying the length of "off" intervals.
+ count: number of elements in the intervals array
+ phase: offset into the intervals array (mod the sum of all of the
+ intervals).
+
+ For example: if intervals[] = {10, 20}, count = 2, and phase = 25,
+ this will set up a dashed path like so:
+ 5 pixels off
+ 10 pixels on
+ 20 pixels off
+ 10 pixels on
+ 20 pixels off
+ ...
+ A phase of -5, 25, 55, 85, etc. would all result in the same path,
+ because the sum of all the intervals is 30.
+
+ Note: only affects stroked paths.
+ */
+ static sk_sp<SkPathEffect> Make(const SkScalar intervals[], int count, SkScalar phase);
+
+#ifdef SK_SUPPORT_LEGACY_PATHEFFECT_PTR
+ static SkPathEffect* Create(const SkScalar intervals[], int count, SkScalar phase) {
+ return Make(intervals, count, phase).release();
+ }
+#endif
+
+ virtual bool filterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec*, const SkRect*) const override;
+
+ virtual bool asPoints(PointData* results, const SkPath& src,
+ const SkStrokeRec&, const SkMatrix&,
+ const SkRect*) const override;
+
+ DashType asADash(DashInfo* info) const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkDashPathEffect)
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ bool exposedInAndroidJavaAPI() const override { return true; }
+#endif
+
+protected:
+ virtual ~SkDashPathEffect();
+ SkDashPathEffect(const SkScalar intervals[], int count, SkScalar phase);
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ SkScalar* fIntervals;
+ int32_t fCount;
+ SkScalar fPhase;
+ // computed from phase
+
+ SkScalar fInitialDashLength;
+ int32_t fInitialDashIndex;
+ SkScalar fIntervalLength;
+
+ typedef SkPathEffect INHERITED;
+};
+
+#endif
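
A sketch that reproduces the documented example (intervals {10, 20} with phase 25, i.e. 5 off, 10 on, 20 off, ...); remember this only affects stroked paths. SkPaint::setPathEffect is assumed.

    #include "SkDashPathEffect.h"
    #include "SkPaint.h"

    static void applyDash(SkPaint* paint) {
        const SkScalar intervals[] = { 10, 20 };   // on, off
        paint->setPathEffect(SkDashPathEffect::Make(intervals, 2, /*phase=*/25));
    }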
diff --git a/gfx/skia/skia/include/effects/SkDiscretePathEffect.h b/gfx/skia/skia/include/effects/SkDiscretePathEffect.h
new file mode 100644
index 000000000..78d4516ee
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkDiscretePathEffect.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDiscretePathEffect_DEFINED
+#define SkDiscretePathEffect_DEFINED
+
+#include "SkPathEffect.h"
+
+/** \class SkDiscretePathEffect
+
+ This path effect chops a path into discrete segments, and randomly displaces them.
+*/
+class SK_API SkDiscretePathEffect : public SkPathEffect {
+public:
+ /** Break the path into segments of segLength length, and randomly move the endpoints
+ away from the original path by a maximum of deviation.
+ Note: works on filled or framed paths
+
+ @param seedAssist This is a caller-supplied seedAssist that modifies
+ the seed value that is used to randomize the path
+ segments' endpoints. If not supplied it defaults to 0,
+ in which case filtering a path multiple times will
+ result in the same set of segments (this is useful for
+ testing). If a caller does not want this behaviour
+ they can pass in a different seedAssist to get a
+ different set of path segments.
+ */
+ static sk_sp<SkPathEffect> Make(SkScalar segLength, SkScalar dev, uint32_t seedAssist = 0);
+
+#ifdef SK_SUPPORT_LEGACY_PATHEFFECT_PTR
+ static SkPathEffect* Create(SkScalar segLength, SkScalar deviation, uint32_t seedAssist = 0) {
+ return Make(segLength, deviation, seedAssist).release();
+ }
+#endif
+
+ virtual bool filterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec*, const SkRect*) const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkDiscretePathEffect)
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ bool exposedInAndroidJavaAPI() const override { return true; }
+#endif
+
+protected:
+ SkDiscretePathEffect(SkScalar segLength,
+ SkScalar deviation,
+ uint32_t seedAssist);
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ SkScalar fSegLength, fPerterb;
+
+ /* Caller-supplied 32 bit seed assist */
+ uint32_t fSeedAssist;
+
+ typedef SkPathEffect INHERITED;
+};
+
+#endif
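
A usage sketch for the factory above: chop the path into roughly 10-unit segments and jitter each endpoint by at most 4 units. Per the comment, passing the same seedAssist keeps the jitter reproducible.

    #include "SkDiscretePathEffect.h"
    #include "SkPaint.h"

    static void roughen(SkPaint* paint, uint32_t seedAssist) {
        paint->setPathEffect(
                SkDiscretePathEffect::Make(/*segLength=*/10, /*dev=*/4, seedAssist));
    }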
diff --git a/gfx/skia/skia/include/effects/SkDisplacementMapEffect.h b/gfx/skia/skia/include/effects/SkDisplacementMapEffect.h
new file mode 100644
index 000000000..f93f2c445
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkDisplacementMapEffect.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDisplacementMapEffect_DEFINED
+#define SkDisplacementMapEffect_DEFINED
+
+#include "SkImageFilter.h"
+
+class SK_API SkDisplacementMapEffect : public SkImageFilter {
+public:
+ enum ChannelSelectorType {
+ kUnknown_ChannelSelectorType,
+ kR_ChannelSelectorType,
+ kG_ChannelSelectorType,
+ kB_ChannelSelectorType,
+ kA_ChannelSelectorType
+ };
+
+ ~SkDisplacementMapEffect() override;
+
+ static sk_sp<SkImageFilter> Make(ChannelSelectorType xChannelSelector,
+ ChannelSelectorType yChannelSelector,
+ SkScalar scale,
+ sk_sp<SkImageFilter> displacement,
+ sk_sp<SkImageFilter> color,
+ const CropRect* cropRect = nullptr);
+
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkDisplacementMapEffect)
+
+ SkRect computeFastBounds(const SkRect& src) const override;
+
+ virtual SkIRect onFilterBounds(const SkIRect& src, const SkMatrix&,
+ MapDirection) const override;
+ SkIRect onFilterNodeBounds(const SkIRect&, const SkMatrix&, MapDirection) const override;
+
+ SK_TO_STRING_OVERRIDE()
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* Create(ChannelSelectorType xChannelSelector,
+ ChannelSelectorType yChannelSelector,
+ SkScalar scale, SkImageFilter* displacement,
+ SkImageFilter* color = nullptr,
+ const CropRect* cropRect = nullptr) {
+ return Make(xChannelSelector, yChannelSelector, scale,
+ sk_ref_sp<SkImageFilter>(displacement),
+ sk_ref_sp<SkImageFilter>(color),
+ cropRect).release();
+ }
+#endif
+
+protected:
+ sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* source, const Context&,
+ SkIPoint* offset) const override;
+
+ SkDisplacementMapEffect(ChannelSelectorType xChannelSelector,
+ ChannelSelectorType yChannelSelector,
+ SkScalar scale, sk_sp<SkImageFilter> inputs[2],
+ const CropRect* cropRect);
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ ChannelSelectorType fXChannelSelector;
+ ChannelSelectorType fYChannelSelector;
+ SkScalar fScale;
+ typedef SkImageFilter INHERITED;
+ const SkImageFilter* getDisplacementInput() const { return getInput(0); }
+ const SkImageFilter* getColorInput() const { return getInput(1); }
+};
+
+#endif
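
A hedged sketch of the factory above: take x offsets from the red channel and y offsets from the green channel of the displacement input, with 'scale' controlling how far pixels are moved.

    #include <utility>
    #include "SkDisplacementMapEffect.h"

    static sk_sp<SkImageFilter> makeDisplace(sk_sp<SkImageFilter> displacement,
                                             sk_sp<SkImageFilter> color) {
        return SkDisplacementMapEffect::Make(
                SkDisplacementMapEffect::kR_ChannelSelectorType,
                SkDisplacementMapEffect::kG_ChannelSelectorType,
                /*scale=*/16, std::move(displacement), std::move(color));
    }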
diff --git a/gfx/skia/skia/include/effects/SkDropShadowImageFilter.h b/gfx/skia/skia/include/effects/SkDropShadowImageFilter.h
new file mode 100644
index 000000000..87e740682
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkDropShadowImageFilter.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDropShadowImageFilter_DEFINED
+#define SkDropShadowImageFilter_DEFINED
+
+#include "SkColor.h"
+#include "SkImageFilter.h"
+#include "SkScalar.h"
+
+class SK_API SkDropShadowImageFilter : public SkImageFilter {
+public:
+ enum ShadowMode {
+ kDrawShadowAndForeground_ShadowMode,
+ kDrawShadowOnly_ShadowMode,
+
+ kLast_ShadowMode = kDrawShadowOnly_ShadowMode
+ };
+
+ static const int kShadowModeCount = kLast_ShadowMode+1;
+
+ static sk_sp<SkImageFilter> Make(SkScalar dx, SkScalar dy, SkScalar sigmaX, SkScalar sigmaY,
+ SkColor color, ShadowMode shadowMode,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect = nullptr);
+
+ SkRect computeFastBounds(const SkRect&) const override;
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkDropShadowImageFilter)
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* Create(SkScalar dx, SkScalar dy, SkScalar sigmaX, SkScalar sigmaY,
+ SkColor color, ShadowMode shadowMode,
+ SkImageFilter* input = nullptr,
+ const CropRect* cropRect = nullptr) {
+ return Make(dx, dy, sigmaX, sigmaY, color, shadowMode,
+ sk_ref_sp<SkImageFilter>(input), cropRect).release();
+ }
+#endif
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* source, const Context&,
+ SkIPoint* offset) const override;
+ SkIRect onFilterNodeBounds(const SkIRect& src, const SkMatrix&, MapDirection) const override;
+
+private:
+ SkDropShadowImageFilter(SkScalar dx, SkScalar dy, SkScalar sigmaX, SkScalar sigmaY, SkColor,
+ ShadowMode shadowMode, sk_sp<SkImageFilter> input,
+ const CropRect* cropRect);
+
+ SkScalar fDx, fDy, fSigmaX, fSigmaY;
+ SkColor fColor;
+ ShadowMode fShadowMode;
+
+ typedef SkImageFilter INHERITED;
+};
+
+#endif
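
A sketch of the factory above: a translucent black shadow offset by (4, 4) with a sigma-3 blur, drawn beneath the filtered content (kDrawShadowAndForeground_ShadowMode).

    #include <utility>
    #include "SkDropShadowImageFilter.h"

    static sk_sp<SkImageFilter> makeShadow(sk_sp<SkImageFilter> input) {
        return SkDropShadowImageFilter::Make(
                /*dx=*/4, /*dy=*/4, /*sigmaX=*/3, /*sigmaY=*/3,
                /*color=*/0x80000000,
                SkDropShadowImageFilter::kDrawShadowAndForeground_ShadowMode,
                std::move(input));
    }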
diff --git a/gfx/skia/skia/include/effects/SkEmbossMaskFilter.h b/gfx/skia/skia/include/effects/SkEmbossMaskFilter.h
new file mode 100644
index 000000000..8a3428245
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkEmbossMaskFilter.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEmbossMaskFilter_DEFINED
+#define SkEmbossMaskFilter_DEFINED
+
+#include "SkMaskFilter.h"
+
+/** \class SkEmbossMaskFilter
+
+ This mask filter creates a 3D emboss look, by specifying a light and blur amount.
+*/
+class SK_API SkEmbossMaskFilter : public SkMaskFilter {
+public:
+ struct Light {
+ SkScalar fDirection[3]; // x,y,z
+ uint16_t fPad;
+ uint8_t fAmbient;
+ uint8_t fSpecular; // exponent, 4.4 right now
+ };
+
+ static sk_sp<SkMaskFilter> Make(SkScalar blurSigma, const Light& light);
+
+#ifdef SK_SUPPORT_LEGACY_MASKFILTER_PTR
+ static SkMaskFilter* Create(SkScalar blurSigma, const Light& light) {
+ return Make(blurSigma, light).release();
+ }
+#endif
+
+ // overrides from SkMaskFilter
+ // This method is not exported to java.
+ SkMask::Format getFormat() const override;
+ // This method is not exported to java.
+ bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix&,
+ SkIPoint* margin) const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkEmbossMaskFilter)
+
+protected:
+ SkEmbossMaskFilter(SkScalar blurSigma, const Light& light);
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ Light fLight;
+ SkScalar fBlurSigma;
+
+ typedef SkMaskFilter INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkGammaColorFilter.h b/gfx/skia/skia/include/effects/SkGammaColorFilter.h
new file mode 100644
index 000000000..308926a3a
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkGammaColorFilter.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGammaColorFilter_DEFINED
+#define SkGammaColorFilter_DEFINED
+
+#include "SkColorFilter.h"
+#include "SkRefCnt.h"
+
+// This colorfilter can be used to perform pixel-by-pixel conversion between linear and
+// power-law color spaces. A gamma of 2.2 is interpreted to mean convert from sRGB to linear
+// while a gamma of 1/2.2 is interpreted to mean convert from linear to sRGB. Any other
+// values are just directly applied (i.e., out = in^gamma)
+//
+// More complicated color space mapping (i.e., ICC profiles) should be handled via the
+// SkColorSpace object.
+class SK_API SkGammaColorFilter : public SkColorFilter {
+public:
+ static sk_sp<SkColorFilter> Make(SkScalar gamma);
+
+#ifdef SK_SUPPORT_LEGACY_COLORFILTER_PTR
+ static SkColorFilter* Create(SkScalar gamma) { return Make(gamma).release(); }
+#endif
+
+ void filterSpan(const SkPMColor src[], int count, SkPMColor[]) const override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(GrContext*) const override;
+#endif
+
+ SK_TO_STRING_OVERRIDE()
+    SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkGammaColorFilter)
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ SkGammaColorFilter(SkScalar gamma);
+
+ SkScalar fGamma;
+ typedef SkColorFilter INHERITED;
+};
+
+#endif
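
Per the comment at the top of this header, a gamma of 2.2 converts sRGB-encoded values to linear and 1/2.2 converts back; this sketch assumes SkPaint::setColorFilter from SkPaint.h.

    #include "SkGammaColorFilter.h"
    #include "SkPaint.h"

    static void decodeSRGBToLinear(SkPaint* paint) {
        paint->setColorFilter(SkGammaColorFilter::Make(2.2f));
    }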
diff --git a/gfx/skia/skia/include/effects/SkGaussianEdgeShader.h b/gfx/skia/skia/include/effects/SkGaussianEdgeShader.h
new file mode 100644
index 000000000..ef54ece56
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkGaussianEdgeShader.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGaussianEdgeShader_DEFINED
+#define SkGaussianEdgeShader_DEFINED
+
+#include "SkShader.h"
+
+class SK_API SkGaussianEdgeShader {
+public:
+ /** Returns a shader that applies a Gaussian blur depending on distance to the edge
+ * Currently this is only useable with Circle and RRect shapes on the GPU backend.
+     *  Currently this is only usable with Circle and RRect shapes on the GPU backend.
+ */
+ static sk_sp<SkShader> Make();
+
+ SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP()
+
+private:
+ SkGaussianEdgeShader(); // can't be instantiated
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkGradientShader.h b/gfx/skia/skia/include/effects/SkGradientShader.h
new file mode 100644
index 000000000..2fcce75c3
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkGradientShader.h
@@ -0,0 +1,258 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGradientShader_DEFINED
+#define SkGradientShader_DEFINED
+
+#include "SkShader.h"
+
+/** \class SkGradientShader
+
+ SkGradientShader hosts factories for creating subclasses of SkShader that
+ render linear and radial gradients.
+*/
+class SK_API SkGradientShader {
+public:
+ enum Flags {
+ /** By default gradients will interpolate their colors in unpremul space
+ * and then premultiply each of the results. By setting this flag, the
+ * gradients will premultiply their colors first, and then interpolate
+ * between them.
+ */
+ kInterpolateColorsInPremul_Flag = 1 << 0,
+ };
+
+ /** Returns a shader that generates a linear gradient between the two specified points.
+ <p />
+ @param pts The start and end points for the gradient.
+ @param colors The array[count] of colors, to be distributed between the two points
+ @param pos May be NULL. array[count] of SkScalars, or NULL, of the relative position of
+ each corresponding color in the colors array. If this is NULL,
+                        the colors are distributed evenly between the start and end point.
+ If this is not null, the values must begin with 0, end with 1.0, and
+ intermediate values must be strictly increasing.
+ @param count Must be >=2. The number of colors (and pos if not NULL) entries.
+ @param mode The tiling mode
+ */
+ static sk_sp<SkShader> MakeLinear(const SkPoint pts[2],
+ const SkColor colors[], const SkScalar pos[], int count,
+ SkShader::TileMode mode,
+ uint32_t flags, const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeLinear(const SkPoint pts[2],
+ const SkColor colors[], const SkScalar pos[], int count,
+ SkShader::TileMode mode) {
+ return MakeLinear(pts, colors, pos, count, mode, 0, NULL);
+ }
+
+ /** Returns a shader that generates a linear gradient between the two specified points.
+ <p />
+ @param pts The start and end points for the gradient.
+ @param colors The array[count] of colors, to be distributed between the two points
+ @param pos May be NULL. array[count] of SkScalars, or NULL, of the relative position of
+ each corresponding color in the colors array. If this is NULL,
+                        the colors are distributed evenly between the start and end point.
+ If this is not null, the values must begin with 0, end with 1.0, and
+ intermediate values must be strictly increasing.
+ @param count Must be >=2. The number of colors (and pos if not NULL) entries.
+ @param mode The tiling mode
+ */
+ static sk_sp<SkShader> MakeLinear(const SkPoint pts[2],
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int count, SkShader::TileMode mode,
+ uint32_t flags, const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeLinear(const SkPoint pts[2],
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int count, SkShader::TileMode mode) {
+ return MakeLinear(pts, colors, std::move(colorSpace), pos, count, mode, 0, NULL);
+ }
+
+ /** Returns a shader that generates a radial gradient given the center and radius.
+ <p />
+ @param center The center of the circle for this gradient
+ @param radius Must be positive. The radius of the circle for this gradient
+ @param colors The array[count] of colors, to be distributed between the center and edge of the circle
+ @param pos May be NULL. The array[count] of SkScalars, or NULL, of the relative position of
+ each corresponding color in the colors array. If this is NULL,
+                        the colors are distributed evenly between the center and edge of the circle.
+ If this is not null, the values must begin with 0, end with 1.0, and
+ intermediate values must be strictly increasing.
+ @param count Must be >= 2. The number of colors (and pos if not NULL) entries
+ @param mode The tiling mode
+ */
+ static sk_sp<SkShader> MakeRadial(const SkPoint& center, SkScalar radius,
+ const SkColor colors[], const SkScalar pos[], int count,
+ SkShader::TileMode mode,
+ uint32_t flags, const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeRadial(const SkPoint& center, SkScalar radius,
+ const SkColor colors[], const SkScalar pos[], int count,
+ SkShader::TileMode mode) {
+ return MakeRadial(center, radius, colors, pos, count, mode, 0, NULL);
+ }
+
+ /** Returns a shader that generates a radial gradient given the center and radius.
+ <p />
+ @param center The center of the circle for this gradient
+ @param radius Must be positive. The radius of the circle for this gradient
+ @param colors The array[count] of colors, to be distributed between the center and edge of the circle
+ @param pos May be NULL. The array[count] of SkScalars, or NULL, of the relative position of
+ each corresponding color in the colors array. If this is NULL,
+                        the colors are distributed evenly between the center and edge of the circle.
+ If this is not null, the values must begin with 0, end with 1.0, and
+ intermediate values must be strictly increasing.
+ @param count Must be >= 2. The number of colors (and pos if not NULL) entries
+ @param mode The tiling mode
+ */
+ static sk_sp<SkShader> MakeRadial(const SkPoint& center, SkScalar radius,
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int count, SkShader::TileMode mode,
+ uint32_t flags, const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeRadial(const SkPoint& center, SkScalar radius,
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int count, SkShader::TileMode mode) {
+ return MakeRadial(center, radius, colors, std::move(colorSpace), pos, count, mode, 0, NULL);
+ }
+
+ /**
+ * Returns a shader that generates a conical gradient given two circles, or
+ * returns NULL if the inputs are invalid. The gradient interprets the
+ * two circles according to the following HTML spec.
+ * http://dev.w3.org/html5/2dcontext/#dom-context-2d-createradialgradient
+ */
+ static sk_sp<SkShader> MakeTwoPointConical(const SkPoint& start, SkScalar startRadius,
+ const SkPoint& end, SkScalar endRadius,
+ const SkColor colors[], const SkScalar pos[],
+ int count, SkShader::TileMode mode,
+ uint32_t flags, const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeTwoPointConical(const SkPoint& start, SkScalar startRadius,
+ const SkPoint& end, SkScalar endRadius,
+ const SkColor colors[], const SkScalar pos[],
+ int count, SkShader::TileMode mode) {
+ return MakeTwoPointConical(start, startRadius, end, endRadius, colors, pos, count, mode,
+ 0, NULL);
+ }
+
+ /**
+ * Returns a shader that generates a conical gradient given two circles, or
+ * returns NULL if the inputs are invalid. The gradient interprets the
+ * two circles according to the following HTML spec.
+ * http://dev.w3.org/html5/2dcontext/#dom-context-2d-createradialgradient
+ */
+ static sk_sp<SkShader> MakeTwoPointConical(const SkPoint& start, SkScalar startRadius,
+ const SkPoint& end, SkScalar endRadius,
+ const SkColor4f colors[],
+ sk_sp<SkColorSpace> colorSpace, const SkScalar pos[],
+ int count, SkShader::TileMode mode,
+ uint32_t flags, const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeTwoPointConical(const SkPoint& start, SkScalar startRadius,
+ const SkPoint& end, SkScalar endRadius,
+ const SkColor4f colors[],
+ sk_sp<SkColorSpace> colorSpace, const SkScalar pos[],
+ int count, SkShader::TileMode mode) {
+ return MakeTwoPointConical(start, startRadius, end, endRadius, colors,
+ std::move(colorSpace), pos, count, mode, 0, NULL);
+ }
+
+ /** Returns a shader that generates a sweep gradient given a center.
+ <p />
+ @param cx The X coordinate of the center of the sweep
+        @param cy         The Y coordinate of the center of the sweep
+ @param colors The array[count] of colors, to be distributed around the center.
+ @param pos May be NULL. The array[count] of SkScalars, or NULL, of the relative position of
+ each corresponding color in the colors array. If this is NULL,
+                        the colors are distributed evenly between the center and edge of the circle.
+ If this is not null, the values must begin with 0, end with 1.0, and
+ intermediate values must be strictly increasing.
+ @param count Must be >= 2. The number of colors (and pos if not NULL) entries
+ */
+ static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor colors[], const SkScalar pos[], int count,
+ uint32_t flags, const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor colors[], const SkScalar pos[], int count) {
+ return MakeSweep(cx, cy, colors, pos, count, 0, NULL);
+ }
+
+ /** Returns a shader that generates a sweep gradient given a center.
+ <p />
+ @param cx The X coordinate of the center of the sweep
+        @param cy         The Y coordinate of the center of the sweep
+ @param colors The array[count] of colors, to be distributed around the center.
+ @param pos May be NULL. The array[count] of SkScalars, or NULL, of the relative position of
+ each corresponding color in the colors array. If this is NULL,
+                        the colors are distributed evenly between the center and edge of the circle.
+ If this is not null, the values must begin with 0, end with 1.0, and
+ intermediate values must be strictly increasing.
+ @param count Must be >= 2. The number of colors (and pos if not NULL) entries
+ */
+ static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int count,
+ uint32_t flags, const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int count) {
+ return MakeSweep(cx, cy, colors, std::move(colorSpace), pos, count, 0, NULL);
+ }
+
+#ifdef SK_SUPPORT_LEGACY_CREATESHADER_PTR
+ static SkShader* CreateLinear(const SkPoint pts[2],
+ const SkColor colors[], const SkScalar pos[], int count,
+ SkShader::TileMode mode,
+ uint32_t flags, const SkMatrix* localMatrix) {
+ return MakeLinear(pts, colors, pos, count, mode, flags, localMatrix).release();
+ }
+ static SkShader* CreateLinear(const SkPoint pts[2],
+ const SkColor colors[], const SkScalar pos[], int count,
+ SkShader::TileMode mode) {
+ return CreateLinear(pts, colors, pos, count, mode, 0, NULL);
+ }
+
+ static SkShader* CreateRadial(const SkPoint& center, SkScalar radius,
+ const SkColor colors[], const SkScalar pos[], int count,
+ SkShader::TileMode mode,
+ uint32_t flags, const SkMatrix* localMatrix) {
+ return MakeRadial(center, radius, colors, pos, count, mode, flags, localMatrix).release();
+ }
+
+ static SkShader* CreateRadial(const SkPoint& center, SkScalar radius,
+ const SkColor colors[], const SkScalar pos[], int count,
+ SkShader::TileMode mode) {
+ return CreateRadial(center, radius, colors, pos, count, mode, 0, NULL);
+ }
+
+ static SkShader* CreateTwoPointConical(const SkPoint& start, SkScalar startRadius,
+ const SkPoint& end, SkScalar endRadius,
+ const SkColor colors[], const SkScalar pos[], int count,
+ SkShader::TileMode mode,
+ uint32_t flags, const SkMatrix* localMatrix) {
+ return MakeTwoPointConical(start, startRadius, end, endRadius, colors, pos, count, mode,
+ flags, localMatrix).release();
+ }
+ static SkShader* CreateTwoPointConical(const SkPoint& start, SkScalar startRadius,
+ const SkPoint& end, SkScalar endRadius,
+ const SkColor colors[], const SkScalar pos[], int count,
+ SkShader::TileMode mode) {
+ return CreateTwoPointConical(start, startRadius, end, endRadius, colors, pos, count, mode,
+ 0, NULL);
+ }
+
+ static SkShader* CreateSweep(SkScalar cx, SkScalar cy,
+ const SkColor colors[], const SkScalar pos[], int count,
+ uint32_t flags, const SkMatrix* localMatrix) {
+ return MakeSweep(cx, cy, colors, pos, count, flags, localMatrix).release();
+ }
+ static SkShader* CreateSweep(SkScalar cx, SkScalar cy,
+ const SkColor colors[], const SkScalar pos[], int count) {
+ return CreateSweep(cx, cy, colors, pos, count, 0, NULL);
+ }
+#endif
+
+
+ SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP()
+};
+
+#endif
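A minimal usage sketch of the MakeSweep factory declared above (an illustration, not part of the imported header; it assumes Skia's core SkCanvas.h and SkPaint.h are available alongside it):

    #include "SkCanvas.h"
    #include "SkGradientShader.h"
    #include "SkPaint.h"

    // Fill the canvas with a full-circle sweep gradient centered at (cx, cy).
    // Passing nullptr for pos spaces the colors evenly around the center.
    static void drawSweep(SkCanvas* canvas, SkScalar cx, SkScalar cy) {
        const SkColor colors[] = { SK_ColorRED, SK_ColorGREEN, SK_ColorBLUE, SK_ColorRED };
        SkPaint paint;
        paint.setShader(SkGradientShader::MakeSweep(cx, cy, colors, nullptr, 4));
        canvas->drawPaint(paint);
    }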
diff --git a/gfx/skia/skia/include/effects/SkImageSource.h b/gfx/skia/skia/include/effects/SkImageSource.h
new file mode 100644
index 000000000..4ceff9566
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkImageSource.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageSource_DEFINED
+#define SkImageSource_DEFINED
+
+#include "SkImage.h"
+#include "SkImageFilter.h"
+
+class SK_API SkImageSource : public SkImageFilter {
+public:
+ static sk_sp<SkImageFilter> Make(sk_sp<SkImage> image);
+ static sk_sp<SkImageFilter> Make(sk_sp<SkImage> image,
+ const SkRect& srcRect,
+ const SkRect& dstRect,
+ SkFilterQuality filterQuality);
+
+ SkRect computeFastBounds(const SkRect& src) const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkImageSource)
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* Create(SkImage* image) {
+ return Make(sk_ref_sp<SkImage>(image)).release();
+ }
+ static SkImageFilter* Create(SkImage* image,
+ const SkRect& srcRect,
+ const SkRect& dstRect,
+ SkFilterQuality filterQuality) {
+ return Make(sk_ref_sp<SkImage>(image), srcRect, dstRect, filterQuality).release();
+ }
+#endif
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* source, const Context&,
+ SkIPoint* offset) const override;
+
+private:
+ explicit SkImageSource(sk_sp<SkImage>);
+ SkImageSource(sk_sp<SkImage>,
+ const SkRect& srcRect,
+ const SkRect& dstRect,
+ SkFilterQuality);
+
+ sk_sp<SkImage> fImage;
+ SkRect fSrcRect, fDstRect;
+ SkFilterQuality fFilterQuality;
+
+ typedef SkImageFilter INHERITED;
+};
+
+#endif
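For reference, a hypothetical caller of the four-argument Make overload above, resampling a sub-rectangle of an image (sketch only; assumes SkImage.h from Skia core):

    #include <utility>

    #include "SkImage.h"
    #include "SkImageSource.h"

    // Scale the top-left 32x32 pixels of `image` into a 64x64 destination rect,
    // resampling at medium (bilerp + mipmap) quality.
    static sk_sp<SkImageFilter> makeThumbnailSource(sk_sp<SkImage> image) {
        const SkRect srcRect = SkRect::MakeWH(32, 32);
        const SkRect dstRect = SkRect::MakeWH(64, 64);
        return SkImageSource::Make(std::move(image), srcRect, dstRect,
                                   kMedium_SkFilterQuality);
    }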
diff --git a/gfx/skia/skia/include/effects/SkLayerDrawLooper.h b/gfx/skia/skia/include/effects/SkLayerDrawLooper.h
new file mode 100644
index 000000000..186d44a65
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkLayerDrawLooper.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLayerDrawLooper_DEFINED
+#define SkLayerDrawLooper_DEFINED
+
+#include "SkDrawLooper.h"
+#include "SkPaint.h"
+#include "SkPoint.h"
+#include "SkXfermode.h"
+
+class SK_API SkLayerDrawLooper : public SkDrawLooper {
+public:
+ virtual ~SkLayerDrawLooper();
+
+ /**
+ * Bits specifies which aspects of the layer's paint should replace the
+ * corresponding aspects on the draw's paint.
+ * kEntirePaint_Bits means use the layer's paint completely.
+ * 0 means ignore the layer's paint... except for fColorMode, which is
+ * always applied.
+ */
+ enum Bits {
+ kStyle_Bit = 1 << 0, //!< use this layer's Style/stroke settings
+ kTextSkewX_Bit = 1 << 1, //!< use this layer's textskewx
+ kPathEffect_Bit = 1 << 2, //!< use this layer's patheffect
+ kMaskFilter_Bit = 1 << 3, //!< use this layer's maskfilter
+ kShader_Bit = 1 << 4, //!< use this layer's shader
+ kColorFilter_Bit = 1 << 5, //!< use this layer's colorfilter
+ kXfermode_Bit = 1 << 6, //!< use this layer's xfermode
+
+ /**
+ * Use the layer's paint entirely, with these exceptions:
+ * - We never override the draw's paint's text_encoding, since that is
+ * used to interpret the text/len parameters in draw[Pos]Text.
+ * - Color is always computed using the LayerInfo's fColorMode.
+ */
+ kEntirePaint_Bits = -1
+
+ };
+ typedef int32_t BitFlags;
+
+ /**
+ * Info for how to apply the layer's paint and offset.
+ *
+ * fColorMode controls how we compute the final color for the layer:
+ * The layer's paint's color is treated as the SRC
+ * The draw's paint's color is treated as the DST
+ * final-color = Mode(layers-color, draws-color);
+ * Any SkXfermode::Mode will work. Two common choices are:
+ * kSrc_Mode: to use the layer's color, ignoring the draw's
+ * kDst_Mode: to just keep the draw's color, ignoring the layer's
+ */
+ struct SK_API LayerInfo {
+ BitFlags fPaintBits;
+ SkXfermode::Mode fColorMode;
+ SkVector fOffset;
+ bool fPostTranslate; //!< applies to fOffset
+
+ /**
+ * Initializes the LayerInfo. Defaults to settings that will draw the
+ * layer with no changes: e.g.
+ * fPaintBits == 0
+ * fColorMode == kDst_Mode
+ * fOffset == (0, 0)
+ */
+ LayerInfo();
+ };
+
+ SkDrawLooper::Context* createContext(SkCanvas*, void* storage) const override;
+
+ size_t contextSize() const override { return sizeof(LayerDrawLooperContext); }
+
+ bool asABlurShadow(BlurShadowRec* rec) const override;
+
+ SK_TO_STRING_OVERRIDE()
+
+ Factory getFactory() const override { return CreateProc; }
+ static sk_sp<SkFlattenable> CreateProc(SkReadBuffer& buffer);
+
+protected:
+ SkLayerDrawLooper();
+
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ struct Rec {
+ Rec* fNext;
+ SkPaint fPaint;
+ LayerInfo fInfo;
+ };
+ Rec* fRecs;
+ int fCount;
+
+ // state-machine during the init/next cycle
+ class LayerDrawLooperContext : public SkDrawLooper::Context {
+ public:
+ explicit LayerDrawLooperContext(const SkLayerDrawLooper* looper);
+
+ protected:
+ bool next(SkCanvas*, SkPaint* paint) override;
+
+ private:
+ Rec* fCurrRec;
+
+ static void ApplyInfo(SkPaint* dst, const SkPaint& src, const LayerInfo&);
+ };
+
+ typedef SkDrawLooper INHERITED;
+
+public:
+ class SK_API Builder {
+ public:
+ Builder();
+ ~Builder();
+
+ /**
+ * Call for each layer you want to add (from top to bottom).
+ * This returns a paint you can modify, but that ptr is only valid until
+ * the next call made to addLayer().
+ */
+ SkPaint* addLayer(const LayerInfo&);
+
+ /**
+ * This layer will draw with the original paint, at the specified offset
+ */
+ void addLayer(SkScalar dx, SkScalar dy);
+
+ /**
+ * This layer will draw with the original paint and no offset.
+ */
+ void addLayer() { this->addLayer(0, 0); }
+
+ /// Similar to addLayer, but adds a layer to the top.
+ SkPaint* addLayerOnTop(const LayerInfo&);
+
+ /**
+ * Pass list of layers on to newly built looper and return it. This will
+ * also reset the builder, so it can be used to build another looper.
+ */
+ sk_sp<SkDrawLooper> detach();
+#ifdef SK_SUPPORT_LEGACY_MINOR_EFFECT_PTR
+ SkLayerDrawLooper* detachLooper() {
+ return (SkLayerDrawLooper*)this->detach().release();
+ }
+#endif
+
+ private:
+ Rec* fRecs;
+ Rec* fTopRec;
+ int fCount;
+ };
+};
+
+#endif
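To make the Builder/LayerInfo interaction above concrete, a sketch (not part of the patch) of a two-layer drop-shadow looper; recall from the addLayer() comment that the first layer added is the topmost:

    #include "SkLayerDrawLooper.h"
    #include "SkPaint.h"

    static sk_sp<SkDrawLooper> makeShadowLooper() {
        SkLayerDrawLooper::Builder builder;

        builder.addLayer();                       // top layer: the original paint, no offset

        SkLayerDrawLooper::LayerInfo info;        // bottom layer: an offset, recolored copy
        info.fPaintBits = 0;                      // keep the draw's paint settings...
        info.fColorMode = SkXfermode::kSrc_Mode;  // ...but take this layer's color
        info.fOffset.set(2, 2);
        SkPaint* shadow = builder.addLayer(info);
        shadow->setColor(0x80000000);             // translucent black

        return builder.detach();
    }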
diff --git a/gfx/skia/skia/include/effects/SkLayerRasterizer.h b/gfx/skia/skia/include/effects/SkLayerRasterizer.h
new file mode 100644
index 000000000..9ddcd4e6c
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkLayerRasterizer.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLayerRasterizer_DEFINED
+#define SkLayerRasterizer_DEFINED
+
+#include "SkRasterizer.h"
+#include "SkDeque.h"
+#include "SkScalar.h"
+
+class SkPaint;
+
+class SK_API SkLayerRasterizer : public SkRasterizer {
+public:
+ virtual ~SkLayerRasterizer();
+
+ class SK_API Builder {
+ public:
+ Builder();
+ ~Builder();
+
+ void addLayer(const SkPaint& paint) {
+ this->addLayer(paint, 0, 0);
+ }
+
+ /**
+ * Add a new layer (above any previous layers) to the rasterizer.
+ * The layer will extract those fields that affect the mask from
+ * the specified paint, but will not retain a reference to the paint
+ * object itself, so it may be reused without danger of side-effects.
+ */
+ void addLayer(const SkPaint& paint, SkScalar dx, SkScalar dy);
+
+ /**
+ * Pass queue of layers on to newly created layer rasterizer and return it. The builder
+ * *cannot* be used any more after calling this function. If no layers have been added,
+ * returns NULL.
+ *
+ * The returned sk_sp manages the reference count; no manual unref() is required.
+ */
+ sk_sp<SkLayerRasterizer> detach();
+
+ /**
+ * Create and return a new immutable SkLayerRasterizer that contains a snapshot of the
+ * layers that were added to the Builder, without modifying the Builder. The Builder
+ * *may* be used after calling this function. It will continue to hold any layers
+ * previously added, so consecutive calls to this function will return identical objects,
+ * and objects returned by future calls to this function contain all the layers in
+ * previously returned objects. If no layers have been added, returns NULL.
+ *
+ * Future calls to addLayer will not affect rasterizers previously returned by this call.
+ *
+ * The returned sk_sp manages the reference count; no manual unref() is required.
+ */
+ sk_sp<SkLayerRasterizer> snapshot() const;
+
+#ifdef SK_SUPPORT_LEGACY_MINOR_EFFECT_PTR
+ SkLayerRasterizer* detachRasterizer() {
+ return this->detach().release();
+ }
+ SkLayerRasterizer* snapshotRasterizer() const {
+ return this->snapshot().release();
+ }
+#endif
+
+ private:
+ SkDeque* fLayers;
+ };
+
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkLayerRasterizer)
+
+protected:
+ SkLayerRasterizer();
+ SkLayerRasterizer(SkDeque* layers);
+ void flatten(SkWriteBuffer&) const override;
+
+ // override from SkRasterizer
+ virtual bool onRasterize(const SkPath& path, const SkMatrix& matrix,
+ const SkIRect* clipBounds,
+ SkMask* mask, SkMask::CreateMode mode) const override;
+
+private:
+ const SkDeque* const fLayers;
+
+ static SkDeque* ReadLayers(SkReadBuffer& buffer);
+
+ friend class LayerRasterizerTester;
+
+ typedef SkRasterizer INHERITED;
+};
+
+#endif
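A short Builder sketch for the class above (illustrative only): a filled interior plus a stroked outline, detached as a single rasterizer.

    #include "SkLayerRasterizer.h"
    #include "SkPaint.h"

    static sk_sp<SkLayerRasterizer> makeOutlinedRasterizer() {
        SkLayerRasterizer::Builder builder;

        SkPaint fill;                        // layer 1: plain fill
        builder.addLayer(fill);

        SkPaint stroke;                      // layer 2: 2px outline, nudged by (1, 1)
        stroke.setStyle(SkPaint::kStroke_Style);
        stroke.setStrokeWidth(2);
        builder.addLayer(stroke, 1, 1);

        return builder.detach();             // the builder cannot be reused after this
    }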
diff --git a/gfx/skia/skia/include/effects/SkLightingImageFilter.h b/gfx/skia/skia/include/effects/SkLightingImageFilter.h
new file mode 100644
index 000000000..4d4785da2
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkLightingImageFilter.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLightingImageFilter_DEFINED
+#define SkLightingImageFilter_DEFINED
+
+#include "SkImageFilter.h"
+#include "SkColor.h"
+
+
+class SkImageFilterLight;
+struct SkPoint3;
+
+class SK_API SkLightingImageFilter : public SkImageFilter {
+public:
+ static sk_sp<SkImageFilter> MakeDistantLitDiffuse(const SkPoint3& direction,
+ SkColor lightColor, SkScalar surfaceScale, SkScalar kd,
+ sk_sp<SkImageFilter> input, const CropRect* cropRect = nullptr);
+ static sk_sp<SkImageFilter> MakePointLitDiffuse(const SkPoint3& location,
+ SkColor lightColor, SkScalar surfaceScale, SkScalar kd,
+ sk_sp<SkImageFilter> input, const CropRect* cropRect = nullptr);
+ static sk_sp<SkImageFilter> MakeSpotLitDiffuse(const SkPoint3& location,
+ const SkPoint3& target, SkScalar specularExponent, SkScalar cutoffAngle,
+ SkColor lightColor, SkScalar surfaceScale, SkScalar kd,
+ sk_sp<SkImageFilter> input, const CropRect* cropRect = nullptr);
+ static sk_sp<SkImageFilter> MakeDistantLitSpecular(const SkPoint3& direction,
+ SkColor lightColor, SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess, sk_sp<SkImageFilter> input, const CropRect* cropRect = nullptr);
+ static sk_sp<SkImageFilter> MakePointLitSpecular(const SkPoint3& location,
+ SkColor lightColor, SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess, sk_sp<SkImageFilter> input, const CropRect* cropRect = nullptr);
+ static sk_sp<SkImageFilter> MakeSpotLitSpecular(const SkPoint3& location,
+ const SkPoint3& target, SkScalar specularExponent, SkScalar cutoffAngle,
+ SkColor lightColor, SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess, sk_sp<SkImageFilter> input, const CropRect* cropRect = nullptr);
+ ~SkLightingImageFilter() override;
+
+ SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP()
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* CreateDistantLitDiffuse(const SkPoint3& direction,
+ SkColor lightColor, SkScalar surfaceScale, SkScalar kd,
+ SkImageFilter* input = NULL, const CropRect* cropRect = NULL) {
+ return MakeDistantLitDiffuse(direction, lightColor, surfaceScale, kd,
+ sk_ref_sp<SkImageFilter>(input), cropRect).release();
+ }
+ static SkImageFilter* CreatePointLitDiffuse(const SkPoint3& location,
+ SkColor lightColor, SkScalar surfaceScale, SkScalar kd,
+ SkImageFilter* input = NULL, const CropRect* cropRect = NULL) {
+ return MakePointLitDiffuse(location, lightColor, surfaceScale, kd,
+ sk_ref_sp<SkImageFilter>(input), cropRect).release();
+ }
+ static SkImageFilter* CreateSpotLitDiffuse(const SkPoint3& location,
+ const SkPoint3& target, SkScalar specularExponent, SkScalar cutoffAngle,
+ SkColor lightColor, SkScalar surfaceScale, SkScalar kd,
+ SkImageFilter* input = NULL, const CropRect* cropRect = NULL) {
+ return MakeSpotLitDiffuse(location, target, specularExponent, cutoffAngle,
+ lightColor, surfaceScale, kd,
+ sk_ref_sp<SkImageFilter>(input), cropRect).release();
+ }
+ static SkImageFilter* CreateDistantLitSpecular(const SkPoint3& direction,
+ SkColor lightColor, SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess, SkImageFilter* input = NULL, const CropRect* cropRect = NULL) {
+ return MakeDistantLitSpecular(direction, lightColor, surfaceScale, ks, shininess,
+ sk_ref_sp<SkImageFilter>(input), cropRect).release();
+ }
+ static SkImageFilter* CreatePointLitSpecular(const SkPoint3& location,
+ SkColor lightColor, SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess, SkImageFilter* input = NULL, const CropRect* cropRect = NULL) {
+ return MakePointLitSpecular(location, lightColor, surfaceScale, ks, shininess,
+ sk_ref_sp<SkImageFilter>(input), cropRect).release();
+ }
+ static SkImageFilter* CreateSpotLitSpecular(const SkPoint3& location,
+ const SkPoint3& target, SkScalar specularExponent, SkScalar cutoffAngle,
+ SkColor lightColor, SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess, SkImageFilter* input = NULL, const CropRect* cropRect = NULL) {
+ return MakeSpotLitSpecular(location, target, specularExponent, cutoffAngle,
+ lightColor, surfaceScale, ks, shininess,
+ sk_ref_sp<SkImageFilter>(input), cropRect).release();
+ }
+#endif
+
+protected:
+ SkLightingImageFilter(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect);
+ void flatten(SkWriteBuffer&) const override;
+ const SkImageFilterLight* light() const { return fLight.get(); }
+ SkScalar surfaceScale() const { return fSurfaceScale; }
+ bool affectsTransparentBlack() const override { return true; }
+
+private:
+ sk_sp<SkImageFilterLight> fLight;
+ SkScalar fSurfaceScale;
+
+ typedef SkImageFilter INHERITED;
+};
+
+#endif
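A sketch of the simplest factory above, a diffuse distant light (assumes SkPoint3.h from Skia core; the direction and constants are arbitrary example values):

    #include "SkLightingImageFilter.h"
    #include "SkPoint3.h"

    // A white distant light with an arbitrary direction, diffuse-lit with kd = 1,
    // applied directly to the filter source (input == nullptr).
    static sk_sp<SkImageFilter> makeDiffuseLight() {
        const SkPoint3 direction = SkPoint3::Make(-1, -1, 1);
        return SkLightingImageFilter::MakeDistantLitDiffuse(
                direction, SK_ColorWHITE, /*surfaceScale=*/1.0f, /*kd=*/1.0f, nullptr);
    }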
diff --git a/gfx/skia/skia/include/effects/SkLumaColorFilter.h b/gfx/skia/skia/include/effects/SkLumaColorFilter.h
new file mode 100644
index 000000000..1ffaa733b
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkLumaColorFilter.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLumaColorFilter_DEFINED
+#define SkLumaColorFilter_DEFINED
+
+#include "SkColorFilter.h"
+#include "SkRefCnt.h"
+
+/**
+ * Luminance-to-alpha color filter, as defined in
+ * http://www.w3.org/TR/SVG/masking.html#Masking
+ * http://www.w3.org/TR/css-masking/#MaskValues
+ *
+ * The resulting color is black with transparency equal to the
+ * luminance value modulated by alpha:
+ *
+ * C' = [ Lum * a, 0, 0, 0 ]
+ *
+ */
+class SK_API SkLumaColorFilter : public SkColorFilter {
+public:
+ static sk_sp<SkColorFilter> Make();
+
+#ifdef SK_SUPPORT_LEGACY_COLORFILTER_PTR
+ static SkColorFilter* Create() { return Make().release(); }
+#endif
+
+ void filterSpan(const SkPMColor src[], int count, SkPMColor[]) const override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(GrContext*) const override;
+#endif
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkLumaColorFilter)
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ SkLumaColorFilter();
+
+ typedef SkColorFilter INHERITED;
+};
+
+#endif
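The C' = [ Lum * a, 0, 0, 0 ] mapping above is typically installed on the paint used to draw a mask layer; a minimal sketch:

    #include "SkLumaColorFilter.h"
    #include "SkPaint.h"

    // Anything drawn with this paint contributes only alpha, proportional to its
    // luminance -- the behavior SVG/CSS luminance masks expect.
    static void useAsLuminanceMask(SkPaint* maskPaint) {
        maskPaint->setColorFilter(SkLumaColorFilter::Make());
    }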
diff --git a/gfx/skia/skia/include/effects/SkMagnifierImageFilter.h b/gfx/skia/skia/include/effects/SkMagnifierImageFilter.h
new file mode 100644
index 000000000..6e20297ff
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkMagnifierImageFilter.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkMagnifierImageFilter_DEFINED
+#define SkMagnifierImageFilter_DEFINED
+
+#include "SkRect.h"
+#include "SkImageFilter.h"
+
+class SK_API SkMagnifierImageFilter : public SkImageFilter {
+public:
+ static sk_sp<SkImageFilter> Make(const SkRect& src, SkScalar inset,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect = nullptr);
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkMagnifierImageFilter)
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* Create(const SkRect& src, SkScalar inset,
+ SkImageFilter* input = nullptr) {
+ return Make(src, inset, sk_ref_sp<SkImageFilter>(input)).release();
+ }
+#endif
+
+protected:
+ SkMagnifierImageFilter(const SkRect& srcRect,
+ SkScalar inset,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect);
+ void flatten(SkWriteBuffer&) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* source, const Context&,
+ SkIPoint* offset) const override;
+
+private:
+ SkRect fSrcRect;
+ SkScalar fInset;
+ typedef SkImageFilter INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkMatrixConvolutionImageFilter.h b/gfx/skia/skia/include/effects/SkMatrixConvolutionImageFilter.h
new file mode 100644
index 000000000..9a45486d8
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkMatrixConvolutionImageFilter.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMatrixConvolutionImageFilter_DEFINED
+#define SkMatrixConvolutionImageFilter_DEFINED
+
+#include "SkImageFilter.h"
+#include "SkScalar.h"
+#include "SkSize.h"
+#include "SkPoint.h"
+
+class SkBitmap;
+
+/*! \class SkMatrixConvolutionImageFilter
+ Matrix convolution image filter. This filter applies an NxM image
+ processing kernel to a given input image. This can be used to produce
+ effects such as sharpening, blurring, edge detection, etc.
+ */
+
+class SK_API SkMatrixConvolutionImageFilter : public SkImageFilter {
+public:
+ /*! \enum TileMode */
+ enum TileMode {
+ kClamp_TileMode = 0, /*!< Clamp to the image's edge pixels. */
+ kRepeat_TileMode, /*!< Wrap around to the image's opposite edge. */
+ kClampToBlack_TileMode, /*!< Fill with transparent black. */
+ kMax_TileMode = kClampToBlack_TileMode
+ };
+
+ ~SkMatrixConvolutionImageFilter() override;
+
+ /** Construct a matrix convolution image filter.
+ @param kernelSize The kernel size in pixels, in each dimension (N by M).
+ @param kernel The image processing kernel. Must contain N * M
+ elements, in row order.
+ @param gain A scale factor applied to each pixel after
+ convolution. This can be used to normalize the
+ kernel, if it does not sum to 1.
+ @param bias A bias factor added to each pixel after convolution.
+ @param kernelOffset An offset applied to each pixel coordinate before
+ convolution. This can be used to center the kernel
+ over the image (e.g., a 3x3 kernel should have an
+ offset of {1, 1}).
+ @param tileMode How accesses outside the image are treated. (@see
+ TileMode).
+ @param convolveAlpha If true, all channels are convolved. If false,
+ only the RGB channels are convolved, and
+ alpha is copied from the source image.
+ @param input The input image filter. If NULL, the src bitmap
+ passed to filterImage() is used instead.
+ @param cropRect The rectangle to which the output processing will be limited.
+ */
+ static sk_sp<SkImageFilter> Make(const SkISize& kernelSize,
+ const SkScalar* kernel,
+ SkScalar gain,
+ SkScalar bias,
+ const SkIPoint& kernelOffset,
+ TileMode tileMode,
+ bool convolveAlpha,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect = nullptr);
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkMatrixConvolutionImageFilter)
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* Create(const SkISize& kernelSize,
+ const SkScalar* kernel,
+ SkScalar gain,
+ SkScalar bias,
+ const SkIPoint& kernelOffset,
+ TileMode tileMode,
+ bool convolveAlpha,
+ SkImageFilter* input = NULL,
+ const CropRect* cropRect = NULL) {
+ return Make(kernelSize, kernel, gain, bias, kernelOffset, tileMode, convolveAlpha,
+ sk_ref_sp<SkImageFilter>(input), cropRect).release();
+ }
+#endif
+
+protected:
+ SkMatrixConvolutionImageFilter(const SkISize& kernelSize,
+ const SkScalar* kernel,
+ SkScalar gain,
+ SkScalar bias,
+ const SkIPoint& kernelOffset,
+ TileMode tileMode,
+ bool convolveAlpha,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect);
+ void flatten(SkWriteBuffer&) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* source, const Context&,
+ SkIPoint* offset) const override;
+ SkIRect onFilterNodeBounds(const SkIRect&, const SkMatrix&, MapDirection) const override;
+ bool affectsTransparentBlack() const override;
+
+private:
+ SkISize fKernelSize;
+ SkScalar* fKernel;
+ SkScalar fGain;
+ SkScalar fBias;
+ SkIPoint fKernelOffset;
+ TileMode fTileMode;
+ bool fConvolveAlpha;
+
+ template <class PixelFetcher, bool convolveAlpha>
+ void filterPixels(const SkBitmap& src,
+ SkBitmap* result,
+ const SkIRect& rect,
+ const SkIRect& bounds) const;
+ template <class PixelFetcher>
+ void filterPixels(const SkBitmap& src,
+ SkBitmap* result,
+ const SkIRect& rect,
+ const SkIRect& bounds) const;
+ void filterInteriorPixels(const SkBitmap& src,
+ SkBitmap* result,
+ const SkIRect& rect,
+ const SkIRect& bounds) const;
+ void filterBorderPixels(const SkBitmap& src,
+ SkBitmap* result,
+ const SkIRect& rect,
+ const SkIRect& bounds) const;
+
+ typedef SkImageFilter INHERITED;
+};
+
+#endif
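As a concrete instance of the kernel parameters documented above, a 3x3 sharpen filter (sketch only): the kernel sums to 1, so gain = 1 and bias = 0 keep overall brightness unchanged, and kernelOffset = {1, 1} centers the kernel over each pixel.

    #include "SkMatrixConvolutionImageFilter.h"

    static sk_sp<SkImageFilter> makeSharpen() {
        const SkISize kernelSize = SkISize::Make(3, 3);
        const SkScalar kernel[9] = {  0, -1,  0,
                                     -1,  5, -1,
                                      0, -1,  0 };
        const SkIPoint kernelOffset = SkIPoint::Make(1, 1);
        return SkMatrixConvolutionImageFilter::Make(
                kernelSize, kernel, /*gain=*/1.0f, /*bias=*/0.0f, kernelOffset,
                SkMatrixConvolutionImageFilter::kClamp_TileMode,
                /*convolveAlpha=*/false, /*input=*/nullptr);
    }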
diff --git a/gfx/skia/skia/include/effects/SkMergeImageFilter.h b/gfx/skia/skia/include/effects/SkMergeImageFilter.h
new file mode 100644
index 000000000..20620d6d7
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkMergeImageFilter.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMergeImageFilter_DEFINED
+#define SkMergeImageFilter_DEFINED
+
+#include "SkImageFilter.h"
+
+#include "SkXfermode.h"
+
+class SK_API SkMergeImageFilter : public SkImageFilter {
+public:
+ ~SkMergeImageFilter() override;
+
+ static sk_sp<SkImageFilter> Make(sk_sp<SkImageFilter> first, sk_sp<SkImageFilter> second,
+ SkXfermode::Mode mode = SkXfermode::kSrcOver_Mode,
+ const CropRect* cropRect = nullptr);
+ static sk_sp<SkImageFilter> Make(sk_sp<SkImageFilter> filters[],
+ int count,
+ const SkXfermode::Mode modes[] = nullptr,
+ const CropRect* cropRect = nullptr);
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkMergeImageFilter)
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* Create(SkImageFilter* first, SkImageFilter* second,
+ SkXfermode::Mode mode = SkXfermode::kSrcOver_Mode,
+ const CropRect* cropRect = nullptr) {
+ return Make(sk_ref_sp<SkImageFilter>(first),
+ sk_ref_sp<SkImageFilter>(second),
+ mode, cropRect).release();
+ }
+
+ static SkImageFilter* Create(SkImageFilter* filters[], int count,
+ const SkXfermode::Mode modes[] = nullptr,
+ const CropRect* cropRect = nullptr) {
+ SkAutoTDeleteArray<sk_sp<SkImageFilter>> temp(new sk_sp<SkImageFilter>[count]);
+ for (int i = 0; i < count; ++i) {
+ temp[i] = sk_ref_sp<SkImageFilter>(filters[i]);
+ }
+ return Make(temp.get(), count, modes, cropRect).release();
+ }
+#endif
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* source, const Context&,
+ SkIPoint* offset) const override;
+ bool onCanHandleComplexCTM() const override { return true; }
+
+private:
+ SkMergeImageFilter(sk_sp<SkImageFilter> filters[], int count, const SkXfermode::Mode modes[],
+ const CropRect* cropRect);
+
+ uint8_t* fModes; // SkXfermode::Mode
+
+ // private storage, to avoid dynamically allocating storage for our copy
+ // of the modes (unless the count is so large we can't fit).
+ intptr_t fStorage[16];
+
+ void initAllocModes();
+ void initModes(const SkXfermode::Mode []);
+
+ typedef SkImageFilter INHERITED;
+};
+
+#endif
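A two-input sketch for the Make overloads above: screen a blurred copy of the source over the unblurred source for a simple glow. SkBlurImageFilter.h is assumed to be available from the same include/effects directory.

    #include <utility>

    #include "SkBlurImageFilter.h"
    #include "SkMergeImageFilter.h"

    static sk_sp<SkImageFilter> makeGlow() {
        sk_sp<SkImageFilter> blur = SkBlurImageFilter::Make(4.0f, 4.0f, nullptr);
        // A nullptr input stands for the unfiltered source image.
        return SkMergeImageFilter::Make(nullptr, std::move(blur),
                                        SkXfermode::kScreen_Mode);
    }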
diff --git a/gfx/skia/skia/include/effects/SkMorphologyImageFilter.h b/gfx/skia/skia/include/effects/SkMorphologyImageFilter.h
new file mode 100644
index 000000000..fbbbe207a
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkMorphologyImageFilter.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMorphologyImageFilter_DEFINED
+#define SkMorphologyImageFilter_DEFINED
+
+#include "SkColor.h"
+#include "SkImageFilter.h"
+#include "SkSize.h"
+
+///////////////////////////////////////////////////////////////////////////////
+class SK_API SkMorphologyImageFilter : public SkImageFilter {
+public:
+ SkRect computeFastBounds(const SkRect& src) const override;
+ SkIRect onFilterNodeBounds(const SkIRect& src, const SkMatrix&, MapDirection) const override;
+
+ /**
+ * All morphology procs have the same signature: src is the source buffer, dst the
+ * destination buffer, radius is the morphology radius, width and height are the bounds
+ * of the destination buffer (in pixels), and srcStride and dstStride are the
+ * number of pixels per row in each buffer. All buffers are 8888.
+ */
+
+ typedef void (*Proc)(const SkPMColor* src, SkPMColor* dst, int radius,
+ int width, int height, int srcStride, int dstStride);
+
+protected:
+ enum Op {
+ kErode_Op,
+ kDilate_Op,
+ };
+
+ virtual Op op() const = 0;
+
+ SkMorphologyImageFilter(int radiusX, int radiusY,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect);
+ sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* source,
+ const Context&,
+ SkIPoint* offset) const override;
+ void flatten(SkWriteBuffer&) const override;
+
+ SkISize radius() const { return fRadius; }
+
+private:
+ SkISize fRadius;
+
+ typedef SkImageFilter INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+class SK_API SkDilateImageFilter : public SkMorphologyImageFilter {
+public:
+ static sk_sp<SkImageFilter> Make(int radiusX, int radiusY,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect = nullptr);
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkDilateImageFilter)
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* Create(int radiusX, int radiusY,
+ SkImageFilter* input = nullptr,
+ const CropRect* cropRect = nullptr) {
+ return Make(radiusX, radiusY,
+ sk_ref_sp<SkImageFilter>(input),
+ cropRect).release();
+ }
+#endif
+
+protected:
+ Op op() const override { return kDilate_Op; }
+
+private:
+ SkDilateImageFilter(int radiusX, int radiusY,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect)
+ : INHERITED(radiusX, radiusY, input, cropRect) {}
+
+ typedef SkMorphologyImageFilter INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+class SK_API SkErodeImageFilter : public SkMorphologyImageFilter {
+public:
+ static sk_sp<SkImageFilter> Make(int radiusX, int radiusY,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect = nullptr);
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkErodeImageFilter)
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* Create(int radiusX, int radiusY,
+ SkImageFilter* input = nullptr,
+ const CropRect* cropRect = nullptr) {
+ return Make(radiusX, radiusY,
+ sk_ref_sp<SkImageFilter>(input),
+ cropRect).release();
+ }
+#endif
+
+protected:
+ Op op() const override { return kErode_Op; }
+
+private:
+ SkErodeImageFilter(int radiusX, int radiusY,
+ sk_sp<SkImageFilter> input, const CropRect* cropRect)
+ : INHERITED(radiusX, radiusY, input, cropRect) {}
+
+ typedef SkMorphologyImageFilter INHERITED;
+};
+
+#endif
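The dilate and erode filters above compose naturally; chaining them gives the classic morphological "close" operation (a sketch):

    #include "SkMorphologyImageFilter.h"

    // Grow opaque regions by 2px, then shrink by 2px: small holes are filled
    // while the overall silhouette is preserved.
    static sk_sp<SkImageFilter> makeClose() {
        return SkErodeImageFilter::Make(2, 2,
                                        SkDilateImageFilter::Make(2, 2, nullptr));
    }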
diff --git a/gfx/skia/skia/include/effects/SkOffsetImageFilter.h b/gfx/skia/skia/include/effects/SkOffsetImageFilter.h
new file mode 100644
index 000000000..c1005ef78
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkOffsetImageFilter.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOffsetImageFilter_DEFINED
+#define SkOffsetImageFilter_DEFINED
+
+#include "SkImageFilter.h"
+#include "SkPoint.h"
+
+class SK_API SkOffsetImageFilter : public SkImageFilter {
+public:
+ static sk_sp<SkImageFilter> Make(SkScalar dx, SkScalar dy,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect = nullptr);
+
+ SkRect computeFastBounds(const SkRect& src) const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkOffsetImageFilter)
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* Create(SkScalar dx, SkScalar dy, SkImageFilter* input = nullptr,
+ const CropRect* cropRect = nullptr) {
+ return Make(dx, dy, sk_ref_sp(input), cropRect).release();
+ }
+#endif
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* source, const Context&,
+ SkIPoint* offset) const override;
+ SkIRect onFilterNodeBounds(const SkIRect&, const SkMatrix&, MapDirection) const override;
+
+private:
+ SkOffsetImageFilter(SkScalar dx, SkScalar dy, sk_sp<SkImageFilter> input, const CropRect*);
+
+ SkVector fOffset;
+
+ typedef SkImageFilter INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkPaintFlagsDrawFilter.h b/gfx/skia/skia/include/effects/SkPaintFlagsDrawFilter.h
new file mode 100644
index 000000000..10dd3c4d6
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkPaintFlagsDrawFilter.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPaintFlagsDrawFilter_DEFINED
+#define SkPaintFlagsDrawFilter_DEFINED
+
+#include "SkDrawFilter.h"
+
+class SK_API SkPaintFlagsDrawFilter : public SkDrawFilter {
+public:
+ SkPaintFlagsDrawFilter(uint32_t clearFlags, uint32_t setFlags);
+
+ bool filter(SkPaint*, Type) override;
+
+private:
+ uint16_t fClearFlags; // user specified
+ uint16_t fSetFlags; // user specified
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkPaintImageFilter.h b/gfx/skia/skia/include/effects/SkPaintImageFilter.h
new file mode 100644
index 000000000..8a59da6c6
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkPaintImageFilter.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPaintImageFilter_DEFINED
+#define SkPaintImageFilter_DEFINED
+
+#include "SkImageFilter.h"
+#include "SkPaint.h"
+
+class SK_API SkPaintImageFilter : public SkImageFilter {
+public:
+ /** Create a new image filter which fills the given rectangle using the
+ * given paint. If no rectangle is specified, an output is produced with
+ * the same bounds as the input primitive (even though the input
+ * primitive's pixels are not used for processing).
+ * @param paint Paint to use when filling the rect.
+ * @param rect Rectangle of output pixels. If NULL or a given crop edge is
+ * not specified, the source primitive's bounds are used
+ * instead.
+ */
+ static sk_sp<SkImageFilter> Make(const SkPaint& paint, const CropRect* cropRect = nullptr);
+
+ bool affectsTransparentBlack() const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkPaintImageFilter)
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* Create(const SkPaint& paint, const CropRect* rect = nullptr) {
+ return Make(paint, rect).release();
+ }
+#endif
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* source, const Context&,
+ SkIPoint* offset) const override;
+
+private:
+ SkPaintImageFilter(const SkPaint& paint, const CropRect* rect);
+
+ SkPaint fPaint;
+
+ typedef SkImageFilter INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkPerlinNoiseShader.h b/gfx/skia/skia/include/effects/SkPerlinNoiseShader.h
new file mode 100644
index 000000000..60dc53a6a
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkPerlinNoiseShader.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPerlinNoiseShader_DEFINED
+#define SkPerlinNoiseShader_DEFINED
+
+#include "SkShader.h"
+
+/** \class SkPerlinNoiseShader
+
+ SkPerlinNoiseShader creates an image using the Perlin turbulence function.
+
+ It can produce tileable noise if asked to stitch tiles and provided a tile size.
+ In order to fill a large area with repeating noise, set the stitchTiles flag to
+ true, and render exactly a single tile of noise. Without this flag, the result
+ will contain visible seams between tiles.
+
+ The algorithm used is described here :
+ http://www.w3.org/TR/SVG/filters.html#feTurbulenceElement
+*/
+class SK_API SkPerlinNoiseShader : public SkShader {
+public:
+ struct StitchData;
+ struct PaintingData;
+
+ /**
+ * About the noise types: the difference between the two is just minor tweaks to the algorithm;
+ * they are not two entirely different noises. The output looks different, but once the noise is
+ * generated in the [-1, 1] range, the output is brought back into the [0, 1] range by doing:
+ * kFractalNoise_Type : noise * 0.5 + 0.5
+ * kTurbulence_Type : abs(noise)
+ * There is very little difference between the two types, although you can tell them apart visually.
+ */
+ enum Type {
+ kFractalNoise_Type,
+ kTurbulence_Type,
+ kFirstType = kFractalNoise_Type,
+ kLastType = kTurbulence_Type
+ };
+ /**
+ * This will construct Perlin noise of the given type (Fractal Noise or Turbulence).
+ *
+ * Both base frequencies (X and Y) have a usual range of (0..1).
+ *
+ * The number of octaves provided should be fairly small, although no limit is enforced.
+ * Each octave doubles the frequency, so 10 octaves would produce noise from
+ * baseFrequency * 1, * 2, * 4, ..., * 512, which quickly yields insignificantly small
+ * periods and resembles regular unstructured noise rather than Perlin noise.
+ *
+ * If tileSize isn't NULL or an empty size, the tileSize parameter will be used to modify
+ * the frequencies so that the noise will be tileable for the given tile size. If tileSize
+ * is NULL or an empty size, the frequencies will be used as is without modification.
+ */
+ static sk_sp<SkShader> MakeFractalNoise(SkScalar baseFrequencyX, SkScalar baseFrequencyY,
+ int numOctaves, SkScalar seed,
+ const SkISize* tileSize = nullptr);
+ static sk_sp<SkShader> MakeTurbulence(SkScalar baseFrequencyX, SkScalar baseFrequencyY,
+ int numOctaves, SkScalar seed,
+ const SkISize* tileSize = nullptr);
+
+#ifdef SK_SUPPORT_LEGACY_CREATESHADER_PTR
+ static SkShader* CreateFractalNoise(SkScalar baseFrequencyX, SkScalar baseFrequencyY,
+ int numOctaves, SkScalar seed,
+ const SkISize* tileSize = NULL) {
+ return MakeFractalNoise(baseFrequencyX, baseFrequencyY, numOctaves, seed, tileSize).release();
+ }
+ static SkShader* CreateTurbulence(SkScalar baseFrequencyX, SkScalar baseFrequencyY,
+ int numOctaves, SkScalar seed,
+ const SkISize* tileSize = NULL) {
+ return MakeTurbulence(baseFrequencyX, baseFrequencyY, numOctaves, seed, tileSize).release();
+ }
+ static SkShader* CreateTubulence(SkScalar baseFrequencyX, SkScalar baseFrequencyY,
+ int numOctaves, SkScalar seed,
+ const SkISize* tileSize = NULL) {
+ return CreateTurbulence(baseFrequencyX, baseFrequencyY, numOctaves, seed, tileSize);
+ }
+#endif
+
+ class PerlinNoiseShaderContext : public SkShader::Context {
+ public:
+ PerlinNoiseShaderContext(const SkPerlinNoiseShader& shader, const ContextRec&);
+ virtual ~PerlinNoiseShaderContext();
+
+ void shadeSpan(int x, int y, SkPMColor[], int count) override;
+
+ private:
+ SkPMColor shade(const SkPoint& point, StitchData& stitchData) const;
+ SkScalar calculateTurbulenceValueForPoint(
+ int channel,
+ StitchData& stitchData, const SkPoint& point) const;
+ SkScalar noise2D(int channel,
+ const StitchData& stitchData, const SkPoint& noiseVector) const;
+
+ SkMatrix fMatrix;
+ PaintingData* fPaintingData;
+
+ typedef SkShader::Context INHERITED;
+ };
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(const AsFPArgs&) const override;
+#endif
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkPerlinNoiseShader)
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ Context* onCreateContext(const ContextRec&, void* storage) const override;
+ size_t onContextSize(const ContextRec&) const override;
+
+private:
+ SkPerlinNoiseShader(SkPerlinNoiseShader::Type type, SkScalar baseFrequencyX,
+ SkScalar baseFrequencyY, int numOctaves, SkScalar seed,
+ const SkISize* tileSize);
+ virtual ~SkPerlinNoiseShader();
+
+ const SkPerlinNoiseShader::Type fType;
+ const SkScalar fBaseFrequencyX;
+ const SkScalar fBaseFrequencyY;
+ const int fNumOctaves;
+ const SkScalar fSeed;
+ const SkISize fTileSize;
+ const bool fStitchTiles;
+
+ typedef SkShader INHERITED;
+};
+
+#endif
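Following the tiling note in the class comment above, a sketch that renders exactly one 256x256 tile of fractal noise, passing the tile size so the frequencies are adjusted for seamless tiling (frequency, octave and seed values are arbitrary examples):

    #include "SkCanvas.h"
    #include "SkPaint.h"
    #include "SkPerlinNoiseShader.h"

    static void drawNoiseTile(SkCanvas* canvas) {
        const SkISize tileSize = SkISize::Make(256, 256);
        SkPaint paint;
        paint.setShader(SkPerlinNoiseShader::MakeFractalNoise(
                /*baseFrequencyX=*/0.05f, /*baseFrequencyY=*/0.05f,
                /*numOctaves=*/4, /*seed=*/0.0f, &tileSize));
        canvas->drawRect(SkRect::MakeWH(256, 256), paint);
    }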
diff --git a/gfx/skia/skia/include/effects/SkPictureImageFilter.h b/gfx/skia/skia/include/effects/SkPictureImageFilter.h
new file mode 100644
index 000000000..2ca1c5b4f
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkPictureImageFilter.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPictureImageFilter_DEFINED
+#define SkPictureImageFilter_DEFINED
+
+#include "SkImageFilter.h"
+#include "SkPicture.h"
+
+class SK_API SkPictureImageFilter : public SkImageFilter {
+public:
+ /**
+ * Refs the passed-in picture.
+ */
+ static sk_sp<SkImageFilter> Make(sk_sp<SkPicture> picture);
+
+ /**
+ * Refs the passed-in picture. cropRect can be used to crop or expand the destination rect when
+ * the picture is drawn. (No scaling is implied by the dest rect; only the CTM is applied.)
+ */
+ static sk_sp<SkImageFilter> Make(sk_sp<SkPicture> picture, const SkRect& cropRect);
+
+ /**
+ * Refs the passed-in picture. The picture is rasterized at a resolution that matches the
+ * local coordinate space. If the picture needs to be resampled for drawing it into the
+ * destination canvas, bilinear filtering will be used. cropRect can be used to crop or
+ * expand the destination rect when the picture is drawn. (No scaling is implied by the
+ * dest rect; only the CTM is applied.)
+ */
+ static sk_sp<SkImageFilter> MakeForLocalSpace(sk_sp<SkPicture> picture,
+ const SkRect& cropRect,
+ SkFilterQuality filterQuality);
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* Create(const SkPicture* picture) {
+ return Make(sk_ref_sp(const_cast<SkPicture*>(picture))).release();
+ }
+ static SkImageFilter* Create(const SkPicture* picture, const SkRect& cropRect) {
+ return Make(sk_ref_sp(const_cast<SkPicture*>(picture)), cropRect).release();
+ }
+ static SkImageFilter* CreateForLocalSpace(const SkPicture* picture,
+ const SkRect& cropRect,
+ SkFilterQuality filterQuality) {
+ return MakeForLocalSpace(sk_ref_sp(const_cast<SkPicture*>(picture)),
+ cropRect,
+ filterQuality).release();
+ }
+#endif
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkPictureImageFilter)
+
+protected:
+ enum PictureResolution {
+ kDeviceSpace_PictureResolution,
+ kLocalSpace_PictureResolution
+ };
+
+ /* Constructs an SkPictureImageFilter object from an SkReadBuffer.
+ * Note: If the SkPictureImageFilter object construction requires bitmap
+ * decoding, the decoder must be set on the SkReadBuffer parameter by calling
+ * SkReadBuffer::setBitmapDecoder() before calling this constructor.
+ * @param SkReadBuffer Serialized picture data.
+ */
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* source, const Context&,
+ SkIPoint* offset) const override;
+
+private:
+ explicit SkPictureImageFilter(sk_sp<SkPicture> picture);
+ SkPictureImageFilter(sk_sp<SkPicture> picture, const SkRect& cropRect,
+ PictureResolution, SkFilterQuality);
+
+ void drawPictureAtDeviceResolution(SkCanvas* canvas,
+ const SkIRect& deviceBounds,
+ const Context&) const;
+ void drawPictureAtLocalResolution(SkSpecialImage* source,
+ SkCanvas*,
+ const SkIRect& deviceBounds,
+ const Context&) const;
+
+ sk_sp<SkPicture> fPicture;
+ SkRect fCropRect;
+ PictureResolution fPictureResolution;
+ SkFilterQuality fFilterQuality;
+
+ typedef SkImageFilter INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkRRectsGaussianEdgeShader.h b/gfx/skia/skia/include/effects/SkRRectsGaussianEdgeShader.h
new file mode 100644
index 000000000..087e7c25f
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkRRectsGaussianEdgeShader.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRRectsGaussianEdgeShader_DEFINED
+#define SkRRectsGaussianEdgeShader_DEFINED
+
+#include "SkShader.h"
+
+class SkRRect;
+
+class SK_API SkRRectsGaussianEdgeShader {
+public:
+ /** Returns a shader that applies a Gaussian blur depending on distance to the edge
+ * of the intersection of two round rects.
+ * Currently this is only useable with round rects that have the same radii at
+ * all the corners and for which the x & y radii are equal.
+ * Raster will draw nothing.
+ *
+ * The coverage geometry that should be drawn should be no larger than the intersection
+ * of the bounding boxes of the two round rects. Ambitious users can omit the center
+ * area of the coverage geometry if it is known to be occluded.
+ */
+ static sk_sp<SkShader> Make(const SkRRect& first,
+ const SkRRect& second,
+ SkScalar radius, SkScalar unused = 0.0f);
+
+ SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP()
+
+private:
+ SkRRectsGaussianEdgeShader(); // can't be instantiated
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkTableColorFilter.h b/gfx/skia/skia/include/effects/SkTableColorFilter.h
new file mode 100644
index 000000000..fe3114946
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkTableColorFilter.h
@@ -0,0 +1,54 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef SkTableColorFilter_DEFINED
+#define SkTableColorFilter_DEFINED
+
+#include "SkColorFilter.h"
+
+class SK_API SkTableColorFilter {
+public:
+ /**
+ * Create a table colorfilter, copying the table into the filter, and
+ * applying it to all 4 components.
+ * a' = table[a];
+ * r' = table[r];
+ * g' = table[g];
+ * b' = table[b];
+ * Components are operated on in unpremultiplied space. If the incoming
+ * colors are premultiplied, they are temporarily unpremultiplied, then
+ * the table is applied, and then the result is remultiplied.
+ */
+ static sk_sp<SkColorFilter> Make(const uint8_t table[256]);
+
+ /**
+ * Create a table colorfilter, with a different table for each
+ * component [A, R, G, B]. If a given table is NULL, then it is
+ * treated as identity, with the component left unchanged. If a table
+ * is not null, then its contents are copied into the filter.
+ */
+ static sk_sp<SkColorFilter> MakeARGB(const uint8_t tableA[256],
+ const uint8_t tableR[256],
+ const uint8_t tableG[256],
+ const uint8_t tableB[256]);
+
+#ifdef SK_SUPPORT_LEGACY_COLORFILTER_PTR
+ static SkColorFilter* Create(const uint8_t table[256]) {
+ return Make(table).release();
+ }
+ static SkColorFilter* CreateARGB(const uint8_t tableA[256],
+ const uint8_t tableR[256],
+ const uint8_t tableG[256],
+ const uint8_t tableB[256]) {
+ return MakeARGB(tableA, tableR, tableG, tableB).release();
+ }
+#endif
+
+ SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP()
+};
+
+#endif
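A small sketch of MakeARGB above: invert the R, G and B channels while leaving alpha untouched (a nullptr table means identity for that component).

    #include "SkTableColorFilter.h"

    static sk_sp<SkColorFilter> makeInvertRGB() {
        uint8_t invert[256];
        for (int i = 0; i < 256; ++i) {
            invert[i] = 255 - i;
        }
        return SkTableColorFilter::MakeARGB(nullptr, invert, invert, invert);
    }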
diff --git a/gfx/skia/skia/include/effects/SkTableMaskFilter.h b/gfx/skia/skia/include/effects/SkTableMaskFilter.h
new file mode 100644
index 000000000..757ddf208
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkTableMaskFilter.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTableMaskFilter_DEFINED
+#define SkTableMaskFilter_DEFINED
+
+#include "SkMaskFilter.h"
+#include "SkScalar.h"
+
+/** \class SkTableMaskFilter
+
+ Applies a table lookup on each of the alpha values in the mask.
+ Helper methods create some common tables (e.g. gamma, clipping)
+ */
+class SK_API SkTableMaskFilter : public SkMaskFilter {
+public:
+ /** Utility that sets the gamma table
+ */
+ static void MakeGammaTable(uint8_t table[256], SkScalar gamma);
+
+ /** Utility that creates a clipping table: clamps values below min to 0
+ and above max to 255, and rescales the remaining into 0..255
+ */
+ static void MakeClipTable(uint8_t table[256], uint8_t min, uint8_t max);
+
+ static SkMaskFilter* Create(const uint8_t table[256]) {
+ return new SkTableMaskFilter(table);
+ }
+
+ static SkMaskFilter* CreateGamma(SkScalar gamma) {
+ uint8_t table[256];
+ MakeGammaTable(table, gamma);
+ return new SkTableMaskFilter(table);
+ }
+
+ static SkMaskFilter* CreateClip(uint8_t min, uint8_t max) {
+ uint8_t table[256];
+ MakeClipTable(table, min, max);
+ return new SkTableMaskFilter(table);
+ }
+
+ SkMask::Format getFormat() const override;
+ bool filterMask(SkMask*, const SkMask&, const SkMatrix&, SkIPoint*) const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkTableMaskFilter)
+
+protected:
+ virtual ~SkTableMaskFilter();
+
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ SkTableMaskFilter();
+ explicit SkTableMaskFilter(const uint8_t table[256]);
+
+ uint8_t fTable[256];
+
+ typedef SkMaskFilter INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkTileImageFilter.h b/gfx/skia/skia/include/effects/SkTileImageFilter.h
new file mode 100644
index 000000000..ae951e318
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkTileImageFilter.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTileImageFilter_DEFINED
+#define SkTileImageFilter_DEFINED
+
+#include "SkImageFilter.h"
+
+class SK_API SkTileImageFilter : public SkImageFilter {
+public:
+ /** Create a tile image filter
+ @param src Defines the pixels to tile
+ @param dst Defines the pixels where tiles are drawn
+ @param input Input from which the subregion defined by srcRect will be tiled
+ */
+ static sk_sp<SkImageFilter> Make(const SkRect& src,
+ const SkRect& dst,
+ sk_sp<SkImageFilter> input);
+
+ SkIRect onFilterBounds(const SkIRect& src, const SkMatrix&, MapDirection) const override;
+ SkIRect onFilterNodeBounds(const SkIRect&, const SkMatrix&, MapDirection) const override;
+ SkRect computeFastBounds(const SkRect& src) const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkTileImageFilter)
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* Create(const SkRect& src, const SkRect& dst, SkImageFilter* input) {
+ return Make(src, dst, sk_ref_sp<SkImageFilter>(input)).release();
+ }
+#endif
+
+protected:
+ void flatten(SkWriteBuffer& buffer) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* source, const Context&,
+ SkIPoint* offset) const override;
+
+private:
+ SkTileImageFilter(const SkRect& srcRect, const SkRect& dstRect, sk_sp<SkImageFilter> input)
+ : INHERITED(&input, 1, nullptr), fSrcRect(srcRect), fDstRect(dstRect) {}
+
+ SkRect fSrcRect;
+ SkRect fDstRect;
+
+ typedef SkImageFilter INHERITED;
+};
+
+#endif
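A sketch of the Make factory above: repeat the 32x32 top-left corner of the input across a 256x256 output area (a nullptr input means the filter source is used).

    #include "SkRect.h"
    #include "SkTileImageFilter.h"

    static sk_sp<SkImageFilter> makeTiler() {
        return SkTileImageFilter::Make(SkRect::MakeWH(32, 32),    // src: pixels to tile
                                       SkRect::MakeWH(256, 256),  // dst: area to cover
                                       nullptr);
    }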
diff --git a/gfx/skia/skia/include/effects/SkXfermodeImageFilter.h b/gfx/skia/skia/include/effects/SkXfermodeImageFilter.h
new file mode 100644
index 000000000..fa9c857a7
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkXfermodeImageFilter.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkXfermodeImageFilter_DEFINED
+#define SkXfermodeImageFilter_DEFINED
+
+#include "SkBlendMode.h"
+#include "SkImageFilter.h"
+
+class SkXfermode;
+
+/**
+ * This filter takes an xfermode, and uses it to composite the foreground
+ * over the background. If foreground or background is NULL, the input
+ * bitmap (src) is used instead.
+ */
+class SK_API SkXfermodeImageFilter {
+public:
+ static sk_sp<SkImageFilter> Make(SkBlendMode, sk_sp<SkImageFilter> background,
+ sk_sp<SkImageFilter> foreground,
+ const SkImageFilter::CropRect* cropRect);
+ static sk_sp<SkImageFilter> Make(SkBlendMode mode, sk_sp<SkImageFilter> background) {
+ return Make(mode, std::move(background), nullptr, nullptr);
+ }
+
+ static sk_sp<SkImageFilter> MakeArithmetic(float k1, float k2, float k3, float k4,
+ bool enforcePMColor,
+ sk_sp<SkImageFilter> background,
+ sk_sp<SkImageFilter> foreground,
+ const SkImageFilter::CropRect* cropRect);
+ static sk_sp<SkImageFilter> MakeArithmetic(float k1, float k2, float k3, float k4,
+ bool enforcePMColor,
+ sk_sp<SkImageFilter> background) {
+ return MakeArithmetic(k1, k2, k3, k4, enforcePMColor, std::move(background),
+ nullptr, nullptr);
+ }
+
+#ifdef SK_SUPPORT_LEGACY_XFERMODE_OBJECT
+ static sk_sp<SkImageFilter> Make(sk_sp<SkXfermode> mode, sk_sp<SkImageFilter> background,
+ sk_sp<SkImageFilter> foreground,
+ const SkImageFilter::CropRect* cropRect);
+ static sk_sp<SkImageFilter> Make(sk_sp<SkXfermode> mode, sk_sp<SkImageFilter> background) {
+ return Make(std::move(mode), std::move(background), nullptr, nullptr);
+ }
+
+#endif
+
+#ifdef SK_SUPPORT_LEGACY_XFERMODE_PTR
+ static SkImageFilter* Create(SkXfermode* mode, SkImageFilter* background,
+ SkImageFilter* foreground = NULL,
+ const SkImageFilter::CropRect* cropRect = NULL) {
+ return Make(sk_ref_sp(mode),
+ sk_ref_sp(background),
+ sk_ref_sp(foreground),
+ cropRect).release();
+ }
+#endif
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static sk_sp<SkImageFilter> Make(sk_sp<SkXfermode> mode, SkImageFilter* background,
+ SkImageFilter* foreground,
+ const SkImageFilter::CropRect* cropRect) {
+ return Make(std::move(mode),
+ sk_ref_sp(background),
+ sk_ref_sp(foreground),
+ cropRect);
+ }
+ static sk_sp<SkImageFilter> Make(sk_sp<SkXfermode> mode, SkImageFilter* background) {
+ return Make(std::move(mode), sk_ref_sp(background));
+ }
+#endif
+
+ SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP();
+
+private:
+ SkXfermodeImageFilter(); // can't instantiate
+};
+
+#endif
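A sketch of the non-legacy Make overload above: composite a foreground filter graph over the unfiltered source with a multiply blend.

    #include <utility>

    #include "SkXfermodeImageFilter.h"

    static sk_sp<SkImageFilter> makeMultiplyComposite(sk_sp<SkImageFilter> foreground) {
        // background == nullptr means the unfiltered source; no crop rect.
        return SkXfermodeImageFilter::Make(SkBlendMode::kMultiply, nullptr,
                                           std::move(foreground), nullptr);
    }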
diff --git a/gfx/skia/skia/include/gpu/GrBlend.h b/gfx/skia/skia/include/gpu/GrBlend.h
new file mode 100644
index 000000000..5100bb053
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrBlend.h
@@ -0,0 +1,228 @@
+
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBlend_DEFINED
+#define GrBlend_DEFINED
+
+#include "GrColor.h"
+#include "../private/SkTLogic.h"
+
+/**
+ * Equations for alpha-blending.
+ */
+enum GrBlendEquation {
+ // Basic blend equations.
+ kAdd_GrBlendEquation, //<! Cs*S + Cd*D
+ kSubtract_GrBlendEquation, //<! Cs*S - Cd*D
+ kReverseSubtract_GrBlendEquation, //<! Cd*D - Cs*S
+
+ // Advanced blend equations. These are described in the SVG and PDF specs.
+ kScreen_GrBlendEquation,
+ kOverlay_GrBlendEquation,
+ kDarken_GrBlendEquation,
+ kLighten_GrBlendEquation,
+ kColorDodge_GrBlendEquation,
+ kColorBurn_GrBlendEquation,
+ kHardLight_GrBlendEquation,
+ kSoftLight_GrBlendEquation,
+ kDifference_GrBlendEquation,
+ kExclusion_GrBlendEquation,
+ kMultiply_GrBlendEquation,
+ kHSLHue_GrBlendEquation,
+ kHSLSaturation_GrBlendEquation,
+ kHSLColor_GrBlendEquation,
+ kHSLLuminosity_GrBlendEquation,
+
+ kFirstAdvancedGrBlendEquation = kScreen_GrBlendEquation,
+ kLast_GrBlendEquation = kHSLLuminosity_GrBlendEquation
+};
+
+static const int kGrBlendEquationCnt = kLast_GrBlendEquation + 1;
+
+
+/**
+ * Coefficients for alpha-blending.
+ */
+enum GrBlendCoeff {
+ kZero_GrBlendCoeff, //<! 0
+ kOne_GrBlendCoeff, //<! 1
+ kSC_GrBlendCoeff, //<! src color
+ kISC_GrBlendCoeff, //<! one minus src color
+ kDC_GrBlendCoeff, //<! dst color
+ kIDC_GrBlendCoeff, //<! one minus dst color
+ kSA_GrBlendCoeff, //<! src alpha
+ kISA_GrBlendCoeff, //<! one minus src alpha
+ kDA_GrBlendCoeff, //<! dst alpha
+ kIDA_GrBlendCoeff, //<! one minus dst alpha
+ kConstC_GrBlendCoeff, //<! constant color
+ kIConstC_GrBlendCoeff, //<! one minus constant color
+ kConstA_GrBlendCoeff, //<! constant color alpha
+ kIConstA_GrBlendCoeff, //<! one minus constant color alpha
+ kS2C_GrBlendCoeff,
+ kIS2C_GrBlendCoeff,
+ kS2A_GrBlendCoeff,
+ kIS2A_GrBlendCoeff,
+
+ kLast_GrBlendCoeff = kIS2A_GrBlendCoeff
+};
+
+static const int kGrBlendCoeffCnt = kLast_GrBlendCoeff + 1;
+
+/**
+ * Given a known blend equation in the form of srcCoeff * srcColor + dstCoeff * dstColor where
+ * there may be partial knowledge of the srcColor and dstColor component values, determine what
+ * components of the blended output color are known. Coeffs must not refer to the constant or
+ * secondary src color.
+ */
+void GrGetCoeffBlendKnownComponents(GrBlendCoeff srcCoeff, GrBlendCoeff dstCoeff,
+ GrColor srcColor,
+ GrColorComponentFlags srcColorFlags,
+ GrColor dstColor,
+ GrColorComponentFlags dstColorFlags,
+ GrColor* outColor,
+ GrColorComponentFlags* outFlags);
+
+template<GrBlendCoeff Coeff>
+struct GrTBlendCoeffRefsSrc : skstd::bool_constant<kSC_GrBlendCoeff == Coeff ||
+ kISC_GrBlendCoeff == Coeff ||
+ kSA_GrBlendCoeff == Coeff ||
+ kISA_GrBlendCoeff == Coeff> {};
+
+#define GR_BLEND_COEFF_REFS_SRC(COEFF) \
+ GrTBlendCoeffRefsSrc<COEFF>::value
+
+inline bool GrBlendCoeffRefsSrc(GrBlendCoeff coeff) {
+ switch (coeff) {
+ case kSC_GrBlendCoeff:
+ case kISC_GrBlendCoeff:
+ case kSA_GrBlendCoeff:
+ case kISA_GrBlendCoeff:
+ return true;
+ default:
+ return false;
+ }
+}
+
+template<GrBlendCoeff Coeff>
+struct GrTBlendCoeffRefsDst : skstd::bool_constant<kDC_GrBlendCoeff == Coeff ||
+ kIDC_GrBlendCoeff == Coeff ||
+ kDA_GrBlendCoeff == Coeff ||
+ kIDA_GrBlendCoeff == Coeff> {};
+
+#define GR_BLEND_COEFF_REFS_DST(COEFF) \
+ GrTBlendCoeffRefsDst<COEFF>::value
+
+inline bool GrBlendCoeffRefsDst(GrBlendCoeff coeff) {
+ switch (coeff) {
+ case kDC_GrBlendCoeff:
+ case kIDC_GrBlendCoeff:
+ case kDA_GrBlendCoeff:
+ case kIDA_GrBlendCoeff:
+ return true;
+ default:
+ return false;
+ }
+}
+
+
+template<GrBlendCoeff Coeff>
+struct GrTBlendCoeffRefsSrc2 : skstd::bool_constant<kS2C_GrBlendCoeff == Coeff ||
+ kIS2C_GrBlendCoeff == Coeff ||
+ kS2A_GrBlendCoeff == Coeff ||
+ kIS2A_GrBlendCoeff == Coeff> {};
+
+#define GR_BLEND_COEFF_REFS_SRC2(COEFF) \
+ GrTBlendCoeffRefsSrc2<COEFF>::value
+
+inline bool GrBlendCoeffRefsSrc2(GrBlendCoeff coeff) {
+ switch (coeff) {
+ case kS2C_GrBlendCoeff:
+ case kIS2C_GrBlendCoeff:
+ case kS2A_GrBlendCoeff:
+ case kIS2A_GrBlendCoeff:
+ return true;
+ default:
+ return false;
+ }
+}
+
+
+template<GrBlendCoeff SrcCoeff, GrBlendCoeff DstCoeff>
+struct GrTBlendCoeffsUseSrcColor : skstd::bool_constant<kZero_GrBlendCoeff != SrcCoeff ||
+ GR_BLEND_COEFF_REFS_SRC(DstCoeff)> {};
+
+#define GR_BLEND_COEFFS_USE_SRC_COLOR(SRC_COEFF, DST_COEFF) \
+ GrTBlendCoeffsUseSrcColor<SRC_COEFF, DST_COEFF>::value
+
+
+template<GrBlendCoeff SrcCoeff, GrBlendCoeff DstCoeff>
+struct GrTBlendCoeffsUseDstColor : skstd::bool_constant<GR_BLEND_COEFF_REFS_DST(SrcCoeff) ||
+ kZero_GrBlendCoeff != DstCoeff> {};
+
+#define GR_BLEND_COEFFS_USE_DST_COLOR(SRC_COEFF, DST_COEFF) \
+ GrTBlendCoeffsUseDstColor<SRC_COEFF, DST_COEFF>::value
+
+
+template<GrBlendEquation Equation>
+struct GrTBlendEquationIsAdvanced : skstd::bool_constant<Equation >= kFirstAdvancedGrBlendEquation> {};
+
+#define GR_BLEND_EQUATION_IS_ADVANCED(EQUATION) \
+ GrTBlendEquationIsAdvanced<EQUATION>::value
+
+inline bool GrBlendEquationIsAdvanced(GrBlendEquation equation) {
+ return equation >= kFirstAdvancedGrBlendEquation;
+}
+
+
+template<GrBlendEquation BlendEquation, GrBlendCoeff SrcCoeff, GrBlendCoeff DstCoeff>
+struct GrTBlendModifiesDst : skstd::bool_constant<
+ (kAdd_GrBlendEquation != BlendEquation && kReverseSubtract_GrBlendEquation != BlendEquation) ||
+ kZero_GrBlendCoeff != SrcCoeff ||
+ kOne_GrBlendCoeff != DstCoeff> {};
+
+#define GR_BLEND_MODIFIES_DST(EQUATION, SRC_COEFF, DST_COEFF) \
+ GrTBlendModifiesDst<EQUATION, SRC_COEFF, DST_COEFF>::value
+
+
+/**
+ * Advanced blend equations can always tweak alpha for coverage. (See GrCustomXfermode.cpp)
+ *
+ * For "add" and "reverse subtract" the blend equation with f=coverage is:
+ *
+ * D' = f * (S * srcCoeff + D * dstCoeff) + (1-f) * D
+ * = f * S * srcCoeff + D * (f * dstCoeff + (1 - f))
+ *
+ * (Let srcCoeff be negative for reverse subtract.) We can tweak alpha for coverage when the
+ * following relationship holds:
+ *
+ * (f*S) * srcCoeff' + D * dstCoeff' == f * S * srcCoeff + D * (f * dstCoeff + (1 - f))
+ *
+ * (Where srcCoeff' and dstCoeff' have any reference to S pre-multiplied by f.)
+ *
+ * It's easy to see this works for the src term as long as srcCoeff' == srcCoeff (meaning srcCoeff
+ * does not reference S). For the dst term, this will work as long as the following is true:
+ *
+ * dstCoeff' == f * dstCoeff + (1 - f)
+ * dstCoeff' == 1 - f * (1 - dstCoeff)
+ *
+ * By inspection we can see this will work as long as dstCoeff has a 1, and any other term in
+ * dstCoeff references S.
+ */
+template<GrBlendEquation Equation, GrBlendCoeff SrcCoeff, GrBlendCoeff DstCoeff>
+struct GrTBlendCanTweakAlphaForCoverage : skstd::bool_constant<
+ GR_BLEND_EQUATION_IS_ADVANCED(Equation) ||
+ ((kAdd_GrBlendEquation == Equation || kReverseSubtract_GrBlendEquation == Equation) &&
+ !GR_BLEND_COEFF_REFS_SRC(SrcCoeff) &&
+ (kOne_GrBlendCoeff == DstCoeff ||
+ kISC_GrBlendCoeff == DstCoeff ||
+ kISA_GrBlendCoeff == DstCoeff))> {};
+
+#define GR_BLEND_CAN_TWEAK_ALPHA_FOR_COVERAGE(EQUATION, SRC_COEFF, DST_COEFF) \
+ GrTBlendCanTweakAlphaForCoverage<EQUATION, SRC_COEFF, DST_COEFF>::value
+
+#endif
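
To make the compile-time traits above concrete, here is a minimal illustrative sketch (editorial, not part of the patch) that checks classic src-over blending, i.e. kAdd with srcCoeff = kOne and dstCoeff = kISA, against the GR_BLEND_* macros. It assumes only the declarations in this header.

    // Editorial sketch: src-over (S + (1 - Sa) * D) under the GrBlend.h traits.
    #include "GrBlend.h"

    // kOne never reads the src color, while 1 - srcAlpha does.
    static_assert(!GR_BLEND_COEFF_REFS_SRC(kOne_GrBlendCoeff), "coeff 1 ignores src");
    static_assert(GR_BLEND_COEFF_REFS_SRC(kISA_GrBlendCoeff), "1 - srcAlpha reads src");

    // Src-over reads the destination, so it is not a pure overwrite.
    static_assert(GR_BLEND_COEFFS_USE_DST_COLOR(kOne_GrBlendCoeff, kISA_GrBlendCoeff),
                  "src-over reads the dst");

    // dstCoeff = 1 - Sa has the "1 minus a term referencing S" shape described above,
    // so coverage can be folded into alpha.
    static_assert(GR_BLEND_CAN_TWEAK_ALPHA_FOR_COVERAGE(kAdd_GrBlendEquation,
                                                        kOne_GrBlendCoeff,
                                                        kISA_GrBlendCoeff),
                  "src-over can tweak alpha for coverage");
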
diff --git a/gfx/skia/skia/include/gpu/GrBuffer.h b/gfx/skia/skia/include/gpu/GrBuffer.h
new file mode 100644
index 000000000..b2201a140
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrBuffer.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBuffer_DEFINED
+#define GrBuffer_DEFINED
+
+#include "GrGpuResource.h"
+
+class GrGpu;
+
+class GrBuffer : public GrGpuResource {
+public:
+ /**
+ * Creates a client-side buffer.
+ */
+ static SK_WARN_UNUSED_RESULT GrBuffer* CreateCPUBacked(GrGpu*, size_t sizeInBytes, GrBufferType,
+ const void* data = nullptr);
+
+ /**
+ * Computes a scratch key for a GPU-side buffer with a "dynamic" access pattern. (Buffers with
+ * "static" and "stream" patterns are disqualified by nature from being cached and reused.)
+ */
+ static void ComputeScratchKeyForDynamicVBO(size_t size, GrBufferType, GrScratchKey*);
+
+ GrAccessPattern accessPattern() const { return fAccessPattern; }
+ size_t sizeInBytes() const { return fSizeInBytes; }
+
+ /**
+ * Returns true if the buffer is a wrapper around a CPU array. If true it
+ * indicates that map will always succeed and will be free.
+ */
+ bool isCPUBacked() const { return SkToBool(fCPUData); }
+ size_t baseOffset() const { return reinterpret_cast<size_t>(fCPUData); }
+
+ /**
+ * Maps the buffer to be written by the CPU.
+ *
+ * The previous content of the buffer is invalidated. It is an error
+ * to draw from the buffer while it is mapped. It may fail if the backend
+ * doesn't support mapping the buffer. If the buffer is CPU backed then
+ * it will always succeed and is a free operation. Once a buffer is mapped,
+ * subsequent calls to map() are ignored.
+ *
+ * Note that buffer mapping does not go through GrContext and therefore is
+ * not serialized with other operations.
+ *
+ * @return a pointer to the data or nullptr if the map fails.
+ */
+ void* map() {
+ if (!fMapPtr) {
+ this->onMap();
+ }
+ return fMapPtr;
+ }
+
+ /**
+ * Unmaps the buffer.
+ *
+ * The pointer returned by the previous map call will no longer be valid.
+ */
+ void unmap() {
+ SkASSERT(fMapPtr);
+ this->onUnmap();
+ fMapPtr = nullptr;
+ }
+
+ /**
+ * Returns the same ptr that map() returned at time of map, or nullptr if the
+ * buffer is not mapped.
+ *
+ * @return ptr to mapped buffer data or nullptr if buffer is not mapped.
+ */
+ void* mapPtr() const { return fMapPtr; }
+
+ /**
+ * Queries whether the buffer has been mapped.
+ *
+ * @return true if the buffer is mapped, false otherwise.
+ */
+ bool isMapped() const { return SkToBool(fMapPtr); }
+
+ /**
+ * Updates the buffer data.
+ *
+ * The size of the buffer will be preserved. The src data will be
+ * placed at the beginning of the buffer and any remaining contents will
+ * be undefined. srcSizeInBytes must be less than or equal to the buffer size.
+ *
+ * The buffer must not be mapped.
+ *
+ * Note that buffer updates do not go through GrContext and therefore are
+ * not serialized with other operations.
+ *
+ * @return true if the update succeeds, false otherwise.
+ */
+ bool updateData(const void* src, size_t srcSizeInBytes) {
+ SkASSERT(!this->isMapped());
+ SkASSERT(srcSizeInBytes <= fSizeInBytes);
+ return this->onUpdateData(src, srcSizeInBytes);
+ }
+
+ ~GrBuffer() override {
+ sk_free(fCPUData);
+ }
+
+protected:
+ GrBuffer(GrGpu*, size_t sizeInBytes, GrBufferType, GrAccessPattern);
+
+ void* fMapPtr;
+
+private:
+ /**
+ * Internal constructor to make a CPU-backed buffer.
+ */
+ GrBuffer(GrGpu*, size_t sizeInBytes, GrBufferType, void* cpuData);
+
+ virtual void onMap() { SkASSERT(this->isCPUBacked()); fMapPtr = fCPUData; }
+ virtual void onUnmap() { SkASSERT(this->isCPUBacked()); }
+ virtual bool onUpdateData(const void* src, size_t srcSizeInBytes);
+
+ size_t onGpuMemorySize() const override { return fSizeInBytes; } // TODO: zero for cpu backed?
+ void computeScratchKey(GrScratchKey* key) const override;
+
+ size_t fSizeInBytes;
+ GrAccessPattern fAccessPattern;
+ void* fCPUData;
+ GrBufferType fIntendedType;
+
+ typedef GrGpuResource INHERITED;
+};
+
+#endif
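
As a quick illustration of the mapping contract documented above, here is a minimal sketch (editorial, not part of the patch). It assumes the caller already obtained a GrBuffer of sufficient size, e.g. from the resource provider or from CreateCPUBacked; the helper name is hypothetical.

    #include <string.h>
    #include "GrBuffer.h"

    // Upload data, preferring map()/unmap() and falling back to updateData()
    // when the backend cannot map the buffer.
    static bool upload_to_buffer(GrBuffer* buffer, const void* src, size_t srcSize) {
        SkASSERT(srcSize <= buffer->sizeInBytes());
        if (void* dst = buffer->map()) {   // invalidates the previous contents
            memcpy(dst, src, srcSize);
            buffer->unmap();               // the pointer from map() is now invalid
            return true;
        }
        // Mapping is unsupported or failed; push the data directly instead.
        return buffer->updateData(src, srcSize);
    }
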
diff --git a/gfx/skia/skia/include/gpu/GrBufferAccess.h b/gfx/skia/skia/include/gpu/GrBufferAccess.h
new file mode 100644
index 000000000..a5d8f0a68
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrBufferAccess.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBufferAccess_DEFINED
+#define GrBufferAccess_DEFINED
+
+#include "GrBuffer.h"
+#include "GrGpuResourceRef.h"
+
+/**
+ * Used to represent a texel buffer that will be read in a GrProcessor. It holds a GrBuffer along
+ * with an associated texel config and shader visibility.
+ */
+class GrBufferAccess : public SkNoncopyable {
+public:
+ /**
+ * Must be initialized before adding to a GrProcessor's buffer access list.
+ */
+ void reset(GrPixelConfig texelConfig, GrBuffer* buffer,
+ GrShaderFlags visibility = kFragment_GrShaderFlag) {
+ fTexelConfig = texelConfig;
+ fBuffer.set(SkRef(buffer), kRead_GrIOType);
+ fVisibility = visibility;
+ }
+
+ bool operator==(const GrBufferAccess& that) const {
+ return fTexelConfig == that.fTexelConfig &&
+ this->buffer() == that.buffer() &&
+ fVisibility == that.fVisibility;
+ }
+
+ bool operator!=(const GrBufferAccess& that) const { return !(*this == that); }
+
+ GrPixelConfig texelConfig() const { return fTexelConfig; }
+ GrBuffer* buffer() const { return fBuffer.get(); }
+ GrShaderFlags visibility() const { return fVisibility; }
+
+ /**
+ * For internal use by GrProcessor.
+ */
+ const GrGpuResourceRef* getProgramBuffer() const { return &fBuffer;}
+
+private:
+ GrPixelConfig fTexelConfig;
+ GrTGpuResourceRef<GrBuffer> fBuffer;
+ GrShaderFlags fVisibility;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+#endif
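
A minimal sketch of how a processor might fill in one of these access objects (editorial, not from the patch). It assumes a GrBuffer holding float RGBA texels that is read from the vertex stage; the helper name is hypothetical.

    #include "GrBufferAccess.h"

    // Describe a texel buffer read by the vertex stage. reset() must be called
    // before the access is added to a GrProcessor's buffer access list.
    static void describe_texel_buffer(GrBufferAccess* access, GrBuffer* texels) {
        access->reset(kRGBA_float_GrPixelConfig, texels, kVertex_GrShaderFlag);
        SkASSERT(kVertex_GrShaderFlag == access->visibility());
    }
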
diff --git a/gfx/skia/skia/include/gpu/GrCaps.h b/gfx/skia/skia/include/gpu/GrCaps.h
new file mode 100644
index 000000000..a97be72c3
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrCaps.h
@@ -0,0 +1,359 @@
+
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef GrCaps_DEFINED
+#define GrCaps_DEFINED
+
+#include "GrTypes.h"
+#include "GrTypesPriv.h"
+#include "GrBlend.h"
+#include "GrShaderVar.h"
+#include "SkRefCnt.h"
+#include "SkString.h"
+
+struct GrContextOptions;
+
+class GrShaderCaps : public SkRefCnt {
+public:
+ /** Info about shader variable precision within a given shader stage. That is, this info
+ is relevant to a float (or vecNf) variable declared with a GrSLPrecision
+ in a given GrShaderType. The info here is hoisted from the OpenGL spec. */
+ struct PrecisionInfo {
+ PrecisionInfo() {
+ fLogRangeLow = 0;
+ fLogRangeHigh = 0;
+ fBits = 0;
+ }
+
+ /** Is this precision level allowed in the shader stage? */
+ bool supported() const { return 0 != fBits; }
+
+ bool operator==(const PrecisionInfo& that) const {
+ return fLogRangeLow == that.fLogRangeLow && fLogRangeHigh == that.fLogRangeHigh &&
+ fBits == that.fBits;
+ }
+ bool operator!=(const PrecisionInfo& that) const { return !(*this == that); }
+
+ /** floor(log2(|min_value|)) */
+ int fLogRangeLow;
+ /** floor(log2(|max_value|)) */
+ int fLogRangeHigh;
+ /** Number of bits of precision. As defined in OpenGL (with names modified to reflect this
+ struct) :
+ """
+ If the smallest representable value greater than 1 is 1 + e, then fBits will
+ contain floor(log2(e)), and every value in the range [2^fLogRangeLow,
+ 2^fLogRangeHigh] can be represented to at least one part in 2^fBits.
+ """
+ */
+ int fBits;
+ };
+
+ GrShaderCaps();
+
+ virtual SkString dump() const;
+
+ bool shaderDerivativeSupport() const { return fShaderDerivativeSupport; }
+ bool geometryShaderSupport() const { return fGeometryShaderSupport; }
+ bool pathRenderingSupport() const { return fPathRenderingSupport; }
+ bool dstReadInShaderSupport() const { return fDstReadInShaderSupport; }
+ bool dualSourceBlendingSupport() const { return fDualSourceBlendingSupport; }
+ bool integerSupport() const { return fIntegerSupport; }
+ bool texelBufferSupport() const { return fTexelBufferSupport; }
+
+ /**
+ * Get the precision info for a variable of type kFloat_GrSLType, kVec2f_GrSLType, etc in a
+ * given shader type. If the shader type is not supported or the precision level is not
+ * supported in that shader type then the returned struct will report false when supported() is
+ * called.
+ */
+ const PrecisionInfo& getFloatShaderPrecisionInfo(GrShaderType shaderType,
+ GrSLPrecision precision) const {
+ return fFloatPrecisions[shaderType][precision];
+ }
+
+ /**
+ * Is there any difference between the float shader variable precision types? If this is false
+ * then, unless the shader type is not supported, any call to getFloatShaderPrecisionInfo() will
+ * report the same info for all precisions in all shader types.
+ */
+ bool floatPrecisionVaries() const { return fShaderPrecisionVaries; }
+
+ /**
+ * PLS storage size in bytes (0 when not supported). The PLS spec defines a minimum size of 16
+ * bytes whenever PLS is supported.
+ */
+ int pixelLocalStorageSize() const { return fPixelLocalStorageSize; }
+
+ /**
+ * True if this context supports the necessary extensions and features to enable the PLS path
+ * renderer.
+ */
+ bool plsPathRenderingSupport() const {
+#if GR_ENABLE_PLS_PATH_RENDERING
+ return fPLSPathRenderingSupport;
+#else
+ return false;
+#endif
+ }
+
+protected:
+ /** Subclasses must call this after initialization in order to apply caps overrides requested by
+ the client. Note that overrides will only reduce the caps, never expand them. */
+ void applyOptionsOverrides(const GrContextOptions& options);
+
+ bool fShaderDerivativeSupport : 1;
+ bool fGeometryShaderSupport : 1;
+ bool fPathRenderingSupport : 1;
+ bool fDstReadInShaderSupport : 1;
+ bool fDualSourceBlendingSupport : 1;
+ bool fIntegerSupport : 1;
+ bool fTexelBufferSupport : 1;
+
+ bool fShaderPrecisionVaries;
+ PrecisionInfo fFloatPrecisions[kGrShaderTypeCount][kGrSLPrecisionCount];
+ int fPixelLocalStorageSize;
+ bool fPLSPathRenderingSupport;
+
+private:
+ virtual void onApplyOptionsOverrides(const GrContextOptions&) {}
+ typedef SkRefCnt INHERITED;
+};
+
+/**
+ * Represents the capabilities of a GrContext.
+ */
+class GrCaps : public SkRefCnt {
+public:
+ GrCaps(const GrContextOptions&);
+
+ virtual SkString dump() const;
+
+ GrShaderCaps* shaderCaps() const { return fShaderCaps; }
+
+ bool npotTextureTileSupport() const { return fNPOTTextureTileSupport; }
+ /** To avoid as-yet-unnecessary complexity we don't allow any partial support of MIP Maps (e.g.
+ only for POT textures) */
+ bool mipMapSupport() const { return fMipMapSupport; }
+
+ /**
+ * Skia convention is that a device only has sRGB support if it supports sRGB formats for both
+ * textures and framebuffers. In addition:
+ * Decoding an sRGB texture to linear can be disabled.
+ */
+ bool srgbSupport() const { return fSRGBSupport; }
+ /**
+ * Is there support for enabling/disabling sRGB writes for sRGB-capable color buffers?
+ */
+ bool srgbWriteControl() const { return fSRGBWriteControl; }
+ bool twoSidedStencilSupport() const { return fTwoSidedStencilSupport; }
+ bool stencilWrapOpsSupport() const { return fStencilWrapOpsSupport; }
+ bool discardRenderTargetSupport() const { return fDiscardRenderTargetSupport; }
+ bool gpuTracingSupport() const { return fGpuTracingSupport; }
+ bool compressedTexSubImageSupport() const { return fCompressedTexSubImageSupport; }
+ bool oversizedStencilSupport() const { return fOversizedStencilSupport; }
+ bool textureBarrierSupport() const { return fTextureBarrierSupport; }
+ bool sampleLocationsSupport() const { return fSampleLocationsSupport; }
+ bool multisampleDisableSupport() const { return fMultisampleDisableSupport; }
+ bool usesMixedSamples() const { return fUsesMixedSamples; }
+ bool preferClientSideDynamicBuffers() const { return fPreferClientSideDynamicBuffers; }
+
+ bool useDrawInsteadOfClear() const { return fUseDrawInsteadOfClear; }
+ bool useDrawInsteadOfPartialRenderTargetWrite() const {
+ return fUseDrawInsteadOfPartialRenderTargetWrite;
+ }
+
+ bool useDrawInsteadOfAllRenderTargetWrites() const {
+ return fUseDrawInsteadOfAllRenderTargetWrites;
+ }
+
+ bool preferVRAMUseOverFlushes() const { return fPreferVRAMUseOverFlushes; }
+
+ /**
+ * Indicates the level of support for gr_instanced::* functionality. A higher level includes
+ * all functionality from the levels below it.
+ */
+ enum class InstancedSupport {
+ kNone,
+ kBasic,
+ kMultisampled,
+ kMixedSampled
+ };
+
+ InstancedSupport instancedSupport() const { return fInstancedSupport; }
+
+ bool avoidInstancedDrawsToFPTargets() const { return fAvoidInstancedDrawsToFPTargets; }
+
+ /**
+ * Indicates the capabilities of the fixed function blend unit.
+ */
+ enum BlendEquationSupport {
+ kBasic_BlendEquationSupport, //<! Support to select the operator that
+ // combines src and dst terms.
+ kAdvanced_BlendEquationSupport, //<! Additional fixed function support for specific
+ // SVG/PDF blend modes. Requires blend barriers.
+ kAdvancedCoherent_BlendEquationSupport, //<! Advanced blend equation support that does not
+ // require blend barriers, and permits overlap.
+
+ kLast_BlendEquationSupport = kAdvancedCoherent_BlendEquationSupport
+ };
+
+ BlendEquationSupport blendEquationSupport() const { return fBlendEquationSupport; }
+
+ bool advancedBlendEquationSupport() const {
+ return fBlendEquationSupport >= kAdvanced_BlendEquationSupport;
+ }
+
+ bool advancedCoherentBlendEquationSupport() const {
+ return kAdvancedCoherent_BlendEquationSupport == fBlendEquationSupport;
+ }
+
+ bool canUseAdvancedBlendEquation(GrBlendEquation equation) const {
+ SkASSERT(GrBlendEquationIsAdvanced(equation));
+ return !SkToBool(fAdvBlendEqBlacklist & (1 << equation));
+ }
+
+ /**
+ * Indicates whether GPU->CPU memory mapping for GPU resources such as vertex buffers and
+ * textures allows partial mappings or full mappings.
+ */
+ enum MapFlags {
+ kNone_MapFlags = 0x0, //<! Cannot map the resource.
+
+ kCanMap_MapFlag = 0x1, //<! The resource can be mapped. Must be set for any of
+ // the other flags to have meaning.
+ kSubset_MapFlag = 0x2, //<! The resource can be partially mapped.
+ };
+
+ uint32_t mapBufferFlags() const { return fMapBufferFlags; }
+
+ // Scratch textures not being reused means that those scratch textures
+ // that we upload to (i.e., don't have a render target) will not be
+ // recycled in the texture cache. This is to prevent ghosting by drivers
+ // (in particular for deferred architectures).
+ bool reuseScratchTextures() const { return fReuseScratchTextures; }
+ bool reuseScratchBuffers() const { return fReuseScratchBuffers; }
+
+ /// maximum number of attribute values per vertex
+ int maxVertexAttributes() const { return fMaxVertexAttributes; }
+
+ int maxRenderTargetSize() const { return fMaxRenderTargetSize; }
+ int maxTextureSize() const { return fMaxTextureSize; }
+ /** This is the maximum tile size to use by GPU devices for rendering sw-backed images/bitmaps.
+ It is usually the max texture size, unless we're overriding it for testing. */
+ int maxTileSize() const { SkASSERT(fMaxTileSize <= fMaxTextureSize); return fMaxTileSize; }
+
+ // Will be 0 if MSAA is not supported
+ int maxColorSampleCount() const { return fMaxColorSampleCount; }
+ // Will be 0 if MSAA is not supported
+ int maxStencilSampleCount() const { return fMaxStencilSampleCount; }
+ // Will be 0 if raster multisample is not supported. Raster multisample is a special HW mode
+ // where the rasterizer runs with more samples than are in the target framebuffer.
+ int maxRasterSamples() const { return fMaxRasterSamples; }
+ // We require the sample count to be less than maxColorSampleCount and maxStencilSampleCount.
+ // If we are using mixed samples, we only care about stencil.
+ int maxSampleCount() const {
+ if (this->usesMixedSamples()) {
+ return this->maxStencilSampleCount();
+ } else {
+ return SkTMin(this->maxColorSampleCount(), this->maxStencilSampleCount());
+ }
+ }
+
+ int maxWindowRectangles() const { return fMaxWindowRectangles; }
+
+ virtual bool isConfigTexturable(GrPixelConfig config) const = 0;
+ virtual bool isConfigRenderable(GrPixelConfig config, bool withMSAA) const = 0;
+
+ bool suppressPrints() const { return fSuppressPrints; }
+
+ bool immediateFlush() const { return fImmediateFlush; }
+
+ size_t bufferMapThreshold() const {
+ SkASSERT(fBufferMapThreshold >= 0);
+ return fBufferMapThreshold;
+ }
+
+ bool fullClearIsFree() const { return fFullClearIsFree; }
+
+ /** True in environments that will issue errors if memory uploaded to buffers
+ is not initialized (even if not read by draw calls). */
+ bool mustClearUploadedBufferData() const { return fMustClearUploadedBufferData; }
+
+ bool sampleShadingSupport() const { return fSampleShadingSupport; }
+
+ bool fenceSyncSupport() const { return fFenceSyncSupport; }
+
+protected:
+ /** Subclasses must call this at the end of their constructors in order to apply caps
+ overrides requested by the client. Note that overrides will only reduce the caps, never
+ expand them. */
+ void applyOptionsOverrides(const GrContextOptions& options);
+
+ SkAutoTUnref<GrShaderCaps> fShaderCaps;
+
+ bool fNPOTTextureTileSupport : 1;
+ bool fMipMapSupport : 1;
+ bool fSRGBSupport : 1;
+ bool fSRGBWriteControl : 1;
+ bool fTwoSidedStencilSupport : 1;
+ bool fStencilWrapOpsSupport : 1;
+ bool fDiscardRenderTargetSupport : 1;
+ bool fReuseScratchTextures : 1;
+ bool fReuseScratchBuffers : 1;
+ bool fGpuTracingSupport : 1;
+ bool fCompressedTexSubImageSupport : 1;
+ bool fOversizedStencilSupport : 1;
+ bool fTextureBarrierSupport : 1;
+ bool fSampleLocationsSupport : 1;
+ bool fMultisampleDisableSupport : 1;
+ bool fUsesMixedSamples : 1;
+ bool fPreferClientSideDynamicBuffers : 1;
+ bool fFullClearIsFree : 1;
+ bool fMustClearUploadedBufferData : 1;
+
+ // Driver workaround
+ bool fUseDrawInsteadOfClear : 1;
+ bool fUseDrawInsteadOfPartialRenderTargetWrite : 1;
+ bool fUseDrawInsteadOfAllRenderTargetWrites : 1;
+ bool fAvoidInstancedDrawsToFPTargets : 1;
+
+ // ANGLE workaround
+ bool fPreferVRAMUseOverFlushes : 1;
+
+ bool fSampleShadingSupport : 1;
+ // TODO: this may need to be an enum to support different fence types
+ bool fFenceSyncSupport : 1;
+
+ InstancedSupport fInstancedSupport;
+
+ BlendEquationSupport fBlendEquationSupport;
+ uint32_t fAdvBlendEqBlacklist;
+ GR_STATIC_ASSERT(kLast_GrBlendEquation < 32);
+
+ uint32_t fMapBufferFlags;
+ int fBufferMapThreshold;
+
+ int fMaxRenderTargetSize;
+ int fMaxVertexAttributes;
+ int fMaxTextureSize;
+ int fMaxTileSize;
+ int fMaxColorSampleCount;
+ int fMaxStencilSampleCount;
+ int fMaxRasterSamples;
+ int fMaxWindowRectangles;
+
+private:
+ virtual void onApplyOptionsOverrides(const GrContextOptions&) {}
+
+ bool fSuppressPrints : 1;
+ bool fImmediateFlush : 1;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
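
As a small usage sketch (editorial, not part of the patch), the caps object obtained from GrContext::caps() is typically consulted like this before relying on an optional feature such as an advanced blend equation; the helper name is hypothetical.

    #include "GrCaps.h"

    // Returns true if the given blend equation can be used on this device.
    static bool can_use_equation(const GrCaps& caps, GrBlendEquation equation) {
        if (!GrBlendEquationIsAdvanced(equation)) {
            return true;  // the basic add/subtract equations are always available
        }
        return caps.advancedBlendEquationSupport() &&
               caps.canUseAdvancedBlendEquation(equation);
    }
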
diff --git a/gfx/skia/skia/include/gpu/GrClip.h b/gfx/skia/skia/include/gpu/GrClip.h
new file mode 100644
index 000000000..96c6291ef
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrClip.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrClip_DEFINED
+#define GrClip_DEFINED
+
+#include "SkRect.h"
+#include "SkRRect.h"
+
+class GrAppliedClip;
+class GrContext;
+class GrDrawContext;
+
+/**
+ * GrClip is an abstract base class for applying a clip. It constructs a clip mask if necessary, and
+ * fills out a GrAppliedClip instructing the caller on how to set up the draw state.
+ */
+class GrClip {
+public:
+ virtual bool quickContains(const SkRect&) const = 0;
+ virtual bool quickContains(const SkRRect& rrect) const {
+ return this->quickContains(rrect.getBounds());
+ }
+ virtual void getConservativeBounds(int width, int height, SkIRect* devResult,
+ bool* isIntersectionOfRects = nullptr) const = 0;
+ virtual bool apply(GrContext*, GrDrawContext*, bool useHWAA, bool hasUserStencilSettings,
+ GrAppliedClip* out) const = 0;
+
+ virtual ~GrClip() {}
+
+ /**
+ * This method quickly and conservatively determines whether the entire clip is equivalent to
+ * intersection with a rrect. This will only return true if the rrect does not fully contain
+ * the render target bounds. Moreover, the returned rrect need not be contained by the render
+ * target bounds. We assume all draws will be implicitly clipped by the render target bounds.
+ *
+ * @param rtBounds The bounds of the render target that the clip will be applied to.
+ * @param rrect If return is true rrect will contain the rrect equivalent to the clip within
+ * rtBounds.
+ * @param aa If return is true aa will indicate whether the rrect clip is antialiased.
+ * @return true if the clip is equivalent to a single rrect, false otherwise.
+ *
+ */
+ virtual bool isRRect(const SkRect& rtBounds, SkRRect* rrect, bool* aa) const = 0;
+
+ /**
+ * This is the maximum distance that a draw may extend beyond a clip's boundary and still
+ * count as "on the other side". We leave some slack because floating point rounding error is
+ * likely to blame. The rationale for 1e-3 is that in the coverage case (and barring unexpected
+ * rounding), as long as coverage stays within 0.5 * 1/256 of its intended value it shouldn't
+ * have any effect on the final pixel values.
+ */
+ constexpr static SkScalar kBoundsTolerance = 1e-3f;
+
+ /**
+ * Returns true if the given query bounds count as entirely inside the clip.
+ *
+ * @param innerClipBounds device-space rect contained by the clip (SkRect or SkIRect).
+ * @param queryBounds device-space bounds of the query region.
+ */
+ template<typename TRect> constexpr static bool IsInsideClip(const TRect& innerClipBounds,
+ const SkRect& queryBounds) {
+ return innerClipBounds.fRight - innerClipBounds.fLeft > kBoundsTolerance &&
+ innerClipBounds.fBottom - innerClipBounds.fTop > kBoundsTolerance &&
+ innerClipBounds.fLeft < queryBounds.fLeft + kBoundsTolerance &&
+ innerClipBounds.fTop < queryBounds.fTop + kBoundsTolerance &&
+ innerClipBounds.fRight > queryBounds.fRight - kBoundsTolerance &&
+ innerClipBounds.fBottom > queryBounds.fBottom - kBoundsTolerance;
+ }
+
+ /**
+ * Returns true if the given query bounds count as entirely outside the clip.
+ *
+ * @param outerClipBounds device-space rect that contains the clip (SkRect or SkIRect).
+ * @param queryBounds device-space bounds of the query region.
+ */
+ template<typename TRect> constexpr static bool IsOutsideClip(const TRect& outerClipBounds,
+ const SkRect& queryBounds) {
+ return outerClipBounds.fRight - outerClipBounds.fLeft <= kBoundsTolerance ||
+ outerClipBounds.fBottom - outerClipBounds.fTop <= kBoundsTolerance ||
+ outerClipBounds.fLeft >= queryBounds.fRight - kBoundsTolerance ||
+ outerClipBounds.fTop >= queryBounds.fBottom - kBoundsTolerance ||
+ outerClipBounds.fRight <= queryBounds.fLeft + kBoundsTolerance ||
+ outerClipBounds.fBottom <= queryBounds.fTop + kBoundsTolerance;
+ }
+
+ /**
+ * Returns the minimal integer rect that counts as containing a given set of bounds.
+ */
+ static SkIRect GetPixelIBounds(const SkRect& bounds) {
+ return SkIRect::MakeLTRB(SkScalarFloorToInt(bounds.fLeft + kBoundsTolerance),
+ SkScalarFloorToInt(bounds.fTop + kBoundsTolerance),
+ SkScalarCeilToInt(bounds.fRight - kBoundsTolerance),
+ SkScalarCeilToInt(bounds.fBottom - kBoundsTolerance));
+ }
+
+ /**
+ * Returns the minimal pixel-aligned rect that counts as containing a given set of bounds.
+ */
+ static SkRect GetPixelBounds(const SkRect& bounds) {
+ return SkRect::MakeLTRB(SkScalarFloorToScalar(bounds.fLeft + kBoundsTolerance),
+ SkScalarFloorToScalar(bounds.fTop + kBoundsTolerance),
+ SkScalarCeilToScalar(bounds.fRight - kBoundsTolerance),
+ SkScalarCeilToScalar(bounds.fBottom - kBoundsTolerance));
+ }
+
+ /**
+ * Returns true if the given rect counts as aligned with pixel boundaries.
+ */
+ static bool IsPixelAligned(const SkRect& rect) {
+ return SkScalarAbs(SkScalarRoundToScalar(rect.fLeft) - rect.fLeft) <= kBoundsTolerance &&
+ SkScalarAbs(SkScalarRoundToScalar(rect.fTop) - rect.fTop) <= kBoundsTolerance &&
+ SkScalarAbs(SkScalarRoundToScalar(rect.fRight) - rect.fRight) <= kBoundsTolerance &&
+ SkScalarAbs(SkScalarRoundToScalar(rect.fBottom) - rect.fBottom) <= kBoundsTolerance;
+ }
+};
+
+/**
+ * Specialized implementation for no clip.
+ */
+class GrNoClip final : public GrClip {
+private:
+ bool quickContains(const SkRect&) const final {
+ return true;
+ }
+ bool quickContains(const SkRRect&) const final {
+ return true;
+ }
+ void getConservativeBounds(int width, int height, SkIRect* devResult,
+ bool* isIntersectionOfRects) const final {
+ devResult->setXYWH(0, 0, width, height);
+ if (isIntersectionOfRects) {
+ *isIntersectionOfRects = true;
+ }
+ }
+ bool apply(GrContext*, GrDrawContext*, bool, bool, GrAppliedClip*) const final {
+ return true;
+ }
+ bool isRRect(const SkRect&, SkRRect*, bool*) const override { return false; }
+};
+
+#endif
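
To show how the tolerance-based helpers above are meant to be used, here is an editorial sketch (not from the patch) that trivially accepts or rejects a draw against a conservative inner/outer bound pair; the enum and function names are hypothetical.

    #include "GrClip.h"

    enum class ClipTriage { kDrawUnclipped, kSkipDraw, kNeedsRealClip };

    // 'inner' must be contained by the clip and 'outer' must contain it.
    static ClipTriage triage_draw(const SkRect& inner, const SkRect& outer,
                                  const SkRect& drawBounds) {
        if (GrClip::IsInsideClip(inner, drawBounds)) {
            return ClipTriage::kDrawUnclipped;   // the clip cannot affect this draw
        }
        if (GrClip::IsOutsideClip(outer, drawBounds)) {
            return ClipTriage::kSkipDraw;        // nothing visible would be drawn
        }
        // Otherwise build and apply a real clip, snapping to whole pixels with
        // GrClip::GetPixelIBounds(drawBounds) where needed.
        return ClipTriage::kNeedsRealClip;
    }
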
diff --git a/gfx/skia/skia/include/gpu/GrColor.h b/gfx/skia/skia/include/gpu/GrColor.h
new file mode 100644
index 000000000..f52671732
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrColor.h
@@ -0,0 +1,309 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrColor_DEFINED
+#define GrColor_DEFINED
+
+#include "GrTypes.h"
+#include "SkColor.h"
+#include "SkColorPriv.h"
+#include "SkUnPreMultiply.h"
+
+/**
+ * GrColor is 4 bytes for R, G, B, A, in a specific order defined below. Whether the color is
+ * premultiplied or not depends on the context in which it is being used.
+ */
+typedef uint32_t GrColor;
+
+// shift amount to assign a component to a GrColor int
+// These shift values are chosen for compatibility with GL attrib arrays
+// ES doesn't allow BGRA vertex attrib order so if they were not in this order
+// we'd have to swizzle in shaders.
+#ifdef SK_CPU_BENDIAN
+ #define GrColor_SHIFT_R 24
+ #define GrColor_SHIFT_G 16
+ #define GrColor_SHIFT_B 8
+ #define GrColor_SHIFT_A 0
+#else
+ #define GrColor_SHIFT_R 0
+ #define GrColor_SHIFT_G 8
+ #define GrColor_SHIFT_B 16
+ #define GrColor_SHIFT_A 24
+#endif
+
+/**
+ * Pack 4 components (RGBA) into a GrColor int
+ */
+static inline GrColor GrColorPackRGBA(unsigned r, unsigned g, unsigned b, unsigned a) {
+ SkASSERT((uint8_t)r == r);
+ SkASSERT((uint8_t)g == g);
+ SkASSERT((uint8_t)b == b);
+ SkASSERT((uint8_t)a == a);
+ return (r << GrColor_SHIFT_R) |
+ (g << GrColor_SHIFT_G) |
+ (b << GrColor_SHIFT_B) |
+ (a << GrColor_SHIFT_A);
+}
+
+/**
+ * Packs a color with an alpha channel replicated across all four channels.
+ */
+static inline GrColor GrColorPackA4(unsigned a) {
+ SkASSERT((uint8_t)a == a);
+ return (a << GrColor_SHIFT_R) |
+ (a << GrColor_SHIFT_G) |
+ (a << GrColor_SHIFT_B) |
+ (a << GrColor_SHIFT_A);
+}
+
+// extract a component (byte) from a GrColor int
+
+#define GrColorUnpackR(color) (((color) >> GrColor_SHIFT_R) & 0xFF)
+#define GrColorUnpackG(color) (((color) >> GrColor_SHIFT_G) & 0xFF)
+#define GrColorUnpackB(color) (((color) >> GrColor_SHIFT_B) & 0xFF)
+#define GrColorUnpackA(color) (((color) >> GrColor_SHIFT_A) & 0xFF)
+
+/**
+ * Since premultiplied means that alpha >= color, we construct a color with
+ * each component==255 and alpha == 0 to be "illegal"
+ */
+#define GrColor_ILLEGAL (~(0xFF << GrColor_SHIFT_A))
+
+#define GrColor_WHITE 0xFFFFFFFF
+#define GrColor_TRANSPARENT_BLACK 0x0
+
+/**
+ * Assert in debug builds that a GrColor is premultiplied.
+ */
+static inline void GrColorIsPMAssert(GrColor SkDEBUGCODE(c)) {
+#ifdef SK_DEBUG
+ unsigned a = GrColorUnpackA(c);
+ unsigned r = GrColorUnpackR(c);
+ unsigned g = GrColorUnpackG(c);
+ unsigned b = GrColorUnpackB(c);
+
+ SkASSERT(r <= a);
+ SkASSERT(g <= a);
+ SkASSERT(b <= a);
+#endif
+}
+
+/** Inverts each color channel. */
+static inline GrColor GrInvertColor(GrColor c) {
+ U8CPU a = GrColorUnpackA(c);
+ U8CPU r = GrColorUnpackR(c);
+ U8CPU g = GrColorUnpackG(c);
+ U8CPU b = GrColorUnpackB(c);
+ return GrColorPackRGBA(0xff - r, 0xff - g, 0xff - b, 0xff - a);
+}
+
+static inline GrColor GrColorMul(GrColor c0, GrColor c1) {
+ U8CPU r = SkMulDiv255Round(GrColorUnpackR(c0), GrColorUnpackR(c1));
+ U8CPU g = SkMulDiv255Round(GrColorUnpackG(c0), GrColorUnpackG(c1));
+ U8CPU b = SkMulDiv255Round(GrColorUnpackB(c0), GrColorUnpackB(c1));
+ U8CPU a = SkMulDiv255Round(GrColorUnpackA(c0), GrColorUnpackA(c1));
+ return GrColorPackRGBA(r, g, b, a);
+}
+
+static inline GrColor GrColorSatAdd(GrColor c0, GrColor c1) {
+ unsigned r = SkTMin<unsigned>(GrColorUnpackR(c0) + GrColorUnpackR(c1), 0xff);
+ unsigned g = SkTMin<unsigned>(GrColorUnpackG(c0) + GrColorUnpackG(c1), 0xff);
+ unsigned b = SkTMin<unsigned>(GrColorUnpackB(c0) + GrColorUnpackB(c1), 0xff);
+ unsigned a = SkTMin<unsigned>(GrColorUnpackA(c0) + GrColorUnpackA(c1), 0xff);
+ return GrColorPackRGBA(r, g, b, a);
+}
+
+/** Converts a GrColor to an rgba array of GrGLfloat */
+static inline void GrColorToRGBAFloat(GrColor color, float rgba[4]) {
+ static const float ONE_OVER_255 = 1.f / 255.f;
+ rgba[0] = GrColorUnpackR(color) * ONE_OVER_255;
+ rgba[1] = GrColorUnpackG(color) * ONE_OVER_255;
+ rgba[2] = GrColorUnpackB(color) * ONE_OVER_255;
+ rgba[3] = GrColorUnpackA(color) * ONE_OVER_255;
+}
+
+/** Normalizes and converts a uint8_t to a float. [0, 255] -> [0.0, 1.0] */
+static inline float GrNormalizeByteToFloat(uint8_t value) {
+ static const float ONE_OVER_255 = 1.f / 255.f;
+ return value * ONE_OVER_255;
+}
+
+/** Determines whether the color is opaque or not. */
+static inline bool GrColorIsOpaque(GrColor color) {
+ return (color & (0xFFU << GrColor_SHIFT_A)) == (0xFFU << GrColor_SHIFT_A);
+}
+
+static inline GrColor GrPremulColor(GrColor color) {
+ unsigned r = GrColorUnpackR(color);
+ unsigned g = GrColorUnpackG(color);
+ unsigned b = GrColorUnpackB(color);
+ unsigned a = GrColorUnpackA(color);
+ return GrColorPackRGBA(SkMulDiv255Round(r, a),
+ SkMulDiv255Round(g, a),
+ SkMulDiv255Round(b, a),
+ a);
+}
+
+/** Returns an unpremultiplied version of the GrColor. */
+static inline GrColor GrUnpremulColor(GrColor color) {
+ GrColorIsPMAssert(color);
+ unsigned r = GrColorUnpackR(color);
+ unsigned g = GrColorUnpackG(color);
+ unsigned b = GrColorUnpackB(color);
+ unsigned a = GrColorUnpackA(color);
+ SkPMColor colorPM = SkPackARGB32(a, r, g, b);
+ SkColor colorUPM = SkUnPreMultiply::PMColorToColor(colorPM);
+
+ r = SkColorGetR(colorUPM);
+ g = SkColorGetG(colorUPM);
+ b = SkColorGetB(colorUPM);
+ a = SkColorGetA(colorUPM);
+
+ return GrColorPackRGBA(r, g, b, a);
+}
+
+
+/**
+* Similarly, GrColor4f is 4 floats for R, G, B, A, in that order. And like GrColor, whether
+* the color is premultiplied or not depends on the context.
+*/
+struct GrColor4f {
+ float fRGBA[4];
+
+ GrColor4f() {}
+ GrColor4f(float r, float g, float b, float a) {
+ fRGBA[0] = r;
+ fRGBA[1] = g;
+ fRGBA[2] = b;
+ fRGBA[3] = a;
+ }
+
+ static GrColor4f FromGrColor(GrColor color) {
+ GrColor4f result;
+ GrColorToRGBAFloat(color, result.fRGBA);
+ return result;
+ }
+
+ static GrColor4f FromSkColor4f(const SkColor4f& color) {
+ return GrColor4f(color.fR, color.fG, color.fB, color.fA);
+ }
+
+ bool operator==(const GrColor4f& other) const {
+ return
+ fRGBA[0] == other.fRGBA[0] &&
+ fRGBA[1] == other.fRGBA[1] &&
+ fRGBA[2] == other.fRGBA[2] &&
+ fRGBA[3] == other.fRGBA[3];
+ }
+ bool operator!=(const GrColor4f& other) const {
+ return !(*this == other);
+ }
+
+ GrColor toGrColor() const {
+ return GrColorPackRGBA(
+ SkTPin<unsigned>(static_cast<unsigned>(fRGBA[0] * 255.0f + 0.5f), 0, 255),
+ SkTPin<unsigned>(static_cast<unsigned>(fRGBA[1] * 255.0f + 0.5f), 0, 255),
+ SkTPin<unsigned>(static_cast<unsigned>(fRGBA[2] * 255.0f + 0.5f), 0, 255),
+ SkTPin<unsigned>(static_cast<unsigned>(fRGBA[3] * 255.0f + 0.5f), 0, 255));
+ }
+
+ SkColor4f toSkColor4f() const {
+ return SkColor4f { fRGBA[0], fRGBA[1], fRGBA[2], fRGBA[3] };
+ }
+
+ GrColor4f opaque() const {
+ return GrColor4f(fRGBA[0], fRGBA[1], fRGBA[2], 1.0f);
+ }
+
+ GrColor4f premul() const {
+ float a = fRGBA[3];
+ return GrColor4f(fRGBA[0] * a, fRGBA[1] * a, fRGBA[2] * a, a);
+ }
+};
+
+/**
+ * Flags used for bitfields of color components. They are defined so that the bit order reflects the
+ * GrColor shift order.
+ */
+enum GrColorComponentFlags {
+ kR_GrColorComponentFlag = 1 << (GrColor_SHIFT_R / 8),
+ kG_GrColorComponentFlag = 1 << (GrColor_SHIFT_G / 8),
+ kB_GrColorComponentFlag = 1 << (GrColor_SHIFT_B / 8),
+ kA_GrColorComponentFlag = 1 << (GrColor_SHIFT_A / 8),
+
+ kNone_GrColorComponentFlags = 0,
+
+ kRGB_GrColorComponentFlags = (kR_GrColorComponentFlag | kG_GrColorComponentFlag |
+ kB_GrColorComponentFlag),
+
+ kRGBA_GrColorComponentFlags = (kR_GrColorComponentFlag | kG_GrColorComponentFlag |
+ kB_GrColorComponentFlag | kA_GrColorComponentFlag)
+};
+
+GR_MAKE_BITFIELD_OPS(GrColorComponentFlags)
+
+static inline char GrColorComponentFlagToChar(GrColorComponentFlags component) {
+ SkASSERT(SkIsPow2(component));
+ switch (component) {
+ case kR_GrColorComponentFlag:
+ return 'r';
+ case kG_GrColorComponentFlag:
+ return 'g';
+ case kB_GrColorComponentFlag:
+ return 'b';
+ case kA_GrColorComponentFlag:
+ return 'a';
+ default:
+ SkFAIL("Invalid color component flag.");
+ return '\0';
+ }
+}
+
+static inline uint32_t GrPixelConfigComponentMask(GrPixelConfig config) {
+ static const uint32_t kFlags[] = {
+ 0, // kUnknown_GrPixelConfig
+ kA_GrColorComponentFlag, // kAlpha_8_GrPixelConfig
+ kRGBA_GrColorComponentFlags, // kIndex_8_GrPixelConfig
+ kRGB_GrColorComponentFlags, // kRGB_565_GrPixelConfig
+ kRGBA_GrColorComponentFlags, // kRGBA_4444_GrPixelConfig
+ kRGBA_GrColorComponentFlags, // kRGBA_8888_GrPixelConfig
+ kRGBA_GrColorComponentFlags, // kBGRA_8888_GrPixelConfig
+ kRGBA_GrColorComponentFlags, // kSRGBA_8888_GrPixelConfig
+ kRGBA_GrColorComponentFlags, // kSBGRA_8888_GrPixelConfig
+ kRGB_GrColorComponentFlags, // kETC1_GrPixelConfig
+ kA_GrColorComponentFlag, // kLATC_GrPixelConfig
+ kA_GrColorComponentFlag, // kR11_EAC_GrPixelConfig
+ kRGBA_GrColorComponentFlags, // kASTC_12x12_GrPixelConfig
+ kRGBA_GrColorComponentFlags, // kRGBA_float_GrPixelConfig
+ kA_GrColorComponentFlag, // kAlpha_half_GrPixelConfig
+ kRGBA_GrColorComponentFlags, // kRGBA_half_GrPixelConfig
+ };
+ return kFlags[config];
+
+ GR_STATIC_ASSERT(0 == kUnknown_GrPixelConfig);
+ GR_STATIC_ASSERT(1 == kAlpha_8_GrPixelConfig);
+ GR_STATIC_ASSERT(2 == kIndex_8_GrPixelConfig);
+ GR_STATIC_ASSERT(3 == kRGB_565_GrPixelConfig);
+ GR_STATIC_ASSERT(4 == kRGBA_4444_GrPixelConfig);
+ GR_STATIC_ASSERT(5 == kRGBA_8888_GrPixelConfig);
+ GR_STATIC_ASSERT(6 == kBGRA_8888_GrPixelConfig);
+ GR_STATIC_ASSERT(7 == kSRGBA_8888_GrPixelConfig);
+ GR_STATIC_ASSERT(8 == kSBGRA_8888_GrPixelConfig);
+ GR_STATIC_ASSERT(9 == kETC1_GrPixelConfig);
+ GR_STATIC_ASSERT(10 == kLATC_GrPixelConfig);
+ GR_STATIC_ASSERT(11 == kR11_EAC_GrPixelConfig);
+ GR_STATIC_ASSERT(12 == kASTC_12x12_GrPixelConfig);
+ GR_STATIC_ASSERT(13 == kRGBA_float_GrPixelConfig);
+ GR_STATIC_ASSERT(14 == kAlpha_half_GrPixelConfig);
+ GR_STATIC_ASSERT(15 == kRGBA_half_GrPixelConfig);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kFlags) == kGrPixelConfigCnt);
+}
+
+#endif
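
A short editorial sketch (not part of the patch) of the packing helpers above: it builds a half-coverage red, premultiplies it, and converts it to the float form used for uniform uploads; the function name is hypothetical.

    #include "GrColor.h"

    static void pack_half_coverage_red() {
        GrColor unpremul = GrColorPackRGBA(0xFF, 0x00, 0x00, 0x80);  // non-premultiplied
        GrColor premul   = GrPremulColor(unpremul);                  // red becomes 0x80
        GrColorIsPMAssert(premul);                                   // debug-only sanity check

        float rgba[4];
        GrColorToRGBAFloat(premul, rgba);   // roughly {0.5f, 0.f, 0.f, 0.5f}
        SkASSERT(0x80 == GrColorUnpackA(premul));
        (void)rgba;
    }
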
diff --git a/gfx/skia/skia/include/gpu/GrColorSpaceXform.h b/gfx/skia/skia/include/gpu/GrColorSpaceXform.h
new file mode 100644
index 000000000..7c88c6285
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrColorSpaceXform.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrColorSpaceXform_DEFINED
+#define GrColorSpaceXform_DEFINED
+
+#include "GrColor.h"
+#include "SkMatrix44.h"
+#include "SkRefCnt.h"
+
+class SkColorSpace;
+
+ /**
+ * Represents a color gamut transformation (as a 4x4 color matrix)
+ */
+class GrColorSpaceXform : public SkRefCnt {
+public:
+ GrColorSpaceXform(const SkMatrix44& srcToDst);
+
+ static sk_sp<GrColorSpaceXform> Make(SkColorSpace* src, SkColorSpace* dst);
+
+ const SkMatrix44& srcToDst() const { return fSrcToDst; }
+
+ /**
+ * GrGLSLFragmentProcessor::GenKey() must call this and include the returned value in its
+ * computed key.
+ */
+ static uint32_t XformKey(GrColorSpaceXform* xform) {
+ // Code generation changes if there is an xform, but it is otherwise constant.
+ return SkToBool(xform) ? 1 : 0;
+ }
+
+ static bool Equals(const GrColorSpaceXform* a, const GrColorSpaceXform* b);
+
+ GrColor4f apply(const GrColor4f& srcColor);
+
+private:
+ SkMatrix44 fSrcToDst;
+};
+
+#endif
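
As an editorial usage sketch (not from the patch): building the gamut transform for a draw and folding its presence into a processor key. It assumes Make() returns nullptr when the two color spaces need no conversion; the helper name is hypothetical.

    #include "GrColorSpaceXform.h"
    #include "SkColorSpace.h"

    // Remap a paint color into the destination gamut (if needed) and return the
    // key bit that GrGLSLFragmentProcessor::GenKey() should mix in.
    static uint32_t apply_gamut_xform(SkColorSpace* src, SkColorSpace* dst,
                                      GrColor4f* paintColor) {
        sk_sp<GrColorSpaceXform> xform = GrColorSpaceXform::Make(src, dst);
        if (xform) {
            *paintColor = xform->apply(*paintColor);
        }
        return GrColorSpaceXform::XformKey(xform.get());  // 1 if a transform exists, else 0
    }
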
diff --git a/gfx/skia/skia/include/gpu/GrConfig.h b/gfx/skia/skia/include/gpu/GrConfig.h
new file mode 100644
index 000000000..acdff3af2
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrConfig.h
@@ -0,0 +1,183 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrConfig_DEFINED
+#define GrConfig_DEFINED
+
+#include "SkTypes.h"
+
+///////////////////////////////////////////////////////////////////////////////
+// preconfig section:
+//
+// All the work before including GrUserConfig.h should center around guessing
+// what platform we're on, and defining low-level symbols based on that.
+//
+// A build environment may have already defined symbols, so we first check
+// for that
+//
+
+// hack to ensure we know what sort of Apple platform we're on
+#if defined(__APPLE_CPP__) || defined(__APPLE_CC__)
+ #include <TargetConditionals.h>
+#endif
+
+/**
+ * Gr defines are set to 0 or 1, rather than being undefined or defined
+ */
+
+#if !defined(GR_CACHE_STATS)
+ #if defined(SK_DEBUG) || defined(SK_DUMP_STATS)
+ #define GR_CACHE_STATS 1
+ #else
+ #define GR_CACHE_STATS 0
+ #endif
+#endif
+
+#if !defined(GR_GPU_STATS)
+ #if defined(SK_DEBUG) || defined(SK_DUMP_STATS)
+ #define GR_GPU_STATS 1
+ #else
+ #define GR_GPU_STATS 0
+ #endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+#if defined(SK_BUILD_FOR_WIN32)
+// VC8 doesn't support stdint.h, so we define those types here.
+typedef signed char int8_t;
+typedef unsigned char uint8_t;
+typedef short int16_t;
+typedef unsigned short uint16_t;
+typedef int int32_t;
+typedef unsigned uint32_t;
+typedef __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+#else
+/*
+ * Include stdint.h with defines that trigger declaration of C99 limit/const
+ * macros here before anyone else has a chance to include stdint.h without
+ * these.
+ */
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS
+#endif
+#ifndef __STDC_CONSTANT_MACROS
+#define __STDC_CONSTANT_MACROS
+#endif
+#include <stdint.h>
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+// postconfig section:
+//
+
+/**
+ * GR_STRING makes a string of X where X is expanded before conversion to a string
+ * if X itself contains macros.
+ */
+#define GR_STRING(X) GR_STRING_IMPL(X)
+#define GR_STRING_IMPL(X) #X
+
+/**
+ * GR_CONCAT concatenates X and Y where each is expanded before
+ * concatenation if either contains macros.
+ */
+#define GR_CONCAT(X,Y) GR_CONCAT_IMPL(X,Y)
+#define GR_CONCAT_IMPL(X,Y) X##Y
+
+/**
+ * Creates a string of the form "<filename>(<linenumber>) : "
+ */
+#define GR_FILE_AND_LINE_STR __FILE__ "(" GR_STRING(__LINE__) ") : "
+
+/**
+ * Compilers have different ways of issuing warnings. This macro
+ * attempts to abstract them, but may need to be specialized for your
+ * particular compiler.
+ * To insert compiler warnings use "#pragma message GR_WARN(<string>)"
+ */
+#if defined(_MSC_VER)
+ #define GR_WARN(MSG) (GR_FILE_AND_LINE_STR "WARNING: " MSG)
+#else//__GNUC__ - may need other defines for different compilers
+ #define GR_WARN(MSG) ("WARNING: " MSG)
+#endif
+
+/**
+ * GR_ALWAYSBREAK is an unconditional break in all builds.
+ */
+#if !defined(GR_ALWAYSBREAK)
+ #if defined(SK_BUILD_FOR_WIN32)
+ #define GR_ALWAYSBREAK SkNO_RETURN_HINT(); __debugbreak()
+ #else
+ // TODO: do other platforms really not have continuable breakpoints?
+ // sign extend for 64bit architectures to be sure this is
+ // in the high address range
+ #define GR_ALWAYSBREAK SkNO_RETURN_HINT(); *((int*)(int64_t)(int32_t)0xbeefcafe) = 0;
+ #endif
+#endif
+
+/**
+ * GR_DEBUGBREAK is an unconditional break in debug builds.
+ */
+#if !defined(GR_DEBUGBREAK)
+ #ifdef SK_DEBUG
+ #define GR_DEBUGBREAK GR_ALWAYSBREAK
+ #else
+ #define GR_DEBUGBREAK
+ #endif
+#endif
+
+/**
+ * GR_ALWAYSASSERT is an assertion in all builds.
+ */
+#if !defined(GR_ALWAYSASSERT)
+ #define GR_ALWAYSASSERT(COND) \
+ do { \
+ if (!(COND)) { \
+ SkDebugf("%s %s failed\n", GR_FILE_AND_LINE_STR, #COND); \
+ GR_ALWAYSBREAK; \
+ } \
+ } while (false)
+#endif
+
+/**
+ * GR_DEBUGASSERT is an assertion in debug builds only.
+ */
+#if !defined(GR_DEBUGASSERT)
+ #ifdef SK_DEBUG
+ #define GR_DEBUGASSERT(COND) GR_ALWAYSASSERT(COND)
+ #else
+ #define GR_DEBUGASSERT(COND)
+ #endif
+#endif
+
+/**
+ * Prettier forms of the above macros.
+ */
+#define GrAlwaysAssert(COND) GR_ALWAYSASSERT(COND)
+
+/**
+ * GR_STATIC_ASSERT is a compile time assertion. Depending on the platform
+ * it may print the message in the compiler log. Obviously, the condition must
+ * be evaluatable at compile time.
+ */
+#define GR_STATIC_ASSERT(CONDITION) static_assert(CONDITION, "bug")
+
+/**
+ * Set to 1 to enable pixel local storage path rendering on supported devices.
+ */
+#if !defined(GR_ENABLE_PLS_PATH_RENDERING)
+ #define GR_ENABLE_PLS_PATH_RENDERING 0
+#endif
+
+#endif
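
A brief editorial sketch (not part of the patch) of the assertion macros defined above: GR_STATIC_ASSERT fires at compile time, GrAlwaysAssert in every build, and GR_DEBUGASSERT only when SK_DEBUG is set; the clamp helper is hypothetical.

    #include "GrConfig.h"

    GR_STATIC_ASSERT(sizeof(int64_t) == 8);   // checked by the compiler

    static int clamp_sample_count(int requested, int maxSamples) {
        GrAlwaysAssert(requested >= 0);       // checked in release and debug builds
        GR_DEBUGASSERT(maxSamples >= 0);      // checked only in debug builds
        return requested > maxSamples ? maxSamples : requested;
    }
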
diff --git a/gfx/skia/skia/include/gpu/GrContext.h b/gfx/skia/skia/include/gpu/GrContext.h
new file mode 100644
index 000000000..996b77f2d
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrContext.h
@@ -0,0 +1,479 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrContext_DEFINED
+#define GrContext_DEFINED
+
+#include "GrCaps.h"
+#include "GrClip.h"
+#include "GrColor.h"
+#include "GrPaint.h"
+#include "GrRenderTarget.h"
+#include "GrTextureProvider.h"
+#include "SkMatrix.h"
+#include "SkPathEffect.h"
+#include "SkTypes.h"
+#include "../private/GrAuditTrail.h"
+#include "../private/GrSingleOwner.h"
+#include "../private/SkMutex.h"
+
+struct GrBatchAtlasConfig;
+class GrBatchFontCache;
+struct GrContextOptions;
+class GrContextPriv;
+class GrContextThreadSafeProxy;
+class GrDrawingManager;
+class GrDrawContext;
+class GrFragmentProcessor;
+class GrGpu;
+class GrIndexBuffer;
+class GrOvalRenderer;
+class GrPath;
+class GrPipelineBuilder;
+class GrResourceEntry;
+class GrResourceCache;
+class GrResourceProvider;
+class GrTestTarget;
+class GrTextBlobCache;
+class GrTextContext;
+class GrTextureParams;
+class GrVertexBuffer;
+class GrSwizzle;
+class SkTraceMemoryDump;
+
+class SK_API GrContext : public SkRefCnt {
+public:
+ /**
+ * Creates a GrContext for a backend context.
+ */
+ static GrContext* Create(GrBackend, GrBackendContext, const GrContextOptions& options);
+ static GrContext* Create(GrBackend, GrBackendContext);
+
+ /**
+ * Only defined in test apps.
+ */
+ static GrContext* CreateMockContext();
+
+ virtual ~GrContext();
+
+ GrContextThreadSafeProxy* threadSafeProxy();
+
+ /**
+ * The GrContext normally assumes that no outsider is setting state
+ * within the underlying 3D API's context/device/whatever. This call informs
+ * the context that the state was modified and it should resend. Shouldn't
+ * be called frequently for good performance.
+ * The flag bits, state, are dependent on which backend is used by the
+ * context, either GL or D3D (possible in future).
+ */
+ void resetContext(uint32_t state = kAll_GrBackendState);
+
+ /**
+ * Callback function to allow classes to cleanup on GrContext destruction.
+ * The 'info' field is filled in with the 'info' passed to addCleanUp.
+ */
+ typedef void (*PFCleanUpFunc)(const GrContext* context, void* info);
+
+ /**
+ * Add a function to be called from within GrContext's destructor.
+ * This gives classes a chance to free resources held on a per context basis.
+ * The 'info' parameter will be stored and passed to the callback function.
+ */
+ void addCleanUp(PFCleanUpFunc cleanUp, void* info) {
+ CleanUpData* entry = fCleanUpData.push();
+
+ entry->fFunc = cleanUp;
+ entry->fInfo = info;
+ }
+
+ /**
+ * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer
+ * usable. Call this if you have lost the associated GPU context, and thus internal texture,
+ * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of the
+ * GrContext and any of its created resource objects will not make backend 3D API calls. Content
+ * rendered but not previously flushed may be lost. After this function is called all subsequent
+ * calls on the GrContext will fail or be no-ops.
+ *
+ * The typical use case for this function is that the underlying 3D context was lost and further
+ * API calls may crash.
+ */
+ void abandonContext();
+
+ /**
+ * This is similar to abandonContext() however the underlying 3D context is not yet lost and
+ * the GrContext will cleanup all allocated resources before returning. After returning it will
+ * assume that the underlying context may no longer be valid.
+ *
+ * The typical use case for this function is that the client is going to destroy the 3D context
+ * but can't guarantee that GrContext will be destroyed first (perhaps because it may be ref'ed
+ * elsewhere by either the client or Skia objects).
+ */
+ void releaseResourcesAndAbandonContext();
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Resource Cache
+
+ /**
+ * Return the current GPU resource cache limits.
+ *
+ * @param maxResources If non-null, returns maximum number of resources that
+ * can be held in the cache.
+ * @param maxResourceBytes If non-null, returns maximum number of bytes of
+ * video memory that can be held in the cache.
+ */
+ void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;
+
+ /**
+ * Gets the current GPU resource cache usage.
+ *
+ * @param resourceCount If non-null, returns the number of resources that are held in the
+ * cache.
+ * @param maxResourceBytes If non-null, returns the total number of bytes of video memory held
+ * in the cache.
+ */
+ void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;
+
+ /**
+ * Specify the GPU resource cache limits. If the current cache exceeds either
+ * of these, it will be purged (LRU) to keep the cache within these limits.
+ *
+ * @param maxResources The maximum number of resources that can be held in
+ * the cache.
+ * @param maxResourceBytes The maximum number of bytes of video memory
+ * that can be held in the cache.
+ */
+ void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);
+
+ GrTextureProvider* textureProvider() { return fTextureProvider; }
+ const GrTextureProvider* textureProvider() const { return fTextureProvider; }
+
+ /**
+ * Frees GPU resources created by the context. Can be called to reduce GPU memory
+ * pressure.
+ */
+ void freeGpuResources();
+
+ /**
+ * Purge all the unlocked resources from the cache.
+ * This entry point is mainly meant for timing texture uploads
+ * and is not defined in normal builds of Skia.
+ */
+ void purgeAllUnlockedResources();
+
+ /** Access the context capabilities */
+ const GrCaps* caps() const { return fCaps; }
+
+ /**
+ * Returns the recommended sample count for a render target when using this
+ * context.
+ *
+ * @param config the configuration of the render target.
+ * @param dpi the display density in dots per inch.
+ *
+ * @return sample count that should perform well and have good enough
+ * rendering quality for the display. Alternatively returns 0 if
+ * MSAA is not supported or recommended to be used by default.
+ */
+ int getRecommendedSampleCount(GrPixelConfig config, SkScalar dpi) const;
+
+ /**
+ * Create both a GrRenderTarget and a matching GrDrawContext to wrap it.
+ * We guarantee that "asTexture" will succeed for drawContexts created
+ * via this entry point.
+ */
+ sk_sp<GrDrawContext> makeDrawContext(SkBackingFit fit,
+ int width, int height,
+ GrPixelConfig config,
+ sk_sp<SkColorSpace> colorSpace,
+ int sampleCnt = 0,
+ GrSurfaceOrigin origin = kDefault_GrSurfaceOrigin,
+ const SkSurfaceProps* surfaceProps = nullptr,
+ SkBudgeted = SkBudgeted::kYes);
+
+ /*
+ * This method will attempt to create a drawContext that has, at least, the number of
+ * channels and precision per channel as requested in 'config' (e.g., A8 and 888 can be
+ * converted to 8888). It may also swizzle the channels (e.g., BGRA -> RGBA).
+ * SRGB-ness will be preserved.
+ */
+ sk_sp<GrDrawContext> makeDrawContextWithFallback(
+ SkBackingFit fit,
+ int width, int height,
+ GrPixelConfig config,
+ sk_sp<SkColorSpace> colorSpace,
+ int sampleCnt = 0,
+ GrSurfaceOrigin origin = kDefault_GrSurfaceOrigin,
+ const SkSurfaceProps* surfaceProps = nullptr,
+ SkBudgeted budgeted = SkBudgeted::kYes);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Misc.
+
+ /**
+ * Call to ensure all drawing to the context has been issued to the
+ * underlying 3D API.
+ */
+ void flush();
+
+ /**
+ * These flags can be used with the read/write pixels functions below.
+ */
+ enum PixelOpsFlags {
+ /** The GrContext will not be flushed before the surface read or write. This means that
+ the read or write may occur before previous draws have executed. */
+ kDontFlush_PixelOpsFlag = 0x1,
+ /** Any surface writes should be flushed to the backend 3D API after the surface operation
+ is complete */
+ kFlushWrites_PixelOp = 0x2,
+ /** The src for write or dst read is unpremultiplied. This is only respected if both the
+ src and dst configs are an RGBA/BGRA 8888 format. */
+ kUnpremul_PixelOpsFlag = 0x4,
+ };
+
+ /**
+ * Reads a rectangle of pixels from a surface.
+ * @param surface the surface to read from.
+ * @param left left edge of the rectangle to read (inclusive)
+ * @param top top edge of the rectangle to read (inclusive)
+ * @param width width of rectangle to read in pixels.
+ * @param height height of rectangle to read in pixels.
+ * @param config the pixel config of the destination buffer
+ * @param buffer memory to read the rectangle into.
+ * @param rowBytes number of bytes between consecutive rows. Zero means rows are tightly
+ * packed.
+ * @param pixelOpsFlags see PixelOpsFlags enum above.
+ *
+ * @return true if the read succeeded, false if not. The read can fail because of an unsupported
+ * pixel config.
+ */
+ bool readSurfacePixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config, void* buffer,
+ size_t rowBytes = 0,
+ uint32_t pixelOpsFlags = 0);
+
+ /**
+ * Writes a rectangle of pixels to a surface.
+ * @param surface the surface to write to.
+ * @param left left edge of the rectangle to write (inclusive)
+ * @param top top edge of the rectangle to write (inclusive)
+ * @param width width of rectangle to write in pixels.
+ * @param height height of rectangle to write in pixels.
+ * @param config the pixel config of the source buffer
+ * @param buffer memory to read pixels from
+ * @param rowBytes number of bytes between consecutive rows. Zero
+ * means rows are tightly packed.
+ * @param pixelOpsFlags see PixelOpsFlags enum above.
+ * @return true if the write succeeded, false if not. The write can fail because of an
+ * unsupported combination of surface and src configs.
+ */
+ bool writeSurfacePixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config, const void* buffer,
+ size_t rowBytes,
+ uint32_t pixelOpsFlags = 0);
+
+ /**
+ * Copies a rectangle of texels from src to dst.
+ * @param dst the surface to copy to.
+ * @param src the surface to copy from.
+ * @param srcRect the rectangle of the src that should be copied.
+ * @param dstPoint the translation applied when writing the srcRect's pixels to the dst.
+ */
+ bool copySurface(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ /** Helper that copies the whole surface but fails when the two surfaces are not identically
+ sized. */
+ bool copySurface(GrSurface* dst, GrSurface* src) {
+ return this->copySurface(dst, src, SkIRect::MakeWH(dst->width(), dst->height()),
+ SkIPoint::Make(0,0));
+ }
+
+ /**
+ * After this returns, any pending writes to the surface will have been issued to the backend 3D API.
+ */
+ void flushSurfaceWrites(GrSurface* surface);
+
+ /**
+ * After this returns, any pending reads or writes to the surface will have been issued to the
+ * backend 3D API.
+ */
+ void flushSurfaceIO(GrSurface* surface);
+
+ /**
+ * Finalizes all pending reads and writes to the surface and also performs an MSAA resolve
+ * if necessary.
+ *
+ * It is not necessary to call this before reading the render target via Skia/GrContext.
+ * GrContext will detect when it must perform a resolve before reading pixels back from the
+ * surface or using it as a texture.
+ */
+ void prepareSurfaceForExternalIO(GrSurface*);
+
+ /**
+ * An ID associated with this context, guaranteed to be unique.
+ */
+ uint32_t uniqueID() { return fUniqueID; }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Functions intended for internal use only.
+ GrGpu* getGpu() { return fGpu; }
+ const GrGpu* getGpu() const { return fGpu; }
+ GrBatchFontCache* getBatchFontCache() { return fBatchFontCache; }
+ GrTextBlobCache* getTextBlobCache() { return fTextBlobCache; }
+ bool abandoned() const;
+ GrResourceProvider* resourceProvider() { return fResourceProvider; }
+ const GrResourceProvider* resourceProvider() const { return fResourceProvider; }
+ GrResourceCache* getResourceCache() { return fResourceCache; }
+
+ // Called by tests that draw directly to the context via GrDrawContext
+ void getTestTarget(GrTestTarget*, sk_sp<GrDrawContext>);
+
+ /** Reset GPU stats */
+ void resetGpuStats() const;
+
+ /** Prints cache stats to the string if GR_CACHE_STATS == 1. */
+ void dumpCacheStats(SkString*) const;
+ void dumpCacheStatsKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const;
+ void printCacheStats() const;
+
+ /** Prints GPU stats to the string if GR_GPU_STATS == 1. */
+ void dumpGpuStats(SkString*) const;
+ void dumpGpuStatsKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const;
+ void printGpuStats() const;
+
+ /** Specify the TextBlob cache limit. If the current cache exceeds this limit it will purge.
+ This is for testing only. */
+ void setTextBlobCacheLimit_ForTesting(size_t bytes);
+
+ /** Specify the sizes of the GrAtlasTextContext atlases. The configs pointer below should point
+ to an array of 3 entries. */
+ void setTextContextAtlasSizes_ForTesting(const GrBatchAtlasConfig* configs);
+
+ /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
+ void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;
+
+ /** Get pointer to atlas texture for given mask format */
+ GrTexture* getFontAtlasTexture(GrMaskFormat format);
+
+ GrAuditTrail* getAuditTrail() { return &fAuditTrail; }
+
+ /** This is only useful for debug purposes */
+ SkDEBUGCODE(GrSingleOwner* debugSingleOwner() const { return &fSingleOwner; } )
+
+ // Provides access to functions that aren't part of the public API.
+ GrContextPriv contextPriv();
+ const GrContextPriv contextPriv() const;
+
+private:
+ GrGpu* fGpu;
+ const GrCaps* fCaps;
+ GrResourceCache* fResourceCache;
+ // this union exists because the inheritance of GrTextureProvider->GrResourceProvider
+ // is in a private header.
+ union {
+ GrResourceProvider* fResourceProvider;
+ GrTextureProvider* fTextureProvider;
+ };
+
+ SkAutoTUnref<GrContextThreadSafeProxy> fThreadSafeProxy;
+
+ GrBatchFontCache* fBatchFontCache;
+ SkAutoTDelete<GrTextBlobCache> fTextBlobCache;
+
+ bool fDidTestPMConversions;
+ int fPMToUPMConversion;
+ int fUPMToPMConversion;
+ // The sw backend may call GrContext::readSurfacePixels on multiple threads
+ // We may transfer the responsibility for using a mutex to the sw backend
+ // when there are fewer code paths that lead to a readSurfacePixels call
+ // from the sw backend. readSurfacePixels is reentrant in one case - when performing
+ // the PM conversions test. To handle this we do the PM conversions test outside
+ // of fReadPixelsMutex and use a separate mutex to guard it. When it re-enters
+ // readSurfacePixels it will grab fReadPixelsMutex and release it before the outer
+ // readSurfacePixels proceeds to grab it.
+ // TODO: Stop pretending to make GrContext thread-safe for sw rasterization and provide
+ // a mechanism to make a SkPicture safe for multithreaded sw rasterization.
+ SkMutex fReadPixelsMutex;
+ SkMutex fTestPMConversionsMutex;
+
+ // In debug builds we guard against improper thread handling
+ // This guard is passed to the GrDrawingManager and, from there to all the
+ // GrDrawContexts. It is also passed to the GrTextureProvider and SkGpuDevice.
+ mutable GrSingleOwner fSingleOwner;
+
+ struct CleanUpData {
+ PFCleanUpFunc fFunc;
+ void* fInfo;
+ };
+
+ SkTDArray<CleanUpData> fCleanUpData;
+
+ const uint32_t fUniqueID;
+
+ SkAutoTDelete<GrDrawingManager> fDrawingManager;
+
+ GrAuditTrail fAuditTrail;
+
+ // TODO: have the GrClipStackClip use drawContexts and rm this friending
+ friend class GrContextPriv;
+
+ GrContext(); // init must be called after the constructor.
+ bool init(GrBackend, GrBackendContext, const GrContextOptions& options);
+
+ void initMockContext();
+ void initCommon(const GrContextOptions&);
+
+ /**
+ * These functions create premul <-> unpremul effects if it is possible to generate a pair
+ * of effects that make a readToUPM->writeToPM->readToUPM cycle invariant. Otherwise, they
+ * return NULL. They also can perform a swizzle as part of the draw.
+ */
+ sk_sp<GrFragmentProcessor> createPMToUPMEffect(GrTexture*, const GrSwizzle&,
+ const SkMatrix&) const;
+ sk_sp<GrFragmentProcessor> createUPMToPMEffect(GrTexture*, const GrSwizzle&,
+ const SkMatrix&) const;
+ /** Called before either of the above two functions to determine the appropriate fragment
+ processors for conversions. This must be called by readSurfacePixels before a mutex is
+ taken, since testing PM conversions itself will call readSurfacePixels. */
+ void testPMConversionsIfNecessary(uint32_t flags);
+ /** Returns true if we've already determined that createPMtoUPMEffect and createUPMToPMEffect
+ will fail. In such cases fall back to SW conversion. */
+ bool didFailPMUPMConversionTest() const;
+
+ /**
+ * A callback similar to the above for use by the TextBlobCache
+ * TODO move textblob draw calls below context so we can use the call above.
+ */
+ static void TextBlobCacheOverBudgetCB(void* data);
+
+ typedef SkRefCnt INHERITED;
+};
+
+/**
+ * Can be used to perform actions related to the generating GrContext in a thread safe manner. The
+ * proxy does not access the 3D API (e.g. OpenGL) that backs the generating GrContext.
+ */
+class GrContextThreadSafeProxy : public SkRefCnt {
+private:
+ GrContextThreadSafeProxy(const GrCaps* caps, uint32_t uniqueID)
+ : fCaps(SkRef(caps))
+ , fContextUniqueID(uniqueID) {}
+
+ SkAutoTUnref<const GrCaps> fCaps;
+ uint32_t fContextUniqueID;
+
+ friend class GrContext;
+ friend class SkImage;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
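
As a rough usage sketch for the readSurfacePixels/writeSurfacePixels/flushSurfaceWrites entry points declared above, the snippet below copies a 64x64 region between two surfaces through CPU memory. It is illustrative only: the kRGBA_8888_GrPixelConfig value and the SkAutoTMalloc helper come from other Skia headers, and the context and surfaces are assumed to have been created elsewhere.

    // Hedged sketch, not part of this patch: 'ctx', 'src' and 'dst' are assumed to be live.
    static void blitTopLeft64x64(GrContext* ctx, GrSurface* src, GrSurface* dst) {
        SkAutoTMalloc<uint32_t> pixels(64 * 64);
        // rowBytes == 0 means tightly packed rows.
        if (!ctx->readSurfacePixels(src, 0, 0, 64, 64,
                                    kRGBA_8888_GrPixelConfig, pixels.get())) {
            return;  // reads may fail for unsupported pixel configs
        }
        ctx->writeSurfacePixels(dst, 0, 0, 64, 64,
                                kRGBA_8888_GrPixelConfig, pixels.get(),
                                64 * sizeof(uint32_t));
        // Ensure the write is issued to the backend 3D API before external use.
        ctx->flushSurfaceWrites(dst);
    }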
diff --git a/gfx/skia/skia/include/gpu/GrContextOptions.h b/gfx/skia/skia/include/gpu/GrContextOptions.h
new file mode 100644
index 000000000..0522b9d20
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrContextOptions.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrContextOptions_DEFINED
+#define GrContextOptions_DEFINED
+
+#include "SkTypes.h"
+
+struct GrContextOptions {
+ GrContextOptions() {}
+
+ // Suppress prints for the GrContext.
+ bool fSuppressPrints = false;
+
+ /** Overrides: These options override feature detection using backend API queries. These
+ overrides can only reduce the feature set or limits, never increase them beyond the
+ detected values. */
+
+ int fMaxTextureSizeOverride = SK_MaxS32;
+
+ /** If non-zero, overrides the maximum size of a tile for sw-backed images and bitmaps rendered
+ by SkGpuDevice. */
+ int fMaxTileSizeOverride = 0;
+ bool fSuppressDualSourceBlending = false;
+
+ /** the threshold in bytes above which we will use a buffer mapping API to map vertex and index
+ buffers to CPU memory in order to update them. A value of -1 means the GrContext should
+ deduce the optimal value for this platform. */
+ int fBufferMapThreshold = -1;
+
+ /** some gpus have problems with partial writes of the rendertarget */
+ bool fUseDrawInsteadOfPartialRenderTargetWrite = false;
+
+ /** The GrContext operates in immediate mode. It will issue all draws to the backend API
+ immediately. Intended to ease debugging. */
+ bool fImmediateMode = false;
+
+ /** For debugging purposes turn each GrBatch's bounds into a clip rect. This is used to
+ verify that the clip bounds are conservative. */
+ bool fClipBatchToBounds = false;
+
+ /** For debugging purposes draw a wireframe device bounds rect for each GrBatch. The wire
+ frame rect is drawn before the GrBatch in order to visualize batches that draw outside
+ of their dev bounds. */
+ bool fDrawBatchBounds = false;
+
+ /** For debugging, override the default maximum look-back or look-ahead window for GrBatch
+ combining. */
+ int fMaxBatchLookback = -1;
+ int fMaxBatchLookahead = -1;
+
+ /** Force us to do all swizzling manually in the shader and don't rely on extensions to do
+ swizzling. */
+ bool fUseShaderSwizzling = false;
+
+ /** Construct mipmaps manually, via repeated downsampling draw-calls. This is used when
+ the driver's implementation (glGenerateMipmap) contains bugs. This requires mipmap
+ level and LOD control (ie desktop or ES3). */
+ bool fDoManualMipmapping = false;
+
+ /** Enable instanced rendering as long as all required functionality is supported by the HW.
+ Instanced rendering is still experimental at this point and disabled by default. */
+ bool fEnableInstancedRendering = false;
+
+ /** Disables distance field rendering for paths. Distance field computation can be expensive
+ and yields no benefit if a path is not rendered multiple times with different transforms */
+ bool fDisableDistanceFieldPaths = false;
+
+ /**
+ * If true this allows path mask textures to be cached. This is only really useful if paths
+ * are commonly rendered at the same scale and fractional translation.
+ */
+ bool fAllowPathMaskCaching = false;
+
+ /**
+ * Force all path draws to go through the sw-rasterize-to-texture code path (assuming
+ * the path is not recognized as a simpler shape, e.g. a rrect). This is intended for testing
+ * purposes.
+ */
+ bool fForceSWPathMasks = false;
+};
+
+#endif
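
A minimal sketch of how these fields might be filled in before context creation; the GrContext::Create call left in a comment is declared in GrContext.h above, and the backend handle is an assumption outside this header.

    // Hedged sketch: tighten a few options before creating a context.
    GrContextOptions options;
    options.fSuppressPrints = true;          // silence GrContext diagnostics
    options.fMaxTextureSizeOverride = 4096;  // can only reduce the detected limit
    options.fBufferMapThreshold = -1;        // let the context pick a platform default
    // GrContext* ctx = GrContext::Create(backend, backendContext, options);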
diff --git a/gfx/skia/skia/include/gpu/GrCoordTransform.h b/gfx/skia/skia/include/gpu/GrCoordTransform.h
new file mode 100644
index 000000000..3080523eb
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrCoordTransform.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCoordTransform_DEFINED
+#define GrCoordTransform_DEFINED
+
+#include "GrProcessor.h"
+#include "SkMatrix.h"
+#include "GrTexture.h"
+#include "GrTypes.h"
+#include "GrShaderVar.h"
+
+/**
+ * A class representing a linear transformation of local coordinates. GrFragmentProcessors
+ * define these transformations, and the GrGeometryProcessor implements the transformation.
+ */
+class GrCoordTransform : SkNoncopyable {
+public:
+ GrCoordTransform() { SkDEBUGCODE(fInProcessor = false); }
+
+ /**
+ * Create a transformation that maps [0, 1] to a texture's boundaries. The precision is inferred
+ * from the texture size and filter. The texture origin also implies whether a y-reversal should
+ * be performed.
+ */
+ GrCoordTransform(const GrTexture* texture, GrTextureParams::FilterMode filter) {
+ SkASSERT(texture);
+ SkDEBUGCODE(fInProcessor = false);
+ this->reset(texture, filter);
+ }
+
+ /**
+ * Create a transformation from a matrix. The precision is inferred from the texture size and
+ * filter. The texture origin also implies whether a y-reversal should be performed.
+ */
+ GrCoordTransform(const SkMatrix& m, const GrTexture* texture,
+ GrTextureParams::FilterMode filter) {
+ SkDEBUGCODE(fInProcessor = false);
+ SkASSERT(texture);
+ this->reset(m, texture, filter);
+ }
+
+ /**
+ * Create a transformation that applies the matrix to a coord set.
+ */
+ GrCoordTransform(const SkMatrix& m, GrSLPrecision precision = kDefault_GrSLPrecision) {
+ SkDEBUGCODE(fInProcessor = false);
+ this->reset(m, precision);
+ }
+
+ void reset(const GrTexture* texture, GrTextureParams::FilterMode filter) {
+ SkASSERT(!fInProcessor);
+ SkASSERT(texture);
+ this->reset(MakeDivByTextureWHMatrix(texture), texture, filter);
+ }
+
+ void reset(const SkMatrix&, const GrTexture*, GrTextureParams::FilterMode filter);
+ void reset(const SkMatrix& m, GrSLPrecision precision = kDefault_GrSLPrecision);
+
+ GrCoordTransform& operator= (const GrCoordTransform& that) {
+ SkASSERT(!fInProcessor);
+ fMatrix = that.fMatrix;
+ fReverseY = that.fReverseY;
+ fPrecision = that.fPrecision;
+ return *this;
+ }
+
+ /**
+ * Access the matrix for editing. Note, this must be done before adding the transform to an
+ * effect, since effects are immutable.
+ */
+ SkMatrix* accessMatrix() {
+ SkASSERT(!fInProcessor);
+ return &fMatrix;
+ }
+
+ bool operator==(const GrCoordTransform& that) const {
+ return fMatrix.cheapEqualTo(that.fMatrix) &&
+ fReverseY == that.fReverseY &&
+ fPrecision == that.fPrecision;
+ }
+
+ bool operator!=(const GrCoordTransform& that) const { return !(*this == that); }
+
+ const SkMatrix& getMatrix() const { return fMatrix; }
+ bool reverseY() const { return fReverseY; }
+ GrSLPrecision precision() const { return fPrecision; }
+
+ /** Useful for effects that want to insert a texture matrix that is implied by the texture
+ dimensions */
+ static inline SkMatrix MakeDivByTextureWHMatrix(const GrTexture* texture) {
+ SkASSERT(texture);
+ SkMatrix mat;
+ (void)mat.setIDiv(texture->width(), texture->height());
+ return mat;
+ }
+
+private:
+ SkMatrix fMatrix;
+ bool fReverseY;
+ GrSLPrecision fPrecision;
+ typedef SkNoncopyable INHERITED;
+
+#ifdef SK_DEBUG
+public:
+ void setInProcessor() const { fInProcessor = true; }
+private:
+ mutable bool fInProcessor;
+#endif
+};
+
+#endif
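
To make the constructor variants above concrete, here is a small hedged sketch showing the matrix-only and texture-based forms. The GrTexture pointer is assumed to come from elsewhere in Gr code, and a real processor would store such transforms as members and call addCoordTransform() on them from its constructor (see GrFragmentProcessor.h below).

    // Illustrative only, not part of this patch.
    static void buildTransforms(const GrTexture* tex) {
        // Matrix-only form with an explicit shader precision.
        GrCoordTransform scaled(SkMatrix::MakeScale(0.5f), kHigh_GrSLPrecision);
        // Texture form: divides by the texture width/height and infers precision
        // and y-reversal from the texture itself.
        GrCoordTransform normalized(tex, GrTextureParams::kNone_FilterMode);
    }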
diff --git a/gfx/skia/skia/include/gpu/GrDrawContext.h b/gfx/skia/skia/include/gpu/GrDrawContext.h
new file mode 100644
index 000000000..72de15dea
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrDrawContext.h
@@ -0,0 +1,438 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDrawContext_DEFINED
+#define GrDrawContext_DEFINED
+
+#include "GrColor.h"
+#include "GrContext.h"
+#include "GrPaint.h"
+#include "GrRenderTarget.h"
+#include "SkRefCnt.h"
+#include "SkRegion.h"
+#include "SkSurfaceProps.h"
+#include "../private/GrInstancedPipelineInfo.h"
+#include "../private/GrSingleOwner.h"
+
+class GrAuditTrail;
+class GrClip;
+class GrDrawBatch;
+class GrDrawContextPriv;
+class GrDrawPathBatchBase;
+class GrDrawingManager;
+class GrDrawTarget;
+class GrFixedClip;
+class GrPaint;
+class GrPathProcessor;
+class GrPipelineBuilder;
+class GrRenderTarget;
+class GrStyle;
+class GrSurface;
+struct GrUserStencilSettings;
+class SkDrawFilter;
+struct SkIPoint;
+struct SkIRect;
+class SkLatticeIter;
+class SkMatrix;
+class SkPaint;
+class SkPath;
+struct SkPoint;
+struct SkRect;
+class SkRRect;
+struct SkRSXform;
+class SkTextBlob;
+
+/*
+ * A helper object to orchestrate draws
+ */
+class SK_API GrDrawContext : public SkRefCnt {
+public:
+ ~GrDrawContext() override;
+
+ bool copySurface(GrSurface* src, const SkIRect& srcRect, const SkIPoint& dstPoint);
+
+ // TODO: it is odd that we need both the SkPaint in the following 3 methods.
+ // We should extract the text parameters from SkPaint and pass them separately
+ // akin to GrStyle (GrTextInfo?)
+ virtual void drawText(const GrClip&, const GrPaint&, const SkPaint&,
+ const SkMatrix& viewMatrix, const char text[], size_t byteLength,
+ SkScalar x, SkScalar y, const SkIRect& clipBounds);
+ virtual void drawPosText(const GrClip&, const GrPaint&, const SkPaint&,
+ const SkMatrix& viewMatrix, const char text[], size_t byteLength,
+ const SkScalar pos[], int scalarsPerPosition,
+ const SkPoint& offset, const SkIRect& clipBounds);
+ virtual void drawTextBlob(const GrClip&, const SkPaint&,
+ const SkMatrix& viewMatrix, const SkTextBlob*,
+ SkScalar x, SkScalar y,
+ SkDrawFilter*, const SkIRect& clipBounds);
+
+ /**
+ * Provides a performance hint that the render target's contents are allowed
+ * to become undefined.
+ */
+ void discard();
+
+ /**
+ * Clear the entire render target or a rect of it, ignoring any clips.
+ * @param rect the rect to clear or the whole thing if rect is NULL.
+ * @param color the color to clear to.
+ * @param canIgnoreRect allows partial clears to be converted to whole
+ * clears on platforms for which that is cheap
+ */
+ void clear(const SkIRect* rect, GrColor color, bool canIgnoreRect);
+
+ /**
+ * Draw everywhere (respecting the clip) with the paint.
+ */
+ void drawPaint(const GrClip&, const GrPaint&, const SkMatrix& viewMatrix);
+
+ /**
+ * Draw the rect using a paint.
+ * @param paint describes how to color pixels.
+ * @param viewMatrix transformation matrix
+ * @param style The style to apply. Null means fill. Currently path effects are not
+ * allowed.
+ * The rect's coords are used to access the paint (through the texture matrix).
+ */
+ void drawRect(const GrClip&,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect&,
+ const GrStyle* style = nullptr);
+
+ /**
+ * Maps a rectangle of shader coordinates to a rectangle and fills that rectangle.
+ *
+ * @param paint describes how to color pixels.
+ * @param viewMatrix transformation matrix which applies to rectToDraw
+ * @param rectToDraw the rectangle to draw
+ * @param localRect the rectangle of shader coordinates applied to rectToDraw
+ */
+ void fillRectToRect(const GrClip&,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& rectToDraw,
+ const SkRect& localRect);
+
+ /**
+ * Fills a rect with a paint and a localMatrix.
+ */
+ void fillRectWithLocalMatrix(const GrClip& clip,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkMatrix& localMatrix);
+
+ /**
+ * Draw a roundrect using a paint.
+ *
+ * @param paint describes how to color pixels.
+ * @param viewMatrix transformation matrix
+ * @param rrect the roundrect to draw
+ * @param style style to apply to the rrect. Currently path effects are not allowed.
+ */
+ void drawRRect(const GrClip&,
+ const GrPaint&,
+ const SkMatrix& viewMatrix,
+ const SkRRect& rrect,
+ const GrStyle& style);
+
+ /**
+ * Shortcut for drawing an SkPath consisting of nested rrects using a paint.
+ * Does not support stroking. The result is undefined if outer does not contain
+ * inner.
+ *
+ * @param paint describes how to color pixels.
+ * @param viewMatrix transformation matrix
+ * @param outer the outer roundrect
+ * @param inner the inner roundrect
+ */
+ void drawDRRect(const GrClip&,
+ const GrPaint&,
+ const SkMatrix& viewMatrix,
+ const SkRRect& outer,
+ const SkRRect& inner);
+
+ /**
+ * Draws a path.
+ *
+ * @param paint describes how to color pixels.
+ * @param viewMatrix transformation matrix
+ * @param path the path to draw
+ * @param style style to apply to the path.
+ */
+ void drawPath(const GrClip&,
+ const GrPaint&,
+ const SkMatrix& viewMatrix,
+ const SkPath&,
+ const GrStyle& style);
+
+ /**
+ * Draws vertices with a paint.
+ *
+ * @param paint describes how to color pixels.
+ * @param viewMatrix transformation matrix
+ * @param primitiveType primitives type to draw.
+ * @param vertexCount number of vertices.
+ * @param positions array of vertex positions, required.
+ * @param texCoords optional array of texture coordinates used
+ * to access the paint.
+ * @param colors optional array of per-vertex colors, supersedes
+ * the paint's color field.
+ * @param indices optional array of indices. If NULL vertices
+ * are drawn non-indexed.
+ * @param indexCount if indices is non-null then this is the
+ * number of indices.
+ */
+ void drawVertices(const GrClip&,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ GrPrimitiveType primitiveType,
+ int vertexCount,
+ const SkPoint positions[],
+ const SkPoint texs[],
+ const GrColor colors[],
+ const uint16_t indices[],
+ int indexCount);
+
+ /**
+ * Draws textured sprites from an atlas with a paint.
+ *
+ * @param paint describes how to color pixels.
+ * @param viewMatrix transformation matrix
+ * @param spriteCount number of sprites.
+ * @param xform array of compressed transformation data, required.
+ * @param texRect array of texture rectangles used to access the paint.
+ * @param colors optional array of per-sprite colors, supersedes
+ * the paint's color field.
+ */
+ void drawAtlas(const GrClip&,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ int spriteCount,
+ const SkRSXform xform[],
+ const SkRect texRect[],
+ const SkColor colors[]);
+
+ /**
+ * Draws a region.
+ *
+ * @param paint describes how to color pixels
+ * @param viewMatrix transformation matrix
+ * @param region the region to be drawn
+ * @param style style to apply to the region
+ */
+ void drawRegion(const GrClip&,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkRegion& region,
+ const GrStyle& style);
+
+ /**
+ * Draws an oval.
+ *
+ * @param paint describes how to color pixels.
+ * @param viewMatrix transformation matrix
+ * @param oval the bounding rect of the oval.
+ * @param style style to apply to the oval. Currently path effects are not allowed.
+ */
+ void drawOval(const GrClip&,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& oval,
+ const GrStyle& style);
+ /**
+ * Draws a partial arc of an oval.
+ *
+ * @param paint describes how to color pixels.
+ * @param viewMatrix transformation matrix.
+ * @param oval the bounding rect of the oval.
+ * @param startAngle starting angle in degrees.
+ * @param sweepAngle angle to sweep in degrees. Must be in (-360, 360)
+ * @param useCenter true means that the implied path begins at the oval center, connects as a
+ * line to the point indicated by the start angle, and contains the arc indicated by
+ * the sweep angle. If false the line beginning at the center point is
+ * omitted.
+ * @param style style to apply to the oval.
+ */
+ void drawArc(const GrClip&,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& oval,
+ SkScalar startAngle,
+ SkScalar sweepAngle,
+ bool useCenter,
+ const GrStyle& style);
+
+ /**
+ * Draw the image as a set of rects, specified by |iter|.
+ */
+ void drawImageLattice(const GrClip&,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ int imageWidth,
+ int imageHeight,
+ std::unique_ptr<SkLatticeIter> iter,
+ const SkRect& dst);
+
+ /**
+ * After this returns, any pending surface IO will be issued to the backend 3D API and
+ * if the surface has MSAA it will be resolved.
+ */
+ void prepareForExternalIO();
+
+ /**
+ * Reads a rectangle of pixels from the draw context.
+ * @param dstInfo image info for the destination
+ * @param dstBuffer destination pixels for the read
+ * @param dstRowBytes bytes in a row of 'dstBuffer'
+ * @param x x offset w/in the draw context from which to read
+ * @param y y offset w/in the draw context from which to read
+ *
+ * @return true if the read succeeded, false if not. The read can fail because of an
+ * unsupported pixel config.
+ */
+ bool readPixels(const SkImageInfo& dstInfo, void* dstBuffer, size_t dstRowBytes, int x, int y);
+
+ /**
+ * Writes a rectangle of pixels [srcInfo, srcBuffer, srcRowBytes] into the
+ * drawContext at the specified position.
+ * @param srcInfo image info for the source pixels
+ * @param srcBuffer source for the write
+ * @param srcRowBytes bytes in a row of 'srcBuffer'
+ * @param x x offset w/in the draw context at which to write
+ * @param y y offset w/in the draw context at which to write
+ *
+ * @return true if the write succeeded, false if not. The write can fail because of an
+ * unsupported pixel config.
+ */
+ bool writePixels(const SkImageInfo& srcInfo, const void* srcBuffer, size_t srcRowBytes,
+ int x, int y);
+
+ bool isStencilBufferMultisampled() const {
+ return fRenderTarget->isStencilBufferMultisampled();
+ }
+ bool isUnifiedMultisampled() const { return fRenderTarget->isUnifiedMultisampled(); }
+ bool hasMixedSamples() const { return fRenderTarget->isMixedSampled(); }
+
+ bool mustUseHWAA(const GrPaint& paint) const {
+ return paint.isAntiAlias() && fRenderTarget->isUnifiedMultisampled();
+ }
+
+ const GrCaps* caps() const { return fContext->caps(); }
+ const GrSurfaceDesc& desc() const { return fRenderTarget->desc(); }
+ int width() const { return fRenderTarget->width(); }
+ int height() const { return fRenderTarget->height(); }
+ GrPixelConfig config() const { return fRenderTarget->config(); }
+ int numColorSamples() const { return fRenderTarget->numColorSamples(); }
+ bool isGammaCorrect() const { return SkToBool(fColorSpace.get()); }
+ SkSourceGammaTreatment sourceGammaTreatment() const {
+ return this->isGammaCorrect() ? SkSourceGammaTreatment::kRespect
+ : SkSourceGammaTreatment::kIgnore;
+ }
+ const SkSurfaceProps& surfaceProps() const { return fSurfaceProps; }
+ SkColorSpace* getColorSpace() const { return fColorSpace.get(); }
+ GrColorSpaceXform* getColorXformFromSRGB() const { return fColorXformFromSRGB.get(); }
+ GrSurfaceOrigin origin() const { return fRenderTarget->origin(); }
+
+ bool wasAbandoned() const;
+
+ GrRenderTarget* accessRenderTarget() { return fRenderTarget.get(); }
+
+ sk_sp<GrTexture> asTexture() { return sk_ref_sp(fRenderTarget->asTexture()); }
+
+ // Provides access to functions that aren't part of the public API.
+ GrDrawContextPriv drawContextPriv();
+ const GrDrawContextPriv drawContextPriv() const;
+
+ GrAuditTrail* auditTrail() { return fAuditTrail; }
+
+protected:
+ GrDrawContext(GrContext*, GrDrawingManager*, sk_sp<GrRenderTarget>, sk_sp<SkColorSpace>,
+ const SkSurfaceProps* surfaceProps, GrAuditTrail*, GrSingleOwner*);
+
+ GrDrawingManager* drawingManager() { return fDrawingManager; }
+
+ SkDEBUGCODE(GrSingleOwner* singleOwner() { return fSingleOwner; })
+ SkDEBUGCODE(void validate() const;)
+
+private:
+ friend class GrAtlasTextBlob; // for access to drawBatch
+ friend class GrStencilAndCoverTextContext; // for access to drawBatch
+
+ friend class GrDrawingManager; // for ctor
+ friend class GrDrawContextPriv;
+ friend class GrTestTarget; // for access to getDrawTarget
+ friend class GrSWMaskHelper; // for access to drawBatch
+
+ // All the path renderers currently make their own batches
+ friend class GrSoftwarePathRenderer; // for access to drawBatch
+ friend class GrAAConvexPathRenderer; // for access to drawBatch
+ friend class GrDashLinePathRenderer; // for access to drawBatch
+ friend class GrAAHairLinePathRenderer; // for access to drawBatch
+ friend class GrAALinearizingConvexPathRenderer; // for access to drawBatch
+ friend class GrAADistanceFieldPathRenderer; // for access to drawBatch
+ friend class GrDefaultPathRenderer; // for access to drawBatch
+ friend class GrPLSPathRenderer; // for access to drawBatch
+ friend class GrMSAAPathRenderer; // for access to drawBatch
+ friend class GrStencilAndCoverPathRenderer; // for access to drawBatch
+ friend class GrTessellatingPathRenderer; // for access to drawBatch
+
+ void internalClear(const GrFixedClip&, const GrColor, bool canIgnoreClip);
+
+ bool drawFilledDRRect(const GrClip& clip,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkRRect& origOuter,
+ const SkRRect& origInner);
+
+ bool drawFilledRect(const GrClip& clip,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const GrUserStencilSettings* ss);
+
+ void drawNonAAFilledRect(const GrClip&,
+ const GrPaint&,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkRect* localRect,
+ const SkMatrix* localMatrix,
+ const GrUserStencilSettings* ss,
+ bool useHWAA);
+
+ void internalDrawPath(const GrClip& clip,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkPath& path,
+ const GrStyle& style);
+
+ // This entry point allows the GrTextContext-derived classes to add their batches to
+ // the drawTarget.
+ void drawBatch(const GrPipelineBuilder& pipelineBuilder, const GrClip&, GrDrawBatch* batch);
+
+ GrDrawTarget* getDrawTarget();
+
+ GrDrawingManager* fDrawingManager;
+ sk_sp<GrRenderTarget> fRenderTarget;
+
+ // In MDB-mode the drawTarget can be closed by some other drawContext that has picked
+ // it up. For this reason, the drawTarget should only ever be accessed via 'getDrawTarget'.
+ GrDrawTarget* fDrawTarget;
+ GrContext* fContext;
+ GrInstancedPipelineInfo fInstancedPipelineInfo;
+
+ sk_sp<SkColorSpace> fColorSpace;
+ sk_sp<GrColorSpaceXform> fColorXformFromSRGB;
+ SkSurfaceProps fSurfaceProps;
+ GrAuditTrail* fAuditTrail;
+
+ // In debug builds we guard against improper thread handling
+ SkDEBUGCODE(mutable GrSingleOwner* fSingleOwner;)
+};
+
+#endif
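
A hedged sketch of the typical call pattern for this class: clear the target, fill a rect, then flush for external use. How the GrDrawContext and the GrClip are obtained is outside this header and assumed here; a default-constructed GrPaint is used only to keep the example short.

    // Illustrative only, not part of this patch.
    static void clearAndFill(GrDrawContext* dc, const GrClip& clip) {
        // Clear the whole render target to opaque black, ignoring any clip.
        dc->clear(nullptr, GrColorPackRGBA(0, 0, 0, 0xFF), /*canIgnoreRect=*/true);
        GrPaint paint;  // real callers configure color/fragment processors here
        dc->drawRect(clip, paint, SkMatrix::I(), SkRect::MakeWH(64, 64));  // null style == fill
        // Issue pending IO and resolve MSAA before the surface is used externally.
        dc->prepareForExternalIO();
    }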
diff --git a/gfx/skia/skia/include/gpu/GrFragmentProcessor.h b/gfx/skia/skia/include/gpu/GrFragmentProcessor.h
new file mode 100644
index 000000000..d7011f826
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrFragmentProcessor.h
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrFragmentProcessor_DEFINED
+#define GrFragmentProcessor_DEFINED
+
+#include "GrProcessor.h"
+
+class GrCoordTransform;
+class GrGLSLCaps;
+class GrGLSLFragmentProcessor;
+class GrInvariantOutput;
+class GrPipeline;
+class GrProcessorKeyBuilder;
+
+/** Provides custom fragment shader code. Fragment processors receive an input color (vec4f) and
+ produce an output color. They may reference textures and uniforms. They may use
+ GrCoordTransforms to receive a transformation of the local coordinates that map from local space
+ to the fragment being processed.
+ */
+class GrFragmentProcessor : public GrProcessor {
+public:
+ /**
+ * In many instances (e.g. SkShader::asFragmentProcessor() implementations) it is desirable to
+ * only consider the input color's alpha. However, there is a competing desire to have reusable
+ * GrFragmentProcessor subclasses that can be used in other scenarios where the entire input
+ * color is considered. This function exists to filter the input color and pass it to a FP. It
+ * does so by returning a parent FP that multiplies the passed in FPs output by the parent's
+ * input alpha. The passed in FP will not receive an input color.
+ */
+ static sk_sp<GrFragmentProcessor> MulOutputByInputAlpha(sk_sp<GrFragmentProcessor>);
+
+ /**
+ * Similar to the above but it modulates the output r,g,b of the child processor by the input
+ * rgb and then multiplies all the components by the input alpha. This effectively modulates
+ * the child processor's premul color by an unpremul'ed input and produces a premul output.
+ */
+ static sk_sp<GrFragmentProcessor> MulOutputByInputUnpremulColor(sk_sp<GrFragmentProcessor>);
+
+ /**
+ * Returns a parent fragment processor that adopts the passed fragment processor as a child.
+ * The parent will ignore its input color and instead feed the passed in color as input to the
+ * child.
+ */
+ static sk_sp<GrFragmentProcessor> OverrideInput(sk_sp<GrFragmentProcessor>, GrColor4f);
+
+ /**
+ * Returns a fragment processor that premuls the input before calling the passed in fragment
+ * processor.
+ */
+ static sk_sp<GrFragmentProcessor> PremulInput(sk_sp<GrFragmentProcessor>);
+
+ /**
+ * Returns a fragment processor that runs the passed in array of fragment processors in a
+ * series. The original input is passed to the first, the first's output is passed to the
+ * second, etc. The output of the returned processor is the output of the last processor of the
+ * series.
+ *
+ * The array elements will be moved.
+ */
+ static sk_sp<GrFragmentProcessor> RunInSeries(sk_sp<GrFragmentProcessor>*, int cnt);
+
+ GrFragmentProcessor()
+ : INHERITED()
+ , fUsesDistanceVectorField(false)
+ , fUsesLocalCoords(false) {}
+
+ ~GrFragmentProcessor() override;
+
+ GrGLSLFragmentProcessor* createGLSLInstance() const;
+
+ void getGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const {
+ this->onGetGLSLProcessorKey(caps, b);
+ for (int i = 0; i < fChildProcessors.count(); ++i) {
+ fChildProcessors[i]->getGLSLProcessorKey(caps, b);
+ }
+ }
+
+ int numCoordTransforms() const { return fCoordTransforms.count(); }
+
+ /** Returns the coordinate transformation at index. index must be valid according to
+ numTransforms(). */
+ const GrCoordTransform& coordTransform(int index) const { return *fCoordTransforms[index]; }
+
+ const SkTArray<const GrCoordTransform*, true>& coordTransforms() const {
+ return fCoordTransforms;
+ }
+
+ int numChildProcessors() const { return fChildProcessors.count(); }
+
+ const GrFragmentProcessor& childProcessor(int index) const { return *fChildProcessors[index]; }
+
+ /** Do any of the coordtransforms for this processor require local coords? */
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+
+ /** Does this FP need a vector to the nearest edge? */
+ bool usesDistanceVectorField() const { return fUsesDistanceVectorField; }
+
+ /** Returns true if this and the other processor conservatively draw identically. It can only
+ return true when the two processors are of the same subclass (i.e. they return the same object
+ from getFactory()).
+
+ A return value of true from isEqual() should not be used to test whether the processor would
+ generate the same shader code. To test for identical code generation use getGLSLProcessorKey().
+ */
+ bool isEqual(const GrFragmentProcessor& that) const;
+
+ /**
+ * This function is used to perform optimizations. When called, the inout param indicates
+ * whether the input components to this processor in the FS will have known values. On input,
+ * the validFlags member of inout is a bitfield of GrColorComponentFlags. The isSingleComponent
+ * member indicates whether the input will be 1 or 4 bytes. The function updates the members of
+ * inout to indicate known values of its output. A component of the color member only has
+ * meaning if the corresponding bit in validFlags is set.
+ */
+ void computeInvariantOutput(GrInvariantOutput* inout) const {
+ this->onComputeInvariantOutput(inout);
+ }
+
+ /**
+ * Pre-order traversal of a FP hierarchy, or of the forest of FPs in a GrPipeline. In the latter
+ * case the tree rooted at each FP in the GrPipeline is visited successively.
+ */
+ class Iter : public SkNoncopyable {
+ public:
+ explicit Iter(const GrFragmentProcessor* fp) { fFPStack.push_back(fp); }
+ explicit Iter(const GrPipeline& pipeline);
+ const GrFragmentProcessor* next();
+
+ private:
+ SkSTArray<4, const GrFragmentProcessor*, true> fFPStack;
+ };
+
+ /**
+ * Iterates over all the Ts owned by a GrFragmentProcessor and its children or over all the Ts
+ * owned by the forest of GrFragmentProcessors in a GrPipeline. FPs are visited in the same
+ * order as Iter and each of an FP's Ts are visited in order.
+ */
+ template <typename T, typename BASE,
+ int (BASE::*COUNT)() const,
+ const T& (BASE::*GET)(int) const>
+ class FPItemIter : public SkNoncopyable {
+ public:
+ explicit FPItemIter(const GrFragmentProcessor* fp)
+ : fCurrFP(nullptr)
+ , fCTIdx(0)
+ , fFPIter(fp) {
+ fCurrFP = fFPIter.next();
+ }
+ explicit FPItemIter(const GrPipeline& pipeline)
+ : fCurrFP(nullptr)
+ , fCTIdx(0)
+ , fFPIter(pipeline) {
+ fCurrFP = fFPIter.next();
+ }
+
+ const T* next() {
+ if (!fCurrFP) {
+ return nullptr;
+ }
+ while (fCTIdx == (fCurrFP->*COUNT)()) {
+ fCTIdx = 0;
+ fCurrFP = fFPIter.next();
+ if (!fCurrFP) {
+ return nullptr;
+ }
+ }
+ return &(fCurrFP->*GET)(fCTIdx++);
+ }
+
+ private:
+ const GrFragmentProcessor* fCurrFP;
+ int fCTIdx;
+ GrFragmentProcessor::Iter fFPIter;
+ };
+
+ using CoordTransformIter = FPItemIter<GrCoordTransform,
+ GrFragmentProcessor,
+ &GrFragmentProcessor::numCoordTransforms,
+ &GrFragmentProcessor::coordTransform>;
+
+ using TextureAccessIter = FPItemIter<GrTextureAccess,
+ GrProcessor,
+ &GrProcessor::numTextures,
+ &GrProcessor::textureAccess>;
+
+protected:
+ void addTextureAccess(const GrTextureAccess* textureAccess) override;
+ void addBufferAccess(const GrBufferAccess*) override;
+
+ /**
+ * Fragment Processor subclasses call this from their constructor to register coordinate
+ * transformations. Coord transforms provide a mechanism for a processor to receive coordinates
+ * in their FS code. The matrix expresses a transformation from local space. For a given
+ * fragment the matrix will be applied to the local coordinate that maps to the fragment.
+ *
+ * When the transformation has perspective, the transformed coordinates will have
+ * 3 components. Otherwise they'll have 2.
+ *
+ * This must only be called from the constructor because GrProcessors are immutable. The
+ * processor subclass manages the lifetime of the transformations (this function only stores a
+ * pointer). The GrCoordTransform is typically a member field of the GrProcessor subclass.
+ *
+ * A processor subclass that has multiple methods of construction should always add its coord
+ * transforms in a consistent order. The non-virtual implementation of isEqual() automatically
+ * compares transforms and will assume they line up across the two processor instances.
+ */
+ void addCoordTransform(const GrCoordTransform*);
+
+ /**
+ * FragmentProcessor subclasses call this from their constructor to register any child
+ * FragmentProcessors they have. This must be called AFTER all texture accesses and coord
+ * transforms have been added.
+ * This is for processors whose shader code will be composed of nested processors whose output
+ * colors will be combined somehow to produce the parent's output color. Registering these child
+ * processors will allow the ProgramBuilder to automatically handle their transformed coords and
+ * texture accesses and mangle their uniform and output color names.
+ */
+ int registerChildProcessor(sk_sp<GrFragmentProcessor> child);
+
+ /**
+ * Subclass implements this to support getConstantColorComponents(...).
+ *
+ * Note: it's up to the subclass implementation to do any recursive call to compute the child
+ * procs' output invariants; computeInvariantOutput will not be recursive.
+ */
+ virtual void onComputeInvariantOutput(GrInvariantOutput* inout) const = 0;
+
+ /* Sub-classes should set this to true in their constructors if they need access to a distance
+ * vector field to the nearest edge
+ */
+ bool fUsesDistanceVectorField;
+
+private:
+ void notifyRefCntIsZero() const final;
+
+ /** Returns a new instance of the appropriate *GL* implementation class
+ for the given GrFragmentProcessor; caller is responsible for deleting
+ the object. */
+ virtual GrGLSLFragmentProcessor* onCreateGLSLInstance() const = 0;
+
+ /** Implemented using GLFragmentProcessor::GenKey as described in this class's comment. */
+ virtual void onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const = 0;
+
+ /**
+ * Subclass implements this to support isEqual(). It will only be called if it is known that
+ * the two processors are of the same subclass (i.e. they return the same object from
+ * getFactory()). The processor subclass should not compare its coord transforms as that will
+ * be performed automatically in the non-virtual isEqual().
+ */
+ virtual bool onIsEqual(const GrFragmentProcessor&) const = 0;
+
+ bool hasSameTransforms(const GrFragmentProcessor&) const;
+
+ bool fUsesLocalCoords;
+
+ SkSTArray<4, const GrCoordTransform*, true> fCoordTransforms;
+
+ /**
+ * This is not SkSTArray<1, sk_sp<GrFragmentProcessor>> because this class holds strong
+ * references until notifyRefCntIsZero and then it holds pending executions.
+ */
+ SkSTArray<1, GrFragmentProcessor*, true> fChildProcessors;
+
+ typedef GrProcessor INHERITED;
+};
+
+#endif
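
Two hedged sketches for the composition helpers and iterators declared above. The fragment processors passed in are assumed to come from existing factories elsewhere in Gr; nothing below is part of the patch itself.

    // Chain two FPs: the first feeds the second, and the second only sees input alpha.
    static sk_sp<GrFragmentProcessor> chain(sk_sp<GrFragmentProcessor> first,
                                            sk_sp<GrFragmentProcessor> second) {
        sk_sp<GrFragmentProcessor> alphaOnly =
                GrFragmentProcessor::MulOutputByInputAlpha(std::move(second));
        sk_sp<GrFragmentProcessor> series[] = { std::move(first), std::move(alphaOnly) };
        return GrFragmentProcessor::RunInSeries(series, 2);
    }

    // Visit every coord transform in an FP hierarchy via the pre-order iterator.
    static void dumpTransformPrecisions(const GrFragmentProcessor* root) {
        GrFragmentProcessor::CoordTransformIter iter(root);
        while (const GrCoordTransform* ct = iter.next()) {
            SkDebugf("precision %d\n", (int)ct->precision());
        }
    }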
diff --git a/gfx/skia/skia/include/gpu/GrGpuResource.h b/gfx/skia/skia/include/gpu/GrGpuResource.h
new file mode 100644
index 000000000..364a88640
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrGpuResource.h
@@ -0,0 +1,309 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGpuResource_DEFINED
+#define GrGpuResource_DEFINED
+
+#include "GrResourceKey.h"
+#include "GrTypesPriv.h"
+
+class GrContext;
+class GrGpu;
+class GrResourceCache;
+class SkTraceMemoryDump;
+
+/**
+ * Base class for GrGpuResource. Handles the various types of refs we need. Separated out as a base
+ * class to isolate the ref-cnting behavior and provide friendship without exposing all of
+ * GrGpuResource.
+ *
+ * Gpu resources can have three types of refs:
+ * 1) Normal ref (+ by ref(), - by unref()): These are used by code that is issuing draw calls
+ * that read and write the resource via GrDrawTarget and by any object that must own a
+ * GrGpuResource and is itself owned (directly or indirectly) by Skia-client code.
+ * 2) Pending read (+ by addPendingRead(), - by completedRead()): GrContext has scheduled a read
+ * of the resource by the GPU as a result of a skia API call but hasn't executed it yet.
+ * 3) Pending write (+ by addPendingWrite(), - by completedWrite()): GrContext has scheduled a
+ * write to the resource by the GPU as a result of a skia API call but hasn't executed it yet.
+ *
+ * The latter two ref types are private and intended only for Gr core code.
+ *
+ * When all the ref/io counts reach zero, DERIVED::notifyAllCntsAreZero() will be called (static
+ * polymorphism using CRTP). Similarly, when the ref (but not necessarily pending read/write)
+ * count reaches 0, DERIVED::notifyRefCountIsZero() will be called. In the case when an unref()
+ * causes both the ref cnt to reach zero and the other counts are zero, notifyRefCountIsZero()
+ * will be called before notifyAllCntsAreZero(). Moreover, if notifyRefCountIsZero() returns false
+ * then notifyAllCntsAreZero() won't be called at all. notifyRefCountIsZero() must return false if
+ * the object may be deleted after notifyRefCountIsZero() returns.
+ *
+ * GrIORef and GrGpuResource are separate classes for organizational reasons and to be
+ * able to give access via friendship to only the functions related to pending IO operations.
+ */
+template <typename DERIVED> class GrIORef : public SkNoncopyable {
+public:
+ // Some of the signatures are written to mirror SkRefCnt so that GrGpuResource can work with
+ // templated helper classes (e.g. SkAutoTUnref). However, we have different categories of
+ // refs (e.g. pending reads). We also don't require thread safety as GrCacheable objects are
+ // not intended to cross thread boundaries.
+ void ref() const {
+ this->validate();
+ ++fRefCnt;
+ }
+
+ void unref() const {
+ this->validate();
+
+ if (!(--fRefCnt)) {
+ if (!static_cast<const DERIVED*>(this)->notifyRefCountIsZero()) {
+ return;
+ }
+ }
+
+ this->didRemoveRefOrPendingIO(kRef_CntType);
+ }
+
+ void validate() const {
+#ifdef SK_DEBUG
+ SkASSERT(fRefCnt >= 0);
+ SkASSERT(fPendingReads >= 0);
+ SkASSERT(fPendingWrites >= 0);
+ SkASSERT(fRefCnt + fPendingReads + fPendingWrites >= 0);
+#endif
+ }
+
+protected:
+ GrIORef() : fRefCnt(1), fPendingReads(0), fPendingWrites(0) { }
+
+ enum CntType {
+ kRef_CntType,
+ kPendingRead_CntType,
+ kPendingWrite_CntType,
+ };
+
+ bool isPurgeable() const { return !this->internalHasRef() && !this->internalHasPendingIO(); }
+
+ bool internalHasPendingRead() const { return SkToBool(fPendingReads); }
+ bool internalHasPendingWrite() const { return SkToBool(fPendingWrites); }
+ bool internalHasPendingIO() const { return SkToBool(fPendingWrites | fPendingReads); }
+
+ bool internalHasRef() const { return SkToBool(fRefCnt); }
+
+private:
+ void addPendingRead() const {
+ this->validate();
+ ++fPendingReads;
+ }
+
+ void completedRead() const {
+ this->validate();
+ --fPendingReads;
+ this->didRemoveRefOrPendingIO(kPendingRead_CntType);
+ }
+
+ void addPendingWrite() const {
+ this->validate();
+ ++fPendingWrites;
+ }
+
+ void completedWrite() const {
+ this->validate();
+ --fPendingWrites;
+ this->didRemoveRefOrPendingIO(kPendingWrite_CntType);
+ }
+
+private:
+ void didRemoveRefOrPendingIO(CntType cntTypeRemoved) const {
+ if (0 == fPendingReads && 0 == fPendingWrites && 0 == fRefCnt) {
+ static_cast<const DERIVED*>(this)->notifyAllCntsAreZero(cntTypeRemoved);
+ }
+ }
+
+ mutable int32_t fRefCnt;
+ mutable int32_t fPendingReads;
+ mutable int32_t fPendingWrites;
+
+ // This class is used to manage conversion of refs to pending reads/writes.
+ friend class GrGpuResourceRef;
+ friend class GrResourceCache; // to check IO ref counts.
+
+ template <typename, GrIOType> friend class GrPendingIOResource;
+};
+
+/**
+ * Base class for objects that can be kept in the GrResourceCache.
+ */
+class SK_API GrGpuResource : public GrIORef<GrGpuResource> {
+public:
+
+ /**
+ * Tests whether an object has been abandoned or released. All objects will
+ * be in this state after their creating GrContext is destroyed or has
+ * contextLost called. It's up to the client to test wasDestroyed() before
+ * attempting to use an object if it holds refs on objects across
+ * ~GrContext, freeResources with the force flag, or contextLost.
+ *
+ * @return true if the object has been released or abandoned,
+ * false otherwise.
+ */
+ bool wasDestroyed() const { return NULL == fGpu; }
+
+ /**
+ * Retrieves the context that owns the object. Note that it is possible for
+ * this to return NULL. When objects have been release()ed or abandon()ed
+ * they no longer have an owning context. Destroying a GrContext
+ * automatically releases all its resources.
+ */
+ const GrContext* getContext() const;
+ GrContext* getContext();
+
+ /**
+ * Retrieves the amount of GPU memory used by this resource in bytes. It is
+ * approximate since we aren't aware of additional padding or copies made
+ * by the driver.
+ *
+ * @return the amount of GPU memory used in bytes
+ */
+ size_t gpuMemorySize() const {
+ if (kInvalidGpuMemorySize == fGpuMemorySize) {
+ fGpuMemorySize = this->onGpuMemorySize();
+ SkASSERT(kInvalidGpuMemorySize != fGpuMemorySize);
+ }
+ return fGpuMemorySize;
+ }
+
+ /**
+ * Gets an id that is unique for this GrGpuResource object. It is static in that it does
+ * not change when the content of the GrGpuResource object changes. This will never return
+ * 0.
+ */
+ uint32_t uniqueID() const { return fUniqueID; }
+
+ /** Returns the current unique key for the resource. It will be invalid if the resource has no
+ associated unique key. */
+ const GrUniqueKey& getUniqueKey() const { return fUniqueKey; }
+
+ /**
+ * Internal-only helper class used for manipulations of the resource by the cache.
+ */
+ class CacheAccess;
+ inline CacheAccess cacheAccess();
+ inline const CacheAccess cacheAccess() const;
+
+ /**
+ * Internal-only helper class used for manipulations of the resource by internal code.
+ */
+ class ResourcePriv;
+ inline ResourcePriv resourcePriv();
+ inline const ResourcePriv resourcePriv() const;
+
+ /**
+ * Removes references to objects in the underlying 3D API without freeing them.
+ * Called by CacheAccess.
+ * In general this method should not be called outside of skia. It was
+ made public for a special case where it needs to be called in Blink
+ * when a texture becomes unsafe to use after having been shared through
+ * a texture mailbox.
+ */
+ void abandon();
+
+ /**
+ * Dumps memory usage information for this GrGpuResource to traceMemoryDump.
+ * Typically, subclasses should not need to override this, and should only
+ * need to override setMemoryBacking.
+ **/
+ virtual void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;
+
+ static uint32_t CreateUniqueID();
+
+protected:
+ // This must be called by every non-wrapped GrGpuObject. It should be called once the object is
+ // fully initialized (i.e. only from the constructors of the final class).
+ void registerWithCache(SkBudgeted);
+
+ // This must be called by every GrGpuObject that references any wrapped backend objects. It
+ // should be called once the object is fully initialized (i.e. only from the constructors of the
+ // final class).
+ void registerWithCacheWrapped();
+
+ GrGpuResource(GrGpu*);
+ virtual ~GrGpuResource();
+
+ GrGpu* getGpu() const { return fGpu; }
+
+ /** Overridden to free GPU resources in the backend API. */
+ virtual void onRelease() { }
+ /** Overridden to abandon any internal handles, ptrs, etc to backend API resources.
+ This may be called when the underlying 3D context is no longer valid and so no
+ backend API calls should be made. */
+ virtual void onAbandon() { }
+
+ /**
+ * This entry point should be called whenever gpuMemorySize() should report a different size.
+ * The cache will call gpuMemorySize() to update the current size of the resource.
+ */
+ void didChangeGpuMemorySize() const;
+
+ /**
+ * Allows subclasses to add additional backing information to the SkTraceMemoryDump. Called by
+ * onMemoryDump. The default implementation adds no backing information.
+ **/
+ virtual void setMemoryBacking(SkTraceMemoryDump*, const SkString&) const {}
+
+private:
+ /**
+ * Called by the registerWithCache if the resource is available to be used as scratch.
+ * Resource subclasses should override this if the instances should be recycled as scratch
+ * resources and populate the scratchKey with the key.
+ * By default resources are not recycled as scratch.
+ **/
+ virtual void computeScratchKey(GrScratchKey*) const { }
+
+ /**
+ * Frees the object in the underlying 3D API. Called by CacheAccess.
+ */
+ void release();
+
+ virtual size_t onGpuMemorySize() const = 0;
+
+ // See comments in CacheAccess and ResourcePriv.
+ void setUniqueKey(const GrUniqueKey&);
+ void removeUniqueKey();
+ void notifyAllCntsAreZero(CntType) const;
+ bool notifyRefCountIsZero() const;
+ void removeScratchKey();
+ void makeBudgeted();
+ void makeUnbudgeted();
+
+#ifdef SK_DEBUG
+ friend class GrGpu; // for assert in GrGpu to access getGpu
+#endif
+ // An index into a heap when this resource is purgeable or an array when not. This is maintained
+ // by the cache.
+ int fCacheArrayIndex;
+ // This value reflects how recently this resource was accessed in the cache. This is maintained
+ // by the cache.
+ uint32_t fTimestamp;
+ uint32_t fExternalFlushCntWhenBecamePurgeable;
+
+ static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
+ GrScratchKey fScratchKey;
+ GrUniqueKey fUniqueKey;
+
+ // This is not ref'ed but abandon() or release() will be called before the GrGpu object
+ // is destroyed. Those calls will set this to NULL.
+ GrGpu* fGpu;
+ mutable size_t fGpuMemorySize;
+
+ SkBudgeted fBudgeted;
+ bool fRefsWrappedObjects;
+ const uint32_t fUniqueID;
+
+ typedef GrIORef<GrGpuResource> INHERITED;
+ friend class GrIORef<GrGpuResource>; // to access notifyAllCntsAreZero and notifyRefCountIsZero.
+};
+
+#endif
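
A hedged sketch of the "normal ref" conventions documented above; the resource pointer is assumed to come from the resource provider or cache, which are declared outside this header.

    // Illustrative only, not part of this patch.
    static void inspectResource(GrGpuResource* res) {
        res->ref();                       // take a normal ref while we use it
        if (!res->wasDestroyed()) {
            SkDebugf("resource %u uses %d bytes\n",
                     res->uniqueID(), (int)res->gpuMemorySize());
        }
        res->unref();                     // may end up invoking notifyRefCountIsZero()
    }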
diff --git a/gfx/skia/skia/include/gpu/GrGpuResourceRef.h b/gfx/skia/skia/include/gpu/GrGpuResourceRef.h
new file mode 100644
index 000000000..4511adce6
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrGpuResourceRef.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGpuResourceRef_DEFINED
+#define GrGpuResourceRef_DEFINED
+
+#include "GrGpuResource.h"
+#include "GrRenderTarget.h"
+#include "GrTexture.h"
+#include "SkRefCnt.h"
+
+/**
+ * This class is intended only for internal use in core Gr code.
+ *
+ * Class that wraps a resource referenced by a GrProgramElement or GrDrawState. It manages
+ * converting refs to pending IO operations. It allows a resource ownership to be in three
+ * states:
+ * 1. Owns a single ref
+ * 2. Owns a single ref and a pending IO operation (read, write, or read-write)
+ * 3. Owns a single pending IO operation.
+ *
+ * It is legal to destroy the GrGpuResourceRef in any of these states. It starts in state
+ * 1. Calling markPendingIO() converts it from state 1 to state 2. Calling removeRef() goes from
+ * state 2 to state 3. Calling pendingIOComplete() moves from state 2 to state 1. There is no
+ * valid way of going from state 3 back to 2 or 1.
+ *
+ * Like SkAutoTUnref, its constructor and setter adopt a ref from their caller.
+ *
+ * TODO: Once GrDODrawState no longer exists and therefore GrDrawState and GrOptDrawState no
+ * longer share an instance of this class, attempt to make the resource owned by GrGpuResourceRef
+ * only settable via the constructor.
+ */
+class GrGpuResourceRef : SkNoncopyable {
+public:
+ ~GrGpuResourceRef();
+
+ GrGpuResource* getResource() const { return fResource; }
+
+ /** Does this object own a pending read or write on the resource it is wrapping. */
+ bool ownsPendingIO() const { return fPendingIO; }
+
+ /** Shortcut for calling setResource() with NULL. It cannot be called after markPendingIO()
+ is called. */
+ void reset();
+
+protected:
+ GrGpuResourceRef();
+
+ /** Adopts a ref from the caller. ioType expresses what type of IO operations will be marked as
+ pending on the resource when markPendingIO is called. */
+ GrGpuResourceRef(GrGpuResource*, GrIOType);
+
+ /** Adopts a ref from the caller. ioType expresses what type of IO operations will be marked as
+ pending on the resource when markPendingIO is called. */
+ void setResource(GrGpuResource*, GrIOType);
+
+private:
+ /** Called by owning GrProgramElement when the program element is first scheduled for
+ execution. It can only be called once. */
+ void markPendingIO() const;
+
+ /** Called when the program element/draw state is no longer owned by GrDrawTarget-client code.
+ This lets the cache know that the drawing code will no longer schedule additional reads or
+ writes to the resource using the program element or draw state. It can only be called once.
+ */
+ void removeRef() const;
+
+ /** Called to indicate that the previous pending IO is complete. Useful when the owning object
+ still has refs, so it is not about to destroy this GrGpuResourceRef, but its previously
+ pending executions have completed. Can only be called if removeRef() was not previously
+ called. */
+ void pendingIOComplete() const;
+
+ friend class GrProgramElement;
+
+ GrGpuResource* fResource;
+ mutable bool fOwnRef;
+ mutable bool fPendingIO;
+ GrIOType fIOType;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+/**
+ * Templated version of GrGpuResourceRef to enforce type safety.
+ */
+template <typename T> class GrTGpuResourceRef : public GrGpuResourceRef {
+public:
+ GrTGpuResourceRef() {}
+
+ /** Adopts a ref from the caller. ioType expresses what type of IO operations will be marked as
+ pending on the resource when markPendingIO is called. */
+ GrTGpuResourceRef(T* resource, GrIOType ioType) : INHERITED(resource, ioType) { }
+
+ T* get() const { return static_cast<T*>(this->getResource()); }
+
+ /** Adopts a ref from the caller. ioType expresses what type of IO operations will be marked as
+ pending on the resource when markPendingIO is called. */
+ void set(T* resource, GrIOType ioType) { this->setResource(resource, ioType); }
+
+private:
+ typedef GrGpuResourceRef INHERITED;
+};
+
+// Specializations for GrTexture and GrRenderTarget because they use virtual inheritance.
+template<> class GrTGpuResourceRef<GrTexture> : public GrGpuResourceRef {
+public:
+ GrTGpuResourceRef() {}
+
+ GrTGpuResourceRef(GrTexture* texture, GrIOType ioType) : INHERITED(texture, ioType) { }
+
+ GrTexture* get() const {
+ GrSurface* surface = static_cast<GrSurface*>(this->getResource());
+ if (surface) {
+ return surface->asTexture();
+ } else {
+ return NULL;
+ }
+ }
+
+ void set(GrTexture* texture, GrIOType ioType) { this->setResource(texture, ioType); }
+
+private:
+ typedef GrGpuResourceRef INHERITED;
+};
+
+template<> class GrTGpuResourceRef<GrRenderTarget> : public GrGpuResourceRef {
+public:
+ GrTGpuResourceRef() {}
+
+ GrTGpuResourceRef(GrRenderTarget* rt, GrIOType ioType) : INHERITED(rt, ioType) { }
+
+ GrRenderTarget* get() const {
+ GrSurface* surface = static_cast<GrSurface*>(this->getResource());
+ if (surface) {
+ return surface->asRenderTarget();
+ } else {
+ return NULL;
+ }
+ }
+
+ void set(GrRenderTarget* rt, GrIOType ioType) { this->setResource(rt, ioType); }
+
+private:
+ typedef GrGpuResourceRef INHERITED;
+};
+
+/**
+ * This is similar to GrTGpuResourceRef but can only be in the pending IO state. It never owns a
+ * ref.
+ */
+template <typename T, GrIOType IO_TYPE> class GrPendingIOResource : SkNoncopyable {
+public:
+ GrPendingIOResource(T* resource = NULL) : fResource(NULL) {
+ this->reset(resource);
+ }
+
+ void reset(T* resource) {
+ if (resource) {
+ switch (IO_TYPE) {
+ case kRead_GrIOType:
+ resource->addPendingRead();
+ break;
+ case kWrite_GrIOType:
+ resource->addPendingWrite();
+ break;
+ case kRW_GrIOType:
+ resource->addPendingRead();
+ resource->addPendingWrite();
+ break;
+ }
+ }
+ this->release();
+ fResource = resource;
+ }
+
+ ~GrPendingIOResource() {
+ this->release();
+ }
+
+ explicit operator bool() const { return SkToBool(fResource); }
+
+ bool operator==(const GrPendingIOResource& other) const {
+ return fResource == other.fResource;
+ }
+
+ T* get() const { return fResource; }
+
+private:
+ void release() {
+ if (fResource) {
+ switch (IO_TYPE) {
+ case kRead_GrIOType:
+ fResource->completedRead();
+ break;
+ case kWrite_GrIOType:
+ fResource->completedWrite();
+ break;
+ case kRW_GrIOType:
+ fResource->completedRead();
+ fResource->completedWrite();
+ break;
+ }
+ }
+ }
+
+ T* fResource;
+};
+#endif
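
A hedged sketch of the pending-IO wrapper defined above; the texture is assumed to be provided by Gr code elsewhere, and the draw recording is only indicated by a comment.

    // Illustrative only, not part of this patch.
    static void recordReadOfTexture(GrTexture* texture) {
        // Construction adds a pending read; destruction calls completedRead().
        GrPendingIOResource<GrTexture, kRead_GrIOType> pendingRead(texture);
        if (pendingRead) {
            // ... record GPU work that samples pendingRead.get() ...
        }
    }  // the pending read completes here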
diff --git a/gfx/skia/skia/include/gpu/GrInvariantOutput.h b/gfx/skia/skia/include/gpu/GrInvariantOutput.h
new file mode 100644
index 000000000..6e2cbe84f
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrInvariantOutput.h
@@ -0,0 +1,342 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrInvariantOutput_DEFINED
+#define GrInvariantOutput_DEFINED
+
+#include "GrColor.h"
+
+struct GrInitInvariantOutput {
+ GrInitInvariantOutput()
+ : fValidFlags(kNone_GrColorComponentFlags)
+ , fColor(0)
+ , fIsSingleComponent(false)
+ , fIsLCDCoverage(false) {}
+
+ void setKnownFourComponents(GrColor color) {
+ fColor = color;
+ fValidFlags = kRGBA_GrColorComponentFlags;
+ fIsSingleComponent = false;
+ }
+
+ void setUnknownFourComponents() {
+ fValidFlags = kNone_GrColorComponentFlags;
+ fIsSingleComponent = false;
+ }
+
+ void setUnknownOpaqueFourComponents() {
+ fColor = 0xffU << GrColor_SHIFT_A;
+ fValidFlags = kA_GrColorComponentFlag;
+ fIsSingleComponent = false;
+ }
+
+ void setKnownSingleComponent(uint8_t alpha) {
+ fColor = GrColorPackRGBA(alpha, alpha, alpha, alpha);
+ fValidFlags = kRGBA_GrColorComponentFlags;
+ fIsSingleComponent = true;
+ }
+
+ void setUnknownSingleComponent() {
+ fValidFlags = kNone_GrColorComponentFlags;
+ fIsSingleComponent = true;
+ }
+
+ void setUsingLCDCoverage() { fIsLCDCoverage = true; }
+
+ GrColorComponentFlags fValidFlags;
+ GrColor fColor;
+ bool fIsSingleComponent;
+ bool fIsLCDCoverage; // Temporary data member until texture pixel configs are
+ // updated
+};
+
+class GrInvariantOutput {
+public:
+ GrInvariantOutput(GrColor color, GrColorComponentFlags flags, bool isSingleComponent)
+ : fColor(color)
+ , fValidFlags(flags)
+ , fIsSingleComponent(isSingleComponent)
+ , fNonMulStageFound(false)
+ , fWillUseInputColor(true)
+ , fIsLCDCoverage(false) {}
+
+ GrInvariantOutput(const GrInitInvariantOutput& io)
+ : fColor(io.fColor)
+ , fValidFlags(io.fValidFlags)
+ , fIsSingleComponent(io.fIsSingleComponent)
+ , fNonMulStageFound(false)
+ , fWillUseInputColor(false)
+ , fIsLCDCoverage(io.fIsLCDCoverage) {}
+
+ virtual ~GrInvariantOutput() {}
+
+ enum ReadInput {
+ kWill_ReadInput,
+ kWillNot_ReadInput,
+ };
+
+ void mulByUnknownOpaqueFourComponents() {
+ SkDEBUGCODE(this->validate());
+ if (this->isOpaque()) {
+ fValidFlags = kA_GrColorComponentFlag;
+ fIsSingleComponent = false;
+ } else {
+ // Since the current state is not opaque we no longer care if the color being
+ // multiplied is opaque.
+ this->mulByUnknownFourComponents();
+ }
+ SkDEBUGCODE(this->validate());
+ }
+
+ void mulByUnknownFourComponents() {
+ SkDEBUGCODE(this->validate());
+ if (this->hasZeroAlpha()) {
+ this->internalSetToTransparentBlack();
+ } else {
+ this->internalSetToUnknown();
+ }
+ SkDEBUGCODE(this->validate());
+ }
+
+ void mulByUnknownSingleComponent() {
+ SkDEBUGCODE(this->validate());
+ if (this->hasZeroAlpha()) {
+ this->internalSetToTransparentBlack();
+ } else {
+ // We don't need to change fIsSingleComponent in this case
+ fValidFlags = kNone_GrColorComponentFlags;
+ }
+ SkDEBUGCODE(this->validate());
+ }
+
+ void mulByKnownSingleComponent(uint8_t alpha) {
+ SkDEBUGCODE(this->validate());
+ if (this->hasZeroAlpha() || 0 == alpha) {
+ this->internalSetToTransparentBlack();
+ } else {
+ if (alpha != 255) {
+ // Multiply color by alpha
+ fColor = GrColorPackRGBA(SkMulDiv255Round(GrColorUnpackR(fColor), alpha),
+ SkMulDiv255Round(GrColorUnpackG(fColor), alpha),
+ SkMulDiv255Round(GrColorUnpackB(fColor), alpha),
+ SkMulDiv255Round(GrColorUnpackA(fColor), alpha));
+ // We don't need to change fIsSingleComponent in this case
+ }
+ }
+ SkDEBUGCODE(this->validate());
+ }
+
+ void mulByKnownFourComponents(GrColor color) {
+ SkDEBUGCODE(this->validate());
+ uint32_t a;
+ if (GetAlphaAndCheckSingleChannel(color, &a)) {
+ this->mulByKnownSingleComponent(a);
+ } else {
+ if (color != 0xffffffff) {
+ fColor = GrColorPackRGBA(
+ SkMulDiv255Round(GrColorUnpackR(fColor), GrColorUnpackR(color)),
+ SkMulDiv255Round(GrColorUnpackG(fColor), GrColorUnpackG(color)),
+ SkMulDiv255Round(GrColorUnpackB(fColor), GrColorUnpackB(color)),
+ SkMulDiv255Round(GrColorUnpackA(fColor), a));
+ if (kRGBA_GrColorComponentFlags == fValidFlags) {
+ fIsSingleComponent = GetAlphaAndCheckSingleChannel(fColor, &a);
+ }
+ }
+ }
+ SkDEBUGCODE(this->validate());
+ }
+
+ // Ignores the incoming color's RGB and muls its alpha by color.
+ void mulAlphaByKnownFourComponents(GrColor color) {
+ SkDEBUGCODE(this->validate());
+ uint32_t a;
+ if (GetAlphaAndCheckSingleChannel(color, &a)) {
+ this->mulAlphaByKnownSingleComponent(a);
+ } else if (fValidFlags & kA_GrColorComponentFlag) {
+ GrColor preAlpha = GrColorUnpackA(fColor);
+ if (0 == preAlpha) {
+ this->internalSetToTransparentBlack();
+ } else {
+ // We know that color has different component values
+ fIsSingleComponent = false;
+ fColor = GrColorPackRGBA(
+ SkMulDiv255Round(preAlpha, GrColorUnpackR(color)),
+ SkMulDiv255Round(preAlpha, GrColorUnpackG(color)),
+ SkMulDiv255Round(preAlpha, GrColorUnpackB(color)),
+ SkMulDiv255Round(preAlpha, a));
+ fValidFlags = kRGBA_GrColorComponentFlags;
+ }
+ } else {
+ fIsSingleComponent = false;
+ fValidFlags = kNone_GrColorComponentFlags;
+ }
+ SkDEBUGCODE(this->validate());
+ }
+
+ // Ignores the incoming color's RGB and muls its alpha by the alpha param and sets all channels
+ // equal to that value.
+ void mulAlphaByKnownSingleComponent(uint8_t alpha) {
+ SkDEBUGCODE(this->validate());
+ if (0 == alpha || this->hasZeroAlpha()) {
+ this->internalSetToTransparentBlack();
+ } else {
+ if (fValidFlags & kA_GrColorComponentFlag) {
+ GrColor a = GrColorUnpackA(fColor);
+ a = SkMulDiv255Round(alpha, a);
+ fColor = GrColorPackRGBA(a, a, a, a);
+ fValidFlags = kRGBA_GrColorComponentFlags;
+ } else {
+ fValidFlags = kNone_GrColorComponentFlags;
+ }
+ fIsSingleComponent = true;
+ }
+ SkDEBUGCODE(this->validate());
+ }
+
+ void premulFourChannelColor() {
+ SkDEBUGCODE(this->validate());
+ SkASSERT(!fIsSingleComponent);
+ fNonMulStageFound = true;
+ if (!(fValidFlags & kA_GrColorComponentFlag)) {
+ fValidFlags = kNone_GrColorComponentFlags;
+ } else {
+ fColor = GrPremulColor(fColor);
+ }
+ SkDEBUGCODE(this->validate());
+ }
+
+ void invalidateComponents(GrColorComponentFlags invalidateFlags, ReadInput readsInput) {
+ SkDEBUGCODE(this->validate());
+ fValidFlags = (fValidFlags & ~invalidateFlags);
+ fIsSingleComponent = false;
+ fNonMulStageFound = true;
+ if (kWillNot_ReadInput == readsInput) {
+ fWillUseInputColor = false;
+ }
+ SkDEBUGCODE(this->validate());
+ }
+
+ void setToOther(GrColorComponentFlags validFlags, GrColor color, ReadInput readsInput) {
+ SkDEBUGCODE(this->validate());
+ fValidFlags = validFlags;
+ fColor = color;
+ fIsSingleComponent = false;
+ fNonMulStageFound = true;
+ if (kWillNot_ReadInput == readsInput) {
+ fWillUseInputColor = false;
+ }
+ if (kRGBA_GrColorComponentFlags == fValidFlags) {
+ uint32_t a;
+ if (GetAlphaAndCheckSingleChannel(color, &a)) {
+ fIsSingleComponent = true;
+ }
+ }
+ SkDEBUGCODE(this->validate());
+ }
+
+ void setToUnknown(ReadInput readsInput) {
+ SkDEBUGCODE(this->validate());
+ this->internalSetToUnknown();
+ fNonMulStageFound = true;
+ if (kWillNot_ReadInput == readsInput) {
+ fWillUseInputColor = false;
+ }
+ SkDEBUGCODE(this->validate());
+ }
+
+ // Temporary setter to handle LCD text correctly until we improve texture pixel config queries
+ // and thus can rely solely on number of coverage components for RGBA vs single channel coverage.
+ void setUsingLCDCoverage() {
+ fIsLCDCoverage = true;
+ }
+
+ GrColor color() const { return fColor; }
+ GrColorComponentFlags validFlags() const { return fValidFlags; }
+ bool willUseInputColor() const { return fWillUseInputColor; }
+
+ /**
+ * If isSingleComponent is true, then the flag values for r, g, b, and a must all be the
+ * same. If the flags are all set then all color components must be equal.
+ */
+ SkDEBUGCODE(void validate() const;)
+
+private:
+ friend class GrProcOptInfo;
+
+ /** Extracts the alpha channel and returns true if r,g,b == a. */
+ static bool GetAlphaAndCheckSingleChannel(GrColor color, uint32_t* alpha) {
+ *alpha = GrColorUnpackA(color);
+ return *alpha == GrColorUnpackR(color) && *alpha == GrColorUnpackG(color) &&
+ *alpha == GrColorUnpackB(color);
+ }
+
+ void reset(GrColor color, GrColorComponentFlags flags, bool isSingleComponent) {
+ fColor = color;
+ fValidFlags = flags;
+ fIsSingleComponent = isSingleComponent;
+ fNonMulStageFound = false;
+ fWillUseInputColor = true;
+ }
+
+ void reset(const GrInitInvariantOutput& io) {
+ fColor = io.fColor;
+ fValidFlags = io.fValidFlags;
+ fIsSingleComponent = io.fIsSingleComponent;
+ fNonMulStageFound = false;
+ fWillUseInputColor = true;
+ fIsLCDCoverage = io.fIsLCDCoverage;
+ }
+
+ void internalSetToTransparentBlack() {
+ fValidFlags = kRGBA_GrColorComponentFlags;
+ fColor = 0;
+ fIsSingleComponent = true;
+ }
+
+ void internalSetToUnknown() {
+ fValidFlags = kNone_GrColorComponentFlags;
+ fIsSingleComponent = false;
+ }
+
+ bool hasZeroAlpha() const {
+ return ((fValidFlags & kA_GrColorComponentFlag) && 0 == GrColorUnpackA(fColor));
+ }
+
+ bool isOpaque() const {
+ return ((fValidFlags & kA_GrColorComponentFlag) && 0xFF == GrColorUnpackA(fColor));
+ }
+
+ bool isSolidWhite() const {
+ return (fValidFlags == kRGBA_GrColorComponentFlags && 0xFFFFFFFF == fColor);
+ }
+
+ bool isSingleComponent() const { return fIsSingleComponent; }
+
+ void resetWillUseInputColor() { fWillUseInputColor = true; }
+
+ bool allStagesMulInput() const { return !fNonMulStageFound; }
+ void resetNonMulStageFound() { fNonMulStageFound = false; }
+
+ bool isLCDCoverage() const { return fIsLCDCoverage; }
+
+ SkDEBUGCODE(bool colorComponentsAllEqual() const;)
+ /**
+ * If alpha is valid, check that any valid R,G,B values are <= A
+ */
+ SkDEBUGCODE(bool validPreMulColor() const;)
+
+ GrColor fColor;
+ GrColorComponentFlags fValidFlags;
+ bool fIsSingleComponent;
+ bool fNonMulStageFound;
+ bool fWillUseInputColor;
+ bool fIsLCDCoverage; // Temporary data member until texture pixel configs are updated
+
+};
+
+#endif
+
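
A short sketch (not from the Skia sources) of the tracking GrInvariantOutput performs, using only calls declared in the header above; the concrete colors are examples only.

    // Start from a known opaque red produced by the primitive.
    GrInitInvariantOutput init;
    init.setKnownFourComponents(GrColorPackRGBA(0xFF, 0x00, 0x00, 0xFF));

    GrInvariantOutput out(init);

    // Multiplying by an unknown-but-opaque color leaves only alpha known.
    out.mulByUnknownOpaqueFourComponents();
    SkASSERT(kA_GrColorComponentFlag == out.validFlags());

    // A known 50% modulate then scales the known alpha: SkMulDiv255Round(0xFF, 0x80) == 0x80.
    out.mulByKnownSingleComponent(0x80);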
diff --git a/gfx/skia/skia/include/gpu/GrPaint.h b/gfx/skia/skia/include/gpu/GrPaint.h
new file mode 100644
index 000000000..a8af3c2f1
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrPaint.h
@@ -0,0 +1,200 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrPaint_DEFINED
+#define GrPaint_DEFINED
+
+#include "GrColor.h"
+#include "GrColorSpaceXform.h"
+#include "GrXferProcessor.h"
+#include "effects/GrPorterDuffXferProcessor.h"
+#include "GrFragmentProcessor.h"
+
+#include "SkBlendMode.h"
+#include "SkRefCnt.h"
+#include "SkRegion.h"
+
+/**
+ * The paint describes how color and coverage are computed at each pixel by GrContext draw
+ * functions and how the color is blended with the destination pixel.
+ *
+ * The paint allows installation of custom color and coverage stages. New types of stages are
+ * created by subclassing GrProcessor.
+ *
+ * The primitive color computation starts with the color specified by setColor(). This color is the
+ * input to the first color stage. Each color stage feeds its output to the next color stage.
+ *
+ * Fractional pixel coverage follows a similar flow. The coverage is initially the value specified
+ * by setCoverage(). This is input to the first coverage stage. Coverage stages are chained
+ * together in the same manner as color stages. The output of the last stage is modulated by any
+ * fractional coverage produced by anti-aliasing. This last step produces the final coverage, C.
+ *
+ * setXPFactory is used to control blending between the output color and dest. It also implements
+ * the application of fractional coverage from the coverage pipeline.
+ */
+class GrPaint {
+public:
+ GrPaint();
+
+ GrPaint(const GrPaint& paint) { *this = paint; }
+
+ ~GrPaint() { }
+
+ /**
+ * The initial color of the drawn primitive. Defaults to solid white.
+ */
+ void setColor4f(const GrColor4f& color) { fColor = color; }
+ const GrColor4f& getColor4f() const { return fColor; }
+
+ /**
+ * Legacy getter, until all code handles 4f directly.
+ */
+ GrColor getColor() const { return fColor.toGrColor(); }
+
+ /**
+ * Should primitives be anti-aliased or not. Defaults to false.
+ */
+ void setAntiAlias(bool aa) { fAntiAlias = aa; }
+ bool isAntiAlias() const { return fAntiAlias; }
+
+ /**
+ * Should shader output conversion from linear to sRGB be disabled.
+ * Only relevant if the destination is sRGB. Defaults to false.
+ */
+ void setDisableOutputConversionToSRGB(bool srgb) { fDisableOutputConversionToSRGB = srgb; }
+ bool getDisableOutputConversionToSRGB() const { return fDisableOutputConversionToSRGB; }
+
+ /**
+ * Should sRGB inputs be allowed to perform sRGB to linear conversion. With this flag
+ * set to false, sRGB textures will be treated as linear (including filtering).
+ */
+ void setAllowSRGBInputs(bool allowSRGBInputs) { fAllowSRGBInputs = allowSRGBInputs; }
+ bool getAllowSRGBInputs() const { return fAllowSRGBInputs; }
+
+ /**
+ * Does one of the fragment processors need a field of distance vectors to the nearest edge?
+ */
+ bool usesDistanceVectorField() const { return fUsesDistanceVectorField; }
+
+ /**
+ * Should rendering be gamma-correct, end-to-end. Causes sRGB render targets to behave
+ * as such (with linear blending), and sRGB inputs to be filtered and decoded correctly.
+ */
+ void setGammaCorrect(bool gammaCorrect) {
+ setDisableOutputConversionToSRGB(!gammaCorrect);
+ setAllowSRGBInputs(gammaCorrect);
+ }
+
+ void setXPFactory(sk_sp<GrXPFactory> xpFactory) {
+ fXPFactory = std::move(xpFactory);
+ }
+
+ void setPorterDuffXPFactory(SkBlendMode mode) {
+ fXPFactory = GrPorterDuffXPFactory::Make((SkXfermode::Mode)mode);
+ }
+
+ void setPorterDuffXPFactory(SkXfermode::Mode mode) {
+ fXPFactory = GrPorterDuffXPFactory::Make(mode);
+ }
+
+ void setCoverageSetOpXPFactory(SkRegion::Op, bool invertCoverage = false);
+
+ /**
+ * Appends an additional color processor to the color computation.
+ */
+ void addColorFragmentProcessor(sk_sp<GrFragmentProcessor> fp) {
+ SkASSERT(fp);
+ fUsesDistanceVectorField |= fp->usesDistanceVectorField();
+ fColorFragmentProcessors.push_back(std::move(fp));
+ }
+
+ /**
+ * Appends an additional coverage processor to the coverage computation.
+ */
+ void addCoverageFragmentProcessor(sk_sp<GrFragmentProcessor> fp) {
+ SkASSERT(fp);
+ fUsesDistanceVectorField |= fp->usesDistanceVectorField();
+ fCoverageFragmentProcessors.push_back(std::move(fp));
+ }
+
+ /**
+ * Helpers for adding color or coverage effects that sample a texture. The matrix is applied
+ * to the src space position to compute texture coordinates.
+ */
+ void addColorTextureProcessor(GrTexture*, sk_sp<GrColorSpaceXform>, const SkMatrix&);
+ void addCoverageTextureProcessor(GrTexture*, const SkMatrix&);
+ void addColorTextureProcessor(GrTexture*, sk_sp<GrColorSpaceXform>, const SkMatrix&,
+ const GrTextureParams&);
+ void addCoverageTextureProcessor(GrTexture*, const SkMatrix&, const GrTextureParams&);
+
+ int numColorFragmentProcessors() const { return fColorFragmentProcessors.count(); }
+ int numCoverageFragmentProcessors() const { return fCoverageFragmentProcessors.count(); }
+ int numTotalFragmentProcessors() const { return this->numColorFragmentProcessors() +
+ this->numCoverageFragmentProcessors(); }
+
+ GrXPFactory* getXPFactory() const {
+ return fXPFactory.get();
+ }
+
+ GrFragmentProcessor* getColorFragmentProcessor(int i) const {
+ return fColorFragmentProcessors[i].get();
+ }
+ GrFragmentProcessor* getCoverageFragmentProcessor(int i) const {
+ return fCoverageFragmentProcessors[i].get();
+ }
+
+ GrPaint& operator=(const GrPaint& paint) {
+ fAntiAlias = paint.fAntiAlias;
+ fDisableOutputConversionToSRGB = paint.fDisableOutputConversionToSRGB;
+ fAllowSRGBInputs = paint.fAllowSRGBInputs;
+ fUsesDistanceVectorField = paint.fUsesDistanceVectorField;
+
+ fColor = paint.fColor;
+ fColorFragmentProcessors = paint.fColorFragmentProcessors;
+ fCoverageFragmentProcessors = paint.fCoverageFragmentProcessors;
+
+ fXPFactory = paint.fXPFactory;
+
+ return *this;
+ }
+
+ /**
+ * Returns true if the paint's output color will be constant after blending. If the result is
+ * true, constantColor will be updated to contain the constant color. Note that we can conflate
+ * coverage and color, so the actual values written to pixels with partial coverage may still
+ * not seem constant, even if this function returns true.
+ */
+ bool isConstantBlendedColor(GrColor* constantColor) const {
+ GrColor paintColor = this->getColor();
+ if (!fXPFactory && fColorFragmentProcessors.empty()) {
+ if (!GrColorIsOpaque(paintColor)) {
+ return false;
+ }
+ *constantColor = paintColor;
+ return true;
+ }
+ return this->internalIsConstantBlendedColor(paintColor, constantColor);
+ }
+
+private:
+ bool internalIsConstantBlendedColor(GrColor paintColor, GrColor* constantColor) const;
+
+ mutable sk_sp<GrXPFactory> fXPFactory;
+ SkSTArray<4, sk_sp<GrFragmentProcessor>> fColorFragmentProcessors;
+ SkSTArray<2, sk_sp<GrFragmentProcessor>> fCoverageFragmentProcessors;
+
+ bool fAntiAlias;
+ bool fDisableOutputConversionToSRGB;
+ bool fAllowSRGBInputs;
+ bool fUsesDistanceVectorField;
+
+ GrColor4f fColor;
+};
+
+#endif
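
A sketch (not from the Skia sources) of typical paint setup using the setters declared above; 'fp' stands for any sk_sp<GrFragmentProcessor> created elsewhere, and the GrColor4f (r, g, b, a) constructor is assumed from GrColor.h.

    GrPaint paint;
    paint.setColor4f(GrColor4f(0.0f, 0.5f, 1.0f, 1.0f));   // constant input color
    paint.setAntiAlias(true);
    paint.setPorterDuffXPFactory(SkBlendMode::kSrcOver);   // blend over the destination
    paint.addColorFragmentProcessor(std::move(fp));        // chain a color stage
    SkASSERT(1 == paint.numColorFragmentProcessors());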
diff --git a/gfx/skia/skia/include/gpu/GrProcessor.h b/gfx/skia/skia/include/gpu/GrProcessor.h
new file mode 100644
index 000000000..d374a7f3d
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrProcessor.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrProcessor_DEFINED
+#define GrProcessor_DEFINED
+
+#include "GrColor.h"
+#include "GrProcessorUnitTest.h"
+#include "GrProgramElement.h"
+#include "GrTextureAccess.h"
+#include "GrBufferAccess.h"
+#include "SkMath.h"
+#include "SkString.h"
+#include "../private/SkAtomics.h"
+
+class GrContext;
+class GrCoordTransform;
+class GrInvariantOutput;
+
+/**
+ * Used by processors to build their keys. It incorporates each per-processor key into a larger
+ * shader key.
+ */
+class GrProcessorKeyBuilder {
+public:
+ GrProcessorKeyBuilder(SkTArray<unsigned char, true>* data) : fData(data), fCount(0) {
+ SkASSERT(0 == fData->count() % sizeof(uint32_t));
+ }
+
+ void add32(uint32_t v) {
+ ++fCount;
+ fData->push_back_n(4, reinterpret_cast<uint8_t*>(&v));
+ }
+
+ /** Inserts count uint32_ts into the key. The returned pointer is only valid until the next
+ add*() call. */
+ uint32_t* SK_WARN_UNUSED_RESULT add32n(int count) {
+ SkASSERT(count > 0);
+ fCount += count;
+ return reinterpret_cast<uint32_t*>(fData->push_back_n(4 * count));
+ }
+
+ size_t size() const { return sizeof(uint32_t) * fCount; }
+
+private:
+ SkTArray<uint8_t, true>* fData; // unowned ptr to the larger key.
+ int fCount; // number of uint32_ts added to fData by the processor.
+};
+
+/** Provides custom shader code to the Ganesh shading pipeline. GrProcessor objects *must* be
+ immutable: after being constructed, their fields may not change.
+
+ Dynamically allocated GrProcessors are managed by a per-thread memory pool. The ref count of a
+ processor must reach 0 before the thread terminates and the pool is destroyed.
+ */
+class GrProcessor : public GrProgramElement {
+public:
+ virtual ~GrProcessor();
+
+ /** Human-meaningful string to identify this processor; may be embedded
+ in generated shader code. */
+ virtual const char* name() const = 0;
+
+ // Human-readable dump of all information
+ virtual SkString dumpInfo() const {
+ SkString str;
+ str.appendf("Missing data");
+ return str;
+ }
+
+ int numTextures() const { return fTextureAccesses.count(); }
+
+ /** Returns the access pattern for the texture at index. index must be valid according to
+ numTextures(). */
+ const GrTextureAccess& textureAccess(int index) const { return *fTextureAccesses[index]; }
+
+ /** Shortcut for textureAccess(index).getTexture(); */
+ GrTexture* texture(int index) const { return this->textureAccess(index).getTexture(); }
+
+ int numBuffers() const { return fBufferAccesses.count(); }
+
+ /** Returns the access pattern for the buffer at index. index must be valid according to
+ numBuffers(). */
+ const GrBufferAccess& bufferAccess(int index) const {
+ return *fBufferAccesses[index];
+ }
+
+ /**
+ * Platform specific built-in features that a processor can request for the fragment shader.
+ */
+ enum RequiredFeatures {
+ kNone_RequiredFeatures = 0,
+ kFragmentPosition_RequiredFeature = 1 << 0,
+ kSampleLocations_RequiredFeature = 1 << 1
+ };
+
+ GR_DECL_BITFIELD_OPS_FRIENDS(RequiredFeatures);
+
+ RequiredFeatures requiredFeatures() const { return fRequiredFeatures; }
+
+ void* operator new(size_t size);
+ void operator delete(void* target);
+
+ void* operator new(size_t size, void* placement) {
+ return ::operator new(size, placement);
+ }
+ void operator delete(void* target, void* placement) {
+ ::operator delete(target, placement);
+ }
+
+ /**
+ * Helper for down-casting to a GrProcessor subclass
+ */
+ template <typename T> const T& cast() const { return *static_cast<const T*>(this); }
+
+ uint32_t classID() const { SkASSERT(kIllegalProcessorClassID != fClassID); return fClassID; }
+
+protected:
+ GrProcessor() : fClassID(kIllegalProcessorClassID), fRequiredFeatures(kNone_RequiredFeatures) {}
+
+ /**
+ * Subclasses call these from their constructor to register sampler sources. The processor
+ * subclass manages the lifetime of the objects (these functions only store pointers). The
+ * GrTextureAccess and/or GrBufferAccess instances are typically member fields of the
+ * GrProcessor subclass. These must only be called from the constructor because GrProcessors
+ * are immutable.
+ */
+ virtual void addTextureAccess(const GrTextureAccess* textureAccess);
+ virtual void addBufferAccess(const GrBufferAccess* bufferAccess);
+
+ bool hasSameSamplers(const GrProcessor&) const;
+
+ /**
+ * If the processor will generate code that uses platform specific built-in features, then it
+ * must call these methods from its constructor. Otherwise, requests to use these features will
+ * be denied.
+ */
+ void setWillReadFragmentPosition() { fRequiredFeatures |= kFragmentPosition_RequiredFeature; }
+ void setWillUseSampleLocations() { fRequiredFeatures |= kSampleLocations_RequiredFeature; }
+
+ void combineRequiredFeatures(const GrProcessor& other) {
+ fRequiredFeatures |= other.fRequiredFeatures;
+ }
+
+ template <typename PROC_SUBCLASS> void initClassID() {
+ static uint32_t kClassID = GenClassID();
+ fClassID = kClassID;
+ }
+
+ uint32_t fClassID;
+ SkSTArray<4, const GrTextureAccess*, true> fTextureAccesses;
+ SkSTArray<2, const GrBufferAccess*, true> fBufferAccesses;
+
+private:
+ static uint32_t GenClassID() {
+ // gCurrProcessorClassID has been initialized to kIllegalProcessorClassID. The
+ // atomic inc returns the old value not the incremented value. So we add
+ // 1 to the returned value.
+ uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrProcessorClassID)) + 1;
+ if (!id) {
+ SkFAIL("This should never wrap as it should only be called once for each GrProcessor "
+ "subclass.");
+ }
+ return id;
+ }
+
+ enum {
+ kIllegalProcessorClassID = 0,
+ };
+ static int32_t gCurrProcessorClassID;
+
+ RequiredFeatures fRequiredFeatures;
+
+ typedef GrProgramElement INHERITED;
+};
+
+GR_MAKE_BITFIELD_OPS(GrProcessor::RequiredFeatures);
+
+#endif
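
A sketch (not from the Skia sources) of the constructor-time duties described above: register sampler sources, set the class ID, and request built-in features. GrMyEffect is hypothetical; real processors derive from a concrete subclass such as GrFragmentProcessor, whose GLSL-related overrides are omitted here, and GrTextureAccess's single-texture constructor is assumed from GrTextureAccess.h.

    class GrMyEffect : public GrFragmentProcessor {   // required GLSL overrides omitted
    public:
        GrMyEffect(GrTexture* texture)
            : fTextureAccess(texture) {
            this->initClassID<GrMyEffect>();          // stable per-subclass ID for shader keys
            this->addTextureAccess(&fTextureAccess);  // constructor-only, per the comment above
            this->setWillReadFragmentPosition();      // opt in to a built-in feature
        }
        const char* name() const override { return "MyEffect"; }

    private:
        GrTextureAccess fTextureAccess;               // owned by the processor, as required
    };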
diff --git a/gfx/skia/skia/include/gpu/GrProcessorUnitTest.h b/gfx/skia/skia/include/gpu/GrProcessorUnitTest.h
new file mode 100644
index 000000000..4f26665cb
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrProcessorUnitTest.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrProcessorUnitTest_DEFINED
+#define GrProcessorUnitTest_DEFINED
+
+#include "../private/SkTArray.h"
+#include "GrTestUtils.h"
+#include "SkTypes.h"
+
+class SkMatrix;
+class GrCaps;
+class GrContext;
+class GrDrawContext;
+struct GrProcessorTestData;
+
+namespace GrProcessorUnitTest {
+
+// Used to access the dummy textures in TestCreate procs.
+enum {
+ kSkiaPMTextureIdx = 0,
+ kAlphaTextureIdx = 1,
+};
+
+/** This allows parent FPs to implement a test create with known leaf children in order to avoid
+creating an unbounded FP tree which may overflow various shader limits. */
+sk_sp<GrFragmentProcessor> MakeChildFP(GrProcessorTestData*);
+
+}
+
+/*
+ * GrProcessorTestData is an argument struct to TestCreate functions
+ * fTextures are valid textures that can optionally be used to construct
+ * GrTextureAccesses. The first texture has config kSkia8888_GrPixelConfig and the second has
+ * kAlpha_8_GrPixelConfig. TestCreate functions are also free to create additional textures using
+ * the GrContext.
+ */
+struct GrProcessorTestData {
+ GrProcessorTestData(SkRandom* random,
+ GrContext* context,
+ const GrCaps* caps,
+ const GrDrawContext* drawContext,
+ GrTexture* textures[2])
+ : fRandom(random)
+ , fContext(context)
+ , fCaps(caps)
+ , fDrawContext(drawContext) {
+ fTextures[0] = textures[0];
+ fTextures[1] = textures[1];
+ }
+ SkRandom* fRandom;
+ GrContext* fContext;
+ const GrCaps* fCaps;
+ const GrDrawContext* fDrawContext;
+ GrTexture* fTextures[2];
+};
+
+#if SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
+
+class GrProcessor;
+class GrTexture;
+
+template <class Processor> class GrProcessorTestFactory : SkNoncopyable {
+public:
+ typedef sk_sp<Processor> (*MakeProc)(GrProcessorTestData*);
+
+ GrProcessorTestFactory(MakeProc makeProc) {
+ fMakeProc = makeProc;
+ GetFactories()->push_back(this);
+ }
+
+ /** Pick a random factory function and create a processor. */
+ static sk_sp<Processor> Make(GrProcessorTestData* data) {
+ VerifyFactoryCount();
+ SkASSERT(GetFactories()->count());
+ uint32_t idx = data->fRandom->nextRangeU(0, GetFactories()->count() - 1);
+ return MakeIdx(idx, data);
+ }
+
+ /** Number of registered factory functions */
+ static int Count() { return GetFactories()->count(); }
+
+ /** Use factory function at Index idx to create a processor. */
+ static sk_sp<Processor> MakeIdx(int idx, GrProcessorTestData* data) {
+ GrProcessorTestFactory<Processor>* factory = (*GetFactories())[idx];
+ return factory->fMakeProc(data);
+ }
+
+ /*
+ * A test function which verifies the count of factories.
+ */
+ static void VerifyFactoryCount();
+
+private:
+ MakeProc fMakeProc;
+
+ static SkTArray<GrProcessorTestFactory<Processor>*, true>* GetFactories();
+};
+
+/** GrProcessor subclasses should insert this macro in their declaration to be included in the
+ * program generation unit test.
+ */
+#define GR_DECLARE_GEOMETRY_PROCESSOR_TEST \
+ static GrProcessorTestFactory<GrGeometryProcessor> gTestFactory SK_UNUSED; \
+ static sk_sp<GrGeometryProcessor> TestCreate(GrProcessorTestData*)
+
+#define GR_DECLARE_FRAGMENT_PROCESSOR_TEST \
+ static GrProcessorTestFactory<GrFragmentProcessor> gTestFactory SK_UNUSED; \
+ static sk_sp<GrFragmentProcessor> TestCreate(GrProcessorTestData*)
+
+#define GR_DECLARE_XP_FACTORY_TEST \
+ static GrProcessorTestFactory<GrXPFactory> gTestFactory SK_UNUSED; \
+ static sk_sp<GrXPFactory> TestCreate(GrProcessorTestData*)
+
+/** GrProcessor subclasses should insert this macro in their implementation file. They must then
+ * also implement this static function:
+ * GrProcessor* TestCreate(GrProcessorTestData*);
+ */
+#define GR_DEFINE_FRAGMENT_PROCESSOR_TEST(Effect) \
+ GrProcessorTestFactory<GrFragmentProcessor> Effect :: gTestFactory(Effect :: TestCreate)
+
+#define GR_DEFINE_XP_FACTORY_TEST(Factory) \
+ GrProcessorTestFactory<GrXPFactory> Factory :: gTestFactory(Factory :: TestCreate)
+
+#define GR_DEFINE_GEOMETRY_PROCESSOR_TEST(Effect) \
+ GrProcessorTestFactory<GrGeometryProcessor> Effect :: gTestFactory(Effect :: TestCreate)
+
+#else // !SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
+
+// The unit test relies on static initializers. Just declare the TestCreate function so that
+// its definitions will compile.
+#define GR_DECLARE_FRAGMENT_PROCESSOR_TEST \
+ static sk_sp<GrFragmentProcessor> TestCreate(GrProcessorTestData*)
+#define GR_DEFINE_FRAGMENT_PROCESSOR_TEST(X)
+
+// The unit test relies on static initializers. Just declare the TestCreate function so that
+// its definitions will compile.
+#define GR_DECLARE_XP_FACTORY_TEST \
+ static sk_sp<GrXPFactory> TestCreate(GrProcessorTestData*)
+#define GR_DEFINE_XP_FACTORY_TEST(X)
+
+// The unit test relies on static initializers. Just declare the TestCreate function so that
+// its definitions will compile.
+#define GR_DECLARE_GEOMETRY_PROCESSOR_TEST \
+ static sk_sp<GrGeometryProcessor> TestCreate(GrProcessorTestData*)
+#define GR_DEFINE_GEOMETRY_PROCESSOR_TEST(X)
+
+#endif // !SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
+#endif
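
A sketch (not from the Skia sources) of how a processor opts into the random-processor unit test. The hypothetical GrMyEffect and its Make() factory are assumptions; the declare macro goes in the class body and the define macro plus TestCreate go in the .cpp file.

    // In the class declaration:
    //     GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
    //
    // In the implementation file:
    GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrMyEffect);

    sk_sp<GrFragmentProcessor> GrMyEffect::TestCreate(GrProcessorTestData* d) {
        GrTexture* tex = d->fTextures[GrProcessorUnitTest::kSkiaPMTextureIdx];
        return GrMyEffect::Make(tex);   // Make() is a hypothetical factory
    }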
diff --git a/gfx/skia/skia/include/gpu/GrProgramElement.h b/gfx/skia/skia/include/gpu/GrProgramElement.h
new file mode 100644
index 000000000..ba9daf715
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrProgramElement.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrProgramElement_DEFINED
+#define GrProgramElement_DEFINED
+
+#include "../private/SkTArray.h"
+#include "SkRefCnt.h"
+
+class GrGpuResourceRef;
+
+/**
+ * Base class for GrProcessor. This exists to manage transitioning a GrProcessor from being owned by
+ * a client to being scheduled for execution. While a GrProcessor is ref'ed by drawing code its
+ * GrGpu resources must also be ref'ed to prevent incorrectly recycling them through the cache.
+ * However, once the GrProcessor is baked into a GrPipeline and the drawing code has stopped ref'ing
+ * it, its internal resources can be recycled in some cases.
+ *
+ * We track this using two types of refs on GrProgramElement. A regular ref is owned by any client
+ * that may continue to issue draws that use the GrProgramElement. The GrPipeline owns "pending
+ * executions" instead of refs. A pending execution is cleared by ~GrPipeline().
+ *
+ * While a GrProgramElement is ref'ed any resources it owns are also ref'ed. However, once it gets
+ * into the state where it has pending executions AND no refs then it converts its ownership of
+ * its GrGpuResources from refs to pending IOs. The pending IOs allow the cache to track when it is
+ * safe to recycle a resource even though we still have buffered GrBatches that read or write to the
+ * resource.
+ *
+ * To make this work all GrGpuResource objects owned by a GrProgramElement or derived classes
+ * (either directly or indirectly) must be wrapped in a GrGpuResourceRef and registered with the
+ * GrProgramElement using addGpuResource(). This allows the regular refs to be converted to pending
+ * IO events when the program element is scheduled for deferred execution.
+ *
+ * Moreover, a GrProgramElement that in turn owns other GrProgramElements must convert its ownership
+ * of its children to pending executions when its ref count reaches zero so that the GrGpuResources
+ * owned by the children GrProgramElements are correctly converted from ownership by ref to
+ * ownership by pending IO. Any GrProgramElement hierarchy is managed by subclasses which must
+ * implement notifyRefCntIsZero() in order to convert refs of children to pending executions.
+ */
+class GrProgramElement : public SkNoncopyable {
+public:
+ virtual ~GrProgramElement() {
+ // fRefCnt can be one when an effect is created statically using GR_CREATE_STATIC_EFFECT
+ SkASSERT((0 == fRefCnt || 1 == fRefCnt) && 0 == fPendingExecutions);
+ // Set to invalid values.
+ SkDEBUGCODE(fRefCnt = fPendingExecutions = -10;)
+ }
+
+ void ref() const {
+ this->validate();
+ // Once the ref cnt reaches zero it should never be ref'ed again.
+ SkASSERT(fRefCnt > 0);
+ ++fRefCnt;
+ this->validate();
+ }
+
+ void unref() const {
+ this->validate();
+ --fRefCnt;
+ if (0 == fRefCnt) {
+ this->notifyRefCntIsZero();
+ if (0 == fPendingExecutions) {
+ delete this;
+ return;
+ } else {
+ this->removeRefs();
+ }
+ }
+ this->validate();
+ }
+
+ /**
+ * Gets an id that is unique for this GrProgramElement object. This will never return 0.
+ */
+ uint32_t getUniqueID() const { return fUniqueID; }
+
+ void validate() const {
+#ifdef SK_DEBUG
+ SkASSERT(fRefCnt >= 0);
+ SkASSERT(fPendingExecutions >= 0);
+ SkASSERT(fRefCnt + fPendingExecutions > 0);
+#endif
+ }
+
+protected:
+ GrProgramElement() : fRefCnt(1), fPendingExecutions(0), fUniqueID(CreateUniqueID()) {}
+
+ /** Subclasses register their resources using this function. It is assumed the GrGpuResourceRef
+ is and will remain owned by the subclass and this function will retain a raw ptr. Once a
+ GrGpuResourceRef is registered its setResource must not be called.
+ */
+ void addGpuResource(const GrGpuResourceRef* res) {
+ fGpuResources.push_back(res);
+ }
+
+ void addPendingExecution() const {
+ this->validate();
+ SkASSERT(fRefCnt > 0);
+ if (0 == fPendingExecutions) {
+ this->addPendingIOs();
+ }
+ ++fPendingExecutions;
+ this->validate();
+ }
+
+ void completedExecution() const {
+ this->validate();
+ --fPendingExecutions;
+ if (0 == fPendingExecutions) {
+ if (0 == fRefCnt) {
+ delete this;
+ return;
+ } else {
+ this->pendingIOComplete();
+ }
+ }
+ this->validate();
+ }
+
+private:
+ /** This will be called when the ref cnt is zero. The object may or may not have pending
+ executions. */
+ virtual void notifyRefCntIsZero() const = 0;
+
+ static uint32_t CreateUniqueID();
+
+ void removeRefs() const;
+ void addPendingIOs() const;
+ void pendingIOComplete() const;
+
+ mutable int32_t fRefCnt;
+ // Count of deferred executions not yet issued to the 3D API.
+ mutable int32_t fPendingExecutions;
+ uint32_t fUniqueID;
+
+ SkSTArray<4, const GrGpuResourceRef*, true> fGpuResources;
+
+ // Only this class can access addPendingExecution() and completedExecution().
+ template <typename T> friend class GrPendingProgramElement;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+#endif
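
A sketch (not from the Skia sources) of the registration pattern the comment block above describes: a subclass wraps each GrGpuResource in a GrGpuResourceRef and hands the wrapper to addGpuResource() so that refs can later be converted to pending IO. The subclass name is hypothetical and it owns no child program elements.

    class GrMyProgramElement : public GrProgramElement {
    public:
        explicit GrMyProgramElement(GrRenderTarget* rt)
            : fRenderTarget(rt, kWrite_GrIOType) {
            this->addGpuResource(&fRenderTarget);   // registered once, in the constructor
        }

    private:
        void notifyRefCntIsZero() const override {} // no children to convert to pending execution

        GrTGpuResourceRef<GrRenderTarget> fRenderTarget;
    };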
diff --git a/gfx/skia/skia/include/gpu/GrRenderTarget.h b/gfx/skia/skia/include/gpu/GrRenderTarget.h
new file mode 100644
index 000000000..1f87787d4
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrRenderTarget.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRenderTarget_DEFINED
+#define GrRenderTarget_DEFINED
+
+#include "GrSurface.h"
+#include "SkRect.h"
+
+class GrCaps;
+class GrDrawTarget;
+class GrStencilAttachment;
+class GrRenderTargetPriv;
+
+/**
+ * GrRenderTarget represents a 2D buffer of pixels that can be rendered to.
+ * A context's render target is set by setRenderTarget(). Render targets are
+ * created by createTexture() with the kRenderTarget_SurfaceFlag flag.
+ * Additionally, GrContext provides methods for creating GrRenderTargets
+ * that wrap externally created render targets.
+ */
+class GrRenderTarget : virtual public GrSurface {
+public:
+ // GrSurface overrides
+ GrRenderTarget* asRenderTarget() override { return this; }
+ const GrRenderTarget* asRenderTarget() const override { return this; }
+
+ // GrRenderTarget
+ bool isStencilBufferMultisampled() const { return fDesc.fSampleCnt > 0; }
+
+ /**
+ * For our purposes, "Mixed Sampled" means the stencil buffer is multisampled but the color
+ * buffer is not.
+ */
+ bool isMixedSampled() const { return fFlags & Flags::kMixedSampled; }
+
+ /**
+ * "Unified Sampled" means the stencil and color buffers are both multisampled.
+ */
+ bool isUnifiedMultisampled() const { return fDesc.fSampleCnt > 0 && !this->isMixedSampled(); }
+
+ /**
+ * Returns the number of samples/pixel in the stencil buffer (Zero if non-MSAA).
+ */
+ int numStencilSamples() const { return fDesc.fSampleCnt; }
+
+ /**
+ * Returns the number of samples/pixel in the color buffer (Zero if non-MSAA or mixed sampled).
+ */
+ int numColorSamples() const { return this->isMixedSampled() ? 0 : fDesc.fSampleCnt; }
+
+ /**
+ * Call to indicate the multisample contents were modified such that the
+ * render target needs to be resolved before it can be used as texture. Gr
+ * tracks this for its own drawing and thus this only needs to be called
+ * when the render target has been modified outside of Gr. This has no
+ * effect on wrapped backend render targets.
+ *
+ * @param rect a rect bounding the area needing resolve. NULL indicates
+ * the whole RT needs resolving.
+ */
+ void flagAsNeedingResolve(const SkIRect* rect = NULL);
+
+ /**
+ * Call to override the region that needs to be resolved.
+ */
+ void overrideResolveRect(const SkIRect rect);
+
+ /**
+ * Call to indicate that GrRenderTarget was externally resolved. This may
+ * allow Gr to skip a redundant resolve step.
+ */
+ void flagAsResolved() { fResolveRect.setLargestInverted(); }
+
+ /**
+ * @return true if the GrRenderTarget requires MSAA resolving
+ */
+ bool needsResolve() const { return !fResolveRect.isEmpty(); }
+
+ /**
+ * Returns a rect bounding the region needing resolving.
+ */
+ const SkIRect& getResolveRect() const { return fResolveRect; }
+
+ /**
+ * Provide a performance hint that the render target's contents are allowed
+ * to become undefined.
+ */
+ void discard();
+
+ // an MSAA RT may require explicit resolving; it may auto-resolve (e.g. FBO
+ // 0 in GL), or be unresolvable because the client didn't give us the
+ // resolve destination.
+ enum ResolveType {
+ kCanResolve_ResolveType,
+ kAutoResolves_ResolveType,
+ kCantResolve_ResolveType,
+ };
+ virtual ResolveType getResolveType() const = 0;
+
+ /**
+ * Return the native ID or handle to the rendertarget, depending on the
+ * platform. e.g. on OpenGL, return the FBO ID.
+ */
+ virtual GrBackendObject getRenderTargetHandle() const = 0;
+
+ // Checked when this object is asked to attach a stencil buffer.
+ virtual bool canAttemptStencilAttachment() const = 0;
+
+ // Provides access to functions that aren't part of the public API.
+ GrRenderTargetPriv renderTargetPriv();
+ const GrRenderTargetPriv renderTargetPriv() const;
+
+ void setLastDrawTarget(GrDrawTarget* dt);
+ GrDrawTarget* getLastDrawTarget() { return fLastDrawTarget; }
+
+protected:
+ enum class Flags {
+ kNone = 0,
+ kMixedSampled = 1 << 0,
+ kWindowRectsSupport = 1 << 1
+ };
+
+ GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(Flags);
+
+ GrRenderTarget(GrGpu*, const GrSurfaceDesc&, Flags = Flags::kNone,
+ GrStencilAttachment* = nullptr);
+ ~GrRenderTarget() override;
+
+ // override of GrResource
+ void onAbandon() override;
+ void onRelease() override;
+
+private:
+ // Allows the backends to perform any additional work that is required for attaching a
+ // GrStencilAttachment. When this is called, the GrStencilAttachment has already been put onto
+ // the GrRenderTarget. This function must return false if any failures occur when completing the
+ // stencil attachment.
+ virtual bool completeStencilAttachment() = 0;
+
+ friend class GrRenderTargetPriv;
+
+ GrStencilAttachment* fStencilAttachment;
+ uint8_t fMultisampleSpecsID;
+ Flags fFlags;
+
+ SkIRect fResolveRect;
+
+ // The last drawTarget that wrote to or is currently going to write to this renderTarget.
+ // The drawTarget can be closed (e.g., no draw context is currently bound
+ // to this renderTarget).
+ // This back-pointer is required so that we can add a dependency between
+ // the drawTarget used to create the current contents of this renderTarget
+ // and the drawTarget of a destination renderTarget to which this one is being drawn.
+ GrDrawTarget* fLastDrawTarget;
+
+ typedef GrSurface INHERITED;
+};
+
+GR_MAKE_BITFIELD_CLASS_OPS(GrRenderTarget::Flags);
+
+#endif
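
A sketch (not from the Skia sources) of the resolve bookkeeping for an MSAA target that was modified outside of Gr; 'rt' is assumed to be a GrRenderTarget* the client holds.

    rt->flagAsNeedingResolve();   // NULL rect: the whole target needs resolving

    if (rt->needsResolve() &&
        GrRenderTarget::kCanResolve_ResolveType == rt->getResolveType()) {
        // ... issue the MSAA resolve through the backend, then record it:
        rt->flagAsResolved();
    }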
diff --git a/gfx/skia/skia/include/gpu/GrResourceKey.h b/gfx/skia/skia/include/gpu/GrResourceKey.h
new file mode 100644
index 000000000..0ead35ea3
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrResourceKey.h
@@ -0,0 +1,322 @@
+
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrResourceKey_DEFINED
+#define GrResourceKey_DEFINED
+
+#include "../private/SkTemplates.h"
+#include "GrTypes.h"
+#include "SkData.h"
+#include "../private/SkOnce.h"
+
+uint32_t GrResourceKeyHash(const uint32_t* data, size_t size);
+
+/**
+ * Base class for all GrGpuResource cache keys. There are two types of cache keys. Refer to the
+ * comments for each key type below.
+ */
+class GrResourceKey {
+public:
+ uint32_t hash() const {
+ this->validate();
+ return fKey[kHash_MetaDataIdx];
+ }
+
+ size_t size() const {
+ this->validate();
+ SkASSERT(this->isValid());
+ return this->internalSize();
+ }
+
+protected:
+ static const uint32_t kInvalidDomain = 0;
+
+ GrResourceKey() { this->reset(); }
+
+ /** Reset to an invalid key. */
+ void reset() {
+ GR_STATIC_ASSERT((uint16_t)kInvalidDomain == kInvalidDomain);
+ fKey.reset(kMetaDataCnt);
+ fKey[kHash_MetaDataIdx] = 0;
+ fKey[kDomainAndSize_MetaDataIdx] = kInvalidDomain;
+ }
+
+ bool operator==(const GrResourceKey& that) const {
+ return this->hash() == that.hash() &&
+ 0 == memcmp(&fKey[kHash_MetaDataIdx + 1],
+ &that.fKey[kHash_MetaDataIdx + 1],
+ this->internalSize() - sizeof(uint32_t));
+ }
+
+ GrResourceKey& operator=(const GrResourceKey& that) {
+ SkASSERT(that.isValid());
+ if (this != &that) {
+ size_t bytes = that.size();
+ SkASSERT(SkIsAlign4(bytes));
+ fKey.reset(SkToInt(bytes / sizeof(uint32_t)));
+ memcpy(fKey.get(), that.fKey.get(), bytes);
+ this->validate();
+ }
+ return *this;
+ }
+
+ bool isValid() const { return kInvalidDomain != this->domain(); }
+
+ uint32_t domain() const { return fKey[kDomainAndSize_MetaDataIdx] & 0xffff; }
+
+ /** size of the key data, excluding meta-data (hash, domain, etc). */
+ size_t dataSize() const { return this->size() - 4 * kMetaDataCnt; }
+
+ /** ptr to the key data, excluding meta-data (hash, domain, etc). */
+ const uint32_t* data() const {
+ this->validate();
+ return &fKey[kMetaDataCnt];
+ }
+
+ /** Used to initialize a key. */
+ class Builder {
+ public:
+ Builder(GrResourceKey* key, uint32_t domain, int data32Count) : fKey(key) {
+ SkASSERT(data32Count >= 0);
+ SkASSERT(domain != kInvalidDomain);
+ key->fKey.reset(kMetaDataCnt + data32Count);
+ int size = (data32Count + kMetaDataCnt) * sizeof(uint32_t);
+ SkASSERT(SkToU16(size) == size);
+ SkASSERT(SkToU16(domain) == domain);
+ key->fKey[kDomainAndSize_MetaDataIdx] = domain | (size << 16);
+ }
+
+ ~Builder() { this->finish(); }
+
+ void finish() {
+ if (NULL == fKey) {
+ return;
+ }
+ GR_STATIC_ASSERT(0 == kHash_MetaDataIdx);
+ uint32_t* hash = &fKey->fKey[kHash_MetaDataIdx];
+ *hash = GrResourceKeyHash(hash + 1, fKey->internalSize() - sizeof(uint32_t));
+ fKey->validate();
+ fKey = NULL;
+ }
+
+ uint32_t& operator[](int dataIdx) {
+ SkASSERT(fKey);
+ SkDEBUGCODE(size_t dataCount = fKey->internalSize() / sizeof(uint32_t) - kMetaDataCnt;)
+ SkASSERT(SkToU32(dataIdx) < dataCount);
+ return fKey->fKey[kMetaDataCnt + dataIdx];
+ }
+
+ private:
+ GrResourceKey* fKey;
+ };
+
+private:
+ enum MetaDataIdx {
+ kHash_MetaDataIdx,
+ // The key domain and size are packed into a single uint32_t.
+ kDomainAndSize_MetaDataIdx,
+
+ kLastMetaDataIdx = kDomainAndSize_MetaDataIdx
+ };
+ static const uint32_t kMetaDataCnt = kLastMetaDataIdx + 1;
+
+ size_t internalSize() const {
+ return fKey[kDomainAndSize_MetaDataIdx] >> 16;
+ }
+
+ void validate() const {
+ SkASSERT(fKey[kHash_MetaDataIdx] ==
+ GrResourceKeyHash(&fKey[kHash_MetaDataIdx] + 1,
+ this->internalSize() - sizeof(uint32_t)));
+ SkASSERT(SkIsAlign4(this->internalSize()));
+ }
+
+ friend class TestResource; // For unit test to access kMetaDataCnt.
+
+ // bmp textures require 5 uint32_t values.
+ SkAutoSTMalloc<kMetaDataCnt + 5, uint32_t> fKey;
+};
+
+/**
+ * A key used for scratch resources. There are three important rules about scratch keys:
+ * * Multiple resources can share the same scratch key. Therefore resources assigned the same
+ * scratch key should be interchangeable with respect to the code that uses them.
+ * * A resource can have at most one scratch key and it is set at resource creation by the
+ * resource itself.
+ * * When a scratch resource is ref'ed it will not be returned from the
+ * cache for a subsequent cache request until all refs are released. This facilitates using
+ * a scratch key for multiple render-to-texture scenarios. An example is a separable blur:
+ *
+ * GrTexture* texture[2];
+ * texture[0] = get_scratch_texture(scratchKey);
+ * texture[1] = get_scratch_texture(scratchKey); // texture[0] is already owned so we will get a
+ * // different one for texture[1]
+ * draw_mask(texture[0], path); // draws path mask to texture[0]
+ * blur_x(texture[0], texture[1]); // blurs texture[0] in x and stores result in texture[1]
+ * blur_y(texture[1], texture[0]); // blurs texture[1] in y and stores result in texture[0]
+ * texture[1]->unref(); // texture 1 can now be recycled for the next request with scratchKey
+ * consume_blur(texture[0]);
+ * texture[0]->unref(); // texture 0 can now be recycled for the next request with scratchKey
+ */
+class GrScratchKey : public GrResourceKey {
+private:
+ typedef GrResourceKey INHERITED;
+
+public:
+ /** Uniquely identifies the type of resource that is cached as scratch. */
+ typedef uint32_t ResourceType;
+
+ /** Generate a unique ResourceType. */
+ static ResourceType GenerateResourceType();
+
+ /** Creates an invalid scratch key. It must be initialized using a Builder object before use. */
+ GrScratchKey() {}
+
+ GrScratchKey(const GrScratchKey& that) { *this = that; }
+
+ /** reset() returns the key to the invalid state. */
+ using INHERITED::reset;
+
+ using INHERITED::isValid;
+
+ ResourceType resourceType() const { return this->domain(); }
+
+ GrScratchKey& operator=(const GrScratchKey& that) {
+ this->INHERITED::operator=(that);
+ return *this;
+ }
+
+ bool operator==(const GrScratchKey& that) const {
+ return this->INHERITED::operator==(that);
+ }
+ bool operator!=(const GrScratchKey& that) const { return !(*this == that); }
+
+ class Builder : public INHERITED::Builder {
+ public:
+ Builder(GrScratchKey* key, ResourceType type, int data32Count)
+ : INHERITED::Builder(key, type, data32Count) {}
+ };
+};
+
+/**
+ * A key that allows for exclusive use of a resource for a use case (AKA "domain"). There are three
+ * rules governing the use of unique keys:
+ * * Only one resource can have a given unique key at a time. Hence, "unique".
+ * * A resource can have at most one unique key at a time.
+ * * Unlike scratch keys, multiple requests for a unique key will return the same
+ * resource even if the resource already has refs.
+ * This key type allows a code path to create cached resources for which it is the exclusive user.
+ * The code path creates a domain which it sets on its keys. This guarantees that there are no
+ * cross-domain collisions.
+ *
+ * Unique keys preempt scratch keys. While a resource has a unique key it is inaccessible via its
+ * scratch key. It can become scratch again if the unique key is removed.
+ */
+class GrUniqueKey : public GrResourceKey {
+private:
+ typedef GrResourceKey INHERITED;
+
+public:
+ typedef uint32_t Domain;
+ /** Generate a Domain for unique keys. */
+ static Domain GenerateDomain();
+
+ /** Creates an invalid unique key. It must be initialized using a Builder object before use. */
+ GrUniqueKey() {}
+
+ GrUniqueKey(const GrUniqueKey& that) { *this = that; }
+
+ /** reset() returns the key to the invalid state. */
+ using INHERITED::reset;
+
+ using INHERITED::isValid;
+
+ GrUniqueKey& operator=(const GrUniqueKey& that) {
+ this->INHERITED::operator=(that);
+ this->setCustomData(sk_ref_sp(that.getCustomData()));
+ return *this;
+ }
+
+ bool operator==(const GrUniqueKey& that) const {
+ return this->INHERITED::operator==(that);
+ }
+ bool operator!=(const GrUniqueKey& that) const { return !(*this == that); }
+
+ void setCustomData(sk_sp<SkData> data) {
+ fData = std::move(data);
+ }
+ SkData* getCustomData() const {
+ return fData.get();
+ }
+
+ class Builder : public INHERITED::Builder {
+ public:
+ Builder(GrUniqueKey* key, Domain domain, int data32Count)
+ : INHERITED::Builder(key, domain, data32Count) {}
+
+ /** Used to build a key that wraps another key and adds additional data. */
+ Builder(GrUniqueKey* key, const GrUniqueKey& innerKey, Domain domain,
+ int extraData32Cnt)
+ : INHERITED::Builder(key, domain, Data32CntForInnerKey(innerKey) + extraData32Cnt) {
+ SkASSERT(&innerKey != key);
+ // add the inner key to the end of the key so that op[] can be indexed normally.
+ uint32_t* innerKeyData = &this->operator[](extraData32Cnt);
+ const uint32_t* srcData = innerKey.data();
+ (*innerKeyData++) = innerKey.domain();
+ memcpy(innerKeyData, srcData, innerKey.dataSize());
+ }
+
+ private:
+ static int Data32CntForInnerKey(const GrUniqueKey& innerKey) {
+ // key data + domain
+ return SkToInt((innerKey.dataSize() >> 2) + 1);
+ }
+ };
+
+private:
+ sk_sp<SkData> fData;
+};
+
+/**
+ * It is common to need a frequently reused GrUniqueKey where the only requirement is that the key
+ * is unique. These macros create such a key in a thread safe manner so the key can be truly global
+ * and only constructed once.
+ */
+
+/** Place outside of function/class definitions. */
+#define GR_DECLARE_STATIC_UNIQUE_KEY(name) static SkOnce name##_once
+
+/** Place inside function where the key is used. */
+#define GR_DEFINE_STATIC_UNIQUE_KEY(name) \
+ static SkAlignedSTStorage<1, GrUniqueKey> name##_storage; \
+ name##_once(gr_init_static_unique_key_once, &name##_storage); \
+ static const GrUniqueKey& name = *reinterpret_cast<GrUniqueKey*>(name##_storage.get());
+
+static inline void gr_init_static_unique_key_once(SkAlignedSTStorage<1,GrUniqueKey>* keyStorage) {
+ GrUniqueKey* key = new (keyStorage->get()) GrUniqueKey;
+ GrUniqueKey::Builder builder(key, GrUniqueKey::GenerateDomain(), 0);
+}
+
+// The cache listens for these messages to purge junk resources proactively.
+class GrUniqueKeyInvalidatedMessage {
+public:
+ explicit GrUniqueKeyInvalidatedMessage(const GrUniqueKey& key) : fKey(key) {}
+
+ GrUniqueKeyInvalidatedMessage(const GrUniqueKeyInvalidatedMessage& that) : fKey(that.fKey) {}
+
+ GrUniqueKeyInvalidatedMessage& operator=(const GrUniqueKeyInvalidatedMessage& that) {
+ fKey = that.fKey;
+ return *this;
+ }
+
+ const GrUniqueKey& key() const { return fKey; }
+
+private:
+ GrUniqueKey fKey;
+};
+#endif
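
A sketch (not from the Skia sources) of building a unique key with two 32-bit values, using only the API above; the domain variable and function name are hypothetical.

    static void make_size_key(GrUniqueKey* key, int width, int height) {
        static const GrUniqueKey::Domain kMyDomain = GrUniqueKey::GenerateDomain();
        GrUniqueKey::Builder builder(key, kMyDomain, 2);   // two uint32_ts of key data
        builder[0] = SkToU32(width);
        builder[1] = SkToU32(height);
        // Builder's destructor hashes the data and finishes the key.
    }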
diff --git a/gfx/skia/skia/include/gpu/GrShaderVar.h b/gfx/skia/skia/include/gpu/GrShaderVar.h
new file mode 100644
index 000000000..78e08e0d9
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrShaderVar.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrShaderVar_DEFINED
+#define GrShaderVar_DEFINED
+
+#include "GrTypesPriv.h"
+#include "SkString.h"
+
+class GrShaderVar {
+public:
+ /**
+ * Early versions of GLSL have Varying and Attribute; those are later
+ * deprecated, but we still need to know whether a Varying variable
+ * should be treated as In or Out.
+ *
+ * TODO This really shouldn't live here, but until we have C++11, there is really no good way
+ * to write extensible enums. In reality, only none, out, in, inout, and uniform really
+ * make sense on this base class
+ */
+ enum TypeModifier {
+ kNone_TypeModifier,
+ kOut_TypeModifier,
+ kIn_TypeModifier,
+ kInOut_TypeModifier,
+ kUniform_TypeModifier,
+ // GL Specific types below
+ kAttribute_TypeModifier,
+ kVaryingIn_TypeModifier,
+ kVaryingOut_TypeModifier
+ };
+
+ /**
+ * Defaults to a float with no precision specifier
+ */
+ GrShaderVar()
+ : fType(kFloat_GrSLType)
+ , fTypeModifier(kNone_TypeModifier)
+ , fCount(kNonArray)
+ , fPrecision(kDefault_GrSLPrecision) {
+ }
+
+ GrShaderVar(const SkString& name, GrSLType type, int arrayCount = kNonArray,
+ GrSLPrecision precision = kDefault_GrSLPrecision)
+ : fType(type)
+ , fTypeModifier(kNone_TypeModifier)
+ , fName(name)
+ , fCount(arrayCount)
+ , fPrecision(precision) {
+ SkASSERT(kVoid_GrSLType != type);
+ }
+
+ GrShaderVar(const char* name, GrSLType type, int arrayCount = kNonArray,
+ GrSLPrecision precision = kDefault_GrSLPrecision)
+ : fType(type)
+ , fTypeModifier(kNone_TypeModifier)
+ , fName(name)
+ , fCount(arrayCount)
+ , fPrecision(precision) {
+ SkASSERT(kVoid_GrSLType != type);
+ }
+
+ GrShaderVar(const char* name, GrSLType type, TypeModifier typeModifier,
+ int arrayCount = kNonArray, GrSLPrecision precision = kDefault_GrSLPrecision)
+ : fType(type)
+ , fTypeModifier(typeModifier)
+ , fName(name)
+ , fCount(arrayCount)
+ , fPrecision(precision) {
+ SkASSERT(kVoid_GrSLType != type);
+ }
+
+ /**
+ * Values for array count that have special meaning. We allow 1-sized arrays.
+ */
+ enum {
+ kNonArray = 0, // not an array
+ kUnsizedArray = -1, // an unsized array (declared with [])
+ };
+
+ void set(GrSLType type,
+ const SkString& name,
+ TypeModifier typeModifier = kNone_TypeModifier,
+ GrSLPrecision precision = kDefault_GrSLPrecision,
+ int count = kNonArray) {
+ SkASSERT(kVoid_GrSLType != type);
+ fType = type;
+ fTypeModifier = typeModifier;
+ fName = name;
+ fCount = count;
+ fPrecision = precision;
+ }
+
+ void set(GrSLType type,
+ const char* name,
+ TypeModifier typeModifier = kNone_TypeModifier,
+ GrSLPrecision precision = kDefault_GrSLPrecision,
+ int count = kNonArray) {
+ SkASSERT(kVoid_GrSLType != type);
+ fType = type;
+ fTypeModifier = typeModifier;
+ fName = name;
+ fCount = count;
+ fPrecision = precision;
+ }
+
+ /**
+ * Is the var an array.
+ */
+ bool isArray() const { return kNonArray != fCount; }
+ /**
+ * Is this an unsized array (i.e. declared with []).
+ */
+ bool isUnsizedArray() const { return kUnsizedArray == fCount; }
+ /**
+ * Get the array length of the var.
+ */
+ int getArrayCount() const { return fCount; }
+ /**
+ * Set the array length of the var
+ */
+ void setArrayCount(int count) { fCount = count; }
+ /**
+ * Set to be a non-array.
+ */
+ void setNonArray() { fCount = kNonArray; }
+ /**
+ * Set to be an unsized array.
+ */
+ void setUnsizedArray() { fCount = kUnsizedArray; }
+
+ /**
+ * Access the var name as a writable string
+ */
+ SkString* accessName() { return &fName; }
+ /**
+ * Set the var name
+ */
+ void setName(const SkString& n) { fName = n; }
+ void setName(const char* n) { fName = n; }
+
+ /**
+ * Get the var name.
+ */
+ const SkString& getName() const { return fName; }
+
+ /**
+ * Shortcut for this->getName().c_str();
+ */
+ const char* c_str() const { return this->getName().c_str(); }
+
+ /**
+ * Get the type of the var
+ */
+ GrSLType getType() const { return fType; }
+ /**
+ * Set the type of the var
+ */
+ void setType(GrSLType type) { fType = type; }
+
+ TypeModifier getTypeModifier() const { return fTypeModifier; }
+ void setTypeModifier(TypeModifier type) { fTypeModifier = type; }
+
+ /**
+ * Get the precision of the var
+ */
+ GrSLPrecision getPrecision() const { return fPrecision; }
+
+ /**
+ * Set the precision of the var
+ */
+ void setPrecision(GrSLPrecision p) { fPrecision = p; }
+
+protected:
+ GrSLType fType;
+ TypeModifier fTypeModifier;
+ SkString fName;
+ int fCount;
+ GrSLPrecision fPrecision;
+};
+
+#endif
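
A small sketch (not from the Skia sources) of declaring a uniform float variable with the constructors above; kFloat_GrSLType comes from GrTypesPriv.h and kMedium_GrSLPrecision is assumed from the same header.

    GrShaderVar radius("u_radius", kFloat_GrSLType, GrShaderVar::kUniform_TypeModifier);
    radius.setPrecision(kMedium_GrSLPrecision);   // precision enum value assumed from GrTypesPriv.h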
diff --git a/gfx/skia/skia/include/gpu/GrSurface.h b/gfx/skia/skia/include/gpu/GrSurface.h
new file mode 100644
index 000000000..ac5c5fa1b
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrSurface.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrSurface_DEFINED
+#define GrSurface_DEFINED
+
+#include "GrTypes.h"
+#include "GrGpuResource.h"
+#include "SkImageInfo.h"
+#include "SkRect.h"
+
+class GrRenderTarget;
+class GrSurfacePriv;
+class GrTexture;
+
+class SK_API GrSurface : public GrGpuResource {
+public:
+ /**
+ * Retrieves the width of the surface.
+ */
+ int width() const { return fDesc.fWidth; }
+
+ /**
+ * Retrieves the height of the surface.
+ */
+ int height() const { return fDesc.fHeight; }
+
+ /**
+ * Helper that gets the width and height of the surface as a bounding rectangle.
+ */
+ SkRect getBoundsRect() const { return SkRect::MakeIWH(this->width(), this->height()); }
+
+ GrSurfaceOrigin origin() const {
+ SkASSERT(kTopLeft_GrSurfaceOrigin == fDesc.fOrigin || kBottomLeft_GrSurfaceOrigin == fDesc.fOrigin);
+ return fDesc.fOrigin;
+ }
+
+ /**
+ * Retrieves the pixel config specified when the surface was created.
+ * For render targets this can be kUnknown_GrPixelConfig
+ * if the client asked us to render to a target that has a pixel
+ * config that isn't equivalent to one of our configs.
+ */
+ GrPixelConfig config() const { return fDesc.fConfig; }
+
+ /**
+ * Return the descriptor describing the surface
+ */
+ const GrSurfaceDesc& desc() const { return fDesc; }
+
+ /**
+ * @return the texture associated with the surface, may be NULL.
+ */
+ virtual GrTexture* asTexture() { return NULL; }
+ virtual const GrTexture* asTexture() const { return NULL; }
+
+ /**
+ * @return the render target underlying this surface, may be NULL.
+ */
+ virtual GrRenderTarget* asRenderTarget() { return NULL; }
+ virtual const GrRenderTarget* asRenderTarget() const { return NULL; }
+
+ /**
+ * Reads a rectangle of pixels from the surface.
+ * @param left left edge of the rectangle to read (inclusive)
+ * @param top top edge of the rectangle to read (inclusive)
+ * @param width width of rectangle to read in pixels.
+ * @param height height of rectangle to read in pixels.
+ * @param config the pixel config of the destination buffer
+ * @param buffer memory to read the rectangle into.
+ * @param rowBytes number of bytes between consecutive rows. Zero means rows are tightly
+ * packed.
+ * @param pixelOpsFlags See the GrContext::PixelOpsFlags enum.
+ *
+ * @return true if the read succeeded, false if not. The read can fail because of an unsupported
+ * pixel config.
+ */
+ bool readPixels(int left, int top, int width, int height,
+ GrPixelConfig config,
+ void* buffer,
+ size_t rowBytes = 0,
+ uint32_t pixelOpsFlags = 0);
+
+ /**
+ * Copy the src pixels [buffer, rowbytes, pixelconfig] into the surface at the specified
+ * rectangle.
+ * @param left left edge of the rectangle to write (inclusive)
+ * @param top top edge of the rectangle to write (inclusive)
+ * @param width width of rectangle to write in pixels.
+ * @param height height of rectangle to write in pixels.
+ * @param config the pixel config of the source buffer
+ * @param buffer memory to read the rectangle from.
+ * @param rowBytes number of bytes between consecutive rows. Zero means rows are tightly
+ * packed.
+ * @param pixelOpsFlags See the GrContext::PixelOpsFlags enum.
+ *
+ * @return true if the write succeeded, false if not. The write can fail because of an
+ * unsupported pixel config.
+ */
+ bool writePixels(int left, int top, int width, int height,
+ GrPixelConfig config,
+ const void* buffer,
+ size_t rowBytes = 0,
+ uint32_t pixelOpsFlags = 0);
+
+ /**
+ * After this returns, any pending writes to the surface will be issued to the backend 3D API.
+ */
+ void flushWrites();
+
+ /** Access methods that are only to be used within Skia code. */
+ inline GrSurfacePriv surfacePriv();
+ inline const GrSurfacePriv surfacePriv() const;
+
+ typedef void* ReleaseCtx;
+ typedef void (*ReleaseProc)(ReleaseCtx);
+
+ void setRelease(ReleaseProc proc, ReleaseCtx ctx) {
+ fReleaseProc = proc;
+ fReleaseCtx = ctx;
+ }
+
+ static size_t WorstCaseSize(const GrSurfaceDesc& desc);
+
+protected:
+ // Methods made available via GrSurfacePriv
+ bool savePixels(const char* filename);
+ bool hasPendingRead() const;
+ bool hasPendingWrite() const;
+ bool hasPendingIO() const;
+
+ // Provides access to methods that should be public within Skia code.
+ friend class GrSurfacePriv;
+
+ GrSurface(GrGpu* gpu, const GrSurfaceDesc& desc)
+ : INHERITED(gpu)
+ , fDesc(desc)
+ , fReleaseProc(NULL)
+ , fReleaseCtx(NULL)
+ {}
+
+ ~GrSurface() override {
+ // check that invokeReleaseProc has been called (if needed)
+ SkASSERT(NULL == fReleaseProc);
+ }
+
+ GrSurfaceDesc fDesc;
+
+ void onRelease() override;
+ void onAbandon() override;
+
+private:
+ void invokeReleaseProc() {
+ if (fReleaseProc) {
+ fReleaseProc(fReleaseCtx);
+ fReleaseProc = NULL;
+ }
+ }
+
+ ReleaseProc fReleaseProc;
+ ReleaseCtx fReleaseCtx;
+
+ typedef GrGpuResource INHERITED;
+};
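+
+// Example: a minimal read-back sketch using the API above. 'surface' is assumed to be a
+// valid, already-created GrSurface*; error handling is omitted.
+//
+//   std::vector<uint32_t> pixels(surface->width() * surface->height());
+//   bool ok = surface->readPixels(0, 0, surface->width(), surface->height(),
+//                                 kRGBA_8888_GrPixelConfig, pixels.data(),
+//                                 0 /* rowBytes == 0 means tightly packed */);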
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrTestUtils.h b/gfx/skia/skia/include/gpu/GrTestUtils.h
new file mode 100644
index 000000000..17bf12af3
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrTestUtils.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTestUtils_DEFINED
+#define GrTestUtils_DEFINED
+
+#include "SkTypes.h"
+
+#ifdef GR_TEST_UTILS
+
+#include "GrColor.h"
+#include "GrColorSpaceXform.h"
+#include "SkPathEffect.h"
+#include "SkRandom.h"
+#include "SkStrokeRec.h"
+#include "../private/SkTemplates.h"
+
+class GrStyle;
+class SkMatrix;
+class SkPath;
+class SkRRect;
+struct SkRect;
+
+namespace GrTest {
+/**
+ * Helpers for use in Test functions.
+ */
+const SkMatrix& TestMatrix(SkRandom*);
+const SkMatrix& TestMatrixPreservesRightAngles(SkRandom*);
+const SkMatrix& TestMatrixRectStaysRect(SkRandom*);
+const SkMatrix& TestMatrixInvertible(SkRandom*);
+const SkMatrix& TestMatrixPerspective(SkRandom*);
+const SkRect& TestRect(SkRandom*);
+const SkRect& TestSquare(SkRandom*);
+const SkRRect& TestRRectSimple(SkRandom*);
+const SkPath& TestPath(SkRandom*);
+const SkPath& TestPathConvex(SkRandom*);
+SkStrokeRec TestStrokeRec(SkRandom*);
+/** Creates styles with dash path effects and null path effects */
+void TestStyle(SkRandom*, GrStyle*);
+sk_sp<SkColorSpace> TestColorSpace(SkRandom*);
+sk_sp<GrColorSpaceXform> TestColorXform(SkRandom*);
+
+// We have a simplified dash path effect here to avoid relying on SkDashPathEffect which
+// is in the optional build target effects.
+class TestDashPathEffect : public SkPathEffect {
+public:
+ static sk_sp<SkPathEffect> Make(const SkScalar* intervals, int count, SkScalar phase) {
+ return sk_sp<SkPathEffect>(new TestDashPathEffect(intervals, count, phase));
+ }
+
+ bool filterPath(SkPath* dst, const SkPath&, SkStrokeRec* , const SkRect*) const override;
+ DashType asADash(DashInfo* info) const override;
+ Factory getFactory() const override { return nullptr; }
+ void toString(SkString*) const override {}
+
+private:
+ TestDashPathEffect(const SkScalar* intervals, int count, SkScalar phase);
+
+ int fCount;
+ SkAutoTArray<SkScalar> fIntervals;
+ SkScalar fPhase;
+ SkScalar fInitialDashLength;
+ int fInitialDashIndex;
+ SkScalar fIntervalLength;
+};
+
+} // namespace GrTest
+
+static inline GrColor GrRandomColor(SkRandom* random) {
+ // There are only a few cases of random colors which interest us
+ enum ColorMode {
+ kAllOnes_ColorMode,
+ kAllZeros_ColorMode,
+ kAlphaOne_ColorMode,
+ kRandom_ColorMode,
+ kLast_ColorMode = kRandom_ColorMode
+ };
+
+ ColorMode colorMode = ColorMode(random->nextULessThan(kLast_ColorMode + 1));
+ GrColor color SK_INIT_TO_AVOID_WARNING;
+ switch (colorMode) {
+ case kAllOnes_ColorMode:
+ color = GrColorPackRGBA(0xFF, 0xFF, 0xFF, 0xFF);
+ break;
+ case kAllZeros_ColorMode:
+ color = GrColorPackRGBA(0, 0, 0, 0);
+ break;
+ case kAlphaOne_ColorMode:
+ color = GrColorPackRGBA(random->nextULessThan(256),
+ random->nextULessThan(256),
+ random->nextULessThan(256),
+ 0xFF);
+ break;
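+ // Bounding each component below by alpha keeps the packed value a valid
+ // premultiplied color, which GrColorIsPMAssert verifies afterwards.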
+ case kRandom_ColorMode: {
+ uint8_t alpha = random->nextULessThan(256);
+ color = GrColorPackRGBA(random->nextRangeU(0, alpha),
+ random->nextRangeU(0, alpha),
+ random->nextRangeU(0, alpha),
+ alpha);
+ break;
+ }
+ }
+ GrColorIsPMAssert(color);
+ return color;
+}
+
+static inline uint8_t GrRandomCoverage(SkRandom* random) {
+ enum CoverageMode {
+ kZero_CoverageMode,
+ kAllOnes_CoverageMode,
+ kRandom_CoverageMode,
+ kLast_CoverageMode = kRandom_CoverageMode
+ };
+
+ CoverageMode colorMode = CoverageMode(random->nextULessThan(kLast_CoverageMode + 1));
+ uint8_t coverage SK_INIT_TO_AVOID_WARNING;
+ switch (colorMode) {
+ case kZero_CoverageMode:
+ coverage = 0;
+ break;
+ case kAllOnes_CoverageMode:
+ coverage = 0xff;
+ break;
+ case kRandom_CoverageMode:
+ coverage = random->nextULessThan(256);
+ break;
+ }
+ return coverage;
+}
+
+#endif
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrTexture.h b/gfx/skia/skia/include/gpu/GrTexture.h
new file mode 100644
index 000000000..211f1937d
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrTexture.h
@@ -0,0 +1,74 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTexture_DEFINED
+#define GrTexture_DEFINED
+
+#include "GrSurface.h"
+#include "SkPoint.h"
+#include "SkRefCnt.h"
+
+class GrTextureParams;
+class GrTexturePriv;
+
+class GrTexture : virtual public GrSurface {
+public:
+ GrTexture* asTexture() override { return this; }
+ const GrTexture* asTexture() const override { return this; }
+ GrSLType samplerType() const { return fSamplerType; }
+
+ /**
+ * Return the native ID or handle to the texture, depending on the
+ * platform. e.g. on OpenGL, return the texture ID.
+ */
+ virtual GrBackendObject getTextureHandle() const = 0;
+
+ /**
+ * This function indicates that the texture parameters (wrap mode, filtering, ...) have been
+ * changed externally to Skia.
+ */
+ virtual void textureParamsModified() = 0;
+
+#ifdef SK_DEBUG
+ void validate() const {
+ this->INHERITED::validate();
+ this->validateDesc();
+ }
+#endif
+
+ /** Access methods that are only to be used within Skia code. */
+ inline GrTexturePriv texturePriv();
+ inline const GrTexturePriv texturePriv() const;
+
+protected:
+ GrTexture(GrGpu*, const GrSurfaceDesc&, GrSLType, bool wasMipMapDataProvided);
+
+ void validateDesc() const;
+
+private:
+ void computeScratchKey(GrScratchKey*) const override;
+ size_t onGpuMemorySize() const override;
+ void dirtyMipMaps(bool mipMapsDirty);
+
+ enum MipMapsStatus {
+ kNotAllocated_MipMapsStatus,
+ kAllocated_MipMapsStatus,
+ kValid_MipMapsStatus
+ };
+
+ GrSLType fSamplerType;
+ MipMapsStatus fMipMapsStatus;
+ int fMaxMipMapLevel;
+ SkSourceGammaTreatment fGammaTreatment;
+
+ friend class GrTexturePriv;
+
+ typedef GrSurface INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrTextureAccess.h b/gfx/skia/skia/include/gpu/GrTextureAccess.h
new file mode 100644
index 000000000..1b5de0ce9
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrTextureAccess.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextureAccess_DEFINED
+#define GrTextureAccess_DEFINED
+
+#include "GrGpuResourceRef.h"
+#include "GrTexture.h"
+#include "GrTextureParams.h"
+#include "SkRefCnt.h"
+#include "SkShader.h"
+
+/**
+ * Used to represent a texture that is required by a GrProcessor. It holds a GrTexture along with
+ * an associated GrTextureParams.
+ */
+class GrTextureAccess : public SkNoncopyable {
+public:
+ /**
+ * Must be initialized before adding to a GrProcessor's texture access list.
+ */
+ GrTextureAccess();
+
+ GrTextureAccess(GrTexture*, const GrTextureParams&);
+
+ explicit GrTextureAccess(GrTexture*,
+ GrTextureParams::FilterMode = GrTextureParams::kNone_FilterMode,
+ SkShader::TileMode tileXAndY = SkShader::kClamp_TileMode,
+ GrShaderFlags visibility = kFragment_GrShaderFlag);
+
+ void reset(GrTexture*, const GrTextureParams&,
+ GrShaderFlags visibility = kFragment_GrShaderFlag);
+ void reset(GrTexture*,
+ GrTextureParams::FilterMode = GrTextureParams::kNone_FilterMode,
+ SkShader::TileMode tileXAndY = SkShader::kClamp_TileMode,
+ GrShaderFlags visibility = kFragment_GrShaderFlag);
+
+ bool operator==(const GrTextureAccess& that) const {
+ return this->getTexture() == that.getTexture() &&
+ fParams == that.fParams &&
+ fVisibility == that.fVisibility;
+ }
+
+ bool operator!=(const GrTextureAccess& other) const { return !(*this == other); }
+
+ GrTexture* getTexture() const { return fTexture.get(); }
+ GrShaderFlags getVisibility() const { return fVisibility; }
+
+ /**
+ * For internal use by GrProcessor.
+ */
+ const GrGpuResourceRef* getProgramTexture() const { return &fTexture; }
+
+ const GrTextureParams& getParams() const { return fParams; }
+
+private:
+
+ typedef GrTGpuResourceRef<GrTexture> ProgramTexture;
+
+ ProgramTexture fTexture;
+ GrTextureParams fParams;
+ GrShaderFlags fVisibility;
+
+ typedef SkNoncopyable INHERITED;
+};
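+
+// Example: a brief usage sketch of the class above. 'texture' is assumed to be a valid
+// GrTexture*; the access pairs it with clamped bilerp sampling, visible to the fragment
+// stage by default.
+//
+//   GrTextureAccess access(texture, GrTextureParams::kBilerp_FilterMode,
+//                          SkShader::kClamp_TileMode);
+//   SkASSERT(access.getTexture() == texture);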
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrTextureParams.h b/gfx/skia/skia/include/gpu/GrTextureParams.h
new file mode 100644
index 000000000..3186b1b02
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrTextureParams.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextureParams_DEFINED
+#define GrTextureParams_DEFINED
+
+#include "GrTypes.h"
+#include "SkShader.h"
+
+/**
+ * Represents the filtering and tile modes used to access a texture.
+ */
+class GrTextureParams {
+public:
+ static const GrTextureParams& ClampNoFilter() {
+ static const GrTextureParams gParams;
+ return gParams;
+ }
+ static const GrTextureParams& ClampBilerp() {
+ static const GrTextureParams gParams(SkShader::kClamp_TileMode, kBilerp_FilterMode);
+ return gParams;
+ }
+
+ GrTextureParams() {
+ this->reset();
+ }
+
+ enum FilterMode {
+ kNone_FilterMode,
+ kBilerp_FilterMode,
+ kMipMap_FilterMode
+ };
+
+ GrTextureParams(SkShader::TileMode tileXAndY, FilterMode filterMode) {
+ this->reset(tileXAndY, filterMode);
+ }
+
+ GrTextureParams(const SkShader::TileMode tileModes[2], FilterMode filterMode) {
+ this->reset(tileModes, filterMode);
+ }
+
+ GrTextureParams(const GrTextureParams& params) {
+ *this = params;
+ }
+
+ GrTextureParams& operator= (const GrTextureParams& params) {
+ fTileModes[0] = params.fTileModes[0];
+ fTileModes[1] = params.fTileModes[1];
+ fFilterMode = params.fFilterMode;
+ return *this;
+ }
+
+ void reset() {
+ this->reset(SkShader::kClamp_TileMode, kNone_FilterMode);
+ }
+
+ void reset(SkShader::TileMode tileXAndY, FilterMode filterMode) {
+ fTileModes[0] = fTileModes[1] = tileXAndY;
+ fFilterMode = filterMode;
+ }
+
+ void reset(const SkShader::TileMode tileModes[2], FilterMode filterMode) {
+ fTileModes[0] = tileModes[0];
+ fTileModes[1] = tileModes[1];
+ fFilterMode = filterMode;
+ }
+
+ void setClampNoFilter() {
+ fTileModes[0] = fTileModes[1] = SkShader::kClamp_TileMode;
+ fFilterMode = kNone_FilterMode;
+ }
+
+ void setClamp() {
+ fTileModes[0] = fTileModes[1] = SkShader::kClamp_TileMode;
+ }
+
+ void setFilterMode(FilterMode filterMode) { fFilterMode = filterMode; }
+
+ void setTileModeX(const SkShader::TileMode tm) { fTileModes[0] = tm; }
+ void setTileModeY(const SkShader::TileMode tm) { fTileModes[1] = tm; }
+ void setTileModeXAndY(const SkShader::TileMode tm) { fTileModes[0] = fTileModes[1] = tm; }
+
+ SkShader::TileMode getTileModeX() const { return fTileModes[0]; }
+
+ SkShader::TileMode getTileModeY() const { return fTileModes[1]; }
+
+ bool isTiled() const {
+ return SkShader::kClamp_TileMode != fTileModes[0] ||
+ SkShader::kClamp_TileMode != fTileModes[1];
+ }
+
+ FilterMode filterMode() const { return fFilterMode; }
+
+ bool operator== (const GrTextureParams& other) const {
+ return fTileModes[0] == other.fTileModes[0] &&
+ fTileModes[1] == other.fTileModes[1] &&
+ fFilterMode == other.fFilterMode;
+ }
+
+ bool operator!= (const GrTextureParams& other) const { return !(*this == other); }
+
+private:
+ SkShader::TileMode fTileModes[2];
+ FilterMode fFilterMode;
+};
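+
+// Example: a short sketch of the tile/filter accessors above.
+//
+//   GrTextureParams params(SkShader::kRepeat_TileMode, GrTextureParams::kBilerp_FilterMode);
+//   SkASSERT(params.isTiled()); // repeat != clamp on both axes
+//   params.setTileModeXAndY(SkShader::kClamp_TileMode);
+//   SkASSERT(!params.isTiled() && GrTextureParams::kBilerp_FilterMode == params.filterMode());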
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrTextureProvider.h b/gfx/skia/skia/include/gpu/GrTextureProvider.h
new file mode 100644
index 000000000..e013bfff0
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrTextureProvider.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextureProvider_DEFINED
+#define GrTextureProvider_DEFINED
+
+#include "GrTexture.h"
+#include "GrTypes.h"
+
+class GrSingleOwner;
+
+class SK_API GrTextureProvider {
+public:
+ ///////////////////////////////////////////////////////////////////////////
+ // Textures
+
+ /**
+ * Creates a new texture in the resource cache and returns it. The caller owns a
+ * ref on the returned texture which must be balanced by a call to unref.
+ *
+ * @param desc Description of the texture properties.
+ * @param budgeted Does the texture count against the resource cache budget?
+ * @param texels A contiguous array of mipmap levels
+ * @param mipLevelCount The number of elements in the texels array
+ */
+ GrTexture* createMipMappedTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
+ const GrMipLevel* texels, int mipLevelCount);
+
+ /**
+ * This function is a shim which creates a SkTArray<GrMipLevel> of size 1.
+ * It then calls createTexture with that SkTArray.
+ *
+ * @param srcData Pointer to the pixel values (optional).
+ * @param rowBytes The number of bytes between rows of the texture. Zero
+ * implies tightly packed rows. For compressed pixel configs, this
+ * field is ignored.
+ */
+ GrTexture* createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted, const void* srcData,
+ size_t rowBytes);
+
+ /** Shortcut for creating a texture with no initial data to upload. */
+ GrTexture* createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted) {
+ return this->createTexture(desc, budgeted, nullptr, 0);
+ }
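+
+ // Example: a minimal sketch of creating a small budgeted texture with no initial data.
+ // 'provider' is an assumed GrTextureProvider*; GrSurfaceDesc is declared in GrTypes.h.
+ //
+ // GrSurfaceDesc desc;
+ // desc.fWidth = 64;
+ // desc.fHeight = 64;
+ // desc.fConfig = kRGBA_8888_GrPixelConfig;
+ // GrTexture* tex = provider->createTexture(desc, SkBudgeted::kYes);
+ // if (tex) {
+ //     // ... draw with or upload to the texture ...
+ //     tex->unref(); // the caller owns a ref and must balance it
+ // }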
+
+ /** Assigns a unique key to the texture. The texture will be findable via this key using
+ findTextureByUniqueKey(). If an existing texture has this key, its key will be removed. */
+ void assignUniqueKeyToTexture(const GrUniqueKey& key, GrTexture* texture) {
+ this->assignUniqueKeyToResource(key, texture);
+ }
+
+ /** Finds a texture by unique key. If the texture is found it is ref'ed and returned. */
+ GrTexture* findAndRefTextureByUniqueKey(const GrUniqueKey& key);
+
+ /**
+ * Determines whether a texture is associated with the unique key. If the texture is found it
+ * will not be locked or returned. This call does not affect the priority of the resource for
+ * deletion.
+ */
+ bool existsTextureWithUniqueKey(const GrUniqueKey& key) const {
+ return this->existsResourceWithUniqueKey(key);
+ }
+
+ /**
+ * Finds a texture that approximately matches the descriptor. Will be at least as large in width
+ * and height as desc specifies. If desc specifies that the texture should be a render target
+ * then result will be a render target. Format and sample count will always match the request.
+ * The contents of the texture are undefined. The caller owns a ref on the returned texture and
+ * must balance with a call to unref.
+ */
+ GrTexture* createApproxTexture(const GrSurfaceDesc&);
+
+ /** Legacy function that no longer should be used. */
+ enum ScratchTexMatch {
+ kExact_ScratchTexMatch,
+ kApprox_ScratchTexMatch
+ };
+ GrTexture* refScratchTexture(const GrSurfaceDesc& desc, ScratchTexMatch match) {
+ if (kApprox_ScratchTexMatch == match) {
+ return this->createApproxTexture(desc);
+ } else {
+ return this->createTexture(desc, SkBudgeted::kYes);
+ }
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Wrapped Backend Surfaces
+
+ /**
+ * Wraps an existing texture with a GrTexture object.
+ *
+ * OpenGL: if the object is a texture Gr may change its GL texture params
+ * when it is drawn.
+ *
+ * @return GrTexture object or NULL on failure.
+ */
+ GrTexture* wrapBackendTexture(const GrBackendTextureDesc& desc,
+ GrWrapOwnership = kBorrow_GrWrapOwnership);
+
+ /**
+ * Wraps an existing render target with a GrRenderTarget object. It is
+ * similar to wrapBackendTexture but can be used to draw into surfaces
+ * that are not also textures (e.g. FBO 0 in OpenGL, or an MSAA buffer that
+ * the client will resolve to a texture). Currently wrapped render targets
+ * always use the kBorrow_GrWrapOwnership semantics.
+ *
+ * @return GrRenderTarget object or NULL on failure.
+ */
+ GrRenderTarget* wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc);
+
+protected:
+ GrTextureProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* singleOwner);
+
+ /**
+ * Assigns a unique key to a resource. If the key is associated with another resource that
+ * association is removed and replaced by this resource.
+ */
+ void assignUniqueKeyToResource(const GrUniqueKey&, GrGpuResource*);
+
+ /**
+ * Finds a resource in the cache, based on the specified key. This is intended for use in
+ * conjunction with addResourceToCache(). The return value will be NULL if not found. The
+ * caller must balance with a call to unref().
+ */
+ GrGpuResource* findAndRefResourceByUniqueKey(const GrUniqueKey&);
+
+ /**
+ * Determines whether a resource is in the cache. If the resource is found it
+ * will not be locked or returned. This call does not affect the priority of
+ * the resource for deletion.
+ */
+ bool existsResourceWithUniqueKey(const GrUniqueKey& key) const;
+
+ enum ScratchTextureFlags {
+ kExact_ScratchTextureFlag = 0x1,
+ kNoPendingIO_ScratchTextureFlag = 0x2, // (http://skbug.com/4156)
+ kNoCreate_ScratchTextureFlag = 0x4,
+ };
+
+ /** A common impl for GrTextureProvider and GrResourceProvider variants. */
+ GrTexture* internalCreateApproxTexture(const GrSurfaceDesc& desc, uint32_t scratchTextureFlags);
+
+ GrTexture* refScratchTexture(const GrSurfaceDesc&, uint32_t scratchTextureFlags);
+
+ void abandon() {
+ fCache = NULL;
+ fGpu = NULL;
+ }
+
+ GrResourceCache* cache() { return fCache; }
+ const GrResourceCache* cache() const { return fCache; }
+
+ GrGpu* gpu() { return fGpu; }
+ const GrGpu* gpu() const { return fGpu; }
+
+ bool isAbandoned() const {
+ SkASSERT(SkToBool(fGpu) == SkToBool(fCache));
+ return !SkToBool(fCache);
+ }
+
+private:
+ GrResourceCache* fCache;
+ GrGpu* fGpu;
+
+ // In debug builds we guard against improper thread handling
+ SkDEBUGCODE(mutable GrSingleOwner* fSingleOwner;)
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrTypes.h b/gfx/skia/skia/include/gpu/GrTypes.h
new file mode 100644
index 000000000..6b73f3c07
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrTypes.h
@@ -0,0 +1,668 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTypes_DEFINED
+#define GrTypes_DEFINED
+
+#include "SkMath.h"
+#include "SkTypes.h"
+#include "GrConfig.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Defines overloaded bitwise operators to make it easier to use an enum as a
+ * bitfield.
+ */
+#define GR_MAKE_BITFIELD_OPS(X) \
+ inline X operator | (X a, X b) { \
+ return (X) (+a | +b); \
+ } \
+ inline X& operator |= (X& a, X b) { \
+ return (a = a | b); \
+ } \
+ \
+ inline X operator & (X a, X b) { \
+ return (X) (+a & +b); \
+ } \
+ template <typename T> \
+ inline X operator & (T a, X b) { \
+ return (X) (+a & +b); \
+ } \
+ template <typename T> \
+ inline X operator & (X a, T b) { \
+ return (X) (+a & +b); \
+ } \
+
+#define GR_DECL_BITFIELD_OPS_FRIENDS(X) \
+ friend X operator | (X a, X b); \
+ friend X& operator |= (X& a, X b); \
+ \
+ friend X operator & (X a, X b); \
+ \
+ template <typename T> \
+ friend X operator & (T a, X b); \
+ \
+ template <typename T> \
+ friend X operator & (X a, T b); \
+
+/**
+ * Defines bitwise operators that make it possible to use an enum class as a
+ * very basic bitfield.
+ */
+#define GR_MAKE_BITFIELD_CLASS_OPS(X) \
+ inline X operator | (X a, X b) { \
+ return (X) ((int)a | (int)b); \
+ } \
+ inline X& operator |= (X& a, X b) { \
+ return (a = a | b); \
+ } \
+ inline bool operator & (X a, X b) { \
+ return SkToBool((int)a & (int)b); \
+ }
+
+#define GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(X) \
+ friend X operator | (X a, X b); \
+ friend X& operator |= (X& a, X b); \
+ friend bool operator & (X a, X b);
+
+////////////////////////////////////////////////////////////////////////////////
+
+// compile time versions of min/max
+#define GR_CT_MAX(a, b) (((b) < (a)) ? (a) : (b))
+#define GR_CT_MIN(a, b) (((b) < (a)) ? (b) : (a))
+
+/**
+ * divide, rounding up
+ */
+static inline int32_t GrIDivRoundUp(int x, int y) {
+ SkASSERT(y > 0);
+ return (x + (y-1)) / y;
+}
+static inline uint32_t GrUIDivRoundUp(uint32_t x, uint32_t y) {
+ return (x + (y-1)) / y;
+}
+static inline size_t GrSizeDivRoundUp(size_t x, size_t y) {
+ return (x + (y-1)) / y;
+}
+
+// compile time, evaluates Y multiple times
+#define GR_CT_DIV_ROUND_UP(X, Y) (((X) + ((Y)-1)) / (Y))
+
+/**
+ * align up
+ */
+static inline uint32_t GrUIAlignUp(uint32_t x, uint32_t alignment) {
+ return GrUIDivRoundUp(x, alignment) * alignment;
+}
+static inline size_t GrSizeAlignUp(size_t x, size_t alignment) {
+ return GrSizeDivRoundUp(x, alignment) * alignment;
+}
+
+// compile time, evaluates A multiple times
+#define GR_CT_ALIGN_UP(X, A) (GR_CT_DIV_ROUND_UP((X),(A)) * (A))
+
+/**
+ * amount of pad needed to align up
+ */
+static inline uint32_t GrUIAlignUpPad(uint32_t x, uint32_t alignment) {
+ return (alignment - x % alignment) % alignment;
+}
+static inline size_t GrSizeAlignUpPad(size_t x, size_t alignment) {
+ return (alignment - x % alignment) % alignment;
+}
+
+/**
+ * align down
+ */
+static inline uint32_t GrUIAlignDown(uint32_t x, uint32_t alignment) {
+ return (x / alignment) * alignment;
+}
+static inline size_t GrSizeAlignDown(size_t x, uint32_t alignment) {
+ return (x / alignment) * alignment;
+}
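+
+// Worked example for the alignment helpers above:
+//   GrUIAlignUp(13, 8)    == 16  (round 13 up to the next multiple of 8)
+//   GrUIAlignUpPad(13, 8) == 3   (padding needed to reach that multiple)
+//   GrUIAlignDown(13, 8)  == 8   (round 13 down to a multiple of 8)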
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Possible 3D APIs that may be used by Ganesh.
+ */
+enum GrBackend {
+ kOpenGL_GrBackend,
+ kVulkan_GrBackend,
+
+ kLast_GrBackend = kVulkan_GrBackend
+};
+const int kBackendCount = kLast_GrBackend + 1;
+
+/**
+ * Backend-specific 3D context handle
+ * GrGLInterface* for OpenGL. If NULL will use the default GL interface.
+ * GrVkBackendContext* for Vulkan.
+ */
+typedef intptr_t GrBackendContext;
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+* Geometric primitives used for drawing.
+*/
+enum GrPrimitiveType {
+ kTriangles_GrPrimitiveType,
+ kTriangleStrip_GrPrimitiveType,
+ kTriangleFan_GrPrimitiveType,
+ kPoints_GrPrimitiveType,
+ kLines_GrPrimitiveType, // 1 pix wide only
+ kLineStrip_GrPrimitiveType, // 1 pix wide only
+ kLast_GrPrimitiveType = kLineStrip_GrPrimitiveType
+};
+
+static inline bool GrIsPrimTypeLines(GrPrimitiveType type) {
+ return kLines_GrPrimitiveType == type || kLineStrip_GrPrimitiveType == type;
+}
+
+static inline bool GrIsPrimTypeTris(GrPrimitiveType type) {
+ return kTriangles_GrPrimitiveType == type ||
+ kTriangleStrip_GrPrimitiveType == type ||
+ kTriangleFan_GrPrimitiveType == type;
+}
+
+/**
+ * Formats for masks, used by the font cache.
+ * Important that these are 0-based.
+ */
+enum GrMaskFormat {
+ kA8_GrMaskFormat, //!< 1-byte per pixel
+ kA565_GrMaskFormat, //!< 2-bytes per pixel, RGB represent 3-channel LCD coverage
+ kARGB_GrMaskFormat, //!< 4-bytes per pixel, color format
+
+ kLast_GrMaskFormat = kARGB_GrMaskFormat
+};
+static const int kMaskFormatCount = kLast_GrMaskFormat + 1;
+
+/**
+ * Return the number of bytes-per-pixel for the specified mask format.
+ */
+static inline int GrMaskFormatBytesPerPixel(GrMaskFormat format) {
+ SkASSERT(format < kMaskFormatCount);
+ // kA8 (0) -> 1
+ // kA565 (1) -> 2
+ // kARGB (2) -> 4
+ static const int sBytesPerPixel[] = { 1, 2, 4 };
+ static_assert(SK_ARRAY_COUNT(sBytesPerPixel) == kMaskFormatCount, "array_size_mismatch");
+ static_assert(kA8_GrMaskFormat == 0, "enum_order_dependency");
+ static_assert(kA565_GrMaskFormat == 1, "enum_order_dependency");
+ static_assert(kARGB_GrMaskFormat == 2, "enum_order_dependency");
+
+ return sBytesPerPixel[(int) format];
+}
+
+/**
+ * Pixel configurations.
+ */
+enum GrPixelConfig {
+ kUnknown_GrPixelConfig,
+ kAlpha_8_GrPixelConfig,
+ kIndex_8_GrPixelConfig,
+ kRGB_565_GrPixelConfig,
+ /**
+ * Premultiplied
+ */
+ kRGBA_4444_GrPixelConfig,
+ /**
+ * Premultiplied. Byte order is r,g,b,a.
+ */
+ kRGBA_8888_GrPixelConfig,
+ /**
+ * Premultiplied. Byte order is b,g,r,a.
+ */
+ kBGRA_8888_GrPixelConfig,
+ /**
+ * Premultiplied and sRGB. Byte order is r,g,b,a.
+ */
+ kSRGBA_8888_GrPixelConfig,
+ /**
+ * Premultiplied and sRGB. Byte order is b,g,r,a.
+ */
+ kSBGRA_8888_GrPixelConfig,
+ /**
+ * ETC1 Compressed Data
+ */
+ kETC1_GrPixelConfig,
+ /**
+ * LATC/RGTC/3Dc/BC4 Compressed Data
+ */
+ kLATC_GrPixelConfig,
+ /**
+ * R11 EAC Compressed Data
+ * (Corresponds to section C.3.5 of the OpenGL 4.4 core profile spec)
+ */
+ kR11_EAC_GrPixelConfig,
+
+ /**
+ * 12x12 ASTC Compressed Data
+ * ASTC stands for Adaptive Scalable Texture Compression. It is a technique
+ * that allows for a lot of customization in the compressed representation
+ * of a block. The only thing fixed in the representation is the block size,
+ * which means that a texture that contains ASTC data must be treated as
+ * having RGBA values. However, there are single-channel encodings which set
+ * the alpha to opaque and all three RGB channels equal, effectively making the
+ * compression format single-channel, such as R11 EAC and LATC.
+ */
+ kASTC_12x12_GrPixelConfig,
+
+ /**
+ * Byte order is r, g, b, a. This color format is 32 bits per channel
+ */
+ kRGBA_float_GrPixelConfig,
+
+ /**
+ * This color format is a single 16 bit float channel
+ */
+ kAlpha_half_GrPixelConfig,
+
+ /**
+ * Byte order is r, g, b, a. This color format is 16 bits per channel
+ */
+ kRGBA_half_GrPixelConfig,
+
+ kLast_GrPixelConfig = kRGBA_half_GrPixelConfig
+};
+static const int kGrPixelConfigCnt = kLast_GrPixelConfig + 1;
+
+// Aliases for pixel configs that match skia's byte order.
+#ifndef SK_CPU_LENDIAN
+ #error "Skia gpu currently assumes little endian"
+#endif
+#if SK_PMCOLOR_BYTE_ORDER(B,G,R,A)
+ static const GrPixelConfig kSkia8888_GrPixelConfig = kBGRA_8888_GrPixelConfig;
+ static const GrPixelConfig kSkiaGamma8888_GrPixelConfig = kSBGRA_8888_GrPixelConfig;
+#elif SK_PMCOLOR_BYTE_ORDER(R,G,B,A)
+ static const GrPixelConfig kSkia8888_GrPixelConfig = kRGBA_8888_GrPixelConfig;
+ static const GrPixelConfig kSkiaGamma8888_GrPixelConfig = kSRGBA_8888_GrPixelConfig;
+#else
+ #error "SK_*32_SHIFT values must correspond to GL_BGRA or GL_RGBA format."
+#endif
+
+// Returns true if the pixel config is a GPU-specific compressed format
+// representation.
+static inline bool GrPixelConfigIsCompressed(GrPixelConfig config) {
+ switch (config) {
+ case kIndex_8_GrPixelConfig:
+ case kETC1_GrPixelConfig:
+ case kLATC_GrPixelConfig:
+ case kR11_EAC_GrPixelConfig:
+ case kASTC_12x12_GrPixelConfig:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/** If the pixel config is compressed, return an equivalent uncompressed format. */
+static inline GrPixelConfig GrMakePixelConfigUncompressed(GrPixelConfig config) {
+ switch (config) {
+ case kIndex_8_GrPixelConfig:
+ case kETC1_GrPixelConfig:
+ case kASTC_12x12_GrPixelConfig:
+ return kRGBA_8888_GrPixelConfig;
+ case kLATC_GrPixelConfig:
+ case kR11_EAC_GrPixelConfig:
+ return kAlpha_8_GrPixelConfig;
+ default:
+ SkASSERT(!GrPixelConfigIsCompressed(config));
+ return config;
+ }
+}
+
+// Returns true if the pixel config is 32 bits per pixel
+static inline bool GrPixelConfigIs8888(GrPixelConfig config) {
+ switch (config) {
+ case kRGBA_8888_GrPixelConfig:
+ case kBGRA_8888_GrPixelConfig:
+ case kSRGBA_8888_GrPixelConfig:
+ case kSBGRA_8888_GrPixelConfig:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Returns true if the color (non-alpha) components represent sRGB values. It does NOT indicate that
+// all three color components are present in the config or anything about their order.
+static inline bool GrPixelConfigIsSRGB(GrPixelConfig config) {
+ switch (config) {
+ case kSRGBA_8888_GrPixelConfig:
+ case kSBGRA_8888_GrPixelConfig:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Takes a config and returns the equivalent config with the R and B order
+// swapped if such a config exists. Otherwise, kUnknown_GrPixelConfig
+static inline GrPixelConfig GrPixelConfigSwapRAndB(GrPixelConfig config) {
+ switch (config) {
+ case kBGRA_8888_GrPixelConfig:
+ return kRGBA_8888_GrPixelConfig;
+ case kRGBA_8888_GrPixelConfig:
+ return kBGRA_8888_GrPixelConfig;
+ case kSBGRA_8888_GrPixelConfig:
+ return kSRGBA_8888_GrPixelConfig;
+ case kSRGBA_8888_GrPixelConfig:
+ return kSBGRA_8888_GrPixelConfig;
+ default:
+ return kUnknown_GrPixelConfig;
+ }
+}
+
+static inline size_t GrBytesPerPixel(GrPixelConfig config) {
+ SkASSERT(!GrPixelConfigIsCompressed(config));
+ switch (config) {
+ case kAlpha_8_GrPixelConfig:
+ return 1;
+ case kRGB_565_GrPixelConfig:
+ case kRGBA_4444_GrPixelConfig:
+ case kAlpha_half_GrPixelConfig:
+ return 2;
+ case kRGBA_8888_GrPixelConfig:
+ case kBGRA_8888_GrPixelConfig:
+ case kSRGBA_8888_GrPixelConfig:
+ case kSBGRA_8888_GrPixelConfig:
+ return 4;
+ case kRGBA_half_GrPixelConfig:
+ return 8;
+ case kRGBA_float_GrPixelConfig:
+ return 16;
+ default:
+ return 0;
+ }
+}
+
+static inline bool GrPixelConfigIsOpaque(GrPixelConfig config) {
+ switch (config) {
+ case kETC1_GrPixelConfig:
+ case kRGB_565_GrPixelConfig:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool GrPixelConfigIsAlphaOnly(GrPixelConfig config) {
+ switch (config) {
+ case kR11_EAC_GrPixelConfig:
+ case kLATC_GrPixelConfig:
+ case kASTC_12x12_GrPixelConfig:
+ case kAlpha_8_GrPixelConfig:
+ case kAlpha_half_GrPixelConfig:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool GrPixelConfigIsFloatingPoint(GrPixelConfig config) {
+ switch (config) {
+ case kRGBA_float_GrPixelConfig:
+ case kAlpha_half_GrPixelConfig:
+ case kRGBA_half_GrPixelConfig:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * Optional bitfield flags that can be set on GrSurfaceDesc (below).
+ */
+enum GrSurfaceFlags {
+ kNone_GrSurfaceFlags = 0x0,
+ /**
+ * Creates a texture that can be rendered to as a GrRenderTarget. Use
+ * GrTexture::asRenderTarget() to access.
+ */
+ kRenderTarget_GrSurfaceFlag = 0x1,
+ /**
+ * Placeholder for managing zero-copy textures
+ */
+ kZeroCopy_GrSurfaceFlag = 0x2,
+ /**
+ * Indicates that all allocations (color buffer, FBO completeness, etc)
+ * should be verified.
+ */
+ kCheckAllocation_GrSurfaceFlag = 0x4,
+};
+
+GR_MAKE_BITFIELD_OPS(GrSurfaceFlags)
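+
+// Example: the operators generated above let GrSurfaceFlags be combined and tested directly.
+//
+//   GrSurfaceFlags flags = kRenderTarget_GrSurfaceFlag | kCheckAllocation_GrSurfaceFlag;
+//   if (flags & kRenderTarget_GrSurfaceFlag) {
+//       // the described surface is renderable
+//   }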
+
+// opaque type for 3D API object handles
+typedef intptr_t GrBackendObject;
+
+/**
+ * Some textures will be stored such that the upper and left edges of the content meet at
+ * the origin (in texture coord space) and for other textures the lower and left edges meet at
+ * the origin. kDefault_GrSurfaceOrigin sets textures to TopLeft, and render targets
+ * to BottomLeft.
+ */
+
+enum GrSurfaceOrigin {
+ kDefault_GrSurfaceOrigin, // DEPRECATED; to be removed
+ kTopLeft_GrSurfaceOrigin,
+ kBottomLeft_GrSurfaceOrigin,
+};
+
+struct GrMipLevel {
+ const void* fPixels;
+ size_t fRowBytes;
+};
+
+/**
+ * Describes a surface to be created.
+ */
+struct GrSurfaceDesc {
+ GrSurfaceDesc()
+ : fFlags(kNone_GrSurfaceFlags)
+ , fOrigin(kDefault_GrSurfaceOrigin)
+ , fWidth(0)
+ , fHeight(0)
+ , fConfig(kUnknown_GrPixelConfig)
+ , fSampleCnt(0)
+ , fIsMipMapped(false) {
+ }
+
+ GrSurfaceFlags fFlags; //!< bitfield of TextureFlags
+ GrSurfaceOrigin fOrigin; //!< origin of the texture
+ int fWidth; //!< Width of the texture
+ int fHeight; //!< Height of the texture
+
+ /**
+ * Format of source data of the texture. Not guaranteed to be the same as
+ * internal format used by 3D API.
+ */
+ GrPixelConfig fConfig;
+
+ /**
+ * The number of samples per pixel or 0 to disable full scene AA. This only
+ * applies if the kRenderTarget_GrSurfaceFlag is set. The actual number
+ * of samples may not exactly match the request. The request will be rounded
+ * up to the next supported sample count, or down if it is larger than the
+ * max supported count.
+ */
+ int fSampleCnt;
+ bool fIsMipMapped; //!< Indicates if the texture has mipmaps
+};
+
+// Legacy alias
+typedef GrSurfaceDesc GrTextureDesc;
+
+/**
+ * Clips are composed from these objects.
+ */
+enum GrClipType {
+ kRect_ClipType,
+ kPath_ClipType
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+
+/** Ownership rules for external GPU resources imported into Skia. */
+enum GrWrapOwnership {
+ /** Skia will assume the client will keep the resource alive and Skia will not free it. */
+ kBorrow_GrWrapOwnership,
+
+ /** Skia will assume ownership of the resource and free it. */
+ kAdopt_GrWrapOwnership,
+};
+
+/**
+ * Gr can wrap an existing texture created by the client with a GrTexture
+ * object. The client is responsible for ensuring that the texture lives at
+ * least as long as the GrTexture object wrapping it. We require the client to
+ * explicitly provide information about the texture, such as width, height,
+ * and pixel config, rather than querying the 3D API for these values. We expect
+ * these to be immutable even if the 3D API doesn't require this (OpenGL).
+ *
+ * Textures that are also render targets are supported as well. Gr will manage
+ * any ancillary 3D API (stencil buffer, FBO id, etc) objects necessary for
+ * Gr to draw into the render target. To access the render target object
+ * call GrTexture::asRenderTarget().
+ *
+ * If, in addition to the render target flag, the caller also specifies a sample
+ * count, Gr will create an MSAA buffer that resolves into the texture. Gr auto-
+ * resolves when it reads from the texture. The client can explicitly resolve
+ * using the GrRenderTarget interface.
+ *
+ * Note: These flags currently form a subset of GrTexture's flags.
+ */
+
+enum GrBackendTextureFlags {
+ /**
+ * No flags enabled
+ */
+ kNone_GrBackendTextureFlag = 0,
+ /**
+ * Indicates that the texture is also a render target, and thus should have
+ * a GrRenderTarget object.
+ */
+ kRenderTarget_GrBackendTextureFlag = kRenderTarget_GrSurfaceFlag,
+};
+GR_MAKE_BITFIELD_OPS(GrBackendTextureFlags)
+
+struct GrBackendTextureDesc {
+ GrBackendTextureDesc() { memset(this, 0, sizeof(*this)); }
+ GrBackendTextureFlags fFlags;
+ GrSurfaceOrigin fOrigin;
+ int fWidth; //!< width in pixels
+ int fHeight; //!< height in pixels
+ GrPixelConfig fConfig; //!< color format
+ /**
+ * If the render target flag is set and sample count is greater than 0
+ * then Gr will create an MSAA buffer that resolves to the texture.
+ */
+ int fSampleCnt;
+ /**
+ * Handle to the 3D API object.
+ * OpenGL: Texture ID.
+ * Vulkan: GrVkImageInfo*
+ */
+ GrBackendObject fTextureHandle;
+};
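+
+// Example: a sketch of describing an externally created OpenGL texture for
+// GrTextureProvider::wrapBackendTexture(). 'glTextureID' and the dimensions are assumed to
+// come from the client.
+//
+//   GrBackendTextureDesc desc;
+//   desc.fWidth = 256;
+//   desc.fHeight = 256;
+//   desc.fConfig = kRGBA_8888_GrPixelConfig;
+//   desc.fOrigin = kBottomLeft_GrSurfaceOrigin;
+//   desc.fTextureHandle = static_cast<GrBackendObject>(glTextureID);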
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Gr can wrap an existing render target created by the client in the 3D API
+ * with a GrRenderTarget object. The client is responsible for ensuring that the
+ * underlying 3D API object lives at least as long as the GrRenderTarget object
+ * wrapping it. We require the client to explicitly provide information about
+ * the target, such as width, height, and pixel config rather than querying the
+ * 3D API for these values. We expect these properties to be immutable even if
+ * the 3D API doesn't require this (OpenGL).
+ */
+
+struct GrBackendRenderTargetDesc {
+ GrBackendRenderTargetDesc() { memset(this, 0, sizeof(*this)); }
+ int fWidth; //!< width in pixels
+ int fHeight; //!< height in pixels
+ GrPixelConfig fConfig; //!< color format
+ GrSurfaceOrigin fOrigin; //!< pixel origin
+ /**
+ * The number of samples per pixel. Gr uses this to influence decisions
+ * about applying other forms of anti-aliasing.
+ */
+ int fSampleCnt;
+ /**
+ * Number of bits of stencil per-pixel.
+ */
+ int fStencilBits;
+ /**
+ * Handle to the 3D API object.
+ * OpenGL: FBO ID
+ * Vulkan: GrVkImageInfo*
+ */
+ GrBackendObject fRenderTargetHandle;
+};
+
+/**
+ * The GrContext's cache of backend context state can be partially invalidated.
+ * These enums are specific to the GL backend and we'd add a new set for an alternative backend.
+ */
+enum GrGLBackendState {
+ kRenderTarget_GrGLBackendState = 1 << 0,
+ kTextureBinding_GrGLBackendState = 1 << 1,
+ // View state stands for scissor and viewport
+ kView_GrGLBackendState = 1 << 2,
+ kBlend_GrGLBackendState = 1 << 3,
+ kMSAAEnable_GrGLBackendState = 1 << 4,
+ kVertex_GrGLBackendState = 1 << 5,
+ kStencil_GrGLBackendState = 1 << 6,
+ kPixelStore_GrGLBackendState = 1 << 7,
+ kProgram_GrGLBackendState = 1 << 8,
+ kFixedFunction_GrGLBackendState = 1 << 9,
+ kMisc_GrGLBackendState = 1 << 10,
+ kPathRendering_GrGLBackendState = 1 << 11,
+ kALL_GrGLBackendState = 0xffff
+};
+
+/**
+ * Returns the data size for the given compressed pixel config
+ */
+static inline size_t GrCompressedFormatDataSize(GrPixelConfig config,
+ int width, int height) {
+ SkASSERT(GrPixelConfigIsCompressed(config));
+ static const int kGrIndex8TableSize = 256 * 4; // 4 == sizeof(GrColor)
+
+ switch (config) {
+ case kIndex_8_GrPixelConfig:
+ return width * height + kGrIndex8TableSize;
+ case kR11_EAC_GrPixelConfig:
+ case kLATC_GrPixelConfig:
+ case kETC1_GrPixelConfig:
+ SkASSERT((width & 3) == 0);
+ SkASSERT((height & 3) == 0);
+ return (width >> 2) * (height >> 2) * 8;
+
+ case kASTC_12x12_GrPixelConfig:
+ SkASSERT((width % 12) == 0);
+ SkASSERT((height % 12) == 0);
+ return (width / 12) * (height / 12) * 16;
+
+ default:
+ SkFAIL("Unknown compressed pixel config");
+ return 4 * width * height;
+ }
+}
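+
+// Worked examples for the function above:
+//   GrCompressedFormatDataSize(kETC1_GrPixelConfig, 256, 256)
+//       == (256 >> 2) * (256 >> 2) * 8 == 32768 bytes (8 bytes per 4x4 block)
+//   GrCompressedFormatDataSize(kASTC_12x12_GrPixelConfig, 36, 24)
+//       == (36 / 12) * (24 / 12) * 16 == 96 bytes (16 bytes per 12x12 block)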
+
+/**
+ * This value translates to resetting all the context state for any backend.
+ */
+static const uint32_t kAll_GrBackendState = 0xffffffff;
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrTypesPriv.h b/gfx/skia/skia/include/gpu/GrTypesPriv.h
new file mode 100644
index 000000000..636e72a01
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrTypesPriv.h
@@ -0,0 +1,490 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTypesPriv_DEFINED
+#define GrTypesPriv_DEFINED
+
+#include "GrTypes.h"
+#include "SkRefCnt.h"
+
+ /**
+ * Types of shader-language-specific boxed variables we can create. (Currently only GrGLShaderVars,
+ * but should be applicable to other shader languages.)
+ */
+enum GrSLType {
+ kVoid_GrSLType,
+ kFloat_GrSLType,
+ kVec2f_GrSLType,
+ kVec3f_GrSLType,
+ kVec4f_GrSLType,
+ kMat22f_GrSLType,
+ kMat33f_GrSLType,
+ kMat44f_GrSLType,
+ kTexture2DSampler_GrSLType,
+ kTextureExternalSampler_GrSLType,
+ kTexture2DRectSampler_GrSLType,
+ kTextureBufferSampler_GrSLType,
+ kBool_GrSLType,
+ kInt_GrSLType,
+ kUint_GrSLType,
+ kTexture2D_GrSLType,
+ kSampler_GrSLType,
+
+ kLast_GrSLType = kSampler_GrSLType
+};
+static const int kGrSLTypeCount = kLast_GrSLType + 1;
+
+enum GrShaderType {
+ kVertex_GrShaderType,
+ kGeometry_GrShaderType,
+ kFragment_GrShaderType,
+
+ kLastkFragment_GrShaderType = kFragment_GrShaderType
+};
+static const int kGrShaderTypeCount = kLastkFragment_GrShaderType + 1;
+
+enum GrShaderFlags {
+ kNone_GrShaderFlags = 0,
+ kVertex_GrShaderFlag = 1 << kVertex_GrShaderType,
+ kGeometry_GrShaderFlag = 1 << kGeometry_GrShaderType,
+ kFragment_GrShaderFlag = 1 << kFragment_GrShaderType
+};
+GR_MAKE_BITFIELD_OPS(GrShaderFlags);
+
+enum class GrDrawFace {
+ kInvalid = -1,
+
+ kBoth,
+ kCCW,
+ kCW,
+};
+
+/**
+ * Precisions of shader language variables. Not all shading languages support precisions or actually
+ * vary the internal precision based on the qualifiers. These currently only apply to float types (
+ * including float vectors and matrices).
+ */
+enum GrSLPrecision {
+ kLow_GrSLPrecision,
+ kMedium_GrSLPrecision,
+ kHigh_GrSLPrecision,
+
+ // Default precision is medium. This is because on OpenGL ES 2 highp support is not
+ // guaranteed. On (non-ES) OpenGL the specifiers have no effect on precision.
+ kDefault_GrSLPrecision = kMedium_GrSLPrecision,
+
+ kLast_GrSLPrecision = kHigh_GrSLPrecision
+};
+
+static const int kGrSLPrecisionCount = kLast_GrSLPrecision + 1;
+
+/**
+ * Gets the vector size of the SLType. Returns -1 for void, matrices, and samplers.
+ */
+static inline int GrSLTypeVectorCount(GrSLType type) {
+ SkASSERT(type >= 0 && type < static_cast<GrSLType>(kGrSLTypeCount));
+ static const int kCounts[] = { -1, 1, 2, 3, 4, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1 };
+ return kCounts[type];
+
+ GR_STATIC_ASSERT(0 == kVoid_GrSLType);
+ GR_STATIC_ASSERT(1 == kFloat_GrSLType);
+ GR_STATIC_ASSERT(2 == kVec2f_GrSLType);
+ GR_STATIC_ASSERT(3 == kVec3f_GrSLType);
+ GR_STATIC_ASSERT(4 == kVec4f_GrSLType);
+ GR_STATIC_ASSERT(5 == kMat22f_GrSLType);
+ GR_STATIC_ASSERT(6 == kMat33f_GrSLType);
+ GR_STATIC_ASSERT(7 == kMat44f_GrSLType);
+ GR_STATIC_ASSERT(8 == kTexture2DSampler_GrSLType);
+ GR_STATIC_ASSERT(9 == kTextureExternalSampler_GrSLType);
+ GR_STATIC_ASSERT(10 == kTexture2DRectSampler_GrSLType);
+ GR_STATIC_ASSERT(11 == kTextureBufferSampler_GrSLType);
+ GR_STATIC_ASSERT(12 == kBool_GrSLType);
+ GR_STATIC_ASSERT(13 == kInt_GrSLType);
+ GR_STATIC_ASSERT(14 == kUint_GrSLType);
+ GR_STATIC_ASSERT(15 == kTexture2D_GrSLType);
+ GR_STATIC_ASSERT(16 == kSampler_GrSLType);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kCounts) == kGrSLTypeCount);
+}
+
+/** Return the type enum for a vector of floats of length n (1..4),
+ e.g. 1 -> kFloat_GrSLType, 2 -> kVec2f_GrSLType, ... */
+static inline GrSLType GrSLFloatVectorType(int count) {
+ SkASSERT(count > 0 && count <= 4);
+ return (GrSLType)(count);
+
+ GR_STATIC_ASSERT(kFloat_GrSLType == 1);
+ GR_STATIC_ASSERT(kVec2f_GrSLType == 2);
+ GR_STATIC_ASSERT(kVec3f_GrSLType == 3);
+ GR_STATIC_ASSERT(kVec4f_GrSLType == 4);
+}
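+
+// Example: the two helpers above round-trip for float vectors, e.g.
+//   GrSLFloatVectorType(3) == kVec3f_GrSLType and GrSLTypeVectorCount(kVec3f_GrSLType) == 3.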
+
+/** Is the shading language type float (including vectors/matrices)? */
+static inline bool GrSLTypeIsFloatType(GrSLType type) {
+ SkASSERT(type >= 0 && type < static_cast<GrSLType>(kGrSLTypeCount));
+ return type >= kFloat_GrSLType && type <= kMat44f_GrSLType;
+
+ GR_STATIC_ASSERT(0 == kVoid_GrSLType);
+ GR_STATIC_ASSERT(1 == kFloat_GrSLType);
+ GR_STATIC_ASSERT(2 == kVec2f_GrSLType);
+ GR_STATIC_ASSERT(3 == kVec3f_GrSLType);
+ GR_STATIC_ASSERT(4 == kVec4f_GrSLType);
+ GR_STATIC_ASSERT(5 == kMat22f_GrSLType);
+ GR_STATIC_ASSERT(6 == kMat33f_GrSLType);
+ GR_STATIC_ASSERT(7 == kMat44f_GrSLType);
+ GR_STATIC_ASSERT(8 == kTexture2DSampler_GrSLType);
+ GR_STATIC_ASSERT(9 == kTextureExternalSampler_GrSLType);
+ GR_STATIC_ASSERT(10 == kTexture2DRectSampler_GrSLType);
+ GR_STATIC_ASSERT(11 == kTextureBufferSampler_GrSLType);
+ GR_STATIC_ASSERT(12 == kBool_GrSLType);
+ GR_STATIC_ASSERT(13 == kInt_GrSLType);
+ GR_STATIC_ASSERT(14 == kUint_GrSLType);
+ GR_STATIC_ASSERT(15 == kTexture2D_GrSLType);
+ GR_STATIC_ASSERT(16 == kSampler_GrSLType);
+ GR_STATIC_ASSERT(17 == kGrSLTypeCount);
+}
+
+/** Is the shading language type integral (including vectors/matrices)? */
+static inline bool GrSLTypeIsIntType(GrSLType type) {
+ SkASSERT(type >= 0 && type < static_cast<GrSLType>(kGrSLTypeCount));
+ return type >= kInt_GrSLType && type <= kUint_GrSLType;
+
+ GR_STATIC_ASSERT(0 == kVoid_GrSLType);
+ GR_STATIC_ASSERT(1 == kFloat_GrSLType);
+ GR_STATIC_ASSERT(2 == kVec2f_GrSLType);
+ GR_STATIC_ASSERT(3 == kVec3f_GrSLType);
+ GR_STATIC_ASSERT(4 == kVec4f_GrSLType);
+ GR_STATIC_ASSERT(5 == kMat22f_GrSLType);
+ GR_STATIC_ASSERT(6 == kMat33f_GrSLType);
+ GR_STATIC_ASSERT(7 == kMat44f_GrSLType);
+ GR_STATIC_ASSERT(8 == kTexture2DSampler_GrSLType);
+ GR_STATIC_ASSERT(9 == kTextureExternalSampler_GrSLType);
+ GR_STATIC_ASSERT(10 == kTexture2DRectSampler_GrSLType);
+ GR_STATIC_ASSERT(11 == kTextureBufferSampler_GrSLType);
+ GR_STATIC_ASSERT(12 == kBool_GrSLType);
+ GR_STATIC_ASSERT(13 == kInt_GrSLType);
+ GR_STATIC_ASSERT(14 == kUint_GrSLType);
+ GR_STATIC_ASSERT(15 == kTexture2D_GrSLType);
+ GR_STATIC_ASSERT(16 == kSampler_GrSLType);
+ GR_STATIC_ASSERT(17 == kGrSLTypeCount);
+}
+
+/** Is the shading language type numeric (including vectors/matrices)? */
+static inline bool GrSLTypeIsNumeric(GrSLType type) {
+ return GrSLTypeIsFloatType(type) || GrSLTypeIsIntType(type);
+}
+
+/** Returns the size in bytes for floating point GrSLTypes. For non-floating-point types returns 0. */
+static inline size_t GrSLTypeSize(GrSLType type) {
+ SkASSERT(GrSLTypeIsFloatType(type));
+ static const size_t kSizes[] = {
+ 0, // kVoid_GrSLType
+ sizeof(float), // kFloat_GrSLType
+ 2 * sizeof(float), // kVec2f_GrSLType
+ 3 * sizeof(float), // kVec3f_GrSLType
+ 4 * sizeof(float), // kVec4f_GrSLType
+ 2 * 2 * sizeof(float), // kMat22f_GrSLType
+ 3 * 3 * sizeof(float), // kMat33f_GrSLType
+ 4 * 4 * sizeof(float), // kMat44f_GrSLType
+ 0, // kTexture2DSampler_GrSLType
+ 0, // kTextureExternalSampler_GrSLType
+ 0, // kTexture2DRectSampler_GrSLType
+ 0, // kTextureBufferSampler_GrSLType
+ 0, // kBool_GrSLType
+ 0, // kInt_GrSLType
+ 0, // kUint_GrSLType
+ 0, // kTexture2D_GrSLType
+ 0, // kSampler_GrSLType
+ };
+ return kSizes[type];
+
+ GR_STATIC_ASSERT(0 == kVoid_GrSLType);
+ GR_STATIC_ASSERT(1 == kFloat_GrSLType);
+ GR_STATIC_ASSERT(2 == kVec2f_GrSLType);
+ GR_STATIC_ASSERT(3 == kVec3f_GrSLType);
+ GR_STATIC_ASSERT(4 == kVec4f_GrSLType);
+ GR_STATIC_ASSERT(5 == kMat22f_GrSLType);
+ GR_STATIC_ASSERT(6 == kMat33f_GrSLType);
+ GR_STATIC_ASSERT(7 == kMat44f_GrSLType);
+ GR_STATIC_ASSERT(8 == kTexture2DSampler_GrSLType);
+ GR_STATIC_ASSERT(9 == kTextureExternalSampler_GrSLType);
+ GR_STATIC_ASSERT(10 == kTexture2DRectSampler_GrSLType);
+ GR_STATIC_ASSERT(11 == kTextureBufferSampler_GrSLType);
+ GR_STATIC_ASSERT(12 == kBool_GrSLType);
+ GR_STATIC_ASSERT(13 == kInt_GrSLType);
+ GR_STATIC_ASSERT(14 == kUint_GrSLType);
+ GR_STATIC_ASSERT(15 == kTexture2D_GrSLType);
+ GR_STATIC_ASSERT(16 == kSampler_GrSLType);
+ GR_STATIC_ASSERT(17 == kGrSLTypeCount);
+}
+
+static inline bool GrSLTypeIs2DCombinedSamplerType(GrSLType type) {
+ SkASSERT(type >= 0 && type < static_cast<GrSLType>(kGrSLTypeCount));
+ return type >= kTexture2DSampler_GrSLType && type <= kTexture2DRectSampler_GrSLType;
+
+ GR_STATIC_ASSERT(8 == kTexture2DSampler_GrSLType);
+ GR_STATIC_ASSERT(9 == kTextureExternalSampler_GrSLType);
+ GR_STATIC_ASSERT(10 == kTexture2DRectSampler_GrSLType);
+}
+
+static inline bool GrSLTypeIsCombinedSamplerType(GrSLType type) {
+ SkASSERT(type >= 0 && type < static_cast<GrSLType>(kGrSLTypeCount));
+ return type >= kTexture2DSampler_GrSLType && type <= kTextureBufferSampler_GrSLType;
+
+ GR_STATIC_ASSERT(8 == kTexture2DSampler_GrSLType);
+ GR_STATIC_ASSERT(9 == kTextureExternalSampler_GrSLType);
+ GR_STATIC_ASSERT(10 == kTexture2DRectSampler_GrSLType);
+ GR_STATIC_ASSERT(11 == kTextureBufferSampler_GrSLType);
+}
+
+static inline bool GrSLTypeAcceptsPrecision(GrSLType type) {
+ return type != kVoid_GrSLType && type != kBool_GrSLType;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Types used to describe format of vertices in arrays.
+ */
+enum GrVertexAttribType {
+ kFloat_GrVertexAttribType = 0,
+ kVec2f_GrVertexAttribType,
+ kVec3f_GrVertexAttribType,
+ kVec4f_GrVertexAttribType,
+
+ kUByte_GrVertexAttribType, // unsigned byte, e.g. coverage
+ kVec4ub_GrVertexAttribType, // vector of 4 unsigned bytes, e.g. colors
+
+ kVec2us_GrVertexAttribType, // vector of 2 shorts, e.g. texture coordinates
+
+ kInt_GrVertexAttribType,
+ kUint_GrVertexAttribType,
+
+ kLast_GrVertexAttribType = kUint_GrVertexAttribType
+};
+static const int kGrVertexAttribTypeCount = kLast_GrVertexAttribType + 1;
+
+/**
+ * Returns the vector size of the type.
+ */
+static inline int GrVertexAttribTypeVectorCount(GrVertexAttribType type) {
+ SkASSERT(type >= 0 && type < kGrVertexAttribTypeCount);
+ static const int kCounts[] = { 1, 2, 3, 4, 1, 4, 2, 1, 1 };
+ return kCounts[type];
+
+ GR_STATIC_ASSERT(0 == kFloat_GrVertexAttribType);
+ GR_STATIC_ASSERT(1 == kVec2f_GrVertexAttribType);
+ GR_STATIC_ASSERT(2 == kVec3f_GrVertexAttribType);
+ GR_STATIC_ASSERT(3 == kVec4f_GrVertexAttribType);
+ GR_STATIC_ASSERT(4 == kUByte_GrVertexAttribType);
+ GR_STATIC_ASSERT(5 == kVec4ub_GrVertexAttribType);
+ GR_STATIC_ASSERT(6 == kVec2us_GrVertexAttribType);
+ GR_STATIC_ASSERT(7 == kInt_GrVertexAttribType);
+ GR_STATIC_ASSERT(8 == kUint_GrVertexAttribType);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kCounts) == kGrVertexAttribTypeCount);
+}
+
+/**
+ * Returns the size of the attrib type in bytes.
+ */
+static inline size_t GrVertexAttribTypeSize(GrVertexAttribType type) {
+ static const size_t kSizes[] = {
+ sizeof(float), // kFloat_GrVertexAttribType
+ 2*sizeof(float), // kVec2f_GrVertexAttribType
+ 3*sizeof(float), // kVec3f_GrVertexAttribType
+ 4*sizeof(float), // kVec4f_GrVertexAttribType
+ 1*sizeof(char), // kUByte_GrVertexAttribType
+ 4*sizeof(char), // kVec4ub_GrVertexAttribType
+ 2*sizeof(int16_t), // kVec2us_GrVertexAttribType
+ sizeof(int32_t), // kInt_GrVertexAttribType
+ sizeof(uint32_t) // kUint_GrVertexAttribType
+ };
+ return kSizes[type];
+
+ GR_STATIC_ASSERT(0 == kFloat_GrVertexAttribType);
+ GR_STATIC_ASSERT(1 == kVec2f_GrVertexAttribType);
+ GR_STATIC_ASSERT(2 == kVec3f_GrVertexAttribType);
+ GR_STATIC_ASSERT(3 == kVec4f_GrVertexAttribType);
+ GR_STATIC_ASSERT(4 == kUByte_GrVertexAttribType);
+ GR_STATIC_ASSERT(5 == kVec4ub_GrVertexAttribType);
+ GR_STATIC_ASSERT(6 == kVec2us_GrVertexAttribType);
+ GR_STATIC_ASSERT(7 == kInt_GrVertexAttribType);
+ GR_STATIC_ASSERT(8 == kUint_GrVertexAttribType);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kSizes) == kGrVertexAttribTypeCount);
+}
+
+/**
+ * Is the attrib type integral?
+ */
+static inline bool GrVertexAttribTypeIsIntType(GrVertexAttribType type) {
+ SkASSERT(type >= 0 && type < static_cast<GrVertexAttribType>(kGrVertexAttribTypeCount));
+ return type >= kInt_GrVertexAttribType;
+
+ GR_STATIC_ASSERT(0 == kFloat_GrVertexAttribType);
+ GR_STATIC_ASSERT(1 == kVec2f_GrVertexAttribType);
+ GR_STATIC_ASSERT(2 == kVec3f_GrVertexAttribType);
+ GR_STATIC_ASSERT(3 == kVec4f_GrVertexAttribType);
+ GR_STATIC_ASSERT(4 == kUByte_GrVertexAttribType);
+ GR_STATIC_ASSERT(5 == kVec4ub_GrVertexAttribType);
+ GR_STATIC_ASSERT(6 == kVec2us_GrVertexAttribType);
+ GR_STATIC_ASSERT(7 == kInt_GrVertexAttribType);
+ GR_STATIC_ASSERT(8 == kUint_GrVertexAttribType);
+ GR_STATIC_ASSERT(9 == kGrVertexAttribTypeCount);
+}
+
+/**
+ * Converts a GrVertexAttribType to a GrSLType.
+ */
+static inline GrSLType GrVertexAttribTypeToSLType(GrVertexAttribType type) {
+ switch (type) {
+ default:
+ SkFAIL("Unsupported type conversion");
+ return kVoid_GrSLType;
+ case kUByte_GrVertexAttribType:
+ case kFloat_GrVertexAttribType:
+ return kFloat_GrSLType;
+ case kVec2us_GrVertexAttribType:
+ case kVec2f_GrVertexAttribType:
+ return kVec2f_GrSLType;
+ case kVec3f_GrVertexAttribType:
+ return kVec3f_GrSLType;
+ case kVec4ub_GrVertexAttribType:
+ case kVec4f_GrVertexAttribType:
+ return kVec4f_GrSLType;
+ case kInt_GrVertexAttribType:
+ return kInt_GrSLType;
+ case kUint_GrVertexAttribType:
+ return kUint_GrSLType;
+ }
+}
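+
+// Example: a per-vertex color stored as kVec4ub_GrVertexAttribType occupies
+// GrVertexAttribTypeSize(kVec4ub_GrVertexAttribType) == 4 bytes and maps to
+// kVec4f_GrSLType in the shader via the conversion above.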
+
+//////////////////////////////////////////////////////////////////////////////
+
+/**
+* We have coverage effects that clip rendering to the edge of some geometric primitive.
+* This enum specifies how that clipping is performed. Not all factories that take a
+* GrProcessorEdgeType will succeed with all values and it is up to the caller to check for
+* a NULL return.
+*/
+enum GrPrimitiveEdgeType {
+ kFillBW_GrProcessorEdgeType,
+ kFillAA_GrProcessorEdgeType,
+ kInverseFillBW_GrProcessorEdgeType,
+ kInverseFillAA_GrProcessorEdgeType,
+ kHairlineAA_GrProcessorEdgeType,
+
+ kLast_GrProcessorEdgeType = kHairlineAA_GrProcessorEdgeType
+};
+
+static const int kGrProcessorEdgeTypeCnt = kLast_GrProcessorEdgeType + 1;
+
+static inline bool GrProcessorEdgeTypeIsFill(const GrPrimitiveEdgeType edgeType) {
+ return (kFillAA_GrProcessorEdgeType == edgeType || kFillBW_GrProcessorEdgeType == edgeType);
+}
+
+static inline bool GrProcessorEdgeTypeIsInverseFill(const GrPrimitiveEdgeType edgeType) {
+ return (kInverseFillAA_GrProcessorEdgeType == edgeType ||
+ kInverseFillBW_GrProcessorEdgeType == edgeType);
+}
+
+static inline bool GrProcessorEdgeTypeIsAA(const GrPrimitiveEdgeType edgeType) {
+ return (kFillBW_GrProcessorEdgeType != edgeType && kInverseFillBW_GrProcessorEdgeType != edgeType);
+}
+
+static inline GrPrimitiveEdgeType GrInvertProcessorEdgeType(const GrPrimitiveEdgeType edgeType) {
+ switch (edgeType) {
+ case kFillBW_GrProcessorEdgeType:
+ return kInverseFillBW_GrProcessorEdgeType;
+ case kFillAA_GrProcessorEdgeType:
+ return kInverseFillAA_GrProcessorEdgeType;
+ case kInverseFillBW_GrProcessorEdgeType:
+ return kFillBW_GrProcessorEdgeType;
+ case kInverseFillAA_GrProcessorEdgeType:
+ return kFillAA_GrProcessorEdgeType;
+ case kHairlineAA_GrProcessorEdgeType:
+ SkFAIL("Hairline fill isn't invertible.");
+ }
+ return kFillAA_GrProcessorEdgeType; // suppress warning.
+}
+
+/**
+ * Indicates the type of pending IO operations that can be recorded for gpu resources.
+ */
+enum GrIOType {
+ kRead_GrIOType,
+ kWrite_GrIOType,
+ kRW_GrIOType
+};
+
+/**
+* Indicates the type of data that a GPU buffer will be used for.
+*/
+enum GrBufferType {
+ kVertex_GrBufferType,
+ kIndex_GrBufferType,
+ kTexel_GrBufferType,
+ kDrawIndirect_GrBufferType,
+ kXferCpuToGpu_GrBufferType,
+ kXferGpuToCpu_GrBufferType,
+
+ kLast_GrBufferType = kXferGpuToCpu_GrBufferType
+};
+static const int kGrBufferTypeCount = kLast_GrBufferType + 1;
+
+static inline bool GrBufferTypeIsVertexOrIndex(GrBufferType type) {
+ SkASSERT(type >= 0 && type < kGrBufferTypeCount);
+ return type <= kIndex_GrBufferType;
+
+ GR_STATIC_ASSERT(0 == kVertex_GrBufferType);
+ GR_STATIC_ASSERT(1 == kIndex_GrBufferType);
+}
+
+/**
+* Provides a performance hint regarding the frequency at which a data store will be accessed.
+*/
+enum GrAccessPattern {
+ /** Data store will be respecified repeatedly and used many times. */
+ kDynamic_GrAccessPattern,
+ /** Data store will be specified once and used many times. (Thus disqualified from caching.) */
+ kStatic_GrAccessPattern,
+ /** Data store will be specified once and used at most a few times. (Also can't be cached.) */
+ kStream_GrAccessPattern,
+
+ kLast_GrAccessPattern = kStream_GrAccessPattern
+};
+
+
+#ifdef SK_DEBUG
+// Takes a pointer to a GrCaps, and will suppress prints if required
+#define GrCapsDebugf(caps, ...) \
+ if (!caps->suppressPrints()) { \
+ SkDebugf(__VA_ARGS__); \
+ }
+#else
+#define GrCapsDebugf(caps, ...)
+#endif
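+
+// Illustrative usage (editorial example, not part of the original header):
+// GrCapsDebugf forwards to SkDebugf unless the caps object suppresses prints,
+// e.g.
+//
+//     GrCapsDebugf(caps, "MSAA render target not supported; falling back.\n");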
+
+/**
+ * Specifies whether the holder owns the backend (OpenGL or Vulkan) object.
+ */
+enum class GrBackendObjectOwnership : bool {
+ /** Holder does not destroy the backend object. */
+ kBorrowed = false,
+ /** Holder destroys the backend object. */
+ kOwned = true
+};
+
+template <typename T> T * const * sk_sp_address_as_pointer_address(sk_sp<T> const * sp) {
+ static_assert(sizeof(T*) == sizeof(sk_sp<T>), "sk_sp not expected size.");
+ return reinterpret_cast<T * const *>(sp);
+}
+
+/*
+ * Object for CPU-GPU synchronization
+ */
+typedef intptr_t GrFence;
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrXferProcessor.h b/gfx/skia/skia/include/gpu/GrXferProcessor.h
new file mode 100644
index 000000000..1d4717e15
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrXferProcessor.h
@@ -0,0 +1,388 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrXferProcessor_DEFINED
+#define GrXferProcessor_DEFINED
+
+#include "GrBlend.h"
+#include "GrColor.h"
+#include "GrProcessor.h"
+#include "GrTexture.h"
+#include "GrTypes.h"
+#include "SkXfermode.h"
+
+class GrShaderCaps;
+class GrGLSLCaps;
+class GrGLSLXferProcessor;
+class GrProcOptInfo;
+struct GrPipelineOptimizations;
+
+/**
+ * Barriers for blending. When a shader reads the dst directly, an Xfer barrier is sometimes
+ * required after a pixel has been written, before it can be safely read again.
+ */
+enum GrXferBarrierType {
+ kNone_GrXferBarrierType = 0, //!< No barrier is required
+ kTexture_GrXferBarrierType, //!< Required when a shader reads and renders to the same texture.
+ kBlend_GrXferBarrierType, //!< Required by certain blend extensions.
+};
+/** Should be able to treat kNone as false in boolean expressions */
+GR_STATIC_ASSERT(SkToBool(kNone_GrXferBarrierType) == false);
+
+/**
+ * GrXferProcessor is responsible for implementing the xfer mode that blends the src color and dst
+ * color, and for applying any coverage. It does this by emitting fragment shader code and
+ * controlling the fixed-function blend state. When dual-source blending is available, it may also
+ * write a secondary fragment shader output color. GrXferProcessor has two modes of operation:
+ *
+ * Dst read: When allowed by the backend API, or when supplied a texture of the destination, the
+ * GrXferProcessor may read the destination color. While operating in this mode, the subclass only
+ * provides shader code that blends the src and dst colors, and the base class applies coverage.
+ *
+ * No dst read: When not performing a dst read, the subclass is given full control of the fixed-
+ * function blend state and/or secondary output, and is responsible for applying coverage on its own.
+ *
+ * A GrXferProcessor is never installed directly into our draw state, but instead is created from a
+ * GrXPFactory once we have finalized the state of our draw.
+ */
+class GrXferProcessor : public GrProcessor {
+public:
+ /**
+ * A texture that contains the dst pixel values and an integer coord offset from device space
+ * to the space of the texture. Depending on GPU capabilities a DstTexture may be used by a
+ * GrXferProcessor for blending in the fragment shader.
+ */
+ class DstTexture {
+ public:
+ DstTexture() { fOffset.set(0, 0); }
+
+ DstTexture(const DstTexture& other) {
+ *this = other;
+ }
+
+ DstTexture(GrTexture* texture, const SkIPoint& offset)
+ : fTexture(SkSafeRef(texture))
+ , fOffset(offset) {
+ }
+
+ DstTexture& operator=(const DstTexture& other) {
+ fTexture.reset(SkSafeRef(other.fTexture.get()));
+ fOffset = other.fOffset;
+ return *this;
+ }
+
+ const SkIPoint& offset() const { return fOffset; }
+
+ void setOffset(const SkIPoint& offset) { fOffset = offset; }
+ void setOffset(int ox, int oy) { fOffset.set(ox, oy); }
+
+ GrTexture* texture() const { return fTexture.get(); }
+
+ GrTexture* setTexture(GrTexture* texture) {
+ fTexture.reset(SkSafeRef(texture));
+ return texture;
+ }
+
+ private:
+ SkAutoTUnref<GrTexture> fTexture;
+ SkIPoint fOffset;
+ };
+
+ /**
+ * Sets a unique key on the GrProcessorKeyBuilder. This calls onGetGLSLProcessorKey(...) to get
+ * the specific subclass's key.
+ */
+ void getGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const;
+
+ /** Returns a new instance of the appropriate *GL* implementation class
+ for the given GrXferProcessor; caller is responsible for deleting
+ the object. */
+ virtual GrGLSLXferProcessor* createGLSLInstance() const = 0;
+
+ /**
+ * Optimizations for blending / coverage that an OptDrawState should apply to itself.
+ */
+ enum OptFlags {
+ /**
+ * The draw can be skipped completely.
+ */
+ kSkipDraw_OptFlag = 0x1,
+ /**
+ * GrXferProcessor will ignore color, so there is no need to provide it.
+ */
+ kIgnoreColor_OptFlag = 0x2,
+ /**
+ * GrXferProcessor will ignore coverage, so there is no need to provide it.
+ */
+ kIgnoreCoverage_OptFlag = 0x4,
+ /**
+ * Clear color stages and override input color to that returned by getOptimizations
+ */
+ kOverrideColor_OptFlag = 0x8,
+ /**
+ * Can tweak alpha for coverage. Currently this flag should only be used by a batch
+ */
+ kCanTweakAlphaForCoverage_OptFlag = 0x20,
+ };
+
+ static const OptFlags kNone_OptFlags = (OptFlags)0;
+
+ GR_DECL_BITFIELD_OPS_FRIENDS(OptFlags);
+
+ /**
+ * Determines which optimizations (as described by the OptFlags above) can be performed by
+ * the draw with this xfer processor. If this function is called, the xfer processor may change
+ * its state to reflect the given blend optimizations. If the XP needs to see a specific input
+ * color to blend correctly, it will set the OverrideColor flag and the output parameter
+ * overrideColor will be the required value that should be passed into the XP.
+ * A caller who calls this function on an XP is required to honor the returned OptFlags
+ * and color values for its draw.
+ */
+ OptFlags getOptimizations(const GrPipelineOptimizations& optimizations,
+ bool doesStencilWrite,
+ GrColor* overrideColor,
+ const GrCaps& caps) const;
+
+ /**
+ * Returns the type of Xfer barrier, if any, that this XP requires on the given rt.
+ * kNone_GrXferBarrierType indicates that no barrier is needed.
+ */
+ GrXferBarrierType xferBarrierType(const GrRenderTarget* rt, const GrCaps& caps) const;
+
+ struct BlendInfo {
+ void reset() {
+ fEquation = kAdd_GrBlendEquation;
+ fSrcBlend = kOne_GrBlendCoeff;
+ fDstBlend = kZero_GrBlendCoeff;
+ fBlendConstant = 0;
+ fWriteColor = true;
+ }
+
+ SkDEBUGCODE(SkString dump() const;)
+
+ GrBlendEquation fEquation;
+ GrBlendCoeff fSrcBlend;
+ GrBlendCoeff fDstBlend;
+ GrColor fBlendConstant;
+ bool fWriteColor;
+ };
+
+ void getBlendInfo(BlendInfo* blendInfo) const;
+
+ bool willReadDstColor() const { return fWillReadDstColor; }
+
+ /**
+ * Returns the texture to be used as the destination when reading the dst in the fragment
+ * shader. If the returned texture is NULL then the XP is either not reading the dst or we have
+ * extensions that support framebuffer fetching and thus don't need a copy of the dst texture.
+ */
+ const GrTexture* getDstTexture() const { return fDstTexture.getTexture(); }
+
+ /**
+ * Returns the offset in device coords to use when accessing the dst texture to get the dst
+ * pixel color in the shader. This value is only valid if getDstTexture() != NULL.
+ */
+ const SkIPoint& dstTextureOffset() const {
+ SkASSERT(this->getDstTexture());
+ return fDstTextureOffset;
+ }
+
+ /**
+ * If we are performing a dst read, returns whether the base class will use mixed samples to
+ * antialias the shader's final output. If not doing a dst read, the subclass is responsible
+ * for antialiasing and this returns false.
+ */
+ bool dstReadUsesMixedSamples() const { return fDstReadUsesMixedSamples; }
+
+ /**
+ * Returns whether or not this xfer processor will set a secondary output to be used with dual
+ * source blending.
+ */
+ bool hasSecondaryOutput() const;
+
+ /** Returns true if this and the other processor conservatively draw identically. It can only
+ return true when the two processors are of the same subclass (i.e. they return the same
+ object from getFactory()).
+
+ A return value of true from isEqual() should not be used to test whether the processors would
+ generate the same shader code. To test for identical code generation use getGLSLProcessorKey().
+ */
+
+ bool isEqual(const GrXferProcessor& that) const {
+ if (this->classID() != that.classID()) {
+ return false;
+ }
+ if (this->fWillReadDstColor != that.fWillReadDstColor) {
+ return false;
+ }
+ if (this->fDstTexture.getTexture() != that.fDstTexture.getTexture()) {
+ return false;
+ }
+ if (this->fDstTextureOffset != that.fDstTextureOffset) {
+ return false;
+ }
+ if (this->fDstReadUsesMixedSamples != that.fDstReadUsesMixedSamples) {
+ return false;
+ }
+ return this->onIsEqual(that);
+ }
+
+protected:
+ GrXferProcessor();
+ GrXferProcessor(const DstTexture*, bool willReadDstColor, bool hasMixedSamples);
+
+private:
+ void notifyRefCntIsZero() const final {}
+
+ virtual OptFlags onGetOptimizations(const GrPipelineOptimizations& optimizations,
+ bool doesStencilWrite,
+ GrColor* overrideColor,
+ const GrCaps& caps) const = 0;
+
+ /**
+ * Sets a unique key on the GrProcessorKeyBuilder that is directly associated with this xfer
+ * processor's GL backend implementation.
+ */
+ virtual void onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const = 0;
+
+ /**
+ * Determines the type of barrier (if any) required by the subclass. Note that the possibility
+ * that a kTexture type barrier is required is handled by the base class and need not be
+ * considered by subclass overrides of this function.
+ */
+ virtual GrXferBarrierType onXferBarrier(const GrRenderTarget*, const GrCaps&) const {
+ return kNone_GrXferBarrierType;
+ }
+
+ /**
+ * If we are not performing a dst read, returns whether the subclass will set a secondary
+ * output. When using dst reads, the base class controls the secondary output and this method
+ * will not be called.
+ */
+ virtual bool onHasSecondaryOutput() const { return false; }
+
+ /**
+ * If we are not performing a dst read, retrieves the fixed-function blend state required by the
+ * subclass. When using dst reads, the base class controls the fixed-function blend state and
+ * this method will not be called. The BlendInfo struct comes initialized to "no blending".
+ */
+ virtual void onGetBlendInfo(BlendInfo*) const {}
+
+ virtual bool onIsEqual(const GrXferProcessor&) const = 0;
+
+ bool fWillReadDstColor;
+ bool fDstReadUsesMixedSamples;
+ SkIPoint fDstTextureOffset;
+ GrTextureAccess fDstTexture;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+GR_MAKE_BITFIELD_OPS(GrXferProcessor::OptFlags);
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * We install a GrXPFactory (XPF) early on in the pipeline before all the final draw information is
+ * known (e.g. whether there is fractional pixel coverage, whether coverage will be 1 or 4
+ * channels, whether the draw is opaque, etc.). Once the state of the draw is finalized, we use the XPF along with all the
+ * draw information to create a GrXferProcessor (XP) which can implement the desired blending for
+ * the draw.
+ *
+ * Before the XP is created, the XPF is able to answer queries about what functionality the XPs it
+ * creates will have. For example, can it create an XP that supports RGB coverage or will the XP
+ * blend with the destination color.
+ */
+class GrXPFactory : public SkRefCnt {
+public:
+ typedef GrXferProcessor::DstTexture DstTexture;
+ GrXferProcessor* createXferProcessor(const GrPipelineOptimizations& optimizations,
+ bool hasMixedSamples,
+ const DstTexture*,
+ const GrCaps& caps) const;
+ /**
+ * Known color information after blending, but before accounting for any coverage.
+ */
+ struct InvariantBlendedColor {
+ bool fWillBlendWithDst;
+ GrColor fKnownColor;
+ GrColorComponentFlags fKnownColorFlags;
+ };
+
+ /**
+ * Returns information about the output color, produced by XPs from this factory, that will be
+ * known after blending. Note that we can conflate coverage and color, so the actual values
+ * written to pixels with partial coverage may not always seem consistent with the invariant
+ * information returned by this function.
+ */
+ virtual void getInvariantBlendedColor(const GrProcOptInfo& colorPOI,
+ InvariantBlendedColor*) const = 0;
+
+ bool willNeedDstTexture(const GrCaps& caps, const GrPipelineOptimizations& optimizations) const;
+
+ bool isEqual(const GrXPFactory& that) const {
+ if (this->classID() != that.classID()) {
+ return false;
+ }
+ return this->onIsEqual(that);
+ }
+
+ /**
+ * Helper for down-casting to a GrXPFactory subclass
+ */
+ template <typename T> const T& cast() const { return *static_cast<const T*>(this); }
+
+ uint32_t classID() const { SkASSERT(kIllegalXPFClassID != fClassID); return fClassID; }
+
+protected:
+ GrXPFactory() : fClassID(kIllegalXPFClassID) {}
+
+ template <typename XPF_SUBCLASS> void initClassID() {
+ static uint32_t kClassID = GenClassID();
+ fClassID = kClassID;
+ }
+
+ uint32_t fClassID;
+
+private:
+ virtual GrXferProcessor* onCreateXferProcessor(const GrCaps& caps,
+ const GrPipelineOptimizations& optimizations,
+ bool hasMixedSamples,
+ const DstTexture*) const = 0;
+
+ virtual bool onIsEqual(const GrXPFactory&) const = 0;
+
+ bool willReadDstColor(const GrCaps&, const GrPipelineOptimizations&) const;
+ /**
+ * Returns true if the XP generated by this factory will explicitly read dst in the fragment
+ * shader.
+ */
+ virtual bool onWillReadDstColor(const GrCaps&, const GrPipelineOptimizations&) const = 0;
+
+ static uint32_t GenClassID() {
+ // gCurrXPFClassID has been initialized to kIllegalXPFClassID. The
+ // atomic inc returns the old value, not the incremented value, so we
+ // add 1 to the returned value.
+ uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrXPFClassID)) + 1;
+ if (!id) {
+ SkFAIL("This should never wrap as it should only be called once for each GrXPFactory "
+ "subclass.");
+ }
+ return id;
+ }
+
+ enum {
+ kIllegalXPFClassID = 0,
+ };
+ static int32_t gCurrXPFClassID;
+
+ typedef GrProgramElement INHERITED;
+};
+
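+// Illustrative sketch (editorial example, not part of the original header): once
+// the draw state is finalized, a pipeline would typically ask the installed
+// factory for the concrete processor and then query its blend state. The local
+// variable names below are hypothetical:
+//
+//     SkAutoTUnref<GrXferProcessor> xp(
+//         xpFactory->createXferProcessor(optimizations, hasMixedSamples,
+//                                        &dstTexture, caps));
+//     GrXferProcessor::BlendInfo blendInfo;
+//     xp->getBlendInfo(&blendInfo);
+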
+#endif
+
diff --git a/gfx/skia/skia/include/gpu/SkGr.h b/gfx/skia/skia/include/gpu/SkGr.h
new file mode 100644
index 000000000..43d61e250
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/SkGr.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGr_DEFINED
+#define SkGr_DEFINED
+
+#include "GrColor.h"
+#include "GrTextureAccess.h"
+#include "SkColor.h"
+#include "SkColorPriv.h"
+#include "SkFilterQuality.h"
+#include "SkImageInfo.h"
+
+class GrCaps;
+class GrColorSpaceXform;
+class GrContext;
+class GrTexture;
+class GrTextureParams;
+class SkBitmap;
+
+////////////////////////////////////////////////////////////////////////////////
+// Sk to Gr Type conversions
+
+static inline GrColor SkColorToPremulGrColor(SkColor c) {
+ SkPMColor pm = SkPreMultiplyColor(c);
+ unsigned r = SkGetPackedR32(pm);
+ unsigned g = SkGetPackedG32(pm);
+ unsigned b = SkGetPackedB32(pm);
+ unsigned a = SkGetPackedA32(pm);
+ return GrColorPackRGBA(r, g, b, a);
+}
+
+static inline GrColor SkColorToUnpremulGrColor(SkColor c) {
+ unsigned r = SkColorGetR(c);
+ unsigned g = SkColorGetG(c);
+ unsigned b = SkColorGetB(c);
+ unsigned a = SkColorGetA(c);
+ return GrColorPackRGBA(r, g, b, a);
+}
+
+GrColor4f SkColorToPremulGrColor4f(SkColor c, bool gammaCorrect, GrColorSpaceXform* gamutXform);
+GrColor4f SkColorToUnpremulGrColor4f(SkColor c, bool gammaCorrect, GrColorSpaceXform* gamutXform);
+
+static inline GrColor SkColorToOpaqueGrColor(SkColor c) {
+ unsigned r = SkColorGetR(c);
+ unsigned g = SkColorGetG(c);
+ unsigned b = SkColorGetB(c);
+ return GrColorPackRGBA(r, g, b, 0xFF);
+}
+
+/** Replicates the SkColor's alpha to all four channels of the GrColor. */
+static inline GrColor SkColorAlphaToGrColor(SkColor c) {
+ U8CPU a = SkColorGetA(c);
+ return GrColorPackRGBA(a, a, a, a);
+}
+
+static inline SkPMColor GrColorToSkPMColor(GrColor c) {
+ GrColorIsPMAssert(c);
+ return SkPackARGB32(GrColorUnpackA(c), GrColorUnpackR(c), GrColorUnpackG(c), GrColorUnpackB(c));
+}
+
+static inline GrColor SkPMColorToGrColor(SkPMColor c) {
+ return GrColorPackRGBA(SkGetPackedR32(c), SkGetPackedG32(c), SkGetPackedB32(c),
+ SkGetPackedA32(c));
+}
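+
+// Illustrative example (editorial, not part of the original header): packing an
+// SkColor into a premultiplied GrColor for the GPU backend:
+//
+//     GrColor grRed = SkColorToPremulGrColor(SK_ColorRED);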
+
+////////////////////////////////////////////////////////////////////////////////
+/** Returns a texture representing the bitmap that is compatible with the GrTextureParams. The
+ texture is inserted into the cache (unless the bitmap is marked volatile) and can be
+ retrieved again via this function. */
+GrTexture* GrRefCachedBitmapTexture(GrContext*, const SkBitmap&, const GrTextureParams&,
+ SkSourceGammaTreatment);
+
+sk_sp<GrTexture> GrMakeCachedBitmapTexture(GrContext*, const SkBitmap&, const GrTextureParams&,
+ SkSourceGammaTreatment);
+
+// TODO: Move SkImageInfo2GrPixelConfig to SkGrPriv.h (requires cleanup to SkWindow and its subclasses).
+GrPixelConfig SkImageInfo2GrPixelConfig(SkColorType, SkAlphaType, const SkColorSpace*,
+ const GrCaps&);
+
+static inline GrPixelConfig SkImageInfo2GrPixelConfig(const SkImageInfo& info, const GrCaps& caps) {
+ return SkImageInfo2GrPixelConfig(info.colorType(), info.alphaType(), info.colorSpace(), caps);
+}
+
+GrTextureParams::FilterMode GrSkFilterQualityToGrFilterMode(SkFilterQuality paintFilterQuality,
+ const SkMatrix& viewM,
+ const SkMatrix& localM,
+ bool* doBicubic);
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/effects/GrConstColorProcessor.h b/gfx/skia/skia/include/gpu/effects/GrConstColorProcessor.h
new file mode 100644
index 000000000..e9781bb22
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/effects/GrConstColorProcessor.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrColorProcessor_DEFINED
+#define GrColorProcessor_DEFINED
+
+#include "GrFragmentProcessor.h"
+
+/**
+ * This is a simple GrFragmentProcessor that outputs a constant color. It may do one of the
+ * following with its input color: ignore it, multiply it by the constant color, or multiply its
+ * alpha by the constant color and ignore the input color's r, g, and b.
+ */
+class GrConstColorProcessor : public GrFragmentProcessor {
+public:
+ enum InputMode {
+ kIgnore_InputMode,
+ kModulateRGBA_InputMode,
+ kModulateA_InputMode,
+
+ kLastInputMode = kModulateA_InputMode
+ };
+ static const int kInputModeCnt = kLastInputMode + 1;
+
+ static sk_sp<GrFragmentProcessor> Make(GrColor color, InputMode mode) {
+ return sk_sp<GrFragmentProcessor>(new GrConstColorProcessor(color, mode));
+ }
+
+ const char* name() const override { return "Color"; }
+
+ SkString dumpInfo() const override {
+ SkString str;
+ str.appendf("Color: 0x%08x", fColor);
+ return str;
+ }
+
+ GrColor color() const { return fColor; }
+
+ InputMode inputMode() const { return fMode; }
+
+private:
+ GrConstColorProcessor(GrColor color, InputMode mode) : fColor(color), fMode(mode) {
+ this->initClassID<GrConstColorProcessor>();
+ }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ GrColor fColor;
+ InputMode fMode;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
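+// Illustrative usage (editorial example, not part of the original header):
+// a fragment processor that modulates its input by 50% grey could be built as
+//
+//     sk_sp<GrFragmentProcessor> fp = GrConstColorProcessor::Make(
+//         GrColorPackRGBA(0x80, 0x80, 0x80, 0x80),
+//         GrConstColorProcessor::kModulateRGBA_InputMode);
+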
+#endif
diff --git a/gfx/skia/skia/include/gpu/effects/GrCoverageSetOpXP.h b/gfx/skia/skia/include/gpu/effects/GrCoverageSetOpXP.h
new file mode 100644
index 000000000..e5d197f33
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/effects/GrCoverageSetOpXP.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCoverageSetOpXP_DEFINED
+#define GrCoverageSetOpXP_DEFINED
+
+#include "GrTypes.h"
+#include "GrXferProcessor.h"
+#include "SkRegion.h"
+
+class GrProcOptInfo;
+
+/**
+ * This xfer processor directly blends the src coverage with the dst using a set operator. It is
+ * useful for rendering coverage masks using CSG. It can optionally invert the src coverage before
+ * applying the set operator.
+ */
+class GrCoverageSetOpXPFactory : public GrXPFactory {
+public:
+ static sk_sp<GrXPFactory> Make(SkRegion::Op regionOp, bool invertCoverage = false);
+
+ void getInvariantBlendedColor(const GrProcOptInfo& colorPOI,
+ GrXPFactory::InvariantBlendedColor*) const override;
+
+private:
+ GrCoverageSetOpXPFactory(SkRegion::Op regionOp, bool invertCoverage);
+
+ GrXferProcessor* onCreateXferProcessor(const GrCaps& caps,
+ const GrPipelineOptimizations& optimizations,
+ bool hasMixedSamples,
+ const DstTexture*) const override;
+
+ bool onWillReadDstColor(const GrCaps&, const GrPipelineOptimizations&) const override {
+ return false;
+ }
+
+ bool onIsEqual(const GrXPFactory& xpfBase) const override {
+ const GrCoverageSetOpXPFactory& xpf = xpfBase.cast<GrCoverageSetOpXPFactory>();
+ return fRegionOp == xpf.fRegionOp;
+ }
+
+ GR_DECLARE_XP_FACTORY_TEST;
+
+ SkRegion::Op fRegionOp;
+ bool fInvertCoverage;
+
+ typedef GrXPFactory INHERITED;
+};
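+
+// Illustrative usage (editorial example, not part of the original header):
+// intersecting a coverage mask with the destination could use
+//
+//     sk_sp<GrXPFactory> xpf =
+//         GrCoverageSetOpXPFactory::Make(SkRegion::kIntersect_Op,
+//                                        /*invertCoverage=*/false);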
+#endif
+
diff --git a/gfx/skia/skia/include/gpu/effects/GrCustomXfermode.h b/gfx/skia/skia/include/gpu/effects/GrCustomXfermode.h
new file mode 100644
index 000000000..3bd321447
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/effects/GrCustomXfermode.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCustomXfermode_DEFINED
+#define GrCustomXfermode_DEFINED
+
+#include "SkXfermode.h"
+
+class GrTexture;
+
+/**
+ * Custom Xfer modes are used for blending when the blend mode cannot be represented using blend
+ * coefficients.
+ */
+namespace GrCustomXfermode {
+ bool IsSupportedMode(SkXfermode::Mode mode);
+ sk_sp<GrXPFactory> MakeXPFactory(SkXfermode::Mode mode);
+};
+
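+// Illustrative usage (editorial example, not part of the original header):
+// advanced blends are gated on IsSupportedMode() before the factory is made:
+//
+//     if (GrCustomXfermode::IsSupportedMode(SkXfermode::kMultiply_Mode)) {
+//         sk_sp<GrXPFactory> xpf =
+//             GrCustomXfermode::MakeXPFactory(SkXfermode::kMultiply_Mode);
+//     }
+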
+#endif
diff --git a/gfx/skia/skia/include/gpu/effects/GrPorterDuffXferProcessor.h b/gfx/skia/skia/include/gpu/effects/GrPorterDuffXferProcessor.h
new file mode 100644
index 000000000..6777d7604
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/effects/GrPorterDuffXferProcessor.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPorterDuffXferProcessor_DEFINED
+#define GrPorterDuffXferProcessor_DEFINED
+
+#include "GrTypes.h"
+#include "GrXferProcessor.h"
+#include "SkXfermode.h"
+
+class GrProcOptInfo;
+
+class GrPorterDuffXPFactory : public GrXPFactory {
+public:
+ static sk_sp<GrXPFactory> Make(SkXfermode::Mode mode);
+ static sk_sp<GrXPFactory> Make(SkBlendMode mode) {
+ return Make((SkXfermode::Mode)mode);
+ }
+
+ void getInvariantBlendedColor(const GrProcOptInfo& colorPOI,
+ GrXPFactory::InvariantBlendedColor*) const override;
+
+
+ /** Because src-over is so common we special case it for performance reasons. If this returns
+ null then the SimpleSrcOverXP() below should be used. */
+ static GrXferProcessor* CreateSrcOverXferProcessor(const GrCaps& caps,
+ const GrPipelineOptimizations& optimizations,
+ bool hasMixedSamples,
+ const GrXferProcessor::DstTexture*);
+ /** This XP implements non-LCD src-over using hw blend with no optimizations. It is returned
+ by reference because it is global and its ref-cnting methods are not thread safe. */
+ static const GrXferProcessor& SimpleSrcOverXP();
+
+ static inline void SrcOverInvariantBlendedColor(
+ GrColor inputColor,
+ GrColorComponentFlags validColorFlags,
+ bool isOpaque,
+ GrXPFactory::InvariantBlendedColor* blendedColor) {
+ if (!isOpaque) {
+ blendedColor->fWillBlendWithDst = true;
+ blendedColor->fKnownColorFlags = kNone_GrColorComponentFlags;
+ return;
+ }
+ blendedColor->fWillBlendWithDst = false;
+
+ blendedColor->fKnownColor = inputColor;
+ blendedColor->fKnownColorFlags = validColorFlags;
+ }
+
+ static bool SrcOverWillNeedDstTexture(const GrCaps&, const GrPipelineOptimizations&);
+
+private:
+ GrPorterDuffXPFactory(SkXfermode::Mode);
+
+ GrXferProcessor* onCreateXferProcessor(const GrCaps& caps,
+ const GrPipelineOptimizations& optimizations,
+ bool hasMixedSamples,
+ const DstTexture*) const override;
+
+ bool onWillReadDstColor(const GrCaps&, const GrPipelineOptimizations&) const override;
+
+ bool onIsEqual(const GrXPFactory& xpfBase) const override {
+ const GrPorterDuffXPFactory& xpf = xpfBase.cast<GrPorterDuffXPFactory>();
+ return fXfermode == xpf.fXfermode;
+ }
+
+ GR_DECLARE_XP_FACTORY_TEST;
+ static void TestGetXPOutputTypes(const GrXferProcessor*, int* outPrimary, int* outSecondary);
+
+ SkXfermode::Mode fXfermode;
+
+ friend class GrPorterDuffTest; // for TestGetXPOutputTypes()
+ typedef GrXPFactory INHERITED;
+};
+
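+// Illustrative usage (editorial example, not part of the original header):
+// a standard Porter-Duff src-over pipeline would be configured with
+//
+//     sk_sp<GrXPFactory> xpf = GrPorterDuffXPFactory::Make(SkXfermode::kSrcOver_Mode);
+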
+#endif
diff --git a/gfx/skia/skia/include/gpu/effects/GrXfermodeFragmentProcessor.h b/gfx/skia/skia/include/gpu/effects/GrXfermodeFragmentProcessor.h
new file mode 100644
index 000000000..0e2435ea9
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/effects/GrXfermodeFragmentProcessor.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrXfermodeFragmentProcessor_DEFINED
+#define GrXfermodeFragmentProcessor_DEFINED
+
+#include "SkXfermode.h"
+
+class GrFragmentProcessor;
+
+namespace GrXfermodeFragmentProcessor {
+ /** The color input to the returned processor is treated as the src and the passed in processor
+ is the dst. */
+ sk_sp<GrFragmentProcessor> MakeFromDstProcessor(sk_sp<GrFragmentProcessor> dst,
+ SkXfermode::Mode mode);
+
+ /** The color input to the returned processor is treated as the dst and the passed in processor
+ is the src. */
+ sk_sp<GrFragmentProcessor> MakeFromSrcProcessor(sk_sp<GrFragmentProcessor> src,
+ SkXfermode::Mode mode);
+
+ /** Takes the input color, which is assumed to be unpremultiplied, and passes it as an opaque
+ color to both src and dst. The outputs of src and dst are blended using mode, and the
+ original input's alpha is applied to the blended color to produce a premul output. */
+ sk_sp<GrFragmentProcessor> MakeFromTwoProcessors(sk_sp<GrFragmentProcessor> src,
+ sk_sp<GrFragmentProcessor> dst,
+ SkXfermode::Mode mode);
+};
+
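+// Illustrative usage (editorial example, not part of the original header):
+// blending the outputs of two existing fragment processors with multiply
+// (fpSrc and fpDst are hypothetical, previously-built processors):
+//
+//     sk_sp<GrFragmentProcessor> blended =
+//         GrXfermodeFragmentProcessor::MakeFromTwoProcessors(
+//             std::move(fpSrc), std::move(fpDst), SkXfermode::kMultiply_Mode);
+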
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLAssembleInterface.h b/gfx/skia/skia/include/gpu/gl/GrGLAssembleInterface.h
new file mode 100644
index 000000000..b9881a99b
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLAssembleInterface.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "gl/GrGLInterface.h"
+
+typedef GrGLFuncPtr (*GrGLGetProc)(void* ctx, const char name[]);
+
+
+/**
+ * Generic function for creating a GrGLInterface for either an OpenGL or a GLES context. It calls
+ * get() to get each function address. ctx is a generic ptr passed to and interpreted by get().
+ */
+SK_API const GrGLInterface* GrGLAssembleInterface(void* ctx, GrGLGetProc get);
+
+/**
+ * Generic function for creating a GrGLInterface for an OpenGL (but not GLES) context. It calls
+ * get() to get each function address. ctx is a generic ptr passed to and interpreted by get().
+ */
+SK_API const GrGLInterface* GrGLAssembleGLInterface(void* ctx, GrGLGetProc get);
+
+/**
+ * Generic function for creating a GrGLInterface for an OpenGL ES (but not OpenGL) context. It
+ * calls get() to get each function address. ctx is a generic ptr passed to and interpreted by
+ * get().
+ */
+SK_API const GrGLInterface* GrGLAssembleGLESInterface(void* ctx, GrGLGetProc get);
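+
+// Illustrative sketch (editorial example, not part of the original header): a
+// caller typically adapts its platform loader to the GrGLGetProc signature and
+// passes it in. MyPlatformGetProcAddress is a hypothetical loader function:
+//
+//     static GrGLFuncPtr my_get_proc(void* ctx, const char name[]) {
+//         return reinterpret_cast<GrGLFuncPtr>(MyPlatformGetProcAddress(name));
+//     }
+//     const GrGLInterface* iface = GrGLAssembleInterface(nullptr, my_get_proc);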
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLConfig.h b/gfx/skia/skia/include/gpu/gl/GrGLConfig.h
new file mode 100644
index 000000000..20ee37fe3
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLConfig.h
@@ -0,0 +1,129 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrGLConfig_DEFINED
+#define GrGLConfig_DEFINED
+
+#include "GrTypes.h"
+
+/**
+ * Optional GL config file.
+ */
+#ifdef GR_GL_CUSTOM_SETUP_HEADER
+ #include GR_GL_CUSTOM_SETUP_HEADER
+#endif
+
+#if !defined(GR_GL_FUNCTION_TYPE)
+ #if defined(SK_BUILD_FOR_WIN32)
+ #define GR_GL_FUNCTION_TYPE __stdcall
+ #else
+ #define GR_GL_FUNCTION_TYPE
+ #endif
+#endif
+
+/**
+ * The following are optional defines that can be enabled at the compiler
+ * command line, in an IDE project, in a GrUserConfig.h file, or in a GL custom
+ * file (if one is in use). If a GR_GL_CUSTOM_SETUP_HEADER is used they can
+ * also be placed there.
+ *
+ * GR_GL_LOG_CALLS: if 1, Gr can print every GL call using SkDebugf. Defaults to
+ * 0. Logging can be enabled and disabled at runtime using a debugger via the
+ * global gLogCallsGL. The initial value of gLogCallsGL is controlled by
+ * GR_GL_LOG_CALLS_START.
+ *
+ * GR_GL_LOG_CALLS_START: controls the initial value of gLogCallsGL when
+ * GR_GL_LOG_CALLS is 1. Defaults to 0.
+ *
+ * GR_GL_CHECK_ERROR: if enabled, Gr can do a glGetError() after every GL call.
+ * Defaults to 1 if SK_DEBUG is set, otherwise 0. When GR_GL_CHECK_ERROR is 1
+ * this can be toggled in a debugger using the gCheckErrorGL global. The initial
+ * value of gCheckErrorGL is controlled by GR_GL_CHECK_ERROR_START.
+ *
+ * GR_GL_CHECK_ERROR_START: controls the initial value of gCheckErrorGL
+ * when GR_GL_CHECK_ERROR is 1. Defaults to 1.
+ *
+ * GR_GL_USE_BUFFER_DATA_NULL_HINT: When specifying new data for a vertex/index
+ * buffer that replaces old data, Ganesh can give a hint to the driver that the
+ * previous data will not be used in future draws like this:
+ * glBufferData(GL_..._BUFFER, size, NULL, usage); //<--hint, NULL means
+ * glBufferSubData(GL_..._BUFFER, 0, lessThanSize, data) // old data can't be
+ * // used again.
+ * However, this can be counterproductive on some platforms, esp. Chrome.
+ * Chrome's cmd buffer will create a new allocation and memset the whole thing
+ * to zero (for security reasons). Defaults to 1 (enabled).
+ *
+ * GR_GL_CHECK_ALLOC_WITH_GET_ERROR: If set to 1 then calls to glTexImage,
+ * glBufferData, glRenderbufferStorage, etc. will be checked for errors. This
+ * amounts to ensuring the error is GL_NO_ERROR, calling the allocating
+ * function, and then checking that the error is still GL_NO_ERROR. When the
+ * value is 0 we will assume no error was generated without checking.
+ *
+ * GR_GL_CHECK_FBO_STATUS_ONCE_PER_FORMAT: We will normally check the FBO status
+ * every time we bind a texture or renderbuffer to an FBO. However, in some
+ * environments CheckFrameBufferStatus is very expensive. If this is set we will
+ * check the first time we use a color format or a combination of color /
+ * stencil formats as attachments. If the FBO is complete we will assume
+ * subsequent attachments with the same formats are complete as well.
+ *
+ * GR_GL_MUST_USE_VBO: Indicates that all vertices and indices must be rendered
+ * from VBOs. Chromium's command buffer doesn't allow glVertexAttribPointer with
+ * ARRAY_BUFFER 0 bound or glDrawElements with ELEMENT_ARRAY_BUFFER 0 bound.
+ *
+ * GR_GL_USE_NEW_SHADER_SOURCE_SIGNATURE is for compatibility with the new version
+ * of the OpenGL ES 2.0 headers from Khronos: glShaderSource now takes a
+ * const char* const*, instead of a const char**.
+ */
+
+#if !defined(GR_GL_LOG_CALLS)
+ #ifdef SK_DEBUG
+ #define GR_GL_LOG_CALLS 1
+ #else
+ #define GR_GL_LOG_CALLS 0
+ #endif
+#endif
+
+#if !defined(GR_GL_LOG_CALLS_START)
+ #define GR_GL_LOG_CALLS_START 0
+#endif
+
+#if !defined(GR_GL_CHECK_ERROR)
+ #ifdef SK_DEBUG
+ #define GR_GL_CHECK_ERROR 1
+ #else
+ #define GR_GL_CHECK_ERROR 0
+ #endif
+#endif
+
+#if !defined(GR_GL_CHECK_ERROR_START)
+ #define GR_GL_CHECK_ERROR_START 1
+#endif
+
+#if !defined(GR_GL_USE_BUFFER_DATA_NULL_HINT)
+ #define GR_GL_USE_BUFFER_DATA_NULL_HINT 1
+#endif
+
+#if !defined(GR_GL_CHECK_ALLOC_WITH_GET_ERROR)
+ #define GR_GL_CHECK_ALLOC_WITH_GET_ERROR 1
+#endif
+
+#if !defined(GR_GL_CHECK_FBO_STATUS_ONCE_PER_FORMAT)
+ #define GR_GL_CHECK_FBO_STATUS_ONCE_PER_FORMAT 0
+#endif
+
+#if !defined(GR_GL_MUST_USE_VBO)
+ #define GR_GL_MUST_USE_VBO 0
+#endif
+
+#if !defined(GR_GL_USE_NEW_SHADER_SOURCE_SIGNATURE)
+ #define GR_GL_USE_NEW_SHADER_SOURCE_SIGNATURE 0
+#endif
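+
+// Illustrative example (editorial, not part of the original header): any of the
+// defaults above can be overridden before this header is seen, e.g. on the
+// compiler command line:
+//
+//     -DGR_GL_LOG_CALLS=1 -DGR_GL_LOG_CALLS_START=1
+//
+// or from a GR_GL_CUSTOM_SETUP_HEADER / GrUserConfig.h:
+//
+//     #define GR_GL_CHECK_ERROR 0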
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLConfig_chrome.h b/gfx/skia/skia/include/gpu/gl/GrGLConfig_chrome.h
new file mode 100644
index 000000000..838e0543e
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLConfig_chrome.h
@@ -0,0 +1,36 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef GrGLConfig_chrome_DEFINED
+#define GrGLConfig_chrome_DEFINED
+
+// glGetError() forces a sync with the gpu process on Chrome.
+#define GR_GL_CHECK_ERROR_START 0
+
+// cmd buffer allocates memory and memsets it to zero when it sees glBufferData
+// with NULL.
+#define GR_GL_USE_BUFFER_DATA_NULL_HINT 0
+
+// Check error is even more expensive in chrome (cmd buffer flush). The
+// compositor also doesn't check its allocations.
+#define GR_GL_CHECK_ALLOC_WITH_GET_ERROR 0
+
+// CheckFramebufferStatus in chrome synchronizes the gpu and renderer processes.
+#define GR_GL_CHECK_FBO_STATUS_ONCE_PER_FORMAT 1
+
+// Non-VBO vertices and indices are not allowed in Chromium.
+#define GR_GL_MUST_USE_VBO 1
+
+// Use updated Khronos signature for glShaderSource
+// (const char* const* instead of const char**).
+#define GR_GL_USE_NEW_SHADER_SOURCE_SIGNATURE 1
+
+#if !defined(GR_GL_IGNORE_ES3_MSAA)
+ #define GR_GL_IGNORE_ES3_MSAA 1
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLExtensions.h b/gfx/skia/skia/include/gpu/gl/GrGLExtensions.h
new file mode 100644
index 000000000..dd088de67
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLExtensions.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLExtensions_DEFINED
+#define GrGLExtensions_DEFINED
+
+#include "../../private/SkTArray.h"
+#include "GrGLFunctions.h"
+#include "SkString.h"
+
+struct GrGLInterface;
+
+/**
+ * This helper queries the current GL context for its extensions, remembers them, and can then be
+ * queried. It supports both glGetString- and glGetStringi-style extension string APIs and will
+ * use the latter if it is available. It will also query for EGL extensions if an eglQueryString
+ * implementation is provided.
+ */
+class SK_API GrGLExtensions {
+public:
+ GrGLExtensions() : fInitialized(false), fStrings(new SkTArray<SkString>) {}
+
+ GrGLExtensions(const GrGLExtensions&);
+
+ GrGLExtensions& operator=(const GrGLExtensions&);
+
+ void swap(GrGLExtensions* that) {
+ fStrings.swap(that->fStrings);
+ SkTSwap(fInitialized, that->fInitialized);
+ }
+
+ /**
+ * We sometimes need to use this class without having yet created a GrGLInterface. This version
+ * of init expects that getString is always non-NULL while getIntegerv and getStringi are non-
+ * NULL if on desktop GL with version 3.0 or higher. Otherwise it will fail.
+ */
+ bool init(GrGLStandard standard,
+ GrGLFunction<GrGLGetStringProc> getString,
+ GrGLFunction<GrGLGetStringiProc> getStringi,
+ GrGLFunction<GrGLGetIntegervProc> getIntegerv,
+ GrGLFunction<GrEGLQueryStringProc> queryString = nullptr,
+ GrEGLDisplay eglDisplay = nullptr);
+
+ bool isInitialized() const { return fInitialized; }
+
+ /**
+ * Queries whether an extension is present. This will fail if init() has not been called.
+ */
+ bool has(const char[]) const;
+
+ /**
+ * Removes an extension if present. Returns true if the extension was present before the call.
+ */
+ bool remove(const char[]);
+
+ /**
+ * Adds an extension to the list.
+ */
+ void add(const char[]);
+
+ void reset() { fStrings->reset(); }
+
+ void print(const char* sep = "\n") const;
+
+private:
+ bool fInitialized;
+ SkAutoTDelete<SkTArray<SkString> > fStrings;
+};
+
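+// Illustrative usage (editorial example, not part of the original header):
+// getString, getStringi and getIntegerv are hypothetical, already-resolved GL
+// entry points, and kGL_GrGLStandard assumes a desktop GL context:
+//
+//     GrGLExtensions extensions;
+//     if (extensions.init(kGL_GrGLStandard, getString, getStringi, getIntegerv) &&
+//         extensions.has("GL_EXT_texture_filter_anisotropic")) {
+//         // take the anisotropic filtering path
+//     }
+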
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLFunctions.h b/gfx/skia/skia/include/gpu/gl/GrGLFunctions.h
new file mode 100644
index 000000000..eccd1bf00
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLFunctions.h
@@ -0,0 +1,374 @@
+
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLFunctions_DEFINED
+#define GrGLFunctions_DEFINED
+
+#include <functional>
+#include "GrGLTypes.h"
+#include "../private/SkTLogic.h"
+
+extern "C" {
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLActiveTextureProc)(GrGLenum texture);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLAttachShaderProc)(GrGLuint program, GrGLuint shader);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLBeginQueryProc)(GrGLenum target, GrGLuint id);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLBindAttribLocationProc)(GrGLuint program, GrGLuint index, const char* name);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLBindBufferProc)(GrGLenum target, GrGLuint buffer);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLBindFramebufferProc)(GrGLenum target, GrGLuint framebuffer);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLBindRenderbufferProc)(GrGLenum target, GrGLuint renderbuffer);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLBindTextureProc)(GrGLenum target, GrGLuint texture);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLBindFragDataLocationProc)(GrGLuint program, GrGLuint colorNumber, const GrGLchar* name);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLBindFragDataLocationIndexedProc)(GrGLuint program, GrGLuint colorNumber, GrGLuint index, const GrGLchar * name);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLBindVertexArrayProc)(GrGLuint array);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLBlendBarrierProc)();
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLBlendColorProc)(GrGLclampf red, GrGLclampf green, GrGLclampf blue, GrGLclampf alpha);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLBlendEquationProc)(GrGLenum mode);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLBlendFuncProc)(GrGLenum sfactor, GrGLenum dfactor);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLBlitFramebufferProc)(GrGLint srcX0, GrGLint srcY0, GrGLint srcX1, GrGLint srcY1, GrGLint dstX0, GrGLint dstY0, GrGLint dstX1, GrGLint dstY1, GrGLbitfield mask, GrGLenum filter);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLBufferDataProc)(GrGLenum target, GrGLsizeiptr size, const GrGLvoid* data, GrGLenum usage);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLBufferSubDataProc)(GrGLenum target, GrGLintptr offset, GrGLsizeiptr size, const GrGLvoid* data);
+typedef GrGLenum (GR_GL_FUNCTION_TYPE* GrGLCheckFramebufferStatusProc)(GrGLenum target);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLClearProc)(GrGLbitfield mask);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLClearColorProc)(GrGLclampf red, GrGLclampf green, GrGLclampf blue, GrGLclampf alpha);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLClearStencilProc)(GrGLint s);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLColorMaskProc)(GrGLboolean red, GrGLboolean green, GrGLboolean blue, GrGLboolean alpha);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCompileShaderProc)(GrGLuint shader);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCompressedTexImage2DProc)(GrGLenum target, GrGLint level, GrGLenum internalformat, GrGLsizei width, GrGLsizei height, GrGLint border, GrGLsizei imageSize, const GrGLvoid* data);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCompressedTexSubImage2DProc)(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLsizei imageSize, const GrGLvoid* data);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCopyTexSubImage2DProc)(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height);
+typedef GrGLuint (GR_GL_FUNCTION_TYPE* GrGLCreateProgramProc)();
+typedef GrGLuint (GR_GL_FUNCTION_TYPE* GrGLCreateShaderProc)(GrGLenum type);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCullFaceProc)(GrGLenum mode);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDeleteBuffersProc)(GrGLsizei n, const GrGLuint* buffers);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDeleteFramebuffersProc)(GrGLsizei n, const GrGLuint *framebuffers);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDeleteProgramProc)(GrGLuint program);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDeleteQueriesProc)(GrGLsizei n, const GrGLuint *ids);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDeleteRenderbuffersProc)(GrGLsizei n, const GrGLuint *renderbuffers);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDeleteShaderProc)(GrGLuint shader);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDeleteTexturesProc)(GrGLsizei n, const GrGLuint* textures);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDeleteVertexArraysProc)(GrGLsizei n, const GrGLuint *arrays);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDepthMaskProc)(GrGLboolean flag);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDisableProc)(GrGLenum cap);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDisableVertexAttribArrayProc)(GrGLuint index);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDrawArraysProc)(GrGLenum mode, GrGLint first, GrGLsizei count);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDrawArraysInstancedProc)(GrGLenum mode, GrGLint first, GrGLsizei count, GrGLsizei primcount);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDrawArraysIndirectProc)(GrGLenum mode, const GrGLvoid* indirect);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDrawBufferProc)(GrGLenum mode);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDrawBuffersProc)(GrGLsizei n, const GrGLenum* bufs);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDrawElementsProc)(GrGLenum mode, GrGLsizei count, GrGLenum type, const GrGLvoid* indices);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDrawElementsInstancedProc)(GrGLenum mode, GrGLsizei count, GrGLenum type, const GrGLvoid *indices, GrGLsizei primcount);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDrawElementsIndirectProc)(GrGLenum mode, GrGLenum type, const GrGLvoid* indirect);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDrawRangeElementsProc)(GrGLenum mode, GrGLuint start, GrGLuint end, GrGLsizei count, GrGLenum type, const GrGLvoid* indices);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLEnableProc)(GrGLenum cap);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLEnableVertexAttribArrayProc)(GrGLuint index);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLEndQueryProc)(GrGLenum target);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLFinishProc)();
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLFlushProc)();
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLFlushMappedBufferRangeProc)(GrGLenum target, GrGLintptr offset, GrGLsizeiptr length);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLFramebufferRenderbufferProc)(GrGLenum target, GrGLenum attachment, GrGLenum renderbuffertarget, GrGLuint renderbuffer);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLFramebufferTexture2DProc)(GrGLenum target, GrGLenum attachment, GrGLenum textarget, GrGLuint texture, GrGLint level);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLFramebufferTexture2DMultisampleProc)(GrGLenum target, GrGLenum attachment, GrGLenum textarget, GrGLuint texture, GrGLint level, GrGLsizei samples);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLFrontFaceProc)(GrGLenum mode);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGenBuffersProc)(GrGLsizei n, GrGLuint* buffers);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGenFramebuffersProc)(GrGLsizei n, GrGLuint *framebuffers);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGenerateMipmapProc)(GrGLenum target);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGenQueriesProc)(GrGLsizei n, GrGLuint *ids);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGenRenderbuffersProc)(GrGLsizei n, GrGLuint *renderbuffers);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGenTexturesProc)(GrGLsizei n, GrGLuint* textures);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGenVertexArraysProc)(GrGLsizei n, GrGLuint *arrays);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetBufferParameterivProc)(GrGLenum target, GrGLenum pname, GrGLint* params);
+typedef GrGLenum (GR_GL_FUNCTION_TYPE* GrGLGetErrorProc)();
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetFramebufferAttachmentParameterivProc)(GrGLenum target, GrGLenum attachment, GrGLenum pname, GrGLint* params);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetIntegervProc)(GrGLenum pname, GrGLint* params);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetMultisamplefvProc)(GrGLenum pname, GrGLuint index, GrGLfloat* val);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetProgramInfoLogProc)(GrGLuint program, GrGLsizei bufsize, GrGLsizei* length, char* infolog);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetProgramivProc)(GrGLuint program, GrGLenum pname, GrGLint* params);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetQueryivProc)(GrGLenum GLtarget, GrGLenum pname, GrGLint *params);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetQueryObjecti64vProc)(GrGLuint id, GrGLenum pname, GrGLint64 *params);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetQueryObjectivProc)(GrGLuint id, GrGLenum pname, GrGLint *params);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetQueryObjectui64vProc)(GrGLuint id, GrGLenum pname, GrGLuint64 *params);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetQueryObjectuivProc)(GrGLuint id, GrGLenum pname, GrGLuint *params);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetRenderbufferParameterivProc)(GrGLenum target, GrGLenum pname, GrGLint* params);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetShaderInfoLogProc)(GrGLuint shader, GrGLsizei bufsize, GrGLsizei* length, char* infolog);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetShaderivProc)(GrGLuint shader, GrGLenum pname, GrGLint* params);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetShaderPrecisionFormatProc)(GrGLenum shadertype, GrGLenum precisiontype, GrGLint *range, GrGLint *precision);
+typedef const GrGLubyte* (GR_GL_FUNCTION_TYPE* GrGLGetStringProc)(GrGLenum name);
+typedef const GrGLubyte* (GR_GL_FUNCTION_TYPE* GrGLGetStringiProc)(GrGLenum name, GrGLuint index);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetTexLevelParameterivProc)(GrGLenum target, GrGLint level, GrGLenum pname, GrGLint* params);
+typedef GrGLint (GR_GL_FUNCTION_TYPE* GrGLGetUniformLocationProc)(GrGLuint program, const char* name);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLInsertEventMarkerProc)(GrGLsizei length, const char* marker);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLInvalidateBufferDataProc)(GrGLuint buffer);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLInvalidateBufferSubDataProc)(GrGLuint buffer, GrGLintptr offset, GrGLsizeiptr length);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLInvalidateFramebufferProc)(GrGLenum target, GrGLsizei numAttachments, const GrGLenum *attachments);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLInvalidateSubFramebufferProc)(GrGLenum target, GrGLsizei numAttachments, const GrGLenum *attachments, GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLInvalidateTexImageProc)(GrGLuint texture, GrGLint level);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLInvalidateTexSubImageProc)(GrGLuint texture, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint zoffset, GrGLsizei width, GrGLsizei height, GrGLsizei depth);
+typedef GrGLboolean (GR_GL_FUNCTION_TYPE* GrGLIsTextureProc)(GrGLuint texture);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLLineWidthProc)(GrGLfloat width);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLLinkProgramProc)(GrGLuint program);
+typedef GrGLvoid* (GR_GL_FUNCTION_TYPE* GrGLMapBufferProc)(GrGLenum target, GrGLenum access);
+typedef GrGLvoid* (GR_GL_FUNCTION_TYPE* GrGLMapBufferRangeProc)(GrGLenum target, GrGLintptr offset, GrGLsizeiptr length, GrGLbitfield access);
+typedef GrGLvoid* (GR_GL_FUNCTION_TYPE* GrGLMapBufferSubDataProc)(GrGLuint target, GrGLintptr offset, GrGLsizeiptr size, GrGLenum access);
+typedef GrGLvoid* (GR_GL_FUNCTION_TYPE* GrGLMapTexSubImage2DProc)(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, GrGLenum access);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLPixelStoreiProc)(GrGLenum pname, GrGLint param);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLPopGroupMarkerProc)();
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLPushGroupMarkerProc)(GrGLsizei length, const char* marker);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLQueryCounterProc)(GrGLuint id, GrGLenum target);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLRasterSamplesProc)(GrGLuint samples, GrGLboolean fixedsamplelocations);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLReadBufferProc)(GrGLenum src);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLReadPixelsProc)(GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, GrGLvoid* pixels);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLRenderbufferStorageProc)(GrGLenum target, GrGLenum internalformat, GrGLsizei width, GrGLsizei height);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLRenderbufferStorageMultisampleProc)(GrGLenum target, GrGLsizei samples, GrGLenum internalformat, GrGLsizei width, GrGLsizei height);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLResolveMultisampleFramebufferProc)();
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLScissorProc)(GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height);
+// GL_CHROMIUM_bind_uniform_location
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLBindUniformLocationProc)(GrGLuint program, GrGLint location, const char* name);
+
+#if GR_GL_USE_NEW_SHADER_SOURCE_SIGNATURE
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLShaderSourceProc)(GrGLuint shader, GrGLsizei count, const char* const * str, const GrGLint* length);
+#else
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLShaderSourceProc)(GrGLuint shader, GrGLsizei count, const char** str, const GrGLint* length);
+#endif
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLStencilFuncProc)(GrGLenum func, GrGLint ref, GrGLuint mask);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLStencilFuncSeparateProc)(GrGLenum face, GrGLenum func, GrGLint ref, GrGLuint mask);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLStencilMaskProc)(GrGLuint mask);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLStencilMaskSeparateProc)(GrGLenum face, GrGLuint mask);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLStencilOpProc)(GrGLenum fail, GrGLenum zfail, GrGLenum zpass);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLStencilOpSeparateProc)(GrGLenum face, GrGLenum fail, GrGLenum zfail, GrGLenum zpass);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLTexBufferProc)(GrGLenum target, GrGLenum internalformat, GrGLuint buffer);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLTexBufferRangeProc)(GrGLenum target, GrGLenum internalformat, GrGLuint buffer, GrGLintptr offset, GrGLsizeiptr size);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLTexImage2DProc)(GrGLenum target, GrGLint level, GrGLint internalformat, GrGLsizei width, GrGLsizei height, GrGLint border, GrGLenum format, GrGLenum type, const GrGLvoid* pixels);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLTexParameteriProc)(GrGLenum target, GrGLenum pname, GrGLint param);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLTexParameterivProc)(GrGLenum target, GrGLenum pname, const GrGLint* params);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLTexStorage2DProc)(GrGLenum target, GrGLsizei levels, GrGLenum internalformat, GrGLsizei width, GrGLsizei height);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDiscardFramebufferProc)(GrGLenum target, GrGLsizei numAttachments, const GrGLenum* attachments);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLTexSubImage2DProc)(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, const GrGLvoid* pixels);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLTextureBarrierProc)();
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniform1fProc)(GrGLint location, GrGLfloat v0);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniform1iProc)(GrGLint location, GrGLint v0);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniform1fvProc)(GrGLint location, GrGLsizei count, const GrGLfloat* v);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniform1ivProc)(GrGLint location, GrGLsizei count, const GrGLint* v);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniform2fProc)(GrGLint location, GrGLfloat v0, GrGLfloat v1);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniform2iProc)(GrGLint location, GrGLint v0, GrGLint v1);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniform2fvProc)(GrGLint location, GrGLsizei count, const GrGLfloat* v);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniform2ivProc)(GrGLint location, GrGLsizei count, const GrGLint* v);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniform3fProc)(GrGLint location, GrGLfloat v0, GrGLfloat v1, GrGLfloat v2);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniform3iProc)(GrGLint location, GrGLint v0, GrGLint v1, GrGLint v2);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniform3fvProc)(GrGLint location, GrGLsizei count, const GrGLfloat* v);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniform3ivProc)(GrGLint location, GrGLsizei count, const GrGLint* v);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniform4fProc)(GrGLint location, GrGLfloat v0, GrGLfloat v1, GrGLfloat v2, GrGLfloat v3);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniform4iProc)(GrGLint location, GrGLint v0, GrGLint v1, GrGLint v2, GrGLint v3);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniform4fvProc)(GrGLint location, GrGLsizei count, const GrGLfloat* v);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniform4ivProc)(GrGLint location, GrGLsizei count, const GrGLint* v);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniformMatrix2fvProc)(GrGLint location, GrGLsizei count, GrGLboolean transpose, const GrGLfloat* value);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniformMatrix3fvProc)(GrGLint location, GrGLsizei count, GrGLboolean transpose, const GrGLfloat* value);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniformMatrix4fvProc)(GrGLint location, GrGLsizei count, GrGLboolean transpose, const GrGLfloat* value);
+typedef GrGLboolean (GR_GL_FUNCTION_TYPE* GrGLUnmapBufferProc)(GrGLenum target);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUnmapBufferSubDataProc)(const GrGLvoid* mem);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUnmapTexSubImage2DProc)(const GrGLvoid* mem);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUseProgramProc)(GrGLuint program);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLVertexAttrib1fProc)(GrGLuint indx, const GrGLfloat value);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLVertexAttrib2fvProc)(GrGLuint indx, const GrGLfloat* values);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLVertexAttrib3fvProc)(GrGLuint indx, const GrGLfloat* values);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLVertexAttrib4fvProc)(GrGLuint indx, const GrGLfloat* values);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLVertexAttribDivisorProc)(GrGLuint index, GrGLuint divisor);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLVertexAttribIPointerProc)(GrGLuint indx, GrGLint size, GrGLenum type, GrGLsizei stride, const GrGLvoid* ptr);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLVertexAttribPointerProc)(GrGLuint indx, GrGLint size, GrGLenum type, GrGLboolean normalized, GrGLsizei stride, const GrGLvoid* ptr);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLViewportProc)(GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height);
+
+/* GL_NV_path_rendering */
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLMatrixLoadfProc)(GrGLenum matrixMode, const GrGLfloat* m);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLMatrixLoadIdentityProc)(GrGLenum);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLPathCommandsProc)(GrGLuint path, GrGLsizei numCommands, const GrGLubyte *commands, GrGLsizei numCoords, GrGLenum coordType, const GrGLvoid *coords);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLPathParameteriProc)(GrGLuint path, GrGLenum pname, GrGLint value);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLPathParameterfProc)(GrGLuint path, GrGLenum pname, GrGLfloat value);
+typedef GrGLuint (GR_GL_FUNCTION_TYPE* GrGLGenPathsProc)(GrGLsizei range);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDeletePathsProc)(GrGLuint path, GrGLsizei range);
+typedef GrGLboolean (GR_GL_FUNCTION_TYPE* GrGLIsPathProc)(GrGLuint path);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLPathStencilFuncProc)(GrGLenum func, GrGLint ref, GrGLuint mask);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLStencilFillPathProc)(GrGLuint path, GrGLenum fillMode, GrGLuint mask);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLStencilStrokePathProc)(GrGLuint path, GrGLint reference, GrGLuint mask);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLStencilFillPathInstancedProc)(GrGLsizei numPaths, GrGLenum pathNameType, const GrGLvoid *paths, GrGLuint pathBase, GrGLenum fillMode, GrGLuint mask, GrGLenum transformType, const GrGLfloat *transformValues);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLStencilStrokePathInstancedProc)(GrGLsizei numPaths, GrGLenum pathNameType, const GrGLvoid *paths, GrGLuint pathBase, GrGLint reference, GrGLuint mask, GrGLenum transformType, const GrGLfloat *transformValues);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCoverFillPathProc)(GrGLuint path, GrGLenum coverMode);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCoverStrokePathProc)(GrGLuint name, GrGLenum coverMode);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCoverFillPathInstancedProc)(GrGLsizei numPaths, GrGLenum pathNameType, const GrGLvoid *paths, GrGLuint pathBase, GrGLenum coverMode, GrGLenum transformType, const GrGLfloat *transformValues);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCoverStrokePathInstancedProc)(GrGLsizei numPaths, GrGLenum pathNameType, const GrGLvoid *paths, GrGLuint pathBase, GrGLenum coverMode, GrGLenum transformType, const GrGLfloat* transformValues);
+// NV_path_rendering v1.2
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLStencilThenCoverFillPathProc)(GrGLuint path, GrGLenum fillMode, GrGLuint mask, GrGLenum coverMode);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLStencilThenCoverStrokePathProc)(GrGLuint path, GrGLint reference, GrGLuint mask, GrGLenum coverMode);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLStencilThenCoverFillPathInstancedProc)(GrGLsizei numPaths, GrGLenum pathNameType, const GrGLvoid *paths, GrGLuint pathBase, GrGLenum fillMode, GrGLuint mask, GrGLenum coverMode, GrGLenum transformType, const GrGLfloat *transformValues);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLStencilThenCoverStrokePathInstancedProc)(GrGLsizei numPaths, GrGLenum pathNameType, const GrGLvoid *paths, GrGLuint pathBase, GrGLint reference, GrGLuint mask, GrGLenum coverMode, GrGLenum transformType, const GrGLfloat *transformValues);
+// NV_path_rendering v1.3
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramPathFragmentInputGenProc)(GrGLuint program, GrGLint location, GrGLenum genMode, GrGLint components,const GrGLfloat *coeffs);
+// CHROMIUM_path_rendering
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLBindFragmentInputLocationProc)(GrGLuint program, GrGLint location, const GrGLchar* name);
+
+/* ARB_program_interface_query */
+typedef GrGLint (GR_GL_FUNCTION_TYPE* GrGLGetProgramResourceLocationProc)(GrGLuint program, GrGLenum programInterface, const GrGLchar *name);
+
+/* GL_NV_framebuffer_mixed_samples */
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCoverageModulationProc)(GrGLenum components);
+
+/* EXT_multi_draw_indirect */
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLMultiDrawArraysIndirectProc)(GrGLenum mode, const GrGLvoid *indirect, GrGLsizei drawcount, GrGLsizei stride);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLMultiDrawElementsIndirectProc)(GrGLenum mode, GrGLenum type, const GrGLvoid *indirect, GrGLsizei drawcount, GrGLsizei stride);
+
+/* NV_bindless_texture */
+typedef GrGLuint64 (GR_GL_FUNCTION_TYPE* GrGLGetTextureHandleProc)(GrGLuint texture);
+typedef GrGLuint64 (GR_GL_FUNCTION_TYPE* GrGLGetTextureSamplerHandleProc)(GrGLuint texture, GrGLuint sampler);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLMakeTextureHandleResidentProc)(GrGLuint64 handle);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLMakeTextureHandleNonResidentProc)(GrGLuint64 handle);
+typedef GrGLuint64 (GR_GL_FUNCTION_TYPE* GrGLGetImageHandleProc)(GrGLuint texture, GrGLint level, GrGLboolean layered, GrGLint layer, GrGLint format);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLMakeImageHandleResidentProc)(GrGLuint64 handle, GrGLenum access);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLMakeImageHandleNonResidentProc)(GrGLuint64 handle);
+typedef GrGLboolean (GR_GL_FUNCTION_TYPE* GrGLIsTextureHandleResidentProc)(GrGLuint64 handle);
+typedef GrGLboolean (GR_GL_FUNCTION_TYPE* GrGLIsImageHandleResidentProc)(GrGLuint64 handle);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniformHandleui64Proc)(GrGLint location, GrGLuint64 v0);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLUniformHandleui64vProc)(GrGLint location, GrGLsizei count, const GrGLuint64 *value);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniformHandleui64Proc)(GrGLuint program, GrGLint location, GrGLuint64 v0);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniformHandleui64vProc)(GrGLuint program, GrGLint location, GrGLsizei count, const GrGLuint64 *value);
+
+/* ARB_sample_shading */
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLMinSampleShadingProc)(GrGLfloat value);
+
+/* EXT_direct_state_access */
+// (In the future some of these methods may be omitted)
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLTextureParameteriProc)(GrGLuint texture, GrGLenum target, GrGLenum pname, GrGLint param);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLTextureParameterivProc)(GrGLuint texture, GrGLenum target, GrGLenum pname, const GrGLint *param);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLTextureParameterfProc)(GrGLuint texture, GrGLenum target, GrGLenum pname, float param);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLTextureParameterfvProc)(GrGLuint texture, GrGLenum target, GrGLenum pname, const float *param);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLTextureImage1DProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint GrGLinternalformat, GrGLsizei width, GrGLint border, GrGLenum format, GrGLenum type, const GrGLvoid *pixels);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLTextureImage2DProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint GrGLinternalformat, GrGLsizei width, GrGLsizei height, GrGLint border, GrGLenum format, GrGLenum type, const GrGLvoid *pixels);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLTextureSubImage1DProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint xoffset, GrGLsizei width, GrGLenum format, GrGLenum type, const GrGLvoid *pixels);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLTextureSubImage2DProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, const GrGLvoid *pixels);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCopyTextureImage1DProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLenum GrGLinternalformat, GrGLint x, GrGLint y, GrGLsizei width, GrGLint border);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCopyTextureImage2DProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLenum GrGLinternalformat, GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height, GrGLint border);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCopyTextureSubImage1DProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint x, GrGLint y, GrGLsizei width);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCopyTextureSubImage2DProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetTextureImageProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLenum format, GrGLenum type, GrGLvoid *pixels);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetTextureParameterfvProc)(GrGLuint texture, GrGLenum target, GrGLenum pname, float *params);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetTextureParameterivProc)(GrGLuint texture, GrGLenum target, GrGLenum pname, GrGLint *params);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetTextureLevelParameterfvProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLenum pname, float *params);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetTextureLevelParameterivProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLenum pname, GrGLint *params);
+// OpenGL 1.2
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLTextureImage3DProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint GrGLinternalformat, GrGLsizei width, GrGLsizei height, GrGLsizei depth, GrGLint border, GrGLenum format, GrGLenum type, const GrGLvoid *pixels);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLTextureSubImage3DProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint zoffset, GrGLsizei width, GrGLsizei height, GrGLsizei depth, GrGLenum format, GrGLenum type, const GrGLvoid *pixels);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCopyTextureSubImage3DProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint zoffset, GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCompressedTextureImage3DProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLenum GrGLinternalformat, GrGLsizei width, GrGLsizei height, GrGLsizei depth, GrGLint border, GrGLsizei imageSize, const GrGLvoid *data);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCompressedTextureImage2DProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLenum GrGLinternalformat, GrGLsizei width, GrGLsizei height, GrGLint border, GrGLsizei imageSize, const GrGLvoid *data);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCompressedTextureImage1DProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLenum GrGLinternalformat, GrGLsizei width, GrGLint border, GrGLsizei imageSize, const GrGLvoid *data);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCompressedTextureSubImage3DProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint zoffset, GrGLsizei width, GrGLsizei height, GrGLsizei depth, GrGLenum format, GrGLsizei imageSize, const GrGLvoid *data);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCompressedTextureSubImage2DProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLsizei imageSize, const GrGLvoid *data);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLCompressedTextureSubImage1DProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint xoffset, GrGLsizei width, GrGLenum format, GrGLsizei imageSize, const GrGLvoid *data);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetCompressedTextureImageProc)(GrGLuint texture, GrGLenum target, GrGLint level, GrGLvoid *img);
+// OpenGL 1.5
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLNamedBufferDataProc)(GrGLuint buffer, GrGLsizeiptr size, const GrGLvoid *data, GrGLenum usage);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLNamedBufferSubDataProc)(GrGLuint buffer, GrGLintptr offset, GrGLsizeiptr size, const GrGLvoid *data);
+typedef GrGLvoid* (GR_GL_FUNCTION_TYPE* GrGLMapNamedBufferProc)(GrGLuint buffer, GrGLenum access);
+typedef GrGLboolean (GR_GL_FUNCTION_TYPE* GrGLUnmapNamedBufferProc)(GrGLuint buffer);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetNamedBufferParameterivProc)(GrGLuint buffer, GrGLenum pname, GrGLint *params);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetNamedBufferPointervProc)(GrGLuint buffer, GrGLenum pname, GrGLvoid* *params);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetNamedBufferSubDataProc)(GrGLuint buffer, GrGLintptr offset, GrGLsizeiptr size, GrGLvoid *data);
+// OpenGL 2.0
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniform1fProc)(GrGLuint program, GrGLint location, float v0);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniform2fProc)(GrGLuint program, GrGLint location, float v0, float v1);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniform3fProc)(GrGLuint program, GrGLint location, float v0, float v1, float v2);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniform4fProc)(GrGLuint program, GrGLint location, float v0, float v1, float v2, float v3);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniform1iProc)(GrGLuint program, GrGLint location, GrGLint v0);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniform2iProc)(GrGLuint program, GrGLint location, GrGLint v0, GrGLint v1);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniform3iProc)(GrGLuint program, GrGLint location, GrGLint v0, GrGLint v1, GrGLint v2);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniform4iProc)(GrGLuint program, GrGLint location, GrGLint v0, GrGLint v1, GrGLint v2, GrGLint v3);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniform1fvProc)(GrGLuint program, GrGLint location, GrGLsizei count, const float *value);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniform2fvProc)(GrGLuint program, GrGLint location, GrGLsizei count, const float *value);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniform3fvProc)(GrGLuint program, GrGLint location, GrGLsizei count, const float *value);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniform4fvProc)(GrGLuint program, GrGLint location, GrGLsizei count, const float *value);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniform1ivProc)(GrGLuint program, GrGLint location, GrGLsizei count, const GrGLint *value);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniform2ivProc)(GrGLuint program, GrGLint location, GrGLsizei count, const GrGLint *value);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniform3ivProc)(GrGLuint program, GrGLint location, GrGLsizei count, const GrGLint *value);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniform4ivProc)(GrGLuint program, GrGLint location, GrGLsizei count, const GrGLint *value);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniformMatrix2fvProc)(GrGLuint program, GrGLint location, GrGLsizei count, GrGLboolean transpose, const float *value);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniformMatrix3fvProc)(GrGLuint program, GrGLint location, GrGLsizei count, GrGLboolean transpose, const float *value);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniformMatrix4fvProc)(GrGLuint program, GrGLint location, GrGLsizei count, GrGLboolean transpose, const float *value);
+// OpenGL 2.1
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniformMatrix2x3fvProc)(GrGLuint program, GrGLint location, GrGLsizei count, GrGLboolean transpose, const float *value);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniformMatrix3x2fvProc)(GrGLuint program, GrGLint location, GrGLsizei count, GrGLboolean transpose, const float *value);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniformMatrix2x4fvProc)(GrGLuint program, GrGLint location, GrGLsizei count, GrGLboolean transpose, const float *value);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniformMatrix4x2fvProc)(GrGLuint program, GrGLint location, GrGLsizei count, GrGLboolean transpose, const float *value);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniformMatrix3x4fvProc)(GrGLuint program, GrGLint location, GrGLsizei count, GrGLboolean transpose, const float *value);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLProgramUniformMatrix4x3fvProc)(GrGLuint program, GrGLint location, GrGLsizei count, GrGLboolean transpose, const float *value);
+// OpenGL 3.0
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLNamedRenderbufferStorageProc)(GrGLuint renderbuffer, GrGLenum GrGLinternalformat, GrGLsizei width, GrGLsizei height);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetNamedRenderbufferParameterivProc)(GrGLuint renderbuffer, GrGLenum pname, GrGLint *params);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLNamedRenderbufferStorageMultisampleProc)(GrGLuint renderbuffer, GrGLsizei samples, GrGLenum GrGLinternalformat, GrGLsizei width, GrGLsizei height);
+typedef GrGLenum (GR_GL_FUNCTION_TYPE* GrGLCheckNamedFramebufferStatusProc)(GrGLuint framebuffer, GrGLenum target);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLNamedFramebufferTexture1DProc)(GrGLuint framebuffer, GrGLenum attachment, GrGLenum textarget, GrGLuint texture, GrGLint level);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLNamedFramebufferTexture2DProc)(GrGLuint framebuffer, GrGLenum attachment, GrGLenum textarget, GrGLuint texture, GrGLint level);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLNamedFramebufferTexture3DProc)(GrGLuint framebuffer, GrGLenum attachment, GrGLenum textarget, GrGLuint texture, GrGLint level, GrGLint zoffset);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLNamedFramebufferRenderbufferProc)(GrGLuint framebuffer, GrGLenum attachment, GrGLenum renderbuffertarget, GrGLuint renderbuffer);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetNamedFramebufferAttachmentParameterivProc)(GrGLuint framebuffer, GrGLenum attachment, GrGLenum pname, GrGLint *params);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGenerateTextureMipmapProc)(GrGLuint texture, GrGLenum target);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLFramebufferDrawBufferProc)(GrGLuint framebuffer, GrGLenum mode);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLFramebufferDrawBuffersProc)(GrGLuint framebuffer, GrGLsizei n, const GrGLenum *bufs);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLFramebufferReadBufferProc)(GrGLuint framebuffer, GrGLenum mode);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetFramebufferParameterivProc)(GrGLuint framebuffer, GrGLenum pname, GrGLint *param);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLNamedCopyBufferSubDataProc)(GrGLuint readBuffer, GrGLuint writeBuffer, GrGLintptr readOffset, GrGLintptr writeOffset, GrGLsizeiptr size);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLVertexArrayVertexOffsetProc)(GrGLuint vaobj, GrGLuint buffer, GrGLint size, GrGLenum type, GrGLsizei stride, GrGLintptr offset);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLVertexArrayColorOffsetProc)(GrGLuint vaobj, GrGLuint buffer, GrGLint size, GrGLenum type, GrGLsizei stride, GrGLintptr offset);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLVertexArrayEdgeFlagOffsetProc)(GrGLuint vaobj, GrGLuint buffer, GrGLsizei stride, GrGLintptr offset);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLVertexArrayIndexOffsetProc)(GrGLuint vaobj, GrGLuint buffer, GrGLenum type, GrGLsizei stride, GrGLintptr offset);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLVertexArrayNormalOffsetProc)(GrGLuint vaobj, GrGLuint buffer, GrGLenum type, GrGLsizei stride, GrGLintptr offset);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLVertexArrayTexCoordOffsetProc)(GrGLuint vaobj, GrGLuint buffer, GrGLint size, GrGLenum type, GrGLsizei stride, GrGLintptr offset);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLVertexArrayMultiTexCoordOffsetProc)(GrGLuint vaobj, GrGLuint buffer, GrGLenum texunit, GrGLint size, GrGLenum type, GrGLsizei stride, GrGLintptr offset);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLVertexArrayFogCoordOffsetProc)(GrGLuint vaobj, GrGLuint buffer, GrGLenum type, GrGLsizei stride, GrGLintptr offset);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLVertexArraySecondaryColorOffsetProc)(GrGLuint vaobj, GrGLuint buffer, GrGLint size, GrGLenum type, GrGLsizei stride, GrGLintptr offset);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLVertexArrayVertexAttribOffsetProc)(GrGLuint vaobj, GrGLuint buffer, GrGLuint index, GrGLint size, GrGLenum type, GrGLboolean normalized, GrGLsizei stride, GrGLintptr offset);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLVertexArrayVertexAttribIOffsetProc)(GrGLuint vaobj, GrGLuint buffer, GrGLuint index, GrGLint size, GrGLenum type, GrGLsizei stride, GrGLintptr offset);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLEnableVertexArrayProc)(GrGLuint vaobj, GrGLenum array);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDisableVertexArrayProc)(GrGLuint vaobj, GrGLenum array);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLEnableVertexArrayAttribProc)(GrGLuint vaobj, GrGLuint index);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDisableVertexArrayAttribProc)(GrGLuint vaobj, GrGLuint index);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetVertexArrayIntegervProc)(GrGLuint vaobj, GrGLenum pname, GrGLint *param);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetVertexArrayPointervProc)(GrGLuint vaobj, GrGLenum pname, GrGLvoid **param);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetVertexArrayIntegeri_vProc)(GrGLuint vaobj, GrGLuint index, GrGLenum pname, GrGLint *param);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLGetVertexArrayPointeri_vProc)(GrGLuint vaobj, GrGLuint index, GrGLenum pname, GrGLvoid **param);
+typedef GrGLvoid* (GR_GL_FUNCTION_TYPE* GrGLMapNamedBufferRangeProc)(GrGLuint buffer, GrGLintptr offset, GrGLsizeiptr length, GrGLbitfield access);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLFlushMappedNamedBufferRangeProc)(GrGLuint buffer, GrGLintptr offset, GrGLsizeiptr length);
+// OpenGL 3.1
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLTextureBufferProc)(GrGLuint texture, GrGLenum target, GrGLenum internalformat, GrGLuint buffer);
+
+/* ARB_sync */
+typedef GrGLsync (GR_GL_FUNCTION_TYPE* GrGLFenceSyncProc)(GrGLenum condition, GrGLbitfield flags);
+typedef GrGLenum (GR_GL_FUNCTION_TYPE* GrGLClientWaitSyncProc)(GrGLsync sync, GrGLbitfield flags, GrGLuint64 timeout);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDeleteSyncProc)(GrGLsync sync);
+
+/* KHR_debug */
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDebugMessageControlProc)(GrGLenum source, GrGLenum type, GrGLenum severity, GrGLsizei count, const GrGLuint* ids, GrGLboolean enabled);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDebugMessageInsertProc)(GrGLenum source, GrGLenum type, GrGLuint id, GrGLenum severity, GrGLsizei length, const GrGLchar* buf);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLDebugMessageCallbackProc)(GRGLDEBUGPROC callback, const GrGLvoid* userParam);
+typedef GrGLuint (GR_GL_FUNCTION_TYPE* GrGLGetDebugMessageLogProc)(GrGLuint count, GrGLsizei bufSize, GrGLenum* sources, GrGLenum* types, GrGLuint* ids, GrGLenum* severities, GrGLsizei* lengths, GrGLchar* messageLog);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLPushDebugGroupProc)(GrGLenum source, GrGLuint id, GrGLsizei length, const GrGLchar * message);
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLPopDebugGroupProc)();
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLObjectLabelProc)(GrGLenum identifier, GrGLuint name, GrGLsizei length, const GrGLchar *label);
+
+/** EXT_window_rectangles */
+typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLWindowRectanglesProc)(GrGLenum mode, GrGLsizei count, const GrGLint box[]);
+
+/** EGL functions */
+typedef const char* (GR_GL_FUNCTION_TYPE* GrEGLQueryStringProc)(GrEGLDisplay dpy, GrEGLint name);
+typedef GrEGLDisplay (GR_GL_FUNCTION_TYPE* GrEGLGetCurrentDisplayProc)();
+typedef GrEGLImage (GR_GL_FUNCTION_TYPE* GrEGLCreateImageProc)(GrEGLDisplay dpy, GrEGLContext ctx, GrEGLenum target, GrEGLClientBuffer buffer, const GrEGLint *attrib_list);
+typedef GrEGLBoolean (GR_GL_FUNCTION_TYPE* GrEGLDestroyImageProc)(GrEGLDisplay dpy, GrEGLImage image);
+} // extern "C"
+
+template <typename GLPTR> using GrGLFunction = std::function<skstd::remove_pointer_t<GLPTR>>;
+
+#endif
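
The GrGLFunction alias above wraps each *Proc pointer type in a std::function with the matching signature, so an interface slot can hold either a raw GL entry point or any C++ callable. A minimal sketch of what that permits, assuming this header is included (the 0x4000 literal stands in for GL_COLOR_BUFFER_BIT, which is not defined in this file):

    GrGLFunction<GrGLClearProc> clear = [](GrGLbitfield mask) {
        // forward to the real glClear here, or record the call for testing
    };
    clear(0x4000);  // equivalent of glClear(GL_COLOR_BUFFER_BIT)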
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLInterface.h b/gfx/skia/skia/include/gpu/gl/GrGLInterface.h
new file mode 100644
index 000000000..60109ec18
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLInterface.h
@@ -0,0 +1,481 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLInterface_DEFINED
+#define GrGLInterface_DEFINED
+
+#include "GrGLFunctions.h"
+#include "GrGLExtensions.h"
+#include "SkRefCnt.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Rather than depend on platform-specific GL headers and libraries, we require
+ * the client to provide a struct of GL function pointers. This struct can be
+ * specified per-GrContext as a parameter to GrContext::Create. If NULL is
+ * passed to Create then a "default" GL interface is created. If the default is
+ * also NULL GrContext creation will fail.
+ *
+ * The default interface is returned by GrGLDefaultInterface. This function's
+ * implementation is platform-specific. Several have been provided, along with
+ * an implementation that simply returns NULL.
+ *
+ * By defining GR_GL_PER_GL_CALL_IFACE_CALLBACK to 1 the client can specify a
+ * callback function that will be called prior to each GL function call. See
+ * comments in GrGLConfig.h
+ */
+
+typedef void(*GrGLFuncPtr)();
+
+struct GrGLInterface;
+
+const GrGLInterface* GrGLDefaultInterface();
+
+/**
+ * Creates a GrGLInterface for a "native" GL context (e.g. WGL on Windows,
+ * GLX on Linux, AGL on Mac). The interface is only valid for the GL context
+ * that is current when the interface is created.
+ */
+SK_API const GrGLInterface* GrGLCreateNativeInterface();
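
Concretely, the usage the comment above describes looks roughly like the following on a desktop platform, assuming the two-argument GrContext::Create(GrBackend, GrBackendContext) overload present in this Skia snapshot; the interface must be built while the target GL context is current:

    sk_sp<const GrGLInterface> iface(GrGLCreateNativeInterface());
    GrContext* ctx = iface
            ? GrContext::Create(kOpenGL_GrBackend,
                                reinterpret_cast<GrBackendContext>(iface.get()))
            : nullptr;  // a NULL interface means context creation fails, as noted above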
+
+#if GR_GL_PER_GL_FUNC_CALLBACK
+typedef void (*GrGLInterfaceCallbackProc)(const GrGLInterface*);
+typedef intptr_t GrGLInterfaceCallbackData;
+#endif
+
+/**
+ * Creates a null GrGLInterface that doesn't draw anything. Used for measuring
+ * CPU overhead. TODO: We would like to move this to tools/gpu/gl/null but currently
+ * Chromium is using it in its unit tests.
+ */
+const SK_API GrGLInterface* GrGLCreateNullInterface(bool enableNVPR = false);
+
+/** Function that returns a new interface identical to "interface" but without support for
+ GL_NV_path_rendering. */
+const GrGLInterface* GrGLInterfaceRemoveNVPR(const GrGLInterface*);
+
+/** Function that returns a new interface identical to "interface" but with support for
+ test version of GL_EXT_debug_marker. */
+const GrGLInterface* GrGLInterfaceAddTestDebugMarker(const GrGLInterface*,
+ GrGLInsertEventMarkerProc insertEventMarkerFn,
+ GrGLPushGroupMarkerProc pushGroupMarkerFn,
+ GrGLPopGroupMarkerProc popGroupMarkerFn);
+
+/**
+ * GrContext uses the following interface to make all calls into OpenGL. When a
+ * GrContext is created it is given a GrGLInterface. The interface's function
+ * pointers must be valid for the OpenGL context associated with the GrContext.
+ * On some platforms, such as Windows, function pointers for OpenGL extensions
+ * may vary between OpenGL contexts. So the caller must be careful to use a
+ * GrGLInterface initialized for the correct context. All functions that should
+ * be available based on the OpenGL's version and extension string must be
+ * non-NULL or GrContext creation will fail. This can be tested with the
+ * validate() method when the OpenGL context has been made current.
+ */
+struct SK_API GrGLInterface : public SkRefCnt {
+private:
+ typedef SkRefCnt INHERITED;
+
+public:
+ GrGLInterface();
+
+ static GrGLInterface* NewClone(const GrGLInterface*);
+
+ // Validates that the GrGLInterface supports its advertised standard. This means the necessary
+ // function pointers have been initialized for both the GL version and any advertised
+ // extensions.
+ bool validate() const;
+
+ // Indicates the type of GL implementation
+ union {
+ GrGLStandard fStandard;
+        GrGLStandard fBindingsExported; // Legacy name, will be removed when Chromium is updated.
+ };
+
+ GrGLExtensions fExtensions;
+
+ bool hasExtension(const char ext[]) const { return fExtensions.has(ext); }
+
+ /**
+ * The function pointers are in a struct so that we can have a compiler generated assignment
+ * operator.
+ */
+ struct Functions {
+ GrGLFunction<GrGLActiveTextureProc> fActiveTexture;
+ GrGLFunction<GrGLAttachShaderProc> fAttachShader;
+ GrGLFunction<GrGLBeginQueryProc> fBeginQuery;
+ GrGLFunction<GrGLBindAttribLocationProc> fBindAttribLocation;
+ GrGLFunction<GrGLBindBufferProc> fBindBuffer;
+ GrGLFunction<GrGLBindFragDataLocationProc> fBindFragDataLocation;
+ GrGLFunction<GrGLBindFragDataLocationIndexedProc> fBindFragDataLocationIndexed;
+ GrGLFunction<GrGLBindFramebufferProc> fBindFramebuffer;
+ GrGLFunction<GrGLBindRenderbufferProc> fBindRenderbuffer;
+ GrGLFunction<GrGLBindTextureProc> fBindTexture;
+ GrGLFunction<GrGLBindVertexArrayProc> fBindVertexArray;
+ GrGLFunction<GrGLBlendBarrierProc> fBlendBarrier;
+ GrGLFunction<GrGLBlendColorProc> fBlendColor;
+ GrGLFunction<GrGLBlendEquationProc> fBlendEquation;
+ GrGLFunction<GrGLBlendFuncProc> fBlendFunc;
+ GrGLFunction<GrGLBlitFramebufferProc> fBlitFramebuffer;
+ GrGLFunction<GrGLBufferDataProc> fBufferData;
+ GrGLFunction<GrGLBufferSubDataProc> fBufferSubData;
+ GrGLFunction<GrGLCheckFramebufferStatusProc> fCheckFramebufferStatus;
+ GrGLFunction<GrGLClearProc> fClear;
+ GrGLFunction<GrGLClearColorProc> fClearColor;
+ GrGLFunction<GrGLClearStencilProc> fClearStencil;
+ GrGLFunction<GrGLColorMaskProc> fColorMask;
+ GrGLFunction<GrGLCompileShaderProc> fCompileShader;
+ GrGLFunction<GrGLCompressedTexImage2DProc> fCompressedTexImage2D;
+ GrGLFunction<GrGLCompressedTexSubImage2DProc> fCompressedTexSubImage2D;
+ GrGLFunction<GrGLCopyTexSubImage2DProc> fCopyTexSubImage2D;
+ GrGLFunction<GrGLCreateProgramProc> fCreateProgram;
+ GrGLFunction<GrGLCreateShaderProc> fCreateShader;
+ GrGLFunction<GrGLCullFaceProc> fCullFace;
+ GrGLFunction<GrGLDeleteBuffersProc> fDeleteBuffers;
+ GrGLFunction<GrGLDeleteFramebuffersProc> fDeleteFramebuffers;
+ GrGLFunction<GrGLDeleteProgramProc> fDeleteProgram;
+ GrGLFunction<GrGLDeleteQueriesProc> fDeleteQueries;
+ GrGLFunction<GrGLDeleteRenderbuffersProc> fDeleteRenderbuffers;
+ GrGLFunction<GrGLDeleteShaderProc> fDeleteShader;
+ GrGLFunction<GrGLDeleteTexturesProc> fDeleteTextures;
+ GrGLFunction<GrGLDeleteVertexArraysProc> fDeleteVertexArrays;
+ GrGLFunction<GrGLDepthMaskProc> fDepthMask;
+ GrGLFunction<GrGLDisableProc> fDisable;
+ GrGLFunction<GrGLDisableVertexAttribArrayProc> fDisableVertexAttribArray;
+ GrGLFunction<GrGLDrawArraysProc> fDrawArrays;
+ GrGLFunction<GrGLDrawArraysIndirectProc> fDrawArraysIndirect;
+ GrGLFunction<GrGLDrawArraysInstancedProc> fDrawArraysInstanced;
+ GrGLFunction<GrGLDrawBufferProc> fDrawBuffer;
+ GrGLFunction<GrGLDrawBuffersProc> fDrawBuffers;
+ GrGLFunction<GrGLDrawElementsProc> fDrawElements;
+ GrGLFunction<GrGLDrawElementsIndirectProc> fDrawElementsIndirect;
+ GrGLFunction<GrGLDrawElementsInstancedProc> fDrawElementsInstanced;
+ GrGLFunction<GrGLDrawRangeElementsProc> fDrawRangeElements;
+ GrGLFunction<GrGLEnableProc> fEnable;
+ GrGLFunction<GrGLEnableVertexAttribArrayProc> fEnableVertexAttribArray;
+ GrGLFunction<GrGLEndQueryProc> fEndQuery;
+ GrGLFunction<GrGLFinishProc> fFinish;
+ GrGLFunction<GrGLFlushProc> fFlush;
+ GrGLFunction<GrGLFlushMappedBufferRangeProc> fFlushMappedBufferRange;
+ GrGLFunction<GrGLFramebufferRenderbufferProc> fFramebufferRenderbuffer;
+ GrGLFunction<GrGLFramebufferTexture2DProc> fFramebufferTexture2D;
+ GrGLFunction<GrGLFramebufferTexture2DMultisampleProc> fFramebufferTexture2DMultisample;
+ GrGLFunction<GrGLFrontFaceProc> fFrontFace;
+ GrGLFunction<GrGLGenBuffersProc> fGenBuffers;
+ GrGLFunction<GrGLGenFramebuffersProc> fGenFramebuffers;
+ GrGLFunction<GrGLGenerateMipmapProc> fGenerateMipmap;
+ GrGLFunction<GrGLGenQueriesProc> fGenQueries;
+ GrGLFunction<GrGLGenRenderbuffersProc> fGenRenderbuffers;
+ GrGLFunction<GrGLGenTexturesProc> fGenTextures;
+ GrGLFunction<GrGLGenVertexArraysProc> fGenVertexArrays;
+ GrGLFunction<GrGLGetBufferParameterivProc> fGetBufferParameteriv;
+ GrGLFunction<GrGLGetErrorProc> fGetError;
+ GrGLFunction<GrGLGetFramebufferAttachmentParameterivProc> fGetFramebufferAttachmentParameteriv;
+ GrGLFunction<GrGLGetIntegervProc> fGetIntegerv;
+ GrGLFunction<GrGLGetMultisamplefvProc> fGetMultisamplefv;
+ GrGLFunction<GrGLGetQueryObjecti64vProc> fGetQueryObjecti64v;
+ GrGLFunction<GrGLGetQueryObjectivProc> fGetQueryObjectiv;
+ GrGLFunction<GrGLGetQueryObjectui64vProc> fGetQueryObjectui64v;
+ GrGLFunction<GrGLGetQueryObjectuivProc> fGetQueryObjectuiv;
+ GrGLFunction<GrGLGetQueryivProc> fGetQueryiv;
+ GrGLFunction<GrGLGetProgramInfoLogProc> fGetProgramInfoLog;
+ GrGLFunction<GrGLGetProgramivProc> fGetProgramiv;
+ GrGLFunction<GrGLGetRenderbufferParameterivProc> fGetRenderbufferParameteriv;
+ GrGLFunction<GrGLGetShaderInfoLogProc> fGetShaderInfoLog;
+ GrGLFunction<GrGLGetShaderivProc> fGetShaderiv;
+ GrGLFunction<GrGLGetShaderPrecisionFormatProc> fGetShaderPrecisionFormat;
+ GrGLFunction<GrGLGetStringProc> fGetString;
+ GrGLFunction<GrGLGetStringiProc> fGetStringi;
+ GrGLFunction<GrGLGetTexLevelParameterivProc> fGetTexLevelParameteriv;
+ GrGLFunction<GrGLGetUniformLocationProc> fGetUniformLocation;
+ GrGLFunction<GrGLInsertEventMarkerProc> fInsertEventMarker;
+ GrGLFunction<GrGLInvalidateBufferDataProc> fInvalidateBufferData;
+ GrGLFunction<GrGLInvalidateBufferSubDataProc> fInvalidateBufferSubData;
+ GrGLFunction<GrGLInvalidateFramebufferProc> fInvalidateFramebuffer;
+ GrGLFunction<GrGLInvalidateSubFramebufferProc> fInvalidateSubFramebuffer;
+ GrGLFunction<GrGLInvalidateTexImageProc> fInvalidateTexImage;
+ GrGLFunction<GrGLInvalidateTexSubImageProc> fInvalidateTexSubImage;
+ GrGLFunction<GrGLIsTextureProc> fIsTexture;
+ GrGLFunction<GrGLLineWidthProc> fLineWidth;
+ GrGLFunction<GrGLLinkProgramProc> fLinkProgram;
+ GrGLFunction<GrGLMapBufferProc> fMapBuffer;
+ GrGLFunction<GrGLMapBufferRangeProc> fMapBufferRange;
+ GrGLFunction<GrGLMapBufferSubDataProc> fMapBufferSubData;
+ GrGLFunction<GrGLMapTexSubImage2DProc> fMapTexSubImage2D;
+ GrGLFunction<GrGLMultiDrawArraysIndirectProc> fMultiDrawArraysIndirect;
+ GrGLFunction<GrGLMultiDrawElementsIndirectProc> fMultiDrawElementsIndirect;
+ GrGLFunction<GrGLPixelStoreiProc> fPixelStorei;
+ GrGLFunction<GrGLPopGroupMarkerProc> fPopGroupMarker;
+ GrGLFunction<GrGLPushGroupMarkerProc> fPushGroupMarker;
+ GrGLFunction<GrGLQueryCounterProc> fQueryCounter;
+ GrGLFunction<GrGLRasterSamplesProc> fRasterSamples;
+ GrGLFunction<GrGLReadBufferProc> fReadBuffer;
+ GrGLFunction<GrGLReadPixelsProc> fReadPixels;
+ GrGLFunction<GrGLRenderbufferStorageProc> fRenderbufferStorage;
+
+ // On OpenGL ES there are multiple incompatible extensions that add support for MSAA
+ // and ES3 adds MSAA support to the standard. On an ES3 driver we may still use the
+ // older extensions for performance reasons or due to ES3 driver bugs. We want the function
+ // that creates the GrGLInterface to provide all available functions and internally
+ // we will select among them. They all have a method called glRenderbufferStorageMultisample*.
+ // So we have separate function pointers for GL_IMG/EXT_multisampled_to_texture,
+ // GL_CHROMIUM/ANGLE_framebuffer_multisample/ES3, and GL_APPLE_framebuffer_multisample
+ // variations.
+ //
+ // If a driver supports multiple GL_ARB_framebuffer_multisample-style extensions then we will
+ // assume the function pointers for the standard (or equivalent GL_ARB) version have
+ // been preferred over GL_EXT, GL_CHROMIUM, or GL_ANGLE variations that have reduced
+ // functionality.
+
+ // GL_EXT_multisampled_render_to_texture (preferred) or GL_IMG_multisampled_render_to_texture
+ GrGLFunction<GrGLRenderbufferStorageMultisampleProc> fRenderbufferStorageMultisampleES2EXT;
+ // GL_APPLE_framebuffer_multisample
+ GrGLFunction<GrGLRenderbufferStorageMultisampleProc> fRenderbufferStorageMultisampleES2APPLE;
+
+ // This is used to store the pointer for GL_ARB/EXT/ANGLE/CHROMIUM_framebuffer_multisample or
+ // the standard function in ES3+ or GL 3.0+.
+ GrGLFunction<GrGLRenderbufferStorageMultisampleProc> fRenderbufferStorageMultisample;
+
+ // Pointer to BindUniformLocationCHROMIUM from the GL_CHROMIUM_bind_uniform_location extension.
+ GrGLFunction<GrGLBindUniformLocationProc> fBindUniformLocation;
+
+ GrGLFunction<GrGLResolveMultisampleFramebufferProc> fResolveMultisampleFramebuffer;
+ GrGLFunction<GrGLScissorProc> fScissor;
+ GrGLFunction<GrGLShaderSourceProc> fShaderSource;
+ GrGLFunction<GrGLStencilFuncProc> fStencilFunc;
+ GrGLFunction<GrGLStencilFuncSeparateProc> fStencilFuncSeparate;
+ GrGLFunction<GrGLStencilMaskProc> fStencilMask;
+ GrGLFunction<GrGLStencilMaskSeparateProc> fStencilMaskSeparate;
+ GrGLFunction<GrGLStencilOpProc> fStencilOp;
+ GrGLFunction<GrGLStencilOpSeparateProc> fStencilOpSeparate;
+ GrGLFunction<GrGLTexBufferProc> fTexBuffer;
+ GrGLFunction<GrGLTexBufferRangeProc> fTexBufferRange;
+ GrGLFunction<GrGLTexImage2DProc> fTexImage2D;
+ GrGLFunction<GrGLTexParameteriProc> fTexParameteri;
+ GrGLFunction<GrGLTexParameterivProc> fTexParameteriv;
+ GrGLFunction<GrGLTexSubImage2DProc> fTexSubImage2D;
+ GrGLFunction<GrGLTexStorage2DProc> fTexStorage2D;
+ GrGLFunction<GrGLTextureBarrierProc> fTextureBarrier;
+ GrGLFunction<GrGLDiscardFramebufferProc> fDiscardFramebuffer;
+ GrGLFunction<GrGLUniform1fProc> fUniform1f;
+ GrGLFunction<GrGLUniform1iProc> fUniform1i;
+ GrGLFunction<GrGLUniform1fvProc> fUniform1fv;
+ GrGLFunction<GrGLUniform1ivProc> fUniform1iv;
+ GrGLFunction<GrGLUniform2fProc> fUniform2f;
+ GrGLFunction<GrGLUniform2iProc> fUniform2i;
+ GrGLFunction<GrGLUniform2fvProc> fUniform2fv;
+ GrGLFunction<GrGLUniform2ivProc> fUniform2iv;
+ GrGLFunction<GrGLUniform3fProc> fUniform3f;
+ GrGLFunction<GrGLUniform3iProc> fUniform3i;
+ GrGLFunction<GrGLUniform3fvProc> fUniform3fv;
+ GrGLFunction<GrGLUniform3ivProc> fUniform3iv;
+ GrGLFunction<GrGLUniform4fProc> fUniform4f;
+ GrGLFunction<GrGLUniform4iProc> fUniform4i;
+ GrGLFunction<GrGLUniform4fvProc> fUniform4fv;
+ GrGLFunction<GrGLUniform4ivProc> fUniform4iv;
+ GrGLFunction<GrGLUniformMatrix2fvProc> fUniformMatrix2fv;
+ GrGLFunction<GrGLUniformMatrix3fvProc> fUniformMatrix3fv;
+ GrGLFunction<GrGLUniformMatrix4fvProc> fUniformMatrix4fv;
+ GrGLFunction<GrGLUnmapBufferProc> fUnmapBuffer;
+ GrGLFunction<GrGLUnmapBufferSubDataProc> fUnmapBufferSubData;
+ GrGLFunction<GrGLUnmapTexSubImage2DProc> fUnmapTexSubImage2D;
+ GrGLFunction<GrGLUseProgramProc> fUseProgram;
+ GrGLFunction<GrGLVertexAttrib1fProc> fVertexAttrib1f;
+ GrGLFunction<GrGLVertexAttrib2fvProc> fVertexAttrib2fv;
+ GrGLFunction<GrGLVertexAttrib3fvProc> fVertexAttrib3fv;
+ GrGLFunction<GrGLVertexAttrib4fvProc> fVertexAttrib4fv;
+ GrGLFunction<GrGLVertexAttribDivisorProc> fVertexAttribDivisor;
+ GrGLFunction<GrGLVertexAttribIPointerProc> fVertexAttribIPointer;
+ GrGLFunction<GrGLVertexAttribPointerProc> fVertexAttribPointer;
+ GrGLFunction<GrGLViewportProc> fViewport;
+
+ /* GL_NV_path_rendering */
+ GrGLFunction<GrGLMatrixLoadfProc> fMatrixLoadf;
+ GrGLFunction<GrGLMatrixLoadIdentityProc> fMatrixLoadIdentity;
+ GrGLFunction<GrGLGetProgramResourceLocationProc> fGetProgramResourceLocation;
+ GrGLFunction<GrGLPathCommandsProc> fPathCommands;
+ GrGLFunction<GrGLPathParameteriProc> fPathParameteri;
+ GrGLFunction<GrGLPathParameterfProc> fPathParameterf;
+ GrGLFunction<GrGLGenPathsProc> fGenPaths;
+ GrGLFunction<GrGLDeletePathsProc> fDeletePaths;
+ GrGLFunction<GrGLIsPathProc> fIsPath;
+ GrGLFunction<GrGLPathStencilFuncProc> fPathStencilFunc;
+ GrGLFunction<GrGLStencilFillPathProc> fStencilFillPath;
+ GrGLFunction<GrGLStencilStrokePathProc> fStencilStrokePath;
+ GrGLFunction<GrGLStencilFillPathInstancedProc> fStencilFillPathInstanced;
+ GrGLFunction<GrGLStencilStrokePathInstancedProc> fStencilStrokePathInstanced;
+ GrGLFunction<GrGLCoverFillPathProc> fCoverFillPath;
+ GrGLFunction<GrGLCoverStrokePathProc> fCoverStrokePath;
+ GrGLFunction<GrGLCoverFillPathInstancedProc> fCoverFillPathInstanced;
+ GrGLFunction<GrGLCoverStrokePathInstancedProc> fCoverStrokePathInstanced;
+ // NV_path_rendering v1.2
+ GrGLFunction<GrGLStencilThenCoverFillPathProc> fStencilThenCoverFillPath;
+ GrGLFunction<GrGLStencilThenCoverStrokePathProc> fStencilThenCoverStrokePath;
+ GrGLFunction<GrGLStencilThenCoverFillPathInstancedProc> fStencilThenCoverFillPathInstanced;
+ GrGLFunction<GrGLStencilThenCoverStrokePathInstancedProc> fStencilThenCoverStrokePathInstanced;
+ // NV_path_rendering v1.3
+ GrGLFunction<GrGLProgramPathFragmentInputGenProc> fProgramPathFragmentInputGen;
+ // CHROMIUM_path_rendering
+ GrGLFunction<GrGLBindFragmentInputLocationProc> fBindFragmentInputLocation;
+
+ /* NV_framebuffer_mixed_samples */
+ GrGLFunction<GrGLCoverageModulationProc> fCoverageModulation;
+
+ /* NV_bindless_texture */
+        // We use the NVIDIA version for now because it does not require dynamically uniform handles.
+        // We may switch to the ARB version and/or omit methods in the future.
+ GrGLFunction<GrGLGetTextureHandleProc> fGetTextureHandle;
+ GrGLFunction<GrGLGetTextureSamplerHandleProc> fGetTextureSamplerHandle;
+ GrGLFunction<GrGLMakeTextureHandleResidentProc> fMakeTextureHandleResident;
+ GrGLFunction<GrGLMakeTextureHandleNonResidentProc> fMakeTextureHandleNonResident;
+ GrGLFunction<GrGLGetImageHandleProc> fGetImageHandle;
+ GrGLFunction<GrGLMakeImageHandleResidentProc> fMakeImageHandleResident;
+ GrGLFunction<GrGLMakeImageHandleNonResidentProc> fMakeImageHandleNonResident;
+ GrGLFunction<GrGLIsTextureHandleResidentProc> fIsTextureHandleResident;
+ GrGLFunction<GrGLIsImageHandleResidentProc> fIsImageHandleResident;
+ GrGLFunction<GrGLUniformHandleui64Proc> fUniformHandleui64;
+ GrGLFunction<GrGLUniformHandleui64vProc> fUniformHandleui64v;
+ GrGLFunction<GrGLProgramUniformHandleui64Proc> fProgramUniformHandleui64;
+ GrGLFunction<GrGLProgramUniformHandleui64vProc> fProgramUniformHandleui64v;
+
+ /* ARB_sample_shading */
+ GrGLFunction<GrGLMinSampleShadingProc> fMinSampleShading;
+
+ /* EXT_direct_state_access */
+        // We use the EXT version because it is more expansive and interacts with more extensions
+ // than the ARB or core (4.5) versions. We may switch and/or omit methods in the future.
+ GrGLFunction<GrGLTextureParameteriProc> fTextureParameteri;
+ GrGLFunction<GrGLTextureParameterivProc> fTextureParameteriv;
+ GrGLFunction<GrGLTextureParameterfProc> fTextureParameterf;
+ GrGLFunction<GrGLTextureParameterfvProc> fTextureParameterfv;
+ GrGLFunction<GrGLTextureImage1DProc> fTextureImage1D;
+ GrGLFunction<GrGLTextureImage2DProc> fTextureImage2D;
+ GrGLFunction<GrGLTextureSubImage1DProc> fTextureSubImage1D;
+ GrGLFunction<GrGLTextureSubImage2DProc> fTextureSubImage2D;
+ GrGLFunction<GrGLCopyTextureImage1DProc> fCopyTextureImage1D;
+ GrGLFunction<GrGLCopyTextureImage2DProc> fCopyTextureImage2D;
+ GrGLFunction<GrGLCopyTextureSubImage1DProc> fCopyTextureSubImage1D;
+ GrGLFunction<GrGLCopyTextureSubImage2DProc> fCopyTextureSubImage2D;
+ GrGLFunction<GrGLGetTextureImageProc> fGetTextureImage;
+ GrGLFunction<GrGLGetTextureParameterfvProc> fGetTextureParameterfv;
+ GrGLFunction<GrGLGetTextureParameterivProc> fGetTextureParameteriv;
+ GrGLFunction<GrGLGetTextureLevelParameterfvProc> fGetTextureLevelParameterfv;
+ GrGLFunction<GrGLGetTextureLevelParameterivProc> fGetTextureLevelParameteriv;
+ // OpenGL 1.2
+ GrGLFunction<GrGLTextureImage3DProc> fTextureImage3D;
+ GrGLFunction<GrGLTextureSubImage3DProc> fTextureSubImage3D;
+ GrGLFunction<GrGLCopyTextureSubImage3DProc> fCopyTextureSubImage3D;
+ GrGLFunction<GrGLCompressedTextureImage3DProc> fCompressedTextureImage3D;
+ GrGLFunction<GrGLCompressedTextureImage2DProc> fCompressedTextureImage2D;
+ GrGLFunction<GrGLCompressedTextureImage1DProc> fCompressedTextureImage1D;
+ GrGLFunction<GrGLCompressedTextureSubImage3DProc> fCompressedTextureSubImage3D;
+ GrGLFunction<GrGLCompressedTextureSubImage2DProc> fCompressedTextureSubImage2D;
+ GrGLFunction<GrGLCompressedTextureSubImage1DProc> fCompressedTextureSubImage1D;
+ GrGLFunction<GrGLGetCompressedTextureImageProc> fGetCompressedTextureImage;
+ // OpenGL 1.5
+ GrGLFunction<GrGLNamedBufferDataProc> fNamedBufferData;
+ GrGLFunction<GrGLNamedBufferSubDataProc> fNamedBufferSubData;
+ GrGLFunction<GrGLMapNamedBufferProc> fMapNamedBuffer;
+ GrGLFunction<GrGLUnmapNamedBufferProc> fUnmapNamedBuffer;
+ GrGLFunction<GrGLGetNamedBufferParameterivProc> fGetNamedBufferParameteriv;
+ GrGLFunction<GrGLGetNamedBufferPointervProc> fGetNamedBufferPointerv;
+ GrGLFunction<GrGLGetNamedBufferSubDataProc> fGetNamedBufferSubData;
+ // OpenGL 2.0
+ GrGLFunction<GrGLProgramUniform1fProc> fProgramUniform1f;
+ GrGLFunction<GrGLProgramUniform2fProc> fProgramUniform2f;
+ GrGLFunction<GrGLProgramUniform3fProc> fProgramUniform3f;
+ GrGLFunction<GrGLProgramUniform4fProc> fProgramUniform4f;
+ GrGLFunction<GrGLProgramUniform1iProc> fProgramUniform1i;
+ GrGLFunction<GrGLProgramUniform2iProc> fProgramUniform2i;
+ GrGLFunction<GrGLProgramUniform3iProc> fProgramUniform3i;
+ GrGLFunction<GrGLProgramUniform4iProc> fProgramUniform4i;
+ GrGLFunction<GrGLProgramUniform1fvProc> fProgramUniform1fv;
+ GrGLFunction<GrGLProgramUniform2fvProc> fProgramUniform2fv;
+ GrGLFunction<GrGLProgramUniform3fvProc> fProgramUniform3fv;
+ GrGLFunction<GrGLProgramUniform4fvProc> fProgramUniform4fv;
+ GrGLFunction<GrGLProgramUniform1ivProc> fProgramUniform1iv;
+ GrGLFunction<GrGLProgramUniform2ivProc> fProgramUniform2iv;
+ GrGLFunction<GrGLProgramUniform3ivProc> fProgramUniform3iv;
+ GrGLFunction<GrGLProgramUniform4ivProc> fProgramUniform4iv;
+ GrGLFunction<GrGLProgramUniformMatrix2fvProc> fProgramUniformMatrix2fv;
+ GrGLFunction<GrGLProgramUniformMatrix3fvProc> fProgramUniformMatrix3fv;
+ GrGLFunction<GrGLProgramUniformMatrix4fvProc> fProgramUniformMatrix4fv;
+ // OpenGL 2.1
+ GrGLFunction<GrGLProgramUniformMatrix2x3fvProc> fProgramUniformMatrix2x3fv;
+ GrGLFunction<GrGLProgramUniformMatrix3x2fvProc> fProgramUniformMatrix3x2fv;
+ GrGLFunction<GrGLProgramUniformMatrix2x4fvProc> fProgramUniformMatrix2x4fv;
+ GrGLFunction<GrGLProgramUniformMatrix4x2fvProc> fProgramUniformMatrix4x2fv;
+ GrGLFunction<GrGLProgramUniformMatrix3x4fvProc> fProgramUniformMatrix3x4fv;
+ GrGLFunction<GrGLProgramUniformMatrix4x3fvProc> fProgramUniformMatrix4x3fv;
+ // OpenGL 3.0
+ GrGLFunction<GrGLNamedRenderbufferStorageProc> fNamedRenderbufferStorage;
+ GrGLFunction<GrGLGetNamedRenderbufferParameterivProc> fGetNamedRenderbufferParameteriv;
+ GrGLFunction<GrGLNamedRenderbufferStorageMultisampleProc> fNamedRenderbufferStorageMultisample;
+ GrGLFunction<GrGLCheckNamedFramebufferStatusProc> fCheckNamedFramebufferStatus;
+ GrGLFunction<GrGLNamedFramebufferTexture1DProc> fNamedFramebufferTexture1D;
+ GrGLFunction<GrGLNamedFramebufferTexture2DProc> fNamedFramebufferTexture2D;
+ GrGLFunction<GrGLNamedFramebufferTexture3DProc> fNamedFramebufferTexture3D;
+ GrGLFunction<GrGLNamedFramebufferRenderbufferProc> fNamedFramebufferRenderbuffer;
+ GrGLFunction<GrGLGetNamedFramebufferAttachmentParameterivProc> fGetNamedFramebufferAttachmentParameteriv;
+ GrGLFunction<GrGLGenerateTextureMipmapProc> fGenerateTextureMipmap;
+ GrGLFunction<GrGLFramebufferDrawBufferProc> fFramebufferDrawBuffer;
+ GrGLFunction<GrGLFramebufferDrawBuffersProc> fFramebufferDrawBuffers;
+ GrGLFunction<GrGLFramebufferReadBufferProc> fFramebufferReadBuffer;
+ GrGLFunction<GrGLGetFramebufferParameterivProc> fGetFramebufferParameteriv;
+ GrGLFunction<GrGLNamedCopyBufferSubDataProc> fNamedCopyBufferSubData;
+ GrGLFunction<GrGLVertexArrayVertexOffsetProc> fVertexArrayVertexOffset;
+ GrGLFunction<GrGLVertexArrayColorOffsetProc> fVertexArrayColorOffset;
+ GrGLFunction<GrGLVertexArrayEdgeFlagOffsetProc> fVertexArrayEdgeFlagOffset;
+ GrGLFunction<GrGLVertexArrayIndexOffsetProc> fVertexArrayIndexOffset;
+ GrGLFunction<GrGLVertexArrayNormalOffsetProc> fVertexArrayNormalOffset;
+ GrGLFunction<GrGLVertexArrayTexCoordOffsetProc> fVertexArrayTexCoordOffset;
+ GrGLFunction<GrGLVertexArrayMultiTexCoordOffsetProc> fVertexArrayMultiTexCoordOffset;
+ GrGLFunction<GrGLVertexArrayFogCoordOffsetProc> fVertexArrayFogCoordOffset;
+ GrGLFunction<GrGLVertexArraySecondaryColorOffsetProc> fVertexArraySecondaryColorOffset;
+ GrGLFunction<GrGLVertexArrayVertexAttribOffsetProc> fVertexArrayVertexAttribOffset;
+ GrGLFunction<GrGLVertexArrayVertexAttribIOffsetProc> fVertexArrayVertexAttribIOffset;
+ GrGLFunction<GrGLEnableVertexArrayProc> fEnableVertexArray;
+ GrGLFunction<GrGLDisableVertexArrayProc> fDisableVertexArray;
+ GrGLFunction<GrGLEnableVertexArrayAttribProc> fEnableVertexArrayAttrib;
+ GrGLFunction<GrGLDisableVertexArrayAttribProc> fDisableVertexArrayAttrib;
+ GrGLFunction<GrGLGetVertexArrayIntegervProc> fGetVertexArrayIntegerv;
+ GrGLFunction<GrGLGetVertexArrayPointervProc> fGetVertexArrayPointerv;
+ GrGLFunction<GrGLGetVertexArrayIntegeri_vProc> fGetVertexArrayIntegeri_v;
+ GrGLFunction<GrGLGetVertexArrayPointeri_vProc> fGetVertexArrayPointeri_v;
+ GrGLFunction<GrGLMapNamedBufferRangeProc> fMapNamedBufferRange;
+ GrGLFunction<GrGLFlushMappedNamedBufferRangeProc> fFlushMappedNamedBufferRange;
+ // OpenGL 3.1
+ GrGLFunction<GrGLTextureBufferProc> fTextureBuffer;
+
+ /* ARB_sync */
+ GrGLFunction<GrGLFenceSyncProc> fFenceSync;
+ GrGLFunction<GrGLClientWaitSyncProc> fClientWaitSync;
+ GrGLFunction<GrGLDeleteSyncProc> fDeleteSync;
+
+ /* KHR_debug */
+ GrGLFunction<GrGLDebugMessageControlProc> fDebugMessageControl;
+ GrGLFunction<GrGLDebugMessageInsertProc> fDebugMessageInsert;
+ GrGLFunction<GrGLDebugMessageCallbackProc> fDebugMessageCallback;
+ GrGLFunction<GrGLGetDebugMessageLogProc> fGetDebugMessageLog;
+ GrGLFunction<GrGLPushDebugGroupProc> fPushDebugGroup;
+ GrGLFunction<GrGLPopDebugGroupProc> fPopDebugGroup;
+ GrGLFunction<GrGLObjectLabelProc> fObjectLabel;
+
+ /* EXT_window_rectangles */
+ GrGLFunction<GrGLWindowRectanglesProc> fWindowRectangles;
+
+ /* EGL functions */
+ GrGLFunction<GrEGLCreateImageProc> fEGLCreateImage;
+ GrGLFunction<GrEGLDestroyImageProc> fEGLDestroyImage;
+ } fFunctions;
+
+ // This exists for internal testing.
+ virtual void abandon() const {}
+};
+
+#endif
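
Tying the pieces of this header together: once the matching GL context is current, a client validates the interface and can use hasExtension() to gate optional function-pointer groups. A short sketch, assuming an interface obtained from one of the factory functions declared above:

    sk_sp<const GrGLInterface> gl(GrGLCreateNativeInterface());
    bool usable = gl && gl->validate();   // all required entry points resolved?
    bool nvpr   = usable && gl->hasExtension("GL_NV_path_rendering");
    // nvpr would gate use of the fPathCommands / fStencilFillPath / ... slots above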
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLSLPrettyPrint.h b/gfx/skia/skia/include/gpu/gl/GrGLSLPrettyPrint.h
new file mode 100644
index 000000000..52fb74557
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLSLPrettyPrint.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef GrGLSLPrettyPrint_DEFINED
+#define GrGLSLPrettyPrint_DEFINED
+
+#include "SkString.h"
+
+namespace GrGLSLPrettyPrint {
+ SkString PrettyPrintGLSL(const char** strings,
+ int* lengths,
+ int count,
+ bool countlines);
+};
+
+#endif /* GRGLPRETTYPRINTSL_H_ */
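
The pretty-printer takes the same (strings, lengths, count) triple that glShaderSource uses, and countlines appears to toggle per-line numbering in the output. A minimal sketch, with strlen from <cstring>:

    const char* src[] = { "void main() { gl_FragColor = vec4(1.0); }" };
    int lengths[]     = { (int)strlen(src[0]) };
    SkString pretty   = GrGLSLPrettyPrint::PrettyPrintGLSL(src, lengths, 1,
                                                           /*countlines=*/true);
    // pretty.c_str() now holds the re-indented (and optionally numbered) listing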
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLTypes.h b/gfx/skia/skia/include/gpu/gl/GrGLTypes.h
new file mode 100644
index 000000000..5b9e31de1
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLTypes.h
@@ -0,0 +1,117 @@
+
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLTypes_DEFINED
+#define GrGLTypes_DEFINED
+
+#include "GrGLConfig.h"
+
+/**
+ * Classifies GL contexts by which standard they implement (currently as OpenGL vs. OpenGL ES).
+ */
+enum GrGLStandard {
+ kNone_GrGLStandard,
+ kGL_GrGLStandard,
+ kGLES_GrGLStandard,
+};
+static const int kGrGLStandardCnt = 3;
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Declares typedefs for all the GL functions used in GrGLInterface
+ */
+
+typedef unsigned int GrGLenum;
+typedef unsigned char GrGLboolean;
+typedef unsigned int GrGLbitfield;
+typedef signed char GrGLbyte;
+typedef char GrGLchar;
+typedef short GrGLshort;
+typedef int GrGLint;
+typedef int GrGLsizei;
+typedef int64_t GrGLint64;
+typedef unsigned char GrGLubyte;
+typedef unsigned short GrGLushort;
+typedef unsigned int GrGLuint;
+typedef uint64_t GrGLuint64;
+typedef float GrGLfloat;
+typedef float GrGLclampf;
+typedef double GrGLdouble;
+typedef double GrGLclampd;
+typedef void GrGLvoid;
+#ifndef SK_IGNORE_64BIT_OPENGL_CHANGES
+#ifdef _WIN64
+typedef signed long long int GrGLintptr;
+typedef signed long long int GrGLsizeiptr;
+#else
+typedef signed long int GrGLintptr;
+typedef signed long int GrGLsizeiptr;
+#endif
+#else
+typedef signed long int GrGLintptr;
+typedef signed long int GrGLsizeiptr;
+#endif
+typedef void* GrGLeglImage;
+typedef void* GrGLsync;
+
+struct GrGLDrawArraysIndirectCommand {
+ GrGLuint fCount;
+ GrGLuint fInstanceCount;
+ GrGLuint fFirst;
+ GrGLuint fBaseInstance; // Requires EXT_base_instance on ES.
+};
+
+GR_STATIC_ASSERT(16 == sizeof(GrGLDrawArraysIndirectCommand));
+
+struct GrGLDrawElementsIndirectCommand {
+ GrGLuint fCount;
+ GrGLuint fInstanceCount;
+ GrGLuint fFirstIndex;
+ GrGLuint fBaseVertex;
+ GrGLuint fBaseInstance; // Requires EXT_base_instance on ES.
+};
+
+GR_STATIC_ASSERT(20 == sizeof(GrGLDrawElementsIndirectCommand));
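
These two structs mirror the command layouts that the multi-draw-indirect extensions expect to find in the indirect buffer, which is why their exact sizes (four and five GrGLuint fields, 16 and 20 bytes) are asserted. A sketch of how a draw-arrays command might be filled in before being copied into a buffer bound as the indirect draw source:

    GrGLDrawArraysIndirectCommand cmd;
    cmd.fCount         = 6;   // vertices per draw
    cmd.fInstanceCount = 1;
    cmd.fFirst         = 0;
    cmd.fBaseInstance  = 0;   // only honored with EXT_base_instance on ES
    // memcpy(mappedIndirectBuffer, &cmd, sizeof(cmd));   // hypothetical upload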
+
+/**
+ * KHR_debug
+ */
+typedef void (GR_GL_FUNCTION_TYPE* GRGLDEBUGPROC)(GrGLenum source,
+ GrGLenum type,
+ GrGLuint id,
+ GrGLenum severity,
+ GrGLsizei length,
+ const GrGLchar* message,
+ const void* userParam);
+
+/**
+ * EGL types.
+ */
+typedef void* GrEGLImage;
+typedef void* GrEGLDisplay;
+typedef void* GrEGLContext;
+typedef void* GrEGLClientBuffer;
+typedef unsigned int GrEGLenum;
+typedef int32_t GrEGLint;
+typedef unsigned int GrEGLBoolean;
+
+///////////////////////////////////////////////////////////////////////////////
+/**
+ * Types for interacting with GL resources created externally to Skia. GrBackendObjects for GL
+ * textures are really const GrGLTexture*
+ */
+
+struct GrGLTextureInfo {
+ GrGLenum fTarget;
+ GrGLuint fID;
+};
+
+GR_STATIC_ASSERT(sizeof(GrBackendObject) >= sizeof(const GrGLTextureInfo*));
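
Per the comment above, a client wrapping a texture it created itself fills in a GrGLTextureInfo and passes its address wherever a GrBackendObject is expected. A sketch, where textureId is a hypothetical GLuint produced by the embedder and 0x0DE1 is written numerically because the GL target defines are not included here:

    GrGLTextureInfo info;
    info.fTarget = 0x0DE1;     // GL_TEXTURE_2D
    info.fID     = textureId;  // hypothetical externally created texture name
    GrBackendObject backendTex = reinterpret_cast<GrBackendObject>(&info);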
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/vk/GrVkBackendContext.h b/gfx/skia/skia/include/gpu/vk/GrVkBackendContext.h
new file mode 100644
index 000000000..994201692
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/GrVkBackendContext.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkBackendContext_DEFINED
+#define GrVkBackendContext_DEFINED
+
+#include "SkRefCnt.h"
+
+#include "vk/GrVkDefines.h"
+
+struct GrVkInterface;
+
+enum GrVkExtensionFlags {
+ kEXT_debug_report_GrVkExtensionFlag = 0x0001,
+ kNV_glsl_shader_GrVkExtensionFlag = 0x0002,
+ kKHR_surface_GrVkExtensionFlag = 0x0004,
+ kKHR_swapchain_GrVkExtensionFlag = 0x0008,
+ kKHR_win32_surface_GrVkExtensionFlag = 0x0010,
+ kKHR_android_surface_GrVkExtensionFlag = 0x0020,
+ kKHR_xcb_surface_GrVkExtensionFlag = 0x0040,
+};
+
+enum GrVkFeatureFlags {
+ kGeometryShader_GrVkFeatureFlag = 0x0001,
+ kDualSrcBlend_GrVkFeatureFlag = 0x0002,
+ kSampleRateShading_GrVkFeatureFlag = 0x0004,
+};
+
+// The BackendContext contains all of the base Vulkan objects needed by the GrVkGpu. The assumption
+// is that the client will set these up and pass them to the GrVkGpu constructor. The VkDevice
+// created must support at least one graphics queue, which is passed in as well.
+// The QueueFamilyIndex must match the family of the given queue. It is needed for CommandPool
+// creation, and any GrBackendObjects handed to us (e.g., for wrapped textures) need to be created
+// in or transitioned to that family.
+struct GrVkBackendContext : public SkRefCnt {
+ VkInstance fInstance;
+ VkPhysicalDevice fPhysicalDevice;
+ VkDevice fDevice;
+ VkQueue fQueue;
+ uint32_t fGraphicsQueueIndex;
+ uint32_t fMinAPIVersion;
+ uint32_t fExtensions;
+ uint32_t fFeatures;
+ SkAutoTUnref<const GrVkInterface> fInterface;
+
+ using CanPresentFn = std::function<bool(VkInstance, VkPhysicalDevice,
+ uint32_t queueFamilyIndex)>;
+
+ // Helper function to create the default Vulkan objects needed by the GrVkGpu object
+ // If presentQueueIndex is non-NULL, will try to set up presentQueue as part of device
+ // creation using the platform-specific canPresent() function.
+ static const GrVkBackendContext* Create(uint32_t* presentQueueIndex = nullptr,
+ CanPresentFn = CanPresentFn());
+
+ ~GrVkBackendContext() override;
+};
+
+#endif
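
Clients that do not want to create the Vulkan instance, device and queue themselves can let the Create() helper above build the default set of objects. A minimal sketch using the same SkAutoTUnref idiom the struct already uses for fInterface:

    SkAutoTUnref<const GrVkBackendContext> vkBackend(GrVkBackendContext::Create());
    if (vkBackend) {
        // vkBackend->fDevice, fQueue and fGraphicsQueueIndex are ready to be
        // handed to Vulkan-backend GrContext creation.
    }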
diff --git a/gfx/skia/skia/include/gpu/vk/GrVkDefines.h b/gfx/skia/skia/include/gpu/vk/GrVkDefines.h
new file mode 100644
index 000000000..9caf2d75e
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/GrVkDefines.h
@@ -0,0 +1,26 @@
+
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkDefines_DEFINED
+#define GrVkDefines_DEFINED
+
+#if defined(SK_BUILD_FOR_WIN) || defined(SK_BUILD_FOR_WIN32)
+# define VK_USE_PLATFORM_WIN32_KHR
+#elif defined(SK_BUILD_FOR_ANDROID)
+# define VK_USE_PLATFORM_ANDROID_KHR
+#elif defined(SK_BUILD_FOR_UNIX)
+# define VK_USE_PLATFORM_XCB_KHR
+#endif
+
+#if defined(Bool) || defined(Status) || defined(True) || defined(False)
+# pragma error "Macros unexpectedly defined."
+#endif
+
+#include <vulkan/vulkan.h>
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/vk/GrVkInterface.h b/gfx/skia/skia/include/gpu/vk/GrVkInterface.h
new file mode 100644
index 000000000..1f1786516
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/GrVkInterface.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkInterface_DEFINED
+#define GrVkInterface_DEFINED
+
+#include "SkRefCnt.h"
+
+#include "vk/GrVkDefines.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * The default interface is returned by GrVkCreateInterface. This function's
+ * implementation is platform-specific.
+ */
+
+struct GrVkInterface;
+
+/**
+ * Creates a GrVkInterface.
+ */
+const GrVkInterface* GrVkCreateInterface(VkInstance instance, VkDevice device,
+ uint32_t extensionFlags);
+
+
+/**
+ * GrContext uses the following interface to make all calls into Vulkan. When a
+ * GrContext is created it is given a GrVkInterface. All functions that should be
+ * available based on the Vulkan version must be non-NULL or GrContext creation
+ * will fail. This can be tested with the validate() method.
+ */
+struct SK_API GrVkInterface : public SkRefCnt {
+private:
+ // simple wrapper class that exists only to initialize a pointer to NULL
+ template <typename FNPTR_TYPE> class VkPtr {
+ public:
+ VkPtr() : fPtr(NULL) {}
+ VkPtr operator=(FNPTR_TYPE ptr) { fPtr = ptr; return *this; }
+ operator FNPTR_TYPE() const { return fPtr; }
+ private:
+ FNPTR_TYPE fPtr;
+ };
+
+ typedef SkRefCnt INHERITED;
+
+public:
+ GrVkInterface();
+
+ // Validates that the GrVkInterface supports its advertised standard. This means the necessary
+ // function pointers have been initialized for the Vulkan version.
+ bool validate() const;
+
+ /**
+ * The function pointers are in a struct so that we can have a compiler generated assignment
+ * operator.
+ */
+ struct Functions {
+ VkPtr<PFN_vkCreateInstance> fCreateInstance;
+ VkPtr<PFN_vkDestroyInstance> fDestroyInstance;
+ VkPtr<PFN_vkEnumeratePhysicalDevices> fEnumeratePhysicalDevices;
+ VkPtr<PFN_vkGetPhysicalDeviceFeatures> fGetPhysicalDeviceFeatures;
+ VkPtr<PFN_vkGetPhysicalDeviceFormatProperties> fGetPhysicalDeviceFormatProperties;
+ VkPtr<PFN_vkGetPhysicalDeviceImageFormatProperties> fGetPhysicalDeviceImageFormatProperties;
+ VkPtr<PFN_vkGetPhysicalDeviceProperties> fGetPhysicalDeviceProperties;
+ VkPtr<PFN_vkGetPhysicalDeviceQueueFamilyProperties> fGetPhysicalDeviceQueueFamilyProperties;
+ VkPtr<PFN_vkGetPhysicalDeviceMemoryProperties> fGetPhysicalDeviceMemoryProperties;
+ VkPtr<PFN_vkCreateDevice> fCreateDevice;
+ VkPtr<PFN_vkDestroyDevice> fDestroyDevice;
+ VkPtr<PFN_vkEnumerateInstanceExtensionProperties> fEnumerateInstanceExtensionProperties;
+ VkPtr<PFN_vkEnumerateDeviceExtensionProperties> fEnumerateDeviceExtensionProperties;
+ VkPtr<PFN_vkEnumerateInstanceLayerProperties> fEnumerateInstanceLayerProperties;
+ VkPtr<PFN_vkEnumerateDeviceLayerProperties> fEnumerateDeviceLayerProperties;
+ VkPtr<PFN_vkGetDeviceQueue> fGetDeviceQueue;
+ VkPtr<PFN_vkQueueSubmit> fQueueSubmit;
+ VkPtr<PFN_vkQueueWaitIdle> fQueueWaitIdle;
+ VkPtr<PFN_vkDeviceWaitIdle> fDeviceWaitIdle;
+ VkPtr<PFN_vkAllocateMemory> fAllocateMemory;
+ VkPtr<PFN_vkFreeMemory> fFreeMemory;
+ VkPtr<PFN_vkMapMemory> fMapMemory;
+ VkPtr<PFN_vkUnmapMemory> fUnmapMemory;
+ VkPtr<PFN_vkFlushMappedMemoryRanges> fFlushMappedMemoryRanges;
+ VkPtr<PFN_vkInvalidateMappedMemoryRanges> fInvalidateMappedMemoryRanges;
+ VkPtr<PFN_vkGetDeviceMemoryCommitment> fGetDeviceMemoryCommitment;
+ VkPtr<PFN_vkBindBufferMemory> fBindBufferMemory;
+ VkPtr<PFN_vkBindImageMemory> fBindImageMemory;
+ VkPtr<PFN_vkGetBufferMemoryRequirements> fGetBufferMemoryRequirements;
+ VkPtr<PFN_vkGetImageMemoryRequirements> fGetImageMemoryRequirements;
+ VkPtr<PFN_vkGetImageSparseMemoryRequirements> fGetImageSparseMemoryRequirements;
+ VkPtr<PFN_vkGetPhysicalDeviceSparseImageFormatProperties> fGetPhysicalDeviceSparseImageFormatProperties;
+ VkPtr<PFN_vkQueueBindSparse> fQueueBindSparse;
+ VkPtr<PFN_vkCreateFence> fCreateFence;
+ VkPtr<PFN_vkDestroyFence> fDestroyFence;
+ VkPtr<PFN_vkResetFences> fResetFences;
+ VkPtr<PFN_vkGetFenceStatus> fGetFenceStatus;
+ VkPtr<PFN_vkWaitForFences> fWaitForFences;
+ VkPtr<PFN_vkCreateSemaphore> fCreateSemaphore;
+ VkPtr<PFN_vkDestroySemaphore> fDestroySemaphore;
+ VkPtr<PFN_vkCreateEvent> fCreateEvent;
+ VkPtr<PFN_vkDestroyEvent> fDestroyEvent;
+ VkPtr<PFN_vkGetEventStatus> fGetEventStatus;
+ VkPtr<PFN_vkSetEvent> fSetEvent;
+ VkPtr<PFN_vkResetEvent> fResetEvent;
+ VkPtr<PFN_vkCreateQueryPool> fCreateQueryPool;
+ VkPtr<PFN_vkDestroyQueryPool> fDestroyQueryPool;
+ VkPtr<PFN_vkGetQueryPoolResults> fGetQueryPoolResults;
+ VkPtr<PFN_vkCreateBuffer> fCreateBuffer;
+ VkPtr<PFN_vkDestroyBuffer> fDestroyBuffer;
+ VkPtr<PFN_vkCreateBufferView> fCreateBufferView;
+ VkPtr<PFN_vkDestroyBufferView> fDestroyBufferView;
+ VkPtr<PFN_vkCreateImage> fCreateImage;
+ VkPtr<PFN_vkDestroyImage> fDestroyImage;
+ VkPtr<PFN_vkGetImageSubresourceLayout> fGetImageSubresourceLayout;
+ VkPtr<PFN_vkCreateImageView> fCreateImageView;
+ VkPtr<PFN_vkDestroyImageView> fDestroyImageView;
+ VkPtr<PFN_vkCreateShaderModule> fCreateShaderModule;
+ VkPtr<PFN_vkDestroyShaderModule> fDestroyShaderModule;
+ VkPtr<PFN_vkCreatePipelineCache> fCreatePipelineCache;
+ VkPtr<PFN_vkDestroyPipelineCache> fDestroyPipelineCache;
+ VkPtr<PFN_vkGetPipelineCacheData> fGetPipelineCacheData;
+ VkPtr<PFN_vkMergePipelineCaches> fMergePipelineCaches;
+ VkPtr<PFN_vkCreateGraphicsPipelines> fCreateGraphicsPipelines;
+ VkPtr<PFN_vkCreateComputePipelines> fCreateComputePipelines;
+ VkPtr<PFN_vkDestroyPipeline> fDestroyPipeline;
+ VkPtr<PFN_vkCreatePipelineLayout> fCreatePipelineLayout;
+ VkPtr<PFN_vkDestroyPipelineLayout> fDestroyPipelineLayout;
+ VkPtr<PFN_vkCreateSampler> fCreateSampler;
+ VkPtr<PFN_vkDestroySampler> fDestroySampler;
+ VkPtr<PFN_vkCreateDescriptorSetLayout> fCreateDescriptorSetLayout;
+ VkPtr<PFN_vkDestroyDescriptorSetLayout> fDestroyDescriptorSetLayout;
+ VkPtr<PFN_vkCreateDescriptorPool> fCreateDescriptorPool;
+ VkPtr<PFN_vkDestroyDescriptorPool> fDestroyDescriptorPool;
+ VkPtr<PFN_vkResetDescriptorPool> fResetDescriptorPool;
+ VkPtr<PFN_vkAllocateDescriptorSets> fAllocateDescriptorSets;
+ VkPtr<PFN_vkFreeDescriptorSets> fFreeDescriptorSets;
+ VkPtr<PFN_vkUpdateDescriptorSets> fUpdateDescriptorSets;
+ VkPtr<PFN_vkCreateFramebuffer> fCreateFramebuffer;
+ VkPtr<PFN_vkDestroyFramebuffer> fDestroyFramebuffer;
+ VkPtr<PFN_vkCreateRenderPass> fCreateRenderPass;
+ VkPtr<PFN_vkDestroyRenderPass> fDestroyRenderPass;
+ VkPtr<PFN_vkGetRenderAreaGranularity> fGetRenderAreaGranularity;
+ VkPtr<PFN_vkCreateCommandPool> fCreateCommandPool;
+ VkPtr<PFN_vkDestroyCommandPool> fDestroyCommandPool;
+ VkPtr<PFN_vkResetCommandPool> fResetCommandPool;
+ VkPtr<PFN_vkAllocateCommandBuffers> fAllocateCommandBuffers;
+ VkPtr<PFN_vkFreeCommandBuffers> fFreeCommandBuffers;
+ VkPtr<PFN_vkBeginCommandBuffer> fBeginCommandBuffer;
+ VkPtr<PFN_vkEndCommandBuffer> fEndCommandBuffer;
+ VkPtr<PFN_vkResetCommandBuffer> fResetCommandBuffer;
+ VkPtr<PFN_vkCmdBindPipeline> fCmdBindPipeline;
+ VkPtr<PFN_vkCmdSetViewport> fCmdSetViewport;
+ VkPtr<PFN_vkCmdSetScissor> fCmdSetScissor;
+ VkPtr<PFN_vkCmdSetLineWidth> fCmdSetLineWidth;
+ VkPtr<PFN_vkCmdSetDepthBias> fCmdSetDepthBias;
+ VkPtr<PFN_vkCmdSetBlendConstants> fCmdSetBlendConstants;
+ VkPtr<PFN_vkCmdSetDepthBounds> fCmdSetDepthBounds;
+ VkPtr<PFN_vkCmdSetStencilCompareMask> fCmdSetStencilCompareMask;
+ VkPtr<PFN_vkCmdSetStencilWriteMask> fCmdSetStencilWriteMask;
+ VkPtr<PFN_vkCmdSetStencilReference> fCmdSetStencilReference;
+ VkPtr<PFN_vkCmdBindDescriptorSets> fCmdBindDescriptorSets;
+ VkPtr<PFN_vkCmdBindIndexBuffer> fCmdBindIndexBuffer;
+ VkPtr<PFN_vkCmdBindVertexBuffers> fCmdBindVertexBuffers;
+ VkPtr<PFN_vkCmdDraw> fCmdDraw;
+ VkPtr<PFN_vkCmdDrawIndexed> fCmdDrawIndexed;
+ VkPtr<PFN_vkCmdDrawIndirect> fCmdDrawIndirect;
+ VkPtr<PFN_vkCmdDrawIndexedIndirect> fCmdDrawIndexedIndirect;
+ VkPtr<PFN_vkCmdDispatch> fCmdDispatch;
+ VkPtr<PFN_vkCmdDispatchIndirect> fCmdDispatchIndirect;
+ VkPtr<PFN_vkCmdCopyBuffer> fCmdCopyBuffer;
+ VkPtr<PFN_vkCmdCopyImage> fCmdCopyImage;
+ VkPtr<PFN_vkCmdBlitImage> fCmdBlitImage;
+ VkPtr<PFN_vkCmdCopyBufferToImage> fCmdCopyBufferToImage;
+ VkPtr<PFN_vkCmdCopyImageToBuffer> fCmdCopyImageToBuffer;
+ VkPtr<PFN_vkCmdUpdateBuffer> fCmdUpdateBuffer;
+ VkPtr<PFN_vkCmdFillBuffer> fCmdFillBuffer;
+ VkPtr<PFN_vkCmdClearColorImage> fCmdClearColorImage;
+ VkPtr<PFN_vkCmdClearDepthStencilImage> fCmdClearDepthStencilImage;
+ VkPtr<PFN_vkCmdClearAttachments> fCmdClearAttachments;
+ VkPtr<PFN_vkCmdResolveImage> fCmdResolveImage;
+ VkPtr<PFN_vkCmdSetEvent> fCmdSetEvent;
+ VkPtr<PFN_vkCmdResetEvent> fCmdResetEvent;
+ VkPtr<PFN_vkCmdWaitEvents> fCmdWaitEvents;
+ VkPtr<PFN_vkCmdPipelineBarrier> fCmdPipelineBarrier;
+ VkPtr<PFN_vkCmdBeginQuery> fCmdBeginQuery;
+ VkPtr<PFN_vkCmdEndQuery> fCmdEndQuery;
+ VkPtr<PFN_vkCmdResetQueryPool> fCmdResetQueryPool;
+ VkPtr<PFN_vkCmdWriteTimestamp> fCmdWriteTimestamp;
+ VkPtr<PFN_vkCmdCopyQueryPoolResults> fCmdCopyQueryPoolResults;
+ VkPtr<PFN_vkCmdPushConstants> fCmdPushConstants;
+ VkPtr<PFN_vkCmdBeginRenderPass> fCmdBeginRenderPass;
+ VkPtr<PFN_vkCmdNextSubpass> fCmdNextSubpass;
+ VkPtr<PFN_vkCmdEndRenderPass> fCmdEndRenderPass;
+ VkPtr<PFN_vkCmdExecuteCommands> fCmdExecuteCommands;
+
+ VkPtr<PFN_vkCreateDebugReportCallbackEXT> fCreateDebugReportCallbackEXT;
+ VkPtr<PFN_vkDebugReportMessageEXT> fDebugReportMessageEXT;
+ VkPtr<PFN_vkDestroyDebugReportCallbackEXT> fDestroyDebugReportCallbackEXT;
+ } fFunctions;
+
+};
+
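+/*
+ * A minimal usage sketch: build the default interface for an existing instance and
+ * device, then confirm that all required entry points resolved. The instance,
+ * device, and extension flags are assumed to come from the caller's Vulkan setup.
+ *
+ *   const GrVkInterface* iface =
+ *       GrVkCreateInterface(instance, device, kKHR_swapchain_GrVkExtensionFlag);
+ *   if (!iface || !iface->validate()) {
+ *       // missing Vulkan entry points; abort GPU setup
+ *   }
+ */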
+#endif
diff --git a/gfx/skia/skia/include/gpu/vk/GrVkTypes.h b/gfx/skia/skia/include/gpu/vk/GrVkTypes.h
new file mode 100644
index 000000000..aa1334adc
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/GrVkTypes.h
@@ -0,0 +1,64 @@
+
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkTypes_DEFINED
+#define GrVkTypes_DEFINED
+
+#include "GrTypes.h"
+#include "vk/GrVkDefines.h"
+
+/**
+ * KHR_debug
+ */
+/*typedef void (GR_GL_FUNCTION_TYPE* GrVkDEBUGPROC)(GrVkenum source,
+ GrVkenum type,
+ GrVkuint id,
+ GrVkenum severity,
+ GrVksizei length,
+ const GrVkchar* message,
+ const void* userParam);*/
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+/**
+ * Types for interacting with Vulkan resources created externally to Skia. GrBackendObjects for
+ * Vulkan textures are really const GrVkImageInfo*
+ */
+struct GrVkAlloc {
+ VkDeviceMemory fMemory; // can be VK_NULL_HANDLE iff Tex is an RT and uses borrow semantics
+ VkDeviceSize fOffset;
+ VkDeviceSize fSize; // this can be indeterminate iff Tex uses borrow semantics
+ uint32_t fFlags;
+
+ enum Flag {
+ kNoncoherent_Flag = 0x1, // memory must be flushed to device after mapping
+ };
+};
+
+struct GrVkImageInfo {
+ /**
+ * If the image's format is sRGB (GrVkFormatIsSRGB returns true), then the image must have
+ * been created with VkImageCreateFlags containing VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT.
+ */
+ VkImage fImage;
+ GrVkAlloc fAlloc;
+ VkImageTiling fImageTiling;
+ VkImageLayout fImageLayout;
+ VkFormat fFormat;
+ uint32_t fLevelCount;
+
+ // This gives a way for a client to update the layout of the Image if they change the layout
+ // while we're still holding onto the wrapped texture. They will first need to get a handle
+ // to our internal GrVkImageInfo by calling getTextureHandle on a GrVkTexture.
+ void updateImageLayout(VkImageLayout layout) { fImageLayout = layout; }
+};
+
+GR_STATIC_ASSERT(sizeof(GrBackendObject) >= sizeof(const GrVkImageInfo*));
+
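+/*
+ * A minimal wrapping sketch, assuming the client created the VkImage and its memory
+ * itself (vkImage, vkMemory, and allocationSize are the client's own objects):
+ *
+ *   GrVkImageInfo info;
+ *   info.fImage       = vkImage;
+ *   info.fAlloc       = { vkMemory, 0, allocationSize, 0 };
+ *   info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ *   info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ *   info.fFormat      = VK_FORMAT_R8G8B8A8_UNORM;
+ *   info.fLevelCount  = 1;
+ *   GrBackendObject handle = reinterpret_cast<GrBackendObject>(&info);
+ */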
+#endif
diff --git a/gfx/skia/skia/include/images/SkForceLinking.h b/gfx/skia/skia/include/images/SkForceLinking.h
new file mode 100644
index 000000000..5de8918e6
--- /dev/null
+++ b/gfx/skia/skia/include/images/SkForceLinking.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+
+/**
+ * This function's sole purpose is to trick the linker into not discarding
+ * SkImageDecoder subclasses just because we do not directly call them.
+ * This is necessary in applications that will create image decoders from
+ * a stream.
+ * Call this function with an expression that evaluates to false to ensure
+ * that the linker includes the subclasses.
+ * Passing true will result in leaked objects.
+ */
+int SkForceLinking(bool doNotPassTrue);
+
+#define __SK_FORCE_IMAGE_DECODER_LINKING \
+SK_UNUSED static int linking_forced = SkForceLinking(false)
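+
+/*
+ * A minimal usage sketch: place the macro once at file scope in the application so
+ * the image decoder subclasses survive dead-code stripping.
+ *
+ *   #include "SkForceLinking.h"
+ *   __SK_FORCE_IMAGE_DECODER_LINKING;
+ */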
diff --git a/gfx/skia/skia/include/images/SkMovie.h b/gfx/skia/skia/include/images/SkMovie.h
new file mode 100644
index 000000000..00dad6788
--- /dev/null
+++ b/gfx/skia/skia/include/images/SkMovie.h
@@ -0,0 +1,80 @@
+
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkMovie_DEFINED
+#define SkMovie_DEFINED
+
+#include "SkRefCnt.h"
+#include "SkCanvas.h"
+
+class SkStreamRewindable;
+
+class SkMovie : public SkRefCnt {
+public:
+
+
+ /** Try to create a movie from the stream. If the stream format is not
+ supported, return NULL.
+ */
+ static SkMovie* DecodeStream(SkStreamRewindable*);
+ /** Try to create a movie from the specified file path. If the file is not
+ found, or the format is not supported, return NULL. If a movie is
+ returned, the stream may be retained by the movie (via ref()) until
+ the movie is finished with it (by calling unref()).
+ */
+ static SkMovie* DecodeFile(const char path[]);
+ /** Try to create a movie from the specified memory.
+ If the format is not supported, return NULL. If a movie is returned,
+ the data will have been read or copied, and so the caller may free
+ it.
+ */
+ static SkMovie* DecodeMemory(const void* data, size_t length);
+
+ SkMSec duration();
+ int width();
+ int height();
+ int isOpaque();
+
+ /** Specify the time code (between 0...duration) to sample a bitmap
+ from the movie. Returns true if this time code generated a different
+ bitmap/frame from the previous state (i.e. true means you need to
+ redraw).
+ */
+ bool setTime(SkMSec);
+
+ // return the right bitmap for the current time code
+ const SkBitmap& bitmap();
+
+protected:
+ struct Info {
+ SkMSec fDuration;
+ int fWidth;
+ int fHeight;
+ bool fIsOpaque;
+ };
+
+ virtual bool onGetInfo(Info*) = 0;
+ virtual bool onSetTime(SkMSec) = 0;
+ virtual bool onGetBitmap(SkBitmap*) = 0;
+
+ // visible for subclasses
+ SkMovie();
+
+private:
+ Info fInfo;
+ SkMSec fCurrTime;
+ SkBitmap fBitmap;
+ bool fNeedBitmap;
+
+ void ensureInfo();
+
+ typedef SkRefCnt INHERITED;
+};
+
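+/*
+ * A minimal playback sketch (the stream, canvas, and timeMS values come from the
+ * caller):
+ *
+ *   SkMovie* movie = SkMovie::DecodeStream(stream);
+ *   if (movie) {
+ *       if (movie->setTime(timeMS)) {             // true -> a new frame was decoded
+ *           canvas->drawBitmap(movie->bitmap(), 0, 0);
+ *       }
+ *       movie->unref();
+ *   }
+ */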
+#endif
diff --git a/gfx/skia/skia/include/pathops/SkPathOps.h b/gfx/skia/skia/include/pathops/SkPathOps.h
new file mode 100644
index 000000000..fa0178839
--- /dev/null
+++ b/gfx/skia/skia/include/pathops/SkPathOps.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOps_DEFINED
+#define SkPathOps_DEFINED
+
+#include "../private/SkTArray.h"
+#include "../private/SkTDArray.h"
+#include "SkPreConfig.h"
+
+class SkPath;
+struct SkRect;
+
+
+// FIXME: move everything below into the SkPath class
+/**
+ * The logical operations that can be performed when combining two paths.
+ */
+enum SkPathOp {
+ kDifference_SkPathOp, //!< subtract the op path from the first path
+ kIntersect_SkPathOp, //!< intersect the two paths
+ kUnion_SkPathOp, //!< union (inclusive-or) the two paths
+ kXOR_SkPathOp, //!< exclusive-or the two paths
+ kReverseDifference_SkPathOp, //!< subtract the first path from the op path
+};
+
+/** Set this path to the result of applying the Op to this path and the
+ specified path: this = (this op operand).
+ The resulting path will be constructed from non-overlapping contours.
+ The curve order is reduced where possible so that cubics may be turned
+ into quadratics, and quadratics may be turned into lines.
+
+ Returns true if operation was able to produce a result;
+ otherwise, result is unmodified.
+
+ @param one The first operand (for difference, the minuend)
+ @param two The second operand (for difference, the subtrahend)
+ @param op The operator to apply.
+ @param result The product of the operands. The result may be one of the
+ inputs.
+ @return True if the operation succeeded.
+ */
+bool SK_API Op(const SkPath& one, const SkPath& two, SkPathOp op, SkPath* result);
+
+/** Set this path to a set of non-overlapping contours that describe the
+ same area as the original path.
+ The curve order is reduced where possible so that cubics may
+ be turned into quadratics, and quadratics may be turned into lines.
+
+ Returns true if operation was able to produce a result;
+ otherwise, result is unmodified.
+
+ @param path The path to simplify.
+ @param result The simplified path. The result may be the input.
+ @return True if simplification succeeded.
+ */
+bool SK_API Simplify(const SkPath& path, SkPath* result);
+
+/** Set the resulting rectangle to the tight bounds of the path.
+
+ @param path The path measured.
+ @param result The tight bounds of the path.
+ @return True if the bounds could be computed.
+ */
+bool SK_API TightBounds(const SkPath& path, SkRect* result);
+
+/** Perform a series of path operations, optimized for unioning many paths together.
+ */
+class SK_API SkOpBuilder {
+public:
+ /** Add one or more paths and their operand. The builder is empty before the first
+ path is added, so the result of a single add is (emptyPath OP path).
+
+ @param path The second operand.
+ @param _operator The operator to apply to the existing and supplied paths.
+ */
+ void add(const SkPath& path, SkPathOp _operator);
+
+ /** Computes the sum of all paths and operands, and resets the builder to its
+ initial state.
+
+ @param result The product of the operands.
+ @return True if the operation succeeded.
+ */
+ bool resolve(SkPath* result);
+
+private:
+ SkTArray<SkPath> fPathRefs;
+ SkTDArray<SkPathOp> fOps;
+
+ void reset();
+};
+
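+/*
+ * A minimal usage sketch combining two caller-provided paths, pathA and pathB:
+ *
+ *   SkPath result;
+ *   if (Op(pathA, pathB, kIntersect_SkPathOp, &result)) {
+ *       // result now holds non-overlapping contours describing the intersection
+ *   }
+ *
+ *   // When unioning many paths, SkOpBuilder batches the work:
+ *   SkOpBuilder builder;
+ *   builder.add(pathA, kUnion_SkPathOp);
+ *   builder.add(pathB, kUnion_SkPathOp);
+ *   builder.resolve(&result);
+ */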
+#endif
diff --git a/gfx/skia/skia/include/ports/SkFontConfigInterface.h b/gfx/skia/skia/include/ports/SkFontConfigInterface.h
new file mode 100644
index 000000000..74f766f52
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontConfigInterface.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontConfigInterface_DEFINED
+#define SkFontConfigInterface_DEFINED
+
+#include "SkDataTable.h"
+#include "SkFontStyle.h"
+#include "SkRefCnt.h"
+#include "SkTypeface.h"
+
+class SkFontMgr;
+
+/**
+ * \class SkFontConfigInterface
+ *
+ * A simple interface for remotable font management.
+ * The global instance can be found with RefGlobal().
+ */
+class SK_API SkFontConfigInterface : public SkRefCnt {
+public:
+
+ /**
+ * Returns the global SkFontConfigInterface instance. If it is not
+ * nullptr, calls ref() on it. The caller must balance this with a call to
+ * unref(). The default SkFontConfigInterface is the result of calling
+ * GetSingletonDirectInterface.
+ */
+ static SkFontConfigInterface* RefGlobal();
+
+ /**
+ * Replace the current global instance with the specified one, safely
+ * ref'ing the new instance, and unref'ing the previous. Returns its
+ * parameter (the new global instance).
+ */
+ static SkFontConfigInterface* SetGlobal(SkFontConfigInterface*);
+
+ /**
+ * This should be treated as private to the impl of SkFontConfigInterface.
+ * Callers should not change or expect any particular values. It is meant
+ * to be a union of possible storage types to aid the impl.
+ */
+ struct FontIdentity {
+ FontIdentity() : fID(0), fTTCIndex(0) {}
+
+ bool operator==(const FontIdentity& other) const {
+ return fID == other.fID &&
+ fTTCIndex == other.fTTCIndex &&
+ fString == other.fString;
+ }
+ bool operator!=(const FontIdentity& other) const {
+ return !(*this == other);
+ }
+
+ uint32_t fID;
+ int32_t fTTCIndex;
+ SkString fString;
+ SkFontStyle fStyle;
+
+ // If buffer is NULL, just return the number of bytes that would have
+ // been written. Will pad contents to a multiple of 4.
+ size_t writeToMemory(void* buffer = NULL) const;
+
+ // Recreate from a flattened buffer, returning the number of bytes read.
+ size_t readFromMemory(const void* buffer, size_t length);
+ };
+
+ /**
+ * Given a familyName and style, find the best match.
+ *
+ * If a match is found, return true and set the outFontIdentifier.
+ * If outFamilyName is not null, assign the found familyName to it
+ * (which may differ from the requested familyName).
+ * If outStyle is not null, assign the found style to it
+ * (which may differ from the requested style).
+ *
+ * If a match is not found, return false, and ignore all out parameters.
+ */
+ virtual bool matchFamilyName(const char familyName[],
+ SkFontStyle requested,
+ FontIdentity* outFontIdentifier,
+ SkString* outFamilyName,
+ SkFontStyle* outStyle) = 0;
+
+ /**
+ * Given a FontRef, open a stream to access its data, or return null
+ * if the FontRef's data is not available. The caller is responsible for
+ * deleting the stream when it is done accessing the data.
+ */
+ virtual SkStreamAsset* openStream(const FontIdentity&) = 0;
+
+ /**
+ * Return an SkTypeface for the given FontIdentity.
+ *
+ * The default implementation simply returns a new typeface built using data obtained from
+ * openStream(), but derived classes may implement more complex caching schemes.
+ */
+ virtual sk_sp<SkTypeface> makeTypeface(const FontIdentity& identity) {
+ return SkTypeface::MakeFromStream(this->openStream(identity), identity.fTTCIndex);
+ }
+
+ /**
+ * Return a singleton instance of a direct subclass that calls into
+ * libfontconfig. This does not affect the refcnt of the returned instance.
+ */
+ static SkFontConfigInterface* GetSingletonDirectInterface();
+
+ // New APIs, which have default impls for now (which do nothing)
+
+ virtual sk_sp<SkDataTable> getFamilyNames() { return SkDataTable::MakeEmpty(); }
+ typedef SkRefCnt INHERITED;
+};
+
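+/*
+ * A minimal lookup sketch against the global instance:
+ *
+ *   SkFontConfigInterface* fci = SkFontConfigInterface::RefGlobal();
+ *   SkFontConfigInterface::FontIdentity id;
+ *   if (fci && fci->matchFamilyName("sans-serif", SkFontStyle(), &id, nullptr, nullptr)) {
+ *       sk_sp<SkTypeface> face = fci->makeTypeface(id);
+ *   }
+ *   SkSafeUnref(fci);
+ */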
+#endif
diff --git a/gfx/skia/skia/include/ports/SkFontMgr.h b/gfx/skia/skia/include/ports/SkFontMgr.h
new file mode 100644
index 000000000..afadeaaa9
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_DEFINED
+#define SkFontMgr_DEFINED
+
+#include "SkFontStyle.h"
+#include "SkRefCnt.h"
+#include "SkScalar.h"
+#include "SkTypes.h"
+
+class SkData;
+class SkFontData;
+class SkStreamAsset;
+class SkString;
+class SkTypeface;
+
+class SK_API SkFontStyleSet : public SkRefCnt {
+public:
+ virtual int count() = 0;
+ virtual void getStyle(int index, SkFontStyle*, SkString* style) = 0;
+ virtual SkTypeface* createTypeface(int index) = 0;
+ virtual SkTypeface* matchStyle(const SkFontStyle& pattern) = 0;
+
+ static SkFontStyleSet* CreateEmpty();
+
+protected:
+ SkTypeface* matchStyleCSS3(const SkFontStyle& pattern);
+
+private:
+ typedef SkRefCnt INHERITED;
+};
+
+class SK_API SkFontMgr : public SkRefCnt {
+public:
+ int countFamilies() const;
+ void getFamilyName(int index, SkString* familyName) const;
+ SkFontStyleSet* createStyleSet(int index) const;
+
+ /**
+ * The caller must call unref() on the returned object.
+ * Never returns NULL; will return an empty set if the name is not found.
+ *
+ * Passing |nullptr| as the parameter will return the default system font.
+ *
+ * It is possible that this will return a style set not accessible from
+ * createStyleSet(int) due to hidden or auto-activated fonts.
+ */
+ SkFontStyleSet* matchFamily(const char familyName[]) const;
+
+ /**
+ * Find the closest matching typeface to the specified familyName and style
+ * and return a ref to it. The caller must call unref() on the returned
+ * object. Will never return NULL, as it will return the default font if
+ * no matching font is found.
+ *
+ * Passing |nullptr| as the parameter for |familyName| will return the
+ * default system font.
+ *
+ * It is possible that this will return a style set not accessible from
+ * createStyleSet(int) or matchFamily(const char[]) due to hidden or
+ * auto-activated fonts.
+ */
+ SkTypeface* matchFamilyStyle(const char familyName[], const SkFontStyle&) const;
+
+ /**
+ * Use the system fallback to find a typeface for the given character.
+ * Note that bcp47 is a combination of ISO 639, 15924, and 3166-1 codes,
+ * so it is fine to just pass an ISO 639 code here.
+ *
+ * Will return NULL if no family can be found for the character
+ * in the system fallback.
+ *
+ * Passing |nullptr| as the parameter for |familyName| will return the
+ * default system font.
+ *
+ * bcp47[0] is the least significant fallback, bcp47[bcp47Count-1] is the
+ * most significant. If no specified bcp47 codes match, any font with the
+ * requested character will be matched.
+ */
+ SkTypeface* matchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const;
+
+ SkTypeface* matchFaceStyle(const SkTypeface*, const SkFontStyle&) const;
+
+ /**
+ * Create a typeface for the specified data and TTC index (pass 0 for none)
+ * or NULL if the data is not recognized. The caller must call unref() on
+ * the returned object if it is not null.
+ */
+ SkTypeface* createFromData(SkData*, int ttcIndex = 0) const;
+
+ /**
+ * Create a typeface for the specified stream and TTC index
+ * (pass 0 for none) or NULL if the stream is not recognized. The caller
+ * must call unref() on the returned object if it is not null.
+ */
+ SkTypeface* createFromStream(SkStreamAsset*, int ttcIndex = 0) const;
+
+ struct FontParameters {
+ struct Axis {
+ SkFourByteTag fTag;
+ SkScalar fStyleValue;
+ };
+
+ FontParameters() : fCollectionIndex(0), fAxisCount(0), fAxes(nullptr) {}
+
+ /** Specify the index of the desired font.
+ *
+ * Font formats like ttc, dfont, cff, cid, pfr, t42, t1, and fon may actually be indexed
+ * collections of fonts.
+ */
+ FontParameters& setCollectionIndex(int collectionIndex) {
+ fCollectionIndex = collectionIndex;
+ return *this;
+ }
+
+ /** Specify the GX variation axis values.
+ *
+ * Any axes not specified will use the default value. Specified axes not present in the
+ * font will be ignored.
+ *
+ * @param axes not copied. This pointer must remain valid for the life of the FontParameters.
+ */
+ FontParameters& setAxes(const Axis* axes, int axisCount) {
+ fAxisCount = axisCount;
+ fAxes = axes;
+ return *this;
+ }
+
+ int getCollectionIndex() const {
+ return fCollectionIndex;
+ }
+ const Axis* getAxes(int* axisCount) const {
+ *axisCount = fAxisCount;
+ return fAxes;
+ }
+ private:
+ int fCollectionIndex;
+ int fAxisCount;
+ const Axis* fAxes;
+ };
+ /* Experimental, API subject to change. */
+ SkTypeface* createFromStream(SkStreamAsset*, const FontParameters&) const;
+
+ /**
+ * Create a typeface from the specified font data.
+ * Will return NULL if the typeface could not be created.
+ * The caller must call unref() on the returned object if it is not null.
+ */
+ SkTypeface* createFromFontData(std::unique_ptr<SkFontData>) const;
+
+ /**
+ * Create a typeface for the specified fileName and TTC index
+ * (pass 0 for none) or NULL if the file is not found, or its contents are
+ * not recognized. The caller must call unref() on the returned object
+ * if it is not null.
+ */
+ SkTypeface* createFromFile(const char path[], int ttcIndex = 0) const;
+
+ SkTypeface* legacyCreateTypeface(const char familyName[], SkFontStyle style) const;
+
+ /**
+ * Return a ref to the default fontmgr. The caller must call unref() on
+ * the returned object.
+ */
+ static SkFontMgr* RefDefault();
+
+protected:
+ virtual int onCountFamilies() const = 0;
+ virtual void onGetFamilyName(int index, SkString* familyName) const = 0;
+ virtual SkFontStyleSet* onCreateStyleSet(int index) const = 0;
+
+ /** May return NULL if the name is not found. */
+ virtual SkFontStyleSet* onMatchFamily(const char familyName[]) const = 0;
+
+ virtual SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle&) const = 0;
+ virtual SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const = 0;
+ virtual SkTypeface* onMatchFaceStyle(const SkTypeface*,
+ const SkFontStyle&) const = 0;
+
+ virtual SkTypeface* onCreateFromData(SkData*, int ttcIndex) const = 0;
+ virtual SkTypeface* onCreateFromStream(SkStreamAsset*, int ttcIndex) const = 0;
+ // TODO: make pure virtual.
+ virtual SkTypeface* onCreateFromStream(SkStreamAsset*, const FontParameters&) const;
+ virtual SkTypeface* onCreateFromFontData(std::unique_ptr<SkFontData>) const;
+ virtual SkTypeface* onCreateFromFile(const char path[], int ttcIndex) const = 0;
+
+ virtual SkTypeface* onLegacyCreateTypeface(const char familyName[], SkFontStyle) const = 0;
+
+private:
+ static SkFontMgr* Factory(); // implemented by porting layer
+
+ typedef SkRefCnt INHERITED;
+};
+
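+/*
+ * A minimal matching sketch using the default manager:
+ *
+ *   SkFontMgr* mgr = SkFontMgr::RefDefault();
+ *   SkTypeface* face = mgr->matchFamilyStyle("sans-serif", SkFontStyle());
+ *   // ... use face; it is never NULL per the contract above ...
+ *   SkSafeUnref(face);
+ *   mgr->unref();
+ */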
+#endif
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_FontConfigInterface.h b/gfx/skia/skia/include/ports/SkFontMgr_FontConfigInterface.h
new file mode 100644
index 000000000..356e54c87
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_FontConfigInterface.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_FontConfigInterface_DEFINED
+#define SkFontMgr_FontConfigInterface_DEFINED
+
+#include "SkTypes.h"
+#include "SkRefCnt.h"
+
+class SkFontMgr;
+class SkFontConfigInterface;
+
+/** Creates a SkFontMgr which wraps a SkFontConfigInterface. */
+SK_API SkFontMgr* SkFontMgr_New_FCI(sk_sp<SkFontConfigInterface> fci);
+
+#endif // #ifndef SkFontMgr_FontConfigInterface_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_android.h b/gfx/skia/skia/include/ports/SkFontMgr_android.h
new file mode 100644
index 000000000..f12f51f36
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_android.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_android_DEFINED
+#define SkFontMgr_android_DEFINED
+
+#include "SkTypes.h"
+
+class SkFontMgr;
+
+/**
+ * For test only -- this only affects the default factory.
+ * Load font config from given xml files, instead of those from Android system.
+ */
+SK_API void SkUseTestFontConfigFile(const char* mainconf, const char* fallbackconf,
+ const char* fontsdir);
+
+struct SkFontMgr_Android_CustomFonts {
+ /** When specifying custom fonts, indicates how to use system fonts. */
+ enum SystemFontUse {
+ kOnlyCustom, /** Use only custom fonts. NDK compliant. */
+ kPreferCustom, /** Use custom fonts before system fonts. */
+ kPreferSystem /** Use system fonts before custom fonts. */
+ };
+ /** Whether or not to use system fonts. */
+ SystemFontUse fSystemFontUse;
+
+ /** Base path to resolve relative font file names. If a directory, should end with '/'. */
+ const char* fBasePath;
+
+ /** Optional custom configuration file to use. */
+ const char* fFontsXml;
+
+ /** Optional custom configuration file for fonts which provide fallback.
+ * If the new-style (version > 21) fontsXml format is used, this should be NULL.
+ */
+ const char* fFallbackFontsXml;
+
+ /** Optional custom flag. If set to true the SkFontMgr will acquire all requisite
+ * system IO resources on initialization.
+ */
+ bool fIsolated;
+};
+
+/** Create a font manager for Android. If 'custom' is NULL, use only system fonts. */
+SK_API SkFontMgr* SkFontMgr_New_Android(const SkFontMgr_Android_CustomFonts* custom);
+
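+/*
+ * A minimal setup sketch for application-supplied fonts (all paths below are
+ * illustrative):
+ *
+ *   SkFontMgr_Android_CustomFonts custom;
+ *   custom.fSystemFontUse    = SkFontMgr_Android_CustomFonts::kPreferCustom;
+ *   custom.fBasePath         = "/data/fonts/";
+ *   custom.fFontsXml         = "/data/fonts/fonts.xml";
+ *   custom.fFallbackFontsXml = nullptr;   // new-style fonts.xml already covers fallback
+ *   custom.fIsolated         = false;
+ *   SkFontMgr* mgr = SkFontMgr_New_Android(&custom);
+ */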
+#endif // SkFontMgr_android_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_custom.h b/gfx/skia/skia/include/ports/SkFontMgr_custom.h
new file mode 100644
index 000000000..53be63db1
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_custom.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_custom_DEFINED
+#define SkFontMgr_custom_DEFINED
+
+#include "SkTypes.h"
+
+class SkFontMgr;
+
+/** Create a custom font manager which scans a given directory for font files. */
+SK_API SkFontMgr* SkFontMgr_New_Custom_Directory(const char* dir);
+
+/** Create a custom font manager that contains no built-in fonts. */
+SK_API SkFontMgr* SkFontMgr_New_Custom_Empty();
+
+#endif // SkFontMgr_custom_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_fontconfig.h b/gfx/skia/skia/include/ports/SkFontMgr_fontconfig.h
new file mode 100644
index 000000000..7a59ff0c4
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_fontconfig.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_fontconfig_DEFINED
+#define SkFontMgr_fontconfig_DEFINED
+
+#include "SkTypes.h"
+#include <fontconfig/fontconfig.h>
+
+class SkFontMgr;
+
+/** Create a font manager around a FontConfig instance.
+ * If 'fc' is NULL, will use a new default config.
+ * Takes ownership of 'fc' and will call FcConfigDestroy on it.
+ */
+SK_API SkFontMgr* SkFontMgr_New_FontConfig(FcConfig* fc);
+
+#endif // #ifndef SkFontMgr_fontconfig_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_indirect.h b/gfx/skia/skia/include/ports/SkFontMgr_indirect.h
new file mode 100644
index 000000000..406a75a7e
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_indirect.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_indirect_DEFINED
+#define SkFontMgr_indirect_DEFINED
+
+#include "../private/SkMutex.h"
+#include "../private/SkOnce.h"
+#include "../private/SkTArray.h"
+#include "SkDataTable.h"
+#include "SkFontMgr.h"
+#include "SkRefCnt.h"
+#include "SkRemotableFontMgr.h"
+#include "SkTypeface.h"
+#include "SkTypes.h"
+
+class SkData;
+class SkFontStyle;
+class SkStreamAsset;
+class SkString;
+
+class SK_API SkFontMgr_Indirect : public SkFontMgr {
+public:
+ // TODO: The SkFontMgr is only used for createFromStream/File/Data.
+ // In the future these calls should be broken out into their own interface
+ // with a name like SkFontRenderer.
+ SkFontMgr_Indirect(SkFontMgr* impl, SkRemotableFontMgr* proxy)
+ : fImpl(SkRef(impl)), fProxy(SkRef(proxy))
+ { }
+
+protected:
+ int onCountFamilies() const override;
+ void onGetFamilyName(int index, SkString* familyName) const override;
+ SkFontStyleSet* onCreateStyleSet(int index) const override;
+
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override;
+
+ SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontStyle) const override;
+
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle&,
+ const char* bcp47[],
+ int bcp47Count,
+ SkUnichar character) const override;
+
+ SkTypeface* onMatchFaceStyle(const SkTypeface* familyMember,
+ const SkFontStyle& fontStyle) const override;
+
+ SkTypeface* onCreateFromStream(SkStreamAsset* stream, int ttcIndex) const override;
+ SkTypeface* onCreateFromFile(const char path[], int ttcIndex) const override;
+ SkTypeface* onCreateFromData(SkData* data, int ttcIndex) const override;
+
+ SkTypeface* onLegacyCreateTypeface(const char familyName[], SkFontStyle) const override;
+
+private:
+ SkTypeface* createTypefaceFromFontId(const SkFontIdentity& fontId) const;
+
+ SkAutoTUnref<SkFontMgr> fImpl;
+ SkAutoTUnref<SkRemotableFontMgr> fProxy;
+
+ struct DataEntry {
+ uint32_t fDataId; // key1
+ uint32_t fTtcIndex; // key2
+ SkTypeface* fTypeface; // value: weak ref to typeface
+
+ DataEntry() { }
+
+ DataEntry(DataEntry&& that)
+ : fDataId(that.fDataId)
+ , fTtcIndex(that.fTtcIndex)
+ , fTypeface(that.fTypeface)
+ {
+ SkDEBUGCODE(that.fDataId = SkFontIdentity::kInvalidDataId;)
+ SkDEBUGCODE(that.fTtcIndex = 0xbbadbeef;)
+ that.fTypeface = NULL;
+ }
+
+ ~DataEntry() {
+ if (fTypeface) {
+ fTypeface->weak_unref();
+ }
+ }
+ };
+ /**
+ * This cache is essentially { dataId: { ttcIndex: typeface } }
+ * For data caching we want a mapping from data id to weak references to
+ * typefaces with that data id. By storing the index next to the typeface,
+ * this data cache also acts as a typeface cache.
+ */
+ mutable SkTArray<DataEntry> fDataCache;
+ mutable SkMutex fDataCacheMutex;
+
+ mutable sk_sp<SkDataTable> fFamilyNames;
+ mutable SkOnce fFamilyNamesInitOnce;
+ static void set_up_family_names(const SkFontMgr_Indirect* self);
+
+ friend class SkStyleSet_Indirect;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/ports/SkRemotableFontMgr.h b/gfx/skia/skia/include/ports/SkRemotableFontMgr.h
new file mode 100644
index 000000000..2e028cee2
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkRemotableFontMgr.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRemotableFontMgr_DEFINED
+#define SkRemotableFontMgr_DEFINED
+
+#include "../private/SkTemplates.h"
+#include "SkFontStyle.h"
+#include "SkRefCnt.h"
+#include "SkTypes.h"
+
+class SkDataTable;
+class SkStreamAsset;
+
+struct SK_API SkFontIdentity {
+ static const uint32_t kInvalidDataId = 0xFFFFFFFF;
+
+ // Note that fDataId is a data identifier, not a font identifier.
+ // (fDataID, fTtcIndex) can be seen as a font identifier.
+ uint32_t fDataId;
+ uint32_t fTtcIndex;
+
+ // On Linux/FontConfig there is also the ability to specify rendering preferences:
+ // antialias, embedded bitmaps, autohint, hinting, hintstyle, and lcd rendering
+ // may each be set or left as no-preference.
+ // (No-preference is resolved against globals set by the platform.)
+ // Since they may be selected against, these are really 'extensions' to SkFontStyle.
+ // SkFontStyle should pick these up.
+ SkFontStyle fFontStyle;
+};
+
+class SK_API SkRemotableFontIdentitySet : public SkRefCnt {
+public:
+ SkRemotableFontIdentitySet(int count, SkFontIdentity** data);
+
+ int count() const { return fCount; }
+ const SkFontIdentity& at(int index) const { return fData[index]; }
+
+ static SkRemotableFontIdentitySet* NewEmpty();
+
+private:
+ SkRemotableFontIdentitySet() : fCount(0), fData() { }
+
+ friend SkRemotableFontIdentitySet* sk_remotable_font_identity_set_new();
+
+ int fCount;
+ SkAutoTMalloc<SkFontIdentity> fData;
+
+ typedef SkRefCnt INHERITED;
+};
+
+class SK_API SkRemotableFontMgr : public SkRefCnt {
+public:
+ /**
+ * Returns the names of the known fonts on the system.
+ * Will not return NULL, will return an empty table if no families exist.
+ *
+ * The indexes may be used with getIndex(int) and
+ * matchIndexStyle(int, SkFontStyle).
+ */
+ virtual sk_sp<SkDataTable> getFamilyNames() const = 0;
+
+ /**
+ * Returns all of the fonts with the given familyIndex.
+ * Returns NULL if the index is out of bounds.
+ * Returns empty if there are no fonts at the given index.
+ *
+ * The caller must unref() the returned object.
+ */
+ virtual SkRemotableFontIdentitySet* getIndex(int familyIndex) const = 0;
+
+ /**
+ * Returns the closest match to the given style in the given index.
+ * If there are no available fonts at the given index, the return value's
+ * data id will be kInvalidDataId.
+ */
+ virtual SkFontIdentity matchIndexStyle(int familyIndex, const SkFontStyle&) const = 0;
+
+ /**
+ * Returns all the fonts on the system with the given name.
+ * If the given name is NULL, will return the default font family.
+ * Never returns NULL; will return an empty set if the name is not found.
+ *
+ * It is possible that this will return fonts not accessible from
+ * getIndex(int) or matchIndexStyle(int, SkFontStyle) due to
+ * hidden or auto-activated fonts.
+ *
+ * The matching may be done in a system-dependent way. The name may be
+ * matched case-insensitively, there may be system aliases which resolve,
+ * and names outside the current locale may be considered. However, this
+ * should only return fonts which are somehow associated with the requested
+ * name.
+ *
+ * The caller must unref() the returned object.
+ */
+ virtual SkRemotableFontIdentitySet* matchName(const char familyName[]) const = 0;
+
+ /**
+ * Returns the closest matching font to the specified name and style.
+ * If there are no available fonts which match the name, the return value's
+ * data id will be kInvalidDataId.
+ * If the given name is NULL, the match will be against any default fonts.
+ *
+ * It is possible that this will return a font identity not accessible from
+ * methods returning sets due to hidden or auto-activated fonts.
+ *
+ * The matching may be done in a system-dependent way. The name may be
+ * matched case-insensitively, there may be system aliases which resolve,
+ * and names outside the current locale may be considered. However, this
+ * should only return a font which is somehow associated with the requested
+ * name.
+ *
+ * The caller must unref() the returned object.
+ */
+ virtual SkFontIdentity matchNameStyle(const char familyName[], const SkFontStyle&) const = 0;
+
+ /**
+ * Use the system fall-back to find a font for the given character.
+ * If no font can be found for the character, the return value's data id
+ * will be kInvalidDataId.
+ * If the name is NULL, the match will start against any default fonts.
+ * If bcp47 is NULL, a default locale will be assumed.
+ *
+ * Note that bcp47 is a combination of ISO 639, 15924, and 3166-1 codes,
+ * so it is fine to just pass an ISO 639 code here.
+ */
+ virtual SkFontIdentity matchNameStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const=0;
+
+ /**
+ * Returns the data for the given data id.
+ * Will return NULL if the data id is invalid.
+ * Note that this is a data id, not a font id.
+ *
+ * The caller must unref() the returned object.
+ */
+ virtual SkStreamAsset* getData(int dataId) const = 0;
+
+private:
+ typedef SkRefCnt INHERITED;
+};
+
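+/*
+ * A minimal client-side sketch against some concrete implementation `remote`
+ * (how `remote` is obtained is platform-specific and outside this header):
+ *
+ *   SkFontIdentity id = remote->matchNameStyle("sans-serif", SkFontStyle());
+ *   if (id.fDataId != SkFontIdentity::kInvalidDataId) {
+ *       SkStreamAsset* data = remote->getData(id.fDataId);
+ *       // ... build a typeface from data and id.fTtcIndex ...
+ *   }
+ */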
+#endif
diff --git a/gfx/skia/skia/include/ports/SkTypeface_cairo.h b/gfx/skia/skia/include/ports/SkTypeface_cairo.h
new file mode 100644
index 000000000..5baa174c4
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkTypeface_cairo.h
@@ -0,0 +1,18 @@
+#ifndef SkTypeface_cairo_DEFINED
+#define SkTypeface_cairo_DEFINED
+
+#include <cairo.h>
+#include <cairo-ft.h>
+
+#include "SkTypeface.h"
+
+SK_API extern void SkInitCairoFT(bool fontHintingEnabled);
+
+SK_API extern SkTypeface* SkCreateTypefaceFromCairoFTFont(cairo_scaled_font_t* scaledFont);
+
+#ifdef CAIRO_HAS_FC_FONT
+SK_API extern SkTypeface* SkCreateTypefaceFromCairoFTFontWithFontconfig(cairo_scaled_font_t* scaledFont, FcPattern* pattern);
+#endif
+
+#endif
+
diff --git a/gfx/skia/skia/include/ports/SkTypeface_mac.h b/gfx/skia/skia/include/ports/SkTypeface_mac.h
new file mode 100644
index 000000000..14440b538
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkTypeface_mac.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTypeface_mac_DEFINED
+#define SkTypeface_mac_DEFINED
+
+#include "SkTypeface.h"
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#ifdef SK_BUILD_FOR_MAC
+#import <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreText/CoreText.h>
+#endif
+
+/**
+ * Like the other Typeface create methods, this returns a new reference to the
+ * corresponding typeface for the specified CTFontRef. The caller must call
+ * unref() when it is finished.
+ *
+ * The CFTypeRef parameter, if provided, will be kept referenced for the
+ * lifetime of the SkTypeface. This was introduced as a means to work around
+ * https://crbug.com/413332 .
+ */
+SK_API extern SkTypeface* SkCreateTypefaceFromCTFont(CTFontRef, CFTypeRef = NULL);
+
+/**
+ * Returns the platform-specific CTFontRef handle for a
+ * given SkTypeface. Note that the returned CTFontRef gets
+ * released when the source SkTypeface is destroyed.
+ *
+ * This method is deprecated. It may only be used by Blink Mac
+ * legacy code in special cases related to text-shaping
+ * with AAT fonts, clipboard handling and font fallback.
+ * See https://code.google.com/p/skia/issues/detail?id=3408
+ */
+SK_API extern CTFontRef SkTypeface_GetCTFontRef(const SkTypeface* face);
+
+#endif // defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+#endif // SkTypeface_mac_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkTypeface_win.h b/gfx/skia/skia/include/ports/SkTypeface_win.h
new file mode 100644
index 000000000..87dd60c37
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkTypeface_win.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTypeface_win_DEFINED
+#define SkTypeface_win_DEFINED
+
+#include "../private/SkLeanWindows.h"
+#include "SkTypeface.h"
+#include <dwrite.h>
+
+#ifdef SK_BUILD_FOR_WIN
+
+/**
+ * Like the other Typeface create methods, this returns a new reference to the
+ * corresponding typeface for the specified logfont. The caller is responsible
+ * for calling unref() when it is finished.
+ */
+SK_API SkTypeface* SkCreateTypefaceFromLOGFONT(const LOGFONT&);
+
+/**
+ * Copy the LOGFONT associated with this typeface into the lf parameter. Note
+ * that the lfHeight will need to be set afterwards, since the typeface does
+ * not track this (the paint does).
+ * typeface may be NULL, in which case we return the logfont for the default font.
+ */
+SK_API void SkLOGFONTFromTypeface(const SkTypeface* typeface, LOGFONT* lf);
+
+/**
+ * Set an optional callback to ensure that the data behind a LOGFONT is loaded.
+ * This will get called if Skia tries to access the data but hits a failure.
+ * Normally this is null, and is only required if the font data needs to be
+ * remotely (re)loaded.
+ */
+SK_API void SkTypeface_SetEnsureLOGFONTAccessibleProc(void (*)(const LOGFONT&));
+
+// Experimental!
+//
+class SkFontMgr;
+class SkRemotableFontMgr;
+struct IDWriteFactory;
+struct IDWriteFontCollection;
+struct IDWriteFontFallback;
+
+/**
+ * Like the other Typeface create methods, this returns a new reference to the
+ * corresponding typeface for the specified dwrite font. The caller is responsible
+ * for calling unref() when it is finished.
+ */
+SK_API SkTypeface* SkCreateTypefaceFromDWriteFont(IDWriteFactory* aFactory,
+ IDWriteFontFace* aFontFace,
+ SkFontStyle aStyle,
+ bool aForceGDI);
+
+SK_API SkFontMgr* SkFontMgr_New_GDI();
+SK_API SkFontMgr* SkFontMgr_New_DirectWrite(IDWriteFactory* factory = NULL,
+ IDWriteFontCollection* collection = NULL);
+SK_API SkFontMgr* SkFontMgr_New_DirectWrite(IDWriteFactory* factory,
+ IDWriteFontCollection* collection,
+ IDWriteFontFallback* fallback);
+
+/**
+ * Creates an SkFontMgr which renders using DirectWrite and obtains its data
+ * from the SkRemotableFontMgr.
+ *
+ * If DirectWrite could not be initialized, will return NULL.
+ */
+SK_API SkFontMgr* SkFontMgr_New_DirectWriteRenderer(SkRemotableFontMgr*);
+
+/**
+ * Creates an SkRemotableFontMgr backed by DirectWrite using the default
+ * system font collection in the current locale.
+ *
+ * If DirectWrite could not be initialized, will return NULL.
+ */
+SK_API SkRemotableFontMgr* SkRemotableFontMgr_New_DirectWrite();
+
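+/*
+ * A minimal DirectWrite setup sketch using the defaulted factory and collection
+ * (error handling elided):
+ *
+ *   SkFontMgr* dwMgr = SkFontMgr_New_DirectWrite();
+ *   SkTypeface* face = dwMgr->matchFamilyStyle("Segoe UI", SkFontStyle());
+ *   SkSafeUnref(face);
+ *   dwMgr->unref();
+ */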
+#endif // SK_BUILD_FOR_WIN
+#endif // SkTypeface_win_DEFINED
diff --git a/gfx/skia/skia/include/private/GrAuditTrail.h b/gfx/skia/skia/include/private/GrAuditTrail.h
new file mode 100644
index 000000000..3bb7bea43
--- /dev/null
+++ b/gfx/skia/skia/include/private/GrAuditTrail.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAuditTrail_DEFINED
+#define GrAuditTrail_DEFINED
+
+#include "GrConfig.h"
+#include "SkRect.h"
+#include "SkString.h"
+#include "SkTArray.h"
+#include "SkTHash.h"
+
+class GrBatch;
+
+/*
+ * GrAuditTrail collects a list of draw ops, detailed information about those ops, and can dump them
+ * to json.
+ *
+ * Capturing this information is expensive and consumes a lot of memory; it is therefore important
+ * to enable auditing only when required and disable it promptly. The AutoEnable class helps to
+ * ensure that the audit trail is disabled in a timely fashion. Once the information has been dealt
+ * with, be sure to call reset(), or the log will simply keep growing.
+ */
+class GrAuditTrail {
+public:
+ GrAuditTrail()
+ : fClientID(kGrAuditTrailInvalidID)
+ , fEnabled(false) {}
+
+ class AutoEnable {
+ public:
+ AutoEnable(GrAuditTrail* auditTrail)
+ : fAuditTrail(auditTrail) {
+ SkASSERT(!fAuditTrail->isEnabled());
+ fAuditTrail->setEnabled(true);
+ }
+
+ ~AutoEnable() {
+ SkASSERT(fAuditTrail->isEnabled());
+ fAuditTrail->setEnabled(false);
+ }
+
+ private:
+ GrAuditTrail* fAuditTrail;
+ };
+
+ class AutoManageBatchList {
+ public:
+ AutoManageBatchList(GrAuditTrail* auditTrail)
+ : fAutoEnable(auditTrail)
+ , fAuditTrail(auditTrail) {
+ }
+
+ ~AutoManageBatchList() {
+ fAuditTrail->fullReset();
+ }
+
+ private:
+ AutoEnable fAutoEnable;
+ GrAuditTrail* fAuditTrail;
+ };
+
+ class AutoCollectBatches {
+ public:
+ AutoCollectBatches(GrAuditTrail* auditTrail, int clientID)
+ : fAutoEnable(auditTrail)
+ , fAuditTrail(auditTrail) {
+ fAuditTrail->setClientID(clientID);
+ }
+
+ ~AutoCollectBatches() { fAuditTrail->setClientID(kGrAuditTrailInvalidID); }
+
+ private:
+ AutoEnable fAutoEnable;
+ GrAuditTrail* fAuditTrail;
+ };
+
+ void pushFrame(const char* framename) {
+ SkASSERT(fEnabled);
+ fCurrentStackTrace.push_back(SkString(framename));
+ }
+
+ void addBatch(const GrBatch* batch);
+
+ void batchingResultCombined(const GrBatch* consumer, const GrBatch* consumed);
+
+ // Because batching is heavily dependent on sequence of draw calls, these calls will only
+ // produce valid information for the given draw sequence which preceded them.
+ // Specifically, future draw calls may change the batching and thus would invalidate
+ // the json. What this means is that for some sequence of draw calls N, the below toJson
+ // calls will only produce JSON which reflects N draw calls. This JSON may or may not be
+ // accurate for N + 1 or N - 1 draws depending on the actual batching algorithm used.
+ SkString toJson(bool prettyPrint = false) const;
+
+ // returns a json string of all of the batches associated with a given client id
+ SkString toJson(int clientID, bool prettyPrint = false) const;
+
+ bool isEnabled() { return fEnabled; }
+ void setEnabled(bool enabled) { fEnabled = enabled; }
+
+ void setClientID(int clientID) { fClientID = clientID; }
+
+ // We could just return our internal bookkeeping struct if copying the data out becomes
+ // a performance issue, but until then it's nice to decouple
+ struct BatchInfo {
+ SkRect fBounds;
+ uint32_t fRenderTargetUniqueID;
+ struct Batch {
+ int fClientID;
+ SkRect fBounds;
+ };
+ SkTArray<Batch> fBatches;
+ };
+
+ void getBoundsByClientID(SkTArray<BatchInfo>* outInfo, int clientID);
+ void getBoundsByBatchListID(BatchInfo* outInfo, int batchListID);
+
+ void fullReset();
+
+ static const int kGrAuditTrailInvalidID;
+
+private:
+ // TODO if performance becomes an issue, we can move to using SkVarAlloc
+ struct Batch {
+ SkString toJson() const;
+ SkString fName;
+ SkTArray<SkString> fStackTrace;
+ SkRect fBounds;
+ int fClientID;
+ int fBatchListID;
+ int fChildID;
+ };
+ typedef SkTArray<SkAutoTDelete<Batch>, true> BatchPool;
+
+ typedef SkTArray<Batch*> Batches;
+
+ struct BatchNode {
+ SkString toJson() const;
+ SkRect fBounds;
+ Batches fChildren;
+ uint32_t fRenderTargetUniqueID;
+ };
+ typedef SkTArray<SkAutoTDelete<BatchNode>, true> BatchList;
+
+ void copyOutFromBatchList(BatchInfo* outBatchInfo, int batchListID);
+
+ template <typename T>
+ static void JsonifyTArray(SkString* json, const char* name, const T& array,
+ bool addComma);
+
+ BatchPool fBatchPool;
+ SkTHashMap<uint32_t, int> fIDLookup;
+ SkTHashMap<int, Batches*> fClientIDLookup;
+ BatchList fBatchList;
+ SkTArray<SkString> fCurrentStackTrace;
+
+ // The client can pass in an optional client ID which we will use to mark the batches
+ int fClientID;
+ bool fEnabled;
+};
+
+#define GR_AUDIT_TRAIL_INVOKE_GUARD(audit_trail, invoke, ...) \
+ if (audit_trail->isEnabled()) { \
+ audit_trail->invoke(__VA_ARGS__); \
+ }
+
+#define GR_AUDIT_TRAIL_AUTO_FRAME(audit_trail, framename) \
+ GR_AUDIT_TRAIL_INVOKE_GUARD((audit_trail), pushFrame, framename);
+
+#define GR_AUDIT_TRAIL_RESET(audit_trail) \
+ //GR_AUDIT_TRAIL_INVOKE_GUARD(audit_trail, fullReset);
+
+#define GR_AUDIT_TRAIL_ADDBATCH(audit_trail, batch) \
+ GR_AUDIT_TRAIL_INVOKE_GUARD(audit_trail, addBatch, batch);
+
+#define GR_AUDIT_TRAIL_BATCHING_RESULT_COMBINED(audit_trail, combineWith, batch) \
+ GR_AUDIT_TRAIL_INVOKE_GUARD(audit_trail, batchingResultCombined, combineWith, batch);
+
+#define GR_AUDIT_TRAIL_BATCHING_RESULT_NEW(audit_trail, batch) \
+ // Doesn't do anything now, one day...
+
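+/*
+ * A minimal collection sketch; the audit trail normally belongs to a GrContext, and
+ * `auditTrail` and `kMyDrawID` below are illustrative names:
+ *
+ *   GrAuditTrail::AutoCollectBatches collect(auditTrail, kMyDrawID);
+ *   // ... issue the draws of interest ...
+ *   SkString json = auditTrail->toJson(kMyDrawID, true);   // pretty-printed
+ *   auditTrail->fullReset();
+ */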
+#endif
diff --git a/gfx/skia/skia/include/private/GrInstancedPipelineInfo.h b/gfx/skia/skia/include/private/GrInstancedPipelineInfo.h
new file mode 100644
index 000000000..7e6482da9
--- /dev/null
+++ b/gfx/skia/skia/include/private/GrInstancedPipelineInfo.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGrInstancedPipelineInfo_DEFINED
+#define GrGrInstancedPipelineInfo_DEFINED
+
+#include "GrRenderTarget.h"
+
+/**
+ * Provides info about the pipeline that GrInstancedRendering needs in order to select appropriate
+ * drawing algorithms.
+ */
+struct GrInstancedPipelineInfo {
+ GrInstancedPipelineInfo(const GrRenderTarget* rt)
+ : fIsMultisampled(rt->isStencilBufferMultisampled()),
+ fIsMixedSampled(rt->isMixedSampled()),
+ fIsRenderingToFloat(GrPixelConfigIsFloatingPoint(rt->desc().fConfig)),
+ fColorDisabled(false),
+ fDrawingShapeToStencil(false),
+ fCanDiscard(false) {
+ }
+
+ bool canUseCoverageAA() const {
+ return !fIsMultisampled || (fIsMixedSampled && !fDrawingShapeToStencil);
+ }
+
+ bool fIsMultisampled : 1;
+ bool fIsMixedSampled : 1;
+ bool fIsRenderingToFloat : 1;
+ bool fColorDisabled : 1;
+ /**
+ * Indicates that the instanced renderer should take extra precautions to ensure the shape gets
+ * drawn correctly to the stencil buffer (e.g. no coverage AA). NOTE: this does not mean a
+ * stencil test is or is not active.
+ */
+ bool fDrawingShapeToStencil : 1;
+ /**
+ * Indicates that the instanced renderer can use processors with discard instructions. This
+ * should not be set if the shader will use derivatives, automatic mipmap LOD, or other features
+ * that depend on neighboring pixels. Some draws will fail to create if this is not set.
+ */
+ bool fCanDiscard : 1;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/GrRenderTargetProxy.h b/gfx/skia/skia/include/private/GrRenderTargetProxy.h
new file mode 100644
index 000000000..e4bc70f21
--- /dev/null
+++ b/gfx/skia/skia/include/private/GrRenderTargetProxy.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRenderTargetProxy_DEFINED
+#define GrRenderTargetProxy_DEFINED
+
+#include "GrRenderTarget.h"
+#include "GrRenderTargetPriv.h"
+#include "GrSurfaceProxy.h"
+#include "GrTypes.h"
+
+class GrTextureProvider;
+
+// This class delays the acquisition of RenderTargets until they are actually
+// required
+// Beware: the uniqueID of the RenderTargetProxy will usually be different than
+// the uniqueID of the RenderTarget it represents!
+class GrRenderTargetProxy : public GrSurfaceProxy {
+public:
+ /**
+ * The caller gets the creation ref.
+ */
+ static sk_sp<GrRenderTargetProxy> Make(const GrCaps&, const GrSurfaceDesc&,
+ SkBackingFit, SkBudgeted);
+ static sk_sp<GrRenderTargetProxy> Make(const GrCaps&, sk_sp<GrRenderTarget>);
+
+ ~GrRenderTargetProxy() override;
+
+ // TODO: add asTextureProxy variants
+ GrRenderTargetProxy* asRenderTargetProxy() override { return this; }
+ const GrRenderTargetProxy* asRenderTargetProxy() const override { return this; }
+
+ // Actually instantiate the backing rendertarget, if necessary.
+ GrRenderTarget* instantiate(GrTextureProvider* texProvider);
+
+ bool isStencilBufferMultisampled() const { return fDesc.fSampleCnt > 0; }
+
+ /**
+ * For our purposes, "Mixed Sampled" means the stencil buffer is multisampled but the color
+ * buffer is not.
+ */
+ bool isMixedSampled() const { return fFlags & GrRenderTargetPriv::Flags::kMixedSampled; }
+
+ /**
+ * "Unified Sampled" means the stencil and color buffers are both multisampled.
+ */
+ bool isUnifiedMultisampled() const { return fDesc.fSampleCnt > 0 && !this->isMixedSampled(); }
+
+ /**
+ * Returns the number of samples/pixel in the stencil buffer (Zero if non-MSAA).
+ */
+ int numStencilSamples() const { return fDesc.fSampleCnt; }
+
+ /**
+ * Returns the number of samples/pixel in the color buffer (Zero if non-MSAA or mixed sampled).
+ */
+ int numColorSamples() const { return this->isMixedSampled() ? 0 : fDesc.fSampleCnt; }
+
+ void setLastDrawTarget(GrDrawTarget* dt);
+ GrDrawTarget* getLastDrawTarget() { return fLastDrawTarget; }
+
+ GrRenderTargetPriv::Flags testingOnly_getFlags() const;
+
+private:
+ // Deferred version
+ GrRenderTargetProxy(const GrCaps&, const GrSurfaceDesc&, SkBackingFit, SkBudgeted);
+
+ // Wrapped version
+ GrRenderTargetProxy(const GrCaps&, sk_sp<GrRenderTarget> rt);
+
+ // For wrapped render targets we store it here.
+ // For deferred proxies we will fill this in when we need to instantiate the deferred resource
+ sk_sp<GrRenderTarget> fTarget;
+
+ // These don't usually get computed until the render target is instantiated, but the render
+ // target proxy may need to answer queries about it before then. And since in the deferred case
+ // we know the newly created render target will be internal, we are able to precompute what the
+ // flags will ultimately end up being. In the wrapped case we just copy the wrapped
+ // rendertarget's info here.
+ GrRenderTargetPriv::Flags fFlags;
+
+ // The last drawTarget that wrote to or is currently going to write to this renderTarget
+ // The drawTarget can be closed (e.g., no draw context is currently bound
+ // to this renderTarget).
+ // This back-pointer is required so that we can add a dependency between
+ // the drawTarget used to create the current contents of this renderTarget
+ // and the drawTarget of a destination renderTarget to which this one is being drawn.
+ GrDrawTarget* fLastDrawTarget;
+
+ typedef GrSurfaceProxy INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/GrSingleOwner.h b/gfx/skia/skia/include/private/GrSingleOwner.h
new file mode 100644
index 000000000..64e63d3b1
--- /dev/null
+++ b/gfx/skia/skia/include/private/GrSingleOwner.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSingleOwner_DEFINED
+#define GrSingleOwner_DEFINED
+
+#include "SkTypes.h"
+
+#ifdef SK_DEBUG
+#include "SkMutex.h"
+#include "SkThreadID.h"
+
+// This is a debug tool to verify an object is only being used from one thread at a time.
+class GrSingleOwner {
+public:
+ GrSingleOwner() : fOwner(kIllegalThreadID), fReentranceCount(0) {}
+
+ struct AutoEnforce {
+ AutoEnforce(GrSingleOwner* so) : fSO(so) { fSO->enter(); }
+ ~AutoEnforce() { fSO->exit(); }
+
+ GrSingleOwner* fSO;
+ };
+
+private:
+ void enter() {
+ SkAutoMutexAcquire lock(fMutex);
+ SkThreadID self = SkGetThreadID();
+ SkASSERT(fOwner == self || fOwner == kIllegalThreadID);
+ fReentranceCount++;
+ fOwner = self;
+ }
+
+ void exit() {
+ SkAutoMutexAcquire lock(fMutex);
+ SkASSERT(fOwner == SkGetThreadID());
+ fReentranceCount--;
+ if (fReentranceCount == 0) {
+ fOwner = kIllegalThreadID;
+ }
+ }
+
+ SkMutex fMutex;
+ SkThreadID fOwner; // guarded by fMutex
+ int fReentranceCount; // guarded by fMutex
+};
+#else
+class GrSingleOwner {}; // Provide a dummy implementation so we can pass pointers to constructors
+#endif
+
+#endif
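
A minimal usage sketch of the checker above, assuming an SK_DEBUG build inside the Skia tree; HypotheticalCache and its members are illustrative, not part of the patch:

    #include "GrSingleOwner.h"

    // Illustrative class: every mutation must happen on one thread at a time,
    // and AutoEnforce asserts if a second thread enters concurrently.
    class HypotheticalCache {
    public:
        explicit HypotheticalCache(GrSingleOwner* owner) : fSingleOwner(owner) {}

        void insert(int key) {
            GrSingleOwner::AutoEnforce guard(fSingleOwner);  // asserts on concurrent entry
            fLastKey = key;
        }

    private:
        GrSingleOwner* fSingleOwner;
        int fLastKey = 0;
    };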
diff --git a/gfx/skia/skia/include/private/GrSurfaceProxy.h b/gfx/skia/skia/include/private/GrSurfaceProxy.h
new file mode 100644
index 000000000..69656fe4f
--- /dev/null
+++ b/gfx/skia/skia/include/private/GrSurfaceProxy.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSurfaceProxy_DEFINED
+#define GrSurfaceProxy_DEFINED
+
+#include "GrGpuResource.h"
+#include "SkRect.h"
+
+class GrTextureProxy;
+class GrRenderTargetProxy;
+
+class GrSurfaceProxy : public GrIORef<GrSurfaceProxy> {
+public:
+ const GrSurfaceDesc& desc() const { return fDesc; }
+
+ GrSurfaceOrigin origin() const {
+ SkASSERT(kTopLeft_GrSurfaceOrigin == fDesc.fOrigin ||
+ kBottomLeft_GrSurfaceOrigin == fDesc.fOrigin);
+ return fDesc.fOrigin;
+ }
+ int width() const { return fDesc.fWidth; }
+ int height() const { return fDesc.fHeight; }
+ GrPixelConfig config() const { return fDesc.fConfig; }
+
+ uint32_t uniqueID() const { return fUniqueID; }
+
+ /**
+ * Helper that gets the width and height of the surface as a bounding rectangle.
+ */
+ SkRect getBoundsRect() const { return SkRect::MakeIWH(this->width(), this->height()); }
+
+ /**
+ * @return the texture proxy associated with the surface proxy, may be NULL.
+ */
+ virtual GrTextureProxy* asTextureProxy() { return nullptr; }
+ virtual const GrTextureProxy* asTextureProxy() const { return nullptr; }
+
+ /**
+ * @return the render target proxy associated with the surface proxy, may be NULL.
+ */
+ virtual GrRenderTargetProxy* asRenderTargetProxy() { return nullptr; }
+ virtual const GrRenderTargetProxy* asRenderTargetProxy() const { return nullptr; }
+
+ /**
+ * Does the resource count against the resource budget?
+ */
+ SkBudgeted isBudgeted() const { return fBudgeted; }
+
+protected:
+ // Deferred version
+ GrSurfaceProxy(const GrSurfaceDesc& desc, SkBackingFit fit, SkBudgeted budgeted)
+ : fDesc(desc)
+ , fFit(fit)
+ , fBudgeted(budgeted)
+ , fUniqueID(GrGpuResource::CreateUniqueID()) {
+ }
+
+ // Wrapped version
+ GrSurfaceProxy(const GrSurfaceDesc& desc, SkBackingFit fit,
+ SkBudgeted budgeted, uint32_t uniqueID)
+ : fDesc(desc)
+ , fFit(fit)
+ , fBudgeted(budgeted)
+ , fUniqueID(uniqueID) {
+ }
+
+ virtual ~GrSurfaceProxy() {}
+
+ // For wrapped resources, 'fDesc' will always be filled in from the wrapped resource.
+ const GrSurfaceDesc fDesc;
+ const SkBackingFit fFit; // always exact for wrapped resources
+ const SkBudgeted fBudgeted; // set from the backing resource for wrapped resources
+ const uint32_t fUniqueID; // set from the backing resource for wrapped resources
+
+private:
+
+ // See comment in GrGpuResource.h.
+ void notifyAllCntsAreZero(CntType) const { delete this; }
+ bool notifyRefCountIsZero() const { return true; }
+
+ typedef GrIORef<GrSurfaceProxy> INHERITED;
+
+ // To access notifyAllCntsAreZero and notifyRefCountIsZero.
+ friend class GrIORef<GrSurfaceProxy>;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/GrTextureProxy.h b/gfx/skia/skia/include/private/GrTextureProxy.h
new file mode 100644
index 000000000..63cb3c835
--- /dev/null
+++ b/gfx/skia/skia/include/private/GrTextureProxy.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextureProxy_DEFINED
+#define GrTextureProxy_DEFINED
+
+#include "GrSurfaceProxy.h"
+#include "GrTexture.h"
+
+class GrTextureProvider;
+
+// This class delays the acquisition of textures until they are actually required
+class GrTextureProxy : public GrSurfaceProxy {
+public:
+ // TODO: need to refine ownership semantics of 'srcData' if we're in completely
+ // deferred mode
+ static sk_sp<GrTextureProxy> Make(const GrSurfaceDesc&, SkBackingFit, SkBudgeted,
+ const void* srcData = nullptr, size_t rowBytes = 0);
+ static sk_sp<GrTextureProxy> Make(sk_sp<GrTexture>);
+
+ // TODO: add asRenderTargetProxy variants
+ GrTextureProxy* asTextureProxy() override { return this; }
+ const GrTextureProxy* asTextureProxy() const override { return this; }
+
+ // Actually instantiate the backing texture, if necessary
+ GrTexture* instantiate(GrTextureProvider* texProvider);
+
+private:
+ // Deferred version
+ GrTextureProxy(const GrSurfaceDesc& srcDesc, SkBackingFit, SkBudgeted,
+ const void* srcData, size_t srcRowBytes);
+ // Wrapped version
+ GrTextureProxy(sk_sp<GrTexture> tex);
+
+ // For wrapped textures we store it here.
+ // For deferred proxies we will fill this in when we need to instantiate the deferred resource
+ sk_sp<GrTexture> fTexture;
+
+ typedef GrSurfaceProxy INHERITED;
+};
+
+#endif
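
A sketch of the deferred-then-instantiate flow these proxy classes enable. This is not code from the patch: the SkBackingFit::kExact and SkBudgeted::kYes values are assumed from the surrounding Skia headers, and the desc/provider are taken as parameters rather than constructed here:

    #include "GrTextureProxy.h"

    // Create a proxy now (no GPU allocation yet), record work against it, and
    // only allocate the backing texture when it is actually needed.
    GrTexture* make_and_instantiate(const GrSurfaceDesc& desc,
                                    GrTextureProvider* provider) {
        sk_sp<GrTextureProxy> proxy =
                GrTextureProxy::Make(desc, SkBackingFit::kExact, SkBudgeted::kYes);
        // ... draws could be recorded against 'proxy' here ...
        return proxy->instantiate(provider);  // backing texture created on demand
    }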
diff --git a/gfx/skia/skia/include/private/GrTextureStripAtlas.h b/gfx/skia/skia/include/private/GrTextureStripAtlas.h
new file mode 100644
index 000000000..5b90a342d
--- /dev/null
+++ b/gfx/skia/skia/include/private/GrTextureStripAtlas.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextureStripAtlas_DEFINED
+#define GrTextureStripAtlas_DEFINED
+
+#include "SkBitmap.h"
+#include "SkOpts.h"
+#include "SkGr.h"
+#include "SkTDArray.h"
+#include "SkTDynamicHash.h"
+#include "SkTypes.h"
+
+/**
+ * Maintains a single large texture whose rows store many small textures of a fixed height,
+ * laid out across the x-axis such that we can safely wrap/repeat them horizontally.
+ */
+class GrTextureStripAtlas {
+public:
+ /**
+ * Descriptor struct which we'll use as a hash table key
+ **/
+ struct Desc {
+ Desc() { sk_bzero(this, sizeof(*this)); }
+ GrContext* fContext;
+ GrPixelConfig fConfig;
+ uint16_t fWidth, fHeight, fRowHeight;
+ uint16_t fUnusedPadding;
+ bool operator==(const Desc& other) const {
+ return 0 == memcmp(this, &other, sizeof(Desc));
+ }
+ };
+
+ /**
+ * Try to find an atlas with the required parameters, creating a new one if necessary.
+ */
+ static GrTextureStripAtlas* GetAtlas(const Desc& desc);
+
+ ~GrTextureStripAtlas();
+
+ /**
+ * Add a texture to the atlas
+ * @param data Bitmap data to copy into the row
+ * @return The row index we inserted into, or -1 if we failed to find an open row. The caller
+ * is responsible for calling unlockRow() with this row index when it's done with it.
+ */
+ int lockRow(const SkBitmap& data);
+ void unlockRow(int row);
+
+ /**
+ * These functions help turn an integer row index in [0, 1, 2, ... numRows] into a scalar y
+ * texture coordinate in [0, 1] that we can use in a shader.
+ *
+ * If a regular texture access without using the atlas looks like:
+ *
+ * texture2D(sampler, vec2(x, y))
+ *
+ * Then when using the atlas we'd replace it with:
+ *
+ * texture2D(sampler, vec2(x, yOffset + y * scaleFactor))
+ *
+ * Where yOffset, returned by getYOffset(), is the offset to the start of the row within the
+ * atlas and scaleFactor, returned by getNormalizedTexelHeight, is the normalized height of
+ * one texel row.
+ */
+ SkScalar getYOffset(int row) const { return SkIntToScalar(row) / fNumRows; }
+ SkScalar getNormalizedTexelHeight() const { return fNormalizedYHeight; }
+
+ GrContext* getContext() const { return fDesc.fContext; }
+ GrTexture* getTexture() const { return fTexture; }
+
+private:
+
+ // Key to indicate an atlas row without any meaningful data stored in it
+ const static uint32_t kEmptyAtlasRowKey = 0xffffffff;
+
+ /**
+ * The state of a single row in our cache, next/prev pointers allow these to be chained
+ * together to represent LRU status
+ */
+ struct AtlasRow : SkNoncopyable {
+ AtlasRow() : fKey(kEmptyAtlasRowKey), fLocks(0), fNext(nullptr), fPrev(nullptr) { }
+ // GenerationID of the bitmap that is represented by this row, 0xffffffff means "empty"
+ uint32_t fKey;
+ // How many times this has been locked (0 == unlocked)
+ int32_t fLocks;
+ // We maintain an LRU linked list between unlocked nodes with these pointers
+ AtlasRow* fNext;
+ AtlasRow* fPrev;
+ };
+
+ /**
+ * We'll only allow construction via the static GrTextureStripAtlas::GetAtlas
+ */
+ GrTextureStripAtlas(Desc desc);
+
+ void lockTexture();
+ void unlockTexture();
+
+ /**
+ * Initialize our LRU list (if one already exists, clear it and start anew)
+ */
+ void initLRU();
+
+ /**
+ * Grabs the least recently used free row out of the LRU list, returns nullptr if no rows are free.
+ */
+ AtlasRow* getLRU();
+
+ void appendLRU(AtlasRow* row);
+ void removeFromLRU(AtlasRow* row);
+
+ /**
+ * Searches the key table for a key and returns the index if found; if not found, it returns
+ * the bitwise not of the index at which we could insert the key to maintain a sorted list.
+ **/
+ int searchByKey(uint32_t key);
+
+ /**
+ * Compare two atlas rows by key, so we can sort/search by key
+ */
+ static bool KeyLess(const AtlasRow& lhs, const AtlasRow& rhs) {
+ return lhs.fKey < rhs.fKey;
+ }
+
+#ifdef SK_DEBUG
+ void validate();
+#endif
+
+ /**
+ * Clean up callback registered with GrContext. Allows this class to
+ * free up any allocated AtlasEntry and GrTextureStripAtlas objects
+ */
+ static void CleanUp(const GrContext* context, void* info);
+
+ // Hash table entry for atlases
+ class AtlasEntry : public ::SkNoncopyable {
+ public:
+ // for SkTDynamicHash
+ static const Desc& GetKey(const AtlasEntry& entry) { return entry.fDesc; }
+ static uint32_t Hash(const Desc& desc) { return SkOpts::hash(&desc, sizeof(Desc)); }
+
+ // AtlasEntry proper
+ AtlasEntry() : fAtlas(nullptr) {}
+ ~AtlasEntry() { delete fAtlas; }
+ Desc fDesc;
+ GrTextureStripAtlas* fAtlas;
+ };
+
+ class Hash;
+ static Hash* gAtlasCache;
+
+ static Hash* GetCache();
+
+ // We increment gCacheCount for each atlas
+ static int32_t gCacheCount;
+
+ // A unique ID for this texture (formed with: gCacheCount++), so we can be sure that if we
+ // get a texture back from the texture cache, it's the same one we last used.
+ const int32_t fCacheKey;
+
+ // Total locks on all rows (when this reaches zero, we can unlock our texture)
+ int32_t fLockedRows;
+
+ const Desc fDesc;
+ const uint16_t fNumRows;
+ GrTexture* fTexture;
+
+ SkScalar fNormalizedYHeight;
+
+ // Array of AtlasRows which store the state of all our rows. Stored in a contiguous array, in
+ // order that they appear in our texture, this means we can subtract this pointer from a row
+ // pointer to get its index in the texture, and can save storing a row number in AtlasRow.
+ AtlasRow* fRows;
+
+ // Head and tail for linked list of least-recently-used rows (front = least recently used).
+ // Note that when a texture is locked, it gets removed from this list until it is unlocked.
+ AtlasRow* fLRUFront;
+ AtlasRow* fLRUBack;
+
+ // A list of pointers to AtlasRows that currently contain cached images, sorted by key
+ SkTDArray<AtlasRow*> fKeyTable;
+};
+
+#endif
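
A sketch of how a caller might lock a strip into the atlas and derive the shader's y coordinate, following the row/offset scheme documented above. The lock_strip function is illustrative; the Desc used to obtain the atlas is assumed to be set up elsewhere:

    #include "GrTextureStripAtlas.h"
    #include "SkBitmap.h"
    #include "SkScalar.h"

    // Returns the y offset to use in the shader, or -1 if no row was free.
    SkScalar lock_strip(GrTextureStripAtlas* atlas, const SkBitmap& strip) {
        int row = atlas->lockRow(strip);
        if (row < 0) {
            return -1;  // caller must fall back to a standalone texture
        }
        SkScalar yOffset = atlas->getYOffset(row);
        // Shader access becomes:
        //   texture2D(sampler, vec2(x, yOffset + y * atlas->getNormalizedTexelHeight()))
        atlas->unlockRow(row);  // in real use, keep the lock while the texture is in use
        return yOffset;
    }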
diff --git a/gfx/skia/skia/include/private/SkAtomics.h b/gfx/skia/skia/include/private/SkAtomics.h
new file mode 100644
index 000000000..1e26df86e
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkAtomics.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAtomics_DEFINED
+#define SkAtomics_DEFINED
+
+// This file is not part of the public Skia API.
+#include "SkTypes.h"
+#include <atomic>
+
+// ~~~~~~~~ APIs ~~~~~~~~~
+
+enum sk_memory_order {
+ sk_memory_order_relaxed,
+ sk_memory_order_consume,
+ sk_memory_order_acquire,
+ sk_memory_order_release,
+ sk_memory_order_acq_rel,
+ sk_memory_order_seq_cst,
+};
+
+template <typename T>
+T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst);
+
+template <typename T>
+void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst);
+
+template <typename T>
+T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst);
+
+template <typename T>
+T sk_atomic_fetch_sub(T*, T, sk_memory_order = sk_memory_order_seq_cst);
+
+template <typename T>
+bool sk_atomic_compare_exchange(T*, T* expected, T desired,
+ sk_memory_order success = sk_memory_order_seq_cst,
+ sk_memory_order failure = sk_memory_order_seq_cst);
+
+template <typename T>
+T sk_atomic_exchange(T*, T, sk_memory_order = sk_memory_order_seq_cst);
+
+// A little wrapper class for small T (think, builtins: int, float, void*) to
+// ensure they're always used atomically. This is our stand-in for std::atomic<T>.
+// !!! Please _really_ know what you're doing if you change default_memory_order. !!!
+template <typename T, sk_memory_order default_memory_order = sk_memory_order_seq_cst>
+class SkAtomic : SkNoncopyable {
+public:
+ SkAtomic() {}
+ explicit SkAtomic(const T& val) : fVal(val) {}
+
+ // It is essential we return by value rather than by const&. fVal may change at any time.
+ T load(sk_memory_order mo = default_memory_order) const {
+ return sk_atomic_load(&fVal, mo);
+ }
+
+ void store(const T& val, sk_memory_order mo = default_memory_order) {
+ sk_atomic_store(&fVal, val, mo);
+ }
+
+ // Alias for .load(default_memory_order).
+ MOZ_IMPLICIT operator T() const {
+ return this->load();
+ }
+
+ // Alias for .store(v, default_memory_order).
+ T operator=(const T& v) {
+ this->store(v);
+ return v;
+ }
+
+ T fetch_add(const T& val, sk_memory_order mo = default_memory_order) {
+ return sk_atomic_fetch_add(&fVal, val, mo);
+ }
+
+ T fetch_sub(const T& val, sk_memory_order mo = default_memory_order) {
+ return sk_atomic_fetch_sub(&fVal, val, mo);
+ }
+
+ bool compare_exchange(T* expected, const T& desired,
+ sk_memory_order success = default_memory_order,
+ sk_memory_order failure = default_memory_order) {
+ return sk_atomic_compare_exchange(&fVal, expected, desired, success, failure);
+ }
+private:
+ T fVal;
+};
+
+// ~~~~~~~~ Implementations ~~~~~~~~~
+
+template <typename T>
+T sk_atomic_load(const T* ptr, sk_memory_order mo) {
+ SkASSERT(mo == sk_memory_order_relaxed ||
+ mo == sk_memory_order_seq_cst ||
+ mo == sk_memory_order_acquire ||
+ mo == sk_memory_order_consume);
+ const std::atomic<T>* ap = reinterpret_cast<const std::atomic<T>*>(ptr);
+ return std::atomic_load_explicit(ap, (std::memory_order)mo);
+}
+
+template <typename T>
+void sk_atomic_store(T* ptr, T val, sk_memory_order mo) {
+ SkASSERT(mo == sk_memory_order_relaxed ||
+ mo == sk_memory_order_seq_cst ||
+ mo == sk_memory_order_release);
+ std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
+ return std::atomic_store_explicit(ap, val, (std::memory_order)mo);
+}
+
+template <typename T>
+T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order mo) {
+ // All values of mo are valid.
+ std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
+ return std::atomic_fetch_add_explicit(ap, val, (std::memory_order)mo);
+}
+
+template <typename T>
+T sk_atomic_fetch_sub(T* ptr, T val, sk_memory_order mo) {
+ // All values of mo are valid.
+ std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
+ return std::atomic_fetch_sub_explicit(ap, val, (std::memory_order)mo);
+}
+
+template <typename T>
+bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired,
+ sk_memory_order success,
+ sk_memory_order failure) {
+ // All values of success are valid.
+ SkASSERT(failure == sk_memory_order_relaxed ||
+ failure == sk_memory_order_seq_cst ||
+ failure == sk_memory_order_acquire ||
+ failure == sk_memory_order_consume);
+ SkASSERT(failure <= success);
+ std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
+ return std::atomic_compare_exchange_strong_explicit(ap, expected, desired,
+ (std::memory_order)success,
+ (std::memory_order)failure);
+}
+
+template <typename T>
+T sk_atomic_exchange(T* ptr, T val, sk_memory_order mo) {
+ // All values of mo are valid.
+ std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
+ return std::atomic_exchange_explicit(ap, val, (std::memory_order)mo);
+}
+
+// ~~~~~~~~ Legacy APIs ~~~~~~~~~
+
+// From here down we have shims for our old atomics API, to be weaned off of.
+// We use the default sequentially-consistent memory order to make things simple
+// and to match the practical reality of our old _sync and _win implementations.
+
+inline int32_t sk_atomic_inc(int32_t* ptr) { return sk_atomic_fetch_add(ptr, +1); }
+inline int32_t sk_atomic_dec(int32_t* ptr) { return sk_atomic_fetch_add(ptr, -1); }
+
+#endif//SkAtomics_DEFINED
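
A small sketch of the wrapper in use as a shared counter; the counter itself is illustrative, and only the SkAtomic/sk_memory_order API declared above is assumed:

    #include "SkAtomics.h"

    static SkAtomic<int32_t> gHypotheticalCounter(0);

    int32_t bump_counter() {
        // Returns the previous value; relaxed ordering is enough for a statistic.
        return gHypotheticalCounter.fetch_add(1, sk_memory_order_relaxed);
    }

    int32_t read_counter() {
        // operator T() aliases load() with the default (seq_cst) memory order.
        return gHypotheticalCounter;
    }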
diff --git a/gfx/skia/skia/include/private/SkBitmaskEnum.h b/gfx/skia/skia/include/private/SkBitmaskEnum.h
new file mode 100644
index 000000000..f787d3b04
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkBitmaskEnum.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkEnumOperators_DEFINED
+#define SkEnumOperators_DEFINED
+
+#include "SkTLogic.h"
+
+namespace skstd {
+template <typename T> struct is_bitmask_enum : std::false_type {};
+}
+
+template <typename E> SK_WHEN(skstd::is_bitmask_enum<E>::value, E) operator|(E l, E r) {
+ using U = skstd::underlying_type_t<E>;
+ return static_cast<E>(static_cast<U>(l) | static_cast<U>(r));
+}
+
+template <typename E> SK_WHEN(skstd::is_bitmask_enum<E>::value, E&) operator|=(E& l, E r) {
+ return l = l | r;
+}
+
+template <typename E> SK_WHEN(skstd::is_bitmask_enum<E>::value, E) operator&(E l, E r) {
+ using U = skstd::underlying_type_t<E>;
+ return static_cast<E>(static_cast<U>(l) & static_cast<U>(r));
+}
+
+template <typename E> SK_WHEN(skstd::is_bitmask_enum<E>::value, E&) operator&=(E& l, E r) {
+ return l = l & r;
+}
+
+#endif // SkEnumOperators_DEFINED
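
To pick up these operators, an enum has to opt in by specializing skstd::is_bitmask_enum. A sketch with a hypothetical flag enum (HypotheticalFlags is illustrative):

    #include "SkBitmaskEnum.h"
    #include <cstdint>
    #include <type_traits>

    enum class HypotheticalFlags : uint32_t {
        kNone  = 0,
        kRead  = 1 << 0,
        kWrite = 1 << 1,
    };

    // Opt-in: this specialization is what enables operator|, |=, &, &= above.
    namespace skstd {
        template <> struct is_bitmask_enum<HypotheticalFlags> : std::true_type {};
    }

    HypotheticalFlags read_write() {
        return HypotheticalFlags::kRead | HypotheticalFlags::kWrite;
    }

    bool can_write(HypotheticalFlags f) {
        return (f & HypotheticalFlags::kWrite) != HypotheticalFlags::kNone;
    }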
diff --git a/gfx/skia/skia/include/private/SkChecksum.h b/gfx/skia/skia/include/private/SkChecksum.h
new file mode 100644
index 000000000..8a04c89ae
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkChecksum.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkChecksum_DEFINED
+#define SkChecksum_DEFINED
+
+#include "SkString.h"
+#include "SkTLogic.h"
+#include "SkTypes.h"
+
+// #include "SkOpts.h"
+// It's sort of pesky to be able to include SkOpts.h here, so we'll just re-declare what we need.
+namespace SkOpts {
+ extern uint32_t (*hash_fn)(const void*, size_t, uint32_t);
+}
+
+class SkChecksum : SkNoncopyable {
+public:
+ /**
+ * uint32_t -> uint32_t hash, useful when you're about to truncate this hash but you
+ * suspect its low bits aren't well mixed.
+ *
+ * This is the Murmur3 finalizer.
+ */
+ static uint32_t Mix(uint32_t hash) {
+ hash ^= hash >> 16;
+ hash *= 0x85ebca6b;
+ hash ^= hash >> 13;
+ hash *= 0xc2b2ae35;
+ hash ^= hash >> 16;
+ return hash;
+ }
+
+ /**
+ * uint32_t -> uint32_t hash, useful when you're about to truncate this hash but you
+ * suspect its low bits aren't well mixed.
+ *
+ * This version is two lines cheaper than Mix, but seems to be sufficient for the font cache.
+ */
+ static uint32_t CheapMix(uint32_t hash) {
+ hash ^= hash >> 16;
+ hash *= 0x85ebca6b;
+ hash ^= hash >> 16;
+ return hash;
+ }
+};
+
+// SkGoodHash should usually be your first choice in hashing data.
+// It should be both reasonably fast and high quality.
+struct SkGoodHash {
+ template <typename K>
+ SK_WHEN(sizeof(K) == 4, uint32_t) operator()(const K& k) const {
+ return SkChecksum::Mix(*(const uint32_t*)&k);
+ }
+
+ template <typename K>
+ SK_WHEN(sizeof(K) != 4, uint32_t) operator()(const K& k) const {
+ return SkOpts::hash_fn(&k, sizeof(K), 0);
+ }
+
+ uint32_t operator()(const SkString& k) const {
+ return SkOpts::hash_fn(k.c_str(), k.size(), 0);
+ }
+};
+
+#endif
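
A sketch of SkGoodHash with two kinds of keys: the 4-byte overload resolves to SkChecksum::Mix, while SkString (and any other size) goes through SkOpts::hash_fn. The combining xor is only for illustration:

    #include "SkChecksum.h"

    uint32_t hash_examples() {
        SkGoodHash hasher;
        uint32_t a = hasher(int32_t{42});           // sizeof(K) == 4 -> SkChecksum::Mix
        uint32_t b = hasher(SkString("gradient"));  // SkString -> SkOpts::hash_fn
        return a ^ b;
    }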
diff --git a/gfx/skia/skia/include/private/SkFixed.h b/gfx/skia/skia/include/private/SkFixed.h
new file mode 100644
index 000000000..be3bb5d60
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkFixed.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFixed_DEFINED
+#define SkFixed_DEFINED
+
+#include "SkScalar.h"
+#include "math.h"
+
+#include "SkTypes.h"
+
+/** \file SkFixed.h
+
+ Types and macros for 16.16 fixed point
+*/
+
+/** 32 bit signed integer used to represent fraction values with 16 bits to the right of the decimal point
+*/
+typedef int32_t SkFixed;
+#define SK_Fixed1 (1 << 16)
+#define SK_FixedHalf (1 << 15)
+#define SK_FixedMax (0x7FFFFFFF)
+#define SK_FixedMin (-SK_FixedMax)
+#define SK_FixedPI (0x3243F)
+#define SK_FixedSqrt2 (92682)
+#define SK_FixedTanPIOver8 (0x6A0A)
+#define SK_FixedRoot2Over2 (0xB505)
+
+#define SkFixedToFloat(x) ((x) * 1.52587890625e-5f)
+#define SkFloatToFixed(x) ((SkFixed)((x) * SK_Fixed1))
+
+#ifdef SK_DEBUG
+ static inline SkFixed SkFloatToFixed_Check(float x) {
+ int64_t n64 = (int64_t)(x * SK_Fixed1);
+ SkFixed n32 = (SkFixed)n64;
+ SkASSERT(n64 == n32);
+ return n32;
+ }
+#else
+ #define SkFloatToFixed_Check(x) SkFloatToFixed(x)
+#endif
+
+#define SkFixedToDouble(x) ((x) * 1.52587890625e-5)
+#define SkDoubleToFixed(x) ((SkFixed)((x) * SK_Fixed1))
+
+/** Converts an integer to a SkFixed, asserting that the result does not overflow
+ a 32 bit signed integer
+*/
+#ifdef SK_DEBUG
+ inline SkFixed SkIntToFixed(int n)
+ {
+ SkASSERT(n >= -32768 && n <= 32767);
+ // Left shifting a negative value has undefined behavior in C, so we cast to unsigned before
+ // shifting.
+ return (unsigned)n << 16;
+ }
+#else
+ // Left shifting a negative value has undefined behavior in C, so we cast to unsigned before
+ // shifting. Then we force the cast to SkFixed to ensure that the answer is signed (like the
+ // debug version).
+ #define SkIntToFixed(n) (SkFixed)((unsigned)(n) << 16)
+#endif
+
+#define SkFixedRoundToInt(x) (((x) + SK_FixedHalf) >> 16)
+#define SkFixedCeilToInt(x) (((x) + SK_Fixed1 - 1) >> 16)
+#define SkFixedFloorToInt(x) ((x) >> 16)
+
+#define SkFixedRoundToFixed(x) (((x) + SK_FixedHalf) & 0xFFFF0000)
+#define SkFixedCeilToFixed(x) (((x) + SK_Fixed1 - 1) & 0xFFFF0000)
+#define SkFixedFloorToFixed(x) ((x) & 0xFFFF0000)
+
+#define SkFixedAbs(x) SkAbs32(x)
+#define SkFixedAve(a, b) (((a) + (b)) >> 1)
+
+// The divide may exceed 32 bits. Clamp to a signed 32 bit result.
+#define SkFixedDiv(numer, denom) \
+ SkToS32(SkTPin<int64_t>((SkLeftShift((int64_t)(numer), 16) / (denom)), SK_MinS32, SK_MaxS32))
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////
+// Now look for ASM overrides for our portable versions (should consider putting this in its own file)
+
+inline SkFixed SkFixedMul_longlong(SkFixed a, SkFixed b) {
+ return (SkFixed)((int64_t)a * b >> 16);
+}
+#define SkFixedMul(a,b) SkFixedMul_longlong(a,b)
+
+
+#if defined(SK_CPU_ARM32)
+ /* This does not handle NaN or other obscurities, but is faster
+ than (int)(x*65536). When built on Android with -Os, it needs to be forced
+ inline or we lose the speed benefit.
+ */
+ SK_ALWAYS_INLINE SkFixed SkFloatToFixed_arm(float x)
+ {
+ int32_t y, z;
+ asm("movs %1, %3, lsl #1 \n"
+ "mov %2, #0x8E \n"
+ "sub %1, %2, %1, lsr #24 \n"
+ "mov %2, %3, lsl #8 \n"
+ "orr %2, %2, #0x80000000 \n"
+ "mov %1, %2, lsr %1 \n"
+ "it cs \n"
+ "rsbcs %1, %1, #0 \n"
+ : "=r"(x), "=&r"(y), "=&r"(z)
+ : "r"(x)
+ : "cc"
+ );
+ return y;
+ }
+ inline SkFixed SkFixedMul_arm(SkFixed x, SkFixed y)
+ {
+ int32_t t;
+ asm("smull %0, %2, %1, %3 \n"
+ "mov %0, %0, lsr #16 \n"
+ "orr %0, %0, %2, lsl #16 \n"
+ : "=r"(x), "=&r"(y), "=r"(t)
+ : "r"(x), "1"(y)
+ :
+ );
+ return x;
+ }
+ #undef SkFixedMul
+ #define SkFixedMul(x, y) SkFixedMul_arm(x, y)
+
+ #undef SkFloatToFixed
+ #define SkFloatToFixed(x) SkFloatToFixed_arm(x)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if SK_SCALAR_IS_FLOAT
+
+#define SkFixedToScalar(x) SkFixedToFloat(x)
+#define SkScalarToFixed(x) SkFloatToFixed(x)
+
+#else // SK_SCALAR_IS_DOUBLE
+
+#define SkFixedToScalar(x) SkFixedToDouble(x)
+#define SkScalarToFixed(x) SkDoubleToFixed(x)
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef int64_t SkFixed3232; // 32.32
+
+#define SkIntToFixed3232(x) (SkLeftShift((SkFixed3232)(x), 32))
+#define SkFixed3232ToInt(x) ((int)((x) >> 32))
+#define SkFixedToFixed3232(x) (SkLeftShift((SkFixed3232)(x), 16))
+#define SkFixed3232ToFixed(x) ((SkFixed)((x) >> 16))
+#define SkFloatToFixed3232(x) ((SkFixed3232)((x) * (65536.0f * 65536.0f)))
+
+#define SkScalarToFixed3232(x) SkFloatToFixed3232(x)
+
+#endif
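
A short worked example of 16.16 arithmetic using only the macros defined above; the values are chosen so the math is exact:

    #include "SkFixed.h"

    SkFixed fixed_demo() {
        SkFixed a = SkFloatToFixed(1.5f);   // 0x00018000
        SkFixed b = SkFloatToFixed(2.25f);  // 0x00024000
        SkFixed c = SkFixedMul(a, b);       // 0x00036000 == 3.375 in 16.16
        SkASSERT(SkFixedRoundToInt(c) == 3);
        return c;
    }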
diff --git a/gfx/skia/skia/include/private/SkFloatBits.h b/gfx/skia/skia/include/private/SkFloatBits.h
new file mode 100644
index 000000000..7aa13cf67
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkFloatBits.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkFloatBits_DEFINED
+#define SkFloatBits_DEFINED
+
+#include "SkTypes.h"
+#include <math.h>
+
+/** Convert a sign-bit int (i.e. float interpreted as int) into a 2s complement
+ int. This also converts -0 (0x80000000) to 0. Doing this to a float allows
+ it to be compared using normal C operators (<, <=, etc.)
+*/
+static inline int32_t SkSignBitTo2sCompliment(int32_t x) {
+ if (x < 0) {
+ x &= 0x7FFFFFFF;
+ x = -x;
+ }
+ return x;
+}
+
+/** Convert a 2s complement int to a sign-bit (i.e. int interpreted as float).
+ This undoes the result of SkSignBitTo2sCompliment().
+ */
+static inline int32_t Sk2sComplimentToSignBit(int32_t x) {
+ int sign = x >> 31;
+ // make x positive
+ x = (x ^ sign) - sign;
+ // set the sign bit as needed
+ x |= SkLeftShift(sign, 31);
+ return x;
+}
+
+union SkFloatIntUnion {
+ float fFloat;
+ int32_t fSignBitInt;
+};
+
+// Helper to see a float as its bit pattern (w/o aliasing warnings)
+static inline int32_t SkFloat2Bits(float x) {
+ SkFloatIntUnion data;
+ data.fFloat = x;
+ return data.fSignBitInt;
+}
+
+// Helper to see a bit pattern as a float (w/o aliasing warnings)
+static inline float SkBits2Float(int32_t floatAsBits) {
+ SkFloatIntUnion data;
+ data.fSignBitInt = floatAsBits;
+ return data.fFloat;
+}
+
+/** Return the float as a 2s complement int. Just to be used to compare floats
+ to each other or against positive float-bit-constants (like 0). This does
+ not return the int equivalent of the float, just something cheaper for
+ compares-only.
+ */
+static inline int32_t SkFloatAs2sCompliment(float x) {
+ return SkSignBitTo2sCompliment(SkFloat2Bits(x));
+}
+
+/** Return the 2s complement int as a float. This undoes the result of
+ SkFloatAs2sCompliment
+ */
+static inline float Sk2sComplimentAsFloat(int32_t x) {
+ return SkBits2Float(Sk2sComplimentToSignBit(x));
+}
+
+static inline int32_t pin_double_to_int(double x) {
+ return (int32_t)SkTPin<double>(x, SK_MinS32, SK_MaxS32);
+}
+
+/** Return the floor of the float as an int.
+ If the value is out of range, or NaN, return +/- SK_MaxS32
+*/
+static inline int32_t SkFloatToIntFloor(float x) {
+ return pin_double_to_int(floor(x));
+}
+
+/** Return the float rounded to an int.
+ If the value is out of range, or NaN, return +/- SK_MaxS32
+*/
+static inline int32_t SkFloatToIntRound(float x) {
+ return pin_double_to_int(floor((double)x + 0.5));
+}
+
+/** Return the ceiling of the float as an int.
+ If the value is out of range, or NaN, return +/- SK_MaxS32
+*/
+static inline int32_t SkFloatToIntCeil(float x) {
+ return pin_double_to_int(ceil(x));
+}
+
+// Scalar wrappers for float-bit routines
+
+#define SkScalarAs2sCompliment(x) SkFloatAs2sCompliment(x)
+#define Sk2sComplimentAsScalar(x) Sk2sComplimentAsFloat(x)
+
+#endif
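
A sketch of the intended use: once mapped through SkFloatAs2sCompliment, ordinary integer comparisons agree with float ordering (NaN excluded), which is what the scalar wrappers rely on. The helper below is illustrative:

    #include "SkFloatBits.h"

    // True iff a < b, comparing the floats through their 2s complement images.
    // NaN inputs are not handled, matching the comment on SkFloatAs2sCompliment.
    static bool float_less_via_bits(float a, float b) {
        return SkFloatAs2sCompliment(a) < SkFloatAs2sCompliment(b);
    }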
diff --git a/gfx/skia/skia/include/private/SkFloatingPoint.h b/gfx/skia/skia/include/private/SkFloatingPoint.h
new file mode 100644
index 000000000..6a6edf365
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkFloatingPoint.h
@@ -0,0 +1,157 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkFloatingPoint_DEFINED
+#define SkFloatingPoint_DEFINED
+
+#include "SkTypes.h"
+
+#include <math.h>
+#include <float.h>
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
+ #include <xmmintrin.h>
+#elif defined(SK_ARM_HAS_NEON)
+ #include <arm_neon.h>
+#endif
+
+// For _POSIX_VERSION
+#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))
+#include <unistd.h>
+#endif
+
+#include "SkFloatBits.h"
+
+// C++98 cmath std::pow seems to be the earliest portable way to get float pow.
+// However, on Linux including cmath undefines isfinite.
+// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=14608
+static inline float sk_float_pow(float base, float exp) {
+ return powf(base, exp);
+}
+
+#define sk_float_sqrt(x) sqrtf(x)
+#define sk_float_sin(x) sinf(x)
+#define sk_float_cos(x) cosf(x)
+#define sk_float_tan(x) tanf(x)
+#define sk_float_floor(x) floorf(x)
+#define sk_float_ceil(x) ceilf(x)
+#define sk_float_trunc(x) truncf(x)
+#ifdef SK_BUILD_FOR_MAC
+# define sk_float_acos(x) static_cast<float>(acos(x))
+# define sk_float_asin(x) static_cast<float>(asin(x))
+#else
+# define sk_float_acos(x) acosf(x)
+# define sk_float_asin(x) asinf(x)
+#endif
+#define sk_float_atan2(y,x) atan2f(y,x)
+#define sk_float_abs(x) fabsf(x)
+#define sk_float_copysign(x, y) copysignf(x, y)
+#define sk_float_mod(x,y) fmodf(x,y)
+#define sk_float_exp(x) expf(x)
+#define sk_float_log(x) logf(x)
+
+#define sk_float_round(x) sk_float_floor((x) + 0.5f)
+
+// can't find log2f on android, but maybe that's just a tool bug?
+#ifdef SK_BUILD_FOR_ANDROID
+ static inline float sk_float_log2(float x) {
+ const double inv_ln_2 = 1.44269504088896;
+ return (float)(log(x) * inv_ln_2);
+ }
+#else
+ #define sk_float_log2(x) log2f(x)
+#endif
+
+#ifdef SK_BUILD_FOR_WIN
+ #define sk_float_isfinite(x) _finite(x)
+ #define sk_float_isnan(x) _isnan(x)
+ static inline int sk_float_isinf(float x) {
+ int32_t bits = SkFloat2Bits(x);
+ return (bits << 1) == (0xFF << 24);
+ }
+#else
+ #define sk_float_isfinite(x) isfinite(x)
+ #define sk_float_isnan(x) isnan(x)
+ #define sk_float_isinf(x) isinf(x)
+#endif
+
+#define sk_double_isnan(a) sk_float_isnan(a)
+
+#ifdef SK_USE_FLOATBITS
+ #define sk_float_floor2int(x) SkFloatToIntFloor(x)
+ #define sk_float_round2int(x) SkFloatToIntRound(x)
+ #define sk_float_ceil2int(x) SkFloatToIntCeil(x)
+#else
+ #define sk_float_floor2int(x) (int)sk_float_floor(x)
+ #define sk_float_round2int(x) (int)sk_float_floor((x) + 0.5f)
+ #define sk_float_ceil2int(x) (int)sk_float_ceil(x)
+#endif
+
+#define sk_double_floor(x) floor(x)
+#define sk_double_round(x) floor((x) + 0.5)
+#define sk_double_ceil(x) ceil(x)
+#define sk_double_floor2int(x) (int)floor(x)
+#define sk_double_round2int(x) (int)floor((x) + 0.5f)
+#define sk_double_ceil2int(x) (int)ceil(x)
+
+static const uint32_t kIEEENotANumber = 0x7fffffff;
+#define SK_FloatNaN (*SkTCast<const float*>(&kIEEENotANumber))
+#define SK_FloatInfinity (+(float)INFINITY)
+#define SK_FloatNegativeInfinity (-(float)INFINITY)
+
+static inline float sk_float_rsqrt_portable(float x) {
+ // Get initial estimate.
+ int i;
+ memcpy(&i, &x, 4);
+ i = 0x5F1FFFF9 - (i>>1);
+ float estimate;
+ memcpy(&estimate, &i, 4);
+
+ // One step of Newton's method to refine.
+ const float estimate_sq = estimate*estimate;
+ estimate *= 0.703952253f*(2.38924456f-x*estimate_sq);
+ return estimate;
+}
+
+// Fast, approximate inverse square root.
+// Compare to name-brand "1.0f / sk_float_sqrt(x)". Should be around 10x faster on SSE, 2x on NEON.
+static inline float sk_float_rsqrt(float x) {
+// We want all this inlined, so we'll inline SIMD and just take the hit when we don't know we've got
+// it at compile time. This is going to be too fast to productively hide behind a function pointer.
+//
+// We do one step of Newton's method to refine the estimates in the NEON and portable paths. No
+// refinement is faster, but very inaccurate. Two steps is more accurate, but slower than 1/sqrt.
+//
+// Optimized constants in the portable path courtesy of http://rrrola.wz.cz/inv_sqrt.html
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
+ return _mm_cvtss_f32(_mm_rsqrt_ss(_mm_set_ss(x)));
+#elif defined(SK_ARM_HAS_NEON)
+ // Get initial estimate.
+ const float32x2_t xx = vdup_n_f32(x); // Clever readers will note we're doing everything 2x.
+ float32x2_t estimate = vrsqrte_f32(xx);
+
+ // One step of Newton's method to refine.
+ const float32x2_t estimate_sq = vmul_f32(estimate, estimate);
+ estimate = vmul_f32(estimate, vrsqrts_f32(xx, estimate_sq));
+ return vget_lane_f32(estimate, 0); // 1 will work fine too; the answer's in both places.
+#else
+ return sk_float_rsqrt_portable(x);
+#endif
+}
+
+// This is the number of significant digits we can print in a string such that when we read that
+// string back we get the floating point number we expect. The minimum value C requires is 6, but
+// most compilers support 9
+#ifdef FLT_DECIMAL_DIG
+#define SK_FLT_DECIMAL_DIG FLT_DECIMAL_DIG
+#else
+#define SK_FLT_DECIMAL_DIG 9
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkLeanWindows.h b/gfx/skia/skia/include/private/SkLeanWindows.h
new file mode 100644
index 000000000..2bdddbba3
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkLeanWindows.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkLeanWindows_DEFINED
+#define SkLeanWindows_DEFINED
+
+#include "SkTypes.h"
+
+#ifdef SK_BUILD_FOR_WIN
+# ifndef WIN32_LEAN_AND_MEAN
+# define WIN32_LEAN_AND_MEAN
+# define WIN32_IS_MEAN_WAS_LOCALLY_DEFINED
+# endif
+# ifndef NOMINMAX
+# define NOMINMAX
+# define NOMINMAX_WAS_LOCALLY_DEFINED
+# endif
+#
+# include <windows.h>
+#
+# ifdef WIN32_IS_MEAN_WAS_LOCALLY_DEFINED
+# undef WIN32_IS_MEAN_WAS_LOCALLY_DEFINED
+# undef WIN32_LEAN_AND_MEAN
+# endif
+# ifdef NOMINMAX_WAS_LOCALLY_DEFINED
+# undef NOMINMAX_WAS_LOCALLY_DEFINED
+# undef NOMINMAX
+# endif
+#endif
+
+#endif // SkLeanWindows_DEFINED
diff --git a/gfx/skia/skia/include/private/SkMiniRecorder.h b/gfx/skia/skia/include/private/SkMiniRecorder.h
new file mode 100644
index 000000000..6365ebc65
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkMiniRecorder.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMiniRecorder_DEFINED
+#define SkMiniRecorder_DEFINED
+
+#include "SkRecords.h"
+#include "SkScalar.h"
+#include "SkTypes.h"
+class SkCanvas;
+
+// Records small pictures, but only a limited subset of the canvas API, and may fail.
+class SkMiniRecorder : SkNoncopyable {
+public:
+ SkMiniRecorder();
+ ~SkMiniRecorder();
+
+ // Try to record an op. Returns false on failure.
+ bool drawPath(const SkPath&, const SkPaint&);
+ bool drawRect(const SkRect&, const SkPaint&);
+ bool drawTextBlob(const SkTextBlob*, SkScalar x, SkScalar y, const SkPaint&);
+
+ // Detach anything we've recorded as a picture, resetting this SkMiniRecorder.
+ sk_sp<SkPicture> detachAsPicture(const SkRect& cull);
+
+ // Flush anything we've recorded to the canvas, resetting this SkMiniRecorder.
+ // This is logically the same as but rather more efficient than:
+ //   sk_sp<SkPicture> pic(this->detachAsPicture(SkRect::MakeEmpty()));
+ // pic->playback(canvas);
+ void flushAndReset(SkCanvas*);
+
+private:
+ enum class State {
+ kEmpty,
+ kDrawPath,
+ kDrawRect,
+ kDrawTextBlob,
+ };
+
+ State fState;
+
+ template <size_t A, size_t B>
+ struct Max { static const size_t val = A > B ? A : B; };
+
+ static const size_t kInlineStorage =
+ Max<sizeof(SkRecords::DrawPath),
+ Max<sizeof(SkRecords::DrawRect),
+ sizeof(SkRecords::DrawTextBlob)>::val>::val;
+ SkAlignedSStorage<kInlineStorage> fBuffer;
+};
+
+#endif//SkMiniRecorder_DEFINED
diff --git a/gfx/skia/skia/include/private/SkMutex.h b/gfx/skia/skia/include/private/SkMutex.h
new file mode 100644
index 000000000..7cfdb1132
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkMutex.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMutex_DEFINED
+#define SkMutex_DEFINED
+
+#include "../private/SkSemaphore.h"
+#include "../private/SkThreadID.h"
+#include "SkTypes.h"
+
+#define SK_DECLARE_STATIC_MUTEX(name) static SkBaseMutex name;
+
+class SkBaseMutex {
+public:
+ constexpr SkBaseMutex() = default;
+
+ void acquire() {
+ fSemaphore.wait();
+ SkDEBUGCODE(fOwner = SkGetThreadID();)
+ }
+
+ void release() {
+ this->assertHeld();
+ SkDEBUGCODE(fOwner = kIllegalThreadID;)
+ fSemaphore.signal();
+ }
+
+ void assertHeld() {
+ SkASSERT(fOwner == SkGetThreadID());
+ }
+
+protected:
+ SkBaseSemaphore fSemaphore{1};
+ SkDEBUGCODE(SkThreadID fOwner{kIllegalThreadID};)
+};
+
+class SkMutex : public SkBaseMutex {
+public:
+ using SkBaseMutex::SkBaseMutex;
+ ~SkMutex() { fSemaphore.cleanup(); }
+};
+
+class SkAutoMutexAcquire {
+public:
+ template <typename T>
+ SkAutoMutexAcquire(T* mutex) : fMutex(mutex) {
+ if (mutex) {
+ mutex->acquire();
+ }
+ fRelease = [](void* mutex) { ((T*)mutex)->release(); };
+ }
+
+ template <typename T>
+ SkAutoMutexAcquire(T& mutex) : SkAutoMutexAcquire(&mutex) {}
+
+ ~SkAutoMutexAcquire() { this->release(); }
+
+ void release() {
+ if (fMutex) {
+ fRelease(fMutex);
+ }
+ fMutex = nullptr;
+ }
+
+private:
+ void* fMutex;
+ void (*fRelease)(void*);
+};
+#define SkAutoMutexAcquire(...) SK_REQUIRE_LOCAL_VAR(SkAutoMutexAcquire)
+
+// SkAutoExclusive is a lighter weight version of SkAutoMutexAcquire.
+// It assumes that there is a valid mutex, obviating the null check.
+class SkAutoExclusive {
+public:
+ template <typename T>
+ SkAutoExclusive(T& mutex) : fMutex(&mutex) {
+ mutex.acquire();
+
+ fRelease = [](void* mutex) { ((T*)mutex)->release(); };
+ }
+ ~SkAutoExclusive() { fRelease(fMutex); }
+
+private:
+ void* fMutex;
+ void (*fRelease)(void*);
+};
+#define SkAutoExclusive(...) SK_REQUIRE_LOCAL_VAR(SkAutoExclusive)
+
+#endif//SkMutex_DEFINED
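
A minimal sketch of the static-mutex plus scoped-guard pattern above; the guarded value and mutex name are illustrative:

    #include "SkMutex.h"

    SK_DECLARE_STATIC_MUTEX(gHypotheticalMutex)
    static int gProtectedValue = 0;

    void set_value(int v) {
        // Acquires in the constructor, releases in the destructor; the guard
        // also tolerates a null mutex pointer via its T* overload.
        SkAutoMutexAcquire lock(gHypotheticalMutex);
        gProtectedValue = v;
    }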
diff --git a/gfx/skia/skia/include/private/SkOnce.h b/gfx/skia/skia/include/private/SkOnce.h
new file mode 100644
index 000000000..65334e3a2
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkOnce.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOnce_DEFINED
+#define SkOnce_DEFINED
+
+#include <atomic>
+#include <utility>
+#include "SkTypes.h"
+
+// SkOnce provides call-once guarantees for Skia, much like std::once_flag/std::call_once().
+//
+// There should be no particularly error-prone gotcha use cases when using SkOnce.
+// It works correctly as a class member, a local, a global, a function-scoped static, whatever.
+
+class SkOnce {
+public:
+ constexpr SkOnce() = default;
+
+ template <typename Fn, typename... Args>
+ void operator()(Fn&& fn, Args&&... args) {
+ auto state = fState.load(std::memory_order_acquire);
+
+ if (state == Done) {
+ return;
+ }
+
+ // If it looks like no one has started calling fn(), try to claim that job.
+ if (state == NotStarted && fState.compare_exchange_strong(state, Claimed,
+ std::memory_order_relaxed)) {
+ // Great! We'll run fn() then notify the other threads by releasing Done into fState.
+ fn(std::forward<Args>(args)...);
+ return fState.store(Done, std::memory_order_release);
+ }
+
+ // Some other thread is calling fn().
+ // We'll just spin here acquiring until it releases Done into fState.
+ while (fState.load(std::memory_order_acquire) != Done) { /*spin*/ }
+ }
+
+private:
+ enum State : uint8_t { NotStarted, Claimed, Done};
+ std::atomic<uint8_t> fState{NotStarted};
+};
+
+#endif // SkOnce_DEFINED
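
A sketch of lazy one-time initialization with SkOnce; the table and lookup function are illustrative:

    #include "SkOnce.h"

    static SkOnce gTableOnce;
    static int gHypotheticalTable[256];

    static void init_table() {
        for (int i = 0; i < 256; i++) {
            gHypotheticalTable[i] = i * i;
        }
    }

    int lookup(int i) {
        gTableOnce(init_table);  // init_table() runs exactly once, even under races
        return gHypotheticalTable[i & 0xFF];
    }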
diff --git a/gfx/skia/skia/include/private/SkRecords.h b/gfx/skia/skia/include/private/SkRecords.h
new file mode 100644
index 000000000..05f935b3c
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkRecords.h
@@ -0,0 +1,358 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRecords_DEFINED
+#define SkRecords_DEFINED
+
+#include "SkData.h"
+#include "SkCanvas.h"
+#include "SkDrawable.h"
+#include "SkImageFilter.h"
+#include "SkMatrix.h"
+#include "SkPath.h"
+#include "SkPicture.h"
+#include "SkRect.h"
+#include "SkRRect.h"
+#include "SkRSXform.h"
+#include "SkString.h"
+#include "SkTextBlob.h"
+
+// Windows.h will pull in all of the GDI defines. GDI #defines
+// DrawText to DrawTextA or DrawTextW, but SkRecord has a struct
+// called DrawText. Since this file does not use GDI, undefining
+// DrawText makes things less confusing.
+#ifdef DrawText
+#undef DrawText
+#endif
+
+namespace SkRecords {
+
+// A list of all the types of canvas calls we can record.
+// Each of these is reified into a struct below.
+//
+// (We're using the macro-of-macro trick here to do several different things with the same list.)
+//
+// We leave this SK_RECORD_TYPES macro defined for use by code that wants to operate on SkRecords
+// types polymorphically. (See SkRecord::Record::{visit,mutate} for an example.)
+//
+// Order doesn't technically matter here, but the compiler can generally generate better code if
+// you keep them semantically grouped, especially the Draws. It's also nice to leave NoOp at 0.
+#define SK_RECORD_TYPES(M) \
+ M(NoOp) \
+ M(Restore) \
+ M(Save) \
+ M(SaveLayer) \
+ M(SetMatrix) \
+ M(Translate) \
+ M(TranslateZ) \
+ M(Concat) \
+ M(ClipPath) \
+ M(ClipRRect) \
+ M(ClipRect) \
+ M(ClipRegion) \
+ M(DrawArc) \
+ M(DrawDrawable) \
+ M(DrawImage) \
+ M(DrawImageLattice) \
+ M(DrawImageRect) \
+ M(DrawImageNine) \
+ M(DrawDRRect) \
+ M(DrawOval) \
+ M(DrawPaint) \
+ M(DrawPath) \
+ M(DrawPatch) \
+ M(DrawPicture) \
+ M(DrawShadowedPicture) \
+ M(DrawPoints) \
+ M(DrawPosText) \
+ M(DrawPosTextH) \
+ M(DrawText) \
+ M(DrawTextOnPath) \
+ M(DrawTextRSXform) \
+ M(DrawRRect) \
+ M(DrawRect) \
+ M(DrawRegion) \
+ M(DrawTextBlob) \
+ M(DrawAtlas) \
+ M(DrawVertices) \
+ M(DrawAnnotation)
+
+// Defines SkRecords::Type, an enum of all record types.
+#define ENUM(T) T##_Type,
+enum Type { SK_RECORD_TYPES(ENUM) };
+#undef ENUM
+
+#define ACT_AS_PTR(ptr) \
+ operator T*() const { return ptr; } \
+ T* operator->() const { return ptr; }
+
+// An Optional doesn't own the pointer's memory, but may need to destroy non-POD data.
+template <typename T>
+class Optional : SkNoncopyable {
+public:
+ Optional() : fPtr(nullptr) {}
+ Optional(T* ptr) : fPtr(ptr) {}
+ Optional(Optional&& o) : fPtr(o.fPtr) {
+ o.fPtr = nullptr;
+ }
+ ~Optional() { if (fPtr) fPtr->~T(); }
+
+ ACT_AS_PTR(fPtr)
+private:
+ T* fPtr;
+};
+
+// Like Optional, but ptr must not be NULL.
+template <typename T>
+class Adopted : SkNoncopyable {
+public:
+ Adopted(T* ptr) : fPtr(ptr) { SkASSERT(fPtr); }
+ Adopted(Adopted* source) {
+ // Transfer ownership from source to this.
+ fPtr = source->fPtr;
+ source->fPtr = NULL;
+ }
+ ~Adopted() { if (fPtr) fPtr->~T(); }
+
+ ACT_AS_PTR(fPtr)
+private:
+ T* fPtr;
+};
+
+// PODArray doesn't own the pointer's memory, and we assume the data is POD.
+template <typename T>
+class PODArray {
+public:
+ PODArray() {}
+ PODArray(T* ptr) : fPtr(ptr) {}
+ // Default copy and assign.
+
+ ACT_AS_PTR(fPtr)
+private:
+ T* fPtr;
+};
+
+#undef ACT_AS_PTR
+
+// SkPath::getBounds() isn't thread safe unless we precache the bounds in a single-threaded context.
+// SkPath::cheapComputeDirection() is similar.
+// Recording is a convenient time to cache these, or we can delay it to between record and playback.
+struct PreCachedPath : public SkPath {
+ PreCachedPath() {}
+ PreCachedPath(const SkPath& path);
+};
+
+// Like SkPath::getBounds(), SkMatrix::getType() isn't thread safe unless we precache it.
+// This may not cover all SkMatrices used by the picture (e.g. some could be hiding in a shader).
+struct TypedMatrix : public SkMatrix {
+ TypedMatrix() {}
+ TypedMatrix(const SkMatrix& matrix);
+};
+
+enum Tags {
+ kDraw_Tag = 1, // May draw something (usually named DrawFoo).
+ kHasImage_Tag = 2, // Contains an SkImage or SkBitmap.
+ kHasText_Tag = 4, // Contains text.
+ kHasPaint_Tag = 8, // May have an SkPaint field, at least optionally.
+};
+
+// A macro to make it a little easier to define a struct that can be stored in SkRecord.
+#define RECORD(T, tags, ...) \
+struct T { \
+ static const Type kType = T##_Type; \
+ static const int kTags = tags; \
+ __VA_ARGS__; \
+};
+
+RECORD(NoOp, 0);
+RECORD(Restore, 0,
+ SkIRect devBounds;
+ TypedMatrix matrix);
+RECORD(Save, 0);
+
+RECORD(SaveLayer, kHasPaint_Tag,
+ Optional<SkRect> bounds;
+ Optional<SkPaint> paint;
+ sk_sp<const SkImageFilter> backdrop;
+ SkCanvas::SaveLayerFlags saveLayerFlags);
+
+RECORD(SetMatrix, 0,
+ TypedMatrix matrix);
+RECORD(Concat, 0,
+ TypedMatrix matrix);
+
+RECORD(Translate, 0,
+ SkScalar dx;
+ SkScalar dy);
+RECORD(TranslateZ, 0, SkScalar z);
+
+struct ClipOpAndAA {
+ ClipOpAndAA() {}
+ ClipOpAndAA(SkCanvas::ClipOp op, bool aa) : op(op), aa(aa) {}
+ SkCanvas::ClipOp op : 31; // This really only needs to be 3, but there's no win today to do so.
+ unsigned aa : 1; // MSVC won't pack an enum with a bool, so we call this an unsigned.
+};
+static_assert(sizeof(ClipOpAndAA) == 4, "ClipOpAndAASize");
+
+RECORD(ClipPath, 0,
+ SkIRect devBounds;
+ PreCachedPath path;
+ ClipOpAndAA opAA);
+RECORD(ClipRRect, 0,
+ SkIRect devBounds;
+ SkRRect rrect;
+ ClipOpAndAA opAA);
+RECORD(ClipRect, 0,
+ SkIRect devBounds;
+ SkRect rect;
+ ClipOpAndAA opAA);
+RECORD(ClipRegion, 0,
+ SkIRect devBounds;
+ SkRegion region;
+ SkCanvas::ClipOp op);
+
+// While not strictly required, if you have an SkPaint, it's fastest to put it first.
+RECORD(DrawArc, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkRect oval;
+ SkScalar startAngle;
+ SkScalar sweepAngle;
+ unsigned useCenter);
+RECORD(DrawDRRect, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkRRect outer;
+ SkRRect inner);
+RECORD(DrawDrawable, kDraw_Tag,
+ Optional<SkMatrix> matrix;
+ SkRect worstCaseBounds;
+ int32_t index);
+RECORD(DrawImage, kDraw_Tag|kHasImage_Tag|kHasPaint_Tag,
+ Optional<SkPaint> paint;
+ sk_sp<const SkImage> image;
+ SkScalar left;
+ SkScalar top);
+RECORD(DrawImageLattice, kDraw_Tag|kHasImage_Tag|kHasPaint_Tag,
+ Optional<SkPaint> paint;
+ sk_sp<const SkImage> image;
+ int xCount;
+ PODArray<int> xDivs;
+ int yCount;
+ PODArray<int> yDivs;
+ int flagCount;
+ PODArray<SkCanvas::Lattice::Flags> flags;
+ SkIRect src;
+ SkRect dst);
+RECORD(DrawImageRect, kDraw_Tag|kHasImage_Tag|kHasPaint_Tag,
+ Optional<SkPaint> paint;
+ sk_sp<const SkImage> image;
+ Optional<SkRect> src;
+ SkRect dst;
+ SkCanvas::SrcRectConstraint constraint);
+RECORD(DrawImageNine, kDraw_Tag|kHasImage_Tag|kHasPaint_Tag,
+ Optional<SkPaint> paint;
+ sk_sp<const SkImage> image;
+ SkIRect center;
+ SkRect dst);
+RECORD(DrawOval, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkRect oval);
+RECORD(DrawPaint, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint);
+RECORD(DrawPath, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ PreCachedPath path);
+RECORD(DrawPicture, kDraw_Tag|kHasPaint_Tag,
+ Optional<SkPaint> paint;
+ sk_sp<const SkPicture> picture;
+ TypedMatrix matrix);
+RECORD(DrawShadowedPicture, kDraw_Tag|kHasPaint_Tag,
+ Optional<SkPaint> paint;
+ sk_sp<const SkPicture> picture;
+ TypedMatrix matrix;
+ const SkShadowParams& params);
+RECORD(DrawPoints, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkCanvas::PointMode mode;
+ unsigned count;
+ SkPoint* pts);
+RECORD(DrawPosText, kDraw_Tag|kHasText_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ PODArray<char> text;
+ size_t byteLength;
+ PODArray<SkPoint> pos);
+RECORD(DrawPosTextH, kDraw_Tag|kHasText_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ PODArray<char> text;
+ unsigned byteLength;
+ SkScalar y;
+ PODArray<SkScalar> xpos);
+RECORD(DrawRRect, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkRRect rrect);
+RECORD(DrawRect, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkRect rect);
+RECORD(DrawRegion, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkRegion region);
+RECORD(DrawText, kDraw_Tag|kHasText_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ PODArray<char> text;
+ size_t byteLength;
+ SkScalar x;
+ SkScalar y);
+RECORD(DrawTextBlob, kDraw_Tag|kHasText_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ sk_sp<const SkTextBlob> blob;
+ SkScalar x;
+ SkScalar y);
+RECORD(DrawTextOnPath, kDraw_Tag|kHasText_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ PODArray<char> text;
+ size_t byteLength;
+ PreCachedPath path;
+ TypedMatrix matrix);
+RECORD(DrawTextRSXform, kDraw_Tag|kHasText_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ PODArray<char> text;
+ size_t byteLength;
+ PODArray<SkRSXform> xforms;
+ Optional<SkRect> cull);
+RECORD(DrawPatch, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ PODArray<SkPoint> cubics;
+ PODArray<SkColor> colors;
+ PODArray<SkPoint> texCoords;
+ sk_sp<SkXfermode> xmode);
+RECORD(DrawAtlas, kDraw_Tag|kHasImage_Tag|kHasPaint_Tag,
+ Optional<SkPaint> paint;
+ sk_sp<const SkImage> atlas;
+ PODArray<SkRSXform> xforms;
+ PODArray<SkRect> texs;
+ PODArray<SkColor> colors;
+ int count;
+ SkXfermode::Mode mode;
+ Optional<SkRect> cull);
+RECORD(DrawVertices, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkCanvas::VertexMode vmode;
+ int vertexCount;
+ PODArray<SkPoint> vertices;
+ PODArray<SkPoint> texs;
+ PODArray<SkColor> colors;
+ sk_sp<SkXfermode> xmode;
+ PODArray<uint16_t> indices;
+ int indexCount);
+RECORD(DrawAnnotation, 0, // TODO: kDraw_Tag, skia:5548
+ SkRect rect;
+ SkString key;
+ sk_sp<SkData> value);
+#undef RECORD
+
+} // namespace SkRecords
+
+#endif//SkRecords_DEFINED
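
A sketch of the macro-of-macros trick described at the top of SK_RECORD_TYPES: define a one-line macro, expand the list through it, and get one case per record type without hand-maintaining the list. The record_type_name helper is illustrative:

    #include "SkRecords.h"

    // Maps an SkRecords::Type back to its name, e.g. for logging or debugging.
    const char* record_type_name(SkRecords::Type t) {
    #define CASE(T) case SkRecords::T##_Type: return #T;
        switch (t) { SK_RECORD_TYPES(CASE) }
    #undef CASE
        return "Unknown";
    }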
diff --git a/gfx/skia/skia/include/private/SkSemaphore.h b/gfx/skia/skia/include/private/SkSemaphore.h
new file mode 100644
index 000000000..3da2b99ab
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSemaphore.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSemaphore_DEFINED
+#define SkSemaphore_DEFINED
+
+#include "../private/SkOnce.h"
+#include "SkTypes.h"
+#include <atomic>
+
+class SkBaseSemaphore {
+public:
+ constexpr SkBaseSemaphore(int count = 0)
+ : fCount(count), fOSSemaphore(nullptr) {}
+
+ // Increment the counter n times.
+ // Generally it's better to call signal(n) instead of signal() n times.
+ void signal(int n = 1);
+
+ // Decrement the counter by 1,
+ // then if the counter is <= 0, sleep this thread until the counter is > 0.
+ void wait();
+
+ // SkBaseSemaphore has no destructor. Call this to clean it up.
+ void cleanup();
+
+private:
+ // This implementation follows the general strategy of
+ // 'A Lightweight Semaphore with Partial Spinning'
+ // found here
+ // http://preshing.com/20150316/semaphores-are-surprisingly-versatile/
+ // That article (and entire blog) are very much worth reading.
+ //
+ // We wrap an OS-provided semaphore with a user-space atomic counter that
+ // lets us avoid interacting with the OS semaphore unless strictly required:
+ // moving the count from >0 to <=0 or vice-versa, i.e. sleeping or waking threads.
+ struct OSSemaphore;
+
+ void osSignal(int n);
+ void osWait();
+
+ std::atomic<int> fCount;
+ SkOnce fOSSemaphoreOnce;
+ OSSemaphore* fOSSemaphore;
+};
+
+class SkSemaphore : public SkBaseSemaphore {
+public:
+ using SkBaseSemaphore::SkBaseSemaphore;
+ ~SkSemaphore() { this->cleanup(); }
+};
+
+inline void SkBaseSemaphore::signal(int n) {
+ int prev = fCount.fetch_add(n, std::memory_order_release);
+
+ // We only want to call the OS semaphore when our logical count crosses
+ // from <= 0 to >0 (when we need to wake sleeping threads).
+ //
+ // This is easiest to think about with specific examples of prev and n.
+ // If n == 5 and prev == -3, there are 3 threads sleeping and we signal
+ // SkTMin(-(-3), 5) == 3 times on the OS semaphore, leaving the count at 2.
+ //
+ // If prev >= 0, no threads are waiting, SkTMin(-prev, n) is always <= 0,
+ // so we don't call the OS semaphore, leaving the count at (prev + n).
+ int toSignal = SkTMin(-prev, n);
+ if (toSignal > 0) {
+ this->osSignal(toSignal);
+ }
+}
+
+inline void SkBaseSemaphore::wait() {
+ // Since this fetches the value before the subtract, zero and below means that there are no
+ // resources left, so the thread needs to wait.
+ if (fCount.fetch_sub(1, std::memory_order_acquire) <= 0) {
+ this->osWait();
+ }
+}
+
+#endif//SkSemaphore_DEFINED
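
A sketch of the producer/consumer handshake the counter logic above supports; the two functions are illustrative and would normally run on different threads:

    #include "SkSemaphore.h"

    static SkSemaphore gWorkAvailable;  // count starts at 0

    void post_work() {
        gWorkAvailable.signal();  // touches the OS semaphore only if a thread was sleeping
    }

    void wait_for_work() {
        gWorkAvailable.wait();    // sleeps on the OS semaphore only if the count was <= 0
    }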
diff --git a/gfx/skia/skia/include/private/SkShadowParams.h b/gfx/skia/skia/include/private/SkShadowParams.h
new file mode 100644
index 000000000..3df0a4427
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkShadowParams.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkShadowParams_DEFINED
+#define SkShadowParams_DEFINED
+
+/** \struct SkShadowParams
+
+ This struct holds information needed for drawing shadows.
+
+ fShadowRadius - radius of the shadow blur
+
+ fBiasingConstant - A constant used in variance shadow mapping to directly
+ 0.0 - 1.0 reduce light bleeding. Essentially sets all shadows
+ ~.25 below a certain brightness equal to no light, and does
+ a linear step on the rest. Higher values make shadows
+ darker and more rounded.
+
+ fMinVariance - Too low of a variance (near the outer edges of blurry
+ ~512, 1024 shadows) will lead to ugly sharp shadow brightness
+ distortions. This enforces a minimum amount of variance
+ in the calculation to smooth out the outside edges of
+ blurry shadows. However, too high of a value for this will
+ cause all shadows to be lighter by visibly different
+ amounts varying on depth.
+
+ fType - Decides which algorithm to use to draw shadows.
+*/
+struct SkShadowParams {
+ SkScalar fShadowRadius;
+ SkScalar fBiasingConstant;
+ SkScalar fMinVariance;
+
+ enum ShadowType {
+ kNoBlur_ShadowType,
+ kVariance_ShadowType,
+
+ kLast_ShadowType = kVariance_ShadowType
+ };
+ static const int kShadowTypeCount = kLast_ShadowType + 1;
+
+ ShadowType fType;
+};
+
+#endif
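
Illustrative sketch (not part of the diff): filling in SkShadowParams with the ballpark
values suggested by the field comments above. The numbers are examples, not recommendations,
and SkScalar.h is included first because this private header relies on its includer for
SkScalar:

#include "SkScalar.h"
#include "SkShadowParams.h"

static SkShadowParams example_shadow_params() {
    SkShadowParams params;
    params.fShadowRadius    = SkIntToScalar(4);    // blur radius of the shadow
    params.fBiasingConstant = 0.25f;               // ~.25 trims light bleeding (range 0.0 - 1.0)
    params.fMinVariance     = SkIntToScalar(512);  // smooths the outer edge of blurry shadows
    params.fType            = SkShadowParams::kVariance_ShadowType;
    return params;
}
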
diff --git a/gfx/skia/skia/include/private/SkSpinlock.h b/gfx/skia/skia/include/private/SkSpinlock.h
new file mode 100644
index 000000000..a5d378289
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSpinlock.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSpinlock_DEFINED
+#define SkSpinlock_DEFINED
+
+#include "SkTypes.h"
+#include <atomic>
+
+class SkSpinlock {
+public:
+ constexpr SkSpinlock() = default;
+
+ void acquire() {
+ // To act as a mutex, we need an acquire barrier when we acquire the lock.
+ if (fLocked.exchange(true, std::memory_order_acquire)) {
+ // Lock was contended. Fall back to an out-of-line spin loop.
+ this->contendedAcquire();
+ }
+ }
+
+ void release() {
+ // To act as a mutex, we need a release barrier when we release the lock.
+ fLocked.store(false, std::memory_order_release);
+ }
+
+private:
+ SK_API void contendedAcquire();
+
+ std::atomic<bool> fLocked{false};
+};
+
+#endif//SkSpinlock_DEFINED
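
Illustrative sketch (not part of the diff): SkSpinlock guarding a tiny, rarely contended
piece of state. The global names are hypothetical; the uncontended path is a single atomic
exchange, and contendedAcquire() is only entered when another thread holds the lock:

#include "SkSpinlock.h"

static SkSpinlock gCounterLock;          // hypothetical global guarded by the spinlock
static int        gCounter = 0;

static int bump_counter() {
    gCounterLock.acquire();              // acquire barrier; spins out-of-line only if contended
    int v = ++gCounter;
    gCounterLock.release();              // release barrier publishes the update
    return v;
}
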
diff --git a/gfx/skia/skia/include/private/SkTArray.h b/gfx/skia/skia/include/private/SkTArray.h
new file mode 100644
index 000000000..f7c18be40
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkTArray.h
@@ -0,0 +1,560 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTArray_DEFINED
+#define SkTArray_DEFINED
+
+#include "../private/SkTLogic.h"
+#include "../private/SkTemplates.h"
+#include "SkTypes.h"
+
+#include <new>
+#include <utility>
+
+/** When MEM_COPY is true T will be bit copied when moved.
+ When MEM_COPY is false, T will be copy constructed / destructed.
+ In all cases T will be default-initialized on allocation,
+ and its destructor will be called from this object's destructor.
+*/
+template <typename T, bool MEM_COPY = false> class SkTArray {
+public:
+ /**
+ * Creates an empty array with no initial storage
+ */
+ SkTArray() {
+ fCount = 0;
+ fReserveCount = gMIN_ALLOC_COUNT;
+ fAllocCount = 0;
+ fMemArray = NULL;
+ fPreAllocMemArray = NULL;
+ }
+
+ /**
+ * Creates an empty array that will preallocate space for reserveCount
+ * elements.
+ */
+ explicit SkTArray(int reserveCount) {
+ this->init(0, NULL, reserveCount);
+ }
+
+ /**
+ * Copies one array to another. The new array will be heap allocated.
+ */
+ explicit SkTArray(const SkTArray& that) {
+ this->init(that.fCount, NULL, 0);
+ this->copy(that.fItemArray);
+ }
+ SkTArray(SkTArray&& that) {
+ this->init(that.fCount, NULL, 0);
+ that.move(fMemArray);
+ that.fCount = 0;
+ }
+
+ /**
+ * Creates a SkTArray by copying contents of a standard C array. The new
+ * array will be heap allocated. Be careful not to use this constructor
+ * when you really want the (void*, int) version.
+ */
+ SkTArray(const T* array, int count) {
+ this->init(count, NULL, 0);
+ this->copy(array);
+ }
+
+ /**
+ * assign copy of array to this
+ */
+ SkTArray& operator =(const SkTArray& that) {
+ for (int i = 0; i < fCount; ++i) {
+ fItemArray[i].~T();
+ }
+ fCount = 0;
+ this->checkRealloc(that.count());
+ fCount = that.count();
+ this->copy(that.fItemArray);
+ return *this;
+ }
+ SkTArray& operator =(SkTArray&& that) {
+ for (int i = 0; i < fCount; ++i) {
+ fItemArray[i].~T();
+ }
+ fCount = 0;
+ this->checkRealloc(that.count());
+ fCount = that.count();
+ that.move(fMemArray);
+ that.fCount = 0;
+ return *this;
+ }
+
+ ~SkTArray() {
+ for (int i = 0; i < fCount; ++i) {
+ fItemArray[i].~T();
+ }
+ if (fMemArray != fPreAllocMemArray) {
+ sk_free(fMemArray);
+ }
+ }
+
+ /**
+ * Resets to count() == 0
+ */
+ void reset() { this->pop_back_n(fCount); }
+
+ /**
+ * Resets to count() = n newly constructed T objects.
+ */
+ void reset(int n) {
+ SkASSERT(n >= 0);
+ for (int i = 0; i < fCount; ++i) {
+ fItemArray[i].~T();
+ }
+ // Set fCount to 0 before calling checkRealloc so that no elements are moved.
+ fCount = 0;
+ this->checkRealloc(n);
+ fCount = n;
+ for (int i = 0; i < fCount; ++i) {
+ new (fItemArray + i) T;
+ }
+ }
+
+ /**
+ * Ensures there is enough reserved space for n elements.
+ */
+ void reserve(int n) {
+ if (fCount < n) {
+ this->checkRealloc(n - fCount);
+ }
+ }
+
+ /**
+ * Resets to a copy of a C array.
+ */
+ void reset(const T* array, int count) {
+ for (int i = 0; i < fCount; ++i) {
+ fItemArray[i].~T();
+ }
+ fCount = 0;
+ this->checkRealloc(count);
+ fCount = count;
+ this->copy(array);
+ }
+
+ void removeShuffle(int n) {
+ SkASSERT(n < fCount);
+ int newCount = fCount - 1;
+ fCount = newCount;
+ fItemArray[n].~T();
+ if (n != newCount) {
+ this->move(n, newCount);
+ }
+ }
+
+ /**
+ * Number of elements in the array.
+ */
+ int count() const { return fCount; }
+
+ /**
+ * Is the array empty.
+ */
+ bool empty() const { return !fCount; }
+
+ /**
+ * Adds 1 new default-initialized T value and returns it by reference. Note
+ * the reference only remains valid until the next call that adds or removes
+ * elements.
+ */
+ T& push_back() {
+ void* newT = this->push_back_raw(1);
+ return *new (newT) T;
+ }
+
+ /**
+ * Version of above that uses a copy constructor to initialize the new item
+ */
+ T& push_back(const T& t) {
+ void* newT = this->push_back_raw(1);
+ return *new (newT) T(t);
+ }
+
+ /**
+ * Version of above that uses a move constructor to initialize the new item
+ */
+ T& push_back(T&& t) {
+ void* newT = this->push_back_raw(1);
+ return *new (newT) T(std::move(t));
+ }
+
+ /**
+ * Construct a new T at the back of this array.
+ */
+ template<class... Args> T& emplace_back(Args&&... args) {
+ void* newT = this->push_back_raw(1);
+ return *new (newT) T(std::forward<Args>(args)...);
+ }
+
+ /**
+ * Allocates n more default-initialized T values, and returns the address of
+ * the start of that new range. Note: this address is only valid until the
+ * next API call made on the array that might add or remove elements.
+ */
+ T* push_back_n(int n) {
+ SkASSERT(n >= 0);
+ void* newTs = this->push_back_raw(n);
+ for (int i = 0; i < n; ++i) {
+ new (static_cast<char*>(newTs) + i * sizeof(T)) T;
+ }
+ return static_cast<T*>(newTs);
+ }
+
+ /**
+ * Version of above that uses a copy constructor to initialize all n items
+ * to the same T.
+ */
+ T* push_back_n(int n, const T& t) {
+ SkASSERT(n >= 0);
+ void* newTs = this->push_back_raw(n);
+ for (int i = 0; i < n; ++i) {
+ new (static_cast<char*>(newTs) + i * sizeof(T)) T(t);
+ }
+ return static_cast<T*>(newTs);
+ }
+
+ /**
+ * Version of above that uses a copy constructor to initialize the n items
+ * to separate T values.
+ */
+ T* push_back_n(int n, const T t[]) {
+ SkASSERT(n >= 0);
+ this->checkRealloc(n);
+ for (int i = 0; i < n; ++i) {
+ new (fItemArray + fCount + i) T(t[i]);
+ }
+ fCount += n;
+ return fItemArray + fCount - n;
+ }
+
+ /**
+ * Version of above that uses the move constructor to set n items.
+ */
+ T* move_back_n(int n, T* t) {
+ SkASSERT(n >= 0);
+ this->checkRealloc(n);
+ for (int i = 0; i < n; ++i) {
+ new (fItemArray + fCount + i) T(std::move(t[i]));
+ }
+ fCount += n;
+ return fItemArray + fCount - n;
+ }
+
+ /**
+ * Removes the last element. Not safe to call when count() == 0.
+ */
+ void pop_back() {
+ SkASSERT(fCount > 0);
+ --fCount;
+ fItemArray[fCount].~T();
+ this->checkRealloc(0);
+ }
+
+ /**
+ * Removes the last n elements. Not safe to call when count() < n.
+ */
+ void pop_back_n(int n) {
+ SkASSERT(n >= 0);
+ SkASSERT(fCount >= n);
+ fCount -= n;
+ for (int i = 0; i < n; ++i) {
+ fItemArray[fCount + i].~T();
+ }
+ this->checkRealloc(0);
+ }
+
+ /**
+ * Pushes or pops from the back to resize. Pushes will be default
+ * initialized.
+ */
+ void resize_back(int newCount) {
+ SkASSERT(newCount >= 0);
+
+ if (newCount > fCount) {
+ this->push_back_n(newCount - fCount);
+ } else if (newCount < fCount) {
+ this->pop_back_n(fCount - newCount);
+ }
+ }
+
+ /** Swaps the contents of this array with that array. Does a pointer swap if possible,
+ otherwise copies the T values. */
+ void swap(SkTArray* that) {
+ if (this == that) {
+ return;
+ }
+ if (this->fPreAllocMemArray != this->fItemArray &&
+ that->fPreAllocMemArray != that->fItemArray) {
+ // If neither is using a preallocated array then just swap.
+ SkTSwap(fItemArray, that->fItemArray);
+ SkTSwap(fCount, that->fCount);
+ SkTSwap(fAllocCount, that->fAllocCount);
+ } else {
+ // This could be more optimal...
+ SkTArray copy(std::move(*that));
+ *that = std::move(*this);
+ *this = std::move(copy);
+ }
+ }
+
+ T* begin() {
+ return fItemArray;
+ }
+ const T* begin() const {
+ return fItemArray;
+ }
+ T* end() {
+ return fItemArray ? fItemArray + fCount : NULL;
+ }
+ const T* end() const {
+ return fItemArray ? fItemArray + fCount : NULL;
+ }
+
+ /**
+ * Get the i^th element.
+ */
+ T& operator[] (int i) {
+ SkASSERT(i < fCount);
+ SkASSERT(i >= 0);
+ return fItemArray[i];
+ }
+
+ const T& operator[] (int i) const {
+ SkASSERT(i < fCount);
+ SkASSERT(i >= 0);
+ return fItemArray[i];
+ }
+
+ /**
+ * equivalent to operator[](0)
+ */
+ T& front() { SkASSERT(fCount > 0); return fItemArray[0];}
+
+ const T& front() const { SkASSERT(fCount > 0); return fItemArray[0];}
+
+ /**
+ * equivalent to operator[](count() - 1)
+ */
+ T& back() { SkASSERT(fCount); return fItemArray[fCount - 1];}
+
+ const T& back() const { SkASSERT(fCount > 0); return fItemArray[fCount - 1];}
+
+ /**
+ * equivalent to operator[](count()-1-i)
+ */
+ T& fromBack(int i) {
+ SkASSERT(i >= 0);
+ SkASSERT(i < fCount);
+ return fItemArray[fCount - i - 1];
+ }
+
+ const T& fromBack(int i) const {
+ SkASSERT(i >= 0);
+ SkASSERT(i < fCount);
+ return fItemArray[fCount - i - 1];
+ }
+
+ bool operator==(const SkTArray<T, MEM_COPY>& right) const {
+ int leftCount = this->count();
+ if (leftCount != right.count()) {
+ return false;
+ }
+ for (int index = 0; index < leftCount; ++index) {
+ if (fItemArray[index] != right.fItemArray[index]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool operator!=(const SkTArray<T, MEM_COPY>& right) const {
+ return !(*this == right);
+ }
+
+protected:
+ /**
+ * Creates an empty array that will use the passed storage block until it
+ * is insufficiently large to hold the entire array.
+ */
+ template <int N>
+ SkTArray(SkAlignedSTStorage<N,T>* storage) {
+ this->init(0, storage->get(), N);
+ }
+
+ /**
+ * Copy another array, using preallocated storage if preAllocCount >=
+ * array.count(). Otherwise storage will only be used when array shrinks
+ * to fit.
+ */
+ template <int N>
+ SkTArray(const SkTArray& array, SkAlignedSTStorage<N,T>* storage) {
+ this->init(array.fCount, storage->get(), N);
+ this->copy(array.fItemArray);
+ }
+
+ /**
+ * Copy a C array, using preallocated storage if preAllocCount >=
+ * count. Otherwise storage will only be used when array shrinks
+ * to fit.
+ */
+ template <int N>
+ SkTArray(const T* array, int count, SkAlignedSTStorage<N,T>* storage) {
+ this->init(count, storage->get(), N);
+ this->copy(array);
+ }
+
+ void init(int count, void* preAllocStorage, int preAllocOrReserveCount) {
+ SkASSERT(count >= 0);
+ SkASSERT(preAllocOrReserveCount >= 0);
+ fCount = count;
+ fReserveCount = (preAllocOrReserveCount > 0) ?
+ preAllocOrReserveCount :
+ gMIN_ALLOC_COUNT;
+ fPreAllocMemArray = preAllocStorage;
+ if (fReserveCount >= fCount &&
+ preAllocStorage) {
+ fAllocCount = fReserveCount;
+ fMemArray = preAllocStorage;
+ } else {
+ fAllocCount = SkMax32(fCount, fReserveCount);
+ fMemArray = sk_malloc_throw(fAllocCount * sizeof(T));
+ }
+ }
+
+private:
+ /** In the following move and copy methods, 'dst' is assumed to be uninitialized raw storage.
+ * In the following move methods, 'src' is destroyed leaving behind uninitialized raw storage.
+ */
+ template <bool E = MEM_COPY> SK_WHEN(E, void) copy(const T* src) {
+ sk_careful_memcpy(fMemArray, src, fCount * sizeof(T));
+ }
+ template <bool E = MEM_COPY> SK_WHEN(E, void) move(int dst, int src) {
+ memcpy(&fItemArray[dst], &fItemArray[src], sizeof(T));
+ }
+ template <bool E = MEM_COPY> SK_WHEN(E, void) move(void* dst) {
+ sk_careful_memcpy(dst, fMemArray, fCount * sizeof(T));
+ }
+
+ template <bool E = MEM_COPY> SK_WHEN(!E, void) copy(const T* src) {
+ for (int i = 0; i < fCount; ++i) {
+ new (fItemArray + i) T(src[i]);
+ }
+ }
+ template <bool E = MEM_COPY> SK_WHEN(!E, void) move(int dst, int src) {
+ new (&fItemArray[dst]) T(std::move(fItemArray[src]));
+ fItemArray[src].~T();
+ }
+ template <bool E = MEM_COPY> SK_WHEN(!E, void) move(void* dst) {
+ for (int i = 0; i < fCount; ++i) {
+ new (static_cast<char*>(dst) + sizeof(T) * i) T(std::move(fItemArray[i]));
+ fItemArray[i].~T();
+ }
+ }
+
+ static const int gMIN_ALLOC_COUNT = 8;
+
+ // Helper function that makes space for n objects, adjusts the count, but does not initialize
+ // the new objects.
+ void* push_back_raw(int n) {
+ this->checkRealloc(n);
+ void* ptr = fItemArray + fCount;
+ fCount += n;
+ return ptr;
+ }
+
+ inline void checkRealloc(int delta) {
+ SkASSERT(fCount >= 0);
+ SkASSERT(fAllocCount >= 0);
+
+ SkASSERT(-delta <= fCount);
+
+ int newCount = fCount + delta;
+ int newAllocCount = fAllocCount;
+
+ if (newCount > fAllocCount || newCount < (fAllocCount / 3)) {
+ // whether we're growing or shrinking, we leave at least 50% extra space for future
+ // growth (clamped to the reserve count).
+ newAllocCount = SkMax32(newCount + ((newCount + 1) >> 1), fReserveCount);
+ }
+ if (newAllocCount != fAllocCount) {
+
+ fAllocCount = newAllocCount;
+ void* newMemArray;
+
+ if (fAllocCount == fReserveCount && fPreAllocMemArray) {
+ newMemArray = fPreAllocMemArray;
+ } else {
+ newMemArray = sk_malloc_throw(fAllocCount*sizeof(T));
+ }
+
+ this->move(newMemArray);
+
+ if (fMemArray != fPreAllocMemArray) {
+ sk_free(fMemArray);
+ }
+ fMemArray = newMemArray;
+ }
+ }
+
+ int fReserveCount;
+ int fCount;
+ int fAllocCount;
+ void* fPreAllocMemArray;
+ union {
+ T* fItemArray;
+ void* fMemArray;
+ };
+};
+
+/**
+ * Subclass of SkTArray that contains a preallocated memory block for the array.
+ */
+template <int N, typename T, bool MEM_COPY = false>
+class SkSTArray : public SkTArray<T, MEM_COPY> {
+private:
+ typedef SkTArray<T, MEM_COPY> INHERITED;
+
+public:
+ SkSTArray() : INHERITED(&fStorage) {
+ }
+
+ SkSTArray(const SkSTArray& array)
+ : INHERITED(array, &fStorage) {
+ }
+
+ explicit SkSTArray(const INHERITED& array)
+ : INHERITED(array, &fStorage) {
+ }
+
+ explicit SkSTArray(int reserveCount)
+ : INHERITED(reserveCount) {
+ }
+
+ SkSTArray(const T* array, int count)
+ : INHERITED(array, count, &fStorage) {
+ }
+
+ SkSTArray& operator= (const SkSTArray& array) {
+ return *this = *(const INHERITED*)&array;
+ }
+
+ SkSTArray& operator= (const INHERITED& array) {
+ INHERITED::operator=(array);
+ return *this;
+ }
+
+private:
+ SkAlignedSTStorage<N,T> fStorage;
+};
+
+#endif
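
Illustrative sketch (not part of the diff): SkSTArray keeps the first N elements in its
stack block and spills to the heap past that, and MEM_COPY=true opts POD element types into
the memcpy-based copy()/move() paths above. Assumes SkPoint.h from the same import:

#include "SkPoint.h"
#include "SkTArray.h"

static void collect_points() {
    // 16 points live in the preallocated block; the 17th onward move to the heap.
    SkSTArray<16, SkPoint, true /*MEM_COPY: SkPoint is POD*/> pts;

    for (int i = 0; i < 20; i++) {
        pts.push_back(SkPoint::Make(SkIntToScalar(i), 0));
    }
    SkASSERT(20 == pts.count());

    pts.pop_back_n(5);                   // destroys the last five; may shrink the allocation
    SkScalar firstX = pts.front().fX;    // front()/back()/operator[] assert bounds in debug
    (void)firstX;
}
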
diff --git a/gfx/skia/skia/include/private/SkTDArray.h b/gfx/skia/skia/include/private/SkTDArray.h
new file mode 100644
index 000000000..f71d35700
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkTDArray.h
@@ -0,0 +1,381 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTDArray_DEFINED
+#define SkTDArray_DEFINED
+
+#include "SkTypes.h"
+
+template <typename T> class SkTDArray {
+public:
+ SkTDArray() : fArray(nullptr), fReserve(0), fCount(0) {}
+ SkTDArray(const T src[], int count) {
+ SkASSERT(src || count == 0);
+
+ fReserve = fCount = 0;
+ fArray = NULL;
+ if (count) {
+ fArray = (T*)sk_malloc_throw(count * sizeof(T));
+ memcpy(fArray, src, sizeof(T) * count);
+ fReserve = fCount = count;
+ }
+ }
+ SkTDArray(const SkTDArray<T>& src) : fArray(nullptr), fReserve(0), fCount(0) {
+ SkTDArray<T> tmp(src.fArray, src.fCount);
+ this->swap(tmp);
+ }
+ SkTDArray(SkTDArray<T>&& src) : fArray(nullptr), fReserve(0), fCount(0) {
+ this->swap(src);
+ }
+ ~SkTDArray() {
+ sk_free(fArray);
+ }
+
+ SkTDArray<T>& operator=(const SkTDArray<T>& src) {
+ if (this != &src) {
+ if (src.fCount > fReserve) {
+ SkTDArray<T> tmp(src.fArray, src.fCount);
+ this->swap(tmp);
+ } else {
+ sk_careful_memcpy(fArray, src.fArray, sizeof(T) * src.fCount);
+ fCount = src.fCount;
+ }
+ }
+ return *this;
+ }
+ SkTDArray<T>& operator=(SkTDArray<T>&& src) {
+ if (this != &src) {
+ this->swap(src);
+ src.reset();
+ }
+ return *this;
+ }
+
+ friend bool operator==(const SkTDArray<T>& a, const SkTDArray<T>& b) {
+ return a.fCount == b.fCount &&
+ (a.fCount == 0 ||
+ !memcmp(a.fArray, b.fArray, a.fCount * sizeof(T)));
+ }
+ friend bool operator!=(const SkTDArray<T>& a, const SkTDArray<T>& b) {
+ return !(a == b);
+ }
+
+ void swap(SkTDArray<T>& other) {
+ SkTSwap(fArray, other.fArray);
+ SkTSwap(fReserve, other.fReserve);
+ SkTSwap(fCount, other.fCount);
+ }
+
+ /** Return a ptr to the array of data, to be freed with sk_free. This also
+ resets the SkTDArray to be empty.
+ */
+ T* release() {
+ T* array = fArray;
+ fArray = NULL;
+ fReserve = fCount = 0;
+ return array;
+ }
+
+ bool isEmpty() const { return fCount == 0; }
+
+ /**
+ * Return the number of elements in the array
+ */
+ int count() const { return fCount; }
+
+ /**
+ * Return the total number of elements allocated.
+ * reserved() - count() gives you the number of elements you can add
+ * without causing an allocation.
+ */
+ int reserved() const { return fReserve; }
+
+ /**
+ * return the number of bytes in the array: count * sizeof(T)
+ */
+ size_t bytes() const { return fCount * sizeof(T); }
+
+ T* begin() { return fArray; }
+ const T* begin() const { return fArray; }
+ T* end() { return fArray ? fArray + fCount : NULL; }
+ const T* end() const { return fArray ? fArray + fCount : NULL; }
+
+ T& operator[](int index) {
+ SkASSERT(index < fCount);
+ return fArray[index];
+ }
+ const T& operator[](int index) const {
+ SkASSERT(index < fCount);
+ return fArray[index];
+ }
+
+ T& getAt(int index) {
+ return (*this)[index];
+ }
+ const T& getAt(int index) const {
+ return (*this)[index];
+ }
+
+ void reset() {
+ if (fArray) {
+ sk_free(fArray);
+ fArray = NULL;
+ fReserve = fCount = 0;
+ } else {
+ SkASSERT(fReserve == 0 && fCount == 0);
+ }
+ }
+
+ void rewind() {
+ // same as setCount(0)
+ fCount = 0;
+ }
+
+ /**
+ * Sets the number of elements in the array.
+ * If the array does not have space for count elements, it will increase
+ * the storage allocated to some amount greater than that required.
+ * It will never shrink the storage.
+ */
+ void setCount(int count) {
+ SkASSERT(count >= 0);
+ if (count > fReserve) {
+ this->resizeStorageToAtLeast(count);
+ }
+ fCount = count;
+ }
+
+ void setReserve(int reserve) {
+ if (reserve > fReserve) {
+ this->resizeStorageToAtLeast(reserve);
+ }
+ }
+
+ T* prepend() {
+ this->adjustCount(1);
+ memmove(fArray + 1, fArray, (fCount - 1) * sizeof(T));
+ return fArray;
+ }
+
+ T* append() {
+ return this->append(1, NULL);
+ }
+ T* append(int count, const T* src = NULL) {
+ int oldCount = fCount;
+ if (count) {
+ SkASSERT(src == NULL || fArray == NULL ||
+ src + count <= fArray || fArray + oldCount <= src);
+
+ this->adjustCount(count);
+ if (src) {
+ memcpy(fArray + oldCount, src, sizeof(T) * count);
+ }
+ }
+ return fArray + oldCount;
+ }
+
+ T* appendClear() {
+ T* result = this->append();
+ *result = 0;
+ return result;
+ }
+
+ T* insert(int index) {
+ return this->insert(index, 1, NULL);
+ }
+ T* insert(int index, int count, const T* src = NULL) {
+ SkASSERT(count);
+ SkASSERT(index <= fCount);
+ size_t oldCount = fCount;
+ this->adjustCount(count);
+ T* dst = fArray + index;
+ memmove(dst + count, dst, sizeof(T) * (oldCount - index));
+ if (src) {
+ memcpy(dst, src, sizeof(T) * count);
+ }
+ return dst;
+ }
+
+ void remove(int index, int count = 1) {
+ SkASSERT(index + count <= fCount);
+ fCount = fCount - count;
+ memmove(fArray + index, fArray + index + count, sizeof(T) * (fCount - index));
+ }
+
+ void removeShuffle(int index) {
+ SkASSERT(index < fCount);
+ int newCount = fCount - 1;
+ fCount = newCount;
+ if (index != newCount) {
+ memcpy(fArray + index, fArray + newCount, sizeof(T));
+ }
+ }
+
+ template <typename S> int select(S&& selector) const {
+ const T* iter = fArray;
+ const T* stop = fArray + fCount;
+
+ for (; iter < stop; iter++) {
+ if (selector(*iter)) {
+ return SkToInt(iter - fArray);
+ }
+ }
+ return -1;
+ }
+
+ int find(const T& elem) const {
+ const T* iter = fArray;
+ const T* stop = fArray + fCount;
+
+ for (; iter < stop; iter++) {
+ if (*iter == elem) {
+ return SkToInt(iter - fArray);
+ }
+ }
+ return -1;
+ }
+
+ int rfind(const T& elem) const {
+ const T* iter = fArray + fCount;
+ const T* stop = fArray;
+
+ while (iter > stop) {
+ if (*--iter == elem) {
+ return SkToInt(iter - stop);
+ }
+ }
+ return -1;
+ }
+
+ /**
+ * Returns true iff the array contains this element.
+ */
+ bool contains(const T& elem) const {
+ return (this->find(elem) >= 0);
+ }
+
+ /**
+ * Copies up to max elements into dst. The number of items copied is
+ * capped by count - index. The actual number copied is returned.
+ */
+ int copyRange(T* dst, int index, int max) const {
+ SkASSERT(max >= 0);
+ SkASSERT(!max || dst);
+ if (index >= fCount) {
+ return 0;
+ }
+ int count = SkMin32(max, fCount - index);
+ memcpy(dst, fArray + index, sizeof(T) * count);
+ return count;
+ }
+
+ void copy(T* dst) const {
+ this->copyRange(dst, 0, fCount);
+ }
+
+ // routines to treat the array like a stack
+ T* push() { return this->append(); }
+ void push(const T& elem) { *this->append() = elem; }
+ const T& top() const { return (*this)[fCount - 1]; }
+ T& top() { return (*this)[fCount - 1]; }
+ void pop(T* elem) { SkASSERT(fCount > 0); if (elem) *elem = (*this)[fCount - 1]; --fCount; }
+ void pop() { SkASSERT(fCount > 0); --fCount; }
+
+ void deleteAll() {
+ T* iter = fArray;
+ T* stop = fArray + fCount;
+ while (iter < stop) {
+ delete *iter;
+ iter += 1;
+ }
+ this->reset();
+ }
+
+ void freeAll() {
+ T* iter = fArray;
+ T* stop = fArray + fCount;
+ while (iter < stop) {
+ sk_free(*iter);
+ iter += 1;
+ }
+ this->reset();
+ }
+
+ void unrefAll() {
+ T* iter = fArray;
+ T* stop = fArray + fCount;
+ while (iter < stop) {
+ (*iter)->unref();
+ iter += 1;
+ }
+ this->reset();
+ }
+
+ void safeUnrefAll() {
+ T* iter = fArray;
+ T* stop = fArray + fCount;
+ while (iter < stop) {
+ SkSafeUnref(*iter);
+ iter += 1;
+ }
+ this->reset();
+ }
+
+ void visitAll(void visitor(T&)) {
+ T* stop = this->end();
+ for (T* curr = this->begin(); curr < stop; curr++) {
+ if (*curr) {
+ visitor(*curr);
+ }
+ }
+ }
+
+#ifdef SK_DEBUG
+ void validate() const {
+ SkASSERT((fReserve == 0 && fArray == NULL) ||
+ (fReserve > 0 && fArray != NULL));
+ SkASSERT(fCount <= fReserve);
+ }
+#endif
+
+ void shrinkToFit() {
+ fReserve = fCount;
+ fArray = (T*)sk_realloc_throw(fArray, fReserve * sizeof(T));
+ }
+
+private:
+ T* fArray;
+ int fReserve;
+ int fCount;
+
+ /**
+ * Adjusts the number of elements in the array.
+ * This is the same as calling setCount(count() + delta).
+ */
+ void adjustCount(int delta) {
+ this->setCount(fCount + delta);
+ }
+
+ /**
+ * Increase the storage allocation such that it can hold (fCount + extra)
+ * elements.
+ * It never shrinks the allocation, and it may increase the allocation by
+ * more than is strictly required, based on a private growth heuristic.
+ *
+ * note: does NOT modify fCount
+ */
+ void resizeStorageToAtLeast(int count) {
+ SkASSERT(count > fReserve);
+ fReserve = count + 4;
+ fReserve += fReserve / 4;
+ fArray = (T*)sk_realloc_throw(fArray, fReserve * sizeof(T));
+ }
+};
+
+#endif
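
Illustrative sketch (not part of the diff): SkTDArray is the trivially-copyable counterpart
of SkTArray; elements are moved with memcpy and no constructors or destructors run, so it is
only suitable for POD-like element types:

#include "SkTDArray.h"

static void track_ids() {
    SkTDArray<int> ids;
    ids.setReserve(8);                   // grow storage up front; count() stays 0

    for (int i = 0; i < 8; i++) {
        *ids.append() = i * i;           // append() returns a pointer to the new slot
    }

    int where = ids.find(25);            // index 5
    if (where >= 0) {
        ids.removeShuffle(where);        // O(1) removal; element order is not preserved
    }
    ids.rewind();                        // count() back to 0, storage kept for reuse
}
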
diff --git a/gfx/skia/skia/include/private/SkTDict.h b/gfx/skia/skia/include/private/SkTDict.h
new file mode 100644
index 000000000..106cace2f
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkTDict.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTDict_DEFINED
+#define SkTDict_DEFINED
+
+#include "SkChunkAlloc.h"
+#include "SkTSearch.h"
+#include "SkTDArray.h"
+
+template <typename T> class SkTDict : SkNoncopyable {
+public:
+ SkTDict(size_t minStringAlloc) : fStrings(minStringAlloc) {}
+
+ void reset() {
+ fArray.reset();
+ fStrings.reset();
+ }
+
+ int count() const { return fArray.count(); }
+
+ bool set(const char name[], const T& value) {
+ return set(name, strlen(name), value);
+ }
+
+ bool set(const char name[], size_t len, const T& value) {
+ SkASSERT(name);
+
+ int index = this->find_index(name, len);
+
+ if (index >= 0) {
+ fArray[index].fValue = value;
+ return false;
+ } else {
+ Pair* pair = fArray.insert(~index);
+ char* copy = (char*)fStrings.alloc(len + 1, SkChunkAlloc::kThrow_AllocFailType);
+ memcpy(copy, name, len);
+ copy[len] = '\0';
+ pair->fName = copy;
+ pair->fValue = value;
+ return true;
+ }
+ }
+
+ bool find(const char name[]) const {
+ return this->find_index(name) >= 0;
+ }
+
+ bool find(const char name[], size_t len) const {
+ return this->find_index(name, len) >= 0;
+ }
+
+ bool find(const char name[], T* value) const {
+ return find(name, strlen(name), value);
+ }
+
+ bool find(const char name[], size_t len, T* value) const {
+ int index = this->find_index(name, len);
+
+ if (index >= 0) {
+ if (value) {
+ *value = fArray[index].fValue;
+ }
+ return true;
+ }
+ return false;
+ }
+
+ bool findKey(T& value, const char** name) const {
+ const Pair* end = fArray.end();
+ for (const Pair* pair = fArray.begin(); pair < end; pair++) {
+ if (pair->fValue != value) {
+ continue;
+ }
+ *name = pair->fName;
+ return true;
+ }
+ return false;
+ }
+
+public:
+ struct Pair {
+ const char* fName;
+ T fValue;
+
+ friend int operator<(const Pair& a, const Pair& b) {
+ return strcmp(a.fName, b.fName);
+ }
+
+ friend int operator!=(const Pair& a, const Pair& b) {
+ return strcmp(a.fName, b.fName);
+ }
+ };
+ friend class Iter;
+
+public:
+ class Iter {
+ public:
+ Iter(const SkTDict<T>& dict) {
+ fIter = dict.fArray.begin();
+ fStop = dict.fArray.end();
+ }
+
+ const char* next(T* value) {
+ const char* name = NULL;
+ if (fIter < fStop) {
+ name = fIter->fName;
+ if (value) {
+ *value = fIter->fValue;
+ }
+ fIter += 1;
+ }
+ return name;
+ }
+ private:
+ const Pair* fIter;
+ const Pair* fStop;
+ };
+
+private:
+ SkTDArray<Pair> fArray;
+ SkChunkAlloc fStrings;
+
+ int find_index(const char name[]) const {
+ return find_index(name, strlen(name));
+ }
+
+ int find_index(const char name[], size_t len) const {
+ SkASSERT(name);
+
+ int count = fArray.count();
+ int index = ~0;
+
+ if (count) {
+ index = SkStrSearch(&fArray.begin()->fName, count, name, len, sizeof(Pair));
+ }
+ return index;
+ }
+ friend class Iter;
+};
+
+#endif
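
Illustrative sketch (not part of the diff): SkTDict keeps its (name, value) pairs sorted by
name via SkStrSearch and copies the key strings into its SkChunkAlloc, so callers do not
need to keep the key strings alive:

#include "SkTDict.h"

static void dict_example() {
    SkTDict<int> dict(64);               // 64 bytes: initial chunk for copied key strings

    dict.set("width",  256);             // returns true: new entry
    dict.set("height", 128);
    dict.set("width",  512);             // returns false: existing entry overwritten

    int value = 0;
    if (dict.find("width", &value)) {
        SkASSERT(512 == value);
    }

    SkTDict<int>::Iter iter(dict);
    int v = 0;
    while (const char* name = iter.next(&v)) {
        // visits pairs in sorted key order: "height", then "width"
        (void)name;
    }
}
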
diff --git a/gfx/skia/skia/include/private/SkTFitsIn.h b/gfx/skia/skia/include/private/SkTFitsIn.h
new file mode 100644
index 000000000..4802aff1e
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkTFitsIn.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTFitsIn_DEFINED
+#define SkTFitsIn_DEFINED
+
+#include "../private/SkTLogic.h"
+#include <limits>
+#include <type_traits>
+
+namespace sktfitsin {
+namespace Private {
+
+/** SkTMux::type = (a && b) ? Both : (a) ? A : (b) ? B : Neither; */
+template <bool a, bool b, typename Both, typename A, typename B, typename Neither>
+struct SkTMux {
+ using type = skstd::conditional_t<a, skstd::conditional_t<b, Both, A>,
+ skstd::conditional_t<b, B, Neither>>;
+};
+
+/** SkTHasMoreDigits = (digits(A) >= digits(B)) ? true_type : false_type. */
+template <typename A, typename B> struct SkTHasMoreDigits
+ : skstd::bool_constant<std::numeric_limits<A>::digits >= std::numeric_limits<B>::digits>
+{ };
+
+/** A high or low side predicate which is used when it is statically known
+ * that source values are in the range of the Destination.
+ */
+template <typename S> struct SkTOutOfRange_False {
+ using can_be_true = std::false_type;
+ using source_type = S;
+ static bool apply(S) {
+ return false;
+ }
+};
+
+/** A low side predicate which tests if the source value < Min(D).
+ * Assumes that Min(S) <= Min(D).
+ */
+template <typename D, typename S> struct SkTOutOfRange_LT_MinD {
+ using can_be_true = std::true_type;
+ using source_type = S;
+ static bool apply(S s) {
+ using precondition = SkTHasMoreDigits<S, D>;
+ static_assert(precondition::value, "minS > minD");
+
+ return s < static_cast<S>((std::numeric_limits<D>::min)());
+ }
+};
+
+/** A low side predicate which tests if the source value is less than 0. */
+template <typename D, typename S> struct SkTOutOfRange_LT_Zero {
+ using can_be_true = std::true_type;
+ using source_type = S;
+ static bool apply(S s) {
+ return s < static_cast<S>(0);
+ }
+};
+
+/** A high side predicate which tests if the source value > Max(D).
+ * Assumes that Max(S) >= Max(D).
+ */
+template <typename D, typename S> struct SkTOutOfRange_GT_MaxD {
+ using can_be_true = std::true_type;
+ using source_type = S;
+ static bool apply(S s) {
+ using precondition = SkTHasMoreDigits<S, D>;
+ static_assert(precondition::value, "maxS < maxD");
+
+ return s > static_cast<S>((std::numeric_limits<D>::max)());
+ }
+};
+
+/** Composes two SkTOutOfRange predicates.
+ * First checks OutOfRange_Low then, if in range, OutOfRange_High.
+ */
+template <typename OutOfRange_Low, typename OutOfRange_High> struct SkTOutOfRange_Either {
+ using can_be_true = std::true_type;
+ using source_type = typename OutOfRange_Low::source_type;
+ static bool apply(source_type s) {
+ bool outOfRange = OutOfRange_Low::apply(s);
+ if (!outOfRange) {
+ outOfRange = OutOfRange_High::apply(s);
+ }
+ return outOfRange;
+ }
+};
+
+/** SkTCombineOutOfRange::type is an SkTOutOfRange_XXX type which is the
+ * optimal combination of OutOfRange_Low and OutOfRange_High.
+ */
+template <typename OutOfRange_Low, typename OutOfRange_High> struct SkTCombineOutOfRange {
+ using Both = SkTOutOfRange_Either<OutOfRange_Low, OutOfRange_High>;
+ using Neither = SkTOutOfRange_False<typename OutOfRange_Low::source_type>;
+
+ using apply_low = typename OutOfRange_Low::can_be_true;
+ using apply_high = typename OutOfRange_High::can_be_true;
+
+ using type = typename SkTMux<apply_low::value, apply_high::value,
+ Both, OutOfRange_Low, OutOfRange_High, Neither>::type;
+};
+
+template <typename D, typename S, typename OutOfRange_Low, typename OutOfRange_High>
+struct SkTRangeChecker {
+ /** This is the method which is called at runtime to do the range check. */
+ static bool OutOfRange(S s) {
+ using Combined = typename SkTCombineOutOfRange<OutOfRange_Low, OutOfRange_High>::type;
+ return Combined::apply(s);
+ }
+};
+
+/** SkTFitsIn_Unsigned2Unsiged::type is an SkTRangeChecker with an OutOfRange(S s) method
+ * the implementation of which is tailored for the source and destination types.
+ * Assumes that S and D are unsigned integer types.
+ */
+template <typename D, typename S> struct SkTFitsIn_Unsigned2Unsiged {
+ using OutOfRange_Low = SkTOutOfRange_False<S>;
+ using OutOfRange_High = SkTOutOfRange_GT_MaxD<D, S>;
+
+ using HighSideOnlyCheck = SkTRangeChecker<D, S, OutOfRange_Low, OutOfRange_High>;
+ using NoCheck = SkTRangeChecker<D, S, SkTOutOfRange_False<S>, SkTOutOfRange_False<S>>;
+
+ // If std::numeric_limits<D>::digits >= std::numeric_limits<S>::digits, nothing to check.
+ // This also protects the precondition of SkTOutOfRange_GT_MaxD.
+ using sourceFitsInDesitination = SkTHasMoreDigits<D, S>;
+ using type = skstd::conditional_t<sourceFitsInDesitination::value, NoCheck, HighSideOnlyCheck>;
+};
+
+/** SkTFitsIn_Signed2Signed::type is an SkTRangeChecker with an OutOfRange(S s) method
+ * the implementation of which is tailored for the source and destination types.
+ * Assumes that S and D are signed integer types.
+ */
+template <typename D, typename S> struct SkTFitsIn_Signed2Signed {
+ using OutOfRange_Low = SkTOutOfRange_LT_MinD<D, S>;
+ using OutOfRange_High = SkTOutOfRange_GT_MaxD<D, S>;
+
+ using FullCheck = SkTRangeChecker<D, S, OutOfRange_Low, OutOfRange_High>;
+ using NoCheck = SkTRangeChecker<D, S, SkTOutOfRange_False<S>, SkTOutOfRange_False<S>>;
+
+ // If std::numeric_limits<D>::digits >= std::numeric_limits<S>::digits, nothing to check.
+ // This also protects the precondition of SkTOutOfRange_LT_MinD and SkTOutOfRange_GT_MaxD.
+ using sourceFitsInDesitination = SkTHasMoreDigits<D, S>;
+ using type = skstd::conditional_t<sourceFitsInDesitination::value, NoCheck, FullCheck>;
+};
+
+/** SkTFitsIn_Signed2Unsigned::type is an SkTRangeChecker with an OutOfRange(S s) method
+ * the implementation of which is tailored for the source and destination types.
+ * Assumes that S is a signed integer type and D is an unsigned integer type.
+ */
+template <typename D, typename S> struct SkTFitsIn_Signed2Unsigned {
+ using OutOfRange_Low = SkTOutOfRange_LT_Zero<D, S>;
+ using OutOfRange_High = SkTOutOfRange_GT_MaxD<D, S>;
+
+ using FullCheck = SkTRangeChecker<D, S, OutOfRange_Low, OutOfRange_High>;
+ using LowSideOnlyCheck = SkTRangeChecker<D, S, OutOfRange_Low, SkTOutOfRange_False<S>>;
+
+ // If std::numeric_limits<D>::max() >= std::numeric_limits<S>::max(),
+ // no need to check the high side. (Until C++11, assume more digits means greater max.)
+ // This also protects the precondition of SkTOutOfRange_GT_MaxD.
+ using sourceCannotExceedDest = SkTHasMoreDigits<D, S>;
+ using type = skstd::conditional_t<sourceCannotExceedDest::value, LowSideOnlyCheck, FullCheck>;
+};
+
+/** SkTFitsIn_Unsigned2Signed::type is an SkTRangeChecker with an OutOfRange(S s) method
+ * the implementation of which is tailored for the source and destination types.
+ * Assumes that S is an unsigned integer type and D is a signed integer type.
+ */
+template <typename D, typename S> struct SkTFitsIn_Unsigned2Signed {
+ using OutOfRange_Low = SkTOutOfRange_False<S>;
+ using OutOfRange_High = SkTOutOfRange_GT_MaxD<D, S>;
+
+ using HighSideOnlyCheck = SkTRangeChecker<D, S, OutOfRange_Low, OutOfRange_High>;
+ using NoCheck = SkTRangeChecker<D, S, SkTOutOfRange_False<S>, SkTOutOfRange_False<S>>;
+
+ // If std::numeric_limits<D>::max() >= std::numeric_limits<S>::max(), nothing to check.
+ // (Until C++11, assume more digits means greater max.)
+ // This also protects the precondition of SkTOutOfRange_GT_MaxD.
+ using sourceCannotExceedDest = SkTHasMoreDigits<D, S>;
+ using type = skstd::conditional_t<sourceCannotExceedDest::value, NoCheck, HighSideOnlyCheck>;
+};
+
+/** SkTFitsIn::type is an SkTRangeChecker with an OutOfRange(S s) method
+ * the implementation of which is tailored for the source and destination types.
+ * Assumes that S and D are integer types.
+ */
+template <typename D, typename S> struct SkTFitsIn {
+ // One of the following will be the 'selector' type.
+ using S2S = SkTFitsIn_Signed2Signed<D, S>;
+ using S2U = SkTFitsIn_Signed2Unsigned<D, S>;
+ using U2S = SkTFitsIn_Unsigned2Signed<D, S>;
+ using U2U = SkTFitsIn_Unsigned2Unsiged<D, S>;
+
+ using S_is_signed = skstd::bool_constant<std::numeric_limits<S>::is_signed>;
+ using D_is_signed = skstd::bool_constant<std::numeric_limits<D>::is_signed>;
+
+ using selector = typename SkTMux<S_is_signed::value, D_is_signed::value,
+ S2S, S2U, U2S, U2U>::type;
+ // This type is an SkTRangeChecker.
+ using type = typename selector::type;
+};
+
+template <typename T, bool = std::is_enum<T>::value> struct underlying_type {
+ using type = skstd::underlying_type_t<T>;
+};
+template <typename T> struct underlying_type<T, false> {
+ using type = T;
+};
+
+} // namespace Private
+} // namespace sktfitsin
+
+/** Returns true if the integer source value 's' will fit in the integer destination type 'D'. */
+template <typename D, typename S> inline bool SkTFitsIn(S s) {
+ static_assert(std::is_integral<S>::value || std::is_enum<S>::value, "S must be integral.");
+ static_assert(std::is_integral<D>::value || std::is_enum<D>::value, "D must be integral.");
+
+ using RealS = typename sktfitsin::Private::underlying_type<S>::type;
+ using RealD = typename sktfitsin::Private::underlying_type<D>::type;
+
+ return !sktfitsin::Private::SkTFitsIn<RealD, RealS>::type::OutOfRange(s);
+}
+
+#endif
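
Illustrative sketch (not part of the diff): SkTFitsIn<D>(s) compiles down to exactly the
low/high comparisons the source/destination pair requires, and to nothing at all for pure
widenings, so it is cheap to use before every narrowing cast. Assumes SkTypes.h for SkASSERT:

#include "SkTFitsIn.h"
#include "SkTypes.h"

#include <stdint.h>

static int16_t checked_narrow(int32_t v) {
    SkASSERT(SkTFitsIn<int16_t>(v));             // both low and high side checked here
    return static_cast<int16_t>(v);
}

static void fits_in_examples() {
    bool a = SkTFitsIn<int16_t>(70000);          // false: exceeds INT16_MAX
    bool b = SkTFitsIn<int32_t>(3000000000u);    // false: unsigned value exceeds INT32_MAX
    bool c = SkTFitsIn<uint64_t>(42u);           // true: widening, no runtime check generated
    int16_t s = checked_narrow(123);             // passes the assert
    (void)a; (void)b; (void)c; (void)s;
}
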
diff --git a/gfx/skia/skia/include/private/SkTHash.h b/gfx/skia/skia/include/private/SkTHash.h
new file mode 100644
index 000000000..8a644e3b0
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkTHash.h
@@ -0,0 +1,301 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTHash_DEFINED
+#define SkTHash_DEFINED
+
+#include "SkChecksum.h"
+#include "SkTypes.h"
+#include "SkTemplates.h"
+
+// Before trying to use SkTHashTable, look below to see if SkTHashMap or SkTHashSet works for you.
+// They're easier to use, usually perform the same, and have fewer sharp edges.
+
+// T and K are treated as ordinary copyable C++ types.
+// Traits must have:
+// - static K GetKey(T)
+// - static uint32_t Hash(K)
+// If the key is large and stored inside T, you may want to make K a const&.
+// Similarly, if T is large you might want it to be a pointer.
+template <typename T, typename K, typename Traits = T>
+class SkTHashTable : SkNoncopyable {
+public:
+ SkTHashTable() : fCount(0), fRemoved(0), fCapacity(0) {}
+
+ // Clear the table.
+ void reset() {
+ this->~SkTHashTable();
+ new (this) SkTHashTable;
+ }
+
+ // How many entries are in the table?
+ int count() const { return fCount; }
+
+ // Approximately how many bytes of memory do we use beyond sizeof(*this)?
+ size_t approxBytesUsed() const { return fCapacity * sizeof(Slot); }
+
+ // !!!!!!!!!!!!!!!!! CAUTION !!!!!!!!!!!!!!!!!
+ // set(), find() and foreach() all allow mutable access to table entries.
+ // If you change an entry so that it no longer has the same key, all hell
+ // will break loose. Do not do that!
+ //
+ // Please prefer to use SkTHashMap or SkTHashSet, which do not have this danger.
+
+ // The pointers returned by set() and find() are valid only until the next call to set().
+ // The pointers you receive in foreach() are only valid for its duration.
+
+ // Copy val into the hash table, returning a pointer to the copy now in the table.
+ // If there already is an entry in the table with the same key, we overwrite it.
+ T* set(const T& val) {
+ if (4 * (fCount+fRemoved) >= 3 * fCapacity) {
+ this->resize(fCapacity > 0 ? fCapacity * 2 : 4);
+ }
+ return this->uncheckedSet(val);
+ }
+
+ // If there is an entry in the table with this key, return a pointer to it. If not, NULL.
+ T* find(const K& key) const {
+ uint32_t hash = Hash(key);
+ int index = hash & (fCapacity-1);
+ for (int n = 0; n < fCapacity; n++) {
+ Slot& s = fSlots[index];
+ if (s.empty()) {
+ return NULL;
+ }
+ if (!s.removed() && hash == s.hash && key == Traits::GetKey(s.val)) {
+ return &s.val;
+ }
+ index = this->next(index, n);
+ }
+ SkASSERT(fCapacity == 0);
+ return NULL;
+ }
+
+ // Remove the value with this key from the hash table.
+ void remove(const K& key) {
+ SkASSERT(this->find(key));
+
+ uint32_t hash = Hash(key);
+ int index = hash & (fCapacity-1);
+ for (int n = 0; n < fCapacity; n++) {
+ Slot& s = fSlots[index];
+ SkASSERT(!s.empty());
+ if (!s.removed() && hash == s.hash && key == Traits::GetKey(s.val)) {
+ fRemoved++;
+ fCount--;
+ s.markRemoved();
+ return;
+ }
+ index = this->next(index, n);
+ }
+ SkASSERT(fCapacity == 0);
+ }
+
+ // Call fn on every entry in the table. You may mutate the entries, but be very careful.
+ template <typename Fn> // f(T*)
+ void foreach(Fn&& fn) {
+ for (int i = 0; i < fCapacity; i++) {
+ if (!fSlots[i].empty() && !fSlots[i].removed()) {
+ fn(&fSlots[i].val);
+ }
+ }
+ }
+
+ // Call fn on every entry in the table. You may not mutate anything.
+ template <typename Fn> // f(T) or f(const T&)
+ void foreach(Fn&& fn) const {
+ for (int i = 0; i < fCapacity; i++) {
+ if (!fSlots[i].empty() && !fSlots[i].removed()) {
+ fn(fSlots[i].val);
+ }
+ }
+ }
+
+private:
+ T* uncheckedSet(const T& val) {
+ const K& key = Traits::GetKey(val);
+ uint32_t hash = Hash(key);
+ int index = hash & (fCapacity-1);
+ for (int n = 0; n < fCapacity; n++) {
+ Slot& s = fSlots[index];
+ if (s.empty() || s.removed()) {
+ // New entry.
+ if (s.removed()) {
+ fRemoved--;
+ }
+ s.val = val;
+ s.hash = hash;
+ fCount++;
+ return &s.val;
+ }
+ if (hash == s.hash && key == Traits::GetKey(s.val)) {
+ // Overwrite previous entry.
+ // Note: this triggers extra copies when adding the same value repeatedly.
+ s.val = val;
+ return &s.val;
+ }
+ index = this->next(index, n);
+ }
+ SkASSERT(false);
+ return NULL;
+ }
+
+ void resize(int capacity) {
+ int oldCapacity = fCapacity;
+ SkDEBUGCODE(int oldCount = fCount);
+
+ fCount = fRemoved = 0;
+ fCapacity = capacity;
+ SkAutoTArray<Slot> oldSlots(capacity);
+ oldSlots.swap(fSlots);
+
+ for (int i = 0; i < oldCapacity; i++) {
+ const Slot& s = oldSlots[i];
+ if (!s.empty() && !s.removed()) {
+ this->uncheckedSet(s.val);
+ }
+ }
+ SkASSERT(fCount == oldCount);
+ }
+
+ int next(int index, int n) const {
+ // A valid strategy explores all slots in [0, fCapacity) as n walks from 0 to fCapacity-1.
+ // Both of these strategies are valid:
+ //return (index + 0 + 1) & (fCapacity-1); // Linear probing.
+ return (index + n + 1) & (fCapacity-1); // Quadratic probing.
+ }
+
+ static uint32_t Hash(const K& key) {
+ uint32_t hash = Traits::Hash(key);
+ return hash < 2 ? hash+2 : hash; // We reserve hash 0 and 1 to mark empty or removed slots.
+ }
+
+ struct Slot {
+ Slot() : hash(0) {}
+ bool empty() const { return this->hash == 0; }
+ bool removed() const { return this->hash == 1; }
+
+ void markRemoved() { this->hash = 1; }
+
+ T val;
+ uint32_t hash;
+ };
+
+ int fCount, fRemoved, fCapacity;
+ SkAutoTArray<Slot> fSlots;
+};
+
+// Maps K->V. A more user-friendly wrapper around SkTHashTable, suitable for most use cases.
+// K and V are treated as ordinary copyable C++ types, with no assumed relationship between the two.
+template <typename K, typename V, typename HashK = SkGoodHash>
+class SkTHashMap : SkNoncopyable {
+public:
+ SkTHashMap() {}
+
+ // Clear the map.
+ void reset() { fTable.reset(); }
+
+ // How many key/value pairs are in the table?
+ int count() const { return fTable.count(); }
+
+ // Approximately how many bytes of memory do we use beyond sizeof(*this)?
+ size_t approxBytesUsed() const { return fTable.approxBytesUsed(); }
+
+ // N.B. The pointers returned by set() and find() are valid only until the next call to set().
+
+ // Set key to val in the table, replacing any previous value with the same key.
+ // We copy both key and val, and return a pointer to the value copy now in the table.
+ V* set(const K& key, const V& val) {
+ Pair in = { key, val };
+ Pair* out = fTable.set(in);
+ return &out->val;
+ }
+
+ // If there is key/value entry in the table with this key, return a pointer to the value.
+ // If not, return NULL.
+ V* find(const K& key) const {
+ if (Pair* p = fTable.find(key)) {
+ return &p->val;
+ }
+ return NULL;
+ }
+
+ // Remove the key/value entry in the table with this key.
+ void remove(const K& key) {
+ SkASSERT(this->find(key));
+ fTable.remove(key);
+ }
+
+ // Call fn on every key/value pair in the table. You may mutate the value but not the key.
+ template <typename Fn> // f(K, V*) or f(const K&, V*)
+ void foreach(Fn&& fn) {
+ fTable.foreach([&fn](Pair* p){ fn(p->key, &p->val); });
+ }
+
+ // Call fn on every key/value pair in the table. You may not mutate anything.
+ template <typename Fn> // f(K, V), f(const K&, V), f(K, const V&) or f(const K&, const V&).
+ void foreach(Fn&& fn) const {
+ fTable.foreach([&fn](const Pair& p){ fn(p.key, p.val); });
+ }
+
+private:
+ struct Pair {
+ K key;
+ V val;
+ static const K& GetKey(const Pair& p) { return p.key; }
+ static uint32_t Hash(const K& key) { return HashK()(key); }
+ };
+
+ SkTHashTable<Pair, K> fTable;
+};
+
+// A set of T. T is treated as an ordinary copyable C++ type.
+template <typename T, typename HashT = SkGoodHash>
+class SkTHashSet : SkNoncopyable {
+public:
+ SkTHashSet() {}
+
+ // Clear the set.
+ void reset() { fTable.reset(); }
+
+ // How many items are in the set?
+ int count() const { return fTable.count(); }
+
+ // Approximately how many bytes of memory do we use beyond sizeof(*this)?
+ size_t approxBytesUsed() const { return fTable.approxBytesUsed(); }
+
+ // Copy an item into the set.
+ void add(const T& item) { fTable.set(item); }
+
+ // Is this item in the set?
+ bool contains(const T& item) const { return SkToBool(this->find(item)); }
+
+ // If an item equal to this is in the set, return a pointer to it, otherwise null.
+ // This pointer remains valid until the next call to add().
+ const T* find(const T& item) const { return fTable.find(item); }
+
+ // Remove the item in the set equal to this.
+ void remove(const T& item) {
+ SkASSERT(this->contains(item));
+ fTable.remove(item);
+ }
+
+ // Call fn on every item in the set. You may not mutate anything.
+ template <typename Fn> // f(T), f(const T&)
+ void foreach (Fn&& fn) const {
+ fTable.foreach(fn);
+ }
+
+private:
+ struct Traits {
+ static const T& GetKey(const T& item) { return item; }
+ static uint32_t Hash(const T& item) { return HashT()(item); }
+ };
+ SkTHashTable<T, T, Traits> fTable;
+};
+
+#endif//SkTHash_DEFINED
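
Illustrative sketch (not part of the diff): SkTHashMap and SkTHashSet are the friendlier
faces of SkTHashTable; keys and values are copied in, and the default SkGoodHash (declared
in SkChecksum.h, included above) is assumed here to cover 4-byte integer keys:

#include "SkTHash.h"

static void hash_examples() {
    SkTHashMap<uint32_t, float> weights;
    weights.set(7, 0.5f);
    weights.set(7, 0.75f);                   // same key: previous value overwritten in place

    if (float* w = weights.find(7)) {
        SkASSERT(0.75f == *w);               // pointer stays valid only until the next set()
    }

    weights.foreach([](uint32_t key, float* value) {
        *value *= 2;                         // values may be mutated here; keys must not be
        (void)key;
    });

    SkTHashSet<uint32_t> seen;
    seen.add(42);
    if (!seen.contains(42)) {
        SkDEBUGFAIL("42 was just added");
    }
}
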
diff --git a/gfx/skia/skia/include/private/SkTLogic.h b/gfx/skia/skia/include/private/SkTLogic.h
new file mode 100644
index 000000000..2b12434d0
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkTLogic.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ *
+ * This header provides some of the helpers (like std::enable_if_t) which will
+ * become available with C++14 in the type_traits header (in the skstd
+ * namespace). This header also provides several Skia specific additions such
+ * as SK_WHEN and the sknonstd namespace.
+ */
+
+#ifndef SkTLogic_DEFINED
+#define SkTLogic_DEFINED
+
+#include <stddef.h>
+#include <stdint.h>
+#include <new> // see bug 981264
+#include <type_traits>
+#include <utility>
+
+namespace skstd {
+
+template <bool B> using bool_constant = std::integral_constant<bool, B>;
+
+template <bool B, typename T, typename F> using conditional_t = typename std::conditional<B, T, F>::type;
+template <bool B, typename T = void> using enable_if_t = typename std::enable_if<B, T>::type;
+
+template <typename T> using remove_const_t = typename std::remove_const<T>::type;
+template <typename T> using remove_volatile_t = typename std::remove_volatile<T>::type;
+template <typename T> using remove_cv_t = typename std::remove_cv<T>::type;
+template <typename T> using remove_pointer_t = typename std::remove_pointer<T>::type;
+template <typename T> using remove_reference_t = typename std::remove_reference<T>::type;
+template <typename T> using remove_extent_t = typename std::remove_extent<T>::type;
+
+template <typename T> using add_const_t = typename std::add_const<T>::type;
+template <typename T> using add_volatile_t = typename std::add_volatile<T>::type;
+template <typename T> using add_cv_t = typename std::add_cv<T>::type;
+template <typename T> using add_pointer_t = typename std::add_pointer<T>::type;
+template <typename T> using add_lvalue_reference_t = typename std::add_lvalue_reference<T>::type;
+
+template <typename... T> using common_type_t = typename std::common_type<T...>::type;
+
+// Chromium currently requires gcc 4.8.2 or a recent clang compiler, but uses libstdc++4.6.4.
+// Note that Precise actually uses libstdc++4.6.3.
+// Unfortunately, libstdc++ STL before libstdc++4.7 do not define std::underlying_type.
+// Newer gcc and clang compilers have __underlying_type which does not depend on runtime support.
+// See https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html for __GLIBCXX__ values.
+// Unfortunately __GLIBCXX__ is a date, but no updates to versions before 4.7 are now anticipated.
+#define SK_GLIBCXX_4_7_0 20120322
+// Updates to versions before 4.7 but released after 4.7 was released.
+#define SK_GLIBCXX_4_5_4 20120702
+#define SK_GLIBCXX_4_6_4 20121127
+#if defined(__GLIBCXX__) && (__GLIBCXX__ < SK_GLIBCXX_4_7_0 || \
+ __GLIBCXX__ == SK_GLIBCXX_4_5_4 || \
+ __GLIBCXX__ == SK_GLIBCXX_4_6_4)
+template <typename T> struct underlying_type {
+ using type = __underlying_type(T);
+};
+#else
+template <typename T> using underlying_type = std::underlying_type<T>;
+#endif
+template <typename T> using underlying_type_t = typename skstd::underlying_type<T>::type;
+
+} // namespace skstd
+
+// The sknonstd namespace contains things we would like to be proposed and feel std-ish.
+namespace sknonstd {
+
+// The name 'copy' here is fraught with peril. In this case it means 'append', not 'overwrite'.
+// Alternate proposed names are 'propagate', 'augment', or 'append' (and 'add', but already taken).
+// std::experimental::propagate_const already exists for other purposes in TSv2.
+// These also follow the <dest, source> pattern used by boost.
+template <typename D, typename S> struct copy_const {
+ using type = skstd::conditional_t<std::is_const<S>::value, skstd::add_const_t<D>, D>;
+};
+template <typename D, typename S> using copy_const_t = typename copy_const<D, S>::type;
+
+template <typename D, typename S> struct copy_volatile {
+ using type = skstd::conditional_t<std::is_volatile<S>::value, skstd::add_volatile_t<D>, D>;
+};
+template <typename D, typename S> using copy_volatile_t = typename copy_volatile<D, S>::type;
+
+template <typename D, typename S> struct copy_cv {
+ using type = copy_volatile_t<copy_const_t<D, S>, S>;
+};
+template <typename D, typename S> using copy_cv_t = typename copy_cv<D, S>::type;
+
+// The name 'same' here means 'overwrite'.
+// Alternate proposed names are 'replace', 'transfer', or 'qualify_from'.
+// same_xxx<D, S> can be written as copy_xxx<remove_xxx_t<D>, S>
+template <typename D, typename S> using same_const = copy_const<skstd::remove_const_t<D>, S>;
+template <typename D, typename S> using same_const_t = typename same_const<D, S>::type;
+template <typename D, typename S> using same_volatile =copy_volatile<skstd::remove_volatile_t<D>,S>;
+template <typename D, typename S> using same_volatile_t = typename same_volatile<D, S>::type;
+template <typename D, typename S> using same_cv = copy_cv<skstd::remove_cv_t<D>, S>;
+template <typename D, typename S> using same_cv_t = typename same_cv<D, S>::type;
+
+} // namespace sknonstd
+
+// Just a pithier wrapper for enable_if_t.
+#define SK_WHEN(condition, T) skstd::enable_if_t<!!(condition), T>
+
+#endif
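
Illustrative sketch (not part of the diff): SK_WHEN is the SFINAE switch used throughout
these headers, for example by the copy()/move() overloads in SkTArray above. A hypothetical
pair of overloads selected by a type trait looks like this:

#include "SkTLogic.h"

#include <type_traits>

// Integral types take the shift path; everything else takes the generic path.
template <typename T> SK_WHEN(std::is_integral<T>::value, T) twice(T v) {
    return T(v << 1);
}
template <typename T> SK_WHEN(!std::is_integral<T>::value, T) twice(T v) {
    return v + v;
}

static void sk_when_examples() {
    int   i = twice(21);        // 42, via the integral overload
    float f = twice(1.5f);      // 3.0f, via the generic overload
    (void)i; (void)f;
}
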
diff --git a/gfx/skia/skia/include/private/SkTSearch.h b/gfx/skia/skia/include/private/SkTSearch.h
new file mode 100644
index 000000000..549bcfd7c
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkTSearch.h
@@ -0,0 +1,146 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTSearch_DEFINED
+#define SkTSearch_DEFINED
+
+#include "SkTypes.h"
+
+/**
+ * All of the SkTSearch variants want to return the index (0...N-1) of the
+ * found element, or the bit-not of where to insert the element.
+ *
+ * At a simple level, if the return value is negative, it was not found.
+ *
+ * For clients that want to insert the new element if it was not found, use
+ * the following logic:
+ *
+ * int index = SkTSearch(...);
+ * if (index >= 0) {
+ * // found at index
+ * } else {
+ * index = ~index; // now we are positive
+ * // insert at index
+ * }
+ */
+
+
+// The most general form of SkTSearch takes an array of T and a key of type K. A functor, less, is
+// used to perform comparisons. It has two function operators:
+// bool operator() (const T& t, const K& k)
+// bool operator() (const K& t, const T& k)
+template <typename T, typename K, typename LESS>
+int SkTSearch(const T base[], int count, const K& key, size_t elemSize, LESS& less)
+{
+ SkASSERT(count >= 0);
+ if (count <= 0) {
+ return ~0;
+ }
+
+ SkASSERT(base != NULL); // base may be NULL if count is zero
+
+ int lo = 0;
+ int hi = count - 1;
+
+ while (lo < hi) {
+ int mid = lo + ((hi - lo) >> 1);
+ const T* elem = (const T*)((const char*)base + mid * elemSize);
+
+ if (less(*elem, key))
+ lo = mid + 1;
+ else
+ hi = mid;
+ }
+
+ const T* elem = (const T*)((const char*)base + hi * elemSize);
+ if (less(*elem, key)) {
+ hi += 1;
+ hi = ~hi;
+ } else if (less(key, *elem)) {
+ hi = ~hi;
+ }
+ return hi;
+}
+
+// Adapts a less-than function to a functor.
+template <typename T, bool (LESS)(const T&, const T&)> struct SkTLessFunctionToFunctorAdaptor {
+ bool operator()(const T& a, const T& b) { return LESS(a, b); }
+};
+
+// Specialization for case when T==K and the caller wants to use a function rather than functor.
+template <typename T, bool (LESS)(const T&, const T&)>
+int SkTSearch(const T base[], int count, const T& target, size_t elemSize) {
+ static SkTLessFunctionToFunctorAdaptor<T, LESS> functor;
+ return SkTSearch(base, count, target, elemSize, functor);
+}
+
+// Adapts operator < to a functor.
+template <typename T> struct SkTLessFunctor {
+ bool operator()(const T& a, const T& b) { return a < b; }
+};
+
+// Specialization for T==K, compare using op <.
+template <typename T>
+int SkTSearch(const T base[], int count, const T& target, size_t elemSize) {
+ static SkTLessFunctor<T> functor;
+ return SkTSearch(base, count, target, elemSize, functor);
+}
+
+// Similar to SkLessFunctionToFunctorAdaptor but makes the functor interface take T* rather than T.
+template <typename T, bool (LESS)(const T&, const T&)> struct SkTLessFunctionToPtrFunctorAdaptor {
+ bool operator() (const T* t, const T* k) { return LESS(*t, *k); }
+};
+
+// Specialization for case where domain is an array of T* and the key value is a T*, and you want
+// to compare the T objects, not the pointers.
+template <typename T, bool (LESS)(const T&, const T&)>
+int SkTSearch(T* base[], int count, T* target, size_t elemSize) {
+ static SkTLessFunctionToPtrFunctorAdaptor<T, LESS> functor;
+ return SkTSearch(base, count, target, elemSize, functor);
+}
+
+int SkStrSearch(const char*const* base, int count, const char target[],
+ size_t target_len, size_t elemSize);
+int SkStrSearch(const char*const* base, int count, const char target[],
+ size_t elemSize);
+
+/** Like SkStrSearch, but treats target as if it were all lower-case. Assumes that
+ base points to a table of lower-case strings.
+*/
+int SkStrLCSearch(const char*const* base, int count, const char target[],
+ size_t target_len, size_t elemSize);
+int SkStrLCSearch(const char*const* base, int count, const char target[],
+ size_t elemSize);
+
+/** Helper class to convert a string to lower-case, but only modifying the ascii
+ characters. This makes the routine very fast and never changes the string
+ length, but it is not suitable for linguistic purposes. Normally this is
+ used for building and searching string tables.
+*/
+class SkAutoAsciiToLC {
+public:
+ SkAutoAsciiToLC(const char str[], size_t len = (size_t)-1);
+ ~SkAutoAsciiToLC();
+
+ const char* lc() const { return fLC; }
+ size_t length() const { return fLength; }
+
+private:
+ char* fLC; // points to either the heap or fStorage
+ size_t fLength;
+ enum {
+ STORAGE = 64
+ };
+ char fStorage[STORAGE+1];
+};
+
+// Helper when calling qsort with a compare proc that has typed its arguments
+#define SkCastForQSort(compare) reinterpret_cast<int (*)(const void*, const void*)>(compare)
+
+#endif
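A small usage sketch from the editor (not part of the patch) for the operator< overload declared above. A non-negative return is the index of a match; a negative return is the bitwise NOT of the index where the key would have to be inserted to keep the array sorted.

    #include "SkTSearch.h"

    static void tsearchExample() {
        const int values[] = { 2, 5, 9, 17 };             // must already be sorted
        int hit  = SkTSearch(values, 4, 9, sizeof(int));  // found: returns 2
        int miss = SkTSearch(values, 4, 7, sizeof(int));  // not found: returns ~2
        if (miss < 0) {
            int insertAt = ~miss;  // inserting 7 at index 2 keeps the array sorted
            (void)insertAt;
        }
        (void)hit;
    }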
diff --git a/gfx/skia/skia/include/private/SkTemplates.h b/gfx/skia/skia/include/private/SkTemplates.h
new file mode 100644
index 000000000..01a8ec0c3
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkTemplates.h
@@ -0,0 +1,492 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTemplates_DEFINED
+#define SkTemplates_DEFINED
+
+#include "SkMath.h"
+#include "SkTLogic.h"
+#include "SkTypes.h"
+#include <limits.h>
+#include <memory>
+#include <new>
+
+/** \file SkTemplates.h
+
+ This file contains light-weight template classes for type-safe and exception-safe
+ resource management.
+*/
+
+/**
+ * Marks a local variable as known to be unused (to avoid warnings).
+ * Note that this does *not* prevent the local variable from being optimized away.
+ */
+template<typename T> inline void sk_ignore_unused_variable(const T&) { }
+
+/**
+ * Returns a pointer to a D which comes immediately after S[count].
+ */
+template <typename D, typename S> static D* SkTAfter(S* ptr, size_t count = 1) {
+ return reinterpret_cast<D*>(ptr + count);
+}
+
+/**
+ * Returns a pointer to a D which comes byteOffset bytes after S.
+ */
+template <typename D, typename S> static D* SkTAddOffset(S* ptr, size_t byteOffset) {
+ // The intermediate char* has the same cv-ness as D, as this produces better error messages.
+ // This relies on the fact that reinterpret_cast can add constness, but cannot remove it.
+ return reinterpret_cast<D*>(reinterpret_cast<sknonstd::same_cv_t<char, D>*>(ptr) + byteOffset);
+}
+
+template <typename R, typename T, R (*P)(T*)> struct SkFunctionWrapper {
+ R operator()(T* t) { return P(t); }
+};
+
+/** \class SkAutoTCallVProc
+
+ Call a function when this goes out of scope. The template uses two
+ parameters, the object, and a function that is to be called in the destructor.
+ If release() is called, the object reference is set to null. If the object
+ reference is null when the destructor is called, we do not call the
+ function.
+*/
+template <typename T, void (*P)(T*)> class SkAutoTCallVProc
+ : public std::unique_ptr<T, SkFunctionWrapper<void, T, P>> {
+public:
+ SkAutoTCallVProc(T* obj): std::unique_ptr<T, SkFunctionWrapper<void, T, P>>(obj) {}
+
+ operator T*() const { return this->get(); }
+};
+
+/** \class SkAutoTCallIProc
+
+Call a function when this goes out of scope. The template uses two
+parameters, the object, and a function that is to be called in the destructor.
+If release() is called, the object reference is set to null. If the object
+reference is null when the destructor is called, we do not call the
+function.
+*/
+template <typename T, int (*P)(T*)> class SkAutoTCallIProc
+ : public std::unique_ptr<T, SkFunctionWrapper<int, T, P>> {
+public:
+ SkAutoTCallIProc(T* obj): std::unique_ptr<T, SkFunctionWrapper<int, T, P>>(obj) {}
+
+ operator T*() const { return this->get(); }
+};
+
+/** \class SkAutoTDelete
+ An SkAutoTDelete<T> is like a T*, except that the destructor of SkAutoTDelete<T>
+ automatically deletes the pointer it holds (if any). That is, SkAutoTDelete<T>
+ owns the T object that it points to. Like a T*, an SkAutoTDelete<T> may hold
+ either NULL or a pointer to a T object. Also like T*, SkAutoTDelete<T> is
+ thread-compatible, and once you dereference it, you get the threadsafety
+ guarantees of T.
+
+ The size of a SkAutoTDelete is small: sizeof(SkAutoTDelete<T>) == sizeof(T*)
+*/
+template <typename T> class SkAutoTDelete : public std::unique_ptr<T> {
+public:
+ SkAutoTDelete(T* obj = NULL) : std::unique_ptr<T>(obj) {}
+
+ operator T*() const { return this->get(); }
+
+#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
+ // Need to update graphics/BitmapRegionDecoder.cpp.
+ T* detach() { return this->release(); }
+#endif
+};
+
+template <typename T> class SkAutoTDeleteArray : public std::unique_ptr<T[]> {
+public:
+ SkAutoTDeleteArray(T array[]) : std::unique_ptr<T[]>(array) {}
+};
+
+/** Allocate an array of T elements, and free the array in the destructor
+ */
+template <typename T> class SkAutoTArray : SkNoncopyable {
+public:
+ SkAutoTArray() {
+ fArray = NULL;
+ SkDEBUGCODE(fCount = 0;)
+ }
+ /** Allocate count number of T elements
+ */
+ explicit SkAutoTArray(int count) {
+ SkASSERT(count >= 0);
+ fArray = NULL;
+ if (count) {
+ fArray = new T[count];
+ }
+ SkDEBUGCODE(fCount = count;)
+ }
+
+ /** Reallocates given a new count. Reallocation occurs even if new count equals old count.
+ */
+ void reset(int count) {
+ delete[] fArray;
+ SkASSERT(count >= 0);
+ fArray = NULL;
+ if (count) {
+ fArray = new T[count];
+ }
+ SkDEBUGCODE(fCount = count;)
+ }
+
+ ~SkAutoTArray() { delete[] fArray; }
+
+ /** Return the array of T elements. Will be NULL if count == 0
+ */
+ T* get() const { return fArray; }
+
+ /** Return the nth element in the array
+ */
+ T& operator[](int index) const {
+ SkASSERT((unsigned)index < (unsigned)fCount);
+ return fArray[index];
+ }
+
+ void swap(SkAutoTArray& other) {
+ SkTSwap(fArray, other.fArray);
+ SkDEBUGCODE(SkTSwap(fCount, other.fCount));
+ }
+
+private:
+ T* fArray;
+ SkDEBUGCODE(int fCount;)
+};
+
+/** Wraps SkAutoTArray, with room for kCountRequested elements preallocated.
+ */
+template <int kCountRequested, typename T> class SkAutoSTArray : SkNoncopyable {
+public:
+ /** Initialize with no objects */
+ SkAutoSTArray() {
+ fArray = NULL;
+ fCount = 0;
+ }
+
+ /** Allocate count number of T elements
+ */
+ SkAutoSTArray(int count) {
+ fArray = NULL;
+ fCount = 0;
+ this->reset(count);
+ }
+
+ ~SkAutoSTArray() {
+ this->reset(0);
+ }
+
+ /** Destroys previous objects in the array and default constructs count number of objects */
+ void reset(int count) {
+ T* start = fArray;
+ T* iter = start + fCount;
+ while (iter > start) {
+ (--iter)->~T();
+ }
+
+ SkASSERT(count >= 0);
+ if (fCount != count) {
+ if (fCount > kCount) {
+ // 'fArray' was allocated last time so free it now
+ SkASSERT((T*) fStorage != fArray);
+ sk_free(fArray);
+ }
+
+ if (count > kCount) {
+ const uint64_t size64 = sk_64_mul(count, sizeof(T));
+ const size_t size = static_cast<size_t>(size64);
+ if (size != size64) {
+ sk_out_of_memory();
+ }
+ fArray = (T*) sk_malloc_throw(size);
+ } else if (count > 0) {
+ fArray = (T*) fStorage;
+ } else {
+ fArray = NULL;
+ }
+
+ fCount = count;
+ }
+
+ iter = fArray;
+ T* stop = fArray + count;
+ while (iter < stop) {
+ new (iter++) T;
+ }
+ }
+
+ /** Return the number of T elements in the array
+ */
+ int count() const { return fCount; }
+
+ /** Return the array of T elements. Will be NULL if count == 0
+ */
+ T* get() const { return fArray; }
+
+ /** Return the nth element in the array
+ */
+ T& operator[](int index) const {
+ SkASSERT(index < fCount);
+ return fArray[index];
+ }
+
+private:
+#if defined(GOOGLE3)
+ // Stack frame size is limited for GOOGLE3. 4k is less than the actual max, but some functions
+ // have multiple large stack allocations.
+ static const int kMaxBytes = 4 * 1024;
+ static const int kCount = kCountRequested * sizeof(T) > kMaxBytes
+ ? kMaxBytes / sizeof(T)
+ : kCountRequested;
+#else
+ static const int kCount = kCountRequested;
+#endif
+
+ int fCount;
+ T* fArray;
+ // since we come right after fArray, fStorage should be properly aligned
+ char fStorage[kCount * sizeof(T)];
+};
+
+/** Manages an array of T elements, freeing the array in the destructor.
+ * Does NOT call any constructors/destructors on T (T must be POD).
+ */
+template <typename T> class SkAutoTMalloc : SkNoncopyable {
+public:
+ /** Takes ownership of the ptr. The ptr must be a value which can be passed to sk_free. */
+ explicit SkAutoTMalloc(T* ptr = NULL) {
+ fPtr = ptr;
+ }
+
+ /** Allocates space for 'count' Ts. */
+ explicit SkAutoTMalloc(size_t count) {
+ fPtr = count ? (T*)sk_malloc_flags(count * sizeof(T), SK_MALLOC_THROW) : nullptr;
+ }
+
+ ~SkAutoTMalloc() {
+ sk_free(fPtr);
+ }
+
+ /** Resize the memory area pointed to by the current ptr preserving contents. */
+ void realloc(size_t count) {
+ if (count) {
+ fPtr = reinterpret_cast<T*>(sk_realloc_throw(fPtr, count * sizeof(T)));
+ } else {
+ this->reset(0);
+ }
+ }
+
+ /** Resize the memory area pointed to by the current ptr without preserving contents. */
+ T* reset(size_t count = 0) {
+ sk_free(fPtr);
+ fPtr = count ? (T*)sk_malloc_flags(count * sizeof(T), SK_MALLOC_THROW) : nullptr;
+ return fPtr;
+ }
+
+ T* get() const { return fPtr; }
+
+ operator T*() {
+ return fPtr;
+ }
+
+ operator const T*() const {
+ return fPtr;
+ }
+
+ T& operator[](int index) {
+ return fPtr[index];
+ }
+
+ const T& operator[](int index) const {
+ return fPtr[index];
+ }
+
+ /**
+ * Transfer ownership of the ptr to the caller, setting the internal
+ * pointer to NULL. Note that this differs from get(), which also returns
+ * the pointer, but it does not transfer ownership.
+ */
+ T* release() {
+ T* ptr = fPtr;
+ fPtr = NULL;
+ return ptr;
+ }
+
+private:
+ T* fPtr;
+};
+
+template <size_t kCountRequested, typename T> class SkAutoSTMalloc : SkNoncopyable {
+public:
+ SkAutoSTMalloc() : fPtr(fTStorage) {}
+
+ SkAutoSTMalloc(size_t count) {
+ if (count > kCount) {
+ fPtr = (T*)sk_malloc_flags(count * sizeof(T), SK_MALLOC_THROW | SK_MALLOC_TEMP);
+ } else if (count) {
+ fPtr = fTStorage;
+ } else {
+ fPtr = nullptr;
+ }
+ }
+
+ ~SkAutoSTMalloc() {
+ if (fPtr != fTStorage) {
+ sk_free(fPtr);
+ }
+ }
+
+ // doesn't preserve contents
+ T* reset(size_t count) {
+ if (fPtr != fTStorage) {
+ sk_free(fPtr);
+ }
+ if (count > kCount) {
+ fPtr = (T*)sk_malloc_throw(count * sizeof(T));
+ } else if (count) {
+ fPtr = fTStorage;
+ } else {
+ fPtr = nullptr;
+ }
+ return fPtr;
+ }
+
+ T* get() const { return fPtr; }
+
+ operator T*() {
+ return fPtr;
+ }
+
+ operator const T*() const {
+ return fPtr;
+ }
+
+ T& operator[](int index) {
+ return fPtr[index];
+ }
+
+ const T& operator[](int index) const {
+ return fPtr[index];
+ }
+
+ // Reallocs the array; can be used to shrink the allocation. Makes no attempt to be intelligent
+ void realloc(size_t count) {
+ if (count > kCount) {
+ if (fPtr == fTStorage) {
+ fPtr = (T*)sk_malloc_throw(count * sizeof(T));
+ memcpy(fPtr, fTStorage, kCount * sizeof(T));
+ } else {
+ fPtr = (T*)sk_realloc_throw(fPtr, count * sizeof(T));
+ }
+ } else if (count) {
+ if (fPtr != fTStorage) {
+ fPtr = (T*)sk_realloc_throw(fPtr, count * sizeof(T));
+ }
+ } else {
+ this->reset(0);
+ }
+ }
+
+private:
+ // Since we use uint32_t storage, we might be able to get more elements for free.
+ static const size_t kCountWithPadding = SkAlign4(kCountRequested*sizeof(T)) / sizeof(T);
+#if defined(GOOGLE3)
+ // Stack frame size is limited for GOOGLE3. 4k is less than the actual max, but some functions
+ // have multiple large stack allocations.
+ static const size_t kMaxBytes = 4 * 1024;
+ static const size_t kCount = kCountRequested * sizeof(T) > kMaxBytes
+ ? kMaxBytes / sizeof(T)
+ : kCountWithPadding;
+#else
+ static const size_t kCount = kCountWithPadding;
+#endif
+
+ T* fPtr;
+ union {
+ uint32_t fStorage32[SkAlign4(kCount*sizeof(T)) >> 2];
+ T fTStorage[1]; // do NOT want to invoke T::T()
+ };
+};
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Pass the object and the storage that was offered during SkInPlaceNewCheck, and this will
+ * safely destroy (and free if it was dynamically allocated) the object.
+ */
+template <typename T> void SkInPlaceDeleteCheck(T* obj, void* storage) {
+ if (storage == obj) {
+ obj->~T();
+ } else {
+ delete obj;
+ }
+}
+
+/**
+ * Allocates T, using storage if it is large enough, and allocating on the heap (via new) if
+ * storage is not large enough.
+ *
+ * obj = SkInPlaceNewCheck<Type>(storage, size);
+ * ...
+ * SkInPlaceDeleteCheck(obj, storage);
+ */
+template <typename T> T* SkInPlaceNewCheck(void* storage, size_t size) {
+ return (sizeof(T) <= size) ? new (storage) T : new T;
+}
+
+template <typename T, typename A1, typename A2, typename A3>
+T* SkInPlaceNewCheck(void* storage, size_t size, const A1& a1, const A2& a2, const A3& a3) {
+ return (sizeof(T) <= size) ? new (storage) T(a1, a2, a3) : new T(a1, a2, a3);
+}
+
+template <typename T, typename A1, typename A2, typename A3, typename A4>
+T* SkInPlaceNewCheck(void* storage, size_t size,
+ const A1& a1, const A2& a2, const A3& a3, const A4& a4) {
+ return (sizeof(T) <= size) ? new (storage) T(a1, a2, a3, a4) : new T(a1, a2, a3, a4);
+}
+
+/**
+ * Reserves memory that is aligned on double and pointer boundaries.
+ * Hopefully this is sufficient for all practical purposes.
+ */
+template <size_t N> class SkAlignedSStorage : SkNoncopyable {
+public:
+ size_t size() const { return N; }
+ void* get() { return fData; }
+ const void* get() const { return fData; }
+
+private:
+ union {
+ void* fPtr;
+ double fDouble;
+ char fData[N];
+ };
+};
+
+/**
+ * Reserves memory that is aligned on double and pointer boundaries.
+ * Hopefully this is sufficient for all practical purposes. Otherwise,
+ * we have to do some arcane trickery to determine alignment of non-POD
+ * types. Lifetime of the memory is the lifetime of the object.
+ */
+template <int N, typename T> class SkAlignedSTStorage : SkNoncopyable {
+public:
+ /**
+ * Returns void* because this object does not initialize the
+ * memory. Use placement new for types that require a cons.
+ */
+ void* get() { return fStorage.get(); }
+ const void* get() const { return fStorage.get(); }
+private:
+ SkAlignedSStorage<sizeof(T)*N> fStorage;
+};
+
+#endif
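A short sketch from the editor (not part of the patch; the function name and the count 16 are made up) showing the stack-with-heap-fallback idiom that SkAutoSTMalloc above provides.

    #include "SkTemplates.h"

    static void fillSquares(int n) {
        // Uses the 16-element inline buffer when n <= 16 and falls back to a heap
        // allocation otherwise; the destructor releases whichever storage was used.
        SkAutoSTMalloc<16, int> squares(n);
        for (int i = 0; i < n; ++i) {
            squares[i] = i * i;
        }
    }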
diff --git a/gfx/skia/skia/include/private/SkThreadID.h b/gfx/skia/skia/include/private/SkThreadID.h
new file mode 100644
index 000000000..a210a929f
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkThreadID.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkThreadID_DEFINED
+#define SkThreadID_DEFINED
+
+#include "SkTypes.h"
+
+typedef int64_t SkThreadID;
+
+SkThreadID SkGetThreadID();
+
+const SkThreadID kIllegalThreadID = 0;
+
+#endif // SkThreadID_DEFINED
diff --git a/gfx/skia/skia/include/private/SkWeakRefCnt.h b/gfx/skia/skia/include/private/SkWeakRefCnt.h
new file mode 100644
index 000000000..d6631e946
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkWeakRefCnt.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkWeakRefCnt_DEFINED
+#define SkWeakRefCnt_DEFINED
+
+#include "SkRefCnt.h"
+#include <atomic>
+
+/** \class SkWeakRefCnt
+
+ SkWeakRefCnt is the base class for objects that may be shared by multiple
+ objects. When an existing strong owner wants to share a reference, it calls
+ ref(). When a strong owner wants to release its reference, it calls
+ unref(). When the shared object's strong reference count goes to zero as
+ the result of an unref() call, its (virtual) weak_dispose method is called.
+ It is an error for the destructor to be called explicitly (or via the
+ object going out of scope on the stack or calling delete) if
+ getRefCnt() > 1.
+
+ In addition to strong ownership, an owner may instead obtain a weak
+ reference by calling weak_ref(). A call to weak_ref() must be balanced by a
+ call to weak_unref(). To obtain a strong reference from a weak reference,
+ call try_ref(). If try_ref() returns true, the owner's pointer is now also
+ a strong reference on which unref() must be called. Note that this does not
+ affect the original weak reference, weak_unref() must still be called. When
+ the weak reference count goes to zero, the object is deleted. While the
+ weak reference count is positive and the strong reference count is zero the
+ object still exists, but will be in the disposed state. It is up to the
+ object to define what this means.
+
+ Note that a strong reference implicitly implies a weak reference. As a
+ result, it is allowable for the owner of a strong ref to call try_ref().
+ This will have the same effect as calling ref(), but may be more expensive.
+
+ Example:
+
+ SkWeakRefCnt myRef = strongRef.weak_ref();
+ ... // strongRef.unref() may or may not be called
+ if (myRef.try_ref()) {
+ ... // use myRef
+ myRef.unref();
+ } else {
+ // myRef is in the disposed state
+ }
+ myRef.weak_unref();
+*/
+class SK_API SkWeakRefCnt : public SkRefCnt {
+public:
+ /** Default construct, initializing the reference counts to 1.
+ The strong references collectively hold one weak reference. When the
+ strong reference count goes to zero, the collectively held weak
+ reference is released.
+ */
+ SkWeakRefCnt() : SkRefCnt(), fWeakCnt(1) {}
+
+ /** Destruct, asserting that the weak reference count is 1.
+ */
+ virtual ~SkWeakRefCnt() {
+#ifdef SK_DEBUG
+ SkASSERT(getWeakCnt() == 1);
+ fWeakCnt.store(0, std::memory_order_relaxed);
+#endif
+ }
+
+#ifdef SK_DEBUG
+ /** Return the weak reference count. */
+ int32_t getWeakCnt() const {
+ return fWeakCnt.load(std::memory_order_relaxed);
+ }
+
+ void validate() const {
+ this->INHERITED::validate();
+ SkASSERT(getWeakCnt() > 0);
+ }
+#endif
+
+private:
+ /** If fRefCnt is 0, returns 0.
+ * Otherwise increments fRefCnt, acquires, and returns the old value.
+ */
+ int32_t atomic_conditional_acquire_strong_ref() const {
+ int32_t prev = fRefCnt.load(std::memory_order_relaxed);
+ do {
+ if (0 == prev) {
+ break;
+ }
+ } while(!fRefCnt.compare_exchange_weak(prev, prev+1, std::memory_order_acquire,
+ std::memory_order_relaxed));
+ return prev;
+ }
+
+public:
+ /** Creates a strong reference from a weak reference, if possible. The
+ caller must already be an owner. If try_ref() returns true the owner
+ is in possession of an additional strong reference. Both the original
+ reference and new reference must be properly unreferenced. If try_ref()
+ returns false, no strong reference could be created and the owner's
+ reference is in the same state as before the call.
+ */
+ bool SK_WARN_UNUSED_RESULT try_ref() const {
+ if (atomic_conditional_acquire_strong_ref() != 0) {
+ // Acquire barrier (L/SL), if not provided above.
+ // Prevents subsequent code from happening before the increment.
+ return true;
+ }
+ return false;
+ }
+
+ /** Increment the weak reference count. Must be balanced by a call to
+ weak_unref().
+ */
+ void weak_ref() const {
+ SkASSERT(getRefCnt() > 0);
+ SkASSERT(getWeakCnt() > 0);
+ // No barrier required.
+ (void)fWeakCnt.fetch_add(+1, std::memory_order_relaxed);
+ }
+
+ /** Decrement the weak reference count. If the weak reference count is 1
+ before the decrement, then call delete on the object. Note that if this
+ is the case, then the object needs to have been allocated via new, and
+ not on the stack.
+ */
+ void weak_unref() const {
+ SkASSERT(getWeakCnt() > 0);
+ // A release here acts in place of all releases we "should" have been doing in ref().
+ if (1 == fWeakCnt.fetch_add(-1, std::memory_order_acq_rel)) {
+ // Like try_ref(), the acquire is only needed on success, to make sure
+ // code in internal_dispose() doesn't happen before the decrement.
+#ifdef SK_DEBUG
+ // so our destructor won't complain
+ fWeakCnt.store(1, std::memory_order_relaxed);
+#endif
+ this->INHERITED::internal_dispose();
+ }
+ }
+
+ /** Returns true if there are no strong references to the object. When this
+ is the case all future calls to try_ref() will return false.
+ */
+ bool weak_expired() const {
+ return fRefCnt.load(std::memory_order_relaxed) == 0;
+ }
+
+protected:
+ /** Called when the strong reference count goes to zero. This allows the
+ object to free any resources it may be holding. Weak references may
+ still exist and their level of allowed access to the object is defined
+ by the object's class.
+ */
+ virtual void weak_dispose() const {
+ }
+
+private:
+ /** Called when the strong reference count goes to zero. Calls weak_dispose
+ on the object and releases the implicit weak reference held
+ collectively by the strong references.
+ */
+ void internal_dispose() const override {
+ weak_dispose();
+ weak_unref();
+ }
+
+ /* Invariant: fWeakCnt = #weak + (fRefCnt > 0 ? 1 : 0) */
+ mutable std::atomic<int32_t> fWeakCnt;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
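A compilable variant of the pattern sketched in the class comment above (an editor's illustration, not part of the patch; the function name is made up).

    #include "SkWeakRefCnt.h"

    static void promoteWeakRef(SkWeakRefCnt* strong) {
        SkWeakRefCnt* weak = strong;
        weak->weak_ref();         // hold a weak reference alongside the strong one
        strong->unref();          // the strong owner may release at any time
        if (weak->try_ref()) {    // promotion succeeds only while strong refs remain
            // ... use the object through `weak` ...
            weak->unref();        // balance the strong ref acquired by try_ref()
        }
        weak->weak_unref();       // balance weak_ref(); may delete the object
    }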
diff --git a/gfx/skia/skia/include/svg/SkSVGCanvas.h b/gfx/skia/skia/include/svg/SkSVGCanvas.h
new file mode 100644
index 000000000..e285faa45
--- /dev/null
+++ b/gfx/skia/skia/include/svg/SkSVGCanvas.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSVGCanvas_DEFINED
+#define SkSVGCanvas_DEFINED
+
+#include "SkCanvas.h"
+
+class SkXMLWriter;
+
+class SK_API SkSVGCanvas {
+public:
+ /**
+ * Returns a new canvas that will generate SVG commands from its draw calls, and send
+ * them to the provided xmlwriter. Ownership of the xmlwriter is not transferred to the canvas,
+ * but it must stay valid during the lifetime of the returned canvas.
+ *
+ * The canvas may buffer some drawing calls, so the output is not guaranteed to be valid
+ * or complete until the canvas instance is deleted.
+ *
+ * The 'bounds' parameter defines an initial SVG viewport (viewBox attribute on the root
+ * SVG element).
+ */
+ static SkCanvas* Create(const SkRect& bounds, SkXMLWriter*);
+};
+
+#endif
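A minimal sketch from the editor (not part of the patch). It assumes SkFILEWStream from SkStream.h and SkXMLStreamWriter from SkXMLWriter.h are available in this snapshot; the file name and shape are arbitrary.

    #include "SkSVGCanvas.h"
    #include "SkPaint.h"
    #include "SkStream.h"
    #include "SkXMLWriter.h"
    #include <memory>

    static void writeCircleSvg(const char path[]) {
        SkFILEWStream stream(path);
        SkXMLStreamWriter writer(&stream);
        std::unique_ptr<SkCanvas> canvas(SkSVGCanvas::Create(SkRect::MakeWH(100, 100), &writer));
        SkPaint paint;
        canvas->drawCircle(50, 50, 40, paint);
        // Destroying the canvas flushes any buffered drawing into the writer.
    }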
diff --git a/gfx/skia/skia/include/utils/SkBoundaryPatch.h b/gfx/skia/skia/include/utils/SkBoundaryPatch.h
new file mode 100644
index 000000000..f8edd594b
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkBoundaryPatch.h
@@ -0,0 +1,66 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkBoundaryPatch_DEFINED
+#define SkBoundaryPatch_DEFINED
+
+#include "SkPoint.h"
+#include "SkRefCnt.h"
+
+class SkBoundary : public SkRefCnt {
+public:
+
+
+ // These must be 0, 1, 2, 3 for efficiency in the subclass implementations
+ enum Edge {
+ kTop = 0,
+ kRight = 1,
+ kBottom = 2,
+ kLeft = 3
+ };
+ // Edge index goes clockwise around the boundary, beginning at the "top"
+ virtual SkPoint eval(Edge, SkScalar unitInterval) = 0;
+
+private:
+ typedef SkRefCnt INHERITED;
+};
+
+class SkBoundaryPatch {
+public:
+ SkBoundaryPatch();
+ ~SkBoundaryPatch();
+
+ SkBoundary* getBoundary() const { return fBoundary; }
+ SkBoundary* setBoundary(SkBoundary*);
+
+ SkPoint eval(SkScalar unitU, SkScalar unitV);
+ bool evalPatch(SkPoint verts[], int rows, int cols);
+
+private:
+ SkBoundary* fBoundary;
+};
+
+////////////////////////////////////////////////////////////////////////
+
+class SkLineBoundary : public SkBoundary {
+public:
+ SkPoint fPts[4];
+
+ // override
+ virtual SkPoint eval(Edge, SkScalar);
+};
+
+class SkCubicBoundary : public SkBoundary {
+public:
+ // the caller sets the first 12 entries. The 13th is used by the impl.
+ SkPoint fPts[13];
+
+ // override
+ virtual SkPoint eval(Edge, SkScalar);
+};
+
+#endif
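A usage sketch from the editor (not part of the patch): evaluate a 4x4 grid of points across a patch bounded by four straight edges. It assumes the usual Skia setter convention, i.e. setBoundary() refs the boundary and the patch releases it on destruction.

    #include "SkBoundaryPatch.h"

    static void evalLinePatch(SkPoint verts[16]) {
        SkLineBoundary* boundary = new SkLineBoundary;
        boundary->fPts[0].set(0, 0);        // edges run clockwise from the top
        boundary->fPts[1].set(100, 0);
        boundary->fPts[2].set(100, 100);
        boundary->fPts[3].set(0, 100);
        {
            SkBoundaryPatch patch;
            patch.setBoundary(boundary);
            patch.evalPatch(verts, 4, 4);   // fills a 4x4 grid of patch points
        }
        boundary->unref();                  // drop the creation ref once the patch is gone
    }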
diff --git a/gfx/skia/skia/include/utils/SkCamera.h b/gfx/skia/skia/include/utils/SkCamera.h
new file mode 100644
index 000000000..4b77ec685
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkCamera.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Inspired by Rob Johnson's most excellent QuickDraw GX sample code
+
+#ifndef SkCamera_DEFINED
+#define SkCamera_DEFINED
+
+#include "SkMatrix.h"
+
+class SkCanvas;
+
+struct SkUnit3D {
+ SkScalar fX, fY, fZ;
+
+ void set(SkScalar x, SkScalar y, SkScalar z) {
+ fX = x; fY = y; fZ = z;
+ }
+ static SkScalar Dot(const SkUnit3D&, const SkUnit3D&);
+ static void Cross(const SkUnit3D&, const SkUnit3D&, SkUnit3D* cross);
+};
+
+struct SkPoint3D {
+ SkScalar fX, fY, fZ;
+
+ void set(SkScalar x, SkScalar y, SkScalar z) {
+ fX = x; fY = y; fZ = z;
+ }
+ SkScalar normalize(SkUnit3D*) const;
+};
+typedef SkPoint3D SkVector3D;
+
+struct SkMatrix3D {
+ SkScalar fMat[3][4];
+
+ void reset();
+
+ void setRow(int row, SkScalar a, SkScalar b, SkScalar c, SkScalar d = 0) {
+ SkASSERT((unsigned)row < 3);
+ fMat[row][0] = a;
+ fMat[row][1] = b;
+ fMat[row][2] = c;
+ fMat[row][3] = d;
+ }
+
+ void setRotateX(SkScalar deg);
+ void setRotateY(SkScalar deg);
+ void setRotateZ(SkScalar deg);
+ void setTranslate(SkScalar x, SkScalar y, SkScalar z);
+
+ void preRotateX(SkScalar deg);
+ void preRotateY(SkScalar deg);
+ void preRotateZ(SkScalar deg);
+ void preTranslate(SkScalar x, SkScalar y, SkScalar z);
+
+ void setConcat(const SkMatrix3D& a, const SkMatrix3D& b);
+ void mapPoint(const SkPoint3D& src, SkPoint3D* dst) const;
+ void mapVector(const SkVector3D& src, SkVector3D* dst) const;
+
+ void mapPoint(SkPoint3D* v) const {
+ this->mapPoint(*v, v);
+ }
+
+ void mapVector(SkVector3D* v) const {
+ this->mapVector(*v, v);
+ }
+};
+
+class SkPatch3D {
+public:
+ SkPatch3D();
+
+ void reset();
+ void transform(const SkMatrix3D&, SkPatch3D* dst = NULL) const;
+
+ // dot a unit vector with the patch's normal
+ SkScalar dotWith(SkScalar dx, SkScalar dy, SkScalar dz) const;
+ SkScalar dotWith(const SkVector3D& v) const {
+ return this->dotWith(v.fX, v.fY, v.fZ);
+ }
+
+ // deprecated, but still here for animator (for now)
+ void rotate(SkScalar /*x*/, SkScalar /*y*/, SkScalar /*z*/) {}
+ void rotateDegrees(SkScalar /*x*/, SkScalar /*y*/, SkScalar /*z*/) {}
+
+private:
+public: // make public for SkDraw3D for now
+ SkVector3D fU, fV;
+ SkPoint3D fOrigin;
+
+ friend class SkCamera3D;
+};
+
+class SkCamera3D {
+public:
+ SkCamera3D();
+
+ void reset();
+ void update();
+ void patchToMatrix(const SkPatch3D&, SkMatrix* matrix) const;
+
+ SkPoint3D fLocation;
+ SkPoint3D fAxis;
+ SkPoint3D fZenith;
+ SkPoint3D fObserver;
+
+private:
+ mutable SkMatrix fOrientation;
+ mutable bool fNeedToUpdate;
+
+ void doUpdate() const;
+};
+
+class Sk3DView : SkNoncopyable {
+public:
+ Sk3DView();
+ ~Sk3DView();
+
+ void save();
+ void restore();
+
+ void translate(SkScalar x, SkScalar y, SkScalar z);
+ void rotateX(SkScalar deg);
+ void rotateY(SkScalar deg);
+ void rotateZ(SkScalar deg);
+
+#ifdef SK_BUILD_FOR_ANDROID
+ void setCameraLocation(SkScalar x, SkScalar y, SkScalar z);
+ SkScalar getCameraLocationX();
+ SkScalar getCameraLocationY();
+ SkScalar getCameraLocationZ();
+#endif
+
+ void getMatrix(SkMatrix*) const;
+ void applyToCanvas(SkCanvas*) const;
+
+ SkScalar dotWithNormal(SkScalar dx, SkScalar dy, SkScalar dz) const;
+
+private:
+ struct Rec {
+ Rec* fNext;
+ SkMatrix3D fMatrix;
+ };
+ Rec* fRec;
+ Rec fInitialRec;
+ SkCamera3D fCamera;
+};
+
+#endif
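A minimal sketch from the editor (not part of the patch): Sk3DView is the simplest entry point into this header, applying a 3D rotation to ordinary 2D drawing.

    #include "SkCamera.h"
    #include "SkCanvas.h"
    #include "SkPaint.h"

    static void drawTilted(SkCanvas* canvas, const SkPaint& paint) {
        Sk3DView view;
        view.save();
        view.rotateX(30);              // degrees
        canvas->save();
        view.applyToCanvas(canvas);    // concatenates the projected 3D matrix
        canvas->drawRect(SkRect::MakeWH(100, 100), paint);
        canvas->restore();
        view.restore();
    }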
diff --git a/gfx/skia/skia/include/utils/SkCanvasStateUtils.h b/gfx/skia/skia/include/utils/SkCanvasStateUtils.h
new file mode 100644
index 000000000..3071c7547
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkCanvasStateUtils.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCanvasStateUtils_DEFINED
+#define SkCanvasStateUtils_DEFINED
+
+#include "SkCanvas.h"
+
+class SkCanvasState;
+
+/**
+ * A set of functions that are useful for copying the state of an SkCanvas
+ * across a library boundary where the Skia library on the other side of the
+ * boundary may be newer. The expected usage is outlined below...
+ *
+ * Lib Boundary
+ * CaptureCanvasState(...) |||
+ * SkCanvas --> SkCanvasState |||
+ * ||| CreateFromCanvasState(...)
+ * ||| SkCanvasState --> SkCanvas`
+ * ||| Draw into SkCanvas`
+ * ||| Unref SkCanvas`
+ * ReleaseCanvasState(...) |||
+ *
+ */
+class SK_API SkCanvasStateUtils {
+public:
+ /**
+ * Captures the current state of the canvas into an opaque ptr that is safe
+ * to pass to a different instance of Skia (which may be the same version,
+ * or may be newer). The function will return NULL in the event that one of the
+ * following conditions is true.
+ * 1) the canvas device type is not supported (currently only raster is supported)
+ * 2) the canvas clip type is not supported (currently only non-AA clips are supported)
+ *
+ * It is recommended that the original canvas also not be used until all
+ * canvases that have been created using its captured state have been dereferenced.
+ *
+ * Finally, it is important to note that any draw filters attached to the
+ * canvas are NOT currently captured.
+ *
+ * @param canvas The canvas you wish to capture the current state of.
+ * @return NULL or an opaque ptr that can be passed to CreateFromCanvasState
+ * to reconstruct the canvas. The caller is responsible for calling
+ * ReleaseCanvasState to free the memory associated with this state.
+ */
+ static SkCanvasState* CaptureCanvasState(SkCanvas* canvas);
+
+ /**
+ * Create a new SkCanvas from the captured state of another SkCanvas. The
+ * function will return NULL in the event that one of the
+ * following conditions is true.
+ * 1) the captured state is in an unrecognized format
+ * 2) the captured canvas device type is not supported
+ *
+ * @param state Opaque object created by CaptureCanvasState.
+ * @return NULL or an SkCanvas* whose devices and matrix/clip state are
+ * identical to the captured canvas. The caller is responsible for
+ * calling unref on the SkCanvas.
+ */
+ static SkCanvas* CreateFromCanvasState(const SkCanvasState* state);
+
+ /**
+ * Free the memory associated with the captured canvas state. The state
+ * should not be released until all SkCanvas objects created using that
+ * state have been dereferenced. Must be called from the same library
+ * instance that created the state via CaptureCanvasState.
+ *
+ * @param state The captured state you wish to dispose of.
+ */
+ static void ReleaseCanvasState(SkCanvasState* state);
+};
+
+#endif
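A sketch of the capture/recreate/release flow described above (an editor's illustration, not part of the patch; error handling is reduced to early returns).

    #include "SkCanvasStateUtils.h"

    static void drawAcrossBoundary(SkCanvas* original) {
        SkCanvasState* state = SkCanvasStateUtils::CaptureCanvasState(original);
        if (!state) {
            return;                // unsupported device or clip type
        }
        // ... hand `state` across the library boundary ...
        SkCanvas* mirror = SkCanvasStateUtils::CreateFromCanvasState(state);
        if (mirror) {
            // ... draw into `mirror` ...
            mirror->unref();       // the caller owns the returned canvas
        }
        SkCanvasStateUtils::ReleaseCanvasState(state);
    }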
diff --git a/gfx/skia/skia/include/utils/SkDumpCanvas.h b/gfx/skia/skia/include/utils/SkDumpCanvas.h
new file mode 100644
index 000000000..e11185336
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkDumpCanvas.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDumpCanvas_DEFINED
+#define SkDumpCanvas_DEFINED
+
+#include "SkCanvas.h"
+
+/** This class overrides all the draw methods on SkCanvas, and formats them
+ as text, and then sends that to a Dumper helper object.
+
+ Typical use might be to dump a display list to a log file to see what is
+ being drawn.
+ */
+class SkDumpCanvas : public SkCanvas {
+public:
+ class Dumper;
+
+ explicit SkDumpCanvas(Dumper* = 0);
+ virtual ~SkDumpCanvas();
+
+ enum Verb {
+ kNULL_Verb,
+
+ kSave_Verb,
+ kRestore_Verb,
+
+ kMatrix_Verb,
+
+ kClip_Verb,
+
+ kDrawPaint_Verb,
+ kDrawPoints_Verb,
+ kDrawOval_Verb,
+ kDrawArc_Verb,
+ kDrawRect_Verb,
+ kDrawRRect_Verb,
+ kDrawDRRect_Verb,
+ kDrawPath_Verb,
+ kDrawBitmap_Verb,
+ kDrawText_Verb,
+ kDrawPicture_Verb,
+ kDrawVertices_Verb,
+ kDrawPatch_Verb,
+ kDrawData_Verb, // obsolete
+ kDrawAnnotation_Verb,
+
+ kCull_Verb
+ };
+
+ /** Subclasses of this are installed on the DumpCanvas, and then called for
+ each drawing command.
+ */
+ class Dumper : public SkRefCnt {
+ public:
+
+
+ virtual void dump(SkDumpCanvas*, SkDumpCanvas::Verb, const char str[],
+ const SkPaint*) = 0;
+
+ private:
+ typedef SkRefCnt INHERITED;
+ };
+
+ Dumper* getDumper() const { return fDumper; }
+ void setDumper(Dumper*);
+
+ int getNestLevel() const { return fNestLevel; }
+
+protected:
+ void willSave() override;
+ SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec&) override;
+ void willRestore() override;
+
+ void didConcat(const SkMatrix&) override;
+ void didSetMatrix(const SkMatrix&) override;
+
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override;
+ virtual void onDrawText(const void* text, size_t byteLength, SkScalar x, SkScalar y,
+ const SkPaint&) override;
+ virtual void onDrawPosText(const void* text, size_t byteLength, const SkPoint pos[],
+ const SkPaint&) override;
+ virtual void onDrawPosTextH(const void* text, size_t byteLength, const SkScalar xpos[],
+ SkScalar constY, const SkPaint&) override;
+ virtual void onDrawTextOnPath(const void* text, size_t byteLength, const SkPath& path,
+ const SkMatrix* matrix, const SkPaint&) override;
+ void onDrawTextRSXform(const void* text, size_t byteLength, const SkRSXform xform[],
+ const SkRect* cull, const SkPaint& paint) override;
+ virtual void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) override;
+ virtual void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkXfermode* xmode,
+ const SkPaint& paint) override;
+
+ void onDrawPaint(const SkPaint&) override;
+ void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&) override;
+ void onDrawRect(const SkRect&, const SkPaint&) override;
+ void onDrawOval(const SkRect&, const SkPaint&) override;
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override;
+ void onDrawRRect(const SkRRect&, const SkPaint&) override;
+ void onDrawPath(const SkPath&, const SkPaint&) override;
+ void onDrawBitmap(const SkBitmap&, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawBitmapRect(const SkBitmap&, const SkRect* src, const SkRect& dst, const SkPaint*,
+ SrcRectConstraint) override;
+ void onDrawImage(const SkImage*, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawImageRect(const SkImage*, const SkRect* src, const SkRect& dst,
+ const SkPaint*, SrcRectConstraint) override;
+ void onDrawBitmapNine(const SkBitmap&, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawVertices(VertexMode vmode, int vertexCount,
+ const SkPoint vertices[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint&) override;
+
+ void onClipRect(const SkRect&, ClipOp, ClipEdgeStyle) override;
+ void onClipRRect(const SkRRect&, ClipOp, ClipEdgeStyle) override;
+ void onClipPath(const SkPath&, ClipOp, ClipEdgeStyle) override;
+ void onClipRegion(const SkRegion&, ClipOp) override;
+
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override;
+ void onDrawAnnotation(const SkRect&, const char key[], SkData* value) override;
+
+ static const char* EdgeStyleToAAString(ClipEdgeStyle edgeStyle);
+
+private:
+ Dumper* fDumper;
+ int fNestLevel; // for nesting recursive elements like pictures
+
+ void dump(Verb, const SkPaint*, const char format[], ...);
+
+ typedef SkCanvas INHERITED;
+};
+
+/** Formats the draw commands and sends them to a function-pointer provided
+ by the caller.
+ */
+class SkFormatDumper : public SkDumpCanvas::Dumper {
+public:
+ SkFormatDumper(void (*)(const char text[], void* refcon), void* refcon);
+
+ // override from baseclass that does the formatting, and in turn calls
+ // the function pointer that was passed to the constructor
+ virtual void dump(SkDumpCanvas*, SkDumpCanvas::Verb, const char str[],
+ const SkPaint*) override;
+
+private:
+ void (*fProc)(const char*, void*);
+ void* fRefcon;
+
+ typedef SkDumpCanvas::Dumper INHERITED;
+};
+
+/** Subclass of Dumper that dumps the drawing command to SkDebugf
+ */
+class SkDebugfDumper : public SkFormatDumper {
+public:
+ SkDebugfDumper();
+
+private:
+ typedef SkFormatDumper INHERITED;
+};
+
+#endif
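A usage sketch from the editor (not part of the patch): replay a picture through SkDumpCanvas with the SkDebugfDumper declared above, so every draw command is logged.

    #include "SkDumpCanvas.h"
    #include "SkPicture.h"

    static void dumpPicture(const SkPicture* picture) {
        SkDebugfDumper dumper;
        SkDumpCanvas canvas(&dumper);
        picture->playback(&canvas);   // each command is formatted and sent to SkDebugf
    }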
diff --git a/gfx/skia/skia/include/utils/SkEventTracer.h b/gfx/skia/skia/include/utils/SkEventTracer.h
new file mode 100644
index 000000000..f4f8676a7
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkEventTracer.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2014 Google Inc. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEventTracer_DEFINED
+#define SkEventTracer_DEFINED
+
+// The class in this header defines the interface between Skia's internal
+// tracing macros and an external entity (e.g., Chrome) that will consume them.
+// Such an entity should subclass SkEventTracer and provide an instance of
+// that event to SkEventTracer::SetInstance.
+
+// If you're looking for the tracing macros to instrument Skia itself, those
+// live in src/core/SkTraceEvent.h
+
+#include "SkTypes.h"
+
+// This will mark the trace event as disabled by default. The user will need
+// to explicitly enable the event.
+#define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name
+
+class SK_API SkEventTracer {
+public:
+
+ typedef uint64_t Handle;
+
+ static SkEventTracer* GetInstance();
+
+ static void SetInstance(SkEventTracer*);
+
+ virtual ~SkEventTracer() { }
+
+ // The pointer returned from GetCategoryGroupEnabled() points to a
+ // value with zero or more of the following bits. Used in this class only.
+ // The TRACE_EVENT macros should only use the value as a bool.
+ // These values must be in sync with macro values in trace_event.h in chromium.
+ enum CategoryGroupEnabledFlags {
+ // Category group enabled for the recording mode.
+ kEnabledForRecording_CategoryGroupEnabledFlags = 1 << 0,
+ // Category group enabled for the monitoring mode.
+ kEnabledForMonitoring_CategoryGroupEnabledFlags = 1 << 1,
+ // Category group enabled by SetEventCallbackEnabled().
+ kEnabledForEventCallback_CategoryGroupEnabledFlags = 1 << 2,
+ };
+
+ virtual const uint8_t* getCategoryGroupEnabled(const char* name) = 0;
+ virtual const char* getCategoryGroupName(
+ const uint8_t* categoryEnabledFlag) = 0;
+
+ virtual SkEventTracer::Handle
+ addTraceEvent(char phase,
+ const uint8_t* categoryEnabledFlag,
+ const char* name,
+ uint64_t id,
+ int32_t numArgs,
+ const char** argNames,
+ const uint8_t* argTypes,
+ const uint64_t* argValues,
+ uint8_t flags) = 0;
+
+ virtual void
+ updateTraceEventDuration(const uint8_t* categoryEnabledFlag,
+ const char* name,
+ SkEventTracer::Handle handle) = 0;
+};
+
+#endif // SkEventTracer_DEFINED
diff --git a/gfx/skia/skia/include/utils/SkFrontBufferedStream.h b/gfx/skia/skia/include/utils/SkFrontBufferedStream.h
new file mode 100644
index 000000000..bfc2728ef
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkFrontBufferedStream.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+
+class SkStream;
+class SkStreamRewindable;
+
+/**
+ * Specialized stream that buffers the first X bytes of a stream,
+ * where X is passed in by the user. Note that unlike some buffered
+ * stream APIs, once more bytes than can fit in the buffer are read,
+ * no more buffering is done. This stream is designed for a use case
+ * where the caller knows that rewind will only be called from within
+ * X bytes (inclusive), and the wrapped stream is not necessarily
+ * able to rewind at all.
+ */
+class SkFrontBufferedStream {
+public:
+ /**
+ * Creates a new stream that wraps and buffers an SkStream.
+ * @param stream SkStream to buffer. If stream is NULL, NULL is
+ * returned. When this call succeeds (i.e. returns non NULL),
+ * SkFrontBufferedStream is expected to be the only owner of
+ * stream, so it should no longer be used directly.
+ * SkFrontBufferedStream will delete stream upon deletion.
+ * @param minBufferSize Minimum size of buffer required.
+ * @return An SkStream that can buffer at least minBufferSize, or
+ * NULL on failure. The caller is required to delete when finished with
+ * this object.
+ */
+ static SkStreamRewindable* Create(SkStream* stream, size_t minBufferSize);
+};
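A minimal sketch from the editor (not part of the patch), assuming SkMemoryStream from SkStream.h as the wrapped stream; the 64-byte buffer size is arbitrary.

    #include "SkFrontBufferedStream.h"
    #include "SkStream.h"

    static SkStreamRewindable* bufferFirst64Bytes(const void* data, size_t len) {
        // The returned stream owns the inner stream and can rewind within the
        // first 64 bytes even if the wrapped stream cannot rewind at all.
        return SkFrontBufferedStream::Create(new SkMemoryStream(data, len), 64);
    }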
diff --git a/gfx/skia/skia/include/utils/SkInterpolator.h b/gfx/skia/skia/include/utils/SkInterpolator.h
new file mode 100644
index 000000000..e062b38f6
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkInterpolator.h
@@ -0,0 +1,137 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkInterpolator_DEFINED
+#define SkInterpolator_DEFINED
+
+#include "SkScalar.h"
+
+class SkInterpolatorBase : SkNoncopyable {
+public:
+ enum Result {
+ kNormal_Result,
+ kFreezeStart_Result,
+ kFreezeEnd_Result
+ };
+protected:
+ SkInterpolatorBase();
+ ~SkInterpolatorBase();
+public:
+ void reset(int elemCount, int frameCount);
+
+ /** Return the start and end time for this interpolator.
+ If there are no key frames, return false.
+ @param startTime If not null, returns the time (in milliseconds) of the
+ first keyframe. If there are no keyframes, this param
+ is ignored (left unchanged).
+ @param endTime If not null, returns the time (in milliseconds) of the
+ last keyframe. If there are no keyframes, this parameter
+ is ignored (left unchanged).
+ @return True if there are key frames, or false if there are none.
+ */
+ bool getDuration(SkMSec* startTime, SkMSec* endTime) const;
+
+
+ /** Set whether the repeat is mirrored.
+ @param mirror If true, the odd repeats interpolate from the last key
+ frame and the first.
+ */
+ void setMirror(bool mirror) {
+ fFlags = SkToU8((fFlags & ~kMirror) | (int)mirror);
+ }
+
+ /** Set the repeat count. The repeat count may be fractional.
+ @param repeatCount Multiplies the total time by this scalar.
+ */
+ void setRepeatCount(SkScalar repeatCount) { fRepeat = repeatCount; }
+
+ /** Set whether the repeat is mirrored.
+ @param reset If true, the odd repeats interpolate from the last key
+ frame and the first.
+ */
+ void setReset(bool reset) {
+ fFlags = SkToU8((fFlags & ~kReset) | (int)reset);
+ }
+
+ Result timeToT(SkMSec time, SkScalar* T, int* index, bool* exact) const;
+
+protected:
+ enum Flags {
+ kMirror = 1,
+ kReset = 2,
+ kHasBlend = 4
+ };
+ static SkScalar ComputeRelativeT(SkMSec time, SkMSec prevTime,
+ SkMSec nextTime, const SkScalar blend[4] = NULL);
+ int16_t fFrameCount;
+ uint8_t fElemCount;
+ uint8_t fFlags;
+ SkScalar fRepeat;
+ struct SkTimeCode {
+ SkMSec fTime;
+ SkScalar fBlend[4];
+ };
+ SkTimeCode* fTimes; // pointer into fStorage
+ void* fStorage;
+#ifdef SK_DEBUG
+ SkTimeCode(* fTimesArray)[10];
+#endif
+};
+
+class SkInterpolator : public SkInterpolatorBase {
+public:
+ SkInterpolator();
+ SkInterpolator(int elemCount, int frameCount);
+ void reset(int elemCount, int frameCount);
+
+ /** Add or replace a key frame, copying the values[] data into the
+ interpolator.
+ @param index The index of this frame (frames must be ordered by time)
+ @param time The millisecond time for this frame
+ @param values The array of values [elemCount] for this frame. The data
+ is copied into the interpolator.
+ @param blend A positive scalar specifying how to blend between this
+ and the next key frame. [0...1) is a cubic lag/log/lag
+ blend (slow to change at the beginning and end)
+ 1 is a linear blend (default)
+ */
+ bool setKeyFrame(int index, SkMSec time, const SkScalar values[],
+ const SkScalar blend[4] = NULL);
+
+ /** Return the computed values given the specified time. Return whether
+ those values are the result of pinning to either the first
+ (kFreezeStart) or last (kFreezeEnd), or from interpolating the two
+ nearest key values (kNormal).
+ @param time The time to sample (in milliseconds)
+ @param values (may be null) where to write the computed values.
+ */
+ Result timeToValues(SkMSec time, SkScalar values[] = NULL) const;
+
+private:
+ SkScalar* fValues; // pointer into fStorage
+#ifdef SK_DEBUG
+ SkScalar(* fScalarsArray)[10];
+#endif
+ typedef SkInterpolatorBase INHERITED;
+};
+
+/** Interpolate a cubic curve, typically to provide an ease-in ease-out transition.
+ All the parameters are in the range of [0...1].
+ The input value is treated as the x-coordinate of the cubic.
+ The output value is the y-coordinate on the cubic at the x-coordinate.
+
+ @param value The x-coordinate pinned between [0..1].
+ @param bx,by,cx,cy The cubic control points where the cubic is specified
+ as (0,0) (bx,by) (cx,cy) (1,1)
+ @return the corresponding y-coordinate value, from [0..1].
+*/
+SkScalar SkUnitCubicInterp(SkScalar value, SkScalar bx, SkScalar by,
+ SkScalar cx, SkScalar cy);
+
+#endif
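A usage sketch from the editor (not part of the patch): interpolate a single scalar between two keyframes and sample it midway; times are in milliseconds and the default blend is linear.

    #include "SkInterpolator.h"

    static SkScalar sampleMidway() {
        SkInterpolator interp(1 /*elemCount*/, 2 /*frameCount*/);
        SkScalar start = 0, end = 100;
        interp.setKeyFrame(0, 0,    &start);   // value 0 at t = 0 ms
        interp.setKeyFrame(1, 1000, &end);     // value 100 at t = 1000 ms

        SkScalar value = 0;
        interp.timeToValues(500, &value);      // writes roughly 50 into value
        return value;
    }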
diff --git a/gfx/skia/skia/include/utils/SkLayer.h b/gfx/skia/skia/include/utils/SkLayer.h
new file mode 100644
index 000000000..25bc32886
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkLayer.h
@@ -0,0 +1,130 @@
+
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkLayer_DEFINED
+#define SkLayer_DEFINED
+
+#include "../private/SkTDArray.h"
+#include "SkRefCnt.h"
+#include "SkColor.h"
+#include "SkMatrix.h"
+#include "SkPoint.h"
+#include "SkRect.h"
+#include "SkSize.h"
+
+class SkCanvas;
+
+class SkLayer : public SkRefCnt {
+
+public:
+
+
+ SkLayer();
+ SkLayer(const SkLayer&);
+ virtual ~SkLayer();
+
+ bool isInheritFromRootTransform() const;
+ SkScalar getOpacity() const { return m_opacity; }
+ const SkSize& getSize() const { return m_size; }
+ const SkPoint& getPosition() const { return m_position; }
+ const SkPoint& getAnchorPoint() const { return m_anchorPoint; }
+ const SkMatrix& getMatrix() const { return fMatrix; }
+ const SkMatrix& getChildrenMatrix() const { return fChildrenMatrix; }
+
+ SkScalar getWidth() const { return m_size.width(); }
+ SkScalar getHeight() const { return m_size.height(); }
+
+ void setInheritFromRootTransform(bool);
+ void setOpacity(SkScalar opacity) { m_opacity = opacity; }
+ void setSize(SkScalar w, SkScalar h) { m_size.set(w, h); }
+ void setPosition(SkScalar x, SkScalar y) { m_position.set(x, y); }
+ void setAnchorPoint(SkScalar x, SkScalar y) { m_anchorPoint.set(x, y); }
+ void setMatrix(const SkMatrix&);
+ void setChildrenMatrix(const SkMatrix&);
+
+ // children
+
+ /** Return the number of layers in our child list.
+ */
+ int countChildren() const;
+
+ /** Return the child at the specified index (starting at 0). This does not
+ affect the reference count of the child.
+ */
+ SkLayer* getChild(int index) const;
+
+ /** Add this layer to our child list at the end (top-most), and ref() it.
+ If it was already in another hierarchy, remove it from that list.
+ Return the new child.
+ */
+ SkLayer* addChild(SkLayer* child);
+
+ /** Remove this layer from its parent's list (or do nothing if it has no
+ parent.) If it had a parent, then unref() is called.
+ */
+ void detachFromParent();
+
+ /** Remove, and unref(), all of the layers in our child list.
+ */
+ void removeChildren();
+
+ /** Return our parent layer, or NULL if we have none.
+ */
+ SkLayer* getParent() const { return fParent; }
+
+ /** Return the root layer in this hierarchy. If this layer is the root
+ (i.e. has no parent), then this returns itself.
+ */
+ SkLayer* getRootLayer() const;
+
+ // coordinate system transformations
+
+ /** Return, in matrix, the matrix transformations that are applied locally
+ when this layer draws (i.e. its position and matrix/anchorPoint).
+ This does not include the childrenMatrix, since that is only applied
+ after this layer draws (but before its children draw).
+ */
+ void getLocalTransform(SkMatrix* matrix) const;
+
+ /** Return, in matrix, the concatenation of transforms that are applied
+ from this layer's root parent to the layer itself.
+ This is the matrix that is applied to the layer during drawing.
+ */
+ void localToGlobal(SkMatrix* matrix) const;
+
+ // paint method
+
+ void draw(SkCanvas*, SkScalar opacity);
+ void draw(SkCanvas* canvas) {
+ this->draw(canvas, SK_Scalar1);
+ }
+
+protected:
+ virtual void onDraw(SkCanvas*, SkScalar opacity);
+
+private:
+ enum Flags {
+ kInheritFromRootTransform_Flag = 0x01
+ };
+
+ SkLayer* fParent;
+ SkScalar m_opacity;
+ SkSize m_size;
+ SkPoint m_position;
+ SkPoint m_anchorPoint;
+ SkMatrix fMatrix;
+ SkMatrix fChildrenMatrix;
+ uint32_t fFlags;
+
+ SkTDArray<SkLayer*> m_children;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
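A small sketch from the editor (not part of the patch): build a two-layer tree and draw it. Concrete drawing belongs in onDraw() overrides, which are omitted here; the base class only manages geometry, opacity and the child list.

    #include "SkLayer.h"

    static void drawLayerTree(SkCanvas* canvas) {
        SkLayer* root = new SkLayer;
        root->setSize(200, 200);

        SkLayer* child = new SkLayer;
        child->setSize(50, 50);
        child->setPosition(10, 10);
        root->addChild(child)->unref();   // addChild() refs; drop the creation ref

        root->draw(canvas);               // draws this layer, then its children
        root->unref();                    // releases the tree, children included
    }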
diff --git a/gfx/skia/skia/include/utils/SkLua.h b/gfx/skia/skia/include/utils/SkLua.h
new file mode 100644
index 000000000..ad6f996ac
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkLua.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLua_DEFINED
+#define SkLua_DEFINED
+
+#include "SkClipStack.h"
+#include "SkColor.h"
+#include "SkPathEffect.h"
+#include "SkScalar.h"
+#include "SkString.h"
+
+struct lua_State;
+
+class SkCanvas;
+class SkMatrix;
+class SkPaint;
+class SkPath;
+struct SkRect;
+class SkRRect;
+class SkTextBlob;
+
+#define SkScalarToLua(x) SkScalarToDouble(x)
+#define SkLuaToScalar(x) SkDoubleToScalar(x)
+
+class SkLua {
+public:
+ static void Load(lua_State*);
+
+ SkLua(const char termCode[] = NULL); // creates a new L, will close it
+ SkLua(lua_State*); // uses L, will not close it
+ ~SkLua();
+
+ lua_State* get() const { return fL; }
+ lua_State* operator*() const { return fL; }
+ lua_State* operator->() const { return fL; }
+
+ bool runCode(const char code[]);
+ bool runCode(const void* code, size_t size);
+
+ void pushBool(bool, const char tableKey[] = NULL);
+ void pushString(const char[], const char tableKey[] = NULL);
+ void pushString(const char[], size_t len, const char tableKey[] = NULL);
+ void pushString(const SkString&, const char tableKey[] = NULL);
+ void pushArrayU16(const uint16_t[], int count, const char tableKey[] = NULL);
+ void pushArrayPoint(const SkPoint[], int count, const char key[] = NULL);
+ void pushArrayScalar(const SkScalar[], int count, const char key[] = NULL);
+ void pushColor(SkColor, const char tableKey[] = NULL);
+ void pushU32(uint32_t, const char tableKey[] = NULL);
+ void pushScalar(SkScalar, const char tableKey[] = NULL);
+ void pushRect(const SkRect&, const char tableKey[] = NULL);
+ void pushRRect(const SkRRect&, const char tableKey[] = NULL);
+ void pushDash(const SkPathEffect::DashInfo&, const char tableKey[] = NULL);
+ void pushMatrix(const SkMatrix&, const char tableKey[] = NULL);
+ void pushPaint(const SkPaint&, const char tableKey[] = NULL);
+ void pushPath(const SkPath&, const char tableKey[] = NULL);
+ void pushCanvas(SkCanvas*, const char tableKey[] = NULL);
+ void pushClipStack(const SkClipStack&, const char tableKey[] = NULL);
+ void pushClipStackElement(const SkClipStack::Element& element, const char tableKey[] = NULL);
+ void pushTextBlob(const SkTextBlob*, const char tableKey[] = NULL);
+
+ // This SkCanvas lua method is declared here to benefit from SkLua's friendship with SkCanvas.
+ static int lcanvas_getReducedClipStack(lua_State* L);
+
+private:
+ lua_State* fL;
+ SkString fTermCode;
+ bool fWeOwnL;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/utils/SkLuaCanvas.h b/gfx/skia/skia/include/utils/SkLuaCanvas.h
new file mode 100644
index 000000000..ac29f6f27
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkLuaCanvas.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLuaCanvas_DEFINED
+#define SkLuaCanvas_DEFINED
+
+#include "SkCanvas.h"
+#include "SkString.h"
+
+struct lua_State;
+
+class SkLuaCanvas : public SkCanvas {
+public:
+ void pushThis();
+
+ SkLuaCanvas(int width, int height, lua_State*, const char function[]);
+ virtual ~SkLuaCanvas();
+
+protected:
+ void willSave() override;
+ SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec&) override;
+ void willRestore() override;
+
+ void didConcat(const SkMatrix&) override;
+ void didSetMatrix(const SkMatrix&) override;
+
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override;
+ virtual void onDrawText(const void* text, size_t byteLength, SkScalar x, SkScalar y,
+ const SkPaint&) override;
+ virtual void onDrawPosText(const void* text, size_t byteLength, const SkPoint pos[],
+ const SkPaint&) override;
+ virtual void onDrawPosTextH(const void* text, size_t byteLength, const SkScalar xpos[],
+ SkScalar constY, const SkPaint&) override;
+ virtual void onDrawTextOnPath(const void* text, size_t byteLength, const SkPath& path,
+ const SkMatrix* matrix, const SkPaint&) override;
+ void onDrawTextRSXform(const void* text, size_t byteLength, const SkRSXform xform[],
+ const SkRect* cull, const SkPaint& paint) override;
+ virtual void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) override;
+
+ void onDrawPaint(const SkPaint&) override;
+ void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&) override;
+ void onDrawRect(const SkRect&, const SkPaint&) override;
+ void onDrawOval(const SkRect&, const SkPaint&) override;
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override;
+ void onDrawRRect(const SkRRect&, const SkPaint&) override;
+ void onDrawPath(const SkPath&, const SkPaint&) override;
+ void onDrawBitmap(const SkBitmap&, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawBitmapRect(const SkBitmap&, const SkRect* src, const SkRect& dst, const SkPaint*,
+ SrcRectConstraint) override;
+ void onDrawImage(const SkImage*, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawImageRect(const SkImage*, const SkRect* src, const SkRect& dst,
+ const SkPaint*, SrcRectConstraint) override;
+ void onDrawBitmapNine(const SkBitmap&, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawVertices(VertexMode vmode, int vertexCount,
+ const SkPoint vertices[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint&) override;
+
+ void onClipRect(const SkRect&, ClipOp, ClipEdgeStyle) override;
+ void onClipRRect(const SkRRect&, ClipOp, ClipEdgeStyle) override;
+ void onClipPath(const SkPath&, ClipOp, ClipEdgeStyle) override;
+ void onClipRegion(const SkRegion&, ClipOp) override;
+
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override;
+
+private:
+ lua_State* fL;
+ SkString fFunc;
+
+ void sendverb(const char verb[]);
+
+ typedef SkCanvas INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/utils/SkMeshUtils.h b/gfx/skia/skia/include/utils/SkMeshUtils.h
new file mode 100644
index 000000000..7e0e8f4de
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkMeshUtils.h
@@ -0,0 +1,50 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkMeshUtils_DEFINED
+#define SkMeshUtils_DEFINED
+
+#include "SkPoint.h"
+#include "SkColor.h"
+
+class SkBitmap;
+class SkCanvas;
+class SkPaint;
+
+class SkMeshIndices {
+public:
+ SkMeshIndices();
+ ~SkMeshIndices();
+
+ bool init(int texW, int texH, int rows, int cols) {
+ return this->init(NULL, NULL, texW, texH, rows, cols);
+ }
+
+ bool init(SkPoint tex[], uint16_t indices[],
+ int texW, int texH, int rows, int cols);
+
+ int indexCount() const { return fIndexCount; }
+ const uint16_t* indices() const { return fIndices; }
+
+ size_t texCount() const { return fTexCount; }
+ const SkPoint* tex() const { return fTex; }
+
+private:
+ int fIndexCount, fTexCount;
+ SkPoint* fTex;
+ uint16_t* fIndices;
+ void* fStorage; // may be null
+};
+
+class SkMeshUtils {
+public:
+ static void Draw(SkCanvas*, const SkBitmap&, int rows, int cols,
+ const SkPoint verts[], const SkColor colors[],
+ const SkPaint& paint);
+};
+
+#endif
diff --git a/gfx/skia/skia/include/utils/SkNWayCanvas.h b/gfx/skia/skia/include/utils/SkNWayCanvas.h
new file mode 100644
index 000000000..4e7f4224f
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkNWayCanvas.h
@@ -0,0 +1,94 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNWayCanvas_DEFINED
+#define SkNWayCanvas_DEFINED
+
+#include "../private/SkTDArray.h"
+#include "SkCanvas.h"
+
+class SK_API SkNWayCanvas : public SkCanvas {
+public:
+ SkNWayCanvas(int width, int height);
+ virtual ~SkNWayCanvas();
+
+ virtual void addCanvas(SkCanvas*);
+ virtual void removeCanvas(SkCanvas*);
+ virtual void removeAll();
+
+ ///////////////////////////////////////////////////////////////////////////
+ // These are forwarded to the N canvases we're referencing
+
+#ifdef SK_SUPPORT_LEGACY_DRAWFILTER
+ SkDrawFilter* setDrawFilter(SkDrawFilter*) override;
+#endif
+
+protected:
+ SkTDArray<SkCanvas*> fList;
+
+ void willSave() override;
+ SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec&) override;
+ void willRestore() override;
+
+ void didConcat(const SkMatrix&) override;
+ void didSetMatrix(const SkMatrix&) override;
+
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override;
+ virtual void onDrawText(const void* text, size_t byteLength, SkScalar x, SkScalar y,
+ const SkPaint&) override;
+ virtual void onDrawPosText(const void* text, size_t byteLength, const SkPoint pos[],
+ const SkPaint&) override;
+ virtual void onDrawPosTextH(const void* text, size_t byteLength, const SkScalar xpos[],
+ SkScalar constY, const SkPaint&) override;
+ virtual void onDrawTextOnPath(const void* text, size_t byteLength, const SkPath& path,
+ const SkMatrix* matrix, const SkPaint&) override;
+ virtual void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) override;
+ void onDrawTextRSXform(const void* text, size_t byteLength, const SkRSXform xform[],
+ const SkRect* cull, const SkPaint& paint) override;
+ virtual void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkXfermode* xmode,
+ const SkPaint& paint) override;
+
+ void onDrawPaint(const SkPaint&) override;
+ void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&) override;
+ void onDrawRect(const SkRect&, const SkPaint&) override;
+ void onDrawOval(const SkRect&, const SkPaint&) override;
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override;
+ void onDrawRRect(const SkRRect&, const SkPaint&) override;
+ void onDrawPath(const SkPath&, const SkPaint&) override;
+ void onDrawBitmap(const SkBitmap&, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawBitmapRect(const SkBitmap&, const SkRect* src, const SkRect& dst, const SkPaint*,
+ SrcRectConstraint) override;
+ void onDrawImage(const SkImage*, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawImageRect(const SkImage*, const SkRect* src, const SkRect& dst,
+ const SkPaint*, SrcRectConstraint) override;
+ void onDrawBitmapNine(const SkBitmap&, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawVertices(VertexMode vmode, int vertexCount,
+ const SkPoint vertices[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint&) override;
+
+ void onClipRect(const SkRect&, ClipOp, ClipEdgeStyle) override;
+ void onClipRRect(const SkRRect&, ClipOp, ClipEdgeStyle) override;
+ void onClipPath(const SkPath&, ClipOp, ClipEdgeStyle) override;
+ void onClipRegion(const SkRegion&, ClipOp) override;
+
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override;
+ void onDrawAnnotation(const SkRect&, const char[], SkData*) override;
+
+ class Iter;
+
+private:
+ typedef SkCanvas INHERITED;
+};
+
+
+#endif
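(Illustrative usage, not part of the imported header: an SkNWayCanvas replays every call onto each canvas that has been added, which is useful for mirroring drawing to a screen canvas and a recording canvas at once. The two target canvases are assumed to exist.)

    #include "SkNWayCanvas.h"
    #include "SkPaint.h"
    #include "SkRect.h"

    static void draw_to_both(SkCanvas* screen, SkCanvas* recorder) {
        SkNWayCanvas nway(800, 600);   // nominal width/height of the fan-out canvas
        nway.addCanvas(screen);
        nway.addCanvas(recorder);

        SkPaint paint;
        paint.setColor(SK_ColorRED);
        nway.drawRect(SkRect::MakeWH(100, 50), paint);  // forwarded to both targets

        nway.removeAll();  // detach before the target canvases are destroyed
    }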
diff --git a/gfx/skia/skia/include/utils/SkNoSaveLayerCanvas.h b/gfx/skia/skia/include/utils/SkNoSaveLayerCanvas.h
new file mode 100644
index 000000000..3d786c532
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkNoSaveLayerCanvas.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNoSaveLayerCanvas_DEFINED
+#define SkNoSaveLayerCanvas_DEFINED
+
+#include "SkCanvas.h"
+#include "SkRRect.h"
+
+// The NoSaveLayerCanvas is used to play back SkPictures when the saveLayer
+// functionality isn't required (e.g., during analysis of the draw calls).
+// It also simplifies the clipping calls to only use rectangles.
+class SK_API SkNoSaveLayerCanvas : public SkCanvas {
+public:
+ SkNoSaveLayerCanvas(SkBaseDevice* device)
+ : INHERITED(device, kConservativeRasterClip_InitFlag)
+ {}
+
+protected:
+ SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec& rec) override {
+ (void)this->INHERITED::getSaveLayerStrategy(rec);
+ return kNoLayer_SaveLayerStrategy;
+ }
+
+private:
+ typedef SkCanvas INHERITED;
+};
+
+#endif // SkNoSaveLayerCanvas_DEFINED
diff --git a/gfx/skia/skia/include/utils/SkNullCanvas.h b/gfx/skia/skia/include/utils/SkNullCanvas.h
new file mode 100644
index 000000000..99a26dafd
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkNullCanvas.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNullCanvas_DEFINED
+#define SkNullCanvas_DEFINED
+
+#include "SkBitmap.h"
+
+class SkCanvas;
+
+/**
+ * Creates a canvas that draws nothing. This is useful for performance testing.
+ */
+SK_API SkCanvas* SkCreateNullCanvas();
+
+#endif
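(Illustrative usage, not part of the imported header: a null canvas accepts the full SkCanvas API but rasterizes nothing, so it isolates the cost of the call stream itself.)

    #include "SkNullCanvas.h"
    #include "SkCanvas.h"
    #include "SkPaint.h"
    #include <memory>

    static void exercise_draw_calls() {
        std::unique_ptr<SkCanvas> canvas(SkCreateNullCanvas());
        SkPaint paint;
        for (int i = 0; i < 10000; ++i) {
            canvas->drawCircle(50, 50, 25, paint);  // everything but rasterization
        }
    }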
diff --git a/gfx/skia/skia/include/utils/SkPaintFilterCanvas.h b/gfx/skia/skia/include/utils/SkPaintFilterCanvas.h
new file mode 100644
index 000000000..63eaaa2fb
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkPaintFilterCanvas.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPaintFilterCanvas_DEFINED
+#define SkPaintFilterCanvas_DEFINED
+
+#include "SkNWayCanvas.h"
+#include "SkTLazy.h"
+
+/** \class SkPaintFilterCanvas
+
+ A utility proxy base class for implementing draw/paint filters.
+*/
+class SK_API SkPaintFilterCanvas : public SkNWayCanvas {
+public:
+ /**
+ * DEPRECATED: use the variant below.
+ */
+ SkPaintFilterCanvas(int width, int height);
+
+ /**
+ * The new SkPaintFilterCanvas is configured for forwarding to the
+ * specified canvas. Also copies the target canvas matrix and clip bounds.
+ */
+ SkPaintFilterCanvas(SkCanvas* canvas);
+
+ enum Type {
+ kPaint_Type,
+ kPoint_Type,
+ kArc_Type,
+ kBitmap_Type,
+ kRect_Type,
+ kRRect_Type,
+ kDRRect_Type,
+ kOval_Type,
+ kPath_Type,
+ kPicture_Type,
+ kText_Type,
+ kTextBlob_Type,
+ kVertices_Type,
+ kPatch_Type,
+
+ kTypeCount
+ };
+
+protected:
+ /**
+ * Called with the paint that will be used to draw the specified type.
+ * The implementation may modify the paint as it wishes (using SkTCopyOnFirstWrite::writable).
+ *
+ * The result bool is used to determine whether the draw op is to be
+ * executed (true) or skipped (false).
+ *
+ * Note: The base implementation calls onFilter() for top-level/explicit paints only.
+ * To also filter encapsulated paints (e.g. SkPicture, SkTextBlob), clients may need to
+ * override the relevant methods (i.e. drawPicture, drawTextBlob).
+ */
+ virtual bool onFilter(SkTCopyOnFirstWrite<SkPaint>* paint, Type type) const = 0;
+
+ void onDrawPaint(const SkPaint&) override;
+ void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&) override;
+ void onDrawRect(const SkRect&, const SkPaint&) override;
+ void onDrawRRect(const SkRRect&, const SkPaint&) override;
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override;
+ void onDrawOval(const SkRect&, const SkPaint&) override;
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override;
+ void onDrawPath(const SkPath&, const SkPaint&) override;
+ void onDrawBitmap(const SkBitmap&, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawBitmapRect(const SkBitmap&, const SkRect* src, const SkRect& dst, const SkPaint*,
+ SrcRectConstraint) override;
+ void onDrawBitmapNine(const SkBitmap&, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawImage(const SkImage*, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawImageRect(const SkImage*, const SkRect* src, const SkRect& dst,
+ const SkPaint*, SrcRectConstraint) override;
+ void onDrawImageNine(const SkImage*, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawVertices(VertexMode vmode, int vertexCount,
+ const SkPoint vertices[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint&) override;
+ void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkXfermode* xmode,
+ const SkPaint& paint) override;
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override;
+
+ void onDrawText(const void* text, size_t byteLength, SkScalar x, SkScalar y,
+ const SkPaint&) override;
+ void onDrawPosText(const void* text, size_t byteLength, const SkPoint pos[],
+ const SkPaint&) override;
+ void onDrawPosTextH(const void* text, size_t byteLength, const SkScalar xpos[],
+ SkScalar constY, const SkPaint&) override;
+ void onDrawTextOnPath(const void* text, size_t byteLength, const SkPath& path,
+ const SkMatrix* matrix, const SkPaint&) override;
+ void onDrawTextRSXform(const void* text, size_t byteLength, const SkRSXform xform[],
+ const SkRect* cull, const SkPaint& paint) override;
+ void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) override;
+
+private:
+ class AutoPaintFilter;
+
+ typedef SkNWayCanvas INHERITED;
+};
+
+#endif
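(Illustrative subclass, not part of the imported header; the class name is hypothetical. onFilter() can rewrite the paint via SkTCopyOnFirstWrite::writable() and returns true to keep the draw op.)

    #include "SkPaintFilterCanvas.h"
    #include "SkPaint.h"

    // Forwards to 'target' but tints every top-level paint red.
    class TintingCanvas : public SkPaintFilterCanvas {
    public:
        explicit TintingCanvas(SkCanvas* target) : SkPaintFilterCanvas(target) {}

    protected:
        bool onFilter(SkTCopyOnFirstWrite<SkPaint>* paint, Type) const override {
            if (*paint) {  // some ops (e.g. bitmap draws) may carry no paint
                paint->writable()->setColor(SK_ColorRED);
            }
            return true;   // never skip the draw
        }
    };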
diff --git a/gfx/skia/skia/include/utils/SkParse.h b/gfx/skia/skia/include/utils/SkParse.h
new file mode 100644
index 000000000..411605f7a
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkParse.h
@@ -0,0 +1,36 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkParse_DEFINED
+#define SkParse_DEFINED
+
+#include "SkColor.h"
+
+class SK_API SkParse {
+public:
+ static int Count(const char str[]); // number of scalars or int values
+ static int Count(const char str[], char separator);
+ static const char* FindColor(const char str[], SkColor* value);
+ static const char* FindHex(const char str[], uint32_t* value);
+ static const char* FindMSec(const char str[], SkMSec* value);
+ static const char* FindNamedColor(const char str[], size_t len, SkColor* color);
+ static const char* FindS32(const char str[], int32_t* value);
+ static const char* FindScalar(const char str[], SkScalar* value);
+ static const char* FindScalars(const char str[], SkScalar value[], int count);
+
+ static bool FindBool(const char str[], bool* value);
+ // return the index of str in list[], or -1 if not found
+ static int FindList(const char str[], const char list[]);
+#ifdef SK_SUPPORT_UNITTEST
+ static void TestColor();
+ static void UnitTest();
+#endif
+};
+
+#endif
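(Illustrative usage, not part of the imported header: each Find* helper returns a pointer just past the text it consumed, or null on failure, so the return value doubles as a success flag. The input strings are placeholders.)

    #include "SkParse.h"
    #include "SkScalar.h"

    static bool parse_examples() {
        SkColor color;
        if (nullptr == SkParse::FindColor("#ff8000", &color)) {   // hex or named colors
            return false;
        }

        SkScalar values[3];
        return nullptr != SkParse::FindScalars("0.5 10 20", values, 3);
    }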
diff --git a/gfx/skia/skia/include/utils/SkParsePath.h b/gfx/skia/skia/include/utils/SkParsePath.h
new file mode 100644
index 000000000..c52b3c0bc
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkParsePath.h
@@ -0,0 +1,23 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkParsePath_DEFINED
+#define SkParsePath_DEFINED
+
+#include "SkPath.h"
+
+class SkString;
+
+class SkParsePath {
+public:
+ static bool FromSVGString(const char str[], SkPath*);
+ static void ToSVGString(const SkPath&, SkString*);
+};
+
+#endif
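(Illustrative usage, not part of the imported header: a round trip through the SVG path grammar.)

    #include "SkParsePath.h"
    #include "SkString.h"

    static bool svg_path_roundtrip() {
        SkPath path;
        if (!SkParsePath::FromSVGString("M0 0 L100 0 L100 100 Z", &path)) {
            return false;
        }
        SkString svg;
        SkParsePath::ToSVGString(path, &svg);   // serialize back to an SVG "d" string
        return !svg.isEmpty();
    }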
diff --git a/gfx/skia/skia/include/utils/SkPictureUtils.h b/gfx/skia/skia/include/utils/SkPictureUtils.h
new file mode 100644
index 000000000..b65a64d57
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkPictureUtils.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPictureUtils_DEFINED
+#define SkPictureUtils_DEFINED
+
+#include "SkPicture.h"
+
+// TODO: remove this file?
+
+class SK_API SkPictureUtils {
+public:
+ /**
+ * How many bytes are allocated to hold the SkPicture.
+ * Includes operations, parameters, bounding data, deletion listeners;
+ * includes nested SkPictures, but does not include large objects that
+ * SkRecord holds a reference to (e.g. paths, or pixels backing bitmaps).
+ */
+ static size_t ApproximateBytesUsed(const SkPicture* pict) {
+ return pict->approximateBytesUsed();
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/include/utils/SkRandom.h b/gfx/skia/skia/include/utils/SkRandom.h
new file mode 100644
index 000000000..7b5663118
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkRandom.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRandom_DEFINED
+#define SkRandom_DEFINED
+
+#include "../private/SkFixed.h"
+#include "SkScalar.h"
+
+/** \class SkRandom
+
+ Utility class that implements pseudo random 32bit numbers using Marsaglia's
+ multiply-with-carry "mother of all" algorithm. Unlike rand(), this class holds
+ its own state, so that multiple instances can be used with no side-effects.
+
+ Has a large period and all bits are well-randomized.
+ */
+class SkRandom {
+public:
+ SkRandom() { init(0); }
+ SkRandom(uint32_t seed) { init(seed); }
+ SkRandom(const SkRandom& rand) : fK(rand.fK), fJ(rand.fJ) {}
+
+ SkRandom& operator=(const SkRandom& rand) {
+ fK = rand.fK;
+ fJ = rand.fJ;
+
+ return *this;
+ }
+
+ /** Return the next pseudo random number as an unsigned 32bit value.
+ */
+ uint32_t nextU() {
+ fK = kKMul*(fK & 0xffff) + (fK >> 16);
+ fJ = kJMul*(fJ & 0xffff) + (fJ >> 16);
+ return (((fK << 16) | (fK >> 16)) + fJ);
+ }
+
+ /** Return the next pseudo random number as a signed 32bit value.
+ */
+ int32_t nextS() { return (int32_t)this->nextU(); }
+
+ /** Return the next pseudo random number as an unsigned 16bit value.
+ */
+ U16CPU nextU16() { return this->nextU() >> 16; }
+
+ /** Return the next pseudo random number as a signed 16bit value.
+ */
+ S16CPU nextS16() { return this->nextS() >> 16; }
+
+ /**
+ * Returns value [0...1) as an IEEE float
+ */
+ float nextF() {
+ unsigned int floatint = 0x3f800000 | (this->nextU() >> 9);
+ float f = SkBits2Float(floatint) - 1.0f;
+ return f;
+ }
+
+ /**
+ * Returns value [min...max) as a float
+ */
+ float nextRangeF(float min, float max) {
+ return min + this->nextF() * (max - min);
+ }
+
+ /** Return the next pseudo random number, as an unsigned value of
+ at most bitCount bits.
+ @param bitCount The maximum number of bits to be returned
+ */
+ uint32_t nextBits(unsigned bitCount) {
+ SkASSERT(bitCount > 0 && bitCount <= 32);
+ return this->nextU() >> (32 - bitCount);
+ }
+
+ /** Return the next pseudo random unsigned number, mapped to lie within
+ [min, max] inclusive.
+ */
+ uint32_t nextRangeU(uint32_t min, uint32_t max) {
+ SkASSERT(min <= max);
+ uint32_t range = max - min + 1;
+ if (0 == range) {
+ return this->nextU();
+ } else {
+ return min + this->nextU() % range;
+ }
+ }
+
+ /** Return the next pseudo random unsigned number, mapped to lie within
+ [0, count).
+ */
+ uint32_t nextULessThan(uint32_t count) {
+ SkASSERT(count > 0);
+ return this->nextRangeU(0, count - 1);
+ }
+
+ /** Return the next pseudo random number expressed as a SkScalar
+ in the range [0..SK_Scalar1).
+ */
+ SkScalar nextUScalar1() { return SkFixedToScalar(this->nextUFixed1()); }
+
+ /** Return the next pseudo random number expressed as a SkScalar
+ in the range [min..max).
+ */
+ SkScalar nextRangeScalar(SkScalar min, SkScalar max) {
+ return this->nextUScalar1() * (max - min) + min;
+ }
+
+ /** Return the next pseudo random number expressed as a SkScalar
+ in the range [-SK_Scalar1..SK_Scalar1).
+ */
+ SkScalar nextSScalar1() { return SkFixedToScalar(this->nextSFixed1()); }
+
+ /** Return the next pseudo random number as a bool.
+ */
+ bool nextBool() { return this->nextU() >= 0x80000000; }
+
+ /** A biased version of nextBool().
+ */
+ bool nextBiasedBool(SkScalar fractionTrue) {
+ SkASSERT(fractionTrue >= 0 && fractionTrue <= SK_Scalar1);
+ return this->nextUScalar1() <= fractionTrue;
+ }
+
+ /**
+ * Return the next pseudo random number as a signed 64bit value.
+ */
+ int64_t next64() {
+ int64_t hi = this->nextS();
+ return (hi << 32) | this->nextU();
+ }
+
+ /** Reset the random object.
+ */
+ void setSeed(uint32_t seed) { init(seed); }
+
+private:
+ // Initialize state variables with LCG.
+ // We must ensure that both J and K are non-zero, otherwise the
+ // multiply-with-carry step will forevermore return zero.
+ void init(uint32_t seed) {
+ fK = NextLCG(seed);
+ if (0 == fK) {
+ fK = NextLCG(fK);
+ }
+ fJ = NextLCG(fK);
+ if (0 == fJ) {
+ fJ = NextLCG(fJ);
+ }
+ SkASSERT(0 != fK && 0 != fJ);
+ }
+ static uint32_t NextLCG(uint32_t seed) { return kMul*seed + kAdd; }
+
+ /** Return the next pseudo random number expressed as an unsigned SkFixed
+ in the range [0..SK_Fixed1).
+ */
+ SkFixed nextUFixed1() { return this->nextU() >> 16; }
+
+ /** Return the next pseudo random number expressed as a signed SkFixed
+ in the range [-SK_Fixed1..SK_Fixed1).
+ */
+ SkFixed nextSFixed1() { return this->nextS() >> 15; }
+
+ // See "Numerical Recipes in C", 1992 page 284 for these constants
+ // For the LCG that sets the initial state from a seed
+ enum {
+ kMul = 1664525,
+ kAdd = 1013904223
+ };
+ // Constants for the multiply-with-carry steps
+ enum {
+ kKMul = 30345,
+ kJMul = 18000,
+ };
+
+ uint32_t fK;
+ uint32_t fJ;
+};
+
+#endif
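(Illustrative usage, not part of the imported header: because all state lives in the instance, a fixed seed reproduces the same sequence, which is the usual pattern in tests and benchmarks.)

    #include "SkRandom.h"

    static SkScalar jittered_position(uint32_t seed) {
        SkRandom rand(seed);                       // deterministic for a given seed
        if (rand.nextBool()) {                     // fair coin flip
            return rand.nextRangeScalar(-100, 100);
        }
        return static_cast<SkScalar>(rand.nextULessThan(100));  // integer in [0, 100)
    }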
diff --git a/gfx/skia/skia/include/utils/SkTextBox.h b/gfx/skia/skia/include/utils/SkTextBox.h
new file mode 100644
index 000000000..90903b558
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkTextBox.h
@@ -0,0 +1,87 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTextBox_DEFINED
+#define SkTextBox_DEFINED
+
+#include "SkCanvas.h"
+
+/** \class SkTextBox
+
+ SkTextBox is a helper class for drawing 1 or more lines of text
+ within a rectangle. The textbox is positioned and clipped by its Frame.
+ The Margin rectangle controls where the text is drawn relative to
+ the Frame. Line-breaks occur inside the Margin rectangle.
+
+ Spacing is a linear equation used to compute the distance between lines
+ of text. Spacing consists of two scalars: mul and add, and the spacing
+ between lines is computed as: spacing = paint.getTextSize() * mul + add
+*/
+class SkTextBox {
+public:
+ SkTextBox();
+
+ enum Mode {
+ kOneLine_Mode,
+ kLineBreak_Mode,
+
+ kModeCount
+ };
+ Mode getMode() const { return (Mode)fMode; }
+ void setMode(Mode);
+
+ enum SpacingAlign {
+ kStart_SpacingAlign,
+ kCenter_SpacingAlign,
+ kEnd_SpacingAlign,
+
+ kSpacingAlignCount
+ };
+ SpacingAlign getSpacingAlign() const { return (SpacingAlign)fSpacingAlign; }
+ void setSpacingAlign(SpacingAlign);
+
+ void getBox(SkRect*) const;
+ void setBox(const SkRect&);
+ void setBox(SkScalar left, SkScalar top, SkScalar right, SkScalar bottom);
+
+ void getSpacing(SkScalar* mul, SkScalar* add) const;
+ void setSpacing(SkScalar mul, SkScalar add);
+
+ void draw(SkCanvas*, const char text[], size_t len, const SkPaint&);
+
+ void setText(const char text[], size_t len, const SkPaint&);
+ void draw(SkCanvas*);
+ int countLines() const;
+ SkScalar getTextHeight() const;
+
+ sk_sp<SkTextBlob> snapshotTextBlob(SkScalar* computedBottom) const;
+
+ class Visitor {
+ public:
+ virtual ~Visitor() {}
+ virtual void operator()(const char*, size_t, SkScalar x, SkScalar y, const SkPaint&) = 0;
+ };
+
+private:
+ SkRect fBox;
+ SkScalar fSpacingMul, fSpacingAdd;
+ uint8_t fMode, fSpacingAlign;
+ const char* fText;
+ size_t fLen;
+ const SkPaint* fPaint;
+
+ SkScalar visit(Visitor&, const char text[], size_t len, const SkPaint&) const;
+};
+
+class SkTextLineBreaker {
+public:
+ static int CountLines(const char text[], size_t len, const SkPaint&, SkScalar width);
+};
+
+#endif
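(Illustrative usage, not part of the imported header: wrap a paragraph inside a rectangle; the canvas and text are assumed to be supplied by the caller.)

    #include "SkTextBox.h"
    #include "SkPaint.h"
    #include <cstring>

    static void draw_wrapped_text(SkCanvas* canvas, const char* text) {
        SkPaint paint;
        paint.setAntiAlias(true);
        paint.setTextSize(14);

        SkTextBox box;
        box.setMode(SkTextBox::kLineBreak_Mode);   // break lines at the box edges
        box.setBox(10, 10, 300, 200);              // left, top, right, bottom
        box.setSpacing(SK_Scalar1, 0);             // spacing = textSize * 1 + 0
        box.draw(canvas, text, strlen(text), paint);
    }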
diff --git a/gfx/skia/skia/include/utils/mac/SkCGUtils.h b/gfx/skia/skia/include/utils/mac/SkCGUtils.h
new file mode 100644
index 000000000..29df8b81a
--- /dev/null
+++ b/gfx/skia/skia/include/utils/mac/SkCGUtils.h
@@ -0,0 +1,76 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkCGUtils_DEFINED
+#define SkCGUtils_DEFINED
+
+#include "SkSize.h"
+#include "SkImageInfo.h"
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#ifdef SK_BUILD_FOR_MAC
+#include <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreGraphics/CoreGraphics.h>
+#endif
+
+class SkBitmap;
+class SkData;
+class SkStreamRewindable;
+
+/**
+ * Given a CGImage, allocate an SkBitmap and copy the image's pixels into it. If scaleToFit is not
+ * null, use it to determine the size of the bitmap, and scale the image to fill the bitmap.
+ * Otherwise use the image's width/height.
+ *
+ * On failure, return false, and leave bitmap unchanged.
+ */
+SK_API bool SkCreateBitmapFromCGImage(SkBitmap* dst, CGImageRef src, SkISize* scaleToFit = NULL);
+
+/**
+ * Copy the pixels from src into the memory specified by info/rowBytes/dstPixels. On failure,
+ * return false (e.g. ImageInfo incompatible with src).
+ */
+SK_API bool SkCopyPixelsFromCGImage(const SkImageInfo& info, size_t rowBytes, void* dstPixels,
+ CGImageRef src);
+
+/**
+ * Create an imageref from the specified bitmap using the specified colorspace.
+ * If space is NULL, then CGColorSpaceCreateDeviceRGB() is used.
+ */
+SK_API CGImageRef SkCreateCGImageRefWithColorspace(const SkBitmap& bm,
+ CGColorSpaceRef space);
+
+/**
+ * Create an imageref from the specified bitmap using the colorspace returned
+ * by CGColorSpaceCreateDeviceRGB()
+ */
+static inline CGImageRef SkCreateCGImageRef(const SkBitmap& bm) {
+ return SkCreateCGImageRefWithColorspace(bm, NULL);
+}
+
+/**
+ * Draw the bitmap into the specified CG context. The bitmap will be converted
+ * to a CGImage using the generic RGB colorspace. (x,y) specifies the position
+ * of the top-left corner of the bitmap. The bitmap is converted using the
+ * colorspace returned by CGColorSpaceCreateDeviceRGB()
+ */
+void SkCGDrawBitmap(CGContextRef, const SkBitmap&, float x, float y);
+
+/**
+ * Return a provider that wraps the specified stream.
+ * When the provider is finally deleted, it will delete the stream.
+ */
+CGDataProviderRef SkCreateDataProviderFromStream(std::unique_ptr<SkStreamRewindable>);
+
+CGDataProviderRef SkCreateDataProviderFromData(sk_sp<SkData>);
+
+#endif // defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+#endif // SkCGUtils_DEFINED
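(Illustrative usage, not part of the imported header, for SK_BUILD_FOR_MAC / SK_BUILD_FOR_IOS builds only: convert an SkBitmap to a CGImage and copy it back again. Error handling is omitted.)

    #include "SkCGUtils.h"
    #include "SkBitmap.h"

    static void cg_roundtrip(const SkBitmap& bm) {
        CGImageRef image = SkCreateCGImageRef(bm);    // uses the device-RGB colorspace
        SkBitmap copy;
        SkCreateBitmapFromCGImage(&copy, image);      // allocates and copies pixels
        CGImageRelease(image);                        // caller owns the CGImageRef
    }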
diff --git a/gfx/skia/skia/include/views/SkApplication.h b/gfx/skia/skia/include/views/SkApplication.h
new file mode 100644
index 000000000..8f63539a3
--- /dev/null
+++ b/gfx/skia/skia/include/views/SkApplication.h
@@ -0,0 +1,30 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkApplication_DEFINED
+#define SkApplication_DEFINED
+
+class SkOSWindow;
+
+extern SkOSWindow* create_sk_window(void* hwnd, int argc, char** argv);
+extern void application_init();
+extern void application_term();
+
+#ifdef SK_BUILD_FOR_IOS
+enum IOS_launch_type {
+ kError_iOSLaunchType = -1,
+ kTool_iOSLaunchType = 0,
+ kApplication__iOSLaunchType = 1
+};
+
+extern IOS_launch_type set_cmd_line_args(int argc, char *argv[],
+ const char* resourceDir);
+#endif
+
+#endif // SkApplication_DEFINED
diff --git a/gfx/skia/skia/include/views/SkEvent.h b/gfx/skia/skia/include/views/SkEvent.h
new file mode 100644
index 000000000..649553000
--- /dev/null
+++ b/gfx/skia/skia/include/views/SkEvent.h
@@ -0,0 +1,293 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEvent_DEFINED
+#define SkEvent_DEFINED
+
+#include "SkDOM.h"
+#include "SkMetaData.h"
+#include "SkString.h"
+
+#include "../private/SkLeanWindows.h"
+
+/** Unique 32bit id used to identify an instance of SkEventSink. When events are
+ posted, they are posted to a specific sinkID. When it is time to dispatch the
+ event, the sinkID is used to find the specific SkEventSink object. If it is found,
+ its doEvent() method is called with the event.
+*/
+typedef uint32_t SkEventSinkID;
+
+/**
+ * \class SkEvent
+ *
+ * When an event is dispatched from the event queue, it is either sent to
+ * the eventsink matching the target ID (if not 0), or the target proc is
+ * called (if not NULL).
+ */
+class SkEvent {
+public:
+ /**
+ * Function pointer that takes an event and returns true if it "handled" it.
+ */
+ typedef bool (*Proc)(const SkEvent& evt);
+
+ SkEvent();
+ explicit SkEvent(const SkString& type, SkEventSinkID = 0);
+ explicit SkEvent(const char type[], SkEventSinkID = 0);
+ SkEvent(const SkEvent& src);
+ ~SkEvent();
+
+ /** Copy the event's type into the specified SkString parameter */
+ void getType(SkString* str) const;
+
+ /** Returns true if the event's type matches exactly the specified type (case sensitive) */
+ bool isType(const SkString& str) const;
+
+ /** Returns true if the event's type matches exactly the specified type (case sensitive) */
+ bool isType(const char type[], size_t len = 0) const;
+
+ /**
+ * Set the event's type to the specified string.
+ */
+ void setType(const SkString&);
+
+ /**
+ * Set the event's type to the specified string.
+ */
+ void setType(const char type[], size_t len = 0);
+
+ /**
+ * Return the target ID, or 0 if there is none.
+ *
+ * When an event is dispatched from the event queue, it is either sent to
+ * the eventsink matching the targetID (if not 0), or the target proc is
+ * called (if not NULL).
+ */
+ SkEventSinkID getTargetID() const { return fTargetID; }
+
+ /**
+ * Set the target ID for this event. 0 means none. Calling this will
+ * automatically clear the targetProc to null.
+ *
+ * When an event is dispatched from the event queue, it is either sent to
+ * the eventsink matching the targetID (if not 0), or the target proc is
+ * called (if not NULL).
+ */
+ SkEvent* setTargetID(SkEventSinkID targetID) {
+ fTargetProc = NULL;
+ fTargetID = targetID;
+ return this;
+ }
+
+ /**
+ * Return the target proc, or NULL if it has none.
+ *
+ * When an event is dispatched from the event queue, it is either sent to
+ * the eventsink matching the targetID (if not 0), or the target proc is
+ * called (if not NULL).
+ */
+ Proc getTargetProc() const { return fTargetProc; }
+
+ /**
+ * Set the target proc for this event. NULL means none. Calling this will
+ * automatically clear the targetID to 0.
+ *
+ * When an event is dispatched from the event queue, it is either sent to
+ * the eventsink matching the targetID (if not 0), or the target proc is
+ * called (if not NULL).
+ */
+ SkEvent* setTargetProc(Proc proc) {
+ fTargetID = 0;
+ fTargetProc = proc;
+ return this;
+ }
+
+ /**
+ * Return the event's unnamed 32bit field. Default value is 0
+ */
+ uint32_t getFast32() const { return f32; }
+
+ /**
+ * Set the event's unnamed 32bit field.
+ */
+ void setFast32(uint32_t x) { f32 = x; }
+
+ /** Return true if the event contains the named 32bit field, and return the field
+ in value (if value is non-null). If there is no matching named field, return false
+ and ignore the value parameter.
+ */
+ bool findS32(const char name[], int32_t* value = NULL) const { return fMeta.findS32(name, value); }
+ /** Return true if the event contains the named SkScalar field, and return the field
+ in value (if value is non-null). If there is no matching named field, return false
+ and ignore the value parameter.
+ */
+ bool findScalar(const char name[], SkScalar* value = NULL) const { return fMeta.findScalar(name, value); }
+ /** Return true if the event contains the named SkScalar field, and return the fields
+ in value[] (if value is non-null), and return the number of SkScalars in count (if count is non-null).
+ If there is no matching named field, return false and ignore the value and count parameters.
+ */
+ const SkScalar* findScalars(const char name[], int* count, SkScalar values[] = NULL) const { return fMeta.findScalars(name, count, values); }
+ /** Return the value of the named string field, or if no matching named field exists, return null.
+ */
+ const char* findString(const char name[]) const { return fMeta.findString(name); }
+ /** Return true if the event contains the named pointer field, and return the field
+ in value (if value is non-null). If there is no matching named field, return false
+ and ignore the value parameter.
+ */
+ bool findPtr(const char name[], void** value) const { return fMeta.findPtr(name, value); }
+ bool findBool(const char name[], bool* value) const { return fMeta.findBool(name, value); }
+ const void* findData(const char name[], size_t* byteCount = NULL) const {
+ return fMeta.findData(name, byteCount);
+ }
+
+ /** Returns true if the event contains the named 32bit field, and if it equals the specified value */
+ bool hasS32(const char name[], int32_t value) const { return fMeta.hasS32(name, value); }
+ /** Returns true if the event contains the named SkScalar field, and if it equals the specified value */
+ bool hasScalar(const char name[], SkScalar value) const { return fMeta.hasScalar(name, value); }
+ /** Returns true if the event contains the named string field, and if it equals (using strcmp) the specified value */
+ bool hasString(const char name[], const char value[]) const { return fMeta.hasString(name, value); }
+ /** Returns true if the event contains the named pointer field, and if it equals the specified value */
+ bool hasPtr(const char name[], void* value) const { return fMeta.hasPtr(name, value); }
+ bool hasBool(const char name[], bool value) const { return fMeta.hasBool(name, value); }
+ bool hasData(const char name[], const void* data, size_t byteCount) const {
+ return fMeta.hasData(name, data, byteCount);
+ }
+
+ /** Add/replace the named 32bit field to the event. In XML use the subelement <data name=... s32=... /> */
+ void setS32(const char name[], int32_t value) { fMeta.setS32(name, value); }
+ /** Add/replace the named SkScalar field to the event. In XML use the subelement <data name=... scalar=... /> */
+ void setScalar(const char name[], SkScalar value) { fMeta.setScalar(name, value); }
+ /** Add/replace the named SkScalar[] field to the event. */
+ SkScalar* setScalars(const char name[], int count, const SkScalar values[] = NULL) { return fMeta.setScalars(name, count, values); }
+ /** Add/replace the named string field to the event. In XML use the subelement <data name=... string=... */
+ void setString(const char name[], const SkString& value) { fMeta.setString(name, value.c_str()); }
+ /** Add/replace the named string field to the event. In XML use the subelement <data name=... string=... */
+ void setString(const char name[], const char value[]) { fMeta.setString(name, value); }
+ /** Add/replace the named pointer field to the event. There is no XML equivalent for this call */
+ void setPtr(const char name[], void* value) { fMeta.setPtr(name, value); }
+ void setBool(const char name[], bool value) { fMeta.setBool(name, value); }
+ void setData(const char name[], const void* data, size_t byteCount) {
+ fMeta.setData(name, data, byteCount);
+ }
+
+ /** Return the underlying metadata object */
+ SkMetaData& getMetaData() { return fMeta; }
+ /** Return the underlying metadata object */
+ const SkMetaData& getMetaData() const { return fMeta; }
+
+ /** Call this to initialize the event from the specified XML node */
+ void inflate(const SkDOM&, const SkDOM::Node*);
+
+ SkDEBUGCODE(void dump(const char title[] = NULL);)
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ /**
+ * Post to the event queue using the event's targetID or target-proc.
+ *
+ * The event must be dynamically allocated, as ownership is transferred to
+ * the event queue. It cannot be allocated on the stack or in a global.
+ */
+ void post() {
+ return this->postDelay(0);
+ }
+
+ /**
+ * Post to the event queue using the event's targetID or target-proc and
+ * the specified millisecond delay.
+ *
+ * The event must be dynamically allocated, as ownership is transferred to
+ * the event queue. It cannot be allocated on the stack or in a global.
+ */
+ void postDelay(SkMSec delay);
+
+ /**
+ * Post to the event queue using the event's targetID or target-proc.
+ * The event will be delivered no sooner than the specified millisecond
+ * time, as measured by GetMSecsSinceStartup().
+ *
+ * The event must be dynamically allocated, as ownership is transferred to
+ * the event queue. It cannot be allocated on the stack or in a global.
+ */
+ void postTime(SkMSec time);
+
+ /**
+ * Returns ~zero the first time it's called, then returns the number of
+ * milliseconds since the first call. Behavior is undefined if the program
+ * runs more than ~25 days.
+ */
+ static SkMSec GetMSecsSinceStartup();
+
+ ///////////////////////////////////////////////
+ /** Porting layer must call these functions **/
+ ///////////////////////////////////////////////
+
+ /** Global initialization function for the SkEvent system. Should be called exactly
+ once before any other event method is called, and should be called after the
+ call to SkGraphics::Init().
+ */
+ static void Init();
+ /** Global cleanup function for the SkEvent system. Should be called exactly once after
+ all event methods have been called.
+ */
+ static void Term();
+
+ /** Call this to process one event from the queue. If it returns true, there are more events
+ to process.
+ */
+ static bool ProcessEvent();
+ /** Call this whenever the requested timer has expired (requested by a call to SignalQueueTimer).
+ It will post any delayed events whose time has "expired" onto the event queue.
+ It may also call SignalQueueTimer() and SignalNonEmptyQueue().
+ */
+ static void ServiceQueueTimer();
+
+ /** Return the number of queued events. note that this value may be obsolete
+ upon return, since another thread may have called ProcessEvent() or
+ Post() after the count was made.
+ */
+ static int CountEventsOnQueue();
+
+ ////////////////////////////////////////////////////
+ /** Porting layer must implement these functions **/
+ ////////////////////////////////////////////////////
+
+ /** Called whenever an SkEvent is posted to an empty queue, so that the OS
+ can be told to later call Dequeue().
+ */
+ static void SignalNonEmptyQueue();
+ /** Called whenever the delay until the next delayed event changes. If zero is
+ passed, then there are no more queued delay events.
+ */
+ static void SignalQueueTimer(SkMSec delay);
+
+#if defined(SK_BUILD_FOR_WIN)
+ static bool WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam);
+#endif
+
+private:
+ SkMetaData fMeta;
+ mutable char* fType; // may be characters with low bit set to know that it is not a pointer
+ uint32_t f32;
+
+ // 'there can be only one' (non-zero) between target-id and target-proc
+ SkEventSinkID fTargetID;
+ Proc fTargetProc;
+
+ // these are for our implementation of the event queue
+ SkMSec fTime;
+ SkEvent* fNextEvent; // either in the delay or normal event queue
+
+ void initialize(const char* type, size_t typeLen, SkEventSinkID);
+
+ static bool Enqueue(SkEvent* evt);
+ static SkMSec EnqueueTime(SkEvent* evt, SkMSec time);
+ static SkEvent* Dequeue();
+ static bool QHasEvents();
+};
+
+#endif
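(Illustrative usage, not part of the imported header: the event type and field names below are placeholders. Posted events must be heap-allocated because the queue takes ownership.)

    #include "SkEvent.h"

    static void post_resize_event(SkEventSinkID target) {
        SkEvent* evt = new SkEvent("window-resize", target);  // queue takes ownership
        evt->setS32("width", 800);
        evt->setS32("height", 600);
        evt->postDelay(16);   // deliver roughly one frame (16 ms) later
    }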
diff --git a/gfx/skia/skia/include/views/SkEventSink.h b/gfx/skia/skia/include/views/SkEventSink.h
new file mode 100644
index 000000000..9714532f9
--- /dev/null
+++ b/gfx/skia/skia/include/views/SkEventSink.h
@@ -0,0 +1,112 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkEventSink_DEFINED
+#define SkEventSink_DEFINED
+
+#include "SkRefCnt.h"
+#include "SkEvent.h"
+
+struct SkTagList;
+
+/** \class SkEventSink
+
+ SkEventSink is the base class for all objects that receive SkEvents.
+*/
+class SkEventSink : public SkRefCnt {
+public:
+
+
+ SkEventSink();
+ virtual ~SkEventSink();
+
+ /**
+ * Returns this eventsink's unique ID. Use this to post SkEvents to
+ * this eventsink.
+ */
+ SkEventSinkID getSinkID() const { return fID; }
+
+ /**
+ * Call this to pass an event to this object for processing. Returns true if the
+ * event was handled.
+ */
+ bool doEvent(const SkEvent&);
+
+ /** Returns true if the sink (or one of its subclasses) understands the event as a query.
+ If so, the sink may modify the event to communicate its "answer".
+ */
+ bool doQuery(SkEvent* query);
+
+ /**
+ * Add sinkID to the list of listeners, to receive events from calls to sendToListeners()
+ * and postToListeners(). If sinkID already exists in the listener list, no change is made.
+ */
+ void addListenerID(SkEventSinkID sinkID);
+
+ /**
+ * Copy listeners from one event sink to another, typically from parent to child.
+ * @param from the event sink to copy the listeners from
+ */
+ void copyListeners(const SkEventSink& from);
+
+ /**
+ * Remove sinkID from the list of listeners. If sinkID does not appear in the list,
+ * no change is made.
+ */
+ void removeListenerID(SkEventSinkID);
+
+ /**
+ * Returns true if there are 1 or more listeners attached to this eventsink
+ */
+ bool hasListeners() const;
+
+ /**
+ * Posts a copy of evt to each of the eventsinks in the listener list.
+ * This ignores the targetID and target proc in evt.
+ */
+ void postToListeners(const SkEvent& evt, SkMSec delay = 0);
+
+ enum EventResult {
+ kHandled_EventResult, //!< the eventsink returned true from its doEvent method
+ kNotHandled_EventResult, //!< the eventsink returned false from its doEvent method
+ kSinkNotFound_EventResult //!< no matching eventsink was found for the event's getSink().
+ };
+
+ /**
+ * DoEvent handles dispatching the event to its target ID or proc.
+ */
+ static EventResult DoEvent(const SkEvent&);
+
+ /**
+ * Returns the matching eventsink, or null if not found
+ */
+ static SkEventSink* FindSink(SkEventSinkID);
+
+protected:
+ /** Override this to handle events in your subclass. Be sure to call the inherited version
+ for events that you don't handle.
+ */
+ virtual bool onEvent(const SkEvent&);
+ virtual bool onQuery(SkEvent*);
+
+ SkTagList* findTagList(U8CPU tag) const;
+ void addTagList(SkTagList*);
+ void removeTagList(U8CPU tag);
+
+private:
+ SkEventSinkID fID;
+ SkTagList* fTagHead;
+
+ // for our private link-list
+ SkEventSink* fNextSink;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
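(Illustrative subclass, not part of the imported header; the class and event names are hypothetical and match the sketch after SkEvent.h above.)

    #include "SkEventSink.h"

    class ResizeSink : public SkEventSink {
    protected:
        bool onEvent(const SkEvent& evt) override {
            if (evt.isType("window-resize")) {
                int32_t w = 0, h = 0;
                evt.findS32("width", &w);
                evt.findS32("height", &h);
                // ... react to the new size here ...
                return true;                          // handled
            }
            return this->INHERITED::onEvent(evt);     // give the base class a chance
        }

    private:
        typedef SkEventSink INHERITED;
    };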
diff --git a/gfx/skia/skia/include/views/SkKey.h b/gfx/skia/skia/include/views/SkKey.h
new file mode 100644
index 000000000..036e2c317
--- /dev/null
+++ b/gfx/skia/skia/include/views/SkKey.h
@@ -0,0 +1,62 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkKey_DEFINED
+#define SkKey_DEFINED
+
+#include "SkTypes.h"
+
+enum SkKey {
+ //reordering these to match android.app.KeyEvent
+ kNONE_SkKey, //corresponds to android's UNKNOWN
+
+ kLeftSoftKey_SkKey,
+ kRightSoftKey_SkKey,
+
+ kHome_SkKey, //!< the home key - added to match android
+ kBack_SkKey, //!< (CLR)
+ kSend_SkKey, //!< the green (talk) key
+ kEnd_SkKey, //!< the red key
+
+ k0_SkKey,
+ k1_SkKey,
+ k2_SkKey,
+ k3_SkKey,
+ k4_SkKey,
+ k5_SkKey,
+ k6_SkKey,
+ k7_SkKey,
+ k8_SkKey,
+ k9_SkKey,
+ kStar_SkKey, //!< the * key
+ kHash_SkKey, //!< the # key
+
+ kUp_SkKey,
+ kDown_SkKey,
+ kLeft_SkKey,
+ kRight_SkKey,
+
+ kOK_SkKey, //!< the center key
+
+ kVolUp_SkKey, //!< volume up - match android
+ kVolDown_SkKey, //!< volume down - same
+ kPower_SkKey, //!< power button - same
+ kCamera_SkKey, //!< camera - same
+
+ kSkKeyCount
+};
+
+enum SkModifierKeys {
+ kShift_SkModifierKey = 1 << 0,
+ kControl_SkModifierKey = 1 << 1,
+ kOption_SkModifierKey = 1 << 2, // same as ALT
+ kCommand_SkModifierKey = 1 << 3,
+};
+
+#endif
diff --git a/gfx/skia/skia/include/views/SkOSMenu.h b/gfx/skia/skia/include/views/SkOSMenu.h
new file mode 100644
index 000000000..913b91538
--- /dev/null
+++ b/gfx/skia/skia/include/views/SkOSMenu.h
@@ -0,0 +1,182 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkOSMenu_DEFINED
+#define SkOSMenu_DEFINED
+
+#include "../private/SkTDArray.h"
+#include "SkEvent.h"
+
+class SkOSMenu {
+public:
+ explicit SkOSMenu(const char title[] = "");
+ ~SkOSMenu();
+
+ /**
+ * Each of these (except action) has an associated value, which is stored in
+ * the event payload for the item.
+ * Each type has a specific type for its value...
+ * Action : none
+ * List : int (selected index)
+ * Segmented : int (selected index)
+ * Slider : float
+ * Switch : bool
+ * TextField : string
+ * TriState : TriState
+ * Custom : custom object/value
+ */
+ enum Type {
+ kAction_Type,
+ kList_Type,
+ kSlider_Type,
+ kSwitch_Type,
+ kTriState_Type,
+ kTextField_Type,
+ kCustom_Type
+ };
+
+ enum TriState {
+ kMixedState = -1,
+ kOffState = 0,
+ kOnState = 1
+ };
+
+ class Item {
+ public:
+ /**
+ * Auto increments a global to generate a unique ID for each new item
+ * Note: Thread safe
+ */
+ Item(const char label[], SkOSMenu::Type type, const char slotName[],
+ SkEvent* evt);
+ ~Item() { delete fEvent; }
+
+ SkEvent* getEvent() const { return fEvent; }
+ int getID() const { return fID; }
+ const char* getLabel() const { return fLabel.c_str(); }
+ const char* getSlotName() const { return fSlotName.c_str(); }
+ Type getType() const { return fType; }
+ void setKeyEquivalent(SkUnichar key) { fKey = key; }
+ SkUnichar getKeyEquivalent() const { return fKey; }
+
+ /**
+ * Helper functions for predefined types
+ */
+ void setBool(bool value) const; //For Switch
+ void setScalar(SkScalar value) const; //For Slider
+ void setInt(int value) const; //For List
+ void setTriState(TriState value) const; //For Tristate
+ void setString(const char value[]) const; //For TextField
+
+ /**
+ * Post the event associated with the menu item to its target; any changes to
+ * the associated event must be made prior to calling this method.
+ */
+ void postEvent() const { (new SkEvent(*(fEvent)))->post(); }
+
+ private:
+ int fID;
+ SkEvent* fEvent;
+ SkString fLabel;
+ SkString fSlotName;
+ Type fType;
+ SkUnichar fKey;
+ };
+
+ void reset();
+ const char* getTitle() const { return fTitle.c_str(); }
+ void setTitle (const char title[]) { fTitle.set(title); }
+ int getCount() const { return fItems.count(); }
+ const Item* getItemByID(int itemID) const;
+ void getItems(const Item* items[]) const;
+
+ /**
+ * Assign key to the menu item with itemID; does nothing if there's no
+ * item with the given id.
+ */
+ void assignKeyEquivalentToItem(int itemID, SkUnichar key);
+ /**
+ * Call this in a SkView's onHandleChar to trigger any menu item with the
+ * given key equivalent. If such an item is found, the method returns true
+ * and the item's corresponding event is triggered (default behavior is
+ * defined for switches (toggle), tristates (cycle), and lists (cycle);
+ * for anything else, the attached event is posted without state changes).
+ * If no menu item matches the key, false is returned.
+ */
+ bool handleKeyEquivalent(SkUnichar key);
+
+ /**
+ * The following functions append new items to the menu and return their
+ * associated unique id, which can be used by the client to refer to
+ * the menu item created and change its state. slotName specifies the string
+ * identifier of any state/value to be returned in the item's SkEvent object
+ * NOTE: evt must be dynamically allocated
+ */
+ int appendItem(const char label[], Type type, const char slotName[],
+ SkEvent* evt);
+
+ /**
+ * Create predefined items with the given parameters. To be used with the
+ * other helper functions below to retrieve/update state information.
+ * Note: the helper functions below assume that slotName is UNIQUE for all
+ * menu items of the same type since it's used to identify the event
+ */
+ int appendAction(const char label[], SkEventSinkID target);
+ int appendList(const char label[], const char slotName[],
+ SkEventSinkID target, int defaultIndex, const char* ...);
+ int appendSlider(const char label[], const char slotName[],
+ SkEventSinkID target, SkScalar min, SkScalar max,
+ SkScalar defaultValue);
+ int appendSwitch(const char label[], const char slotName[],
+ SkEventSinkID target, bool defaultState = false);
+ int appendTriState(const char label[], const char slotName[],
+ SkEventSinkID target, TriState defaultState = kOffState);
+ int appendTextField(const char label[], const char slotName[],
+ SkEventSinkID target, const char placeholder[] = "");
+
+
+ /**
+ * Helper functions to retrieve information other than the stored value for
+ * some predefined types
+ */
+ static bool FindListItemCount(const SkEvent& evt, int* count);
+ /**
+ * Ensure that the items array can store n SkStrings where n is the count
+ * extracted using FindListItemCount
+ */
+ static bool FindListItems(const SkEvent& evt, SkString items[]);
+ static bool FindSliderMin(const SkEvent& evt, SkScalar* min);
+ static bool FindSliderMax(const SkEvent& evt, SkScalar* max);
+
+ /**
+ * Returns true if an action with the given label is found, false otherwise
+ */
+ static bool FindAction(const SkEvent& evt, const char label[]);
+ /**
+ * The following helper functions will return true if evt is generated from
+ * a predefined item type and retrieve the corresponding state information.
+ * They will return false and leave value unchanged if there's a type
+ * mismatch or slotName is incorrect
+ */
+ static bool FindListIndex(const SkEvent& evt, const char slotName[], int* value);
+ static bool FindSliderValue(const SkEvent& evt, const char slotName[], SkScalar* value);
+ static bool FindSwitchState(const SkEvent& evt, const char slotName[], bool* value);
+ static bool FindTriState(const SkEvent& evt, const char slotName[], TriState* value);
+ static bool FindText(const SkEvent& evt, const char slotName[], SkString* value);
+
+private:
+ SkString fTitle;
+ SkTDArray<Item*> fItems;
+
+ // illegal
+ SkOSMenu(const SkOSMenu&);
+ SkOSMenu& operator=(const SkOSMenu&);
+};
+
+#endif
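(Illustrative usage, not part of the imported header: the labels and slot names are arbitrary strings that come back in each item's SkEvent.)

    #include "SkOSMenu.h"

    static void build_demo_menu(SkOSMenu* menu, SkEventSinkID target) {
        menu->setTitle("Demo");
        menu->appendAction("Reset View", target);
        menu->appendSwitch("Show FPS", "show-fps", target, false);
        menu->appendSlider("Zoom", "zoom", target, 0.5f, 4.0f, 1.0f);
    }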
diff --git a/gfx/skia/skia/include/views/SkOSWindow_Mac.h b/gfx/skia/skia/include/views/SkOSWindow_Mac.h
new file mode 100644
index 000000000..41766a0f5
--- /dev/null
+++ b/gfx/skia/skia/include/views/SkOSWindow_Mac.h
@@ -0,0 +1,60 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOSWindow_MacCocoa_DEFINED
+#define SkOSWindow_MacCocoa_DEFINED
+
+#include "SkWindow.h"
+
+class SkOSWindow : public SkWindow {
+public:
+ SkOSWindow(void* hwnd);
+ ~SkOSWindow();
+ void* getHWND() const { return fHWND; }
+
+ virtual bool onDispatchClick(int x, int y, Click::State state,
+ void* owner, unsigned modi);
+ enum SkBackEndTypes {
+ kNone_BackEndType,
+#if SK_SUPPORT_GPU
+ kNativeGL_BackEndType,
+#endif
+#if SK_ANGLE
+ kANGLE_BackEndType,
+#endif // SK_ANGLE
+ };
+
+ void release();
+ bool attach(SkBackEndTypes attachType, int msaaSampleCount, bool deepColor,
+ AttachmentInfo*);
+ void present();
+
+ bool makeFullscreen();
+ void closeWindow();
+ void setVsync(bool);
+protected:
+ // overrides from SkEventSink
+ virtual bool onEvent(const SkEvent& evt);
+ // overrides from SkWindow
+ virtual void onHandleInval(const SkIRect&);
+ // overrides from SkView
+ virtual void onAddMenu(const SkOSMenu*);
+ virtual void onUpdateMenu(const SkOSMenu*);
+ virtual void onSetTitle(const char[]);
+
+private:
+ void* fHWND;
+ bool fInvalEventIsPending;
+ void* fNotifier;
+#if SK_SUPPORT_GPU
+ void* fGLContext;
+#endif
+ typedef SkWindow INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/views/SkOSWindow_SDL.h b/gfx/skia/skia/include/views/SkOSWindow_SDL.h
new file mode 100644
index 000000000..65685d1d2
--- /dev/null
+++ b/gfx/skia/skia/include/views/SkOSWindow_SDL.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOSWindow_SDL_DEFINED
+#define SkOSWindow_SDL_DEFINED
+
+#include "SDL.h"
+#include "SDL_opengl.h"
+#include "SkWindow.h"
+
+class SkOSWindow : public SkWindow {
+public:
+ SkOSWindow(void*);
+ virtual ~SkOSWindow();
+
+ enum SkBackEndTypes {
+ kNone_BackEndType, // TODO: remove this, it's not a real option.
+ kNativeGL_BackEndType,
+#if SK_ANGLE
+ kANGLE_BackEndType,
+#endif // SK_ANGLE
+ };
+
+ void release();
+ bool attach(SkBackEndTypes attachType, int msaaSampleCount, bool deepColor, AttachmentInfo*);
+ void present();
+ bool makeFullscreen();
+ void setVsync(bool);
+ void closeWindow();
+ static void RunEventLoop();
+
+protected:
+ void onSetTitle(const char title[]) override;
+
+private:
+ void createWindow(int msaaSampleCount);
+ void destroyWindow();
+ void updateWindowTitle();
+ static SkOSWindow* GetInstanceForWindowID(Uint32 windowID);
+ static bool HasDirtyWindows();
+ static void UpdateDirtyWindows();
+ static void HandleEvent(const SDL_Event&);
+
+ SDL_Window* fWindow;
+ SDL_GLContext fGLContext;
+ int fWindowMSAASampleCount;
+ typedef SkWindow INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/views/SkOSWindow_Unix.h b/gfx/skia/skia/include/views/SkOSWindow_Unix.h
new file mode 100644
index 000000000..9d1b8e039
--- /dev/null
+++ b/gfx/skia/skia/include/views/SkOSWindow_Unix.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOSWindow_Unix_DEFINED
+#define SkOSWindow_Unix_DEFINED
+
+#include <GL/glx.h>
+#include <X11/Xlib.h>
+
+#include "SkWindow.h"
+
+class SkEvent;
+
+struct SkUnixWindow {
+ Display* fDisplay;
+ Window fWin;
+ size_t fOSWin;
+ GC fGc;
+ GLXContext fGLContext;
+};
+
+class SkOSWindow : public SkWindow {
+public:
+ SkOSWindow(void*);
+ ~SkOSWindow();
+
+ void* getHWND() const { return (void*)fUnixWindow.fWin; }
+ void* getDisplay() const { return (void*)fUnixWindow.fDisplay; }
+ void* getUnixWindow() const { return (void*)&fUnixWindow; }
+ void loop();
+
+ enum SkBackEndTypes {
+ kNone_BackEndType,
+ kNativeGL_BackEndType,
+#if SK_ANGLE
+ kANGLE_BackEndType,
+#endif // SK_ANGLE
+ };
+
+ bool attach(SkBackEndTypes attachType, int msaaSampleCount, bool deepColor, AttachmentInfo*);
+ void release();
+ void present();
+
+ int getMSAASampleCount() const { return fMSAASampleCount; }
+
+ //static bool PostEvent(SkEvent* evt, SkEventSinkID, SkMSec delay);
+
+ bool makeFullscreen();
+ void setVsync(bool);
+ void closeWindow();
+
+protected:
+ // Overridden from SkWindow:
+ void onSetTitle(const char title[]) override;
+
+private:
+ enum NextXEventResult {
+ kContinue_NextXEventResult,
+ kQuitRequest_NextXEventResult,
+ kPaintRequest_NextXEventResult
+ };
+
+ NextXEventResult nextXEvent();
+ void doPaint();
+ void mapWindowAndWait();
+
+ // Forcefully closes the window. If a graceful shutdown is desired then call the public
+ // closeWindow method
+ void internalCloseWindow();
+ void initWindow(int newMSAASampleCount, AttachmentInfo* info);
+
+ SkUnixWindow fUnixWindow;
+
+ // Needed for GL
+ XVisualInfo* fVi;
+ // we recreate the underlying xwindow if this changes
+ int fMSAASampleCount;
+
+ typedef SkWindow INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/views/SkOSWindow_Win.h b/gfx/skia/skia/include/views/SkOSWindow_Win.h
new file mode 100644
index 000000000..7ed22a651
--- /dev/null
+++ b/gfx/skia/skia/include/views/SkOSWindow_Win.h
@@ -0,0 +1,135 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkOSWindow_Win_DEFINED
+#define SkOSWindow_Win_DEFINED
+
+#include "../private/SkTHash.h"
+#include "SkWindow.h"
+#include <functional>
+
+#if SK_ANGLE
+#include "EGL/egl.h"
+#endif
+
+class SkOSWindow : public SkWindow {
+public:
+ struct WindowInit {
+ const TCHAR* fClass;
+ HINSTANCE fInstance;
+ };
+
+ SkOSWindow(const void* winInit);
+ virtual ~SkOSWindow();
+
+ static bool PostEvent(SkEvent* evt, SkEventSinkID, SkMSec delay);
+
+ enum SkBackEndTypes {
+ kNone_BackEndType,
+#if SK_SUPPORT_GPU
+ kNativeGL_BackEndType,
+#if SK_ANGLE
+ kANGLE_BackEndType,
+#endif // SK_ANGLE
+#endif // SK_SUPPORT_GPU
+ };
+
+ bool attach(SkBackEndTypes attachType, int msaaSampleCount, bool deepColor, AttachmentInfo*);
+ void release();
+ void present();
+
+ bool wndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam);
+ static bool QuitOnDeactivate(HWND hWnd);
+
+ enum {
+ SK_WM_SkEvent = WM_APP + 1000,
+ SK_WM_SkTimerID = 0xFFFF // just need a non-zero value
+ };
+
+ bool makeFullscreen();
+ void setVsync(bool);
+ void closeWindow();
+
+ static SkOSWindow* GetOSWindowForHWND(void* hwnd) {
+ SkOSWindow** win = gHwndToOSWindowMap.find(hwnd);
+ if (!win) {
+ return NULL;
+ }
+ return *win;
+ }
+
+ // Iterates f over all the SkOSWindows and their corresponding HWNDs.
+ // The void* argument to f is a HWND.
+ static void ForAllWindows(const std::function<void(void*, SkOSWindow**)>& f) {
+ gHwndToOSWindowMap.foreach(f);
+ }
+
+protected:
+ virtual bool quitOnDeactivate() { return true; }
+
+ // overrides from SkWindow
+ virtual void onHandleInval(const SkIRect&);
+ // overrides from SkView
+ virtual void onAddMenu(const SkOSMenu*);
+
+ virtual void onSetTitle(const char title[]);
+
+private:
+ static SkTHashMap<void*, SkOSWindow*> gHwndToOSWindowMap;
+
+ WindowInit fWinInit;
+ void* fHWND;
+
+ void doPaint(void* ctx);
+
+#if SK_SUPPORT_GPU
+ void* fHGLRC;
+#if SK_ANGLE
+ EGLDisplay fDisplay;
+ EGLContext fContext;
+ EGLSurface fSurface;
+ EGLConfig fConfig;
+ SkAutoTUnref<const GrGLInterface> fANGLEInterface;
+#endif // SK_ANGLE
+#endif // SK_SUPPORT_GPU
+
+ bool fFullscreen;
+ struct SavedWindowState {
+ bool fZoomed;
+ LONG fStyle;
+ LONG fExStyle;
+ RECT fRect;
+ LONG fScreenWidth;
+ LONG fScreenHeight;
+ LONG fScreenBits;
+ void* fHWND;
+ } fSavedWindowState;
+
+ HMENU fMBar;
+
+ SkBackEndTypes fAttached;
+
+ void updateSize();
+#if SK_SUPPORT_GPU
+ bool attachGL(int msaaSampleCount, bool deepColor, AttachmentInfo* info);
+ void detachGL();
+ void presentGL();
+
+#if SK_ANGLE
+ bool attachANGLE(int msaaSampleCount, AttachmentInfo* info);
+ void detachANGLE();
+ void presentANGLE();
+#endif // SK_ANGLE
+
+#endif // SK_SUPPORT_GPU
+
+ typedef SkWindow INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/views/SkOSWindow_iOS.h b/gfx/skia/skia/include/views/SkOSWindow_iOS.h
new file mode 100644
index 000000000..c0b2fc3f0
--- /dev/null
+++ b/gfx/skia/skia/include/views/SkOSWindow_iOS.h
@@ -0,0 +1,50 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOSWindow_iOS_DEFINED
+#define SkOSWindow_iOS_DEFINED
+
+#include "SkWindow.h"
+
+class SkOSWindow : public SkWindow {
+public:
+ SkOSWindow(void* hwnd);
+ ~SkOSWindow();
+ void* getHWND() const { return fHWND; }
+
+ enum SkBackEndTypes {
+ kNone_BackEndType,
+ kNativeGL_BackEndType,
+ };
+
+ void release();
+ bool attach(SkBackEndTypes attachType, int msaaSampleCount, bool deepColor,
+ AttachmentInfo*);
+ void present();
+
+ bool makeFullscreen() { return true; }
+ void closeWindow() { /* Not impl yet */ }
+ void setVsync(bool) { /* Can't turn off vsync? */ }
+
+protected:
+ // overrides from SkEventSink
+ virtual bool onEvent(const SkEvent& evt);
+ // overrides from SkWindow
+ virtual void onHandleInval(const SkIRect&);
+ // overrides from SkView
+ virtual void onAddMenu(const SkOSMenu*);
+ virtual void onUpdateMenu(SkOSMenu*);
+ virtual void onSetTitle(const char[]);
+
+private:
+ void* fHWND;
+ bool fInvalEventIsPending;
+ void* fNotifier;
+ typedef SkWindow INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/views/SkSystemEventTypes.h b/gfx/skia/skia/include/views/SkSystemEventTypes.h
new file mode 100644
index 000000000..bb2b5d527
--- /dev/null
+++ b/gfx/skia/skia/include/views/SkSystemEventTypes.h
@@ -0,0 +1,25 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkSystemEventTypes_DEFINED
+#define SkSystemEventTypes_DEFINED
+
+/*
+ The goal of these strings is two-fold:
+ 1) make funny strings (containing at least one char < 32) to avoid colliding with "user" strings
+ 2) keep them <= 4 bytes, so we can avoid an allocation in SkEvent::setType()
+*/
+#define SK_EventType_Delay "\xd" "lay"
+#define SK_EventType_Inval "nv" "\xa" "l"
+#define SK_EventType_Key "key" "\x1"
+#define SK_EventType_OnEnd "on" "\xe" "n"
+#define SK_EventType_Unichar "\xc" "har"
+#define SK_EventType_KeyUp "key" "\xf"
+
+#endif
diff --git a/gfx/skia/skia/include/views/SkTouchGesture.h b/gfx/skia/skia/include/views/SkTouchGesture.h
new file mode 100644
index 000000000..4d4c0312d
--- /dev/null
+++ b/gfx/skia/skia/include/views/SkTouchGesture.h
@@ -0,0 +1,83 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkTouchGesture_DEFINED
+#define SkTouchGesture_DEFINED
+
+#include "../private/SkTDArray.h"
+#include "SkMatrix.h"
+
+struct SkFlingState {
+ SkFlingState() : fActive(false) {}
+
+ bool isActive() const { return fActive; }
+ void stop() { fActive = false; }
+
+ void reset(float sx, float sy);
+ bool evaluateMatrix(SkMatrix* matrix);
+
+private:
+ SkPoint fDirection;
+ SkScalar fSpeed0;
+ double fTime0;
+ bool fActive;
+};
+
+class SkTouchGesture {
+public:
+ SkTouchGesture();
+ ~SkTouchGesture();
+
+ void touchBegin(void* owner, float x, float y);
+ void touchMoved(void* owner, float x, float y);
+ void touchEnd(void* owner);
+ void reset();
+
+ bool isActive() { return fFlinger.isActive(); }
+ void stop() { fFlinger.stop(); }
+
+ const SkMatrix& localM();
+ const SkMatrix& globalM() const { return fGlobalM; }
+
+ void setTransLimit(const SkRect& contentRect, const SkRect& windowRect);
+
+private:
+ enum State {
+ kEmpty_State,
+ kTranslate_State,
+ kZoom_State,
+ };
+
+ struct Rec {
+ void* fOwner;
+ float fStartX, fStartY;
+ float fPrevX, fPrevY;
+ float fLastX, fLastY;
+ float fPrevT, fLastT;
+ };
+ SkTDArray<Rec> fTouches;
+
+ State fState;
+ SkMatrix fLocalM, fGlobalM;
+ SkFlingState fFlinger;
+ double fLastUpMillis;
+ SkPoint fLastUpP;
+
+ // The following rects are used to limit the translation so the content never leaves the window
+ SkRect fContentRect, fWindowRect;
+ bool fIsTransLimited = false;
+
+ void limitTrans(); // here we only limit the translation with respect to globalM
+ void flushLocalM();
+ int findRec(void* owner) const;
+ void appendNewRec(void* owner, float x, float y);
+ float computePinch(const Rec&, const Rec&);
+ float limitTotalZoom(float scale) const;
+ bool handleDblTap(float, float);
+};
+
+#endif
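
A minimal usage sketch (not part of this patch), assuming a host that forwards platform touch events and owns the canvas; the opaque finger pointer is whatever per-touch identifier the platform hands out.

    #include "SkCanvas.h"
    #include "SkTouchGesture.h"

    void on_touch(SkCanvas* canvas, SkTouchGesture* gesture,
                  void* finger, float x, float y, bool down, bool up) {
        if (down) {
            gesture->touchBegin(finger, x, y);
        } else if (up) {
            gesture->touchEnd(finger);
        } else {
            gesture->touchMoved(finger, x, y);
        }
        canvas->concat(gesture->localM());   // apply the accumulated pan/zoom before drawing
    }
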
diff --git a/gfx/skia/skia/include/views/SkView.h b/gfx/skia/skia/include/views/SkView.h
new file mode 100644
index 000000000..17eb3800f
--- /dev/null
+++ b/gfx/skia/skia/include/views/SkView.h
@@ -0,0 +1,405 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkView_DEFINED
+#define SkView_DEFINED
+
+#include "SkEventSink.h"
+#include "SkRect.h"
+#include "SkDOM.h"
+#include "../private/SkTDict.h"
+#include "SkMatrix.h"
+#include "SkMetaData.h"
+
+class SkCanvas;
+class SkLayerView;
+
+/** \class SkView
+
+ SkView is the base class for screen management. All widgets and controls inherit
+ from SkView.
+*/
+class SkView : public SkEventSink {
+public:
+ enum Flag_Shift {
+ kVisible_Shift,
+ kEnabled_Shift,
+ kFocusable_Shift,
+ kFlexH_Shift,
+ kFlexV_Shift,
+ kNoClip_Shift,
+
+ kFlagShiftCount
+ };
+ enum Flag_Mask {
+ kVisible_Mask = 1 << kVisible_Shift, //!< set if the view is visible
+ kEnabled_Mask = 1 << kEnabled_Shift, //!< set if the view is enabled
+ kFocusable_Mask = 1 << kFocusable_Shift, //!< set if the view can receive focus
+ kFlexH_Mask = 1 << kFlexH_Shift, //!< set if the view's width is stretchable
+ kFlexV_Mask = 1 << kFlexV_Shift, //!< set if the view's height is stretchable
+ kNoClip_Mask = 1 << kNoClip_Shift, //!< set if the view is not clipped to its bounds
+
+ kAllFlagMasks = (uint32_t)(0 - 1) >> (32 - kFlagShiftCount)
+ };
+
+ SkView(uint32_t flags = 0);
+ virtual ~SkView();
+
+ /** Return the flags associated with the view
+ */
+ uint32_t getFlags() const { return fFlags; }
+ /** Set the flags associated with the view
+ */
+ void setFlags(uint32_t flags);
+
+ /** Helper that returns non-zero if the kVisible_Mask bit is set in the view's flags
+ */
+ int isVisible() const { return fFlags & kVisible_Mask; }
+ int isEnabled() const { return fFlags & kEnabled_Mask; }
+ int isFocusable() const { return fFlags & kFocusable_Mask; }
+ int isClipToBounds() const { return !(fFlags & kNoClip_Mask); }
+ /** Helper to set/clear the view's kVisible_Mask flag */
+ void setVisibleP(bool);
+ void setEnabledP(bool);
+ void setFocusableP(bool);
+ void setClipToBounds(bool);
+
+ /** Return the view's width */
+ SkScalar width() const { return fWidth; }
+ /** Return the view's height */
+ SkScalar height() const { return fHeight; }
+ /** Set the view's width and height. These must both be >= 0. This does not affect the view's loc */
+ void setSize(SkScalar width, SkScalar height);
+ void setSize(const SkPoint& size) { this->setSize(size.fX, size.fY); }
+ void setWidth(SkScalar width) { this->setSize(width, fHeight); }
+ void setHeight(SkScalar height) { this->setSize(fWidth, height); }
+ /** Return a rectangle set to [0, 0, width, height] */
+ void getLocalBounds(SkRect* bounds) const;
+
+ /** Loc - the view's offset with respect to its parent in its view hierarchy.
+ NOTE: For more complex transforms, use Local Matrix. The transformations
+ are applied in the following order:
+ canvas->translate(fLoc.fX, fLoc.fY);
+ canvas->concat(fMatrix);
+ */
+ /** Return the view's left edge */
+ SkScalar locX() const { return fLoc.fX; }
+ /** Return the view's top edge */
+ SkScalar locY() const { return fLoc.fY; }
+ /** Set the view's left and top edge. This does not affect the view's size */
+ void setLoc(SkScalar x, SkScalar y);
+ void setLoc(const SkPoint& loc) { this->setLoc(loc.fX, loc.fY); }
+ void setLocX(SkScalar x) { this->setLoc(x, fLoc.fY); }
+ void setLocY(SkScalar y) { this->setLoc(fLoc.fX, y); }
+
+ /** Local Matrix - matrix used to transform the view with respect to its
+ parent in its view hierarchy. Use setLocalMatrix to apply matrix
+ transformations to the current view and in turn affect its children.
+ NOTE: For simple offsets, use Loc. The transformations are applied in
+ the following order:
+ canvas->translate(fLoc.fX, fLoc.fY);
+ canvas->concat(fMatrix);
+ */
+ const SkMatrix& getLocalMatrix() const { return fMatrix; }
+ void setLocalMatrix(const SkMatrix& matrix);
+
+ /** Offset (move) the view by the specified dx and dy. This does not affect the view's size */
+ void offset(SkScalar dx, SkScalar dy);
+
+ /** Call this to have the view draw into the specified canvas. */
+ virtual void draw(SkCanvas* canvas);
+
+ /** Call this to invalidate part or all of a view, requesting that the view's
+ draw method be called. The rectangle parameter specifies the part of the view
+ that should be redrawn. If it is null, it specifies the entire view bounds.
+ */
+ void inval(SkRect* rectOrNull);
+
+ // Focus management
+
+ SkView* getFocusView() const;
+ bool hasFocus() const;
+
+ enum FocusDirection {
+ kNext_FocusDirection,
+ kPrev_FocusDirection,
+
+ kFocusDirectionCount
+ };
+ bool acceptFocus();
+ SkView* moveFocus(FocusDirection);
+
+ // Click handling
+
+ class Click {
+ public:
+ Click(SkView* target);
+ virtual ~Click();
+
+ const char* getType() const { return fType; }
+ bool isType(const char type[]) const;
+ void setType(const char type[]); // does NOT make a copy of the string
+ void copyType(const char type[]); // makes a copy of the string
+
+ enum State {
+ kDown_State,
+ kMoved_State,
+ kUp_State
+ };
+ SkPoint fOrig, fPrev, fCurr;
+ SkIPoint fIOrig, fIPrev, fICurr;
+ State fState;
+ void* fOwner;
+ unsigned fModifierKeys;
+
+ SkMetaData fMeta;
+ private:
+ SkEventSinkID fTargetID;
+ char* fType;
+ bool fWeOwnTheType;
+
+ void resetType();
+
+ friend class SkView;
+ };
+ Click* findClickHandler(SkScalar x, SkScalar y, unsigned modifierKeys);
+
+ static void DoClickDown(Click*, int x, int y, unsigned modi);
+ static void DoClickMoved(Click*, int x, int y, unsigned modi);
+ static void DoClickUp(Click*, int x, int y, unsigned modi);
+
+ /** Send the event to the view's parent, and its parent etc. until one of them
+ returns true from its onEvent call. The view that handled the event is returned.
+ If no parent handles the event, null is returned.
+ */
+ SkView* sendEventToParents(const SkEvent&);
+ /** Send the query to the view's parent, and its parent etc. until one of them
+ returns true from its onQuery call. The view that handled the query is returned.
+ If no parent handles the query, null is returned.
+ */
+ SkView* sendQueryToParents(SkEvent*);
+
+ // View hierarchy management
+
+ /** Return the view's parent, or null if it has none. This does not affect the parent's reference count. */
+ SkView* getParent() const { return fParent; }
+ /** Attach the child view to this view, and increment the child's reference count. The child view is added
+ such that it will be drawn after all other child views (front-most).
+ The child view parameter is returned.
+ */
+ SkView* attachChildToFront(SkView* child);
+ /** Attach the child view to this view, and increment the child's reference count. The child view is added
+ such that it will be drawn before all other child views (back-most).
+ The child view parameter is returned.
+ */
+ SkView* attachChildToBack(SkView* child);
+ /** If the view has a parent, detach the view from its parent and decrement the view's reference count.
+ If the parent was the only owner of the view, this will cause the view to be deleted.
+ */
+ void detachFromParent();
+ /** Detach all child views from this view. */
+ void detachAllChildren();
+
+ /** Convert the specified point from global coordinates into view-local coordinates
+ * Return true on success; false on failure
+ */
+ bool globalToLocal(SkPoint* pt) const {
+ if (pt) {
+ return this->globalToLocal(pt->fX, pt->fY, pt);
+ }
+ return true; // nothing to do so return true
+ }
+ /** Convert the specified x,y from global coordinates into view-local coordinates, returning
+ the answer in the local parameter.
+ */
+ bool globalToLocal(SkScalar globalX, SkScalar globalY, SkPoint* local) const;
+
+ /** \class F2BIter
+
+ Iterator that will return each of this view's children, in
+ front-to-back order (the order used for clicking). The first
+ call to next() returns the front-most child view. When
+ next() returns null, there are no more child views.
+ */
+ class F2BIter {
+ public:
+ F2BIter(const SkView* parent);
+ SkView* next();
+ private:
+ SkView* fFirstChild, *fChild;
+ };
+
+ /** \class B2FIter
+
+ Iterator that will return each of this view's children, in
+ back-to-front order (the order they are drawn). The first
+ call to next() returns the back-most child view. When
+ next() returns null, there are no more child views.
+ */
+ class B2FIter {
+ public:
+ B2FIter(const SkView* parent);
+ SkView* next();
+ private:
+ SkView* fFirstChild, *fChild;
+ };
+
+ /** \class Artist
+
+ Install a subclass of this in a view (calling setArtist()), and then the
+ default implementation of that view's onDraw() will invoke this object
+ automatically.
+ */
+ class Artist : public SkRefCnt {
+ public:
+
+
+ void draw(SkView*, SkCanvas*);
+ void inflate(const SkDOM&, const SkDOM::Node*);
+ protected:
+ virtual void onDraw(SkView*, SkCanvas*) = 0;
+ virtual void onInflate(const SkDOM&, const SkDOM::Node*);
+ private:
+ typedef SkRefCnt INHERITED;
+ };
+ /** Return the artist attached to this view (or null). The artist's reference
+ count is not affected.
+ */
+ Artist* getArtist() const;
+ /** Attach the specified artist (or null) to the view, replacing any existing
+ artist. If the new artist is not null, its reference count is incremented.
+ The artist parameter is returned.
+ */
+ Artist* setArtist(Artist* artist);
+
+ /** \class Layout
+
+ Install a subclass of this in a view (calling setLayout()), and then the
+ default implementation of that view's onLayoutChildren() will invoke
+ this object automatically.
+ */
+ class Layout : public SkRefCnt {
+ public:
+
+
+ void layoutChildren(SkView* parent);
+ void inflate(const SkDOM&, const SkDOM::Node*);
+ protected:
+ virtual void onLayoutChildren(SkView* parent) = 0;
+ virtual void onInflate(const SkDOM&, const SkDOM::Node*);
+ private:
+ typedef SkRefCnt INHERITED;
+ };
+
+ /** Return the layout attached to this view (or null). The layout's reference
+ count is not affected.
+ */
+ Layout* getLayout() const;
+ /** Attach the specified layout (or null) to the view, replacing any existing
+ layout. If the new layout is not null, its reference count is incremented.
+ The layout parameter is returned.
+ */
+ Layout* setLayout(Layout*, bool invokeLayoutNow = true);
+ /** If a layout is attached to this view, call its layoutChildren() method
+ */
+ void invokeLayout();
+
+ /** Call this to initialize this view based on the specified XML node
+ */
+ void inflate(const SkDOM& dom, const SkDOM::Node* node);
+ /** After a view hierarchy is inflated, this may be called with a dictionary
+ containing pairs of <name, view*>, where the name string was the view's
+ "id" attribute when it was inflated.
+
+ This will call the virtual onPostInflate for this view, and then recursively
+ call postInflate on all of the view's children.
+ */
+ void postInflate(const SkTDict<SkView*>& ids);
+
+ SkDEBUGCODE(void dump(bool recurse) const;)
+
+protected:
+ /** Override this to draw inside the view. Be sure to call the inherited version too */
+ virtual void onDraw(SkCanvas*);
+ /** Override this to be notified when the view's size changes. Be sure to call the inherited version too */
+ virtual void onSizeChange();
+ /** Override this if you want to handle an inval request from this view or one of its children.
+ Typically this is only overridden by the "window". If your subclass does handle the
+ request, return true so the request will not continue to propagate to the parent.
+ */
+ virtual bool handleInval(const SkRect*);
+ //! called once before all of the children are drawn (or clipped/translated)
+ virtual SkCanvas* beforeChildren(SkCanvas* c) { return c; }
+ //! called once after all of the children are drawn (or clipped/translated)
+ virtual void afterChildren(SkCanvas*) {}
+
+ //! called right before this child's onDraw is called
+ virtual void beforeChild(SkView* /*child*/, SkCanvas*) {}
+ //! called right after this child's onDraw is called
+ virtual void afterChild(SkView* /*child*/, SkCanvas*) {}
+
+ /** Override this if you might handle the click
+ */
+ virtual Click* onFindClickHandler(SkScalar x, SkScalar y, unsigned modi);
+ /** Override this to decide if your children are targets for a click.
+ The default returns true, in which case your child views will be
+ candidates for onFindClickHandler. Returning false will skip the children
+ and just call your onFindClickHandler.
+ */
+ virtual bool onSendClickToChildren(SkScalar x, SkScalar y, unsigned modi);
+ /** Override this to track clicks, returning true as long as you want to track
+ the pen/mouse.
+ */
+ virtual bool onClick(Click*);
+ /** Override this to initialize your subclass from the XML node. Be sure to call the inherited version too */
+ virtual void onInflate(const SkDOM& dom, const SkDOM::Node* node);
+ /** Override this if you want to perform post initialization work based on the ID dictionary built
+ during XML parsing. Be sure to call the inherited version too.
+ */
+ virtual void onPostInflate(const SkTDict<SkView*>&);
+
+public:
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+ // default action is to inval the view
+ virtual void onFocusChange(bool gainFocusP);
+
+protected:
+
+ // override these if you're acting as a layer/host
+ virtual bool onGetFocusView(SkView**) const { return false; }
+ virtual bool onSetFocusView(SkView*) { return false; }
+
+private:
+ SkScalar fWidth, fHeight;
+ SkMatrix fMatrix;
+ SkPoint fLoc;
+ SkView* fParent;
+ SkView* fFirstChild;
+ SkView* fNextSibling;
+ SkView* fPrevSibling;
+ uint8_t fFlags;
+ uint8_t fContainsFocus;
+
+ friend class B2FIter;
+ friend class F2BIter;
+
+ friend class SkLayerView;
+
+ bool setFocusView(SkView* fvOrNull);
+ SkView* acceptFocus(FocusDirection);
+ void detachFromParent_NoLayout();
+ /** Compute the matrix to transform view-local coordinates into global ones */
+ void localToGlobal(SkMatrix* matrix) const;
+};
+
+#endif
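
A minimal subclassing sketch (not part of this patch) of the pattern the header above expects: override onDraw(), and call inval() when state changes so the owning window schedules a repaint. The class name and members are hypothetical.

    #include "SkCanvas.h"
    #include "SkPaint.h"
    #include "SkView.h"

    class HypotheticalButton : public SkView {
    public:
        HypotheticalButton() : SkView(kVisible_Mask | kEnabled_Mask) {}

        void press() {
            fDown = !fDown;
            this->inval(nullptr);              // null rect == redraw the whole view
        }

    protected:
        void onDraw(SkCanvas* canvas) override {
            this->INHERITED::onDraw(canvas);   // the header asks subclasses to call through
            SkPaint paint;
            paint.setColor(fDown ? SK_ColorDKGRAY : SK_ColorLTGRAY);
            SkRect bounds;
            this->getLocalBounds(&bounds);     // [0, 0, width, height]
            canvas->drawRect(bounds, paint);
        }

    private:
        bool fDown = false;
        typedef SkView INHERITED;
    };
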
diff --git a/gfx/skia/skia/include/views/SkWindow.h b/gfx/skia/skia/include/views/SkWindow.h
new file mode 100644
index 000000000..e964fc681
--- /dev/null
+++ b/gfx/skia/skia/include/views/SkWindow.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkWindow_DEFINED
+#define SkWindow_DEFINED
+
+#include "../private/SkTDArray.h"
+#include "SkView.h"
+#include "SkBitmap.h"
+#include "SkMatrix.h"
+#include "SkRegion.h"
+#include "SkEvent.h"
+#include "SkKey.h"
+#include "SkSurfaceProps.h"
+
+class SkSurface;
+class SkOSMenu;
+
+#if SK_SUPPORT_GPU
+struct GrGLInterface;
+class GrContext;
+class GrRenderTarget;
+#endif
+
+class SkWindow : public SkView {
+public:
+ SkWindow();
+ virtual ~SkWindow();
+
+ struct AttachmentInfo {
+ AttachmentInfo()
+ : fSampleCount(0)
+ , fStencilBits(0)
+ , fColorBits(0) {}
+
+ int fSampleCount;
+ int fStencilBits;
+ int fColorBits;
+ };
+
+ SkSurfaceProps getSurfaceProps() const { return fSurfaceProps; }
+ void setSurfaceProps(const SkSurfaceProps& props) {
+ fSurfaceProps = props;
+ }
+
+ SkImageInfo info() const { return fBitmap.info(); }
+ const SkBitmap& getBitmap() const { return fBitmap; }
+
+ void resize(int width, int height);
+ void resize(const SkImageInfo&);
+ void setColorType(SkColorType, sk_sp<SkColorSpace>);
+
+ bool isDirty() const { return !fDirtyRgn.isEmpty(); }
+ bool update(SkIRect* updateArea);
+ // does not call through to onHandleInval(), but does force the fDirtyRgn
+ // to be wide open. Call before update() to ensure we redraw everything.
+ void forceInvalAll();
+ // return the bounds of the dirty/inval rgn, or [0,0,0,0] if none
+ const SkIRect& getDirtyBounds() const { return fDirtyRgn.getBounds(); }
+
+ bool handleClick(int x, int y, Click::State, void* owner, unsigned modi = 0);
+ bool handleChar(SkUnichar);
+ bool handleKey(SkKey);
+ bool handleKeyUp(SkKey);
+
+ void addMenu(SkOSMenu*);
+ const SkTDArray<SkOSMenu*>* getMenus() { return &fMenus; }
+
+ const char* getTitle() const { return fTitle.c_str(); }
+ void setTitle(const char title[]);
+
+ const SkMatrix& getMatrix() const { return fMatrix; }
+ void setMatrix(const SkMatrix&);
+ void preConcat(const SkMatrix&);
+ void postConcat(const SkMatrix&);
+
+ virtual sk_sp<SkSurface> makeSurface();
+
+protected:
+ virtual bool onEvent(const SkEvent&);
+ virtual bool onDispatchClick(int x, int y, Click::State, void* owner, unsigned modi);
+ // called if part of our bitmap is invalidated
+ virtual void onHandleInval(const SkIRect&);
+ virtual bool onHandleChar(SkUnichar);
+ virtual bool onHandleKey(SkKey);
+ virtual bool onHandleKeyUp(SkKey);
+ virtual void onAddMenu(const SkOSMenu*) {};
+ virtual void onUpdateMenu(const SkOSMenu*) {};
+ virtual void onSetTitle(const char title[]) {}
+
+ // overrides from SkView
+ virtual bool handleInval(const SkRect*);
+ virtual bool onGetFocusView(SkView** focus) const;
+ virtual bool onSetFocusView(SkView* focus);
+
+#if SK_SUPPORT_GPU
+ sk_sp<SkSurface> makeGpuBackedSurface(const AttachmentInfo& attachmentInfo,
+ const GrGLInterface* , GrContext* grContext);
+#endif
+
+private:
+ SkSurfaceProps fSurfaceProps;
+ SkBitmap fBitmap;
+ SkRegion fDirtyRgn;
+
+ SkTDArray<Click*> fClicks; // to track clicks
+
+ SkTDArray<SkOSMenu*> fMenus;
+
+ SkView* fFocusView;
+ bool fWaitingOnInval;
+
+ SkString fTitle;
+ SkMatrix fMatrix;
+
+ typedef SkView INHERITED;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+#if defined(SK_USE_SDL)
+ #include "SkOSWindow_SDL.h"
+#elif defined(SK_BUILD_FOR_MAC)
+ #include "SkOSWindow_Mac.h"
+#elif defined(SK_BUILD_FOR_WIN)
+ #include "SkOSWindow_Win.h"
+#elif defined(SK_BUILD_FOR_ANDROID)
+ #error Android does not support SkOSWindow and SampleApp. Please use Viewer instead.
+#elif defined(SK_BUILD_FOR_UNIX)
+ #include "SkOSWindow_Unix.h"
+#elif defined(SK_BUILD_FOR_IOS)
+ #include "SkOSWindow_iOS.h"
+#endif
+
+#endif
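
A sketch (not part of this patch) of the raster repaint cycle implied by the header above: check the dirty region, let update() redraw it into the backing bitmap, then blit that bitmap to the native surface in whatever way the port requires.

    #include "SkWindow.h"

    void repaint_if_needed(SkWindow* window) {
        if (!window->isDirty()) {
            return;
        }
        SkIRect dirty;
        if (window->update(&dirty)) {                // redraws the dirty area into the backing bitmap
            const SkBitmap& backing = window->getBitmap();
            // Platform-specific: copy 'backing' (or just 'dirty') to the screen here.
            (void)backing;
        }
    }
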
diff --git a/gfx/skia/skia/include/xml/SkDOM.h b/gfx/skia/skia/include/xml/SkDOM.h
new file mode 100644
index 000000000..b6f611af6
--- /dev/null
+++ b/gfx/skia/skia/include/xml/SkDOM.h
@@ -0,0 +1,99 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDOM_DEFINED
+#define SkDOM_DEFINED
+
+#include "../private/SkTemplates.h"
+#include "SkChunkAlloc.h"
+#include "SkScalar.h"
+#include "SkTypes.h"
+
+struct SkDOMNode;
+struct SkDOMAttr;
+
+class SkDOMParser;
+class SkStream;
+class SkXMLParser;
+
+class SK_API SkDOM : public SkNoncopyable {
+public:
+ SkDOM();
+ ~SkDOM();
+
+ typedef SkDOMNode Node;
+ typedef SkDOMAttr Attr;
+
+ /** Returns null on failure
+ */
+ const Node* build(SkStream&);
+ const Node* copy(const SkDOM& dom, const Node* node);
+
+ const Node* getRootNode() const;
+
+ SkXMLParser* beginParsing();
+ const Node* finishParsing();
+
+ enum Type {
+ kElement_Type,
+ kText_Type
+ };
+ Type getType(const Node*) const;
+
+ const char* getName(const Node*) const;
+ const Node* getFirstChild(const Node*, const char elem[] = NULL) const;
+ const Node* getNextSibling(const Node*, const char elem[] = NULL) const;
+
+ const char* findAttr(const Node*, const char attrName[]) const;
+ const Attr* getFirstAttr(const Node*) const;
+ const Attr* getNextAttr(const Node*, const Attr*) const;
+ const char* getAttrName(const Node*, const Attr*) const;
+ const char* getAttrValue(const Node*, const Attr*) const;
+
+ // helpers for walking children
+ int countChildren(const Node* node, const char elem[] = NULL) const;
+
+ // helpers for calling SkParse
+ bool findS32(const Node*, const char name[], int32_t* value) const;
+ bool findScalars(const Node*, const char name[], SkScalar value[], int count) const;
+ bool findHex(const Node*, const char name[], uint32_t* value) const;
+ bool findBool(const Node*, const char name[], bool*) const;
+ int findList(const Node*, const char name[], const char list[]) const;
+
+ bool findScalar(const Node* node, const char name[], SkScalar value[]) const
+ {
+ return this->findScalars(node, name, value, 1);
+ }
+
+ bool hasAttr(const Node*, const char name[], const char value[]) const;
+ bool hasS32(const Node*, const char name[], int32_t value) const;
+ bool hasScalar(const Node*, const char name[], SkScalar value) const;
+ bool hasHex(const Node*, const char name[], uint32_t value) const;
+ bool hasBool(const Node*, const char name[], bool value) const;
+
+ class AttrIter {
+ public:
+ AttrIter(const SkDOM&, const Node*);
+ const char* next(const char** value);
+ private:
+ const Attr* fAttr;
+ const Attr* fStop;
+ };
+
+ SkDEBUGCODE(void dump(const Node* node = NULL, int tabLevel = 0) const;)
+
+private:
+ SkChunkAlloc fAlloc;
+ Node* fRoot;
+ SkAutoTDelete<SkDOMParser> fParser;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+#endif
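
A sketch (not part of this patch) of walking a document with the API above: build the tree from a stream, then recurse over element children and print each attribute.

    #include "SkDOM.h"
    #include "SkStream.h"

    static void dump_node(const SkDOM& dom, const SkDOM::Node* node, int depth) {
        if (SkDOM::kElement_Type != dom.getType(node)) {
            return;                                  // ignore text nodes in this sketch
        }
        SkDebugf("%*s<%s>\n", depth * 2, "", dom.getName(node));
        SkDOM::AttrIter iter(dom, node);
        const char* value;
        while (const char* name = iter.next(&value)) {
            SkDebugf("%*s  %s=\"%s\"\n", depth * 2, "", name, value);
        }
        for (const SkDOM::Node* child = dom.getFirstChild(node); child;
                child = dom.getNextSibling(child)) {
            dump_node(dom, child, depth + 1);
        }
    }

    static void dump_xml(SkStream& stream) {
        SkDOM dom;
        if (const SkDOM::Node* root = dom.build(stream)) {   // build() returns null on failure
            dump_node(dom, root, 0);
        }
    }
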
diff --git a/gfx/skia/skia/include/xml/SkXMLParser.h b/gfx/skia/skia/include/xml/SkXMLParser.h
new file mode 100644
index 000000000..3f69013ce
--- /dev/null
+++ b/gfx/skia/skia/include/xml/SkXMLParser.h
@@ -0,0 +1,87 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkXMLParser_DEFINED
+#define SkXMLParser_DEFINED
+
+#include "SkString.h"
+
+class SkStream;
+
+class SkDOM;
+struct SkDOMNode;
+
+class SkXMLParserError {
+public:
+ enum ErrorCode {
+ kNoError,
+ kEmptyFile,
+ kUnknownElement,
+ kUnknownAttributeName,
+ kErrorInAttributeValue,
+ kDuplicateIDs,
+ kUnknownError
+ };
+
+ SkXMLParserError();
+ virtual ~SkXMLParserError();
+ ErrorCode getErrorCode() const { return fCode; }
+ virtual void getErrorString(SkString* str) const;
+ int getLineNumber() const { return fLineNumber; }
+ int getNativeCode() const { return fNativeCode; }
+ bool hasError() const { return fCode != kNoError || fNativeCode != -1; }
+ bool hasNoun() const { return fNoun.size() > 0; }
+ void reset();
+ void setCode(ErrorCode code) { fCode = code; }
+ void setNoun(const SkString& str) { fNoun.set(str); }
+ void setNoun(const char* ch) { fNoun.set(ch); }
+ void setNoun(const char* ch, size_t len) { fNoun.set(ch, len); }
+protected:
+ ErrorCode fCode;
+private:
+ int fLineNumber;
+ int fNativeCode;
+ SkString fNoun;
+ friend class SkXMLParser;
+};
+
+class SkXMLParser {
+public:
+ SkXMLParser(SkXMLParserError* parserError = NULL);
+ virtual ~SkXMLParser();
+
+ /** Returns true for success
+ */
+ bool parse(const char doc[], size_t len);
+ bool parse(SkStream& docStream);
+ bool parse(const SkDOM&, const SkDOMNode*);
+
+ static void GetNativeErrorString(int nativeErrorCode, SkString* str);
+
+protected:
+ // override in subclasses; return true to stop parsing
+ virtual bool onStartElement(const char elem[]);
+ virtual bool onAddAttribute(const char name[], const char value[]);
+ virtual bool onEndElement(const char elem[]);
+ virtual bool onText(const char text[], int len);
+
+public:
+ // public for ported implementation, not meant for clients to call
+ bool startElement(const char elem[]);
+ bool addAttribute(const char name[], const char value[]);
+ bool endElement(const char elem[]);
+ bool text(const char text[], int len);
+ void* fParser;
+protected:
+ SkXMLParserError* fError;
+private:
+ void reportError(void* parser);
+};
+
+#endif
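
A sketch (not part of this patch) of the callback style the parser above uses: a subclass overrides the on* hooks and returns true only when it wants parsing to stop early.

    #include "SkXMLParser.h"

    class CountingParser : public SkXMLParser {      // hypothetical subclass
    public:
        int fElementCount = 0;

    protected:
        bool onStartElement(const char elem[]) override {
            ++fElementCount;
            return false;                            // false == keep parsing
        }
        bool onAddAttribute(const char name[], const char value[]) override {
            return false;
        }
        bool onEndElement(const char elem[]) override {
            return false;
        }
    };

    int count_elements(const char doc[], size_t len) {
        CountingParser parser;
        return parser.parse(doc, len) ? parser.fElementCount : -1;   // parse() returns true on success
    }
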
diff --git a/gfx/skia/skia/include/xml/SkXMLWriter.h b/gfx/skia/skia/include/xml/SkXMLWriter.h
new file mode 100644
index 000000000..32901267d
--- /dev/null
+++ b/gfx/skia/skia/include/xml/SkXMLWriter.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkXMLWriter_DEFINED
+#define SkXMLWriter_DEFINED
+
+#include "../private/SkTDArray.h"
+#include "SkString.h"
+#include "SkDOM.h"
+
+class SkWStream;
+class SkXMLParser;
+
+class SkXMLWriter {
+public:
+ SkXMLWriter(bool doEscapeMarkup = true);
+ virtual ~SkXMLWriter();
+
+ void addS32Attribute(const char name[], int32_t value);
+ void addAttribute(const char name[], const char value[]);
+ void addAttributeLen(const char name[], const char value[], size_t length);
+ void addHexAttribute(const char name[], uint32_t value, int minDigits = 0);
+ void addScalarAttribute(const char name[], SkScalar value);
+ void addText(const char text[], size_t length);
+ void endElement() { this->onEndElement(); }
+ void startElement(const char elem[]);
+ void startElementLen(const char elem[], size_t length);
+ void writeDOM(const SkDOM&, const SkDOM::Node*, bool skipRoot);
+ void flush();
+ virtual void writeHeader();
+
+protected:
+ virtual void onStartElementLen(const char elem[], size_t length) = 0;
+ virtual void onAddAttributeLen(const char name[], const char value[], size_t length) = 0;
+ virtual void onAddText(const char text[], size_t length) = 0;
+ virtual void onEndElement() = 0;
+
+ struct Elem {
+ Elem(const char name[], size_t len)
+ : fName(name, len)
+ , fHasChildren(false)
+ , fHasText(false) {}
+
+ SkString fName;
+ bool fHasChildren;
+ bool fHasText;
+ };
+ void doEnd(Elem* elem);
+ bool doStart(const char name[], size_t length);
+ Elem* getEnd();
+ const char* getHeader();
+ SkTDArray<Elem*> fElems;
+
+private:
+ bool fDoEscapeMarkup;
+ // illegal
+ SkXMLWriter& operator=(const SkXMLWriter&);
+};
+
+class SkXMLStreamWriter : public SkXMLWriter {
+public:
+ SkXMLStreamWriter(SkWStream*);
+ virtual ~SkXMLStreamWriter();
+ void writeHeader() override;
+ SkDEBUGCODE(static void UnitTest();)
+
+protected:
+ void onStartElementLen(const char elem[], size_t length) override;
+ void onEndElement() override;
+ void onAddAttributeLen(const char name[], const char value[], size_t length) override;
+ void onAddText(const char text[], size_t length) override;
+
+private:
+ SkWStream& fStream;
+};
+
+class SkXMLParserWriter : public SkXMLWriter {
+public:
+ SkXMLParserWriter(SkXMLParser*);
+ virtual ~SkXMLParserWriter();
+protected:
+ void onStartElementLen(const char elem[], size_t length) override;
+ void onEndElement() override;
+ void onAddAttributeLen(const char name[], const char value[], size_t length) override;
+ void onAddText(const char text[], size_t length) override;
+private:
+ SkXMLParser& fParser;
+};
+
+
+#endif
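
A short sketch (not part of this patch) of emitting a document with the SkXMLStreamWriter declared above, assuming SkDynamicMemoryWStream from SkStream.h; the element and attribute names are examples only.

    #include "SkStream.h"
    #include "SkXMLWriter.h"

    void write_sample_xml() {
        SkDynamicMemoryWStream stream;
        SkXMLStreamWriter writer(&stream);
        writer.writeHeader();                        // emits the <?xml ...?> prolog
        writer.startElement("config");
        writer.addS32Attribute("version", 2);
        writer.addScalarAttribute("scale", SK_Scalar1);
        writer.startElement("entry");
        writer.addAttribute("name", "example");
        writer.endElement();                         // </entry>
        writer.endElement();                         // </config>
        writer.flush();
    }
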
diff --git a/gfx/skia/skia/src/android/SkBitmapRegionCodec.cpp b/gfx/skia/skia/src/android/SkBitmapRegionCodec.cpp
new file mode 100644
index 000000000..df0a32ca3
--- /dev/null
+++ b/gfx/skia/skia/src/android/SkBitmapRegionCodec.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAndroidCodec.h"
+#include "SkBitmapRegionCodec.h"
+#include "SkBitmapRegionDecoderPriv.h"
+#include "SkCodecPriv.h"
+#include "SkPixelRef.h"
+
+SkBitmapRegionCodec::SkBitmapRegionCodec(SkAndroidCodec* codec)
+ : INHERITED(codec->getInfo().width(), codec->getInfo().height())
+ , fCodec(codec)
+{}
+
+bool SkBitmapRegionCodec::decodeRegion(SkBitmap* bitmap, SkBRDAllocator* allocator,
+ const SkIRect& desiredSubset, int sampleSize, SkColorType prefColorType,
+ bool requireUnpremul) {
+
+ // Fix the input sampleSize if necessary.
+ if (sampleSize < 1) {
+ sampleSize = 1;
+ }
+
+ // The size of the output bitmap is determined by the size of the
+ // requested subset, not by the size of the intersection of the subset
+ // and the image dimensions.
+ // If inputX is negative, we will need to place decoded pixels into the
+ // output bitmap starting at a left offset. Call this outX.
+ // If outX is non-zero, subsetX must be zero.
+ // If inputY is negative, we will need to place decoded pixels into the
+ // output bitmap starting at a top offset. Call this outY.
+ // If outY is non-zero, subsetY must be zero.
+ int outX;
+ int outY;
+ SkIRect subset = desiredSubset;
+ SubsetType type = adjust_subset_rect(fCodec->getInfo().dimensions(), &subset, &outX, &outY);
+ if (SubsetType::kOutside_SubsetType == type) {
+ return false;
+ }
+
+ // Ask the codec for a scaled subset
+ if (!fCodec->getSupportedSubset(&subset)) {
+ SkCodecPrintf("Error: Could not get subset.\n");
+ return false;
+ }
+ SkISize scaledSize = fCodec->getSampledSubsetDimensions(sampleSize, subset);
+
+ // Create the image info for the decode
+ SkColorType dstColorType = fCodec->computeOutputColorType(prefColorType);
+ SkAlphaType dstAlphaType = fCodec->computeOutputAlphaType(requireUnpremul);
+
+ // Enable legacy behavior to avoid any gamma correction. Android's assets are
+ // adjusted to expect a non-gamma correct premultiply.
+ sk_sp<SkColorSpace> colorSpace = nullptr;
+ SkImageInfo decodeInfo = SkImageInfo::Make(scaledSize.width(), scaledSize.height(),
+ dstColorType, dstAlphaType, colorSpace);
+
+ // Construct a color table for the decode if necessary
+ SkAutoTUnref<SkColorTable> colorTable(nullptr);
+ int maxColors = 256;
+ SkPMColor colors[256];
+ if (kIndex_8_SkColorType == dstColorType) {
+ colorTable.reset(new SkColorTable(colors, maxColors));
+ }
+
+ // Initialize the destination bitmap
+ int scaledOutX = 0;
+ int scaledOutY = 0;
+ int scaledOutWidth = scaledSize.width();
+ int scaledOutHeight = scaledSize.height();
+ if (SubsetType::kPartiallyInside_SubsetType == type) {
+ scaledOutX = outX / sampleSize;
+ scaledOutY = outY / sampleSize;
+ // We need to be safe here because getSupportedSubset() may have modified the subset.
+ const int extraX = SkTMax(0, desiredSubset.width() - outX - subset.width());
+ const int extraY = SkTMax(0, desiredSubset.height() - outY - subset.height());
+ const int scaledExtraX = extraX / sampleSize;
+ const int scaledExtraY = extraY / sampleSize;
+ scaledOutWidth += scaledOutX + scaledExtraX;
+ scaledOutHeight += scaledOutY + scaledExtraY;
+ }
+ SkImageInfo outInfo = decodeInfo.makeWH(scaledOutWidth, scaledOutHeight);
+ if (kGray_8_SkColorType == dstColorType) {
+ // The legacy implementations of BitmapFactory and BitmapRegionDecoder
+ // used kAlpha8 for grayscale images (before kGray8 existed). While
+ // the codec recognizes kGray8, we need to decode into a kAlpha8
+ // bitmap in order to avoid a behavior change.
+ outInfo = outInfo.makeColorType(kAlpha_8_SkColorType).makeAlphaType(kPremul_SkAlphaType);
+ }
+ bitmap->setInfo(outInfo);
+ if (!bitmap->tryAllocPixels(allocator, colorTable.get())) {
+ SkCodecPrintf("Error: Could not allocate pixels.\n");
+ return false;
+ }
+
+ // Zero the bitmap if the region is not completely within the image.
+ // TODO (msarett): Can we make this faster by implementing it to only
+ // zero parts of the image that we won't overwrite with
+ // pixels?
+ SkCodec::ZeroInitialized zeroInit = allocator ? allocator->zeroInit() :
+ SkCodec::kNo_ZeroInitialized;
+ if (SubsetType::kPartiallyInside_SubsetType == type &&
+ SkCodec::kNo_ZeroInitialized == zeroInit) {
+ void* pixels = bitmap->getPixels();
+ size_t bytes = outInfo.getSafeSize(bitmap->rowBytes());
+ memset(pixels, 0, bytes);
+ }
+
+ // Decode into the destination bitmap
+ SkAndroidCodec::AndroidOptions options;
+ options.fSampleSize = sampleSize;
+ options.fSubset = &subset;
+ options.fColorPtr = colors;
+ options.fColorCount = &maxColors;
+ options.fZeroInitialized = zeroInit;
+ void* dst = bitmap->getAddr(scaledOutX, scaledOutY);
+
+ SkCodec::Result result = fCodec->getAndroidPixels(decodeInfo, dst, bitmap->rowBytes(),
+ &options);
+ if (SkCodec::kSuccess != result && SkCodec::kIncompleteInput != result) {
+ SkCodecPrintf("Error: Could not get pixels.\n");
+ return false;
+ }
+
+ // Initialize the color table
+ if (kIndex_8_SkColorType == dstColorType) {
+ colorTable->dangerous_overwriteColors(colors, maxColors);
+ }
+
+ return true;
+}
+
+bool SkBitmapRegionCodec::conversionSupported(SkColorType colorType) {
+ // Enable legacy behavior.
+ sk_sp<SkColorSpace> colorSpace = nullptr;
+ SkImageInfo dstInfo = fCodec->getInfo().makeColorType(colorType).makeColorSpace(colorSpace);
+ return conversion_possible_ignore_color_space(dstInfo, fCodec->getInfo());
+}
diff --git a/gfx/skia/skia/src/android/SkBitmapRegionCodec.h b/gfx/skia/skia/src/android/SkBitmapRegionCodec.h
new file mode 100644
index 000000000..79774173b
--- /dev/null
+++ b/gfx/skia/skia/src/android/SkBitmapRegionCodec.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmap.h"
+#include "SkBitmapRegionDecoder.h"
+#include "SkAndroidCodec.h"
+
+/*
+ * This class implements SkBitmapRegionDecoder using an SkAndroidCodec.
+ */
+class SkBitmapRegionCodec : public SkBitmapRegionDecoder {
+public:
+
+ /*
+ * Takes ownership of pointer to codec
+ */
+ SkBitmapRegionCodec(SkAndroidCodec* codec);
+
+ bool decodeRegion(SkBitmap* bitmap, SkBRDAllocator* allocator,
+ const SkIRect& desiredSubset, int sampleSize,
+ SkColorType colorType, bool requireUnpremul) override;
+
+ bool conversionSupported(SkColorType colorType) override;
+
+ SkEncodedFormat getEncodedFormat() override { return fCodec->getEncodedFormat(); }
+
+private:
+
+ SkAutoTDelete<SkAndroidCodec> fCodec;
+
+ typedef SkBitmapRegionDecoder INHERITED;
+
+};
diff --git a/gfx/skia/skia/src/android/SkBitmapRegionDecoder.cpp b/gfx/skia/skia/src/android/SkBitmapRegionDecoder.cpp
new file mode 100644
index 000000000..6dd48c5f8
--- /dev/null
+++ b/gfx/skia/skia/src/android/SkBitmapRegionDecoder.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapRegionCodec.h"
+#include "SkBitmapRegionDecoder.h"
+#include "SkAndroidCodec.h"
+#include "SkCodec.h"
+#include "SkCodecPriv.h"
+
+SkBitmapRegionDecoder* SkBitmapRegionDecoder::Create(
+ sk_sp<SkData> data, Strategy strategy) {
+ return SkBitmapRegionDecoder::Create(new SkMemoryStream(data),
+ strategy);
+}
+
+SkBitmapRegionDecoder* SkBitmapRegionDecoder::Create(
+ SkStreamRewindable* stream, Strategy strategy) {
+ SkAutoTDelete<SkStreamRewindable> streamDeleter(stream);
+ switch (strategy) {
+ case kAndroidCodec_Strategy: {
+ SkAutoTDelete<SkAndroidCodec> codec =
+ SkAndroidCodec::NewFromStream(streamDeleter.release());
+ if (nullptr == codec) {
+ SkCodecPrintf("Error: Failed to create codec.\n");
+ return NULL;
+ }
+
+ SkEncodedFormat format = codec->getEncodedFormat();
+ switch (format) {
+ case SkEncodedFormat::kJPEG_SkEncodedFormat:
+ case SkEncodedFormat::kPNG_SkEncodedFormat:
+ case SkEncodedFormat::kWEBP_SkEncodedFormat:
+ break;
+ default:
+ return nullptr;
+ }
+
+ return new SkBitmapRegionCodec(codec.release());
+ }
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+}
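
A sketch (not part of this patch) of the calling pattern for the factory above, assuming the encoded bytes are already held in an sk_sp<SkData>; passing a null SkBRDAllocator lets decodeRegion() fall back to ordinary heap allocation.

    #include <memory>
    #include "SkBitmapRegionDecoder.h"
    #include "SkData.h"

    bool decode_tile(sk_sp<SkData> encoded, const SkIRect& tile, SkBitmap* out) {
        std::unique_ptr<SkBitmapRegionDecoder> brd(SkBitmapRegionDecoder::Create(
                encoded, SkBitmapRegionDecoder::kAndroidCodec_Strategy));
        if (!brd) {
            return false;                            // unsupported format or unreadable data
        }
        return brd->decodeRegion(out, nullptr /*allocator*/, tile, 1 /*sampleSize*/,
                                 kN32_SkColorType, false /*requireUnpremul*/);
    }
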
diff --git a/gfx/skia/skia/src/android/SkBitmapRegionDecoderPriv.h b/gfx/skia/skia/src/android/SkBitmapRegionDecoderPriv.h
new file mode 100644
index 000000000..baa891e9c
--- /dev/null
+++ b/gfx/skia/skia/src/android/SkBitmapRegionDecoderPriv.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapRegionDecoderPriv_DEFINED
+#define SkBitmapRegionDecoderPriv_DEFINED
+
+enum SubsetType {
+ kFullyInside_SubsetType,
+ kPartiallyInside_SubsetType,
+ kOutside_SubsetType,
+};
+
+/*
+ * Corrects image subset offsets and dimensions in order to perform a valid decode.
+ * Also indicates if the image subset should be placed at an offset within the
+ * output bitmap.
+ *
+ * Values of the output variables are undefined if the return value is kOutside_SubsetType.
+ *
+ * @param imageDims Original image dimensions.
+ * @param subset As input, the subset that the client requested.
+ * As output, the image subset that we will decode.
+ * @param outX The left offset of the image subset within the output bitmap.
+ * @param outY The top offset of the image subset within the output bitmap.
+ *
+ * @return An indication of how the subset is contained in the image.
+ * If the return value is kOutside_SubsetType, the values of the output variables are undefined.
+ */
+inline SubsetType adjust_subset_rect(const SkISize& imageDims, SkIRect* subset, int* outX,
+ int* outY) {
+ // These must be at least zero, we can't start decoding the image at a negative coordinate.
+ int left = SkTMax(0, subset->fLeft);
+ int top = SkTMax(0, subset->fTop);
+
+ // If input offsets are less than zero, we decode to an offset location in the output bitmap.
+ *outX = left - subset->fLeft;
+ *outY = top - subset->fTop;
+
+ // Make sure we don't decode pixels past the edge of the image or past the edge of the subset.
+ int width = SkTMin(imageDims.width() - left, subset->width() - *outX);
+ int height = SkTMin(imageDims.height() - top, subset->height() - *outY);
+ if (width <= 0 || height <= 0) {
+ return SubsetType::kOutside_SubsetType;
+ }
+
+ subset->setXYWH(left, top, width, height);
+ if ((*outX != 0) || (*outY != 0) || (width != subset->width()) ||
+ (height != subset->height())) {
+ return SubsetType::kPartiallyInside_SubsetType;
+ }
+
+ return SubsetType::kFullyInside_SubsetType;
+}
+
+#endif // SkBitmapRegionDecoderPriv_DEFINED
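
A worked example (not part of this patch) of adjust_subset_rect() above: for a 100x100 image and a requested subset of (-10, -10, 40x40), left and top clamp to 0, outX and outY become 10, width and height become min(100 - 0, 40 - 10) = 30, so the subset is rewritten to (0, 0, 30x30) and kPartiallyInside_SubsetType is returned.

    #include "SkBitmapRegionDecoderPriv.h"
    #include "SkRect.h"
    #include "SkSize.h"

    void adjust_subset_example() {
        SkIRect subset = SkIRect::MakeXYWH(-10, -10, 40, 40);   // hangs off the top-left corner
        int outX, outY;
        SubsetType type = adjust_subset_rect(SkISize::Make(100, 100), &subset, &outX, &outY);
        SkASSERT(SubsetType::kPartiallyInside_SubsetType == type);
        SkASSERT(10 == outX && 10 == outY);
        SkASSERT(SkIRect::MakeWH(30, 30) == subset);            // the decodable intersection
    }
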
diff --git a/gfx/skia/skia/src/animator/SkADrawable.cpp b/gfx/skia/skia/src/animator/SkADrawable.cpp
new file mode 100644
index 000000000..9ac4095e1
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkADrawable.cpp
@@ -0,0 +1,24 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkADrawable.h"
+
+bool SkADrawable::doEvent(SkDisplayEvent::Kind , SkEventState* ) {
+ return false;
+}
+
+bool SkADrawable::isDrawable() const {
+ return true;
+}
+
+void SkADrawable::initialize() {
+}
+
+void SkADrawable::setSteps(int steps) {
+}
diff --git a/gfx/skia/skia/src/animator/SkADrawable.h b/gfx/skia/skia/src/animator/SkADrawable.h
new file mode 100644
index 000000000..26be50d3e
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkADrawable.h
@@ -0,0 +1,28 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkADrawable_DEFINED
+#define SkADrawable_DEFINED
+
+#include "SkDisplayable.h"
+#include "SkDisplayEvent.h"
+#include "SkMath.h"
+
+struct SkEventState;
+
+class SkADrawable : public SkDisplayable {
+public:
+ virtual bool doEvent(SkDisplayEvent::Kind , SkEventState* state );
+ virtual bool draw(SkAnimateMaker& ) = 0;
+ virtual void initialize();
+ virtual bool isDrawable() const;
+ virtual void setSteps(int steps);
+};
+
+#endif // SkADrawable_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkAnimate.h b/gfx/skia/skia/src/animator/SkAnimate.h
new file mode 100644
index 000000000..ee391fcd9
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkAnimate.h
@@ -0,0 +1,34 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkAnimate_DEFINED
+#define SkAnimate_DEFINED
+
+#include "SkAnimateBase.h"
+#include "SkDisplayType.h"
+#include "SkIntArray.h"
+#include "SkUtils.h"
+
+class SkAnimate : public SkAnimateBase {
+ DECLARE_MEMBER_INFO(Animate);
+ SkAnimate();
+ virtual ~SkAnimate();
+ int components() override;
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+ void onEndElement(SkAnimateMaker& maker) override;
+protected:
+ bool resolveCommon(SkAnimateMaker& );
+ int fComponents;
+private:
+ typedef SkAnimateBase INHERITED;
+};
+
+#endif // SkAnimateField_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkAnimate3DSchema.xsd b/gfx/skia/skia/src/animator/SkAnimate3DSchema.xsd
new file mode 100644
index 000000000..5063b7572
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkAnimate3DSchema.xsd
@@ -0,0 +1,39 @@
+<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
+ xmlns:Sk="http://www.skia.com/schema/SkAnimateSchema.xsd"
+ targetNamespace="urn:skia3D" xmlns:Sk3D="urn:skia3D">
+
+ <xs:simpleType name="Patch" >
+ <xs:restriction base="xs:string" >
+ </xs:restriction>
+ </xs:simpleType>
+
+ <xs:simpleType name="Point" >
+ <xs:restriction base="xs:string" >
+ <xs:pattern value="[+-]?([0-9]*\.[0-9]+|[0-9]+\.?)( *[ ,] *[+-]?([0-9]*\.[0-9]+|[0-9]+\.?)){2}" />
+ </xs:restriction>
+ </xs:simpleType>
+
+ <xs:element name="camera">
+ <xs:complexType >
+ <xs:attribute name="axis" type="Sk3D:Point" />
+ <xs:attribute name="hackHeight" type="Sk:Float" />
+ <xs:attribute name="hackWidth" type="Sk:Float" />
+ <xs:attribute name="location" type="Sk3D:Point" />
+ <xs:attribute name="observer" type="Sk3D:Point" />
+ <xs:attribute name="patch" type="Sk3D:Patch" />
+ <xs:attribute name="zenith" type="Sk3D:Point" />
+ <xs:attribute name="id" type="xs:ID" />
+ </xs:complexType>
+ </xs:element>
+
+ <xs:element name="patch">
+ <xs:complexType >
+ <xs:attribute name="origin" type="Sk3D:Point" />
+ <xs:attribute name="rotateDegrees" type="Sk:MemberFunction" />
+ <xs:attribute name="u" type="Sk3D:Point" />
+ <xs:attribute name="v" type="Sk3D:Point" />
+ <xs:attribute name="id" type="xs:ID" />
+ </xs:complexType>
+ </xs:element>
+
+</xs:schema>
diff --git a/gfx/skia/skia/src/animator/SkAnimate3DSchema.xsx b/gfx/skia/skia/src/animator/SkAnimate3DSchema.xsx
new file mode 100644
index 000000000..ceb7d890c
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkAnimate3DSchema.xsx
@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--This file is auto-generated by the XML Schema Designer. It holds layout information for components on the designer surface.-->
+<XSDDesignerLayout />
diff --git a/gfx/skia/skia/src/animator/SkAnimateActive.cpp b/gfx/skia/skia/src/animator/SkAnimateActive.cpp
new file mode 100644
index 000000000..0dbe9c9f0
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkAnimateActive.cpp
@@ -0,0 +1,504 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkAnimateActive.h"
+#include "SkAnimateBase.h"
+#include "SkAnimateMaker.h"
+#include "SkAnimateSet.h"
+#include "SkDrawGroup.h"
+#ifdef SK_DEBUG
+#include "SkTime.h"
+#endif
+
+// SkActive holds an array of interpolators
+
+SkActive::SkActive(SkApply& apply, SkAnimateMaker& maker) : fApply(apply),
+ fMaxTime(0), fMaker(maker), fDrawIndex(0), fDrawMax(0) {
+}
+
+void SkActive::init()
+{
+ fAnimators = fApply.fAnimators;
+ int animators = fAnimators.count();
+ fInterpolators.setCount(animators);
+ memset(fInterpolators.begin(), 0, animators * sizeof(SkOperandInterpolator*));
+ fState.setCount(animators);
+ int index;
+ for (index = 0; index < animators; index++)
+ fInterpolators[index] = new SkOperandInterpolator;
+ initState(&fApply, 0);
+// for (index = 0; index < animators; index++)
+// fState[index].bumpSave();
+ SkASSERT(fInterpolators.count() == fAnimators.count());
+}
+
+SkActive::~SkActive() {
+ int index;
+ for (index = 0; index < fSaveRestore.count(); index++)
+ delete[] fSaveRestore[index];
+ for (index = 0; index < fSaveInterpolators.count(); index++)
+ delete[] fSaveInterpolators[index];
+ for (index = 0; index < fInterpolators.count(); index++)
+ delete fInterpolators[index];
+}
+
+void SkActive::advance() {
+ if (fDrawMax < fDrawIndex)
+ fDrawMax = fDrawIndex;
+ fDrawIndex += fAnimators.count();
+}
+
+void SkActive::append(SkApply* apply) {
+ int oldCount = fAnimators.count();
+ SkTDAnimateArray& animates = apply->fAnimators;
+ int newCount = animates.count();
+ int index;
+ int total = oldCount + newCount;
+ if (total == 0)
+ return;
+ fInterpolators.setCount(total);
+ memset(&fInterpolators.begin()[oldCount], 0, newCount * sizeof(SkOperandInterpolator*));
+ for (index = oldCount; index < total; index++)
+ fInterpolators[index] = new SkOperandInterpolator;
+ fAnimators.setCount(total);
+ memcpy(&fAnimators[oldCount], animates.begin(), sizeof(fAnimators[0]) *
+ newCount);
+ fState.setCount(total);
+ initState(apply, oldCount);
+ SkASSERT(fApply.scope == apply->scope);
+ for (index = 0; index < newCount; index++) {
+ SkAnimateBase* test = animates[index];
+// SkASSERT(fApply.scope == test->fTarget || fApply.scope->contains(test->fTarget));
+ SkActive::SkState& testState = fState[oldCount + index];
+ for (int inner = 0; inner < oldCount; inner++) {
+ SkAnimateBase* oldGuard = fAnimators[inner];
+ SkActive::SkState& oldState = fState[inner];
+ if (oldGuard->fTarget == test->fTarget && oldGuard->fFieldInfo == test->fFieldInfo &&
+ testState.fBegin == oldState.fBegin) {
+ delete fInterpolators[inner];
+ fInterpolators.remove(inner);
+ fAnimators.remove(inner);
+ testState.fSave = oldState.fSave;
+ if (oldState.fUnpostedEndEvent) {
+// SkDEBUGF(("%8x %8x active append: post on end\n", this, oldGuard));
+ fMaker.postOnEnd(oldGuard, oldState.fBegin + oldState.fDuration);
+ }
+ fState.remove(inner);
+ if (fApply.restore) {
+ int saveIndex = fSaveRestore.count();
+ SkASSERT(fSaveInterpolators.count() == saveIndex);
+ saveIndex += inner;
+ do {
+ saveIndex -= oldCount;
+ delete[] fSaveRestore[saveIndex];
+ fSaveRestore.remove(saveIndex);
+ delete[] fSaveInterpolators[saveIndex];
+ fSaveInterpolators.remove(saveIndex);
+ } while (saveIndex > 0);
+ }
+ oldCount--;
+ break;
+ }
+ }
+ }
+// total = oldCount + newCount;
+// for (index = oldCount; index < total; index++)
+// fState[index].bumpSave();
+ SkASSERT(fInterpolators.count() == fAnimators.count());
+}
+
+void SkActive::appendSave(int oldCount) {
+ SkASSERT(fDrawMax == 0); // if true, we can optimize below quite a bit
+ int newCount = fAnimators.count();
+ int saveIndex = fSaveRestore.count();
+ SkASSERT(fSaveInterpolators.count() == saveIndex);
+ int records = saveIndex / oldCount;
+ int newTotal = records * newCount;
+ fSaveRestore.setCount(newTotal);
+ do {
+ saveIndex -= oldCount;
+ newTotal -= newCount;
+ SkASSERT(saveIndex >= 0);
+ SkASSERT(newTotal >= 0);
+ memmove(&fSaveRestore[newTotal], &fSaveRestore[saveIndex], oldCount);
+ memset(&fSaveRestore[newTotal + oldCount], 0,
+ sizeof(fSaveRestore[0]) * (newCount - oldCount));
+ memmove(&fSaveInterpolators[newTotal],
+ &fSaveInterpolators[saveIndex], oldCount);
+ memset(&fSaveInterpolators[newTotal + oldCount], 0,
+ sizeof(fSaveRestore[0]) * (newCount - oldCount));
+ } while (saveIndex > 0);
+ SkASSERT(newTotal == 0);
+}
+
+void SkActive::calcDurations(int index)
+{
+ SkAnimateBase* animate = fAnimators[index];
+ SkMSec duration = animate->dur;
+ SkState& state = fState[index];
+ switch (state.fMode) {
+ case SkApply::kMode_immediate:
+ case SkApply::kMode_create:
+ duration = state.fSteps ? state.fSteps * SK_MSec1 : 1;
+ break;
+// case SkApply::kMode_hold: {
+// int entries = animate->entries();
+// SkScriptValue value;
+// value.fOperand = animate->getValues()[entries - 1];
+// value.fType = animate->getValuesType();
+// bool result = SkScriptEngine::ConvertTo(nullptr, SkType_Int, &value);
+// SkASSERT(result);
+// duration = value.fOperand.fS32 * SK_MSec1;
+// break;
+// }
+ }
+ state.fDuration = duration;
+ SkMSec maxTime = state.fBegin + duration;
+ if (fMaxTime < maxTime)
+ fMaxTime = maxTime;
+}
+
+void SkActive::create(SkADrawable* drawable, SkMSec time) {
+ fApply.fLastTime = time;
+ fApply.refresh(fMaker);
+ for (int index = 0; index < fAnimators.count(); index++) {
+ SkAnimateBase* animate = fAnimators[index];
+ SkOperandInterpolator& interpolator = *fInterpolators[index];
+ int count = animate->components();
+ if (animate->formula.size() > 0) {
+ SkTDOperandArray values;
+ values.setCount(count);
+ SkDEBUGCODE(bool success = ) animate->fFieldInfo->setValue(fMaker, &values, 0, 0, nullptr,
+ animate->getValuesType(), animate->formula);
+ SkASSERT(success);
+ fApply.applyValues(index, values.begin(), count, animate->getValuesType(), time);
+ } else {
+ SkAutoSTMalloc<16, SkOperand> values(count);
+ interpolator.timeToValues(time, values.get());
+ fApply.applyValues(index, values.get(), count, animate->getValuesType(), time);
+ }
+ }
+ drawable->enable(fMaker);
+ SkASSERT(fAnimators.count() == fInterpolators.count());
+}
+
+bool SkActive::immediate(bool enable) {
+ SkMSec time = 0;
+ bool result = false;
+ SkADrawable* drawable = fApply.scope;
+ SkMSec final = fMaxTime;
+ do {
+ bool applied = fAnimators.count() == 0;
+ fApply.fLastTime = time;
+ fApply.refresh(fMaker);
+ for (int index = 0; index < fAnimators.count(); index++) {
+ SkAnimateBase* animate = fAnimators[index];
+ SkState& state = fState[index];
+ if (state.fMode != SkApply::kMode_immediate)
+ continue;
+ if (state.fBegin > time)
+ continue;
+ if (time > state.fBegin + state.fDuration)
+ continue;
+ applied = true;
+ SkOperandInterpolator& interpolator = *fInterpolators[index];
+ int count = animate->components();
+ if (animate->formula.size() > 0) {
+ SkTDOperandArray values;
+ values.setCount(count);
+ SkDEBUGCODE(bool success = ) animate->fFieldInfo->setValue(fMaker, &values, 0, 0, nullptr,
+ animate->getValuesType(), animate->formula);
+ SkASSERT(success);
+ fApply.applyValues(index, values.begin(), count, animate->getValuesType(), time);
+ } else {
+ SkAutoSTMalloc<16, SkOperand> values(count);
+ interpolator.timeToValues(time, values.get());
+ fApply.applyValues(index, values.get(), count, animate->getValuesType(), time);
+ }
+ }
+ if (enable)
+ drawable->enable(fMaker);
+ else if (applied)
+ result |= drawable->draw(fMaker);
+ time += SK_MSec1;
+ } while (time <= final);
+ return result;
+}
+
+void SkActive::fixInterpolator(SkBool save) {
+ int animators = fAnimators.count();
+ for (int index = 0; index < animators; index++) {
+ SkAnimateBase* animate = fAnimators[index];
+ if (save) { // saved slots increased
+ animate->refresh(fMaker);
+ SkOperand* values = animate->getValues();
+ setInterpolator(index, values);
+ saveInterpolatorValues(index);
+ } else
+ restoreInterpolatorValues(index);
+ }
+}
+
+SkMSec SkActive::getTime(SkMSec inTime, int animatorIndex) {
+ fState[animatorIndex].fTicks = inTime;
+ return inTime - fState[animatorIndex].fStartTime;
+}
+
+bool SkActive::initializeSave() {
+ int animators = fAnimators.count();
+ int activeTotal = fDrawIndex + animators;
+ int oldCount = fSaveRestore.count();
+ if (oldCount < activeTotal) {
+ fSaveRestore.setCount(activeTotal);
+ memset(&fSaveRestore[oldCount], 0, sizeof(fSaveRestore[0]) * (activeTotal - oldCount));
+ SkASSERT(fSaveInterpolators.count() == oldCount);
+ fSaveInterpolators.setCount(activeTotal);
+ memset(&fSaveInterpolators[oldCount], 0,
+ sizeof(fSaveInterpolators[0]) * (activeTotal - oldCount));
+ return true;
+ }
+ return false;
+}
+
+void SkActive::initState(SkApply* apply, int offset) {
+ int count = fState.count();
+ for (int index = offset; index < count; index++) {
+ SkState& state = fState[index];
+ SkAnimateBase* animate = fAnimators[index];
+#if 0 // def SK_DEBUG
+ if (animate->fHasEndEvent)
+ SkDebugf("%8x %8x active initState:\n", this, animate);
+#endif
+ SkOperand* from = animate->getValues();
+ state.fStartTime = state.fBegin = apply->begin + animate->begin;
+ state.fMode = apply->mode;
+ state.fTransition = apply->transition;
+#if 0
+ state.fPickup = (SkBool8) apply->pickup;
+#endif
+ state.fRestore = (SkBool8) apply->restore;
+ state.fSave = apply->begin;
+ state.fStarted = false;
+ state.fSteps = apply->steps;
+ state.fTicks = 0;
+ state.fUnpostedEndEvent = (SkBool8) animate->fHasEndEvent;
+ calcDurations(index);
+ setInterpolator(index, from);
+ }
+ if (count == 0 && (apply->mode == SkApply::kMode_immediate || apply->mode == SkApply::kMode_create))
+ fMaxTime = apply->begin + apply->steps * SK_MSec1;
+}
+
+void SkActive::pickUp(SkActive* existing) {
+ SkTDOperandArray existingValues;
+ for (int index = 0; index < fAnimators.count(); index++) {
+ SkAnimateBase* animate = fAnimators[index];
+ SkASSERT(animate->getValuesType() == SkType_Float);
+ int components = animate->components();
+ SkOperand* from = animate->getValues();
+ SkOperand* to = &from[animate->components()];
+ existingValues.setCount(components);
+ existing->fInterpolators[index]->timeToValues(
+ existing->fState[index].fTicks - existing->fState[index].fStartTime, existingValues.begin());
+ SkScalar originalSum = 0;
+ SkScalar workingSum = 0;
+ for (int cIndex = 0; cIndex < components; cIndex++) {
+ SkScalar delta = to[cIndex].fScalar - from[cIndex].fScalar;
+ originalSum += SkScalarMul(delta, delta);
+ delta = to[cIndex].fScalar - existingValues[cIndex].fScalar;
+ workingSum += SkScalarMul(delta, delta);
+ }
+ if (workingSum < originalSum) {
+ SkScalar originalDistance = SkScalarSqrt(originalSum);
+ SkScalar workingDistance = SkScalarSqrt(workingSum);
+ existing->fState[index].fDuration = (SkMSec) SkScalarMulDiv(fState[index].fDuration,
+ workingDistance, originalDistance);
+ }
+ fInterpolators[index]->reset(components, 2, SkType_Float);
+ fInterpolators[index]->setKeyFrame(0, 0, existingValues.begin(), animate->blend[0]);
+ fInterpolators[index]->setKeyFrame(1, fState[index].fDuration, to, animate->blend[0]);
+ }
+}
+
+void SkActive::resetInterpolators() {
+ int animators = fAnimators.count();
+ for (int index = 0; index < animators; index++) {
+ SkAnimateBase* animate = fAnimators[index];
+ SkOperand* values = animate->getValues();
+ setInterpolator(index, values);
+ }
+}
+
+void SkActive::resetState() {
+ fDrawIndex = 0;
+ int count = fState.count();
+ for (int index = 0; index < count; index++) {
+ SkState& state = fState[index];
+ SkAnimateBase* animate = fAnimators[index];
+#if 0 // def SK_DEBUG
+ if (animate->fHasEndEvent)
+ SkDebugf("%8x %8x active resetState: has end event\n", this, animate);
+#endif
+ state.fStartTime = state.fBegin = fApply.begin + animate->begin;
+ state.fStarted = false;
+ state.fTicks = 0;
+ }
+}
+
+void SkActive::restoreInterpolatorValues(int index) {
+ SkOperandInterpolator& interpolator = *fInterpolators[index];
+ index += fDrawIndex ;
+ int count = interpolator.getValuesCount();
+ memcpy(interpolator.getValues(), fSaveInterpolators[index], count * sizeof(SkOperand));
+}
+
+void SkActive::saveInterpolatorValues(int index) {
+ SkOperandInterpolator& interpolator = *fInterpolators[index];
+ index += fDrawIndex ;
+ int count = interpolator.getValuesCount();
+ SkOperand* cache = new SkOperand[count]; // this should use sk_malloc/sk_free since SkOperand does not have a constructor/destructor
+ fSaveInterpolators[index] = cache;
+ memcpy(cache, interpolator.getValues(), count * sizeof(SkOperand));
+}
+
+void SkActive::setInterpolator(int index, SkOperand* from) {
+ if (from == nullptr) // legitimate for set string
+ return;
+ SkAnimateBase* animate = fAnimators[index];
+ int entries = animate->entries();
+ SkASSERT(entries > 0);
+ SkMSec duration = fState[index].fDuration;
+ int components = animate->components();
+ SkOperandInterpolator& interpolator = *fInterpolators[index];
+ interpolator.reset(components, entries == 1 ? 2 : entries, animate->getValuesType());
+ interpolator.setMirror(SkToBool(animate->fMirror));
+ interpolator.setReset(SkToBool(animate->fReset));
+ interpolator.setRepeatCount(animate->repeat);
+ if (entries == 1) {
+ interpolator.setKeyFrame(0, 0, from, animate->blend[0]);
+ interpolator.setKeyFrame(1, duration, from, animate->blend[0]);
+ return;
+ }
+ for (int entry = 0; entry < entries; entry++) {
+ int blendIndex = SkMin32(animate->blend.count() - 1, entry);
+ interpolator.setKeyFrame(entry, entry * duration / (entries - 1), from,
+ animate->blend[blendIndex]);
+ from += components;
+ }
+}
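
setInterpolator() spreads the supplied entries evenly across the state's duration, placing entry i at i * duration / (entries - 1), and a single entry degenerates into a constant two-keyframe hold. A small sketch of just that spacing, assuming an unsigned millisecond duration (illustrative code, not part of the Skia sources):

    #include <cstdio>

    // Print the evenly spaced keyframe times that setInterpolator() above
    // would hand to the interpolator.
    void printKeyframeTimes(int entries, unsigned duration) {
        if (entries == 1) {                       // degenerate case: hold a constant value
            std::printf("0 %u\n", duration);      // same value at time 0 and at the end
            return;
        }
        for (int entry = 0; entry < entries; entry++)
            std::printf("%u ", entry * duration / (entries - 1));
        std::printf("\n");
    }

    int main() {
        printKeyframeTimes(4, 900);   // prints: 0 300 600 900
        printKeyframeTimes(1, 900);   // prints: 0 900
    }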
+
+void SkActive::setSteps(int steps) {
+ int count = fState.count();
+ fMaxTime = 0;
+ for (int index = 0; index < count; index++) {
+ SkState& state = fState[index];
+ state.fSteps = steps;
+ calcDurations(index);
+ }
+}
+
+void SkActive::start() {
+ int count = fState.count();
+ SkASSERT(count == fAnimators.count());
+ SkASSERT(count == fInterpolators.count());
+ for (int index = 0; index < count; index++) {
+ SkState& state = fState[index];
+ if (state.fStarted)
+ continue;
+ state.fStarted = true;
+#if defined SK_DEBUG && defined SK_DEBUG_ANIMATION_TIMING
+ SkString debugOut;
+ SkMSec time = fMaker.getAppTime();
+ debugOut.appendS32(time - fMaker.fDebugTimeBase);
+ debugOut.append(" active start adjust delay id=");
+ debugOut.append(fApply._id);
+ debugOut.append("; ");
+ debugOut.append(fAnimators[index]->_id);
+ debugOut.append("=");
+ debugOut.appendS32(fAnimators[index]->fStart - fMaker.fDebugTimeBase);
+ debugOut.append(":");
+ debugOut.appendS32(state.fStartTime);
+#endif
+ if (state.fStartTime > 0) {
+ SkMSec future = fAnimators[index]->fStart + state.fStartTime;
+ if (future > fMaker.fEnableTime)
+ fMaker.notifyInvalTime(future);
+ else
+ fMaker.notifyInval();
+#if defined SK_DEBUG && defined SK_DEBUG_ANIMATION_TIMING
+ debugOut.append(":");
+ debugOut.appendS32(future - fMaker.fDebugTimeBase);
+#endif
+ }
+ if (state.fStartTime >= fMaker.fAdjustedStart) {
+ state.fStartTime -= fMaker.fAdjustedStart;
+#if defined SK_DEBUG && defined SK_DEBUG_ANIMATION_TIMING
+ debugOut.append(" (less adjust = ");
+ debugOut.appendS32(fMaker.fAdjustedStart);
+#endif
+ }
+ state.fStartTime += fAnimators[index]->fStart;
+#if defined SK_DEBUG && defined SK_DEBUG_ANIMATION_TIMING
+ debugOut.append(") new start = ");
+ debugOut.appendS32(state.fStartTime - fMaker.fDebugTimeBase);
+ SkDebugf("%s\n", debugOut.c_str());
+// SkASSERT((int) (state.fStartTime - fMaker.fDebugTimeBase) >= 0);
+#endif
+ }
+ SkASSERT(fAnimators.count() == fInterpolators.count());
+}
+
+#ifdef SK_DEBUG
+void SkActive::validate() {
+ int count = fState.count();
+ SkASSERT(count == fAnimators.count());
+ SkASSERT(count == fInterpolators.count());
+ for (int index = 0; index < count; index++) {
+ SkASSERT(fAnimators[index]);
+ SkASSERT(fInterpolators[index]);
+// SkAnimateBase* test = fAnimators[index];
+// SkASSERT(fApply.scope == test->fTarget || fApply.scope->contains(test->fTarget));
+ }
+}
+#endif
+
+// think about this:
+// there should only be one animate object, not two, to go up and down.
+// when an apply with a reverse transition comes into play, it needs to pick up the value
+// of the existing animate object and then remove it from the list.
+// the code below should only be bumping fSave, and there shouldn't be anything
+// it needs to be synchronized with.
+
+// however, if there are two animates both operating on the same field, then
+// when one replaces the other, it may make sense to pick up the old value as a starting
+// value for the new one somehow.
+
+//void SkActive::SkState::bumpSave() {
+// if (fMode != SkApply::kMode_hold)
+// return;
+// if (fTransition == SkApply::kTransition_reverse) {
+// if (fSave > 0)
+// fSave -= SK_MSec1;
+// } else if (fSave < fDuration)
+// fSave += SK_MSec1;
+//}
+
+SkMSec SkActive::SkState::getRelativeTime(SkMSec time) {
+ SkMSec result = time;
+// if (fMode == SkApply::kMode_hold)
+// result = fSave;
+// else
+ if (fTransition == SkApply::kTransition_reverse) {
+ if (SkMSec_LT(fDuration, time))
+ result = 0;
+ else
+ result = fDuration - time;
+ }
+ return result;
+}
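
getRelativeTime() is where the reverse transition plays an animation backwards: a local time t is remapped to duration - t, pinned to 0 once t runs past the duration. A tiny sketch of that mapping with plain unsigned milliseconds (the helper name is made up for illustration):

    #include <cstdint>
    #include <cassert>

    // Sketch of the reverse-transition remap: t -> duration - t, clamped at 0.
    uint32_t reverseTime(uint32_t time, uint32_t duration) {
        return time > duration ? 0 : duration - time;
    }

    int main() {
        assert(reverseTime(0, 1000) == 1000);     // a reversed animation starts at its end
        assert(reverseTime(250, 1000) == 750);
        assert(reverseTime(1500, 1000) == 0);     // past the duration, pin to the start
    }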
diff --git a/gfx/skia/skia/src/animator/SkAnimateActive.h b/gfx/skia/skia/src/animator/SkAnimateActive.h
new file mode 100644
index 000000000..2bc458bae
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkAnimateActive.h
@@ -0,0 +1,79 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkAnimateActive_DEFINED
+#define SkAnimateActive_DEFINED
+
+#include "SkDisplayApply.h"
+#include "SkOperandInterpolator.h"
+#include "SkIntArray.h"
+
+class SkAnimateMaker;
+
+class SkActive {
+public:
+ SkActive(SkApply& , SkAnimateMaker& );
+ ~SkActive();
+ void advance();
+ void append(SkApply* );
+ void calcDurations(int index);
+ void create(SkADrawable* scope, SkMSec time);
+ bool draw() { return immediate(false); }
+ bool enable() { return immediate(true); }
+ void init( );
+ SkMSec getTime(SkMSec inTime, int animatorIndex);
+ void pickUp(SkActive* existing);
+ void reset() { fDrawIndex = 0; }
+ void setInterpolator(int index, SkOperand* from);
+ void start();
+#ifdef SK_DEBUG
+ void validate();
+#endif
+private:
+ void appendSave(int oldCount);
+ void fixInterpolator(SkBool save);
+ bool immediate(bool enable);
+ bool initializeSave();
+ void initState(SkApply* , int offset);
+ void resetInterpolators();
+ void resetState();
+ void restoreInterpolatorValues(int index);
+ void saveInterpolatorValues(int index);
+ void setSteps(int steps);
+ struct SkState {
+// void bumpSave();
+ SkMSec getRelativeTime(SkMSec time);
+ SkApply::Mode fMode;
+ SkApply::Transition fTransition;
+ SkBool8 fPickup;
+ SkBool8 fRestore;
+ SkBool8 fStarted;
+ SkBool8 fUnpostedEndEvent;
+ int32_t fSteps;
+ SkMSec fBegin;
+ SkMSec fStartTime;
+ SkMSec fDuration;
+ SkMSec fSave;
+ SkMSec fTicks;
+ };
+ SkActive& operator= (const SkActive& );
+ SkTDArray<SkOperandInterpolator*> fInterpolators;
+ SkApply& fApply;
+ SkTDArray<SkState> fState; // one per animator
+ SkTDOperandPtrArray fSaveRestore; // if apply has restore="true"
+ SkTDOperandPtrArray fSaveInterpolators;
+ SkTDAnimateArray fAnimators;
+ SkMSec fMaxTime; // greatest of all animation durations; only used by immediate mode
+ SkAnimateMaker& fMaker;
+ int fDrawIndex;
+ int fDrawMax;
+ friend class SkApply;
+};
+
+#endif // SkAnimateActive_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkAnimateBase.cpp b/gfx/skia/skia/src/animator/SkAnimateBase.cpp
new file mode 100644
index 000000000..a4b2c64d1
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkAnimateBase.cpp
@@ -0,0 +1,235 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkAnimateBase.h"
+#include "SkAnimateMaker.h"
+#include "SkAnimateProperties.h"
+#include "SkAnimatorScript.h"
+#include "SkDisplayApply.h"
+#include "SkADrawable.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkAnimateBase::fInfo[] = {
+ SK_MEMBER(begin, MSec),
+ SK_MEMBER_ARRAY(blend, Float),
+ SK_MEMBER(dur, MSec),
+ SK_MEMBER_PROPERTY(dynamic, Boolean),
+ SK_MEMBER(field, String), // name of member info in target
+ SK_MEMBER(formula, DynamicString),
+ SK_MEMBER(from, DynamicString),
+ SK_MEMBER(lval, DynamicString),
+ SK_MEMBER_PROPERTY(mirror, Boolean),
+ SK_MEMBER(repeat, Float),
+ SK_MEMBER_PROPERTY(reset, Boolean),
+ SK_MEMBER_PROPERTY(step, Int),
+ SK_MEMBER(target, DynamicString),
+ SK_MEMBER(to, DynamicString),
+ SK_MEMBER_PROPERTY(values, DynamicString)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkAnimateBase);
+
+SkAnimateBase::SkAnimateBase() : begin(0), dur(1), repeat(SK_Scalar1),
+ fApply(nullptr), fFieldInfo(nullptr), fFieldOffset(0), fStart((SkMSec) -1), fTarget(nullptr),
+ fChanged(0), fDelayed(0), fDynamic(0), fHasEndEvent(0), fHasValues(0),
+ fMirror(0), fReset(0), fResetPending(0), fTargetIsScope(0) {
+ blend.setCount(1);
+ blend[0] = SK_Scalar1;
+}
+
+SkAnimateBase::~SkAnimateBase() {
+ SkDisplayTypes type = fValues.getType();
+ if (type == SkType_String || type == SkType_DynamicString) {
+ SkASSERT(fValues.count() == 1);
+ delete fValues[0].fString;
+ }
+}
+
+int SkAnimateBase::components() {
+ return 1;
+}
+
+SkDisplayable* SkAnimateBase::deepCopy(SkAnimateMaker* maker) {
+ SkAnimateBase* result = (SkAnimateBase*) INHERITED::deepCopy(maker);
+ result->fApply = fApply;
+ result->fFieldInfo = fFieldInfo;
+ result->fHasValues = false;
+ return result;
+}
+
+void SkAnimateBase::dirty() {
+ fChanged = true;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkAnimateBase::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ if (target.size() > 0)
+ SkDebugf("target=\"%s\" ", target.c_str());
+ else if (fTarget && strcmp(fTarget->id, ""))
+ SkDebugf("target=\"%s\" ", fTarget->id);
+ if (lval.size() > 0)
+ SkDebugf("lval=\"%s\" ", lval.c_str());
+ if (field.size() > 0)
+ SkDebugf("field=\"%s\" ", field.c_str());
+ else if (fFieldInfo)
+ SkDebugf("field=\"%s\" ", fFieldInfo->fName);
+ if (formula.size() > 0)
+ SkDebugf("formula=\"%s\" ", formula.c_str());
+ else {
+ if (from.size() > 0)
+ SkDebugf("from=\"%s\" ", from.c_str());
+ SkDebugf("to=\"%s\" ", to.c_str());
+ }
+ if (begin != 0) {
+ SkDebugf("begin=\"%g\" ", begin * 0.001);
+ }
+}
+#endif
+
+SkDisplayable* SkAnimateBase::getParent() const {
+ return (SkDisplayable*) fApply;
+}
+
+bool SkAnimateBase::getProperty(int index, SkScriptValue* value) const {
+ int boolResult;
+ switch (index) {
+ case SK_PROPERTY(dynamic):
+ boolResult = fDynamic;
+ goto returnBool;
+ case SK_PROPERTY(mirror):
+ boolResult = fMirror;
+ goto returnBool;
+ case SK_PROPERTY(reset):
+ boolResult = fReset;
+returnBool:
+ value->fOperand.fS32 = SkToBool(boolResult);
+ value->fType = SkType_Boolean;
+ break;
+ case SK_PROPERTY(step):
+ if (fApply == nullptr)
+ return false; // !!! notify there's an error?
+ fApply->getStep(value);
+ break;
+ case SK_PROPERTY(values):
+ value->fOperand.fString = (SkString*) &to;
+ value->fType = SkType_String;
+ break;
+ default:
+ SkASSERT(0);
+ return false;
+ }
+ return true;
+}
+
+bool SkAnimateBase::hasExecute() const
+{
+ return false;
+}
+
+void SkAnimateBase::onEndElement(SkAnimateMaker& maker) {
+ fChanged = false;
+ setTarget(maker);
+ if (field.size()) {
+ SkASSERT(fTarget);
+ fFieldInfo = fTarget->getMember(field.c_str());
+ field.reset();
+ }
+ if (lval.size()) {
+ // lval must be of the form x[y]
+ const char* lvalStr = lval.c_str();
+ const char* arrayEnd = strchr(lvalStr, '[');
+ if (arrayEnd == nullptr)
+ return; //should this return an error?
+ size_t arrayNameLen = arrayEnd - lvalStr;
+ SkString arrayStr(lvalStr, arrayNameLen);
+ SkASSERT(fTarget); // should this return an error?
+ fFieldInfo = fTarget->getMember(arrayStr.c_str());
+ SkString scriptStr(arrayEnd + 1, lval.size() - arrayNameLen - 2);
+ SkAnimatorScript::EvaluateInt(maker, this, scriptStr.c_str(), &fFieldOffset);
+ }
+}
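
The lval branch above splits a string of the form name[indexScript]: the text before the bracket names the target member, and the text between the brackets is evaluated as a script that yields the element offset. A self-contained sketch of the split, with a hypothetical splitLval helper and std::string standing in for SkString:

    #include <cstring>
    #include <string>
    #include <cstdio>

    // Split an lval of the form "name[indexScript]" the way onEndElement() does:
    // everything before '[' is the member name, everything between the brackets
    // is handed to the script engine to produce an integer offset.
    bool splitLval(const char* lval, std::string* name, std::string* indexScript) {
        const char* open = std::strchr(lval, '[');
        if (open == nullptr)
            return false;                          // not of the form x[y]
        size_t nameLen = open - lval;
        name->assign(lval, nameLen);
        indexScript->assign(open + 1, std::strlen(lval) - nameLen - 2);  // drop '[' and trailing ']'
        return true;
    }

    int main() {
        std::string name, index;
        if (splitLval("colors[3]", &name, &index))
            std::printf("member=%s index=%s\n", name.c_str(), index.c_str());  // member=colors index=3
    }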
+
+void SkAnimateBase::packARGB(SkScalar array[], int count, SkTDOperandArray* converted)
+{
+ SkASSERT(count == 4);
+ converted->setCount(1);
+ SkColor color = SkColorSetARGB(SkScalarRoundToInt(array[0]),
+ SkScalarRoundToInt(array[1]),
+ SkScalarRoundToInt(array[2]),
+ SkScalarRoundToInt(array[3]));
+ (*converted)[0].fS32 = color;
+}
+
+
+
+void SkAnimateBase::refresh(SkAnimateMaker& ) {
+}
+
+bool SkAnimateBase::setParent(SkDisplayable* apply) {
+ SkASSERT(apply->isApply());
+ fApply = (SkApply*) apply;
+ return false;
+}
+
+bool SkAnimateBase::setProperty(int index, SkScriptValue& value) {
+ bool boolValue = SkToBool(value.fOperand.fS32);
+ switch (index) {
+ case SK_PROPERTY(dynamic):
+ fDynamic = boolValue;
+ goto checkForBool;
+ case SK_PROPERTY(values):
+ fHasValues = true;
+ SkASSERT(value.fType == SkType_String);
+ to = *value.fOperand.fString;
+ break;
+ case SK_PROPERTY(mirror):
+ fMirror = boolValue;
+ goto checkForBool;
+ case SK_PROPERTY(reset):
+ fReset = boolValue;
+checkForBool:
+ SkASSERT(value.fType == SkType_Boolean);
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+void SkAnimateBase::setTarget(SkAnimateMaker& maker) {
+ if (target.size()) {
+ SkAnimatorScript engine(maker, this, SkType_Displayable);
+ const char* script = target.c_str();
+ SkScriptValue scriptValue;
+ bool success = engine.evaluateScript(&script, &scriptValue);
+ if (success && scriptValue.fType == SkType_Displayable)
+ fTarget = scriptValue.fOperand.fDrawable;
+ else if (maker.find(target.c_str(), (SkDisplayable**) &fTarget) == false) {
+ if (fApply->getMode() == SkApply::kMode_create)
+ return; // may not be an error
+ if (engine.getError() != SkScriptEngine::kNoError)
+ maker.setScriptError(engine);
+ else {
+ maker.setErrorNoun(target);
+ maker.setErrorCode(SkDisplayXMLParserError::kTargetIDNotFound);
+ }
+ return;
+ }
+ if (fApply && fApply->getMode() != SkApply::kMode_create)
+ target.reset();
+ }
+}
+
+bool SkAnimateBase::targetNeedsInitialization() const {
+ return false;
+}
diff --git a/gfx/skia/skia/src/animator/SkAnimateBase.h b/gfx/skia/skia/src/animator/SkAnimateBase.h
new file mode 100644
index 000000000..0da4af117
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkAnimateBase.h
@@ -0,0 +1,83 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkAnimateBase_DEFINED
+#define SkAnimateBase_DEFINED
+
+#include "SkDisplayable.h"
+#include "SkMath.h"
+#include "SkMemberInfo.h"
+#include "SkTypedArray.h"
+
+class SkApply;
+class SkADrawable;
+
+class SkAnimateBase : public SkDisplayable {
+public:
+ DECLARE_MEMBER_INFO(AnimateBase);
+ SkAnimateBase();
+ virtual ~SkAnimateBase();
+ virtual int components();
+ SkDisplayable* deepCopy(SkAnimateMaker* ) override;
+ void dirty() override;
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+ int entries() { return fValues.count() / components(); }
+ virtual bool hasExecute() const;
+ bool isDynamic() const { return SkToBool(fDynamic); }
+ SkDisplayable* getParent() const override;
+ bool getProperty(int index, SkScriptValue* value) const override;
+ SkMSec getStart() const { return fStart; }
+ SkOperand* getValues() { return fValues.begin(); }
+ SkDisplayTypes getValuesType() { return fValues.getType(); }
+ void onEndElement(SkAnimateMaker& ) override;
+ void packARGB(SkScalar [], int count, SkTDOperandArray* );
+ virtual void refresh(SkAnimateMaker& );
+ void setChanged(bool changed) { fChanged = changed; }
+ void setHasEndEvent() { fHasEndEvent = true; }
+ bool setParent(SkDisplayable* ) override;
+ bool setProperty(int index, SkScriptValue& value) override;
+ void setTarget(SkAnimateMaker& );
+ virtual bool targetNeedsInitialization() const;
+protected:
+ SkMSec begin;
+ SkTDScalarArray blend;
+ SkMSec dur;
+ // !!! make field part of a union with fFieldInfo, or fValues, something known later?
+ SkString field; // temporary; once target is known, this is reset
+ SkString formula;
+ SkString from;
+ SkString lval;
+ SkScalar repeat;
+ SkString target; // temporary; once target is known, this is reset
+ SkString to;
+ SkApply* fApply;
+ const SkMemberInfo* fFieldInfo;
+ int fFieldOffset;
+ SkMSec fStart; // corrected time when this apply was enabled
+ SkADrawable* fTarget;
+ SkTypedArray fValues;
+ unsigned fChanged : 1; // true when value referenced by script has changed
+ unsigned fDelayed : 1; // enabled, but undrawn pending delay
+ unsigned fDynamic : 1;
+ unsigned fHasEndEvent : 1;
+ unsigned fHasValues : 1; // set if 'values' passed instead of 'to'
+ unsigned fMirror : 1;
+ unsigned fReset : 1;
+ unsigned fResetPending : 1;
+ unsigned fTargetIsScope : 1;
+private:
+ typedef SkDisplayable INHERITED;
+ friend class SkActive;
+ friend class SkApply;
+ friend class SkDisplayList;
+};
+
+#endif // SkAnimateBase_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkAnimateField.cpp b/gfx/skia/skia/src/animator/SkAnimateField.cpp
new file mode 100644
index 000000000..00113823e
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkAnimateField.cpp
@@ -0,0 +1,111 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkAnimate.h"
+#include "SkAnimateMaker.h"
+#include "SkADrawable.h"
+#include "SkParse.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkAnimate::fInfo[] = {
+ SK_MEMBER_INHERITED
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkAnimate);
+
+SkAnimate::SkAnimate() : fComponents(0) {
+}
+
+SkAnimate::~SkAnimate() {
+}
+
+int SkAnimate::components() {
+ return fComponents;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkAnimate::dump(SkAnimateMaker* maker) {
+ INHERITED::dump(maker); //from animateBase
+ //SkSet inherits from this class
+ if (getType() != SkType_Set) {
+ if (fMirror)
+ SkDebugf("mirror=\"true\" ");
+ if (fReset)
+ SkDebugf("reset=\"true\" ");
+ SkDebugf("dur=\"%g\" ", dur * 0.001);
+ if (repeat != SK_Scalar1)
+ SkDebugf("repeat=\"%g\" ", SkScalarToFloat(repeat));
+ //if (fHasValues)
+ // SkDebugf("values=\"%s\" ", values);
+ if (blend.count() != 1 || blend[0] != SK_Scalar1) {
+ SkDebugf("blend=\"[");
+ bool firstElem = true;
+ for (int i = 0; i < blend.count(); i++) {
+ if (!firstElem)
+ SkDebugf(",");
+ firstElem = false;
+ SkDebugf("%g", SkScalarToFloat(blend[i]));
+ }
+ SkDebugf("]\" ");
+ }
+ SkDebugf("/>\n"); // assume that if it is a set, it is dumped separately
+ }
+}
+#endif
+
+bool SkAnimate::resolveCommon(SkAnimateMaker& maker) {
+ if (fTarget == nullptr) // if nullptr, onEndElement is called again after the apply closes and sets the target to its scope
+ return false;
+ INHERITED::onEndElement(maker);
+ return maker.hasError() == false;
+}
+
+void SkAnimate::onEndElement(SkAnimateMaker& maker) {
+ bool resolved = resolveCommon(maker);
+ if (resolved && fFieldInfo == nullptr) {
+ maker.setErrorNoun(field);
+ maker.setErrorCode(SkDisplayXMLParserError::kFieldNotInTarget);
+ }
+ if (resolved == false || fFieldInfo == nullptr)
+ return;
+ SkDisplayTypes outType = fFieldInfo->getType();
+ if (fHasValues) {
+ SkASSERT(to.size() > 0);
+ fFieldInfo->setValue(maker, &fValues, 0, 0, nullptr, outType, to);
+ SkASSERT(0);
+ // !!! this needs to set fComponents
+ return;
+ }
+ fComponents = fFieldInfo->getCount();
+ if (fFieldInfo->fType == SkType_Array) {
+ SkTypedArray* array = (SkTypedArray*) fFieldInfo->memberData(fTarget);
+ int count = array->count();
+ if (count > 0)
+ fComponents = count;
+ }
+ if (outType == SkType_ARGB) {
+ fComponents <<= 2; // four color components
+ outType = SkType_Float;
+ }
+ fValues.setType(outType);
+ if (formula.size() > 0){
+ fComponents = 1;
+ from.set("0");
+ to.set("dur");
+ outType = SkType_MSec;
+ }
+ int max = fComponents * 2;
+ fValues.setCount(max);
+ memset(fValues.begin(), 0, max * sizeof(fValues.begin()[0]));
+ fFieldInfo->setValue(maker, &fValues, fFieldOffset, max, this, outType, from);
+ fFieldInfo->setValue(maker, &fValues, fComponents + fFieldOffset, max, this, outType, to);
+}
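
When onEndElement() succeeds, fValues ends up holding fComponents starting values followed by fComponents ending values, and an ARGB field is widened to four float components before the from/to strings are parsed into those slots. A rough sketch of that layout, using a hypothetical AnimatedField type and plain floats in place of SkOperand:

    #include <vector>
    #include <cstdio>

    // Sketch of the value layout built at the end of onEndElement(): the array
    // holds fComponents "from" values followed by fComponents "to" values.
    struct AnimatedField {
        int components = 0;
        std::vector<float> values;                // size == components * 2

        void set(const std::vector<float>& from, const std::vector<float>& to) {
            components = (int) from.size();
            values = from;
            values.insert(values.end(), to.begin(), to.end());
        }
        const float* fromValues() const { return values.data(); }
        const float* toValues()   const { return values.data() + components; }
    };

    int main() {
        AnimatedField f;
        // An ARGB field is widened to four float components (fComponents <<= 2 above).
        f.set({255, 0, 0, 0}, {255, 255, 255, 255});
        std::printf("%g -> %g (alpha stays %g)\n",
                    f.fromValues()[1], f.toValues()[1], f.toValues()[0]);
    }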
diff --git a/gfx/skia/skia/src/animator/SkAnimateMaker.cpp b/gfx/skia/skia/src/animator/SkAnimateMaker.cpp
new file mode 100644
index 000000000..066f877a9
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkAnimateMaker.cpp
@@ -0,0 +1,372 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkAnimateMaker.h"
+#include "SkAnimator.h"
+#include "SkAnimatorScript.h"
+#include "SkDisplayable.h"
+#include "SkDisplayApply.h"
+#include "SkDisplayList.h"
+#include "SkDisplayMovie.h"
+#include "SkDisplayType.h"
+#include "SkExtras.h"
+#include "SkMemberInfo.h"
+#include "SkStream.h"
+#include "SkSystemEventTypes.h"
+#include "SkTime.h"
+
+class DefaultTimeline : public SkAnimator::Timeline {
+ virtual SkMSec getMSecs() const {
+ return SkEvent::GetMSecsSinceStartup();
+ }
+} gDefaultTimeline;
+
+SkAnimateMaker::SkAnimateMaker(SkAnimator* animator, SkCanvas* canvas, SkPaint* paint)
+ : fActiveEvent(nullptr), fAdjustedStart(0), fCanvas(canvas), fEnableTime(0),
+ fHostEventSinkID(0), fMinimumInterval((SkMSec) -1), fPaint(paint), fParentMaker(nullptr),
+ fTimeline(&gDefaultTimeline), fInInclude(false), fInMovie(false),
+ fFirstScriptError(false), fLoaded(false), fIDs(256), fAnimator(animator)
+{
+ fScreenplay.time = 0;
+#if defined SK_DEBUG && defined SK_DEBUG_ANIMATION_TIMING
+ fDebugTimeBase = (SkMSec) -1;
+#endif
+#ifdef SK_DUMP_ENABLED
+ fDumpEvents = fDumpGConditions = fDumpPosts = false;
+#endif
+}
+
+SkAnimateMaker::~SkAnimateMaker() {
+ deleteMembers();
+}
+
+#if 0
+SkMSec SkAnimateMaker::adjustDelay(SkMSec expectedBase, SkMSec delay) {
+ SkMSec appTime = (*fTimeCallBack)();
+ if (appTime)
+ delay -= appTime - expectedBase;
+ if (delay < 0)
+ delay = 0;
+ return delay;
+}
+#endif
+
+void SkAnimateMaker::appendActive(SkActive* active) {
+ fDisplayList.append(active);
+}
+
+void SkAnimateMaker::clearExtraPropertyCallBack(SkDisplayTypes type) {
+ SkExtras** end = fExtras.end();
+ for (SkExtras** extraPtr = fExtras.begin(); extraPtr < end; extraPtr++) {
+ SkExtras* extra = *extraPtr;
+ if (extra->definesType(type)) {
+ extra->fExtraCallBack = nullptr;
+ extra->fExtraStorage = nullptr;
+ break;
+ }
+ }
+}
+
+bool SkAnimateMaker::computeID(SkDisplayable* displayable, SkDisplayable* parent, SkString* newID) {
+ const char* script;
+ if (findKey(displayable, &script) == false)
+ return true;
+ return SkAnimatorScript::EvaluateString(*this, displayable, parent, script, newID);
+}
+
+SkDisplayable* SkAnimateMaker::createInstance(const char name[], size_t len) {
+ SkDisplayTypes type = SkDisplayType::GetType(this, name, len );
+ if ((int)type >= 0)
+ return SkDisplayType::CreateInstance(this, type);
+ return nullptr;
+}
+
+// differs from SkAnimator::decodeStream in that it does not reset error state
+bool SkAnimateMaker::decodeStream(SkStream* stream)
+{
+ SkDisplayXMLParser parser(*this);
+ return parser.parse(*stream);
+}
+
+// differs from SkAnimator::decodeURI in that it does not set URI base
+bool SkAnimateMaker::decodeURI(const char uri[]) {
+// SkDebugf("animator decode %s\n", uri);
+
+// SkStream* stream = SkStream::GetURIStream(fPrefix.c_str(), uri);
+ std::unique_ptr<SkStream> stream = SkStream::MakeFromFile(uri);
+ if (stream) {
+ bool success = decodeStream(stream.get());
+ if (hasError() && fError.hasNoun() == false)
+ fError.setNoun(uri);
+ return success;
+ } else {
+ return false;
+ }
+}
+
+#if defined SK_DEBUG && 0
+// used for the #if'd-out section of deleteMembers
+#include "SkTSearch.h"
+
+extern "C" {
+ int compare_disp(const void* a, const void* b) {
+ return *(const SkDisplayable**)a - *(const SkDisplayable**)b;
+ }
+}
+#endif
+
+void SkAnimateMaker::delayEnable(SkApply* apply, SkMSec time) {
+ int index = fDelayed.find(apply);
+ if (index < 0) {
+ *fDelayed.append() = apply;
+ }
+
+ (new SkEvent(SK_EventType_Delay, fAnimator->getSinkID()))->postTime(time);
+}
+
+void SkAnimateMaker::deleteMembers() {
+ int index;
+#if defined SK_DEBUG && 0
+ //this code checks to see if helpers are among the children, but it is not complete -
+ //it should check the children of the children
+ int result;
+ SkTDArray<SkDisplayable*> children(fChildren.begin(), fChildren.count());
+ SkQSort(children.begin(), children.count(), sizeof(SkDisplayable*),compare_disp);
+ for (index = 0; index < fHelpers.count(); index++) {
+ SkDisplayable* helper = fHelpers[index];
+ result = SkTSearch(children.begin(), children.count(), helper, sizeof(SkDisplayable*));
+ SkASSERT(result < 0);
+ }
+#endif
+ for (index = 0; index < fChildren.count(); index++) {
+ SkDisplayable* child = fChildren[index];
+ delete child;
+ }
+ for (index = 0; index < fHelpers.count(); index++) {
+ SkDisplayable* helper = fHelpers[index];
+ delete helper;
+ }
+ for (index = 0; index < fExtras.count(); index++) {
+ SkExtras* extras = fExtras[index];
+ delete extras;
+ }
+}
+
+void SkAnimateMaker::doDelayedEvent() {
+ fEnableTime = getAppTime();
+ for (int index = 0; index < fDelayed.count(); ) {
+ SkDisplayable* child = fDelayed[index];
+ SkASSERT(child->isApply());
+ SkApply* apply = (SkApply*) child;
+ apply->interpolate(*this, fEnableTime);
+ if (apply->hasDelayedAnimator())
+ index++;
+ else
+ fDelayed.remove(index);
+ }
+}
+
+bool SkAnimateMaker::doEvent(const SkEvent& event) {
+ return (!fInMovie || fLoaded) && fAnimator->doEvent(event);
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkAnimateMaker::dump(const char* match) {
+ SkTDict<SkDisplayable*>::Iter iter(fIDs);
+ const char* name;
+ SkDisplayable* result;
+ while ((name = iter.next(&result)) != nullptr) {
+ if (strcmp(match,name) == 0)
+ result->dump(this);
+ }
+}
+#endif
+
+int SkAnimateMaker::dynamicProperty(SkString& nameStr, SkDisplayable** displayablePtr ) {
+ const char* name = nameStr.c_str();
+ const char* dot = strchr(name, '.');
+ SkASSERT(dot);
+ SkDisplayable* displayable;
+ if (find(name, dot - name, &displayable) == false) {
+ SkASSERT(0);
+ return 0;
+ }
+ const char* fieldName = dot + 1;
+ const SkMemberInfo* memberInfo = displayable->getMember(fieldName);
+ *displayablePtr = displayable;
+ return (int) memberInfo->fOffset;
+}
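
dynamicProperty() resolves a dotted name: the part before the dot identifies a displayable by ID and the part after it names one of that displayable's members, whose offset is returned. A minimal sketch of the split (the paint.color input and splitDottedProperty helper are purely illustrative):

    #include <cstring>
    #include <string>
    #include <cstdio>

    // Sketch of the "id.field" split done by dynamicProperty(): the part before
    // the dot names a displayable, the part after names one of its members.
    bool splitDottedProperty(const char* name, std::string* id, std::string* field) {
        const char* dot = std::strchr(name, '.');
        if (dot == nullptr)
            return false;
        id->assign(name, dot - name);
        field->assign(dot + 1);
        return true;
    }

    int main() {
        std::string id, field;
        if (splitDottedProperty("paint.color", &id, &field))
            std::printf("id=%s field=%s\n", id.c_str(), field.c_str());  // id=paint field=color
    }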
+
+SkMSec SkAnimateMaker::getAppTime() const {
+ return fTimeline->getMSecs();
+}
+
+#ifdef SK_DEBUG
+SkAnimator* SkAnimateMaker::getRoot()
+{
+ SkAnimateMaker* maker = this;
+ while (maker->fParentMaker)
+ maker = maker->fParentMaker;
+ return maker == this ? nullptr : maker->fAnimator;
+}
+#endif
+
+void SkAnimateMaker::helperAdd(SkDisplayable* trackMe) {
+ SkASSERT(fHelpers.find(trackMe) < 0);
+ *fHelpers.append() = trackMe;
+}
+
+void SkAnimateMaker::helperRemove(SkDisplayable* alreadyTracked) {
+ int helperIndex = fHelpers.find(alreadyTracked);
+ if (helperIndex >= 0)
+ fHelpers.remove(helperIndex);
+}
+
+#if 0
+void SkAnimateMaker::loadMovies() {
+ for (SkDisplayable** dispPtr = fMovies.begin(); dispPtr < fMovies.end(); dispPtr++) {
+ SkDisplayable* displayable = *dispPtr;
+ SkASSERT(displayable->getType() == SkType_Movie);
+ SkDisplayMovie* movie = (SkDisplayMovie*) displayable;
+ SkAnimateMaker* movieMaker = movie->fMovie.fMaker;
+ movieMaker->fEvents.doEvent(*movieMaker, SkDisplayEvent::kOnload, nullptr);
+ movieMaker->fEvents.removeEvent(SkDisplayEvent::kOnload, nullptr);
+ movieMaker->loadMovies();
+ }
+}
+#endif
+
+void SkAnimateMaker::notifyInval() {
+ if (fHostEventSinkID)
+ fAnimator->onEventPost(new SkEvent(SK_EventType_Inval), fHostEventSinkID);
+}
+
+void SkAnimateMaker::notifyInvalTime(SkMSec time) {
+ if (fHostEventSinkID)
+ fAnimator->onEventPostTime(new SkEvent(SK_EventType_Inval), fHostEventSinkID, time);
+}
+
+void SkAnimateMaker::postOnEnd(SkAnimateBase* animate, SkMSec end) {
+ SkEvent evt;
+ evt.setS32("time", animate->getStart() + end);
+ evt.setPtr("anim", animate);
+ evt.setType(SK_EventType_OnEnd);
+ SkEventSinkID sinkID = fAnimator->getSinkID();
+ fAnimator->onEventPost(new SkEvent(evt), sinkID);
+}
+
+void SkAnimateMaker::reset() {
+ deleteMembers();
+ fChildren.reset();
+ fHelpers.reset();
+ fIDs.reset();
+ fEvents.reset();
+ fDisplayList.hardReset();
+}
+
+void SkAnimateMaker::removeActive(SkActive* active) {
+ if (active == nullptr)
+ return;
+ fDisplayList.remove(active);
+}
+
+bool SkAnimateMaker::resolveID(SkDisplayable* displayable, SkDisplayable* original) {
+ SkString newID;
+ bool success = computeID(original, nullptr, &newID);
+ if (success)
+ setID(displayable, newID);
+ return success;
+}
+
+void SkAnimateMaker::setErrorString() {
+ fErrorString.reset();
+ if (fError.hasError()) {
+ SkString err;
+ if (fFileName.size() > 0)
+ fErrorString.set(fFileName.c_str());
+ else
+ fErrorString.set("screenplay error");
+ int line = fError.getLineNumber();
+ if (line >= 0) {
+ fErrorString.append(", ");
+ fErrorString.append("line ");
+ fErrorString.appendS32(line);
+ }
+ fErrorString.append(": ");
+ fError.getErrorString(&err);
+ fErrorString.append(err);
+#if defined SK_DEBUG
+ SkDebugf("%s\n", fErrorString.c_str());
+#endif
+ }
+}
+
+void SkAnimateMaker::setEnableTime(SkMSec appTime, SkMSec expectedTime) {
+#if defined SK_DEBUG && defined SK_DEBUG_ANIMATION_TIMING
+ SkString debugOut;
+ SkMSec time = getAppTime();
+ debugOut.appendS32(time - fDebugTimeBase);
+ debugOut.append(" set enable old enable=");
+ debugOut.appendS32(fEnableTime - fDebugTimeBase);
+ debugOut.append(" old adjust=");
+ debugOut.appendS32(fAdjustedStart);
+ debugOut.append(" new enable=");
+ debugOut.appendS32(expectedTime - fDebugTimeBase);
+ debugOut.append(" new adjust=");
+ debugOut.appendS32(appTime - expectedTime);
+ SkDebugf("%s\n", debugOut.c_str());
+#endif
+ fAdjustedStart = appTime - expectedTime;
+ fEnableTime = expectedTime;
+ SkDisplayable** firstMovie = fMovies.begin();
+ SkDisplayable** endMovie = fMovies.end();
+ for (SkDisplayable** ptr = firstMovie; ptr < endMovie; ptr++) {
+ SkDisplayMovie* movie = (SkDisplayMovie*) *ptr;
+ movie->fMovie.fMaker->setEnableTime(appTime, expectedTime);
+ }
+}
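
setEnableTime() records how far the host clock (appTime) has drifted ahead of the timeline the animation expects (expectedTime); SkActive::start() above subtracts that fAdjustedStart from pending start times so delays are not stretched by the lag. A small sketch of that bookkeeping, assuming times are plain unsigned milliseconds:

    #include <cassert>
    #include <cstdint>

    // Sketch of the enable-time bookkeeping: the maker remembers how far the
    // host's clock is ahead of the expected timeline, and start times measured
    // on the expected timeline are shifted by that amount.
    struct EnableClock {
        uint32_t adjustedStart = 0;
        uint32_t enableTime = 0;

        void setEnableTime(uint32_t appTime, uint32_t expectedTime) {
            adjustedStart = appTime - expectedTime;   // how far ahead the host clock is
            enableTime = expectedTime;
        }
        uint32_t adjustStart(uint32_t startTime) const {
            return startTime >= adjustedStart ? startTime - adjustedStart : startTime;
        }
    };

    int main() {
        EnableClock clock;
        clock.setEnableTime(/*appTime=*/5000, /*expectedTime=*/4800);
        assert(clock.adjustedStart == 200);
        assert(clock.adjustStart(1000) == 800);       // a 1s delay shrinks by the 200ms lag
    }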
+
+void SkAnimateMaker::setExtraPropertyCallBack(SkDisplayTypes type,
+ SkScriptEngine::_propertyCallBack callBack, void* userStorage) {
+ SkExtras** end = fExtras.end();
+ for (SkExtras** extraPtr = fExtras.begin(); extraPtr < end; extraPtr++) {
+ SkExtras* extra = *extraPtr;
+ if (extra->definesType(type)) {
+ extra->fExtraCallBack = callBack;
+ extra->fExtraStorage = userStorage;
+ break;
+ }
+ }
+}
+
+void SkAnimateMaker::setID(SkDisplayable* displayable, const SkString& newID) {
+ fIDs.set(newID.c_str(), displayable);
+#ifdef SK_DEBUG
+ displayable->_id.set(newID);
+ displayable->id = displayable->_id.c_str();
+#endif
+}
+
+void SkAnimateMaker::setScriptError(const SkScriptEngine& engine) {
+ SkString errorString;
+#ifdef SK_DEBUG
+ engine.getErrorString(&errorString);
+#endif
+ setErrorNoun(errorString);
+ setErrorCode(SkDisplayXMLParserError::kErrorInScript);
+}
+
+bool SkAnimateMaker::GetStep(const char* token, size_t len, void* stepPtr, SkScriptValue* value) {
+ if (SK_LITERAL_STR_EQUAL("step", token, len)) {
+ value->fOperand.fS32 = *(int32_t*) stepPtr;
+ value->fType = SkType_Int;
+ return true;
+ }
+ return false;
+}
diff --git a/gfx/skia/skia/src/animator/SkAnimateMaker.h b/gfx/skia/skia/src/animator/SkAnimateMaker.h
new file mode 100644
index 000000000..035affa56
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkAnimateMaker.h
@@ -0,0 +1,160 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkAnimateMaker_DEFINED
+#define SkAnimateMaker_DEFINED
+
+// #define SK_DEBUG_ANIMATION_TIMING
+
+#include "SkAnimator.h"
+#include "SkBitmap.h"
+#include "SkIntArray.h"
+#include "SkDisplayEvents.h"
+#include "SkDisplayList.h"
+#include "SkDisplayScreenplay.h"
+#include "SkDisplayXMLParser.h"
+#include "SkScript.h"
+#include "SkString.h"
+#include "SkTDict.h"
+
+// not sure where this little helper macro should go
+
+
+class SkActive;
+class SkAnimate;
+class SkCanvas;
+class SkDisplayable;
+class SkADrawable;
+class SkDump;
+class SkEvent;
+class SkEventSink;
+class SkExtras;
+class SkGroup;
+class SkPaint;
+class SkStream;
+
+class SkAnimateMaker {
+public:
+ SkAnimateMaker(SkAnimator* animator, SkCanvas* canvas, SkPaint* paint);
+ ~SkAnimateMaker();
+ void appendActive(SkActive* );
+ void childrenAdd(SkDisplayable* child) { *fChildren.append() = child; }
+ void clearExtraPropertyCallBack(SkDisplayTypes type);
+ bool computeID(SkDisplayable* displayable, SkDisplayable* parent, SkString* newID);
+ SkDisplayable* createInstance(const char name[], size_t len);
+ bool decodeStream(SkStream* stream);
+ bool decodeURI(const char uri[]);
+ void delayEnable(SkApply* apply, SkMSec time);
+ void doDelayedEvent();
+ bool doEvent(const SkEvent& event);
+#ifdef SK_DUMP_ENABLED
+ void dump(const char* match);
+#endif
+ int dynamicProperty(SkString& nameStr, SkDisplayable** );
+ bool find(const char* str, SkDisplayable** displayablePtr) const {
+ return fIDs.find(str, displayablePtr);
+ }
+ bool find(const char* str, size_t len, SkDisplayable** displayablePtr) const {
+ return fIDs.find(str, len, displayablePtr);
+ }
+ bool findKey(SkDisplayable* displayable, const char** string) const {
+ return fIDs.findKey(displayable, string);
+ }
+// bool find(SkString& string, SkDisplayable** displayablePtr) {
+// return fIDs.find(string.c_str(), displayablePtr);
+// }
+ SkAnimator* getAnimator() { return fAnimator; }
+ SkMSec getAppTime() const; // call caller to get current time
+#ifdef SK_DEBUG
+ SkAnimator* getRoot();
+#endif
+ SkXMLParserError::ErrorCode getErrorCode() const { return fError.getErrorCode(); }
+ SkMSec getInTime() { return fDisplayList.getTime(); }
+ int getNativeCode() const { return fError.getNativeCode(); }
+ bool hasError() { return fError.hasError(); }
+ void helperAdd(SkDisplayable* trackMe);
+ void helperRemove(SkDisplayable* alreadyTracked);
+ void idsSet(const char* attrValue, size_t len, SkDisplayable* displayable) {
+ fIDs.set(attrValue, len, displayable); }
+// void loadMovies();
+ void notifyInval();
+ void notifyInvalTime(SkMSec time);
+ void postOnEnd(SkAnimateBase* animate, SkMSec end);
+ void removeActive(SkActive* );
+ void reset();
+ bool resolveID(SkDisplayable* displayable, SkDisplayable* original);
+ void setEnableTime(SkMSec appTime, SkMSec expectedTime);
+ void setErrorCode(SkXMLParserError::ErrorCode err) { if (fError.hasError() == false) fError.INHERITED::setCode(err); }
+ void setErrorCode(SkDisplayXMLParserError::ErrorCode err) { if (fError.hasError() == false) fError.setCode(err); }
+ void setErrorNoun(const SkString& str) { if (fError.hasError() == false) fError.setNoun(str); }
+ void setErrorString();
+ void setExtraPropertyCallBack(SkDisplayTypes type, SkScriptEngine::_propertyCallBack , void* userStorage);
+ void setID(SkDisplayable* displayable, const SkString& newID);
+ void setInnerError(SkAnimateMaker* maker, const SkString& str) { fError.setInnerError(maker, str); }
+ void setScriptError(const SkScriptEngine& );
+#ifdef SK_DEBUG
+ void validate() { fDisplayList.validate(); }
+#else
+ void validate() {}
+#endif
+ SkDisplayEvent* fActiveEvent;
+ SkMSec fAdjustedStart;
+ SkCanvas* fCanvas;
+ SkMSec fEnableTime;
+ int fEndDepth; // passed parameter to onEndElement
+ SkEvents fEvents;
+ SkDisplayList fDisplayList;
+ SkEventSinkID fHostEventSinkID;
+ SkMSec fMinimumInterval;
+ SkPaint* fPaint;
+ SkAnimateMaker* fParentMaker;
+ SkString fPrefix;
+ SkDisplayScreenplay fScreenplay;
+ const SkAnimator::Timeline* fTimeline;
+ SkBool8 fInInclude;
+ SkBool8 fInMovie;
+ SkBool8 fFirstScriptError;
+#if defined SK_DEBUG && defined SK_DEBUG_ANIMATION_TIMING
+ SkMSec fDebugTimeBase;
+#endif
+#ifdef SK_DUMP_ENABLED
+ SkString fDumpAnimated;
+ SkBool8 fDumpEvents;
+ SkBool8 fDumpGConditions;
+ SkBool8 fDumpPosts;
+#endif
+private:
+ void deleteMembers();
+ static bool GetStep(const char* token, size_t len, void* stepPtr, SkScriptValue* );
+ SkAnimateMaker& operator=(SkAnimateMaker& );
+ SkTDDisplayableArray fChildren;
+ SkTDDisplayableArray fDelayed; // SkApply that contain delayed enable events
+ SkDisplayXMLParserError fError;
+ SkString fErrorString;
+ SkTDArray<SkExtras*> fExtras;
+ SkString fFileName;
+ SkTDDisplayableArray fHelpers; // helper displayables
+ SkBool8 fLoaded;
+ SkTDDisplayableArray fMovies;
+ SkTDict<SkDisplayable*> fIDs;
+ SkAnimator* fAnimator;
+ friend class SkAdd;
+ friend class SkAnimateBase;
+ friend class SkDisplayXMLParser;
+ friend class SkAnimator;
+ friend class SkAnimatorScript;
+ friend class SkApply;
+ friend class SkDisplayMovie;
+ friend class SkDisplayType;
+ friend class SkEvents;
+ friend class SkGroup;
+ friend struct SkMemberInfo;
+};
+
+#endif // SkAnimateMaker_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkAnimateProperties.h b/gfx/skia/skia/src/animator/SkAnimateProperties.h
new file mode 100644
index 000000000..b0706405a
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkAnimateProperties.h
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkAnimateProperties_DEFINED
+#define SkAnimateProperties_DEFINED
+
+enum SkAnimateBase_Properties {
+ SK_PROPERTY(dynamic),
+ SK_PROPERTY(mirror),
+ SK_PROPERTY(reset),
+ SK_PROPERTY(step),
+ SK_PROPERTY(values)
+};
+
+#endif // SkAnimateProperties_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkAnimateSchema.xsd b/gfx/skia/skia/src/animator/SkAnimateSchema.xsd
new file mode 100644
index 000000000..f7af332cf
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkAnimateSchema.xsd
@@ -0,0 +1,2787 @@
+<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
+xmlns:Sk="urn:screenplay" targetNamespace="urn:screenplay">
+
+ <!-- /** Animate
+ An ID of an element of type <animate> or <set>
+ */ -->
+ <xs:simpleType name="Animate">
+ <xs:restriction base="xs:string"/>
+ </xs:simpleType>
+
+ <!-- /** 3D_Point
+ An array of three floats in ECMAScript notation: [x, y, z].
+ */ -->
+ <xs:simpleType name="3D_Point">
+ <xs:restriction base="xs:string">
+ <xs:pattern value="[+-]?([0-9]*\.[0-9]+|[0-9]+\.?)( *, *[+-]?([0-9]*\.[0-9]+|[0-9]+\.?)){2}" />
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** ARGB
+ The red, green, blue, and optional alpha color components.
+ */ -->
+ <xs:simpleType name="ARGB">
+ <xs:restriction base="xs:string">
+ <!-- @pattern #[0-9a-fA-F]{3} #rgb contains three hexadecimal digits. #rgb is equivalent to 0xFFrrggbb. -->
+ <xs:pattern value="#[0-9a-fA-F]{3}"/>
+ <!-- @pattern #[0-9a-fA-F]{4} #argb contains four hexadecimal digits. #argb is equivalent to 0xaarrggbb. -->
+ <xs:pattern value="#[0-9a-fA-F]{4}"/>
+ <!-- @pattern #[0-9a-fA-F]{6} #rrggbb contains six hexadecimal digits. #rrggbb is equivalent to 0xFFrrggbb. -->
+ <xs:pattern value="#[0-9a-fA-F]{6}"/>
+ <!-- @pattern #[0-9a-fA-F]{8} #aarrggbb contains eight hexadecimal digits. #aarrggbb is equivalent to 0xaarrggbb. -->
+ <xs:pattern value="#[0-9a-fA-F]{8}"/>
+ <!-- @pattern 0[xX][0-9a-fA-F]{8} 0xaarrggbb describes the color as a packed hexadecimal; each pair of digits
+ corresponds to alpha, red, green, and blue respectively. -->
+ <xs:pattern value="0[xX][0-9a-fA-F]{8}"/>
+ <!-- @pattern rgb\(\d{1,3},\d{1,3},\d{1,3}\) rgb(r, g, b) describes a color with three integers ranging from 0 to 255,
+ corresponding to red, green, and blue respectively. -->
+ <xs:pattern value="rgb\(\d{1,3},\d{1,3},\d{1,3}\)"/>
+ <!-- @patternList Color can be described by the following standard CSS color names. -->
+ <xs:pattern value="aliceblue"/>
+ <xs:pattern value="antiquewhite"/>
+ <xs:pattern value="aqua"/>
+ <xs:pattern value="aquamarine"/>
+ <xs:pattern value="azure"/>
+ <xs:pattern value="beige"/>
+ <xs:pattern value="bisque"/>
+ <xs:pattern value="black"/>
+ <xs:pattern value="blanchedalmond"/>
+ <xs:pattern value="blue"/>
+ <xs:pattern value="blueviolet"/>
+ <xs:pattern value="brown"/>
+ <xs:pattern value="burlywood"/>
+ <xs:pattern value="cadetblue"/>
+ <xs:pattern value="chartreuse"/>
+ <xs:pattern value="chocolate"/>
+ <xs:pattern value="coral"/>
+ <xs:pattern value="cornflowerblue"/>
+ <xs:pattern value="cornsilk"/>
+ <xs:pattern value="crimson"/>
+ <xs:pattern value="cyan"/>
+ <xs:pattern value="darkblue"/>
+ <xs:pattern value="darkcyan"/>
+ <xs:pattern value="darkgoldenrod"/>
+ <xs:pattern value="darkgray"/>
+ <xs:pattern value="darkgreen"/>
+ <xs:pattern value="darkkhaki"/>
+ <xs:pattern value="darkmagenta"/>
+ <xs:pattern value="darkolivegreen"/>
+ <xs:pattern value="darkorange"/>
+ <xs:pattern value="darkorchid"/>
+ <xs:pattern value="darkred"/>
+ <xs:pattern value="darksalmon"/>
+ <xs:pattern value="darkseagreen"/>
+ <xs:pattern value="darkslateblue"/>
+ <xs:pattern value="darkslategray"/>
+ <xs:pattern value="darkturquoise"/>
+ <xs:pattern value="darkviolet"/>
+ <xs:pattern value="deeppink"/>
+ <xs:pattern value="deepskyblue"/>
+ <xs:pattern value="dimgray"/>
+ <xs:pattern value="dodgerblue"/>
+ <xs:pattern value="firebrick"/>
+ <xs:pattern value="floralwhite"/>
+ <xs:pattern value="forestgreen"/>
+ <xs:pattern value="fuchsia"/>
+ <xs:pattern value="gainsboro"/>
+ <xs:pattern value="ghostwhite"/>
+ <xs:pattern value="gold"/>
+ <xs:pattern value="goldenrod"/>
+ <xs:pattern value="gray"/>
+ <xs:pattern value="green"/>
+ <xs:pattern value="greenyellow"/>
+ <xs:pattern value="honeydew"/>
+ <xs:pattern value="hotpink"/>
+ <xs:pattern value="indianred"/>
+ <xs:pattern value="indigo"/>
+ <xs:pattern value="ivory"/>
+ <xs:pattern value="khaki"/>
+ <xs:pattern value="lavender"/>
+ <xs:pattern value="lavenderblush"/>
+ <xs:pattern value="lawngreen"/>
+ <xs:pattern value="lemonchiffon"/>
+ <xs:pattern value="lightblue"/>
+ <xs:pattern value="lightcoral"/>
+ <xs:pattern value="lightcyan"/>
+ <xs:pattern value="lightgoldenrodyellow"/>
+ <xs:pattern value="lightgreen"/>
+ <xs:pattern value="lightgrey"/>
+ <xs:pattern value="lightpink"/>
+ <xs:pattern value="lightsalmon"/>
+ <xs:pattern value="lightseagreen"/>
+ <xs:pattern value="lightskyblue"/>
+ <xs:pattern value="lightslategray"/>
+ <xs:pattern value="lightsteelblue"/>
+ <xs:pattern value="lightyellow"/>
+ <xs:pattern value="lime"/>
+ <xs:pattern value="limegreen"/>
+ <xs:pattern value="linen"/>
+ <xs:pattern value="magenta"/>
+ <xs:pattern value="maroon"/>
+ <xs:pattern value="mediumaquamarine"/>
+ <xs:pattern value="mediumblue"/>
+ <xs:pattern value="mediumorchid"/>
+ <xs:pattern value="mediumpurple"/>
+ <xs:pattern value="mediumseagreen"/>
+ <xs:pattern value="mediumslateblue"/>
+ <xs:pattern value="mediumspringgreen"/>
+ <xs:pattern value="mediumturquoise"/>
+ <xs:pattern value="mediumvioletred"/>
+ <xs:pattern value="midnightblue"/>
+ <xs:pattern value="mintcream"/>
+ <xs:pattern value="mistyrose"/>
+ <xs:pattern value="moccasin"/>
+ <xs:pattern value="navajowhite"/>
+ <xs:pattern value="navy"/>
+ <xs:pattern value="oldlace"/>
+ <xs:pattern value="olive"/>
+ <xs:pattern value="olivedrab"/>
+ <xs:pattern value="orange"/>
+ <xs:pattern value="orangered"/>
+ <xs:pattern value="orchid"/>
+ <xs:pattern value="palegoldenrod"/>
+ <xs:pattern value="palegreen"/>
+ <xs:pattern value="paleturquoise"/>
+ <xs:pattern value="palevioletred"/>
+ <xs:pattern value="papayawhip"/>
+ <xs:pattern value="peachpuff"/>
+ <xs:pattern value="peru"/>
+ <xs:pattern value="pink"/>
+ <xs:pattern value="plum"/>
+ <xs:pattern value="powderblue"/>
+ <xs:pattern value="purple"/>
+ <xs:pattern value="red"/>
+ <xs:pattern value="rosybrown"/>
+ <xs:pattern value="royalblue"/>
+ <xs:pattern value="saddlebrown"/>
+ <xs:pattern value="salmon"/>
+ <xs:pattern value="sandybrown"/>
+ <xs:pattern value="seagreen"/>
+ <xs:pattern value="seashell"/>
+ <xs:pattern value="sienna"/>
+ <xs:pattern value="silver"/>
+ <xs:pattern value="skyblue"/>
+ <xs:pattern value="slateblue"/>
+ <xs:pattern value="slategray"/>
+ <xs:pattern value="snow"/>
+ <xs:pattern value="springgreen"/>
+ <xs:pattern value="steelblue"/>
+ <xs:pattern value="tan"/>
+ <xs:pattern value="teal"/>
+ <xs:pattern value="thistle"/>
+ <xs:pattern value="tomato"/>
+ <xs:pattern value="turquoise"/>
+ <xs:pattern value="violet"/>
+ <xs:pattern value="wheat"/>
+ <xs:pattern value="white"/>
+ <xs:pattern value="whitesmoke"/>
+ <xs:pattern value="yellow"/>
+ <!--@patternListLast -->
+ <xs:pattern value="yellowgreen"/>
+ </xs:restriction>
+ </xs:simpleType>
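
The short #rgb and #argb forms above are described as equivalent to 0xFFrrggbb and 0xaarrggbb, i.e. each hex digit is doubled into a full byte and a missing alpha defaults to opaque. A hedged sketch of that expansion in C++ (the function names are illustrative, not part of the Skia parser):

    #include <cstdio>

    // Expand one hex digit 0..15 into a duplicated byte: 0xF -> 0xFF, 0x3 -> 0x33.
    static unsigned dup(unsigned nybble) { return nybble * 0x11; }

    // Sketch of the short color forms: "#rgb" behaves like 0xFFrrggbb and
    // "#argb" like 0xaarrggbb, with each digit doubled into a full byte.
    unsigned expandShortColor(unsigned digits, bool hasAlpha) {
        unsigned a = hasAlpha ? dup((digits >> 12) & 0xF) : 0xFF;
        unsigned r = dup((digits >> 8) & 0xF);
        unsigned g = dup((digits >> 4) & 0xF);
        unsigned b = dup(digits & 0xF);
        return (a << 24) | (r << 16) | (g << 8) | b;
    }

    int main() {
        std::printf("%08X\n", expandShortColor(0xF93, false));  // #F93  -> FFFF9933
        std::printf("%08X\n", expandShortColor(0x8F93, true));  // #8F93 -> 88FF9933
    }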
+
+ <!-- /** AddMode
+ AddMode controls how the add element adds its referenced element to the
+ display list. By default, the referenced element remains in the add element
+ so that the add element's use attribute may be animated to change the
+ element it refers to. Setting the mode attribute to "immediate" causes the
+ add element to put the referenced element in the display list directly.
+ The move and replace elements are not affected by the mode attribute;
+ they always move or replace the referenced element directly.
+ */ -->
+ <xs:simpleType name="AddMode">
+ <xs:restriction base="xs:string">
+ <!-- @pattern immediate Puts the referenced element in the display list. -->
+ <xs:pattern value="immediate"/>
+ <!-- @pattern indirect Puts the containing element in the display list. -->
+ <xs:pattern value="indirect"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** Align
+ Align places text to the left, center, or right of the text position.
+ */ -->
+ <xs:simpleType name="Align">
+ <xs:restriction base="xs:string">
+ <!-- @pattern left The first character in the text string is drawn at the text position. -->
+ <xs:pattern value="left"/>
+ <!-- @pattern center The text string is measured and centered on the text position. -->
+ <xs:pattern value="center"/>
+ <!-- @pattern right The last character in the text string is drawn to the left of the text position. -->
+ <xs:pattern value="right"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** ApplyMode
+ ApplyMode affects how the apply element animates values.
+ */ -->
+ <xs:simpleType name="ApplyMode">
+ <xs:restriction base="xs:string">
+ <!-- @pattern immediate Iterates through all animation values immediately. -->
+ <xs:pattern value="immediate"/>
+ <!-- @pattern once Performs the animation at once without adding the scope to
+ the display list. -->
+ <xs:pattern value="once"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** ApplyTransition
+ ApplyTransition affects how the apply element sets the time of the animators.
+ */ -->
+ <xs:simpleType name="ApplyTransition">
+ <xs:restriction base="xs:string">
+ <!-- @pattern reverse Performs the animation in reverse. -->
+ <xs:pattern value="reverse"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** Base64
+ Base64 describes 8-bit binary data using 64 character values.
+ See http://rfc.net/rfc2045.html for the base64 format.
+ */ -->
+ <xs:simpleType name="Base64">
+ <xs:restriction base="xs:string">
+ <xs:pattern value="[A-Za-z0-9+/ ]+"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** BaseBitmap
+ A reference to an image like a JPEG, GIF, or PNG; or a reference to a bitmap element
+ that has been drawn into with a drawTo element.
+ */ -->
+ <xs:simpleType name="BaseBitmap">
+ <xs:restriction base="xs:string"/>
+ </xs:simpleType>
+
+ <!-- /** BitmapEncoding
+ Used to specify the compression format for writing an image file with the snapshot element.
+ */ -->
+ <xs:simpleType name="BitmapEncoding">
+ <xs:restriction base="xs:string">
+ <!-- @pattern jpeg See http://www.jpeg.org/jpeg/ for more information about JPEG. -->
+ <xs:pattern value="jpeg"/>
+ <!-- @pattern png See http://www.libpng.org/pub/png/ for more information about PNG. -->
+ <xs:pattern value="png"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** BitmapFormat
+ Determines the number of bits per pixel in a bitmap.
+ */ -->
+ <xs:simpleType name="BitmapFormat">
+ <xs:restriction base="xs:string">
+ <xs:pattern value="none"/>
+ <!-- @pattern A1 1 bit per pixel (0 is transparent, 1 is opaque). -->
+ <xs:pattern value="A1"/>
+ <!-- @pattern A8 8 bits per pixel, with only alpha specified (0 is transparent, 0xFF is opaque). -->
+ <xs:pattern value="A8"/>
+ <!-- @pattern Index8 8 bits per pixel, using a ColorTable element to specify the colors. -->
+ <xs:pattern value="Index8"/>
+ <!-- @pattern RGB16 16 bits per pixel, compile-time configured to be either 555 or 565. -->
+ <xs:pattern value="RGB16"/>
+ <!-- @pattern RGB32 32 bits per pixel, plus alpha. -->
+ <xs:pattern value="RGB32"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** Boolean
+ Either "true" (non-zero) or "false" (zero).
+ */ -->
+ <xs:simpleType name="Boolean">
+ <xs:restriction base="xs:string">
+ <xs:pattern value="false"/>
+ <xs:pattern value="true"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** Cap
+ The values for the strokeCap attribute.
+ */ -->
+ <xs:simpleType name="Cap">
+ <xs:restriction base="xs:string">
+ <!-- @pattern butt begin and end a contour with no extension -->
+ <xs:pattern value="butt"/>
+ <!-- @pattern round begin and end a contour with a semi-circle extension -->
+ <xs:pattern value="round"/>
+ <!-- @pattern square begin and end a contour with a half square extension -->
+ <xs:pattern value="square"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** Color
+ A reference to a color element.
+ */ -->
+ <xs:simpleType name="Color">
+ <xs:restriction base="xs:string"/>
+ </xs:simpleType>
+
+ <!-- /** Displayable
+ A reference to any element: @list(Displayable)
+ */ -->
+ <xs:simpleType name="Displayable">
+ <xs:restriction base="xs:string"/>
+ </xs:simpleType>
+
+ <!-- /** DisplayableArray
+ An array of one or more element IDs.
+ */ -->
+ <xs:simpleType name="DisplayableArray">
+ <xs:restriction base="xs:string"/>
+ </xs:simpleType>
+
+ <!-- /** Drawable
+ A reference to an element that can be drawn: @list(Drawable)
+ */ -->
+ <xs:simpleType name="Drawable">
+ <xs:restriction base="xs:string"/>
+ </xs:simpleType>
+
+ <!-- /** DynamicString
+ Dynamic strings contain scripts that are re-evaluated each time the script is enabled.
+ */ -->
+ <xs:simpleType name="DynamicString">
+ <xs:restriction base="xs:string"/>
+ </xs:simpleType>
+
+ <!-- /** EventCode
+ Key codes that can trigger events, usually corresponding to physical buttons on the device.
+ */ -->
+ <xs:simpleType name="EventCode">
+ <xs:restriction base="xs:string">
+ <xs:pattern value="none"/>
+ <!-- @pattern up The up arrow. -->
+ <xs:pattern value="up"/>
+ <!-- @pattern down The down arrow. -->
+ <xs:pattern value="down"/>
+ <!-- @pattern left The left arrow. -->
+ <xs:pattern value="left"/>
+ <!-- @pattern right The right arrow. -->
+ <xs:pattern value="right"/>
+ <!-- @pattern back The back button (may not be present; the Backspace key on a PC). -->
+ <xs:pattern value="back"/>
+ <!-- @pattern end The end button (may not be present; the Esc key on a PC). -->
+ <xs:pattern value="end"/>
+ <!-- @pattern OK The OK button (the Enter key on a PC). -->
+ <xs:pattern value="OK"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** EventKind
+ Specifies how an event is triggered: by a key, when an animation ends, when the
+ document is loaded, or when the event is triggered by the user's C++ or XML.
+ */ -->
+ <xs:simpleType name="EventKind">
+ <xs:restriction base="xs:string">
+ <xs:pattern value="none"/>
+ <!-- @pattern keyChar A key corresponding to a Unichar value. -->
+ <xs:pattern value="keyChar"/>
+ <!-- @pattern keyPress A key with a particular function, such as an arrow key or the OK button. -->
+ <xs:pattern value="keyPress"/>
+ <!-- @pattern mouseDown Triggered when the primary mouse button is pressed. -->
+ <xs:pattern value="mouseDown"/>
+ <!-- @pattern mouseDrag Triggered when the primary mouse is moved while the button is pressed. -->
+ <xs:pattern value="mouseDrag"/>
+ <!-- @pattern mouseMove Triggered when the primary mouse is moved. -->
+ <xs:pattern value="mouseMove"/>
+ <!-- @pattern mouseUp Triggered when the primary mouse button is released. -->
+ <xs:pattern value="mouseUp"/>
+ <!-- @pattern onEnd Triggered when an event ends. -->
+ <xs:pattern value="onEnd"/>
+ <!-- @pattern onLoad Triggered when the document loads. -->
+ <xs:pattern value="onLoad"/>
+ <!-- @pattern user Triggered when a post element or C++ event is activated. -->
+ <xs:pattern value="user"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** EventMode
+ Specifies whether the event is delivered immediately to the matching event element or deferred to
+ the application-wide event handler.
+ */ -->
+ <xs:simpleType name="EventMode">
+ <xs:restriction base="xs:string">
+ <!-- @pattern deferred Process the event using the host's event queue. -->
+ <xs:pattern value="deferred"/>
+ <!-- @pattern immediate Activate the event element immediately. -->
+ <xs:pattern value="immediate"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** FillType
+ Filled paths that self-intersect use the winding or evenOdd rule to determine whether the
+ overlaps are filled or are holes.
+ */ -->
+ <xs:simpleType name="FillType">
+ <xs:restriction base="xs:string">
+ <!-- @pattern winding Fill if the sum of edge directions is non-zero. -->
+ <xs:pattern value="winding"/>
+ <!-- @pattern evenOdd Fill if the sum of edges is an odd number. -->
+ <xs:pattern value="evenOdd"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** FilterType
+ Scaled bitmaps without a filter type set point-sample the source bitmap to determine the
+ destination pixels' colors. Bilinear and bicubic compute the values of intermediate pixels
+ by sampling the pixels around them.
+ */ -->
+ <xs:simpleType name="FilterType">
+ <xs:restriction base="xs:string">
+ <xs:pattern value="none"/>
+ <!-- @pattern bilinear Compute the pixel value as the linear interpolation of adjacent pixels. -->
+ <xs:pattern value="bilinear"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** Float
+ A signed fractional value.
+ */ -->
+ <xs:simpleType name="Float">
+ <xs:restriction base="xs:float">
+ <xs:pattern value="[+-]?([0-9]*\.[0-9]+|[0-9]+\.?)"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** FloatArray
+ An array of one or more signed fractional values.
+ */ -->
+ <xs:simpleType name="FloatArray">
+ <xs:restriction base="xs:float">
+ <xs:pattern value="\[[+-]?([0-9]*\.[0-9]+|[0-9]+\.?)( *, *[+-]?([0-9]*\.[0-9]+|[0-9]+\.?))*\]"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** FromPathMode
+ A matrix computed from an offset along a path may include the point's position, the
+ tangent angle, or both.
+ */ -->
+ <xs:simpleType name="FromPathMode">
+ <xs:restriction base="xs:string">
+ <!-- @pattern normal Compute the matrix using the path's angle and position. -->
+ <xs:pattern value="normal"/>
+ <!-- @pattern angle Compute the matrix using only the path's angle. -->
+ <xs:pattern value="angle"/>
+ <!-- @pattern position Compute the matrix using only the path's position. -->
+ <xs:pattern value="position"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** Int
+ A signed integer.
+ */ -->
+ <xs:simpleType name="Int">
+ <xs:restriction base="xs:integer"/>
+ </xs:simpleType>
+
+ <!-- /** IntArray
+ An array of one or more signed integer values.
+ */ -->
+ <xs:simpleType name="IntArray">
+ <xs:restriction base="xs:integer">
+ <xs:pattern value="\[[+-]?[0-9]+( *, *[+-]?[0-9]+)*\]"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** Join
+ The edges of thick lines in a path are joined by extending the outer edges to form a miter,
+ or by adding a round circle at the intersection point, or by connecting the outer edges with a line
+ to form a blunt joint.
+ */ -->
+ <xs:simpleType name="Join">
+ <xs:restriction base="xs:string">
+ <!-- @pattern miter Extend the outer edges to form a miter. -->
+ <xs:pattern value="miter"/>
+ <!-- @pattern round Join the outer edges with a circular arc. -->
+ <xs:pattern value="round"/>
+ <!-- @pattern blunt Connect the outer edges with a line. -->
+ <xs:pattern value="blunt"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** MaskFilterBlurStyle
+ A blur can affect the inside or outside part of the shape, or it can affect both. The shape
+ itself can be drawn solid, or can be invisible.
+ */ -->
+ <xs:simpleType name="MaskFilterBlurStyle">
+ <xs:restriction base="xs:string">
+ <!-- @pattern normal Blur inside and outside. -->
+ <xs:pattern value="normal"/>
+ <!-- @pattern solid Solid inside, blur outside. -->
+ <xs:pattern value="solid"/>
+ <!-- @pattern outer Invisible inside, blur outside. -->
+ <xs:pattern value="outer"/>
+ <!-- @pattern inner Blur inside only. -->
+ <xs:pattern value="inner"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** MaskFilter
+ The ID of a blur or emboss element.
+ */ -->
+ <xs:simpleType name="MaskFilter">
+ <xs:restriction base="xs:string"/>
+ </xs:simpleType>
+
+ <!-- /** Matrix
+ The ID of a matrix element.
+ */ -->
+ <xs:simpleType name="Matrix">
+ <xs:restriction base="xs:string"/>
+ </xs:simpleType>
+
+ <!-- /** MSec
+ A fractional second with millisecond resolution.
+ */ -->
+ <xs:simpleType name="MSec">
+ <xs:restriction base="xs:float"/>
+ </xs:simpleType>
+
+ <!-- /** Paint
+ The ID of a paint element.
+ */ -->
+ <xs:simpleType name="Paint">
+ <xs:restriction base="xs:string"/>
+ </xs:simpleType>
+
+ <!-- /** Path
+ The ID of a path element.
+ */ -->
+ <xs:simpleType name="Path">
+ <xs:restriction base="xs:string"/>
+ </xs:simpleType>
+
+ <!-- /** PathDirection
+ PathDirection determines if the path is traveled clockwise or counterclockwise.
+ */ -->
+ <xs:simpleType name="PathDirection">
+ <xs:restriction base="xs:string">
+ <!-- @pattern cw The path is traveled clockwise. -->
+ <xs:pattern value="cw"/>
+ <!-- @pattern ccw The path is traveled counterclockwise. -->
+ <xs:pattern value="ccw"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** PathEffect
+ The ID of a dash or discrete element.
+ */ -->
+ <xs:simpleType name="PathEffect">
+ <xs:restriction base="xs:string"/>
+ </xs:simpleType>
+
+ <!-- /** Point
+ A pair of signed values representing the x and y coordinates of a point.
+ */ -->
+ <xs:simpleType name="Point">
+ <xs:restriction base="xs:string">
+ <xs:pattern value="\[ *[+-]?([0-9]*\.[0-9]+|[0-9]+\.?) *[ ,] *[+-]?([0-9]*\.[0-9]+|[0-9]+\.?)\]"/>
+ </xs:restriction>
+ </xs:simpleType>
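+
+ <!-- Illustrative literal formats (hypothetical values, not part of the schema) accepted by the
+      numeric simple types defined above:
+      Float       1.5    0.25    -3
+      FloatArray  [0.0, 0.5, 1.0]
+      IntArray    [1, 2, 3]
+      Point       [10, 20]
+ -->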
+
+ <!-- /** Rect
+ The ID of a rectangle element.
+ */ -->
+ <xs:simpleType name="Rect">
+ <xs:restriction base="xs:string"/>
+ </xs:simpleType>
+
+ <!-- /** Shader
+ The ID of a linear or radial gradient.
+ */ -->
+ <xs:simpleType name="Shader">
+ <xs:restriction base="xs:string"/>
+ </xs:simpleType>
+
+ <!-- /** String
+ A sequence of characters.
+ */ -->
+ <xs:simpleType name="String">
+ <xs:restriction base="xs:string"/>
+ </xs:simpleType>
+
+ <!-- /** Style
+ Geometry can be filled, stroked or both.
+ */ -->
+ <xs:simpleType name="Style">
+ <xs:restriction base="xs:string">
+ <!-- @pattern fill The interior of the geometry is filled with the paint's color. -->
+ <xs:pattern value="fill"/>
+ <!-- @pattern stroke The outline of the geometry is stroked with the paint's color. -->
+ <xs:pattern value="stroke"/>
+ <!-- @pattern strokeAndFill The interior is filled and outline is stroked with the paint's color. -->
+ <xs:pattern value="strokeAndFill"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** Text
+ The ID of a text element.
+ */ -->
+ <xs:simpleType name="Text">
+ <xs:restriction base="xs:string"/>
+ </xs:simpleType>
+
+ <!-- /** TextBoxAlign
+ Multiple lines of text may be aligned to the start of the box, the center, or the end.
+ */ -->
+ <xs:simpleType name="TextBoxAlign">
+ <xs:restriction base="xs:string">
+ <!-- @pattern start The text begins within the upper left of the box. -->
+ <xs:pattern value="start"/>
+ <!-- @pattern center The text is positioned in the center of the box. -->
+ <xs:pattern value="center"/>
+ <!-- @pattern end The text ends within the lower right of the box. -->
+ <xs:pattern value="end"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** TextBoxMode
+ Fitting the text may optionally introduce line breaks.
+ */ -->
+ <xs:simpleType name="TextBoxMode">
+ <xs:restriction base="xs:string">
+ <!-- @pattern oneLine No additional linebreaks are added. -->
+ <xs:pattern value="oneLine"/>
+ <!-- @pattern lineBreak Line breaks may be added to fit the text to the box. -->
+ <xs:pattern value="lineBreak"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** TileMode
+ A shader describes how to draw within a rectangle.
+ Outside of the rectangle, the shader may be ignored, clamped on the edges, or repeated.
+ The repetitions may be mirrored from the original shader.
+ */ -->
+ <xs:simpleType name="TileMode">
+ <xs:restriction base="xs:string">
+ <!-- @pattern clamp The edge shader color is extended. -->
+ <xs:pattern value="clamp"/>
+ <!-- @pattern repeat The shader is repeated horizontally and vertically. -->
+ <xs:pattern value="repeat"/>
+ <!-- @pattern mirror The shader is mirrored horizontally and vertically. -->
+ <xs:pattern value="mirror"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** Typeface
+ The ID of a typeface element.
+ */ -->
+ <xs:simpleType name="Typeface">
+ <xs:restriction base="xs:string"/>
+ </xs:simpleType>
+
+ <!-- /** UnknownArray
+ An array of values of any type.
+ */ -->
+ <xs:simpleType name="UnknownArray">
+ <xs:restriction base="xs:string"/>
+ </xs:simpleType>
+
+ <!-- /** Xfermode
+ The operation applied when drawing a color to the destination background.
+ */ -->
+ <xs:simpleType name="Xfermode">
+ <xs:restriction base="xs:string">
+ <!-- @pattern clear Set the destination alpha to zero and the destination color to black. -->
+ <xs:pattern value="clear"/>
+ <!-- @pattern src Set the destination to the source alpha and color. -->
+ <xs:pattern value="src"/>
+ <!-- @pattern dst Set the destination to the destination alpha and color. -->
+ <xs:pattern value="dst"/>
+ <!-- @pattern srcOver The default. Set the destination to the source color blended
+ with the destination by the source alpha. -->
+ <xs:pattern value="srcOver"/>
+ <!-- @pattern dstOver Set the destination to the destination color blended
+ with the source by the destination alpha. -->
+ <xs:pattern value="dstOver"/>
+ <!-- @pattern srcIn Set the destination to the source color scaled by the destination
+ alpha. -->
+ <xs:pattern value="srcIn"/>
+ <!-- @pattern dstIn Set the destination to the destination color scaled by the source
+ alpha. -->
+ <xs:pattern value="dstIn"/>
+ <!-- @pattern srcOut Set the destination to the source color scaled by the
+ inverse of the destination alpha. -->
+ <xs:pattern value="srcOut"/>
+ <!-- @pattern dstOut Set the destination to the destination color scaled by the
+ inverse of the source alpha. -->
+ <xs:pattern value="dstOut"/>
+ <!-- @pattern srcATop Set the destination to the source color times the destination alpha,
+ blended with the destination times the inverse of the source alpha. -->
+ <xs:pattern value="srcATop"/>
+ <!-- @pattern dstATop Set the destination to the destination color times the source alpha,
+ blended with the source times the inverse of the destination alpha. -->
+ <xs:pattern value="dstATop"/>
+ <!-- @pattern xor Set the destination to the destination color times the
+ inverse of the source alpha,
+ blended with the source times the inverse of the destination alpha. -->
+ <xs:pattern value="xor"/>
+ </xs:restriction>
+ </xs:simpleType>
+
+ <!-- /** Math
+ Math provides the functions and properties of the ECMAScript Math object to screenplay script expressions.
+ The Math element is always implicitly added at the top of every screenplay description, so
+ its functions and properties are always available.
+ */ -->
+ <xs:element name="Math">
+ <xs:complexType>
+ <!-- @attribute E The value 2.718281828. -->
+ <xs:attribute name="E" type="Sk:Float"/>
+ <!-- @attribute LN10 The value 2.302585093. -->
+ <xs:attribute name="LN10" type="Sk:Float"/>
+ <!-- @attribute LN2 The value 0.693147181. -->
+ <xs:attribute name="LN2" type="Sk:Float"/>
+ <!-- @attribute LOG10E The value 0.434294482. -->
+ <xs:attribute name="LOG10E" type="Sk:Float"/>
+ <!-- @attribute LOG2E The value 1.442695041. -->
+ <xs:attribute name="LOG2E" type="Sk:Float"/>
+ <!-- @attribute PI The value 3.141592654. -->
+ <xs:attribute name="PI" type="Sk:Float"/>
+ <!-- @attribute SQRT1_2 The value 0.707106781. -->
+ <xs:attribute name="SQRT1_2" type="Sk:Float"/>
+ <!-- @attribute SQRT2 The value 1.414213562. -->
+ <xs:attribute name="SQRT2" type="Sk:Float"/>
+ <!-- @attribute abs A function that returns the absolute value of its argument. -->
+ <xs:attribute name="abs" type="Sk:Float"/>
+ <!-- @attribute acos A function that returns the arc cosine of its argument. -->
+ <xs:attribute name="acos" type="Sk:Float"/>
+ <!-- @attribute asin A function that returns the arc sine of its argument. -->
+ <xs:attribute name="asin" type="Sk:Float"/>
+ <!-- @attribute atan A function that returns the arc tan of its argument. -->
+ <xs:attribute name="atan" type="Sk:Float"/>
+ <!-- @attribute atan2 A function that returns the arc tan of the ratio of its two arguments. -->
+ <xs:attribute name="atan2" type="Sk:Float"/>
+ <!-- @attribute ceil A function that returns the rounded up value of its argument. -->
+ <xs:attribute name="ceil" type="Sk:Float"/>
+ <!-- @attribute cos A function that returns the cosine of its argument. -->
+ <xs:attribute name="cos" type="Sk:Float"/>
+ <!-- @attribute exp A function that returns E raised to a power (the argument). -->
+ <xs:attribute name="exp" type="Sk:Float"/>
+ <!-- @attribute floor A function that returns the rounded down value of its argument. -->
+ <xs:attribute name="floor" type="Sk:Float"/>
+ <!-- @attribute log A function that returns the natural logarithm of its argument. -->
+ <xs:attribute name="log" type="Sk:Float"/>
+ <!-- @attribute max A function that returns the largest of any number of arguments. -->
+ <xs:attribute name="max" type="Sk:Float"/>
+ <!-- @attribute min A function that returns the smallest of any number of arguments. -->
+ <xs:attribute name="min" type="Sk:Float"/>
+ <!-- @attribute pow A function that returns the first argument raised to the power of the second argument. -->
+ <xs:attribute name="pow" type="Sk:Float"/>
+ <!-- @attribute random A function that returns a random value from zero to one.
+ (See also the &lt;random&gt; element.) -->
+ <xs:attribute name="random" type="Sk:Float"/>
+ <!-- @attribute round A function that returns the rounded value of its argument. -->
+ <xs:attribute name="round" type="Sk:Float"/>
+ <!-- @attribute sin A function that returns the sine of its argument. -->
+ <xs:attribute name="sin" type="Sk:Float"/>
+ <!-- @attribute sqrt A function that returns the square root of its argument. -->
+ <xs:attribute name="sqrt" type="Sk:Float"/>
+ <!-- @attribute tan A function that returns the tangent of its argument. -->
+ <xs:attribute name="tan" type="Sk:Float"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** Number
+ Number provides the properties of the ECMAScript Number object to screenplay script expressions.
+ The Number element is always implicitly added at the top of every screenplay description, so
+ its properties are always available.
+ */ -->
+ <xs:element name="Number">
+ <xs:complexType>
+ <!-- @attribute MAX_VALUE The maximum number value; approximately 32767.999985 fixed point,
+ 3.4028235e+38 floating point. -->
+ <xs:attribute name="MAX_VALUE" type="Sk:Float"/>
+ <!-- @attribute MIN_VALUE The minimum number value; approximately 0.000015 fixed point,
+ 1.1754944e-38 floating point. -->
+ <xs:attribute name="MIN_VALUE" type="Sk:Float"/>
+ <!-- @attribute NEGATIVE_INFINITY The most negative number value. Fixed point does not
+ have a value for negative infinity, and approximates it with -32767.999985. -->
+ <xs:attribute name="NEGATIVE_INFINITY" type="Sk:Float"/>
+ <!-- @attribute NaN A bit pattern representing "Not a Number". Fixed point does not
+ have a value for NaN, and approximates it with -32768. -->
+ <xs:attribute name="NaN" type="Sk:Float"/>
+ <!-- @attribute POSITIVE_INFINITY The greatest positive number value. Fixed point does not
+ have a value for positive infinity, and approximates it with 32767.999985. -->
+ <xs:attribute name="POSITIVE_INFINITY" type="Sk:Float"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** add
+ Add references a drawable element, and adds it to the display list.
+ If where and offset are omitted, the element is appended to the end of the display list.
+ If where is specified, the element is inserted at the first occurrence of where in the display list.
+ If offset and where are specified, the element is inserted at where plus offset.
+ A positive offset without where inserts the element at the start of the list plus offset.
+ A negative offset without where inserts the element at the end of the list minus offset.
+ */ -->
+ <xs:element name="add">
+ <xs:complexType>
+ <!-- @attribute mode If indirect (the default), keep the add element in the display list,
+ and draw the add's use element. If immediate, put the add's use element in the display list. -->
+ <xs:attribute name="mode" type="Sk:AddMode"/>
+ <!-- @attribute offset The offset added to the insert index. -->
+ <xs:attribute name="offset" type="Sk:Int"/>
+ <!-- @attribute use The drawable element to add to the display list. -->
+ <xs:attribute name="use" type="Sk:Drawable"/>
+ <!-- @attribute where The drawable element marking where to insert. -->
+ <xs:attribute name="where" type="Sk:Drawable"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
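+
+ <!-- Illustrative usage (hypothetical ids, not part of the schema):
+      <add use="background"/>                           appends 'background' to the end of the display list
+      <add use="cursor" where="background" offset="1"/> inserts 'cursor' one entry past 'background'
+ -->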
+
+ <!-- /** addCircle
+ AddCircle adds a closed circle to the parent path element.
+ */ -->
+ <xs:element name="addCircle">
+ <xs:complexType>
+ <!-- @attribute direction One of @pattern. @patternDescription -->
+ <xs:attribute name="direction" type="Sk:PathDirection"/>
+ <!-- @attribute radius The distance from the center to the edge of the circle. -->
+ <xs:attribute name="radius" type="Sk:Float"/>
+ <!-- @attribute x The x coordinate of the circle's center. -->
+ <xs:attribute name="x" type="Sk:Float"/>
+ <!-- @attribute y The y coordinate of the circle's center. -->
+ <xs:attribute name="y" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
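+
+ <!-- Illustrative usage (hypothetical id and values), nested in a parent path element as
+      described above:
+      <path id="dot">
+          <addCircle x="20" y="20" radius="10" direction="cw"/>
+      </path>
+ -->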
+
+ <!-- /** addOval
+ AddOval adds a closed oval described by its bounding box to the parent path element.
+ */ -->
+ <xs:element name="addOval">
+ <xs:complexType>
+ <!-- @attribute direction One of @pattern. @patternDescription -->
+ <xs:attribute name="direction" type="Sk:PathDirection"/>
+ <!-- @attribute bottom The bottom edge of the oval's bounding box. -->
+ <xs:attribute name="bottom" type="Sk:Float"/>
+ <!-- @attribute left The left edge of the oval's bounding box. -->
+ <xs:attribute name="left" type="Sk:Float"/>
+ <!-- @attribute right The right edge of the oval's bounding box. -->
+ <xs:attribute name="right" type="Sk:Float"/>
+ <!-- @attribute top The top edge of the oval's bounding box. -->
+ <xs:attribute name="top" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** addPath
+ AddPath adds a path to the parent path element.
+ An optional matrix may transform the path as it is added.
+ */ -->
+ <xs:element name="addPath">
+ <xs:complexType>
+ <!-- @attribute matrix The matrix applied to the path as it is added. -->
+ <xs:attribute name="matrix" type="Sk:Matrix"/>
+ <!-- @attribute path The path to add. -->
+ <xs:attribute name="path" type="Sk:Path"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** addRect
+ AddRect adds a closed rectangle to the parent path element.
+ */ -->
+ <xs:element name="addRect">
+ <xs:complexType>
+ <!-- @attribute direction One of @pattern. @patternDescription -->
+ <xs:attribute name="direction" type="Sk:PathDirection"/>
+ <!-- @attribute bottom The bottom edge of the rectangle. -->
+ <xs:attribute name="bottom" type="Sk:Float"/>
+ <!-- @attribute left The left edge of the rectangle. -->
+ <xs:attribute name="left" type="Sk:Float"/>
+ <!-- @attribute right The right edge of the rectangle. -->
+ <xs:attribute name="right" type="Sk:Float"/>
+ <!-- @attribute top The top edge of the rectangle. -->
+ <xs:attribute name="top" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** addRoundRect
+ AddRoundRect adds a closed rounded rectangle to the parent path element.
+ */ -->
+ <xs:element name="addRoundRect">
+ <xs:complexType>
+ <!-- @attribute direction One of @pattern. @patternDescription -->
+ <xs:attribute name="direction" type="Sk:PathDirection"/>
+ <!-- @attribute bottom The bottom edge of the rounded rectangle's bounding box. -->
+ <xs:attribute name="bottom" type="Sk:Float"/>
+ <!-- @attribute left The left edge of the rounded rectangle's bounding box. -->
+ <xs:attribute name="left" type="Sk:Float"/>
+ <!-- @attribute right The right edge of the rounded rectangle's bounding box. -->
+ <xs:attribute name="right" type="Sk:Float"/>
+ <!-- @attribute top The top edge of the rounded rectangle's bounding box. -->
+ <xs:attribute name="top" type="Sk:Float"/>
+ <!-- @attribute rx The X-radius of the oval used to round the corners. -->
+ <xs:attribute name="rx" type="Sk:Float"/>
+ <!-- @attribute ry The Y-radius of the oval used to round the corners. -->
+ <xs:attribute name="ry" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** animate
+ Animate varies the value of an element's attribute over time.
+ The animation may interpolate the value from the 'from' attribute to the 'to' attribute,
+ or may compute the value using the 'formula' attribute.
+ */ -->
+ <xs:element name="animate">
+ <xs:complexType>
+ <!-- @attribute begin An optional offset that must elapse before the animation begins. The apply
+ begin attribute is added to any animator's begin attribute. -->
+ <xs:attribute name="begin" type="Sk:MSec"/>
+ <!-- @attribute blend Specifies how the from and to values are blended. A value from 0.0 to
+ 1.0 specifies a cubic lag/log/lag blend (slow to change at the beginning and end); the closer
+ blend is to 1.0, the more linear the blend. If omitted, the blend is linear. -->
+ <xs:attribute name="blend" type="Sk:FloatArray"/>
+ <!-- @attribute dur The duration of the animation in milliseconds. -->
+ <xs:attribute name="dur" type="Sk:MSec"/>
+ <!-- @attribute dynamic If true, restart the animation if any of the simple values the 'from', 'formula',
+ 'lval', or 'to' attributes reference are changed. Simple values are contained by the array, boolean, float, int,
+ and string elements. -->
+ <xs:attribute name="dynamic" type="Sk:Boolean" />
+ <!-- @attribute field The attribute to animate. -->
+ <xs:attribute name="field" type="Sk:String"/>
+ <!-- @attribute formula A script to execute over time to compute the field's value. Typically,
+ the formula is a script expression which includes a reference to the time attribute of the
+ containing apply element. Requires a dur. For animations that do not stop, set dur="Number.POSITIVE_INFINITY" -->
+ <xs:attribute name="formula" type="Sk:DynamicString"/>
+ <!-- @attribute from The starting value (requires a 'to' attribute) -->
+ <xs:attribute name="from" type="Sk:DynamicString"/>
+ <!-- @attribute lval An expression evaluating to the attribute to animate.
+ If present, lval overrides 'field'. The expression is typically an array element,
+ e.g. lval="x[y]" . -->
+ <xs:attribute name="lval" type="Sk:DynamicString"/>
+ <!-- @attribute mirror If true, reverses the interpolated value during even repeat cycles. -->
+ <xs:attribute name="mirror" type="Sk:Boolean"/>
+ <!-- @attribute repeat Specifies the number of times to repeat the animation.
+ (May be fractional.) -->
+ <xs:attribute name="repeat" type="Sk:Float"/>
+ <!-- @attribute reset If true, the computed value is the initial value after the
+ animation is complete. If false, or by default, the computed value is the final value
+ after the animation is complete. -->
+ <xs:attribute name="reset" type="Sk:Boolean"/>
+ <!-- @attribute step When the apply's attribute mode="immediate" or "create", the step attribute can be read by
+ script to determine the current animation iteration. -->
+ <xs:attribute name="step" type="Sk:Int" />
+ <!-- @attribute target The element to animate. By default, the element contained by the apply
+ or referenced by the apply's scope attribute is the animate target. -->
+ <xs:attribute name="target" type="Sk:DynamicString"/>
+ <!-- @attribute to The ending value (requires a 'from' attribute) -->
+ <xs:attribute name="to" type="Sk:DynamicString"/>
+ <!-- @attribute values [Deprecated] -->
+ <xs:attribute name="values" type="Sk:DynamicString"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
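+
+ <!-- Illustrative usage (hypothetical values, not part of the schema): interpolate the target's
+      'left' attribute from 0 to 40 over one second. An animate element is typically contained by
+      or referenced from an apply element, as described under apply below.
+      <animate field="left" from="0" to="40" dur="1000"/>
+ -->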
+
+ <!-- /** apply
+ Apply changes one or more attributes of an element.
+ Apply either contains one displayable element or references the element scoping the change
+ with the 'scope' attribute. Apply either contains one animator element or references it with
+ the 'animator' attribute.
+ In the display list, apply draws the element it scopes after evaluating the animation.
+ */ -->
+ <xs:element name="apply">
+ <xs:complexType>
+ <xs:choice minOccurs="0" maxOccurs="1">
+ <xs:element ref="Sk:animate"/>
+ <xs:element ref="Sk:set" />
+ <!-- not quite right; want to say 'one of the above, one of the below'
+ </xs:choice>
+ <xs:choice minOccurs="0" maxOccurs="1">
+ -->
+ <xs:element ref="Sk:add"/>
+ <xs:element ref="Sk:array"/>
+ <xs:element ref="Sk:apply"/>
+ <xs:element ref="Sk:bitmap"/>
+ <xs:element ref="Sk:boolean"/>
+ <xs:element ref="Sk:bounds"/>
+ <!-- <xs:element ref="Sk3D:camera"/> -->
+ <xs:element ref="Sk:clear"/>
+ <xs:element ref="Sk:clip"/>
+ <xs:element ref="Sk:color"/>
+ <xs:element ref="Sk:drawTo"/>
+ <xs:element ref="Sk:float"/>
+ <xs:element ref="Sk:full"/>
+ <xs:element ref="Sk:group"/>
+ <xs:element ref="Sk:image"/>
+ <xs:element ref="Sk:int"/>
+ <xs:element ref="Sk:line"/>
+ <xs:element ref="Sk:matrix"/>
+ <xs:element ref="Sk:move"/>
+ <xs:element ref="Sk:oval"/>
+ <xs:element ref="Sk:paint"/>
+ <!-- <xs:element ref="Sk:patch"/> -->
+ <xs:element ref="Sk:path"/>
+ <xs:element ref="Sk:point"/>
+ <xs:element ref="Sk:polygon"/>
+ <xs:element ref="Sk:polyline"/>
+ <xs:element ref="Sk:post"/>
+ <xs:element ref="Sk:random"/>
+ <xs:element ref="Sk:rect"/>
+ <xs:element ref="Sk:remove"/>
+ <xs:element ref="Sk:replace"/>
+ <xs:element ref="Sk:roundRect"/>
+ <xs:element ref="Sk:save"/>
+ <xs:element ref="Sk:snapshot"/>
+ <xs:element ref="Sk:string"/>
+ <xs:element ref="Sk:text"/>
+ <xs:element ref="Sk:textBox"/>
+ <xs:element ref="Sk:textOnPath"/>
+ <xs:element ref="Sk:textToPath"/>
+ </xs:choice>
+ <!-- @attribute animator The description of how the element is changed over time. -->
+ <xs:attribute name="animator" type="Sk:Animate"/>
+ <!-- @attribute begin An optional offset that must elapse before the animation begins. The apply
+ begin attribute is added to any animator's begin attribute. -->
+ <xs:attribute name="begin" type="Sk:MSec" />
+ <!-- @attribute dontDraw Edits an element's attribute without drawing it; for instance,
+ to edit a clip's rectangle without drawing the rectangle, set dontDraw="true". -->
+ <xs:attribute name="dontDraw" type="Sk:Boolean"/>
+ <!-- @attribute dynamicScope The location in the display list where animations are stored. Use
+ dynamicScope instead of scope if a script expression with potentially different values is desired to
+ describe the scope. -->
+ <xs:attribute name="dynamicScope" type="Sk:String"/>
+ <!-- @attribute interval The optional time interval from one animation frame to the next. -->
+ <xs:attribute name="interval" type="Sk:MSec" />
+ <!-- @attribute mode One of @pattern. @patternDescription -->
+ <xs:attribute name="mode" type="Sk:ApplyMode"/>
+ <!-- @attribute pickup Starts the animation at the current target's attribute value. Enabling
+ 'pickup' permits omitting the 'from' attribute of the animator. -->
+ <xs:attribute name="pickup" type="Sk:Boolean"/>
+ <!-- @attribute restore If true, multiple references to the same apply statement save and
+ restore the interpolated target values. -->
+ <xs:attribute name="restore" type="Sk:Boolean"/>
+ <!-- @attribute scope The location in the display list where animations are stored. -->
+ <xs:attribute name="scope" type="Sk:Drawable"/>
+ <!-- @attribute step When mode="immediate" or "create", the step attribute can be read by
+ script to determine the current animation iteration. -->
+ <xs:attribute name="step" type="Sk:Int" />
+ <!-- @attribute steps When mode="immediate", the number of times the animation
+ is stepped. The animation iterates 'steps' times plus one. -->
+ <xs:attribute name="steps" type="Sk:Int" />
+ <!-- @attribute time When read from script, returns the animation time. Typically used by
+ an animate element's formula attribute. -->
+ <xs:attribute name="time" type="Sk:MSec" />
+ <!-- @attribute transition One of @pattern. @patternDescription -->
+ <xs:attribute name="transition" type="Sk:ApplyTransition"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
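+
+ <!-- Illustrative usage (hypothetical id and values): animate the 'left' attribute of a
+      previously defined drawable with id 'spot', then draw it:
+      <apply scope="spot">
+          <animate field="left" from="0" to="40" dur="1000"/>
+      </apply>
+ -->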
+
+ <!-- /** array
+ Array contains an array of values of the same type. The values may be
+ numbers or strings.
+ */ -->
+ <xs:element name="array">
+ <xs:complexType>
+ <!-- @attribute length The number of elements in the array (read only). -->
+ <xs:attribute name="length" type="Sk:Int"/>
+ <!-- @attribute values The elements in the array. -->
+ <xs:attribute name="values" type="Sk:UnknownArray"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** bitmap
+ Bitmap describes a rectangle of pixels.
+ Use the <drawTo> element to draw to a bitmap.
+ Add the bitmap to the display list to draw from a bitmap.
+ */ -->
+ <xs:element name="bitmap">
+ <xs:complexType>
+ <!-- @attribute erase The color, including the alpha, the bitmap is initially set to. -->
+ <xs:attribute name="erase" type="Sk:ARGB"/>
+ <!-- @attribute format One of @pattern. @patternDescription -->
+ <xs:attribute name="format" type="Sk:BitmapFormat"/>
+ <!-- @attribute height The height of the bitmap in pixels. -->
+ <xs:attribute name="height" type="Sk:Int"/>
+ <!-- @attribute rowBytes The number of bytes describing each row of pixels (optional). -->
+ <xs:attribute name="rowBytes" type="Sk:Int"/>
+ <!-- @attribute width The width of the bitmap in pixels. -->
+ <xs:attribute name="width" type="Sk:Int"/>
+ <!-- @attribute x The left edge of the bitmap in unit space. -->
+ <xs:attribute name="x" type="Sk:Float"/>
+ <!-- @attribute y The top edge of the bitmap in unit space. -->
+ <xs:attribute name="y" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** bitmapShader
+ BitmapShader sets the paint shader to draw the bitmap as a texture.
+ */ -->
+ <xs:element name="bitmapShader">
+ <xs:complexType>
+ <xs:choice >
+ <xs:element ref="Sk:image" minOccurs="0" />
+ <xs:element ref="Sk:matrix" minOccurs="0" />
+ </xs:choice>
+ <!-- @attribute matrix Matrix applies a 3x3 transform to the gradient. -->
+ <xs:attribute name="matrix" type="Sk:Matrix"/>
+ <!-- @attribute tileMode One of @pattern. @patternDescription -->
+ <xs:attribute name="tileMode" type="Sk:TileMode"/>
+ <!-- @attribute filterType The bitmap filter to employ, one of @pattern. -->
+ <xs:attribute name="filterType" type="Sk:FilterType"/>
+ <!-- @attribute image The bitmap to draw. -->
+ <xs:attribute name="image" type="Sk:BaseBitmap"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+
+ <!-- /** blur
+ Blur describes an image filter in the paint that blurs the drawn geometry.
+ */ -->
+ <xs:element name="blur">
+ <xs:complexType>
+ <!-- @attribute blurStyle One of @pattern. @patternDescription -->
+ <xs:attribute name="blurStyle" type="Sk:MaskFilterBlurStyle"/>
+ <!-- @attribute radius The extent of the filter effect in unit space. If the radius is less
+ than zero, the blur has no effect. -->
+ <xs:attribute name="radius" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
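+
+ <!-- Illustrative usage (hypothetical values): a blur mask filter placed inside a paint element,
+      blurring only outside the shape with a 3 unit radius:
+      <blur blurStyle="outer" radius="3"/>
+ -->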
+
+ <!-- /** boolean
+ Boolean contains a boolean. The boolean element cannot be added to a display list, but can
+ be set by animations and read by any attribute definition. A boolean element may be referenced,
+ for instance, by a group's condition attribute to make an animation conditionally execute.
+ */ -->
+ <xs:element name="boolean">
+ <xs:complexType>
+ <!-- @attribute value The contained boolean. -->
+ <xs:attribute name="value" type="Sk:Boolean"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** bounds
+ Bounds describes a bounding box that is not drawn. Bounds is used to specify a rectangle to
+ invalidate or record whether the specified area was drawn.
+ The width and height attributes compute the rectangle's right and bottom edges when the rectangle
+ description is first seen. Animating the rectangle's left or top will not recompute the right or bottom
+ if the width or height have been specified.
+ */ -->
+ <xs:element name="bounds">
+ <xs:complexType>
+ <!-- @attribute bottom The bottom edge of the rectangle. -->
+ <xs:attribute name="bottom" type="Sk:Float"/>
+ <!-- @attribute height The height of the rectangle. Setting height computes the
+ bottom attribute from the top attribute. -->
+ <xs:attribute name="height" type="Sk:Float"/>
+ <!-- @attribute inval If set to true, union the drawn bounds to compute an inval area. -->
+ <xs:attribute name="inval" type="Sk:Boolean"/>
+ <!-- @attribute left The left edge of the rectangle. -->
+ <xs:attribute name="left" type="Sk:Float"/>
+ <!-- @attribute needsRedraw Set to true if last draw was visible. -->
+ <xs:attribute name="needsRedraw" type="Sk:Boolean"/>
+ <!-- @attribute right The right edge of the rectangle. -->
+ <xs:attribute name="right" type="Sk:Float"/>
+ <!-- @attribute top The top edge of the rectangle. -->
+ <xs:attribute name="top" type="Sk:Float"/>
+ <!-- @attribute width The width of the rectangle. -->
+ <xs:attribute name="width" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** clear
+ Clear removes all entries in the display list.
+ */ -->
+ <xs:element name="clear">
+ <xs:complexType>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** clip
+ Clip sets the canvas to clip drawing to an element's geometry.
+ A clip element may contain an element or reference an element with the path or
+ rectangle attributes. To make the clip unrestricted, enclose a 'full' element.
+ */ -->
+ <xs:element name="clip">
+ <xs:complexType>
+ <xs:choice minOccurs="0" maxOccurs="1">
+ <xs:element ref="Sk:full"/>
+ <xs:element ref="Sk:rect"/>
+ <xs:element ref="Sk:path"/>
+ <xs:element ref="Sk:polygon"/>
+ <xs:element ref="Sk:polyline"/>
+ </xs:choice>
+ <!-- @attribute path A path-derived element to clip to: either an oval,
+ a path, a polygon, a polyline, or a roundRect. -->
+ <xs:attribute name="path" type="Sk:Path"/>
+ <!-- @attribute rect A rectangle element to clip to. -->
+ <xs:attribute name="rect" type="Sk:Rect"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** close
+ Close connects the last point in the path's contour to the first if the contour is not already closed.
+ */ -->
+ <xs:element name="close">
+ <xs:complexType>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** color
+ Color describes a color in RGB space or HSV space, and its alpha (transparency).
+ */ -->
+ <xs:element name="color">
+ <xs:complexType>
+ <!-- @attribute alpha The alpha component, which describes transparency.
+ Alpha ranges from 0.0 (transparent) to 1.0 (completely opaque). -->
+ <xs:attribute name="alpha" type="Sk:Float"/>
+ <!-- @attribute blue The blue component of an RGB color. Blue ranges from 0 to 255. -->
+ <xs:attribute name="blue" type="Sk:Float"/>
+ <!-- @attribute color The complete color. The color can be specified by name,
+ by hexadecimal value, or with the rgb function. -->
+ <xs:attribute name="color" type="Sk:ARGB"/>
+ <!-- @attribute green The green component of an RGB color. Green ranges from 0 to 255. -->
+ <xs:attribute name="green" type="Sk:Float"/>
+ <!-- @attribute hue The hue component of an HSV color. Hue ranges from 0 to 360. -->
+ <xs:attribute name="hue" type="Sk:Float"/>
+ <!-- @attribute red The red component of an RGB color. Red ranges from 0 to 255. -->
+ <xs:attribute name="red" type="Sk:Float"/>
+ <!-- @attribute saturation The saturation component of an HSV color. Saturation ranges from 0 to 1. -->
+ <xs:attribute name="saturation" type="Sk:Float"/>
+ <!-- @attribute value The value component of an HSV color. Value ranges from 0 to 1. -->
+ <xs:attribute name="value" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** cubicTo
+ CubicTo adds a cubic to the path, using the last point in the path as the first point of the cubic.
+ */ -->
+ <xs:element name="cubicTo">
+ <xs:complexType>
+ <!-- @attribute x1 The x position of the first off-curve point. -->
+ <xs:attribute name="x1" type="Sk:Float"/>
+ <!-- @attribute x2 The x position of the second off-curve point. -->
+ <xs:attribute name="x2" type="Sk:Float"/>
+ <!-- @attribute x3 The x position of the final on-curve point. -->
+ <xs:attribute name="x3" type="Sk:Float"/>
+ <!-- @attribute y1 The y position of the first off-curve point. -->
+ <xs:attribute name="y1" type="Sk:Float"/>
+ <!-- @attribute y2 The y position of the second off-curve point. -->
+ <xs:attribute name="y2" type="Sk:Float"/>
+ <!-- @attribute y3 The y position of the final on-curve point. -->
+ <xs:attribute name="y3" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** dash
+ Dash describes an array of dashes and gaps that describe how the paint strokes lines,
+ rectangles, and paths. The intervals, phase, and dashed path are all measured in the same
+ unit space. The phase and distance between dashes is unaffected by the paint's stroke width.
+ */ -->
+ <xs:element name="dash">
+ <xs:complexType>
+ <!-- @attribute intervals An array of floats that alternately describe the lengths of
+ dashes and gaps. Intervals must contain an even number of entries. -->
+ <xs:attribute name="intervals" type="Sk:FloatArray"/>
+ <!-- @attribute phase Phase advances the placement of the first dash. A positive phase
+ precedes the first dash with a gap. A negative phase shortens the length of the first dash. -->
+ <xs:attribute name="phase" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
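+
+ <!-- Illustrative usage (hypothetical values): stroke with 6 unit dashes separated by 3 unit gaps;
+      the positive phase of 1.5 precedes the first dash with a gap. Placed inside a paint element:
+      <dash intervals="[6, 3]" phase="1.5"/>
+ -->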
+
+ <!-- /** data
+ Data provides metadata to an event. The metadata may be an integer, a float,
+ or a string.
+ */ -->
+ <xs:element name="data">
+ <xs:complexType>
+ <!-- @attribute float The float value associated with the metadata. -->
+ <xs:attribute name="float" type="Sk:Float"/>
+ <!-- @attribute initialized A read-only value set to false (unused by data). -->
+ <xs:attribute name="initialized" type="Sk:Boolean"/>
+ <!-- @attribute int The integer value associated with the metadata. -->
+ <xs:attribute name="int" type="Sk:Int"/>
+ <!-- @attribute name The name of the metadata. This is the name of the data. -->
+ <xs:attribute name="name" type="Sk:String"/>
+ <!-- @attribute string The string value associated with the metadata. -->
+ <xs:attribute name="string" type="Sk:String"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** discrete
+ Discrete alters the edge of the stroke randomly. Discrete is a path effect, and only has an
+ effect when referenced from a paint. A <pathEffect/>
+ element with no attributes will disable discrete.
+ */ -->
+ <xs:element name="discrete">
+ <xs:complexType>
+ <!-- @attribute deviation The amount of wobble in the stroke. -->
+ <xs:attribute name="deviation" type="Sk:Float"/>
+ <!-- @attribute segLength The length of wobble in the stroke. -->
+ <xs:attribute name="segLength" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** drawTo
+ DrawTo draws into a bitmap. The bitmap can be added to the display list
+ to draw the composite image.
+ DrawTo can be used as an offscreen to speed complicated animations, and
+ for bitmap effects such as pixelated zooming.
+ DrawTo can only reference a single drawable element. Use <add>,
+ <group>, or <save> to draw multiple elements with <drawTo>.
+ */ -->
+ <xs:element name="drawTo">
+ <xs:complexType>
+ <xs:choice maxOccurs="unbounded" >
+ <xs:element ref="Sk:add"/>
+ <xs:element ref="Sk:apply"/>
+ <xs:element ref="Sk:bitmap"/>
+ <xs:element ref="Sk:bounds"/>
+ <!-- <xs:element ref="Sk3D:camera"/> -->
+ <xs:element ref="Sk:clear"/>
+ <xs:element ref="Sk:clip"/>
+ <xs:element ref="Sk:color"/>
+ <xs:element ref="Sk:full"/>
+ <xs:element ref="Sk:group"/>
+ <xs:element ref="Sk:image"/>
+ <xs:element ref="Sk:line"/>
+ <xs:element ref="Sk:matrix"/>
+ <xs:element ref="Sk:move"/>
+ <xs:element ref="Sk:oval"/>
+ <xs:element ref="Sk:paint"/>
+ <!-- <xs:element ref="Sk:patch"/> -->
+ <xs:element ref="Sk:path"/>
+ <xs:element ref="Sk:point"/>
+ <xs:element ref="Sk:polygon"/>
+ <xs:element ref="Sk:polyline"/>
+ <xs:element ref="Sk:rect"/>
+ <xs:element ref="Sk:remove"/>
+ <xs:element ref="Sk:replace"/>
+ <xs:element ref="Sk:roundRect"/>
+ <xs:element ref="Sk:save"/>
+ <xs:element ref="Sk:text"/>
+ <xs:element ref="Sk:textBox"/>
+ <xs:element ref="Sk:textOnPath"/>
+ <xs:element ref="Sk:textToPath"/>
+ </xs:choice>
+ <!-- @attribute drawOnce If set, the drawTo will only draw a single time. -->
+ <xs:attribute name="drawOnce" type="Sk:Boolean"/>
+ <!-- @attribute use The bitmap to draw into. -->
+ <xs:attribute name="use" type="Sk:bitmap"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
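+
+ <!-- Illustrative usage (hypothetical ids and values): draw an oval into an offscreen bitmap;
+      adding the bitmap to the display list elsewhere shows the composited result:
+      <bitmap id="offscreen" width="64" height="64"/>
+      <drawTo use="offscreen">
+          <oval left="8" top="8" right="56" bottom="56"/>
+      </drawTo>
+ -->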
+
+ <!-- /** dump
+ Dump prints a list of the items in the display list and all items'
+ children to the debug console. Dump is only available in Debug
+ builds. */ -->
+ <xs:element name="dump">
+ <xs:complexType>
+ <!-- @attribute displayList Dumps the current display list if true. The display list is also
+ dumped if dump has no attributes. -->
+ <xs:attribute name="displayList" type="Sk:Boolean"/>
+ <!-- @attribute eventList Dumps the list of events, both enabled and disabled. -->
+ <xs:attribute name="eventList" type="Sk:Boolean"/>
+ <!-- @attribute events Outputs each event element as it is enabled. -->
+ <xs:attribute name="events" type="Sk:Boolean"/>
+ <!-- @attribute groups Outputs each group element as its condition is evaluated. -->
+ <xs:attribute name="groups" type="Sk:Boolean"/>
+ <!-- @attribute name Outputs the values associated with a single named element. -->
+ <xs:attribute name="name" type="Sk:String"/>
+ <!-- @attribute posts Outputs each post element as it is enabled. -->
+ <xs:attribute name="posts" type="Sk:Boolean"/>
+ <!-- @attribute script Evaluates the provided script -->
+ <xs:attribute name="script" type="Sk:String"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** emboss
+ PRELIMINARY [to be replaced with SkEmbossMaskFilter.h doxygen documentation
+ at some point]
+ Emboss applies a mask filter to the paint that biases the object's color
+ towards white or black depending on the normals of the path contour, giving
+ the shape a 3D raised or depressed effect.
+ Embossing is replaced by subsequent mask filter elements, or
+ disabled by a negative radius, or by an empty <maskFilter> element.
+ */ -->
+ <xs:element name="emboss">
+ <xs:complexType>
+ <!-- @attribute ambient The amount of ambient light, from 0 to 1. -->
+ <xs:attribute name="ambient" type="Sk:Float"/>
+ <!-- @attribute direction The direction of the light source, as described by a 3D vector.
+ (The vector is normalized to a unit length of 1.0.) -->
+ <xs:attribute name="direction" type="Sk:FloatArray"/>
+ <!-- @attribute radius The extent of the filter effect in unit space. If the radius is less
+ than zero, the emboss has no effect. -->
+ <xs:attribute name="radius" type="Sk:Float"/>
+ <!-- @attribute specular The exponential intensity of the light, from 0 to 1.
+ Each increase of 0.0625 doubles the intensity. -->
+ <xs:attribute name="specular" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** event
+ Event contains a series of actions performed each time the event's criteria are satisfied.
+ These actions may modify the display list, may enable animations which in turn modify
+ elements' attributes, and may post other events.
+ */ -->
+ <xs:element name="event">
+ <xs:complexType>
+ <xs:choice maxOccurs="unbounded" >
+ <xs:element ref="Sk:add"/>
+ <xs:element ref="Sk:apply"/>
+ <xs:element ref="Sk:array"/>
+ <xs:element ref="Sk:bitmap"/>
+ <xs:element ref="Sk:boolean"/>
+ <xs:element ref="Sk:bounds"/>
+ <!-- <xs:element ref="Sk3D:camera"/> -->
+ <xs:element ref="Sk:clear"/>
+ <xs:element ref="Sk:clip"/>
+ <xs:element ref="Sk:color"/>
+ <xs:element ref="Sk:drawTo"/>
+ <xs:element ref="Sk:dump"/>
+ <xs:element ref="Sk:float"/>
+ <xs:element ref="Sk:full"/>
+ <xs:element ref="Sk:group"/>
+ <xs:element ref="Sk:hitClear"/>
+ <xs:element ref="Sk:hitTest"/>
+ <xs:element ref="Sk:image"/>
+ <xs:element ref="Sk:input"/>
+ <xs:element ref="Sk:int"/>
+ <xs:element ref="Sk:line"/>
+ <xs:element ref="Sk:matrix"/>
+ <xs:element ref="Sk:move"/>
+ <xs:element ref="Sk:movie"/>
+ <xs:element ref="Sk:oval"/>
+ <xs:element ref="Sk:paint"/>
+ <!-- <xs:element ref="Sk:patch"/> -->
+ <xs:element ref="Sk:path"/>
+ <xs:element ref="Sk:point"/>
+ <xs:element ref="Sk:polygon"/>
+ <xs:element ref="Sk:polyline"/>
+ <xs:element ref="Sk:post"/>
+ <xs:element ref="Sk:random"/>
+ <xs:element ref="Sk:rect"/>
+ <xs:element ref="Sk:remove"/>
+ <xs:element ref="Sk:replace"/>
+ <xs:element ref="Sk:roundRect"/>
+ <xs:element ref="Sk:save"/>
+ <xs:element ref="Sk:snapshot"/>
+ <xs:element ref="Sk:string"/>
+ <xs:element ref="Sk:text"/>
+ <xs:element ref="Sk:textBox"/>
+ <xs:element ref="Sk:textOnPath"/>
+ <xs:element ref="Sk:textToPath"/>
+ </xs:choice>
+ <!-- @attribute code The key code to match to a key press event, one of @pattern.
+ If the code is set to @pattern[0], the event is never activated. -->
+ <xs:attribute name="code" type="Sk:EventCode"/>
+ <!-- @attribute disable If true, the event cannot be activated. By default false. -->
+ <xs:attribute name="disable" type="Sk:Boolean"/>
+ <!-- @attribute key The character code to match to a key down event.
+ When read, the key that activated this event. -->
+ <xs:attribute name="key" type="Sk:String"/>
+ <!-- @attribute keys A dash-separated continuous range of character codes to match
+ to a key down event. Read the key attribute to determine the key that activated this event. -->
+ <xs:attribute name="keys" type="Sk:String"/> <!-- single or range of keys -->
+ <!-- @attribute kind The event kind that activates this event, one of @pattern.
+ If kind equals keyChar, either attribute key or keys is expected.
+ If kind equals keyPress, attribute code is expected.
+ If kind equals onEnd, attribute target is expected.
+ If kind equals onLoad, the event is activated when the document containing the event
+ is loaded. The onLoad attribute cannot be activated through a post event.
+ If kind equals user, the event is activated when the posted event targets this event's ID. -->
+ <xs:attribute name="kind" type="Sk:EventKind"/>
+ <!-- @attribute target The element to listen to which activates this event. -->
+ <xs:attribute name="target" type="Sk:String" />
+ <!-- @attribute x For click events, the x-coordinate of the click. -->
+ <xs:attribute name="x" type="Sk:Float" />
+ <!-- @attribute y For click events, the y-coordinate of the click. -->
+ <xs:attribute name="y" type="Sk:Float" />
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
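+
+ <!-- Illustrative usage (hypothetical id): when the document loads, add a previously defined
+      drawable with id 'background' to the display list:
+      <event kind="onLoad">
+          <add use="background"/>
+      </event>
+ -->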
+
+ <!-- /** float
+ Float contains a signed fractional value. The float element cannot be added to a display list,
+ but can be set by animations and read by any attribute definition.
+ */ -->
+ <xs:element name="float">
+ <xs:complexType>
+ <!-- @attribute value The contained float. -->
+ <xs:attribute name="value" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** fromPath
+ FromPath concatenates the parent matrix with a new matrix
+ that maps a unit vector to a point on the given path.
+ A fromPath element may contain a path element, or may refer to a previously
+ defined path element with the path attribute.
+ */ -->
+ <xs:element name="fromPath">
+ <xs:complexType>
+ <xs:choice >
+ <!-- @element path The path to evaluate. -->
+ <xs:element ref="Sk:path" minOccurs="0" />
+ </xs:choice>
+ <!-- @attribute mode One of @pattern.
+ If mode is set to normal, the matrix maps the unit vector's angle and position.
+ If mode is set to angle, the matrix maps only the unit vector's angle.
+ If mode is set to position, the matrix maps only the unit vector's position. -->
+ <xs:attribute name="mode" type="Sk:FromPathMode"/>
+ <!-- @attribute offset The distance along the path to evaluate. -->
+ <xs:attribute name="offset" type="Sk:Float"/>
+ <!-- @attribute path The path to evaluate. -->
+ <xs:attribute name="path" type="Sk:Path"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
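+
+ <!-- Illustrative usage (hypothetical id and values): inside a matrix element, map the drawing
+      to the point 25 units along a previously defined path with id 'route':
+      <fromPath mode="normal" offset="25" path="route"/>
+ -->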
+
+ <!-- /** full
+ Full paints the entire canvas to the limit of the canvas' clip.
+ */ -->
+ <xs:element name="full">
+ <xs:complexType>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** group
+ The group element collects a series of elements into a group. The group can be referenced
+ or defined within elements, like apply, which operate on any kind of element. Groups
+ may contain groups. An element in a group draws identically to an element outside a group.
+ */ -->
+ <xs:element name="group">
+ <xs:complexType>
+ <xs:choice maxOccurs="unbounded">
+ <xs:element ref="Sk:add"/>
+ <xs:element ref="Sk:apply"/>
+ <xs:element ref="Sk:array"/>
+ <xs:element ref="Sk:bitmap"/>
+ <xs:element ref="Sk:boolean"/>
+ <xs:element ref="Sk:bounds"/>
+ <!-- <xs:element ref="Sk3D:camera"/> -->
+ <xs:element ref="Sk:clear"/>
+ <xs:element ref="Sk:clip"/>
+ <xs:element ref="Sk:drawTo"/>
+ <xs:element ref="Sk:float"/>
+ <xs:element ref="Sk:full"/>
+ <xs:element ref="Sk:group"/>
+ <xs:element ref="Sk:hitClear"/>
+ <xs:element ref="Sk:hitTest"/>
+ <xs:element ref="Sk:image"/>
+ <xs:element ref="Sk:int"/>
+ <xs:element ref="Sk:line"/>
+ <xs:element ref="Sk:matrix"/>
+ <xs:element ref="Sk:move"/>
+ <xs:element ref="Sk:oval"/>
+ <xs:element ref="Sk:paint"/>
+ <!-- <xs:element ref="Sk:patch"/> -->
+ <xs:element ref="Sk:path"/>
+ <xs:element ref="Sk:point"/>
+ <xs:element ref="Sk:polygon"/>
+ <xs:element ref="Sk:polyline"/>
+ <xs:element ref="Sk:post"/>
+ <xs:element ref="Sk:random"/>
+ <xs:element ref="Sk:rect"/>
+ <xs:element ref="Sk:remove"/>
+ <xs:element ref="Sk:replace"/>
+ <xs:element ref="Sk:roundRect"/>
+ <xs:element ref="Sk:save"/>
+ <xs:element ref="Sk:snapshot"/>
+ <xs:element ref="Sk:string"/>
+ <xs:element ref="Sk:text"/>
+ <xs:element ref="Sk:textBox"/>
+ <xs:element ref="Sk:textOnPath"/>
+ <xs:element ref="Sk:textToPath"/>
+ </xs:choice>
+ <!-- @attribute condition If present and zero, the contained elements are ignored
+ when drawn. -->
+ <xs:attribute name="condition" type="Sk:DynamicString"/>
+ <!-- @attribute enableCondition If present and zero, the contained elements are ignored
+ when enabled. -->
+ <xs:attribute name="enableCondition" type="Sk:DynamicString"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
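+
+ <!-- Illustrative usage (hypothetical values): collect two drawables so they can be added,
+      moved, or animated as a unit:
+      <group id="scene">
+          <line x1="0" y1="0" x2="40" y2="40"/>
+          <oval left="10" top="10" right="30" bottom="30"/>
+      </group>
+ -->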
+
+ <xs:element name="hitClear" >
+ <xs:complexType>
+ <xs:choice maxOccurs="1">
+ <xs:element ref="Sk:array"/>
+ </xs:choice>
+ <!-- @attribute targets An array of element IDs to clear their hit-tested state. -->
+ <xs:attribute name="targets" type="Sk:DisplayableArray"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <xs:element name="hitTest" >
+ <xs:complexType>
+ <xs:choice maxOccurs="2">
+ <xs:element ref="Sk:array"/>
+ </xs:choice>
+ <!-- @attribute bullets An array of element IDs to test for intersection with targets. -->
+ <xs:attribute name="bullets" type="Sk:DisplayableArray"/>
+ <!-- @attribute hits The targets the bullets hit. A read-only array of indices, one index
+ per bullet. The value of the array element is the index of the target hit, or -1 if no
+ target was hit. -->
+ <xs:attribute name="hits" type="Sk:IntArray"/>
+ <!-- @attribute targets An array of element IDs to test for intersection with bullets. -->
+ <xs:attribute name="targets" type="Sk:DisplayableArray"/>
+ <!-- @attribute value Read only; set to true if some bullet hit some target. -->
+ <xs:attribute name="value" type="Sk:Boolean"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** image
+ Image creates a reference to a JPEG, PNG or GIF. The image may be referenced
+ through the local file system, the internet, or embedded in the document in Base64
+ format. The specific image type is determined by examining the byte stream.
+ */ -->
+ <xs:element name="image">
+ <xs:complexType>
+ <!-- @attribute base64 The image in Base64 notation. See http://rfc.net/rfc2045.html
+ for the base64 format. -->
+ <xs:attribute name="base64" type="Sk:Base64"/>
+ <!-- @attribute height The height of the image (read-only). -->
+ <xs:attribute name="height" type="Sk:Int"/>
+ <!-- @attribute src The URI reference, local to the containing document. -->
+ <xs:attribute name="src" type="Sk:String"/>
+ <!-- @attribute width The width of the image (read-only). -->
+ <xs:attribute name="width" type="Sk:Int"/>
+ <!-- @attribute x The position of the left edge of the image in local coordinates. -->
+ <xs:attribute name="x" type="Sk:Float"/>
+ <!-- @attribute y The position of the top edge of the image in local coordinates. -->
+ <xs:attribute name="y" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
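+
+ <!-- Illustrative usage (hypothetical file name): reference an image stored next to the document
+      and position its top left corner at (10, 10):
+      <image src="logo.png" x="10" y="10"/>
+ -->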
+
+ <!-- /** include
+ Include adds the referenced XML to the containing document. Unlike movie, the XML
+ directives can reference the document's IDs and can define new IDs that are referenced
+ by the remainder of the document or subsequent includes.
+ */ -->
+ <xs:element name="include">
+ <xs:complexType>
+ <!-- @attribute src The URI reference, local to the containing document,
+ containing the include's XML. -->
+ <xs:attribute name="src" type="Sk:String"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** input
+ Input captures the metadata passed from an event. When the input's name or id
+ matches the metadata's name, the metadata's payload is copied to the corresponding
+ input attribute.
+ */ -->
+ <xs:element name="input">
+ <xs:complexType>
+ <!-- @attribute float The floating point payload carried by the metadata. -->
+ <xs:attribute name="float" type="Sk:Float"/>
+ <!-- @attribute initialized A read-only value set to true if the input received a value
+ from the event. -->
+ <xs:attribute name="initialized" type="Sk:Boolean"/>
+ <!-- @attribute int The signed integer payload carried by the metadata. -->
+ <xs:attribute name="int" type="Sk:Int"/>
+ <!-- @attribute name The name of the metadata containing the payload. Note that
+ either the name or the id may be used to match the metadata, but XML requires the id to be
+ uniquely defined in the document, while multiple input elements may reuse
+ the name. -->
+ <xs:attribute name="name" type="Sk:String"/>
+ <!-- @attribute string The text payload carried by the metadata. -->
+ <xs:attribute name="string" type="Sk:String"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** int
+ Int contains an integer. The int element cannot be added to a display list, but can
+ be set by animations and read by any attribute definition. An int element may be used,
+ for instance, to index through an array element.
+ */ -->
+ <xs:element name="int">
+ <xs:complexType>
+ <!-- @attribute value The contained integer. -->
+ <xs:attribute name="value" type="Sk:Int"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** line
+ Line describes a line between two points. As noted below, the paint's stroke and
+ strokeAndFill attributes are ignored.
+ */ -->
+ <xs:element name="line">
+ <xs:complexType>
+ <!-- @attribute x1 The start point's x value. -->
+ <xs:attribute name="x1" type="Sk:Float"/>
+ <!-- @attribute x2 The stop point's x value. -->
+ <xs:attribute name="x2" type="Sk:Float"/>
+ <!-- @attribute y1 The start point's y value. -->
+ <xs:attribute name="y1" type="Sk:Float"/>
+ <!-- @attribute y2 The stop point's y value. -->
+ <xs:attribute name="y2" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** lineTo
+ LineTo adds a line from the last point in a path to the specified point.
+ */ -->
+ <xs:element name="lineTo">
+ <xs:complexType>
+ <!-- @attribute x The final path x coordinate. -->
+ <xs:attribute name="x" type="Sk:Float"/>
+ <!-- @attribute y The final path y coordinate. -->
+ <xs:attribute name="y" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** linearGradient
+ LinearGradient sets the paint shader to ramp between two or more colors.
+ */ -->
+ <xs:element name="linearGradient">
+ <xs:complexType>
+ <xs:choice maxOccurs="unbounded">
+ <xs:element ref="Sk:color"/>
+ <xs:element ref="Sk:matrix"/>
+ </xs:choice>
+ <!-- @attribute matrix Matrix applies a 3x3 transform to the gradient. -->
+ <xs:attribute name="matrix" type="Sk:Matrix"/>
+ <!-- @attribute tileMode One of @pattern. @patternDescription -->
+ <xs:attribute name="tileMode" type="Sk:TileMode"/>
+ <!-- @attribute offsets An optional array of values used to bias the colors. The first entry
+ in the array must be 0.0, the last must be 1.0, and intermediate values must ascend. -->
+ <xs:attribute name="offsets" type="Sk:FloatArray"/>
+ <!-- @attribute points Two points describing the start and end of the gradient. -->
+ <xs:attribute name="points" type="Sk:Point"/> <!-- not right; should be array of 2 points -->
+ <!-- @attribute unitMapper A script that returns the mapping for [0,1] for the gradient.
+ The script can use the predefined variable 'unit' to compute the mapping. For instance,
+ "unit*unit" squares the value (while still keeping it in the range of [0,1].) The computed number
+ is pinned to the range 0 to 1 after the script is executed. -->
+ <xs:attribute name="unitMapper" type="Sk:String"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
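+
+ <!-- Illustrative usage (hypothetical values): a two color ramp, typically placed inside a paint
+      element to set its shader; the points attribute describing the start and end is omitted here:
+      <linearGradient offsets="[0, 1]" tileMode="clamp">
+          <color red="255" green="0" blue="0"/>
+          <color red="0" green="0" blue="255"/>
+      </linearGradient>
+ -->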
+
+ <!-- /** maskFilter
+ MaskFilter disables any mask filter referenced by the paint.
+ */ -->
+ <xs:element name="maskFilter">
+ <xs:complexType>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** matrix
+ Matrix transforms all points drawn to the canvas. The matrix may translate, scale, skew, rotate,
+ or apply perspective, or apply any combination.
+ */ -->
+ <xs:element name="matrix">
+ <xs:complexType>
+ <xs:choice maxOccurs="unbounded">
+ <!-- @element fromPath FromPath maps a unit vector to a position and direction on a path. -->
+ <xs:element ref="Sk:fromPath"/>
+ <!-- @element polyToPoly PolyToPoly maps points between two polygons. -->
+ <xs:element ref="Sk:polyToPoly"/>
+ <!-- @element rectToRect RectToRect maps points between two rectangles. -->
+ <xs:element ref="Sk:rectToRect"/>
+ <!-- @element rotate Rotate computes the matrix rotation in degrees. -->
+ <xs:element ref="Sk:rotate"/>
+ <!-- @element scale Scale stretches or shrinks horizontally, vertically, or both. -->
+ <xs:element ref="Sk:scale"/>
+ <!-- @element skew Skew slants horizontally, vertically, or both. -->
+ <xs:element ref="Sk:skew"/>
+ <!-- @element translate Translate moves horizontally, vertically, or both. -->
+ <xs:element ref="Sk:translate"/>
+ </xs:choice>
+ <!-- @attribute matrix Nine floats describing a 3x3 matrix. -->
+ <xs:attribute name="matrix" type="Sk:FloatArray"/>
+ <!-- @attribute perspectX The [0][2] element of the 3x3 matrix. -->
+ <xs:attribute name="perspectX" type="Sk:Float"/>
+ <!-- @attribute perspectY The [1][2] element of the 3x3 matrix. -->
+ <xs:attribute name="perspectY" type="Sk:Float"/>
+ <!-- @attribute rotate The angle to rotate in degrees. -->
+ <xs:attribute name="rotate" type="Sk:Float"/>
+ <!-- @attribute scale The scale to apply in both X and Y. -->
+ <xs:attribute name="scale" type="Sk:Float"/>
+ <!-- @attribute scaleX The [0][0] element of the 3x3 matrix. -->
+ <xs:attribute name="scaleX" type="Sk:Float"/>
+ <!-- @attribute scaleY The [1][1] element of the 3x3 matrix. -->
+ <xs:attribute name="scaleY" type="Sk:Float"/>
+ <!-- @attribute skewX The [0][1] element of the 3x3 matrix. -->
+ <xs:attribute name="skewX" type="Sk:Float"/>
+ <!-- @attribute skewY The [1][0] element of the 3x3 matrix. -->
+ <xs:attribute name="skewY" type="Sk:Float"/>
+ <!-- @attribute translate A point specifying the translation in X and Y. -->
+ <xs:attribute name="translate" type="Sk:Point"/>
+ <!-- @attribute translateX The [2][0] element of the 3x3 matrix. -->
+ <xs:attribute name="translateX" type="Sk:Float"/>
+ <!-- @attribute translateY The [2][1] element of the 3x3 matrix. -->
+ <xs:attribute name="translateY" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
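+
+ <!-- Illustrative usage (hypothetical values): a matrix that rotates by 45 degrees and translates
+      by (20, 30), using the attributes defined above:
+      <matrix rotate="45" translate="[20, 30]"/>
+ -->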
+
+ <!-- /** move
+ Move an element in the display list in front of or behind other elements.
+ If where and offset are omitted, the element is moved to the end of the display list.
+ If where is specified, the element is moved before the first occurrence of where in the display list.
+ If offset and where are specified, the element is moved before where plus offset.
+ A positive offset without where moves the element to the start of the list plus offset.
+ A negative offset without where moves the element to the end of the list minus offset.
+ */ -->
+ <xs:element name="move">
+ <xs:complexType>
+ <!-- @attribute mode Has no effect. -->
+ <xs:attribute name="mode" type="Sk:AddMode"/>
+ <!-- @attribute offset The destination position using the rules listed above. -->
+ <xs:attribute name="offset" type="Sk:Int"/>
+ <!-- @attribute use The element to move. -->
+ <xs:attribute name="use" type="Sk:Drawable"/>
+ <!-- @attribute where The ID of the first display list entry to move to. -->
+ <xs:attribute name="where" type="Sk:Drawable"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** moveTo
+ MoveTo specifies the first point in a path contour.
+ */ -->
+ <xs:element name="moveTo">
+ <xs:complexType>
+ <!-- @attribute x The point's x coordinate. -->
+ <xs:attribute name="x" type="Sk:Float"/>
+ <!-- @attribute y The point's y coordinate. -->
+ <xs:attribute name="y" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** movie
+ Movie describes a display list within the current canvas and paint. Movies can contain
+ movies. One movie cannot affect how another movie draws, but movies can communicate
+ with each other by posting events.
+ */ -->
+ <xs:element name="movie">
+ <xs:complexType>
+ <!-- @attribute src The URI reference, local to the containing document, that contains the movie's XML. -->
+ <xs:attribute name="src" type="Sk:String"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** oval
+ Oval describes a circle stretched to fit in a rectangle.
+ The width and height attributes compute the oval's right and bottom edges when the oval
+ description is first seen. Animating the oval's left or top will not recompute the right or bottom
+ if the width or height has been specified.
+ */ -->
+ <xs:element name="oval">
+ <xs:complexType>
+ <!-- @attribute bottom The bottom edge of the oval. -->
+ <xs:attribute name="bottom" type="Sk:Float"/>
+ <!-- @attribute height The height of the oval. -->
+ <xs:attribute name="height" type="Sk:Float"/>
+ <!-- @attribute left The left edge of the oval. -->
+ <xs:attribute name="left" type="Sk:Float"/>
+ <!-- @attribute needsRedraw Set to true if last draw was visible. -->
+ <xs:attribute name="needsRedraw" type="Sk:Boolean"/>
+ <!-- @attribute right The right edge of the oval. -->
+ <xs:attribute name="right" type="Sk:Float"/>
+ <!-- @attribute top The top edge of the oval. -->
+ <xs:attribute name="top" type="Sk:Float"/>
+ <!-- @attribute width The width of the oval. -->
+ <xs:attribute name="width" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** paint
+ Paint uses color, flags, path effects, mask filters, shaders, and stroke effects when drawing
+ geometries, images, and text.
+ */ -->
+ <xs:element name="paint">
+ <xs:complexType>
+ <xs:choice maxOccurs="unbounded">
+ <!-- @element bitmapShader Sets or cancels an image to draw as the color. -->
+ <xs:element ref="Sk:bitmapShader"/>
+ <!-- @element blur Blur radially draws the shape with varying transparency. -->
+ <xs:element ref="Sk:blur"/>
+ <!-- @element color Color specifies a solid color in RGB or HSV. -->
+ <xs:element ref="Sk:color"/>
+ <!-- @element dash Dashes alternates stroking with dashes and gaps. -->
+ <xs:element ref="Sk:dash"/>
+ <!-- @element discrete Discrete wobbles the geometry randomly. -->
+ <xs:element ref="Sk:discrete"/>
+ <!-- @element emboss Emboss simulates a 3D light to show highlights and relief. -->
+ <xs:element ref="Sk:emboss"/>
+ <!-- @element linearGradient LinearGradient linearly ramps between two or more colors. -->
+ <xs:element ref="Sk:linearGradient"/>
+ <!-- @element maskFilter MaskFilter cancels a blur or emboss. -->
+ <xs:element ref="Sk:maskFilter"/>
+ <!-- @element pathEffect PathEffect cancels a discrete or dash. -->
+ <xs:element ref="Sk:pathEffect"/>
+ <!-- @element radialGradient RadialGradient radially ramps between two or more colors. -->
+ <xs:element ref="Sk:radialGradient"/>
+ <!-- @element shader Shader cancels a linear or radial gradient. -->
+ <xs:element ref="Sk:shader"/>
+ <!-- @element typeface Typeface chooses a font out of a font family. -->
+ <xs:element ref="Sk:typeface"/>
+ <!-- @element transparentShader TransparentShader uses the background for its paint. -->
+ <xs:element ref="Sk:transparentShader"/>
+ </xs:choice>
+ <!-- @attribute antiAlias AntiAlias uses gray shades to increase the definition of paths. -->
+ <xs:attribute name="antiAlias" type="Sk:Boolean"/>
+ <!-- @attribute ascent Ascent returns the height above the baseline defined by the font. -->
+ <xs:attribute name="ascent" type="Sk:Float"/>
+ <!-- @attribute color Color sets the paint to the color element with this ID. -->
+ <xs:attribute name="color" type="Sk:Color"/>
+ <!-- @attribute descent Descent returns the height below the baseline defined by the font. -->
+ <xs:attribute name="descent" type="Sk:Float"/>
+ <!-- @attribute fakeBold FakeBold enables a faked bold for text. -->
+ <xs:attribute name="fakeBold" type="Sk:Boolean"/>
+ <!-- @attribute filterType FilterType -->
+ <xs:attribute name="filterType" type="Sk:FilterType"/>
+ <!-- @attribute linearText LinearText uses the ideal path metrics at all sizes to describe text. -->
+ <xs:attribute name="linearText" type="Sk:Boolean"/>
+ <!-- @attribute maskFilter MaskFilter specifies a blur or emboss with this ID. -->
+ <xs:attribute name="maskFilter" type="Sk:MaskFilter"/>
+ <!-- @attribute measureText MeasureText(String) returns the width of the string in this paint. -->
+ <xs:attribute name="measureText" type="Sk:Float"/>
+ <!-- @attribute pathEffect PathEffect specifies a discrete or dash with this ID. -->
+ <xs:attribute name="pathEffect" type="Sk:PathEffect"/>
+ <!-- @attribute shader Shader specifies a gradient with this ID. -->
+ <xs:attribute name="shader" type="Sk:Shader"/>
+ <!-- @attribute strikeThru StrikeThru adds a line through the middle of drawn text. -->
+ <xs:attribute name="strikeThru" type="Sk:Boolean"/>
+ <!-- @attribute stroke Stroke draws the outline of geometry according to the pen attributes.
+ If style is also present, its setting overrides stroke. -->
+ <xs:attribute name="stroke" type="Sk:Boolean"/>
+ <!-- @attribute strokeCap StrokeCap is one of @pattern. -->
+ <xs:attribute name="strokeCap" type="Sk:Cap"/>
+ <!-- @attribute strokeJoin StrokeJoin is one of @pattern. -->
+ <xs:attribute name="strokeJoin" type="Sk:Join"/>
+ <!-- @attribute strokeMiter StrokeMiter limits the pen's joins on narrow angles. -->
+ <xs:attribute name="strokeMiter" type="Sk:Float"/>
+ <!-- @attribute strokeWidth StrokeWidth specifies the width of the pen. -->
+ <xs:attribute name="strokeWidth" type="Sk:Float"/>
+ <!-- @attribute style Style fills, strokes, or strokes and fills the geometry with the paint's color. -->
+ <xs:attribute name="style" type="Sk:Style"/>
+ <!-- @attribute textAlign TextAlign is one of @pattern. -->
+ <xs:attribute name="textAlign" type="Sk:Align"/>
+ <!-- @attribute textScaleX TextScaleX condenses or expands the text. -->
+ <xs:attribute name="textScaleX" type="Sk:Float"/>
+ <!-- @attribute textSize TextSize specifies the point size of the text. -->
+ <xs:attribute name="textSize" type="Sk:Float"/>
+ <!-- @attribute textSkewX TextSkewX draws the text obliquely. -->
+ <xs:attribute name="textSkewX" type="Sk:Float"/>
+ <!-- @attribute textTracking TextTracking specifies the space between letters. -->
+ <xs:attribute name="textTracking" type="Sk:Float"/>
+ <!-- @attribute typeface Typeface specifies a typeface element with this ID. -->
+ <xs:attribute name="typeface" type="Sk:Typeface"/>
+ <!-- @attribute underline Underline draws a line under the baseline of the text. -->
+ <xs:attribute name="underline" type="Sk:Boolean"/>
+ <!-- @attribute xfermode Xfermode specifies a transfer mode, one of @pattern. -->
+ <xs:attribute name="xfermode" type="Sk:Xfermode"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
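+
+ <!-- Illustrative fragment (not part of the schema): a paint that strokes with
+ antialiasing. The id value and the boolean and number literal forms are assumptions.
+ <paint id="outline" antiAlias="true" stroke="true" strokeWidth="2"/>
+ -->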
+
+ <!-- /** path
+ Path creates a geometry out of lines and curves.
+ */ -->
+ <xs:element name="path">
+ <xs:complexType>
+ <xs:choice maxOccurs="unbounded">
+ <!-- @element addCircle Adds a circle to the path. -->
+ <xs:element ref="Sk:addCircle"/>
+ <!-- @element addOval Adds an oval to the path. -->
+ <xs:element ref="Sk:addOval"/>
+ <!-- @element addPath Adds another path to the path. -->
+ <xs:element ref="Sk:addPath"/>
+ <!-- @element addRoundRect Adds a rounded-corner rectangle to the path. -->
+ <xs:element ref="Sk:addRoundRect"/>
+ <!-- @element close Connects the last point on the path to the first. -->
+ <xs:element ref="Sk:close"/>
+ <!-- @element cubicTo Extends the path with a cubic curve. -->
+ <xs:element ref="Sk:cubicTo"/>
+ <!-- @element lineTo Extends the path with a line. -->
+ <xs:element ref="Sk:lineTo"/>
+ <!-- @element moveTo Starts a new path contour. -->
+ <xs:element ref="Sk:moveTo"/>
+ <!-- @element quadTo Extends the path with a quadratic curve. -->
+ <xs:element ref="Sk:quadTo"/>
+ <!-- @element rCubicTo Extends the path with a cubic curve expressed with relative offsets. -->
+ <xs:element ref="Sk:rCubicTo"/>
+ <!-- @element rLineTo Extends the path with a line expressed with relative offsets. -->
+ <xs:element ref="Sk:rLineTo"/>
+ <!-- @element rMoveTo Starts a new path contour relative to the path's last point. -->
+ <xs:element ref="Sk:rMoveTo"/>
+ <!-- @element rQuadTo Extends the path with a quadratic curve expressed with relative offsets. -->
+ <xs:element ref="Sk:rQuadTo"/>
+ </xs:choice>
+ <!-- @attribute d Creates a path using SVG path notation. -->
+ <xs:attribute name="d" type="Sk:String"/>
+ <!-- @attribute fillType One of @pattern. -->
+ <xs:attribute name="fillType" type="Sk:FillType"/>
+ <!-- @attribute length Returns the length of the path. -->
+ <xs:attribute name="length" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
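+
+ <!-- Illustrative fragment (not part of the schema): a closed triangular path.
+ The id and number literals are assumptions; lineTo is assumed to take x and y
+ attributes like moveTo, and close is assumed to take none. The same shape could
+ also be written with the d attribute in SVG path notation.
+ <path id="triangle">
+ <moveTo x="0" y="0"/>
+ <lineTo x="50" y="0"/>
+ <lineTo x="25" y="40"/>
+ <close/>
+ </path>
+ -->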
+
+ <!-- /** pathEffect
+ PathEffect cancels any current path effect within the paint, such as dashing or discrete.
+ */ -->
+ <xs:element name="pathEffect">
+ <xs:complexType>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** point
+ Point describes a two dimensional point in space. The point element can be added
+ to the display list and drawn.
+ */ -->
+ <xs:element name="point">
+ <xs:complexType>
+ <!-- @attribute x The x coordinate of the point. -->
+ <xs:attribute name="x" type="Sk:Float"/>
+ <!-- @attribute y The y coordinate of the point. -->
+ <xs:attribute name="y" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** polygon
+ Polygon creates a geometry out of lines. Polygon is a specialization of path; an element that
+ refers to a path can also refer to a polygon. A polygon specified through elements behaves identically
+ to a path. A polygon specified by the points attribute contains a single contour, and the contour is
+ automatically closed.
+ */ -->
+ <xs:element name="polygon">
+ <xs:complexType>
+ <xs:choice maxOccurs="unbounded">
+ <!-- @element close Connects the last point on the path to the first. -->
+ <xs:element ref="Sk:close"/>
+ <!-- @element addPath Adds another path to the path. -->
+ <xs:element ref="Sk:addPath"/>
+ <!-- @element lineTo Extends the path with a line. -->
+ <xs:element ref="Sk:lineTo"/>
+ <!-- @element moveTo Starts a new path contour. -->
+ <xs:element ref="Sk:moveTo"/>
+ <!-- @element rLineTo Extends the path with a line expressed with relative offsets. -->
+ <xs:element ref="Sk:rLineTo"/>
+ <!-- @element rMoveTo Starts a new path contour relative to the path's last point. -->
+ <xs:element ref="Sk:rMoveTo"/>
+ </xs:choice>
+ <!-- @attribute points An array of values that describe a sequence of points, compatible with SVG. -->
+ <xs:attribute name="points" type="Sk:FloatArray"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
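+
+ <!-- Illustrative fragment (not part of the schema): a polygon given through the
+ points attribute; the contour is closed automatically. The id value and the
+ exact array literal syntax are assumptions.
+ <polygon id="tri" points="[0,0,50,0,25,40]"/>
+ -->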
+
+ <!-- /** polyline
+ Polyline creates a geometry out of lines. Polyline is a specialization of path; an element that
+ refers to a path can also refer to a polyline. A polyline specified through elements behaves identically
+ to a path. A polyline specified by the points attribute contains a single contour, and the contour is
+ not automatically closed.
+ */ -->
+ <xs:element name="polyline">
+ <xs:complexType>
+ <xs:choice maxOccurs="unbounded">
+ <!-- @element close Connects the last point on the path to the first. -->
+ <xs:element ref="Sk:close"/>
+ <!-- @element addPath Adds another path to the path. -->
+ <xs:element ref="Sk:addPath"/>
+ <!-- @element lineTo Extends the path with a line. -->
+ <xs:element ref="Sk:lineTo"/>
+ <!-- @element moveTo Starts a new path contour. -->
+ <xs:element ref="Sk:moveTo"/>
+ <!-- @element rLineTo Extends the path with a line expressed with relative offsets. -->
+ <xs:element ref="Sk:rLineTo"/>
+ <!-- @element rMoveTo Starts a new path contour relative to the path's last point. -->
+ <xs:element ref="Sk:rMoveTo"/>
+ </xs:choice>
+ <!-- @attribute points An array of values that describe a sequence of points, compatible with SVG. -->
+ <xs:attribute name="points" type="Sk:FloatArray"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** polyToPoly
+ PolyToPoly creates a matrix which maps points proportionally from one polygon to the other.
+ */ -->
+ <xs:element name="polyToPoly">
+ <xs:complexType>
+ <xs:choice maxOccurs="2">
+ <xs:element ref="Sk:polygon"/>
+ </xs:choice>
+ <!-- @attribute source The polygon to map from. -->
+ <xs:attribute name="source" type="Sk:polygon"/>
+ <!-- @attribute destination The polygon to map to. -->
+ <xs:attribute name="destination" type="Sk:polygon"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** post
+ Post activates an event. The event can trigger one or more actions, and can carry a data payload.
+ */ -->
+ <xs:element name="post">
+ <xs:complexType>
+ <xs:choice maxOccurs="unbounded">
+ <xs:element ref="Sk:data"/>
+ </xs:choice>
+ <!-- @attribute delay Time in seconds that must elapse before the target event is activated. -->
+ <xs:attribute name="delay" type="Sk:MSec"/>
+ <!-- @attribute mode One of @pattern. @patternDescription -->
+ <xs:attribute name="mode" type="Sk:EventMode"/>
+ <!-- @attribute sink The optional named EventSink to direct the event to. -->
+ <xs:attribute name="sink" type="Sk:String"/>
+ <!-- @attribute target The ID of the user event to trigger. -->
+ <xs:attribute name="target" type="Sk:String"/>
+ <!-- @attribute type The name of the external event to post. -->
+ <xs:attribute name="type" type="Sk:String"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
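+
+ <!-- Illustrative fragment (not part of the schema): post an event to a user
+ event element after a delay. The target value "nextFrame" is a hypothetical
+ event id, and the delay literal is an assumption.
+ <post target="nextFrame" delay="1"/>
+ -->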
+
+ <!-- /** quadTo
+ QuadTo adds a quadratic curve to a path.
+ */ -->
+ <xs:element name="quadTo">
+ <xs:complexType>
+ <!-- @attribute x1 The x position of the off-curve point. -->
+ <xs:attribute name="x1" type="Sk:Float"/>
+ <!-- @attribute x2 The x position of the final point. -->
+ <xs:attribute name="x2" type="Sk:Float"/>
+ <!-- @attribute y1 The y position of the off-curve point. -->
+ <xs:attribute name="y1" type="Sk:Float"/>
+ <!-- @attribute y2 The y position of the final point. -->
+ <xs:attribute name="y2" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** rCubicTo
+ RCubicTo adds a cubic to the path, using the last point in the path as the first point of the cubic. The
+ added points are offsets from the last point in the path.
+ */ -->
+ <xs:element name="rCubicTo">
+ <xs:complexType>
+ <!-- @attribute x1 The x offset of the first off-curve point. -->
+ <xs:attribute name="x1" type="Sk:Float"/>
+ <!-- @attribute x2 The x offset of the second off-curve point. -->
+ <xs:attribute name="x2" type="Sk:Float"/>
+ <!-- @attribute x3 The x offset of the final on-curve point. -->
+ <xs:attribute name="x3" type="Sk:Float"/>
+ <!-- @attribute y1 The y offset of the first off-curve point. -->
+ <xs:attribute name="y1" type="Sk:Float"/>
+ <!-- @attribute y2 The y offset of the second off-curve point. -->
+ <xs:attribute name="y2" type="Sk:Float"/>
+ <!-- @attribute y3 The y offset of the final on-curve point. -->
+ <xs:attribute name="y3" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** rLineTo
+ RLineTo adds a line from the last point in a path to the specified point. The specified
+ point is relative to the last point in the path.
+ */ -->
+ <xs:element name="rLineTo">
+ <xs:complexType>
+ <!-- @attribute x The final path x coordinate. -->
+ <xs:attribute name="x" type="Sk:Float"/>
+ <!-- @attribute y The final path y coordinate. -->
+ <xs:attribute name="y" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** rMoveTo
+ RMoveTo specifies the first point in a path contour. The specified
+ point is relative to the last point in the path.
+ */ -->
+ <xs:element name="rMoveTo">
+ <xs:complexType>
+ <!-- @attribute x The point's x coordinate. -->
+ <xs:attribute name="x" type="Sk:Float"/>
+ <!-- @attribute y The point's y coordinate. -->
+ <xs:attribute name="y" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** rQuadTo
+ RQuadTo adds a quadratic curve to a path. The quadratic
+ points are relative to the last point in the path.
+ */ -->
+ <xs:element name="rQuadTo">
+ <xs:complexType>
+ <!-- @attribute x1 The x position of the off-curve point. -->
+ <xs:attribute name="x1" type="Sk:Float"/>
+ <!-- @attribute x2 The x position of the final point. -->
+ <xs:attribute name="x2" type="Sk:Float"/>
+ <!-- @attribute y1 The y position of the off-curve point. -->
+ <xs:attribute name="y1" type="Sk:Float"/>
+ <!-- @attribute y2 The y position of the final point. -->
+ <xs:attribute name="y2" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** radialGradient
+ RadialGradient sets the paint shader to ramp between two or more colors in concentric circles.
+ */ -->
+ <xs:element name="radialGradient">
+ <xs:complexType>
+ <xs:choice maxOccurs="unbounded">
+ <xs:element ref="Sk:color"/>
+ <xs:element ref="Sk:matrix"/>
+ </xs:choice>
+ <!-- @attribute matrix Matrix applies a 3x3 transform to the gradient. -->
+ <xs:attribute name="matrix" type="Sk:Matrix"/>
+ <!-- @attribute tileMode One of @pattern. @patternDescription -->
+ <xs:attribute name="tileMode" type="Sk:TileMode"/>
+ <!-- @attribute center The center point of the radial gradient. -->
+ <xs:attribute name="center" type="Sk:Point"/>
+ <!-- @attribute offsets An optional array of values used to bias the colors. The first entry
+ in the array must be 0.0, the last must be 1.0, and intermediate values must ascend. -->
+ <xs:attribute name="offsets" type="Sk:FloatArray"/>
+ <!-- @attribute radius The distance from the first color to the last color. -->
+ <xs:attribute name="radius" type="Sk:Float"/>
+ <!-- @attribute unitMapper A script that returns the mapping for [0,1] for the gradient.
+ The script can use the predefined variable 'unit' to compute the mapping. For instance,
+ "unit*unit" squares the value (while still keeping it in the range of [0,1].) The computed number
+ is pinned to from 0 to 1 after the script is executed. -->
+ <xs:attribute name="unitMapper" type="Sk:String"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** random
+ Random generates a random number, from min to max. Each time the random attribute is
+ read, a new random number is generated.
+ */ -->
+ <xs:element name="random">
+ <xs:complexType>
+ <!-- @attribute blend The random bias from 0.0 to 1.0.
+ 0.0 biases the number towards the start and end of the range.
+ 1.0 (the default) generates a linear distribution. -->
+ <xs:attribute name="blend" type="Sk:Float"/>
+ <!-- @attribute max The largest value to generate. -->
+ <xs:attribute name="max" type="Sk:Float"/>
+ <!-- @attribute min The smallest value to generate. -->
+ <xs:attribute name="min" type="Sk:Float"/>
+ <!-- @attribute random The generated value. -->
+ <xs:attribute name="random" type="Sk:Float"/>
+ <!-- @attribute seed The random seed. Identical seeds generate the same series of
+ numbers. -->
+ <xs:attribute name="seed" type="Sk:Int"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** rect
+ Rect describes a bounding box.
+ The width and height attributes compute the rectangle's right and bottom edges when the rectangle
+ description is first seen. Animating the rectangle's left or top will not recompute the right or bottom
+ if the width or height has been specified.
+ */ -->
+ <xs:element name="rect">
+ <xs:complexType>
+ <!-- @attribute bottom The bottom edge of the rectangle. -->
+ <xs:attribute name="bottom" type="Sk:Float"/>
+ <!-- @attribute height The height of the rectangle. Setting height computes the
+ bottom attribute from the top attribute. -->
+ <xs:attribute name="height" type="Sk:Float"/>
+ <!-- @attribute left The left edge of the rectangle. -->
+ <xs:attribute name="left" type="Sk:Float"/>
+ <!-- @attribute needsRedraw Set to true if last draw was visible. -->
+ <xs:attribute name="needsRedraw" type="Sk:Boolean"/>
+ <!-- @attribute right The right edge of the rectangle. -->
+ <xs:attribute name="right" type="Sk:Float"/>
+ <!-- @attribute top The top edge of the rectangle. -->
+ <xs:attribute name="top" type="Sk:Float"/>
+ <!-- @attribute width The width of the rectangle. -->
+ <xs:attribute name="width" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** rectToRect
+ RectToRect adds a matrix to map one rectangle's coordinates to another.
+ */ -->
+ <xs:element name="rectToRect">
+ <xs:complexType>
+ <xs:choice maxOccurs="2">
+ <xs:element ref="Sk:rect"/>
+ </xs:choice>
+ <!-- @attribute source The rectangle to map from. -->
+ <xs:attribute name="source" type="Sk:rect"/>
+ <!-- @attribute destination The rectangle to map to. -->
+ <xs:attribute name="destination" type="Sk:rect"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
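+
+ <!-- Illustrative fragment (not part of the schema): map the unit square onto a
+ destination rectangle. It is assumed that the first child rect is the source and
+ the second is the destination; the number literals are assumptions as well.
+ <rectToRect>
+ <rect left="0" top="0" right="1" bottom="1"/>
+ <rect left="10" top="10" right="110" bottom="60"/>
+ </rectToRect>
+ -->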
+
+ <!-- /** remove
+ Remove an item from the display list.
+ If where is specified, the first occurrence of where in the display list is removed.
+ If offset and where are specified, the element at where plus offset is removed.
+ A positive offset without where removes the element at the start of the list plus offset.
+ A negative offset without where removes the element at the end of the list minus offset.
+ */ -->
+ <xs:element name="remove">
+ <xs:complexType>
+ <!-- @attribute delete If true, reverse the action of apply's attribute mode="create".
+ (Experimental.) -->
+ <xs:attribute name="delete" type="Sk:Boolean"/>
+ <!-- @attribute offset The destination position using the rules listed above. -->
+ <xs:attribute name="offset" type="Sk:Int"/>
+ <!-- @attribute where The ID of the first display list entry to remove. -->
+ <xs:attribute name="where" type="Sk:Drawable"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** replace
+ Replace an item in the display list.
+ If where is specified, the first occurrence of where in the display list is replaced by use.
+ If offset and where are specified, the element at where plus offset is replaced by use.
+ A positive offset without where replaces the element at the start of the list plus offset.
+ A negative offset without where replaces the element at the end of the list minus offset.
+ */ -->
+ <xs:element name="replace">
+ <xs:complexType>
+ <!-- @attribute mode Has no effect. -->
+ <xs:attribute name="mode" type="Sk:AddMode"/>
+ <!-- @attribute offset The destination position using the rules listed above. -->
+ <xs:attribute name="offset" type="Sk:Int"/>
+ <!-- @attribute use The element to be added to the display list. -->
+ <xs:attribute name="use" type="Sk:Drawable"/>
+ <!-- @attribute where The ID of the first display list entry to replace. -->
+ <xs:attribute name="where" type="Sk:Drawable"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** rotate
+ Rotate creates a matrix that rotates a unit vector about a center point; the matrix is concatenated
+ with the containing matrix.
+ */ -->
+ <xs:element name="rotate">
+ <xs:complexType>
+ <!-- @attribute center A point the rotation is centered about; by default, [0.0, 0.0]. -->
+ <xs:attribute name="center" type="Sk:Point"/>
+ <!-- @attribute degrees The rotation in degrees. -->
+ <xs:attribute name="degrees" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** roundRect
+ RoundRect creates a rectangle with rounded corners. The rounded corners are specified by
+ two axes, which describe a quarter-section of the oval used in each corner.
+ The width and height attributes compute the rectangle's right and bottom edges when the rectangle
+ description is first seen. Animating the rectangle's left or top will not recompute the right or bottom
+ if the width or height has been specified.
+ */ -->
+ <xs:element name="roundRect">
+ <xs:complexType>
+ <!-- @attribute bottom The bottom edge of the rectangle. -->
+ <xs:attribute name="bottom" type="Sk:Float"/>
+ <!-- @attribute height The height of the rectangle. Setting height computes the
+ bottom attribute from the top attribute. -->
+ <xs:attribute name="height" type="Sk:Float"/>
+ <!-- @attribute left The left edge of the rectangle. -->
+ <xs:attribute name="left" type="Sk:Float"/>
+ <!-- @attribute needsRedraw Set to true if last draw was visible. -->
+ <xs:attribute name="needsRedraw" type="Sk:Boolean"/>
+ <!-- @attribute right The right edge of the rectangle. -->
+ <xs:attribute name="right" type="Sk:Float"/>
+ <!-- @attribute top The top edge of the rectangle. -->
+ <xs:attribute name="top" type="Sk:Float"/>
+ <!-- @attribute rx The radius of the corners on the x axis. -->
+ <xs:attribute name="rx" type="Sk:Float"/>
+ <!-- @attribute ry The radius of the corners on the y axis. -->
+ <xs:attribute name="ry" type="Sk:Float"/>
+ <!-- @attribute width The width of the rectangle. Setting width computes the
+ right attribute from the left attribute. -->
+ <xs:attribute name="width" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
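+
+ <!-- Illustrative fragment (not part of the schema): a rectangle with rounded
+ corners. The id value and number literals are assumptions.
+ <roundRect id="button" left="0" top="0" right="100" bottom="40" rx="8" ry="8"/>
+ -->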
+
+ <!-- /** save
+ The save element collects a series of elements into a group. The state of the paint and
+ canvas is saved, so that edits to the paint and canvas within the group are restored
+ to their original values at the end of the group.
+ The save element can be referenced
+ or defined within elements, like apply, which operate on any kind of element. Groups
+ may contain groups.
+ */ -->
+ <xs:element name="save">
+ <xs:complexType>
+ <xs:choice maxOccurs="unbounded">
+ <xs:element ref="Sk:add"/>
+ <xs:element ref="Sk:apply"/>
+ <xs:element ref="Sk:array"/>
+ <xs:element ref="Sk:bitmap"/>
+ <xs:element ref="Sk:boolean"/>
+ <xs:element ref="Sk:bounds"/>
+ <!-- <xs:element ref="Sk3D:camera"/> -->
+ <xs:element ref="Sk:clear"/>
+ <xs:element ref="Sk:clip"/>
+ <xs:element ref="Sk:color"/>
+ <xs:element ref="Sk:drawTo"/>
+ <xs:element ref="Sk:float"/>
+ <xs:element ref="Sk:full"/>
+ <xs:element ref="Sk:group"/>
+ <xs:element ref="Sk:hitClear"/>
+ <xs:element ref="Sk:hitTest"/>
+ <xs:element ref="Sk:image"/>
+ <xs:element ref="Sk:int"/>
+ <xs:element ref="Sk:line"/>
+ <xs:element ref="Sk:matrix"/>
+ <xs:element ref="Sk:move"/>
+ <xs:element ref="Sk:oval"/>
+ <xs:element ref="Sk:paint"/>
+ <!-- <xs:element ref="Sk:patch"/> -->
+ <xs:element ref="Sk:path"/>
+ <xs:element ref="Sk:point"/>
+ <xs:element ref="Sk:polygon"/>
+ <xs:element ref="Sk:polyline"/>
+ <xs:element ref="Sk:post"/>
+ <xs:element ref="Sk:random"/>
+ <xs:element ref="Sk:rect"/>
+ <xs:element ref="Sk:remove"/>
+ <xs:element ref="Sk:replace"/>
+ <xs:element ref="Sk:roundRect"/>
+ <xs:element ref="Sk:save"/>
+ <xs:element ref="Sk:set"/>
+ <xs:element ref="Sk:snapshot"/>
+ <xs:element ref="Sk:string"/>
+ <xs:element ref="Sk:text"/>
+ <xs:element ref="Sk:textBox"/>
+ <xs:element ref="Sk:textOnPath"/>
+ <xs:element ref="Sk:textToPath"/>
+ </xs:choice>
+ <!-- @attribute condition If present and zero, the contained elements are ignored. -->
+ <xs:attribute name="condition" type="Sk:DynamicString"/>
+ <!-- @attribute enableCondition If present and zero, the contained elements are ignored
+ when enabled. -->
+ <xs:attribute name="enableCondition" type="Sk:DynamicString"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
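+
+ <!-- Illustrative fragment (not part of the schema): a group that sets a paint
+ and adds a rectangle; the paint change is undone when the group ends. The id
+ values and literals are assumptions.
+ <save id="frame">
+ <paint antiAlias="true"/>
+ <rect left="0" top="0" right="64" bottom="64"/>
+ </save>
+ -->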
+
+ <!-- /** scale
+ Scale creates a matrix that scales a unit vector about a center point; the matrix is concatenated
+ with the containing matrix.
+ */ -->
+ <xs:element name="scale">
+ <xs:complexType>
+ <!-- @attribute center A point the scale is centered about; by default, [0.0, 0.0]. -->
+ <xs:attribute name="center" type="Sk:Point"/>
+ <!-- @attribute x The factor all x values are scaled by; by default, 1.0. -->
+ <xs:attribute name="x" type="Sk:Float"/>
+ <!-- @attribute y The factor all y values are scaled by; by default, 1.0. -->
+ <xs:attribute name="y" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** screenplay
+ Screenplay contains all events and elements referenced by the events.
+ A document may only contain a single screenplay element.
+ */ -->
+ <xs:element name="screenplay">
+ <xs:complexType>
+ <xs:choice maxOccurs="unbounded" >
+ <xs:element ref="Sk:add"/>
+ <xs:element ref="Sk:apply"/>
+ <xs:element ref="Sk:array"/>
+ <xs:element ref="Sk:bitmap"/>
+ <xs:element ref="Sk:boolean"/>
+ <xs:element ref="Sk:bounds"/>
+ <!-- <xs:element ref="Sk3D:camera"/> -->
+ <xs:element ref="Sk:clear"/>
+ <xs:element ref="Sk:clip"/>
+ <xs:element ref="Sk:color"/>
+ <xs:element ref="Sk:drawTo"/>
+ <xs:element ref="Sk:event"/>
+ <xs:element ref="Sk:float"/>
+ <xs:element ref="Sk:full"/>
+ <xs:element ref="Sk:group"/>
+ <xs:element ref="Sk:hitClear"/>
+ <xs:element ref="Sk:hitTest"/>
+ <xs:element ref="Sk:image"/>
+ <xs:element ref="Sk:include"/>
+ <xs:element ref="Sk:int"/>
+ <xs:element ref="Sk:line"/>
+ <xs:element ref="Sk:matrix"/>
+ <xs:element ref="Sk:move"/>
+ <xs:element ref="Sk:movie"/>
+ <xs:element ref="Sk:oval"/>
+ <xs:element ref="Sk:paint"/>
+ <!-- <xs:element ref="Sk:patch"/> -->
+ <xs:element ref="Sk:path"/>
+ <xs:element ref="Sk:point"/>
+ <xs:element ref="Sk:polygon"/>
+ <xs:element ref="Sk:polyline"/>
+ <xs:element ref="Sk:post"/>
+ <xs:element ref="Sk:random"/>
+ <xs:element ref="Sk:rect"/>
+ <xs:element ref="Sk:remove"/>
+ <xs:element ref="Sk:replace"/>
+ <xs:element ref="Sk:roundRect"/>
+ <xs:element ref="Sk:save"/>
+ <xs:element ref="Sk:set"/>
+ <xs:element ref="Sk:snapshot"/>
+ <xs:element ref="Sk:string"/>
+ <xs:element ref="Sk:text"/>
+ <xs:element ref="Sk:textBox"/>
+ <xs:element ref="Sk:textOnPath"/>
+ <xs:element ref="Sk:textToPath"/>
+ </xs:choice>
+ <!-- @attribute time The time of the draw (readable from script; not part of the document XML) -->
+ <xs:attribute name="time" type="Sk:MSec"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** set
+ Set animates the target element's attribute directly to the specified value.
+ */ -->
+ <xs:element name="set">
+ <xs:complexType>
+ <!-- @attribute begin An optional offset that must elapse before the animation begins. The apply
+ begin attribute is added to any animator's begin attribute. -->
+ <xs:attribute name="begin" type="Sk:MSec"/>
+ <!-- @attribute dur The duration of the animation in milliseconds. -->
+ <xs:attribute name="dur" type="Sk:MSec"/>
+ <!-- @attribute dynamic If true, restart the animation if any of the simple values the
+ 'lval' or 'to' attributes reference are changed. Simple values are contained by the array, boolean, float, int,
+ and string elements. -->
+ <!-- @attribute dynamic [Deprecated.] -->
+ <xs:attribute name="dynamic" type="Sk:Boolean" />
+ <!-- @attribute field The attribute to animate. -->
+ <xs:attribute name="field" type="Sk:String"/>
+ <!-- @attribute formula A script to execute over time to compute the field's value. Typically,
+ the formula is a script expression which includes a reference to the time attribute of the
+ containing apply element. -->
+ <xs:attribute name="formula" type="Sk:DynamicString"/>
+ <!-- @attribute lval An expression evaluating to the attribute to animate.
+ If present, lval overrides 'field'. The expression is typically an array element,
+ e.g. lval="x[y]" . -->
+ <xs:attribute name="lval" type="Sk:DynamicString"/>
+ <!-- @attribute reset If true, the computed value is the initial value after the
+ animation is complete. If false, or by default, the computed value is the final value
+ after the animation is complete. -->
+ <xs:attribute name="reset" type="Sk:Boolean"/>
+ <!-- @attribute step When apply's attribute mode="immediate" or "create", the step attribute can be read by
+ script to determine the current animation iteration. -->
+ <xs:attribute name="step" type="Sk:Int" />
+ <!-- @attribute target The element to animate. By default, the element contained by the apply
+ or referenced by the apply's scope attribute is the animate target. -->
+ <xs:attribute name="target" type="Sk:DynamicString"/>
+ <!-- @attribute to The ending value (requires a 'from' attribute) -->
+ <xs:attribute name="to" type="Sk:DynamicString"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
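+
+ <!-- Illustrative fragment (not part of the schema): set the left attribute of a
+ hypothetical rect with id "box" to a new value. The target, field, and to literals
+ are assumptions.
+ <set target="box" field="left" to="25"/>
+ -->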
+
+ <!-- /** skew
+ Skew creates a matrix that skews a unit vector about a center point; the matrix is concatenated
+ with the containing matrix.
+ */ -->
+ <xs:element name="skew">
+ <xs:complexType>
+ <!-- @attribute center A point the skew is centered about; by default, [0.0, 0.0]. -->
+ <xs:attribute name="center" type="Sk:Point"/>
+ <!-- @attribute x The factor all x values are skewed by; by default, 0.0. -->
+ <xs:attribute name="x" type="Sk:Float"/>
+ <!-- @attribute y The factor all y values are skewed by; by default, 0.0. -->
+ <xs:attribute name="y" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** snapshot
+ Snapshot creates an image file containing the display list.
+ */ -->
+ <xs:element name="snapshot">
+ <xs:complexType>
+ <!-- @attribute filename The name of the file to generate. -->
+ <xs:attribute name="filename" type="Sk:String"/>
+ <!-- @attribute quality The quality of the image, from 0 to 100. -->
+ <xs:attribute name="quality" type="Sk:Float"/>
+ <!-- @attribute sequence Set to true to number the filenames sequentially. -->
+ <xs:attribute name="sequence" type="Sk:Boolean"/>
+ <!-- @attribute type One of @pattern. The type of encoding to use. -->
+ <xs:attribute name="type" type="Sk:BitmapEncoding"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** string
+ String contains an array of characters.
+ */ -->
+ <xs:element name="string" >
+ <xs:complexType>
+ <!-- @attribute length The number of characters in the string (read only). -->
+ <xs:attribute name="length" type="Sk:Int"/>
+ <!-- @attribute slice An ECMAScript compatible function that returns part of the string. -->
+ <xs:attribute name="slice" type="Sk:String"/>
+ <!-- @attribute value The string itself. -->
+ <xs:attribute name="value" type="Sk:String"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** text
+ A drawable string with a position.
+ */ -->
+ <xs:element name="text">
+ <xs:complexType>
+ <!-- @attribute length The number of characters in the string (read only). -->
+ <xs:attribute name="length" type="Sk:Int"/>
+ <!-- @attribute text The string itself. -->
+ <xs:attribute name="text" type="Sk:String"/>
+ <!-- @attribute x The x coordinate of the string. -->
+ <xs:attribute name="x" type="Sk:Float"/>
+ <!-- @attribute y The y coordinate of the string. -->
+ <xs:attribute name="y" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
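+
+ <!-- Illustrative fragment (not part of the schema): a positioned string. The id
+ value and literals are assumptions.
+ <text id="label" text="Hello" x="20" y="40"/>
+ -->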
+
+ <!-- /** textBox
+ A drawable string fit into a box.
+ */ -->
+ <xs:element name="textBox" >
+ <xs:complexType>
+ <!-- @attribute bottom The bottom of the box. -->
+ <xs:attribute name="bottom" type="Sk:Float"/>
+ <!-- @attribute height The height of the box, computed from top and bottom. -->
+ <xs:attribute name="height" type="Sk:Float"/>
+ <!-- @attribute left The left side of the box. -->
+ <xs:attribute name="left" type="Sk:Float"/>
+ <!-- @attribute mode One of @pattern. -->
+ <xs:attribute name="mode" type="Sk:TextBoxMode"/>
+ <!-- @attribute needsRedraw Set to true if last draw was visible. -->
+ <xs:attribute name="needsRedraw" type="Sk:Boolean"/>
+ <!-- @attribute right The right side of the box. -->
+ <xs:attribute name="right" type="Sk:Float"/>
+ <!-- @attribute spacingAdd The extra spacing between lines. -->
+ <xs:attribute name="spacingAdd" type="Sk:Float"/>
+ <!-- @attribute spacingAlign One of @pattern. -->
+ <xs:attribute name="spacingAlign" type="Sk:TextBoxAlign"/>
+ <!-- @attribute spacingMul The line spacing scaled by the text height. -->
+ <xs:attribute name="spacingMul" type="Sk:Float"/>
+ <!-- @attribute text The text to fit to the box. -->
+ <xs:attribute name="text" type="Sk:String"/>
+ <!-- @attribute top The top of the box. -->
+ <xs:attribute name="top" type="Sk:Float"/>
+ <!-- @attribute width The width of the box, computed from left and right. -->
+ <xs:attribute name="width" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** textOnPath
+ TextOnPath specifies the baseline for a string of text with a path.
+ */ -->
+ <xs:element name="textOnPath">
+ <xs:complexType>
+ <xs:choice >
+ <xs:element ref="Sk:text" minOccurs="0" />
+ <xs:element ref="Sk:path" minOccurs="0" />
+ </xs:choice>
+ <!-- @attribute offset The distance along the path to place the first text character. -->
+ <xs:attribute name="offset" type="Sk:Float"/>
+ <!-- @attribute path The baseline of the text. -->
+ <xs:attribute name="path" type="Sk:Path"/>
+ <!-- @attribute text The text to place along the path. -->
+ <xs:attribute name="text" type="Sk:Text"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** textToPath
+ TextToPath sets the path to the contours described by the text's glyphs, using the current paint.
+ */ -->
+ <xs:element name="textToPath">
+ <xs:complexType>
+ <xs:choice >
+ <xs:element ref="Sk:text" minOccurs="0" />
+ <xs:element ref="Sk:paint" minOccurs="0" />
+ <xs:element ref="Sk:path" minOccurs="0" />
+ </xs:choice>
+ <!-- @attribute paint The paint selects the text font, size and other text properties. -->
+ <xs:attribute name="paint" type="Sk:Paint"/>
+ <!-- @attribute path The reference to the path element where the text as path is stored. -->
+ <xs:attribute name="path" type="Sk:Path"/>
+ <!-- @attribute text The reference to the text element to turn into a path. -->
+ <xs:attribute name="text" type="Sk:Text"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** translate
+ Translate concatenates a translation-only matrix onto the current matrix.
+ */ -->
+ <xs:element name="translate">
+ <xs:complexType>
+ <!-- @attribute x The translation in x. -->
+ <xs:attribute name="x" type="Sk:Float"/>
+ <!-- @attribute y The translation in y. -->
+ <xs:attribute name="y" type="Sk:Float"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** transparentShader
+ TransparentShader uses the background for its paint. Works well with emboss.
+ */ -->
+ <xs:element name="transparentShader">
+ <xs:complexType>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <!-- /** typeface
+ Typeface describes the text font.
+ */ -->
+ <xs:element name="typeface">
+ <xs:complexType>
+ <!-- @attribute fontName The name of the font. -->
+ <xs:attribute name="fontName" type="Sk:String"/>
+ </xs:complexType>
+ </xs:element>
+
+</xs:schema>
+
diff --git a/gfx/skia/skia/src/animator/SkAnimateSchema.xsx b/gfx/skia/skia/src/animator/SkAnimateSchema.xsx
new file mode 100644
index 000000000..ceb7d890c
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkAnimateSchema.xsx
@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--This file is auto-generated by the XML Schema Designer. It holds layout information for components on the designer surface.-->
+<XSDDesignerLayout />
diff --git a/gfx/skia/skia/src/animator/SkAnimateSet.cpp b/gfx/skia/skia/src/animator/SkAnimateSet.cpp
new file mode 100644
index 000000000..c05e0299d
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkAnimateSet.cpp
@@ -0,0 +1,87 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkAnimateSet.h"
+#include "SkAnimateMaker.h"
+#include "SkAnimateProperties.h"
+#include "SkParse.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkSet::fInfo[] = {
+ SK_MEMBER(begin, MSec),
+ SK_MEMBER(dur, MSec),
+ SK_MEMBER_PROPERTY(dynamic, Boolean),
+ SK_MEMBER(field, String),
+// SK_MEMBER(formula, DynamicString),
+ SK_MEMBER(lval, DynamicString),
+// SK_MEMBER_PROPERTY(reset, Boolean),
+ SK_MEMBER_PROPERTY(step, Int),
+ SK_MEMBER(target, DynamicString),
+ SK_MEMBER(to, DynamicString)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkSet);
+
+SkSet::SkSet() {
+ dur = 1;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkSet::dump(SkAnimateMaker* maker) {
+ INHERITED::dump(maker);
+ if (dur != 1) {
+ SkDebugf("dur=\"%g\" ", dur * 0.001);
+ }
+ //don't want double />\n's
+ SkDebugf("/>\n");
+
+}
+#endif
+
+void SkSet::refresh(SkAnimateMaker& maker) {
+ fFieldInfo->setValue(maker, &fValues, 0, fFieldInfo->fCount, nullptr,
+ fFieldInfo->getType(), to);
+}
+
+void SkSet::onEndElement(SkAnimateMaker& maker) {
+ if (resolveCommon(maker) == false)
+ return;
+ if (fFieldInfo == nullptr) {
+ maker.setErrorCode(SkDisplayXMLParserError::kFieldNotInTarget);
+ return;
+ }
+ fReset = dur != 1;
+ SkDisplayTypes outType = fFieldInfo->getType();
+ int comps = outType == SkType_String || outType == SkType_DynamicString ? 1 :
+ (int)fFieldInfo->getSize((const SkDisplayable*) fTarget) / sizeof(int);
+ if (fValues.getType() == SkType_Unknown) {
+ fValues.setType(outType);
+ fValues.setCount(comps);
+ if (outType == SkType_String || outType == SkType_DynamicString)
+ fValues[0].fString = new SkString;
+ else
+ memset(fValues.begin(), 0, fValues.count() * sizeof(fValues.begin()[0]));
+ } else {
+ SkASSERT(fValues.getType() == outType);
+ if (fFieldInfo->fType == SkType_Array)
+ comps = fValues.count();
+ else {
+ SkASSERT(fValues.count() == comps);
+ }
+ }
+ if (formula.size() > 0) {
+ comps = 1;
+ outType = SkType_MSec;
+ }
+ fFieldInfo->setValue(maker, &fValues, fFieldOffset, comps, this, outType, formula.size() > 0 ? formula : to);
+ fComponents = fValues.count();
+}
diff --git a/gfx/skia/skia/src/animator/SkAnimateSet.h b/gfx/skia/skia/src/animator/SkAnimateSet.h
new file mode 100644
index 000000000..32a91979e
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkAnimateSet.h
@@ -0,0 +1,27 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkAnimateSet_DEFINED
+#define SkAnimateSet_DEFINED
+
+#include "SkAnimate.h"
+
+class SkSet : public SkAnimate {
+ DECLARE_MEMBER_INFO(Set);
+ SkSet();
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+ void onEndElement(SkAnimateMaker& ) override;
+ void refresh(SkAnimateMaker& ) override;
+private:
+ typedef SkAnimate INHERITED;
+};
+
+#endif // SkAnimateSet_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkAnimator.cpp b/gfx/skia/skia/src/animator/SkAnimator.cpp
new file mode 100644
index 000000000..c5aabbba4
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkAnimator.cpp
@@ -0,0 +1,704 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkAnimator.h"
+#include "SkAnimateMaker.h"
+#include "SkCanvas.h"
+#include "SkDisplayApply.h"
+#include "SkDisplayMovie.h"
+#include "SkDisplayTypes.h"
+#include "SkDisplayXMLParser.h"
+#include "SkStream.h"
+#include "SkScript.h"
+#include "SkScript2.h" // compiled script experiment
+#include "SkSystemEventTypes.h"
+#include "SkTypedArray.h"
+#ifdef SK_BUILD_FOR_ANDROID
+#include "SkDrawExtraPathEffect.h"
+#endif
+#ifdef SK_DEBUG
+#include "SkTime.h"
+#endif
+
+#if defined SK_BUILD_FOR_WIN32 && defined SK_DEBUG
+ #define _static
+ extern const char gMathPrimerText[];
+ extern const char gMathPrimerBinary[];
+#else
+ #define _static static
+#endif
+
+_static const char gMathPrimerText[] =
+"<screenplay>"
+ "<Math id=\"Math\"/>"
+ "<Number id=\"Number\"/>"
+"</screenplay>";
+
+#define gMathPrimer gMathPrimerText
+
+SkAnimator::SkAnimator() : fMaker(nullptr) {
+ initialize();
+}
+
+SkAnimator::~SkAnimator() { delete fMaker; }
+
+void SkAnimator::addExtras(SkExtras* extras) {
+ *fMaker->fExtras.append() = extras;
+}
+
+bool SkAnimator::appendStream(SkStream* stream) {
+ return decodeStream(stream);
+}
+
+bool SkAnimator::decodeMemory(const void* buffer, size_t size)
+{
+ fMaker->fFileName.reset();
+ SkDisplayXMLParser parser(*fMaker);
+ return parser.parse((const char*)buffer, size);
+}
+
+bool SkAnimator::decodeStream(SkStream* stream)
+{
+ SkDisplayXMLParser parser(*fMaker);
+ bool result = parser.parse(*stream);
+ fMaker->setErrorString();
+ return result;
+}
+
+bool SkAnimator::decodeDOM(const SkDOM& dom, const SkDOMNode* node)
+{
+ fMaker->fFileName.reset();
+ SkDisplayXMLParser parser(*fMaker);
+ return parser.parse(dom, node);
+}
+
+bool SkAnimator::decodeURI(const char uri[]) {
+// SkDebugf("animator decode %s\n", uri);
+
+// SkStream* stream = SkStream::GetURIStream(fMaker->fPrefix.c_str(), uri);
+ std::unique_ptr<SkStream> stream = SkStream::MakeFromFile(uri);
+ if (stream) {
+ this->setURIBase(uri);
+ return decodeStream(stream.get());
+ } else {
+ return false;
+ }
+}
+
+bool SkAnimator::doCharEvent(SkUnichar code) {
+ if (code == 0)
+ return false;
+ struct SkEventState state;
+ state.fCode = code;
+ fMaker->fEnableTime = fMaker->getAppTime();
+ bool result = fMaker->fEvents.doEvent(*fMaker, SkDisplayEvent::kKeyChar, &state);
+ fMaker->notifyInval();
+ return result;
+}
+
+bool SkAnimator::doClickEvent(int clickState, SkScalar x, SkScalar y) {
+ SkASSERT(clickState >= 0 && clickState <= 2);
+ struct SkEventState state;
+ state.fX = x;
+ state.fY = y;
+ fMaker->fEnableTime = fMaker->getAppTime();
+ bool result = fMaker->fEvents.doEvent(*fMaker,
+ clickState == 0 ? SkDisplayEvent::kMouseDown :
+ clickState == 1 ? SkDisplayEvent::kMouseDrag :
+ SkDisplayEvent::kMouseUp, &state);
+ fMaker->notifyInval();
+ return result;
+}
+
+bool SkAnimator::doKeyEvent(SkKey code) {
+ if (code == 0)
+ return false;
+ struct SkEventState state;
+ state.fCode = code;
+ fMaker->fEnableTime = fMaker->getAppTime();
+ bool result = fMaker->fEvents.doEvent(*fMaker, SkDisplayEvent::kKeyPress, &state);
+ fMaker->notifyInval();
+ return result;
+}
+
+bool SkAnimator::doKeyUpEvent(SkKey code) {
+ if (code == 0)
+ return false;
+ struct SkEventState state;
+ state.fCode = code;
+ fMaker->fEnableTime = fMaker->getAppTime();
+ bool result = fMaker->fEvents.doEvent(*fMaker, SkDisplayEvent::kKeyPressUp, &state);
+ fMaker->notifyInval();
+ return result;
+}
+
+bool SkAnimator::doUserEvent(const SkEvent& evt) {
+ fMaker->fEnableTime = fMaker->getAppTime();
+ return onEvent(evt);
+}
+
+SkAnimator::DifferenceType SkAnimator::draw(SkCanvas* canvas, SkPaint* paint, SkMSec time) {
+ if (paint == nullptr)
+ return draw(canvas, time);
+ fMaker->fScreenplay.time = time;
+ fMaker->fCanvas = canvas;
+ fMaker->fPaint = paint;
+ fMaker->fDisplayList.fHasUnion = false;
+ int result = fMaker->fDisplayList.draw(*fMaker, time);
+ if (result)
+ result += fMaker->fDisplayList.fHasUnion;
+ return (DifferenceType) result;
+}
+
+SkAnimator::DifferenceType SkAnimator::draw(SkCanvas* canvas, SkMSec time) {
+ SkPaint paint;
+ return draw(canvas, &paint, time);
+}
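+
+// Minimal host-side sketch (not part of this file): decode an animation from a
+// memory buffer and redraw it as time advances. The xml and canvas variables,
+// the hostIsRunning() loop, and the 16 ms step are assumptions for illustration.
+//
+//   SkAnimator animator;
+//   if (animator.decodeMemory(xml, strlen(xml))) {
+//       for (SkMSec time = 0; hostIsRunning(); time += 16) {
+//           animator.draw(canvas, time);   // overload that supplies a default SkPaint
+//       }
+//   }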
+
+#ifdef SK_DEBUG
+void SkAnimator::eventDone(const SkEvent& ) {
+}
+#endif
+
+bool SkAnimator::findClickEvent(SkScalar x, SkScalar y) {
+ struct SkEventState state;
+ state.fDisable = true;
+ state.fX = x;
+ state.fY = y;
+ fMaker->fEnableTime = fMaker->getAppTime();
+ bool result = fMaker->fEvents.doEvent(*fMaker, SkDisplayEvent::kMouseDown, &state);
+ fMaker->notifyInval();
+ return result;
+}
+
+const SkAnimator* SkAnimator::getAnimator(const SkDisplayable* displayable) const {
+ if (displayable->getType() != SkType_Movie)
+ return nullptr;
+ const SkDisplayMovie* movie = (const SkDisplayMovie*) displayable;
+ return movie->getAnimator();
+}
+
+const SkDisplayable* SkAnimator::getElement(const char* id) {
+ SkDisplayable* element;
+ if (fMaker->find(id, &element) == false)
+ return nullptr;
+ return (const SkDisplayable*) element;
+}
+
+SkElementType SkAnimator::getElementType(const SkDisplayable* ae) {
+ SkDisplayable* element = (SkDisplayable*) ae;
+ const SkMemberInfo* info = SkDisplayType::GetMembers(fMaker, element->getType(), nullptr);
+ return (SkElementType) SkDisplayType::Find(fMaker, info);
+}
+
+SkElementType SkAnimator::getElementType(const char* id) {
+ const SkDisplayable* element = getElement(id);
+ return getElementType(element);
+}
+
+const SkMemberInfo* SkAnimator::getField(const SkDisplayable* ae, const char* field) {
+ SkDisplayable* element = (SkDisplayable*) ae;
+ const SkMemberInfo* info = element->getMember(field);
+ return (const SkMemberInfo*) info;
+}
+
+const SkMemberInfo* SkAnimator::getField(const char* elementID, const char* field) {
+ const SkDisplayable* element = getElement(elementID);
+ return getField(element, field);
+}
+
+SkFieldType SkAnimator::getFieldType(const SkMemberInfo* ai) {
+ const SkMemberInfo* info = (const SkMemberInfo*) ai;
+ return (SkFieldType) info->getType();
+}
+
+SkFieldType SkAnimator::getFieldType(const char* id, const char* fieldID) {
+ const SkMemberInfo* field = getField(id, fieldID);
+ return getFieldType(field);
+}
+
+static bool getArrayCommon(const SkDisplayable* ae, const SkMemberInfo* ai,
+ int index, SkOperand* operand) {
+ const SkDisplayable* element = (const SkDisplayable*) ae;
+ const SkMemberInfo* info = (const SkMemberInfo*) ai;
+ SkASSERT(info->fType == SkType_Array);
+ return info->getArrayValue(element, index, operand);
+}
+
+int32_t SkAnimator::getArrayInt(const SkDisplayable* ae,
+ const SkMemberInfo* ai, int index) {
+ SkOperand operand;
+ bool result = getArrayCommon(ae, ai, index, &operand);
+ return result ? operand.fS32 : SK_NaN32;
+}
+
+int32_t SkAnimator::getArrayInt(const char* id, const char* fieldID, int index) {
+ const SkDisplayable* element = getElement(id);
+ if (element == nullptr)
+ return SK_NaN32;
+ const SkMemberInfo* field = getField(element, fieldID);
+ if (field == nullptr)
+ return SK_NaN32;
+ return getArrayInt(element, field, index);
+}
+
+SkScalar SkAnimator::getArrayScalar(const SkDisplayable* ae,
+ const SkMemberInfo* ai, int index) {
+ SkOperand operand;
+ bool result = getArrayCommon(ae, ai, index, &operand);
+ return result ? operand.fScalar : SK_ScalarNaN;
+}
+
+SkScalar SkAnimator::getArrayScalar(const char* id, const char* fieldID, int index) {
+ const SkDisplayable* element = getElement(id);
+ if (element == nullptr)
+ return SK_ScalarNaN;
+ const SkMemberInfo* field = getField(element, fieldID);
+ if (field == nullptr)
+ return SK_ScalarNaN;
+ return getArrayScalar(element, field, index);
+}
+
+const char* SkAnimator::getArrayString(const SkDisplayable* ae,
+ const SkMemberInfo* ai, int index) {
+ SkOperand operand;
+ bool result = getArrayCommon(ae, ai, index, &operand);
+ return result ? operand.fString->c_str() : nullptr;
+}
+
+const char* SkAnimator::getArrayString(const char* id, const char* fieldID, int index) {
+ const SkDisplayable* element = getElement(id);
+ if (element == nullptr)
+ return nullptr;
+ const SkMemberInfo* field = getField(element, fieldID);
+ if (field == nullptr)
+ return nullptr;
+ return getArrayString(element, field, index);
+}
+
+SkMSec SkAnimator::getInterval() {
+ return fMaker->fMinimumInterval == (SkMSec) -1 ? 0 : fMaker->fMinimumInterval;
+}
+
+void SkAnimator::getInvalBounds(SkRect* inval) {
+ if (fMaker->fDisplayList.fHasUnion) {
+ inval->fLeft = SkIntToScalar(fMaker->fDisplayList.fInvalBounds.fLeft);
+ inval->fTop = SkIntToScalar(fMaker->fDisplayList.fInvalBounds.fTop);
+ inval->fRight = SkIntToScalar(fMaker->fDisplayList.fInvalBounds.fRight);
+ inval->fBottom = SkIntToScalar(fMaker->fDisplayList.fInvalBounds.fBottom);
+ } else {
+ inval->fLeft = inval->fTop = -SK_ScalarMax;
+ inval->fRight = inval->fBottom = SK_ScalarMax;
+ }
+}
+
+const SkXMLParserError* SkAnimator::getParserError() {
+ return &fMaker->fError;
+}
+
+const char* SkAnimator::getParserErrorString() {
+ if (fMaker->fErrorString.size() == 0 && fMaker->fError.hasError())
+ fMaker->setErrorString();
+ return fMaker->fErrorString.c_str();
+}
+
+int32_t SkAnimator::getInt(const SkDisplayable* element, const SkMemberInfo* info) {
+ if (info->fType != SkType_MemberProperty) {
+ SkOperand operand;
+ if (info->getType() == SkType_Int) {
+ info->getValue(element, &operand, 1);
+ return operand.fS32;
+ }
+ return SK_NaN32;
+ }
+ SkScriptValue scriptValue;
+ bool success = element->getProperty(info->propertyIndex(), &scriptValue);
+ if (success && scriptValue.fType == SkType_Int)
+ return scriptValue.fOperand.fS32;
+ return SK_NaN32;
+}
+
+int32_t SkAnimator::getInt(const char* id, const char* fieldID) {
+ const SkDisplayable* element = getElement(id);
+ if (element == nullptr)
+ return SK_NaN32;
+ const SkMemberInfo* field = getField(element, fieldID);
+ if (field == nullptr)
+ return SK_NaN32;
+ return getInt(element, field);
+}
+
+SkScalar SkAnimator::getScalar(const SkDisplayable* element, const SkMemberInfo* info) {
+ if (info->fType != SkType_MemberProperty) {
+ SkOperand operand;
+ if (info->getType() == SkType_Float) {
+ info->getValue(element, &operand, 1);
+ return operand.fScalar;
+ }
+ return SK_ScalarNaN;
+ }
+ SkScriptValue scriptValue;
+ bool success = element->getProperty(info->propertyIndex(), &scriptValue);
+ if (success && scriptValue.fType == SkType_Float)
+ return scriptValue.fOperand.fScalar;
+ return SK_ScalarNaN;
+}
+
+SkScalar SkAnimator::getScalar(const char* id, const char* fieldID) {
+ const SkDisplayable* element = getElement(id);
+ if (element == nullptr)
+ return SK_ScalarNaN;
+ const SkMemberInfo* field = getField(element, fieldID);
+ if (field == nullptr)
+ return SK_ScalarNaN;
+ return getScalar(element, field);
+}
+
+const char* SkAnimator::getString(const SkDisplayable* ae,
+ const SkMemberInfo* ai) {
+ const SkDisplayable* element = (const SkDisplayable*) ae;
+ const SkMemberInfo* info = (const SkMemberInfo*) ai;
+ SkString* temp;
+ info->getString(element, &temp);
+ return temp->c_str();
+}
+
+const char* SkAnimator::getString(const char* id, const char* fieldID) {
+ const SkDisplayable* element = getElement(id);
+ if (element == nullptr)
+ return nullptr;
+ const SkMemberInfo* field = getField(element, fieldID);
+ if (field == nullptr)
+ return nullptr;
+ return getString(element, field);
+}
+
+const char* SkAnimator::getURIBase() {
+ return fMaker->fPrefix.c_str();
+}
+
+void SkAnimator::initialize() {
+ delete fMaker;
+ fMaker = new SkAnimateMaker(this, nullptr, nullptr);
+ decodeMemory(gMathPrimer, sizeof(gMathPrimer)-1);
+#ifdef SK_BUILD_FOR_ANDROID
+ InitializeSkExtraPathEffects(this);
+#endif
+}
+
+
+#ifdef SK_DEBUG
+bool SkAnimator::isTrackingEvents() {
+ return false;
+}
+#endif
+
+bool SkAnimator::onEvent(const SkEvent& evt) {
+#ifdef SK_DEBUG
+ SkAnimator* root = fMaker->getRoot();
+ if (root == nullptr)
+ root = this;
+ if (root->isTrackingEvents())
+ root->eventDone(evt);
+#endif
+ if (evt.isType(SK_EventType_OnEnd)) {
+ SkEventState eventState;
+ SkDEBUGCODE(bool success =) evt.findPtr("anim", (void**) &eventState.fDisplayable);
+ SkASSERT(success);
+ SkDEBUGCODE(success =) evt.findS32("time", (int32_t*) &fMaker->fEnableTime);
+ SkASSERT(success);
+ fMaker->fAdjustedStart = fMaker->getAppTime() - fMaker->fEnableTime;
+ fMaker->fEvents.doEvent(*fMaker, SkDisplayEvent::kOnEnd, &eventState);
+ fMaker->fAdjustedStart = 0;
+ goto inval;
+ }
+ if (evt.isType(SK_EventType_Delay)) {
+ fMaker->doDelayedEvent();
+ goto inval;
+ }
+ {
+ const char* id = evt.findString("id");
+ if (id == nullptr)
+ return false;
+ SkDisplayable** firstMovie = fMaker->fMovies.begin();
+ SkDisplayable** endMovie = fMaker->fMovies.end();
+ for (SkDisplayable** ptr = firstMovie; ptr < endMovie; ptr++) {
+ SkDisplayMovie* movie = (SkDisplayMovie*) *ptr;
+ movie->doEvent(evt);
+ }
+ {
+ SkDisplayable* event;
+ if (fMaker->find(id, &event) == false)
+ return false;
+ #if defined SK_DEBUG && defined SK_DEBUG_ANIMATION_TIMING
+ SkString debugOut;
+ SkMSec realTime = fMaker->getAppTime();
+ debugOut.appendS32(realTime - fMaker->fDebugTimeBase);
+ debugOut.append(" onEvent id=");
+ debugOut.append(id);
+ #endif
+ SkMSec time = evt.getFast32();
+ if (time != 0) {
+ SkMSec app = fMaker->getAppTime();
+ fMaker->setEnableTime(app, time);
+ #if defined SK_DEBUG && defined SK_DEBUG_ANIMATION_TIMING
+ debugOut.append(" time=");
+ debugOut.appendS32(time - fMaker->fDebugTimeBase);
+ debugOut.append(" adjust=");
+ debugOut.appendS32(fMaker->fAdjustedStart);
+ #endif
+ }
+ #if defined SK_DEBUG && defined SK_DEBUG_ANIMATION_TIMING
+ SkDebugf("%s\n", debugOut.c_str());
+ #endif
+ SkASSERT(event->isEvent());
+ SkDisplayEvent* displayEvent = (SkDisplayEvent*) event;
+ displayEvent->populateInput(*fMaker, evt);
+ displayEvent->enableEvent(*fMaker);
+ }
+ }
+inval:
+ fMaker->notifyInval();
+ return true;
+}
+
+void SkAnimator::onEventPost(SkEvent* evt, SkEventSinkID sinkID)
+{
+#ifdef SK_DEBUG
+ SkAnimator* root = fMaker->getRoot();
+ if (root) {
+ root->onEventPost(evt, sinkID);
+ return;
+ }
+#else
+ SkASSERT(sinkID == this->getSinkID() || this->getHostEventSinkID() == sinkID);
+#endif
+ evt->setTargetID(sinkID)->post();
+}
+
+void SkAnimator::onEventPostTime(SkEvent* evt, SkEventSinkID sinkID, SkMSec time)
+{
+#ifdef SK_DEBUG
+ SkAnimator* root = fMaker->getRoot();
+ if (root) {
+ root->onEventPostTime(evt, sinkID, time);
+ return;
+ }
+#else
+ SkASSERT(sinkID == this->getSinkID() || this->getHostEventSinkID() == sinkID);
+#endif
+ evt->setTargetID(sinkID)->postTime(time);
+}
+
+void SkAnimator::reset() {
+ fMaker->fDisplayList.reset();
+}
+
+SkEventSinkID SkAnimator::getHostEventSinkID() const {
+ return fMaker->fHostEventSinkID;
+}
+
+void SkAnimator::setHostEventSinkID(SkEventSinkID target) {
+ fMaker->fHostEventSinkID = target;
+}
+
+void SkAnimator::onSetHostHandler(Handler ) {
+}
+
+void SkAnimator::setJavaOwner(Handler ) {
+}
+
+bool SkAnimator::setArrayString(const char* id, const char* fieldID, const char** array, int num)
+{
+ SkTypedArray tArray(SkType_String);
+ tArray.setCount(num);
+ for (int i = 0; i < num; i++) {
+ SkOperand op;
+ op.fString = new SkString(array[i]);
+ tArray[i] = op;
+ }
+ return setArray(id, fieldID, tArray);
+}
+bool SkAnimator::setArrayInt(const char* id, const char* fieldID, const int* array, int num)
+{
+ SkTypedArray tArray(SkType_Int);
+ tArray.setCount(num);
+ for (int i = 0; i < num; i++) {
+ SkOperand op;
+ op.fS32 = array[i];
+ tArray[i] = op;
+ }
+ return setArray(id, fieldID, tArray);
+}
+
+bool SkAnimator::setArray(SkDisplayable* element, const SkMemberInfo* info, SkTypedArray array) {
+ if (info->fType != SkType_Array)
+ return false; //the field is not an array
+    //I think we can handle the case where the displayable itself is an array differently from
+    //the case where it merely has an array - for one thing, if it is an array we can change its
+    //type; if it's not, we cannot
+ SkDisplayTypes type = element->getType();
+ if (type == SkType_Array) {
+ SkDisplayArray* dispArray = (SkDisplayArray*) element;
+ dispArray->values = array;
+ return true;
+ }
+ else
+        return false; //this case is not currently handled
+}
+
+bool SkAnimator::setArray(const char* id, const char* fieldID, SkTypedArray array) {
+ SkDisplayable* element = (SkDisplayable*) getElement(id);
+    //should I go ahead and change all 'NULL's to 'nullptr'?
+ if (element == nullptr)
+ return false;
+ const SkMemberInfo* field = getField(element, fieldID);
+ if (field == nullptr)
+ return false;
+ return setArray(element, field, array);
+}
+
+bool SkAnimator::setInt(SkDisplayable* element, const SkMemberInfo* info, int32_t s32) {
+ if (info->fType != SkType_MemberProperty) {
+ SkOperand operand;
+ operand.fS32 = s32;
+ SkASSERT(info->getType() == SkType_Int);
+ info->setValue(element, &operand, 1);
+ } else {
+ SkScriptValue scriptValue;
+ scriptValue.fType = SkType_Int;
+ scriptValue.fOperand.fS32 = s32;
+ element->setProperty(info->propertyIndex(), scriptValue);
+ }
+ return true;
+}
+
+bool SkAnimator::setInt(const char* id, const char* fieldID, int32_t s32) {
+ SkDisplayable* element = (SkDisplayable*) getElement(id);
+ if (element == nullptr)
+ return false;
+ const SkMemberInfo* field = getField(element, fieldID);
+ if (field == nullptr)
+ return false;
+ return setInt(element, field, s32);
+}
+
+bool SkAnimator::setScalar(SkDisplayable* element, const SkMemberInfo* info, SkScalar scalar) {
+ if (info->fType != SkType_MemberProperty) {
+ SkOperand operand;
+ operand.fScalar = scalar;
+ SkASSERT(info->getType() == SkType_Float);
+ info->setValue(element, &operand, 1);
+ } else {
+ SkScriptValue scriptValue;
+ scriptValue.fType = SkType_Float;
+ scriptValue.fOperand.fScalar = scalar;
+ element->setProperty(info->propertyIndex(), scriptValue);
+ }
+ return true;
+}
+
+bool SkAnimator::setScalar(const char* id, const char* fieldID, SkScalar scalar) {
+ SkDisplayable* element = (SkDisplayable*) getElement(id);
+ if (element == nullptr)
+ return false;
+ const SkMemberInfo* field = getField(element, fieldID);
+ if (field == nullptr)
+ return false;
+ return setScalar(element, field, scalar);
+}
+
+bool SkAnimator::setString(SkDisplayable* element,
+ const SkMemberInfo* info, const char* str) {
+ // !!! until this is fixed, can't call script with global references from here
+ info->setValue(*fMaker, nullptr, 0, info->fCount, element, info->getType(), str, strlen(str));
+ return true;
+}
+
+bool SkAnimator::setString(const char* id, const char* fieldID, const char* str) {
+ SkDisplayable* element = (SkDisplayable*) getElement(id);
+ if (element == nullptr)
+ return false;
+ const SkMemberInfo* field = getField(element, fieldID);
+ if (field == nullptr)
+ return false;
+ return setString(element, field, str);
+}
+
+void SkAnimator::setTimeline(const Timeline& timeline) {
+ fMaker->fTimeline = &timeline;
+}
+
+void SkAnimator::setURIBase(const char* uri) {
+ if (uri)
+ {
+ const char* tail = strrchr(uri, '/');
+ if (tail) {
+ SkString prefix(uri, tail - uri + 1);
+ if (uri[0] != '.' /*SkStream::IsAbsoluteURI(uri)*/)
+ fMaker->fPrefix.reset();
+ fMaker->fPrefix.append(prefix);
+ fMaker->fFileName.set(tail + 1);
+ } else
+ fMaker->fFileName.set(uri);
+ }
+}
+
+#ifdef SK_DEBUG
+bool SkAnimator::NoLeaks() {
+#ifdef SK_BUILD_FOR_MAC
+ if (SkDisplayable::fAllocations.count() == 0)
+ return true;
+// return SkDisplayable::fAllocationCount == 0;
+ SkDebugf("!!! leaked %d displayables:\n", SkDisplayable::fAllocations.count());
+ for (SkDisplayable** leak = SkDisplayable::fAllocations.begin(); leak < SkDisplayable::fAllocations.end(); leak++)
+        SkDebugf("%p %s\n", (void*) *leak, (*leak)->id);
+#endif
+ return false;
+}
+#endif
+
+#ifdef SK_SUPPORT_UNITTEST
+#include "SkAnimatorScript.h"
+#include "SkBase64.h"
+#include "SkParse.h"
+#include "SkMemberInfo.h"
+
+#define unittestline(type) { #type , type::UnitTest }
+#endif
+
+
+#ifdef SK_SUPPORT_UNITTEST
+void SkAnimator::Init(bool runUnitTests) {
+ if (runUnitTests == false)
+ return;
+ static const struct {
+ const char* fTypeName;
+ void (*fUnitTest)( );
+ } gUnitTests[] = {
+ unittestline(SkBase64),
+ unittestline(SkDisplayType),
+ unittestline(SkParse),
+ unittestline(SkScriptEngine),
+// unittestline(SkScriptEngine2), // compiled script experiment
+ unittestline(SkAnimatorScript)
+ };
+ for (int i = 0; i < (int)SK_ARRAY_COUNT(gUnitTests); i++)
+ {
+ SkDebugf("SkAnimator: Running UnitTest for %s\n", gUnitTests[i].fTypeName);
+ gUnitTests[i].fUnitTest();
+ SkDebugf("SkAnimator: End UnitTest for %s\n", gUnitTests[i].fTypeName);
+ }
+}
+#else
+void SkAnimator::Init(bool) {}
+#endif
+
+void SkAnimator::Term() {
+}
diff --git a/gfx/skia/skia/src/animator/SkAnimatorScript.cpp b/gfx/skia/skia/src/animator/SkAnimatorScript.cpp
new file mode 100644
index 000000000..cbe1d04b1
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkAnimatorScript.cpp
@@ -0,0 +1,594 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkAnimatorScript.h"
+#include "SkAnimateBase.h"
+#include "SkAnimateMaker.h"
+#include "SkDisplayTypes.h"
+#include "SkExtras.h"
+#include "SkMemberInfo.h"
+#include "SkParse.h"
+
+static const SkDisplayEnumMap gEnumMaps[] = {
+ { SkType_AddMode, "indirect|immediate" },
+ { SkType_Align, "left|center|right" },
+ { SkType_ApplyMode, "create|immediate|once" },
+ { SkType_ApplyTransition, "normal|reverse" },
+ { SkType_BitmapEncoding, "jpeg|png" },
+ { SkType_BitmapFormat, "none|A1|A8|Index8|RGB16|RGB32" },
+ { SkType_Boolean, "false|true" },
+ { SkType_Cap, "butt|round|square" },
+ { SkType_EventCode, "none|leftSoftKey|rightSoftKey|home|back|send|end|key0|key1|key2|key3|key4|key5|key6|key7|key8|key9|star|hash|up|down|left|right|OK|volUp|volDown|camera" },
+ { SkType_EventKind, "none|keyChar|keyPress|keyPressUp|mouseDown|mouseDrag|mouseMove|mouseUp|onEnd|onLoad|user" },
+ { SkType_EventMode, "deferred|immediate" },
+ { SkType_FillType, "winding|evenOdd" },
+ { SkType_FilterType, "none|bilinear" },
+ { SkType_FontStyle, "normal|bold|italic|boldItalic" },
+ { SkType_FromPathMode, "normal|angle|position" },
+ { SkType_Join, "miter|round|blunt" },
+ { SkType_MaskFilterBlurStyle, "normal|solid|outer|inner" },
+ { SkType_PathDirection, "cw|ccw" },
+ { SkType_Style, "fill|stroke|strokeAndFill" },
+ { SkType_TextBoxAlign, "start|center|end" },
+ { SkType_TextBoxMode, "oneLine|lineBreak" },
+ { SkType_TileMode, "clamp|repeat|mirror" },
+ { SkType_Xfermode, "clear|src|dst|srcOver|dstOver|srcIn|dstIn|srcOut|dstOut|"
+ "srcATop|dstATop|xor|darken|lighten" },
+};
+
+static int gEnumMapCount = SK_ARRAY_COUNT(gEnumMaps);
+
+SkAnimatorScript::SkAnimatorScript(SkAnimateMaker& maker, SkDisplayable* working, SkDisplayTypes type)
+ : SkScriptEngine(SkScriptEngine::ToOpType(type)), fMaker(maker), fParent(nullptr), fWorking(working)
+{
+ memberCallBack(EvalMember, (void*) this);
+ memberFunctionCallBack(EvalMemberFunction, (void*) this);
+ boxCallBack(Box, (void*) this);
+ unboxCallBack(Unbox, (void*) &maker);
+ propertyCallBack(EvalID, (void*) this); // must be first (entries are prepended, will be last), since it never fails
+ propertyCallBack(Infinity, (void*) this);
+ propertyCallBack(NaN, (void*) this);
+ functionCallBack(Eval, (void*) this);
+ functionCallBack(IsFinite, (void*) this);
+ functionCallBack(IsNaN, (void*) this);
+ if (type == SkType_ARGB) {
+ functionCallBack(EvalRGB, (void*) this);
+ propertyCallBack(EvalNamedColor, (void*) &maker.fIDs);
+ }
+ if (SkDisplayType::IsEnum(&maker, type)) {
+ // !!! for SpiderMonkey, iterate through the enum values, and map them to globals
+ const SkDisplayEnumMap& map = GetEnumValues(type);
+ propertyCallBack(EvalEnum, (void*) map.fValues);
+ }
+ for (SkExtras** extraPtr = maker.fExtras.begin(); extraPtr < maker.fExtras.end(); extraPtr++) {
+ SkExtras* extra = *extraPtr;
+ if (extra->fExtraCallBack)
+ propertyCallBack(extra->fExtraCallBack, extra->fExtraStorage);
+ }
+}
+
+SkAnimatorScript::~SkAnimatorScript() {
+ for (SkDisplayable** dispPtr = fTrackDisplayable.begin(); dispPtr < fTrackDisplayable.end(); dispPtr++)
+ delete *dispPtr;
+}
+
+bool SkAnimatorScript::evaluate(const char* original, SkScriptValue* result, SkDisplayTypes type) {
+ const char* script = original;
+ bool success = evaluateScript(&script, result);
+ if (success == false || result->fType != type) {
+ fMaker.setScriptError(*this);
+ return false;
+ }
+ return true;
+}
+
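+// Box a primitive script value in a heap-allocated SkDisplay* wrapper so the engine can treat it
+// as an object; the wrapper is tracked by the engine and freed in ~SkAnimatorScript().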
+bool SkAnimatorScript::Box(void* user, SkScriptValue* scriptValue) {
+ SkAnimatorScript* engine = (SkAnimatorScript*) user;
+ SkDisplayTypes type = scriptValue->fType;
+ SkDisplayable* displayable;
+ switch (type) {
+ case SkType_Array: {
+ SkDisplayArray* boxedValue = new SkDisplayArray(*scriptValue->fOperand.fArray);
+ displayable = boxedValue;
+ } break;
+ case SkType_Boolean: {
+ SkDisplayBoolean* boxedValue = new SkDisplayBoolean;
+ displayable = boxedValue;
+ boxedValue->value = !! scriptValue->fOperand.fS32;
+ } break;
+ case SkType_Int: {
+ SkDisplayInt* boxedValue = new SkDisplayInt;
+ displayable = boxedValue;
+ boxedValue->value = scriptValue->fOperand.fS32;
+ } break;
+ case SkType_Float: {
+ SkDisplayFloat* boxedValue = new SkDisplayFloat;
+ displayable = boxedValue;
+ boxedValue->value = scriptValue->fOperand.fScalar;
+ } break;
+ case SkType_String: {
+ SkDisplayString* boxedValue = new SkDisplayString(*scriptValue->fOperand.fString);
+ displayable = boxedValue;
+ } break;
+ case SkType_Displayable:
+ scriptValue->fOperand.fObject = scriptValue->fOperand.fDisplayable;
+ scriptValue->fType = SkType_Displayable;
+ return true;
+ default:
+ SkASSERT(0);
+ return false;
+ }
+ engine->track(displayable);
+ scriptValue->fOperand.fObject = displayable;
+ scriptValue->fType = SkType_Displayable;
+ return true;
+}
+
+bool SkAnimatorScript::Eval(const char* function, size_t len, SkTDArray<SkScriptValue>& params,
+ void* eng, SkScriptValue* value) {
+ if (SK_LITERAL_STR_EQUAL("eval", function, len) == false)
+ return false;
+ if (params.count() != 1)
+ return false;
+ SkAnimatorScript* host = (SkAnimatorScript*) eng;
+ SkAnimatorScript engine(host->fMaker, host->fWorking, SkScriptEngine::ToDisplayType(host->fReturnType));
+ SkScriptValue* scriptValue = params.begin();
+ bool success = true;
+ if (scriptValue->fType == SkType_String) {
+ const char* script = scriptValue->fOperand.fString->c_str();
+ success = engine.evaluateScript(&script, value);
+ } else
+ *value = *scriptValue;
+ return success;
+}
+
+bool SkAnimatorScript::EvalEnum(const char* token, size_t len, void* callBack, SkScriptValue* value) {
+ const char* tokens = (const char*) callBack;
+ value->fType = SkType_Int;
+ if (MapEnums(tokens, token, len, (int*)&value->fOperand.fS32))
+ return true;
+ return false;
+}
+
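+// Resolve a bare identifier: first against the maker's id dictionary, then the special name
+// "parent", then as a member of the current working displayable; unresolved names yield a null
+// string. Dynamic animators are also registered as dependents of the displayable they reference.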
+bool SkAnimatorScript::EvalID(const char* token, size_t len, void* user, SkScriptValue* value) {
+ SkAnimatorScript* engine = (SkAnimatorScript*) user;
+ SkTDict<SkDisplayable*>* ids = &engine->fMaker.fIDs;
+ SkDisplayable* displayable;
+ bool success = ids->find(token, len, &displayable);
+ if (success == false) {
+ displayable = engine->fWorking;
+ if (SK_LITERAL_STR_EQUAL("parent", token, len)) {
+ SkDisplayable* parent = displayable->getParent();
+ if (parent == nullptr)
+ parent = engine->fParent;
+ if (parent) {
+ value->fOperand.fDisplayable = parent;
+ value->fType = SkType_Displayable;
+ return true;
+ }
+ }
+ if (displayable && EvalMember(token, len, displayable, engine, value))
+ return true;
+ value->fOperand.fString = nullptr;
+ value->fType = SkType_String;
+ } else {
+ SkDisplayable* working = engine->fWorking;
+ value->fOperand.fDisplayable = displayable;
+ value->fType = SkType_Displayable;
+ if (displayable->canContainDependents() && working && working->isAnimate()) {
+ SkAnimateBase* animator = (SkAnimateBase*) working;
+ if (animator->isDynamic()) {
+ SkDisplayDepend* depend = (SkDisplayDepend* ) displayable;
+ depend->addDependent(working);
+ }
+ }
+ }
+ return true;
+}
+
+bool SkAnimatorScript::EvalNamedColor(const char* token, size_t len, void* callback, SkScriptValue* value) {
+ value->fType = SkType_Int;
+ if (SkParse::FindNamedColor(token, len, (SkColor*) &value->fOperand.fS32) != nullptr)
+ return true;
+ return false;
+}
+
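+// Implements the script-level rgb(r,g,b) function: each argument is converted to an int, clamped
+// to 0..255, and packed into an opaque (alpha 0xFF) ARGB value.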
+bool SkAnimatorScript::EvalRGB(const char* function, size_t len, SkTDArray<SkScriptValue>& params,
+ void* eng, SkScriptValue* value) {
+ if (SK_LITERAL_STR_EQUAL("rgb", function, len) == false)
+ return false;
+ if (params.count() != 3)
+ return false;
+ SkScriptEngine* engine = (SkScriptEngine*) eng;
+ unsigned result = 0xFF000000;
+ int shift = 16;
+ for (SkScriptValue* valuePtr = params.begin(); valuePtr < params.end(); valuePtr++) {
+ engine->convertTo(SkType_Int, valuePtr);
+ result |= SkClampMax(valuePtr->fOperand.fS32, 255) << shift;
+ shift -= 8;
+ }
+ value->fOperand.fS32 = result;
+ value->fType = SkType_Int;
+ return true;
+}
+
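+// Copy a member's raw storage into a script value, normalizing the type: ARGB is reported as int,
+// MSec is scaled to floating-point seconds, and array members are copied into a tracked
+// SkTypedArray.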
+bool SkAnimatorScript::EvalMemberCommon(SkScriptEngine* engine, const SkMemberInfo* info,
+ SkDisplayable* displayable, SkScriptValue* value) {
+ SkDisplayTypes original;
+ SkDisplayTypes type = original = (SkDisplayTypes) info->getType();
+ if (info->fType == SkType_Array)
+ type = SkType_Array;
+ switch (type) {
+ case SkType_ARGB:
+ type = SkType_Int;
+ case SkType_Boolean:
+ case SkType_Int:
+ case SkType_MSec:
+ case SkType_Float:
+ SkASSERT(info->getCount() == 1);
+ if (info->fType != SkType_MemberProperty && info->fType != SkType_MemberFunction)
+ value->fOperand.fS32 = *(int32_t*) info->memberData(displayable); // OK for SkScalar too
+ if (type == SkType_MSec) {
+ value->fOperand.fScalar = value->fOperand.fS32 * 0.001f;
+ type = SkType_Float;
+ }
+ break;
+ case SkType_String: {
+ SkString* displayableString;
+ if (info->fType != SkType_MemberProperty && info->fType != SkType_MemberFunction) {
+ info->getString(displayable, &displayableString);
+ value->fOperand.fString = new SkString(*displayableString);
+ }
+ } break;
+ case SkType_Array: {
+ SkASSERT(info->fType != SkType_MemberProperty); // !!! incomplete
+ SkTDOperandArray* displayableArray = (SkTDOperandArray*) info->memberData(displayable);
+ if (displayable->getType() == SkType_Array) {
+ SkDisplayArray* typedArray = (SkDisplayArray*) displayable;
+ original = typedArray->values.getType();
+ }
+ SkASSERT(original != SkType_Unknown);
+ SkTypedArray* array = value->fOperand.fArray = new SkTypedArray(original);
+ engine->track(array);
+ int count = displayableArray->count();
+ if (count > 0) {
+ array->setCount(count);
+ memcpy(array->begin(), displayableArray->begin(), count * sizeof(SkOperand));
+ }
+ } break;
+ default:
+ SkASSERT(0); // unimplemented
+ }
+ value->fType = type;
+ return true;
+}
+
+bool SkAnimatorScript::EvalMember(const char* member, size_t len, void* object, void* eng,
+ SkScriptValue* value) {
+ SkScriptEngine* engine = (SkScriptEngine*) eng;
+ SkDisplayable* displayable = (SkDisplayable*) object;
+ SkString name(member, len);
+ SkDisplayable* named = displayable->contains(name);
+ if (named) {
+ value->fOperand.fDisplayable = named;
+ value->fType = SkType_Displayable;
+ return true;
+ }
+ const SkMemberInfo* info = displayable->getMember(name.c_str());
+ if (info == nullptr)
+ return false;
+ if (info->fType == SkType_MemberProperty) {
+ if (displayable->getProperty(info->propertyIndex(), value) == false) {
+ SkASSERT(0);
+ return false;
+ }
+ }
+ return EvalMemberCommon(engine, info, displayable, value);
+}
+
+bool SkAnimatorScript::EvalMemberFunction(const char* member, size_t len, void* object,
+ SkTDArray<SkScriptValue>& params, void* eng, SkScriptValue* value) {
+ SkScriptEngine* engine = (SkScriptEngine*) eng;
+ SkDisplayable* displayable = (SkDisplayable*) object;
+ SkString name(member, len);
+ const SkMemberInfo* info = displayable->getMember(name.c_str());
+ SkASSERT(info != nullptr); /* !!! error handling unimplemented */
+ if (info->fType != SkType_MemberFunction) {
+ SkASSERT(0);
+ return false;
+ }
+ displayable->executeFunction(displayable, info->functionIndex(), params, info->getType(),
+ value);
+ return EvalMemberCommon(engine, info, displayable, value);
+}
+
+bool SkAnimatorScript::EvaluateDisplayable(SkAnimateMaker& maker, SkDisplayable* displayable, const char* script, SkDisplayable** result) {
+ SkAnimatorScript engine(maker, displayable, SkType_Displayable);
+ SkScriptValue value;
+ bool success = engine.evaluate(script, &value, SkType_Displayable);
+ if (success)
+ *result = value.fOperand.fDisplayable;
+ return success;
+}
+
+bool SkAnimatorScript::EvaluateInt(SkAnimateMaker& maker, SkDisplayable* displayable, const char* script, int32_t* result) {
+ SkAnimatorScript engine(maker, displayable, SkType_Int);
+ SkScriptValue value;
+ bool success = engine.evaluate(script, &value, SkType_Int);
+ if (success)
+ *result = value.fOperand.fS32;
+ return success;
+}
+
+bool SkAnimatorScript::EvaluateFloat(SkAnimateMaker& maker, SkDisplayable* displayable, const char* script, SkScalar* result) {
+ SkAnimatorScript engine(maker, displayable, SkType_Float);
+ SkScriptValue value;
+ bool success = engine.evaluate(script, &value, SkType_Float);
+ if (success)
+ *result = value.fOperand.fScalar;
+ return success;
+}
+
+bool SkAnimatorScript::EvaluateString(SkAnimateMaker& maker, SkDisplayable* displayable, const char* script, SkString* result) {
+ SkAnimatorScript engine(maker, displayable, SkType_String);
+ SkScriptValue value;
+ bool success = engine.evaluate(script, &value, SkType_String);
+ if (success)
+ result->set(*(value.fOperand.fString));
+ return success;
+}
+
+bool SkAnimatorScript::EvaluateString(SkAnimateMaker& maker, SkDisplayable* displayable, SkDisplayable* parent, const char* script, SkString* result) {
+ SkAnimatorScript engine(maker, displayable, SkType_String);
+ engine.fParent = parent;
+ SkScriptValue value;
+ bool success = engine.evaluate(script, &value, SkType_String);
+ if (success)
+ result->set(*(value.fOperand.fString));
+ return success;
+}
+
+const SkDisplayEnumMap& SkAnimatorScript::GetEnumValues(SkDisplayTypes type) {
+ int index = SkTSearch<SkDisplayTypes>(&gEnumMaps[0].fType, gEnumMapCount, type,
+ sizeof(SkDisplayEnumMap));
+ SkASSERT(index >= 0);
+ return gEnumMaps[index];
+}
+
+bool SkAnimatorScript::Infinity(const char* token, size_t len, void* user, SkScriptValue* value) {
+ if (SK_LITERAL_STR_EQUAL("Infinity", token, len) == false)
+ return false;
+ value->fType = SkType_Float;
+ value->fOperand.fScalar = SK_ScalarInfinity;
+ return true;
+}
+
+bool SkAnimatorScript::IsFinite(const char* function, size_t len, SkTDArray<SkScriptValue>& params,
+ void* eng, SkScriptValue* value) {
+    if (SK_LITERAL_STR_EQUAL("isFinite", function, len) == false)
+ return false;
+ if (params.count() != 1)
+ return false;
+ SkScriptValue* scriptValue = params.begin();
+ SkDisplayTypes type = scriptValue->fType;
+ SkScalar scalar = scriptValue->fOperand.fScalar;
+ value->fType = SkType_Int;
+ value->fOperand.fS32 = type == SkType_Float ? SkScalarIsNaN(scalar) == false &&
+ SkScalarAbs(scalar) != SK_ScalarInfinity : type == SkType_Int;
+ return true;
+}
+
+bool SkAnimatorScript::IsNaN(const char* function, size_t len, SkTDArray<SkScriptValue>& params,
+ void* eng, SkScriptValue* value) {
+ if (SK_LITERAL_STR_EQUAL("isNaN", function, len) == false)
+ return false;
+ if (params.count() != 1)
+ return false;
+ SkScriptValue* scriptValue = params.begin();
+ value->fType = SkType_Int;
+ value->fOperand.fS32 = scriptValue->fType == SkType_Float ? SkScalarIsNaN(scriptValue->fOperand.fScalar) : 0;
+ return true;
+}
+
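+// Scan a '|'-separated value list for a name of the given length and return its zero-based index.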
+bool SkAnimatorScript::MapEnums(const char* ptr, const char* match, size_t len, int* value) {
+ int index = 0;
+ bool more = true;
+ do {
+ const char* last = strchr(ptr, '|');
+ if (last == nullptr) {
+ last = &ptr[strlen(ptr)];
+ more = false;
+ }
+ size_t length = last - ptr;
+ if (len == length && strncmp(ptr, match, length) == 0) {
+ *value = index;
+ return true;
+ }
+ index++;
+ ptr = last + 1;
+ } while (more);
+ return false;
+}
+
+bool SkAnimatorScript::NaN(const char* token, size_t len, void* user, SkScriptValue* value) {
+ if (SK_LITERAL_STR_EQUAL("NaN", token, len) == false)
+ return false;
+ value->fType = SkType_Float;
+ value->fOperand.fScalar = SK_ScalarNaN;
+ return true;
+}
+
+#if 0
+bool SkAnimatorScript::ObjectToString(void* object, void* user, SkScriptValue* value) {
+ SkTDict<SkDisplayable*>* ids = (SkTDict<SkDisplayable*>*) user;
+ SkDisplayable* displayable = (SkDisplayable*) object;
+ const char* key;
+ bool success = ids->findKey(displayable, &key);
+ if (success == false)
+ return false;
+ value->fOperand.fString = new SkString(key);
+ value->fType = SkType_String;
+ return true;
+}
+#endif
+
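+// Reverse of Box(): extract the primitive value from a boxed displayable; displayables that are
+// not one of the box types fall back to their registered id string.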
+bool SkAnimatorScript::Unbox(void* m, SkScriptValue* scriptValue) {
+ SkAnimateMaker* maker = (SkAnimateMaker*) m;
+ SkASSERT((unsigned) scriptValue->fType == (unsigned) SkType_Displayable);
+ SkDisplayable* displayable = (SkDisplayable*) scriptValue->fOperand.fObject;
+ SkDisplayTypes type = displayable->getType();
+ switch (displayable->getType()) {
+ case SkType_Array: {
+ SkDisplayArray* boxedValue = (SkDisplayArray*) displayable;
+ scriptValue->fOperand.fArray = &boxedValue->values;
+ } break;
+ case SkType_Boolean: {
+ SkDisplayBoolean* boxedValue = (SkDisplayBoolean*) displayable;
+ scriptValue->fOperand.fS32 = boxedValue->value;
+ } break;
+ case SkType_Int: {
+ SkDisplayInt* boxedValue = (SkDisplayInt*) displayable;
+ scriptValue->fOperand.fS32 = boxedValue->value;
+ } break;
+ case SkType_Float: {
+ SkDisplayFloat* boxedValue = (SkDisplayFloat*) displayable;
+ scriptValue->fOperand.fScalar = boxedValue->value;
+ } break;
+ case SkType_String: {
+ SkDisplayString* boxedValue = (SkDisplayString*) displayable;
+ scriptValue->fOperand.fString = new SkString(boxedValue->value);
+ } break;
+ default: {
+ const char* id = nullptr;
+ SkDEBUGCODE(bool success = ) maker->findKey(displayable, &id);
+ SkASSERT(success);
+ scriptValue->fOperand.fString = new SkString(id);
+ type = SkType_String;
+ }
+ }
+ scriptValue->fType = type;
+ return true;
+}
+
+#if defined SK_SUPPORT_UNITTEST
+
+#include "SkAnimator.h"
+
+static const char scriptTestSetup[] =
+"<screenplay>\n"
+ "<text id='label' text='defg'/>\n"
+ "<add id='addLabel' use='label'/>\n"
+ "<text id='text1' text='test'/>\n"
+ "<apply scope='addLabel'>\n"
+ "<set target='label' field='text' to='#script:text1.text'/>\n"
+ "</apply>\n"
+ "<apply>\n"
+ "<paint id='labelPaint'>\n"
+ "<emboss id='emboss' direction='[1,1,1]' />\n"
+ "</paint>\n"
+ "<animate id='animation' field='direction' target='emboss' from='[1,1,1]' to='[-1,1,1]' dur='1'/>\n"
+ "<set lval='direction[0]' target='emboss' to='-1' />\n"
+ "</apply>\n"
+ "<color id='testColor' color='0 ? rgb(0,0,0) : rgb(255,255,255)' />\n"
+ "<color id='xColor' color='rgb(12,34,56)' />\n"
+ "<array id='emptyArray' />\n"
+ "<array id='intArray' values='[1, 4, 6]' />\n"
+ "<int id='idx' value='2' />\n"
+ "<int id='idy' value='2' />\n"
+ "<string id='alpha' value='abc' />\n"
+ "<rect id='testRect' left='Math.cos(0)' top='2' right='12' bottom='5' />\n"
+ "<event id='evt'>\n"
+ "<input name='x' />\n"
+ "<apply scope='idy'>\n"
+ "<set field='value' to='evt.x.int' />\n"
+ "</apply>\n"
+ "</event>\n"
+"</screenplay>";
+
+#define DEFAULT_ANSWER , 0
+
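+// Each row is a script, its expected result type, and the expected int, scalar, and string
+// answers; DEFAULT_ANSWER pads the answer slots a given test does not use.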
+static const SkScriptNAnswer scriptTests[] = {
+ { "label.text.length == 4", SkType_Int, 1 DEFAULT_ANSWER DEFAULT_ANSWER },
+// { "labelPaint.measureText(label.text) > 0 ? labelPaint.measureText(label.text)+10 : 40", SkType_Float, 0, SkIntToScalar(0x23) },
+ { "Number.POSITIVE_INFINITY >= Number.MAX_VALUE ? 1 : 0", SkType_Int, 1 DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "Infinity >= Number.MAX_VALUE ? 1 : 0", SkType_Int, 1 DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "Number.NEGATIVE_INFINITY <= -Number.MAX_VALUE ? 1 : 0", SkType_Int, 1 DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "Number.MIN_VALUE > 0 ? 1 : 0", SkType_Int, 1 DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "isNaN(Number.NaN)", SkType_Int, 1 DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "isNaN(NaN)", SkType_Int, 1 DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "Math.sin(0)", SkType_Float, 0, SkIntToScalar(0) DEFAULT_ANSWER },
+ { "alpha+alpha", SkType_String, 0, 0, "abcabc" },
+ { "intArray[4]", SkType_Unknown DEFAULT_ANSWER DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "emptyArray[4]", SkType_Unknown DEFAULT_ANSWER DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "idx", SkType_Int, 2 DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "intArray.length", SkType_Int, 3 DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "intArray.values[0]", SkType_Int, 1 DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "intArray[0]", SkType_Int, 1 DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "idx.value", SkType_Int, 2 DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "alpha.value", SkType_String, 0, 0, "abc" },
+ { "alpha", SkType_String, 0, 0, "abc" },
+ { "alpha.value+alpha.value", SkType_String, 0, 0, "abcabc" },
+ { "alpha+idx", SkType_String, 0, 0, "abc2" },
+ { "idx+alpha", SkType_String, 0, 0, "2abc" },
+ { "intArray[idx]", SkType_Int, 6 DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "alpha.slice(1,2)", SkType_String, 0, 0, "b" },
+ { "alpha.value.slice(1,2)", SkType_String, 0, 0, "b" },
+ { "testRect.left+2", SkType_Float, 0, SkIntToScalar(3) DEFAULT_ANSWER },
+ { "0 ? Math.sin(0) : 1", SkType_Int, 1 DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "0 ? intArray[0] : 1", SkType_Int, 1 DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "0 ? intArray.values[0] : 1", SkType_Int, 1 DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "0 ? idx : 1", SkType_Int, 1 DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "0 ? idx.value : 1", SkType_Int, 1 DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "0 ? alpha.slice(1,2) : 1", SkType_Int, 1 DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "0 ? alpha.value.slice(1,2) : 1", SkType_Int, 1 DEFAULT_ANSWER DEFAULT_ANSWER },
+ { "idy", SkType_Int, 3 DEFAULT_ANSWER DEFAULT_ANSWER }
+};
+
+#define SkScriptNAnswer_testCount SK_ARRAY_COUNT(scriptTests)
+
+void SkAnimatorScript::UnitTest() {
+#if defined(SK_SUPPORT_UNITTEST)
+ SkAnimator animator;
+ SkASSERT(animator.decodeMemory(scriptTestSetup, sizeof(scriptTestSetup)-1));
+ SkEvent evt;
+ evt.setString("id", "evt");
+ evt.setS32("x", 3);
+ animator.doUserEvent(evt);
+ // set up animator with memory script above, then run value tests
+ for (unsigned index = 0; index < SkScriptNAnswer_testCount; index++) {
+ SkAnimatorScript engine(*animator.fMaker, nullptr, scriptTests[index].fType);
+ SkScriptValue value;
+ const char* script = scriptTests[index].fScript;
+ bool success = engine.evaluateScript(&script, &value);
+ if (success == false) {
+ SkDebugf("script failed: %s\n", scriptTests[index].fScript);
+ SkASSERT(scriptTests[index].fType == SkType_Unknown);
+ continue;
+ }
+ SkASSERT(value.fType == scriptTests[index].fType);
+ SkScalar error;
+ switch (value.fType) {
+ case SkType_Int:
+ SkASSERT(value.fOperand.fS32 == scriptTests[index].fIntAnswer);
+ break;
+ case SkType_Float:
+ error = SkScalarAbs(value.fOperand.fScalar - scriptTests[index].fScalarAnswer);
+ SkASSERT(error < SK_Scalar1 / 10000);
+ break;
+ case SkType_String:
+ SkASSERT(strcmp(value.fOperand.fString->c_str(), scriptTests[index].fStringAnswer) == 0);
+ break;
+ default:
+ SkASSERT(0);
+ }
+ }
+#endif
+}
+
+#endif
diff --git a/gfx/skia/skia/src/animator/SkAnimatorScript.h b/gfx/skia/skia/src/animator/SkAnimatorScript.h
new file mode 100644
index 000000000..8589388e1
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkAnimatorScript.h
@@ -0,0 +1,75 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkAnimatorScript_DEFINED
+#define SkAnimatorScript_DEFINED
+
+#include "SkDisplayable.h"
+#include "SkScript.h"
+#include "SkTypedArray.h"
+
+class SkAnimateMaker;
+struct SkMemberInfo;
+
+struct SkDisplayEnumMap {
+ SkDisplayTypes fType;
+ const char* fValues;
+};
+
+class SkAnimatorScript : public SkScriptEngine {
+public:
+ SkAnimatorScript(SkAnimateMaker& , SkDisplayable* , SkDisplayTypes type);
+ ~SkAnimatorScript();
+ bool evaluate(const char* script, SkScriptValue* , SkDisplayTypes type);
+ void track(SkDisplayable* displayable) {
+ SkASSERT(fTrackDisplayable.find(displayable) < 0);
+ *fTrackDisplayable.append() = displayable; }
+ static bool EvaluateDisplayable(SkAnimateMaker& , SkDisplayable* , const char* script, SkDisplayable** );
+ static bool EvaluateFloat(SkAnimateMaker& , SkDisplayable* , const char* script, SkScalar* );
+ static bool EvaluateInt(SkAnimateMaker& , SkDisplayable* , const char* script, int32_t* );
+ static bool EvaluateString(SkAnimateMaker& , SkDisplayable* , const char* script, SkString* );
+ static bool EvaluateString(SkAnimateMaker& , SkDisplayable* , SkDisplayable* parent, const char* script, SkString* );
+ static bool MapEnums(const char* ptr, const char* match, size_t len, int* value);
+protected:
+ static bool Box(void* user, SkScriptValue* );
+ static bool Eval(const char* function, size_t len, SkTDArray<SkScriptValue>& params,
+ void* callBack, SkScriptValue* );
+ static bool EvalEnum(const char* token, size_t len, void* callBack, SkScriptValue* );
+ static bool EvalID(const char* token, size_t len, void* callBack, SkScriptValue* );
+ static bool EvalMember(const char* member, size_t len, void* object, void* eng,
+ SkScriptValue* value);
+ static bool EvalMemberCommon(SkScriptEngine* , const SkMemberInfo* info,
+ SkDisplayable* displayable, SkScriptValue* value);
+ static bool EvalMemberFunction(const char* member, size_t len, void* object,
+ SkTDArray<SkScriptValue>& params, void* user, SkScriptValue* value);
+ static bool EvalNamedColor(const char* token, size_t len, void* callBack, SkScriptValue* );
+ static bool EvalRGB(const char* function, size_t len, SkTDArray<SkScriptValue>& params,
+ void* callBack, SkScriptValue* );
+ static const SkDisplayEnumMap& GetEnumValues(SkDisplayTypes type);
+ static bool Infinity(const char* token, size_t len, void* callBack, SkScriptValue* );
+ static bool IsFinite(const char* function, size_t len, SkTDArray<SkScriptValue>& params,
+ void* callBack, SkScriptValue* );
+ static bool IsNaN(const char* function, size_t len, SkTDArray<SkScriptValue>& params,
+ void* callBack, SkScriptValue* );
+ static bool NaN(const char* token, size_t len, void* callBack, SkScriptValue* );
+ static bool Unbox(void* , SkScriptValue* scriptValue);
+ SkTDDisplayableArray fTrackDisplayable;
+ SkAnimateMaker& fMaker;
+ SkDisplayable* fParent;
+ SkDisplayable* fWorking;
+private:
+ friend class SkDump;
+ friend struct SkScriptNAnswer;
+#ifdef SK_SUPPORT_UNITTEST
+public:
+ static void UnitTest();
+#endif
+};
+
+#endif // SkAnimatorScript_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkAnimatorScript2.cpp b/gfx/skia/skia/src/animator/SkAnimatorScript2.cpp
new file mode 100644
index 000000000..d246130fb
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkAnimatorScript2.cpp
@@ -0,0 +1,622 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkAnimatorScript2.h"
+#include "SkAnimateBase.h"
+#include "SkAnimateMaker.h"
+#include "SkDisplayTypes.h"
+#include "SkExtras.h"
+#include "SkMemberInfo.h"
+#include "SkOpArray.h"
+#include "SkParse.h"
+#include "SkScript2.h"
+#include "SkScriptCallBack.h"
+
+static const SkDisplayEnumMap gEnumMaps[] = {
+ { SkType_AddMode, "indirect|immediate" },
+ { SkType_Align, "left|center|right" },
+ { SkType_ApplyMode, "immediate|once" },
+ { SkType_ApplyTransition, "reverse" },
+ { SkType_BitmapEncoding, "jpeg|png" },
+ { SkType_BitmapFormat, "none|A1|A8|Index8|RGB16|RGB32" },
+ { SkType_Boolean, "false|true" },
+ { SkType_Cap, "butt|round|square" },
+ { SkType_EventCode, "none|up|down|left|right|back|end|OK|send|leftSoftKey|rightSoftKey|key0|key1|key2|key3|key4|key5|key6|key7|key8|key9|star|hash" },
+ { SkType_EventKind, "none|keyChar|keyPress|mouseDown|mouseDrag|mouseMove|mouseUp|onEnd|onLoad|user" },
+ { SkType_EventMode, "deferred|immediate" },
+ { SkType_FillType, "winding|evenOdd" },
+ { SkType_FilterType, "none|bilinear" },
+ { SkType_FromPathMode, "normal|angle|position" },
+ { SkType_Join, "miter|round|blunt" },
+ { SkType_MaskFilterBlurStyle, "normal|solid|outer|inner" },
+ { SkType_PathDirection, "cw|ccw" },
+ { SkType_Style, "fill|stroke|strokeAndFill" },
+ { SkType_TextBoxAlign, "start|center|end" },
+ { SkType_TextBoxMode, "oneLine|lineBreak" },
+ { SkType_TileMode, "clamp|repeat|mirror" },
+ { SkType_Xfermode, "clear|src|dst|srcOver|dstOver|srcIn|dstIn|srcOut|dstOut|"
+ "srcATop|dstATop|xor|darken|lighten" },
+};
+
+static int gEnumMapCount = SK_ARRAY_COUNT(gEnumMaps);
+
+
+class SkAnimatorScript_Box : public SkScriptCallBackConvert {
+public:
+ SkAnimatorScript_Box() {}
+
+ ~SkAnimatorScript_Box() {
+ for (SkDisplayable** dispPtr = fTrackDisplayable.begin(); dispPtr < fTrackDisplayable.end(); dispPtr++)
+ delete *dispPtr;
+ }
+
+ virtual bool convert(SkOperand2::OpType type, SkOperand2* operand) {
+ SkDisplayable* displayable;
+ switch (type) {
+ case SkOperand2::kArray: {
+ SkDisplayArray* boxedValue = new SkDisplayArray(*operand->fArray);
+ displayable = boxedValue;
+ } break;
+ case SkOperand2::kS32: {
+ SkDisplayInt* boxedValue = new SkDisplayInt;
+ displayable = boxedValue;
+ boxedValue->value = operand->fS32;
+ } break;
+ case SkOperand2::kScalar: {
+ SkDisplayFloat* boxedValue = new SkDisplayFloat;
+ displayable = boxedValue;
+ boxedValue->value = operand->fScalar;
+ } break;
+ case SkOperand2::kString: {
+ SkDisplayString* boxedValue = new SkDisplayString(*operand->fString);
+ displayable = boxedValue;
+ } break;
+ case SkOperand2::kObject:
+ return true;
+ default:
+ SkASSERT(0);
+ return false;
+ }
+ track(displayable);
+ operand->fObject = (void*) displayable;
+ return true;
+ }
+
+ virtual SkOperand2::OpType getReturnType(int index) {
+ return SkOperand2::kObject;
+ }
+
+ virtual Type getType() const {
+ return kBox;
+ }
+
+ void track(SkDisplayable* displayable) {
+ SkASSERT(fTrackDisplayable.find(displayable) < 0);
+ *fTrackDisplayable.append() = displayable;
+ }
+
+ SkTDDisplayableArray fTrackDisplayable;
+};
+
+
+class SkAnimatorScript_Enum : public SkScriptCallBackProperty {
+public:
+ SkAnimatorScript_Enum(const char* tokens) : fTokens(tokens) {}
+
+ virtual bool getConstValue(const char* name, int len, SkOperand2* value) {
+ return SkAnimatorScript2::MapEnums(fTokens, name, len, &value->fS32);
+ }
+
+private:
+ const char* fTokens;
+};
+
+ // !!! if type is string, call invoke
+ // if any other type, return original value
+ // distinction is undone: could do this by returning index == 0 only if param is string
+ // still, caller of getParamTypes will attempt to convert param to string (I guess)
+class SkAnimatorScript_Eval : public SkScriptCallBackFunction {
+public:
+ SkAnimatorScript_Eval(SkAnimatorScript2* engine) : fEngine(engine) {}
+
+ virtual bool getIndex(const char* name, int len, size_t* result) {
+ if (SK_LITERAL_STR_EQUAL("eval", name, len) != 0)
+ return false;
+ *result = 0;
+ return true;
+ }
+
+ virtual void getParamTypes(SkIntArray(SkOperand2::OpType)* types) {
+ types->setCount(1);
+ SkOperand2::OpType* type = types->begin();
+ type[0] = SkOperand2::kString;
+ }
+
+ virtual bool invoke(size_t index, SkOpArray* params, SkOperand2* answer) {
+ SkAnimatorScript2 engine(fEngine->getMaker(), fEngine->getWorking(),
+ SkAnimatorScript2::ToDisplayType(fEngine->getReturnType()));
+ SkOperand2* op = params->begin();
+ const char* script = op->fString->c_str();
+ SkScriptValue2 value;
+        if (!engine.evaluateScript(&script, &value))
+            return false;
+ SkASSERT(value.fType == fEngine->getReturnType());
+ *answer = value.fOperand;
+ // !!! incomplete ?
+ return true;
+ }
+
+private:
+ SkAnimatorScript2* fEngine;
+};
+
+class SkAnimatorScript_ID : public SkScriptCallBackProperty {
+public:
+ SkAnimatorScript_ID(SkAnimatorScript2* engine) : fEngine(engine) {}
+
+ virtual bool getIndex(const char* token, int len, size_t* result) {
+ SkDisplayable* displayable;
+ bool success = fEngine->getMaker().find(token, len, &displayable);
+ if (success == false) {
+ *result = 0;
+ } else {
+ *result = (size_t) displayable;
+ SkDisplayable* working = fEngine->getWorking();
+ if (displayable->canContainDependents() && working && working->isAnimate()) {
+ SkAnimateBase* animator = (SkAnimateBase*) working;
+ if (animator->isDynamic()) {
+ SkDisplayDepend* depend = (SkDisplayDepend* ) displayable;
+ depend->addDependent(working);
+ }
+ }
+ }
+ return true;
+ }
+
+ virtual bool getResult(size_t ref, SkOperand2* answer) {
+ answer->fObject = (void*) ref;
+ return true;
+ }
+
+ virtual SkOperand2::OpType getReturnType(size_t index) {
+ return index == 0 ? SkOperand2::kString : SkOperand2::kObject;
+ }
+
+private:
+ SkAnimatorScript2* fEngine;
+};
+
+
+class SkAnimatorScript_Member : public SkScriptCallBackMember {
+public:
+
+ SkAnimatorScript_Member(SkAnimatorScript2* engine) : fEngine(engine) {}
+
+ bool getMemberReference(const char* member, size_t len, void* object, SkScriptValue2* ref) {
+ SkDisplayable* displayable = (SkDisplayable*) object;
+ SkString name(member, len);
+ SkDisplayable* named = displayable->contains(name);
+ if (named) {
+ ref->fType = SkOperand2::kObject;
+ ref->fOperand.fObject = named;
+ return true;
+ }
+ const SkMemberInfo* info = displayable->getMember(name.c_str());
+ if (info == nullptr)
+ return false; // !!! add additional error info?
+ ref->fType = SkAnimatorScript2::ToOpType(info->getType());
+ ref->fOperand.fObject = (void*) info;
+ return true;
+ }
+
+ bool invoke(size_t ref, void* object, SkOperand2* value) {
+ const SkMemberInfo* info = (const SkMemberInfo* ) ref;
+ SkDisplayable* displayable = (SkDisplayable*) object;
+ if (info->fType == SkType_MemberProperty) {
+ if (displayable->getProperty2(info->propertyIndex(), value) == false) {
+ return false;
+ }
+ }
+ return fEngine->evalMemberCommon(info, displayable, value);
+ }
+
+ SkAnimatorScript2* fEngine;
+};
+
+
+class SkAnimatorScript_MemberFunction : public SkScriptCallBackMemberFunction {
+public:
+ SkAnimatorScript_MemberFunction(SkAnimatorScript2* engine) : fEngine(engine) {}
+
+ bool getMemberReference(const char* member, size_t len, void* object, SkScriptValue2* ref) {
+ SkDisplayable* displayable = (SkDisplayable*) object;
+ SkString name(member, len);
+ const SkMemberInfo* info = displayable->getMember(name.c_str());
+ if (info == nullptr || info->fType != SkType_MemberFunction)
+ return false; // !!! add additional error info?
+ ref->fType = SkAnimatorScript2::ToOpType(info->getType());
+ ref->fOperand.fObject = (void*) info;
+ return true;
+ }
+
+ virtual void getParamTypes(SkIntArray(SkOperand2::OpType)* types) {
+ types->setCount(3);
+ SkOperand2::OpType* type = types->begin();
+ type[0] = type[1] = type[2] = SkOperand2::kS32;
+ }
+
+ bool invoke(size_t ref, void* object, SkOpArray* params, SkOperand2* value)
+ {
+ const SkMemberInfo* info = (const SkMemberInfo* ) ref;
+ SkDisplayable* displayable = (SkDisplayable*) object;
+ displayable->executeFunction2(displayable, info->functionIndex(), params, info->getType(),
+ value);
+ return fEngine->evalMemberCommon(info, displayable, value);
+ }
+
+ SkAnimatorScript2* fEngine;
+};
+
+
+class SkAnimatorScript_NamedColor : public SkScriptCallBackProperty {
+public:
+ virtual bool getConstValue(const char* name, int len, SkOperand2* value) {
+ return SkParse::FindNamedColor(name, len, (SkColor*) &value->fS32) != nullptr;
+ }
+};
+
+
+class SkAnimatorScript_RGB : public SkScriptCallBackFunction {
+public:
+ virtual bool getIndex(const char* name, int len, size_t* result) {
+ if (SK_LITERAL_STR_EQUAL("rgb", name, len) != 0)
+ return false;
+ *result = 0;
+ return true;
+ }
+
+ virtual void getParamTypes(SkIntArray(SkOperand2::OpType)* types) {
+ types->setCount(3);
+ SkOperand2::OpType* type = types->begin();
+ type[0] = type[1] = type[2] = SkOperand2::kS32;
+ }
+
+ virtual bool invoke(size_t index, SkOpArray* params, SkOperand2* answer) {
+ SkASSERT(index == 0);
+ unsigned result = 0xFF000000;
+ int shift = 16;
+ for (int index = 0; index < 3; index++) {
+ result |= SkClampMax(params->begin()[index].fS32, 255) << shift;
+ shift -= 8;
+ }
+ answer->fS32 = result;
+ return true;
+ }
+
+};
+
+
+class SkAnimatorScript_Unbox : public SkScriptCallBackConvert {
+public:
+ SkAnimatorScript_Unbox(SkAnimatorScript2* engine) : fEngine(engine) {}
+
+ virtual bool convert(SkOperand2::OpType type, SkOperand2* operand) {
+ SkASSERT(type == SkOperand2::kObject);
+ SkDisplayable* displayable = (SkDisplayable*) operand->fObject;
+ switch (displayable->getType()) {
+ case SkType_Array: {
+ SkDisplayArray* boxedValue = (SkDisplayArray*) displayable;
+ operand->fArray = new SkOpArray(SkAnimatorScript2::ToOpType(boxedValue->values.getType()));
+ int count = boxedValue->values.count();
+ operand->fArray->setCount(count);
+ memcpy(operand->fArray->begin(), boxedValue->values.begin(), count * sizeof(SkOperand2));
+ fEngine->track(operand->fArray);
+ } break;
+ case SkType_Boolean: {
+ SkDisplayBoolean* boxedValue = (SkDisplayBoolean*) displayable;
+ operand->fS32 = boxedValue->value;
+ } break;
+ case SkType_Int: {
+ SkDisplayInt* boxedValue = (SkDisplayInt*) displayable;
+ operand->fS32 = boxedValue->value;
+ } break;
+ case SkType_Float: {
+ SkDisplayFloat* boxedValue = (SkDisplayFloat*) displayable;
+ operand->fScalar = boxedValue->value;
+ } break;
+ case SkType_String: {
+ SkDisplayString* boxedValue = (SkDisplayString*) displayable;
+ operand->fString = new SkString(boxedValue->value);
+ } break;
+ default: {
+ const char* id;
+ bool success = fEngine->getMaker().findKey(displayable, &id);
+ SkASSERT(success);
+ operand->fString = new SkString(id);
+ }
+ }
+ return true;
+ }
+
+ virtual SkOperand2::OpType getReturnType(int /*index*/, SkOperand2* operand) {
+ SkDisplayable* displayable = (SkDisplayable*) operand->fObject;
+ switch (displayable->getType()) {
+ case SkType_Array:
+ return SkOperand2::kArray;
+ case SkType_Int:
+ return SkOperand2::kS32;
+ case SkType_Float:
+ return SkOperand2::kScalar;
+ case SkType_String:
+ default:
+ return SkOperand2::kString;
+ }
+ }
+
+ virtual Type getType() const {
+ return kUnbox;
+ }
+
+ SkAnimatorScript2* fEngine;
+};
+
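+// The constructor registers the callback chain: member and member-function lookup, box/unbox
+// conversion, id resolution, rgb() and named colors for ARGB expressions, the enum-name map for
+// enum-typed expressions, and finally eval(); all callbacks are deleted in the destructor.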
+SkAnimatorScript2::SkAnimatorScript2(SkAnimateMaker& maker, SkDisplayable* working, SkDisplayTypes type) :
+ SkScriptEngine2(ToOpType(type)), fMaker(maker), fWorking(working) {
+ *fCallBackArray.append() = new SkAnimatorScript_Member(this);
+ *fCallBackArray.append() = new SkAnimatorScript_MemberFunction(this);
+ *fCallBackArray.append() = new SkAnimatorScript_Box();
+ *fCallBackArray.append() = new SkAnimatorScript_Unbox(this);
+ *fCallBackArray.append() = new SkAnimatorScript_ID(this);
+ if (type == SkType_ARGB) {
+ *fCallBackArray.append() = new SkAnimatorScript_RGB();
+ *fCallBackArray.append() = new SkAnimatorScript_NamedColor();
+ }
+ if (SkDisplayType::IsEnum(&maker, type)) {
+ // !!! for SpiderMonkey, iterate through the enum values, and map them to globals
+ const SkDisplayEnumMap& map = GetEnumValues(type);
+ *fCallBackArray.append() = new SkAnimatorScript_Enum(map.fValues);
+ }
+ *fCallBackArray.append() = new SkAnimatorScript_Eval(this);
+#if 0 // !!! no extra support for now
+ for (SkExtras** extraPtr = maker.fExtras.begin(); extraPtr < maker.fExtras.end(); extraPtr++) {
+ SkExtras* extra = *extraPtr;
+ if (extra->fExtraCallBack)
+ *fCallBackArray.append() = new propertyCallBack(extra->fExtraCallBack, extra->fExtraStorage);
+ }
+#endif
+}
+
+SkAnimatorScript2::~SkAnimatorScript2() {
+ SkScriptCallBack** end = fCallBackArray.end();
+ for (SkScriptCallBack** ptr = fCallBackArray.begin(); ptr < end; ptr++)
+ delete *ptr;
+}
+
+bool SkAnimatorScript2::evalMemberCommon(const SkMemberInfo* info,
+ SkDisplayable* displayable, SkOperand2* value) {
+ SkDisplayTypes original;
+ SkDisplayTypes type = original = (SkDisplayTypes) info->getType();
+ if (info->fType == SkType_Array)
+ type = SkType_Array;
+ switch (type) {
+ case SkType_ARGB:
+ type = SkType_Int;
+ case SkType_Boolean:
+ case SkType_Int:
+ case SkType_MSec:
+ case SkType_Float:
+ SkASSERT(info->getCount() == 1);
+ if (info->fType != SkType_MemberProperty && info->fType != SkType_MemberFunction)
+ value->fS32 = *(int32_t*) info->memberData(displayable); // OK for SkScalar too
+ if (type == SkType_MSec) {
+ value->fScalar = value->fS32 * 0.001f;
+ type = SkType_Float;
+ }
+ break;
+ case SkType_String: {
+ SkString* displayableString;
+ if (info->fType != SkType_MemberProperty && info->fType != SkType_MemberFunction) {
+ info->getString(displayable, &displayableString);
+ value->fString = new SkString(*displayableString);
+ }
+ } break;
+ case SkType_Array: {
+ SkASSERT(info->fType != SkType_MemberProperty); // !!! incomplete
+ SkTDOperandArray* displayableArray = (SkTDOperandArray*) info->memberData(displayable);
+ if (displayable->getType() == SkType_Array) {
+ SkDisplayArray* typedArray = (SkDisplayArray*) displayable;
+ original = typedArray->values.getType();
+ }
+ SkASSERT(original != SkType_Unknown);
+ SkOpArray* array = value->fArray = new SkOpArray(ToOpType(original));
+ track(array);
+ int count = displayableArray->count();
+ if (count > 0) {
+ array->setCount(count);
+ memcpy(array->begin(), displayableArray->begin(), count * sizeof(SkOperand2));
+ }
+ } break;
+ default:
+ SkASSERT(0); // unimplemented
+ }
+ return true;
+}
+
+const SkDisplayEnumMap& SkAnimatorScript2::GetEnumValues(SkDisplayTypes type) {
+ int index = SkTSearch<SkDisplayTypes>(&gEnumMaps[0].fType, gEnumMapCount, type,
+ sizeof(SkDisplayEnumMap));
+ SkASSERT(index >= 0);
+ return gEnumMaps[index];
+}
+
+SkDisplayTypes SkAnimatorScript2::ToDisplayType(SkOperand2::OpType type) {
+ int val = type;
+ switch (val) {
+ case SkOperand2::kNoType:
+ return SkType_Unknown;
+ case SkOperand2::kS32:
+ return SkType_Int;
+ case SkOperand2::kScalar:
+ return SkType_Float;
+ case SkOperand2::kString:
+ return SkType_String;
+ case SkOperand2::kArray:
+ return SkType_Array;
+ case SkOperand2::kObject:
+ return SkType_Displayable;
+ default:
+ SkASSERT(0);
+ return SkType_Unknown;
+ }
+}
+
+SkOperand2::OpType SkAnimatorScript2::ToOpType(SkDisplayTypes type) {
+ if (SkDisplayType::IsDisplayable(nullptr /* fMaker */, type))
+ return SkOperand2::kObject;
+ if (SkDisplayType::IsEnum(nullptr /* fMaker */, type))
+ return SkOperand2::kS32;
+ switch (type) {
+ case SkType_ARGB:
+ case SkType_MSec:
+ case SkType_Int:
+ return SkOperand2::kS32;
+ case SkType_Float:
+ case SkType_Point:
+ case SkType_3D_Point:
+ return SkOperand2::kScalar;
+ case SkType_Base64:
+ case SkType_DynamicString:
+ case SkType_String:
+ return SkOperand2::kString;
+ case SkType_Array:
+ return SkOperand2::kArray;
+ case SkType_Unknown:
+ return SkOperand2::kNoType;
+ default:
+ SkASSERT(0);
+ return SkOperand2::kNoType;
+ }
+}
+
+bool SkAnimatorScript2::MapEnums(const char* ptr, const char* match, size_t len, int* value) {
+ int index = 0;
+ bool more = true;
+ do {
+ const char* last = strchr(ptr, '|');
+ if (last == nullptr) {
+ last = &ptr[strlen(ptr)];
+ more = false;
+ }
+ size_t length = last - ptr;
+ if (len == length && strncmp(ptr, match, length) == 0) {
+ *value = index;
+ return true;
+ }
+ index++;
+ ptr = last + 1;
+ } while (more);
+ return false;
+}
+
+#if defined SK_DEBUG
+
+#include "SkAnimator.h"
+
+static const char scriptTestSetup[] =
+"<screenplay>"
+ "<apply>"
+ "<paint>"
+ "<emboss id='emboss' direction='[1,1,1]' />"
+ "</paint>"
+ "<animateField id='animation' field='direction' target='emboss' from='[1,1,1]' to='[-1,1,1]' dur='1'/>"
+ "<set lval='direction[0]' target='emboss' to='-1' />"
+ "</apply>"
+ "<color id='testColor' color='0 ? rgb(0,0,0) : rgb(255,255,255)' />"
+ "<color id='xColor' color='rgb(12,34,56)' />"
+ "<typedArray id='emptyArray' />"
+ "<typedArray id='intArray' values='[1, 4, 6]' />"
+ "<s32 id='idx' value='2' />"
+ "<s32 id='idy' value='2' />"
+ "<string id='alpha' value='abc' />"
+ "<rectangle id='testRect' left='Math.cos(0)' top='2' right='12' bottom='5' />"
+ "<event id='evt'>"
+ "<input name='x' />"
+ "<apply scope='idy'>"
+ "<set field='value' to='evt.x.s32' />"
+ "</apply>"
+ "</event>"
+"</screenplay>";
+
+static const SkScriptNAnswer scriptTests[] = {
+ { "alpha+alpha", SkType_String, 0, 0, "abcabc" },
+ { "0 ? Math.sin(0) : 1", SkType_Int, 1 },
+ { "intArray[4]", SkType_Unknown },
+ { "emptyArray[4]", SkType_Unknown },
+ { "idx", SkType_Int, 2 },
+ { "intArray.length", SkType_Int, 3 },
+ { "intArray.values[0]", SkType_Int, 1 },
+ { "intArray[0]", SkType_Int, 1 },
+ { "idx.value", SkType_Int, 2 },
+ { "alpha.value", SkType_String, 0, 0, "abc" },
+ { "alpha", SkType_String, 0, 0, "abc" },
+ { "alpha.value+alpha.value", SkType_String, 0, 0, "abcabc" },
+ { "alpha+idx", SkType_String, 0, 0, "abc2" },
+ { "idx+alpha", SkType_String, 0, 0, "2abc" },
+ { "intArray[idx]", SkType_Int, 6 },
+ { "alpha.slice(1,2)", SkType_String, 0, 0, "b" },
+ { "alpha.value.slice(1,2)", SkType_String, 0, 0, "b" },
+ { "Math.sin(0)", SkType_Float, 0, SkIntToScalar(0) },
+ { "testRect.left+2", SkType_Float, 0, SkIntToScalar(3) },
+ { "0 ? intArray[0] : 1", SkType_Int, 1 },
+ { "0 ? intArray.values[0] : 1", SkType_Int, 1 },
+ { "0 ? idx : 1", SkType_Int, 1 },
+ { "0 ? idx.value : 1", SkType_Int, 1 },
+ { "0 ? alpha.slice(1,2) : 1", SkType_Int, 1 },
+ { "0 ? alpha.value.slice(1,2) : 1", SkType_Int, 1 },
+ { "idy", SkType_Int, 3 }
+};
+
+#define SkScriptNAnswer_testCount SK_ARRAY_COUNT(scriptTests)
+
+void SkAnimatorScript2::UnitTest() {
+#if defined(SK_SUPPORT_UNITTEST)
+ SkAnimator animator;
+ SkASSERT(animator.decodeMemory(scriptTestSetup, sizeof(scriptTestSetup)-1));
+ SkEvent evt;
+ evt.setString("id", "evt");
+ evt.setS32("x", 3);
+ animator.doUserEvent(evt);
+ // set up animator with memory script above, then run value tests
+ for (int index = 0; index < SkScriptNAnswer_testCount; index++) {
+ SkAnimatorScript2 engine(*animator.fMaker, nullptr, scriptTests[index].fType);
+ SkScriptValue2 value;
+ const char* script = scriptTests[index].fScript;
+ bool success = engine.evaluateScript(&script, &value);
+ if (success == false) {
+ SkASSERT(scriptTests[index].fType == SkType_Unknown);
+ continue;
+ }
+ SkASSERT(value.fType == ToOpType(scriptTests[index].fType));
+ SkScalar error;
+ switch (value.fType) {
+ case SkOperand2::kS32:
+ SkASSERT(value.fOperand.fS32 == scriptTests[index].fIntAnswer);
+ break;
+ case SkOperand2::kScalar:
+ error = SkScalarAbs(value.fOperand.fScalar - scriptTests[index].fScalarAnswer);
+ SkASSERT(error < SK_Scalar1 / 10000);
+ break;
+ case SkOperand2::kString:
+ SkASSERT(value.fOperand.fString->equals(scriptTests[index].fStringAnswer));
+ break;
+ default:
+ SkASSERT(0);
+ }
+ }
+#endif
+}
+
+#endif
diff --git a/gfx/skia/skia/src/animator/SkAnimatorScript2.h b/gfx/skia/skia/src/animator/SkAnimatorScript2.h
new file mode 100644
index 000000000..c3995f6fd
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkAnimatorScript2.h
@@ -0,0 +1,50 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkAnimatorScript2_DEFINED
+#define SkAnimatorScript2_DEFINED
+
+#include "SkDisplayable.h"
+#include "SkScript2.h"
+#include "SkTypedArray.h"
+
+class SkAnimateMaker;
+struct SkMemberInfo;
+
+#ifndef SkAnimatorScript_DEFINED
+struct SkDisplayEnumMap {
+ SkDisplayTypes fType;
+ const char* fValues;
+};
+#endif
+
+class SkAnimatorScript2 : public SkScriptEngine2 {
+public:
+ SkAnimatorScript2(SkAnimateMaker& , SkDisplayable* working, SkDisplayTypes type);
+ ~SkAnimatorScript2();
+ bool evalMemberCommon(const SkMemberInfo* info,
+ SkDisplayable* displayable, SkOperand2* value);
+ SkAnimateMaker& getMaker() { return fMaker; }
+ SkDisplayable* getWorking() { return fWorking; }
+ static bool MapEnums(const char* ptr, const char* match, size_t len, int* value);
+ static const SkDisplayEnumMap& GetEnumValues(SkDisplayTypes type);
+ static SkDisplayTypes ToDisplayType(SkOperand2::OpType type);
+ static SkOperand2::OpType ToOpType(SkDisplayTypes type);
+private:
+ SkAnimateMaker& fMaker;
+ SkDisplayable* fWorking;
+ friend class SkDump;
+ friend struct SkScriptNAnswer;
+ // illegal
+ SkAnimatorScript2& operator=(const SkAnimatorScript2&);
+#ifdef SK_DEBUG
+public:
+ static void UnitTest();
+#endif
+};
+
+#endif // SkAnimatorScript2_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkBoundable.cpp b/gfx/skia/skia/src/animator/SkBoundable.cpp
new file mode 100644
index 000000000..e784e9f6d
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkBoundable.cpp
@@ -0,0 +1,55 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkBoundable.h"
+#include "SkAnimateMaker.h"
+#include "SkCanvas.h"
+
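+// fBounds.fLeft doubles as a state flag: clearBounds() stores the 0x8000 sentinel meaning no
+// bounds have been recorded (see hasBounds()), while clearBounder() and enableBounder() store
+// 0x7fff and 0 respectively.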
+SkBoundable::SkBoundable() {
+ clearBounds();
+ fBounds.fTop = 0;
+ fBounds.fRight = 0;
+ fBounds.fBottom = 0;
+}
+
+void SkBoundable::clearBounder() {
+ fBounds.fLeft = 0x7fff;
+}
+
+void SkBoundable::getBounds(SkRect* rect) {
+ SkASSERT(rect);
+ if (fBounds.fLeft == (int16_t)0x8000U) {
+ INHERITED::getBounds(rect);
+ return;
+ }
+ rect->fLeft = SkIntToScalar(fBounds.fLeft);
+ rect->fTop = SkIntToScalar(fBounds.fTop);
+ rect->fRight = SkIntToScalar(fBounds.fRight);
+ rect->fBottom = SkIntToScalar(fBounds.fBottom);
+}
+
+void SkBoundable::enableBounder() {
+ fBounds.fLeft = 0;
+}
+
+
+SkBoundableAuto::SkBoundableAuto(SkBoundable* boundable,
+ SkAnimateMaker& maker) : fBoundable(boundable), fMaker(maker) {
+ if (fBoundable->hasBounds()) {
+// fMaker.fCanvas->setBounder(&maker.fDisplayList);
+ fMaker.fDisplayList.fBounds.setEmpty();
+ }
+}
+
+SkBoundableAuto::~SkBoundableAuto() {
+ if (fBoundable->hasBounds() == false)
+ return;
+// fMaker.fCanvas->setBounder(nullptr);
+ fBoundable->setBounds(fMaker.fDisplayList.fBounds);
+}
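
SkBoundable encodes "bounds not yet set" in the rectangle itself: clearBounds() stores the sentinel 0x8000 in fBounds.fLeft, and hasBounds()/getBounds() check for that value before trusting the stored rectangle. A minimal sketch of the same sentinel idiom with simplified types (not the Skia classes):

    #include <cstdint>

    struct IRect16 { int16_t fLeft, fTop, fRight, fBottom; };

    class Bounded {
    public:
        Bounded() { clearBounds(); fBounds.fTop = fBounds.fRight = fBounds.fBottom = 0; }
        bool hasBounds() const { return fBounds.fLeft != (int16_t)0x8000U; }
        void setBounds(const IRect16& r) { fBounds = r; }
    protected:
        // 0x8000 is never a legitimate left edge here, so it doubles as "unset".
        void clearBounds() { fBounds.fLeft = (int16_t)0x8000U; }
        IRect16 fBounds;
    };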
diff --git a/gfx/skia/skia/src/animator/SkBoundable.h b/gfx/skia/skia/src/animator/SkBoundable.h
new file mode 100644
index 000000000..48d130623
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkBoundable.h
@@ -0,0 +1,41 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkBoundable_DEFINED
+#define SkBoundable_DEFINED
+
+#include "SkADrawable.h"
+#include "SkRect.h"
+
+class SkBoundable : public SkADrawable {
+public:
+ SkBoundable();
+ virtual void clearBounder();
+ virtual void enableBounder();
+ virtual void getBounds(SkRect* );
+ bool hasBounds() { return fBounds.fLeft != (int16_t)0x8000U; }
+ void setBounds(SkIRect& bounds) { fBounds = bounds; }
+protected:
+ void clearBounds() { fBounds.fLeft = (int16_t) SkToU16(0x8000); }; // mark bounds as unset
+ SkIRect fBounds;
+private:
+ typedef SkADrawable INHERITED;
+};
+
+class SkBoundableAuto {
+public:
+ SkBoundableAuto(SkBoundable* boundable, SkAnimateMaker& maker);
+ ~SkBoundableAuto();
+private:
+ SkBoundable* fBoundable;
+ SkAnimateMaker& fMaker;
+ SkBoundableAuto& operator= (const SkBoundableAuto& );
+};
+
+#endif // SkBoundable_DEFINED
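
SkBoundableAuto is a small scope guard: the constructor resets the display list's accumulated bounds when the wrapped drawable tracks them, and the destructor copies the accumulated rectangle back through setBounds(). A sketch of that RAII shape using placeholder types, assumed for illustration and not the real SkAnimateMaker or SkDisplayList:

    // Placeholder types standing in for the display list's bounds accumulator
    // and the drawable whose bounds are tracked; not the real Skia classes.
    struct Rect { int left = 0, top = 0, right = 0, bottom = 0; };

    struct Tracked {
        bool wantsBounds = false;
        Rect bounds;
        void setBounds(const Rect& r) { bounds = r; }
    };

    class BoundsScope {                      // same shape as SkBoundableAuto
    public:
        BoundsScope(Tracked* t, Rect& accumulator) : fT(t), fAcc(accumulator) {
            if (fT->wantsBounds)
                fAcc = Rect();               // reset the shared accumulator on entry
        }
        ~BoundsScope() {
            if (fT->wantsBounds)
                fT->setBounds(fAcc);         // publish whatever was gathered in scope
        }
    private:
        Tracked* fT;
        Rect&    fAcc;
        BoundsScope& operator=(const BoundsScope&) = delete;
    };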
diff --git a/gfx/skia/skia/src/animator/SkBuildCondensedInfo.cpp b/gfx/skia/skia/src/animator/SkBuildCondensedInfo.cpp
new file mode 100644
index 000000000..84a591f2c
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkBuildCondensedInfo.cpp
@@ -0,0 +1,282 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkTypes.h"
+#if defined SK_BUILD_CONDENSED
+#include "SkMemberInfo.h"
+#if SK_USE_CONDENSED_INFO == 1
+#error "SK_USE_CONDENSED_INFO must be zero to build condensed info"
+#endif
+#if !defined SK_BUILD_FOR_WIN32
+#error "SK_BUILD_FOR_WIN32 must be defined to build condensed info"
+#endif
+#include "SkDisplayType.h"
+#include "SkIntArray.h"
+#include <stdio.h>
+
+SkTDMemberInfoArray gInfos;
+SkTDIntArray gInfosCounts;
+SkTDDisplayTypesArray gInfosTypeIDs;
+SkTDMemberInfoArray gUnknowns;
+SkTDIntArray gUnknownsCounts;
+
+static void AddInfo(SkDisplayTypes type, const SkMemberInfo* info, int infoCount) {
+ SkASSERT(gInfos[type] == nullptr);
+ gInfos[type] = info;
+ gInfosCounts[type] = infoCount;
+ *gInfosTypeIDs.append() = type;
+ size_t allStrs = 0;
+ for (int inner = 0; inner < infoCount; inner++) {
+ SkASSERT(info[inner].fCount < 256);
+ int offset = (int) info[inner].fOffset;
+ SkASSERT(offset < 128 && offset > -129);
+ SkASSERT(allStrs < 256);
+ if (info[inner].fType == SkType_BaseClassInfo) {
+ const SkMemberInfo* innerInfo = (const SkMemberInfo*) info[inner].fName;
+ if (gUnknowns.find(innerInfo) == -1) {
+ *gUnknowns.append() = innerInfo;
+ *gUnknownsCounts.append() = info[inner].fCount;
+ }
+ }
+ if (info[inner].fType != SkType_BaseClassInfo && info[inner].fName)
+ allStrs += strlen(info[inner].fName);
+ allStrs += 1;
+ SkASSERT(info[inner].fType < 256);
+ }
+}
+
+static void WriteInfo(FILE* condensed, const SkMemberInfo* info, int infoCount,
+ const char* typeName, bool draw, bool display) {
+ fprintf(condensed, "static const char g%sStrings[] = \n", typeName);
+ int inner;
+ // write strings
+ for (inner = 0; inner < infoCount; inner++) {
+ const char* name = (info[inner].fType != SkType_BaseClassInfo && info[inner].fName) ?
+ info[inner].fName : "";
+ const char* zero = inner < infoCount - 1 ? "\\0" : "";
+ fprintf(condensed, "\t\"%s%s\"\n", name, zero);
+ }
+ fprintf(condensed, ";\n\nstatic const SkMemberInfo g%s", draw ? "Draw" : display ? "Display" : "");
+ fprintf(condensed, "%sInfo[] = {", typeName);
+ size_t nameOffset = 0;
+ // write info tables
+ for (inner = 0; inner < infoCount; inner++) {
+ size_t offset = info[inner].fOffset;
+ if (info[inner].fType == SkType_BaseClassInfo) {
+ offset = (size_t) gInfos.find((const SkMemberInfo* ) info[inner].fName);
+ SkASSERT((int) offset >= 0);
+ offset = gInfosTypeIDs.find((SkDisplayTypes) offset);
+ SkASSERT((int) offset >= 0);
+ }
+ fprintf(condensed, "\n\t{%d, %d, %d, %d}", nameOffset, offset,
+ info[inner].fType, info[inner].fCount);
+ if (inner < infoCount - 1)
+ putc(',', condensed);
+ if (info[inner].fType != SkType_BaseClassInfo && info[inner].fName)
+ nameOffset += strlen(info[inner].fName);
+ nameOffset += 1;
+ }
+ fprintf(condensed, "\n};\n\n");
+}
+
+static void Get3DName(char* scratch, const char* name) {
+ if (strncmp("skia3d:", name, sizeof("skia3d:") - 1) == 0) {
+ strcpy(scratch, "3D_");
+ scratch[3]= name[7] & ~0x20;
+ strcpy(&scratch[4], &name[8]);
+ } else {
+ scratch[0] = name[0] & ~0x20;
+ strcpy(&scratch[1], &name[1]);
+ }
+}
+
+int type_compare(const void* a, const void* b) {
+ SkDisplayTypes first = *(SkDisplayTypes*) a;
+ SkDisplayTypes second = *(SkDisplayTypes*) b;
+ return first < second ? -1 : first == second ? 0 : 1;
+}
+
+void SkDisplayType::BuildCondensedInfo(SkAnimateMaker* maker) {
+ gInfos.setCount(kNumberOfTypes);
+ memset(gInfos.begin(), 0, sizeof(gInfos[0]) * kNumberOfTypes);
+ gInfosCounts.setCount(kNumberOfTypes);
+ memset(gInfosCounts.begin(), -1, sizeof(gInfosCounts[0]) * kNumberOfTypes);
+ // check to see if it is condensable
+ int index, infoCount;
+ for (index = 0; index < kTypeNamesSize; index++) {
+ const SkMemberInfo* info = GetMembers(maker, gTypeNames[index].fType, &infoCount);
+ if (info == nullptr)
+ continue;
+ AddInfo(gTypeNames[index].fType, info, infoCount);
+ }
+ const SkMemberInfo* extraInfo =
+ SkDisplayType::GetMembers(maker, SkType_3D_Point, &infoCount);
+ AddInfo(SkType_Point, extraInfo, infoCount);
+ AddInfo(SkType_3D_Point, extraInfo, infoCount);
+// int baseInfos = gInfos.count();
+ do {
+ SkTDMemberInfoArray oldRefs = gUnknowns;
+ SkTDIntArray oldRefCounts = gUnknownsCounts;
+ gUnknowns.reset();
+ gUnknownsCounts.reset();
+ for (index = 0; index < oldRefs.count(); index++) {
+ const SkMemberInfo* info = oldRefs[index];
+ if (gInfos.find(info) == -1) {
+ int typeIndex = 0;
+ for (; typeIndex < kNumberOfTypes; typeIndex++) {
+ const SkMemberInfo* temp = SkDisplayType::GetMembers(
+ maker, (SkDisplayTypes) typeIndex, nullptr);
+ if (temp == info)
+ break;
+ }
+ SkASSERT(typeIndex < kNumberOfTypes);
+ AddInfo((SkDisplayTypes) typeIndex, info, oldRefCounts[index]);
+ }
+ }
+ } while (gUnknowns.count() > 0);
+ qsort(gInfosTypeIDs.begin(), gInfosTypeIDs.count(), sizeof(gInfosTypeIDs[0]), &type_compare);
+#ifdef SK_DEBUG
+ FILE* condensed = fopen("../../src/animator/SkCondensedDebug.inc", "w+");
+ fprintf(condensed, "#include \"SkTypes.h\"\n");
+ fprintf(condensed, "#ifdef SK_DEBUG\n");
+#else
+ FILE* condensed = fopen("../../src/animator/SkCondensedRelease.inc", "w+");
+ fprintf(condensed, "#include \"SkTypes.h\"\n");
+ fprintf(condensed, "#ifdef SK_RELEASE\n");
+#endif
+ // write header
+ fprintf(condensed, "// This file was automatically generated.\n");
+ fprintf(condensed, "// To change it, edit the file with the matching debug info.\n");
+ fprintf(condensed, "// Then execute SkDisplayType::BuildCondensedInfo() to "
+ "regenerate this file.\n\n");
+ // write name of memberInfo
+ int typeNameIndex = 0;
+ int unknown = 1;
+ for (index = 0; index < gInfos.count(); index++) {
+ const SkMemberInfo* info = gInfos[index];
+ if (info == nullptr)
+ continue;
+ char scratch[64];
+ bool drawPrefix, displayPrefix;
+ while (gTypeNames[typeNameIndex].fType < index)
+ typeNameIndex++;
+ if (gTypeNames[typeNameIndex].fType == index) {
+ Get3DName(scratch, gTypeNames[typeNameIndex].fName);
+ drawPrefix = gTypeNames[typeNameIndex].fDrawPrefix;
+ displayPrefix = gTypeNames[typeNameIndex].fDisplayPrefix;
+ } else {
+ sprintf(scratch, "Unknown%d", unknown++);
+ drawPrefix = displayPrefix = false;
+ }
+ WriteInfo(condensed, info, gInfosCounts[index], scratch, drawPrefix, displayPrefix);
+ }
+ // write array of table pointers
+// start here;
+ fprintf(condensed, "static const SkMemberInfo* const gInfoTables[] = {");
+ typeNameIndex = 0;
+ unknown = 1;
+ for (index = 0; index < gInfos.count(); index++) {
+ const SkMemberInfo* info = gInfos[index];
+ if (info == nullptr)
+ continue;
+ char scratch[64];
+ bool drawPrefix, displayPrefix;
+ while (gTypeNames[typeNameIndex].fType < index)
+ typeNameIndex++;
+ if (gTypeNames[typeNameIndex].fType == index) {
+ Get3DName(scratch, gTypeNames[typeNameIndex].fName);
+ drawPrefix = gTypeNames[typeNameIndex].fDrawPrefix;
+ displayPrefix = gTypeNames[typeNameIndex].fDisplayPrefix;
+ } else {
+ sprintf(scratch, "Unknown%d", unknown++);
+ drawPrefix = displayPrefix = false;
+ }
+ fprintf(condensed, "\n\tg");
+ if (drawPrefix)
+ fprintf(condensed, "Draw");
+ if (displayPrefix)
+ fprintf(condensed, "Display");
+ fprintf(condensed, "%sInfo", scratch);
+ if (index < gInfos.count() - 1)
+ putc(',', condensed);
+ }
+ fprintf(condensed, "\n};\n\n");
+ // write the array of number of entries in the info table
+ fprintf(condensed, "static const unsigned char gInfoCounts[] = {\n\t");
+ int written = 0;
+ for (index = 0; index < gInfosCounts.count(); index++) {
+ int count = gInfosCounts[index];
+ if (count < 0)
+ continue;
+ if (written > 0)
+ putc(',', condensed);
+ if (written % 20 == 19)
+ fprintf(condensed, "\n\t");
+ fprintf(condensed, "%d",count);
+ written++;
+ }
+ fprintf(condensed, "\n};\n\n");
+ // write array of type ids table entries correspond to
+ fprintf(condensed, "static const unsigned char gTypeIDs[] = {\n\t");
+ int typeIDCount = 0;
+ typeNameIndex = 0;
+ unknown = 1;
+ for (index = 0; index < gInfosCounts.count(); index++) {
+ const SkMemberInfo* info = gInfos[index];
+ if (info == nullptr)
+ continue;
+ typeIDCount++;
+ char scratch[64];
+ while (gTypeNames[typeNameIndex].fType < index)
+ typeNameIndex++;
+ if (gTypeNames[typeNameIndex].fType == index) {
+ Get3DName(scratch, gTypeNames[typeNameIndex].fName);
+ } else
+ sprintf(scratch, "Unknown%d", unknown++);
+ fprintf(condensed, "%d%c // %s\n\t", index,
+ index < gInfosCounts.count() ? ',' : ' ', scratch);
+ }
+ fprintf(condensed, "\n};\n\n");
+ fprintf(condensed, "static const int kTypeIDs = %d;\n\n", typeIDCount);
+ // write the array of string pointers
+ fprintf(condensed, "static const char* const gInfoNames[] = {");
+ typeNameIndex = 0;
+ unknown = 1;
+ written = 0;
+ for (index = 0; index < gInfosCounts.count(); index++) {
+ const SkMemberInfo* info = gInfos[index];
+ if (info == nullptr)
+ continue;
+ if (written > 0)
+ putc(',', condensed);
+ written++;
+ fprintf(condensed, "\n\tg");
+ char scratch[64];
+ while (gTypeNames[typeNameIndex].fType < index)
+ typeNameIndex++;
+ if (gTypeNames[typeNameIndex].fType == index) {
+ Get3DName(scratch, gTypeNames[typeNameIndex].fName);
+ } else
+ sprintf(scratch, "Unknown%d", unknown++);
+ fprintf(condensed, "%sStrings", scratch);
+ }
+ fprintf(condensed, "\n};\n\n");
+ fprintf(condensed, "#endif\n");
+ fclose(condensed);
+ gInfos.reset();
+ gInfosCounts.reset();
+ gInfosTypeIDs.reset();
+ gUnknowns.reset();
+ gUnknownsCounts.reset();
+}
+
+#elif defined SK_DEBUG
+#include "SkDisplayType.h"
+void SkDisplayType::BuildCondensedInfo(SkAnimateMaker* ) {}
+#endif
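
WriteInfo() above emits two parallel C arrays per type: one char array holding every member name back to back, separated by embedded NULs, and one record array whose first field is each name's byte offset into that block. A standalone sketch of that generation step; the Entry record name is a stand-in for SkMemberInfo:

    #include <cstdio>
    #include <cstring>

    // Write a packed, NUL-separated name table plus {offset, value} records,
    // the same layout the generator above produces for g<Type>Strings/g<Type>Info.
    static void writeTable(FILE* out, const char* names[], const int values[], int n) {
        fprintf(out, "static const char gStrings[] =\n");
        for (int i = 0; i < n; i++)
            fprintf(out, "\t\"%s%s\"\n", names[i], i < n - 1 ? "\\0" : "");
        fprintf(out, ";\n\nstatic const Entry gInfo[] = {");
        size_t nameOffset = 0;
        for (int i = 0; i < n; i++) {
            fprintf(out, "\n\t{%zu, %d}%s", nameOffset, values[i], i < n - 1 ? "," : "");
            nameOffset += strlen(names[i]) + 1;   // +1 for the embedded NUL
        }
        fprintf(out, "\n};\n");
    }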
diff --git a/gfx/skia/skia/src/animator/SkCondensedDebug.inc b/gfx/skia/skia/src/animator/SkCondensedDebug.inc
new file mode 100644
index 000000000..dcebe0046
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkCondensedDebug.inc
@@ -0,0 +1,1387 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkTypes.h"
+#ifndef SK_BUILD_FOR_UNIX
+#ifdef SK_DEBUG
+// This file was automatically generated.
+// To change it, edit the file with the matching debug info.
+// Then execute SkDisplayType::BuildCondensedInfo() to regenerate this file.
+
+static const char gMathStrings[] =
+ "E\0"
+ "LN10\0"
+ "LN2\0"
+ "LOG10E\0"
+ "LOG2E\0"
+ "PI\0"
+ "SQRT1_2\0"
+ "SQRT2\0"
+ "abs\0"
+ "acos\0"
+ "asin\0"
+ "atan\0"
+ "atan2\0"
+ "ceil\0"
+ "cos\0"
+ "exp\0"
+ "floor\0"
+ "log\0"
+ "max\0"
+ "min\0"
+ "pow\0"
+ "random\0"
+ "round\0"
+ "sin\0"
+ "sqrt\0"
+ "tan"
+;
+
+static const SkMemberInfo gMathInfo[] = {
+ {0, -1, 67, 98},
+ {2, -2, 67, 98},
+ {7, -3, 67, 98},
+ {11, -4, 67, 98},
+ {18, -5, 67, 98},
+ {24, -6, 67, 98},
+ {27, -7, 67, 98},
+ {35, -8, 67, 98},
+ {41, -1, 66, 98},
+ {45, -2, 66, 98},
+ {50, -3, 66, 98},
+ {55, -4, 66, 98},
+ {60, -5, 66, 98},
+ {66, -6, 66, 98},
+ {71, -7, 66, 98},
+ {75, -8, 66, 98},
+ {79, -9, 66, 98},
+ {85, -10, 66, 98},
+ {89, -11, 66, 98},
+ {93, -12, 66, 98},
+ {97, -13, 66, 98},
+ {101, -14, 66, 98},
+ {108, -15, 66, 98},
+ {114, -16, 66, 98},
+ {118, -17, 66, 98},
+ {123, -18, 66, 98}
+};
+
+static const char gAddStrings[] =
+ "inPlace\0"
+ "offset\0"
+ "use\0"
+ "where"
+;
+
+static const SkMemberInfo gAddInfo[] = {
+ {0, 16, 26, 1},
+ {8, 20, 96, 1},
+ {15, 24, 37, 1},
+ {19, 28, 37, 1}
+};
+
+static const char gAddCircleStrings[] =
+ "\0"
+ "radius\0"
+ "x\0"
+ "y"
+;
+
+static const SkMemberInfo gAddCircleInfo[] = {
+ {0, 3, 18, 1},
+ {1, 24, 98, 1},
+ {8, 28, 98, 1},
+ {10, 32, 98, 1}
+};
+
+static const char gUnknown1Strings[] =
+ "direction"
+;
+
+static const SkMemberInfo gUnknown1Info[] = {
+ {0, 20, 75, 1}
+};
+
+static const char gAddOvalStrings[] =
+ ""
+;
+
+static const SkMemberInfo gAddOvalInfo[] = {
+ {0, 6, 18, 5}
+};
+
+static const char gAddPathStrings[] =
+ "matrix\0"
+ "path"
+;
+
+static const SkMemberInfo gAddPathInfo[] = {
+ {0, 20, 65, 1},
+ {7, 24, 74, 1}
+};
+
+static const char gAddRectangleStrings[] =
+ "\0"
+ "bottom\0"
+ "left\0"
+ "right\0"
+ "top"
+;
+
+static const SkMemberInfo gAddRectangleInfo[] = {
+ {0, 3, 18, 1},
+ {1, 36, 98, 1},
+ {8, 24, 98, 1},
+ {13, 32, 98, 1},
+ {19, 28, 98, 1}
+};
+
+static const char gAddRoundRectStrings[] =
+ "\0"
+ "rx\0"
+ "ry"
+;
+
+static const SkMemberInfo gAddRoundRectInfo[] = {
+ {0, 6, 18, 5},
+ {1, 40, 98, 1},
+ {4, 44, 98, 1}
+};
+
+static const char gUnknown2Strings[] =
+ "begin\0"
+ "blend\0"
+ "dur\0"
+ "dynamic\0"
+ "field\0"
+ "formula\0"
+ "from\0"
+ "mirror\0"
+ "repeat\0"
+ "reset\0"
+ "target\0"
+ "to\0"
+ "values"
+;
+
+static const SkMemberInfo gUnknown2Info[] = {
+ {0, 16, 71, 1},
+ {6, 20, 119, 98},
+ {12, 36, 71, 1},
+ {16, -1, 67, 26},
+ {24, 40, 108, 2},
+ {30, 48, 40, 2},
+ {38, 56, 40, 2},
+ {43, -2, 67, 26},
+ {50, 64, 98, 1},
+ {57, -3, 67, 26},
+ {63, 68, 40, 2},
+ {70, 76, 40, 2},
+ {73, -4, 67, 40}
+};
+
+static const char gAnimateFieldStrings[] =
+ ""
+;
+
+static const SkMemberInfo gAnimateFieldInfo[] = {
+ {0, 8, 18, 13}
+};
+
+static const char gApplyStrings[] =
+ "animator\0"
+ "begin\0"
+ "dontDraw\0"
+ "dynamicScope\0"
+ "interval\0"
+ "mode\0"
+ "pickup\0"
+ "restore\0"
+ "scope\0"
+ "step\0"
+ "steps\0"
+ "time\0"
+ "transition"
+;
+
+static const SkMemberInfo gApplyInfo[] = {
+ {0, -1, 67, 10},
+ {9, 16, 71, 1},
+ {15, 20, 26, 1},
+ {24, 24, 108, 2},
+ {37, 32, 71, 1},
+ {46, 36, 13, 1},
+ {51, 40, 26, 1},
+ {58, 44, 26, 1},
+ {66, 48, 37, 1},
+ {72, -2, 67, 96},
+ {77, 52, 96, 1},
+ {83, -3, 67, 71},
+ {88, 56, 14, 1}
+};
+
+static const char gUnknown3Strings[] =
+ "x\0"
+ "y"
+;
+
+static const SkMemberInfo gUnknown3Info[] = {
+ {0, 48, 98, 1},
+ {2, 52, 98, 1}
+};
+
+static const char gBitmapStrings[] =
+ "\0"
+ "erase\0"
+ "format\0"
+ "height\0"
+ "rowBytes\0"
+ "width"
+;
+
+static const SkMemberInfo gDrawBitmapInfo[] = {
+ {0, 11, 18, 2},
+ {1, -1, 67, 15},
+ {7, 56, 21, 1},
+ {14, 60, 96, 1},
+ {21, 64, 96, 1},
+ {30, 68, 96, 1}
+};
+
+static const char gBitmapShaderStrings[] =
+ "\0"
+ "filterType\0"
+ "image"
+;
+
+static const SkMemberInfo gDrawBitmapShaderInfo[] = {
+ {0, 67, 18, 2},
+ {1, 28, 47, 1},
+ {12, 32, 17, 1}
+};
+
+static const char gBlurStrings[] =
+ "blurStyle\0"
+ "radius"
+;
+
+static const SkMemberInfo gDrawBlurInfo[] = {
+ {0, 24, 63, 1},
+ {10, 20, 98, 1}
+};
+
+static const char gBoundsStrings[] =
+ "\0"
+ "inval"
+;
+
+static const SkMemberInfo gDisplayBoundsInfo[] = {
+ {0, 58, 18, 7},
+ {1, 44, 26, 1}
+};
+
+static const char gClipStrings[] =
+ "path\0"
+ "rectangle"
+;
+
+static const SkMemberInfo gDrawClipInfo[] = {
+ {0, 20, 74, 1},
+ {5, 16, 91, 1}
+};
+
+static const char gColorStrings[] =
+ "alpha\0"
+ "blue\0"
+ "color\0"
+ "green\0"
+ "hue\0"
+ "red\0"
+ "saturation\0"
+ "value"
+;
+
+static const SkMemberInfo gDrawColorInfo[] = {
+ {0, -1, 67, 98},
+ {6, -2, 67, 98},
+ {11, 20, 15, 1},
+ {17, -3, 67, 98},
+ {23, -4, 67, 98},
+ {27, -5, 67, 98},
+ {31, -6, 67, 98},
+ {42, -7, 67, 98}
+};
+
+static const char gCubicToStrings[] =
+ "x1\0"
+ "x2\0"
+ "x3\0"
+ "y1\0"
+ "y2\0"
+ "y3"
+;
+
+static const SkMemberInfo gCubicToInfo[] = {
+ {0, 20, 98, 1},
+ {3, 28, 98, 1},
+ {6, 36, 98, 1},
+ {9, 24, 98, 1},
+ {12, 32, 98, 1},
+ {15, 40, 98, 1}
+};
+
+static const char gDashStrings[] =
+ "intervals\0"
+ "phase"
+;
+
+static const SkMemberInfo gDashInfo[] = {
+ {0, 20, 119, 98},
+ {10, 36, 98, 1}
+};
+
+static const char gDataStrings[] =
+ "\0"
+ "name"
+;
+
+static const SkMemberInfo gDataInfo[] = {
+ {0, 33, 18, 3},
+ {1, 32, 108, 2}
+};
+
+static const char gDiscreteStrings[] =
+ "deviation\0"
+ "segLength"
+;
+
+static const SkMemberInfo gDiscreteInfo[] = {
+ {0, 20, 98, 1},
+ {10, 24, 98, 1}
+};
+
+static const char gDrawToStrings[] =
+ "drawOnce\0"
+ "use"
+;
+
+static const SkMemberInfo gDrawToInfo[] = {
+ {0, 72, 26, 1},
+ {9, 76, 19, 1}
+};
+
+static const char gDumpStrings[] =
+ "displayList\0"
+ "eventList\0"
+ "events\0"
+ "groups\0"
+ "name\0"
+ "posts"
+;
+
+static const SkMemberInfo gDumpInfo[] = {
+ {0, 16, 26, 1},
+ {12, 20, 26, 1},
+ {22, 24, 26, 1},
+ {29, 36, 26, 1},
+ {36, 28, 108, 2},
+ {41, 40, 26, 1}
+};
+
+static const char gEmbossStrings[] =
+ "ambient\0"
+ "direction\0"
+ "radius\0"
+ "specular"
+;
+
+static const SkMemberInfo gDrawEmbossInfo[] = {
+ {0, -1, 67, 98},
+ {8, 20, 119, 98},
+ {18, 36, 98, 1},
+ {25, -2, 67, 98}
+};
+
+static const char gEventStrings[] =
+ "code\0"
+ "disable\0"
+ "key\0"
+ "keys\0"
+ "kind\0"
+ "target\0"
+ "x\0"
+ "y"
+;
+
+static const SkMemberInfo gDisplayEventInfo[] = {
+ {0, 16, 43, 1},
+ {5, 20, 26, 1},
+ {13, -1, 67, 108},
+ {17, -2, 67, 108},
+ {22, 24, 44, 1},
+ {27, 28, 108, 2},
+ {34, 36, 98, 1},
+ {36, 40, 98, 1}
+};
+
+static const char gFromPathStrings[] =
+ "mode\0"
+ "offset\0"
+ "path"
+;
+
+static const SkMemberInfo gFromPathInfo[] = {
+ {0, 20, 49, 1},
+ {5, 24, 98, 1},
+ {12, 28, 74, 1}
+};
+
+static const char gUnknown4Strings[] =
+ "\0"
+ "offsets\0"
+ "unitMapper"
+;
+
+static const SkMemberInfo gUnknown4Info[] = {
+ {0, 67, 18, 2},
+ {1, 28, 119, 98},
+ {9, 44, 108, 2}
+};
+
+static const char gGStrings[] =
+ "condition\0"
+ "enableCondition"
+;
+
+static const SkMemberInfo gGInfo[] = {
+ {0, 16, 40, 2},
+ {10, 24, 40, 2}
+};
+
+static const char gHitClearStrings[] =
+ "targets"
+;
+
+static const SkMemberInfo gHitClearInfo[] = {
+ {0, 16, 119, 36}
+};
+
+static const char gHitTestStrings[] =
+ "bullets\0"
+ "hits\0"
+ "targets\0"
+ "value"
+;
+
+static const SkMemberInfo gHitTestInfo[] = {
+ {0, 16, 119, 36},
+ {8, 32, 119, 96},
+ {13, 48, 119, 36},
+ {21, 64, 26, 1}
+};
+
+static const char gImageStrings[] =
+ "\0"
+ "base64\0"
+ "src"
+;
+
+static const SkMemberInfo gImageInfo[] = {
+ {0, 11, 18, 2},
+ {1, 56, 16, 2},
+ {8, 64, 108, 2}
+};
+
+static const char gIncludeStrings[] =
+ "src"
+;
+
+static const SkMemberInfo gIncludeInfo[] = {
+ {0, 16, 108, 2}
+};
+
+static const char gInputStrings[] =
+ "s32\0"
+ "scalar\0"
+ "string"
+;
+
+static const SkMemberInfo gInputInfo[] = {
+ {0, 16, 96, 1},
+ {4, 20, 98, 1},
+ {11, 24, 108, 2}
+};
+
+static const char gLineStrings[] =
+ "x1\0"
+ "x2\0"
+ "y1\0"
+ "y2"
+;
+
+static const SkMemberInfo gLineInfo[] = {
+ {0, 24, 98, 1},
+ {3, 28, 98, 1},
+ {6, 32, 98, 1},
+ {9, 36, 98, 1}
+};
+
+static const char gLineToStrings[] =
+ "x\0"
+ "y"
+;
+
+static const SkMemberInfo gLineToInfo[] = {
+ {0, 20, 98, 1},
+ {2, 24, 98, 1}
+};
+
+static const char gLinearGradientStrings[] =
+ "\0"
+ "points"
+;
+
+static const SkMemberInfo gLinearGradientInfo[] = {
+ {0, 27, 18, 3},
+ {1, 88, 77, 4}
+};
+
+static const char gMatrixStrings[] =
+ "matrix\0"
+ "perspectX\0"
+ "perspectY\0"
+ "rotate\0"
+ "scale\0"
+ "scaleX\0"
+ "scaleY\0"
+ "skewX\0"
+ "skewY\0"
+ "translate\0"
+ "translateX\0"
+ "translateY"
+;
+
+static const SkMemberInfo gDrawMatrixInfo[] = {
+ {0, 16, 119, 98},
+ {7, -1, 67, 98},
+ {17, -2, 67, 98},
+ {27, -3, 67, 98},
+ {34, -4, 67, 98},
+ {40, -5, 67, 98},
+ {47, -6, 67, 98},
+ {54, -7, 67, 98},
+ {60, -8, 67, 98},
+ {66, -9, 67, 77},
+ {76, -10, 67, 98},
+ {87, -11, 67, 98}
+};
+
+static const char gMoveStrings[] =
+ ""
+;
+
+static const SkMemberInfo gMoveInfo[] = {
+ {0, 1, 18, 4}
+};
+
+static const char gMoveToStrings[] =
+ "x\0"
+ "y"
+;
+
+static const SkMemberInfo gMoveToInfo[] = {
+ {0, 20, 98, 1},
+ {2, 24, 98, 1}
+};
+
+static const char gMovieStrings[] =
+ "src"
+;
+
+static const SkMemberInfo gMovieInfo[] = {
+ {0, 16, 108, 2}
+};
+
+static const char gOvalStrings[] =
+ ""
+;
+
+static const SkMemberInfo gOvalInfo[] = {
+ {0, 58, 18, 7}
+};
+
+static const char gPaintStrings[] =
+ "antiAlias\0"
+ "ascent\0"
+ "color\0"
+ "descent\0"
+ "filterType\0"
+ "linearText\0"
+ "maskFilter\0"
+ "measureText\0"
+ "pathEffect\0"
+ "shader\0"
+ "strikeThru\0"
+ "stroke\0"
+ "strokeCap\0"
+ "strokeJoin\0"
+ "strokeMiter\0"
+ "strokeWidth\0"
+ "style\0"
+ "textAlign\0"
+ "textScaleX\0"
+ "textSize\0"
+ "textSkewX\0"
+ "textTracking\0"
+ "typeface\0"
+ "underline\0"
+ "xfermode"
+;
+
+static const SkMemberInfo gDrawPaintInfo[] = {
+ {0, 16, 26, 1},
+ {10, -1, 67, 98},
+ {17, 20, 31, 1},
+ {23, -2, 67, 98},
+ {31, 24, 47, 1},
+ {42, 28, 26, 1},
+ {53, 32, 62, 1},
+ {64, -1, 66, 98},
+ {76, 36, 76, 1},
+ {87, 40, 102, 1},
+ {94, 44, 26, 1},
+ {105, 48, 26, 1},
+ {112, 52, 27, 1},
+ {122, 56, 58, 1},
+ {133, 60, 98, 1},
+ {145, 64, 98, 1},
+ {157, 68, 109, 1},
+ {163, 72, 9, 1},
+ {173, 76, 98, 1},
+ {184, 80, 98, 1},
+ {193, 84, 98, 1},
+ {203, 88, 98, 1},
+ {216, 92, 120, 1},
+ {225, 96, 26, 1},
+ {235, 100, 121, 1}
+};
+
+static const char gPathStrings[] =
+ "d\0"
+ "fillType\0"
+ "length"
+;
+
+static const SkMemberInfo gDrawPathInfo[] = {
+ {0, 52, 108, 2},
+ {2, -1, 67, 46},
+ {11, -2, 67, 98}
+};
+
+static const char gUnknown5Strings[] =
+ "x\0"
+ "y\0"
+ "z"
+;
+
+static const SkMemberInfo gUnknown5Info[] = {
+ {0, 0, 98, 1},
+ {2, 4, 98, 1},
+ {4, 8, 98, 1}
+};
+
+static const char gPointStrings[] =
+ "x\0"
+ "y"
+;
+
+static const SkMemberInfo gDrawPointInfo[] = {
+ {0, 16, 98, 1},
+ {2, 20, 98, 1}
+};
+
+static const char gPolyToPolyStrings[] =
+ "destination\0"
+ "source"
+;
+
+static const SkMemberInfo gPolyToPolyInfo[] = {
+ {0, 24, 80, 1},
+ {12, 20, 80, 1}
+};
+
+static const char gPolygonStrings[] =
+ ""
+;
+
+static const SkMemberInfo gPolygonInfo[] = {
+ {0, 48, 18, 1}
+};
+
+static const char gPolylineStrings[] =
+ "points"
+;
+
+static const SkMemberInfo gPolylineInfo[] = {
+ {0, 88, 119, 98}
+};
+
+static const char gPostStrings[] =
+ "delay\0"
+ "initialized\0"
+ "mode\0"
+ "sink\0"
+ "target\0"
+ "type"
+;
+
+static const SkMemberInfo gPostInfo[] = {
+ {0, 16, 71, 1},
+ {6, 20, 26, 1},
+ {18, 24, 45, 1},
+ {23, -1, 67, 108},
+ {28, -2, 67, 108},
+ {35, -3, 67, 108}
+};
+
+static const char gQuadToStrings[] =
+ "x1\0"
+ "x2\0"
+ "y1\0"
+ "y2"
+;
+
+static const SkMemberInfo gQuadToInfo[] = {
+ {0, 20, 98, 1},
+ {3, 28, 98, 1},
+ {6, 24, 98, 1},
+ {9, 32, 98, 1}
+};
+
+static const char gRCubicToStrings[] =
+ ""
+;
+
+static const SkMemberInfo gRCubicToInfo[] = {
+ {0, 18, 18, 6}
+};
+
+static const char gRLineToStrings[] =
+ ""
+;
+
+static const SkMemberInfo gRLineToInfo[] = {
+ {0, 35, 18, 2}
+};
+
+static const char gRMoveToStrings[] =
+ ""
+;
+
+static const SkMemberInfo gRMoveToInfo[] = {
+ {0, 39, 18, 2}
+};
+
+static const char gRQuadToStrings[] =
+ ""
+;
+
+static const SkMemberInfo gRQuadToInfo[] = {
+ {0, 50, 18, 4}
+};
+
+static const char gRadialGradientStrings[] =
+ "\0"
+ "center\0"
+ "radius"
+;
+
+static const SkMemberInfo gRadialGradientInfo[] = {
+ {0, 27, 18, 3},
+ {1, 88, 77, 2},
+ {8, 96, 98, 1}
+};
+
+static const char gRandomStrings[] =
+ "blend\0"
+ "max\0"
+ "min\0"
+ "random\0"
+ "seed"
+;
+
+static const SkMemberInfo gDisplayRandomInfo[] = {
+ {0, 16, 98, 1},
+ {6, 24, 98, 1},
+ {10, 20, 98, 1},
+ {14, 1, 67, 98},
+ {21, -2, 67, 96}
+};
+
+static const char gRectToRectStrings[] =
+ "destination\0"
+ "source"
+;
+
+static const SkMemberInfo gRectToRectInfo[] = {
+ {0, 24, 91, 1},
+ {12, 20, 91, 1}
+};
+
+static const char gRectangleStrings[] =
+ "bottom\0"
+ "height\0"
+ "left\0"
+ "needsRedraw\0"
+ "right\0"
+ "top\0"
+ "width"
+;
+
+static const SkMemberInfo gRectangleInfo[] = {
+ {0, 36, 98, 1},
+ {7, -1, 67, 98},
+ {14, 24, 98, 1},
+ {19, -2, 67, 26},
+ {31, 32, 98, 1},
+ {37, 28, 98, 1},
+ {41, -3, 67, 98}
+};
+
+static const char gRemoveStrings[] =
+ "offset\0"
+ "where"
+;
+
+static const SkMemberInfo gRemoveInfo[] = {
+ {0, 20, 96, 1},
+ {7, 28, 37, 1}
+};
+
+static const char gReplaceStrings[] =
+ ""
+;
+
+static const SkMemberInfo gReplaceInfo[] = {
+ {0, 1, 18, 4}
+};
+
+static const char gRotateStrings[] =
+ "center\0"
+ "degrees"
+;
+
+static const SkMemberInfo gRotateInfo[] = {
+ {0, 24, 77, 2},
+ {7, 20, 98, 1}
+};
+
+static const char gRoundRectStrings[] =
+ "\0"
+ "rx\0"
+ "ry"
+;
+
+static const SkMemberInfo gRoundRectInfo[] = {
+ {0, 58, 18, 7},
+ {1, 44, 98, 1},
+ {4, 48, 98, 1}
+};
+
+static const char gS32Strings[] =
+ "value"
+;
+
+static const SkMemberInfo gS32Info[] = {
+ {0, 16, 96, 1}
+};
+
+static const char gScalarStrings[] =
+ "value"
+;
+
+static const SkMemberInfo gScalarInfo[] = {
+ {0, 16, 98, 1}
+};
+
+static const char gScaleStrings[] =
+ "center\0"
+ "x\0"
+ "y"
+;
+
+static const SkMemberInfo gScaleInfo[] = {
+ {0, 28, 77, 2},
+ {7, 20, 98, 1},
+ {9, 24, 98, 1}
+};
+
+static const char gSetStrings[] =
+ "begin\0"
+ "dur\0"
+ "dynamic\0"
+ "field\0"
+ "formula\0"
+ "reset\0"
+ "target\0"
+ "to"
+;
+
+static const SkMemberInfo gSetInfo[] = {
+ {0, 16, 71, 1},
+ {6, 36, 71, 1},
+ {10, -1, 67, 26},
+ {18, 40, 108, 2},
+ {24, 48, 40, 2},
+ {32, -3, 67, 26},
+ {38, 68, 40, 2},
+ {45, 76, 40, 2}
+};
+
+static const char gShaderStrings[] =
+ "matrix\0"
+ "tileMode"
+;
+
+static const SkMemberInfo gShaderInfo[] = {
+ {0, 20, 65, 1},
+ {7, 24, 116, 1}
+};
+
+static const char gSkewStrings[] =
+ "center\0"
+ "x\0"
+ "y"
+;
+
+static const SkMemberInfo gSkewInfo[] = {
+ {0, 28, 77, 2},
+ {7, 20, 98, 1},
+ {9, 24, 98, 1}
+};
+
+static const char g3D_CameraStrings[] =
+ "axis\0"
+ "hackHeight\0"
+ "hackWidth\0"
+ "location\0"
+ "observer\0"
+ "patch\0"
+ "zenith"
+;
+
+static const SkMemberInfo g3D_CameraInfo[] = {
+ {0, 36, 106, 3},
+ {5, 20, 98, 1},
+ {16, 16, 98, 1},
+ {26, 24, 106, 3},
+ {35, 60, 106, 3},
+ {44, 108, 105, 1},
+ {50, 48, 106, 3}
+};
+
+static const char g3D_PatchStrings[] =
+ "origin\0"
+ "rotateDegrees\0"
+ "u\0"
+ "v"
+;
+
+static const SkMemberInfo g3D_PatchInfo[] = {
+ {0, 40, 106, 3},
+ {7, -1, 66, 98},
+ {21, 16, 106, 3},
+ {23, 28, 106, 3}
+};
+
+static const char gUnknown6Strings[] =
+ "x\0"
+ "y\0"
+ "z"
+;
+
+static const SkMemberInfo gUnknown6Info[] = {
+ {0, 0, 98, 1},
+ {2, 4, 98, 1},
+ {4, 8, 98, 1}
+};
+
+static const char gSnapshotStrings[] =
+ "filename\0"
+ "quality\0"
+ "sequence\0"
+ "type"
+;
+
+static const SkMemberInfo gSnapshotInfo[] = {
+ {0, 16, 108, 2},
+ {9, 24, 98, 1},
+ {17, 28, 26, 1},
+ {26, 32, 20, 1}
+};
+
+static const char gStringStrings[] =
+ "length\0"
+ "slice\0"
+ "value"
+;
+
+static const SkMemberInfo gStringInfo[] = {
+ {0, -1, 67, 96},
+ {7, -1, 66, 108},
+ {13, 16, 108, 2}
+};
+
+static const char gTextStrings[] =
+ "length\0"
+ "text\0"
+ "x\0"
+ "y"
+;
+
+static const SkMemberInfo gTextInfo[] = {
+ {0, -1, 67, 96},
+ {7, 24, 108, 2},
+ {12, 32, 98, 1},
+ {14, 36, 98, 1}
+};
+
+static const char gTextBoxStrings[] =
+ "\0"
+ "mode\0"
+ "spacingAdd\0"
+ "spacingAlign\0"
+ "spacingMul\0"
+ "text"
+;
+
+static const SkMemberInfo gTextBoxInfo[] = {
+ {0, 58, 18, 7},
+ {1, 60, 113, 1},
+ {6, 56, 98, 1},
+ {17, 64, 112, 1},
+ {30, 52, 98, 1},
+ {41, 44, 108, 2}
+};
+
+static const char gTextOnPathStrings[] =
+ "offset\0"
+ "path\0"
+ "text"
+;
+
+static const SkMemberInfo gTextOnPathInfo[] = {
+ {0, 24, 98, 1},
+ {7, 28, 74, 1},
+ {12, 32, 110, 1}
+};
+
+static const char gTextToPathStrings[] =
+ "path\0"
+ "text"
+;
+
+static const SkMemberInfo gTextToPathInfo[] = {
+ {0, 16, 74, 1},
+ {5, 20, 110, 1}
+};
+
+static const char gTranslateStrings[] =
+ "x\0"
+ "y"
+;
+
+static const SkMemberInfo gTranslateInfo[] = {
+ {0, 20, 98, 1},
+ {2, 24, 98, 1}
+};
+
+static const char gTypedArrayStrings[] =
+ "length\0"
+ "values"
+;
+
+static const SkMemberInfo gTypedArrayInfo[] = {
+ {0, -1, 67, 96},
+ {7, 16, 119, 0}
+};
+
+static const char gTypefaceStrings[] =
+ "fontName"
+;
+
+static const SkMemberInfo gTypefaceInfo[] = {
+ {0, 20, 108, 2}
+};
+
+static const SkMemberInfo* const gInfoTables[] = {
+ gMathInfo,
+ gAddInfo,
+ gAddCircleInfo,
+ gUnknown1Info,
+ gAddOvalInfo,
+ gAddPathInfo,
+ gAddRectangleInfo,
+ gAddRoundRectInfo,
+ gUnknown2Info,
+ gAnimateFieldInfo,
+ gApplyInfo,
+ gUnknown3Info,
+ gDrawBitmapInfo,
+ gDrawBitmapShaderInfo,
+ gDrawBlurInfo,
+ gDisplayBoundsInfo,
+ gDrawClipInfo,
+ gDrawColorInfo,
+ gCubicToInfo,
+ gDashInfo,
+ gDataInfo,
+ gDiscreteInfo,
+ gDrawToInfo,
+ gDumpInfo,
+ gDrawEmbossInfo,
+ gDisplayEventInfo,
+ gFromPathInfo,
+ gUnknown4Info,
+ gGInfo,
+ gHitClearInfo,
+ gHitTestInfo,
+ gImageInfo,
+ gIncludeInfo,
+ gInputInfo,
+ gLineInfo,
+ gLineToInfo,
+ gLinearGradientInfo,
+ gDrawMatrixInfo,
+ gMoveInfo,
+ gMoveToInfo,
+ gMovieInfo,
+ gOvalInfo,
+ gDrawPaintInfo,
+ gDrawPathInfo,
+ gUnknown5Info,
+ gDrawPointInfo,
+ gPolyToPolyInfo,
+ gPolygonInfo,
+ gPolylineInfo,
+ gPostInfo,
+ gQuadToInfo,
+ gRCubicToInfo,
+ gRLineToInfo,
+ gRMoveToInfo,
+ gRQuadToInfo,
+ gRadialGradientInfo,
+ gDisplayRandomInfo,
+ gRectToRectInfo,
+ gRectangleInfo,
+ gRemoveInfo,
+ gReplaceInfo,
+ gRotateInfo,
+ gRoundRectInfo,
+ gS32Info,
+ gScalarInfo,
+ gScaleInfo,
+ gSetInfo,
+ gShaderInfo,
+ gSkewInfo,
+ g3D_CameraInfo,
+ g3D_PatchInfo,
+ gUnknown6Info,
+ gSnapshotInfo,
+ gStringInfo,
+ gTextInfo,
+ gTextBoxInfo,
+ gTextOnPathInfo,
+ gTextToPathInfo,
+ gTranslateInfo,
+ gTypedArrayInfo,
+ gTypefaceInfo,
+};
+
+static const unsigned char gInfoCounts[] = {
+ 26,4,4,1,1,2,5,3,13,1,13,2,6,3,2,2,2,8,6,
+ 2,2,2,2,6,4,8,3,3,2,1,4,3,1,3,4,2,2,12,1,
+ 2,1,1,25,3,3,2,2,1,1,6,4,1,1,1,1,3,5,2,7,
+ 2,1,2,3,1,1,3,8,2,3,7,4,3,4,3,4,6,3,2,2,
+ 2,1
+};
+
+static const unsigned char gTypeIDs[] = {
+ 1, // Math
+ 2, // Add
+ 3, // AddCircle
+ 4, // Unknown1
+ 5, // AddOval
+ 6, // AddPath
+ 7, // AddRectangle
+ 8, // AddRoundRect
+ 10, // Unknown2
+ 11, // AnimateField
+ 12, // Apply
+ 17, // Unknown3
+ 19, // Bitmap
+ 22, // BitmapShader
+ 23, // Blur
+ 25, // Bounds
+ 29, // Clip
+ 31, // Color
+ 32, // CubicTo
+ 33, // Dash
+ 34, // Data
+ 35, // Discrete
+ 38, // DrawTo
+ 39, // Dump
+ 41, // Emboss
+ 42, // Event
+ 48, // FromPath
+ 51, // Unknown4
+ 52, // G
+ 53, // HitClear
+ 54, // HitTest
+ 55, // Image
+ 56, // Include
+ 57, // Input
+ 59, // Line
+ 60, // LineTo
+ 61, // LinearGradient
+ 65, // Matrix
+ 68, // Move
+ 69, // MoveTo
+ 70, // Movie
+ 72, // Oval
+ 73, // Paint
+ 74, // Path
+ 77, // Unknown5
+ 78, // Point
+ 79, // PolyToPoly
+ 80, // Polygon
+ 81, // Polyline
+ 82, // Post
+ 83, // QuadTo
+ 84, // RCubicTo
+ 85, // RLineTo
+ 86, // RMoveTo
+ 87, // RQuadTo
+ 88, // RadialGradient
+ 89, // Random
+ 90, // RectToRect
+ 91, // Rectangle
+ 92, // Remove
+ 93, // Replace
+ 94, // Rotate
+ 95, // RoundRect
+ 96, // S32
+ 98, // Scalar
+ 99, // Scale
+ 101, // Set
+ 102, // Shader
+ 103, // Skew
+ 104, // 3D_Camera
+ 105, // 3D_Patch
+ 106, // Unknown6
+ 107, // Snapshot
+ 108, // String
+ 110, // Text
+ 111, // TextBox
+ 114, // TextOnPath
+ 115, // TextToPath
+ 117, // Translate
+ 119, // TypedArray
+ 120, // Typeface
+
+};
+
+static const int kTypeIDs = 81;
+
+static const char* const gInfoNames[] = {
+ gMathStrings,
+ gAddStrings,
+ gAddCircleStrings,
+ gUnknown1Strings,
+ gAddOvalStrings,
+ gAddPathStrings,
+ gAddRectangleStrings,
+ gAddRoundRectStrings,
+ gUnknown2Strings,
+ gAnimateFieldStrings,
+ gApplyStrings,
+ gUnknown3Strings,
+ gBitmapStrings,
+ gBitmapShaderStrings,
+ gBlurStrings,
+ gBoundsStrings,
+ gClipStrings,
+ gColorStrings,
+ gCubicToStrings,
+ gDashStrings,
+ gDataStrings,
+ gDiscreteStrings,
+ gDrawToStrings,
+ gDumpStrings,
+ gEmbossStrings,
+ gEventStrings,
+ gFromPathStrings,
+ gUnknown4Strings,
+ gGStrings,
+ gHitClearStrings,
+ gHitTestStrings,
+ gImageStrings,
+ gIncludeStrings,
+ gInputStrings,
+ gLineStrings,
+ gLineToStrings,
+ gLinearGradientStrings,
+ gMatrixStrings,
+ gMoveStrings,
+ gMoveToStrings,
+ gMovieStrings,
+ gOvalStrings,
+ gPaintStrings,
+ gPathStrings,
+ gUnknown5Strings,
+ gPointStrings,
+ gPolyToPolyStrings,
+ gPolygonStrings,
+ gPolylineStrings,
+ gPostStrings,
+ gQuadToStrings,
+ gRCubicToStrings,
+ gRLineToStrings,
+ gRMoveToStrings,
+ gRQuadToStrings,
+ gRadialGradientStrings,
+ gRandomStrings,
+ gRectToRectStrings,
+ gRectangleStrings,
+ gRemoveStrings,
+ gReplaceStrings,
+ gRotateStrings,
+ gRoundRectStrings,
+ gS32Strings,
+ gScalarStrings,
+ gScaleStrings,
+ gSetStrings,
+ gShaderStrings,
+ gSkewStrings,
+ g3D_CameraStrings,
+ g3D_PatchStrings,
+ gUnknown6Strings,
+ gSnapshotStrings,
+ gStringStrings,
+ gTextStrings,
+ gTextBoxStrings,
+ gTextOnPathStrings,
+ gTextToPathStrings,
+ gTranslateStrings,
+ gTypedArrayStrings,
+ gTypefaceStrings
+};
+
+#endif
+#endif
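
The generated tables are designed to be used together: gTypeIDs maps a compact slot to a display type, gInfoCounts gives each table's length, gInfoTables points at the packed member records, and gInfoNames at the matching NUL-separated name blocks. A hedged sketch of how tables shaped like these could be searched for a member by name; the record layout and the lookup function are simplifications for illustration, not code from this patch:

    #include <cstring>

    // Simplified record: name offset into the string block, member offset
    // (may be negative for computed members), type code, and element count.
    struct CondensedInfo {
        unsigned char nameOffset;
        signed char   memberOffset;
        unsigned char type;
        unsigned char count;
    };

    // Hypothetical consumer: find a member by name within one type's table by
    // walking the NUL-separated name block in step with the records.
    static const CondensedInfo* findMember(const CondensedInfo* info, int count,
                                           const char* strings, const char* name) {
        const char* s = strings;
        for (int i = 0; i < count; i++) {
            if (strcmp(s, name) == 0)
                return &info[i];
            s += strlen(s) + 1;              // skip past this name and its NUL
        }
        return nullptr;
    }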
diff --git a/gfx/skia/skia/src/animator/SkCondensedRelease.inc b/gfx/skia/skia/src/animator/SkCondensedRelease.inc
new file mode 100644
index 000000000..122249605
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkCondensedRelease.inc
@@ -0,0 +1,1365 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkTypes.h"
+#ifndef SK_BUILD_FOR_UNIX
+#ifdef SK_RELEASE
+// This file was automatically generated.
+// To change it, edit the file with the matching debug info.
+// Then execute SkDisplayType::BuildCondensedInfo() to regenerate this file.
+
+static const char gMathStrings[] =
+ "E\0"
+ "LN10\0"
+ "LN2\0"
+ "LOG10E\0"
+ "LOG2E\0"
+ "PI\0"
+ "SQRT1_2\0"
+ "SQRT2\0"
+ "abs\0"
+ "acos\0"
+ "asin\0"
+ "atan\0"
+ "atan2\0"
+ "ceil\0"
+ "cos\0"
+ "exp\0"
+ "floor\0"
+ "log\0"
+ "max\0"
+ "min\0"
+ "pow\0"
+ "random\0"
+ "round\0"
+ "sin\0"
+ "sqrt\0"
+ "tan"
+;
+
+static const SkMemberInfo gMathInfo[] = {
+ {0, -1, 67, 98},
+ {2, -2, 67, 98},
+ {7, -3, 67, 98},
+ {11, -4, 67, 98},
+ {18, -5, 67, 98},
+ {24, -6, 67, 98},
+ {27, -7, 67, 98},
+ {35, -8, 67, 98},
+ {41, -1, 66, 98},
+ {45, -2, 66, 98},
+ {50, -3, 66, 98},
+ {55, -4, 66, 98},
+ {60, -5, 66, 98},
+ {66, -6, 66, 98},
+ {71, -7, 66, 98},
+ {75, -8, 66, 98},
+ {79, -9, 66, 98},
+ {85, -10, 66, 98},
+ {89, -11, 66, 98},
+ {93, -12, 66, 98},
+ {97, -13, 66, 98},
+ {101, -14, 66, 98},
+ {108, -15, 66, 98},
+ {114, -16, 66, 98},
+ {118, -17, 66, 98},
+ {123, -18, 66, 98}
+};
+
+static const char gAddStrings[] =
+ "inPlace\0"
+ "offset\0"
+ "use\0"
+ "where"
+;
+
+static const SkMemberInfo gAddInfo[] = {
+ {0, 4, 26, 1},
+ {8, 8, 96, 1},
+ {15, 12, 37, 1},
+ {19, 16, 37, 1}
+};
+
+static const char gAddCircleStrings[] =
+ "\0"
+ "radius\0"
+ "x\0"
+ "y"
+;
+
+static const SkMemberInfo gAddCircleInfo[] = {
+ {0, 3, 18, 1},
+ {1, 12, 98, 1},
+ {8, 16, 98, 1},
+ {10, 20, 98, 1}
+};
+
+static const char gUnknown1Strings[] =
+ "direction"
+;
+
+static const SkMemberInfo gUnknown1Info[] = {
+ {0, 8, 75, 1}
+};
+
+static const char gAddOvalStrings[] =
+ ""
+;
+
+static const SkMemberInfo gAddOvalInfo[] = {
+ {0, 6, 18, 5}
+};
+
+static const char gAddPathStrings[] =
+ "matrix\0"
+ "path"
+;
+
+static const SkMemberInfo gAddPathInfo[] = {
+ {0, 8, 65, 1},
+ {7, 12, 74, 1}
+};
+
+static const char gAddRectangleStrings[] =
+ "\0"
+ "bottom\0"
+ "left\0"
+ "right\0"
+ "top"
+;
+
+static const SkMemberInfo gAddRectangleInfo[] = {
+ {0, 3, 18, 1},
+ {1, 24, 98, 1},
+ {8, 12, 98, 1},
+ {13, 20, 98, 1},
+ {19, 16, 98, 1}
+};
+
+static const char gAddRoundRectStrings[] =
+ "\0"
+ "rx\0"
+ "ry"
+;
+
+static const SkMemberInfo gAddRoundRectInfo[] = {
+ {0, 6, 18, 5},
+ {1, 28, 98, 1},
+ {4, 32, 98, 1}
+};
+
+static const char gUnknown2Strings[] =
+ "begin\0"
+ "blend\0"
+ "dur\0"
+ "dynamic\0"
+ "field\0"
+ "formula\0"
+ "from\0"
+ "mirror\0"
+ "repeat\0"
+ "reset\0"
+ "target\0"
+ "to\0"
+ "values"
+;
+
+static const SkMemberInfo gUnknown2Info[] = {
+ {0, 4, 71, 1},
+ {6, 8, 119, 98},
+ {12, 16, 71, 1},
+ {16, -1, 67, 26},
+ {24, 20, 108, 1},
+ {30, 24, 40, 1},
+ {38, 28, 40, 1},
+ {43, -2, 67, 26},
+ {50, 32, 98, 1},
+ {57, -3, 67, 26},
+ {63, 36, 40, 1},
+ {70, 40, 40, 1},
+ {73, -4, 67, 40}
+};
+
+static const char gAnimateFieldStrings[] =
+ ""
+;
+
+static const SkMemberInfo gAnimateFieldInfo[] = {
+ {0, 8, 18, 13}
+};
+
+static const char gApplyStrings[] =
+ "animator\0"
+ "begin\0"
+ "dontDraw\0"
+ "dynamicScope\0"
+ "interval\0"
+ "mode\0"
+ "pickup\0"
+ "restore\0"
+ "scope\0"
+ "step\0"
+ "steps\0"
+ "time\0"
+ "transition"
+;
+
+static const SkMemberInfo gApplyInfo[] = {
+ {0, -1, 67, 10},
+ {9, 4, 71, 1},
+ {15, 8, 26, 1},
+ {24, 12, 108, 1},
+ {37, 16, 71, 1},
+ {46, 20, 13, 1},
+ {51, 24, 26, 1},
+ {58, 28, 26, 1},
+ {66, 32, 37, 1},
+ {72, -2, 67, 96},
+ {77, 36, 96, 1},
+ {83, -3, 67, 71},
+ {88, 40, 14, 1}
+};
+
+static const char gUnknown3Strings[] =
+ "x\0"
+ "y"
+;
+
+static const SkMemberInfo gUnknown3Info[] = {
+ {0, 36, 98, 1},
+ {2, 40, 98, 1}
+};
+
+static const char gBitmapStrings[] =
+ "\0"
+ "erase\0"
+ "format\0"
+ "height\0"
+ "rowBytes\0"
+ "width"
+;
+
+static const SkMemberInfo gDrawBitmapInfo[] = {
+ {0, 11, 18, 2},
+ {1, -1, 67, 15},
+ {7, 44, 21, 1},
+ {14, 48, 96, 1},
+ {21, 52, 96, 1},
+ {30, 56, 96, 1}
+};
+
+static const char gBitmapShaderStrings[] =
+ "\0"
+ "filterType\0"
+ "image"
+;
+
+static const SkMemberInfo gDrawBitmapShaderInfo[] = {
+ {0, 66, 18, 2},
+ {1, 16, 47, 1},
+ {12, 20, 17, 1}
+};
+
+static const char gBlurStrings[] =
+ "blurStyle\0"
+ "radius"
+;
+
+static const SkMemberInfo gDrawBlurInfo[] = {
+ {0, 12, 63, 1},
+ {10, 8, 98, 1}
+};
+
+static const char gBoundsStrings[] =
+ "\0"
+ "inval"
+;
+
+static const SkMemberInfo gDisplayBoundsInfo[] = {
+ {0, 57, 18, 7},
+ {1, 32, 26, 1}
+};
+
+static const char gClipStrings[] =
+ "path\0"
+ "rectangle"
+;
+
+static const SkMemberInfo gDrawClipInfo[] = {
+ {0, 8, 74, 1},
+ {5, 4, 91, 1}
+};
+
+static const char gColorStrings[] =
+ "alpha\0"
+ "blue\0"
+ "color\0"
+ "green\0"
+ "hue\0"
+ "red\0"
+ "saturation\0"
+ "value"
+;
+
+static const SkMemberInfo gDrawColorInfo[] = {
+ {0, -1, 67, 98},
+ {6, -2, 67, 98},
+ {11, 8, 15, 1},
+ {17, -3, 67, 98},
+ {23, -4, 67, 98},
+ {27, -5, 67, 98},
+ {31, -6, 67, 98},
+ {42, -7, 67, 98}
+};
+
+static const char gCubicToStrings[] =
+ "x1\0"
+ "x2\0"
+ "x3\0"
+ "y1\0"
+ "y2\0"
+ "y3"
+;
+
+static const SkMemberInfo gCubicToInfo[] = {
+ {0, 8, 98, 1},
+ {3, 16, 98, 1},
+ {6, 24, 98, 1},
+ {9, 12, 98, 1},
+ {12, 20, 98, 1},
+ {15, 28, 98, 1}
+};
+
+static const char gDashStrings[] =
+ "intervals\0"
+ "phase"
+;
+
+static const SkMemberInfo gDashInfo[] = {
+ {0, 8, 119, 98},
+ {10, 16, 98, 1}
+};
+
+static const char gDataStrings[] =
+ "\0"
+ "name"
+;
+
+static const SkMemberInfo gDataInfo[] = {
+ {0, 32, 18, 3},
+ {1, 16, 108, 1}
+};
+
+static const char gDiscreteStrings[] =
+ "deviation\0"
+ "segLength"
+;
+
+static const SkMemberInfo gDiscreteInfo[] = {
+ {0, 8, 98, 1},
+ {10, 12, 98, 1}
+};
+
+static const char gDrawToStrings[] =
+ "drawOnce\0"
+ "use"
+;
+
+static const SkMemberInfo gDrawToInfo[] = {
+ {0, 36, 26, 1},
+ {9, 40, 19, 1}
+};
+
+static const char gEmbossStrings[] =
+ "ambient\0"
+ "direction\0"
+ "radius\0"
+ "specular"
+;
+
+static const SkMemberInfo gDrawEmbossInfo[] = {
+ {0, -1, 67, 98},
+ {8, 8, 119, 98},
+ {18, 16, 98, 1},
+ {25, -2, 67, 98}
+};
+
+static const char gEventStrings[] =
+ "code\0"
+ "disable\0"
+ "key\0"
+ "keys\0"
+ "kind\0"
+ "target\0"
+ "x\0"
+ "y"
+;
+
+static const SkMemberInfo gDisplayEventInfo[] = {
+ {0, 4, 43, 1},
+ {5, 8, 26, 1},
+ {13, -1, 67, 108},
+ {17, -2, 67, 108},
+ {22, 12, 44, 1},
+ {27, 16, 108, 1},
+ {34, 20, 98, 1},
+ {36, 24, 98, 1}
+};
+
+static const char gFromPathStrings[] =
+ "mode\0"
+ "offset\0"
+ "path"
+;
+
+static const SkMemberInfo gFromPathInfo[] = {
+ {0, 8, 49, 1},
+ {5, 12, 98, 1},
+ {12, 16, 74, 1}
+};
+
+static const char gUnknown4Strings[] =
+ "\0"
+ "offsets\0"
+ "unitMapper"
+;
+
+static const SkMemberInfo gUnknown4Info[] = {
+ {0, 66, 18, 2},
+ {1, 16, 119, 98},
+ {9, 24, 108, 1}
+};
+
+static const char gGStrings[] =
+ "condition\0"
+ "enableCondition"
+;
+
+static const SkMemberInfo gGInfo[] = {
+ {0, 4, 40, 1},
+ {10, 8, 40, 1}
+};
+
+static const char gHitClearStrings[] =
+ "targets"
+;
+
+static const SkMemberInfo gHitClearInfo[] = {
+ {0, 4, 119, 36}
+};
+
+static const char gHitTestStrings[] =
+ "bullets\0"
+ "hits\0"
+ "targets\0"
+ "value"
+;
+
+static const SkMemberInfo gHitTestInfo[] = {
+ {0, 4, 119, 36},
+ {8, 12, 119, 96},
+ {13, 20, 119, 36},
+ {21, 28, 26, 1}
+};
+
+static const char gImageStrings[] =
+ "\0"
+ "base64\0"
+ "src"
+;
+
+static const SkMemberInfo gImageInfo[] = {
+ {0, 11, 18, 2},
+ {1, 44, 16, 2},
+ {8, 52, 108, 1}
+};
+
+static const char gIncludeStrings[] =
+ "src"
+;
+
+static const SkMemberInfo gIncludeInfo[] = {
+ {0, 4, 108, 1}
+};
+
+static const char gInputStrings[] =
+ "s32\0"
+ "scalar\0"
+ "string"
+;
+
+static const SkMemberInfo gInputInfo[] = {
+ {0, 4, 96, 1},
+ {4, 8, 98, 1},
+ {11, 12, 108, 1}
+};
+
+static const char gLineStrings[] =
+ "x1\0"
+ "x2\0"
+ "y1\0"
+ "y2"
+;
+
+static const SkMemberInfo gLineInfo[] = {
+ {0, 12, 98, 1},
+ {3, 16, 98, 1},
+ {6, 20, 98, 1},
+ {9, 24, 98, 1}
+};
+
+static const char gLineToStrings[] =
+ "x\0"
+ "y"
+;
+
+static const SkMemberInfo gLineToInfo[] = {
+ {0, 8, 98, 1},
+ {2, 12, 98, 1}
+};
+
+static const char gLinearGradientStrings[] =
+ "\0"
+ "points"
+;
+
+static const SkMemberInfo gLinearGradientInfo[] = {
+ {0, 26, 18, 3},
+ {1, 48, 77, 4}
+};
+
+static const char gMatrixStrings[] =
+ "matrix\0"
+ "perspectX\0"
+ "perspectY\0"
+ "rotate\0"
+ "scale\0"
+ "scaleX\0"
+ "scaleY\0"
+ "skewX\0"
+ "skewY\0"
+ "translate\0"
+ "translateX\0"
+ "translateY"
+;
+
+static const SkMemberInfo gDrawMatrixInfo[] = {
+ {0, 4, 119, 98},
+ {7, -1, 67, 98},
+ {17, -2, 67, 98},
+ {27, -3, 67, 98},
+ {34, -4, 67, 98},
+ {40, -5, 67, 98},
+ {47, -6, 67, 98},
+ {54, -7, 67, 98},
+ {60, -8, 67, 98},
+ {66, -9, 67, 77},
+ {76, -10, 67, 98},
+ {87, -11, 67, 98}
+};
+
+static const char gMoveStrings[] =
+ ""
+;
+
+static const SkMemberInfo gMoveInfo[] = {
+ {0, 1, 18, 4}
+};
+
+static const char gMoveToStrings[] =
+ "x\0"
+ "y"
+;
+
+static const SkMemberInfo gMoveToInfo[] = {
+ {0, 8, 98, 1},
+ {2, 12, 98, 1}
+};
+
+static const char gMovieStrings[] =
+ "src"
+;
+
+static const SkMemberInfo gMovieInfo[] = {
+ {0, 4, 108, 1}
+};
+
+static const char gOvalStrings[] =
+ ""
+;
+
+static const SkMemberInfo gOvalInfo[] = {
+ {0, 57, 18, 7}
+};
+
+static const char gPaintStrings[] =
+ "antiAlias\0"
+ "ascent\0"
+ "color\0"
+ "descent\0"
+ "filterType\0"
+ "linearText\0"
+ "maskFilter\0"
+ "measureText\0"
+ "pathEffect\0"
+ "shader\0"
+ "strikeThru\0"
+ "stroke\0"
+ "strokeCap\0"
+ "strokeJoin\0"
+ "strokeMiter\0"
+ "strokeWidth\0"
+ "style\0"
+ "textAlign\0"
+ "textScaleX\0"
+ "textSize\0"
+ "textSkewX\0"
+ "textTracking\0"
+ "typeface\0"
+ "underline\0"
+ "xfermode"
+;
+
+static const SkMemberInfo gDrawPaintInfo[] = {
+ {0, 4, 26, 1},
+ {10, -1, 67, 98},
+ {17, 8, 31, 1},
+ {23, -2, 67, 98},
+ {31, 12, 47, 1},
+ {42, 16, 26, 1},
+ {53, 20, 62, 1},
+ {64, -1, 66, 98},
+ {76, 24, 76, 1},
+ {87, 28, 102, 1},
+ {94, 32, 26, 1},
+ {105, 36, 26, 1},
+ {112, 40, 27, 1},
+ {122, 44, 58, 1},
+ {133, 48, 98, 1},
+ {145, 52, 98, 1},
+ {157, 56, 109, 1},
+ {163, 60, 9, 1},
+ {173, 64, 98, 1},
+ {184, 68, 98, 1},
+ {193, 72, 98, 1},
+ {203, 76, 98, 1},
+ {216, 80, 120, 1},
+ {225, 84, 26, 1},
+ {235, 88, 121, 1}
+};
+
+static const char gPathStrings[] =
+ "d\0"
+ "fillType\0"
+ "length"
+;
+
+static const SkMemberInfo gDrawPathInfo[] = {
+ {0, 32, 108, 1},
+ {2, -1, 67, 46},
+ {11, -2, 67, 98}
+};
+
+static const char gUnknown5Strings[] =
+ "x\0"
+ "y\0"
+ "z"
+;
+
+static const SkMemberInfo gUnknown5Info[] = {
+ {0, 0, 98, 1},
+ {2, 4, 98, 1},
+ {4, 8, 98, 1}
+};
+
+static const char gPointStrings[] =
+ "x\0"
+ "y"
+;
+
+static const SkMemberInfo gDrawPointInfo[] = {
+ {0, 4, 98, 1},
+ {2, 8, 98, 1}
+};
+
+static const char gPolyToPolyStrings[] =
+ "destination\0"
+ "source"
+;
+
+static const SkMemberInfo gPolyToPolyInfo[] = {
+ {0, 12, 80, 1},
+ {12, 8, 80, 1}
+};
+
+static const char gPolygonStrings[] =
+ ""
+;
+
+static const SkMemberInfo gPolygonInfo[] = {
+ {0, 47, 18, 1}
+};
+
+static const char gPolylineStrings[] =
+ "points"
+;
+
+static const SkMemberInfo gPolylineInfo[] = {
+ {0, 56, 119, 98}
+};
+
+static const char gPostStrings[] =
+ "delay\0"
+ "initialized\0"
+ "mode\0"
+ "sink\0"
+ "target\0"
+ "type"
+;
+
+static const SkMemberInfo gPostInfo[] = {
+ {0, 4, 71, 1},
+ {6, 8, 26, 1},
+ {18, 12, 45, 1},
+ {23, -1, 67, 108},
+ {28, -2, 67, 108},
+ {35, -3, 67, 108}
+};
+
+static const char gQuadToStrings[] =
+ "x1\0"
+ "x2\0"
+ "y1\0"
+ "y2"
+;
+
+static const SkMemberInfo gQuadToInfo[] = {
+ {0, 8, 98, 1},
+ {3, 16, 98, 1},
+ {6, 12, 98, 1},
+ {9, 20, 98, 1}
+};
+
+static const char gRCubicToStrings[] =
+ ""
+;
+
+static const SkMemberInfo gRCubicToInfo[] = {
+ {0, 18, 18, 6}
+};
+
+static const char gRLineToStrings[] =
+ ""
+;
+
+static const SkMemberInfo gRLineToInfo[] = {
+ {0, 34, 18, 2}
+};
+
+static const char gRMoveToStrings[] =
+ ""
+;
+
+static const SkMemberInfo gRMoveToInfo[] = {
+ {0, 38, 18, 2}
+};
+
+static const char gRQuadToStrings[] =
+ ""
+;
+
+static const SkMemberInfo gRQuadToInfo[] = {
+ {0, 49, 18, 4}
+};
+
+static const char gRadialGradientStrings[] =
+ "\0"
+ "center\0"
+ "radius"
+;
+
+static const SkMemberInfo gRadialGradientInfo[] = {
+ {0, 26, 18, 3},
+ {1, 48, 77, 2},
+ {8, 56, 98, 1}
+};
+
+static const char gRandomStrings[] =
+ "blend\0"
+ "max\0"
+ "min\0"
+ "random\0"
+ "seed"
+;
+
+static const SkMemberInfo gDisplayRandomInfo[] = {
+ {0, 4, 98, 1},
+ {6, 12, 98, 1},
+ {10, 8, 98, 1},
+ {14, 1, 67, 98},
+ {21, -2, 67, 96}
+};
+
+static const char gRectToRectStrings[] =
+ "destination\0"
+ "source"
+;
+
+static const SkMemberInfo gRectToRectInfo[] = {
+ {0, 12, 91, 1},
+ {12, 8, 91, 1}
+};
+
+static const char gRectangleStrings[] =
+ "bottom\0"
+ "height\0"
+ "left\0"
+ "needsRedraw\0"
+ "right\0"
+ "top\0"
+ "width"
+;
+
+static const SkMemberInfo gRectangleInfo[] = {
+ {0, 24, 98, 1},
+ {7, -1, 67, 98},
+ {14, 12, 98, 1},
+ {19, -2, 67, 26},
+ {31, 20, 98, 1},
+ {37, 16, 98, 1},
+ {41, -3, 67, 98}
+};
+
+static const char gRemoveStrings[] =
+ "offset\0"
+ "where"
+;
+
+static const SkMemberInfo gRemoveInfo[] = {
+ {0, 8, 96, 1},
+ {7, 16, 37, 1}
+};
+
+static const char gReplaceStrings[] =
+ ""
+;
+
+static const SkMemberInfo gReplaceInfo[] = {
+ {0, 1, 18, 4}
+};
+
+static const char gRotateStrings[] =
+ "center\0"
+ "degrees"
+;
+
+static const SkMemberInfo gRotateInfo[] = {
+ {0, 12, 77, 2},
+ {7, 8, 98, 1}
+};
+
+static const char gRoundRectStrings[] =
+ "\0"
+ "rx\0"
+ "ry"
+;
+
+static const SkMemberInfo gRoundRectInfo[] = {
+ {0, 57, 18, 7},
+ {1, 32, 98, 1},
+ {4, 36, 98, 1}
+};
+
+static const char gS32Strings[] =
+ "value"
+;
+
+static const SkMemberInfo gS32Info[] = {
+ {0, 4, 96, 1}
+};
+
+static const char gScalarStrings[] =
+ "value"
+;
+
+static const SkMemberInfo gScalarInfo[] = {
+ {0, 4, 98, 1}
+};
+
+static const char gScaleStrings[] =
+ "center\0"
+ "x\0"
+ "y"
+;
+
+static const SkMemberInfo gScaleInfo[] = {
+ {0, 16, 77, 2},
+ {7, 8, 98, 1},
+ {9, 12, 98, 1}
+};
+
+static const char gSetStrings[] =
+ "begin\0"
+ "dur\0"
+ "dynamic\0"
+ "field\0"
+ "formula\0"
+ "reset\0"
+ "target\0"
+ "to"
+;
+
+static const SkMemberInfo gSetInfo[] = {
+ {0, 4, 71, 1},
+ {6, 16, 71, 1},
+ {10, -1, 67, 26},
+ {18, 20, 108, 1},
+ {24, 24, 40, 1},
+ {32, -3, 67, 26},
+ {38, 36, 40, 1},
+ {45, 40, 40, 1}
+};
+
+static const char gShaderStrings[] =
+ "matrix\0"
+ "tileMode"
+;
+
+static const SkMemberInfo gShaderInfo[] = {
+ {0, 8, 65, 1},
+ {7, 12, 116, 1}
+};
+
+static const char gSkewStrings[] =
+ "center\0"
+ "x\0"
+ "y"
+;
+
+static const SkMemberInfo gSkewInfo[] = {
+ {0, 16, 77, 2},
+ {7, 8, 98, 1},
+ {9, 12, 98, 1}
+};
+
+static const char g3D_CameraStrings[] =
+ "axis\0"
+ "hackHeight\0"
+ "hackWidth\0"
+ "location\0"
+ "observer\0"
+ "patch\0"
+ "zenith"
+;
+
+static const SkMemberInfo g3D_CameraInfo[] = {
+ {0, 24, 106, 3},
+ {5, 8, 98, 1},
+ {16, 4, 98, 1},
+ {26, 12, 106, 3},
+ {35, 48, 106, 3},
+ {44, 96, 105, 1},
+ {50, 36, 106, 3}
+};
+
+static const char g3D_PatchStrings[] =
+ "origin\0"
+ "rotateDegrees\0"
+ "u\0"
+ "v"
+;
+
+static const SkMemberInfo g3D_PatchInfo[] = {
+ {0, 28, 106, 3},
+ {7, -1, 66, 98},
+ {21, 4, 106, 3},
+ {23, 16, 106, 3}
+};
+
+static const char gUnknown6Strings[] =
+ "x\0"
+ "y\0"
+ "z"
+;
+
+static const SkMemberInfo gUnknown6Info[] = {
+ {0, 0, 98, 1},
+ {2, 4, 98, 1},
+ {4, 8, 98, 1}
+};
+
+static const char gSnapshotStrings[] =
+ "filename\0"
+ "quality\0"
+ "sequence\0"
+ "type"
+;
+
+static const SkMemberInfo gSnapshotInfo[] = {
+ {0, 4, 108, 1},
+ {9, 8, 98, 1},
+ {17, 12, 26, 1},
+ {26, 16, 20, 1}
+};
+
+static const char gStringStrings[] =
+ "length\0"
+ "slice\0"
+ "value"
+;
+
+static const SkMemberInfo gStringInfo[] = {
+ {0, -1, 67, 96},
+ {7, -1, 66, 108},
+ {13, 4, 108, 1}
+};
+
+static const char gTextStrings[] =
+ "length\0"
+ "text\0"
+ "x\0"
+ "y"
+;
+
+static const SkMemberInfo gTextInfo[] = {
+ {0, -1, 67, 96},
+ {7, 12, 108, 1},
+ {12, 16, 98, 1},
+ {14, 20, 98, 1}
+};
+
+static const char gTextBoxStrings[] =
+ "\0"
+ "mode\0"
+ "spacingAdd\0"
+ "spacingAlign\0"
+ "spacingMul\0"
+ "text"
+;
+
+static const SkMemberInfo gTextBoxInfo[] = {
+ {0, 57, 18, 7},
+ {1, 44, 113, 1},
+ {6, 40, 98, 1},
+ {17, 48, 112, 1},
+ {30, 36, 98, 1},
+ {41, 32, 108, 1}
+};
+
+static const char gTextOnPathStrings[] =
+ "offset\0"
+ "path\0"
+ "text"
+;
+
+static const SkMemberInfo gTextOnPathInfo[] = {
+ {0, 12, 98, 1},
+ {7, 16, 74, 1},
+ {12, 20, 110, 1}
+};
+
+static const char gTextToPathStrings[] =
+ "path\0"
+ "text"
+;
+
+static const SkMemberInfo gTextToPathInfo[] = {
+ {0, 4, 74, 1},
+ {5, 8, 110, 1}
+};
+
+static const char gTranslateStrings[] =
+ "x\0"
+ "y"
+;
+
+static const SkMemberInfo gTranslateInfo[] = {
+ {0, 8, 98, 1},
+ {2, 12, 98, 1}
+};
+
+static const char gTypedArrayStrings[] =
+ "length\0"
+ "values"
+;
+
+static const SkMemberInfo gTypedArrayInfo[] = {
+ {0, -1, 67, 96},
+ {7, 4, 119, 0}
+};
+
+static const char gTypefaceStrings[] =
+ "fontName"
+;
+
+static const SkMemberInfo gTypefaceInfo[] = {
+ {0, 8, 108, 1}
+};
+
+static const SkMemberInfo* const gInfoTables[] = {
+ gMathInfo,
+ gAddInfo,
+ gAddCircleInfo,
+ gUnknown1Info,
+ gAddOvalInfo,
+ gAddPathInfo,
+ gAddRectangleInfo,
+ gAddRoundRectInfo,
+ gUnknown2Info,
+ gAnimateFieldInfo,
+ gApplyInfo,
+ gUnknown3Info,
+ gDrawBitmapInfo,
+ gDrawBitmapShaderInfo,
+ gDrawBlurInfo,
+ gDisplayBoundsInfo,
+ gDrawClipInfo,
+ gDrawColorInfo,
+ gCubicToInfo,
+ gDashInfo,
+ gDataInfo,
+ gDiscreteInfo,
+ gDrawToInfo,
+ gDrawEmbossInfo,
+ gDisplayEventInfo,
+ gFromPathInfo,
+ gUnknown4Info,
+ gGInfo,
+ gHitClearInfo,
+ gHitTestInfo,
+ gImageInfo,
+ gIncludeInfo,
+ gInputInfo,
+ gLineInfo,
+ gLineToInfo,
+ gLinearGradientInfo,
+ gDrawMatrixInfo,
+ gMoveInfo,
+ gMoveToInfo,
+ gMovieInfo,
+ gOvalInfo,
+ gDrawPaintInfo,
+ gDrawPathInfo,
+ gUnknown5Info,
+ gDrawPointInfo,
+ gPolyToPolyInfo,
+ gPolygonInfo,
+ gPolylineInfo,
+ gPostInfo,
+ gQuadToInfo,
+ gRCubicToInfo,
+ gRLineToInfo,
+ gRMoveToInfo,
+ gRQuadToInfo,
+ gRadialGradientInfo,
+ gDisplayRandomInfo,
+ gRectToRectInfo,
+ gRectangleInfo,
+ gRemoveInfo,
+ gReplaceInfo,
+ gRotateInfo,
+ gRoundRectInfo,
+ gS32Info,
+ gScalarInfo,
+ gScaleInfo,
+ gSetInfo,
+ gShaderInfo,
+ gSkewInfo,
+ g3D_CameraInfo,
+ g3D_PatchInfo,
+ gUnknown6Info,
+ gSnapshotInfo,
+ gStringInfo,
+ gTextInfo,
+ gTextBoxInfo,
+ gTextOnPathInfo,
+ gTextToPathInfo,
+ gTranslateInfo,
+ gTypedArrayInfo,
+ gTypefaceInfo,
+};
+
+static const unsigned char gInfoCounts[] = {
+ 26,4,4,1,1,2,5,3,13,1,13,2,6,3,2,2,2,8,6,
+ 2,2,2,2,4,8,3,3,2,1,4,3,1,3,4,2,2,12,1,2,
+ 1,1,25,3,3,2,2,1,1,6,4,1,1,1,1,3,5,2,7,2,
+ 1,2,3,1,1,3,8,2,3,7,4,3,4,3,4,6,3,2,2,2,
+ 1
+};
+
+static const unsigned char gTypeIDs[] = {
+ 1, // Math
+ 2, // Add
+ 3, // AddCircle
+ 4, // Unknown1
+ 5, // AddOval
+ 6, // AddPath
+ 7, // AddRectangle
+ 8, // AddRoundRect
+ 10, // Unknown2
+ 11, // AnimateField
+ 12, // Apply
+ 17, // Unknown3
+ 19, // Bitmap
+ 22, // BitmapShader
+ 23, // Blur
+ 25, // Bounds
+ 29, // Clip
+ 31, // Color
+ 32, // CubicTo
+ 33, // Dash
+ 34, // Data
+ 35, // Discrete
+ 38, // DrawTo
+ 41, // Emboss
+ 42, // Event
+ 48, // FromPath
+ 51, // Unknown4
+ 52, // G
+ 53, // HitClear
+ 54, // HitTest
+ 55, // Image
+ 56, // Include
+ 57, // Input
+ 59, // Line
+ 60, // LineTo
+ 61, // LinearGradient
+ 65, // Matrix
+ 68, // Move
+ 69, // MoveTo
+ 70, // Movie
+ 72, // Oval
+ 73, // Paint
+ 74, // Path
+ 77, // Unknown5
+ 78, // Point
+ 79, // PolyToPoly
+ 80, // Polygon
+ 81, // Polyline
+ 82, // Post
+ 83, // QuadTo
+ 84, // RCubicTo
+ 85, // RLineTo
+ 86, // RMoveTo
+ 87, // RQuadTo
+ 88, // RadialGradient
+ 89, // Random
+ 90, // RectToRect
+ 91, // Rectangle
+ 92, // Remove
+ 93, // Replace
+ 94, // Rotate
+ 95, // RoundRect
+ 96, // S32
+ 98, // Scalar
+ 99, // Scale
+ 101, // Set
+ 102, // Shader
+ 103, // Skew
+ 104, // 3D_Camera
+ 105, // 3D_Patch
+ 106, // Unknown6
+ 107, // Snapshot
+ 108, // String
+ 110, // Text
+ 111, // TextBox
+ 114, // TextOnPath
+ 115, // TextToPath
+ 117, // Translate
+ 119, // TypedArray
+ 120, // Typeface
+
+};
+
+static const int kTypeIDs = 80;
+
+static const char* const gInfoNames[] = {
+ gMathStrings,
+ gAddStrings,
+ gAddCircleStrings,
+ gUnknown1Strings,
+ gAddOvalStrings,
+ gAddPathStrings,
+ gAddRectangleStrings,
+ gAddRoundRectStrings,
+ gUnknown2Strings,
+ gAnimateFieldStrings,
+ gApplyStrings,
+ gUnknown3Strings,
+ gBitmapStrings,
+ gBitmapShaderStrings,
+ gBlurStrings,
+ gBoundsStrings,
+ gClipStrings,
+ gColorStrings,
+ gCubicToStrings,
+ gDashStrings,
+ gDataStrings,
+ gDiscreteStrings,
+ gDrawToStrings,
+ gEmbossStrings,
+ gEventStrings,
+ gFromPathStrings,
+ gUnknown4Strings,
+ gGStrings,
+ gHitClearStrings,
+ gHitTestStrings,
+ gImageStrings,
+ gIncludeStrings,
+ gInputStrings,
+ gLineStrings,
+ gLineToStrings,
+ gLinearGradientStrings,
+ gMatrixStrings,
+ gMoveStrings,
+ gMoveToStrings,
+ gMovieStrings,
+ gOvalStrings,
+ gPaintStrings,
+ gPathStrings,
+ gUnknown5Strings,
+ gPointStrings,
+ gPolyToPolyStrings,
+ gPolygonStrings,
+ gPolylineStrings,
+ gPostStrings,
+ gQuadToStrings,
+ gRCubicToStrings,
+ gRLineToStrings,
+ gRMoveToStrings,
+ gRQuadToStrings,
+ gRadialGradientStrings,
+ gRandomStrings,
+ gRectToRectStrings,
+ gRectangleStrings,
+ gRemoveStrings,
+ gReplaceStrings,
+ gRotateStrings,
+ gRoundRectStrings,
+ gS32Strings,
+ gScalarStrings,
+ gScaleStrings,
+ gSetStrings,
+ gShaderStrings,
+ gSkewStrings,
+ g3D_CameraStrings,
+ g3D_PatchStrings,
+ gUnknown6Strings,
+ gSnapshotStrings,
+ gStringStrings,
+ gTextStrings,
+ gTextBoxStrings,
+ gTextOnPathStrings,
+ gTextToPathStrings,
+ gTranslateStrings,
+ gTypedArrayStrings,
+ gTypefaceStrings
+};
+#endif
+#endif
diff --git a/gfx/skia/skia/src/animator/SkDisplayAdd.cpp b/gfx/skia/skia/src/animator/SkDisplayAdd.cpp
new file mode 100644
index 000000000..f5788a386
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayAdd.cpp
@@ -0,0 +1,245 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDisplayAdd.h"
+#include "SkAnimateMaker.h"
+#include "SkDisplayApply.h"
+#include "SkDisplayList.h"
+#include "SkADrawable.h"
+#include "SkDrawGroup.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkAdd::fInfo[] = {
+ SK_MEMBER(mode, AddMode),
+ SK_MEMBER(offset, Int),
+ SK_MEMBER(use, Drawable),
+ SK_MEMBER(where, Drawable)
+};
+
+#endif
+
+// start here;
+// add onEndElement to turn where string into f_Where
+// probably need new SkAnimateMaker::resolve flavor that takes
+// where="id", where="event-target" or not-specified
+// offset="#" (implements before, after, and index if no 'where')
+
+DEFINE_GET_MEMBER(SkAdd);
+
+SkAdd::SkAdd() : mode(kMode_indirect),
+ offset(SK_MaxS32), use(nullptr), where(nullptr) {
+}
+
+SkDisplayable* SkAdd::deepCopy(SkAnimateMaker* maker) {
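+    // Temporarily clear 'use' and 'where' so the inherited deepCopy does not duplicate them;
+    // both pointers are then restored on the copy and on this object.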
+ SkADrawable* saveUse = use;
+ SkADrawable* saveWhere = where;
+ use = nullptr;
+ where = nullptr;
+ SkAdd* copy = (SkAdd*) INHERITED::deepCopy(maker);
+ copy->use = use = saveUse;
+ copy->where = where = saveWhere;
+ return copy;
+}
+
+bool SkAdd::draw(SkAnimateMaker& maker) {
+ SkASSERT(use);
+ SkASSERT(use->isDrawable());
+ if (mode == kMode_indirect)
+ use->draw(maker);
+ return false;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkAdd::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ dumpAttrs(maker);
+ if (where)
+ SkDebugf("where=\"%s\" ", where->id);
+ if (mode == kMode_immediate)
+ SkDebugf("mode=\"immediate\" ");
+ SkDebugf(">\n");
+ SkDisplayList::fIndent += 4;
+ int save = SkDisplayList::fDumpIndex;
+ if (use) //just in case
+ use->dump(maker);
+ SkDisplayList::fIndent -= 4;
+ SkDisplayList::fDumpIndex = save;
+ dumpEnd(maker);
+}
+#endif
+
+bool SkAdd::enable(SkAnimateMaker& maker ) {
+ SkDisplayTypes type = getType();
+ SkDisplayList& displayList = maker.fDisplayList;
+ SkTDDrawableArray* parentList = displayList.getDrawList();
+ if (type == SkType_Add) {
+ if (use == nullptr) // not set in apply yet
+ return true;
+ }
+ bool skipAddToParent = true;
+ SkASSERT(type != SkType_Replace || where);
+ SkTDDrawableArray* grandList SK_INIT_TO_AVOID_WARNING;
+ SkGroup* parentGroup = nullptr;
+ SkGroup* thisGroup = nullptr;
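+    // When 'where' names a sibling, find its position in the display tree; otherwise the insertion index starts at 0.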
+ int index = where ? displayList.findGroup(where, &parentList, &parentGroup,
+ &thisGroup, &grandList) : 0;
+ if (index < 0)
+ return true;
+ int max = parentList->count();
+ if (where == nullptr && type == SkType_Move)
+ index = max;
+ if (offset != SK_MaxS32) {
+ index += offset;
+ if (index > max) {
+ maker.setErrorCode(SkDisplayXMLParserError::kIndexOutOfRange);
+ return true; // caller should not add
+ }
+ }
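+    // With no 'where', a negative offset counts back from the end of the list.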
+ if (offset < 0 && where == nullptr)
+ index += max + 1;
+ switch (type) {
+ case SkType_Add:
+ if (offset == SK_MaxS32 && where == nullptr) {
+ if (use->isDrawable()) {
+ skipAddToParent = mode == kMode_immediate;
+ if (skipAddToParent) {
+ if (where == nullptr) {
+ SkTDDrawableArray* useParentList;
+ index = displayList.findGroup(this, &useParentList, &parentGroup,
+ &thisGroup, &grandList);
+ if (index >= 0) {
+ parentGroup->markCopySize(index);
+ parentGroup->markCopySet(index);
+ useParentList->begin()[index] = use;
+ break;
+ }
+ }
+ *parentList->append() = use;
+ }
+ }
+ break;
+ } else {
+ if (thisGroup)
+ thisGroup->markCopySize(index);
+ *parentList->insert(index) = use;
+ if (thisGroup)
+ thisGroup->markCopySet(index);
+ if (use->isApply())
+ ((SkApply*) use)->setEmbedded();
+ }
+ break;
+ case SkType_Move: {
+ int priorLocation = parentList->find(use);
+ if (priorLocation < 0)
+ break;
+ *parentList->insert(index) = use;
+ if (index < priorLocation)
+ priorLocation++;
+ parentList->remove(priorLocation);
+ } break;
+ case SkType_Remove: {
+ SkDisplayable* old = (*parentList)[index];
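+            // Either delete the removed drawable outright, or hand it to the maker as a helper
+            // unless one of the maker's children still contains it.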
+ if (((SkRemove*)(this))->fDelete) {
+ delete old;
+ goto noHelperNeeded;
+ }
+ for (int inner = 0; inner < maker.fChildren.count(); inner++) {
+ SkDisplayable* child = maker.fChildren[inner];
+ if (child == old || child->contains(old))
+ goto noHelperNeeded;
+ }
+ if (maker.fHelpers.find(old) < 0)
+ maker.helperAdd(old);
+noHelperNeeded:
+ parentList->remove(index);
+ } break;
+ case SkType_Replace:
+ if (thisGroup) {
+ thisGroup->markCopySize(index);
+ if (thisGroup->markedForDelete(index)) {
+ SkDisplayable* old = (*parentList)[index];
+ if (maker.fHelpers.find(old) < 0)
+ maker.helperAdd(old);
+ }
+ }
+ (*parentList)[index] = use;
+ if (thisGroup)
+ thisGroup->markCopySet(index);
+ break;
+ default:
+ SkASSERT(0);
+ }
+ if (type == SkType_Remove)
+ return true;
+ if (use->hasEnable())
+ use->enable(maker);
+ return skipAddToParent; // append if indirect: *parentList->append() = this;
+}
+
+bool SkAdd::hasEnable() const {
+ return true;
+}
+
+void SkAdd::initialize() {
+ if (use)
+ use->initialize();
+}
+
+bool SkAdd::isDrawable() const {
+ return getType() == SkType_Add && mode == kMode_indirect && offset == SK_MaxS32 &&
+ where == nullptr && use != nullptr && use->isDrawable();
+}
+
+//SkDisplayable* SkAdd::resolveTarget(SkAnimateMaker& maker) {
+// return use;
+//}
+
+
+bool SkClear::enable(SkAnimateMaker& maker ) {
+ SkDisplayList& displayList = maker.fDisplayList;
+ displayList.clear();
+ return true;
+}
+
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkMove::fInfo[] = {
+ SK_MEMBER_INHERITED
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkMove);
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkRemove::fInfo[] = {
+ SK_MEMBER_ALIAS(delete, fDelete, Boolean), // !!! experimental
+ SK_MEMBER(offset, Int),
+ SK_MEMBER(where, Drawable)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkRemove);
+
+SkRemove::SkRemove() : fDelete(false) {
+}
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkReplace::fInfo[] = {
+ SK_MEMBER_INHERITED
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkReplace);
diff --git a/gfx/skia/skia/src/animator/SkDisplayAdd.h b/gfx/skia/skia/src/animator/SkDisplayAdd.h
new file mode 100644
index 000000000..eb4610b39
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayAdd.h
@@ -0,0 +1,71 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDisplayAdd_DEFINED
+#define SkDisplayAdd_DEFINED
+
+#include "SkADrawable.h"
+#include "SkMemberInfo.h"
+
+class SkAdd : public SkADrawable {
+ DECLARE_MEMBER_INFO(Add);
+ SkAdd();
+
+ enum Mode {
+ kMode_indirect,
+ kMode_immediate
+ };
+
+ SkDisplayable* deepCopy(SkAnimateMaker* ) override;
+ bool draw(SkAnimateMaker& ) override;
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+ bool enable(SkAnimateMaker& ) override;
+ bool hasEnable() const override;
+ void initialize() override;
+ bool isDrawable() const override;
+protected:
+// struct _A {
+ Mode mode;
+ int32_t offset;
+ SkADrawable* use;
+ SkADrawable* where; // if nullptr, offset becomes index
+// } A;
+private:
+ typedef SkADrawable INHERITED;
+};
+
+class SkClear : public SkDisplayable {
+ virtual bool enable(SkAnimateMaker& );
+};
+
+class SkMove : public SkAdd {
+ DECLARE_MEMBER_INFO(Move);
+private:
+ typedef SkAdd INHERITED;
+};
+
+class SkRemove : public SkAdd {
+ DECLARE_MEMBER_INFO(Remove);
+ SkRemove();
+protected:
+ SkBool fDelete;
+private:
+ friend class SkAdd;
+ typedef SkAdd INHERITED;
+};
+
+class SkReplace : public SkAdd {
+ DECLARE_MEMBER_INFO(Replace);
+private:
+ typedef SkAdd INHERITED;
+};
+
+#endif // SkDisplayAdd_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDisplayApply.cpp b/gfx/skia/skia/src/animator/SkDisplayApply.cpp
new file mode 100644
index 000000000..0d5f09d34
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayApply.cpp
@@ -0,0 +1,804 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDisplayApply.h"
+#include "SkAnimateActive.h"
+#include "SkAnimateMaker.h"
+#include "SkAnimateSet.h"
+#include "SkAnimatorScript.h"
+#include "SkDisplayType.h"
+#include "SkDrawGroup.h"
+#include "SkParse.h"
+#include "SkScript.h"
+#include "SkSystemEventTypes.h"
+#ifdef SK_DEBUG
+#include "SkTime.h"
+#endif
+#include <ctype.h>
+
+enum SkApply_Properties {
+ SK_PROPERTY(animator),
+ SK_PROPERTY(step),
+ SK_PROPERTY(steps),
+ SK_PROPERTY(time)
+};
+
+#if SK_USE_CONDENSED_INFO == 0
+
+// if no attributes, the enclosed displayable is both scope & target
+// scope and target differ only if both are specified, or if a target and an enclosed displayable are both present
+const SkMemberInfo SkApply::fInfo[] = {
+ SK_MEMBER_PROPERTY(animator, Animate),
+ SK_MEMBER(begin, MSec),
+ SK_MEMBER(dontDraw, Boolean),
+ SK_MEMBER(dynamicScope, String),
+ SK_MEMBER(interval, MSec), // recommended redraw interval
+ SK_MEMBER(mode, ApplyMode),
+#if 0
+ SK_MEMBER(pickup, Boolean),
+#endif
+ SK_MEMBER(restore, Boolean),
+ SK_MEMBER(scope, Drawable), // thing that scopes animation (unnamed enclosed displayable goes here)
+ SK_MEMBER_PROPERTY(step, Int),
+ SK_MEMBER_PROPERTY(steps, Int),
+ SK_MEMBER_PROPERTY(time, MSec),
+ SK_MEMBER(transition, ApplyTransition)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkApply);
+
+SkApply::SkApply() : begin(0), dontDraw(false), interval((SkMSec) -1), mode((Mode) -1), /*pickup(false), */
+ restore(false), scope(nullptr), steps(-1), transition((Transition) -1), fActive(nullptr), /*fCurrentScope(nullptr),*/
+ fLastTime(0), fAppended(false), fContainsScope(false), fDeleteScope(false), fEmbedded(false),
+ fEnabled(false), fEnabling(false) {
+}
+
+SkApply::~SkApply() {
+ for (SkADrawable** curPtr = fScopes.begin(); curPtr < fScopes.end(); curPtr++)
+ delete *curPtr;
+ if (fDeleteScope)
+ delete scope;
+ // !!! caller must call maker.removeActive(fActive)
+ delete fActive;
+}
+
+void SkApply::activate(SkAnimateMaker& maker) {
+ if (fActive != nullptr) {
+ if (fActive->fDrawIndex == 0 && fActive->fDrawMax == 0)
+ return; // if only one use, nothing more to do
+ if (restore == false)
+ return; // all share same state, regardless of instance number
+ bool save = fActive->initializeSave();
+ fActive->fixInterpolator(save);
+ } else {
+ fActive = new SkActive(*this, maker);
+ fActive->init();
+ maker.appendActive(fActive);
+ if (restore) {
+ fActive->initializeSave();
+ int animators = fAnimators.count();
+ for (int index = 0; index < animators; index++)
+ fActive->saveInterpolatorValues(index);
+ }
+ }
+}
+
+void SkApply::append(SkApply* apply) {
+ if (fActive == nullptr)
+ return;
+ int oldCount = fActive->fAnimators.count();
+ fActive->append(apply);
+ if (restore) {
+ fActive->appendSave(oldCount);
+ int newCount = fActive->fAnimators.count();
+ for (int index = oldCount; index < newCount; index++)
+ fActive->saveInterpolatorValues(index);
+ }
+}
+
+void SkApply::applyValues(int animatorIndex, SkOperand* values, int count,
+ SkDisplayTypes valuesType, SkMSec time)
+{
+ SkAnimateBase* animator = fActive->fAnimators[animatorIndex];
+    SkASSERT(animator);
+    const SkMemberInfo* info = animator->fFieldInfo;
+    SkASSERT(info != nullptr);
+ SkDisplayTypes type = (SkDisplayTypes) info->fType;
+ SkDisplayable* target = getTarget(animator);
+ if (animator->hasExecute() || type == SkType_MemberFunction || type == SkType_MemberProperty) {
+ SkDisplayable* executor = animator->hasExecute() ? animator : target;
+ if (type != SkType_MemberProperty) {
+ SkTDArray<SkScriptValue> typedValues;
+ for (int index = 0; index < count; index++) {
+ SkScriptValue temp;
+ temp.fType = valuesType;
+ temp.fOperand = values[index];
+ *typedValues.append() = temp;
+ }
+ executor->executeFunction(target, info->functionIndex(), typedValues, info->getType(), nullptr);
+ } else {
+ SkScriptValue scriptValue;
+ scriptValue.fOperand = values[0];
+ scriptValue.fType = info->getType();
+ target->setProperty(info->propertyIndex(), scriptValue);
+ }
+ } else {
+ SkTypedArray converted;
+ if (type == SkType_ARGB) {
+ if (count == 4) {
+ // !!! assert that it is SkType_Float ?
+ animator->packARGB(&values->fScalar, count, &converted);
+ values = converted.begin();
+ count = converted.count();
+ } else {
+ SkASSERT(count == 1);
+ }
+ }
+// SkASSERT(type == SkType_ARGB || type == SkType_String ||info->isSettable());
+ if (type == SkType_String || type == SkType_DynamicString)
+ info->setString(target, values->fString);
+ else if (type == SkType_Drawable || type == SkType_Displayable)
+ target->setReference(info, values->fDisplayable);
+ else
+ info->setValue(target, values, count);
+ }
+}
+
+bool SkApply::contains(SkDisplayable* child) {
+ for (SkADrawable** curPtr = fScopes.begin(); curPtr < fScopes.end(); curPtr++) {
+ if (*curPtr == child || (*curPtr)->contains(child))
+ return true;
+ }
+ return fDeleteScope && scope == child;
+}
+
+SkDisplayable* SkApply::deepCopy(SkAnimateMaker* maker) {
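+    // Clear 'scope' while the inherited copy runs, then deep-copy each animator into the copy
+    // and register it with the maker as a helper.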
+ SkADrawable* saveScope = scope;
+ scope = nullptr;
+ SkApply* result = (SkApply*) INHERITED::deepCopy(maker);
+ result->scope = scope = saveScope;
+ SkAnimateBase** end = fAnimators.end();
+ for (SkAnimateBase** animPtr = fAnimators.begin(); animPtr < end; animPtr++) {
+ SkAnimateBase* anim = (SkAnimateBase*) (*animPtr)->deepCopy(maker);
+ *result->fAnimators.append() = anim;
+ maker->helperAdd(anim);
+ }
+ return result;
+}
+
+void SkApply::disable() {
+ //!!! this is the right thing to do, but has bad side effects because of other problems
+    // currently, if an apply is in a g and scopes a statement in another g, it ends up as a member
+    // of both containers. The disabling here incorrectly disables both instances;
+ // maybe the fEnabled flag needs to be moved to the fActive data so that both
+ // instances are not affected.
+// fEnabled = false;
+}
+
+bool SkApply::draw(SkAnimateMaker& maker) {
+    if (scope == nullptr)
+ return false;
+ if (scope->isApply() || scope->isDrawable() == false)
+ return false;
+ if (fEnabled == false)
+ enable(maker);
+ SkASSERT(scope);
+ activate(maker);
+ if (mode == kMode_immediate)
+ return fActive->draw();
+ bool result = interpolate(maker, maker.getInTime());
+ if (dontDraw == false) {
+// if (scope->isDrawable())
+ result |= scope->draw(maker);
+ }
+ if (restore) {
+ for (int index = 0; index < fActive->fAnimators.count(); index++)
+ endSave(index);
+ fActive->advance();
+ }
+ return result;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkApply::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ if (dynamicScope.isEmpty() == false)
+ SkDebugf("dynamicScope=\"%s\" ", dynamicScope.c_str());
+ if (dontDraw)
+ SkDebugf("dontDraw=\"true\" ");
+ if (begin != 0) //perhaps we want this no matter what?
+ SkDebugf("begin=\"%g\" ", (float) begin/1000.0f); //is this correct?
+ if (interval != (SkMSec) -1)
+ SkDebugf("interval=\"%g\" ", (float) interval/1000.0f);
+ if (steps != -1)
+ SkDebugf("steps=\"%d\" ", steps);
+ if (restore)
+ SkDebugf("restore=\"true\" ");
+ if (transition == kTransition_reverse)
+ SkDebugf("transition=\"reverse\" ");
+ if (mode == kMode_immediate) {
+ SkDebugf("mode=\"immediate\" ");
+ }
+ else if (mode == kMode_create) {
+ SkDebugf("mode=\"create\" ");
+ }
+ bool closedYet = false;
+ SkDisplayList::fIndent += 4;
+ int save = SkDisplayList::fDumpIndex;
+ if (scope) {
+ if (closedYet == false) {
+ SkDebugf(">\n");
+ closedYet = true;
+ }
+ scope->dump(maker);
+ }
+ int index;
+// if (fActive) {
+ for (index = 0; index < fAnimators.count(); index++) {
+ if (closedYet == false) {
+ SkDebugf(">\n");
+ closedYet = true;
+ }
+ SkAnimateBase* animator = fAnimators[index];
+ animator->dump(maker);
+// }
+ }
+ SkDisplayList::fIndent -= 4;
+ SkDisplayList::fDumpIndex = save;
+ if (closedYet)
+ dumpEnd(maker);
+ else
+ SkDebugf("/>\n");
+}
+#endif
+
+bool SkApply::enable(SkAnimateMaker& maker) {
+ fEnabled = true;
+ bool initialized = fActive != nullptr;
+ if (dynamicScope.size() > 0)
+ enableDynamic(maker);
+ if (maker.fError.hasError())
+ return false;
+ int animators = fAnimators.count();
+ int index;
+ for (index = 0; index < animators; index++) {
+ SkAnimateBase* animator = fAnimators[index];
+ animator->fStart = maker.fEnableTime;
+ animator->fResetPending = animator->fReset;
+ }
+ if (scope && scope->isApply())
+ ((SkApply*) scope)->setEmbedded();
+/* if (mode == kMode_once) {
+ if (scope) {
+ activate(maker);
+ interpolate(maker, maker.fEnableTime);
+ inactivate(maker);
+ }
+ return true;
+ }*/
+ if ((mode == kMode_immediate || mode == kMode_create) && scope == nullptr)
+ return false; // !!! error?
+ bool enableMe = scope && (scope->hasEnable() || scope->isApply() || scope->isDrawable() == false);
+ if ((mode == kMode_immediate && enableMe) || mode == kMode_create)
+ activate(maker); // for non-drawables like post, prime them here
+ if (mode == kMode_immediate && enableMe)
+ fActive->enable();
+ if (mode == kMode_create && scope != nullptr) {
+ enableCreate(maker);
+ return true;
+ }
+ if (mode == kMode_immediate) {
+ return scope->isApply() || scope->isDrawable() == false;
+ }
+ refresh(maker);
+ SkDisplayList& displayList = maker.fDisplayList;
+ SkADrawable* drawable;
+#if defined SK_DEBUG && defined SK_DEBUG_ANIMATION_TIMING
+ SkString debugOut;
+ SkMSec time = maker.getAppTime();
+ debugOut.appendS32(time - maker.fDebugTimeBase);
+ debugOut.append(" apply enable id=");
+ debugOut.append(_id);
+ debugOut.append("; start=");
+ debugOut.appendS32(maker.fEnableTime - maker.fDebugTimeBase);
+ SkDebugf("%s\n", debugOut.c_str());
+#endif
+ if (scope == nullptr || scope->isApply() || scope->getType() == SkType_Movie || scope->isDrawable() == false) {
+ activate(maker); // for non-drawables like post, prime them here
+ if (initialized) {
+ append(this);
+ }
+ fEnabling = true;
+ interpolate(maker, maker.fEnableTime);
+ fEnabling = false;
+ if (scope != nullptr && dontDraw == false)
+ scope->enable(maker);
+ return true;
+ } else if (initialized && restore == false)
+ append(this);
+#if 0
+ bool wasActive = inactivate(maker); // start fresh
+ if (wasActive) {
+ activate(maker);
+ interpolate(maker, maker.fEnableTime);
+ return true;
+ }
+#endif
+// start here;
+ // now that one apply might embed another, only the parent apply should replace the scope
+ // or get appended to the display list
+ // similarly, an apply added by an add immediate has already been located in the display list
+ // and should not get moved or added again here
+ if (fEmbedded) {
+ return false; // already added to display list by embedder
+ }
+ drawable = (SkADrawable*) scope;
+ SkTDDrawableArray* parentList;
+ SkTDDrawableArray* grandList;
+ SkGroup* parentGroup;
+ SkGroup* thisGroup;
+ int old = displayList.findGroup(drawable, &parentList, &parentGroup, &thisGroup, &grandList);
+ if (old < 0)
+ goto append;
+ else if (fContainsScope) {
+ if ((*parentList)[old] != this || restore) {
+append:
+ if (parentGroup)
+ parentGroup->markCopySize(old);
+ if (parentList->count() < 10000) {
+ fAppended = true;
+ *parentList->append() = this;
+ } else
+ maker.setErrorCode(SkDisplayXMLParserError::kDisplayTreeTooDeep);
+ old = -1;
+ } else
+ reset();
+ } else {
+ SkASSERT(old < parentList->count());
+ if ((*parentList)[old]->isApply()) {
+ SkApply* apply = (SkApply*) (*parentList)[old];
+ if (apply != this && apply->fActive == nullptr)
+ apply->activate(maker);
+ apply->append(this);
+ parentGroup = nullptr;
+ } else {
+ if (parentGroup)
+ parentGroup->markCopySize(old);
+ SkADrawable** newApplyLocation = &(*parentList)[old];
+ SkGroup* pGroup;
+ int oldApply = displayList.findGroup(this, &parentList, &pGroup, &thisGroup, &grandList);
+ if (oldApply >= 0) {
+ (*parentList)[oldApply] = (SkADrawable*) SkDisplayType::CreateInstance(&maker, SkType_Apply);
+ parentGroup = nullptr;
+ fDeleteScope = true;
+ }
+ *newApplyLocation = this;
+ }
+ }
+ if (parentGroup) {
+ parentGroup->markCopySet(old);
+ fDeleteScope = dynamicScope.size() == 0;
+ }
+ return true;
+}
+
+void SkApply::enableCreate(SkAnimateMaker& maker) {
+ SkString newID;
+ for (int step = 0; step <= steps; step++) {
+ fLastTime = step * SK_MSec1;
+ bool success = maker.computeID(scope, this, &newID);
+ if (success == false)
+ return;
+ if (maker.find(newID.c_str(), nullptr))
+ continue;
+ SkApply* copy = (SkApply*) deepCopy(&maker); // work on copy of animator state
+ if (mode == kMode_create)
+ copy->mode = (Mode) -1;
+ SkADrawable* copyScope = copy->scope = (SkADrawable*) scope->deepCopy(&maker);
+ *fScopes.append() = copyScope;
+ if (copyScope->resolveIDs(maker, scope, this)) {
+ step = steps; // quit
+ goto next; // resolveIDs failed
+ }
+ if (newID.size() > 0)
+ maker.setID(copyScope, newID);
+ if (copy->resolveIDs(maker, this, this)) { // fix up all fields, including target
+ step = steps; // quit
+ goto next; // resolveIDs failed
+ }
+ copy->activate(maker);
+ copy->interpolate(maker, step * SK_MSec1);
+ maker.removeActive(copy->fActive);
+ next:
+ delete copy;
+ }
+}
+
+void SkApply::enableDynamic(SkAnimateMaker& maker) {
+ SkASSERT(mode != kMode_create); // create + dynamic are not currently compatible
+ SkDisplayable* newScope;
+ bool success = SkAnimatorScript::EvaluateDisplayable(maker, this, dynamicScope.c_str(),
+ &newScope);
+ if (success && scope != newScope) {
+ SkTDDrawableArray* pList, * gList;
+ SkGroup* pGroup = nullptr, * found = nullptr;
+ int old = maker.fDisplayList.findGroup(scope, &pList, &pGroup, &found, &gList);
+ if (pList && old >= 0 && (*pList)[old]->isApply() && (*pList)[old] != this) {
+ if (fAppended == false) {
+ if (found != nullptr) {
+ SkDisplayable* oldChild = (*pList)[old];
+ if (oldChild->isApply() && found->copySet(old)) {
+ found->markCopyClear(old);
+ // delete oldChild;
+ }
+ }
+ (*pList)[old] = scope;
+ } else
+ pList->remove(old);
+ }
+ scope = (SkADrawable*) newScope;
+ onEndElement(maker);
+ }
+ maker.removeActive(fActive);
+ delete fActive;
+ fActive = nullptr;
+}
+
+void SkApply::endSave(int index) {
+ SkAnimateBase* animate = fActive->fAnimators[index];
+ const SkMemberInfo* info = animate->fFieldInfo;
+ SkDisplayTypes type = (SkDisplayTypes) info->fType;
+ if (type == SkType_MemberFunction)
+ return;
+ SkDisplayable* target = getTarget(animate);
+ size_t size = info->getSize(target);
+ int count = (int) (size / sizeof(SkScalar));
+ int activeIndex = fActive->fDrawIndex + index;
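+    // Capture the target's final value, write the saved value back onto the target,
+    // then store the final value in fSaveRestore.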
+ SkOperand* last = new SkOperand[count];
+ SkAutoTDelete<SkOperand> autoLast(last);
+ if (type != SkType_MemberProperty) {
+ info->getValue(target, last, count);
+ SkOperand* saveOperand = fActive->fSaveRestore[activeIndex];
+ if (saveOperand)
+ info->setValue(target, fActive->fSaveRestore[activeIndex], count);
+ } else {
+ SkScriptValue scriptValue;
+ SkDEBUGCODE(bool success = ) target->getProperty(info->propertyIndex(), &scriptValue);
+ SkASSERT(success == true);
+ last[0] = scriptValue.fOperand;
+ scriptValue.fOperand = fActive->fSaveRestore[activeIndex][0];
+ target->setProperty(info->propertyIndex(), scriptValue);
+ }
+ SkOperand* save = fActive->fSaveRestore[activeIndex];
+ if (save)
+ memcpy(save, last, count * sizeof(SkOperand));
+}
+
+bool SkApply::getProperty(int index, SkScriptValue* value) const {
+ switch (index) {
+ case SK_PROPERTY(step):
+ value->fType = SkType_Int;
+ value->fOperand.fS32 = fLastTime / SK_MSec1;
+ break;
+ case SK_PROPERTY(steps):
+ value->fType = SkType_Int;
+ value->fOperand.fS32 = steps;
+ break;
+ case SK_PROPERTY(time):
+ value->fType = SkType_MSec;
+ value->fOperand.fS32 = fLastTime;
+ break;
+ default:
+ // SkASSERT(0);
+ return false;
+ }
+ return true;
+}
+
+void SkApply::getStep(SkScriptValue* value) {
+ getProperty(SK_PROPERTY(step), value);
+}
+
+SkADrawable* SkApply::getTarget(SkAnimateBase* animate) {
+ if (animate->fTargetIsScope == false || mode != kMode_create)
+ return animate->fTarget;
+ return scope;
+}
+
+bool SkApply::hasDelayedAnimator() const {
+ SkAnimateBase* const* animEnd = fAnimators.end();
+ for (SkAnimateBase* const* animPtr = fAnimators.begin(); animPtr < animEnd; animPtr++) {
+ SkAnimateBase* const animator = *animPtr;
+ if (animator->fDelayed)
+ return true;
+ }
+ return false;
+}
+
+bool SkApply::hasEnable() const {
+ return true;
+}
+
+bool SkApply::inactivate(SkAnimateMaker& maker) {
+ if (fActive == nullptr)
+ return false;
+ maker.removeActive(fActive);
+ delete fActive;
+ fActive = nullptr;
+ return true;
+}
+
+#ifdef SK_DEBUG
+SkMSec lastTime = (SkMSec) -1;
+#endif
+
+bool SkApply::interpolate(SkAnimateMaker& maker, SkMSec rawTime) {
+ if (fActive == nullptr)
+ return false;
+ bool result = false;
+#if defined SK_DEBUG && defined SK_DEBUG_ANIMATION_TIMING
+ SkMSec time = maker.getAppTime();
+ if (lastTime == (SkMSec) -1)
+ lastTime = rawTime - 1;
+ if (fActive != nullptr &&
+ strcmp(id, "a3") == 0 && rawTime > lastTime) {
+ lastTime += 1000;
+ SkString debugOut;
+ debugOut.appendS32(time - maker.fDebugTimeBase);
+ debugOut.append(" apply id=");
+ debugOut.append(_id);
+ debugOut.append("; ");
+ debugOut.append(fActive->fAnimators[0]->_id);
+ debugOut.append("=");
+ debugOut.appendS32(rawTime - fActive->fState[0].fStartTime);
+ debugOut.append(")");
+ SkDebugf("%s\n", debugOut.c_str());
+ }
+#endif
+ fActive->start();
+ if (restore)
+ fActive->initializeSave();
+ int animators = fActive->fAnimators.count();
+ for (int inner = 0; inner < animators; inner++) {
+ SkAnimateBase* animate = fActive->fAnimators[inner];
+ if (animate->fChanged) {
+ animate->fChanged = false;
+ animate->fStart = rawTime;
+ // SkTypedArray values;
+ // int count = animate->fValues.count();
+ // values.setCount(count);
+ // memcpy(values.begin(), animate->fValues.begin(), sizeof(SkOperand) * count);
+ animate->onEndElement(maker);
+ // if (memcmp(values.begin(), animate->fValues.begin(), sizeof(SkOperand) * count) != 0) {
+ fActive->append(this);
+ fActive->start();
+ // }
+ }
+ SkMSec time = fActive->getTime(rawTime, inner);
+ SkActive::SkState& state = fActive->fState[inner];
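+        // Animators whose start time has not arrived are skipped; during enabling they are
+        // marked delayed and re-enabled at their start time.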
+ if (SkMSec_LT(rawTime, state.fStartTime)) {
+ if (fEnabling) {
+ animate->fDelayed = true;
+ maker.delayEnable(this, state.fStartTime);
+ }
+ continue;
+ } else
+ animate->fDelayed = false;
+ SkMSec innerTime = fLastTime = state.getRelativeTime(time);
+ if (restore)
+ fActive->restoreInterpolatorValues(inner);
+ if (animate->fReset) {
+ if (transition != SkApply::kTransition_reverse) {
+ if (SkMSec_LT(state.fBegin + state.fDuration, innerTime)) {
+ if (animate->fResetPending) {
+ innerTime = 0;
+ animate->fResetPending = false;
+ } else
+ continue;
+ }
+ } else if (innerTime == 0) {
+ if (animate->fResetPending) {
+ innerTime = state.fBegin + state.fDuration;
+ animate->fResetPending = false;
+ } else
+ continue;
+ }
+ }
+ int count = animate->components();
+ SkAutoSTMalloc<16, SkOperand> values(count);
+ SkInterpolatorBase::Result interpResult = fActive->fInterpolators[inner]->timeToValues(
+ innerTime, values.get());
+ result |= (interpResult != SkInterpolatorBase::kFreezeEnd_Result);
+ if (((transition != SkApply::kTransition_reverse && interpResult == SkInterpolatorBase::kFreezeEnd_Result) ||
+ (transition == SkApply::kTransition_reverse && fLastTime == 0)) && state.fUnpostedEndEvent) {
+// SkDEBUGF(("interpolate: post on end\n"));
+ state.fUnpostedEndEvent = false;
+ maker.postOnEnd(animate, state.fBegin + state.fDuration);
+            maker.fAdjustedStart = 0;    // !!! left over from synchronizing animation days, undoubtedly out of date (and broken)
+ }
+ if (animate->formula.size() > 0) {
+ if (fLastTime > animate->dur)
+ fLastTime = animate->dur;
+ SkTypedArray formulaValues;
+ formulaValues.setCount(count);
+ SkDEBUGCODE(bool success = ) animate->fFieldInfo->setValue(maker, &formulaValues, 0, 0, nullptr,
+ animate->getValuesType(), animate->formula);
+ SkASSERT(success);
+ if (restore)
+ save(inner); // save existing value
+ applyValues(inner, formulaValues.begin(), count, animate->getValuesType(), innerTime);
+ } else {
+ if (restore)
+ save(inner); // save existing value
+ applyValues(inner, values.get(), count, animate->getValuesType(), innerTime);
+ }
+ }
+ return result;
+}
+
+void SkApply::initialize() {
+ if (scope == nullptr)
+ return;
+ if (scope->isApply() || scope->isDrawable() == false)
+ return;
+ scope->initialize();
+}
+
+void SkApply::onEndElement(SkAnimateMaker& maker)
+{
+ SkADrawable* scopePtr = scope;
+ while (scopePtr && scopePtr->isApply()) {
+ SkApply* scopedApply = (SkApply*) scopePtr;
+ if (scopedApply->scope == this) {
+ maker.setErrorCode(SkDisplayXMLParserError::kApplyScopesItself);
+ return;
+ }
+ scopePtr = scopedApply->scope;
+ }
+ if (mode == kMode_create)
+ return;
+ if (scope != nullptr && steps >= 0 && scope->isApply() == false && scope->isDrawable())
+ scope->setSteps(steps);
+ for (SkAnimateBase** animPtr = fAnimators.begin(); animPtr < fAnimators.end(); animPtr++) {
+ SkAnimateBase* anim = *animPtr;
+ //for reusing apply statements with dynamic scope
+ if (anim->fTarget == nullptr || anim->fTargetIsScope) {
+ anim->fTargetIsScope = true;
+ if (scope)
+ anim->fTarget = scope;
+ else
+ anim->setTarget(maker);
+ anim->onEndElement(maker); // allows animate->fFieldInfo to be set
+ }
+ if (scope != nullptr && steps >= 0 && anim->fTarget != scope && anim->fTarget->isDrawable())
+ anim->fTarget->setSteps(steps);
+ }
+}
+
+const SkMemberInfo* SkApply::preferredChild(SkDisplayTypes type) {
+ SkASSERT(SkDisplayType::IsAnimate(type) == false);
+ fContainsScope = true;
+ return getMember("scope"); // !!! cwap! need to refer to member through enum like kScope instead
+}
+
+void SkApply::refresh(SkAnimateMaker& maker) {
+ for (SkAnimateBase** animPtr = fAnimators.begin(); animPtr < fAnimators.end(); animPtr++) {
+ SkAnimateBase* animate = *animPtr;
+ animate->onEndElement(maker);
+ }
+ if (fActive)
+ fActive->resetInterpolators();
+}
+
+void SkApply::reset() {
+ if (fActive)
+ fActive->resetState();
+}
+
+// replace to/formula strings in animators of the form xxx.step with the step value, if xxx.step is in scope
+bool SkApply::resolveIDs(SkAnimateMaker& maker, SkDisplayable* original, SkApply* apply) {
+ if (resolveField(maker, apply, &dynamicScope) == false)
+ return true; // failed
+ SkAnimateBase** endPtr = fAnimators.end();
+ SkAnimateBase** origPtr = ((SkApply*) original)->fAnimators.begin();
+ for (SkAnimateBase** animPtr = fAnimators.begin(); animPtr < endPtr; ) {
+ SkAnimateBase* animator = *animPtr++;
+ maker.resolveID(animator, *origPtr++);
+ if (resolveField(maker, this, &animator->target) == false)
+ return true;
+ if (resolveField(maker, this, &animator->from) == false)
+ return true;
+ if (resolveField(maker, this, &animator->to) == false)
+ return true;
+ if (resolveField(maker, this, &animator->formula) == false)
+ return true;
+ }
+// setEmbedded();
+ onEndElement(maker);
+ return false; // succeeded
+}
+
+bool SkApply::resolveField(SkAnimateMaker& maker, SkDisplayable* parent, SkString* str) {
+ const char* script = str->c_str();
+ if (str->startsWith("#string:") == false)
+ return true;
+ script += sizeof("#string:") - 1;
+ return SkAnimatorScript::EvaluateString(maker, this, parent, script, str);
+}
+
+void SkApply::save(int index) {
+ SkAnimateBase* animate = fActive->fAnimators[index];
+ const SkMemberInfo * info = animate->fFieldInfo;
+ SkDisplayable* target = getTarget(animate);
+// if (animate->hasExecute())
+// info = animate->getResolvedInfo();
+ SkDisplayTypes type = (SkDisplayTypes) info->fType;
+ if (type == SkType_MemberFunction)
+ return; // nothing to save
+ size_t size = info->getSize(target);
+ int count = (int) (size / sizeof(SkScalar));
+ bool useLast = true;
+// !!! this all may be unneeded, at least in the dynamic case ??
+ int activeIndex = fActive->fDrawIndex + index;
+ SkTDOperandArray last;
+ if (fActive->fSaveRestore[activeIndex] == nullptr) {
+ fActive->fSaveRestore[activeIndex] = new SkOperand[count];
+ useLast = false;
+ } else {
+ last.setCount(count);
+ memcpy(last.begin(), fActive->fSaveRestore[activeIndex], count * sizeof(SkOperand));
+ }
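+    // Swap the target's current value with the previously saved one: the current value goes into
+    // fSaveRestore, and (when one was saved earlier) the saved value is written back to the target.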
+ if (type != SkType_MemberProperty) {
+ info->getValue(target, fActive->fSaveRestore[activeIndex], count);
+ if (useLast)
+ info->setValue(target, last.begin(), count);
+ } else {
+ SkScriptValue scriptValue;
+ SkDEBUGCODE(bool success = ) target->getProperty(info->propertyIndex(), &scriptValue);
+ SkASSERT(success == true);
+ SkASSERT(scriptValue.fType == SkType_Float);
+ fActive->fSaveRestore[activeIndex][0] = scriptValue.fOperand;
+ if (useLast) {
+ SkScriptValue scriptValue;
+ scriptValue.fType = type;
+ scriptValue.fOperand = last[0];
+ target->setProperty(info->propertyIndex(), scriptValue);
+ }
+ }
+// !!! end of unneeded
+}
+
+bool SkApply::setProperty(int index, SkScriptValue& scriptValue) {
+ switch (index) {
+ case SK_PROPERTY(animator): {
+ SkAnimateBase* animate = (SkAnimateBase*) scriptValue.fOperand.fDisplayable;
+ SkASSERT(animate->isAnimate());
+ *fAnimators.append() = animate;
+ return true;
+ }
+ case SK_PROPERTY(steps):
+ steps = scriptValue.fOperand.fS32;
+ if (fActive)
+ fActive->setSteps(steps);
+ return true;
+ }
+ return false;
+}
+
+void SkApply::setSteps(int _steps) {
+ steps = _steps;
+}
+
+#ifdef SK_DEBUG
+void SkApply::validate() {
+ if (fActive)
+ fActive->validate();
+}
+#endif
diff --git a/gfx/skia/skia/src/animator/SkDisplayApply.h b/gfx/skia/skia/src/animator/SkDisplayApply.h
new file mode 100644
index 000000000..12cf6cee7
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayApply.h
@@ -0,0 +1,106 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDisplayApply_DEFINED
+#define SkDisplayApply_DEFINED
+
+#include "SkAnimateBase.h"
+#include "SkADrawable.h"
+#include "SkIntArray.h"
+
+class SkActive;
+
+class SkApply : public SkADrawable {
+ DECLARE_MEMBER_INFO(Apply);
+public:
+
+ SkApply();
+ virtual ~SkApply();
+
+ enum Transition {
+ kTransition_normal,
+ kTransition_reverse
+ };
+
+ enum Mode {
+ kMode_create,
+ kMode_immediate,
+ //kMode_once
+ };
+ void activate(SkAnimateMaker& );
+ void append(SkApply* apply);
+ void appendActive(SkActive* );
+ void applyValues(int animatorIndex, SkOperand* values, int count,
+ SkDisplayTypes , SkMSec time);
+ bool contains(SkDisplayable*) override;
+// void createActive(SkAnimateMaker& );
+ SkDisplayable* deepCopy(SkAnimateMaker* ) override;
+ void disable();
+ bool draw(SkAnimateMaker& ) override;
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+ bool enable(SkAnimateMaker& ) override;
+ void enableCreate(SkAnimateMaker& );
+ void enableDynamic(SkAnimateMaker& );
+ void endSave(int index);
+ Mode getMode() { return mode; }
+ bool getProperty(int index, SkScriptValue* value) const override;
+ SkADrawable* getScope() { return scope; }
+ void getStep(SkScriptValue* );
+ SkADrawable* getTarget(SkAnimateBase* );
+ bool hasDelayedAnimator() const;
+ bool hasEnable() const override;
+ bool inactivate(SkAnimateMaker& maker);
+ void initialize() override;
+ bool interpolate(SkAnimateMaker& , SkMSec time);
+ void onEndElement(SkAnimateMaker& ) override;
+ const SkMemberInfo* preferredChild(SkDisplayTypes type) override;
+ void refresh(SkAnimateMaker& );
+ void reset();
+ bool resolveIDs(SkAnimateMaker& maker, SkDisplayable* original, SkApply* ) override;
+ bool resolveField(SkAnimateMaker& , SkDisplayable* parent, SkString* str);
+ void save(int index);
+ void setEmbedded() { fEmbedded = true; }
+ bool setProperty(int index, SkScriptValue& ) override;
+ void setSteps(int _steps) override;
+// virtual void setTime(SkMSec time);
+#ifdef SK_DEBUG
+ void validate() override;
+#endif
+private:
+ SkMSec begin;
+ SkBool dontDraw;
+ SkString dynamicScope;
+ SkMSec interval;
+ Mode mode;
+#if 0
+ SkBool pickup;
+#endif
+ SkBool restore;
+ SkADrawable* scope;
+ int32_t steps;
+ Transition transition;
+ SkActive* fActive;
+ SkTDAnimateArray fAnimators;
+// SkADrawable* fCurrentScope;
+ SkMSec fLastTime; // used only to return script property time
+ SkTDDrawableArray fScopes;
+ SkBool fAppended : 1;
+ SkBool fContainsScope : 1;
+ SkBool fDeleteScope : 1;
+ SkBool fEmbedded : 1;
+ SkBool fEnabled : 1;
+ SkBool fEnabling : 1; // set if calling interpolate from enable
+ friend class SkActive;
+ friend class SkDisplayList;
+ typedef SkADrawable INHERITED;
+};
+
+#endif // SkDisplayApply_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDisplayBounds.cpp b/gfx/skia/skia/src/animator/SkDisplayBounds.cpp
new file mode 100644
index 000000000..49ec9b96a
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayBounds.cpp
@@ -0,0 +1,43 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDisplayBounds.h"
+#include "SkAnimateMaker.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDisplayBounds::fInfo[] = {
+ SK_MEMBER_INHERITED,
+ SK_MEMBER(inval, Boolean)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDisplayBounds);
+
+SkDisplayBounds::SkDisplayBounds() : inval(false) {
+}
+
+bool SkDisplayBounds::draw(SkAnimateMaker& maker) {
+ maker.fDisplayList.fUnionBounds = SkToBool(inval);
+ maker.fDisplayList.fDrawBounds = false;
+ fBounds.setEmpty();
+ bool result = INHERITED::draw(maker);
+ maker.fDisplayList.fUnionBounds = false;
+ maker.fDisplayList.fDrawBounds = true;
+ if (inval && fBounds.isEmpty() == false) {
+ SkIRect& rect = maker.fDisplayList.fInvalBounds;
+ maker.fDisplayList.fHasUnion = true;
+ if (rect.isEmpty())
+ rect = fBounds;
+ else
+ rect.join(fBounds);
+ }
+ return result;
+}
diff --git a/gfx/skia/skia/src/animator/SkDisplayBounds.h b/gfx/skia/skia/src/animator/SkDisplayBounds.h
new file mode 100644
index 000000000..547a29e81
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayBounds.h
@@ -0,0 +1,24 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDisplayBounds_DEFINED
+#define SkDisplayBounds_DEFINED
+
+#include "SkDrawRectangle.h"
+
+class SkDisplayBounds : public SkDrawRect {
+ DECLARE_DISPLAY_MEMBER_INFO(Bounds);
+ SkDisplayBounds();
+ bool draw(SkAnimateMaker& ) override;
+private:
+ SkBool inval;
+ typedef SkDrawRect INHERITED;
+};
+
+#endif // SkDisplayBounds_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDisplayEvent.cpp b/gfx/skia/skia/src/animator/SkDisplayEvent.cpp
new file mode 100644
index 000000000..746780df1
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayEvent.cpp
@@ -0,0 +1,252 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDisplayEvent.h"
+#include "SkAnimateMaker.h"
+#include "SkDisplayApply.h"
+#include "SkDisplayInput.h"
+#include "SkDisplayList.h"
+#ifdef SK_DEBUG
+#include "SkDump.h"
+#endif
+#include "SkEvent.h"
+#include "SkDisplayInput.h"
+#include "SkKey.h"
+#include "SkMetaData.h"
+#include "SkScript.h"
+#include "SkUtils.h"
+
+enum SkDisplayEvent_Properties {
+ SK_PROPERTY(key),
+ SK_PROPERTY(keys)
+};
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDisplayEvent::fInfo[] = {
+ SK_MEMBER(code, EventCode),
+ SK_MEMBER(disable, Boolean),
+ SK_MEMBER_PROPERTY(key, String), // a single key (also last key pressed)
+ SK_MEMBER_PROPERTY(keys, String), // a single key or dash-delimited range of keys
+ SK_MEMBER(kind, EventKind),
+ SK_MEMBER(target, String),
+ SK_MEMBER(x, Float),
+ SK_MEMBER(y, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDisplayEvent);
+
+SkDisplayEvent::SkDisplayEvent() : code((SkKey) -1), disable(false),
+ kind(kUser), x(0), y(0), fLastCode((SkKey) -1), fMax((SkKey) -1), fTarget(nullptr) {
+}
+
+SkDisplayEvent::~SkDisplayEvent() {
+ deleteMembers();
+}
+
+bool SkDisplayEvent::addChild(SkAnimateMaker& , SkDisplayable* child) {
+ *fChildren.append() = child;
+ return true;
+}
+
+bool SkDisplayEvent::contains(SkDisplayable* match) {
+ for (int index = 0; index < fChildren.count(); index++) {
+ if (fChildren[index] == match || fChildren[index]->contains(match))
+ return true;
+ }
+ return false;
+}
+
+SkDisplayable* SkDisplayEvent::contains(const SkString& match) {
+ for (int index = 0; index < fChildren.count(); index++) {
+ SkDisplayable* child = fChildren[index];
+ if (child->contains(match))
+ return child;
+ }
+ return nullptr;
+}
+
+void SkDisplayEvent::deleteMembers() {
+ for (int index = 0; index < fChildren.count(); index++) {
+ SkDisplayable* evt = fChildren[index];
+ delete evt;
+ }
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkDisplayEvent::dumpEvent(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ SkString str;
+ SkDump::GetEnumString(SkType_EventKind, kind, &str);
+ SkDebugf("kind=\"%s\" ", str.c_str());
+ if (kind == SkDisplayEvent::kKeyPress || kind == SkDisplayEvent::kKeyPressUp) {
+ if (code >= 0)
+ SkDump::GetEnumString(SkType_EventCode, code, &str);
+ else
+ str.set("none");
+ SkDebugf("code=\"%s\" ", str.c_str());
+ }
+ if (kind == SkDisplayEvent::kKeyChar) {
+ if (fMax != (SkKey) -1 && fMax != code)
+ SkDebugf("keys=\"%c - %c\" ", code, fMax);
+ else
+ SkDebugf("key=\"%c\" ", code);
+ }
+ if (fTarget != nullptr) {
+ SkDebugf("target=\"%s\" ", fTarget->id);
+ }
+ if (kind >= SkDisplayEvent::kMouseDown && kind <= SkDisplayEvent::kMouseUp) {
+ SkDebugf("x=\"%g\" y=\"%g\" ", SkScalarToFloat(x), SkScalarToFloat(y));
+ }
+ if (disable)
+ SkDebugf("disable=\"true\" ");
+ SkDebugf("/>\n");
+}
+#endif
+
+bool SkDisplayEvent::enableEvent(SkAnimateMaker& maker)
+{
+ maker.fActiveEvent = this;
+ if (fChildren.count() == 0)
+ return false;
+ if (disable)
+ return false;
+#ifdef SK_DUMP_ENABLED
+ if (maker.fDumpEvents) {
+ SkDebugf("enable: ");
+ dumpEvent(&maker);
+ }
+#endif
+ SkDisplayList& displayList = maker.fDisplayList;
+ for (int index = 0; index < fChildren.count(); index++) {
+ SkDisplayable* displayable = fChildren[index];
+ if (displayable->isGroup()) {
+ SkTDDrawableArray* parentList = displayList.getDrawList();
+ *parentList->append() = (SkADrawable*) displayable; // make it findable before children are enabled
+ }
+ if (displayable->enable(maker))
+ continue;
+ if (maker.hasError())
+ return true;
+ if (displayable->isDrawable() == false)
+ return true; // error
+ SkADrawable* drawable = (SkADrawable*) displayable;
+ SkTDDrawableArray* parentList = displayList.getDrawList();
+ *parentList->append() = drawable;
+ }
+ return false;
+}
+
+bool SkDisplayEvent::getProperty(int index, SkScriptValue* value) const {
+ switch (index) {
+ case SK_PROPERTY(key):
+ case SK_PROPERTY(keys): {
+ value->fType = SkType_String;
+ char scratch[8];
+ SkKey convert = index == SK_PROPERTY(keys) ? code : fLastCode;
+ size_t size = convert > 0 ? SkUTF8_FromUnichar(convert, scratch) : 0;
+ fKeyString.set(scratch, size);
+ value->fOperand.fString = &fKeyString;
+ if (index != SK_PROPERTY(keys) || fMax == (SkKey) -1 || fMax == code)
+ break;
+ value->fOperand.fString->append("-");
+ size = SkUTF8_FromUnichar(fMax, scratch);
+ value->fOperand.fString->append(scratch, size);
+ } break;
+ default:
+ SkASSERT(0);
+ return false;
+ }
+ return true;
+}
+
+void SkDisplayEvent::onEndElement(SkAnimateMaker& maker)
+{
+ if (kind == kUser)
+ return;
+ maker.fEvents.addEvent(this);
+ if (kind == kOnEnd) {
+ SkDEBUGCODE(bool found = ) maker.find(target.c_str(), &fTarget);
+ SkASSERT(found);
+ SkASSERT(fTarget && fTarget->isAnimate());
+ SkAnimateBase* animate = (SkAnimateBase*) fTarget;
+ animate->setHasEndEvent();
+ }
+}
+
+void SkDisplayEvent::populateInput(SkAnimateMaker& maker, const SkEvent& fEvent) {
+ const SkMetaData& meta = fEvent.getMetaData();
+ SkMetaData::Iter iter(meta);
+ SkMetaData::Type type;
+ int number;
+ const char* name;
+ while ((name = iter.next(&type, &number)) != nullptr) {
+ if (name[0] == '\0')
+ continue;
+ SkDisplayable* displayable;
+ SkInput* input;
+ for (int index = 0; index < fChildren.count(); index++) {
+ displayable = fChildren[index];
+ if (displayable->getType() != SkType_Input)
+ continue;
+ input = (SkInput*) displayable;
+ if (input->name.equals(name))
+ goto found;
+ }
+ if (!maker.find(name, &displayable) || displayable->getType() != SkType_Input)
+ continue;
+ input = (SkInput*) displayable;
+ found:
+ switch (type) {
+ case SkMetaData::kS32_Type:
+ meta.findS32(name, &input->fInt);
+ break;
+ case SkMetaData::kScalar_Type:
+ meta.findScalar(name, &input->fFloat);
+ break;
+ case SkMetaData::kPtr_Type:
+ SkASSERT(0);
+ break; // !!! not handled for now
+ case SkMetaData::kString_Type:
+ input->string.set(meta.findString(name));
+ break;
+ default:
+ SkASSERT(0);
+ }
+ }
+ // re-evaluate all animators that may have built their values from input strings
+ for (SkDisplayable** childPtr = fChildren.begin(); childPtr < fChildren.end(); childPtr++) {
+ SkDisplayable* displayable = *childPtr;
+ if (displayable->isApply() == false)
+ continue;
+ SkApply* apply = (SkApply*) displayable;
+ apply->refresh(maker);
+ }
+}
+
+bool SkDisplayEvent::setProperty(int index, SkScriptValue& value) {
+ SkASSERT(index == SK_PROPERTY(key) || index == SK_PROPERTY(keys));
+ SkASSERT(value.fType == SkType_String);
+ SkString* string = value.fOperand.fString;
+ const char* chars = string->c_str();
+ int count = SkUTF8_CountUnichars(chars);
+ SkASSERT(count >= 1);
+ code = (SkKey) SkUTF8_NextUnichar(&chars);
+ fMax = code;
+ SkASSERT(count == 1 || index == SK_PROPERTY(keys));
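+    // A value of the form "a-b" defines a key range: 'code' is the low end and 'fMax' the high end.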
+ if (--count > 0) {
+ SkASSERT(*chars == '-');
+ chars++;
+ fMax = (SkKey) SkUTF8_NextUnichar(&chars);
+ SkASSERT(fMax >= code);
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/animator/SkDisplayEvent.h b/gfx/skia/skia/src/animator/SkDisplayEvent.h
new file mode 100644
index 000000000..d223771a7
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayEvent.h
@@ -0,0 +1,66 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDisplayEvent_DEFINED
+#define SkDisplayEvent_DEFINED
+
+#include "SkDisplayable.h"
+#include "SkMemberInfo.h"
+#include "SkIntArray.h"
+#include "SkKey.h"
+
+class SkEvent;
+
+class SkDisplayEvent : public SkDisplayable {
+ DECLARE_DISPLAY_MEMBER_INFO(Event);
+ enum Kind {
+ kNo_kind,
+ kKeyChar,
+ kKeyPress,
+        kKeyPressUp,    // I assume the order here is intended to match SkAnimatorScript.cpp
+ kMouseDown,
+ kMouseDrag,
+ kMouseMove,
+ kMouseUp,
+ kOnEnd,
+ kOnload,
+ kUser
+ };
+ SkDisplayEvent();
+ virtual ~SkDisplayEvent();
+ bool addChild(SkAnimateMaker& , SkDisplayable* child) override;
+ bool contains(SkDisplayable*) override;
+ SkDisplayable* contains(const SkString& ) override;
+#ifdef SK_DEBUG
+ void dumpEvent(SkAnimateMaker* );
+#endif
+ bool enableEvent(SkAnimateMaker& );
+ bool getProperty(int index, SkScriptValue* ) const override;
+ void onEndElement(SkAnimateMaker& maker) override;
+ void populateInput(SkAnimateMaker& , const SkEvent& fEvent);
+ bool setProperty(int index, SkScriptValue& ) override;
+protected:
+ SkKey code;
+ SkBool disable;
+ Kind kind;
+ SkString target;
+ SkScalar x;
+ SkScalar y;
+ SkTDDisplayableArray fChildren;
+ mutable SkString fKeyString;
+ SkKey fLastCode; // last key to trigger this event
+ SkKey fMax; // if the code expresses a range
+ SkDisplayable* fTarget; // used by onEnd
+private:
+ void deleteMembers();
+ friend class SkEvents;
+ typedef SkDisplayable INHERITED;
+};
+
+#endif // SkDisplayEvent_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDisplayEvents.cpp b/gfx/skia/skia/src/animator/SkDisplayEvents.cpp
new file mode 100644
index 000000000..d367cf11c
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayEvents.cpp
@@ -0,0 +1,113 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDisplayEvents.h"
+#include "SkAnimateMaker.h"
+#include "SkAnimator.h"
+#include "SkDisplayEvent.h"
+#include "SkDisplayMovie.h"
+#include "SkADrawable.h"
+#ifdef SK_DEBUG
+#include "SkDump.h"
+#endif
+
+SkEventState::SkEventState() : fCode(0), fDisable(false), fDisplayable(0), fX(0), fY(0) {
+}
+
+SkEvents::SkEvents() : fError(false) {    // initialize fError so it is never read before being set
+}
+
+SkEvents::~SkEvents() {
+}
+
+bool SkEvents::doEvent(SkAnimateMaker& maker, SkDisplayEvent::Kind kind, SkEventState* state) {
+/*#ifdef SK_DUMP_ENABLED
+ if (maker.fDumpEvents) {
+ SkDebugf("doEvent: ");
+ SkString str;
+ SkDump::GetEnumString(SkType_EventKind, kind, &str);
+ SkDebugf("kind=%s ", str.c_str());
+ if (state && state->fDisplayable)
+ state->fDisplayable->SkDisplayable::dump(&maker);
+ else
+ SkDebugf("\n");
+ }
+#endif*/
+ bool handled = false;
+ SkDisplayable** firstMovie = maker.fMovies.begin();
+ SkDisplayable** endMovie = maker.fMovies.end();
+ for (SkDisplayable** ptr = firstMovie; ptr < endMovie; ptr++) {
+ SkDisplayMovie* movie = (SkDisplayMovie*) *ptr;
+ if (kind != SkDisplayEvent::kOnload)
+ movie->doEvent(kind, state);
+ }
+ SkDisplayable* displayable = state ? state->fDisplayable : nullptr;
+ int keyCode = state ? state->fCode : 0;
+ int count = fEvents.count();
+ for (int index = 0; index < count; index++) {
+ SkDisplayEvent* evt = fEvents[index];
+ if (evt->disable)
+ continue;
+ if (evt->kind != kind)
+ continue;
+ if (evt->code != (SkKey) -1) {
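+            // Skip events whose key code (or code..fMax range) does not include the incoming key.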
+ if ((int) evt->code > keyCode || (int) (evt->fMax != (SkKey) -1 ? evt->fMax : evt->code) < keyCode)
+ continue;
+ evt->fLastCode = (SkKey) keyCode;
+ }
+ if (evt->fTarget != nullptr && evt->fTarget != displayable)
+ continue;
+ if (state == nullptr || state->fDisable == 0) {
+ if (kind >= SkDisplayEvent::kMouseDown && kind <= SkDisplayEvent::kMouseUp) {
+ evt->x = state->fX;
+ evt->y = state->fY;
+ }
+ if (evt->enableEvent(maker))
+ fError = true;
+ }
+ handled = true;
+ }
+ return handled;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkEvents::dump(SkAnimateMaker& maker) {
+ int index;
+ SkTDDrawableArray& drawArray = maker.fDisplayList.fDrawList;
+ int count = drawArray.count();
+ for (index = 0; index < count; index++) {
+ SkADrawable* drawable = drawArray[index];
+ drawable->dumpEvents();
+ }
+ count = fEvents.count();
+ for (index = 0; index < count; index++) {
+ SkDisplayEvent* evt = fEvents[index];
+ evt->dumpEvent(&maker);
+ }
+}
+#endif
+
+// currently this only removes onLoad events
+void SkEvents::removeEvent(SkDisplayEvent::Kind kind, SkEventState* state) {
+ int keyCode = state ? state->fCode : 0;
+ SkDisplayable* displayable = state ? state->fDisplayable : nullptr;
+ for (SkDisplayEvent** evtPtr = fEvents.begin(); evtPtr < fEvents.end(); evtPtr++) {
+ SkDisplayEvent* evt = *evtPtr;
+ if (evt->kind != kind)
+ continue;
+ if (evt->code != (SkKey) -1) {
+ if ((int) evt->code > keyCode || (int) (evt->fMax != (SkKey) -1 ? evt->fMax : evt->code) < keyCode)
+ continue;
+ }
+ if (evt->fTarget != nullptr && evt->fTarget != displayable)
+ continue;
+ int index = fEvents.find(evt);
+ fEvents.remove(index);
+ }
+}
diff --git a/gfx/skia/skia/src/animator/SkDisplayEvents.h b/gfx/skia/skia/src/animator/SkDisplayEvents.h
new file mode 100644
index 000000000..276955ba6
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayEvents.h
@@ -0,0 +1,42 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDisplayEvents_DEFINED
+#define SkDisplayEvents_DEFINED
+
+#include "SkEvent.h"
+#include "SkDisplayEvent.h"
+
+struct SkEventState {
+ SkEventState();
+ int fCode;
+ SkBool fDisable;
+ SkDisplayable* fDisplayable;
+ SkScalar fX;
+ SkScalar fY;
+};
+
+class SkEvents {
+public:
+ SkEvents();
+ ~SkEvents();
+ void addEvent(SkDisplayEvent* evt) { *fEvents.append() = evt; }
+ bool doEvent(SkAnimateMaker& , SkDisplayEvent::Kind , SkEventState* );
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker& );
+#endif
+ void reset() { fEvents.reset(); }
+ void removeEvent(SkDisplayEvent::Kind kind, SkEventState* );
+private:
+ SkTDDisplayEventArray fEvents;
+ SkBool fError;
+ friend class SkDisplayXMLParser;
+};
+
+#endif // SkDisplayEvents_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDisplayInclude.cpp b/gfx/skia/skia/src/animator/SkDisplayInclude.cpp
new file mode 100644
index 000000000..023b3913a
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayInclude.cpp
@@ -0,0 +1,52 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDisplayInclude.h"
+#include "SkAnimateMaker.h"
+#include "SkAnimator.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkInclude::fInfo[] = {
+ SK_MEMBER(src, String)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkInclude);
+
+//SkInclude::SkInclude() {
+// src.init();
+//}
+
+//SkInclude::~SkInclude() {
+// src.unref();
+//}
+
+bool SkInclude::enable(SkAnimateMaker & ) {
+ return true;
+}
+
+bool SkInclude::hasEnable() const {
+ return true;
+}
+
+void SkInclude::onEndElement(SkAnimateMaker& maker) {
+ maker.fInInclude = true;
+ if (src.size() == 0 || maker.decodeURI(src.c_str()) == false) {
+ if (maker.getErrorCode() != SkXMLParserError::kNoError || maker.getNativeCode() != -1) {
+ maker.setInnerError(&maker, src);
+ maker.setErrorCode(SkDisplayXMLParserError::kInInclude);
+ } else {
+ maker.setErrorNoun(src);
+ maker.setErrorCode(SkDisplayXMLParserError::kIncludeNameUnknownOrMissing);
+ }
+ }
+ maker.fInInclude = false;
+}
diff --git a/gfx/skia/skia/src/animator/SkDisplayInclude.h b/gfx/skia/skia/src/animator/SkDisplayInclude.h
new file mode 100644
index 000000000..cc87dfa7c
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayInclude.h
@@ -0,0 +1,25 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDisplayInclude_DEFINED
+#define SkDisplayInclude_DEFINED
+
+#include "SkDisplayable.h"
+#include "SkMemberInfo.h"
+
+class SkInclude : public SkDisplayable {
+ DECLARE_MEMBER_INFO(Include);
+ void onEndElement(SkAnimateMaker & ) override;
+ bool enable(SkAnimateMaker & ) override;
+ bool hasEnable() const override;
+protected:
+ SkString src;
+};
+
+#endif // SkDisplayInclude_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDisplayInput.cpp b/gfx/skia/skia/src/animator/SkDisplayInput.cpp
new file mode 100644
index 000000000..facc70397
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayInput.cpp
@@ -0,0 +1,55 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDisplayInput.h"
+
+enum SkInput_Properties {
+ SK_PROPERTY(initialized)
+};
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkInput::fInfo[] = {
+ SK_MEMBER_ALIAS(float, fFloat, Float),
+ SK_MEMBER_PROPERTY(initialized, Boolean),
+ SK_MEMBER_ALIAS(int, fInt, Int),
+ SK_MEMBER(name, String),
+ SK_MEMBER(string, String)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkInput);
+
+SkInput::SkInput() : fInt((int) SK_NaN32), fFloat(SK_ScalarNaN) {}
+
+SkDisplayable* SkInput::contains(const SkString& string) {
+ return string.equals(name) ? this : nullptr;
+}
+
+bool SkInput::enable(SkAnimateMaker & ) {
+ return true;
+}
+
+bool SkInput::getProperty(int index, SkScriptValue* value) const {
+ switch (index) {
+ case SK_PROPERTY(initialized):
+ value->fType = SkType_Boolean;
+ value->fOperand.fS32 = fInt != (int) SK_NaN32 ||
+ SkScalarIsNaN(fFloat) == false || string.size() > 0;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+bool SkInput::hasEnable() const {
+ return true;
+}
diff --git a/gfx/skia/skia/src/animator/SkDisplayInput.h b/gfx/skia/skia/src/animator/SkDisplayInput.h
new file mode 100644
index 000000000..b9a1bac91
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayInput.h
@@ -0,0 +1,33 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDisplayInput_DEFINED
+#define SkDisplayInput_DEFINED
+
+#include "SkDisplayable.h"
+#include "SkMemberInfo.h"
+
+class SkInput : public SkDisplayable {
+ DECLARE_MEMBER_INFO(Input);
+ SkInput();
+ SkDisplayable* contains(const SkString& ) override;
+ bool getProperty(int index, SkScriptValue* value) const override;
+ bool enable(SkAnimateMaker & ) override;
+ bool hasEnable() const override;
+protected:
+ SkString name;
+ int32_t fInt;
+ SkScalar fFloat;
+ SkString string;
+private:
+ friend class SkDisplayEvent;
+ friend class SkPost;
+};
+
+#endif // SkDisplayInput_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDisplayList.cpp b/gfx/skia/skia/src/animator/SkDisplayList.cpp
new file mode 100644
index 000000000..fbba83f4f
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayList.cpp
@@ -0,0 +1,158 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDisplayList.h"
+#include "SkAnimateActive.h"
+#include "SkAnimateBase.h"
+#include "SkAnimateMaker.h"
+#include "SkDisplayApply.h"
+#include "SkADrawable.h"
+#include "SkDrawGroup.h"
+#include "SkDrawMatrix.h"
+#include "SkInterpolator.h"
+#include "SkTime.h"
+
+SkDisplayList::SkDisplayList() : fDrawBounds(true), fUnionBounds(false), fInTime(0) {
+}
+
+SkDisplayList::~SkDisplayList() {
+}
+
+void SkDisplayList::append(SkActive* active) {
+ *fActiveList.append() = active;
+}
+
+bool SkDisplayList::draw(SkAnimateMaker& maker, SkMSec inTime) {
+ validate();
+ fInTime = inTime;
+ bool result = false;
+ fInvalBounds.setEmpty();
+ if (fDrawList.count()) {
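+ // reset the active animations before walking the drawables for this frame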
+ for (SkActive** activePtr = fActiveList.begin(); activePtr < fActiveList.end(); activePtr++) {
+ SkActive* active = *activePtr;
+ active->reset();
+ }
+ for (int index = 0; index < fDrawList.count(); index++) {
+ SkADrawable* draw = fDrawList[index];
+ draw->initialize(); // allow matrices to reset themselves
+ SkASSERT(draw->isDrawable());
+ validate();
+ result |= draw->draw(maker);
+ }
+ }
+ validate();
+ return result;
+}
+
+int SkDisplayList::findGroup(SkADrawable* match, SkTDDrawableArray** list,
+ SkGroup** parent, SkGroup** found, SkTDDrawableArray**grandList) {
+ *parent = nullptr;
+ *list = &fDrawList;
+ *grandList = &fDrawList;
+ return SearchForMatch(match, list, parent, found, grandList);
+}
+
+void SkDisplayList::hardReset() {
+ fDrawList.reset();
+ fActiveList.reset();
+}
+
+bool SkDisplayList::onIRect(const SkIRect& r) {
+ fBounds = r;
+ return fDrawBounds;
+}
+
+int SkDisplayList::SearchForMatch(SkADrawable* match, SkTDDrawableArray** list,
+ SkGroup** parent, SkGroup** found, SkTDDrawableArray**grandList) {
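+ // walk the draw list looking for match, descending into apply scopes and groups;
+ // on success returns the index within the list reported via *list, else -1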
+ *found = nullptr;
+ for (int index = 0; index < (*list)->count(); index++) {
+ SkADrawable* draw = (**list)[index];
+ if (draw == match)
+ return index;
+ if (draw->isApply()) {
+ SkApply* apply = (SkApply*) draw;
+ if (apply->scope == match)
+ return index;
+ if (apply->scope->isGroup() && SearchGroupForMatch(apply->scope, match, list, parent, found, grandList, index))
+ return index;
+ if (apply->mode == SkApply::kMode_create) {
+ for (SkADrawable** ptr = apply->fScopes.begin(); ptr < apply->fScopes.end(); ptr++) {
+ SkADrawable* scope = *ptr;
+ if (scope == match)
+ return index;
+ //perhaps should call SearchGroupForMatch here as well (on scope)
+ }
+ }
+ }
+ if (draw->isGroup() && SearchGroupForMatch(draw, match, list, parent, found, grandList, index))
+ return index;
+
+ }
+ return -1;
+}
+
+bool SkDisplayList::SearchGroupForMatch(SkADrawable* draw, SkADrawable* match, SkTDDrawableArray** list,
+ SkGroup** parent, SkGroup** found, SkTDDrawableArray** grandList, int &index) {
+ SkGroup* group = (SkGroup*) draw;
+ if (group->getOriginal() == match)
+ return true;
+ SkTDDrawableArray* saveList = *list;
+ int groupIndex = group->findGroup(match, list, parent, found, grandList);
+ if (groupIndex >= 0) {
+ *found = group;
+ index = groupIndex;
+ return true;
+ }
+ *list = saveList;
+ return false;
+ }
+
+void SkDisplayList::reset() {
+ for (int index = 0; index < fDrawList.count(); index++) {
+ SkADrawable* draw = fDrawList[index];
+ if (draw->isApply() == false)
+ continue;
+ SkApply* apply = (SkApply*) draw;
+ apply->reset();
+ }
+}
+
+void SkDisplayList::remove(SkActive* active) {
+ int index = fActiveList.find(active);
+ SkASSERT(index >= 0);
+ fActiveList.remove(index); // !!! could use shuffle instead
+ SkASSERT(fActiveList.find(active) < 0);
+}
+
+#ifdef SK_DUMP_ENABLED
+int SkDisplayList::fDumpIndex;
+int SkDisplayList::fIndent;
+
+void SkDisplayList::dump(SkAnimateMaker* maker) {
+ fIndent = 0;
+ dumpInner(maker);
+}
+
+void SkDisplayList::dumpInner(SkAnimateMaker* maker) {
+ for (int index = 0; index < fDrawList.count(); index++) {
+ fDumpIndex = index;
+ fDrawList[fDumpIndex]->dump(maker);
+ }
+}
+
+#endif
+
+#ifdef SK_DEBUG
+void SkDisplayList::validate() {
+ for (int index = 0; index < fDrawList.count(); index++) {
+ SkADrawable* draw = fDrawList[index];
+ draw->validate();
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/animator/SkDisplayList.h b/gfx/skia/skia/src/animator/SkDisplayList.h
new file mode 100644
index 000000000..2beba3e1d
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayList.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDisplayList_DEFINED
+#define SkDisplayList_DEFINED
+
+#include "SkOperand.h"
+#include "SkIntArray.h"
+#include "SkRect.h"
+#include "SkRefCnt.h"
+
+class SkAnimateMaker;
+class SkActive;
+class SkApply;
+class SkADrawable;
+class SkGroup;
+
+class SkDisplayList : public SkRefCnt {
+public:
+ SkDisplayList();
+ virtual ~SkDisplayList();
+ void append(SkActive* );
+ void clear() { fDrawList.reset(); }
+ int count() { return fDrawList.count(); }
+ bool draw(SkAnimateMaker& , SkMSec time);
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* maker);
+ void dumpInner(SkAnimateMaker* maker);
+ static int fIndent;
+ static int fDumpIndex;
+#endif
+ int findGroup(SkADrawable* match, SkTDDrawableArray** list,
+ SkGroup** parent, SkGroup** found, SkTDDrawableArray** grandList);
+ SkADrawable* get(int index) { return fDrawList[index]; }
+ SkMSec getTime() { return fInTime; }
+ SkTDDrawableArray* getDrawList() { return &fDrawList; }
+ void hardReset();
+ virtual bool onIRect(const SkIRect& r);
+ void reset();
+ void remove(SkActive* );
+#ifdef SK_DEBUG
+ void validate();
+#else
+ void validate() {}
+#endif
+ static int SearchForMatch(SkADrawable* match, SkTDDrawableArray** list,
+ SkGroup** parent, SkGroup** found, SkTDDrawableArray**grandList);
+ static bool SearchGroupForMatch(SkADrawable* draw, SkADrawable* match,
+ SkTDDrawableArray** list, SkGroup** parent, SkGroup** found, SkTDDrawableArray** grandList,
+ int &index);
+public:
+ SkIRect fBounds;
+ SkIRect fInvalBounds;
+ bool fDrawBounds;
+ bool fHasUnion;
+ bool fUnionBounds;
+private:
+ SkTDDrawableArray fDrawList;
+ SkTDActiveArray fActiveList;
+ SkMSec fInTime;
+ friend class SkEvents;
+};
+
+#endif // SkDisplayList_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDisplayMath.cpp b/gfx/skia/skia/src/animator/SkDisplayMath.cpp
new file mode 100644
index 000000000..f52cf1937
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayMath.cpp
@@ -0,0 +1,229 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDisplayMath.h"
+
+enum SkDisplayMath_Properties {
+ SK_PROPERTY(E),
+ SK_PROPERTY(LN10),
+ SK_PROPERTY(LN2),
+ SK_PROPERTY(LOG10E),
+ SK_PROPERTY(LOG2E),
+ SK_PROPERTY(PI),
+ SK_PROPERTY(SQRT1_2),
+ SK_PROPERTY(SQRT2)
+};
+
+const SkScalar SkDisplayMath::gConstants[] = {
+ 2.718281828f, // E
+ 2.302585093f, // LN10
+ 0.693147181f, // LN2
+ 0.434294482f, // LOG10E
+ 1.442695041f, // LOG2E
+ 3.141592654f, // PI
+ 0.707106781f, // SQRT1_2
+ 1.414213562f // SQRT2
+};
+
+enum SkDisplayMath_Functions {
+ SK_FUNCTION(abs),
+ SK_FUNCTION(acos),
+ SK_FUNCTION(asin),
+ SK_FUNCTION(atan),
+ SK_FUNCTION(atan2),
+ SK_FUNCTION(ceil),
+ SK_FUNCTION(cos),
+ SK_FUNCTION(exp),
+ SK_FUNCTION(floor),
+ SK_FUNCTION(log),
+ SK_FUNCTION(max),
+ SK_FUNCTION(min),
+ SK_FUNCTION(pow),
+ SK_FUNCTION(random),
+ SK_FUNCTION(round),
+ SK_FUNCTION(sin),
+ SK_FUNCTION(sqrt),
+ SK_FUNCTION(tan)
+};
+
+const SkFunctionParamType SkDisplayMath::fFunctionParameters[] = {
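+ // parameter types for each Math function above, in the same order; a 0 entry terminates each function's parameter list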
+ (SkFunctionParamType) SkType_Float, // abs
+ (SkFunctionParamType) 0,
+ (SkFunctionParamType) SkType_Float, // acos
+ (SkFunctionParamType) 0,
+ (SkFunctionParamType) SkType_Float, // asin
+ (SkFunctionParamType) 0,
+ (SkFunctionParamType) SkType_Float, // atan
+ (SkFunctionParamType) 0,
+ (SkFunctionParamType) SkType_Float, // atan2
+ (SkFunctionParamType) SkType_Float,
+ (SkFunctionParamType) 0,
+ (SkFunctionParamType) SkType_Float, // ceil
+ (SkFunctionParamType) 0,
+ (SkFunctionParamType) SkType_Float, // cos
+ (SkFunctionParamType) 0,
+ (SkFunctionParamType) SkType_Float, // exp
+ (SkFunctionParamType) 0,
+ (SkFunctionParamType) SkType_Float, // floor
+ (SkFunctionParamType) 0,
+ (SkFunctionParamType) SkType_Float, // log
+ (SkFunctionParamType) 0,
+ (SkFunctionParamType) SkType_Array, // max
+ (SkFunctionParamType) 0,
+ (SkFunctionParamType) SkType_Array, // min
+ (SkFunctionParamType) 0,
+ (SkFunctionParamType) SkType_Float, // pow
+ (SkFunctionParamType) SkType_Float,
+ (SkFunctionParamType) 0,
+ (SkFunctionParamType) SkType_Float, // random
+ (SkFunctionParamType) 0,
+ (SkFunctionParamType) SkType_Float, // round
+ (SkFunctionParamType) 0,
+ (SkFunctionParamType) SkType_Float, // sin
+ (SkFunctionParamType) 0,
+ (SkFunctionParamType) SkType_Float, // sqrt
+ (SkFunctionParamType) 0,
+ (SkFunctionParamType) SkType_Float, // tan
+ (SkFunctionParamType) 0
+};
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDisplayMath::fInfo[] = {
+ SK_MEMBER_PROPERTY(E, Float),
+ SK_MEMBER_PROPERTY(LN10, Float),
+ SK_MEMBER_PROPERTY(LN2, Float),
+ SK_MEMBER_PROPERTY(LOG10E, Float),
+ SK_MEMBER_PROPERTY(LOG2E, Float),
+ SK_MEMBER_PROPERTY(PI, Float),
+ SK_MEMBER_PROPERTY(SQRT1_2, Float),
+ SK_MEMBER_PROPERTY(SQRT2, Float),
+ SK_MEMBER_FUNCTION(abs, Float),
+ SK_MEMBER_FUNCTION(acos, Float),
+ SK_MEMBER_FUNCTION(asin, Float),
+ SK_MEMBER_FUNCTION(atan, Float),
+ SK_MEMBER_FUNCTION(atan2, Float),
+ SK_MEMBER_FUNCTION(ceil, Float),
+ SK_MEMBER_FUNCTION(cos, Float),
+ SK_MEMBER_FUNCTION(exp, Float),
+ SK_MEMBER_FUNCTION(floor, Float),
+ SK_MEMBER_FUNCTION(log, Float),
+ SK_MEMBER_FUNCTION(max, Float),
+ SK_MEMBER_FUNCTION(min, Float),
+ SK_MEMBER_FUNCTION(pow, Float),
+ SK_MEMBER_FUNCTION(random, Float),
+ SK_MEMBER_FUNCTION(round, Float),
+ SK_MEMBER_FUNCTION(sin, Float),
+ SK_MEMBER_FUNCTION(sqrt, Float),
+ SK_MEMBER_FUNCTION(tan, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDisplayMath);
+
+void SkDisplayMath::executeFunction(SkDisplayable* target, int index,
+ SkTDArray<SkScriptValue>& parameters, SkDisplayTypes type,
+ SkScriptValue* scriptValue) {
+ if (scriptValue == nullptr)
+ return;
+ SkASSERT(target == this);
+ SkScriptValue* array = parameters.begin();
+ SkScriptValue* end = parameters.end();
+ SkScalar input = parameters[0].fOperand.fScalar;
+ SkScalar scalarResult;
+ switch (index) {
+ case SK_FUNCTION(abs):
+ scalarResult = SkScalarAbs(input);
+ break;
+ case SK_FUNCTION(acos):
+ scalarResult = SkScalarACos(input);
+ break;
+ case SK_FUNCTION(asin):
+ scalarResult = SkScalarASin(input);
+ break;
+ case SK_FUNCTION(atan):
+ scalarResult = SkScalarATan2(input, SK_Scalar1);
+ break;
+ case SK_FUNCTION(atan2):
+ scalarResult = SkScalarATan2(input, parameters[1].fOperand.fScalar);
+ break;
+ case SK_FUNCTION(ceil):
+ scalarResult = SkScalarCeilToScalar(input);
+ break;
+ case SK_FUNCTION(cos):
+ scalarResult = SkScalarCos(input);
+ break;
+ case SK_FUNCTION(exp):
+ scalarResult = SkScalarExp(input);
+ break;
+ case SK_FUNCTION(floor):
+ scalarResult = SkScalarFloorToScalar(input);
+ break;
+ case SK_FUNCTION(log):
+ scalarResult = SkScalarLog(input);
+ break;
+ case SK_FUNCTION(max):
+ scalarResult = -SK_ScalarMax;
+ while (array < end) {
+ scalarResult = SkMaxScalar(scalarResult, array->fOperand.fScalar);
+ array++;
+ }
+ break;
+ case SK_FUNCTION(min):
+ scalarResult = SK_ScalarMax;
+ while (array < end) {
+ scalarResult = SkMinScalar(scalarResult, array->fOperand.fScalar);
+ array++;
+ }
+ break;
+ case SK_FUNCTION(pow):
+ // not the greatest -- but use x^y = e^(y * ln(x))
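+ // (only valid for a positive base, since ln is undefined otherwise)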
+ scalarResult = SkScalarLog(input);
+ scalarResult = SkScalarMul(parameters[1].fOperand.fScalar, scalarResult);
+ scalarResult = SkScalarExp(scalarResult);
+ break;
+ case SK_FUNCTION(random):
+ scalarResult = fRandom.nextUScalar1();
+ break;
+ case SK_FUNCTION(round):
+ scalarResult = SkScalarRoundToScalar(input);
+ break;
+ case SK_FUNCTION(sin):
+ scalarResult = SkScalarSin(input);
+ break;
+ case SK_FUNCTION(sqrt): {
+ SkASSERT(parameters.count() == 1);
+ SkASSERT(type == SkType_Float);
+ scalarResult = SkScalarSqrt(input);
+ } break;
+ case SK_FUNCTION(tan):
+ scalarResult = SkScalarTan(input);
+ break;
+ default:
+ SkASSERT(0);
+ scalarResult = SK_ScalarNaN;
+ }
+ scriptValue->fOperand.fScalar = scalarResult;
+ scriptValue->fType = SkType_Float;
+}
+
+const SkFunctionParamType* SkDisplayMath::getFunctionsParameters() {
+ return fFunctionParameters;
+}
+
+bool SkDisplayMath::getProperty(int index, SkScriptValue* value) const {
+ if ((unsigned)index < SK_ARRAY_COUNT(gConstants)) {
+ value->fOperand.fScalar = gConstants[index];
+ value->fType = SkType_Float;
+ return true;
+ }
+ SkASSERT(0);
+ return false;
+}
diff --git a/gfx/skia/skia/src/animator/SkDisplayMath.h b/gfx/skia/skia/src/animator/SkDisplayMath.h
new file mode 100644
index 000000000..0311a7c14
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayMath.h
@@ -0,0 +1,31 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDisplayMath_DEFINED
+#define SkDisplayMath_DEFINED
+
+#include "SkDisplayable.h"
+#include "SkMemberInfo.h"
+#include "SkRandom.h"
+
+class SkDisplayMath : public SkDisplayable {
+ DECLARE_DISPLAY_MEMBER_INFO(Math);
+ void executeFunction(SkDisplayable* , int index,
+ SkTDArray<SkScriptValue>& parameters, SkDisplayTypes type,
+ SkScriptValue* ) override;
+ const SkFunctionParamType* getFunctionsParameters() override;
+ bool getProperty(int index, SkScriptValue* value) const override;
+private:
+ mutable SkRandom fRandom;
+ static const SkScalar gConstants[];
+ static const SkFunctionParamType fFunctionParameters[];
+
+};
+
+#endif // SkDisplayMath_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDisplayMovie.cpp b/gfx/skia/skia/src/animator/SkDisplayMovie.cpp
new file mode 100644
index 000000000..797d853be
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayMovie.cpp
@@ -0,0 +1,128 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDisplayMovie.h"
+#include "SkAnimateMaker.h"
+#include "SkCanvas.h"
+#include "SkPaint.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDisplayMovie::fInfo[] = {
+ SK_MEMBER(src, String)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDisplayMovie);
+
+SkDisplayMovie::SkDisplayMovie() : fDecodedSuccessfully(false), fLoaded(false), fMovieBuilt(false) {
+ fMovie.fMaker->fInMovie = true;
+}
+
+SkDisplayMovie::~SkDisplayMovie() {
+}
+
+void SkDisplayMovie::buildMovie() {
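+ // decode the movie named by src at most once, reporting any failure on the parent maker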
+ if (fMovieBuilt)
+ return;
+ SkAnimateMaker* movieMaker = fMovie.fMaker;
+ SkAnimateMaker* parentMaker = movieMaker->fParentMaker;
+ if (src.size() == 0 || parentMaker == nullptr)
+ return;
+ movieMaker->fPrefix.set(parentMaker->fPrefix);
+ fDecodedSuccessfully = fMovie.fMaker->decodeURI(src.c_str());
+ if (fDecodedSuccessfully == false) {
+
+ if (movieMaker->getErrorCode() != SkXMLParserError::kNoError || movieMaker->getNativeCode() != -1) {
+ movieMaker->setInnerError(parentMaker, src);
+ parentMaker->setErrorCode(SkDisplayXMLParserError::kInMovie);
+ } else {
+ parentMaker->setErrorNoun(src);
+ parentMaker->setErrorCode(SkDisplayXMLParserError::kMovieNameUnknownOrMissing);
+ }
+ }
+ fMovieBuilt = true;
+}
+
+SkDisplayable* SkDisplayMovie::deepCopy(SkAnimateMaker* maker) {
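+ // the copy shares our parent maker and host event sink, defers rebuilding its movie,
+ // and is registered with the parent's movie list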
+ SkDisplayMovie* copy = (SkDisplayMovie*) INHERITED::deepCopy(maker);
+ copy->fMovie.fMaker->fParentMaker = fMovie.fMaker->fParentMaker;
+ copy->fMovie.fMaker->fHostEventSinkID = fMovie.fMaker->fHostEventSinkID;
+ copy->fMovieBuilt = false;
+ *fMovie.fMaker->fParentMaker->fMovies.append() = copy;
+ return copy;
+}
+
+void SkDisplayMovie::dirty() {
+ buildMovie();
+}
+
+bool SkDisplayMovie::doEvent(SkDisplayEvent::Kind kind, SkEventState* state) {
+ if (fLoaded == false)
+ return false;
+ fMovie.fMaker->fEnableTime = fMovie.fMaker->fParentMaker->fEnableTime;
+ return fMovie.fMaker->fEvents.doEvent(*fMovie.fMaker, kind, state);
+}
+
+bool SkDisplayMovie::draw(SkAnimateMaker& maker) {
+ if (fDecodedSuccessfully == false)
+ return false;
+ if (fLoaded == false)
+ enable(maker);
+ maker.fCanvas->save();
+ SkPaint local = SkPaint(*maker.fPaint);
+ bool result = fMovie.draw(maker.fCanvas, &local,
+ maker.fDisplayList.getTime()) != SkAnimator::kNotDifferent;
+ maker.fDisplayList.fInvalBounds.join(fMovie.fMaker->fDisplayList.fInvalBounds);
+ maker.fCanvas->restore();
+ return result;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkDisplayMovie::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ SkDebugf("src=\"%s\"/>\n", src.c_str());
+ SkAnimateMaker* movieMaker = fMovie.fMaker;
+ SkDisplayList::fIndent += 4;
+ movieMaker->fDisplayList.dumpInner(movieMaker);
+ SkDisplayList::fIndent -= 4;
+ dumpEnd(maker);
+}
+
+void SkDisplayMovie::dumpEvents() {
+ fMovie.fMaker->fEvents.dump(*fMovie.fMaker);
+}
+#endif
+
+bool SkDisplayMovie::enable(SkAnimateMaker&) {
+ if (fDecodedSuccessfully == false)
+ return false;
+ SkAnimateMaker* movieMaker = fMovie.fMaker;
+ movieMaker->fEvents.doEvent(*movieMaker, SkDisplayEvent::kOnload, nullptr);
+ movieMaker->fEvents.removeEvent(SkDisplayEvent::kOnload, nullptr);
+ fLoaded = true;
+ movieMaker->fLoaded = true;
+ return false;
+}
+
+bool SkDisplayMovie::hasEnable() const {
+ return true;
+}
+
+void SkDisplayMovie::onEndElement(SkAnimateMaker& maker) {
+#if defined SK_DEBUG && defined SK_DEBUG_ANIMATION_TIMING
+ fMovie.fMaker->fDebugTimeBase = maker.fDebugTimeBase;
+#endif
+ fMovie.fMaker->fPrefix.set(maker.fPrefix);
+ fMovie.fMaker->fHostEventSinkID = maker.fHostEventSinkID;
+ fMovie.fMaker->fParentMaker = &maker;
+ buildMovie();
+ *maker.fMovies.append() = this;
+}
diff --git a/gfx/skia/skia/src/animator/SkDisplayMovie.h b/gfx/skia/skia/src/animator/SkDisplayMovie.h
new file mode 100644
index 000000000..76e2d9ccc
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayMovie.h
@@ -0,0 +1,51 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDisplayMovie_DEFINED
+#define SkDisplayMovie_DEFINED
+
+#include "SkAnimator.h"
+#include "SkADrawable.h"
+#include "SkMemberInfo.h"
+
+struct SkEventState;
+
+class SkDisplayMovie : public SkADrawable {
+ DECLARE_DISPLAY_MEMBER_INFO(Movie);
+ SkDisplayMovie();
+ virtual ~SkDisplayMovie();
+ void buildMovie();
+ SkDisplayable* deepCopy(SkAnimateMaker* ) override;
+ void dirty() override;
+ bool doEvent(const SkEvent& evt) {
+ return fLoaded && fMovie.doEvent(evt);
+ }
+ bool doEvent(SkDisplayEvent::Kind , SkEventState* state ) override;
+ bool draw(SkAnimateMaker& ) override;
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+ void dumpEvents() override;
+#endif
+ bool enable(SkAnimateMaker& ) override;
+ const SkAnimator* getAnimator() const { return &fMovie; }
+ bool hasEnable() const override;
+ void onEndElement(SkAnimateMaker& ) override;
+protected:
+ SkString src;
+ SkAnimator fMovie;
+ SkBool8 fDecodedSuccessfully;
+ SkBool8 fLoaded;
+ SkBool8 fMovieBuilt;
+ friend class SkAnimateMaker;
+ friend class SkPost;
+private:
+ typedef SkADrawable INHERITED;
+};
+
+#endif // SkDisplayMovie_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDisplayNumber.cpp b/gfx/skia/skia/src/animator/SkDisplayNumber.cpp
new file mode 100644
index 000000000..82b658f7b
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayNumber.cpp
@@ -0,0 +1,70 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDisplayNumber.h"
+
+enum SkDisplayNumber_Properties {
+ SK_PROPERTY(MAX_VALUE),
+ SK_PROPERTY(MIN_VALUE),
+ SK_PROPERTY(NEGATIVE_INFINITY),
+ SK_PROPERTY(NaN),
+ SK_PROPERTY(POSITIVE_INFINITY)
+};
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDisplayNumber::fInfo[] = {
+ SK_MEMBER_PROPERTY(MAX_VALUE, Float),
+ SK_MEMBER_PROPERTY(MIN_VALUE, Float),
+ SK_MEMBER_PROPERTY(NEGATIVE_INFINITY, Float),
+ SK_MEMBER_PROPERTY(NaN, Float),
+ SK_MEMBER_PROPERTY(POSITIVE_INFINITY, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDisplayNumber);
+
+#if defined _WIN32
+#pragma warning ( push )
+// we are intentionally causing an overflow here
+// (warning C4756: overflow in constant arithmetic)
+#pragma warning ( disable : 4756 )
+#endif
+
+bool SkDisplayNumber::getProperty(int index, SkScriptValue* value) const {
+ SkScalar constant;
+ switch (index) {
+ case SK_PROPERTY(MAX_VALUE):
+ constant = SK_ScalarMax;
+ break;
+ case SK_PROPERTY(MIN_VALUE):
+ constant = SK_ScalarMin;
+ break;
+ case SK_PROPERTY(NEGATIVE_INFINITY):
+ constant = -SK_ScalarInfinity;
+ break;
+ case SK_PROPERTY(NaN):
+ constant = SK_ScalarNaN;
+ break;
+ case SK_PROPERTY(POSITIVE_INFINITY):
+ constant = SK_ScalarInfinity;
+ break;
+ default:
+ SkASSERT(0);
+ return false;
+ }
+ value->fOperand.fScalar = constant;
+ value->fType = SkType_Float;
+ return true;
+}
+
+#if defined _WIN32
+#pragma warning ( pop )
+#endif
diff --git a/gfx/skia/skia/src/animator/SkDisplayNumber.h b/gfx/skia/skia/src/animator/SkDisplayNumber.h
new file mode 100644
index 000000000..b92c31143
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayNumber.h
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDisplayNumber_DEFINED
+#define SkDisplayNumber_DEFINED
+
+#include "SkDisplayable.h"
+#include "SkMemberInfo.h"
+
+class SkDisplayNumber : public SkDisplayable {
+ DECLARE_DISPLAY_MEMBER_INFO(Number);
+ bool getProperty(int index, SkScriptValue* value) const override;
+private:
+};
+
+#endif // SkDisplayNumber_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDisplayPost.cpp b/gfx/skia/skia/src/animator/SkDisplayPost.cpp
new file mode 100644
index 000000000..a30fd4442
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayPost.cpp
@@ -0,0 +1,298 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDisplayPost.h"
+#include "SkAnimateMaker.h"
+#include "SkAnimator.h"
+#include "SkDisplayMovie.h"
+#include "SkPostParts.h"
+#include "SkScript.h"
+#ifdef SK_DEBUG
+#include "SkDump.h"
+#include "SkTime.h"
+#endif
+
+enum SkPost_Properties {
+ SK_PROPERTY(target),
+ SK_PROPERTY(type)
+};
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkPost::fInfo[] = {
+ SK_MEMBER(delay, MSec),
+// SK_MEMBER(initialized, Boolean),
+ SK_MEMBER(mode, EventMode),
+ SK_MEMBER(sink, String),
+ SK_MEMBER_PROPERTY(target, String),
+ SK_MEMBER_PROPERTY(type, String)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkPost);
+
+SkPost::SkPost() : delay(0), /*initialized(SkBool(-1)), */ mode(kImmediate), fMaker(nullptr),
+ fSinkID(0), fTargetMaker(nullptr), fChildHasID(false), fDirty(false) {
+}
+
+SkPost::~SkPost() {
+ for (SkDataInput** part = fParts.begin(); part < fParts.end(); part++)
+ delete *part;
+}
+
+bool SkPost::addChild(SkAnimateMaker& , SkDisplayable* child) {
+ SkASSERT(child && child->isDataInput());
+ SkDataInput* part = (SkDataInput*) child;
+ *fParts.append() = part;
+ return true;
+}
+
+bool SkPost::childrenNeedDisposing() const {
+ return false;
+}
+
+void SkPost::dirty() {
+ fDirty = true;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkPost::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ SkString* eventType = new SkString();
+ fEvent.getType(eventType);
+ if (eventType->equals("user")) {
+ const char* target = fEvent.findString("id");
+ SkDebugf("target=\"%s\" ", target);
+ }
+ else
+ SkDebugf("type=\"%s\" ", eventType->c_str());
+ delete eventType;
+
+ if (delay > 0) {
+ SkDebugf("delay=\"%g\" ", delay * 0.001);
+ }
+// if (initialized == false)
+// SkDebugf("(uninitialized) ");
+ SkString string;
+ SkDump::GetEnumString(SkType_EventMode, mode, &string);
+ if (!string.equals("immediate"))
+ SkDebugf("mode=\"%s\" ", string.c_str());
+ // !!! could enhance this to search through the maker hierarchy to show the name of the sink
+ if (sink.size() > 0) {
+ SkDebugf("sink=\"%s\" sinkID=\"%d\" ", sink.c_str(), fSinkID);
+ } else if (fSinkID != maker->getAnimator()->getSinkID() && fSinkID != 0) {
+ SkDebugf("sinkID=\"%d\" ", fSinkID);
+ }
+ const SkMetaData& meta = fEvent.getMetaData();
+ SkMetaData::Iter iter(meta);
+ SkMetaData::Type type;
+ int number;
+ const char* name;
+ bool closedYet = false;
+ SkDisplayList::fIndent += 4;
+ //this works but is somewhat hacky: the last part is "id", which we skip,
+ //and the parts appear to be in reverse order relative to the data itself
+ //SkDataInput** ptr = fParts.end();
+ //SkDataInput* data;
+ //const char* ID;
+ while ((name = iter.next(&type, &number)) != nullptr) {
+ //ptr--;
+ if (strcmp(name, "id") == 0)
+ continue;
+ if (closedYet == false) {
+ SkDebugf(">\n");
+ closedYet = true;
+ }
+ //data = *ptr;
+ //if (data->id)
+ // ID = data->id;
+ //else
+ // ID = "";
+ SkDebugf("%*s<data name=\"%s\" ", SkDisplayList::fIndent, "", name);
+ switch (type) {
+ case SkMetaData::kS32_Type: {
+ int32_t s32;
+ meta.findS32(name, &s32);
+ SkDebugf("int=\"%d\" ", s32);
+ } break;
+ case SkMetaData::kScalar_Type: {
+ SkScalar scalar;
+ meta.findScalar(name, &scalar);
+ SkDebugf("float=\"%g\" ", SkScalarToFloat(scalar));
+ } break;
+ case SkMetaData::kString_Type:
+ SkDebugf("string=\"%s\" ", meta.findString(name));
+ break;
+ case SkMetaData::kPtr_Type: {//when do we have a pointer
+ void* ptr;
+ meta.findPtr(name, &ptr);
+ SkDebugf("%p ", ptr); // %p: a pointer may not fit in %08x on 64-bit builds
+ } break;
+ case SkMetaData::kBool_Type: {
+ bool boolean;
+ meta.findBool(name, &boolean);
+ SkDebugf("boolean=\"%s\" ", boolean ? "true " : "false ");
+ } break;
+ default:
+ break;
+ }
+ SkDebugf("/>\n");
+ //ptr++;
+/* perhaps this should only be done in the case of a pointer?
+ SkDisplayable* displayable;
+ if (maker->find(name, &displayable))
+ displayable->dump(maker);
+ else
+ SkDebugf("\n");*/
+ }
+ SkDisplayList::fIndent -= 4;
+ if (closedYet)
+ dumpEnd(maker);
+ else
+ SkDebugf("/>\n");
+
+}
+#endif
+
+bool SkPost::enable(SkAnimateMaker& maker ) {
+ if (maker.hasError())
+ return true;
+ if (fDirty) {
+ if (sink.size() > 0)
+ findSinkID();
+ if (fChildHasID) {
+ SkString preserveID(fEvent.findString("id"));
+ fEvent.getMetaData().reset();
+ if (preserveID.size() > 0)
+ fEvent.setString("id", preserveID);
+ for (SkDataInput** part = fParts.begin(); part < fParts.end(); part++) {
+ if ((*part)->add())
+ maker.setErrorCode(SkDisplayXMLParserError::kErrorAddingDataToPost);
+ }
+ }
+ fDirty = false;
+ }
+#ifdef SK_DUMP_ENABLED
+ if (maker.fDumpPosts) {
+ SkDebugf("post enable: ");
+ dump(&maker);
+ }
+#if defined SK_DEBUG_ANIMATION_TIMING
+ SkString debugOut;
+ SkMSec time = maker.getAppTime();
+ debugOut.appendS32(time - maker.fDebugTimeBase);
+ debugOut.append(" post id=");
+ debugOut.append(_id);
+ debugOut.append(" enable=");
+ debugOut.appendS32(maker.fEnableTime - maker.fDebugTimeBase);
+ debugOut.append(" delay=");
+ debugOut.appendS32(delay);
+#endif
+#endif
+// SkMSec adjustedDelay = maker.adjustDelay(maker.fEnableTime, delay);
+ SkMSec futureTime = maker.fEnableTime + delay;
+ fEvent.setFast32(futureTime);
+#if defined SK_DEBUG && defined SK_DEBUG_ANIMATION_TIMING
+ debugOut.append(" future=");
+ debugOut.appendS32(futureTime - maker.fDebugTimeBase);
+ SkDebugf("%s\n", debugOut.c_str());
+#endif
+ SkEventSinkID targetID = fSinkID;
+ bool isAnimatorEvent = true;
+ SkAnimator* anim = maker.getAnimator();
+ if (targetID == 0) {
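+ // no explicit sink: route user events to our own animator, otherwise to the host sink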
+ isAnimatorEvent = fEvent.findString("id") != nullptr;
+ if (isAnimatorEvent)
+ targetID = anim->getSinkID();
+ else if (maker.fHostEventSinkID)
+ targetID = maker.fHostEventSinkID;
+ else
+ return true;
+ } else
+ anim = fTargetMaker->getAnimator();
+ if (delay == 0) {
+ if (isAnimatorEvent && mode == kImmediate)
+ fTargetMaker->doEvent(fEvent);
+ else
+ anim->onEventPost(new SkEvent(fEvent), targetID);
+ } else
+ anim->onEventPostTime(new SkEvent(fEvent), targetID, futureTime);
+ return true;
+}
+
+void SkPost::findSinkID() {
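+ // sink is a dot-delimited path of movie names (or "parent") that resolves to the maker
+ // whose animator supplies the sink ID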
+ // get the next delimiter '.' if any
+ fTargetMaker = fMaker;
+ const char* ch = sink.c_str();
+ do {
+ const char* end = strchr(ch, '.');
+ size_t len = end ? (size_t) (end - ch) : strlen(ch);
+ SkDisplayable* displayable = nullptr;
+ if (SK_LITERAL_STR_EQUAL("parent", ch, len)) {
+ if (fTargetMaker->fParentMaker)
+ fTargetMaker = fTargetMaker->fParentMaker;
+ else {
+ fTargetMaker->setErrorCode(SkDisplayXMLParserError::kNoParentAvailable);
+ return;
+ }
+ } else {
+ fTargetMaker->find(ch, len, &displayable);
+ if (displayable == nullptr || displayable->getType() != SkType_Movie) {
+ fTargetMaker->setErrorCode(SkDisplayXMLParserError::kExpectedMovie);
+ return;
+ }
+ SkDisplayMovie* movie = (SkDisplayMovie*) displayable;
+ fTargetMaker = movie->fMovie.fMaker;
+ }
+ if (end == nullptr)
+ break;
+ ch = ++end;
+ } while (true);
+ SkAnimator* anim = fTargetMaker->getAnimator();
+ fSinkID = anim->getSinkID();
+}
+
+bool SkPost::hasEnable() const {
+ return true;
+}
+
+void SkPost::onEndElement(SkAnimateMaker& maker) {
+ fTargetMaker = fMaker = &maker;
+ if (fChildHasID == false) {
+ for (SkDataInput** part = fParts.begin(); part < fParts.end(); part++)
+ delete *part;
+ fParts.reset();
+ }
+}
+
+void SkPost::setChildHasID() {
+ fChildHasID = true;
+}
+
+bool SkPost::setProperty(int index, SkScriptValue& value) {
+ SkASSERT(value.fType == SkType_String);
+ SkString* string = value.fOperand.fString;
+ switch(index) {
+ case SK_PROPERTY(target): {
+ fEvent.setType("user");
+ fEvent.setString("id", *string);
+ mode = kImmediate;
+ } break;
+ case SK_PROPERTY(type):
+ fEvent.setType(*string);
+ break;
+ default:
+ SkASSERT(0);
+ return false;
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/animator/SkDisplayPost.h b/gfx/skia/skia/src/animator/SkDisplayPost.h
new file mode 100644
index 000000000..80fdcfcdd
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayPost.h
@@ -0,0 +1,59 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDisplayPost_DEFINED
+#define SkDisplayPost_DEFINED
+
+#include "SkDisplayable.h"
+#include "SkEvent.h"
+#include "SkEventSink.h"
+#include "SkMemberInfo.h"
+#include "SkIntArray.h"
+
+class SkDataInput;
+class SkAnimateMaker;
+
+class SkPost : public SkDisplayable {
+ DECLARE_MEMBER_INFO(Post);
+ enum Mode {
+ kDeferred,
+ kImmediate
+ };
+ SkPost();
+ virtual ~SkPost();
+ bool addChild(SkAnimateMaker& , SkDisplayable* child) override;
+ bool childrenNeedDisposing() const override;
+ void dirty() override;
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+ bool enable(SkAnimateMaker& ) override;
+ bool hasEnable() const override;
+ void onEndElement(SkAnimateMaker& ) override;
+ void setChildHasID() override;
+ bool setProperty(int index, SkScriptValue& ) override;
+protected:
+ SkMSec delay;
+ SkString sink;
+// SkBool initialized;
+ Mode mode;
+ SkEvent fEvent;
+ SkAnimateMaker* fMaker;
+ SkTDDataArray fParts;
+ SkEventSinkID fSinkID;
+ SkAnimateMaker* fTargetMaker;
+ SkBool8 fChildHasID;
+ SkBool8 fDirty;
+private:
+ void findSinkID();
+ friend class SkDataInput;
+ typedef SkDisplayable INHERITED;
+};
+
+#endif //SkDisplayPost_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDisplayRandom.cpp b/gfx/skia/skia/src/animator/SkDisplayRandom.cpp
new file mode 100644
index 000000000..2efe8dc92
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayRandom.cpp
@@ -0,0 +1,65 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDisplayRandom.h"
+#include "SkInterpolator.h"
+
+enum SkDisplayRandom_Properties {
+ SK_PROPERTY(random),
+ SK_PROPERTY(seed)
+};
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDisplayRandom::fInfo[] = {
+ SK_MEMBER(blend, Float),
+ SK_MEMBER(max, Float),
+ SK_MEMBER(min, Float),
+ SK_MEMBER_DYNAMIC_PROPERTY(random, Float),
+ SK_MEMBER_PROPERTY(seed, Int)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDisplayRandom);
+
+SkDisplayRandom::SkDisplayRandom() : blend(0), min(0), max(SK_Scalar1) {
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkDisplayRandom::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ SkDebugf("min=\"%g\" ", SkScalarToFloat(min));
+ SkDebugf("max=\"%g\" ", SkScalarToFloat(max));
+ SkDebugf("blend=\"%g\" ", SkScalarToFloat(blend));
+ SkDebugf("/>\n");
+}
+#endif
+
+bool SkDisplayRandom::getProperty(int index, SkScriptValue* value) const {
+ switch(index) {
+ case SK_PROPERTY(random): {
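+ // skew the uniform random value through a unit cubic controlled by blend, then map it onto [min, max]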
+ SkScalar random = fRandom.nextUScalar1();
+ SkScalar relativeT = SkUnitCubicInterp(random, SK_Scalar1 - blend, 0, 0, SK_Scalar1 - blend);
+ value->fOperand.fScalar = min + SkScalarMul(max - min, relativeT);
+ value->fType = SkType_Float;
+ return true;
+ }
+ default:
+ SkASSERT(0);
+ }
+ return false;
+}
+
+bool SkDisplayRandom::setProperty(int index, SkScriptValue& value) {
+ SkASSERT(index == SK_PROPERTY(seed));
+ SkASSERT(value.fType == SkType_Int);
+ fRandom.setSeed(value.fOperand.fS32);
+ return true;
+}
diff --git a/gfx/skia/skia/src/animator/SkDisplayRandom.h b/gfx/skia/skia/src/animator/SkDisplayRandom.h
new file mode 100644
index 000000000..299915626
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayRandom.h
@@ -0,0 +1,40 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDisplayRandom_DEFINED
+#define SkDisplayRandom_DEFINED
+
+#include "SkDisplayable.h"
+#include "SkMemberInfo.h"
+#include "SkRandom.h"
+
+#ifdef min
+#undef min
+#endif
+
+#ifdef max
+#undef max
+#endif
+
+class SkDisplayRandom : public SkDisplayable {
+ DECLARE_DISPLAY_MEMBER_INFO(Random);
+ SkDisplayRandom();
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+ bool getProperty(int index, SkScriptValue* value) const override;
+ bool setProperty(int index, SkScriptValue& ) override;
+private:
+ SkScalar blend;
+ SkScalar min;
+ SkScalar max;
+ mutable SkRandom fRandom;
+};
+
+#endif // SkDisplayRandom_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDisplayScreenplay.cpp b/gfx/skia/skia/src/animator/SkDisplayScreenplay.cpp
new file mode 100644
index 000000000..2663b4314
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayScreenplay.cpp
@@ -0,0 +1,20 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDisplayScreenplay.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDisplayScreenplay::fInfo[] = {
+ SK_MEMBER(time, MSec)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDisplayScreenplay);
diff --git a/gfx/skia/skia/src/animator/SkDisplayScreenplay.h b/gfx/skia/skia/src/animator/SkDisplayScreenplay.h
new file mode 100644
index 000000000..0265548ed
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayScreenplay.h
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDisplayScreenplay_DEFINED
+#define SkDisplayScreenplay_DEFINED
+
+#include "SkDisplayable.h"
+#include "SkMemberInfo.h"
+
+class SkDisplayScreenplay : public SkDisplayable {
+ DECLARE_DISPLAY_MEMBER_INFO(Screenplay);
+ SkMSec time;
+};
+
+#endif // SkDisplayScreenplay_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDisplayType.cpp b/gfx/skia/skia/src/animator/SkDisplayType.cpp
new file mode 100644
index 000000000..92d120eb9
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayType.cpp
@@ -0,0 +1,761 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDisplayType.h"
+#include "SkAnimateMaker.h"
+#include "SkAnimateSet.h"
+#include "SkDisplayAdd.h"
+#include "SkDisplayApply.h"
+#include "SkDisplayBounds.h"
+#include "SkDisplayEvent.h"
+#include "SkDisplayInclude.h"
+#ifdef SK_DEBUG
+#include "SkDisplayList.h"
+#endif
+#include "SkDisplayMath.h"
+#include "SkDisplayMovie.h"
+#include "SkDisplayNumber.h"
+#include "SkDisplayPost.h"
+#include "SkDisplayRandom.h"
+#include "SkDisplayTypes.h"
+#include "SkDraw3D.h"
+#include "SkDrawBitmap.h"
+#include "SkDrawClip.h"
+#include "SkDrawDash.h"
+#include "SkDrawDiscrete.h"
+#include "SkDrawEmboss.h"
+#include "SkDrawFull.h"
+#include "SkDrawGradient.h"
+#include "SkDrawLine.h"
+#include "SkDrawMatrix.h"
+#include "SkDrawOval.h"
+#include "SkDrawPaint.h"
+#include "SkDrawPath.h"
+#include "SkDrawPoint.h"
+#include "SkDrawSaveLayer.h"
+#include "SkDrawText.h"
+#include "SkDrawTextBox.h"
+#include "SkDrawTo.h"
+#include "SkDump.h"
+#include "SkExtras.h"
+#include "SkHitClear.h"
+#include "SkHitTest.h"
+#include "SkMatrixParts.h"
+#include "SkPathParts.h"
+#include "SkPostParts.h"
+#include "SkSnapshot.h"
+#include "SkTextOnPath.h"
+#include "SkTextToPath.h"
+#include "SkTSearch.h"
+
+#define CASE_NEW(_class) \
+ case SkType_##_class: result = new Sk##_class(); break
+#define CASE_DRAW_NEW(_class) \
+ case SkType_##_class: result = new SkDraw##_class(); break
+#define CASE_DISPLAY_NEW(_class) \
+ case SkType_##_class: result = new SkDisplay##_class(); break
+#ifdef SK_DEBUG
+ #define CASE_DEBUG_RETURN_NIL(_class) \
+ case SkType_##_class: return nullptr
+#else
+ #define CASE_DEBUG_RETURN_NIL(_class)
+#endif
+
+
+SkDisplayTypes SkDisplayType::gNewTypes = kNumberOfTypes;
+
+SkDisplayable* SkDisplayType::CreateInstance(SkAnimateMaker* maker, SkDisplayTypes type) {
+ SkDisplayable* result = nullptr;
+ switch (type) {
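+ // type names left as comments are not instantiated through this factory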
+ // unknown
+ CASE_DISPLAY_NEW(Math);
+ CASE_DISPLAY_NEW(Number);
+ CASE_NEW(Add);
+ CASE_NEW(AddCircle);
+ // addgeom
+ CASE_DEBUG_RETURN_NIL(AddMode);
+ CASE_NEW(AddOval);
+ CASE_NEW(AddPath);
+ CASE_NEW(AddRect);
+ CASE_NEW(AddRoundRect);
+ CASE_DEBUG_RETURN_NIL(Align);
+ CASE_NEW(Animate);
+ // animatebase
+ CASE_NEW(Apply);
+ CASE_DEBUG_RETURN_NIL(ApplyMode);
+ CASE_DEBUG_RETURN_NIL(ApplyTransition);
+ CASE_DISPLAY_NEW(Array);
+ // argb
+ // base64
+ // basebitmap
+ // baseclassinfo
+ CASE_DRAW_NEW(Bitmap);
+ // bitmapencoding
+ // bitmapformat
+ CASE_DRAW_NEW(BitmapShader);
+ CASE_DRAW_NEW(Blur);
+ CASE_DISPLAY_NEW(Boolean);
+ // boundable
+ CASE_DISPLAY_NEW(Bounds);
+ CASE_DEBUG_RETURN_NIL(Cap);
+ CASE_NEW(Clear);
+ CASE_DRAW_NEW(Clip);
+ CASE_NEW(Close);
+ CASE_DRAW_NEW(Color);
+ CASE_NEW(CubicTo);
+ CASE_NEW(Dash);
+ CASE_NEW(DataInput);
+ CASE_NEW(Discrete);
+ // displayable
+ // drawable
+ CASE_NEW(DrawTo);
+ CASE_NEW(Dump);
+ // dynamicstring
+ CASE_DRAW_NEW(Emboss);
+ CASE_DISPLAY_NEW(Event);
+ CASE_DEBUG_RETURN_NIL(EventCode);
+ CASE_DEBUG_RETURN_NIL(EventKind);
+ CASE_DEBUG_RETURN_NIL(EventMode);
+ // filltype
+ // filtertype
+ CASE_DISPLAY_NEW(Float);
+ CASE_NEW(FromPath);
+ CASE_DEBUG_RETURN_NIL(FromPathMode);
+ CASE_NEW(Full);
+ // gradient
+ CASE_NEW(Group);
+ CASE_NEW(HitClear);
+ CASE_NEW(HitTest);
+ CASE_NEW(ImageBaseBitmap);
+ CASE_NEW(Include);
+ CASE_NEW(Input);
+ CASE_DISPLAY_NEW(Int);
+ CASE_DEBUG_RETURN_NIL(Join);
+ CASE_NEW(Line);
+ CASE_NEW(LineTo);
+ CASE_NEW(DrawLinearGradient);
+ CASE_DRAW_NEW(MaskFilter);
+ CASE_DEBUG_RETURN_NIL(MaskFilterBlurStyle);
+ // maskfilterlight
+ CASE_DRAW_NEW(Matrix);
+ // memberfunction
+ // memberproperty
+ CASE_NEW(Move);
+ CASE_NEW(MoveTo);
+ CASE_DISPLAY_NEW(Movie);
+ // msec
+ CASE_NEW(Oval);
+ CASE_DRAW_NEW(Paint);
+ CASE_DRAW_NEW(Path);
+ // pathdirection
+ CASE_DRAW_NEW(PathEffect);
+ // point
+ CASE_NEW(DrawPoint);
+ CASE_NEW(PolyToPoly);
+ CASE_NEW(Polygon);
+ CASE_NEW(Polyline);
+ CASE_NEW(Post);
+ CASE_NEW(QuadTo);
+ CASE_NEW(RCubicTo);
+ CASE_NEW(RLineTo);
+ CASE_NEW(RMoveTo);
+ CASE_NEW(RQuadTo);
+ CASE_NEW(DrawRadialGradient);
+ CASE_DISPLAY_NEW(Random);
+ CASE_DRAW_NEW(Rect);
+ CASE_NEW(RectToRect);
+ CASE_NEW(Remove);
+ CASE_NEW(Replace);
+ CASE_NEW(Rotate);
+ CASE_NEW(RoundRect);
+ CASE_NEW(Save);
+ CASE_NEW(SaveLayer);
+ CASE_NEW(Scale);
+ // screenplay
+ CASE_NEW(Set);
+ CASE_DRAW_NEW(Shader);
+ CASE_NEW(Skew);
+ CASE_NEW(3D_Camera);
+ CASE_NEW(3D_Patch);
+ // 3dpoint
+ CASE_NEW(Snapshot);
+ CASE_DISPLAY_NEW(String);
+ // style
+ CASE_NEW(Text);
+ CASE_DRAW_NEW(TextBox);
+ // textboxalign
+ // textboxmode
+ CASE_NEW(TextOnPath);
+ CASE_NEW(TextToPath);
+ CASE_DEBUG_RETURN_NIL(TileMode);
+ CASE_NEW(Translate);
+ CASE_DRAW_NEW(Typeface);
+ CASE_DEBUG_RETURN_NIL(Xfermode);
+ default:
+ SkExtras** end = maker->fExtras.end();
+ for (SkExtras** extraPtr = maker->fExtras.begin(); extraPtr < end; extraPtr++) {
+ if ((result = (*extraPtr)->createInstance(type)) != nullptr)
+ return result;
+ }
+ SkASSERT(0);
+ }
+ return result;
+}
+
+#undef CASE_NEW
+#undef CASE_DRAW_NEW
+#undef CASE_DISPLAY_NEW
+
+#if SK_USE_CONDENSED_INFO == 0
+
+#define CASE_GET_INFO(_class) case SkType_##_class: \
+ info = Sk##_class::fInfo; infoCount = Sk##_class::fInfoCount; break
+#define CASE_GET_DRAW_INFO(_class) case SkType_##_class: \
+ info = SkDraw##_class::fInfo; infoCount = SkDraw##_class::fInfoCount; break
+#define CASE_GET_DISPLAY_INFO(_class) case SkType_##_class: \
+ info = SkDisplay##_class::fInfo; infoCount = SkDisplay##_class::fInfoCount; \
+ break
+
+const SkMemberInfo* SkDisplayType::GetMembers(SkAnimateMaker* maker,
+ SkDisplayTypes type, int* infoCountPtr) {
+ const SkMemberInfo* info = nullptr;
+ int infoCount = 0;
+ switch (type) {
+ // unknown
+ CASE_GET_DISPLAY_INFO(Math);
+ CASE_GET_DISPLAY_INFO(Number);
+ CASE_GET_INFO(Add);
+ CASE_GET_INFO(AddCircle);
+ CASE_GET_INFO(AddGeom);
+ // addmode
+ CASE_GET_INFO(AddOval);
+ CASE_GET_INFO(AddPath);
+ CASE_GET_INFO(AddRect);
+ CASE_GET_INFO(AddRoundRect);
+ // align
+ CASE_GET_INFO(Animate);
+ CASE_GET_INFO(AnimateBase);
+ CASE_GET_INFO(Apply);
+ // applymode
+ // applytransition
+ CASE_GET_DISPLAY_INFO(Array);
+ // argb
+ // base64
+ CASE_GET_INFO(BaseBitmap);
+ // baseclassinfo
+ CASE_GET_DRAW_INFO(Bitmap);
+ // bitmapencoding
+ // bitmapformat
+ CASE_GET_DRAW_INFO(BitmapShader);
+ CASE_GET_DRAW_INFO(Blur);
+ CASE_GET_DISPLAY_INFO(Boolean);
+ // boundable
+ CASE_GET_DISPLAY_INFO(Bounds);
+ // cap
+ // clear
+ CASE_GET_DRAW_INFO(Clip);
+ // close
+ CASE_GET_DRAW_INFO(Color);
+ CASE_GET_INFO(CubicTo);
+ CASE_GET_INFO(Dash);
+ CASE_GET_INFO(DataInput);
+ CASE_GET_INFO(Discrete);
+ // displayable
+ // drawable
+ CASE_GET_INFO(DrawTo);
+ CASE_GET_INFO(Dump);
+ // dynamicstring
+ CASE_GET_DRAW_INFO(Emboss);
+ CASE_GET_DISPLAY_INFO(Event);
+ // eventcode
+ // eventkind
+ // eventmode
+ // filltype
+ // filtertype
+ CASE_GET_DISPLAY_INFO(Float);
+ CASE_GET_INFO(FromPath);
+ // frompathmode
+ // full
+ CASE_GET_INFO(DrawGradient);
+ CASE_GET_INFO(Group);
+ CASE_GET_INFO(HitClear);
+ CASE_GET_INFO(HitTest);
+ CASE_GET_INFO(ImageBaseBitmap);
+ CASE_GET_INFO(Include);
+ CASE_GET_INFO(Input);
+ CASE_GET_DISPLAY_INFO(Int);
+ // join
+ CASE_GET_INFO(Line);
+ CASE_GET_INFO(LineTo);
+ CASE_GET_INFO(DrawLinearGradient);
+ // maskfilter
+ // maskfilterblurstyle
+ // maskfilterlight
+ CASE_GET_DRAW_INFO(Matrix);
+ // memberfunction
+ // memberproperty
+ CASE_GET_INFO(Move);
+ CASE_GET_INFO(MoveTo);
+ CASE_GET_DISPLAY_INFO(Movie);
+ // msec
+ CASE_GET_INFO(Oval);
+ CASE_GET_DRAW_INFO(Path);
+ CASE_GET_DRAW_INFO(Paint);
+ // pathdirection
+ // patheffect
+ case SkType_Point: info = Sk_Point::fInfo; infoCount = Sk_Point::fInfoCount; break; // no virtual flavor
+ CASE_GET_INFO(DrawPoint); // virtual flavor
+ CASE_GET_INFO(PolyToPoly);
+ CASE_GET_INFO(Polygon);
+ CASE_GET_INFO(Polyline);
+ CASE_GET_INFO(Post);
+ CASE_GET_INFO(QuadTo);
+ CASE_GET_INFO(RCubicTo);
+ CASE_GET_INFO(RLineTo);
+ CASE_GET_INFO(RMoveTo);
+ CASE_GET_INFO(RQuadTo);
+ CASE_GET_INFO(DrawRadialGradient);
+ CASE_GET_DISPLAY_INFO(Random);
+ CASE_GET_DRAW_INFO(Rect);
+ CASE_GET_INFO(RectToRect);
+ CASE_GET_INFO(Remove);
+ CASE_GET_INFO(Replace);
+ CASE_GET_INFO(Rotate);
+ CASE_GET_INFO(RoundRect);
+ CASE_GET_INFO(Save);
+ CASE_GET_INFO(SaveLayer);
+ CASE_GET_INFO(Scale);
+ // screenplay
+ CASE_GET_INFO(Set);
+ CASE_GET_DRAW_INFO(Shader);
+ CASE_GET_INFO(Skew);
+ CASE_GET_INFO(3D_Camera);
+ CASE_GET_INFO(3D_Patch);
+ CASE_GET_INFO(3D_Point);
+ CASE_GET_INFO(Snapshot);
+ CASE_GET_DISPLAY_INFO(String);
+ // style
+ CASE_GET_INFO(Text);
+ CASE_GET_DRAW_INFO(TextBox);
+ // textboxalign
+ // textboxmode
+ CASE_GET_INFO(TextOnPath);
+ CASE_GET_INFO(TextToPath);
+ // tilemode
+ CASE_GET_INFO(Translate);
+ CASE_GET_DRAW_INFO(Typeface);
+ // xfermode
+ // knumberoftypes
+ default:
+ if (maker) {
+ SkExtras** end = maker->fExtras.end();
+ for (SkExtras** extraPtr = maker->fExtras.begin(); extraPtr < end; extraPtr++) {
+ if ((info = (*extraPtr)->getMembers(type, infoCountPtr)) != nullptr)
+ return info;
+ }
+ }
+ return nullptr;
+ }
+ if (infoCountPtr)
+ *infoCountPtr = infoCount;
+ return info;
+}
+
+const SkMemberInfo* SkDisplayType::GetMember(SkAnimateMaker* maker,
+ SkDisplayTypes type, const char** matchPtr ) {
+ int infoCount = 0; // Initialize to remove a warning.
+ const SkMemberInfo* info = GetMembers(maker, type, &infoCount);
+ info = SkMemberInfo::Find(info, infoCount, matchPtr);
+// SkASSERT(info);
+ return info;
+}
+
+#undef CASE_GET_INFO
+#undef CASE_GET_DRAW_INFO
+#undef CASE_GET_DISPLAY_INFO
+
+#endif // SK_USE_CONDENSED_INFO == 0
+
+#if defined SK_DEBUG || defined SK_BUILD_CONDENSED
+ #define DRAW_NAME(_name, _type) {_name, _type, true, false }
+ #define DISPLAY_NAME(_name, _type) {_name, _type, false, true }
+ #define INIT_BOOL_FIELDS , false, false
+#else
+ #define DRAW_NAME(_name, _type) {_name, _type }
+ #define DISPLAY_NAME(_name, _type) {_name, _type }
+ #define INIT_BOOL_FIELDS
+#endif
+
+const TypeNames gTypeNames[] = {
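+ // entries must stay sorted by name (GetType searches this table) and by type; UnitTest() asserts both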
+ // unknown
+ { "Math", SkType_Math INIT_BOOL_FIELDS },
+ { "Number", SkType_Number INIT_BOOL_FIELDS },
+ { "add", SkType_Add INIT_BOOL_FIELDS },
+ { "addCircle", SkType_AddCircle INIT_BOOL_FIELDS },
+ // addgeom
+ // addmode
+ { "addOval", SkType_AddOval INIT_BOOL_FIELDS },
+ { "addPath", SkType_AddPath INIT_BOOL_FIELDS },
+ { "addRect", SkType_AddRect INIT_BOOL_FIELDS },
+ { "addRoundRect", SkType_AddRoundRect INIT_BOOL_FIELDS },
+ // align
+ { "animate", SkType_Animate INIT_BOOL_FIELDS },
+ // animateBase
+ { "apply", SkType_Apply INIT_BOOL_FIELDS },
+ // applymode
+ // applytransition
+ { "array", SkType_Array INIT_BOOL_FIELDS },
+ // argb
+ // base64
+ // basebitmap
+ // baseclassinfo
+ DRAW_NAME("bitmap", SkType_Bitmap),
+ // bitmapencoding
+ // bitmapformat
+ DRAW_NAME("bitmapShader", SkType_BitmapShader),
+ DRAW_NAME("blur", SkType_Blur),
+ { "boolean", SkType_Boolean INIT_BOOL_FIELDS },
+ // boundable
+ DISPLAY_NAME("bounds", SkType_Bounds),
+ // cap
+ { "clear", SkType_Clear INIT_BOOL_FIELDS },
+ DRAW_NAME("clip", SkType_Clip),
+ { "close", SkType_Close INIT_BOOL_FIELDS },
+ DRAW_NAME("color", SkType_Color),
+ { "cubicTo", SkType_CubicTo INIT_BOOL_FIELDS },
+ { "dash", SkType_Dash INIT_BOOL_FIELDS },
+ { "data", SkType_DataInput INIT_BOOL_FIELDS },
+ { "discrete", SkType_Discrete INIT_BOOL_FIELDS },
+ // displayable
+ // drawable
+ { "drawTo", SkType_DrawTo INIT_BOOL_FIELDS },
+ { "dump", SkType_Dump INIT_BOOL_FIELDS },
+ // dynamicstring
+ DRAW_NAME("emboss", SkType_Emboss),
+ DISPLAY_NAME("event", SkType_Event),
+ // eventcode
+ // eventkind
+ // eventmode
+ // filltype
+ // filtertype
+ { "float", SkType_Float INIT_BOOL_FIELDS },
+ { "fromPath", SkType_FromPath INIT_BOOL_FIELDS },
+ // frompathmode
+ { "full", SkType_Full INIT_BOOL_FIELDS },
+ // gradient
+ { "group", SkType_Group INIT_BOOL_FIELDS },
+ { "hitClear", SkType_HitClear INIT_BOOL_FIELDS },
+ { "hitTest", SkType_HitTest INIT_BOOL_FIELDS },
+ { "image", SkType_ImageBaseBitmap INIT_BOOL_FIELDS },
+ { "include", SkType_Include INIT_BOOL_FIELDS },
+ { "input", SkType_Input INIT_BOOL_FIELDS },
+ { "int", SkType_Int INIT_BOOL_FIELDS },
+ // join
+ { "line", SkType_Line INIT_BOOL_FIELDS },
+ { "lineTo", SkType_LineTo INIT_BOOL_FIELDS },
+ { "linearGradient", SkType_DrawLinearGradient INIT_BOOL_FIELDS },
+ { "maskFilter", SkType_MaskFilter INIT_BOOL_FIELDS },
+ // maskfilterblurstyle
+ // maskfilterlight
+ DRAW_NAME("matrix", SkType_Matrix),
+ // memberfunction
+ // memberproperty
+ { "move", SkType_Move INIT_BOOL_FIELDS },
+ { "moveTo", SkType_MoveTo INIT_BOOL_FIELDS },
+ { "movie", SkType_Movie INIT_BOOL_FIELDS },
+ // msec
+ { "oval", SkType_Oval INIT_BOOL_FIELDS },
+ DRAW_NAME("paint", SkType_Paint),
+ DRAW_NAME("path", SkType_Path),
+ // pathdirection
+ { "pathEffect", SkType_PathEffect INIT_BOOL_FIELDS },
+ // point
+ DRAW_NAME("point", SkType_DrawPoint),
+ { "polyToPoly", SkType_PolyToPoly INIT_BOOL_FIELDS },
+ { "polygon", SkType_Polygon INIT_BOOL_FIELDS },
+ { "polyline", SkType_Polyline INIT_BOOL_FIELDS },
+ { "post", SkType_Post INIT_BOOL_FIELDS },
+ { "quadTo", SkType_QuadTo INIT_BOOL_FIELDS },
+ { "rCubicTo", SkType_RCubicTo INIT_BOOL_FIELDS },
+ { "rLineTo", SkType_RLineTo INIT_BOOL_FIELDS },
+ { "rMoveTo", SkType_RMoveTo INIT_BOOL_FIELDS },
+ { "rQuadTo", SkType_RQuadTo INIT_BOOL_FIELDS },
+ { "radialGradient", SkType_DrawRadialGradient INIT_BOOL_FIELDS },
+ DISPLAY_NAME("random", SkType_Random),
+ { "rect", SkType_Rect INIT_BOOL_FIELDS },
+ { "rectToRect", SkType_RectToRect INIT_BOOL_FIELDS },
+ { "remove", SkType_Remove INIT_BOOL_FIELDS },
+ { "replace", SkType_Replace INIT_BOOL_FIELDS },
+ { "rotate", SkType_Rotate INIT_BOOL_FIELDS },
+ { "roundRect", SkType_RoundRect INIT_BOOL_FIELDS },
+ { "save", SkType_Save INIT_BOOL_FIELDS },
+ { "saveLayer", SkType_SaveLayer INIT_BOOL_FIELDS },
+ { "scale", SkType_Scale INIT_BOOL_FIELDS },
+ // screenplay
+ { "set", SkType_Set INIT_BOOL_FIELDS },
+ { "shader", SkType_Shader INIT_BOOL_FIELDS },
+ { "skew", SkType_Skew INIT_BOOL_FIELDS },
+ { "skia3d:camera", SkType_3D_Camera INIT_BOOL_FIELDS },
+ { "skia3d:patch", SkType_3D_Patch INIT_BOOL_FIELDS },
+ // point
+ { "snapshot", SkType_Snapshot INIT_BOOL_FIELDS },
+ { "string", SkType_String INIT_BOOL_FIELDS },
+ // style
+ { "text", SkType_Text INIT_BOOL_FIELDS },
+ { "textBox", SkType_TextBox INIT_BOOL_FIELDS },
+ // textboxalign
+ // textboxmode
+ { "textOnPath", SkType_TextOnPath INIT_BOOL_FIELDS },
+ { "textToPath", SkType_TextToPath INIT_BOOL_FIELDS },
+ // tilemode
+ { "translate", SkType_Translate INIT_BOOL_FIELDS },
+ { "typeface", SkType_Typeface INIT_BOOL_FIELDS }
+ // xfermode
+ // knumberoftypes
+};
+
+const int kTypeNamesSize = SK_ARRAY_COUNT(gTypeNames);
+
+SkDisplayTypes SkDisplayType::Find(SkAnimateMaker* maker, const SkMemberInfo* match) {
+ for (int index = 0; index < kTypeNamesSize; index++) {
+ SkDisplayTypes type = gTypeNames[index].fType;
+ const SkMemberInfo* info = SkDisplayType::GetMembers(maker, type, nullptr);
+ if (info == match)
+ return type;
+ }
+ return (SkDisplayTypes) -1;
+}
+
+// !!! optimize this by replacing function with a byte-sized lookup table
+SkDisplayTypes SkDisplayType::GetParent(SkAnimateMaker* maker, SkDisplayTypes base) {
+ if (base == SkType_Group || base == SkType_Save || base == SkType_SaveLayer) //!!! cheat a little until we have a lookup table
+ return SkType_Displayable;
+ if (base == SkType_Set)
+ return SkType_Animate; // another cheat until we have a lookup table
+ const SkMemberInfo* info = GetMembers(maker, base, nullptr); // get info for this type
+ SkASSERT(info);
+ if (info->fType != SkType_BaseClassInfo)
+ return SkType_Unknown; // if no base, done
+ // !!! could change SK_MEMBER_INHERITED macro to take type, stuff in offset, so that
+ // this (and table builder) could know type without the following steps:
+ const SkMemberInfo* inherited = info->getInherited();
+ SkDisplayTypes result = (SkDisplayTypes) (SkType_Unknown + 1);
+ for (; result <= SkType_Xfermode; result = (SkDisplayTypes) (result + 1)) {
+ const SkMemberInfo* match = GetMembers(maker, result, nullptr);
+ if (match == inherited)
+ break;
+ }
+ SkASSERT(result <= SkType_Xfermode);
+ return result;
+}
+
+SkDisplayTypes SkDisplayType::GetType(SkAnimateMaker* maker, const char match[], size_t len ) {
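+ // search the sorted name table first, then fall back to any registered extras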
+ int index = SkStrSearch(&gTypeNames[0].fName, kTypeNamesSize, match,
+ len, sizeof(gTypeNames[0]));
+ if (index >= 0 && index < kTypeNamesSize)
+ return gTypeNames[index].fType;
+ SkExtras** end = maker->fExtras.end();
+ for (SkExtras** extraPtr = maker->fExtras.begin(); extraPtr < end; extraPtr++) {
+ SkDisplayTypes result = (*extraPtr)->getType(match, len);
+ if (result != SkType_Unknown)
+ return result;
+ }
+ return (SkDisplayTypes) -1;
+}
+
+bool SkDisplayType::IsEnum(SkAnimateMaker* , SkDisplayTypes type) {
+ switch (type) {
+ case SkType_AddMode:
+ case SkType_Align:
+ case SkType_ApplyMode:
+ case SkType_ApplyTransition:
+ case SkType_BitmapEncoding:
+ case SkType_BitmapFormat:
+ case SkType_Boolean:
+ case SkType_Cap:
+ case SkType_EventCode:
+ case SkType_EventKind:
+ case SkType_EventMode:
+ case SkType_FillType:
+ case SkType_FilterType:
+ case SkType_FontStyle:
+ case SkType_FromPathMode:
+ case SkType_Join:
+ case SkType_MaskFilterBlurStyle:
+ case SkType_PathDirection:
+ case SkType_Style:
+ case SkType_TextBoxAlign:
+ case SkType_TextBoxMode:
+ case SkType_TileMode:
+ case SkType_Xfermode:
+ return true;
+ default: // to avoid warnings
+ break;
+ }
+ return false;
+}
+
+bool SkDisplayType::IsDisplayable(SkAnimateMaker* , SkDisplayTypes type) {
+ switch (type) {
+ case SkType_Add:
+ case SkType_AddCircle:
+ case SkType_AddOval:
+ case SkType_AddPath:
+ case SkType_AddRect:
+ case SkType_AddRoundRect:
+ case SkType_Animate:
+ case SkType_AnimateBase:
+ case SkType_Apply:
+ case SkType_BaseBitmap:
+ case SkType_Bitmap:
+ case SkType_BitmapShader:
+ case SkType_Blur:
+ case SkType_Clear:
+ case SkType_Clip:
+ case SkType_Close:
+ case SkType_Color:
+ case SkType_CubicTo:
+ case SkType_Dash:
+ case SkType_DataInput:
+ case SkType_Discrete:
+ case SkType_Displayable:
+ case SkType_Drawable:
+ case SkType_DrawTo:
+ case SkType_Emboss:
+ case SkType_Event:
+ case SkType_FromPath:
+ case SkType_Full:
+ case SkType_Group:
+ case SkType_ImageBaseBitmap:
+ case SkType_Input:
+ case SkType_Line:
+ case SkType_LineTo:
+ case SkType_DrawLinearGradient:
+ case SkType_Matrix:
+ case SkType_Move:
+ case SkType_MoveTo:
+ case SkType_Movie:
+ case SkType_Oval:
+ case SkType_Paint:
+ case SkType_Path:
+ case SkType_PolyToPoly:
+ case SkType_Polygon:
+ case SkType_Polyline:
+ case SkType_Post:
+ case SkType_QuadTo:
+ case SkType_RCubicTo:
+ case SkType_RLineTo:
+ case SkType_RMoveTo:
+ case SkType_RQuadTo:
+ case SkType_DrawRadialGradient:
+ case SkType_Random:
+ case SkType_Rect:
+ case SkType_RectToRect:
+ case SkType_Remove:
+ case SkType_Replace:
+ case SkType_Rotate:
+ case SkType_RoundRect:
+ case SkType_Save:
+ case SkType_SaveLayer:
+ case SkType_Scale:
+ case SkType_Set:
+ case SkType_Shader:
+ case SkType_Skew:
+ case SkType_3D_Camera:
+ case SkType_3D_Patch:
+ case SkType_Snapshot:
+ case SkType_Text:
+ case SkType_TextBox:
+ case SkType_TextOnPath:
+ case SkType_TextToPath:
+ case SkType_Translate:
+ return true;
+ default: // to avoid warnings
+ break;
+ }
+ return false;
+}
+
+bool SkDisplayType::IsStruct(SkAnimateMaker* , SkDisplayTypes type) {
+ switch (type) {
+ case SkType_Point:
+ case SkType_3D_Point:
+ return true;
+ default: // to avoid warnings
+ break;
+ }
+ return false;
+}
+
+
+SkDisplayTypes SkDisplayType::RegisterNewType() {
+ gNewTypes = (SkDisplayTypes) (gNewTypes + 1);
+ return gNewTypes;
+}
+
+
+
+#ifdef SK_DEBUG
+const char* SkDisplayType::GetName(SkAnimateMaker* maker, SkDisplayTypes type) {
+ for (int index = 0; index < kTypeNamesSize - 1; index++) {
+ if (gTypeNames[index].fType == type)
+ return gTypeNames[index].fName;
+ }
+ SkExtras** end = maker->fExtras.end();
+ for (SkExtras** extraPtr = maker->fExtras.begin(); extraPtr < end; extraPtr++) {
+ const char* result = (*extraPtr)->getName(type);
+ if (result != nullptr)
+ return result;
+ }
+ return nullptr;
+}
+#endif
+
+#ifdef SK_SUPPORT_UNITTEST
+void SkDisplayType::UnitTest() {
+ SkAnimator animator;
+ SkAnimateMaker* maker = animator.fMaker;
+ int index;
+ for (index = 0; index < kTypeNamesSize - 1; index++) {
+ SkASSERT(strcmp(gTypeNames[index].fName, gTypeNames[index + 1].fName) < 0);
+ SkASSERT(gTypeNames[index].fType < gTypeNames[index + 1].fType);
+ }
+ for (index = 0; index < kTypeNamesSize; index++) {
+ SkDisplayable* test = CreateInstance(maker, gTypeNames[index].fType);
+ if (test == nullptr)
+ continue;
+#if defined _WIN32 && defined _INC_CRTDBG // only on windows, only if using "crtdbg.h"
+ // we know that crtdbg puts 0xfdfdfdfd at the end of the block
+ // look for uninitialized memory, signature 0xcdcdcdcd prior to that
+ int* start = (int*) test;
+ while (*start != 0xfdfdfdfd) {
+ SkASSERT(*start != 0xcdcdcdcd);
+ start++;
+ }
+#endif
+ delete test;
+ }
+ for (index = 0; index < kTypeNamesSize; index++) {
+ int infoCount;
+ const SkMemberInfo* info = GetMembers(maker, gTypeNames[index].fType, &infoCount);
+ if (info == nullptr)
+ continue;
+#if SK_USE_CONDENSED_INFO == 0
+ for (int inner = 0; inner < infoCount - 1; inner++) {
+ if (info[inner].fType == SkType_BaseClassInfo)
+ continue;
+ SkASSERT(strcmp(info[inner].fName, info[inner + 1].fName) < 0);
+ }
+#endif
+ }
+#if defined SK_DEBUG || defined SK_BUILD_CONDENSED
+ BuildCondensedInfo(maker);
+#endif
+}
+#endif
diff --git a/gfx/skia/skia/src/animator/SkDisplayType.h b/gfx/skia/skia/src/animator/SkDisplayType.h
new file mode 100644
index 000000000..8ffcd75fe
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayType.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDisplayType_DEFINED
+#define SkDisplayType_DEFINED
+
+#include "SkMath.h"
+#include "SkScalar.h"
+
+typedef int SkBool;
+
+#ifdef SK_DEBUG
+ #define SK_DUMP_ENABLED
+ #ifdef SK_BUILD_FOR_MAC
+ #define SK_FIND_LEAKS
+ #endif
+#endif
+
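+// True when a string literal equals a length-delimited token that need not be
+// null-terminated; the length check keeps a bare prefix from matching.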
+#define SK_LITERAL_STR_EQUAL(str, token, len) (sizeof(str) - 1 == len \
+ && strncmp(str, token, sizeof(str) - 1) == 0)
+
+class SkAnimateMaker;
+class SkDisplayable;
+struct SkMemberInfo;
+
+enum SkDisplayTypes {
+ SkType_Unknown,
+ SkType_Math, // for ecmascript compatible Math functions and constants
+    SkType_Number, // for ecmascript compatible Number functions and constants
+ SkType_Add,
+ SkType_AddCircle,
+ SkType_AddGeom,
+ SkType_AddMode,
+ SkType_AddOval,
+ SkType_AddPath,
+ SkType_AddRect, // path part
+ SkType_AddRoundRect,
+ SkType_Align,
+ SkType_Animate,
+ SkType_AnimateBase, // base type for animate, set
+ SkType_Apply,
+ SkType_ApplyMode,
+ SkType_ApplyTransition,
+ SkType_Array,
+ SkType_ARGB,
+ SkType_Base64,
+ SkType_BaseBitmap,
+ SkType_BaseClassInfo,
+ SkType_Bitmap,
+ SkType_BitmapEncoding,
+ SkType_BitmapFormat,
+ SkType_BitmapShader,
+ SkType_Blur,
+ SkType_Boolean, // can have values -1 (uninitialized), 0, 1
+ SkType_Boundable,
+ SkType_Bounds,
+ SkType_Cap,
+ SkType_Clear,
+ SkType_Clip,
+ SkType_Close,
+ SkType_Color,
+ SkType_CubicTo,
+ SkType_Dash,
+ SkType_DataInput,
+ SkType_Discrete,
+ SkType_Displayable,
+ SkType_Drawable,
+ SkType_DrawTo,
+ SkType_Dump,
+ SkType_DynamicString, // evaluate at draw time
+ SkType_Emboss,
+ SkType_Event,
+ SkType_EventCode,
+ SkType_EventKind,
+ SkType_EventMode,
+ SkType_FillType,
+ SkType_FilterType,
+ SkType_Float,
+ SkType_FontStyle,
+ SkType_FromPath,
+ SkType_FromPathMode,
+ SkType_Full,
+ SkType_DrawGradient,
+ SkType_Group,
+ SkType_HitClear,
+ SkType_HitTest,
+ SkType_ImageBaseBitmap,
+ SkType_Include,
+ SkType_Input,
+ SkType_Int,
+ SkType_Join,
+ SkType_Line, // simple line primitive
+ SkType_LineTo, // used as part of path construction
+ SkType_DrawLinearGradient,
+ SkType_MaskFilter,
+ SkType_MaskFilterBlurStyle,
+ SkType_MaskFilterLight,
+ SkType_Matrix,
+ SkType_MemberFunction,
+ SkType_MemberProperty,
+ SkType_Move,
+ SkType_MoveTo,
+ SkType_Movie,
+ SkType_MSec,
+ SkType_Oval,
+ SkType_Paint,
+ SkType_Path,
+ SkType_PathDirection,
+ SkType_PathEffect,
+ SkType_Point, // used inside other structures, no vtable
+ SkType_DrawPoint, // used to draw points, has a vtable
+ SkType_PolyToPoly,
+ SkType_Polygon,
+ SkType_Polyline,
+ SkType_Post,
+ SkType_QuadTo,
+ SkType_RCubicTo,
+ SkType_RLineTo,
+ SkType_RMoveTo,
+ SkType_RQuadTo,
+ SkType_DrawRadialGradient,
+ SkType_Random,
+ SkType_Rect,
+ SkType_RectToRect,
+ SkType_Remove,
+ SkType_Replace,
+ SkType_Rotate,
+ SkType_RoundRect,
+ SkType_Save,
+ SkType_SaveLayer,
+ SkType_Scale,
+ SkType_Screenplay,
+ SkType_Set,
+ SkType_Shader,
+ SkType_Skew,
+ SkType_3D_Camera,
+ SkType_3D_Patch,
+ SkType_3D_Point,
+ SkType_Snapshot,
+ SkType_String, // pointer to SkString
+ SkType_Style,
+ SkType_Text,
+ SkType_TextBox,
+ SkType_TextBoxAlign,
+ SkType_TextBoxMode,
+ SkType_TextOnPath,
+ SkType_TextToPath,
+ SkType_TileMode,
+ SkType_Translate,
+ SkType_TransparentShader,
+ SkType_Typeface,
+ SkType_Xfermode,
+ kNumberOfTypes
+};
+
+struct TypeNames {
+ const char* fName;
+ SkDisplayTypes fType;
+#if defined SK_DEBUG || defined SK_BUILD_CONDENSED
+ bool fDrawPrefix;
+ bool fDisplayPrefix;
+#endif
+};
+
+#ifdef SK_DEBUG
+typedef SkDisplayTypes SkFunctionParamType;
+#else
+typedef unsigned char SkFunctionParamType;
+#endif
+
+extern const TypeNames gTypeNames[];
+extern const int kTypeNamesSize;
+
+class SkDisplayType {
+public:
+ static SkDisplayTypes Find(SkAnimateMaker* , const SkMemberInfo* );
+ static const SkMemberInfo* GetMember(SkAnimateMaker* , SkDisplayTypes , const char** );
+ static const SkMemberInfo* GetMembers(SkAnimateMaker* , SkDisplayTypes , int* infoCountPtr);
+ static SkDisplayTypes GetParent(SkAnimateMaker* , SkDisplayTypes );
+ static bool IsDisplayable(SkAnimateMaker* , SkDisplayTypes );
+ static bool IsEnum(SkAnimateMaker* , SkDisplayTypes );
+ static bool IsStruct(SkAnimateMaker* , SkDisplayTypes );
+ static SkDisplayTypes RegisterNewType();
+ static SkDisplayTypes Resolve(const char[] , const SkMemberInfo** );
+#ifdef SK_DEBUG
+ static bool IsAnimate(SkDisplayTypes type ) { return type == SkType_Animate ||
+ type == SkType_Set; }
+ static const char* GetName(SkAnimateMaker* , SkDisplayTypes );
+#endif
+#ifdef SK_SUPPORT_UNITTEST
+ static void UnitTest();
+#endif
+#if defined SK_DEBUG || defined SK_BUILD_CONDENSED
+ static void BuildCondensedInfo(SkAnimateMaker* );
+#endif
+ static SkDisplayTypes GetType(SkAnimateMaker* , const char[] , size_t len);
+ static SkDisplayable* CreateInstance(SkAnimateMaker* , SkDisplayTypes );
+private:
+ static SkDisplayTypes gNewTypes;
+};
+
+#endif // SkDisplayType_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDisplayTypes.cpp b/gfx/skia/skia/src/animator/SkDisplayTypes.cpp
new file mode 100644
index 000000000..d3d8c6814
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayTypes.cpp
@@ -0,0 +1,214 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDisplayTypes.h"
+#include "SkAnimateBase.h"
+
+bool SkDisplayDepend::canContainDependents() const {
+ return true;
+}
+
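+// Marks every dependent animator as changed.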
+void SkDisplayDepend::dirty() {
+ SkDisplayable** last = fDependents.end();
+ for (SkDisplayable** depPtr = fDependents.begin(); depPtr < last; depPtr++) {
+ SkAnimateBase* animate = (SkAnimateBase* ) *depPtr;
+ animate->setChanged(true);
+ }
+}
+
+// Boolean
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDisplayBoolean::fInfo[] = {
+ SK_MEMBER(value, Boolean)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDisplayBoolean);
+
+SkDisplayBoolean::SkDisplayBoolean() : value(false) {
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkDisplayBoolean::dump(SkAnimateMaker* maker){
+ dumpBase(maker);
+ SkDebugf("value=\"%s\" />\n", value ? "true" : "false");
+}
+#endif
+
+// int32_t
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDisplayInt::fInfo[] = {
+ SK_MEMBER(value, Int)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDisplayInt);
+
+SkDisplayInt::SkDisplayInt() : value(0) {
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkDisplayInt::dump(SkAnimateMaker* maker){
+ dumpBase(maker);
+ SkDebugf("value=\"%d\" />\n", value);
+}
+#endif
+
+// SkScalar
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDisplayFloat::fInfo[] = {
+ SK_MEMBER(value, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDisplayFloat);
+
+SkDisplayFloat::SkDisplayFloat() : value(0) {
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkDisplayFloat::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ SkDebugf("value=\"%g\" />\n", SkScalarToFloat(value));
+}
+#endif
+
+// SkString
+enum SkDisplayString_Functions {
+ SK_FUNCTION(slice)
+};
+
+enum SkDisplayString_Properties {
+ SK_PROPERTY(length)
+};
+
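+// Parameter types for slice(start, end); each function's parameter list is
+// terminated by 0 (see SkDisplayable::getParameters).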
+const SkFunctionParamType SkDisplayString::fFunctionParameters[] = {
+ (SkFunctionParamType) SkType_Int, // slice
+ (SkFunctionParamType) SkType_Int,
+ (SkFunctionParamType) 0
+};
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDisplayString::fInfo[] = {
+ SK_MEMBER_PROPERTY(length, Int),
+ SK_MEMBER_FUNCTION(slice, String),
+ SK_MEMBER(value, String)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDisplayString);
+
+SkDisplayString::SkDisplayString() {
+}
+
+SkDisplayString::SkDisplayString(SkString& copyFrom) : value(copyFrom) {
+}
+
+void SkDisplayString::executeFunction(SkDisplayable* target, int index,
+ SkTDArray<SkScriptValue>& parameters, SkDisplayTypes type,
+ SkScriptValue* scriptValue) {
+ if (scriptValue == nullptr)
+ return;
+ SkASSERT(target == this);
+ switch (index) {
+ case SK_FUNCTION(slice):
+ scriptValue->fType = SkType_String;
+ SkASSERT(parameters[0].fType == SkType_Int);
+ int start = parameters[0].fOperand.fS32;
+ if (start < 0)
+ start = (int) (value.size() - start);
+ int end = (int) value.size();
+ if (parameters.count() > 1) {
+ SkASSERT(parameters[1].fType == SkType_Int);
+ end = parameters[1].fOperand.fS32;
+ }
+ //if (end >= 0 && end < (int) value.size())
+ if (end >= 0 && end <= (int) value.size())
+ scriptValue->fOperand.fString = new SkString(&value.c_str()[start], end - start);
+ else
+ scriptValue->fOperand.fString = new SkString(value);
+ break;
+ }
+}
+
+const SkFunctionParamType* SkDisplayString::getFunctionsParameters() {
+ return fFunctionParameters;
+}
+
+bool SkDisplayString::getProperty(int index, SkScriptValue* scriptValue) const {
+ switch (index) {
+ case SK_PROPERTY(length):
+ scriptValue->fType = SkType_Int;
+ scriptValue->fOperand.fS32 = (int32_t) value.size();
+ break;
+ default:
+ SkASSERT(0);
+ return false;
+ }
+ return true;
+}
+
+
+// SkArray
+#if 0 // !!! reason enough to qualify enum with class name or move typedArray into its own file
+enum SkDisplayArray_Properties {
+ SK_PROPERTY(length)
+};
+#endif
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDisplayArray::fInfo[] = {
+ SK_MEMBER_PROPERTY(length, Int),
+ SK_MEMBER_ARRAY(values, Unknown)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDisplayArray);
+
+SkDisplayArray::SkDisplayArray() {
+}
+
+SkDisplayArray::SkDisplayArray(SkTypedArray& copyFrom) : values(copyFrom) {
+
+}
+
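+// The typed array owns its string and nested-array elements; free them here.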
+SkDisplayArray::~SkDisplayArray() {
+ if (values.getType() == SkType_String) {
+ for (int index = 0; index < values.count(); index++)
+ delete values[index].fString;
+ return;
+ }
+ if (values.getType() == SkType_Array) {
+ for (int index = 0; index < values.count(); index++)
+ delete values[index].fArray;
+ }
+}
+
+bool SkDisplayArray::getProperty(int index, SkScriptValue* value) const {
+ switch (index) {
+ case SK_PROPERTY(length):
+ value->fType = SkType_Int;
+ value->fOperand.fS32 = values.count();
+ break;
+ default:
+ SkASSERT(0);
+ return false;
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/animator/SkDisplayTypes.h b/gfx/skia/skia/src/animator/SkDisplayTypes.h
new file mode 100644
index 000000000..c24091f39
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayTypes.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDisplayTypes_DEFINED
+#define SkDisplayTypes_DEFINED
+
+#include "SkDisplayable.h"
+#include "SkMemberInfo.h"
+#include "SkTypedArray.h"
+
+class SkOpArray; // compiled script experiment
+
+
+class SkDisplayDepend : public SkDisplayable {
+public:
+ virtual bool canContainDependents() const;
+ void addDependent(SkDisplayable* displayable) {
+ if (fDependents.find(displayable) < 0)
+ *fDependents.append() = displayable;
+ }
+ virtual void dirty();
+private:
+ SkTDDisplayableArray fDependents;
+ typedef SkDisplayable INHERITED;
+};
+
+class SkDisplayBoolean : public SkDisplayDepend {
+ DECLARE_DISPLAY_MEMBER_INFO(Boolean);
+ SkDisplayBoolean();
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+ SkBool value;
+ friend class SkAnimatorScript;
+ friend class SkAnimatorScript_Box;
+ friend class SkAnimatorScript_Unbox;
+ typedef SkDisplayDepend INHERITED;
+};
+
+class SkDisplayInt : public SkDisplayDepend {
+ DECLARE_DISPLAY_MEMBER_INFO(Int);
+ SkDisplayInt();
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+private:
+ int32_t value;
+ friend class SkAnimatorScript;
+ friend class SkAnimatorScript_Box;
+ friend class SkAnimatorScript_Unbox;
+ typedef SkDisplayDepend INHERITED;
+};
+
+class SkDisplayFloat : public SkDisplayDepend {
+ DECLARE_DISPLAY_MEMBER_INFO(Float);
+ SkDisplayFloat();
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+private:
+ SkScalar value;
+ friend class SkAnimatorScript;
+ friend class SkAnimatorScript_Box;
+ friend class SkAnimatorScript_Unbox;
+ typedef SkDisplayDepend INHERITED;
+};
+
+class SkDisplayString : public SkDisplayDepend {
+ DECLARE_DISPLAY_MEMBER_INFO(String);
+ SkDisplayString();
+ SkDisplayString(SkString& );
+ void executeFunction(SkDisplayable* , int index,
+ SkTDArray<SkScriptValue>& parameters, SkDisplayTypes type,
+ SkScriptValue* ) override;
+ const SkFunctionParamType* getFunctionsParameters() override;
+ bool getProperty(int index, SkScriptValue* ) const override;
+ SkString value;
+private:
+ static const SkFunctionParamType fFunctionParameters[];
+};
+
+class SkDisplayArray : public SkDisplayDepend {
+ DECLARE_DISPLAY_MEMBER_INFO(Array);
+ SkDisplayArray();
+ SkDisplayArray(SkTypedArray& );
+ SkDisplayArray(SkOpArray& ); // compiled script experiment
+ virtual ~SkDisplayArray();
+ bool getProperty(int index, SkScriptValue* ) const override;
+private:
+ SkTypedArray values;
+ friend class SkAnimator;
+ friend class SkAnimatorScript;
+ friend class SkAnimatorScript2;
+ friend class SkAnimatorScript_Unbox;
+ friend class SkDisplayable;
+ friend struct SkMemberInfo;
+ friend class SkScriptEngine;
+};
+
+#endif // SkDisplayTypes_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDisplayXMLParser.cpp b/gfx/skia/skia/src/animator/SkDisplayXMLParser.cpp
new file mode 100644
index 000000000..1c8b67b6a
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayXMLParser.cpp
@@ -0,0 +1,316 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDisplayXMLParser.h"
+#include "SkAnimateMaker.h"
+#include "SkDisplayApply.h"
+#include "SkUtils.h"
+#ifdef SK_DEBUG
+#include "SkTime.h"
+#endif
+
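+// Indexed by SkDisplayXMLParserError::ErrorCode, offset from kUnknownError
+// (see getErrorString below).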
+static char const* const gErrorStrings[] = {
+ "unknown error ",
+ "apply scopes itself",
+ "display tree too deep (circular reference?) ",
+ "element missing parent ",
+ "element type not allowed in parent ",
+ "error adding <data> to <post> ",
+ "error adding to <matrix> ",
+ "error adding to <paint> ",
+ "error adding to <path> ",
+ "error in attribute value ",
+ "error in script ",
+ "expected movie in sink attribute ",
+ "field not in target ",
+ "number of offsets in gradient must match number of colors",
+ "no offset in gradient may be greater than one",
+ "last offset in gradient must be one",
+ "offsets in gradient must be increasing",
+ "first offset in gradient must be zero",
+ "gradient attribute \"points\" must have length of four",
+ "in include ",
+ "in movie ",
+ "include name unknown or missing ",
+ "index out of range ",
+ "movie name unknown or missing ",
+ "no parent available to resolve sink attribute ",
+ "parent element can't contain ",
+ "saveLayer must specify a bounds",
+ "target id not found ",
+ "unexpected type "
+};
+
+SkDisplayXMLParserError::~SkDisplayXMLParserError() {
+}
+
+void SkDisplayXMLParserError::getErrorString(SkString* str) const {
+ if (fCode > kUnknownError)
+ str->set(gErrorStrings[fCode - kUnknownError]);
+ else
+ str->reset();
+ INHERITED::getErrorString(str);
+}
+
+void SkDisplayXMLParserError::setInnerError(SkAnimateMaker* parent, const SkString& src) {
+ SkString inner;
+ getErrorString(&inner);
+ inner.prepend(": ");
+ inner.prependS32(getLineNumber());
+ inner.prepend(", line ");
+ inner.prepend(src);
+ parent->setErrorNoun(inner);
+}
+
+
+SkDisplayXMLParser::SkDisplayXMLParser(SkAnimateMaker& maker)
+ : INHERITED(&maker.fError), fMaker(maker), fInInclude(maker.fInInclude),
+ fInSkia(maker.fInInclude), fCurrDisplayable(nullptr)
+{
+}
+
+SkDisplayXMLParser::~SkDisplayXMLParser() {
+ if (fCurrDisplayable && fMaker.fChildren.find(fCurrDisplayable) < 0)
+ delete fCurrDisplayable;
+ for (Parent* parPtr = fParents.begin() + 1; parPtr < fParents.end(); parPtr++) {
+ SkDisplayable* displayable = parPtr->fDisplayable;
+ if (displayable == fCurrDisplayable)
+ continue;
+ SkASSERT(fMaker.fChildren.find(displayable) < 0);
+ if (fMaker.fHelpers.find(displayable) < 0)
+ delete displayable;
+ }
+}
+
+
+
+bool SkDisplayXMLParser::onAddAttribute(const char name[], const char value[]) {
+ return onAddAttributeLen(name, value, strlen(value));
+}
+
+bool SkDisplayXMLParser::onAddAttributeLen(const char attrName[], const char attrValue[],
+ size_t attrValueLen)
+{
+ if (fCurrDisplayable == nullptr) // this signals we should ignore attributes for this element
+ return strncmp(attrName, "xmlns", sizeof("xmlns") - 1) != 0;
+ SkDisplayable* displayable = fCurrDisplayable;
+ SkDisplayTypes type = fCurrType;
+
+ if (strcmp(attrName, "id") == 0) {
+ if (fMaker.find(attrValue, attrValueLen, nullptr)) {
+ fError->setNoun(attrValue, attrValueLen);
+ fError->setCode(SkXMLParserError::kDuplicateIDs);
+ return true;
+ }
+#ifdef SK_DEBUG
+ displayable->_id.set(attrValue, attrValueLen);
+ displayable->id = displayable->_id.c_str();
+#endif
+ fMaker.idsSet(attrValue, attrValueLen, displayable);
+ int parentIndex = fParents.count() - 1;
+ if (parentIndex > 0) {
+ SkDisplayable* parent = fParents[parentIndex - 1].fDisplayable;
+ parent->setChildHasID();
+ }
+ return false;
+ }
+ const char* name = attrName;
+ const SkMemberInfo* info = SkDisplayType::GetMember(&fMaker, type, &name);
+ if (info == nullptr) {
+ fError->setNoun(name);
+ fError->setCode(SkXMLParserError::kUnknownAttributeName);
+ return true;
+ }
+ if (info->setValue(fMaker, nullptr, 0, info->getCount(), displayable, info->getType(), attrValue,
+ attrValueLen))
+ return false;
+ if (fMaker.fError.hasError()) {
+ fError->setNoun(attrValue, attrValueLen);
+ return true;
+ }
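+    // Not a plain member value: treat the attribute as a reference to another
+    // element by id, creating a helper instance when the id isn't known yet.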
+ SkDisplayable* ref = nullptr;
+ if (fMaker.find(attrValue, attrValueLen, &ref) == false) {
+ ref = fMaker.createInstance(attrValue, attrValueLen);
+ if (ref == nullptr) {
+ fError->setNoun(attrValue, attrValueLen);
+ fError->setCode(SkXMLParserError::kErrorInAttributeValue);
+ return true;
+ } else
+ fMaker.helperAdd(ref);
+ }
+ if (info->fType != SkType_MemberProperty) {
+ fError->setNoun(name);
+ fError->setCode(SkXMLParserError::kUnknownAttributeName);
+ return true;
+ }
+ SkScriptValue scriptValue;
+ scriptValue.fOperand.fDisplayable = ref;
+ scriptValue.fType = ref->getType();
+ displayable->setProperty(info->propertyIndex(), scriptValue);
+ return false;
+}
+
+#if defined(SK_BUILD_FOR_WIN32)
+ #define SK_strcasecmp _stricmp
+ #define SK_strncasecmp _strnicmp
+#else
+ #define SK_strcasecmp strcasecmp
+ #define SK_strncasecmp strncasecmp
+#endif
+
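+// Closes the current element: lets it finish itself, attaches it to its parent
+// (as a child or as a member reference), and fires the onLoad event when
+// </screenplay> closes outside an include or movie.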
+bool SkDisplayXMLParser::onEndElement(const char elem[])
+{
+ int parentIndex = fParents.count() - 1;
+ if (parentIndex >= 0) {
+ Parent& container = fParents[parentIndex];
+ SkDisplayable* displayable = container.fDisplayable;
+ fMaker.fEndDepth = parentIndex;
+ displayable->onEndElement(fMaker);
+ if (fMaker.fError.hasError())
+ return true;
+ if (parentIndex > 0) {
+ SkDisplayable* parent = fParents[parentIndex - 1].fDisplayable;
+ bool result = parent->addChild(fMaker, displayable);
+ if (fMaker.hasError())
+ return true;
+ if (result == false) {
+ int infoCount;
+ const SkMemberInfo* info =
+ SkDisplayType::GetMembers(&fMaker, fParents[parentIndex - 1].fType, &infoCount);
+ const SkMemberInfo* foundInfo;
+ if ((foundInfo = searchContainer(info, infoCount)) != nullptr) {
+ parent->setReference(foundInfo, displayable);
+ // if (displayable->isHelper() == false)
+ fMaker.helperAdd(displayable);
+ } else {
+ fMaker.setErrorCode(SkDisplayXMLParserError::kElementTypeNotAllowedInParent);
+ return true;
+ }
+ }
+ if (parent->childrenNeedDisposing())
+ delete displayable;
+ }
+ fParents.remove(parentIndex);
+ }
+ fCurrDisplayable = nullptr;
+ if (fInInclude == false && SK_strcasecmp(elem, "screenplay") == 0) {
+ if (fMaker.fInMovie == false) {
+ fMaker.fEnableTime = fMaker.getAppTime();
+#if defined SK_DEBUG && defined SK_DEBUG_ANIMATION_TIMING
+ if (fMaker.fDebugTimeBase == (SkMSec) -1)
+ fMaker.fDebugTimeBase = fMaker.fEnableTime;
+ SkString debugOut;
+ SkMSec time = fMaker.getAppTime();
+ debugOut.appendS32(time - fMaker.fDebugTimeBase);
+ debugOut.append(" onLoad enable=");
+ debugOut.appendS32(fMaker.fEnableTime - fMaker.fDebugTimeBase);
+ SkDebugf("%s\n", debugOut.c_str());
+#endif
+ fMaker.fEvents.doEvent(fMaker, SkDisplayEvent::kOnload, nullptr);
+ if (fMaker.fError.hasError())
+ return true;
+ fMaker.fEvents.removeEvent(SkDisplayEvent::kOnload, nullptr);
+
+ }
+ fInSkia = false;
+ }
+ return false;
+}
+
+bool SkDisplayXMLParser::onStartElement(const char name[])
+{
+ return onStartElementLen(name, strlen(name));
+}
+
+bool SkDisplayXMLParser::onStartElementLen(const char name[], size_t len) {
+ fCurrDisplayable = nullptr; // init so we'll ignore attributes if we exit early
+
+ if (SK_strncasecmp(name, "screenplay", len) == 0) {
+ fInSkia = true;
+ if (fInInclude == false)
+ fMaker.idsSet(name, len, &fMaker.fScreenplay);
+ return false;
+ }
+ if (fInSkia == false)
+ return false;
+
+ SkDisplayable* displayable = fMaker.createInstance(name, len);
+ if (displayable == nullptr) {
+ fError->setNoun(name, len);
+ fError->setCode(SkXMLParserError::kUnknownElement);
+ return true;
+ }
+ SkDisplayTypes type = displayable->getType();
+ Parent record = { displayable, type };
+ *fParents.append() = record;
+ if (fParents.count() == 1)
+ fMaker.childrenAdd(displayable);
+ else {
+ Parent* parent = fParents.end() - 2;
+ if (displayable->setParent(parent->fDisplayable)) {
+ fError->setNoun(name, len);
+ getError()->setCode(SkDisplayXMLParserError::kParentElementCantContain);
+ return true;
+ }
+ }
+
+ // set these for subsequent calls to addAttribute()
+ fCurrDisplayable = displayable;
+ fCurrType = type;
+ return false;
+}
+
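+// Searches the parent's member info (recursing through base-class entries) for a
+// field whose declared type the orphaned child matches or derives from; drawable
+// slots are ranked by whether they are the parent's preferredChild for the type.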
+const SkMemberInfo* SkDisplayXMLParser::searchContainer(const SkMemberInfo* infoBase,
+ int infoCount) {
+ const SkMemberInfo* bestDisplayable = nullptr;
+ const SkMemberInfo* lastResort = nullptr;
+ for (int index = 0; index < infoCount; index++) {
+ const SkMemberInfo* info = &infoBase[index];
+ if (info->fType == SkType_BaseClassInfo) {
+ const SkMemberInfo* inherited = info->getInherited();
+ const SkMemberInfo* result = searchContainer(inherited, info->fCount);
+ if (result != nullptr)
+ return result;
+ continue;
+ }
+ Parent* container = fParents.end() - 1;
+ SkDisplayTypes type = (SkDisplayTypes) info->fType;
+ if (type == SkType_MemberProperty)
+ type = info->propertyType();
+ SkDisplayTypes containerType = container->fType;
+ if (type == containerType && (type == SkType_Rect || type == SkType_Polygon ||
+ type == SkType_Array || type == SkType_Int || type == SkType_Bitmap))
+ goto rectNext;
+ while (type != containerType) {
+ if (containerType == SkType_Displayable)
+ goto next;
+ containerType = SkDisplayType::GetParent(&fMaker, containerType);
+ if (containerType == SkType_Unknown)
+ goto next;
+ }
+ return info;
+next:
+ if (type == SkType_Drawable || (type == SkType_Displayable &&
+ container->fDisplayable->isDrawable())) {
+rectNext:
+ if (fParents.count() > 1) {
+ Parent* parent = fParents.end() - 2;
+ if (info == parent->fDisplayable->preferredChild(type))
+ bestDisplayable = info;
+ else
+ lastResort = info;
+ }
+ }
+ }
+ if (bestDisplayable)
+ return bestDisplayable;
+ if (lastResort)
+ return lastResort;
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/animator/SkDisplayXMLParser.h b/gfx/skia/skia/src/animator/SkDisplayXMLParser.h
new file mode 100644
index 000000000..9c561eda0
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayXMLParser.h
@@ -0,0 +1,91 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDisplayXMLParser_DEFINED
+#define SkDisplayXMLParser_DEFINED
+
+#include "SkIntArray.h"
+#include "SkTDict.h"
+#include "SkDisplayType.h"
+#include "SkXMLParser.h"
+
+class SkAnimateMaker;
+class SkDisplayable;
+
+class SkDisplayXMLParserError : public SkXMLParserError {
+public:
+ enum ErrorCode {
+ kApplyScopesItself = kUnknownError + 1,
+ kDisplayTreeTooDeep,
+ kElementMissingParent,
+ kElementTypeNotAllowedInParent,
+ kErrorAddingDataToPost,
+ kErrorAddingToMatrix,
+ kErrorAddingToPaint,
+ kErrorAddingToPath,
+ kErrorInAttributeValue,
+ kErrorInScript,
+ kExpectedMovie,
+ kFieldNotInTarget,
+ kGradientOffsetsDontMatchColors,
+ kGradientOffsetsMustBeNoMoreThanOne,
+ kGradientOffsetsMustEndWithOne,
+ kGradientOffsetsMustIncrease,
+ kGradientOffsetsMustStartWithZero,
+ kGradientPointsLengthMustBeFour,
+ kInInclude,
+ kInMovie,
+ kIncludeNameUnknownOrMissing,
+ kIndexOutOfRange,
+ kMovieNameUnknownOrMissing,
+ kNoParentAvailable,
+ kParentElementCantContain,
+ kSaveLayerNeedsBounds,
+ kTargetIDNotFound,
+ kUnexpectedType
+ };
+ virtual ~SkDisplayXMLParserError();
+ virtual void getErrorString(SkString* str) const;
+ void setCode(ErrorCode code) { INHERITED::setCode((INHERITED::ErrorCode) code); }
+ void setInnerError(SkAnimateMaker* maker, const SkString& str);
+ typedef SkXMLParserError INHERITED;
+ friend class SkDisplayXMLParser;
+};
+
+class SkDisplayXMLParser : public SkXMLParser {
+public:
+ SkDisplayXMLParser(SkAnimateMaker& maker);
+ virtual ~SkDisplayXMLParser();
+protected:
+ virtual bool onAddAttribute(const char name[], const char value[]);
+ bool onAddAttributeLen(const char name[], const char value[], size_t len);
+ virtual bool onEndElement(const char elem[]);
+ virtual bool onStartElement(const char elem[]);
+ bool onStartElementLen(const char elem[], size_t len);
+private:
+ struct Parent {
+ SkDisplayable* fDisplayable;
+ SkDisplayTypes fType;
+ };
+ SkTDArray<Parent> fParents;
+ SkDisplayXMLParser& operator= (const SkDisplayXMLParser& );
+ SkDisplayXMLParserError* getError() { return (SkDisplayXMLParserError*) fError; }
+ const SkMemberInfo* searchContainer(const SkMemberInfo* ,
+ int infoCount);
+ SkAnimateMaker& fMaker;
+ SkBool fInInclude;
+ SkBool fInSkia;
+ // local state between onStartElement and onAddAttribute
+ SkDisplayable* fCurrDisplayable;
+ SkDisplayTypes fCurrType;
+ friend class SkXMLAnimatorWriter;
+ typedef SkXMLParser INHERITED;
+};
+
+#endif // SkDisplayXMLParser_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDisplayable.cpp b/gfx/skia/skia/src/animator/SkDisplayable.cpp
new file mode 100644
index 000000000..dc3cecfdb
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayable.cpp
@@ -0,0 +1,540 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDisplayable.h"
+#include "SkDisplayApply.h"
+#include "SkParse.h"
+#ifdef SK_DEBUG
+#include "SkDisplayList.h"
+#endif
+#include "SkDisplayTypes.h"
+
+#ifdef SK_FIND_LEAKS
+// int SkDisplayable::fAllocationCount;
+SkTDDisplayableArray SkDisplayable::fAllocations;
+#endif
+
+#ifdef SK_DEBUG
+SkDisplayable::SkDisplayable() {
+ id = _id.c_str();
+#ifdef SK_FIND_LEAKS
+ // fAllocationCount++;
+ *fAllocations.append() = this;
+#endif
+}
+#endif
+
+SkDisplayable::~SkDisplayable() {
+#ifdef SK_FIND_LEAKS
+ // fAllocationCount--;
+ int index = fAllocations.find(this);
+ SkASSERT(index >= 0);
+ fAllocations.remove(index);
+#endif
+}
+
+bool SkDisplayable::addChild(SkAnimateMaker& , SkDisplayable* child) {
+ return false;
+}
+
+//void SkDisplayable::apply(SkAnimateMaker& , const SkMemberInfo* ,
+// SkDisplayable* , SkScalar [], int count) {
+// SkASSERT(0);
+//}
+
+bool SkDisplayable::canContainDependents() const {
+ return false;
+}
+
+bool SkDisplayable::childrenNeedDisposing() const {
+ return false;
+}
+
+void SkDisplayable::clearBounder() {
+}
+
+bool SkDisplayable::contains(SkDisplayable* ) {
+ return false;
+}
+
+SkDisplayable* SkDisplayable::contains(const SkString& ) {
+ return nullptr;
+}
+
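+// Member-wise clone: creates a fresh instance of the same type and copies over
+// properties, arrays, nested displayables (recursively), strings, and raw data.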
+SkDisplayable* SkDisplayable::deepCopy(SkAnimateMaker* maker) {
+ SkDisplayTypes type = getType();
+ if (type == SkType_Unknown) {
+ SkASSERT(0);
+ return nullptr;
+ }
+ SkDisplayable* copy = SkDisplayType::CreateInstance(maker, type);
+ int index = -1;
+ int propIndex = 0;
+ const SkMemberInfo* info;
+ do {
+ info = copy->getMember(++index);
+ if (info == nullptr)
+ break;
+ if (info->fType == SkType_MemberProperty) {
+ SkScriptValue value;
+ if (getProperty(propIndex, &value))
+ copy->setProperty(propIndex, value);
+ propIndex++;
+ continue;
+ }
+ if (info->fType == SkType_MemberFunction)
+ continue;
+ if (info->fType == SkType_Array) {
+ SkTDOperandArray* array = (SkTDOperandArray*) info->memberData(this);
+ int arrayCount;
+ if (array == nullptr || (arrayCount = array->count()) == 0)
+ continue;
+ SkTDOperandArray* copyArray = (SkTDOperandArray*) info->memberData(copy);
+ copyArray->setCount(arrayCount);
+ SkDisplayTypes elementType;
+ if (type == SkType_Array) {
+ SkDisplayArray* dispArray = (SkDisplayArray*) this;
+ elementType = dispArray->values.getType();
+ } else
+ elementType = info->arrayType();
+ size_t elementSize = SkMemberInfo::GetSize(elementType);
+ size_t byteSize = elementSize * arrayCount;
+ memcpy(copyArray->begin(), array->begin(), byteSize);
+ continue;
+ }
+ if (SkDisplayType::IsDisplayable(maker, info->fType)) {
+ SkDisplayable** displayable = (SkDisplayable**) info->memberData(this);
+ if (*displayable == nullptr || *displayable == (SkDisplayable*) -1)
+ continue;
+ SkDisplayable* deeper = (*displayable)->deepCopy(maker);
+ info->setMemberData(copy, deeper, sizeof(deeper));
+ continue;
+ }
+ if (info->fType == SkType_String || info->fType == SkType_DynamicString) {
+ SkString* string;
+ info->getString(this, &string);
+ info->setString(copy, string);
+ continue;
+ }
+ void* data = info->memberData(this);
+ size_t size = SkMemberInfo::GetSize(info->fType);
+ info->setMemberData(copy, data, size);
+ } while (true);
+ copy->dirty();
+ return copy;
+}
+
+void SkDisplayable::dirty() {
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkDisplayable::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+#if SK_USE_CONDENSED_INFO == 0
+ this->dumpAttrs(maker);
+ this->dumpChildren(maker);
+#endif
+}
+
+void SkDisplayable::dumpAttrs(SkAnimateMaker* maker) {
+ SkDisplayTypes type = getType();
+ if (type == SkType_Unknown) {
+ //SkDebugf("/>\n");
+ return;
+ }
+ SkDisplayable* blankCopy = SkDisplayType::CreateInstance(maker, type);
+
+ int index = -1;
+ int propIndex = 0;
+ const SkMemberInfo* info;
+ const SkMemberInfo* blankInfo;
+ SkScriptValue value;
+ SkScriptValue blankValue;
+ SkOperand values[2];
+ SkOperand blankValues[2];
+ do {
+ info = this->getMember(++index);
+ if (nullptr == info) {
+ //SkDebugf("\n");
+ break;
+ }
+ if (SkType_MemberProperty == info->fType) {
+ if (getProperty(propIndex, &value)) {
+ blankCopy->getProperty(propIndex, &blankValue);
+ //last two are dummies
+ dumpValues(info, value.fType, value.fOperand, blankValue.fOperand, value.fOperand, blankValue.fOperand);
+ }
+
+ propIndex++;
+ continue;
+ }
+ if (SkDisplayType::IsDisplayable(maker, info->fType)) {
+ continue;
+ }
+
+ if (info->fType == SkType_MemberFunction)
+ continue;
+
+
+ if (info->fType == SkType_Array) {
+ SkTDOperandArray* array = (SkTDOperandArray*) info->memberData(this);
+ int arrayCount;
+ if (array == nullptr || (arrayCount = array->count()) == 0)
+ continue;
+ SkDisplayTypes elementType;
+ if (type == SkType_Array) {
+ SkDisplayArray* dispArray = (SkDisplayArray*) this;
+ elementType = dispArray->values.getType();
+ } else
+ elementType = info->arrayType();
+ bool firstElem = true;
+ SkDebugf("%s=\"[", info->fName);
+ for (SkOperand* op = array->begin(); op < array->end(); op++) {
+ if (!firstElem) SkDebugf(",");
+ switch (elementType) {
+ case SkType_Displayable:
+ SkDebugf("%s", op->fDisplayable->id);
+ break;
+ case SkType_Int:
+ SkDebugf("%d", op->fS32);
+ break;
+ case SkType_Float:
+ SkDebugf("%g", SkScalarToFloat(op->fScalar));
+ break;
+ case SkType_String:
+ case SkType_DynamicString:
+ SkDebugf("%s", op->fString->c_str());
+ break;
+ default:
+ break;
+ }
+ firstElem = false;
+ }
+ SkDebugf("]\" ");
+ continue;
+ }
+
+ if (info->fType == SkType_String || info->fType == SkType_DynamicString) {
+ SkString* string;
+ info->getString(this, &string);
+ if (string->isEmpty() == false)
+ SkDebugf("%s=\"%s\"\t", info->fName, string->c_str());
+ continue;
+ }
+
+
+ blankInfo = blankCopy->getMember(index);
+ int i = info->fCount;
+ info->getValue(this, values, i);
+ blankInfo->getValue(blankCopy, blankValues, i);
+ dumpValues(info, info->fType, values[0], blankValues[0], values[1], blankValues[1]);
+ } while (true);
+ delete blankCopy;
+}
+
+void SkDisplayable::dumpBase(SkAnimateMaker* maker) {
+ SkDisplayTypes type = getType();
+ const char* elementName = "(unknown)";
+ if (type != SkType_Unknown && type != SkType_Screenplay)
+ elementName = SkDisplayType::GetName(maker, type);
+ SkDebugf("%*s", SkDisplayList::fIndent, "");
+ if (SkDisplayList::fDumpIndex != 0 && SkDisplayList::fIndent == 0)
+ SkDebugf("%d: ", SkDisplayList::fDumpIndex);
+ SkDebugf("<%s ", elementName);
+ if (strcmp(id,"") != 0)
+ SkDebugf("id=\"%s\" ", id);
+}
+
+void SkDisplayable::dumpChildren(SkAnimateMaker* maker, bool closedAngle) {
+
+ int index = -1;
+ const SkMemberInfo* info;
+ index = -1;
+ SkDisplayList::fIndent += 4;
+ do {
+ info = this->getMember(++index);
+ if (nullptr == info) {
+ break;
+ }
+ if (SkDisplayType::IsDisplayable(maker, info->fType)) {
+ SkDisplayable** displayable = (SkDisplayable**) info->memberData(this);
+ if (*displayable == nullptr || *displayable == (SkDisplayable*) -1)
+ continue;
+ if (closedAngle == false) {
+ SkDebugf(">\n");
+ closedAngle = true;
+ }
+ (*displayable)->dump(maker);
+ }
+ } while (true);
+ SkDisplayList::fIndent -= 4;
+ if (closedAngle)
+ dumpEnd(maker);
+ else
+ SkDebugf("/>\n");
+}
+
+void SkDisplayable::dumpEnd(SkAnimateMaker* maker) {
+ SkDisplayTypes type = getType();
+ const char* elementName = "(unknown)";
+ if (type != SkType_Unknown && type != SkType_Screenplay)
+ elementName = SkDisplayType::GetName(maker, type);
+ SkDebugf("%*s", SkDisplayList::fIndent, "");
+ SkDebugf("</%s>\n", elementName);
+}
+
+void SkDisplayable::dumpEvents() {
+}
+
+void SkDisplayable::dumpValues(const SkMemberInfo* info, SkDisplayTypes type, SkOperand op, SkOperand blankOp,
+ SkOperand op2, SkOperand blankOp2) {
+ switch (type) {
+ case SkType_BitmapEncoding:
+ switch (op.fS32) {
+ case 0 : SkDebugf("type=\"jpeg\" ");
+ break;
+ case 1 : SkDebugf("type=\"png\" ");
+ break;
+ default: SkDebugf("type=\"UNDEFINED\" ");
+ }
+ break;
+ //should make this a separate case in dump attrs, rather than make dump values have a larger signature
+ case SkType_Point:
+ if (op.fScalar != blankOp.fScalar || op2.fScalar != blankOp.fScalar) {
+ SkDebugf("%s=\"[%g,%g]\" ", info->fName, SkScalarToFloat(op.fScalar), SkScalarToFloat(op2.fScalar));
+ }
+ break;
+ case SkType_FromPathMode:
+ switch (op.fS32) {
+ case 0:
+                    // 0 prints nothing; listed here only so the default case doesn't report it as INVALID
+ break;
+ case 1:
+ SkDebugf("%s=\"%s\" ", info->fName, "angle");
+ break;
+ case 2:
+ SkDebugf("%s=\"%s\" ", info->fName, "position");
+ break;
+ default:
+ SkDebugf("%s=\"INVALID\" ", info->fName);
+ }
+ break;
+ case SkType_MaskFilterBlurStyle:
+ switch (op.fS32) {
+ case 0:
+ break;
+ case 1:
+ SkDebugf("%s=\"%s\" ", info->fName, "solid");
+ break;
+ case 2:
+ SkDebugf("%s=\"%s\" ", info->fName, "outer");
+ break;
+ case 3:
+ SkDebugf("%s=\"%s\" ", info->fName, "inner");
+ break;
+ default:
+ SkDebugf("%s=\"INVALID\" ", info->fName);
+ }
+ break;
+ case SkType_FilterType:
+ if (op.fS32 == 1)
+ SkDebugf("%s=\"%s\" ", info->fName, "bilinear");
+ break;
+ case SkType_PathDirection:
+ SkDebugf("%s=\"%s\" ", info->fName, op.fS32 == 0 ? "cw" : "ccw");
+ break;
+ case SkType_FillType:
+ SkDebugf("%s=\"%s\" ", info->fName, op.fS32 == 0 ? "winding" : "evenOdd");
+ break;
+ case SkType_TileMode:
+ //correct to look at the S32?
+ if (op.fS32 != blankOp.fS32)
+ SkDebugf("%s=\"%s\" ", info->fName, op.fS32 == 0 ? "clamp" : op.fS32 == 1 ? "repeat" : "mirror");
+ break;
+ case SkType_Boolean:
+ if (op.fS32 != blankOp.fS32)
+ SkDebugf("%s=\"%s\" ", info->fName, op.fS32 == 0 ? "false" : "true");
+ break;
+ case SkType_Int:
+ if (op.fS32 != blankOp.fS32)
+ SkDebugf(" %s=\"%d\" ", info->fName, op.fS32);
+ break;
+ case SkType_Float:
+ if (op.fScalar != blankOp.fScalar) { //or /65536?
+ SkDebugf("%s=\"%g\" ", info->fName, SkScalarToFloat(op.fScalar));
+ }
+ break;
+ case SkType_String:
+ case SkType_DynamicString:
+ if (op.fString->size() > 0)
+ SkDebugf("%s=\"%s\" ", info->fName, op.fString->c_str());
+ break;
+ case SkType_MSec:
+ if (op.fS32 != blankOp.fS32) {
+ SkDebugf(" %s=\"%g\" ", info->fName, op.fS32 * 0.001);
+ }
+ default:
+ SkDebugf("");
+ }
+}
+
+#endif
+
+bool SkDisplayable::enable( SkAnimateMaker& ) {
+ return false;
+}
+
+void SkDisplayable::enableBounder() {
+}
+
+void SkDisplayable::executeFunction(SkDisplayable* , int index,
+ SkTDArray<SkScriptValue>& , SkDisplayTypes, SkScriptValue* ) {
+ SkASSERT(0);
+}
+
+void SkDisplayable::executeFunction(SkDisplayable* target,
+ const SkMemberInfo* info, SkTypedArray* values, SkScriptValue* value) {
+ SkTDArray<SkScriptValue> typedValues;
+ for (SkOperand* op = values->begin(); op < values->end(); op++) {
+ SkScriptValue temp;
+ temp.fType = values->getType();
+ temp.fOperand = *op;
+ *typedValues.append() = temp;
+ }
+ executeFunction(target, info->functionIndex(), typedValues, info->getType(), value);
+}
+
+void SkDisplayable::executeFunction2(SkDisplayable* , int index,
+ SkOpArray* params, SkDisplayTypes, SkOperand2* ) {
+ SkASSERT(0);
+}
+
+void SkDisplayable::getBounds(SkRect* rect) {
+ SkASSERT(rect);
+ rect->fLeft = rect->fTop = SK_ScalarMax;
+    rect->fRight = rect->fBottom = -SK_ScalarMax;
+}
+
+const SkFunctionParamType* SkDisplayable::getFunctionsParameters() {
+ return nullptr;
+}
+
+const SkMemberInfo* SkDisplayable::getMember(int index) {
+ return nullptr;
+}
+
+const SkMemberInfo* SkDisplayable::getMember(const char name[]) {
+ return nullptr;
+}
+
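+// Walks the 0-terminated parameter-type lists to find the one belonging to this
+// member's function index, returning a pointer to its start and its length.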
+const SkFunctionParamType* SkDisplayable::getParameters(const SkMemberInfo* info,
+ int* paramCount) {
+ const SkFunctionParamType* params = getFunctionsParameters();
+ SkASSERT(params != nullptr);
+ int funcIndex = info->functionIndex();
+ // !!! eventually break traversing params into an external function (maybe this whole function)
+ int index = funcIndex;
+ int offset = 0;
+ while (--index >= 0) {
+ while (params[offset] != 0)
+ offset++;
+ offset++;
+ }
+ int count = 0;
+ while (params[offset] != 0) {
+ count++;
+ offset++;
+ }
+ *paramCount = count;
+ return &params[offset - count];
+}
+
+SkDisplayable* SkDisplayable::getParent() const {
+ return nullptr;
+}
+
+bool SkDisplayable::getProperty(int index, SkScriptValue* ) const {
+// SkASSERT(0);
+ return false;
+}
+
+bool SkDisplayable::getProperty2(int index, SkOperand2* value) const {
+ SkASSERT(0);
+ return false;
+}
+
+SkDisplayTypes SkDisplayable::getType() const {
+ return SkType_Unknown;
+}
+
+bool SkDisplayable::hasEnable() const {
+ return false;
+}
+
+bool SkDisplayable::isDrawable() const {
+ return false;
+}
+
+void SkDisplayable::onEndElement(SkAnimateMaker& ) {}
+
+const SkMemberInfo* SkDisplayable::preferredChild(SkDisplayTypes type) {
+ return nullptr;
+}
+
+bool SkDisplayable::resolveIDs(SkAnimateMaker& maker, SkDisplayable* original, SkApply* apply) {
+ return false;
+}
+
+//SkDisplayable* SkDisplayable::resolveTarget(SkAnimateMaker& ) {
+// return this;
+//}
+
+void SkDisplayable::setChildHasID() {
+}
+
+bool SkDisplayable::setParent(SkDisplayable* ) {
+ return false;
+}
+
+bool SkDisplayable::setProperty(int index, SkScriptValue& ) {
+ //SkASSERT(0);
+ return false;
+}
+
+void SkDisplayable::setReference(const SkMemberInfo* info, SkDisplayable* displayable) {
+ if (info->fType == SkType_MemberProperty) {
+ SkScriptValue scriptValue;
+ scriptValue.fOperand.fDisplayable = displayable;
+ scriptValue.fType = displayable->getType();
+ setProperty(info->propertyIndex(), scriptValue);
+ } else if (info->fType == SkType_Array) {
+ SkASSERT(displayable->getType() == SkType_Array);
+ SkDisplayArray* dispArray = (SkDisplayArray*) displayable;
+ SkTDScalarArray* array = (SkTDScalarArray* ) info->memberData(this);
+ array->setCount(dispArray->values.count());
+ memcpy(array->begin(), dispArray->values.begin(), dispArray->values.count() * sizeof(int));
+ //
+
+ // !!! need a way for interpreter engine to own array
+ // !!! probably need to replace all scriptable arrays with single bigger array
+ // that has operand and type on every element -- or
+ // when array is dirtied, need to get parent to reparse to local array
+ } else {
+ void* storage = info->memberData(this);
+ memcpy(storage, &displayable, sizeof(SkDisplayable*));
+ }
+// !!! unclear why displayable is dirtied here
+// if this is called, this breaks fromPath.xml
+// displayable->dirty();
+}
+
+#ifdef SK_DEBUG
+void SkDisplayable::validate() {
+}
+#endif
diff --git a/gfx/skia/skia/src/animator/SkDisplayable.h b/gfx/skia/skia/src/animator/SkDisplayable.h
new file mode 100644
index 000000000..4fd47abc3
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDisplayable.h
@@ -0,0 +1,112 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDisplayable_DEFINED
+#define SkDisplayable_DEFINED
+
+#include "SkOperand.h"
+#ifdef SK_DEBUG
+#include "SkString.h"
+#endif
+#include "SkIntArray.h"
+#include "SkRect.h"
+#include "SkTDArray.h"
+
+class SkAnimateMaker;
+class SkApply;
+class SkEvents;
+struct SkMemberInfo;
+struct SkScriptValue;
+class SkOpArray; // compiled scripting experiment
+union SkOperand2; // compiled scripting experiment
+
+class SkDisplayable {
+public:
+#ifdef SK_DEBUG
+ SkDisplayable();
+#endif
+ virtual ~SkDisplayable();
+ virtual bool addChild(SkAnimateMaker& , SkDisplayable* child);
+ virtual bool canContainDependents() const;
+ virtual bool childrenNeedDisposing() const;
+ virtual void clearBounder();
+ virtual bool contains(SkDisplayable* );
+ virtual SkDisplayable* contains(const SkString& );
+ virtual SkDisplayable* deepCopy(SkAnimateMaker* );
+ virtual void dirty();
+#ifdef SK_DUMP_ENABLED
+ virtual void dump(SkAnimateMaker* );
+ void dumpAttrs(SkAnimateMaker* );
+ void dumpBase(SkAnimateMaker* );
+ void dumpChildren(SkAnimateMaker* maker, bool closedAngle = false );
+ void dumpEnd(SkAnimateMaker* );
+ virtual void dumpEvents();
+#endif
+ virtual bool enable( SkAnimateMaker& );
+ virtual void enableBounder();
+ virtual void executeFunction(SkDisplayable* , int functionIndex,
+ SkTDArray<SkScriptValue>& , SkDisplayTypes , SkScriptValue* );
+ void executeFunction(SkDisplayable* , const SkMemberInfo* ,
+ SkTypedArray* , SkScriptValue* );
+ virtual void executeFunction2(SkDisplayable* , int functionIndex,
+ SkOpArray* params , SkDisplayTypes , SkOperand2* ); // compiled scripting experiment
+ virtual void getBounds(SkRect* );
+ virtual const SkFunctionParamType* getFunctionsParameters();
+ virtual const SkMemberInfo* getMember(int index);
+ virtual const SkMemberInfo* getMember(const char name[]);
+ const SkFunctionParamType* getParameters(const SkMemberInfo* info,
+ int* paramCount);
+ virtual SkDisplayable* getParent() const;
+ virtual bool getProperty(int index, SkScriptValue* value) const;
+ virtual bool getProperty2(int index, SkOperand2* value) const; // compiled scripting experiment
+ virtual SkDisplayTypes getType() const;
+ virtual bool hasEnable() const;
+ bool isAnimate() const {
+ SkDisplayTypes type = getType();
+ return type == SkType_Animate || type == SkType_Set; }
+ bool isApply() const { return getType() == SkType_Apply; }
+ bool isColor() const { return getType() == SkType_Color; }
+ virtual bool isDrawable() const;
+ bool isGroup() const { return getType() == SkType_Group ||
+ getType() == SkType_Save || getType() == SkType_DrawTo ||
+ getType() == SkType_SaveLayer; }
+ bool isMatrix() const { return getType() == SkType_Matrix; }
+ virtual bool isPaint() const { return getType() == SkType_Paint; }
+ virtual bool isPath() const { return false; }
+ bool isPost() const { return getType() == SkType_Post; }
+ virtual void onEndElement(SkAnimateMaker& );
+ virtual const SkMemberInfo* preferredChild(SkDisplayTypes type);
+ virtual bool resolveIDs(SkAnimateMaker& maker, SkDisplayable* original, SkApply* );
+ virtual void setChildHasID();
+ virtual bool setParent(SkDisplayable* );
+ virtual bool setProperty(int index, SkScriptValue& );
+ void setReference(const SkMemberInfo* info, SkDisplayable* ref);
+#ifdef SK_DEBUG
+ bool isDataInput() const { return getType() == SkType_DataInput; };
+ bool isEvent() const { return getType() == SkType_Event; }
+ virtual bool isMatrixPart() const { return false; }
+ bool isPatch() const { return getType() == SkType_3D_Patch; }
+ virtual bool isPaintPart() const { return false; }
+ virtual bool isPathPart() const { return false; }
+ virtual void validate();
+ SkString _id;
+ const char* id;
+// static int fAllocationCount;
+ static SkTDDisplayableArray fAllocations;
+#else
+ void validate() {}
+#endif
+#ifdef SK_DUMP_ENABLED
+private:
+ void dumpValues(const SkMemberInfo* info, SkDisplayTypes type, SkOperand op, SkOperand blankOp,
+ SkOperand op2, SkOperand blankOp2);
+#endif
+};
+
+#endif // SkDisplayable_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDraw3D.cpp b/gfx/skia/skia/src/animator/SkDraw3D.cpp
new file mode 100644
index 000000000..e7e92df3e
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDraw3D.cpp
@@ -0,0 +1,106 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDraw3D.h"
+#include "SkAnimateMaker.h"
+#include "SkCanvas.h"
+#include "SkTypedArray.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo Sk3D_Point::fInfo[] = {
+ SK_MEMBER_ALIAS(x, fPoint.fX, Float),
+ SK_MEMBER_ALIAS(y, fPoint.fY, Float),
+ SK_MEMBER_ALIAS(z, fPoint.fZ, Float)
+};
+
+#endif
+
+DEFINE_NO_VIRTUALS_GET_MEMBER(Sk3D_Point);
+
+Sk3D_Point::Sk3D_Point() {
+ fPoint.set(0, 0, 0);
+}
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo Sk3D_Camera::fInfo[] = {
+ SK_MEMBER_ALIAS(axis, fCamera.fAxis, 3D_Point),
+ SK_MEMBER(hackHeight, Float),
+ SK_MEMBER(hackWidth, Float),
+ SK_MEMBER_ALIAS(location, fCamera.fLocation, 3D_Point),
+ SK_MEMBER_ALIAS(observer, fCamera.fObserver, 3D_Point),
+ SK_MEMBER(patch, 3D_Patch),
+ SK_MEMBER_ALIAS(zenith, fCamera.fZenith, 3D_Point),
+};
+
+#endif
+
+DEFINE_GET_MEMBER(Sk3D_Camera);
+
+Sk3D_Camera::Sk3D_Camera() : hackWidth(0), hackHeight(0), patch(nullptr) {
+}
+
+Sk3D_Camera::~Sk3D_Camera() {
+}
+
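+// Projects the referenced patch through the camera and concatenates the resulting
+// matrix onto the canvas, wrapped in the hackWidth/hackHeight translations.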
+bool Sk3D_Camera::draw(SkAnimateMaker& maker) {
+ fCamera.update();
+ SkMatrix matrix;
+ fCamera.patchToMatrix(patch->fPatch, &matrix);
+ matrix.preTranslate(hackWidth / 2, -hackHeight / 2);
+ matrix.postTranslate(hackWidth / 2, hackHeight / 2);
+ maker.fCanvas->concat(matrix);
+ return false;
+}
+
+
+enum Sk3D_Patch_Functions {
+ SK_FUNCTION(rotateDegrees)
+};
+
+const SkFunctionParamType Sk3D_Patch::fFunctionParameters[] = {
+ (SkFunctionParamType) SkType_Float,
+ (SkFunctionParamType) SkType_Float,
+ (SkFunctionParamType) SkType_Float,
+ (SkFunctionParamType) 0 // terminator for parameter list (there may be multiple parameter lists)
+};
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo Sk3D_Patch::fInfo[] = {
+ SK_MEMBER_ALIAS(origin, fPatch.fOrigin, 3D_Point),
+ SK_MEMBER_FUNCTION(rotateDegrees, Float),
+ SK_MEMBER_ALIAS(u, fPatch.fU, 3D_Point),
+ SK_MEMBER_ALIAS(v, fPatch.fV, 3D_Point)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(Sk3D_Patch);
+
+void Sk3D_Patch::executeFunction(SkDisplayable* target, int index,
+ SkTDArray<SkScriptValue>& parameters, SkDisplayTypes type,
+ SkScriptValue* ) {
+ SkASSERT(target == this);
+ switch (index) {
+ case SK_FUNCTION(rotateDegrees):
+ SkASSERT(parameters.count() == 3);
+ SkASSERT(type == SkType_Float);
+ fPatch.rotateDegrees(parameters[0].fOperand.fScalar,
+ parameters[1].fOperand.fScalar, parameters[2].fOperand.fScalar);
+ break;
+ default:
+ SkASSERT(0);
+ }
+}
+
+const SkFunctionParamType* Sk3D_Patch::getFunctionsParameters() {
+ return fFunctionParameters;
+}
diff --git a/gfx/skia/skia/src/animator/SkDraw3D.h b/gfx/skia/skia/src/animator/SkDraw3D.h
new file mode 100644
index 000000000..8c79b5e7b
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDraw3D.h
@@ -0,0 +1,50 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDraw3D_DEFINED
+#define SkDraw3D_DEFINED
+
+#include "SkCamera.h"
+#include "SkADrawable.h"
+#include "SkMemberInfo.h"
+
+class Sk3D_Patch;
+
+struct Sk3D_Point {
+ DECLARE_NO_VIRTUALS_MEMBER_INFO(3D_Point);
+ Sk3D_Point();
+private:
+ SkPoint3D fPoint;
+};
+
+class Sk3D_Camera : public SkADrawable {
+ DECLARE_MEMBER_INFO(3D_Camera);
+ Sk3D_Camera();
+ virtual ~Sk3D_Camera();
+ bool draw(SkAnimateMaker& ) override;
+private:
+ SkScalar hackWidth;
+ SkScalar hackHeight;
+ SkCamera3D fCamera;
+ Sk3D_Patch* patch;
+};
+
+class Sk3D_Patch : public SkDisplayable {
+ DECLARE_MEMBER_INFO(3D_Patch);
+private:
+ void executeFunction(SkDisplayable* , int index,
+ SkTDArray<SkScriptValue>& parameters, SkDisplayTypes type,
+ SkScriptValue* ) override;
+ const SkFunctionParamType* getFunctionsParameters() override;
+ SkPatch3D fPatch;
+ static const SkFunctionParamType fFunctionParameters[];
+ friend class Sk3D_Camera;
+};
+
+#endif // SkDraw3D_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawBitmap.cpp b/gfx/skia/skia/src/animator/SkDrawBitmap.cpp
new file mode 100644
index 000000000..191240855
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawBitmap.cpp
@@ -0,0 +1,201 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawBitmap.h"
+#include "SkAnimateMaker.h"
+#include "SkCanvas.h"
+#include "SkData.h"
+#include "SkImage.h"
+#include "SkPaint.h"
+#include "SkStream.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkBaseBitmap::fInfo[] = {
+ SK_MEMBER(x, Float),
+ SK_MEMBER(y, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkBaseBitmap);
+
+SkBaseBitmap::SkBaseBitmap() : x(0), y(0) {
+}
+
+SkBaseBitmap::~SkBaseBitmap() {
+}
+
+bool SkBaseBitmap::draw(SkAnimateMaker& maker) {
+ SkBoundableAuto boundable(this, maker);
+ maker.fCanvas->drawBitmap(fBitmap, x, y, maker.fPaint);
+ return false;
+}
+
+enum SkDrawBitmap_Properties {
+ SK_PROPERTY(erase)
+};
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawBitmap::fInfo[] = {
+ SK_MEMBER_INHERITED,
+ SK_MEMBER_PROPERTY(erase, ARGB),
+ SK_MEMBER(format, BitmapFormat),
+ SK_MEMBER(height, Int),
+ SK_MEMBER(rowBytes, Int),
+ SK_MEMBER(width, Int),
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawBitmap);
+
+SkDrawBitmap::SkDrawBitmap() : format((SkColorType) -1), height(-1),
+ rowBytes(0), width(-1), fColor(0), fColorSet(false) {
+}
+
+SkDrawBitmap::~SkDrawBitmap() {
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkDrawBitmap::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ dumpAttrs(maker);
+ if (fColorSet)
+ SkDebugf("erase=\"argb(%d,%d,%d,%d)\" ", SkColorGetA(fColor)/255, SkColorGetR(fColor),
+ SkColorGetG(fColor), SkColorGetB(fColor));
+ if (rowBytes > 0)
+ SkDebugf("rowBytes=\"%d\" ", rowBytes);
+ const char* formatName SK_INIT_TO_AVOID_WARNING;
+ switch (format) {
+ case 0: formatName = "none"; break;
+ case 1: formatName = "A8"; break;
+ case 2: formatName = "Index8"; break;
+ case 3: formatName = "RGB16"; break;
+ case 4: formatName = "RGB32"; break;
+ }
+ SkDebugf("format=\"%s\" />\n", formatName);
+}
+#endif
+
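+// Allocates pixel storage from the declared width/height/format and clears it to
+// the erase color when one was set.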
+void SkDrawBitmap::onEndElement(SkAnimateMaker&) {
+ SkASSERT(width != -1);
+ SkASSERT(height != -1);
+ SkASSERT(rowBytes >= 0);
+ SkColorType colorType = SkColorType(format);
+ fBitmap.allocPixels(SkImageInfo::Make(width, height, colorType, kPremul_SkAlphaType),
+ rowBytes);
+ if (fColorSet)
+ fBitmap.eraseColor(fColor);
+}
+
+bool SkDrawBitmap::setProperty(int index, SkScriptValue& value)
+{
+ switch (index) {
+ case SK_PROPERTY(erase):
+ SkASSERT(value.fType == SkType_ARGB);
+ fColor = value.fOperand.fS32;
+ fColorSet = true;
+ break;
+ default:
+ SkASSERT(0);
+ return false;
+ }
+ return true;
+}
+
+
+enum SkImageBaseBitmap_Properties {
+ SK_PROPERTY(height),
+ SK_PROPERTY(width)
+};
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkImageBaseBitmap::fInfo[] = {
+ SK_MEMBER_INHERITED,
+ SK_MEMBER(base64, Base64),
+ SK_MEMBER_PROPERTY(height, Int),
+ SK_MEMBER(src, String),
+ SK_MEMBER_PROPERTY(width, Int)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkImageBaseBitmap);
+
+SkImageBaseBitmap::SkImageBaseBitmap() : fDirty(true), fUriBase(nullptr) {
+ base64.fData = nullptr;
+ base64.fLength = 0;
+}
+
+SkImageBaseBitmap::~SkImageBaseBitmap() {
+ delete[] base64.fData;
+}
+
+SkDisplayable* SkImageBaseBitmap::deepCopy(SkAnimateMaker* maker) {
+ SkDisplayable* copy = INHERITED::deepCopy(maker);
+ ((SkImageBaseBitmap*) copy)->fUriBase = ((SkImageBaseBitmap*) this)->fUriBase;
+ return copy;
+}
+
+void SkImageBaseBitmap::dirty() {
+ fDirty = true;
+}
+
+bool SkImageBaseBitmap::draw(SkAnimateMaker& maker) {
+ if (fDirty)
+ resolve();
+ return INHERITED::draw(maker);
+}
+
+bool SkImageBaseBitmap::getProperty(int index, SkScriptValue* value) const {
+ if (fDirty)
+ resolve();
+ switch (index) {
+ case SK_PROPERTY(height):
+ value->fOperand.fS32 = fBitmap.height();
+ break;
+ case SK_PROPERTY(width):
+ value->fOperand.fS32 = fBitmap.width();
+ break;
+ default:
+ SkASSERT(0);
+ return false;
+ }
+ value->fType = SkType_Int;
+ return true;
+}
+
+void SkImageBaseBitmap::onEndElement(SkAnimateMaker& maker) {
+ fUriBase = maker.fPrefix.c_str();
+}
+
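+// Decodes the bitmap lazily: inline base64 data takes precedence; otherwise the
+// src file is decoded, and an unchanged src reuses the previous decode.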
+void SkImageBaseBitmap::resolve() {
+ fDirty = false;
+ if (base64.fData) {
+ fBitmap.reset();
+ sk_sp<SkData> data = SkData::MakeWithoutCopy(base64.fData, base64.fLength);
+ sk_sp<SkImage> image = SkImage::MakeFromEncoded(data);
+ image->asLegacyBitmap(&fBitmap, SkImage::kRO_LegacyBitmapMode);
+ } else if (src.size()) {
+ if (fLast.equals(src))
+ return;
+ fLast.set(src);
+ fBitmap.reset();
+
+ //SkStream* stream = SkStream::GetURIStream(fUriBase, src.c_str());
+ sk_sp<SkData> data = SkData::MakeFromFileName(src.c_str());
+ if (data) {
+ sk_sp<SkImage> image = SkImage::MakeFromEncoded(data);
+ image->asLegacyBitmap(&fBitmap, SkImage::kRO_LegacyBitmapMode);
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawBitmap.h b/gfx/skia/skia/src/animator/SkDrawBitmap.h
new file mode 100644
index 000000000..9fd25d604
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawBitmap.h
@@ -0,0 +1,73 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawBitmap_DEFINED
+#define SkDrawBitmap_DEFINED
+
+#include "SkBoundable.h"
+#include "SkBase64.h"
+#include "SkBitmap.h"
+#include "SkMemberInfo.h"
+
+class SkBaseBitmap : public SkBoundable {
+ DECLARE_MEMBER_INFO(BaseBitmap);
+ SkBaseBitmap();
+ virtual ~SkBaseBitmap();
+ bool draw(SkAnimateMaker& ) override;
+protected:
+ SkBitmap fBitmap;
+ SkScalar x;
+ SkScalar y;
+private:
+ friend class SkDrawTo;
+ friend class SkDrawBitmapShader;
+ typedef SkBoundable INHERITED;
+};
+
+class SkDrawBitmap : public SkBaseBitmap {
+ DECLARE_DRAW_MEMBER_INFO(Bitmap);
+ SkDrawBitmap();
+ virtual ~SkDrawBitmap();
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+ void onEndElement(SkAnimateMaker& ) override;
+ bool setProperty(int index, SkScriptValue& value) override;
+protected:
+ int /*SkBitmap::Config*/ format;
+ int32_t height;
+ int32_t rowBytes;
+ int32_t width;
+ SkColor fColor;
+ SkBool fColorSet;
+ typedef SkBaseBitmap INHERITED;
+};
+
+class SkImageBaseBitmap : public SkBaseBitmap {
+ DECLARE_MEMBER_INFO(ImageBaseBitmap);
+ SkImageBaseBitmap();
+ virtual ~SkImageBaseBitmap();
+ SkDisplayable* deepCopy(SkAnimateMaker* ) override;
+ void dirty() override;
+ bool draw(SkAnimateMaker& ) override;
+ bool getProperty(int index, SkScriptValue* value) const override;
+ void onEndElement(SkAnimateMaker& maker) override;
+private:
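+    // const shim: getProperty() is const but may need to lazily decode the bitmap.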
+ void resolve() const { (const_cast<SkImageBaseBitmap*>(this))->resolve(); }
+ void resolve();
+protected:
+ SkBase64 base64;
+ SkString src;
+ SkString fLast; // cache of src so that stream isn't unnecessarily decoded
+ SkBool fDirty;
+ const char* fUriBase;
+ typedef SkBaseBitmap INHERITED;
+};
+
+#endif // SkDrawBitmap_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawBlur.cpp b/gfx/skia/skia/src/animator/SkDrawBlur.cpp
new file mode 100644
index 000000000..0acb13ec5
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawBlur.cpp
@@ -0,0 +1,33 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawBlur.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawBlur::fInfo[] = {
+ SK_MEMBER(fBlurStyle, MaskFilterBlurStyle),
+ SK_MEMBER(fSigma, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawBlur);
+
+SkDrawBlur::SkDrawBlur()
+ : fSigma(-1)
+ , fBlurStyle(kNormal_SkBlurStyle) {
+}
+
+SkMaskFilter* SkDrawBlur::getMaskFilter() {
+ if (fSigma <= 0) {
+ return nullptr;
+ }
+ return SkBlurMaskFilter::Make((SkBlurStyle)fBlurStyle, fSigma).release();
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawBlur.h b/gfx/skia/skia/src/animator/SkDrawBlur.h
new file mode 100644
index 000000000..d3a528c22
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawBlur.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDrawBlur_DEFINED
+#define SkDrawBlur_DEFINED
+
+#include "SkPaintPart.h"
+#include "SkBlurMaskFilter.h"
+
+class SkDrawBlur : public SkDrawMaskFilter {
+ DECLARE_DRAW_MEMBER_INFO(Blur);
+ SkDrawBlur();
+ SkMaskFilter* getMaskFilter() override;
+protected:
+ SkScalar fSigma;
+ int /*SkBlurStyle*/ fBlurStyle;
+
+ typedef SkDrawMaskFilter INHERITED;
+};
+
+#endif // SkDrawBlur_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawClip.cpp b/gfx/skia/skia/src/animator/SkDrawClip.cpp
new file mode 100644
index 000000000..bca816f63
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawClip.cpp
@@ -0,0 +1,39 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawClip.h"
+#include "SkAnimateMaker.h"
+#include "SkCanvas.h"
+#include "SkDrawRectangle.h"
+#include "SkDrawPath.h"
+
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawClip::fInfo[] = {
+ SK_MEMBER(path, Path),
+ SK_MEMBER(rect, Rect)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawClip);
+
+SkDrawClip::SkDrawClip() : rect(nullptr), path(nullptr) {
+}
+
+bool SkDrawClip::draw(SkAnimateMaker& maker ) {
+ if (rect != nullptr)
+ maker.fCanvas->clipRect(rect->fRect);
+ else {
+ SkASSERT(path != nullptr);
+ maker.fCanvas->clipPath(path->fPath);
+ }
+ return false;
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawClip.h b/gfx/skia/skia/src/animator/SkDrawClip.h
new file mode 100644
index 000000000..6c64f93f2
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawClip.h
@@ -0,0 +1,29 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawClip_DEFINED
+#define SkDrawClip_DEFINED
+
+#include "SkADrawable.h"
+#include "SkMemberInfo.h"
+#include "SkRegion.h"
+
+class SkDrawPath;
+class SkDrawRect;
+
+class SkDrawClip : public SkADrawable {
+ DECLARE_DRAW_MEMBER_INFO(Clip);
+ SkDrawClip();
+ bool draw(SkAnimateMaker& ) override;
+private:
+ SkDrawRect* rect;
+ SkDrawPath* path;
+};
+
+#endif // SkDrawClip_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawColor.cpp b/gfx/skia/skia/src/animator/SkDrawColor.cpp
new file mode 100644
index 000000000..529d9a595
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawColor.cpp
@@ -0,0 +1,265 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawColor.h"
+#ifdef SK_DEBUG
+#include "SkDisplayList.h"
+#endif
+#include "SkDrawPaint.h"
+#include "SkParse.h"
+#include "SkScript.h"
+
+enum HSV_Choice {
+ kGetHue,
+ kGetSaturation,
+ kGetValue
+};
+
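+// Extracts one HSV component from an SkColor: hue is returned in degrees [0, 360),
+// saturation and value in [0, 1].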
+static SkScalar RGB_to_HSV(SkColor color, HSV_Choice choice) {
+ SkScalar red = SkIntToScalar(SkColorGetR(color));
+ SkScalar green = SkIntToScalar(SkColorGetG(color));
+ SkScalar blue = SkIntToScalar(SkColorGetB(color));
+ SkScalar min = SkMinScalar(SkMinScalar(red, green), blue);
+ SkScalar value = SkMaxScalar(SkMaxScalar(red, green), blue);
+ if (choice == kGetValue)
+ return value/255;
+ SkScalar delta = value - min;
+ SkScalar saturation = value == 0 ? 0 : delta / value;
+ if (choice == kGetSaturation)
+ return saturation;
+ SkScalar hue;
+ if (saturation == 0)
+ hue = 0;
+ else {
+ SkScalar part60 = 60 / delta;
+ if (red == value) {
+ hue = SkScalarMul(green - blue, part60);
+ if (hue < 0)
+ hue += 360 * SK_Scalar1;
+ }
+ else if (green == value)
+ hue = 120 * SK_Scalar1 + SkScalarMul(blue - red, part60);
+ else // blue == value
+ hue = 240 * SK_Scalar1 + SkScalarMul(red - green, part60);
+ }
+ SkASSERT(choice == kGetHue);
+ return hue;
+}
+
+#if defined _WIN32 // disable 'red', etc. may be used without having been initialized
+#pragma warning ( push )
+#pragma warning ( disable : 4701 )
+#endif
+
+static SkColor HSV_to_RGB(SkColor color, HSV_Choice choice, SkScalar hsv) {
+ SkScalar hue = choice == kGetHue ? hsv : RGB_to_HSV(color, kGetHue);
+ SkScalar saturation = choice == kGetSaturation ? hsv : RGB_to_HSV(color, kGetSaturation);
+ SkScalar value = choice == kGetValue ? hsv : RGB_to_HSV(color, kGetValue);
+ value *= 255;
+ SkScalar red SK_INIT_TO_AVOID_WARNING;
+ SkScalar green SK_INIT_TO_AVOID_WARNING;
+ SkScalar blue SK_INIT_TO_AVOID_WARNING;
+ if (saturation == 0) // color is on black-and-white center line
+ red = green = blue = value;
+ else {
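+        // Standard HSV-to-RGB: sextant selects the 60-degree hue segment; p, q and t
+        // are the intermediate channel values.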
+ //SkScalar fraction = SkScalarMod(hue, 60 * SK_Scalar1);
+ int sextant = SkScalarFloorToInt(hue / 60);
+ SkScalar fraction = hue / 60 - SkIntToScalar(sextant);
+ SkScalar p = SkScalarMul(value , SK_Scalar1 - saturation);
+ SkScalar q = SkScalarMul(value, SK_Scalar1 - SkScalarMul(saturation, fraction));
+ SkScalar t = SkScalarMul(value, SK_Scalar1 -
+ SkScalarMul(saturation, SK_Scalar1 - fraction));
+ switch (sextant % 6) {
+ case 0: red = value; green = t; blue = p; break;
+ case 1: red = q; green = value; blue = p; break;
+ case 2: red = p; green = value; blue = t; break;
+ case 3: red = p; green = q; blue = value; break;
+ case 4: red = t; green = p; blue = value; break;
+ case 5: red = value; green = p; blue = q; break;
+ }
+ }
+ //used to say SkToU8((U8CPU) red) etc
+ return SkColorSetARGB(SkColorGetA(color), SkScalarRoundToInt(red),
+ SkScalarRoundToInt(green), SkScalarRoundToInt(blue));
+}
+
+#if defined _WIN32
+#pragma warning ( pop )
+#endif
+
+enum SkDrawColor_Properties {
+ SK_PROPERTY(alpha),
+ SK_PROPERTY(blue),
+ SK_PROPERTY(green),
+ SK_PROPERTY(hue),
+ SK_PROPERTY(red),
+ SK_PROPERTY(saturation),
+ SK_PROPERTY(value)
+};
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawColor::fInfo[] = {
+ SK_MEMBER_PROPERTY(alpha, Float),
+ SK_MEMBER_PROPERTY(blue, Float),
+ SK_MEMBER(color, ARGB),
+ SK_MEMBER_PROPERTY(green, Float),
+ SK_MEMBER_PROPERTY(hue, Float),
+ SK_MEMBER_PROPERTY(red, Float),
+ SK_MEMBER_PROPERTY(saturation, Float),
+ SK_MEMBER_PROPERTY(value, Float),
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawColor);
+
+SkDrawColor::SkDrawColor() : fDirty(false) {
+ color = SK_ColorBLACK;
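+    // NaN marks an HSV override as unset; getColor() only folds in channels that have been set.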
+ fHue = fSaturation = fValue = SK_ScalarNaN;
+}
+
+bool SkDrawColor::add() {
+ if (fPaint->color != nullptr)
+ return true; // error (probably color in paint as attribute as well)
+ fPaint->color = this;
+ fPaint->fOwnsColor = true;
+ return false;
+}
+
+SkDisplayable* SkDrawColor::deepCopy(SkAnimateMaker*) {
+ SkDrawColor* copy = new SkDrawColor();
+ copy->color = color;
+ copy->fHue = fHue;
+ copy->fSaturation = fSaturation;
+ copy->fValue = fValue;
+ copy->fDirty = fDirty;
+ return copy;
+}
+
+void SkDrawColor::dirty(){
+ fDirty = true;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkDrawColor::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ SkDebugf("alpha=\"%d\" red=\"%d\" green=\"%d\" blue=\"%d\" />\n",
+ SkColorGetA(color)/255, SkColorGetR(color),
+ SkColorGetG(color), SkColorGetB(color));
+}
+#endif
+
+SkColor SkDrawColor::getColor() {
+ if (fDirty) {
+ if (SkScalarIsNaN(fValue) == false)
+ color = HSV_to_RGB(color, kGetValue, fValue);
+ if (SkScalarIsNaN(fSaturation) == false)
+ color = HSV_to_RGB(color, kGetSaturation, fSaturation);
+ if (SkScalarIsNaN(fHue) == false)
+ color = HSV_to_RGB(color, kGetHue, fHue);
+ fDirty = false;
+ }
+ return color;
+}
+
+SkDisplayable* SkDrawColor::getParent() const {
+ return fPaint;
+}
+
+bool SkDrawColor::getProperty(int index, SkScriptValue* value) const {
+ value->fType = SkType_Float;
+ SkScalar result;
+ switch(index) {
+ case SK_PROPERTY(alpha):
+ result = SkIntToScalar(SkColorGetA(color)) / 255;
+ break;
+ case SK_PROPERTY(blue):
+ result = SkIntToScalar(SkColorGetB(color));
+ break;
+ case SK_PROPERTY(green):
+ result = SkIntToScalar(SkColorGetG(color));
+ break;
+ case SK_PROPERTY(hue):
+ result = RGB_to_HSV(color, kGetHue);
+ break;
+ case SK_PROPERTY(red):
+ result = SkIntToScalar(SkColorGetR(color));
+ break;
+ case SK_PROPERTY(saturation):
+ result = RGB_to_HSV(color, kGetSaturation);
+ break;
+ case SK_PROPERTY(value):
+ result = RGB_to_HSV(color, kGetValue);
+ break;
+ default:
+ SkASSERT(0);
+ return false;
+ }
+ value->fOperand.fScalar = result;
+ return true;
+}
+
+void SkDrawColor::onEndElement(SkAnimateMaker&) {
+ fDirty = true;
+}
+
+bool SkDrawColor::setParent(SkDisplayable* parent) {
+ SkASSERT(parent != nullptr);
+ if (parent->getType() == SkType_DrawLinearGradient || parent->getType() == SkType_DrawRadialGradient)
+ return false;
+ if (parent->isPaint() == false)
+ return true;
+ fPaint = (SkDrawPaint*) parent;
+ return false;
+}
+
+bool SkDrawColor::setProperty(int index, SkScriptValue& value) {
+ SkASSERT(value.fType == SkType_Float);
+ SkScalar scalar = value.fOperand.fScalar;
+ switch (index) {
+ case SK_PROPERTY(alpha):
+ uint8_t alpha;
+ alpha = scalar == SK_Scalar1 ? 255 : SkToU8((U8CPU) (scalar * 256));
+ color = SkColorSetARGB(alpha, SkColorGetR(color),
+ SkColorGetG(color), SkColorGetB(color));
+ break;
+ case SK_PROPERTY(blue):
+ scalar = SkScalarClampMax(scalar, 255 * SK_Scalar1);
+ color = SkColorSetARGB(SkColorGetA(color), SkColorGetR(color),
+ SkColorGetG(color), SkToU8((U8CPU) scalar));
+ break;
+ case SK_PROPERTY(green):
+ scalar = SkScalarClampMax(scalar, 255 * SK_Scalar1);
+ color = SkColorSetARGB(SkColorGetA(color), SkColorGetR(color),
+ SkToU8((U8CPU) scalar), SkColorGetB(color));
+ break;
+ case SK_PROPERTY(hue):
+ fHue = scalar;//RGB_to_HSV(color, kGetHue);
+ fDirty = true;
+ break;
+ case SK_PROPERTY(red):
+ scalar = SkScalarClampMax(scalar, 255 * SK_Scalar1);
+ color = SkColorSetARGB(SkColorGetA(color), SkToU8((U8CPU) scalar),
+ SkColorGetG(color), SkColorGetB(color));
+ break;
+ case SK_PROPERTY(saturation):
+ fSaturation = scalar;//RGB_to_HSV(color, kGetSaturation);
+ fDirty = true;
+ break;
+ case SK_PROPERTY(value):
+ fValue = scalar;//RGB_to_HSV(color, kGetValue);
+ fDirty = true;
+ break;
+ default:
+ SkASSERT(0);
+ return false;
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawColor.h b/gfx/skia/skia/src/animator/SkDrawColor.h
new file mode 100644
index 000000000..14788e45b
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawColor.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDrawColor_DEFINED
+#define SkDrawColor_DEFINED
+
+#include "SkPaintPart.h"
+#include "SkColor.h"
+
+class SkDrawColor : public SkPaintPart {
+ DECLARE_DRAW_MEMBER_INFO(Color);
+ SkDrawColor();
+ bool add() override;
+ void dirty() override;
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+ SkColor getColor();
+ SkDisplayable* deepCopy(SkAnimateMaker* ) override;
+ SkDisplayable* getParent() const override;
+ bool getProperty(int index, SkScriptValue* value) const override;
+ void onEndElement(SkAnimateMaker& ) override;
+ bool setParent(SkDisplayable* parent) override;
+ bool setProperty(int index, SkScriptValue&) override;
+protected:
+ SkColor color;
+ SkScalar fHue;
+ SkScalar fSaturation;
+ SkScalar fValue;
+ SkBool fDirty;
+private:
+ friend class SkDrawGradient;
+ typedef SkPaintPart INHERITED;
+};
+
+#endif // SkDrawColor_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawDash.cpp b/gfx/skia/skia/src/animator/SkDrawDash.cpp
new file mode 100644
index 000000000..e7fcc8410
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawDash.cpp
@@ -0,0 +1,35 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawDash.h"
+#include "SkDashPathEffect.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDash::fInfo[] = {
+ SK_MEMBER_ARRAY(intervals, Float),
+ SK_MEMBER(phase, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDash);
+
+SkDash::SkDash() : phase(0) {
+}
+
+SkDash::~SkDash() {
+}
+
+SkPathEffect* SkDash::getPathEffect() {
+ int count = intervals.count();
+ if (count == 0)
+ return nullptr;
+ return SkDashPathEffect::Make(intervals.begin(), count, phase).release();
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawDash.h b/gfx/skia/skia/src/animator/SkDrawDash.h
new file mode 100644
index 000000000..3083fe812
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawDash.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDrawDash_DEFINED
+#define SkDrawDash_DEFINED
+
+#include "SkPaintPart.h"
+#include "SkIntArray.h"
+
+class SkDash : public SkDrawPathEffect {
+ DECLARE_MEMBER_INFO(Dash);
+ SkDash();
+ virtual ~SkDash();
+ SkPathEffect* getPathEffect() override;
+private:
+ SkTDScalarArray intervals;
+ SkScalar phase;
+};
+
+#endif // SkDrawDash_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawDiscrete.cpp b/gfx/skia/skia/src/animator/SkDrawDiscrete.cpp
new file mode 100644
index 000000000..11963a1d2
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawDiscrete.cpp
@@ -0,0 +1,34 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawDiscrete.h"
+#include "SkAnimateMaker.h"
+#include "SkPaint.h"
+#include "SkDiscretePathEffect.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDiscrete::fInfo[] = {
+ SK_MEMBER(deviation, Float),
+ SK_MEMBER(segLength, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDiscrete);
+
+SkDiscrete::SkDiscrete() : deviation(0), segLength(0) {
+}
+
+SkPathEffect* SkDiscrete::getPathEffect() {
+ if (deviation <= 0 || segLength <= 0)
+ return nullptr;
+ else
+ return SkDiscretePathEffect::Make(segLength, deviation).release();
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawDiscrete.h b/gfx/skia/skia/src/animator/SkDrawDiscrete.h
new file mode 100644
index 000000000..fe13f268b
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawDiscrete.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDrawDiscrete_DEFINED
+#define SkDrawDiscrete_DEFINED
+
+#include "SkPaintPart.h"
+
+class SkDiscrete : public SkDrawPathEffect {
+ DECLARE_MEMBER_INFO(Discrete);
+ SkDiscrete();
+ SkPathEffect* getPathEffect() override;
+private:
+ SkScalar deviation;
+ SkScalar segLength;
+};
+
+#endif //SkDrawDiscrete_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawEmboss.cpp b/gfx/skia/skia/src/animator/SkDrawEmboss.cpp
new file mode 100644
index 000000000..9f50f2449
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawEmboss.cpp
@@ -0,0 +1,34 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawEmboss.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawEmboss::fInfo[] = {
+ SK_MEMBER(fAmbient, Float),
+ SK_MEMBER_ARRAY(fDirection, Float),
+ SK_MEMBER(fSigma, Float),
+ SK_MEMBER(fSpecular, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawEmboss);
+
+SkDrawEmboss::SkDrawEmboss() : fSigma(-1) {
+ fDirection.setCount(3);
+}
+
+SkMaskFilter* SkDrawEmboss::getMaskFilter() {
+    if (fSigma < 0 || fDirection.count() != 3)
+ return nullptr;
+ return SkBlurMaskFilter::MakeEmboss(fSigma, fDirection.begin(),
+ fAmbient, fSpecular).release();
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawEmboss.h b/gfx/skia/skia/src/animator/SkDrawEmboss.h
new file mode 100644
index 000000000..941be61e2
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawEmboss.h
@@ -0,0 +1,28 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawEmboss_DEFINED
+#define SkDrawEmboss_DEFINED
+
+#include "SkDrawBlur.h"
+
+class SkDrawEmboss : public SkDrawMaskFilter {
+ DECLARE_DRAW_MEMBER_INFO(Emboss);
+ SkDrawEmboss();
+ SkMaskFilter* getMaskFilter() override;
+protected:
+ SkTDScalarArray fDirection;
+ SkScalar fSigma;
+ SkScalar fAmbient;
+ SkScalar fSpecular;
+
+ typedef SkDrawMaskFilter INHERITED;
+};
+
+#endif // SkDrawEmboss_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawExtraPathEffect.cpp b/gfx/skia/skia/src/animator/SkDrawExtraPathEffect.cpp
new file mode 100644
index 000000000..a444bc1cb
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawExtraPathEffect.cpp
@@ -0,0 +1,522 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkDrawExtraPathEffect.h"
+#include "SkDrawPath.h"
+#include "Sk1DPathEffect.h"
+#include "Sk2DPathEffect.h"
+#include "SkMemberInfo.h"
+#include "SkPaintPart.h"
+#include "SkPathEffect.h"
+#include "SkCornerPathEffect.h"
+
+#include "SkDashPathEffect.h"
+
+class SkDrawShapePathEffect : public SkDrawPathEffect {
+ DECLARE_PRIVATE_MEMBER_INFO(DrawShapePathEffect);
+ SkDrawShapePathEffect();
+ virtual ~SkDrawShapePathEffect();
+ bool addChild(SkAnimateMaker& , SkDisplayable* ) override;
+ SkPathEffect* getPathEffect() override;
+protected:
+ SkADrawable* addPath;
+ SkADrawable* addMatrix;
+ SkDrawPath* path;
+ SkPathEffect* fPathEffect;
+ friend class SkShape1DPathEffect;
+ friend class SkShape2DPathEffect;
+};
+
+class SkDrawShape1DPathEffect : public SkDrawShapePathEffect {
+ DECLARE_EXTRAS_MEMBER_INFO(SkDrawShape1DPathEffect);
+ SkDrawShape1DPathEffect(SkDisplayTypes );
+ virtual ~SkDrawShape1DPathEffect();
+ void onEndElement(SkAnimateMaker& ) override;
+private:
+ SkString phase;
+ SkString spacing;
+ friend class SkShape1DPathEffect;
+ typedef SkDrawShapePathEffect INHERITED;
+};
+
+class SkDrawShape2DPathEffect : public SkDrawShapePathEffect {
+ DECLARE_EXTRAS_MEMBER_INFO(SkDrawShape2DPathEffect);
+ SkDrawShape2DPathEffect(SkDisplayTypes );
+ virtual ~SkDrawShape2DPathEffect();
+ void onEndElement(SkAnimateMaker& ) override;
+private:
+ SkDrawMatrix* matrix;
+ friend class SkShape2DPathEffect;
+ typedef SkDrawShapePathEffect INHERITED;
+};
+
+class SkDrawComposePathEffect : public SkDrawPathEffect {
+ DECLARE_EXTRAS_MEMBER_INFO(SkDrawComposePathEffect);
+ SkDrawComposePathEffect(SkDisplayTypes );
+ virtual ~SkDrawComposePathEffect();
+ bool addChild(SkAnimateMaker& , SkDisplayable* ) override;
+ SkPathEffect* getPathEffect() override;
+ bool isPaint() const override;
+private:
+ SkDrawPathEffect* effect1;
+ SkDrawPathEffect* effect2;
+};
+
+class SkDrawCornerPathEffect : public SkDrawPathEffect {
+ DECLARE_EXTRAS_MEMBER_INFO(SkDrawCornerPathEffect);
+ SkDrawCornerPathEffect(SkDisplayTypes );
+ virtual ~SkDrawCornerPathEffect();
+ SkPathEffect* getPathEffect() override;
+private:
+ SkScalar radius;
+};
+
+//////////// SkShape1DPathEffect
+
+#include "SkAnimateMaker.h"
+#include "SkAnimatorScript.h"
+#include "SkDisplayApply.h"
+#include "SkDrawMatrix.h"
+#include "SkPaint.h"
+
+class SkShape1DPathEffect : public Sk1DPathEffect {
+public:
+ SkShape1DPathEffect(SkDrawShape1DPathEffect* draw, SkAnimateMaker* maker) :
+ fDraw(draw), fMaker(maker) {
+ }
+
+ // For serialization. This will never be called.
+ Factory getFactory() const override { sk_throw(); return nullptr; }
+
+protected:
+ SkScalar begin(SkScalar contourLength) const override {
+ SkScriptValue value;
+ SkAnimatorScript engine(*fMaker, nullptr, SkType_Float);
+ engine.propertyCallBack(GetContourLength, &contourLength);
+ value.fOperand.fScalar = 0;
+ engine.evaluate(fDraw->phase.c_str(), &value, SkType_Float);
+ return value.fOperand.fScalar;
+ }
+
+ SkScalar next(SkPath* dst, SkScalar distance, SkPathMeasure&) const override {
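+        // Adds one copy of the addPath drawable (transformed by addMatrix, if any) to dst and
+        // returns the scripted spacing to advance along the contour; the spacing and matrix
+        // scripts can read the current "distance" through the property callback set below.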
+ fMaker->setExtraPropertyCallBack(fDraw->fType, GetDistance, &distance);
+ SkDrawPath* drawPath = nullptr;
+ if (fDraw->addPath->isPath()) {
+ drawPath = (SkDrawPath*) fDraw->addPath;
+ } else {
+ SkApply* apply = (SkApply*) fDraw->addPath;
+ apply->refresh(*fMaker);
+ apply->activate(*fMaker);
+ apply->interpolate(*fMaker, SkScalarRoundToInt(distance * 1000));
+ drawPath = (SkDrawPath*) apply->getScope();
+ }
+ SkMatrix m;
+ m.reset();
+ if (fDraw->addMatrix) {
+ SkDrawMatrix* matrix;
+ if (fDraw->addMatrix->getType() == SkType_Matrix)
+ matrix = (SkDrawMatrix*) fDraw->addMatrix;
+ else {
+ SkApply* apply = (SkApply*) fDraw->addMatrix;
+ apply->refresh(*fMaker);
+ apply->activate(*fMaker);
+ apply->interpolate(*fMaker, SkScalarRoundToInt(distance * 1000));
+ matrix = (SkDrawMatrix*) apply->getScope();
+ }
+ if (matrix) {
+ m = matrix->getMatrix();
+ }
+ }
+ SkScalar result = 0;
+ SkAnimatorScript::EvaluateFloat(*fMaker, nullptr, fDraw->spacing.c_str(), &result);
+ if (drawPath)
+ dst->addPath(drawPath->getPath(), m);
+ fMaker->clearExtraPropertyCallBack(fDraw->fType);
+ return result;
+ }
+
+#ifndef SK_IGNORE_TO_STRING
+ void toString(SkString* str) const override {
+ str->appendf("SkShape1DPathEffect: (");
+ // TODO: fill in
+ str->appendf(")");
+ }
+#endif
+
+private:
+ static bool GetContourLength(const char* token, size_t len, void* clen, SkScriptValue* value) {
+ if (SK_LITERAL_STR_EQUAL("contourLength", token, len)) {
+ value->fOperand.fScalar = *(SkScalar*) clen;
+ value->fType = SkType_Float;
+ return true;
+ }
+ return false;
+ }
+
+ static bool GetDistance(const char* token, size_t len, void* dist, SkScriptValue* value) {
+ if (SK_LITERAL_STR_EQUAL("distance", token, len)) {
+ value->fOperand.fScalar = *(SkScalar*) dist;
+ value->fType = SkType_Float;
+ return true;
+ }
+ return false;
+ }
+
+ SkDrawShape1DPathEffect* fDraw;
+ SkAnimateMaker* fMaker;
+};
+
+//////////// SkDrawShapePathEffect
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawShapePathEffect::fInfo[] = {
+ SK_MEMBER(addMatrix, Drawable), // either matrix or apply
+ SK_MEMBER(addPath, Drawable), // either path or apply
+ SK_MEMBER(path, Path),
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawShapePathEffect);
+
+SkDrawShapePathEffect::SkDrawShapePathEffect() :
+ addPath(nullptr), addMatrix(nullptr), path(nullptr), fPathEffect(nullptr) {
+}
+
+SkDrawShapePathEffect::~SkDrawShapePathEffect() {
+ SkSafeUnref(fPathEffect);
+}
+
+bool SkDrawShapePathEffect::addChild(SkAnimateMaker& , SkDisplayable* child) {
+ path = (SkDrawPath*) child;
+ return true;
+}
+
+SkPathEffect* SkDrawShapePathEffect::getPathEffect() {
+ fPathEffect->ref();
+ return fPathEffect;
+}
+
+//////////// SkDrawShape1DPathEffect
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawShape1DPathEffect::fInfo[] = {
+ SK_MEMBER_INHERITED,
+ SK_MEMBER(phase, String),
+ SK_MEMBER(spacing, String),
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawShape1DPathEffect);
+
+SkDrawShape1DPathEffect::SkDrawShape1DPathEffect(SkDisplayTypes type) : fType(type) {
+}
+
+SkDrawShape1DPathEffect::~SkDrawShape1DPathEffect() {
+}
+
+void SkDrawShape1DPathEffect::onEndElement(SkAnimateMaker& maker) {
+ if (addPath == nullptr || (addPath->isPath() == false && addPath->isApply() == false))
+ maker.setErrorCode(SkDisplayXMLParserError::kUnknownError); // !!! add error
+ else
+ fPathEffect = new SkShape1DPathEffect(this, &maker);
+}
+
+////////// SkShape2DPathEffect
+
+class SkShape2DPathEffect : public Sk2DPathEffect {
+public:
+ SkShape2DPathEffect(SkDrawShape2DPathEffect* draw, SkAnimateMaker* maker,
+ const SkMatrix& matrix) : Sk2DPathEffect(matrix), fDraw(draw), fMaker(maker) {
+ }
+
+ // For serialization. This will never be called.
+ Factory getFactory() const override { sk_throw(); return nullptr; }
+
+protected:
+ void begin(const SkIRect& uvBounds, SkPath*) const override {
+ const_cast<SkShape2DPathEffect*>(this)->setUVBounds(uvBounds);
+ }
+
+ void next(const SkPoint& loc, int u, int v, SkPath* dst) const override {
+ const_cast<SkShape2DPathEffect*>(this)->addPath(loc, u, v, dst);
+ }
+
+private:
+ void setUVBounds(const SkIRect& uvBounds) {
+ fUVBounds.set(SkIntToScalar(uvBounds.fLeft), SkIntToScalar(uvBounds.fTop),
+ SkIntToScalar(uvBounds.fRight), SkIntToScalar(uvBounds.fBottom));
+ }
+
+ void addPath(const SkPoint& loc, int u, int v, SkPath* dst) {
+ fLoc = loc;
+ fU = u;
+ fV = v;
+ SkDrawPath* drawPath;
+ fMaker->setExtraPropertyCallBack(fDraw->fType, Get2D, this);
+ if (fDraw->addPath->isPath()) {
+ drawPath = (SkDrawPath*) fDraw->addPath;
+ } else {
+ SkApply* apply = (SkApply*) fDraw->addPath;
+ apply->refresh(*fMaker);
+ apply->activate(*fMaker);
+ apply->interpolate(*fMaker, v);
+ drawPath = (SkDrawPath*) apply->getScope();
+ }
+ if (drawPath == nullptr)
+ goto clearCallBack;
+ if (fDraw->matrix) {
+ SkDrawMatrix* matrix;
+ if (fDraw->matrix->getType() == SkType_Matrix)
+ matrix = (SkDrawMatrix*) fDraw->matrix;
+ else {
+ SkApply* apply = (SkApply*) fDraw->matrix;
+ apply->activate(*fMaker);
+ apply->interpolate(*fMaker, v);
+ matrix = (SkDrawMatrix*) apply->getScope();
+ }
+ if (matrix) {
+ dst->addPath(drawPath->getPath(), matrix->getMatrix());
+ goto clearCallBack;
+ }
+ }
+ dst->addPath(drawPath->getPath());
+clearCallBack:
+ fMaker->clearExtraPropertyCallBack(fDraw->fType);
+ }
+
+ static bool Get2D(const char* token, size_t len, void* s2D, SkScriptValue* value) {
+ static const char match[] = "locX|locY|left|top|right|bottom|u|v" ;
+ SkShape2DPathEffect* shape2D = (SkShape2DPathEffect*) s2D;
+ int index;
+ if (SkAnimatorScript::MapEnums(match, token, len, &index) == false)
+ return false;
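+        // Indices 0-5 read fLoc (x, y) then fUVBounds (left, top, right, bottom), which are
+        // declared back to back; indices 6-7 read fU and fV.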
+ SkASSERT((sizeof(SkPoint) + sizeof(SkRect)) / sizeof(SkScalar) == 6);
+ if (index < 6) {
+ value->fType = SkType_Float;
+ value->fOperand.fScalar = (&shape2D->fLoc.fX)[index];
+ } else {
+ value->fType = SkType_Int;
+ value->fOperand.fS32 = (&shape2D->fU)[index - 6];
+ }
+ return true;
+ }
+
+ SkPoint fLoc;
+ SkRect fUVBounds;
+ int32_t fU;
+ int32_t fV;
+ SkDrawShape2DPathEffect* fDraw;
+ SkAnimateMaker* fMaker;
+
+ // illegal
+ SkShape2DPathEffect(const SkShape2DPathEffect&);
+ SkShape2DPathEffect& operator=(const SkShape2DPathEffect&);
+};
+
+////////// SkDrawShape2DPathEffect
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawShape2DPathEffect::fInfo[] = {
+ SK_MEMBER_INHERITED,
+ SK_MEMBER(matrix, Matrix)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawShape2DPathEffect);
+
+SkDrawShape2DPathEffect::SkDrawShape2DPathEffect(SkDisplayTypes type) : fType(type) {
+}
+
+SkDrawShape2DPathEffect::~SkDrawShape2DPathEffect() {
+}
+
+void SkDrawShape2DPathEffect::onEndElement(SkAnimateMaker& maker) {
+ if (addPath == nullptr || (addPath->isPath() == false && addPath->isApply() == false) ||
+ matrix == nullptr)
+ maker.setErrorCode(SkDisplayXMLParserError::kUnknownError); // !!! add error
+ else
+ fPathEffect = new SkShape2DPathEffect(this, &maker, matrix->getMatrix());
+}
+
+////////// SkDrawComposePathEffect
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawComposePathEffect::fInfo[] = {
+ SK_MEMBER(effect1, PathEffect),
+ SK_MEMBER(effect2, PathEffect)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawComposePathEffect);
+
+SkDrawComposePathEffect::SkDrawComposePathEffect(SkDisplayTypes type) : fType(type),
+ effect1(nullptr), effect2(nullptr) {
+}
+
+SkDrawComposePathEffect::~SkDrawComposePathEffect() {
+ delete effect1;
+ delete effect2;
+}
+
+bool SkDrawComposePathEffect::addChild(SkAnimateMaker& , SkDisplayable* child) {
+ if (effect1 == nullptr)
+ effect1 = (SkDrawPathEffect*) child;
+ else
+ effect2 = (SkDrawPathEffect*) child;
+ return true;
+}
+
+SkPathEffect* SkDrawComposePathEffect::getPathEffect() {
+ auto e1 = sk_sp<SkPathEffect>(effect1->getPathEffect());
+ auto e2 = sk_sp<SkPathEffect>(effect2->getPathEffect());
+ return SkComposePathEffect::Make(e1, e2).release();
+}
+
+bool SkDrawComposePathEffect::isPaint() const {
+ return true;
+}
+
+//////////// SkDrawCornerPathEffect
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawCornerPathEffect::fInfo[] = {
+ SK_MEMBER(radius, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawCornerPathEffect);
+
+SkDrawCornerPathEffect::SkDrawCornerPathEffect(SkDisplayTypes type):
+ fType(type), radius(0) {
+}
+
+SkDrawCornerPathEffect::~SkDrawCornerPathEffect() {
+}
+
+SkPathEffect* SkDrawCornerPathEffect::getPathEffect() {
+ return SkCornerPathEffect::Make(radius).release();
+}
+
+/////////
+
+#include "SkExtras.h"
+
+const char kDrawShape1DPathEffectName[] = "pathEffect:shape1D";
+const char kDrawShape2DPathEffectName[] = "pathEffect:shape2D";
+const char kDrawComposePathEffectName[] = "pathEffect:compose";
+const char kDrawCornerPathEffectName[] = "pathEffect:corner";
+
+class SkExtraPathEffects : public SkExtras {
+public:
+ SkExtraPathEffects() :
+ skDrawShape1DPathEffectType(SkType_Unknown),
+ skDrawShape2DPathEffectType(SkType_Unknown),
+ skDrawComposePathEffectType(SkType_Unknown),
+ skDrawCornerPathEffectType(SkType_Unknown) {
+ }
+
+ virtual SkDisplayable* createInstance(SkDisplayTypes type) {
+ SkDisplayable* result = nullptr;
+ if (skDrawShape1DPathEffectType == type)
+ result = new SkDrawShape1DPathEffect(type);
+ else if (skDrawShape2DPathEffectType == type)
+ result = new SkDrawShape2DPathEffect(type);
+ else if (skDrawComposePathEffectType == type)
+ result = new SkDrawComposePathEffect(type);
+ else if (skDrawCornerPathEffectType == type)
+ result = new SkDrawCornerPathEffect(type);
+ return result;
+ }
+
+ virtual bool definesType(SkDisplayTypes type) {
+ return type == skDrawShape1DPathEffectType ||
+ type == skDrawShape2DPathEffectType ||
+ type == skDrawComposePathEffectType ||
+ type == skDrawCornerPathEffectType;
+ }
+
+#if SK_USE_CONDENSED_INFO == 0
+ virtual const SkMemberInfo* getMembers(SkDisplayTypes type, int* infoCountPtr) {
+ const SkMemberInfo* info = nullptr;
+ int infoCount = 0;
+ if (skDrawShape1DPathEffectType == type) {
+ info = SkDrawShape1DPathEffect::fInfo;
+ infoCount = SkDrawShape1DPathEffect::fInfoCount;
+ } else if (skDrawShape2DPathEffectType == type) {
+ info = SkDrawShape2DPathEffect::fInfo;
+ infoCount = SkDrawShape2DPathEffect::fInfoCount;
+ } else if (skDrawComposePathEffectType == type) {
+ info = SkDrawComposePathEffect::fInfo;
+            infoCount = SkDrawComposePathEffect::fInfoCount;
+ } else if (skDrawCornerPathEffectType == type) {
+ info = SkDrawCornerPathEffect::fInfo;
+ infoCount = SkDrawCornerPathEffect::fInfoCount;
+ }
+ if (infoCountPtr)
+ *infoCountPtr = infoCount;
+ return info;
+ }
+#endif
+
+#ifdef SK_DEBUG
+ virtual const char* getName(SkDisplayTypes type) {
+ if (skDrawShape1DPathEffectType == type)
+ return kDrawShape1DPathEffectName;
+ else if (skDrawShape2DPathEffectType == type)
+ return kDrawShape2DPathEffectName;
+ else if (skDrawComposePathEffectType == type)
+ return kDrawComposePathEffectName;
+ else if (skDrawCornerPathEffectType == type)
+ return kDrawCornerPathEffectName;
+ return nullptr;
+ }
+#endif
+
+ virtual SkDisplayTypes getType(const char name[], size_t len ) {
+ SkDisplayTypes* type = nullptr;
+ if (SK_LITERAL_STR_EQUAL(kDrawShape1DPathEffectName, name, len))
+ type = &skDrawShape1DPathEffectType;
+ else if (SK_LITERAL_STR_EQUAL(kDrawShape2DPathEffectName, name, len))
+ type = &skDrawShape2DPathEffectType;
+ else if (SK_LITERAL_STR_EQUAL(kDrawComposePathEffectName, name, len))
+ type = &skDrawComposePathEffectType;
+ else if (SK_LITERAL_STR_EQUAL(kDrawCornerPathEffectName, name, len))
+ type = &skDrawCornerPathEffectType;
+ if (type) {
+ if (*type == SkType_Unknown)
+ *type = SkDisplayType::RegisterNewType();
+ return *type;
+ }
+ return SkType_Unknown;
+ }
+
+private:
+ SkDisplayTypes skDrawShape1DPathEffectType;
+ SkDisplayTypes skDrawShape2DPathEffectType;
+ SkDisplayTypes skDrawComposePathEffectType;
+ SkDisplayTypes skDrawCornerPathEffectType;
+};
+
+void InitializeSkExtraPathEffects(SkAnimator* animator) {
+ animator->addExtras(new SkExtraPathEffects());
+}
+
+////////////////
+
+
+SkExtras::SkExtras() : fExtraCallBack(nullptr), fExtraStorage(nullptr) {
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawExtraPathEffect.h b/gfx/skia/skia/src/animator/SkDrawExtraPathEffect.h
new file mode 100644
index 000000000..392a46b8c
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawExtraPathEffect.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SK_DRAW_EXTRA_PATH_EFFECT_H
+#define SK_DRAW_EXTRA_PATH_EFFECT_H
+
+class SkAnimator;
+
+void InitializeSkExtraPathEffects(SkAnimator* animator);
+
+#endif
diff --git a/gfx/skia/skia/src/animator/SkDrawFull.cpp b/gfx/skia/skia/src/animator/SkDrawFull.cpp
new file mode 100644
index 000000000..a1a5fc9f1
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawFull.cpp
@@ -0,0 +1,18 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawFull.h"
+#include "SkAnimateMaker.h"
+#include "SkCanvas.h"
+
+bool SkFull::draw(SkAnimateMaker& maker) {
+ SkBoundableAuto boundable(this, maker);
+ maker.fCanvas->drawPaint(*maker.fPaint);
+ return false;
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawFull.h b/gfx/skia/skia/src/animator/SkDrawFull.h
new file mode 100644
index 000000000..8a79c4dba
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawFull.h
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawFull_DEFINED
+#define SkDrawFull_DEFINED
+
+#include "SkBoundable.h"
+
+class SkFull : public SkBoundable {
+ DECLARE_EMPTY_MEMBER_INFO(Full);
+ bool draw(SkAnimateMaker& ) override;
+private:
+ typedef SkBoundable INHERITED;
+};
+
+#endif // SkDrawFull_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawGradient.cpp b/gfx/skia/skia/src/animator/SkDrawGradient.cpp
new file mode 100644
index 000000000..2fa4502f6
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawGradient.cpp
@@ -0,0 +1,168 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawGradient.h"
+#include "SkAnimateMaker.h"
+#include "SkAnimatorScript.h"
+#include "SkGradientShader.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawGradient::fInfo[] = {
+ SK_MEMBER_INHERITED,
+ SK_MEMBER_ARRAY(offsets, Float),
+ SK_MEMBER(unitMapper, String)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawGradient);
+
+SkDrawGradient::SkDrawGradient() {
+}
+
+SkDrawGradient::~SkDrawGradient() {
+ for (int index = 0; index < fDrawColors.count(); index++)
+ delete fDrawColors[index];
+}
+
+bool SkDrawGradient::addChild(SkAnimateMaker& , SkDisplayable* child) {
+ SkASSERT(child);
+ if (child->isColor()) {
+ SkDrawColor* color = (SkDrawColor*) child;
+ *fDrawColors.append() = color;
+ return true;
+ }
+ return false;
+}
+
+int SkDrawGradient::addPrelude() {
+ int count = fDrawColors.count();
+ fColors.setCount(count);
+ for (int index = 0; index < count; index++)
+ fColors[index] = fDrawColors[index]->color;
+ return count;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkDrawGradient::dumpRest(SkAnimateMaker* maker) {
+ dumpAttrs(maker);
+ //can a gradient have no colors?
+ bool closedYet = false;
+ SkDisplayList::fIndent += 4;
+ for (SkDrawColor** ptr = fDrawColors.begin(); ptr < fDrawColors.end(); ptr++) {
+ if (closedYet == false) {
+ SkDebugf(">\n");
+ closedYet = true;
+ }
+ SkDrawColor* color = *ptr;
+ color->dump(maker);
+ }
+ SkDisplayList::fIndent -= 4;
+ dumpChildren(maker, closedYet); //dumps the matrix if it has one
+}
+#endif
+
+void SkDrawGradient::onEndElement(SkAnimateMaker& maker) {
+ if (offsets.count() != 0) {
+ if (offsets.count() != fDrawColors.count()) {
+ maker.setErrorCode(SkDisplayXMLParserError::kGradientOffsetsDontMatchColors);
+ return;
+ }
+ if (offsets[0] != 0) {
+ maker.setErrorCode(SkDisplayXMLParserError::kGradientOffsetsMustStartWithZero);
+ return;
+ }
+ if (offsets[offsets.count()-1] != SK_Scalar1) {
+ maker.setErrorCode(SkDisplayXMLParserError::kGradientOffsetsMustEndWithOne);
+ return;
+ }
+ for (int i = 1; i < offsets.count(); i++) {
+ if (offsets[i] <= offsets[i-1]) {
+ maker.setErrorCode(SkDisplayXMLParserError::kGradientOffsetsMustIncrease);
+ return;
+ }
+ if (offsets[i] > SK_Scalar1) {
+ maker.setErrorCode(SkDisplayXMLParserError::kGradientOffsetsMustBeNoMoreThanOne);
+ return;
+ }
+ }
+ }
+ INHERITED::onEndElement(maker);
+}
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawLinearGradient::fInfo[] = {
+ SK_MEMBER_INHERITED,
+ SK_MEMBER_ARRAY(points, Float),
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawLinearGradient);
+
+SkDrawLinearGradient::SkDrawLinearGradient() {
+}
+
+void SkDrawLinearGradient::onEndElement(SkAnimateMaker& maker)
+{
+ if (points.count() != 4)
+ maker.setErrorCode(SkDisplayXMLParserError::kGradientPointsLengthMustBeFour);
+ INHERITED::onEndElement(maker);
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkDrawLinearGradient::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ dumpRest(maker);
+ }
+#endif
+
+SkShader* SkDrawLinearGradient::getShader() {
+ if (addPrelude() == 0 || points.count() != 4) {
+ return nullptr;
+ }
+ return SkGradientShader::MakeLinear((SkPoint*)points.begin(),
+ fColors.begin(), offsets.begin(), fColors.count(), (SkShader::TileMode) tileMode,
+ 0, getMatrix()).release();
+}
+
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawRadialGradient::fInfo[] = {
+ SK_MEMBER_INHERITED,
+ SK_MEMBER(center, Point),
+ SK_MEMBER(radius, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawRadialGradient);
+
+SkDrawRadialGradient::SkDrawRadialGradient() : radius(0) {
+ center.set(0, 0);
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkDrawRadialGradient::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ dumpRest(maker);
+}
+#endif
+
+SkShader* SkDrawRadialGradient::getShader() {
+ if (addPrelude() == 0) {
+ return nullptr;
+ }
+ return SkGradientShader::MakeRadial(center,
+ radius, fColors.begin(), offsets.begin(), fColors.count(), (SkShader::TileMode) tileMode,
+ 0, getMatrix()).release();
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawGradient.h b/gfx/skia/skia/src/animator/SkDrawGradient.h
new file mode 100644
index 000000000..87df37624
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawGradient.h
@@ -0,0 +1,64 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawGradient_DEFINED
+#define SkDrawGradient_DEFINED
+
+#include "SkDrawColor.h"
+#include "SkDrawShader.h"
+#include "SkIntArray.h"
+
+class SkDrawGradient : public SkDrawShader {
+ DECLARE_PRIVATE_MEMBER_INFO(DrawGradient);
+ SkDrawGradient();
+ virtual ~SkDrawGradient();
+ bool addChild(SkAnimateMaker& , SkDisplayable* child) override;
+#ifdef SK_DUMP_ENABLED
+ virtual void dumpRest(SkAnimateMaker*);
+#endif
+ void onEndElement(SkAnimateMaker& ) override;
+protected:
+ SkTDScalarArray offsets;
+ SkString unitMapper;
+ SkTDColorArray fColors;
+ SkTDDrawColorArray fDrawColors;
+ int addPrelude();
+private:
+ typedef SkDrawShader INHERITED;
+};
+
+class SkDrawLinearGradient : public SkDrawGradient {
+ DECLARE_MEMBER_INFO(DrawLinearGradient);
+ SkDrawLinearGradient();
+ void onEndElement(SkAnimateMaker& ) override;
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker*) override;
+#endif
+ SkShader* getShader() override;
+protected:
+ SkTDScalarArray points;
+private:
+ typedef SkDrawGradient INHERITED;
+};
+
+class SkDrawRadialGradient : public SkDrawGradient {
+ DECLARE_MEMBER_INFO(DrawRadialGradient);
+ SkDrawRadialGradient();
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker*) override;
+#endif
+ SkShader* getShader() override;
+protected:
+ SkPoint center;
+ SkScalar radius;
+private:
+ typedef SkDrawGradient INHERITED;
+};
+
+#endif // SkDrawGradient_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawGroup.cpp b/gfx/skia/skia/src/animator/SkDrawGroup.cpp
new file mode 100644
index 000000000..76807bf59
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawGroup.cpp
@@ -0,0 +1,321 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawGroup.h"
+#include "SkAnimateMaker.h"
+#include "SkAnimatorScript.h"
+#include "SkCanvas.h"
+#include "SkDisplayApply.h"
+#include "SkPaint.h"
+#ifdef SK_DEBUG
+#include "SkDisplayList.h"
+#endif
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkGroup::fInfo[] = {
+ SK_MEMBER(condition, String),
+ SK_MEMBER(enableCondition, String)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkGroup);
+
+SkGroup::SkGroup() : fParentList(nullptr), fOriginal(nullptr) {
+}
+
+SkGroup::~SkGroup() {
+ if (fOriginal) // has been copied
+ return;
+ int index = 0;
+ int max = fCopies.count() << 5;
+ for (SkADrawable** ptr = fChildren.begin(); ptr < fChildren.end(); ptr++) {
+ if (index >= max || markedForDelete(index))
+ delete *ptr;
+// else {
+// SkApply* apply = (SkApply*) *ptr;
+// SkASSERT(apply->isApply());
+// SkASSERT(apply->getScope());
+// delete apply->getScope();
+// }
+ index++;
+ }
+}
+
+bool SkGroup::addChild(SkAnimateMaker& , SkDisplayable* child) {
+ SkASSERT(child);
+// SkASSERT(child->isDrawable());
+ *fChildren.append() = (SkADrawable*) child;
+ if (child->isGroup()) {
+ SkGroup* groupie = (SkGroup*) child;
+ SkASSERT(groupie->fParentList == nullptr);
+ groupie->fParentList = &fChildren;
+ }
+ return true;
+}
+
+bool SkGroup::contains(SkDisplayable* match) {
+ for (SkADrawable** ptr = fChildren.begin(); ptr < fChildren.end(); ptr++) {
+ SkADrawable* drawable = *ptr;
+ if (drawable == match || drawable->contains(match))
+ return true;
+ }
+ return false;
+}
+
+SkGroup* SkGroup::copy() {
+ SkGroup* result = new SkGroup();
+ result->fOriginal = this;
+ result->fChildren = fChildren;
+ return result;
+}
+
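+// fCopies packs one flag per child into 32-bit words: bit (index & 0x1f) of word (index >> 5)
+// records whether that child has been marked as a copy (see markCopySet / markedForDelete).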
+SkBool SkGroup::copySet(int index) {
+ return (fCopies[index >> 5] & 1 << (index & 0x1f)) != 0;
+}
+
+SkDisplayable* SkGroup::deepCopy(SkAnimateMaker* maker) {
+ SkDisplayable* copy = INHERITED::deepCopy(maker);
+ for (SkADrawable** ptr = fChildren.begin(); ptr < fChildren.end(); ptr++) {
+ SkDisplayable* displayable = (SkDisplayable*)*ptr;
+ SkDisplayable* deeperCopy = displayable->deepCopy(maker);
+ ((SkGroup*)copy)->addChild(*maker, deeperCopy);
+ }
+ return copy;
+}
+
+bool SkGroup::doEvent(SkDisplayEvent::Kind kind, SkEventState* state) {
+ bool handled = false;
+ for (SkADrawable** ptr = fChildren.begin(); ptr < fChildren.end(); ptr++) {
+ SkADrawable* drawable = *ptr;
+ if (drawable->isDrawable() == false)
+ continue;
+ handled |= drawable->doEvent(kind, state);
+ }
+ return handled;
+}
+
+bool SkGroup::draw(SkAnimateMaker& maker) {
+ bool conditionTrue = ifCondition(maker, this, condition);
+ bool result = false;
+ for (SkADrawable** ptr = fChildren.begin(); ptr < fChildren.end(); ptr++) {
+ SkADrawable* drawable = *ptr;
+ if (drawable->isDrawable() == false)
+ continue;
+ if (conditionTrue == false) {
+ if (drawable->isApply())
+ ((SkApply*) drawable)->disable();
+ continue;
+ }
+ maker.validate();
+ result |= drawable->draw(maker);
+ maker.validate();
+ }
+ return result;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkGroup::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ if (condition.size() > 0)
+ SkDebugf("condition=\"%s\" ", condition.c_str());
+ if (enableCondition.size() > 0)
+ SkDebugf("enableCondition=\"%s\" ", enableCondition.c_str());
+ dumpDrawables(maker);
+}
+
+void SkGroup::dumpDrawables(SkAnimateMaker* maker) {
+ SkDisplayList::fIndent += 4;
+ int save = SkDisplayList::fDumpIndex;
+ SkDisplayList::fDumpIndex = 0;
+ bool closedYet = false;
+ for (SkADrawable** ptr = fChildren.begin(); ptr < fChildren.end(); ptr++) {
+ if (closedYet == false) {
+ closedYet = true;
+ SkDebugf(">\n");
+ }
+ SkADrawable* drawable = *ptr;
+ drawable->dump(maker);
+ SkDisplayList::fDumpIndex++;
+ }
+ SkDisplayList::fIndent -= 4;
+ SkDisplayList::fDumpIndex = save;
+ if (closedYet) //we had children, now it's time to close the group
+ dumpEnd(maker);
+ else //no children
+ SkDebugf("/>\n");
+}
+
+void SkGroup::dumpEvents() {
+ for (SkADrawable** ptr = fChildren.begin(); ptr < fChildren.end(); ptr++) {
+ SkADrawable* drawable = *ptr;
+ drawable->dumpEvents();
+ }
+}
+#endif
+
+bool SkGroup::enable(SkAnimateMaker& maker ) {
+ reset();
+ for (SkADrawable** ptr = fChildren.begin(); ptr < fChildren.end(); ptr++) {
+ SkADrawable* drawable = *ptr;
+ if (ifCondition(maker, drawable, enableCondition) == false)
+ continue;
+ drawable->enable(maker);
+ }
+ return true; // skip add; already added so that scope is findable by children
+}
+
+int SkGroup::findGroup(SkADrawable* match, SkTDDrawableArray** list,
+ SkGroup** parent, SkGroup** found, SkTDDrawableArray** grandList) {
+ *list = &fChildren;
+ for (SkADrawable** ptr = fChildren.begin(); ptr < fChildren.end(); ptr++) {
+ SkADrawable* drawable = *ptr;
+ if (drawable->isGroup()) {
+ SkGroup* childGroup = (SkGroup*) drawable;
+ if (childGroup->fOriginal == match)
+ goto foundMatch;
+ }
+ if (drawable == match) {
+foundMatch:
+ *parent = this;
+ return (int) (ptr - fChildren.begin());
+ }
+ }
+ *grandList = &fChildren;
+ return SkDisplayList::SearchForMatch(match, list, parent, found, grandList);
+}
+
+bool SkGroup::hasEnable() const {
+ return true;
+}
+
+bool SkGroup::ifCondition(SkAnimateMaker& maker, SkADrawable*,
+ SkString& conditionString) {
+ if (conditionString.size() == 0)
+ return true;
+ int32_t result;
+ bool success = SkAnimatorScript::EvaluateInt(maker, this, conditionString.c_str(), &result);
+#ifdef SK_DUMP_ENABLED
+ if (maker.fDumpGConditions) {
+ SkDebugf("group: ");
+ dumpBase(&maker);
+ SkDebugf("condition=%s ", conditionString.c_str());
+ if (success == false)
+ SkDebugf("(script failed)\n");
+ else
+ SkDebugf("success=%s\n", result != 0 ? "true" : "false");
+ }
+#endif
+ return success && result != 0;
+}
+
+void SkGroup::initialize() {
+ for (SkADrawable** ptr = fChildren.begin(); ptr < fChildren.end(); ptr++) {
+ SkADrawable* drawable = *ptr;
+ if (drawable->isDrawable() == false)
+ continue;
+ drawable->initialize();
+ }
+}
+
+void SkGroup::markCopyClear(int index) {
+ if (index < 0)
+ index = fChildren.count();
+ fCopies[index >> 5] &= ~(1 << (index & 0x1f));
+}
+
+void SkGroup::markCopySet(int index) {
+ if (index < 0)
+ index = fChildren.count();
+ fCopies[index >> 5] |= 1 << (index & 0x1f);
+}
+
+void SkGroup::markCopySize(int index) {
+ if (index < 0)
+ index = fChildren.count() + 1;
+ int oldLongs = fCopies.count();
+ int newLongs = (index >> 5) + 1;
+ if (oldLongs < newLongs) {
+ fCopies.setCount(newLongs);
+ memset(&fCopies[oldLongs], 0, (newLongs - oldLongs) << 2);
+ }
+}
+
+void SkGroup::reset() {
+ if (fOriginal) // has been copied
+ return;
+ int index = 0;
+ int max = fCopies.count() << 5;
+ for (SkADrawable** ptr = fChildren.begin(); ptr < fChildren.end(); ptr++) {
+ if (index >= max || copySet(index) == false)
+ continue;
+ SkApply* apply = (SkApply*) *ptr;
+ SkASSERT(apply->isApply());
+ SkASSERT(apply->getScope());
+ *ptr = apply->getScope();
+ markCopyClear(index);
+ index++;
+ }
+}
+
+bool SkGroup::resolveIDs(SkAnimateMaker& maker, SkDisplayable* orig, SkApply* apply) {
+ SkGroup* original = (SkGroup*) orig;
+ SkTDDrawableArray& originalChildren = original->fChildren;
+ SkADrawable** originalPtr = originalChildren.begin();
+ SkADrawable** ptr = fChildren.begin();
+ SkADrawable** end = fChildren.end();
+ SkADrawable** origChild = ((SkGroup*) orig)->fChildren.begin();
+ while (ptr < end) {
+ SkADrawable* drawable = *ptr++;
+ maker.resolveID(drawable, *origChild++);
+ if (drawable->resolveIDs(maker, *originalPtr++, apply) == true)
+ return true; // failed
+ }
+ return false;
+}
+
+void SkGroup::setSteps(int steps) {
+ for (SkADrawable** ptr = fChildren.begin(); ptr < fChildren.end(); ptr++) {
+ SkADrawable* drawable = *ptr;
+ if (drawable->isDrawable() == false)
+ continue;
+ drawable->setSteps(steps);
+ }
+}
+
+#ifdef SK_DEBUG
+void SkGroup::validate() {
+ for (SkADrawable** ptr = fChildren.begin(); ptr < fChildren.end(); ptr++) {
+ SkADrawable* drawable = *ptr;
+ drawable->validate();
+ }
+}
+#endif
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkSave::fInfo[] = {
+ SK_MEMBER_INHERITED
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkSave);
+
+bool SkSave::draw(SkAnimateMaker& maker) {
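+    // Draw the children against a saved canvas and a copy of the current paint so that their
+    // changes do not leak out of the <save> scope.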
+ maker.fCanvas->save();
+ SkPaint* save = maker.fPaint;
+ SkPaint local = SkPaint(*maker.fPaint);
+ maker.fPaint = &local;
+ bool result = INHERITED::draw(maker);
+ maker.fPaint = save;
+ maker.fCanvas->restore();
+ return result;
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawGroup.h b/gfx/skia/skia/src/animator/SkDrawGroup.h
new file mode 100644
index 000000000..f1b7bb6e2
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawGroup.h
@@ -0,0 +1,72 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawGroup_DEFINED
+#define SkDrawGroup_DEFINED
+
+#include "SkADrawable.h"
+#include "SkIntArray.h"
+#include "SkMemberInfo.h"
+
+class SkGroup : public SkADrawable { //interface for schema element <g>
+public:
+ DECLARE_MEMBER_INFO(Group);
+ SkGroup();
+ virtual ~SkGroup();
+ bool addChild(SkAnimateMaker& , SkDisplayable* child) override;
+ bool contains(SkDisplayable* ) override;
+ SkGroup* copy();
+ SkBool copySet(int index);
+ SkDisplayable* deepCopy(SkAnimateMaker* ) override;
+ bool doEvent(SkDisplayEvent::Kind , SkEventState* state ) override;
+ bool draw(SkAnimateMaker& ) override;
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+ virtual void dumpDrawables(SkAnimateMaker* );
+ void dumpEvents() override;
+#endif
+ int findGroup(SkADrawable* drawable, SkTDDrawableArray** list,
+ SkGroup** parent, SkGroup** found, SkTDDrawableArray** grandList);
+ bool enable(SkAnimateMaker& ) override;
+ SkTDDrawableArray* getChildren() { return &fChildren; }
+ SkGroup* getOriginal() { return fOriginal; }
+ bool hasEnable() const override;
+ void initialize() override;
+ SkBool isACopy() { return fOriginal != nullptr; }
+ void markCopyClear(int index);
+ void markCopySet(int index);
+ void markCopySize(int index);
+ bool markedForDelete(int index) const { return (fCopies[index >> 5] & 1 << (index & 0x1f)) == 0; }
+ void reset();
+ bool resolveIDs(SkAnimateMaker& maker, SkDisplayable* original, SkApply* ) override;
+ void setSteps(int steps) override;
+#ifdef SK_DEBUG
+ void validate() override;
+#endif
+protected:
+ bool ifCondition(SkAnimateMaker& maker, SkADrawable* drawable,
+ SkString& conditionString);
+ SkString condition;
+ SkString enableCondition;
+ SkTDDrawableArray fChildren;
+ SkTDDrawableArray* fParentList;
+ SkTDIntArray fCopies;
+ SkGroup* fOriginal;
+private:
+ typedef SkADrawable INHERITED;
+};
+
+class SkSave: public SkGroup {
+ DECLARE_MEMBER_INFO(Save);
+ bool draw(SkAnimateMaker& ) override;
+private:
+ typedef SkGroup INHERITED;
+};
+
+#endif // SkDrawGroup_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawLine.cpp b/gfx/skia/skia/src/animator/SkDrawLine.cpp
new file mode 100644
index 000000000..d0ae7d929
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawLine.cpp
@@ -0,0 +1,35 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawLine.h"
+#include "SkAnimateMaker.h"
+#include "SkCanvas.h"
+#include "SkPaint.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkLine::fInfo[] = {
+ SK_MEMBER(x1, Float),
+ SK_MEMBER(x2, Float),
+ SK_MEMBER(y1, Float),
+ SK_MEMBER(y2, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkLine);
+
+SkLine::SkLine() : x1(0), x2(0), y1(0), y2(0) {
+}
+
+bool SkLine::draw(SkAnimateMaker& maker) {
+ SkBoundableAuto boundable(this, maker);
+ maker.fCanvas->drawLine(x1, y1, x2, y2, *maker.fPaint);
+ return false;
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawLine.h b/gfx/skia/skia/src/animator/SkDrawLine.h
new file mode 100644
index 000000000..996964a89
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawLine.h
@@ -0,0 +1,28 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawLine_DEFINED
+#define SkDrawLine_DEFINED
+
+#include "SkBoundable.h"
+#include "SkMemberInfo.h"
+
+class SkLine : public SkBoundable {
+ DECLARE_MEMBER_INFO(Line);
+ SkLine();
+ bool draw(SkAnimateMaker& ) override;
+private:
+ SkScalar x1;
+ SkScalar x2;
+ SkScalar y1;
+ SkScalar y2;
+ typedef SkBoundable INHERITED;
+};
+
+#endif // SkDrawLine_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawMatrix.cpp b/gfx/skia/skia/src/animator/SkDrawMatrix.cpp
new file mode 100644
index 000000000..890a3296a
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawMatrix.cpp
@@ -0,0 +1,268 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawMatrix.h"
+#include "SkAnimateMaker.h"
+#include "SkCanvas.h"
+#include "SkPaint.h"
+#include "SkParse.h"
+#include "SkMatrixParts.h"
+#include "SkScript.h"
+#include "SkTypedArray.h"
+
+enum SkDrawMatrix_Properties {
+ SK_PROPERTY(perspectX),
+ SK_PROPERTY(perspectY),
+ SK_PROPERTY(rotate),
+ SK_PROPERTY(scale),
+ SK_PROPERTY(scaleX),
+ SK_PROPERTY(scaleY),
+ SK_PROPERTY(skewX),
+ SK_PROPERTY(skewY),
+ SK_PROPERTY(translate),
+ SK_PROPERTY(translateX),
+ SK_PROPERTY(translateY)
+};
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawMatrix::fInfo[] = {
+ SK_MEMBER_ARRAY(matrix, Float),
+ SK_MEMBER_PROPERTY(perspectX, Float),
+ SK_MEMBER_PROPERTY(perspectY, Float),
+ SK_MEMBER_PROPERTY(rotate, Float),
+ SK_MEMBER_PROPERTY(scale, Float),
+ SK_MEMBER_PROPERTY(scaleX, Float),
+ SK_MEMBER_PROPERTY(scaleY, Float),
+ SK_MEMBER_PROPERTY(skewX, Float),
+ SK_MEMBER_PROPERTY(skewY, Float),
+ SK_MEMBER_PROPERTY(translate, Point),
+ SK_MEMBER_PROPERTY(translateX, Float),
+ SK_MEMBER_PROPERTY(translateY, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawMatrix);
+
+SkDrawMatrix::SkDrawMatrix() : fChildHasID(false), fDirty(false) {
+ fConcat.reset();
+ fMatrix.reset();
+}
+
+SkDrawMatrix::~SkDrawMatrix() {
+ for (SkMatrixPart** part = fParts.begin(); part < fParts.end(); part++)
+ delete *part;
+}
+
+bool SkDrawMatrix::addChild(SkAnimateMaker& maker, SkDisplayable* child) {
+ SkASSERT(child && child->isMatrixPart());
+ SkMatrixPart* part = (SkMatrixPart*) child;
+ *fParts.append() = part;
+ if (part->add())
+ maker.setErrorCode(SkDisplayXMLParserError::kErrorAddingToMatrix);
+ return true;
+}
+
+bool SkDrawMatrix::childrenNeedDisposing() const {
+ return false;
+}
+
+SkDisplayable* SkDrawMatrix::deepCopy(SkAnimateMaker* maker) {
+ SkDrawMatrix* copy = (SkDrawMatrix*)
+ SkDisplayType::CreateInstance(maker, SkType_Matrix);
+ SkASSERT(fParts.count() == 0);
+ copy->fMatrix = fMatrix;
+ copy->fConcat = fConcat;
+ return copy;
+}
+
+void SkDrawMatrix::dirty() {
+ fDirty = true;
+}
+
+bool SkDrawMatrix::draw(SkAnimateMaker& maker) {
+ SkMatrix& concat = getMatrix();
+ maker.fCanvas->concat(concat);
+ return false;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkDrawMatrix::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ if (fMatrix.isIdentity()) {
+ SkDebugf("matrix=\"identity\"/>\n");
+ return;
+ }
+ SkScalar result;
+ result = fMatrix[SkMatrix::kMScaleX];
+ if (result != SK_Scalar1)
+ SkDebugf("sx=\"%g\" ", SkScalarToFloat(result));
+ result = fMatrix.getScaleY();
+ if (result != SK_Scalar1)
+ SkDebugf("sy=\"%g\" ", SkScalarToFloat(result));
+ result = fMatrix.getSkewX();
+ if (result)
+ SkDebugf("skew-x=\"%g\" ", SkScalarToFloat(result));
+ result = fMatrix.getSkewY();
+ if (result)
+ SkDebugf("skew-y=\"%g\" ", SkScalarToFloat(result));
+ result = fMatrix.getTranslateX();
+ if (result)
+ SkDebugf("tx=\"%g\" ", SkScalarToFloat(result));
+ result = fMatrix.getTranslateY();
+ if (result)
+ SkDebugf("ty=\"%g\" ", SkScalarToFloat(result));
+ result = fMatrix.getPerspX();
+ if (result)
+ SkDebugf("perspect-x=\"%g\" ", SkScalarToFloat(result));
+ result = fMatrix.getPerspY();
+ if (result)
+ SkDebugf("perspect-y=\"%g\" ", SkScalarToFloat(result));
+ SkDebugf("/>\n");
+}
+#endif
+
+SkMatrix& SkDrawMatrix::getMatrix() {
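+ // Rebuild fMatrix from the accumulated matrix parts only when marked dirty; otherwise return the cached concatenation.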
+ if (fDirty == false)
+ return fConcat;
+ fMatrix.reset();
+ for (SkMatrixPart** part = fParts.begin(); part < fParts.end(); part++) {
+ (*part)->add();
+ fConcat = fMatrix;
+ }
+ fDirty = false;
+ return fConcat;
+}
+
+bool SkDrawMatrix::getProperty(int index, SkScriptValue* value) const {
+ value->fType = SkType_Float;
+ SkScalar result;
+ switch (index) {
+ case SK_PROPERTY(perspectX):
+ result = fMatrix.getPerspX();
+ break;
+ case SK_PROPERTY(perspectY):
+ result = fMatrix.getPerspY();
+ break;
+ case SK_PROPERTY(scaleX):
+ result = fMatrix.getScaleX();
+ break;
+ case SK_PROPERTY(scaleY):
+ result = fMatrix.getScaleY();
+ break;
+ case SK_PROPERTY(skewX):
+ result = fMatrix.getSkewX();
+ break;
+ case SK_PROPERTY(skewY):
+ result = fMatrix.getSkewY();
+ break;
+ case SK_PROPERTY(translateX):
+ result = fMatrix.getTranslateX();
+ break;
+ case SK_PROPERTY(translateY):
+ result = fMatrix.getTranslateY();
+ break;
+ default:
+// SkASSERT(0);
+ return false;
+ }
+ value->fOperand.fScalar = result;
+ return true;
+}
+
+void SkDrawMatrix::initialize() {
+ fConcat = fMatrix;
+}
+
+void SkDrawMatrix::onEndElement(SkAnimateMaker& ) {
+ if (matrix.count() > 0) {
+ SkScalar* vals = matrix.begin();
+ fMatrix.setScaleX(vals[0]);
+ fMatrix.setSkewX(vals[1]);
+ fMatrix.setTranslateX(vals[2]);
+ fMatrix.setSkewY(vals[3]);
+ fMatrix.setScaleY(vals[4]);
+ fMatrix.setTranslateY(vals[5]);
+ fMatrix.setPerspX(vals[6]);
+ fMatrix.setPerspY(vals[7]);
+// fMatrix.setPerspW(vals[8]);
+ goto setConcat;
+ }
+ if (fChildHasID == false) {
+ {
+ for (SkMatrixPart** part = fParts.begin(); part < fParts.end(); part++)
+ delete *part;
+ }
+ fParts.reset();
+setConcat:
+ fConcat = fMatrix;
+ fDirty = false;
+ }
+}
+
+void SkDrawMatrix::setChildHasID() {
+ fChildHasID = true;
+}
+
+bool SkDrawMatrix::setProperty(int index, SkScriptValue& scriptValue) {
+ SkScalar number = scriptValue.fOperand.fScalar;
+ switch (index) {
+ case SK_PROPERTY(translate):
+ // SkScalar xy[2];
+ SkASSERT(scriptValue.fType == SkType_Array);
+ SkASSERT(scriptValue.fOperand.fArray->getType() == SkType_Float);
+ SkASSERT(scriptValue.fOperand.fArray->count() == 2);
+ // SkParse::FindScalars(scriptValue.fOperand.fString->c_str(), xy, 2);
+ fMatrix.setTranslateX((*scriptValue.fOperand.fArray)[0].fScalar);
+ fMatrix.setTranslateY((*scriptValue.fOperand.fArray)[1].fScalar);
+ return true;
+ case SK_PROPERTY(perspectX):
+ fMatrix.setPerspX(number);
+ break;
+ case SK_PROPERTY(perspectY):
+ fMatrix.setPerspY(number);
+ break;
+ case SK_PROPERTY(rotate): {
+ SkMatrix temp;
+ temp.setRotate(number, 0, 0);
+ fMatrix.setScaleX(temp.getScaleX());
+ fMatrix.setScaleY(temp.getScaleY());
+ fMatrix.setSkewX(temp.getSkewX());
+ fMatrix.setSkewY(temp.getSkewY());
+ } break;
+ case SK_PROPERTY(scale):
+ fMatrix.setScaleX(number);
+ fMatrix.setScaleY(number);
+ break;
+ case SK_PROPERTY(scaleX):
+ fMatrix.setScaleX(number);
+ break;
+ case SK_PROPERTY(scaleY):
+ fMatrix.setScaleY(number);
+ break;
+ case SK_PROPERTY(skewX):
+ fMatrix.setSkewX(number);
+ break;
+ case SK_PROPERTY(skewY):
+ fMatrix.setSkewY(number);
+ break;
+ case SK_PROPERTY(translateX):
+ fMatrix.setTranslateX(number);
+ break;
+ case SK_PROPERTY(translateY):
+ fMatrix.setTranslateY(number);
+ break;
+ default:
+ SkASSERT(0);
+ return false;
+ }
+ fConcat = fMatrix;
+ return true;
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawMatrix.h b/gfx/skia/skia/src/animator/SkDrawMatrix.h
new file mode 100644
index 000000000..df17a9b75
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawMatrix.h
@@ -0,0 +1,74 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawMatrix_DEFINED
+#define SkDrawMatrix_DEFINED
+
+#include "SkADrawable.h"
+#include "SkMatrix.h"
+#include "SkMemberInfo.h"
+#include "SkIntArray.h"
+
+class SkMatrixPart;
+
+class SkDrawMatrix : public SkADrawable {
+ DECLARE_DRAW_MEMBER_INFO(Matrix);
+ SkDrawMatrix();
+ virtual ~SkDrawMatrix();
+ bool addChild(SkAnimateMaker& , SkDisplayable* child) override;
+ bool childrenNeedDisposing() const override;
+ void dirty() override;
+ bool draw(SkAnimateMaker& ) override;
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+ SkMatrix& getMatrix();
+ bool getProperty(int index, SkScriptValue* value) const override;
+ void initialize() override;
+ void onEndElement(SkAnimateMaker& ) override;
+ void setChildHasID() override;
+ bool setProperty(int index, SkScriptValue& ) override;
+
+ void concat(SkMatrix& inMatrix) {
+ fConcat.preConcat(inMatrix);
+ }
+
+ SkDisplayable* deepCopy(SkAnimateMaker* ) override;
+
+
+ void rotate(SkScalar degrees, SkPoint& center) {
+ fMatrix.preRotate(degrees, center.fX, center.fY);
+ }
+
+ void set(SkMatrix& src) {
+ fMatrix.preConcat(src);
+ }
+
+ void scale(SkScalar scaleX, SkScalar scaleY, SkPoint& center) {
+ fMatrix.preScale(scaleX, scaleY, center.fX, center.fY);
+ }
+
+ void skew(SkScalar skewX, SkScalar skewY, SkPoint& center) {
+ fMatrix.preSkew(skewX, skewY, center.fX, center.fY);
+ }
+
+ void translate(SkScalar x, SkScalar y) {
+ fMatrix.preTranslate(x, y);
+ }
+private:
+ SkTDScalarArray matrix;
+ SkMatrix fConcat;
+ SkMatrix fMatrix;
+ SkTDMatrixPartArray fParts;
+ SkBool8 fChildHasID;
+ SkBool8 fDirty;
+ typedef SkADrawable INHERITED;
+};
+
+#endif // SkDrawMatrix_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawOval.cpp b/gfx/skia/skia/src/animator/SkDrawOval.cpp
new file mode 100644
index 000000000..e5efa7d53
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawOval.cpp
@@ -0,0 +1,28 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawOval.h"
+#include "SkAnimateMaker.h"
+#include "SkCanvas.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkOval::fInfo[] = {
+ SK_MEMBER_INHERITED,
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkOval);
+
+bool SkOval::draw(SkAnimateMaker& maker) {
+ SkBoundableAuto boundable(this, maker);
+ maker.fCanvas->drawOval(fRect, *maker.fPaint);
+ return false;
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawOval.h b/gfx/skia/skia/src/animator/SkDrawOval.h
new file mode 100644
index 000000000..e4d471222
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawOval.h
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawOval_DEFINED
+#define SkDrawOval_DEFINED
+
+#include "SkDrawRectangle.h"
+
+class SkOval : public SkDrawRect {
+ DECLARE_MEMBER_INFO(Oval);
+ bool draw(SkAnimateMaker& ) override;
+private:
+ typedef SkDrawRect INHERITED;
+};
+
+#endif // SkDrawOval_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawPaint.cpp b/gfx/skia/skia/src/animator/SkDrawPaint.cpp
new file mode 100644
index 000000000..1026630eb
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawPaint.cpp
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkDrawPaint.h"
+#include "SkAnimateMaker.h"
+#include "SkDrawColor.h"
+#include "SkDrawShader.h"
+#include "SkMaskFilter.h"
+#include "SkPaintPart.h"
+#include "SkPathEffect.h"
+
+enum SkPaint_Functions {
+ SK_FUNCTION(measureText)
+};
+
+enum SkPaint_Properties {
+ SK_PROPERTY(ascent),
+ SK_PROPERTY(descent)
+};
+
+// !!! in the future, this could be compiled by build-condensed-info into an array of parameters
+// with a lookup table to find the first parameter -- for now, it is iteratively searched through
+const SkFunctionParamType SkDrawPaint::fFunctionParameters[] = {
+ (SkFunctionParamType) SkType_String,
+ (SkFunctionParamType) 0 // terminator for parameter list (there may be multiple parameter lists)
+};
+
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawPaint::fInfo[] = {
+ SK_MEMBER(antiAlias, Boolean),
+ SK_MEMBER_PROPERTY(ascent, Float),
+ SK_MEMBER(color, Color),
+ SK_MEMBER_PROPERTY(descent, Float),
+ SK_MEMBER(fakeBold, Boolean),
+ SK_MEMBER(filterBitmap, Boolean),
+ SK_MEMBER(linearText, Boolean),
+ SK_MEMBER(maskFilter, MaskFilter),
+ SK_MEMBER_FUNCTION(measureText, Float),
+ SK_MEMBER(pathEffect, PathEffect),
+ SK_MEMBER(shader, Shader),
+ SK_MEMBER(strikeThru, Boolean),
+ SK_MEMBER(stroke, Boolean),
+ SK_MEMBER(strokeCap, Cap),
+ SK_MEMBER(strokeJoin, Join),
+ SK_MEMBER(strokeMiter, Float),
+ SK_MEMBER(strokeWidth, Float),
+ SK_MEMBER(style, Style),
+ SK_MEMBER(textAlign, Align),
+ SK_MEMBER(textScaleX, Float),
+ SK_MEMBER(textSize, Float),
+ SK_MEMBER(textSkewX, Float),
+ SK_MEMBER(typeface, Typeface),
+ SK_MEMBER(underline, Boolean),
+ SK_MEMBER(xfermode, Xfermode)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawPaint);
+
+SkDrawPaint::SkDrawPaint() : antiAlias(-1), color(nullptr), fakeBold(-1), filterBitmap(-1),
+ linearText(-1), maskFilter((SkDrawMaskFilter*) -1), pathEffect((SkDrawPathEffect*) -1),
+ shader((SkDrawShader*) -1), strikeThru(-1), stroke(-1),
+ strokeCap((SkPaint::Cap) -1), strokeJoin((SkPaint::Join) -1), strokeMiter(SK_ScalarNaN),
+ strokeWidth(SK_ScalarNaN), style((SkPaint::Style) -1),
+ textAlign((SkPaint::Align) -1), textScaleX(SK_ScalarNaN), textSize(SK_ScalarNaN),
+ textSkewX(SK_ScalarNaN), typeface((SkDrawTypeface*) -1),
+ underline(-1), xfermode((SkXfermode::Mode) -1), fOwnsColor(false), fOwnsMaskFilter(false),
+ fOwnsPathEffect(false), fOwnsShader(false), fOwnsTypeface(false) {
+}
+
+SkDrawPaint::~SkDrawPaint() {
+ if (fOwnsColor)
+ delete color;
+ if (fOwnsMaskFilter)
+ delete maskFilter;
+ if (fOwnsPathEffect)
+ delete pathEffect;
+ if (fOwnsShader)
+ delete shader;
+ if (fOwnsTypeface)
+ delete typeface;
+}
+
+bool SkDrawPaint::add(SkAnimateMaker* maker, SkDisplayable* child) {
+ SkASSERT(child && child->isPaintPart());
+ SkPaintPart* part = (SkPaintPart*) child;
+ if (part->add() && maker)
+ maker->setErrorCode(SkDisplayXMLParserError::kErrorAddingToPaint);
+ return true;
+}
+
+SkDisplayable* SkDrawPaint::deepCopy(SkAnimateMaker* maker) {
+ SkDrawColor* tempColor = color;
+ color = nullptr;
+ SkDrawPaint* copy = (SkDrawPaint*) INHERITED::deepCopy(maker);
+ color = tempColor;
+ tempColor = (SkDrawColor*) color->deepCopy(maker);
+ tempColor->setParent(copy);
+ tempColor->add();
+ copy->fOwnsColor = true;
+ return copy;
+}
+
+bool SkDrawPaint::draw(SkAnimateMaker& maker) {
+ SkPaint* paint = maker.fPaint;
+ setupPaint(paint);
+ return false;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkDrawPaint::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ dumpAttrs(maker);
+ bool closedYet = false;
+ SkDisplayList::fIndent += 4;
+ // should this also check that maskFilter is non-null (maskFilter && ...)?
+ if (maskFilter != (SkDrawMaskFilter*)-1) {
+ SkDebugf(">\n");
+ maskFilter->dump(maker);
+ closedYet = true;
+ }
+ if (pathEffect != (SkDrawPathEffect*) -1) {
+ if (closedYet == false) {
+ SkDebugf(">\n");
+ closedYet = true;
+ }
+ pathEffect->dump(maker);
+ }
+ if (fOwnsTypeface) {
+ if (closedYet == false) {
+ SkDebugf(">\n");
+ closedYet = true;
+ }
+ typeface->dump(maker);
+ }
+ SkDisplayList::fIndent -= 4;
+ dumpChildren(maker, closedYet);
+}
+#endif
+
+void SkDrawPaint::executeFunction(SkDisplayable* target, int index,
+ SkTDArray<SkScriptValue>& parameters, SkDisplayTypes type,
+ SkScriptValue* scriptValue) {
+ if (scriptValue == nullptr)
+ return;
+ SkASSERT(target == this);
+ switch (index) {
+ case SK_FUNCTION(measureText): {
+ SkASSERT(parameters.count() == 1);
+ SkASSERT(type == SkType_Float);
+ SkPaint paint;
+ setupPaint(&paint);
+ scriptValue->fType = SkType_Float;
+ SkASSERT(parameters[0].fType == SkType_String);
+ scriptValue->fOperand.fScalar = paint.measureText(parameters[0].fOperand.fString->c_str(),
+ parameters[0].fOperand.fString->size());
+// SkDebugf("measureText: %s = %g\n", parameters[0].fOperand.fString->c_str(),
+// scriptValue->fOperand.fScalar / 65536.0f);
+ } break;
+ default:
+ SkASSERT(0);
+ }
+}
+
+const SkFunctionParamType* SkDrawPaint::getFunctionsParameters() {
+ return fFunctionParameters;
+}
+
+bool SkDrawPaint::getProperty(int index, SkScriptValue* value) const {
+ SkPaint::FontMetrics metrics;
+ SkPaint paint;
+ setupPaint(&paint);
+ paint.getFontMetrics(&metrics);
+ switch (index) {
+ case SK_PROPERTY(ascent):
+ value->fOperand.fScalar = metrics.fAscent;
+ break;
+ case SK_PROPERTY(descent):
+ value->fOperand.fScalar = metrics.fDescent;
+ break;
+ // should consider returning fLeading as well (or roll it into ascent/descent somehow)
+ default:
+ SkASSERT(0);
+ return false;
+ }
+ value->fType = SkType_Float;
+ return true;
+}
+
+bool SkDrawPaint::resolveIDs(SkAnimateMaker& maker, SkDisplayable* origDisp, SkApply* ) {
+ SkASSERT(origDisp->isPaint());
+ SkDrawPaint* original = (SkDrawPaint*) origDisp;
+ if (fOwnsColor && maker.resolveID(color, original->color) == false)
+ return true;
+ if (fOwnsMaskFilter && maker.resolveID(maskFilter, original->maskFilter) == false)
+ return true;
+ if (fOwnsPathEffect && maker.resolveID(pathEffect, original->pathEffect) == false)
+ return true;
+ if (fOwnsShader && maker.resolveID(shader, original->shader) == false)
+ return true;
+ if (fOwnsTypeface && maker.resolveID(typeface, original->typeface) == false)
+ return true;
+ return false; // succeeded
+}
+
+void SkDrawPaint::setupPaint(SkPaint* paint) const {
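+ // Members left at their sentinel values (-1 for flags and enums, NaN for scalars, (T*) -1 for paint parts)
+ // were never specified in the XML, so the corresponding paint setting is left untouched; nullptr parts clear it.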
+ if (antiAlias != -1)
+ paint->setAntiAlias(SkToBool(antiAlias));
+ if (color != nullptr)
+ paint->setColor(color->getColor());
+ if (fakeBold != -1)
+ paint->setFakeBoldText(SkToBool(fakeBold));
+ if (filterBitmap != -1)
+ paint->setFilterQuality(filterBitmap ? kLow_SkFilterQuality : kNone_SkFilterQuality);
+ // stroke is legacy; style setting if present overrides stroke
+ if (stroke != -1)
+ paint->setStyle(SkToBool(stroke) ? SkPaint::kStroke_Style : SkPaint::kFill_Style);
+ if (style != -1)
+ paint->setStyle((SkPaint::Style) style);
+ if (linearText != -1)
+ paint->setLinearText(SkToBool(linearText));
+ if (maskFilter == nullptr)
+ paint->setMaskFilter(nullptr);
+ else if (maskFilter != (SkDrawMaskFilter*) -1)
+ paint->setMaskFilter(sk_sp<SkMaskFilter>(maskFilter->getMaskFilter()));
+ if (pathEffect == nullptr)
+ paint->setPathEffect(nullptr);
+ else if (pathEffect != (SkDrawPathEffect*) -1)
+ paint->setPathEffect(sk_ref_sp(pathEffect->getPathEffect()));
+ if (shader == nullptr)
+ paint->setShader(nullptr);
+ else if (shader != (SkDrawShader*) -1)
+ paint->setShader(sk_ref_sp(shader->getShader()));
+ if (strikeThru != -1)
+ paint->setStrikeThruText(SkToBool(strikeThru));
+ if (strokeCap != -1)
+ paint->setStrokeCap((SkPaint::Cap) strokeCap);
+ if (strokeJoin != -1)
+ paint->setStrokeJoin((SkPaint::Join) strokeJoin);
+ if (SkScalarIsNaN(strokeMiter) == false)
+ paint->setStrokeMiter(strokeMiter);
+ if (SkScalarIsNaN(strokeWidth) == false)
+ paint->setStrokeWidth(strokeWidth);
+ if (textAlign != -1)
+ paint->setTextAlign((SkPaint::Align) textAlign);
+ if (SkScalarIsNaN(textScaleX) == false)
+ paint->setTextScaleX(textScaleX);
+ if (SkScalarIsNaN(textSize) == false)
+ paint->setTextSize(textSize);
+ if (SkScalarIsNaN(textSkewX) == false)
+ paint->setTextSkewX(textSkewX);
+ if (typeface == nullptr)
+ paint->setTypeface(nullptr);
+ else if (typeface != (SkDrawTypeface*) -1)
+ paint->setTypeface(typeface->getTypeface());
+ if (underline != -1)
+ paint->setUnderlineText(SkToBool(underline));
+ if (xfermode != -1)
+ paint->setXfermodeMode((SkXfermode::Mode) xfermode);
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawPaint.h b/gfx/skia/skia/src/animator/SkDrawPaint.h
new file mode 100644
index 000000000..bef52527b
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawPaint.h
@@ -0,0 +1,79 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawPaint_DEFINED
+#define SkDrawPaint_DEFINED
+
+#include "SkADrawable.h"
+#include "SkIntArray.h"
+#include "SkMemberInfo.h"
+#include "SkPaint.h"
+#include "SkXfermode.h"
+
+class SkDrawMaskFilter;
+class SkDrawPathEffect;
+class SkDrawShader;
+class SkTransferMode;
+class SkDrawTypeface;
+
+class SkDrawPaint : public SkADrawable {
+ DECLARE_DRAW_MEMBER_INFO(Paint);
+ SkDrawPaint();
+ virtual ~SkDrawPaint();
+ virtual bool add(SkAnimateMaker* , SkDisplayable* child);
+ SkDisplayable* deepCopy(SkAnimateMaker* ) override;
+ bool draw(SkAnimateMaker& ) override;
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+ void executeFunction(SkDisplayable* target, int index,
+ SkTDArray<SkScriptValue>& parameters, SkDisplayTypes type,
+ SkScriptValue* ) override;
+ const SkFunctionParamType* getFunctionsParameters() override;
+ bool getProperty(int index, SkScriptValue* value) const override;
+ bool resolveIDs(SkAnimateMaker& maker, SkDisplayable* original, SkApply* apply) override;
+protected:
+ static const SkFunctionParamType fFunctionParameters[];
+ void setupPaint(SkPaint* paint) const;
+public:
+ SkBool antiAlias;
+ SkDrawColor* color;
+ SkBool fakeBold;
+ SkBool filterBitmap;
+ SkBool linearText;
+ SkDrawMaskFilter* maskFilter;
+ SkDrawPathEffect* pathEffect;
+ SkDrawShader* shader;
+ SkBool strikeThru;
+ SkBool stroke;
+ int /*SkPaint::Cap*/ strokeCap;
+ int /*SkPaint::Join */ strokeJoin;
+ SkScalar strokeMiter;
+ SkScalar strokeWidth;
+ int /* SkPaint::Style */ style;
+ int /* SkPaint::Align */ textAlign;
+ SkScalar textScaleX;
+ SkScalar textSize;
+ SkScalar textSkewX;
+ SkDrawTypeface* typeface;
+ SkBool underline;
+ int /*SkXfermode::Modes*/ xfermode;
+ SkBool8 fOwnsColor;
+ SkBool8 fOwnsMaskFilter;
+ SkBool8 fOwnsPathEffect;
+ SkBool8 fOwnsShader;
+ SkBool8 fOwnsTransferMode;
+ SkBool8 fOwnsTypeface;
+private:
+ typedef SkADrawable INHERITED;
+ friend class SkTextToPath;
+ friend class SkSaveLayer;
+};
+
+#endif // SkDrawPaint_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawPath.cpp b/gfx/skia/skia/src/animator/SkDrawPath.cpp
new file mode 100644
index 000000000..c4c6f5caf
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawPath.cpp
@@ -0,0 +1,220 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawPath.h"
+#include "SkAnimateMaker.h"
+#include "SkCanvas.h"
+#include "SkMath.h"
+#include "SkMatrixParts.h"
+#include "SkPaint.h"
+#include "SkPathParts.h"
+
+enum SkPath_Properties {
+ SK_PROPERTY(fillType),
+ SK_PROPERTY(length)
+};
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawPath::fInfo[] = {
+ SK_MEMBER(d, String),
+ SK_MEMBER_PROPERTY(fillType, FillType),
+ SK_MEMBER_PROPERTY(length, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawPath);
+
+SkDrawPath::SkDrawPath()
+{
+ fParent = nullptr;
+ fLength = SK_ScalarNaN;
+ fChildHasID = false;
+ fDirty = false;
+}
+
+SkDrawPath::~SkDrawPath() {
+ for (SkPathPart** part = fParts.begin(); part < fParts.end(); part++)
+ delete *part;
+}
+
+bool SkDrawPath::addChild(SkAnimateMaker& maker, SkDisplayable* child) {
+ SkASSERT(child && child->isPathPart());
+ SkPathPart* part = (SkPathPart*) child;
+ *fParts.append() = part;
+ if (part->add())
+ maker.setErrorCode(SkDisplayXMLParserError::kErrorAddingToPath);
+ fDirty = false;
+ return true;
+}
+
+bool SkDrawPath::childrenNeedDisposing() const {
+ return false;
+}
+
+void SkDrawPath::dirty() {
+ fDirty = true;
+ fLength = SK_ScalarNaN;
+ if (fParent)
+ fParent->dirty();
+}
+
+bool SkDrawPath::draw(SkAnimateMaker& maker) {
+ SkPath& path = getPath();
+ SkBoundableAuto boundable(this, maker);
+ maker.fCanvas->drawPath(path, *maker.fPaint);
+ return false;
+}
+
+SkDisplayable* SkDrawPath::getParent() const {
+ return fParent;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkDrawPath::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ dumpAttrs(maker);
+ bool closedYet = false;
+ SkDisplayList::fIndent += 4;
+ for(SkPathPart** part = fParts.begin(); part < fParts.end(); part++) {
+ if (closedYet == false) {
+ SkDebugf(">\n");
+ closedYet = true;
+ }
+ (*part)->dump(maker);
+ }
+ SkDisplayList::fIndent -= 4;
+ if (closedYet)
+ dumpEnd(maker);
+ else
+ SkDebugf("/>\n");
+}
+#endif
+
+SkPath& SkDrawPath::getPath() {
+ if (fDirty == false)
+ return fPath;
+ if (d.size() > 0)
+ {
+ parseSVG();
+ d.reset();
+ }
+ else
+ {
+ fPath.reset();
+ for (SkPathPart** part = fParts.begin(); part < fParts.end(); part++)
+ (*part)->add();
+ }
+ fDirty = false;
+ return fPath;
+}
+
+void SkDrawPath::onEndElement(SkAnimateMaker& ) {
+ if (d.size() > 0) {
+ parseSVG();
+ d.reset();
+ fDirty = false;
+ return;
+ }
+ if (fChildHasID == false) {
+ for (SkPathPart** part = fParts.begin(); part < fParts.end(); part++)
+ delete *part;
+ fParts.reset();
+ fDirty = false;
+ }
+}
+
+bool SkDrawPath::getProperty(int index, SkScriptValue* value) const {
+ switch (index) {
+ case SK_PROPERTY(length):
+ if (SkScalarIsNaN(fLength)) {
+ const SkPath& path = ((SkDrawPath*) this)->getPath();
+ SkPathMeasure pathMeasure(path, false);
+ fLength = pathMeasure.getLength();
+ }
+ value->fType = SkType_Float;
+ value->fOperand.fScalar = fLength;
+ break;
+ case SK_PROPERTY(fillType):
+ value->fType = SkType_FillType;
+ value->fOperand.fS32 = (int) fPath.getFillType();
+ break;
+ default:
+ SkASSERT(0);
+ return false;
+ }
+ return true;
+}
+
+void SkDrawPath::setChildHasID() {
+ fChildHasID = true;
+}
+
+bool SkDrawPath::setParent(SkDisplayable* parent) {
+ fParent = parent;
+ return false;
+}
+
+bool SkDrawPath::setProperty(int index, SkScriptValue& value)
+{
+ switch (index) {
+ case SK_PROPERTY(fillType):
+ SkASSERT(value.fType == SkType_FillType);
+ SkASSERT(value.fOperand.fS32 >= SkPath::kWinding_FillType &&
+ value.fOperand.fS32 <= SkPath::kEvenOdd_FillType);
+ fPath.setFillType((SkPath::FillType) value.fOperand.fS32);
+ break;
+ default:
+ SkASSERT(0);
+ return false;
+ }
+ return true;
+}
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkPolyline::fInfo[] = {
+ SK_MEMBER_ARRAY(points, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkPolyline);
+
+bool SkPolyline::addChild(SkAnimateMaker& , SkDisplayable*) {
+ return false;
+}
+
+void SkPolyline::onEndElement(SkAnimateMaker& maker) {
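+ // 'points' holds interleaved x,y pairs; build an open path through them (SkPolygon closes it below).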
+ INHERITED::onEndElement(maker);
+ if (points.count() <= 0)
+ return;
+ fPath.reset();
+ fPath.moveTo(points[0], points[1]);
+ int count = points.count();
+ for (int index = 2; index < count; index += 2)
+ fPath.lineTo(points[index], points[index+1]);
+}
+
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkPolygon::fInfo[] = {
+ SK_MEMBER_INHERITED
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkPolygon);
+
+void SkPolygon::onEndElement(SkAnimateMaker& maker) {
+ INHERITED::onEndElement(maker);
+ fPath.close();
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawPath.h b/gfx/skia/skia/src/animator/SkDrawPath.h
new file mode 100644
index 000000000..81978fb7d
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawPath.h
@@ -0,0 +1,69 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawPath_DEFINED
+#define SkDrawPath_DEFINED
+
+#include "SkBoundable.h"
+#include "SkIntArray.h"
+#include "SkMemberInfo.h"
+#include "SkPath.h"
+
+class SkDrawPath : public SkBoundable {
+ DECLARE_DRAW_MEMBER_INFO(Path);
+ SkDrawPath();
+ virtual ~SkDrawPath();
+ bool addChild(SkAnimateMaker& , SkDisplayable* child) override;
+ bool childHasID() { return SkToBool(fChildHasID); }
+ bool childrenNeedDisposing() const override;
+ void dirty() override;
+ bool draw(SkAnimateMaker& ) override;
+ SkDisplayable* getParent() const override;
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+ SkPath& getPath();
+ bool getProperty(int index, SkScriptValue* value) const override;
+ bool setProperty(int index, SkScriptValue& value) override;
+ void onEndElement(SkAnimateMaker& ) override;
+ void setChildHasID() override;
+ bool setParent(SkDisplayable* parent) override;
+ bool isPath() const override { return true; }
+public:
+ SkPath fPath;
+protected:
+ void parseSVG();
+ SkString d;
+ SkTDPathPartArray fParts;
+ mutable SkScalar fLength;
+ SkDisplayable* fParent; // SkPolyToPoly or SkFromPath, for instance
+ SkBool8 fChildHasID;
+ SkBool8 fDirty;
+private:
+ typedef SkBoundable INHERITED;
+};
+
+class SkPolyline : public SkDrawPath {
+ DECLARE_MEMBER_INFO(Polyline);
+ bool addChild(SkAnimateMaker& , SkDisplayable*) override;
+ void onEndElement(SkAnimateMaker& ) override;
+protected:
+ SkTDScalarArray points;
+private:
+ typedef SkDrawPath INHERITED;
+};
+
+class SkPolygon : public SkPolyline {
+ DECLARE_MEMBER_INFO(Polygon);
+ void onEndElement(SkAnimateMaker& ) override;
+private:
+ typedef SkPolyline INHERITED;
+};
+
+#endif // SkDrawPath_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawPoint.cpp b/gfx/skia/skia/src/animator/SkDrawPoint.cpp
new file mode 100644
index 000000000..41a6be4ba
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawPoint.cpp
@@ -0,0 +1,44 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawPoint.h"
+#include "SkAnimateMaker.h"
+#include "SkCanvas.h"
+#include "SkPaint.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo Sk_Point::fInfo[] = {
+ SK_MEMBER_ALIAS(x, fPoint.fX, Float),
+ SK_MEMBER_ALIAS(y, fPoint.fY, Float)
+};
+
+#endif
+
+DEFINE_NO_VIRTUALS_GET_MEMBER(Sk_Point);
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawPoint::fInfo[] = {
+ SK_MEMBER_ALIAS(x, fPoint.fX, Float),
+ SK_MEMBER_ALIAS(y, fPoint.fY, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawPoint);
+
+SkDrawPoint::SkDrawPoint() {
+ fPoint.set(0, 0);
+}
+
+void SkDrawPoint::getBounds(SkRect* rect ) {
+ rect->fLeft = rect->fRight = fPoint.fX;
+ rect->fTop = rect->fBottom = fPoint.fY;
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawPoint.h b/gfx/skia/skia/src/animator/SkDrawPoint.h
new file mode 100644
index 000000000..03c8521bd
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawPoint.h
@@ -0,0 +1,33 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawPoint_DEFINED
+#define SkDrawPoint_DEFINED
+
+#include "SkBoundable.h"
+#include "SkMemberInfo.h"
+#include "SkPoint.h"
+
+struct Sk_Point {
+ DECLARE_NO_VIRTUALS_MEMBER_INFO(_Point);
+ Sk_Point();
+private:
+ SkPoint fPoint;
+};
+
+class SkDrawPoint : public SkDisplayable {
+ DECLARE_MEMBER_INFO(DrawPoint);
+ SkDrawPoint();
+ void getBounds(SkRect* ) override;
+private:
+ SkPoint fPoint;
+ typedef SkDisplayable INHERITED;
+};
+
+#endif // SkDrawPoint_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawRectangle.cpp b/gfx/skia/skia/src/animator/SkDrawRectangle.cpp
new file mode 100644
index 000000000..a89b67cfd
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawRectangle.cpp
@@ -0,0 +1,142 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawRectangle.h"
+#include "SkAnimateMaker.h"
+#include "SkCanvas.h"
+#include "SkMatrixParts.h"
+#include "SkPaint.h"
+#include "SkScript.h"
+
+enum SkRectangle_Properties {
+ SK_PROPERTY(height),
+ SK_PROPERTY(needsRedraw),
+ SK_PROPERTY(width)
+};
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawRect::fInfo[] = {
+ SK_MEMBER_ALIAS(bottom, fRect.fBottom, Float),
+ SK_MEMBER_PROPERTY(height, Float),
+ SK_MEMBER_ALIAS(left, fRect.fLeft, Float),
+ SK_MEMBER_PROPERTY(needsRedraw, Boolean),
+ SK_MEMBER_ALIAS(right, fRect.fRight, Float),
+ SK_MEMBER_ALIAS(top, fRect.fTop, Float),
+ SK_MEMBER_PROPERTY(width, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawRect);
+
+SkDrawRect::SkDrawRect() : fParent(nullptr) {
+ fRect.setEmpty();
+}
+
+void SkDrawRect::dirty() {
+ if (fParent)
+ fParent->dirty();
+}
+
+bool SkDrawRect::draw(SkAnimateMaker& maker) {
+ SkBoundableAuto boundable(this, maker);
+ maker.fCanvas->drawRect(fRect, *maker.fPaint);
+ return false;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkDrawRect::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ SkDebugf("left=\"%g\" top=\"%g\" right=\"%g\" bottom=\"%g\" />\n",
+ SkScalarToFloat(fRect.fLeft), SkScalarToFloat(fRect.fTop), SkScalarToFloat(fRect.fRight),
+ SkScalarToFloat(fRect.fBottom));
+}
+#endif
+
+SkDisplayable* SkDrawRect::getParent() const {
+ return fParent;
+}
+
+bool SkDrawRect::getProperty(int index, SkScriptValue* value) const {
+ SkScalar result;
+ switch (index) {
+ case SK_PROPERTY(height):
+ result = fRect.height();
+ break;
+ case SK_PROPERTY(needsRedraw):
+ value->fType = SkType_Boolean;
+ value->fOperand.fS32 = fBounds.isEmpty() == false;
+ return true;
+ case SK_PROPERTY(width):
+ result = fRect.width();
+ break;
+ default:
+ SkASSERT(0);
+ return false;
+ }
+ value->fType = SkType_Float;
+ value->fOperand.fScalar = result;
+ return true;
+}
+
+
+bool SkDrawRect::setParent(SkDisplayable* parent) {
+ fParent = parent;
+ return false;
+}
+
+bool SkDrawRect::setProperty(int index, SkScriptValue& value) {
+ SkScalar scalar = value.fOperand.fScalar;
+ switch (index) {
+ case SK_PROPERTY(height):
+ SkASSERT(value.fType == SkType_Float);
+ fRect.fBottom = scalar + fRect.fTop;
+ return true;
+ case SK_PROPERTY(needsRedraw):
+ return false;
+ case SK_PROPERTY(width):
+ SkASSERT(value.fType == SkType_Float);
+ fRect.fRight = scalar + fRect.fLeft;
+ return true;
+ default:
+ SkASSERT(0);
+ }
+ return false;
+}
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkRoundRect::fInfo[] = {
+ SK_MEMBER_INHERITED,
+ SK_MEMBER(rx, Float),
+ SK_MEMBER(ry, Float),
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkRoundRect);
+
+SkRoundRect::SkRoundRect() : rx(0), ry(0) {
+}
+
+bool SkRoundRect::draw(SkAnimateMaker& maker) {
+ SkBoundableAuto boundable(this, maker);
+ maker.fCanvas->drawRoundRect(fRect, rx, ry, *maker.fPaint);
+ return false;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkRoundRect::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ SkDebugf("left=\"%g\" top=\"%g\" right=\"%g\" bottom=\"%g\" rx=\"%g\" ry=\"%g\" />\n",
+ SkScalarToFloat(fRect.fLeft), SkScalarToFloat(fRect.fTop), SkScalarToFloat(fRect.fRight),
+ SkScalarToFloat(fRect.fBottom), SkScalarToFloat(rx), SkScalarToFloat(ry));
+}
+#endif
diff --git a/gfx/skia/skia/src/animator/SkDrawRectangle.h b/gfx/skia/skia/src/animator/SkDrawRectangle.h
new file mode 100644
index 000000000..036d52ed6
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawRectangle.h
@@ -0,0 +1,55 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawRectangle_DEFINED
+#define SkDrawRectangle_DEFINED
+
+#include "SkBoundable.h"
+#include "SkMemberInfo.h"
+#include "SkRect.h"
+
+class SkRectToRect;
+
+class SkDrawRect : public SkBoundable {
+ DECLARE_DRAW_MEMBER_INFO(Rect);
+ SkDrawRect();
+ void dirty() override;
+ bool draw(SkAnimateMaker& ) override;
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+ SkDisplayable* getParent() const override;
+ bool getProperty(int index, SkScriptValue* value) const override;
+ bool setParent(SkDisplayable* parent) override;
+ bool setProperty(int index, SkScriptValue& ) override;
+protected:
+ SkRect fRect;
+ SkDisplayable* fParent;
+private:
+ friend class SkDrawClip;
+ friend class SkRectToRect;
+ friend class SkSaveLayer;
+ typedef SkBoundable INHERITED;
+};
+
+class SkRoundRect : public SkDrawRect {
+ DECLARE_MEMBER_INFO(RoundRect);
+ SkRoundRect();
+ bool draw(SkAnimateMaker& ) override;
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+protected:
+ SkScalar rx;
+ SkScalar ry;
+private:
+ typedef SkDrawRect INHERITED;
+};
+
+#endif // SkDrawRectangle_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawSaveLayer.cpp b/gfx/skia/skia/src/animator/SkDrawSaveLayer.cpp
new file mode 100644
index 000000000..c9a4d9fea
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawSaveLayer.cpp
@@ -0,0 +1,76 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawSaveLayer.h"
+#include "SkAnimateMaker.h"
+#include "SkCanvas.h"
+#include "SkDrawPaint.h"
+#include "SkDrawRectangle.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkSaveLayer::fInfo[] = {
+ SK_MEMBER(bounds, Rect),
+ SK_MEMBER(paint, Paint)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkSaveLayer);
+
+SkSaveLayer::SkSaveLayer() : paint(nullptr), bounds(nullptr) {
+}
+
+SkSaveLayer::~SkSaveLayer(){
+}
+
+bool SkSaveLayer::draw(SkAnimateMaker& maker)
+{
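+ // Push a layer bounded by 'bounds', using this element's paint if present (otherwise the current paint),
+ // draw the group's children into it with a private copy of the paint, then restore.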
+ if (!bounds) {
+ return false;
+ }
+ SkPaint* save = maker.fPaint;
+ //paint is an SkDrawPaint
+ if (paint)
+ {
+ SkPaint realPaint;
+ paint->setupPaint(&realPaint);
+ maker.fCanvas->saveLayer(&bounds->fRect, &realPaint);
+ }
+ else
+ maker.fCanvas->saveLayer(&bounds->fRect, save);
+ SkPaint local = SkPaint(*maker.fPaint);
+ maker.fPaint = &local;
+ bool result = INHERITED::draw(maker);
+ maker.fPaint = save;
+ maker.fCanvas->restore();
+ return result;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkSaveLayer::dump(SkAnimateMaker* maker)
+{
+ dumpBase(maker);
+ // can SK_DUMP_ENABLED be defined without SK_DEBUG?
+#ifdef SK_DEBUG
+ if (paint)
+ SkDebugf("paint=\"%s\" ", paint->id);
+ if (bounds)
+ SkDebugf("bounds=\"%s\" ", bounds->id);
+#endif
+ dumpDrawables(maker);
+}
+#endif
+
+void SkSaveLayer::onEndElement(SkAnimateMaker& maker)
+{
+ if (!bounds)
+ maker.setErrorCode(SkDisplayXMLParserError::kSaveLayerNeedsBounds);
+ INHERITED::onEndElement(maker);
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawSaveLayer.h b/gfx/skia/skia/src/animator/SkDrawSaveLayer.h
new file mode 100644
index 000000000..cb9c9a908
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawSaveLayer.h
@@ -0,0 +1,36 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawSaveLayer_DEFINED
+#define SkDrawSaveLayer_DEFINED
+
+#include "SkDrawGroup.h"
+#include "SkMemberInfo.h"
+
+class SkDrawPaint;
+class SkDrawRect;
+
+class SkSaveLayer : public SkGroup {
+ DECLARE_MEMBER_INFO(SaveLayer);
+ SkSaveLayer();
+ virtual ~SkSaveLayer();
+ bool draw(SkAnimateMaker& ) override;
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+ void onEndElement(SkAnimateMaker& ) override;
+protected:
+ SkDrawPaint* paint;
+ SkDrawRect* bounds;
+private:
+ typedef SkGroup INHERITED;
+
+};
+
+#endif //SkDrawSaveLayer_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawShader.cpp b/gfx/skia/skia/src/animator/SkDrawShader.cpp
new file mode 100644
index 000000000..2868b9f4e
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawShader.cpp
@@ -0,0 +1,78 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawShader.h"
+#include "SkDrawBitmap.h"
+#include "SkDrawMatrix.h"
+#include "SkDrawPaint.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawShader::fInfo[] = {
+ SK_MEMBER(matrix, Matrix),
+ SK_MEMBER(tileMode, TileMode)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawShader);
+
+SkDrawShader::SkDrawShader() : matrix(nullptr),
+ tileMode(SkShader::kClamp_TileMode) {
+}
+
+bool SkDrawShader::add() {
+ if (fPaint->shader != (SkDrawShader*) -1)
+ return true;
+ fPaint->shader = this;
+ fPaint->fOwnsShader = true;
+ return false;
+}
+
+SkMatrix* SkDrawShader::getMatrix() {
+ return matrix ? &matrix->getMatrix() : nullptr;
+}
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawBitmapShader::fInfo[] = {
+ SK_MEMBER_INHERITED,
+ SK_MEMBER(filterBitmap, Boolean),
+ SK_MEMBER(image, BaseBitmap)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawBitmapShader);
+
+SkDrawBitmapShader::SkDrawBitmapShader() : filterBitmap(-1), image(nullptr) {}
+
+bool SkDrawBitmapShader::add() {
+ if (fPaint->shader != (SkDrawShader*) -1)
+ return true;
+ fPaint->shader = this;
+ fPaint->fOwnsShader = true;
+ return false;
+}
+
+SkShader* SkDrawBitmapShader::getShader() {
+ if (image == nullptr)
+ return nullptr;
+
+ // note: bitmap shader now supports independent tile modes for X and Y
+ // we pass the same to both, but later we should extend this flexibility
+ // to the xml (e.g. tileModeX="repeat" tileModeY="clamp")
+ //
+ // oops, bitmapshader no longer takes filterBitmap, but deduces it at
+ // draw-time from the paint
+ return SkShader::MakeBitmapShader(image->fBitmap,
+ (SkShader::TileMode) tileMode,
+ (SkShader::TileMode) tileMode,
+ getMatrix()).release();
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawShader.h b/gfx/skia/skia/src/animator/SkDrawShader.h
new file mode 100644
index 000000000..f7ef29d7d
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawShader.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDrawShader_DEFINED
+#define SkDrawShader_DEFINED
+
+#include "SkPaintPart.h"
+#include "SkShader.h"
+
+class SkBaseBitmap;
+
+class SkDrawBitmapShader : public SkDrawShader {
+ DECLARE_DRAW_MEMBER_INFO(BitmapShader);
+ SkDrawBitmapShader();
+ bool add() override;
+ SkShader* getShader() override;
+protected:
+ SkBool filterBitmap;
+ SkBaseBitmap* image;
+private:
+ typedef SkDrawShader INHERITED;
+};
+
+#endif // SkDrawShader_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawText.cpp b/gfx/skia/skia/src/animator/SkDrawText.cpp
new file mode 100644
index 000000000..e7e5facab
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawText.cpp
@@ -0,0 +1,55 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawText.h"
+#include "SkAnimateMaker.h"
+#include "SkCanvas.h"
+#include "SkPaint.h"
+
+enum SkText_Properties {
+ SK_PROPERTY(length)
+};
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkText::fInfo[] = {
+ SK_MEMBER_PROPERTY(length, Int),
+ SK_MEMBER(text, String),
+ SK_MEMBER(x, Float),
+ SK_MEMBER(y, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkText);
+
+SkText::SkText() : x(0), y(0) {
+}
+
+SkText::~SkText() {
+}
+
+bool SkText::draw(SkAnimateMaker& maker) {
+ SkBoundableAuto boundable(this, maker);
+ maker.fCanvas->drawText(text.c_str(), text.size(), x, y, *maker.fPaint);
+ return false;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkText::dump(SkAnimateMaker* maker) {
+ INHERITED::dump(maker);
+}
+#endif
+
+bool SkText::getProperty(int index, SkScriptValue* value) const {
+ SkASSERT(index == SK_PROPERTY(length));
+ value->fType = SkType_Int;
+ value->fOperand.fS32 = (int32_t) text.size();
+ return true;
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawText.h b/gfx/skia/skia/src/animator/SkDrawText.h
new file mode 100644
index 000000000..e7632d0ce
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawText.h
@@ -0,0 +1,36 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawText_DEFINED
+#define SkDrawText_DEFINED
+
+#include "SkBoundable.h"
+#include "SkMemberInfo.h"
+
+class SkText : public SkBoundable {
+ DECLARE_MEMBER_INFO(Text);
+ SkText();
+ virtual ~SkText();
+ bool draw(SkAnimateMaker& ) override;
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+ bool getProperty(int index, SkScriptValue* value) const override;
+ const char* getText() { return text.c_str(); }
+ size_t getSize() { return text.size(); }
+protected:
+ SkString text;
+ SkScalar x;
+ SkScalar y;
+private:
+ friend class SkTextToPath;
+ typedef SkBoundable INHERITED;
+};
+
+#endif // SkDrawText_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawTextBox.cpp b/gfx/skia/skia/src/animator/SkDrawTextBox.cpp
new file mode 100644
index 000000000..7a3251a26
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawTextBox.cpp
@@ -0,0 +1,80 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawTextBox.h"
+#include "SkAnimateMaker.h"
+#include "SkCanvas.h"
+#include "SkPaint.h"
+
+enum SkDrawTextBox_Properties {
+ foo = 100,
+ SK_PROPERTY(spacingAlign),
+ SK_PROPERTY(mode)
+};
+
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawTextBox::fInfo[] = {
+ SK_MEMBER_INHERITED,
+ SK_MEMBER(mode, TextBoxMode),
+ SK_MEMBER_ALIAS(spacingAdd, fSpacingAdd, Float),
+ SK_MEMBER(spacingAlign, TextBoxAlign),
+ SK_MEMBER_ALIAS(spacingMul, fSpacingMul, Float),
+ SK_MEMBER_ALIAS(text, fText, String)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawTextBox);
+
+SkDrawTextBox::SkDrawTextBox()
+{
+ fSpacingMul = SK_Scalar1;
+ fSpacingAdd = 0;
+ spacingAlign = SkTextBox::kStart_SpacingAlign;
+ mode = SkTextBox::kLineBreak_Mode;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkDrawTextBox::dump(SkAnimateMaker* maker)
+{
+ dumpBase(maker);
+ dumpAttrs(maker);
+ if (mode == 0)
+ SkDebugf("mode=\"oneLine\" ");
+ if (spacingAlign == 1)
+ SkDebugf("spacingAlign=\"center\" ");
+ else if (spacingAlign == 2)
+ SkDebugf("spacingAlign=\"end\" ");
+ SkDebugf("/>\n");
+}
+#endif
+
+bool SkDrawTextBox::getProperty(int index, SkScriptValue* value) const
+{
+ return this->INHERITED::getProperty(index, value);
+}
+
+bool SkDrawTextBox::setProperty(int index, SkScriptValue& scriptValue)
+{
+ return this->INHERITED::setProperty(index, scriptValue);
+}
+
+bool SkDrawTextBox::draw(SkAnimateMaker& maker)
+{
+ SkTextBox box;
+ box.setMode((SkTextBox::Mode) mode);
+ box.setSpacingAlign((SkTextBox::SpacingAlign) spacingAlign);
+ box.setBox(fRect);
+ box.setSpacing(fSpacingMul, fSpacingAdd);
+ SkBoundableAuto boundable(this, maker);
+ box.draw(maker.fCanvas, fText.c_str(), fText.size(), *maker.fPaint);
+ return false;
+}
diff --git a/gfx/skia/skia/src/animator/SkDrawTextBox.h b/gfx/skia/skia/src/animator/SkDrawTextBox.h
new file mode 100644
index 000000000..8f99c73d1
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawTextBox.h
@@ -0,0 +1,38 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawTextBox_DEFINED
+#define SkDrawTextBox_DEFINED
+
+#include "SkDrawRectangle.h"
+#include "SkTextBox.h"
+
+class SkDrawTextBox : public SkDrawRect {
+ DECLARE_DRAW_MEMBER_INFO(TextBox);
+ SkDrawTextBox();
+
+ // overrides
+ bool draw(SkAnimateMaker& ) override;
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+ bool getProperty(int index, SkScriptValue* value) const override;
+ bool setProperty(int index, SkScriptValue& ) override;
+
+private:
+ SkString fText;
+ SkScalar fSpacingMul;
+ SkScalar fSpacingAdd;
+ int /*SkTextBox::Mode*/ mode;
+ int /*SkTextBox::SpacingAlign*/ spacingAlign;
+
+ typedef SkDrawRect INHERITED;
+};
+
+#endif // SkDrawTextBox_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDrawTo.cpp b/gfx/skia/skia/src/animator/SkDrawTo.cpp
new file mode 100644
index 000000000..7ae9ca58d
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawTo.cpp
@@ -0,0 +1,55 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDrawTo.h"
+#include "SkAnimateMaker.h"
+#include "SkCanvas.h"
+#include "SkDrawBitmap.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawTo::fInfo[] = {
+ SK_MEMBER(drawOnce, Boolean),
+ SK_MEMBER(use, Bitmap)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawTo);
+
+SkDrawTo::SkDrawTo() : drawOnce(false), use(nullptr), fDrawnOnce(false) {
+}
+
+#if 0
+SkDrawTo::~SkDrawTo() {
+ SkASSERT(0);
+}
+#endif
+
+bool SkDrawTo::draw(SkAnimateMaker& maker) {
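+ // Draw the group's children into the bitmap owned by 'use' by temporarily swapping out the maker's canvas.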
+ if (fDrawnOnce)
+ return false;
+ SkCanvas canvas(use->fBitmap);
+ SkCanvas* save = maker.fCanvas;
+ maker.fCanvas = &canvas;
+ INHERITED::draw(maker);
+ maker.fCanvas = save;
+ fDrawnOnce = drawOnce;
+ return false;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkDrawTo::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ dumpAttrs(maker);
+ if (use)
+ SkDebugf("use=\"%s\" ", use->id);
+ dumpDrawables(maker);
+}
+#endif
diff --git a/gfx/skia/skia/src/animator/SkDrawTo.h b/gfx/skia/skia/src/animator/SkDrawTo.h
new file mode 100644
index 000000000..29f6b6303
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDrawTo.h
@@ -0,0 +1,34 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawTo_DEFINED
+#define SkDrawTo_DEFINED
+
+#include "SkDrawGroup.h"
+#include "SkMemberInfo.h"
+
+class SkDrawBitmap;
+
+class SkDrawTo : public SkGroup {
+ DECLARE_MEMBER_INFO(DrawTo);
+ SkDrawTo();
+// virtual ~SkDrawTo();
+ bool draw(SkAnimateMaker& ) override;
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+protected:
+ SkBool drawOnce;
+ SkDrawBitmap* use;
+private:
+ typedef SkGroup INHERITED;
+ SkBool fDrawnOnce;
+};
+
+#endif // SkDrawTo_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkDump.cpp b/gfx/skia/skia/src/animator/SkDump.cpp
new file mode 100644
index 000000000..346b30daf
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDump.cpp
@@ -0,0 +1,150 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDump.h"
+
+#ifdef SK_DUMP_ENABLED
+
+#include "SkAnimateMaker.h"
+#include "SkAnimatorScript.h"
+#include "SkDisplayEvents.h"
+#include "SkDisplayList.h"
+#include "SkString.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDump::fInfo[] = {
+ SK_MEMBER(displayList, Boolean),
+ SK_MEMBER(eventList, Boolean),
+ SK_MEMBER(events, Boolean),
+ SK_MEMBER(groups, Boolean),
+ SK_MEMBER(name, String),
+ SK_MEMBER(posts, Boolean),
+ SK_MEMBER(script, String)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDump);
+
+SkDump::SkDump() : displayList(-1), eventList(-1), events(-1), groups(-1), posts(-1) {
+}
+
+bool SkDump::enable(SkAnimateMaker& maker ) {
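+ // With a script attribute, evaluate and print it; otherwise honor whichever dump attributes were set,
+ // defaulting to dumping the display list when nothing specific was requested.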
+ if (script.size() > 0)
+ return evaluate(maker);
+ bool hasAttr = false;
+ if (events > 0)
+ hasAttr |= maker.fDumpEvents = true;
+ if (posts > 0)
+ hasAttr |= maker.fDumpPosts = true;
+ if (groups > 0)
+ hasAttr |= maker.fDumpGConditions = true;
+ if ((hasAttr |= (eventList > 0)) == true)
+ maker.fEvents.dump(maker);
+ if ((hasAttr |= (name.size() > 0)) == true)
+ maker.dump(name.c_str());
+ if (displayList > 0 || (displayList != 0 && hasAttr == false))
+ maker.fDisplayList.dump(&maker);
+ return true;
+}
+
+bool SkDump::evaluate(SkAnimateMaker &maker) {
+ SkAnimatorScript scriptEngine(maker, nullptr, SkType_Int);
+ SkScriptValue value;
+ const char* cScript = script.c_str();
+ bool success = scriptEngine.evaluateScript(&cScript, &value);
+ SkDebugf("%*s<dump script=\"%s\" answer=\" ", SkDisplayList::fIndent, "", script.c_str());
+ if (success == false) {
+ SkDebugf("INVALID\" />\n");
+ return false;
+ }
+ switch (value.fType) {
+ case SkType_Float:
+ SkDebugf("%g\" />\n", SkScalarToFloat(value.fOperand.fScalar));
+ break;
+ case SkType_Int:
+ SkDebugf("%d\" />\n", value.fOperand.fS32);
+ break;
+ case SkType_String:
+ SkDebugf("%s\" />\n", value.fOperand.fString->c_str());
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+bool SkDump::hasEnable() const {
+ return true;
+}
+
+void SkDump::GetEnumString(SkDisplayTypes type, int index, SkString* result) {
+ int badEnum = index;
+ const SkDisplayEnumMap& map = SkAnimatorScript::GetEnumValues(type);
+ const char* str = map.fValues;
+ while (--index >= 0) {
+ str = strchr(str, '|');
+ if (str == nullptr) {
+ result->reset();
+ result->appendS32(badEnum);
+ return;
+ }
+ str += 1;
+ }
+ const char* end = strchr(str, '|');
+ if (end == nullptr)
+ end = str + strlen(str);
+ result->set(str, end - str);
+}
+
+#else
+
+// in the release version, <dump> is allowed, and its attributes are defined, but
+// are not stored and have no effect
+
+#if SK_USE_CONDENSED_INFO == 0
+
+enum SkDump_Properties {
+ SK_PROPERTY(displayList),
+ SK_PROPERTY(eventList),
+ SK_PROPERTY(events),
+ SK_PROPERTY(groups),
+ SK_PROPERTY(name),
+ SK_PROPERTY(posts),
+ SK_PROPERTY(script)
+};
+
+const SkMemberInfo SkDump::fInfo[] = {
+ SK_MEMBER_PROPERTY(displayList, Boolean),
+ SK_MEMBER_PROPERTY(eventList, Boolean),
+ SK_MEMBER_PROPERTY(events, Boolean),
+ SK_MEMBER_PROPERTY(groups, Boolean),
+ SK_MEMBER_PROPERTY(name, String),
+ SK_MEMBER_PROPERTY(posts, Boolean),
+ SK_MEMBER_PROPERTY(script, String)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDump);
+
+bool SkDump::enable(SkAnimateMaker&) {
+ return true;
+}
+
+bool SkDump::hasEnable() const {
+ return true;
+}
+
+bool SkDump::setProperty(int index, SkScriptValue&) {
+ return index <= SK_PROPERTY(posts);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/animator/SkDump.h b/gfx/skia/skia/src/animator/SkDump.h
new file mode 100644
index 000000000..bcf19fd9a
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkDump.h
@@ -0,0 +1,42 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDump_DEFINED
+#define SkDump_DEFINED
+
+#include "SkDisplayable.h"
+#include "SkMemberInfo.h"
+
+class SkAnimateMaker;
+class SkString;
+
+class SkDump : public SkDisplayable {
+ DECLARE_MEMBER_INFO(Dump);
+#ifdef SK_DUMP_ENABLED
+ SkDump();
+ bool enable(SkAnimateMaker & ) override;
+ bool evaluate(SkAnimateMaker &);
+ bool hasEnable() const override;
+ static void GetEnumString(SkDisplayTypes , int index, SkString* result);
+ SkBool displayList;
+ SkBool eventList;
+ SkBool events;
+ SkString name;
+ SkBool groups;
+ SkBool posts;
+ SkString script;
+#else
+ bool enable(SkAnimateMaker & ) override;
+ bool hasEnable() const override;
+ bool setProperty(int index, SkScriptValue& ) override;
+#endif
+};
+
+
+#endif // SkDump_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkExtraPathEffects.xsd b/gfx/skia/skia/src/animator/SkExtraPathEffects.xsd
new file mode 100644
index 000000000..9592443a0
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkExtraPathEffects.xsd
@@ -0,0 +1,33 @@
+<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
+xmlns:Sk="urn:screenplay"
+xmlns:extra="urn:extraPathEffects" targetNamespace="urn:extraPathEffects" >
+ <xs:import namespace="urn:screenplay"
+ schemaLocation="SkAnimateSchema.xsd" />
+
+ <xs:element name="composePathEffect" >
+ <xs:complexType>
+ <xs:choice maxOccurs="1">
+ <xs:element ref="Sk:dash"/>
+ <xs:element ref="extra:shape1DPathEffect"/>
+ </xs:choice>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+ <xs:element name="shape1DPathEffect" >
+ <xs:complexType>
+ <xs:choice maxOccurs="1">
+ <xs:element ref="Sk:matrix"/>
+ <xs:element ref="Sk:path"/>
+ </xs:choice>
+ <xs:attribute name="addPath" type="Sk:DynamicString" />
+ <xs:attribute name="matrix" type="Sk:DynamicString" />
+ <xs:attribute name="path" type="Sk:Path" />
+ <xs:attribute name="phase" type="Sk:DynamicString"/>
+ <xs:attribute name="spacing" type="Sk:DynamicString"/>
+ <xs:attribute name="id" type="xs:ID"/>
+ </xs:complexType>
+ </xs:element>
+
+</xs:schema>
+
diff --git a/gfx/skia/skia/src/animator/SkExtras.h b/gfx/skia/skia/src/animator/SkExtras.h
new file mode 100644
index 000000000..dcd390503
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkExtras.h
@@ -0,0 +1,34 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkExtras_DEFINED
+#define SkExtras_DEFINED
+
+#include "SkScript.h"
+
+class SkExtras {
+public:
+ SkExtras();
+ virtual ~SkExtras() {}
+
+ virtual SkDisplayable* createInstance(SkDisplayTypes type) = 0;
+ virtual bool definesType(SkDisplayTypes type) = 0;
+#if SK_USE_CONDENSED_INFO == 0
+ virtual const SkMemberInfo* getMembers(SkDisplayTypes type, int* infoCountPtr) = 0;
+#endif
+#ifdef SK_DEBUG
+ virtual const char* getName(SkDisplayTypes type) = 0;
+#endif
+ virtual SkDisplayTypes getType(const char match[], size_t len ) = 0;
+
+ SkScriptEngine::_propertyCallBack fExtraCallBack;
+ void* fExtraStorage;
+};
+
+#endif // SkExtras_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkGetCondensedInfo.cpp b/gfx/skia/skia/src/animator/SkGetCondensedInfo.cpp
new file mode 100644
index 000000000..4c6532562
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkGetCondensedInfo.cpp
@@ -0,0 +1,121 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkMemberInfo.h"
+
+#if SK_USE_CONDENSED_INFO == 1
+
+// SkCondensed.inc is auto-generated
+// To generate it, execute SkDisplayType::BuildCondensedInfo()
+#ifdef SK_DEBUG
+#include "SkCondensedDebug.inc"
+#else
+#include "SkCondensedRelease.inc"
+#endif
+
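+// Binary search over the condensed member tables: each packed SkMemberInfo occupies four bytes,
+// and its first byte ('lengths[mid << 2]') is the offset of the member name within 'strings'.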
+static int _searchByName(const unsigned char* lengths, int count, const char* strings, const char target[]) {
+ int lo = 0;
+ int hi = count - 1;
+ while (lo < hi) {
+ int mid = (hi + lo) >> 1;
+ if (strcmp(&strings[lengths[mid << 2]], target) < 0)
+ lo = mid + 1;
+ else
+ hi = mid;
+ }
+ if (strcmp(&strings[lengths[hi << 2]], target) != 0)
+ return -1;
+ return hi;
+}
+
+static int _searchByType(SkDisplayTypes type) {
+ unsigned char match = (unsigned char) type;
+ int lo = 0;
+ int hi = kTypeIDs - 1;
+ while (lo < hi) {
+ int mid = (hi + lo) >> 1;
+ if (gTypeIDs[mid] < match)
+ lo = mid + 1;
+ else
+ hi = mid;
+ }
+ if (gTypeIDs[hi] != type)
+ return -1;
+ return hi;
+}
+
+const SkMemberInfo* SkDisplayType::GetMembers(SkAnimateMaker* , SkDisplayTypes type, int* infoCountPtr) {
+ int lookup = _searchByType(type);
+ if (lookup < 0)
+ return nullptr;
+ if (infoCountPtr)
+ *infoCountPtr = gInfoCounts[lookup];
+ return gInfoTables[lookup];
+}
+
+// !!! replace with inline
+const SkMemberInfo* SkDisplayType::GetMember(SkAnimateMaker* , SkDisplayTypes type, const char** matchPtr ) {
+ const SkMemberInfo* info = SkMemberInfo::Find(type, matchPtr);
+ SkASSERT(info);
+ return info;
+}
+
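+// Resolve a member by name; when the first entry is a BaseClassInfo record, search the base class's table first.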
+static const SkMemberInfo* _lookup(int lookup, const char** matchPtr) {
+ int count = gInfoCounts[lookup];
+ const SkMemberInfo* info = gInfoTables[lookup];
+ if (info->fType == SkType_BaseClassInfo) {
+ int baseTypeLookup = info->fOffset;
+ const SkMemberInfo* result = _lookup(baseTypeLookup, matchPtr);
+ if (result != nullptr)
+ return result;
+ if (--count == 0)
+ return nullptr;
+ info++;
+ }
+ SkASSERT(info->fType != SkType_BaseClassInfo);
+ const char* match = *matchPtr;
+ const char* strings = gInfoNames[lookup];
+ int index = _searchByName(&info->fName, count, strings, match);
+ if (index < 0)
+ return nullptr;
+ return &info[index];
+}
+
+// Find the Nth member of a type; walks an inherited table through its lookup slot,
+// in the same way _lookup() above does for lookups by name.
+static const SkMemberInfo* _lookupByIndex(int lookup, int* index) {
+ int count = gInfoCounts[lookup];
+ const SkMemberInfo* info = gInfoTables[lookup];
+ if (info->fType == SkType_BaseClassInfo) {
+ int baseTypeLookup = info->fOffset;
+ const SkMemberInfo* result = _lookupByIndex(baseTypeLookup, index);
+ if (result != nullptr)
+ return result;
+ if (--count == 0)
+ return nullptr;
+ info++;
+ }
+ SkASSERT(info->fType != SkType_BaseClassInfo);
+ if (*index >= count) {
+ *index -= count;
+ return nullptr;
+ }
+ return &info[*index];
+}
+
+const SkMemberInfo* SkMemberInfo::Find(SkDisplayTypes type, int* index) {
+ int lookup = _searchByType(type);
+ SkASSERT(lookup >= 0);
+ return _lookupByIndex(lookup, index);
+}
+
+const SkMemberInfo* SkMemberInfo::Find(SkDisplayTypes type, const char** matchPtr) {
+ int lookup = _searchByType(type);
+ SkASSERT(lookup >= 0);
+ return _lookup(lookup, matchPtr);
+}
+
+const SkMemberInfo* SkMemberInfo::getInherited() const {
+ int baseTypeLookup = fOffset;
+ return gInfoTables[baseTypeLookup];
+}
+
+#endif
diff --git a/gfx/skia/skia/src/animator/SkHitClear.cpp b/gfx/skia/skia/src/animator/SkHitClear.cpp
new file mode 100644
index 000000000..3ac521ae6
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkHitClear.cpp
@@ -0,0 +1,32 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkHitClear.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkHitClear::fInfo[] = {
+ SK_MEMBER_ARRAY(targets, Displayable)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkHitClear);
+
+bool SkHitClear::enable(SkAnimateMaker&) {
+ for (int tIndex = 0; tIndex < targets.count(); tIndex++) {
+ SkDisplayable* target = targets[tIndex];
+ target->clearBounder();
+ }
+ return true;
+}
+
+bool SkHitClear::hasEnable() const {
+ return true;
+}
diff --git a/gfx/skia/skia/src/animator/SkHitClear.h b/gfx/skia/skia/src/animator/SkHitClear.h
new file mode 100644
index 000000000..042c181ad
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkHitClear.h
@@ -0,0 +1,25 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkHitClear_DEFINED
+#define SkHitClear_DEFINED
+
+#include "SkDisplayable.h"
+#include "SkMemberInfo.h"
+#include "SkTypedArray.h"
+
+class SkHitClear : public SkDisplayable {
+ DECLARE_MEMBER_INFO(HitClear);
+ bool enable(SkAnimateMaker& ) override;
+ bool hasEnable() const override;
+private:
+ SkTDDisplayableArray targets;
+};
+
+#endif // SkHitClear_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkHitTest.cpp b/gfx/skia/skia/src/animator/SkHitTest.cpp
new file mode 100644
index 000000000..79dd25bd5
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkHitTest.cpp
@@ -0,0 +1,74 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkHitTest.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkHitTest::fInfo[] = {
+ SK_MEMBER_ARRAY(bullets, Displayable),
+ SK_MEMBER_ARRAY(hits, Int),
+ SK_MEMBER_ARRAY(targets, Displayable),
+ SK_MEMBER(value, Boolean)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkHitTest);
+
+SkHitTest::SkHitTest() : value(false) {
+}
+
+bool SkHitTest::draw(SkAnimateMaker&) {
+ hits.setCount(bullets.count());
+ value = false;
+ int bulletCount = bullets.count();
+ int targetCount = targets.count();
+ for (int bIndex = 0; bIndex < bulletCount; bIndex++) {
+ SkDisplayable* bullet = bullets[bIndex];
+ SkRect bBounds;
+ bullet->getBounds(&bBounds);
+ hits[bIndex] = -1;
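+ // a left edge of (int16_t)0x8000 marks bounds that were never set; skip such bullets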
+ if (bBounds.fLeft == (int16_t)0x8000U)
+ continue;
+ for (int tIndex = 0; tIndex < targetCount; tIndex++) {
+ SkDisplayable* target = targets[tIndex];
+ SkRect tBounds;
+ target->getBounds(&tBounds);
+ if (bBounds.intersect(tBounds)) {
+ hits[bIndex] = tIndex;
+ value = true;
+ break;
+ }
+ }
+ }
+ return false;
+}
+
+bool SkHitTest::enable(SkAnimateMaker&) {
+ for (int bIndex = 0; bIndex < bullets.count(); bIndex++) {
+ SkDisplayable* bullet = bullets[bIndex];
+ bullet->enableBounder();
+ }
+ for (int tIndex = 0; tIndex < targets.count(); tIndex++) {
+ SkDisplayable* target = targets[tIndex];
+ target->enableBounder();
+ }
+ return false;
+}
+
+bool SkHitTest::hasEnable() const {
+ return true;
+}
+
+const SkMemberInfo* SkHitTest::preferredChild(SkDisplayTypes) {
+ if (bullets.count() == 0)
+ return getMember("bullets");
+ return getMember("targets"); // !!! cwap! need to refer to member through enum like kScope instead
+}
diff --git a/gfx/skia/skia/src/animator/SkHitTest.h b/gfx/skia/skia/src/animator/SkHitTest.h
new file mode 100644
index 000000000..bd1cbe221
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkHitTest.h
@@ -0,0 +1,30 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkHitTest_DEFINED
+#define SkHitTest_DEFINED
+
+#include "SkADrawable.h"
+#include "SkTypedArray.h"
+
+class SkHitTest : public SkADrawable {
+ DECLARE_MEMBER_INFO(HitTest);
+ SkHitTest();
+ bool draw(SkAnimateMaker& ) override;
+ bool enable(SkAnimateMaker& ) override;
+ bool hasEnable() const override;
+ const SkMemberInfo* preferredChild(SkDisplayTypes type) override;
+private:
+ SkTDDisplayableArray bullets;
+ SkTDIntArray hits;
+ SkTDDisplayableArray targets;
+ SkBool value;
+};
+
+#endif // SkHitTest_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkIntArray.h b/gfx/skia/skia/src/animator/SkIntArray.h
new file mode 100644
index 000000000..ae8e36b94
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkIntArray.h
@@ -0,0 +1,55 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkIntArray_DEFINED
+#define SkIntArray_DEFINED
+
+#include "SkColor.h"
+#include "SkDisplayType.h"
+#include "SkMath.h"
+#include "SkTDArray_Experimental.h"
+
+class SkActive;
+class SkAnimateBase;
+class SkDataInput;
+class SkDisplayable;
+class SkDisplayEvent;
+class SkADrawable;
+class SkDrawColor;
+class SkMatrixPart;
+struct SkMemberInfo;
+class SkPathPart;
+class SkPaintPart;
+class SkTypedArray;
+class SkString;
+union SkOperand;
+
+typedef SkIntArray(int) SkTDIntArray;
+typedef SkIntArray(SkColor) SkTDColorArray;
+typedef SkIntArray(SkDisplayTypes) SkTDDisplayTypesArray;
+typedef SkIntArray(SkMSec) SkTDMSecArray;
+typedef SkIntArray(SkScalar) SkTDScalarArray;
+
+typedef SkLongArray(SkActive*) SkTDActiveArray;
+typedef SkLongArray(SkAnimateBase*) SkTDAnimateArray;
+typedef SkLongArray(SkDataInput*) SkTDDataArray;
+typedef SkLongArray(SkDisplayable*) SkTDDisplayableArray;
+typedef SkLongArray(SkDisplayEvent*) SkTDDisplayEventArray;
+typedef SkLongArray(SkADrawable*) SkTDDrawableArray;
+typedef SkLongArray(SkDrawColor*) SkTDDrawColorArray;
+typedef SkLongArray(SkMatrixPart*) SkTDMatrixPartArray;
+typedef SkLongArray(const SkMemberInfo*) SkTDMemberInfoArray;
+typedef SkLongArray(SkPaintPart*) SkTDPaintPartArray;
+typedef SkLongArray(SkPathPart*) SkTDPathPartArray;
+typedef SkLongArray(SkTypedArray*) SkTDTypedArrayArray;
+typedef SkLongArray(SkString*) SkTDStringArray;
+typedef SkLongArray(SkOperand) SkTDOperandArray;
+typedef SkLongArray(SkOperand*) SkTDOperandPtrArray;
+
+#endif // SkIntArray_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkMatrixParts.cpp b/gfx/skia/skia/src/animator/SkMatrixParts.cpp
new file mode 100644
index 000000000..c607f2523
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkMatrixParts.cpp
@@ -0,0 +1,292 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkMatrixParts.h"
+#include "SkAnimateMaker.h"
+#include "SkDrawMatrix.h"
+#include "SkDrawRectangle.h"
+#include "SkDrawPath.h"
+
+SkMatrixPart::SkMatrixPart() : fMatrix(nullptr) {
+}
+
+void SkMatrixPart::dirty() {
+ fMatrix->dirty();
+}
+
+SkDisplayable* SkMatrixPart::getParent() const {
+ return fMatrix;
+}
+
+bool SkMatrixPart::setParent(SkDisplayable* parent) {
+ SkASSERT(parent != nullptr);
+ if (parent->isMatrix() == false)
+ return true;
+ fMatrix = (SkDrawMatrix*) parent;
+ return false;
+}
+
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkRotate::fInfo[] = {
+ SK_MEMBER(center, Point),
+ SK_MEMBER(degrees, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkRotate);
+
+SkRotate::SkRotate() : degrees(0) {
+ center.fX = center.fY = 0;
+}
+
+bool SkRotate::add() {
+ fMatrix->rotate(degrees, center);
+ return false;
+}
+
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkScale::fInfo[] = {
+ SK_MEMBER(center, Point),
+ SK_MEMBER(x, Float),
+ SK_MEMBER(y, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkScale);
+
+SkScale::SkScale() : x(SK_Scalar1), y(SK_Scalar1) {
+ center.fX = center.fY = 0;
+}
+
+bool SkScale::add() {
+ fMatrix->scale(x, y, center);
+ return false;
+}
+
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkSkew::fInfo[] = {
+ SK_MEMBER(center, Point),
+ SK_MEMBER(x, Float),
+ SK_MEMBER(y, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkSkew);
+
+SkSkew::SkSkew() : x(0), y(0) {
+ center.fX = center.fY = 0;
+}
+
+bool SkSkew::add() {
+ fMatrix->skew(x, y, center);
+ return false;
+}
+
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkTranslate::fInfo[] = {
+ SK_MEMBER(x, Float),
+ SK_MEMBER(y, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkTranslate);
+
+SkTranslate::SkTranslate() : x(0), y(0) {
+}
+
+bool SkTranslate::add() {
+ fMatrix->translate(x, y);
+ return false;
+}
+
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkFromPath::fInfo[] = {
+ SK_MEMBER(mode, FromPathMode),
+ SK_MEMBER(offset, Float),
+ SK_MEMBER(path, Path)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkFromPath);
+
+SkFromPath::SkFromPath() :
+ mode(0), offset(0), path(nullptr) {
+}
+
+SkFromPath::~SkFromPath() {
+}
+
+bool SkFromPath::add() {
+ if (path == nullptr)
+ return true;
+ static const uint8_t gFlags[] = {
+ SkPathMeasure::kGetPosAndTan_MatrixFlag, // normal
+ SkPathMeasure::kGetTangent_MatrixFlag, // angle
+ SkPathMeasure::kGetPosition_MatrixFlag // position
+ };
+ if ((unsigned)mode >= SK_ARRAY_COUNT(gFlags))
+ return true;
+ SkMatrix result;
+ fPathMeasure.setPath(&path->getPath(), false);
+ if (fPathMeasure.getMatrix(offset, &result, (SkPathMeasure::MatrixFlags)gFlags[mode]))
+ fMatrix->set(result);
+ return false;
+}
+
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkRectToRect::fInfo[] = {
+ SK_MEMBER(destination, Rect),
+ SK_MEMBER(source, Rect)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkRectToRect);
+
+SkRectToRect::SkRectToRect() :
+ source(nullptr), destination(nullptr) {
+}
+
+SkRectToRect::~SkRectToRect() {
+}
+
+bool SkRectToRect::add() {
+ if (source == nullptr || destination == nullptr)
+ return true;
+ SkMatrix temp;
+ temp.setRectToRect(source->fRect, destination->fRect,
+ SkMatrix::kFill_ScaleToFit);
+ fMatrix->set(temp);
+ return false;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkRectToRect::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ SkDebugf("/>\n");
+ SkDisplayList::fIndent += 4;
+ if (source) {
+ SkDebugf("%*s<source>\n", SkDisplayList::fIndent, "");
+ SkDisplayList::fIndent += 4;
+ source->dump(maker);
+ SkDisplayList::fIndent -= 4;
+ SkDebugf("%*s</source>\n", SkDisplayList::fIndent, "");
+ }
+ if (destination) {
+ SkDebugf("%*s<destination>\n", SkDisplayList::fIndent, "");
+ SkDisplayList::fIndent += 4;
+ destination->dump(maker);
+ SkDisplayList::fIndent -= 4;
+ SkDebugf("%*s</destination>\n", SkDisplayList::fIndent, "");
+ }
+ SkDisplayList::fIndent -= 4;
+ dumpEnd(maker);
+}
+#endif
+
+const SkMemberInfo* SkRectToRect::preferredChild(SkDisplayTypes ) {
+ if (source == nullptr)
+ return getMember("source"); // !!! cwap! need to refer to member through enum like kScope instead
+ else {
+ SkASSERT(destination == nullptr);
+ return getMember("destination");
+ }
+}
+
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkPolyToPoly::fInfo[] = {
+ SK_MEMBER(destination, Polygon),
+ SK_MEMBER(source, Polygon)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkPolyToPoly);
+
+SkPolyToPoly::SkPolyToPoly() : source(nullptr), destination(nullptr) {
+}
+
+SkPolyToPoly::~SkPolyToPoly() {
+}
+
+bool SkPolyToPoly::add() {
+ SkASSERT(source);
+ SkASSERT(destination);
+ SkPoint src[4];
+ SkPoint dst[4];
+ SkPath& sourcePath = source->getPath();
+ int srcPts = sourcePath.getPoints(src, 4);
+ SkPath& destPath = destination->getPath();
+ int dstPts = destPath.getPoints(dst, 4);
+ if (srcPts != dstPts)
+ return true;
+ SkMatrix temp;
+ temp.setPolyToPoly(src, dst, srcPts);
+ fMatrix->set(temp);
+ return false;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkPolyToPoly::dump(SkAnimateMaker* maker) {
+ dumpBase(maker);
+ SkDebugf("/>\n");
+ SkDisplayList::fIndent += 4;
+ if (source) {
+ SkDebugf("%*s<source>\n", SkDisplayList::fIndent, "");
+ SkDisplayList::fIndent += 4;
+ source->dump(maker);
+ SkDisplayList::fIndent -= 4;
+ SkDebugf("%*s</source>\n", SkDisplayList::fIndent, "");
+ }
+ if (destination) {
+ SkDebugf("%*s<destination>\n", SkDisplayList::fIndent, "");
+ SkDisplayList::fIndent += 4;
+ destination->dump(maker);
+ SkDisplayList::fIndent -= 4;
+ SkDebugf("%*s</destination>\n", SkDisplayList::fIndent, "");
+ }
+ SkDisplayList::fIndent -= 4;
+ dumpEnd(maker);
+}
+#endif
+
+void SkPolyToPoly::onEndElement(SkAnimateMaker& ) {
+ SkASSERT(source);
+ SkASSERT(destination);
+ if (source->childHasID() || destination->childHasID())
+ fMatrix->setChildHasID();
+}
+
+const SkMemberInfo* SkPolyToPoly::preferredChild(SkDisplayTypes ) {
+ if (source == nullptr)
+ return getMember("source"); // !!! cwap! need to refer to member through enum like kScope instead
+ else {
+ SkASSERT(destination == nullptr);
+ return getMember("destination");
+ }
+}
diff --git a/gfx/skia/skia/src/animator/SkMatrixParts.h b/gfx/skia/skia/src/animator/SkMatrixParts.h
new file mode 100644
index 000000000..3276d023d
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkMatrixParts.h
@@ -0,0 +1,119 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkMatrixParts_DEFINED
+#define SkMatrixParts_DEFINED
+
+#include "SkDisplayable.h"
+#include "SkMemberInfo.h"
+#include "SkPathMeasure.h"
+
+class SkDrawPath;
+class SkDrawRect;
+class SkPolygon;
+
+class SkDrawMatrix;
+// class SkMatrix;
+
+class SkMatrixPart : public SkDisplayable {
+public:
+ SkMatrixPart();
+ virtual bool add() = 0;
+ virtual void dirty();
+ virtual SkDisplayable* getParent() const;
+ virtual bool setParent(SkDisplayable* parent);
+#ifdef SK_DEBUG
+ virtual bool isMatrixPart() const { return true; }
+#endif
+protected:
+ SkDrawMatrix* fMatrix;
+};
+
+class SkRotate : public SkMatrixPart {
+ DECLARE_MEMBER_INFO(Rotate);
+ SkRotate();
+protected:
+ bool add() override;
+ SkScalar degrees;
+ SkPoint center;
+};
+
+class SkScale : public SkMatrixPart {
+ DECLARE_MEMBER_INFO(Scale);
+ SkScale();
+protected:
+ bool add() override;
+ SkScalar x;
+ SkScalar y;
+ SkPoint center;
+};
+
+class SkSkew : public SkMatrixPart {
+ DECLARE_MEMBER_INFO(Skew);
+ SkSkew();
+protected:
+ bool add() override;
+ SkScalar x;
+ SkScalar y;
+ SkPoint center;
+};
+
+class SkTranslate : public SkMatrixPart {
+ DECLARE_MEMBER_INFO(Translate);
+ SkTranslate();
+protected:
+ bool add() override;
+ SkScalar x;
+ SkScalar y;
+};
+
+class SkFromPath : public SkMatrixPart {
+ DECLARE_MEMBER_INFO(FromPath);
+ SkFromPath();
+ virtual ~SkFromPath();
+protected:
+ bool add() override;
+ int32_t mode;
+ SkScalar offset;
+ SkDrawPath* path;
+ SkPathMeasure fPathMeasure;
+};
+
+class SkRectToRect : public SkMatrixPart {
+ DECLARE_MEMBER_INFO(RectToRect);
+ SkRectToRect();
+ virtual ~SkRectToRect();
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+ const SkMemberInfo* preferredChild(SkDisplayTypes type) override;
+protected:
+ bool add() override;
+ SkDrawRect* source;
+ SkDrawRect* destination;
+};
+
+class SkPolyToPoly : public SkMatrixPart {
+ DECLARE_MEMBER_INFO(PolyToPoly);
+ SkPolyToPoly();
+ virtual ~SkPolyToPoly();
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker* ) override;
+#endif
+ void onEndElement(SkAnimateMaker& ) override;
+ const SkMemberInfo* preferredChild(SkDisplayTypes type) override;
+protected:
+ bool add() override;
+ SkPolygon* source;
+ SkPolygon* destination;
+};
+
+// !!! add concat matrix ?
+
+#endif // SkMatrixParts_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkMemberInfo.cpp b/gfx/skia/skia/src/animator/SkMemberInfo.cpp
new file mode 100644
index 000000000..ea4c257b1
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkMemberInfo.cpp
@@ -0,0 +1,559 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkMemberInfo.h"
+#include "SkAnimateMaker.h"
+#include "SkAnimatorScript.h"
+#include "SkBase64.h"
+#include "SkCamera.h"
+#include "SkDisplayable.h"
+#include "SkDisplayTypes.h"
+#include "SkDraw3D.h"
+#include "SkDrawColor.h"
+#include "SkParse.h"
+#include "SkScript.h"
+#include "SkTSearch.h"
+#include "SkTypedArray.h"
+
+size_t SkMemberInfo::GetSize(SkDisplayTypes type) { // size of simple types only
+ size_t byteSize;
+ switch (type) {
+ case SkType_ARGB:
+ byteSize = sizeof(SkColor);
+ break;
+ case SkType_AddMode:
+ case SkType_Align:
+ case SkType_ApplyMode:
+ case SkType_ApplyTransition:
+ case SkType_BitmapEncoding:
+ case SkType_Boolean:
+ case SkType_Cap:
+ case SkType_EventCode:
+ case SkType_EventKind:
+ case SkType_EventMode:
+ case SkType_FilterType:
+ case SkType_FontStyle:
+ case SkType_FromPathMode:
+ case SkType_Join:
+ case SkType_MaskFilterBlurStyle:
+ case SkType_PathDirection:
+ case SkType_Style:
+ case SkType_TileMode:
+ case SkType_Xfermode:
+ byteSize = sizeof(int);
+ break;
+ case SkType_Base64: // assume base64 data is always const, copied by ref
+ case SkType_Displayable:
+ case SkType_Drawable:
+ case SkType_Matrix:
+ byteSize = sizeof(void*);
+ break;
+ case SkType_MSec:
+ byteSize = sizeof(SkMSec);
+ break;
+ case SkType_Point:
+ byteSize = sizeof(SkPoint);
+ break;
+ case SkType_3D_Point:
+ byteSize = sizeof(Sk3D_Point);
+ break;
+ case SkType_Int:
+ byteSize = sizeof(int32_t);
+ break;
+ case SkType_Float:
+ byteSize = sizeof(SkScalar);
+ break;
+ case SkType_DynamicString:
+ case SkType_String:
+ byteSize = sizeof(SkString); // assume we'll copy by reference, not value
+ break;
+ default:
+// SkASSERT(0);
+ byteSize = 0;
+ }
+ return byteSize;
+}
+
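+// The member stores a pointer to the array's element storage; copy one element out
+// using the byte size of the array's simple element type.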
+bool SkMemberInfo::getArrayValue(const SkDisplayable* displayable, int index, SkOperand* value) const {
+ SkASSERT(fType != SkType_String && fType != SkType_MemberProperty);
+ char* valuePtr = (char*) *(SkOperand**) memberData(displayable);
+ SkDisplayTypes type = (SkDisplayTypes) 0;
+ if (displayable->getType() == SkType_Array) {
+ SkDisplayArray* dispArray = (SkDisplayArray*) displayable;
+ if (dispArray->values.count() <= index)
+ return false;
+ type = dispArray->values.getType();
+ } else {
+ SkASSERT(0); // incomplete
+ }
+ size_t byteSize = GetSize(type);
+ memcpy(value, valuePtr + index * byteSize, byteSize);
+ return true;
+}
+
+size_t SkMemberInfo::getSize(const SkDisplayable* displayable) const {
+ size_t byteSize;
+ switch (fType) {
+ case SkType_MemberProperty:
+ byteSize = GetSize(propertyType());
+ break;
+ case SkType_Array: {
+ SkDisplayTypes type;
+ if (displayable == nullptr)
+ return sizeof(int);
+ if (displayable->getType() == SkType_Array) {
+ SkDisplayArray* dispArray = (SkDisplayArray*) displayable;
+ type = dispArray->values.getType();
+ } else
+ type = propertyType();
+ SkTDOperandArray* array = (SkTDOperandArray*) memberData(displayable);
+ byteSize = GetSize(type) * array->count();
+ } break;
+ default:
+ byteSize = GetSize((SkDisplayTypes) fType);
+ }
+ return byteSize;
+}
+
+void SkMemberInfo::getString(const SkDisplayable* displayable, SkString** string) const {
+ if (fType == SkType_MemberProperty) {
+ SkScriptValue value;
+ displayable->getProperty(propertyIndex(), &value);
+ SkASSERT(value.fType == SkType_String);
+ *string = value.fOperand.fString;
+ return;
+ }
+ SkASSERT(fCount == sizeof(SkString) / sizeof(SkScalar));
+ SkASSERT(fType == SkType_String || fType == SkType_DynamicString);
+ void* valuePtr = memberData(displayable);
+ *string = (SkString*) valuePtr;
+}
+
+void SkMemberInfo::getValue(const SkDisplayable* displayable, SkOperand value[], int count) const {
+ SkASSERT(fType != SkType_String && fType != SkType_MemberProperty);
+ SkASSERT(count == fCount);
+ void* valuePtr = memberData(displayable);
+ size_t byteSize = getSize(displayable);
+ SkASSERT(sizeof(value[0].fScalar) == sizeof(value[0])); // no support for 64 bit pointers, yet
+ memcpy(value, valuePtr, byteSize);
+}
+
+void SkMemberInfo::setString(SkDisplayable* displayable, SkString* value) const {
+ SkString* string = (SkString*) memberData(displayable);
+ string->set(*value);
+ displayable->dirty();
+}
+
+void SkMemberInfo::setValue(SkDisplayable* displayable, const SkOperand values[],
+ int count) const {
+ SkASSERT(sizeof(values[0].fScalar) == sizeof(values[0])); // no support for 64 bit pointers, yet
+ char* dst = (char*) memberData(displayable);
+ if (fType == SkType_Array) {
+ SkTDScalarArray* array = (SkTDScalarArray* ) dst;
+ array->setCount(count);
+ dst = (char*) array->begin();
+ }
+ memcpy(dst, values, count * sizeof(SkOperand));
+ displayable->dirty();
+}
+
+
+static inline bool is_between(int c, int min, int max)
+{
+ return (unsigned)(c - min) <= (unsigned)(max - min);
+}
+
+static inline bool is_hex(int c)
+{
+ if (is_between(c, '0', '9'))
+ return true;
+ c |= 0x20; // make us lower-case
+ if (is_between(c, 'a', 'f'))
+ return true;
+ return false;
+}
+
+
+bool SkMemberInfo::setValue(SkAnimateMaker& maker, SkTDOperandArray* arrayStorage,
+ int storageOffset, int maxStorage, SkDisplayable* displayable, SkDisplayTypes outType,
+ const char rawValue[], size_t rawValueLen) const
+{
+ SkString valueStr(rawValue, rawValueLen);
+ SkScriptValue scriptValue;
+ scriptValue.fType = SkType_Unknown;
+ scriptValue.fOperand.fS32 = 0;
+ SkDisplayTypes type = getType();
+ SkAnimatorScript engine(maker, displayable, type);
+ if (arrayStorage)
+ displayable = nullptr;
+ bool success = true;
+ void* untypedStorage = nullptr;
+ if (displayable && fType != SkType_MemberProperty && fType != SkType_MemberFunction)
+ untypedStorage = (SkTDOperandArray*) memberData(displayable);
+
+ if (type == SkType_ARGB) {
+ // for both SpiderMonkey and SkiaScript, substitute any #xyz or #xxyyzz first
+ // it's enough to expand the colors into 0xFFxxyyzz
+ const char* poundPos;
+ while ((poundPos = strchr(valueStr.c_str(), '#')) != nullptr) {
+ size_t offset = poundPos - valueStr.c_str();
+ if (valueStr.size() - offset < 4)
+ break;
+ char r = poundPos[1];
+ char g = poundPos[2];
+ char b = poundPos[3];
+ if (is_hex(r) == false || is_hex(g) == false || is_hex(b) == false)
+ break;
+ char hex = poundPos[4];
+ if (is_hex(hex) == false) {
+ valueStr.insertUnichar(offset + 1, r);
+ valueStr.insertUnichar(offset + 3, g);
+ valueStr.insertUnichar(offset + 5, b);
+ }
+ *(char*) poundPos = '0'; // overwrite '#'
+ valueStr.insert(offset + 1, "xFF");
+ }
+ }
+ if (SkDisplayType::IsDisplayable(&maker, type) || SkDisplayType::IsEnum(&maker, type) || type == SkType_ARGB)
+ goto scriptCommon;
+ switch (type) {
+ case SkType_String:
+#if 0
+ if (displayable && displayable->isAnimate()) {
+
+ goto noScriptString;
+ }
+ if (strncmp(rawValue, "#string:", sizeof("#string:") - 1) == 0) {
+ SkASSERT(sizeof("string") == sizeof("script"));
+ char* stringHeader = valueStr.writable_str();
+ memcpy(&stringHeader[1], "script", sizeof("script") - 1);
+ rawValue = valueStr.c_str();
+ goto noScriptString;
+ } else
+#endif
+ if (strncmp(rawValue, "#script:", sizeof("#script:") - 1) != 0)
+ goto noScriptString;
+ valueStr.remove(0, 8);
+ case SkType_Unknown:
+ case SkType_Int:
+ case SkType_MSec: // for the purposes of script, MSec is treated as a Scalar
+ case SkType_Point:
+ case SkType_3D_Point:
+ case SkType_Float:
+ case SkType_Array:
+scriptCommon: {
+ const char* script = valueStr.c_str();
+ success = engine.evaluateScript(&script, &scriptValue);
+ if (success == false) {
+ maker.setScriptError(engine);
+ return false;
+ }
+ }
+ SkASSERT(success);
+ if (scriptValue.fType == SkType_Displayable) {
+ if (type == SkType_String) {
+ const char* charPtr = nullptr;
+ maker.findKey(scriptValue.fOperand.fDisplayable, &charPtr);
+ scriptValue.fOperand.fString = new SkString(charPtr);
+ scriptValue.fType = SkType_String;
+ engine.SkScriptEngine::track(scriptValue.fOperand.fString);
+ break;
+ }
+ SkASSERT(SkDisplayType::IsDisplayable(&maker, type));
+ if (displayable)
+ displayable->setReference(this, scriptValue.fOperand.fDisplayable);
+ else
+ arrayStorage->begin()[0].fDisplayable = scriptValue.fOperand.fDisplayable;
+ return true;
+ }
+ if (type != scriptValue.fType) {
+ if (scriptValue.fType == SkType_Array) {
+ engine.forget(scriptValue.getArray());
+ goto writeStruct; // real structs have already been written by script
+ }
+ switch (type) {
+ case SkType_String:
+ success = engine.convertTo(SkType_String, &scriptValue);
+ break;
+ case SkType_MSec:
+ case SkType_Float:
+ success = engine.convertTo(SkType_Float, &scriptValue);
+ break;
+ case SkType_Int:
+ success = engine.convertTo(SkType_Int, &scriptValue);
+ break;
+ case SkType_Array:
+ success = engine.convertTo(arrayType(), &scriptValue);
+ // !!! incomplete; create array of appropriate type and add scriptValue to it
+ SkASSERT(0);
+ break;
+ case SkType_Displayable:
+ case SkType_Drawable:
+ return false; // no way to convert other types to this
+ default: // to avoid warnings
+ break;
+ }
+ if (success == false)
+ return false;
+ }
+ if (type == SkType_MSec)
+ scriptValue.fOperand.fMSec = SkScalarRoundToInt(scriptValue.fOperand.fScalar * 1000);
+ scriptValue.fType = type;
+ break;
+ noScriptString:
+ case SkType_DynamicString:
+ if (fType == SkType_MemberProperty && displayable) {
+ SkString string(rawValue, rawValueLen);
+ SkScriptValue scriptValue;
+ scriptValue.fOperand.fString = &string;
+ scriptValue.fType = SkType_String;
+ displayable->setProperty(propertyIndex(), scriptValue);
+ } else if (displayable) {
+ SkString* string = (SkString*) memberData(displayable);
+ string->set(rawValue, rawValueLen);
+ } else {
+ SkASSERT(arrayStorage->count() == 1);
+ arrayStorage->begin()->fString->set(rawValue, rawValueLen);
+ }
+ goto dirty;
+ case SkType_Base64: {
+ SkBase64 base64;
+ base64.decode(rawValue, rawValueLen);
+ *(SkBase64* ) untypedStorage = base64;
+ } goto dirty;
+ default:
+ SkASSERT(0);
+ break;
+ }
+// if (SkDisplayType::IsStruct(type) == false)
+ {
+writeStruct:
+ if (writeValue(displayable, arrayStorage, storageOffset, maxStorage,
+ untypedStorage, outType, scriptValue)) {
+ maker.setErrorCode(SkDisplayXMLParserError::kUnexpectedType);
+ return false;
+ }
+ }
+dirty:
+ if (displayable)
+ displayable->dirty();
+ return true;
+}
+
+bool SkMemberInfo::setValue(SkAnimateMaker& maker, SkTDOperandArray* arrayStorage,
+ int storageOffset, int maxStorage, SkDisplayable* displayable, SkDisplayTypes outType,
+ SkString& raw) const {
+ return setValue(maker, arrayStorage, storageOffset, maxStorage, displayable, outType, raw.c_str(),
+ raw.size());
+}
+
+bool SkMemberInfo::writeValue(SkDisplayable* displayable, SkTDOperandArray* arrayStorage,
+ int storageOffset, int maxStorage, void* untypedStorage, SkDisplayTypes outType,
+ SkScriptValue& scriptValue) const
+{
+ SkOperand* storage = untypedStorage ? (SkOperand*) untypedStorage : arrayStorage ?
+ arrayStorage->begin() : nullptr;
+ if (storage)
+ storage += storageOffset;
+ SkDisplayTypes type = getType();
+ if (fType == SkType_MemberProperty) {
+ if(displayable)
+ displayable->setProperty(propertyIndex(), scriptValue);
+ else {
+ SkASSERT(storageOffset < arrayStorage->count());
+ switch (scriptValue.fType) {
+ case SkType_Boolean:
+ case SkType_Float:
+ case SkType_Int:
+ memcpy(&storage->fScalar, &scriptValue.fOperand.fScalar, sizeof(SkScalar));
+ break;
+ case SkType_Array:
+ memcpy(&storage->fScalar, scriptValue.fOperand.fArray->begin(), scriptValue.fOperand.fArray->count() * sizeof(SkScalar));
+ break;
+ case SkType_String:
+ storage->fString->set(*scriptValue.fOperand.fString);
+ break;
+ default:
+ SkASSERT(0); // type isn't handled yet
+ }
+ }
+ } else if (fType == SkType_MemberFunction) {
+ SkASSERT(scriptValue.fType == SkType_Array);
+ if (displayable)
+ displayable->executeFunction(displayable, this, scriptValue.fOperand.fArray, nullptr);
+ else {
+ int count = scriptValue.fOperand.fArray->count();
+ // SkASSERT(maxStorage == 0 || count == maxStorage);
+ if (arrayStorage->count() == 2)
+ arrayStorage->setCount(2 * count);
+ else {
+ storageOffset *= count;
+ SkASSERT(count + storageOffset <= arrayStorage->count());
+ }
+ memcpy(&(*arrayStorage)[storageOffset], scriptValue.fOperand.fArray->begin(), count * sizeof(SkOperand));
+ }
+
+ } else if (fType == SkType_Array) {
+ SkTypedArray* destArray = (SkTypedArray*) (untypedStorage ? untypedStorage : arrayStorage);
+ SkASSERT(destArray);
+ // destArray->setCount(0);
+ if (scriptValue.fType != SkType_Array) {
+ SkASSERT(type == scriptValue.fType);
+ // SkASSERT(storageOffset + 1 <= maxStorage);
+ destArray->setCount(storageOffset + 1);
+ (*destArray)[storageOffset] = scriptValue.fOperand;
+ } else {
+ if (type == SkType_Unknown) {
+ type = scriptValue.fOperand.fArray->getType();
+ destArray->setType(type);
+ }
+ SkASSERT(type == scriptValue.fOperand.fArray->getType());
+ int count = scriptValue.fOperand.fArray->count();
+ // SkASSERT(storageOffset + count <= maxStorage);
+ destArray->setCount(storageOffset + count);
+ memcpy(destArray->begin() + storageOffset, scriptValue.fOperand.fArray->begin(), sizeof(SkOperand) * count);
+ }
+ } else if (type == SkType_String) {
+ SkString* string = untypedStorage ? (SkString*) untypedStorage : (*arrayStorage)[storageOffset].fString;
+ string->set(*scriptValue.fOperand.fString);
+ } else if (type == SkType_ARGB && outType == SkType_Float) {
+ SkTypedArray* array = scriptValue.fOperand.fArray;
+ SkASSERT(scriptValue.fType == SkType_Int || scriptValue.fType == SkType_ARGB ||
+ scriptValue.fType == SkType_Array);
+ SkASSERT(scriptValue.fType != SkType_Array || (array != nullptr &&
+ array->getType() == SkType_Int));
+ int numberOfColors = scriptValue.fType == SkType_Array ? array->count() : 1;
+ int numberOfComponents = numberOfColors * 4;
+ // SkASSERT(maxStorage == 0 || maxStorage == numberOfComponents);
+ if (maxStorage == 0)
+ arrayStorage->setCount(numberOfComponents);
+ for (int index = 0; index < numberOfColors; index++) {
+ SkColor color = scriptValue.fType == SkType_Array ?
+ (SkColor) array->begin()[index].fS32 : (SkColor) scriptValue.fOperand.fS32;
+ storage[0].fScalar = SkIntToScalar(SkColorGetA(color));
+ storage[1].fScalar = SkIntToScalar(SkColorGetR(color));
+ storage[2].fScalar = SkIntToScalar(SkColorGetG(color));
+ storage[3].fScalar = SkIntToScalar(SkColorGetB(color));
+ storage += 4;
+ }
+ } else if (SkDisplayType::IsStruct(nullptr /* !!! maker*/, type)) {
+ if (scriptValue.fType != SkType_Array)
+ return true; // error
+ SkASSERT(sizeof(SkScalar) == sizeof(SkOperand)); // !!! no 64 bit pointer support yet
+ int count = scriptValue.fOperand.fArray->count();
+ if (count > 0) {
+ SkASSERT(fCount == count);
+ memcpy(storage, scriptValue.fOperand.fArray->begin(), count * sizeof(SkOperand));
+ }
+ } else if (scriptValue.fType == SkType_Array) {
+ SkASSERT(scriptValue.fOperand.fArray->getType() == type);
+ SkASSERT(scriptValue.fOperand.fArray->count() == getCount());
+ memcpy(storage, scriptValue.fOperand.fArray->begin(), getCount() * sizeof(SkOperand));
+ } else {
+ memcpy(storage, &scriptValue.fOperand, sizeof(SkOperand));
+ }
+ return false;
+}
+
+
+//void SkMemberInfo::setValue(SkDisplayable* displayable, const char value[], const char name[]) const {
+// void* valuePtr = (void*) ((char*) displayable + fOffset);
+// switch (fType) {
+// case SkType_Point3D: {
+// static const char xyz[] = "x|y|z";
+// int index = find_one(xyz, name);
+// SkASSERT(index >= 0);
+// valuePtr = (void*) ((char*) valuePtr + index * sizeof(SkScalar));
+// } break;
+// default:
+// SkASSERT(0);
+// }
+// SkParse::FindScalar(value, (SkScalar*) valuePtr);
+// displayable->dirty();
+//}
+
+#if SK_USE_CONDENSED_INFO == 0
+
+// Find Nth memberInfo
+const SkMemberInfo* SkMemberInfo::Find(const SkMemberInfo info[], int count, int* index) {
+ SkASSERT(*index >= 0);
+ if (info->fType == SkType_BaseClassInfo) {
+ const SkMemberInfo* inherited = (SkMemberInfo*) info->fName;
+ const SkMemberInfo* result = SkMemberInfo::Find(inherited, info->fCount, index);
+ if (result != nullptr)
+ return result;
+ if (--count == 0)
+ return nullptr;
+ info++;
+ }
+ SkASSERT(info->fName);
+ SkASSERT(info->fType != SkType_BaseClassInfo);
+ if (*index >= count) {
+ *index -= count;
+ return nullptr;
+ }
+ return &info[*index];
+}
+
+// Find named memberinfo
+const SkMemberInfo* SkMemberInfo::Find(const SkMemberInfo info[], int count, const char** matchPtr) {
+ const char* match = *matchPtr;
+ if (info->fType == SkType_BaseClassInfo) {
+ const SkMemberInfo* inherited = (SkMemberInfo*) info->fName;
+ const SkMemberInfo* result = SkMemberInfo::Find(inherited, info->fCount, matchPtr);
+ if (result != nullptr)
+ return result;
+ if (--count == 0)
+ return nullptr;
+ info++;
+ }
+ SkASSERT(info->fName);
+ SkASSERT(info->fType != SkType_BaseClassInfo);
+ int index = SkStrSearch(&info->fName, count, match, sizeof(*info));
+ if (index < 0 || index >= count)
+ return nullptr;
+ return &info[index];
+}
+
+const SkMemberInfo* SkMemberInfo::getInherited() const {
+ return (SkMemberInfo*) fName;
+}
+
+#endif // SK_USE_CONDENSED_INFO == 0
+
+#if 0
+bool SkMemberInfo::SetValue(void* valuePtr, const char value[], SkDisplayTypes type,
+ int count) {
+ switch (type) {
+ case SkType_Animate:
+ case SkType_BaseBitmap:
+ case SkType_Bitmap:
+ case SkType_Dash:
+ case SkType_Displayable:
+ case SkType_Drawable:
+ case SkType_Matrix:
+ case SkType_Path:
+ case SkType_Text:
+ case SkType_3D_Patch:
+ return false; // ref to object; caller must resolve
+ case SkType_MSec: {
+ SkParse::FindMSec(value, (SkMSec*) valuePtr);
+ } break;
+ case SkType_3D_Point:
+ case SkType_Point:
+ // case SkType_PointArray:
+ case SkType_ScalarArray:
+ SkParse::FindScalars(value, (SkScalar*) valuePtr, count);
+ break;
+ default:
+ SkASSERT(0);
+ }
+ return true;
+}
+#endif
diff --git a/gfx/skia/skia/src/animator/SkMemberInfo.h b/gfx/skia/skia/src/animator/SkMemberInfo.h
new file mode 100644
index 000000000..709d66ac8
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkMemberInfo.h
@@ -0,0 +1,276 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkMemberInfo_DEFINED
+#define SkMemberInfo_DEFINED
+
+#if defined SK_BUILD_CONDENSED
+ #define SK_USE_CONDENSED_INFO 0
+#endif
+
+#include "SkDisplayType.h"
+#include "SkScript.h"
+#include "SkString.h"
+#include "SkIntArray.h"
+#include <utility>
+
+class SkAnimateMaker;
+class SkDisplayable;
+class SkScriptEngine;
+
+// temporary hacks until name change is more complete
+#define SkFloat SkScalar
+#define SkInt SkS32
+
+struct SkMemberInfo {
+ //!!! alternative:
+ // if fCount == 0, record is member property
+ // then fType can be type, so caller doesn't have to check
+#if SK_USE_CONDENSED_INFO == 0
+ const char* fName; // may be nullptr for anonymous functions
+ size_t fOffset; // if negative, is index into member pointer table (for properties and functions)
+ SkDisplayTypes fType;
+ int fCount; // for properties, actual type (count is always assumed to be 1)
+#else
+ unsigned char fName;
+ signed char fOffset;
+ unsigned char fType;
+ signed char fCount;
+#endif
+ SkDisplayTypes arrayType() const {
+ SkASSERT(fType == SkType_Array);
+ return (SkDisplayTypes) fCount; // hack, but worth it?
+ }
+ int functionIndex() const {
+ SkASSERT(fType == SkType_MemberFunction);
+ return (signed) fOffset > 0 ? -1 + (int) fOffset : -1 - (int) fOffset;
+ }
+ bool getArrayValue(const SkDisplayable* displayable, int index, SkOperand* value) const;
+ int getCount() const {
+ return fType == SkType_MemberProperty || fType == SkType_Array ||
+ fType == SkType_MemberFunction ? 1 : fCount;
+ }
+ const SkMemberInfo* getInherited() const;
+ size_t getSize(const SkDisplayable* ) const;
+ void getString(const SkDisplayable* , SkString** string) const;
+ SkDisplayTypes getType() const {
+ return fType == SkType_MemberProperty || fType == SkType_Array ||
+ fType == SkType_MemberFunction ? (SkDisplayTypes) fCount : (SkDisplayTypes) fType;
+ }
+ void getValue(const SkDisplayable* , SkOperand values[], int count) const;
+ bool isEnum() const;
+ const char* mapEnums(const char* match, int* value) const;
+ void* memberData(const SkDisplayable* displayable) const {
+ SkASSERT(fType != SkType_MemberProperty && fType != SkType_MemberFunction);
+ return (void*) ((const char*) displayable + fOffset);
+ }
+ int propertyIndex() const {
+ SkASSERT(fType == SkType_MemberProperty);
+ return (signed) fOffset > 0 ? -1 + (int) fOffset : -1 - (int) fOffset;
+ }
+ SkDisplayTypes propertyType() const {
+ SkASSERT(fType == SkType_MemberProperty || fType == SkType_Array);
+ return (SkDisplayTypes) fCount; // hack, but worth it?
+ }
+ void setMemberData(SkDisplayable* displayable, const void* child, size_t size) const {
+ SkASSERT(fType != SkType_MemberProperty && fType != SkType_MemberFunction);
+ memcpy((char*) displayable + fOffset, child, size);
+ }
+ void setString(SkDisplayable* , SkString* ) const;
+ void setValue(SkDisplayable* , const SkOperand values[], int count) const;
+ bool setValue(SkAnimateMaker& , SkTDOperandArray* storage,
+ int storageOffset, int maxStorage, SkDisplayable* ,
+ SkDisplayTypes outType, const char value[], size_t len) const;
+ bool setValue(SkAnimateMaker& , SkTDOperandArray* storage,
+ int storageOffset, int maxStorage, SkDisplayable* ,
+ SkDisplayTypes outType, SkString& str) const;
+// void setValue(SkDisplayable* , const char value[], const char name[]) const;
+ bool writeValue(SkDisplayable* displayable, SkTDOperandArray* arrayStorage,
+ int storageOffset, int maxStorage, void* untypedStorage, SkDisplayTypes outType,
+ SkScriptValue& scriptValue) const;
+#if SK_USE_CONDENSED_INFO == 0
+ static const SkMemberInfo* Find(const SkMemberInfo [], int count, int* index);
+ static const SkMemberInfo* Find(const SkMemberInfo [], int count, const char** name);
+#else
+ static const SkMemberInfo* Find(SkDisplayTypes type, int* index);
+ static const SkMemberInfo* Find(SkDisplayTypes type, const char** name);
+#endif
+ static size_t GetSize(SkDisplayTypes type); // size of simple types only
+// static bool SetValue(void* value, const char* name, SkDisplayTypes , int count);
+};
+
+#ifndef SK_OFFSETOF
+ // This is offsetof for types which are not standard layout.
+ #define SK_OFFSETOF(type, field) (size_t)((char*)&(((type*)1024)->field) - (char*)1024)
+#endif
+
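+// For plain members fCount is the member's size in SkScalar units; array members reuse
+// the count slot to record the element type (see SK_MEMBER_ARRAY below).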
+#define SK_MEMBER(_member, _type) \
+ { #_member, SK_OFFSETOF(BASE_CLASS, _member), SkType_##_type, \
+ sizeof(std::declval<BASE_CLASS>()._member) / sizeof(SkScalar) }
+
+#define SK_MEMBER_ALIAS(_member, _alias, _type) \
+ { #_member, SK_OFFSETOF(BASE_CLASS, _alias), SkType_##_type, \
+ sizeof(std::declval<BASE_CLASS>()._alias) / sizeof(SkScalar) }
+
+#define SK_MEMBER_ARRAY(_member, _type) \
+ { #_member, SK_OFFSETOF(BASE_CLASS, _member), SkType_Array, \
+ (int) SkType_##_type }
+
+#define SK_MEMBER_INHERITED \
+ { (const char*) INHERITED::fInfo, 0, SkType_BaseClassInfo, INHERITED::fInfoCount }
+
+// #define SK_MEMBER_KEY_TYPE(_member, _type)
+// {#_member, (size_t) -1, SkType_##_type, 0}
+
+#define SK_FUNCTION(_member) \
+ k_##_member##Function
+
+#define SK_PROPERTY(_member) \
+ k_##_member##Property
+
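+// Property and function members store an index rather than a byte offset in fOffset,
+// biased by one and signed; propertyIndex() and functionIndex() undo the bias.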
+#define SK_MEMBER_DYNAMIC_FUNCTION(_member, _type) \
+ {#_member, (size_t) (+1 + SK_FUNCTION(_member)), SkType_MemberFunction, \
+ (int) SkType_##_type }
+
+#define SK_MEMBER_DYNAMIC_PROPERTY(_member, _type) \
+ {#_member, (size_t) (1 + SK_PROPERTY(_member)), SkType_MemberProperty, \
+ (int) SkType_##_type }
+
+#define SK_MEMBER_FUNCTION(_member, _type) \
+ {#_member, (size_t) (-1 - SK_FUNCTION(_member)), SkType_MemberFunction, \
+ (int) SkType_##_type }
+
+#define SK_MEMBER_PROPERTY(_member, _type) \
+ {#_member, (size_t) (-1 - SK_PROPERTY(_member)), SkType_MemberProperty, \
+ (int) SkType_##_type }
+
+#if SK_USE_CONDENSED_INFO == 0
+
+#define DECLARE_PRIVATE_MEMBER_INFO(_type) \
+public: \
+ static const SkMemberInfo fInfo[]; \
+ static const int fInfoCount; \
+ const SkMemberInfo* getMember(int index) override; \
+ const SkMemberInfo* getMember(const char name[]) override; \
+ typedef Sk##_type BASE_CLASS
+
+#define DECLARE_MEMBER_INFO(_type) \
+public: \
+ static const SkMemberInfo fInfo[]; \
+ static const int fInfoCount; \
+ const SkMemberInfo* getMember(int index) override; \
+ const SkMemberInfo* getMember(const char name[]) override; \
+ SkDisplayTypes getType() const override { return SkType_##_type; } \
+ typedef Sk##_type BASE_CLASS
+
+#define DECLARE_DRAW_MEMBER_INFO(_type) \
+public: \
+ static const SkMemberInfo fInfo[]; \
+ static const int fInfoCount; \
+ const SkMemberInfo* getMember(int index) override; \
+ const SkMemberInfo* getMember(const char name[]) override; \
+ SkDisplayTypes getType() const override { return SkType_##_type; } \
+ typedef SkDraw##_type BASE_CLASS
+
+#define DECLARE_DISPLAY_MEMBER_INFO(_type) \
+public: \
+ static const SkMemberInfo fInfo[]; \
+ static const int fInfoCount; \
+ const SkMemberInfo* getMember(int index) override; \
+ const SkMemberInfo* getMember(const char name[]) override; \
+ SkDisplayTypes getType() const override { return SkType_##_type; } \
+ typedef SkDisplay##_type BASE_CLASS
+
+#define DECLARE_EMPTY_MEMBER_INFO(_type) \
+public: \
+ SkDisplayTypes getType() const override { return SkType_##_type; }
+
+#define DECLARE_EXTRAS_MEMBER_INFO(_type) \
+public: \
+ static const SkMemberInfo fInfo[]; \
+ static const int fInfoCount; \
+ const SkMemberInfo* getMember(int index) override; \
+ const SkMemberInfo* getMember(const char name[]) override; \
+ SkDisplayTypes fType; \
+ SkDisplayTypes getType() const override { return fType; } \
+ typedef _type BASE_CLASS
+
+#define DECLARE_NO_VIRTUALS_MEMBER_INFO(_type) \
+public: \
+ static const SkMemberInfo fInfo[]; \
+ static const int fInfoCount; \
+ typedef Sk##_type BASE_CLASS
+
+#define DEFINE_GET_MEMBER(_class) \
+ const SkMemberInfo* _class::getMember(int index) { \
+ const SkMemberInfo* result = SkMemberInfo::Find(fInfo, SK_ARRAY_COUNT(fInfo), &index); \
+ return result; \
+ } \
+ const SkMemberInfo* _class::getMember(const char name[]) { \
+ const SkMemberInfo* result = SkMemberInfo::Find(fInfo, SK_ARRAY_COUNT(fInfo), &name); \
+ return result; \
+ } \
+ const int _class::fInfoCount = SK_ARRAY_COUNT(fInfo)
+
+#define DEFINE_NO_VIRTUALS_GET_MEMBER(_class) \
+ const int _class::fInfoCount = SK_ARRAY_COUNT(fInfo)
+
+#else
+
+#define DECLARE_PRIVATE_MEMBER_INFO(_type) \
+public: \
+ typedef Sk##_type BASE_CLASS
+
+#define DECLARE_MEMBER_INFO(_type) \
+public: \
+ virtual const SkMemberInfo* getMember(int index) { \
+ return SkDisplayType::GetMember(nullptr, SkType_##_type, &index); } \
+ virtual const SkMemberInfo* getMember(const char name[]) { \
+ return SkDisplayType::GetMember(nullptr, SkType_##_type, &name); } \
+ virtual SkDisplayTypes getType() const { return SkType_##_type; } \
+ typedef Sk##_type BASE_CLASS
+
+#define DECLARE_DRAW_MEMBER_INFO(_type) \
+public: \
+ virtual const SkMemberInfo* getMember(int index) { \
+ return SkDisplayType::GetMember(nullptr, SkType_##_type, &index); } \
+ virtual const SkMemberInfo* getMember(const char name[]) { \
+ return SkDisplayType::GetMember(nullptr, SkType_##_type, &name); } \
+ virtual SkDisplayTypes getType() const { return SkType_##_type; } \
+ typedef SkDraw##_type BASE_CLASS
+
+#define DECLARE_DISPLAY_MEMBER_INFO(_type) \
+public: \
+ virtual const SkMemberInfo* getMember(int index) { \
+ return SkDisplayType::GetMember(nullptr, SkType_##_type, &index); } \
+ virtual const SkMemberInfo* getMember(const char name[]) { \
+ return SkDisplayType::GetMember(nullptr, SkType_##_type, &name); } \
+ virtual SkDisplayTypes getType() const { return SkType_##_type; } \
+ typedef SkDisplay##_type BASE_CLASS
+
+#define DECLARE_EXTRAS_MEMBER_INFO(_type) \
+public: \
+ virtual const SkMemberInfo* getMember(int index) { \
+ return SkDisplayType::GetMember(nullptr, SkType_##_type, &index); } \
+ virtual const SkMemberInfo* getMember(const char name[]) { \
+ return SkDisplayType::GetMember(nullptr, fType, &name); } \
+ SkDisplayTypes fType; \
+ virtual SkDisplayTypes getType() const { return fType; } \
+ typedef _type BASE_CLASS
+
+#define DECLARE_NO_VIRTUALS_MEMBER_INFO(_type) \
+public: \
+ typedef Sk##_type BASE_CLASS
+
+#define DEFINE_GET_MEMBER(_class)
+#define DEFINE_NO_VIRTUALS_GET_MEMBER(_class)
+
+#endif
+
+#endif // SkMemberInfo_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkOpArray.cpp b/gfx/skia/skia/src/animator/SkOpArray.cpp
new file mode 100644
index 000000000..94298cc83
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkOpArray.cpp
@@ -0,0 +1,23 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkOpArray.h"
+
+SkOpArray::SkOpArray() : fType(SkOperand2::kNoType) {
+}
+
+SkOpArray::SkOpArray(SkOperand2::OpType type) : fType(type) {
+}
+
+bool SkOpArray::getIndex(int index, SkOperand2* operand) {
+ if (index >= count()) {
+ SkASSERT(0);
+ return false;
+ }
+ *operand = begin()[index];
+ return true;
+}
diff --git a/gfx/skia/skia/src/animator/SkOpArray.h b/gfx/skia/skia/src/animator/SkOpArray.h
new file mode 100644
index 000000000..260bf78be
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkOpArray.h
@@ -0,0 +1,29 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOpArray_DEFINED
+#define SkOpArray_DEFINED
+
+#include "SkOperand2.h"
+#include "SkTDArray_Experimental.h"
+
+typedef SkLongArray(SkOperand2) SkTDOperand2Array;
+
+class SkOpArray : public SkTDOperand2Array {
+public:
+ SkOpArray();
+ SkOpArray(SkOperand2::OpType type);
+ bool getIndex(int index, SkOperand2* operand);
+ SkOperand2::OpType getType() { return fType; }
+ void setType(SkOperand2::OpType type) {
+ fType = type;
+ }
+protected:
+ SkOperand2::OpType fType;
+};
+
+#endif // SkOpArray_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkOperand.h b/gfx/skia/skia/src/animator/SkOperand.h
new file mode 100644
index 000000000..14126fccb
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkOperand.h
@@ -0,0 +1,46 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkOperand_DEFINED
+#define SkOperand_DEFINED
+
+#include "SkDisplayType.h"
+
+class SkTypedArray;
+class SkDisplayable;
+class SkADrawable;
+class SkString;
+
+union SkOperand {
+// SkOperand() {}
+// SkOperand(SkScalar scalar) : fScalar(scalar) {}
+ SkTypedArray* fArray;
+ SkDisplayable* fDisplayable;
+ SkADrawable* fDrawable;
+ void* fObject;
+ int32_t fS32;
+ SkMSec fMSec;
+ SkScalar fScalar;
+ SkString* fString;
+};
+
+struct SkScriptValue {
+ SkOperand fOperand;
+ SkDisplayTypes fType;
+ SkTypedArray* getArray() { SkASSERT(fType == SkType_Array); return fOperand.fArray; }
+ SkDisplayable* getDisplayable() { SkASSERT(fType == SkType_Displayable); return fOperand.fDisplayable; }
+ SkADrawable* getDrawable() { SkASSERT(fType == SkType_Drawable); return fOperand.fDrawable; }
+ int32_t getS32(SkAnimateMaker* maker) { SkASSERT(fType == SkType_Int || fType == SkType_Boolean ||
+ SkDisplayType::IsEnum(maker, fType)); return fOperand.fS32; }
+ SkMSec getMSec() { SkASSERT(fType == SkType_MSec); return fOperand.fMSec; }
+ SkScalar getScalar() { SkASSERT(fType == SkType_Float); return fOperand.fScalar; }
+ SkString* getString() { SkASSERT(fType == SkType_String); return fOperand.fString; }
+};
+
+#endif // SkOperand_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkOperand2.h b/gfx/skia/skia/src/animator/SkOperand2.h
new file mode 100644
index 000000000..f844b6b4c
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkOperand2.h
@@ -0,0 +1,54 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOperand2_DEFINED
+#define SkOperand2_DEFINED
+
+#include "SkScalar.h"
+
+class SkOpArray;
+class SkString;
+
+union SkOperand2 {
+ enum OpType {
+ kNoType,
+ kS32 = 1,
+ kScalar = 2,
+ kString = 4,
+ kArray = 8,
+ kObject = 16
+ };
+ SkOpArray* fArray;
+ void* fObject;
+ size_t fReference;
+ int32_t fS32;
+ SkScalar fScalar;
+ SkString* fString;
+};
+
+struct SkScriptValue2 {
+ enum IsConstant {
+ kConstant,
+ kVariable
+ };
+ enum IsWritten {
+ kUnwritten,
+ kWritten
+ };
+ SkOperand2 fOperand;
+ SkOperand2::OpType fType : 8;
+ IsConstant fIsConstant : 8;
+ IsWritten fIsWritten : 8;
+ SkOpArray* getArray() { SkASSERT(fType == SkOperand2::kArray); return fOperand.fArray; }
+ void* getObject() { SkASSERT(fType == SkOperand2::kObject); return fOperand.fObject; }
+ int32_t getS32() { SkASSERT(fType == SkOperand2::kS32); return fOperand.fS32; }
+ SkScalar getScalar() { SkASSERT(fType == SkOperand2::kScalar); return fOperand.fScalar; }
+ SkString* getString() { SkASSERT(fType == SkOperand2::kString); return fOperand.fString; }
+ bool isConstant() const { return fIsConstant == kConstant; }
+};
+
+#endif // SkOperand2_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkOperandInterpolator.h b/gfx/skia/skia/src/animator/SkOperandInterpolator.h
new file mode 100644
index 000000000..adbe69f16
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkOperandInterpolator.h
@@ -0,0 +1,47 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkOperandInterpolator_DEFINED
+#define SkOperandInterpolator_DEFINED
+
+#include "SkDisplayType.h"
+#include "SkInterpolator.h"
+#include "SkOperand.h"
+
+class SkOperandInterpolator : public SkInterpolatorBase {
+public:
+ SkOperandInterpolator();
+ SkOperandInterpolator(int elemCount, int frameCount, SkDisplayTypes type);
+ SkOperand* getValues() { return fValues; }
+ int getValuesCount() { return fFrameCount * fElemCount; }
+ void reset(int elemCount, int frameCount, SkDisplayTypes type);
+
+ /** Add or replace a key frame, copying the values[] data into the interpolator.
+ @param index The index of this frame (frames must be ordered by time)
+ @param time The millisecond time for this frame
+ @param values The array of values [elemCount] for this frame. The data is copied
+ into the interpolator.
+ @param blend A positive scalar specifying how to blend between this and the next key frame.
+ [0...1) is a cubic lag/log/lag blend (slow to change at the beginning and end)
+ 1 is a linear blend (default)
+ (1...inf) is a cubic log/lag/log blend (fast to change at the beginning and end)
+ */
+ bool setKeyFrame(int index, SkMSec time, const SkOperand values[], SkScalar blend = SK_Scalar1);
+ Result timeToValues(SkMSec time, SkOperand values[]) const;
+ SkDEBUGCODE(static void UnitTest();)
+private:
+ SkDisplayTypes fType;
+ SkOperand* fValues; // pointer into fStorage
+#ifdef SK_DEBUG
+ SkOperand(* fValuesArray)[10];
+#endif
+ typedef SkInterpolatorBase INHERITED;
+};
+
+#endif // SkOperandInterpolator_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkOperandIterpolator.cpp b/gfx/skia/skia/src/animator/SkOperandIterpolator.cpp
new file mode 100644
index 000000000..89ac44dea
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkOperandIterpolator.cpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkOperandInterpolator.h"
+#include "SkScript.h"
+
+SkOperandInterpolator::SkOperandInterpolator() {
+ INHERITED::reset(0, 0);
+ fType = SkType_Unknown;
+}
+
+SkOperandInterpolator::SkOperandInterpolator(int elemCount, int frameCount,
+ SkDisplayTypes type)
+{
+ this->reset(elemCount, frameCount, type);
+}
+
+void SkOperandInterpolator::reset(int elemCount, int frameCount, SkDisplayTypes type)
+{
+// SkASSERT(type == SkType_String || type == SkType_Float || type == SkType_Int ||
+// type == SkType_Displayable || type == SkType_Drawable);
+ INHERITED::reset(elemCount, frameCount);
+ fType = type;
+ fStorage = sk_malloc_throw((sizeof(SkOperand) * elemCount + sizeof(SkTimeCode)) * frameCount);
+ fTimes = (SkTimeCode*) fStorage;
+ fValues = (SkOperand*) ((char*) fStorage + sizeof(SkTimeCode) * frameCount);
+#ifdef SK_DEBUG
+ fTimesArray = (SkTimeCode(*)[10]) fTimes;
+ fValuesArray = (SkOperand(*)[10]) fValues;
+#endif
+}
+
+bool SkOperandInterpolator::setKeyFrame(int index, SkMSec time, const SkOperand values[], SkScalar blend)
+{
+ SkASSERT(values != nullptr);
+ blend = SkScalarPin(blend, 0, SK_Scalar1);
+
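+ // SkTSearch returns the bitwise-not of the insertion point when the time is not
+ // already present, so requiring ~index == result means key frames must be added in
+ // increasing time order with no duplicate times.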
+ bool success = ~index == SkTSearch<SkMSec>(&fTimes->fTime, index, time, sizeof(SkTimeCode));
+ SkASSERT(success);
+ if (success) {
+ SkTimeCode* timeCode = &fTimes[index];
+ timeCode->fTime = time;
+ timeCode->fBlend[0] = SK_Scalar1 - blend;
+ timeCode->fBlend[1] = 0;
+ timeCode->fBlend[2] = 0;
+ timeCode->fBlend[3] = SK_Scalar1 - blend;
+ SkOperand* dst = &fValues[fElemCount * index];
+ memcpy(dst, values, fElemCount * sizeof(SkOperand));
+ }
+ return success;
+}
+
+SkInterpolatorBase::Result SkOperandInterpolator::timeToValues(SkMSec time, SkOperand values[]) const
+{
+ SkScalar T;
+ int index;
+ bool exact;
+ Result result = timeToT(time, &T, &index, &exact);
+ if (values)
+ {
+ const SkOperand* nextSrc = &fValues[index * fElemCount];
+
+ if (exact)
+ memcpy(values, nextSrc, fElemCount * sizeof(SkScalar));
+ else
+ {
+ SkASSERT(index > 0);
+
+ const SkOperand* prevSrc = nextSrc - fElemCount;
+
+ if (fType == SkType_Float || fType == SkType_3D_Point) {
+ for (int i = fElemCount - 1; i >= 0; --i)
+ values[i].fScalar = SkScalarInterp(prevSrc[i].fScalar, nextSrc[i].fScalar, T);
+ } else if (fType == SkType_Int || fType == SkType_MSec) {
+ for (int i = fElemCount - 1; i >= 0; --i) {
+ int32_t a = prevSrc[i].fS32;
+ int32_t b = nextSrc[i].fS32;
+ values[i].fS32 = a + SkScalarRoundToInt((b - a) * T);
+ }
+ } else
+ memcpy(values, prevSrc, sizeof(SkOperand) * fElemCount);
+ }
+ }
+ return result;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+
+#ifdef SK_SUPPORT_UNITTEST
+ static SkOperand* iset(SkOperand array[3], int a, int b, int c)
+ {
+ array[0].fScalar = SkIntToScalar(a);
+ array[1].fScalar = SkIntToScalar(b);
+ array[2].fScalar = SkIntToScalar(c);
+ return array;
+ }
+#endif
+
+void SkOperandInterpolator::UnitTest()
+{
+#ifdef SK_SUPPORT_UNITTEST
+ SkOperandInterpolator inter(3, 2, SkType_Float);
+ SkOperand v1[3], v2[3], v[3], vv[3];
+ Result result;
+
+ inter.setKeyFrame(0, 100, iset(v1, 10, 20, 30), 0);
+ inter.setKeyFrame(1, 200, iset(v2, 110, 220, 330));
+
+ result = inter.timeToValues(0, v);
+ SkASSERT(result == kFreezeStart_Result);
+ SkASSERT(memcmp(v, v1, sizeof(v)) == 0);
+
+ result = inter.timeToValues(99, v);
+ SkASSERT(result == kFreezeStart_Result);
+ SkASSERT(memcmp(v, v1, sizeof(v)) == 0);
+
+ result = inter.timeToValues(100, v);
+ SkASSERT(result == kNormal_Result);
+ SkASSERT(memcmp(v, v1, sizeof(v)) == 0);
+
+ result = inter.timeToValues(200, v);
+ SkASSERT(result == kNormal_Result);
+ SkASSERT(memcmp(v, v2, sizeof(v)) == 0);
+
+ result = inter.timeToValues(201, v);
+ SkASSERT(result == kFreezeEnd_Result);
+ SkASSERT(memcmp(v, v2, sizeof(v)) == 0);
+
+ result = inter.timeToValues(150, v);
+ SkASSERT(result == kNormal_Result);
+ SkASSERT(memcmp(v, iset(vv, 60, 120, 180), sizeof(v)) == 0);
+
+ result = inter.timeToValues(125, v);
+ SkASSERT(result == kNormal_Result);
+ result = inter.timeToValues(175, v);
+ SkASSERT(result == kNormal_Result);
+#endif
+}
+
+#endif
diff --git a/gfx/skia/skia/src/animator/SkPaintPart.cpp b/gfx/skia/skia/src/animator/SkPaintPart.cpp
new file mode 100644
index 000000000..842dbc975
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkPaintPart.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPaintPart.h"
+#include "SkDrawPaint.h"
+#ifdef SK_DUMP_ENABLED
+#include "SkDisplayList.h"
+#include "SkDump.h"
+#endif
+
+SkPaintPart::SkPaintPart() : fPaint(nullptr) {
+}
+
+SkDisplayable* SkPaintPart::getParent() const {
+ return fPaint;
+}
+
+bool SkPaintPart::setParent(SkDisplayable* parent) {
+ SkASSERT(parent != nullptr);
+ if (parent->isPaint() == false)
+ return true;
+ fPaint = (SkDrawPaint*) parent;
+ return false;
+}
+
+
+// SkDrawMaskFilter
+bool SkDrawMaskFilter::add() {
+ if (fPaint->maskFilter != (SkDrawMaskFilter*) -1)
+ return true;
+ fPaint->maskFilter = this;
+ fPaint->fOwnsMaskFilter = true;
+ return false;
+}
+
+SkMaskFilter* SkDrawMaskFilter::getMaskFilter() {
+ return nullptr;
+}
+
+
+// SkDrawPathEffect
+bool SkDrawPathEffect::add() {
+ if (fPaint->isPaint()) {
+ if (fPaint->pathEffect != (SkDrawPathEffect*) -1)
+ return true;
+ fPaint->pathEffect = this;
+ fPaint->fOwnsPathEffect = true;
+ return false;
+ }
+ fPaint->add(nullptr, this);
+ return false;
+}
+
+SkPathEffect* SkDrawPathEffect::getPathEffect() {
+ return nullptr;
+}
+
+
+// SkDrawShader
+SkShader* SkDrawShader::getShader() {
+ return nullptr;
+}
+
+
+// Typeface
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDrawTypeface::fInfo[] = {
+ SK_MEMBER(fontName, String),
+ SK_MEMBER(style, FontStyle)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDrawTypeface);
+
+SkDrawTypeface::SkDrawTypeface() : style (SkTypeface::kNormal){
+}
+
+bool SkDrawTypeface::add() {
+ if (fPaint->typeface != (SkDrawTypeface*) -1)
+ return true;
+ fPaint->typeface = this;
+ fPaint->fOwnsTypeface = true;
+ return false;
+}
+
+#ifdef SK_DUMP_ENABLED
+void SkDrawTypeface::dump(SkAnimateMaker*) {
+ SkDebugf("%*s<typeface fontName=\"%s\" ", SkDisplayList::fIndent, "", fontName.c_str());
+ SkString string;
+ SkDump::GetEnumString(SkType_FontStyle, style, &string);
+ SkDebugf("style=\"%s\" />\n", string.c_str());
+}
+#endif
diff --git a/gfx/skia/skia/src/animator/SkPaintPart.h b/gfx/skia/skia/src/animator/SkPaintPart.h
new file mode 100644
index 000000000..5d94f049e
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkPaintPart.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPaintPart_DEFINED
+#define SkPaintPart_DEFINED
+
+#include "SkDisplayable.h"
+#include "SkMemberInfo.h"
+#include "SkPaint.h"
+#include "SkShader.h"
+#include "SkTypeface.h"
+#include "SkXfermode.h"
+
+class SkDrawPaint;
+class SkDrawMatrix;
+
+class SkPaintPart : public SkDisplayable {
+public:
+ SkPaintPart();
+ virtual bool add() = 0;
+ virtual SkDisplayable* getParent() const;
+ virtual bool setParent(SkDisplayable* parent);
+#ifdef SK_DEBUG
+ virtual bool isPaintPart() const { return true; }
+#endif
+protected:
+ SkDrawPaint* fPaint;
+};
+
+class SkDrawMaskFilter : public SkPaintPart {
+ DECLARE_EMPTY_MEMBER_INFO(MaskFilter);
+ virtual SkMaskFilter* getMaskFilter();
+protected:
+ bool add() override;
+};
+
+class SkDrawPathEffect : public SkPaintPart {
+ DECLARE_EMPTY_MEMBER_INFO(PathEffect);
+ virtual SkPathEffect* getPathEffect();
+protected:
+ bool add() override;
+};
+
+class SkDrawShader : public SkPaintPart {
+ DECLARE_DRAW_MEMBER_INFO(Shader);
+ SkDrawShader();
+ virtual SkShader* getShader();
+protected:
+ bool add() override;
+ SkMatrix* getMatrix(); // returns nullptr if matrix is nullptr
+ SkDrawMatrix* matrix;
+ int /*SkShader::TileMode*/ tileMode;
+};
+
+class SkDrawTypeface : public SkPaintPart {
+ DECLARE_DRAW_MEMBER_INFO(Typeface);
+ SkDrawTypeface();
+#ifdef SK_DUMP_ENABLED
+ void dump(SkAnimateMaker *) override;
+#endif
+ sk_sp<SkTypeface> getTypeface() { return SkTypeface::MakeFromName(fontName.c_str(), style); }
+protected:
+ bool add() override;
+ SkString fontName;
+ SkTypeface::Style style;
+};
+
+#endif // SkPaintPart_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkParseSVGPath.cpp b/gfx/skia/skia/src/animator/SkParseSVGPath.cpp
new file mode 100644
index 000000000..7050f7c1e
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkParseSVGPath.cpp
@@ -0,0 +1,234 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include <ctype.h>
+#include "SkDrawPath.h"
+#include "SkParse.h"
+#include "SkPoint.h"
+#include "SkUtils.h"
+#define QUADRATIC_APPROXIMATION 1
+
+#if QUADRATIC_APPROXIMATION
+////////////////////////////////////////////////////////////////////////////////////
+//functions to approximate a cubic using two quadratics
+
+// midPt sets the first argument to be the midpoint of the other two
+// it is used by quadApprox
+static inline void midPt(SkPoint& dest,const SkPoint& a,const SkPoint& b)
+{
+ dest.set(SkScalarAve(a.fX, b.fX),SkScalarAve(a.fY, b.fY));
+}
+// quadApprox - makes an approximation, which we hope is faster
+static void quadApprox(SkPath &fPath, const SkPoint &p0, const SkPoint &p1, const SkPoint &p2)
+{
+ //divide the cubic up into two cubics, then convert them into quadratics
+ //define our points
+ SkPoint c,j,k,l,m,n,o,p,q, mid;
+ fPath.getLastPt(&c);
+ midPt(j, p0, c);
+ midPt(k, p0, p1);
+ midPt(l, p1, p2);
+ midPt(o, j, k);
+ midPt(p, k, l);
+ midPt(q, o, p);
+ //compute the first half
+ m.set(SkScalarHalf(3*j.fX - c.fX), SkScalarHalf(3*j.fY - c.fY));
+ n.set(SkScalarHalf(3*o.fX -q.fX), SkScalarHalf(3*o.fY - q.fY));
+ midPt(mid,m,n);
+ fPath.quadTo(mid,q);
+ c = q;
+ //compute the second half
+ m.set(SkScalarHalf(3*p.fX - c.fX), SkScalarHalf(3*p.fY - c.fY));
+ n.set(SkScalarHalf(3*l.fX -p2.fX),SkScalarHalf(3*l.fY -p2.fY));
+ midPt(mid,m,n);
+ fPath.quadTo(mid,p2);
+}
+#endif
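
quadApprox() above splits the incoming cubic at its midpoint (plain de Casteljau midpoints) and replaces each half with a single quadratic whose control point is the average of the two "raised" tangent points (3*b - a)/2. The following self-contained sketch reproduces that construction with an ordinary point struct; it is not part of the patch, and the helper names are illustrative only.

```cpp
#include <cstdio>

struct Pt { double x, y; };
static Pt mid(Pt a, Pt b)  { return { (a.x + b.x) / 2, (a.y + b.y) / 2 }; }
static Pt lift(Pt b, Pt a) { return { (3 * b.x - a.x) / 2, (3 * b.y - a.y) / 2 }; }  // (3*b - a)/2

// Approximate the cubic (c, p0, p1, p2) with two quadratics, mirroring quadApprox() above.
static void approx(Pt c, Pt p0, Pt p1, Pt p2) {
    Pt j = mid(p0, c),  k = mid(p0, p1), l = mid(p1, p2);
    Pt o = mid(j, k),   p = mid(k, l),   q = mid(o, p);        // q = cubic midpoint
    Pt ctrl1 = mid(lift(j, c), lift(o, q));                    // first half:  quadTo(ctrl1, q)
    Pt ctrl2 = mid(lift(p, q), lift(l, p2));                   // second half: quadTo(ctrl2, p2)
    std::printf("quadTo(%.3f,%.3f -> %.3f,%.3f); quadTo(%.3f,%.3f -> %.3f,%.3f)\n",
                ctrl1.x, ctrl1.y, q.x, q.y, ctrl2.x, ctrl2.y, p2.x, p2.y);
}

int main() {
    approx({0, 0}, {0, 30}, {30, 30}, {30, 0});   // a symmetric cubic "cap"
    return 0;
}
```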
+
+
+static inline bool is_between(int c, int min, int max)
+{
+ return (unsigned)(c - min) <= (unsigned)(max - min);
+}
+
+static inline bool is_ws(int c)
+{
+ return is_between(c, 1, 32);
+}
+
+static inline bool is_digit(int c)
+{
+ return is_between(c, '0', '9');
+}
+
+static inline bool is_sep(int c)
+{
+ return is_ws(c) || c == ',';
+}
+
+static const char* skip_ws(const char str[])
+{
+ SkASSERT(str);
+ while (is_ws(*str))
+ str++;
+ return str;
+}
+
+static const char* skip_sep(const char str[])
+{
+ SkASSERT(str);
+ while (is_sep(*str))
+ str++;
+ return str;
+}
+
+static const char* find_points(const char str[], SkPoint value[], int count,
+ bool isRelative, SkPoint* relative)
+{
+ str = SkParse::FindScalars(str, &value[0].fX, count * 2);
+ if (isRelative) {
+ for (int index = 0; index < count; index++) {
+ value[index].fX += relative->fX;
+ value[index].fY += relative->fY;
+ }
+ }
+ return str;
+}
+
+static const char* find_scalar(const char str[], SkScalar* value,
+ bool isRelative, SkScalar relative)
+{
+ str = SkParse::FindScalar(str, value);
+ if (isRelative)
+ *value += relative;
+ return str;
+}
+
+void SkDrawPath::parseSVG() {
+ fPath.reset();
+ const char* data = d.c_str();
+ SkPoint f = {0, 0};
+ SkPoint c = {0, 0};
+ SkPoint lastc = {0, 0};
+ SkPoint points[3];
+ char op = '\0';
+ char previousOp = '\0';
+ bool relative = false;
+ do {
+ data = skip_ws(data);
+ if (data[0] == '\0')
+ break;
+ char ch = data[0];
+ if (is_digit(ch) || ch == '-' || ch == '+') {
+ if (op == '\0')
+ return;
+ }
+ else {
+ op = ch;
+ relative = false;
+ if (islower(op)) {
+ op = (char) toupper(op);
+ relative = true;
+ }
+ data++;
+ data = skip_sep(data);
+ }
+ switch (op) {
+ case 'M':
+ data = find_points(data, points, 1, relative, &c);
+ fPath.moveTo(points[0]);
+ op = 'L';
+ c = points[0];
+ break;
+ case 'L':
+ data = find_points(data, points, 1, relative, &c);
+ fPath.lineTo(points[0]);
+ c = points[0];
+ break;
+ case 'H': {
+ SkScalar x;
+ data = find_scalar(data, &x, relative, c.fX);
+ fPath.lineTo(x, c.fY);
+ c.fX = x;
+ }
+ break;
+ case 'V': {
+ SkScalar y;
+ data = find_scalar(data, &y, relative, c.fY);
+ fPath.lineTo(c.fX, y);
+ c.fY = y;
+ }
+ break;
+ case 'C':
+ data = find_points(data, points, 3, relative, &c);
+ goto cubicCommon;
+ case 'S':
+ data = find_points(data, &points[1], 2, relative, &c);
+ points[0] = c;
+ if (previousOp == 'C' || previousOp == 'S') {
+ points[0].fX -= lastc.fX - c.fX;
+ points[0].fY -= lastc.fY - c.fY;
+ }
+ cubicCommon:
+ // if (data[0] == '\0')
+ // return;
+#if QUADRATIC_APPROXIMATION
+ quadApprox(fPath, points[0], points[1], points[2]);
+#else //this way just does a boring, slow old cubic
+ fPath.cubicTo(points[0], points[1], points[2]);
+#endif
+ //if we are using the quadApprox, lastc is what it would have been if we had used
+ //cubicTo
+ lastc = points[1];
+ c = points[2];
+ break;
+ case 'Q': // Quadratic Bezier Curve
+ data = find_points(data, points, 2, relative, &c);
+ goto quadraticCommon;
+ case 'T':
+ data = find_points(data, &points[1], 1, relative, &c);
+ points[0] = points[1];
+ if (previousOp == 'Q' || previousOp == 'T') {
+ points[0].fX = c.fX * 2 - lastc.fX;
+ points[0].fY = c.fY * 2 - lastc.fY;
+ }
+ quadraticCommon:
+ fPath.quadTo(points[0], points[1]);
+ lastc = points[0];
+ c = points[1];
+ break;
+ case 'Z':
+ fPath.close();
+#if 0 // !!! still a bug?
+ if (fPath.isEmpty() && (f.fX != 0 || f.fY != 0)) {
+ c.fX -= SkScalar.Epsilon; // !!! enough?
+ fPath.moveTo(c);
+ fPath.lineTo(f);
+ fPath.close();
+ }
+#endif
+ c = f;
+ op = '\0';
+ break;
+ case '~': {
+ SkPoint args[2];
+ data = find_points(data, args, 2, false, nullptr);
+ fPath.moveTo(args[0].fX, args[0].fY);
+ fPath.lineTo(args[1].fX, args[1].fY);
+ }
+ break;
+ default:
+ SkASSERT(0);
+ return;
+ }
+ if (previousOp == 0)
+ f = c;
+ previousOp = op;
+ } while (data[0] > 0);
+}
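
For reference, parseSVG() above follows the usual SVG path conventions: a lowercase command letter means relative coordinates, whitespace and commas both act as separators, and coordinates following an 'M' continue as implicit 'L' commands. The stand-alone sketch below handles just the M/L/H/V/Z subset to show those conventions without any Skia types; MiniPath and parse_subset are hypothetical names, not Skia API.

```cpp
#include <cctype>
#include <cstdio>
#include <cstdlib>

// Tiny stand-in for fPath: records the absolute points it is driven to.
struct MiniPath {
    double cx = 0, cy = 0;
    void moveTo(double x, double y) { cx = x; cy = y; std::printf("moveTo(%g, %g)\n", x, y); }
    void lineTo(double x, double y) { cx = x; cy = y; std::printf("lineTo(%g, %g)\n", x, y); }
    void close()                    { std::printf("close()\n"); }
};

// Same conventions as parseSVG(): lowercase = relative, ws/comma separators, M continues as L.
static void parse_subset(const char* s, MiniPath& path) {
    char op = 0;
    bool relative = false;
    auto skip = [&] { while (*s && (isspace((unsigned char)*s) || *s == ',')) ++s; };
    auto number = [&](double base) {
        char* end = nullptr;
        double v = std::strtod(s, &end);
        s = end;
        return relative ? base + v : v;
    };
    while (skip(), *s) {
        if (isalpha((unsigned char)*s)) {
            op = (char) toupper((unsigned char)*s);
            relative = islower((unsigned char)*s) != 0;
            ++s;
            skip();
        }
        switch (op) {
            case 'M': { double x = number(path.cx); skip(); double y = number(path.cy);
                        path.moveTo(x, y); op = 'L'; break; }
            case 'L': { double x = number(path.cx); skip(); double y = number(path.cy);
                        path.lineTo(x, y); break; }
            case 'H': { path.lineTo(number(path.cx), path.cy); break; }
            case 'V': { path.lineTo(path.cx, number(path.cy)); break; }
            case 'Z': { path.close(); op = 0; break; }
            default:  return;   // commands outside this sketch's subset
        }
    }
}

int main() {
    MiniPath p;
    // -> moveTo(10,10) lineTo(30,10) lineTo(30,40) lineTo(10,40) close()
    parse_subset("M10,10 l20 0 V40 h-20 Z", p);
    return 0;
}
```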
diff --git a/gfx/skia/skia/src/animator/SkPathParts.cpp b/gfx/skia/skia/src/animator/SkPathParts.cpp
new file mode 100644
index 000000000..b5407ceeb
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkPathParts.cpp
@@ -0,0 +1,318 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkPathParts.h"
+#include "SkAnimateMaker.h"
+#include "SkDrawMatrix.h"
+#include "SkDrawRectangle.h"
+#include "SkDrawPath.h"
+
+SkPathPart::SkPathPart() : fPath(nullptr) {
+}
+
+void SkPathPart::dirty() {
+ fPath->dirty();
+}
+
+SkDisplayable* SkPathPart::getParent() const {
+ return fPath;
+}
+
+bool SkPathPart::setParent(SkDisplayable* parent) {
+ SkASSERT(parent != nullptr);
+ if (parent->isPath() == false)
+ return true;
+ fPath = (SkDrawPath*) parent;
+ return false;
+}
+
+// MoveTo
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkMoveTo::fInfo[] = {
+ SK_MEMBER(x, Float),
+ SK_MEMBER(y, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkMoveTo);
+
+SkMoveTo::SkMoveTo() : x(0), y(0) {
+}
+
+bool SkMoveTo::add() {
+ fPath->fPath.moveTo(x, y);
+ return false;
+}
+
+
+// RMoveTo
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkRMoveTo::fInfo[] = {
+ SK_MEMBER_INHERITED
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkRMoveTo);
+
+bool SkRMoveTo::add() {
+ fPath->fPath.rMoveTo(x, y);
+ return false;
+}
+
+
+// LineTo
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkLineTo::fInfo[] = {
+ SK_MEMBER(x, Float),
+ SK_MEMBER(y, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkLineTo);
+
+SkLineTo::SkLineTo() : x(0), y(0) {
+}
+
+bool SkLineTo::add() {
+ fPath->fPath.lineTo(x, y);
+ return false;
+}
+
+
+// RLineTo
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkRLineTo::fInfo[] = {
+ SK_MEMBER_INHERITED
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkRLineTo);
+
+bool SkRLineTo::add() {
+ fPath->fPath.rLineTo(x, y);
+ return false;
+}
+
+
+// QuadTo
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkQuadTo::fInfo[] = {
+ SK_MEMBER(x1, Float),
+ SK_MEMBER(x2, Float),
+ SK_MEMBER(y1, Float),
+ SK_MEMBER(y2, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkQuadTo);
+
+SkQuadTo::SkQuadTo() : x1(0), y1(0), x2(0), y2(0) {
+}
+
+bool SkQuadTo::add() {
+ fPath->fPath.quadTo(x1, y1, x2, y2);
+ return false;
+}
+
+
+// RQuadTo
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkRQuadTo::fInfo[] = {
+ SK_MEMBER_INHERITED
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkRQuadTo);
+
+bool SkRQuadTo::add() {
+ fPath->fPath.rQuadTo(x1, y1, x2, y2);
+ return false;
+}
+
+
+// CubicTo
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkCubicTo::fInfo[] = {
+ SK_MEMBER(x1, Float),
+ SK_MEMBER(x2, Float),
+ SK_MEMBER(x3, Float),
+ SK_MEMBER(y1, Float),
+ SK_MEMBER(y2, Float),
+ SK_MEMBER(y3, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkCubicTo);
+
+SkCubicTo::SkCubicTo() : x1(0), y1(0), x2(0), y2(0), x3(0), y3(0) {
+}
+
+bool SkCubicTo::add() {
+ fPath->fPath.cubicTo(x1, y1, x2, y2, x3, y3);
+ return false;
+}
+
+
+// RCubicTo
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkRCubicTo::fInfo[] = {
+ SK_MEMBER_INHERITED
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkRCubicTo);
+
+bool SkRCubicTo::add() {
+ fPath->fPath.rCubicTo(x1, y1, x2, y2, x3, y3);
+ return false;
+}
+
+
+// SkClose
+bool SkClose::add() {
+ fPath->fPath.close();
+ return false;
+}
+
+
+// SkAddGeom
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkAddGeom::fInfo[] = {
+ SK_MEMBER(direction, PathDirection)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkAddGeom);
+
+SkAddGeom::SkAddGeom() : direction(SkPath::kCCW_Direction) {
+}
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkAddRect::fInfo[] = {
+ SK_MEMBER_INHERITED,
+ SK_MEMBER_ALIAS(bottom, fRect.fBottom, Float),
+ SK_MEMBER_ALIAS(left, fRect.fLeft, Float),
+ SK_MEMBER_ALIAS(right, fRect.fRight, Float),
+ SK_MEMBER_ALIAS(top, fRect.fTop, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkAddRect);
+
+SkAddRect::SkAddRect() {
+ fRect.setEmpty();
+}
+
+bool SkAddRect::add() {
+ fPath->fPath.addRect(fRect, (SkPath::Direction) direction);
+ return false;
+}
+
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkAddOval::fInfo[] = {
+ SK_MEMBER_INHERITED
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkAddOval);
+
+bool SkAddOval::add() {
+ fPath->fPath.addOval(fRect, (SkPath::Direction) direction);
+ return false;
+}
+
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkAddCircle::fInfo[] = {
+ SK_MEMBER_INHERITED,
+ SK_MEMBER(radius, Float),
+ SK_MEMBER(x, Float),
+ SK_MEMBER(y, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkAddCircle);
+
+SkAddCircle::SkAddCircle() : radius(0), x(0), y(0) {
+}
+
+bool SkAddCircle::add() {
+ fPath->fPath.addCircle(x, y, radius, (SkPath::Direction) direction);
+ return false;
+}
+
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkAddRoundRect::fInfo[] = {
+ SK_MEMBER_INHERITED,
+ SK_MEMBER(rx, Float),
+ SK_MEMBER(ry, Float)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkAddRoundRect);
+
+SkAddRoundRect::SkAddRoundRect() : rx(0), ry(0) {
+}
+
+bool SkAddRoundRect::add() {
+ fPath->fPath.addRoundRect(fRect, rx, ry, (SkPath::Direction) direction);
+ return false;
+}
+
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkAddPath::fInfo[] = {
+ SK_MEMBER(matrix, Matrix),
+ SK_MEMBER(path, Path)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkAddPath);
+
+SkAddPath::SkAddPath() : matrix(nullptr), path(nullptr) {
+}
+
+bool SkAddPath::add() {
+ SkASSERT (path != nullptr);
+ if (matrix)
+ fPath->fPath.addPath(path->fPath, matrix->getMatrix());
+ else
+ fPath->fPath.addPath(path->fPath);
+ return false;
+}
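
Each class in this file wraps a single SkPath verb and, per the animator convention, returns false on success. Stripped of the SkMemberInfo reflection layer, the same geometry is just the underlying SkPath calls; the short sketch below uses only calls that already appear above (the function name is illustrative).

```cpp
#include "SkPath.h"

static SkPath build_directly() {
    SkPath path;
    path.moveTo(10, 10);                      // SkMoveTo
    path.lineTo(50, 10);                      // SkLineTo
    path.quadTo(70, 10, 70, 30);              // SkQuadTo
    path.cubicTo(70, 50, 50, 70, 30, 70);     // SkCubicTo
    path.close();                             // SkClose
    path.addRect(SkRect::MakeLTRB(80, 10, 120, 50), SkPath::kCCW_Direction);  // SkAddRect
    path.addCircle(140, 30, 15, SkPath::kCCW_Direction);                      // SkAddCircle
    return path;
}
```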
diff --git a/gfx/skia/skia/src/animator/SkPathParts.h b/gfx/skia/skia/src/animator/SkPathParts.h
new file mode 100644
index 000000000..afa7b662d
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkPathParts.h
@@ -0,0 +1,164 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPathParts_DEFINED
+#define SkPathParts_DEFINED
+
+#include "SkDisplayable.h"
+#include "SkMemberInfo.h"
+#include "SkPath.h"
+
+class SkDrawPath;
+class SkDrawMatrix;
+
+class SkPathPart : public SkDisplayable {
+public:
+ SkPathPart();
+ virtual bool add() = 0;
+ virtual void dirty();
+ virtual SkDisplayable* getParent() const;
+ virtual bool setParent(SkDisplayable* parent);
+#ifdef SK_DEBUG
+ virtual bool isPathPart() const { return true; }
+#endif
+protected:
+ SkDrawPath* fPath;
+};
+
+class SkMoveTo : public SkPathPart {
+ DECLARE_MEMBER_INFO(MoveTo);
+ SkMoveTo();
+ bool add() override;
+protected:
+ SkScalar x;
+ SkScalar y;
+};
+
+class SkRMoveTo : public SkMoveTo {
+ DECLARE_MEMBER_INFO(RMoveTo);
+ bool add() override;
+private:
+ typedef SkMoveTo INHERITED;
+};
+
+class SkLineTo : public SkPathPart {
+ DECLARE_MEMBER_INFO(LineTo);
+ SkLineTo();
+ bool add() override;
+protected:
+ SkScalar x;
+ SkScalar y;
+};
+
+class SkRLineTo : public SkLineTo {
+ DECLARE_MEMBER_INFO(RLineTo);
+ bool add() override;
+private:
+ typedef SkLineTo INHERITED;
+};
+
+class SkQuadTo : public SkPathPart {
+ DECLARE_MEMBER_INFO(QuadTo);
+ SkQuadTo();
+ bool add() override;
+protected:
+ SkScalar x1;
+ SkScalar y1;
+ SkScalar x2;
+ SkScalar y2;
+};
+
+class SkRQuadTo : public SkQuadTo {
+ DECLARE_MEMBER_INFO(RQuadTo);
+ bool add() override;
+private:
+ typedef SkQuadTo INHERITED;
+};
+
+class SkCubicTo : public SkPathPart {
+ DECLARE_MEMBER_INFO(CubicTo);
+ SkCubicTo();
+ bool add() override;
+protected:
+ SkScalar x1;
+ SkScalar y1;
+ SkScalar x2;
+ SkScalar y2;
+ SkScalar x3;
+ SkScalar y3;
+};
+
+class SkRCubicTo : public SkCubicTo {
+ DECLARE_MEMBER_INFO(RCubicTo);
+ bool add() override;
+private:
+ typedef SkCubicTo INHERITED;
+};
+
+class SkClose : public SkPathPart {
+ DECLARE_EMPTY_MEMBER_INFO(Close);
+ bool add() override;
+};
+
+class SkAddGeom : public SkPathPart {
+ DECLARE_PRIVATE_MEMBER_INFO(AddGeom);
+ SkAddGeom();
+protected:
+ int /*SkPath::Direction*/ direction;
+};
+
+class SkAddRect : public SkAddGeom {
+ DECLARE_MEMBER_INFO(AddRect);
+ SkAddRect();
+ bool add() override;
+protected:
+ SkRect fRect;
+private:
+ typedef SkAddGeom INHERITED;
+};
+
+class SkAddOval : public SkAddRect {
+ DECLARE_MEMBER_INFO(AddOval);
+ bool add() override;
+private:
+ typedef SkAddRect INHERITED;
+};
+
+class SkAddCircle : public SkAddGeom {
+ DECLARE_MEMBER_INFO(AddCircle);
+ SkAddCircle();
+ bool add() override;
+private:
+ SkScalar radius;
+ SkScalar x;
+ SkScalar y;
+ typedef SkAddGeom INHERITED;
+};
+
+class SkAddRoundRect : public SkAddRect {
+ DECLARE_MEMBER_INFO(AddRoundRect);
+ SkAddRoundRect();
+ bool add() override;
+private:
+ SkScalar rx;
+ SkScalar ry;
+ typedef SkAddRect INHERITED;
+};
+
+class SkAddPath : public SkPathPart {
+ DECLARE_MEMBER_INFO(AddPath);
+ SkAddPath();
+ bool add() override;
+private:
+ typedef SkPathPart INHERITED;
+ SkDrawMatrix* matrix;
+ SkDrawPath* path;
+};
+
+#endif // SkPathParts_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkPostParts.cpp b/gfx/skia/skia/src/animator/SkPostParts.cpp
new file mode 100644
index 000000000..bfd538553
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkPostParts.cpp
@@ -0,0 +1,56 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkPostParts.h"
+#include "SkDisplayPost.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkDataInput::fInfo[] = {
+ SK_MEMBER_INHERITED
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkDataInput);
+
+SkDataInput::SkDataInput() : fParent(nullptr) {}
+
+bool SkDataInput::add() {
+ SkASSERT(name.size() > 0);
+ const char* dataName = name.c_str();
+ if (fInt != (int) SK_NaN32)
+ fParent->fEvent.setS32(dataName, fInt);
+ else if (SkScalarIsNaN(fFloat) == false)
+ fParent->fEvent.setScalar(dataName, fFloat);
+ else if (string.size() > 0)
+ fParent->fEvent.setString(dataName, string);
+// else
+// SkASSERT(0);
+ return false;
+}
+
+void SkDataInput::dirty() {
+ fParent->dirty();
+}
+
+SkDisplayable* SkDataInput::getParent() const {
+ return fParent;
+}
+
+bool SkDataInput::setParent(SkDisplayable* displayable) {
+ if (displayable->isPost() == false)
+ return true;
+ fParent = (SkPost*) displayable;
+ return false;
+}
+
+void SkDataInput::onEndElement(SkAnimateMaker&) {
+ add();
+}
diff --git a/gfx/skia/skia/src/animator/SkPostParts.h b/gfx/skia/skia/src/animator/SkPostParts.h
new file mode 100644
index 000000000..fb2845be6
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkPostParts.h
@@ -0,0 +1,31 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPostParts_DEFINED
+#define SkPostParts_DEFINED
+
+#include "SkDisplayInput.h"
+
+class SkPost;
+
+class SkDataInput: public SkInput {
+ DECLARE_MEMBER_INFO(DataInput);
+ SkDataInput();
+ bool add();
+ void dirty() override;
+ SkDisplayable* getParent() const override;
+ void onEndElement(SkAnimateMaker& ) override;
+ bool setParent(SkDisplayable* ) override;
+protected:
+ SkPost* fParent;
+ typedef SkInput INHERITED;
+ friend class SkPost;
+};
+
+#endif // SkPostParts_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkScript.cpp b/gfx/skia/skia/src/animator/SkScript.cpp
new file mode 100644
index 000000000..cd864146f
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkScript.cpp
@@ -0,0 +1,1890 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkScript.h"
+#include "SkMath.h"
+#include "SkParse.h"
+#include "SkString.h"
+#include "SkTypedArray.h"
+
+/* things to do
+ ? re-enable support for struct literals (e.g., for initializing points or rects)
+ {x:1, y:2}
+ ? use standard XML / script notation like document.getElementById("canvas");
+ finish support for typed arrays
+ ? allow indexing arrays by string
+ this could map to the 'name' attribute of a given child of an array
+ ? allow multiple types in the array
+ remove SkDisplayType.h // from SkOperand.h
+ merge type and operand arrays into scriptvalue array
+*/
+
+#ifdef SK_DEBUG
+static const char* errorStrings[] = {
+ "array index of out bounds", // kArrayIndexOutOfBounds
+ "could not find reference id", // kCouldNotFindReferencedID
+ "dot operator expects object", // kDotOperatorExpectsObject
+ "error in array index", // kErrorInArrrayIndex
+ "error in function parameters", // kErrorInFunctionParameters
+ "expected array", // kExpectedArray
+ "expected boolean expression", // kExpectedBooleanExpression
+ "expected field name", // kExpectedFieldName
+ "expected hex", // kExpectedHex
+ "expected int for condition operator", // kExpectedIntForConditionOperator
+ "expected number", // kExpectedNumber
+ "expected number for array index", // kExpectedNumberForArrayIndex
+ "expected operator", // kExpectedOperator
+ "expected token", // kExpectedToken
+ "expected token before dot operator", // kExpectedTokenBeforeDotOperator
+ "expected value", // kExpectedValue
+ "handle member failed", // kHandleMemberFailed
+ "handle member function failed", // kHandleMemberFunctionFailed
+ "handle unbox failed", // kHandleUnboxFailed
+ "index out of range", // kIndexOutOfRange
+ "mismatched array brace", // kMismatchedArrayBrace
+ "mismatched brackets", // kMismatchedBrackets
+ "no function handler found", // kNoFunctionHandlerFound
+ "premature end", // kPrematureEnd
+ "too many parameters", // kTooManyParameters
+ "type conversion failed", // kTypeConversionFailed
+ "unterminated string" // kUnterminatedString
+};
+#endif
+
+const SkScriptEngine::SkOperatorAttributes SkScriptEngine::gOpAttributes[] = {
+ { kNoType, kNoType, kNoBias }, // kUnassigned,
+ { SkOpType(kInt | kScalar | kString), SkOpType(kInt | kScalar | kString), kTowardsString }, // kAdd
+ // kAddInt = kAdd,
+ { kNoType, kNoType, kNoBias }, // kAddScalar,
+ { kNoType, kNoType, kNoBias }, // kAddString,
+ { kNoType, kNoType, kNoBias }, // kArrayOp,
+ { kInt, kInt, kNoBias }, // kBitAnd
+ { kNoType, kInt, kNoBias }, // kBitNot
+ { kInt, kInt, kNoBias }, // kBitOr
+ { SkOpType(kInt | kScalar), SkOpType(kInt | kScalar), kNoBias }, // kDivide
+ // kDivideInt = kDivide
+ { kNoType, kNoType, kNoBias }, // kDivideScalar
+ { kNoType, kNoType, kNoBias }, // kElse
+ { SkOpType(kInt | kScalar | kString), SkOpType(kInt | kScalar | kString), kTowardsNumber }, // kEqual
+ // kEqualInt = kEqual
+ { kNoType, kNoType, kNoBias }, // kEqualScalar
+ { kNoType, kNoType, kNoBias }, // kEqualString
+ { kInt, kNoType, kNoBias }, // kFlipOps
+ { SkOpType(kInt | kScalar | kString), SkOpType(kInt | kScalar | kString), kTowardsNumber }, // kGreaterEqual
+ // kGreaterEqualInt = kGreaterEqual
+ { kNoType, kNoType, kNoBias }, // kGreaterEqualScalar
+ { kNoType, kNoType, kNoBias }, // kGreaterEqualString
+ { kNoType, kNoType, kNoBias }, // kIf
+ { kNoType, kInt, kNoBias }, // kLogicalAnd (really, ToBool)
+ { kNoType, kInt, kNoBias }, // kLogicalNot
+ { kInt, kInt, kNoBias }, // kLogicalOr
+ { kNoType, SkOpType(kInt | kScalar), kNoBias }, // kMinus
+ // kMinusInt = kMinus
+ { kNoType, kNoType, kNoBias }, // kMinusScalar
+ { SkOpType(kInt | kScalar), SkOpType(kInt | kScalar), kNoBias }, // kModulo
+ // kModuloInt = kModulo
+ { kNoType, kNoType, kNoBias }, // kModuloScalar
+ { SkOpType(kInt | kScalar), SkOpType(kInt | kScalar), kNoBias }, // kMultiply
+ // kMultiplyInt = kMultiply
+ { kNoType, kNoType, kNoBias }, // kMultiplyScalar
+ { kNoType, kNoType, kNoBias }, // kParen
+ { kInt, kInt, kNoBias }, // kShiftLeft
+ { kInt, kInt, kNoBias }, // kShiftRight
+ { SkOpType(kInt | kScalar), SkOpType(kInt | kScalar), kNoBias }, // kSubtract
+ // kSubtractInt = kSubtract
+ { kNoType, kNoType, kNoBias }, // kSubtractScalar
+ { kInt, kInt, kNoBias } // kXor
+};
+
+// Note that the real precedence for () [] is '2',
+// but here, precedence means 'while an operator with an equal or smaller precedence than the
+// current one is on the stack, process it'. This allows 3+5*2 to defer the add until after the
+// multiply is performed, since the add precedence is not smaller than the multiply precedence.
+// But (3*4 does not process the '(', since brackets are greater than all other precedences.
+// (A small standalone sketch illustrating this rule follows the precedence table below.)
+#define kBracketPrecedence 16
+#define kIfElsePrecedence 15
+
+const signed char SkScriptEngine::gPrecedence[] = {
+ -1, // kUnassigned,
+ 6, // kAdd,
+ // kAddInt = kAdd,
+ 6, // kAddScalar,
+ 6, // kAddString, // string concat
+ kBracketPrecedence, // kArrayOp,
+ 10, // kBitAnd,
+ 4, // kBitNot,
+ 12, // kBitOr,
+ 5, // kDivide,
+ // kDivideInt = kDivide,
+ 5, // kDivideScalar,
+ kIfElsePrecedence, // kElse,
+ 9, // kEqual,
+ // kEqualInt = kEqual,
+ 9, // kEqualScalar,
+ 9, // kEqualString,
+ -1, // kFlipOps,
+ 8, // kGreaterEqual,
+ // kGreaterEqualInt = kGreaterEqual,
+ 8, // kGreaterEqualScalar,
+ 8, // kGreaterEqualString,
+ kIfElsePrecedence, // kIf,
+ 13, // kLogicalAnd,
+ 4, // kLogicalNot,
+ 14, // kLogicalOr,
+ 4, // kMinus,
+ // kMinusInt = kMinus,
+ 4, // kMinusScalar,
+ 5, // kModulo,
+ // kModuloInt = kModulo,
+ 5, // kModuloScalar,
+ 5, // kMultiply,
+ // kMultiplyInt = kMultiply,
+ 5, // kMultiplyScalar,
+ kBracketPrecedence, // kParen,
+ 7, // kShiftLeft,
+ 7, // kShiftRight, // signed
+ 6, // kSubtract,
+ // kSubtractInt = kSubtract,
+ 6, // kSubtractScalar,
+ 11, // kXor
+};
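
The comment before this table describes the stack rule: before pushing an operator, keep processing the stack while the operator on top has an equal or smaller precedence value (smaller values bind tighter here, e.g. multiply = 5, add = 6). The tiny self-contained calculator below, for single-digit '+'/'*' expressions, demonstrates why 3+5*2 defers the add; it borrows only the rule and those two precedence values from the table, and everything else is hypothetical.

```cpp
#include <cctype>
#include <cstdio>
#include <vector>

static int precedence(char op) { return op == '*' ? 5 : 6; }   // from gPrecedence: kMultiply=5, kAdd=6

static int eval(const char* s) {
    std::vector<int>  values;
    std::vector<char> ops;
    auto apply = [&] {
        int b = values.back(); values.pop_back();
        int a = values.back(); values.pop_back();
        char op = ops.back();  ops.pop_back();
        values.push_back(op == '*' ? a * b : a + b);
    };
    for (; *s; ++s) {
        if (isdigit((unsigned char)*s)) {
            values.push_back(*s - '0');
        } else {
            // the rule from the comment above: while an equal or smaller precedence
            // is on the stack, process it before pushing the new operator
            while (!ops.empty() && precedence(ops.back()) <= precedence(*s))
                apply();
            ops.push_back(*s);
        }
    }
    while (!ops.empty())
        apply();
    return values.back();
}

int main() {
    std::printf("3+5*2 = %d\n", eval("3+5*2"));   // multiply processed first: prints 13
    std::printf("3*5+2 = %d\n", eval("3*5+2"));   // prints 17
    return 0;
}
```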
+
+static inline bool is_between(int c, int min, int max)
+{
+ return (unsigned)(c - min) <= (unsigned)(max - min);
+}
+
+static inline bool is_ws(int c)
+{
+ return is_between(c, 1, 32);
+}
+
+static int token_length(const char* start) {
+ char ch = start[0];
+ if (! is_between(ch, 'a' , 'z') && ! is_between(ch, 'A', 'Z') && ch != '_' && ch != '$')
+ return -1;
+ int length = 0;
+ do
+ ch = start[++length];
+ while (is_between(ch, 'a' , 'z') || is_between(ch, 'A', 'Z') || is_between(ch, '0', '9') ||
+ ch == '_' || ch == '$');
+ return length;
+}
+
+SkScriptEngine::SkScriptEngine(SkOpType returnType) :
+ fTokenLength(0), fReturnType(returnType), fError(kNoError)
+{
+ SkSuppress noInitialSuppress;
+ noInitialSuppress.fOperator = kUnassigned;
+ noInitialSuppress.fOpStackDepth = 0;
+ noInitialSuppress.fSuppress = false;
+ noInitialSuppress.fElse = 0;
+ fSuppressStack.push(noInitialSuppress);
+ *fOpStack.push() = kParen;
+ fTrackArray.appendClear();
+ fTrackString.appendClear();
+}
+
+SkScriptEngine::~SkScriptEngine() {
+ for (SkString** stringPtr = fTrackString.begin(); stringPtr < fTrackString.end(); stringPtr++)
+ delete *stringPtr;
+ for (SkTypedArray** arrayPtr = fTrackArray.begin(); arrayPtr < fTrackArray.end(); arrayPtr++)
+ delete *arrayPtr;
+}
+
+int SkScriptEngine::arithmeticOp(char ch, char nextChar, bool lastPush) {
+ SkOp op = kUnassigned;
+ bool reverseOperands = false;
+ bool negateResult = false;
+ int advance = 1;
+ switch (ch) {
+ case '+':
+ // !!! ignoring unary plus as implemented here has the side effect of
+ // suppressing errors like +"hi"
+ if (lastPush == false) // unary plus, don't push an operator
+ goto returnAdv;
+ op = kAdd;
+ break;
+ case '-':
+ op = lastPush ? kSubtract : kMinus;
+ break;
+ case '*':
+ op = kMultiply;
+ break;
+ case '/':
+ op = kDivide;
+ break;
+ case '>':
+ if (nextChar == '>') {
+ op = kShiftRight;
+ goto twoChar;
+ }
+ op = kGreaterEqual;
+ if (nextChar == '=')
+ goto twoChar;
+ reverseOperands = negateResult = true;
+ break;
+ case '<':
+ if (nextChar == '<') {
+ op = kShiftLeft;
+ goto twoChar;
+ }
+ op = kGreaterEqual;
+ reverseOperands = nextChar == '=';
+ negateResult = ! reverseOperands;
+ advance += reverseOperands;
+ break;
+ case '=':
+ if (nextChar == '=') {
+ op = kEqual;
+ goto twoChar;
+ }
+ break;
+ case '!':
+ if (nextChar == '=') {
+ op = kEqual;
+ negateResult = true;
+twoChar:
+ advance++;
+ break;
+ }
+ op = kLogicalNot;
+ break;
+ case '?':
+ op = kIf;
+ break;
+ case ':':
+ op = kElse;
+ break;
+ case '^':
+ op = kXor;
+ break;
+ case '(':
+ *fOpStack.push() = kParen; // push even if eval is suppressed
+ goto returnAdv;
+ case '&':
+ SkASSERT(nextChar != '&');
+ op = kBitAnd;
+ break;
+ case '|':
+ SkASSERT(nextChar != '|');
+ op = kBitOr;
+ break;
+ case '%':
+ op = kModulo;
+ break;
+ case '~':
+ op = kBitNot;
+ break;
+ }
+ if (op == kUnassigned)
+ return 0;
+ if (fSuppressStack.top().fSuppress == false) {
+ signed char precedence = gPrecedence[op];
+ do {
+ int idx = 0;
+ SkOp compare;
+ do {
+ compare = fOpStack.index(idx);
+ if ((compare & kArtificialOp) == 0)
+ break;
+ idx++;
+ } while (true);
+ signed char topPrecedence = gPrecedence[compare];
+ SkASSERT(topPrecedence != -1);
+ if (topPrecedence > precedence || (topPrecedence == precedence &&
+ gOpAttributes[op].fLeftType == kNoType)) {
+ break;
+ }
+ if (processOp() == false)
+ return 0; // error
+ } while (true);
+ if (negateResult)
+ *fOpStack.push() = (SkOp) (kLogicalNot | kArtificialOp);
+ fOpStack.push(op);
+ if (reverseOperands)
+ *fOpStack.push() = (SkOp) (kFlipOps | kArtificialOp);
+ }
+returnAdv:
+ return advance;
+}
+
+void SkScriptEngine::boxCallBack(_boxCallBack func, void* userStorage) {
+ UserCallBack callBack;
+ callBack.fBoxCallBack = func;
+ commonCallBack(kBox, callBack, userStorage);
+}
+
+void SkScriptEngine::commonCallBack(CallBackType type, UserCallBack& callBack, void* userStorage) {
+ callBack.fCallBackType = type;
+ callBack.fUserStorage = userStorage;
+ *fUserCallBacks.prepend() = callBack;
+}
+
+bool SkScriptEngine::convertParams(SkTDArray<SkScriptValue>& params,
+ const SkFunctionParamType* paramTypes, int paramCount) {
+ if (params.count() > paramCount) {
+ fError = kTooManyParameters;
+ return false; // too many parameters passed
+ }
+ for (int index = 0; index < params.count(); index++) {
+ if (convertTo((SkDisplayTypes) paramTypes[index], &params[index]) == false)
+ return false;
+ }
+ return true;
+}
+
+bool SkScriptEngine::convertTo(SkDisplayTypes toType, SkScriptValue* value ) {
+ SkDisplayTypes type = value->fType;
+ if (type == toType)
+ return true;
+ if (ToOpType(type) == kObject) {
+#if 0 // !!! I want object->string to get string from displaystringtype, not id
+ if (ToOpType(toType) == kString) {
+ bool success = handleObjectToString(value->fOperand.fObject);
+ if (success == false)
+ return false;
+ SkOpType type;
+ fTypeStack.pop(&type);
+ value->fType = ToDisplayType(type);
+ fOperandStack.pop(&value->fOperand);
+ return true;
+ }
+#endif
+ if (handleUnbox(value) == false) {
+ fError = kHandleUnboxFailed;
+ return false;
+ }
+ return convertTo(toType, value);
+ }
+ return ConvertTo(this, toType, value);
+}
+
+bool SkScriptEngine::evaluateDot(const char*& script, bool suppressed) {
+ size_t fieldLength = token_length(++script); // skip dot
+ if (fieldLength == 0) {
+ fError = kExpectedFieldName;
+ return false;
+ }
+ const char* field = script;
+ script += fieldLength;
+ bool success = handleProperty(suppressed);
+ if (success == false) {
+ fError = kCouldNotFindReferencedID; // note: never generated by standard animator plugins
+ return false;
+ }
+ return evaluateDotParam(script, suppressed, field, fieldLength);
+}
+
+bool SkScriptEngine::evaluateDotParam(const char*& script, bool suppressed,
+ const char* field, size_t fieldLength) {
+ void* object;
+ if (suppressed)
+ object = nullptr;
+ else {
+ if (fTypeStack.top() != kObject) {
+ fError = kDotOperatorExpectsObject;
+ return false;
+ }
+ object = fOperandStack.top().fObject;
+ fTypeStack.pop();
+ fOperandStack.pop();
+ }
+ char ch; // see if it is a simple member or a function
+ while (is_ws(ch = script[0]))
+ script++;
+ bool success = true;
+ if (ch != '(') {
+ if (suppressed == false) {
+ if ((success = handleMember(field, fieldLength, object)) == false)
+ fError = kHandleMemberFailed;
+ }
+ } else {
+ SkTDArray<SkScriptValue> params;
+ *fBraceStack.push() = kFunctionBrace;
+ success = functionParams(&script, params);
+ if (success && suppressed == false &&
+ (success = handleMemberFunction(field, fieldLength, object, params)) == false)
+ fError = kHandleMemberFunctionFailed;
+ }
+ return success;
+}
+
+bool SkScriptEngine::evaluateScript(const char** scriptPtr, SkScriptValue* value) {
+#ifdef SK_DEBUG
+ const char** original = scriptPtr;
+#endif
+ bool success;
+ const char* inner;
+ if (strncmp(*scriptPtr, "#script:", sizeof("#script:") - 1) == 0) {
+ *scriptPtr += sizeof("#script:") - 1;
+ if (fReturnType == kNoType || fReturnType == kString) {
+ success = innerScript(scriptPtr, value);
+ if (success == false)
+ goto end;
+ inner = value->fOperand.fString->c_str();
+ scriptPtr = &inner;
+ }
+ }
+ {
+ success = innerScript(scriptPtr, value);
+ if (success == false)
+ goto end;
+ const char* script = *scriptPtr;
+ char ch;
+ while (is_ws(ch = script[0]))
+ script++;
+ if (ch != '\0') {
+ // error may trigger on scripts like "50,0" that were intended to be written as "[50, 0]"
+ fError = kPrematureEnd;
+ success = false;
+ }
+ }
+end:
+#ifdef SK_DEBUG
+ if (success == false) {
+ SkDebugf("script failed: %s", *original);
+ if (fError)
+ SkDebugf(" %s", errorStrings[fError - 1]);
+ SkDebugf("\n");
+ }
+#endif
+ return success;
+}
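
evaluateScript() is the public entry point: it runs innerScript() and then requires that the whole input was consumed (hence the kPrematureEnd note above about "50,0" vs "[50, 0]"). Below is a minimal usage sketch; it assumes the SkOpType enumerators and the SkScriptValue layout declared in SkScript.h, which is not shown in this hunk.

```cpp
#include "SkScript.h"

static void script_demo() {
    SkScriptEngine engine(SkScriptEngine::kInt);   // assumed enumerator spelling; request an int result
    SkScriptValue  value;

    const char* script = "3+5*2";                  // the precedence example from the gPrecedence comment
    if (engine.evaluateScript(&script, &value)) {
        int result = value.fOperand.fS32;          // 13 if the add was correctly deferred
        (void) result;
    }
}
```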
+
+void SkScriptEngine::forget(SkTypedArray* array) {
+ if (array->getType() == SkType_String) {
+ for (int index = 0; index < array->count(); index++) {
+ SkString* string = (*array)[index].fString;
+ int found = fTrackString.find(string);
+ if (found >= 0)
+ fTrackString.remove(found);
+ }
+ return;
+ }
+ if (array->getType() == SkType_Array) {
+ for (int index = 0; index < array->count(); index++) {
+ SkTypedArray* child = (*array)[index].fArray;
+ forget(child); // forgets children of child
+ int found = fTrackArray.find(child);
+ if (found >= 0)
+ fTrackArray.remove(found);
+ }
+ }
+}
+
+void SkScriptEngine::functionCallBack(_functionCallBack func, void* userStorage) {
+ UserCallBack callBack;
+ callBack.fFunctionCallBack = func;
+ commonCallBack(kFunction, callBack, userStorage);
+}
+
+bool SkScriptEngine::functionParams(const char** scriptPtr, SkTDArray<SkScriptValue>& params) {
+ (*scriptPtr)++; // skip open paren
+ *fOpStack.push() = kParen;
+ *fBraceStack.push() = kFunctionBrace;
+ SkBool suppressed = fSuppressStack.top().fSuppress;
+ do {
+ SkScriptValue value;
+ bool success = innerScript(scriptPtr, suppressed ? nullptr : &value);
+ if (success == false) {
+ fError = kErrorInFunctionParameters;
+ return false;
+ }
+ if (suppressed)
+ continue;
+ *params.append() = value;
+ } while ((*scriptPtr)[-1] == ',');
+ fBraceStack.pop();
+ fOpStack.pop(); // pop paren
+ (*scriptPtr)++; // advance beyond close paren
+ return true;
+}
+
+#ifdef SK_DEBUG
+bool SkScriptEngine::getErrorString(SkString* str) const {
+ if (fError)
+ str->set(errorStrings[fError - 1]);
+ return fError != 0;
+}
+#endif
+
+bool SkScriptEngine::innerScript(const char** scriptPtr, SkScriptValue* value) {
+ const char* script = *scriptPtr;
+ char ch;
+ bool lastPush = false;
+ bool success = true;
+ int opBalance = fOpStack.count();
+ int baseBrace = fBraceStack.count();
+ int suppressBalance = fSuppressStack.count();
+ while ((ch = script[0]) != '\0') {
+ if (is_ws(ch)) {
+ script++;
+ continue;
+ }
+ SkBool suppressed = fSuppressStack.top().fSuppress;
+ SkOperand operand;
+ const char* dotCheck;
+ if (fBraceStack.count() > baseBrace) {
+#if 0 // disable support for struct brace
+ if (ch == ':') {
+ SkASSERT(fTokenLength > 0);
+ SkASSERT(fBraceStack.top() == kStructBrace);
+ ++script;
+ SkASSERT(fDisplayable);
+ SkString token(fToken, fTokenLength);
+ fTokenLength = 0;
+ const char* tokenName = token.c_str();
+ const SkMemberInfo* tokenInfo SK_INIT_TO_AVOID_WARNING;
+ if (suppressed == false) {
+ SkDisplayTypes type = fInfo->getType();
+ tokenInfo = SkDisplayType::GetMember(type, &tokenName);
+ SkASSERT(tokenInfo);
+ }
+ SkScriptValue tokenValue;
+ success = innerScript(&script, &tokenValue); // terminate and return on comma, close brace
+ SkASSERT(success);
+ if (suppressed == false) {
+ if (tokenValue.fType == SkType_Displayable) {
+ SkASSERT(SkDisplayType::IsDisplayable(tokenInfo->getType()));
+ fDisplayable->setReference(tokenInfo, tokenValue.fOperand.fDisplayable);
+ } else {
+ if (tokenValue.fType != tokenInfo->getType()) {
+ if (convertTo(tokenInfo->getType(), &tokenValue) == false)
+ return false;
+ }
+ tokenInfo->writeValue(fDisplayable, nullptr, 0, 0,
+ (void*) ((char*) fInfo->memberData(fDisplayable) + tokenInfo->fOffset + fArrayOffset),
+ tokenInfo->getType(), tokenValue);
+ }
+ }
+ lastPush = false;
+ continue;
+ } else
+#endif
+ if (fBraceStack.top() == kArrayBrace) {
+ SkScriptValue tokenValue;
+ success = innerScript(&script, &tokenValue); // terminate and return on comma, close brace
+ if (success == false) {
+ fError = kErrorInArrrayIndex;
+ return false;
+ }
+ if (suppressed == false) {
+#if 0 // no support for structures for now
+ if (tokenValue.fType == SkType_Structure) {
+ fArrayOffset += (int) fInfo->getSize(fDisplayable);
+ } else
+#endif
+ {
+ SkDisplayTypes type = ToDisplayType(fReturnType);
+ if (fReturnType == kNoType) {
+ // !!! short sighted; in the future, allow each returned array component to carry
+ // its own type, and let caller do any needed conversions
+ if (value->fOperand.fArray->count() == 0)
+ value->fOperand.fArray->setType(type = tokenValue.fType);
+ else
+ type = value->fOperand.fArray->getType();
+ }
+ if (tokenValue.fType != type) {
+ if (convertTo(type, &tokenValue) == false)
+ return false;
+ }
+ *value->fOperand.fArray->append() = tokenValue.fOperand;
+ }
+ }
+ lastPush = false;
+ continue;
+ } else {
+ if (token_length(script) == 0) {
+ fError = kExpectedToken;
+ return false;
+ }
+ }
+ }
+ if (lastPush != false && fTokenLength > 0) {
+ if (ch == '(') {
+ *fBraceStack.push() = kFunctionBrace;
+ if (handleFunction(&script, SkToBool(suppressed)) == false)
+ return false;
+ lastPush = true;
+ continue;
+ } else if (ch == '[') {
+ if (handleProperty(SkToBool(suppressed)) == false)
+ return false; // note: never triggered by standard animator plugins
+ if (handleArrayIndexer(&script, SkToBool(suppressed)) == false)
+ return false;
+ lastPush = true;
+ continue;
+ } else if (ch != '.') {
+ if (handleProperty(SkToBool(suppressed)) == false)
+ return false; // note: never triggered by standard animator plugins
+ lastPush = true;
+ continue;
+ }
+ }
+ if (ch == '0' && (script[1] & ~0x20) == 'X') {
+ if (lastPush != false) {
+ fError = kExpectedOperator;
+ return false;
+ }
+ script += 2;
+ script = SkParse::FindHex(script, (uint32_t*)&operand.fS32);
+ if (script == nullptr) {
+ fError = kExpectedHex;
+ return false;
+ }
+ goto intCommon;
+ }
+ if (lastPush == false && ch == '.')
+ goto scalarCommon;
+ if (ch >= '0' && ch <= '9') {
+ if (lastPush != false) {
+ fError = kExpectedOperator;
+ return false;
+ }
+ dotCheck = SkParse::FindS32(script, &operand.fS32);
+ if (dotCheck[0] != '.') {
+ script = dotCheck;
+intCommon:
+ if (suppressed == false)
+ *fTypeStack.push() = kInt;
+ } else {
+scalarCommon:
+ script = SkParse::FindScalar(script, &operand.fScalar);
+ if (suppressed == false)
+ *fTypeStack.push() = kScalar;
+ }
+ if (suppressed == false)
+ fOperandStack.push(operand);
+ lastPush = true;
+ continue;
+ }
+ int length = token_length(script);
+ if (length > 0) {
+ if (lastPush != false) {
+ fError = kExpectedOperator;
+ return false;
+ }
+ fToken = script;
+ fTokenLength = length;
+ script += length;
+ lastPush = true;
+ continue;
+ }
+ char startQuote = ch;
+ if (startQuote == '\'' || startQuote == '\"') {
+ if (lastPush != false) {
+ fError = kExpectedOperator;
+ return false;
+ }
+ operand.fString = new SkString();
+ track(operand.fString);
+ ++script;
+
+ // <mrr> this is a lot of calls to append() one char at at time
+ // how hard to preflight script so we know how much to grow fString by?
+ do {
+ if (script[0] == '\\')
+ ++script;
+ operand.fString->append(script, 1);
+ ++script;
+ if (script[0] == '\0') {
+ fError = kUnterminatedString;
+ return false;
+ }
+ } while (script[0] != startQuote);
+ ++script;
+ if (suppressed == false) {
+ *fTypeStack.push() = kString;
+ fOperandStack.push(operand);
+ }
+ lastPush = true;
+ continue;
+ }
+ ;
+ if (ch == '.') {
+ if (fTokenLength == 0) {
+ SkScriptValue scriptValue;
+ SkDEBUGCODE(scriptValue.fOperand.fObject = nullptr);
+ int tokenLength = token_length(++script);
+ const char* token = script;
+ script += tokenLength;
+ if (suppressed == false) {
+ if (fTypeStack.count() == 0) {
+ fError = kExpectedTokenBeforeDotOperator;
+ return false;
+ }
+ SkOpType topType;
+ fTypeStack.pop(&topType);
+ fOperandStack.pop(&scriptValue.fOperand);
+ scriptValue.fType = ToDisplayType(topType);
+ handleBox(&scriptValue);
+ }
+ success = evaluateDotParam(script, SkToBool(suppressed), token, tokenLength);
+ if (success == false)
+ return false;
+ lastPush = true;
+ continue;
+ }
+ // get next token, and evaluate immediately
+ success = evaluateDot(script, SkToBool(suppressed));
+ if (success == false)
+ return false;
+ lastPush = true;
+ continue;
+ }
+ if (ch == '[') {
+ if (lastPush == false) {
+ script++;
+ *fBraceStack.push() = kArrayBrace;
+ if (suppressed)
+ continue;
+ operand.fArray = value->fOperand.fArray = new SkTypedArray(ToDisplayType(fReturnType));
+ track(value->fOperand.fArray);
+ *fTypeStack.push() = (SkOpType) kArray;
+ fOperandStack.push(operand);
+ continue;
+ }
+ if (handleArrayIndexer(&script, SkToBool(suppressed)) == false)
+ return false;
+ lastPush = true;
+ continue;
+ }
+#if 0 // structs not supported for now
+ if (ch == '{') {
+ if (lastPush == false) {
+ script++;
+ *fBraceStack.push() = kStructBrace;
+ if (suppressed)
+ continue;
+ operand.fS32 = 0;
+ *fTypeStack.push() = (SkOpType) kStruct;
+ fOperandStack.push(operand);
+ continue;
+ }
+ SkASSERT(0); // braces in other contexts aren't supported yet
+ }
+#endif
+ if (ch == ')' && fBraceStack.count() > 0) {
+ SkBraceStyle braceStyle = fBraceStack.top();
+ if (braceStyle == kFunctionBrace) {
+ fBraceStack.pop();
+ break;
+ }
+ }
+ if (ch == ',' || ch == ']') {
+ if (ch != ',') {
+ SkBraceStyle match;
+ fBraceStack.pop(&match);
+ if (match != kArrayBrace) {
+ fError = kMismatchedArrayBrace;
+ return false;
+ }
+ }
+ script++;
+ // !!! see if brace or bracket is correct closer
+ break;
+ }
+ char nextChar = script[1];
+ int advance = logicalOp(ch, nextChar);
+ if (advance < 0) // error
+ return false;
+ if (advance == 0)
+ advance = arithmeticOp(ch, nextChar, lastPush);
+ if (advance == 0) // unknown token
+ return false;
+ if (advance > 0)
+ script += advance;
+ lastPush = ch == ']' || ch == ')';
+ }
+ bool suppressed = SkToBool(fSuppressStack.top().fSuppress);
+ if (fTokenLength > 0) {
+ success = handleProperty(suppressed);
+ if (success == false)
+ return false; // note: never triggered by standard animator plugins
+ }
+ while (fOpStack.count() > opBalance) { // leave open paren
+ if ((fError = opError()) != kNoError)
+ return false;
+ if (processOp() == false)
+ return false;
+ }
+ SkOpType topType = fTypeStack.count() > 0 ? fTypeStack.top() : kNoType;
+ if (suppressed == false && topType != fReturnType &&
+ topType == kString && fReturnType != kNoType) { // if result is a string, give handle property a chance to convert it to the property value
+ SkString* string = fOperandStack.top().fString;
+ fToken = string->c_str();
+ fTokenLength = string->size();
+ fOperandStack.pop();
+ fTypeStack.pop();
+ success = handleProperty(SkToBool(fSuppressStack.top().fSuppress));
+ if (success == false) { // if it couldn't convert, return string (error?)
+ SkOperand operand;
+ operand.fS32 = 0;
+ *fTypeStack.push() = kString;
+ operand.fString = string;
+ fOperandStack.push(operand);
+ }
+ }
+ if (value) {
+ if (fOperandStack.count() == 0)
+ return false;
+ SkASSERT(fOperandStack.count() >= 1);
+ SkASSERT(fTypeStack.count() >= 1);
+ fOperandStack.pop(&value->fOperand);
+ SkOpType type;
+ fTypeStack.pop(&type);
+ value->fType = ToDisplayType(type);
+// SkASSERT(value->fType != SkType_Unknown);
+ if (topType != fReturnType && topType == kObject && fReturnType != kNoType) {
+ if (convertTo(ToDisplayType(fReturnType), value) == false)
+ return false;
+ }
+ }
+ while (fSuppressStack.count() > suppressBalance)
+ fSuppressStack.pop();
+ *scriptPtr = script;
+ return true; // no error
+}
+
+void SkScriptEngine::memberCallBack(_memberCallBack member , void* userStorage) {
+ UserCallBack callBack;
+ callBack.fMemberCallBack = member;
+ commonCallBack(kMember, callBack, userStorage);
+}
+
+void SkScriptEngine::memberFunctionCallBack(_memberFunctionCallBack func, void* userStorage) {
+ UserCallBack callBack;
+ callBack.fMemberFunctionCallBack = func;
+ commonCallBack(kMemberFunction, callBack, userStorage);
+}
+
+#if 0
+void SkScriptEngine::objectToStringCallBack(_objectToStringCallBack func, void* userStorage) {
+ UserCallBack callBack;
+ callBack.fObjectToStringCallBack = func;
+ commonCallBack(kObjectToString, callBack, userStorage);
+}
+#endif
+
+bool SkScriptEngine::handleArrayIndexer(const char** scriptPtr, bool suppressed) {
+ SkScriptValue scriptValue;
+ (*scriptPtr)++;
+ *fOpStack.push() = kParen;
+ *fBraceStack.push() = kArrayBrace;
+ SkOpType saveType = fReturnType;
+ fReturnType = kInt;
+ bool success = innerScript(scriptPtr, suppressed == false ? &scriptValue : nullptr);
+ if (success == false)
+ return false;
+ fReturnType = saveType;
+ if (suppressed == false) {
+ if (convertTo(SkType_Int, &scriptValue) == false)
+ return false;
+ int index = scriptValue.fOperand.fS32;
+ SkScriptValue scriptValue;
+ SkOpType type;
+ fTypeStack.pop(&type);
+ fOperandStack.pop(&scriptValue.fOperand);
+ scriptValue.fType = ToDisplayType(type);
+ if (type == kObject) {
+ success = handleUnbox(&scriptValue);
+ if (success == false)
+ return false;
+ if (ToOpType(scriptValue.fType) != kArray) {
+ fError = kExpectedArray;
+ return false;
+ }
+ }
+ *fTypeStack.push() = scriptValue.fOperand.fArray->getOpType();
+// SkASSERT(index >= 0);
+ if ((unsigned) index >= (unsigned) scriptValue.fOperand.fArray->count()) {
+ fError = kArrayIndexOutOfBounds;
+ return false;
+ }
+ scriptValue.fOperand = scriptValue.fOperand.fArray->begin()[index];
+ fOperandStack.push(scriptValue.fOperand);
+ }
+ fOpStack.pop(); // pop paren
+ return success;
+}
+
+bool SkScriptEngine::handleBox(SkScriptValue* scriptValue) {
+ bool success = true;
+ for (UserCallBack* callBack = fUserCallBacks.begin(); callBack < fUserCallBacks.end(); callBack++) {
+ if (callBack->fCallBackType != kBox)
+ continue;
+ success = (*callBack->fBoxCallBack)(callBack->fUserStorage, scriptValue);
+ if (success) {
+ fOperandStack.push(scriptValue->fOperand);
+ *fTypeStack.push() = ToOpType(scriptValue->fType);
+ goto done;
+ }
+ }
+done:
+ return success;
+}
+
+bool SkScriptEngine::handleFunction(const char** scriptPtr, bool suppressed) {
+ SkScriptValue callbackResult;
+ SkTDArray<SkScriptValue> params;
+ SkString functionName(fToken, fTokenLength);
+ fTokenLength = 0;
+ bool success = functionParams(scriptPtr, params);
+ if (success == false)
+ goto done;
+ if (suppressed == true)
+ return true;
+ {
+ for (UserCallBack* callBack = fUserCallBacks.begin(); callBack < fUserCallBacks.end(); callBack++) {
+ if (callBack->fCallBackType != kFunction)
+ continue;
+ success = (*callBack->fFunctionCallBack)(functionName.c_str(), functionName.size(), params,
+ callBack->fUserStorage, &callbackResult);
+ if (success) {
+ fOperandStack.push(callbackResult.fOperand);
+ *fTypeStack.push() = ToOpType(callbackResult.fType);
+ goto done;
+ }
+ }
+ }
+ fError = kNoFunctionHandlerFound;
+ return false;
+done:
+ return success;
+}
+
+bool SkScriptEngine::handleMember(const char* field, size_t len, void* object) {
+ SkScriptValue callbackResult;
+ bool success = true;
+ for (UserCallBack* callBack = fUserCallBacks.begin(); callBack < fUserCallBacks.end(); callBack++) {
+ if (callBack->fCallBackType != kMember)
+ continue;
+ success = (*callBack->fMemberCallBack)(field, len, object, callBack->fUserStorage, &callbackResult);
+ if (success) {
+ if (callbackResult.fType == SkType_String)
+ track(callbackResult.fOperand.fString);
+ fOperandStack.push(callbackResult.fOperand);
+ *fTypeStack.push() = ToOpType(callbackResult.fType);
+ goto done;
+ }
+ }
+ return false;
+done:
+ return success;
+}
+
+bool SkScriptEngine::handleMemberFunction(const char* field, size_t len, void* object, SkTDArray<SkScriptValue>& params) {
+ SkScriptValue callbackResult;
+ bool success = true;
+ for (UserCallBack* callBack = fUserCallBacks.begin(); callBack < fUserCallBacks.end(); callBack++) {
+ if (callBack->fCallBackType != kMemberFunction)
+ continue;
+ success = (*callBack->fMemberFunctionCallBack)(field, len, object, params,
+ callBack->fUserStorage, &callbackResult);
+ if (success) {
+ if (callbackResult.fType == SkType_String)
+ track(callbackResult.fOperand.fString);
+ fOperandStack.push(callbackResult.fOperand);
+ *fTypeStack.push() = ToOpType(callbackResult.fType);
+ goto done;
+ }
+ }
+ return false;
+done:
+ return success;
+}
+
+#if 0
+bool SkScriptEngine::handleObjectToString(void* object) {
+ SkScriptValue callbackResult;
+ bool success = true;
+ for (UserCallBack* callBack = fUserCallBacks.begin(); callBack < fUserCallBacks.end(); callBack++) {
+ if (callBack->fCallBackType != kObjectToString)
+ continue;
+ success = (*callBack->fObjectToStringCallBack)(object,
+ callBack->fUserStorage, &callbackResult);
+ if (success) {
+ if (callbackResult.fType == SkType_String)
+ track(callbackResult.fOperand.fString);
+ fOperandStack.push(callbackResult.fOperand);
+ *fTypeStack.push() = ToOpType(callbackResult.fType);
+ goto done;
+ }
+ }
+ return false;
+done:
+ return success;
+}
+#endif
+
+bool SkScriptEngine::handleProperty(bool suppressed) {
+ SkScriptValue callbackResult;
+ bool success = true;
+ if (suppressed)
+ goto done;
+ success = false; // note that with standard animator-script plugins, callback never returns false
+ {
+ for (UserCallBack* callBack = fUserCallBacks.begin(); callBack < fUserCallBacks.end(); callBack++) {
+ if (callBack->fCallBackType != kProperty)
+ continue;
+ success = (*callBack->fPropertyCallBack)(fToken, fTokenLength,
+ callBack->fUserStorage, &callbackResult);
+ if (success) {
+ if (callbackResult.fType == SkType_String && callbackResult.fOperand.fString == nullptr) {
+ callbackResult.fOperand.fString = new SkString(fToken, fTokenLength);
+ track(callbackResult.fOperand.fString);
+ }
+ fOperandStack.push(callbackResult.fOperand);
+ *fTypeStack.push() = ToOpType(callbackResult.fType);
+ goto done;
+ }
+ }
+ }
+done:
+ fTokenLength = 0;
+ return success;
+}
+
+bool SkScriptEngine::handleUnbox(SkScriptValue* scriptValue) {
+ bool success = true;
+ for (UserCallBack* callBack = fUserCallBacks.begin(); callBack < fUserCallBacks.end(); callBack++) {
+ if (callBack->fCallBackType != kUnbox)
+ continue;
+ success = (*callBack->fUnboxCallBack)(callBack->fUserStorage, scriptValue);
+ if (success) {
+ if (scriptValue->fType == SkType_String)
+ track(scriptValue->fOperand.fString);
+ goto done;
+ }
+ }
+ return false;
+done:
+ return success;
+}
+
+// note that entire expression is treated as if it were enclosed in parens
+// an open paren is always the first thing in the op stack
+
+int SkScriptEngine::logicalOp(char ch, char nextChar) {
+ int advance = 1;
+ SkOp match;
+ signed char precedence;
+ switch (ch) {
+ case ')':
+ match = kParen;
+ break;
+ case ']':
+ match = kArrayOp;
+ break;
+ case '?':
+ match = kIf;
+ break;
+ case ':':
+ match = kElse;
+ break;
+ case '&':
+ if (nextChar != '&')
+ goto noMatch;
+ match = kLogicalAnd;
+ advance = 2;
+ break;
+ case '|':
+ if (nextChar != '|')
+ goto noMatch;
+ match = kLogicalOr;
+ advance = 2;
+ break;
+ default:
+noMatch:
+ return 0;
+ }
+ SkSuppress suppress;
+ precedence = gPrecedence[match];
+ if (fSuppressStack.top().fSuppress) {
+ if (fSuppressStack.top().fOpStackDepth < fOpStack.count()) {
+ SkOp topOp = fOpStack.top();
+ if (gPrecedence[topOp] <= precedence)
+ fOpStack.pop();
+ goto goHome;
+ }
+ bool changedPrecedence = gPrecedence[fSuppressStack.top().fOperator] < precedence;
+ if (changedPrecedence)
+ fSuppressStack.pop();
+ if (precedence == kIfElsePrecedence) {
+ if (match == kIf) {
+ if (changedPrecedence)
+ fOpStack.pop();
+ else
+ *fOpStack.push() = kIf;
+ } else {
+ if (fSuppressStack.top().fOpStackDepth == fOpStack.count()) {
+ goto flipSuppress;
+ }
+ fOpStack.pop();
+ }
+ }
+ if (changedPrecedence == false)
+ goto goHome;
+ }
+ while (gPrecedence[fOpStack.top() & ~kArtificialOp] < precedence) {
+ if (processOp() == false)
+ return false;
+ }
+ if (fSuppressStack.top().fOpStackDepth > fOpStack.count())
+ fSuppressStack.pop();
+ switch (match) {
+ case kParen:
+ case kArrayOp:
+ if (fOpStack.count() <= 1 || fOpStack.top() != match) {
+ fError = kMismatchedBrackets;
+ return -1;
+ }
+ if (match == kParen)
+ fOpStack.pop();
+ else {
+ SkOpType indexType;
+ fTypeStack.pop(&indexType);
+ if (indexType != kInt && indexType != kScalar) {
+ fError = kExpectedNumberForArrayIndex; // (although, could permit strings eventually)
+ return -1;
+ }
+ SkOperand indexOperand;
+ fOperandStack.pop(&indexOperand);
+ int index = indexType == kScalar ? SkScalarFloorToInt(indexOperand.fScalar) :
+ indexOperand.fS32;
+ SkOpType arrayType;
+ fTypeStack.pop(&arrayType);
+ if ((unsigned)arrayType != (unsigned)kArray) {
+ fError = kExpectedArray;
+ return -1;
+ }
+ SkOperand arrayOperand;
+ fOperandStack.pop(&arrayOperand);
+ SkTypedArray* array = arrayOperand.fArray;
+ SkOperand operand;
+ if (array->getIndex(index, &operand) == false) {
+ fError = kIndexOutOfRange;
+ return -1;
+ }
+ SkOpType resultType = array->getOpType();
+ fTypeStack.push(resultType);
+ fOperandStack.push(operand);
+ }
+ break;
+ case kIf: {
+ SkScriptValue ifValue;
+ SkOpType ifType;
+ fTypeStack.pop(&ifType);
+ ifValue.fType = ToDisplayType(ifType);
+ fOperandStack.pop(&ifValue.fOperand);
+ if (convertTo(SkType_Int, &ifValue) == false)
+ return -1;
+ if (ifValue.fType != SkType_Int) {
+ fError = kExpectedIntForConditionOperator;
+ return -1;
+ }
+ suppress.fSuppress = ifValue.fOperand.fS32 == 0;
+ suppress.fOperator = kIf;
+ suppress.fOpStackDepth = fOpStack.count();
+ suppress.fElse = false;
+ fSuppressStack.push(suppress);
+ // if left is true, do only up to colon
+ // if left is false, do only after colon
+ } break;
+ case kElse:
+flipSuppress:
+ if (fSuppressStack.top().fElse)
+ fSuppressStack.pop();
+ fSuppressStack.top().fElse = true;
+ fSuppressStack.top().fSuppress ^= true;
+ // flip last do / don't do consideration from last '?'
+ break;
+ case kLogicalAnd:
+ case kLogicalOr: {
+ if (fTypeStack.top() != kInt) {
+ fError = kExpectedBooleanExpression;
+ return -1;
+ }
+ int32_t topInt = fOperandStack.top().fS32;
+ if (fOpStack.top() != kLogicalAnd)
+ *fOpStack.push() = kLogicalAnd; // really means 'to bool', and is appropriate for 'or'
+ if (match == kLogicalOr ? topInt != 0 : topInt == 0) {
+ suppress.fSuppress = true;
+ suppress.fOperator = match;
+ suppress.fOpStackDepth = fOpStack.count();
+ suppress.fElse = false;
+ fSuppressStack.push(suppress);
+ } else {
+ fTypeStack.pop();
+ fOperandStack.pop();
+ }
+ } break;
+ default:
+ SkASSERT(0);
+ }
+goHome:
+ return advance;
+}
+
+SkScriptEngine::Error SkScriptEngine::opError() {
+ int opCount = fOpStack.count();
+ int operandCount = fOperandStack.count();
+ if (opCount == 0) {
+ if (operandCount != 1)
+ return kExpectedOperator;
+ return kNoError;
+ }
+ SkOp op = (SkOp) (fOpStack.top() & ~kArtificialOp);
+ const SkOperatorAttributes* attributes = &gOpAttributes[op];
+ if (attributes->fLeftType != kNoType && operandCount < 2)
+ return kExpectedValue;
+ if (attributes->fLeftType == kNoType && operandCount < 1)
+ return kExpectedValue;
+ return kNoError;
+}
+
+bool SkScriptEngine::processOp() {
+ SkOp op;
+ fOpStack.pop(&op);
+ op = (SkOp) (op & ~kArtificialOp);
+ const SkOperatorAttributes* attributes = &gOpAttributes[op];
+ SkOpType type2;
+ fTypeStack.pop(&type2);
+ SkOpType type1 = type2;
+ SkOperand operand2;
+ fOperandStack.pop(&operand2);
+ SkOperand operand1 = operand2; // !!! not really needed, suppresses warning
+ if (attributes->fLeftType != kNoType) {
+ fTypeStack.pop(&type1);
+ fOperandStack.pop(&operand1);
+ if (op == kFlipOps) {
+ SkTSwap(type1, type2);
+ SkTSwap(operand1, operand2);
+ fOpStack.pop(&op);
+ op = (SkOp) (op & ~kArtificialOp);
+ attributes = &gOpAttributes[op];
+ }
+ if (type1 == kObject && (type1 & attributes->fLeftType) == 0) {
+ SkScriptValue val;
+ val.fType = ToDisplayType(type1);
+ val.fOperand = operand1;
+ bool success = handleUnbox(&val);
+ if (success == false)
+ return false;
+ type1 = ToOpType(val.fType);
+ operand1 = val.fOperand;
+ }
+ }
+ if (type2 == kObject && (type2 & attributes->fLeftType) == 0) {
+ SkScriptValue val;
+ val.fType = ToDisplayType(type2);
+ val.fOperand = operand2;
+ bool success = handleUnbox(&val);
+ if (success == false)
+ return false;
+ type2 = ToOpType(val.fType);
+ operand2 = val.fOperand;
+ }
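+    // Type promotion before applying the operator: string-biased operators convert a
+    // numeric operand to a string; otherwise an int is promoted to a scalar when the
+    // other side is a scalar, a string operand is parsed as a scalar, and a scalar is
+    // floored back to an int when the operator (or the other operand) requires an int.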
+ if (attributes->fLeftType != kNoType) {
+ if (type1 != type2) {
+ if ((attributes->fLeftType & kString) && attributes->fBias & kTowardsString && ((type1 | type2) & kString)) {
+ if (type1 == kInt || type1 == kScalar) {
+ convertToString(operand1, type1 == kInt ? SkType_Int : SkType_Float);
+ type1 = kString;
+ }
+ if (type2 == kInt || type2 == kScalar) {
+ convertToString(operand2, type2 == kInt ? SkType_Int : SkType_Float);
+ type2 = kString;
+ }
+ } else if (attributes->fLeftType & kScalar && ((type1 | type2) & kScalar)) {
+ if (type1 == kInt) {
+ operand1.fScalar = IntToScalar(operand1.fS32);
+ type1 = kScalar;
+ }
+ if (type2 == kInt) {
+ operand2.fScalar = IntToScalar(operand2.fS32);
+ type2 = kScalar;
+ }
+ }
+ }
+ if ((type1 & attributes->fLeftType) == 0 || type1 != type2) {
+ if (type1 == kString) {
+ const char* result = SkParse::FindScalar(operand1.fString->c_str(), &operand1.fScalar);
+ if (result == nullptr) {
+ fError = kExpectedNumber;
+ return false;
+ }
+ type1 = kScalar;
+ }
+ if (type1 == kScalar && (attributes->fLeftType == kInt || type2 == kInt)) {
+ operand1.fS32 = SkScalarFloorToInt(operand1.fScalar);
+ type1 = kInt;
+ }
+ }
+ }
+ if ((type2 & attributes->fRightType) == 0 || type1 != type2) {
+ if (type2 == kString) {
+ const char* result = SkParse::FindScalar(operand2.fString->c_str(), &operand2.fScalar);
+ if (result == nullptr) {
+ fError = kExpectedNumber;
+ return false;
+ }
+ type2 = kScalar;
+ }
+ if (type2 == kScalar && (attributes->fRightType == kInt || type1 == kInt)) {
+ operand2.fS32 = SkScalarFloorToInt(operand2.fScalar);
+ type2 = kInt;
+ }
+ }
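+    // The SkOp enum places the scalar and string forms of each operator at +1 and +2
+    // from the integer form (e.g. kAddInt, kAddScalar, kAddString), so the typed
+    // opcode is selected by offsetting from the int variant.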
+ if (type2 == kScalar)
+ op = (SkOp) (op + 1);
+ else if (type2 == kString)
+ op = (SkOp) (op + 2);
+ switch(op) {
+ case kAddInt:
+ operand2.fS32 += operand1.fS32;
+ break;
+ case kAddScalar:
+ operand2.fScalar += operand1.fScalar;
+ break;
+ case kAddString:
+ if (fTrackString.find(operand1.fString) < 0) {
+ operand1.fString = new SkString(*operand1.fString);
+ track(operand1.fString);
+ }
+ operand1.fString->append(*operand2.fString);
+ operand2 = operand1;
+ break;
+ case kBitAnd:
+ operand2.fS32 &= operand1.fS32;
+ break;
+ case kBitNot:
+ operand2.fS32 = ~operand2.fS32;
+ break;
+ case kBitOr:
+ operand2.fS32 |= operand1.fS32;
+ break;
+ case kDivideInt:
+ if (operand2.fS32 == 0) {
+ operand2.fS32 = operand1.fS32 == 0 ? SK_NaN32 : operand1.fS32 > 0 ? SK_MaxS32 : -SK_MaxS32;
+ break;
+ } else {
+ int32_t original = operand2.fS32;
+ operand2.fS32 = operand1.fS32 / operand2.fS32;
+ if (original * operand2.fS32 == operand1.fS32)
+ break; // integer divide was good enough
+                // inexact: convert both operands and fall through to the scalar divide
+                operand1.fScalar = IntToScalar(operand1.fS32);
+                operand2.fScalar = IntToScalar(original);
+                type2 = kScalar;
+ }
+ case kDivideScalar:
+ if (operand2.fScalar == 0)
+ operand2.fScalar = operand1.fScalar == 0 ? SK_ScalarNaN : operand1.fScalar > 0 ? SK_ScalarMax : -SK_ScalarMax;
+ else
+ operand2.fScalar = operand1.fScalar / operand2.fScalar;
+ break;
+ case kEqualInt:
+ operand2.fS32 = operand1.fS32 == operand2.fS32;
+ break;
+ case kEqualScalar:
+ operand2.fS32 = operand1.fScalar == operand2.fScalar;
+ type2 = kInt;
+ break;
+ case kEqualString:
+ operand2.fS32 = *operand1.fString == *operand2.fString;
+ type2 = kInt;
+ break;
+ case kGreaterEqualInt:
+ operand2.fS32 = operand1.fS32 >= operand2.fS32;
+ break;
+ case kGreaterEqualScalar:
+ operand2.fS32 = operand1.fScalar >= operand2.fScalar;
+ type2 = kInt;
+ break;
+ case kGreaterEqualString:
+ operand2.fS32 = strcmp(operand1.fString->c_str(), operand2.fString->c_str()) >= 0;
+ type2 = kInt;
+ break;
+ case kLogicalAnd:
+ operand2.fS32 = !! operand2.fS32; // really, ToBool
+ break;
+ case kLogicalNot:
+ operand2.fS32 = ! operand2.fS32;
+ break;
+ case kLogicalOr:
+ SkASSERT(0); // should have already been processed
+ break;
+ case kMinusInt:
+ operand2.fS32 = -operand2.fS32;
+ break;
+ case kMinusScalar:
+ operand2.fScalar = -operand2.fScalar;
+ break;
+ case kModuloInt:
+ operand2.fS32 = operand1.fS32 % operand2.fS32;
+ break;
+ case kModuloScalar:
+ operand2.fScalar = SkScalarMod(operand1.fScalar, operand2.fScalar);
+ break;
+ case kMultiplyInt:
+ operand2.fS32 *= operand1.fS32;
+ break;
+ case kMultiplyScalar:
+ operand2.fScalar = SkScalarMul(operand1.fScalar, operand2.fScalar);
+ break;
+ case kShiftLeft:
+ operand2.fS32 = operand1.fS32 << operand2.fS32;
+ break;
+ case kShiftRight:
+ operand2.fS32 = operand1.fS32 >> operand2.fS32;
+ break;
+ case kSubtractInt:
+ operand2.fS32 = operand1.fS32 - operand2.fS32;
+ break;
+ case kSubtractScalar:
+ operand2.fScalar = operand1.fScalar - operand2.fScalar;
+ break;
+ case kXor:
+ operand2.fS32 ^= operand1.fS32;
+ break;
+ default:
+ SkASSERT(0);
+ }
+ fTypeStack.push(type2);
+ fOperandStack.push(operand2);
+ return true;
+}
+
+void SkScriptEngine::propertyCallBack(_propertyCallBack prop, void* userStorage) {
+ UserCallBack callBack;
+ callBack.fPropertyCallBack = prop;
+ commonCallBack(kProperty, callBack, userStorage);
+}
+
+void SkScriptEngine::track(SkTypedArray* array) {
+ SkASSERT(fTrackArray.find(array) < 0);
+ *(fTrackArray.end() - 1) = array;
+ fTrackArray.appendClear();
+}
+
+void SkScriptEngine::track(SkString* string) {
+ SkASSERT(fTrackString.find(string) < 0);
+ *(fTrackString.end() - 1) = string;
+ fTrackString.appendClear();
+}
+
+void SkScriptEngine::unboxCallBack(_unboxCallBack func, void* userStorage) {
+ UserCallBack callBack;
+ callBack.fUnboxCallBack = func;
+ commonCallBack(kUnbox, callBack, userStorage);
+}
+
+bool SkScriptEngine::ConvertTo(SkScriptEngine* engine, SkDisplayTypes toType, SkScriptValue* value ) {
+ SkASSERT(value);
+ if (SkDisplayType::IsEnum(nullptr /* fMaker */, toType))
+ toType = SkType_Int;
+ if (toType == SkType_Point || toType == SkType_3D_Point)
+ toType = SkType_Float;
+ if (toType == SkType_Drawable)
+ toType = SkType_Displayable;
+ SkDisplayTypes type = value->fType;
+ if (type == toType)
+ return true;
+ SkOperand& operand = value->fOperand;
+ bool success = true;
+ switch (toType) {
+ case SkType_Int:
+ if (type == SkType_Boolean)
+ break;
+ if (type == SkType_Float)
+ operand.fS32 = SkScalarFloorToInt(operand.fScalar);
+ else {
+ if (type != SkType_String) {
+ success = false;
+ break; // error
+ }
+ success = SkParse::FindS32(operand.fString->c_str(), &operand.fS32) != nullptr;
+ }
+ break;
+ case SkType_Float:
+ if (type == SkType_Int) {
+ if (operand.fS32 == SK_NaN32)
+ operand.fScalar = SK_ScalarNaN;
+ else if (SkAbs32(operand.fS32) == SK_MaxS32)
+ operand.fScalar = SkSign32(operand.fS32) * SK_ScalarMax;
+ else
+ operand.fScalar = SkIntToScalar(operand.fS32);
+ } else {
+ if (type != SkType_String) {
+ success = false;
+ break; // error
+ }
+ success = SkParse::FindScalar(operand.fString->c_str(), &operand.fScalar) != nullptr;
+ }
+ break;
+ case SkType_String: {
+ SkString* strPtr = new SkString();
+ SkASSERT(engine);
+ engine->track(strPtr);
+ if (type == SkType_Int) {
+ strPtr->appendS32(operand.fS32);
+ } else if (type == SkType_Displayable) {
+ SkASSERT(0); // must call through instance version instead of static version
+ } else {
+ if (type != SkType_Float) {
+ success = false;
+ break;
+ }
+ strPtr->appendScalar(operand.fScalar);
+ }
+ operand.fString = strPtr;
+ } break;
+ case SkType_Array: {
+ SkTypedArray* array = new SkTypedArray(type);
+ *array->append() = operand;
+ engine->track(array);
+ operand.fArray = array;
+ } break;
+ default:
+ SkASSERT(0);
+ }
+ value->fType = toType;
+ if (success == false)
+ engine->fError = kTypeConversionFailed;
+ return success;
+}
+
+SkScalar SkScriptEngine::IntToScalar(int32_t s32) {
+ SkScalar scalar;
+ if (s32 == SK_NaN32)
+ scalar = SK_ScalarNaN;
+ else if (SkAbs32(s32) == SK_MaxS32)
+ scalar = SkSign32(s32) * SK_ScalarMax;
+ else
+ scalar = SkIntToScalar(s32);
+ return scalar;
+}
+
+SkDisplayTypes SkScriptEngine::ToDisplayType(SkOpType type) {
+ int val = type;
+ switch (val) {
+ case kNoType:
+ return SkType_Unknown;
+ case kInt:
+ return SkType_Int;
+ case kScalar:
+ return SkType_Float;
+ case kString:
+ return SkType_String;
+ case kArray:
+ return SkType_Array;
+ case kObject:
+ return SkType_Displayable;
+// case kStruct:
+// return SkType_Structure;
+ default:
+ SkASSERT(0);
+ return SkType_Unknown;
+ }
+}
+
+SkScriptEngine::SkOpType SkScriptEngine::ToOpType(SkDisplayTypes type) {
+ if (SkDisplayType::IsDisplayable(nullptr /* fMaker */, type))
+ return (SkOpType) kObject;
+ if (SkDisplayType::IsEnum(nullptr /* fMaker */, type))
+ return kInt;
+ switch (type) {
+ case SkType_ARGB:
+ case SkType_MSec:
+ case SkType_Int:
+ return kInt;
+ case SkType_Float:
+ case SkType_Point:
+ case SkType_3D_Point:
+ return kScalar;
+ case SkType_Base64:
+ case SkType_DynamicString:
+ case SkType_String:
+ return kString;
+ case SkType_Array:
+ return (SkOpType) kArray;
+ case SkType_Unknown:
+ return kNoType;
+ default:
+ SkASSERT(0);
+ return kNoType;
+ }
+}
+
+bool SkScriptEngine::ValueToString(SkScriptValue value, SkString* string) {
+ switch (value.fType) {
+ case kInt:
+ string->reset();
+ string->appendS32(value.fOperand.fS32);
+ break;
+ case kScalar:
+ string->reset();
+ string->appendScalar(value.fOperand.fScalar);
+ break;
+ case kString:
+ string->set(*value.fOperand.fString);
+ break;
+ default:
+ SkASSERT(0);
+ return false;
+ }
+ return true; // no error
+}
+
+#ifdef SK_SUPPORT_UNITTEST
+
+#include "SkFloatingPoint.h"
+
+#define DEF_SCALAR_ANSWER 0
+#define DEF_STRING_ANSWER nullptr
+
+#define testInt(expression) { #expression, SkType_Int, expression, DEF_SCALAR_ANSWER, DEF_STRING_ANSWER }
+#define testScalar(expression) { #expression, SkType_Float, 0, (float) expression, DEF_STRING_ANSWER }
+#define testRemainder(exp1, exp2) { #exp1 "%" #exp2, SkType_Float, 0, sk_float_mod(exp1, exp2), DEF_STRING_ANSWER }
+#define testTrue(expression) { #expression, SkType_Int, 1, DEF_SCALAR_ANSWER, DEF_STRING_ANSWER }
+#define testFalse(expression) { #expression, SkType_Int, 0, DEF_SCALAR_ANSWER, DEF_STRING_ANSWER }
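+// For reference, each macro builds one SkScriptNAnswer entry; e.g. testInt(1+2)
+// expands to { "1+2", SkType_Int, 1+2, 0, nullptr }.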
+
+static const SkScriptNAnswer scriptTests[] = {
+ testInt(1>1/2),
+ testInt((6+7)*8),
+ testInt(0&&1?2:3),
+ testInt(3*(4+5)),
+ testScalar(1.0+2.0),
+ testScalar(1.0+5),
+ testScalar(3.0-1.0),
+ testScalar(6-1.0),
+ testScalar(- -5.5- -1.5),
+ testScalar(2.5*6.),
+ testScalar(0.5*4),
+ testScalar(4.5/.5),
+ testScalar(9.5/19),
+ testRemainder(9.5, 0.5),
+ testRemainder(9.,2),
+ testRemainder(9,2.5),
+ testRemainder(-9,2.5),
+ testTrue(-9==-9.0),
+ testTrue(-9.==-4.0-5),
+ testTrue(-9.*1==-4-5),
+ testFalse(-9!=-9.0),
+ testFalse(-9.!=-4.0-5),
+ testFalse(-9.*1!=-4-5),
+ testInt(0x123),
+ testInt(0XABC),
+ testInt(0xdeadBEEF),
+ { "'123'+\"456\"", SkType_String, 0, 0, "123456" },
+ { "123+\"456\"", SkType_String, 0, 0, "123456" },
+ { "'123'+456", SkType_String, 0, 0, "123456" },
+ { "'123'|\"456\"", SkType_Int, 123|456, DEF_SCALAR_ANSWER, DEF_STRING_ANSWER },
+ { "123|\"456\"", SkType_Int, 123|456, DEF_SCALAR_ANSWER, DEF_STRING_ANSWER },
+ { "'123'|456", SkType_Int, 123|456, DEF_SCALAR_ANSWER, DEF_STRING_ANSWER },
+ { "'2'<11", SkType_Int, 1, DEF_SCALAR_ANSWER, DEF_STRING_ANSWER },
+ { "2<'11'", SkType_Int, 1, DEF_SCALAR_ANSWER, DEF_STRING_ANSWER },
+ { "'2'<'11'", SkType_Int, 0, DEF_SCALAR_ANSWER, DEF_STRING_ANSWER },
+ testInt(123),
+ testInt(-345),
+ testInt(+678),
+ testInt(1+2+3),
+ testInt(3*4+5),
+ testInt(6+7*8),
+ testInt(-1-2-8/4),
+ testInt(-9%4),
+ testInt(9%-4),
+ testInt(-9%-4),
+ testInt(123|978),
+ testInt(123&978),
+ testInt(123^978),
+ testInt(2<<4),
+ testInt(99>>3),
+ testInt(~55),
+ testInt(~~55),
+ testInt(!55),
+ testInt(!!55),
+ // both int
+ testInt(2<2),
+ testInt(2<11),
+ testInt(20<11),
+ testInt(2<=2),
+ testInt(2<=11),
+ testInt(20<=11),
+ testInt(2>2),
+ testInt(2>11),
+ testInt(20>11),
+ testInt(2>=2),
+ testInt(2>=11),
+ testInt(20>=11),
+ testInt(2==2),
+ testInt(2==11),
+ testInt(20==11),
+ testInt(2!=2),
+ testInt(2!=11),
+ testInt(20!=11),
+ // left int, right scalar
+ testInt(2<2.),
+ testInt(2<11.),
+ testInt(20<11.),
+ testInt(2<=2.),
+ testInt(2<=11.),
+ testInt(20<=11.),
+ testInt(2>2.),
+ testInt(2>11.),
+ testInt(20>11.),
+ testInt(2>=2.),
+ testInt(2>=11.),
+ testInt(20>=11.),
+ testInt(2==2.),
+ testInt(2==11.),
+ testInt(20==11.),
+ testInt(2!=2.),
+ testInt(2!=11.),
+ testInt(20!=11.),
+ // left scalar, right int
+ testInt(2.<2),
+ testInt(2.<11),
+ testInt(20.<11),
+ testInt(2.<=2),
+ testInt(2.<=11),
+ testInt(20.<=11),
+ testInt(2.>2),
+ testInt(2.>11),
+ testInt(20.>11),
+ testInt(2.>=2),
+ testInt(2.>=11),
+ testInt(20.>=11),
+ testInt(2.==2),
+ testInt(2.==11),
+ testInt(20.==11),
+ testInt(2.!=2),
+ testInt(2.!=11),
+ testInt(20.!=11),
+ // both scalar
+ testInt(2.<11.),
+ testInt(20.<11.),
+ testInt(2.<=2.),
+ testInt(2.<=11.),
+ testInt(20.<=11.),
+ testInt(2.>2.),
+ testInt(2.>11.),
+ testInt(20.>11.),
+ testInt(2.>=2.),
+ testInt(2.>=11.),
+ testInt(20.>=11.),
+ testInt(2.==2.),
+ testInt(2.==11.),
+ testInt(20.==11.),
+ testInt(2.!=2.),
+ testInt(2.!=11.),
+ testInt(20.!=11.),
+ // int, string (string is int)
+ testFalse(2<'2'),
+ testTrue(2<'11'),
+ testFalse(20<'11'),
+ testTrue(2<='2'),
+ testTrue(2<='11'),
+ testFalse(20<='11'),
+ testFalse(2>'2'),
+ testFalse(2>'11'),
+ testTrue(20>'11'),
+ testTrue(2>='2'),
+ testFalse(2>='11'),
+ testTrue(20>='11'),
+ testTrue(2=='2'),
+ testFalse(2=='11'),
+ testFalse(2!='2'),
+ testTrue(2!='11'),
+ // int, string (string is scalar)
+ testFalse(2<'2.'),
+ testTrue(2<'11.'),
+ testFalse(20<'11.'),
+ testTrue(2=='2.'),
+ testFalse(2=='11.'),
+ // scalar, string
+ testFalse(2.<'2.'),
+ testTrue(2.<'11.'),
+ testFalse(20.<'11.'),
+ testTrue(2.=='2.'),
+ testFalse(2.=='11.'),
+ // string, int
+ testFalse('2'<2),
+ testTrue('2'<11),
+ testFalse('20'<11),
+ testTrue('2'==2),
+ testFalse('2'==11),
+ // string, scalar
+ testFalse('2'<2.),
+ testTrue('2'<11.),
+ testFalse('20'<11.),
+ testTrue('2'==2.),
+ testFalse('2'==11.),
+ // string, string
+ testFalse('2'<'2'),
+ testFalse('2'<'11'),
+ testFalse('20'<'11'),
+ testTrue('2'=='2'),
+ testFalse('2'=='11'),
+ // logic
+ testInt(1?2:3),
+ testInt(0?2:3),
+ testInt((1&&2)||3),
+ testInt((1&&0)||3),
+ testInt((1&&0)||0),
+ testInt(1||(0&&3)),
+ testInt(0||(0&&3)),
+ testInt(0||(1&&3)),
+ testInt(1?(2?3:4):5),
+ testInt(0?(2?3:4):5),
+ testInt(1?(0?3:4):5),
+ testInt(0?(0?3:4):5),
+ testInt(1?2?3:4:5),
+ testInt(0?2?3:4:5),
+ testInt(1?0?3:4:5),
+ testInt(0?0?3:4:5),
+
+ testInt(1?2:(3?4:5)),
+ testInt(0?2:(3?4:5)),
+ testInt(1?0:(3?4:5)),
+ testInt(0?0:(3?4:5)),
+ testInt(1?2:3?4:5),
+ testInt(0?2:3?4:5),
+ testInt(1?0:3?4:5),
+    testInt(0?0:3?4:5),
+    { "123.5", SkType_Float, 0, SkIntToScalar(123) + SK_Scalar1/2, DEF_STRING_ANSWER }
+};
+
+#define SkScriptNAnswer_testCount SK_ARRAY_COUNT(scriptTests)
+
+void SkScriptEngine::UnitTest() {
+ for (unsigned index = 0; index < SkScriptNAnswer_testCount; index++) {
+ SkScriptEngine engine(SkScriptEngine::ToOpType(scriptTests[index].fType));
+ SkScriptValue value;
+ const char* script = scriptTests[index].fScript;
+ SkASSERT(engine.evaluateScript(&script, &value) == true);
+ SkASSERT(value.fType == scriptTests[index].fType);
+ SkScalar error;
+ switch (value.fType) {
+ case SkType_Int:
+ SkASSERT(value.fOperand.fS32 == scriptTests[index].fIntAnswer);
+ break;
+ case SkType_Float:
+ error = SkScalarAbs(value.fOperand.fScalar - scriptTests[index].fScalarAnswer);
+ SkASSERT(error < SK_Scalar1 / 10000);
+ break;
+ case SkType_String:
+ SkASSERT(strcmp(value.fOperand.fString->c_str(), scriptTests[index].fStringAnswer) == 0);
+ break;
+ default:
+ SkASSERT(0);
+ }
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/animator/SkScript.h b/gfx/skia/skia/src/animator/SkScript.h
new file mode 100644
index 000000000..074d10c3c
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkScript.h
@@ -0,0 +1,264 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkScript_DEFINED
+#define SkScript_DEFINED
+
+#include "SkOperand.h"
+#include "SkIntArray.h"
+#include "SkTDict.h"
+#include "SkTDStack.h"
+
+class SkAnimateMaker;
+
+class SkScriptEngine {
+public:
+ enum Error {
+ kNoError,
+ kArrayIndexOutOfBounds,
+ kCouldNotFindReferencedID,
+ kDotOperatorExpectsObject,
+ kErrorInArrrayIndex,
+ kErrorInFunctionParameters,
+ kExpectedArray,
+ kExpectedBooleanExpression,
+ kExpectedFieldName,
+ kExpectedHex,
+ kExpectedIntForConditionOperator,
+ kExpectedNumber,
+ kExpectedNumberForArrayIndex,
+ kExpectedOperator,
+ kExpectedToken,
+ kExpectedTokenBeforeDotOperator,
+ kExpectedValue,
+ kHandleMemberFailed,
+ kHandleMemberFunctionFailed,
+ kHandleUnboxFailed,
+ kIndexOutOfRange,
+ kMismatchedArrayBrace,
+ kMismatchedBrackets,
+ kNoFunctionHandlerFound,
+ kPrematureEnd,
+ kTooManyParameters,
+ kTypeConversionFailed,
+ kUnterminatedString
+ };
+
+ enum SkOpType {
+ kNoType,
+ kInt = 1,
+ kScalar = 2,
+ kString = 4,
+ kArray = 8,
+ kObject = 16
+// kStruct = 32
+ };
+
+ typedef bool (*_boxCallBack)(void* userStorage, SkScriptValue* result);
+ typedef bool (*_functionCallBack)(const char* func, size_t len, SkTDArray<SkScriptValue>& params,
+ void* userStorage, SkScriptValue* result);
+ typedef bool (*_memberCallBack)(const char* member, size_t len, void* object,
+ void* userStorage, SkScriptValue* result);
+ typedef bool (*_memberFunctionCallBack)(const char* member, size_t len, void* object,
+ SkTDArray<SkScriptValue>& params, void* userStorage, SkScriptValue* result);
+// typedef bool (*_objectToStringCallBack)(void* object, void* userStorage, SkScriptValue* result);
+ typedef bool (*_propertyCallBack)(const char* prop, size_t len, void* userStorage, SkScriptValue* result);
+ typedef bool (*_unboxCallBack)(void* userStorage, SkScriptValue* result);
+ SkScriptEngine(SkOpType returnType);
+ ~SkScriptEngine();
+ void boxCallBack(_boxCallBack func, void* userStorage);
+ bool convertTo(SkDisplayTypes , SkScriptValue* );
+ bool evaluateScript(const char** script, SkScriptValue* value);
+ void forget(SkTypedArray* array);
+ void functionCallBack(_functionCallBack func, void* userStorage);
+ Error getError() const { return fError; }
+#ifdef SK_DEBUG
+ bool getErrorString(SkString* err) const;
+#endif
+ void memberCallBack(_memberCallBack , void* userStorage);
+ void memberFunctionCallBack(_memberFunctionCallBack , void* userStorage);
+// void objectToStringCallBack(_objectToStringCallBack , void* userStorage);
+ void propertyCallBack(_propertyCallBack prop, void* userStorage);
+ void track(SkTypedArray* array);
+ void track(SkString* string);
+ void unboxCallBack(_unboxCallBack func, void* userStorage);
+ static bool ConvertTo(SkScriptEngine* , SkDisplayTypes toType, SkScriptValue* value);
+ static SkScalar IntToScalar(int32_t );
+ static SkDisplayTypes ToDisplayType(SkOpType type);
+ static SkOpType ToOpType(SkDisplayTypes type);
+ static bool ValueToString(SkScriptValue value, SkString* string);
+
+ enum CallBackType {
+ kBox,
+ kFunction,
+ kMember,
+ kMemberFunction,
+ // kObjectToString,
+ kProperty,
+ kUnbox
+ };
+
+ struct UserCallBack {
+ CallBackType fCallBackType;
+ void* fUserStorage;
+ union {
+ _boxCallBack fBoxCallBack;
+ _functionCallBack fFunctionCallBack;
+ _memberCallBack fMemberCallBack;
+ _memberFunctionCallBack fMemberFunctionCallBack;
+ // _objectToStringCallBack fObjectToStringCallBack;
+ _propertyCallBack fPropertyCallBack;
+ _unboxCallBack fUnboxCallBack;
+ };
+ };
+
+ enum SkOp {
+ kUnassigned,
+ kAdd,
+ kAddInt = kAdd,
+ kAddScalar,
+ kAddString, // string concat
+ kArrayOp,
+ kBitAnd,
+ kBitNot,
+ kBitOr,
+ kDivide,
+ kDivideInt = kDivide,
+ kDivideScalar,
+ kElse,
+ kEqual,
+ kEqualInt = kEqual,
+ kEqualScalar,
+ kEqualString,
+ kFlipOps,
+ kGreaterEqual,
+ kGreaterEqualInt = kGreaterEqual,
+ kGreaterEqualScalar,
+ kGreaterEqualString,
+ kIf,
+ kLogicalAnd,
+ kLogicalNot,
+ kLogicalOr,
+ kMinus,
+ kMinusInt = kMinus,
+ kMinusScalar,
+ kModulo,
+ kModuloInt = kModulo,
+ kModuloScalar,
+ kMultiply,
+ kMultiplyInt = kMultiply,
+ kMultiplyScalar,
+ kParen,
+ kShiftLeft,
+ kShiftRight, // signed
+ kSubtract,
+ kSubtractInt = kSubtract,
+ kSubtractScalar,
+ kXor,
+ kArtificialOp = 0x40
+ };
+
+ enum SkOpBias {
+ kNoBias,
+ kTowardsNumber = 0,
+ kTowardsString
+ };
+
+protected:
+
+ struct SkOperatorAttributes {
+ unsigned int fLeftType : 3; // SkOpType, but only lower values
+ unsigned int fRightType : 3; // SkOpType, but only lower values
+ SkOpBias fBias : 1;
+ };
+
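+    // A suppress entry is pushed when the left side of && / || short-circuits the right
+    // side, and for every '?' condition; fSuppress says whether evaluation is being
+    // skipped right now (the ':' of ? : flips it), and fOpStackDepth records the
+    // operator-stack depth at which the entry should be popped.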
+ struct SkSuppress { // !!! could be compressed to a long
+ SkOp fOperator; // operand which enabled suppression
+ int fOpStackDepth; // depth when suppression operator was found
+ SkBool8 fSuppress; // set if suppression happens now, as opposed to later
+ SkBool8 fElse; // set on the : half of ? :
+ };
+
+ static const SkOperatorAttributes gOpAttributes[];
+ static const signed char gPrecedence[];
+ int arithmeticOp(char ch, char nextChar, bool lastPush);
+ void commonCallBack(CallBackType type, UserCallBack& callBack, void* userStorage);
+ bool convertParams(SkTDArray<SkScriptValue>&, const SkFunctionParamType* ,
+ int paramTypeCount);
+ void convertToString(SkOperand& operand, SkDisplayTypes type) {
+ SkScriptValue scriptValue;
+ scriptValue.fOperand = operand;
+ scriptValue.fType = type;
+ convertTo(SkType_String, &scriptValue);
+ operand = scriptValue.fOperand;
+ }
+ bool evaluateDot(const char*& script, bool suppressed);
+ bool evaluateDotParam(const char*& script, bool suppressed, const char* field, size_t fieldLength);
+ bool functionParams(const char** scriptPtr, SkTDArray<SkScriptValue>& params);
+ bool handleArrayIndexer(const char** scriptPtr, bool suppressed);
+ bool handleBox(SkScriptValue* value);
+ bool handleFunction(const char** scriptPtr, bool suppressed);
+ bool handleMember(const char* field, size_t len, void* object);
+ bool handleMemberFunction(const char* field, size_t len, void* object, SkTDArray<SkScriptValue>& params);
+// bool handleObjectToString(void* object);
+ bool handleProperty(bool suppressed);
+ bool handleUnbox(SkScriptValue* scriptValue);
+ bool innerScript(const char** scriptPtr, SkScriptValue* value);
+ int logicalOp(char ch, char nextChar);
+ Error opError();
+ bool processOp();
+ void setAnimateMaker(SkAnimateMaker* maker) { fMaker = maker; }
+ bool setError(Error , const char* pos);
+ enum SkBraceStyle {
+ // kStructBrace,
+ kArrayBrace,
+ kFunctionBrace
+ };
+
+#if 0
+ SkIntArray(SkBraceStyle) fBraceStack; // curly, square, function paren
+ SkIntArray(SkOp) fOpStack;
+ SkIntArray(SkOpType) fTypeStack;
+ SkTDOperandArray fOperandStack;
+ SkTDArray<SkSuppress> fSuppressStack;
+#else
+ SkTDStack<SkBraceStyle> fBraceStack; // curly, square, function paren
+ SkTDStack<SkOp> fOpStack;
+ SkTDStack<SkOpType> fTypeStack;
+ SkTDStack<SkOperand> fOperandStack;
+ SkTDStack<SkSuppress> fSuppressStack;
+#endif
+ SkAnimateMaker* fMaker;
+ SkTDTypedArrayArray fTrackArray;
+ SkTDStringArray fTrackString;
+ const char* fToken; // one-deep stack
+ size_t fTokenLength;
+ SkTDArray<UserCallBack> fUserCallBacks;
+ SkOpType fReturnType;
+ Error fError;
+ int fErrorPosition;
+private:
+ friend class SkTypedArray;
+#ifdef SK_SUPPORT_UNITTEST
+public:
+ static void UnitTest();
+#endif
+};
+
+#ifdef SK_SUPPORT_UNITTEST
+
+struct SkScriptNAnswer {
+ const char* fScript;
+ SkDisplayTypes fType;
+ int32_t fIntAnswer;
+ SkScalar fScalarAnswer;
+ const char* fStringAnswer;
+};
+
+#endif
+
+#endif // SkScript_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkScript2.h b/gfx/skia/skia/src/animator/SkScript2.h
new file mode 100644
index 000000000..f257adb45
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkScript2.h
@@ -0,0 +1,293 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkScript2_DEFINED
+#define SkScript2_DEFINED
+
+#include "SkDisplayType.h"
+#include "SkOperand2.h"
+#include "SkStream.h"
+#include "SkTDArray.h"
+#include "SkTDArray_Experimental.h"
+#include "SkTDict.h"
+#include "SkTDStack.h"
+
+typedef SkLongArray(SkString*) SkTDStringArray;
+
+class SkAnimateMaker;
+class SkScriptCallBack;
+
+class SkScriptEngine2 {
+public:
+ enum Error {
+ kNoError,
+ kArrayIndexOutOfBounds,
+ kCouldNotFindReferencedID,
+ kFunctionCallFailed,
+ kMemberOpFailed,
+ kPropertyOpFailed
+ };
+
+ enum Attrs {
+ kConstant,
+ kVariable
+ };
+
+ SkScriptEngine2(SkOperand2::OpType returnType);
+ ~SkScriptEngine2();
+ bool convertTo(SkOperand2::OpType , SkScriptValue2* );
+ bool evaluateScript(const char** script, SkScriptValue2* value);
+ void forget(SkOpArray* array);
+ Error getError() { return fError; }
+ SkOperand2::OpType getReturnType() { return fReturnType; }
+ void track(SkOpArray* array) {
+ SkASSERT(fTrackArray.find(array) < 0);
+ *fTrackArray.append() = array; }
+ void track(SkString* string) {
+ SkASSERT(fTrackString.find(string) < 0);
+ *fTrackString.append() = string;
+ }
+ static bool ConvertTo(SkScriptEngine2* , SkOperand2::OpType toType, SkScriptValue2* value);
+ static SkScalar IntToScalar(int32_t );
+ static bool ValueToString(const SkScriptValue2& value, SkString* string);
+
+ enum Op { // used by tokenizer attribute table
+ kUnassigned,
+ kAdd,
+ kBitAnd,
+ kBitNot,
+ kBitOr,
+ kDivide,
+ kEqual,
+ kFlipOps,
+ kGreaterEqual,
+ kLogicalAnd,
+ kLogicalNot,
+ kLogicalOr,
+ kMinus,
+ kModulo,
+ kMultiply,
+ kShiftLeft,
+ kShiftRight, // signed
+ kSubtract,
+ kXor,
+// following not in attribute table
+ kArrayOp,
+ kElse,
+ kIf,
+ kParen,
+ kLastLogicalOp,
+ kArtificialOp = 0x20
+ };
+
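+    // TypeOp values are the one-byte opcodes the tokenizer writes to the generated
+    // stream; any immediate data (int32, scalar, string length + bytes, branch offsets)
+    // follows the opcode inline and is consumed by SkScriptRuntime::executeTokens.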
+ enum TypeOp { // generated by tokenizer
+ kNop, // should never get generated
+ kAccumulatorPop,
+ kAccumulatorPush,
+ kAddInt,
+ kAddScalar,
+ kAddString, // string concat
+ kArrayIndex,
+ kArrayParam,
+ kArrayToken,
+ kBitAndInt,
+ kBitNotInt,
+ kBitOrInt,
+ kBoxToken,
+ kCallback,
+ kDivideInt,
+ kDivideScalar,
+ kDotOperator,
+ kElseOp,
+ kEnd,
+ kEqualInt,
+ kEqualScalar,
+ kEqualString,
+ kFunctionCall,
+ kFlipOpsOp,
+ kFunctionToken,
+ kGreaterEqualInt,
+ kGreaterEqualScalar,
+ kGreaterEqualString,
+ kIfOp,
+ kIntToScalar,
+ kIntToScalar2,
+ kIntToString,
+ kIntToString2,
+ kIntegerAccumulator,
+ kIntegerOperand,
+ kLogicalAndInt,
+ kLogicalNotInt,
+ kLogicalOrInt,
+ kMemberOp,
+ kMinusInt,
+ kMinusScalar,
+ kModuloInt,
+ kModuloScalar,
+ kMultiplyInt,
+ kMultiplyScalar,
+ kPropertyOp,
+ kScalarAccumulator,
+ kScalarOperand,
+ kScalarToInt,
+ kScalarToInt2,
+ kScalarToString,
+ kScalarToString2,
+ kShiftLeftInt,
+ kShiftRightInt, // signed
+ kStringAccumulator,
+ kStringOperand,
+ kStringToInt,
+ kStringToScalar,
+ kStringToScalar2,
+ kStringTrack,
+ kSubtractInt,
+ kSubtractScalar,
+ kToBool,
+ kUnboxToken,
+ kUnboxToken2,
+ kXorInt,
+ kLastTypeOp
+ };
+
+ enum OpBias {
+ kNoBias,
+ kTowardsNumber = 0,
+ kTowardsString
+ };
+
+protected:
+
+ enum BraceStyle {
+ // kStructBrace,
+ kArrayBrace,
+ kFunctionBrace
+ };
+
+ enum AddTokenRegister {
+ kAccumulator,
+ kOperand
+ };
+
+ enum ResultIsBoolean {
+ kResultIsNotBoolean,
+ kResultIsBoolean
+ };
+
+ struct OperatorAttributes {
+ unsigned int fLeftType : 3; // SkOpType union, but only lower values
+ unsigned int fRightType : 3; // SkOpType union, but only lower values
+ OpBias fBias : 1;
+ ResultIsBoolean fResultIsBoolean : 1;
+ };
+
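+    // A Branch records a forward jump whose target is not yet known (the skips emitted
+    // for &&, || and ? :): fOffset marks where the placeholder offset was written into
+    // the stream, and resolve() patches it once the code to be skipped over has been
+    // generated.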
+ struct Branch {
+ Branch() {
+ }
+
+ Branch(Op op, int depth, size_t offset)
+ : fOffset(SkToU16(offset)), fOpStackDepth(depth), fOperator(op)
+ , fPrimed(kIsNotPrimed), fDone(kIsNotDone) {
+ }
+
+ enum Primed {
+ kIsNotPrimed,
+ kIsPrimed
+ };
+
+ enum Done {
+ kIsNotDone,
+ kIsDone,
+ };
+
+ unsigned fOffset : 16; // offset in generated stream where branch needs to go
+ int fOpStackDepth : 7; // depth when operator was found
+ Op fOperator : 6; // operand which generated branch
+ mutable Primed fPrimed : 1; // mark when next instruction generates branch
+ Done fDone : 1; // mark when branch is complete
+ void prime() { fPrimed = kIsPrimed; }
+ void resolve(SkDynamicMemoryWStream* , size_t offset);
+ };
+
+ static const OperatorAttributes gOpAttributes[];
+ static const signed char gPrecedence[];
+ static const TypeOp gTokens[];
+ void addToken(TypeOp );
+ void addTokenConst(SkScriptValue2* , AddTokenRegister , SkOperand2::OpType , TypeOp );
+ void addTokenInt(int );
+ void addTokenScalar(SkScalar );
+ void addTokenString(const SkString& );
+ void addTokenValue(const SkScriptValue2& , AddTokenRegister );
+ int arithmeticOp(char ch, char nextChar, bool lastPush);
+ bool convertParams(SkTDArray<SkScriptValue2>* ,
+ const SkOperand2::OpType* paramTypes, int paramTypeCount);
+ void convertToString(SkOperand2* operand, SkOperand2::OpType type) {
+ SkScriptValue2 scriptValue;
+ scriptValue.fOperand = *operand;
+ scriptValue.fType = type;
+ convertTo(SkOperand2::kString, &scriptValue);
+ *operand = scriptValue.fOperand;
+ }
+ bool evaluateDot(const char*& script);
+ bool evaluateDotParam(const char*& script, const char* field, size_t fieldLength);
+ bool functionParams(const char** scriptPtr, SkTDArray<SkScriptValue2>* params);
+ size_t getTokenOffset();
+ SkOperand2::OpType getUnboxType(SkOperand2 scriptValue);
+ bool handleArrayIndexer(const char** scriptPtr);
+ bool handleFunction(const char** scriptPtr);
+ bool handleMember(const char* field, size_t len, void* object);
+ bool handleMemberFunction(const char* field, size_t len, void* object,
+ SkTDArray<SkScriptValue2>* params);
+ bool handleProperty();
+ bool handleUnbox(SkScriptValue2* scriptValue);
+ bool innerScript(const char** scriptPtr, SkScriptValue2* value);
+ int logicalOp(char ch, char nextChar);
+ void processLogicalOp(Op op);
+ bool processOp();
+ void resolveBranch(Branch& );
+// void setAnimateMaker(SkAnimateMaker* maker) { fMaker = maker; }
+ SkDynamicMemoryWStream fStream;
+ SkDynamicMemoryWStream* fActiveStream;
+ SkTDStack<BraceStyle> fBraceStack; // curly, square, function paren
+ SkTDStack<Branch> fBranchStack; // logical operators, slot to store forward branch
+ SkLongArray(SkScriptCallBack*) fCallBackArray;
+ SkTDStack<Op> fOpStack;
+ SkTDStack<SkScriptValue2> fValueStack;
+// SkAnimateMaker* fMaker;
+ SkLongArray(SkOpArray*) fTrackArray;
+ SkTDStringArray fTrackString;
+ const char* fToken; // one-deep stack
+ size_t fTokenLength;
+ SkOperand2::OpType fReturnType;
+ Error fError;
+ SkOperand2::OpType fAccumulatorType; // tracking for code generation
+ SkBool fBranchPopAllowed;
+ SkBool fConstExpression;
+ SkBool fOperandInUse;
+private:
+#ifdef SK_DEBUG
+public:
+ void decompile(const unsigned char* , size_t );
+ static void UnitTest();
+ static void ValidateDecompileTable();
+#endif
+};
+
+#ifdef SK_DEBUG
+
+struct SkScriptNAnswer2 {
+ const char* fScript;
+ SkOperand2::OpType fType;
+ int32_t fIntAnswer;
+ SkScalar fScalarAnswer;
+ const char* fStringAnswer;
+};
+
+#endif
+
+
+#endif // SkScript2_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkScriptCallBack.h b/gfx/skia/skia/src/animator/SkScriptCallBack.h
new file mode 100644
index 000000000..fefc482f6
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkScriptCallBack.h
@@ -0,0 +1,67 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkScriptCallBack_DEFINED
+#define SkScriptCallBack_DEFINED
+
+#include "SkOperand2.h"
+#include "SkTDArray_Experimental.h"
+
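+// Base interface for host callbacks: the runtime selects one via a kCallback opcode and
+// then invokes it through the matching subclass below (functions, members, member
+// functions, properties, and box/unbox conversions).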
+class SkScriptCallBack {
+public:
+ virtual ~SkScriptCallBack() { }
+
+ enum Type {
+ kBox,
+ kFunction,
+ kMember,
+ kMemberFunction,
+ kProperty,
+ kUnbox
+ };
+
+ virtual bool getReference(const char* , size_t len, SkScriptValue2* result) { return false; }
+ virtual SkOperand2::OpType getReturnType(size_t ref, SkOperand2*) {
+ return SkOperand2::kS32; }
+ virtual Type getType() const = 0;
+};
+
+class SkScriptCallBackConvert : public SkScriptCallBack {
+public:
+ virtual bool convert(SkOperand2::OpType type, SkOperand2* operand) = 0;
+};
+
+class SkScriptCallBackFunction : public SkScriptCallBack {
+public:
+ virtual void getParamTypes(SkIntArray(SkOperand2::OpType)* types) = 0;
+ virtual Type getType() const { return kFunction; }
+ virtual bool invoke(size_t ref, SkOpArray* params, SkOperand2* value) = 0;
+};
+
+class SkScriptCallBackMember: public SkScriptCallBack {
+public:
+ bool getMemberReference(const char* , size_t len, void* object, SkScriptValue2* ref);
+ virtual Type getType() const { return kMember; }
+ virtual bool invoke(size_t ref, void* object, SkOperand2* value) = 0;
+};
+
+class SkScriptCallBackMemberFunction : public SkScriptCallBack {
+public:
+ bool getMemberReference(const char* , size_t len, void* object, SkScriptValue2* ref);
+ virtual void getParamTypes(SkIntArray(SkOperand2::OpType)* types) = 0;
+ virtual Type getType() const { return kMemberFunction; }
+ virtual bool invoke(size_t ref, void* object, SkOpArray* params, SkOperand2* value) = 0;
+};
+
+class SkScriptCallBackProperty : public SkScriptCallBack {
+public:
+ virtual bool getConstValue(const char* name, size_t len, SkOperand2* value) { return false; }
+ virtual bool getResult(size_t ref, SkOperand2* answer) { return false; }
+ virtual Type getType() const { return kProperty; }
+};
+
+#endif // SkScriptCallBack_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkScriptDecompile.cpp b/gfx/skia/skia/src/animator/SkScriptDecompile.cpp
new file mode 100644
index 000000000..995da874c
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkScriptDecompile.cpp
@@ -0,0 +1,211 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkScript2.h"
+
+#ifdef SK_DEBUG
+
+#define TypeOpName(op) {SkScriptEngine2::op, #op }
+
+static const struct OpName {
+ SkScriptEngine2::TypeOp fOp;
+ const char* fName;
+} gOpNames[] = {
+ TypeOpName(kNop), // should never get generated
+ TypeOpName(kAccumulatorPop),
+ TypeOpName(kAccumulatorPush),
+ TypeOpName(kAddInt),
+ TypeOpName(kAddScalar),
+ TypeOpName(kAddString), // string concat
+ TypeOpName(kArrayIndex),
+ TypeOpName(kArrayParam),
+ TypeOpName(kArrayToken),
+ TypeOpName(kBitAndInt),
+ TypeOpName(kBitNotInt),
+ TypeOpName(kBitOrInt),
+ TypeOpName(kBoxToken),
+ TypeOpName(kCallback),
+ TypeOpName(kDivideInt),
+ TypeOpName(kDivideScalar),
+ TypeOpName(kDotOperator),
+ TypeOpName(kElseOp),
+ TypeOpName(kEnd),
+ TypeOpName(kEqualInt),
+ TypeOpName(kEqualScalar),
+ TypeOpName(kEqualString),
+ TypeOpName(kFunctionCall),
+ TypeOpName(kFlipOpsOp),
+ TypeOpName(kFunctionToken),
+ TypeOpName(kGreaterEqualInt),
+ TypeOpName(kGreaterEqualScalar),
+ TypeOpName(kGreaterEqualString),
+ TypeOpName(kIfOp),
+ TypeOpName(kIntToScalar),
+ TypeOpName(kIntToScalar2),
+ TypeOpName(kIntToString),
+ TypeOpName(kIntToString2),
+ TypeOpName(kIntegerAccumulator),
+ TypeOpName(kIntegerOperand),
+ TypeOpName(kLogicalAndInt),
+ TypeOpName(kLogicalNotInt),
+ TypeOpName(kLogicalOrInt),
+ TypeOpName(kMemberOp),
+ TypeOpName(kMinusInt),
+ TypeOpName(kMinusScalar),
+ TypeOpName(kModuloInt),
+ TypeOpName(kModuloScalar),
+ TypeOpName(kMultiplyInt),
+ TypeOpName(kMultiplyScalar),
+ TypeOpName(kPropertyOp),
+ TypeOpName(kScalarAccumulator),
+ TypeOpName(kScalarOperand),
+ TypeOpName(kScalarToInt),
+ TypeOpName(kScalarToInt2),
+ TypeOpName(kScalarToString),
+ TypeOpName(kScalarToString2),
+ TypeOpName(kShiftLeftInt),
+ TypeOpName(kShiftRightInt), // signed
+ TypeOpName(kStringAccumulator),
+ TypeOpName(kStringOperand),
+ TypeOpName(kStringToInt),
+ TypeOpName(kStringToScalar),
+ TypeOpName(kStringToScalar2),
+ TypeOpName(kStringTrack),
+ TypeOpName(kSubtractInt),
+ TypeOpName(kSubtractScalar),
+ TypeOpName(kToBool),
+ TypeOpName(kUnboxToken),
+ TypeOpName(kUnboxToken2),
+ TypeOpName(kXorInt)
+};
+
+static size_t gOpNamesSize = sizeof(gOpNames) / sizeof(gOpNames[0]);
+
+#define OperandName(op) {SkOperand2::op, #op }
+
+static const struct OperName {
+ SkOperand2::OpType fType;
+ const char* fName;
+} gOperandNames[] = {
+ OperandName(kNoType),
+ OperandName(kS32),
+ OperandName(kScalar),
+ OperandName(kString),
+ OperandName(kArray),
+ OperandName(kObject)
+};
+
+static size_t gOperandNamesSize = sizeof(gOperandNames) / sizeof(gOperandNames[0]);
+
+// check to see that there are no missing or duplicate entries
+void SkScriptEngine2::ValidateDecompileTable() {
+ SkScriptEngine2::TypeOp op = SkScriptEngine2::kNop;
+ size_t index;
+ for (index = 0; index < gOpNamesSize; index++) {
+ SkASSERT(gOpNames[index].fOp == op);
+ op = (SkScriptEngine2::TypeOp) (op + 1);
+ }
+ index = 0;
+ SkOperand2::OpType type = SkOperand2::kNoType;
+ SkASSERT(gOperandNames[index].fType == type);
+ for (; index < gOperandNamesSize - 1; ) {
+ type = (SkOperand2::OpType) (1 << index);
+ SkASSERT(gOperandNames[++index].fType == type);
+ }
+}
+
+void SkScriptEngine2::decompile(const unsigned char* start, size_t length) {
+ SkASSERT(length > 0);
+ const unsigned char* opCode = start;
+ do {
+ SkASSERT((size_t)(opCode - start) < length);
+ SkScriptEngine2::TypeOp op = (SkScriptEngine2::TypeOp) *opCode++;
+ SkASSERT((size_t)op < gOpNamesSize);
+ SkDebugf("%d: %s", opCode - start - 1, gOpNames[op].fName);
+ switch (op) {
+ case SkScriptEngine2::kCallback: {
+ int index;
+ memcpy(&index, opCode, sizeof(index));
+ opCode += sizeof(index);
+ SkDebugf(" index: %d", index);
+ } break;
+ case SkScriptEngine2::kFunctionCall:
+ case SkScriptEngine2::kMemberOp:
+ case SkScriptEngine2::kPropertyOp: {
+ size_t ref;
+ memcpy(&ref, opCode, sizeof(ref));
+ opCode += sizeof(ref);
+ SkDebugf(" ref: %d", ref);
+ } break;
+ case SkScriptEngine2::kIntegerAccumulator:
+ case SkScriptEngine2::kIntegerOperand: {
+ int32_t integer;
+ memcpy(&integer, opCode, sizeof(integer));
+ opCode += sizeof(int32_t);
+ SkDebugf(" integer: %d", integer);
+ } break;
+ case SkScriptEngine2::kScalarAccumulator:
+ case SkScriptEngine2::kScalarOperand: {
+ SkScalar scalar;
+ memcpy(&scalar, opCode, sizeof(scalar));
+ opCode += sizeof(SkScalar);
+ SkDebugf(" scalar: %g", SkScalarToFloat(scalar));
+ } break;
+ case SkScriptEngine2::kStringAccumulator:
+ case SkScriptEngine2::kStringOperand: {
+ int size;
+ SkString* strPtr = new SkString();
+ memcpy(&size, opCode, sizeof(size));
+ opCode += sizeof(size);
+ strPtr->set((char*) opCode, size);
+ opCode += size;
+ SkDebugf(" string: %s", strPtr->c_str());
+ delete strPtr;
+ } break;
+ case SkScriptEngine2::kBoxToken: {
+ SkOperand2::OpType type;
+ memcpy(&type, opCode, sizeof(type));
+ opCode += sizeof(type);
+ size_t index = 0;
+ if (type == 0)
+ SkDebugf(" type: %s", gOperandNames[index].fName);
+ else {
+ while (type != 0) {
+ SkASSERT(index + 1 < gOperandNamesSize);
+ if (type & (1 << index)) {
+ type = (SkOperand2::OpType) (type & ~(1 << index));
+ SkDebugf(" type: %s", gOperandNames[index + 1].fName);
+ }
+ index++;
+ }
+ }
+ } break;
+ case SkScriptEngine2::kIfOp:
+ case SkScriptEngine2::kLogicalAndInt:
+ case SkScriptEngine2::kElseOp:
+ case SkScriptEngine2::kLogicalOrInt: {
+ int size;
+ memcpy(&size, opCode, sizeof(size));
+ opCode += sizeof(size);
+ SkDebugf(" offset (address): %d (%d)", size, opCode - start + size);
+ } break;
+ case SkScriptEngine2::kEnd:
+ goto done;
+ case SkScriptEngine2::kNop:
+ SkASSERT(0);
+ default:
+ break;
+ }
+ SkDebugf("\n");
+ } while (true);
+done:
+ SkDebugf("\n");
+}
+
+#endif
diff --git a/gfx/skia/skia/src/animator/SkScriptRuntime.cpp b/gfx/skia/skia/src/animator/SkScriptRuntime.cpp
new file mode 100644
index 000000000..7cb35a69d
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkScriptRuntime.cpp
@@ -0,0 +1,351 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkScriptRuntime.h"
+#include "SkScript2.h"
+#include "SkMath.h"
+#include "SkParse.h"
+#include "SkScriptCallBack.h"
+#include "SkString.h"
+#include "SkOpArray.h"
+
+// script tokenizer
+
+// turn text into token string
+// turn number literals into inline UTF8-style values
+// process operators to turn standard notation into stack notation
+
+// defer processing until the tokens can all be resolved
+// then, turn token strings into indices into the appropriate tables / dictionaries
+
+// consider: const evaluation?
+
+// replace script string with script tokens preceded by special value
+
+// need second version of script plugins that return private index of found value?
+ // then would need in script index of plugin, private index
+
+// encode brace stack push/pop as opcodes
+
+// should token script encode type where possible?
+
+// current flow:
+ // strip whitespace
+ // if in array brace [ recurse, continue
+ // if token, handle function, or array, or property (continue)
+ // parse number, continue
+ // parse token, continue
+ // parse string literal, continue
+ // if dot operator, handle dot, continue
+ // if [ , handle array literal or accessor, continue
+ // if ), pop (if function, break)
+ // if ], pop ; if ',' break
+ // handle logical ops
+ // or, handle arithmetic ops
+ // loop
+
+// !!! things to do
+ // add separate processing loop to advance while suppressed
+ // or, include jump offset to skip suppressed code?
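+
+// rough usage sketch (the compile step lives in SkScriptEngine2, not in this file;
+// callBackArray, opCodes and result below are placeholder names):
+//     SkScriptRuntime runtime(callBackArray);  // SkTDScriptCallBackArray from the engine
+//     if (runtime.executeTokens(opCodes))      // opCodes: TypeOp byte stream
+//         runtime.getResult(&result);          // result: SkOperand2 left on the run stack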
+
+SkScriptRuntime::~SkScriptRuntime() {
+ for (SkString** stringPtr = fTrackString.begin(); stringPtr < fTrackString.end(); stringPtr++)
+ delete *stringPtr;
+ for (SkOpArray** arrayPtr = fTrackArray.begin(); arrayPtr < fTrackArray.end(); arrayPtr++)
+ delete *arrayPtr;
+}
+
+bool SkScriptRuntime::executeTokens(unsigned char* opCode) {
+    SkOperand2 operand[2]; // [0]=accumulator, [1]=operand
+ SkScriptEngine2::TypeOp op;
+ size_t ref;
+ int index, size;
+ int registerLoad;
+ SkScriptCallBack* callBack SK_INIT_TO_AVOID_WARNING;
+ do {
+ switch ((op = (SkScriptEngine2::TypeOp) *opCode++)) {
+ case SkScriptEngine2::kArrayToken: // create an array
+ operand[0].fArray = new SkOpArray(SkOperand2::kNoType /*fReturnType*/);
+ break;
+ case SkScriptEngine2::kArrayIndex: // array accessor
+ index = operand[1].fS32;
+ if (index >= operand[0].fArray->count()) {
+ fError = kArrayIndexOutOfBounds;
+ return false;
+ }
+ operand[0] = operand[0].fArray->begin()[index];
+ break;
+ case SkScriptEngine2::kArrayParam: // array initializer, or function param
+ *operand[0].fArray->append() = operand[1];
+ break;
+ case SkScriptEngine2::kCallback:
+ memcpy(&index, opCode, sizeof(index));
+ opCode += sizeof(index);
+ callBack = fCallBackArray[index];
+ break;
+ case SkScriptEngine2::kFunctionCall: {
+ memcpy(&ref, opCode, sizeof(ref));
+ opCode += sizeof(ref);
+ SkScriptCallBackFunction* callBackFunction = (SkScriptCallBackFunction*) callBack;
+ if (callBackFunction->invoke(ref, operand[0].fArray, /* params */
+ &operand[0] /* result */) == false) {
+ fError = kFunctionCallFailed;
+ return false;
+ }
+ } break;
+ case SkScriptEngine2::kMemberOp: {
+ memcpy(&ref, opCode, sizeof(ref));
+ opCode += sizeof(ref);
+ SkScriptCallBackMember* callBackMember = (SkScriptCallBackMember*) callBack;
+ if (callBackMember->invoke(ref, operand[0].fObject, &operand[0]) == false) {
+ fError = kMemberOpFailed;
+ return false;
+ }
+ } break;
+ case SkScriptEngine2::kPropertyOp: {
+ memcpy(&ref, opCode, sizeof(ref));
+ opCode += sizeof(ref);
+ SkScriptCallBackProperty* callBackProperty = (SkScriptCallBackProperty*) callBack;
+ if (callBackProperty->getResult(ref, &operand[0])== false) {
+ fError = kPropertyOpFailed;
+ return false;
+ }
+ } break;
+ case SkScriptEngine2::kAccumulatorPop:
+ fRunStack.pop(&operand[0]);
+ break;
+ case SkScriptEngine2::kAccumulatorPush:
+ *fRunStack.push() = operand[0];
+ break;
+ case SkScriptEngine2::kIntegerAccumulator:
+ case SkScriptEngine2::kIntegerOperand:
+ registerLoad = op - SkScriptEngine2::kIntegerAccumulator;
+ memcpy(&operand[registerLoad].fS32, opCode, sizeof(int32_t));
+ opCode += sizeof(int32_t);
+ break;
+ case SkScriptEngine2::kScalarAccumulator:
+ case SkScriptEngine2::kScalarOperand:
+ registerLoad = op - SkScriptEngine2::kScalarAccumulator;
+ memcpy(&operand[registerLoad].fScalar, opCode, sizeof(SkScalar));
+ opCode += sizeof(SkScalar);
+ break;
+ case SkScriptEngine2::kStringAccumulator:
+ case SkScriptEngine2::kStringOperand: {
+ SkString* strPtr = new SkString();
+ track(strPtr);
+ registerLoad = op - SkScriptEngine2::kStringAccumulator;
+ memcpy(&size, opCode, sizeof(size));
+ opCode += sizeof(size);
+ strPtr->set((char*) opCode, size);
+ opCode += size;
+ operand[registerLoad].fString = strPtr;
+ } break;
+ case SkScriptEngine2::kStringTrack: // call after kObjectToValue
+ track(operand[0].fString);
+ break;
+ case SkScriptEngine2::kBoxToken: {
+ SkOperand2::OpType type;
+ memcpy(&type, opCode, sizeof(type));
+ opCode += sizeof(type);
+ SkScriptCallBackConvert* callBackBox = (SkScriptCallBackConvert*) callBack;
+ if (callBackBox->convert(type, &operand[0]) == false)
+ return false;
+ } break;
+ case SkScriptEngine2::kUnboxToken:
+ case SkScriptEngine2::kUnboxToken2: {
+ SkScriptCallBackConvert* callBackUnbox = (SkScriptCallBackConvert*) callBack;
+ if (callBackUnbox->convert(SkOperand2::kObject, &operand[0]) == false)
+ return false;
+ } break;
+ case SkScriptEngine2::kIfOp:
+ case SkScriptEngine2::kLogicalAndInt:
+ memcpy(&size, opCode, sizeof(size));
+ opCode += sizeof(size);
+ if (operand[0].fS32 == 0)
+ opCode += size; // skip to else (or end of if predicate)
+ break;
+ case SkScriptEngine2::kElseOp:
+ memcpy(&size, opCode, sizeof(size));
+ opCode += sizeof(size);
+ opCode += size; // if true: after predicate, always skip to end of else
+ break;
+ case SkScriptEngine2::kLogicalOrInt:
+ memcpy(&size, opCode, sizeof(size));
+ opCode += sizeof(size);
+ if (operand[0].fS32 != 0)
+ opCode += size; // skip to kToBool opcode after || predicate
+ break;
+ // arithmetic conversion ops
+ case SkScriptEngine2::kFlipOpsOp:
+ SkTSwap(operand[0], operand[1]);
+ break;
+ case SkScriptEngine2::kIntToString:
+ case SkScriptEngine2::kIntToString2:
+ case SkScriptEngine2::kScalarToString:
+ case SkScriptEngine2::kScalarToString2:{
+ SkString* strPtr = new SkString();
+ track(strPtr);
+ if (op == SkScriptEngine2::kIntToString || op == SkScriptEngine2::kIntToString2)
+ strPtr->appendS32(operand[op - SkScriptEngine2::kIntToString].fS32);
+ else
+ strPtr->appendScalar(operand[op - SkScriptEngine2::kScalarToString].fScalar);
+ operand[0].fString = strPtr;
+ } break;
+ case SkScriptEngine2::kIntToScalar:
+ case SkScriptEngine2::kIntToScalar2:
+ operand[0].fScalar = SkScriptEngine2::IntToScalar(operand[op - SkScriptEngine2::kIntToScalar].fS32);
+ break;
+ case SkScriptEngine2::kStringToInt:
+ if (SkParse::FindS32(operand[0].fString->c_str(), &operand[0].fS32) == nullptr)
+ return false;
+ break;
+ case SkScriptEngine2::kStringToScalar:
+ case SkScriptEngine2::kStringToScalar2:
+ if (SkParse::FindScalar(operand[0].fString->c_str(),
+ &operand[op - SkScriptEngine2::kStringToScalar].fScalar) == nullptr)
+ return false;
+ break;
+ case SkScriptEngine2::kScalarToInt:
+ operand[0].fS32 = SkScalarFloorToInt(operand[0].fScalar);
+ break;
+ // arithmetic ops
+ case SkScriptEngine2::kAddInt:
+ operand[0].fS32 += operand[1].fS32;
+ break;
+ case SkScriptEngine2::kAddScalar:
+ operand[0].fScalar += operand[1].fScalar;
+ break;
+ case SkScriptEngine2::kAddString:
+//            if (fTrackString.find(operand[1].fString) < 0) {
+//                operand[1].fString = new SkString(*operand[1].fString);
+//                track(operand[1].fString);
+//            }
+ operand[0].fString->append(*operand[1].fString);
+ break;
+ case SkScriptEngine2::kBitAndInt:
+ operand[0].fS32 &= operand[1].fS32;
+ break;
+ case SkScriptEngine2::kBitNotInt:
+ operand[0].fS32 = ~operand[0].fS32;
+ break;
+ case SkScriptEngine2::kBitOrInt:
+ operand[0].fS32 |= operand[1].fS32;
+ break;
+ case SkScriptEngine2::kDivideInt:
+ SkASSERT(operand[1].fS32 != 0);
+ if (operand[1].fS32 == 0)
+ operand[0].fS32 = operand[0].fS32 == 0 ? SK_NaN32 :
+ operand[0].fS32 > 0 ? SK_MaxS32 : -SK_MaxS32;
+                else // divisor is non-zero here; could instead throw an error on divide by zero
+                    operand[0].fS32 /= operand[1].fS32;
+ break;
+ case SkScriptEngine2::kDivideScalar:
+ if (operand[1].fScalar == 0)
+ operand[0].fScalar = operand[0].fScalar == 0 ? SK_ScalarNaN :
+ operand[0].fScalar > 0 ? SK_ScalarMax : -SK_ScalarMax;
+ else
+ operand[0].fScalar = operand[0].fScalar / operand[1].fScalar;
+ break;
+ case SkScriptEngine2::kEqualInt:
+ operand[0].fS32 = operand[0].fS32 == operand[1].fS32;
+ break;
+ case SkScriptEngine2::kEqualScalar:
+ operand[0].fS32 = operand[0].fScalar == operand[1].fScalar;
+ break;
+ case SkScriptEngine2::kEqualString:
+ operand[0].fS32 = *operand[0].fString == *operand[1].fString;
+ break;
+ case SkScriptEngine2::kGreaterEqualInt:
+ operand[0].fS32 = operand[0].fS32 >= operand[1].fS32;
+ break;
+ case SkScriptEngine2::kGreaterEqualScalar:
+ operand[0].fS32 = operand[0].fScalar >= operand[1].fScalar;
+ break;
+ case SkScriptEngine2::kGreaterEqualString:
+ operand[0].fS32 = strcmp(operand[0].fString->c_str(), operand[1].fString->c_str()) >= 0;
+ break;
+ case SkScriptEngine2::kToBool:
+ operand[0].fS32 = !! operand[0].fS32;
+ break;
+ case SkScriptEngine2::kLogicalNotInt:
+ operand[0].fS32 = ! operand[0].fS32;
+ break;
+ case SkScriptEngine2::kMinusInt:
+ operand[0].fS32 = -operand[0].fS32;
+ break;
+ case SkScriptEngine2::kMinusScalar:
+ operand[0].fScalar = -operand[0].fScalar;
+ break;
+ case SkScriptEngine2::kModuloInt:
+ operand[0].fS32 %= operand[1].fS32;
+ break;
+ case SkScriptEngine2::kModuloScalar:
+ operand[0].fScalar = SkScalarMod(operand[0].fScalar, operand[1].fScalar);
+ break;
+ case SkScriptEngine2::kMultiplyInt:
+ operand[0].fS32 *= operand[1].fS32;
+ break;
+ case SkScriptEngine2::kMultiplyScalar:
+ operand[0].fScalar = SkScalarMul(operand[0].fScalar, operand[1].fScalar);
+ break;
+ case SkScriptEngine2::kShiftLeftInt:
+ operand[0].fS32 <<= operand[1].fS32;
+ break;
+ case SkScriptEngine2::kShiftRightInt:
+ operand[0].fS32 >>= operand[1].fS32;
+ break;
+ case SkScriptEngine2::kSubtractInt:
+ operand[0].fS32 -= operand[1].fS32;
+ break;
+ case SkScriptEngine2::kSubtractScalar:
+ operand[0].fScalar -= operand[1].fScalar;
+ break;
+ case SkScriptEngine2::kXorInt:
+ operand[0].fS32 ^= operand[1].fS32;
+ break;
+ case SkScriptEngine2::kEnd:
+ goto done;
+ case SkScriptEngine2::kNop:
+ SkASSERT(0);
+ default:
+ break;
+ }
+ } while (true);
+done:
+ fRunStack.push(operand[0]);
+ return true;
+}
+
+bool SkScriptRuntime::getResult(SkOperand2* result) {
+ if (fRunStack.count() == 0)
+ return false;
+ fRunStack.pop(result);
+ return true;
+}
+
+void SkScriptRuntime::track(SkOpArray* array) {
+ SkASSERT(fTrackArray.find(array) < 0);
+ *fTrackArray.append() = array;
+}
+
+void SkScriptRuntime::track(SkString* string) {
+ SkASSERT(fTrackString.find(string) < 0);
+ *fTrackString.append() = string;
+}
+
+void SkScriptRuntime::untrack(SkOpArray* array) {
+ int index = fTrackArray.find(array);
+ SkASSERT(index >= 0);
+ fTrackArray.begin()[index] = nullptr;
+}
+
+void SkScriptRuntime::untrack(SkString* string) {
+ int index = fTrackString.find(string);
+ SkASSERT(index >= 0);
+ fTrackString.begin()[index] = nullptr;
+}
diff --git a/gfx/skia/skia/src/animator/SkScriptRuntime.h b/gfx/skia/skia/src/animator/SkScriptRuntime.h
new file mode 100644
index 000000000..3e7380114
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkScriptRuntime.h
@@ -0,0 +1,50 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkScriptRuntime_DEFINED
+#define SkScriptRuntime_DEFINED
+
+#include "SkOperand2.h"
+#include "SkTDArray_Experimental.h"
+#include "SkTDStack.h"
+
+class SkScriptCallBack;
+
+typedef SkLongArray(SkString*) SkTDStringArray;
+typedef SkLongArray(SkScriptCallBack*) SkTDScriptCallBackArray;
+
+class SkScriptRuntime {
+public:
+ enum SkError {
+ kNoError,
+ kArrayIndexOutOfBounds,
+ kCouldNotFindReferencedID,
+ kFunctionCallFailed,
+ kMemberOpFailed,
+ kPropertyOpFailed
+ };
+
+ SkScriptRuntime(SkTDScriptCallBackArray& callBackArray) : fCallBackArray(callBackArray)
+ { }
+ ~SkScriptRuntime();
+ bool executeTokens(unsigned char* opCode);
+ bool getResult(SkOperand2* result);
+ void untrack(SkOpArray* array);
+ void untrack(SkString* string);
+private:
+ void track(SkOpArray* array);
+ void track(SkString* string);
+ SkTDScriptCallBackArray& fCallBackArray;
+ SkError fError;
+ SkTDStack<SkOperand2> fRunStack;
+ SkLongArray(SkOpArray*) fTrackArray;
+ SkTDStringArray fTrackString;
+ // illegal
+ SkScriptRuntime& operator=(const SkScriptRuntime&);
+};
+
+#endif // SkScriptRuntime_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkScriptTokenizer.cpp b/gfx/skia/skia/src/animator/SkScriptTokenizer.cpp
new file mode 100644
index 000000000..4ab7584f7
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkScriptTokenizer.cpp
@@ -0,0 +1,1506 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkScript2.h"
+#include "SkData.h"
+#include "SkFloatingPoint.h"
+#include "SkMath.h"
+#include "SkParse.h"
+#include "SkScriptCallBack.h"
+#include "SkScriptRuntime.h"
+#include "SkString.h"
+#include "SkOpArray.h"
+
+const SkScriptEngine2::OperatorAttributes SkScriptEngine2::gOpAttributes[] = {
+{ SkOperand2::kNoType, SkOperand2::kNoType, kNoBias, kResultIsNotBoolean },
+{ SkOperand2::OpType(SkOperand2::kS32 | SkOperand2::kScalar | SkOperand2::kString),
+ SkOperand2::OpType(SkOperand2::kS32 | SkOperand2::kScalar | SkOperand2::kString), kTowardsString, kResultIsNotBoolean }, // kAdd
+{ SkOperand2::kS32, SkOperand2::kS32, kNoBias, kResultIsNotBoolean }, // kBitAnd
+{ SkOperand2::kNoType, SkOperand2::kS32, kNoBias, kResultIsNotBoolean }, // kBitNot
+{ SkOperand2::kS32, SkOperand2::kS32, kNoBias, kResultIsNotBoolean }, // kBitOr
+{ SkOperand2::OpType(SkOperand2::kS32 | SkOperand2::kScalar),
+ SkOperand2::OpType(SkOperand2::kS32 | SkOperand2::kScalar), kNoBias, kResultIsNotBoolean }, // kDivide
+{ SkOperand2::OpType(SkOperand2::kS32 | SkOperand2::kScalar | SkOperand2::kString),
+ SkOperand2::OpType(SkOperand2::kS32 | SkOperand2::kScalar |SkOperand2:: kString), kTowardsNumber,
+ kResultIsBoolean }, // kEqual
+{ SkOperand2::kS32, SkOperand2::kNoType, kNoBias, kResultIsNotBoolean }, // kFlipOps
+{ SkOperand2::OpType(SkOperand2::kS32 | SkOperand2::kScalar | SkOperand2::kString),
+ SkOperand2::OpType(SkOperand2::kS32 | SkOperand2::kScalar | SkOperand2::kString), kTowardsNumber,
+ kResultIsBoolean }, // kGreaterEqual
+{ SkOperand2::kNoType, SkOperand2::kS32, kNoBias, kResultIsNotBoolean }, // kLogicalAnd (really, ToBool)
+{ SkOperand2::kNoType, SkOperand2::kS32, kNoBias, kResultIsNotBoolean }, // kLogicalNot
+{ SkOperand2::kS32, SkOperand2::kS32, kNoBias, kResultIsNotBoolean }, // kLogicalOr
+{ SkOperand2::kNoType, SkOperand2::OpType(SkOperand2::kS32 | SkOperand2::kScalar), kNoBias, kResultIsNotBoolean }, // kMinus
+{ SkOperand2::OpType(SkOperand2::kS32 | SkOperand2::kScalar),
+ SkOperand2::OpType(SkOperand2::kS32 |SkOperand2:: kScalar), kNoBias, kResultIsNotBoolean }, // kModulo
+{ SkOperand2::OpType(SkOperand2::kS32 | SkOperand2::kScalar),
+ SkOperand2::OpType(SkOperand2::kS32 | SkOperand2::kScalar), kNoBias, kResultIsNotBoolean }, // kMultiply
+{ SkOperand2::kS32, SkOperand2::kS32, kNoBias, kResultIsNotBoolean }, // kShiftLeft
+{ SkOperand2::kS32, SkOperand2::kS32, kNoBias, kResultIsNotBoolean }, // kShiftRight
+{ SkOperand2::OpType(SkOperand2::kS32 | SkOperand2::kScalar),
+ SkOperand2::OpType(SkOperand2::kS32 | SkOperand2::kScalar), kNoBias, kResultIsNotBoolean }, // kSubtract
+{ SkOperand2::kS32, SkOperand2::kS32, kNoBias, kResultIsNotBoolean } // kXor
+};
+
+#define kBracketPrecedence 16
+#define kIfElsePrecedence 15
+
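+// Lower values bind tighter. kBracketPrecedence and kIfElsePrecedence are deliberately low
+// priority so parens, array indexing, and ?: are never reduced by ordinary operator
+// comparison; they are popped explicitly.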
+const signed char SkScriptEngine2::gPrecedence[] = {
+ 17, // kUnassigned,
+ 6, // kAdd,
+ 10, // kBitAnd,
+ 4, // kBitNot,
+ 12, // kBitOr,
+ 5, // kDivide,
+ 9, // kEqual,
+ -1, // kFlipOps,
+ 8, // kGreaterEqual,
+ 13, // kLogicalAnd,
+ 4, // kLogicalNot,
+ 14, // kLogicalOr,
+ 4, // kMinus,
+ 5, // kModulo,
+ 5, // kMultiply,
+ 7, // kShiftLeft,
+ 7, // kShiftRight, // signed
+ 6, // kSubtract,
+ 11, // kXor
+ kBracketPrecedence, // kArrayOp
+ kIfElsePrecedence, // kElse
+ kIfElsePrecedence, // kIf
+ kBracketPrecedence, // kParen
+};
+
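+// Base integer opcode for each Op; processOp() adds 1 for the scalar variant and 2 for the
+// string variant of the same operation.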
+const SkScriptEngine2::TypeOp SkScriptEngine2::gTokens[] = {
+ kNop, // unassigned
+ kAddInt, // kAdd,
+ kBitAndInt, // kBitAnd,
+ kBitNotInt, // kBitNot,
+ kBitOrInt, // kBitOr,
+ kDivideInt, // kDivide,
+ kEqualInt, // kEqual,
+ kFlipOpsOp, // kFlipOps,
+ kGreaterEqualInt, // kGreaterEqual,
+ kLogicalAndInt, // kLogicalAnd,
+ kLogicalNotInt, // kLogicalNot,
+ kLogicalOrInt, // kLogicalOr,
+ kMinusInt, // kMinus,
+ kModuloInt, // kModulo,
+ kMultiplyInt, // kMultiply,
+ kShiftLeftInt, // kShiftLeft,
+ kShiftRightInt, // kShiftRight, // signed
+ kSubtractInt, // kSubtract,
+ kXorInt // kXor
+};
+
+static inline bool is_between(int c, int min, int max)
+{
+ return (unsigned)(c - min) <= (unsigned)(max - min);
+}
+
+static inline bool is_ws(int c)
+{
+ return is_between(c, 1, 32);
+}
+
+static int token_length(const char* start) {
+ char ch = start[0];
+ if (! is_between(ch, 'a', 'z') && ! is_between(ch, 'A', 'Z') && ch != '_' && ch != '$')
+ return -1;
+ int length = 0;
+ do
+ ch = start[++length];
+ while (is_between(ch, 'a', 'z') || is_between(ch, 'A', 'Z') || is_between(ch, '0', '9') ||
+ ch == '_' || ch == '$');
+ return length;
+}
+
+SkScriptEngine2::SkScriptEngine2(SkOperand2::OpType returnType) : fActiveStream(&fStream),
+fTokenLength(0), fReturnType(returnType), fError(kNoError),
+fAccumulatorType(SkOperand2::kNoType),
+fBranchPopAllowed(true), fConstExpression(true), fOperandInUse(false)
+{
+ Branch branch(kUnassigned, 0, 0);
+ fBranchStack.push(branch);
+ *fOpStack.push() = (Op) kParen;
+}
+
+SkScriptEngine2::~SkScriptEngine2() {
+ for (SkString** stringPtr = fTrackString.begin(); stringPtr < fTrackString.end(); stringPtr++)
+ delete *stringPtr;
+ for (SkOpArray** arrayPtr = fTrackArray.begin(); arrayPtr < fTrackArray.end(); arrayPtr++)
+ delete *arrayPtr;
+}
+
+void SkScriptEngine2::addToken(SkScriptEngine2::TypeOp op) {
+ int limit = fBranchStack.count() - 1;
+ for (int index = 0; index < limit; index++) {
+ Branch& branch = fBranchStack.index(index);
+ if (branch.fPrimed == Branch::kIsPrimed)
+ resolveBranch(branch);
+ }
+ if (fBranchPopAllowed) {
+ while (fBranchStack.top().fDone == Branch::kIsDone)
+ fBranchStack.pop();
+ }
+ unsigned char charOp = (unsigned char) op;
+ fActiveStream->write(&charOp, sizeof(charOp));
+}
+
+void SkScriptEngine2::addTokenConst(SkScriptValue2* value, AddTokenRegister reg,
+ SkOperand2::OpType toType, SkScriptEngine2::TypeOp op) {
+ if (value->fIsConstant == SkScriptValue2::kConstant && convertTo(toType, value))
+ return;
+ addTokenValue(*value, reg);
+ addToken(op);
+ value->fIsWritten = SkScriptValue2::kWritten;
+ value->fType = toType;
+}
+
+void SkScriptEngine2::addTokenInt(int integer) {
+ fActiveStream->write(&integer, sizeof(integer));
+}
+
+void SkScriptEngine2::addTokenScalar(SkScalar scalar) {
+ fActiveStream->write(&scalar, sizeof(scalar));
+}
+
+void SkScriptEngine2::addTokenString(const SkString& string) {
+ int size = SkToInt(string.size());
+ addTokenInt(size);
+ fActiveStream->write(string.c_str(), size);
+}
+
+void SkScriptEngine2::addTokenValue(const SkScriptValue2& value, AddTokenRegister reg) {
+ if (value.isConstant() == false) {
+ if (reg == kAccumulator) {
+ if (fAccumulatorType == SkOperand2::kNoType)
+ addToken(kAccumulatorPop);
+ } else {
+ ; // !!! incomplete?
+ }
+ return;
+ }
+ if (reg == kAccumulator && fAccumulatorType != SkOperand2::kNoType)
+ addToken(kAccumulatorPush);
+ switch (value.fType) {
+ case SkOperand2::kS32:
+ addToken(reg == kAccumulator ? kIntegerAccumulator : kIntegerOperand);
+ addTokenInt(value.fOperand.fS32);
+ if (reg == kAccumulator)
+ fAccumulatorType = SkOperand2::kS32;
+ else
+ fOperandInUse = true;
+ break;
+ case SkOperand2::kScalar:
+ addToken(reg == kAccumulator ? kScalarAccumulator : kScalarOperand);
+ addTokenScalar(value.fOperand.fScalar);
+ if (reg == kAccumulator)
+ fAccumulatorType = SkOperand2::kScalar;
+ else
+ fOperandInUse = true;
+ break;
+ case SkOperand2::kString:
+ addToken(reg == kAccumulator ? kStringAccumulator : kStringOperand);
+ addTokenString(*value.fOperand.fString);
+ if (reg == kAccumulator)
+ fAccumulatorType = SkOperand2::kString;
+ else
+ fOperandInUse = true;
+ break;
+ default:
+ SkASSERT(0); // !!! not implemented yet
+ }
+}
+
+int SkScriptEngine2::arithmeticOp(char ch, char nextChar, bool lastPush) {
+ Op op = kUnassigned;
+ bool reverseOperands = false;
+ bool negateResult = false;
+ int advance = 1;
+ switch (ch) {
+ case '+':
+ // !!! ignoring unary plus, as implemented here, has the side effect of
+ // suppressing errors on expressions like +"hi"
+ if (lastPush == false) // unary plus, don't push an operator
+ return advance;
+ op = kAdd;
+ break;
+ case '-':
+ op = lastPush ? kSubtract : kMinus;
+ break;
+ case '*':
+ op = kMultiply;
+ break;
+ case '/':
+ op = kDivide;
+ break;
+ case '>':
+ if (nextChar == '>') {
+ op = kShiftRight;
+ goto twoChar;
+ }
+ op = kGreaterEqual;
+ if (nextChar == '=')
+ goto twoChar;
+ reverseOperands = negateResult = true;
+ break;
+ case '<':
+ if (nextChar == '<') {
+ op = kShiftLeft;
+ goto twoChar;
+ }
+ op = kGreaterEqual;
+ reverseOperands = nextChar == '=';
+ negateResult = ! reverseOperands;
+ advance += reverseOperands;
+ break;
+ case '=':
+ if (nextChar == '=') {
+ op = kEqual;
+ goto twoChar;
+ }
+ break;
+ case '!':
+ if (nextChar == '=') {
+ op = kEqual;
+ negateResult = true;
+twoChar:
+ advance++;
+ break;
+ }
+ op = kLogicalNot;
+ break;
+ case '?':
+ op = (Op) kIf;
+ break;
+ case ':':
+ op = (Op) kElse;
+ break;
+ case '^':
+ op = kXor;
+ break;
+ case '(':
+ *fOpStack.push() = (Op) kParen;
+ return advance;
+ case '&':
+ SkASSERT(nextChar != '&');
+ op = kBitAnd;
+ break;
+ case '|':
+ SkASSERT(nextChar != '|');
+ op = kBitOr;
+ break;
+ case '%':
+ op = kModulo;
+ break;
+ case '~':
+ op = kBitNot;
+ break;
+ }
+ if (op == kUnassigned)
+ return 0;
+ signed char precedence = gPrecedence[op];
+ do {
+ int idx = 0;
+ Op compare;
+ do {
+ compare = fOpStack.index(idx);
+ if ((compare & kArtificialOp) == 0)
+ break;
+ idx++;
+ } while (true);
+ signed char topPrecedence = gPrecedence[compare];
+ SkASSERT(topPrecedence != -1);
+ if (topPrecedence > precedence || (topPrecedence == precedence &&
+ gOpAttributes[op].fLeftType == SkOperand2::kNoType)) {
+ break;
+ }
+ processOp();
+ } while (true);
+ if (negateResult)
+ *fOpStack.push() = (Op) (kLogicalNot | kArtificialOp);
+ fOpStack.push(op);
+ if (reverseOperands)
+ *fOpStack.push() = (Op) (kFlipOps | kArtificialOp);
+
+ return advance;
+}
+
+bool SkScriptEngine2::convertParams(SkTDArray<SkScriptValue2>* params,
+ const SkOperand2::OpType* paramTypes, int paramCount) {
+ int count = params->count();
+ if (count > paramCount) {
+ SkASSERT(0);
+ return false; // too many parameters passed
+ }
+ for (int index = 0; index < count; index++)
+ convertTo(paramTypes[index], &(*params)[index]);
+ return true;
+}
+
+bool SkScriptEngine2::convertTo(SkOperand2::OpType toType, SkScriptValue2* value ) {
+ SkOperand2::OpType type = value->fType;
+ if (type == toType)
+ return true;
+ if (type == SkOperand2::kObject) {
+ if (handleUnbox(value) == false)
+ return false;
+ return convertTo(toType, value);
+ }
+ return ConvertTo(this, toType, value);
+}
+
+bool SkScriptEngine2::evaluateDot(const char*& script) {
+ size_t fieldLength = token_length(++script); // skip dot
+ SkASSERT(fieldLength > 0); // !!! add error handling
+ const char* field = script;
+ script += fieldLength;
+ bool success = handleProperty();
+ if (success == false) {
+ fError = kCouldNotFindReferencedID;
+ goto error;
+ }
+ return evaluateDotParam(script, field, fieldLength);
+error:
+ return false;
+}
+
+bool SkScriptEngine2::evaluateDotParam(const char*& script, const char* field, size_t fieldLength) {
+ SkScriptValue2& top = fValueStack.top();
+ if (top.fType != SkOperand2::kObject)
+ return false;
+ void* object = top.fOperand.fObject;
+ fValueStack.pop();
+ char ch; // see if it is a simple member or a function
+ while (is_ws(ch = script[0]))
+ script++;
+ bool success = true;
+ if (ch != '(')
+ success = handleMember(field, fieldLength, object);
+ else {
+ SkTDArray<SkScriptValue2> params;
+ *fBraceStack.push() = kFunctionBrace;
+ success = functionParams(&script, &params);
+ if (success)
+ success = handleMemberFunction(field, fieldLength, object, &params);
+ }
+ return success;
+}
+
+bool SkScriptEngine2::evaluateScript(const char** scriptPtr, SkScriptValue2* value) {
+ // fArrayOffset = 0; // no support for structures for now
+ bool success;
+ const char* inner;
+ if (strncmp(*scriptPtr, "#script:", sizeof("#script:") - 1) == 0) {
+ *scriptPtr += sizeof("#script:") - 1;
+ if (fReturnType == SkOperand2::kNoType || fReturnType == SkOperand2::kString) {
+ success = innerScript(scriptPtr, value);
+ SkASSERT(success);
+ inner = value->fOperand.fString->c_str();
+ scriptPtr = &inner;
+ }
+ }
+ success = innerScript(scriptPtr, value);
+ const char* script = *scriptPtr;
+ char ch;
+ while (is_ws(ch = script[0]))
+ script++;
+ if (ch != '\0') {
+ // error may trigger on scripts like "50,0" that were intended to be written as "[50, 0]"
+ return false;
+ }
+ return success;
+}
+
+void SkScriptEngine2::forget(SkOpArray* array) {
+ if (array->getType() == SkOperand2::kString) {
+ for (int index = 0; index < array->count(); index++) {
+ SkString* string = (*array)[index].fString;
+ int found = fTrackString.find(string);
+ if (found >= 0)
+ fTrackString.remove(found);
+ }
+ return;
+ }
+ if (array->getType() == SkOperand2::kArray) {
+ for (int index = 0; index < array->count(); index++) {
+ SkOpArray* child = (*array)[index].fArray;
+ forget(child); // forgets children of child
+ int found = fTrackArray.find(child);
+ if (found >= 0)
+ fTrackArray.remove(found);
+ }
+ }
+}
+
+bool SkScriptEngine2::functionParams(const char** scriptPtr, SkTDArray<SkScriptValue2>* params) {
+ (*scriptPtr)++; // skip open paren
+ *fOpStack.push() = (Op) kParen;
+ *fBraceStack.push() = kFunctionBrace;
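+ // each innerScript() call consumes one argument and its trailing ',' separator;
+ // the closing ')' is left in place and skipped after the loop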
+ do {
+ SkScriptValue2 value;
+ bool success = innerScript(scriptPtr, &value);
+ SkASSERT(success);
+ if (success == false)
+ return false;
+ *params->append() = value;
+ } while ((*scriptPtr)[-1] == ',');
+ fBraceStack.pop();
+ fOpStack.pop(); // pop paren
+ (*scriptPtr)++; // advance beyond close paren
+ return true;
+}
+
+size_t SkScriptEngine2::getTokenOffset() {
+ return fActiveStream->getOffset();
+}
+
+SkOperand2::OpType SkScriptEngine2::getUnboxType(SkOperand2 scriptValue) {
+ for (SkScriptCallBack** callBack = fCallBackArray.begin(); callBack < fCallBackArray.end(); callBack++) {
+ if ((*callBack)->getType() != SkScriptCallBack::kUnbox)
+ continue;
+ return (*callBack)->getReturnType(0, &scriptValue);
+ }
+ return SkOperand2::kObject;
+}
+
+bool SkScriptEngine2::innerScript(const char** scriptPtr, SkScriptValue2* value) {
+ const char* script = *scriptPtr;
+ char ch;
+ bool lastPush = false;
+ bool success = true;
+ int opBalance = fOpStack.count();
+ int baseBrace = fBraceStack.count();
+ int branchBalance = fBranchStack.count();
+ while ((ch = script[0]) != '\0') {
+ if (is_ws(ch)) {
+ script++;
+ continue;
+ }
+ SkScriptValue2 operand;
+ const char* dotCheck;
+ if (fBraceStack.count() > baseBrace) {
+ if (fBraceStack.top() == kArrayBrace) {
+ SkScriptValue2 tokenValue;
+ success = innerScript(&script, &tokenValue); // terminate and return on comma, close brace
+ SkASSERT(success);
+ {
+ SkOperand2::OpType type = fReturnType;
+ if (fReturnType == SkOperand2::kNoType) {
+ // !!! short-sighted; in the future, allow each returned array component to carry
+ // its own type, and let the caller do any needed conversions
+ if (value->fOperand.fArray->count() == 0)
+ value->fOperand.fArray->setType(type = tokenValue.fType);
+ else
+ type = value->fOperand.fArray->getType();
+ }
+ if (tokenValue.fType != type)
+ convertTo(type, &tokenValue);
+ *value->fOperand.fArray->append() = tokenValue.fOperand;
+ }
+ lastPush = false;
+ continue;
+ } else {
+ SkASSERT(token_length(script) > 0);
+ }
+ }
+ if (lastPush != false && fTokenLength > 0) {
+ if (ch == '(') {
+ *fBraceStack.push() = kFunctionBrace;
+ SkString functionName(fToken, fTokenLength);
+
+ if (handleFunction(&script) == false)
+ return false;
+ lastPush = true;
+ continue;
+ } else if (ch == '[') {
+ if (handleProperty() == false) {
+ SkASSERT(0);
+ return false;
+ }
+ if (handleArrayIndexer(&script) == false)
+ return false;
+ lastPush = true;
+ continue;
+ } else if (ch != '.') {
+ if (handleProperty() == false) {
+ SkASSERT(0);
+ return false;
+ }
+ lastPush = true;
+ continue;
+ }
+ }
+ if (ch == '0' && (script[1] & ~0x20) == 'X') {
+ SkASSERT(lastPush == false);
+ script += 2;
+ script = SkParse::FindHex(script, (uint32_t*) &operand.fOperand.fS32);
+ SkASSERT(script);
+ goto intCommon;
+ }
+ if (lastPush == false && ch == '.')
+ goto scalarCommon;
+ if (ch >= '0' && ch <= '9') {
+ SkASSERT(lastPush == false);
+ dotCheck = SkParse::FindS32(script, &operand.fOperand.fS32);
+ if (dotCheck[0] != '.') {
+ script = dotCheck;
+intCommon:
+ operand.fType = SkOperand2::kS32;
+ } else {
+scalarCommon:
+ script = SkParse::FindScalar(script, &operand.fOperand.fScalar);
+ operand.fType = SkOperand2::kScalar;
+ }
+ operand.fIsConstant = SkScriptValue2::kConstant;
+ fValueStack.push(operand);
+ lastPush = true;
+ continue;
+ }
+ int length = token_length(script);
+ if (length > 0) {
+ SkASSERT(lastPush == false);
+ fToken = script;
+ fTokenLength = length;
+ script += length;
+ lastPush = true;
+ continue;
+ }
+ char startQuote = ch;
+ if (startQuote == '\'' || startQuote == '\"') {
+ SkASSERT(lastPush == false);
+ operand.fOperand.fString = new SkString();
+ ++script;
+ const char* stringStart = script;
+ do { // measure string
+ if (script[0] == '\\')
+ ++script;
+ ++script;
+ SkASSERT(script[0]); // !!! throw an error
+ } while (script[0] != startQuote);
+ operand.fOperand.fString->set(stringStart, script - stringStart);
+ script = stringStart;
+ char* stringWrite = operand.fOperand.fString->writable_str();
+ do { // copy string
+ if (script[0] == '\\')
+ ++script;
+ *stringWrite++ = script[0];
+ ++script;
+ SkASSERT(script[0]); // !!! throw an error
+ } while (script[0] != startQuote);
+ ++script;
+ track(operand.fOperand.fString);
+ operand.fType = SkOperand2::kString;
+ operand.fIsConstant = SkScriptValue2::kConstant;
+ fValueStack.push(operand);
+ lastPush = true;
+ continue;
+ }
+ if (ch == '.') {
+ if (fTokenLength == 0) {
+ int tokenLength = token_length(++script);
+ const char* token = script;
+ script += tokenLength;
+ SkASSERT(fValueStack.count() > 0); // !!! add error handling
+ SkScriptValue2 top;
+ fValueStack.pop(&top);
+
+ addTokenInt(top.fType);
+ addToken(kBoxToken);
+ top.fType = SkOperand2::kObject;
+ top.fIsConstant = SkScriptValue2::kVariable;
+ fConstExpression = false;
+ fValueStack.push(top);
+ success = evaluateDotParam(script, token, tokenLength);
+ SkASSERT(success);
+ lastPush = true;
+ continue;
+ }
+ // get next token, and evaluate immediately
+ success = evaluateDot(script);
+ if (success == false) {
+ // SkASSERT(0);
+ return false;
+ }
+ lastPush = true;
+ continue;
+ }
+ if (ch == '[') {
+ if (lastPush == false) {
+ script++;
+ *fBraceStack.push() = kArrayBrace;
+ operand.fOperand.fArray = value->fOperand.fArray = new SkOpArray(fReturnType);
+ track(value->fOperand.fArray);
+
+ operand.fType = SkOperand2::kArray;
+ operand.fIsConstant = SkScriptValue2::kVariable;
+ fValueStack.push(operand);
+ continue;
+ }
+ if (handleArrayIndexer(&script) == false)
+ return false;
+ lastPush = true;
+ continue;
+ }
+#if 0 // structs not supported for now
+ if (ch == '{') {
+ if (lastPush == false) {
+ script++;
+ *fBraceStack.push() = kStructBrace;
+ operand.fS32 = 0;
+ *fTypeStack.push() = (SkOpType) kStruct;
+ fOperandStack.push(operand);
+ continue;
+ }
+ SkASSERT(0); // braces in other contexts aren't supported yet
+ }
+#endif
+ if (ch == ')' && fBraceStack.count() > 0) {
+ BraceStyle braceStyle = fBraceStack.top();
+ if (braceStyle == kFunctionBrace) {
+ fBraceStack.pop();
+ break;
+ }
+ }
+ if (ch == ',' || ch == ']') {
+ if (ch != ',') {
+ BraceStyle match;
+ fBraceStack.pop(&match);
+ SkASSERT(match == kArrayBrace);
+ }
+ script++;
+ // !!! see if brace or bracket is correct closer
+ break;
+ }
+ char nextChar = script[1];
+ int advance = logicalOp(ch, nextChar);
+ if (advance == 0)
+ advance = arithmeticOp(ch, nextChar, lastPush);
+ if (advance == 0) // unknown token
+ return false;
+ if (advance > 0)
+ script += advance;
+ lastPush = ch == ']' || ch == ')';
+ }
+ if (fTokenLength > 0) {
+ success = handleProperty();
+ SkASSERT(success);
+ }
+ int branchIndex = 0;
+ branchBalance = fBranchStack.count() - branchBalance;
+ fBranchPopAllowed = false;
+ while (branchIndex < branchBalance) {
+ Branch& branch = fBranchStack.index(branchIndex++);
+ if (branch.fPrimed == Branch::kIsPrimed)
+ break;
+ Op branchOp = branch.fOperator;
+ SkOperand2::OpType lastType = fValueStack.top().fType;
+ addTokenValue(fValueStack.top(), kAccumulator);
+ fValueStack.pop();
+ if (branchOp == kLogicalAnd || branchOp == kLogicalOr) {
+ if (branch.fOperator == kLogicalAnd)
+ branch.prime();
+ addToken(kToBool);
+ } else {
+ resolveBranch(branch);
+ SkScriptValue2 operand;
+ operand.fType = lastType;
+ // !!! note that many branching expressions could be constant;
+ // for now, branches are always evaluated as returning variables
+ operand.fIsConstant = SkScriptValue2::kVariable;
+ fValueStack.push(operand);
+ }
+ if (branch.fDone == Branch::kIsNotDone)
+ branch.prime();
+ }
+ fBranchPopAllowed = true;
+ while (fBranchStack.top().fDone == Branch::kIsDone)
+ fBranchStack.pop();
+ while (fOpStack.count() > opBalance) { // leave open paren
+ if (processOp() == false)
+ return false;
+ }
+ SkOperand2::OpType topType = fValueStack.count() > 0 ? fValueStack.top().fType : SkOperand2::kNoType;
+ if (topType != fReturnType &&
+ topType == SkOperand2::kString && fReturnType != SkOperand2::kNoType) { // if the result is a string, give handleProperty() a chance to convert it to the property value
+ SkString* string = fValueStack.top().fOperand.fString;
+ fToken = string->c_str();
+ fTokenLength = string->size();
+ fValueStack.pop();
+ success = handleProperty();
+ if (success == false) { // if it couldn't convert, return string (error?)
+ SkScriptValue2 operand;
+ operand.fType = SkOperand2::kString;
+ operand.fOperand.fString = string;
+ operand.fIsConstant = SkScriptValue2::kVariable; // !!! ?
+ fValueStack.push(operand);
+ }
+ }
+ if (fStream.getOffset() > 0) {
+ addToken(kEnd);
+ SkAutoDataUnref data(fStream.copyToData());
+#ifdef SK_DEBUG
+ decompile(data->bytes(), data->size());
+#endif
+ SkScriptRuntime runtime(fCallBackArray);
+ runtime.executeTokens((unsigned char*) data->bytes());
+ SkScriptValue2 value1;
+ runtime.getResult(&value1.fOperand);
+ value1.fType = fReturnType;
+ fValueStack.push(value1);
+ }
+ if (value) {
+ if (fValueStack.count() == 0)
+ return false;
+ fValueStack.pop(value);
+ if (value->fType != fReturnType && value->fType == SkOperand2::kObject &&
+ fReturnType != SkOperand2::kNoType)
+ convertTo(fReturnType, value);
+ }
+ // if (fBranchStack.top().fOpStackDepth > fOpStack.count())
+ // resolveBranch();
+ *scriptPtr = script;
+ return true; // no error
+}
+
+bool SkScriptEngine2::handleArrayIndexer(const char** scriptPtr) {
+ SkScriptValue2 scriptValue;
+ (*scriptPtr)++;
+ *fOpStack.push() = (Op) kParen;
+ *fBraceStack.push() = kArrayBrace;
+ SkOperand2::OpType saveType = fReturnType;
+ fReturnType = SkOperand2::kS32;
+ bool success = innerScript(scriptPtr, &scriptValue);
+ fReturnType = saveType;
+ SkASSERT(success);
+ success = convertTo(SkOperand2::kS32, &scriptValue);
+ SkASSERT(success);
+ int index = scriptValue.fOperand.fS32;
+ fValueStack.pop(&scriptValue);
+ if (scriptValue.fType == SkOperand2::kObject) {
+ success = handleUnbox(&scriptValue);
+ SkASSERT(success);
+ SkASSERT(scriptValue.fType == SkOperand2::kArray);
+ }
+ scriptValue.fType = scriptValue.fOperand.fArray->getType();
+ // SkASSERT(index >= 0);
+ if ((unsigned) index >= (unsigned) scriptValue.fOperand.fArray->count()) {
+ fError = kArrayIndexOutOfBounds;
+ return false;
+ }
+ scriptValue.fOperand = scriptValue.fOperand.fArray->begin()[index];
+ scriptValue.fIsConstant = SkScriptValue2::kVariable;
+ fValueStack.push(scriptValue);
+ fOpStack.pop(); // pop paren
+ return success;
+}
+
+bool SkScriptEngine2::handleFunction(const char** scriptPtr) {
+ const char* functionName = fToken;
+ size_t functionNameLen = fTokenLength;
+ fTokenLength = 0;
+ SkTDArray<SkScriptValue2> params;
+ bool success = functionParams(scriptPtr, &params);
+ if (success == false)
+ goto done;
+ {
+ for (SkScriptCallBack** callBack = fCallBackArray.begin(); callBack < fCallBackArray.end(); callBack++) {
+ if ((*callBack)->getType() != SkScriptCallBack::kFunction)
+ continue;
+ SkScriptValue2 callbackResult;
+ success = (*callBack)->getReference(functionName, functionNameLen, &callbackResult);
+ if (success) {
+ callbackResult.fType = (*callBack)->getReturnType(callbackResult.fOperand.fReference, nullptr);
+ callbackResult.fIsConstant = SkScriptValue2::kVariable;
+ fValueStack.push(callbackResult);
+ goto done;
+ }
+ }
+ }
+ return false;
+done:
+ fOpStack.pop();
+ return success;
+}
+
+bool SkScriptEngine2::handleMember(const char* field, size_t len, void* object) {
+ bool success = true;
+ for (SkScriptCallBack** callBack = fCallBackArray.begin(); callBack < fCallBackArray.end(); callBack++) {
+ if ((*callBack)->getType() != SkScriptCallBack::kMember)
+ continue;
+ SkScriptValue2 callbackResult;
+ success = (*callBack)->getReference(field, len, &callbackResult);
+ if (success) {
+ if (callbackResult.fType == SkOperand2::kString)
+ track(callbackResult.fOperand.fString);
+ callbackResult.fIsConstant = SkScriptValue2::kVariable;
+ fValueStack.push(callbackResult);
+ goto done;
+ }
+ }
+ return false;
+done:
+ return success;
+}
+
+bool SkScriptEngine2::handleMemberFunction(const char* field, size_t len, void* object,
+ SkTDArray<SkScriptValue2>* params) {
+ bool success = true;
+ for (SkScriptCallBack** callBack = fCallBackArray.begin(); callBack < fCallBackArray.end(); callBack++) {
+ if ((*callBack)->getType() != SkScriptCallBack::kMemberFunction)
+ continue;
+ SkScriptValue2 callbackResult;
+ success = (*callBack)->getReference(field, len, &callbackResult);
+ if (success) {
+ if (callbackResult.fType == SkOperand2::kString)
+ track(callbackResult.fOperand.fString);
+ callbackResult.fIsConstant = SkScriptValue2::kVariable;
+ fValueStack.push(callbackResult);
+ goto done;
+ }
+ }
+ return false;
+done:
+ return success;
+}
+
+bool SkScriptEngine2::handleProperty() {
+ bool success = true;
+ for (SkScriptCallBack** callBack = fCallBackArray.begin(); callBack < fCallBackArray.end(); callBack++) {
+ if ((*callBack)->getType() != SkScriptCallBack::kProperty)
+ continue;
+ SkScriptValue2 callbackResult;
+ success = (*callBack)->getReference(fToken, fTokenLength, &callbackResult);
+ if (success) {
+ if (callbackResult.fType == SkOperand2::kString && callbackResult.fOperand.fString == nullptr) {
+ callbackResult.fOperand.fString = new SkString(fToken, fTokenLength);
+ track(callbackResult.fOperand.fString);
+ }
+ callbackResult.fIsConstant = SkScriptValue2::kVariable;
+ fValueStack.push(callbackResult);
+ goto done;
+ }
+ }
+done:
+ fTokenLength = 0;
+ return success;
+}
+
+bool SkScriptEngine2::handleUnbox(SkScriptValue2* scriptValue) {
+ bool success = true;
+ for (SkScriptCallBack** callBack = fCallBackArray.begin(); callBack < fCallBackArray.end(); callBack++) {
+ if ((*callBack)->getType() != SkScriptCallBack::kUnbox)
+ continue;
+ SkScriptCallBackConvert* callBackConvert = (SkScriptCallBackConvert*) *callBack;
+ success = callBackConvert->convert(scriptValue->fType, &scriptValue->fOperand);
+ if (success) {
+ if (scriptValue->fType == SkOperand2::kString)
+ track(scriptValue->fOperand.fString);
+ goto done;
+ }
+ }
+ return false;
+done:
+ return success;
+}
+
+// note that the entire expression is treated as if it were enclosed in parens;
+// an open paren is always the first thing on the op stack
+
+int SkScriptEngine2::logicalOp(char ch, char nextChar) {
+ int advance = 1;
+ Op op;
+ signed char precedence;
+ switch (ch) {
+ case ')':
+ op = (Op) kParen;
+ break;
+ case ']':
+ op = (Op) kArrayOp;
+ break;
+ case '?':
+ op = (Op) kIf;
+ break;
+ case ':':
+ op = (Op) kElse;
+ break;
+ case '&':
+ if (nextChar != '&')
+ goto noMatch;
+ op = kLogicalAnd;
+ advance = 2;
+ break;
+ case '|':
+ if (nextChar != '|')
+ goto noMatch;
+ op = kLogicalOr;
+ advance = 2;
+ break;
+ default:
+ noMatch:
+ return 0;
+ }
+ precedence = gPrecedence[op];
+ int branchIndex = 0;
+ fBranchPopAllowed = false;
+ do {
+ while (gPrecedence[fOpStack.top() & ~kArtificialOp] < precedence)
+ processOp();
+ Branch& branch = fBranchStack.index(branchIndex++);
+ Op branchOp = branch.fOperator;
+ if (gPrecedence[branchOp] >= precedence)
+ break;
+ addTokenValue(fValueStack.top(), kAccumulator);
+ fValueStack.pop();
+ if (branchOp == kLogicalAnd || branchOp == kLogicalOr) {
+ if (branch.fOperator == kLogicalAnd)
+ branch.prime();
+ addToken(kToBool);
+ } else
+ resolveBranch(branch);
+ if (branch.fDone == Branch::kIsNotDone)
+ branch.prime();
+ } while (true);
+ fBranchPopAllowed = true;
+ while (fBranchStack.top().fDone == Branch::kIsDone)
+ fBranchStack.pop();
+ processLogicalOp(op);
+ return advance;
+}
+
+void SkScriptEngine2::processLogicalOp(Op op) {
+ switch (op) {
+ case kParen:
+ case kArrayOp:
+ SkASSERT(fOpStack.count() > 1 && fOpStack.top() == op); // !!! add error handling
+ if (op == kParen)
+ fOpStack.pop();
+ else {
+ SkScriptValue2 value;
+ fValueStack.pop(&value);
+ SkASSERT(value.fType == SkOperand2::kS32 || value.fType == SkOperand2::kScalar); // !!! add error handling (although, could permit strings eventually)
+ int index = value.fType == SkOperand2::kScalar ? SkScalarFloorToInt(value.fOperand.fScalar) :
+ value.fOperand.fS32;
+ SkScriptValue2 arrayValue;
+ fValueStack.pop(&arrayValue);
+ SkASSERT(arrayValue.fType == SkOperand2::kArray); // !!! add error handling
+ SkOpArray* array = arrayValue.fOperand.fArray;
+ SkOperand2 operand;
+ SkDEBUGCODE(bool success = ) array->getIndex(index, &operand);
+ SkASSERT(success); // !!! add error handling
+ SkScriptValue2 resultValue;
+ resultValue.fType = array->getType();
+ resultValue.fOperand = operand;
+ resultValue.fIsConstant = SkScriptValue2::kVariable;
+ fValueStack.push(resultValue);
+ }
+ break;
+ case kIf: {
+ if (fAccumulatorType == SkOperand2::kNoType) {
+ addTokenValue(fValueStack.top(), kAccumulator);
+ fValueStack.pop();
+ }
+ SkASSERT(fAccumulatorType != SkOperand2::kString); // !!! add error handling
+ addToken(kIfOp);
+ Branch branch(op, fOpStack.count(), getTokenOffset());
+ *fBranchStack.push() = branch;
+ addTokenInt(0); // placeholder for future branch
+ fAccumulatorType = SkOperand2::kNoType;
+ } break;
+ case kElse: {
+ addTokenValue(fValueStack.top(), kAccumulator);
+ fValueStack.pop();
+ addToken(kElseOp);
+ size_t newOffset = getTokenOffset();
+ addTokenInt(0); // placeholder for future branch
+ Branch& branch = fBranchStack.top();
+ resolveBranch(branch);
+ branch.fOperator = op;
+ branch.fDone = Branch::kIsNotDone;
+ SkASSERT(branch.fOpStackDepth == fOpStack.count());
+ branch.fOffset = SkToU16(newOffset);
+ fAccumulatorType = SkOperand2::kNoType;
+ } break;
+ case kLogicalAnd:
+ case kLogicalOr: {
+ Branch& oldTop = fBranchStack.top();
+ Branch::Primed wasPrime = oldTop.fPrimed;
+ Branch::Done wasDone = oldTop.fDone;
+ oldTop.fPrimed = Branch::kIsNotPrimed;
+ oldTop.fDone = Branch::kIsNotDone;
+ if (fAccumulatorType == SkOperand2::kNoType) {
+ SkASSERT(fValueStack.top().fType == SkOperand2::kS32); // !!! add error handling, and conversion to int?
+ addTokenValue(fValueStack.top(), kAccumulator);
+ fValueStack.pop();
+ } else {
+ SkASSERT(fAccumulatorType == SkOperand2::kS32);
+ }
+ // if 'and', write a beq goto opcode after the end of the predicate (after the to-bool)
+ // if 'or', write a bne goto to the to-bool
+ addToken(op == kLogicalAnd ? kLogicalAndInt : kLogicalOrInt);
+ Branch branch(op, fOpStack.count(), getTokenOffset());
+ addTokenInt(0); // placeholder for future branch
+ oldTop.fPrimed = wasPrime;
+ oldTop.fDone = wasDone;
+ *fBranchStack.push() = branch;
+ fAccumulatorType = SkOperand2::kNoType;
+ } break;
+ default:
+ SkASSERT(0);
+ }
+}
+
+bool SkScriptEngine2::processOp() {
+ Op op;
+ fOpStack.pop(&op);
+ op = (Op) (op & ~kArtificialOp);
+ const OperatorAttributes* attributes = &gOpAttributes[op];
+ SkScriptValue2 value1;
+ memset(&value1, 0, sizeof(SkScriptValue2));
+ SkScriptValue2 value2;
+ fValueStack.pop(&value2);
+ value2.fIsWritten = SkScriptValue2::kUnwritten;
+ // SkScriptEngine2::SkTypeOp convert1[3];
+ // SkScriptEngine2::SkTypeOp convert2[3];
+ // SkScriptEngine2::SkTypeOp* convert2Ptr = convert2;
+ bool constantOperands = value2.fIsConstant == SkScriptValue2::kConstant;
+ if (attributes->fLeftType != SkOperand2::kNoType) {
+ fValueStack.pop(&value1);
+ constantOperands &= value1.fIsConstant == SkScriptValue2::kConstant;
+ value1.fIsWritten = SkScriptValue2::kUnwritten;
+ if (op == kFlipOps) {
+ SkTSwap(value1, value2);
+ fOpStack.pop(&op);
+ op = (Op) (op & ~kArtificialOp);
+ attributes = &gOpAttributes[op];
+ if (constantOperands == false)
+ addToken(kFlipOpsOp);
+ }
+ if (value1.fType == SkOperand2::kObject && (value1.fType & attributes->fLeftType) == 0) {
+ value1.fType = getUnboxType(value1.fOperand);
+ addToken(kUnboxToken);
+ }
+ }
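+ // !!! should this mirror the check above, i.e. test fRightType and assign to value2.fType?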
+ if (value2.fType == SkOperand2::kObject && (value2.fType & attributes->fLeftType) == 0) {
+ value1.fType = getUnboxType(value2.fOperand);
+ addToken(kUnboxToken2);
+ }
+ if (attributes->fLeftType != SkOperand2::kNoType) {
+ if (value1.fType != value2.fType) {
+ if ((attributes->fLeftType & SkOperand2::kString) && attributes->fBias & kTowardsString &&
+ ((value1.fType | value2.fType) & SkOperand2::kString)) {
+ if (value1.fType == SkOperand2::kS32 || value1.fType == SkOperand2::kScalar) {
+ addTokenConst(&value1, kAccumulator, SkOperand2::kString,
+ value1.fType == SkOperand2::kS32 ? kIntToString : kScalarToString);
+ }
+ if (value2.fType == SkOperand2::kS32 || value2.fType == SkOperand2::kScalar) {
+ addTokenConst(&value2, kOperand, SkOperand2::kString,
+ value2.fType == SkOperand2::kS32 ? kIntToString2 : kScalarToString2);
+ }
+ } else if (attributes->fLeftType & SkOperand2::kScalar && ((value1.fType | value2.fType) &
+ SkOperand2::kScalar)) {
+ if (value1.fType == SkOperand2::kS32)
+ addTokenConst(&value1, kAccumulator, SkOperand2::kScalar, kIntToScalar);
+ if (value2.fType == SkOperand2::kS32)
+ addTokenConst(&value2, kOperand, SkOperand2::kScalar, kIntToScalar2);
+ }
+ }
+ if ((value1.fType & attributes->fLeftType) == 0 || value1.fType != value2.fType) {
+ if (value1.fType == SkOperand2::kString)
+ addTokenConst(&value1, kAccumulator, SkOperand2::kScalar, kStringToScalar);
+ if (value1.fType == SkOperand2::kScalar && (attributes->fLeftType == SkOperand2::kS32 ||
+ value2.fType == SkOperand2::kS32))
+ addTokenConst(&value1, kAccumulator, SkOperand2::kS32, kScalarToInt);
+ }
+ }
+ AddTokenRegister rhRegister = attributes->fLeftType != SkOperand2::kNoType ?
+ kOperand : kAccumulator;
+ if ((value2.fType & attributes->fRightType) == 0 || value1.fType != value2.fType) {
+ if (value2.fType == SkOperand2::kString)
+ addTokenConst(&value2, rhRegister, SkOperand2::kScalar, kStringToScalar2);
+ if (value2.fType == SkOperand2::kScalar && (attributes->fRightType == SkOperand2::kS32 ||
+ value1.fType == SkOperand2::kS32))
+ addTokenConst(&value2, rhRegister, SkOperand2::kS32, kScalarToInt2);
+ }
+ TypeOp typeOp = gTokens[op];
+ if (value2.fType == SkOperand2::kScalar)
+ typeOp = (TypeOp) (typeOp + 1);
+ else if (value2.fType == SkOperand2::kString)
+ typeOp = (TypeOp) (typeOp + 2);
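+ // constant folding: when both operands are compile-time constants, the op is executed
+ // immediately via SkScriptRuntime and the computed value is pushed back as a constant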
+ SkDynamicMemoryWStream stream;
+ SkOperand2::OpType saveType = SkOperand2::kNoType;
+ SkBool saveOperand = false;
+ if (constantOperands) {
+ fActiveStream = &stream;
+ saveType = fAccumulatorType;
+ saveOperand = fOperandInUse;
+ fAccumulatorType = SkOperand2::kNoType;
+ fOperandInUse = false;
+ }
+ if (attributes->fLeftType != SkOperand2::kNoType) { // two operands
+ if (value1.fIsWritten == SkScriptValue2::kUnwritten)
+ addTokenValue(value1, kAccumulator);
+ }
+ if (value2.fIsWritten == SkScriptValue2::kUnwritten)
+ addTokenValue(value2, rhRegister);
+ addToken(typeOp);
+ if (constantOperands) {
+ addToken(kEnd);
+ SkAutoDataUnref data(fStream.copyToData());
+#ifdef SK_DEBUG
+ decompile(data->bytes(), data->size());
+#endif
+ SkScriptRuntime runtime(fCallBackArray);
+ runtime.executeTokens((unsigned char*)data->bytes());
+ runtime.getResult(&value1.fOperand);
+ if (attributes->fResultIsBoolean == kResultIsBoolean)
+ value1.fType = SkOperand2::kS32;
+ else if (attributes->fLeftType == SkOperand2::kNoType) // unary operand
+ value1.fType = value2.fType;
+ fValueStack.push(value1);
+ if (value1.fType == SkOperand2::kString)
+ runtime.untrack(value1.fOperand.fString);
+ else if (value1.fType == SkOperand2::kArray)
+ runtime.untrack(value1.fOperand.fArray);
+ fActiveStream = &fStream;
+ fAccumulatorType = saveType;
+ fOperandInUse = saveOperand;
+ return true;
+ }
+ value2.fIsConstant = SkScriptValue2::kVariable;
+ fValueStack.push(value2);
+ return true;
+}
+
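+// Back-patches a branch: the branch opcode was written with a zero placeholder (addTokenInt(0)
+// at the call sites); resolve() rewrites that slot with the distance from just past the
+// placeholder to the current end of the stream.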
+void SkScriptEngine2::Branch::resolve(SkDynamicMemoryWStream* stream, size_t off) {
+ SkASSERT(fDone == kIsNotDone);
+ fPrimed = kIsNotPrimed;
+ fDone = kIsDone;
+ SkASSERT(off > fOffset + sizeof(size_t));
+ size_t offset = off - fOffset - sizeof(offset);
+ stream->write(&offset, fOffset, sizeof(offset));
+}
+
+void SkScriptEngine2::resolveBranch(SkScriptEngine2::Branch& branch) {
+ branch.resolve(fActiveStream, getTokenOffset());
+}
+
+bool SkScriptEngine2::ConvertTo(SkScriptEngine2* engine, SkOperand2::OpType toType, SkScriptValue2* value) {
+ SkASSERT(value);
+ SkOperand2::OpType type = value->fType;
+ if (type == toType)
+ return true;
+ SkOperand2& operand = value->fOperand;
+ bool success = true;
+ switch (toType) {
+ case SkOperand2::kS32:
+ if (type == SkOperand2::kScalar)
+ operand.fS32 = SkScalarFloorToInt(operand.fScalar);
+ else {
+ SkASSERT(type == SkOperand2::kString);
+ success = SkParse::FindS32(operand.fString->c_str(), &operand.fS32) != nullptr;
+ }
+ break;
+ case SkOperand2::kScalar:
+ if (type == SkOperand2::kS32)
+ operand.fScalar = IntToScalar(operand.fS32);
+ else {
+ SkASSERT(type == SkOperand2::kString);
+ success = SkParse::FindScalar(operand.fString->c_str(), &operand.fScalar) != nullptr;
+ }
+ break;
+ case SkOperand2::kString: {
+ SkString* strPtr = new SkString();
+ SkASSERT(engine);
+ engine->track(strPtr);
+ if (type == SkOperand2::kS32)
+ strPtr->appendS32(operand.fS32);
+ else {
+ SkASSERT(type == SkOperand2::kScalar);
+ strPtr->appendScalar(operand.fScalar);
+ }
+ operand.fString = strPtr;
+ } break;
+ case SkOperand2::kArray: {
+ SkOpArray* array = new SkOpArray(type);
+ *array->append() = operand;
+ engine->track(array);
+ operand.fArray = array;
+ } break;
+ default:
+ SkASSERT(0);
+ }
+ value->fType = toType;
+ return success;
+}
+
+SkScalar SkScriptEngine2::IntToScalar(int32_t s32) {
+ SkScalar scalar;
+ if (s32 == (int32_t) SK_NaN32)
+ scalar = SK_ScalarNaN;
+ else if (SkAbs32(s32) == SK_MaxS32)
+ scalar = SkSign32(s32) * SK_ScalarMax;
+ else
+ scalar = SkIntToScalar(s32);
+ return scalar;
+}
+
+bool SkScriptEngine2::ValueToString(const SkScriptValue2& value, SkString* string) {
+ switch (value.fType) {
+ case SkOperand2::kS32:
+ string->reset();
+ string->appendS32(value.fOperand.fS32);
+ break;
+ case SkOperand2::kScalar:
+ string->reset();
+ string->appendScalar(value.fOperand.fScalar);
+ break;
+ case SkOperand2::kString:
+ string->set(*value.fOperand.fString);
+ break;
+ default:
+ SkASSERT(0);
+ return false;
+ }
+ return true; // no error
+}
+
+#ifdef SK_DEBUG
+#if defined(SK_SUPPORT_UNITTEST)
+
+#define testInt(expression) { #expression, SkOperand2::kS32, expression, 0, nullptr }
+#define testScalar(expression) { #expression, SkOperand2::kScalar, 0, (float) (expression), nullptr }
+#define testRemainder(exp1, exp2) { #exp1 "%" #exp2, SkOperand2::kScalar, 0, fmodf((float) exp1, (float) exp2), nullptr }
+#define testTrue(expression) { #expression, SkOperand2::kS32, 1, 0, nullptr }
+#define testFalse(expression) { #expression, SkOperand2::kS32, 0, 0, nullptr }
+
+static const SkScriptNAnswer2 scriptTests[] = {
+ testInt(1||(0&&3)),
+ testScalar(- -5.5- -1.5),
+ testScalar(1.0+5),
+ testInt((6+7)*8),
+ testInt(3*(4+5)),
+ testScalar(1.0+2.0),
+ testScalar(3.0-1.0),
+ testScalar(6-1.0),
+ testScalar(2.5*6.),
+ testScalar(0.5*4),
+ testScalar(4.5/.5),
+ testScalar(9.5/19),
+ testRemainder(9.5, 0.5),
+ testRemainder(9.,2),
+ testRemainder(9,2.5),
+ testRemainder(-9,2.5),
+ testTrue(-9==-9.0),
+ testTrue(-9.==-4.0-5),
+ testTrue(-9.*1==-4-5),
+ testFalse(-9!=-9.0),
+ testFalse(-9.!=-4.0-5),
+ testFalse(-9.*1!=-4-5),
+ testInt(0x123),
+ testInt(0XABC),
+ testInt(0xdeadBEEF),
+ { "'123'+\"456\"", SkOperand2::kString, 0, 0, "123456" },
+ { "123+\"456\"", SkOperand2::kString, 0, 0, "123456" },
+ { "'123'+456", SkOperand2::kString, 0, 0, "123456" },
+ { "'123'|\"456\"", SkOperand2::kS32, 123|456, 0, nullptr },
+ { "123|\"456\"", SkOperand2::kS32, 123|456, 0, nullptr },
+ { "'123'|456", SkOperand2::kS32, 123|456, 0, nullptr },
+ { "'2'<11", SkOperand2::kS32, 1, 0, nullptr },
+ { "2<'11'", SkOperand2::kS32, 1, 0, nullptr },
+ { "'2'<'11'", SkOperand2::kS32, 0, 0, nullptr },
+ testInt(123),
+ testInt(-345),
+ testInt(+678),
+ testInt(1+2+3),
+ testInt(3*4+5),
+ testInt(6+7*8),
+ testInt(-1-2-8/4),
+ testInt(-9%4),
+ testInt(9%-4),
+ testInt(-9%-4),
+ testInt(123|978),
+ testInt(123&978),
+ testInt(123^978),
+ testInt(2<<4),
+ testInt(99>>3),
+ testInt(~55),
+ testInt(~~55),
+ testInt(!55),
+ testInt(!!55),
+ // both int
+ testInt(2<2),
+ testInt(2<11),
+ testInt(20<11),
+ testInt(2<=2),
+ testInt(2<=11),
+ testInt(20<=11),
+ testInt(2>2),
+ testInt(2>11),
+ testInt(20>11),
+ testInt(2>=2),
+ testInt(2>=11),
+ testInt(20>=11),
+ testInt(2==2),
+ testInt(2==11),
+ testInt(20==11),
+ testInt(2!=2),
+ testInt(2!=11),
+ testInt(20!=11),
+ // left int, right scalar
+ testInt(2<2.),
+ testInt(2<11.),
+ testInt(20<11.),
+ testInt(2<=2.),
+ testInt(2<=11.),
+ testInt(20<=11.),
+ testInt(2>2.),
+ testInt(2>11.),
+ testInt(20>11.),
+ testInt(2>=2.),
+ testInt(2>=11.),
+ testInt(20>=11.),
+ testInt(2==2.),
+ testInt(2==11.),
+ testInt(20==11.),
+ testInt(2!=2.),
+ testInt(2!=11.),
+ testInt(20!=11.),
+ // left scalar, right int
+ testInt(2.<2),
+ testInt(2.<11),
+ testInt(20.<11),
+ testInt(2.<=2),
+ testInt(2.<=11),
+ testInt(20.<=11),
+ testInt(2.>2),
+ testInt(2.>11),
+ testInt(20.>11),
+ testInt(2.>=2),
+ testInt(2.>=11),
+ testInt(20.>=11),
+ testInt(2.==2),
+ testInt(2.==11),
+ testInt(20.==11),
+ testInt(2.!=2),
+ testInt(2.!=11),
+ testInt(20.!=11),
+ // both scalar
+ testInt(2.<11.),
+ testInt(20.<11.),
+ testInt(2.<=2.),
+ testInt(2.<=11.),
+ testInt(20.<=11.),
+ testInt(2.>2.),
+ testInt(2.>11.),
+ testInt(20.>11.),
+ testInt(2.>=2.),
+ testInt(2.>=11.),
+ testInt(20.>=11.),
+ testInt(2.==2.),
+ testInt(2.==11.),
+ testInt(20.==11.),
+ testInt(2.!=2.),
+ testInt(2.!=11.),
+ testInt(20.!=11.),
+ // int, string (string is int)
+ testFalse(2<'2'),
+ testTrue(2<'11'),
+ testFalse(20<'11'),
+ testTrue(2<='2'),
+ testTrue(2<='11'),
+ testFalse(20<='11'),
+ testFalse(2>'2'),
+ testFalse(2>'11'),
+ testTrue(20>'11'),
+ testTrue(2>='2'),
+ testFalse(2>='11'),
+ testTrue(20>='11'),
+ testTrue(2=='2'),
+ testFalse(2=='11'),
+ testFalse(2!='2'),
+ testTrue(2!='11'),
+ // int, string (string is scalar)
+ testFalse(2<'2.'),
+ testTrue(2<'11.'),
+ testFalse(20<'11.'),
+ testTrue(2=='2.'),
+ testFalse(2=='11.'),
+ // scalar, string
+ testFalse(2.<'2.'),
+ testTrue(2.<'11.'),
+ testFalse(20.<'11.'),
+ testTrue(2.=='2.'),
+ testFalse(2.=='11.'),
+ // string, int
+ testFalse('2'<2),
+ testTrue('2'<11),
+ testFalse('20'<11),
+ testTrue('2'==2),
+ testFalse('2'==11),
+ // string, scalar
+ testFalse('2'<2.),
+ testTrue('2'<11.),
+ testFalse('20'<11.),
+ testTrue('2'==2.),
+ testFalse('2'==11.),
+ // string, string
+ testFalse('2'<'2'),
+ testFalse('2'<'11'),
+ testFalse('20'<'11'),
+ testTrue('2'=='2'),
+ testFalse('2'=='11'),
+ // logic
+ testInt(1?2:3),
+ testInt(0?2:3),
+ testInt((1&&2)||3),
+ testInt((1&&0)||3),
+ testInt((1&&0)||0),
+ testInt(1||(0&&3)),
+ testInt(0||(0&&3)),
+ testInt(0||(1&&3)),
+ testInt(0&&1?2:3)
+ , { "123.5", SkOperand2::kScalar, 0, SkIntToScalar(123) + SK_Scalar1/2, nullptr }
+};
+
+#define SkScriptNAnswer_testCount SK_ARRAY_COUNT(scriptTests)
+#endif // SK_SUPPORT_UNITTEST
+
+void SkScriptEngine2::UnitTest() {
+#if defined(SK_SUPPORT_UNITTEST)
+ ValidateDecompileTable();
+ for (size_t index = 0; index < SkScriptNAnswer_testCount; index++) {
+ SkScriptEngine2 engine(scriptTests[index].fType);
+ SkScriptValue2 value;
+ const char* script = scriptTests[index].fScript;
+ const char* scriptPtr = script;
+ SkASSERT(engine.evaluateScript(&scriptPtr, &value) == true);
+ SkASSERT(value.fType == scriptTests[index].fType);
+ SkScalar error;
+ switch (value.fType) {
+ case SkOperand2::kS32:
+ if (value.fOperand.fS32 != scriptTests[index].fIntAnswer)
+ SkDEBUGF(("script '%s' == value %d != expected answer %d\n", script, value.fOperand.fS32, scriptTests[index].fIntAnswer));
+ SkASSERT(value.fOperand.fS32 == scriptTests[index].fIntAnswer);
+ break;
+ case SkOperand2::kScalar:
+ error = SkScalarAbs(value.fOperand.fScalar - scriptTests[index].fScalarAnswer);
+ if (error >= SK_Scalar1 / 10000)
+ SkDEBUGF(("script '%s' == value %g != expected answer %g\n", script, value.fOperand.fScalar / (1.0f * SK_Scalar1), scriptTests[index].fScalarAnswer / (1.0f * SK_Scalar1)));
+ SkASSERT(error < SK_Scalar1 / 10000);
+ break;
+ case SkOperand2::kString:
+ SkASSERT(value.fOperand.fString->equals(scriptTests[index].fStringAnswer));
+ break;
+ default:
+ SkASSERT(0);
+ }
+ }
+#endif // SK_SUPPORT_UNITTEST
+}
+#endif // SK_DEBUG
diff --git a/gfx/skia/skia/src/animator/SkSnapshot.cpp b/gfx/skia/skia/src/animator/SkSnapshot.cpp
new file mode 100644
index 000000000..4d35432bd
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkSnapshot.cpp
@@ -0,0 +1,67 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkTypes.h"
+
+#include "SkSnapshot.h"
+#include "SkAnimateMaker.h"
+#include "SkCanvas.h"
+#include "SkImageEncoder.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkSnapshot::fInfo[] = {
+ SK_MEMBER(filename, String),
+ SK_MEMBER(quality, Float),
+ SK_MEMBER(sequence, Boolean),
+ SK_MEMBER(type, BitmapEncoding)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkSnapshot);
+
+SkSnapshot::SkSnapshot()
+{
+ quality = 100 * SK_Scalar1;
+ type = (SkImageEncoder::Type) -1;
+ sequence = false;
+ fSeqVal = 0;
+}
+
+bool SkSnapshot::draw(SkAnimateMaker& maker) {
+ SkASSERT(type >= 0);
+ SkASSERT(filename.size() > 0);
+ SkImageEncoder* encoder = SkImageEncoder::Create((SkImageEncoder::Type) type);
+ if (!encoder) {
+ return false;
+ }
+ SkAutoTDelete<SkImageEncoder> ad(encoder);
+
+ SkString name(filename);
+ if (sequence) {
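+ // append a zero-padded three-digit frame number; sequencing stops after frame 999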
+ char num[4] = "000";
+ num[0] = (char) (num[0] + fSeqVal / 100);
+ num[1] = (char) (num[1] + fSeqVal / 10 % 10);
+ num[2] = (char) (num[2] + fSeqVal % 10);
+ name.append(num);
+ if (++fSeqVal > 999)
+ sequence = false;
+ }
+ if (type == SkImageEncoder::kJPEG_Type)
+ name.append(".jpg");
+ else if (type == SkImageEncoder::kPNG_Type)
+ name.append(".png");
+
+ SkBitmap pixels;
+ pixels.allocPixels(maker.fCanvas->imageInfo());
+ maker.fCanvas->readPixels(&pixels, 0, 0);
+ encoder->encodeFile(name.c_str(), pixels, SkScalarFloorToInt(quality));
+ return false;
+}
diff --git a/gfx/skia/skia/src/animator/SkSnapshot.h b/gfx/skia/skia/src/animator/SkSnapshot.h
new file mode 100644
index 000000000..003a9dc79
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkSnapshot.h
@@ -0,0 +1,29 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkSnapShot_DEFINED
+#define SkSnapShot_DEFINED
+
+#include "SkADrawable.h"
+#include "SkMemberInfo.h"
+#include "SkString.h"
+
+class SkSnapshot: public SkADrawable {
+ DECLARE_MEMBER_INFO(Snapshot);
+ SkSnapshot();
+ bool draw(SkAnimateMaker& ) override;
+ private:
+ SkString filename;
+ SkScalar quality;
+ SkBool sequence;
+ int /*SkImageEncoder::Type*/ type;
+ int fSeqVal;
+};
+
+#endif // SkSnapShot_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkTDArray_Experimental.h b/gfx/skia/skia/src/animator/SkTDArray_Experimental.h
new file mode 100644
index 000000000..ff693ac39
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkTDArray_Experimental.h
@@ -0,0 +1,142 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTDArray_Experimental_DEFINED
+#define SkTDArray_Experimental_DEFINED
+
+#include "SkTypes.h"
+
+#ifdef SK_BUILD_FOR_UNIX
+#define SK_BUILD_FOR_ADS_12
+#endif
+
+#if !defined(SK_BUILD_FOR_ADS_12) && !defined(__x86_64__)
+#define SK_SMALLER_ARRAY_TEMPLATE_EXPERIMENT 1
+#else
+#define SK_SMALLER_ARRAY_TEMPLATE_EXPERIMENT 0
+#endif
+
+#if SK_SMALLER_ARRAY_TEMPLATE_EXPERIMENT == 0
+#include "SkTDArray.h"
+#define SkIntArray(type) SkTDArray<type>
+#define SkLongArray(type) SkTDArray<type>
+#else
+
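+// Experiment to reduce template bloat: SkDS32Array is a non-template base that stores raw
+// 32-bit elements, and SkTDS32Array<T> below is a thin casting wrapper, so only the base
+// class carries real code.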
+class SkDS32Array {
+protected:
+ SkDS32Array();
+ SkDS32Array(const SkDS32Array& src);
+ SkDS32Array(const int32_t src[], U16CPU count);
+ SkDS32Array& operator=(const SkDS32Array& src);
+ friend int operator==(const SkDS32Array& a, const SkDS32Array& b);
+ int32_t* append() { return this->append(1, nullptr); }
+ int32_t* append(U16CPU count, const int32_t* src = nullptr);
+
+ int32_t* appendClear()
+ {
+ int32_t* result = this->append();
+ *result = 0;
+ return result;
+ }
+
+ int find(const int32_t& elem) const;
+ int32_t* insert(U16CPU index, U16CPU count, const int32_t* src);
+ int rfind(const int32_t& elem) const;
+ void swap(SkDS32Array& other);
+public:
+ bool isEmpty() const { return fCount == 0; }
+ int count() const { return fCount; }
+
+ void remove(U16CPU index, U16CPU count = 1)
+ {
+ SkASSERT(index + count <= fCount);
+ fCount = SkToU16(fCount - count);
+ memmove(fArray + index, fArray + index + count, sizeof(int32_t) * (fCount - index));
+ }
+
+ void reset()
+ {
+ if (fArray)
+ {
+ sk_free(fArray);
+ fArray = nullptr;
+#ifdef SK_DEBUG
+ fData = nullptr;
+#endif
+ fReserve = fCount = 0;
+ }
+ else
+ {
+ SkASSERT(fReserve == 0 && fCount == 0);
+ }
+ }
+
+ void setCount(U16CPU count)
+ {
+ if (count > fReserve)
+ this->growBy(count - fCount);
+ else
+ fCount = SkToU16(count);
+ }
+protected:
+#ifdef SK_DEBUG
+ enum {
+ kDebugArraySize = 24
+ };
+ int32_t(* fData)[kDebugArraySize];
+#endif
+ int32_t* fArray;
+ uint16_t fReserve, fCount;
+ void growBy(U16CPU extra);
+};
+
+#ifdef SK_DEBUG
+ #define SYNC() fTData = (T (*)[kDebugArraySize]) fArray
+#else
+ #define SYNC()
+#endif
+
+template <typename T> class SkTDS32Array : public SkDS32Array {
+public:
+ SkTDS32Array() { SkDEBUGCODE(fTData = nullptr); SkASSERT(sizeof(T) == sizeof(int32_t)); }
+ SkTDS32Array(const SkTDS32Array<T>& src) : SkDS32Array(src) {}
+ ~SkTDS32Array() { sk_free(fArray); }
+ T& operator[](int index) const { SYNC(); SkASSERT((unsigned)index < fCount); return ((T*) fArray)[index]; }
+ SkTDS32Array<T>& operator=(const SkTDS32Array<T>& src) {
+ return (SkTDS32Array<T>&) SkDS32Array::operator=(src); }
+ friend int operator==(const SkTDS32Array<T>& a, const SkTDS32Array<T>& b) {
+ return operator==((const SkDS32Array&) a, (const SkDS32Array&) b); }
+ T* append() { return (T*) SkDS32Array::append(); }
+ T* appendClear() { return (T*) SkDS32Array::appendClear(); }
+ T* append(U16CPU count, const T* src = nullptr) { return (T*) SkDS32Array::append(count, (const int32_t*) src); }
+ T* begin() const { SYNC(); return (T*) fArray; }
+ T* end() const { return (T*) (fArray ? fArray + fCount : nullptr); }
+ int find(const T& elem) const { return SkDS32Array::find((const int32_t&) elem); }
+ T* insert(U16CPU index) { return this->insert(index, 1, nullptr); }
+ T* insert(U16CPU index, U16CPU count, const T* src = nullptr) {
+ return (T*) SkDS32Array::insert(index, count, (const int32_t*) src); }
+ int rfind(const T& elem) const { return SkDS32Array::rfind((const int32_t&) elem); }
+ T* push() { return this->append(); }
+ void push(T& elem) { *this->append() = elem; }
+ const T& top() const { return (*this)[fCount - 1]; }
+ T& top() { return (*this)[fCount - 1]; }
+ void pop(T* elem) { if (elem) *elem = (*this)[fCount - 1]; --fCount; }
+ void pop() { --fCount; }
+private:
+#ifdef SK_DEBUG
+ mutable T(* fTData)[kDebugArraySize];
+#endif
+};
+
+#define SkIntArray(type) SkTDS32Array<type> // holds 32 bit data types
+#define SkLongArray(type) SkTDS32Array<type>
+
+#endif // SK_SMALLER_ARRAY_TEMPLATE_EXPERIMENT
+
+#endif // SkTDArray_Experimental_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkTDStack.h b/gfx/skia/skia/src/animator/SkTDStack.h
new file mode 100644
index 000000000..e286e4a03
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkTDStack.h
@@ -0,0 +1,110 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTDStack_DEFINED
+#define SkTDStack_DEFINED
+
+#include "SkTypes.h"
+
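+// A stack allocated in fixed-size slabs: fInitialRec is embedded, and further Recs of
+// kSlotCount slots are malloc'd and chained through fNext. fCount counts the used slots in
+// the current slab; fTotalCount is the overall depth.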
+template <typename T> class SkTDStack : SkNoncopyable {
+public:
+ SkTDStack() : fCount(0), fTotalCount(0) {
+ fInitialRec.fNext = nullptr;
+ fRec = &fInitialRec;
+
+ // fCount = kSlotCount;
+ }
+
+ ~SkTDStack() {
+ Rec* rec = fRec;
+ while (rec != &fInitialRec) {
+ Rec* next = rec->fNext;
+ sk_free(rec);
+ rec = next;
+ }
+ }
+
+ int count() const { return fTotalCount; }
+ int depth() const { return fTotalCount; }
+ bool empty() const { return fTotalCount == 0; }
+
+ T* push() {
+ SkASSERT(fCount <= kSlotCount);
+ if (fCount == kSlotCount) {
+ Rec* rec = (Rec*)sk_malloc_throw(sizeof(Rec));
+ rec->fNext = fRec;
+ fRec = rec;
+ fCount = 0;
+ }
+ ++fTotalCount;
+ return &fRec->fSlots[fCount++];
+ }
+
+ void push(const T& elem) { *this->push() = elem; }
+
+ const T& index(int idx) const {
+ SkASSERT(fRec && fCount > idx);
+ return fRec->fSlots[fCount - idx - 1];
+ }
+
+ T& index(int idx) {
+ SkASSERT(fRec && fCount > idx);
+ return fRec->fSlots[fCount - idx - 1];
+ }
+
+ const T& top() const {
+ SkASSERT(fRec && fCount > 0);
+ return fRec->fSlots[fCount - 1];
+ }
+
+ T& top() {
+ SkASSERT(fRec && fCount > 0);
+ return fRec->fSlots[fCount - 1];
+ }
+
+ void pop(T* elem) {
+ if (elem) {
+ *elem = fRec->fSlots[fCount - 1];
+ }
+ this->pop();
+ }
+
+ void pop() {
+ SkASSERT(fCount > 0 && fRec);
+ --fTotalCount;
+ if (--fCount == 0) {
+ if (fRec != &fInitialRec) {
+ Rec* rec = fRec->fNext;
+ sk_free(fRec);
+ fCount = kSlotCount;
+ fRec = rec;
+ } else {
+ SkASSERT(fTotalCount == 0);
+ }
+ }
+ }
+
+private:
+ enum {
+ kSlotCount = 8
+ };
+
+ struct Rec;
+ friend struct Rec;
+
+ struct Rec {
+ Rec* fNext;
+ T fSlots[kSlotCount];
+ };
+ Rec fInitialRec;
+ Rec* fRec;
+ int fCount, fTotalCount;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/animator/SkTextOnPath.cpp b/gfx/skia/skia/src/animator/SkTextOnPath.cpp
new file mode 100644
index 000000000..864741619
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkTextOnPath.cpp
@@ -0,0 +1,39 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkTextOnPath.h"
+#include "SkAnimateMaker.h"
+#include "SkCanvas.h"
+#include "SkDrawPath.h"
+#include "SkDrawText.h"
+#include "SkPaint.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkTextOnPath::fInfo[] = {
+ SK_MEMBER(offset, Float),
+ SK_MEMBER(path, Path),
+ SK_MEMBER(text, Text)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkTextOnPath);
+
+SkTextOnPath::SkTextOnPath() : offset(0), path(nullptr), text(nullptr) {
+}
+
+bool SkTextOnPath::draw(SkAnimateMaker& maker) {
+ SkASSERT(text);
+ SkASSERT(path);
+ SkBoundableAuto boundable(this, maker);
+ maker.fCanvas->drawTextOnPathHV(text->getText(), text->getSize(),
+ path->getPath(), offset, 0, *maker.fPaint);
+ return false;
+}
diff --git a/gfx/skia/skia/src/animator/SkTextOnPath.h b/gfx/skia/skia/src/animator/SkTextOnPath.h
new file mode 100644
index 000000000..36adfd522
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkTextOnPath.h
@@ -0,0 +1,30 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTextOnPath_DEFINED
+#define SkTextOnPath_DEFINED
+
+#include "SkBoundable.h"
+#include "SkMemberInfo.h"
+
+class SkDrawPath;
+class SkText;
+
+class SkTextOnPath : public SkBoundable {
+ DECLARE_MEMBER_INFO(TextOnPath);
+ SkTextOnPath();
+ bool draw(SkAnimateMaker& ) override;
+private:
+ SkScalar offset;
+ SkDrawPath* path;
+ SkText* text;
+ typedef SkBoundable INHERITED;
+};
+
+#endif // SkTextOnPath_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkTextToPath.cpp b/gfx/skia/skia/src/animator/SkTextToPath.cpp
new file mode 100644
index 000000000..d4b525b9a
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkTextToPath.cpp
@@ -0,0 +1,47 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkTextToPath.h"
+#include "SkAnimateMaker.h"
+#include "SkDrawPaint.h"
+#include "SkDrawPath.h"
+#include "SkDrawText.h"
+#include "SkPaint.h"
+
+#if SK_USE_CONDENSED_INFO == 0
+
+const SkMemberInfo SkTextToPath::fInfo[] = {
+ SK_MEMBER(paint, Paint),
+ SK_MEMBER(path, Path),
+ SK_MEMBER(text, Text)
+};
+
+#endif
+
+DEFINE_GET_MEMBER(SkTextToPath);
+
+SkTextToPath::SkTextToPath() : paint(nullptr), path(nullptr), text(nullptr) {
+}
+
+bool SkTextToPath::draw(SkAnimateMaker& maker) {
+ path->draw(maker);
+ return false;
+}
+
+void SkTextToPath::onEndElement(SkAnimateMaker& maker) {
+ if (paint == nullptr || path == nullptr || text == nullptr) {
+ // !!! add error message here
+ maker.setErrorCode(SkDisplayXMLParserError::kErrorInAttributeValue);
+ return;
+ }
+ SkPaint realPaint;
+ paint->setupPaint(&realPaint);
+ realPaint.getTextPath(text->getText(), text->getSize(), text->x,
+ text->y, &path->getPath());
+}
diff --git a/gfx/skia/skia/src/animator/SkTextToPath.h b/gfx/skia/skia/src/animator/SkTextToPath.h
new file mode 100644
index 000000000..23b6bfe8b
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkTextToPath.h
@@ -0,0 +1,31 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTextToPath_DEFINED
+#define SkTextToPath_DEFINED
+
+#include "SkDrawPath.h"
+#include "SkMemberInfo.h"
+
+class SkDrawPaint;
+class SkDrawPath;
+class SkText;
+
+class SkTextToPath : public SkADrawable {
+ DECLARE_MEMBER_INFO(TextToPath);
+ SkTextToPath();
+ bool draw(SkAnimateMaker& ) override;
+ void onEndElement(SkAnimateMaker& ) override;
+private:
+ SkDrawPaint* paint;
+ SkDrawPath* path;
+ SkText* text;
+};
+
+#endif // SkTextToPath_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkTypedArray.cpp b/gfx/skia/skia/src/animator/SkTypedArray.cpp
new file mode 100644
index 000000000..d9e2bc7f7
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkTypedArray.cpp
@@ -0,0 +1,179 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkTypedArray.h"
+
+SkTypedArray::SkTypedArray() : fType(SkType_Unknown) {
+}
+
+SkTypedArray::SkTypedArray(SkDisplayTypes type) : fType(type) {
+}
+
+bool SkTypedArray::getIndex(int index, SkOperand* operand) {
+ if (index >= count()) {
+ SkASSERT(0);
+ return false;
+ }
+ *operand = begin()[index];
+ return true;
+}
+
+
+#if SK_SMALLER_ARRAY_TEMPLATE_EXPERIMENT == 1
+SkDS32Array::SkDS32Array()
+{
+ fReserve = fCount = 0;
+ fArray = nullptr;
+#ifdef SK_DEBUG
+ fData = nullptr;
+#endif
+}
+
+SkDS32Array::SkDS32Array(const SkDS32Array& src)
+{
+ fReserve = fCount = 0;
+ fArray = nullptr;
+#ifdef SK_DEBUG
+ fData = nullptr;
+#endif
+ SkDS32Array tmp(src.fArray, src.fCount);
+ this->swap(tmp);
+}
+
+SkDS32Array::SkDS32Array(const int32_t src[], U16CPU count)
+{
+ SkASSERT(src || count == 0);
+
+ fReserve = fCount = 0;
+ fArray = nullptr;
+#ifdef SK_DEBUG
+ fData = nullptr;
+#endif
+ if (count)
+ {
+ fArray = (int32_t*)sk_malloc_throw(count * sizeof(int32_t));
+#ifdef SK_DEBUG
+ fData = (int32_t (*)[kDebugArraySize]) fArray;
+#endif
+ memcpy(fArray, src, sizeof(int32_t) * count);
+ fReserve = fCount = SkToU16(count);
+ }
+}
+
+SkDS32Array& SkDS32Array::operator=(const SkDS32Array& src)
+{
+ if (this != &src)
+ {
+ if (src.fCount > fReserve)
+ {
+ SkDS32Array tmp(src.fArray, src.fCount);
+ this->swap(tmp);
+ }
+ else
+ {
+ memcpy(fArray, src.fArray, sizeof(int32_t) * src.fCount);
+ fCount = src.fCount;
+ }
+ }
+ return *this;
+}
+
+int operator==(const SkDS32Array& a, const SkDS32Array& b)
+{
+ return a.fCount == b.fCount &&
+ (a.fCount == 0 || !memcmp(a.fArray, b.fArray, a.fCount * sizeof(int32_t)));
+}
+
+void SkDS32Array::swap(SkDS32Array& other)
+{
+ SkTSwap(fArray, other.fArray);
+#ifdef SK_DEBUG
+ SkTSwap(fData, other.fData);
+#endif
+ SkTSwap(fReserve, other.fReserve);
+ SkTSwap(fCount, other.fCount);
+}
+
+int32_t* SkDS32Array::append(U16CPU count, const int32_t* src)
+{
+ unsigned oldCount = fCount;
+ if (count)
+ {
+ SkASSERT(src == nullptr || fArray == nullptr ||
+ src + count <= fArray || fArray + count <= src);
+
+ this->growBy(count);
+ if (src)
+ memcpy(fArray + oldCount, src, sizeof(int32_t) * count);
+ }
+ return fArray + oldCount;
+}
+
+int SkDS32Array::find(const int32_t& elem) const
+{
+ const int32_t* iter = fArray;
+ const int32_t* stop = fArray + fCount;
+
+ for (; iter < stop; iter++)
+ {
+ if (*iter == elem)
+ return (int) (iter - fArray);
+ }
+ return -1;
+}
+
+void SkDS32Array::growBy(U16CPU extra)
+{
+ SkASSERT(extra);
+ SkASSERT(fCount + extra <= 0xFFFF);
+
+ if (fCount + extra > fReserve)
+ {
+ size_t size = fCount + extra + 4;
+ size += size >> 2;
+ int32_t* array = (int32_t*)sk_malloc_throw(size * sizeof(int32_t));
+ memcpy(array, fArray, fCount * sizeof(int32_t));
+
+ sk_free(fArray);
+ fArray = array;
+#ifdef SK_DEBUG
+ fData = (int32_t (*)[kDebugArraySize]) fArray;
+#endif
+ fReserve = SkToU16((U16CPU)size);
+ }
+ fCount = SkToU16(fCount + extra);
+}
+
+int32_t* SkDS32Array::insert(U16CPU index, U16CPU count, const int32_t* src)
+{
+ SkASSERT(count);
+ int oldCount = fCount;
+ this->growBy(count);
+ int32_t* dst = fArray + index;
+ memmove(dst + count, dst, sizeof(int32_t) * (oldCount - index));
+ if (src)
+ memcpy(dst, src, sizeof(int32_t) * count);
+ return dst;
+}
+
+
+ int SkDS32Array::rfind(const int32_t& elem) const
+ {
+ const int32_t* iter = fArray + fCount;
+ const int32_t* stop = fArray;
+
+ while (iter > stop)
+ {
+ if (*--iter == elem)
+ return (int) (iter - stop);
+ }
+ return -1;
+ }
+
+#endif
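The growth policy in SkDS32Array::growBy() above is worth spelling out: the array reserves the requested count plus a constant pad of 4, then adds a further 25% of head-room. A small sketch of that arithmetic (the function name is illustrative, not from the patch):

    // e.g. fCount = 16, extra = 1: size = 21, then 21 + (21 >> 2) = 26 reserved slots.
    static size_t next_reserve_sketch(size_t count, size_t extra) {
        size_t size = count + extra + 4;   // requested size plus a small constant pad
        size += size >> 2;                 // +25% head-room to amortize future growth
        return size;
    }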
diff --git a/gfx/skia/skia/src/animator/SkTypedArray.h b/gfx/skia/skia/src/animator/SkTypedArray.h
new file mode 100644
index 000000000..e93b10670
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkTypedArray.h
@@ -0,0 +1,31 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTypedArray_DEFINED
+#define SkTypedArray_DEFINED
+
+#include "SkScript.h"
+#include "SkTDArray_Experimental.h"
+
+class SkTypedArray : public SkTDOperandArray {
+public:
+ SkTypedArray();
+ SkTypedArray(SkDisplayTypes type);
+ bool getIndex(int index, SkOperand* operand);
+ SkDisplayTypes getType() { return fType; }
+ SkScriptEngine::SkOpType getOpType() { return SkScriptEngine::ToOpType(fType); }
+ void setType(SkDisplayTypes type) {
+ // SkASSERT(count() == 0);
+ fType = type;
+ }
+protected:
+ SkDisplayTypes fType;
+};
+
+#endif // SkTypedArray_DEFINED
diff --git a/gfx/skia/skia/src/animator/SkXMLAnimatorWriter.cpp b/gfx/skia/skia/src/animator/SkXMLAnimatorWriter.cpp
new file mode 100644
index 000000000..25b10a903
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkXMLAnimatorWriter.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkXMLAnimatorWriter.h"
+#include "SkAnimator.h"
+#include "SkAnimateMaker.h"
+#include "SkDisplayXMLParser.h"
+
+SkXMLAnimatorWriter::SkXMLAnimatorWriter(SkAnimator* animator) : fAnimator(animator)
+{
+ fParser = new SkDisplayXMLParser(*fAnimator->fMaker);
+}
+
+SkXMLAnimatorWriter::~SkXMLAnimatorWriter() {
+ delete fParser;
+}
+
+void SkXMLAnimatorWriter::onAddAttributeLen(const char name[], const char value[], size_t length)
+{
+ fParser->onAddAttributeLen(name, value, length);
+}
+
+void SkXMLAnimatorWriter::onAddText(const char text[], size_t length) {
+ SkDebugf("not implemented: SkXMLAnimatorWriter::onAddText()\n");
+}
+
+void SkXMLAnimatorWriter::onEndElement()
+{
+ Elem* elem = getEnd();
+ fParser->onEndElement(elem->fName.c_str());
+ doEnd(elem);
+}
+
+void SkXMLAnimatorWriter::onStartElementLen(const char name[], size_t length)
+{
+ doStart(name, length);
+ fParser->onStartElementLen(name, length);
+}
+
+void SkXMLAnimatorWriter::writeHeader()
+{
+}
+
+#ifdef SK_DEBUG
+#include "SkCanvas.h"
+#include "SkPaint.h"
+
+void SkXMLAnimatorWriter::UnitTest(SkCanvas* canvas)
+{
+ SkAnimator s;
+ SkXMLAnimatorWriter w(&s);
+ w.startElement("screenplay");
+ w.startElement("animateField");
+ w.addAttribute("field", "x1");
+ w.addAttribute("id", "to100");
+ w.addAttribute("from", "0");
+ w.addAttribute("to", "100");
+ w.addAttribute("dur", "1");
+ w.endElement();
+ w.startElement("event");
+ w.addAttribute("kind", "onLoad");
+ w.startElement("line");
+ w.addAttribute("id", "line");
+ w.addAttribute("x1", "-1");
+ w.addAttribute("y1", "20");
+ w.addAttribute("x2", "150");
+ w.addAttribute("y2", "40");
+ w.endElement();
+ w.startElement("apply");
+ w.addAttribute("animator", "to100");
+ w.addAttribute("scope", "line");
+ w.endElement();
+ w.endElement();
+ w.endElement();
+ SkPaint paint;
+ canvas->drawColor(SK_ColorWHITE);
+ s.draw(canvas, &paint, 0);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/animator/SkXMLAnimatorWriter.h b/gfx/skia/skia/src/animator/SkXMLAnimatorWriter.h
new file mode 100644
index 000000000..87cf21841
--- /dev/null
+++ b/gfx/skia/skia/src/animator/SkXMLAnimatorWriter.h
@@ -0,0 +1,36 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkXMLAnimatorWriter_DEFINED
+#define SkXMLAnimatorWriter_DEFINED
+
+#include "SkXMLWriter.h"
+
+class SkAnimator;
+class SkDisplayXMLParser;
+
+class SkXMLAnimatorWriter : public SkXMLWriter {
+public:
+ SkXMLAnimatorWriter(SkAnimator*);
+ virtual ~SkXMLAnimatorWriter();
+ void writeHeader() override;
+ SkDEBUGCODE(static void UnitTest(class SkCanvas* canvas);)
+
+protected:
+ void onAddAttributeLen(const char name[], const char value[], size_t length) override;
+ void onEndElement() override;
+ void onStartElementLen(const char elem[], size_t length) override;
+ void onAddText(const char text[], size_t length) override;
+
+private:
+ SkAnimator* fAnimator;
+ SkDisplayXMLParser* fParser;
+};
+
+#endif // SkXMLAnimatorWriter_DEFINED
diff --git a/gfx/skia/skia/src/animator/thingstodo.txt b/gfx/skia/skia/src/animator/thingstodo.txt
new file mode 100644
index 000000000..8d0d47a02
--- /dev/null
+++ b/gfx/skia/skia/src/animator/thingstodo.txt
@@ -0,0 +1,21 @@
+things to do:
+ figure out where endless or very deep recursion is possible
+ at these points, generate an error if actual physical stack gets too large
+ candidates are scripts
+ eval(eval(eval... user callouts
+ ((((( operator precedence or similar making stack deep
+ groups within groups
+ very large apply create or apply immediate steps
+
+ write tests for math functions
+ looks like random takes a parameter when it should take zero parameters
+
+ add Math, Number files to perforce for docs
+ alphabetize attributes in docs
+
+ manually modified tools/screenplayDocs/xmlToJPEG.cpp
+
+ fix docs where lines are stitched together (insert space)
+
+ naked <data> outside of <post> asserts on name
+ handle errors for all element not contained by correct parents
\ No newline at end of file
diff --git a/gfx/skia/skia/src/c/sk_c_from_to.h b/gfx/skia/skia/src/c/sk_c_from_to.h
new file mode 100644
index 000000000..19fda37a2
--- /dev/null
+++ b/gfx/skia/skia/src/c/sk_c_from_to.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+static bool find_sk(CType from, SKType* to) {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(CTypeSkTypeMap); ++i) {
+ if (CTypeSkTypeMap[i].fC == from) {
+ if (to) {
+ *to = CTypeSkTypeMap[i].fSK;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool find_c(SKType from, CType* to) {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(CTypeSkTypeMap); ++i) {
+ if (CTypeSkTypeMap[i].fSK == from) {
+ if (to) {
+ *to = CTypeSkTypeMap[i].fC;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+#undef CType
+#undef SKType
+#undef CTypeSkTypeMap
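sk_c_from_to.h is an include-as-template header: the includer defines CType, SKType and CTypeSkTypeMap, then includes the file to stamp out static find_sk()/find_c() lookups over that map, and the trailing #undefs make it safe to include again for the next pair of types. A sketch of the intended instantiation; the real ones are in sk_paint.cpp below, and the map name here is illustrative only:

    #include "SkPaint.h"
    #include "sk_paint.h"

    const struct {
        sk_stroke_cap_t fC;
        SkPaint::Cap    fSK;
    } g_example_cap_map[] = {
        { BUTT_SK_STROKE_CAP,  SkPaint::kButt_Cap  },
        { ROUND_SK_STROKE_CAP, SkPaint::kRound_Cap },
    };

    #define CType          sk_stroke_cap_t
    #define SKType         SkPaint::Cap
    #define CTypeSkTypeMap g_example_cap_map
    #include "sk_c_from_to.h"   // emits static find_sk() / find_c() over g_example_cap_map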
diff --git a/gfx/skia/skia/src/c/sk_paint.cpp b/gfx/skia/skia/src/c/sk_paint.cpp
new file mode 100644
index 000000000..126170ca4
--- /dev/null
+++ b/gfx/skia/skia/src/c/sk_paint.cpp
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBlendMode.h"
+#include "SkMaskFilter.h"
+#include "SkPaint.h"
+#include "SkShader.h"
+
+#include "sk_paint.h"
+#include "sk_types_priv.h"
+
+#define MAKE_FROM_TO_NAME(FROM) g_ ## FROM ## _map
+
+const struct {
+ sk_stroke_cap_t fC;
+ SkPaint::Cap fSK;
+} MAKE_FROM_TO_NAME(sk_stroke_cap_t)[] = {
+ { BUTT_SK_STROKE_CAP, SkPaint::kButt_Cap },
+ { ROUND_SK_STROKE_CAP, SkPaint::kRound_Cap },
+ { SQUARE_SK_STROKE_CAP, SkPaint::kSquare_Cap },
+};
+
+const struct {
+ sk_stroke_join_t fC;
+ SkPaint::Join fSK;
+} MAKE_FROM_TO_NAME(sk_stroke_join_t)[] = {
+ { MITER_SK_STROKE_JOIN, SkPaint::kMiter_Join },
+ { ROUND_SK_STROKE_JOIN, SkPaint::kRound_Join },
+ { BEVEL_SK_STROKE_JOIN, SkPaint::kBevel_Join },
+};
+
+#define CType sk_stroke_cap_t
+#define SKType SkPaint::Cap
+#define CTypeSkTypeMap MAKE_FROM_TO_NAME(sk_stroke_cap_t)
+#include "sk_c_from_to.h"
+
+#define CType sk_stroke_join_t
+#define SKType SkPaint::Join
+#define CTypeSkTypeMap MAKE_FROM_TO_NAME(sk_stroke_join_t)
+#include "sk_c_from_to.h"
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_paint_t* sk_paint_new() { return (sk_paint_t*)new SkPaint; }
+
+void sk_paint_delete(sk_paint_t* cpaint) { delete AsPaint(cpaint); }
+
+bool sk_paint_is_antialias(const sk_paint_t* cpaint) {
+ return AsPaint(*cpaint).isAntiAlias();
+}
+
+void sk_paint_set_antialias(sk_paint_t* cpaint, bool aa) {
+ AsPaint(cpaint)->setAntiAlias(aa);
+}
+
+sk_color_t sk_paint_get_color(const sk_paint_t* cpaint) {
+ return AsPaint(*cpaint).getColor();
+}
+
+void sk_paint_set_color(sk_paint_t* cpaint, sk_color_t c) {
+ AsPaint(cpaint)->setColor(c);
+}
+
+void sk_paint_set_shader(sk_paint_t* cpaint, sk_shader_t* cshader) {
+ AsPaint(cpaint)->setShader(sk_ref_sp(AsShader(cshader)));
+}
+
+void sk_paint_set_maskfilter(sk_paint_t* cpaint, sk_maskfilter_t* cfilter) {
+ AsPaint(cpaint)->setMaskFilter(sk_ref_sp(AsMaskFilter(cfilter)));
+}
+
+bool sk_paint_is_stroke(const sk_paint_t* cpaint) {
+ return AsPaint(*cpaint).getStyle() != SkPaint::kFill_Style;
+}
+
+void sk_paint_set_stroke(sk_paint_t* cpaint, bool doStroke) {
+ AsPaint(cpaint)->setStyle(doStroke ? SkPaint::kStroke_Style : SkPaint::kFill_Style);
+}
+
+float sk_paint_get_stroke_width(const sk_paint_t* cpaint) {
+ return AsPaint(*cpaint).getStrokeWidth();
+}
+
+void sk_paint_set_stroke_width(sk_paint_t* cpaint, float width) {
+ AsPaint(cpaint)->setStrokeWidth(width);
+}
+
+float sk_paint_get_stroke_miter(const sk_paint_t* cpaint) {
+ return AsPaint(*cpaint).getStrokeMiter();
+}
+
+void sk_paint_set_stroke_miter(sk_paint_t* cpaint, float miter) {
+ AsPaint(cpaint)->setStrokeMiter(miter);
+}
+
+sk_stroke_cap_t sk_paint_get_stroke_cap(const sk_paint_t* cpaint) {
+ sk_stroke_cap_t ccap;
+ if (!find_c(AsPaint(*cpaint).getStrokeCap(), &ccap)) {
+ // no C-side mapping: fall back to a defined default instead of returning an uninitialized value
+ ccap = BUTT_SK_STROKE_CAP;
+ }
+ return ccap;
+}
+
+void sk_paint_set_stroke_cap(sk_paint_t* cpaint, sk_stroke_cap_t ccap) {
+ SkPaint::Cap skcap;
+ if (find_sk(ccap, &skcap)) {
+ AsPaint(cpaint)->setStrokeCap(skcap);
+ } else {
+ // unknown ccap
+ }
+}
+
+sk_stroke_join_t sk_paint_get_stroke_join(const sk_paint_t* cpaint) {
+ sk_stroke_join_t cjoin;
+ if (!find_c(AsPaint(*cpaint).getStrokeJoin(), &cjoin)) {
+ // no C-side mapping: fall back to a defined default instead of returning an uninitialized value
+ cjoin = MITER_SK_STROKE_JOIN;
+ }
+ return cjoin;
+}
+
+void sk_paint_set_stroke_join(sk_paint_t* cpaint, sk_stroke_join_t cjoin) {
+ SkPaint::Join skjoin;
+ if (find_sk(cjoin, &skjoin)) {
+ AsPaint(cpaint)->setStrokeJoin(skjoin);
+ } else {
+ // unknown cjoin
+ }
+}
+
+void sk_paint_set_xfermode_mode(sk_paint_t* paint, sk_xfermode_mode_t mode) {
+ SkASSERT(paint);
+ SkBlendMode skmode;
+ switch (mode) {
+ #define MAP(X, Y) case (X): skmode = (Y); break
+ MAP( CLEAR_SK_XFERMODE_MODE, SkBlendMode::kClear );
+ MAP( SRC_SK_XFERMODE_MODE, SkBlendMode::kSrc );
+ MAP( DST_SK_XFERMODE_MODE, SkBlendMode::kDst );
+ MAP( SRCOVER_SK_XFERMODE_MODE, SkBlendMode::kSrcOver );
+ MAP( DSTOVER_SK_XFERMODE_MODE, SkBlendMode::kDstOver );
+ MAP( SRCIN_SK_XFERMODE_MODE, SkBlendMode::kSrcIn );
+ MAP( DSTIN_SK_XFERMODE_MODE, SkBlendMode::kDstIn );
+ MAP( SRCOUT_SK_XFERMODE_MODE, SkBlendMode::kSrcOut );
+ MAP( DSTOUT_SK_XFERMODE_MODE, SkBlendMode::kDstOut );
+ MAP( SRCATOP_SK_XFERMODE_MODE, SkBlendMode::kSrcATop );
+ MAP( DSTATOP_SK_XFERMODE_MODE, SkBlendMode::kDstATop );
+ MAP( XOR_SK_XFERMODE_MODE, SkBlendMode::kXor );
+ MAP( PLUS_SK_XFERMODE_MODE, SkBlendMode::kPlus );
+ MAP( MODULATE_SK_XFERMODE_MODE, SkBlendMode::kModulate );
+ MAP( SCREEN_SK_XFERMODE_MODE, SkBlendMode::kScreen );
+ MAP( OVERLAY_SK_XFERMODE_MODE, SkBlendMode::kOverlay );
+ MAP( DARKEN_SK_XFERMODE_MODE, SkBlendMode::kDarken );
+ MAP( LIGHTEN_SK_XFERMODE_MODE, SkBlendMode::kLighten );
+ MAP( COLORDODGE_SK_XFERMODE_MODE, SkBlendMode::kColorDodge );
+ MAP( COLORBURN_SK_XFERMODE_MODE, SkBlendMode::kColorBurn );
+ MAP( HARDLIGHT_SK_XFERMODE_MODE, SkBlendMode::kHardLight );
+ MAP( SOFTLIGHT_SK_XFERMODE_MODE, SkBlendMode::kSoftLight );
+ MAP( DIFFERENCE_SK_XFERMODE_MODE, SkBlendMode::kDifference );
+ MAP( EXCLUSION_SK_XFERMODE_MODE, SkBlendMode::kExclusion );
+ MAP( MULTIPLY_SK_XFERMODE_MODE, SkBlendMode::kMultiply );
+ MAP( HUE_SK_XFERMODE_MODE, SkBlendMode::kHue );
+ MAP( SATURATION_SK_XFERMODE_MODE, SkBlendMode::kSaturation );
+ MAP( COLOR_SK_XFERMODE_MODE, SkBlendMode::kColor );
+ MAP( LUMINOSITY_SK_XFERMODE_MODE, SkBlendMode::kLuminosity );
+ #undef MAP
+ default:
+ return;
+ }
+ AsPaint(paint)->setBlendMode(skmode);
+}
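A short usage sketch of the sk_paint C API defined above; the setup values are arbitrary examples and the function name is illustrative:

    #include "sk_paint.h"

    static void paint_usage_sketch(void) {
        sk_paint_t* paint = sk_paint_new();
        sk_paint_set_antialias(paint, true);
        sk_paint_set_color(paint, 0xFF2080FF);                    // ARGB
        sk_paint_set_stroke(paint, true);
        sk_paint_set_stroke_width(paint, 4.0f);
        sk_paint_set_stroke_cap(paint, ROUND_SK_STROKE_CAP);
        sk_paint_set_stroke_join(paint, BEVEL_SK_STROKE_JOIN);
        sk_paint_set_xfermode_mode(paint, SRCOVER_SK_XFERMODE_MODE);
        sk_paint_delete(paint);
    }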
diff --git a/gfx/skia/skia/src/c/sk_surface.cpp b/gfx/skia/skia/src/c/sk_surface.cpp
new file mode 100644
index 000000000..c9b25675a
--- /dev/null
+++ b/gfx/skia/skia/src/c/sk_surface.cpp
@@ -0,0 +1,707 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCanvas.h"
+#include "SkData.h"
+#include "SkImage.h"
+#include "SkMaskFilter.h"
+#include "SkMatrix.h"
+#include "SkPaint.h"
+#include "SkPath.h"
+#include "SkPictureRecorder.h"
+#include "SkSurface.h"
+
+#include "sk_canvas.h"
+#include "sk_data.h"
+#include "sk_image.h"
+#include "sk_paint.h"
+#include "sk_path.h"
+#include "sk_surface.h"
+#include "sk_types_priv.h"
+
+const struct {
+ sk_colortype_t fC;
+ SkColorType fSK;
+} gColorTypeMap[] = {
+ { UNKNOWN_SK_COLORTYPE, kUnknown_SkColorType },
+ { RGBA_8888_SK_COLORTYPE, kRGBA_8888_SkColorType },
+ { BGRA_8888_SK_COLORTYPE, kBGRA_8888_SkColorType },
+ { ALPHA_8_SK_COLORTYPE, kAlpha_8_SkColorType },
+};
+
+const struct {
+ sk_alphatype_t fC;
+ SkAlphaType fSK;
+} gAlphaTypeMap[] = {
+ { OPAQUE_SK_ALPHATYPE, kOpaque_SkAlphaType },
+ { PREMUL_SK_ALPHATYPE, kPremul_SkAlphaType },
+ { UNPREMUL_SK_ALPHATYPE, kUnpremul_SkAlphaType },
+};
+
+static bool from_c_colortype(sk_colortype_t cCT, SkColorType* skCT) {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gColorTypeMap); ++i) {
+ if (gColorTypeMap[i].fC == cCT) {
+ if (skCT) {
+ *skCT = gColorTypeMap[i].fSK;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool to_c_colortype(SkColorType skCT, sk_colortype_t* cCT) {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gColorTypeMap); ++i) {
+ if (gColorTypeMap[i].fSK == skCT) {
+ if (cCT) {
+ *cCT = gColorTypeMap[i].fC;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool from_c_alphatype(sk_alphatype_t cAT, SkAlphaType* skAT) {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gAlphaTypeMap); ++i) {
+ if (gAlphaTypeMap[i].fC == cAT) {
+ if (skAT) {
+ *skAT = gAlphaTypeMap[i].fSK;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool from_c_info(const sk_imageinfo_t& cinfo, SkImageInfo* info) {
+ SkColorType ct;
+ SkAlphaType at;
+
+ if (!from_c_colortype(cinfo.colorType, &ct)) {
+ // optionally report error to client?
+ return false;
+ }
+ if (!from_c_alphatype(cinfo.alphaType, &at)) {
+ // optionally report error to client?
+ return false;
+ }
+ if (info) {
+ *info = SkImageInfo::Make(cinfo.width, cinfo.height, ct, at);
+ }
+ return true;
+}
+
+const struct {
+ sk_pixelgeometry_t fC;
+ SkPixelGeometry fSK;
+} gPixelGeometryMap[] = {
+ { UNKNOWN_SK_PIXELGEOMETRY, kUnknown_SkPixelGeometry },
+ { RGB_H_SK_PIXELGEOMETRY, kRGB_H_SkPixelGeometry },
+ { BGR_H_SK_PIXELGEOMETRY, kBGR_H_SkPixelGeometry },
+ { RGB_V_SK_PIXELGEOMETRY, kRGB_V_SkPixelGeometry },
+ { BGR_V_SK_PIXELGEOMETRY, kBGR_V_SkPixelGeometry },
+};
+
+
+static bool from_c_pixelgeometry(sk_pixelgeometry_t cGeom, SkPixelGeometry* skGeom) {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gPixelGeometryMap); ++i) {
+ if (gPixelGeometryMap[i].fC == cGeom) {
+ if (skGeom) {
+ *skGeom = gPixelGeometryMap[i].fSK;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+static void from_c_matrix(const sk_matrix_t* cmatrix, SkMatrix* matrix) {
+ matrix->setAll(cmatrix->mat[0], cmatrix->mat[1], cmatrix->mat[2],
+ cmatrix->mat[3], cmatrix->mat[4], cmatrix->mat[5],
+ cmatrix->mat[6], cmatrix->mat[7], cmatrix->mat[8]);
+}
+
+const struct {
+ sk_path_direction_t fC;
+ SkPath::Direction fSk;
+} gPathDirMap[] = {
+ { CW_SK_PATH_DIRECTION, SkPath::kCW_Direction },
+ { CCW_SK_PATH_DIRECTION, SkPath::kCCW_Direction },
+};
+
+static bool from_c_path_direction(sk_path_direction_t cdir, SkPath::Direction* dir) {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gPathDirMap); ++i) {
+ if (gPathDirMap[i].fC == cdir) {
+ if (dir) {
+ *dir = gPathDirMap[i].fSk;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+static SkData* AsData(const sk_data_t* cdata) {
+ return reinterpret_cast<SkData*>(const_cast<sk_data_t*>(cdata));
+}
+
+static sk_data_t* ToData(SkData* data) {
+ return reinterpret_cast<sk_data_t*>(data);
+}
+
+static sk_rect_t ToRect(const SkRect& rect) {
+ return reinterpret_cast<const sk_rect_t&>(rect);
+}
+
+static const SkRect& AsRect(const sk_rect_t& crect) {
+ return reinterpret_cast<const SkRect&>(crect);
+}
+
+static const SkPath& AsPath(const sk_path_t& cpath) {
+ return reinterpret_cast<const SkPath&>(cpath);
+}
+
+static SkPath* as_path(sk_path_t* cpath) {
+ return reinterpret_cast<SkPath*>(cpath);
+}
+
+static const SkImage* AsImage(const sk_image_t* cimage) {
+ return reinterpret_cast<const SkImage*>(cimage);
+}
+
+static sk_image_t* ToImage(SkImage* cimage) {
+ return reinterpret_cast<sk_image_t*>(cimage);
+}
+
+static sk_canvas_t* ToCanvas(SkCanvas* canvas) {
+ return reinterpret_cast<sk_canvas_t*>(canvas);
+}
+
+static SkCanvas* AsCanvas(sk_canvas_t* ccanvas) {
+ return reinterpret_cast<SkCanvas*>(ccanvas);
+}
+
+static SkPictureRecorder* AsPictureRecorder(sk_picture_recorder_t* crec) {
+ return reinterpret_cast<SkPictureRecorder*>(crec);
+}
+
+static sk_picture_recorder_t* ToPictureRecorder(SkPictureRecorder* rec) {
+ return reinterpret_cast<sk_picture_recorder_t*>(rec);
+}
+
+static const SkPicture* AsPicture(const sk_picture_t* cpic) {
+ return reinterpret_cast<const SkPicture*>(cpic);
+}
+
+static SkPicture* AsPicture(sk_picture_t* cpic) {
+ return reinterpret_cast<SkPicture*>(cpic);
+}
+
+static sk_picture_t* ToPicture(SkPicture* pic) {
+ return reinterpret_cast<sk_picture_t*>(pic);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+sk_colortype_t sk_colortype_get_default_8888() {
+ sk_colortype_t ct;
+ if (!to_c_colortype(kN32_SkColorType, &ct)) {
+ ct = UNKNOWN_SK_COLORTYPE;
+ }
+ return ct;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+sk_image_t* sk_image_new_raster_copy(const sk_imageinfo_t* cinfo, const void* pixels,
+ size_t rowBytes) {
+ SkImageInfo info;
+ if (!from_c_info(*cinfo, &info)) {
+ return NULL;
+ }
+ return (sk_image_t*)SkImage::MakeRasterCopy(SkPixmap(info, pixels, rowBytes)).release();
+}
+
+sk_image_t* sk_image_new_from_encoded(const sk_data_t* cdata, const sk_irect_t* subset) {
+ return ToImage(SkImage::MakeFromEncoded(sk_ref_sp(AsData(cdata)),
+ reinterpret_cast<const SkIRect*>(subset)).release());
+}
+
+sk_data_t* sk_image_encode(const sk_image_t* cimage) {
+ return ToData(AsImage(cimage)->encode());
+}
+
+void sk_image_ref(const sk_image_t* cimage) {
+ AsImage(cimage)->ref();
+}
+
+void sk_image_unref(const sk_image_t* cimage) {
+ AsImage(cimage)->unref();
+}
+
+int sk_image_get_width(const sk_image_t* cimage) {
+ return AsImage(cimage)->width();
+}
+
+int sk_image_get_height(const sk_image_t* cimage) {
+ return AsImage(cimage)->height();
+}
+
+uint32_t sk_image_get_unique_id(const sk_image_t* cimage) {
+ return AsImage(cimage)->uniqueID();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+sk_path_t* sk_path_new() { return (sk_path_t*)new SkPath; }
+
+void sk_path_delete(sk_path_t* cpath) { delete as_path(cpath); }
+
+void sk_path_move_to(sk_path_t* cpath, float x, float y) {
+ as_path(cpath)->moveTo(x, y);
+}
+
+void sk_path_line_to(sk_path_t* cpath, float x, float y) {
+ as_path(cpath)->lineTo(x, y);
+}
+
+void sk_path_quad_to(sk_path_t* cpath, float x0, float y0, float x1, float y1) {
+ as_path(cpath)->quadTo(x0, y0, x1, y1);
+}
+
+void sk_path_conic_to(sk_path_t* cpath, float x0, float y0, float x1, float y1, float w) {
+ as_path(cpath)->conicTo(x0, y0, x1, y1, w);
+}
+
+void sk_path_cubic_to(sk_path_t* cpath, float x0, float y0, float x1, float y1, float x2, float y2) {
+ as_path(cpath)->cubicTo(x0, y0, x1, y1, x2, y2);
+}
+
+void sk_path_close(sk_path_t* cpath) {
+ as_path(cpath)->close();
+}
+
+void sk_path_add_rect(sk_path_t* cpath, const sk_rect_t* crect, sk_path_direction_t cdir) {
+ SkPath::Direction dir;
+ if (!from_c_path_direction(cdir, &dir)) {
+ return;
+ }
+ as_path(cpath)->addRect(AsRect(*crect), dir);
+}
+
+void sk_path_add_oval(sk_path_t* cpath, const sk_rect_t* crect, sk_path_direction_t cdir) {
+ SkPath::Direction dir;
+ if (!from_c_path_direction(cdir, &dir)) {
+ return;
+ }
+ as_path(cpath)->addOval(AsRect(*crect), dir);
+}
+
+bool sk_path_get_bounds(const sk_path_t* cpath, sk_rect_t* crect) {
+ const SkPath& path = AsPath(*cpath);
+
+ if (path.isEmpty()) {
+ if (crect) {
+ *crect = ToRect(SkRect::MakeEmpty());
+ }
+ return false;
+ }
+
+ if (crect) {
+ *crect = ToRect(path.getBounds());
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+void sk_canvas_save(sk_canvas_t* ccanvas) {
+ AsCanvas(ccanvas)->save();
+}
+
+void sk_canvas_save_layer(sk_canvas_t* ccanvas, const sk_rect_t* crect, const sk_paint_t* cpaint) {
+ AsCanvas(ccanvas)->saveLayer(&AsRect(*crect), AsPaint(cpaint));
+}
+
+void sk_canvas_restore(sk_canvas_t* ccanvas) {
+ AsCanvas(ccanvas)->restore();
+}
+
+void sk_canvas_translate(sk_canvas_t* ccanvas, float dx, float dy) {
+ AsCanvas(ccanvas)->translate(dx, dy);
+}
+
+void sk_canvas_scale(sk_canvas_t* ccanvas, float sx, float sy) {
+ AsCanvas(ccanvas)->scale(sx, sy);
+}
+
+void sk_canvas_rotate_degress(sk_canvas_t* ccanvas, float degrees) {
+ AsCanvas(ccanvas)->rotate(degrees);
+}
+
+void sk_canvas_rotate_radians(sk_canvas_t* ccanvas, float radians) {
+ AsCanvas(ccanvas)->rotate(SkRadiansToDegrees(radians));
+}
+
+void sk_canvas_skew(sk_canvas_t* ccanvas, float sx, float sy) {
+ AsCanvas(ccanvas)->skew(sx, sy);
+}
+
+void sk_canvas_concat(sk_canvas_t* ccanvas, const sk_matrix_t* cmatrix) {
+ SkASSERT(cmatrix);
+ SkMatrix matrix;
+ from_c_matrix(cmatrix, &matrix);
+ AsCanvas(ccanvas)->concat(matrix);
+}
+
+void sk_canvas_clip_rect(sk_canvas_t* ccanvas, const sk_rect_t* crect) {
+ AsCanvas(ccanvas)->clipRect(AsRect(*crect));
+}
+
+void sk_canvas_clip_path(sk_canvas_t* ccanvas, const sk_path_t* cpath) {
+ AsCanvas(ccanvas)->clipPath(AsPath(*cpath));
+}
+
+void sk_canvas_draw_paint(sk_canvas_t* ccanvas, const sk_paint_t* cpaint) {
+ AsCanvas(ccanvas)->drawPaint(AsPaint(*cpaint));
+}
+
+void sk_canvas_draw_rect(sk_canvas_t* ccanvas, const sk_rect_t* crect, const sk_paint_t* cpaint) {
+ AsCanvas(ccanvas)->drawRect(AsRect(*crect), AsPaint(*cpaint));
+}
+
+void sk_canvas_draw_circle(sk_canvas_t* ccanvas, float cx, float cy, float rad,
+ const sk_paint_t* cpaint) {
+ AsCanvas(ccanvas)->drawCircle(cx, cy, rad, AsPaint(*cpaint));
+}
+
+void sk_canvas_draw_oval(sk_canvas_t* ccanvas, const sk_rect_t* crect, const sk_paint_t* cpaint) {
+ AsCanvas(ccanvas)->drawOval(AsRect(*crect), AsPaint(*cpaint));
+}
+
+void sk_canvas_draw_path(sk_canvas_t* ccanvas, const sk_path_t* cpath, const sk_paint_t* cpaint) {
+ AsCanvas(ccanvas)->drawPath(AsPath(*cpath), AsPaint(*cpaint));
+}
+
+void sk_canvas_draw_image(sk_canvas_t* ccanvas, const sk_image_t* cimage, float x, float y,
+ const sk_paint_t* cpaint) {
+ AsCanvas(ccanvas)->drawImage(AsImage(cimage), x, y, AsPaint(cpaint));
+}
+
+void sk_canvas_draw_image_rect(sk_canvas_t* ccanvas, const sk_image_t* cimage,
+ const sk_rect_t* csrcR, const sk_rect_t* cdstR,
+ const sk_paint_t* cpaint) {
+ SkCanvas* canvas = AsCanvas(ccanvas);
+ const SkImage* image = AsImage(cimage);
+ const SkRect& dst = AsRect(*cdstR);
+ const SkPaint* paint = AsPaint(cpaint);
+
+ if (csrcR) {
+ canvas->drawImageRect(image, AsRect(*csrcR), dst, paint);
+ } else {
+ canvas->drawImageRect(image, dst, paint);
+ }
+}
+
+void sk_canvas_draw_picture(sk_canvas_t* ccanvas, const sk_picture_t* cpicture,
+ const sk_matrix_t* cmatrix, const sk_paint_t* cpaint) {
+ const SkMatrix* matrixPtr = NULL;
+ SkMatrix matrix;
+ if (cmatrix) {
+ from_c_matrix(cmatrix, &matrix);
+ matrixPtr = &matrix;
+ }
+ AsCanvas(ccanvas)->drawPicture(AsPicture(cpicture), matrixPtr, AsPaint(cpaint));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+sk_surface_t* sk_surface_new_raster(const sk_imageinfo_t* cinfo,
+ const sk_surfaceprops_t* props) {
+ SkImageInfo info;
+ if (!from_c_info(*cinfo, &info)) {
+ return NULL;
+ }
+ SkPixelGeometry geo = kUnknown_SkPixelGeometry;
+ if (props && !from_c_pixelgeometry(props->pixelGeometry, &geo)) {
+ return NULL;
+ }
+
+ SkSurfaceProps surfProps(0, geo);
+ return (sk_surface_t*)SkSurface::MakeRaster(info, &surfProps).release();
+}
+
+sk_surface_t* sk_surface_new_raster_direct(const sk_imageinfo_t* cinfo, void* pixels,
+ size_t rowBytes,
+ const sk_surfaceprops_t* props) {
+ SkImageInfo info;
+ if (!from_c_info(*cinfo, &info)) {
+ return NULL;
+ }
+ SkPixelGeometry geo = kUnknown_SkPixelGeometry;
+ if (props && !from_c_pixelgeometry(props->pixelGeometry, &geo)) {
+ return NULL;
+ }
+
+ SkSurfaceProps surfProps(0, geo);
+ return (sk_surface_t*)SkSurface::MakeRasterDirect(info, pixels, rowBytes, &surfProps).release();
+}
+
+void sk_surface_unref(sk_surface_t* csurf) {
+ SkSafeUnref((SkSurface*)csurf);
+}
+
+sk_canvas_t* sk_surface_get_canvas(sk_surface_t* csurf) {
+ SkSurface* surf = (SkSurface*)csurf;
+ return (sk_canvas_t*)surf->getCanvas();
+}
+
+sk_image_t* sk_surface_new_image_snapshot(sk_surface_t* csurf) {
+ SkSurface* surf = (SkSurface*)csurf;
+ return (sk_image_t*)surf->makeImageSnapshot().release();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+sk_picture_recorder_t* sk_picture_recorder_new() {
+ return ToPictureRecorder(new SkPictureRecorder);
+}
+
+void sk_picture_recorder_delete(sk_picture_recorder_t* crec) {
+ delete AsPictureRecorder(crec);
+}
+
+sk_canvas_t* sk_picture_recorder_begin_recording(sk_picture_recorder_t* crec,
+ const sk_rect_t* cbounds) {
+ return ToCanvas(AsPictureRecorder(crec)->beginRecording(AsRect(*cbounds)));
+}
+
+sk_picture_t* sk_picture_recorder_end_recording(sk_picture_recorder_t* crec) {
+ return ToPicture(AsPictureRecorder(crec)->finishRecordingAsPicture().release());
+}
+
+void sk_picture_ref(sk_picture_t* cpic) {
+ SkSafeRef(AsPicture(cpic));
+}
+
+void sk_picture_unref(sk_picture_t* cpic) {
+ SkSafeUnref(AsPicture(cpic));
+}
+
+uint32_t sk_picture_get_unique_id(sk_picture_t* cpic) {
+ return AsPicture(cpic)->uniqueID();
+}
+
+sk_rect_t sk_picture_get_bounds(sk_picture_t* cpic) {
+ return ToRect(AsPicture(cpic)->cullRect());
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+#include "../../include/effects/SkGradientShader.h"
+#include "sk_shader.h"
+
+const struct {
+ sk_shader_tilemode_t fC;
+ SkShader::TileMode fSK;
+} gTileModeMap[] = {
+ { CLAMP_SK_SHADER_TILEMODE, SkShader::kClamp_TileMode },
+ { REPEAT_SK_SHADER_TILEMODE, SkShader::kRepeat_TileMode },
+ { MIRROR_SK_SHADER_TILEMODE, SkShader::kMirror_TileMode },
+};
+
+static bool from_c_tilemode(sk_shader_tilemode_t cMode, SkShader::TileMode* skMode) {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gTileModeMap); ++i) {
+ if (cMode == gTileModeMap[i].fC) {
+ if (skMode) {
+ *skMode = gTileModeMap[i].fSK;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+void sk_shader_ref(sk_shader_t* cshader) {
+ SkSafeRef(AsShader(cshader));
+}
+
+void sk_shader_unref(sk_shader_t* cshader) {
+ SkSafeUnref(AsShader(cshader));
+}
+
+sk_shader_t* sk_shader_new_linear_gradient(const sk_point_t pts[2],
+ const sk_color_t colors[],
+ const float colorPos[],
+ int colorCount,
+ sk_shader_tilemode_t cmode,
+ const sk_matrix_t* cmatrix) {
+ SkShader::TileMode mode;
+ if (!from_c_tilemode(cmode, &mode)) {
+ return NULL;
+ }
+ SkMatrix matrix;
+ if (cmatrix) {
+ from_c_matrix(cmatrix, &matrix);
+ } else {
+ matrix.setIdentity();
+ }
+ return (sk_shader_t*)SkGradientShader::MakeLinear(reinterpret_cast<const SkPoint*>(pts),
+ reinterpret_cast<const SkColor*>(colors),
+ colorPos, colorCount,
+ mode, 0, &matrix).release();
+}
+
+static const SkPoint& to_skpoint(const sk_point_t& p) {
+ return reinterpret_cast<const SkPoint&>(p);
+}
+
+sk_shader_t* sk_shader_new_radial_gradient(const sk_point_t* ccenter,
+ float radius,
+ const sk_color_t colors[],
+ const float colorPos[],
+ int colorCount,
+ sk_shader_tilemode_t cmode,
+ const sk_matrix_t* cmatrix) {
+ SkShader::TileMode mode;
+ if (!from_c_tilemode(cmode, &mode)) {
+ return NULL;
+ }
+ SkMatrix matrix;
+ if (cmatrix) {
+ from_c_matrix(cmatrix, &matrix);
+ } else {
+ matrix.setIdentity();
+ }
+ SkPoint center = to_skpoint(*ccenter);
+ return (sk_shader_t*)SkGradientShader::MakeRadial(center, (SkScalar)radius,
+ reinterpret_cast<const SkColor*>(colors),
+ reinterpret_cast<const SkScalar*>(colorPos),
+ colorCount, mode, 0, &matrix).release();
+}
+
+sk_shader_t* sk_shader_new_sweep_gradient(const sk_point_t* ccenter,
+ const sk_color_t colors[],
+ const float colorPos[],
+ int colorCount,
+ const sk_matrix_t* cmatrix) {
+ SkMatrix matrix;
+ if (cmatrix) {
+ from_c_matrix(cmatrix, &matrix);
+ } else {
+ matrix.setIdentity();
+ }
+ return (sk_shader_t*)SkGradientShader::MakeSweep((SkScalar)(ccenter->x),
+ (SkScalar)(ccenter->y),
+ reinterpret_cast<const SkColor*>(colors),
+ reinterpret_cast<const SkScalar*>(colorPos),
+ colorCount, 0, &matrix).release();
+}
+
+sk_shader_t* sk_shader_new_two_point_conical_gradient(const sk_point_t* start,
+ float startRadius,
+ const sk_point_t* end,
+ float endRadius,
+ const sk_color_t colors[],
+ const float colorPos[],
+ int colorCount,
+ sk_shader_tilemode_t cmode,
+ const sk_matrix_t* cmatrix) {
+ SkShader::TileMode mode;
+ if (!from_c_tilemode(cmode, &mode)) {
+ return NULL;
+ }
+ SkMatrix matrix;
+ if (cmatrix) {
+ from_c_matrix(cmatrix, &matrix);
+ } else {
+ matrix.setIdentity();
+ }
+ SkPoint skstart = to_skpoint(*start);
+ SkPoint skend = to_skpoint(*end);
+ return (sk_shader_t*)SkGradientShader::MakeTwoPointConical(skstart, (SkScalar)startRadius,
+ skend, (SkScalar)endRadius,
+ reinterpret_cast<const SkColor*>(colors),
+ reinterpret_cast<const SkScalar*>(colorPos),
+ colorCount, mode, 0, &matrix).release();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+#include "../../include/effects/SkBlurMaskFilter.h"
+#include "sk_maskfilter.h"
+
+const struct {
+ sk_blurstyle_t fC;
+ SkBlurStyle fSk;
+} gBlurStylePairs[] = {
+ { NORMAL_SK_BLUR_STYLE, kNormal_SkBlurStyle },
+ { SOLID_SK_BLUR_STYLE, kSolid_SkBlurStyle },
+ { OUTER_SK_BLUR_STYLE, kOuter_SkBlurStyle },
+ { INNER_SK_BLUR_STYLE, kInner_SkBlurStyle },
+};
+
+static bool find_blurstyle(sk_blurstyle_t csrc, SkBlurStyle* dst) {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gBlurStylePairs); ++i) {
+ if (gBlurStylePairs[i].fC == csrc) {
+ if (dst) {
+ *dst = gBlurStylePairs[i].fSk;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+void sk_maskfilter_ref(sk_maskfilter_t* cfilter) {
+ SkSafeRef(AsMaskFilter(cfilter));
+}
+
+void sk_maskfilter_unref(sk_maskfilter_t* cfilter) {
+ SkSafeUnref(AsMaskFilter(cfilter));
+}
+
+sk_maskfilter_t* sk_maskfilter_new_blur(sk_blurstyle_t cstyle, float sigma) {
+ SkBlurStyle style;
+ if (!find_blurstyle(cstyle, &style)) {
+ return NULL;
+ }
+ return ToMaskFilter(SkBlurMaskFilter::Make(style, sigma).release());
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+sk_data_t* sk_data_new_with_copy(const void* src, size_t length) {
+ return ToData(SkData::MakeWithCopy(src, length).release());
+}
+
+sk_data_t* sk_data_new_from_malloc(const void* memory, size_t length) {
+ return ToData(SkData::MakeFromMalloc(memory, length).release());
+}
+
+sk_data_t* sk_data_new_subset(const sk_data_t* csrc, size_t offset, size_t length) {
+ return ToData(SkData::MakeSubset(AsData(csrc), offset, length).release());
+}
+
+void sk_data_ref(const sk_data_t* cdata) {
+ SkSafeRef(AsData(cdata));
+}
+
+void sk_data_unref(const sk_data_t* cdata) {
+ SkSafeUnref(AsData(cdata));
+}
+
+size_t sk_data_get_size(const sk_data_t* cdata) {
+ return AsData(cdata)->size();
+}
+
+const void* sk_data_get_data(const sk_data_t* cdata) {
+ return AsData(cdata)->data();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
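An end-to-end sketch of the raster path through the C API above: make a surface, draw through its canvas, snapshot to an image, and encode it. Only entry points defined in this file and in sk_paint.cpp above are used; the sk_imageinfo_t field names follow the from_c_info() accesses above, and the function name is illustrative:

    #include "sk_surface.h"
    #include "sk_canvas.h"
    #include "sk_image.h"
    #include "sk_data.h"
    #include "sk_paint.h"

    static void surface_usage_sketch(void) {
        sk_imageinfo_t info;
        info.width = 256;
        info.height = 256;
        info.colorType = sk_colortype_get_default_8888();
        info.alphaType = PREMUL_SK_ALPHATYPE;

        sk_surface_t* surface = sk_surface_new_raster(&info, NULL);
        sk_canvas_t* canvas = sk_surface_get_canvas(surface);     // owned by the surface

        sk_paint_t* paint = sk_paint_new();
        sk_paint_set_color(paint, 0xFF4285F4);
        sk_canvas_draw_paint(canvas, paint);                      // flood fill the canvas
        sk_paint_set_color(paint, 0xFFFFFFFF);
        sk_canvas_draw_circle(canvas, 128.0f, 128.0f, 90.0f, paint);

        sk_image_t* image = sk_surface_new_image_snapshot(surface);
        sk_data_t* encoded = sk_image_encode(image);              // caller owns the returned data

        sk_data_unref(encoded);
        sk_image_unref(image);
        sk_paint_delete(paint);
        sk_surface_unref(surface);
    }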
diff --git a/gfx/skia/skia/src/c/sk_types_priv.h b/gfx/skia/skia/src/c/sk_types_priv.h
new file mode 100644
index 000000000..92089d726
--- /dev/null
+++ b/gfx/skia/skia/src/c/sk_types_priv.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef sk_types_priv_DEFINED
+#define sk_types_priv_DEFINED
+
+#include "sk_types.h"
+
+class SkMaskFilter;
+class SkPaint;
+class SkShader;
+
+static inline const SkPaint& AsPaint(const sk_paint_t& cpaint) {
+ return reinterpret_cast<const SkPaint&>(cpaint);
+}
+
+static inline const SkPaint* AsPaint(const sk_paint_t* cpaint) {
+ return reinterpret_cast<const SkPaint*>(cpaint);
+}
+
+static inline SkPaint* AsPaint(sk_paint_t* cpaint) {
+ return reinterpret_cast<SkPaint*>(cpaint);
+}
+
+static inline SkMaskFilter* AsMaskFilter(sk_maskfilter_t* cfilter) {
+ return reinterpret_cast<SkMaskFilter*>(cfilter);
+}
+
+static inline sk_maskfilter_t* ToMaskFilter(SkMaskFilter* filter) {
+ return reinterpret_cast<sk_maskfilter_t*>(filter);
+}
+
+static inline SkShader* AsShader(sk_shader_t* cshader) {
+ return reinterpret_cast<SkShader*>(cshader);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkAndroidCodec.cpp b/gfx/skia/skia/src/codec/SkAndroidCodec.cpp
new file mode 100644
index 000000000..23242433b
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkAndroidCodec.cpp
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAndroidCodec.h"
+#include "SkCodec.h"
+#include "SkCodecPriv.h"
+#include "SkRawAdapterCodec.h"
+#include "SkSampledCodec.h"
+#include "SkWebpAdapterCodec.h"
+
+static bool is_valid_sample_size(int sampleSize) {
+ // FIXME: As Leon has mentioned elsewhere, surely there is also a maximum sampleSize?
+ return sampleSize > 0;
+}
+
+SkAndroidCodec::SkAndroidCodec(SkCodec* codec)
+ : fInfo(codec->getInfo())
+ , fCodec(codec)
+{}
+
+SkAndroidCodec* SkAndroidCodec::NewFromStream(SkStream* stream, SkPngChunkReader* chunkReader) {
+ SkAutoTDelete<SkCodec> codec(SkCodec::NewFromStream(stream, chunkReader));
+ if (nullptr == codec) {
+ return nullptr;
+ }
+
+ switch (codec->getEncodedFormat()) {
+#ifdef SK_HAS_PNG_LIBRARY
+ case kPNG_SkEncodedFormat:
+ case kICO_SkEncodedFormat:
+#endif
+#ifdef SK_HAS_JPEG_LIBRARY
+ case kJPEG_SkEncodedFormat:
+#endif
+#ifdef SK_HAS_GIF_LIBRARY
+ case kGIF_SkEncodedFormat:
+#endif
+ case kBMP_SkEncodedFormat:
+ case kWBMP_SkEncodedFormat:
+ return new SkSampledCodec(codec.release());
+#ifdef SK_HAS_WEBP_LIBRARY
+ case kWEBP_SkEncodedFormat:
+ return new SkWebpAdapterCodec((SkWebpCodec*) codec.release());
+#endif
+#ifdef SK_CODEC_DECODES_RAW
+ case kDNG_SkEncodedFormat:
+ return new SkRawAdapterCodec((SkRawCodec*)codec.release());
+#endif
+ default:
+ return nullptr;
+ }
+}
+
+SkAndroidCodec* SkAndroidCodec::NewFromData(sk_sp<SkData> data, SkPngChunkReader* chunkReader) {
+ if (!data) {
+ return nullptr;
+ }
+
+ return NewFromStream(new SkMemoryStream(data), chunkReader);
+}
+
+SkColorType SkAndroidCodec::computeOutputColorType(SkColorType requestedColorType) {
+ // The legacy GIF and WBMP decoders always decode to kIndex_8_SkColorType.
+ // We will maintain this behavior.
+ SkEncodedFormat format = this->getEncodedFormat();
+ if (kGIF_SkEncodedFormat == format || kWBMP_SkEncodedFormat == format) {
+ return kIndex_8_SkColorType;
+ }
+
+ SkColorType suggestedColorType = this->getInfo().colorType();
+ switch (requestedColorType) {
+ case kARGB_4444_SkColorType:
+ case kN32_SkColorType:
+ return kN32_SkColorType;
+ case kIndex_8_SkColorType:
+ if (kIndex_8_SkColorType == suggestedColorType) {
+ return kIndex_8_SkColorType;
+ }
+ break;
+ case kAlpha_8_SkColorType:
+ // Fall through to kGray_8. Before kGray_8_SkColorType existed,
+ // we allowed clients to request kAlpha_8 when they wanted a
+ // grayscale decode.
+ case kGray_8_SkColorType:
+ if (kGray_8_SkColorType == suggestedColorType) {
+ return kGray_8_SkColorType;
+ }
+ break;
+ case kRGB_565_SkColorType:
+ if (kOpaque_SkAlphaType == this->getInfo().alphaType()) {
+ return kRGB_565_SkColorType;
+ }
+ break;
+ default:
+ break;
+ }
+
+ // Android has limited support for kGray_8 (using kAlpha_8). We will not
+ // use kGray_8 for Android unless they specifically ask for it.
+ if (kGray_8_SkColorType == suggestedColorType) {
+ return kN32_SkColorType;
+ }
+
+ // This may be kN32_SkColorType or kIndex_8_SkColorType.
+ return suggestedColorType;
+}
+
+SkAlphaType SkAndroidCodec::computeOutputAlphaType(bool requestedUnpremul) {
+ if (kOpaque_SkAlphaType == this->getInfo().alphaType()) {
+ return kOpaque_SkAlphaType;
+ }
+ return requestedUnpremul ? kUnpremul_SkAlphaType : kPremul_SkAlphaType;
+}
+
+SkISize SkAndroidCodec::getSampledDimensions(int sampleSize) const {
+ if (!is_valid_sample_size(sampleSize)) {
+ return SkISize::Make(0, 0);
+ }
+
+ // Fast path for when we are not scaling.
+ if (1 == sampleSize) {
+ return fInfo.dimensions();
+ }
+
+ return this->onGetSampledDimensions(sampleSize);
+}
+
+bool SkAndroidCodec::getSupportedSubset(SkIRect* desiredSubset) const {
+ if (!desiredSubset || !is_valid_subset(*desiredSubset, fInfo.dimensions())) {
+ return false;
+ }
+
+ return this->onGetSupportedSubset(desiredSubset);
+}
+
+SkISize SkAndroidCodec::getSampledSubsetDimensions(int sampleSize, const SkIRect& subset) const {
+ if (!is_valid_sample_size(sampleSize)) {
+ return SkISize::Make(0, 0);
+ }
+
+ // We require that the input subset is a subset that is supported by SkAndroidCodec.
+ // We test this by calling getSupportedSubset() and verifying that no modifications
+ // are made to the subset.
+ SkIRect copySubset = subset;
+ if (!this->getSupportedSubset(&copySubset) || copySubset != subset) {
+ return SkISize::Make(0, 0);
+ }
+
+ // If the subset is the entire image, for consistency, use getSampledDimensions().
+ if (fInfo.dimensions() == subset.size()) {
+ return this->getSampledDimensions(sampleSize);
+ }
+
+ // This should perhaps call a virtual function, but currently both of our subclasses
+ // want the same implementation.
+ return SkISize::Make(get_scaled_dimension(subset.width(), sampleSize),
+ get_scaled_dimension(subset.height(), sampleSize));
+}
+
+SkCodec::Result SkAndroidCodec::getAndroidPixels(const SkImageInfo& info, void* pixels,
+ size_t rowBytes, const AndroidOptions* options) {
+ if (!pixels) {
+ return SkCodec::kInvalidParameters;
+ }
+ if (rowBytes < info.minRowBytes()) {
+ return SkCodec::kInvalidParameters;
+ }
+
+ AndroidOptions defaultOptions;
+ if (!options) {
+ options = &defaultOptions;
+ } else if (options->fSubset) {
+ if (!is_valid_subset(*options->fSubset, fInfo.dimensions())) {
+ return SkCodec::kInvalidParameters;
+ }
+
+ if (SkIRect::MakeSize(fInfo.dimensions()) == *options->fSubset) {
+ // The caller wants the whole thing, rather than a subset. Modify
+ // the AndroidOptions passed to onGetAndroidPixels to not specify
+ // a subset.
+ defaultOptions = *options;
+ defaultOptions.fSubset = nullptr;
+ options = &defaultOptions;
+ }
+ }
+
+ return this->onGetAndroidPixels(info, pixels, rowBytes, *options);
+}
+
+SkCodec::Result SkAndroidCodec::getAndroidPixels(const SkImageInfo& info, void* pixels,
+ size_t rowBytes) {
+ return this->getAndroidPixels(info, pixels, rowBytes, nullptr);
+}
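A sketch of a typical SkAndroidCodec decode driven through the entry points defined above. It assumes AndroidOptions carries an fSampleSize field matching the sample size passed to getSampledDimensions(); error handling is abbreviated and the function name is illustrative:

    #include "SkAndroidCodec.h"
    #include "SkCodec.h"
    #include "SkData.h"
    #include "SkTemplates.h"

    static bool decode_half_size_sketch(sk_sp<SkData> encoded, void** outPixels, SkImageInfo* outInfo) {
        SkAutoTDelete<SkAndroidCodec> codec(SkAndroidCodec::NewFromData(encoded, nullptr));
        if (nullptr == codec) {
            return false;
        }

        SkColorType ct = codec->computeOutputColorType(kN32_SkColorType);
        SkAlphaType at = codec->computeOutputAlphaType(false /*requestedUnpremul*/);
        SkISize dims   = codec->getSampledDimensions(2);          // roughly half-size decode

        SkImageInfo info = SkImageInfo::Make(dims.width(), dims.height(), ct, at);
        size_t rowBytes  = info.minRowBytes();
        void* pixels     = sk_malloc_throw(rowBytes * dims.height());

        SkAndroidCodec::AndroidOptions options;
        options.fSampleSize = 2;                                  // must match getSampledDimensions()
        if (SkCodec::kSuccess != codec->getAndroidPixels(info, pixels, rowBytes, &options)) {
            sk_free(pixels);
            return false;
        }
        *outPixels = pixels;
        *outInfo   = info;
        return true;
    }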
diff --git a/gfx/skia/skia/src/codec/SkBmpCodec.cpp b/gfx/skia/skia/src/codec/SkBmpCodec.cpp
new file mode 100644
index 000000000..2f796ad66
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkBmpCodec.cpp
@@ -0,0 +1,629 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBmpCodec.h"
+#include "SkBmpMaskCodec.h"
+#include "SkBmpRLECodec.h"
+#include "SkBmpStandardCodec.h"
+#include "SkCodecPriv.h"
+#include "SkColorPriv.h"
+#include "SkStream.h"
+
+/*
+ * Defines the version and type of the second bitmap header
+ */
+enum BmpHeaderType {
+ kInfoV1_BmpHeaderType,
+ kInfoV2_BmpHeaderType,
+ kInfoV3_BmpHeaderType,
+ kInfoV4_BmpHeaderType,
+ kInfoV5_BmpHeaderType,
+ kOS2V1_BmpHeaderType,
+ kOS2VX_BmpHeaderType,
+ kUnknown_BmpHeaderType
+};
+
+/*
+ * Possible bitmap compression types
+ */
+enum BmpCompressionMethod {
+ kNone_BmpCompressionMethod = 0,
+ k8BitRLE_BmpCompressionMethod = 1,
+ k4BitRLE_BmpCompressionMethod = 2,
+ kBitMasks_BmpCompressionMethod = 3,
+ kJpeg_BmpCompressionMethod = 4,
+ kPng_BmpCompressionMethod = 5,
+ kAlphaBitMasks_BmpCompressionMethod = 6,
+ kCMYK_BmpCompressionMethod = 11,
+ kCMYK8BitRLE_BmpCompressionMethod = 12,
+ kCMYK4BitRLE_BmpCompressionMethod = 13
+};
+
+/*
+ * Used to define the input format of the bmp
+ */
+enum BmpInputFormat {
+ kStandard_BmpInputFormat,
+ kRLE_BmpInputFormat,
+ kBitMask_BmpInputFormat,
+ kUnknown_BmpInputFormat
+};
+
+/*
+ * Checks the start of the stream to see if the image is a bitmap
+ */
+bool SkBmpCodec::IsBmp(const void* buffer, size_t bytesRead) {
+ // TODO: Support "IC", "PT", "CI", "CP", "BA"
+ const char bmpSig[] = { 'B', 'M' };
+ return bytesRead >= sizeof(bmpSig) && !memcmp(buffer, bmpSig, sizeof(bmpSig));
+}
+
+/*
+ * Assumes IsBmp was called and returned true
+ * Creates a bmp decoder
+ * Reads enough of the stream to determine the image format
+ */
+SkCodec* SkBmpCodec::NewFromStream(SkStream* stream) {
+ return SkBmpCodec::NewFromStream(stream, false);
+}
+
+/*
+ * Creates a bmp decoder for a bmp embedded in ico
+ * Reads enough of the stream to determine the image format
+ */
+SkCodec* SkBmpCodec::NewFromIco(SkStream* stream) {
+ return SkBmpCodec::NewFromStream(stream, true);
+}
+
+/*
+ * Read enough of the stream to initialize the SkBmpCodec. Returns a bool
+ * representing success or failure. If it returned true, and codecOut was
+ * not nullptr, it will be set to a new SkBmpCodec.
+ * Does *not* take ownership of the passed in SkStream.
+ */
+bool SkBmpCodec::ReadHeader(SkStream* stream, bool inIco, SkCodec** codecOut) {
+ // Header size constants
+ static const uint32_t kBmpHeaderBytes = 14;
+ static const uint32_t kBmpHeaderBytesPlusFour = kBmpHeaderBytes + 4;
+ static const uint32_t kBmpOS2V1Bytes = 12;
+ static const uint32_t kBmpOS2V2Bytes = 64;
+ static const uint32_t kBmpInfoBaseBytes = 16;
+ static const uint32_t kBmpInfoV1Bytes = 40;
+ static const uint32_t kBmpInfoV2Bytes = 52;
+ static const uint32_t kBmpInfoV3Bytes = 56;
+ static const uint32_t kBmpInfoV4Bytes = 108;
+ static const uint32_t kBmpInfoV5Bytes = 124;
+ static const uint32_t kBmpMaskBytes = 12;
+
+ // The total bytes in the bmp file
+ // We only need to use this value for RLE decoding, so we will only
+ // check that it is valid in the RLE case.
+ uint32_t totalBytes;
+ // The offset from the start of the file where the pixel data begins
+ uint32_t offset;
+ // The size of the second (info) header in bytes
+ uint32_t infoBytes;
+
+ // Bmps embedded in Icos skip the first Bmp header
+ if (!inIco) {
+ // Read the first header and the size of the second header
+ SkAutoTDeleteArray<uint8_t> hBuffer(new uint8_t[kBmpHeaderBytesPlusFour]);
+ if (stream->read(hBuffer.get(), kBmpHeaderBytesPlusFour) !=
+ kBmpHeaderBytesPlusFour) {
+ SkCodecPrintf("Error: unable to read first bitmap header.\n");
+ return false;
+ }
+
+ totalBytes = get_int(hBuffer.get(), 2);
+ offset = get_int(hBuffer.get(), 10);
+ if (offset < kBmpHeaderBytes + kBmpOS2V1Bytes) {
+ SkCodecPrintf("Error: invalid starting location for pixel data\n");
+ return false;
+ }
+
+ // The size of the second (info) header in bytes
+ // The size is the first field of the second header, so we have already
+ // read the first four infoBytes.
+ infoBytes = get_int(hBuffer.get(), 14);
+ if (infoBytes < kBmpOS2V1Bytes) {
+ SkCodecPrintf("Error: invalid second header size.\n");
+ return false;
+ }
+ } else {
+ // This value is only used by RLE compression. Bmp in Ico files do not
+ // use RLE. If the compression field is incorrectly signaled as RLE,
+ // we will catch this and signal an error below.
+ totalBytes = 0;
+
+ // Bmps in Ico cannot specify an offset. We will always assume that
+ // pixel data begins immediately after the color table. This value
+ // will be corrected below.
+ offset = 0;
+
+ // Read the size of the second header
+ SkAutoTDeleteArray<uint8_t> hBuffer(new uint8_t[4]);
+ if (stream->read(hBuffer.get(), 4) != 4) {
+ SkCodecPrintf("Error: unable to read size of second bitmap header.\n");
+ return false;
+ }
+ infoBytes = get_int(hBuffer.get(), 0);
+ if (infoBytes < kBmpOS2V1Bytes) {
+ SkCodecPrintf("Error: invalid second header size.\n");
+ return false;
+ }
+ }
+
+ // We already read the first four bytes of the info header to get the size
+ const uint32_t infoBytesRemaining = infoBytes - 4;
+
+ // Read the second header
+ SkAutoTDeleteArray<uint8_t> iBuffer(new uint8_t[infoBytesRemaining]);
+ if (stream->read(iBuffer.get(), infoBytesRemaining) != infoBytesRemaining) {
+ SkCodecPrintf("Error: unable to read second bitmap header.\n");
+ return false;
+ }
+
+ // The number of bits used per pixel in the pixel data
+ uint16_t bitsPerPixel;
+
+ // The compression method for the pixel data
+ uint32_t compression = kNone_BmpCompressionMethod;
+
+ // Number of colors in the color table, defaults to 0 or max (see below)
+ uint32_t numColors = 0;
+
+ // Bytes per color in the color table, early versions use 3, most use 4
+ uint32_t bytesPerColor;
+
+ // The image width and height
+ int width, height;
+
+ // Determine image information depending on second header format
+ BmpHeaderType headerType;
+ if (infoBytes >= kBmpInfoBaseBytes) {
+ // Check the version of the header
+ switch (infoBytes) {
+ case kBmpInfoV1Bytes:
+ headerType = kInfoV1_BmpHeaderType;
+ break;
+ case kBmpInfoV2Bytes:
+ headerType = kInfoV2_BmpHeaderType;
+ break;
+ case kBmpInfoV3Bytes:
+ headerType = kInfoV3_BmpHeaderType;
+ break;
+ case kBmpInfoV4Bytes:
+ headerType = kInfoV4_BmpHeaderType;
+ break;
+ case kBmpInfoV5Bytes:
+ headerType = kInfoV5_BmpHeaderType;
+ break;
+ case 16:
+ case 20:
+ case 24:
+ case 28:
+ case 32:
+ case 36:
+ case 42:
+ case 46:
+ case 48:
+ case 60:
+ case kBmpOS2V2Bytes:
+ headerType = kOS2VX_BmpHeaderType;
+ break;
+ default:
+ // We do not signal an error here because there is the
+ // possibility of new or undocumented bmp header types. Most
+ // of the newer versions of bmp headers are similar to and
+ // build off of the older versions, so we may still be able to
+ // decode the bmp.
+ SkCodecPrintf("Warning: unknown bmp header format.\n");
+ headerType = kUnknown_BmpHeaderType;
+ break;
+ }
+ // We check the size of the header before entering the if statement.
+ // We should not reach this point unless the size is large enough for
+ // these required fields.
+ SkASSERT(infoBytesRemaining >= 12);
+ width = get_int(iBuffer.get(), 0);
+ height = get_int(iBuffer.get(), 4);
+ bitsPerPixel = get_short(iBuffer.get(), 10);
+
+ // Some versions do not have these fields, so we check before
+ // overwriting the default value.
+ if (infoBytesRemaining >= 16) {
+ compression = get_int(iBuffer.get(), 12);
+ if (infoBytesRemaining >= 32) {
+ numColors = get_int(iBuffer.get(), 28);
+ }
+ }
+
+ // All of the headers that reach this point, store color table entries
+ // using 4 bytes per pixel.
+ bytesPerColor = 4;
+ } else if (infoBytes >= kBmpOS2V1Bytes) {
+ // The OS2V1 is treated separately because it has a unique format
+ headerType = kOS2V1_BmpHeaderType;
+ width = (int) get_short(iBuffer.get(), 0);
+ height = (int) get_short(iBuffer.get(), 2);
+ bitsPerPixel = get_short(iBuffer.get(), 6);
+ bytesPerColor = 3;
+ } else {
+ // There are no valid bmp headers
+ SkCodecPrintf("Error: second bitmap header size is invalid.\n");
+ return false;
+ }
+
+ // Check for valid dimensions from header
+ SkCodec::SkScanlineOrder rowOrder = SkCodec::kBottomUp_SkScanlineOrder;
+ if (height < 0) {
+ height = -height;
+ rowOrder = SkCodec::kTopDown_SkScanlineOrder;
+ }
+ // The height field for bmp in ico is double the actual height because they
+ // contain an XOR mask followed by an AND mask
+ if (inIco) {
+ height /= 2;
+ }
+ if (width <= 0 || height <= 0) {
+ // TODO: Decide if we want to disable really large bmps as well.
+ // https://code.google.com/p/skia/issues/detail?id=3617
+ SkCodecPrintf("Error: invalid bitmap dimensions.\n");
+ return false;
+ }
+
+ // Create mask struct
+ SkMasks::InputMasks inputMasks;
+ memset(&inputMasks, 0, sizeof(SkMasks::InputMasks));
+
+ // Determine the input compression format and set bit masks if necessary
+ uint32_t maskBytes = 0;
+ BmpInputFormat inputFormat = kUnknown_BmpInputFormat;
+ switch (compression) {
+ case kNone_BmpCompressionMethod:
+ inputFormat = kStandard_BmpInputFormat;
+
+ // In addition to more standard pixel compression formats, bmp supports
+ // the use of bit masks to determine pixel components. The standard
+ // format for representing 16-bit colors is 555 (XRRRRRGGGGGBBBBB),
+ // which does not map well to any Skia color formats. For this reason,
+ // we will always enable mask mode with 16 bits per pixel.
+ if (16 == bitsPerPixel) {
+ inputMasks.red = 0x7C00;
+ inputMasks.green = 0x03E0;
+ inputMasks.blue = 0x001F;
+ inputFormat = kBitMask_BmpInputFormat;
+ }
+ break;
+ case k8BitRLE_BmpCompressionMethod:
+ if (bitsPerPixel != 8) {
+ SkCodecPrintf("Warning: correcting invalid bitmap format.\n");
+ bitsPerPixel = 8;
+ }
+ inputFormat = kRLE_BmpInputFormat;
+ break;
+ case k4BitRLE_BmpCompressionMethod:
+ if (bitsPerPixel != 4) {
+ SkCodecPrintf("Warning: correcting invalid bitmap format.\n");
+ bitsPerPixel = 4;
+ }
+ inputFormat = kRLE_BmpInputFormat;
+ break;
+ case kAlphaBitMasks_BmpCompressionMethod:
+ case kBitMasks_BmpCompressionMethod:
+ // Load the masks
+ inputFormat = kBitMask_BmpInputFormat;
+ switch (headerType) {
+ case kInfoV1_BmpHeaderType: {
+ // The V1 header stores the bit masks after the header
+ SkAutoTDeleteArray<uint8_t> mBuffer(new uint8_t[kBmpMaskBytes]);
+ if (stream->read(mBuffer.get(), kBmpMaskBytes) !=
+ kBmpMaskBytes) {
+ SkCodecPrintf("Error: unable to read bit inputMasks.\n");
+ return false;
+ }
+ maskBytes = kBmpMaskBytes;
+ inputMasks.red = get_int(mBuffer.get(), 0);
+ inputMasks.green = get_int(mBuffer.get(), 4);
+ inputMasks.blue = get_int(mBuffer.get(), 8);
+ break;
+ }
+ case kInfoV2_BmpHeaderType:
+ case kInfoV3_BmpHeaderType:
+ case kInfoV4_BmpHeaderType:
+ case kInfoV5_BmpHeaderType:
+ // Header types are matched based on size. If the header
+ // is V2+, we are guaranteed to be able to read at least
+ // this size.
+ SkASSERT(infoBytesRemaining >= 48);
+ inputMasks.red = get_int(iBuffer.get(), 36);
+ inputMasks.green = get_int(iBuffer.get(), 40);
+ inputMasks.blue = get_int(iBuffer.get(), 44);
+
+ if (kInfoV2_BmpHeaderType == headerType ||
+ (kInfoV3_BmpHeaderType == headerType && !inIco)) {
+ break;
+ }
+
+ // V3+ bmp files introduce an alpha mask and allow the creator of the image
+ // to use the alpha channels. However, many of these images leave the
+ // alpha channel blank and expect to be rendered as opaque. This is the
+ // case for almost all V3 images, so we ignore the alpha mask. For V4+
+ // images in kMask mode, we will use the alpha mask. Additionally, V3
+ // bmp-in-ico expect us to use the alpha mask.
+ //
+ // skbug.com/4116: We should perhaps also apply the alpha mask in kStandard
+ // mode. We just haven't seen any images that expect this
+ // behavior.
+ //
+ // Header types are matched based on size. If the header is
+ // V3+, we are guaranteed to be able to read at least this size.
+ SkASSERT(infoBytesRemaining >= 52);
+ inputMasks.alpha = get_int(iBuffer.get(), 48);
+ break;
+ case kOS2VX_BmpHeaderType:
+ // TODO: Decide if we intend to support this.
+ // It is unsupported in the previous version and
+ // in chromium. I have not come across a test case
+ // that uses this format.
+ SkCodecPrintf("Error: huffman format unsupported.\n");
+ return false;
+ default:
+ SkCodecPrintf("Error: invalid bmp bit masks header.\n");
+ return false;
+ }
+ break;
+ case kJpeg_BmpCompressionMethod:
+ if (24 == bitsPerPixel) {
+ inputFormat = kRLE_BmpInputFormat;
+ break;
+ }
+ // Fall through
+ case kPng_BmpCompressionMethod:
+ // TODO: Decide if we intend to support this.
+ // It is unsupported in the previous version and
+ // in chromium. I think it is used mostly for printers.
+ SkCodecPrintf("Error: compression format not supported.\n");
+ return false;
+ case kCMYK_BmpCompressionMethod:
+ case kCMYK8BitRLE_BmpCompressionMethod:
+ case kCMYK4BitRLE_BmpCompressionMethod:
+ // TODO: Same as above.
+ SkCodecPrintf("Error: CMYK not supported for bitmap decoding.\n");
+ return false;
+ default:
+ SkCodecPrintf("Error: invalid format for bitmap decoding.\n");
+ return false;
+ }
+ iBuffer.reset();
+
+ // Calculate the number of bytes read so far
+ const uint32_t bytesRead = kBmpHeaderBytes + infoBytes + maskBytes;
+ if (!inIco && offset < bytesRead) {
+ // TODO (msarett): Do we really want to fail if the offset in the header is invalid?
+ // Seems like we can just assume that the offset is zero and try to decode?
+ // Maybe we don't want to try to decode corrupt images?
+ SkCodecPrintf("Error: pixel data offset less than header size.\n");
+ return false;
+ }
+
+
+
+ switch (inputFormat) {
+ case kStandard_BmpInputFormat: {
+ // BMPs are generally opaque, however BMPs-in-ICOs may contain
+ // a transparency mask after the image. Therefore, we mark the
+ // alpha as kBinary if the BMP is contained in an ICO.
+ // We use |isOpaque| to indicate if the BMP itself is opaque.
+ SkEncodedInfo::Alpha alpha = inIco ? SkEncodedInfo::kBinary_Alpha :
+ SkEncodedInfo::kOpaque_Alpha;
+ bool isOpaque = true;
+
+ SkEncodedInfo::Color color;
+ uint8_t bitsPerComponent;
+ switch (bitsPerPixel) {
+ // Palette formats
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ // In the case of ICO, kBGRA is actually the closest match,
+ // since we will need to apply a transparency mask.
+ if (inIco) {
+ color = SkEncodedInfo::kBGRA_Color;
+ bitsPerComponent = 8;
+ } else {
+ color = SkEncodedInfo::kPalette_Color;
+ bitsPerComponent = (uint8_t) bitsPerPixel;
+ }
+ break;
+ case 24:
+ // In the case of ICO, kBGRA is actually the closest match,
+ // since we will need to apply a transparency mask.
+ color = inIco ? SkEncodedInfo::kBGRA_Color : SkEncodedInfo::kBGR_Color;
+ bitsPerComponent = 8;
+ break;
+ case 32:
+ // 32-bit BMP-in-ICOs actually use the alpha channel in place of a
+ // transparency mask.
+ if (inIco) {
+ isOpaque = false;
+ alpha = SkEncodedInfo::kUnpremul_Alpha;
+ color = SkEncodedInfo::kBGRA_Color;
+ } else {
+ color = SkEncodedInfo::kBGRX_Color;
+ }
+ bitsPerComponent = 8;
+ break;
+ default:
+ SkCodecPrintf("Error: invalid input value for bits per pixel.\n");
+ return false;
+ }
+
+ if (codecOut) {
+ // We require streams to have a memory base for Bmp-in-Ico decodes.
+ SkASSERT(!inIco || nullptr != stream->getMemoryBase());
+
+ // Set the image info and create a codec.
+ const SkEncodedInfo info = SkEncodedInfo::Make(color, alpha, bitsPerComponent);
+ *codecOut = new SkBmpStandardCodec(width, height, info, stream, bitsPerPixel,
+ numColors, bytesPerColor, offset - bytesRead, rowOrder, isOpaque, inIco);
+ }
+ return true;
+ }
+
+ case kBitMask_BmpInputFormat: {
+ // Bmp-in-Ico must be standard mode
+ if (inIco) {
+ SkCodecPrintf("Error: Icos may not use bit mask format.\n");
+ return false;
+ }
+
+ switch (bitsPerPixel) {
+ case 16:
+ case 24:
+ case 32:
+ break;
+ default:
+ SkCodecPrintf("Error: invalid input value for bits per pixel.\n");
+ return false;
+ }
+
+ // Skip to the start of the pixel array.
+ // We can do this here because there is no color table to read
+ // in bit mask mode.
+ if (stream->skip(offset - bytesRead) != offset - bytesRead) {
+ SkCodecPrintf("Error: unable to skip to image data.\n");
+ return false;
+ }
+
+ if (codecOut) {
+ // Check that input bit masks are valid and create the masks object
+ SkAutoTDelete<SkMasks> masks(SkMasks::CreateMasks(inputMasks, bitsPerPixel));
+ if (nullptr == masks) {
+ SkCodecPrintf("Error: invalid input masks.\n");
+ return false;
+ }
+
+ // Masked bmps are not a great fit for SkEncodedInfo, since they have
+ // arbitrary component orderings and bits per component. Here we choose
+ // somewhat reasonable values - it's ok that we don't match exactly
+ // because SkBmpMaskCodec has its own mask swizzler anyway.
+ SkEncodedInfo::Color color;
+ SkEncodedInfo::Alpha alpha;
+ if (masks->getAlphaMask()) {
+ color = SkEncodedInfo::kBGRA_Color;
+ alpha = SkEncodedInfo::kUnpremul_Alpha;
+ } else {
+ color = SkEncodedInfo::kBGR_Color;
+ alpha = SkEncodedInfo::kOpaque_Alpha;
+ }
+ const SkEncodedInfo info = SkEncodedInfo::Make(color, alpha, 8);
+ *codecOut = new SkBmpMaskCodec(width, height, info, stream, bitsPerPixel,
+ masks.release(), rowOrder);
+ }
+ return true;
+ }
+
+ case kRLE_BmpInputFormat: {
+ // We should not reach this point without a valid value of bitsPerPixel.
+ SkASSERT(4 == bitsPerPixel || 8 == bitsPerPixel || 24 == bitsPerPixel);
+
+ // Check for a valid number of total bytes when in RLE mode
+ if (totalBytes <= offset) {
+ SkCodecPrintf("Error: RLE requires valid input size.\n");
+ return false;
+ }
+ const size_t RLEBytes = totalBytes - offset;
+
+ // Bmp-in-Ico must be standard mode
+ // When inIco is true, this line cannot be reached, since we
+ // require that RLE Bmps have a valid number of totalBytes, and
+ // Icos skip the header that contains totalBytes.
+ SkASSERT(!inIco);
+
+ if (codecOut) {
+ // RLE inputs may skip pixels, leaving them as transparent. This
+ // is uncommon, but we cannot be certain that an RLE bmp will be
+ // opaque or that we will be able to represent it with a palette.
+ // For that reason, we always indicate that we are kBGRA.
+ const SkEncodedInfo info = SkEncodedInfo::Make(SkEncodedInfo::kBGRA_Color,
+ SkEncodedInfo::kBinary_Alpha, 8);
+ *codecOut = new SkBmpRLECodec(width, height, info, stream, bitsPerPixel, numColors,
+ bytesPerColor, offset - bytesRead, rowOrder, RLEBytes);
+ }
+ return true;
+ }
+ default:
+ SkASSERT(false);
+ return false;
+ }
+}
+
+/*
+ * Creates a bmp decoder
+ * Reads enough of the stream to determine the image format
+ */
+SkCodec* SkBmpCodec::NewFromStream(SkStream* stream, bool inIco) {
+ SkAutoTDelete<SkStream> streamDeleter(stream);
+ SkCodec* codec = nullptr;
+ if (ReadHeader(stream, inIco, &codec)) {
+ // codec has taken ownership of stream, so we do not need to
+ // delete it.
+ SkASSERT(codec);
+ streamDeleter.release();
+ return codec;
+ }
+ return nullptr;
+}
+
+SkBmpCodec::SkBmpCodec(int width, int height, const SkEncodedInfo& info, SkStream* stream,
+ uint16_t bitsPerPixel, SkCodec::SkScanlineOrder rowOrder)
+ : INHERITED(width, height, info, stream)
+ , fBitsPerPixel(bitsPerPixel)
+ , fRowOrder(rowOrder)
+ , fSrcRowBytes(SkAlign4(compute_row_bytes(width, fBitsPerPixel)))
+{}
+
+bool SkBmpCodec::onRewind() {
+ return SkBmpCodec::ReadHeader(this->stream(), this->inIco(), nullptr);
+}
+
+int32_t SkBmpCodec::getDstRow(int32_t y, int32_t height) const {
+ if (SkCodec::kTopDown_SkScanlineOrder == fRowOrder) {
+ return y;
+ }
+ SkASSERT(SkCodec::kBottomUp_SkScanlineOrder == fRowOrder);
+ return height - y - 1;
+}
+
+SkCodec::Result SkBmpCodec::onStartScanlineDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options, SkPMColor inputColorPtr[], int* inputColorCount) {
+ if (!conversion_possible_ignore_color_space(dstInfo, this->getInfo())) {
+ SkCodecPrintf("Error: cannot convert input type to output type.\n");
+ return kInvalidConversion;
+ }
+
+ return prepareToDecode(dstInfo, options, inputColorPtr, inputColorCount);
+}
+
+int SkBmpCodec::onGetScanlines(void* dst, int count, size_t rowBytes) {
+ // Create a new image info representing the portion of the image to decode
+ SkImageInfo rowInfo = this->dstInfo().makeWH(this->dstInfo().width(), count);
+
+ // Decode the requested rows
+ return this->decodeRows(rowInfo, dst, rowBytes, this->options());
+}
+
+bool SkBmpCodec::skipRows(int count) {
+ const size_t bytesToSkip = count * fSrcRowBytes;
+ return this->stream()->skip(bytesToSkip) == bytesToSkip;
+}
+
+bool SkBmpCodec::onSkipScanlines(int count) {
+ return this->skipRows(count);
+}
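
The factory above is normally reached through the generic SkCodec entry points defined later in this commit. As a rough usage sketch (not part of the patch; the helper name, the file path, and the choice of N32 output are illustrative assumptions), decoding a BMP from disk looks roughly like this:

    #include "SkCodec.h"
    #include "SkImageInfo.h"
    #include "SkStream.h"
    #include "SkTemplates.h"

    #include <vector>

    // Hypothetical helper: decode a BMP file into a 32-bit premultiplied buffer.
    bool DecodeBmpToN32(const char* path, std::vector<uint8_t>* pixels) {
        // NewFromStream() sniffs the header and, for BMP data, ends up in
        // SkBmpCodec::NewFromStream() above. The codec takes ownership of the stream.
        SkAutoTDelete<SkCodec> codec(SkCodec::NewFromStream(new SkFILEStream(path)));
        if (!codec) {
            return false;
        }

        // Ask for the native dimensions in N32 premultiplied color.
        const SkImageInfo info = codec->getInfo().makeColorType(kN32_SkColorType)
                                                 .makeAlphaType(kPremul_SkAlphaType);
        pixels->resize(info.getSafeSize(info.minRowBytes()));

        // No subset options and no client color table (only needed for kIndex_8).
        const SkCodec::Result result = codec->getPixels(info, pixels->data(),
                                                        info.minRowBytes(),
                                                        nullptr, nullptr, nullptr);
        return SkCodec::kSuccess == result || SkCodec::kIncompleteInput == result;
    }

kIncompleteInput is accepted here on the assumption that a partially decoded image is still useful to the caller.
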
diff --git a/gfx/skia/skia/src/codec/SkBmpCodec.h b/gfx/skia/skia/src/codec/SkBmpCodec.h
new file mode 100644
index 000000000..0ece7ad6c
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkBmpCodec.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkBmpCodec_DEFINED
+#define SkBmpCodec_DEFINED
+
+#include "SkCodec.h"
+#include "SkColorSpace.h"
+#include "SkColorTable.h"
+#include "SkImageInfo.h"
+#include "SkStream.h"
+#include "SkSwizzler.h"
+#include "SkTypes.h"
+
+/*
+ * This class enables code sharing between its bmp codec subclasses. The
+ * subclasses actually do the work.
+ */
+class SkBmpCodec : public SkCodec {
+public:
+ static bool IsBmp(const void*, size_t);
+
+ /*
+ * Assumes IsBmp was called and returned true
+ * Creates a bmp decoder
+ * Reads enough of the stream to determine the image format
+ */
+ static SkCodec* NewFromStream(SkStream*);
+
+ /*
+ * Creates a bmp decoder for a bmp embedded in ico
+ * Reads enough of the stream to determine the image format
+ */
+ static SkCodec* NewFromIco(SkStream*);
+
+protected:
+
+ SkBmpCodec(int width, int height, const SkEncodedInfo& info, SkStream* stream,
+ uint16_t bitsPerPixel, SkCodec::SkScanlineOrder rowOrder);
+
+ SkEncodedFormat onGetEncodedFormat() const override { return kBMP_SkEncodedFormat; }
+
+ /*
+ * Read enough of the stream to initialize the SkBmpCodec. Returns a bool
+ * representing success or failure. If it returned true, and codecOut was
+ * not nullptr, it will be set to a new SkBmpCodec.
+ * Does *not* take ownership of the passed in SkStream.
+ */
+ static bool ReadHeader(SkStream*, bool inIco, SkCodec** codecOut);
+
+ bool onRewind() override;
+
+ /*
+ * Returns whether this BMP is part of an ICO image.
+ */
+ bool inIco() const {
+ return this->onInIco();
+ }
+
+ virtual bool onInIco() const {
+ return false;
+ }
+
+ /*
+ * Get the destination row number corresponding to the encoded row number.
+ * For kTopDown, we simply return y, but for kBottomUp, the rows will be
+ * decoded in reverse order.
+ *
+ * @param y Iterates from 0 to height, indicating the current row.
+ * @param height The height of the current subset of the image that we are
+ * decoding. This is generally equal to the full height
+ * when we want to decode the full image, or one when we are
+ * sampling.
+ */
+ int32_t getDstRow(int32_t y, int32_t height) const;
+
+ /*
+ * Accessors used by subclasses
+ */
+ uint16_t bitsPerPixel() const { return fBitsPerPixel; }
+ SkScanlineOrder onGetScanlineOrder() const override { return fRowOrder; }
+ size_t srcRowBytes() const { return fSrcRowBytes; }
+
+ /*
+ * To be overridden by bmp subclasses, which provide unique implementations.
+ * Performs subclass specific setup.
+ *
+ * @param dstInfo Contains output information. Height specifies
+ * the total number of rows that will be decoded.
+ * @param options Additional options to pass to the decoder.
+ * @param inputColorPtr Client-provided memory for a color table. Must
+ * be enough for 256 colors. This will be
+ * populated with colors if the encoded image uses
+ * a color table.
+ * @param inputColorCount If the encoded image uses a color table, this
+ * will be set to the number of colors in the
+ * color table.
+ */
+ virtual SkCodec::Result prepareToDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options, SkPMColor inputColorPtr[],
+ int* inputColorCount) = 0;
+
+private:
+
+ /*
+ * Creates a bmp decoder
+ * Reads enough of the stream to determine the image format
+ */
+ static SkCodec* NewFromStream(SkStream*, bool inIco);
+
+ /*
+ * Decodes the next dstInfo.height() lines.
+ *
+ * onGetPixels() uses this for full image decodes.
+ * SkScaledCodec::onGetPixels() uses the scanline decoder to call this with
+ * dstInfo.height() = 1, in order to implement sampling.
+ * A potential future use is to allow the caller to decode a subset of the
+ * lines in the image.
+ *
+ * @param dstInfo Contains output information. Height specifies the
+ * number of rows to decode at this time.
+ * @param dst Memory location to store output pixels
+ * @param dstRowBytes Bytes in a row of the destination
+ * @return Number of rows successfully decoded
+ */
+ virtual int decodeRows(const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes,
+ const Options& opts) = 0;
+
+ virtual bool skipRows(int count);
+
+ Result onStartScanlineDecode(const SkImageInfo& dstInfo, const SkCodec::Options&,
+ SkPMColor inputColorPtr[], int* inputColorCount) override;
+
+ int onGetScanlines(void* dst, int count, size_t rowBytes) override;
+
+ bool onSkipScanlines(int count) override;
+
+ const uint16_t fBitsPerPixel;
+ const SkScanlineOrder fRowOrder;
+ const size_t fSrcRowBytes;
+
+ typedef SkCodec INHERITED;
+};
+
+#endif
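
The getDstRow() contract documented above is compact enough to check with a standalone sketch (plain C++, no Skia types): for a bottom-up BMP of height 4, encoded rows 0..3 land in destination rows 3..0, while top-down rows map through unchanged.

    #include <cassert>

    enum class RowOrder { kTopDown, kBottomUp };

    // Mirrors the mapping getDstRow() describes: top-down rows map directly,
    // bottom-up rows are written in reverse order.
    int DstRow(int y, int height, RowOrder order) {
        return RowOrder::kTopDown == order ? y : height - y - 1;
    }

    int main() {
        assert(DstRow(0, 4, RowOrder::kBottomUp) == 3);
        assert(DstRow(3, 4, RowOrder::kBottomUp) == 0);
        assert(DstRow(2, 4, RowOrder::kTopDown) == 2);
        return 0;
    }
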
diff --git a/gfx/skia/skia/src/codec/SkBmpMaskCodec.cpp b/gfx/skia/skia/src/codec/SkBmpMaskCodec.cpp
new file mode 100644
index 000000000..5b28252f7
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkBmpMaskCodec.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBmpMaskCodec.h"
+#include "SkCodecPriv.h"
+#include "SkColorPriv.h"
+
+/*
+ * Creates an instance of the decoder
+ */
+SkBmpMaskCodec::SkBmpMaskCodec(int width, int height, const SkEncodedInfo& info, SkStream* stream,
+ uint16_t bitsPerPixel, SkMasks* masks,
+ SkCodec::SkScanlineOrder rowOrder)
+ : INHERITED(width, height, info, stream, bitsPerPixel, rowOrder)
+ , fMasks(masks)
+ , fMaskSwizzler(nullptr)
+ , fSrcBuffer(new uint8_t [this->srcRowBytes()])
+{}
+
+/*
+ * Initiates the bitmap decode
+ */
+SkCodec::Result SkBmpMaskCodec::onGetPixels(const SkImageInfo& dstInfo,
+ void* dst, size_t dstRowBytes,
+ const Options& opts,
+ SkPMColor* inputColorPtr,
+ int* inputColorCount,
+ int* rowsDecoded) {
+ if (opts.fSubset) {
+ // Subsets are not supported.
+ return kUnimplemented;
+ }
+ if (dstInfo.dimensions() != this->getInfo().dimensions()) {
+ SkCodecPrintf("Error: scaling not supported.\n");
+ return kInvalidScale;
+ }
+
+ if (!conversion_possible_ignore_color_space(dstInfo, this->getInfo())) {
+ SkCodecPrintf("Error: cannot convert input type to output type.\n");
+ return kInvalidConversion;
+ }
+
+ Result result = this->prepareToDecode(dstInfo, opts, inputColorPtr, inputColorCount);
+ if (kSuccess != result) {
+ return result;
+ }
+
+ int rows = this->decodeRows(dstInfo, dst, dstRowBytes, opts);
+ if (rows != dstInfo.height()) {
+ *rowsDecoded = rows;
+ return kIncompleteInput;
+ }
+ return kSuccess;
+}
+
+SkCodec::Result SkBmpMaskCodec::prepareToDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options, SkPMColor inputColorPtr[], int* inputColorCount) {
+ // Initialize the mask swizzler
+ fMaskSwizzler.reset(SkMaskSwizzler::CreateMaskSwizzler(dstInfo, this->getInfo(), fMasks,
+ this->bitsPerPixel(), options));
+ SkASSERT(fMaskSwizzler);
+
+ return SkCodec::kSuccess;
+}
+
+/*
+ * Performs the decoding
+ */
+int SkBmpMaskCodec::decodeRows(const SkImageInfo& dstInfo,
+ void* dst, size_t dstRowBytes,
+ const Options& opts) {
+ // Iterate over rows of the image
+ uint8_t* srcRow = fSrcBuffer.get();
+ const int height = dstInfo.height();
+ for (int y = 0; y < height; y++) {
+ // Read a row of the input
+ if (this->stream()->read(srcRow, this->srcRowBytes()) != this->srcRowBytes()) {
+ SkCodecPrintf("Warning: incomplete input stream.\n");
+ return y;
+ }
+
+ // Decode the row in destination format
+ uint32_t row = this->getDstRow(y, height);
+ void* dstRow = SkTAddOffset<void>(dst, row * dstRowBytes);
+ fMaskSwizzler->swizzle(dstRow, srcRow);
+ }
+
+ // Finished decoding the entire image
+ return height;
+}
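
For reference, a simplified standalone sketch of the per-pixel work that a bit-mask decode performs; this illustrates the idea only and is not SkMaskSwizzler's actual implementation. Each component is isolated with its mask, shifted down, and its high bits are replicated up to 8 bits:

    #include <cstdint>
    #include <cstdio>

    // Extract one 8-bit component from a packed pixel, given its bit mask,
    // shift, and width in bits. Assumes 4 <= bits <= 8.
    static uint8_t ComponentFromMask(uint32_t pixel, uint32_t mask, int shift, int bits) {
        const uint32_t value = (pixel & mask) >> shift;
        // Replicate the high bits to fill 8 bits (e.g. 5-bit 0x1F -> 0xFF).
        return (uint8_t) ((value << (8 - bits)) | (value >> (2 * bits - 8)));
    }

    int main() {
        // A 16-bit RGB 565 pixel with R=0x1F, G=0x00, B=0x10.
        const uint32_t pixel = (0x1F << 11) | (0x00 << 5) | 0x10;
        std::printf("r=%d g=%d b=%d\n",
                    ComponentFromMask(pixel, 0xF800, 11, 5),
                    ComponentFromMask(pixel, 0x07E0, 5, 6),
                    ComponentFromMask(pixel, 0x001F, 0, 5));   // prints r=255 g=0 b=132
        return 0;
    }
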
diff --git a/gfx/skia/skia/src/codec/SkBmpMaskCodec.h b/gfx/skia/skia/src/codec/SkBmpMaskCodec.h
new file mode 100644
index 000000000..cc8af856e
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkBmpMaskCodec.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBmpCodec.h"
+#include "SkImageInfo.h"
+#include "SkMaskSwizzler.h"
+#include "SkTypes.h"
+
+/*
+ * This class implements the decoding for bmp images using bit masks
+ */
+class SkBmpMaskCodec : public SkBmpCodec {
+public:
+
+ /*
+ * Creates an instance of the decoder
+ *
+ * Called only by SkBmpCodec::NewFromStream
+ * There should be no other callers despite this being public
+ *
+ * @param info contains properties of the encoded data
+ * @param stream the stream of encoded image data
+ * @param bitsPerPixel the number of bits used to store each pixel
+ * @param masks color masks for certain bmp formats
+ * @param rowOrder indicates whether rows are ordered top-down or bottom-up
+ */
+ SkBmpMaskCodec(int width, int height, const SkEncodedInfo& info, SkStream* stream,
+ uint16_t bitsPerPixel, SkMasks* masks,
+ SkCodec::SkScanlineOrder rowOrder);
+
+protected:
+
+ Result onGetPixels(const SkImageInfo& dstInfo, void* dst,
+ size_t dstRowBytes, const Options&, SkPMColor*,
+ int*, int*) override;
+
+ SkCodec::Result prepareToDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options, SkPMColor inputColorPtr[],
+ int* inputColorCount) override;
+
+private:
+
+ SkSampler* getSampler(bool createIfNecessary) override {
+ SkASSERT(fMaskSwizzler);
+ return fMaskSwizzler;
+ }
+
+ int decodeRows(const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes,
+ const Options& opts) override;
+
+ SkAutoTDelete<SkMasks> fMasks; // owned
+ SkAutoTDelete<SkMaskSwizzler> fMaskSwizzler;
+ SkAutoTDeleteArray<uint8_t> fSrcBuffer;
+
+ typedef SkBmpCodec INHERITED;
+};
diff --git a/gfx/skia/skia/src/codec/SkBmpRLECodec.cpp b/gfx/skia/skia/src/codec/SkBmpRLECodec.cpp
new file mode 100644
index 000000000..dc5d68923
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkBmpRLECodec.cpp
@@ -0,0 +1,557 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBmpRLECodec.h"
+#include "SkCodecPriv.h"
+#include "SkColorPriv.h"
+#include "SkStream.h"
+
+/*
+ * Creates an instance of the decoder
+ * Called only by NewFromStream
+ */
+SkBmpRLECodec::SkBmpRLECodec(int width, int height, const SkEncodedInfo& info, SkStream* stream,
+ uint16_t bitsPerPixel, uint32_t numColors,
+ uint32_t bytesPerColor, uint32_t offset,
+ SkCodec::SkScanlineOrder rowOrder,
+ size_t RLEBytes)
+ : INHERITED(width, height, info, stream, bitsPerPixel, rowOrder)
+ , fColorTable(nullptr)
+ , fNumColors(numColors)
+ , fBytesPerColor(bytesPerColor)
+ , fOffset(offset)
+ , fStreamBuffer(new uint8_t[RLEBytes])
+ , fRLEBytes(RLEBytes)
+ , fOrigRLEBytes(RLEBytes)
+ , fCurrRLEByte(0)
+ , fSampleX(1)
+{}
+
+/*
+ * Initiates the bitmap decode
+ */
+SkCodec::Result SkBmpRLECodec::onGetPixels(const SkImageInfo& dstInfo,
+ void* dst, size_t dstRowBytes,
+ const Options& opts,
+ SkPMColor* inputColorPtr,
+ int* inputColorCount,
+ int* rowsDecoded) {
+ if (opts.fSubset) {
+ // Subsets are not supported.
+ return kUnimplemented;
+ }
+ if (!conversion_possible_ignore_color_space(dstInfo, this->getInfo())) {
+ SkCodecPrintf("Error: cannot convert input type to output type.\n");
+ return kInvalidConversion;
+ }
+
+ Result result = this->prepareToDecode(dstInfo, opts, inputColorPtr, inputColorCount);
+ if (kSuccess != result) {
+ return result;
+ }
+
+ // Perform the decode
+ int rows = this->decodeRows(dstInfo, dst, dstRowBytes, opts);
+ if (rows != dstInfo.height()) {
+ // We set rowsDecoded equal to the height because the background has already
+ // been filled. RLE encodings sometimes skip pixels, so we always start by
+ // filling the background.
+ *rowsDecoded = dstInfo.height();
+ return kIncompleteInput;
+ }
+
+ return kSuccess;
+}
+
+/*
+ * Process the color table for the bmp input
+ */
+ bool SkBmpRLECodec::createColorTable(SkColorType dstColorType, int* numColors) {
+ // Allocate memory for color table
+ uint32_t colorBytes = 0;
+ SkPMColor colorTable[256];
+ if (this->bitsPerPixel() <= 8) {
+ // Inform the caller of the number of colors
+ uint32_t maxColors = 1 << this->bitsPerPixel();
+ if (nullptr != numColors) {
+ // We set the number of colors to maxColors in order to ensure
+ // safe memory accesses. Otherwise, an invalid pixel could
+ // access memory outside of our color table array.
+ *numColors = maxColors;
+ }
+ // Don't bother reading more than maxColors.
+ const uint32_t numColorsToRead =
+ fNumColors == 0 ? maxColors : SkTMin(fNumColors, maxColors);
+
+ // Read the color table from the stream
+ colorBytes = numColorsToRead * fBytesPerColor;
+ SkAutoTDeleteArray<uint8_t> cBuffer(new uint8_t[colorBytes]);
+ if (stream()->read(cBuffer.get(), colorBytes) != colorBytes) {
+ SkCodecPrintf("Error: unable to read color table.\n");
+ return false;
+ }
+
+ // Fill in the color table
+ PackColorProc packARGB = choose_pack_color_proc(false, dstColorType);
+ uint32_t i = 0;
+ for (; i < numColorsToRead; i++) {
+ uint8_t blue = get_byte(cBuffer.get(), i*fBytesPerColor);
+ uint8_t green = get_byte(cBuffer.get(), i*fBytesPerColor + 1);
+ uint8_t red = get_byte(cBuffer.get(), i*fBytesPerColor + 2);
+ colorTable[i] = packARGB(0xFF, red, green, blue);
+ }
+
+ // To avoid segmentation faults on bad pixel data, fill the end of the
+ // color table with black. This is the same behavior as the
+ // chromium decoder.
+ for (; i < maxColors; i++) {
+ colorTable[i] = SkPackARGB32NoCheck(0xFF, 0, 0, 0);
+ }
+
+ // Set the color table
+ fColorTable.reset(new SkColorTable(colorTable, maxColors));
+ }
+
+ // Check that we have not read past the pixel array offset
+ if(fOffset < colorBytes) {
+ // This may occur on OS 2.1 and other old versions where the color
+ // table defaults to max size, and the bmp tries to use a smaller
+ // color table. This is invalid, and our decision is to indicate
+ // an error, rather than try to guess the intended size of the
+ // color table.
+ SkCodecPrintf("Error: pixel data offset less than color table size.\n");
+ return false;
+ }
+
+ // After reading the color table, skip to the start of the pixel array
+ if (stream()->skip(fOffset - colorBytes) != fOffset - colorBytes) {
+ SkCodecPrintf("Error: unable to skip to image data.\n");
+ return false;
+ }
+
+ // Return true on success
+ return true;
+}
+
+bool SkBmpRLECodec::initializeStreamBuffer() {
+ // Setup a buffer to contain the full input stream
+ // TODO (msarett): I'm not sure it is smart or optimal to trust fRLEBytes (read from header)
+ // as the size of our buffer. First of all, the decode fails if fRLEBytes is
+ // corrupt (negative, zero, or small) when we might be able to decode
+ // successfully with a fixed size buffer. Additionally, we would save memory
+ // using a fixed size buffer if the RLE encoding is large. On the other hand,
+ // we may also waste memory with a fixed size buffer. And determining a
+ // minimum size for our buffer would depend on the image width (so it's not
+ // really "fixed" size), and we may end up allocating a buffer that is
+ // generally larger than the average encoded size anyway.
+ size_t totalBytes = this->stream()->read(fStreamBuffer.get(), fRLEBytes);
+ if (totalBytes < fRLEBytes) {
+ fRLEBytes = totalBytes;
+ SkCodecPrintf("Warning: incomplete RLE file.\n");
+ }
+ if (fRLEBytes == 0) {
+ SkCodecPrintf("Error: could not read RLE image data.\n");
+ return false;
+ }
+ fCurrRLEByte = 0;
+ return true;
+}
+
+/*
+ * Before signalling kIncompleteInput, we should attempt to load the
+ * stream buffer with additional data.
+ *
+ * @return the number of bytes remaining in the stream buffer after
+ * attempting to read more bytes from the stream
+ */
+size_t SkBmpRLECodec::checkForMoreData() {
+ const size_t remainingBytes = fRLEBytes - fCurrRLEByte;
+ uint8_t* buffer = fStreamBuffer.get();
+
+ // We will be reusing the same buffer, starting over from the beginning.
+ // Move any remaining bytes to the start of the buffer.
+ // We use memmove() instead of memcpy() because there is risk that the dst
+ // and src memory will overlap in corrupt images.
+ memmove(buffer, SkTAddOffset<uint8_t>(buffer, fCurrRLEByte), remainingBytes);
+
+ // Adjust the buffer ptr to the start of the unfilled data.
+ buffer += remainingBytes;
+
+ // Try to read additional bytes from the stream. There are fCurrRLEByte
+ // bytes of additional space remaining in the buffer, assuming that we
+ // have already copied remainingBytes to the start of the buffer.
+ size_t additionalBytes = this->stream()->read(buffer, fCurrRLEByte);
+
+ // Update counters and return the number of bytes we currently have
+ // available. We are at the start of the buffer again.
+ fCurrRLEByte = 0;
+ // If we were unable to fill the buffer, fRLEBytes is no longer equal to
+ // the size of the buffer. There will be unused space at the end. This
+ // should be fine, given that there are no more bytes in the stream.
+ fRLEBytes = remainingBytes + additionalBytes;
+ return fRLEBytes;
+}
+
+/*
+ * Set an RLE pixel using the color table
+ */
+void SkBmpRLECodec::setPixel(void* dst, size_t dstRowBytes,
+ const SkImageInfo& dstInfo, uint32_t x, uint32_t y,
+ uint8_t index) {
+ if (dst && is_coord_necessary(x, fSampleX, dstInfo.width())) {
+ // Set the row
+ uint32_t row = this->getDstRow(y, dstInfo.height());
+
+ // Set the pixel based on destination color type
+ const int dstX = get_dst_coord(x, fSampleX);
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType: {
+ SkPMColor* dstRow = SkTAddOffset<SkPMColor>(dst, row * (int) dstRowBytes);
+ dstRow[dstX] = fColorTable->operator[](index);
+ break;
+ }
+ case kRGB_565_SkColorType: {
+ uint16_t* dstRow = SkTAddOffset<uint16_t>(dst, row * (int) dstRowBytes);
+ dstRow[dstX] = SkPixel32ToPixel16(fColorTable->operator[](index));
+ break;
+ }
+ default:
+ // This case should not be reached. We should catch an invalid
+ // color type when we check that the conversion is possible.
+ SkASSERT(false);
+ break;
+ }
+ }
+}
+
+/*
+ * Set an RLE pixel from R, G, B values
+ */
+void SkBmpRLECodec::setRGBPixel(void* dst, size_t dstRowBytes,
+ const SkImageInfo& dstInfo, uint32_t x,
+ uint32_t y, uint8_t red, uint8_t green,
+ uint8_t blue) {
+ if (dst && is_coord_necessary(x, fSampleX, dstInfo.width())) {
+ // Set the row
+ uint32_t row = this->getDstRow(y, dstInfo.height());
+
+ // Set the pixel based on destination color type
+ const int dstX = get_dst_coord(x, fSampleX);
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType: {
+ SkPMColor* dstRow = SkTAddOffset<SkPMColor>(dst, row * (int) dstRowBytes);
+ dstRow[dstX] = SkPackARGB_as_RGBA(0xFF, red, green, blue);
+ break;
+ }
+ case kBGRA_8888_SkColorType: {
+ SkPMColor* dstRow = SkTAddOffset<SkPMColor>(dst, row * (int) dstRowBytes);
+ dstRow[dstX] = SkPackARGB_as_BGRA(0xFF, red, green, blue);
+ break;
+ }
+ case kRGB_565_SkColorType: {
+ uint16_t* dstRow = SkTAddOffset<uint16_t>(dst, row * (int) dstRowBytes);
+ dstRow[dstX] = SkPack888ToRGB16(red, green, blue);
+ break;
+ }
+ default:
+ // This case should not be reached. We should catch an invalid
+ // color type when we check that the conversion is possible.
+ SkASSERT(false);
+ break;
+ }
+ }
+}
+
+SkCodec::Result SkBmpRLECodec::prepareToDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options, SkPMColor inputColorPtr[], int* inputColorCount) {
+ // FIXME: Support subsets for scanline decodes.
+ if (options.fSubset) {
+ // Subsets are not supported.
+ return kUnimplemented;
+ }
+
+ // Reset fSampleX. If it needs to be a value other than 1, it will get modified by
+ // the sampler.
+ fSampleX = 1;
+ fLinesToSkip = 0;
+
+ // Create the color table if necessary and prepare the stream for decode
+ // Note that if it is non-NULL, inputColorCount will be modified
+ if (!this->createColorTable(dstInfo.colorType(), inputColorCount)) {
+ SkCodecPrintf("Error: could not create color table.\n");
+ return SkCodec::kInvalidInput;
+ }
+
+ // Copy the color table to the client if necessary
+ copy_color_table(dstInfo, this->fColorTable, inputColorPtr, inputColorCount);
+
+ // Initialize a buffer for encoded RLE data
+ fRLEBytes = fOrigRLEBytes;
+ if (!this->initializeStreamBuffer()) {
+ SkCodecPrintf("Error: cannot initialize stream buffer.\n");
+ return SkCodec::kInvalidInput;
+ }
+
+ return SkCodec::kSuccess;
+}
+
+/*
+ * Performs the bitmap decoding for RLE input format
+ * RLE decoding is performed all at once, rather than one row at a time
+ */
+int SkBmpRLECodec::decodeRows(const SkImageInfo& info, void* dst, size_t dstRowBytes,
+ const Options& opts) {
+ // Set RLE flags
+ static const uint8_t RLE_ESCAPE = 0;
+ static const uint8_t RLE_EOL = 0;
+ static const uint8_t RLE_EOF = 1;
+ static const uint8_t RLE_DELTA = 2;
+
+ const int width = this->getInfo().width();
+ int height = info.height();
+
+ // Account for sampling.
+ SkImageInfo dstInfo = info.makeWH(get_scaled_dimension(width, fSampleX), height);
+
+ // Set the background as transparent. Then, if the RLE code skips pixels,
+ // the skipped pixels will be transparent.
+ // Because of the need for transparent pixels, kN32 is the only color
+ // type that makes sense for the destination format.
+ SkASSERT(kRGBA_8888_SkColorType == dstInfo.colorType() ||
+ kBGRA_8888_SkColorType == dstInfo.colorType());
+ if (dst) {
+ SkSampler::Fill(dstInfo, dst, dstRowBytes, SK_ColorTRANSPARENT, opts.fZeroInitialized);
+ }
+
+ // Adjust the height and the dst if the previous call to decodeRows() left us
+ // with lines that need to be skipped.
+ if (height > fLinesToSkip) {
+ height -= fLinesToSkip;
+ dst = SkTAddOffset<void>(dst, fLinesToSkip * dstRowBytes);
+ fLinesToSkip = 0;
+ } else {
+ fLinesToSkip -= height;
+ return height;
+ }
+
+ // Destination parameters
+ int x = 0;
+ int y = 0;
+
+ while (true) {
+ // If we have reached a row that is beyond the requested height, we have
+ // succeeded.
+ if (y >= height) {
+ // It would be better to check for the EOF marker before indicating
+ // success, but we may be performing a scanline decode, which
+ // would require us to stop before decoding the full height.
+ return height;
+ }
+
+ // Every entry takes at least two bytes
+ if ((int) fRLEBytes - fCurrRLEByte < 2) {
+ SkCodecPrintf("Warning: might be incomplete RLE input.\n");
+ if (this->checkForMoreData() < 2) {
+ return y;
+ }
+ }
+
+ // Read the next two bytes. These bytes have different meanings
+ // depending on their values. In the first interpretation, the first
+ // byte is an escape flag and the second byte indicates what special
+ // task to perform.
+ const uint8_t flag = fStreamBuffer.get()[fCurrRLEByte++];
+ const uint8_t task = fStreamBuffer.get()[fCurrRLEByte++];
+
+ // Perform decoding
+ if (RLE_ESCAPE == flag) {
+ switch (task) {
+ case RLE_EOL:
+ x = 0;
+ y++;
+ break;
+ case RLE_EOF:
+ return height;
+ case RLE_DELTA: {
+ // Two bytes are needed to specify delta
+ if ((int) fRLEBytes - fCurrRLEByte < 2) {
+ SkCodecPrintf("Warning: might be incomplete RLE input.\n");
+ if (this->checkForMoreData() < 2) {
+ return y;
+ }
+ }
+ // Modify x and y
+ const uint8_t dx = fStreamBuffer.get()[fCurrRLEByte++];
+ const uint8_t dy = fStreamBuffer.get()[fCurrRLEByte++];
+ x += dx;
+ y += dy;
+ if (x > width) {
+ SkCodecPrintf("Warning: invalid RLE input.\n");
+ return y - dy;
+ } else if (y > height) {
+ fLinesToSkip = y - height;
+ return height;
+ }
+ break;
+ }
+ default: {
+ // If task does not match any of the above signals, it
+ // indicates that we have a sequence of non-RLE pixels.
+ // Furthermore, the value of task is equal to the number
+ // of pixels to interpret.
+ uint8_t numPixels = task;
+ const size_t rowBytes = compute_row_bytes(numPixels,
+ this->bitsPerPixel());
+ // Abort if setting numPixels moves us off the edge of the
+ // image.
+ if (x + numPixels > width) {
+ SkCodecPrintf("Warning: invalid RLE input.\n");
+ return y;
+ }
+ // Also abort if there are not enough bytes
+ // remaining in the stream to set numPixels.
+ if ((int) fRLEBytes - fCurrRLEByte < SkAlign2(rowBytes)) {
+ SkCodecPrintf("Warning: might be incomplete RLE input.\n");
+ if (this->checkForMoreData() < SkAlign2(rowBytes)) {
+ return y;
+ }
+ }
+ // Set numPixels number of pixels
+ while (numPixels > 0) {
+ switch(this->bitsPerPixel()) {
+ case 4: {
+ SkASSERT(fCurrRLEByte < fRLEBytes);
+ uint8_t val = fStreamBuffer.get()[fCurrRLEByte++];
+ setPixel(dst, dstRowBytes, dstInfo, x++,
+ y, val >> 4);
+ numPixels--;
+ if (numPixels != 0) {
+ setPixel(dst, dstRowBytes, dstInfo,
+ x++, y, val & 0xF);
+ numPixels--;
+ }
+ break;
+ }
+ case 8:
+ SkASSERT(fCurrRLEByte < fRLEBytes);
+ setPixel(dst, dstRowBytes, dstInfo, x++,
+ y, fStreamBuffer.get()[fCurrRLEByte++]);
+ numPixels--;
+ break;
+ case 24: {
+ SkASSERT(fCurrRLEByte + 2 < fRLEBytes);
+ uint8_t blue = fStreamBuffer.get()[fCurrRLEByte++];
+ uint8_t green = fStreamBuffer.get()[fCurrRLEByte++];
+ uint8_t red = fStreamBuffer.get()[fCurrRLEByte++];
+ setRGBPixel(dst, dstRowBytes, dstInfo,
+ x++, y, red, green, blue);
+ numPixels--;
+ break;
+ }
+ default:
+ SkASSERT(false);
+ return y;
+ }
+ }
+ // Skip a byte if necessary to maintain alignment
+ if (!SkIsAlign2(rowBytes)) {
+ fCurrRLEByte++;
+ }
+ break;
+ }
+ }
+ } else {
+ // If the first byte read is not a flag, it indicates the number of
+ // pixels to set in RLE mode.
+ const uint8_t numPixels = flag;
+ const int endX = SkTMin<int>(x + numPixels, width);
+
+ if (24 == this->bitsPerPixel()) {
+ // In RLE24, the second byte read is part of the pixel color.
+ // There are two more required bytes to finish encoding the
+ // color.
+ if ((int) fRLEBytes - fCurrRLEByte < 2) {
+ SkCodecPrintf("Warning: might be incomplete RLE input.\n");
+ if (this->checkForMoreData() < 2) {
+ return y;
+ }
+ }
+
+ // Fill the pixels up to endX with the specified color
+ uint8_t blue = task;
+ uint8_t green = fStreamBuffer.get()[fCurrRLEByte++];
+ uint8_t red = fStreamBuffer.get()[fCurrRLEByte++];
+ while (x < endX) {
+ setRGBPixel(dst, dstRowBytes, dstInfo, x++, y, red, green, blue);
+ }
+ } else {
+ // In RLE8 or RLE4, the second byte read gives the index in the
+ // color table to look up the pixel color.
+ // RLE8 has one color index that gets repeated
+ // RLE4 has two color indexes in the upper and lower 4 bits of
+ // the bytes, which are alternated
+ uint8_t indices[2] = { task, task };
+ if (4 == this->bitsPerPixel()) {
+ indices[0] >>= 4;
+ indices[1] &= 0xf;
+ }
+
+ // Set the indicated number of pixels
+ for (int which = 0; x < endX; x++) {
+ setPixel(dst, dstRowBytes, dstInfo, x, y, indices[which]);
+ which = !which;
+ }
+ }
+ }
+ }
+}
+
+bool SkBmpRLECodec::skipRows(int count) {
+ const SkImageInfo rowInfo = SkImageInfo::Make(this->getInfo().width(), count, kN32_SkColorType,
+ kUnpremul_SkAlphaType);
+
+ return count == this->decodeRows(rowInfo, nullptr, 0, this->options());
+}
+
+// FIXME: Make SkBmpRLECodec have no knowledge of sampling.
+// Or it should do all sampling natively.
+// It currently is a hybrid that needs to know what SkScaledCodec is doing.
+class SkBmpRLESampler : public SkSampler {
+public:
+ SkBmpRLESampler(SkBmpRLECodec* codec)
+ : fCodec(codec)
+ {
+ SkASSERT(fCodec);
+ }
+
+private:
+ int onSetSampleX(int sampleX) override {
+ return fCodec->setSampleX(sampleX);
+ }
+
+ // Unowned pointer. fCodec will delete this class in its destructor.
+ SkBmpRLECodec* fCodec;
+};
+
+SkSampler* SkBmpRLECodec::getSampler(bool /*createIfNecessary*/) {
+ // We will always create an SkBmpRLESampler if one is requested.
+ // This allows clients to always use the SkBmpRLESampler's
+ // version of fill(), which does nothing since RLE decodes have
+ // already filled pixel memory. This seems fine, since creating
+ // an SkBmpRLESampler is pretty inexpensive.
+ if (!fSampler) {
+ fSampler.reset(new SkBmpRLESampler(this));
+ }
+
+ return fSampler;
+}
+
+int SkBmpRLECodec::setSampleX(int sampleX){
+ fSampleX = sampleX;
+ return get_scaled_dimension(this->getInfo().width(), sampleX);
+}
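
As a self-contained illustration of the byte-pair interpretation that decodeRows() implements above, here is a toy RLE8 expander (escape handling reduced to end-of-line and end-of-file; deltas and absolute runs are noted but omitted). It is a sketch, not Skia code:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Expand RLE8 data into palette indices, row-major, top-down for simplicity.
    std::vector<uint8_t> DecodeRle8(const std::vector<uint8_t>& src, int width, int height) {
        std::vector<uint8_t> dst(width * height, 0);
        size_t i = 0;
        int x = 0, y = 0;
        while (i + 1 < src.size() && y < height) {
            const uint8_t count = src[i++];
            const uint8_t value = src[i++];
            if (count > 0) {
                // Encoded run: repeat "value" count times.
                for (int n = 0; n < count && x < width; n++) {
                    dst[y * width + x++] = value;
                }
            } else if (0 == value) {        // escape: end of line
                x = 0;
                y++;
            } else if (1 == value) {        // escape: end of file
                break;
            }
            // (2 would be a delta, >= 3 an absolute run of literal pixels.)
        }
        return dst;
    }

    int main() {
        // A 4x2 image: three pixels of index 7, one of index 2, EOL, four of index 5, EOF.
        const std::vector<uint8_t> rle = { 3, 7, 1, 2, 0, 0, 4, 5, 0, 1 };
        for (uint8_t v : DecodeRle8(rle, 4, 2)) {
            std::printf("%d ", v);          // prints: 7 7 7 2 5 5 5 5
        }
        std::printf("\n");
        return 0;
    }
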
diff --git a/gfx/skia/skia/src/codec/SkBmpRLECodec.h b/gfx/skia/skia/src/codec/SkBmpRLECodec.h
new file mode 100644
index 000000000..c5236a810
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkBmpRLECodec.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBmpCodec.h"
+#include "SkColorTable.h"
+#include "SkImageInfo.h"
+#include "SkSampler.h"
+#include "SkTypes.h"
+
+/*
+ * This class implements the decoding for bmp images that use an RLE encoding
+ */
+class SkBmpRLECodec : public SkBmpCodec {
+public:
+
+ /*
+ * Creates an instance of the decoder
+ *
+ * Called only by SkBmpCodec::NewFromStream
+ * There should be no other callers despite this being public
+ *
+ * @param info contains properties of the encoded data
+ * @param stream the stream of encoded image data
+ * @param bitsPerPixel the number of bits used to store each pixel
+ * @param numColors the number of colors in the color table
+ * @param bytesPerColor the number of bytes in the stream used to represent
+ * each color in the color table
+ * @param offset the offset of the image pixel data from the end of the
+ * headers
+ * @param rowOrder indicates whether rows are ordered top-down or bottom-up
+ * @param RLEBytes indicates the amount of data left in the stream
+ * after decoding the headers
+ */
+ SkBmpRLECodec(int width, int height, const SkEncodedInfo& info, SkStream* stream,
+ uint16_t bitsPerPixel, uint32_t numColors, uint32_t bytesPerColor,
+ uint32_t offset, SkCodec::SkScanlineOrder rowOrder,
+ size_t RLEBytes);
+
+ int setSampleX(int);
+
+protected:
+
+ Result onGetPixels(const SkImageInfo& dstInfo, void* dst,
+ size_t dstRowBytes, const Options&, SkPMColor*,
+ int*, int*) override;
+
+ SkCodec::Result prepareToDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options, SkPMColor inputColorPtr[],
+ int* inputColorCount) override;
+
+private:
+
+ /*
+ * Creates the color table
+ * Sets colorCount to the new color count if it is non-nullptr
+ */
+ bool createColorTable(SkColorType dstColorType, int* colorCount);
+
+ bool initializeStreamBuffer();
+
+ /*
+ * Before signalling kIncompleteInput, we should attempt to load the
+ * stream buffer with additional data.
+ *
+ * @return the number of bytes remaining in the stream buffer after
+ * attempting to read more bytes from the stream
+ */
+ size_t checkForMoreData();
+
+ /*
+ * Set an RLE pixel using the color table
+ */
+ void setPixel(void* dst, size_t dstRowBytes,
+ const SkImageInfo& dstInfo, uint32_t x, uint32_t y,
+ uint8_t index);
+ /*
+ * Set an RLE24 pixel from R, G, B values
+ */
+ void setRGBPixel(void* dst, size_t dstRowBytes,
+ const SkImageInfo& dstInfo, uint32_t x, uint32_t y,
+ uint8_t red, uint8_t green, uint8_t blue);
+
+ /*
+ * If dst is NULL, this is a signal to skip the rows.
+ */
+ int decodeRows(const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes,
+ const Options& opts) override;
+
+ bool skipRows(int count) override;
+
+ SkSampler* getSampler(bool createIfNecessary) override;
+
+ SkAutoTUnref<SkColorTable> fColorTable; // owned
+ // fNumColors is the number specified in the header, or 0 if not present in the header.
+ const uint32_t fNumColors;
+ const uint32_t fBytesPerColor;
+ const uint32_t fOffset;
+ SkAutoTDeleteArray<uint8_t> fStreamBuffer;
+ size_t fRLEBytes;
+ const size_t fOrigRLEBytes;
+ uint32_t fCurrRLEByte;
+ int fSampleX;
+ SkAutoTDelete<SkSampler> fSampler;
+
+ // Scanline decodes allow the client to ask for a single scanline at a time.
+ // This can be tricky when the RLE encoding instructs the decoder to jump down
+ // multiple lines. This field keeps track of lines that need to be skipped
+ // on subsequent calls to decodeRows().
+ int fLinesToSkip;
+
+ typedef SkBmpCodec INHERITED;
+};
diff --git a/gfx/skia/skia/src/codec/SkBmpStandardCodec.cpp b/gfx/skia/skia/src/codec/SkBmpStandardCodec.cpp
new file mode 100644
index 000000000..7d67d18c8
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkBmpStandardCodec.cpp
@@ -0,0 +1,312 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBmpStandardCodec.h"
+#include "SkCodecPriv.h"
+#include "SkColorPriv.h"
+#include "SkStream.h"
+
+/*
+ * Creates an instance of the decoder
+ * Called only by NewFromStream
+ */
+SkBmpStandardCodec::SkBmpStandardCodec(int width, int height, const SkEncodedInfo& info,
+ SkStream* stream, uint16_t bitsPerPixel, uint32_t numColors,
+ uint32_t bytesPerColor, uint32_t offset,
+ SkCodec::SkScanlineOrder rowOrder,
+ bool isOpaque, bool inIco)
+ : INHERITED(width, height, info, stream, bitsPerPixel, rowOrder)
+ , fColorTable(nullptr)
+ , fNumColors(numColors)
+ , fBytesPerColor(bytesPerColor)
+ , fOffset(offset)
+ , fSwizzler(nullptr)
+ , fSrcBuffer(new uint8_t [this->srcRowBytes()])
+ , fIsOpaque(isOpaque)
+ , fInIco(inIco)
+ , fAndMaskRowBytes(fInIco ? SkAlign4(compute_row_bytes(this->getInfo().width(), 1)) : 0)
+{}
+
+/*
+ * Initiates the bitmap decode
+ */
+SkCodec::Result SkBmpStandardCodec::onGetPixels(const SkImageInfo& dstInfo,
+ void* dst, size_t dstRowBytes,
+ const Options& opts,
+ SkPMColor* inputColorPtr,
+ int* inputColorCount,
+ int* rowsDecoded) {
+ if (opts.fSubset) {
+ // Subsets are not supported.
+ return kUnimplemented;
+ }
+ if (dstInfo.dimensions() != this->getInfo().dimensions()) {
+ SkCodecPrintf("Error: scaling not supported.\n");
+ return kInvalidScale;
+ }
+ if (!conversion_possible_ignore_color_space(dstInfo, this->getInfo())) {
+ SkCodecPrintf("Error: cannot convert input type to output type.\n");
+ return kInvalidConversion;
+ }
+
+ Result result = this->prepareToDecode(dstInfo, opts, inputColorPtr, inputColorCount);
+ if (kSuccess != result) {
+ return result;
+ }
+ int rows = this->decodeRows(dstInfo, dst, dstRowBytes, opts);
+ if (rows != dstInfo.height()) {
+ *rowsDecoded = rows;
+ return kIncompleteInput;
+ }
+ return kSuccess;
+}
+
+/*
+ * Process the color table for the bmp input
+ */
+ bool SkBmpStandardCodec::createColorTable(SkColorType dstColorType, SkAlphaType dstAlphaType,
+ int* numColors) {
+ // Allocate memory for color table
+ uint32_t colorBytes = 0;
+ SkPMColor colorTable[256];
+ if (this->bitsPerPixel() <= 8) {
+ // Inform the caller of the number of colors
+ uint32_t maxColors = 1 << this->bitsPerPixel();
+ if (nullptr != numColors) {
+ // We set the number of colors to maxColors in order to ensure
+ // safe memory accesses. Otherwise, an invalid pixel could
+ // access memory outside of our color table array.
+ *numColors = maxColors;
+ }
+ // Don't bother reading more than maxColors.
+ const uint32_t numColorsToRead =
+ fNumColors == 0 ? maxColors : SkTMin(fNumColors, maxColors);
+
+ // Read the color table from the stream
+ colorBytes = numColorsToRead * fBytesPerColor;
+ SkAutoTDeleteArray<uint8_t> cBuffer(new uint8_t[colorBytes]);
+ if (stream()->read(cBuffer.get(), colorBytes) != colorBytes) {
+ SkCodecPrintf("Error: unable to read color table.\n");
+ return false;
+ }
+
+ // Choose the proper packing function
+ bool isPremul = (kPremul_SkAlphaType == dstAlphaType) && !fIsOpaque;
+ PackColorProc packARGB = choose_pack_color_proc(isPremul, dstColorType);
+
+ // Fill in the color table
+ uint32_t i = 0;
+ for (; i < numColorsToRead; i++) {
+ uint8_t blue = get_byte(cBuffer.get(), i*fBytesPerColor);
+ uint8_t green = get_byte(cBuffer.get(), i*fBytesPerColor + 1);
+ uint8_t red = get_byte(cBuffer.get(), i*fBytesPerColor + 2);
+ uint8_t alpha;
+ if (fIsOpaque) {
+ alpha = 0xFF;
+ } else {
+ alpha = get_byte(cBuffer.get(), i*fBytesPerColor + 3);
+ }
+ colorTable[i] = packARGB(alpha, red, green, blue);
+ }
+
+ // To avoid segmentation faults on bad pixel data, fill the end of the
+ // color table with black. This is the same behavior as the
+ // chromium decoder.
+ for (; i < maxColors; i++) {
+ colorTable[i] = SkPackARGB32NoCheck(0xFF, 0, 0, 0);
+ }
+
+ // Set the color table
+ fColorTable.reset(new SkColorTable(colorTable, maxColors));
+ }
+
+ // Bmp-in-Ico files do not use an offset to indicate where the pixel data
+ // begins. Pixel data always begins immediately after the color table.
+ if (!fInIco) {
+ // Check that we have not read past the pixel array offset
+ if(fOffset < colorBytes) {
+ // This may occur on OS 2.1 and other old versions where the color
+ // table defaults to max size, and the bmp tries to use a smaller
+ // color table. This is invalid, and our decision is to indicate
+ // an error, rather than try to guess the intended size of the
+ // color table.
+ SkCodecPrintf("Error: pixel data offset less than color table size.\n");
+ return false;
+ }
+
+ // After reading the color table, skip to the start of the pixel array
+ if (stream()->skip(fOffset - colorBytes) != fOffset - colorBytes) {
+ SkCodecPrintf("Error: unable to skip to image data.\n");
+ return false;
+ }
+ }
+
+ // Return true on success
+ return true;
+}
+
+void SkBmpStandardCodec::initializeSwizzler(const SkImageInfo& dstInfo, const Options& opts) {
+ // In the case of bmp-in-icos, we will report BGRA to the client,
+ // since we may be required to apply an alpha mask after the decode.
+ // However, the swizzler needs to know the actual format of the bmp.
+ SkEncodedInfo swizzlerInfo = this->getEncodedInfo();
+ if (fInIco) {
+ if (this->bitsPerPixel() <= 8) {
+ swizzlerInfo = SkEncodedInfo::Make(SkEncodedInfo::kPalette_Color,
+ swizzlerInfo.alpha(), this->bitsPerPixel());
+ } else if (this->bitsPerPixel() == 24) {
+ swizzlerInfo = SkEncodedInfo::Make(SkEncodedInfo::kBGR_Color,
+ SkEncodedInfo::kOpaque_Alpha, 8);
+ }
+ }
+
+ // Get a pointer to the color table if it exists
+ const SkPMColor* colorPtr = get_color_ptr(fColorTable.get());
+
+ // Create swizzler
+ fSwizzler.reset(SkSwizzler::CreateSwizzler(swizzlerInfo, colorPtr, dstInfo, opts));
+ SkASSERT(fSwizzler);
+}
+
+SkCodec::Result SkBmpStandardCodec::prepareToDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options, SkPMColor inputColorPtr[], int* inputColorCount) {
+ // Create the color table if necessary and prepare the stream for decode
+ // Note that if it is non-NULL, inputColorCount will be modified
+ if (!this->createColorTable(dstInfo.colorType(), dstInfo.alphaType(), inputColorCount)) {
+ SkCodecPrintf("Error: could not create color table.\n");
+ return SkCodec::kInvalidInput;
+ }
+
+ // Copy the color table to the client if necessary
+ copy_color_table(dstInfo, this->fColorTable, inputColorPtr, inputColorCount);
+
+ // Initialize a swizzler
+ this->initializeSwizzler(dstInfo, options);
+ return SkCodec::kSuccess;
+}
+
+/*
+ * Performs the bitmap decoding for standard input format
+ */
+int SkBmpStandardCodec::decodeRows(const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes,
+ const Options& opts) {
+ // Iterate over rows of the image
+ const int height = dstInfo.height();
+ for (int y = 0; y < height; y++) {
+ // Read a row of the input
+ if (this->stream()->read(fSrcBuffer.get(), this->srcRowBytes()) != this->srcRowBytes()) {
+ SkCodecPrintf("Warning: incomplete input stream.\n");
+ return y;
+ }
+
+ // Decode the row in destination format
+ uint32_t row = this->getDstRow(y, dstInfo.height());
+
+ void* dstRow = SkTAddOffset<void>(dst, row * dstRowBytes);
+ fSwizzler->swizzle(dstRow, fSrcBuffer.get());
+ }
+
+ if (fInIco && fIsOpaque) {
+ const int startScanline = this->currScanline();
+ if (startScanline < 0) {
+ // We are not performing a scanline decode.
+ // Just decode the entire ICO mask and return.
+ decodeIcoMask(this->stream(), dstInfo, dst, dstRowBytes);
+ return height;
+ }
+
+ // In order to perform a scanline ICO decode, we must be able
+ // to skip ahead in the stream in order to apply the AND mask
+ // to the requested scanlines.
+ // We will do this by taking advantage of the fact that
+ // SkIcoCodec always uses a SkMemoryStream as its underlying
+ // representation of the stream.
+ const void* memoryBase = this->stream()->getMemoryBase();
+ SkASSERT(nullptr != memoryBase);
+ SkASSERT(this->stream()->hasLength());
+ SkASSERT(this->stream()->hasPosition());
+
+ const size_t length = this->stream()->getLength();
+ const size_t currPosition = this->stream()->getPosition();
+
+ // Calculate how many bytes we must skip to reach the AND mask.
+ const int remainingScanlines = this->getInfo().height() - startScanline - height;
+ const size_t bytesToSkip = remainingScanlines * this->srcRowBytes() +
+ startScanline * fAndMaskRowBytes;
+ const size_t subStreamStartPosition = currPosition + bytesToSkip;
+ if (subStreamStartPosition >= length) {
+ // FIXME: How can we indicate that this decode was actually incomplete?
+ return height;
+ }
+
+ // Create a subStream to pass to decodeIcoMask(). It is useful to encapsulate
+ // the memory base into a stream in order to safely handle incomplete images
+ // without reading out of bounds memory.
+ const void* subStreamMemoryBase = SkTAddOffset<const void>(memoryBase,
+ subStreamStartPosition);
+ const size_t subStreamLength = length - subStreamStartPosition;
+ // This call does not transfer ownership of the subStreamMemoryBase.
+ SkMemoryStream subStream(subStreamMemoryBase, subStreamLength, false);
+
+ // FIXME: If decodeIcoMask does not succeed, is there a way that we can
+ // indicate the decode was incomplete?
+ decodeIcoMask(&subStream, dstInfo, dst, dstRowBytes);
+ }
+
+ return height;
+}
+
+void SkBmpStandardCodec::decodeIcoMask(SkStream* stream, const SkImageInfo& dstInfo,
+ void* dst, size_t dstRowBytes) {
+ // BMPs in ICO have transparency, so this cannot be 565, and this mask
+ // prevents us from using kIndex8. The below code depends on the output
+ // being an SkPMColor.
+ SkASSERT(kRGBA_8888_SkColorType == dstInfo.colorType() ||
+ kBGRA_8888_SkColorType == dstInfo.colorType());
+
+ // If we are sampling, make sure that we only mask the sampled pixels.
+ // We do not need to worry about sampling in the y-dimension because that
+ // should be handled by SkSampledCodec.
+ const int sampleX = fSwizzler->sampleX();
+ const int sampledWidth = get_scaled_dimension(this->getInfo().width(), sampleX);
+ const int srcStartX = get_start_coord(sampleX);
+
+
+ SkPMColor* dstPtr = (SkPMColor*) dst;
+ for (int y = 0; y < dstInfo.height(); y++) {
+ // The srcBuffer will at least be large enough
+ if (stream->read(fSrcBuffer.get(), fAndMaskRowBytes) != fAndMaskRowBytes) {
+ SkCodecPrintf("Warning: incomplete AND mask for bmp-in-ico.\n");
+ return;
+ }
+
+ int row = this->getDstRow(y, dstInfo.height());
+
+ SkPMColor* dstRow =
+ SkTAddOffset<SkPMColor>(dstPtr, row * dstRowBytes);
+
+ int srcX = srcStartX;
+ for (int dstX = 0; dstX < sampledWidth; dstX++) {
+ int quotient;
+ int modulus;
+ SkTDivMod(srcX, 8, &quotient, &modulus);
+ uint32_t shift = 7 - modulus;
+ uint32_t alphaBit = (fSrcBuffer.get()[quotient] >> shift) & 0x1;
+ dstRow[dstX] &= alphaBit - 1;
+ srcX += sampleX;
+ }
+ }
+}
+
+uint64_t SkBmpStandardCodec::onGetFillValue(const SkImageInfo& dstInfo) const {
+ const SkPMColor* colorPtr = get_color_ptr(fColorTable.get());
+ if (colorPtr) {
+ return get_color_table_fill_value(dstInfo.colorType(), dstInfo.alphaType(), colorPtr, 0,
+ nullptr);
+ }
+ return INHERITED::onGetFillValue(dstInfo);
+}
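
The bit arithmetic in decodeIcoMask() is easy to verify in isolation. A standalone sketch (plain C++, no Skia types) that applies one row of a 1-bit-per-pixel AND mask to a row of 32-bit pixels, where a set mask bit means transparent:

    #include <cstdint>
    #include <cstdio>

    // Clear pixels whose AND-mask bit is 1. Mask bits are packed MSB-first,
    // matching the "7 - modulus" shift used in decodeIcoMask() above.
    void ApplyAndMaskRow(uint32_t* row, int width, const uint8_t* maskRow) {
        for (int x = 0; x < width; x++) {
            const uint32_t bit = (maskRow[x / 8] >> (7 - (x % 8))) & 0x1;
            row[x] &= bit - 1;   // bit==1 -> &= 0x00000000, bit==0 -> &= 0xFFFFFFFF
        }
    }

    int main() {
        uint32_t row[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        const uint8_t mask = 0xA0;                    // bits 10100000: pixels 0 and 2 masked
        ApplyAndMaskRow(row, 8, &mask);
        for (uint32_t p : row) {
            std::printf("%u ", (unsigned) p);         // prints: 0 2 0 4 5 6 7 8
        }
        std::printf("\n");
        return 0;
    }
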
diff --git a/gfx/skia/skia/src/codec/SkBmpStandardCodec.h b/gfx/skia/skia/src/codec/SkBmpStandardCodec.h
new file mode 100644
index 000000000..7039cf7ef
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkBmpStandardCodec.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBmpCodec.h"
+#include "SkColorTable.h"
+#include "SkImageInfo.h"
+#include "SkSwizzler.h"
+#include "SkTypes.h"
+
+/*
+ * This class implements the decoding for bmp images that use "standard" modes,
+ * which essentially means they do not contain bit masks or RLE codes.
+ */
+class SkBmpStandardCodec : public SkBmpCodec {
+public:
+
+ /*
+ * Creates an instance of the decoder
+ *
+ * Called only by SkBmpCodec::NewFromStream
+ * There should be no other callers despite this being public
+ *
+ * @param info contains properties of the encoded data
+ * @param stream the stream of encoded image data
+ * @param bitsPerPixel the number of bits used to store each pixel
+ * @param numColors the number of colors in the color table
+ * @param bytesPerColor the number of bytes in the stream used to represent
+ * each color in the color table
+ * @param offset the offset of the image pixel data from the end of the
+ * headers
+ * @param rowOrder indicates whether rows are ordered top-down or bottom-up
+ * @param isOpaque indicates if the bmp itself is opaque (before applying
+ * the ico mask, if there is one)
+ * @param inIco indicates if the bmp is embedded in an ico file
+ */
+ SkBmpStandardCodec(int width, int height, const SkEncodedInfo& info, SkStream* stream,
+ uint16_t bitsPerPixel, uint32_t numColors, uint32_t bytesPerColor,
+ uint32_t offset, SkCodec::SkScanlineOrder rowOrder, bool isOpaque,
+ bool inIco);
+
+protected:
+
+ Result onGetPixels(const SkImageInfo& dstInfo, void* dst,
+ size_t dstRowBytes, const Options&, SkPMColor*,
+ int*, int*) override;
+
+ bool onInIco() const override {
+ return fInIco;
+ }
+
+ SkCodec::Result prepareToDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options, SkPMColor inputColorPtr[],
+ int* inputColorCount) override;
+
+
+ uint64_t onGetFillValue(const SkImageInfo&) const override;
+
+ SkSampler* getSampler(bool createIfNecessary) override {
+ SkASSERT(fSwizzler);
+ return fSwizzler;
+ }
+
+private:
+
+ /*
+ * Creates the color table
+ * Sets colorCount to the new color count if it is non-nullptr
+ */
+ bool createColorTable(SkColorType colorType, SkAlphaType alphaType, int* colorCount);
+
+ void initializeSwizzler(const SkImageInfo& dstInfo, const Options& opts);
+
+ int decodeRows(const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes,
+ const Options& opts) override;
+
+ /*
+ * @param stream This may be a pointer to the stream owned by the parent SkCodec
+ * or a sub-stream of the stream owned by the parent SkCodec.
+ * Either way, this stream is unowned.
+ */
+ void decodeIcoMask(SkStream* stream, const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes);
+
+ SkAutoTUnref<SkColorTable> fColorTable; // owned
+ // fNumColors is the number specified in the header, or 0 if not present in the header.
+ const uint32_t fNumColors;
+ const uint32_t fBytesPerColor;
+ const uint32_t fOffset;
+ SkAutoTDelete<SkSwizzler> fSwizzler;
+ SkAutoTDeleteArray<uint8_t> fSrcBuffer;
+ const bool fIsOpaque;
+ const bool fInIco;
+ const size_t fAndMaskRowBytes; // only used for fInIco decodes
+
+ typedef SkBmpCodec INHERITED;
+};
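
The fAndMaskRowBytes member above is the storage needed for one row of the 1-bit-per-pixel ICO AND mask: pixels are packed eight to a byte and each row is padded to a 4-byte boundary. A quick standalone check of that arithmetic (mirroring the intent of SkAlign4(compute_row_bytes(width, 1)), not the Skia helpers themselves):

    #include <cassert>
    #include <cstddef>

    // Bytes needed for one row of a 1-bpp mask, padded to a 4-byte boundary.
    size_t AndMaskRowBytes(int width) {
        const size_t packed = (width + 7) / 8;   // eight pixels per byte, rounded up
        return (packed + 3) & ~size_t(3);        // align up to a multiple of 4
    }

    int main() {
        assert(AndMaskRowBytes(15) == 4);   // 2 packed bytes -> padded to 4
        assert(AndMaskRowBytes(33) == 8);   // 5 packed bytes -> padded to 8
        assert(AndMaskRowBytes(64) == 8);   // exactly 8 bytes, no padding needed
        return 0;
    }
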
diff --git a/gfx/skia/skia/src/codec/SkCodec.cpp b/gfx/skia/skia/src/codec/SkCodec.cpp
new file mode 100644
index 000000000..84afc2bde
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkCodec.cpp
@@ -0,0 +1,485 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBmpCodec.h"
+#include "SkCodec.h"
+#include "SkCodecPriv.h"
+#include "SkColorSpace.h"
+#include "SkData.h"
+#include "SkGifCodec.h"
+#include "SkHalf.h"
+#include "SkIcoCodec.h"
+#include "SkJpegCodec.h"
+#ifdef SK_HAS_PNG_LIBRARY
+#include "SkPngCodec.h"
+#endif
+#include "SkRawCodec.h"
+#include "SkStream.h"
+#include "SkWbmpCodec.h"
+#include "SkWebpCodec.h"
+
+struct DecoderProc {
+ bool (*IsFormat)(const void*, size_t);
+ SkCodec* (*NewFromStream)(SkStream*);
+};
+
+static const DecoderProc gDecoderProcs[] = {
+#ifdef SK_HAS_JPEG_LIBRARY
+ { SkJpegCodec::IsJpeg, SkJpegCodec::NewFromStream },
+#endif
+#ifdef SK_HAS_WEBP_LIBRARY
+ { SkWebpCodec::IsWebp, SkWebpCodec::NewFromStream },
+#endif
+#ifdef SK_HAS_GIF_LIBRARY
+ { SkGifCodec::IsGif, SkGifCodec::NewFromStream },
+#endif
+#ifdef SK_HAS_PNG_LIBRARY
+ { SkIcoCodec::IsIco, SkIcoCodec::NewFromStream },
+#endif
+ { SkBmpCodec::IsBmp, SkBmpCodec::NewFromStream },
+ { SkWbmpCodec::IsWbmp, SkWbmpCodec::NewFromStream }
+};
+
+size_t SkCodec::MinBufferedBytesNeeded() {
+ return WEBP_VP8_HEADER_SIZE;
+}
+
+SkCodec* SkCodec::NewFromStream(SkStream* stream,
+ SkPngChunkReader* chunkReader) {
+ if (!stream) {
+ return nullptr;
+ }
+
+ SkAutoTDelete<SkStream> streamDeleter(stream);
+
+ // 14 is enough to read all of the supported types.
+ const size_t bytesToRead = 14;
+ SkASSERT(bytesToRead <= MinBufferedBytesNeeded());
+
+ char buffer[bytesToRead];
+ size_t bytesRead = stream->peek(buffer, bytesToRead);
+
+ // It is also possible to have a complete image less than bytesToRead bytes
+ // (e.g. a 1 x 1 wbmp), meaning peek() would return less than bytesToRead.
+ // Assume that if bytesRead < bytesToRead, but > 0, the stream is shorter
+ // than bytesToRead, so pass that directly to the decoder.
+ // It also is possible the stream uses too small a buffer for peeking, but
+ // we trust the caller to use a large enough buffer.
+
+ if (0 == bytesRead) {
+ // TODO: After implementing peek in CreateJavaOutputStreamAdaptor.cpp, this
+ // printf could be useful to notice failures.
+ // SkCodecPrintf("Encoded image data failed to peek!\n");
+
+ // It is possible the stream does not support peeking, but does support
+ // rewinding.
+ // Attempt to read() and pass the actual amount read to the decoder.
+ bytesRead = stream->read(buffer, bytesToRead);
+ if (!stream->rewind()) {
+ SkCodecPrintf("Encoded image data could not peek or rewind to determine format!\n");
+ return nullptr;
+ }
+ }
+
+ // PNG is special, since we want to be able to supply an SkPngChunkReader.
+ // But this code follows the same pattern as the loop.
+#ifdef SK_HAS_PNG_LIBRARY
+ if (SkPngCodec::IsPng(buffer, bytesRead)) {
+ return SkPngCodec::NewFromStream(streamDeleter.release(), chunkReader);
+ } else
+#endif
+ {
+ for (DecoderProc proc : gDecoderProcs) {
+ if (proc.IsFormat(buffer, bytesRead)) {
+ return proc.NewFromStream(streamDeleter.release());
+ }
+ }
+
+#ifdef SK_CODEC_DECODES_RAW
+ // Try to treat the input as RAW if all the other checks failed.
+ return SkRawCodec::NewFromStream(streamDeleter.release());
+#endif
+ }
+
+ return nullptr;
+}
+
+SkCodec* SkCodec::NewFromData(sk_sp<SkData> data, SkPngChunkReader* reader) {
+ if (!data) {
+ return nullptr;
+ }
+ return NewFromStream(new SkMemoryStream(data), reader);
+}
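+// Illustrative usage sketch (not part of this file's logic): a typical caller pairs
+// NewFromStream()/NewFromData() with getInfo() and getPixels(). The file name and the
+// requested color/alpha types below are hypothetical.
+//
+//     SkAutoTDelete<SkCodec> codec(SkCodec::NewFromStream(new SkFILEStream("image.png")));
+//     if (codec) {
+//         SkImageInfo info = codec->getInfo().makeColorType(kN32_SkColorType)
+//                                            .makeAlphaType(kPremul_SkAlphaType);
+//         SkAutoTMalloc<uint8_t> pixels(info.getSafeSize(info.minRowBytes()));
+//         SkCodec::Result result = codec->getPixels(info, pixels.get(), info.minRowBytes());
+//         // kSuccess and kIncompleteInput both leave decoded rows in the buffer.
+//     }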
+
+SkCodec::SkCodec(int width, int height, const SkEncodedInfo& info, SkStream* stream,
+ sk_sp<SkColorSpace> colorSpace, Origin origin)
+ : fEncodedInfo(info)
+ , fSrcInfo(info.makeImageInfo(width, height, std::move(colorSpace)))
+ , fStream(stream)
+ , fNeedsRewind(false)
+ , fOrigin(origin)
+ , fDstInfo()
+ , fOptions()
+ , fCurrScanline(-1)
+{}
+
+SkCodec::SkCodec(const SkEncodedInfo& info, const SkImageInfo& imageInfo, SkStream* stream,
+ Origin origin)
+ : fEncodedInfo(info)
+ , fSrcInfo(imageInfo)
+ , fStream(stream)
+ , fNeedsRewind(false)
+ , fOrigin(origin)
+ , fDstInfo()
+ , fOptions()
+ , fCurrScanline(-1)
+{}
+
+SkCodec::~SkCodec() {}
+
+bool SkCodec::rewindIfNeeded() {
+ if (!fStream) {
+ // Some codecs do not have a stream. They may hold onto their own data or another codec.
+ // They must handle rewinding themselves.
+ return true;
+ }
+
+ // Store the value of fNeedsRewind so we can update it. Next read will
+ // require a rewind.
+ const bool needsRewind = fNeedsRewind;
+ fNeedsRewind = true;
+ if (!needsRewind) {
+ return true;
+ }
+
+ // startScanlineDecode will need to be called before decoding scanlines.
+ fCurrScanline = -1;
+ // startIncrementalDecode will need to be called before incrementalDecode.
+ fStartedIncrementalDecode = false;
+
+ if (!fStream->rewind()) {
+ return false;
+ }
+
+ return this->onRewind();
+}
+
+#define CHECK_COLOR_TABLE \
+ if (kIndex_8_SkColorType == info.colorType()) { \
+ if (nullptr == ctable || nullptr == ctableCount) { \
+ return SkCodec::kInvalidParameters; \
+ } \
+ } else { \
+ if (ctableCount) { \
+ *ctableCount = 0; \
+ } \
+ ctableCount = nullptr; \
+ ctable = nullptr; \
+ }
+
+
+SkCodec::Result SkCodec::getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const Options* options, SkPMColor ctable[], int* ctableCount) {
+ if (kUnknown_SkColorType == info.colorType()) {
+ return kInvalidConversion;
+ }
+ if (nullptr == pixels) {
+ return kInvalidParameters;
+ }
+ if (rowBytes < info.minRowBytes()) {
+ return kInvalidParameters;
+ }
+
+ CHECK_COLOR_TABLE;
+
+ if (!this->rewindIfNeeded()) {
+ return kCouldNotRewind;
+ }
+
+ // Default options.
+ Options optsStorage;
+ if (nullptr == options) {
+ options = &optsStorage;
+ } else if (options->fSubset) {
+ SkIRect subset(*options->fSubset);
+ if (!this->onGetValidSubset(&subset) || subset != *options->fSubset) {
+ // FIXME: How to differentiate between not supporting subset at all
+ // and not supporting this particular subset?
+ return kUnimplemented;
+ }
+ }
+
+ // FIXME: Support subsets somehow? Note that this works for SkWebpCodec
+ // because it supports arbitrary scaling/subset combinations.
+ if (!this->dimensionsSupported(info.dimensions())) {
+ return kInvalidScale;
+ }
+
+ fDstInfo = info;
+ // FIXME: fOptions should be updated to options here, since fillIncompleteImage (called below
+ // in this method) accesses it. Without updating, it uses the old value.
+ //fOptions = *options;
+
+ // On an incomplete decode, the subclass will specify the number of scanlines that it decoded
+ // successfully.
+ int rowsDecoded = 0;
+ const Result result = this->onGetPixels(info, pixels, rowBytes, *options, ctable, ctableCount,
+ &rowsDecoded);
+
+ if ((kIncompleteInput == result || kSuccess == result) && ctableCount) {
+ SkASSERT(*ctableCount >= 0 && *ctableCount <= 256);
+ }
+
+ // A return value of kIncompleteInput indicates a truncated image stream.
+ // In this case, we will fill any uninitialized memory with a default value.
+ // Some subclasses will take care of filling any uninitialized memory on
+ // their own. They indicate that all of the memory has been filled by
+ // setting rowsDecoded equal to the height.
+ if (kIncompleteInput == result && rowsDecoded != info.height()) {
+ this->fillIncompleteImage(info, pixels, rowBytes, options->fZeroInitialized, info.height(),
+ rowsDecoded);
+ }
+
+ return result;
+}
+
+SkCodec::Result SkCodec::getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes) {
+ return this->getPixels(info, pixels, rowBytes, nullptr, nullptr, nullptr);
+}
+
+SkCodec::Result SkCodec::startIncrementalDecode(const SkImageInfo& info, void* pixels,
+ size_t rowBytes, const SkCodec::Options* options, SkPMColor* ctable, int* ctableCount) {
+ fStartedIncrementalDecode = false;
+
+ if (kUnknown_SkColorType == info.colorType()) {
+ return kInvalidConversion;
+ }
+ if (nullptr == pixels) {
+ return kInvalidParameters;
+ }
+
+ // Ensure that valid color ptrs are passed in for kIndex8 color type
+ CHECK_COLOR_TABLE;
+
+ // FIXME: If the rows come after the rows of a previous incremental decode,
+ // we might be able to skip the rewind, but only the implementation knows
+ // that. (e.g. PNG will always need to rewind, since we called longjmp, but
+ // a bottom-up BMP could skip rewinding if the new rows are above the old
+ // rows.)
+ if (!this->rewindIfNeeded()) {
+ return kCouldNotRewind;
+ }
+
+ // Set options.
+ Options optsStorage;
+ if (nullptr == options) {
+ options = &optsStorage;
+ } else if (options->fSubset) {
+ SkIRect size = SkIRect::MakeSize(info.dimensions());
+ if (!size.contains(*options->fSubset)) {
+ return kInvalidParameters;
+ }
+
+ const int top = options->fSubset->top();
+ const int bottom = options->fSubset->bottom();
+ if (top < 0 || top >= info.height() || top >= bottom || bottom > info.height()) {
+ return kInvalidParameters;
+ }
+ }
+
+ if (!this->dimensionsSupported(info.dimensions())) {
+ return kInvalidScale;
+ }
+
+ fDstInfo = info;
+ fOptions = *options;
+
+ const Result result = this->onStartIncrementalDecode(info, pixels, rowBytes,
+ fOptions, ctable, ctableCount);
+ if (kSuccess == result) {
+ fStartedIncrementalDecode = true;
+ } else if (kUnimplemented == result) {
+ // FIXME: This is temporarily necessary, until we transition SkCodec
+ // implementations from scanline decoding to incremental decoding.
+ // SkAndroidCodec will first attempt to use incremental decoding, but
+ // will fall back to scanline decoding if incremental returns
+ // kUnimplemented. rewindIfNeeded(), above, set fNeedsRewind to true
+ // (after potentially rewinding), but we do not want the next call to
+ // startScanlineDecode() to do a rewind.
+ fNeedsRewind = false;
+ }
+ return result;
+}
+
+
+SkCodec::Result SkCodec::startScanlineDecode(const SkImageInfo& info,
+ const SkCodec::Options* options, SkPMColor ctable[], int* ctableCount) {
+ // Reset fCurrScanline in case of failure.
+ fCurrScanline = -1;
+ // Ensure that valid color ptrs are passed in for kIndex8 color type
+ CHECK_COLOR_TABLE;
+
+ if (!this->rewindIfNeeded()) {
+ return kCouldNotRewind;
+ }
+
+ // Set options.
+ Options optsStorage;
+ if (nullptr == options) {
+ options = &optsStorage;
+ } else if (options->fSubset) {
+ SkIRect size = SkIRect::MakeSize(info.dimensions());
+ if (!size.contains(*options->fSubset)) {
+ return kInvalidInput;
+ }
+
+ // We only support subsetting in the x-dimension for scanline decoder.
+ // Subsetting in the y-dimension can be accomplished using skipScanlines().
+ if (options->fSubset->top() != 0 || options->fSubset->height() != info.height()) {
+ return kInvalidInput;
+ }
+ }
+
+ // FIXME: Support subsets somehow?
+ if (!this->dimensionsSupported(info.dimensions())) {
+ return kInvalidScale;
+ }
+
+ const Result result = this->onStartScanlineDecode(info, *options, ctable, ctableCount);
+ if (result != SkCodec::kSuccess) {
+ return result;
+ }
+
+ fCurrScanline = 0;
+ fDstInfo = info;
+ fOptions = *options;
+ return kSuccess;
+}
+
+#undef CHECK_COLOR_TABLE
+
+SkCodec::Result SkCodec::startScanlineDecode(const SkImageInfo& info) {
+ return this->startScanlineDecode(info, nullptr, nullptr, nullptr);
+}
+
+int SkCodec::getScanlines(void* dst, int countLines, size_t rowBytes) {
+ if (fCurrScanline < 0) {
+ return 0;
+ }
+
+ SkASSERT(!fDstInfo.isEmpty());
+ if (countLines <= 0 || fCurrScanline + countLines > fDstInfo.height()) {
+ return 0;
+ }
+
+ const int linesDecoded = this->onGetScanlines(dst, countLines, rowBytes);
+ if (linesDecoded < countLines) {
+ this->fillIncompleteImage(this->dstInfo(), dst, rowBytes, this->options().fZeroInitialized,
+ countLines, linesDecoded);
+ }
+ fCurrScanline += countLines;
+ return linesDecoded;
+}
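+// Illustrative scanline-decoding sketch (hypothetical caller code, not exercised here):
+//
+//     SkImageInfo info = codec->getInfo().makeColorType(kN32_SkColorType)
+//                                        .makeAlphaType(kPremul_SkAlphaType);
+//     if (SkCodec::kSuccess == codec->startScanlineDecode(info)) {
+//         SkAutoTMalloc<uint8_t> row(info.minRowBytes());
+//         for (int y = 0; y < info.height(); y++) {
+//             codec->getScanlines(row.get(), 1, info.minRowBytes());
+//             // Each call decodes the next encoded row; outputScanline(y) tells the
+//             // caller where that row belongs for bottom-up or interlaced sources.
+//         }
+//     }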
+
+bool SkCodec::skipScanlines(int countLines) {
+ if (fCurrScanline < 0) {
+ return false;
+ }
+
+ SkASSERT(!fDstInfo.isEmpty());
+ if (countLines < 0 || fCurrScanline + countLines > fDstInfo.height()) {
+ // Arguably, we could just skip the scanlines which are remaining,
+ // and return true. We choose to return false so the client
+ // can catch their bug.
+ return false;
+ }
+
+ bool result = this->onSkipScanlines(countLines);
+ fCurrScanline += countLines;
+ return result;
+}
+
+int SkCodec::outputScanline(int inputScanline) const {
+ SkASSERT(0 <= inputScanline && inputScanline < this->getInfo().height());
+ return this->onOutputScanline(inputScanline);
+}
+
+int SkCodec::onOutputScanline(int inputScanline) const {
+ switch (this->getScanlineOrder()) {
+ case kTopDown_SkScanlineOrder:
+ return inputScanline;
+ case kBottomUp_SkScanlineOrder:
+ return this->getInfo().height() - inputScanline - 1;
+ default:
+ // This case indicates an interlaced gif and is implemented by SkGifCodec.
+ SkASSERT(false);
+ return 0;
+ }
+}
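+// Example: for a 10-row kBottomUp image, onOutputScanline() maps input row 0 to output
+// row 9, input row 1 to output row 8, and so on, down to input row 9 mapping to row 0.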
+
+uint64_t SkCodec::onGetFillValue(const SkImageInfo& dstInfo) const {
+ switch (dstInfo.colorType()) {
+ case kRGBA_F16_SkColorType: {
+ static constexpr uint64_t transparentColor = 0;
+ static constexpr uint64_t opaqueColor = ((uint64_t) SK_Half1) << 48;
+ return (kOpaque_SkAlphaType == fSrcInfo.alphaType()) ? opaqueColor : transparentColor;
+ }
+ default: {
+ // This not only handles the kN32 case, but also k565, kGray8, kIndex8, since
+ // the low bits are zeros.
+ return (kOpaque_SkAlphaType == fSrcInfo.alphaType()) ?
+ SK_ColorBLACK : SK_ColorTRANSPARENT;
+ }
+ }
+}
+
+static void fill_proc(const SkImageInfo& info, void* dst, size_t rowBytes,
+ uint64_t colorOrIndex, SkCodec::ZeroInitialized zeroInit, SkSampler* sampler) {
+ if (sampler) {
+ sampler->fill(info, dst, rowBytes, colorOrIndex, zeroInit);
+ } else {
+ SkSampler::Fill(info, dst, rowBytes, colorOrIndex, zeroInit);
+ }
+}
+
+void SkCodec::fillIncompleteImage(const SkImageInfo& info, void* dst, size_t rowBytes,
+ ZeroInitialized zeroInit, int linesRequested, int linesDecoded) {
+
+ void* fillDst;
+ const uint64_t fillValue = this->getFillValue(info);
+ const int linesRemaining = linesRequested - linesDecoded;
+ SkSampler* sampler = this->getSampler(false);
+
+ int fillWidth = info.width();
+ if (fOptions.fSubset) {
+ fillWidth = fOptions.fSubset->width();
+ }
+
+ switch (this->getScanlineOrder()) {
+ case kTopDown_SkScanlineOrder: {
+ const SkImageInfo fillInfo = info.makeWH(fillWidth, linesRemaining);
+ fillDst = SkTAddOffset<void>(dst, linesDecoded * rowBytes);
+ fill_proc(fillInfo, fillDst, rowBytes, fillValue, zeroInit, sampler);
+ break;
+ }
+ case kBottomUp_SkScanlineOrder: {
+ fillDst = dst;
+ const SkImageInfo fillInfo = info.makeWH(fillWidth, linesRemaining);
+ fill_proc(fillInfo, fillDst, rowBytes, fillValue, zeroInit, sampler);
+ break;
+ }
+ case kOutOfOrder_SkScanlineOrder: {
+ SkASSERT(1 == linesRequested || this->getInfo().height() == linesRequested);
+ const SkImageInfo fillInfo = info.makeWH(fillWidth, 1);
+ for (int srcY = linesDecoded; srcY < linesRequested; srcY++) {
+ fillDst = SkTAddOffset<void>(dst, this->outputScanline(srcY) * rowBytes);
+ fill_proc(fillInfo, fillDst, rowBytes, fillValue, zeroInit, sampler);
+ }
+ break;
+ }
+ }
+}
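+// Worked example: for a 10-row top-down decode where only 6 rows were produced, the fill
+// covers destination rows 6..9 starting at dst + 6 * rowBytes. A bottom-up decode fills
+// the same number of rows starting at dst, since the rows that were never decoded map to
+// the top of the destination memory.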
diff --git a/gfx/skia/skia/src/codec/SkCodecImageGenerator.cpp b/gfx/skia/skia/src/codec/SkCodecImageGenerator.cpp
new file mode 100644
index 000000000..8108f0de4
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkCodecImageGenerator.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCodecImageGenerator.h"
+
+SkImageGenerator* SkCodecImageGenerator::NewFromEncodedCodec(sk_sp<SkData> data) {
+ SkCodec* codec = SkCodec::NewFromData(data);
+ if (nullptr == codec) {
+ return nullptr;
+ }
+
+ return new SkCodecImageGenerator(codec, data);
+}
+
+static SkImageInfo make_premul(const SkImageInfo& info) {
+ if (kUnpremul_SkAlphaType == info.alphaType()) {
+ return info.makeAlphaType(kPremul_SkAlphaType);
+ }
+
+ return info;
+}
+
+SkCodecImageGenerator::SkCodecImageGenerator(SkCodec* codec, sk_sp<SkData> data)
+ : INHERITED(make_premul(codec->getInfo()))
+ , fCodec(codec)
+ , fData(std::move(data))
+{}
+
+SkData* SkCodecImageGenerator::onRefEncodedData(SK_REFENCODEDDATA_CTXPARAM) {
+ return SkRef(fData.get());
+}
+
+bool SkCodecImageGenerator::onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ SkPMColor ctable[], int* ctableCount) {
+
+ // FIXME (msarett):
+ // We don't give the client the chance to request an SkColorSpace. Until we improve
+ // the API, let's assume that they want legacy mode.
+ SkImageInfo decodeInfo = info.makeColorSpace(nullptr);
+
+ SkCodec::Result result = fCodec->getPixels(decodeInfo, pixels, rowBytes, nullptr, ctable,
+ ctableCount);
+ switch (result) {
+ case SkCodec::kSuccess:
+ case SkCodec::kIncompleteInput:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool SkCodecImageGenerator::onQueryYUV8(SkYUVSizeInfo* sizeInfo, SkYUVColorSpace* colorSpace) const
+{
+ return fCodec->queryYUV8(sizeInfo, colorSpace);
+}
+
+bool SkCodecImageGenerator::onGetYUV8Planes(const SkYUVSizeInfo& sizeInfo, void* planes[3]) {
+ SkCodec::Result result = fCodec->getYUV8Planes(sizeInfo, planes);
+
+ switch (result) {
+ case SkCodec::kSuccess:
+ case SkCodec::kIncompleteInput:
+ return true;
+ default:
+ return false;
+ }
+}
diff --git a/gfx/skia/skia/src/codec/SkCodecImageGenerator.h b/gfx/skia/skia/src/codec/SkCodecImageGenerator.h
new file mode 100644
index 000000000..22a39aaaa
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkCodecImageGenerator.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCodec.h"
+#include "SkData.h"
+#include "SkImageGenerator.h"
+
+class SkCodecImageGenerator : public SkImageGenerator {
+public:
+ /*
+ * If this data represents an encoded image that we know how to decode,
+ * return an SkCodecImageGenerator. Otherwise return nullptr.
+ */
+ static SkImageGenerator* NewFromEncodedCodec(sk_sp<SkData>);
+ static SkImageGenerator* NewFromEncodedCodec(SkData* data) {
+ return NewFromEncodedCodec(sk_ref_sp(data));
+ }
+
+protected:
+ SkData* onRefEncodedData(SK_REFENCODEDDATA_CTXPARAM) override;
+
+ bool onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes, SkPMColor ctable[],
+ int* ctableCount) override;
+
+ bool onQueryYUV8(SkYUVSizeInfo*, SkYUVColorSpace*) const override;
+
+ bool onGetYUV8Planes(const SkYUVSizeInfo&, void* planes[3]) override;
+
+private:
+ /*
+ * Takes ownership of codec
+ */
+ SkCodecImageGenerator(SkCodec* codec, sk_sp<SkData>);
+
+ SkAutoTDelete<SkCodec> fCodec;
+ sk_sp<SkData> fData;
+
+ typedef SkImageGenerator INHERITED;
+};
diff --git a/gfx/skia/skia/src/codec/SkCodecPriv.h b/gfx/skia/skia/src/codec/SkCodecPriv.h
new file mode 100644
index 000000000..b93def879
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkCodecPriv.h
@@ -0,0 +1,402 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCodecPriv_DEFINED
+#define SkCodecPriv_DEFINED
+
+#include "SkColorPriv.h"
+#include "SkColorSpaceXform.h"
+#include "SkColorTable.h"
+#include "SkImageInfo.h"
+#include "SkTypes.h"
+
+#ifdef SK_PRINT_CODEC_MESSAGES
+ #define SkCodecPrintf SkDebugf
+#else
+ #define SkCodecPrintf(...)
+#endif
+
+// FIXME: Consider sharing with dm, nanobench, and tools.
+static inline float get_scale_from_sample_size(int sampleSize) {
+ return 1.0f / ((float) sampleSize);
+}
+
+static inline bool is_valid_subset(const SkIRect& subset, const SkISize& imageDims) {
+ return SkIRect::MakeSize(imageDims).contains(subset);
+}
+
+/*
+ * returns a scaled dimension based on the original dimension and the sampleSize
+ * NOTE: we round down here for scaled dimension to match the behavior of SkImageDecoder
+ * FIXME: I think we should call this get_sampled_dimension().
+ */
+static inline int get_scaled_dimension(int srcDimension, int sampleSize) {
+ if (sampleSize > srcDimension) {
+ return 1;
+ }
+ return srcDimension / sampleSize;
+}
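+// Example: get_scaled_dimension(10, 4) == 2 (integer division rounds down), and
+// get_scaled_dimension(10, 16) == 1 (the sample size exceeds the dimension).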
+
+/*
+ * Returns the first coordinate that we will keep during a scaled decode.
+ * The output can be interpreted as an x-coordinate or a y-coordinate.
+ *
+ * This does not need to be called and is not called when sampleFactor == 1.
+ */
+static inline int get_start_coord(int sampleFactor) { return sampleFactor / 2; }
+
+/*
+ * Given a coordinate in the original image, this returns the corresponding
+ * coordinate in the scaled image. This function is meaningless if
+ * is_coord_necessary() returns false.
+ * The output can be interpreted as an x-coordinate or a y-coordinate.
+ *
+ * This does not need to be called and is not called when sampleFactor == 1.
+ */
+static inline int get_dst_coord(int srcCoord, int sampleFactor) { return srcCoord / sampleFactor; }
+
+/*
+ * When scaling, we will discard certain y-coordinates (rows) and
+ * x-coordinates (columns). This function returns true if we should keep the
+ * coordinate and false otherwise.
+ * The inputs may be x-coordinates or y-coordinates.
+ *
+ * This does not need to be called and is not called when sampleFactor == 1.
+ */
+static inline bool is_coord_necessary(int srcCoord, int sampleFactor, int scaledDim) {
+ // Get the first coordinate that we want to keep
+ int startCoord = get_start_coord(sampleFactor);
+
+ // Return false on edge cases
+ if (srcCoord < startCoord || get_dst_coord(srcCoord, sampleFactor) >= scaledDim) {
+ return false;
+ }
+
+ // Every sampleFactor rows are necessary
+ return ((srcCoord - startCoord) % sampleFactor) == 0;
+}
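+// Worked example: with sampleFactor == 4 and a source dimension of 10, scaledDim is 2 and
+// get_start_coord(4) == 2, so only source coordinates 2 and 6 are kept out of 0..9;
+// 0 and 1 fail the start-coordinate check, and the rest fail the modulus or range check.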
+
+static inline bool valid_alpha(SkAlphaType dstAlpha, SkAlphaType srcAlpha) {
+ if (kUnknown_SkAlphaType == dstAlpha) {
+ return false;
+ }
+
+ if (srcAlpha != dstAlpha) {
+ if (kOpaque_SkAlphaType == srcAlpha) {
+            // If the source is opaque, we can support any destination alpha type.
+ SkCodecPrintf("Warning: an opaque image should be decoded as opaque "
+ "- it is being decoded as non-opaque, which will draw slower\n");
+ return true;
+ }
+
+ // The source is not opaque
+ switch (dstAlpha) {
+ case kPremul_SkAlphaType:
+ case kUnpremul_SkAlphaType:
+ // The source is not opaque, so either of these is okay
+ break;
+ default:
+ // We cannot decode a non-opaque image to opaque (or unknown)
+ return false;
+ }
+ }
+ return true;
+}
+
+/*
+ * Original version of conversion_possible that does not account for color spaces.
+ * Used by codecs that have not been updated to support color spaces.
+ *
+ * Most of our codecs support the same conversions:
+ * - opaque to any alpha type
+ * - 565 only if opaque
+ * - premul to unpremul and vice versa
+ * - always support RGBA, BGRA
+ * - otherwise match the src color type
+ */
+static inline bool conversion_possible_ignore_color_space(const SkImageInfo& dst,
+ const SkImageInfo& src) {
+ // Ensure the alpha type is valid
+ if (!valid_alpha(dst.alphaType(), src.alphaType())) {
+ return false;
+ }
+
+ // Check for supported color types
+ switch (dst.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ return true;
+ case kRGB_565_SkColorType:
+ return kOpaque_SkAlphaType == src.alphaType();
+ default:
+ return dst.colorType() == src.colorType();
+ }
+}
+
+/*
+ * If there is a color table, get a pointer to the colors; otherwise return nullptr
+ */
+static inline const SkPMColor* get_color_ptr(SkColorTable* colorTable) {
+ return nullptr != colorTable ? colorTable->readColors() : nullptr;
+}
+
+static inline SkColorSpaceXform::ColorFormat select_xform_format(SkColorType colorType) {
+ switch (colorType) {
+ case kRGBA_8888_SkColorType:
+ return SkColorSpaceXform::kRGBA_8888_ColorFormat;
+ case kBGRA_8888_SkColorType:
+ return SkColorSpaceXform::kBGRA_8888_ColorFormat;
+ case kRGBA_F16_SkColorType:
+ return SkColorSpaceXform::kRGBA_F16_ColorFormat;
+ default:
+ SkASSERT(false);
+ return SkColorSpaceXform::kRGBA_8888_ColorFormat;
+ }
+}
+
+/*
+ * Given that the encoded image uses a color table, return the fill value
+ */
+static inline uint64_t get_color_table_fill_value(SkColorType dstColorType, SkAlphaType alphaType,
+ const SkPMColor* colorPtr, uint8_t fillIndex, SkColorSpaceXform* colorXform) {
+ SkASSERT(nullptr != colorPtr);
+ switch (dstColorType) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ return colorPtr[fillIndex];
+ case kRGB_565_SkColorType:
+ return SkPixel32ToPixel16(colorPtr[fillIndex]);
+ case kIndex_8_SkColorType:
+ return fillIndex;
+ case kRGBA_F16_SkColorType: {
+ SkASSERT(colorXform);
+ uint64_t dstColor;
+ uint32_t srcColor = colorPtr[fillIndex];
+ colorXform->apply(&dstColor, &srcColor, 1, select_xform_format(dstColorType),
+ SkColorSpaceXform::kRGBA_8888_ColorFormat, alphaType);
+ return dstColor;
+ }
+ default:
+ SkASSERT(false);
+ return 0;
+ }
+}
+
+/*
+ * Copy the codec color table back to the client when kIndex8 color type is requested
+ */
+static inline void copy_color_table(const SkImageInfo& dstInfo, SkColorTable* colorTable,
+ SkPMColor* inputColorPtr, int* inputColorCount) {
+ if (kIndex_8_SkColorType == dstInfo.colorType()) {
+ SkASSERT(nullptr != inputColorPtr);
+ SkASSERT(nullptr != inputColorCount);
+ SkASSERT(nullptr != colorTable);
+ memcpy(inputColorPtr, colorTable->readColors(), *inputColorCount * sizeof(SkPMColor));
+ }
+}
+
+/*
+ * Compute row bytes for an image using pixels per byte
+ */
+static inline size_t compute_row_bytes_ppb(int width, uint32_t pixelsPerByte) {
+ return (width + pixelsPerByte - 1) / pixelsPerByte;
+}
+
+/*
+ * Compute row bytes for an image using bytes per pixel
+ */
+static inline size_t compute_row_bytes_bpp(int width, uint32_t bytesPerPixel) {
+ return width * bytesPerPixel;
+}
+
+/*
+ * Compute row bytes for an image
+ */
+static inline size_t compute_row_bytes(int width, uint32_t bitsPerPixel) {
+ if (bitsPerPixel < 16) {
+ SkASSERT(0 == 8 % bitsPerPixel);
+ const uint32_t pixelsPerByte = 8 / bitsPerPixel;
+ return compute_row_bytes_ppb(width, pixelsPerByte);
+ } else {
+ SkASSERT(0 == bitsPerPixel % 8);
+ const uint32_t bytesPerPixel = bitsPerPixel / 8;
+ return compute_row_bytes_bpp(width, bytesPerPixel);
+ }
+}
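+// Example: compute_row_bytes(10, 1) == 2 (8 pixels per byte, rounded up),
+// compute_row_bytes(10, 4) == 5, and compute_row_bytes(10, 24) == 30 (3 bytes per pixel).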
+
+/*
+ * Get a byte from a buffer
+ * This method is unsafe, the caller is responsible for performing a check
+ */
+static inline uint8_t get_byte(uint8_t* buffer, uint32_t i) {
+ return buffer[i];
+}
+
+/*
+ * Get a short from a buffer
+ * This method is unsafe, the caller is responsible for performing a check
+ */
+static inline uint16_t get_short(uint8_t* buffer, uint32_t i) {
+ uint16_t result;
+ memcpy(&result, &(buffer[i]), 2);
+#ifdef SK_CPU_BENDIAN
+ return SkEndianSwap16(result);
+#else
+ return result;
+#endif
+}
+
+/*
+ * Get an int from a buffer
+ * This method is unsafe, the caller is responsible for performing a check
+ */
+static inline uint32_t get_int(uint8_t* buffer, uint32_t i) {
+ uint32_t result;
+ memcpy(&result, &(buffer[i]), 4);
+#ifdef SK_CPU_BENDIAN
+ return SkEndianSwap32(result);
+#else
+ return result;
+#endif
+}
+
+/*
+ * @param data Buffer to read bytes from
+ * @param isLittleEndian Output parameter
+ * Indicates if the data is little endian
+ * Is unaffected on false returns
+ */
+static inline bool is_valid_endian_marker(const uint8_t* data, bool* isLittleEndian) {
+    // II indicates Intel (little endian) and MM indicates Motorola (big endian).
+ if (('I' != data[0] || 'I' != data[1]) && ('M' != data[0] || 'M' != data[1])) {
+ return false;
+ }
+
+ *isLittleEndian = ('I' == data[0]);
+ return true;
+}
+
+static inline uint16_t get_endian_short(const uint8_t* data, bool littleEndian) {
+ if (littleEndian) {
+ return (data[1] << 8) | (data[0]);
+ }
+
+ return (data[0] << 8) | (data[1]);
+}
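+// Example: for data {0x12, 0x34}, get_endian_short(data, true) returns 0x3412 and
+// get_endian_short(data, false) returns 0x1234.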
+
+static inline SkPMColor premultiply_argb_as_rgba(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ if (a != 255) {
+ r = SkMulDiv255Round(r, a);
+ g = SkMulDiv255Round(g, a);
+ b = SkMulDiv255Round(b, a);
+ }
+
+ return SkPackARGB_as_RGBA(a, r, g, b);
+}
+
+static inline SkPMColor premultiply_argb_as_bgra(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ if (a != 255) {
+ r = SkMulDiv255Round(r, a);
+ g = SkMulDiv255Round(g, a);
+ b = SkMulDiv255Round(b, a);
+ }
+
+ return SkPackARGB_as_BGRA(a, r, g, b);
+}
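+// Worked example: premultiplying a = 0x80, r = 0xFF, g = 0x00, b = 0x00 scales each color
+// channel by a/255 (with rounding), so r becomes 0x80; the _as_rgba and _as_bgra variants
+// differ only in the byte order used when packing the result.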
+
+static inline bool is_rgba(SkColorType colorType) {
+#ifdef SK_PMCOLOR_IS_RGBA
+ return (kBGRA_8888_SkColorType != colorType);
+#else
+ return (kRGBA_8888_SkColorType == colorType);
+#endif
+}
+
+// Method for converting to a 32-bit pixel.
+typedef uint32_t (*PackColorProc)(U8CPU a, U8CPU r, U8CPU g, U8CPU b);
+
+static inline PackColorProc choose_pack_color_proc(bool isPremul, SkColorType colorType) {
+ bool isRGBA = is_rgba(colorType);
+ if (isPremul) {
+ if (isRGBA) {
+ return &premultiply_argb_as_rgba;
+ } else {
+ return &premultiply_argb_as_bgra;
+ }
+ } else {
+ if (isRGBA) {
+ return &SkPackARGB_as_RGBA;
+ } else {
+ return &SkPackARGB_as_BGRA;
+ }
+ }
+}
+
+static inline bool needs_premul(const SkImageInfo& dstInfo, const SkImageInfo& srcInfo) {
+ return kPremul_SkAlphaType == dstInfo.alphaType() &&
+ kUnpremul_SkAlphaType == srcInfo.alphaType();
+}
+
+static inline bool needs_color_xform(const SkImageInfo& dstInfo, const SkImageInfo& srcInfo) {
+ // Color xform is necessary in order to correctly perform premultiply in linear space.
+ bool needsPremul = needs_premul(dstInfo, srcInfo);
+
+ // F16 is by definition a linear space, so we always must perform a color xform.
+ bool isF16 = kRGBA_F16_SkColorType == dstInfo.colorType();
+
+ // Need a color xform when dst space does not match the src.
+ bool srcDstNotEqual = !SkColorSpace::Equals(srcInfo.colorSpace(), dstInfo.colorSpace());
+
+ // We never perform a color xform in legacy mode.
+ bool isLegacy = nullptr == dstInfo.colorSpace();
+
+ return !isLegacy && (needsPremul || isF16 || srcDstNotEqual);
+}
+
+static inline SkAlphaType select_xform_alpha(SkAlphaType dstAlphaType, SkAlphaType srcAlphaType) {
+ return (kOpaque_SkAlphaType == srcAlphaType) ? kOpaque_SkAlphaType : dstAlphaType;
+}
+
+/*
+ * Alpha Type Conversions
+ * - kOpaque to kOpaque, kUnpremul, kPremul is valid
+ * - kUnpremul to kUnpremul, kPremul is valid
+ *
+ * Color Type Conversions
+ * - Always support kRGBA_8888, kBGRA_8888
+ * - Support kRGBA_F16 when there is a linear dst color space
+ * - Support kIndex8 if it matches the src
+ * - Support k565 if kOpaque and color correction is not required
+ * - Support kGray8 if it matches the src, kOpaque, and color correction is not required
+ */
+static inline bool conversion_possible(const SkImageInfo& dst, const SkImageInfo& src) {
+ // Ensure the alpha type is valid.
+ if (!valid_alpha(dst.alphaType(), src.alphaType())) {
+ return false;
+ }
+
+ // Check for supported color types.
+ switch (dst.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ return true;
+ case kRGBA_F16_SkColorType:
+ return dst.colorSpace() && dst.colorSpace()->gammaIsLinear();
+ case kIndex_8_SkColorType:
+ return kIndex_8_SkColorType == src.colorType();
+ case kRGB_565_SkColorType:
+ return kOpaque_SkAlphaType == src.alphaType() && !needs_color_xform(dst, src);
+ case kGray_8_SkColorType:
+ return kGray_8_SkColorType == src.colorType() &&
+ kOpaque_SkAlphaType == src.alphaType() && !needs_color_xform(dst, src);
+ default:
+ return false;
+ }
+}
+
+#endif // SkCodecPriv_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkGifCodec.cpp b/gfx/skia/skia/src/codec/SkGifCodec.cpp
new file mode 100644
index 000000000..c35cd24ae
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkGifCodec.cpp
@@ -0,0 +1,607 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCodecPriv.h"
+#include "SkColorPriv.h"
+#include "SkColorTable.h"
+#include "SkGifCodec.h"
+#include "SkStream.h"
+#include "SkSwizzler.h"
+#include "SkUtils.h"
+
+#include "gif_lib.h"
+
+/*
+ * Checks the start of the stream to see if the image is a gif
+ */
+bool SkGifCodec::IsGif(const void* buf, size_t bytesRead) {
+ if (bytesRead >= GIF_STAMP_LEN) {
+ if (memcmp(GIF_STAMP, buf, GIF_STAMP_LEN) == 0 ||
+ memcmp(GIF87_STAMP, buf, GIF_STAMP_LEN) == 0 ||
+ memcmp(GIF89_STAMP, buf, GIF_STAMP_LEN) == 0)
+ {
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ * Error function
+ */
+static SkCodec::Result gif_error(const char* msg, SkCodec::Result result = SkCodec::kInvalidInput) {
+ SkCodecPrintf("Gif Error: %s\n", msg);
+ return result;
+}
+
+
+/*
+ * Read function that will be passed to gif_lib
+ */
+static int32_t read_bytes_callback(GifFileType* fileType, GifByteType* out, int32_t size) {
+ SkStream* stream = (SkStream*) fileType->UserData;
+ return (int32_t) stream->read(out, size);
+}
+
+/*
+ * Open the gif file
+ */
+static GifFileType* open_gif(SkStream* stream) {
+#if GIFLIB_MAJOR < 5
+ return DGifOpen(stream, read_bytes_callback);
+#else
+ return DGifOpen(stream, read_bytes_callback, nullptr);
+#endif
+}
+
+/*
+ * Check if there is an index in the color table for a transparent pixel
+ */
+static uint32_t find_trans_index(const SavedImage& image) {
+ // If there is a transparent index specified, it will be contained in an
+ // extension block. We will loop through extension blocks in reverse order
+ // to check the most recent extension blocks first.
+ for (int32_t i = image.ExtensionBlockCount - 1; i >= 0; i--) {
+ // Get an extension block
+ const ExtensionBlock& extBlock = image.ExtensionBlocks[i];
+
+ // Specifically, we need to check for a graphics control extension,
+ // which may contain transparency information. Also, note that a valid
+ // graphics control extension is always four bytes. The fourth byte
+ // is the transparent index (if it exists), so we need at least four
+ // bytes.
+ if (GRAPHICS_EXT_FUNC_CODE == extBlock.Function && extBlock.ByteCount >= 4) {
+ // Check the transparent color flag which indicates whether a
+ // transparent index exists. It is the least significant bit of
+ // the first byte of the extension block.
+ if (1 == (extBlock.Bytes[0] & 1)) {
+ // Use uint32_t to prevent sign extending
+ return extBlock.Bytes[3];
+ }
+
+ // There should only be one graphics control extension for the image frame
+ break;
+ }
+ }
+
+ // Use maximum unsigned int (surely an invalid index) to indicate that a valid
+ // index was not found.
+ return SK_MaxU32;
+}
+
+inline uint32_t ceil_div(uint32_t a, uint32_t b) {
+ return (a + b - 1) / b;
+}
+
+/*
+ * Gets the output row corresponding to the encoded row for interlaced gifs
+ */
+inline uint32_t get_output_row_interlaced(uint32_t encodedRow, uint32_t height) {
+ SkASSERT(encodedRow < height);
+ // First pass
+ if (encodedRow * 8 < height) {
+ return encodedRow * 8;
+ }
+ // Second pass
+ if (encodedRow * 4 < height) {
+ return 4 + 8 * (encodedRow - ceil_div(height, 8));
+ }
+ // Third pass
+ if (encodedRow * 2 < height) {
+ return 2 + 4 * (encodedRow - ceil_div(height, 4));
+ }
+ // Fourth pass
+ return 1 + 2 * (encodedRow - ceil_div(height, 2));
+}
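+// Example: for height == 10, encoded rows 0..9 map to output rows
+// 0, 8, 4, 2, 6, 1, 3, 5, 7, 9 (rows 0 and 8 in the first pass, row 4 in the
+// second, rows 2 and 6 in the third, and the odd rows in the fourth).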
+
+/*
+ * This function cleans up the gif object after the decode completes
+ * It is used in a SkAutoTCallVProc template
+ */
+void SkGifCodec::CloseGif(GifFileType* gif) {
+#if GIFLIB_MAJOR < 5 || (GIFLIB_MAJOR == 5 && GIFLIB_MINOR == 0)
+ DGifCloseFile(gif);
+#else
+ DGifCloseFile(gif, nullptr);
+#endif
+}
+
+/*
+ * This function frees extension data that has been saved to assist the image
+ * decoder
+ */
+void SkGifCodec::FreeExtension(SavedImage* image) {
+ if (NULL != image->ExtensionBlocks) {
+#if GIFLIB_MAJOR < 5
+ FreeExtension(image);
+#else
+ GifFreeExtensions(&image->ExtensionBlockCount, &image->ExtensionBlocks);
+#endif
+ }
+}
+
+/*
+ * Read enough of the stream to initialize the SkGifCodec.
+ * Returns a bool representing success or failure.
+ *
+ * @param codecOut
+ * If it returned true, and codecOut was not nullptr,
+ * codecOut will be set to a new SkGifCodec.
+ *
+ * @param gifOut
+ * If it returned true, and codecOut was nullptr,
+ * gifOut must be non-nullptr and gifOut will be set to a new
+ * GifFileType pointer.
+ *
+ * @param stream
+ * Deleted on failure.
+ *        codecOut will take ownership of it in the case where we create a codec.
+ *        Ownership is unchanged when we return a gifOut.
+ *
+ */
+bool SkGifCodec::ReadHeader(SkStream* stream, SkCodec** codecOut, GifFileType** gifOut) {
+ SkAutoTDelete<SkStream> streamDeleter(stream);
+
+ // Read gif header, logical screen descriptor, and global color table
+ SkAutoTCallVProc<GifFileType, CloseGif> gif(open_gif(stream));
+
+ if (nullptr == gif) {
+ gif_error("DGifOpen failed.\n");
+ return false;
+ }
+
+ // Read through gif extensions to get to the image data. Set the
+ // transparent index based on the extension data.
+ uint32_t transIndex;
+ SkCodec::Result result = ReadUpToFirstImage(gif, &transIndex);
+ if (kSuccess != result){
+ return false;
+ }
+
+ // Read the image descriptor
+ if (GIF_ERROR == DGifGetImageDesc(gif)) {
+ return false;
+ }
+ // If reading the image descriptor is successful, the image count will be
+ // incremented.
+ SkASSERT(gif->ImageCount >= 1);
+
+ if (nullptr != codecOut) {
+ SkISize size;
+ SkIRect frameRect;
+ if (!GetDimensions(gif, &size, &frameRect)) {
+ gif_error("Invalid gif size.\n");
+ return false;
+ }
+ bool frameIsSubset = (size != frameRect.size());
+
+        // Determine the encoded alpha type. The transIndex might be valid if it is less
+        // than 256. We are not certain that the index is valid until we process the color
+        // table, since some gifs have color tables with fewer than 256 colors. If
+ // there might be a valid transparent index, we must indicate that the image has
+ // alpha.
+ // In the case where we must support alpha, we indicate kBinary, since every
+ // pixel will either be fully opaque or fully transparent.
+ SkEncodedInfo::Alpha alpha = (transIndex < 256) ? SkEncodedInfo::kBinary_Alpha :
+ SkEncodedInfo::kOpaque_Alpha;
+
+ // Return the codec
+ // Use kPalette since Gifs are encoded with a color table.
+ // Use 8-bits per component, since this is the output we get from giflib.
+ // FIXME: Gifs can actually be encoded with 4-bits per pixel. Can we support this?
+ SkEncodedInfo info = SkEncodedInfo::Make(SkEncodedInfo::kPalette_Color, alpha, 8);
+ *codecOut = new SkGifCodec(size.width(), size.height(), info, streamDeleter.release(),
+ gif.release(), transIndex, frameRect, frameIsSubset);
+ } else {
+ SkASSERT(nullptr != gifOut);
+ streamDeleter.release();
+ *gifOut = gif.release();
+ }
+ return true;
+}
+
+/*
+ * Assumes IsGif was called and returned true
+ * Creates a gif decoder
+ * Reads enough of the stream to determine the image format
+ */
+SkCodec* SkGifCodec::NewFromStream(SkStream* stream) {
+ SkCodec* codec = nullptr;
+ if (ReadHeader(stream, &codec, nullptr)) {
+ return codec;
+ }
+ return nullptr;
+}
+
+SkGifCodec::SkGifCodec(int width, int height, const SkEncodedInfo& info, SkStream* stream,
+ GifFileType* gif, uint32_t transIndex, const SkIRect& frameRect, bool frameIsSubset)
+ : INHERITED(width, height, info, stream)
+ , fGif(gif)
+ , fSrcBuffer(new uint8_t[this->getInfo().width()])
+ , fFrameRect(frameRect)
+ // If it is valid, fTransIndex will be used to set fFillIndex. We don't know if
+ // fTransIndex is valid until we process the color table, since fTransIndex may
+ // be greater than the size of the color table.
+ , fTransIndex(transIndex)
+ // Default fFillIndex is 0. We will overwrite this if fTransIndex is valid, or if
+ // there is a valid background color.
+ , fFillIndex(0)
+ , fFrameIsSubset(frameIsSubset)
+ , fSwizzler(NULL)
+ , fColorTable(NULL)
+{}
+
+bool SkGifCodec::onRewind() {
+ GifFileType* gifOut = nullptr;
+ if (!ReadHeader(this->stream(), nullptr, &gifOut)) {
+ return false;
+ }
+
+ SkASSERT(nullptr != gifOut);
+ fGif.reset(gifOut);
+ return true;
+}
+
+SkCodec::Result SkGifCodec::ReadUpToFirstImage(GifFileType* gif, uint32_t* transIndex) {
+ // Use this as a container to hold information about any gif extension
+ // blocks. This generally stores transparency and animation instructions.
+ SavedImage saveExt;
+ SkAutoTCallVProc<SavedImage, FreeExtension> autoFreeExt(&saveExt);
+ saveExt.ExtensionBlocks = nullptr;
+ saveExt.ExtensionBlockCount = 0;
+ GifByteType* extData;
+ int32_t extFunction;
+
+ // We will loop over components of gif images until we find an image. Once
+ // we find an image, we will decode and return it. While many gif files
+ // contain more than one image, we will simply decode the first image.
+ GifRecordType recordType;
+ do {
+ // Get the current record type
+ if (GIF_ERROR == DGifGetRecordType(gif, &recordType)) {
+ return gif_error("DGifGetRecordType failed.\n", kInvalidInput);
+ }
+ switch (recordType) {
+ case IMAGE_DESC_RECORD_TYPE: {
+ *transIndex = find_trans_index(saveExt);
+
+ // FIXME: Gif files may have multiple images stored in a single
+ // file. This is most commonly used to enable
+ // animations. Since we are leaving animated gifs as a
+ // TODO, we will return kSuccess after decoding the
+ // first image in the file. This is the same behavior
+ // as SkImageDecoder_libgif.
+ //
+ // Most times this works pretty well, but sometimes it
+ // doesn't. For example, I have an animated test image
+ // where the first image in the file is 1x1, but the
+ // subsequent images are meaningful. This currently
+ // displays the 1x1 image, which is not ideal. Right
+ // now I am leaving this as an issue that will be
+ // addressed when we implement animated gifs.
+ //
+ // It is also possible (not explicitly disallowed in the
+ // specification) that gif files provide multiple
+ // images in a single file that are all meant to be
+ // displayed in the same frame together. I will
+ // currently leave this unimplemented until I find a
+ // test case that expects this behavior.
+ return kSuccess;
+ }
+ // Extensions are used to specify special properties of the image
+ // such as transparency or animation.
+ case EXTENSION_RECORD_TYPE:
+ // Read extension data
+ if (GIF_ERROR == DGifGetExtension(gif, &extFunction, &extData)) {
+ return gif_error("Could not get extension.\n", kIncompleteInput);
+ }
+
+ // Create an extension block with our data
+ while (nullptr != extData) {
+ // Add a single block
+
+#if GIFLIB_MAJOR < 5
+ if (AddExtensionBlock(&saveExt, extData[0],
+ &extData[1]) == GIF_ERROR) {
+#else
+ if (GIF_ERROR == GifAddExtensionBlock(&saveExt.ExtensionBlockCount,
+ &saveExt.ExtensionBlocks,
+ extFunction, extData[0], &extData[1])) {
+#endif
+ return gif_error("Could not add extension block.\n", kIncompleteInput);
+ }
+ // Move to the next block
+ if (GIF_ERROR == DGifGetExtensionNext(gif, &extData)) {
+ return gif_error("Could not get next extension.\n", kIncompleteInput);
+ }
+ }
+ break;
+
+ // Signals the end of the gif file
+ case TERMINATE_RECORD_TYPE:
+ break;
+
+ default:
+ // DGifGetRecordType returns an error if the record type does
+ // not match one of the above cases. This should not be
+ // reached.
+ SkASSERT(false);
+ break;
+ }
+ } while (TERMINATE_RECORD_TYPE != recordType);
+
+ return gif_error("Could not find any images to decode in gif file.\n", kInvalidInput);
+}
+
+bool SkGifCodec::GetDimensions(GifFileType* gif, SkISize* size, SkIRect* frameRect) {
+ // Get the encoded dimension values
+ SavedImage* image = &gif->SavedImages[gif->ImageCount - 1];
+ const GifImageDesc& desc = image->ImageDesc;
+ int frameLeft = desc.Left;
+ int frameTop = desc.Top;
+ int frameWidth = desc.Width;
+ int frameHeight = desc.Height;
+ int width = gif->SWidth;
+ int height = gif->SHeight;
+
+ // Ensure that the decode dimensions are large enough to contain the frame
+ width = SkTMax(width, frameWidth + frameLeft);
+ height = SkTMax(height, frameHeight + frameTop);
+
+ // All of these dimensions should be positive, as they are encoded as unsigned 16-bit integers.
+ // It is unclear why giflib casts them to ints. We will go ahead and check that they are
+ // in fact positive.
+ if (frameLeft < 0 || frameTop < 0 || frameWidth < 0 || frameHeight < 0 || width <= 0 ||
+ height <= 0) {
+ return false;
+ }
+
+ frameRect->setXYWH(frameLeft, frameTop, frameWidth, frameHeight);
+ size->set(width, height);
+ return true;
+}
+
+void SkGifCodec::initializeColorTable(const SkImageInfo& dstInfo, SkPMColor* inputColorPtr,
+ int* inputColorCount) {
+ // Set up our own color table
+ const uint32_t maxColors = 256;
+ SkPMColor colorPtr[256];
+ if (NULL != inputColorCount) {
+ // We set the number of colors to maxColors in order to ensure
+ // safe memory accesses. Otherwise, an invalid pixel could
+ // access memory outside of our color table array.
+ *inputColorCount = maxColors;
+ }
+
+ // Get local color table
+ ColorMapObject* colorMap = fGif->Image.ColorMap;
+ // If there is no local color table, use the global color table
+ if (NULL == colorMap) {
+ colorMap = fGif->SColorMap;
+ }
+
+ uint32_t colorCount = 0;
+ if (NULL != colorMap) {
+ colorCount = colorMap->ColorCount;
+ // giflib guarantees these properties
+ SkASSERT(colorCount == (unsigned) (1 << (colorMap->BitsPerPixel)));
+ SkASSERT(colorCount <= 256);
+ PackColorProc proc = choose_pack_color_proc(false, dstInfo.colorType());
+ for (uint32_t i = 0; i < colorCount; i++) {
+ colorPtr[i] = proc(0xFF, colorMap->Colors[i].Red,
+ colorMap->Colors[i].Green, colorMap->Colors[i].Blue);
+ }
+ }
+
+ // Fill in the color table for indices greater than color count.
+ // This allows for predictable, safe behavior.
+ if (colorCount > 0) {
+ // Gifs have the option to specify the color at a single index of the color
+ // table as transparent. If the transparent index is greater than the
+ // colorCount, we know that there is no valid transparent color in the color
+        // table.  If there is no valid transparent index, we will try to use the
+ // backgroundIndex as the fill index. If the backgroundIndex is also not
+ // valid, we will let fFillIndex default to 0 (it is set to zero in the
+ // constructor). This behavior is not specified but matches
+ // SkImageDecoder_libgif.
+ uint32_t backgroundIndex = fGif->SBackGroundColor;
+ if (fTransIndex < colorCount) {
+ colorPtr[fTransIndex] = SK_ColorTRANSPARENT;
+ fFillIndex = fTransIndex;
+ } else if (backgroundIndex < colorCount) {
+ fFillIndex = backgroundIndex;
+ }
+
+ for (uint32_t i = colorCount; i < maxColors; i++) {
+ colorPtr[i] = colorPtr[fFillIndex];
+ }
+ } else {
+ sk_memset32(colorPtr, 0xFF000000, maxColors);
+ }
+
+ fColorTable.reset(new SkColorTable(colorPtr, maxColors));
+ copy_color_table(dstInfo, this->fColorTable, inputColorPtr, inputColorCount);
+}
+
+SkCodec::Result SkGifCodec::prepareToDecode(const SkImageInfo& dstInfo, SkPMColor* inputColorPtr,
+ int* inputColorCount, const Options& opts) {
+ // Check for valid input parameters
+ if (!conversion_possible_ignore_color_space(dstInfo, this->getInfo())) {
+ return gif_error("Cannot convert input type to output type.\n", kInvalidConversion);
+ }
+
+ // Initialize color table and copy to the client if necessary
+ this->initializeColorTable(dstInfo, inputColorPtr, inputColorCount);
+
+ this->initializeSwizzler(dstInfo, opts);
+ return kSuccess;
+}
+
+void SkGifCodec::initializeSwizzler(const SkImageInfo& dstInfo, const Options& opts) {
+ const SkPMColor* colorPtr = get_color_ptr(fColorTable.get());
+ const SkIRect* frameRect = fFrameIsSubset ? &fFrameRect : nullptr;
+ fSwizzler.reset(SkSwizzler::CreateSwizzler(this->getEncodedInfo(), colorPtr, dstInfo, opts,
+ frameRect));
+ SkASSERT(fSwizzler);
+}
+
+bool SkGifCodec::readRow() {
+ return GIF_ERROR != DGifGetLine(fGif, fSrcBuffer.get(), fFrameRect.width());
+}
+
+/*
+ * Initiates the gif decode
+ */
+SkCodec::Result SkGifCodec::onGetPixels(const SkImageInfo& dstInfo,
+ void* dst, size_t dstRowBytes,
+ const Options& opts,
+ SkPMColor* inputColorPtr,
+ int* inputColorCount,
+ int* rowsDecoded) {
+ Result result = this->prepareToDecode(dstInfo, inputColorPtr, inputColorCount, opts);
+ if (kSuccess != result) {
+ return result;
+ }
+
+ if (dstInfo.dimensions() != this->getInfo().dimensions()) {
+ return gif_error("Scaling not supported.\n", kInvalidScale);
+ }
+
+    // The swizzler was already initialized in prepareToDecode().
+ if (fFrameIsSubset) {
+ // Fill the background
+ SkSampler::Fill(dstInfo, dst, dstRowBytes, this->getFillValue(dstInfo),
+ opts.fZeroInitialized);
+ }
+
+ // Iterate over rows of the input
+ for (int y = fFrameRect.top(); y < fFrameRect.bottom(); y++) {
+ if (!this->readRow()) {
+ *rowsDecoded = y;
+ return gif_error("Could not decode line.\n", kIncompleteInput);
+ }
+ void* dstRow = SkTAddOffset<void>(dst, dstRowBytes * this->outputScanline(y));
+ fSwizzler->swizzle(dstRow, fSrcBuffer.get());
+ }
+ return kSuccess;
+}
+
+// FIXME: This is similar to the implementation for bmp and png. Can we share more code or
+// possibly make this non-virtual?
+uint64_t SkGifCodec::onGetFillValue(const SkImageInfo& dstInfo) const {
+ const SkPMColor* colorPtr = get_color_ptr(fColorTable.get());
+ return get_color_table_fill_value(dstInfo.colorType(), dstInfo.alphaType(), colorPtr,
+ fFillIndex, nullptr);
+}
+
+SkCodec::Result SkGifCodec::onStartScanlineDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& opts, SkPMColor inputColorPtr[], int* inputColorCount) {
+ return this->prepareToDecode(dstInfo, inputColorPtr, inputColorCount, opts);
+}
+
+void SkGifCodec::handleScanlineFrame(int count, int* rowsBeforeFrame, int* rowsInFrame) {
+ if (fFrameIsSubset) {
+ const int currRow = this->currScanline();
+
+ // The number of rows that remain to be skipped before reaching rows that we
+ // actually must decode into.
+ // This must be at least zero. We also make sure that it is less than or
+ // equal to count, since we will skip at most count rows.
+ *rowsBeforeFrame = SkTMin(count, SkTMax(0, fFrameRect.top() - currRow));
+
+ // Rows left to decode once we reach the start of the frame.
+ const int rowsLeft = count - *rowsBeforeFrame;
+
+        // Count the number of rows that extend beyond the bottom of the frame.  We do not
+ // need to decode into these rows.
+ const int rowsAfterFrame = SkTMax(0, currRow + rowsLeft - fFrameRect.bottom());
+
+ // Set the actual number of source rows that we need to decode.
+ *rowsInFrame = rowsLeft - rowsAfterFrame;
+ } else {
+ *rowsBeforeFrame = 0;
+ *rowsInFrame = count;
+ }
+}
+
+int SkGifCodec::onGetScanlines(void* dst, int count, size_t rowBytes) {
+ int rowsBeforeFrame;
+ int rowsInFrame;
+ this->handleScanlineFrame(count, &rowsBeforeFrame, &rowsInFrame);
+
+ if (fFrameIsSubset) {
+ // Fill the requested rows
+ SkImageInfo fillInfo = this->dstInfo().makeWH(this->dstInfo().width(), count);
+ uint64_t fillValue = this->onGetFillValue(this->dstInfo());
+ fSwizzler->fill(fillInfo, dst, rowBytes, fillValue, this->options().fZeroInitialized);
+
+ // Start to write pixels at the start of the image frame
+ dst = SkTAddOffset<void>(dst, rowBytes * rowsBeforeFrame);
+ }
+
+ for (int i = 0; i < rowsInFrame; i++) {
+ if (!this->readRow()) {
+ return i + rowsBeforeFrame;
+ }
+ fSwizzler->swizzle(dst, fSrcBuffer.get());
+ dst = SkTAddOffset<void>(dst, rowBytes);
+ }
+
+ return count;
+}
+
+bool SkGifCodec::onSkipScanlines(int count) {
+ int rowsBeforeFrame;
+ int rowsInFrame;
+ this->handleScanlineFrame(count, &rowsBeforeFrame, &rowsInFrame);
+
+ for (int i = 0; i < rowsInFrame; i++) {
+ if (!this->readRow()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+SkCodec::SkScanlineOrder SkGifCodec::onGetScanlineOrder() const {
+ if (fGif->Image.Interlace) {
+ return kOutOfOrder_SkScanlineOrder;
+ }
+ return kTopDown_SkScanlineOrder;
+}
+
+int SkGifCodec::onOutputScanline(int inputScanline) const {
+ if (fGif->Image.Interlace) {
+ if (inputScanline < fFrameRect.top() || inputScanline >= fFrameRect.bottom()) {
+ return inputScanline;
+ }
+ return get_output_row_interlaced(inputScanline - fFrameRect.top(), fFrameRect.height()) +
+ fFrameRect.top();
+ }
+ return inputScanline;
+}
diff --git a/gfx/skia/skia/src/codec/SkGifCodec.h b/gfx/skia/skia/src/codec/SkGifCodec.h
new file mode 100644
index 000000000..c56d3719a
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkGifCodec.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCodec.h"
+#include "SkColorSpace.h"
+#include "SkColorTable.h"
+#include "SkImageInfo.h"
+#include "SkSwizzler.h"
+
+struct GifFileType;
+struct SavedImage;
+
+/*
+ *
+ * This class implements the decoding for gif images
+ *
+ */
+class SkGifCodec : public SkCodec {
+public:
+ static bool IsGif(const void*, size_t);
+
+ /*
+ * Assumes IsGif was called and returned true
+ * Creates a gif decoder
+ * Reads enough of the stream to determine the image format
+ */
+ static SkCodec* NewFromStream(SkStream*);
+
+protected:
+
+ /*
+ * Read enough of the stream to initialize the SkGifCodec.
+ * Returns a bool representing success or failure.
+ *
+ * @param codecOut
+ * If it returned true, and codecOut was not nullptr,
+ * codecOut will be set to a new SkGifCodec.
+ *
+ * @param gifOut
+ * If it returned true, and codecOut was nullptr,
+ * gifOut must be non-nullptr and gifOut will be set to a new
+ * GifFileType pointer.
+ *
+ * @param stream
+ * Deleted on failure.
+     *        codecOut will take ownership of it in the case where we create a codec.
+     *        Ownership is unchanged when we return a gifOut.
+ *
+ */
+ static bool ReadHeader(SkStream* stream, SkCodec** codecOut,
+ GifFileType** gifOut);
+
+ /*
+ * Performs the full gif decode
+ */
+ Result onGetPixels(const SkImageInfo&, void*, size_t, const Options&,
+ SkPMColor*, int*, int*) override;
+
+ SkEncodedFormat onGetEncodedFormat() const override {
+ return kGIF_SkEncodedFormat;
+ }
+
+ bool onRewind() override;
+
+ uint64_t onGetFillValue(const SkImageInfo&) const override;
+
+ int onOutputScanline(int inputScanline) const override;
+
+private:
+
+ /*
+ * A gif can contain multiple image frames. We will only decode the first
+ * frame. This function reads up to the first image frame, processing
+ * transparency and/or animation information that comes before the image
+ * data.
+ *
+ * @param gif Pointer to the library type that manages the gif decode
+ * @param transIndex This call will set the transparent index based on the
+ * extension data.
+ */
+ static Result ReadUpToFirstImage(GifFileType* gif, uint32_t* transIndex);
+
+ /*
+ * A gif may contain many image frames, all of different sizes.
+ * This function checks if the gif dimensions are valid, based on the frame
+ * dimensions, and corrects the gif dimensions if necessary.
+ *
+ * @param gif Pointer to the library type that manages the gif decode
+ * @param size Size of the image that we will decode.
+ * Will be set by this function if the return value is true.
+     * @param frameRect Contains the dimensions and offset of the first image frame.
+ * Will be set by this function if the return value is true.
+ *
+ * @return true on success, false otherwise
+ */
+ static bool GetDimensions(GifFileType* gif, SkISize* size, SkIRect* frameRect);
+
+ /*
+ * Initializes the color table that we will use for decoding.
+ *
+ * @param dstInfo Contains the requested dst color type.
+ * @param inputColorPtr Copies the encoded color table to the client's
+ * input color table if the client requests kIndex8.
+ * @param inputColorCount If the client requests kIndex8, sets
+ * inputColorCount to 256. Since gifs always
+ * contain 8-bit indices, we need a 256 entry color
+ * table to ensure that indexing is always in
+ * bounds.
+ */
+ void initializeColorTable(const SkImageInfo& dstInfo, SkPMColor* colorPtr,
+ int* inputColorCount);
+
+ /*
+     * Checks for invalid inputs and calls initializeColorTable() and
+     * initializeSwizzler() in the proper sequence.
+ */
+ Result prepareToDecode(const SkImageInfo& dstInfo, SkPMColor* inputColorPtr,
+ int* inputColorCount, const Options& opts);
+
+ /*
+ * Initializes the swizzler.
+ *
+ * @param dstInfo Output image information. Dimensions may have been
+ * adjusted if the image frame size does not match the size
+ * indicated in the header.
+ * @param options Informs the swizzler if destination memory is zero initialized.
+ * Contains subset information.
+ */
+ void initializeSwizzler(const SkImageInfo& dstInfo,
+ const Options& options);
+
+ SkSampler* getSampler(bool createIfNecessary) override {
+ SkASSERT(fSwizzler);
+ return fSwizzler;
+ }
+
+ /*
+ * @return true if the read is successful and false if the read fails.
+ */
+ bool readRow();
+
+ Result onStartScanlineDecode(const SkImageInfo& dstInfo, const Options& opts,
+ SkPMColor inputColorPtr[], int* inputColorCount) override;
+
+ int onGetScanlines(void* dst, int count, size_t rowBytes) override;
+
+ bool onSkipScanlines(int count) override;
+
+ /*
+ * For a scanline decode of "count" lines, this function indicates how
+ * many of the "count" lines should be skipped until we reach the top of
+ * the image frame and how many of the "count" lines are actually inside
+ * the image frame.
+ *
+ * @param count The number of scanlines requested.
+ * @param rowsBeforeFrame Output variable. The number of lines before
+ * we reach the top of the image frame.
+ * @param rowsInFrame Output variable. The number of lines to decode
+ * inside the image frame.
+ */
+ void handleScanlineFrame(int count, int* rowsBeforeFrame, int* rowsInFrame);
+
+ SkScanlineOrder onGetScanlineOrder() const override;
+
+ /*
+ * This function cleans up the gif object after the decode completes
+     * It is used in a SkAutoTCallVProc template
+ */
+ static void CloseGif(GifFileType* gif);
+
+ /*
+ * Frees any extension data used in the decode
+ * Used in a SkAutoTCallVProc
+ */
+ static void FreeExtension(SavedImage* image);
+
+ /*
+ * Creates an instance of the decoder
+ * Called only by NewFromStream
+ *
+ * @param info contains properties of the encoded data
+ * @param stream the stream of image data
+ * @param gif pointer to library type that manages gif decode
+ * takes ownership
+ * @param transIndex The transparent index. An invalid value
+ * indicates that there is no transparent index.
+ */
+ SkGifCodec(int width, int height, const SkEncodedInfo& info, SkStream* stream,
+ GifFileType* gif, uint32_t transIndex, const SkIRect& frameRect, bool frameIsSubset);
+
+ SkAutoTCallVProc<GifFileType, CloseGif> fGif; // owned
+ SkAutoTDeleteArray<uint8_t> fSrcBuffer;
+ const SkIRect fFrameRect;
+ const uint32_t fTransIndex;
+ uint32_t fFillIndex;
+ const bool fFrameIsSubset;
+ SkAutoTDelete<SkSwizzler> fSwizzler;
+ SkAutoTUnref<SkColorTable> fColorTable;
+
+ typedef SkCodec INHERITED;
+};
diff --git a/gfx/skia/skia/src/codec/SkIcoCodec.cpp b/gfx/skia/skia/src/codec/SkIcoCodec.cpp
new file mode 100644
index 000000000..63b72c403
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkIcoCodec.cpp
@@ -0,0 +1,391 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBmpCodec.h"
+#include "SkCodecPriv.h"
+#include "SkColorPriv.h"
+#include "SkData.h"
+#include "SkIcoCodec.h"
+#include "SkPngCodec.h"
+#include "SkStream.h"
+#include "SkTDArray.h"
+#include "SkTSort.h"
+
+/*
+ * Checks the start of the stream to see if the image is an Ico or Cur
+ */
+bool SkIcoCodec::IsIco(const void* buffer, size_t bytesRead) {
+ const char icoSig[] = { '\x00', '\x00', '\x01', '\x00' };
+ const char curSig[] = { '\x00', '\x00', '\x02', '\x00' };
+ return bytesRead >= sizeof(icoSig) &&
+ (!memcmp(buffer, icoSig, sizeof(icoSig)) ||
+ !memcmp(buffer, curSig, sizeof(curSig)));
+}
+
+/*
+ * Assumes IsIco was called and returned true
+ * Creates an Ico decoder
+ * Reads enough of the stream to determine the image format
+ */
+SkCodec* SkIcoCodec::NewFromStream(SkStream* stream) {
+ // Ensure that we do not leak the input stream
+ SkAutoTDelete<SkStream> inputStream(stream);
+
+ // Header size constants
+ static const uint32_t kIcoDirectoryBytes = 6;
+ static const uint32_t kIcoDirEntryBytes = 16;
+
+ // Read the directory header
+ SkAutoTDeleteArray<uint8_t> dirBuffer(new uint8_t[kIcoDirectoryBytes]);
+ if (inputStream.get()->read(dirBuffer.get(), kIcoDirectoryBytes) !=
+ kIcoDirectoryBytes) {
+ SkCodecPrintf("Error: unable to read ico directory header.\n");
+ return nullptr;
+ }
+
+ // Process the directory header
+ const uint16_t numImages = get_short(dirBuffer.get(), 4);
+ if (0 == numImages) {
+ SkCodecPrintf("Error: No images embedded in ico.\n");
+ return nullptr;
+ }
+
+    // Ensure that we can read all of the indicated directory entries
+ SkAutoTDeleteArray<uint8_t> entryBuffer(new uint8_t[numImages * kIcoDirEntryBytes]);
+ if (inputStream.get()->read(entryBuffer.get(), numImages*kIcoDirEntryBytes) !=
+ numImages*kIcoDirEntryBytes) {
+ SkCodecPrintf("Error: unable to read ico directory entries.\n");
+ return nullptr;
+ }
+
+ // This structure is used to represent the vital information about entries
+ // in the directory header. We will obtain this information for each
+ // directory entry.
+ struct Entry {
+ uint32_t offset;
+ uint32_t size;
+ };
+ SkAutoTDeleteArray<Entry> directoryEntries(new Entry[numImages]);
+
+ // Iterate over directory entries
+ for (uint32_t i = 0; i < numImages; i++) {
+ // The directory entry contains information such as width, height,
+ // bits per pixel, and number of colors in the color palette. We will
+ // ignore these fields since they are repeated in the header of the
+ // embedded image. In the event of an inconsistency, we would always
+ // defer to the value in the embedded header anyway.
+
+ // Specifies the size of the embedded image, including the header
+ uint32_t size = get_int(entryBuffer.get(), 8 + i*kIcoDirEntryBytes);
+
+ // Specifies the offset of the embedded image from the start of file.
+ // It does not indicate the start of the pixel data, but rather the
+ // start of the embedded image header.
+ uint32_t offset = get_int(entryBuffer.get(), 12 + i*kIcoDirEntryBytes);
+
+ // Save the vital fields
+ directoryEntries.get()[i].offset = offset;
+ directoryEntries.get()[i].size = size;
+ }
+
+ // It is "customary" that the embedded images will be stored in order of
+ // increasing offset. However, the specification does not indicate that
+ // they must be stored in this order, so we will not trust that this is the
+ // case. Here we sort the embedded images by increasing offset.
+ struct EntryLessThan {
+ bool operator() (Entry a, Entry b) const {
+ return a.offset < b.offset;
+ }
+ };
+ EntryLessThan lessThan;
+ SkTQSort(directoryEntries.get(), directoryEntries.get() + numImages - 1,
+ lessThan);
+
+    // Now we will construct a candidate codec for each of the embedded images
+ uint32_t bytesRead = kIcoDirectoryBytes + numImages * kIcoDirEntryBytes;
+ SkAutoTDelete<SkTArray<SkAutoTDelete<SkCodec>, true>> codecs(
+ new (SkTArray<SkAutoTDelete<SkCodec>, true>)(numImages));
+ for (uint32_t i = 0; i < numImages; i++) {
+ uint32_t offset = directoryEntries.get()[i].offset;
+ uint32_t size = directoryEntries.get()[i].size;
+
+ // Ensure that the offset is valid
+ if (offset < bytesRead) {
+ SkCodecPrintf("Warning: invalid ico offset.\n");
+ continue;
+ }
+
+ // If we cannot skip, assume we have reached the end of the stream and
+ // stop trying to make codecs
+ if (inputStream.get()->skip(offset - bytesRead) != offset - bytesRead) {
+ SkCodecPrintf("Warning: could not skip to ico offset.\n");
+ break;
+ }
+ bytesRead = offset;
+
+ // Create a new stream for the embedded codec
+ sk_sp<SkData> data(SkData::MakeFromStream(inputStream.get(), size));
+ if (nullptr == data.get()) {
+ SkCodecPrintf("Warning: could not create embedded stream.\n");
+ break;
+ }
+ SkAutoTDelete<SkMemoryStream> embeddedStream(new SkMemoryStream(data));
+ bytesRead += size;
+
+ // Check if the embedded codec is bmp or png and create the codec
+ SkCodec* codec = nullptr;
+ if (SkPngCodec::IsPng((const char*) data->bytes(), data->size())) {
+ codec = SkPngCodec::NewFromStream(embeddedStream.release());
+ } else {
+ codec = SkBmpCodec::NewFromIco(embeddedStream.release());
+ }
+
+ // Save a valid codec
+ if (nullptr != codec) {
+ codecs->push_back().reset(codec);
+ }
+ }
+
+    // Fail if there are no valid embedded codecs
+ if (0 == codecs->count()) {
+ SkCodecPrintf("Error: could not find any valid embedded ico codecs.\n");
+ return nullptr;
+ }
+
+ // Use the largest codec as a "suggestion" for image info
+ uint32_t maxSize = 0;
+ uint32_t maxIndex = 0;
+ for (int32_t i = 0; i < codecs->count(); i++) {
+ SkImageInfo info = codecs->operator[](i)->getInfo();
+ uint32_t size = info.width() * info.height();
+ if (size > maxSize) {
+ maxSize = size;
+ maxIndex = i;
+ }
+ }
+ int width = codecs->operator[](maxIndex)->getInfo().width();
+ int height = codecs->operator[](maxIndex)->getInfo().height();
+ SkEncodedInfo info = codecs->operator[](maxIndex)->getEncodedInfo();
+
+    // Note that each stream is owned by its embedded codec; the ico codec does
+    // not need direct access to the streams.
+ return new SkIcoCodec(width, height, info, codecs.release());
+}
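+
+/*
+ * A minimal sketch (unused by the codec) of the two fields that NewFromStream()
+ * reads from each 16-byte directory entry: the embedded image's byte size at
+ * entry offset 8 and its file offset at entry offset 12, both little-endian.
+ * Illustrative only.
+ */
+static void sketch_read_dir_entry(uint8_t* entry, uint32_t* offset, uint32_t* size) {
+    *size = get_int(entry, 8);
+    *offset = get_int(entry, 12);
+    // Ex: an ico holding a single image typically stores its embedded header
+    //     right after the directory, at offset 6 + 1 * 16 = 22.
+}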
+
+/*
+ * Creates an instance of the decoder
+ * Called only by NewFromStream
+ */
+SkIcoCodec::SkIcoCodec(int width, int height, const SkEncodedInfo& info,
+ SkTArray<SkAutoTDelete<SkCodec>, true>* codecs)
+ : INHERITED(width, height, info, nullptr)
+ , fEmbeddedCodecs(codecs)
+ , fCurrScanlineCodec(nullptr)
+ , fCurrIncrementalCodec(nullptr)
+{}
+
+/*
+ * Chooses the best dimensions given the desired scale
+ */
+SkISize SkIcoCodec::onGetScaledDimensions(float desiredScale) const {
+ // We set the dimensions to the largest candidate image by default.
+ // Regardless of the scale request, this is the largest image that we
+ // will decode.
+ int origWidth = this->getInfo().width();
+ int origHeight = this->getInfo().height();
+ float desiredSize = desiredScale * origWidth * origHeight;
+ // At least one image will have smaller error than this initial value
+ float minError = ((float) (origWidth * origHeight)) - desiredSize + 1.0f;
+ int32_t minIndex = -1;
+ for (int32_t i = 0; i < fEmbeddedCodecs->count(); i++) {
+ int width = fEmbeddedCodecs->operator[](i)->getInfo().width();
+ int height = fEmbeddedCodecs->operator[](i)->getInfo().height();
+ float error = SkTAbs(((float) (width * height)) - desiredSize);
+ if (error < minError) {
+ minError = error;
+ minIndex = i;
+ }
+ }
+ SkASSERT(minIndex >= 0);
+
+ return fEmbeddedCodecs->operator[](minIndex)->getInfo().dimensions();
+}
+
+int SkIcoCodec::chooseCodec(const SkISize& requestedSize, int startIndex) {
+ SkASSERT(startIndex >= 0);
+
+ // FIXME: Cache the index from onGetScaledDimensions?
+ for (int i = startIndex; i < fEmbeddedCodecs->count(); i++) {
+ if (fEmbeddedCodecs->operator[](i)->getInfo().dimensions() == requestedSize) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+bool SkIcoCodec::onDimensionsSupported(const SkISize& dim) {
+ return this->chooseCodec(dim, 0) >= 0;
+}
+
+/*
+ * Initiates the Ico decode
+ */
+SkCodec::Result SkIcoCodec::onGetPixels(const SkImageInfo& dstInfo,
+ void* dst, size_t dstRowBytes,
+ const Options& opts, SkPMColor* colorTable,
+ int* colorCount, int* rowsDecoded) {
+ if (opts.fSubset) {
+ // Subsets are not supported.
+ return kUnimplemented;
+ }
+
+ int index = 0;
+ SkCodec::Result result = kInvalidScale;
+ while (true) {
+ index = this->chooseCodec(dstInfo.dimensions(), index);
+ if (index < 0) {
+ break;
+ }
+
+ SkCodec* embeddedCodec = fEmbeddedCodecs->operator[](index);
+ result = embeddedCodec->getPixels(dstInfo, dst, dstRowBytes, &opts, colorTable,
+ colorCount);
+
+ switch (result) {
+ case kSuccess:
+ case kIncompleteInput:
+ // The embedded codec will handle filling incomplete images, so we will indicate
+ // that all of the rows are initialized.
+ *rowsDecoded = dstInfo.height();
+ return result;
+ default:
+ // Continue trying to find a valid embedded codec on a failed decode.
+ break;
+ }
+
+ index++;
+ }
+
+ SkCodecPrintf("Error: No matching candidate image in ico.\n");
+ return result;
+}
+
+SkCodec::Result SkIcoCodec::onStartScanlineDecode(const SkImageInfo& dstInfo,
+ const SkCodec::Options& options, SkPMColor colorTable[], int* colorCount) {
+ int index = 0;
+ SkCodec::Result result = kInvalidScale;
+ while (true) {
+ index = this->chooseCodec(dstInfo.dimensions(), index);
+ if (index < 0) {
+ break;
+ }
+
+ SkCodec* embeddedCodec = fEmbeddedCodecs->operator[](index);
+ result = embeddedCodec->startScanlineDecode(dstInfo, &options, colorTable, colorCount);
+ if (kSuccess == result) {
+ fCurrScanlineCodec = embeddedCodec;
+ fCurrIncrementalCodec = nullptr;
+ return result;
+ }
+
+ index++;
+ }
+
+ SkCodecPrintf("Error: No matching candidate image in ico.\n");
+ return result;
+}
+
+int SkIcoCodec::onGetScanlines(void* dst, int count, size_t rowBytes) {
+ SkASSERT(fCurrScanlineCodec);
+ return fCurrScanlineCodec->getScanlines(dst, count, rowBytes);
+}
+
+bool SkIcoCodec::onSkipScanlines(int count) {
+ SkASSERT(fCurrScanlineCodec);
+ return fCurrScanlineCodec->skipScanlines(count);
+}
+
+SkCodec::Result SkIcoCodec::onStartIncrementalDecode(const SkImageInfo& dstInfo,
+ void* pixels, size_t rowBytes, const SkCodec::Options& options,
+ SkPMColor* colorTable, int* colorCount) {
+ int index = 0;
+ while (true) {
+ index = this->chooseCodec(dstInfo.dimensions(), index);
+ if (index < 0) {
+ break;
+ }
+
+ SkCodec* embeddedCodec = fEmbeddedCodecs->operator[](index);
+ switch (embeddedCodec->startIncrementalDecode(dstInfo,
+ pixels, rowBytes, &options, colorTable, colorCount)) {
+ case kSuccess:
+ fCurrIncrementalCodec = embeddedCodec;
+ fCurrScanlineCodec = nullptr;
+ return kSuccess;
+ case kUnimplemented:
+ // FIXME: embeddedCodec is a BMP. If scanline decoding would work,
+ // return kUnimplemented so that SkSampledCodec will fall through
+ // to use the scanline decoder.
+ // Note that calling startScanlineDecode will require an extra
+ // rewind. The embedded codec has an SkMemoryStream, which is
+ // cheap to rewind, though it will do extra work re-reading the
+ // header.
+ // Also note that we pass nullptr for Options. This is because
+ // Options that are valid for incremental decoding may not be
+ // valid for scanline decoding.
+ // Once BMP supports incremental decoding this workaround can go
+ // away.
+ if (embeddedCodec->startScanlineDecode(dstInfo, nullptr,
+ colorTable, colorCount) == kSuccess) {
+ return kUnimplemented;
+ }
+ // Move on to the next embedded codec.
+ break;
+ default:
+ break;
+ }
+
+ index++;
+ }
+
+ SkCodecPrintf("Error: No matching candidate image in ico.\n");
+ return kInvalidScale;
+}
+
+SkCodec::Result SkIcoCodec::onIncrementalDecode(int* rowsDecoded) {
+ SkASSERT(fCurrIncrementalCodec);
+ return fCurrIncrementalCodec->incrementalDecode(rowsDecoded);
+}
+
+SkCodec::SkScanlineOrder SkIcoCodec::onGetScanlineOrder() const {
+ // FIXME: This function will possibly return the wrong value if it is called
+ // before startScanlineDecode()/startIncrementalDecode().
+ if (fCurrScanlineCodec) {
+ SkASSERT(!fCurrIncrementalCodec);
+ return fCurrScanlineCodec->getScanlineOrder();
+ }
+
+ if (fCurrIncrementalCodec) {
+ return fCurrIncrementalCodec->getScanlineOrder();
+ }
+
+ return INHERITED::onGetScanlineOrder();
+}
+
+SkSampler* SkIcoCodec::getSampler(bool createIfNecessary) {
+ if (fCurrScanlineCodec) {
+ SkASSERT(!fCurrIncrementalCodec);
+ return fCurrScanlineCodec->getSampler(createIfNecessary);
+ }
+
+ if (fCurrIncrementalCodec) {
+ return fCurrIncrementalCodec->getSampler(createIfNecessary);
+ }
+
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/codec/SkIcoCodec.h b/gfx/skia/skia/src/codec/SkIcoCodec.h
new file mode 100644
index 000000000..a227d8c67
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkIcoCodec.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCodec.h"
+#include "SkImageInfo.h"
+#include "SkStream.h"
+#include "SkTypes.h"
+
+/*
+ * This class implements the decoding for ico images
+ */
+class SkIcoCodec : public SkCodec {
+public:
+ static bool IsIco(const void*, size_t);
+
+ /*
+ * Assumes IsIco was called and returned true
+ * Creates an Ico decoder
+ * Reads enough of the stream to determine the image format
+ */
+ static SkCodec* NewFromStream(SkStream*);
+
+protected:
+
+ /*
+ * Chooses the best dimensions given the desired scale
+ */
+ SkISize onGetScaledDimensions(float desiredScale) const override;
+
+ bool onDimensionsSupported(const SkISize&) override;
+
+ /*
+ * Initiates the Ico decode
+ */
+ Result onGetPixels(const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes, const Options&,
+ SkPMColor*, int*, int*) override;
+
+ SkEncodedFormat onGetEncodedFormat() const override {
+ return kICO_SkEncodedFormat;
+ }
+
+ SkScanlineOrder onGetScanlineOrder() const override;
+
+private:
+
+ Result onStartScanlineDecode(const SkImageInfo& dstInfo, const SkCodec::Options& options,
+ SkPMColor inputColorPtr[], int* inputColorCount) override;
+
+ int onGetScanlines(void* dst, int count, size_t rowBytes) override;
+
+ bool onSkipScanlines(int count) override;
+
+ Result onStartIncrementalDecode(const SkImageInfo& dstInfo, void* pixels, size_t rowBytes,
+ const SkCodec::Options&, SkPMColor*, int*) override;
+
+ Result onIncrementalDecode(int* rowsDecoded) override;
+
+ SkSampler* getSampler(bool createIfNecessary) override;
+
+ /*
+ * Searches fEmbeddedCodecs for a codec that matches requestedSize.
+ * The search starts at startIndex and ends when an appropriate codec
+ * is found, or we have reached the end of the array.
+ *
+ * @return the index of the matching codec or -1 if there is no
+ * matching codec between startIndex and the end of
+ * the array.
+ */
+ int chooseCodec(const SkISize& requestedSize, int startIndex);
+
+ /*
+ * Constructor called by NewFromStream
+ * @param embeddedCodecs codecs for the embedded images, takes ownership
+ */
+ SkIcoCodec(int width, int height, const SkEncodedInfo& info,
+ SkTArray<SkAutoTDelete<SkCodec>, true>* embeddedCodecs);
+
+ SkAutoTDelete<SkTArray<SkAutoTDelete<SkCodec>, true>> fEmbeddedCodecs; // owned
+
+ // Only used by the scanline decoder. onStartScanlineDecode() will set
+ // fCurrScanlineCodec to one of the fEmbeddedCodecs, if it can find a
+ // codec of the appropriate size. We will use fCurrScanlineCodec for
+ // subsequent calls to onGetScanlines() or onSkipScanlines().
+ // fCurrScanlineCodec is owned by this class, but should not be an
+ // SkAutoTDelete. It will be deleted by the destructor of fEmbeddedCodecs.
+ SkCodec* fCurrScanlineCodec;
+
+ // Only used by incremental decoder. onStartIncrementalDecode() will set
+ // fCurrIncrementalCodec to one of the fEmbeddedCodecs, if it can find a
+ // codec of the appropriate size. We will use fCurrIncrementalCodec for
+ // subsequent calls to incrementalDecode().
+ // fCurrIncrementalCodec is owned by this class, but should not be an
+ // SkAutoTDelete. It will be deleted by the destructor of fEmbeddedCodecs.
+ SkCodec* fCurrIncrementalCodec;
+
+ typedef SkCodec INHERITED;
+};
diff --git a/gfx/skia/skia/src/codec/SkJpegCodec.cpp b/gfx/skia/skia/src/codec/SkJpegCodec.cpp
new file mode 100644
index 000000000..f6c856ee1
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkJpegCodec.cpp
@@ -0,0 +1,934 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCodec.h"
+#include "SkMSAN.h"
+#include "SkJpegCodec.h"
+#include "SkJpegDecoderMgr.h"
+#include "SkCodecPriv.h"
+#include "SkColorPriv.h"
+#include "SkStream.h"
+#include "SkTemplates.h"
+#include "SkTypes.h"
+
+// stdio is needed for libjpeg-turbo
+#include <stdio.h>
+#include "SkJpegUtility.h"
+
+// This warning triggers false positives way too often in here.
+#if defined(__GNUC__) && !defined(__clang__)
+ #pragma GCC diagnostic ignored "-Wclobbered"
+#endif
+
+extern "C" {
+ #include "jerror.h"
+ #include "jpeglib.h"
+}
+
+bool SkJpegCodec::IsJpeg(const void* buffer, size_t bytesRead) {
+ static const uint8_t jpegSig[] = { 0xFF, 0xD8, 0xFF };
+ return bytesRead >= 3 && !memcmp(buffer, jpegSig, sizeof(jpegSig));
+}
+
+static uint32_t get_endian_int(const uint8_t* data, bool littleEndian) {
+ if (littleEndian) {
+ return (data[3] << 24) | (data[2] << 16) | (data[1] << 8) | (data[0]);
+ }
+
+ return (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | (data[3]);
+}
+
+const uint32_t kExifHeaderSize = 14;
+const uint32_t kICCHeaderSize = 14;
+const uint32_t kExifMarker = JPEG_APP0 + 1;
+const uint32_t kICCMarker = JPEG_APP0 + 2;
+
+static bool is_orientation_marker(jpeg_marker_struct* marker, SkCodec::Origin* orientation) {
+ if (kExifMarker != marker->marker || marker->data_length < kExifHeaderSize) {
+ return false;
+ }
+
+ const uint8_t* data = marker->data;
+ static const uint8_t kExifSig[] { 'E', 'x', 'i', 'f', '\0' };
+ if (memcmp(data, kExifSig, sizeof(kExifSig))) {
+ return false;
+ }
+
+ bool littleEndian;
+ if (!is_valid_endian_marker(data + 6, &littleEndian)) {
+ return false;
+ }
+
+ // Get the offset from the start of the marker.
+ // Account for 'E', 'x', 'i', 'f', '\0', '<fill byte>'.
+ uint32_t offset = get_endian_int(data + 10, littleEndian);
+ offset += sizeof(kExifSig) + 1;
+
+ // Require that the marker is at least large enough to contain the number of entries.
+ if (marker->data_length < offset + 2) {
+ return false;
+ }
+ uint32_t numEntries = get_endian_short(data + offset, littleEndian);
+
+ // Tag (2 bytes), Datatype (2 bytes), Number of elements (4 bytes), Data (4 bytes)
+ const uint32_t kEntrySize = 12;
+ numEntries = SkTMin(numEntries, (marker->data_length - offset - 2) / kEntrySize);
+
+ // Advance the data to the start of the entries.
+ data += offset + 2;
+
+ const uint16_t kOriginTag = 0x112;
+ const uint16_t kOriginType = 3;
+ for (uint32_t i = 0; i < numEntries; i++, data += kEntrySize) {
+ uint16_t tag = get_endian_short(data, littleEndian);
+ uint16_t type = get_endian_short(data + 2, littleEndian);
+ uint32_t count = get_endian_int(data + 4, littleEndian);
+ if (kOriginTag == tag && kOriginType == type && 1 == count) {
+ uint16_t val = get_endian_short(data + 8, littleEndian);
+ if (0 < val && val <= SkCodec::kLast_Origin) {
+ *orientation = (SkCodec::Origin) val;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
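+
+/*
+ * A minimal sketch (unused by the codec) of the 12-byte IFD entry layout that
+ * is_orientation_marker() scans: tag, type, count, then the value padded to
+ * four bytes. The bytes below encode orientation = 6 in little-endian order.
+ * Illustrative only.
+ */
+static uint16_t sketch_orientation_entry_value() {
+    const uint8_t entry[12] = {
+        0x12, 0x01,             // tag 0x0112 (orientation)
+        0x03, 0x00,             // type 3 (SHORT)
+        0x01, 0x00, 0x00, 0x00, // count 1
+        0x06, 0x00, 0x00, 0x00  // value 6, padded to 4 bytes
+    };
+    SkASSERT(0x112 == get_endian_short(entry, true));
+    SkASSERT(3 == get_endian_short(entry + 2, true));
+    SkASSERT(1 == get_endian_int(entry + 4, true));
+    return get_endian_short(entry + 8, true); // 6
+}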
+
+static SkCodec::Origin get_exif_orientation(jpeg_decompress_struct* dinfo) {
+ SkCodec::Origin orientation;
+ for (jpeg_marker_struct* marker = dinfo->marker_list; marker; marker = marker->next) {
+ if (is_orientation_marker(marker, &orientation)) {
+ return orientation;
+ }
+ }
+
+ return SkCodec::kDefault_Origin;
+}
+
+static bool is_icc_marker(jpeg_marker_struct* marker) {
+ if (kICCMarker != marker->marker || marker->data_length < kICCHeaderSize) {
+ return false;
+ }
+
+ static const uint8_t kICCSig[] { 'I', 'C', 'C', '_', 'P', 'R', 'O', 'F', 'I', 'L', 'E', '\0' };
+ return !memcmp(marker->data, kICCSig, sizeof(kICCSig));
+}
+
+/*
+ * ICC profiles may be stored using a sequence of multiple markers. We obtain the ICC profile
+ * in two steps:
+ * (1) Discover all ICC profile markers and verify that they are numbered properly.
+ * (2) Copy the data from each marker into a contiguous ICC profile.
+ */
+static sk_sp<SkData> get_icc_profile(jpeg_decompress_struct* dinfo) {
+    // Note that 256 will be enough storage space since each markerIndex is stored in 8 bits.
+ jpeg_marker_struct* markerSequence[256];
+ memset(markerSequence, 0, sizeof(markerSequence));
+ uint8_t numMarkers = 0;
+ size_t totalBytes = 0;
+
+ // Discover any ICC markers and verify that they are numbered properly.
+ for (jpeg_marker_struct* marker = dinfo->marker_list; marker; marker = marker->next) {
+ if (is_icc_marker(marker)) {
+ // Verify that numMarkers is valid and consistent.
+ if (0 == numMarkers) {
+ numMarkers = marker->data[13];
+ if (0 == numMarkers) {
+ SkCodecPrintf("ICC Profile Error: numMarkers must be greater than zero.\n");
+ return nullptr;
+ }
+ } else if (numMarkers != marker->data[13]) {
+ SkCodecPrintf("ICC Profile Error: numMarkers must be consistent.\n");
+ return nullptr;
+ }
+
+ // Verify that the markerIndex is valid and unique. Note that zero is not
+ // a valid index.
+ uint8_t markerIndex = marker->data[12];
+ if (markerIndex == 0 || markerIndex > numMarkers) {
+ SkCodecPrintf("ICC Profile Error: markerIndex is invalid.\n");
+ return nullptr;
+ }
+ if (markerSequence[markerIndex]) {
+ SkCodecPrintf("ICC Profile Error: Duplicate value of markerIndex.\n");
+ return nullptr;
+ }
+ markerSequence[markerIndex] = marker;
+ SkASSERT(marker->data_length >= kICCHeaderSize);
+ totalBytes += marker->data_length - kICCHeaderSize;
+ }
+ }
+
+ if (0 == totalBytes) {
+ // No non-empty ICC profile markers were found.
+ return nullptr;
+ }
+
+ // Combine the ICC marker data into a contiguous profile.
+ sk_sp<SkData> iccData = SkData::MakeUninitialized(totalBytes);
+ void* dst = iccData->writable_data();
+ for (uint32_t i = 1; i <= numMarkers; i++) {
+ jpeg_marker_struct* marker = markerSequence[i];
+ if (!marker) {
+ SkCodecPrintf("ICC Profile Error: Missing marker %d of %d.\n", i, numMarkers);
+ return nullptr;
+ }
+
+ void* src = SkTAddOffset<void>(marker->data, kICCHeaderSize);
+ size_t bytes = marker->data_length - kICCHeaderSize;
+ memcpy(dst, src, bytes);
+ dst = SkTAddOffset<void>(dst, bytes);
+ }
+
+ return iccData;
+}
+
+bool SkJpegCodec::ReadHeader(SkStream* stream, SkCodec** codecOut,
+ JpegDecoderMgr** decoderMgrOut) {
+
+ // Create a JpegDecoderMgr to own all of the decompress information
+ SkAutoTDelete<JpegDecoderMgr> decoderMgr(new JpegDecoderMgr(stream));
+
+ // libjpeg errors will be caught and reported here
+ if (setjmp(decoderMgr->getJmpBuf())) {
+ return decoderMgr->returnFalse("ReadHeader");
+ }
+
+ // Initialize the decompress info and the source manager
+ decoderMgr->init();
+
+    // Instruct the jpeg library to save the markers that we care about. Since
+ // the orientation and color profile will not change, we can skip this
+ // step on rewinds.
+ if (codecOut) {
+ jpeg_save_markers(decoderMgr->dinfo(), kExifMarker, 0xFFFF);
+ jpeg_save_markers(decoderMgr->dinfo(), kICCMarker, 0xFFFF);
+ }
+
+ // Read the jpeg header
+ if (JPEG_HEADER_OK != jpeg_read_header(decoderMgr->dinfo(), true)) {
+ return decoderMgr->returnFalse("ReadHeader");
+ }
+
+ if (codecOut) {
+ // Get the encoded color type
+ SkEncodedInfo::Color color;
+ if (!decoderMgr->getEncodedColor(&color)) {
+ return false;
+ }
+
+ // Create image info object and the codec
+ SkEncodedInfo info = SkEncodedInfo::Make(color, SkEncodedInfo::kOpaque_Alpha, 8);
+
+ Origin orientation = get_exif_orientation(decoderMgr->dinfo());
+ sk_sp<SkData> iccData = get_icc_profile(decoderMgr->dinfo());
+ sk_sp<SkColorSpace> colorSpace = nullptr;
+ if (iccData) {
+ colorSpace = SkColorSpace::NewICC(iccData->data(), iccData->size());
+ if (!colorSpace) {
+ SkCodecPrintf("Could not create SkColorSpace from ICC data.\n");
+ }
+ }
+ if (!colorSpace) {
+ // Treat unmarked jpegs as sRGB.
+ colorSpace = SkColorSpace::NewNamed(SkColorSpace::kSRGB_Named);
+ }
+
+ const int width = decoderMgr->dinfo()->image_width;
+ const int height = decoderMgr->dinfo()->image_height;
+ *codecOut = new SkJpegCodec(width, height, info, stream, decoderMgr.release(),
+ std::move(colorSpace), orientation, std::move(iccData));
+ } else {
+ SkASSERT(nullptr != decoderMgrOut);
+ *decoderMgrOut = decoderMgr.release();
+ }
+ return true;
+}
+
+SkCodec* SkJpegCodec::NewFromStream(SkStream* stream) {
+ SkAutoTDelete<SkStream> streamDeleter(stream);
+ SkCodec* codec = nullptr;
+ if (ReadHeader(stream, &codec, nullptr)) {
+ // Codec has taken ownership of the stream, we do not need to delete it
+ SkASSERT(codec);
+ streamDeleter.release();
+ return codec;
+ }
+ return nullptr;
+}
+
+SkJpegCodec::SkJpegCodec(int width, int height, const SkEncodedInfo& info, SkStream* stream,
+ JpegDecoderMgr* decoderMgr, sk_sp<SkColorSpace> colorSpace, Origin origin,
+ sk_sp<SkData> iccData)
+ : INHERITED(width, height, info, stream, std::move(colorSpace), origin)
+ , fDecoderMgr(decoderMgr)
+ , fReadyState(decoderMgr->dinfo()->global_state)
+ , fSwizzleSrcRow(nullptr)
+ , fColorXformSrcRow(nullptr)
+ , fSwizzlerSubset(SkIRect::MakeEmpty())
+ , fICCData(std::move(iccData))
+{}
+
+/*
+ * Return the row bytes of a particular image type and width
+ */
+static size_t get_row_bytes(const j_decompress_ptr dinfo) {
+ const size_t colorBytes = (dinfo->out_color_space == JCS_RGB565) ? 2 :
+ dinfo->out_color_components;
+ return dinfo->output_width * colorBytes;
+}
+
+/*
+ * Calculate output dimensions based on the provided factors.
+ *
+ * Not to be used on the actual jpeg_decompress_struct used for decoding, since it will
+ * incorrectly modify num_components.
+ */
+void calc_output_dimensions(jpeg_decompress_struct* dinfo, unsigned int num, unsigned int denom) {
+ dinfo->num_components = 0;
+ dinfo->scale_num = num;
+ dinfo->scale_denom = denom;
+ jpeg_calc_output_dimensions(dinfo);
+}
+
+/*
+ * Return a valid set of output dimensions for this decoder, given an input scale
+ */
+SkISize SkJpegCodec::onGetScaledDimensions(float desiredScale) const {
+ // libjpeg-turbo supports scaling by 1/8, 1/4, 3/8, 1/2, 5/8, 3/4, 7/8, and 1/1, so we will
+ // support these as well
+ unsigned int num;
+ unsigned int denom = 8;
+ if (desiredScale >= 0.9375) {
+ num = 8;
+ } else if (desiredScale >= 0.8125) {
+ num = 7;
+ } else if (desiredScale >= 0.6875f) {
+ num = 6;
+ } else if (desiredScale >= 0.5625f) {
+ num = 5;
+ } else if (desiredScale >= 0.4375f) {
+ num = 4;
+ } else if (desiredScale >= 0.3125f) {
+ num = 3;
+ } else if (desiredScale >= 0.1875f) {
+ num = 2;
+ } else {
+ num = 1;
+ }
+
+ // Set up a fake decompress struct in order to use libjpeg to calculate output dimensions
+ jpeg_decompress_struct dinfo;
+ sk_bzero(&dinfo, sizeof(dinfo));
+ dinfo.image_width = this->getInfo().width();
+ dinfo.image_height = this->getInfo().height();
+ dinfo.global_state = fReadyState;
+ calc_output_dimensions(&dinfo, num, denom);
+
+ // Return the calculated output dimensions for the given scale
+ return SkISize::Make(dinfo.output_width, dinfo.output_height);
+}
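+
+/*
+ * A minimal sketch (unused by the codec) of the arithmetic behind the num/8
+ * scales above: the scaled dimensions are rounded up, matching what
+ * jpeg_calc_output_dimensions() reports for these fractions. Illustrative only;
+ * the codec always asks libjpeg-turbo for the real values.
+ */
+static SkISize sketch_scaled_size(int width, int height, unsigned int num) {
+    SkASSERT(num >= 1 && num <= 8);
+    const int w = (width * (int) num + 7) / 8;   // ceil(width * num / 8)
+    const int h = (height * (int) num + 7) / 8;  // ceil(height * num / 8)
+    // Ex: a 1000x603 jpeg with desiredScale = 0.5f selects num = 4 and is
+    //     offered as 500x302.
+    return SkISize::Make(w, h);
+}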
+
+bool SkJpegCodec::onRewind() {
+ JpegDecoderMgr* decoderMgr = nullptr;
+ if (!ReadHeader(this->stream(), nullptr, &decoderMgr)) {
+ return fDecoderMgr->returnFalse("onRewind");
+ }
+ SkASSERT(nullptr != decoderMgr);
+ fDecoderMgr.reset(decoderMgr);
+
+ fSwizzler.reset(nullptr);
+ fSwizzleSrcRow = nullptr;
+ fColorXformSrcRow = nullptr;
+ fStorage.reset();
+ fColorXform.reset(nullptr);
+
+ return true;
+}
+
+/*
+ * Checks if the conversion between the input image and the requested output
+ * image has been implemented
+ * Sets the output color space
+ */
+bool SkJpegCodec::setOutputColorSpace(const SkImageInfo& dstInfo) {
+ if (kUnknown_SkAlphaType == dstInfo.alphaType()) {
+ return false;
+ }
+
+ if (kOpaque_SkAlphaType != dstInfo.alphaType()) {
+ SkCodecPrintf("Warning: an opaque image should be decoded as opaque "
+ "- it is being decoded as non-opaque, which will draw slower\n");
+ }
+
+ // Check if we will decode to CMYK. libjpeg-turbo does not convert CMYK to RGBA, so
+ // we must do it ourselves.
+ J_COLOR_SPACE encodedColorType = fDecoderMgr->dinfo()->jpeg_color_space;
+ bool isCMYK = (JCS_CMYK == encodedColorType || JCS_YCCK == encodedColorType);
+
+ // Check for valid color types and set the output color space
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ if (isCMYK) {
+ fDecoderMgr->dinfo()->out_color_space = JCS_CMYK;
+ } else {
+ fDecoderMgr->dinfo()->out_color_space = JCS_EXT_RGBA;
+ }
+ return true;
+ case kBGRA_8888_SkColorType:
+ if (isCMYK) {
+ fDecoderMgr->dinfo()->out_color_space = JCS_CMYK;
+ } else if (fColorXform) {
+ // Our color transformation code requires RGBA order inputs, but it'll swizzle
+ // to BGRA for us.
+ fDecoderMgr->dinfo()->out_color_space = JCS_EXT_RGBA;
+ } else {
+ fDecoderMgr->dinfo()->out_color_space = JCS_EXT_BGRA;
+ }
+ return true;
+ case kRGB_565_SkColorType:
+ if (fColorXform) {
+ return false;
+ }
+
+ if (isCMYK) {
+ fDecoderMgr->dinfo()->out_color_space = JCS_CMYK;
+ } else {
+ fDecoderMgr->dinfo()->dither_mode = JDITHER_NONE;
+ fDecoderMgr->dinfo()->out_color_space = JCS_RGB565;
+ }
+ return true;
+ case kGray_8_SkColorType:
+ if (fColorXform || JCS_GRAYSCALE != encodedColorType) {
+ return false;
+ }
+
+ fDecoderMgr->dinfo()->out_color_space = JCS_GRAYSCALE;
+ return true;
+ case kRGBA_F16_SkColorType:
+ SkASSERT(fColorXform);
+ if (!dstInfo.colorSpace()->gammaIsLinear()) {
+ return false;
+ }
+
+ if (isCMYK) {
+ fDecoderMgr->dinfo()->out_color_space = JCS_CMYK;
+ } else {
+ fDecoderMgr->dinfo()->out_color_space = JCS_EXT_RGBA;
+ }
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * Checks if we can natively scale to the requested dimensions and natively scales the
+ * dimensions if possible
+ */
+bool SkJpegCodec::onDimensionsSupported(const SkISize& size) {
+ if (setjmp(fDecoderMgr->getJmpBuf())) {
+ return fDecoderMgr->returnFalse("onDimensionsSupported");
+ }
+
+ const unsigned int dstWidth = size.width();
+ const unsigned int dstHeight = size.height();
+
+ // Set up a fake decompress struct in order to use libjpeg to calculate output dimensions
+ // FIXME: Why is this necessary?
+ jpeg_decompress_struct dinfo;
+ sk_bzero(&dinfo, sizeof(dinfo));
+ dinfo.image_width = this->getInfo().width();
+ dinfo.image_height = this->getInfo().height();
+ dinfo.global_state = fReadyState;
+
+ // libjpeg-turbo can scale to 1/8, 1/4, 3/8, 1/2, 5/8, 3/4, 7/8, and 1/1
+ unsigned int num = 8;
+ const unsigned int denom = 8;
+ calc_output_dimensions(&dinfo, num, denom);
+ while (dinfo.output_width != dstWidth || dinfo.output_height != dstHeight) {
+
+ // Return a failure if we have tried all of the possible scales
+ if (1 == num || dstWidth > dinfo.output_width || dstHeight > dinfo.output_height) {
+ return false;
+ }
+
+ // Try the next scale
+ num -= 1;
+ calc_output_dimensions(&dinfo, num, denom);
+ }
+
+ fDecoderMgr->dinfo()->scale_num = num;
+ fDecoderMgr->dinfo()->scale_denom = denom;
+ return true;
+}
+
+int SkJpegCodec::readRows(const SkImageInfo& dstInfo, void* dst, size_t rowBytes, int count) {
+ // Set the jump location for libjpeg-turbo errors
+ if (setjmp(fDecoderMgr->getJmpBuf())) {
+ return 0;
+ }
+
+ // When fSwizzleSrcRow is non-null, it means that we need to swizzle. In this case,
+    // we will always decode into fSwizzleSrcRow before swizzling into the next buffer.
+ // We can never swizzle "in place" because the swizzler may perform sampling and/or
+ // subsetting.
+ // When fColorXformSrcRow is non-null, it means that we need to color xform and that
+ // we cannot color xform "in place" (many times we can, but not when the dst is F16).
+    // In this case, we will color xform from fColorXformSrcRow into the dst.
+ JSAMPLE* decodeDst = (JSAMPLE*) dst;
+ uint32_t* swizzleDst = (uint32_t*) dst;
+ size_t decodeDstRowBytes = rowBytes;
+ size_t swizzleDstRowBytes = rowBytes;
+ int dstWidth = dstInfo.width();
+ if (fSwizzleSrcRow && fColorXformSrcRow) {
+ decodeDst = (JSAMPLE*) fSwizzleSrcRow;
+ swizzleDst = fColorXformSrcRow;
+ decodeDstRowBytes = 0;
+ swizzleDstRowBytes = 0;
+ dstWidth = fSwizzler->swizzleWidth();
+ } else if (fColorXformSrcRow) {
+ decodeDst = (JSAMPLE*) fColorXformSrcRow;
+ swizzleDst = fColorXformSrcRow;
+ decodeDstRowBytes = 0;
+ swizzleDstRowBytes = 0;
+ } else if (fSwizzleSrcRow) {
+ decodeDst = (JSAMPLE*) fSwizzleSrcRow;
+ decodeDstRowBytes = 0;
+ dstWidth = fSwizzler->swizzleWidth();
+ }
+
+ for (int y = 0; y < count; y++) {
+ uint32_t lines = jpeg_read_scanlines(fDecoderMgr->dinfo(), &decodeDst, 1);
+ size_t srcRowBytes = get_row_bytes(fDecoderMgr->dinfo());
+ sk_msan_mark_initialized(decodeDst, decodeDst + srcRowBytes, "skbug.com/4550");
+ if (0 == lines) {
+ return y;
+ }
+
+ if (fSwizzler) {
+ fSwizzler->swizzle(swizzleDst, decodeDst);
+ }
+
+ if (fColorXform) {
+ fColorXform->apply(dst, swizzleDst, dstWidth, select_xform_format(dstInfo.colorType()),
+ SkColorSpaceXform::kRGBA_8888_ColorFormat, kOpaque_SkAlphaType);
+ dst = SkTAddOffset<void>(dst, rowBytes);
+ }
+
+ decodeDst = SkTAddOffset<JSAMPLE>(decodeDst, decodeDstRowBytes);
+ swizzleDst = SkTAddOffset<uint32_t>(swizzleDst, swizzleDstRowBytes);
+ }
+
+ return count;
+}
+
+/*
+ * Performs the jpeg decode
+ */
+SkCodec::Result SkJpegCodec::onGetPixels(const SkImageInfo& dstInfo,
+ void* dst, size_t dstRowBytes,
+ const Options& options, SkPMColor*, int*,
+ int* rowsDecoded) {
+ if (options.fSubset) {
+ // Subsets are not supported.
+ return kUnimplemented;
+ }
+
+ // Get a pointer to the decompress info since we will use it quite frequently
+ jpeg_decompress_struct* dinfo = fDecoderMgr->dinfo();
+
+ // Set the jump location for libjpeg errors
+ if (setjmp(fDecoderMgr->getJmpBuf())) {
+ return fDecoderMgr->returnFailure("setjmp", kInvalidInput);
+ }
+
+ this->initializeColorXform(dstInfo);
+
+ // Check if we can decode to the requested destination and set the output color space
+ if (!this->setOutputColorSpace(dstInfo)) {
+ return fDecoderMgr->returnFailure("setOutputColorSpace", kInvalidConversion);
+ }
+
+ if (!jpeg_start_decompress(dinfo)) {
+ return fDecoderMgr->returnFailure("startDecompress", kInvalidInput);
+ }
+
+ // The recommended output buffer height should always be 1 in high quality modes.
+ // If it's not, we want to know because it means our strategy is not optimal.
+ SkASSERT(1 == dinfo->rec_outbuf_height);
+
+ J_COLOR_SPACE colorSpace = dinfo->out_color_space;
+ if (JCS_CMYK == colorSpace) {
+ this->initializeSwizzler(dstInfo, options);
+ }
+
+ this->allocateStorage(dstInfo);
+
+ int rows = this->readRows(dstInfo, dst, dstRowBytes, dstInfo.height());
+ if (rows < dstInfo.height()) {
+ *rowsDecoded = rows;
+ return fDecoderMgr->returnFailure("Incomplete image data", kIncompleteInput);
+ }
+
+ return kSuccess;
+}
+
+void SkJpegCodec::allocateStorage(const SkImageInfo& dstInfo) {
+ int dstWidth = dstInfo.width();
+
+ size_t swizzleBytes = 0;
+ if (fSwizzler) {
+ swizzleBytes = get_row_bytes(fDecoderMgr->dinfo());
+ dstWidth = fSwizzler->swizzleWidth();
+ SkASSERT(!fColorXform || SkIsAlign4(swizzleBytes));
+ }
+
+ size_t xformBytes = 0;
+ if (kRGBA_F16_SkColorType == dstInfo.colorType()) {
+ SkASSERT(fColorXform);
+ xformBytes = dstWidth * sizeof(uint32_t);
+ }
+
+ size_t totalBytes = swizzleBytes + xformBytes;
+ if (totalBytes > 0) {
+ fStorage.reset(totalBytes);
+ fSwizzleSrcRow = (swizzleBytes > 0) ? fStorage.get() : nullptr;
+ fColorXformSrcRow = (xformBytes > 0) ?
+ SkTAddOffset<uint32_t>(fStorage.get(), swizzleBytes) : nullptr;
+ }
+}
+
+void SkJpegCodec::initializeSwizzler(const SkImageInfo& dstInfo, const Options& options) {
+ // libjpeg-turbo may have already performed color conversion. We must indicate the
+ // appropriate format to the swizzler.
+ SkEncodedInfo swizzlerInfo = this->getEncodedInfo();
+ bool preSwizzled = true;
+ if (JCS_CMYK == fDecoderMgr->dinfo()->out_color_space) {
+ preSwizzled = false;
+ swizzlerInfo = SkEncodedInfo::Make(SkEncodedInfo::kInvertedCMYK_Color,
+ swizzlerInfo.alpha(),
+ swizzlerInfo.bitsPerComponent());
+ }
+
+ Options swizzlerOptions = options;
+ if (options.fSubset) {
+ // Use fSwizzlerSubset if this is a subset decode. This is necessary in the case
+ // where libjpeg-turbo provides a subset and then we need to subset it further.
+ // Also, verify that fSwizzlerSubset is initialized and valid.
+ SkASSERT(!fSwizzlerSubset.isEmpty() && fSwizzlerSubset.x() <= options.fSubset->x() &&
+ fSwizzlerSubset.width() == options.fSubset->width());
+ swizzlerOptions.fSubset = &fSwizzlerSubset;
+ }
+ fSwizzler.reset(SkSwizzler::CreateSwizzler(swizzlerInfo, nullptr, dstInfo, swizzlerOptions,
+ nullptr, preSwizzled));
+ SkASSERT(fSwizzler);
+}
+
+void SkJpegCodec::initializeColorXform(const SkImageInfo& dstInfo) {
+ if (needs_color_xform(dstInfo, this->getInfo())) {
+ fColorXform = SkColorSpaceXform::New(this->getInfo().colorSpace(), dstInfo.colorSpace());
+ SkASSERT(fColorXform);
+ }
+}
+
+SkSampler* SkJpegCodec::getSampler(bool createIfNecessary) {
+ if (!createIfNecessary || fSwizzler) {
+ SkASSERT(!fSwizzler || (fSwizzleSrcRow && fStorage.get() == fSwizzleSrcRow));
+ return fSwizzler;
+ }
+
+ this->initializeSwizzler(this->dstInfo(), this->options());
+ this->allocateStorage(this->dstInfo());
+ return fSwizzler;
+}
+
+SkCodec::Result SkJpegCodec::onStartScanlineDecode(const SkImageInfo& dstInfo,
+ const Options& options, SkPMColor ctable[], int* ctableCount) {
+ // Set the jump location for libjpeg errors
+ if (setjmp(fDecoderMgr->getJmpBuf())) {
+ SkCodecPrintf("setjmp: Error from libjpeg\n");
+ return kInvalidInput;
+ }
+
+ this->initializeColorXform(dstInfo);
+
+ // Check if we can decode to the requested destination and set the output color space
+ if (!this->setOutputColorSpace(dstInfo)) {
+ return fDecoderMgr->returnFailure("setOutputColorSpace", kInvalidConversion);
+ }
+
+ if (!jpeg_start_decompress(fDecoderMgr->dinfo())) {
+ SkCodecPrintf("start decompress failed\n");
+ return kInvalidInput;
+ }
+
+ if (options.fSubset) {
+ uint32_t startX = options.fSubset->x();
+ uint32_t width = options.fSubset->width();
+
+ // libjpeg-turbo may need to align startX to a multiple of the IDCT
+ // block size. If this is the case, it will decrease the value of
+ // startX to the appropriate alignment and also increase the value
+ // of width so that the right edge of the requested subset remains
+ // the same.
+ jpeg_crop_scanline(fDecoderMgr->dinfo(), &startX, &width);
+
+ SkASSERT(startX <= (uint32_t) options.fSubset->x());
+ SkASSERT(width >= (uint32_t) options.fSubset->width());
+ SkASSERT(startX + width >= (uint32_t) options.fSubset->right());
+
+ // Instruct the swizzler (if it is necessary) to further subset the
+ // output provided by libjpeg-turbo.
+ //
+ // We set this here (rather than in the if statement below), so that
+ // if (1) we don't need a swizzler for the subset, and (2) we need a
+ // swizzler for CMYK, the swizzler will still use the proper subset
+ // dimensions.
+ //
+ // Note that the swizzler will ignore the y and height parameters of
+ // the subset. Since the scanline decoder (and the swizzler) handle
+ // one row at a time, only the subsetting in the x-dimension matters.
+ fSwizzlerSubset.setXYWH(options.fSubset->x() - startX, 0,
+ options.fSubset->width(), options.fSubset->height());
+
+ // We will need a swizzler if libjpeg-turbo cannot provide the exact
+ // subset that we request.
+ if (startX != (uint32_t) options.fSubset->x() ||
+ width != (uint32_t) options.fSubset->width()) {
+ this->initializeSwizzler(dstInfo, options);
+ }
+ }
+
+ // Make sure we have a swizzler if we are converting from CMYK.
+ if (!fSwizzler && JCS_CMYK == fDecoderMgr->dinfo()->out_color_space) {
+ this->initializeSwizzler(dstInfo, options);
+ }
+
+ this->allocateStorage(dstInfo);
+
+ return kSuccess;
+}
+
+int SkJpegCodec::onGetScanlines(void* dst, int count, size_t dstRowBytes) {
+ int rows = this->readRows(this->dstInfo(), dst, dstRowBytes, count);
+ if (rows < count) {
+ // This allows us to skip calling jpeg_finish_decompress().
+ fDecoderMgr->dinfo()->output_scanline = this->dstInfo().height();
+ }
+
+ return rows;
+}
+
+bool SkJpegCodec::onSkipScanlines(int count) {
+ // Set the jump location for libjpeg errors
+ if (setjmp(fDecoderMgr->getJmpBuf())) {
+ return fDecoderMgr->returnFalse("onSkipScanlines");
+ }
+
+ return (uint32_t) count == jpeg_skip_scanlines(fDecoderMgr->dinfo(), count);
+}
+
+static bool is_yuv_supported(jpeg_decompress_struct* dinfo) {
+ // Scaling is not supported in raw data mode.
+ SkASSERT(dinfo->scale_num == dinfo->scale_denom);
+
+ // I can't imagine that this would ever change, but we do depend on it.
+ static_assert(8 == DCTSIZE, "DCTSIZE (defined in jpeg library) should always be 8.");
+
+ if (JCS_YCbCr != dinfo->jpeg_color_space) {
+ return false;
+ }
+
+ SkASSERT(3 == dinfo->num_components);
+ SkASSERT(dinfo->comp_info);
+
+ // It is possible to perform a YUV decode for any combination of
+ // horizontal and vertical sampling that is supported by
+ // libjpeg/libjpeg-turbo. However, we will start by supporting only the
+ // common cases (where U and V have samp_factors of one).
+ //
+ // The definition of samp_factor is kind of the opposite of what SkCodec
+ // thinks of as a sampling factor. samp_factor is essentially a
+ // multiplier, and the larger the samp_factor is, the more samples that
+ // there will be. Ex:
+ // U_plane_width = image_width * (U_h_samp_factor / max_h_samp_factor)
+ //
+ // Supporting cases where the samp_factors for U or V were larger than
+ // that of Y would be an extremely difficult change, given that clients
+ // allocate memory as if the size of the Y plane is always the size of the
+ // image. However, this case is very, very rare.
+ if ((1 != dinfo->comp_info[1].h_samp_factor) ||
+ (1 != dinfo->comp_info[1].v_samp_factor) ||
+ (1 != dinfo->comp_info[2].h_samp_factor) ||
+ (1 != dinfo->comp_info[2].v_samp_factor))
+ {
+ return false;
+ }
+
+ // Support all common cases of Y samp_factors.
+ // TODO (msarett): As mentioned above, it would be possible to support
+ // more combinations of samp_factors. The issues are:
+ // (1) Are there actually any images that are not covered
+ // by these cases?
+ // (2) How much complexity would be added to the
+ // implementation in order to support these rare
+ // cases?
+ int hSampY = dinfo->comp_info[0].h_samp_factor;
+ int vSampY = dinfo->comp_info[0].v_samp_factor;
+ return (1 == hSampY && 1 == vSampY) ||
+ (2 == hSampY && 1 == vSampY) ||
+ (2 == hSampY && 2 == vSampY) ||
+ (1 == hSampY && 2 == vSampY) ||
+ (4 == hSampY && 1 == vSampY) ||
+ (4 == hSampY && 2 == vSampY);
+}
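+
+/*
+ * A minimal sketch (unused by the codec) of the plane-size arithmetic implied
+ * by the samp_factor discussion above, for the supported cases where U and V
+ * have samp_factors of one. Illustrative only; onQueryYUV8() reports
+ * libjpeg-turbo's own downsampled sizes.
+ */
+static SkISize sketch_chroma_plane_size(int imageWidth, int imageHeight,
+                                        int hSampY, int vSampY) {
+    // U_plane_width = image_width * (U_h_samp_factor / max_h_samp_factor),
+    // with U_h_samp_factor == 1 and partial samples rounded up.
+    const int w = (imageWidth + hSampY - 1) / hSampY;
+    const int h = (imageHeight + vSampY - 1) / vSampY;
+    // Ex: a 1920x1081 4:2:0 jpeg (hSampY = vSampY = 2) has 960x541 U and V planes.
+    return SkISize::Make(w, h);
+}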
+
+bool SkJpegCodec::onQueryYUV8(SkYUVSizeInfo* sizeInfo, SkYUVColorSpace* colorSpace) const {
+ jpeg_decompress_struct* dinfo = fDecoderMgr->dinfo();
+ if (!is_yuv_supported(dinfo)) {
+ return false;
+ }
+
+ sizeInfo->fSizes[SkYUVSizeInfo::kY].set(dinfo->comp_info[0].downsampled_width,
+ dinfo->comp_info[0].downsampled_height);
+ sizeInfo->fSizes[SkYUVSizeInfo::kU].set(dinfo->comp_info[1].downsampled_width,
+ dinfo->comp_info[1].downsampled_height);
+ sizeInfo->fSizes[SkYUVSizeInfo::kV].set(dinfo->comp_info[2].downsampled_width,
+ dinfo->comp_info[2].downsampled_height);
+ sizeInfo->fWidthBytes[SkYUVSizeInfo::kY] = dinfo->comp_info[0].width_in_blocks * DCTSIZE;
+ sizeInfo->fWidthBytes[SkYUVSizeInfo::kU] = dinfo->comp_info[1].width_in_blocks * DCTSIZE;
+ sizeInfo->fWidthBytes[SkYUVSizeInfo::kV] = dinfo->comp_info[2].width_in_blocks * DCTSIZE;
+
+ if (colorSpace) {
+ *colorSpace = kJPEG_SkYUVColorSpace;
+ }
+
+ return true;
+}
+
+SkCodec::Result SkJpegCodec::onGetYUV8Planes(const SkYUVSizeInfo& sizeInfo, void* planes[3]) {
+ SkYUVSizeInfo defaultInfo;
+
+ // This will check is_yuv_supported(), so we don't need to here.
+ bool supportsYUV = this->onQueryYUV8(&defaultInfo, nullptr);
+ if (!supportsYUV ||
+ sizeInfo.fSizes[SkYUVSizeInfo::kY] != defaultInfo.fSizes[SkYUVSizeInfo::kY] ||
+ sizeInfo.fSizes[SkYUVSizeInfo::kU] != defaultInfo.fSizes[SkYUVSizeInfo::kU] ||
+ sizeInfo.fSizes[SkYUVSizeInfo::kV] != defaultInfo.fSizes[SkYUVSizeInfo::kV] ||
+ sizeInfo.fWidthBytes[SkYUVSizeInfo::kY] < defaultInfo.fWidthBytes[SkYUVSizeInfo::kY] ||
+ sizeInfo.fWidthBytes[SkYUVSizeInfo::kU] < defaultInfo.fWidthBytes[SkYUVSizeInfo::kU] ||
+ sizeInfo.fWidthBytes[SkYUVSizeInfo::kV] < defaultInfo.fWidthBytes[SkYUVSizeInfo::kV]) {
+ return fDecoderMgr->returnFailure("onGetYUV8Planes", kInvalidInput);
+ }
+
+ // Set the jump location for libjpeg errors
+ if (setjmp(fDecoderMgr->getJmpBuf())) {
+ return fDecoderMgr->returnFailure("setjmp", kInvalidInput);
+ }
+
+ // Get a pointer to the decompress info since we will use it quite frequently
+ jpeg_decompress_struct* dinfo = fDecoderMgr->dinfo();
+
+ dinfo->raw_data_out = TRUE;
+ if (!jpeg_start_decompress(dinfo)) {
+ return fDecoderMgr->returnFailure("startDecompress", kInvalidInput);
+ }
+
+ // A previous implementation claims that the return value of is_yuv_supported()
+ // may change after calling jpeg_start_decompress(). It looks to me like this
+ // was caused by a bug in the old code, but we'll be safe and check here.
+ SkASSERT(is_yuv_supported(dinfo));
+
+ // Currently, we require that the Y plane dimensions match the image dimensions
+ // and that the U and V planes are the same dimensions.
+ SkASSERT(sizeInfo.fSizes[SkYUVSizeInfo::kU] == sizeInfo.fSizes[SkYUVSizeInfo::kV]);
+ SkASSERT((uint32_t) sizeInfo.fSizes[SkYUVSizeInfo::kY].width() == dinfo->output_width &&
+ (uint32_t) sizeInfo.fSizes[SkYUVSizeInfo::kY].height() == dinfo->output_height);
+
+ // Build a JSAMPIMAGE to handle output from libjpeg-turbo. A JSAMPIMAGE has
+ // a 2-D array of pixels for each of the components (Y, U, V) in the image.
+ // Cheat Sheet:
+ // JSAMPIMAGE == JSAMPLEARRAY* == JSAMPROW** == JSAMPLE***
+ JSAMPARRAY yuv[3];
+
+ // Set aside enough space for pointers to rows of Y, U, and V.
+ JSAMPROW rowptrs[2 * DCTSIZE + DCTSIZE + DCTSIZE];
+ yuv[0] = &rowptrs[0]; // Y rows (DCTSIZE or 2 * DCTSIZE)
+ yuv[1] = &rowptrs[2 * DCTSIZE]; // U rows (DCTSIZE)
+ yuv[2] = &rowptrs[3 * DCTSIZE]; // V rows (DCTSIZE)
+
+ // Initialize rowptrs.
+ int numYRowsPerBlock = DCTSIZE * dinfo->comp_info[0].v_samp_factor;
+ for (int i = 0; i < numYRowsPerBlock; i++) {
+ rowptrs[i] = SkTAddOffset<JSAMPLE>(planes[SkYUVSizeInfo::kY],
+ i * sizeInfo.fWidthBytes[SkYUVSizeInfo::kY]);
+ }
+ for (int i = 0; i < DCTSIZE; i++) {
+ rowptrs[i + 2 * DCTSIZE] = SkTAddOffset<JSAMPLE>(planes[SkYUVSizeInfo::kU],
+ i * sizeInfo.fWidthBytes[SkYUVSizeInfo::kU]);
+ rowptrs[i + 3 * DCTSIZE] = SkTAddOffset<JSAMPLE>(planes[SkYUVSizeInfo::kV],
+ i * sizeInfo.fWidthBytes[SkYUVSizeInfo::kV]);
+ }
+
+ // After each loop iteration, we will increment pointers to Y, U, and V.
+ size_t blockIncrementY = numYRowsPerBlock * sizeInfo.fWidthBytes[SkYUVSizeInfo::kY];
+ size_t blockIncrementU = DCTSIZE * sizeInfo.fWidthBytes[SkYUVSizeInfo::kU];
+ size_t blockIncrementV = DCTSIZE * sizeInfo.fWidthBytes[SkYUVSizeInfo::kV];
+
+ uint32_t numRowsPerBlock = numYRowsPerBlock;
+
+ // We intentionally round down here, as this first loop will only handle
+ // full block rows. As a special case at the end, we will handle any
+ // remaining rows that do not make up a full block.
+ const int numIters = dinfo->output_height / numRowsPerBlock;
+ for (int i = 0; i < numIters; i++) {
+ JDIMENSION linesRead = jpeg_read_raw_data(dinfo, yuv, numRowsPerBlock);
+ if (linesRead < numRowsPerBlock) {
+ // FIXME: Handle incomplete YUV decodes without signalling an error.
+ return kInvalidInput;
+ }
+
+ // Update rowptrs.
+ for (int i = 0; i < numYRowsPerBlock; i++) {
+ rowptrs[i] += blockIncrementY;
+ }
+ for (int i = 0; i < DCTSIZE; i++) {
+ rowptrs[i + 2 * DCTSIZE] += blockIncrementU;
+ rowptrs[i + 3 * DCTSIZE] += blockIncrementV;
+ }
+ }
+
+ uint32_t remainingRows = dinfo->output_height - dinfo->output_scanline;
+ SkASSERT(remainingRows == dinfo->output_height % numRowsPerBlock);
+ SkASSERT(dinfo->output_scanline == numIters * numRowsPerBlock);
+ if (remainingRows > 0) {
+ // libjpeg-turbo needs memory to be padded by the block sizes. We will fulfill
+ // this requirement using a dummy row buffer.
+ // FIXME: Should SkCodec have an extra memory buffer that can be shared among
+ // all of the implementations that use temporary/garbage memory?
+ SkAutoTMalloc<JSAMPLE> dummyRow(sizeInfo.fWidthBytes[SkYUVSizeInfo::kY]);
+ for (int i = remainingRows; i < numYRowsPerBlock; i++) {
+ rowptrs[i] = dummyRow.get();
+ }
+ int remainingUVRows = dinfo->comp_info[1].downsampled_height - DCTSIZE * numIters;
+ for (int i = remainingUVRows; i < DCTSIZE; i++) {
+ rowptrs[i + 2 * DCTSIZE] = dummyRow.get();
+ rowptrs[i + 3 * DCTSIZE] = dummyRow.get();
+ }
+
+ JDIMENSION linesRead = jpeg_read_raw_data(dinfo, yuv, numRowsPerBlock);
+ if (linesRead < remainingRows) {
+ // FIXME: Handle incomplete YUV decodes without signalling an error.
+ return kInvalidInput;
+ }
+ }
+
+ return kSuccess;
+}
diff --git a/gfx/skia/skia/src/codec/SkJpegCodec.h b/gfx/skia/skia/src/codec/SkJpegCodec.h
new file mode 100644
index 000000000..30425eea3
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkJpegCodec.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkJpegCodec_DEFINED
+#define SkJpegCodec_DEFINED
+
+#include "SkCodec.h"
+#include "SkColorSpace.h"
+#include "SkColorSpaceXform.h"
+#include "SkImageInfo.h"
+#include "SkSwizzler.h"
+#include "SkStream.h"
+#include "SkTemplates.h"
+
+class JpegDecoderMgr;
+
+/*
+ * This class implements the decoding for jpeg images
+ */
+class SkJpegCodec : public SkCodec {
+public:
+ static bool IsJpeg(const void*, size_t);
+
+ /*
+ * Assumes IsJpeg was called and returned true
+ * Creates a jpeg decoder
+ * Takes ownership of the stream
+ */
+ static SkCodec* NewFromStream(SkStream*);
+
+protected:
+
+ /*
+ * Recommend a set of destination dimensions given a requested scale
+ */
+ SkISize onGetScaledDimensions(float desiredScale) const override;
+
+ /*
+ * Initiates the jpeg decode
+ */
+ Result onGetPixels(const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes, const Options&,
+ SkPMColor*, int*, int*) override;
+
+ bool onQueryYUV8(SkYUVSizeInfo* sizeInfo, SkYUVColorSpace* colorSpace) const override;
+
+ Result onGetYUV8Planes(const SkYUVSizeInfo& sizeInfo, void* planes[3]) override;
+
+ SkEncodedFormat onGetEncodedFormat() const override {
+ return kJPEG_SkEncodedFormat;
+ }
+
+ bool onRewind() override;
+
+ bool onDimensionsSupported(const SkISize&) override;
+
+ sk_sp<SkData> getICCData() const override { return fICCData; }
+
+private:
+
+ /*
+ * Read enough of the stream to initialize the SkJpegCodec.
+ * Returns a bool representing success or failure.
+ *
+ * @param codecOut
+ * If this returns true, and codecOut was not nullptr,
+ * codecOut will be set to a new SkJpegCodec.
+ *
+ * @param decoderMgrOut
+ * If this returns true, and codecOut was nullptr,
+ * decoderMgrOut must be non-nullptr and decoderMgrOut will be set to a new
+ * JpegDecoderMgr pointer.
+ *
+ * @param stream
+ * Deleted on failure.
+ * codecOut will take ownership of it in the case where we created a codec.
+ * Ownership is unchanged when we set decoderMgrOut.
+ *
+ */
+ static bool ReadHeader(SkStream* stream, SkCodec** codecOut,
+ JpegDecoderMgr** decoderMgrOut);
+
+ /*
+ * Creates an instance of the decoder
+ * Called only by NewFromStream
+ *
+ * @param info contains properties of the encoded data
+ * @param stream the encoded image data
+ * @param decoderMgr holds decompress struct, src manager, and error manager
+ * takes ownership
+ */
+ SkJpegCodec(int width, int height, const SkEncodedInfo& info, SkStream* stream,
+ JpegDecoderMgr* decoderMgr, sk_sp<SkColorSpace> colorSpace, Origin origin,
+ sk_sp<SkData> iccData);
+
+ /*
+ * Checks if the conversion between the input image and the requested output
+ * image has been implemented.
+ *
+ * Sets the output color space.
+ */
+ bool setOutputColorSpace(const SkImageInfo& dst);
+
+ void initializeSwizzler(const SkImageInfo& dstInfo, const Options& options);
+ void initializeColorXform(const SkImageInfo& dstInfo);
+ void allocateStorage(const SkImageInfo& dstInfo);
+ int readRows(const SkImageInfo& dstInfo, void* dst, size_t rowBytes, int count);
+
+ /*
+ * Scanline decoding.
+ */
+ SkSampler* getSampler(bool createIfNecessary) override;
+ Result onStartScanlineDecode(const SkImageInfo& dstInfo, const Options& options,
+ SkPMColor ctable[], int* ctableCount) override;
+ int onGetScanlines(void* dst, int count, size_t rowBytes) override;
+ bool onSkipScanlines(int count) override;
+
+ SkAutoTDelete<JpegDecoderMgr> fDecoderMgr;
+
+ // We will save the state of the decompress struct after reading the header.
+ // This allows us to safely call onGetScaledDimensions() at any time.
+ const int fReadyState;
+
+ SkAutoTMalloc<uint8_t> fStorage;
+ uint8_t* fSwizzleSrcRow;
+ uint32_t* fColorXformSrcRow;
+
+ // libjpeg-turbo provides some subsetting. In the case that libjpeg-turbo
+    // cannot take the exact subset that we need, we will use the swizzler
+ // to further subset the output from libjpeg-turbo.
+ SkIRect fSwizzlerSubset;
+
+ SkAutoTDelete<SkSwizzler> fSwizzler;
+ std::unique_ptr<SkColorSpaceXform> fColorXform;
+
+ sk_sp<SkData> fICCData;
+
+ typedef SkCodec INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkJpegDecoderMgr.cpp b/gfx/skia/skia/src/codec/SkJpegDecoderMgr.cpp
new file mode 100644
index 000000000..70401c039
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkJpegDecoderMgr.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkJpegDecoderMgr.h"
+
+#include "SkJpegUtility.h"
+
+/*
+ * Print information, warning, and error messages
+ */
+static void print_message(const j_common_ptr info, const char caller[]) {
+ char buffer[JMSG_LENGTH_MAX];
+ info->err->format_message(info, buffer);
+ SkCodecPrintf("libjpeg error %d <%s> from %s\n", info->err->msg_code, buffer, caller);
+}
+
+/*
+ * Reporting function for error and warning messages.
+ */
+static void output_message(j_common_ptr info) {
+ print_message(info, "output_message");
+}
+
+bool JpegDecoderMgr::returnFalse(const char caller[]) {
+ print_message((j_common_ptr) &fDInfo, caller);
+ return false;
+}
+
+SkCodec::Result JpegDecoderMgr::returnFailure(const char caller[], SkCodec::Result result) {
+ print_message((j_common_ptr) &fDInfo, caller);
+ return result;
+}
+
+bool JpegDecoderMgr::getEncodedColor(SkEncodedInfo::Color* outColor) {
+ switch (fDInfo.jpeg_color_space) {
+ case JCS_GRAYSCALE:
+ *outColor = SkEncodedInfo::kGray_Color;
+ return true;
+ case JCS_YCbCr:
+ *outColor = SkEncodedInfo::kYUV_Color;
+ return true;
+ case JCS_RGB:
+ *outColor = SkEncodedInfo::kRGB_Color;
+ return true;
+ case JCS_YCCK:
+ *outColor = SkEncodedInfo::kYCCK_Color;
+ return true;
+ case JCS_CMYK:
+ *outColor = SkEncodedInfo::kInvertedCMYK_Color;
+ return true;
+ default:
+ return false;
+ }
+}
+
+JpegDecoderMgr::JpegDecoderMgr(SkStream* stream)
+ : fSrcMgr(stream)
+ , fInit(false)
+{
+    // The error manager must be set before any calls to libjpeg in order to handle failures
+ fDInfo.err = jpeg_std_error(&fErrorMgr);
+ fErrorMgr.error_exit = skjpeg_err_exit;
+}
+
+void JpegDecoderMgr::init() {
+ jpeg_create_decompress(&fDInfo);
+ fInit = true;
+ fDInfo.src = &fSrcMgr;
+ fDInfo.err->output_message = &output_message;
+}
+
+JpegDecoderMgr::~JpegDecoderMgr() {
+ if (fInit) {
+ jpeg_destroy_decompress(&fDInfo);
+ }
+}
+
+jmp_buf& JpegDecoderMgr::getJmpBuf() {
+ return fErrorMgr.fJmpBuf;
+}
+
+jpeg_decompress_struct* JpegDecoderMgr::dinfo() {
+ return &fDInfo;
+}
diff --git a/gfx/skia/skia/src/codec/SkJpegDecoderMgr.h b/gfx/skia/skia/src/codec/SkJpegDecoderMgr.h
new file mode 100644
index 000000000..7bc422d4f
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkJpegDecoderMgr.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkJpegDecoderMgr_DEFINED
+#define SkJpegDecoderMgr_DEFINED
+
+#include "SkCodec.h"
+#include "SkCodecPriv.h"
+#include <stdio.h>
+#include "SkJpegUtility.h"
+
+extern "C" {
+ #include "jpeglib.h"
+}
+
+class JpegDecoderMgr : SkNoncopyable {
+public:
+
+ /*
+ * Print a useful error message and return false
+ */
+ bool returnFalse(const char caller[]);
+
+ /*
+ * Print a useful error message and return a decode failure
+ */
+ SkCodec::Result returnFailure(const char caller[], SkCodec::Result result);
+
+ /*
+ * Create the decode manager
+ * Does not take ownership of stream
+ */
+ JpegDecoderMgr(SkStream* stream);
+
+ /*
+ * Initialize decompress struct
+ * Initialize the source manager
+ */
+ void init();
+
+ /*
+ * Returns true if it successfully sets outColor to the encoded color,
+ * and false otherwise.
+ */
+ bool getEncodedColor(SkEncodedInfo::Color* outColor);
+
+ /*
+ * Free memory used by the decode manager
+ */
+ ~JpegDecoderMgr();
+
+ /*
+ * Get the jump buffer in order to set an error return point
+ */
+ jmp_buf& getJmpBuf();
+
+ /*
+ * Get function for the decompress info struct
+ */
+ jpeg_decompress_struct* dinfo();
+
+private:
+
+ jpeg_decompress_struct fDInfo;
+ skjpeg_source_mgr fSrcMgr;
+ skjpeg_error_mgr fErrorMgr;
+ bool fInit;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkJpegUtility.cpp b/gfx/skia/skia/src/codec/SkJpegUtility.cpp
new file mode 100644
index 000000000..2cf36bacf
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkJpegUtility.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkJpegUtility.h"
+
+#include "SkCodecPriv.h"
+
+/*
+ * Initialize the source manager
+ */
+static void sk_init_source(j_decompress_ptr dinfo) {
+ skjpeg_source_mgr* src = (skjpeg_source_mgr*) dinfo->src;
+ src->next_input_byte = (const JOCTET*) src->fBuffer;
+ src->bytes_in_buffer = 0;
+}
+
+/*
+ * Fill the input buffer from the stream
+ */
+static boolean sk_fill_input_buffer(j_decompress_ptr dinfo) {
+ skjpeg_source_mgr* src = (skjpeg_source_mgr*) dinfo->src;
+ size_t bytes = src->fStream->read(src->fBuffer, skjpeg_source_mgr::kBufferSize);
+
+ // libjpeg is still happy with a less than full read, as long as the result is non-zero
+ if (bytes == 0) {
+ return false;
+ }
+
+ src->next_input_byte = (const JOCTET*) src->fBuffer;
+ src->bytes_in_buffer = bytes;
+ return true;
+}
+
+/*
+ * Skip a certain number of bytes in the stream
+ */
+static void sk_skip_input_data(j_decompress_ptr dinfo, long numBytes) {
+ skjpeg_source_mgr* src = (skjpeg_source_mgr*) dinfo->src;
+ size_t bytes = (size_t) numBytes;
+
+ if (bytes > src->bytes_in_buffer) {
+ size_t bytesToSkip = bytes - src->bytes_in_buffer;
+ if (bytesToSkip != src->fStream->skip(bytesToSkip)) {
+ SkCodecPrintf("Failure to skip.\n");
+ dinfo->err->error_exit((j_common_ptr) dinfo);
+ return;
+ }
+
+ src->next_input_byte = (const JOCTET*) src->fBuffer;
+ src->bytes_in_buffer = 0;
+ } else {
+ src->next_input_byte += numBytes;
+ src->bytes_in_buffer -= numBytes;
+ }
+}
+
+/*
+ * We do not need to do anything to terminate our stream
+ */
+static void sk_term_source(j_decompress_ptr dinfo) {
+ // The current implementation of SkJpegCodec does not call
+ // jpeg_finish_decompress(), so this function is never called.
+ // If we want to modify this function to do something, we also
+ // need to modify SkJpegCodec to call jpeg_finish_decompress().
+}
+
+/*
+ * Constructor for the source manager that we provide to libjpeg
+ * We provide skia implementations of all of the stream processing functions required by libjpeg
+ */
+skjpeg_source_mgr::skjpeg_source_mgr(SkStream* stream)
+ : fStream(stream)
+{
+ init_source = sk_init_source;
+ fill_input_buffer = sk_fill_input_buffer;
+ skip_input_data = sk_skip_input_data;
+ resync_to_restart = jpeg_resync_to_restart;
+ term_source = sk_term_source;
+}
+
+/*
+ * Call longjmp to continue execution on an error
+ */
+void skjpeg_err_exit(j_common_ptr dinfo) {
+ // Simply return to Skia client code
+ // JpegDecoderMgr will take care of freeing memory
+ skjpeg_error_mgr* error = (skjpeg_error_mgr*) dinfo->err;
+ (*error->output_message) (dinfo);
+ longjmp(error->fJmpBuf, 1);
+}
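Once this source manager is installed on a decompress struct (JpegDecoderMgr::init() does exactly that), libjpeg pulls data on demand: it calls sk_fill_input_buffer() whenever bytes_in_buffer reaches zero and sk_skip_input_data() for sections it wants to ignore. A minimal, standalone wiring sketch, not part of this patch and with error handling omitted for brevity:

    #include "SkJpegUtility.h"
    #include "SkStream.h"

    static void wire_source_sketch(SkStream* encodedJpeg) {
        jpeg_decompress_struct dinfo;
        jpeg_error_mgr errorMgr;
        // The error manager must be set before jpeg_create_decompress();
        // a real caller installs skjpeg_err_exit() and setjmp as shown above.
        dinfo.err = jpeg_std_error(&errorMgr);
        jpeg_create_decompress(&dinfo);

        // Stream-backed source: libjpeg now refills its input through
        // sk_fill_input_buffer() in kBufferSize (1024-byte) chunks.
        skjpeg_source_mgr srcMgr(encodedJpeg);
        dinfo.src = &srcMgr;

        jpeg_read_header(&dinfo, TRUE);
        jpeg_destroy_decompress(&dinfo);
    }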
diff --git a/gfx/skia/skia/src/codec/SkJpegUtility.h b/gfx/skia/skia/src/codec/SkJpegUtility.h
new file mode 100644
index 000000000..43391017b
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkJpegUtility.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkJpegUtility_codec_DEFINED
+#define SkJpegUtility_codec_DEFINED
+
+#include "SkStream.h"
+
+#include <setjmp.h>
+// stdio is needed for jpeglib
+#include <stdio.h>
+
+extern "C" {
+ #include "jpeglib.h"
+ #include "jerror.h"
+}
+
+/*
+ * Error handling struct
+ */
+struct skjpeg_error_mgr : jpeg_error_mgr {
+ jmp_buf fJmpBuf;
+};
+
+/*
+ * Error handling function
+ */
+void skjpeg_err_exit(j_common_ptr cinfo);
+
+/*
+ * Source handling struct that allows libjpeg to use our stream object
+ */
+struct skjpeg_source_mgr : jpeg_source_mgr {
+ skjpeg_source_mgr(SkStream* stream);
+
+ SkStream* fStream; // unowned
+ enum {
+ // TODO (msarett): Experiment with different buffer sizes.
+ // This size was chosen because it matches SkImageDecoder.
+ kBufferSize = 1024
+ };
+ uint8_t fBuffer[kBufferSize];
+};
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkMaskSwizzler.cpp b/gfx/skia/skia/src/codec/SkMaskSwizzler.cpp
new file mode 100644
index 000000000..2df10ee24
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkMaskSwizzler.cpp
@@ -0,0 +1,568 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCodecPriv.h"
+#include "SkColorPriv.h"
+#include "SkMaskSwizzler.h"
+
+static void swizzle_mask16_to_rgba_opaque(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint16_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ dstPtr[i] = SkPackARGB_as_RGBA(0xFF, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask16_to_bgra_opaque(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint16_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ dstPtr[i] = SkPackARGB_as_BGRA(0xFF, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask16_to_rgba_unpremul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint16_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = SkPackARGB_as_RGBA(alpha, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask16_to_bgra_unpremul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint16_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = SkPackARGB_as_BGRA(alpha, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask16_to_rgba_premul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint16_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = premultiply_argb_as_rgba(alpha, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask16_to_bgra_premul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint16_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = premultiply_argb_as_bgra(alpha, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+// TODO (msarett): We have promoted a two byte per pixel image to 8888, only to
+// convert it back to 565. Instead, we should swizzle to 565 directly.
+static void swizzle_mask16_to_565(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
+ uint16_t* dstPtr = (uint16_t*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint16_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ dstPtr[i] = SkPack888ToRGB16(red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask24_to_rgba_opaque(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ srcRow += 3 * startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcRow[0] | (srcRow[1] << 8) | srcRow[2] << 16;
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ dstPtr[i] = SkPackARGB_as_RGBA(0xFF, red, green, blue);
+ srcRow += 3 * sampleX;
+ }
+}
+
+static void swizzle_mask24_to_bgra_opaque(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ srcRow += 3 * startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcRow[0] | (srcRow[1] << 8) | srcRow[2] << 16;
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ dstPtr[i] = SkPackARGB_as_BGRA(0xFF, red, green, blue);
+ srcRow += 3 * sampleX;
+ }
+}
+
+static void swizzle_mask24_to_rgba_unpremul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ srcRow += 3 * startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcRow[0] | (srcRow[1] << 8) | srcRow[2] << 16;
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = SkPackARGB_as_RGBA(alpha, red, green, blue);
+ srcRow += 3 * sampleX;
+ }
+}
+
+static void swizzle_mask24_to_bgra_unpremul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ srcRow += 3 * startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcRow[0] | (srcRow[1] << 8) | srcRow[2] << 16;
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = SkPackARGB_as_BGRA(alpha, red, green, blue);
+ srcRow += 3 * sampleX;
+ }
+}
+
+static void swizzle_mask24_to_rgba_premul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ srcRow += 3 * startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcRow[0] | (srcRow[1] << 8) | srcRow[2] << 16;
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = premultiply_argb_as_rgba(alpha, red, green, blue);
+ srcRow += 3 * sampleX;
+ }
+}
+
+static void swizzle_mask24_to_bgra_premul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ srcRow += 3 * startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcRow[0] | (srcRow[1] << 8) | srcRow[2] << 16;
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = premultiply_argb_as_bgra(alpha, red, green, blue);
+ srcRow += 3 * sampleX;
+ }
+}
+
+static void swizzle_mask24_to_565(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ srcRow += 3 * startX;
+ uint16_t* dstPtr = (uint16_t*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcRow[0] | (srcRow[1] << 8) | srcRow[2] << 16;
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ dstPtr[i] = SkPack888ToRGB16(red, green, blue);
+ srcRow += 3 * sampleX;
+ }
+}
+
+static void swizzle_mask32_to_rgba_opaque(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ dstPtr[i] = SkPackARGB_as_RGBA(0xFF, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask32_to_bgra_opaque(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ dstPtr[i] = SkPackARGB_as_BGRA(0xFF, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask32_to_rgba_unpremul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = SkPackARGB_as_RGBA(alpha, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask32_to_bgra_unpremul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = SkPackARGB_as_BGRA(alpha, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask32_to_rgba_premul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = premultiply_argb_as_rgba(alpha, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask32_to_bgra_premul(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+
+ // Use the masks to decode to the destination
+ uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
+ SkPMColor* dstPtr = (SkPMColor*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ uint8_t alpha = masks->getAlpha(p);
+ dstPtr[i] = premultiply_argb_as_bgra(alpha, red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+static void swizzle_mask32_to_565(
+ void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
+ uint32_t startX, uint32_t sampleX) {
+ // Use the masks to decode to the destination
+ uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
+ uint16_t* dstPtr = (uint16_t*) dstRow;
+ for (int i = 0; i < width; i++) {
+ uint32_t p = srcPtr[0];
+ uint8_t red = masks->getRed(p);
+ uint8_t green = masks->getGreen(p);
+ uint8_t blue = masks->getBlue(p);
+ dstPtr[i] = SkPack888ToRGB16(red, green, blue);
+ srcPtr += sampleX;
+ }
+}
+
+/*
+ *
+ * Create a new mask swizzler
+ *
+ */
+SkMaskSwizzler* SkMaskSwizzler::CreateMaskSwizzler(const SkImageInfo& dstInfo,
+ const SkImageInfo& srcInfo, SkMasks* masks, uint32_t bitsPerPixel,
+ const SkCodec::Options& options) {
+
+ // Choose the appropriate row procedure
+ RowProc proc = nullptr;
+ switch (bitsPerPixel) {
+ case 16:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ if (kOpaque_SkAlphaType == srcInfo.alphaType()) {
+ proc = &swizzle_mask16_to_rgba_opaque;
+ } else {
+ switch (dstInfo.alphaType()) {
+ case kUnpremul_SkAlphaType:
+ proc = &swizzle_mask16_to_rgba_unpremul;
+ break;
+ case kPremul_SkAlphaType:
+ proc = &swizzle_mask16_to_rgba_premul;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case kBGRA_8888_SkColorType:
+ if (kOpaque_SkAlphaType == srcInfo.alphaType()) {
+ proc = &swizzle_mask16_to_bgra_opaque;
+ } else {
+ switch (dstInfo.alphaType()) {
+ case kUnpremul_SkAlphaType:
+ proc = &swizzle_mask16_to_bgra_unpremul;
+ break;
+ case kPremul_SkAlphaType:
+ proc = &swizzle_mask16_to_bgra_premul;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_mask16_to_565;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 24:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ if (kOpaque_SkAlphaType == srcInfo.alphaType()) {
+ proc = &swizzle_mask24_to_rgba_opaque;
+ } else {
+ switch (dstInfo.alphaType()) {
+ case kUnpremul_SkAlphaType:
+ proc = &swizzle_mask24_to_rgba_unpremul;
+ break;
+ case kPremul_SkAlphaType:
+ proc = &swizzle_mask24_to_rgba_premul;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case kBGRA_8888_SkColorType:
+ if (kOpaque_SkAlphaType == srcInfo.alphaType()) {
+ proc = &swizzle_mask24_to_bgra_opaque;
+ } else {
+ switch (dstInfo.alphaType()) {
+ case kUnpremul_SkAlphaType:
+ proc = &swizzle_mask24_to_bgra_unpremul;
+ break;
+ case kPremul_SkAlphaType:
+ proc = &swizzle_mask24_to_bgra_premul;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_mask24_to_565;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 32:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ if (kOpaque_SkAlphaType == srcInfo.alphaType()) {
+ proc = &swizzle_mask32_to_rgba_opaque;
+ } else {
+ switch (dstInfo.alphaType()) {
+ case kUnpremul_SkAlphaType:
+ proc = &swizzle_mask32_to_rgba_unpremul;
+ break;
+ case kPremul_SkAlphaType:
+ proc = &swizzle_mask32_to_rgba_premul;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case kBGRA_8888_SkColorType:
+ if (kOpaque_SkAlphaType == srcInfo.alphaType()) {
+ proc = &swizzle_mask32_to_bgra_opaque;
+ } else {
+ switch (dstInfo.alphaType()) {
+ case kUnpremul_SkAlphaType:
+ proc = &swizzle_mask32_to_bgra_unpremul;
+ break;
+ case kPremul_SkAlphaType:
+ proc = &swizzle_mask32_to_bgra_premul;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_mask32_to_565;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+
+ int srcOffset = 0;
+ int srcWidth = dstInfo.width();
+ if (options.fSubset) {
+ srcOffset = options.fSubset->left();
+ srcWidth = options.fSubset->width();
+ }
+
+ return new SkMaskSwizzler(masks, proc, srcOffset, srcWidth);
+}
+
+/*
+ *
+ * Constructor for mask swizzler
+ *
+ */
+SkMaskSwizzler::SkMaskSwizzler(SkMasks* masks, RowProc proc, int srcOffset, int subsetWidth)
+ : fMasks(masks)
+ , fRowProc(proc)
+ , fSubsetWidth(subsetWidth)
+ , fDstWidth(subsetWidth)
+ , fSampleX(1)
+ , fSrcOffset(srcOffset)
+ , fX0(srcOffset)
+{}
+
+int SkMaskSwizzler::onSetSampleX(int sampleX) {
+ // FIXME: Share this function with SkSwizzler?
+    SkASSERT(sampleX > 0); // Surely there is an upper limit? Should there be a
+                           // way to report failure?
+ fSampleX = sampleX;
+ fX0 = get_start_coord(sampleX) + fSrcOffset;
+ fDstWidth = get_scaled_dimension(fSubsetWidth, sampleX);
+
+ // check that fX0 is valid
+ SkASSERT(fX0 >= 0);
+ return fDstWidth;
+}
+
+/*
+ *
+ * Swizzle the specified row
+ *
+ */
+void SkMaskSwizzler::swizzle(void* dst, const uint8_t* SK_RESTRICT src) {
+ SkASSERT(nullptr != dst && nullptr != src);
+ fRowProc(dst, src, fDstWidth, fMasks, fX0, fSampleX);
+}
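To make the 16-bit rows above concrete: for an RGB565 source, SkMasks::CreateMasks() (defined in SkMasks.cpp later in this patch) derives shift/size pairs of (11,5), (5,6) and (0,5) for red, green and blue, and swizzle_mask16_to_rgba_opaque() expands each pixel through those masks. A small self-contained sketch with an illustrative pixel value, not part of this patch:

    #include <cstdint>
    #include "SkMasks.h"
    #include "SkTypes.h"

    static void swizzle_565_example() {
        SkMasks::InputMasks input = { 0xF800, 0x07E0, 0x001F, 0 };
        SkMasks* masks = SkMasks::CreateMasks(input, 16);

        uint16_t pixel = 0xBDF7;            // R=0x17, G=0x2F, B=0x17 in 5/6/5 bits
        uint8_t r = masks->getRed(pixel);   // each component is widened to 8 bits
        uint8_t g = masks->getGreen(pixel); // via the lookup table in SkMasks.cpp
        uint8_t b = masks->getBlue(pixel);
        SkDebugf("r=%d g=%d b=%d\n", r, g, b);
        // swizzle_mask16_to_rgba_opaque() would store
        // SkPackARGB_as_RGBA(0xFF, r, g, b) in the destination row.
        delete masks;
    }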
diff --git a/gfx/skia/skia/src/codec/SkMaskSwizzler.h b/gfx/skia/skia/src/codec/SkMaskSwizzler.h
new file mode 100644
index 000000000..3bf8d1758
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkMaskSwizzler.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkMaskSwizzler_DEFINED
+#define SkMaskSwizzler_DEFINED
+
+#include "SkMasks.h"
+#include "SkSampler.h"
+#include "SkSwizzler.h"
+#include "SkTypes.h"
+
+/*
+ *
+ * Used to swizzle images whose pixel components are extracted by bit masks
+ * Currently only used by bmp
+ *
+ */
+class SkMaskSwizzler : public SkSampler {
+public:
+
+ /*
+ * Create a new swizzler
+ * @param masks Unowned pointer to helper class
+ */
+ static SkMaskSwizzler* CreateMaskSwizzler(const SkImageInfo& dstInfo,
+ const SkImageInfo& srcInfo,
+ SkMasks* masks,
+ uint32_t bitsPerPixel,
+ const SkCodec::Options& options);
+
+ /*
+ * Swizzle a row
+ */
+ void swizzle(void* dst, const uint8_t* SK_RESTRICT src);
+
+ /**
+ * Implement fill using a custom width.
+ */
+ void fill(const SkImageInfo& info, void* dst, size_t rowBytes, uint64_t colorOrIndex,
+ SkCodec::ZeroInitialized zeroInit) override {
+ const SkImageInfo fillInfo = info.makeWH(fDstWidth, info.height());
+ SkSampler::Fill(fillInfo, dst, rowBytes, colorOrIndex, zeroInit);
+ }
+
+private:
+
+ /*
+ * Row procedure used for swizzle
+ */
+ typedef void (*RowProc)(void* dstRow, const uint8_t* srcRow, int width,
+ SkMasks* masks, uint32_t startX, uint32_t sampleX);
+
+    SkMaskSwizzler(SkMasks* masks, RowProc proc, int srcOffset, int subsetWidth);
+
+ int onSetSampleX(int) override;
+
+ SkMasks* fMasks; // unowned
+ const RowProc fRowProc;
+
+ // FIXME: Can this class share more with SkSwizzler? These variables are all the same.
+ const int fSubsetWidth; // Width of the subset of source before any sampling.
+ int fDstWidth; // Width of dst, which may differ with sampling.
+ int fSampleX;
+ int fSrcOffset;
+ int fX0;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkMasks.cpp b/gfx/skia/skia/src/codec/SkMasks.cpp
new file mode 100644
index 000000000..ac97a39d7
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkMasks.cpp
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCodecPriv.h"
+#include "SkMasks.h"
+#include "SkTypes.h"
+
+/*
+ *
+ * Used to convert 1-7 bit color components into 8-bit color components
+ *
+ */
+const static uint8_t n_bit_to_8_bit_lookup_table[] = {
+ // 1 bit
+ 0, 255,
+ // 2 bits
+ 0, 85, 170, 255,
+ // 3 bits
+ 0, 36, 73, 109, 146, 182, 219, 255,
+ // 4 bits
+ 0, 17, 34, 51, 68, 85, 102, 119, 136, 153, 170, 187, 204, 221, 238, 255,
+ // 5 bits
+ 0, 8, 16, 25, 33, 41, 49, 58, 66, 74, 82, 90, 99, 107, 115, 123, 132, 140,
+ 148, 156, 165, 173, 181, 189, 197, 206, 214, 222, 230, 239, 247, 255,
+ // 6 bits
+ 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 45, 49, 53, 57, 61, 65, 69, 73,
+ 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 130, 134, 138,
+ 142, 146, 150, 154, 158, 162, 166, 170, 174, 178, 182, 186, 190, 194, 198,
+ 202, 206, 210, 215, 219, 223, 227, 231, 235, 239, 243, 247, 251, 255,
+ // 7 bits
+ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38,
+ 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76,
+ 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110,
+ 112, 114, 116, 118, 120, 122, 124, 126, 129, 131, 133, 135, 137, 139, 141,
+ 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171,
+ 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201,
+ 203, 205, 207, 209, 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231,
+ 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255
+};
+
+/*
+ *
+ * Convert an n bit component to an 8-bit component
+ *
+ */
+static uint8_t convert_to_8(uint8_t component, uint32_t n) {
+ if (0 == n) {
+ return 0;
+ } else if (8 > n) {
+ return n_bit_to_8_bit_lookup_table[(1 << n) - 2 + component];
+ } else {
+ SkASSERT(8 == n);
+ return component;
+ }
+}
+
+static uint8_t get_comp(uint32_t pixel, uint32_t mask, uint32_t shift,
+ uint32_t size) {
+ return convert_to_8((pixel & mask) >> shift, size);
+}
+
+/*
+ *
+ * Get a color component
+ *
+ */
+uint8_t SkMasks::getRed(uint32_t pixel) const {
+ return get_comp(pixel, fRed.mask, fRed.shift, fRed.size);
+}
+uint8_t SkMasks::getGreen(uint32_t pixel) const {
+ return get_comp(pixel, fGreen.mask, fGreen.shift, fGreen.size);
+}
+uint8_t SkMasks::getBlue(uint32_t pixel) const {
+ return get_comp(pixel, fBlue.mask, fBlue.shift, fBlue.size);
+}
+uint8_t SkMasks::getAlpha(uint32_t pixel) const {
+ return get_comp(pixel, fAlpha.mask, fAlpha.shift, fAlpha.size);
+}
+
+/*
+ *
+ * Process an input mask to obtain the necessary information
+ *
+ */
+const SkMasks::MaskInfo process_mask(uint32_t mask, uint32_t bpp) {
+ // Determine properties of the mask
+ uint32_t tempMask = mask;
+ uint32_t shift = 0;
+ uint32_t size = 0;
+ if (tempMask != 0) {
+ // Count trailing zeros on masks
+ for (; (tempMask & 1) == 0; tempMask >>= 1) {
+ shift++;
+ }
+ // Count the size of the mask
+ for (; tempMask & 1; tempMask >>= 1) {
+ size++;
+ }
+        // Verify that the mask is contiguous
+        if (tempMask) {
+            SkCodecPrintf("Warning: Bit mask is not contiguous.\n");
+ // Finish processing the mask
+ for (; tempMask; tempMask >>= 1) {
+ size++;
+ }
+ }
+ // Truncate masks greater than 8 bits
+ if (size > 8) {
+ shift += size - 8;
+ size = 8;
+ mask &= 0xFF << shift;
+ }
+ }
+
+ // Save the calculated values
+ const SkMasks::MaskInfo info = { mask, shift, size };
+ return info;
+}
+
+/*
+ *
+ * Create the masks object
+ *
+ */
+SkMasks* SkMasks::CreateMasks(InputMasks masks, uint32_t bitsPerPixel) {
+ // Trim the input masks according to bitsPerPixel
+ if (bitsPerPixel < 32) {
+ masks.red &= (1 << bitsPerPixel) - 1;
+ masks.green &= (1 << bitsPerPixel) - 1;
+ masks.blue &= (1 << bitsPerPixel) - 1;
+ masks.alpha &= (1 << bitsPerPixel) - 1;
+ }
+
+ // Check that masks do not overlap
+ if (((masks.red & masks.green) | (masks.red & masks.blue) |
+ (masks.red & masks.alpha) | (masks.green & masks.blue) |
+ (masks.green & masks.alpha) | (masks.blue & masks.alpha)) != 0) {
+ return nullptr;
+ }
+
+ // Collect information about the masks
+ const MaskInfo red = process_mask(masks.red, bitsPerPixel);
+ const MaskInfo green = process_mask(masks.green, bitsPerPixel);
+ const MaskInfo blue = process_mask(masks.blue, bitsPerPixel);
+ const MaskInfo alpha = process_mask(masks.alpha, bitsPerPixel);
+
+ return new SkMasks(red, green, blue, alpha);
+}
+
+
+SkMasks::SkMasks(const MaskInfo& red, const MaskInfo& green,
+ const MaskInfo& blue, const MaskInfo& alpha)
+ : fRed(red)
+ , fGreen(green)
+ , fBlue(blue)
+ , fAlpha(alpha)
+{}
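A worked example of the arithmetic in process_mask() and convert_to_8(): the RGB565 green mask 0x07E0 has five trailing zeros and six contiguous one bits, so it yields shift = 5 and size = 6, and convert_to_8(c, 6) indexes the lookup table at (1 << 6) - 2 + c, i.e. the 64-entry 6-bit sub-table that starts at offset 62 (c = 0 maps to 0, c = 63 maps to 255). A self-contained restatement, not part of this patch:

    #include <cassert>
    #include <cstdint>

    static void masks_arithmetic_example() {
        uint32_t mask = 0x07E0;    // RGB565 green
        uint32_t shift = 0, size = 0, temp = mask;
        for (; (temp & 1) == 0; temp >>= 1) shift++;  // count trailing zeros
        for (; temp & 1; temp >>= 1) size++;          // count contiguous ones
        assert(shift == 5 && size == 6);
        assert((1u << size) - 2 == 62);               // start of the 6-bit sub-table
    }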
diff --git a/gfx/skia/skia/src/codec/SkMasks.h b/gfx/skia/skia/src/codec/SkMasks.h
new file mode 100644
index 000000000..9606cbfbe
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkMasks.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkMasks_DEFINED
+#define SkMasks_DEFINED
+
+#include "SkTypes.h"
+
+/*
+ *
+ * Contains useful mask routines for SkMaskSwizzler
+ *
+ */
+class SkMasks {
+public:
+
+ /*
+ *
+ * Input bit masks format
+ *
+ */
+ struct InputMasks {
+ uint32_t red;
+ uint32_t green;
+ uint32_t blue;
+ uint32_t alpha;
+ };
+
+ /*
+ *
+ * Contains all of the information for a single mask
+ *
+ */
+ struct MaskInfo {
+ uint32_t mask;
+ uint32_t shift;
+ uint32_t size;
+ };
+
+ /*
+ *
+ * Create the masks object
+ *
+ */
+ static SkMasks* CreateMasks(InputMasks masks, uint32_t bpp);
+
+ /*
+ *
+ * Get a color component
+ *
+ */
+ uint8_t getRed(uint32_t pixel) const;
+ uint8_t getGreen(uint32_t pixel) const;
+ uint8_t getBlue(uint32_t pixel) const;
+ uint8_t getAlpha(uint32_t pixel) const;
+
+ /*
+ *
+ * Getter for the alpha mask
+ * The alpha mask may be used in other decoding modes
+ *
+ */
+ uint32_t getAlphaMask() const {
+ return fAlpha.mask;
+ }
+
+private:
+
+ /*
+ *
+ * Constructor
+ *
+ */
+ SkMasks(const MaskInfo& red, const MaskInfo& green, const MaskInfo& blue,
+ const MaskInfo& alpha);
+
+ const MaskInfo fRed;
+ const MaskInfo fGreen;
+ const MaskInfo fBlue;
+ const MaskInfo fAlpha;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkPngCodec.cpp b/gfx/skia/skia/src/codec/SkPngCodec.cpp
new file mode 100644
index 000000000..30fffe116
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkPngCodec.cpp
@@ -0,0 +1,1309 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmap.h"
+#include "SkCodecPriv.h"
+#include "SkColorPriv.h"
+#include "SkColorSpace_Base.h"
+#include "SkColorTable.h"
+#include "SkMath.h"
+#include "SkOpts.h"
+#include "SkPngCodec.h"
+#include "SkPoint3.h"
+#include "SkSize.h"
+#include "SkStream.h"
+#include "SkSwizzler.h"
+#include "SkTemplates.h"
+#include "SkUtils.h"
+
+#include "png.h"
+
+// This warning triggers false positives way too often in here.
+#if defined(__GNUC__) && !defined(__clang__)
+ #pragma GCC diagnostic ignored "-Wclobbered"
+#endif
+
+#if PNG_LIBPNG_VER_MAJOR > 1 || (PNG_LIBPNG_VER_MAJOR == 1 && PNG_LIBPNG_VER_MINOR >= 5)
+    // SK_GOOGLE3_PNG_HACK is not needed with libpng 1.5 and later
+ #undef SK_GOOGLE3_PNG_HACK
+#endif
+
+// FIXME (scroggo): We can use png_jumpbuf directly once Google3 is on 1.6
+#define PNG_JMPBUF(x) png_jmpbuf((png_structp) x)
+
+///////////////////////////////////////////////////////////////////////////////
+// Callback functions
+///////////////////////////////////////////////////////////////////////////////
+
+// When setjmp is first called, it returns 0, meaning longjmp was not called.
+constexpr int kSetJmpOkay = 0;
+// An error internal to libpng.
+constexpr int kPngError = 1;
+// Passed to longjmp when we have decoded as many lines as we need.
+constexpr int kStopDecoding = 2;
+
+static void sk_error_fn(png_structp png_ptr, png_const_charp msg) {
+ SkCodecPrintf("------ png error %s\n", msg);
+ longjmp(PNG_JMPBUF(png_ptr), kPngError);
+}
+
+void sk_warning_fn(png_structp, png_const_charp msg) {
+ SkCodecPrintf("----- png warning %s\n", msg);
+}
+
+#ifdef PNG_READ_UNKNOWN_CHUNKS_SUPPORTED
+static int sk_read_user_chunk(png_structp png_ptr, png_unknown_chunkp chunk) {
+ SkPngChunkReader* chunkReader = (SkPngChunkReader*)png_get_user_chunk_ptr(png_ptr);
+ // readChunk() returning true means continue decoding
+ return chunkReader->readChunk((const char*)chunk->name, chunk->data, chunk->size) ? 1 : -1;
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// Helpers
+///////////////////////////////////////////////////////////////////////////////
+
+class AutoCleanPng : public SkNoncopyable {
+public:
+ /*
+ * This class does not take ownership of stream or reader, but if codecPtr
+ * is non-NULL, and decodeBounds succeeds, it will have created a new
+ * SkCodec (pointed to by *codecPtr) which will own/ref them, as well as
+ * the png_ptr and info_ptr.
+ */
+ AutoCleanPng(png_structp png_ptr, SkStream* stream, SkPngChunkReader* reader,
+ SkCodec** codecPtr)
+ : fPng_ptr(png_ptr)
+ , fInfo_ptr(nullptr)
+ , fDecodedBounds(false)
+ , fReadHeader(false)
+ , fStream(stream)
+ , fChunkReader(reader)
+ , fOutCodec(codecPtr)
+ {}
+
+ ~AutoCleanPng() {
+ // fInfo_ptr will never be non-nullptr unless fPng_ptr is.
+ if (fPng_ptr) {
+ png_infopp info_pp = fInfo_ptr ? &fInfo_ptr : nullptr;
+ png_destroy_read_struct(&fPng_ptr, info_pp, nullptr);
+ }
+ }
+
+ void setInfoPtr(png_infop info_ptr) {
+ SkASSERT(nullptr == fInfo_ptr);
+ fInfo_ptr = info_ptr;
+ }
+
+ /**
+ * Reads enough of the input stream to decode the bounds.
+ * @return false if the stream is not a valid PNG (or too short).
+ * true if it read enough of the stream to determine the bounds.
+ * In the latter case, the stream may have been read beyond the
+ * point to determine the bounds, and the png_ptr will have saved
+ * any extra data. Further, if the codecPtr supplied to the
+ * constructor was not NULL, it will now point to a new SkCodec,
+ * which owns (or refs, in the case of the SkPngChunkReader) the
+ * inputs. If codecPtr was NULL, the png_ptr and info_ptr are
+ * unowned, and it is up to the caller to destroy them.
+ */
+ bool decodeBounds();
+
+private:
+ png_structp fPng_ptr;
+ png_infop fInfo_ptr;
+ bool fDecodedBounds;
+ bool fReadHeader;
+ SkStream* fStream;
+ SkPngChunkReader* fChunkReader;
+ SkCodec** fOutCodec;
+
+ /**
+ * Supplied to libpng to call when it has read enough data to determine
+ * bounds.
+ */
+ static void InfoCallback(png_structp png_ptr, png_infop) {
+ // png_get_progressive_ptr returns the pointer we set on the png_ptr with
+ // png_set_progressive_read_fn
+ static_cast<AutoCleanPng*>(png_get_progressive_ptr(png_ptr))->infoCallback();
+ }
+
+ void infoCallback();
+
+#ifdef SK_GOOGLE3_PNG_HACK
+// public so it can be called by SkPngCodec::rereadHeaderIfNecessary().
+public:
+#endif
+ void releasePngPtrs() {
+ fPng_ptr = nullptr;
+ fInfo_ptr = nullptr;
+ }
+};
+#define AutoCleanPng(...) SK_REQUIRE_LOCAL_VAR(AutoCleanPng)
+
+bool AutoCleanPng::decodeBounds() {
+ if (setjmp(PNG_JMPBUF(fPng_ptr))) {
+ return false;
+ }
+
+ png_set_progressive_read_fn(fPng_ptr, this, InfoCallback, nullptr, nullptr);
+
+ // Arbitrary buffer size, though note that it matches (below)
+ // SkPngCodec::processData(). FIXME: Can we better suit this to the size of
+ // the PNG header?
+ constexpr size_t kBufferSize = 4096;
+ char buffer[kBufferSize];
+
+ while (true) {
+ const size_t bytesRead = fStream->read(buffer, kBufferSize);
+ if (!bytesRead) {
+ // We have read to the end of the input without decoding bounds.
+ break;
+ }
+
+ png_process_data(fPng_ptr, fInfo_ptr, (png_bytep) buffer, bytesRead);
+ if (fReadHeader) {
+ break;
+ }
+ }
+
+ // For safety, clear the pointer to this object.
+ png_set_progressive_read_fn(fPng_ptr, nullptr, nullptr, nullptr, nullptr);
+ return fDecodedBounds;
+}
+
+void SkPngCodec::processData() {
+ switch (setjmp(PNG_JMPBUF(fPng_ptr))) {
+ case kPngError:
+ // There was an error. Stop processing data.
+ // FIXME: Do we need to discard png_ptr?
+ return;
+ case kStopDecoding:
+ // We decoded all the lines we want.
+ return;
+ case kSetJmpOkay:
+ // Everything is okay.
+ break;
+ default:
+ // No other values should be passed to longjmp.
+ SkASSERT(false);
+ }
+
+ // Arbitrary buffer size
+ constexpr size_t kBufferSize = 4096;
+ char buffer[kBufferSize];
+
+ while (true) {
+ const size_t bytesRead = this->stream()->read(buffer, kBufferSize);
+ png_process_data(fPng_ptr, fInfo_ptr, (png_bytep) buffer, bytesRead);
+
+ if (!bytesRead) {
+ // We have read to the end of the input. Note that we quit *after*
+ // calling png_process_data, because decodeBounds may have told
+ // libpng to save the remainder of the buffer, in which case
+ // png_process_data will process the saved buffer, though the
+ // stream has no more to read.
+ break;
+ }
+ }
+}
+
+// Note: SkColorTable claims to store SkPMColors, which is not necessarily the case here.
+bool SkPngCodec::createColorTable(const SkImageInfo& dstInfo, int* ctableCount) {
+
+ int numColors;
+ png_color* palette;
+ if (!png_get_PLTE(fPng_ptr, fInfo_ptr, &palette, &numColors)) {
+ return false;
+ }
+
+ // Contents depend on tableColorType and our choice of if/when to premultiply:
+ // { kPremul, kUnpremul, kOpaque } x { RGBA, BGRA }
+ SkPMColor colorTable[256];
+ SkColorType tableColorType = fColorXform ? kRGBA_8888_SkColorType : dstInfo.colorType();
+
+ png_bytep alphas;
+ int numColorsWithAlpha = 0;
+ if (png_get_tRNS(fPng_ptr, fInfo_ptr, &alphas, &numColorsWithAlpha, nullptr)) {
+ // If we are performing a color xform, it will handle the premultiply. Otherwise,
+ // we'll do it here.
+ bool premultiply = !fColorXform && needs_premul(dstInfo, this->getInfo());
+
+ // Choose which function to use to create the color table. If the final destination's
+ // colortype is unpremultiplied, the color table will store unpremultiplied colors.
+ PackColorProc proc = choose_pack_color_proc(premultiply, tableColorType);
+
+ for (int i = 0; i < numColorsWithAlpha; i++) {
+ // We don't have a function in SkOpts that combines a set of alphas with a set
+ // of RGBs. We could write one, but it's hardly worth it, given that this
+ // is such a small fraction of the total decode time.
+ colorTable[i] = proc(alphas[i], palette->red, palette->green, palette->blue);
+ palette++;
+ }
+ }
+
+ if (numColorsWithAlpha < numColors) {
+ // The optimized code depends on a 3-byte png_color struct with the colors
+ // in RGB order. These checks make sure it is safe to use.
+ static_assert(3 == sizeof(png_color), "png_color struct has changed. Opts are broken.");
+#ifdef SK_DEBUG
+ SkASSERT(&palette->red < &palette->green);
+ SkASSERT(&palette->green < &palette->blue);
+#endif
+
+ if (is_rgba(tableColorType)) {
+ SkOpts::RGB_to_RGB1(colorTable + numColorsWithAlpha, palette,
+ numColors - numColorsWithAlpha);
+ } else {
+ SkOpts::RGB_to_BGR1(colorTable + numColorsWithAlpha, palette,
+ numColors - numColorsWithAlpha);
+ }
+ }
+
+ // If we are not decoding to F16, we can color xform now and store the results
+ // in the color table.
+ if (fColorXform && kRGBA_F16_SkColorType != dstInfo.colorType()) {
+ SkColorSpaceXform::ColorFormat xformColorFormat = is_rgba(dstInfo.colorType()) ?
+ SkColorSpaceXform::kRGBA_8888_ColorFormat :
+ SkColorSpaceXform::kBGRA_8888_ColorFormat;
+ SkAlphaType xformAlphaType = select_xform_alpha(dstInfo.alphaType(),
+ this->getInfo().alphaType());
+ fColorXform->apply(colorTable, colorTable, numColors, xformColorFormat,
+ SkColorSpaceXform::kRGBA_8888_ColorFormat, xformAlphaType);
+ }
+
+ // Pad the color table with the last color in the table (or black) in the case that
+ // invalid pixel indices exceed the number of colors in the table.
+ const int maxColors = 1 << fBitDepth;
+ if (numColors < maxColors) {
+ SkPMColor lastColor = numColors > 0 ? colorTable[numColors - 1] : SK_ColorBLACK;
+ sk_memset32(colorTable + numColors, lastColor, maxColors - numColors);
+ }
+
+ // Set the new color count.
+ if (ctableCount != nullptr) {
+ *ctableCount = maxColors;
+ }
+
+ fColorTable.reset(new SkColorTable(colorTable, maxColors));
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Creation
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkPngCodec::IsPng(const char* buf, size_t bytesRead) {
+ return !png_sig_cmp((png_bytep) buf, (png_size_t)0, bytesRead);
+}
+
+#if (PNG_LIBPNG_VER_MAJOR > 1) || (PNG_LIBPNG_VER_MAJOR == 1 && PNG_LIBPNG_VER_MINOR >= 6)
+
+static float png_fixed_point_to_float(png_fixed_point x) {
+ // We multiply by the same factor that libpng used to convert
+ // fixed point -> double. Since we want floats, we choose to
+ // do the conversion ourselves rather than convert
+ // fixed point -> double -> float.
+ return ((float) x) * 0.00001f;
+}
+
+static float png_inverted_fixed_point_to_float(png_fixed_point x) {
+ // This is necessary because the gAMA chunk actually stores 1/gamma.
+ return 1.0f / png_fixed_point_to_float(x);
+}
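// Illustrative note, not part of the patch: a gAMA chunk for the common 1/2.2
// encoding gamma stores 45455 (0.45455 * 100000), so
//   png_fixed_point_to_float(45455)          == 45455 * 0.00001f ~= 0.45455
//   png_inverted_fixed_point_to_float(45455) == 1.0f / 0.45455   ~= 2.2
// and read_color_space() below applies that 2.2 to all three gamma channels.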
+
+static constexpr float gSRGB_toXYZD50[] {
+ 0.4358f, 0.3853f, 0.1430f, // Rx, Gx, Bx
+    0.2224f, 0.7170f, 0.0606f, // Ry, Gy, By
+ 0.0139f, 0.0971f, 0.7139f, // Rz, Gz, Bz
+};
+
+static bool convert_to_D50(SkMatrix44* toXYZD50, float toXYZ[9], float whitePoint[2]) {
+ float wX = whitePoint[0];
+ float wY = whitePoint[1];
+ if (wX < 0.0f || wY < 0.0f || (wX + wY > 1.0f)) {
+ return false;
+ }
+
+ // Calculate the XYZ illuminant. Call this the src illuminant.
+ float wZ = 1.0f - wX - wY;
+ float scale = 1.0f / wY;
+ // TODO (msarett):
+ // What are common src illuminants? I'm guessing we will almost always see D65. Should
+    // we go ahead and save a precomputed D65->D50 Bradford matrix?  Should we exit early
+    // if the src illuminant is D50?
+ SkVector3 srcXYZ = SkVector3::Make(wX * scale, 1.0f, wZ * scale);
+
+ // The D50 illuminant.
+ SkVector3 dstXYZ = SkVector3::Make(0.96422f, 1.0f, 0.82521f);
+
+ // Calculate the chromatic adaptation matrix. We will use the Bradford method, thus
+ // the matrices below. The Bradford method is used by Adobe and is widely considered
+ // to be the best.
+ // http://www.brucelindbloom.com/index.html?Eqn_ChromAdapt.html
+ SkMatrix mA, mAInv;
+ mA.setAll(0.8951f, 0.2664f, -0.1614f, -0.7502f, 1.7135f, 0.0367f, 0.0389f, -0.0685f, 1.0296f);
+ mAInv.setAll(0.9869929f, -0.1470543f, 0.1599627f, 0.4323053f, 0.5183603f, 0.0492912f,
+ -0.0085287f, 0.0400428f, 0.9684867f);
+
+ // Map illuminant into cone response domain.
+ SkVector3 srcCone;
+ srcCone.fX = mA[0] * srcXYZ.fX + mA[1] * srcXYZ.fY + mA[2] * srcXYZ.fZ;
+ srcCone.fY = mA[3] * srcXYZ.fX + mA[4] * srcXYZ.fY + mA[5] * srcXYZ.fZ;
+ srcCone.fZ = mA[6] * srcXYZ.fX + mA[7] * srcXYZ.fY + mA[8] * srcXYZ.fZ;
+ SkVector3 dstCone;
+ dstCone.fX = mA[0] * dstXYZ.fX + mA[1] * dstXYZ.fY + mA[2] * dstXYZ.fZ;
+ dstCone.fY = mA[3] * dstXYZ.fX + mA[4] * dstXYZ.fY + mA[5] * dstXYZ.fZ;
+ dstCone.fZ = mA[6] * dstXYZ.fX + mA[7] * dstXYZ.fY + mA[8] * dstXYZ.fZ;
+
+ SkMatrix DXToD50;
+ DXToD50.setIdentity();
+ DXToD50[0] = dstCone.fX / srcCone.fX;
+ DXToD50[4] = dstCone.fY / srcCone.fY;
+ DXToD50[8] = dstCone.fZ / srcCone.fZ;
+ DXToD50.postConcat(mAInv);
+ DXToD50.preConcat(mA);
+
+ SkMatrix toXYZ3x3;
+ toXYZ3x3.setAll(toXYZ[0], toXYZ[3], toXYZ[6], toXYZ[1], toXYZ[4], toXYZ[7], toXYZ[2], toXYZ[5],
+ toXYZ[8]);
+ toXYZ3x3.postConcat(DXToD50);
+
+ toXYZD50->set3x3(toXYZ3x3[0], toXYZ3x3[3], toXYZ3x3[6],
+ toXYZ3x3[1], toXYZ3x3[4], toXYZ3x3[7],
+ toXYZ3x3[2], toXYZ3x3[5], toXYZ3x3[8]);
+ return true;
+}
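// Illustrative note, not part of the patch: convert_to_D50() above is the
// standard Bradford chromatic adaptation. With M_A the Bradford matrix (mA),
// W_src = (wX/wY, 1, wZ/wY) and W_D50 = (0.96422, 1, 0.82521):
//   (rho_s, gamma_s, beta_s)^T = M_A * W_src
//   (rho_d, gamma_d, beta_d)^T = M_A * W_D50
//   M_adapt  = M_A^-1 * diag(rho_d/rho_s, gamma_d/gamma_s, beta_d/beta_s) * M_A
//   toXYZD50 = M_adapt * toXYZ
// which is what the postConcat/preConcat sequence on DXToD50 constructs.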
+
+#endif // LIBPNG >= 1.6
+
+// Returns a colorSpace object that represents any color space information in
+// the encoded data. If the encoded data contains no color space, this will
+// return NULL.
+sk_sp<SkColorSpace> read_color_space(png_structp png_ptr, png_infop info_ptr) {
+
+#if (PNG_LIBPNG_VER_MAJOR > 1) || (PNG_LIBPNG_VER_MAJOR == 1 && PNG_LIBPNG_VER_MINOR >= 6)
+
+ // First check for an ICC profile
+ png_bytep profile;
+ png_uint_32 length;
+    // The variables below are unused; however, we need to pass them in anyway or
+ // png_get_iCCP() will return nothing.
+ // Could knowing the |name| of the profile ever be interesting? Maybe for debugging?
+ png_charp name;
+ // The |compression| is uninteresting since:
+ // (1) libpng has already decompressed the profile for us.
+ // (2) "deflate" is the only mode of decompression that libpng supports.
+ int compression;
+ if (PNG_INFO_iCCP == png_get_iCCP(png_ptr, info_ptr, &name, &compression, &profile,
+ &length)) {
+ return SkColorSpace::NewICC(profile, length);
+ }
+
+ // Second, check for sRGB.
+ if (png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB)) {
+
+ // sRGB chunks also store a rendering intent: Absolute, Relative,
+ // Perceptual, and Saturation.
+ // FIXME (msarett): Extract this information from the sRGB chunk once
+ // we are able to handle this information in
+ // SkColorSpace.
+ return SkColorSpace::NewNamed(SkColorSpace::kSRGB_Named);
+ }
+
+ // Next, check for chromaticities.
+ png_fixed_point toXYZFixed[9];
+ float toXYZ[9];
+ png_fixed_point whitePointFixed[2];
+ float whitePoint[2];
+ png_fixed_point gamma;
+ float gammas[3];
+ if (png_get_cHRM_XYZ_fixed(png_ptr, info_ptr, &toXYZFixed[0], &toXYZFixed[1], &toXYZFixed[2],
+ &toXYZFixed[3], &toXYZFixed[4], &toXYZFixed[5], &toXYZFixed[6],
+ &toXYZFixed[7], &toXYZFixed[8]) &&
+ png_get_cHRM_fixed(png_ptr, info_ptr, &whitePointFixed[0], &whitePointFixed[1], nullptr,
+ nullptr, nullptr, nullptr, nullptr, nullptr))
+ {
+ for (int i = 0; i < 9; i++) {
+ toXYZ[i] = png_fixed_point_to_float(toXYZFixed[i]);
+ }
+ whitePoint[0] = png_fixed_point_to_float(whitePointFixed[0]);
+ whitePoint[1] = png_fixed_point_to_float(whitePointFixed[1]);
+
+ SkMatrix44 toXYZD50(SkMatrix44::kUninitialized_Constructor);
+ if (!convert_to_D50(&toXYZD50, toXYZ, whitePoint)) {
+ toXYZD50.set3x3RowMajorf(gSRGB_toXYZD50);
+ }
+
+ if (PNG_INFO_gAMA == png_get_gAMA_fixed(png_ptr, info_ptr, &gamma)) {
+ float value = png_inverted_fixed_point_to_float(gamma);
+ gammas[0] = value;
+ gammas[1] = value;
+ gammas[2] = value;
+
+ return SkColorSpace_Base::NewRGB(gammas, toXYZD50);
+ }
+
+ // Default to sRGB gamma if the image has color space information,
+ // but does not specify gamma.
+ return SkColorSpace::NewRGB(SkColorSpace::kSRGB_RenderTargetGamma, toXYZD50);
+ }
+
+ // Last, check for gamma.
+ if (PNG_INFO_gAMA == png_get_gAMA_fixed(png_ptr, info_ptr, &gamma)) {
+
+ // Set the gammas.
+ float value = png_inverted_fixed_point_to_float(gamma);
+ gammas[0] = value;
+ gammas[1] = value;
+ gammas[2] = value;
+
+ // Since there is no cHRM, we will guess sRGB gamut.
+ SkMatrix44 toXYZD50(SkMatrix44::kUninitialized_Constructor);
+ toXYZD50.set3x3RowMajorf(gSRGB_toXYZD50);
+
+ return SkColorSpace_Base::NewRGB(gammas, toXYZD50);
+ }
+
+#endif // LIBPNG >= 1.6
+
+ // Report that there is no color space information in the PNG. SkPngCodec is currently
+ // implemented to guess sRGB in this case.
+ return nullptr;
+}
+
+void SkPngCodec::allocateStorage(const SkImageInfo& dstInfo) {
+ switch (fXformMode) {
+ case kSwizzleOnly_XformMode:
+ break;
+ case kColorOnly_XformMode:
+ // Intentional fall through. A swizzler hasn't been created yet, but one will
+ // be created later if we are sampling. We'll go ahead and allocate
+ // enough memory to swizzle if necessary.
+ case kSwizzleColor_XformMode: {
+ const size_t colorXformBytes = dstInfo.width() * sizeof(uint32_t);
+ fStorage.reset(colorXformBytes);
+ fColorXformSrcRow = (uint32_t*) fStorage.get();
+ break;
+ }
+ }
+}
+
+void SkPngCodec::applyXformRow(void* dst, const void* src) {
+ const SkColorSpaceXform::ColorFormat srcColorFormat = SkColorSpaceXform::kRGBA_8888_ColorFormat;
+ switch (fXformMode) {
+ case kSwizzleOnly_XformMode:
+ fSwizzler->swizzle(dst, (const uint8_t*) src);
+ break;
+ case kColorOnly_XformMode:
+ fColorXform->apply(dst, (const uint32_t*) src, fXformWidth, fXformColorFormat,
+ srcColorFormat, fXformAlphaType);
+ break;
+ case kSwizzleColor_XformMode:
+ fSwizzler->swizzle(fColorXformSrcRow, (const uint8_t*) src);
+ fColorXform->apply(dst, fColorXformSrcRow, fXformWidth, fXformColorFormat,
+ srcColorFormat, fXformAlphaType);
+ break;
+ }
+}
+
+class SkPngNormalDecoder : public SkPngCodec {
+public:
+ SkPngNormalDecoder(const SkEncodedInfo& info, const SkImageInfo& imageInfo, SkStream* stream,
+ SkPngChunkReader* reader, png_structp png_ptr, png_infop info_ptr, int bitDepth)
+ : INHERITED(info, imageInfo, stream, reader, png_ptr, info_ptr, bitDepth)
+ , fLinesDecoded(0)
+ , fDst(nullptr)
+ , fRowBytes(0)
+ , fFirstRow(0)
+ , fLastRow(0)
+ {}
+
+ static void AllRowsCallback(png_structp png_ptr, png_bytep row, png_uint_32 rowNum, int /*pass*/) {
+ GetDecoder(png_ptr)->allRowsCallback(row, rowNum);
+ }
+
+ static void RowCallback(png_structp png_ptr, png_bytep row, png_uint_32 rowNum, int /*pass*/) {
+ GetDecoder(png_ptr)->rowCallback(row, rowNum);
+ }
+
+#ifdef SK_GOOGLE3_PNG_HACK
+ static void RereadInfoCallback(png_structp png_ptr, png_infop) {
+ GetDecoder(png_ptr)->rereadInfoCallback();
+ }
+#endif
+
+private:
+ int fLinesDecoded; // FIXME: Move to baseclass?
+ void* fDst;
+ size_t fRowBytes;
+
+ // Variables for partial decode
+ int fFirstRow; // FIXME: Move to baseclass?
+ int fLastRow;
+
+ typedef SkPngCodec INHERITED;
+
+ static SkPngNormalDecoder* GetDecoder(png_structp png_ptr) {
+ return static_cast<SkPngNormalDecoder*>(png_get_progressive_ptr(png_ptr));
+ }
+
+ Result decodeAllRows(void* dst, size_t rowBytes, int* rowsDecoded) override {
+ const int height = this->getInfo().height();
+ png_progressive_info_ptr callback = nullptr;
+#ifdef SK_GOOGLE3_PNG_HACK
+ callback = RereadInfoCallback;
+#endif
+ png_set_progressive_read_fn(this->png_ptr(), this, callback, AllRowsCallback, nullptr);
+ fDst = dst;
+ fRowBytes = rowBytes;
+
+ fLinesDecoded = 0;
+
+ this->processData();
+
+ if (fLinesDecoded == height) {
+ return SkCodec::kSuccess;
+ }
+
+ if (rowsDecoded) {
+ *rowsDecoded = fLinesDecoded;
+ }
+
+ return SkCodec::kIncompleteInput;
+ }
+
+ void allRowsCallback(png_bytep row, int rowNum) {
+ SkASSERT(rowNum - fFirstRow == fLinesDecoded);
+ fLinesDecoded++;
+ this->applyXformRow(fDst, row);
+ fDst = SkTAddOffset<void>(fDst, fRowBytes);
+ }
+
+ void setRange(int firstRow, int lastRow, void* dst, size_t rowBytes) override {
+ png_progressive_info_ptr callback = nullptr;
+#ifdef SK_GOOGLE3_PNG_HACK
+ callback = RereadInfoCallback;
+#endif
+ png_set_progressive_read_fn(this->png_ptr(), this, callback, RowCallback, nullptr);
+ fFirstRow = firstRow;
+ fLastRow = lastRow;
+ fDst = dst;
+ fRowBytes = rowBytes;
+ fLinesDecoded = 0;
+ }
+
+ SkCodec::Result decode(int* rowsDecoded) override {
+ this->processData();
+
+ if (fLinesDecoded == fLastRow - fFirstRow + 1) {
+ return SkCodec::kSuccess;
+ }
+
+ if (rowsDecoded) {
+ *rowsDecoded = fLinesDecoded;
+ }
+
+ return SkCodec::kIncompleteInput;
+ }
+
+ void rowCallback(png_bytep row, int rowNum) {
+ if (rowNum < fFirstRow) {
+ // Ignore this row.
+ return;
+ }
+
+ SkASSERT(rowNum <= fLastRow);
+
+ // If there is no swizzler, all rows are needed.
+ if (!this->swizzler() || this->swizzler()->rowNeeded(fLinesDecoded)) {
+ this->applyXformRow(fDst, row);
+ fDst = SkTAddOffset<void>(fDst, fRowBytes);
+ }
+
+ fLinesDecoded++;
+
+ if (rowNum == fLastRow) {
+ // Fake error to stop decoding scanlines.
+ longjmp(PNG_JMPBUF(this->png_ptr()), kStopDecoding);
+ }
+ }
+};
+
+class SkPngInterlacedDecoder : public SkPngCodec {
+public:
+ SkPngInterlacedDecoder(const SkEncodedInfo& info, const SkImageInfo& imageInfo,
+ SkStream* stream, SkPngChunkReader* reader, png_structp png_ptr, png_infop info_ptr,
+ int bitDepth, int numberPasses)
+ : INHERITED(info, imageInfo, stream, reader, png_ptr, info_ptr, bitDepth)
+ , fNumberPasses(numberPasses)
+ , fFirstRow(0)
+ , fLastRow(0)
+ , fLinesDecoded(0)
+ , fInterlacedComplete(false)
+ , fPng_rowbytes(0)
+ {}
+
+ static void InterlacedRowCallback(png_structp png_ptr, png_bytep row, png_uint_32 rowNum, int pass) {
+ auto decoder = static_cast<SkPngInterlacedDecoder*>(png_get_progressive_ptr(png_ptr));
+ decoder->interlacedRowCallback(row, rowNum, pass);
+ }
+
+#ifdef SK_GOOGLE3_PNG_HACK
+ static void RereadInfoInterlacedCallback(png_structp png_ptr, png_infop) {
+ static_cast<SkPngInterlacedDecoder*>(png_get_progressive_ptr(png_ptr))->rereadInfoInterlaced();
+ }
+#endif
+
+private:
+ const int fNumberPasses;
+ int fFirstRow;
+ int fLastRow;
+ void* fDst;
+ size_t fRowBytes;
+ int fLinesDecoded;
+ bool fInterlacedComplete;
+ size_t fPng_rowbytes;
+ SkAutoTMalloc<png_byte> fInterlaceBuffer;
+
+ typedef SkPngCodec INHERITED;
+
+#ifdef SK_GOOGLE3_PNG_HACK
+ void rereadInfoInterlaced() {
+ this->rereadInfoCallback();
+ // Note: This allocates more memory than necessary, if we are sampling/subset.
+ this->setUpInterlaceBuffer(this->getInfo().height());
+ }
+#endif
+
+ // FIXME: Currently sharing interlaced callback for all rows and subset. It's not
+ // as expensive as the subset version of non-interlaced, but it still does extra
+ // work.
+ void interlacedRowCallback(png_bytep row, int rowNum, int pass) {
+ if (rowNum < fFirstRow || rowNum > fLastRow) {
+ // Ignore this row
+ return;
+ }
+
+ png_bytep oldRow = fInterlaceBuffer.get() + (rowNum - fFirstRow) * fPng_rowbytes;
+ png_progressive_combine_row(this->png_ptr(), oldRow, row);
+
+ if (0 == pass) {
+ // The first pass initializes all rows.
+ SkASSERT(row);
+ SkASSERT(fLinesDecoded == rowNum - fFirstRow);
+ fLinesDecoded++;
+ } else {
+ SkASSERT(fLinesDecoded == fLastRow - fFirstRow + 1);
+ if (fNumberPasses - 1 == pass && rowNum == fLastRow) {
+ // Last pass, and we have read all of the rows we care about. Note that
+ // we do not care about reading anything beyond the end of the image (or
+ // beyond the last scanline requested).
+ fInterlacedComplete = true;
+ // Fake error to stop decoding scanlines.
+ longjmp(PNG_JMPBUF(this->png_ptr()), kStopDecoding);
+ }
+ }
+ }
+
+ SkCodec::Result decodeAllRows(void* dst, size_t rowBytes, int* rowsDecoded) override {
+ const int height = this->getInfo().height();
+ this->setUpInterlaceBuffer(height);
+ png_progressive_info_ptr callback = nullptr;
+#ifdef SK_GOOGLE3_PNG_HACK
+ callback = RereadInfoInterlacedCallback;
+#endif
+ png_set_progressive_read_fn(this->png_ptr(), this, callback, InterlacedRowCallback,
+ nullptr);
+
+ fFirstRow = 0;
+ fLastRow = height - 1;
+ fLinesDecoded = 0;
+
+ this->processData();
+
+ png_bytep srcRow = fInterlaceBuffer.get();
+ // FIXME: When resuming, this may rewrite rows that did not change.
+ for (int rowNum = 0; rowNum < fLinesDecoded; rowNum++) {
+ this->applyXformRow(dst, srcRow);
+ dst = SkTAddOffset<void>(dst, rowBytes);
+ srcRow = SkTAddOffset<png_byte>(srcRow, fPng_rowbytes);
+ }
+ if (fInterlacedComplete) {
+ return SkCodec::kSuccess;
+ }
+
+ if (rowsDecoded) {
+ *rowsDecoded = fLinesDecoded;
+ }
+
+ return SkCodec::kIncompleteInput;
+ }
+
+ void setRange(int firstRow, int lastRow, void* dst, size_t rowBytes) override {
+ // FIXME: We could skip rows in the interlace buffer that we won't put in the output.
+ this->setUpInterlaceBuffer(lastRow - firstRow + 1);
+ png_progressive_info_ptr callback = nullptr;
+#ifdef SK_GOOGLE3_PNG_HACK
+ callback = RereadInfoInterlacedCallback;
+#endif
+ png_set_progressive_read_fn(this->png_ptr(), this, callback, InterlacedRowCallback, nullptr);
+ fFirstRow = firstRow;
+ fLastRow = lastRow;
+ fDst = dst;
+ fRowBytes = rowBytes;
+ fLinesDecoded = 0;
+ }
+
+ SkCodec::Result decode(int* rowsDecoded) override {
+ this->processData();
+
+ // Now apply Xforms on all the rows that were decoded.
+ if (!fLinesDecoded) {
+ return SkCodec::kIncompleteInput;
+ }
+ const int lastRow = fLinesDecoded + fFirstRow - 1;
+ SkASSERT(lastRow <= fLastRow);
+
+ // FIXME: For resuming interlace, we may swizzle a row that hasn't changed. But it
+ // may be too tricky/expensive to handle that correctly.
+ png_bytep srcRow = fInterlaceBuffer.get();
+ const int sampleY = this->swizzler() ? this->swizzler()->sampleY() : 1;
+ void* dst = fDst;
+ for (int rowNum = fFirstRow; rowNum <= lastRow; rowNum += sampleY) {
+ this->applyXformRow(dst, srcRow);
+ dst = SkTAddOffset<void>(dst, fRowBytes);
+ srcRow = SkTAddOffset<png_byte>(srcRow, fPng_rowbytes * sampleY);
+ }
+
+ if (fInterlacedComplete) {
+ return SkCodec::kSuccess;
+ }
+
+ if (rowsDecoded) {
+ *rowsDecoded = fLinesDecoded;
+ }
+ return SkCodec::kIncompleteInput;
+ }
+
+ void setUpInterlaceBuffer(int height) {
+ fPng_rowbytes = png_get_rowbytes(this->png_ptr(), this->info_ptr());
+ fInterlaceBuffer.reset(fPng_rowbytes * height);
+ fInterlacedComplete = false;
+ }
+};
+
+#ifdef SK_GOOGLE3_PNG_HACK
+bool SkPngCodec::rereadHeaderIfNecessary() {
+ if (!fNeedsToRereadHeader) {
+ return true;
+ }
+
+ // On the first call, we'll need to rewind ourselves. Future calls will
+ // have already rewound in rewindIfNecessary.
+ if (this->stream()->getPosition() > 0) {
+ this->stream()->rewind();
+ }
+
+ this->destroyReadStruct();
+ png_structp png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, nullptr,
+ sk_error_fn, sk_warning_fn);
+ if (!png_ptr) {
+ return false;
+ }
+
+ // Only use the AutoCleanPng to delete png_ptr as necessary.
+ // (i.e. not for reading bounds etc.)
+ AutoCleanPng autoClean(png_ptr, nullptr, nullptr, nullptr);
+
+ png_infop info_ptr = png_create_info_struct(png_ptr);
+ if (info_ptr == nullptr) {
+ return false;
+ }
+
+ autoClean.setInfoPtr(info_ptr);
+
+#ifdef PNG_READ_UNKNOWN_CHUNKS_SUPPORTED
+    // Hook up our chunkReader so we can see any user chunks the caller may be interested in.
+ // This needs to be installed before we read the png header. Android may store ninepatch
+ // chunks in the header.
+ if (fPngChunkReader.get()) {
+ png_set_keep_unknown_chunks(png_ptr, PNG_HANDLE_CHUNK_ALWAYS, (png_byte*)"", 0);
+ png_set_read_user_chunk_fn(png_ptr, (png_voidp) fPngChunkReader.get(), sk_read_user_chunk);
+ }
+#endif
+
+ fPng_ptr = png_ptr;
+ fInfo_ptr = info_ptr;
+ autoClean.releasePngPtrs();
+ fNeedsToRereadHeader = false;
+ return true;
+}
+#endif // SK_GOOGLE3_PNG_HACK
+
+// Reads the header and initializes the output fields, if not NULL.
+//
+// @param stream Input data. Will be read to get enough information to properly
+// setup the codec.
+// @param chunkReader SkPngChunkReader, for reading unknown chunks. May be NULL.
+// If not NULL, png_ptr will hold an *unowned* pointer to it. The caller is
+// expected to continue to own it for the lifetime of the png_ptr.
+// @param outCodec Optional output variable. If non-NULL, will be set to a new
+// SkPngCodec on success.
+// @param png_ptrp Optional output variable. If non-NULL, will be set to a new
+// png_structp on success.
+// @param info_ptrp Optional output variable. If non-NULL, will be set to a new
+//         png_infop on success.
+// @return true on success, in which case the caller is responsible for calling
+// png_destroy_read_struct(png_ptrp, info_ptrp).
+// If it returns false, the passed in fields (except stream) are unchanged.
+static bool read_header(SkStream* stream, SkPngChunkReader* chunkReader, SkCodec** outCodec,
+ png_structp* png_ptrp, png_infop* info_ptrp) {
+ // The image is known to be a PNG. Decode enough to know the SkImageInfo.
+ png_structp png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, nullptr,
+ sk_error_fn, sk_warning_fn);
+ if (!png_ptr) {
+ return false;
+ }
+
+ AutoCleanPng autoClean(png_ptr, stream, chunkReader, outCodec);
+
+ png_infop info_ptr = png_create_info_struct(png_ptr);
+ if (info_ptr == nullptr) {
+ return false;
+ }
+
+ autoClean.setInfoPtr(info_ptr);
+
+ // FIXME: Could we use the return value of setjmp to specify the type of
+ // error?
+ if (setjmp(PNG_JMPBUF(png_ptr))) {
+ return false;
+ }
+
+#ifdef PNG_READ_UNKNOWN_CHUNKS_SUPPORTED
+    // Hook up our chunkReader so we can see any user chunks the caller may be interested in.
+ // This needs to be installed before we read the png header. Android may store ninepatch
+ // chunks in the header.
+ if (chunkReader) {
+ png_set_keep_unknown_chunks(png_ptr, PNG_HANDLE_CHUNK_ALWAYS, (png_byte*)"", 0);
+ png_set_read_user_chunk_fn(png_ptr, (png_voidp) chunkReader, sk_read_user_chunk);
+ }
+#endif
+
+ const bool decodedBounds = autoClean.decodeBounds();
+
+ if (!decodedBounds) {
+ return false;
+ }
+
+ // On success, decodeBounds releases ownership of png_ptr and info_ptr.
+ if (png_ptrp) {
+ *png_ptrp = png_ptr;
+ }
+ if (info_ptrp) {
+ *info_ptrp = info_ptr;
+ }
+
+ // decodeBounds takes care of setting outCodec
+ if (outCodec) {
+ SkASSERT(*outCodec);
+ }
+ return true;
+}
+
+// FIXME (scroggo): Once SK_GOOGLE3_PNG_HACK is no more, this method can be inline in
+// AutoCleanPng::infoCallback
+static void general_info_callback(png_structp png_ptr, png_infop info_ptr,
+ SkEncodedInfo::Color* outColor, SkEncodedInfo::Alpha* outAlpha) {
+ png_uint_32 origWidth, origHeight;
+ int bitDepth, encodedColorType;
+ png_get_IHDR(png_ptr, info_ptr, &origWidth, &origHeight, &bitDepth,
+ &encodedColorType, nullptr, nullptr, nullptr);
+
+ // Tell libpng to strip 16 bit/color files down to 8 bits/color.
+ // TODO: Should we handle this in SkSwizzler? Could this also benefit
+ // RAW decodes?
+ if (bitDepth == 16) {
+ SkASSERT(PNG_COLOR_TYPE_PALETTE != encodedColorType);
+ png_set_strip_16(png_ptr);
+ }
+
+ // Now determine the default colorType and alphaType and set the required transforms.
+ // Often, we depend on SkSwizzler to perform any transforms that we need. However, we
+ // still depend on libpng for many of the rare and PNG-specific cases.
+ SkEncodedInfo::Color color;
+ SkEncodedInfo::Alpha alpha;
+ switch (encodedColorType) {
+ case PNG_COLOR_TYPE_PALETTE:
+ // Extract multiple pixels with bit depths of 1, 2, and 4 from a single
+ // byte into separate bytes (useful for paletted and grayscale images).
+ if (bitDepth < 8) {
+ // TODO: Should we use SkSwizzler here?
+ png_set_packing(png_ptr);
+ }
+
+ color = SkEncodedInfo::kPalette_Color;
+ // Set the alpha depending on if a transparency chunk exists.
+ alpha = png_get_valid(png_ptr, info_ptr, PNG_INFO_tRNS) ?
+ SkEncodedInfo::kUnpremul_Alpha : SkEncodedInfo::kOpaque_Alpha;
+ break;
+ case PNG_COLOR_TYPE_RGB:
+ if (png_get_valid(png_ptr, info_ptr, PNG_INFO_tRNS)) {
+ // Convert to RGBA if transparency chunk exists.
+ png_set_tRNS_to_alpha(png_ptr);
+ color = SkEncodedInfo::kRGBA_Color;
+ alpha = SkEncodedInfo::kBinary_Alpha;
+ } else {
+ color = SkEncodedInfo::kRGB_Color;
+ alpha = SkEncodedInfo::kOpaque_Alpha;
+ }
+ break;
+ case PNG_COLOR_TYPE_GRAY:
+ // Expand grayscale images to the full 8 bits from 1, 2, or 4 bits/pixel.
+ if (bitDepth < 8) {
+ // TODO: Should we use SkSwizzler here?
+ png_set_expand_gray_1_2_4_to_8(png_ptr);
+ }
+
+ if (png_get_valid(png_ptr, info_ptr, PNG_INFO_tRNS)) {
+ png_set_tRNS_to_alpha(png_ptr);
+ color = SkEncodedInfo::kGrayAlpha_Color;
+ alpha = SkEncodedInfo::kBinary_Alpha;
+ } else {
+ color = SkEncodedInfo::kGray_Color;
+ alpha = SkEncodedInfo::kOpaque_Alpha;
+ }
+ break;
+ case PNG_COLOR_TYPE_GRAY_ALPHA:
+ color = SkEncodedInfo::kGrayAlpha_Color;
+ alpha = SkEncodedInfo::kUnpremul_Alpha;
+ break;
+ case PNG_COLOR_TYPE_RGBA:
+ color = SkEncodedInfo::kRGBA_Color;
+ alpha = SkEncodedInfo::kUnpremul_Alpha;
+ break;
+ default:
+ // All the color types have been covered above.
+ SkASSERT(false);
+ color = SkEncodedInfo::kRGBA_Color;
+ alpha = SkEncodedInfo::kUnpremul_Alpha;
+ }
+ if (outColor) {
+ *outColor = color;
+ }
+ if (outAlpha) {
+ *outAlpha = alpha;
+ }
+}
+
+#ifdef SK_GOOGLE3_PNG_HACK
+void SkPngCodec::rereadInfoCallback() {
+ general_info_callback(fPng_ptr, fInfo_ptr, nullptr, nullptr);
+ png_set_interlace_handling(fPng_ptr);
+ png_read_update_info(fPng_ptr, fInfo_ptr);
+}
+#endif
+
+void AutoCleanPng::infoCallback() {
+ SkEncodedInfo::Color color;
+ SkEncodedInfo::Alpha alpha;
+ general_info_callback(fPng_ptr, fInfo_ptr, &color, &alpha);
+
+ const int numberPasses = png_set_interlace_handling(fPng_ptr);
+
+ fReadHeader = true;
+ fDecodedBounds = true;
+#ifndef SK_GOOGLE3_PNG_HACK
+ // 1 tells libpng to save any extra data. We may be able to be more efficient by saving
+ // it ourselves.
+ png_process_data_pause(fPng_ptr, 1);
+#else
+ // Hack to make png_process_data stop.
+ fPng_ptr->buffer_size = 0;
+#endif
+ if (fOutCodec) {
+ SkASSERT(nullptr == *fOutCodec);
+ sk_sp<SkColorSpace> colorSpace = read_color_space(fPng_ptr, fInfo_ptr);
+ if (!colorSpace) {
+ // Treat unmarked pngs as sRGB.
+ colorSpace = SkColorSpace::NewNamed(SkColorSpace::kSRGB_Named);
+ }
+
+ SkEncodedInfo encodedInfo = SkEncodedInfo::Make(color, alpha, 8);
+ // FIXME (scroggo): Once we get rid of SK_GOOGLE3_PNG_HACK, general_info_callback can
+ // be inlined, so these values will already be set.
+ png_uint_32 origWidth = png_get_image_width(fPng_ptr, fInfo_ptr);
+ png_uint_32 origHeight = png_get_image_height(fPng_ptr, fInfo_ptr);
+ png_byte bitDepth = png_get_bit_depth(fPng_ptr, fInfo_ptr);
+ SkImageInfo imageInfo = encodedInfo.makeImageInfo(origWidth, origHeight, colorSpace);
+
+ if (SkEncodedInfo::kOpaque_Alpha == alpha) {
+ png_color_8p sigBits;
+ if (png_get_sBIT(fPng_ptr, fInfo_ptr, &sigBits)) {
+ if (5 == sigBits->red && 6 == sigBits->green && 5 == sigBits->blue) {
+ // Recommend a decode to 565 if the sBIT indicates 565.
+ imageInfo = imageInfo.makeColorType(kRGB_565_SkColorType);
+ }
+ }
+ }
+
+ if (1 == numberPasses) {
+ *fOutCodec = new SkPngNormalDecoder(encodedInfo, imageInfo, fStream,
+ fChunkReader, fPng_ptr, fInfo_ptr, bitDepth);
+ } else {
+ *fOutCodec = new SkPngInterlacedDecoder(encodedInfo, imageInfo, fStream,
+ fChunkReader, fPng_ptr, fInfo_ptr, bitDepth, numberPasses);
+ }
+ }
+
+
+    // Release the pointers, which are now owned by the codec, or which the caller is expected
+    // to take ownership of.
+ this->releasePngPtrs();
+}
+
+SkPngCodec::SkPngCodec(const SkEncodedInfo& encodedInfo, const SkImageInfo& imageInfo,
+ SkStream* stream, SkPngChunkReader* chunkReader, void* png_ptr,
+ void* info_ptr, int bitDepth)
+ : INHERITED(encodedInfo, imageInfo, stream)
+ , fPngChunkReader(SkSafeRef(chunkReader))
+ , fPng_ptr(png_ptr)
+ , fInfo_ptr(info_ptr)
+ , fColorXformSrcRow(nullptr)
+ , fBitDepth(bitDepth)
+#ifdef SK_GOOGLE3_PNG_HACK
+ , fNeedsToRereadHeader(true)
+#endif
+{}
+
+SkPngCodec::~SkPngCodec() {
+ this->destroyReadStruct();
+}
+
+void SkPngCodec::destroyReadStruct() {
+ if (fPng_ptr) {
+ // We will never have a nullptr fInfo_ptr with a non-nullptr fPng_ptr
+ SkASSERT(fInfo_ptr);
+ png_destroy_read_struct((png_struct**)&fPng_ptr, (png_info**)&fInfo_ptr, nullptr);
+ fPng_ptr = nullptr;
+ fInfo_ptr = nullptr;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Getting the pixels
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkPngCodec::initializeXforms(const SkImageInfo& dstInfo, const Options& options,
+ SkPMColor ctable[], int* ctableCount) {
+ if (setjmp(PNG_JMPBUF((png_struct*)fPng_ptr))) {
+ SkCodecPrintf("Failed on png_read_update_info.\n");
+ return false;
+ }
+ png_read_update_info(fPng_ptr, fInfo_ptr);
+
+ // Reset fSwizzler and fColorXform. We can't do this in onRewind() because the
+ // interlaced scanline decoder may need to rewind.
+ fSwizzler.reset(nullptr);
+ fColorXform = nullptr;
+
+ if (needs_color_xform(dstInfo, this->getInfo())) {
+ fColorXform = SkColorSpaceXform::New(this->getInfo().colorSpace(), dstInfo.colorSpace());
+ SkASSERT(fColorXform);
+ }
+
+ // If the image is RGBA and we have a color xform, we can skip the swizzler.
+ // FIXME (msarett):
+ // Support more input types to fColorXform (ex: RGB, Gray) and skip the swizzler more often.
+ if (fColorXform && SkEncodedInfo::kRGBA_Color == this->getEncodedInfo().color() &&
+ !options.fSubset)
+ {
+ fXformMode = kColorOnly_XformMode;
+ return true;
+ }
+
+ if (SkEncodedInfo::kPalette_Color == this->getEncodedInfo().color()) {
+ if (!this->createColorTable(dstInfo, ctableCount)) {
+ return false;
+ }
+ }
+
+ // Copy the color table to the client if they request kIndex8 mode.
+ copy_color_table(dstInfo, fColorTable, ctable, ctableCount);
+
+ this->initializeSwizzler(dstInfo, options);
+ return true;
+}
+
+void SkPngCodec::initializeXformParams() {
+ switch (fXformMode) {
+ case kColorOnly_XformMode:
+ fXformColorFormat = select_xform_format(this->dstInfo().colorType());
+ fXformAlphaType = select_xform_alpha(this->dstInfo().alphaType(),
+ this->getInfo().alphaType());
+ fXformWidth = this->dstInfo().width();
+ break;
+ case kSwizzleColor_XformMode:
+ fXformColorFormat = select_xform_format(this->dstInfo().colorType());
+ fXformAlphaType = select_xform_alpha(this->dstInfo().alphaType(),
+ this->getInfo().alphaType());
+ fXformWidth = this->swizzler()->swizzleWidth();
+ break;
+ default:
+ break;
+ }
+}
+
+static inline bool apply_xform_on_decode(SkColorType dstColorType, SkEncodedInfo::Color srcColor) {
+ // We will apply the color xform when reading the color table, unless F16 is requested.
+ return SkEncodedInfo::kPalette_Color != srcColor || kRGBA_F16_SkColorType == dstColorType;
+}
+
+void SkPngCodec::initializeSwizzler(const SkImageInfo& dstInfo, const Options& options) {
+ SkImageInfo swizzlerInfo = dstInfo;
+ Options swizzlerOptions = options;
+ fXformMode = kSwizzleOnly_XformMode;
+ if (fColorXform && apply_xform_on_decode(dstInfo.colorType(), this->getEncodedInfo().color())) {
+ swizzlerInfo = swizzlerInfo.makeColorType(kRGBA_8888_SkColorType);
+ if (kPremul_SkAlphaType == dstInfo.alphaType()) {
+ swizzlerInfo = swizzlerInfo.makeAlphaType(kUnpremul_SkAlphaType);
+ }
+
+ fXformMode = kSwizzleColor_XformMode;
+
+ // Here, we swizzle into temporary memory, which is not zero initialized.
+ // FIXME (msarett):
+ // Is this a problem?
+ swizzlerOptions.fZeroInitialized = kNo_ZeroInitialized;
+ }
+
+ const SkPMColor* colors = get_color_ptr(fColorTable.get());
+ fSwizzler.reset(SkSwizzler::CreateSwizzler(this->getEncodedInfo(), colors, swizzlerInfo,
+ swizzlerOptions));
+ SkASSERT(fSwizzler);
+}
+
+SkSampler* SkPngCodec::getSampler(bool createIfNecessary) {
+ if (fSwizzler || !createIfNecessary) {
+ return fSwizzler;
+ }
+
+ this->initializeSwizzler(this->dstInfo(), this->options());
+ return fSwizzler;
+}
+
+bool SkPngCodec::onRewind() {
+#ifdef SK_GOOGLE3_PNG_HACK
+ fNeedsToRereadHeader = true;
+ return true;
+#else
+ // This sets fPng_ptr and fInfo_ptr to nullptr. If read_header
+ // succeeds, they will be repopulated, and if it fails, they will
+ // remain nullptr. Any future accesses to fPng_ptr and fInfo_ptr will
+ // come through this function which will rewind and again attempt
+ // to reinitialize them.
+ this->destroyReadStruct();
+
+ png_structp png_ptr;
+ png_infop info_ptr;
+ if (!read_header(this->stream(), fPngChunkReader.get(), nullptr, &png_ptr, &info_ptr)) {
+ return false;
+ }
+
+ fPng_ptr = png_ptr;
+ fInfo_ptr = info_ptr;
+ return true;
+#endif
+}
+
+SkCodec::Result SkPngCodec::onGetPixels(const SkImageInfo& dstInfo, void* dst,
+ size_t rowBytes, const Options& options,
+ SkPMColor ctable[], int* ctableCount,
+ int* rowsDecoded) {
+ if (!conversion_possible(dstInfo, this->getInfo()) ||
+ !this->initializeXforms(dstInfo, options, ctable, ctableCount))
+ {
+ return kInvalidConversion;
+ }
+#ifdef SK_GOOGLE3_PNG_HACK
+ // Note that this is done after initializeXforms. Otherwise that method
+ // would not have png_ptr to use.
+ if (!this->rereadHeaderIfNecessary()) {
+ return kCouldNotRewind;
+ }
+#endif
+
+ if (options.fSubset) {
+ return kUnimplemented;
+ }
+
+ this->allocateStorage(dstInfo);
+ this->initializeXformParams();
+ return this->decodeAllRows(dst, rowBytes, rowsDecoded);
+}
+
+SkCodec::Result SkPngCodec::onStartIncrementalDecode(const SkImageInfo& dstInfo,
+ void* dst, size_t rowBytes, const SkCodec::Options& options,
+ SkPMColor* ctable, int* ctableCount) {
+ if (!conversion_possible(dstInfo, this->getInfo()) ||
+ !this->initializeXforms(dstInfo, options, ctable, ctableCount))
+ {
+ return kInvalidConversion;
+ }
+#ifdef SK_GOOGLE3_PNG_HACK
+ // See note in onGetPixels.
+ if (!this->rereadHeaderIfNecessary()) {
+ return kCouldNotRewind;
+ }
+#endif
+
+ this->allocateStorage(dstInfo);
+
+ int firstRow, lastRow;
+ if (options.fSubset) {
+ firstRow = options.fSubset->top();
+ lastRow = options.fSubset->bottom() - 1;
+ } else {
+ firstRow = 0;
+ lastRow = dstInfo.height() - 1;
+ }
+ this->setRange(firstRow, lastRow, dst, rowBytes);
+ return kSuccess;
+}
+
+SkCodec::Result SkPngCodec::onIncrementalDecode(int* rowsDecoded) {
+ // FIXME: Only necessary on the first call.
+ this->initializeXformParams();
+
+ return this->decode(rowsDecoded);
+}
+
+uint64_t SkPngCodec::onGetFillValue(const SkImageInfo& dstInfo) const {
+ const SkPMColor* colorPtr = get_color_ptr(fColorTable.get());
+ if (colorPtr) {
+ SkAlphaType alphaType = select_xform_alpha(dstInfo.alphaType(),
+ this->getInfo().alphaType());
+ return get_color_table_fill_value(dstInfo.colorType(), alphaType, colorPtr, 0,
+ fColorXform.get());
+ }
+ return INHERITED::onGetFillValue(dstInfo);
+}
+
+SkCodec* SkPngCodec::NewFromStream(SkStream* stream, SkPngChunkReader* chunkReader) {
+ SkAutoTDelete<SkStream> streamDeleter(stream);
+
+ SkCodec* outCodec = nullptr;
+ if (read_header(streamDeleter.get(), chunkReader, &outCodec, nullptr, nullptr)) {
+ // Codec has taken ownership of the stream.
+ SkASSERT(outCodec);
+ streamDeleter.release();
+ return outCodec;
+ }
+
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/codec/SkPngCodec.h b/gfx/skia/skia/src/codec/SkPngCodec.h
new file mode 100644
index 000000000..1fc451757
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkPngCodec.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCodec.h"
+#include "SkColorSpaceXform.h"
+#include "SkColorTable.h"
+#include "SkPngChunkReader.h"
+#include "SkEncodedFormat.h"
+#include "SkImageInfo.h"
+#include "SkRefCnt.h"
+#include "SkSwizzler.h"
+
+// FIXME (scroggo): GOOGLE3 is currently using an outdated version of libpng,
+// so we need to work around the lack of the method png_process_data_pause.
+// This code will be unnecessary once we update GOOGLE3. It would make more
+// sense to condition this on the version of libpng being used, but we do not
+// know that here because png.h is only included by the cpp file.
+#define SK_GOOGLE3_PNG_HACK
+
+class SkStream;
+
+class SkPngCodec : public SkCodec {
+public:
+ static bool IsPng(const char*, size_t);
+
+ // Assume IsPng was called and returned true.
+ static SkCodec* NewFromStream(SkStream*, SkPngChunkReader* = NULL);
+
+ virtual ~SkPngCodec();
+
+protected:
+ // We hold the png_ptr and info_ptr as voidp to avoid having to include png.h
+ // or forward declare their types here. voidp auto-casts to the real pointer types.
+ struct voidp {
+ voidp(void* ptr) : fPtr(ptr) {}
+
+ template <typename T>
+ operator T*() const { return (T*)fPtr; }
+
+ explicit operator bool() const { return fPtr != nullptr; }
+
+ void* fPtr;
+ };
+
+ SkPngCodec(const SkEncodedInfo&, const SkImageInfo&, SkStream*, SkPngChunkReader*,
+ void* png_ptr, void* info_ptr, int bitDepth);
+
+ Result onGetPixels(const SkImageInfo&, void*, size_t, const Options&, SkPMColor*, int*, int*)
+ override;
+ SkEncodedFormat onGetEncodedFormat() const override { return kPNG_SkEncodedFormat; }
+ bool onRewind() override;
+ uint64_t onGetFillValue(const SkImageInfo&) const override;
+
+ SkSampler* getSampler(bool createIfNecessary) override;
+ void applyXformRow(void* dst, const void* src);
+
+ voidp png_ptr() { return fPng_ptr; }
+ voidp info_ptr() { return fInfo_ptr; }
+
+ SkSwizzler* swizzler() { return fSwizzler; }
+
+ // Initialize variables used by applyXformRow.
+ void initializeXformParams();
+
+ /**
+ * Pass available input to libpng to process it.
+ *
+ * libpng will call any relevant callbacks installed. This will continue decoding
+ * until it reaches the end of the file, or until a callback tells libpng to stop.
+ */
+ void processData();
+
+#ifdef SK_GOOGLE3_PNG_HACK
+ // In libpng 1.2.56, png_process_data_pause does not exist, so when we wanted to
+ // read the header, we may have read too far. In that case, we need to delete the
+ // png_ptr and info_ptr and recreate them. This method does that (and attaches the
+    // chunk reader).
+ bool rereadHeaderIfNecessary();
+
+ // This method sets up the new png_ptr/info_ptr (created in rereadHeaderIfNecessary)
+ // the way we set up the old one the first time in AutoCleanPng.decodeBounds's callback.
+ void rereadInfoCallback();
+#endif
+
+ Result onStartIncrementalDecode(const SkImageInfo& dstInfo, void* pixels, size_t rowBytes,
+ const SkCodec::Options&,
+ SkPMColor* ctable, int* ctableCount) override;
+ Result onIncrementalDecode(int*) override;
+
+ SkAutoTUnref<SkPngChunkReader> fPngChunkReader;
+ voidp fPng_ptr;
+ voidp fInfo_ptr;
+
+ // These are stored here so they can be used both by normal decoding and scanline decoding.
+ SkAutoTUnref<SkColorTable> fColorTable; // May be unpremul.
+ SkAutoTDelete<SkSwizzler> fSwizzler;
+ std::unique_ptr<SkColorSpaceXform> fColorXform;
+ SkAutoTMalloc<uint8_t> fStorage;
+ uint32_t* fColorXformSrcRow;
+ const int fBitDepth;
+
+private:
+
+ enum XformMode {
+ // Requires only a swizzle pass.
+ kSwizzleOnly_XformMode,
+
+ // Requires only a color xform pass.
+ kColorOnly_XformMode,
+
+ // Requires a swizzle and a color xform.
+ kSwizzleColor_XformMode,
+ };
+
+ bool createColorTable(const SkImageInfo& dstInfo, int* ctableCount);
+ // Helper to set up swizzler, color xforms, and color table. Also calls png_read_update_info.
+ bool initializeXforms(const SkImageInfo& dstInfo, const Options&, SkPMColor* colorPtr,
+ int* colorCount);
+ void initializeSwizzler(const SkImageInfo& dstInfo, const Options&);
+ void allocateStorage(const SkImageInfo& dstInfo);
+ void destroyReadStruct();
+
+ virtual Result decodeAllRows(void* dst, size_t rowBytes, int* rowsDecoded) = 0;
+ virtual void setRange(int firstRow, int lastRow, void* dst, size_t rowBytes) = 0;
+ virtual Result decode(int* rowsDecoded) = 0;
+
+ XformMode fXformMode;
+ SkColorSpaceXform::ColorFormat fXformColorFormat;
+ SkAlphaType fXformAlphaType;
+ int fXformWidth;
+
+#ifdef SK_GOOGLE3_PNG_HACK
+ bool fNeedsToRereadHeader;
+#endif
+
+ typedef SkCodec INHERITED;
+};
diff --git a/gfx/skia/skia/src/codec/SkRawAdapterCodec.cpp b/gfx/skia/skia/src/codec/SkRawAdapterCodec.cpp
new file mode 100644
index 000000000..76cbaa1a2
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkRawAdapterCodec.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCodec.h"
+#include "SkCodecPriv.h"
+#include "SkRawAdapterCodec.h"
+
+SkRawAdapterCodec::SkRawAdapterCodec(SkRawCodec* codec)
+ : INHERITED(codec)
+{}
+
+SkISize SkRawAdapterCodec::onGetSampledDimensions(int sampleSize) const {
+ float scale = 1.f / static_cast<float>(sampleSize);
+ return this->codec()->getScaledDimensions(scale);
+}
+
+SkCodec::Result SkRawAdapterCodec::onGetAndroidPixels(
+ const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const AndroidOptions& options) {
+ SkCodec::Options codecOptions;
+ codecOptions.fZeroInitialized = options.fZeroInitialized;
+ codecOptions.fSubset = options.fSubset;
+ return this->codec()->getPixels(
+ info, pixels, rowBytes, &codecOptions, options.fColorPtr,
+ options.fColorCount);
+}
diff --git a/gfx/skia/skia/src/codec/SkRawAdapterCodec.h b/gfx/skia/skia/src/codec/SkRawAdapterCodec.h
new file mode 100644
index 000000000..b552f2aae
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkRawAdapterCodec.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRawAdapterCodec_DEFINED
+#define SkRawAdapterCodec_DEFINED
+
+#include "SkAndroidCodec.h"
+#include "SkCodec.h"
+#include "SkEncodedFormat.h"
+#include "SkRawCodec.h"
+#include "SkStream.h"
+#include "SkTypes.h"
+
+/**
+ * This class implements the functionality of SkAndroidCodec. It uses an
+ * SkRawCodec.
+ */
+class SkRawAdapterCodec : public SkAndroidCodec {
+public:
+
+ explicit SkRawAdapterCodec(SkRawCodec*);
+
+ virtual ~SkRawAdapterCodec() {}
+
+protected:
+
+ SkISize onGetSampledDimensions(int sampleSize) const override;
+
+ bool onGetSupportedSubset(SkIRect* /*desiredSubset*/) const override { return false; }
+
+ SkCodec::Result onGetAndroidPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const AndroidOptions& options) override;
+
+private:
+
+ typedef SkAndroidCodec INHERITED;
+};
+#endif // SkRawAdapterCodec_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkRawCodec.cpp b/gfx/skia/skia/src/codec/SkRawCodec.cpp
new file mode 100644
index 000000000..2a6a48fdb
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkRawCodec.cpp
@@ -0,0 +1,782 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCodec.h"
+#include "SkCodecPriv.h"
+#include "SkColorPriv.h"
+#include "SkData.h"
+#include "SkJpegCodec.h"
+#include "SkMutex.h"
+#include "SkRawCodec.h"
+#include "SkRefCnt.h"
+#include "SkStream.h"
+#include "SkStreamPriv.h"
+#include "SkSwizzler.h"
+#include "SkTArray.h"
+#include "SkTaskGroup.h"
+#include "SkTemplates.h"
+#include "SkTypes.h"
+
+#include "dng_area_task.h"
+#include "dng_color_space.h"
+#include "dng_errors.h"
+#include "dng_exceptions.h"
+#include "dng_host.h"
+#include "dng_info.h"
+#include "dng_memory.h"
+#include "dng_render.h"
+#include "dng_stream.h"
+
+#include "src/piex.h"
+
+#include <cmath> // for std::round,floor,ceil
+#include <limits>
+
+namespace {
+
+// Calculates the number of tiles of tileSize that fit into the area in the vertical and horizontal
+// directions.
+dng_point num_tiles_in_area(const dng_point &areaSize,
+ const dng_point_real64 &tileSize) {
+ // FIXME: Add a ceil_div() helper in SkCodecPriv.h
+ return dng_point(static_cast<int32>((areaSize.v + tileSize.v - 1) / tileSize.v),
+ static_cast<int32>((areaSize.h + tileSize.h - 1) / tileSize.h));
+}
+
+int num_tasks_required(const dng_point& tilesInTask,
+ const dng_point& tilesInArea) {
+ return ((tilesInArea.v + tilesInTask.v - 1) / tilesInTask.v) *
+ ((tilesInArea.h + tilesInTask.h - 1) / tilesInTask.h);
+}
+
+// Calculate the number of tiles to process per task, taking into account the maximum number of
+// tasks. It prefers to increase horizontally for better locality of reference.
+dng_point num_tiles_per_task(const int maxTasks,
+ const dng_point &tilesInArea) {
+ dng_point tilesInTask = {1, 1};
+ while (num_tasks_required(tilesInTask, tilesInArea) > maxTasks) {
+ if (tilesInTask.h < tilesInArea.h) {
+ ++tilesInTask.h;
+ } else if (tilesInTask.v < tilesInArea.v) {
+ ++tilesInTask.v;
+ } else {
+ ThrowProgramError("num_tiles_per_task calculation is wrong.");
+ }
+ }
+ return tilesInTask;
+}
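+
+// A worked example of the tile math above, using hypothetical numbers: for an area of 10x10 tiles
+// and maxTasks == 8, tilesInTask grows horizontally from {v = 1, h = 1} to {v = 1, h = 10} (still
+// 10 tasks), then vertically to {v = 2, h = 10}, which needs only 5 tasks; each task then covers
+// a full-width band two tiles tall.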
+
+std::vector<dng_rect> compute_task_areas(const int maxTasks, const dng_rect& area,
+ const dng_point& tileSize) {
+ std::vector<dng_rect> taskAreas;
+ const dng_point tilesInArea = num_tiles_in_area(area.Size(), tileSize);
+ const dng_point tilesPerTask = num_tiles_per_task(maxTasks, tilesInArea);
+ const dng_point taskAreaSize = {tilesPerTask.v * tileSize.v,
+ tilesPerTask.h * tileSize.h};
+ for (int v = 0; v < tilesInArea.v; v += tilesPerTask.v) {
+ for (int h = 0; h < tilesInArea.h; h += tilesPerTask.h) {
+ dng_rect taskArea;
+ taskArea.t = area.t + v * tileSize.v;
+ taskArea.l = area.l + h * tileSize.h;
+ taskArea.b = Min_int32(taskArea.t + taskAreaSize.v, area.b);
+ taskArea.r = Min_int32(taskArea.l + taskAreaSize.h, area.r);
+
+ taskAreas.push_back(taskArea);
+ }
+ }
+ return taskAreas;
+}
+
+class SkDngHost : public dng_host {
+public:
+ explicit SkDngHost(dng_memory_allocator* allocater) : dng_host(allocater) {}
+
+ void PerformAreaTask(dng_area_task& task, const dng_rect& area) override {
+        // The area task gets split up into maxTasks sub-tasks. maxTasks is defined by the
+        // DNG SDK's default implementation of dng_area_task::MaxThreads(), which returns 8 or 32
+        // depending on the architecture.
+ const int maxTasks = static_cast<int>(task.MaxThreads());
+
+ SkTaskGroup taskGroup;
+
+ // tileSize is typically 256x256
+ const dng_point tileSize(task.FindTileSize(area));
+ const std::vector<dng_rect> taskAreas = compute_task_areas(maxTasks, area, tileSize);
+ const int numTasks = static_cast<int>(taskAreas.size());
+
+ SkMutex mutex;
+ SkTArray<dng_exception> exceptions;
+ task.Start(numTasks, tileSize, &Allocator(), Sniffer());
+ for (int taskIndex = 0; taskIndex < numTasks; ++taskIndex) {
+ taskGroup.add([&mutex, &exceptions, &task, this, taskIndex, taskAreas, tileSize] {
+ try {
+ task.ProcessOnThread(taskIndex, taskAreas[taskIndex], tileSize, this->Sniffer());
+ } catch (dng_exception& exception) {
+ SkAutoMutexAcquire lock(mutex);
+ exceptions.push_back(exception);
+ } catch (...) {
+ SkAutoMutexAcquire lock(mutex);
+ exceptions.push_back(dng_exception(dng_error_unknown));
+ }
+ });
+ }
+
+ taskGroup.wait();
+ task.Finish(numTasks);
+
+        // Currently we only re-throw the first caught exception.
+ if (!exceptions.empty()) {
+ Throw_dng_error(exceptions.front().ErrorCode(), nullptr, nullptr);
+ }
+ }
+
+ uint32 PerformAreaTaskThreads() override {
+        // FIXME: Need to get the real number of available threads used in the SkTaskGroup.
+ return kMaxMPThreads;
+ }
+
+private:
+ typedef dng_host INHERITED;
+};
+
+// T must be an unsigned type.
+template <class T>
+bool safe_add_to_size_t(T arg1, T arg2, size_t* result) {
+ SkASSERT(arg1 >= 0);
+ SkASSERT(arg2 >= 0);
+ if (arg1 >= 0 && arg2 <= std::numeric_limits<T>::max() - arg1) {
+ T sum = arg1 + arg2;
+ if (sum <= std::numeric_limits<size_t>::max()) {
+ *result = static_cast<size_t>(sum);
+ return true;
+ }
+ }
+ return false;
+}
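+
+// A sketch of the overflow cases guarded against above, using hypothetical values: if arg1 is
+// already std::numeric_limits<T>::max(), any nonzero arg2 fails the first check; on a 32-bit
+// build, a uint64 sum larger than SIZE_MAX fails the second check, so callers see false instead
+// of a silently truncated size_t.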
+
+class SkDngMemoryAllocator : public dng_memory_allocator {
+public:
+ ~SkDngMemoryAllocator() override {}
+
+ dng_memory_block* Allocate(uint32 size) override {
+        // To avoid arbitrary allocation requests, which might lead to out-of-memory, limit the
+        // amount of memory that can be allocated at once. The memory limit is based on experiments
+        // and is supposed to be sufficient for all valid DNG images.
+ if (size > 300 * 1024 * 1024) { // 300 MB
+ ThrowMemoryFull();
+ }
+ return dng_memory_allocator::Allocate(size);
+ }
+};
+
+bool is_asset_stream(const SkStream& stream) {
+ return stream.hasLength() && stream.hasPosition();
+}
+
+} // namespace
+
+class SkRawStream {
+public:
+ virtual ~SkRawStream() {}
+
+ /*
+ * Gets the length of the stream. Depending on the type of stream, this may require reading to
+ * the end of the stream.
+ */
+ virtual uint64 getLength() = 0;
+
+ virtual bool read(void* data, size_t offset, size_t length) = 0;
+
+ /*
+ * Creates an SkMemoryStream from the offset with size.
+     * Note: for performance reasons, this function is destructive to the SkRawStream. One should
+     * abandon the current object after the function call.
+ */
+ virtual SkMemoryStream* transferBuffer(size_t offset, size_t size) = 0;
+};
+
+class SkRawLimitedDynamicMemoryWStream : public SkDynamicMemoryWStream {
+public:
+ virtual ~SkRawLimitedDynamicMemoryWStream() {}
+
+ bool write(const void* buffer, size_t size) override {
+ size_t newSize;
+ if (!safe_add_to_size_t(this->bytesWritten(), size, &newSize) ||
+ newSize > kMaxStreamSize)
+ {
+ SkCodecPrintf("Error: Stream size exceeds the limit.\n");
+ return false;
+ }
+ return this->INHERITED::write(buffer, size);
+ }
+
+private:
+    // Most valid RAW images will not be larger than 100MB. This limit helps avoid streaming
+    // too large a data chunk. We can always adjust the limit here if needed.
+ const size_t kMaxStreamSize = 100 * 1024 * 1024; // 100MB
+
+ typedef SkDynamicMemoryWStream INHERITED;
+};
+
+// Note: the maximum buffer size is 100MB (limited by SkRawLimitedDynamicMemoryWStream).
+class SkRawBufferedStream : public SkRawStream {
+public:
+    // Will take ownership of the stream.
+ explicit SkRawBufferedStream(SkStream* stream)
+ : fStream(stream)
+ , fWholeStreamRead(false)
+ {
+ // Only use SkRawBufferedStream when the stream is not an asset stream.
+ SkASSERT(!is_asset_stream(*stream));
+ }
+
+ ~SkRawBufferedStream() override {}
+
+ uint64 getLength() override {
+ if (!this->bufferMoreData(kReadToEnd)) { // read whole stream
+ ThrowReadFile();
+ }
+ return fStreamBuffer.bytesWritten();
+ }
+
+ bool read(void* data, size_t offset, size_t length) override {
+ if (length == 0) {
+ return true;
+ }
+
+ size_t sum;
+ if (!safe_add_to_size_t(offset, length, &sum)) {
+ return false;
+ }
+
+ return this->bufferMoreData(sum) && fStreamBuffer.read(data, offset, length);
+ }
+
+ SkMemoryStream* transferBuffer(size_t offset, size_t size) override {
+ sk_sp<SkData> data(SkData::MakeUninitialized(size));
+ if (offset > fStreamBuffer.bytesWritten()) {
+ // If the offset is not buffered, read from fStream directly and skip the buffering.
+ const size_t skipLength = offset - fStreamBuffer.bytesWritten();
+ if (fStream->skip(skipLength) != skipLength) {
+ return nullptr;
+ }
+ const size_t bytesRead = fStream->read(data->writable_data(), size);
+ if (bytesRead < size) {
+ data = SkData::MakeSubset(data.get(), 0, bytesRead);
+ }
+ } else {
+ const size_t alreadyBuffered = SkTMin(fStreamBuffer.bytesWritten() - offset, size);
+ if (alreadyBuffered > 0 &&
+ !fStreamBuffer.read(data->writable_data(), offset, alreadyBuffered)) {
+ return nullptr;
+ }
+
+ const size_t remaining = size - alreadyBuffered;
+ if (remaining) {
+ auto* dst = static_cast<uint8_t*>(data->writable_data()) + alreadyBuffered;
+ const size_t bytesRead = fStream->read(dst, remaining);
+ size_t newSize;
+ if (bytesRead < remaining) {
+ if (!safe_add_to_size_t(alreadyBuffered, bytesRead, &newSize)) {
+ return nullptr;
+ }
+ data = SkData::MakeSubset(data.get(), 0, newSize);
+ }
+ }
+ }
+ return new SkMemoryStream(data);
+ }
+
+private:
+    // Note: if newSize == kReadToEnd (0), this function will read to the end of the stream.
+ bool bufferMoreData(size_t newSize) {
+ if (newSize == kReadToEnd) {
+ if (fWholeStreamRead) { // already read-to-end.
+ return true;
+ }
+
+ // TODO: optimize for the special case when the input is SkMemoryStream.
+ return SkStreamCopy(&fStreamBuffer, fStream.get());
+ }
+
+ if (newSize <= fStreamBuffer.bytesWritten()) { // already buffered to newSize
+ return true;
+ }
+ if (fWholeStreamRead) { // newSize is larger than the whole stream.
+ return false;
+ }
+
+        // Try to read at least 8192 bytes to avoid too many small reads.
+ const size_t kMinSizeToRead = 8192;
+ const size_t sizeRequested = newSize - fStreamBuffer.bytesWritten();
+ const size_t sizeToRead = SkTMax(kMinSizeToRead, sizeRequested);
+ SkAutoSTMalloc<kMinSizeToRead, uint8> tempBuffer(sizeToRead);
+ const size_t bytesRead = fStream->read(tempBuffer.get(), sizeToRead);
+ if (bytesRead < sizeRequested) {
+ return false;
+ }
+ return fStreamBuffer.write(tempBuffer.get(), bytesRead);
+ }
+
+ SkAutoTDelete<SkStream> fStream;
+ bool fWholeStreamRead;
+
+    // Use a size-limited stream to avoid holding too large a buffer.
+ SkRawLimitedDynamicMemoryWStream fStreamBuffer;
+
+ const size_t kReadToEnd = 0;
+};
+
+class SkRawAssetStream : public SkRawStream {
+public:
+    // Will take ownership of the stream.
+ explicit SkRawAssetStream(SkStream* stream)
+ : fStream(stream)
+ {
+ // Only use SkRawAssetStream when the stream is an asset stream.
+ SkASSERT(is_asset_stream(*stream));
+ }
+
+ ~SkRawAssetStream() override {}
+
+ uint64 getLength() override {
+ return fStream->getLength();
+ }
+
+
+ bool read(void* data, size_t offset, size_t length) override {
+ if (length == 0) {
+ return true;
+ }
+
+ size_t sum;
+ if (!safe_add_to_size_t(offset, length, &sum)) {
+ return false;
+ }
+
+ return fStream->seek(offset) && (fStream->read(data, length) == length);
+ }
+
+ SkMemoryStream* transferBuffer(size_t offset, size_t size) override {
+ if (fStream->getLength() < offset) {
+ return nullptr;
+ }
+
+ size_t sum;
+ if (!safe_add_to_size_t(offset, size, &sum)) {
+ return nullptr;
+ }
+
+        // This may read less than the requested "size", because the JPEG codec also wants to
+        // handle a partial JPEG file.
+ const size_t bytesToRead = SkTMin(sum, fStream->getLength()) - offset;
+ if (bytesToRead == 0) {
+ return nullptr;
+ }
+
+ if (fStream->getMemoryBase()) { // directly copy if getMemoryBase() is available.
+ sk_sp<SkData> data(SkData::MakeWithCopy(
+ static_cast<const uint8_t*>(fStream->getMemoryBase()) + offset, bytesToRead));
+ fStream.reset();
+ return new SkMemoryStream(data);
+ } else {
+ sk_sp<SkData> data(SkData::MakeUninitialized(bytesToRead));
+ if (!fStream->seek(offset)) {
+ return nullptr;
+ }
+ const size_t bytesRead = fStream->read(data->writable_data(), bytesToRead);
+ if (bytesRead < bytesToRead) {
+ data = SkData::MakeSubset(data.get(), 0, bytesRead);
+ }
+ return new SkMemoryStream(data);
+ }
+ }
+private:
+ SkAutoTDelete<SkStream> fStream;
+};
+
+class SkPiexStream : public ::piex::StreamInterface {
+public:
+    // Will NOT take ownership of the stream.
+ explicit SkPiexStream(SkRawStream* stream) : fStream(stream) {}
+
+ ~SkPiexStream() override {}
+
+ ::piex::Error GetData(const size_t offset, const size_t length,
+ uint8* data) override {
+ return fStream->read(static_cast<void*>(data), offset, length) ?
+ ::piex::Error::kOk : ::piex::Error::kFail;
+ }
+
+private:
+ SkRawStream* fStream;
+};
+
+class SkDngStream : public dng_stream {
+public:
+    // Will NOT take ownership of the stream.
+ SkDngStream(SkRawStream* stream) : fStream(stream) {}
+
+ ~SkDngStream() override {}
+
+ uint64 DoGetLength() override { return fStream->getLength(); }
+
+ void DoRead(void* data, uint32 count, uint64 offset) override {
+ size_t sum;
+ if (!safe_add_to_size_t(static_cast<uint64>(count), offset, &sum) ||
+ !fStream->read(data, static_cast<size_t>(offset), static_cast<size_t>(count))) {
+ ThrowReadFile();
+ }
+ }
+
+private:
+ SkRawStream* fStream;
+};
+
+class SkDngImage {
+public:
+ /*
+     * Initializes the object with the information from Piex as a first attempt. This can
+     * save time and storage when obtaining the DNG dimensions and the color filter array (CFA)
+     * pattern, which is essential for demosaicing the sensor image.
+     * Note: this takes ownership of the stream.
+ */
+ static SkDngImage* NewFromStream(SkRawStream* stream) {
+ SkAutoTDelete<SkDngImage> dngImage(new SkDngImage(stream));
+ if (!dngImage->isTiffHeaderValid()) {
+ return nullptr;
+ }
+
+ if (!dngImage->initFromPiex()) {
+ if (!dngImage->readDng()) {
+ return nullptr;
+ }
+ }
+
+ return dngImage.release();
+ }
+
+ /*
+     * Renders the DNG image to the given size. The DNG SDK only allows scaling close to integer factors
+ * down to 80 pixels on the short edge. The rendered image will be close to the specified size,
+ * but there is no guarantee that any of the edges will match the requested size. E.g.
+ * 100% size: 4000 x 3000
+ * requested size: 1600 x 1200
+ * returned size could be: 2000 x 1500
+ */
+ dng_image* render(int width, int height) {
+ if (!fHost || !fInfo || !fNegative || !fDngStream) {
+ if (!this->readDng()) {
+ return nullptr;
+ }
+ }
+
+ // DNG SDK preserves the aspect ratio, so it only needs to know the longer dimension.
+ const int preferredSize = SkTMax(width, height);
+ try {
+ // render() takes ownership of fHost, fInfo, fNegative and fDngStream when available.
+ SkAutoTDelete<dng_host> host(fHost.release());
+ SkAutoTDelete<dng_info> info(fInfo.release());
+ SkAutoTDelete<dng_negative> negative(fNegative.release());
+ SkAutoTDelete<dng_stream> dngStream(fDngStream.release());
+
+ host->SetPreferredSize(preferredSize);
+ host->ValidateSizes();
+
+ negative->ReadStage1Image(*host, *dngStream, *info);
+
+ if (info->fMaskIndex != -1) {
+ negative->ReadTransparencyMask(*host, *dngStream, *info);
+ }
+
+ negative->ValidateRawImageDigest(*host);
+ if (negative->IsDamaged()) {
+ return nullptr;
+ }
+
+ const int32 kMosaicPlane = -1;
+ negative->BuildStage2Image(*host);
+ negative->BuildStage3Image(*host, kMosaicPlane);
+
+ dng_render render(*host, *negative);
+ render.SetFinalSpace(dng_space_sRGB::Get());
+ render.SetFinalPixelType(ttByte);
+
+ dng_point stage3_size = negative->Stage3Image()->Size();
+ render.SetMaximumSize(SkTMax(stage3_size.h, stage3_size.v));
+
+ return render.Render();
+ } catch (...) {
+ return nullptr;
+ }
+ }
+
+ const SkEncodedInfo& getEncodedInfo() const {
+ return fEncodedInfo;
+ }
+
+ int width() const {
+ return fWidth;
+ }
+
+ int height() const {
+ return fHeight;
+ }
+
+ bool isScalable() const {
+ return fIsScalable;
+ }
+
+ bool isXtransImage() const {
+ return fIsXtransImage;
+ }
+
+private:
+    // Quick check if the image contains a valid TIFF header, as required by the DNG format.
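+    // For example, a little-endian TIFF/DNG file starts with the bytes 'I' 'I' 0x2A 0x00, while a
+    // big-endian one starts with 'M' 'M' 0x00 0x2A (0x2A being the magic number 42 checked below).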
+ bool isTiffHeaderValid() const {
+ const size_t kHeaderSize = 4;
+ SkAutoSTMalloc<kHeaderSize, unsigned char> header(kHeaderSize);
+ if (!fStream->read(header.get(), 0 /* offset */, kHeaderSize)) {
+ return false;
+ }
+
+ // Check if the header is valid (endian info and magic number "42").
+ bool littleEndian;
+ if (!is_valid_endian_marker(header, &littleEndian)) {
+ return false;
+ }
+
+ return 0x2A == get_endian_short(header + 2, littleEndian);
+ }
+
+ void init(int width, int height, const dng_point& cfaPatternSize) {
+ fWidth = width;
+ fHeight = height;
+
+ // The DNG SDK scales only during demosaicing, so scaling is only possible when
+ // a mosaic info is available.
+ fIsScalable = cfaPatternSize.v != 0 && cfaPatternSize.h != 0;
+ fIsXtransImage = fIsScalable ? (cfaPatternSize.v == 6 && cfaPatternSize.h == 6) : false;
+ }
+
+ bool initFromPiex() {
+        // Does not take ownership of rawStream.
+ SkPiexStream piexStream(fStream.get());
+ ::piex::PreviewImageData imageData;
+ if (::piex::IsRaw(&piexStream)
+ && ::piex::GetPreviewImageData(&piexStream, &imageData) == ::piex::Error::kOk)
+ {
+ // Verify the size information, as it is only optional information for PIEX.
+ if (imageData.full_width == 0 || imageData.full_height == 0) {
+ return false;
+ }
+
+ dng_point cfaPatternSize(imageData.cfa_pattern_dim[1], imageData.cfa_pattern_dim[0]);
+ this->init(static_cast<int>(imageData.full_width),
+ static_cast<int>(imageData.full_height), cfaPatternSize);
+ return true;
+ }
+ return false;
+ }
+
+ bool readDng() {
+ try {
+            // Due to a limitation of the DNG SDK, we need to reset host and info.
+ fHost.reset(new SkDngHost(&fAllocator));
+ fInfo.reset(new dng_info);
+ fDngStream.reset(new SkDngStream(fStream));
+
+ fHost->ValidateSizes();
+ fInfo->Parse(*fHost, *fDngStream);
+ fInfo->PostParse(*fHost);
+ if (!fInfo->IsValidDNG()) {
+ return false;
+ }
+
+ fNegative.reset(fHost->Make_dng_negative());
+ fNegative->Parse(*fHost, *fDngStream, *fInfo);
+ fNegative->PostParse(*fHost, *fDngStream, *fInfo);
+ fNegative->SynchronizeMetadata();
+
+ dng_point cfaPatternSize(0, 0);
+ if (fNegative->GetMosaicInfo() != nullptr) {
+ cfaPatternSize = fNegative->GetMosaicInfo()->fCFAPatternSize;
+ }
+ this->init(static_cast<int>(fNegative->DefaultCropSizeH().As_real64()),
+ static_cast<int>(fNegative->DefaultCropSizeV().As_real64()),
+ cfaPatternSize);
+ return true;
+ } catch (...) {
+ return false;
+ }
+ }
+
+ SkDngImage(SkRawStream* stream)
+ : fStream(stream)
+ , fEncodedInfo(SkEncodedInfo::Make(SkEncodedInfo::kRGB_Color,
+ SkEncodedInfo::kOpaque_Alpha, 8))
+ {}
+
+ SkDngMemoryAllocator fAllocator;
+ SkAutoTDelete<SkRawStream> fStream;
+ SkAutoTDelete<dng_host> fHost;
+ SkAutoTDelete<dng_info> fInfo;
+ SkAutoTDelete<dng_negative> fNegative;
+ SkAutoTDelete<dng_stream> fDngStream;
+
+ int fWidth;
+ int fHeight;
+ SkEncodedInfo fEncodedInfo;
+ bool fIsScalable;
+ bool fIsXtransImage;
+};
+
+/*
+ * Tries to handle the image with PIEX. If PIEX returns kOk and finds the preview image, creates
+ * an SkJpegCodec. If PIEX returns kFail, the file is invalid and this returns nullptr. Otherwise,
+ * falls back to creating an SkRawCodec for DNG images.
+ */
+SkCodec* SkRawCodec::NewFromStream(SkStream* stream) {
+ SkAutoTDelete<SkRawStream> rawStream;
+ if (is_asset_stream(*stream)) {
+ rawStream.reset(new SkRawAssetStream(stream));
+ } else {
+ rawStream.reset(new SkRawBufferedStream(stream));
+ }
+
+    // Does not take ownership of rawStream.
+ SkPiexStream piexStream(rawStream.get());
+ ::piex::PreviewImageData imageData;
+ if (::piex::IsRaw(&piexStream)) {
+ ::piex::Error error = ::piex::GetPreviewImageData(&piexStream, &imageData);
+
+ // Theoretically PIEX can return JPEG compressed image or uncompressed RGB image. We only
+ // handle the JPEG compressed preview image here.
+ if (error == ::piex::Error::kOk && imageData.preview.length > 0 &&
+ imageData.preview.format == ::piex::Image::kJpegCompressed)
+ {
+ // transferBuffer() is destructive to the rawStream. Abandon the rawStream after this
+ // function call.
+ // FIXME: one may avoid the copy of memoryStream and use the buffered rawStream.
+ SkMemoryStream* memoryStream =
+ rawStream->transferBuffer(imageData.preview.offset, imageData.preview.length);
+ return memoryStream ? SkJpegCodec::NewFromStream(memoryStream) : nullptr;
+ } else if (error == ::piex::Error::kFail) {
+ return nullptr;
+ }
+ }
+
+    // Takes ownership of the rawStream.
+ SkAutoTDelete<SkDngImage> dngImage(SkDngImage::NewFromStream(rawStream.release()));
+ if (!dngImage) {
+ return nullptr;
+ }
+
+ return new SkRawCodec(dngImage.release());
+}
+
+SkCodec::Result SkRawCodec::onGetPixels(const SkImageInfo& requestedInfo, void* dst,
+ size_t dstRowBytes, const Options& options,
+ SkPMColor ctable[], int* ctableCount,
+ int* rowsDecoded) {
+ if (!conversion_possible_ignore_color_space(requestedInfo, this->getInfo())) {
+ SkCodecPrintf("Error: cannot convert input type to output type.\n");
+ return kInvalidConversion;
+ }
+
+ SkAutoTDelete<SkSwizzler> swizzler(SkSwizzler::CreateSwizzler(
+ this->getEncodedInfo(), nullptr, requestedInfo, options));
+ SkASSERT(swizzler);
+
+ const int width = requestedInfo.width();
+ const int height = requestedInfo.height();
+ SkAutoTDelete<dng_image> image(fDngImage->render(width, height));
+ if (!image) {
+ return kInvalidInput;
+ }
+
+    // Because the DNG SDK cannot guarantee rendering to the requested size, we allow a small
+    // difference. Only the overlapping region will be converted.
+ const float maxDiffRatio = 1.03f;
+ const dng_point& imageSize = image->Size();
+ if (imageSize.h / width > maxDiffRatio || imageSize.h < width ||
+ imageSize.v / height > maxDiffRatio || imageSize.v < height) {
+ return SkCodec::kInvalidScale;
+ }
+
+ void* dstRow = dst;
+ SkAutoTMalloc<uint8_t> srcRow(width * 3);
+
+ dng_pixel_buffer buffer;
+ buffer.fData = &srcRow[0];
+ buffer.fPlane = 0;
+ buffer.fPlanes = 3;
+ buffer.fColStep = buffer.fPlanes;
+ buffer.fPlaneStep = 1;
+ buffer.fPixelType = ttByte;
+ buffer.fPixelSize = sizeof(uint8_t);
+ buffer.fRowStep = width * 3;
+
+ for (int i = 0; i < height; ++i) {
+ buffer.fArea = dng_rect(i, 0, i + 1, width);
+
+ try {
+ image->Get(buffer, dng_image::edge_zero);
+ } catch (...) {
+ *rowsDecoded = i;
+ return kIncompleteInput;
+ }
+
+ swizzler->swizzle(dstRow, &srcRow[0]);
+ dstRow = SkTAddOffset<void>(dstRow, dstRowBytes);
+ }
+ return kSuccess;
+}
+
+SkISize SkRawCodec::onGetScaledDimensions(float desiredScale) const {
+ SkASSERT(desiredScale <= 1.f);
+
+ const SkISize dim = this->getInfo().dimensions();
+ SkASSERT(dim.fWidth != 0 && dim.fHeight != 0);
+
+ if (!fDngImage->isScalable()) {
+ return dim;
+ }
+
+ // Limits the minimum size to be 80 on the short edge.
+ const float shortEdge = static_cast<float>(SkTMin(dim.fWidth, dim.fHeight));
+ if (desiredScale < 80.f / shortEdge) {
+ desiredScale = 80.f / shortEdge;
+ }
+
+    // For Xtrans images, the integer-factor scaling does not support the half-size scaling case
+    // (stronger downscaling is fine). In that case, return the factor-of-3 scaling instead.
+ if (fDngImage->isXtransImage() && desiredScale > 1.f / 3.f && desiredScale < 1.f) {
+ desiredScale = 1.f / 3.f;
+ }
+
+ // Round to integer-factors.
+ const float finalScale = std::floor(1.f/ desiredScale);
+ return SkISize::Make(static_cast<int32_t>(std::floor(dim.fWidth / finalScale)),
+ static_cast<int32_t>(std::floor(dim.fHeight / finalScale)));
+}
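+
+// A worked example of the rounding above, using hypothetical numbers: for a scalable, non-Xtrans
+// 4000x3000 image and desiredScale == 0.3f, the 80-pixel clamp does not apply, finalScale ==
+// floor(1.f / 0.3f) == 3, and the returned dimensions are 1333x1000.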
+
+bool SkRawCodec::onDimensionsSupported(const SkISize& dim) {
+ const SkISize fullDim = this->getInfo().dimensions();
+ const float fullShortEdge = static_cast<float>(SkTMin(fullDim.fWidth, fullDim.fHeight));
+ const float shortEdge = static_cast<float>(SkTMin(dim.fWidth, dim.fHeight));
+
+ SkISize sizeFloor = this->onGetScaledDimensions(1.f / std::floor(fullShortEdge / shortEdge));
+ SkISize sizeCeil = this->onGetScaledDimensions(1.f / std::ceil(fullShortEdge / shortEdge));
+ return sizeFloor == dim || sizeCeil == dim;
+}
+
+SkRawCodec::~SkRawCodec() {}
+
+SkRawCodec::SkRawCodec(SkDngImage* dngImage)
+ : INHERITED(dngImage->width(), dngImage->height(), dngImage->getEncodedInfo(), nullptr)
+ , fDngImage(dngImage) {}
diff --git a/gfx/skia/skia/src/codec/SkRawCodec.h b/gfx/skia/skia/src/codec/SkRawCodec.h
new file mode 100644
index 000000000..3ca7b9c17
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkRawCodec.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRawCodec_DEFINED
+#define SkRawCodec_DEFINED
+
+#include "SkCodec.h"
+#include "SkColorSpace.h"
+#include "SkImageInfo.h"
+#include "SkTypes.h"
+
+class SkDngImage;
+class SkStream;
+
+/*
+ *
+ * This class implements the decoding for RAW images
+ *
+ */
+class SkRawCodec : public SkCodec {
+public:
+
+ /*
+ * Creates a RAW decoder
+ * Takes ownership of the stream
+ */
+ static SkCodec* NewFromStream(SkStream*);
+
+ ~SkRawCodec() override;
+
+protected:
+
+ Result onGetPixels(const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes, const Options&,
+ SkPMColor*, int*, int*) override;
+
+ SkEncodedFormat onGetEncodedFormat() const override {
+ return kDNG_SkEncodedFormat;
+ }
+
+ SkISize onGetScaledDimensions(float desiredScale) const override;
+
+ bool onDimensionsSupported(const SkISize&) override;
+
+private:
+
+ /*
+ * Creates an instance of the decoder
+ * Called only by NewFromStream, takes ownership of dngImage.
+ */
+ SkRawCodec(SkDngImage* dngImage);
+
+ SkAutoTDelete<SkDngImage> fDngImage;
+
+ typedef SkCodec INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkSampledCodec.cpp b/gfx/skia/skia/src/codec/SkSampledCodec.cpp
new file mode 100644
index 000000000..cca26000d
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkSampledCodec.cpp
@@ -0,0 +1,358 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCodec.h"
+#include "SkCodecPriv.h"
+#include "SkMath.h"
+#include "SkSampledCodec.h"
+#include "SkSampler.h"
+#include "SkTemplates.h"
+
+SkSampledCodec::SkSampledCodec(SkCodec* codec)
+ : INHERITED(codec)
+{}
+
+SkISize SkSampledCodec::accountForNativeScaling(int* sampleSizePtr, int* nativeSampleSize) const {
+ SkISize preSampledSize = this->codec()->getInfo().dimensions();
+ int sampleSize = *sampleSizePtr;
+ SkASSERT(sampleSize > 1);
+
+ if (nativeSampleSize) {
+ *nativeSampleSize = 1;
+ }
+
+ // Only JPEG supports native downsampling.
+ if (this->codec()->getEncodedFormat() == kJPEG_SkEncodedFormat) {
+ // See if libjpeg supports this scale directly
+ switch (sampleSize) {
+ case 2:
+ case 4:
+ case 8:
+ // This class does not need to do any sampling.
+ *sampleSizePtr = 1;
+ return this->codec()->getScaledDimensions(get_scale_from_sample_size(sampleSize));
+ default:
+ break;
+ }
+
+ // Check if sampleSize is a multiple of something libjpeg can support.
+ int remainder;
+ const int sampleSizes[] = { 8, 4, 2 };
+ for (int supportedSampleSize : sampleSizes) {
+ int actualSampleSize;
+ SkTDivMod(sampleSize, supportedSampleSize, &actualSampleSize, &remainder);
+ if (0 == remainder) {
+ float scale = get_scale_from_sample_size(supportedSampleSize);
+
+ // this->codec() will scale to this size.
+ preSampledSize = this->codec()->getScaledDimensions(scale);
+
+ // And then this class will sample it.
+ *sampleSizePtr = actualSampleSize;
+ if (nativeSampleSize) {
+ *nativeSampleSize = supportedSampleSize;
+ }
+ break;
+ }
+ }
+ }
+
+ return preSampledSize;
+}
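+
+// A worked example of the split above, using hypothetical numbers: for a JPEG with a requested
+// sampleSize of 6, accountForNativeScaling() only hands 2, 4, and 8 to the codec directly, but 6
+// is a multiple of 2, so this->codec() scales natively by 2 (nativeSampleSize == 2) and this
+// class samples the remaining factor of 3 (*sampleSizePtr == 3).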
+
+SkISize SkSampledCodec::onGetSampledDimensions(int sampleSize) const {
+ const SkISize size = this->accountForNativeScaling(&sampleSize);
+ return SkISize::Make(get_scaled_dimension(size.width(), sampleSize),
+ get_scaled_dimension(size.height(), sampleSize));
+}
+
+SkCodec::Result SkSampledCodec::onGetAndroidPixels(const SkImageInfo& info, void* pixels,
+ size_t rowBytes, const AndroidOptions& options) {
+ // Create an Options struct for the codec.
+ SkCodec::Options codecOptions;
+ codecOptions.fZeroInitialized = options.fZeroInitialized;
+
+ SkIRect* subset = options.fSubset;
+ if (!subset || subset->size() == this->codec()->getInfo().dimensions()) {
+ if (this->codec()->dimensionsSupported(info.dimensions())) {
+ return this->codec()->getPixels(info, pixels, rowBytes, &codecOptions,
+ options.fColorPtr, options.fColorCount);
+ }
+
+ // If the native codec does not support the requested scale, scale by sampling.
+ return this->sampledDecode(info, pixels, rowBytes, options);
+ }
+
+ // We are performing a subset decode.
+ int sampleSize = options.fSampleSize;
+ SkISize scaledSize = this->getSampledDimensions(sampleSize);
+ if (!this->codec()->dimensionsSupported(scaledSize)) {
+ // If the native codec does not support the requested scale, scale by sampling.
+ return this->sampledDecode(info, pixels, rowBytes, options);
+ }
+
+ // Calculate the scaled subset bounds.
+ int scaledSubsetX = subset->x() / sampleSize;
+ int scaledSubsetY = subset->y() / sampleSize;
+ int scaledSubsetWidth = info.width();
+ int scaledSubsetHeight = info.height();
+
+ const SkImageInfo scaledInfo = info.makeWH(scaledSize.width(), scaledSize.height());
+
+ {
+ // Although startScanlineDecode expects the bottom and top to match the
+ // SkImageInfo, startIncrementalDecode uses them to determine which rows to
+ // decode.
+ SkIRect incrementalSubset = SkIRect::MakeXYWH(scaledSubsetX, scaledSubsetY,
+ scaledSubsetWidth, scaledSubsetHeight);
+ codecOptions.fSubset = &incrementalSubset;
+ const SkCodec::Result startResult = this->codec()->startIncrementalDecode(
+ scaledInfo, pixels, rowBytes, &codecOptions,
+ options.fColorPtr, options.fColorCount);
+ if (SkCodec::kSuccess == startResult) {
+ int rowsDecoded;
+ const SkCodec::Result incResult = this->codec()->incrementalDecode(&rowsDecoded);
+ if (incResult == SkCodec::kSuccess) {
+ return SkCodec::kSuccess;
+ }
+ SkASSERT(SkCodec::kIncompleteInput == incResult);
+
+ // FIXME: Can zero initialized be read from SkCodec::fOptions?
+ this->codec()->fillIncompleteImage(scaledInfo, pixels, rowBytes,
+ options.fZeroInitialized, scaledSubsetHeight, rowsDecoded);
+ return SkCodec::kIncompleteInput;
+ } else if (startResult != SkCodec::kUnimplemented) {
+ return startResult;
+ }
+ // Otherwise fall down to use the old scanline decoder.
+ // codecOptions.fSubset will be reset below, so it will not continue to
+ // point to the object that is no longer on the stack.
+ }
+
+ // Start the scanline decode.
+ SkIRect scanlineSubset = SkIRect::MakeXYWH(scaledSubsetX, 0, scaledSubsetWidth,
+ scaledSize.height());
+ codecOptions.fSubset = &scanlineSubset;
+
+ SkCodec::Result result = this->codec()->startScanlineDecode(scaledInfo,
+ &codecOptions, options.fColorPtr, options.fColorCount);
+ if (SkCodec::kSuccess != result) {
+ return result;
+ }
+
+ // At this point, we are only concerned with subsetting. Either no scale was
+    // requested, or this->codec() is handling the scale.
+ // Note that subsetting is only supported for kTopDown, so this code will not be
+ // reached for other orders.
+ SkASSERT(this->codec()->getScanlineOrder() == SkCodec::kTopDown_SkScanlineOrder);
+ if (!this->codec()->skipScanlines(scaledSubsetY)) {
+ this->codec()->fillIncompleteImage(info, pixels, rowBytes, options.fZeroInitialized,
+ scaledSubsetHeight, 0);
+ return SkCodec::kIncompleteInput;
+ }
+
+ int decodedLines = this->codec()->getScanlines(pixels, scaledSubsetHeight, rowBytes);
+ if (decodedLines != scaledSubsetHeight) {
+ return SkCodec::kIncompleteInput;
+ }
+ return SkCodec::kSuccess;
+}
+
+
+SkCodec::Result SkSampledCodec::sampledDecode(const SkImageInfo& info, void* pixels,
+ size_t rowBytes, const AndroidOptions& options) {
+ // We should only call this function when sampling.
+ SkASSERT(options.fSampleSize > 1);
+
+ // Create options struct for the codec.
+ SkCodec::Options sampledOptions;
+ sampledOptions.fZeroInitialized = options.fZeroInitialized;
+
+ // FIXME: This was already called by onGetAndroidPixels. Can we reduce that?
+ int sampleSize = options.fSampleSize;
+ int nativeSampleSize;
+ SkISize nativeSize = this->accountForNativeScaling(&sampleSize, &nativeSampleSize);
+
+ // Check if there is a subset.
+ SkIRect subset;
+ int subsetY = 0;
+ int subsetWidth = nativeSize.width();
+ int subsetHeight = nativeSize.height();
+ if (options.fSubset) {
+ // We will need to know about subsetting in the y-dimension in order to use the
+ // scanline decoder.
+ // Update the subset to account for scaling done by this->codec().
+ SkIRect* subsetPtr = options.fSubset;
+
+ // Do the divide ourselves, instead of calling get_scaled_dimension. If
+ // X and Y are 0, they should remain 0, rather than being upgraded to 1
+ // due to being smaller than the sampleSize.
+ const int subsetX = subsetPtr->x() / nativeSampleSize;
+ subsetY = subsetPtr->y() / nativeSampleSize;
+
+ subsetWidth = get_scaled_dimension(subsetPtr->width(), nativeSampleSize);
+ subsetHeight = get_scaled_dimension(subsetPtr->height(), nativeSampleSize);
+
+ // The scanline decoder only needs to be aware of subsetting in the x-dimension.
+ subset.setXYWH(subsetX, 0, subsetWidth, nativeSize.height());
+ sampledOptions.fSubset = &subset;
+ }
+
+ // Since we guarantee that output dimensions are always at least one (even if the sampleSize
+ // is greater than a given dimension), the input sampleSize is not always the sampleSize that
+ // we use in practice.
+ const int sampleX = subsetWidth / info.width();
+ const int sampleY = subsetHeight / info.height();
+
+ const int samplingOffsetY = get_start_coord(sampleY);
+ const int startY = samplingOffsetY + subsetY;
+ int dstHeight = info.height();
+
+ const SkImageInfo nativeInfo = info.makeWH(nativeSize.width(), nativeSize.height());
+
+ {
+ // Although startScanlineDecode expects the bottom and top to match the
+ // SkImageInfo, startIncrementalDecode uses them to determine which rows to
+ // decode.
+ // Note: We *could* use "subsetY" and "subsetHeight" (calculated above) for
+ // incrementalSubset, but this code gives us a tighter bounds on the subset,
+ // meaning that we can start with the first row actually needed by the output,
+ // and stop when we've decoded the last row needed by the output.
+ SkIRect incrementalSubset;
+ incrementalSubset.fTop = startY;
+ incrementalSubset.fBottom = startY + (dstHeight - 1) * sampleY + 1;
+ if (sampledOptions.fSubset) {
+ incrementalSubset.fLeft = sampledOptions.fSubset->fLeft;
+ incrementalSubset.fRight = sampledOptions.fSubset->fRight;
+ } else {
+ incrementalSubset.fLeft = 0;
+ incrementalSubset.fRight = nativeSize.width();
+ }
+ SkCodec::Options incrementalOptions = sampledOptions;
+ incrementalOptions.fSubset = &incrementalSubset;
+ const SkCodec::Result startResult = this->codec()->startIncrementalDecode(nativeInfo,
+ pixels, rowBytes, &incrementalOptions, options.fColorPtr, options.fColorCount);
+ if (SkCodec::kSuccess == startResult) {
+ SkSampler* sampler = this->codec()->getSampler(true);
+ if (!sampler) {
+ return SkCodec::kUnimplemented;
+ }
+
+ if (sampler->setSampleX(sampleX) != info.width()) {
+ return SkCodec::kInvalidScale;
+ }
+ if (get_scaled_dimension(subsetHeight, sampleY) != info.height()) {
+ return SkCodec::kInvalidScale;
+ }
+
+ sampler->setSampleY(sampleY);
+
+ int rowsDecoded;
+ const SkCodec::Result incResult = this->codec()->incrementalDecode(&rowsDecoded);
+ if (incResult == SkCodec::kSuccess) {
+ return SkCodec::kSuccess;
+ }
+ SkASSERT(incResult == SkCodec::kIncompleteInput);
+
+            // Count only the rows that were decoded and not skipped by sampling.
+ const int trueRowsDecoded = (rowsDecoded + sampleY - 1) / sampleY;
+ this->codec()->fillIncompleteImage(info, pixels, rowBytes, options.fZeroInitialized,
+ info.height(), trueRowsDecoded);
+ return SkCodec::kIncompleteInput;
+ } else if (startResult != SkCodec::kUnimplemented) {
+ return startResult;
+ } // kUnimplemented means use the old method.
+ }
+
+ // Start the scanline decode.
+ SkCodec::Result result = this->codec()->startScanlineDecode(nativeInfo,
+ &sampledOptions, options.fColorPtr, options.fColorCount);
+ if (SkCodec::kSuccess != result) {
+ return result;
+ }
+
+ SkSampler* sampler = this->codec()->getSampler(true);
+ if (!sampler) {
+ return SkCodec::kUnimplemented;
+ }
+
+ if (sampler->setSampleX(sampleX) != info.width()) {
+ return SkCodec::kInvalidScale;
+ }
+ if (get_scaled_dimension(subsetHeight, sampleY) != info.height()) {
+ return SkCodec::kInvalidScale;
+ }
+
+ switch(this->codec()->getScanlineOrder()) {
+ case SkCodec::kTopDown_SkScanlineOrder: {
+ if (!this->codec()->skipScanlines(startY)) {
+ this->codec()->fillIncompleteImage(info, pixels, rowBytes, options.fZeroInitialized,
+ dstHeight, 0);
+ return SkCodec::kIncompleteInput;
+ }
+ void* pixelPtr = pixels;
+ for (int y = 0; y < dstHeight; y++) {
+ if (1 != this->codec()->getScanlines(pixelPtr, 1, rowBytes)) {
+ this->codec()->fillIncompleteImage(info, pixels, rowBytes,
+ options.fZeroInitialized, dstHeight, y + 1);
+ return SkCodec::kIncompleteInput;
+ }
+ if (y < dstHeight - 1) {
+ if (!this->codec()->skipScanlines(sampleY - 1)) {
+ this->codec()->fillIncompleteImage(info, pixels, rowBytes,
+ options.fZeroInitialized, dstHeight, y + 1);
+ return SkCodec::kIncompleteInput;
+ }
+ }
+ pixelPtr = SkTAddOffset<void>(pixelPtr, rowBytes);
+ }
+ return SkCodec::kSuccess;
+ }
+ case SkCodec::kOutOfOrder_SkScanlineOrder:
+ case SkCodec::kBottomUp_SkScanlineOrder: {
+ // Note that these modes do not support subsetting.
+ SkASSERT(0 == subsetY && nativeSize.height() == subsetHeight);
+ int y;
+ for (y = 0; y < nativeSize.height(); y++) {
+ int srcY = this->codec()->nextScanline();
+ if (is_coord_necessary(srcY, sampleY, dstHeight)) {
+ void* pixelPtr = SkTAddOffset<void>(pixels,
+ rowBytes * get_dst_coord(srcY, sampleY));
+ if (1 != this->codec()->getScanlines(pixelPtr, 1, rowBytes)) {
+ break;
+ }
+ } else {
+ if (!this->codec()->skipScanlines(1)) {
+ break;
+ }
+ }
+ }
+
+ if (nativeSize.height() == y) {
+ return SkCodec::kSuccess;
+ }
+
+ // We handle filling uninitialized memory here instead of using this->codec().
+ // this->codec() does not know that we are sampling.
+ const uint64_t fillValue = this->codec()->getFillValue(info);
+ const SkImageInfo fillInfo = info.makeWH(info.width(), 1);
+ for (; y < nativeSize.height(); y++) {
+ int srcY = this->codec()->outputScanline(y);
+ if (!is_coord_necessary(srcY, sampleY, dstHeight)) {
+ continue;
+ }
+
+ void* rowPtr = SkTAddOffset<void>(pixels, rowBytes * get_dst_coord(srcY, sampleY));
+ SkSampler::Fill(fillInfo, rowPtr, rowBytes, fillValue, options.fZeroInitialized);
+ }
+ return SkCodec::kIncompleteInput;
+ }
+ default:
+ SkASSERT(false);
+ return SkCodec::kUnimplemented;
+ }
+}
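
For reference, the scanline path above decodes one row and then skips (sampleY - 1)
rows, so the source rows that reach the output are startY, startY + sampleY, and so
on. Below is a minimal standalone sketch of that row-selection math; get_start_coord
and get_scaled_dimension are local stand-ins for the SkCodecPriv.h helpers this file
uses and may differ from the real implementations in detail.

#include <cstdio>

// Local stand-ins for the SkCodecPriv.h helpers (illustrative only).
static int get_start_coord(int sampleFactor) { return sampleFactor / 2; }
static int get_scaled_dimension(int srcDim, int sampleSize) {
    return (sampleSize > srcDim) ? 1 : srcDim / sampleSize;
}

int main() {
    const int srcHeight = 10;
    const int sampleY = 3;
    const int dstHeight = get_scaled_dimension(srcHeight, sampleY);  // 3
    const int startY = get_start_coord(sampleY);                     // 1
    // Mirrors the kTopDown loop: one row decoded, then (sampleY - 1) rows skipped.
    for (int y = 0; y < dstHeight; y++) {
        printf("dst row %d <- src row %d\n", y, startY + y * sampleY);
    }
    return 0;
}
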
diff --git a/gfx/skia/skia/src/codec/SkSampledCodec.h b/gfx/skia/skia/src/codec/SkSampledCodec.h
new file mode 100644
index 000000000..35e4f571d
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkSampledCodec.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkSampledCodec_DEFINED
+#define SkSampledCodec_DEFINED
+
+#include "SkAndroidCodec.h"
+#include "SkCodec.h"
+
+/**
+ * This class implements the functionality of SkAndroidCodec. Scaling will
+ * be provided by sampling if it cannot be provided by fCodec.
+ */
+class SkSampledCodec : public SkAndroidCodec {
+public:
+
+ explicit SkSampledCodec(SkCodec*);
+
+ virtual ~SkSampledCodec() {}
+
+protected:
+
+ SkISize onGetSampledDimensions(int sampleSize) const override;
+
+ bool onGetSupportedSubset(SkIRect* desiredSubset) const override { return true; }
+
+ SkCodec::Result onGetAndroidPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const AndroidOptions& options) override;
+
+private:
+ /**
+ * Find the best way to account for native scaling.
+ *
+ * Return a size that fCodec can scale to, and adjust sampleSize to finish scaling.
+ *
+ * @param sampleSize As an input, the requested sample size.
+ * As an output, sampling needed after letting fCodec
+ * scale to the returned dimensions.
+ * @param nativeSampleSize Optional output parameter. Will be set to the
+     *                          effective sample size applied by fCodec.
+ * @return SkISize The size that fCodec should scale to.
+ */
+ SkISize accountForNativeScaling(int* sampleSize, int* nativeSampleSize = nullptr) const;
+
+ /**
+ * This fulfills the same contract as onGetAndroidPixels().
+ *
+ * We call this function from onGetAndroidPixels() if we have determined
+ * that fCodec does not support the requested scale, and we need to
+ * provide the scale by sampling.
+ */
+ SkCodec::Result sampledDecode(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const AndroidOptions& options);
+
+ typedef SkAndroidCodec INHERITED;
+};
+#endif // SkSampledCodec_DEFINED
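
A sketch of how a caller might drive the SkAndroidCodec interface that SkSampledCodec
implements, assuming the factory and option names as they appear in this snapshot
(other Skia versions may differ); decode_quarter_size is a hypothetical helper, and
error handling is collapsed to a bool for brevity.

#include "SkAndroidCodec.h"
#include "SkBitmap.h"
#include "SkStream.h"
#include "SkTemplates.h"

// Decode a stream at roughly 1/4 of its native width and height.
static bool decode_quarter_size(SkStream* stream, SkBitmap* dst) {
    SkAutoTDelete<SkAndroidCodec> codec(SkAndroidCodec::NewFromStream(stream));
    if (!codec) {
        return false;
    }
    const int sampleSize = 4;
    const SkISize scaled = codec->getSampledDimensions(sampleSize);
    const SkImageInfo info = codec->getInfo()
            .makeWH(scaled.width(), scaled.height())
            .makeColorType(kN32_SkColorType);
    if (!dst->tryAllocPixels(info)) {
        return false;
    }
    SkAndroidCodec::AndroidOptions options;
    options.fSampleSize = sampleSize;
    return SkCodec::kSuccess == codec->getAndroidPixels(
            info, dst->getPixels(), dst->rowBytes(), &options);
}
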
diff --git a/gfx/skia/skia/src/codec/SkSampler.cpp b/gfx/skia/skia/src/codec/SkSampler.cpp
new file mode 100644
index 000000000..244aa3b87
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkSampler.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCodec.h"
+#include "SkCodecPriv.h"
+#include "SkSampler.h"
+#include "SkUtils.h"
+
+void SkSampler::Fill(const SkImageInfo& info, void* dst, size_t rowBytes,
+ uint64_t colorOrIndex, SkCodec::ZeroInitialized zeroInit) {
+ SkASSERT(dst != nullptr);
+
+ // Calculate bytes to fill. We use getSafeSize since the last row may not be padded.
+ const size_t bytesToFill = info.getSafeSize(rowBytes);
+ const int width = info.width();
+ const int numRows = info.height();
+
+ // Use the proper memset routine to fill the remaining bytes
+ switch (info.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType: {
+ // If memory is zero initialized, we may not need to fill
+ uint32_t color = (uint32_t) colorOrIndex;
+ if (SkCodec::kYes_ZeroInitialized == zeroInit && 0 == color) {
+ return;
+ }
+
+ uint32_t* dstRow = (uint32_t*) dst;
+ for (int row = 0; row < numRows; row++) {
+ sk_memset32((uint32_t*) dstRow, color, width);
+ dstRow = SkTAddOffset<uint32_t>(dstRow, rowBytes);
+ }
+ break;
+ }
+ case kRGB_565_SkColorType: {
+ // If the destination is k565, the caller passes in a 16-bit color.
+ // We will not assert that the high bits of colorOrIndex must be zeroed.
+ // This allows us to take advantage of the fact that the low 16 bits of an
+            // SkPMColor may be a valid 565 color. For example, the low 16
+ // bits of SK_ColorBLACK are identical to the 565 representation
+ // for black.
+
+ // If memory is zero initialized, we may not need to fill
+ uint16_t color = (uint16_t) colorOrIndex;
+ if (SkCodec::kYes_ZeroInitialized == zeroInit && 0 == color) {
+ return;
+ }
+
+ uint16_t* dstRow = (uint16_t*) dst;
+ for (int row = 0; row < numRows; row++) {
+ sk_memset16((uint16_t*) dstRow, color, width);
+ dstRow = SkTAddOffset<uint16_t>(dstRow, rowBytes);
+ }
+ break;
+ }
+ case kIndex_8_SkColorType:
+ // On an index destination color type, always assume the input is an index.
+ // Fall through
+ case kGray_8_SkColorType:
+ // If the destination is kGray, the caller passes in an 8-bit color.
+ // We will not assert that the high bits of colorOrIndex must be zeroed.
+ // This allows us to take advantage of the fact that the low 8 bits of an
+            // SkPMColor may be a valid grayscale color. For example, the low 8
+ // bits of SK_ColorBLACK are identical to the grayscale representation
+ // for black.
+
+ // If memory is zero initialized, we may not need to fill
+ if (SkCodec::kYes_ZeroInitialized == zeroInit && 0 == (uint8_t) colorOrIndex) {
+ return;
+ }
+
+ memset(dst, (uint8_t) colorOrIndex, bytesToFill);
+ break;
+ case kRGBA_F16_SkColorType: {
+ uint64_t color = colorOrIndex;
+ if (SkCodec::kYes_ZeroInitialized == zeroInit && 0 == color) {
+ return;
+ }
+
+ uint64_t* dstRow = (uint64_t*) dst;
+ for (int row = 0; row < numRows; row++) {
+ sk_memset64((uint64_t*) dstRow, color, width);
+ dstRow = SkTAddOffset<uint64_t>(dstRow, rowBytes);
+ }
+ break;
+ }
+ default:
+ SkCodecPrintf("Error: Unsupported dst color type for fill(). Doing nothing.\n");
+ SkASSERT(false);
+ break;
+ }
+}
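
The k565 and kGray cases above rely on the low bits of a color such as SK_ColorBLACK
already being a valid fill value. A standalone illustration of that point; pack_565
is a hypothetical packer, not one of Skia's own macros.

#include <cstdint>
#include <cstdio>

// Hypothetical 888 -> 565 packer, for illustration only.
static uint16_t pack_565(uint8_t r, uint8_t g, uint8_t b) {
    return (uint16_t)(((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
}

int main() {
    const uint32_t black32 = 0xFF000000;  // opaque black, as in SK_ColorBLACK
    printf("565 black, packed:    0x%04X\n", pack_565(0, 0, 0));   // 0x0000
    printf("565 black, truncated: 0x%04X\n", (uint16_t)black32);   // 0x0000
    // Because the two agree for black, Fill() can accept the low 16 bits of a
    // 32-bit color without asserting that the high bits are zero.
    return 0;
}
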
diff --git a/gfx/skia/skia/src/codec/SkSampler.h b/gfx/skia/skia/src/codec/SkSampler.h
new file mode 100644
index 000000000..00015585a
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkSampler.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkSampler_DEFINED
+#define SkSampler_DEFINED
+
+#include "SkCodec.h"
+#include "SkTypes.h"
+
+class SkSampler : public SkNoncopyable {
+public:
+ /**
+ * Update the sampler to sample every sampleX'th pixel. Returns the
+ * width after sampling.
+ */
+ int setSampleX(int sampleX) {
+ return this->onSetSampleX(sampleX);
+ }
+
+ /**
+ * Update the sampler to sample every sampleY'th row.
+ */
+ void setSampleY(int sampleY) {
+ fSampleY = sampleY;
+ }
+
+ /**
+ * Retrieve the value set for sampleY.
+ */
+ int sampleY() const {
+ return fSampleY;
+ }
+
+ /**
+ * Based on fSampleY, return whether this row belongs in the output.
+ *
+ * @param row Row of the image, starting with the first row used in the
+ * output.
+ */
+ bool rowNeeded(int row) const {
+ return row % fSampleY == 0;
+ }
+
+ /**
+ * Fill the remainder of the destination with a single color
+ *
+ * @param info
+ * Contains the color type of the rows to fill.
+     *          Contains the width of the destination rows to fill.
+ * Contains the number of rows that we need to fill.
+ *
+ * @param dst
+ * The destination row to fill from.
+ *
+ * @param rowBytes
+ * Stride in bytes of the destination.
+ *
+ * @param colorOrIndex
+ * If colorType is kF16, colorOrIndex is treated as a 64-bit color.
+ * If colorType is kN32, colorOrIndex is treated as a 32-bit color.
+ * If colorType is k565, colorOrIndex is treated as a 16-bit color.
+ * If colorType is kGray, colorOrIndex is treated as an 8-bit color.
+ * If colorType is kIndex, colorOrIndex is treated as an 8-bit index.
+ * Other SkColorTypes are not supported.
+ *
+ * @param zeroInit
+ * Indicates whether memory is already zero initialized.
+ *
+ */
+ static void Fill(const SkImageInfo& info, void* dst, size_t rowBytes,
+ uint64_t colorOrIndex, SkCodec::ZeroInitialized zeroInit);
+
+ /**
+ * Allow subclasses to implement unique versions of fill().
+ */
+ virtual void fill(const SkImageInfo& info, void* dst, size_t rowBytes,
+ uint64_t colorOrIndex, SkCodec::ZeroInitialized zeroInit) {}
+
+ SkSampler()
+ : fSampleY(1)
+ {}
+
+ virtual ~SkSampler() {}
+private:
+ int fSampleY;
+
+ virtual int onSetSampleX(int) = 0;
+};
+
+#endif // SkSampler_DEFINED
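
A short sketch of the sampleY/rowNeeded contract documented above: with fSampleY set
to k, the rows kept are those whose index (counted from the first output row) is a
multiple of k, and the number of kept rows matches the scaled height. The
get_scaled_dimension below is a local stand-in for the SkCodecPriv.h helper.

#include <cstdio>

static int get_scaled_dimension(int srcDim, int sampleSize) {   // stand-in
    return (sampleSize > srcDim) ? 1 : srcDim / sampleSize;
}

int main() {
    const int sampleY = 3;
    const int srcRows = 9;
    int kept = 0;
    for (int row = 0; row < srcRows; row++) {
        const bool needed = (row % sampleY == 0);   // same test as rowNeeded()
        printf("row %d: %s\n", row, needed ? "kept" : "skipped");
        if (needed) {
            kept++;
        }
    }
    printf("kept %d rows, scaled height %d\n", kept,
           get_scaled_dimension(srcRows, sampleY));
    return 0;
}
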
diff --git a/gfx/skia/skia/src/codec/SkSwizzler.cpp b/gfx/skia/skia/src/codec/SkSwizzler.cpp
new file mode 100644
index 000000000..c9eb92305
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkSwizzler.cpp
@@ -0,0 +1,1045 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCodecPriv.h"
+#include "SkColorPriv.h"
+#include "SkOpts.h"
+#include "SkSwizzler.h"
+#include "SkTemplates.h"
+
+static void copy(void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ memcpy(dst, src + offset, width * bpp);
+}
+
+static void sample1(void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ src += offset;
+ uint8_t* dst8 = (uint8_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst8[x] = *src;
+ src += deltaSrc;
+ }
+}
+
+static void sample2(void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ src += offset;
+ uint16_t* dst16 = (uint16_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst16[x] = *((const uint16_t*) src);
+ src += deltaSrc;
+ }
+}
+
+static void sample4(void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ src += offset;
+ uint32_t* dst32 = (uint32_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst32[x] = *((const uint32_t*) src);
+ src += deltaSrc;
+ }
+}
+
+// kBit
+// These routines exclusively choose between white and black
+
+#define GRAYSCALE_BLACK 0
+#define GRAYSCALE_WHITE 0xFF
+
+
+// same as swizzle_bit_to_index and swizzle_bit_to_n32 except for value assigned to dst[x]
+static void swizzle_bit_to_grayscale(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor* /*ctable*/) {
+
+ uint8_t* SK_RESTRICT dst = (uint8_t*) dstRow;
+
+ // increment src by byte offset and bitIndex by bit offset
+ src += offset / 8;
+ int bitIndex = offset % 8;
+ uint8_t currByte = *src;
+
+ dst[0] = ((currByte >> (7-bitIndex)) & 1) ? GRAYSCALE_WHITE : GRAYSCALE_BLACK;
+
+ for (int x = 1; x < dstWidth; x++) {
+ int bitOffset = bitIndex + deltaSrc;
+ bitIndex = bitOffset % 8;
+ currByte = *(src += bitOffset / 8);
+ dst[x] = ((currByte >> (7-bitIndex)) & 1) ? GRAYSCALE_WHITE : GRAYSCALE_BLACK;
+ }
+}
+
+#undef GRAYSCALE_BLACK
+#undef GRAYSCALE_WHITE
+
+// same as swizzle_bit_to_grayscale and swizzle_bit_to_n32 except for value assigned to dst[x]
+static void swizzle_bit_to_index(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor* /*ctable*/) {
+ uint8_t* SK_RESTRICT dst = (uint8_t*) dstRow;
+
+ // increment src by byte offset and bitIndex by bit offset
+ src += offset / 8;
+ int bitIndex = offset % 8;
+ uint8_t currByte = *src;
+
+ dst[0] = ((currByte >> (7-bitIndex)) & 1);
+
+ for (int x = 1; x < dstWidth; x++) {
+ int bitOffset = bitIndex + deltaSrc;
+ bitIndex = bitOffset % 8;
+ currByte = *(src += bitOffset / 8);
+ dst[x] = ((currByte >> (7-bitIndex)) & 1);
+ }
+}
+
+// same as swizzle_bit_to_grayscale and swizzle_bit_to_index except for value assigned to dst[x]
+static void swizzle_bit_to_n32(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor* /*ctable*/) {
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*) dstRow;
+
+ // increment src by byte offset and bitIndex by bit offset
+ src += offset / 8;
+ int bitIndex = offset % 8;
+ uint8_t currByte = *src;
+
+ dst[0] = ((currByte >> (7 - bitIndex)) & 1) ? SK_ColorWHITE : SK_ColorBLACK;
+
+ for (int x = 1; x < dstWidth; x++) {
+ int bitOffset = bitIndex + deltaSrc;
+ bitIndex = bitOffset % 8;
+ currByte = *(src += bitOffset / 8);
+ dst[x] = ((currByte >> (7 - bitIndex)) & 1) ? SK_ColorWHITE : SK_ColorBLACK;
+ }
+}
+
+#define RGB565_BLACK 0
+#define RGB565_WHITE 0xFFFF
+
+static void swizzle_bit_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor* /*ctable*/) {
+ uint16_t* SK_RESTRICT dst = (uint16_t*) dstRow;
+
+ // increment src by byte offset and bitIndex by bit offset
+ src += offset / 8;
+ int bitIndex = offset % 8;
+ uint8_t currByte = *src;
+
+ dst[0] = ((currByte >> (7 - bitIndex)) & 1) ? RGB565_WHITE : RGB565_BLACK;
+
+ for (int x = 1; x < dstWidth; x++) {
+ int bitOffset = bitIndex + deltaSrc;
+ bitIndex = bitOffset % 8;
+ currByte = *(src += bitOffset / 8);
+ dst[x] = ((currByte >> (7 - bitIndex)) & 1) ? RGB565_WHITE : RGB565_BLACK;
+ }
+}
+
+#undef RGB565_BLACK
+#undef RGB565_WHITE
+
+// kIndex1, kIndex2, kIndex4
+
+static void swizzle_small_index_to_index(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ uint8_t* dst = (uint8_t*) dstRow;
+ src += offset / 8;
+ int bitIndex = offset % 8;
+ uint8_t currByte = *src;
+ const uint8_t mask = (1 << bpp) - 1;
+ uint8_t index = (currByte >> (8 - bpp - bitIndex)) & mask;
+ dst[0] = index;
+
+ for (int x = 1; x < dstWidth; x++) {
+ int bitOffset = bitIndex + deltaSrc;
+ bitIndex = bitOffset % 8;
+ currByte = *(src += bitOffset / 8);
+ index = (currByte >> (8 - bpp - bitIndex)) & mask;
+ dst[x] = index;
+ }
+}
+
+static void swizzle_small_index_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ uint16_t* dst = (uint16_t*) dstRow;
+ src += offset / 8;
+ int bitIndex = offset % 8;
+ uint8_t currByte = *src;
+ const uint8_t mask = (1 << bpp) - 1;
+ uint8_t index = (currByte >> (8 - bpp - bitIndex)) & mask;
+ dst[0] = SkPixel32ToPixel16(ctable[index]);
+
+ for (int x = 1; x < dstWidth; x++) {
+ int bitOffset = bitIndex + deltaSrc;
+ bitIndex = bitOffset % 8;
+ currByte = *(src += bitOffset / 8);
+ index = (currByte >> (8 - bpp - bitIndex)) & mask;
+ dst[x] = SkPixel32ToPixel16(ctable[index]);
+ }
+}
+
+static void swizzle_small_index_to_n32(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ SkPMColor* dst = (SkPMColor*) dstRow;
+ src += offset / 8;
+ int bitIndex = offset % 8;
+ uint8_t currByte = *src;
+ const uint8_t mask = (1 << bpp) - 1;
+ uint8_t index = (currByte >> (8 - bpp - bitIndex)) & mask;
+ dst[0] = ctable[index];
+
+ for (int x = 1; x < dstWidth; x++) {
+ int bitOffset = bitIndex + deltaSrc;
+ bitIndex = bitOffset % 8;
+ currByte = *(src += bitOffset / 8);
+ index = (currByte >> (8 - bpp - bitIndex)) & mask;
+ dst[x] = ctable[index];
+ }
+}
+
+// kIndex
+
+static void swizzle_index_to_n32(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ SkPMColor c = ctable[*src];
+ dst[x] = c;
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_index_to_n32_skipZ(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ SkPMColor c = ctable[*src];
+ if (c != 0) {
+ dst[x] = c;
+ }
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_index_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bytesPerPixel, int deltaSrc, int offset, const SkPMColor ctable[]) {
+ src += offset;
+ uint16_t* SK_RESTRICT dst = (uint16_t*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPixel32ToPixel16(ctable[*src]);
+ src += deltaSrc;
+ }
+}
+
+// kGray
+
+static void swizzle_gray_to_n32(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPackARGB32NoCheck(0xFF, *src, *src, *src);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_gray_to_n32(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ // Note that there is no need to distinguish between RGB and BGR.
+ // Each color channel will get the same value.
+ SkOpts::gray_to_RGB1((uint32_t*) dst, src + offset, width);
+}
+
+static void swizzle_gray_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bytesPerPixel, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ uint16_t* SK_RESTRICT dst = (uint16_t*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPack888ToRGB16(src[0], src[0], src[0]);
+ src += deltaSrc;
+ }
+}
+
+// kGrayAlpha
+
+static void swizzle_grayalpha_to_n32_unpremul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* dst32 = (SkPMColor*) dst;
+ for (int x = 0; x < width; x++) {
+ dst32[x] = SkPackARGB32NoCheck(src[1], src[0], src[0], src[0]);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_grayalpha_to_n32_unpremul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ // Note that there is no need to distinguish between RGB and BGR.
+ // Each color channel will get the same value.
+ SkOpts::grayA_to_RGBA((uint32_t*) dst, src + offset, width);
+}
+
+static void swizzle_grayalpha_to_n32_premul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* dst32 = (SkPMColor*) dst;
+ for (int x = 0; x < width; x++) {
+ uint8_t pmgray = SkMulDiv255Round(src[1], src[0]);
+ dst32[x] = SkPackARGB32NoCheck(src[1], pmgray, pmgray, pmgray);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_grayalpha_to_n32_premul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ // Note that there is no need to distinguish between rgb and bgr.
+ // Each color channel will get the same value.
+ SkOpts::grayA_to_rgbA((uint32_t*) dst, src + offset, width);
+}
+
+// kBGR
+
+static void swizzle_bgr_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ uint16_t* SK_RESTRICT dst = (uint16_t*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPack888ToRGB16(src[2], src[1], src[0]);
+ src += deltaSrc;
+ }
+}
+
+// kRGB
+
+static void swizzle_rgb_to_rgba(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPackARGB_as_RGBA(0xFF, src[0], src[1], src[2]);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_rgb_to_bgra(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPackARGB_as_BGRA(0xFF, src[0], src[1], src[2]);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_rgb_to_rgba(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc,
+ int offset, const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::RGB_to_RGB1((uint32_t*) dst, src + offset, width);
+}
+
+static void fast_swizzle_rgb_to_bgra(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc,
+ int offset, const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::RGB_to_BGR1((uint32_t*) dst, src + offset, width);
+}
+
+static void swizzle_rgb_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bytesPerPixel, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ uint16_t* SK_RESTRICT dst = (uint16_t*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPack888ToRGB16(src[0], src[1], src[2]);
+ src += deltaSrc;
+ }
+}
+
+// kRGBA
+
+static void swizzle_rgba_to_rgba_premul(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = premultiply_argb_as_rgba(src[3], src[0], src[1], src[2]);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_rgba_to_bgra_premul(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = premultiply_argb_as_bgra(src[3], src[0], src[1], src[2]);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_rgba_to_rgba_premul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc,
+ int offset, const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::RGBA_to_rgbA((uint32_t*) dst, src + offset, width);
+}
+
+static void fast_swizzle_rgba_to_bgra_premul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc,
+ int offset, const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::RGBA_to_bgrA((uint32_t*) dst, src + offset, width);
+}
+
+static void swizzle_rgba_to_bgra_unpremul(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ uint32_t* SK_RESTRICT dst = reinterpret_cast<uint32_t*>(dstRow);
+ for (int x = 0; x < dstWidth; x++) {
+ unsigned alpha = src[3];
+ dst[x] = SkPackARGB_as_BGRA(alpha, src[0], src[1], src[2]);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_rgba_to_bgra_unpremul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::RGBA_to_BGRA((uint32_t*) dst, src + offset, width);
+}
+
+// kCMYK
+//
+// CMYK is stored as four bytes per pixel.
+//
+// We will implement a crude conversion from CMYK -> RGB using formulas
+// from easyrgb.com.
+//
+// CMYK -> CMY
+// C = C * (1 - K) + K
+// M = M * (1 - K) + K
+// Y = Y * (1 - K) + K
+//
+// libjpeg actually gives us inverted CMYK, so we must subtract the
+// original terms from 1.
+// CMYK -> CMY
+// C = (1 - C) * (1 - (1 - K)) + (1 - K)
+// M = (1 - M) * (1 - (1 - K)) + (1 - K)
+// Y = (1 - Y) * (1 - (1 - K)) + (1 - K)
+//
+// Simplifying the above expression.
+// CMYK -> CMY
+// C = 1 - CK
+// M = 1 - MK
+// Y = 1 - YK
+//
+// CMY -> RGB
+// R = (1 - C) * 255
+// G = (1 - M) * 255
+// B = (1 - Y) * 255
+//
+// Therefore the full conversion is below. This can be verified at
+// www.rapidtables.com (assuming inverted CMYK).
+// CMYK -> RGB
+// R = C * K * 255
+// G = M * K * 255
+// B = Y * K * 255
+//
+// As a final note, we have treated the CMYK values as if they were on
+// a scale from 0-1, when in fact they are 8-bit ints scaling from 0-255.
+// We must divide each CMYK component by 255 to obtain the true conversion
+// we should perform.
+// CMYK -> RGB
+// R = C * K / 255
+// G = M * K / 255
+// B = Y * K / 255
+static void swizzle_cmyk_to_rgba(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ const uint8_t r = SkMulDiv255Round(src[0], src[3]);
+ const uint8_t g = SkMulDiv255Round(src[1], src[3]);
+ const uint8_t b = SkMulDiv255Round(src[2], src[3]);
+
+ dst[x] = SkPackARGB_as_RGBA(0xFF, r, g, b);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_cmyk_to_bgra(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ const uint8_t r = SkMulDiv255Round(src[0], src[3]);
+ const uint8_t g = SkMulDiv255Round(src[1], src[3]);
+ const uint8_t b = SkMulDiv255Round(src[2], src[3]);
+
+ dst[x] = SkPackARGB_as_BGRA(0xFF, r, g, b);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_cmyk_to_rgba(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::inverted_CMYK_to_RGB1((uint32_t*) dst, src + offset, width);
+}
+
+static void fast_swizzle_cmyk_to_bgra(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::inverted_CMYK_to_BGR1((uint32_t*) dst, src + offset, width);
+}
+
+static void swizzle_cmyk_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ uint16_t* SK_RESTRICT dst = (uint16_t*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ const uint8_t r = SkMulDiv255Round(src[0], src[3]);
+ const uint8_t g = SkMulDiv255Round(src[1], src[3]);
+ const uint8_t b = SkMulDiv255Round(src[2], src[3]);
+
+ dst[x] = SkPack888ToRGB16(r, g, b);
+ src += deltaSrc;
+ }
+}
+
+template <SkSwizzler::RowProc proc>
+void SkSwizzler::SkipLeadingGrayAlphaZerosThen(
+ void* dst, const uint8_t* src, int width,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+ SkASSERT(!ctable);
+
+ const uint16_t* src16 = (const uint16_t*) (src + offset);
+ uint32_t* dst32 = (uint32_t*) dst;
+
+ // This may miss opportunities to skip when the output is premultiplied,
+ // e.g. for a src pixel 0x00FF which is not zero but becomes zero after premultiplication.
+ while (width > 0 && *src16 == 0x0000) {
+ width--;
+ dst32++;
+ src16 += deltaSrc / 2;
+ }
+ proc(dst32, (const uint8_t*)src16, width, bpp, deltaSrc, 0, ctable);
+}
+
+template <SkSwizzler::RowProc proc>
+void SkSwizzler::SkipLeading8888ZerosThen(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+ SkASSERT(!ctable);
+
+ auto src32 = (const uint32_t*)(src+offset);
+ auto dst32 = (uint32_t*)dstRow;
+
+ // This may miss opportunities to skip when the output is premultiplied,
+ // e.g. for a src pixel 0x00FFFFFF which is not zero but becomes zero after premultiplication.
+ while (dstWidth > 0 && *src32 == 0x00000000) {
+ dstWidth--;
+ dst32++;
+ src32 += deltaSrc/4;
+ }
+ proc(dst32, (const uint8_t*)src32, dstWidth, bpp, deltaSrc, 0, ctable);
+}
+
+SkSwizzler* SkSwizzler::CreateSwizzler(const SkEncodedInfo& encodedInfo,
+ const SkPMColor* ctable,
+ const SkImageInfo& dstInfo,
+ const SkCodec::Options& options,
+ const SkIRect* frame,
+ bool preSwizzled) {
+ if (SkEncodedInfo::kPalette_Color == encodedInfo.color() && nullptr == ctable) {
+ return nullptr;
+ }
+
+ RowProc fastProc = nullptr;
+ RowProc proc = nullptr;
+ if (preSwizzled) {
+ switch (dstInfo.colorType()) {
+ case kGray_8_SkColorType:
+ proc = &sample1;
+ fastProc = &copy;
+ break;
+ case kRGB_565_SkColorType:
+ proc = &sample2;
+ fastProc = &copy;
+ break;
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ proc = &sample4;
+ fastProc = &copy;
+ break;
+ default:
+ return nullptr;
+ }
+ } else {
+ SkCodec::ZeroInitialized zeroInit = options.fZeroInitialized;
+ const bool premultiply = (SkEncodedInfo::kOpaque_Alpha != encodedInfo.alpha()) &&
+ (kPremul_SkAlphaType == dstInfo.alphaType());
+
+ switch (encodedInfo.color()) {
+ case SkEncodedInfo::kGray_Color:
+ switch (encodedInfo.bitsPerComponent()) {
+ case 1:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ proc = &swizzle_bit_to_n32;
+ break;
+ case kIndex_8_SkColorType:
+ proc = &swizzle_bit_to_index;
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_bit_to_565;
+ break;
+ case kGray_8_SkColorType:
+ proc = &swizzle_bit_to_grayscale;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case 8:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ proc = &swizzle_gray_to_n32;
+ fastProc = &fast_swizzle_gray_to_n32;
+ break;
+ case kGray_8_SkColorType:
+ proc = &sample1;
+ fastProc = &copy;
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_gray_to_565;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kGrayAlpha_Color:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ if (premultiply) {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeadingGrayAlphaZerosThen
+ <swizzle_grayalpha_to_n32_premul>;
+ fastProc = &SkipLeadingGrayAlphaZerosThen
+ <fast_swizzle_grayalpha_to_n32_premul>;
+ } else {
+ proc = &swizzle_grayalpha_to_n32_premul;
+ fastProc = &fast_swizzle_grayalpha_to_n32_premul;
+ }
+ } else {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeadingGrayAlphaZerosThen
+ <swizzle_grayalpha_to_n32_unpremul>;
+ fastProc = &SkipLeadingGrayAlphaZerosThen
+ <fast_swizzle_grayalpha_to_n32_unpremul>;
+ } else {
+ proc = &swizzle_grayalpha_to_n32_unpremul;
+ fastProc = &fast_swizzle_grayalpha_to_n32_unpremul;
+ }
+ }
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kPalette_Color:
+ // We assume that the color table is premultiplied and swizzled
+ // as desired.
+ switch (encodedInfo.bitsPerComponent()) {
+ case 1:
+ case 2:
+ case 4:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ proc = &swizzle_small_index_to_n32;
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_small_index_to_565;
+ break;
+ case kIndex_8_SkColorType:
+ proc = &swizzle_small_index_to_index;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case 8:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &swizzle_index_to_n32_skipZ;
+ } else {
+ proc = &swizzle_index_to_n32;
+ }
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_index_to_565;
+ break;
+ case kIndex_8_SkColorType:
+ proc = &sample1;
+ fastProc = &copy;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kRGB_Color:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ proc = &swizzle_rgb_to_rgba;
+ fastProc = &fast_swizzle_rgb_to_rgba;
+ break;
+ case kBGRA_8888_SkColorType:
+ proc = &swizzle_rgb_to_bgra;
+ fastProc = &fast_swizzle_rgb_to_bgra;
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_rgb_to_565;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kRGBA_Color:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ if (premultiply) {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<swizzle_rgba_to_rgba_premul>;
+ fastProc = &SkipLeading8888ZerosThen
+ <fast_swizzle_rgba_to_rgba_premul>;
+ } else {
+ proc = &swizzle_rgba_to_rgba_premul;
+ fastProc = &fast_swizzle_rgba_to_rgba_premul;
+ }
+ } else {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<sample4>;
+ fastProc = &SkipLeading8888ZerosThen<copy>;
+ } else {
+ proc = &sample4;
+ fastProc = &copy;
+ }
+ }
+ break;
+ case kBGRA_8888_SkColorType:
+ if (premultiply) {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<swizzle_rgba_to_bgra_premul>;
+ fastProc = &SkipLeading8888ZerosThen
+ <fast_swizzle_rgba_to_bgra_premul>;
+ } else {
+ proc = &swizzle_rgba_to_bgra_premul;
+ fastProc = &fast_swizzle_rgba_to_bgra_premul;
+ }
+ } else {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<swizzle_rgba_to_bgra_unpremul>;
+ fastProc = &SkipLeading8888ZerosThen
+ <fast_swizzle_rgba_to_bgra_unpremul>;
+ } else {
+ proc = &swizzle_rgba_to_bgra_unpremul;
+ fastProc = &fast_swizzle_rgba_to_bgra_unpremul;
+ }
+ }
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kBGR_Color:
+ switch (dstInfo.colorType()) {
+ case kBGRA_8888_SkColorType:
+ proc = &swizzle_rgb_to_rgba;
+ fastProc = &fast_swizzle_rgb_to_rgba;
+ break;
+ case kRGBA_8888_SkColorType:
+ proc = &swizzle_rgb_to_bgra;
+ fastProc = &fast_swizzle_rgb_to_bgra;
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_bgr_to_565;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kBGRX_Color:
+ switch (dstInfo.colorType()) {
+ case kBGRA_8888_SkColorType:
+ proc = &swizzle_rgb_to_rgba;
+ break;
+ case kRGBA_8888_SkColorType:
+ proc = &swizzle_rgb_to_bgra;
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_bgr_to_565;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kBGRA_Color:
+ switch (dstInfo.colorType()) {
+ case kBGRA_8888_SkColorType:
+ if (premultiply) {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<swizzle_rgba_to_rgba_premul>;
+ fastProc = &SkipLeading8888ZerosThen
+ <fast_swizzle_rgba_to_rgba_premul>;
+ } else {
+ proc = &swizzle_rgba_to_rgba_premul;
+ fastProc = &fast_swizzle_rgba_to_rgba_premul;
+ }
+ } else {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<sample4>;
+ fastProc = &SkipLeading8888ZerosThen<copy>;
+ } else {
+ proc = &sample4;
+ fastProc = &copy;
+ }
+ }
+ break;
+ case kRGBA_8888_SkColorType:
+ if (premultiply) {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<swizzle_rgba_to_bgra_premul>;
+ fastProc = &SkipLeading8888ZerosThen
+ <fast_swizzle_rgba_to_bgra_premul>;
+ } else {
+ proc = &swizzle_rgba_to_bgra_premul;
+ fastProc = &fast_swizzle_rgba_to_bgra_premul;
+ }
+ } else {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<swizzle_rgba_to_bgra_unpremul>;
+ fastProc = &SkipLeading8888ZerosThen
+ <fast_swizzle_rgba_to_bgra_unpremul>;
+ } else {
+ proc = &swizzle_rgba_to_bgra_unpremul;
+ fastProc = &fast_swizzle_rgba_to_bgra_unpremul;
+ }
+ }
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kInvertedCMYK_Color:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ proc = &swizzle_cmyk_to_rgba;
+ fastProc = &fast_swizzle_cmyk_to_rgba;
+ break;
+ case kBGRA_8888_SkColorType:
+ proc = &swizzle_cmyk_to_bgra;
+ fastProc = &fast_swizzle_cmyk_to_bgra;
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_cmyk_to_565;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ default:
+ return nullptr;
+ }
+ }
+
+ int srcBPP;
+ const int dstBPP = SkColorTypeBytesPerPixel(dstInfo.colorType());
+ if (preSwizzled) {
+ srcBPP = dstBPP;
+ } else {
+ // Store bpp in bytes if it is an even multiple, otherwise use bits
+ uint8_t bitsPerPixel = encodedInfo.bitsPerPixel();
+ srcBPP = SkIsAlign8(bitsPerPixel) ? bitsPerPixel / 8 : bitsPerPixel;
+ }
+
+ int srcOffset = 0;
+ int srcWidth = dstInfo.width();
+ int dstOffset = 0;
+ int dstWidth = srcWidth;
+ if (options.fSubset) {
+ // We do not currently support subset decodes for image types that may have
+ // frames (gif).
+ SkASSERT(!frame);
+ srcOffset = options.fSubset->left();
+ srcWidth = options.fSubset->width();
+ dstWidth = srcWidth;
+ } else if (frame) {
+ dstOffset = frame->left();
+ srcWidth = frame->width();
+ }
+
+ return new SkSwizzler(fastProc, proc, ctable, srcOffset, srcWidth, dstOffset, dstWidth,
+ srcBPP, dstBPP);
+}
+
+SkSwizzler::SkSwizzler(RowProc fastProc, RowProc proc, const SkPMColor* ctable, int srcOffset,
+ int srcWidth, int dstOffset, int dstWidth, int srcBPP, int dstBPP)
+ : fFastProc(fastProc)
+ , fSlowProc(proc)
+ , fActualProc(fFastProc ? fFastProc : fSlowProc)
+ , fColorTable(ctable)
+ , fSrcOffset(srcOffset)
+ , fDstOffset(dstOffset)
+ , fSrcOffsetUnits(srcOffset * srcBPP)
+ , fDstOffsetBytes(dstOffset * dstBPP)
+ , fSrcWidth(srcWidth)
+ , fDstWidth(dstWidth)
+ , fSwizzleWidth(srcWidth)
+ , fAllocatedWidth(dstWidth)
+ , fSampleX(1)
+ , fSrcBPP(srcBPP)
+ , fDstBPP(dstBPP)
+{}
+
+int SkSwizzler::onSetSampleX(int sampleX) {
+ SkASSERT(sampleX > 0);
+
+ fSampleX = sampleX;
+ fSrcOffsetUnits = (get_start_coord(sampleX) + fSrcOffset) * fSrcBPP;
+ fDstOffsetBytes = (fDstOffset / sampleX) * fDstBPP;
+ fSwizzleWidth = get_scaled_dimension(fSrcWidth, sampleX);
+ fAllocatedWidth = get_scaled_dimension(fDstWidth, sampleX);
+
+ // The optimized swizzler functions do not support sampling. Sampled swizzles
+ // are already fast because they skip pixels. We haven't seen a situation
+ // where speeding up sampling has a significant impact on total decode time.
+ if (1 == fSampleX && fFastProc) {
+ fActualProc = fFastProc;
+ } else {
+ fActualProc = fSlowProc;
+ }
+
+ return fAllocatedWidth;
+}
+
+void SkSwizzler::swizzle(void* dst, const uint8_t* SK_RESTRICT src) {
+ SkASSERT(nullptr != dst && nullptr != src);
+ fActualProc(SkTAddOffset<void>(dst, fDstOffsetBytes), src, fSwizzleWidth, fSrcBPP,
+ fSampleX * fSrcBPP, fSrcOffsetUnits, fColorTable);
+}
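
The inverted-CMYK derivation in the long comment above reduces to R = C * K / 255
(and likewise for G and B). A standalone check of that arithmetic; mul_div_255_round
is a local stand-in for SkMulDiv255Round.

#include <cstdint>
#include <cstdio>

// Stand-in for SkMulDiv255Round: (a * b) / 255 with rounding.
static uint8_t mul_div_255_round(uint8_t a, uint8_t b) {
    const unsigned prod = (unsigned)a * b + 128;
    return (uint8_t)((prod + (prod >> 8)) >> 8);
}

int main() {
    // Inverted CMYK for a reddish color: C = 255, M = 64, Y = 64, K = 255.
    const uint8_t c = 255, m = 64, y = 64, k = 255;
    printf("R=%d G=%d B=%d\n",
           mul_div_255_round(c, k),    // 255
           mul_div_255_round(m, k),    // 64
           mul_div_255_round(y, k));   // 64
    return 0;
}
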
diff --git a/gfx/skia/skia/src/codec/SkSwizzler.h b/gfx/skia/skia/src/codec/SkSwizzler.h
new file mode 100644
index 000000000..a535298a0
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkSwizzler.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSwizzler_DEFINED
+#define SkSwizzler_DEFINED
+
+#include "SkCodec.h"
+#include "SkColor.h"
+#include "SkImageInfo.h"
+#include "SkSampler.h"
+
+class SkSwizzler : public SkSampler {
+public:
+ /**
+ * Create a new SkSwizzler.
+ * @param encodedInfo Description of the format of the encoded data.
+ * @param ctable Unowned pointer to an array of up to 256 colors for an
+ * index source.
+ * @param dstInfo Describes the destination.
+ * @param options Indicates if dst is zero-initialized. The
+ * implementation may choose to skip writing zeroes
+ * if set to kYes_ZeroInitialized.
+ * Contains partial scanline information.
+ * @param frame Is non-NULL if the source pixels are part of an image
+ * frame that is a subset of the full image.
+ * @param preSwizzled Indicates that the codec has already swizzled to the
+ * destination format. The swizzler only needs to sample
+ * and/or subset.
+ *
+ * Note that a deeper discussion of partial scanline subsets and image frame
+ * subsets is below. Currently, we do not support both simultaneously. If
+ * options->fSubset is non-NULL, frame must be NULL.
+ *
+ * @return A new SkSwizzler or nullptr on failure.
+ */
+ static SkSwizzler* CreateSwizzler(const SkEncodedInfo& encodedInfo, const SkPMColor* ctable,
+ const SkImageInfo& dstInfo, const SkCodec::Options&,
+ const SkIRect* frame = nullptr, bool preSwizzled = false);
+
+ /**
+ * Swizzle a line. Generally this will be called height times, once
+ * for each row of source.
+ * By allowing the caller to pass in the dst pointer, we give the caller
+ * flexibility to use the swizzler even when the encoded data does not
+ * store the rows in order. This also improves usability for scaled and
+ * subset decodes.
+ * @param dst Where we write the output.
+ * @param src The next row of the source data.
+ */
+ void swizzle(void* dst, const uint8_t* SK_RESTRICT src);
+
+ /**
+ * Implement fill using a custom width.
+ */
+ void fill(const SkImageInfo& info, void* dst, size_t rowBytes, uint64_t colorOrIndex,
+ SkCodec::ZeroInitialized zeroInit) override {
+ const SkImageInfo fillInfo = info.makeWH(fAllocatedWidth, info.height());
+ SkSampler::Fill(fillInfo, dst, rowBytes, colorOrIndex, zeroInit);
+ }
+
+ /**
+ * If fSampleX > 1, the swizzler is sampling every fSampleX'th pixel and
+ * discarding the rest.
+ *
+ * This getter is currently used by SkBmpStandardCodec for Bmp-in-Ico decodes.
+ * Ideally, the subclasses of SkCodec would have no knowledge of sampling, but
+ * this allows us to apply a transparency mask to pixels after swizzling.
+ */
+ int sampleX() const { return fSampleX; }
+
+ /**
+ * Returns the actual number of pixels written to destination memory, taking
+ * scaling, subsetting, and partial frames into account.
+ */
+ int swizzleWidth() const { return fSwizzleWidth; }
+
+private:
+
+ /**
+ * Method for converting raw data to Skia pixels.
+ * @param dstRow Row in which to write the resulting pixels.
+ * @param src Row of src data, in format specified by SrcConfig
+ * @param dstWidth Width in pixels of the destination
+     *  @param bpp        if bitsPerPixel % 8 == 0, bpp is bytesPerPixel
+     *                    else, bpp is bitsPerPixel
+ * @param deltaSrc bpp * sampleX
+ * @param ctable Colors (used for kIndex source).
+ * @param offset The offset before the first pixel to sample.
+ Is in bytes or bits based on what deltaSrc is in.
+ */
+ typedef void (*RowProc)(void* SK_RESTRICT dstRow,
+ const uint8_t* SK_RESTRICT src,
+ int dstWidth, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]);
+
+ template <RowProc Proc>
+ static void SkipLeading8888ZerosThen(void* SK_RESTRICT dstRow,
+ const uint8_t* SK_RESTRICT src,
+ int dstWidth, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]);
+
+ template <RowProc Proc>
+ static void SkipLeadingGrayAlphaZerosThen(void* dst, const uint8_t* src, int width, int bpp,
+ int deltaSrc, int offset, const SkPMColor ctable[]);
+
+ // May be NULL. We have not implemented optimized functions for all supported transforms.
+ const RowProc fFastProc;
+ // Always non-NULL. Supports sampling.
+ const RowProc fSlowProc;
+ // The actual RowProc we are using. This depends on if fFastProc is non-NULL and
+ // whether or not we are sampling.
+ RowProc fActualProc;
+
+ const SkPMColor* fColorTable; // Unowned pointer
+
+ // Subset Swizzles
+ // There are two types of subset swizzles that we support. We do not
+ // support both at the same time.
+ // TODO: If we want to support partial scanlines for gifs (which may
+ // use frame subsets), we will need to support both subsetting
+ // modes at the same time.
+ // (1) Partial Scanlines
+ // The client only wants to write a subset of the source pixels
+ // to the destination. This subset is specified to CreateSwizzler
+ // using options->fSubset. We will store subset information in
+ // the following fields.
+ //
+ // fSrcOffset: The starting pixel of the source.
+ // fSrcOffsetUnits: Derived from fSrcOffset with two key
+ // differences:
+ // (1) This takes the size of source pixels into
+ // account by multiplying by fSrcBPP. This may
+ // be measured in bits or bytes depending on
+ // which is natural for the SrcConfig.
+ // (2) If we are sampling, this will be larger
+ // than fSrcOffset * fSrcBPP, since sampling
+ // implies that we will skip some pixels.
+ // fDstOffset: Will be zero. There is no destination offset
+ // for this type of subset.
+ // fDstOffsetBytes: Will be zero.
+ // fSrcWidth: The width of the desired subset of source
+ // pixels, before any sampling is performed.
+ // fDstWidth: Will be equal to fSrcWidth, since this is also
+ // calculated before any sampling is performed.
+ // For this type of subset, the destination width
+ // matches the desired subset of the source.
+ // fSwizzleWidth: The actual number of pixels that will be
+ // written by the RowProc. This is a scaled
+ // version of fSrcWidth/fDstWidth.
+ // fAllocatedWidth: Will be equal to fSwizzleWidth. For this type
+ // of subset, the number of pixels written is the
+ // same as the actual width of the destination.
+ // (2) Frame Subset
+ // The client will decode the entire width of the source into a
+ // subset of destination memory. This subset is specified to
+ // CreateSwizzler in the "frame" parameter. We store subset
+ // information in the following fields.
+ //
+ // fSrcOffset: Will be zero. The starting pixel of the source.
+ // fSrcOffsetUnits: Will only be non-zero if we are sampling,
+ // since sampling implies that we will skip some
+ // pixels. Note that this is measured in bits
+ // or bytes depending on which is natural for
+ // SrcConfig.
+ // fDstOffset: First pixel to write in destination.
+ // fDstOffsetBytes: fDstOffset * fDstBPP.
+ // fSrcWidth: The entire width of the source pixels, before
+ // any sampling is performed.
+ // fDstWidth: The entire width of the destination memory,
+ // before any sampling is performed.
+ // fSwizzleWidth: The actual number of pixels that will be
+ // written by the RowProc. This is a scaled
+ // version of fSrcWidth.
+ // fAllocatedWidth: The actual number of pixels in destination
+ // memory. This is a scaled version of
+ // fDstWidth.
+ //
+ // If we are not subsetting, these fields are more straightforward.
+    //      fSrcOffset = fDstOffset = fDstOffsetBytes = 0
+ // fSrcOffsetUnits may be non-zero (we will skip the first few pixels when sampling)
+ // fSrcWidth = fDstWidth = Full original width
+    //      fSwizzleWidth = fAllocatedWidth = Scaled width (if we are sampling)
+ const int fSrcOffset;
+ const int fDstOffset;
+ int fSrcOffsetUnits;
+ int fDstOffsetBytes;
+ const int fSrcWidth;
+ const int fDstWidth;
+ int fSwizzleWidth;
+ int fAllocatedWidth;
+
+ int fSampleX; // Step between X samples
+ const int fSrcBPP; // Bits/bytes per pixel for the SrcConfig
+ // if bitsPerPixel % 8 == 0
+ // fBPP is bytesPerPixel
+ // else
+ // fBPP is bitsPerPixel
+ const int fDstBPP; // Bytes per pixel for the destination color type
+
+ SkSwizzler(RowProc fastProc, RowProc proc, const SkPMColor* ctable, int srcOffset,
+ int srcWidth, int dstOffset, int dstWidth, int srcBPP, int dstBPP);
+
+ int onSetSampleX(int) override;
+
+};
+#endif // SkSwizzler_DEFINED
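
A numeric walk-through of the partial-scanline bookkeeping described in the comment
block above, for a hypothetical 4-byte-per-pixel source with a subset starting at
x = 10, subset width 40, and sampleX = 4. The formulas mirror onSetSampleX() in
SkSwizzler.cpp; get_start_coord and get_scaled_dimension are local stand-ins for the
SkCodecPriv.h helpers.

#include <cstdio>

static int get_start_coord(int sampleFactor) { return sampleFactor / 2; }   // stand-in
static int get_scaled_dimension(int srcDim, int sampleSize) {               // stand-in
    return (sampleSize > srcDim) ? 1 : srcDim / sampleSize;
}

int main() {
    const int srcBPP = 4, sampleX = 4;
    const int srcOffset = 10;   // fSrcOffset: first subset pixel in the source
    const int srcWidth = 40;    // fSrcWidth == fDstWidth for a partial scanline
    // fSrcOffsetUnits: skip to the first sampled pixel, measured in bytes here.
    const int srcOffsetUnits = (get_start_coord(sampleX) + srcOffset) * srcBPP;  // 48
    // fSwizzleWidth == fAllocatedWidth: pixels actually written per row.
    const int swizzleWidth = get_scaled_dimension(srcWidth, sampleX);            // 10
    printf("fSrcOffsetUnits = %d bytes, fSwizzleWidth = %d pixels\n",
           srcOffsetUnits, swizzleWidth);
    return 0;
}
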
diff --git a/gfx/skia/skia/src/codec/SkWbmpCodec.cpp b/gfx/skia/skia/src/codec/SkWbmpCodec.cpp
new file mode 100644
index 000000000..099b6e472
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkWbmpCodec.cpp
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCodec.h"
+#include "SkCodecPriv.h"
+#include "SkColorPriv.h"
+#include "SkColorTable.h"
+#include "SkData.h"
+#include "SkStream.h"
+#include "SkWbmpCodec.h"
+
+// Each bit represents a pixel, so width is actually a number of bits.
+// A row will always be stored in bytes, so we round width up to the
+// nearest multiple of 8 to get the number of bits actually in the row.
+// We then divide by 8 to convert to bytes.
+static inline size_t get_src_row_bytes(int width) {
+ return SkAlign8(width) >> 3;
+}
+
+static inline void setup_color_table(SkColorType colorType,
+ SkPMColor* colorPtr, int* colorCount) {
+ if (kIndex_8_SkColorType == colorType) {
+ colorPtr[0] = SK_ColorBLACK;
+ colorPtr[1] = SK_ColorWHITE;
+ *colorCount = 2;
+ }
+}
+
+static inline bool valid_color_type(SkColorType colorType) {
+ switch (colorType) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ case kIndex_8_SkColorType:
+ case kGray_8_SkColorType:
+ case kRGB_565_SkColorType:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool read_byte(SkStream* stream, uint8_t* data)
+{
+ return stream->read(data, 1) == 1;
+}
+
+// http://en.wikipedia.org/wiki/Variable-length_quantity
+static bool read_mbf(SkStream* stream, uint64_t* value) {
+ uint64_t n = 0;
+ uint8_t data;
+ const uint64_t kLimit = 0xFE00000000000000;
+ SkASSERT(kLimit == ~((~static_cast<uint64_t>(0)) >> 7));
+ do {
+ if (n & kLimit) { // Will overflow on shift by 7.
+ return false;
+ }
+ if (stream->read(&data, 1) != 1) {
+ return false;
+ }
+ n = (n << 7) | (data & 0x7F);
+ } while (data & 0x80);
+ *value = n;
+ return true;
+}
+
+static bool read_header(SkStream* stream, SkISize* size) {
+ {
+ uint8_t data;
+ if (!read_byte(stream, &data) || data != 0) { // unknown type
+ return false;
+ }
+ if (!read_byte(stream, &data) || (data & 0x9F)) { // skip fixed header
+ return false;
+ }
+ }
+
+ uint64_t width, height;
+ if (!read_mbf(stream, &width) || width > 0xFFFF || !width) {
+ return false;
+ }
+ if (!read_mbf(stream, &height) || height > 0xFFFF || !height) {
+ return false;
+ }
+ if (size) {
+ *size = SkISize::Make(SkToS32(width), SkToS32(height));
+ }
+ return true;
+}
+
+bool SkWbmpCodec::onRewind() {
+ return read_header(this->stream(), nullptr);
+}
+
+SkSwizzler* SkWbmpCodec::initializeSwizzler(const SkImageInfo& info, const SkPMColor* ctable,
+ const Options& opts) {
+ return SkSwizzler::CreateSwizzler(this->getEncodedInfo(), ctable, info, opts);
+}
+
+bool SkWbmpCodec::readRow(uint8_t* row) {
+ return this->stream()->read(row, fSrcRowBytes) == fSrcRowBytes;
+}
+
+SkWbmpCodec::SkWbmpCodec(int width, int height, const SkEncodedInfo& info, SkStream* stream)
+ : INHERITED(width, height, info, stream)
+ , fSrcRowBytes(get_src_row_bytes(this->getInfo().width()))
+ , fSwizzler(nullptr)
+ , fColorTable(nullptr)
+{}
+
+SkEncodedFormat SkWbmpCodec::onGetEncodedFormat() const {
+ return kWBMP_SkEncodedFormat;
+}
+
+SkCodec::Result SkWbmpCodec::onGetPixels(const SkImageInfo& info,
+ void* dst,
+ size_t rowBytes,
+ const Options& options,
+ SkPMColor ctable[],
+ int* ctableCount,
+ int* rowsDecoded) {
+ if (options.fSubset) {
+ // Subsets are not supported.
+ return kUnimplemented;
+ }
+
+ if (!valid_color_type(info.colorType()) ||
+ !valid_alpha(info.alphaType(), this->getInfo().alphaType())) {
+ return kInvalidConversion;
+ }
+
+ // Prepare a color table if necessary
+ setup_color_table(info.colorType(), ctable, ctableCount);
+
+ // Initialize the swizzler
+ SkAutoTDelete<SkSwizzler> swizzler(this->initializeSwizzler(info, ctable, options));
+ SkASSERT(swizzler);
+
+ // Perform the decode
+ SkISize size = info.dimensions();
+ SkAutoTMalloc<uint8_t> src(fSrcRowBytes);
+ void* dstRow = dst;
+ for (int y = 0; y < size.height(); ++y) {
+ if (!this->readRow(src.get())) {
+ *rowsDecoded = y;
+ return kIncompleteInput;
+ }
+ swizzler->swizzle(dstRow, src.get());
+ dstRow = SkTAddOffset<void>(dstRow, rowBytes);
+ }
+ return kSuccess;
+}
+
+bool SkWbmpCodec::IsWbmp(const void* buffer, size_t bytesRead) {
+ SkMemoryStream stream(buffer, bytesRead, false);
+ return read_header(&stream, nullptr);
+}
+
+SkCodec* SkWbmpCodec::NewFromStream(SkStream* stream) {
+ SkAutoTDelete<SkStream> streamDeleter(stream);
+ SkISize size;
+ if (!read_header(stream, &size)) {
+ return nullptr;
+ }
+ SkEncodedInfo info = SkEncodedInfo::Make(SkEncodedInfo::kGray_Color,
+ SkEncodedInfo::kOpaque_Alpha, 1);
+ return new SkWbmpCodec(size.width(), size.height(), info, streamDeleter.release());
+}
+
+int SkWbmpCodec::onGetScanlines(void* dst, int count, size_t dstRowBytes) {
+ void* dstRow = dst;
+ for (int y = 0; y < count; ++y) {
+ if (!this->readRow(fSrcBuffer.get())) {
+ return y;
+ }
+ fSwizzler->swizzle(dstRow, fSrcBuffer.get());
+ dstRow = SkTAddOffset<void>(dstRow, dstRowBytes);
+ }
+ return count;
+}
+
+bool SkWbmpCodec::onSkipScanlines(int count) {
+ const size_t bytesToSkip = count * fSrcRowBytes;
+ return this->stream()->skip(bytesToSkip) == bytesToSkip;
+}
+
+SkCodec::Result SkWbmpCodec::onStartScanlineDecode(const SkImageInfo& dstInfo,
+ const Options& options, SkPMColor inputColorTable[], int* inputColorCount) {
+ if (options.fSubset) {
+ // Subsets are not supported.
+ return kUnimplemented;
+ }
+
+ if (!valid_color_type(dstInfo.colorType()) ||
+ !valid_alpha(dstInfo.alphaType(), this->getInfo().alphaType())) {
+ return kInvalidConversion;
+ }
+
+ // Fill in the color table
+ setup_color_table(dstInfo.colorType(), inputColorTable, inputColorCount);
+
+ // Copy the color table to a pointer that can be owned by the scanline decoder
+ if (kIndex_8_SkColorType == dstInfo.colorType()) {
+ fColorTable.reset(new SkColorTable(inputColorTable, 2));
+ }
+
+ // Initialize the swizzler
+ fSwizzler.reset(this->initializeSwizzler(dstInfo, get_color_ptr(fColorTable.get()), options));
+ SkASSERT(fSwizzler);
+
+ fSrcBuffer.reset(fSrcRowBytes);
+
+ return kSuccess;
+}
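
The WBMP decoder above leans on two small pieces of arithmetic: rows are stored one bit per pixel and padded to whole bytes (get_src_row_bytes), and the width/height fields use a multi-byte, 7-bits-per-byte encoding (read_mbf). The following is a minimal standalone sketch of both, with hypothetical helper names, not taken from the patch itself:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Bytes needed for one 1-bit-per-pixel row: round width up to a multiple
    // of 8 bits, then divide by 8 -- the same result as SkAlign8(width) >> 3.
    static size_t wbmp_row_bytes(int width) {
        return (static_cast<size_t>(width) + 7) >> 3;
    }

    // Encode a value as a WBMP multi-byte field: 7 payload bits per byte,
    // most significant group first, continuation bit set on all but the last.
    static std::vector<uint8_t> encode_mbf(uint64_t value) {
        std::vector<uint8_t> out;
        do {
            out.insert(out.begin(), static_cast<uint8_t>(value & 0x7F));
            value >>= 7;
        } while (value);
        for (size_t i = 0; i + 1 < out.size(); ++i) {
            out[i] |= 0x80;
        }
        return out;
    }

    // Example: a 300-pixel row needs wbmp_row_bytes(300) == 38 bytes, and 300
    // encodes as the two bytes 0x82 0x2C (2 * 128 + 44 == 300), which read_mbf
    // above decodes back to 300.
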
diff --git a/gfx/skia/skia/src/codec/SkWbmpCodec.h b/gfx/skia/skia/src/codec/SkWbmpCodec.h
new file mode 100644
index 000000000..9f29237e2
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkWbmpCodec.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCodec_wbmp_DEFINED
+#define SkCodec_wbmp_DEFINED
+
+#include "SkCodec.h"
+#include "SkColorSpace.h"
+#include "SkSwizzler.h"
+
+class SkWbmpCodec final : public SkCodec {
+public:
+ static bool IsWbmp(const void*, size_t);
+
+ /*
+ * Assumes IsWbmp was called and returned true
+ * Creates a wbmp codec
+ * Takes ownership of the stream
+ */
+ static SkCodec* NewFromStream(SkStream*);
+
+protected:
+ SkEncodedFormat onGetEncodedFormat() const override;
+ Result onGetPixels(const SkImageInfo&, void*, size_t,
+ const Options&, SkPMColor[], int*, int*) override;
+ bool onRewind() override;
+private:
+ /*
+ * Returns a swizzler on success, nullptr on failure
+ */
+ SkSwizzler* initializeSwizzler(const SkImageInfo& info, const SkPMColor* ctable,
+ const Options& opts);
+ SkSampler* getSampler(bool createIfNecessary) override {
+ SkASSERT(fSwizzler || !createIfNecessary);
+ return fSwizzler;
+ }
+
+ /*
+ * Read a src row from the encoded stream
+ */
+ bool readRow(uint8_t* row);
+
+ SkWbmpCodec(int width, int height, const SkEncodedInfo&, SkStream*);
+
+ const size_t fSrcRowBytes;
+
+ // Used for scanline decodes:
+ SkAutoTDelete<SkSwizzler> fSwizzler;
+ SkAutoTUnref<SkColorTable> fColorTable;
+ SkAutoTMalloc<uint8_t> fSrcBuffer;
+
+ int onGetScanlines(void* dst, int count, size_t dstRowBytes) override;
+ bool onSkipScanlines(int count) override;
+ Result onStartScanlineDecode(const SkImageInfo& dstInfo, const Options& options,
+ SkPMColor inputColorTable[], int* inputColorCount) override;
+
+ typedef SkCodec INHERITED;
+};
+
+#endif // SkCodec_wbmp_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkWebpAdapterCodec.cpp b/gfx/skia/skia/src/codec/SkWebpAdapterCodec.cpp
new file mode 100644
index 000000000..5aefe5d80
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkWebpAdapterCodec.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCodec.h"
+#include "SkCodecPriv.h"
+#include "SkWebpAdapterCodec.h"
+
+SkWebpAdapterCodec::SkWebpAdapterCodec(SkWebpCodec* codec)
+ : INHERITED(codec)
+{}
+
+SkISize SkWebpAdapterCodec::onGetSampledDimensions(int sampleSize) const {
+ float scale = get_scale_from_sample_size(sampleSize);
+ return this->codec()->getScaledDimensions(scale);
+}
+
+bool SkWebpAdapterCodec::onGetSupportedSubset(SkIRect* desiredSubset) const {
+ return this->codec()->getValidSubset(desiredSubset);
+}
+
+SkCodec::Result SkWebpAdapterCodec::onGetAndroidPixels(const SkImageInfo& info, void* pixels,
+ size_t rowBytes, const AndroidOptions& options) {
+ // SkWebpCodec will support pretty much any dimensions that we provide, but we want
+ // to be stricter about the type of scaling that we allow, so we will add an extra
+ // check here.
+ SkISize supportedSize;
+ if (!options.fSubset) {
+ supportedSize = this->onGetSampledDimensions(options.fSampleSize);
+ } else {
+ supportedSize = this->getSampledSubsetDimensions(options.fSampleSize, *options.fSubset);
+ }
+ if (supportedSize != info.dimensions()) {
+ return SkCodec::kInvalidParameters;
+ }
+
+ SkCodec::Options codecOptions;
+ codecOptions.fZeroInitialized = options.fZeroInitialized;
+ codecOptions.fSubset = options.fSubset;
+ return this->codec()->getPixels(info, pixels, rowBytes, &codecOptions, options.fColorPtr,
+ options.fColorCount);
+}
diff --git a/gfx/skia/skia/src/codec/SkWebpAdapterCodec.h b/gfx/skia/skia/src/codec/SkWebpAdapterCodec.h
new file mode 100644
index 000000000..ece46a668
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkWebpAdapterCodec.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkWebpAdapterCodec_DEFINED
+#define SkWebpAdapterCodec_DEFINED
+
+#include "SkAndroidCodec.h"
+#include "SkWebpCodec.h"
+
+/**
+ * This class implements the functionality of SkAndroidCodec. It uses an
+ * SkWebpCodec.
+ */
+class SkWebpAdapterCodec : public SkAndroidCodec {
+public:
+
+ explicit SkWebpAdapterCodec(SkWebpCodec*);
+
+ virtual ~SkWebpAdapterCodec() {}
+
+protected:
+
+ SkISize onGetSampledDimensions(int sampleSize) const override;
+
+ bool onGetSupportedSubset(SkIRect* desiredSubset) const override;
+
+ SkCodec::Result onGetAndroidPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const AndroidOptions& options) override;
+
+private:
+
+ typedef SkAndroidCodec INHERITED;
+};
+#endif // SkWebpAdapterCodec_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkWebpCodec.cpp b/gfx/skia/skia/src/codec/SkWebpCodec.cpp
new file mode 100644
index 000000000..3e5ef2aec
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkWebpCodec.cpp
@@ -0,0 +1,334 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCodecPriv.h"
+#include "SkColorSpaceXform.h"
+#include "SkWebpCodec.h"
+#include "SkStreamPriv.h"
+#include "SkTemplates.h"
+
+// A WebP decoder on top of (subset of) libwebp
+// For more information on WebP image format, and libwebp library, see:
+// https://code.google.com/speed/webp/
+// http://www.webmproject.org/code/#libwebp-webp-image-library
+// https://chromium.googlesource.com/webm/libwebp
+
+// If moving libwebp out of skia source tree, path for webp headers must be
+// updated accordingly. Here, we enforce using local copy in webp sub-directory.
+#include "webp/decode.h"
+#include "webp/demux.h"
+#include "webp/encode.h"
+
+bool SkWebpCodec::IsWebp(const void* buf, size_t bytesRead) {
+ // WEBP starts with the following:
+ // RIFFXXXXWEBPVP
+ // Where XXXX is unspecified.
+ const char* bytes = static_cast<const char*>(buf);
+ return bytesRead >= 14 && !memcmp(bytes, "RIFF", 4) && !memcmp(&bytes[8], "WEBPVP", 6);
+}
+
+// Parse headers of RIFF container, and check for valid Webp (VP8) content.
+// NOTE: This calls peek instead of read, since onGetPixels will need these
+// bytes again.
+// Returns an SkWebpCodec on success, nullptr on failure.
+SkCodec* SkWebpCodec::NewFromStream(SkStream* stream) {
+ SkAutoTDelete<SkStream> streamDeleter(stream);
+
+ // Webp demux needs a contiguous data buffer.
+ sk_sp<SkData> data = nullptr;
+ if (stream->getMemoryBase()) {
+ // It is safe to use MakeWithoutCopy because we'll hold onto the stream.
+ data = SkData::MakeWithoutCopy(stream->getMemoryBase(), stream->getLength());
+ } else {
+ data = SkCopyStreamToData(stream);
+
+ // If we are forced to copy the stream into an SkData, we can go ahead and delete the stream.
+ streamDeleter.reset(nullptr);
+ }
+
+ // It's a little strange that the |demux| will outlive |webpData|, though it needs the
+ // pointer in |webpData| to remain valid. This works because the pointer remains valid
+ // until the SkData is freed.
+ WebPData webpData = { data->bytes(), data->size() };
+ SkAutoTCallVProc<WebPDemuxer, WebPDemuxDelete> demux(WebPDemuxPartial(&webpData, nullptr));
+ if (nullptr == demux) {
+ return nullptr;
+ }
+
+ WebPChunkIterator chunkIterator;
+ SkAutoTCallVProc<WebPChunkIterator, WebPDemuxReleaseChunkIterator> autoCI(&chunkIterator);
+ sk_sp<SkColorSpace> colorSpace = nullptr;
+ if (WebPDemuxGetChunk(demux, "ICCP", 1, &chunkIterator)) {
+ colorSpace = SkColorSpace::NewICC(chunkIterator.chunk.bytes, chunkIterator.chunk.size);
+ }
+
+ if (!colorSpace) {
+ colorSpace = SkColorSpace::NewNamed(SkColorSpace::kSRGB_Named);
+ }
+
+ // Since we do not yet support animation, we get the |width|, |height|, |color|, and |alpha|
+ // from the first frame. It's the only frame we will decode.
+ //
+ // TODO:
+ // When we support animation, we'll want to report the canvas width and canvas height instead.
+ // We can get these from the |demux| directly.
+ // What |color| and |alpha| will we want to report though? WebP allows different frames
+ // to be encoded in different ways, making the encoded format difficult to describe.
+ WebPIterator frame;
+ SkAutoTCallVProc<WebPIterator, WebPDemuxReleaseIterator> autoFrame(&frame);
+ if (!WebPDemuxGetFrame(demux, 1, &frame)) {
+ return nullptr;
+ }
+
+ // Sanity check for image size that's about to be decoded.
+ {
+ const int64_t size = sk_64_mul(frame.width, frame.height);
+ if (!sk_64_isS32(size)) {
+ return nullptr;
+ }
+ // now check that, at 4 bytes per pixel, we also don't overflow
+ if (sk_64_asS32(size) > (0x7FFFFFFF >> 2)) {
+ return nullptr;
+ }
+ }
+
+ // TODO:
+ // The only reason we actually need to call WebPGetFeatures() is to get the |features.format|.
+ // This call actually re-reads the frame header. Should we suggest that libwebp expose
+ // the format on the |frame|?
+ WebPBitstreamFeatures features;
+ VP8StatusCode status = WebPGetFeatures(frame.fragment.bytes, frame.fragment.size, &features);
+ if (VP8_STATUS_OK != status) {
+ return nullptr;
+ }
+
+ SkEncodedInfo::Color color;
+ SkEncodedInfo::Alpha alpha;
+ switch (features.format) {
+ case 0:
+ // This indicates a "mixed" format. We would see this for
+ // animated webps or for webps encoded in multiple fragments.
+ // I believe that this is a rare case.
+ // We could also guess kYUV here, but I think it makes more
+ // sense to guess kBGRA which is likely closer to the final
+ // output. Otherwise, we might end up converting
+ // BGRA->YUVA->BGRA.
+ color = SkEncodedInfo::kBGRA_Color;
+ alpha = SkEncodedInfo::kUnpremul_Alpha;
+ break;
+ case 1:
+ // This is the lossy format (YUV).
+ if (SkToBool(features.has_alpha)) {
+ color = SkEncodedInfo::kYUVA_Color;
+ alpha = SkEncodedInfo::kUnpremul_Alpha;
+ } else {
+ color = SkEncodedInfo::kYUV_Color;
+ alpha = SkEncodedInfo::kOpaque_Alpha;
+ }
+ break;
+ case 2:
+ // This is the lossless format (BGRA).
+ color = SkEncodedInfo::kBGRA_Color;
+ alpha = SkEncodedInfo::kUnpremul_Alpha;
+ break;
+ default:
+ return nullptr;
+ }
+
+ SkEncodedInfo info = SkEncodedInfo::Make(color, alpha, 8);
+ return new SkWebpCodec(features.width, features.height, info, std::move(colorSpace),
+ streamDeleter.release(), demux.release(), std::move(data));
+}
+
+SkISize SkWebpCodec::onGetScaledDimensions(float desiredScale) const {
+ SkISize dim = this->getInfo().dimensions();
+ // SkCodec treats zero dimensional images as errors, so the minimum size
+ // that we will recommend is 1x1.
+ dim.fWidth = SkTMax(1, SkScalarRoundToInt(desiredScale * dim.fWidth));
+ dim.fHeight = SkTMax(1, SkScalarRoundToInt(desiredScale * dim.fHeight));
+ return dim;
+}
+
+bool SkWebpCodec::onDimensionsSupported(const SkISize& dim) {
+ const SkImageInfo& info = this->getInfo();
+ return dim.width() >= 1 && dim.width() <= info.width()
+ && dim.height() >= 1 && dim.height() <= info.height();
+}
+
+static WEBP_CSP_MODE webp_decode_mode(SkColorType ct, bool premultiply) {
+ switch (ct) {
+ case kBGRA_8888_SkColorType:
+ return premultiply ? MODE_bgrA : MODE_BGRA;
+ case kRGBA_8888_SkColorType:
+ return premultiply ? MODE_rgbA : MODE_RGBA;
+ case kRGB_565_SkColorType:
+ return MODE_RGB_565;
+ default:
+ return MODE_LAST;
+ }
+}
+
+bool SkWebpCodec::onGetValidSubset(SkIRect* desiredSubset) const {
+ if (!desiredSubset) {
+ return false;
+ }
+
+ SkIRect dimensions = SkIRect::MakeSize(this->getInfo().dimensions());
+ if (!dimensions.contains(*desiredSubset)) {
+ return false;
+ }
+
+ // As stated below, libwebp snaps to even left and top. Make sure top and left are even, so we
+ // decode this exact subset.
+ // Leave right and bottom unmodified, so we suggest a slightly larger subset than requested.
+ desiredSubset->fLeft = (desiredSubset->fLeft >> 1) << 1;
+ desiredSubset->fTop = (desiredSubset->fTop >> 1) << 1;
+ return true;
+}
+
+SkCodec::Result SkWebpCodec::onGetPixels(const SkImageInfo& dstInfo, void* dst, size_t rowBytes,
+ const Options& options, SkPMColor*, int*,
+ int* rowsDecodedPtr) {
+ if (!conversion_possible(dstInfo, this->getInfo())) {
+ return kInvalidConversion;
+ }
+
+ std::unique_ptr<SkColorSpaceXform> colorXform = nullptr;
+ if (needs_color_xform(dstInfo, this->getInfo())) {
+ colorXform = SkColorSpaceXform::New(this->getInfo().colorSpace(), dstInfo.colorSpace());
+ SkASSERT(colorXform);
+ }
+
+ WebPDecoderConfig config;
+ if (0 == WebPInitDecoderConfig(&config)) {
+ // ABI mismatch.
+ // FIXME: New enum for this?
+ return kInvalidInput;
+ }
+
+ // Free any memory associated with the buffer. Must be called last, so we declare it first.
+ SkAutoTCallVProc<WebPDecBuffer, WebPFreeDecBuffer> autoFree(&(config.output));
+
+ SkIRect bounds = SkIRect::MakeSize(this->getInfo().dimensions());
+ if (options.fSubset) {
+ // Caller is requesting a subset.
+ if (!bounds.contains(*options.fSubset)) {
+ // The subset is out of bounds.
+ return kInvalidParameters;
+ }
+
+ bounds = *options.fSubset;
+
+ // This is tricky. libwebp snaps the top and left to even values. We could let libwebp
+ // do the snap, and return a subset which is a different one than requested. The problem
+ // with that approach is that the caller may try to stitch subsets together, and if we
+ // returned different subsets than requested, there would be artifacts at the boundaries.
+ // Instead, we report that we cannot support odd values for top and left.
+ if (!SkIsAlign2(bounds.fLeft) || !SkIsAlign2(bounds.fTop)) {
+ return kInvalidParameters;
+ }
+
+#ifdef SK_DEBUG
+ {
+ // Make a copy, since getValidSubset can change its input.
+ SkIRect subset(bounds);
+ // That said, getValidSubset should *not* change its input, in this case; otherwise
+ // getValidSubset does not match the actual subsets we can do.
+ SkASSERT(this->getValidSubset(&subset) && subset == bounds);
+ }
+#endif
+
+ config.options.use_cropping = 1;
+ config.options.crop_left = bounds.fLeft;
+ config.options.crop_top = bounds.fTop;
+ config.options.crop_width = bounds.width();
+ config.options.crop_height = bounds.height();
+ }
+
+ SkISize dstDimensions = dstInfo.dimensions();
+ if (bounds.size() != dstDimensions) {
+ // Caller is requesting scaling.
+ config.options.use_scaling = 1;
+ config.options.scaled_width = dstDimensions.width();
+ config.options.scaled_height = dstDimensions.height();
+ }
+
+ // Swizzling between RGBA and BGRA is zero cost in a color transform. So when we have a
+ // color transform, we should decode to whatever is easiest for libwebp, and then let the
+ // color transform swizzle if necessary.
+ // Lossy webp is encoded as YUV (so RGBA and BGRA are the same cost). Lossless webp is
+ // encoded as BGRA. This means decoding to BGRA is either faster or the same cost as RGBA.
+ config.output.colorspace = colorXform ? MODE_BGRA :
+ webp_decode_mode(dstInfo.colorType(), dstInfo.alphaType() == kPremul_SkAlphaType);
+ config.output.is_external_memory = 1;
+
+ // We will decode the entire image and then perform the color transform. libwebp
+ // does not provide a row-by-row API. This is a shame particularly in the F16 case,
+ // where we need to allocate an extra image-sized buffer.
+ SkAutoTMalloc<uint32_t> pixels;
+ if (kRGBA_F16_SkColorType == dstInfo.colorType()) {
+ pixels.reset(dstDimensions.width() * dstDimensions.height());
+ config.output.u.RGBA.rgba = (uint8_t*) pixels.get();
+ config.output.u.RGBA.stride = (int) dstDimensions.width() * sizeof(uint32_t);
+ config.output.u.RGBA.size = config.output.u.RGBA.stride * dstDimensions.height();
+ } else {
+ config.output.u.RGBA.rgba = (uint8_t*) dst;
+ config.output.u.RGBA.stride = (int) rowBytes;
+ config.output.u.RGBA.size = dstInfo.getSafeSize(rowBytes);
+ }
+
+ WebPIterator frame;
+ SkAutoTCallVProc<WebPIterator, WebPDemuxReleaseIterator> autoFrame(&frame);
+ // If this succeeded in NewFromStream(), it should succeed again here.
+ SkAssertResult(WebPDemuxGetFrame(fDemux, 1, &frame));
+
+ SkAutoTCallVProc<WebPIDecoder, WebPIDelete> idec(WebPIDecode(nullptr, 0, &config));
+ if (!idec) {
+ return kInvalidInput;
+ }
+
+ int rowsDecoded;
+ SkCodec::Result result;
+ switch (WebPIUpdate(idec, frame.fragment.bytes, frame.fragment.size)) {
+ case VP8_STATUS_OK:
+ rowsDecoded = dstInfo.height();
+ result = kSuccess;
+ break;
+ case VP8_STATUS_SUSPENDED:
+ WebPIDecGetRGB(idec, rowsDecodedPtr, nullptr, nullptr, nullptr);
+ rowsDecoded = *rowsDecodedPtr;
+ result = kIncompleteInput;
+ break;
+ default:
+ return kInvalidInput;
+ }
+
+ if (colorXform) {
+ SkColorSpaceXform::ColorFormat dstColorFormat = select_xform_format(dstInfo.colorType());
+ SkAlphaType xformAlphaType = select_xform_alpha(dstInfo.alphaType(),
+ this->getInfo().alphaType());
+
+ uint32_t* src = (uint32_t*) config.output.u.RGBA.rgba;
+ size_t srcRowBytes = config.output.u.RGBA.stride;
+ for (int y = 0; y < rowsDecoded; y++) {
+ colorXform->apply(dst, src, dstInfo.width(), dstColorFormat,
+ SkColorSpaceXform::kBGRA_8888_ColorFormat, xformAlphaType);
+ dst = SkTAddOffset<void>(dst, rowBytes);
+ src = SkTAddOffset<uint32_t>(src, srcRowBytes);
+ }
+ }
+
+ return result;
+}
+
+SkWebpCodec::SkWebpCodec(int width, int height, const SkEncodedInfo& info,
+ sk_sp<SkColorSpace> colorSpace, SkStream* stream, WebPDemuxer* demux,
+ sk_sp<SkData> data)
+ : INHERITED(width, height, info, stream, std::move(colorSpace))
+ , fDemux(demux)
+ , fData(std::move(data))
+{}
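
Two details of the decoder above are easy to miss: IsWebp() only checks the fixed bytes of the RIFF container ("RIFF" at offset 0, "WEBPVP" at offset 8), and onGetValidSubset() snaps the requested left/top down to even coordinates because libwebp crops on even offsets. A small standalone sketch of both, for illustration only and not part of the patch:

    #include <cstdint>
    #include <cstring>

    // Equivalent to the signature test in SkWebpCodec::IsWebp().
    static bool looks_like_webp(const uint8_t* buf, size_t len) {
        return len >= 14 &&
               0 == memcmp(buf, "RIFF", 4) &&
               0 == memcmp(buf + 8, "WEBPVP", 6);
    }

    // Snap a coordinate down to the nearest even value, as onGetValidSubset()
    // does for fLeft and fTop; (x >> 1) << 1 and x & ~1 are equivalent here.
    static int snap_down_to_even(int x) {
        return x & ~1;
    }
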
diff --git a/gfx/skia/skia/src/codec/SkWebpCodec.h b/gfx/skia/skia/src/codec/SkWebpCodec.h
new file mode 100644
index 000000000..b9c493f20
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkWebpCodec.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkWebpCodec_DEFINED
+#define SkWebpCodec_DEFINED
+
+#include "SkCodec.h"
+#include "SkColorSpace.h"
+#include "SkEncodedFormat.h"
+#include "SkImageInfo.h"
+#include "SkTypes.h"
+
+class SkStream;
+extern "C" {
+ struct WebPDemuxer;
+ void WebPDemuxDelete(WebPDemuxer* dmux);
+}
+
+static const size_t WEBP_VP8_HEADER_SIZE = 30;
+
+class SkWebpCodec final : public SkCodec {
+public:
+ // Assumes IsWebp was called and returned true.
+ static SkCodec* NewFromStream(SkStream*);
+ static bool IsWebp(const void*, size_t);
+protected:
+ Result onGetPixels(const SkImageInfo&, void*, size_t, const Options&, SkPMColor*, int*, int*)
+ override;
+ SkEncodedFormat onGetEncodedFormat() const override { return kWEBP_SkEncodedFormat; }
+
+ SkISize onGetScaledDimensions(float desiredScale) const override;
+
+ bool onDimensionsSupported(const SkISize&) override;
+
+ bool onGetValidSubset(SkIRect* /* desiredSubset */) const override;
+private:
+ SkWebpCodec(int width, int height, const SkEncodedInfo&, sk_sp<SkColorSpace>, SkStream*,
+ WebPDemuxer*, sk_sp<SkData>);
+
+ SkAutoTCallVProc<WebPDemuxer, WebPDemuxDelete> fDemux;
+
+ // fDemux has a pointer into this data.
+ // This should not be freed until the decode is completed.
+ sk_sp<SkData> fData;
+
+ typedef SkCodec INHERITED;
+};
+#endif // SkWebpCodec_DEFINED
diff --git a/gfx/skia/skia/src/core/Sk4px.h b/gfx/skia/skia/src/core/Sk4px.h
new file mode 100644
index 000000000..05378a8bc
--- /dev/null
+++ b/gfx/skia/skia/src/core/Sk4px.h
@@ -0,0 +1,241 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk4px_DEFINED
+#define Sk4px_DEFINED
+
+#include "SkNx.h"
+#include "SkColor.h"
+#include "SkColorPriv.h"
+
+// This file may be included multiple times by .cpp files with different flags, leading
+// to different definitions. Usually that doesn't matter because it's all inlined, but
+// in Debug modes the compilers may not inline everything. So wrap everything in an
+// anonymous namespace to give each includer their own silo of this code (or the linker
+// will probably pick one randomly for us, which is rarely correct).
+namespace {
+
+// 1, 2 or 4 SkPMColors, generally vectorized.
+class Sk4px : public Sk16b {
+public:
+ static Sk4px DupAlpha(SkAlpha a) { return Sk16b(a); } // a -> aaaa aaaa aaaa aaaa
+ static Sk4px DupPMColor(SkPMColor c); // argb -> argb argb argb argb
+
+ Sk4px(const Sk16b& v) : INHERITED(v) {}
+
+ Sk4px alphas() const; // ARGB argb XYZW xyzw -> AAAA aaaa XXXX xxxx
+
+ // Mask away color or alpha lanes.
+ Sk4px zeroColors() const; // ARGB argb XYZW xyzw -> A000 a000 X000 x000
+ Sk4px zeroAlphas() const; // ARGB argb XYZW xyzw -> 0RGB 0rgb 0YZW 0yzw
+
+ Sk4px inv() const { return Sk16b(255) - *this; }
+
+ // When loading or storing fewer than 4 SkPMColors, we use the low lanes.
+ static Sk4px Load4(const SkPMColor[4]); // PMColor[4] -> ARGB argb XYZW xyzw
+ static Sk4px Load2(const SkPMColor[2]); // PMColor[2] -> ARGB argb ???? ????
+ static Sk4px Load1(const SkPMColor[1]); // PMColor[1] -> ARGB ???? ???? ????
+
+ // Ditto for Alphas... Load2Alphas fills the low two lanes of Sk4px.
+ static Sk4px Load4Alphas(const SkAlpha[4]); // AaXx -> AAAA aaaa XXXX xxxx
+ static Sk4px Load2Alphas(const SkAlpha[2]); // Aa -> AAAA aaaa ???? ????
+
+ void store4(SkPMColor[4]) const;
+ void store2(SkPMColor[2]) const;
+ void store1(SkPMColor[1]) const;
+
+ // 1, 2, or 4 SkPMColors with 16-bit components.
+ // This is most useful as the result of a multiply, e.g. from mulWiden().
+ class Wide : public Sk16h {
+ public:
+ Wide(const Sk16h& v) : Sk16h(v) {}
+
+ // Pack the top byte of each component back down into 4 SkPMColors.
+ Sk4px addNarrowHi(const Sk16h&) const;
+
+ // Rounds, i.e. (x+127) / 255.
+ Sk4px div255() const;
+
+ // These just keep the types as Wide so the user doesn't have to keep casting.
+ Wide operator * (const Wide& o) const { return INHERITED::operator*(o); }
+ Wide operator + (const Wide& o) const { return INHERITED::operator+(o); }
+ Wide operator - (const Wide& o) const { return INHERITED::operator-(o); }
+ Wide operator >> (int bits) const { return INHERITED::operator>>(bits); }
+ Wide operator << (int bits) const { return INHERITED::operator<<(bits); }
+ static Wide Min(const Wide& a, const Wide& b) { return INHERITED::Min(a,b); }
+ Wide thenElse(const Wide& t, const Wide& e) const { return INHERITED::thenElse(t,e); }
+
+ private:
+ typedef Sk16h INHERITED;
+ };
+
+ Wide widenLo() const; // ARGB -> 0A 0R 0G 0B
+ Wide widenHi() const; // ARGB -> A0 R0 G0 B0
+ Wide widenLoHi() const; // ARGB -> AA RR GG BB
+ Wide mulWiden(const Sk16b&) const; // 8-bit x 8-bit -> 16-bit components.
+
+ // The only 8-bit multiply we use is 8-bit x 8-bit -> 16-bit. Might as well make it pithy.
+ Wide operator * (const Sk4px& o) const { return this->mulWiden(o); }
+
+ // These just keep the types as Sk4px so the user doesn't have to keep casting.
+ Sk4px operator + (const Sk4px& o) const { return INHERITED::operator+(o); }
+ Sk4px operator - (const Sk4px& o) const { return INHERITED::operator-(o); }
+ Sk4px operator < (const Sk4px& o) const { return INHERITED::operator<(o); }
+ Sk4px thenElse(const Sk4px& t, const Sk4px& e) const { return INHERITED::thenElse(t,e); }
+
+ // Generally faster than (*this * o).div255().
+ // May be incorrect by +-1, but is always exactly correct when *this or o is 0 or 255.
+ Sk4px approxMulDiv255(const Sk16b& o) const {
+ // (x*y + x) / 256 meets these criteria. (As of course does (x*y + y) / 256 by symmetry.)
+ // FYI: (x*y + 255) / 256 also meets these criteria. In my brief testing, it was slower.
+ return this->widenLo().addNarrowHi(*this * o);
+ }
+
+ // A generic driver that maps fn over a src array into a dst array.
+ // fn should take an Sk4px (4 src pixels) and return an Sk4px (4 dst pixels).
+ template <typename Fn>
+ static void MapSrc(int n, SkPMColor* dst, const SkPMColor* src, const Fn& fn) {
+ SkASSERT(dst);
+ SkASSERT(src);
+ // This looks a bit odd, but it helps loop-invariant hoisting across different calls to fn.
+ // Basically, we need to make sure we keep things inside a single loop.
+ while (n > 0) {
+ if (n >= 8) {
+ Sk4px dst0 = fn(Load4(src+0)),
+ dst4 = fn(Load4(src+4));
+ dst0.store4(dst+0);
+ dst4.store4(dst+4);
+ dst += 8; src += 8; n -= 8;
+ continue; // Keep our stride at 8 pixels as long as possible.
+ }
+ SkASSERT(n <= 7);
+ if (n >= 4) {
+ fn(Load4(src)).store4(dst);
+ dst += 4; src += 4; n -= 4;
+ }
+ if (n >= 2) {
+ fn(Load2(src)).store2(dst);
+ dst += 2; src += 2; n -= 2;
+ }
+ if (n >= 1) {
+ fn(Load1(src)).store1(dst);
+ }
+ break;
+ }
+ }
+
+ // As above, but with dst4' = fn(dst4, src4).
+ template <typename Fn>
+ static void MapDstSrc(int n, SkPMColor* dst, const SkPMColor* src, const Fn& fn) {
+ SkASSERT(dst);
+ SkASSERT(src);
+ while (n > 0) {
+ if (n >= 8) {
+ Sk4px dst0 = fn(Load4(dst+0), Load4(src+0)),
+ dst4 = fn(Load4(dst+4), Load4(src+4));
+ dst0.store4(dst+0);
+ dst4.store4(dst+4);
+ dst += 8; src += 8; n -= 8;
+ continue; // Keep our stride at 8 pixels as long as possible.
+ }
+ SkASSERT(n <= 7);
+ if (n >= 4) {
+ fn(Load4(dst), Load4(src)).store4(dst);
+ dst += 4; src += 4; n -= 4;
+ }
+ if (n >= 2) {
+ fn(Load2(dst), Load2(src)).store2(dst);
+ dst += 2; src += 2; n -= 2;
+ }
+ if (n >= 1) {
+ fn(Load1(dst), Load1(src)).store1(dst);
+ }
+ break;
+ }
+ }
+
+ // As above, but with dst4' = fn(dst4, alpha4).
+ template <typename Fn>
+ static void MapDstAlpha(int n, SkPMColor* dst, const SkAlpha* a, const Fn& fn) {
+ SkASSERT(dst);
+ SkASSERT(a);
+ while (n > 0) {
+ if (n >= 8) {
+ Sk4px dst0 = fn(Load4(dst+0), Load4Alphas(a+0)),
+ dst4 = fn(Load4(dst+4), Load4Alphas(a+4));
+ dst0.store4(dst+0);
+ dst4.store4(dst+4);
+ dst += 8; a += 8; n -= 8;
+ continue; // Keep our stride at 8 pixels as long as possible.
+ }
+ SkASSERT(n <= 7);
+ if (n >= 4) {
+ fn(Load4(dst), Load4Alphas(a)).store4(dst);
+ dst += 4; a += 4; n -= 4;
+ }
+ if (n >= 2) {
+ fn(Load2(dst), Load2Alphas(a)).store2(dst);
+ dst += 2; a += 2; n -= 2;
+ }
+ if (n >= 1) {
+ fn(Load1(dst), DupAlpha(*a)).store1(dst);
+ }
+ break;
+ }
+ }
+
+ // As above, but with dst4' = fn(dst4, src4, alpha4).
+ template <typename Fn>
+ static void MapDstSrcAlpha(int n, SkPMColor* dst, const SkPMColor* src, const SkAlpha* a,
+ const Fn& fn) {
+ SkASSERT(dst);
+ SkASSERT(src);
+ SkASSERT(a);
+ while (n > 0) {
+ if (n >= 8) {
+ Sk4px dst0 = fn(Load4(dst+0), Load4(src+0), Load4Alphas(a+0)),
+ dst4 = fn(Load4(dst+4), Load4(src+4), Load4Alphas(a+4));
+ dst0.store4(dst+0);
+ dst4.store4(dst+4);
+ dst += 8; src += 8; a += 8; n -= 8;
+ continue; // Keep our stride at 8 pixels as long as possible.
+ }
+ SkASSERT(n <= 7);
+ if (n >= 4) {
+ fn(Load4(dst), Load4(src), Load4Alphas(a)).store4(dst);
+ dst += 4; src += 4; a += 4; n -= 4;
+ }
+ if (n >= 2) {
+ fn(Load2(dst), Load2(src), Load2Alphas(a)).store2(dst);
+ dst += 2; src += 2; a += 2; n -= 2;
+ }
+ if (n >= 1) {
+ fn(Load1(dst), Load1(src), DupAlpha(*a)).store1(dst);
+ }
+ break;
+ }
+ }
+
+private:
+ typedef Sk16b INHERITED;
+};
+
+} // namespace
+
+#ifdef SKNX_NO_SIMD
+ #include "../opts/Sk4px_none.h"
+#else
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include "../opts/Sk4px_SSE2.h"
+ #elif defined(SK_ARM_HAS_NEON)
+ #include "../opts/Sk4px_NEON.h"
+ #else
+ #include "../opts/Sk4px_none.h"
+ #endif
+#endif
+
+#endif//Sk4px_DEFINED
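
As a usage illustration (not part of the patch), the MapDstSrc() driver combines with approxMulDiv255() to express a whole blend in a few lines. The sketch below assumes Sk4px.h and SkColor.h are included and implements a premultiplied source-over blend across n pixels:

    // dst' = src + dst * (255 - srcAlpha) / 255, using the approximate
    // divide-by-255 described above (exact when either factor is 0 or 255).
    static void blend_srcover(int n, SkPMColor dst[], const SkPMColor src[]) {
        Sk4px::MapDstSrc(n, dst, src, [](const Sk4px& d, const Sk4px& s) {
            return s + d.approxMulDiv255(s.alphas().inv());
        });
    }
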
diff --git a/gfx/skia/skia/src/core/Sk4x4f.h b/gfx/skia/skia/src/core/Sk4x4f.h
new file mode 100644
index 000000000..9bd91973d
--- /dev/null
+++ b/gfx/skia/skia/src/core/Sk4x4f.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk4x4f_DEFINED
+#define Sk4x4f_DEFINED
+
+#include "SkNx.h"
+
+struct Sk4x4f {
+ Sk4f r,g,b,a;
+
+ static Sk4x4f Transpose(const Sk4f&, const Sk4f&, const Sk4f&, const Sk4f&);
+ static Sk4x4f Transpose(const float[16]);
+ static Sk4x4f Transpose(const uint8_t[16]);
+
+ void transpose(Sk4f* x, Sk4f* y, Sk4f* z, Sk4f* w) const {
+ auto t = Transpose(r,g,b,a);
+ *x = t.r;
+ *y = t.g;
+ *z = t.b;
+ *w = t.a;
+ }
+ void transpose( float[16]) const;
+ void transpose(uint8_t[16]) const;
+};
+
+#if 1 && !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+
+inline Sk4x4f Sk4x4f::Transpose(const Sk4f& x, const Sk4f& y, const Sk4f& z, const Sk4f& w) {
+ auto r = x.fVec,
+ g = y.fVec,
+ b = z.fVec,
+ a = w.fVec;
+ _MM_TRANSPOSE4_PS(r,g,b,a);
+ return { r,g,b,a };
+}
+
+inline Sk4x4f Sk4x4f::Transpose(const float fs[16]) {
+ return Transpose(Sk4f::Load(fs+0), Sk4f::Load(fs+4), Sk4f::Load(fs+8), Sk4f::Load(fs+12));
+}
+
+inline Sk4x4f Sk4x4f::Transpose(const uint8_t bs[16]) {
+ auto b16 = _mm_loadu_si128((const __m128i*)bs);
+
+ auto mask = _mm_set1_epi32(0xFF);
+ auto r = _mm_cvtepi32_ps(_mm_and_si128(mask, (b16 ))),
+ g = _mm_cvtepi32_ps(_mm_and_si128(mask, _mm_srli_epi32(b16, 8))),
+ b = _mm_cvtepi32_ps(_mm_and_si128(mask, _mm_srli_epi32(b16, 16))),
+ a = _mm_cvtepi32_ps( _mm_srli_epi32(b16, 24));
+ return { r,g,b,a };
+}
+
+inline void Sk4x4f::transpose(float fs[16]) const {
+ Sk4f x,y,z,w;
+ this->transpose(&x,&y,&z,&w);
+ x.store(fs+ 0);
+ y.store(fs+ 4);
+ z.store(fs+ 8);
+ w.store(fs+12);
+}
+
+inline void Sk4x4f::transpose(uint8_t bs[16]) const {
+ auto R = _mm_cvttps_epi32(r.fVec),
+ G = _mm_slli_epi32(_mm_cvttps_epi32(g.fVec), 8),
+ B = _mm_slli_epi32(_mm_cvttps_epi32(b.fVec), 16),
+ A = _mm_slli_epi32(_mm_cvttps_epi32(a.fVec), 24);
+ _mm_storeu_si128((__m128i*)bs, _mm_or_si128(A, _mm_or_si128(B, _mm_or_si128(G, R))));
+}
+
+#elif defined(SK_ARM_HAS_NEON)
+
+inline Sk4x4f Sk4x4f::Transpose(const Sk4f& x, const Sk4f& y, const Sk4f& z, const Sk4f& w) {
+ float32x4x2_t xy = vuzpq_f32(x.fVec, y.fVec),
+ zw = vuzpq_f32(z.fVec, w.fVec),
+ rb = vuzpq_f32(xy.val[0], zw.val[0]),
+ ga = vuzpq_f32(xy.val[1], zw.val[1]);
+ return { rb.val[0], ga.val[0], rb.val[1], ga.val[1] };
+}
+
+inline Sk4x4f Sk4x4f::Transpose(const float fs[16]) {
+ float32x4x4_t v = vld4q_f32(fs);
+ return { v.val[0], v.val[1], v.val[2], v.val[3] };
+}
+
+inline Sk4x4f Sk4x4f::Transpose(const uint8_t bs[16]) {
+ auto b16 = vreinterpretq_u32_u8(vld1q_u8(bs));
+ auto r = vcvtq_f32_u32(vandq_u32(vdupq_n_u32(0x000000FF), b16) ),
+ g = vcvtq_n_f32_u32(vandq_u32(vdupq_n_u32(0x0000FF00), b16), 8),
+ b = vcvtq_n_f32_u32(vandq_u32(vdupq_n_u32(0x00FF0000), b16), 16),
+ a = vcvtq_n_f32_u32(vandq_u32(vdupq_n_u32(0xFF000000), b16), 24);
+ return { r,g,b,a };
+}
+
+inline void Sk4x4f::transpose(float fs[16]) const {
+ float32x4x4_t v = {{ r.fVec, g.fVec, b.fVec, a.fVec }};
+ vst4q_f32(fs, v);
+}
+
+inline void Sk4x4f::transpose(uint8_t bs[16]) const {
+ auto R = vandq_u32(vdupq_n_u32(0x000000FF), vcvtq_u32_f32(r.fVec )),
+ G = vandq_u32(vdupq_n_u32(0x0000FF00), vcvtq_n_u32_f32(g.fVec, 8)),
+ B = vandq_u32(vdupq_n_u32(0x00FF0000), vcvtq_n_u32_f32(b.fVec, 16)),
+ A = vandq_u32(vdupq_n_u32(0xFF000000), vcvtq_n_u32_f32(a.fVec, 24));
+ vst1q_u8(bs, vreinterpretq_u8_u32(vorrq_u32(A, vorrq_u32(B, vorrq_u32(G, R)))));
+}
+
+#else
+
+inline Sk4x4f Sk4x4f::Transpose(const Sk4f& x, const Sk4f& y, const Sk4f& z, const Sk4f& w) {
+ return {
+ { x[0], y[0], z[0], w[0] },
+ { x[1], y[1], z[1], w[1] },
+ { x[2], y[2], z[2], w[2] },
+ { x[3], y[3], z[3], w[3] },
+ };
+}
+
+inline Sk4x4f Sk4x4f::Transpose(const float fs[16]) {
+ return Transpose(Sk4f::Load(fs+0), Sk4f::Load(fs+4), Sk4f::Load(fs+8), Sk4f::Load(fs+12));
+}
+
+inline Sk4x4f Sk4x4f::Transpose(const uint8_t bs[16]) {
+ return {
+ { (float)bs[0], (float)bs[4], (float)bs[ 8], (float)bs[12] },
+ { (float)bs[1], (float)bs[5], (float)bs[ 9], (float)bs[13] },
+ { (float)bs[2], (float)bs[6], (float)bs[10], (float)bs[14] },
+ { (float)bs[3], (float)bs[7], (float)bs[11], (float)bs[15] },
+ };
+}
+
+inline void Sk4x4f::transpose(float fs[16]) const {
+ Sk4f x,y,z,w;
+ this->transpose(&x,&y,&z,&w);
+ x.store(fs+ 0);
+ y.store(fs+ 4);
+ z.store(fs+ 8);
+ w.store(fs+12);
+}
+
+inline void Sk4x4f::transpose(uint8_t bs[16]) const {
+ bs[ 0] = (uint8_t)r[0]; bs[ 1] = (uint8_t)g[0]; bs[ 2] = (uint8_t)b[0]; bs[ 3] = (uint8_t)a[0];
+ bs[ 4] = (uint8_t)r[1]; bs[ 5] = (uint8_t)g[1]; bs[ 6] = (uint8_t)b[1]; bs[ 7] = (uint8_t)a[1];
+ bs[ 8] = (uint8_t)r[2]; bs[ 9] = (uint8_t)g[2]; bs[10] = (uint8_t)b[2]; bs[11] = (uint8_t)a[2];
+ bs[12] = (uint8_t)r[3]; bs[13] = (uint8_t)g[3]; bs[14] = (uint8_t)b[3]; bs[15] = (uint8_t)a[3];
+}
+
+#endif
+
+#endif//Sk4x4f_DEFINED
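
A short usage sketch for Sk4x4f (again assuming the header is included; not part of the patch): transposing 16 interleaved RGBA bytes gives one Sk4f per channel, which makes channel-wise float math straightforward before repacking.

    // Halve the alpha channel of four packed RGBA pixels in float, then
    // repack; the byte conversion truncates, as in transpose(uint8_t[16]).
    static void halve_alpha(uint8_t px[16]) {
        Sk4x4f v = Sk4x4f::Transpose(px);   // -> r, g, b, a as Sk4f lanes
        v.a = v.a * 0.5f;                   // operate on one channel at a time
        v.transpose(px);                    // back to interleaved bytes
    }
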
diff --git a/gfx/skia/skia/src/core/SkAAClip.cpp b/gfx/skia/skia/src/core/SkAAClip.cpp
new file mode 100644
index 000000000..088ee5584
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAAClip.cpp
@@ -0,0 +1,2220 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAAClip.h"
+#include "SkAtomics.h"
+#include "SkBlitter.h"
+#include "SkColorPriv.h"
+#include "SkPath.h"
+#include "SkScan.h"
+#include "SkUtils.h"
+
+class AutoAAClipValidate {
+public:
+ AutoAAClipValidate(const SkAAClip& clip) : fClip(clip) {
+ fClip.validate();
+ }
+ ~AutoAAClipValidate() {
+ fClip.validate();
+ }
+private:
+ const SkAAClip& fClip;
+};
+
+#ifdef SK_DEBUG
+ #define AUTO_AACLIP_VALIDATE(clip) AutoAAClipValidate acv(clip)
+#else
+ #define AUTO_AACLIP_VALIDATE(clip)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define kMaxInt32 0x7FFFFFFF
+
+#ifdef SK_DEBUG
+static inline bool x_in_rect(int x, const SkIRect& rect) {
+ return (unsigned)(x - rect.fLeft) < (unsigned)rect.width();
+}
+#endif
+
+static inline bool y_in_rect(int y, const SkIRect& rect) {
+ return (unsigned)(y - rect.fTop) < (unsigned)rect.height();
+}
+
+/*
+ * Data runs are packed [count, alpha]
+ */
+
+struct SkAAClip::YOffset {
+ int32_t fY;
+ uint32_t fOffset;
+};
+
+struct SkAAClip::RunHead {
+ int32_t fRefCnt;
+ int32_t fRowCount;
+ size_t fDataSize;
+
+ YOffset* yoffsets() {
+ return (YOffset*)((char*)this + sizeof(RunHead));
+ }
+ const YOffset* yoffsets() const {
+ return (const YOffset*)((const char*)this + sizeof(RunHead));
+ }
+ uint8_t* data() {
+ return (uint8_t*)(this->yoffsets() + fRowCount);
+ }
+ const uint8_t* data() const {
+ return (const uint8_t*)(this->yoffsets() + fRowCount);
+ }
+
+ static RunHead* Alloc(int rowCount, size_t dataSize) {
+ size_t size = sizeof(RunHead) + rowCount * sizeof(YOffset) + dataSize;
+ RunHead* head = (RunHead*)sk_malloc_throw(size);
+ head->fRefCnt = 1;
+ head->fRowCount = rowCount;
+ head->fDataSize = dataSize;
+ return head;
+ }
+
+ static int ComputeRowSizeForWidth(int width) {
+ // 2 bytes per segment, where each segment can store up to 255 for count
+ int segments = 0;
+ while (width > 0) {
+ segments += 1;
+ int n = SkMin32(width, 255);
+ width -= n;
+ }
+ return segments * 2; // each segment is row[0] + row[1] (n + alpha)
+ }
+
+ static RunHead* AllocRect(const SkIRect& bounds) {
+ SkASSERT(!bounds.isEmpty());
+ int width = bounds.width();
+ size_t rowSize = ComputeRowSizeForWidth(width);
+ RunHead* head = RunHead::Alloc(1, rowSize);
+ YOffset* yoff = head->yoffsets();
+ yoff->fY = bounds.height() - 1;
+ yoff->fOffset = 0;
+ uint8_t* row = head->data();
+ while (width > 0) {
+ int n = SkMin32(width, 255);
+ row[0] = n;
+ row[1] = 0xFF;
+ width -= n;
+ row += 2;
+ }
+ return head;
+ }
+};
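// --- Illustrative sketch (editor's addition, not part of the patch) ---
// Row data is packed as [count, alpha] byte pairs that together cover the
// full bounds width. For a 10-pixel row of 3 transparent, 5 opaque and
// 2 half-covered pixels, the packed form is { 3,0x00, 5,0xFF, 2,0x80 }.
// Expanding such a row back to per-pixel coverage mirrors the walk that
// compute_row_length() and findX() perform below:
static void expand_rle_row(const uint8_t* row, int width, uint8_t* out) {
    while (width > 0) {
        int n = row[0];            // run length, 1..255
        uint8_t alpha = row[1];    // coverage shared by the whole run
        for (int i = 0; i < n; ++i) {
            *out++ = alpha;
        }
        row += 2;
        width -= n;
    }
}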
+
+class SkAAClip::Iter {
+public:
+ Iter(const SkAAClip&);
+
+ bool done() const { return fDone; }
+ int top() const { return fTop; }
+ int bottom() const { return fBottom; }
+ const uint8_t* data() const { return fData; }
+ void next();
+
+private:
+ const YOffset* fCurrYOff;
+ const YOffset* fStopYOff;
+ const uint8_t* fData;
+
+ int fTop, fBottom;
+ bool fDone;
+};
+
+SkAAClip::Iter::Iter(const SkAAClip& clip) {
+ if (clip.isEmpty()) {
+ fDone = true;
+ fTop = fBottom = clip.fBounds.fBottom;
+ fData = nullptr;
+ fCurrYOff = nullptr;
+ fStopYOff = nullptr;
+ return;
+ }
+
+ const RunHead* head = clip.fRunHead;
+ fCurrYOff = head->yoffsets();
+ fStopYOff = fCurrYOff + head->fRowCount;
+ fData = head->data() + fCurrYOff->fOffset;
+
+ // setup first value
+ fTop = clip.fBounds.fTop;
+ fBottom = clip.fBounds.fTop + fCurrYOff->fY + 1;
+ fDone = false;
+}
+
+void SkAAClip::Iter::next() {
+ if (!fDone) {
+ const YOffset* prev = fCurrYOff;
+ const YOffset* curr = prev + 1;
+ SkASSERT(curr <= fStopYOff);
+
+ fTop = fBottom;
+ if (curr >= fStopYOff) {
+ fDone = true;
+ fBottom = kMaxInt32;
+ fData = nullptr;
+ } else {
+ fBottom += curr->fY - prev->fY;
+ fData += curr->fOffset - prev->fOffset;
+ fCurrYOff = curr;
+ }
+ }
+}
+
+#ifdef SK_DEBUG
+// assert we're exactly width-wide, and then return the number of bytes used
+static size_t compute_row_length(const uint8_t row[], int width) {
+ const uint8_t* origRow = row;
+ while (width > 0) {
+ int n = row[0];
+ SkASSERT(n > 0);
+ SkASSERT(n <= width);
+ row += 2;
+ width -= n;
+ }
+ SkASSERT(0 == width);
+ return row - origRow;
+}
+
+void SkAAClip::validate() const {
+ if (nullptr == fRunHead) {
+ SkASSERT(fBounds.isEmpty());
+ return;
+ }
+ SkASSERT(!fBounds.isEmpty());
+
+ const RunHead* head = fRunHead;
+ SkASSERT(head->fRefCnt > 0);
+ SkASSERT(head->fRowCount > 0);
+
+ const YOffset* yoff = head->yoffsets();
+ const YOffset* ystop = yoff + head->fRowCount;
+ const int lastY = fBounds.height() - 1;
+
+ // Y and offset must be monotonic
+ int prevY = -1;
+ int32_t prevOffset = -1;
+ while (yoff < ystop) {
+ SkASSERT(prevY < yoff->fY);
+ SkASSERT(yoff->fY <= lastY);
+ prevY = yoff->fY;
+ SkASSERT(prevOffset < (int32_t)yoff->fOffset);
+ prevOffset = yoff->fOffset;
+ const uint8_t* row = head->data() + yoff->fOffset;
+ size_t rowLength = compute_row_length(row, fBounds.width());
+ SkASSERT(yoff->fOffset + rowLength <= head->fDataSize);
+ yoff += 1;
+ }
+ // check the last entry
+ --yoff;
+ SkASSERT(yoff->fY == lastY);
+}
+
+static void dump_one_row(const uint8_t* SK_RESTRICT row,
+ int width, int leading_num) {
+ if (leading_num) {
+ SkDebugf( "%03d ", leading_num );
+ }
+ while (width > 0) {
+ int n = row[0];
+ int val = row[1];
+ char out = '.';
+ if (val == 0xff) {
+ out = '*';
+ } else if (val > 0) {
+ out = '+';
+ }
+ for (int i = 0 ; i < n ; i++) {
+ SkDebugf( "%c", out );
+ }
+ row += 2;
+ width -= n;
+ }
+ SkDebugf( "\n" );
+}
+
+void SkAAClip::debug(bool compress_y) const {
+ Iter iter(*this);
+ const int width = fBounds.width();
+
+ int y = fBounds.fTop;
+ while (!iter.done()) {
+ if (compress_y) {
+ dump_one_row(iter.data(), width, iter.bottom() - iter.top() + 1);
+ } else {
+ do {
+ dump_one_row(iter.data(), width, 0);
+ } while (++y < iter.bottom());
+ }
+ iter.next();
+ }
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Count the number of zeros on the left and right edges of the passed-in
+// RLE row. If 'row' is all zeros, return 'width' in both variables.
+static void count_left_right_zeros(const uint8_t* row, int width,
+ int* leftZ, int* riteZ) {
+ int zeros = 0;
+ do {
+ if (row[1]) {
+ break;
+ }
+ int n = row[0];
+ SkASSERT(n > 0);
+ SkASSERT(n <= width);
+ zeros += n;
+ row += 2;
+ width -= n;
+ } while (width > 0);
+ *leftZ = zeros;
+
+ if (0 == width) {
+ // this line is completely empty; return 'width' in both variables
+ *riteZ = *leftZ;
+ return;
+ }
+
+ zeros = 0;
+ while (width > 0) {
+ int n = row[0];
+ SkASSERT(n > 0);
+ if (0 == row[1]) {
+ zeros += n;
+ } else {
+ zeros = 0;
+ }
+ row += 2;
+ width -= n;
+ }
+ *riteZ = zeros;
+}
+
+#ifdef SK_DEBUG
+static void test_count_left_right_zeros() {
+ static bool gOnce;
+ if (gOnce) {
+ return;
+ }
+ gOnce = true;
+
+ const uint8_t data0[] = { 0, 0, 10, 0xFF };
+ const uint8_t data1[] = { 0, 0, 5, 0xFF, 2, 0, 3, 0xFF };
+ const uint8_t data2[] = { 7, 0, 5, 0, 2, 0, 3, 0xFF };
+ const uint8_t data3[] = { 0, 5, 5, 0xFF, 2, 0, 3, 0 };
+ const uint8_t data4[] = { 2, 3, 2, 0, 5, 0xFF, 3, 0 };
+ const uint8_t data5[] = { 10, 10, 10, 0 };
+ const uint8_t data6[] = { 2, 2, 2, 0, 2, 0xFF, 2, 0, 2, 0xFF, 2, 0 };
+
+ const uint8_t* array[] = {
+ data0, data1, data2, data3, data4, data5, data6
+ };
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(array); ++i) {
+ const uint8_t* data = array[i];
+ const int expectedL = *data++;
+ const int expectedR = *data++;
+ int L = 12345, R = 12345;
+ count_left_right_zeros(data, 10, &L, &R);
+ SkASSERT(expectedL == L);
+ SkASSERT(expectedR == R);
+ }
+}
+#endif
+
+// modify row in place, trimming off zeros from the left and right sides.
+// return the number of bytes that were completely eliminated from the left
+static int trim_row_left_right(uint8_t* row, int width, int leftZ, int riteZ) {
+ int trim = 0;
+ while (leftZ > 0) {
+ SkASSERT(0 == row[1]);
+ int n = row[0];
+ SkASSERT(n > 0);
+ SkASSERT(n <= width);
+ width -= n;
+ row += 2;
+ if (n > leftZ) {
+ row[-2] = n - leftZ;
+ break;
+ }
+ trim += 2;
+ leftZ -= n;
+ SkASSERT(leftZ >= 0);
+ }
+
+ if (riteZ) {
+ // walk row to the end, and then we'll back up to trim riteZ
+ while (width > 0) {
+ int n = row[0];
+ SkASSERT(n <= width);
+ width -= n;
+ row += 2;
+ }
+ // now skip whole runs of zeros
+ do {
+ row -= 2;
+ SkASSERT(0 == row[1]);
+ int n = row[0];
+ SkASSERT(n > 0);
+ if (n > riteZ) {
+ row[0] = n - riteZ;
+ break;
+ }
+ riteZ -= n;
+ SkASSERT(riteZ >= 0);
+ } while (riteZ > 0);
+ }
+
+ return trim;
+}
+
+#ifdef SK_DEBUG
+// assert that this row is exactly this width
+static void assert_row_width(const uint8_t* row, int width) {
+ while (width > 0) {
+ int n = row[0];
+ SkASSERT(n > 0);
+ SkASSERT(n <= width);
+ width -= n;
+ row += 2;
+ }
+ SkASSERT(0 == width);
+}
+
+static void test_trim_row_left_right() {
+ static bool gOnce;
+ if (gOnce) {
+ return;
+ }
+ gOnce = true;
+
+ uint8_t data0[] = { 0, 0, 0, 10, 10, 0xFF };
+ uint8_t data1[] = { 2, 0, 0, 10, 5, 0, 2, 0, 3, 0xFF };
+ uint8_t data2[] = { 5, 0, 2, 10, 5, 0, 2, 0, 3, 0xFF };
+ uint8_t data3[] = { 6, 0, 2, 10, 5, 0, 2, 0, 3, 0xFF };
+ uint8_t data4[] = { 0, 0, 0, 10, 2, 0, 2, 0xFF, 2, 0, 2, 0xFF, 2, 0 };
+ uint8_t data5[] = { 1, 0, 0, 10, 2, 0, 2, 0xFF, 2, 0, 2, 0xFF, 2, 0 };
+ uint8_t data6[] = { 0, 1, 0, 10, 2, 0, 2, 0xFF, 2, 0, 2, 0xFF, 2, 0 };
+ uint8_t data7[] = { 1, 1, 0, 10, 2, 0, 2, 0xFF, 2, 0, 2, 0xFF, 2, 0 };
+ uint8_t data8[] = { 2, 2, 2, 10, 2, 0, 2, 0xFF, 2, 0, 2, 0xFF, 2, 0 };
+ uint8_t data9[] = { 5, 2, 4, 10, 2, 0, 2, 0, 2, 0, 2, 0xFF, 2, 0 };
+ uint8_t data10[] ={ 74, 0, 4, 150, 9, 0, 65, 0, 76, 0xFF };
+
+ uint8_t* array[] = {
+ data0, data1, data2, data3, data4,
+ data5, data6, data7, data8, data9,
+ data10
+ };
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(array); ++i) {
+ uint8_t* data = array[i];
+ const int trimL = *data++;
+ const int trimR = *data++;
+ const int expectedSkip = *data++;
+ const int origWidth = *data++;
+ assert_row_width(data, origWidth);
+ int skip = trim_row_left_right(data, origWidth, trimL, trimR);
+ SkASSERT(expectedSkip == skip);
+ int expectedWidth = origWidth - trimL - trimR;
+ assert_row_width(data + skip, expectedWidth);
+ }
+}
+#endif
+
+bool SkAAClip::trimLeftRight() {
+ SkDEBUGCODE(test_trim_row_left_right();)
+
+ if (this->isEmpty()) {
+ return false;
+ }
+
+ AUTO_AACLIP_VALIDATE(*this);
+
+ const int width = fBounds.width();
+ RunHead* head = fRunHead;
+ YOffset* yoff = head->yoffsets();
+ YOffset* stop = yoff + head->fRowCount;
+ uint8_t* base = head->data();
+
+ // After this loop, 'leftZeros' & 'riteZeros' will contain the minimum
+ // number of zeros on the left and right of the clip. This information
+ // can be used to shrink the bounding box.
+ int leftZeros = width;
+ int riteZeros = width;
+ while (yoff < stop) {
+ int L, R;
+ count_left_right_zeros(base + yoff->fOffset, width, &L, &R);
+ SkASSERT(L + R < width || (L == width && R == width));
+ if (L < leftZeros) {
+ leftZeros = L;
+ }
+ if (R < riteZeros) {
+ riteZeros = R;
+ }
+ if (0 == (leftZeros | riteZeros)) {
+ // no trimming to do
+ return true;
+ }
+ yoff += 1;
+ }
+
+ SkASSERT(leftZeros || riteZeros);
+ if (width == leftZeros) {
+ SkASSERT(width == riteZeros);
+ return this->setEmpty();
+ }
+
+ this->validate();
+
+ fBounds.fLeft += leftZeros;
+ fBounds.fRight -= riteZeros;
+ SkASSERT(!fBounds.isEmpty());
+
+ // For now we don't realloc the storage (for time), we just shrink in place.
+ // This means we don't have to do any memmoves either, since we can just
+ // play tricks with the yoff->fOffset for each row.
+ yoff = head->yoffsets();
+ while (yoff < stop) {
+ uint8_t* row = base + yoff->fOffset;
+ SkDEBUGCODE((void)compute_row_length(row, width);)
+ yoff->fOffset += trim_row_left_right(row, width, leftZeros, riteZeros);
+ SkDEBUGCODE((void)compute_row_length(base + yoff->fOffset, width - leftZeros - riteZeros);)
+ yoff += 1;
+ }
+ return true;
+}
+
+static bool row_is_all_zeros(const uint8_t* row, int width) {
+ SkASSERT(width > 0);
+ do {
+ if (row[1]) {
+ return false;
+ }
+ int n = row[0];
+ SkASSERT(n <= width);
+ width -= n;
+ row += 2;
+ } while (width > 0);
+ SkASSERT(0 == width);
+ return true;
+}
+
+bool SkAAClip::trimTopBottom() {
+ if (this->isEmpty()) {
+ return false;
+ }
+
+ this->validate();
+
+ const int width = fBounds.width();
+ RunHead* head = fRunHead;
+ YOffset* yoff = head->yoffsets();
+ YOffset* stop = yoff + head->fRowCount;
+ const uint8_t* base = head->data();
+
+ // Look to trim away empty rows from the top.
+ //
+ int skip = 0;
+ while (yoff < stop) {
+ const uint8_t* data = base + yoff->fOffset;
+ if (!row_is_all_zeros(data, width)) {
+ break;
+ }
+ skip += 1;
+ yoff += 1;
+ }
+ SkASSERT(skip <= head->fRowCount);
+ if (skip == head->fRowCount) {
+ return this->setEmpty();
+ }
+ if (skip > 0) {
+ // adjust fRowCount and fBounds.fTop, and slide all the data up
+ // as we remove [skip] number of YOffset entries
+ yoff = head->yoffsets();
+ int dy = yoff[skip - 1].fY + 1;
+ for (int i = skip; i < head->fRowCount; ++i) {
+ SkASSERT(yoff[i].fY >= dy);
+ yoff[i].fY -= dy;
+ }
+ YOffset* dst = head->yoffsets();
+ size_t size = head->fRowCount * sizeof(YOffset) + head->fDataSize;
+ memmove(dst, dst + skip, size - skip * sizeof(YOffset));
+
+ fBounds.fTop += dy;
+ SkASSERT(!fBounds.isEmpty());
+ head->fRowCount -= skip;
+ SkASSERT(head->fRowCount > 0);
+
+ this->validate();
+ // need to reset this after the memmove
+ base = head->data();
+ }
+
+ // Look to trim away empty rows from the bottom.
+ // We know that we have at least one non-zero row, so we can just walk
+ // backwards without checking for running past the start.
+ //
+ stop = yoff = head->yoffsets() + head->fRowCount;
+ do {
+ yoff -= 1;
+ } while (row_is_all_zeros(base + yoff->fOffset, width));
+ skip = SkToInt(stop - yoff - 1);
+ SkASSERT(skip >= 0 && skip < head->fRowCount);
+ if (skip > 0) {
+ // removing from the bottom is easier than from the top, as we don't
+ // have to adjust any of the Y values; we just have to trim the array
+ memmove(stop - skip, stop, head->fDataSize);
+
+ fBounds.fBottom = fBounds.fTop + yoff->fY + 1;
+ SkASSERT(!fBounds.isEmpty());
+ head->fRowCount -= skip;
+ SkASSERT(head->fRowCount > 0);
+ }
+ this->validate();
+
+ return true;
+}
+
+// can't validate before we're done, since trimming is part of the process of
+// making us valid after the Builder. Since we build from top to bottom, it's
+// possible our fBounds.fBottom is bigger than our last scanline of data, so
+// we trim fBounds.fBottom back up.
+//
+// TODO: check for duplicates in X and Y to further compress our data
+//
+bool SkAAClip::trimBounds() {
+ if (this->isEmpty()) {
+ return false;
+ }
+
+ const RunHead* head = fRunHead;
+ const YOffset* yoff = head->yoffsets();
+
+ SkASSERT(head->fRowCount > 0);
+ const YOffset& lastY = yoff[head->fRowCount - 1];
+ SkASSERT(lastY.fY + 1 <= fBounds.height());
+ fBounds.fBottom = fBounds.fTop + lastY.fY + 1;
+ SkASSERT(lastY.fY + 1 == fBounds.height());
+ SkASSERT(!fBounds.isEmpty());
+
+ return this->trimTopBottom() && this->trimLeftRight();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkAAClip::freeRuns() {
+ if (fRunHead) {
+ SkASSERT(fRunHead->fRefCnt >= 1);
+ if (1 == sk_atomic_dec(&fRunHead->fRefCnt)) {
+ sk_free(fRunHead);
+ }
+ }
+}
+
+SkAAClip::SkAAClip() {
+ fBounds.setEmpty();
+ fRunHead = nullptr;
+}
+
+SkAAClip::SkAAClip(const SkAAClip& src) {
+ SkDEBUGCODE(fBounds.setEmpty();) // need this for validate
+ fRunHead = nullptr;
+ *this = src;
+}
+
+SkAAClip::~SkAAClip() {
+ this->freeRuns();
+}
+
+SkAAClip& SkAAClip::operator=(const SkAAClip& src) {
+ AUTO_AACLIP_VALIDATE(*this);
+ src.validate();
+
+ if (this != &src) {
+ this->freeRuns();
+ fBounds = src.fBounds;
+ fRunHead = src.fRunHead;
+ if (fRunHead) {
+ sk_atomic_inc(&fRunHead->fRefCnt);
+ }
+ }
+ return *this;
+}
+
+bool operator==(const SkAAClip& a, const SkAAClip& b) {
+ a.validate();
+ b.validate();
+
+ if (&a == &b) {
+ return true;
+ }
+ if (a.fBounds != b.fBounds) {
+ return false;
+ }
+
+ const SkAAClip::RunHead* ah = a.fRunHead;
+ const SkAAClip::RunHead* bh = b.fRunHead;
+
+ // this catches empties and rects being equal
+ if (ah == bh) {
+ return true;
+ }
+
+ // now we insist that both are complex (but different ptrs)
+ if (!a.fRunHead || !b.fRunHead) {
+ return false;
+ }
+
+ return ah->fRowCount == bh->fRowCount &&
+ ah->fDataSize == bh->fDataSize &&
+ !memcmp(ah->data(), bh->data(), ah->fDataSize);
+}
+
+void SkAAClip::swap(SkAAClip& other) {
+ AUTO_AACLIP_VALIDATE(*this);
+ other.validate();
+
+ SkTSwap(fBounds, other.fBounds);
+ SkTSwap(fRunHead, other.fRunHead);
+}
+
+bool SkAAClip::set(const SkAAClip& src) {
+ *this = src;
+ return !this->isEmpty();
+}
+
+bool SkAAClip::setEmpty() {
+ this->freeRuns();
+ fBounds.setEmpty();
+ fRunHead = nullptr;
+ return false;
+}
+
+bool SkAAClip::setRect(const SkIRect& bounds) {
+ if (bounds.isEmpty()) {
+ return this->setEmpty();
+ }
+
+ AUTO_AACLIP_VALIDATE(*this);
+
+#if 0
+ SkRect r;
+ r.set(bounds);
+ SkPath path;
+ path.addRect(r);
+ return this->setPath(path);
+#else
+ this->freeRuns();
+ fBounds = bounds;
+ fRunHead = RunHead::AllocRect(bounds);
+ SkASSERT(!this->isEmpty());
+ return true;
+#endif
+}
+
+bool SkAAClip::isRect() const {
+ if (this->isEmpty()) {
+ return false;
+ }
+
+ const RunHead* head = fRunHead;
+ if (head->fRowCount != 1) {
+ return false;
+ }
+ const YOffset* yoff = head->yoffsets();
+ if (yoff->fY != fBounds.fBottom - 1) {
+ return false;
+ }
+
+ const uint8_t* row = head->data() + yoff->fOffset;
+ int width = fBounds.width();
+ do {
+ if (row[1] != 0xFF) {
+ return false;
+ }
+ int n = row[0];
+ SkASSERT(n <= width);
+ width -= n;
+ row += 2;
+ } while (width > 0);
+ return true;
+}
+
+bool SkAAClip::setRect(const SkRect& r, bool doAA) {
+ if (r.isEmpty()) {
+ return this->setEmpty();
+ }
+
+ AUTO_AACLIP_VALIDATE(*this);
+
+ // TODO: special case this
+
+ SkPath path;
+ path.addRect(r);
+ return this->setPath(path, nullptr, doAA);
+}
+
+static void append_run(SkTDArray<uint8_t>& array, uint8_t value, int count) {
+ SkASSERT(count >= 0);
+ while (count > 0) {
+ int n = count;
+ if (n > 255) {
+ n = 255;
+ }
+ uint8_t* data = array.append(2);
+ data[0] = n;
+ data[1] = value;
+ count -= n;
+ }
+}
+
+bool SkAAClip::setRegion(const SkRegion& rgn) {
+ if (rgn.isEmpty()) {
+ return this->setEmpty();
+ }
+ if (rgn.isRect()) {
+ return this->setRect(rgn.getBounds());
+ }
+
+#if 0
+ SkAAClip clip;
+ SkRegion::Iterator iter(rgn);
+ for (; !iter.done(); iter.next()) {
+ clip.op(iter.rect(), SkRegion::kUnion_Op);
+ }
+ this->swap(clip);
+ return !this->isEmpty();
+#else
+ const SkIRect& bounds = rgn.getBounds();
+ const int offsetX = bounds.fLeft;
+ const int offsetY = bounds.fTop;
+
+ SkTDArray<YOffset> yArray;
+ SkTDArray<uint8_t> xArray;
+
+ yArray.setReserve(SkMin32(bounds.height(), 1024));
+ xArray.setReserve(SkMin32(bounds.width() * 128, 64 * 1024));
+
+ SkRegion::Iterator iter(rgn);
+ int prevRight = 0;
+ int prevBot = 0;
+ YOffset* currY = nullptr;
+
+ for (; !iter.done(); iter.next()) {
+ const SkIRect& r = iter.rect();
+ SkASSERT(bounds.contains(r));
+
+ int bot = r.fBottom - offsetY;
+ SkASSERT(bot >= prevBot);
+ if (bot > prevBot) {
+ if (currY) {
+ // flush current row
+ append_run(xArray, 0, bounds.width() - prevRight);
+ }
+ // did we introduce an empty-gap from the prev row?
+ int top = r.fTop - offsetY;
+ if (top > prevBot) {
+ currY = yArray.append();
+ currY->fY = top - 1;
+ currY->fOffset = xArray.count();
+ append_run(xArray, 0, bounds.width());
+ }
+ // create a new record for this Y value
+ currY = yArray.append();
+ currY->fY = bot - 1;
+ currY->fOffset = xArray.count();
+ prevRight = 0;
+ prevBot = bot;
+ }
+
+ int x = r.fLeft - offsetX;
+ append_run(xArray, 0, x - prevRight);
+
+ int w = r.fRight - r.fLeft;
+ append_run(xArray, 0xFF, w);
+ prevRight = x + w;
+ SkASSERT(prevRight <= bounds.width());
+ }
+ // flush last row
+ append_run(xArray, 0, bounds.width() - prevRight);
+
+ // now pack everything into a RunHead
+ RunHead* head = RunHead::Alloc(yArray.count(), xArray.bytes());
+ memcpy(head->yoffsets(), yArray.begin(), yArray.bytes());
+ memcpy(head->data(), xArray.begin(), xArray.bytes());
+
+ this->setEmpty();
+ fBounds = bounds;
+ fRunHead = head;
+ this->validate();
+ return true;
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+const uint8_t* SkAAClip::findRow(int y, int* lastYForRow) const {
+ SkASSERT(fRunHead);
+
+ if (!y_in_rect(y, fBounds)) {
+ return nullptr;
+ }
+ y -= fBounds.y(); // our yoffs values are relative to the top
+
+ const YOffset* yoff = fRunHead->yoffsets();
+ while (yoff->fY < y) {
+ yoff += 1;
+ SkASSERT(yoff - fRunHead->yoffsets() < fRunHead->fRowCount);
+ }
+
+ if (lastYForRow) {
+ *lastYForRow = fBounds.y() + yoff->fY;
+ }
+ return fRunHead->data() + yoff->fOffset;
+}
+
+const uint8_t* SkAAClip::findX(const uint8_t data[], int x, int* initialCount) const {
+ SkASSERT(x_in_rect(x, fBounds));
+ x -= fBounds.x();
+
+ // first skip up to X
+ for (;;) {
+ int n = data[0];
+ if (x < n) {
+ if (initialCount) {
+ *initialCount = n - x;
+ }
+ break;
+ }
+ data += 2;
+ x -= n;
+ }
+ return data;
+}
+
+bool SkAAClip::quickContains(int left, int top, int right, int bottom) const {
+ if (this->isEmpty()) {
+ return false;
+ }
+ if (!fBounds.contains(left, top, right, bottom)) {
+ return false;
+ }
+#if 0
+ if (this->isRect()) {
+ return true;
+ }
+#endif
+
+ int lastY SK_INIT_TO_AVOID_WARNING;
+ const uint8_t* row = this->findRow(top, &lastY);
+ if (lastY < bottom) {
+ return false;
+ }
+ // now just need to check in X
+ int count;
+ row = this->findX(row, left, &count);
+#if 0
+ return count >= (right - left) && 0xFF == row[1];
+#else
+ int rectWidth = right - left;
+ while (0xFF == row[1]) {
+ if (count >= rectWidth) {
+ return true;
+ }
+ rectWidth -= count;
+ row += 2;
+ count = row[0];
+ }
+ return false;
+#endif
+}
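
quickContains() above rejects on bounds first, checks that a single row record spans [top, bottom], and then walks the row's runs to verify full (0xFF) coverage across the queried width. A standalone sketch of just that X walk, with made-up sample data (not Skia API):

    #include <cassert>
    #include <cstdint>

    // 'row' points at a (count, alpha) pair already advanced to the query's left
    // edge, with 'count' pixels remaining in the current run; return true if the
    // next 'width' pixels are all fully opaque.
    static bool rowFullyOpaque(const uint8_t* row, int count, int width) {
        while (0xFF == row[1]) {
            if (count >= width) {
                return true;          // the opaque runs cover the whole span
            }
            width -= count;
            row += 2;
            count = row[0];
        }
        return false;                 // reached a non-opaque run first
    }

    int main() {
        const uint8_t row[] = { 2, 0xFF, 3, 0xFF, 4, 0x80 };
        assert( rowFullyOpaque(row, 2, 5));   // 2 + 3 opaque pixels cover width 5
        assert(!rowFullyOpaque(row, 2, 6));   // the sixth pixel falls in the 0x80 run
        return 0;
    }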
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkAAClip::Builder {
+ SkIRect fBounds;
+ struct Row {
+ int fY;
+ int fWidth;
+ SkTDArray<uint8_t>* fData;
+ };
+ SkTDArray<Row> fRows;
+ Row* fCurrRow;
+ int fPrevY;
+ int fWidth;
+ int fMinY;
+
+public:
+ Builder(const SkIRect& bounds) : fBounds(bounds) {
+ fPrevY = -1;
+ fWidth = bounds.width();
+ fCurrRow = nullptr;
+ fMinY = bounds.fTop;
+ }
+
+ ~Builder() {
+ Row* row = fRows.begin();
+ Row* stop = fRows.end();
+ while (row < stop) {
+ delete row->fData;
+ row += 1;
+ }
+ }
+
+ const SkIRect& getBounds() const { return fBounds; }
+
+ void addRun(int x, int y, U8CPU alpha, int count) {
+ SkASSERT(count > 0);
+ SkASSERT(fBounds.contains(x, y));
+ SkASSERT(fBounds.contains(x + count - 1, y));
+
+ x -= fBounds.left();
+ y -= fBounds.top();
+
+ Row* row = fCurrRow;
+ if (y != fPrevY) {
+ SkASSERT(y > fPrevY);
+ fPrevY = y;
+ row = this->flushRow(true);
+ row->fY = y;
+ row->fWidth = 0;
+ SkASSERT(row->fData);
+ SkASSERT(0 == row->fData->count());
+ fCurrRow = row;
+ }
+
+ SkASSERT(row->fWidth <= x);
+ SkASSERT(row->fWidth < fBounds.width());
+
+ SkTDArray<uint8_t>& data = *row->fData;
+
+ int gap = x - row->fWidth;
+ if (gap) {
+ AppendRun(data, 0, gap);
+ row->fWidth += gap;
+ SkASSERT(row->fWidth < fBounds.width());
+ }
+
+ AppendRun(data, alpha, count);
+ row->fWidth += count;
+ SkASSERT(row->fWidth <= fBounds.width());
+ }
+
+ void addColumn(int x, int y, U8CPU alpha, int height) {
+ SkASSERT(fBounds.contains(x, y + height - 1));
+
+ this->addRun(x, y, alpha, 1);
+ this->flushRowH(fCurrRow);
+ y -= fBounds.fTop;
+ SkASSERT(y == fCurrRow->fY);
+ fCurrRow->fY = y + height - 1;
+ }
+
+ void addRectRun(int x, int y, int width, int height) {
+ SkASSERT(fBounds.contains(x + width - 1, y + height - 1));
+ this->addRun(x, y, 0xFF, width);
+
+ // we assume the rect must be all we'll see for these scanlines
+ // so we ensure our row goes all the way to our right
+ this->flushRowH(fCurrRow);
+
+ y -= fBounds.fTop;
+ SkASSERT(y == fCurrRow->fY);
+ fCurrRow->fY = y + height - 1;
+ }
+
+ void addAntiRectRun(int x, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) {
+ SkASSERT(fBounds.contains(x + width - 1 +
+ (leftAlpha > 0 ? 1 : 0) + (rightAlpha > 0 ? 1 : 0),
+ y + height - 1));
+ SkASSERT(width >= 0);
+
+ // Conceptually we're always adding 3 runs, but we should
+ // merge or omit them if possible.
+ if (leftAlpha == 0xFF) {
+ width++;
+ } else if (leftAlpha > 0) {
+ this->addRun(x++, y, leftAlpha, 1);
+ }
+ if (rightAlpha == 0xFF) {
+ width++;
+ }
+ if (width > 0) {
+ this->addRun(x, y, 0xFF, width);
+ }
+ if (rightAlpha > 0 && rightAlpha < 255) {
+ this->addRun(x + width, y, rightAlpha, 1);
+ }
+
+ // we assume the rect must be all we'll see for these scanlines
+ // so we ensure our row goes all the way to our right
+ this->flushRowH(fCurrRow);
+
+ y -= fBounds.fTop;
+ SkASSERT(y == fCurrRow->fY);
+ fCurrRow->fY = y + height - 1;
+ }
+
+ bool finish(SkAAClip* target) {
+ this->flushRow(false);
+
+ const Row* row = fRows.begin();
+ const Row* stop = fRows.end();
+
+ size_t dataSize = 0;
+ while (row < stop) {
+ dataSize += row->fData->count();
+ row += 1;
+ }
+
+ if (0 == dataSize) {
+ return target->setEmpty();
+ }
+
+ SkASSERT(fMinY >= fBounds.fTop);
+ SkASSERT(fMinY < fBounds.fBottom);
+ int adjustY = fMinY - fBounds.fTop;
+ fBounds.fTop = fMinY;
+
+ RunHead* head = RunHead::Alloc(fRows.count(), dataSize);
+ YOffset* yoffset = head->yoffsets();
+ uint8_t* data = head->data();
+ uint8_t* baseData = data;
+
+ row = fRows.begin();
+ SkDEBUGCODE(int prevY = row->fY - 1;)
+ while (row < stop) {
+ SkASSERT(prevY < row->fY); // must be monotonic
+ SkDEBUGCODE(prevY = row->fY);
+
+ yoffset->fY = row->fY - adjustY;
+ yoffset->fOffset = SkToU32(data - baseData);
+ yoffset += 1;
+
+ size_t n = row->fData->count();
+ memcpy(data, row->fData->begin(), n);
+#ifdef SK_DEBUG
+ size_t bytesNeeded = compute_row_length(data, fBounds.width());
+ SkASSERT(bytesNeeded == n);
+#endif
+ data += n;
+
+ row += 1;
+ }
+
+ target->freeRuns();
+ target->fBounds = fBounds;
+ target->fRunHead = head;
+ return target->trimBounds();
+ }
+
+ void dump() {
+ this->validate();
+ int y;
+ for (y = 0; y < fRows.count(); ++y) {
+ const Row& row = fRows[y];
+ SkDebugf("Y:%3d W:%3d", row.fY, row.fWidth);
+ const SkTDArray<uint8_t>& data = *row.fData;
+ int count = data.count();
+ SkASSERT(!(count & 1));
+ const uint8_t* ptr = data.begin();
+ for (int x = 0; x < count; x += 2) {
+ SkDebugf(" [%3d:%02X]", ptr[0], ptr[1]);
+ ptr += 2;
+ }
+ SkDebugf("\n");
+ }
+ }
+
+ void validate() {
+#ifdef SK_DEBUG
+ if (false) { // avoid bit rot, suppress warning
+ test_count_left_right_zeros();
+ }
+ int prevY = -1;
+ for (int i = 0; i < fRows.count(); ++i) {
+ const Row& row = fRows[i];
+ SkASSERT(prevY < row.fY);
+ SkASSERT(fWidth == row.fWidth);
+ int count = row.fData->count();
+ const uint8_t* ptr = row.fData->begin();
+ SkASSERT(!(count & 1));
+ int w = 0;
+ for (int x = 0; x < count; x += 2) {
+ int n = ptr[0];
+ SkASSERT(n > 0);
+ w += n;
+ SkASSERT(w <= fWidth);
+ ptr += 2;
+ }
+ SkASSERT(w == fWidth);
+ prevY = row.fY;
+ }
+#endif
+ }
+
+ // only called by BuilderBlitter
+ void setMinY(int y) {
+ fMinY = y;
+ }
+
+private:
+ void flushRowH(Row* row) {
+ // flush current row if needed
+ if (row->fWidth < fWidth) {
+ AppendRun(*row->fData, 0, fWidth - row->fWidth);
+ row->fWidth = fWidth;
+ }
+ }
+
+ Row* flushRow(bool readyForAnother) {
+ Row* next = nullptr;
+ int count = fRows.count();
+ if (count > 0) {
+ this->flushRowH(&fRows[count - 1]);
+ }
+ if (count > 1) {
+ // are our last two runs the same?
+ Row* prev = &fRows[count - 2];
+ Row* curr = &fRows[count - 1];
+ SkASSERT(prev->fWidth == fWidth);
+ SkASSERT(curr->fWidth == fWidth);
+ if (*prev->fData == *curr->fData) {
+ prev->fY = curr->fY;
+ if (readyForAnother) {
+ curr->fData->rewind();
+ next = curr;
+ } else {
+ delete curr->fData;
+ fRows.removeShuffle(count - 1);
+ }
+ } else {
+ if (readyForAnother) {
+ next = fRows.append();
+ next->fData = new SkTDArray<uint8_t>;
+ }
+ }
+ } else {
+ if (readyForAnother) {
+ next = fRows.append();
+ next->fData = new SkTDArray<uint8_t>;
+ }
+ }
+ return next;
+ }
+
+ static void AppendRun(SkTDArray<uint8_t>& data, U8CPU alpha, int count) {
+ do {
+ int n = count;
+ if (n > 255) {
+ n = 255;
+ }
+ uint8_t* ptr = data.append(2);
+ ptr[0] = n;
+ ptr[1] = alpha;
+ count -= n;
+ } while (count > 0);
+ }
+};
+
+class SkAAClip::BuilderBlitter : public SkBlitter {
+ int fLastY;
+
+ /*
+ If we see a gap of 1 or more empty scanlines while building in Y-order,
+ we inject an explicit empty scanline (alpha==0)
+
+ See AAClipTest.cpp : test_path_with_hole()
+ */
+ void checkForYGap(int y) {
+ SkASSERT(y >= fLastY);
+ if (fLastY > -SK_MaxS32) {
+ int gap = y - fLastY;
+ if (gap > 1) {
+ fBuilder->addRun(fLeft, y - 1, 0, fRight - fLeft);
+ }
+ }
+ fLastY = y;
+ }
+
+public:
+
+ BuilderBlitter(Builder* builder) {
+ fBuilder = builder;
+ fLeft = builder->getBounds().fLeft;
+ fRight = builder->getBounds().fRight;
+ fMinY = SK_MaxS32;
+ fLastY = -SK_MaxS32; // sentinel
+ }
+
+ void finish() {
+ if (fMinY < SK_MaxS32) {
+ fBuilder->setMinY(fMinY);
+ }
+ }
+
+ /**
+ Must evaluate clips in scan-line order, so we don't want to allow blitV(),
+ but an AAClip can be clipped down to a single pixel wide, so we
+ must support it (given AntiRect semantics: minimum width is 2).
+ Instead we rely on the runtime asserts to guarantee Y monotonicity;
+ any failure cases they miss may produce minor artifacts.
+ */
+ void blitV(int x, int y, int height, SkAlpha alpha) override {
+ this->recordMinY(y);
+ fBuilder->addColumn(x, y, alpha, height);
+ fLastY = y + height - 1;
+ }
+
+ void blitRect(int x, int y, int width, int height) override {
+ this->recordMinY(y);
+ this->checkForYGap(y);
+ fBuilder->addRectRun(x, y, width, height);
+ fLastY = y + height - 1;
+ }
+
+ virtual void blitAntiRect(int x, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) override {
+ this->recordMinY(y);
+ this->checkForYGap(y);
+ fBuilder->addAntiRectRun(x, y, width, height, leftAlpha, rightAlpha);
+ fLastY = y + height - 1;
+ }
+
+ void blitMask(const SkMask&, const SkIRect& clip) override
+ { unexpected(); }
+
+ const SkPixmap* justAnOpaqueColor(uint32_t*) override {
+ return nullptr;
+ }
+
+ void blitH(int x, int y, int width) override {
+ this->recordMinY(y);
+ this->checkForYGap(y);
+ fBuilder->addRun(x, y, 0xFF, width);
+ }
+
+ virtual void blitAntiH(int x, int y, const SkAlpha alpha[],
+ const int16_t runs[]) override {
+ this->recordMinY(y);
+ this->checkForYGap(y);
+ for (;;) {
+ int count = *runs;
+ if (count <= 0) {
+ return;
+ }
+
+ // The supersampler's buffer can be the width of the device, so
+ // we may have to trim the run to our bounds. If so, we assert that
+ // the extra spans are always alpha==0
+ int localX = x;
+ int localCount = count;
+ if (x < fLeft) {
+ SkASSERT(0 == *alpha);
+ int gap = fLeft - x;
+ SkASSERT(gap <= count);
+ localX += gap;
+ localCount -= gap;
+ }
+ int right = x + count;
+ if (right > fRight) {
+ SkASSERT(0 == *alpha);
+ localCount -= right - fRight;
+ SkASSERT(localCount >= 0);
+ }
+
+ if (localCount) {
+ fBuilder->addRun(localX, y, *alpha, localCount);
+ }
+ // Next run
+ runs += count;
+ alpha += count;
+ x += count;
+ }
+ }
+
+private:
+ Builder* fBuilder;
+ int fLeft; // cache of builder's bounds' left edge
+ int fRight;
+ int fMinY;
+
+ /*
+ * We track this in case the scan converter skipped some number of
+ * scanlines at the top (relative to the bounds it was given). This allows
+ * the builder, during its finish, to trim its bounds down to the "real"
+ * top.
+ */
+ void recordMinY(int y) {
+ if (y < fMinY) {
+ fMinY = y;
+ }
+ }
+
+ void unexpected() {
+ SkDebugf("---- did not expect to get called here");
+ sk_throw();
+ }
+};
+
+bool SkAAClip::setPath(const SkPath& path, const SkRegion* clip, bool doAA) {
+ AUTO_AACLIP_VALIDATE(*this);
+
+ if (clip && clip->isEmpty()) {
+ return this->setEmpty();
+ }
+
+ SkIRect ibounds;
+ path.getBounds().roundOut(&ibounds);
+
+ SkRegion tmpClip;
+ if (nullptr == clip) {
+ tmpClip.setRect(ibounds);
+ clip = &tmpClip;
+ }
+
+ if (path.isInverseFillType()) {
+ ibounds = clip->getBounds();
+ } else {
+ if (ibounds.isEmpty() || !ibounds.intersect(clip->getBounds())) {
+ return this->setEmpty();
+ }
+ }
+
+ Builder builder(ibounds);
+ BuilderBlitter blitter(&builder);
+
+ if (doAA) {
+ SkScan::AntiFillPath(path, *clip, &blitter, true);
+ } else {
+ SkScan::FillPath(path, *clip, &blitter);
+ }
+
+ blitter.finish();
+ return builder.finish(this);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef void (*RowProc)(SkAAClip::Builder&, int bottom,
+ const uint8_t* rowA, const SkIRect& rectA,
+ const uint8_t* rowB, const SkIRect& rectB);
+
+typedef U8CPU (*AlphaProc)(U8CPU alphaA, U8CPU alphaB);
+
+static U8CPU sectAlphaProc(U8CPU alphaA, U8CPU alphaB) {
+ // Multiply
+ return SkMulDiv255Round(alphaA, alphaB);
+}
+
+static U8CPU unionAlphaProc(U8CPU alphaA, U8CPU alphaB) {
+ // SrcOver
+ return alphaA + alphaB - SkMulDiv255Round(alphaA, alphaB);
+}
+
+static U8CPU diffAlphaProc(U8CPU alphaA, U8CPU alphaB) {
+ // SrcOut
+ return SkMulDiv255Round(alphaA, 0xFF - alphaB);
+}
+
+static U8CPU xorAlphaProc(U8CPU alphaA, U8CPU alphaB) {
+ // XOR
+ return alphaA + alphaB - 2 * SkMulDiv255Round(alphaA, alphaB);
+}
+
+static AlphaProc find_alpha_proc(SkRegion::Op op) {
+ switch (op) {
+ case SkRegion::kIntersect_Op:
+ return sectAlphaProc;
+ case SkRegion::kDifference_Op:
+ return diffAlphaProc;
+ case SkRegion::kUnion_Op:
+ return unionAlphaProc;
+ case SkRegion::kXOR_Op:
+ return xorAlphaProc;
+ default:
+ SkDEBUGFAIL("unexpected region op");
+ return sectAlphaProc;
+ }
+}
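
The alpha procs express the region ops as per-pixel coverage arithmetic: intersect multiplies the two coverages, union is source-over, difference is source-out, and XOR subtracts twice the product. A quick standalone check (using a simple rounding stand-in for SkMulDiv255Round) that each formula collapses to the boolean region op at the extremes 0 and 0xFF:

    #include <cassert>

    // Stand-in for SkMulDiv255Round: a*b/255, rounded.
    static unsigned mul255(unsigned a, unsigned b) { return (a * b + 127) / 255; }

    static unsigned sect(unsigned a, unsigned b) { return mul255(a, b); }              // intersect
    static unsigned uni (unsigned a, unsigned b) { return a + b - mul255(a, b); }      // union
    static unsigned dif (unsigned a, unsigned b) { return mul255(a, 0xFF - b); }       // difference
    static unsigned xr  (unsigned a, unsigned b) { return a + b - 2 * mul255(a, b); }  // xor

    int main() {
        assert(sect(0xFF, 0xFF) == 0xFF && sect(0xFF, 0) == 0);
        assert(uni (0xFF, 0)    == 0xFF && uni (0, 0)    == 0);
        assert(dif (0xFF, 0xFF) == 0    && dif (0xFF, 0) == 0xFF);
        assert(xr  (0xFF, 0xFF) == 0    && xr  (0xFF, 0) == 0xFF);
        return 0;
    }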
+
+class RowIter {
+public:
+ RowIter(const uint8_t* row, const SkIRect& bounds) {
+ fRow = row;
+ fLeft = bounds.fLeft;
+ fBoundsRight = bounds.fRight;
+ if (row) {
+ fRight = bounds.fLeft + row[0];
+ SkASSERT(fRight <= fBoundsRight);
+ fAlpha = row[1];
+ fDone = false;
+ } else {
+ fDone = true;
+ fRight = kMaxInt32;
+ fAlpha = 0;
+ }
+ }
+
+ bool done() const { return fDone; }
+ int left() const { return fLeft; }
+ int right() const { return fRight; }
+ U8CPU alpha() const { return fAlpha; }
+ void next() {
+ if (!fDone) {
+ fLeft = fRight;
+ if (fRight == fBoundsRight) {
+ fDone = true;
+ fRight = kMaxInt32;
+ fAlpha = 0;
+ } else {
+ fRow += 2;
+ fRight += fRow[0];
+ fAlpha = fRow[1];
+ SkASSERT(fRight <= fBoundsRight);
+ }
+ }
+ }
+
+private:
+ const uint8_t* fRow;
+ int fLeft;
+ int fRight;
+ int fBoundsRight;
+ bool fDone;
+ uint8_t fAlpha;
+};
+
+static void adjust_row(RowIter& iter, int& leftA, int& riteA, int rite) {
+ if (rite == riteA) {
+ iter.next();
+ leftA = iter.left();
+ riteA = iter.right();
+ }
+}
+
+#if 0 // UNUSED
+static bool intersect(int& min, int& max, int boundsMin, int boundsMax) {
+ SkASSERT(min < max);
+ SkASSERT(boundsMin < boundsMax);
+ if (min >= boundsMax || max <= boundsMin) {
+ return false;
+ }
+ if (min < boundsMin) {
+ min = boundsMin;
+ }
+ if (max > boundsMax) {
+ max = boundsMax;
+ }
+ return true;
+}
+#endif
+
+static void operatorX(SkAAClip::Builder& builder, int lastY,
+ RowIter& iterA, RowIter& iterB,
+ AlphaProc proc, const SkIRect& bounds) {
+ int leftA = iterA.left();
+ int riteA = iterA.right();
+ int leftB = iterB.left();
+ int riteB = iterB.right();
+
+ int prevRite = bounds.fLeft;
+
+ do {
+ U8CPU alphaA = 0;
+ U8CPU alphaB = 0;
+ int left, rite;
+
+ if (leftA < leftB) {
+ left = leftA;
+ alphaA = iterA.alpha();
+ if (riteA <= leftB) {
+ rite = riteA;
+ } else {
+ rite = leftA = leftB;
+ }
+ } else if (leftB < leftA) {
+ left = leftB;
+ alphaB = iterB.alpha();
+ if (riteB <= leftA) {
+ rite = riteB;
+ } else {
+ rite = leftB = leftA;
+ }
+ } else {
+ left = leftA; // or leftB, since leftA == leftB
+ rite = leftA = leftB = SkMin32(riteA, riteB);
+ alphaA = iterA.alpha();
+ alphaB = iterB.alpha();
+ }
+
+ if (left >= bounds.fRight) {
+ break;
+ }
+ if (rite > bounds.fRight) {
+ rite = bounds.fRight;
+ }
+
+ if (left >= bounds.fLeft) {
+ SkASSERT(rite > left);
+ builder.addRun(left, lastY, proc(alphaA, alphaB), rite - left);
+ prevRite = rite;
+ }
+
+ adjust_row(iterA, leftA, riteA, rite);
+ adjust_row(iterB, leftB, riteB, rite);
+ } while (!iterA.done() || !iterB.done());
+
+ if (prevRite < bounds.fRight) {
+ builder.addRun(prevRite, lastY, 0, bounds.fRight - prevRite);
+ }
+}
+
+static void adjust_iter(SkAAClip::Iter& iter, int& topA, int& botA, int bot) {
+ if (bot == botA) {
+ iter.next();
+ topA = botA;
+ SkASSERT(botA == iter.top());
+ botA = iter.bottom();
+ }
+}
+
+static void operateY(SkAAClip::Builder& builder, const SkAAClip& A,
+ const SkAAClip& B, SkRegion::Op op) {
+ AlphaProc proc = find_alpha_proc(op);
+ const SkIRect& bounds = builder.getBounds();
+
+ SkAAClip::Iter iterA(A);
+ SkAAClip::Iter iterB(B);
+
+ SkASSERT(!iterA.done());
+ int topA = iterA.top();
+ int botA = iterA.bottom();
+ SkASSERT(!iterB.done());
+ int topB = iterB.top();
+ int botB = iterB.bottom();
+
+ do {
+ const uint8_t* rowA = nullptr;
+ const uint8_t* rowB = nullptr;
+ int top, bot;
+
+ if (topA < topB) {
+ top = topA;
+ rowA = iterA.data();
+ if (botA <= topB) {
+ bot = botA;
+ } else {
+ bot = topA = topB;
+ }
+
+ } else if (topB < topA) {
+ top = topB;
+ rowB = iterB.data();
+ if (botB <= topA) {
+ bot = botB;
+ } else {
+ bot = topB = topA;
+ }
+ } else {
+ top = topA; // or topB, since topA == topB
+ bot = topA = topB = SkMin32(botA, botB);
+ rowA = iterA.data();
+ rowB = iterB.data();
+ }
+
+ if (top >= bounds.fBottom) {
+ break;
+ }
+
+ if (bot > bounds.fBottom) {
+ bot = bounds.fBottom;
+ }
+ SkASSERT(top < bot);
+
+ if (!rowA && !rowB) {
+ builder.addRun(bounds.fLeft, bot - 1, 0, bounds.width());
+ } else if (top >= bounds.fTop) {
+ SkASSERT(bot <= bounds.fBottom);
+ RowIter rowIterA(rowA, rowA ? A.getBounds() : bounds);
+ RowIter rowIterB(rowB, rowB ? B.getBounds() : bounds);
+ operatorX(builder, bot - 1, rowIterA, rowIterB, proc, bounds);
+ }
+
+ adjust_iter(iterA, topA, botA, bot);
+ adjust_iter(iterB, topB, botB, bot);
+ } while (!iterA.done() || !iterB.done());
+}
+
+bool SkAAClip::op(const SkAAClip& clipAOrig, const SkAAClip& clipBOrig,
+ SkRegion::Op op) {
+ AUTO_AACLIP_VALIDATE(*this);
+
+ if (SkRegion::kReplace_Op == op) {
+ return this->set(clipBOrig);
+ }
+
+ const SkAAClip* clipA = &clipAOrig;
+ const SkAAClip* clipB = &clipBOrig;
+
+ if (SkRegion::kReverseDifference_Op == op) {
+ SkTSwap(clipA, clipB);
+ op = SkRegion::kDifference_Op;
+ }
+
+ bool a_empty = clipA->isEmpty();
+ bool b_empty = clipB->isEmpty();
+
+ SkIRect bounds;
+ switch (op) {
+ case SkRegion::kDifference_Op:
+ if (a_empty) {
+ return this->setEmpty();
+ }
+ if (b_empty || !SkIRect::Intersects(clipA->fBounds, clipB->fBounds)) {
+ return this->set(*clipA);
+ }
+ bounds = clipA->fBounds;
+ break;
+
+ case SkRegion::kIntersect_Op:
+ if ((a_empty | b_empty) || !bounds.intersect(clipA->fBounds,
+ clipB->fBounds)) {
+ return this->setEmpty();
+ }
+ break;
+
+ case SkRegion::kUnion_Op:
+ case SkRegion::kXOR_Op:
+ if (a_empty) {
+ return this->set(*clipB);
+ }
+ if (b_empty) {
+ return this->set(*clipA);
+ }
+ bounds = clipA->fBounds;
+ bounds.join(clipB->fBounds);
+ break;
+
+ default:
+ SkDEBUGFAIL("unknown region op");
+ return !this->isEmpty();
+ }
+
+ SkASSERT(SkIRect::Intersects(bounds, clipA->fBounds));
+ SkASSERT(SkIRect::Intersects(bounds, clipB->fBounds));
+
+ Builder builder(bounds);
+ operateY(builder, *clipA, *clipB, op);
+
+ return builder.finish(this);
+}
+
+/*
+ * It can be expensive to build a local aaclip before applying the op, so
+ * we first see if we can restrict the bounds of new rect to our current
+ * bounds, or note that the new rect subsumes our current clip.
+ */
+
+bool SkAAClip::op(const SkIRect& rOrig, SkRegion::Op op) {
+ SkIRect rStorage;
+ const SkIRect* r = &rOrig;
+
+ switch (op) {
+ case SkRegion::kIntersect_Op:
+ if (!rStorage.intersect(rOrig, fBounds)) {
+ // no overlap, so we're empty
+ return this->setEmpty();
+ }
+ if (rStorage == fBounds) {
+ // we were wholly inside the rect, no change
+ return !this->isEmpty();
+ }
+ if (this->quickContains(rStorage)) {
+ // the intersection is wholly inside us, we're a rect
+ return this->setRect(rStorage);
+ }
+ r = &rStorage; // use the intersected bounds
+ break;
+ case SkRegion::kDifference_Op:
+ break;
+ case SkRegion::kUnion_Op:
+ if (rOrig.contains(fBounds)) {
+ return this->setRect(rOrig);
+ }
+ break;
+ default:
+ break;
+ }
+
+ SkAAClip clip;
+ clip.setRect(*r);
+ return this->op(*this, clip, op);
+}
+
+bool SkAAClip::op(const SkRect& rOrig, SkRegion::Op op, bool doAA) {
+ SkRect rStorage, boundsStorage;
+ const SkRect* r = &rOrig;
+
+ boundsStorage.set(fBounds);
+ switch (op) {
+ case SkRegion::kIntersect_Op:
+ case SkRegion::kDifference_Op:
+ if (!rStorage.intersect(rOrig, boundsStorage)) {
+ if (SkRegion::kIntersect_Op == op) {
+ return this->setEmpty();
+ } else { // kDifference
+ return !this->isEmpty();
+ }
+ }
+ r = &rStorage; // use the intersected bounds
+ break;
+ case SkRegion::kUnion_Op:
+ if (rOrig.contains(boundsStorage)) {
+ return this->setRect(rOrig);
+ }
+ break;
+ default:
+ break;
+ }
+
+ SkAAClip clip;
+ clip.setRect(*r, doAA);
+ return this->op(*this, clip, op);
+}
+
+bool SkAAClip::op(const SkAAClip& clip, SkRegion::Op op) {
+ return this->op(*this, clip, op);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkAAClip::translate(int dx, int dy, SkAAClip* dst) const {
+ if (nullptr == dst) {
+ return !this->isEmpty();
+ }
+
+ if (this->isEmpty()) {
+ return dst->setEmpty();
+ }
+
+ if (this != dst) {
+ sk_atomic_inc(&fRunHead->fRefCnt);
+ dst->freeRuns();
+ dst->fRunHead = fRunHead;
+ dst->fBounds = fBounds;
+ }
+ dst->fBounds.offset(dx, dy);
+ return true;
+}
+
+static void expand_row_to_mask(uint8_t* SK_RESTRICT mask,
+ const uint8_t* SK_RESTRICT row,
+ int width) {
+ while (width > 0) {
+ int n = row[0];
+ SkASSERT(width >= n);
+ memset(mask, row[1], n);
+ mask += n;
+ row += 2;
+ width -= n;
+ }
+ SkASSERT(0 == width);
+}
+
+void SkAAClip::copyToMask(SkMask* mask) const {
+ mask->fFormat = SkMask::kA8_Format;
+ if (this->isEmpty()) {
+ mask->fBounds.setEmpty();
+ mask->fImage = nullptr;
+ mask->fRowBytes = 0;
+ return;
+ }
+
+ mask->fBounds = fBounds;
+ mask->fRowBytes = fBounds.width();
+ size_t size = mask->computeImageSize();
+ mask->fImage = SkMask::AllocImage(size);
+
+ Iter iter(*this);
+ uint8_t* dst = mask->fImage;
+ const int width = fBounds.width();
+
+ int y = fBounds.fTop;
+ while (!iter.done()) {
+ do {
+ expand_row_to_mask(dst, iter.data(), width);
+ dst += mask->fRowBytes;
+ } while (++y < iter.bottom());
+ iter.next();
+ }
+}
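
copyToMask() decompresses each RLE row into a flat A8 scanline via expand_row_to_mask(), then repeats that scanline for every y covered by the row record. A standalone sketch of the row expansion with sample data (hypothetical names, not Skia API):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Expand one (count, alpha)-encoded row into a flat 8-bit coverage scanline.
    static void expandRow(uint8_t* dst, const uint8_t* row, int width) {
        while (width > 0) {
            int n = row[0];
            std::memset(dst, row[1], n);   // n pixels share the same coverage
            dst += n;
            row += 2;
            width -= n;
        }
    }

    int main() {
        const uint8_t row[] = { 3, 0x00, 4, 0x80, 3, 0xFF };  // encodes width 10
        uint8_t scanline[10];
        expandRow(scanline, row, 10);
        assert(scanline[0] == 0x00 && scanline[5] == 0x80 && scanline[9] == 0xFF);
        return 0;
    }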
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+static void expandToRuns(const uint8_t* SK_RESTRICT data, int initialCount, int width,
+ int16_t* SK_RESTRICT runs, SkAlpha* SK_RESTRICT aa) {
+ // we don't read our initial n from data, since the caller may have had to
+ // clip it, hence the initialCount parameter.
+ int n = initialCount;
+ for (;;) {
+ if (n > width) {
+ n = width;
+ }
+ SkASSERT(n > 0);
+ runs[0] = n;
+ runs += n;
+
+ aa[0] = data[1];
+ aa += n;
+
+ data += 2;
+ width -= n;
+ if (0 == width) {
+ break;
+ }
+ // load the next count
+ n = data[0];
+ }
+ runs[0] = 0; // sentinel
+}
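
expandToRuns() converts the clip's (count, alpha) pairs into the sparse runs[]/aa[] form that blitAntiH() consumes: runs[i] holds the length of the run starting at pixel i, aa[i] its coverage, and a zero run length terminates the scanline. A standalone copy of that conversion with sample data, to make the output layout concrete (illustrative only):

    #include <cassert>
    #include <cstdint>

    // Convert (count, alpha) pairs into sparse blitter runs, clamped to 'width'.
    static void toBlitterRuns(const uint8_t* data, int initialCount, int width,
                              int16_t* runs, uint8_t* aa) {
        int n = initialCount;
        for (;;) {
            if (n > width) {
                n = width;              // clamp the final run to our width
            }
            runs[0] = (int16_t)n;
            runs += n;
            aa[0] = data[1];
            aa += n;
            data += 2;
            width -= n;
            if (0 == width) {
                break;
            }
            n = data[0];                // load the next run length
        }
        runs[0] = 0;                    // sentinel
    }

    int main() {
        const uint8_t row[] = { 3, 0x00, 5, 0xFF };   // clip row of width 8
        int16_t runs[9];
        uint8_t aa[8];
        toBlitterRuns(row, 3, 8, runs, aa);
        assert(runs[0] == 3 && aa[0] == 0x00);        // first run: 3 pixels at 0x00
        assert(runs[3] == 5 && aa[3] == 0xFF);        // second run: 5 pixels at 0xFF
        assert(runs[8] == 0);                         // terminator
        return 0;
    }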
+
+SkAAClipBlitter::~SkAAClipBlitter() {
+ sk_free(fScanlineScratch);
+}
+
+void SkAAClipBlitter::ensureRunsAndAA() {
+ if (nullptr == fScanlineScratch) {
+ // add 1 so we can store the terminating run count of 0
+ int count = fAAClipBounds.width() + 1;
+ // we use this either for fRuns + fAA, or a scanline of a mask,
+ // which may be as deep as 32 bits
+ fScanlineScratch = sk_malloc_throw(count * sizeof(SkPMColor));
+ fRuns = (int16_t*)fScanlineScratch;
+ fAA = (SkAlpha*)(fRuns + count);
+ }
+}
+
+void SkAAClipBlitter::blitH(int x, int y, int width) {
+ SkASSERT(width > 0);
+ SkASSERT(fAAClipBounds.contains(x, y));
+ SkASSERT(fAAClipBounds.contains(x + width - 1, y));
+
+ const uint8_t* row = fAAClip->findRow(y);
+ int initialCount;
+ row = fAAClip->findX(row, x, &initialCount);
+
+ if (initialCount >= width) {
+ SkAlpha alpha = row[1];
+ if (0 == alpha) {
+ return;
+ }
+ if (0xFF == alpha) {
+ fBlitter->blitH(x, y, width);
+ return;
+ }
+ }
+
+ this->ensureRunsAndAA();
+ expandToRuns(row, initialCount, width, fRuns, fAA);
+
+ fBlitter->blitAntiH(x, y, fAA, fRuns);
+}
+
+static void merge(const uint8_t* SK_RESTRICT row, int rowN,
+ const SkAlpha* SK_RESTRICT srcAA,
+ const int16_t* SK_RESTRICT srcRuns,
+ SkAlpha* SK_RESTRICT dstAA,
+ int16_t* SK_RESTRICT dstRuns,
+ int width) {
+ SkDEBUGCODE(int accumulated = 0;)
+ int srcN = srcRuns[0];
+ // do we need this check?
+ if (0 == srcN) {
+ return;
+ }
+
+ for (;;) {
+ SkASSERT(rowN > 0);
+ SkASSERT(srcN > 0);
+
+ unsigned newAlpha = SkMulDiv255Round(srcAA[0], row[1]);
+ int minN = SkMin32(srcN, rowN);
+ dstRuns[0] = minN;
+ dstRuns += minN;
+ dstAA[0] = newAlpha;
+ dstAA += minN;
+
+ if (0 == (srcN -= minN)) {
+ srcN = srcRuns[0]; // refresh
+ srcRuns += srcN;
+ srcAA += srcN;
+ srcN = srcRuns[0]; // reload
+ if (0 == srcN) {
+ break;
+ }
+ }
+ if (0 == (rowN -= minN)) {
+ row += 2;
+ rowN = row[0]; // reload
+ }
+
+ SkDEBUGCODE(accumulated += minN;)
+ SkASSERT(accumulated <= width);
+ }
+ dstRuns[0] = 0;
+}
+
+void SkAAClipBlitter::blitAntiH(int x, int y, const SkAlpha aa[],
+ const int16_t runs[]) {
+
+ const uint8_t* row = fAAClip->findRow(y);
+ int initialCount;
+ row = fAAClip->findX(row, x, &initialCount);
+
+ this->ensureRunsAndAA();
+
+ merge(row, initialCount, aa, runs, fAA, fRuns, fAAClipBounds.width());
+ fBlitter->blitAntiH(x, y, fAA, fRuns);
+}
+
+void SkAAClipBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ if (fAAClip->quickContains(x, y, x + 1, y + height)) {
+ fBlitter->blitV(x, y, height, alpha);
+ return;
+ }
+
+ for (;;) {
+ int lastY SK_INIT_TO_AVOID_WARNING;
+ const uint8_t* row = fAAClip->findRow(y, &lastY);
+ int dy = lastY - y + 1;
+ if (dy > height) {
+ dy = height;
+ }
+ height -= dy;
+
+ row = fAAClip->findX(row, x);
+ SkAlpha newAlpha = SkMulDiv255Round(alpha, row[1]);
+ if (newAlpha) {
+ fBlitter->blitV(x, y, dy, newAlpha);
+ }
+ SkASSERT(height >= 0);
+ if (height <= 0) {
+ break;
+ }
+ y = lastY + 1;
+ }
+}
+
+void SkAAClipBlitter::blitRect(int x, int y, int width, int height) {
+ if (fAAClip->quickContains(x, y, x + width, y + height)) {
+ fBlitter->blitRect(x, y, width, height);
+ return;
+ }
+
+ while (--height >= 0) {
+ this->blitH(x, y, width);
+ y += 1;
+ }
+}
+
+typedef void (*MergeAAProc)(const void* src, int width, const uint8_t* row,
+ int initialRowCount, void* dst);
+
+static void small_memcpy(void* dst, const void* src, size_t n) {
+ memcpy(dst, src, n);
+}
+
+static void small_bzero(void* dst, size_t n) {
+ sk_bzero(dst, n);
+}
+
+static inline uint8_t mergeOne(uint8_t value, unsigned alpha) {
+ return SkMulDiv255Round(value, alpha);
+}
+
+static inline uint16_t mergeOne(uint16_t value, unsigned alpha) {
+ unsigned r = SkGetPackedR16(value);
+ unsigned g = SkGetPackedG16(value);
+ unsigned b = SkGetPackedB16(value);
+ return SkPackRGB16(SkMulDiv255Round(r, alpha),
+ SkMulDiv255Round(g, alpha),
+ SkMulDiv255Round(b, alpha));
+}
+
+template <typename T>
+void mergeT(const void* inSrc, int srcN, const uint8_t* SK_RESTRICT row, int rowN, void* inDst) {
+ const T* SK_RESTRICT src = static_cast<const T*>(inSrc);
+ T* SK_RESTRICT dst = static_cast<T*>(inDst);
+ for (;;) {
+ SkASSERT(rowN > 0);
+ SkASSERT(srcN > 0);
+
+ int n = SkMin32(rowN, srcN);
+ unsigned rowA = row[1];
+ if (0xFF == rowA) {
+ small_memcpy(dst, src, n * sizeof(T));
+ } else if (0 == rowA) {
+ small_bzero(dst, n * sizeof(T));
+ } else {
+ for (int i = 0; i < n; ++i) {
+ dst[i] = mergeOne(src[i], rowA);
+ }
+ }
+
+ if (0 == (srcN -= n)) {
+ break;
+ }
+
+ src += n;
+ dst += n;
+
+ SkASSERT(rowN == n);
+ row += 2;
+ rowN = row[0];
+ }
+}
+
+static MergeAAProc find_merge_aa_proc(SkMask::Format format) {
+ switch (format) {
+ case SkMask::kBW_Format:
+ SkDEBUGFAIL("unsupported");
+ return nullptr;
+ case SkMask::kA8_Format:
+ case SkMask::k3D_Format:
+ return mergeT<uint8_t> ;
+ case SkMask::kLCD16_Format:
+ return mergeT<uint16_t>;
+ default:
+ SkDEBUGFAIL("unsupported");
+ return nullptr;
+ }
+}
+
+static U8CPU bit2byte(int bitInAByte) {
+ SkASSERT(bitInAByte <= 0xFF);
+ // negation turns any non-zero into 0xFFFFFF??, so we just shift down
+ // some value >= 8 to get a full FF value
+ return -bitInAByte >> 8;
+}
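
bit2byte() relies on the two's-complement trick that negating any value in [1, 255] sets the high bits, so an arithmetic right shift by 8 or more yields -1, whose low byte is 0xFF; zero stays zero. A standalone sketch of the same branch-free mapping (it assumes the usual arithmetic shift of negative ints, as the code above does):

    #include <cassert>
    #include <cstdint>

    // Map 0 -> 0x00 and any non-zero value in [1, 255] -> 0xFF without a branch.
    static uint8_t bitToByte(int bitInAByte) {
        // -x is negative for x in [1, 255]; arithmetic >> 8 gives -1, low byte 0xFF.
        return static_cast<uint8_t>(-bitInAByte >> 8);
    }

    int main() {
        assert(bitToByte(0) == 0x00);
        for (int v = 1; v <= 0xFF; ++v) {
            assert(bitToByte(v) == 0xFF);
        }
        return 0;
    }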
+
+static void upscaleBW2A8(SkMask* dstMask, const SkMask& srcMask) {
+ SkASSERT(SkMask::kBW_Format == srcMask.fFormat);
+ SkASSERT(SkMask::kA8_Format == dstMask->fFormat);
+
+ const int width = srcMask.fBounds.width();
+ const int height = srcMask.fBounds.height();
+
+ const uint8_t* SK_RESTRICT src = (const uint8_t*)srcMask.fImage;
+ const size_t srcRB = srcMask.fRowBytes;
+ uint8_t* SK_RESTRICT dst = (uint8_t*)dstMask->fImage;
+ const size_t dstRB = dstMask->fRowBytes;
+
+ const int wholeBytes = width >> 3;
+ const int leftOverBits = width & 7;
+
+ for (int y = 0; y < height; ++y) {
+ uint8_t* SK_RESTRICT d = dst;
+ for (int i = 0; i < wholeBytes; ++i) {
+ int srcByte = src[i];
+ d[0] = bit2byte(srcByte & (1 << 7));
+ d[1] = bit2byte(srcByte & (1 << 6));
+ d[2] = bit2byte(srcByte & (1 << 5));
+ d[3] = bit2byte(srcByte & (1 << 4));
+ d[4] = bit2byte(srcByte & (1 << 3));
+ d[5] = bit2byte(srcByte & (1 << 2));
+ d[6] = bit2byte(srcByte & (1 << 1));
+ d[7] = bit2byte(srcByte & (1 << 0));
+ d += 8;
+ }
+ if (leftOverBits) {
+ int srcByte = src[wholeBytes];
+ for (int x = 0; x < leftOverBits; ++x) {
+ *d++ = bit2byte(srcByte & 0x80);
+ srcByte <<= 1;
+ }
+ }
+ src += srcRB;
+ dst += dstRB;
+ }
+}
+
+void SkAAClipBlitter::blitMask(const SkMask& origMask, const SkIRect& clip) {
+ SkASSERT(fAAClip->getBounds().contains(clip));
+
+ if (fAAClip->quickContains(clip)) {
+ fBlitter->blitMask(origMask, clip);
+ return;
+ }
+
+ const SkMask* mask = &origMask;
+
+ // if we're BW, we need to upscale to A8 (ugh)
+ SkMask grayMask;
+ if (SkMask::kBW_Format == origMask.fFormat) {
+ grayMask.fFormat = SkMask::kA8_Format;
+ grayMask.fBounds = origMask.fBounds;
+ grayMask.fRowBytes = origMask.fBounds.width();
+ size_t size = grayMask.computeImageSize();
+ grayMask.fImage = (uint8_t*)fGrayMaskScratch.reset(size,
+ SkAutoMalloc::kReuse_OnShrink);
+
+ upscaleBW2A8(&grayMask, origMask);
+ mask = &grayMask;
+ }
+
+ this->ensureRunsAndAA();
+
+ // HACK -- we are devolving 3D into A8, need to copy the rest of the 3D
+ // data into a temp block to support it better (ugh)
+
+ const void* src = mask->getAddr(clip.fLeft, clip.fTop);
+ const size_t srcRB = mask->fRowBytes;
+ const int width = clip.width();
+ MergeAAProc mergeProc = find_merge_aa_proc(mask->fFormat);
+
+ SkMask rowMask;
+ rowMask.fFormat = SkMask::k3D_Format == mask->fFormat ? SkMask::kA8_Format : mask->fFormat;
+ rowMask.fBounds.fLeft = clip.fLeft;
+ rowMask.fBounds.fRight = clip.fRight;
+ rowMask.fRowBytes = mask->fRowBytes; // doesn't matter, since our height==1
+ rowMask.fImage = (uint8_t*)fScanlineScratch;
+
+ int y = clip.fTop;
+ const int stopY = y + clip.height();
+
+ do {
+ int localStopY SK_INIT_TO_AVOID_WARNING;
+ const uint8_t* row = fAAClip->findRow(y, &localStopY);
+ // findRow returns last Y, not stop, so we add 1
+ localStopY = SkMin32(localStopY + 1, stopY);
+
+ int initialCount;
+ row = fAAClip->findX(row, clip.fLeft, &initialCount);
+ do {
+ mergeProc(src, width, row, initialCount, rowMask.fImage);
+ rowMask.fBounds.fTop = y;
+ rowMask.fBounds.fBottom = y + 1;
+ fBlitter->blitMask(rowMask, rowMask.fBounds);
+ src = (const void*)((const char*)src + srcRB);
+ } while (++y < localStopY);
+ } while (y < stopY);
+}
+
+const SkPixmap* SkAAClipBlitter::justAnOpaqueColor(uint32_t* value) {
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkAAClip.h b/gfx/skia/skia/src/core/SkAAClip.h
new file mode 100644
index 000000000..7b29ef142
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAAClip.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAAClip_DEFINED
+#define SkAAClip_DEFINED
+
+#include "SkBlitter.h"
+#include "SkRegion.h"
+
+class SkAAClip {
+public:
+ SkAAClip();
+ SkAAClip(const SkAAClip&);
+ ~SkAAClip();
+
+ SkAAClip& operator=(const SkAAClip&);
+ friend bool operator==(const SkAAClip&, const SkAAClip&);
+ friend bool operator!=(const SkAAClip& a, const SkAAClip& b) {
+ return !(a == b);
+ }
+
+ void swap(SkAAClip&);
+
+ bool isEmpty() const { return nullptr == fRunHead; }
+ const SkIRect& getBounds() const { return fBounds; }
+
+ // Returns true iff the clip is not empty, and is just a hard-edged rect (no partial alpha).
+ // If true, getBounds() can be used in place of this clip.
+ bool isRect() const;
+
+ bool setEmpty();
+ bool setRect(const SkIRect&);
+ bool setRect(const SkRect&, bool doAA = true);
+ bool setPath(const SkPath&, const SkRegion* clip = nullptr, bool doAA = true);
+ bool setRegion(const SkRegion&);
+ bool set(const SkAAClip&);
+
+ bool op(const SkAAClip&, const SkAAClip&, SkRegion::Op);
+
+ // Helpers for op()
+ bool op(const SkIRect&, SkRegion::Op);
+ bool op(const SkRect&, SkRegion::Op, bool doAA);
+ bool op(const SkAAClip&, SkRegion::Op);
+
+ bool translate(int dx, int dy, SkAAClip* dst) const;
+ bool translate(int dx, int dy) {
+ return this->translate(dx, dy, this);
+ }
+
+ /**
+ * Allocates a mask the size of the aaclip, and expands its data into
+ * the mask, using kA8_Format
+ */
+ void copyToMask(SkMask*) const;
+
+ // called internally
+
+ bool quickContains(int left, int top, int right, int bottom) const;
+ bool quickContains(const SkIRect& r) const {
+ return this->quickContains(r.fLeft, r.fTop, r.fRight, r.fBottom);
+ }
+
+ const uint8_t* findRow(int y, int* lastYForRow = nullptr) const;
+ const uint8_t* findX(const uint8_t data[], int x, int* initialCount = nullptr) const;
+
+ class Iter;
+ struct RunHead;
+ struct YOffset;
+ class Builder;
+
+#ifdef SK_DEBUG
+ void validate() const;
+ void debug(bool compress_y=false) const;
+#else
+ void validate() const {}
+ void debug(bool compress_y=false) const {}
+#endif
+
+private:
+ SkIRect fBounds;
+ RunHead* fRunHead;
+
+ void freeRuns();
+ bool trimBounds();
+ bool trimTopBottom();
+ bool trimLeftRight();
+
+ friend class Builder;
+ class BuilderBlitter;
+ friend class BuilderBlitter;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkAAClipBlitter : public SkBlitter {
+public:
+ SkAAClipBlitter() : fScanlineScratch(nullptr) {}
+ virtual ~SkAAClipBlitter();
+
+ void init(SkBlitter* blitter, const SkAAClip* aaclip) {
+ SkASSERT(aaclip && !aaclip->isEmpty());
+ fBlitter = blitter;
+ fAAClip = aaclip;
+ fAAClipBounds = aaclip->getBounds();
+ }
+
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha[], const int16_t runs[]) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitMask(const SkMask&, const SkIRect& clip) override;
+ const SkPixmap* justAnOpaqueColor(uint32_t* value) override;
+
+private:
+ SkBlitter* fBlitter;
+ const SkAAClip* fAAClip;
+ SkIRect fAAClipBounds;
+
+ // point into fScanlineScratch
+ int16_t* fRuns;
+ SkAlpha* fAA;
+
+ enum {
+ kSize = 32 * 32
+ };
+ SkAutoSMalloc<kSize> fGrayMaskScratch; // used for blitMask
+ void* fScanlineScratch; // enough for a mask at 32bit, or runs+aa
+
+ void ensureRunsAndAA();
+};
+
+#endif
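
For orientation, a hedged sketch of how a caller might drive the SkAAClip API declared above. It is illustrative only and assumes the usual SkPath/SkRect/SkMask helpers (addOval, MakeWH, MakeLTRB, FreeImage); it is not taken from Skia's own tests:

    #include "SkAAClip.h"
    #include "SkPath.h"

    // Build an antialiased clip from a path, restrict it to a rectangle,
    // then expand it into an A8 coverage mask.
    static void buildClipMask() {
        SkPath path;
        path.addOval(SkRect::MakeWH(100, 60));

        SkAAClip clip;
        clip.setPath(path, nullptr, true);                  // antialiased edges
        clip.op(SkIRect::MakeLTRB(10, 10, 90, 50),
                SkRegion::kIntersect_Op);                   // clamp to a rect

        SkMask mask;
        clip.copyToMask(&mask);                             // kA8_Format coverage
        // ... read mask.fImage / mask.fRowBytes ...
        SkMask::FreeImage(mask.fImage);
    }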
diff --git a/gfx/skia/skia/src/core/SkAdvancedTypefaceMetrics.h b/gfx/skia/skia/src/core/SkAdvancedTypefaceMetrics.h
new file mode 100644
index 000000000..17255ab21
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAdvancedTypefaceMetrics.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAdvancedTypefaceMetrics_DEFINED
+#define SkAdvancedTypefaceMetrics_DEFINED
+
+#include "SkBitmaskEnum.h"
+#include "SkRect.h"
+#include "SkRefCnt.h"
+#include "SkString.h"
+#include "SkTDArray.h"
+
+/** \class SkAdvancedTypefaceMetrics
+
+ The SkAdvancedTypefaceMetrics class is used by the PDF backend to correctly
+ embed typefaces. This class is created and filled in with information by
+ SkTypeface::getAdvancedTypefaceMetrics.
+*/
+class SkAdvancedTypefaceMetrics : public SkRefCnt {
+public:
+
+ SkAdvancedTypefaceMetrics()
+ : fType(SkAdvancedTypefaceMetrics::kOther_Font)
+ , fFlags((FontFlags)0)
+ , fLastGlyphID(0)
+ , fEmSize(0)
+ , fStyle((StyleFlags)0)
+ , fItalicAngle(0)
+ , fAscent(0)
+ , fDescent(0)
+ , fStemV(0)
+ , fCapHeight(0)
+ , fBBox(SkIRect::MakeEmpty()) {}
+
+ ~SkAdvancedTypefaceMetrics() {}
+
+ SkString fFontName;
+
+ enum FontType : uint8_t {
+ kType1_Font,
+ kType1CID_Font,
+ kCFF_Font,
+ kTrueType_Font,
+ kOther_Font,
+ };
+ // The type of the underlying font program. This field determines which
+ // of the following fields are valid. If it is kOther_Font the per glyph
+ // information will never be populated.
+ FontType fType;
+
+ enum FontFlags : uint8_t {
+ kMultiMaster_FontFlag = 0x01, //!<May be true for Type1, CFF, or TrueType fonts.
+ kNotEmbeddable_FontFlag = 0x02, //!<May not be embedded.
+ kNotSubsettable_FontFlag = 0x04, //!<May not be subset.
+ };
+ FontFlags fFlags; // Global font flags.
+
+ uint16_t fLastGlyphID; // The last valid glyph ID in the font.
+ uint16_t fEmSize; // The size of the em box (defines font units).
+
+ // These enum values match the values used in the PDF file format.
+ enum StyleFlags : uint32_t {
+ kFixedPitch_Style = 0x00000001,
+ kSerif_Style = 0x00000002,
+ kScript_Style = 0x00000008,
+ kItalic_Style = 0x00000040,
+ kAllCaps_Style = 0x00010000,
+ kSmallCaps_Style = 0x00020000,
+ kForceBold_Style = 0x00040000
+ };
+ StyleFlags fStyle; // Font style characteristics.
+
+ int16_t fItalicAngle; // Counterclockwise degrees from vertical of the
+ // dominant vertical stroke for an Italic face.
+ // The following fields are all in font units.
+ int16_t fAscent; // Max height above baseline, not including accents.
+ int16_t fDescent; // Max depth below baseline (negative).
+ int16_t fStemV; // Thickness of dominant vertical stem.
+ int16_t fCapHeight; // Height (from baseline) of top of flat capitals.
+
+ SkIRect fBBox; // The bounding box of all glyphs (in font units).
+
+ // The names of each glyph, only populated for postscript fonts.
+ SkTArray<SkString> fGlyphNames;
+
+ // The mapping from glyph to Unicode, only populated if
+ // kToUnicode_PerGlyphInfo is passed to GetAdvancedTypefaceMetrics.
+ SkTDArray<SkUnichar> fGlyphToUnicode;
+
+private:
+ typedef SkRefCnt INHERITED;
+};
+
+namespace skstd {
+template <> struct is_bitmask_enum<SkAdvancedTypefaceMetrics::FontFlags> : std::true_type {};
+template <> struct is_bitmask_enum<SkAdvancedTypefaceMetrics::StyleFlags> : std::true_type {};
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkAlphaRuns.cpp b/gfx/skia/skia/src/core/SkAlphaRuns.cpp
new file mode 100644
index 000000000..92fb56097
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAlphaRuns.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkAntiRun.h"
+#include "SkUtils.h"
+
+void SkAlphaRuns::reset(int width) {
+ SkASSERT(width > 0);
+
+#ifdef SK_DEBUG
+#ifndef SK_DISABLE_SLOW_DEBUG_VALIDATION
+ sk_memset16((uint16_t*)fRuns, (uint16_t)(-42), width);
+#endif
+#endif
+ fRuns[0] = SkToS16(width);
+ fRuns[width] = 0;
+ fAlpha[0] = 0;
+
+ SkDEBUGCODE(fWidth = width;)
+ SkDEBUGCODE(this->validate();)
+}
+
+#ifdef SK_DEBUG
+ void SkAlphaRuns::assertValid(int y, int maxStep) const {
+#ifndef SK_DISABLE_SLOW_DEBUG_VALIDATION
+ int max = (y + 1) * maxStep - (y == maxStep - 1);
+
+ const int16_t* runs = fRuns;
+ const uint8_t* alpha = fAlpha;
+
+ while (*runs) {
+ SkASSERT(*alpha <= max);
+ alpha += *runs;
+ runs += *runs;
+ }
+#endif
+ }
+
+ void SkAlphaRuns::dump() const {
+ const int16_t* runs = fRuns;
+ const uint8_t* alpha = fAlpha;
+
+ SkDebugf("Runs");
+ while (*runs) {
+ int n = *runs;
+
+ SkDebugf(" %02x", *alpha);
+ if (n > 1) {
+ SkDebugf(",%d", n);
+ }
+ alpha += n;
+ runs += n;
+ }
+ SkDebugf("\n");
+ }
+
+ void SkAlphaRuns::validate() const {
+#ifndef SK_DISABLE_SLOW_DEBUG_VALIDATION
+ SkASSERT(fWidth > 0);
+
+ int count = 0;
+ const int16_t* runs = fRuns;
+
+ while (*runs) {
+ SkASSERT(*runs > 0);
+ count += *runs;
+ SkASSERT(count <= fWidth);
+ runs += *runs;
+ }
+ SkASSERT(count == fWidth);
+#endif
+ }
+#endif
diff --git a/gfx/skia/skia/src/core/SkAnnotation.cpp b/gfx/skia/skia/src/core/SkAnnotation.cpp
new file mode 100644
index 000000000..09a6296fb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAnnotation.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAnnotation.h"
+#include "SkAnnotationKeys.h"
+#include "SkCanvas.h"
+#include "SkPoint.h"
+#include "SkRect.h"
+
+const char* SkAnnotationKeys::URL_Key() {
+ return "SkAnnotationKey_URL";
+};
+
+const char* SkAnnotationKeys::Define_Named_Dest_Key() {
+ return "SkAnnotationKey_Define_Named_Dest";
+};
+
+const char* SkAnnotationKeys::Link_Named_Dest_Key() {
+ return "SkAnnotationKey_Link_Named_Dest";
+};
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkAnnotateRectWithURL(SkCanvas* canvas, const SkRect& rect, SkData* value) {
+ if (nullptr == value) {
+ return;
+ }
+ canvas->drawAnnotation(rect, SkAnnotationKeys::URL_Key(), value);
+}
+
+void SkAnnotateNamedDestination(SkCanvas* canvas, const SkPoint& point, SkData* name) {
+ if (nullptr == name) {
+ return;
+ }
+ const SkRect rect = SkRect::MakeXYWH(point.x(), point.y(), 0, 0);
+ canvas->drawAnnotation(rect, SkAnnotationKeys::Define_Named_Dest_Key(), name);
+}
+
+void SkAnnotateLinkToDestination(SkCanvas* canvas, const SkRect& rect, SkData* name) {
+ if (nullptr == name) {
+ return;
+ }
+ canvas->drawAnnotation(rect, SkAnnotationKeys::Link_Named_Dest_Key(), name);
+}
diff --git a/gfx/skia/skia/src/core/SkAnnotationKeys.h b/gfx/skia/skia/src/core/SkAnnotationKeys.h
new file mode 100644
index 000000000..dff933880
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAnnotationKeys.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAnnotationKeys_DEFINED
+#define SkAnnotationKeys_DEFINED
+
+#include "SkTypes.h"
+
+class SkAnnotationKeys {
+public:
+ /**
+ * Returns the canonical key whose payload is a URL
+ */
+ static const char* URL_Key();
+
+ /**
+ * Returns the canonical key whose payload is the name of a destination to
+ * be defined.
+ */
+ static const char* Define_Named_Dest_Key();
+
+ /**
+ * Returns the canonical key whose payload is the name of a destination to
+ * be linked to.
+ */
+ static const char* Link_Named_Dest_Key();
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkAntiRun.h b/gfx/skia/skia/src/core/SkAntiRun.h
new file mode 100644
index 000000000..8214e28d5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAntiRun.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkAntiRun_DEFINED
+#define SkAntiRun_DEFINED
+
+#include "SkBlitter.h"
+
+/** Sparse array of run-length-encoded alpha (supersampling coverage) values.
+ Sparseness allows us to independently compose several paths into the
+ same SkAlphaRuns buffer.
+*/
+
+class SkAlphaRuns {
+public:
+ int16_t* fRuns;
+ uint8_t* fAlpha;
+
+ /// Returns true if the scanline contains only a single run,
+ /// of alpha value 0.
+ bool empty() const {
+ SkASSERT(fRuns[0] > 0);
+ return fAlpha[0] == 0 && fRuns[fRuns[0]] == 0;
+ }
+
+ /// Reinitialize for a new scanline.
+ void reset(int width);
+
+ /**
+ * Insert into the buffer a run starting at (x-offsetX):
+ * if startAlpha > 0
+ * one pixel with value += startAlpha,
+ * max 255
+ * if middleCount > 0
+ * middleCount pixels with value += maxValue
+ * if stopAlpha > 0
+ * one pixel with value += stopAlpha
+ * Returns the offsetX value that should be passed on the next call,
+ * assuming we're on the same scanline. If the caller is switching
+ * scanlines, then offsetX should be 0 when this is called.
+ */
+ SK_ALWAYS_INLINE int add(int x, U8CPU startAlpha, int middleCount, U8CPU stopAlpha,
+ U8CPU maxValue, int offsetX) {
+ SkASSERT(middleCount >= 0);
+ SkASSERT(x >= 0 && x + (startAlpha != 0) + middleCount + (stopAlpha != 0) <= fWidth);
+
+ SkASSERT(fRuns[offsetX] >= 0);
+
+ int16_t* runs = fRuns + offsetX;
+ uint8_t* alpha = fAlpha + offsetX;
+ uint8_t* lastAlpha = alpha;
+ x -= offsetX;
+
+ if (startAlpha) {
+ SkAlphaRuns::Break(runs, alpha, x, 1);
+ /* I should be able to just add alpha[x] + startAlpha.
+ However, if the trailing edge of the previous span and the leading
+ edge of the current span round to the same super-sampled x value,
+ I might overflow to 256 with this add, hence the funny subtract (crud).
+ */
+ unsigned tmp = alpha[x] + startAlpha;
+ SkASSERT(tmp <= 256);
+ alpha[x] = SkToU8(tmp - (tmp >> 8)); // was (tmp >> 7), but that seems wrong if we're trying to catch 256
+
+ runs += x + 1;
+ alpha += x + 1;
+ x = 0;
+ SkDEBUGCODE(this->validate();)
+ }
+
+ if (middleCount) {
+ SkAlphaRuns::Break(runs, alpha, x, middleCount);
+ alpha += x;
+ runs += x;
+ x = 0;
+ do {
+ alpha[0] = SkToU8(alpha[0] + maxValue);
+ int n = runs[0];
+ SkASSERT(n <= middleCount);
+ alpha += n;
+ runs += n;
+ middleCount -= n;
+ } while (middleCount > 0);
+ SkDEBUGCODE(this->validate();)
+ lastAlpha = alpha;
+ }
+
+ if (stopAlpha) {
+ SkAlphaRuns::Break(runs, alpha, x, 1);
+ alpha += x;
+ alpha[0] = SkToU8(alpha[0] + stopAlpha);
+ SkDEBUGCODE(this->validate();)
+ lastAlpha = alpha;
+ }
+
+ return SkToS32(lastAlpha - fAlpha); // new offsetX
+ }
+
+ SkDEBUGCODE(void assertValid(int y, int maxStep) const;)
+ SkDEBUGCODE(void dump() const;)
+
+ /**
+ * Break the runs in the buffer at offsets x and x+count, properly
+ * updating the runs to the right and left.
+ * i.e. from the state AAAABBBB, run-length encoded as A4B4,
+ * Break(..., 2, 5) would produce AAAABBBB rle as A2A2B3B1.
+ * Allows add() to sum another run to some of the new sub-runs.
+ * i.e. adding ..CCCCC. would produce AADDEEEB, rle as A2D2E3B1.
+ */
+ static void Break(int16_t runs[], uint8_t alpha[], int x, int count) {
+ SkASSERT(count > 0 && x >= 0);
+
+ // SkAlphaRuns::BreakAt(runs, alpha, x);
+ // SkAlphaRuns::BreakAt(&runs[x], &alpha[x], count);
+
+ int16_t* next_runs = runs + x;
+ uint8_t* next_alpha = alpha + x;
+
+ while (x > 0) {
+ int n = runs[0];
+ SkASSERT(n > 0);
+
+ if (x < n) {
+ alpha[x] = alpha[0];
+ runs[0] = SkToS16(x);
+ runs[x] = SkToS16(n - x);
+ break;
+ }
+ runs += n;
+ alpha += n;
+ x -= n;
+ }
+
+ runs = next_runs;
+ alpha = next_alpha;
+ x = count;
+
+ for (;;) {
+ int n = runs[0];
+ SkASSERT(n > 0);
+
+ if (x < n) {
+ alpha[x] = alpha[0];
+ runs[0] = SkToS16(x);
+ runs[x] = SkToS16(n - x);
+ break;
+ }
+ x -= n;
+ if (x <= 0) {
+ break;
+ }
+ runs += n;
+ alpha += n;
+ }
+ }
+
+ /**
+ * Cut (at offset x in the buffer) a run into two shorter runs with
+ * matching alpha values.
+ * Used by the RectClipBlitter to trim a RLE encoding to match the
+ * clipping rectangle.
+ */
+ static void BreakAt(int16_t runs[], uint8_t alpha[], int x) {
+ while (x > 0) {
+ int n = runs[0];
+ SkASSERT(n > 0);
+
+ if (x < n) {
+ alpha[x] = alpha[0];
+ runs[0] = SkToS16(x);
+ runs[x] = SkToS16(n - x);
+ break;
+ }
+ runs += n;
+ alpha += n;
+ x -= n;
+ }
+ }
+
+private:
+ SkDEBUGCODE(int fWidth;)
+ SkDEBUGCODE(void validate() const;)
+};
+
+#endif
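
The runs/alpha pair in SkAlphaRuns is a sparse parallel-array encoding: runs[i] is the length of the run starting at pixel i (slots inside a run are unused), alpha[i] is its value, and a zero length terminates the scanline. A standalone sketch of the A4B4 -> A2A2B3B1 example from the Break() comment above, using a local copy of the single-cut logic rather than the private header:

    #include <cassert>
    #include <cstdint>

    // Minimal copy of the single-cut logic (see SkAlphaRuns::BreakAt above):
    // split the run covering offset x into two runs with the same alpha.
    static void breakAt(int16_t runs[], uint8_t alpha[], int x) {
        while (x > 0) {
            int n = runs[0];
            if (x < n) {
                alpha[x] = alpha[0];
                runs[0]  = (int16_t)x;
                runs[x]  = (int16_t)(n - x);
                break;
            }
            runs  += n;
            alpha += n;
            x     -= n;
        }
    }

    int main() {
        // AAAABBBB encoded sparsely as A4B4: runs[0]=4, runs[4]=4, runs[8]=0 sentinel.
        int16_t runs[9]  = { 4, 0, 0, 0, 4, 0, 0, 0, 0 };
        uint8_t alpha[8] = { 'A', 0, 0, 0, 'B', 0, 0, 0 };

        breakAt(runs, alpha, 2);          // cut at offset 2
        breakAt(runs + 2, alpha + 2, 5);  // cut at offset 2 + 5 = 7
        // Result is A2A2B3B1, matching the Break() comment.
        assert(runs[0] == 2 && runs[2] == 2 && runs[4] == 3 && runs[7] == 1);
        assert(alpha[0] == 'A' && alpha[2] == 'A' && alpha[4] == 'B' && alpha[7] == 'B');
        return 0;
    }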
diff --git a/gfx/skia/skia/src/core/SkAutoKern.h b/gfx/skia/skia/src/core/SkAutoKern.h
new file mode 100644
index 000000000..8b032519b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAutoKern.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkAutoKern_DEFINED
+#define SkAutoKern_DEFINED
+
+#include "SkGlyph.h"
+
+#define SkAutoKern_Adjust(prev, next) SkIntToScalar(((next) - (prev) + 32) >> 6)
+
+/* This is a helper class to perform auto-kerning:
+ * the adjust() method returns an SkScalar corresponding
+ * to a +1/0/-1 pixel adjustment.
+ */
+
+class SkAutoKern {
+public:
+ SkAutoKern() : fPrevRsbDelta(0) {}
+
+ SkScalar adjust(const SkGlyph& glyph)
+ {
+// if (SkAbs32(glyph.fLsbDelta) > 47 || SkAbs32(glyph.fRsbDelta) > 47)
+// printf("------- %d> L %d R %d\n", glyph.f_GlyphID, glyph.fLsbDelta, glyph.fRsbDelta);
+
+#if 0
+ int distort = fPrevRsbDelta - glyph.fLsbDelta;
+
+ fPrevRsbDelta = glyph.fRsbDelta;
+
+ if (distort >= 32)
+ return -SK_Scalar1;
+ else if (distort < -32)
+ return +SK_Scalar1;
+ else
+ return 0;
+#else
+ SkScalar adjust = SkAutoKern_Adjust(fPrevRsbDelta, glyph.fLsbDelta);
+ fPrevRsbDelta = glyph.fRsbDelta;
+ return adjust;
+#endif
+ }
+private:
+ int fPrevRsbDelta;
+};
+
+#endif
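
SkAutoKern_Adjust treats the lsb/rsb deltas as 26.6 fixed-point values (64 units per pixel): adding 32 and shifting right by 6 rounds the delta to the nearest whole pixel, which is where the +1/0/-1 adjustment comes from. A standalone check of that arithmetic, without the SkScalar conversion:

    #include <cassert>

    // Round a 26.6 fixed-point delta (64 units per pixel) to whole pixels,
    // mirroring SkAutoKern_Adjust without SkIntToScalar.
    static int kernAdjust(int prevRsbDelta, int nextLsbDelta) {
        return ((nextLsbDelta - prevRsbDelta) + 32) >> 6;
    }

    int main() {
        assert(kernAdjust(0, 0)    ==  0);   // no change in spacing
        assert(kernAdjust(40, -30) == -1);   // -70/64 rounds to -1 pixel
        assert(kernAdjust(-30, 40) ==  1);   // +70/64 rounds to +1 pixel
        assert(kernAdjust(0, 20)   ==  0);   // +20/64 rounds to 0
        return 0;
    }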
diff --git a/gfx/skia/skia/src/core/SkAutoPixmapStorage.cpp b/gfx/skia/skia/src/core/SkAutoPixmapStorage.cpp
new file mode 100644
index 000000000..865b3a602
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAutoPixmapStorage.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAutoPixmapStorage.h"
+#include "SkData.h"
+
+SkAutoPixmapStorage::SkAutoPixmapStorage() : fStorage(nullptr) {}
+
+SkAutoPixmapStorage::~SkAutoPixmapStorage() {
+ this->freeStorage();
+}
+
+size_t SkAutoPixmapStorage::AllocSize(const SkImageInfo& info, size_t* rowBytes) {
+ size_t rb = info.minRowBytes();
+ if (rowBytes) {
+ *rowBytes = rb;
+ }
+ return info.getSafeSize(rb);
+}
+
+bool SkAutoPixmapStorage::tryAlloc(const SkImageInfo& info) {
+ this->freeStorage();
+
+ size_t rb;
+ size_t size = AllocSize(info, &rb);
+ if (0 == size) {
+ return false;
+ }
+ void* pixels = sk_malloc_flags(size, 0);
+ if (nullptr == pixels) {
+ return false;
+ }
+ this->reset(info, pixels, rb);
+ fStorage = pixels;
+ return true;
+}
+
+void SkAutoPixmapStorage::alloc(const SkImageInfo& info) {
+ if (!this->tryAlloc(info)) {
+ sk_throw();
+ }
+}
+
+const SkData* SkAutoPixmapStorage::detachPixelsAsData() {
+ if (!fStorage) {
+ return nullptr;
+ }
+
+ auto data = SkData::MakeFromMalloc(fStorage, this->getSafeSize());
+ fStorage = nullptr;
+ this->INHERITED::reset();
+
+ return data.release();
+}
diff --git a/gfx/skia/skia/src/core/SkAutoPixmapStorage.h b/gfx/skia/skia/src/core/SkAutoPixmapStorage.h
new file mode 100644
index 000000000..379bf420b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAutoPixmapStorage.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAutoPixmapStorage_DEFINED
+#define SkAutoPixmapStorage_DEFINED
+
+#include "SkPixmap.h"
+
+class SK_API SkAutoPixmapStorage : public SkPixmap {
+public:
+ SkAutoPixmapStorage();
+ ~SkAutoPixmapStorage();
+
+ /**
+ * Try to allocate memory for the pixels needed to match the specified Info. On success
+ * return true and fill out the pixmap to point to that memory. The storage will be freed
+ * when this object is destroyed, or if another call to tryAlloc() or alloc() is made.
+ *
+ * On failure, return false and reset() the pixmap to empty.
+ */
+ bool tryAlloc(const SkImageInfo&);
+
+ /**
+ * Allocate memory for the pixels needed to match the specified Info and fill out the pixmap
+ * to point to that memory. The storage will be freed when this object is destroyed,
+ * or if another call to tryAlloc() or alloc() is made.
+ *
+ * If the memory cannot be allocated, calls sk_throw().
+ */
+ void alloc(const SkImageInfo&);
+
+ /**
+ * Gets the size and optionally the rowBytes that would be allocated by SkAutoPixmapStorage if
+ * alloc/tryAlloc was called.
+ */
+ static size_t AllocSize(const SkImageInfo& info, size_t* rowBytes);
+
+ /**
+ * Returns an SkData object wrapping the allocated pixels memory, and resets the pixmap.
+ * If the storage hasn't been allocated, the result is NULL.
+ */
+ const SkData* SK_WARN_UNUSED_RESULT detachPixelsAsData();
+
+ // We wrap these so we can clear our internal storage
+
+ void reset() {
+ this->freeStorage();
+ this->INHERITED::reset();
+ }
+ void reset(const SkImageInfo& info, const void* addr, size_t rb, SkColorTable* ctable = NULL) {
+ this->freeStorage();
+ this->INHERITED::reset(info, addr, rb, ctable);
+ }
+ void reset(const SkImageInfo& info) {
+ this->freeStorage();
+ this->INHERITED::reset(info);
+ }
+ bool SK_WARN_UNUSED_RESULT reset(const SkMask& mask) {
+ this->freeStorage();
+ return this->INHERITED::reset(mask);
+ }
+
+private:
+ void* fStorage;
+
+ void freeStorage() {
+ sk_free(fStorage);
+ fStorage = nullptr;
+ }
+
+ typedef SkPixmap INHERITED;
+};
+
+#endif
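
A hedged usage sketch for SkAutoPixmapStorage: allocate, write, then hand the pixels off as an SkData. It assumes SkImageInfo::MakeN32Premul and the SkPixmap accessors writable_addr()/getSafeSize(), and is not taken from Skia itself:

    #include "SkAutoPixmapStorage.h"
    #include "SkImageInfo.h"

    // Allocate a 64x64 N32 pixmap; the storage is freed automatically unless
    // we detach it as an SkData, in which case the caller owns the data.
    static const SkData* makePixels() {
        SkAutoPixmapStorage storage;
        if (!storage.tryAlloc(SkImageInfo::MakeN32Premul(64, 64))) {
            return nullptr;                    // allocation failed; pixmap was reset
        }
        sk_bzero(storage.writable_addr(), storage.getSafeSize());
        return storage.detachPixelsAsData();
    }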
diff --git a/gfx/skia/skia/src/core/SkBBHFactory.cpp b/gfx/skia/skia/src/core/SkBBHFactory.cpp
new file mode 100644
index 000000000..e8534cacb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBBHFactory.cpp
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBBHFactory.h"
+#include "SkRect.h"
+#include "SkRTree.h"
+#include "SkScalar.h"
+
+SkBBoxHierarchy* SkRTreeFactory::operator()(const SkRect& bounds) const {
+ SkScalar aspectRatio = bounds.width() / bounds.height();
+ return new SkRTree(aspectRatio);
+}
diff --git a/gfx/skia/skia/src/core/SkBBoxHierarchy.h b/gfx/skia/skia/src/core/SkBBoxHierarchy.h
new file mode 100644
index 000000000..63e62c2a4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBBoxHierarchy.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBBoxHierarchy_DEFINED
+#define SkBBoxHierarchy_DEFINED
+
+#include "SkRect.h"
+#include "SkRefCnt.h"
+#include "SkTDArray.h"
+
+/**
+ * Interface for a spatial data structure that stores axis-aligned bounding
+ * boxes and allows efficient retrieval of intersections with query rectangles.
+ */
+class SkBBoxHierarchy : public SkRefCnt {
+public:
+ SkBBoxHierarchy() {}
+ virtual ~SkBBoxHierarchy() {}
+
+ /**
+ * Insert N bounding boxes into the hierarchy.
+ */
+ virtual void insert(const SkRect[], int N) = 0;
+
+ /**
+ * Populate results with the indices of bounding boxes intersecting the query.
+ */
+ virtual void search(const SkRect& query, SkTDArray<int>* results) const = 0;
+
+ virtual size_t bytesUsed() const = 0;
+
+ // Get the root bound.
+ virtual SkRect getRootBound() const = 0;
+
+private:
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBigPicture.cpp b/gfx/skia/skia/src/core/SkBigPicture.cpp
new file mode 100644
index 000000000..2a2e438fd
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBigPicture.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBBoxHierarchy.h"
+#include "SkBigPicture.h"
+#include "SkPictureCommon.h"
+#include "SkRecord.h"
+#include "SkRecordDraw.h"
+#include "SkTraceEvent.h"
+
+SkBigPicture::SkBigPicture(const SkRect& cull,
+ SkRecord* record,
+ SnapshotArray* drawablePicts,
+ SkBBoxHierarchy* bbh,
+ size_t approxBytesUsedBySubPictures)
+ : fCullRect(cull)
+ , fApproxBytesUsedBySubPictures(approxBytesUsedBySubPictures)
+ , fRecord(record) // Take ownership of caller's ref.
+ , fDrawablePicts(drawablePicts) // Take ownership.
+ , fBBH(bbh) // Take ownership of caller's ref.
+{}
+
+void SkBigPicture::playback(SkCanvas* canvas, AbortCallback* callback) const {
+ SkASSERT(canvas);
+
+ // If the query contains the whole picture, don't bother with the BBH.
+ SkRect clipBounds = { 0, 0, 0, 0 };
+ (void)canvas->getClipBounds(&clipBounds);
+ const bool useBBH = !clipBounds.contains(this->cullRect());
+
+ SkRecordDraw(*fRecord,
+ canvas,
+ this->drawablePicts(),
+ nullptr,
+ this->drawableCount(),
+ useBBH ? fBBH.get() : nullptr,
+ callback);
+}
+
+void SkBigPicture::partialPlayback(SkCanvas* canvas,
+ int start,
+ int stop,
+ const SkMatrix& initialCTM) const {
+ SkASSERT(canvas);
+ SkRecordPartialDraw(*fRecord,
+ canvas,
+ this->drawablePicts(),
+ this->drawableCount(),
+ start,
+ stop,
+ initialCTM);
+}
+
+const SkBigPicture::Analysis& SkBigPicture::analysis() const {
+ fAnalysisOnce([this] { fAnalysis.init(*fRecord); });
+ return fAnalysis;
+}
+
+SkRect SkBigPicture::cullRect() const { return fCullRect; }
+bool SkBigPicture::willPlayBackBitmaps() const { return this->analysis().fWillPlaybackBitmaps; }
+int SkBigPicture::numSlowPaths() const { return this->analysis().fNumSlowPathsAndDashEffects; }
+int SkBigPicture::approximateOpCount() const { return fRecord->count(); }
+size_t SkBigPicture::approximateBytesUsed() const {
+ size_t bytes = sizeof(*this) + fRecord->bytesUsed() + fApproxBytesUsedBySubPictures;
+ if (fBBH) { bytes += fBBH->bytesUsed(); }
+ return bytes;
+}
+
+int SkBigPicture::drawableCount() const {
+ return fDrawablePicts ? fDrawablePicts->count() : 0;
+}
+
+SkPicture const* const* SkBigPicture::drawablePicts() const {
+ return fDrawablePicts ? fDrawablePicts->begin() : nullptr;
+}
+
+void SkBigPicture::Analysis::init(const SkRecord& record) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkBigPicture::Analysis::init()");
+ SkBitmapHunter bitmap;
+ SkPathCounter path;
+
+ bool hasBitmap = false;
+ for (int i = 0; i < record.count(); i++) {
+ hasBitmap = hasBitmap || record.visit(i, bitmap);
+ record.visit(i, path);
+ }
+
+ fWillPlaybackBitmaps = hasBitmap;
+ fNumSlowPathsAndDashEffects = SkTMin<int>(path.fNumSlowPathsAndDashEffects, 255);
+}
diff --git a/gfx/skia/skia/src/core/SkBigPicture.h b/gfx/skia/skia/src/core/SkBigPicture.h
new file mode 100644
index 000000000..c5dfda95e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBigPicture.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBigPicture_DEFINED
+#define SkBigPicture_DEFINED
+
+#include "SkOnce.h"
+#include "SkPicture.h"
+#include "SkRect.h"
+#include "SkTemplates.h"
+
+class SkBBoxHierarchy;
+class SkMatrix;
+class SkRecord;
+
+// An implementation of SkPicture supporting an arbitrary number of drawing commands.
+class SkBigPicture final : public SkPicture {
+public:
+ // An array of refcounted const SkPicture pointers.
+ class SnapshotArray : ::SkNoncopyable {
+ public:
+ SnapshotArray(const SkPicture* pics[], int count) : fPics(pics), fCount(count) {}
+ ~SnapshotArray() { for (int i = 0; i < fCount; i++) { fPics[i]->unref(); } }
+
+ const SkPicture* const* begin() const { return fPics; }
+ int count() const { return fCount; }
+ private:
+ SkAutoTMalloc<const SkPicture*> fPics;
+ int fCount;
+ };
+
+ SkBigPicture(const SkRect& cull,
+ SkRecord*, // We take ownership of the caller's ref.
+ SnapshotArray*, // We take exclusive ownership.
+ SkBBoxHierarchy*, // We take ownership of the caller's ref.
+ size_t approxBytesUsedBySubPictures);
+
+
+// SkPicture overrides
+ void playback(SkCanvas*, AbortCallback*) const override;
+ SkRect cullRect() const override;
+ bool willPlayBackBitmaps() const override;
+ int approximateOpCount() const override;
+ size_t approximateBytesUsed() const override;
+ const SkBigPicture* asSkBigPicture() const override { return this; }
+
+// Used by GrLayerHoister
+ void partialPlayback(SkCanvas*,
+ int start,
+ int stop,
+ const SkMatrix& initialCTM) const;
+// Used by GrRecordReplaceDraw
+ const SkBBoxHierarchy* bbh() const { return fBBH; }
+ const SkRecord* record() const { return fRecord; }
+
+private:
+ struct Analysis {
+ void init(const SkRecord&);
+
+ bool suitableForGpuRasterization(const char** reason) const;
+
+ uint8_t fNumSlowPathsAndDashEffects;
+ bool fWillPlaybackBitmaps : 1;
+ };
+
+ int numSlowPaths() const override;
+ const Analysis& analysis() const;
+ int drawableCount() const;
+ SkPicture const* const* drawablePicts() const;
+
+ const SkRect fCullRect;
+ const size_t fApproxBytesUsedBySubPictures;
+ mutable SkOnce fAnalysisOnce;
+ mutable Analysis fAnalysis;
+ SkAutoTUnref<const SkRecord> fRecord;
+ SkAutoTDelete<const SnapshotArray> fDrawablePicts;
+ SkAutoTUnref<const SkBBoxHierarchy> fBBH;
+};
+
+#endif//SkBigPicture_DEFINED
diff --git a/gfx/skia/skia/src/core/SkBitmap.cpp b/gfx/skia/skia/src/core/SkBitmap.cpp
new file mode 100644
index 000000000..c62f5f391
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmap.cpp
@@ -0,0 +1,1261 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAtomics.h"
+#include "SkBitmap.h"
+#include "SkColorPriv.h"
+#include "SkConfig8888.h"
+#include "SkData.h"
+#include "SkFilterQuality.h"
+#include "SkMallocPixelRef.h"
+#include "SkMask.h"
+#include "SkMath.h"
+#include "SkPixelRef.h"
+#include "SkReadBuffer.h"
+#include "SkRect.h"
+#include "SkScalar.h"
+#include "SkTemplates.h"
+#include "SkUnPreMultiply.h"
+#include "SkWriteBuffer.h"
+
+#include <string.h>
+
+static bool reset_return_false(SkBitmap* bm) {
+ bm->reset();
+ return false;
+}
+
+SkBitmap::SkBitmap() {
+ sk_bzero(this, sizeof(*this));
+}
+
+SkBitmap::SkBitmap(const SkBitmap& src) {
+ SkDEBUGCODE(src.validate();)
+ sk_bzero(this, sizeof(*this));
+ *this = src;
+ SkDEBUGCODE(this->validate();)
+}
+
+SkBitmap::SkBitmap(SkBitmap&& other) : SkBitmap() { this->swap(other); }
+
+SkBitmap::~SkBitmap() {
+ SkDEBUGCODE(this->validate();)
+ this->freePixels();
+}
+
+SkBitmap& SkBitmap::operator=(const SkBitmap& src) {
+ if (this != &src) {
+ this->freePixels();
+ this->fPixelRef = SkSafeRef(src.fPixelRef);
+ if (this->fPixelRef) {
+ // ignore the values if we have a pixelRef
+ this->fPixels = nullptr;
+ this->fColorTable = nullptr;
+ } else {
+ this->fPixels = src.fPixels;
+ this->fColorTable = src.fColorTable;
+ }
+ // we reset our locks if we get blown away
+ this->fPixelLockCount = 0;
+
+ this->fPixelRefOrigin = src.fPixelRefOrigin;
+ this->fInfo = src.fInfo;
+ this->fRowBytes = src.fRowBytes;
+ this->fFlags = src.fFlags;
+ }
+
+ SkDEBUGCODE(this->validate();)
+ return *this;
+}
+
+SkBitmap& SkBitmap::operator=(SkBitmap&& other) {
+ if (this != &other) {
+ this->swap(other);
+ other.reset();
+ }
+ return *this;
+}
+
+void SkBitmap::swap(SkBitmap& other) {
+ SkTSwap(fColorTable, other.fColorTable);
+ SkTSwap(fPixelRef, other.fPixelRef);
+ SkTSwap(fPixelRefOrigin, other.fPixelRefOrigin);
+ SkTSwap(fPixelLockCount, other.fPixelLockCount);
+ SkTSwap(fPixels, other.fPixels);
+ SkTSwap(fInfo, other.fInfo);
+ SkTSwap(fRowBytes, other.fRowBytes);
+ SkTSwap(fFlags, other.fFlags);
+
+ SkDEBUGCODE(this->validate();)
+}
+
+void SkBitmap::reset() {
+ this->freePixels();
+ this->fInfo.reset();
+ sk_bzero(this, sizeof(*this));
+}
+
+void SkBitmap::getBounds(SkRect* bounds) const {
+ SkASSERT(bounds);
+ bounds->set(0, 0,
+ SkIntToScalar(fInfo.width()), SkIntToScalar(fInfo.height()));
+}
+
+void SkBitmap::getBounds(SkIRect* bounds) const {
+ SkASSERT(bounds);
+ bounds->set(0, 0, fInfo.width(), fInfo.height());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkBitmap::setInfo(const SkImageInfo& info, size_t rowBytes) {
+ SkAlphaType newAT = info.alphaType();
+ if (!SkColorTypeValidateAlphaType(info.colorType(), info.alphaType(), &newAT)) {
+ return reset_return_false(this);
+ }
+ // don't look at info.alphaType(), since newAT is the real value...
+
+ // require that rowBytes fit in 31bits
+ int64_t mrb = info.minRowBytes64();
+ if ((int32_t)mrb != mrb) {
+ return reset_return_false(this);
+ }
+ if ((int64_t)rowBytes != (int32_t)rowBytes) {
+ return reset_return_false(this);
+ }
+
+ if (info.width() < 0 || info.height() < 0) {
+ return reset_return_false(this);
+ }
+
+ if (kUnknown_SkColorType == info.colorType()) {
+ rowBytes = 0;
+ } else if (0 == rowBytes) {
+ rowBytes = (size_t)mrb;
+ } else if (!info.validRowBytes(rowBytes)) {
+ return reset_return_false(this);
+ }
+
+ this->freePixels();
+
+ fInfo = info.makeAlphaType(newAT);
+ fRowBytes = SkToU32(rowBytes);
+ return true;
+}
+
+bool SkBitmap::setAlphaType(SkAlphaType newAlphaType) {
+ if (!SkColorTypeValidateAlphaType(fInfo.colorType(), newAlphaType, &newAlphaType)) {
+ return false;
+ }
+ if (fInfo.alphaType() != newAlphaType) {
+ fInfo = fInfo.makeAlphaType(newAlphaType);
+ if (fPixelRef) {
+ fPixelRef->changeAlphaType(newAlphaType);
+ }
+ }
+ return true;
+}
+
+void SkBitmap::updatePixelsFromRef() const {
+ if (fPixelRef) {
+ if (fPixelLockCount > 0) {
+ SkASSERT(fPixelRef->isLocked());
+
+ void* p = fPixelRef->pixels();
+ if (p) {
+ p = (char*)p
+ + fPixelRefOrigin.fY * fRowBytes
+ + fPixelRefOrigin.fX * fInfo.bytesPerPixel();
+ }
+ fPixels = p;
+ fColorTable = fPixelRef->colorTable();
+ } else {
+ SkASSERT(0 == fPixelLockCount);
+ fPixels = nullptr;
+ fColorTable = nullptr;
+ }
+ }
+}
+
+SkPixelRef* SkBitmap::setPixelRef(SkPixelRef* pr, int dx, int dy) {
+#ifdef SK_DEBUG
+ if (pr) {
+ if (kUnknown_SkColorType != fInfo.colorType()) {
+ const SkImageInfo& prInfo = pr->info();
+ SkASSERT(fInfo.width() <= prInfo.width());
+ SkASSERT(fInfo.height() <= prInfo.height());
+ SkASSERT(fInfo.colorType() == prInfo.colorType());
+ switch (prInfo.alphaType()) {
+ case kUnknown_SkAlphaType:
+ SkASSERT(fInfo.alphaType() == kUnknown_SkAlphaType);
+ break;
+ case kOpaque_SkAlphaType:
+ case kPremul_SkAlphaType:
+ SkASSERT(fInfo.alphaType() == kOpaque_SkAlphaType ||
+ fInfo.alphaType() == kPremul_SkAlphaType);
+ break;
+ case kUnpremul_SkAlphaType:
+ SkASSERT(fInfo.alphaType() == kOpaque_SkAlphaType ||
+ fInfo.alphaType() == kUnpremul_SkAlphaType);
+ break;
+ }
+ }
+ }
+#endif
+
+ if (pr) {
+ const SkImageInfo& info = pr->info();
+ fPixelRefOrigin.set(SkTPin(dx, 0, info.width()), SkTPin(dy, 0, info.height()));
+ } else {
+ // ignore dx,dy if there is no pixelref
+ fPixelRefOrigin.setZero();
+ }
+
+ if (fPixelRef != pr) {
+ this->freePixels();
+ SkASSERT(nullptr == fPixelRef);
+
+ SkSafeRef(pr);
+ fPixelRef = pr;
+ this->updatePixelsFromRef();
+ }
+
+ SkDEBUGCODE(this->validate();)
+ return pr;
+}
+
+void SkBitmap::lockPixels() const {
+ if (fPixelRef && 0 == sk_atomic_inc(&fPixelLockCount)) {
+ fPixelRef->lockPixels();
+ this->updatePixelsFromRef();
+ }
+ SkDEBUGCODE(this->validate();)
+}
+
+void SkBitmap::unlockPixels() const {
+ SkASSERT(nullptr == fPixelRef || fPixelLockCount > 0);
+
+ if (fPixelRef && 1 == sk_atomic_dec(&fPixelLockCount)) {
+ fPixelRef->unlockPixels();
+ this->updatePixelsFromRef();
+ }
+ SkDEBUGCODE(this->validate();)
+}
+
+bool SkBitmap::lockPixelsAreWritable() const {
+ return (fPixelRef) ? fPixelRef->lockPixelsAreWritable() : false;
+}
+
+void SkBitmap::setPixels(void* p, SkColorTable* ctable) {
+ if (nullptr == p) {
+ this->setPixelRef(nullptr);
+ return;
+ }
+
+ if (kUnknown_SkColorType == fInfo.colorType()) {
+ this->setPixelRef(nullptr);
+ return;
+ }
+
+ SkPixelRef* pr = SkMallocPixelRef::NewDirect(fInfo, p, fRowBytes, ctable);
+ if (nullptr == pr) {
+ this->setPixelRef(nullptr);
+ return;
+ }
+
+ this->setPixelRef(pr)->unref();
+
+ // since we're already allocated, we lockPixels right away
+ this->lockPixels();
+ SkDEBUGCODE(this->validate();)
+}
+
+bool SkBitmap::tryAllocPixels(Allocator* allocator, SkColorTable* ctable) {
+ HeapAllocator stdalloc;
+
+ if (nullptr == allocator) {
+ allocator = &stdalloc;
+ }
+ return allocator->allocPixelRef(this, ctable);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkBitmap::tryAllocPixels(const SkImageInfo& requestedInfo, size_t rowBytes) {
+ if (kIndex_8_SkColorType == requestedInfo.colorType()) {
+ return reset_return_false(this);
+ }
+ if (!this->setInfo(requestedInfo, rowBytes)) {
+ return reset_return_false(this);
+ }
+
+ // setInfo may have corrected info (e.g. 565 is always opaque).
+ const SkImageInfo& correctedInfo = this->info();
+ // setInfo may have computed a valid rowbytes if 0 were passed in
+ rowBytes = this->rowBytes();
+
+ SkMallocPixelRef::PRFactory defaultFactory;
+
+ SkPixelRef* pr = defaultFactory.create(correctedInfo, rowBytes, nullptr);
+ if (nullptr == pr) {
+ return reset_return_false(this);
+ }
+ this->setPixelRef(pr)->unref();
+
+ // TODO: lockPixels could/should return bool or void*/nullptr
+ this->lockPixels();
+ if (nullptr == this->getPixels()) {
+ return reset_return_false(this);
+ }
+ return true;
+}
+
+bool SkBitmap::tryAllocPixels(const SkImageInfo& requestedInfo, SkPixelRefFactory* factory,
+ SkColorTable* ctable) {
+ if (kIndex_8_SkColorType == requestedInfo.colorType() && nullptr == ctable) {
+ return reset_return_false(this);
+ }
+ if (!this->setInfo(requestedInfo)) {
+ return reset_return_false(this);
+ }
+
+ // setInfo may have corrected info (e.g. 565 is always opaque).
+ const SkImageInfo& correctedInfo = this->info();
+
+ SkMallocPixelRef::PRFactory defaultFactory;
+ if (nullptr == factory) {
+ factory = &defaultFactory;
+ }
+
+ SkPixelRef* pr = factory->create(correctedInfo, correctedInfo.minRowBytes(), ctable);
+ if (nullptr == pr) {
+ return reset_return_false(this);
+ }
+ this->setPixelRef(pr)->unref();
+
+ // TODO: lockPixels could/should return bool or void*/nullptr
+ this->lockPixels();
+ if (nullptr == this->getPixels()) {
+ return reset_return_false(this);
+ }
+ return true;
+}
+
+static void invoke_release_proc(void (*proc)(void* pixels, void* ctx), void* pixels, void* ctx) {
+ if (proc) {
+ proc(pixels, ctx);
+ }
+}
+
+bool SkBitmap::installPixels(const SkImageInfo& requestedInfo, void* pixels, size_t rb,
+ SkColorTable* ct, void (*releaseProc)(void* addr, void* context),
+ void* context) {
+ if (!this->setInfo(requestedInfo, rb)) {
+ invoke_release_proc(releaseProc, pixels, context);
+ this->reset();
+ return false;
+ }
+ if (nullptr == pixels) {
+ invoke_release_proc(releaseProc, pixels, context);
+ return true; // we behaved as if they called setInfo()
+ }
+
+ // setInfo may have corrected info (e.g. 565 is always opaque).
+ const SkImageInfo& correctedInfo = this->info();
+
+ SkPixelRef* pr = SkMallocPixelRef::NewWithProc(correctedInfo, rb, ct, pixels, releaseProc,
+ context);
+ if (!pr) {
+ this->reset();
+ return false;
+ }
+
+ this->setPixelRef(pr)->unref();
+
+ // since we're already allocated, we lockPixels right away
+ this->lockPixels();
+ SkDEBUGCODE(this->validate();)
+ return true;
+}
+
+bool SkBitmap::installPixels(const SkPixmap& pixmap) {
+ return this->installPixels(pixmap.info(), pixmap.writable_addr(),
+ pixmap.rowBytes(), pixmap.ctable(),
+ nullptr, nullptr);
+}
+
+bool SkBitmap::installMaskPixels(const SkMask& mask) {
+ if (SkMask::kA8_Format != mask.fFormat) {
+ this->reset();
+ return false;
+ }
+ return this->installPixels(SkImageInfo::MakeA8(mask.fBounds.width(),
+ mask.fBounds.height()),
+ mask.fImage, mask.fRowBytes);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkBitmap::freePixels() {
+ if (fPixelRef) {
+ if (fPixelLockCount > 0) {
+ fPixelRef->unlockPixels();
+ }
+ fPixelRef->unref();
+ fPixelRef = nullptr;
+ fPixelRefOrigin.setZero();
+ }
+ fPixelLockCount = 0;
+ fPixels = nullptr;
+ fColorTable = nullptr;
+}
+
+uint32_t SkBitmap::getGenerationID() const {
+ return (fPixelRef) ? fPixelRef->getGenerationID() : 0;
+}
+
+void SkBitmap::notifyPixelsChanged() const {
+ SkASSERT(!this->isImmutable());
+ if (fPixelRef) {
+ fPixelRef->notifyPixelsChanged();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** We explicitly use the same allocator for our pixels that SkMask does,
+ so that we can freely assign memory allocated by one class to the other.
+ */
+bool SkBitmap::HeapAllocator::allocPixelRef(SkBitmap* dst,
+ SkColorTable* ctable) {
+ const SkImageInfo info = dst->info();
+ if (kUnknown_SkColorType == info.colorType()) {
+// SkDebugf("unsupported config for info %d\n", dst->config());
+ return false;
+ }
+
+ SkPixelRef* pr = SkMallocPixelRef::NewAllocate(info, dst->rowBytes(), ctable);
+ if (nullptr == pr) {
+ return false;
+ }
+
+ dst->setPixelRef(pr)->unref();
+ // since we're already allocated, we lockPixels right away
+ dst->lockPixels();
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool copy_pixels_to(const SkPixmap& src, void* const dst, size_t dstSize,
+ size_t dstRowBytes, bool preserveDstPad) {
+ const SkImageInfo& info = src.info();
+
+ if (0 == dstRowBytes) {
+ dstRowBytes = src.rowBytes();
+ }
+ if (dstRowBytes < info.minRowBytes()) {
+ return false;
+ }
+
+ if (!preserveDstPad && static_cast<uint32_t>(dstRowBytes) == src.rowBytes()) {
+ size_t safeSize = src.getSafeSize();
+ if (safeSize > dstSize || safeSize == 0)
+ return false;
+ else {
+ // This implementation will write bytes beyond the end of each row,
+ // excluding the last row, if the bitmap's stride is greater than
+ // strictly required by the current config.
+ memcpy(dst, src.addr(), safeSize);
+ return true;
+ }
+ } else {
+ // If destination has different stride than us, then copy line by line.
+ if (info.getSafeSize(dstRowBytes) > dstSize) {
+ return false;
+ } else {
+ // Just copy what we need on each line.
+ size_t rowBytes = info.minRowBytes();
+ const uint8_t* srcP = reinterpret_cast<const uint8_t*>(src.addr());
+ uint8_t* dstP = reinterpret_cast<uint8_t*>(dst);
+ for (int row = 0; row < info.height(); ++row) {
+ memcpy(dstP, srcP, rowBytes);
+ srcP += src.rowBytes();
+ dstP += dstRowBytes;
+ }
+
+ return true;
+ }
+ }
+}
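+
+// A worked illustration with assumed numbers: a 100x50 kN32 bitmap has
+// info.minRowBytes() == 400. If src.rowBytes() == dstRowBytes == 416 and padding
+// need not be preserved, a single memcpy of getSafeSize() bytes is used; if
+// dstRowBytes were 400 instead, the strides differ and the 50 rows are copied
+// one by one, 400 bytes each.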
+
+bool SkBitmap::copyPixelsTo(void* dst, size_t dstSize, size_t dstRB, bool preserveDstPad) const {
+ if (nullptr == dst) {
+ return false;
+ }
+ SkAutoPixmapUnlock result;
+ if (!this->requestLock(&result)) {
+ return false;
+ }
+ return copy_pixels_to(result.pixmap(), dst, dstSize, dstRB, preserveDstPad);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkBitmap::isImmutable() const {
+ return fPixelRef ? fPixelRef->isImmutable() : false;
+}
+
+void SkBitmap::setImmutable() {
+ if (fPixelRef) {
+ fPixelRef->setImmutable();
+ }
+}
+
+bool SkBitmap::isVolatile() const {
+ return (fFlags & kImageIsVolatile_Flag) != 0;
+}
+
+void SkBitmap::setIsVolatile(bool isVolatile) {
+ if (isVolatile) {
+ fFlags |= kImageIsVolatile_Flag;
+ } else {
+ fFlags &= ~kImageIsVolatile_Flag;
+ }
+}
+
+void* SkBitmap::getAddr(int x, int y) const {
+ SkASSERT((unsigned)x < (unsigned)this->width());
+ SkASSERT((unsigned)y < (unsigned)this->height());
+
+ char* base = (char*)this->getPixels();
+ if (base) {
+ base += y * this->rowBytes();
+ switch (this->colorType()) {
+ case kRGBA_F16_SkColorType:
+ base += x << 3;
+ break;
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ base += x << 2;
+ break;
+ case kARGB_4444_SkColorType:
+ case kRGB_565_SkColorType:
+ base += x << 1;
+ break;
+ case kAlpha_8_SkColorType:
+ case kIndex_8_SkColorType:
+ case kGray_8_SkColorType:
+ base += x;
+ break;
+ default:
+ SkDEBUGFAIL("Can't return addr for config");
+ base = nullptr;
+ break;
+ }
+ }
+ return base;
+}
+
+#include "SkHalf.h"
+
+SkColor SkBitmap::getColor(int x, int y) const {
+ SkASSERT((unsigned)x < (unsigned)this->width());
+ SkASSERT((unsigned)y < (unsigned)this->height());
+
+ switch (this->colorType()) {
+ case kGray_8_SkColorType: {
+ uint8_t* addr = this->getAddr8(x, y);
+ return SkColorSetRGB(*addr, *addr, *addr);
+ }
+ case kAlpha_8_SkColorType: {
+ uint8_t* addr = this->getAddr8(x, y);
+ return SkColorSetA(0, addr[0]);
+ }
+ case kIndex_8_SkColorType: {
+ SkPMColor c = this->getIndex8Color(x, y);
+ return SkUnPreMultiply::PMColorToColor(c);
+ }
+ case kRGB_565_SkColorType: {
+ uint16_t* addr = this->getAddr16(x, y);
+ return SkPixel16ToColor(addr[0]);
+ }
+ case kARGB_4444_SkColorType: {
+ uint16_t* addr = this->getAddr16(x, y);
+ SkPMColor c = SkPixel4444ToPixel32(addr[0]);
+ return SkUnPreMultiply::PMColorToColor(c);
+ }
+ case kBGRA_8888_SkColorType: {
+ uint32_t* addr = this->getAddr32(x, y);
+ SkPMColor c = SkSwizzle_BGRA_to_PMColor(addr[0]);
+ return SkUnPreMultiply::PMColorToColor(c);
+ }
+ case kRGBA_8888_SkColorType: {
+ uint32_t* addr = this->getAddr32(x, y);
+ SkPMColor c = SkSwizzle_RGBA_to_PMColor(addr[0]);
+ return SkUnPreMultiply::PMColorToColor(c);
+ }
+ case kRGBA_F16_SkColorType: {
+ const uint64_t* addr = (const uint64_t*)fPixels + y * (fRowBytes >> 3) + x;
+ Sk4f p4 = SkHalfToFloat_finite_ftz(addr[0]);
+ if (p4[3]) {
+ float inva = 1 / p4[3];
+ p4 = p4 * Sk4f(inva, inva, inva, 1);
+ }
+ SkColor c;
+ SkNx_cast<uint8_t>(p4 * Sk4f(255) + Sk4f(0.5f)).store(&c);
+ // p4 is RGBA, but we want BGRA, so we need to swap next
+ return SkSwizzle_RB(c);
+ }
+ default:
+ SkASSERT(false);
+ return 0;
+ }
+ SkASSERT(false); // Not reached.
+ return 0;
+}
+
+static bool compute_is_opaque(const SkPixmap& pmap) {
+ const int height = pmap.height();
+ const int width = pmap.width();
+
+ switch (pmap.colorType()) {
+ case kAlpha_8_SkColorType: {
+ unsigned a = 0xFF;
+ for (int y = 0; y < height; ++y) {
+ const uint8_t* row = pmap.addr8(0, y);
+ for (int x = 0; x < width; ++x) {
+ a &= row[x];
+ }
+ if (0xFF != a) {
+ return false;
+ }
+ }
+ return true;
+ } break;
+ case kIndex_8_SkColorType: {
+ const SkColorTable* ctable = pmap.ctable();
+ if (nullptr == ctable) {
+ return false;
+ }
+ const SkPMColor* table = ctable->readColors();
+ SkPMColor c = (SkPMColor)~0;
+ for (int i = ctable->count() - 1; i >= 0; --i) {
+ c &= table[i];
+ }
+ return 0xFF == SkGetPackedA32(c);
+ } break;
+ case kRGB_565_SkColorType:
+ case kGray_8_SkColorType:
+ return true;
+ break;
+ case kARGB_4444_SkColorType: {
+ unsigned c = 0xFFFF;
+ for (int y = 0; y < height; ++y) {
+ const SkPMColor16* row = pmap.addr16(0, y);
+ for (int x = 0; x < width; ++x) {
+ c &= row[x];
+ }
+ if (0xF != SkGetPackedA4444(c)) {
+ return false;
+ }
+ }
+ return true;
+ } break;
+ case kBGRA_8888_SkColorType:
+ case kRGBA_8888_SkColorType: {
+ SkPMColor c = (SkPMColor)~0;
+ for (int y = 0; y < height; ++y) {
+ const SkPMColor* row = pmap.addr32(0, y);
+ for (int x = 0; x < width; ++x) {
+ c &= row[x];
+ }
+ if (0xFF != SkGetPackedA32(c)) {
+ return false;
+ }
+ }
+ return true;
+ }
+ default:
+ break;
+ }
+ return false;
+}
+
+bool SkBitmap::ComputeIsOpaque(const SkBitmap& bm) {
+ SkAutoPixmapUnlock result;
+ if (!bm.requestLock(&result)) {
+ return false;
+ }
+ return compute_is_opaque(result.pixmap());
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+void SkBitmap::erase(SkColor c, const SkIRect& area) const {
+ SkDEBUGCODE(this->validate();)
+
+ switch (fInfo.colorType()) {
+ case kUnknown_SkColorType:
+ case kIndex_8_SkColorType:
+ // TODO: can we ASSERT that we never get here?
+ return; // can't erase. Should we bzero so the memory is not uninitialized?
+ default:
+ break;
+ }
+
+ SkAutoPixmapUnlock result;
+ if (!this->requestLock(&result)) {
+ return;
+ }
+
+ if (result.pixmap().erase(c, area)) {
+ this->notifyPixelsChanged();
+ }
+}
+
+void SkBitmap::eraseColor(SkColor c) const {
+ this->erase(c, SkIRect::MakeWH(this->width(), this->height()));
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////////////////
+
+bool SkBitmap::extractSubset(SkBitmap* result, const SkIRect& subset) const {
+ SkDEBUGCODE(this->validate();)
+
+ if (nullptr == result || nullptr == fPixelRef) {
+ return false; // no src pixels
+ }
+
+ SkIRect srcRect, r;
+ srcRect.set(0, 0, this->width(), this->height());
+ if (!r.intersect(srcRect, subset)) {
+ return false; // r is empty (i.e. no intersection)
+ }
+
+ // If the upper left of the rectangle was outside the bounds of this SkBitmap, we should have
+ // exited above.
+ SkASSERT(static_cast<unsigned>(r.fLeft) < static_cast<unsigned>(this->width()));
+ SkASSERT(static_cast<unsigned>(r.fTop) < static_cast<unsigned>(this->height()));
+
+ SkBitmap dst;
+ dst.setInfo(this->info().makeWH(r.width(), r.height()), this->rowBytes());
+ dst.setIsVolatile(this->isVolatile());
+
+ if (fPixelRef) {
+ SkIPoint origin = fPixelRefOrigin;
+ origin.fX += r.fLeft;
+ origin.fY += r.fTop;
+ // share the pixelref with a custom offset
+ dst.setPixelRef(fPixelRef, origin);
+ }
+ SkDEBUGCODE(dst.validate();)
+
+ // we know we're good, so commit to result
+ result->swap(dst);
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkBitmap::canCopyTo(SkColorType dstColorType) const {
+ const SkColorType srcCT = this->colorType();
+
+ if (srcCT == kUnknown_SkColorType) {
+ return false;
+ }
+
+ bool sameConfigs = (srcCT == dstColorType);
+ switch (dstColorType) {
+ case kAlpha_8_SkColorType:
+ case kRGB_565_SkColorType:
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ break;
+ case kIndex_8_SkColorType:
+ if (!sameConfigs) {
+ return false;
+ }
+ break;
+ case kARGB_4444_SkColorType:
+ return sameConfigs || kN32_SkColorType == srcCT || kIndex_8_SkColorType == srcCT;
+ case kGray_8_SkColorType:
+ switch (srcCT) {
+ case kGray_8_SkColorType:
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ return true;
+ default:
+ break;
+ }
+ return false;
+ default:
+ return false;
+ }
+ return true;
+}
+
+bool SkBitmap::readPixels(const SkImageInfo& requestedDstInfo, void* dstPixels, size_t dstRB,
+ int x, int y) const {
+ SkAutoPixmapUnlock src;
+ if (!this->requestLock(&src)) {
+ return false;
+ }
+ return src.pixmap().readPixels(requestedDstInfo, dstPixels, dstRB, x, y);
+}
+
+bool SkBitmap::copyTo(SkBitmap* dst, SkColorType dstColorType, Allocator* alloc) const {
+ if (!this->canCopyTo(dstColorType)) {
+ return false;
+ }
+
+ // if we have a texture, first get those pixels
+ SkBitmap tmpSrc;
+ const SkBitmap* src = this;
+
+ if (fPixelRef) {
+ SkIRect subset;
+ subset.setXYWH(fPixelRefOrigin.fX, fPixelRefOrigin.fY,
+ fInfo.width(), fInfo.height());
+ if (fPixelRef->readPixels(&tmpSrc, dstColorType, &subset)) {
+ if (fPixelRef->info().alphaType() == kUnpremul_SkAlphaType) {
+ // FIXME: The only meaningful implementation of readPixels
+ // (GrPixelRef) assumes premultiplied pixels.
+ return false;
+ }
+ SkASSERT(tmpSrc.width() == this->width());
+ SkASSERT(tmpSrc.height() == this->height());
+
+ // did we get lucky and we can just return tmpSrc?
+ if (tmpSrc.colorType() == dstColorType && nullptr == alloc) {
+ dst->swap(tmpSrc);
+ // If the result is an exact copy, clone the gen ID.
+ if (dst->pixelRef() && dst->pixelRef()->info() == fPixelRef->info()) {
+ dst->pixelRef()->cloneGenID(*fPixelRef);
+ }
+ return true;
+ }
+
+ // fall through to the raster case
+ src = &tmpSrc;
+ }
+ }
+
+ SkAutoPixmapUnlock srcUnlocker;
+ if (!src->requestLock(&srcUnlocker)) {
+ return false;
+ }
+ const SkPixmap& srcPM = srcUnlocker.pixmap();
+
+ const SkImageInfo dstInfo = srcPM.info().makeColorType(dstColorType);
+ SkBitmap tmpDst;
+ if (!tmpDst.setInfo(dstInfo)) {
+ return false;
+ }
+
+    // allocate a colortable if the destination colorType is kIndex_8
+ SkAutoTUnref<SkColorTable> ctable;
+ if (dstColorType == kIndex_8_SkColorType) {
+ ctable.reset(SkRef(srcPM.ctable()));
+ }
+ if (!tmpDst.tryAllocPixels(alloc, ctable)) {
+ return false;
+ }
+
+ SkAutoPixmapUnlock dstUnlocker;
+ if (!tmpDst.requestLock(&dstUnlocker)) {
+ return false;
+ }
+
+ if (!srcPM.readPixels(dstUnlocker.pixmap())) {
+ return false;
+ }
+
+ // (for BitmapHeap) Clone the pixelref genID even though we have a new pixelref.
+ // The old copyTo impl did this, so we continue it for now.
+ //
+ // TODO: should we ignore rowbytes (i.e. getSize)? Then it could just be
+ // if (src_pixelref->info == dst_pixelref->info)
+ //
+ if (srcPM.colorType() == dstColorType && tmpDst.getSize() == srcPM.getSize64()) {
+ SkPixelRef* dstPixelRef = tmpDst.pixelRef();
+ if (dstPixelRef->info() == fPixelRef->info()) {
+ dstPixelRef->cloneGenID(*fPixelRef);
+ }
+ }
+
+ dst->swap(tmpDst);
+ return true;
+}
+
+// TODO: can we merge this with copyTo?
+bool SkBitmap::deepCopyTo(SkBitmap* dst) const {
+ const SkColorType dstCT = this->colorType();
+
+ if (!this->canCopyTo(dstCT)) {
+ return false;
+ }
+ return this->copyTo(dst, dstCT, nullptr);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool GetBitmapAlpha(const SkBitmap& src, uint8_t* SK_RESTRICT alpha, int alphaRowBytes) {
+ SkASSERT(alpha != nullptr);
+ SkASSERT(alphaRowBytes >= src.width());
+
+ SkAutoPixmapUnlock apl;
+ if (!src.requestLock(&apl)) {
+ for (int y = 0; y < src.height(); ++y) {
+ memset(alpha, 0, src.width());
+ alpha += alphaRowBytes;
+ }
+ return false;
+ }
+ const SkPixmap& pmap = apl.pixmap();
+ SkPixelInfo::CopyPixels(SkImageInfo::MakeA8(pmap.width(), pmap.height()), alpha, alphaRowBytes,
+ pmap.info(), pmap.addr(), pmap.rowBytes(), pmap.ctable());
+ return true;
+}
+
+#include "SkPaint.h"
+#include "SkMaskFilter.h"
+#include "SkMatrix.h"
+
+bool SkBitmap::extractAlpha(SkBitmap* dst, const SkPaint* paint,
+ Allocator *allocator, SkIPoint* offset) const {
+ SkDEBUGCODE(this->validate();)
+
+ SkBitmap tmpBitmap;
+ SkMatrix identity;
+ SkMask srcM, dstM;
+
+ srcM.fBounds.set(0, 0, this->width(), this->height());
+ srcM.fRowBytes = SkAlign4(this->width());
+ srcM.fFormat = SkMask::kA8_Format;
+
+ SkMaskFilter* filter = paint ? paint->getMaskFilter() : nullptr;
+
+ // compute our (larger?) dst bounds if we have a filter
+ if (filter) {
+ identity.reset();
+ if (!filter->filterMask(&dstM, srcM, identity, nullptr)) {
+ goto NO_FILTER_CASE;
+ }
+ dstM.fRowBytes = SkAlign4(dstM.fBounds.width());
+ } else {
+ NO_FILTER_CASE:
+ tmpBitmap.setInfo(SkImageInfo::MakeA8(this->width(), this->height()), srcM.fRowBytes);
+ if (!tmpBitmap.tryAllocPixels(allocator, nullptr)) {
+ // Allocation of pixels for alpha bitmap failed.
+ SkDebugf("extractAlpha failed to allocate (%d,%d) alpha bitmap\n",
+ tmpBitmap.width(), tmpBitmap.height());
+ return false;
+ }
+ GetBitmapAlpha(*this, tmpBitmap.getAddr8(0, 0), srcM.fRowBytes);
+ if (offset) {
+ offset->set(0, 0);
+ }
+ tmpBitmap.swap(*dst);
+ return true;
+ }
+ srcM.fImage = SkMask::AllocImage(srcM.computeImageSize());
+ SkAutoMaskFreeImage srcCleanup(srcM.fImage);
+
+ GetBitmapAlpha(*this, srcM.fImage, srcM.fRowBytes);
+ if (!filter->filterMask(&dstM, srcM, identity, nullptr)) {
+ goto NO_FILTER_CASE;
+ }
+ SkAutoMaskFreeImage dstCleanup(dstM.fImage);
+
+ tmpBitmap.setInfo(SkImageInfo::MakeA8(dstM.fBounds.width(), dstM.fBounds.height()),
+ dstM.fRowBytes);
+ if (!tmpBitmap.tryAllocPixels(allocator, nullptr)) {
+ // Allocation of pixels for alpha bitmap failed.
+ SkDebugf("extractAlpha failed to allocate (%d,%d) alpha bitmap\n",
+ tmpBitmap.width(), tmpBitmap.height());
+ return false;
+ }
+ memcpy(tmpBitmap.getPixels(), dstM.fImage, dstM.computeImageSize());
+ if (offset) {
+ offset->set(dstM.fBounds.fLeft, dstM.fBounds.fTop);
+ }
+ SkDEBUGCODE(tmpBitmap.validate();)
+
+ tmpBitmap.swap(*dst);
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void write_raw_pixels(SkWriteBuffer* buffer, const SkPixmap& pmap) {
+ const SkImageInfo& info = pmap.info();
+ const size_t snugRB = info.width() * info.bytesPerPixel();
+ const char* src = (const char*)pmap.addr();
+ const size_t ramRB = pmap.rowBytes();
+
+ buffer->write32(SkToU32(snugRB));
+ info.flatten(*buffer);
+
+ const size_t size = snugRB * info.height();
+ SkAutoTMalloc<char> storage(size);
+ char* dst = storage.get();
+ for (int y = 0; y < info.height(); ++y) {
+ memcpy(dst, src, snugRB);
+ dst += snugRB;
+ src += ramRB;
+ }
+ buffer->writeByteArray(storage.get(), size);
+
+ const SkColorTable* ct = pmap.ctable();
+ if (kIndex_8_SkColorType == info.colorType() && ct) {
+ buffer->writeBool(true);
+ ct->writeToBuffer(*buffer);
+ } else {
+ buffer->writeBool(false);
+ }
+}
+
+void SkBitmap::WriteRawPixels(SkWriteBuffer* buffer, const SkBitmap& bitmap) {
+ const SkImageInfo info = bitmap.info();
+ if (0 == info.width() || 0 == info.height() || nullptr == bitmap.pixelRef()) {
+ buffer->writeUInt(0); // instead of snugRB, signaling no pixels
+ return;
+ }
+
+ SkAutoPixmapUnlock result;
+ if (!bitmap.requestLock(&result)) {
+ buffer->writeUInt(0); // instead of snugRB, signaling no pixels
+ return;
+ }
+
+ write_raw_pixels(buffer, result.pixmap());
+}
+
+bool SkBitmap::ReadRawPixels(SkReadBuffer* buffer, SkBitmap* bitmap) {
+ const size_t snugRB = buffer->readUInt();
+ if (0 == snugRB) { // no pixels
+ return false;
+ }
+
+ SkImageInfo info;
+ info.unflatten(*buffer);
+
+ // If there was an error reading "info" or if it is bogus,
+ // don't use it to compute minRowBytes()
+ if (!buffer->validate(SkColorTypeValidateAlphaType(info.colorType(),
+ info.alphaType()))) {
+ return false;
+ }
+
+ const size_t ramRB = info.minRowBytes();
+ const int height = SkMax32(info.height(), 0);
+ const uint64_t snugSize = sk_64_mul(snugRB, height);
+ const uint64_t ramSize = sk_64_mul(ramRB, height);
+ static const uint64_t max_size_t = (size_t)(-1);
+ if (!buffer->validate((snugSize <= ramSize) && (ramSize <= max_size_t))) {
+ return false;
+ }
+
+ sk_sp<SkData> data(SkData::MakeUninitialized(SkToSizeT(ramSize)));
+ unsigned char* dst = (unsigned char*)data->writable_data();
+ buffer->readByteArray(dst, SkToSizeT(snugSize));
+
+ if (snugSize != ramSize) {
+ const unsigned char* srcRow = dst + snugRB * (height - 1);
+ unsigned char* dstRow = dst + ramRB * (height - 1);
+ for (int y = height - 1; y >= 1; --y) {
+ memmove(dstRow, srcRow, snugRB);
+ srcRow -= snugRB;
+ dstRow -= ramRB;
+ }
+ SkASSERT(srcRow == dstRow); // first row does not need to be moved
+ }
+
+ SkAutoTUnref<SkColorTable> ctable;
+ if (buffer->readBool()) {
+ ctable.reset(SkColorTable::Create(*buffer));
+ if (!ctable) {
+ return false;
+ }
+
+ if (info.isEmpty()) {
+ // require an empty ctable
+ if (ctable->count() != 0) {
+ buffer->validate(false);
+ return false;
+ }
+ } else {
+ // require a non-empty ctable
+ if (ctable->count() == 0) {
+ buffer->validate(false);
+ return false;
+ }
+ unsigned char maxIndex = ctable->count() - 1;
+ for (uint64_t i = 0; i < ramSize; ++i) {
+ dst[i] = SkTMin(dst[i], maxIndex);
+ }
+ }
+ }
+
+ SkAutoTUnref<SkPixelRef> pr(SkMallocPixelRef::NewWithData(info, info.minRowBytes(),
+ ctable.get(), data.get()));
+ if (!pr.get()) {
+ return false;
+ }
+ bitmap->setInfo(pr->info());
+ bitmap->setPixelRef(pr, 0, 0);
+ return true;
+}
+
+enum {
+ SERIALIZE_PIXELTYPE_NONE,
+ SERIALIZE_PIXELTYPE_REF_DATA
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkBitmap::RLEPixels::RLEPixels(int width, int height) {
+ fHeight = height;
+ fYPtrs = (uint8_t**)sk_calloc_throw(height * sizeof(uint8_t*));
+}
+
+SkBitmap::RLEPixels::~RLEPixels() {
+ sk_free(fYPtrs);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+void SkBitmap::validate() const {
+ fInfo.validate();
+
+ // ImageInfo may not require this, but Bitmap ensures that opaque-only
+ // colorTypes report opaque for their alphatype
+ if (kRGB_565_SkColorType == fInfo.colorType()) {
+ SkASSERT(kOpaque_SkAlphaType == fInfo.alphaType());
+ }
+
+ SkASSERT(fInfo.validRowBytes(fRowBytes));
+ uint8_t allFlags = kImageIsVolatile_Flag;
+#ifdef SK_BUILD_FOR_ANDROID
+ allFlags |= kHasHardwareMipMap_Flag;
+#endif
+ SkASSERT((~allFlags & fFlags) == 0);
+ SkASSERT(fPixelLockCount >= 0);
+
+ if (fPixels) {
+ SkASSERT(fPixelRef);
+ SkASSERT(fPixelLockCount > 0);
+ SkASSERT(fPixelRef->isLocked());
+ SkASSERT(fPixelRef->rowBytes() == fRowBytes);
+ SkASSERT(fPixelRefOrigin.fX >= 0);
+ SkASSERT(fPixelRefOrigin.fY >= 0);
+ SkASSERT(fPixelRef->info().width() >= (int)this->width() + fPixelRefOrigin.fX);
+ SkASSERT(fPixelRef->info().height() >= (int)this->height() + fPixelRefOrigin.fY);
+ SkASSERT(fPixelRef->rowBytes() >= fInfo.minRowBytes());
+ } else {
+ SkASSERT(nullptr == fColorTable);
+ }
+}
+#endif
+
+#ifndef SK_IGNORE_TO_STRING
+#include "SkString.h"
+void SkBitmap::toString(SkString* str) const {
+
+ static const char* gColorTypeNames[kLastEnum_SkColorType + 1] = {
+ "UNKNOWN", "A8", "565", "4444", "RGBA", "BGRA", "INDEX8",
+ };
+
+ str->appendf("bitmap: ((%d, %d) %s", this->width(), this->height(),
+ gColorTypeNames[this->colorType()]);
+
+ str->append(" (");
+ if (this->isOpaque()) {
+ str->append("opaque");
+ } else {
+ str->append("transparent");
+ }
+ if (this->isImmutable()) {
+ str->append(", immutable");
+ } else {
+ str->append(", not-immutable");
+ }
+ str->append(")");
+
+ SkPixelRef* pr = this->pixelRef();
+ if (nullptr == pr) {
+ // show null or the explicit pixel address (rare)
+ str->appendf(" pixels:%p", this->getPixels());
+ } else {
+ const char* uri = pr->getURI();
+ if (uri) {
+ str->appendf(" uri:\"%s\"", uri);
+ } else {
+ str->appendf(" pixelref:%p", pr);
+ }
+ }
+
+ str->append(")");
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkBitmap::requestLock(SkAutoPixmapUnlock* result) const {
+ SkASSERT(result);
+
+ SkPixelRef* pr = fPixelRef;
+ if (nullptr == pr) {
+ return false;
+ }
+
+ // We have to lock the whole thing (using the pixelref's dimensions) until the api supports
+ // a partial lock (with offset/origin). Hence we can't use our fInfo.
+ SkPixelRef::LockRequest req = { pr->info().dimensions(), kNone_SkFilterQuality };
+ SkPixelRef::LockResult res;
+ if (pr->requestLock(req, &res)) {
+ SkASSERT(res.fPixels);
+ // The bitmap may be a subset of the pixelref's dimensions
+ SkASSERT(fPixelRefOrigin.x() + fInfo.width() <= res.fSize.width());
+ SkASSERT(fPixelRefOrigin.y() + fInfo.height() <= res.fSize.height());
+ const void* addr = (const char*)res.fPixels + SkColorTypeComputeOffset(fInfo.colorType(),
+ fPixelRefOrigin.x(),
+ fPixelRefOrigin.y(),
+ res.fRowBytes);
+
+ result->reset(SkPixmap(this->info(), addr, res.fRowBytes, res.fCTable),
+ res.fUnlockProc, res.fUnlockContext);
+ return true;
+ }
+ return false;
+}
+
+bool SkBitmap::peekPixels(SkPixmap* pmap) const {
+ if (fPixels) {
+ if (pmap) {
+ pmap->reset(fInfo, fPixels, fRowBytes, fColorTable);
+ }
+ return true;
+ }
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+void SkImageInfo::validate() const {
+ SkASSERT(fWidth >= 0);
+ SkASSERT(fHeight >= 0);
+ SkASSERT(SkColorTypeIsValid(fColorType));
+ SkASSERT(SkAlphaTypeIsValid(fAlphaType));
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkBitmapCache.cpp b/gfx/skia/skia/src/core/SkBitmapCache.cpp
new file mode 100644
index 000000000..153a24748
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapCache.cpp
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapCache.h"
+#include "SkImage.h"
+#include "SkResourceCache.h"
+#include "SkMipMap.h"
+#include "SkPixelRef.h"
+#include "SkRect.h"
+
+/**
+ * Use this for bitmapcache and mipmapcache entries.
+ */
+uint64_t SkMakeResourceCacheSharedIDForBitmap(uint32_t bitmapGenID) {
+ uint64_t sharedID = SkSetFourByteTag('b', 'm', 'a', 'p');
+ return (sharedID << 32) | bitmapGenID;
+}
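+
+// For reference, the packing above places the 'bmap' tag in the upper 32 bits and
+// the generation ID in the lower 32 bits, so one shared ID covers every cache
+// entry derived from that bitmap generation:
+//
+//   bits 63..32: SkSetFourByteTag('b', 'm', 'a', 'p')  (0x626d6170)
+//   bits 31..0 : bitmapGenID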
+
+void SkNotifyBitmapGenIDIsStale(uint32_t bitmapGenID) {
+ SkResourceCache::PostPurgeSharedID(SkMakeResourceCacheSharedIDForBitmap(bitmapGenID));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkBitmap::Allocator* SkBitmapCache::GetAllocator() {
+ return SkResourceCache::GetAllocator();
+}
+
+/**
+ This function finds the bounds of the bitmap *within its pixelRef*.
+ If the bitmap lacks a pixelRef, it will return an empty rect, since
+ that doesn't make sense. This may be a useful enough function that
+ it should be somewhere else (in SkBitmap?).
+ */
+static SkIRect get_bounds_from_bitmap(const SkBitmap& bm) {
+ if (!(bm.pixelRef())) {
+ return SkIRect::MakeEmpty();
+ }
+ SkIPoint origin = bm.pixelRefOrigin();
+ return SkIRect::MakeXYWH(origin.fX, origin.fY, bm.width(), bm.height());
+}
+
+/**
+ * This function finds the bounds of the image. Today this is just the entire bounds,
+ * but in the future we may support subsets within an image, in which case this should
+ * return that subset (see get_bounds_from_bitmap).
+ */
+static SkIRect get_bounds_from_image(const SkImage* image) {
+ return SkIRect::MakeWH(image->width(), image->height());
+}
+
+SkBitmapCacheDesc SkBitmapCacheDesc::Make(const SkBitmap& bm, int width, int height) {
+ SkBitmapCacheDesc desc;
+ desc.fImageID = bm.getGenerationID();
+ desc.fWidth = width;
+ desc.fHeight = height;
+ desc.fBounds = get_bounds_from_bitmap(bm);
+ return desc;
+}
+
+SkBitmapCacheDesc SkBitmapCacheDesc::Make(const SkBitmap& bm) {
+ return Make(bm, bm.width(), bm.height());
+}
+
+SkBitmapCacheDesc SkBitmapCacheDesc::Make(const SkImage* image, int width, int height) {
+ SkBitmapCacheDesc desc;
+ desc.fImageID = image->uniqueID();
+ desc.fWidth = width;
+ desc.fHeight = height;
+ desc.fBounds = get_bounds_from_image(image);
+ return desc;
+}
+
+SkBitmapCacheDesc SkBitmapCacheDesc::Make(const SkImage* image) {
+ return Make(image, image->width(), image->height());
+}
+
+namespace {
+static unsigned gBitmapKeyNamespaceLabel;
+
+struct BitmapKey : public SkResourceCache::Key {
+public:
+ BitmapKey(uint32_t genID, int width, int height, const SkIRect& bounds)
+ : fGenID(genID)
+ , fWidth(width)
+ , fHeight(height)
+ , fBounds(bounds)
+ {
+ this->init(&gBitmapKeyNamespaceLabel, SkMakeResourceCacheSharedIDForBitmap(fGenID),
+ sizeof(fGenID) + sizeof(fWidth) + sizeof(fHeight) + sizeof(fBounds));
+ }
+
+ BitmapKey(const SkBitmapCacheDesc& desc)
+ : fGenID(desc.fImageID)
+ , fWidth(desc.fWidth)
+ , fHeight(desc.fHeight)
+ , fBounds(desc.fBounds)
+ {
+ this->init(&gBitmapKeyNamespaceLabel, SkMakeResourceCacheSharedIDForBitmap(fGenID),
+ sizeof(fGenID) + sizeof(fWidth) + sizeof(fHeight) + sizeof(fBounds));
+ }
+
+ void dump() const {
+ SkDebugf("-- add [%d %d] %d [%d %d %d %d]\n", fWidth, fHeight, fGenID,
+ fBounds.x(), fBounds.y(), fBounds.width(), fBounds.height());
+ }
+
+ const uint32_t fGenID;
+ const int fWidth;
+ const int fHeight;
+ const SkIRect fBounds;
+};
+
+struct BitmapRec : public SkResourceCache::Rec {
+ BitmapRec(uint32_t genID, int width, int height, const SkIRect& bounds,
+ const SkBitmap& result)
+ : fKey(genID, width, height, bounds)
+ , fBitmap(result)
+ {
+#ifdef TRACE_NEW_BITMAP_CACHE_RECS
+ fKey.dump();
+#endif
+ }
+
+ BitmapRec(const SkBitmapCacheDesc& desc, const SkBitmap& result)
+ : fKey(desc)
+ , fBitmap(result)
+ {
+#ifdef TRACE_NEW_BITMAP_CACHE_RECS
+ fKey.dump();
+#endif
+ }
+
+ const Key& getKey() const override { return fKey; }
+ size_t bytesUsed() const override { return sizeof(fKey) + fBitmap.getSize(); }
+
+ const char* getCategory() const override { return "bitmap"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override {
+ return fBitmap.pixelRef()->diagnostic_only_getDiscardable();
+ }
+
+ static bool Finder(const SkResourceCache::Rec& baseRec, void* contextBitmap) {
+ const BitmapRec& rec = static_cast<const BitmapRec&>(baseRec);
+ SkBitmap* result = (SkBitmap*)contextBitmap;
+
+ *result = rec.fBitmap;
+ result->lockPixels();
+ return SkToBool(result->getPixels());
+ }
+
+private:
+ BitmapKey fKey;
+ SkBitmap fBitmap;
+};
+} // namespace
+
+#define CHECK_LOCAL(localCache, localName, globalName, ...) \
+ ((localCache) ? localCache->localName(__VA_ARGS__) : SkResourceCache::globalName(__VA_ARGS__))
+
+bool SkBitmapCache::FindWH(const SkBitmapCacheDesc& desc, SkBitmap* result,
+ SkResourceCache* localCache) {
+ if (0 == desc.fWidth || 0 == desc.fHeight) {
+ // degenerate
+ return false;
+ }
+ return CHECK_LOCAL(localCache, find, Find, BitmapKey(desc), BitmapRec::Finder, result);
+}
+
+bool SkBitmapCache::AddWH(const SkBitmapCacheDesc& desc, const SkBitmap& result,
+ SkResourceCache* localCache) {
+ if (0 == desc.fWidth || 0 == desc.fHeight) {
+ // degenerate, and the key we use for mipmaps
+ return false;
+ }
+ SkASSERT(result.isImmutable());
+ BitmapRec* rec = new BitmapRec(desc, result);
+ CHECK_LOCAL(localCache, add, Add, rec);
+ return true;
+}
+
+bool SkBitmapCache::Find(uint32_t genID, const SkIRect& subset, SkBitmap* result,
+ SkResourceCache* localCache) {
+ BitmapKey key(genID, SK_Scalar1, SK_Scalar1, subset);
+
+ return CHECK_LOCAL(localCache, find, Find, key, BitmapRec::Finder, result);
+}
+
+bool SkBitmapCache::Add(SkPixelRef* pr, const SkIRect& subset, const SkBitmap& result,
+ SkResourceCache* localCache) {
+ SkASSERT(result.isImmutable());
+
+ if (subset.isEmpty()
+ || subset.top() < 0
+ || subset.left() < 0
+ || result.width() != subset.width()
+ || result.height() != subset.height()) {
+ return false;
+ } else {
+ BitmapRec* rec = new BitmapRec(pr->getGenerationID(), 1, 1, subset, result);
+
+ CHECK_LOCAL(localCache, add, Add, rec);
+ pr->notifyAddedToCache();
+ return true;
+ }
+}
+
+bool SkBitmapCache::Find(uint32_t genID, SkBitmap* result, SkResourceCache* localCache) {
+ BitmapKey key(genID, SK_Scalar1, SK_Scalar1, SkIRect::MakeEmpty());
+
+ return CHECK_LOCAL(localCache, find, Find, key, BitmapRec::Finder, result);
+}
+
+void SkBitmapCache::Add(uint32_t genID, const SkBitmap& result, SkResourceCache* localCache) {
+ SkASSERT(result.isImmutable());
+
+ BitmapRec* rec = new BitmapRec(genID, 1, 1, SkIRect::MakeEmpty(), result);
+
+ CHECK_LOCAL(localCache, add, Add, rec);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+static unsigned gMipMapKeyNamespaceLabel;
+
+struct MipMapKey : public SkResourceCache::Key {
+public:
+ MipMapKey(uint32_t genID, SkSourceGammaTreatment treatment, const SkIRect& bounds)
+ : fGenID(genID), fSrcGammaTreatment(static_cast<uint32_t>(treatment)), fBounds(bounds)
+ {
+ this->init(&gMipMapKeyNamespaceLabel, SkMakeResourceCacheSharedIDForBitmap(genID),
+ sizeof(fGenID) + sizeof(fSrcGammaTreatment) + sizeof(fBounds));
+ }
+
+ uint32_t fGenID;
+ uint32_t fSrcGammaTreatment;
+ SkIRect fBounds;
+};
+
+struct MipMapRec : public SkResourceCache::Rec {
+ MipMapRec(const SkBitmap& src, SkSourceGammaTreatment treatment, const SkMipMap* result)
+ : fKey(src.getGenerationID(), treatment, get_bounds_from_bitmap(src))
+ , fMipMap(result)
+ {
+ fMipMap->attachToCacheAndRef();
+ }
+
+ virtual ~MipMapRec() {
+ fMipMap->detachFromCacheAndUnref();
+ }
+
+ const Key& getKey() const override { return fKey; }
+ size_t bytesUsed() const override { return sizeof(fKey) + fMipMap->size(); }
+ const char* getCategory() const override { return "mipmap"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override {
+ return fMipMap->diagnostic_only_getDiscardable();
+ }
+
+ static bool Finder(const SkResourceCache::Rec& baseRec, void* contextMip) {
+ const MipMapRec& rec = static_cast<const MipMapRec&>(baseRec);
+ const SkMipMap* mm = SkRef(rec.fMipMap);
+ // the call to ref() above triggers a "lock" in the case of discardable memory,
+ // which means we can now check for null (in case the lock failed).
+ if (nullptr == mm->data()) {
+ mm->unref(); // balance our call to ref()
+ return false;
+ }
+        // the caller must call unref() when they are done.
+ *(const SkMipMap**)contextMip = mm;
+ return true;
+ }
+
+private:
+ MipMapKey fKey;
+ const SkMipMap* fMipMap;
+};
+} // namespace
+
+const SkMipMap* SkMipMapCache::FindAndRef(const SkBitmapCacheDesc& desc,
+ SkSourceGammaTreatment treatment,
+ SkResourceCache* localCache) {
+ // Note: we ignore width/height from desc, just need id and bounds
+ MipMapKey key(desc.fImageID, treatment, desc.fBounds);
+ const SkMipMap* result;
+
+ if (!CHECK_LOCAL(localCache, find, Find, key, MipMapRec::Finder, &result)) {
+ result = nullptr;
+ }
+ return result;
+}
+
+static SkResourceCache::DiscardableFactory get_fact(SkResourceCache* localCache) {
+ return localCache ? localCache->GetDiscardableFactory()
+ : SkResourceCache::GetDiscardableFactory();
+}
+
+const SkMipMap* SkMipMapCache::AddAndRef(const SkBitmap& src, SkSourceGammaTreatment treatment,
+ SkResourceCache* localCache) {
+ SkMipMap* mipmap = SkMipMap::Build(src, treatment, get_fact(localCache));
+ if (mipmap) {
+ MipMapRec* rec = new MipMapRec(src, treatment, mipmap);
+ CHECK_LOCAL(localCache, add, Add, rec);
+ src.pixelRef()->notifyAddedToCache();
+ }
+ return mipmap;
+}
diff --git a/gfx/skia/skia/src/core/SkBitmapCache.h b/gfx/skia/skia/src/core/SkBitmapCache.h
new file mode 100644
index 000000000..76bcef06c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapCache.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapCache_DEFINED
+#define SkBitmapCache_DEFINED
+
+#include "SkBitmap.h"
+#include "SkMipMap.h"
+
+class SkImage;
+class SkResourceCache;
+
+uint64_t SkMakeResourceCacheSharedIDForBitmap(uint32_t bitmapGenID);
+
+void SkNotifyBitmapGenIDIsStale(uint32_t bitmapGenID);
+
+struct SkBitmapCacheDesc {
+ uint32_t fImageID;
+ int32_t fWidth;
+ int32_t fHeight;
+ SkIRect fBounds;
+
+ static SkBitmapCacheDesc Make(const SkBitmap&, int width, int height);
+ static SkBitmapCacheDesc Make(const SkBitmap&);
+ static SkBitmapCacheDesc Make(const SkImage*, int width, int height);
+ static SkBitmapCacheDesc Make(const SkImage*);
+};
+
+class SkBitmapCache {
+public:
+ /**
+ * Use this allocator for bitmaps, so they can use ashmem when available.
+ * Returns nullptr if the ResourceCache has not been initialized with a DiscardableFactory.
+ */
+ static SkBitmap::Allocator* GetAllocator();
+
+ /**
+ * Search based on the desc. If found, returns true and
+ * result will be set to the matching bitmap with its pixels already locked.
+ */
+ static bool FindWH(const SkBitmapCacheDesc&, SkBitmap* result,
+ SkResourceCache* localCache = nullptr);
+
+ /*
+ * result must be marked isImmutable()
+ */
+ static bool AddWH(const SkBitmapCacheDesc&, const SkBitmap& result,
+ SkResourceCache* localCache = nullptr);
+
+ /**
+ * Search based on the bitmap's genID and subset. If found, returns true and
+ * result will be set to the matching bitmap with its pixels already locked.
+ */
+ static bool Find(uint32_t genID, const SkIRect& subset, SkBitmap* result,
+ SkResourceCache* localCache = nullptr);
+
+ /**
+ * The width and the height of the provided subset must be the same as the result bitmap ones.
+ * result must be marked isImmutable()
+ */
+ static bool Add(SkPixelRef*, const SkIRect& subset, const SkBitmap& result,
+ SkResourceCache* localCache = nullptr);
+
+ static bool Find(uint32_t genID, SkBitmap* result, SkResourceCache* localCache = nullptr);
+ // todo: eliminate the need to specify ID, since it should == the bitmap's
+ static void Add(uint32_t genID, const SkBitmap&, SkResourceCache* localCache = nullptr);
+};
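+
+// A minimal usage sketch (the names `src`, `dstW`, `dstH` and the scaling step are
+// illustrative, not part of this header): look up a scaled copy first and only
+// produce and add it on a cache miss.
+//
+//   SkBitmapCacheDesc desc = SkBitmapCacheDesc::Make(src, dstW, dstH);
+//   SkBitmap scaled;
+//   if (!SkBitmapCache::FindWH(desc, &scaled)) {
+//       // ... produce `scaled` at dstW x dstH ...
+//       scaled.setImmutable();               // AddWH asserts the bitmap is immutable
+//       SkBitmapCache::AddWH(desc, scaled);
+//   }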
+
+class SkMipMapCache {
+public:
+ static const SkMipMap* FindAndRef(const SkBitmapCacheDesc&, SkSourceGammaTreatment,
+ SkResourceCache* localCache = nullptr);
+ static const SkMipMap* AddAndRef(const SkBitmap& src, SkSourceGammaTreatment,
+ SkResourceCache* localCache = nullptr);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBitmapController.cpp b/gfx/skia/skia/src/core/SkBitmapController.cpp
new file mode 100644
index 000000000..f4ee0fb6c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapController.cpp
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmap.h"
+#include "SkBitmapController.h"
+#include "SkBitmapProvider.h"
+#include "SkMatrix.h"
+#include "SkPixelRef.h"
+#include "SkTemplates.h"
+
+// RESIZE_LANCZOS3 is another good option, but chrome prefers mitchell at the moment
+#define kHQ_RESIZE_METHOD SkBitmapScaler::RESIZE_MITCHELL
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkBitmapController::State* SkBitmapController::requestBitmap(const SkBitmapProvider& provider,
+ const SkMatrix& inv,
+ SkFilterQuality quality,
+ void* storage, size_t storageSize) {
+ if (!provider.validForDrawing()) {
+ return nullptr;
+ }
+
+ State* state = this->onRequestBitmap(provider, inv, quality, storage, storageSize);
+ if (state) {
+ if (nullptr == state->fPixmap.addr()) {
+ SkInPlaceDeleteCheck(state, storage);
+ state = nullptr;
+ }
+ }
+ return state;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "SkBitmapCache.h"
+#include "SkBitmapScaler.h"
+#include "SkMipMap.h"
+#include "SkResourceCache.h"
+
+class SkDefaultBitmapControllerState : public SkBitmapController::State {
+public:
+ SkDefaultBitmapControllerState(const SkBitmapProvider&, const SkMatrix& inv, SkFilterQuality,
+ SkSourceGammaTreatment);
+
+private:
+ SkBitmap fResultBitmap;
+ SkSourceGammaTreatment fSrcGammaTreatment;
+ SkAutoTUnref<const SkMipMap> fCurrMip;
+
+ bool processHQRequest(const SkBitmapProvider&);
+ bool processMediumRequest(const SkBitmapProvider&);
+};
+
+// Check to see that the size of the bitmap that would be produced by
+// scaling by the given inverted matrix is less than the maximum allowed.
+static inline bool cache_size_okay(const SkBitmapProvider& provider, const SkMatrix& invMat) {
+ size_t maximumAllocation = SkResourceCache::GetEffectiveSingleAllocationByteLimit();
+ if (0 == maximumAllocation) {
+ return true;
+ }
+ // float matrixScaleFactor = 1.0 / (invMat.scaleX * invMat.scaleY);
+ // return ((origBitmapSize * matrixScaleFactor) < maximumAllocationSize);
+ // Skip the division step:
+ const size_t size = provider.info().getSafeSize(provider.info().minRowBytes());
+ SkScalar invScaleSqr = invMat.getScaleX() * invMat.getScaleY();
+ return size < (maximumAllocation * SkScalarAbs(invScaleSqr));
+}
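+
+// For reference, the rearrangement above: with s = |invMat.scaleX() * invMat.scaleY()|,
+// the intended check  size * (1 / s) < maximumAllocation  is equivalent (for s > 0)
+// to  size < maximumAllocation * s,  which avoids the division.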
+
+/*
+ * High quality is implemented by performing up-right scale-only filtering and then
+ * using bilerp for any remaining transformations.
+ */
+bool SkDefaultBitmapControllerState::processHQRequest(const SkBitmapProvider& provider) {
+ if (fQuality != kHigh_SkFilterQuality) {
+ return false;
+ }
+
+    // Our default return state is to downgrade the request to Medium, with or without setting
+    // fResultBitmap to a valid bitmap. If we succeed, we will set fQuality to Low instead.
+ fQuality = kMedium_SkFilterQuality;
+
+ if (kN32_SkColorType != provider.info().colorType() || !cache_size_okay(provider, fInvMatrix) ||
+ fInvMatrix.hasPerspective())
+ {
+        return false; // can't handle the request
+ }
+
+ SkScalar invScaleX = fInvMatrix.getScaleX();
+ SkScalar invScaleY = fInvMatrix.getScaleY();
+ if (fInvMatrix.getType() & SkMatrix::kAffine_Mask) {
+ SkSize scale;
+ if (!fInvMatrix.decomposeScale(&scale)) {
+ return false;
+ }
+ invScaleX = scale.width();
+ invScaleY = scale.height();
+ }
+ invScaleX = SkScalarAbs(invScaleX);
+ invScaleY = SkScalarAbs(invScaleY);
+
+ if (SkScalarNearlyEqual(invScaleX, 1) && SkScalarNearlyEqual(invScaleY, 1)) {
+ return false; // no need for HQ
+ }
+
+ if (invScaleX > 1 || invScaleY > 1) {
+ return false; // only use HQ when upsampling
+ }
+
+ const int dstW = SkScalarRoundToScalar(provider.width() / invScaleX);
+ const int dstH = SkScalarRoundToScalar(provider.height() / invScaleY);
+ const SkBitmapCacheDesc desc = provider.makeCacheDesc(dstW, dstH);
+
+ if (!SkBitmapCache::FindWH(desc, &fResultBitmap)) {
+ SkBitmap orig;
+ if (!provider.asBitmap(&orig)) {
+ return false;
+ }
+ SkAutoPixmapUnlock src;
+ if (!orig.requestLock(&src)) {
+ return false;
+ }
+ if (!SkBitmapScaler::Resize(&fResultBitmap, src.pixmap(), kHQ_RESIZE_METHOD,
+ dstW, dstH, SkResourceCache::GetAllocator())) {
+            return false; // we failed to create the scaled fResultBitmap
+ }
+
+ SkASSERT(fResultBitmap.getPixels());
+ fResultBitmap.setImmutable();
+ if (!provider.isVolatile()) {
+ if (SkBitmapCache::AddWH(desc, fResultBitmap)) {
+ provider.notifyAddedToCache();
+ }
+ }
+ }
+
+ SkASSERT(fResultBitmap.getPixels());
+
+ fInvMatrix.postScale(SkIntToScalar(dstW) / provider.width(),
+ SkIntToScalar(dstH) / provider.height());
+ fQuality = kLow_SkFilterQuality;
+ return true;
+}
+
+/*
+ * Modulo internal errors, this should always succeed *if* the matrix is downscaling
+ * (in this case, we have the inverse, so it succeeds if fInvMatrix is upscaling)
+ */
+bool SkDefaultBitmapControllerState::processMediumRequest(const SkBitmapProvider& provider) {
+ SkASSERT(fQuality <= kMedium_SkFilterQuality);
+ if (fQuality != kMedium_SkFilterQuality) {
+ return false;
+ }
+
+    // Our default return state is to downgrade the request to Low, with or without setting
+    // fResultBitmap to a valid bitmap.
+ fQuality = kLow_SkFilterQuality;
+
+ SkSize invScaleSize;
+ if (!fInvMatrix.decomposeScale(&invScaleSize, nullptr)) {
+ return false;
+ }
+
+ if (invScaleSize.width() > SK_Scalar1 || invScaleSize.height() > SK_Scalar1) {
+ fCurrMip.reset(SkMipMapCache::FindAndRef(provider.makeCacheDesc(), fSrcGammaTreatment));
+ if (nullptr == fCurrMip.get()) {
+ SkBitmap orig;
+ if (!provider.asBitmap(&orig)) {
+ return false;
+ }
+ fCurrMip.reset(SkMipMapCache::AddAndRef(orig, fSrcGammaTreatment));
+ if (nullptr == fCurrMip.get()) {
+ return false;
+ }
+ }
+ // diagnostic for a crasher...
+ if (nullptr == fCurrMip->data()) {
+ sk_throw();
+ }
+
+ const SkSize scale = SkSize::Make(SkScalarInvert(invScaleSize.width()),
+ SkScalarInvert(invScaleSize.height()));
+ SkMipMap::Level level;
+ if (fCurrMip->extractLevel(scale, &level)) {
+ const SkSize& invScaleFixup = level.fScale;
+ fInvMatrix.postScale(invScaleFixup.width(), invScaleFixup.height());
+
+ // todo: if we could wrap the fCurrMip in a pixelref, then we could just install
+ // that here, and not need to explicitly track it ourselves.
+ return fResultBitmap.installPixels(level.fPixmap);
+ } else {
+ // failed to extract, so release the mipmap
+ fCurrMip.reset(nullptr);
+ }
+ }
+ return false;
+}
+
+SkDefaultBitmapControllerState::SkDefaultBitmapControllerState(const SkBitmapProvider& provider,
+ const SkMatrix& inv,
+ SkFilterQuality qual,
+ SkSourceGammaTreatment treatment) {
+ fInvMatrix = inv;
+ fQuality = qual;
+ fSrcGammaTreatment = treatment;
+
+ if (this->processHQRequest(provider) || this->processMediumRequest(provider)) {
+ SkASSERT(fResultBitmap.getPixels());
+ } else {
+ (void)provider.asBitmap(&fResultBitmap);
+ fResultBitmap.lockPixels();
+ // lock may fail to give us pixels
+ }
+ SkASSERT(fQuality <= kLow_SkFilterQuality);
+
+ // fResultBitmap.getPixels() may be null, but our caller knows to check fPixmap.addr()
+ // and will destroy us if it is nullptr.
+ fPixmap.reset(fResultBitmap.info(), fResultBitmap.getPixels(), fResultBitmap.rowBytes(),
+ fResultBitmap.getColorTable());
+}
+
+SkBitmapController::State* SkDefaultBitmapController::onRequestBitmap(const SkBitmapProvider& bm,
+ const SkMatrix& inverse,
+ SkFilterQuality quality,
+ void* storage, size_t size) {
+ return SkInPlaceNewCheck<SkDefaultBitmapControllerState>(storage, size, bm, inverse, quality,
+ fSrcGammaTreatment);
+}
diff --git a/gfx/skia/skia/src/core/SkBitmapController.h b/gfx/skia/skia/src/core/SkBitmapController.h
new file mode 100644
index 000000000..f31c8eef5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapController.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapController_DEFINED
+#define SkBitmapController_DEFINED
+
+#include "SkBitmap.h"
+#include "SkBitmapCache.h"
+#include "SkFilterQuality.h"
+#include "SkMatrix.h"
+
+class SkBitmapProvider;
+
+/**
+ * Handles requests to scale, filter, and lock a bitmap to be rasterized.
+ */
+class SkBitmapController : ::SkNoncopyable {
+public:
+ class State : ::SkNoncopyable {
+ public:
+ virtual ~State() {}
+
+ const SkPixmap& pixmap() const { return fPixmap; }
+ const SkMatrix& invMatrix() const { return fInvMatrix; }
+ SkFilterQuality quality() const { return fQuality; }
+
+ protected:
+ SkPixmap fPixmap;
+ SkMatrix fInvMatrix;
+ SkFilterQuality fQuality;
+
+ private:
+ friend class SkBitmapController;
+ };
+
+ virtual ~SkBitmapController() {}
+
+ State* requestBitmap(const SkBitmapProvider&, const SkMatrix& inverse, SkFilterQuality,
+ void* storage, size_t storageSize);
+
+ State* requestBitmap(const SkBitmapProvider& bp, const SkMatrix& inv, SkFilterQuality quality) {
+ return this->requestBitmap(bp, inv, quality, nullptr, 0);
+ }
+
+protected:
+ virtual State* onRequestBitmap(const SkBitmapProvider&, const SkMatrix& inv, SkFilterQuality,
+ void* storage, size_t storageSize) = 0;
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "SkMipMap.h"
+
+class SkDefaultBitmapController : public SkBitmapController {
+public:
+ SkDefaultBitmapController(SkSourceGammaTreatment treatment) : fSrcGammaTreatment(treatment) {}
+
+protected:
+ State* onRequestBitmap(const SkBitmapProvider&, const SkMatrix& inverse, SkFilterQuality,
+ void* storage, size_t storageSize) override;
+
+private:
+ const SkSourceGammaTreatment fSrcGammaTreatment;
+};
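+
+// Typical usage, mirroring SkBitmapProcInfo::init() in SkBitmapProcState.cpp:
+//
+//   SkDefaultBitmapController controller(srcGammaTreatment);
+//   SkBitmapController::State* state = controller.requestBitmap(provider, inverseMatrix, quality,
+//                                                                storage, storageSize);
+//   if (state) {
+//       // consume state->pixmap(), state->invMatrix(), and state->quality()
+//   }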
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBitmapDevice.cpp b/gfx/skia/skia/src/core/SkBitmapDevice.cpp
new file mode 100644
index 000000000..26d253cf3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapDevice.cpp
@@ -0,0 +1,454 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapDevice.h"
+#include "SkConfig8888.h"
+#include "SkDraw.h"
+#include "SkImageFilter.h"
+#include "SkImageFilterCache.h"
+#include "SkMallocPixelRef.h"
+#include "SkMatrix.h"
+#include "SkPaint.h"
+#include "SkPath.h"
+#include "SkPixelRef.h"
+#include "SkPixmap.h"
+#include "SkRasterClip.h"
+#include "SkShader.h"
+#include "SkSpecialImage.h"
+#include "SkSurface.h"
+#include "SkXfermode.h"
+
+class SkColorTable;
+
+static bool valid_for_bitmap_device(const SkImageInfo& info,
+ SkAlphaType* newAlphaType) {
+ if (info.width() < 0 || info.height() < 0) {
+ return false;
+ }
+
+ // TODO: can we stop supporting kUnknown in SkBitmapDevice?
+ if (kUnknown_SkColorType == info.colorType()) {
+ if (newAlphaType) {
+ *newAlphaType = kUnknown_SkAlphaType;
+ }
+ return true;
+ }
+
+ switch (info.alphaType()) {
+ case kPremul_SkAlphaType:
+ case kOpaque_SkAlphaType:
+ break;
+ default:
+ return false;
+ }
+
+ SkAlphaType canonicalAlphaType = info.alphaType();
+
+ switch (info.colorType()) {
+ case kAlpha_8_SkColorType:
+ break;
+ case kRGB_565_SkColorType:
+ canonicalAlphaType = kOpaque_SkAlphaType;
+ break;
+ case kN32_SkColorType:
+ break;
+ case kRGBA_F16_SkColorType:
+ break;
+ default:
+ return false;
+ }
+
+ if (newAlphaType) {
+ *newAlphaType = canonicalAlphaType;
+ }
+ return true;
+}
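+// For example, a kRGB_565 info with kPremul_SkAlphaType is accepted but canonicalized to
+// kOpaque_SkAlphaType, while a kN32 info keeps whatever (premul/opaque) alpha type it had.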
+
+SkBitmapDevice::SkBitmapDevice(const SkBitmap& bitmap)
+ : INHERITED(bitmap.info(), SkSurfaceProps(SkSurfaceProps::kLegacyFontHost_InitType))
+ , fBitmap(bitmap)
+{
+ SkASSERT(valid_for_bitmap_device(bitmap.info(), nullptr));
+ fBitmap.lockPixels();
+}
+
+SkBitmapDevice* SkBitmapDevice::Create(const SkImageInfo& info) {
+ return Create(info, SkSurfaceProps(SkSurfaceProps::kLegacyFontHost_InitType));
+}
+
+SkBitmapDevice::SkBitmapDevice(const SkBitmap& bitmap, const SkSurfaceProps& surfaceProps)
+ : INHERITED(bitmap.info(), surfaceProps)
+ , fBitmap(bitmap)
+{
+ SkASSERT(valid_for_bitmap_device(bitmap.info(), nullptr));
+ fBitmap.lockPixels();
+}
+
+SkBitmapDevice* SkBitmapDevice::Create(const SkImageInfo& origInfo,
+ const SkSurfaceProps& surfaceProps) {
+ SkAlphaType newAT = origInfo.alphaType();
+ if (!valid_for_bitmap_device(origInfo, &newAT)) {
+ return nullptr;
+ }
+
+ const SkImageInfo info = origInfo.makeAlphaType(newAT);
+ SkBitmap bitmap;
+
+ if (kUnknown_SkColorType == info.colorType()) {
+ if (!bitmap.setInfo(info)) {
+ return nullptr;
+ }
+ } else if (info.isOpaque()) {
+ // If this bitmap is opaque, we don't have any sensible default color,
+ // so we just return uninitialized pixels.
+ if (!bitmap.tryAllocPixels(info)) {
+ return nullptr;
+ }
+ } else {
+ // This bitmap has transparency, so we'll zero the pixels (to transparent).
+ // We use a ZeroedPRFactory as a faster alloc-then-eraseColor(SK_ColorTRANSPARENT).
+ SkMallocPixelRef::ZeroedPRFactory factory;
+ if (!bitmap.tryAllocPixels(info, &factory, nullptr/*color table*/)) {
+ return nullptr;
+ }
+ }
+
+ return new SkBitmapDevice(bitmap, surfaceProps);
+}
+
+void SkBitmapDevice::setNewSize(const SkISize& size) {
+ SkASSERT(!fBitmap.pixelRef());
+ fBitmap.setInfo(fBitmap.info().makeWH(size.fWidth, size.fHeight));
+ this->privateResize(fBitmap.info().width(), fBitmap.info().height());
+}
+
+void SkBitmapDevice::replaceBitmapBackendForRasterSurface(const SkBitmap& bm) {
+ SkASSERT(bm.width() == fBitmap.width());
+ SkASSERT(bm.height() == fBitmap.height());
+ fBitmap = bm; // intent is to use bm's pixelRef (and rowbytes/config)
+ fBitmap.lockPixels();
+ this->privateResize(fBitmap.info().width(), fBitmap.info().height());
+}
+
+SkBaseDevice* SkBitmapDevice::onCreateDevice(const CreateInfo& cinfo, const SkPaint*) {
+ const SkSurfaceProps surfaceProps(this->surfaceProps().flags(), cinfo.fPixelGeometry);
+ return SkBitmapDevice::Create(cinfo.fInfo, surfaceProps);
+}
+
+const SkBitmap& SkBitmapDevice::onAccessBitmap() {
+ return fBitmap;
+}
+
+bool SkBitmapDevice::onAccessPixels(SkPixmap* pmap) {
+ if (this->onPeekPixels(pmap)) {
+ fBitmap.notifyPixelsChanged();
+ return true;
+ }
+ return false;
+}
+
+bool SkBitmapDevice::onPeekPixels(SkPixmap* pmap) {
+ const SkImageInfo info = fBitmap.info();
+ if (fBitmap.getPixels() && (kUnknown_SkColorType != info.colorType())) {
+ SkColorTable* ctable = nullptr;
+ pmap->reset(fBitmap.info(), fBitmap.getPixels(), fBitmap.rowBytes(), ctable);
+ return true;
+ }
+ return false;
+}
+
+bool SkBitmapDevice::onWritePixels(const SkImageInfo& srcInfo, const void* srcPixels,
+ size_t srcRowBytes, int x, int y) {
+ // since we haven't stopped creating un-pixeled devices yet, check for no pixels here
+ if (nullptr == fBitmap.getPixels()) {
+ return false;
+ }
+
+ const SkImageInfo dstInfo = fBitmap.info().makeWH(srcInfo.width(), srcInfo.height());
+
+ void* dstPixels = fBitmap.getAddr(x, y);
+ size_t dstRowBytes = fBitmap.rowBytes();
+
+ if (SkPixelInfo::CopyPixels(dstInfo, dstPixels, dstRowBytes, srcInfo, srcPixels, srcRowBytes)) {
+ fBitmap.notifyPixelsChanged();
+ return true;
+ }
+ return false;
+}
+
+bool SkBitmapDevice::onReadPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int x, int y) {
+ return fBitmap.readPixels(dstInfo, dstPixels, dstRowBytes, x, y);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkBitmapDevice::drawPaint(const SkDraw& draw, const SkPaint& paint) {
+ draw.drawPaint(paint);
+}
+
+void SkBitmapDevice::drawPoints(const SkDraw& draw, SkCanvas::PointMode mode, size_t count,
+ const SkPoint pts[], const SkPaint& paint) {
+ draw.drawPoints(mode, count, pts, paint);
+}
+
+void SkBitmapDevice::drawRect(const SkDraw& draw, const SkRect& r, const SkPaint& paint) {
+ draw.drawRect(r, paint);
+}
+
+void SkBitmapDevice::drawOval(const SkDraw& draw, const SkRect& oval, const SkPaint& paint) {
+ SkPath path;
+ path.addOval(oval);
+ // call the VIRTUAL version, so any subclasses that do handle drawPath aren't
+ // required to override drawOval.
+ this->drawPath(draw, path, paint, nullptr, true);
+}
+
+void SkBitmapDevice::drawRRect(const SkDraw& draw, const SkRRect& rrect, const SkPaint& paint) {
+#ifdef SK_IGNORE_BLURRED_RRECT_OPT
+ SkPath path;
+
+ path.addRRect(rrect);
+ // call the VIRTUAL version, so any subclasses that do handle drawPath aren't
+ // required to override drawRRect.
+ this->drawPath(draw, path, paint, nullptr, true);
+#else
+ draw.drawRRect(rrect, paint);
+#endif
+}
+
+void SkBitmapDevice::drawPath(const SkDraw& draw, const SkPath& path,
+ const SkPaint& paint, const SkMatrix* prePathMatrix,
+ bool pathIsMutable) {
+ draw.drawPath(path, paint, prePathMatrix, pathIsMutable);
+}
+
+void SkBitmapDevice::drawBitmap(const SkDraw& draw, const SkBitmap& bitmap,
+ const SkMatrix& matrix, const SkPaint& paint) {
+ LogDrawScaleFactor(SkMatrix::Concat(*draw.fMatrix, matrix), paint.getFilterQuality());
+ draw.drawBitmap(bitmap, matrix, nullptr, paint);
+}
+
+static inline bool CanApplyDstMatrixAsCTM(const SkMatrix& m, const SkPaint& paint) {
+ if (!paint.getMaskFilter()) {
+ return true;
+ }
+
+ // Some mask filters parameters (sigma) depend on the CTM/scale.
+ return m.getType() <= SkMatrix::kTranslate_Mask;
+}
+
+void SkBitmapDevice::drawBitmapRect(const SkDraw& draw, const SkBitmap& bitmap,
+ const SkRect* src, const SkRect& dst,
+ const SkPaint& paint, SkCanvas::SrcRectConstraint constraint) {
+ SkMatrix matrix;
+ SkRect bitmapBounds, tmpSrc, tmpDst;
+ SkBitmap tmpBitmap;
+
+ bitmapBounds.isetWH(bitmap.width(), bitmap.height());
+
+ // Compute matrix from the two rectangles
+ if (src) {
+ tmpSrc = *src;
+ } else {
+ tmpSrc = bitmapBounds;
+ }
+ matrix.setRectToRect(tmpSrc, dst, SkMatrix::kFill_ScaleToFit);
+
+ LogDrawScaleFactor(SkMatrix::Concat(*draw.fMatrix, matrix), paint.getFilterQuality());
+
+ const SkRect* dstPtr = &dst;
+ const SkBitmap* bitmapPtr = &bitmap;
+
+ // clip the tmpSrc to the bounds of the bitmap, and recompute dstRect if
+ // needed (if the src was clipped). No check needed if src==null.
+ if (src) {
+ if (!bitmapBounds.contains(*src)) {
+ if (!tmpSrc.intersect(bitmapBounds)) {
+ return; // nothing to draw
+ }
+ // recompute dst, based on the smaller tmpSrc
+ matrix.mapRect(&tmpDst, tmpSrc);
+ dstPtr = &tmpDst;
+ }
+ }
+
+ if (src && !src->contains(bitmapBounds) &&
+ SkCanvas::kFast_SrcRectConstraint == constraint &&
+ paint.getFilterQuality() != kNone_SkFilterQuality) {
+ // src is smaller than the bounds of the bitmap, and we are filtering, so we don't know
+ // how much more of the bitmap we need, so we can't use extractSubset or drawBitmap,
+ // but we must use a shader w/ dst bounds (which can access all of the bitmap needed).
+ goto USE_SHADER;
+ }
+
+ if (src) {
+ // since we may need to clamp to the borders of the src rect within
+ // the bitmap, we extract a subset.
+ const SkIRect srcIR = tmpSrc.roundOut();
+ if (!bitmap.extractSubset(&tmpBitmap, srcIR)) {
+ return;
+ }
+ bitmapPtr = &tmpBitmap;
+
+ // Since we did an extract, we need to adjust the matrix accordingly
+ SkScalar dx = 0, dy = 0;
+ if (srcIR.fLeft > 0) {
+ dx = SkIntToScalar(srcIR.fLeft);
+ }
+ if (srcIR.fTop > 0) {
+ dy = SkIntToScalar(srcIR.fTop);
+ }
+ if (dx || dy) {
+ matrix.preTranslate(dx, dy);
+ }
+
+ SkRect extractedBitmapBounds;
+ extractedBitmapBounds.isetWH(bitmapPtr->width(), bitmapPtr->height());
+ if (extractedBitmapBounds == tmpSrc) {
+ // no fractional part in src, we can just call drawBitmap
+ goto USE_DRAWBITMAP;
+ }
+ } else {
+ USE_DRAWBITMAP:
+ // We can go faster by just calling drawBitmap, which will concat the
+ // matrix with the CTM, and try to call drawSprite if it can. If not,
+ // it will make a shader and call drawRect, as we do below.
+ if (CanApplyDstMatrixAsCTM(matrix, paint)) {
+ draw.drawBitmap(*bitmapPtr, matrix, dstPtr, paint);
+ return;
+ }
+ }
+
+ USE_SHADER:
+
+ // Since the shader need only live for our stack-frame, pass in a custom allocator. This
+ // can save malloc calls, and signals to SkMakeBitmapShader to not try to copy the bitmap
+ // if it's mutable, since that precaution is not needed (given the short lifetime of the shader).
+ SkTBlitterAllocator allocator;
+ // construct a shader, so we can call drawRect with the dst
+ auto s = SkMakeBitmapShader(*bitmapPtr, SkShader::kClamp_TileMode, SkShader::kClamp_TileMode,
+ &matrix, kNever_SkCopyPixelsMode, &allocator);
+ if (!s) {
+ return;
+ }
+ // we deliberately add a ref, since the allocator wants to be the last owner
+ s.get()->ref();
+
+ SkPaint paintWithShader(paint);
+ paintWithShader.setStyle(SkPaint::kFill_Style);
+ paintWithShader.setShader(s);
+
+ // Call ourself, in case the subclass wanted to share this setup code
+ // but handle the drawRect code themselves.
+ this->drawRect(draw, *dstPtr, paintWithShader);
+}
+
+void SkBitmapDevice::drawSprite(const SkDraw& draw, const SkBitmap& bitmap,
+ int x, int y, const SkPaint& paint) {
+ draw.drawSprite(bitmap, x, y, paint);
+}
+
+void SkBitmapDevice::drawText(const SkDraw& draw, const void* text, size_t len,
+ SkScalar x, SkScalar y, const SkPaint& paint) {
+ draw.drawText((const char*)text, len, x, y, paint);
+}
+
+void SkBitmapDevice::drawPosText(const SkDraw& draw, const void* text, size_t len,
+ const SkScalar xpos[], int scalarsPerPos,
+ const SkPoint& offset, const SkPaint& paint) {
+ draw.drawPosText((const char*)text, len, xpos, scalarsPerPos, offset, paint);
+}
+
+void SkBitmapDevice::drawVertices(const SkDraw& draw, SkCanvas::VertexMode vmode,
+ int vertexCount,
+ const SkPoint verts[], const SkPoint textures[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint) {
+ draw.drawVertices(vmode, vertexCount, verts, textures, colors, xmode,
+ indices, indexCount, paint);
+}
+
+void SkBitmapDevice::drawDevice(const SkDraw& draw, SkBaseDevice* device,
+ int x, int y, const SkPaint& paint) {
+ SkASSERT(!paint.getImageFilter());
+ draw.drawSprite(static_cast<SkBitmapDevice*>(device)->fBitmap, x, y, paint);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkBitmapDevice::drawSpecial(const SkDraw& draw, SkSpecialImage* srcImg, int x, int y,
+ const SkPaint& paint) {
+ SkASSERT(!srcImg->isTextureBacked());
+
+ SkBitmap resultBM;
+
+ SkImageFilter* filter = paint.getImageFilter();
+ if (filter) {
+ SkIPoint offset = SkIPoint::Make(0, 0);
+ SkMatrix matrix = *draw.fMatrix;
+ matrix.postTranslate(SkIntToScalar(-x), SkIntToScalar(-y));
+ const SkIRect clipBounds = draw.fRC->getBounds().makeOffset(-x, -y);
+ SkAutoTUnref<SkImageFilterCache> cache(this->getImageFilterCache());
+ SkImageFilter::OutputProperties outputProperties(fBitmap.colorSpace());
+ SkImageFilter::Context ctx(matrix, clipBounds, cache.get(), outputProperties);
+
+ sk_sp<SkSpecialImage> resultImg(filter->filterImage(srcImg, ctx, &offset));
+ if (resultImg) {
+ SkPaint tmpUnfiltered(paint);
+ tmpUnfiltered.setImageFilter(nullptr);
+ if (resultImg->getROPixels(&resultBM)) {
+ this->drawSprite(draw, resultBM, x + offset.x(), y + offset.y(), tmpUnfiltered);
+ }
+ }
+ } else {
+ if (srcImg->getROPixels(&resultBM)) {
+ this->drawSprite(draw, resultBM, x, y, paint);
+ }
+ }
+}
+
+sk_sp<SkSpecialImage> SkBitmapDevice::makeSpecial(const SkBitmap& bitmap) {
+ return SkSpecialImage::MakeFromRaster(bitmap.bounds(), bitmap);
+}
+
+sk_sp<SkSpecialImage> SkBitmapDevice::makeSpecial(const SkImage* image) {
+ return SkSpecialImage::MakeFromImage(SkIRect::MakeWH(image->width(), image->height()),
+ image->makeNonTextureImage());
+}
+
+sk_sp<SkSpecialImage> SkBitmapDevice::snapSpecial() {
+ return this->makeSpecial(fBitmap);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkSurface> SkBitmapDevice::makeSurface(const SkImageInfo& info, const SkSurfaceProps& props) {
+ return SkSurface::MakeRaster(info, &props);
+}
+
+SkImageFilterCache* SkBitmapDevice::getImageFilterCache() {
+ SkImageFilterCache* cache = SkImageFilterCache::Get();
+ cache->ref();
+ return cache;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkBitmapDevice::onShouldDisableLCD(const SkPaint& paint) const {
+ if (kN32_SkColorType != fBitmap.colorType() ||
+ paint.getRasterizer() ||
+ paint.getPathEffect() ||
+ paint.isFakeBoldText() ||
+ paint.getStyle() != SkPaint::kFill_Style ||
+ !paint.isSrcOver())
+ {
+ return true;
+ }
+ return false;
+}
diff --git a/gfx/skia/skia/src/core/SkBitmapFilter.h b/gfx/skia/skia/src/core/SkBitmapFilter.h
new file mode 100644
index 000000000..ca3e0930f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapFilter.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapFilter_DEFINED
+#define SkBitmapFilter_DEFINED
+
+#include "SkFixed.h"
+#include "SkMath.h"
+#include "SkScalar.h"
+
+#include "SkNx.h"
+
+// size of the precomputed bitmap filter tables for high quality filtering.
+// Used to precompute the shape of the filter kernel.
+// Table size chosen from experiments to see where I could start to see a difference.
+
+#define SKBITMAP_FILTER_TABLE_SIZE 128
+
+class SkBitmapFilter {
+public:
+ SkBitmapFilter(float width) : fWidth(width), fInvWidth(1.f/width) {
+ fPrecomputed = false;
+ fLookupMultiplier = this->invWidth() * (SKBITMAP_FILTER_TABLE_SIZE-1);
+ }
+ virtual ~SkBitmapFilter() {}
+
+ SkScalar lookupScalar(float x) const {
+ if (!fPrecomputed) {
+ precomputeTable();
+ }
+ int filter_idx = int(sk_float_abs(x * fLookupMultiplier));
+ SkASSERT(filter_idx < SKBITMAP_FILTER_TABLE_SIZE);
+ return fFilterTableScalar[filter_idx];
+ }
+
+ float width() const { return fWidth; }
+ float invWidth() const { return fInvWidth; }
+ virtual float evaluate(float x) const = 0;
+
+ virtual float evaluate_n(float val, float diff, int count, float* output) const {
+ float sum = 0;
+ for (int index = 0; index < count; index++) {
+ float filterValue = evaluate(val);
+ *output++ = filterValue;
+ sum += filterValue;
+ val += diff;
+ }
+ return sum;
+ }
+
+protected:
+ float fWidth;
+ float fInvWidth;
+ float fLookupMultiplier;
+
+ mutable bool fPrecomputed;
+ mutable SkScalar fFilterTableScalar[SKBITMAP_FILTER_TABLE_SIZE];
+
+private:
+ void precomputeTable() const {
+ fPrecomputed = true;
+ SkScalar *ftpScalar = fFilterTableScalar;
+ for (int x = 0; x < SKBITMAP_FILTER_TABLE_SIZE; ++x) {
+ float fx = ((float)x + .5f) * this->width() / SKBITMAP_FILTER_TABLE_SIZE;
+ float filter_value = evaluate(fx);
+ *ftpScalar++ = filter_value;
+ }
+ }
+};
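+// Worked example of the table lookup: SkMitchellFilter below has width 2, so
+// fLookupMultiplier = (1/2) * (SKBITMAP_FILTER_TABLE_SIZE - 1) = 63.5, and lookupScalar(1.0f)
+// reads index int(1.0 * 63.5) = 63, whose precomputed sample point is (63 + 0.5) * 2 / 128 ~= 0.99.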
+
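+// The constructor below encodes the Mitchell-Netravali cubic with B = C = 1/3: for 1 < |x| < 2 the
+// kernel is ((-B - 6C)|x|^3 + (6B + 30C)|x|^2 + (-12B - 48C)|x| + (8B + 24C)) / 6, and for
+// |x| <= 1 it is ((12 - 9B - 6C)|x|^3 + (-18 + 12B + 6C)|x|^2 + (6 - 2B)) / 6, which is exactly
+// what fA1..fD1 and fA2, fB2, fD2 spell out.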
+class SkMitchellFilter final : public SkBitmapFilter {
+public:
+ SkMitchellFilter()
+ : INHERITED(2)
+ , fB(1.f / 3.f)
+ , fC(1.f / 3.f)
+ , fA1(-fB - 6*fC)
+ , fB1(6*fB + 30*fC)
+ , fC1(-12*fB - 48*fC)
+ , fD1(8*fB + 24*fC)
+ , fA2(12 - 9*fB - 6*fC)
+ , fB2(-18 + 12*fB + 6*fC)
+ , fD2(6 - 2*fB)
+ {}
+
+ float evaluate(float x) const override {
+ x = fabsf(x);
+ if (x > 2.f) {
+ return 0;
+ } else if (x > 1.f) {
+ return (((fA1 * x + fB1) * x + fC1) * x + fD1) * (1.f/6.f);
+ } else {
+ return ((fA2 * x + fB2) * x*x + fD2) * (1.f/6.f);
+ }
+ }
+
+ Sk4f evalcore_n(const Sk4f& val) const {
+ Sk4f x = val.abs();
+ Sk4f over2 = x > Sk4f(2);
+ Sk4f over1 = x > Sk4f(1);
+ Sk4f poly1 = (((Sk4f(fA1) * x + Sk4f(fB1)) * x + Sk4f(fC1)) * x + Sk4f(fD1))
+ * Sk4f(1.f/6.f);
+ Sk4f poly0 = ((Sk4f(fA2) * x + Sk4f(fB2)) * x*x + Sk4f(fD2)) * Sk4f(1.f/6.f);
+ return over2.thenElse(Sk4f(0), over1.thenElse(poly1, poly0));
+ }
+
+ float evaluate_n(float val, float diff, int count, float* output) const override {
+ Sk4f sum(0);
+ while (count >= 4) {
+ float v0 = val;
+ float v1 = val += diff;
+ float v2 = val += diff;
+ float v3 = val += diff;
+ val += diff;
+ Sk4f filterValue = evalcore_n(Sk4f(v0, v1, v2, v3));
+ filterValue.store(output);
+ output += 4;
+ sum = sum + filterValue;
+ count -= 4;
+ }
+ float sums[4];
+ sum.store(sums);
+ float result = sums[0] + sums[1] + sums[2] + sums[3];
+ result += INHERITED::evaluate_n(val, diff, count, output);
+ return result;
+ }
+
+ protected:
+ float fB, fC;
+ float fA1, fB1, fC1, fD1;
+ float fA2, fB2, fD2;
+private:
+ typedef SkBitmapFilter INHERITED;
+};
+
+class SkGaussianFilter final : public SkBitmapFilter {
+ float fAlpha, fExpWidth;
+
+public:
+ SkGaussianFilter(float a, float width = 2)
+ : SkBitmapFilter(width)
+ , fAlpha(a)
+ , fExpWidth(expf(-a * width * width))
+ {}
+
+ float evaluate(float x) const override {
+ return SkTMax(0.f, float(expf(-fAlpha*x*x) - fExpWidth));
+ }
+};
+
+class SkTriangleFilter final : public SkBitmapFilter {
+public:
+ SkTriangleFilter(float width = 1) : SkBitmapFilter(width) {}
+
+ float evaluate(float x) const override {
+ return SkTMax(0.f, fWidth - fabsf(x));
+ }
+};
+
+class SkBoxFilter final : public SkBitmapFilter {
+public:
+ SkBoxFilter(float width = 0.5f) : SkBitmapFilter(width) {}
+
+ float evaluate(float x) const override {
+ return (x >= -fWidth && x < fWidth) ? 1.0f : 0.0f;
+ }
+};
+
+class SkHammingFilter final : public SkBitmapFilter {
+public:
+ SkHammingFilter(float width = 1) : SkBitmapFilter(width) {}
+
+ float evaluate(float x) const override {
+ if (x <= -fWidth || x >= fWidth) {
+ return 0.0f; // Outside of the window.
+ }
+ if (x > -FLT_EPSILON && x < FLT_EPSILON) {
+ return 1.0f; // Special case the sinc discontinuity at the origin.
+ }
+ const float xpi = x * static_cast<float>(SK_ScalarPI);
+
+ return ((sk_float_sin(xpi) / xpi) * // sinc(x)
+ (0.54f + 0.46f * sk_float_cos(xpi / fWidth))); // hamming(x)
+ }
+};
+
+class SkLanczosFilter final : public SkBitmapFilter {
+public:
+ SkLanczosFilter(float width = 3.f) : SkBitmapFilter(width) {}
+
+ float evaluate(float x) const override {
+ if (x <= -fWidth || x >= fWidth) {
+ return 0.0f; // Outside of the window.
+ }
+ if (x > -FLT_EPSILON && x < FLT_EPSILON) {
+ return 1.0f; // Special case the discontinuity at the origin.
+ }
+ float xpi = x * static_cast<float>(SK_ScalarPI);
+ return (sk_float_sin(xpi) / xpi) * // sinc(x)
+ sk_float_sin(xpi / fWidth) / (xpi / fWidth); // sinc(x/fWidth)
+ }
+};
+
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBitmapProcShader.cpp b/gfx/skia/skia/src/core/SkBitmapProcShader.cpp
new file mode 100644
index 000000000..e0d281b02
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapProcShader.cpp
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapProcShader.h"
+#include "SkBitmapProcState.h"
+#include "SkBitmapProvider.h"
+
+static bool only_scale_and_translate(const SkMatrix& matrix) {
+ unsigned mask = SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask;
+ return (matrix.getType() & ~mask) == 0;
+}
+
+class BitmapProcInfoContext : public SkShader::Context {
+public:
+ // The info has been allocated elsewhere, but we are responsible for calling its destructor.
+ BitmapProcInfoContext(const SkShader& shader, const SkShader::ContextRec& rec,
+ SkBitmapProcInfo* info)
+ : INHERITED(shader, rec)
+ , fInfo(info)
+ {
+ fFlags = 0;
+ if (fInfo->fPixmap.isOpaque() && (255 == this->getPaintAlpha())) {
+ fFlags |= SkShader::kOpaqueAlpha_Flag;
+ }
+
+ if (1 == fInfo->fPixmap.height() && only_scale_and_translate(this->getTotalInverse())) {
+ fFlags |= SkShader::kConstInY32_Flag;
+ }
+ }
+
+ ~BitmapProcInfoContext() override {
+ fInfo->~SkBitmapProcInfo();
+ }
+
+ uint32_t getFlags() const override { return fFlags; }
+
+private:
+ SkBitmapProcInfo* fInfo;
+ uint32_t fFlags;
+
+ typedef SkShader::Context INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+class BitmapProcShaderContext : public BitmapProcInfoContext {
+public:
+ BitmapProcShaderContext(const SkShader& shader, const SkShader::ContextRec& rec,
+ SkBitmapProcState* state)
+ : INHERITED(shader, rec, state)
+ , fState(state)
+ {}
+
+ void shadeSpan(int x, int y, SkPMColor dstC[], int count) override {
+ const SkBitmapProcState& state = *fState;
+ if (state.getShaderProc32()) {
+ state.getShaderProc32()(&state, x, y, dstC, count);
+ return;
+ }
+
+ const int BUF_MAX = 128;
+ uint32_t buffer[BUF_MAX];
+ SkBitmapProcState::MatrixProc mproc = state.getMatrixProc();
+ SkBitmapProcState::SampleProc32 sproc = state.getSampleProc32();
+ const int max = state.maxCountForBufferSize(sizeof(buffer[0]) * BUF_MAX);
+
+ SkASSERT(state.fPixmap.addr());
+
+ for (;;) {
+ int n = SkTMin(count, max);
+ SkASSERT(n > 0 && n < BUF_MAX*2);
+ mproc(state, buffer, n, x, y);
+ sproc(state, buffer, n, dstC);
+
+ if ((count -= n) == 0) {
+ break;
+ }
+ SkASSERT(count > 0);
+ x += n;
+ dstC += n;
+ }
+ }
+
+ ShadeProc asAShadeProc(void** ctx) override {
+ if (fState->getShaderProc32()) {
+ *ctx = fState;
+ return (ShadeProc)fState->getShaderProc32();
+ }
+ return nullptr;
+ }
+
+private:
+ SkBitmapProcState* fState;
+
+ typedef BitmapProcInfoContext INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#include "SkLinearBitmapPipeline.h"
+#include "SkPM4f.h"
+#include "SkXfermode.h"
+
+class LinearPipelineContext : public BitmapProcInfoContext {
+public:
+ LinearPipelineContext(const SkShader& shader, const SkShader::ContextRec& rec,
+ SkBitmapProcInfo* info)
+ : INHERITED(shader, rec, info)
+ {
+ // Save things off in case we need to build a blitter pipeline.
+ fSrcPixmap = info->fPixmap;
+ fAlpha = SkColorGetA(info->fPaintColor) / 255.0f;
+ fXMode = info->fTileModeX;
+ fYMode = info->fTileModeY;
+ fFilterQuality = info->fFilterQuality;
+ fMatrixTypeMask = info->fRealInvMatrix.getType();
+
+ fShaderPipeline.init(
+ info->fRealInvMatrix, info->fFilterQuality,
+ info->fTileModeX, info->fTileModeY,
+ info->fPaintColor,
+ info->fPixmap);
+
+ // To implement the old shadeSpan entry-point, we need to efficiently convert our native
+ // floats into SkPMColor. The SkXfermode::D32Procs do exactly that.
+ //
+ sk_sp<SkXfermode> xfer(SkXfermode::Make(SkXfermode::kSrc_Mode));
+ fXferProc = SkXfermode::GetD32Proc(xfer.get(), 0);
+ }
+
+ void shadeSpan4f(int x, int y, SkPM4f dstC[], int count) override {
+ fShaderPipeline->shadeSpan4f(x, y, dstC, count);
+ }
+
+ void shadeSpan(int x, int y, SkPMColor dstC[], int count) override {
+ const int N = 128;
+ SkPM4f tmp[N];
+
+ while (count > 0) {
+ const int n = SkTMin(count, N);
+ fShaderPipeline->shadeSpan4f(x, y, tmp, n);
+ fXferProc(nullptr, dstC, tmp, n, nullptr);
+ dstC += n;
+ x += n;
+ count -= n;
+ }
+ }
+
+ bool onChooseBlitProcs(const SkImageInfo& dstInfo, BlitState* state) override {
+ SkXfermode::Mode mode;
+ if (!SkXfermode::AsMode(state->fXfer, &mode)) { return false; }
+
+ if (SkLinearBitmapPipeline::ClonePipelineForBlitting(
+ &fBlitterPipeline, *fShaderPipeline,
+ fMatrixTypeMask,
+ fXMode, fYMode,
+ fFilterQuality, fSrcPixmap,
+ fAlpha, mode, dstInfo))
+ {
+ state->fStorage[0] = fBlitterPipeline.get();
+ state->fBlitBW = &LinearPipelineContext::ForwardToPipeline;
+
+ return true;
+ }
+
+ return false;
+ }
+
+ static void ForwardToPipeline(BlitState* state, int x, int y, const SkPixmap& dst, int count) {
+ SkLinearBitmapPipeline* pipeline = static_cast<SkLinearBitmapPipeline*>(state->fStorage[0]);
+ void* addr = dst.writable_addr32(x, y);
+ pipeline->blitSpan(x, y, addr, count);
+ }
+
+private:
+ SkEmbeddableLinearPipeline fShaderPipeline;
+ SkEmbeddableLinearPipeline fBlitterPipeline;
+ SkXfermode::D32Proc fXferProc;
+ SkPixmap fSrcPixmap;
+ float fAlpha;
+ SkShader::TileMode fXMode;
+ SkShader::TileMode fYMode;
+ SkMatrix::TypeMask fMatrixTypeMask;
+ SkFilterQuality fFilterQuality;
+
+ typedef BitmapProcInfoContext INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static bool choose_linear_pipeline(const SkShader::ContextRec& rec, const SkImageInfo& srcInfo) {
+ // If we get here, we can reasonably use either context; respect the caller's preference.
+ //
+ bool needsPremul = srcInfo.alphaType() == kUnpremul_SkAlphaType;
+ bool needsSwizzle = srcInfo.bytesPerPixel() == 4 && srcInfo.colorType() != kN32_SkColorType;
+ return SkShader::ContextRec::kPM4f_DstType == rec.fPreferredDstType
+ || needsPremul || needsSwizzle;
+}
+
+size_t SkBitmapProcLegacyShader::ContextSize(const ContextRec& rec, const SkImageInfo& srcInfo) {
+ size_t size0 = sizeof(BitmapProcShaderContext) + sizeof(SkBitmapProcState);
+ size_t size1 = sizeof(LinearPipelineContext) + sizeof(SkBitmapProcInfo);
+ size_t s = SkTMax(size0, size1);
+ return s;
+}
+
+SkShader::Context* SkBitmapProcLegacyShader::MakeContext(const SkShader& shader,
+ TileMode tmx, TileMode tmy,
+ const SkBitmapProvider& provider,
+ const ContextRec& rec, void* storage) {
+ SkMatrix totalInverse;
+ // Do this first, so we know the matrix can be inverted.
+ if (!shader.computeTotalInverse(rec, &totalInverse)) {
+ return nullptr;
+ }
+
+ // Decide if we can/want to use the new linear pipeline
+ bool useLinearPipeline = choose_linear_pipeline(rec, provider.info());
+ SkSourceGammaTreatment treatment = SkMipMap::DeduceTreatment(rec);
+
+ if (useLinearPipeline) {
+ void* infoStorage = (char*)storage + sizeof(LinearPipelineContext);
+ SkBitmapProcInfo* info = new (infoStorage) SkBitmapProcInfo(provider, tmx, tmy, treatment);
+ if (!info->init(totalInverse, *rec.fPaint)) {
+ info->~SkBitmapProcInfo();
+ return nullptr;
+ }
+
+ return new (storage) LinearPipelineContext(shader, rec, info);
+ } else {
+ void* stateStorage = (char*)storage + sizeof(BitmapProcShaderContext);
+ SkBitmapProcState* state = new (stateStorage) SkBitmapProcState(provider, tmx, tmy,
+ treatment);
+ if (!state->setup(totalInverse, *rec.fPaint)) {
+ state->~SkBitmapProcState();
+ return nullptr;
+ }
+ return new (storage) BitmapProcShaderContext(shader, rec, state);
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkBitmapProcShader.h b/gfx/skia/skia/src/core/SkBitmapProcShader.h
new file mode 100644
index 000000000..4b7447e52
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapProcShader.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkBitmapProcShader_DEFINED
+#define SkBitmapProcShader_DEFINED
+
+#include "SkImagePriv.h"
+#include "SkShader.h"
+
+class SkBitmapProvider;
+
+class SkBitmapProcLegacyShader : public SkShader {
+private:
+ friend class SkImageShader;
+
+ static size_t ContextSize(const ContextRec&, const SkImageInfo& srcInfo);
+ static Context* MakeContext(const SkShader&, TileMode tmx, TileMode tmy,
+ const SkBitmapProvider&, const ContextRec&, void* storage);
+
+ typedef SkShader INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBitmapProcState.cpp b/gfx/skia/skia/src/core/SkBitmapProcState.cpp
new file mode 100644
index 000000000..183016e69
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapProcState.cpp
@@ -0,0 +1,828 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapCache.h"
+#include "SkBitmapController.h"
+#include "SkBitmapProcState.h"
+#include "SkColorPriv.h"
+#include "SkFilterProc.h"
+#include "SkPaint.h"
+#include "SkShader.h" // for tilemodes
+#include "SkUtilsArm.h"
+#include "SkBitmapScaler.h"
+#include "SkMipMap.h"
+#include "SkPixelRef.h"
+#include "SkImageEncoder.h"
+#include "SkResourceCache.h"
+
+#if defined(SK_ARM_HAS_NEON) || defined(SK_ARM_HAS_OPTIONAL_NEON)
+// These are defined in src/opts/SkBitmapProcState_arm_neon.cpp
+extern const SkBitmapProcState::SampleProc32 gSkBitmapProcStateSample32_neon[];
+extern void S16_D16_filter_DX_neon(const SkBitmapProcState&, const uint32_t*, int, uint16_t*);
+extern void Clamp_S16_D16_filter_DX_shaderproc_neon(const void *, int, int, uint16_t*, int);
+extern void Repeat_S16_D16_filter_DX_shaderproc_neon(const void *, int, int, uint16_t*, int);
+extern void SI8_opaque_D32_filter_DX_neon(const SkBitmapProcState&, const uint32_t*, int, SkPMColor*);
+extern void SI8_opaque_D32_filter_DX_shaderproc_neon(const void *, int, int, uint32_t*, int);
+extern void Clamp_SI8_opaque_D32_filter_DX_shaderproc_neon(const void*, int, int, uint32_t*, int);
+#endif
+
+extern void Clamp_S32_opaque_D32_nofilter_DX_shaderproc(const void*, int, int, uint32_t*, int);
+
+#define NAME_WRAP(x) x
+#include "SkBitmapProcState_filter.h"
+#include "SkBitmapProcState_procs.h"
+
+SkBitmapProcInfo::SkBitmapProcInfo(const SkBitmapProvider& provider,
+ SkShader::TileMode tmx, SkShader::TileMode tmy,
+ SkSourceGammaTreatment treatment)
+ : fProvider(provider)
+ , fTileModeX(tmx)
+ , fTileModeY(tmy)
+ , fSrcGammaTreatment(treatment)
+ , fBMState(nullptr)
+{}
+
+SkBitmapProcInfo::SkBitmapProcInfo(const SkBitmap& bm,
+ SkShader::TileMode tmx, SkShader::TileMode tmy,
+ SkSourceGammaTreatment treatment)
+ : fProvider(SkBitmapProvider(bm))
+ , fTileModeX(tmx)
+ , fTileModeY(tmy)
+ , fSrcGammaTreatment(treatment)
+ , fBMState(nullptr)
+{}
+
+SkBitmapProcInfo::~SkBitmapProcInfo() {
+ SkInPlaceDeleteCheck(fBMState, fBMStateStorage.get());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// true iff the matrix contains, at most, scale and translate elements
+static bool matrix_only_scale_translate(const SkMatrix& m) {
+ return m.getType() <= (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask);
+}
+
+/**
+ * For the purposes of drawing bitmaps, if a matrix is "almost" translate
+ * go ahead and treat it as if it were, so that subsequent code can go fast.
+ */
+static bool just_trans_clamp(const SkMatrix& matrix, const SkPixmap& pixmap) {
+ SkASSERT(matrix_only_scale_translate(matrix));
+
+ if (matrix.getType() & SkMatrix::kScale_Mask) {
+ SkRect dst;
+ SkRect src = SkRect::Make(pixmap.bounds());
+
+ // Can't call mapRect(), since that will fix up inverted rectangles,
+ // e.g. when scale is negative, and we don't want to return true for
+ // those.
+ matrix.mapPoints(SkTCast<SkPoint*>(&dst),
+ SkTCast<const SkPoint*>(&src),
+ 2);
+
+ // Now round all 4 edges to device space, and then compare the device
+ // width/height to the original. Note: we must map all 4 and subtract
+ // rather than map the "width" and compare, since we care about the
+ // phase (in pixel space) that any translate in the matrix might impart.
+ SkIRect idst;
+ dst.round(&idst);
+ return idst.width() == pixmap.width() && idst.height() == pixmap.height();
+ }
+ // if we got here, we're either kTranslate_Mask or identity
+ return true;
+}
+
+static bool just_trans_general(const SkMatrix& matrix) {
+ SkASSERT(matrix_only_scale_translate(matrix));
+
+ if (matrix.getType() & SkMatrix::kScale_Mask) {
+ const SkScalar tol = SK_Scalar1 / 32768;
+
+ if (!SkScalarNearlyZero(matrix[SkMatrix::kMScaleX] - SK_Scalar1, tol)) {
+ return false;
+ }
+ if (!SkScalarNearlyZero(matrix[SkMatrix::kMScaleY] - SK_Scalar1, tol)) {
+ return false;
+ }
+ }
+ // if we got here, treat us as either kTranslate_Mask or identity
+ return true;
+}
+
+static bool valid_for_filtering(unsigned dimension) {
+ // for filtering, width and height must fit in 14 bits, since we steal
+ // 2 bits from each to store our 4-bit subpixel data
+ return (dimension & ~0x3FFF) == 0;
+}
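+// A filtered coordinate is packed into a single uint32_t as (index0 << 18) | (subpixel << 14) |
+// index1, which is why each index must fit in 14 bits; see check_scale_filter() and
+// S32_D32_constX_shaderproc() below for the matching unpack (>> 18, (>> 14) & 0xF, & 0x3FFF).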
+
+bool SkBitmapProcInfo::init(const SkMatrix& inv, const SkPaint& paint) {
+ const int origW = fProvider.info().width();
+ const int origH = fProvider.info().height();
+
+ fPixmap.reset();
+ fInvMatrix = inv;
+ fFilterQuality = paint.getFilterQuality();
+
+ bool allow_ignore_fractional_translate = true; // historical default
+ if (kMedium_SkFilterQuality == fFilterQuality) {
+ allow_ignore_fractional_translate = false;
+ }
+
+ SkDefaultBitmapController controller(fSrcGammaTreatment);
+ fBMState = controller.requestBitmap(fProvider, inv, paint.getFilterQuality(),
+ fBMStateStorage.get(), fBMStateStorage.size());
+ // Note: we allow the controller to return an empty (zero-dimension) result. Should we?
+ if (nullptr == fBMState || fBMState->pixmap().info().isEmpty()) {
+ return false;
+ }
+ fPixmap = fBMState->pixmap();
+ fInvMatrix = fBMState->invMatrix();
+ fRealInvMatrix = fBMState->invMatrix();
+ fPaintColor = paint.getColor();
+ fFilterQuality = fBMState->quality();
+ SkASSERT(fPixmap.addr());
+
+ bool trivialMatrix = (fInvMatrix.getType() & ~SkMatrix::kTranslate_Mask) == 0;
+ bool clampClamp = SkShader::kClamp_TileMode == fTileModeX &&
+ SkShader::kClamp_TileMode == fTileModeY;
+
+ // Most of the scanline procs deal with "unit" texture coordinates, as this
+ // makes it easy to perform tiling modes (repeat = (x & 0xFFFF)). To generate
+ // those, we divide the matrix by its dimensions here.
+ //
+ // We don't do this if we're either trivial (can ignore the matrix) or clamping
+ // in both X and Y since clamping to width,height is just as easy as to 0xFFFF.
+
+ if (!(clampClamp || trivialMatrix)) {
+ fInvMatrix.postIDiv(fPixmap.width(), fPixmap.height());
+ }
+
+ // Now that all possible changes to the matrix have taken place, check
+ // to see if we're really close to a no-scale matrix. If so, explicitly
+ // set it to be so. Subsequent code may inspect this matrix to choose
+ // a faster path in this case.
+
+ // This code will only execute if the matrix has some scale component;
+ // if it's already pure translate then we won't do this inversion.
+
+ if (matrix_only_scale_translate(fInvMatrix)) {
+ SkMatrix forward;
+ if (fInvMatrix.invert(&forward)) {
+ if ((clampClamp && allow_ignore_fractional_translate)
+ ? just_trans_clamp(forward, fPixmap)
+ : just_trans_general(forward)) {
+ fInvMatrix.setTranslate(-forward.getTranslateX(), -forward.getTranslateY());
+ }
+ }
+ }
+
+ fInvType = fInvMatrix.getType();
+
+ // If our target pixmap is the same as the original, then we revert back to legacy behavior
+ // and allow the code to ignore fractional translate.
+ //
+ // The width/height check allows allow_ignore_fractional_translate to stay false if we
+ // previously set it that way (e.g. we started in kMedium).
+ //
+ if (fPixmap.width() == origW && fPixmap.height() == origH) {
+ allow_ignore_fractional_translate = true;
+ }
+
+ if (kLow_SkFilterQuality == fFilterQuality && allow_ignore_fractional_translate) {
+ // Only try bilerp if the matrix is "interesting" and
+ // the image has a suitable size.
+
+ if (fInvType <= SkMatrix::kTranslate_Mask ||
+ !valid_for_filtering(fPixmap.width() | fPixmap.height()))
+ {
+ fFilterQuality = kNone_SkFilterQuality;
+ }
+ }
+
+ return true;
+}
+
+/*
+ * Analyze filter-quality and matrix, and decide how to implement that.
+ *
+ * In general, we cascade down the request level [ High ... None ]
+ * - for a given level, if we can fulfill it, fine, else
+ * - else we downgrade to the next lower level and try again.
+ * We can always fulfill requests for Low and None
+ * - sometimes we will "ignore" Low and give None, but this is likely a legacy perf hack
+ * and may be removed.
+ */
+bool SkBitmapProcState::chooseProcs() {
+ fInvProc = fInvMatrix.getMapXYProc();
+ fInvSx = SkScalarToFixed(fInvMatrix.getScaleX());
+ fInvSxFractionalInt = SkScalarToFractionalInt(fInvMatrix.getScaleX());
+ fInvKy = SkScalarToFixed(fInvMatrix.getSkewY());
+ fInvKyFractionalInt = SkScalarToFractionalInt(fInvMatrix.getSkewY());
+
+ fAlphaScale = SkAlpha255To256(SkColorGetA(fPaintColor));
+
+ fShaderProc32 = nullptr;
+ fShaderProc16 = nullptr;
+ fSampleProc32 = nullptr;
+
+ const bool trivialMatrix = (fInvMatrix.getType() & ~SkMatrix::kTranslate_Mask) == 0;
+ const bool clampClamp = SkShader::kClamp_TileMode == fTileModeX &&
+ SkShader::kClamp_TileMode == fTileModeY;
+
+ return this->chooseScanlineProcs(trivialMatrix, clampClamp);
+}
+
+bool SkBitmapProcState::chooseScanlineProcs(bool trivialMatrix, bool clampClamp) {
+ fMatrixProc = this->chooseMatrixProc(trivialMatrix);
+ // TODO(dominikg): SkASSERT(fMatrixProc) instead? chooseMatrixProc never returns nullptr.
+ if (nullptr == fMatrixProc) {
+ return false;
+ }
+
+ ///////////////////////////////////////////////////////////////////////
+
+ const SkAlphaType at = fPixmap.alphaType();
+
+ // No need to do this if we're doing HQ sampling; if filter quality is
+ // still set to HQ by the time we get here, then we must have installed
+ // the shader procs above and can skip all this.
+
+ if (fFilterQuality < kHigh_SkFilterQuality) {
+
+ int index = 0;
+ if (fAlphaScale < 256) { // note: this distinction is not used for D16
+ index |= 1;
+ }
+ if (fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask)) {
+ index |= 2;
+ }
+ if (fFilterQuality > kNone_SkFilterQuality) {
+ index |= 4;
+ }
+ // bits 3,4,5 encoding the source bitmap format
+ switch (fPixmap.colorType()) {
+ case kN32_SkColorType:
+ if (kPremul_SkAlphaType != at && kOpaque_SkAlphaType != at) {
+ return false;
+ }
+ index |= 0;
+ break;
+ case kRGB_565_SkColorType:
+ index |= 8;
+ break;
+ case kIndex_8_SkColorType:
+ if (kPremul_SkAlphaType != at && kOpaque_SkAlphaType != at) {
+ return false;
+ }
+ index |= 16;
+ break;
+ case kARGB_4444_SkColorType:
+ if (kPremul_SkAlphaType != at && kOpaque_SkAlphaType != at) {
+ return false;
+ }
+ index |= 24;
+ break;
+ case kAlpha_8_SkColorType:
+ index |= 32;
+ fPaintPMColor = SkPreMultiplyColor(fPaintColor);
+ break;
+ case kGray_8_SkColorType:
+ index |= 40;
+ fPaintPMColor = SkPreMultiplyColor(fPaintColor);
+ break;
+ default:
+ // TODO(dominikg): Should we ever get here? SkASSERT(false) instead?
+ return false;
+ }
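+ // Example: a kN32 source drawn with full paint alpha (fAlphaScale == 256), a
+ // scale/translate-only matrix, and bilerp filtering yields index 0 | 2 | 4 = 6,
+ // which selects S32_opaque_D32_filter_DX from the table below.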
+
+#if !defined(SK_ARM_HAS_NEON) || defined(SK_ARM_HAS_OPTIONAL_NEON)
+ static const SampleProc32 gSkBitmapProcStateSample32[] = {
+ S32_opaque_D32_nofilter_DXDY,
+ S32_alpha_D32_nofilter_DXDY,
+ S32_opaque_D32_nofilter_DX,
+ S32_alpha_D32_nofilter_DX,
+ S32_opaque_D32_filter_DXDY,
+ S32_alpha_D32_filter_DXDY,
+ S32_opaque_D32_filter_DX,
+ S32_alpha_D32_filter_DX,
+
+ S16_opaque_D32_nofilter_DXDY,
+ S16_alpha_D32_nofilter_DXDY,
+ S16_opaque_D32_nofilter_DX,
+ S16_alpha_D32_nofilter_DX,
+ S16_opaque_D32_filter_DXDY,
+ S16_alpha_D32_filter_DXDY,
+ S16_opaque_D32_filter_DX,
+ S16_alpha_D32_filter_DX,
+
+ SI8_opaque_D32_nofilter_DXDY,
+ SI8_alpha_D32_nofilter_DXDY,
+ SI8_opaque_D32_nofilter_DX,
+ SI8_alpha_D32_nofilter_DX,
+ SI8_opaque_D32_filter_DXDY,
+ SI8_alpha_D32_filter_DXDY,
+ SI8_opaque_D32_filter_DX,
+ SI8_alpha_D32_filter_DX,
+
+ S4444_opaque_D32_nofilter_DXDY,
+ S4444_alpha_D32_nofilter_DXDY,
+ S4444_opaque_D32_nofilter_DX,
+ S4444_alpha_D32_nofilter_DX,
+ S4444_opaque_D32_filter_DXDY,
+ S4444_alpha_D32_filter_DXDY,
+ S4444_opaque_D32_filter_DX,
+ S4444_alpha_D32_filter_DX,
+
+ // A8 treats alpha/opaque the same (equally efficient)
+ SA8_alpha_D32_nofilter_DXDY,
+ SA8_alpha_D32_nofilter_DXDY,
+ SA8_alpha_D32_nofilter_DX,
+ SA8_alpha_D32_nofilter_DX,
+ SA8_alpha_D32_filter_DXDY,
+ SA8_alpha_D32_filter_DXDY,
+ SA8_alpha_D32_filter_DX,
+ SA8_alpha_D32_filter_DX,
+
+ // todo: possibly specialize on opaqueness
+ SG8_alpha_D32_nofilter_DXDY,
+ SG8_alpha_D32_nofilter_DXDY,
+ SG8_alpha_D32_nofilter_DX,
+ SG8_alpha_D32_nofilter_DX,
+ SG8_alpha_D32_filter_DXDY,
+ SG8_alpha_D32_filter_DXDY,
+ SG8_alpha_D32_filter_DX,
+ SG8_alpha_D32_filter_DX
+ };
+#endif
+
+ fSampleProc32 = SK_ARM_NEON_WRAP(gSkBitmapProcStateSample32)[index];
+
+ fShaderProc32 = this->chooseShaderProc32();
+ if (nullptr == fShaderProc32) {
+ // our special-case shaderprocs
+ if (SK_ARM_NEON_WRAP(SI8_opaque_D32_filter_DX) == fSampleProc32 && clampClamp) {
+ fShaderProc32 = SK_ARM_NEON_WRAP(Clamp_SI8_opaque_D32_filter_DX_shaderproc);
+ } else if (S32_opaque_D32_nofilter_DX == fSampleProc32 && clampClamp) {
+ fShaderProc32 = Clamp_S32_opaque_D32_nofilter_DX_shaderproc;
+ }
+ }
+ }
+
+ // see if our platform has any accelerated overrides
+ this->platformProcs();
+
+ return true;
+}
+
+static void Clamp_S32_D32_nofilter_trans_shaderproc(const void* sIn,
+ int x, int y,
+ SkPMColor* SK_RESTRICT colors,
+ int count) {
+ const SkBitmapProcState& s = *static_cast<const SkBitmapProcState*>(sIn);
+ SkASSERT(((s.fInvType & ~SkMatrix::kTranslate_Mask)) == 0);
+ SkASSERT(s.fInvKy == 0);
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(kNone_SkFilterQuality == s.fFilterQuality);
+
+ const int maxX = s.fPixmap.width() - 1;
+ const int maxY = s.fPixmap.height() - 1;
+ int ix = s.fFilterOneX + x;
+ int iy = SkClampMax(s.fFilterOneY + y, maxY);
+ const SkPMColor* row = s.fPixmap.addr32(0, iy);
+
+ // clamp to the left
+ if (ix < 0) {
+ int n = SkMin32(-ix, count);
+ sk_memset32(colors, row[0], n);
+ count -= n;
+ if (0 == count) {
+ return;
+ }
+ colors += n;
+ SkASSERT(-ix == n);
+ ix = 0;
+ }
+ // copy the middle
+ if (ix <= maxX) {
+ int n = SkMin32(maxX - ix + 1, count);
+ memcpy(colors, row + ix, n * sizeof(SkPMColor));
+ count -= n;
+ if (0 == count) {
+ return;
+ }
+ colors += n;
+ }
+ SkASSERT(count > 0);
+ // clamp to the right
+ sk_memset32(colors, row[maxX], count);
+}
+
+static inline int sk_int_mod(int x, int n) {
+ SkASSERT(n > 0);
+ if ((unsigned)x >= (unsigned)n) {
+ if (x < 0) {
+ x = n + ~(~x % n);
+ } else {
+ x = x % n;
+ }
+ }
+ return x;
+}
+
+static inline int sk_int_mirror(int x, int n) {
+ x = sk_int_mod(x, 2 * n);
+ if (x >= n) {
+ x = n + ~(x - n);
+ }
+ return x;
+}
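+// Examples: sk_int_mod(-3, 4) == 1 (wraps like repeat tiling), and sk_int_mirror(5, 4) == 2,
+// matching the mirrored index sequence 0,1,2,3,3,2,1,0 for n == 4.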
+
+static void Repeat_S32_D32_nofilter_trans_shaderproc(const void* sIn,
+ int x, int y,
+ SkPMColor* SK_RESTRICT colors,
+ int count) {
+ const SkBitmapProcState& s = *static_cast<const SkBitmapProcState*>(sIn);
+ SkASSERT(((s.fInvType & ~SkMatrix::kTranslate_Mask)) == 0);
+ SkASSERT(s.fInvKy == 0);
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(kNone_SkFilterQuality == s.fFilterQuality);
+
+ const int stopX = s.fPixmap.width();
+ const int stopY = s.fPixmap.height();
+ int ix = s.fFilterOneX + x;
+ int iy = sk_int_mod(s.fFilterOneY + y, stopY);
+ const SkPMColor* row = s.fPixmap.addr32(0, iy);
+
+ ix = sk_int_mod(ix, stopX);
+ for (;;) {
+ int n = SkMin32(stopX - ix, count);
+ memcpy(colors, row + ix, n * sizeof(SkPMColor));
+ count -= n;
+ if (0 == count) {
+ return;
+ }
+ colors += n;
+ ix = 0;
+ }
+}
+
+static void S32_D32_constX_shaderproc(const void* sIn,
+ int x, int y,
+ SkPMColor* SK_RESTRICT colors,
+ int count) {
+ const SkBitmapProcState& s = *static_cast<const SkBitmapProcState*>(sIn);
+ SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask)) == 0);
+ SkASSERT(s.fInvKy == 0);
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(1 == s.fPixmap.width());
+
+ int iY0;
+ int iY1 SK_INIT_TO_AVOID_WARNING;
+ int iSubY SK_INIT_TO_AVOID_WARNING;
+
+ if (kNone_SkFilterQuality != s.fFilterQuality) {
+ SkBitmapProcState::MatrixProc mproc = s.getMatrixProc();
+ uint32_t xy[2];
+
+ mproc(s, xy, 1, x, y);
+
+ iY0 = xy[0] >> 18;
+ iY1 = xy[0] & 0x3FFF;
+ iSubY = (xy[0] >> 14) & 0xF;
+ } else {
+ int yTemp;
+
+ if (s.fInvType > SkMatrix::kTranslate_Mask) {
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+
+ // When the matrix has a scale component the setup code in
+ // chooseProcs multiplies the inverse matrix by the inverse of the
+ // bitmap's width and height. Since this method is going to do
+ // its own tiling and sampling we need to undo that here.
+ if (SkShader::kClamp_TileMode != s.fTileModeX ||
+ SkShader::kClamp_TileMode != s.fTileModeY) {
+ yTemp = SkFractionalIntToInt(mapper.fractionalIntY() * s.fPixmap.height());
+ } else {
+ yTemp = mapper.intY();
+ }
+ } else {
+ yTemp = s.fFilterOneY + y;
+ }
+
+ const int stopY = s.fPixmap.height();
+ switch (s.fTileModeY) {
+ case SkShader::kClamp_TileMode:
+ iY0 = SkClampMax(yTemp, stopY-1);
+ break;
+ case SkShader::kRepeat_TileMode:
+ iY0 = sk_int_mod(yTemp, stopY);
+ break;
+ case SkShader::kMirror_TileMode:
+ default:
+ iY0 = sk_int_mirror(yTemp, stopY);
+ break;
+ }
+
+#ifdef SK_DEBUG
+ {
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ int iY2;
+
+ if (s.fInvType > SkMatrix::kTranslate_Mask &&
+ (SkShader::kClamp_TileMode != s.fTileModeX ||
+ SkShader::kClamp_TileMode != s.fTileModeY)) {
+ iY2 = SkFractionalIntToInt(mapper.fractionalIntY() * s.fPixmap.height());
+ } else {
+ iY2 = mapper.intY();
+ }
+
+ switch (s.fTileModeY) {
+ case SkShader::kClamp_TileMode:
+ iY2 = SkClampMax(iY2, stopY-1);
+ break;
+ case SkShader::kRepeat_TileMode:
+ iY2 = sk_int_mod(iY2, stopY);
+ break;
+ case SkShader::kMirror_TileMode:
+ default:
+ iY2 = sk_int_mirror(iY2, stopY);
+ break;
+ }
+
+ SkASSERT(iY0 == iY2);
+ }
+#endif
+ }
+
+ const SkPMColor* row0 = s.fPixmap.addr32(0, iY0);
+ SkPMColor color;
+
+ if (kNone_SkFilterQuality != s.fFilterQuality) {
+ const SkPMColor* row1 = s.fPixmap.addr32(0, iY1);
+
+ if (s.fAlphaScale < 256) {
+ Filter_32_alpha(iSubY, *row0, *row1, &color, s.fAlphaScale);
+ } else {
+ Filter_32_opaque(iSubY, *row0, *row1, &color);
+ }
+ } else {
+ if (s.fAlphaScale < 256) {
+ color = SkAlphaMulQ(*row0, s.fAlphaScale);
+ } else {
+ color = *row0;
+ }
+ }
+
+ sk_memset32(colors, color, count);
+}
+
+static void DoNothing_shaderproc(const void*, int x, int y,
+ SkPMColor* SK_RESTRICT colors, int count) {
+ // if we get called, the matrix is too tricky, so we just draw nothing
+ sk_memset32(colors, 0, count);
+}
+
+bool SkBitmapProcState::setupForTranslate() {
+ SkPoint pt;
+ const SkBitmapProcStateAutoMapper mapper(*this, 0, 0, &pt);
+
+ /*
+ * if the translate is larger than our ints, we can get random results, or
+ * worse, we might get 0x80000000, which wreaks havoc on us, since we can't
+ * negate it.
+ */
+ const SkScalar too_big = SkIntToScalar(1 << 30);
+ if (SkScalarAbs(pt.fX) > too_big || SkScalarAbs(pt.fY) > too_big) {
+ return false;
+ }
+
+ // Since we know we're not filtered, we re-purpose these fields to allow
+ // us to go from device -> src coordinates with just an integer add,
+ // rather than running through the inverse-matrix
+ fFilterOneX = mapper.intX();
+ fFilterOneY = mapper.intY();
+
+ return true;
+}
+
+SkBitmapProcState::ShaderProc32 SkBitmapProcState::chooseShaderProc32() {
+
+ if (kN32_SkColorType != fPixmap.colorType()) {
+ return nullptr;
+ }
+
+ static const unsigned kMask = SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask;
+
+ if (1 == fPixmap.width() && 0 == (fInvType & ~kMask)) {
+ if (kNone_SkFilterQuality == fFilterQuality &&
+ fInvType <= SkMatrix::kTranslate_Mask &&
+ !this->setupForTranslate()) {
+ return DoNothing_shaderproc;
+ }
+ return S32_D32_constX_shaderproc;
+ }
+
+ if (fAlphaScale < 256) {
+ return nullptr;
+ }
+ if (fInvType > SkMatrix::kTranslate_Mask) {
+ return nullptr;
+ }
+ if (kNone_SkFilterQuality != fFilterQuality) {
+ return nullptr;
+ }
+
+ SkShader::TileMode tx = (SkShader::TileMode)fTileModeX;
+ SkShader::TileMode ty = (SkShader::TileMode)fTileModeY;
+
+ if (SkShader::kClamp_TileMode == tx && SkShader::kClamp_TileMode == ty) {
+ if (this->setupForTranslate()) {
+ return Clamp_S32_D32_nofilter_trans_shaderproc;
+ }
+ return DoNothing_shaderproc;
+ }
+ if (SkShader::kRepeat_TileMode == tx && SkShader::kRepeat_TileMode == ty) {
+ if (this->setupForTranslate()) {
+ return Repeat_S32_D32_nofilter_trans_shaderproc;
+ }
+ return DoNothing_shaderproc;
+ }
+ return nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+
+static void check_scale_nofilter(uint32_t bitmapXY[], int count,
+ unsigned mx, unsigned my) {
+ unsigned y = *bitmapXY++;
+ SkASSERT(y < my);
+
+ const uint16_t* xptr = reinterpret_cast<const uint16_t*>(bitmapXY);
+ for (int i = 0; i < count; ++i) {
+ SkASSERT(xptr[i] < mx);
+ }
+}
+
+static void check_scale_filter(uint32_t bitmapXY[], int count,
+ unsigned mx, unsigned my) {
+ uint32_t YY = *bitmapXY++;
+ unsigned y0 = YY >> 18;
+ unsigned y1 = YY & 0x3FFF;
+ SkASSERT(y0 < my);
+ SkASSERT(y1 < my);
+
+ for (int i = 0; i < count; ++i) {
+ uint32_t XX = bitmapXY[i];
+ unsigned x0 = XX >> 18;
+ unsigned x1 = XX & 0x3FFF;
+ SkASSERT(x0 < mx);
+ SkASSERT(x1 < mx);
+ }
+}
+
+static void check_affine_nofilter(uint32_t bitmapXY[], int count,
+ unsigned mx, unsigned my) {
+ for (int i = 0; i < count; ++i) {
+ uint32_t XY = bitmapXY[i];
+ unsigned x = XY & 0xFFFF;
+ unsigned y = XY >> 16;
+ SkASSERT(x < mx);
+ SkASSERT(y < my);
+ }
+}
+
+static void check_affine_filter(uint32_t bitmapXY[], int count,
+ unsigned mx, unsigned my) {
+ for (int i = 0; i < count; ++i) {
+ uint32_t YY = *bitmapXY++;
+ unsigned y0 = YY >> 18;
+ unsigned y1 = YY & 0x3FFF;
+ SkASSERT(y0 < my);
+ SkASSERT(y1 < my);
+
+ uint32_t XX = *bitmapXY++;
+ unsigned x0 = XX >> 18;
+ unsigned x1 = XX & 0x3FFF;
+ SkASSERT(x0 < mx);
+ SkASSERT(x1 < mx);
+ }
+}
+
+void SkBitmapProcState::DebugMatrixProc(const SkBitmapProcState& state,
+ uint32_t bitmapXY[], int count,
+ int x, int y) {
+ SkASSERT(bitmapXY);
+ SkASSERT(count > 0);
+
+ state.fMatrixProc(state, bitmapXY, count, x, y);
+
+ void (*proc)(uint32_t bitmapXY[], int count, unsigned mx, unsigned my);
+
+ // There are four formats possible:
+ // scale -vs- affine
+ // filter -vs- nofilter
+ if (state.fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask)) {
+ proc = state.fFilterQuality != kNone_SkFilterQuality ?
+ check_scale_filter : check_scale_nofilter;
+ } else {
+ proc = state.fFilterQuality != kNone_SkFilterQuality ?
+ check_affine_filter : check_affine_nofilter;
+ }
+ proc(bitmapXY, count, state.fPixmap.width(), state.fPixmap.height());
+}
+
+SkBitmapProcState::MatrixProc SkBitmapProcState::getMatrixProc() const {
+ return DebugMatrixProc;
+}
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+/*
+ The storage requirements for the different matrix procs are as follows,
+ where each X or Y is 2 bytes, and N is the number of pixels/elements:
+
+ scale/translate nofilter Y(4bytes) + N * X
+ affine/perspective nofilter N * (X Y)
+ scale/translate filter Y Y + N * (X X)
+ affine/perspective filter N * (Y Y X X)
+ */
+int SkBitmapProcState::maxCountForBufferSize(size_t bufferSize) const {
+ int32_t size = static_cast<int32_t>(bufferSize);
+
+ size &= ~3; // only care about 4-byte aligned chunks
+ if (fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask)) {
+ size -= 4; // the shared Y (or YY) coordinate
+ if (size < 0) {
+ size = 0;
+ }
+ size >>= 1;
+ } else {
+ size >>= 2;
+ }
+
+ if (fFilterQuality != kNone_SkFilterQuality) {
+ size >>= 1;
+ }
+
+ return size;
+}
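+// Worked example: BitmapProcShaderContext::shadeSpan (in SkBitmapProcShader.cpp) uses a 128-entry
+// uint32_t buffer (512 bytes). A filtered scale/translate matrix allows
+// ((512 & ~3) - 4) >> 1 >> 1 = 127 pixels per pass; a filtered affine/perspective matrix allows
+// 512 >> 2 >> 1 = 64.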
+
+///////////////////////
+
+void Clamp_S32_opaque_D32_nofilter_DX_shaderproc(const void* sIn, int x, int y,
+ SkPMColor* SK_RESTRICT dst, int count) {
+ const SkBitmapProcState& s = *static_cast<const SkBitmapProcState*>(sIn);
+ SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
+ SkMatrix::kScale_Mask)) == 0);
+
+ const unsigned maxX = s.fPixmap.width() - 1;
+ SkFractionalInt fx;
+ int dstY;
+ {
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ const unsigned maxY = s.fPixmap.height() - 1;
+ dstY = SkClampMax(mapper.intY(), maxY);
+ fx = mapper.fractionalIntX();
+ }
+
+ const SkPMColor* SK_RESTRICT src = s.fPixmap.addr32(0, dstY);
+ const SkFractionalInt dx = s.fInvSxFractionalInt;
+
+ // Check if we're safely inside [0...maxX] so no need to clamp each computed index.
+ //
+ if ((uint64_t)SkFractionalIntToInt(fx) <= maxX &&
+ (uint64_t)SkFractionalIntToInt(fx + dx * (count - 1)) <= maxX)
+ {
+ int count4 = count >> 2;
+ for (int i = 0; i < count4; ++i) {
+ SkPMColor src0 = src[SkFractionalIntToInt(fx)]; fx += dx;
+ SkPMColor src1 = src[SkFractionalIntToInt(fx)]; fx += dx;
+ SkPMColor src2 = src[SkFractionalIntToInt(fx)]; fx += dx;
+ SkPMColor src3 = src[SkFractionalIntToInt(fx)]; fx += dx;
+ dst[0] = src0;
+ dst[1] = src1;
+ dst[2] = src2;
+ dst[3] = src3;
+ dst += 4;
+ }
+ for (int i = (count4 << 2); i < count; ++i) {
+ unsigned index = SkFractionalIntToInt(fx);
+ SkASSERT(index <= maxX);
+ *dst++ = src[index];
+ fx += dx;
+ }
+ } else {
+ for (int i = 0; i < count; ++i) {
+ dst[i] = src[SkClampMax(SkFractionalIntToInt(fx), maxX)];
+ fx += dx;
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkBitmapProcState.h b/gfx/skia/skia/src/core/SkBitmapProcState.h
new file mode 100644
index 000000000..e2e4f9695
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapProcState.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapProcState_DEFINED
+#define SkBitmapProcState_DEFINED
+
+#include "SkBitmap.h"
+#include "SkBitmapController.h"
+#include "SkBitmapFilter.h"
+#include "SkBitmapProvider.h"
+#include "SkFloatBits.h"
+#include "SkMatrix.h"
+#include "SkMipMap.h"
+#include "SkPaint.h"
+#include "SkShader.h"
+#include "SkTemplates.h"
+
+typedef SkFixed3232 SkFractionalInt;
+#define SkScalarToFractionalInt(x) SkScalarToFixed3232(x)
+#define SkFractionalIntToFixed(x) SkFixed3232ToFixed(x)
+#define SkFixedToFractionalInt(x) SkFixedToFixed3232(x)
+#define SkFractionalIntToInt(x) SkFixed3232ToInt(x)
+
+class SkPaint;
+
+struct SkBitmapProcInfo {
+ SkBitmapProcInfo(const SkBitmapProvider&, SkShader::TileMode tmx, SkShader::TileMode tmy,
+ SkSourceGammaTreatment);
+ SkBitmapProcInfo(const SkBitmap&, SkShader::TileMode tmx, SkShader::TileMode tmy,
+ SkSourceGammaTreatment);
+ ~SkBitmapProcInfo();
+
+ const SkBitmapProvider fProvider;
+
+ SkPixmap fPixmap;
+ SkMatrix fInvMatrix; // This changes based on tile mode.
+ // TODO: combine fInvMatrix and fRealInvMatrix.
+ SkMatrix fRealInvMatrix; // The actual inverse matrix.
+ SkColor fPaintColor;
+ SkShader::TileMode fTileModeX;
+ SkShader::TileMode fTileModeY;
+ SkFilterQuality fFilterQuality;
+ SkMatrix::TypeMask fInvType;
+ SkSourceGammaTreatment fSrcGammaTreatment;
+
+ bool init(const SkMatrix& inverse, const SkPaint&);
+
+private:
+ enum {
+ kBMStateSize = 136 // found by inspection. if too small, we will call new/delete
+ };
+ SkAlignedSStorage<kBMStateSize> fBMStateStorage;
+ SkBitmapController::State* fBMState;
+};
+
+struct SkBitmapProcState : public SkBitmapProcInfo {
+ SkBitmapProcState(const SkBitmapProvider& prov, SkShader::TileMode tmx, SkShader::TileMode tmy,
+ SkSourceGammaTreatment treatment)
+ : SkBitmapProcInfo(prov, tmx, tmy, treatment) {}
+ SkBitmapProcState(const SkBitmap& bitmap, SkShader::TileMode tmx, SkShader::TileMode tmy,
+ SkSourceGammaTreatment treatment)
+ : SkBitmapProcInfo(bitmap, tmx, tmy, treatment) {}
+
+ bool setup(const SkMatrix& inv, const SkPaint& paint) {
+ return this->init(inv, paint) && this->chooseProcs();
+ }
+
+ typedef void (*ShaderProc32)(const void* ctx, int x, int y, SkPMColor[], int count);
+
+ typedef void (*ShaderProc16)(const void* ctx, int x, int y, uint16_t[], int count);
+
+ typedef void (*MatrixProc)(const SkBitmapProcState&,
+ uint32_t bitmapXY[],
+ int count,
+ int x, int y);
+
+ typedef void (*SampleProc32)(const SkBitmapProcState&,
+ const uint32_t[],
+ int count,
+ SkPMColor colors[]);
+
+ typedef U16CPU (*FixedTileProc)(SkFixed); // returns 0..0xFFFF
+ typedef U16CPU (*FixedTileLowBitsProc)(SkFixed, int); // returns 0..0xF
+ typedef U16CPU (*IntTileProc)(int value, int count); // returns 0..count-1
+
+ SkMatrix::MapXYProc fInvProc; // chooseProcs
+ SkFractionalInt fInvSxFractionalInt;
+ SkFractionalInt fInvKyFractionalInt;
+
+ FixedTileProc fTileProcX; // chooseProcs
+ FixedTileProc fTileProcY; // chooseProcs
+ FixedTileLowBitsProc fTileLowBitsProcX; // chooseProcs
+ FixedTileLowBitsProc fTileLowBitsProcY; // chooseProcs
+ IntTileProc fIntTileProcY; // chooseProcs
+ SkFixed fFilterOneX;
+ SkFixed fFilterOneY;
+
+ SkFixed fInvSx; // chooseProcs
+ SkFixed fInvKy; // chooseProcs
+ SkPMColor fPaintPMColor; // chooseProcs - A8 config
+ uint16_t fAlphaScale; // chooseProcs
+
+ /** Platforms implement this, and can optionally overwrite only the
+ following fields:
+
+ fShaderProc32
+ fShaderProc16
+ fMatrixProc
+        fSampleProc32
+
+ They will already have valid function pointers, so a platform that does
+ not have an accelerated version can just leave that field as is. A valid
+ implementation can do nothing (see SkBitmapProcState_opts_none.cpp)
+ */
+ void platformProcs();
+
+ /** Given the byte size of the index buffer to be passed to the matrix proc,
+ return the maximum number of resulting pixels that can be computed
+ (i.e. the number of SkPMColor values to be written by the sample proc).
+ This routine takes into account that filtering and scale-vs-affine
+ affect the amount of buffer space needed.
+
+ Only valid to call after chooseProcs (setContext) has been called. It is
+ safe to call this inside the shader's shadeSpan() method.
+ */
+ int maxCountForBufferSize(size_t bufferSize) const;
+
+ // If a shader proc is present, then the corresponding matrix/sample procs
+ // are ignored
+ ShaderProc32 getShaderProc32() const { return fShaderProc32; }
+ ShaderProc16 getShaderProc16() const { return fShaderProc16; }
+
+#ifdef SK_DEBUG
+ MatrixProc getMatrixProc() const;
+#else
+ MatrixProc getMatrixProc() const { return fMatrixProc; }
+#endif
+ SampleProc32 getSampleProc32() const { return fSampleProc32; }
+
+private:
+ ShaderProc32 fShaderProc32; // chooseProcs
+ ShaderProc16 fShaderProc16; // chooseProcs
+ // These are used if the shaderproc is nullptr
+ MatrixProc fMatrixProc; // chooseProcs
+ SampleProc32 fSampleProc32; // chooseProcs
+
+ MatrixProc chooseMatrixProc(bool trivial_matrix);
+ bool chooseProcs(); // caller must have called init() first (on our base-class)
+ bool chooseScanlineProcs(bool trivialMatrix, bool clampClamp);
+ ShaderProc32 chooseShaderProc32();
+
+ // Return false if we failed to setup for fast translate (e.g. overflow)
+ bool setupForTranslate();
+
+#ifdef SK_DEBUG
+ static void DebugMatrixProc(const SkBitmapProcState&,
+ uint32_t[], int count, int x, int y);
+#endif
+};
+
+/* Macros for packing and unpacking pairs of 16bit values in a 32bit uint.
+ Used to allow access to a stream of uint16_t either one at a time, or
+ 2 at a time by unpacking a uint32_t
+ */
+#ifdef SK_CPU_BENDIAN
+ #define PACK_TWO_SHORTS(pri, sec) ((pri) << 16 | (sec))
+ #define UNPACK_PRIMARY_SHORT(packed) ((uint32_t)(packed) >> 16)
+ #define UNPACK_SECONDARY_SHORT(packed) ((packed) & 0xFFFF)
+#else
+ #define PACK_TWO_SHORTS(pri, sec) ((pri) | ((sec) << 16))
+ #define UNPACK_PRIMARY_SHORT(packed) ((packed) & 0xFFFF)
+ #define UNPACK_SECONDARY_SHORT(packed) ((uint32_t)(packed) >> 16)
+#endif
+
+#ifdef SK_DEBUG
+ static inline uint32_t pack_two_shorts(U16CPU pri, U16CPU sec) {
+ SkASSERT((uint16_t)pri == pri);
+ SkASSERT((uint16_t)sec == sec);
+ return PACK_TWO_SHORTS(pri, sec);
+ }
+#else
+ #define pack_two_shorts(pri, sec) PACK_TWO_SHORTS(pri, sec)
+#endif
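+
+// A small illustration of the round-trip (little-endian layout assumed):
+//   uint32_t packed = pack_two_shorts(3, 7);   // 0x00070003
+//   UNPACK_PRIMARY_SHORT(packed);              // == 3
+//   UNPACK_SECONDARY_SHORT(packed);            // == 7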
+
+// These functions are generated via macros, but are exposed here so that
+// platformProcs may test for them by name.
+void S32_opaque_D32_filter_DX(const SkBitmapProcState& s, const uint32_t xy[],
+ int count, SkPMColor colors[]);
+void S32_alpha_D32_filter_DX(const SkBitmapProcState& s, const uint32_t xy[],
+ int count, SkPMColor colors[]);
+void S32_opaque_D32_filter_DXDY(const SkBitmapProcState& s,
+ const uint32_t xy[], int count, SkPMColor colors[]);
+void S32_alpha_D32_filter_DXDY(const SkBitmapProcState& s,
+ const uint32_t xy[], int count, SkPMColor colors[]);
+void ClampX_ClampY_filter_scale(const SkBitmapProcState& s, uint32_t xy[],
+ int count, int x, int y);
+void ClampX_ClampY_nofilter_scale(const SkBitmapProcState& s, uint32_t xy[],
+ int count, int x, int y);
+void ClampX_ClampY_filter_affine(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y);
+void ClampX_ClampY_nofilter_affine(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y);
+
+// Helper class for mapping the middle of pixel (x, y) into SkFractionalInt bitmap space.
+// Discussion:
+// Overall, this code takes a point in destination space, and uses the center of the pixel
+// at (x, y) to determine the sample point in source space. It then adjusts that sample point
+// by different amounts based on filtering and tiling.
+// This code can be broken into two main cases based on filtering:
+// * no filtering (nearest neighbor) - when using nearest neighbor filtering, all tile modes
+// reduce the sampled point by one ulp. If a point lies precisely on a half-pixel boundary
+// (XXX.5), it is forced down when positive, making 1/2 + 1/2 = .999999 instead of 1.0.
+// * filtering - in the filtering case, the code calculates the -1/2 shift for starting the
+// bilerp kernel. There is a twist; there is a big difference between clamp and the other tile
+// modes. In repeat and mirror the matrix has been reduced by an additional 1/width and 1/height
+// factor. This maps from destination space to [0, 1) (instead of source space) to allow easy
+// modulo arithmetic. This means that the -1/2 needed by bilerp is actually 1/2 * 1/width for x
+// and 1/2 * 1/height for y. This is what happens when the poorly named fFilterOne{X|Y} is
+// divided by two.
+class SkBitmapProcStateAutoMapper {
+public:
+ SkBitmapProcStateAutoMapper(const SkBitmapProcState& s, int x, int y,
+ SkPoint* scalarPoint = nullptr) {
+ SkPoint pt;
+ s.fInvProc(s.fInvMatrix,
+ SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, &pt);
+
+ SkFixed biasX, biasY;
+ if (s.fFilterQuality == kNone_SkFilterQuality) {
+ // SkFixed epsilon bias to ensure inverse-mapped bitmap coordinates are rounded
+ // consistently WRT geometry. Note that we only need the bias for positive scales:
+ // for negative scales, the rounding is intrinsically correct.
+ // We scale it to persist SkFractionalInt -> SkFixed conversions.
+ biasX = (s.fInvMatrix.getScaleX() > 0);
+ biasY = (s.fInvMatrix.getScaleY() > 0);
+ } else {
+ biasX = s.fFilterOneX >> 1;
+ biasY = s.fFilterOneY >> 1;
+ }
+
+ fX = SkScalarToFractionalInt(pt.x()) - SkFixedToFractionalInt(biasX);
+ fY = SkScalarToFractionalInt(pt.y()) - SkFixedToFractionalInt(biasY);
+
+ if (scalarPoint) {
+ scalarPoint->set(pt.x() - SkFixedToScalar(biasX),
+ pt.y() - SkFixedToScalar(biasY));
+ }
+ }
+
+ SkFractionalInt fractionalIntX() const { return fX; }
+ SkFractionalInt fractionalIntY() const { return fY; }
+
+ SkFixed fixedX() const { return SkFractionalIntToFixed(fX); }
+ SkFixed fixedY() const { return SkFractionalIntToFixed(fY); }
+
+ int intX() const { return SkFractionalIntToInt(fX); }
+ int intY() const { return SkFractionalIntToInt(fY); }
+
+private:
+ SkFractionalInt fX, fY;
+};
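+
+// Typical usage, sketched from the pattern in Clamp_S32_opaque_D32_nofilter_DX_shaderproc:
+//   const SkBitmapProcStateAutoMapper mapper(s, x, y);
+//   int dstY = SkClampMax(mapper.intY(), s.fPixmap.height() - 1);
+//   SkFractionalInt fx = mapper.fractionalIntX();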
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBitmapProcState_filter.h b/gfx/skia/skia/src/core/SkBitmapProcState_filter.h
new file mode 100644
index 000000000..dfc18d8a2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapProcState_filter.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkColorPriv.h"
+
+/*
+ Filter_32_opaque
+
+ There is no hard-n-fast rule that the filtering must produce
+ exact results for the color components, but if the 4 incoming colors are
+ all opaque, then the output color must also be opaque. Subsequent parts of
+ the drawing pipeline may rely on this (e.g. which blitrow proc to use).
+ */
+
+static inline void Filter_32_opaque(unsigned x, unsigned y,
+ SkPMColor a00, SkPMColor a01,
+ SkPMColor a10, SkPMColor a11,
+ SkPMColor* dstColor) {
+ SkASSERT((unsigned)x <= 0xF);
+ SkASSERT((unsigned)y <= 0xF);
+
+ int xy = x * y;
+ const uint32_t mask = 0xFF00FF;
+
+ int scale = 256 - 16*y - 16*x + xy;
+ uint32_t lo = (a00 & mask) * scale;
+ uint32_t hi = ((a00 >> 8) & mask) * scale;
+
+ scale = 16*x - xy;
+ lo += (a01 & mask) * scale;
+ hi += ((a01 >> 8) & mask) * scale;
+
+ scale = 16*y - xy;
+ lo += (a10 & mask) * scale;
+ hi += ((a10 >> 8) & mask) * scale;
+
+ lo += (a11 & mask) * xy;
+ hi += ((a11 >> 8) & mask) * xy;
+
+ *dstColor = ((lo >> 8) & mask) | (hi & ~mask);
+}
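+
+// The four scale factors above always sum to 256. For example, with x = 4 and
+// y = 8 (so xy = 32) the weights are 96, 32, 96 and 32, which is why four
+// opaque inputs always yield an opaque output after the final >> 8.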
+
+static inline void Filter_32_alpha(unsigned x, unsigned y,
+ SkPMColor a00, SkPMColor a01,
+ SkPMColor a10, SkPMColor a11,
+ SkPMColor* dstColor,
+ unsigned alphaScale) {
+ SkASSERT((unsigned)x <= 0xF);
+ SkASSERT((unsigned)y <= 0xF);
+ SkASSERT(alphaScale <= 256);
+
+ int xy = x * y;
+ const uint32_t mask = 0xFF00FF;
+
+ int scale = 256 - 16*y - 16*x + xy;
+ uint32_t lo = (a00 & mask) * scale;
+ uint32_t hi = ((a00 >> 8) & mask) * scale;
+
+ scale = 16*x - xy;
+ lo += (a01 & mask) * scale;
+ hi += ((a01 >> 8) & mask) * scale;
+
+ scale = 16*y - xy;
+ lo += (a10 & mask) * scale;
+ hi += ((a10 >> 8) & mask) * scale;
+
+ lo += (a11 & mask) * xy;
+ hi += ((a11 >> 8) & mask) * xy;
+
+ lo = ((lo >> 8) & mask) * alphaScale;
+ hi = ((hi >> 8) & mask) * alphaScale;
+
+ *dstColor = ((lo >> 8) & mask) | (hi & ~mask);
+}
+
+// Two color version, where we filter only along 1 axis
+static inline void Filter_32_opaque(unsigned t,
+ SkPMColor color0,
+ SkPMColor color1,
+ SkPMColor* dstColor) {
+ SkASSERT((unsigned)t <= 0xF);
+
+ const uint32_t mask = 0xFF00FF;
+
+ int scale = 256 - 16*t;
+ uint32_t lo = (color0 & mask) * scale;
+ uint32_t hi = ((color0 >> 8) & mask) * scale;
+
+ scale = 16*t;
+ lo += (color1 & mask) * scale;
+ hi += ((color1 >> 8) & mask) * scale;
+
+ *dstColor = ((lo >> 8) & mask) | (hi & ~mask);
+}
+
+// Two color version, where we filter only along 1 axis
+static inline void Filter_32_alpha(unsigned t,
+ SkPMColor color0,
+ SkPMColor color1,
+ SkPMColor* dstColor,
+ unsigned alphaScale) {
+ SkASSERT((unsigned)t <= 0xF);
+ SkASSERT(alphaScale <= 256);
+
+ const uint32_t mask = 0xFF00FF;
+
+ int scale = 256 - 16*t;
+ uint32_t lo = (color0 & mask) * scale;
+ uint32_t hi = ((color0 >> 8) & mask) * scale;
+
+ scale = 16*t;
+ lo += (color1 & mask) * scale;
+ hi += ((color1 >> 8) & mask) * scale;
+
+ lo = ((lo >> 8) & mask) * alphaScale;
+ hi = ((hi >> 8) & mask) * alphaScale;
+
+ *dstColor = ((lo >> 8) & mask) | (hi & ~mask);
+}
diff --git a/gfx/skia/skia/src/core/SkBitmapProcState_matrix.h b/gfx/skia/skia/src/core/SkBitmapProcState_matrix.h
new file mode 100644
index 000000000..7e2e44bce
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapProcState_matrix.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMath.h"
+#include "SkMathPriv.h"
+
+#define SCALE_FILTER_NAME MAKENAME(_filter_scale)
+#define AFFINE_FILTER_NAME MAKENAME(_filter_affine)
+#define PERSP_FILTER_NAME MAKENAME(_filter_persp)
+
+#define PACK_FILTER_X_NAME MAKENAME(_pack_filter_x)
+#define PACK_FILTER_Y_NAME MAKENAME(_pack_filter_y)
+
+#ifndef PREAMBLE
+ #define PREAMBLE(state)
+ #define PREAMBLE_PARAM_X
+ #define PREAMBLE_PARAM_Y
+ #define PREAMBLE_ARG_X
+ #define PREAMBLE_ARG_Y
+#endif
+
+// declare functions externally to suppress warnings.
+void SCALE_FILTER_NAME(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y);
+void AFFINE_FILTER_NAME(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y);
+void PERSP_FILTER_NAME(const SkBitmapProcState& s,
+ uint32_t* SK_RESTRICT xy, int count,
+ int x, int y);
+
+static inline uint32_t PACK_FILTER_Y_NAME(SkFixed f, unsigned max,
+ SkFixed one PREAMBLE_PARAM_Y) {
+ unsigned i = TILEY_PROCF(f, max);
+ i = (i << 4) | TILEY_LOW_BITS(f, max);
+ return (i << 14) | (TILEY_PROCF((f + one), max));
+}
+
+static inline uint32_t PACK_FILTER_X_NAME(SkFixed f, unsigned max,
+ SkFixed one PREAMBLE_PARAM_X) {
+ unsigned i = TILEX_PROCF(f, max);
+ i = (i << 4) | TILEX_LOW_BITS(f, max);
+ return (i << 14) | (TILEX_PROCF((f + one), max));
+}
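+
+// Both packers emit the 32-bit layout the filter samplers expect:
+//   coord0:14 | low-bits:4 | coord1:14
+// where coord1 is the tiled coordinate of f + one.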
+
+void SCALE_FILTER_NAME(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y) {
+ SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
+ SkMatrix::kScale_Mask)) == 0);
+ SkASSERT(s.fInvKy == 0);
+
+ PREAMBLE(s);
+
+ const unsigned maxX = s.fPixmap.width() - 1;
+ const SkFixed one = s.fFilterOneX;
+ const SkFractionalInt dx = s.fInvSxFractionalInt;
+ SkFractionalInt fx;
+
+ {
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ const SkFixed fy = mapper.fixedY();
+ const unsigned maxY = s.fPixmap.height() - 1;
+ // compute our two Y values up front
+ *xy++ = PACK_FILTER_Y_NAME(fy, maxY, s.fFilterOneY PREAMBLE_ARG_Y);
+ // now initialize fx
+ fx = mapper.fractionalIntX();
+ }
+
+#ifdef CHECK_FOR_DECAL
+ if (can_truncate_to_fixed_for_decal(fx, dx, count, maxX)) {
+ decal_filter_scale(xy, SkFractionalIntToFixed(fx),
+ SkFractionalIntToFixed(dx), count);
+ } else
+#endif
+ {
+ do {
+ SkFixed fixedFx = SkFractionalIntToFixed(fx);
+ *xy++ = PACK_FILTER_X_NAME(fixedFx, maxX, one PREAMBLE_ARG_X);
+ fx += dx;
+ } while (--count != 0);
+ }
+}
+
+void AFFINE_FILTER_NAME(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y) {
+ SkASSERT(s.fInvType & SkMatrix::kAffine_Mask);
+ SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
+ SkMatrix::kScale_Mask |
+ SkMatrix::kAffine_Mask)) == 0);
+
+ PREAMBLE(s);
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+
+ SkFixed oneX = s.fFilterOneX;
+ SkFixed oneY = s.fFilterOneY;
+ SkFixed fx = mapper.fixedX();
+ SkFixed fy = mapper.fixedY();
+ SkFixed dx = s.fInvSx;
+ SkFixed dy = s.fInvKy;
+ unsigned maxX = s.fPixmap.width() - 1;
+ unsigned maxY = s.fPixmap.height() - 1;
+
+ do {
+ *xy++ = PACK_FILTER_Y_NAME(fy, maxY, oneY PREAMBLE_ARG_Y);
+ fy += dy;
+ *xy++ = PACK_FILTER_X_NAME(fx, maxX, oneX PREAMBLE_ARG_X);
+ fx += dx;
+ } while (--count != 0);
+}
+
+void PERSP_FILTER_NAME(const SkBitmapProcState& s,
+ uint32_t* SK_RESTRICT xy, int count,
+ int x, int y) {
+ SkASSERT(s.fInvType & SkMatrix::kPerspective_Mask);
+
+ PREAMBLE(s);
+ unsigned maxX = s.fPixmap.width() - 1;
+ unsigned maxY = s.fPixmap.height() - 1;
+ SkFixed oneX = s.fFilterOneX;
+ SkFixed oneY = s.fFilterOneY;
+
+ SkPerspIter iter(s.fInvMatrix,
+ SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, count);
+
+ while ((count = iter.next()) != 0) {
+ const SkFixed* SK_RESTRICT srcXY = iter.getXY();
+ do {
+ *xy++ = PACK_FILTER_Y_NAME(srcXY[1] - (oneY >> 1), maxY,
+ oneY PREAMBLE_ARG_Y);
+ *xy++ = PACK_FILTER_X_NAME(srcXY[0] - (oneX >> 1), maxX,
+ oneX PREAMBLE_ARG_X);
+ srcXY += 2;
+ } while (--count != 0);
+ }
+}
+
+#undef MAKENAME
+#undef TILEX_PROCF
+#undef TILEY_PROCF
+#ifdef CHECK_FOR_DECAL
+ #undef CHECK_FOR_DECAL
+#endif
+
+#undef SCALE_FILTER_NAME
+#undef AFFINE_FILTER_NAME
+#undef PERSP_FILTER_NAME
+
+#undef PREAMBLE
+#undef PREAMBLE_PARAM_X
+#undef PREAMBLE_PARAM_Y
+#undef PREAMBLE_ARG_X
+#undef PREAMBLE_ARG_Y
+
+#undef TILEX_LOW_BITS
+#undef TILEY_LOW_BITS
diff --git a/gfx/skia/skia/src/core/SkBitmapProcState_matrixProcs.cpp b/gfx/skia/skia/src/core/SkBitmapProcState_matrixProcs.cpp
new file mode 100644
index 000000000..50b59b8bd
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapProcState_matrixProcs.cpp
@@ -0,0 +1,520 @@
+/*
+ * Copyright 2008 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// The copyright below was added in 2009, but I see no record of moto contributions...?
+
+/* NEON optimized code (C) COPYRIGHT 2009 Motorola
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapProcState.h"
+#include "SkPerspIter.h"
+#include "SkShader.h"
+#include "SkUtils.h"
+#include "SkUtilsArm.h"
+#include "SkBitmapProcState_utils.h"
+
+/* returns 0...(n-1) given any x (positive or negative).
+
+ As an example, if n (which is always positive) is 5...
+
+ x: -8 -7 -6 -5 -4 -3 -2 -1 0 1 2 3 4 5 6 7 8
+ returns: 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3
+ */
+static inline int sk_int_mod(int x, int n) {
+ SkASSERT(n > 0);
+ if ((unsigned)x >= (unsigned)n) {
+ if (x < 0) {
+ x = n + ~(~x % n);
+ } else {
+ x = x % n;
+ }
+ }
+ return x;
+}
+
+void decal_nofilter_scale(uint32_t dst[], SkFixed fx, SkFixed dx, int count);
+void decal_filter_scale(uint32_t dst[], SkFixed fx, SkFixed dx, int count);
+
+#include "SkBitmapProcState_matrix_template.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Compile neon code paths if needed
+#if defined(SK_ARM_HAS_NEON) || defined(SK_ARM_HAS_OPTIONAL_NEON)
+
+// These are defined in src/opts/SkBitmapProcState_matrixProcs_neon.cpp
+extern const SkBitmapProcState::MatrixProc ClampX_ClampY_Procs_neon[];
+extern const SkBitmapProcState::MatrixProc RepeatX_RepeatY_Procs_neon[];
+
+#endif // defined(SK_ARM_HAS_NEON)
+
+// Compile non-neon code path if needed
+#if !defined(SK_ARM_HAS_NEON) || defined(SK_ARM_HAS_OPTIONAL_NEON)
+#define MAKENAME(suffix) ClampX_ClampY ## suffix
+#define TILEX_PROCF(fx, max) SkClampMax((fx) >> 16, max)
+#define TILEY_PROCF(fy, max) SkClampMax((fy) >> 16, max)
+#define TILEX_LOW_BITS(fx, max) (((fx) >> 12) & 0xF)
+#define TILEY_LOW_BITS(fy, max) (((fy) >> 12) & 0xF)
+#define CHECK_FOR_DECAL
+#include "SkBitmapProcState_matrix.h"
+
+struct ClampTileProcs {
+ static unsigned X(const SkBitmapProcState&, SkFixed fx, int max) {
+ return SkClampMax(fx >> 16, max);
+ }
+ static unsigned Y(const SkBitmapProcState&, SkFixed fy, int max) {
+ return SkClampMax(fy >> 16, max);
+ }
+};
+
+// Referenced in opts_check_x86.cpp
+void ClampX_ClampY_nofilter_scale(const SkBitmapProcState& s, uint32_t xy[],
+ int count, int x, int y) {
+ return NoFilterProc_Scale<ClampTileProcs, true>(s, xy, count, x, y);
+}
+void ClampX_ClampY_nofilter_affine(const SkBitmapProcState& s, uint32_t xy[],
+ int count, int x, int y) {
+ return NoFilterProc_Affine<ClampTileProcs>(s, xy, count, x, y);
+}
+
+static SkBitmapProcState::MatrixProc ClampX_ClampY_Procs[] = {
+ // only clamp lives in the right coord space to check for decal
+ ClampX_ClampY_nofilter_scale,
+ ClampX_ClampY_filter_scale,
+ ClampX_ClampY_nofilter_affine,
+ ClampX_ClampY_filter_affine,
+ NoFilterProc_Persp<ClampTileProcs>,
+ ClampX_ClampY_filter_persp
+};
+
+#define MAKENAME(suffix) RepeatX_RepeatY ## suffix
+#define TILEX_PROCF(fx, max) SK_USHIFT16((unsigned)((fx) & 0xFFFF) * ((max) + 1))
+#define TILEY_PROCF(fy, max) SK_USHIFT16((unsigned)((fy) & 0xFFFF) * ((max) + 1))
+#define TILEX_LOW_BITS(fx, max) (((unsigned)((fx) & 0xFFFF) * ((max) + 1) >> 12) & 0xF)
+#define TILEY_LOW_BITS(fy, max) (((unsigned)((fy) & 0xFFFF) * ((max) + 1) >> 12) & 0xF)
+#include "SkBitmapProcState_matrix.h"
+
+struct RepeatTileProcs {
+ static unsigned X(const SkBitmapProcState&, SkFixed fx, int max) {
+ SkASSERT(max < 65535);
+ return SK_USHIFT16((unsigned)((fx) & 0xFFFF) * ((max) + 1));
+ }
+ static unsigned Y(const SkBitmapProcState&, SkFixed fy, int max) {
+ SkASSERT(max < 65535);
+ return SK_USHIFT16((unsigned)((fy) & 0xFFFF) * ((max) + 1));
+ }
+};
+
+static SkBitmapProcState::MatrixProc RepeatX_RepeatY_Procs[] = {
+ NoFilterProc_Scale<RepeatTileProcs, false>,
+ RepeatX_RepeatY_filter_scale,
+ NoFilterProc_Affine<RepeatTileProcs>,
+ RepeatX_RepeatY_filter_affine,
+ NoFilterProc_Persp<RepeatTileProcs>,
+ RepeatX_RepeatY_filter_persp
+};
+#endif
+
+#define MAKENAME(suffix) GeneralXY ## suffix
+#define PREAMBLE(state) SkBitmapProcState::FixedTileProc tileProcX = (state).fTileProcX; (void) tileProcX; \
+ SkBitmapProcState::FixedTileProc tileProcY = (state).fTileProcY; (void) tileProcY; \
+ SkBitmapProcState::FixedTileLowBitsProc tileLowBitsProcX = (state).fTileLowBitsProcX; (void) tileLowBitsProcX; \
+ SkBitmapProcState::FixedTileLowBitsProc tileLowBitsProcY = (state).fTileLowBitsProcY; (void) tileLowBitsProcY
+#define PREAMBLE_PARAM_X , SkBitmapProcState::FixedTileProc tileProcX, SkBitmapProcState::FixedTileLowBitsProc tileLowBitsProcX
+#define PREAMBLE_PARAM_Y , SkBitmapProcState::FixedTileProc tileProcY, SkBitmapProcState::FixedTileLowBitsProc tileLowBitsProcY
+#define PREAMBLE_ARG_X , tileProcX, tileLowBitsProcX
+#define PREAMBLE_ARG_Y , tileProcY, tileLowBitsProcY
+#define TILEX_PROCF(fx, max) SK_USHIFT16(tileProcX(fx) * ((max) + 1))
+#define TILEY_PROCF(fy, max) SK_USHIFT16(tileProcY(fy) * ((max) + 1))
+#define TILEX_LOW_BITS(fx, max) tileLowBitsProcX(fx, (max) + 1)
+#define TILEY_LOW_BITS(fy, max) tileLowBitsProcY(fy, (max) + 1)
+#include "SkBitmapProcState_matrix.h"
+
+struct GeneralTileProcs {
+ static unsigned X(const SkBitmapProcState& s, SkFixed fx, int max) {
+ return SK_USHIFT16(s.fTileProcX(fx) * ((max) + 1));
+ }
+ static unsigned Y(const SkBitmapProcState& s, SkFixed fy, int max) {
+ return SK_USHIFT16(s.fTileProcY(fy) * ((max) + 1));
+ }
+};
+
+static SkBitmapProcState::MatrixProc GeneralXY_Procs[] = {
+ NoFilterProc_Scale<GeneralTileProcs, false>,
+ GeneralXY_filter_scale,
+ NoFilterProc_Affine<GeneralTileProcs>,
+ GeneralXY_filter_affine,
+ NoFilterProc_Persp<GeneralTileProcs>,
+ GeneralXY_filter_persp
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline U16CPU fixed_clamp(SkFixed x) {
+ if (x < 0) {
+ x = 0;
+ }
+ if (x >> 16) {
+ x = 0xFFFF;
+ }
+ return x;
+}
+
+static inline U16CPU fixed_repeat(SkFixed x) {
+ return x & 0xFFFF;
+}
+
+static inline U16CPU fixed_mirror(SkFixed x) {
+ SkFixed s = SkLeftShift(x, 15) >> 31;
+ // s is FFFFFFFF if we're on an odd interval, or 0 if an even interval
+ return (x ^ s) & 0xFFFF;
+}
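+
+// For example, x = 0x18000 (1.5 in SkFixed) lies on an odd interval, so s is
+// all ones and the result is (~0x18000) & 0xFFFF == 0x7FFF, i.e. roughly 0.5:
+// the coordinate has been reflected back into the tile.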
+
+static SkBitmapProcState::FixedTileProc choose_tile_proc(unsigned m) {
+ if (SkShader::kClamp_TileMode == m) {
+ return fixed_clamp;
+ }
+ if (SkShader::kRepeat_TileMode == m) {
+ return fixed_repeat;
+ }
+ SkASSERT(SkShader::kMirror_TileMode == m);
+ return fixed_mirror;
+}
+
+static inline U16CPU fixed_clamp_lowbits(SkFixed x, int) {
+ return (x >> 12) & 0xF;
+}
+
+static inline U16CPU fixed_repeat_or_mirrow_lowbits(SkFixed x, int scale) {
+ return ((x * scale) >> 12) & 0xF;
+}
+
+static SkBitmapProcState::FixedTileLowBitsProc choose_tile_lowbits_proc(unsigned m) {
+ if (SkShader::kClamp_TileMode == m) {
+ return fixed_clamp_lowbits;
+ } else {
+ SkASSERT(SkShader::kMirror_TileMode == m ||
+ SkShader::kRepeat_TileMode == m);
+ // mirror and repeat have the same behavior for the low bits.
+ return fixed_repeat_or_mirrow_lowbits;
+ }
+}
+
+static inline U16CPU int_clamp(int x, int n) {
+ if (x >= n) {
+ x = n - 1;
+ }
+ if (x < 0) {
+ x = 0;
+ }
+ return x;
+}
+
+static inline U16CPU int_repeat(int x, int n) {
+ return sk_int_mod(x, n);
+}
+
+static inline U16CPU int_mirror(int x, int n) {
+ x = sk_int_mod(x, 2 * n);
+ if (x >= n) {
+ x = n + ~(x - n);
+ }
+ return x;
+}
+
+#if 0
+static void test_int_tileprocs() {
+ for (int i = -8; i <= 8; i++) {
+ SkDebugf(" int_mirror(%2d, 3) = %d\n", i, int_mirror(i, 3));
+ }
+}
+#endif
+
+static SkBitmapProcState::IntTileProc choose_int_tile_proc(unsigned tm) {
+ if (SkShader::kClamp_TileMode == tm)
+ return int_clamp;
+ if (SkShader::kRepeat_TileMode == tm)
+ return int_repeat;
+ SkASSERT(SkShader::kMirror_TileMode == tm);
+ return int_mirror;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+void decal_nofilter_scale(uint32_t dst[], SkFixed fx, SkFixed dx, int count) {
+ int i;
+
+ for (i = (count >> 2); i > 0; --i) {
+ *dst++ = pack_two_shorts(fx >> 16, (fx + dx) >> 16);
+ fx += dx+dx;
+ *dst++ = pack_two_shorts(fx >> 16, (fx + dx) >> 16);
+ fx += dx+dx;
+ }
+ count &= 3;
+
+ uint16_t* xx = (uint16_t*)dst;
+ for (i = count; i > 0; --i) {
+ *xx++ = SkToU16(fx >> 16); fx += dx;
+ }
+}
+
+void decal_filter_scale(uint32_t dst[], SkFixed fx, SkFixed dx, int count) {
+ if (count & 1) {
+ SkASSERT((fx >> (16 + 14)) == 0);
+ *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
+ fx += dx;
+ }
+ while ((count -= 2) >= 0) {
+ SkASSERT((fx >> (16 + 14)) == 0);
+ *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
+ fx += dx;
+
+ *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
+ fx += dx;
+ }
+}
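+
+// Each value written above uses the same x0:14 | subX:4 | x1:14 layout as the
+// general filter packers, with x1 fixed at x0 + 1 for this decal fast path.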
+
+///////////////////////////////////////////////////////////////////////////////
+// These store the same layout as SCALE, but are cheaper to compute. Also, since
+// there is no scale, we don't need/have a FILTER version.
+
+static void fill_sequential(uint16_t xptr[], int start, int count) {
+#if 1
+ if (reinterpret_cast<intptr_t>(xptr) & 0x2) {
+ *xptr++ = start++;
+ count -= 1;
+ }
+ if (count > 3) {
+ uint32_t* xxptr = reinterpret_cast<uint32_t*>(xptr);
+ uint32_t pattern0 = PACK_TWO_SHORTS(start + 0, start + 1);
+ uint32_t pattern1 = PACK_TWO_SHORTS(start + 2, start + 3);
+ start += count & ~3;
+ int qcount = count >> 2;
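+        // Each pass of the loop below writes four consecutive values (two packed
+        // pairs), so both patterns advance by 4 in each half: 0x00040004.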
+ do {
+ *xxptr++ = pattern0;
+ pattern0 += 0x40004;
+ *xxptr++ = pattern1;
+ pattern1 += 0x40004;
+ } while (--qcount != 0);
+ xptr = reinterpret_cast<uint16_t*>(xxptr);
+ count &= 3;
+ }
+ while (--count >= 0) {
+ *xptr++ = start++;
+ }
+#else
+ for (int i = 0; i < count; i++) {
+ *xptr++ = start++;
+ }
+#endif
+}
+
+static int nofilter_trans_preamble(const SkBitmapProcState& s, uint32_t** xy,
+ int x, int y) {
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ **xy = s.fIntTileProcY(mapper.intY(), s.fPixmap.height());
+ *xy += 1; // bump the ptr
+ // return our starting X position
+ return mapper.intX();
+}
+
+static void clampx_nofilter_trans(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y) {
+ SkASSERT((s.fInvType & ~SkMatrix::kTranslate_Mask) == 0);
+
+ int xpos = nofilter_trans_preamble(s, &xy, x, y);
+ const int width = s.fPixmap.width();
+ if (1 == width) {
+ // all of the following X values must be 0
+ memset(xy, 0, count * sizeof(uint16_t));
+ return;
+ }
+
+ uint16_t* xptr = reinterpret_cast<uint16_t*>(xy);
+ int n;
+
+ // fill before 0 as needed
+ if (xpos < 0) {
+ n = -xpos;
+ if (n > count) {
+ n = count;
+ }
+ memset(xptr, 0, n * sizeof(uint16_t));
+ count -= n;
+ if (0 == count) {
+ return;
+ }
+ xptr += n;
+ xpos = 0;
+ }
+
+ // fill in 0..width-1 if needed
+ if (xpos < width) {
+ n = width - xpos;
+ if (n > count) {
+ n = count;
+ }
+ fill_sequential(xptr, xpos, n);
+ count -= n;
+ if (0 == count) {
+ return;
+ }
+ xptr += n;
+ }
+
+ // fill the remaining with the max value
+ sk_memset16(xptr, width - 1, count);
+}
+
+static void repeatx_nofilter_trans(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y) {
+ SkASSERT((s.fInvType & ~SkMatrix::kTranslate_Mask) == 0);
+
+ int xpos = nofilter_trans_preamble(s, &xy, x, y);
+ const int width = s.fPixmap.width();
+ if (1 == width) {
+ // all of the following X values must be 0
+ memset(xy, 0, count * sizeof(uint16_t));
+ return;
+ }
+
+ uint16_t* xptr = reinterpret_cast<uint16_t*>(xy);
+ int start = sk_int_mod(xpos, width);
+ int n = width - start;
+ if (n > count) {
+ n = count;
+ }
+ fill_sequential(xptr, start, n);
+ xptr += n;
+ count -= n;
+
+ while (count >= width) {
+ fill_sequential(xptr, 0, width);
+ xptr += width;
+ count -= width;
+ }
+
+ if (count > 0) {
+ fill_sequential(xptr, 0, count);
+ }
+}
+
+static void fill_backwards(uint16_t xptr[], int pos, int count) {
+ for (int i = 0; i < count; i++) {
+ SkASSERT(pos >= 0);
+ xptr[i] = pos--;
+ }
+}
+
+static void mirrorx_nofilter_trans(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y) {
+ SkASSERT((s.fInvType & ~SkMatrix::kTranslate_Mask) == 0);
+
+ int xpos = nofilter_trans_preamble(s, &xy, x, y);
+ const int width = s.fPixmap.width();
+ if (1 == width) {
+ // all of the following X values must be 0
+ memset(xy, 0, count * sizeof(uint16_t));
+ return;
+ }
+
+ uint16_t* xptr = reinterpret_cast<uint16_t*>(xy);
+ // need to know our start, and our initial phase (forward or backward)
+ bool forward;
+ int n;
+ int start = sk_int_mod(xpos, 2 * width);
+ if (start >= width) {
+ start = width + ~(start - width);
+ forward = false;
+ n = start + 1; // [start .. 0]
+ } else {
+ forward = true;
+ n = width - start; // [start .. width)
+ }
+ if (n > count) {
+ n = count;
+ }
+ if (forward) {
+ fill_sequential(xptr, start, n);
+ } else {
+ fill_backwards(xptr, start, n);
+ }
+ forward = !forward;
+ xptr += n;
+ count -= n;
+
+ while (count >= width) {
+ if (forward) {
+ fill_sequential(xptr, 0, width);
+ } else {
+ fill_backwards(xptr, width - 1, width);
+ }
+ forward = !forward;
+ xptr += width;
+ count -= width;
+ }
+
+ if (count > 0) {
+ if (forward) {
+ fill_sequential(xptr, 0, count);
+ } else {
+ fill_backwards(xptr, width - 1, count);
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkBitmapProcState::MatrixProc SkBitmapProcState::chooseMatrixProc(bool trivial_matrix) {
+// test_int_tileprocs();
+ // check for our special case when there is no scale/affine/perspective
+ if (trivial_matrix && kNone_SkFilterQuality == fFilterQuality) {
+ fIntTileProcY = choose_int_tile_proc(fTileModeY);
+ switch (fTileModeX) {
+ case SkShader::kClamp_TileMode:
+ return clampx_nofilter_trans;
+ case SkShader::kRepeat_TileMode:
+ return repeatx_nofilter_trans;
+ case SkShader::kMirror_TileMode:
+ return mirrorx_nofilter_trans;
+ }
+ }
+
+ int index = 0;
+ if (fFilterQuality != kNone_SkFilterQuality) {
+ index = 1;
+ }
+ if (fInvType & SkMatrix::kPerspective_Mask) {
+ index += 4;
+ } else if (fInvType & SkMatrix::kAffine_Mask) {
+ index += 2;
+ }
+
+ if (SkShader::kClamp_TileMode == fTileModeX && SkShader::kClamp_TileMode == fTileModeY) {
+ // clamp gets special version of filterOne
+ fFilterOneX = SK_Fixed1;
+ fFilterOneY = SK_Fixed1;
+ return SK_ARM_NEON_WRAP(ClampX_ClampY_Procs)[index];
+ }
+
+ // all remaining procs use this form for filterOne
+ fFilterOneX = SK_Fixed1 / fPixmap.width();
+ fFilterOneY = SK_Fixed1 / fPixmap.height();
+
+ if (SkShader::kRepeat_TileMode == fTileModeX && SkShader::kRepeat_TileMode == fTileModeY) {
+ return SK_ARM_NEON_WRAP(RepeatX_RepeatY_Procs)[index];
+ }
+
+ fTileProcX = choose_tile_proc(fTileModeX);
+ fTileProcY = choose_tile_proc(fTileModeY);
+ fTileLowBitsProcX = choose_tile_lowbits_proc(fTileModeX);
+ fTileLowBitsProcY = choose_tile_lowbits_proc(fTileModeY);
+ return GeneralXY_Procs[index];
+}
diff --git a/gfx/skia/skia/src/core/SkBitmapProcState_matrix_template.h b/gfx/skia/skia/src/core/SkBitmapProcState_matrix_template.h
new file mode 100644
index 000000000..0c9371851
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapProcState_matrix_template.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapProcState_MatrixTemplates_DEFINED
+#define SkBitmapProcState_MatrixTemplates_DEFINED
+
+#include "SkMath.h"
+#include "SkMathPriv.h"
+
+template <typename TileProc, bool tryDecal>
+void NoFilterProc_Scale(const SkBitmapProcState& s, uint32_t xy[],
+ int count, int x, int y) {
+ SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
+ SkMatrix::kScale_Mask)) == 0);
+
+ // we store y, x, x, x, x, x
+
+ const unsigned maxX = s.fPixmap.width() - 1;
+ SkFractionalInt fx;
+ {
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ const unsigned maxY = s.fPixmap.height() - 1;
+ *xy++ = TileProc::Y(s, mapper.fixedY(), maxY);
+ fx = mapper.fractionalIntX();
+ }
+
+ if (0 == maxX) {
+ // all of the following X values must be 0
+ memset(xy, 0, count * sizeof(uint16_t));
+ return;
+ }
+
+ const SkFractionalInt dx = s.fInvSxFractionalInt;
+
+ if (tryDecal && can_truncate_to_fixed_for_decal(fx, dx, count, maxX)) {
+ decal_nofilter_scale(xy, SkFractionalIntToFixed(fx),
+ SkFractionalIntToFixed(dx), count);
+ } else {
+ int i;
+ for (i = (count >> 2); i > 0; --i) {
+ unsigned a, b;
+ a = TileProc::X(s, SkFractionalIntToFixed(fx), maxX); fx += dx;
+ b = TileProc::X(s, SkFractionalIntToFixed(fx), maxX); fx += dx;
+#ifdef SK_CPU_BENDIAN
+ *xy++ = (a << 16) | b;
+#else
+ *xy++ = (b << 16) | a;
+#endif
+ a = TileProc::X(s, SkFractionalIntToFixed(fx), maxX); fx += dx;
+ b = TileProc::X(s, SkFractionalIntToFixed(fx), maxX); fx += dx;
+#ifdef SK_CPU_BENDIAN
+ *xy++ = (a << 16) | b;
+#else
+ *xy++ = (b << 16) | a;
+#endif
+ }
+ uint16_t* xx = (uint16_t*)xy;
+ for (i = (count & 3); i > 0; --i) {
+ *xx++ = TileProc::X(s, SkFractionalIntToFixed(fx), maxX); fx += dx;
+ }
+ }
+}
+
+// note: we could special-case on a matrix which is skewed in X but not Y.
+// this would require a more general setup than SCALE does, but could use
+// SCALE's inner loop that only looks at dx
+
+template <typename TileProc>
+void NoFilterProc_Affine(const SkBitmapProcState& s, uint32_t xy[],
+ int count, int x, int y) {
+ SkASSERT(s.fInvType & SkMatrix::kAffine_Mask);
+ SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
+ SkMatrix::kScale_Mask |
+ SkMatrix::kAffine_Mask)) == 0);
+
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+
+ SkFractionalInt fx = mapper.fractionalIntX();
+ SkFractionalInt fy = mapper.fractionalIntY();
+ SkFractionalInt dx = s.fInvSxFractionalInt;
+ SkFractionalInt dy = s.fInvKyFractionalInt;
+ int maxX = s.fPixmap.width() - 1;
+ int maxY = s.fPixmap.height() - 1;
+
+ for (int i = count; i > 0; --i) {
+ *xy++ = (TileProc::Y(s, SkFractionalIntToFixed(fy), maxY) << 16) |
+ TileProc::X(s, SkFractionalIntToFixed(fx), maxX);
+ fx += dx; fy += dy;
+ }
+}
+
+template <typename TileProc>
+void NoFilterProc_Persp(const SkBitmapProcState& s, uint32_t* SK_RESTRICT xy,
+ int count, int x, int y) {
+ SkASSERT(s.fInvType & SkMatrix::kPerspective_Mask);
+
+ int maxX = s.fPixmap.width() - 1;
+ int maxY = s.fPixmap.height() - 1;
+
+ SkPerspIter iter(s.fInvMatrix,
+ SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, count);
+
+ while ((count = iter.next()) != 0) {
+ const SkFixed* SK_RESTRICT srcXY = iter.getXY();
+ while (--count >= 0) {
+ *xy++ = (TileProc::Y(s, srcXY[1], maxY) << 16) |
+ TileProc::X(s, srcXY[0], maxX);
+ srcXY += 2;
+ }
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBitmapProcState_procs.h b/gfx/skia/skia/src/core/SkBitmapProcState_procs.h
new file mode 100644
index 000000000..cec079eae
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapProcState_procs.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Define NAME_WRAP(x) before including this header to perform name-wrapping
+// E.g. for ARM NEON, define it as 'x ## _neon' to ensure all important
+// identifiers have a _neon suffix.
+#ifndef NAME_WRAP
+#error "Please define NAME_WRAP() before including this file"
+#endif
+
+// returns expanded * 5bits
+static inline uint32_t Filter_565_Expanded(unsigned x, unsigned y,
+ uint32_t a00, uint32_t a01,
+ uint32_t a10, uint32_t a11) {
+ SkASSERT((unsigned)x <= 0xF);
+ SkASSERT((unsigned)y <= 0xF);
+
+ a00 = SkExpand_rgb_16(a00);
+ a01 = SkExpand_rgb_16(a01);
+ a10 = SkExpand_rgb_16(a10);
+ a11 = SkExpand_rgb_16(a11);
+
+ int xy = x * y >> 3;
+ return a00 * (32 - 2*y - 2*x + xy) +
+ a01 * (2*x - xy) +
+ a10 * (2*y - xy) +
+ a11 * xy;
+}
+
+// turn an expanded 565 * 5bits into SkPMColor
+// g:11 | r:10 | x:1 | b:10
+static inline SkPMColor SkExpanded_565_To_PMColor(uint32_t c) {
+ unsigned r = (c >> 13) & 0xFF;
+ unsigned g = (c >> 24);
+ unsigned b = (c >> 2) & 0xFF;
+ return SkPackARGB32(0xFF, r, g, b);
+}
+
+// returns answer in SkPMColor format
+static inline SkPMColor Filter_4444_D32(unsigned x, unsigned y,
+ uint32_t a00, uint32_t a01,
+ uint32_t a10, uint32_t a11) {
+ SkASSERT((unsigned)x <= 0xF);
+ SkASSERT((unsigned)y <= 0xF);
+
+ a00 = SkExpand_4444(a00);
+ a01 = SkExpand_4444(a01);
+ a10 = SkExpand_4444(a10);
+ a11 = SkExpand_4444(a11);
+
+ int xy = x * y >> 4;
+ uint32_t result = a00 * (16 - y - x + xy) +
+ a01 * (x - xy) +
+ a10 * (y - xy) +
+ a11 * xy;
+
+ return SkCompact_8888(result);
+}
+
+static inline U8CPU Filter_8(unsigned x, unsigned y,
+ U8CPU a00, U8CPU a01,
+ U8CPU a10, U8CPU a11) {
+ SkASSERT((unsigned)x <= 0xF);
+ SkASSERT((unsigned)y <= 0xF);
+
+ int xy = x * y;
+ unsigned result = a00 * (256 - 16*y - 16*x + xy) +
+ a01 * (16*x - xy) +
+ a10 * (16*y - xy) +
+ a11 * xy;
+
+ return result >> 8;
+}
+
+/*****************************************************************************
+ *
+ * D32 functions
+ *
+ */
+
+// SRC == 8888
+
+#define FILTER_PROC(x, y, a, b, c, d, dst) NAME_WRAP(Filter_32_opaque)(x, y, a, b, c, d, dst)
+
+#define MAKENAME(suffix) NAME_WRAP(S32_opaque_D32 ## suffix)
+#define SRCTYPE SkPMColor
+#define CHECKSTATE(state) SkASSERT(4 == state.fPixmap.info().bytesPerPixel()); \
+ SkASSERT(state.fAlphaScale == 256)
+#define RETURNDST(src) src
+#define SRC_TO_FILTER(src) src
+#include "SkBitmapProcState_sample.h"
+
+#undef FILTER_PROC
+#define FILTER_PROC(x, y, a, b, c, d, dst) NAME_WRAP(Filter_32_alpha)(x, y, a, b, c, d, dst, alphaScale)
+
+#define MAKENAME(suffix) NAME_WRAP(S32_alpha_D32 ## suffix)
+#define SRCTYPE SkPMColor
+#define CHECKSTATE(state) SkASSERT(4 == state.fPixmap.info().bytesPerPixel()); \
+ SkASSERT(state.fAlphaScale < 256)
+#define PREAMBLE(state) unsigned alphaScale = state.fAlphaScale
+#define RETURNDST(src) SkAlphaMulQ(src, alphaScale)
+#define SRC_TO_FILTER(src) src
+#include "SkBitmapProcState_sample.h"
+
+// SRC == 565
+
+#undef FILTER_PROC
+#define FILTER_PROC(x, y, a, b, c, d, dst) \
+ do { \
+ uint32_t tmp = Filter_565_Expanded(x, y, a, b, c, d); \
+ *(dst) = SkExpanded_565_To_PMColor(tmp); \
+ } while (0)
+
+#define MAKENAME(suffix) NAME_WRAP(S16_opaque_D32 ## suffix)
+#define SRCTYPE uint16_t
+#define CHECKSTATE(state) SkASSERT(kRGB_565_SkColorType == state.fPixmap.colorType()); \
+ SkASSERT(state.fAlphaScale == 256)
+#define RETURNDST(src) SkPixel16ToPixel32(src)
+#define SRC_TO_FILTER(src) src
+#include "SkBitmapProcState_sample.h"
+
+#undef FILTER_PROC
+#define FILTER_PROC(x, y, a, b, c, d, dst) \
+ do { \
+ uint32_t tmp = Filter_565_Expanded(x, y, a, b, c, d); \
+ *(dst) = SkAlphaMulQ(SkExpanded_565_To_PMColor(tmp), alphaScale); \
+ } while (0)
+
+#define MAKENAME(suffix) NAME_WRAP(S16_alpha_D32 ## suffix)
+#define SRCTYPE uint16_t
+#define CHECKSTATE(state) SkASSERT(kRGB_565_SkColorType == state.fPixmap.colorType()); \
+ SkASSERT(state.fAlphaScale < 256)
+#define PREAMBLE(state) unsigned alphaScale = state.fAlphaScale
+#define RETURNDST(src) SkAlphaMulQ(SkPixel16ToPixel32(src), alphaScale)
+#define SRC_TO_FILTER(src) src
+#include "SkBitmapProcState_sample.h"
+
+// SRC == Index8
+
+#undef FILTER_PROC
+#define FILTER_PROC(x, y, a, b, c, d, dst) NAME_WRAP(Filter_32_opaque)(x, y, a, b, c, d, dst)
+
+#define MAKENAME(suffix) NAME_WRAP(SI8_opaque_D32 ## suffix)
+#define SRCTYPE uint8_t
+#define CHECKSTATE(state) SkASSERT(kIndex_8_SkColorType == state.fPixmap.colorType()); \
+ SkASSERT(state.fAlphaScale == 256)
+#define PREAMBLE(state) const SkPMColor* SK_RESTRICT table = state.fPixmap.ctable()->readColors()
+#define RETURNDST(src) table[src]
+#define SRC_TO_FILTER(src) table[src]
+#define POSTAMBLE(state)
+#include "SkBitmapProcState_sample.h"
+
+#undef FILTER_PROC
+#define FILTER_PROC(x, y, a, b, c, d, dst) NAME_WRAP(Filter_32_alpha)(x, y, a, b, c, d, dst, alphaScale)
+
+#define MAKENAME(suffix) NAME_WRAP(SI8_alpha_D32 ## suffix)
+#define SRCTYPE uint8_t
+#define CHECKSTATE(state) SkASSERT(kIndex_8_SkColorType == state.fPixmap.colorType()); \
+ SkASSERT(state.fAlphaScale < 256)
+#define PREAMBLE(state) unsigned alphaScale = state.fAlphaScale; \
+ const SkPMColor* SK_RESTRICT table = state.fPixmap.ctable()->readColors()
+#define RETURNDST(src) SkAlphaMulQ(table[src], alphaScale)
+#define SRC_TO_FILTER(src) table[src]
+#define POSTAMBLE(state)
+#include "SkBitmapProcState_sample.h"
+
+// SRC == 4444
+
+#undef FILTER_PROC
+#define FILTER_PROC(x, y, a, b, c, d, dst) *(dst) = Filter_4444_D32(x, y, a, b, c, d)
+
+#define MAKENAME(suffix) NAME_WRAP(S4444_opaque_D32 ## suffix)
+#define SRCTYPE SkPMColor16
+#define CHECKSTATE(state) SkASSERT(kARGB_4444_SkColorType == state.fPixmap.colorType()); \
+ SkASSERT(state.fAlphaScale == 256)
+#define RETURNDST(src) SkPixel4444ToPixel32(src)
+#define SRC_TO_FILTER(src) src
+#include "SkBitmapProcState_sample.h"
+
+#undef FILTER_PROC
+#define FILTER_PROC(x, y, a, b, c, d, dst) \
+ do { \
+ uint32_t tmp = Filter_4444_D32(x, y, a, b, c, d); \
+ *(dst) = SkAlphaMulQ(tmp, alphaScale); \
+ } while (0)
+
+#define MAKENAME(suffix) NAME_WRAP(S4444_alpha_D32 ## suffix)
+#define SRCTYPE SkPMColor16
+#define CHECKSTATE(state) SkASSERT(kARGB_4444_SkColorType == state.fPixmap.colorType()); \
+ SkASSERT(state.fAlphaScale < 256)
+#define PREAMBLE(state) unsigned alphaScale = state.fAlphaScale
+#define RETURNDST(src) SkAlphaMulQ(SkPixel4444ToPixel32(src), alphaScale)
+#define SRC_TO_FILTER(src) src
+#include "SkBitmapProcState_sample.h"
+
+// SRC == A8
+
+#undef FILTER_PROC
+#define FILTER_PROC(x, y, a, b, c, d, dst) \
+ do { \
+ unsigned tmp = Filter_8(x, y, a, b, c, d); \
+ *(dst) = SkAlphaMulQ(pmColor, SkAlpha255To256(tmp)); \
+ } while (0)
+
+#define MAKENAME(suffix) NAME_WRAP(SA8_alpha_D32 ## suffix)
+#define SRCTYPE uint8_t
+#define CHECKSTATE(state) SkASSERT(kAlpha_8_SkColorType == state.fPixmap.colorType());
+#define PREAMBLE(state) const SkPMColor pmColor = state.fPaintPMColor;
+#define RETURNDST(src) SkAlphaMulQ(pmColor, SkAlpha255To256(src))
+#define SRC_TO_FILTER(src) src
+#include "SkBitmapProcState_sample.h"
+
+// SRC == Gray8
+
+#undef FILTER_PROC
+#define FILTER_PROC(x, y, a, b, c, d, dst) \
+ do { \
+ unsigned tmp = Filter_8(x, y, a, b, c, d); \
+ SkPMColor color = SkPackARGB32(0xFF, tmp, tmp, tmp); \
+ *(dst) = SkAlphaMulQ(color, alphaScale); \
+ } while (0)
+
+#define MAKENAME(suffix) NAME_WRAP(SG8_alpha_D32 ## suffix)
+#define SRCTYPE uint8_t
+#define CHECKSTATE(state) SkASSERT(kGray_8_SkColorType == state.fPixmap.colorType());
+#define PREAMBLE(state) unsigned alphaScale = state.fAlphaScale
+#define RETURNDST(src) SkAlphaMulQ(SkPackARGB32(0xFF, src, src, src), alphaScale)
+#define SRC_TO_FILTER(src) src
+#include "SkBitmapProcState_sample.h"
+
+
+#define TILEX_PROCF(fx, max) SkClampMax((fx) >> 16, max)
+#define TILEY_PROCF(fy, max) SkClampMax((fy) >> 16, max)
+#define TILEX_LOW_BITS(fx, max) (((fx) >> 12) & 0xF)
+#define TILEY_LOW_BITS(fy, max) (((fy) >> 12) & 0xF)
+
+#undef FILTER_PROC
+#define FILTER_PROC(x, y, a, b, c, d, dst) NAME_WRAP(Filter_32_opaque)(x, y, a, b, c, d, dst)
+#define MAKENAME(suffix) NAME_WRAP(Clamp_SI8_opaque_D32 ## suffix)
+#define SRCTYPE uint8_t
+#define CHECKSTATE(state) SkASSERT(kIndex_8_SkColorType == state.fPixmap.colorType())
+#define PREAMBLE(state) const SkPMColor* SK_RESTRICT table = state.fPixmap.ctable()->readColors()
+#define SRC_TO_FILTER(src) table[src]
+#define POSTAMBLE(state)
+#include "SkBitmapProcState_shaderproc.h"
+
+#undef NAME_WRAP
diff --git a/gfx/skia/skia/src/core/SkBitmapProcState_sample.h b/gfx/skia/skia/src/core/SkBitmapProcState_sample.h
new file mode 100644
index 000000000..8526c47cd
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapProcState_sample.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkUtils.h"
+
+// declare functions externally to suppress warnings.
+void MAKENAME(_nofilter_DXDY)(const SkBitmapProcState& s,
+ const uint32_t* SK_RESTRICT xy,
+ int count, SkPMColor* SK_RESTRICT colors);
+void MAKENAME(_nofilter_DX)(const SkBitmapProcState& s,
+ const uint32_t* SK_RESTRICT xy,
+ int count, SkPMColor* SK_RESTRICT colors);
+void MAKENAME(_filter_DX)(const SkBitmapProcState& s,
+ const uint32_t* SK_RESTRICT xy,
+ int count, SkPMColor* SK_RESTRICT colors);
+void MAKENAME(_filter_DXDY)(const SkBitmapProcState& s,
+ const uint32_t* SK_RESTRICT xy,
+ int count, SkPMColor* SK_RESTRICT colors);
+
+void MAKENAME(_nofilter_DXDY)(const SkBitmapProcState& s,
+ const uint32_t* SK_RESTRICT xy,
+ int count, SkPMColor* SK_RESTRICT colors) {
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(kNone_SkFilterQuality == s.fFilterQuality);
+ SkDEBUGCODE(CHECKSTATE(s);)
+
+#ifdef PREAMBLE
+ PREAMBLE(s);
+#endif
+ const char* SK_RESTRICT srcAddr = (const char*)s.fPixmap.addr();
+ size_t rb = s.fPixmap.rowBytes();
+
+ uint32_t XY;
+ SRCTYPE src;
+
+ for (int i = (count >> 1); i > 0; --i) {
+ XY = *xy++;
+ SkASSERT((XY >> 16) < (unsigned)s.fPixmap.height() &&
+ (XY & 0xFFFF) < (unsigned)s.fPixmap.width());
+ src = ((const SRCTYPE*)(srcAddr + (XY >> 16) * rb))[XY & 0xFFFF];
+ *colors++ = RETURNDST(src);
+
+ XY = *xy++;
+ SkASSERT((XY >> 16) < (unsigned)s.fPixmap.height() &&
+ (XY & 0xFFFF) < (unsigned)s.fPixmap.width());
+ src = ((const SRCTYPE*)(srcAddr + (XY >> 16) * rb))[XY & 0xFFFF];
+ *colors++ = RETURNDST(src);
+ }
+ if (count & 1) {
+ XY = *xy++;
+ SkASSERT((XY >> 16) < (unsigned)s.fPixmap.height() &&
+ (XY & 0xFFFF) < (unsigned)s.fPixmap.width());
+ src = ((const SRCTYPE*)(srcAddr + (XY >> 16) * rb))[XY & 0xFFFF];
+ *colors++ = RETURNDST(src);
+ }
+
+#ifdef POSTAMBLE
+ POSTAMBLE(s);
+#endif
+}
+
+void MAKENAME(_nofilter_DX)(const SkBitmapProcState& s,
+ const uint32_t* SK_RESTRICT xy,
+ int count, SkPMColor* SK_RESTRICT colors) {
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(s.fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask));
+ SkASSERT(kNone_SkFilterQuality == s.fFilterQuality);
+ SkDEBUGCODE(CHECKSTATE(s);)
+
+#ifdef PREAMBLE
+ PREAMBLE(s);
+#endif
+ const SRCTYPE* SK_RESTRICT srcAddr = (const SRCTYPE*)s.fPixmap.addr();
+
+ // buffer is y32, x16, x16, x16, x16, x16
+ // bump srcAddr to the proper row, since we're told Y never changes
+ SkASSERT((unsigned)xy[0] < (unsigned)s.fPixmap.height());
+ srcAddr = (const SRCTYPE*)((const char*)srcAddr +
+ xy[0] * s.fPixmap.rowBytes());
+ xy += 1;
+
+ SRCTYPE src;
+
+ if (1 == s.fPixmap.width()) {
+ src = srcAddr[0];
+ SkPMColor dstValue = RETURNDST(src);
+ sk_memset32(colors, dstValue, count);
+ } else {
+ int i;
+ for (i = (count >> 2); i > 0; --i) {
+ uint32_t xx0 = *xy++;
+ uint32_t xx1 = *xy++;
+ SRCTYPE x0 = srcAddr[UNPACK_PRIMARY_SHORT(xx0)];
+ SRCTYPE x1 = srcAddr[UNPACK_SECONDARY_SHORT(xx0)];
+ SRCTYPE x2 = srcAddr[UNPACK_PRIMARY_SHORT(xx1)];
+ SRCTYPE x3 = srcAddr[UNPACK_SECONDARY_SHORT(xx1)];
+
+ *colors++ = RETURNDST(x0);
+ *colors++ = RETURNDST(x1);
+ *colors++ = RETURNDST(x2);
+ *colors++ = RETURNDST(x3);
+ }
+ const uint16_t* SK_RESTRICT xx = (const uint16_t*)(xy);
+ for (i = (count & 3); i > 0; --i) {
+ SkASSERT(*xx < (unsigned)s.fPixmap.width());
+ src = srcAddr[*xx++]; *colors++ = RETURNDST(src);
+ }
+ }
+
+#ifdef POSTAMBLE
+ POSTAMBLE(s);
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void MAKENAME(_filter_DX)(const SkBitmapProcState& s,
+ const uint32_t* SK_RESTRICT xy,
+ int count, SkPMColor* SK_RESTRICT colors) {
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(s.fFilterQuality != kNone_SkFilterQuality);
+ SkDEBUGCODE(CHECKSTATE(s);)
+
+#ifdef PREAMBLE
+ PREAMBLE(s);
+#endif
+ const char* SK_RESTRICT srcAddr = (const char*)s.fPixmap.addr();
+ size_t rb = s.fPixmap.rowBytes();
+ unsigned subY;
+ const SRCTYPE* SK_RESTRICT row0;
+ const SRCTYPE* SK_RESTRICT row1;
+
+    // set up the two row pointers and the shared subY
+ {
+ uint32_t XY = *xy++;
+ unsigned y0 = XY >> 14;
+ row0 = (const SRCTYPE*)(srcAddr + (y0 >> 4) * rb);
+ row1 = (const SRCTYPE*)(srcAddr + (XY & 0x3FFF) * rb);
+ subY = y0 & 0xF;
+ }
+
+ do {
+ uint32_t XX = *xy++; // x0:14 | 4 | x1:14
+ unsigned x0 = XX >> 14;
+ unsigned x1 = XX & 0x3FFF;
+ unsigned subX = x0 & 0xF;
+ x0 >>= 4;
+
+ FILTER_PROC(subX, subY,
+ SRC_TO_FILTER(row0[x0]),
+ SRC_TO_FILTER(row0[x1]),
+ SRC_TO_FILTER(row1[x0]),
+ SRC_TO_FILTER(row1[x1]),
+ colors);
+ colors += 1;
+
+ } while (--count != 0);
+
+#ifdef POSTAMBLE
+ POSTAMBLE(s);
+#endif
+}
+void MAKENAME(_filter_DXDY)(const SkBitmapProcState& s,
+ const uint32_t* SK_RESTRICT xy,
+ int count, SkPMColor* SK_RESTRICT colors) {
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(s.fFilterQuality != kNone_SkFilterQuality);
+ SkDEBUGCODE(CHECKSTATE(s);)
+
+#ifdef PREAMBLE
+ PREAMBLE(s);
+#endif
+ const char* SK_RESTRICT srcAddr = (const char*)s.fPixmap.addr();
+ size_t rb = s.fPixmap.rowBytes();
+
+ do {
+ uint32_t data = *xy++;
+ unsigned y0 = data >> 14;
+ unsigned y1 = data & 0x3FFF;
+ unsigned subY = y0 & 0xF;
+ y0 >>= 4;
+
+ data = *xy++;
+ unsigned x0 = data >> 14;
+ unsigned x1 = data & 0x3FFF;
+ unsigned subX = x0 & 0xF;
+ x0 >>= 4;
+
+ const SRCTYPE* SK_RESTRICT row0 = (const SRCTYPE*)(srcAddr + y0 * rb);
+ const SRCTYPE* SK_RESTRICT row1 = (const SRCTYPE*)(srcAddr + y1 * rb);
+
+ FILTER_PROC(subX, subY,
+ SRC_TO_FILTER(row0[x0]),
+ SRC_TO_FILTER(row0[x1]),
+ SRC_TO_FILTER(row1[x0]),
+ SRC_TO_FILTER(row1[x1]),
+ colors);
+ colors += 1;
+ } while (--count != 0);
+
+#ifdef POSTAMBLE
+ POSTAMBLE(s);
+#endif
+}
+
+#undef MAKENAME
+#undef SRCTYPE
+#undef CHECKSTATE
+#undef RETURNDST
+#undef SRC_TO_FILTER
+#undef FILTER_TO_DST
+
+#ifdef PREAMBLE
+ #undef PREAMBLE
+#endif
+#ifdef POSTAMBLE
+ #undef POSTAMBLE
+#endif
+
+#undef FILTER_PROC_TYPE
+#undef GET_FILTER_TABLE
+#undef GET_FILTER_ROW
+#undef GET_FILTER_ROW_PROC
+#undef GET_FILTER_PROC
diff --git a/gfx/skia/skia/src/core/SkBitmapProcState_shaderproc.h b/gfx/skia/skia/src/core/SkBitmapProcState_shaderproc.h
new file mode 100644
index 000000000..523b5621e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapProcState_shaderproc.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMathPriv.h"
+
+#define SCALE_FILTER_NAME MAKENAME(_filter_DX_shaderproc)
+
+// Can't be static in the general case because some of these implementations
+// will be defined and referenced in different object files.
+void SCALE_FILTER_NAME(const void* sIn, int x, int y, SkPMColor* SK_RESTRICT colors, int count);
+
+void SCALE_FILTER_NAME(const void* sIn, int x, int y, SkPMColor* SK_RESTRICT colors, int count) {
+ const SkBitmapProcState& s = *static_cast<const SkBitmapProcState*>(sIn);
+ SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
+ SkMatrix::kScale_Mask)) == 0);
+ SkASSERT(s.fInvKy == 0);
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(s.fFilterQuality != kNone_SkFilterQuality);
+ SkDEBUGCODE(CHECKSTATE(s);)
+
+ const unsigned maxX = s.fPixmap.width() - 1;
+ const SkFixed oneX = s.fFilterOneX;
+ const SkFixed dx = s.fInvSx;
+ SkFixed fx;
+ const SRCTYPE* SK_RESTRICT row0;
+ const SRCTYPE* SK_RESTRICT row1;
+ unsigned subY;
+
+ {
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ SkFixed fy = mapper.fixedY();
+ const unsigned maxY = s.fPixmap.height() - 1;
+ // compute our two Y values up front
+ subY = TILEY_LOW_BITS(fy, maxY);
+ int y0 = TILEY_PROCF(fy, maxY);
+ int y1 = TILEY_PROCF((fy + s.fFilterOneY), maxY);
+
+ const char* SK_RESTRICT srcAddr = (const char*)s.fPixmap.addr();
+ size_t rb = s.fPixmap.rowBytes();
+ row0 = (const SRCTYPE*)(srcAddr + y0 * rb);
+ row1 = (const SRCTYPE*)(srcAddr + y1 * rb);
+ // now initialize fx
+ fx = mapper.fixedX();
+ }
+
+#ifdef PREAMBLE
+ PREAMBLE(s);
+#endif
+
+ do {
+ unsigned subX = TILEX_LOW_BITS(fx, maxX);
+ unsigned x0 = TILEX_PROCF(fx, maxX);
+ unsigned x1 = TILEX_PROCF((fx + oneX), maxX);
+
+ FILTER_PROC(subX, subY,
+ SRC_TO_FILTER(row0[x0]),
+ SRC_TO_FILTER(row0[x1]),
+ SRC_TO_FILTER(row1[x0]),
+ SRC_TO_FILTER(row1[x1]),
+ colors);
+ colors += 1;
+
+ fx += dx;
+ } while (--count != 0);
+
+#ifdef POSTAMBLE
+ POSTAMBLE(s);
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#undef TILEX_PROCF
+#undef TILEY_PROCF
+#undef TILEX_LOW_BITS
+#undef TILEY_LOW_BITS
+#undef MAKENAME
+#undef SRCTYPE
+#undef CHECKSTATE
+#undef SRC_TO_FILTER
+#undef FILTER_TO_DST
+#undef PREAMBLE
+#undef POSTAMBLE
+
+#undef SCALE_FILTER_NAME
diff --git a/gfx/skia/skia/src/core/SkBitmapProcState_utils.h b/gfx/skia/skia/src/core/SkBitmapProcState_utils.h
new file mode 100644
index 000000000..3c4c1fa8c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapProcState_utils.h
@@ -0,0 +1,40 @@
+#ifndef SkBitmapProcState_utils_DEFINED
+#define SkBitmapProcState_utils_DEFINED
+
+// Helper to ensure that when we shift down, we do it w/o sign-extension
+// so the caller doesn't have to manually mask off the top 16 bits
+//
+static unsigned SK_USHIFT16(unsigned x) {
+ return x >> 16;
+}
+
+/*
+ * The decal_ functions require that
+ * 1. dx > 0
+ * 2. [fx, fx+dx, fx+2dx, fx+3dx, ... fx+(count-1)dx] are all <= maxX
+ *
+ * In addition, we use SkFractionalInt to keep more fractional precision than
+ * just SkFixed, so we will abort the decal_ call if dx is very small, since
+ * the decal_ function just operates on SkFixed. If that were changed, we could
+ * skip the very_small test here.
+ */
+static inline bool can_truncate_to_fixed_for_decal(SkFractionalInt frX,
+ SkFractionalInt frDx,
+ int count, unsigned max) {
+ SkFixed dx = SkFractionalIntToFixed(frDx);
+
+    // If decal_ kept SkFractionalInt precision, this would just be dx <= 0.
+    // The 1/256 threshold is arbitrary; we just don't want visible accumulated
+    // error if we truncate frDx and lose its low bits.
+ if (dx <= SK_Fixed1 / 256) {
+ return false;
+ }
+
+ // We cast to unsigned so we don't have to check for negative values, which
+ // will now appear as very large positive values, and thus fail our test!
+ SkFixed fx = SkFractionalIntToFixed(frX);
+ return (unsigned)SkFixedFloorToInt(fx) <= max &&
+ (unsigned)SkFixedFloorToInt(fx + dx * (count - 1)) < max;
+}
+
+#endif /* #ifndef SkBitmapProcState_utils_DEFINED */
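
A quick worked check of can_truncate_to_fixed_for_decal(): with plain integer types substituted for SkFixed / SkFractionalInt, the program below confirms that a run of 100 samples starting at x = 2.0 with dx = 0.5 stays inside a 64-pixel row. The types and the example numbers are illustrative; only the comparison logic mirrors the function above.

#include <cstdint>
#include <cstdio>

// Simplified restatement of can_truncate_to_fixed_for_decal() with plain
// integer types. "Fixed" is 16.16 fixed point.
typedef int32_t Fixed;
static const Fixed kFixed1 = 1 << 16;

static bool can_decal(Fixed fx, Fixed dx, int count, unsigned maxX) {
    if (dx <= kFixed1 / 256) {        // too small: truncation error could accumulate
        return false;
    }
    // The unsigned compare folds the "negative" case into "huge value" and rejects it.
    unsigned first = (unsigned)(fx >> 16);
    unsigned last  = (unsigned)((fx + dx * (count - 1)) >> 16);
    return first <= maxX && last < maxX;
}

int main() {
    // 100 samples starting at x = 2.0, stepping by 0.5, over a 64-pixel-wide row.
    Fixed fx = 2 * kFixed1;
    Fixed dx = kFixed1 / 2;
    std::printf("decal ok? %d\n", can_decal(fx, dx, 100, 63));  // last sample = 51.5 -> yes
    return 0;
}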
diff --git a/gfx/skia/skia/src/core/SkBitmapProvider.cpp b/gfx/skia/skia/src/core/SkBitmapProvider.cpp
new file mode 100644
index 000000000..37f8dc9d5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapProvider.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapProvider.h"
+#include "SkImage_Base.h"
+#include "SkPixelRef.h"
+
+int SkBitmapProvider::width() const {
+ return fImage ? fImage->width() : fBitmap.width();
+}
+
+int SkBitmapProvider::height() const {
+ return fImage ? fImage->height() : fBitmap.height();
+}
+
+uint32_t SkBitmapProvider::getID() const {
+ return fImage ? fImage->uniqueID() : fBitmap.getGenerationID();
+}
+
+bool SkBitmapProvider::validForDrawing() const {
+ if (!fImage) {
+ if (0 == fBitmap.width() || 0 == fBitmap.height()) {
+ return false;
+ }
+ if (nullptr == fBitmap.pixelRef()) {
+ return false; // no pixels to read
+ }
+ if (kIndex_8_SkColorType == fBitmap.colorType()) {
+            SkAutoLockPixels alp(fBitmap); // pixels must be locked before getColorTable() is safe to call.
+ if (!fBitmap.getColorTable()) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+SkImageInfo SkBitmapProvider::info() const {
+ if (fImage) {
+ return as_IB(fImage)->onImageInfo();
+ } else {
+ return fBitmap.info();
+ }
+}
+
+bool SkBitmapProvider::isVolatile() const {
+ if (fImage) {
+ // add flag to images?
+ const SkBitmap* bm = as_IB(fImage)->onPeekBitmap();
+ return bm ? bm->isVolatile() : false;
+ } else {
+ return fBitmap.isVolatile();
+ }
+}
+
+SkBitmapCacheDesc SkBitmapProvider::makeCacheDesc(int w, int h) const {
+ return fImage ? SkBitmapCacheDesc::Make(fImage, w, h) : SkBitmapCacheDesc::Make(fBitmap, w, h);
+}
+
+SkBitmapCacheDesc SkBitmapProvider::makeCacheDesc() const {
+ return fImage ? SkBitmapCacheDesc::Make(fImage) : SkBitmapCacheDesc::Make(fBitmap);
+}
+
+void SkBitmapProvider::notifyAddedToCache() const {
+ if (fImage) {
+ as_IB(fImage)->notifyAddedToCache();
+ } else {
+ fBitmap.pixelRef()->notifyAddedToCache();
+ }
+}
+
+bool SkBitmapProvider::asBitmap(SkBitmap* bm) const {
+ if (fImage) {
+ return as_IB(fImage)->getROPixels(bm, SkImage::kAllow_CachingHint);
+ } else {
+ *bm = fBitmap;
+ return true;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkBitmapProvider.h b/gfx/skia/skia/src/core/SkBitmapProvider.h
new file mode 100644
index 000000000..9901c0fc1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapProvider.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapProvider_DEFINED
+#define SkBitmapProvider_DEFINED
+
+#include "SkBitmap.h"
+#include "SkImage.h"
+#include "SkBitmapCache.h"
+
+class SkBitmapProvider {
+public:
+ explicit SkBitmapProvider(const SkBitmap& bm) : fBitmap(bm) {}
+ explicit SkBitmapProvider(const SkImage* img) : fImage(SkSafeRef(img)) {}
+ SkBitmapProvider(const SkBitmapProvider& other)
+ : fBitmap(other.fBitmap)
+ , fImage(SkSafeRef(other.fImage.get()))
+ {}
+
+ int width() const;
+ int height() const;
+ uint32_t getID() const;
+
+ bool validForDrawing() const;
+ SkImageInfo info() const;
+ bool isVolatile() const;
+
+ SkBitmapCacheDesc makeCacheDesc(int w, int h) const;
+ SkBitmapCacheDesc makeCacheDesc() const;
+ void notifyAddedToCache() const;
+
+    // Only call this if you're sure you need the bits, since it may be expensive:
+    // it can trigger a decode-and-cache or a GPU readback.
+ bool asBitmap(SkBitmap*) const;
+
+private:
+ SkBitmap fBitmap;
+ SkAutoTUnref<const SkImage> fImage;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBitmapScaler.cpp b/gfx/skia/skia/src/core/SkBitmapScaler.cpp
new file mode 100644
index 000000000..5edb1b23e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapScaler.cpp
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapScaler.h"
+#include "SkBitmapFilter.h"
+#include "SkConvolver.h"
+#include "SkImageInfo.h"
+#include "SkPixmap.h"
+#include "SkRect.h"
+#include "SkTArray.h"
+
+// SkResizeFilter ----------------------------------------------------------------
+
+// Encapsulates computation and storage of the filters required for one complete
+// resize operation.
+class SkResizeFilter {
+public:
+ SkResizeFilter(SkBitmapScaler::ResizeMethod method,
+ int srcFullWidth, int srcFullHeight,
+ float destWidth, float destHeight,
+ const SkRect& destSubset,
+ const SkConvolutionProcs& convolveProcs);
+ ~SkResizeFilter() { delete fBitmapFilter; }
+
+ // Returns the filled filter values.
+ const SkConvolutionFilter1D& xFilter() { return fXFilter; }
+ const SkConvolutionFilter1D& yFilter() { return fYFilter; }
+
+private:
+
+ SkBitmapFilter* fBitmapFilter;
+
+    // Computes one set of filters, either horizontally or vertically. The caller
+    // specifies a "lo" value and a "size" rather than left/top and right/bottom
+    // coordinates so that the same code can be re-used for each dimension.
+    //
+    // |destSubsetLo| and |destSubsetSize| give the range of destination values
+    // to compute (horizontally or vertically, at the caller's discretion).
+    //
+    // The source size and the scale factor for the transform are also
+    // specified.
+
+ void computeFilters(int srcSize,
+ float destSubsetLo, float destSubsetSize,
+ float scale,
+ SkConvolutionFilter1D* output,
+ const SkConvolutionProcs& convolveProcs);
+
+ SkConvolutionFilter1D fXFilter;
+ SkConvolutionFilter1D fYFilter;
+};
+
+SkResizeFilter::SkResizeFilter(SkBitmapScaler::ResizeMethod method,
+ int srcFullWidth, int srcFullHeight,
+ float destWidth, float destHeight,
+ const SkRect& destSubset,
+ const SkConvolutionProcs& convolveProcs) {
+
+ SkASSERT(method >= SkBitmapScaler::RESIZE_FirstMethod &&
+ method <= SkBitmapScaler::RESIZE_LastMethod);
+
+ fBitmapFilter = nullptr;
+    switch (method) {
+ case SkBitmapScaler::RESIZE_BOX:
+ fBitmapFilter = new SkBoxFilter;
+ break;
+ case SkBitmapScaler::RESIZE_TRIANGLE:
+ fBitmapFilter = new SkTriangleFilter;
+ break;
+ case SkBitmapScaler::RESIZE_MITCHELL:
+ fBitmapFilter = new SkMitchellFilter;
+ break;
+ case SkBitmapScaler::RESIZE_HAMMING:
+ fBitmapFilter = new SkHammingFilter;
+ break;
+ case SkBitmapScaler::RESIZE_LANCZOS3:
+ fBitmapFilter = new SkLanczosFilter;
+ break;
+ }
+
+
+ float scaleX = destWidth / srcFullWidth;
+ float scaleY = destHeight / srcFullHeight;
+
+ this->computeFilters(srcFullWidth, destSubset.fLeft, destSubset.width(),
+ scaleX, &fXFilter, convolveProcs);
+ if (srcFullWidth == srcFullHeight &&
+ destSubset.fLeft == destSubset.fTop &&
+        destSubset.width() == destSubset.height() &&
+ scaleX == scaleY) {
+ fYFilter = fXFilter;
+ } else {
+ this->computeFilters(srcFullHeight, destSubset.fTop, destSubset.height(),
+ scaleY, &fYFilter, convolveProcs);
+ }
+}
+
+// TODO(egouriou): Take advantage of periods in the convolution.
+// Practical resizing filters are periodic outside of the border area.
+// For Lanczos, a scaling by a (reduced) factor of p/q (q pixels in the
+// source become p pixels in the destination) will have a period of p.
+// A nice consequence is a period of 1 when downscaling by an integral
+// factor. Downscaling from typical display resolutions is also bound
+// to produce interesting periods as those are chosen to have multiple
+// small factors.
+// Small periods reduce computational load and improve cache usage if
+// the coefficients can be shared. For periods of 1 we can consider
+// loading the factors only once outside the borders.
+void SkResizeFilter::computeFilters(int srcSize,
+ float destSubsetLo, float destSubsetSize,
+ float scale,
+ SkConvolutionFilter1D* output,
+ const SkConvolutionProcs& convolveProcs) {
+ float destSubsetHi = destSubsetLo + destSubsetSize; // [lo, hi)
+
+ // When we're doing a magnification, the scale will be larger than one. This
+ // means the destination pixels are much smaller than the source pixels, and
+ // that the range covered by the filter won't necessarily cover any source
+ // pixel boundaries. Therefore, we use these clamped values (max of 1) for
+ // some computations.
+ float clampedScale = SkTMin(1.0f, scale);
+
+ // This is how many source pixels from the center we need to count
+ // to support the filtering function.
+ float srcSupport = fBitmapFilter->width() / clampedScale;
+
+ float invScale = 1.0f / scale;
+
+ SkSTArray<64, float, true> filterValuesArray;
+ SkSTArray<64, SkConvolutionFilter1D::ConvolutionFixed, true> fixedFilterValuesArray;
+
+ // Loop over all pixels in the output range. We will generate one set of
+ // filter values for each one. Those values will tell us how to blend the
+ // source pixels to compute the destination pixel.
+
+ // This is the pixel in the source directly under the pixel in the dest.
+ // Note that we base computations on the "center" of the pixels. To see
+ // why, observe that the destination pixel at coordinates (0, 0) in a 5.0x
+ // downscale should "cover" the pixels around the pixel with *its center*
+ // at coordinates (2.5, 2.5) in the source, not those around (0, 0).
+ // Hence we need to scale coordinates (0.5, 0.5), not (0, 0).
+ destSubsetLo = SkScalarFloorToScalar(destSubsetLo);
+ destSubsetHi = SkScalarCeilToScalar(destSubsetHi);
+ float srcPixel = (destSubsetLo + 0.5f) * invScale;
+ int destLimit = SkScalarTruncToInt(destSubsetHi - destSubsetLo);
+ output->reserveAdditional(destLimit, SkScalarCeilToInt(destLimit * srcSupport * 2));
+ for (int destI = 0; destI < destLimit; srcPixel += invScale, destI++)
+ {
+ // Compute the (inclusive) range of source pixels the filter covers.
+ float srcBegin = SkTMax(0.f, SkScalarFloorToScalar(srcPixel - srcSupport));
+ float srcEnd = SkTMin(srcSize - 1.f, SkScalarCeilToScalar(srcPixel + srcSupport));
+
+ // Compute the unnormalized filter value at each location of the source
+ // it covers.
+
+ // Sum of the filter values for normalizing.
+ // Distance from the center of the filter, this is the filter coordinate
+ // in source space. We also need to consider the center of the pixel
+ // when comparing distance against 'srcPixel'. In the 5x downscale
+ // example used above the distance from the center of the filter to
+ // the pixel with coordinates (2, 2) should be 0, because its center
+ // is at (2.5, 2.5).
+ float destFilterDist = (srcBegin + 0.5f - srcPixel) * clampedScale;
+ int filterCount = SkScalarTruncToInt(srcEnd - srcBegin) + 1;
+ if (filterCount <= 0) {
+ // true when srcSize is equal to srcPixel - srcSupport; this may be a bug
+ return;
+ }
+ filterValuesArray.reset(filterCount);
+ float filterSum = fBitmapFilter->evaluate_n(destFilterDist, clampedScale, filterCount,
+ filterValuesArray.begin());
+
+ // The filter must be normalized so that we don't affect the brightness of
+ // the image. Convert to normalized fixed point.
+ int fixedSum = 0;
+ fixedFilterValuesArray.reset(filterCount);
+ const float* filterValues = filterValuesArray.begin();
+ SkConvolutionFilter1D::ConvolutionFixed* fixedFilterValues = fixedFilterValuesArray.begin();
+ float invFilterSum = 1 / filterSum;
+ for (int fixedI = 0; fixedI < filterCount; fixedI++) {
+ int curFixed = SkConvolutionFilter1D::FloatToFixed(filterValues[fixedI] * invFilterSum);
+ fixedSum += curFixed;
+ fixedFilterValues[fixedI] = SkToS16(curFixed);
+ }
+ SkASSERT(fixedSum <= 0x7FFF);
+
+ // The conversion to fixed point will leave some rounding errors, which
+ // we add back in to avoid affecting the brightness of the image. We
+ // arbitrarily add this to the center of the filter array (this won't always
+ // be the center of the filter function since it could get clipped on the
+ // edges, but it doesn't matter enough to worry about that case).
+ int leftovers = SkConvolutionFilter1D::FloatToFixed(1) - fixedSum;
+ fixedFilterValues[filterCount / 2] += leftovers;
+
+ // Now it's ready to go.
+ output->AddFilter(SkScalarFloorToInt(srcBegin), fixedFilterValues, filterCount);
+ }
+
+ if (convolveProcs.fApplySIMDPadding) {
+ convolveProcs.fApplySIMDPadding(output);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static bool valid_for_resize(const SkPixmap& source, int dstW, int dstH) {
+ // TODO: Seems like we shouldn't care about the swizzle of source, just that it's 8888
+ return source.addr() && source.colorType() == kN32_SkColorType &&
+ source.width() >= 1 && source.height() >= 1 && dstW >= 1 && dstH >= 1;
+}
+
+bool SkBitmapScaler::Resize(const SkPixmap& result, const SkPixmap& source, ResizeMethod method) {
+ if (!valid_for_resize(source, result.width(), result.height())) {
+ return false;
+ }
+ if (!result.addr() || result.colorType() != source.colorType()) {
+ return false;
+ }
+
+    SkConvolutionProcs convolveProcs = { 0, nullptr, nullptr, nullptr, nullptr };
+ PlatformConvolutionProcs(&convolveProcs);
+
+ SkRect destSubset = SkRect::MakeIWH(result.width(), result.height());
+
+ SkResizeFilter filter(method, source.width(), source.height(),
+ result.width(), result.height(), destSubset, convolveProcs);
+
+ // Get a subset encompassing this touched area. We construct the
+ // offsets and row strides such that it looks like a new bitmap, while
+ // referring to the old data.
+ const uint8_t* sourceSubset = reinterpret_cast<const uint8_t*>(source.addr());
+
+ return BGRAConvolve2D(sourceSubset, static_cast<int>(source.rowBytes()),
+ !source.isOpaque(), filter.xFilter(), filter.yFilter(),
+ static_cast<int>(result.rowBytes()),
+ static_cast<unsigned char*>(result.writable_addr()),
+ convolveProcs, true);
+}
+
+bool SkBitmapScaler::Resize(SkBitmap* resultPtr, const SkPixmap& source, ResizeMethod method,
+ int destWidth, int destHeight, SkBitmap::Allocator* allocator) {
+ // Preflight some of the checks, to avoid allocating the result if we don't need it.
+ if (!valid_for_resize(source, destWidth, destHeight)) {
+ return false;
+ }
+
+ SkBitmap result;
+    // Note: pass along the profile information even though this is not the right answer,
+    // because this could be scaling in sRGB.
+ result.setInfo(SkImageInfo::MakeN32(destWidth, destHeight, source.alphaType(),
+ sk_ref_sp(source.info().colorSpace())));
+ result.allocPixels(allocator, nullptr);
+
+ SkPixmap resultPM;
+ if (!result.peekPixels(&resultPM) || !Resize(resultPM, source, method)) {
+ return false;
+ }
+
+ *resultPtr = result;
+ resultPtr->lockPixels();
+ SkASSERT(resultPtr->getPixels());
+ return true;
+}
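
The least obvious part of computeFilters() is the fixed-point normalization: the float weights are scaled so they sum to 1.0, converted with FloatToFixed, and the rounding leftover is folded into the centre tap so the fixed-point weights sum to exactly "one". The sketch below reproduces just that step; the 14-bit shift is an assumption for illustration (the SkASSERT(fixedSum <= 0x7FFF) above only guarantees the sum fits in a signed 16-bit value).

#include <cstdint>
#include <cstdio>
#include <vector>

// Standalone sketch of the normalization step in computeFilters().
// kShiftBits = 14 is assumed here purely for illustration.
static const int kShiftBits = 14;
static int16_t floatToFixed(float f) { return (int16_t)(f * (1 << kShiftBits)); }

int main() {
    std::vector<float> weights = {0.2f, 0.7f, 0.2f};   // unnormalized taps
    float sum = 0;
    for (float w : weights) sum += w;

    std::vector<int16_t> fixedW(weights.size());
    int fixedSum = 0;
    for (size_t i = 0; i < weights.size(); ++i) {
        fixedW[i] = floatToFixed(weights[i] / sum);    // normalize, then convert
        fixedSum += fixedW[i];
    }
    // Fold the rounding error back into the centre tap, mirroring the code above.
    fixedW[fixedW.size() / 2] += (int16_t)((1 << kShiftBits) - fixedSum);

    int check = 0;
    for (int16_t w : fixedW) check += w;
    std::printf("fixed weights sum = %d (one = %d)\n", check, 1 << kShiftBits);
    return 0;
}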
diff --git a/gfx/skia/skia/src/core/SkBitmapScaler.h b/gfx/skia/skia/src/core/SkBitmapScaler.h
new file mode 100644
index 000000000..3d734d6ef
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapScaler.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapScaler_DEFINED
+#define SkBitmapScaler_DEFINED
+
+#include "SkBitmap.h"
+#include "SkConvolver.h"
+
+/** \class SkBitmapScaler
+
+ Provides the interface for high quality image resampling.
+ */
+
+class SK_API SkBitmapScaler {
+public:
+ enum ResizeMethod {
+ RESIZE_BOX,
+ RESIZE_TRIANGLE,
+ RESIZE_LANCZOS3,
+ RESIZE_HAMMING,
+ RESIZE_MITCHELL,
+
+ RESIZE_FirstMethod = RESIZE_BOX,
+ RESIZE_LastMethod = RESIZE_MITCHELL,
+ };
+
+ /**
+ * Given already-allocated src and dst pixmaps, this will scale the src pixels using the
+ * specified resize-method and write the results into the pixels pointed to by dst.
+ */
+ static bool Resize(const SkPixmap& dst, const SkPixmap& src, ResizeMethod method);
+
+ /**
+ * Helper function that manages allocating a bitmap to hold the dst pixels, and then calls
+ * the pixmap version of Resize.
+ */
+ static bool Resize(SkBitmap* result, const SkPixmap& src, ResizeMethod method,
+ int dest_width, int dest_height, SkBitmap::Allocator* = nullptr);
+
+    /** Platforms can optionally override the convolution functions
+        if SIMD versions of them are available.
+ */
+
+ static void PlatformConvolutionProcs(SkConvolutionProcs*);
+};
+
+#endif
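
A minimal usage sketch of the bitmap-allocating overload declared above, assuming the SkPixmap(info, addr, rowBytes) constructor and SkImageInfo::MakeN32Premul() from the same Skia revision; the scale factor and the filter choice are arbitrary.

#include "SkBitmapScaler.h"
#include "SkPixmap.h"

// Downscale an N32 pixel buffer to a quarter of its size into a freshly
// allocated SkBitmap. Returns false if the inputs are not valid for resizing.
static bool downscale_to_quarter(const uint32_t* srcPixels, int srcW, int srcH,
                                 size_t srcRowBytes, SkBitmap* out) {
    SkPixmap src(SkImageInfo::MakeN32Premul(srcW, srcH), srcPixels, srcRowBytes);
    // Mitchell gives a reasonable quality/sharpness trade-off for photographic content.
    return SkBitmapScaler::Resize(out, src, SkBitmapScaler::RESIZE_MITCHELL,
                                  srcW / 4, srcH / 4);
}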
diff --git a/gfx/skia/skia/src/core/SkBlendModePriv.h b/gfx/skia/skia/src/core/SkBlendModePriv.h
new file mode 100644
index 000000000..b5d9e751e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlendModePriv.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlendModePriv_DEFINED
+#define SkBlendModePriv_DEFINED
+
+#include "SkBlendMode.h"
+
+bool SkBlendMode_SupportsCoverageAsAlpha(SkBlendMode);
+
+#if SK_SUPPORT_GPU
+sk_sp<GrXPFactory> SkBlendMode_AsXPFactory(SkBlendMode);
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBlitBWMaskTemplate.h b/gfx/skia/skia/src/core/SkBlitBWMaskTemplate.h
new file mode 100644
index 000000000..b0b9358b4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitBWMaskTemplate.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkBitmap.h"
+#include "SkMask.h"
+
+#ifndef ClearLow3Bits_DEFINED
+#define ClearLow3Bits_DEFINED
+ #define ClearLow3Bits(x) ((unsigned)(x) >> 3 << 3)
+#endif
+
+/*
+ SK_BLITBWMASK_NAME name of function(const SkBitmap& bitmap, const SkMask& mask, const SkIRect& clip, SK_BLITBWMASK_ARGS)
+ SK_BLITBWMASK_ARGS list of additional arguments to SK_BLITBWMASK_NAME, beginning with a comma
+ SK_BLITBWMASK_BLIT8 name of function(U8CPU byteMask, SK_BLITBWMASK_DEVTYPE* dst, int x, int y)
+    SK_BLITBWMASK_GETADDR one of writable_addr8, writable_addr16, or writable_addr32
+    SK_BLITBWMASK_DEVTYPE one of U32, U16, or U8
+*/
+
+static void SK_BLITBWMASK_NAME(const SkPixmap& dst, const SkMask& srcMask,
+ const SkIRect& clip SK_BLITBWMASK_ARGS) {
+ SkASSERT(clip.fRight <= srcMask.fBounds.fRight);
+
+ int cx = clip.fLeft;
+ int cy = clip.fTop;
+ int maskLeft = srcMask.fBounds.fLeft;
+ unsigned mask_rowBytes = srcMask.fRowBytes;
+ size_t bitmap_rowBytes = dst.rowBytes();
+ unsigned height = clip.height();
+
+ SkASSERT(mask_rowBytes != 0);
+ SkASSERT(bitmap_rowBytes != 0);
+ SkASSERT(height != 0);
+
+ const uint8_t* bits = srcMask.getAddr1(cx, cy);
+ SK_BLITBWMASK_DEVTYPE* device = dst.SK_BLITBWMASK_GETADDR(cx, cy);
+
+ if (cx == maskLeft && clip.fRight == srcMask.fBounds.fRight)
+ {
+ do {
+ SK_BLITBWMASK_DEVTYPE* dst = device;
+ unsigned rb = mask_rowBytes;
+ do {
+ U8CPU mask = *bits++;
+ SK_BLITBWMASK_BLIT8(mask, dst);
+ dst += 8;
+ } while (--rb != 0);
+ device = (SK_BLITBWMASK_DEVTYPE*)((char*)device + bitmap_rowBytes);
+ } while (--height != 0);
+ }
+ else
+ {
+ int left_edge = cx - maskLeft;
+ SkASSERT(left_edge >= 0);
+ int rite_edge = clip.fRight - maskLeft;
+ SkASSERT(rite_edge > left_edge);
+
+ int left_mask = 0xFF >> (left_edge & 7);
+ int rite_mask = 0xFF << (8 - (rite_edge & 7));
+ rite_mask &= 0xFF; // only want low-8 bits of mask
+ int full_runs = (rite_edge >> 3) - ((left_edge + 7) >> 3);
+
+ // check for empty right mask, so we don't read off the end (or go slower than we need to)
+ if (rite_mask == 0)
+ {
+ SkASSERT(full_runs >= 0);
+ full_runs -= 1;
+ rite_mask = 0xFF;
+ }
+ if (left_mask == 0xFF)
+ full_runs -= 1;
+
+ // back up manually so we can keep in sync with our byte-aligned src
+ // and not trigger an assert from the getAddr## function
+ device -= left_edge & 7;
+
+ if (full_runs < 0)
+ {
+ left_mask &= rite_mask;
+ SkASSERT(left_mask != 0);
+ do {
+ U8CPU mask = *bits & left_mask;
+ SK_BLITBWMASK_BLIT8(mask, device);
+ bits += mask_rowBytes;
+ device = (SK_BLITBWMASK_DEVTYPE*)((char*)device + bitmap_rowBytes);
+ } while (--height != 0);
+ }
+ else
+ {
+ do {
+ int runs = full_runs;
+ SK_BLITBWMASK_DEVTYPE* dst = device;
+ const uint8_t* b = bits;
+ U8CPU mask;
+
+ mask = *b++ & left_mask;
+ SK_BLITBWMASK_BLIT8(mask, dst);
+ dst += 8;
+
+ while (--runs >= 0)
+ {
+ mask = *b++;
+ SK_BLITBWMASK_BLIT8(mask, dst);
+ dst += 8;
+ }
+
+ mask = *b & rite_mask;
+ SK_BLITBWMASK_BLIT8(mask, dst);
+
+ bits += mask_rowBytes;
+ device = (SK_BLITBWMASK_DEVTYPE*)((char*)device + bitmap_rowBytes);
+ } while (--height != 0);
+ }
+ }
+}
+
+#undef SK_BLITBWMASK_NAME
+#undef SK_BLITBWMASK_ARGS
+#undef SK_BLITBWMASK_BLIT8
+#undef SK_BLITBWMASK_GETADDR
+#undef SK_BLITBWMASK_DEVTYPE
+#undef SK_BLITBWMASK_DOROWSETUP
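
The trickiest part of the template is the partial-byte handling: left_mask keeps only the bits at or to the right of the clip's left edge in the first byte, rite_mask keeps only the bits to the left of the clip's right edge in the last byte, and full_runs counts the whole bytes in between (with byte-aligned edges folded into the run count). The standalone walk-through below plugs in one concrete clip to show the arithmetic; the numbers are only an example.

#include <cstdio>

// Walk-through of the edge-mask arithmetic in the template above, for a clip
// that starts at bit 2 and ends at bit 24 within the mask row.
int main() {
    int left_edge = 2;           // cx - maskLeft
    int rite_edge = 24;          // clip.fRight - maskLeft
    int left_mask = 0xFF >> (left_edge & 7);             // keep bits 2..7 of the first byte
    int rite_mask = (0xFF << (8 - (rite_edge & 7))) & 0xFF;
    int full_runs = (rite_edge >> 3) - ((left_edge + 7) >> 3);
    if (rite_mask == 0) {        // right edge is byte aligned: last byte is a full run
        full_runs -= 1;
        rite_mask = 0xFF;
    }
    if (left_mask == 0xFF) {     // left edge is byte aligned: first byte is a full run
        full_runs -= 1;
    }
    std::printf("left_mask=0x%02X rite_mask=0x%02X full_runs=%d\n",
                left_mask, rite_mask, full_runs);        // 0x3F, 0xFF, 1
    // 6 bits (partial left byte) + 8 bits (one full run) + 8 bits (right byte)
    // = 22 = rite_edge - left_edge, so every clipped mask bit is visited once.
    return 0;
}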
diff --git a/gfx/skia/skia/src/core/SkBlitMask.h b/gfx/skia/skia/src/core/SkBlitMask.h
new file mode 100644
index 000000000..b53ff7dfd
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitMask.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlitMask_DEFINED
+#define SkBlitMask_DEFINED
+
+#include "SkColor.h"
+#include "SkMask.h"
+#include "SkPixmap.h"
+
+class SkBlitMask {
+public:
+ /**
+     * Returns true if the device config and mask format are supported;
+     * otherwise returns false (and nothing is drawn).
+ */
+ static bool BlitColor(const SkPixmap& device, const SkMask& mask,
+ const SkIRect& clip, SkColor color);
+
+ /**
+ * Function pointer that blits the mask into a device (dst) colorized
+ * by color. The number of pixels to blit is specified by width and height,
+ * but each scanline is offset by dstRB (rowbytes) and srcRB respectively.
+ */
+ typedef void (*ColorProc)(void* dst, size_t dstRB,
+ const void* mask, size_t maskRB,
+ SkColor color, int width, int height);
+
+ /**
+ * Function pointer that blits a row of mask(lcd16) into a row of dst
+ * colorized by a single color. The number of pixels to blit is specified
+ * by width.
+ */
+ typedef void (*BlitLCD16RowProc)(SkPMColor dst[], const uint16_t src[],
+ SkColor color, int width,
+ SkPMColor opaqueDst);
+
+ /**
+ * Function pointer that blits a row of src colors through a row of a mask
+ * onto a row of dst colors. The RowFactory that returns this function ptr
+ * will have been told the formats for the mask and the dst.
+ */
+ typedef void (*RowProc)(SkPMColor* dst, const void* mask,
+ const SkPMColor* src, int width);
+
+ /**
+ * Public entry-point to return a blitcolor BlitLCD16RowProc.
+ */
+ static BlitLCD16RowProc BlitLCD16RowFactory(bool isOpaque);
+
+ /**
+ * Return either platform specific optimized blitcolor BlitLCD16RowProc,
+ * or nullptr if no optimized routine is available.
+ */
+ static BlitLCD16RowProc PlatformBlitRowProcs16(bool isOpaque);
+
+ enum RowFlags {
+ kSrcIsOpaque_RowFlag = 1 << 0
+ };
+
+ /**
+ * Public entry-point to return a blitmask RowProc.
+ * May return nullptr if config or format are not supported.
+ */
+ static RowProc RowFactory(SkColorType, SkMask::Format, RowFlags);
+
+ /**
+ * Return either platform specific optimized blitmask RowProc,
+ * or nullptr if no optimized routine is available.
+ */
+ static RowProc PlatformRowProcs(SkColorType, SkMask::Format, RowFlags);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBlitMask_D32.cpp b/gfx/skia/skia/src/core/SkBlitMask_D32.cpp
new file mode 100644
index 000000000..b36fcf012
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitMask_D32.cpp
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBlitMask.h"
+#include "SkColor.h"
+#include "SkColorPriv.h"
+#include "SkOpts.h"
+
+SkBlitMask::BlitLCD16RowProc SkBlitMask::BlitLCD16RowFactory(bool isOpaque) {
+ BlitLCD16RowProc proc = PlatformBlitRowProcs16(isOpaque);
+ if (proc) {
+ return proc;
+ }
+
+ if (isOpaque) {
+ return SkBlitLCD16OpaqueRow;
+ } else {
+ return SkBlitLCD16Row;
+ }
+}
+
+static void D32_LCD16_Proc(void* SK_RESTRICT dst, size_t dstRB,
+ const void* SK_RESTRICT mask, size_t maskRB,
+ SkColor color, int width, int height) {
+
+ SkPMColor* dstRow = (SkPMColor*)dst;
+ const uint16_t* srcRow = (const uint16_t*)mask;
+ SkPMColor opaqueDst;
+
+ SkBlitMask::BlitLCD16RowProc proc = nullptr;
+ bool isOpaque = (0xFF == SkColorGetA(color));
+ proc = SkBlitMask::BlitLCD16RowFactory(isOpaque);
+ SkASSERT(proc != nullptr);
+
+ if (isOpaque) {
+ opaqueDst = SkPreMultiplyColor(color);
+ } else {
+ opaqueDst = 0; // ignored
+ }
+
+ do {
+ proc(dstRow, srcRow, color, width, opaqueDst);
+ dstRow = (SkPMColor*)((char*)dstRow + dstRB);
+ srcRow = (const uint16_t*)((const char*)srcRow + maskRB);
+ } while (--height != 0);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkBlitMask::BlitColor(const SkPixmap& device, const SkMask& mask,
+ const SkIRect& clip, SkColor color) {
+ int x = clip.fLeft, y = clip.fTop;
+
+ if (device.colorType() == kN32_SkColorType && mask.fFormat == SkMask::kA8_Format) {
+ SkOpts::blit_mask_d32_a8(device.writable_addr32(x,y), device.rowBytes(),
+ (const SkAlpha*)mask.getAddr(x,y), mask.fRowBytes,
+ color, clip.width(), clip.height());
+ return true;
+ }
+
+ if (device.colorType() == kN32_SkColorType && mask.fFormat == SkMask::kLCD16_Format) {
+ // TODO: Is this reachable code? Seems like no.
+ D32_LCD16_Proc(device.writable_addr32(x,y), device.rowBytes(),
+ mask.getAddr(x,y), mask.fRowBytes,
+ color, clip.width(), clip.height());
+ return true;
+ }
+
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+static void BW_RowProc_Blend(
+ SkPMColor* SK_RESTRICT dst, const void* maskIn, const SkPMColor* SK_RESTRICT src, int count) {
+ const uint8_t* SK_RESTRICT mask = static_cast<const uint8_t*>(maskIn);
+ int i, octuple = (count + 7) >> 3;
+ for (i = 0; i < octuple; ++i) {
+ int m = *mask++;
+ if (m & 0x80) { dst[0] = SkPMSrcOver(src[0], dst[0]); }
+ if (m & 0x40) { dst[1] = SkPMSrcOver(src[1], dst[1]); }
+ if (m & 0x20) { dst[2] = SkPMSrcOver(src[2], dst[2]); }
+ if (m & 0x10) { dst[3] = SkPMSrcOver(src[3], dst[3]); }
+ if (m & 0x08) { dst[4] = SkPMSrcOver(src[4], dst[4]); }
+ if (m & 0x04) { dst[5] = SkPMSrcOver(src[5], dst[5]); }
+ if (m & 0x02) { dst[6] = SkPMSrcOver(src[6], dst[6]); }
+ if (m & 0x01) { dst[7] = SkPMSrcOver(src[7], dst[7]); }
+ src += 8;
+ dst += 8;
+ }
+ count &= 7;
+ if (count > 0) {
+ int m = *mask;
+ do {
+ if (m & 0x80) { dst[0] = SkPMSrcOver(src[0], dst[0]); }
+ m <<= 1;
+ src += 1;
+ dst += 1;
+ } while (--count > 0);
+ }
+}
+
+static void BW_RowProc_Opaque(
+ SkPMColor* SK_RESTRICT dst, const void* maskIn, const SkPMColor* SK_RESTRICT src, int count) {
+ const uint8_t* SK_RESTRICT mask = static_cast<const uint8_t*>(maskIn);
+ int i, octuple = (count + 7) >> 3;
+ for (i = 0; i < octuple; ++i) {
+ int m = *mask++;
+ if (m & 0x80) { dst[0] = src[0]; }
+ if (m & 0x40) { dst[1] = src[1]; }
+ if (m & 0x20) { dst[2] = src[2]; }
+ if (m & 0x10) { dst[3] = src[3]; }
+ if (m & 0x08) { dst[4] = src[4]; }
+ if (m & 0x04) { dst[5] = src[5]; }
+ if (m & 0x02) { dst[6] = src[6]; }
+ if (m & 0x01) { dst[7] = src[7]; }
+ src += 8;
+ dst += 8;
+ }
+ count &= 7;
+ if (count > 0) {
+ int m = *mask;
+ do {
+ if (m & 0x80) { dst[0] = SkPMSrcOver(src[0], dst[0]); }
+ m <<= 1;
+ src += 1;
+ dst += 1;
+ } while (--count > 0);
+ }
+}
+
+static void A8_RowProc_Blend(
+ SkPMColor* SK_RESTRICT dst, const void* maskIn, const SkPMColor* SK_RESTRICT src, int count) {
+ const uint8_t* SK_RESTRICT mask = static_cast<const uint8_t*>(maskIn);
+ for (int i = 0; i < count; ++i) {
+ if (mask[i]) {
+ dst[i] = SkBlendARGB32(src[i], dst[i], mask[i]);
+ }
+ }
+}
+
+static void A8_RowProc_Opaque(
+ SkPMColor* SK_RESTRICT dst, const void* maskIn, const SkPMColor* SK_RESTRICT src, int count) {
+ const uint8_t* SK_RESTRICT mask = static_cast<const uint8_t*>(maskIn);
+ for (int i = 0; i < count; ++i) {
+ int m = mask[i];
+ if (m) {
+ m += (m >> 7);
+ dst[i] = SkPMLerp(src[i], dst[i], m);
+ }
+ }
+}
+
+static int upscale31To255(int value) {
+ value = (value << 3) | (value >> 2);
+ return value;
+}
+
+static int src_alpha_blend(int src, int dst, int srcA, int mask) {
+
+ return dst + SkAlphaMul(src - SkAlphaMul(srcA, dst), mask);
+}
+
+static void LCD16_RowProc_Blend(
+ SkPMColor* SK_RESTRICT dst, const void* maskIn, const SkPMColor* SK_RESTRICT src, int count) {
+ const uint16_t* SK_RESTRICT mask = static_cast<const uint16_t*>(maskIn);
+ for (int i = 0; i < count; ++i) {
+ uint16_t m = mask[i];
+ if (0 == m) {
+ continue;
+ }
+
+ SkPMColor s = src[i];
+ SkPMColor d = dst[i];
+
+ int srcA = SkGetPackedA32(s);
+ int srcR = SkGetPackedR32(s);
+ int srcG = SkGetPackedG32(s);
+ int srcB = SkGetPackedB32(s);
+
+ srcA += srcA >> 7;
+
+ /* We want all of these in 5bits, hence the shifts in case one of them
+ * (green) is 6bits.
+ */
+ int maskR = SkGetPackedR16(m) >> (SK_R16_BITS - 5);
+ int maskG = SkGetPackedG16(m) >> (SK_G16_BITS - 5);
+ int maskB = SkGetPackedB16(m) >> (SK_B16_BITS - 5);
+
+ maskR = upscale31To255(maskR);
+ maskG = upscale31To255(maskG);
+ maskB = upscale31To255(maskB);
+
+ int dstR = SkGetPackedR32(d);
+ int dstG = SkGetPackedG32(d);
+ int dstB = SkGetPackedB32(d);
+
+ // LCD blitting is only supported if the dst is known/required
+ // to be opaque
+ dst[i] = SkPackARGB32(0xFF,
+ src_alpha_blend(srcR, dstR, srcA, maskR),
+ src_alpha_blend(srcG, dstG, srcA, maskG),
+ src_alpha_blend(srcB, dstB, srcA, maskB));
+ }
+}
+
+static void LCD16_RowProc_Opaque(
+ SkPMColor* SK_RESTRICT dst, const void* maskIn, const SkPMColor* SK_RESTRICT src, int count) {
+ const uint16_t* SK_RESTRICT mask = static_cast<const uint16_t*>(maskIn);
+ for (int i = 0; i < count; ++i) {
+ uint16_t m = mask[i];
+ if (0 == m) {
+ continue;
+ }
+
+ SkPMColor s = src[i];
+ SkPMColor d = dst[i];
+
+ int srcR = SkGetPackedR32(s);
+ int srcG = SkGetPackedG32(s);
+ int srcB = SkGetPackedB32(s);
+
+ /* We want all of these in 5bits, hence the shifts in case one of them
+ * (green) is 6bits.
+ */
+ int maskR = SkGetPackedR16(m) >> (SK_R16_BITS - 5);
+ int maskG = SkGetPackedG16(m) >> (SK_G16_BITS - 5);
+ int maskB = SkGetPackedB16(m) >> (SK_B16_BITS - 5);
+
+ // Now upscale them to 0..32, so we can use blend32
+ maskR = SkUpscale31To32(maskR);
+ maskG = SkUpscale31To32(maskG);
+ maskB = SkUpscale31To32(maskB);
+
+ int dstR = SkGetPackedR32(d);
+ int dstG = SkGetPackedG32(d);
+ int dstB = SkGetPackedB32(d);
+
+ // LCD blitting is only supported if the dst is known/required
+ // to be opaque
+ dst[i] = SkPackARGB32(0xFF,
+ SkBlend32(srcR, dstR, maskR),
+ SkBlend32(srcG, dstG, maskG),
+ SkBlend32(srcB, dstB, maskB));
+ }
+}
+
+SkBlitMask::RowProc SkBlitMask::RowFactory(SkColorType ct,
+ SkMask::Format format,
+ RowFlags flags) {
+// make this opt-in until chrome can rebaseline
+ RowProc proc = PlatformRowProcs(ct, format, flags);
+ if (proc) {
+ return proc;
+ }
+
+ static const RowProc gProcs[] = {
+ // need X coordinate to handle BW
+ false ? (RowProc)BW_RowProc_Blend : nullptr, // suppress unused warning
+ false ? (RowProc)BW_RowProc_Opaque : nullptr, // suppress unused warning
+ (RowProc)A8_RowProc_Blend, (RowProc)A8_RowProc_Opaque,
+ (RowProc)LCD16_RowProc_Blend, (RowProc)LCD16_RowProc_Opaque,
+ };
+
+ int index;
+ switch (ct) {
+ case kN32_SkColorType:
+ switch (format) {
+ case SkMask::kBW_Format: index = 0; break;
+ case SkMask::kA8_Format: index = 2; break;
+ case SkMask::kLCD16_Format: index = 4; break;
+ default:
+ return nullptr;
+ }
+ if (flags & kSrcIsOpaque_RowFlag) {
+ index |= 1;
+ }
+ SkASSERT((size_t)index < SK_ARRAY_COUNT(gProcs));
+ return gProcs[index];
+ default:
+ break;
+ }
+ return nullptr;
+}
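
Two pieces of arithmetic above are worth spelling out: upscale31To255() replicates the top bits of a 5-bit coverage value so that 0 maps to 0 and 31 maps to 255 exactly, and src_alpha_blend() applies per-subpixel source-over weighted by that coverage. The sketch below restates both with a plain (a * b) >> 8 multiply; it approximates SkAlphaMul for readability rather than matching it bit for bit, and the input values are illustrative.

#include <cstdio>

// Standalone check of the 5-bit -> 8-bit mask expansion and the per-subpixel
// LCD blend used above.
static int upscale31To255(int v) { return (v << 3) | (v >> 2); }  // 0 -> 0, 31 -> 255

static int mul8(int a, int b) { return (a * b) >> 8; }            // shape of SkAlphaMul

static int src_alpha_blend(int src, int dst, int srcA, int mask) {
    // dst + mask*(src - srcA*dst): the LCD coverage 'mask' selects how much of
    // the source-over result replaces the destination on this channel.
    return dst + mul8(src - mul8(srcA, dst), mask);
}

int main() {
    std::printf("%d %d %d\n", upscale31To255(0), upscale31To255(16), upscale31To255(31));
    // Fully opaque source channel (srcA on the 0..256 scale) at half LCD coverage.
    std::printf("blended R = %d\n", src_alpha_blend(200, 50, 256, 128));  // ~= 125
    return 0;
}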
diff --git a/gfx/skia/skia/src/core/SkBlitRow_D16.cpp b/gfx/skia/skia/src/core/SkBlitRow_D16.cpp
new file mode 100644
index 000000000..648e0ea8b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitRow_D16.cpp
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBlitRow.h"
+#include "SkColorPriv.h"
+#include "SkDither.h"
+#include "SkMathPriv.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void S32_D565_Opaque(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src, int count,
+ U8CPU alpha, int /*x*/, int /*y*/) {
+ SkASSERT(255 == alpha);
+
+ if (count > 0) {
+ do {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+ *dst++ = SkPixel32ToPixel16_ToU16(c);
+ } while (--count != 0);
+ }
+}
+
+static void S32_D565_Blend(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src, int count,
+ U8CPU alpha, int /*x*/, int /*y*/) {
+ SkASSERT(255 > alpha);
+
+ if (count > 0) {
+ int scale = SkAlpha255To256(alpha);
+ do {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+ uint16_t d = *dst;
+ *dst++ = SkPackRGB16(
+ SkAlphaBlend(SkPacked32ToR16(c), SkGetPackedR16(d), scale),
+ SkAlphaBlend(SkPacked32ToG16(c), SkGetPackedG16(d), scale),
+ SkAlphaBlend(SkPacked32ToB16(c), SkGetPackedB16(d), scale));
+ } while (--count != 0);
+ }
+}
+
+static void S32A_D565_Opaque(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src, int count,
+ U8CPU alpha, int /*x*/, int /*y*/) {
+ SkASSERT(255 == alpha);
+
+ if (count > 0) {
+ do {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+// if (__builtin_expect(c!=0, 1))
+ if (c) {
+ *dst = SkSrcOver32To16(c, *dst);
+ }
+ dst += 1;
+ } while (--count != 0);
+ }
+}
+
+static void S32A_D565_Blend(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src, int count,
+ U8CPU alpha, int /*x*/, int /*y*/) {
+ SkASSERT(255 > alpha);
+
+ if (count > 0) {
+ do {
+ SkPMColor sc = *src++;
+ SkPMColorAssert(sc);
+ if (sc) {
+ uint16_t dc = *dst;
+ SkPMColor res = SkBlendARGB32(sc, SkPixel16ToPixel32(dc), alpha);
+ *dst = SkPixel32ToPixel16(res);
+ }
+ dst += 1;
+ } while (--count != 0);
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+
+static void S32_D565_Opaque_Dither(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha, int x, int y) {
+ SkASSERT(255 == alpha);
+
+ if (count > 0) {
+ DITHER_565_SCAN(y);
+ do {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+
+ unsigned dither = DITHER_VALUE(x);
+ *dst++ = SkDitherRGB32To565(c, dither);
+ DITHER_INC_X(x);
+ } while (--count != 0);
+ }
+}
+
+static void S32_D565_Blend_Dither(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha, int x, int y) {
+ SkASSERT(255 > alpha);
+
+ if (count > 0) {
+ int scale = SkAlpha255To256(alpha);
+ DITHER_565_SCAN(y);
+ do {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+
+ int dither = DITHER_VALUE(x);
+ int sr = SkGetPackedR32(c);
+ int sg = SkGetPackedG32(c);
+ int sb = SkGetPackedB32(c);
+ sr = SkDITHER_R32To565(sr, dither);
+ sg = SkDITHER_G32To565(sg, dither);
+ sb = SkDITHER_B32To565(sb, dither);
+
+ uint16_t d = *dst;
+ *dst++ = SkPackRGB16(SkAlphaBlend(sr, SkGetPackedR16(d), scale),
+ SkAlphaBlend(sg, SkGetPackedG16(d), scale),
+ SkAlphaBlend(sb, SkGetPackedB16(d), scale));
+ DITHER_INC_X(x);
+ } while (--count != 0);
+ }
+}
+
+static void S32A_D565_Opaque_Dither(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha, int x, int y) {
+ SkASSERT(255 == alpha);
+
+ if (count > 0) {
+ DITHER_565_SCAN(y);
+ do {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+ if (c) {
+ unsigned a = SkGetPackedA32(c);
+
+ int d = SkAlphaMul(DITHER_VALUE(x), SkAlpha255To256(a));
+
+ unsigned sr = SkGetPackedR32(c);
+ unsigned sg = SkGetPackedG32(c);
+ unsigned sb = SkGetPackedB32(c);
+ sr = SkDITHER_R32_FOR_565(sr, d);
+ sg = SkDITHER_G32_FOR_565(sg, d);
+ sb = SkDITHER_B32_FOR_565(sb, d);
+
+ uint32_t src_expanded = (sg << 24) | (sr << 13) | (sb << 2);
+ uint32_t dst_expanded = SkExpand_rgb_16(*dst);
+ dst_expanded = dst_expanded * (SkAlpha255To256(255 - a) >> 3);
+ // now src and dst expanded are in g:11 r:10 x:1 b:10
+ *dst = SkCompact_rgb_16((src_expanded + dst_expanded) >> 5);
+ }
+ dst += 1;
+ DITHER_INC_X(x);
+ } while (--count != 0);
+ }
+}
+
+static void S32A_D565_Blend_Dither(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha, int x, int y) {
+ SkASSERT(255 > alpha);
+
+ if (count > 0) {
+ int src_scale = SkAlpha255To256(alpha);
+ DITHER_565_SCAN(y);
+ do {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+ if (c)
+ {
+ unsigned d = *dst;
+ int sa = SkGetPackedA32(c);
+ int dst_scale = SkAlphaMulInv256(sa, src_scale);
+ int dither = DITHER_VALUE(x);
+
+ int sr = SkGetPackedR32(c);
+ int sg = SkGetPackedG32(c);
+ int sb = SkGetPackedB32(c);
+ sr = SkDITHER_R32To565(sr, dither);
+ sg = SkDITHER_G32To565(sg, dither);
+ sb = SkDITHER_B32To565(sb, dither);
+
+ int dr = (sr * src_scale + SkGetPackedR16(d) * dst_scale) >> 8;
+ int dg = (sg * src_scale + SkGetPackedG16(d) * dst_scale) >> 8;
+ int db = (sb * src_scale + SkGetPackedB16(d) * dst_scale) >> 8;
+
+ *dst = SkPackRGB16(dr, dg, db);
+ }
+ dst += 1;
+ DITHER_INC_X(x);
+ } while (--count != 0);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static uint32_t pmcolor_to_expand16(SkPMColor c) {
+ unsigned r = SkGetPackedR32(c);
+ unsigned g = SkGetPackedG32(c);
+ unsigned b = SkGetPackedB32(c);
+ return (g << 24) | (r << 13) | (b << 2);
+}
+
+static void Color32A_D565(uint16_t dst[], SkPMColor src, int count, int x, int y) {
+ SkASSERT(count > 0);
+ uint32_t src_expand = pmcolor_to_expand16(src);
+ unsigned scale = SkAlpha255To256(0xFF - SkGetPackedA32(src)) >> 3;
+ do {
+ *dst = SkBlend32_RGB16(src_expand, *dst, scale);
+ dst += 1;
+ } while (--count != 0);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+static const SkBlitRow::Proc16 gDefault_565_Procs[] = {
+ // no dither
+ S32_D565_Opaque,
+ S32_D565_Blend,
+
+ S32A_D565_Opaque,
+ S32A_D565_Blend,
+
+ // dither
+ S32_D565_Opaque_Dither,
+ S32_D565_Blend_Dither,
+
+ S32A_D565_Opaque_Dither,
+ S32A_D565_Blend_Dither
+};
+
+SkBlitRow::Proc16 SkBlitRow::Factory16(unsigned flags) {
+ SkASSERT(flags < SK_ARRAY_COUNT(gDefault_565_Procs));
+ // just so we don't crash
+ flags &= kFlags16_Mask;
+
+ SkBlitRow::Proc16 proc = PlatformFactory565(flags);
+ if (nullptr == proc) {
+ proc = gDefault_565_Procs[flags];
+ }
+ return proc;
+}
+
+static const SkBlitRow::ColorProc16 gDefault_565_ColorProcs[] = {
+#if 0
+ Color32A_D565,
+ Color32A_D565_Dither
+#else
+ // TODO: stop cheating and fill dither from the above specializations!
+ Color32A_D565,
+ Color32A_D565,
+#endif
+};
+
+SkBlitRow::ColorProc16 SkBlitRow::ColorFactory16(unsigned flags) {
+ SkASSERT((flags & ~kFlags16_Mask) == 0);
+ // just so we don't crash
+ flags &= kFlags16_Mask;
+ // we ignore both kGlobalAlpha_Flag and kSrcPixelAlpha_Flag, so shift down
+ // no need for the additional code specializing on opaque alpha at this time
+ flags >>= 2;
+
+ SkASSERT(flags < SK_ARRAY_COUNT(gDefault_565_ColorProcs));
+
+ SkBlitRow::ColorProc16 proc = PlatformColorFactory565(flags);
+ if (nullptr == proc) {
+ proc = gDefault_565_ColorProcs[flags];
+ }
+ return proc;
+}
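
Color32A_D565() relies on the "expanded 565" trick referenced by the g:11 r:10 x:1 b:10 comment: the three 565 fields are spread apart in a 32-bit word so that a single multiply by a 0..32 weight blends all channels at once without carries between fields. The standalone restatement below uses its own expand/compact helpers rather than SkExpand_rgb_16 / SkCompact_rgb_16, but the field layout and the final >> 5 follow the same idea.

#include <cstdint>
#include <cstdio>

// Move green up 16 bits so each field has headroom, multiply the whole word by
// a 0..32 weight, add, shift back down by 5, and mask the fields back out.
static uint32_t expand565(uint16_t c) {
    // r (bits 11..15) and b (bits 0..4) stay in place; g (bits 5..10) moves up 16.
    return (c & 0xF81Fu) | ((uint32_t)(c & 0x07E0u) << 16);
}

static uint16_t compact565(uint32_t e) {
    return (uint16_t)((e & 0xF81Fu) | ((e >> 16) & 0x07E0u));
}

static uint16_t blend565(uint16_t src, uint16_t dst, unsigned scale /* 0..32 */) {
    uint32_t e = expand565(src) * scale + expand565(dst) * (32 - scale);
    return compact565(e >> 5);
}

int main() {
    uint16_t dst = 0x07E0;                                     // pure green in 565
    uint16_t src = 0xF800;                                     // pure red in 565
    std::printf("blend = 0x%04X\n", blend565(src, dst, 8));    // 25% red over green
    return 0;
}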
diff --git a/gfx/skia/skia/src/core/SkBlitRow_D32.cpp b/gfx/skia/skia/src/core/SkBlitRow_D32.cpp
new file mode 100644
index 000000000..9494557d9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitRow_D32.cpp
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBlitRow.h"
+#include "SkBlitMask.h"
+#include "SkColorPriv.h"
+#include "SkOpts.h"
+#include "SkUtils.h"
+
+#define UNROLL
+
+static void S32_Opaque_BlitRow32(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha) {
+ SkASSERT(255 == alpha);
+ memcpy(dst, src, count * 4);
+}
+
+static void S32_Blend_BlitRow32(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha) {
+ SkASSERT(alpha <= 255);
+ if (count > 0) {
+ unsigned src_scale = SkAlpha255To256(alpha);
+
+#ifdef UNROLL
+ if (count & 1) {
+ *dst = SkPMLerp(*src, *dst, src_scale);
+ src += 1;
+ dst += 1;
+ count -= 1;
+ }
+
+ const SkPMColor* SK_RESTRICT srcEnd = src + count;
+ while (src != srcEnd) {
+ *dst = SkPMLerp(*src, *dst, src_scale);
+ src += 1;
+ dst += 1;
+ *dst = SkPMLerp(*src, *dst, src_scale);
+ src += 1;
+ dst += 1;
+ }
+#else
+ do {
+ *dst = SkPMLerp(*src, *dst, src_scale);
+ src += 1;
+ dst += 1;
+ } while (--count > 0);
+#endif
+ }
+}
+
+static void S32A_Blend_BlitRow32(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha) {
+ SkASSERT(alpha <= 255);
+ if (count > 0) {
+#ifdef UNROLL
+ if (count & 1) {
+ *dst = SkBlendARGB32(*(src++), *dst, alpha);
+ dst += 1;
+ count -= 1;
+ }
+
+ const SkPMColor* SK_RESTRICT srcEnd = src + count;
+ while (src != srcEnd) {
+ *dst = SkBlendARGB32(*(src++), *dst, alpha);
+ dst += 1;
+ *dst = SkBlendARGB32(*(src++), *dst, alpha);
+ dst += 1;
+ }
+#else
+ do {
+ *dst = SkBlendARGB32(*src, *dst, alpha);
+ src += 1;
+ dst += 1;
+ } while (--count > 0);
+#endif
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static const SkBlitRow::Proc32 gDefault_Procs32[] = {
+ S32_Opaque_BlitRow32,
+ S32_Blend_BlitRow32,
+ nullptr,
+ S32A_Blend_BlitRow32
+};
+
+SkBlitRow::Proc32 SkBlitRow::Factory32(unsigned flags) {
+ SkASSERT(flags < SK_ARRAY_COUNT(gDefault_Procs32));
+ // just so we don't crash
+ flags &= kFlags32_Mask;
+
+ if (flags == 2) {
+ // S32A_Opaque_BlitRow32 has been ported to SkOpts, but not the others yet.
+ return SkOpts::blit_row_s32a_opaque;
+ }
+
+ SkBlitRow::Proc32 proc = PlatformProcs32(flags);
+ if (nullptr == proc) {
+ proc = gDefault_Procs32[flags];
+ }
+ SkASSERT(proc);
+ return proc;
+}
+
+void SkBlitRow::Color32(SkPMColor dst[], const SkPMColor src[], int count, SkPMColor color) {
+ switch (SkGetPackedA32(color)) {
+ case 0: memmove(dst, src, count * sizeof(SkPMColor)); return;
+ case 255: sk_memset32(dst, color, count); return;
+ }
+ return SkOpts::blit_row_color32(dst, src, count, color);
+}
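
The special cases in SkBlitRow::Color32 fall out of the general operation: a constant premultiplied color blended source-over onto each source pixel, so alpha 0 degenerates to a straight copy and alpha 255 to a solid fill. The sketch below spells out the general loop with a plain-integer channel multiply; it approximates the optimized SkOpts routine rather than reproducing it exactly.

#include <cstdint>
#include <cstdio>

// Scale all four 8-bit channels of a premultiplied pixel by alpha (0..255).
// Plain-integer stand-in for Skia's quad-channel multiply; approximate, not exact.
static uint32_t mulDiv255(uint32_t pixel, unsigned alpha) {
    uint32_t rb = ((pixel & 0x00FF00FF) * alpha) >> 8 & 0x00FF00FF;
    uint32_t ag = ((pixel >> 8 & 0x00FF00FF) * alpha) >> 8 << 8 & 0xFF00FF00;
    return rb | ag;
}

// Constant premultiplied 'color' blended source-over onto each pixel of 'src'.
static void color32_row(uint32_t dst[], const uint32_t src[], int count, uint32_t color) {
    unsigned invA = 255 - (color >> 24);           // weight left for the source pixel
    for (int i = 0; i < count; ++i) {
        dst[i] = color + mulDiv255(src[i], invA);  // premultiplied source-over
    }
}

int main() {
    uint32_t src[2] = {0xFF0000FF, 0xFF00FF00};    // opaque blue, opaque green (ARGB)
    uint32_t dst[2];
    color32_row(dst, src, 2, 0x80800000);          // 50%-alpha premultiplied red
    std::printf("%08X %08X\n", (unsigned)dst[0], (unsigned)dst[1]);
    return 0;
}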
diff --git a/gfx/skia/skia/src/core/SkBlitter.cpp b/gfx/skia/skia/src/core/SkBlitter.cpp
new file mode 100644
index 000000000..ce689d7e8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitter.cpp
@@ -0,0 +1,1018 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBlitter.h"
+#include "SkAntiRun.h"
+#include "SkColor.h"
+#include "SkColorFilter.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+#include "SkMask.h"
+#include "SkMaskFilter.h"
+#include "SkString.h"
+#include "SkTLazy.h"
+#include "SkUtils.h"
+#include "SkXfermode.h"
+#include "SkXfermodeInterpretation.h"
+
+// define this for testing srgb blits
+//#define SK_FORCE_PM4f_FOR_L32_BLITS
+
+SkBlitter::~SkBlitter() {}
+
+bool SkBlitter::isNullBlitter() const { return false; }
+
+bool SkBlitter::resetShaderContext(const SkShader::ContextRec&) {
+ return true;
+}
+
+SkShader::Context* SkBlitter::getShaderContext() const {
+ return nullptr;
+}
+
+const SkPixmap* SkBlitter::justAnOpaqueColor(uint32_t* value) {
+ return nullptr;
+}
+
+/*
+void SkBlitter::blitH(int x, int y, int width) {
+ SkDEBUGFAIL("unimplemented");
+}
+
+
+void SkBlitter::blitAntiH(int x, int y, const SkAlpha antialias[],
+ const int16_t runs[]) {
+ SkDEBUGFAIL("unimplemented");
+}
+ */
+
+void SkBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ if (alpha == 255) {
+ this->blitRect(x, y, 1, height);
+ } else {
+ int16_t runs[2];
+ runs[0] = 1;
+ runs[1] = 0;
+
+ while (--height >= 0) {
+ this->blitAntiH(x, y++, &alpha, runs);
+ }
+ }
+}
+
+void SkBlitter::blitRect(int x, int y, int width, int height) {
+ SkASSERT(width > 0);
+ while (--height >= 0) {
+ this->blitH(x, y++, width);
+ }
+}
+
+/// Default implementation doesn't check for any easy optimizations
+/// such as alpha == 0 or 255; also uses blitV(), which some subclasses
+/// may not support.
+void SkBlitter::blitAntiRect(int x, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) {
+ this->blitV(x++, y, height, leftAlpha);
+ if (width > 0) {
+ this->blitRect(x, y, width, height);
+ x += width;
+ }
+ this->blitV(x, y, height, rightAlpha);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+static inline void bits_to_runs(SkBlitter* blitter, int x, int y,
+ const uint8_t bits[],
+ uint8_t left_mask, ptrdiff_t rowBytes,
+ uint8_t right_mask) {
+ int inFill = 0;
+ int pos = 0;
+
+ while (--rowBytes >= 0) {
+ uint8_t b = *bits++ & left_mask;
+ if (rowBytes == 0) {
+ b &= right_mask;
+ }
+
+ for (uint8_t test = 0x80U; test != 0; test >>= 1) {
+ if (b & test) {
+ if (!inFill) {
+ pos = x;
+ inFill = true;
+ }
+ } else {
+ if (inFill) {
+ blitter->blitH(pos, y, x - pos);
+ inFill = false;
+ }
+ }
+ x += 1;
+ }
+ left_mask = 0xFFU;
+ }
+
+ // final cleanup
+ if (inFill) {
+ blitter->blitH(pos, y, x - pos);
+ }
+}
+
+// maskBitCount is the number of 1's to place in the mask. It must be between 1 and 8.
+static uint8_t generate_right_mask(int maskBitCount) {
+ return static_cast<uint8_t>(0xFF00U >> maskBitCount);
+}
+
+void SkBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ SkASSERT(mask.fBounds.contains(clip));
+
+ if (mask.fFormat == SkMask::kLCD16_Format) {
+ return; // needs to be handled by subclass
+ }
+
+ if (mask.fFormat == SkMask::kBW_Format) {
+ int cx = clip.fLeft;
+ int cy = clip.fTop;
+ int maskLeft = mask.fBounds.fLeft;
+ int maskRowBytes = mask.fRowBytes;
+ int height = clip.height();
+
+ const uint8_t* bits = mask.getAddr1(cx, cy);
+
+ SkDEBUGCODE(const uint8_t* endOfImage =
+ mask.fImage + (mask.fBounds.height() - 1) * maskRowBytes
+ + ((mask.fBounds.width() + 7) >> 3));
+
+ if (cx == maskLeft && clip.fRight == mask.fBounds.fRight) {
+ while (--height >= 0) {
+ int affectedRightBit = mask.fBounds.width() - 1;
+ ptrdiff_t rowBytes = (affectedRightBit >> 3) + 1;
+ SkASSERT(bits + rowBytes <= endOfImage);
+ U8CPU rightMask = generate_right_mask((affectedRightBit & 7) + 1);
+ bits_to_runs(this, cx, cy, bits, 0xFF, rowBytes, rightMask);
+ bits += maskRowBytes;
+ cy += 1;
+ }
+ } else {
+            // 'bits' is calculated as the offset into the mask at the point {cx, cy}; therefore, all
+            // addressing into the bit mask is relative to that point. Since this address was
+            // calculated from an arbitrary bit in that byte, compute the leftmost bit.
+ int bitsLeft = cx - ((cx - maskLeft) & 7);
+
+ // Everything is relative to the bitsLeft.
+ int leftEdge = cx - bitsLeft;
+ SkASSERT(leftEdge >= 0);
+ int rightEdge = clip.fRight - bitsLeft;
+ SkASSERT(rightEdge > leftEdge);
+
+ // Calculate left byte and mask
+ const uint8_t* leftByte = bits;
+ U8CPU leftMask = 0xFFU >> (leftEdge & 7);
+
+ // Calculate right byte and mask
+ int affectedRightBit = rightEdge - 1;
+ const uint8_t* rightByte = bits + (affectedRightBit >> 3);
+ U8CPU rightMask = generate_right_mask((affectedRightBit & 7) + 1);
+
+            // leftByte and rightByte are byte locations; therefore, to get a count of bytes the
+            // code must add one.
+ ptrdiff_t rowBytes = rightByte - leftByte + 1;
+
+ while (--height >= 0) {
+ SkASSERT(bits + rowBytes <= endOfImage);
+ bits_to_runs(this, bitsLeft, cy, bits, leftMask, rowBytes, rightMask);
+ bits += maskRowBytes;
+ cy += 1;
+ }
+ }
+ } else {
+ int width = clip.width();
+ SkAutoSTMalloc<64, int16_t> runStorage(width + 1);
+ int16_t* runs = runStorage.get();
+ const uint8_t* aa = mask.getAddr8(clip.fLeft, clip.fTop);
+
+ sk_memset16((uint16_t*)runs, 1, width);
+ runs[width] = 0;
+
+ int height = clip.height();
+ int y = clip.fTop;
+ while (--height >= 0) {
+ this->blitAntiH(clip.fLeft, y, aa, runs);
+ aa += mask.fRowBytes;
+ y += 1;
+ }
+ }
+}
+
+/////////////////////// these are not virtual, just helpers
+
+void SkBlitter::blitMaskRegion(const SkMask& mask, const SkRegion& clip) {
+ if (clip.quickReject(mask.fBounds)) {
+ return;
+ }
+
+ SkRegion::Cliperator clipper(clip, mask.fBounds);
+
+ while (!clipper.done()) {
+ const SkIRect& cr = clipper.rect();
+ this->blitMask(mask, cr);
+ clipper.next();
+ }
+}
+
+void SkBlitter::blitRectRegion(const SkIRect& rect, const SkRegion& clip) {
+ SkRegion::Cliperator clipper(clip, rect);
+
+ while (!clipper.done()) {
+ const SkIRect& cr = clipper.rect();
+ this->blitRect(cr.fLeft, cr.fTop, cr.width(), cr.height());
+ clipper.next();
+ }
+}
+
+void SkBlitter::blitRegion(const SkRegion& clip) {
+ SkRegion::Iterator iter(clip);
+
+ while (!iter.done()) {
+ const SkIRect& cr = iter.rect();
+ this->blitRect(cr.fLeft, cr.fTop, cr.width(), cr.height());
+ iter.next();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkNullBlitter::blitH(int x, int y, int width) {}
+
+void SkNullBlitter::blitAntiH(int x, int y, const SkAlpha antialias[],
+ const int16_t runs[]) {}
+
+void SkNullBlitter::blitV(int x, int y, int height, SkAlpha alpha) {}
+
+void SkNullBlitter::blitRect(int x, int y, int width, int height) {}
+
+void SkNullBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {}
+
+const SkPixmap* SkNullBlitter::justAnOpaqueColor(uint32_t* value) {
+ return nullptr;
+}
+
+bool SkNullBlitter::isNullBlitter() const { return true; }
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int compute_anti_width(const int16_t runs[]) {
+ int width = 0;
+
+ for (;;) {
+ int count = runs[0];
+
+ SkASSERT(count >= 0);
+ if (count == 0) {
+ break;
+ }
+ width += count;
+ runs += count;
+ }
+ return width;
+}
+
+static inline bool y_in_rect(int y, const SkIRect& rect) {
+ return (unsigned)(y - rect.fTop) < (unsigned)rect.height();
+}
+
+static inline bool x_in_rect(int x, const SkIRect& rect) {
+ return (unsigned)(x - rect.fLeft) < (unsigned)rect.width();
+}
+
+void SkRectClipBlitter::blitH(int left, int y, int width) {
+ SkASSERT(width > 0);
+
+ if (!y_in_rect(y, fClipRect)) {
+ return;
+ }
+
+ int right = left + width;
+
+ if (left < fClipRect.fLeft) {
+ left = fClipRect.fLeft;
+ }
+ if (right > fClipRect.fRight) {
+ right = fClipRect.fRight;
+ }
+
+ width = right - left;
+ if (width > 0) {
+ fBlitter->blitH(left, y, width);
+ }
+}
+
+void SkRectClipBlitter::blitAntiH(int left, int y, const SkAlpha aa[],
+ const int16_t runs[]) {
+ if (!y_in_rect(y, fClipRect) || left >= fClipRect.fRight) {
+ return;
+ }
+
+ int x0 = left;
+ int x1 = left + compute_anti_width(runs);
+
+ if (x1 <= fClipRect.fLeft) {
+ return;
+ }
+
+ SkASSERT(x0 < x1);
+ if (x0 < fClipRect.fLeft) {
+ int dx = fClipRect.fLeft - x0;
+ SkAlphaRuns::BreakAt((int16_t*)runs, (uint8_t*)aa, dx);
+ runs += dx;
+ aa += dx;
+ x0 = fClipRect.fLeft;
+ }
+
+ SkASSERT(x0 < x1 && runs[x1 - x0] == 0);
+ if (x1 > fClipRect.fRight) {
+ x1 = fClipRect.fRight;
+ SkAlphaRuns::BreakAt((int16_t*)runs, (uint8_t*)aa, x1 - x0);
+ ((int16_t*)runs)[x1 - x0] = 0;
+ }
+
+ SkASSERT(x0 < x1 && runs[x1 - x0] == 0);
+ SkASSERT(compute_anti_width(runs) == x1 - x0);
+
+ fBlitter->blitAntiH(x0, y, aa, runs);
+}
+
+void SkRectClipBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ SkASSERT(height > 0);
+
+ if (!x_in_rect(x, fClipRect)) {
+ return;
+ }
+
+ int y0 = y;
+ int y1 = y + height;
+
+ if (y0 < fClipRect.fTop) {
+ y0 = fClipRect.fTop;
+ }
+ if (y1 > fClipRect.fBottom) {
+ y1 = fClipRect.fBottom;
+ }
+
+ if (y0 < y1) {
+ fBlitter->blitV(x, y0, y1 - y0, alpha);
+ }
+}
+
+void SkRectClipBlitter::blitRect(int left, int y, int width, int height) {
+ SkIRect r;
+
+ r.set(left, y, left + width, y + height);
+ if (r.intersect(fClipRect)) {
+ fBlitter->blitRect(r.fLeft, r.fTop, r.width(), r.height());
+ }
+}
+
+void SkRectClipBlitter::blitAntiRect(int left, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) {
+ SkIRect r;
+
+ // The *true* width of the rectangle blitted is width+2:
+ r.set(left, y, left + width + 2, y + height);
+ if (r.intersect(fClipRect)) {
+ if (r.fLeft != left) {
+ SkASSERT(r.fLeft > left);
+ leftAlpha = 255;
+ }
+ if (r.fRight != left + width + 2) {
+ SkASSERT(r.fRight < left + width + 2);
+ rightAlpha = 255;
+ }
+ if (255 == leftAlpha && 255 == rightAlpha) {
+ fBlitter->blitRect(r.fLeft, r.fTop, r.width(), r.height());
+ } else if (1 == r.width()) {
+ if (r.fLeft == left) {
+ fBlitter->blitV(r.fLeft, r.fTop, r.height(), leftAlpha);
+ } else {
+ SkASSERT(r.fLeft == left + width + 1);
+ fBlitter->blitV(r.fLeft, r.fTop, r.height(), rightAlpha);
+ }
+ } else {
+ fBlitter->blitAntiRect(r.fLeft, r.fTop, r.width() - 2, r.height(),
+ leftAlpha, rightAlpha);
+ }
+ }
+}
+
+void SkRectClipBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ SkASSERT(mask.fBounds.contains(clip));
+
+ SkIRect r = clip;
+
+ if (r.intersect(fClipRect)) {
+ fBlitter->blitMask(mask, r);
+ }
+}
+
+const SkPixmap* SkRectClipBlitter::justAnOpaqueColor(uint32_t* value) {
+ return fBlitter->justAnOpaqueColor(value);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkRgnClipBlitter::blitH(int x, int y, int width) {
+ SkRegion::Spanerator span(*fRgn, y, x, x + width);
+ int left, right;
+
+ while (span.next(&left, &right)) {
+ SkASSERT(left < right);
+ fBlitter->blitH(left, y, right - left);
+ }
+}
+
+void SkRgnClipBlitter::blitAntiH(int x, int y, const SkAlpha aa[],
+ const int16_t runs[]) {
+ int width = compute_anti_width(runs);
+ SkRegion::Spanerator span(*fRgn, y, x, x + width);
+ int left, right;
+ SkDEBUGCODE(const SkIRect& bounds = fRgn->getBounds();)
+
+ int prevRite = x;
+ while (span.next(&left, &right)) {
+ SkASSERT(x <= left);
+ SkASSERT(left < right);
+ SkASSERT(left >= bounds.fLeft && right <= bounds.fRight);
+
+ SkAlphaRuns::Break((int16_t*)runs, (uint8_t*)aa, left - x, right - left);
+
+ // now zero before left
+ if (left > prevRite) {
+ int index = prevRite - x;
+            ((uint8_t*)aa)[index] = 0; // insert a zero-alpha run to cover the gap
+ ((int16_t*)runs)[index] = SkToS16(left - prevRite);
+ }
+
+ prevRite = right;
+ }
+
+ if (prevRite > x) {
+ ((int16_t*)runs)[prevRite - x] = 0;
+
+ if (x < 0) {
+ int skip = runs[0];
+ SkASSERT(skip >= -x);
+ aa += skip;
+ runs += skip;
+ x += skip;
+ }
+ fBlitter->blitAntiH(x, y, aa, runs);
+ }
+}
+
+void SkRgnClipBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ SkIRect bounds;
+ bounds.set(x, y, x + 1, y + height);
+
+ SkRegion::Cliperator iter(*fRgn, bounds);
+
+ while (!iter.done()) {
+ const SkIRect& r = iter.rect();
+ SkASSERT(bounds.contains(r));
+
+ fBlitter->blitV(x, r.fTop, r.height(), alpha);
+ iter.next();
+ }
+}
+
+void SkRgnClipBlitter::blitRect(int x, int y, int width, int height) {
+ SkIRect bounds;
+ bounds.set(x, y, x + width, y + height);
+
+ SkRegion::Cliperator iter(*fRgn, bounds);
+
+ while (!iter.done()) {
+ const SkIRect& r = iter.rect();
+ SkASSERT(bounds.contains(r));
+
+ fBlitter->blitRect(r.fLeft, r.fTop, r.width(), r.height());
+ iter.next();
+ }
+}
+
+void SkRgnClipBlitter::blitAntiRect(int x, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) {
+ // The *true* width of the rectangle to blit is width + 2
+ SkIRect bounds;
+ bounds.set(x, y, x + width + 2, y + height);
+
+ SkRegion::Cliperator iter(*fRgn, bounds);
+
+ while (!iter.done()) {
+ const SkIRect& r = iter.rect();
+ SkASSERT(bounds.contains(r));
+ SkASSERT(r.fLeft >= x);
+ SkASSERT(r.fRight <= x + width + 2);
+
+ SkAlpha effectiveLeftAlpha = (r.fLeft == x) ? leftAlpha : 255;
+ SkAlpha effectiveRightAlpha = (r.fRight == x + width + 2) ?
+ rightAlpha : 255;
+
+ if (255 == effectiveLeftAlpha && 255 == effectiveRightAlpha) {
+ fBlitter->blitRect(r.fLeft, r.fTop, r.width(), r.height());
+ } else if (1 == r.width()) {
+ if (r.fLeft == x) {
+ fBlitter->blitV(r.fLeft, r.fTop, r.height(),
+ effectiveLeftAlpha);
+ } else {
+ SkASSERT(r.fLeft == x + width + 1);
+ fBlitter->blitV(r.fLeft, r.fTop, r.height(),
+ effectiveRightAlpha);
+ }
+ } else {
+ fBlitter->blitAntiRect(r.fLeft, r.fTop, r.width() - 2, r.height(),
+ effectiveLeftAlpha, effectiveRightAlpha);
+ }
+ iter.next();
+ }
+}
+
+
+void SkRgnClipBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ SkASSERT(mask.fBounds.contains(clip));
+
+ SkRegion::Cliperator iter(*fRgn, clip);
+ const SkIRect& r = iter.rect();
+ SkBlitter* blitter = fBlitter;
+
+ while (!iter.done()) {
+ blitter->blitMask(mask, r);
+ iter.next();
+ }
+}
+
+const SkPixmap* SkRgnClipBlitter::justAnOpaqueColor(uint32_t* value) {
+ return fBlitter->justAnOpaqueColor(value);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkBlitter* SkBlitterClipper::apply(SkBlitter* blitter, const SkRegion* clip,
+ const SkIRect* ir) {
+ if (clip) {
+ const SkIRect& clipR = clip->getBounds();
+
+ if (clip->isEmpty() || (ir && !SkIRect::Intersects(clipR, *ir))) {
+ blitter = &fNullBlitter;
+ } else if (clip->isRect()) {
+ if (ir == nullptr || !clipR.contains(*ir)) {
+ fRectBlitter.init(blitter, clipR);
+ blitter = &fRectBlitter;
+ }
+ } else {
+ fRgnBlitter.init(blitter, clip);
+ blitter = &fRgnBlitter;
+ }
+ }
+ return blitter;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkColorShader.h"
+#include "SkColorPriv.h"
+
+class Sk3DShader : public SkShader {
+public:
+ Sk3DShader(sk_sp<SkShader> proxy) : fProxy(std::move(proxy)) {}
+
+ size_t onContextSize(const ContextRec& rec) const override {
+ size_t size = sizeof(Sk3DShaderContext);
+ if (fProxy) {
+ size += fProxy->contextSize(rec);
+ }
+ return size;
+ }
+
+ Context* onCreateContext(const ContextRec& rec, void* storage) const override {
+ SkShader::Context* proxyContext = nullptr;
+ if (fProxy) {
+ char* proxyContextStorage = (char*) storage + sizeof(Sk3DShaderContext);
+ proxyContext = fProxy->createContext(rec, proxyContextStorage);
+ if (!proxyContext) {
+ return nullptr;
+ }
+ }
+ return new (storage) Sk3DShaderContext(*this, rec, proxyContext);
+ }
+
+ class Sk3DShaderContext : public SkShader::Context {
+ public:
+ // Calls proxyContext's destructor but will NOT free its memory.
+ Sk3DShaderContext(const Sk3DShader& shader, const ContextRec& rec,
+ SkShader::Context* proxyContext)
+ : INHERITED(shader, rec)
+ , fMask(nullptr)
+ , fProxyContext(proxyContext)
+ {
+ if (!fProxyContext) {
+ fPMColor = SkPreMultiplyColor(rec.fPaint->getColor());
+ }
+ }
+
+ virtual ~Sk3DShaderContext() {
+ if (fProxyContext) {
+ fProxyContext->~Context();
+ }
+ }
+
+ void set3DMask(const SkMask* mask) override { fMask = mask; }
+
+ void shadeSpan(int x, int y, SkPMColor span[], int count) override {
+ if (fProxyContext) {
+ fProxyContext->shadeSpan(x, y, span, count);
+ }
+
+ if (fMask == nullptr) {
+ if (fProxyContext == nullptr) {
+ sk_memset32(span, fPMColor, count);
+ }
+ return;
+ }
+
+ SkASSERT(fMask->fBounds.contains(x, y));
+ SkASSERT(fMask->fBounds.contains(x + count - 1, y));
+
+ size_t size = fMask->computeImageSize();
+ const uint8_t* alpha = fMask->getAddr8(x, y);
+ const uint8_t* mulp = alpha + size;
+ const uint8_t* addp = mulp + size;
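+            // A k3D_Format mask stores three 8-bit planes back to back, each of
+            // computeImageSize() bytes: coverage, then a per-pixel multiply, then a
+            // per-pixel add.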
+
+ if (fProxyContext) {
+ for (int i = 0; i < count; i++) {
+ if (alpha[i]) {
+ SkPMColor c = span[i];
+ if (c) {
+ unsigned a = SkGetPackedA32(c);
+ unsigned r = SkGetPackedR32(c);
+ unsigned g = SkGetPackedG32(c);
+ unsigned b = SkGetPackedB32(c);
+
+ unsigned mul = SkAlpha255To256(mulp[i]);
+ unsigned add = addp[i];
+
+ r = SkFastMin32(SkAlphaMul(r, mul) + add, a);
+ g = SkFastMin32(SkAlphaMul(g, mul) + add, a);
+ b = SkFastMin32(SkAlphaMul(b, mul) + add, a);
+
+ span[i] = SkPackARGB32(a, r, g, b);
+ }
+ } else {
+ span[i] = 0;
+ }
+ }
+ } else { // color
+ unsigned a = SkGetPackedA32(fPMColor);
+ unsigned r = SkGetPackedR32(fPMColor);
+ unsigned g = SkGetPackedG32(fPMColor);
+ unsigned b = SkGetPackedB32(fPMColor);
+ for (int i = 0; i < count; i++) {
+ if (alpha[i]) {
+ unsigned mul = SkAlpha255To256(mulp[i]);
+ unsigned add = addp[i];
+
+ span[i] = SkPackARGB32( a,
+ SkFastMin32(SkAlphaMul(r, mul) + add, a),
+ SkFastMin32(SkAlphaMul(g, mul) + add, a),
+ SkFastMin32(SkAlphaMul(b, mul) + add, a));
+ } else {
+ span[i] = 0;
+ }
+ }
+ }
+ }
+
+ private:
+ // Unowned.
+ const SkMask* fMask;
+ // Memory is unowned, but we need to call the destructor.
+ SkShader::Context* fProxyContext;
+ SkPMColor fPMColor;
+
+ typedef SkShader::Context INHERITED;
+ };
+
+#ifndef SK_IGNORE_TO_STRING
+ void toString(SkString* str) const override {
+ str->append("Sk3DShader: (");
+
+ if (fProxy) {
+ str->append("Proxy: ");
+ fProxy->toString(str);
+ }
+
+ this->INHERITED::toString(str);
+
+ str->append(")");
+ }
+#endif
+
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(Sk3DShader)
+
+protected:
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.writeFlattenable(fProxy.get());
+ }
+
+private:
+ sk_sp<SkShader> fProxy;
+
+ typedef SkShader INHERITED;
+};
+
+sk_sp<SkFlattenable> Sk3DShader::CreateProc(SkReadBuffer& buffer) {
+ return sk_make_sp<Sk3DShader>(buffer.readShader());
+}
+
+class Sk3DBlitter : public SkBlitter {
+public:
+ Sk3DBlitter(SkBlitter* proxy, SkShader::Context* shaderContext)
+ : fProxy(proxy)
+ , fShaderContext(shaderContext)
+ {}
+
+ void blitH(int x, int y, int width) override {
+ fProxy->blitH(x, y, width);
+ }
+
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override {
+ fProxy->blitAntiH(x, y, antialias, runs);
+ }
+
+ void blitV(int x, int y, int height, SkAlpha alpha) override {
+ fProxy->blitV(x, y, height, alpha);
+ }
+
+ void blitRect(int x, int y, int width, int height) override {
+ fProxy->blitRect(x, y, width, height);
+ }
+
+ void blitMask(const SkMask& mask, const SkIRect& clip) override {
+ if (mask.fFormat == SkMask::k3D_Format) {
+ fShaderContext->set3DMask(&mask);
+
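+            // Temporarily re-tag the 3D mask as plain A8 so the proxy blitter blends using
+            // only the coverage plane; the shader context applies the mul/add planes itself
+            // via set3DMask().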
+ ((SkMask*)&mask)->fFormat = SkMask::kA8_Format;
+ fProxy->blitMask(mask, clip);
+ ((SkMask*)&mask)->fFormat = SkMask::k3D_Format;
+
+ fShaderContext->set3DMask(nullptr);
+ } else {
+ fProxy->blitMask(mask, clip);
+ }
+ }
+
+private:
+ // Both pointers are unowned. They will be deleted by SkSmallAllocator.
+ SkBlitter* fProxy;
+ SkShader::Context* fShaderContext;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkCoreBlitters.h"
+
+SkShader::ContextRec::DstType SkBlitter::PreferredShaderDest(const SkImageInfo& dstInfo) {
+#ifdef SK_FORCE_PM4f_FOR_L32_BLITS
+ return SkShader::ContextRec::kPM4f_DstType;
+#else
+ return (dstInfo.gammaCloseToSRGB() || dstInfo.colorType() == kRGBA_F16_SkColorType)
+ ? SkShader::ContextRec::kPM4f_DstType
+ : SkShader::ContextRec::kPMColor_DstType;
+#endif
+}
+
+SkBlitter* SkBlitter::Choose(const SkPixmap& device,
+ const SkMatrix& matrix,
+ const SkPaint& origPaint,
+ SkTBlitterAllocator* allocator,
+ bool drawCoverage) {
+ SkASSERT(allocator != nullptr);
+
+    // quick check, in case we're being called by a client with a dummy device
+    // (e.g. they have a bounder that always aborts the draw)
+ if (kUnknown_SkColorType == device.colorType() ||
+ (drawCoverage && (kAlpha_8_SkColorType != device.colorType()))) {
+ return allocator->createT<SkNullBlitter>();
+ }
+
+ SkShader* shader = origPaint.getShader();
+ SkColorFilter* cf = origPaint.getColorFilter();
+ SkBlendMode mode = origPaint.getBlendMode();
+ sk_sp<Sk3DShader> shader3D;
+
+ SkTCopyOnFirstWrite<SkPaint> paint(origPaint);
+
+ if (origPaint.getMaskFilter() != nullptr &&
+ origPaint.getMaskFilter()->getFormat() == SkMask::k3D_Format) {
+ shader3D = sk_make_sp<Sk3DShader>(sk_ref_sp(shader));
+        // we know we haven't made a writable copy of the paint yet, so just do it
+ paint.writable()->setShader(shader3D);
+ shader = shader3D.get();
+ }
+
+ if (mode != SkBlendMode::kSrcOver) {
+ bool deviceIsOpaque = kRGB_565_SkColorType == device.colorType();
+ switch (SkInterpretXfermode(*paint, deviceIsOpaque)) {
+ case kSrcOver_SkXfermodeInterpretation:
+ mode = SkBlendMode::kSrcOver;
+ paint.writable()->setBlendMode(mode);
+ break;
+ case kSkipDrawing_SkXfermodeInterpretation:{
+ return allocator->createT<SkNullBlitter>();
+ }
+ default:
+ break;
+ }
+ }
+
+ /*
+ * If the xfermode is CLEAR, then we can completely ignore the installed
+ * color/shader/colorfilter, and just pretend we're SRC + color==0. This
+ * will fall into our optimizations for SRC mode.
+ */
+ if (mode == SkBlendMode::kClear) {
+ SkPaint* p = paint.writable();
+ p->setShader(nullptr);
+ shader = nullptr;
+ p->setColorFilter(nullptr);
+ cf = nullptr;
+ p->setBlendMode(mode = SkBlendMode::kSrc);
+ p->setColor(0);
+ }
+
+ if (SkBlitter* blitter = SkCreateRasterPipelineBlitter(device, *paint, allocator)) {
+ return blitter;
+ }
+
+ if (nullptr == shader) {
+ if (mode != SkBlendMode::kSrcOver) {
+ // xfermodes (and filters) require shaders for our current blitters
+ paint.writable()->setShader(SkShader::MakeColorShader(paint->getColor()));
+ paint.writable()->setAlpha(0xFF);
+ shader = paint->getShader();
+ } else if (cf) {
+ // if no shader && no xfermode, we just apply the colorfilter to
+ // our color and move on.
+ SkPaint* writablePaint = paint.writable();
+ writablePaint->setColor(cf->filterColor(paint->getColor()));
+ writablePaint->setColorFilter(nullptr);
+ cf = nullptr;
+ }
+ }
+
+ if (cf) {
+ SkASSERT(shader);
+ paint.writable()->setShader(shader->makeWithColorFilter(sk_ref_sp(cf)));
+ shader = paint->getShader();
+ // blitters should ignore the presence/absence of a filter, since
+ // if there is one, the shader will take care of it.
+ }
+
+ /*
+ * We create a SkShader::Context object, and store it on the blitter.
+ */
+ SkShader::Context* shaderContext = nullptr;
+ if (shader) {
+ const SkShader::ContextRec rec(*paint, matrix, nullptr,
+ PreferredShaderDest(device.info()));
+ size_t contextSize = shader->contextSize(rec);
+ if (contextSize) {
+ // Try to create the ShaderContext
+ void* storage = allocator->reserveT<SkShader::Context>(contextSize);
+ shaderContext = shader->createContext(rec, storage);
+ if (!shaderContext) {
+ allocator->freeLast();
+ return allocator->createT<SkNullBlitter>();
+ }
+ SkASSERT(shaderContext);
+ SkASSERT((void*) shaderContext == storage);
+ } else {
+ return allocator->createT<SkNullBlitter>();
+ }
+ }
+
+ SkBlitter* blitter = nullptr;
+ switch (device.colorType()) {
+ case kAlpha_8_SkColorType:
+ if (drawCoverage) {
+ SkASSERT(nullptr == shader);
+ SkASSERT(paint->isSrcOver());
+ blitter = allocator->createT<SkA8_Coverage_Blitter>(device, *paint);
+ } else if (shader) {
+ blitter = allocator->createT<SkA8_Shader_Blitter>(device, *paint, shaderContext);
+ } else {
+ blitter = allocator->createT<SkA8_Blitter>(device, *paint);
+ }
+ break;
+
+ case kRGB_565_SkColorType:
+ blitter = SkBlitter_ChooseD565(device, *paint, shaderContext, allocator);
+ break;
+
+ case kN32_SkColorType:
+#ifdef SK_FORCE_PM4f_FOR_L32_BLITS
+ if (true)
+#else
+ if (device.info().gammaCloseToSRGB())
+#endif
+ {
+ blitter = SkBlitter_ARGB32_Create(device, *paint, shaderContext, allocator);
+ } else {
+ if (shader) {
+ blitter = allocator->createT<SkARGB32_Shader_Blitter>(
+ device, *paint, shaderContext);
+ } else if (paint->getColor() == SK_ColorBLACK) {
+ blitter = allocator->createT<SkARGB32_Black_Blitter>(device, *paint);
+ } else if (paint->getAlpha() == 0xFF) {
+ blitter = allocator->createT<SkARGB32_Opaque_Blitter>(device, *paint);
+ } else {
+ blitter = allocator->createT<SkARGB32_Blitter>(device, *paint);
+ }
+ }
+ break;
+
+ case kRGBA_F16_SkColorType:
+ blitter = SkBlitter_F16_Create(device, *paint, shaderContext, allocator);
+ break;
+
+ default:
+ break;
+ }
+
+ if (!blitter) {
+ blitter = allocator->createT<SkNullBlitter>();
+ }
+
+ if (shader3D) {
+ SkBlitter* innerBlitter = blitter;
+ // innerBlitter was allocated by allocator, which will delete it.
+        // We know shaderContext (or one of its proxies) is a Sk3DShaderContext, so we
+        // wrap the blitter to notify it when we see an emboss mask.
+ blitter = allocator->createT<Sk3DBlitter>(innerBlitter, shaderContext);
+ }
+ return blitter;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkZeroShaderContext : public SkShader::Context {
+public:
+ SkZeroShaderContext(const SkShader& shader, const SkShader::ContextRec& rec)
+ // Override rec with the identity matrix, so it is guaranteed to be invertible.
+ : INHERITED(shader, SkShader::ContextRec(*rec.fPaint, SkMatrix::I(), nullptr,
+ rec.fPreferredDstType)) {}
+
+ void shadeSpan(int x, int y, SkPMColor colors[], int count) override {
+ sk_bzero(colors, count * sizeof(SkPMColor));
+ }
+
+private:
+ typedef SkShader::Context INHERITED;
+};
+
+SkShaderBlitter::SkShaderBlitter(const SkPixmap& device, const SkPaint& paint,
+ SkShader::Context* shaderContext)
+ : INHERITED(device)
+ , fShader(paint.getShader())
+ , fShaderContext(shaderContext) {
+ SkASSERT(fShader);
+ SkASSERT(fShaderContext);
+
+ fShader->ref();
+ fShaderFlags = fShaderContext->getFlags();
+ fConstInY = SkToBool(fShaderFlags & SkShader::kConstInY32_Flag);
+}
+
+SkShaderBlitter::~SkShaderBlitter() {
+ fShader->unref();
+}
+
+bool SkShaderBlitter::resetShaderContext(const SkShader::ContextRec& rec) {
+ // Only destroy the old context if we have a new one. We need to ensure to have a
+ // live context in fShaderContext because the storage is owned by an SkSmallAllocator
+ // outside of this class.
+ // The new context will be of the same size as the old one because we use the same
+ // shader to create it. It is therefore safe to re-use the storage.
+ fShaderContext->~Context();
+ SkShader::Context* ctx = fShader->createContext(rec, (void*)fShaderContext);
+ if (nullptr == ctx) {
+ // Need a valid context in fShaderContext's storage, so we can later (or our caller) call
+ // the in-place destructor.
+ new (fShaderContext) SkZeroShaderContext(*fShader, rec);
+ return false;
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkBlitter.h b/gfx/skia/skia/src/core/SkBlitter.h
new file mode 100644
index 000000000..0e5fedd7e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitter.h
@@ -0,0 +1,258 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlitter_DEFINED
+#define SkBlitter_DEFINED
+
+#include "SkBitmapProcShader.h"
+#include "SkColor.h"
+#include "SkRect.h"
+#include "SkRegion.h"
+#include "SkShader.h"
+#include "SkTypes.h"
+
+class SkMatrix;
+class SkPaint;
+class SkPixmap;
+struct SkMask;
+
+/** SkBlitter and its subclasses are responsible for actually writing pixels
+ into memory. Besides efficiency, they handle clipping and antialiasing.
+ A SkBlitter subclass contains all the context needed to generate pixels
+ for the destination and how src/generated pixels map to the destination.
+ The coordinates passed to the blitX calls are in destination pixel space.
+*/
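+// Illustrative sketch only (hypothetical caller): a scan converter typically drives a
+// blitter one scanline at a time, in device coordinates that have already been clipped:
+//     for (int y = r.top(); y < r.bottom(); ++y) {
+//         blitter->blitH(r.left(), y, r.width());
+//     }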
+class SkBlitter {
+public:
+ virtual ~SkBlitter();
+
+ /// Blit a horizontal run of one or more pixels.
+ virtual void blitH(int x, int y, int width) = 0;
+
+ /// Blit a horizontal run of antialiased pixels; runs[] is a *sparse*
+ /// zero-terminated run-length encoding of spans of constant alpha values.
+    /// The runs[] and antialias[] arrays work together to represent long runs of pixels with
+    /// the same alpha. runs[] holds the number of consecutive pixels that share an alpha value,
+    /// and antialias[] holds the coverage value for that run. The encoding is sparse: the runs
+    /// array is zero-terminated and has one entry per pixel plus one, but in most cases only a
+    /// few of the entries contain valid data. An entry in the runs array gives the number of
+    /// pixels (np) that share the same alpha value; the next valid entry is found np entries
+    /// away. For example, if runs[0] = 7, the next valid entry is at runs[7]. The runs array and
+    /// antialias[] are coupled by index, so if the np entry is at runs[45] = 12, the alpha value
+    /// is found at antialias[45] = 0x88, meaning an alpha of 0x88 is used for the 12 pixels
+    /// starting at pixel 45.
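+    /// For illustration (hypothetical values): an 8-pixel span whose first 3 pixels have
+    /// coverage 0x40 and whose remaining 5 pixels are fully covered could be encoded as
+    ///     runs[]      = { 3, ?, ?, 5, ?, ?, ?, ?, 0 }
+    ///     antialias[] = { 0x40, ?, ?, 0xFF, ?, ?, ?, ? }
+    /// where the entries marked '?' are never read.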
+ virtual void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) = 0;
+
+ /// Blit a vertical run of pixels with a constant alpha value.
+ virtual void blitV(int x, int y, int height, SkAlpha alpha);
+
+ /// Blit a solid rectangle one or more pixels wide.
+ virtual void blitRect(int x, int y, int width, int height);
+
+ /** Blit a rectangle with one alpha-blended column on the left,
+ width (zero or more) opaque pixels, and one alpha-blended column
+ on the right.
+ The result will always be at least two pixels wide.
+ */
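+    // Illustration (hypothetical values): blitAntiRect(10, y, 3, h, 0x80, 0x40) blends
+    // column 10 at alpha 0x80, fills columns 11..13 opaquely, and blends column 14 at 0x40.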
+ virtual void blitAntiRect(int x, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha);
+
+ /// Blit a pattern of pixels defined by a rectangle-clipped mask;
+ /// typically used for text.
+ virtual void blitMask(const SkMask&, const SkIRect& clip);
+
+ /** If the blitter just sets a single value for each pixel, return the
+        pixmap it draws into, and assign *value. If not, return nullptr and ignore
+ the value parameter.
+ */
+ virtual const SkPixmap* justAnOpaqueColor(uint32_t* value);
+
+ // (x, y), (x + 1, y)
+ virtual void blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) {
+ int16_t runs[3];
+ uint8_t aa[2];
+
+ runs[0] = 1;
+ runs[1] = 1;
+ runs[2] = 0;
+ aa[0] = SkToU8(a0);
+ aa[1] = SkToU8(a1);
+ this->blitAntiH(x, y, aa, runs);
+ }
+
+ // (x, y), (x, y + 1)
+ virtual void blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) {
+ int16_t runs[2];
+ uint8_t aa[1];
+
+ runs[0] = 1;
+ runs[1] = 0;
+ aa[0] = SkToU8(a0);
+ this->blitAntiH(x, y, aa, runs);
+ // reset in case the clipping blitter modified runs
+ runs[0] = 1;
+ runs[1] = 0;
+ aa[0] = SkToU8(a1);
+ this->blitAntiH(x, y + 1, aa, runs);
+ }
+
+ /**
+ * Special method just to identify the null blitter, which is returned
+ * from Choose() if the request cannot be fulfilled. Default impl
+ * returns false.
+ */
+ virtual bool isNullBlitter() const;
+
+ /**
+ * Special methods for SkShaderBlitter. On all other classes this is a no-op.
+ */
+ virtual bool resetShaderContext(const SkShader::ContextRec&);
+ virtual SkShader::Context* getShaderContext() const;
+
+ /**
+ * Special methods for blitters that can blit more than one row at a time.
+ * This function returns the number of rows that this blitter could optimally
+ * process at a time. It is still required to support blitting one scanline
+ * at a time.
+ */
+ virtual int requestRowsPreserved() const { return 1; }
+
+ /**
+ * This function allocates memory for the blitter that the blitter then owns.
+ * The memory can be used by the calling function at will, but it will be
+ * released when the blitter's destructor is called. This function returns
+ * nullptr if no persistent memory is needed by the blitter.
+ */
+ virtual void* allocBlitMemory(size_t sz) {
+ return fBlitMemory.reset(sz, SkAutoMalloc::kReuse_OnShrink);
+ }
+
+ ///@name non-virtual helpers
+ void blitMaskRegion(const SkMask& mask, const SkRegion& clip);
+ void blitRectRegion(const SkIRect& rect, const SkRegion& clip);
+ void blitRegion(const SkRegion& clip);
+ ///@}
+
+ /** @name Factories
+ Return the correct blitter to use given the specified context.
+ */
+ static SkBlitter* Choose(const SkPixmap& dst,
+ const SkMatrix& matrix,
+ const SkPaint& paint,
+ SkTBlitterAllocator*,
+ bool drawCoverage = false);
+
+ static SkBlitter* ChooseSprite(const SkPixmap& dst,
+ const SkPaint&,
+ const SkPixmap& src,
+ int left, int top,
+ SkTBlitterAllocator*);
+ ///@}
+
+ static SkShader::ContextRec::DstType PreferredShaderDest(const SkImageInfo&);
+
+protected:
+ SkAutoMalloc fBlitMemory;
+};
+
+/** This blitter silently never draws anything.
+*/
+class SkNullBlitter : public SkBlitter {
+public:
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha[], const int16_t runs[]) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitMask(const SkMask&, const SkIRect& clip) override;
+ const SkPixmap* justAnOpaqueColor(uint32_t* value) override;
+ bool isNullBlitter() const override;
+};
+
+/** Wraps another (real) blitter, and ensures that the real blitter is only
+ called with coordinates that have been clipped by the specified clipRect.
+ This means the caller need not perform the clipping ahead of time.
+*/
+class SkRectClipBlitter : public SkBlitter {
+public:
+ void init(SkBlitter* blitter, const SkIRect& clipRect) {
+ SkASSERT(!clipRect.isEmpty());
+ fBlitter = blitter;
+ fClipRect = clipRect;
+ }
+
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha[], const int16_t runs[]) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ virtual void blitAntiRect(int x, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) override;
+ void blitMask(const SkMask&, const SkIRect& clip) override;
+ const SkPixmap* justAnOpaqueColor(uint32_t* value) override;
+
+ int requestRowsPreserved() const override {
+ return fBlitter->requestRowsPreserved();
+ }
+
+ void* allocBlitMemory(size_t sz) override {
+ return fBlitter->allocBlitMemory(sz);
+ }
+
+private:
+ SkBlitter* fBlitter;
+ SkIRect fClipRect;
+};
+
+/** Wraps another (real) blitter, and ensures that the real blitter is only
+ called with coordinates that have been clipped by the specified clipRgn.
+ This means the caller need not perform the clipping ahead of time.
+*/
+class SkRgnClipBlitter : public SkBlitter {
+public:
+ void init(SkBlitter* blitter, const SkRegion* clipRgn) {
+ SkASSERT(clipRgn && !clipRgn->isEmpty());
+ fBlitter = blitter;
+ fRgn = clipRgn;
+ }
+
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha[], const int16_t runs[]) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitAntiRect(int x, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) override;
+ void blitMask(const SkMask&, const SkIRect& clip) override;
+ const SkPixmap* justAnOpaqueColor(uint32_t* value) override;
+
+ int requestRowsPreserved() const override {
+ return fBlitter->requestRowsPreserved();
+ }
+
+ void* allocBlitMemory(size_t sz) override {
+ return fBlitter->allocBlitMemory(sz);
+ }
+
+private:
+ SkBlitter* fBlitter;
+ const SkRegion* fRgn;
+};
+
+/** Factory that sets up the most efficient wrapper blitter needed to apply a clip.
+    apply() may return a pointer to one of its members, so the SkBlitterClipper must
+    outlive any use of the returned blitter.
+*/
+class SkBlitterClipper {
+public:
+ SkBlitter* apply(SkBlitter* blitter, const SkRegion* clip,
+ const SkIRect* bounds = nullptr);
+
+private:
+ SkNullBlitter fNullBlitter;
+ SkRectClipBlitter fRectBlitter;
+ SkRgnClipBlitter fRgnBlitter;
+};
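+// Usage sketch (hypothetical caller):
+//     SkBlitterClipper clipper;
+//     SkBlitter* clipped = clipper.apply(blitter, &clipRgn, &drawBounds);
+//     // 'clipped' may point at a member of 'clipper', so 'clipper' must outlive it.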
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBlitter_A8.cpp b/gfx/skia/skia/src/core/SkBlitter_A8.cpp
new file mode 100644
index 000000000..cb7d718f5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitter_A8.cpp
@@ -0,0 +1,430 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkCoreBlitters.h"
+#include "SkColorPriv.h"
+#include "SkShader.h"
+#include "SkXfermode.h"
+
+SkA8_Blitter::SkA8_Blitter(const SkPixmap& device, const SkPaint& paint) : INHERITED(device) {
+ fSrcA = paint.getAlpha();
+}
+
+const SkPixmap* SkA8_Blitter::justAnOpaqueColor(uint32_t* value) {
+ if (255 == fSrcA) {
+ *value = 255;
+ return &fDevice;
+ }
+ return nullptr;
+}
+
+void SkA8_Blitter::blitH(int x, int y, int width) {
+ SkASSERT(x >= 0 && y >= 0 &&
+ (unsigned)(x + width) <= (unsigned)fDevice.width());
+
+ if (fSrcA == 0) {
+ return;
+ }
+
+ uint8_t* device = fDevice.writable_addr8(x, y);
+
+ if (fSrcA == 255) {
+ memset(device, 0xFF, width);
+ } else {
+ unsigned scale = 256 - SkAlpha255To256(fSrcA);
+ unsigned srcA = fSrcA;
+
+ for (int i = 0; i < width; i++) {
+ device[i] = SkToU8(srcA + SkAlphaMul(device[i], scale));
+ }
+ }
+}
+
+void SkA8_Blitter::blitAntiH(int x, int y, const SkAlpha antialias[],
+ const int16_t runs[]) {
+ if (fSrcA == 0) {
+ return;
+ }
+
+ uint8_t* device = fDevice.writable_addr8(x, y);
+ unsigned srcA = fSrcA;
+
+ for (;;) {
+ int count = runs[0];
+ SkASSERT(count >= 0);
+ if (count == 0) {
+ return;
+ }
+ unsigned aa = antialias[0];
+
+ if (aa == 255 && srcA == 255) {
+ memset(device, 0xFF, count);
+ } else {
+ unsigned sa = SkAlphaMul(srcA, SkAlpha255To256(aa));
+ unsigned scale = 256 - sa;
+
+ for (int i = 0; i < count; i++) {
+ device[i] = SkToU8(sa + SkAlphaMul(device[i], scale));
+ }
+ }
+ runs += count;
+ antialias += count;
+ device += count;
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+
+#define solid_8_pixels(mask, dst) \
+ do { \
+ if (mask & 0x80) dst[0] = 0xFF; \
+ if (mask & 0x40) dst[1] = 0xFF; \
+ if (mask & 0x20) dst[2] = 0xFF; \
+ if (mask & 0x10) dst[3] = 0xFF; \
+ if (mask & 0x08) dst[4] = 0xFF; \
+ if (mask & 0x04) dst[5] = 0xFF; \
+ if (mask & 0x02) dst[6] = 0xFF; \
+ if (mask & 0x01) dst[7] = 0xFF; \
+ } while (0)
+
+#define SK_BLITBWMASK_NAME SkA8_BlitBW
+#define SK_BLITBWMASK_ARGS
+#define SK_BLITBWMASK_BLIT8(mask, dst) solid_8_pixels(mask, dst)
+#define SK_BLITBWMASK_GETADDR writable_addr8
+#define SK_BLITBWMASK_DEVTYPE uint8_t
+#include "SkBlitBWMaskTemplate.h"
+
+static inline void blend_8_pixels(U8CPU bw, uint8_t dst[], U8CPU sa,
+ unsigned dst_scale) {
+ if (bw & 0x80) dst[0] = SkToU8(sa + SkAlphaMul(dst[0], dst_scale));
+ if (bw & 0x40) dst[1] = SkToU8(sa + SkAlphaMul(dst[1], dst_scale));
+ if (bw & 0x20) dst[2] = SkToU8(sa + SkAlphaMul(dst[2], dst_scale));
+ if (bw & 0x10) dst[3] = SkToU8(sa + SkAlphaMul(dst[3], dst_scale));
+ if (bw & 0x08) dst[4] = SkToU8(sa + SkAlphaMul(dst[4], dst_scale));
+ if (bw & 0x04) dst[5] = SkToU8(sa + SkAlphaMul(dst[5], dst_scale));
+ if (bw & 0x02) dst[6] = SkToU8(sa + SkAlphaMul(dst[6], dst_scale));
+ if (bw & 0x01) dst[7] = SkToU8(sa + SkAlphaMul(dst[7], dst_scale));
+}
+
+#define SK_BLITBWMASK_NAME SkA8_BlendBW
+#define SK_BLITBWMASK_ARGS , U8CPU sa, unsigned dst_scale
+#define SK_BLITBWMASK_BLIT8(mask, dst) blend_8_pixels(mask, dst, sa, dst_scale)
+#define SK_BLITBWMASK_GETADDR writable_addr8
+#define SK_BLITBWMASK_DEVTYPE uint8_t
+#include "SkBlitBWMaskTemplate.h"
+
+void SkA8_Blitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ if (fSrcA == 0) {
+ return;
+ }
+
+ if (mask.fFormat == SkMask::kBW_Format) {
+ if (fSrcA == 0xFF) {
+ SkA8_BlitBW(fDevice, mask, clip);
+ } else {
+ SkA8_BlendBW(fDevice, mask, clip, fSrcA,
+ SkAlpha255To256(255 - fSrcA));
+ }
+ return;
+ }
+
+ int x = clip.fLeft;
+ int y = clip.fTop;
+ int width = clip.width();
+ int height = clip.height();
+ uint8_t* device = fDevice.writable_addr8(x, y);
+ const uint8_t* alpha = mask.getAddr8(x, y);
+ unsigned srcA = fSrcA;
+
+ while (--height >= 0) {
+ for (int i = width - 1; i >= 0; --i) {
+ unsigned sa;
+ // scale our src by the alpha value
+ {
+ int aa = alpha[i];
+ if (aa == 0) {
+ continue;
+ }
+ if (aa == 255) {
+ if (srcA == 255) {
+ device[i] = 0xFF;
+ continue;
+ }
+ sa = srcA;
+ } else {
+ sa = SkAlphaMul(srcA, SkAlpha255To256(aa));
+ }
+ }
+
+ int scale = 256 - SkAlpha255To256(sa);
+ device[i] = SkToU8(sa + SkAlphaMul(device[i], scale));
+ }
+ device += fDevice.rowBytes();
+ alpha += mask.fRowBytes;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkA8_Blitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ if (fSrcA == 0) {
+ return;
+ }
+
+ unsigned sa = SkAlphaMul(fSrcA, SkAlpha255To256(alpha));
+ uint8_t* device = fDevice.writable_addr8(x, y);
+ size_t rowBytes = fDevice.rowBytes();
+
+ if (sa == 0xFF) {
+ for (int i = 0; i < height; i++) {
+ *device = SkToU8(sa);
+ device += rowBytes;
+ }
+ } else {
+ unsigned scale = 256 - SkAlpha255To256(sa);
+
+ for (int i = 0; i < height; i++) {
+ *device = SkToU8(sa + SkAlphaMul(*device, scale));
+ device += rowBytes;
+ }
+ }
+}
+
+void SkA8_Blitter::blitRect(int x, int y, int width, int height) {
+ SkASSERT(x >= 0 && y >= 0 &&
+ (unsigned)(x + width) <= (unsigned)fDevice.width() &&
+ (unsigned)(y + height) <= (unsigned)fDevice.height());
+
+ if (fSrcA == 0) {
+ return;
+ }
+
+ uint8_t* device = fDevice.writable_addr8(x, y);
+ unsigned srcA = fSrcA;
+
+ if (srcA == 255) {
+ while (--height >= 0) {
+ memset(device, 0xFF, width);
+ device += fDevice.rowBytes();
+ }
+ } else {
+ unsigned scale = 256 - SkAlpha255To256(srcA);
+
+ while (--height >= 0) {
+ for (int i = 0; i < width; i++) {
+ device[i] = SkToU8(srcA + SkAlphaMul(device[i], scale));
+ }
+ device += fDevice.rowBytes();
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////
+
+SkA8_Shader_Blitter::SkA8_Shader_Blitter(const SkPixmap& device, const SkPaint& paint,
+ SkShader::Context* shaderContext)
+ : INHERITED(device, paint, shaderContext)
+{
+ fXfermode = SkXfermode::Peek(paint.getBlendMode());
+ SkASSERT(!fXfermode || fShaderContext);
+
+ int width = device.width();
+ fBuffer = (SkPMColor*)sk_malloc_throw(sizeof(SkPMColor) * (width + (SkAlign4(width) >> 2)));
+ fAAExpand = (uint8_t*)(fBuffer + width);
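+    // Single allocation: 'width' SkPMColors of shader output (fBuffer), followed by
+    // SkAlign4(width) bytes (fAAExpand) used to expand one aa value into per-pixel
+    // coverage for xferA8().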
+}
+
+SkA8_Shader_Blitter::~SkA8_Shader_Blitter() {
+ sk_free(fBuffer);
+}
+
+void SkA8_Shader_Blitter::blitH(int x, int y, int width) {
+ SkASSERT(x >= 0 && y >= 0 &&
+ (unsigned)(x + width) <= (unsigned)fDevice.width());
+
+ uint8_t* device = fDevice.writable_addr8(x, y);
+ SkShader::Context* shaderContext = fShaderContext;
+
+ if ((shaderContext->getFlags() & SkShader::kOpaqueAlpha_Flag) && !fXfermode) {
+ memset(device, 0xFF, width);
+ } else {
+ SkPMColor* span = fBuffer;
+
+ shaderContext->shadeSpan(x, y, span, width);
+ if (fXfermode) {
+ fXfermode->xferA8(device, span, width, nullptr);
+ } else {
+ for (int i = width - 1; i >= 0; --i) {
+ unsigned srcA = SkGetPackedA32(span[i]);
+ unsigned scale = 256 - SkAlpha255To256(srcA);
+
+ device[i] = SkToU8(srcA + SkAlphaMul(device[i], scale));
+ }
+ }
+ }
+}
+
+static inline uint8_t aa_blend8(SkPMColor src, U8CPU da, int aa) {
+ SkASSERT((unsigned)aa <= 255);
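+    // Src-over in the alpha channel with coverage aa: with sa, da, aa viewed in [0,1],
+    // out = sa*aa + da*(1 - sa*aa), computed here with 0..256 fixed-point scales.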
+
+ int src_scale = SkAlpha255To256(aa);
+ int sa = SkGetPackedA32(src);
+ int dst_scale = SkAlphaMulInv256(sa, src_scale);
+
+ return SkToU8((sa * src_scale + da * dst_scale) >> 8);
+}
+
+void SkA8_Shader_Blitter::blitAntiH(int x, int y, const SkAlpha antialias[],
+ const int16_t runs[]) {
+ SkShader::Context* shaderContext = fShaderContext;
+ SkXfermode* mode = fXfermode;
+ uint8_t* aaExpand = fAAExpand;
+ SkPMColor* span = fBuffer;
+ uint8_t* device = fDevice.writable_addr8(x, y);
+ int opaque = shaderContext->getFlags() & SkShader::kOpaqueAlpha_Flag;
+
+ for (;;) {
+ int count = *runs;
+ if (count == 0) {
+ break;
+ }
+ int aa = *antialias;
+ if (aa) {
+ if (opaque && aa == 255 && mode == nullptr) {
+ memset(device, 0xFF, count);
+ } else {
+ shaderContext->shadeSpan(x, y, span, count);
+ if (mode) {
+ memset(aaExpand, aa, count);
+ mode->xferA8(device, span, count, aaExpand);
+ } else {
+ for (int i = count - 1; i >= 0; --i) {
+ device[i] = aa_blend8(span[i], device[i], aa);
+ }
+ }
+ }
+ }
+ device += count;
+ runs += count;
+ antialias += count;
+ x += count;
+ }
+}
+
+void SkA8_Shader_Blitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ if (mask.fFormat == SkMask::kBW_Format) {
+ this->INHERITED::blitMask(mask, clip);
+ return;
+ }
+
+ int x = clip.fLeft;
+ int y = clip.fTop;
+ int width = clip.width();
+ int height = clip.height();
+ uint8_t* device = fDevice.writable_addr8(x, y);
+ const uint8_t* alpha = mask.getAddr8(x, y);
+ SkShader::Context* shaderContext = fShaderContext;
+
+ SkPMColor* span = fBuffer;
+
+ while (--height >= 0) {
+ shaderContext->shadeSpan(x, y, span, width);
+ if (fXfermode) {
+ fXfermode->xferA8(device, span, width, alpha);
+ } else {
+ for (int i = width - 1; i >= 0; --i) {
+ device[i] = aa_blend8(span[i], device[i], alpha[i]);
+ }
+ }
+
+ y += 1;
+ device += fDevice.rowBytes();
+ alpha += mask.fRowBytes;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkA8_Coverage_Blitter::SkA8_Coverage_Blitter(const SkPixmap& device,
+ const SkPaint& paint) : SkRasterBlitter(device) {
+ SkASSERT(nullptr == paint.getShader());
+ SkASSERT(paint.isSrcOver());
+ SkASSERT(nullptr == paint.getColorFilter());
+}
+
+void SkA8_Coverage_Blitter::blitAntiH(int x, int y, const SkAlpha antialias[],
+ const int16_t runs[]) {
+ uint8_t* device = fDevice.writable_addr8(x, y);
+ SkDEBUGCODE(int totalCount = 0;)
+
+ for (;;) {
+ int count = runs[0];
+ SkASSERT(count >= 0);
+ if (count == 0) {
+ return;
+ }
+ if (antialias[0]) {
+ memset(device, antialias[0], count);
+ }
+ runs += count;
+ antialias += count;
+ device += count;
+
+ SkDEBUGCODE(totalCount += count;)
+ }
+ SkASSERT(fDevice.width() == totalCount);
+}
+
+void SkA8_Coverage_Blitter::blitH(int x, int y, int width) {
+ memset(fDevice.writable_addr8(x, y), 0xFF, width);
+}
+
+void SkA8_Coverage_Blitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ if (0 == alpha) {
+ return;
+ }
+
+ uint8_t* dst = fDevice.writable_addr8(x, y);
+ const size_t dstRB = fDevice.rowBytes();
+ while (--height >= 0) {
+ *dst = alpha;
+ dst += dstRB;
+ }
+}
+
+void SkA8_Coverage_Blitter::blitRect(int x, int y, int width, int height) {
+ uint8_t* dst = fDevice.writable_addr8(x, y);
+ const size_t dstRB = fDevice.rowBytes();
+ while (--height >= 0) {
+ memset(dst, 0xFF, width);
+ dst += dstRB;
+ }
+}
+
+void SkA8_Coverage_Blitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ SkASSERT(SkMask::kA8_Format == mask.fFormat);
+
+ int x = clip.fLeft;
+ int y = clip.fTop;
+ int width = clip.width();
+ int height = clip.height();
+
+ uint8_t* dst = fDevice.writable_addr8(x, y);
+ const uint8_t* src = mask.getAddr8(x, y);
+ const size_t srcRB = mask.fRowBytes;
+ const size_t dstRB = fDevice.rowBytes();
+
+ while (--height >= 0) {
+ memcpy(dst, src, width);
+ dst += dstRB;
+ src += srcRB;
+ }
+}
+
+const SkPixmap* SkA8_Coverage_Blitter::justAnOpaqueColor(uint32_t*) {
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkBlitter_ARGB32.cpp b/gfx/skia/skia/src/core/SkBlitter_ARGB32.cpp
new file mode 100644
index 000000000..ea0554d66
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitter_ARGB32.cpp
@@ -0,0 +1,697 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCoreBlitters.h"
+#include "SkColorPriv.h"
+#include "SkShader.h"
+#include "SkUtils.h"
+#include "SkXfermode.h"
+#include "SkBlitMask.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void SkARGB32_Blit32(const SkPixmap& device, const SkMask& mask,
+ const SkIRect& clip, SkPMColor srcColor) {
+ U8CPU alpha = SkGetPackedA32(srcColor);
+ unsigned flags = SkBlitRow::kSrcPixelAlpha_Flag32;
+ if (alpha != 255) {
+ flags |= SkBlitRow::kGlobalAlpha_Flag32;
+ }
+ SkBlitRow::Proc32 proc = SkBlitRow::Factory32(flags);
+
+ int x = clip.fLeft;
+ int y = clip.fTop;
+ int width = clip.width();
+ int height = clip.height();
+
+ SkPMColor* dstRow = device.writable_addr32(x, y);
+ const SkPMColor* srcRow = reinterpret_cast<const SkPMColor*>(mask.getAddr8(x, y));
+
+ do {
+ proc(dstRow, srcRow, width, alpha);
+ dstRow = (SkPMColor*)((char*)dstRow + device.rowBytes());
+ srcRow = (const SkPMColor*)((const char*)srcRow + mask.fRowBytes);
+ } while (--height != 0);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+
+SkARGB32_Blitter::SkARGB32_Blitter(const SkPixmap& device, const SkPaint& paint)
+ : INHERITED(device) {
+ SkColor color = paint.getColor();
+ fColor = color;
+
+ fSrcA = SkColorGetA(color);
+ unsigned scale = SkAlpha255To256(fSrcA);
+ fSrcR = SkAlphaMul(SkColorGetR(color), scale);
+ fSrcG = SkAlphaMul(SkColorGetG(color), scale);
+ fSrcB = SkAlphaMul(SkColorGetB(color), scale);
+
+ fPMColor = SkPackARGB32(fSrcA, fSrcR, fSrcG, fSrcB);
+}
+
+const SkPixmap* SkARGB32_Blitter::justAnOpaqueColor(uint32_t* value) {
+ if (255 == fSrcA) {
+ *value = fPMColor;
+ return &fDevice;
+ }
+ return nullptr;
+}
+
+#if defined _WIN32 // disable warning : local variable used without having been initialized
+#pragma warning ( push )
+#pragma warning ( disable : 4701 )
+#endif
+
+void SkARGB32_Blitter::blitH(int x, int y, int width) {
+ SkASSERT(x >= 0 && y >= 0 && x + width <= fDevice.width());
+
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkBlitRow::Color32(device, device, width, fPMColor);
+}
+
+void SkARGB32_Blitter::blitAntiH(int x, int y, const SkAlpha antialias[],
+ const int16_t runs[]) {
+ if (fSrcA == 0) {
+ return;
+ }
+
+ uint32_t color = fPMColor;
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ unsigned opaqueMask = fSrcA; // if fSrcA is 0xFF, then we will catch the fast opaque case
+
+ for (;;) {
+ int count = runs[0];
+ SkASSERT(count >= 0);
+ if (count <= 0) {
+ return;
+ }
+ unsigned aa = antialias[0];
+ if (aa) {
+ if ((opaqueMask & aa) == 255) {
+ sk_memset32(device, color, count);
+ } else {
+ uint32_t sc = SkAlphaMulQ(color, SkAlpha255To256(aa));
+ SkBlitRow::Color32(device, device, count, sc);
+ }
+ }
+ runs += count;
+ antialias += count;
+ device += count;
+ }
+}
+
+void SkARGB32_Blitter::blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkDEBUGCODE((void)fDevice.writable_addr32(x + 1, y);)
+
+ device[0] = SkBlendARGB32(fPMColor, device[0], a0);
+ device[1] = SkBlendARGB32(fPMColor, device[1], a1);
+}
+
+void SkARGB32_Blitter::blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkDEBUGCODE((void)fDevice.writable_addr32(x, y + 1);)
+
+ device[0] = SkBlendARGB32(fPMColor, device[0], a0);
+ device = (uint32_t*)((char*)device + fDevice.rowBytes());
+ device[0] = SkBlendARGB32(fPMColor, device[0], a1);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+
+#define solid_8_pixels(mask, dst, color) \
+ do { \
+ if (mask & 0x80) dst[0] = color; \
+ if (mask & 0x40) dst[1] = color; \
+ if (mask & 0x20) dst[2] = color; \
+ if (mask & 0x10) dst[3] = color; \
+ if (mask & 0x08) dst[4] = color; \
+ if (mask & 0x04) dst[5] = color; \
+ if (mask & 0x02) dst[6] = color; \
+ if (mask & 0x01) dst[7] = color; \
+ } while (0)
+
+#define SK_BLITBWMASK_NAME SkARGB32_BlitBW
+#define SK_BLITBWMASK_ARGS , SkPMColor color
+#define SK_BLITBWMASK_BLIT8(mask, dst) solid_8_pixels(mask, dst, color)
+#define SK_BLITBWMASK_GETADDR writable_addr32
+#define SK_BLITBWMASK_DEVTYPE uint32_t
+#include "SkBlitBWMaskTemplate.h"
+
+#define blend_8_pixels(mask, dst, sc, dst_scale) \
+ do { \
+ if (mask & 0x80) { dst[0] = sc + SkAlphaMulQ(dst[0], dst_scale); } \
+ if (mask & 0x40) { dst[1] = sc + SkAlphaMulQ(dst[1], dst_scale); } \
+ if (mask & 0x20) { dst[2] = sc + SkAlphaMulQ(dst[2], dst_scale); } \
+ if (mask & 0x10) { dst[3] = sc + SkAlphaMulQ(dst[3], dst_scale); } \
+ if (mask & 0x08) { dst[4] = sc + SkAlphaMulQ(dst[4], dst_scale); } \
+ if (mask & 0x04) { dst[5] = sc + SkAlphaMulQ(dst[5], dst_scale); } \
+ if (mask & 0x02) { dst[6] = sc + SkAlphaMulQ(dst[6], dst_scale); } \
+ if (mask & 0x01) { dst[7] = sc + SkAlphaMulQ(dst[7], dst_scale); } \
+ } while (0)
+
+#define SK_BLITBWMASK_NAME SkARGB32_BlendBW
+#define SK_BLITBWMASK_ARGS , uint32_t sc, unsigned dst_scale
+#define SK_BLITBWMASK_BLIT8(mask, dst) blend_8_pixels(mask, dst, sc, dst_scale)
+#define SK_BLITBWMASK_GETADDR writable_addr32
+#define SK_BLITBWMASK_DEVTYPE uint32_t
+#include "SkBlitBWMaskTemplate.h"
+
+void SkARGB32_Blitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ SkASSERT(mask.fBounds.contains(clip));
+ SkASSERT(fSrcA != 0xFF);
+
+ if (fSrcA == 0) {
+ return;
+ }
+
+ if (SkBlitMask::BlitColor(fDevice, mask, clip, fColor)) {
+ return;
+ }
+
+ switch (mask.fFormat) {
+ case SkMask::kBW_Format:
+ SkARGB32_BlendBW(fDevice, mask, clip, fPMColor, SkAlpha255To256(255 - fSrcA));
+ break;
+ case SkMask::kARGB32_Format:
+ SkARGB32_Blit32(fDevice, mask, clip, fPMColor);
+ break;
+ default:
+ SkFAIL("Mask format not handled.");
+ }
+}
+
+void SkARGB32_Opaque_Blitter::blitMask(const SkMask& mask,
+ const SkIRect& clip) {
+ SkASSERT(mask.fBounds.contains(clip));
+
+ if (SkBlitMask::BlitColor(fDevice, mask, clip, fColor)) {
+ return;
+ }
+
+ switch (mask.fFormat) {
+ case SkMask::kBW_Format:
+ SkARGB32_BlitBW(fDevice, mask, clip, fPMColor);
+ break;
+ case SkMask::kARGB32_Format:
+ SkARGB32_Blit32(fDevice, mask, clip, fPMColor);
+ break;
+ default:
+ SkFAIL("Mask format not handled.");
+ }
+}
+
+void SkARGB32_Opaque_Blitter::blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkDEBUGCODE((void)fDevice.writable_addr32(x + 1, y);)
+
+ device[0] = SkFastFourByteInterp(fPMColor, device[0], a0);
+ device[1] = SkFastFourByteInterp(fPMColor, device[1], a1);
+}
+
+void SkARGB32_Opaque_Blitter::blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkDEBUGCODE((void)fDevice.writable_addr32(x, y + 1);)
+
+ device[0] = SkFastFourByteInterp(fPMColor, device[0], a0);
+ device = (uint32_t*)((char*)device + fDevice.rowBytes());
+ device[0] = SkFastFourByteInterp(fPMColor, device[0], a1);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkARGB32_Blitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ if (alpha == 0 || fSrcA == 0) {
+ return;
+ }
+
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ uint32_t color = fPMColor;
+
+ if (alpha != 255) {
+ color = SkAlphaMulQ(color, SkAlpha255To256(alpha));
+ }
+
+#ifdef SK_SUPPORT_LEGACY_BROKEN_LERP
+ unsigned dst_scale = 255 - SkGetPackedA32(color);
+#else
+ unsigned dst_scale = SkAlpha255To256(255 - SkGetPackedA32(color));
+#endif
+ size_t rowBytes = fDevice.rowBytes();
+ while (--height >= 0) {
+ device[0] = color + SkAlphaMulQ(device[0], dst_scale);
+ device = (uint32_t*)((char*)device + rowBytes);
+ }
+}
+
+void SkARGB32_Blitter::blitRect(int x, int y, int width, int height) {
+ SkASSERT(x >= 0 && y >= 0 && x + width <= fDevice.width() && y + height <= fDevice.height());
+
+ if (fSrcA == 0) {
+ return;
+ }
+
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ uint32_t color = fPMColor;
+ size_t rowBytes = fDevice.rowBytes();
+
+ while (--height >= 0) {
+ SkBlitRow::Color32(device, device, width, color);
+ device = (uint32_t*)((char*)device + rowBytes);
+ }
+}
+
+#if defined _WIN32
+#pragma warning ( pop )
+#endif
+
+///////////////////////////////////////////////////////////////////////
+
+void SkARGB32_Black_Blitter::blitAntiH(int x, int y, const SkAlpha antialias[],
+ const int16_t runs[]) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkPMColor black = (SkPMColor)(SK_A32_MASK << SK_A32_SHIFT);
+
+ for (;;) {
+ int count = runs[0];
+ SkASSERT(count >= 0);
+ if (count <= 0) {
+ return;
+ }
+ unsigned aa = antialias[0];
+ if (aa) {
+ if (aa == 255) {
+ sk_memset32(device, black, count);
+ } else {
+ SkPMColor src = aa << SK_A32_SHIFT;
+ unsigned dst_scale = 256 - aa;
+ int n = count;
+ do {
+ --n;
+ device[n] = src + SkAlphaMulQ(device[n], dst_scale);
+ } while (n > 0);
+ }
+ }
+ runs += count;
+ antialias += count;
+ device += count;
+ }
+}
+
+void SkARGB32_Black_Blitter::blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkDEBUGCODE((void)fDevice.writable_addr32(x + 1, y);)
+
+ device[0] = (a0 << SK_A32_SHIFT) + SkAlphaMulQ(device[0], 256 - a0);
+ device[1] = (a1 << SK_A32_SHIFT) + SkAlphaMulQ(device[1], 256 - a1);
+}
+
+void SkARGB32_Black_Blitter::blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkDEBUGCODE((void)fDevice.writable_addr32(x, y + 1);)
+
+ device[0] = (a0 << SK_A32_SHIFT) + SkAlphaMulQ(device[0], 256 - a0);
+ device = (uint32_t*)((char*)device + fDevice.rowBytes());
+ device[0] = (a1 << SK_A32_SHIFT) + SkAlphaMulQ(device[0], 256 - a1);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Special version of SkBlitRow::Factory32 that knows we're in kSrc_Mode,
+// instead of kSrcOver_Mode
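+// (With kSrc and coverage aa the result is a plain lerp between the shader output and the
+// existing dst: out = src*aa + dst*(1 - aa), here in 0..256 fixed point.)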
+static void blend_srcmode(SkPMColor* SK_RESTRICT device,
+ const SkPMColor* SK_RESTRICT span,
+ int count, U8CPU aa) {
+ int aa256 = SkAlpha255To256(aa);
+ for (int i = 0; i < count; ++i) {
+ device[i] = SkFourByteInterp256(span[i], device[i], aa256);
+ }
+}
+
+SkARGB32_Shader_Blitter::SkARGB32_Shader_Blitter(const SkPixmap& device,
+ const SkPaint& paint, SkShader::Context* shaderContext)
+ : INHERITED(device, paint, shaderContext)
+{
+ fBuffer = (SkPMColor*)sk_malloc_throw(device.width() * (sizeof(SkPMColor)));
+
+ fXfermode = SkXfermode::Peek(paint.getBlendMode());
+
+ int flags = 0;
+ if (!(shaderContext->getFlags() & SkShader::kOpaqueAlpha_Flag)) {
+ flags |= SkBlitRow::kSrcPixelAlpha_Flag32;
+ }
+ // we call this on the output from the shader
+ fProc32 = SkBlitRow::Factory32(flags);
+ // we call this on the output from the shader + alpha from the aa buffer
+ fProc32Blend = SkBlitRow::Factory32(flags | SkBlitRow::kGlobalAlpha_Flag32);
+
+ fShadeDirectlyIntoDevice = false;
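+    // The shader may write straight into the device when no blending is needed: either it
+    // is opaque with no xfermode (src-over of an opaque src is a copy), or the xfermode is
+    // kSrc, in which case partial coverage is handled by blend_srcmode instead.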
+ if (fXfermode == nullptr) {
+ if (shaderContext->getFlags() & SkShader::kOpaqueAlpha_Flag) {
+ fShadeDirectlyIntoDevice = true;
+ }
+ } else {
+ SkXfermode::Mode mode;
+ if (fXfermode->asMode(&mode)) {
+ if (SkXfermode::kSrc_Mode == mode) {
+ fShadeDirectlyIntoDevice = true;
+ fProc32Blend = blend_srcmode;
+ }
+ }
+ }
+
+ fConstInY = SkToBool(shaderContext->getFlags() & SkShader::kConstInY32_Flag);
+}
+
+SkARGB32_Shader_Blitter::~SkARGB32_Shader_Blitter() {
+ sk_free(fBuffer);
+}
+
+void SkARGB32_Shader_Blitter::blitH(int x, int y, int width) {
+ SkASSERT(x >= 0 && y >= 0 && x + width <= fDevice.width());
+
+ uint32_t* device = fDevice.writable_addr32(x, y);
+
+ if (fShadeDirectlyIntoDevice) {
+ fShaderContext->shadeSpan(x, y, device, width);
+ } else {
+ SkPMColor* span = fBuffer;
+ fShaderContext->shadeSpan(x, y, span, width);
+ if (fXfermode) {
+ fXfermode->xfer32(device, span, width, nullptr);
+ } else {
+ fProc32(device, span, width, 255);
+ }
+ }
+}
+
+void SkARGB32_Shader_Blitter::blitRect(int x, int y, int width, int height) {
+ SkASSERT(x >= 0 && y >= 0 &&
+ x + width <= fDevice.width() && y + height <= fDevice.height());
+
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ size_t deviceRB = fDevice.rowBytes();
+ SkShader::Context* shaderContext = fShaderContext;
+ SkPMColor* span = fBuffer;
+
+ if (fConstInY) {
+ if (fShadeDirectlyIntoDevice) {
+ // shade the first row directly into the device
+ shaderContext->shadeSpan(x, y, device, width);
+ span = device;
+ while (--height > 0) {
+ device = (uint32_t*)((char*)device + deviceRB);
+ memcpy(device, span, width << 2);
+ }
+ } else {
+ shaderContext->shadeSpan(x, y, span, width);
+ SkXfermode* xfer = fXfermode;
+ if (xfer) {
+ do {
+ xfer->xfer32(device, span, width, nullptr);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ } else {
+ SkBlitRow::Proc32 proc = fProc32;
+ do {
+ proc(device, span, width, 255);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ }
+ }
+ return;
+ }
+
+ if (fShadeDirectlyIntoDevice) {
+ void* ctx;
+ SkShader::Context::ShadeProc shadeProc = shaderContext->asAShadeProc(&ctx);
+ if (shadeProc) {
+ do {
+ shadeProc(ctx, x, y, device, width);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ } else {
+ do {
+ shaderContext->shadeSpan(x, y, device, width);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ }
+ } else {
+ SkXfermode* xfer = fXfermode;
+ if (xfer) {
+ do {
+ shaderContext->shadeSpan(x, y, span, width);
+ xfer->xfer32(device, span, width, nullptr);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ } else {
+ SkBlitRow::Proc32 proc = fProc32;
+ do {
+ shaderContext->shadeSpan(x, y, span, width);
+ proc(device, span, width, 255);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ }
+ }
+}
+
+void SkARGB32_Shader_Blitter::blitAntiH(int x, int y, const SkAlpha antialias[],
+ const int16_t runs[]) {
+ SkPMColor* span = fBuffer;
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkShader::Context* shaderContext = fShaderContext;
+
+ if (fXfermode && !fShadeDirectlyIntoDevice) {
+ for (;;) {
+ SkXfermode* xfer = fXfermode;
+
+ int count = *runs;
+ if (count <= 0)
+ break;
+ int aa = *antialias;
+ if (aa) {
+ shaderContext->shadeSpan(x, y, span, count);
+ if (aa == 255) {
+ xfer->xfer32(device, span, count, nullptr);
+ } else {
+ // count is almost always 1
+ for (int i = count - 1; i >= 0; --i) {
+ xfer->xfer32(&device[i], &span[i], 1, antialias);
+ }
+ }
+ }
+ device += count;
+ runs += count;
+ antialias += count;
+ x += count;
+ }
+ } else if (fShadeDirectlyIntoDevice ||
+ (shaderContext->getFlags() & SkShader::kOpaqueAlpha_Flag)) {
+ for (;;) {
+ int count = *runs;
+ if (count <= 0) {
+ break;
+ }
+ int aa = *antialias;
+ if (aa) {
+ if (aa == 255) {
+ // cool, have the shader draw right into the device
+ shaderContext->shadeSpan(x, y, device, count);
+ } else {
+ shaderContext->shadeSpan(x, y, span, count);
+ fProc32Blend(device, span, count, aa);
+ }
+ }
+ device += count;
+ runs += count;
+ antialias += count;
+ x += count;
+ }
+ } else {
+ for (;;) {
+ int count = *runs;
+ if (count <= 0) {
+ break;
+ }
+ int aa = *antialias;
+ if (aa) {
+ shaderContext->shadeSpan(x, y, span, count);
+ if (aa == 255) {
+ fProc32(device, span, count, 255);
+ } else {
+ fProc32Blend(device, span, count, aa);
+ }
+ }
+ device += count;
+ runs += count;
+ antialias += count;
+ x += count;
+ }
+ }
+}
+
+void SkARGB32_Shader_Blitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ // we only handle kA8 with an xfermode
+ if (fXfermode && (SkMask::kA8_Format != mask.fFormat)) {
+ this->INHERITED::blitMask(mask, clip);
+ return;
+ }
+
+ SkASSERT(mask.fBounds.contains(clip));
+
+ SkShader::Context* shaderContext = fShaderContext;
+ SkBlitMask::RowProc proc = nullptr;
+ if (!fXfermode) {
+ unsigned flags = 0;
+ if (shaderContext->getFlags() & SkShader::kOpaqueAlpha_Flag) {
+ flags |= SkBlitMask::kSrcIsOpaque_RowFlag;
+ }
+ proc = SkBlitMask::RowFactory(kN32_SkColorType, mask.fFormat,
+ (SkBlitMask::RowFlags)flags);
+ if (nullptr == proc) {
+ this->INHERITED::blitMask(mask, clip);
+ return;
+ }
+ }
+
+ const int x = clip.fLeft;
+ const int width = clip.width();
+ int y = clip.fTop;
+ int height = clip.height();
+
+ char* dstRow = (char*)fDevice.writable_addr32(x, y);
+ const size_t dstRB = fDevice.rowBytes();
+ const uint8_t* maskRow = (const uint8_t*)mask.getAddr(x, y);
+ const size_t maskRB = mask.fRowBytes;
+
+ SkPMColor* span = fBuffer;
+
+ if (fXfermode) {
+ SkASSERT(SkMask::kA8_Format == mask.fFormat);
+ SkXfermode* xfer = fXfermode;
+ do {
+ shaderContext->shadeSpan(x, y, span, width);
+ xfer->xfer32(reinterpret_cast<SkPMColor*>(dstRow), span, width, maskRow);
+ dstRow += dstRB;
+ maskRow += maskRB;
+ y += 1;
+ } while (--height > 0);
+ } else {
+ do {
+ shaderContext->shadeSpan(x, y, span, width);
+ proc(reinterpret_cast<SkPMColor*>(dstRow), maskRow, span, width);
+ dstRow += dstRB;
+ maskRow += maskRB;
+ y += 1;
+ } while (--height > 0);
+ }
+}
+
+void SkARGB32_Shader_Blitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ SkASSERT(x >= 0 && y >= 0 && y + height <= fDevice.height());
+
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ size_t deviceRB = fDevice.rowBytes();
+ SkShader::Context* shaderContext = fShaderContext;
+
+ if (fConstInY) {
+ SkPMColor c;
+ shaderContext->shadeSpan(x, y, &c, 1);
+
+ if (fShadeDirectlyIntoDevice) {
+ if (255 == alpha) {
+ do {
+ *device = c;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ } else {
+ do {
+ *device = SkFourByteInterp(c, *device, alpha);
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ }
+ } else {
+ SkXfermode* xfer = fXfermode;
+ if (xfer) {
+ do {
+ xfer->xfer32(device, &c, 1, &alpha);
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ } else {
+ SkBlitRow::Proc32 proc = (255 == alpha) ? fProc32 : fProc32Blend;
+ do {
+ proc(device, &c, 1, alpha);
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ }
+ }
+ return;
+ }
+
+ if (fShadeDirectlyIntoDevice) {
+ void* ctx;
+ SkShader::Context::ShadeProc shadeProc = shaderContext->asAShadeProc(&ctx);
+ if (255 == alpha) {
+ if (shadeProc) {
+ do {
+ shadeProc(ctx, x, y, device, 1);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ } else {
+ do {
+ shaderContext->shadeSpan(x, y, device, 1);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ }
+ } else { // alpha < 255
+ SkPMColor c;
+ if (shadeProc) {
+ do {
+ shadeProc(ctx, x, y, &c, 1);
+ *device = SkFourByteInterp(c, *device, alpha);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ } else {
+ do {
+ shaderContext->shadeSpan(x, y, &c, 1);
+ *device = SkFourByteInterp(c, *device, alpha);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ }
+ }
+ } else {
+ SkPMColor* span = fBuffer;
+ SkXfermode* xfer = fXfermode;
+ if (xfer) {
+ do {
+ shaderContext->shadeSpan(x, y, span, 1);
+ xfer->xfer32(device, span, 1, &alpha);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ } else {
+ SkBlitRow::Proc32 proc = (255 == alpha) ? fProc32 : fProc32Blend;
+ do {
+ shaderContext->shadeSpan(x, y, span, 1);
+ proc(device, span, 1, alpha);
+ y += 1;
+ device = (uint32_t*)((char*)device + deviceRB);
+ } while (--height > 0);
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkBlitter_PM4f.cpp b/gfx/skia/skia/src/core/SkBlitter_PM4f.cpp
new file mode 100644
index 000000000..d63e924e2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitter_PM4f.cpp
@@ -0,0 +1,436 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCoreBlitters.h"
+#include "SkColorPriv.h"
+#include "SkShader.h"
+#include "SkUtils.h"
+#include "SkXfermode.h"
+#include "SkBlitMask.h"
+#include "SkTemplates.h"
+#include "SkPM4f.h"
+
+template <typename State> class SkState_Blitter : public SkRasterBlitter {
+ typedef SkRasterBlitter INHERITED;
+ State fState;
+
+public:
+ SkState_Blitter(const SkPixmap& device, const SkPaint& paint)
+ : INHERITED(device)
+ , fState(device.info(), paint, nullptr)
+ {}
+
+ void blitH(int x, int y, int width) override {
+ SkASSERT(x >= 0 && y >= 0 && x + width <= fDevice.width());
+
+ fState.fProc1(fState.fXfer, State::WritableAddr(fDevice, x, y),
+ &fState.fPM4f, width, nullptr);
+ }
+
+ void blitV(int x, int y, int height, SkAlpha alpha) override {
+ SkASSERT(x >= 0 && y >= 0 && y + height <= fDevice.height());
+
+ typename State::DstType* device = State::WritableAddr(fDevice, x, y);
+ size_t deviceRB = fDevice.rowBytes();
+
+ for (int i = 0; i < height; ++i) {
+ fState.fProc1(fState.fXfer, device, &fState.fPM4f, 1, &alpha);
+ device = (typename State::DstType*)((char*)device + deviceRB);
+ }
+ }
+
+ void blitRect(int x, int y, int width, int height) override {
+ SkASSERT(x >= 0 && y >= 0 &&
+ x + width <= fDevice.width() && y + height <= fDevice.height());
+
+ typename State::DstType* device = State::WritableAddr(fDevice, x, y);
+ size_t deviceRB = fDevice.rowBytes();
+
+ do {
+ fState.fProc1(fState.fXfer, device, &fState.fPM4f, width, nullptr);
+ y += 1;
+ device = (typename State::DstType*)((char*)device + deviceRB);
+ } while (--height > 0);
+ }
+
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override {
+ typename State::DstType* device = State::WritableAddr(fDevice, x, y);
+
+ for (;;) {
+ int count = *runs;
+ if (count <= 0) {
+ break;
+ }
+ int aa = *antialias;
+ if (aa) {
+ if (aa == 255) {
+ fState.fProc1(fState.fXfer, device, &fState.fPM4f, count, nullptr);
+ } else {
+ for (int i = 0; i < count; ++i) {
+ fState.fProc1(fState.fXfer, &device[i], &fState.fPM4f, 1, antialias);
+ }
+ }
+ }
+ device += count;
+ runs += count;
+ antialias += count;
+ x += count;
+ }
+ }
+
+ void blitLCDMask(const SkMask& mask, const SkIRect& clip) {
+ auto proc = fState.getLCDProc(SkXfermode::kSrcIsSingle_LCDFlag);
+
+ const int x = clip.fLeft;
+ const int width = clip.width();
+ const int y = clip.fTop;
+ const int height = clip.height();
+
+ typename State::DstType* device = State::WritableAddr(fDevice, x, y);
+ const size_t dstRB = fDevice.rowBytes();
+ const uint16_t* maskRow = (const uint16_t*)mask.getAddr(x, y);
+ const size_t maskRB = mask.fRowBytes;
+
+ for (int i = 0; i < height; ++i) {
+ proc(device, &fState.fPM4f, width, maskRow);
+ device = (typename State::DstType*)((char*)device + dstRB);
+ maskRow = (const uint16_t*)((const char*)maskRow + maskRB);
+ }
+ }
+
+ void blitMask(const SkMask& mask, const SkIRect& clip) override {
+ if (SkMask::kLCD16_Format == mask.fFormat) {
+ this->blitLCDMask(mask, clip);
+ return;
+ }
+ if (SkMask::kA8_Format != mask.fFormat) {
+ this->INHERITED::blitMask(mask, clip);
+ return;
+ }
+
+ SkASSERT(mask.fBounds.contains(clip));
+
+ const int x = clip.fLeft;
+ const int width = clip.width();
+ const int y = clip.fTop;
+ const int height = clip.height();
+
+ typename State::DstType* device = State::WritableAddr(fDevice, x, y);
+ const size_t dstRB = fDevice.rowBytes();
+ const uint8_t* maskRow = (const uint8_t*)mask.getAddr(x, y);
+ const size_t maskRB = mask.fRowBytes;
+
+ for (int i = 0; i < height; ++i) {
+ fState.fProc1(fState.fXfer, device, &fState.fPM4f, width, maskRow);
+ device = (typename State::DstType*)((char*)device + dstRB);
+ maskRow += maskRB;
+ }
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+template <typename State> class SkState_Shader_Blitter : public SkShaderBlitter {
+public:
+ SkState_Shader_Blitter(const SkPixmap& device, const SkPaint& paint,
+ const SkShader::Context::BlitState& bstate)
+ : INHERITED(device, paint, bstate.fCtx)
+ , fState(device.info(), paint, bstate.fCtx)
+ , fBState(bstate)
+ , fBlitBW(bstate.fBlitBW)
+ , fBlitAA(bstate.fBlitAA)
+ {}
+
+ void blitH(int x, int y, int width) override {
+ SkASSERT(x >= 0 && y >= 0 && x + width <= fDevice.width());
+
+ if (fBlitBW) {
+ fBlitBW(&fBState, x, y, fDevice, width);
+ return;
+ }
+
+ typename State::DstType* device = State::WritableAddr(fDevice, x, y);
+ fShaderContext->shadeSpan4f(x, y, fState.fBuffer, width);
+ fState.fProcN(fState.fXfer, device, fState.fBuffer, width, nullptr);
+ }
+
+ void blitV(int x, int y, int height, SkAlpha alpha) override {
+ SkASSERT(x >= 0 && y >= 0 && y + height <= fDevice.height());
+
+ if (fBlitAA) {
+ for (const int bottom = y + height; y < bottom; ++y) {
+ fBlitAA(&fBState, x, y, fDevice, 1, &alpha);
+ }
+ return;
+ }
+
+ typename State::DstType* device = State::WritableAddr(fDevice, x, y);
+ size_t deviceRB = fDevice.rowBytes();
+
+ if (fConstInY) {
+ fShaderContext->shadeSpan4f(x, y, fState.fBuffer, 1);
+ }
+ for (const int bottom = y + height; y < bottom; ++y) {
+ if (!fConstInY) {
+ fShaderContext->shadeSpan4f(x, y, fState.fBuffer, 1);
+ }
+ fState.fProcN(fState.fXfer, device, fState.fBuffer, 1, &alpha);
+ device = (typename State::DstType*)((char*)device + deviceRB);
+ }
+ }
+
+ void blitRect(int x, int y, int width, int height) override {
+ SkASSERT(x >= 0 && y >= 0 &&
+ x + width <= fDevice.width() && y + height <= fDevice.height());
+
+ if (fBlitBW) {
+ for (const int bottom = y + height; y < bottom; ++y) {
+ fBlitBW(&fBState, x, y, fDevice, width);
+ }
+ return;
+ }
+
+ typename State::DstType* device = State::WritableAddr(fDevice, x, y);
+ size_t deviceRB = fDevice.rowBytes();
+
+ if (fConstInY) {
+ fShaderContext->shadeSpan4f(x, y, fState.fBuffer, width);
+ }
+ for (const int bottom = y + height; y < bottom; ++y) {
+ if (!fConstInY) {
+ fShaderContext->shadeSpan4f(x, y, fState.fBuffer, width);
+ }
+ fState.fProcN(fState.fXfer, device, fState.fBuffer, width, nullptr);
+ device = (typename State::DstType*)((char*)device + deviceRB);
+ }
+ }
+
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override {
+ typename State::DstType* device = State::WritableAddr(fDevice, x, y);
+
+ for (;;) {
+ int count = *runs;
+ if (count <= 0) {
+ break;
+ }
+ int aa = *antialias;
+ if (aa) {
+ if (fBlitBW && (aa == 255)) {
+ fBlitBW(&fBState, x, y, fDevice, count);
+ } else {
+ fShaderContext->shadeSpan4f(x, y, fState.fBuffer, count);
+ if (aa == 255) {
+ fState.fProcN(fState.fXfer, device, fState.fBuffer, count, nullptr);
+ } else {
+ for (int i = 0; i < count; ++i) {
+ fState.fProcN(fState.fXfer, &device[i], &fState.fBuffer[i], 1, antialias);
+ }
+ }
+ }
+ }
+ device += count;
+ runs += count;
+ antialias += count;
+ x += count;
+ }
+ }
+
+ void blitLCDMask(const SkMask& mask, const SkIRect& clip) {
+ auto proc = fState.getLCDProc(0);
+
+ const int x = clip.fLeft;
+ const int width = clip.width();
+ int y = clip.fTop;
+
+ typename State::DstType* device = State::WritableAddr(fDevice, x, y);
+ const size_t deviceRB = fDevice.rowBytes();
+ const uint16_t* maskRow = (const uint16_t*)mask.getAddr(x, y);
+ const size_t maskRB = mask.fRowBytes;
+
+ if (fConstInY) {
+ fShaderContext->shadeSpan4f(x, y, fState.fBuffer, width);
+ }
+ for (; y < clip.fBottom; ++y) {
+ if (!fConstInY) {
+ fShaderContext->shadeSpan4f(x, y, fState.fBuffer, width);
+ }
+ proc(device, fState.fBuffer, width, maskRow);
+ device = (typename State::DstType*)((char*)device + deviceRB);
+ maskRow = (const uint16_t*)((const char*)maskRow + maskRB);
+ }
+ }
+
+ void blitMask(const SkMask& mask, const SkIRect& clip) override {
+ if (SkMask::kLCD16_Format == mask.fFormat) {
+ this->blitLCDMask(mask, clip);
+ return;
+ }
+ if (SkMask::kA8_Format != mask.fFormat) {
+ this->INHERITED::blitMask(mask, clip);
+ return;
+ }
+
+ SkASSERT(mask.fBounds.contains(clip));
+
+ const int x = clip.fLeft;
+ const int width = clip.width();
+ int y = clip.fTop;
+ const uint8_t* maskRow = (const uint8_t*)mask.getAddr(x, y);
+ const size_t maskRB = mask.fRowBytes;
+
+ if (fBlitAA) {
+ for (; y < clip.fBottom; ++y) {
+ fBlitAA(&fBState, x, y, fDevice, width, maskRow);
+ maskRow += maskRB;
+ }
+ return;
+ }
+
+ typename State::DstType* device = State::WritableAddr(fDevice, x, y);
+ const size_t deviceRB = fDevice.rowBytes();
+
+ if (fConstInY) {
+ fShaderContext->shadeSpan4f(x, y, fState.fBuffer, width);
+ }
+ for (; y < clip.fBottom; ++y) {
+ if (!fConstInY) {
+ fShaderContext->shadeSpan4f(x, y, fState.fBuffer, width);
+ }
+ fState.fProcN(fState.fXfer, device, fState.fBuffer, width, maskRow);
+ device = (typename State::DstType*)((char*)device + deviceRB);
+ maskRow += maskRB;
+ }
+ }
+
+protected:
+ State fState;
+ SkShader::Context::BlitState fBState;
+ SkShader::Context::BlitBW fBlitBW;
+ SkShader::Context::BlitAA fBlitAA;
+
+ typedef SkShaderBlitter INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static bool is_opaque(const SkPaint& paint, const SkShader::Context* shaderContext) {
+ return shaderContext ? SkToBool(shaderContext->getFlags() & SkShader::kOpaqueAlpha_Flag)
+ : 0xFF == paint.getAlpha();
+}
+
+struct State4f {
+ State4f(const SkImageInfo& info, const SkPaint& paint, const SkShader::Context* shaderContext) {
+ fXfer = SkXfermode::Peek(paint.getBlendMode());
+ if (shaderContext) {
+ fBuffer.reset(info.width());
+ } else {
+ fPM4f = SkColor4f::FromColor(paint.getColor()).premul();
+ }
+ fFlags = 0;
+ }
+
+ SkXfermode* fXfer;
+ SkPM4f fPM4f;
+ SkAutoTMalloc<SkPM4f> fBuffer;
+ uint32_t fFlags;
+
+ SkShader::Context::BlitState fBState;
+};
+
+struct State32 : State4f {
+ typedef uint32_t DstType;
+
+ SkXfermode::D32Proc fProc1;
+ SkXfermode::D32Proc fProcN;
+
+ State32(const SkImageInfo& info, const SkPaint& paint, const SkShader::Context* shaderContext)
+ : State4f(info, paint, shaderContext)
+ {
+ if (is_opaque(paint, shaderContext)) {
+ fFlags |= SkXfermode::kSrcIsOpaque_D32Flag;
+ }
+ if (info.gammaCloseToSRGB()) {
+ fFlags |= SkXfermode::kDstIsSRGB_D32Flag;
+ }
+ fProc1 = SkXfermode::GetD32Proc(fXfer, fFlags | SkXfermode::kSrcIsSingle_D32Flag);
+ fProcN = SkXfermode::GetD32Proc(fXfer, fFlags);
+ }
+
+ SkXfermode::LCD32Proc getLCDProc(uint32_t oneOrManyFlag) const {
+ uint32_t flags = fFlags & 1;
+ if (fFlags & SkXfermode::kDstIsSRGB_D32Flag) {
+ flags |= SkXfermode::kDstIsSRGB_LCDFlag;
+ }
+ return SkXfermode::GetLCD32Proc(flags | oneOrManyFlag);
+ }
+
+ static DstType* WritableAddr(const SkPixmap& device, int x, int y) {
+ return device.writable_addr32(x, y);
+ }
+};
+
+struct StateF16 : State4f {
+ typedef uint64_t DstType;
+
+ SkXfermode::F16Proc fProc1;
+ SkXfermode::F16Proc fProcN;
+
+ StateF16(const SkImageInfo& info, const SkPaint& paint, const SkShader::Context* shaderContext)
+ : State4f(info, paint, shaderContext)
+ {
+ if (is_opaque(paint, shaderContext)) {
+ fFlags |= SkXfermode::kSrcIsOpaque_F16Flag;
+ }
+ SkASSERT(kRGBA_F16_SkColorType == info.colorType());
+ fProc1 = SkXfermode::GetF16Proc(fXfer, fFlags | SkXfermode::kSrcIsSingle_F16Flag);
+ fProcN = SkXfermode::GetF16Proc(fXfer, fFlags);
+ }
+
+ SkXfermode::LCDF16Proc getLCDProc(uint32_t oneOrManyFlag) const {
+ uint32_t flags = fFlags & 1;
+ return SkXfermode::GetLCDF16Proc(flags | oneOrManyFlag);
+ }
+
+ static DstType* WritableAddr(const SkPixmap& device, int x, int y) {
+ return device.writable_addr64(x, y);
+ }
+};
+
+template <typename State> SkBlitter* create(const SkPixmap& device, const SkPaint& paint,
+ SkShader::Context* shaderContext,
+ SkTBlitterAllocator* allocator) {
+ SkASSERT(allocator != nullptr);
+
+ if (shaderContext) {
+ SkShader::Context::BlitState bstate;
+ sk_bzero(&bstate, sizeof(bstate));
+ bstate.fCtx = shaderContext;
+ bstate.fXfer = SkXfermode::Peek(paint.getBlendMode());
+
+ (void)shaderContext->chooseBlitProcs(device.info(), &bstate);
+ return allocator->createT<SkState_Shader_Blitter<State>>(device, paint, bstate);
+ } else {
+ SkColor color = paint.getColor();
+ if (0 == SkColorGetA(color)) {
+ return nullptr;
+ }
+ return allocator->createT<SkState_Blitter<State>>(device, paint);
+ }
+}
+
+SkBlitter* SkBlitter_ARGB32_Create(const SkPixmap& device, const SkPaint& paint,
+ SkShader::Context* shaderContext,
+ SkTBlitterAllocator* allocator) {
+ return create<State32>(device, paint, shaderContext, allocator);
+}
+
+SkBlitter* SkBlitter_F16_Create(const SkPixmap& device, const SkPaint& paint,
+ SkShader::Context* shaderContext,
+ SkTBlitterAllocator* allocator) {
+ return create<StateF16>(device, paint, shaderContext, allocator);
+}
diff --git a/gfx/skia/skia/src/core/SkBlitter_RGB16.cpp b/gfx/skia/skia/src/core/SkBlitter_RGB16.cpp
new file mode 100644
index 000000000..7860b7cb6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitter_RGB16.cpp
@@ -0,0 +1,928 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBlitRow.h"
+#include "SkCoreBlitters.h"
+#include "SkColorPriv.h"
+#include "SkDither.h"
+#include "SkShader.h"
+#include "SkUtils.h"
+#include "SkUtilsArm.h"
+#include "SkXfermode.h"
+
+#if defined(__mips_dsp)
+extern void blitmask_d565_opaque_mips(int width, int height, uint16_t* device,
+ unsigned deviceRB, const uint8_t* alpha,
+ uint32_t expanded32, unsigned maskRB);
+#endif
+
+#if defined(SK_ARM_HAS_NEON) && defined(SK_CPU_LENDIAN)
+ #include <arm_neon.h>
+extern void SkRGB16BlitterBlitV_neon(uint16_t* device,
+ int height,
+ size_t deviceRB,
+ unsigned scale,
+ uint32_t src32);
+#else
+ // if we don't have neon, then our black blitter is worth the extra code
+ #define USE_BLACK_BLITTER
+#endif
+
+void sk_dither_memset16(uint16_t dst[], uint16_t value, uint16_t other,
+ int count) {
+ if (count > 0) {
+ // see if we need to write one short before we can cast to a 4-byte ptr
+ // (we do this subtract rather than (unsigned)dst so we don't get warnings
+ // on 64-bit machines)
+ if (((char*)dst - (char*)0) & 2) {
+ *dst++ = value;
+ count -= 1;
+ SkTSwap(value, other);
+ }
+
+ // fast way to set [value,other] pairs
+#ifdef SK_CPU_BENDIAN
+ sk_memset32((uint32_t*)dst, (value << 16) | other, count >> 1);
+#else
+ sk_memset32((uint32_t*)dst, (other << 16) | value, count >> 1);
+#endif
+
+ if (count & 1) {
+ dst[count - 1] = value;
+ }
+ }
+}
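+
+// A minimal usage sketch (the colors and the 8-pixel row are arbitrary, chosen
+// only for illustration): given the pixel coordinates (x, y) of the row start,
+// callers such as blitH() below pre-swap the two colors on odd (x ^ y) parity
+// and then let the paired 32-bit writes above produce the checkerboard dither:
+//
+//     uint16_t row[8];
+//     uint16_t c0 = SkPackRGB16(15, 31, 15);
+//     uint16_t c1 = SkPackRGB16(16, 32, 16);
+//     if ((x ^ y) & 1) {
+//         SkTSwap(c0, c1);
+//     }
+//     sk_dither_memset16(row, c0, c1, 8);   // row = c0, c1, c0, c1, ...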
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkRGB16_Blitter : public SkRasterBlitter {
+public:
+ SkRGB16_Blitter(const SkPixmap& device, const SkPaint& paint);
+ void blitH(int x, int y, int width) override;
+ virtual void blitAntiH(int x, int y, const SkAlpha* antialias,
+ const int16_t* runs) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitMask(const SkMask&, const SkIRect&) override;
+ const SkPixmap* justAnOpaqueColor(uint32_t*) override;
+
+protected:
+ SkPMColor fSrcColor32;
+ uint32_t fExpandedRaw16;
+ unsigned fScale;
+ uint16_t fColor16; // already scaled by fScale
+ uint16_t fRawColor16; // unscaled
+ uint16_t fRawDither16; // unscaled
+ SkBool8 fDoDither;
+
+ SkBlitRow::ColorProc16 fColorProc16;
+
+ // illegal
+ SkRGB16_Blitter& operator=(const SkRGB16_Blitter&);
+
+ typedef SkRasterBlitter INHERITED;
+};
+
+class SkRGB16_Opaque_Blitter : public SkRGB16_Blitter {
+public:
+ SkRGB16_Opaque_Blitter(const SkPixmap& device, const SkPaint& paint);
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha* antialias, const int16_t* runs) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitMask(const SkMask&, const SkIRect&) override;
+
+private:
+ typedef SkRGB16_Blitter INHERITED;
+};
+
+#ifdef USE_BLACK_BLITTER
+class SkRGB16_Black_Blitter : public SkRGB16_Opaque_Blitter {
+public:
+ SkRGB16_Black_Blitter(const SkPixmap& device, const SkPaint& paint);
+ void blitMask(const SkMask&, const SkIRect&) override;
+ void blitAntiH(int x, int y, const SkAlpha* antialias, const int16_t* runs) override;
+
+private:
+ typedef SkRGB16_Opaque_Blitter INHERITED;
+};
+#endif
+
+class SkRGB16_Shader_Blitter : public SkShaderBlitter {
+public:
+ SkRGB16_Shader_Blitter(const SkPixmap& device, const SkPaint& paint,
+ SkShader::Context* shaderContext);
+ virtual ~SkRGB16_Shader_Blitter();
+ void blitH(int x, int y, int width) override;
+ virtual void blitAntiH(int x, int y, const SkAlpha* antialias,
+ const int16_t* runs) override;
+ void blitRect(int x, int y, int width, int height) override;
+
+protected:
+ SkPMColor* fBuffer;
+ SkBlitRow::Proc16 fOpaqueProc;
+ SkBlitRow::Proc16 fAlphaProc;
+
+private:
+ // illegal
+ SkRGB16_Shader_Blitter& operator=(const SkRGB16_Shader_Blitter&);
+
+ typedef SkShaderBlitter INHERITED;
+};
+
+class SkRGB16_Shader_Xfermode_Blitter : public SkShaderBlitter {
+public:
+ SkRGB16_Shader_Xfermode_Blitter(const SkPixmap& device, const SkPaint& paint,
+ SkShader::Context* shaderContext);
+ virtual ~SkRGB16_Shader_Xfermode_Blitter();
+ void blitH(int x, int y, int width) override;
+ virtual void blitAntiH(int x, int y, const SkAlpha* antialias,
+ const int16_t* runs) override;
+
+private:
+ SkXfermode* fXfermode;
+ SkPMColor* fBuffer;
+ uint8_t* fAAExpand;
+
+ // illegal
+ SkRGB16_Shader_Xfermode_Blitter& operator=(const SkRGB16_Shader_Xfermode_Blitter&);
+
+ typedef SkShaderBlitter INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+#ifdef USE_BLACK_BLITTER
+SkRGB16_Black_Blitter::SkRGB16_Black_Blitter(const SkPixmap& device, const SkPaint& paint)
+ : INHERITED(device, paint) {
+ SkASSERT(paint.getShader() == nullptr);
+ SkASSERT(paint.getColorFilter() == nullptr);
+ SkASSERT(paint.isSrcOver());
+ SkASSERT(paint.getColor() == SK_ColorBLACK);
+}
+
+#if 1
+#define black_8_pixels(mask, dst) \
+ do { \
+ if (mask & 0x80) dst[0] = 0; \
+ if (mask & 0x40) dst[1] = 0; \
+ if (mask & 0x20) dst[2] = 0; \
+ if (mask & 0x10) dst[3] = 0; \
+ if (mask & 0x08) dst[4] = 0; \
+ if (mask & 0x04) dst[5] = 0; \
+ if (mask & 0x02) dst[6] = 0; \
+ if (mask & 0x01) dst[7] = 0; \
+ } while (0)
+#else
+static inline void black_8_pixels(U8CPU mask, uint16_t dst[])
+{
+ if (mask & 0x80) dst[0] = 0;
+ if (mask & 0x40) dst[1] = 0;
+ if (mask & 0x20) dst[2] = 0;
+ if (mask & 0x10) dst[3] = 0;
+ if (mask & 0x08) dst[4] = 0;
+ if (mask & 0x04) dst[5] = 0;
+ if (mask & 0x02) dst[6] = 0;
+ if (mask & 0x01) dst[7] = 0;
+}
+#endif
+
+#define SK_BLITBWMASK_NAME SkRGB16_Black_BlitBW
+#define SK_BLITBWMASK_ARGS
+#define SK_BLITBWMASK_BLIT8(mask, dst) black_8_pixels(mask, dst)
+#define SK_BLITBWMASK_GETADDR writable_addr16
+#define SK_BLITBWMASK_DEVTYPE uint16_t
+#include "SkBlitBWMaskTemplate.h"
+
+void SkRGB16_Black_Blitter::blitMask(const SkMask& mask,
+ const SkIRect& clip) {
+ if (mask.fFormat == SkMask::kBW_Format) {
+ SkRGB16_Black_BlitBW(fDevice, mask, clip);
+ } else {
+ uint16_t* SK_RESTRICT device = fDevice.writable_addr16(clip.fLeft, clip.fTop);
+ const uint8_t* SK_RESTRICT alpha = mask.getAddr8(clip.fLeft, clip.fTop);
+ unsigned width = clip.width();
+ unsigned height = clip.height();
+ size_t deviceRB = fDevice.rowBytes() - (width << 1);
+ unsigned maskRB = mask.fRowBytes - width;
+
+ SkASSERT((int)height > 0);
+ SkASSERT((int)width > 0);
+ SkASSERT((int)deviceRB >= 0);
+ SkASSERT((int)maskRB >= 0);
+
+ do {
+ unsigned w = width;
+ do {
+ unsigned aa = *alpha++;
+ *device = SkAlphaMulRGB16(*device, SkAlpha255To256(255 - aa));
+ device += 1;
+ } while (--w != 0);
+ device = (uint16_t*)((char*)device + deviceRB);
+ alpha += maskRB;
+ } while (--height != 0);
+ }
+}
+
+void SkRGB16_Black_Blitter::blitAntiH(int x, int y,
+ const SkAlpha* SK_RESTRICT antialias,
+ const int16_t* SK_RESTRICT runs) {
+ uint16_t* SK_RESTRICT device = fDevice.writable_addr16(x, y);
+
+ for (;;) {
+ int count = runs[0];
+ SkASSERT(count >= 0);
+ if (count <= 0) {
+ return;
+ }
+ runs += count;
+
+ unsigned aa = antialias[0];
+ antialias += count;
+ if (aa) {
+ if (aa == 255) {
+ memset(device, 0, count << 1);
+ } else {
+ aa = SkAlpha255To256(255 - aa);
+ do {
+ *device = SkAlphaMulRGB16(*device, aa);
+ device += 1;
+ } while (--count != 0);
+ continue;
+ }
+ }
+ device += count;
+ }
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+SkRGB16_Opaque_Blitter::SkRGB16_Opaque_Blitter(const SkPixmap& device, const SkPaint& paint)
+ : INHERITED(device, paint) {}
+
+void SkRGB16_Opaque_Blitter::blitH(int x, int y, int width) {
+ SkASSERT(width > 0);
+ SkASSERT(x + width <= fDevice.width());
+ uint16_t* SK_RESTRICT device = fDevice.writable_addr16(x, y);
+ uint16_t srcColor = fColor16;
+
+ SkASSERT(fRawColor16 == srcColor);
+ if (fDoDither) {
+ uint16_t ditherColor = fRawDither16;
+ if ((x ^ y) & 1) {
+ SkTSwap(ditherColor, srcColor);
+ }
+ sk_dither_memset16(device, srcColor, ditherColor, width);
+ } else {
+ sk_memset16(device, srcColor, width);
+ }
+}
+
+// return 1 or 0 from a bool
+static inline int Bool2Int(int value) {
+ return !!value;
+}
+
+void SkRGB16_Opaque_Blitter::blitAntiH(int x, int y,
+ const SkAlpha* SK_RESTRICT antialias,
+ const int16_t* SK_RESTRICT runs) {
+ uint16_t* SK_RESTRICT device = fDevice.writable_addr16(x, y);
+ uint16_t srcColor = fRawColor16;
+ uint32_t srcExpanded = fExpandedRaw16;
+ int ditherInt = Bool2Int(fDoDither);
+ uint16_t ditherColor = fRawDither16;
+ // if we have no dithering, this will always fail
+ if ((x ^ y) & ditherInt) {
+ SkTSwap(ditherColor, srcColor);
+ }
+ for (;;) {
+ int count = runs[0];
+ SkASSERT(count >= 0);
+ if (count <= 0) {
+ return;
+ }
+ runs += count;
+
+ unsigned aa = antialias[0];
+ antialias += count;
+ if (aa) {
+ if (aa == 255) {
+ if (ditherInt) {
+ sk_dither_memset16(device, srcColor,
+ ditherColor, count);
+ } else {
+ sk_memset16(device, srcColor, count);
+ }
+ } else {
+ // TODO: respect fDoDither
+ unsigned scale5 = SkAlpha255To256(aa) >> 3;
+ uint32_t src32 = srcExpanded * scale5;
+ scale5 = 32 - scale5; // now we can use it on the device
+ int n = count;
+ do {
+ uint32_t dst32 = SkExpand_rgb_16(*device) * scale5;
+ *device++ = SkCompact_rgb_16((src32 + dst32) >> 5);
+ } while (--n != 0);
+ goto DONE;
+ }
+ }
+ device += count;
+
+ DONE:
+ // if we have no dithering, this will always fail
+ if (count & ditherInt) {
+ SkTSwap(ditherColor, srcColor);
+ }
+ }
+}
+
+#define solid_8_pixels(mask, dst, color) \
+ do { \
+ if (mask & 0x80) dst[0] = color; \
+ if (mask & 0x40) dst[1] = color; \
+ if (mask & 0x20) dst[2] = color; \
+ if (mask & 0x10) dst[3] = color; \
+ if (mask & 0x08) dst[4] = color; \
+ if (mask & 0x04) dst[5] = color; \
+ if (mask & 0x02) dst[6] = color; \
+ if (mask & 0x01) dst[7] = color; \
+ } while (0)
+
+#define SK_BLITBWMASK_NAME SkRGB16_BlitBW
+#define SK_BLITBWMASK_ARGS , uint16_t color
+#define SK_BLITBWMASK_BLIT8(mask, dst) solid_8_pixels(mask, dst, color)
+#define SK_BLITBWMASK_GETADDR writable_addr16
+#define SK_BLITBWMASK_DEVTYPE uint16_t
+#include "SkBlitBWMaskTemplate.h"
+
+#if !defined(__mips_dsp)
+static U16CPU blend_compact(uint32_t src32, uint32_t dst32, unsigned scale5) {
+ return SkCompact_rgb_16(dst32 + ((src32 - dst32) * scale5 >> 5));
+}
+#endif
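+
+// blend_compact() works on "expanded" 565 pixels: SkExpand_rgb_16() moves the
+// green bits into the upper half of a 32-bit word (the same split the NEON
+// path in blitMask() below builds with vmask_g16 / vmask_ng16), which leaves
+// enough headroom for all three channels to be scaled by a 0..32 factor in a
+// single 32-bit multiply before being packed back down to 16 bits. A sketch
+// mirroring the calls below:
+//
+//     uint32_t src32  = SkExpand_rgb_16(srcColor16);
+//     uint32_t dst32  = SkExpand_rgb_16(*device);
+//     unsigned scale5 = SkAlpha255To256(a8) >> 3;   // a8 is the 8-bit mask alpha
+//     *device = blend_compact(src32, dst32, scale5);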
+
+void SkRGB16_Opaque_Blitter::blitMask(const SkMask& mask,
+ const SkIRect& clip) {
+ if (mask.fFormat == SkMask::kBW_Format) {
+ SkRGB16_BlitBW(fDevice, mask, clip, fColor16);
+ return;
+ }
+
+ uint16_t* SK_RESTRICT device = fDevice.writable_addr16(clip.fLeft, clip.fTop);
+ const uint8_t* SK_RESTRICT alpha = mask.getAddr8(clip.fLeft, clip.fTop);
+ int width = clip.width();
+ int height = clip.height();
+ size_t deviceRB = fDevice.rowBytes() - (width << 1);
+ unsigned maskRB = mask.fRowBytes - width;
+ uint32_t expanded32 = fExpandedRaw16;
+
+#if defined(SK_ARM_HAS_NEON) && defined(SK_CPU_LENDIAN)
+#define UNROLL 8
+ do {
+ int w = width;
+ if (w >= UNROLL) {
+ uint32x4_t color, dev_lo, dev_hi;
+ uint32x4_t wn1, wn2, tmp;
+ uint32x4_t vmask_g16, vmask_ng16;
+ uint16x8_t valpha, vdev;
+ uint16x4_t odev_lo, odev_hi, valpha_lo, valpha_hi;
+
+ // prepare constants
+ vmask_g16 = vdupq_n_u32(SK_G16_MASK_IN_PLACE);
+ vmask_ng16 = vdupq_n_u32(~SK_G16_MASK_IN_PLACE);
+ color = vdupq_n_u32(expanded32);
+
+ do {
+ // alpha is 8x8, widen and split to get a pair of 16x4
+ valpha = vaddw_u8(vdupq_n_u16(1), vld1_u8(alpha));
+ valpha = vshrq_n_u16(valpha, 3);
+ valpha_lo = vget_low_u16(valpha);
+ valpha_hi = vget_high_u16(valpha);
+
+ // load pixels
+ vdev = vld1q_u16(device);
+ dev_lo = vmovl_u16(vget_low_u16(vdev));
+ dev_hi = vmovl_u16(vget_high_u16(vdev));
+
+ // unpack them in 32 bits
+ dev_lo = (dev_lo & vmask_ng16) | vshlq_n_u32(dev_lo & vmask_g16, 16);
+ dev_hi = (dev_hi & vmask_ng16) | vshlq_n_u32(dev_hi & vmask_g16, 16);
+
+ // blend with color
+ tmp = (color - dev_lo) * vmovl_u16(valpha_lo);
+ tmp = vshrq_n_u32(tmp, 5);
+ dev_lo += tmp;
+
+ tmp = vmulq_u32(color - dev_hi, vmovl_u16(valpha_hi));
+ tmp = vshrq_n_u32(tmp, 5);
+ dev_hi += tmp;
+
+ // re-compact
+ wn1 = dev_lo & vmask_ng16;
+ wn2 = vshrq_n_u32(dev_lo, 16) & vmask_g16;
+ odev_lo = vmovn_u32(wn1 | wn2);
+
+ wn1 = dev_hi & vmask_ng16;
+ wn2 = vshrq_n_u32(dev_hi, 16) & vmask_g16;
+ odev_hi = vmovn_u32(wn1 | wn2);
+
+ // store
+ vst1q_u16(device, vcombine_u16(odev_lo, odev_hi));
+
+ device += UNROLL;
+ alpha += UNROLL;
+ w -= UNROLL;
+ } while (w >= UNROLL);
+ }
+
+ // residuals
+ while (w > 0) {
+ *device = blend_compact(expanded32, SkExpand_rgb_16(*device),
+ SkAlpha255To256(*alpha++) >> 3);
+ device += 1;
+ --w;
+ }
+ device = (uint16_t*)((char*)device + deviceRB);
+ alpha += maskRB;
+ } while (--height != 0);
+#undef UNROLL
+#elif defined(__mips_dsp)
+ blitmask_d565_opaque_mips(width, height, device, deviceRB, alpha, expanded32, maskRB);
+#else // non-neon code
+ do {
+ int w = width;
+ do {
+ *device = blend_compact(expanded32, SkExpand_rgb_16(*device),
+ SkAlpha255To256(*alpha++) >> 3);
+ device += 1;
+ } while (--w != 0);
+ device = (uint16_t*)((char*)device + deviceRB);
+ alpha += maskRB;
+ } while (--height != 0);
+#endif
+}
+
+void SkRGB16_Opaque_Blitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ uint16_t* SK_RESTRICT device = fDevice.writable_addr16(x, y);
+ size_t deviceRB = fDevice.rowBytes();
+
+ // TODO: respect fDoDither
+ unsigned scale5 = SkAlpha255To256(alpha) >> 3;
+ uint32_t src32 = fExpandedRaw16 * scale5;
+ scale5 = 32 - scale5;
+#if defined(SK_ARM_HAS_NEON) && defined(SK_CPU_LENDIAN)
+ SkRGB16BlitterBlitV_neon(device, height, deviceRB, scale5, src32);
+#else
+ do {
+ uint32_t dst32 = SkExpand_rgb_16(*device) * scale5;
+ *device = SkCompact_rgb_16((src32 + dst32) >> 5);
+ device = (uint16_t*)((char*)device + deviceRB);
+ } while (--height != 0);
+#endif
+}
+
+void SkRGB16_Opaque_Blitter::blitRect(int x, int y, int width, int height) {
+ SkASSERT(x + width <= fDevice.width() && y + height <= fDevice.height());
+ uint16_t* SK_RESTRICT device = fDevice.writable_addr16(x, y);
+ size_t deviceRB = fDevice.rowBytes();
+ uint16_t color16 = fColor16;
+
+ if (fDoDither) {
+ uint16_t ditherColor = fRawDither16;
+ if ((x ^ y) & 1) {
+ SkTSwap(ditherColor, color16);
+ }
+ while (--height >= 0) {
+ sk_dither_memset16(device, color16, ditherColor, width);
+ SkTSwap(ditherColor, color16);
+ device = (uint16_t*)((char*)device + deviceRB);
+ }
+ } else { // no dither
+ while (--height >= 0) {
+ sk_memset16(device, color16, width);
+ device = (uint16_t*)((char*)device + deviceRB);
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkRGB16_Blitter::SkRGB16_Blitter(const SkPixmap& device, const SkPaint& paint)
+ : INHERITED(device) {
+ SkColor color = paint.getColor();
+
+ fSrcColor32 = SkPreMultiplyColor(color);
+ fScale = SkAlpha255To256(SkColorGetA(color));
+
+ int r = SkColorGetR(color);
+ int g = SkColorGetG(color);
+ int b = SkColorGetB(color);
+
+ fRawColor16 = fRawDither16 = SkPack888ToRGB16(r, g, b);
+ // if we're dithered, use fRawDither16 to hold that.
+ if ((fDoDither = paint.isDither()) != false) {
+ fRawDither16 = SkDitherPack888ToRGB16(r, g, b);
+ }
+
+ fExpandedRaw16 = SkExpand_rgb_16(fRawColor16);
+
+ fColor16 = SkPackRGB16( SkAlphaMul(r, fScale) >> (8 - SK_R16_BITS),
+ SkAlphaMul(g, fScale) >> (8 - SK_G16_BITS),
+ SkAlphaMul(b, fScale) >> (8 - SK_B16_BITS));
+
+ // compute SkBlitRow::Procs
+ unsigned flags = 0;
+
+ if (SkGetPackedA32(fSrcColor32) < 0xFF) {
+ flags |= SkBlitRow::kSrcPixelAlpha_Flag;
+ }
+
+ if (fDoDither) {
+ flags |= SkBlitRow::kDither_Flag;
+ }
+
+ fColorProc16 = SkBlitRow::ColorFactory16(flags);
+}
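+
+// Worked example (assuming SkAlpha255To256(a) == a + 1, SkAlphaMul(v, s) ==
+// (v * s) >> 8 and SK_R16_BITS == 5, consistent with how they are used here):
+// for a half-transparent red, SkColorSetARGB(0x80, 0xFF, 0, 0), fScale is 129
+// and the red component of fColor16 becomes
+// ((255 * 129) >> 8) >> (8 - 5) = 128 >> 3 = 16 out of 31, i.e. the color is
+// pre-scaled by its alpha before being packed into 565.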
+
+const SkPixmap* SkRGB16_Blitter::justAnOpaqueColor(uint32_t* value) {
+ if (!fDoDither && 256 == fScale) {
+ *value = fRawColor16;
+ return &fDevice;
+ }
+ return nullptr;
+}
+
+void SkRGB16_Blitter::blitH(int x, int y, int width) {
+ SkASSERT(width > 0);
+ SkASSERT(x + width <= fDevice.width());
+ uint16_t* SK_RESTRICT device = fDevice.writable_addr16(x, y);
+
+ fColorProc16(device, fSrcColor32, width, x, y);
+}
+
+void SkRGB16_Blitter::blitAntiH(int x, int y,
+ const SkAlpha* SK_RESTRICT antialias,
+ const int16_t* SK_RESTRICT runs) {
+ uint16_t* SK_RESTRICT device = fDevice.writable_addr16(x, y);
+ uint32_t srcExpanded = fExpandedRaw16;
+ unsigned scale = fScale;
+
+ // TODO: respect fDoDither
+ for (;;) {
+ int count = runs[0];
+ SkASSERT(count >= 0);
+ if (count <= 0) {
+ return;
+ }
+ runs += count;
+
+ unsigned aa = antialias[0];
+ antialias += count;
+ if (aa) {
+ unsigned scale5 = SkAlpha255To256(aa) * scale >> (8 + 3);
+ uint32_t src32 = srcExpanded * scale5;
+ scale5 = 32 - scale5;
+ do {
+ uint32_t dst32 = SkExpand_rgb_16(*device) * scale5;
+ *device++ = SkCompact_rgb_16((src32 + dst32) >> 5);
+ } while (--count != 0);
+ continue;
+ }
+ device += count;
+ }
+}
+
+static inline void blend_8_pixels(U8CPU bw, uint16_t dst[], unsigned dst_scale,
+ U16CPU srcColor) {
+ if (bw & 0x80) dst[0] = srcColor + SkAlphaMulRGB16(dst[0], dst_scale);
+ if (bw & 0x40) dst[1] = srcColor + SkAlphaMulRGB16(dst[1], dst_scale);
+ if (bw & 0x20) dst[2] = srcColor + SkAlphaMulRGB16(dst[2], dst_scale);
+ if (bw & 0x10) dst[3] = srcColor + SkAlphaMulRGB16(dst[3], dst_scale);
+ if (bw & 0x08) dst[4] = srcColor + SkAlphaMulRGB16(dst[4], dst_scale);
+ if (bw & 0x04) dst[5] = srcColor + SkAlphaMulRGB16(dst[5], dst_scale);
+ if (bw & 0x02) dst[6] = srcColor + SkAlphaMulRGB16(dst[6], dst_scale);
+ if (bw & 0x01) dst[7] = srcColor + SkAlphaMulRGB16(dst[7], dst_scale);
+}
+
+#define SK_BLITBWMASK_NAME SkRGB16_BlendBW
+#define SK_BLITBWMASK_ARGS , unsigned dst_scale, U16CPU src_color
+#define SK_BLITBWMASK_BLIT8(mask, dst) blend_8_pixels(mask, dst, dst_scale, src_color)
+#define SK_BLITBWMASK_GETADDR writable_addr16
+#define SK_BLITBWMASK_DEVTYPE uint16_t
+#include "SkBlitBWMaskTemplate.h"
+
+void SkRGB16_Blitter::blitMask(const SkMask& mask,
+ const SkIRect& clip) {
+ if (mask.fFormat == SkMask::kBW_Format) {
+ SkRGB16_BlendBW(fDevice, mask, clip, 256 - fScale, fColor16);
+ return;
+ }
+
+ uint16_t* SK_RESTRICT device = fDevice.writable_addr16(clip.fLeft, clip.fTop);
+ const uint8_t* SK_RESTRICT alpha = mask.getAddr8(clip.fLeft, clip.fTop);
+ int width = clip.width();
+ int height = clip.height();
+ size_t deviceRB = fDevice.rowBytes() - (width << 1);
+ unsigned maskRB = mask.fRowBytes - width;
+ uint32_t color32 = fExpandedRaw16;
+
+ unsigned scale256 = fScale;
+ do {
+ int w = width;
+ do {
+ unsigned aa = *alpha++;
+ unsigned scale = SkAlpha255To256(aa) * scale256 >> (8 + 3);
+ uint32_t src32 = color32 * scale;
+ uint32_t dst32 = SkExpand_rgb_16(*device) * (32 - scale);
+ *device++ = SkCompact_rgb_16((src32 + dst32) >> 5);
+ } while (--w != 0);
+ device = (uint16_t*)((char*)device + deviceRB);
+ alpha += maskRB;
+ } while (--height != 0);
+}
+
+void SkRGB16_Blitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ uint16_t* SK_RESTRICT device = fDevice.writable_addr16(x, y);
+ size_t deviceRB = fDevice.rowBytes();
+
+ // TODO: respect fDoDither
+ unsigned scale5 = SkAlpha255To256(alpha) * fScale >> (8 + 3);
+ uint32_t src32 = fExpandedRaw16 * scale5;
+ scale5 = 32 - scale5;
+#if defined(SK_ARM_HAS_NEON) && defined(SK_CPU_LENDIAN)
+ SkRGB16BlitterBlitV_neon(device, height, deviceRB, scale5, src32);
+#else
+ do {
+ uint32_t dst32 = SkExpand_rgb_16(*device) * scale5;
+ *device = SkCompact_rgb_16((src32 + dst32) >> 5);
+ device = (uint16_t*)((char*)device + deviceRB);
+ } while (--height != 0);
+#endif
+}
+
+void SkRGB16_Blitter::blitRect(int x, int y, int width, int height) {
+ SkASSERT(x + width <= fDevice.width() && y + height <= fDevice.height());
+ uint16_t* SK_RESTRICT device = fDevice.writable_addr16(x, y);
+ size_t deviceRB = fDevice.rowBytes();
+
+ while (--height >= 0) {
+ fColorProc16(device, fSrcColor32, width, x, y);
+ device = (uint16_t*)((char*)device + deviceRB);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkRGB16_Shader_Blitter::SkRGB16_Shader_Blitter(const SkPixmap& device,
+ const SkPaint& paint,
+ SkShader::Context* shaderContext)
+ : INHERITED(device, paint, shaderContext)
+{
+ SkASSERT(paint.isSrcOver());
+
+ fBuffer = (SkPMColor*)sk_malloc_throw(device.width() * sizeof(SkPMColor));
+
+ // compute SkBlitRow::Procs
+ unsigned flags = 0;
+
+ uint32_t shaderFlags = fShaderFlags;
+ // shaders take care of global alpha, so we never set it in SkBlitRow
+ if (!(shaderFlags & SkShader::kOpaqueAlpha_Flag)) {
+ flags |= SkBlitRow::kSrcPixelAlpha_Flag;
+ }
+ if (paint.isDither()) {
+ flags |= SkBlitRow::kDither_Flag;
+ }
+ // used when we know our global alpha is 0xFF
+ fOpaqueProc = SkBlitRow::Factory16(flags);
+ // used when we know our global alpha is < 0xFF
+ fAlphaProc = SkBlitRow::Factory16(flags | SkBlitRow::kGlobalAlpha_Flag);
+}
+
+SkRGB16_Shader_Blitter::~SkRGB16_Shader_Blitter() {
+ sk_free(fBuffer);
+}
+
+void SkRGB16_Shader_Blitter::blitH(int x, int y, int width) {
+ SkASSERT(x + width <= fDevice.width());
+
+ fShaderContext->shadeSpan(x, y, fBuffer, width);
+ // shaders take care of global alpha, so we pass 0xFF (should be ignored)
+ fOpaqueProc(fDevice.writable_addr16(x, y), fBuffer, width, 0xFF, x, y);
+}
+
+void SkRGB16_Shader_Blitter::blitRect(int x, int y, int width, int height) {
+ SkShader::Context* shaderContext = fShaderContext;
+ SkBlitRow::Proc16 proc = fOpaqueProc;
+ SkPMColor* buffer = fBuffer;
+ uint16_t* dst = fDevice.writable_addr16(x, y);
+ size_t dstRB = fDevice.rowBytes();
+
+ if (fShaderFlags & SkShader::kConstInY32_Flag) {
+ shaderContext->shadeSpan(x, y, buffer, width);
+ do {
+ proc(dst, buffer, width, 0xFF, x, y);
+ y += 1;
+ dst = (uint16_t*)((char*)dst + dstRB);
+ } while (--height);
+ } else {
+ do {
+ shaderContext->shadeSpan(x, y, buffer, width);
+ proc(dst, buffer, width, 0xFF, x, y);
+ y += 1;
+ dst = (uint16_t*)((char*)dst + dstRB);
+ } while (--height);
+ }
+}
+
+static inline int count_nonzero_span(const int16_t runs[], const SkAlpha aa[]) {
+ int count = 0;
+ for (;;) {
+ int n = *runs;
+ if (n == 0 || *aa == 0) {
+ break;
+ }
+ runs += n;
+ aa += n;
+ count += n;
+ }
+ return count;
+}
+
+void SkRGB16_Shader_Blitter::blitAntiH(int x, int y,
+ const SkAlpha* SK_RESTRICT antialias,
+ const int16_t* SK_RESTRICT runs) {
+ SkShader::Context* shaderContext = fShaderContext;
+ SkPMColor* SK_RESTRICT span = fBuffer;
+ uint16_t* SK_RESTRICT device = fDevice.writable_addr16(x, y);
+
+ for (;;) {
+ int count = *runs;
+ if (count <= 0) {
+ break;
+ }
+ int aa = *antialias;
+ if (0 == aa) {
+ device += count;
+ runs += count;
+ antialias += count;
+ x += count;
+ continue;
+ }
+
+ int nonZeroCount = count + count_nonzero_span(runs + count, antialias + count);
+
+ SkASSERT(nonZeroCount <= fDevice.width()); // don't overrun fBuffer
+ shaderContext->shadeSpan(x, y, span, nonZeroCount);
+
+ SkPMColor* localSpan = span;
+ for (;;) {
+ SkBlitRow::Proc16 proc = (aa == 0xFF) ? fOpaqueProc : fAlphaProc;
+ proc(device, localSpan, count, aa, x, y);
+
+ x += count;
+ device += count;
+ runs += count;
+ antialias += count;
+ nonZeroCount -= count;
+ if (nonZeroCount == 0) {
+ break;
+ }
+ localSpan += count;
+ SkASSERT(nonZeroCount > 0);
+ count = *runs;
+ SkASSERT(count > 0);
+ aa = *antialias;
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////
+
+SkRGB16_Shader_Xfermode_Blitter::SkRGB16_Shader_Xfermode_Blitter(
+ const SkPixmap& device, const SkPaint& paint,
+ SkShader::Context* shaderContext)
+ : INHERITED(device, paint, shaderContext)
+{
+ fXfermode = SkXfermode::Peek(paint.getBlendMode());
+ SkASSERT(fXfermode);
+
+ int width = device.width();
+ fBuffer = (SkPMColor*)sk_malloc_throw((width + (SkAlign4(width) >> 2)) * sizeof(SkPMColor));
+ fAAExpand = (uint8_t*)(fBuffer + width);
+}
+
+SkRGB16_Shader_Xfermode_Blitter::~SkRGB16_Shader_Xfermode_Blitter() {
+ sk_free(fBuffer);
+}
+
+void SkRGB16_Shader_Xfermode_Blitter::blitH(int x, int y, int width) {
+ SkASSERT(x + width <= fDevice.width());
+
+ uint16_t* device = fDevice.writable_addr16(x, y);
+ SkPMColor* span = fBuffer;
+
+ fShaderContext->shadeSpan(x, y, span, width);
+ fXfermode->xfer16(device, span, width, nullptr);
+}
+
+void SkRGB16_Shader_Xfermode_Blitter::blitAntiH(int x, int y,
+ const SkAlpha* SK_RESTRICT antialias,
+ const int16_t* SK_RESTRICT runs) {
+ SkShader::Context* shaderContext = fShaderContext;
+ SkXfermode* mode = fXfermode;
+ SkPMColor* SK_RESTRICT span = fBuffer;
+ uint8_t* SK_RESTRICT aaExpand = fAAExpand;
+ uint16_t* SK_RESTRICT device = fDevice.writable_addr16(x, y);
+
+ for (;;) {
+ int count = *runs;
+ if (count <= 0) {
+ break;
+ }
+ int aa = *antialias;
+ if (0 == aa) {
+ device += count;
+ runs += count;
+ antialias += count;
+ x += count;
+ continue;
+ }
+
+ int nonZeroCount = count + count_nonzero_span(runs + count,
+ antialias + count);
+
+ SkASSERT(nonZeroCount <= fDevice.width()); // don't overrun fBuffer
+ shaderContext->shadeSpan(x, y, span, nonZeroCount);
+
+ x += nonZeroCount;
+ SkPMColor* localSpan = span;
+ for (;;) {
+ if (aa == 0xFF) {
+ mode->xfer16(device, localSpan, count, nullptr);
+ } else {
+ SkASSERT(aa);
+ memset(aaExpand, aa, count);
+ mode->xfer16(device, localSpan, count, aaExpand);
+ }
+ device += count;
+ runs += count;
+ antialias += count;
+ nonZeroCount -= count;
+ if (nonZeroCount == 0) {
+ break;
+ }
+ localSpan += count;
+ SkASSERT(nonZeroCount > 0);
+ count = *runs;
+ SkASSERT(count > 0);
+ aa = *antialias;
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkBlitter* SkBlitter_ChooseD565(const SkPixmap& device, const SkPaint& paint,
+ SkShader::Context* shaderContext,
+ SkTBlitterAllocator* allocator) {
+ SkASSERT(allocator != nullptr);
+
+ SkBlitter* blitter;
+ SkShader* shader = paint.getShader();
+ bool is_srcover = paint.isSrcOver();
+
+ // we require a shader if there is an xfermode, handled by our caller
+ SkASSERT(is_srcover || shader);
+
+ if (shader) {
+ SkASSERT(shaderContext != nullptr);
+ if (!is_srcover) {
+ blitter = allocator->createT<SkRGB16_Shader_Xfermode_Blitter>(device, paint,
+ shaderContext);
+ } else {
+ blitter = allocator->createT<SkRGB16_Shader_Blitter>(device, paint, shaderContext);
+ }
+ } else {
+ // no shader, no xfermode, (and we always ignore colorfilter)
+ SkColor color = paint.getColor();
+ if (0 == SkColorGetA(color)) {
+ blitter = allocator->createT<SkNullBlitter>();
+#ifdef USE_BLACK_BLITTER
+ } else if (SK_ColorBLACK == color) {
+ blitter = allocator->createT<SkRGB16_Black_Blitter>(device, paint);
+#endif
+ } else if (0xFF == SkColorGetA(color)) {
+ blitter = allocator->createT<SkRGB16_Opaque_Blitter>(device, paint);
+ } else {
+ blitter = allocator->createT<SkRGB16_Blitter>(device, paint);
+ }
+ }
+
+ return blitter;
+}
diff --git a/gfx/skia/skia/src/core/SkBlitter_Sprite.cpp b/gfx/skia/skia/src/core/SkBlitter_Sprite.cpp
new file mode 100644
index 000000000..cef4cfaa2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitter_Sprite.cpp
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkOpts.h"
+#include "SkSmallAllocator.h"
+#include "SkSpriteBlitter.h"
+
+SkSpriteBlitter::SkSpriteBlitter(const SkPixmap& source)
+ : fSource(source) {}
+
+void SkSpriteBlitter::setup(const SkPixmap& dst, int left, int top, const SkPaint& paint) {
+ fDst = dst;
+ fLeft = left;
+ fTop = top;
+ fPaint = &paint;
+}
+
+void SkSpriteBlitter::blitH(int x, int y, int width) {
+ SkDEBUGFAIL("how did we get here?");
+
+ // Fall back to blitRect.
+ this->blitRect(x, y, width, 1);
+}
+
+void SkSpriteBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) {
+ SkDEBUGFAIL("how did we get here?");
+
+ // No fallback strategy.
+}
+
+void SkSpriteBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ SkDEBUGFAIL("how did we get here?");
+
+ // Fall back to superclass if the code gets here in release mode.
+ INHERITED::blitV(x, y, height, alpha);
+}
+
+void SkSpriteBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ SkDEBUGFAIL("how did we get here?");
+
+ // Fall back to superclass if the code gets here in release mode.
+ INHERITED::blitMask(mask, clip);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Only valid if...
+// 1. src == dst format
+// 2. paint has no modifiers (i.e. alpha, colorfilter, etc.)
+// 3. xfermode needs no blending: e.g. kSrc_Mode or kSrcOver_Mode + opaque src
+//
+class SkSpriteBlitter_Src_SrcOver final : public SkSpriteBlitter {
+public:
+ static bool Supports(const SkPixmap& dst, const SkPixmap& src, const SkPaint& paint) {
+ if (dst.colorType() != src.colorType()) {
+ return false;
+ }
+ if (dst.info().gammaCloseToSRGB() != src.info().gammaCloseToSRGB()) {
+ return false;
+ }
+ if (paint.getMaskFilter() || paint.getColorFilter() || paint.getImageFilter()) {
+ return false;
+ }
+ if (0xFF != paint.getAlpha()) {
+ return false;
+ }
+ SkBlendMode mode = paint.getBlendMode();
+ if (SkBlendMode::kSrc == mode) {
+ return true;
+ }
+ if (SkBlendMode::kSrcOver == mode && src.isOpaque()) {
+ return true;
+ }
+
+ // At this point memcpy can't be used. The following checks determine whether SrcOver can be used.
+
+ if (dst.colorType() != kN32_SkColorType || !dst.info().gammaCloseToSRGB()) {
+ return false;
+ }
+
+ return SkBlendMode::kSrcOver == mode;
+ }
+
+ SkSpriteBlitter_Src_SrcOver(const SkPixmap& src)
+ : INHERITED(src) {}
+
+ void setup(const SkPixmap& dst, int left, int top, const SkPaint& paint) override {
+ SkASSERT(Supports(dst, fSource, paint));
+ this->INHERITED::setup(dst, left, top, paint);
+ SkBlendMode mode = paint.getBlendMode();
+
+ SkASSERT(mode == SkBlendMode::kSrcOver || mode == SkBlendMode::kSrc);
+
+ if (mode == SkBlendMode::kSrcOver && !fSource.isOpaque()) {
+ fUseMemcpy = false;
+ }
+ }
+
+ void blitRect(int x, int y, int width, int height) override {
+ SkASSERT(fDst.colorType() == fSource.colorType());
+ SkASSERT(fDst.info().gammaCloseToSRGB() == fSource.info().gammaCloseToSRGB());
+ SkASSERT(width > 0 && height > 0);
+
+ if (fUseMemcpy) {
+ char* dst = (char*)fDst.writable_addr(x, y);
+ const char* src = (const char*)fSource.addr(x - fLeft, y - fTop);
+ const size_t dstRB = fDst.rowBytes();
+ const size_t srcRB = fSource.rowBytes();
+ const size_t bytesToCopy = width << fSource.shiftPerPixel();
+
+ while (height --> 0) {
+ memcpy(dst, src, bytesToCopy);
+ dst += dstRB;
+ src += srcRB;
+ }
+ } else {
+ uint32_t* dst = fDst.writable_addr32(x, y);
+ const uint32_t* src = fSource.addr32(x - fLeft, y - fTop);
+ const int dstStride = fDst.rowBytesAsPixels();
+ const int srcStride = fSource.rowBytesAsPixels();
+
+ while (height --> 0) {
+ SkOpts::srcover_srgb_srgb(dst, src, width, width);
+ dst += dstStride;
+ src += srcStride;
+ }
+ }
+ }
+
+private:
+ typedef SkSpriteBlitter INHERITED;
+
+ bool fUseMemcpy {true};
+};
+
+// returning null means the caller will call SkBlitter::Choose() and
+// have wrapped the source bitmap inside a shader
+SkBlitter* SkBlitter::ChooseSprite(const SkPixmap& dst, const SkPaint& paint,
+ const SkPixmap& source, int left, int top, SkTBlitterAllocator* allocator) {
+ /* We currently ignore antialiasing and filtertype, meaning we will take our
+ special blitters regardless of these settings. Ignoring filtertype seems fine
+ since by definition there is no scale in the matrix. Ignoring antialiasing is
+ a bit of a hack, since we "could" pass in the fractional left/top for the bitmap,
+ and respect that by blending the edges of the bitmap against the device. To support
+ this we could either add more special blitters here, or detect antialiasing in the
+ paint and return null if it is set, forcing the client to take the slow shader case
+ (which does respect soft edges).
+ */
+ SkASSERT(allocator != nullptr);
+
+ // Defer to the general code if the pixels are unpremultiplied. This case is not common,
+ // and this simplifies the code.
+ if (source.alphaType() == kUnpremul_SkAlphaType) {
+ return nullptr;
+ }
+
+ SkSpriteBlitter* blitter = nullptr;
+
+ if (SkSpriteBlitter_Src_SrcOver::Supports(dst, source, paint)) {
+ blitter = allocator->createT<SkSpriteBlitter_Src_SrcOver>(source);
+ } else {
+ switch (dst.colorType()) {
+ case kRGB_565_SkColorType:
+ blitter = SkSpriteBlitter::ChooseD16(source, paint, allocator);
+ break;
+ case kN32_SkColorType:
+ if (dst.info().gammaCloseToSRGB()) {
+ blitter = SkSpriteBlitter::ChooseS32(source, paint, allocator);
+ } else {
+ blitter = SkSpriteBlitter::ChooseL32(source, paint, allocator);
+ }
+ break;
+ case kRGBA_F16_SkColorType:
+ blitter = SkSpriteBlitter::ChooseF16(source, paint, allocator);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (blitter) {
+ blitter->setup(dst, left, top, paint);
+ }
+ return blitter;
+}
diff --git a/gfx/skia/skia/src/core/SkBlurImageFilter.cpp b/gfx/skia/skia/src/core/SkBlurImageFilter.cpp
new file mode 100644
index 000000000..78fa071ac
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlurImageFilter.cpp
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAutoPixmapStorage.h"
+#include "SkColorPriv.h"
+#include "SkGpuBlurUtils.h"
+#include "SkOpts.h"
+#include "SkReadBuffer.h"
+#include "SkSpecialImage.h"
+#include "SkWriteBuffer.h"
+
+#if SK_SUPPORT_GPU
+#include "GrContext.h"
+#include "SkGr.h"
+#endif
+
+class SkBlurImageFilterImpl : public SkImageFilter {
+public:
+ SkBlurImageFilterImpl(SkScalar sigmaX,
+ SkScalar sigmaY,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect);
+
+ SkRect computeFastBounds(const SkRect&) const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkBlurImageFilterImpl)
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* Create(SkScalar sigmaX, SkScalar sigmaY, SkImageFilter* input = nullptr,
+ const CropRect* cropRect = nullptr) {
+ return SkImageFilter::MakeBlur(sigmaX, sigmaY, sk_ref_sp<SkImageFilter>(input),
+ cropRect).release();
+ }
+#endif
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* source, const Context&,
+ SkIPoint* offset) const override;
+ SkIRect onFilterNodeBounds(const SkIRect& src, const SkMatrix&, MapDirection) const override;
+
+private:
+ SkSize fSigma;
+ typedef SkImageFilter INHERITED;
+
+ friend class SkImageFilter;
+};
+
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkImageFilter)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkBlurImageFilterImpl)
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImageFilter> SkImageFilter::MakeBlur(SkScalar sigmaX, SkScalar sigmaY,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect) {
+ if (0 == sigmaX && 0 == sigmaY && !cropRect) {
+ return input;
+ }
+ return sk_sp<SkImageFilter>(new SkBlurImageFilterImpl(sigmaX, sigmaY, input, cropRect));
+}
+
+// This rather arbitrary-looking value results in a maximum box blur kernel size
+// of 1000 pixels on the raster path, which matches the WebKit and Firefox
+// implementations. Since the GPU path does not compute a box blur, putting
+// the limit on sigma ensures consistent behaviour between the GPU and
+// raster paths.
+#define MAX_SIGMA SkIntToScalar(532)
+
+static SkVector map_sigma(const SkSize& localSigma, const SkMatrix& ctm) {
+ SkVector sigma = SkVector::Make(localSigma.width(), localSigma.height());
+ ctm.mapVectors(&sigma, 1);
+ sigma.fX = SkMinScalar(SkScalarAbs(sigma.fX), MAX_SIGMA);
+ sigma.fY = SkMinScalar(SkScalarAbs(sigma.fY), MAX_SIGMA);
+ return sigma;
+}
+
+SkBlurImageFilterImpl::SkBlurImageFilterImpl(SkScalar sigmaX,
+ SkScalar sigmaY,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fSigma(SkSize::Make(sigmaX, sigmaY)) {
+}
+
+sk_sp<SkFlattenable> SkBlurImageFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkScalar sigmaX = buffer.readScalar();
+ SkScalar sigmaY = buffer.readScalar();
+ return SkImageFilter::MakeBlur(sigmaX, sigmaY, common.getInput(0), &common.cropRect());
+}
+
+void SkBlurImageFilterImpl::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeScalar(fSigma.fWidth);
+ buffer.writeScalar(fSigma.fHeight);
+}
+
+static void get_box3_params(SkScalar s, int *kernelSize, int* kernelSize3, int *lowOffset,
+ int *highOffset) {
+ float pi = SkScalarToFloat(SK_ScalarPI);
+ int d = static_cast<int>(floorf(SkScalarToFloat(s) * 3.0f * sqrtf(2.0f * pi) / 4.0f + 0.5f));
+ *kernelSize = d;
+ if (d % 2 == 1) {
+ *lowOffset = *highOffset = (d - 1) / 2;
+ *kernelSize3 = d;
+ } else {
+ *highOffset = d / 2;
+ *lowOffset = *highOffset - 1;
+ *kernelSize3 = d + 1;
+ }
+}
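+
+// As a rough check against the MAX_SIGMA comment above: for s = 532 this gives
+// d = floor(532 * 3 * sqrt(2 * pi) / 4 + 0.5) ~= floor(532 * 1.88 + 0.5) = 1000,
+// i.e. the advertised 1000-pixel cap on the box kernel. Since 1000 is even,
+// the offsets come out as lowOffset = 499, highOffset = 500, kernelSize3 = 1001.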
+
+sk_sp<SkSpecialImage> SkBlurImageFilterImpl::onFilterImage(SkSpecialImage* source,
+ const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+
+ sk_sp<SkSpecialImage> input(this->filterInput(0, source, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ SkIRect inputBounds = SkIRect::MakeXYWH(inputOffset.fX, inputOffset.fY,
+ input->width(), input->height());
+
+ SkIRect dstBounds;
+ if (!this->applyCropRect(this->mapContext(ctx), inputBounds, &dstBounds)) {
+ return nullptr;
+ }
+ if (!inputBounds.intersect(dstBounds)) {
+ return nullptr;
+ }
+
+ const SkVector sigma = map_sigma(fSigma, ctx.ctm());
+
+#if SK_SUPPORT_GPU
+ if (source->isTextureBacked()) {
+ GrContext* context = source->getContext();
+ sk_sp<GrTexture> inputTexture(input->asTextureRef(context));
+ SkASSERT(inputTexture);
+
+ if (0 == sigma.x() && 0 == sigma.y()) {
+ offset->fX = inputBounds.x();
+ offset->fY = inputBounds.y();
+ return input->makeSubset(inputBounds.makeOffset(-inputOffset.x(),
+ -inputOffset.y()));
+ }
+
+ offset->fX = dstBounds.fLeft;
+ offset->fY = dstBounds.fTop;
+ inputBounds.offset(-inputOffset);
+ dstBounds.offset(-inputOffset);
+ // We intentionally use the source's color space, not the destination's (from ctx). We
+ // always blur in the source's config, so we need a compatible color space. We also want to
+ // avoid doing gamut conversion on every fetch of the texture.
+ sk_sp<GrDrawContext> drawContext(SkGpuBlurUtils::GaussianBlur(
+ context,
+ inputTexture.get(),
+ sk_ref_sp(source->getColorSpace()),
+ dstBounds,
+ &inputBounds,
+ sigma.x(),
+ sigma.y()));
+ if (!drawContext) {
+ return nullptr;
+ }
+
+ // TODO: Get the colorSpace from the drawContext (once it has one)
+ return SkSpecialImage::MakeFromGpu(SkIRect::MakeWH(dstBounds.width(), dstBounds.height()),
+ kNeedNewImageUniqueID_SpecialImage,
+ drawContext->asTexture(),
+ sk_ref_sp(input->getColorSpace()), &source->props());
+ }
+#endif
+
+ int kernelSizeX, kernelSizeX3, lowOffsetX, highOffsetX;
+ int kernelSizeY, kernelSizeY3, lowOffsetY, highOffsetY;
+ get_box3_params(sigma.x(), &kernelSizeX, &kernelSizeX3, &lowOffsetX, &highOffsetX);
+ get_box3_params(sigma.y(), &kernelSizeY, &kernelSizeY3, &lowOffsetY, &highOffsetY);
+
+ if (kernelSizeX < 0 || kernelSizeY < 0) {
+ return nullptr;
+ }
+
+ if (kernelSizeX == 0 && kernelSizeY == 0) {
+ offset->fX = inputBounds.x();
+ offset->fY = inputBounds.y();
+ return input->makeSubset(inputBounds.makeOffset(-inputOffset.x(),
+ -inputOffset.y()));
+ }
+
+ SkBitmap inputBM;
+
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if (inputBM.colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+
+ SkImageInfo info = SkImageInfo::Make(dstBounds.width(), dstBounds.height(),
+ inputBM.colorType(), inputBM.alphaType());
+
+ SkBitmap tmp, dst;
+ if (!tmp.tryAllocPixels(info) || !dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ SkAutoLockPixels inputLock(inputBM), tmpLock(tmp), dstLock(dst);
+
+ offset->fX = dstBounds.fLeft;
+ offset->fY = dstBounds.fTop;
+ SkPMColor* t = tmp.getAddr32(0, 0);
+ SkPMColor* d = dst.getAddr32(0, 0);
+ int w = dstBounds.width(), h = dstBounds.height();
+ const SkPMColor* s = inputBM.getAddr32(inputBounds.x() - inputOffset.x(),
+ inputBounds.y() - inputOffset.y());
+ inputBounds.offset(-dstBounds.x(), -dstBounds.y());
+ dstBounds.offset(-dstBounds.x(), -dstBounds.y());
+ SkIRect inputBoundsT = SkIRect::MakeLTRB(inputBounds.top(), inputBounds.left(),
+ inputBounds.bottom(), inputBounds.right());
+ SkIRect dstBoundsT = SkIRect::MakeWH(dstBounds.height(), dstBounds.width());
+ int sw = int(inputBM.rowBytes() >> 2);
+
+ /**
+ *
+ * In order to make memory accesses cache-friendly, we reorder the passes to
+ * use contiguous memory reads wherever possible.
+ *
+ * For example, the 6 passes of the X-and-Y blur case are rewritten as
+ * follows. Instead of 3 passes in X and 3 passes in Y, we perform
+ * 2 passes in X, 1 pass in X transposed to Y on write, 2 passes in X,
+ * then 1 pass in X transposed to Y on write.
+ *
+ * +----+       +----+       +----+        +---+       +---+       +---+        +----+
+ * + AB + ----> | AB | ----> | AB | -----> | A | ----> | A | ----> | A | -----> | AB |
+ * +----+ blurX +----+ blurX +----+ blurXY | B | blurX | B | blurX | B | blurXY +----+
+ *                                         +---+       +---+       +---+
+ *
+ * In this way, two of the y-blurs become x-blurs applied to transposed
+ * images, and all memory reads are contiguous.
+ */
+ if (kernelSizeX > 0 && kernelSizeY > 0) {
+ SkOpts::box_blur_xx(s, sw, inputBounds, t, kernelSizeX, lowOffsetX, highOffsetX, w, h);
+ SkOpts::box_blur_xx(t, w, dstBounds, d, kernelSizeX, highOffsetX, lowOffsetX, w, h);
+ SkOpts::box_blur_xy(d, w, dstBounds, t, kernelSizeX3, highOffsetX, highOffsetX, w, h);
+ SkOpts::box_blur_xx(t, h, dstBoundsT, d, kernelSizeY, lowOffsetY, highOffsetY, h, w);
+ SkOpts::box_blur_xx(d, h, dstBoundsT, t, kernelSizeY, highOffsetY, lowOffsetY, h, w);
+ SkOpts::box_blur_xy(t, h, dstBoundsT, d, kernelSizeY3, highOffsetY, highOffsetY, h, w);
+ } else if (kernelSizeX > 0) {
+ SkOpts::box_blur_xx(s, sw, inputBounds, d, kernelSizeX, lowOffsetX, highOffsetX, w, h);
+ SkOpts::box_blur_xx(d, w, dstBounds, t, kernelSizeX, highOffsetX, lowOffsetX, w, h);
+ SkOpts::box_blur_xx(t, w, dstBounds, d, kernelSizeX3, highOffsetX, highOffsetX, w, h);
+ } else if (kernelSizeY > 0) {
+ SkOpts::box_blur_yx(s, sw, inputBoundsT, d, kernelSizeY, lowOffsetY, highOffsetY, h, w);
+ SkOpts::box_blur_xx(d, h, dstBoundsT, t, kernelSizeY, highOffsetY, lowOffsetY, h, w);
+ SkOpts::box_blur_xy(t, h, dstBoundsT, d, kernelSizeY3, highOffsetY, highOffsetY, h, w);
+ }
+
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(dstBounds.width(),
+ dstBounds.height()),
+ dst, &source->props());
+}
+
+
+SkRect SkBlurImageFilterImpl::computeFastBounds(const SkRect& src) const {
+ SkRect bounds = this->getInput(0) ? this->getInput(0)->computeFastBounds(src) : src;
+ bounds.outset(SkScalarMul(fSigma.width(), SkIntToScalar(3)),
+ SkScalarMul(fSigma.height(), SkIntToScalar(3)));
+ return bounds;
+}
+
+SkIRect SkBlurImageFilterImpl::onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection) const {
+ SkVector sigma = map_sigma(fSigma, ctm);
+ return src.makeOutset(SkScalarCeilToInt(SkScalarMul(sigma.x(), SkIntToScalar(3))),
+ SkScalarCeilToInt(SkScalarMul(sigma.y(), SkIntToScalar(3))));
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkBlurImageFilterImpl::toString(SkString* str) const {
+ str->appendf("SkBlurImageFilterImpl: (");
+ str->appendf("sigma: (%f, %f) input (", fSigma.fWidth, fSigma.fHeight);
+
+ if (this->getInput(0)) {
+ this->getInput(0)->toString(str);
+ }
+
+ str->append("))");
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkBuffer.cpp b/gfx/skia/skia/src/core/SkBuffer.cpp
new file mode 100644
index 000000000..df8dc6959
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBuffer.cpp
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBuffer.h"
+
+#include <string.h>
+
+////////////////////////////////////////////////////////////////////////////////////////
+
+void SkRBuffer::readNoSizeCheck(void* buffer, size_t size)
+{
+ SkASSERT((fData != 0 && fStop == 0) || fPos + size <= fStop);
+ if (buffer)
+ memcpy(buffer, fPos, size);
+ fPos += size;
+}
+
+const void* SkRBuffer::skip(size_t size)
+{
+ const void* result = fPos;
+ readNoSizeCheck(nullptr, size);
+ return result;
+}
+
+size_t SkRBuffer::skipToAlign4()
+{
+ size_t pos = this->pos();
+ size_t n = SkAlign4(pos) - pos;
+ fPos += n;
+ return n;
+}
+
+bool SkRBufferWithSizeCheck::read(void* buffer, size_t size) {
+ fError = fError || (size > static_cast<size_t>(fStop - fPos));
+ if (!fError && (size > 0)) {
+ readNoSizeCheck(buffer, size);
+ }
+ return !fError;
+}
+
+void* SkWBuffer::skip(size_t size)
+{
+ void* result = fPos;
+ writeNoSizeCheck(nullptr, size);
+ return fData == nullptr ? nullptr : result;
+}
+
+void SkWBuffer::writeNoSizeCheck(const void* buffer, size_t size)
+{
+ SkASSERT(fData == 0 || fStop == 0 || fPos + size <= fStop);
+ if (fData && buffer)
+ memcpy(fPos, buffer, size);
+ fPos += size;
+}
+
+size_t SkWBuffer::padToAlign4()
+{
+ size_t pos = this->pos();
+ size_t n = SkAlign4(pos) - pos;
+
+ if (n && fData)
+ {
+ char* p = fPos;
+ char* stop = p + n;
+ do {
+ *p++ = 0;
+ } while (p < stop);
+ }
+ fPos += n;
+ return n;
+}
+
+#if 0
+#ifdef SK_DEBUG
+ static void AssertBuffer32(const void* buffer)
+ {
+ SkASSERT(buffer);
+ SkASSERT(((size_t)buffer & 3) == 0);
+ }
+#else
+ #define AssertBuffer32(buffer)
+#endif
+
+void* sk_buffer_write_int32(void* buffer, int32_t value)
+{
+ AssertBuffer32(buffer);
+ *(int32_t*)buffer = value;
+ return (char*)buffer + sizeof(int32_t);
+}
+
+void* sk_buffer_write_int32(void* buffer, const int32_t values[], int count)
+{
+ AssertBuffer32(buffer);
+ SkASSERT(count >= 0);
+
+ memcpy((int32_t*)buffer, values, count * sizeof(int32_t));
+ return (char*)buffer + count * sizeof(int32_t);
+}
+
+const void* sk_buffer_read_int32(const void* buffer, int32_t* value)
+{
+ AssertBuffer32(buffer);
+ if (value)
+ *value = *(const int32_t*)buffer;
+ return (const char*)buffer + sizeof(int32_t);
+}
+
+const void* sk_buffer_read_int32(const void* buffer, int32_t values[], int count)
+{
+ AssertBuffer32(buffer);
+ SkASSERT(count >= 0);
+
+ if (values)
+ memcpy(values, (const int32_t*)buffer, count * sizeof(int32_t));
+ return (const char*)buffer + count * sizeof(int32_t);
+}
+
+void* sk_buffer_write_ptr(void* buffer, void* ptr)
+{
+ AssertBuffer32(buffer);
+ *(void**)buffer = ptr;
+ return (char*)buffer + sizeof(void*);
+}
+
+const void* sk_buffer_read_ptr(const void* buffer, void** ptr)
+{
+ AssertBuffer32(buffer);
+ if (ptr)
+ *ptr = *(void**)buffer;
+ return (const char*)buffer + sizeof(void*);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBuffer.h b/gfx/skia/skia/src/core/SkBuffer.h
new file mode 100644
index 000000000..c466fb65e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBuffer.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkBuffer_DEFINED
+#define SkBuffer_DEFINED
+
+#include "SkScalar.h"
+#include "SkTypes.h"
+
+/** \class SkRBuffer
+
+ Lightweight class for reading data from a memory block.
+ The RBuffer is given the buffer to read from, with either a specified size
+ or no size (in which case no range checking is performed). It is illegal
+ to attempt to read a value from an empty RBuffer (data == null).
+*/
+class SkRBuffer : SkNoncopyable {
+public:
+ SkRBuffer() : fData(0), fPos(0), fStop(0) {}
+ /** Initialize RBuffer with a data pointer, but no specified length.
+ This signals the RBuffer to not perform range checks during reading.
+ */
+ SkRBuffer(const void* data) {
+ fData = (const char*)data;
+ fPos = (const char*)data;
+ fStop = 0; // no bounds checking
+ }
+ /** Initialize RBuffer with a data pointer and length.
+ */
+ SkRBuffer(const void* data, size_t size) {
+ SkASSERT(data != 0 || size == 0);
+ fData = (const char*)data;
+ fPos = (const char*)data;
+ fStop = (const char*)data + size;
+ }
+
+ virtual ~SkRBuffer() { }
+
+ /** Return the number of bytes that have been read from the beginning
+ of the data pointer.
+ */
+ size_t pos() const { return fPos - fData; }
+ /** Return the total size of the data pointer. Only defined if the length was
+ specified in the constructor or in a call to reset().
+ */
+ size_t size() const { return fStop - fData; }
+ /** Return true if the buffer has read to the end of the data pointer.
+ Only defined if the length was specified in the constructor or in a call
+ to reset(). Always returns true if the length was not specified.
+ */
+ bool eof() const { return fPos >= fStop; }
+
+ /** Read the specified number of bytes from the data pointer. If buffer is not
+ null, copy those bytes into buffer.
+ */
+ virtual bool read(void* buffer, size_t size) {
+ if (size) {
+ this->readNoSizeCheck(buffer, size);
+ }
+ return true;
+ }
+
+ const void* skip(size_t size); // return start of skipped data
+ size_t skipToAlign4();
+
+ bool readPtr(void** ptr) { return read(ptr, sizeof(void*)); }
+ bool readScalar(SkScalar* x) { return read(x, 4); }
+ bool readU32(uint32_t* x) { return read(x, 4); }
+ bool readS32(int32_t* x) { return read(x, 4); }
+ bool readU16(uint16_t* x) { return read(x, 2); }
+ bool readS16(int16_t* x) { return read(x, 2); }
+ bool readU8(uint8_t* x) { return read(x, 1); }
+ bool readBool(bool* x) {
+ uint8_t u8;
+ if (this->readU8(&u8)) {
+ *x = (u8 != 0);
+ return true;
+ }
+ return false;
+ }
+
+protected:
+ void readNoSizeCheck(void* buffer, size_t size);
+
+ const char* fData;
+ const char* fPos;
+ const char* fStop;
+};
+
+/** \class SkRBufferWithSizeCheck
+
+ Same as SkRBuffer, except that a size check is performed before the read operation and an
+ error is set if the read operation is attempting to read past the end of the data.
+*/
+class SkRBufferWithSizeCheck : public SkRBuffer {
+public:
+ SkRBufferWithSizeCheck(const void* data, size_t size) : SkRBuffer(data, size), fError(false) {}
+
+ /** Read the specified number of bytes from the data pointer. If buffer is not
+ null and the number of bytes to read does not overflow this object's data,
+ copy those bytes into buffer.
+ */
+ bool read(void* buffer, size_t size) override;
+
+ /** Returns whether or not a read operation attempted to read past the end of the data.
+ */
+ bool isValid() const { return !fError; }
+private:
+ bool fError;
+};
+
+/** \class SkWBuffer
+
+ Lightweight class for writing data to a memory block.
+ The WBuffer is given the buffer to write into, with either a specified size
+ or no size, in which case no range checking is performed. An empty WBuffer
+ is legal, in which case no data is ever written, but the relative pos()
+ is updated.
+*/
+class SkWBuffer : SkNoncopyable {
+public:
+ SkWBuffer() : fData(0), fPos(0), fStop(0) {}
+ SkWBuffer(void* data) { reset(data); }
+ SkWBuffer(void* data, size_t size) { reset(data, size); }
+
+ void reset(void* data) {
+ fData = (char*)data;
+ fPos = (char*)data;
+ fStop = 0; // no bounds checking
+ }
+
+ void reset(void* data, size_t size) {
+ SkASSERT(data != 0 || size == 0);
+ fData = (char*)data;
+ fPos = (char*)data;
+ fStop = (char*)data + size;
+ }
+
+ size_t pos() const { return fPos - fData; }
+ void* skip(size_t size); // return start of skipped data
+
+ void write(const void* buffer, size_t size) {
+ if (size) {
+ this->writeNoSizeCheck(buffer, size);
+ }
+ }
+
+ size_t padToAlign4();
+
+ void writePtr(const void* x) { this->writeNoSizeCheck(&x, sizeof(x)); }
+ void writeScalar(SkScalar x) { this->writeNoSizeCheck(&x, 4); }
+ void write32(int32_t x) { this->writeNoSizeCheck(&x, 4); }
+ void write16(int16_t x) { this->writeNoSizeCheck(&x, 2); }
+ void write8(int8_t x) { this->writeNoSizeCheck(&x, 1); }
+ void writeBool(bool x) { this->write8(x); }
+
+private:
+ void writeNoSizeCheck(const void* buffer, size_t size);
+
+ char* fData;
+ char* fPos;
+ char* fStop;
+};
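+
+/* Minimal usage sketch (illustrative only; the storage size and values are arbitrary):
+ * pack two 4-byte values with SkWBuffer, then read them back with the bounds-checked
+ * SkRBufferWithSizeCheck declared above.
+ *
+ *   char storage[8];
+ *   SkWBuffer writer(storage, sizeof(storage));
+ *   writer.write32(0x12345678);
+ *   writer.writeScalar(SK_Scalar1);
+ *   SkASSERT(8 == writer.pos());
+ *
+ *   SkRBufferWithSizeCheck reader(storage, sizeof(storage));
+ *   int32_t value;
+ *   SkScalar scalar;
+ *   reader.readS32(&value);
+ *   reader.readScalar(&scalar);
+ *   SkASSERT(reader.isValid());   // no read ran past the 8-byte buffer
+ */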
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkCachedData.cpp b/gfx/skia/skia/src/core/SkCachedData.cpp
new file mode 100644
index 000000000..1ea232b2c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCachedData.cpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCachedData.h"
+#include "SkDiscardableMemory.h"
+
+//#define TRACK_CACHEDDATA_LIFETIME
+
+#ifdef TRACK_CACHEDDATA_LIFETIME
+static int32_t gCachedDataCounter;
+
+static void inc() {
+ int32_t oldCount = sk_atomic_inc(&gCachedDataCounter);
+ SkDebugf("SkCachedData inc %d\n", oldCount + 1);
+}
+
+static void dec() {
+ int32_t oldCount = sk_atomic_dec(&gCachedDataCounter);
+ SkDebugf("SkCachedData dec %d\n", oldCount - 1);
+}
+#else
+static void inc() {}
+static void dec() {}
+#endif
+
+SkCachedData::SkCachedData(void* data, size_t size)
+ : fData(data)
+ , fSize(size)
+ , fRefCnt(1)
+ , fStorageType(kMalloc_StorageType)
+ , fInCache(false)
+ , fIsLocked(true)
+{
+ fStorage.fMalloc = data;
+ inc();
+}
+
+SkCachedData::SkCachedData(size_t size, SkDiscardableMemory* dm)
+ : fData(dm->data())
+ , fSize(size)
+ , fRefCnt(1)
+ , fStorageType(kDiscardableMemory_StorageType)
+ , fInCache(false)
+ , fIsLocked(true)
+{
+ fStorage.fDM = dm;
+ inc();
+}
+
+SkCachedData::~SkCachedData() {
+ switch (fStorageType) {
+ case kMalloc_StorageType:
+ sk_free(fStorage.fMalloc);
+ break;
+ case kDiscardableMemory_StorageType:
+ delete fStorage.fDM;
+ break;
+ }
+ dec();
+}
+
+class SkCachedData::AutoMutexWritable {
+public:
+ AutoMutexWritable(const SkCachedData* cd) : fCD(const_cast<SkCachedData*>(cd)) {
+ fCD->fMutex.acquire();
+ fCD->validate();
+ }
+ ~AutoMutexWritable() {
+ fCD->validate();
+ fCD->fMutex.release();
+ }
+
+ SkCachedData* get() { return fCD; }
+ SkCachedData* operator->() { return fCD; }
+
+private:
+ SkCachedData* fCD;
+};
+
+void SkCachedData::internalRef(bool fromCache) const {
+ AutoMutexWritable(this)->inMutexRef(fromCache);
+}
+
+void SkCachedData::internalUnref(bool fromCache) const {
+ if (AutoMutexWritable(this)->inMutexUnref(fromCache)) {
+ // can't delete inside inMutexUnref, since it is holding a mutex (which we own)
+ delete this;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkCachedData::inMutexRef(bool fromCache) {
+ if ((1 == fRefCnt) && fInCache) {
+ this->inMutexLock();
+ }
+
+ fRefCnt += 1;
+ if (fromCache) {
+ SkASSERT(!fInCache);
+ fInCache = true;
+ }
+}
+
+bool SkCachedData::inMutexUnref(bool fromCache) {
+ switch (--fRefCnt) {
+ case 0:
+ // we're going to be deleted, so we need to be unlocked (for DiscardableMemory)
+ if (fIsLocked) {
+ this->inMutexUnlock();
+ }
+ break;
+ case 1:
+ if (fInCache && !fromCache) {
+ // If we're down to 1 owner, and that owner is the cache, then it is safe
+ // to unlock (and mutate fData) even if the cache is in a different thread,
+ // as the cache is NOT allowed to inspect or use fData.
+ this->inMutexUnlock();
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (fromCache) {
+ SkASSERT(fInCache);
+ fInCache = false;
+ }
+
+ // return true when we need to be deleted
+ return 0 == fRefCnt;
+}
+
+void SkCachedData::inMutexLock() {
+ fMutex.assertHeld();
+
+ SkASSERT(!fIsLocked);
+ fIsLocked = true;
+
+ switch (fStorageType) {
+ case kMalloc_StorageType:
+ this->setData(fStorage.fMalloc);
+ break;
+ case kDiscardableMemory_StorageType:
+ if (fStorage.fDM->lock()) {
+ void* ptr = fStorage.fDM->data();
+ SkASSERT(ptr);
+ this->setData(ptr);
+ } else {
+ this->setData(nullptr); // signal failure to lock, contents are gone
+ }
+ break;
+ }
+}
+
+void SkCachedData::inMutexUnlock() {
+ fMutex.assertHeld();
+
+ SkASSERT(fIsLocked);
+ fIsLocked = false;
+
+ switch (fStorageType) {
+ case kMalloc_StorageType:
+ // nothing to do/check
+ break;
+ case kDiscardableMemory_StorageType:
+ if (fData) { // did the previous lock succeed?
+ fStorage.fDM->unlock();
+ }
+ break;
+ }
+ this->setData(nullptr); // signal that we're in an unlocked state
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+void SkCachedData::validate() const {
+ if (fIsLocked) {
+ SkASSERT((fInCache && fRefCnt > 1) || !fInCache);
+ switch (fStorageType) {
+ case kMalloc_StorageType:
+ SkASSERT(fData == fStorage.fMalloc);
+ break;
+ case kDiscardableMemory_StorageType:
+ // fData can be null or the actual value, depending if DM's lock succeeded
+ break;
+ }
+ } else {
+ SkASSERT((fInCache && 1 == fRefCnt) || (0 == fRefCnt));
+ SkASSERT(nullptr == fData);
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkCachedData.h b/gfx/skia/skia/src/core/SkCachedData.h
new file mode 100644
index 000000000..739be7218
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCachedData.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCachedData_DEFINED
+#define SkCachedData_DEFINED
+
+#include "SkMutex.h"
+#include "SkTypes.h"
+
+class SkDiscardableMemory;
+
+class SkCachedData : ::SkNoncopyable {
+public:
+ SkCachedData(void* mallocData, size_t size);
+ SkCachedData(size_t size, SkDiscardableMemory*);
+ virtual ~SkCachedData();
+
+ size_t size() const { return fSize; }
+ const void* data() const { return fData; }
+
+ void* writable_data() { return fData; }
+
+ void ref() const { this->internalRef(false); }
+ void unref() const { this->internalUnref(false); }
+
+ int testing_only_getRefCnt() const { return fRefCnt; }
+ bool testing_only_isLocked() const { return fIsLocked; }
+ bool testing_only_isInCache() const { return fInCache; }
+
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const {
+ return kDiscardableMemory_StorageType == fStorageType ? fStorage.fDM : nullptr;
+ }
+
+protected:
+ // called when fData changes; either the old or new value may be nullptr
+ virtual void onDataChange(void* oldData, void* newData) {}
+
+private:
+ SkMutex fMutex; // could use a pool of these...
+
+ enum StorageType {
+ kDiscardableMemory_StorageType,
+ kMalloc_StorageType
+ };
+
+ union {
+ SkDiscardableMemory* fDM;
+ void* fMalloc;
+ } fStorage;
+ void* fData;
+ size_t fSize;
+ int fRefCnt; // low-bit means we're owned by the cache
+ StorageType fStorageType;
+ bool fInCache;
+ bool fIsLocked;
+
+ void internalRef(bool fromCache) const;
+ void internalUnref(bool fromCache) const;
+
+ void inMutexRef(bool fromCache);
+ bool inMutexUnref(bool fromCache); // returns true if we should delete "this"
+ void inMutexLock();
+ void inMutexUnlock();
+
+ // called whenever our fData might change (lock or unlock)
+ void setData(void* newData) {
+ if (newData != fData) {
+ // notify our subclasses of the change
+ this->onDataChange(fData, newData);
+ fData = newData;
+ }
+ }
+
+ class AutoMutexWritable;
+
+public:
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+ /*
+ * Attaching the data to an SkResourceCache (only one at a time) enables the data to be
+ * unlocked when the cache is the only owner, thus freeing it to be purged (assuming the
+ * data is backed by a SkDiscardableMemory).
+ *
+ * When attached, it also automatically attempts to "lock" the data when the first client
+ * ref's the data (typically from a find(key, visitor) call).
+ *
+ * Thus the data will always be "locked" when a non-cache has a ref on it (whether or not
+ * the lock succeeded to recover the memory -- check data() to see if it is nullptr).
+ */
+
+ /*
+ * Call when adding this instance to a SkResourceCache::Rec subclass
+ * (typically in the Rec's constructor).
+ */
+ void attachToCacheAndRef() const { this->internalRef(true); }
+
+ /*
+ * Call when removing this instance from a SkResourceCache::Rec subclass
+ * (typically in the Rec's destructor).
+ */
+ void detachFromCacheAndUnref() const { this->internalUnref(true); }
+};
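+
+/* Minimal lifecycle sketch (illustrative only; the size is arbitrary, the data is
+ * malloc-backed rather than discardable, and `src` stands in for a caller-owned buffer):
+ *
+ *   size_t size = 1024;
+ *   SkCachedData* data = new SkCachedData(sk_malloc_throw(size), size);
+ *   // a freshly created object holds one ref and starts locked, so its data is usable
+ *   memcpy(data->writable_data(), src, size);
+ *   data->ref();      // a second client takes a reference
+ *   data->unref();
+ *   data->unref();    // the final unref deletes the object and frees the malloc block
+ */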
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkCanvas.cpp b/gfx/skia/skia/src/core/SkCanvas.cpp
new file mode 100644
index 000000000..505592d0f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCanvas.cpp
@@ -0,0 +1,3406 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapDevice.h"
+#include "SkCanvas.h"
+#include "SkCanvasPriv.h"
+#include "SkClipStack.h"
+#include "SkColorFilter.h"
+#include "SkDraw.h"
+#include "SkDrawable.h"
+#include "SkDrawFilter.h"
+#include "SkDrawLooper.h"
+#include "SkErrorInternals.h"
+#include "SkImage.h"
+#include "SkImage_Base.h"
+#include "SkImageFilter.h"
+#include "SkImageFilterCache.h"
+#include "SkLatticeIter.h"
+#include "SkMatrixUtils.h"
+#include "SkMetaData.h"
+#include "SkNx.h"
+#include "SkPaintPriv.h"
+#include "SkPatchUtils.h"
+#include "SkPicture.h"
+#include "SkRadialShadowMapShader.h"
+#include "SkRasterClip.h"
+#include "SkReadPixelsRec.h"
+#include "SkRRect.h"
+#include "SkShadowPaintFilterCanvas.h"
+#include "SkShadowShader.h"
+#include "SkSmallAllocator.h"
+#include "SkSpecialImage.h"
+#include "SkSurface_Base.h"
+#include "SkTextBlob.h"
+#include "SkTextFormatParams.h"
+#include "SkTLazy.h"
+#include "SkTraceEvent.h"
+#include <new>
+
+#if SK_SUPPORT_GPU
+#include "GrContext.h"
+#include "GrRenderTarget.h"
+#include "SkGrPriv.h"
+
+#endif
+
+#define RETURN_ON_NULL(ptr) do { if (nullptr == (ptr)) return; } while (0)
+
+/*
+ * Return true if drawing this rect would hit every pixel in the canvas.
+ *
+ * Returns false if
+ * - rect does not contain the canvas' bounds
+ * - paint is not fill
+ * - paint would blur or otherwise change the coverage of the rect
+ */
+bool SkCanvas::wouldOverwriteEntireSurface(const SkRect* rect, const SkPaint* paint,
+ ShaderOverrideOpacity overrideOpacity) const {
+ static_assert((int)SkPaintPriv::kNone_ShaderOverrideOpacity ==
+ (int)kNone_ShaderOverrideOpacity,
+ "need_matching_enums0");
+ static_assert((int)SkPaintPriv::kOpaque_ShaderOverrideOpacity ==
+ (int)kOpaque_ShaderOverrideOpacity,
+ "need_matching_enums1");
+ static_assert((int)SkPaintPriv::kNotOpaque_ShaderOverrideOpacity ==
+ (int)kNotOpaque_ShaderOverrideOpacity,
+ "need_matching_enums2");
+
+ const SkISize size = this->getBaseLayerSize();
+ const SkRect bounds = SkRect::MakeIWH(size.width(), size.height());
+ if (!this->getClipStack()->quickContains(bounds)) {
+ return false;
+ }
+
+ if (rect) {
+ if (!this->getTotalMatrix().isScaleTranslate()) {
+ return false; // conservative
+ }
+
+ SkRect devRect;
+ this->getTotalMatrix().mapRectScaleTranslate(&devRect, *rect);
+ if (!devRect.contains(bounds)) {
+ return false;
+ }
+ }
+
+ if (paint) {
+ SkPaint::Style paintStyle = paint->getStyle();
+ if (!(paintStyle == SkPaint::kFill_Style ||
+ paintStyle == SkPaint::kStrokeAndFill_Style)) {
+ return false;
+ }
+ if (paint->getMaskFilter() || paint->getLooper()
+ || paint->getPathEffect() || paint->getImageFilter()) {
+ return false; // conservative
+ }
+ }
+ return SkPaintPriv::Overwrites(paint, (SkPaintPriv::ShaderOverrideOpacity)overrideOpacity);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static bool gIgnoreSaveLayerBounds;
+void SkCanvas::Internal_Private_SetIgnoreSaveLayerBounds(bool ignore) {
+ gIgnoreSaveLayerBounds = ignore;
+}
+bool SkCanvas::Internal_Private_GetIgnoreSaveLayerBounds() {
+ return gIgnoreSaveLayerBounds;
+}
+
+static bool gTreatSpriteAsBitmap;
+void SkCanvas::Internal_Private_SetTreatSpriteAsBitmap(bool spriteAsBitmap) {
+ gTreatSpriteAsBitmap = spriteAsBitmap;
+}
+bool SkCanvas::Internal_Private_GetTreatSpriteAsBitmap() {
+ return gTreatSpriteAsBitmap;
+}
+
+// experimental for faster tiled drawing...
+//#define SK_TRACE_SAVERESTORE
+
+#ifdef SK_TRACE_SAVERESTORE
+ static int gLayerCounter;
+ static void inc_layer() { ++gLayerCounter; printf("----- inc layer %d\n", gLayerCounter); }
+ static void dec_layer() { --gLayerCounter; printf("----- dec layer %d\n", gLayerCounter); }
+
+ static int gRecCounter;
+ static void inc_rec() { ++gRecCounter; printf("----- inc rec %d\n", gRecCounter); }
+ static void dec_rec() { --gRecCounter; printf("----- dec rec %d\n", gRecCounter); }
+
+ static int gCanvasCounter;
+ static void inc_canvas() { ++gCanvasCounter; printf("----- inc canvas %d\n", gCanvasCounter); }
+ static void dec_canvas() { --gCanvasCounter; printf("----- dec canvas %d\n", gCanvasCounter); }
+#else
+ #define inc_layer()
+ #define dec_layer()
+ #define inc_rec()
+ #define dec_rec()
+ #define inc_canvas()
+ #define dec_canvas()
+#endif
+
+typedef SkTLazy<SkPaint> SkLazyPaint;
+
+void SkCanvas::predrawNotify(bool willOverwritesEntireSurface) {
+ if (fSurfaceBase) {
+ fSurfaceBase->aboutToDraw(willOverwritesEntireSurface
+ ? SkSurface::kDiscard_ContentChangeMode
+ : SkSurface::kRetain_ContentChangeMode);
+ }
+}
+
+void SkCanvas::predrawNotify(const SkRect* rect, const SkPaint* paint,
+ ShaderOverrideOpacity overrideOpacity) {
+ if (fSurfaceBase) {
+ SkSurface::ContentChangeMode mode = SkSurface::kRetain_ContentChangeMode;
+ // Since wouldOverwriteEntireSurface() may not be completely free to call, we only do so if
+ // there is an outstanding snapshot, since w/o that, there will be no copy-on-write
+ // and therefore we don't care which mode we're in.
+ //
+ if (fSurfaceBase->outstandingImageSnapshot()) {
+ if (this->wouldOverwriteEntireSurface(rect, paint, overrideOpacity)) {
+ mode = SkSurface::kDiscard_ContentChangeMode;
+ }
+ }
+ fSurfaceBase->aboutToDraw(mode);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* This is the record we keep for each SkBaseDevice that the user installs.
+ The clip/matrix/proc are fields that reflect the top of the save/restore
+ stack. Whenever the canvas changes, it marks a dirty flag, and then before
+ these are used (assuming we're not on a layer) we rebuild these cache
+ values: they reflect the top of the save stack, but translated and clipped
+ by the device's XY offset and bitmap-bounds.
+*/
+struct DeviceCM {
+ DeviceCM* fNext;
+ SkBaseDevice* fDevice;
+ SkRasterClip fClip;
+ SkPaint* fPaint; // may be null (in the future)
+ const SkMatrix* fMatrix;
+ SkMatrix fMatrixStorage;
+ SkMatrix fStashedMatrix; // original CTM; used by imagefilter in saveLayer
+
+ DeviceCM(SkBaseDevice* device, const SkPaint* paint, SkCanvas* canvas,
+ bool conservativeRasterClip, const SkMatrix& stashed)
+ : fNext(nullptr)
+ , fClip(conservativeRasterClip)
+ , fStashedMatrix(stashed)
+ {
+ SkSafeRef(device);
+ fDevice = device;
+ fPaint = paint ? new SkPaint(*paint) : nullptr;
+ }
+
+ ~DeviceCM() {
+ SkSafeUnref(fDevice);
+ delete fPaint;
+ }
+
+ void reset(const SkIRect& bounds) {
+ SkASSERT(!fPaint);
+ SkASSERT(!fNext);
+ SkASSERT(fDevice);
+ fClip.setRect(bounds);
+ }
+
+ void updateMC(const SkMatrix& totalMatrix, const SkRasterClip& totalClip,
+ SkRasterClip* updateClip) {
+ int x = fDevice->getOrigin().x();
+ int y = fDevice->getOrigin().y();
+ int width = fDevice->width();
+ int height = fDevice->height();
+
+ if ((x | y) == 0) {
+ fMatrix = &totalMatrix;
+ fClip = totalClip;
+ } else {
+ fMatrixStorage = totalMatrix;
+ fMatrixStorage.postTranslate(SkIntToScalar(-x),
+ SkIntToScalar(-y));
+ fMatrix = &fMatrixStorage;
+
+ totalClip.translate(-x, -y, &fClip);
+ }
+
+ fClip.op(SkIRect::MakeWH(width, height), SkRegion::kIntersect_Op);
+
+ // intersect clip, but don't translate it (yet)
+
+ if (updateClip) {
+ updateClip->op(SkIRect::MakeXYWH(x, y, width, height),
+ SkRegion::kDifference_Op);
+ }
+
+#ifdef SK_DEBUG
+ if (!fClip.isEmpty()) {
+ SkIRect deviceR;
+ deviceR.set(0, 0, width, height);
+ SkASSERT(deviceR.contains(fClip.getBounds()));
+ }
+#endif
+ }
+};
+
+/* This is the record we keep for each save/restore level in the stack.
+ Since a level optionally copies the matrix and/or stack, we have pointers
+ for these fields. If the value is copied for this level, the copy is
+ stored in the ...Storage field, and the pointer points to that. If the
+ value is not copied for this level, we ignore ...Storage, and just point
+ at the corresponding value in the previous level in the stack.
+*/
+class SkCanvas::MCRec {
+public:
+ SkDrawFilter* fFilter; // the current filter (or null)
+ DeviceCM* fLayer;
+ /* If there are any layers in the stack, this points to the top-most
+ one that is at or below this level in the stack (so we know what
+ bitmap/device to draw into from this level). This value is NOT
+ reference counted, since the real owner is either our fLayer field,
+ or a previous one in a lower level.
+ */
+ DeviceCM* fTopLayer;
+ SkRasterClip fRasterClip;
+ SkMatrix fMatrix;
+ int fDeferredSaveCount;
+
+ // This is the current cumulative depth (the aggregate of all translateZ calls made so far)
+ SkScalar fCurDrawDepth;
+
+ MCRec(bool conservativeRasterClip) : fRasterClip(conservativeRasterClip) {
+ fFilter = nullptr;
+ fLayer = nullptr;
+ fTopLayer = nullptr;
+ fMatrix.reset();
+ fDeferredSaveCount = 0;
+ fCurDrawDepth = 0;
+
+ // don't bother initializing fNext
+ inc_rec();
+ }
+ MCRec(const MCRec& prev) : fRasterClip(prev.fRasterClip), fMatrix(prev.fMatrix),
+ fCurDrawDepth(prev.fCurDrawDepth) {
+ fFilter = SkSafeRef(prev.fFilter);
+ fLayer = nullptr;
+ fTopLayer = prev.fTopLayer;
+ fDeferredSaveCount = 0;
+
+ // don't bother initializing fNext
+ inc_rec();
+ }
+ ~MCRec() {
+ SkSafeUnref(fFilter);
+ delete fLayer;
+ dec_rec();
+ }
+
+ void reset(const SkIRect& bounds) {
+ SkASSERT(fLayer);
+ SkASSERT(fDeferredSaveCount == 0);
+
+ fMatrix.reset();
+ fRasterClip.setRect(bounds);
+ fLayer->reset(bounds);
+ }
+};
+
+static SkIRect compute_device_bounds(SkBaseDevice* device) {
+ return SkIRect::MakeXYWH(device->getOrigin().x(), device->getOrigin().y(),
+ device->width(), device->height());
+}
+
+class SkDrawIter : public SkDraw {
+public:
+ SkDrawIter(SkCanvas* canvas) {
+ canvas = canvas->canvasForDrawIter();
+ canvas->updateDeviceCMCache();
+
+ fClipStack = canvas->fClipStack;
+ fCurrLayer = canvas->fMCRec->fTopLayer;
+
+ fMultiDeviceCS = nullptr;
+ if (fCurrLayer->fNext) {
+ fMultiDeviceCS = canvas->fClipStack;
+ fMultiDeviceCS->save();
+ }
+ }
+
+ ~SkDrawIter() {
+ if (fMultiDeviceCS) {
+ fMultiDeviceCS->restore();
+ }
+ }
+
+ bool next() {
+ if (fMultiDeviceCS && fDevice) {
+ // remove the previous device's bounds
+ fMultiDeviceCS->clipDevRect(compute_device_bounds(fDevice), SkCanvas::kDifference_Op);
+ }
+
+ // skip over recs with empty clips
+ while (fCurrLayer && fCurrLayer->fClip.isEmpty()) {
+ fCurrLayer = fCurrLayer->fNext;
+ }
+
+ const DeviceCM* rec = fCurrLayer;
+ if (rec && rec->fDevice) {
+
+ fMatrix = rec->fMatrix;
+ fRC = &rec->fClip;
+ fDevice = rec->fDevice;
+ if (!fDevice->accessPixels(&fDst)) {
+ fDst.reset(fDevice->imageInfo(), nullptr, 0);
+ }
+ fPaint = rec->fPaint;
+ SkDEBUGCODE(this->validate();)
+
+ fCurrLayer = rec->fNext;
+ // fCurrLayer may be nullptr now
+
+ return true;
+ }
+ return false;
+ }
+
+ SkBaseDevice* getDevice() const { return fDevice; }
+ const SkRasterClip& getClip() const { return *fRC; }
+ int getX() const { return fDevice->getOrigin().x(); }
+ int getY() const { return fDevice->getOrigin().y(); }
+ const SkMatrix& getMatrix() const { return *fMatrix; }
+ const SkPaint* getPaint() const { return fPaint; }
+
+private:
+ const DeviceCM* fCurrLayer;
+ const SkPaint* fPaint; // May be null.
+ SkClipStack* fMultiDeviceCS;
+
+ typedef SkDraw INHERITED;
+};
+
+/////////////////////////////////////////////////////////////////////////////
+
+static SkPaint* set_if_needed(SkLazyPaint* lazy, const SkPaint& orig) {
+ return lazy->isValid() ? lazy->get() : lazy->set(orig);
+}
+
+/**
+ * If the paint has an imagefilter, but it can be simplified to just a colorfilter, return that
+ * colorfilter, else return nullptr.
+ */
+static sk_sp<SkColorFilter> image_to_color_filter(const SkPaint& paint) {
+ SkImageFilter* imgf = paint.getImageFilter();
+ if (!imgf) {
+ return nullptr;
+ }
+
+ SkColorFilter* imgCFPtr;
+ if (!imgf->asAColorFilter(&imgCFPtr)) {
+ return nullptr;
+ }
+ sk_sp<SkColorFilter> imgCF(imgCFPtr);
+
+ SkColorFilter* paintCF = paint.getColorFilter();
+ if (nullptr == paintCF) {
+ // there is no existing paint colorfilter, so we can just return the imagefilter's
+ return imgCF;
+ }
+
+ // The paint has both a colorfilter(paintCF) and an imagefilter-which-is-a-colorfilter(imgCF)
+ // and we need to combine them into a single colorfilter.
+ return SkColorFilter::MakeComposeFilter(std::move(imgCF), sk_ref_sp(paintCF));
+}
+
+/**
+ * There are many bounds in skia. A circle's bounds is just its center extended by its radius.
+ * However, if we stroke a circle, then the "bounds" of that is larger, since it will now draw
+ * outside of its raw-bounds by 1/2 the stroke width. SkPaint has lots of optional
+ * effects/attributes that can modify the effective bounds of a given primitive -- maskfilters,
+ * patheffects, stroking, etc. This function takes a raw bounds and a paint, and returns the
+ * conservative "effective" bounds based on the settings in the paint... with one exception. This
+ * function does *not* look at the imagefilter, which can also modify the effective bounds. It is
+ * deliberately ignored.
+ */
+static const SkRect& apply_paint_to_bounds_sans_imagefilter(const SkPaint& paint,
+ const SkRect& rawBounds,
+ SkRect* storage) {
+ SkPaint tmpUnfiltered(paint);
+ tmpUnfiltered.setImageFilter(nullptr);
+ if (tmpUnfiltered.canComputeFastBounds()) {
+ return tmpUnfiltered.computeFastBounds(rawBounds, storage);
+ } else {
+ return rawBounds;
+ }
+}
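+
+// Illustrative numbers (not from the sources): as the comment above notes, a stroke draws
+// roughly half the stroke width outside the raw bounds, so a circle with raw bounds 10x10
+// and a stroke width of 4 effectively covers about 14x14; the image filter's contribution
+// is deliberately left out here and handled by the saveLayer machinery below.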
+
+class AutoDrawLooper {
+public:
+ // "rawBounds" is the original bounds of the primitive about to be drawn, unmodified by the
+ // paint. It's used to determine the size of the offscreen layer for filters.
+ // If null, the clip will be used instead.
+ AutoDrawLooper(SkCanvas* canvas, const SkPaint& paint, bool skipLayerForImageFilter = false,
+ const SkRect* rawBounds = nullptr) : fOrigPaint(paint) {
+ fCanvas = canvas;
+#ifdef SK_SUPPORT_LEGACY_DRAWFILTER
+ fFilter = canvas->getDrawFilter();
+#else
+ fFilter = nullptr;
+#endif
+ fPaint = &fOrigPaint;
+ fSaveCount = canvas->getSaveCount();
+ fTempLayerForImageFilter = false;
+ fDone = false;
+
+ auto simplifiedCF = image_to_color_filter(fOrigPaint);
+ if (simplifiedCF) {
+ SkPaint* paint = set_if_needed(&fLazyPaintInit, fOrigPaint);
+ paint->setColorFilter(std::move(simplifiedCF));
+ paint->setImageFilter(nullptr);
+ fPaint = paint;
+ }
+
+ if (!skipLayerForImageFilter && fPaint->getImageFilter()) {
+ /**
+ * We implement ImageFilters for a given draw by creating a layer, then applying the
+ * imagefilter to the pixels of that layer (its backing surface/image), and then
+ * we call restore() to xfer that layer to the main canvas.
+ *
+ * 1. SaveLayer (with a paint containing the current imagefilter and xfermode)
+ * 2. Generate the src pixels:
+ * Remove the imagefilter and the xfermode from the paint that we (AutoDrawLooper)
+ * return (fPaint). We then draw the primitive (using srcover) into a cleared
+ * buffer/surface.
+ * 3. Restore the layer created in #1
+ * The imagefilter is passed the buffer/surface from the layer (now filled with the
+ * src pixels of the primitive). It returns a new "filtered" buffer, which we
+ * draw onto the previous layer using the xfermode from the original paint.
+ */
+ SkPaint tmp;
+ tmp.setImageFilter(fPaint->getImageFilter());
+ tmp.setBlendMode(fPaint->getBlendMode());
+ SkRect storage;
+ if (rawBounds) {
+ // Make rawBounds include all paint outsets except for those due to image filters.
+ rawBounds = &apply_paint_to_bounds_sans_imagefilter(*fPaint, *rawBounds, &storage);
+ }
+ (void)canvas->internalSaveLayer(SkCanvas::SaveLayerRec(rawBounds, &tmp),
+ SkCanvas::kFullLayer_SaveLayerStrategy);
+ fTempLayerForImageFilter = true;
+ // we remove the imagefilter/xfermode inside doNext()
+ }
+
+ if (SkDrawLooper* looper = paint.getLooper()) {
+ void* buffer = fLooperContextAllocator.reserveT<SkDrawLooper::Context>(
+ looper->contextSize());
+ fLooperContext = looper->createContext(canvas, buffer);
+ fIsSimple = false;
+ } else {
+ fLooperContext = nullptr;
+ // can we be marked as simple?
+ fIsSimple = !fFilter && !fTempLayerForImageFilter;
+ }
+ }
+
+ ~AutoDrawLooper() {
+ if (fTempLayerForImageFilter) {
+ fCanvas->internalRestore();
+ }
+ SkASSERT(fCanvas->getSaveCount() == fSaveCount);
+ }
+
+ const SkPaint& paint() const {
+ SkASSERT(fPaint);
+ return *fPaint;
+ }
+
+ bool next(SkDrawFilter::Type drawType) {
+ if (fDone) {
+ return false;
+ } else if (fIsSimple) {
+ fDone = true;
+ return !fPaint->nothingToDraw();
+ } else {
+ return this->doNext(drawType);
+ }
+ }
+
+private:
+ SkLazyPaint fLazyPaintInit; // base paint storage in case we need to modify it
+ SkLazyPaint fLazyPaintPerLooper; // per-draw-looper storage, so the looper can modify it
+ SkCanvas* fCanvas;
+ const SkPaint& fOrigPaint;
+ SkDrawFilter* fFilter;
+ const SkPaint* fPaint;
+ int fSaveCount;
+ bool fTempLayerForImageFilter;
+ bool fDone;
+ bool fIsSimple;
+ SkDrawLooper::Context* fLooperContext;
+ SkSmallAllocator<1, 32> fLooperContextAllocator;
+
+ bool doNext(SkDrawFilter::Type drawType);
+};
+
+bool AutoDrawLooper::doNext(SkDrawFilter::Type drawType) {
+ fPaint = nullptr;
+ SkASSERT(!fIsSimple);
+ SkASSERT(fLooperContext || fFilter || fTempLayerForImageFilter);
+
+ SkPaint* paint = fLazyPaintPerLooper.set(fLazyPaintInit.isValid() ?
+ *fLazyPaintInit.get() : fOrigPaint);
+
+ if (fTempLayerForImageFilter) {
+ paint->setImageFilter(nullptr);
+ paint->setBlendMode(SkBlendMode::kSrcOver);
+ }
+
+ if (fLooperContext && !fLooperContext->next(fCanvas, paint)) {
+ fDone = true;
+ return false;
+ }
+ if (fFilter) {
+ if (!fFilter->filter(paint, drawType)) {
+ fDone = true;
+ return false;
+ }
+ if (nullptr == fLooperContext) {
+ // no looper means we only draw once
+ fDone = true;
+ }
+ }
+ fPaint = paint;
+
+ // if we only came in here for the imagefilter, mark us as done
+ if (!fLooperContext && !fFilter) {
+ fDone = true;
+ }
+
+ // call this after any possible paint modifiers
+ if (fPaint->nothingToDraw()) {
+ fPaint = nullptr;
+ return false;
+ }
+ return true;
+}
+
+////////// macros to place around the internal draw calls //////////////////
+
+#define LOOPER_BEGIN_DRAWBITMAP(paint, skipLayerForFilter, bounds) \
+ this->predrawNotify(); \
+ AutoDrawLooper looper(this, paint, skipLayerForFilter, bounds); \
+ while (looper.next(SkDrawFilter::kBitmap_Type)) { \
+ SkDrawIter iter(this);
+
+
+#define LOOPER_BEGIN_DRAWDEVICE(paint, type) \
+ this->predrawNotify(); \
+ AutoDrawLooper looper(this, paint, true); \
+ while (looper.next(type)) { \
+ SkDrawIter iter(this);
+
+#define LOOPER_BEGIN(paint, type, bounds) \
+ this->predrawNotify(); \
+ AutoDrawLooper looper(this, paint, false, bounds); \
+ while (looper.next(type)) { \
+ SkDrawIter iter(this);
+
+#define LOOPER_BEGIN_CHECK_COMPLETE_OVERWRITE(paint, type, bounds, auxOpaque) \
+ this->predrawNotify(bounds, &paint, auxOpaque); \
+ AutoDrawLooper looper(this, paint, false, bounds); \
+ while (looper.next(type)) { \
+ SkDrawIter iter(this);
+
+#define LOOPER_END }
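+
+/* Sketch of how a typical draw method uses these macros (illustrative only; onDrawFoo is a
+ * made-up name):
+ *
+ *   void SkCanvas::onDrawFoo(const SkRect& r, const SkPaint& paint) {
+ *       LOOPER_BEGIN(paint, SkDrawFilter::kRect_Type, &r)
+ *       while (iter.next()) {
+ *           iter.fDevice->drawRect(iter, r, looper.paint());
+ *       }
+ *       LOOPER_END
+ *   }
+ *
+ * LOOPER_BEGIN notifies the surface, constructs an AutoDrawLooper and an SkDrawIter, and
+ * opens the per-looper-pass loop; LOOPER_END closes it.
+ */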
+
+////////////////////////////////////////////////////////////////////////////
+
+static inline SkRect qr_clip_bounds(const SkIRect& bounds) {
+ if (bounds.isEmpty()) {
+ return SkRect::MakeEmpty();
+ }
+
+ // Expand bounds out by 1 in case we are anti-aliasing. We store the
+ // bounds as floats to enable a faster quick reject implementation.
+ SkRect dst;
+ SkNx_cast<float>(Sk4i::Load(&bounds.fLeft) + Sk4i(-1,-1,1,1)).store(&dst.fLeft);
+ return dst;
+}
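+
+// Illustrative numbers (not from the sources): an SkIRect of (L=10, T=20, R=30, B=40)
+// becomes the SkRect (9, 19, 31, 41) -- the Sk4i(-1,-1,1,1) term moves the left/top edges
+// out by one and the right/bottom edges out by one in a single SIMD add.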
+
+void SkCanvas::resetForNextPicture(const SkIRect& bounds) {
+ this->restoreToCount(1);
+ fClipStack->reset();
+ fMCRec->reset(bounds);
+
+ // We're peering through a lot of structs here. Only at this scope do we
+ // know that the device is an SkBitmapDevice (really an SkNoPixelsBitmapDevice).
+ static_cast<SkBitmapDevice*>(fMCRec->fLayer->fDevice)->setNewSize(bounds.size());
+ fDeviceClipBounds = qr_clip_bounds(bounds);
+ fIsScaleTranslate = true;
+}
+
+SkBaseDevice* SkCanvas::init(SkBaseDevice* device, InitFlags flags) {
+ if (device && device->forceConservativeRasterClip()) {
+ flags = InitFlags(flags | kConservativeRasterClip_InitFlag);
+ }
+ // Since init() is only called once by our constructors, it is safe to perform this
+ // const-cast.
+ *const_cast<bool*>(&fConservativeRasterClip) = SkToBool(flags & kConservativeRasterClip_InitFlag);
+
+ fAllowSimplifyClip = false;
+ fDeviceCMDirty = true;
+ fSaveCount = 1;
+ fMetaData = nullptr;
+#ifdef SK_EXPERIMENTAL_SHADOWING
+ fLights = nullptr;
+#endif
+
+ fClipStack.reset(new SkClipStack);
+
+ fMCRec = (MCRec*)fMCStack.push_back();
+ new (fMCRec) MCRec(fConservativeRasterClip);
+ fIsScaleTranslate = true;
+
+ SkASSERT(sizeof(DeviceCM) <= sizeof(fDeviceCMStorage));
+ fMCRec->fLayer = (DeviceCM*)fDeviceCMStorage;
+ new (fDeviceCMStorage) DeviceCM(nullptr, nullptr, nullptr, fConservativeRasterClip,
+ fMCRec->fMatrix);
+
+ fMCRec->fTopLayer = fMCRec->fLayer;
+
+ fSurfaceBase = nullptr;
+
+ if (device) {
+ // The root device and the canvas should always have the same pixel geometry
+ SkASSERT(fProps.pixelGeometry() == device->surfaceProps().pixelGeometry());
+ fMCRec->fLayer->fDevice = SkRef(device);
+ fMCRec->fRasterClip.setRect(device->getGlobalBounds());
+ fDeviceClipBounds = qr_clip_bounds(device->getGlobalBounds());
+ }
+
+ return device;
+}
+
+SkCanvas::SkCanvas()
+ : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage))
+ , fProps(SkSurfaceProps::kLegacyFontHost_InitType)
+ , fConservativeRasterClip(false)
+{
+ inc_canvas();
+
+ this->init(nullptr, kDefault_InitFlags);
+}
+
+static SkBitmap make_nopixels(int width, int height) {
+ SkBitmap bitmap;
+ bitmap.setInfo(SkImageInfo::MakeUnknown(width, height));
+ return bitmap;
+}
+
+class SkNoPixelsBitmapDevice : public SkBitmapDevice {
+public:
+ SkNoPixelsBitmapDevice(const SkIRect& bounds, const SkSurfaceProps& surfaceProps)
+ : INHERITED(make_nopixels(bounds.width(), bounds.height()), surfaceProps)
+ {
+ this->setOrigin(bounds.x(), bounds.y());
+ }
+
+private:
+
+ typedef SkBitmapDevice INHERITED;
+};
+
+SkCanvas::SkCanvas(int width, int height, const SkSurfaceProps* props)
+ : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage))
+ , fProps(SkSurfacePropsCopyOrDefault(props))
+ , fConservativeRasterClip(false)
+{
+ inc_canvas();
+
+ this->init(new SkNoPixelsBitmapDevice(SkIRect::MakeWH(width, height), fProps),
+ kDefault_InitFlags)->unref();
+}
+
+SkCanvas::SkCanvas(const SkIRect& bounds, InitFlags flags)
+ : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage))
+ , fProps(SkSurfaceProps::kLegacyFontHost_InitType)
+ , fConservativeRasterClip(false)
+{
+ inc_canvas();
+
+ this->init(new SkNoPixelsBitmapDevice(bounds, fProps), flags)->unref();
+}
+
+SkCanvas::SkCanvas(SkBaseDevice* device)
+ : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage))
+ , fProps(device->surfaceProps())
+ , fConservativeRasterClip(false)
+{
+ inc_canvas();
+
+ this->init(device, kDefault_InitFlags);
+}
+
+SkCanvas::SkCanvas(SkBaseDevice* device, InitFlags flags)
+ : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage))
+ , fProps(device->surfaceProps())
+ , fConservativeRasterClip(false)
+{
+ inc_canvas();
+
+ this->init(device, flags);
+}
+
+SkCanvas::SkCanvas(const SkBitmap& bitmap, const SkSurfaceProps& props)
+ : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage))
+ , fProps(props)
+ , fConservativeRasterClip(false)
+{
+ inc_canvas();
+
+ SkAutoTUnref<SkBaseDevice> device(new SkBitmapDevice(bitmap, fProps));
+ this->init(device, kDefault_InitFlags);
+}
+
+SkCanvas::SkCanvas(const SkBitmap& bitmap)
+ : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage))
+ , fProps(SkSurfaceProps::kLegacyFontHost_InitType)
+ , fConservativeRasterClip(false)
+{
+ inc_canvas();
+
+ SkAutoTUnref<SkBaseDevice> device(new SkBitmapDevice(bitmap, fProps));
+ this->init(device, kDefault_InitFlags);
+}
+
+SkCanvas::~SkCanvas() {
+ // free up the contents of our deque
+ this->restoreToCount(1); // restore everything but the last
+
+ this->internalRestore(); // restore the last, since we're going away
+
+ delete fMetaData;
+
+ dec_canvas();
+}
+
+#ifdef SK_SUPPORT_LEGACY_DRAWFILTER
+SkDrawFilter* SkCanvas::getDrawFilter() const {
+ return fMCRec->fFilter;
+}
+
+SkDrawFilter* SkCanvas::setDrawFilter(SkDrawFilter* filter) {
+ this->checkForDeferredSave();
+ SkRefCnt_SafeAssign(fMCRec->fFilter, filter);
+ return filter;
+}
+#endif
+
+SkMetaData& SkCanvas::getMetaData() {
+ // metadata users are rare, so we lazily allocate it. If that changes we
+ // can decide to just make it a field in the device (rather than a ptr)
+ if (nullptr == fMetaData) {
+ fMetaData = new SkMetaData;
+ }
+ return *fMetaData;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkCanvas::flush() {
+ this->onFlush();
+}
+
+void SkCanvas::onFlush() {
+ SkBaseDevice* device = this->getDevice();
+ if (device) {
+ device->flush();
+ }
+}
+
+SkISize SkCanvas::getBaseLayerSize() const {
+ SkBaseDevice* d = this->getDevice();
+ return d ? SkISize::Make(d->width(), d->height()) : SkISize::Make(0, 0);
+}
+
+SkIRect SkCanvas::getTopLayerBounds() const {
+ SkBaseDevice* d = this->getTopDevice();
+ if (!d) {
+ return SkIRect::MakeEmpty();
+ }
+ return SkIRect::MakeXYWH(d->getOrigin().x(), d->getOrigin().y(), d->width(), d->height());
+}
+
+SkBaseDevice* SkCanvas::getDevice() const {
+ // return root device
+ MCRec* rec = (MCRec*) fMCStack.front();
+ SkASSERT(rec && rec->fLayer);
+ return rec->fLayer->fDevice;
+}
+
+SkBaseDevice* SkCanvas::getTopDevice(bool updateMatrixClip) const {
+ if (updateMatrixClip) {
+ const_cast<SkCanvas*>(this)->updateDeviceCMCache();
+ }
+ return fMCRec->fTopLayer->fDevice;
+}
+
+bool SkCanvas::readPixels(SkBitmap* bitmap, int x, int y) {
+ if (kUnknown_SkColorType == bitmap->colorType()) {
+ return false;
+ }
+
+ bool weAllocated = false;
+ if (nullptr == bitmap->pixelRef()) {
+ if (!bitmap->tryAllocPixels()) {
+ return false;
+ }
+ weAllocated = true;
+ }
+
+ SkAutoPixmapUnlock unlocker;
+ if (bitmap->requestLock(&unlocker)) {
+ const SkPixmap& pm = unlocker.pixmap();
+ if (this->readPixels(pm.info(), pm.writable_addr(), pm.rowBytes(), x, y)) {
+ return true;
+ }
+ }
+
+ if (weAllocated) {
+ bitmap->setPixelRef(nullptr);
+ }
+ return false;
+}
+
+bool SkCanvas::readPixels(const SkIRect& srcRect, SkBitmap* bitmap) {
+ SkIRect r = srcRect;
+ const SkISize size = this->getBaseLayerSize();
+ if (!r.intersect(0, 0, size.width(), size.height())) {
+ bitmap->reset();
+ return false;
+ }
+
+ if (!bitmap->tryAllocN32Pixels(r.width(), r.height())) {
+ // bitmap will already be reset.
+ return false;
+ }
+ if (!this->readPixels(bitmap->info(), bitmap->getPixels(), bitmap->rowBytes(), r.x(), r.y())) {
+ bitmap->reset();
+ return false;
+ }
+ return true;
+}
+
+bool SkCanvas::readPixels(const SkImageInfo& dstInfo, void* dstP, size_t rowBytes, int x, int y) {
+ SkBaseDevice* device = this->getDevice();
+ if (!device) {
+ return false;
+ }
+ const SkISize size = this->getBaseLayerSize();
+
+ SkReadPixelsRec rec(dstInfo, dstP, rowBytes, x, y);
+ if (!rec.trim(size.width(), size.height())) {
+ return false;
+ }
+
+ // The device can assert that the requested area is always contained in its bounds
+ return device->readPixels(rec.fInfo, rec.fPixels, rec.fRowBytes, rec.fX, rec.fY);
+}
+
+bool SkCanvas::writePixels(const SkBitmap& bitmap, int x, int y) {
+ SkAutoPixmapUnlock unlocker;
+ if (bitmap.requestLock(&unlocker)) {
+ const SkPixmap& pm = unlocker.pixmap();
+ return this->writePixels(pm.info(), pm.addr(), pm.rowBytes(), x, y);
+ }
+ return false;
+}
+
+bool SkCanvas::writePixels(const SkImageInfo& origInfo, const void* pixels, size_t rowBytes,
+ int x, int y) {
+ switch (origInfo.colorType()) {
+ case kUnknown_SkColorType:
+ case kIndex_8_SkColorType:
+ return false;
+ default:
+ break;
+ }
+ if (nullptr == pixels || rowBytes < origInfo.minRowBytes()) {
+ return false;
+ }
+
+ const SkISize size = this->getBaseLayerSize();
+ SkIRect target = SkIRect::MakeXYWH(x, y, origInfo.width(), origInfo.height());
+ if (!target.intersect(0, 0, size.width(), size.height())) {
+ return false;
+ }
+
+ SkBaseDevice* device = this->getDevice();
+ if (!device) {
+ return false;
+ }
+
+ // the intersect may have shrunk info's logical size
+ const SkImageInfo info = origInfo.makeWH(target.width(), target.height());
+
+ // if x or y are negative, then we have to adjust pixels
+ if (x > 0) {
+ x = 0;
+ }
+ if (y > 0) {
+ y = 0;
+ }
+ // here x,y are either 0 or negative
+ pixels = ((const char*)pixels - y * rowBytes - x * info.bytesPerPixel());
+
+ // Tell our owning surface to bump its generation ID
+ const bool completeOverwrite = info.dimensions() == size;
+ this->predrawNotify(completeOverwrite);
+
+ // The device can assert that the requested area is always contained in its bounds
+ return device->writePixels(info, pixels, rowBytes, target.x(), target.y());
+}
+
+SkCanvas* SkCanvas::canvasForDrawIter() {
+ return this;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+void SkCanvas::updateDeviceCMCache() {
+ if (fDeviceCMDirty) {
+ const SkMatrix& totalMatrix = this->getTotalMatrix();
+ const SkRasterClip& totalClip = fMCRec->fRasterClip;
+ DeviceCM* layer = fMCRec->fTopLayer;
+
+ if (nullptr == layer->fNext) { // only one layer
+ layer->updateMC(totalMatrix, totalClip, nullptr);
+ } else {
+ SkRasterClip clip(totalClip);
+ do {
+ layer->updateMC(totalMatrix, clip, &clip);
+ } while ((layer = layer->fNext) != nullptr);
+ }
+ fDeviceCMDirty = false;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkCanvas::checkForDeferredSave() {
+ if (fMCRec->fDeferredSaveCount > 0) {
+ this->doSave();
+ }
+}
+
+int SkCanvas::getSaveCount() const {
+#ifdef SK_DEBUG
+ int count = 0;
+ SkDeque::Iter iter(fMCStack, SkDeque::Iter::kFront_IterStart);
+ for (;;) {
+ const MCRec* rec = (const MCRec*)iter.next();
+ if (!rec) {
+ break;
+ }
+ count += 1 + rec->fDeferredSaveCount;
+ }
+ SkASSERT(count == fSaveCount);
+#endif
+ return fSaveCount;
+}
+
+int SkCanvas::save() {
+ fSaveCount += 1;
+ fMCRec->fDeferredSaveCount += 1;
+ return this->getSaveCount() - 1; // return our prev value
+}
+
+void SkCanvas::doSave() {
+ this->willSave();
+
+ SkASSERT(fMCRec->fDeferredSaveCount > 0);
+ fMCRec->fDeferredSaveCount -= 1;
+ this->internalSave();
+}
+
+void SkCanvas::restore() {
+ if (fMCRec->fDeferredSaveCount > 0) {
+ SkASSERT(fSaveCount > 1);
+ fSaveCount -= 1;
+ fMCRec->fDeferredSaveCount -= 1;
+ } else {
+ // check for underflow
+ if (fMCStack.count() > 1) {
+ this->willRestore();
+ SkASSERT(fSaveCount > 1);
+ fSaveCount -= 1;
+ this->internalRestore();
+ this->didRestore();
+ }
+ }
+}
+
+void SkCanvas::restoreToCount(int count) {
+ // sanity check
+ if (count < 1) {
+ count = 1;
+ }
+
+ int n = this->getSaveCount() - count;
+ for (int i = 0; i < n; ++i) {
+ this->restore();
+ }
+}
+
+void SkCanvas::internalSave() {
+ MCRec* newTop = (MCRec*)fMCStack.push_back();
+ new (newTop) MCRec(*fMCRec); // balanced in restore()
+ fMCRec = newTop;
+
+ fClipStack->save();
+}
+
+bool SkCanvas::BoundsAffectsClip(SaveLayerFlags saveLayerFlags) {
+ return !(saveLayerFlags & SkCanvas::kDontClipToLayer_PrivateSaveLayerFlag);
+}
+
+bool SkCanvas::clipRectBounds(const SkRect* bounds, SaveLayerFlags saveLayerFlags,
+ SkIRect* intersection, const SkImageFilter* imageFilter) {
+ SkIRect clipBounds;
+ if (!this->getClipDeviceBounds(&clipBounds)) {
+ return false;
+ }
+
+ const SkMatrix& ctm = fMCRec->fMatrix; // this->getTotalMatrix()
+
+ if (imageFilter) {
+ clipBounds = imageFilter->filterBounds(clipBounds, ctm);
+ if (bounds && !imageFilter->canComputeFastBounds()) {
+ bounds = nullptr;
+ }
+ }
+ SkIRect ir;
+ if (bounds) {
+ SkRect r;
+
+ ctm.mapRect(&r, *bounds);
+ r.roundOut(&ir);
+ // early exit if the layer's bounds are clipped out
+ if (!ir.intersect(clipBounds)) {
+ if (BoundsAffectsClip(saveLayerFlags)) {
+ fMCRec->fRasterClip.setEmpty();
+ fDeviceClipBounds.setEmpty();
+ }
+ return false;
+ }
+ } else { // no user bounds, so just use the clip
+ ir = clipBounds;
+ }
+ SkASSERT(!ir.isEmpty());
+
+ if (BoundsAffectsClip(saveLayerFlags)) {
+ // Simplify the current clips since they will be applied properly during restore()
+ fClipStack->clipDevRect(ir, kReplace_Op);
+ fMCRec->fRasterClip.setRect(ir);
+ fDeviceClipBounds = qr_clip_bounds(ir);
+ }
+
+ if (intersection) {
+ *intersection = ir;
+ }
+ return true;
+}
+
+
+int SkCanvas::saveLayer(const SkRect* bounds, const SkPaint* paint) {
+ return this->saveLayer(SaveLayerRec(bounds, paint, 0));
+}
+
+int SkCanvas::saveLayerPreserveLCDTextRequests(const SkRect* bounds, const SkPaint* paint) {
+ return this->saveLayer(SaveLayerRec(bounds, paint, kPreserveLCDText_SaveLayerFlag));
+}
+
+int SkCanvas::saveLayer(const SaveLayerRec& origRec) {
+ SaveLayerRec rec(origRec);
+ if (gIgnoreSaveLayerBounds) {
+ rec.fBounds = nullptr;
+ }
+ SaveLayerStrategy strategy = this->getSaveLayerStrategy(rec);
+ fSaveCount += 1;
+ this->internalSaveLayer(rec, strategy);
+ return this->getSaveCount() - 1;
+}
+
+void SkCanvas::DrawDeviceWithFilter(SkBaseDevice* src, const SkImageFilter* filter,
+ SkBaseDevice* dst, const SkMatrix& ctm,
+ const SkClipStack* clipStack) {
+ SkDraw draw;
+ SkRasterClip rc;
+ rc.setRect(SkIRect::MakeWH(dst->width(), dst->height()));
+ if (!dst->accessPixels(&draw.fDst)) {
+ draw.fDst.reset(dst->imageInfo(), nullptr, 0);
+ }
+ draw.fMatrix = &SkMatrix::I();
+ draw.fRC = &rc;
+ draw.fClipStack = clipStack;
+ draw.fDevice = dst;
+
+ SkPaint p;
+ p.setImageFilter(filter->makeWithLocalMatrix(ctm));
+
+ int x = src->getOrigin().x() - dst->getOrigin().x();
+ int y = src->getOrigin().y() - dst->getOrigin().y();
+ auto special = src->snapSpecial();
+ if (special) {
+ dst->drawSpecial(draw, special.get(), x, y, p);
+ }
+}
+
+static SkImageInfo make_layer_info(const SkImageInfo& prev, int w, int h, bool isOpaque,
+ const SkPaint* paint) {
+ // need to force L32 for now if we have an image filter. Once filters support other colortypes
+ // e.g. sRGB or F16, we can remove this check
+ // SRGBTODO: Can we remove this check now?
+ const bool hasImageFilter = paint && paint->getImageFilter();
+
+ SkAlphaType alphaType = isOpaque ? kOpaque_SkAlphaType : kPremul_SkAlphaType;
+ if ((prev.bytesPerPixel() < 4) || hasImageFilter) {
+ // force to L32
+ return SkImageInfo::MakeN32(w, h, alphaType);
+ } else {
+ // keep the same characteristics as the prev
+ return SkImageInfo::Make(w, h, prev.colorType(), alphaType, sk_ref_sp(prev.colorSpace()));
+ }
+}
+
+void SkCanvas::internalSaveLayer(const SaveLayerRec& rec, SaveLayerStrategy strategy) {
+ const SkRect* bounds = rec.fBounds;
+ const SkPaint* paint = rec.fPaint;
+ SaveLayerFlags saveLayerFlags = rec.fSaveLayerFlags;
+
+ SkLazyPaint lazyP;
+ SkImageFilter* imageFilter = paint ? paint->getImageFilter() : NULL;
+ SkMatrix stashedMatrix = fMCRec->fMatrix;
+ SkMatrix remainder;
+ SkSize scale;
+ /*
+ * ImageFilters (so far) do not correctly handle matrices (CTM) that contain rotation/skew/etc.
+ * but they do handle scaling. To accommodate this, we do the following:
+ *
+ * 1. Stash off the current CTM
+ * 2. Decompose the CTM into SCALE and REMAINDER
+ * 3. Wack the CTM to be just SCALE, and wrap the imagefilter with a MatrixImageFilter that
+ * contains the REMAINDER
+ * 4. Proceed as usual, allowing the client to draw into the layer (now with a scale-only CTM)
+ * 5. During restore, we process the MatrixImageFilter, which applies REMAINDER to the output
+ * of the original imagefilter, and draw that (via drawSprite)
+ * 6. Unwack the CTM to its original state (i.e. stashedMatrix)
+ *
+ * Perhaps in the future we could augment #5 to apply REMAINDER as part of the draw (no longer
+ * a sprite operation) to avoid the extra buffer/overhead of MatrixImageFilter.
+ */
+ if (imageFilter && !stashedMatrix.isScaleTranslate() && !imageFilter->canHandleComplexCTM() &&
+ stashedMatrix.decomposeScale(&scale, &remainder))
+ {
+ // We will restore the matrix (which we are overwriting here) in restore via fStashedMatrix
+ this->internalSetMatrix(SkMatrix::MakeScale(scale.width(), scale.height()));
+ SkPaint* p = lazyP.set(*paint);
+ p->setImageFilter(SkImageFilter::MakeMatrixFilter(remainder,
+ SkFilterQuality::kLow_SkFilterQuality,
+ sk_ref_sp(imageFilter)));
+ imageFilter = p->getImageFilter();
+ paint = p;
+ }
+
+ // do this before we create the layer. We don't call the public save() since
+ // that would invoke a possibly overridden virtual
+ this->internalSave();
+
+ fDeviceCMDirty = true;
+
+ SkIRect ir;
+ if (!this->clipRectBounds(bounds, saveLayerFlags, &ir, imageFilter)) {
+ return;
+ }
+
+ // FIXME: do willSaveLayer() overriders returning kNoLayer_SaveLayerStrategy really care about
+ // the clipRectBounds() call above?
+ if (kNoLayer_SaveLayerStrategy == strategy) {
+ return;
+ }
+
+ bool isOpaque = SkToBool(saveLayerFlags & kIsOpaque_SaveLayerFlag);
+ SkPixelGeometry geo = fProps.pixelGeometry();
+ if (paint) {
+ // TODO: perhaps add a query to filters so we might preserve opaqueness...
+ if (paint->getImageFilter() || paint->getColorFilter()) {
+ isOpaque = false;
+ geo = kUnknown_SkPixelGeometry;
+ }
+ }
+
+ SkBaseDevice* priorDevice = this->getTopDevice();
+ if (nullptr == priorDevice) {
+ SkDebugf("Unable to find device for layer.");
+ return;
+ }
+
+ SkImageInfo info = make_layer_info(priorDevice->imageInfo(), ir.width(), ir.height(), isOpaque,
+ paint);
+
+ SkAutoTUnref<SkBaseDevice> newDevice;
+ {
+ const bool preserveLCDText = kOpaque_SkAlphaType == info.alphaType() ||
+ (saveLayerFlags & kPreserveLCDText_SaveLayerFlag);
+ const SkBaseDevice::TileUsage usage = SkBaseDevice::kNever_TileUsage;
+ const SkBaseDevice::CreateInfo createInfo = SkBaseDevice::CreateInfo(info, usage, geo,
+ preserveLCDText);
+ newDevice.reset(priorDevice->onCreateDevice(createInfo, paint));
+ if (!newDevice) {
+ SkErrorInternals::SetError(kInternalError_SkError,
+ "Unable to create device for layer.");
+ return;
+ }
+ }
+ newDevice->setOrigin(ir.fLeft, ir.fTop);
+
+ DeviceCM* layer = new DeviceCM(newDevice, paint, this, fConservativeRasterClip, stashedMatrix);
+
+ layer->fNext = fMCRec->fTopLayer;
+ fMCRec->fLayer = layer;
+ fMCRec->fTopLayer = layer; // this field is NOT an owner of layer
+
+ if (rec.fBackdrop) {
+ DrawDeviceWithFilter(priorDevice, rec.fBackdrop, newDevice,
+ fMCRec->fMatrix, this->getClipStack());
+ }
+}
+
+int SkCanvas::saveLayerAlpha(const SkRect* bounds, U8CPU alpha) {
+ if (0xFF == alpha) {
+ return this->saveLayer(bounds, nullptr);
+ } else {
+ SkPaint tmpPaint;
+ tmpPaint.setAlpha(alpha);
+ return this->saveLayer(bounds, &tmpPaint);
+ }
+}
+
+void SkCanvas::internalRestore() {
+ SkASSERT(fMCStack.count() != 0);
+
+ fDeviceCMDirty = true;
+
+ fClipStack->restore();
+
+ // set aside our layer (if any)
+ DeviceCM* layer = fMCRec->fLayer; // may be null
+ // now detach it from fMCRec so we can pop(). Gets freed after its drawn
+ fMCRec->fLayer = nullptr;
+
+ // now do the normal restore()
+ fMCRec->~MCRec(); // balanced in save()
+ fMCStack.pop_back();
+ fMCRec = (MCRec*)fMCStack.back();
+
+ /* Time to draw the layer's offscreen. We can't call the public drawSprite,
+ since if we're being recorded, we don't want to record this (the
+ recorder will have already recorded the restore).
+ */
+ if (layer) {
+ if (layer->fNext) {
+ const SkIPoint& origin = layer->fDevice->getOrigin();
+ this->internalDrawDevice(layer->fDevice, origin.x(), origin.y(), layer->fPaint);
+ // restore what we smashed in internalSaveLayer
+ fMCRec->fMatrix = layer->fStashedMatrix;
+ // reset this, since internalDrawDevice will have set it to true
+ fDeviceCMDirty = true;
+ delete layer;
+ } else {
+ // we're at the root
+ SkASSERT(layer == (void*)fDeviceCMStorage);
+ layer->~DeviceCM();
+ // no need to update fMCRec, 'cause we're killing the canvas
+ }
+ }
+
+ if (fMCRec) {
+ fIsScaleTranslate = fMCRec->fMatrix.isScaleTranslate();
+ fDeviceClipBounds = qr_clip_bounds(fMCRec->fRasterClip.getBounds());
+ }
+}
+
+sk_sp<SkSurface> SkCanvas::makeSurface(const SkImageInfo& info, const SkSurfaceProps* props) {
+ if (nullptr == props) {
+ props = &fProps;
+ }
+ return this->onNewSurface(info, *props);
+}
+
+sk_sp<SkSurface> SkCanvas::onNewSurface(const SkImageInfo& info, const SkSurfaceProps& props) {
+ SkBaseDevice* dev = this->getDevice();
+ return dev ? dev->makeSurface(info, props) : nullptr;
+}
+
+SkImageInfo SkCanvas::imageInfo() const {
+ return this->onImageInfo();
+}
+
+SkImageInfo SkCanvas::onImageInfo() const {
+ SkBaseDevice* dev = this->getDevice();
+ if (dev) {
+ return dev->imageInfo();
+ } else {
+ return SkImageInfo::MakeUnknown(0, 0);
+ }
+}
+
+bool SkCanvas::getProps(SkSurfaceProps* props) const {
+ return this->onGetProps(props);
+}
+
+bool SkCanvas::onGetProps(SkSurfaceProps* props) const {
+ SkBaseDevice* dev = this->getDevice();
+ if (dev) {
+ if (props) {
+ *props = fProps;
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+#ifdef SK_SUPPORT_LEGACY_PEEKPIXELS_PARMS
+const void* SkCanvas::peekPixels(SkImageInfo* info, size_t* rowBytes) {
+ SkPixmap pmap;
+ if (this->peekPixels(&pmap)) {
+ if (info) {
+ *info = pmap.info();
+ }
+ if (rowBytes) {
+ *rowBytes = pmap.rowBytes();
+ }
+ return pmap.addr();
+ }
+ return nullptr;
+}
+#endif
+
+bool SkCanvas::peekPixels(SkPixmap* pmap) {
+ return this->onPeekPixels(pmap);
+}
+
+bool SkCanvas::onPeekPixels(SkPixmap* pmap) {
+ SkBaseDevice* dev = this->getDevice();
+ return dev && dev->peekPixels(pmap);
+}
+
+void* SkCanvas::accessTopLayerPixels(SkImageInfo* info, size_t* rowBytes, SkIPoint* origin) {
+ SkPixmap pmap;
+ if (!this->onAccessTopLayerPixels(&pmap)) {
+ return nullptr;
+ }
+ if (info) {
+ *info = pmap.info();
+ }
+ if (rowBytes) {
+ *rowBytes = pmap.rowBytes();
+ }
+ if (origin) {
+ *origin = this->getTopDevice(false)->getOrigin();
+ }
+ return pmap.writable_addr();
+}
+
+bool SkCanvas::onAccessTopLayerPixels(SkPixmap* pmap) {
+ SkBaseDevice* dev = this->getTopDevice();
+ return dev && dev->accessPixels(pmap);
+}
+
+/////////////////////////////////////////////////////////////////////////////
+
+void SkCanvas::internalDrawDevice(SkBaseDevice* srcDev, int x, int y, const SkPaint* paint) {
+ SkPaint tmp;
+ if (nullptr == paint) {
+ paint = &tmp;
+ }
+
+ LOOPER_BEGIN_DRAWDEVICE(*paint, SkDrawFilter::kBitmap_Type)
+
+ while (iter.next()) {
+ SkBaseDevice* dstDev = iter.fDevice;
+ paint = &looper.paint();
+ SkImageFilter* filter = paint->getImageFilter();
+ SkIPoint pos = { x - iter.getX(), y - iter.getY() };
+ if (filter) {
+ dstDev->drawSpecial(iter, srcDev->snapSpecial().get(), pos.x(), pos.y(), *paint);
+ } else {
+ dstDev->drawDevice(iter, srcDev, pos.x(), pos.y(), *paint);
+ }
+ }
+
+ LOOPER_END
+}
+
+/////////////////////////////////////////////////////////////////////////////
+
+void SkCanvas::translate(SkScalar dx, SkScalar dy) {
+ if (dx || dy) {
+ this->checkForDeferredSave();
+ fDeviceCMDirty = true;
+ fMCRec->fMatrix.preTranslate(dx,dy);
+
+ // Translate shouldn't affect the is-scale-translateness of the matrix.
+ SkASSERT(fIsScaleTranslate == fMCRec->fMatrix.isScaleTranslate());
+
+ this->didTranslate(dx,dy);
+ }
+}
+
+void SkCanvas::scale(SkScalar sx, SkScalar sy) {
+ SkMatrix m;
+ m.setScale(sx, sy);
+ this->concat(m);
+}
+
+void SkCanvas::rotate(SkScalar degrees) {
+ SkMatrix m;
+ m.setRotate(degrees);
+ this->concat(m);
+}
+
+void SkCanvas::rotate(SkScalar degrees, SkScalar px, SkScalar py) {
+ SkMatrix m;
+ m.setRotate(degrees, px, py);
+ this->concat(m);
+}
+
+void SkCanvas::skew(SkScalar sx, SkScalar sy) {
+ SkMatrix m;
+ m.setSkew(sx, sy);
+ this->concat(m);
+}
+
+void SkCanvas::concat(const SkMatrix& matrix) {
+ if (matrix.isIdentity()) {
+ return;
+ }
+
+ this->checkForDeferredSave();
+ fDeviceCMDirty = true;
+ fMCRec->fMatrix.preConcat(matrix);
+ fIsScaleTranslate = fMCRec->fMatrix.isScaleTranslate();
+ this->didConcat(matrix);
+}
+
+void SkCanvas::internalSetMatrix(const SkMatrix& matrix) {
+ fDeviceCMDirty = true;
+ fMCRec->fMatrix = matrix;
+ fIsScaleTranslate = matrix.isScaleTranslate();
+}
+
+void SkCanvas::setMatrix(const SkMatrix& matrix) {
+ this->checkForDeferredSave();
+ this->internalSetMatrix(matrix);
+ this->didSetMatrix(matrix);
+}
+
+void SkCanvas::resetMatrix() {
+ this->setMatrix(SkMatrix::I());
+}
+
+#ifdef SK_EXPERIMENTAL_SHADOWING
+void SkCanvas::translateZ(SkScalar z) {
+ this->checkForDeferredSave();
+ this->fMCRec->fCurDrawDepth += z;
+ this->didTranslateZ(z);
+}
+
+SkScalar SkCanvas::getZ() const {
+ return this->fMCRec->fCurDrawDepth;
+}
+
+void SkCanvas::setLights(sk_sp<SkLights> lights) {
+ this->fLights = lights;
+}
+
+sk_sp<SkLights> SkCanvas::getLights() const {
+ return this->fLights;
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+
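+// Each clip call below updates two pieces of state in lockstep: fClipStack
+// (the geometric element stack) and fMCRec->fRasterClip (the device-space
+// region actually used for rasterization). The cached fDeviceClipBounds is
+// refreshed from the raster clip so quickReject() can test against it without
+// walking the stack.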
+void SkCanvas::clipRect(const SkRect& rect, ClipOp op, bool doAA) {
+ this->checkForDeferredSave();
+ ClipEdgeStyle edgeStyle = doAA ? kSoft_ClipEdgeStyle : kHard_ClipEdgeStyle;
+ this->onClipRect(rect, op, edgeStyle);
+}
+
+void SkCanvas::onClipRect(const SkRect& rect, ClipOp op, ClipEdgeStyle edgeStyle) {
+ const bool isAA = kSoft_ClipEdgeStyle == edgeStyle;
+ AutoValidateClip avc(this);
+ fClipStack->clipRect(rect, fMCRec->fMatrix, op, isAA);
+ fMCRec->fRasterClip.op(rect, fMCRec->fMatrix, this->getTopLayerBounds(), (SkRegion::Op)op,
+ isAA);
+ fDeviceCMDirty = true;
+ fDeviceClipBounds = qr_clip_bounds(fMCRec->fRasterClip.getBounds());
+}
+
+void SkCanvas::clipRRect(const SkRRect& rrect, ClipOp op, bool doAA) {
+ this->checkForDeferredSave();
+ ClipEdgeStyle edgeStyle = doAA ? kSoft_ClipEdgeStyle : kHard_ClipEdgeStyle;
+ if (rrect.isRect()) {
+ this->onClipRect(rrect.getBounds(), op, edgeStyle);
+ } else {
+ this->onClipRRect(rrect, op, edgeStyle);
+ }
+}
+
+void SkCanvas::onClipRRect(const SkRRect& rrect, ClipOp op, ClipEdgeStyle edgeStyle) {
+ AutoValidateClip avc(this);
+
+ fDeviceCMDirty = true;
+
+ bool isAA = kSoft_ClipEdgeStyle == edgeStyle;
+ fClipStack->clipRRect(rrect, fMCRec->fMatrix, op, isAA);
+ fMCRec->fRasterClip.op(rrect, fMCRec->fMatrix, this->getTopLayerBounds(), (SkRegion::Op)op,
+ isAA);
+ fDeviceClipBounds = qr_clip_bounds(fMCRec->fRasterClip.getBounds());
+}
+
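+// clipPath() first tries to downgrade the path to a simpler clip primitive:
+// when the path is not inverse-filled and the CTM keeps rects rectangular, a
+// path that is really a rect, oval or round-rect is handed to the cheaper
+// onClipRect()/onClipRRect() paths instead of a full path clip.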
+void SkCanvas::clipPath(const SkPath& path, ClipOp op, bool doAA) {
+ this->checkForDeferredSave();
+ ClipEdgeStyle edgeStyle = doAA ? kSoft_ClipEdgeStyle : kHard_ClipEdgeStyle;
+
+ if (!path.isInverseFillType() && fMCRec->fMatrix.rectStaysRect()) {
+ SkRect r;
+ if (path.isRect(&r)) {
+ this->onClipRect(r, op, edgeStyle);
+ return;
+ }
+ SkRRect rrect;
+ if (path.isOval(&r)) {
+ rrect.setOval(r);
+ this->onClipRRect(rrect, op, edgeStyle);
+ return;
+ }
+ if (path.isRRect(&rrect)) {
+ this->onClipRRect(rrect, op, edgeStyle);
+ return;
+ }
+ }
+
+ this->onClipPath(path, op, edgeStyle);
+}
+
+void SkCanvas::onClipPath(const SkPath& path, ClipOp op, ClipEdgeStyle edgeStyle) {
+ AutoValidateClip avc(this);
+
+ fDeviceCMDirty = true;
+ bool isAA = kSoft_ClipEdgeStyle == edgeStyle;
+
+ fClipStack->clipPath(path, fMCRec->fMatrix, op, isAA);
+
+ const SkPath* rasterClipPath = &path;
+ const SkMatrix* matrix = &fMCRec->fMatrix;
+ SkPath tempPath;
+ if (fAllowSimplifyClip) {
+ isAA = getClipStack()->asPath(&tempPath);
+ rasterClipPath = &tempPath;
+ matrix = &SkMatrix::I();
+ op = kReplace_Op;
+ }
+ fMCRec->fRasterClip.op(*rasterClipPath, *matrix, this->getTopLayerBounds(), (SkRegion::Op)op,
+ isAA);
+ fDeviceClipBounds = qr_clip_bounds(fMCRec->fRasterClip.getBounds());
+}
+
+void SkCanvas::clipRegion(const SkRegion& rgn, ClipOp op) {
+ this->checkForDeferredSave();
+ this->onClipRegion(rgn, op);
+}
+
+void SkCanvas::onClipRegion(const SkRegion& rgn, ClipOp op) {
+ AutoValidateClip avc(this);
+
+ fDeviceCMDirty = true;
+
+ // todo: signal fClipStack that we have a region, and therefore (I guess)
+ // we have to ignore it, and use the region directly?
+ fClipStack->clipDevRect(rgn.getBounds(), op);
+
+ fMCRec->fRasterClip.op(rgn, (SkRegion::Op)op);
+ fDeviceClipBounds = qr_clip_bounds(fMCRec->fRasterClip.getBounds());
+}
+
+#ifdef SK_DEBUG
+void SkCanvas::validateClip() const {
+#ifndef SK_DISABLE_SLOW_DEBUG_VALIDATION
+ // construct clipRgn from the clipstack
+ const SkBaseDevice* device = this->getDevice();
+ if (!device) {
+ SkASSERT(this->isClipEmpty());
+ return;
+ }
+
+ SkIRect ir;
+ ir.set(0, 0, device->width(), device->height());
+ SkRasterClip tmpClip(ir, fConservativeRasterClip);
+
+ SkClipStack::B2TIter iter(*fClipStack);
+ const SkClipStack::Element* element;
+ while ((element = iter.next()) != nullptr) {
+ switch (element->getType()) {
+ case SkClipStack::Element::kRect_Type:
+ element->getRect().round(&ir);
+ tmpClip.op(ir, (SkRegion::Op)element->getOp());
+ break;
+ case SkClipStack::Element::kEmpty_Type:
+ tmpClip.setEmpty();
+ break;
+ default: {
+ SkPath path;
+ element->asPath(&path);
+ tmpClip.op(path, SkMatrix::I(), this->getTopLayerBounds(),
+ (SkRegion::Op)element->getOp(), element->isAA());
+ break;
+ }
+ }
+ }
+#endif
+}
+#endif
+
+void SkCanvas::replayClips(ClipVisitor* visitor) const {
+ SkClipStack::B2TIter iter(*fClipStack);
+ const SkClipStack::Element* element;
+
+ while ((element = iter.next()) != nullptr) {
+ element->replay(visitor);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkCanvas::isClipEmpty() const {
+ return fMCRec->fRasterClip.isEmpty();
+}
+
+bool SkCanvas::isClipRect() const {
+ return fMCRec->fRasterClip.isRect();
+}
+
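+// Vectorized helper for quickReject(). Both devRect and devClip are packed as
+// (left, top, right, bottom). Interleaving the two rects lets a single vector
+// compare test rect.L < clip.R, clip.L < rect.R, rect.T < clip.B and
+// clip.T < rect.B at once; all four hold only when the (finite) rects overlap.
+// A NaN coordinate makes its comparison false, so non-finite rects are
+// rejected as well. Returns true when the draw can be skipped.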
+static inline bool is_nan_or_clipped(const Sk4f& devRect, const Sk4f& devClip) {
+#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ __m128 lLtT = _mm_unpacklo_ps(devRect.fVec, devClip.fVec);
+ __m128 RrBb = _mm_unpackhi_ps(devClip.fVec, devRect.fVec);
+ __m128 mask = _mm_cmplt_ps(lLtT, RrBb);
+ return 0xF != _mm_movemask_ps(mask);
+#elif !defined(SKNX_NO_SIMD) && defined(SK_ARM_HAS_NEON)
+ float32x4_t lLtT = vzipq_f32(devRect.fVec, devClip.fVec).val[0];
+ float32x4_t RrBb = vzipq_f32(devClip.fVec, devRect.fVec).val[1];
+ uint32x4_t mask = vcltq_f32(lLtT, RrBb);
+ return 0xFFFFFFFFFFFFFFFF != (uint64_t) vmovn_u32(mask);
+#else
+ SkRect devRectAsRect;
+ SkRect devClipAsRect;
+ devRect.store(&devRectAsRect.fLeft);
+ devClip.store(&devClipAsRect.fLeft);
+ return !devRectAsRect.isFinite() || !devRectAsRect.intersect(devClipAsRect);
+#endif
+}
+
+// It's important for this function to not be inlined. Otherwise the compiler will share code
+// between the fast path and the slow path, resulting in two slow paths.
+static SK_NEVER_INLINE bool quick_reject_slow_path(const SkRect& src, const SkRect& deviceClip,
+ const SkMatrix& matrix) {
+ SkRect deviceRect;
+ matrix.mapRect(&deviceRect, src);
+ return !deviceRect.isFinite() || !deviceRect.intersect(deviceClip);
+}
+
+bool SkCanvas::quickReject(const SkRect& src) const {
+#ifdef SK_DEBUG
+ // Verify that fDeviceClipBounds are set properly.
+ SkRect tmp = qr_clip_bounds(fMCRec->fRasterClip.getBounds());
+ if (fMCRec->fRasterClip.isEmpty()) {
+ SkASSERT(fDeviceClipBounds.isEmpty());
+ } else {
+ SkASSERT(tmp == fDeviceClipBounds);
+ }
+
+ // Verify that fIsScaleTranslate is set properly.
+ SkASSERT(fIsScaleTranslate == fMCRec->fMatrix.isScaleTranslate());
+#endif
+
+ if (!fIsScaleTranslate) {
+ return quick_reject_slow_path(src, fDeviceClipBounds, fMCRec->fMatrix);
+ }
+
+ // We inline the implementation of mapScaleTranslate() for the fast path.
+ float sx = fMCRec->fMatrix.getScaleX();
+ float sy = fMCRec->fMatrix.getScaleY();
+ float tx = fMCRec->fMatrix.getTranslateX();
+ float ty = fMCRec->fMatrix.getTranslateY();
+ Sk4f scale(sx, sy, sx, sy);
+ Sk4f trans(tx, ty, tx, ty);
+
+ // Apply matrix.
+ Sk4f ltrb = Sk4f::Load(&src.fLeft) * scale + trans;
+
+ // Make sure left < right, top < bottom.
+ Sk4f rblt(ltrb[2], ltrb[3], ltrb[0], ltrb[1]);
+ Sk4f min = Sk4f::Min(ltrb, rblt);
+ Sk4f max = Sk4f::Max(ltrb, rblt);
+ // We can extract either pair [0,1] or [2,3] from min and max and be correct, but on
+ // ARM this sequence generates the fastest code (a single instruction).
+ Sk4f devRect = Sk4f(min[2], min[3], max[0], max[1]);
+
+ // Check if the device rect is NaN or outside the clip.
+ return is_nan_or_clipped(devRect, Sk4f::Load(&fDeviceClipBounds.fLeft));
+}
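+
+// Worked example for the sort-free min/max trick above: with a negative x
+// scale, ltrb might come out as (10, 2, 4, 8). Then rblt = (4, 8, 10, 2),
+// min = (4, 2, 4, 2), max = (10, 8, 10, 8), and picking
+// (min[2], min[3], max[0], max[1]) yields the sorted rect (4, 2, 10, 8)
+// without any branches.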
+
+bool SkCanvas::quickReject(const SkPath& path) const {
+ return path.isEmpty() || this->quickReject(path.getBounds());
+}
+
+bool SkCanvas::getClipBounds(SkRect* bounds) const {
+ SkIRect ibounds;
+ if (!this->getClipDeviceBounds(&ibounds)) {
+ return false;
+ }
+
+ SkMatrix inverse;
+ // if we can't invert the CTM, we can't return local clip bounds
+ if (!fMCRec->fMatrix.invert(&inverse)) {
+ if (bounds) {
+ bounds->setEmpty();
+ }
+ return false;
+ }
+
+ if (bounds) {
+ SkRect r;
+ // adjust it outwards in case we are antialiasing
+ const int inset = 1;
+
+ r.iset(ibounds.fLeft - inset, ibounds.fTop - inset,
+ ibounds.fRight + inset, ibounds.fBottom + inset);
+ inverse.mapRect(bounds, r);
+ }
+ return true;
+}
+
+bool SkCanvas::getClipDeviceBounds(SkIRect* bounds) const {
+ const SkRasterClip& clip = fMCRec->fRasterClip;
+ if (clip.isEmpty()) {
+ if (bounds) {
+ bounds->setEmpty();
+ }
+ return false;
+ }
+
+ if (bounds) {
+ *bounds = clip.getBounds();
+ }
+ return true;
+}
+
+const SkMatrix& SkCanvas::getTotalMatrix() const {
+ return fMCRec->fMatrix;
+}
+
+const SkRegion& SkCanvas::internal_private_getTotalClip() const {
+ return fMCRec->fRasterClip.forceGetBW();
+}
+
+GrDrawContext* SkCanvas::internal_private_accessTopLayerDrawContext() {
+ SkBaseDevice* dev = this->getTopDevice();
+ return dev ? dev->accessDrawContext() : nullptr;
+}
+
+GrContext* SkCanvas::getGrContext() {
+ SkBaseDevice* device = this->getTopDevice();
+ return device ? device->context() : nullptr;
+}
+
+void SkCanvas::drawDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawDRRect()");
+ if (outer.isEmpty()) {
+ return;
+ }
+ if (inner.isEmpty()) {
+ this->drawRRect(outer, paint);
+ return;
+ }
+
+ // We don't have this method (yet), but technically this is what we should
+ // be able to assert...
+ // SkASSERT(outer.contains(inner));
+ //
+ // For now, at least check for containment of the bounds.
+ SkASSERT(outer.getBounds().contains(inner.getBounds()));
+
+ this->onDrawDRRect(outer, inner, paint);
+}
+
+// These need to stop being virtual -- clients need to override the onDraw... versions
+
+void SkCanvas::drawPaint(const SkPaint& paint) {
+ this->onDrawPaint(paint);
+}
+
+void SkCanvas::drawRect(const SkRect& r, const SkPaint& paint) {
+ this->onDrawRect(r, paint);
+}
+
+void SkCanvas::drawRegion(const SkRegion& region, const SkPaint& paint) {
+ if (region.isEmpty()) {
+ return;
+ }
+
+ if (region.isRect()) {
+ return this->drawIRect(region.getBounds(), paint);
+ }
+
+ this->onDrawRegion(region, paint);
+}
+
+void SkCanvas::drawOval(const SkRect& r, const SkPaint& paint) {
+ this->onDrawOval(r, paint);
+}
+
+void SkCanvas::drawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ this->onDrawRRect(rrect, paint);
+}
+
+void SkCanvas::drawPoints(PointMode mode, size_t count, const SkPoint pts[], const SkPaint& paint) {
+ this->onDrawPoints(mode, count, pts, paint);
+}
+
+void SkCanvas::drawVertices(VertexMode vmode, int vertexCount, const SkPoint vertices[],
+ const SkPoint texs[], const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount, const SkPaint& paint) {
+ this->onDrawVertices(vmode, vertexCount, vertices, texs, colors, xmode,
+ indices, indexCount, paint);
+}
+
+void SkCanvas::drawPath(const SkPath& path, const SkPaint& paint) {
+ this->onDrawPath(path, paint);
+}
+
+void SkCanvas::drawImage(const SkImage* image, SkScalar x, SkScalar y, const SkPaint* paint) {
+ RETURN_ON_NULL(image);
+ this->onDrawImage(image, x, y, paint);
+}
+
+void SkCanvas::drawImageRect(const SkImage* image, const SkRect& src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ RETURN_ON_NULL(image);
+ if (dst.isEmpty() || src.isEmpty()) {
+ return;
+ }
+ this->onDrawImageRect(image, &src, dst, paint, constraint);
+}
+
+void SkCanvas::drawImageRect(const SkImage* image, const SkIRect& isrc, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ RETURN_ON_NULL(image);
+ this->drawImageRect(image, SkRect::Make(isrc), dst, paint, constraint);
+}
+
+void SkCanvas::drawImageRect(const SkImage* image, const SkRect& dst, const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ RETURN_ON_NULL(image);
+ this->drawImageRect(image, SkRect::MakeIWH(image->width(), image->height()), dst, paint,
+ constraint);
+}
+
+void SkCanvas::drawImageNine(const SkImage* image, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint) {
+ RETURN_ON_NULL(image);
+ if (dst.isEmpty()) {
+ return;
+ }
+ if (SkLatticeIter::Valid(image->width(), image->height(), center)) {
+ this->onDrawImageNine(image, center, dst, paint);
+ } else {
+ this->drawImageRect(image, dst, paint);
+ }
+}
+
+void SkCanvas::drawImageLattice(const SkImage* image, const Lattice& lattice, const SkRect& dst,
+ const SkPaint* paint) {
+ RETURN_ON_NULL(image);
+ if (dst.isEmpty()) {
+ return;
+ }
+
+ SkIRect bounds;
+ Lattice latticePlusBounds = lattice;
+ if (!latticePlusBounds.fBounds) {
+ bounds = SkIRect::MakeWH(image->width(), image->height());
+ latticePlusBounds.fBounds = &bounds;
+ }
+
+ if (SkLatticeIter::Valid(image->width(), image->height(), latticePlusBounds)) {
+ this->onDrawImageLattice(image, latticePlusBounds, dst, paint);
+ } else {
+ this->drawImageRect(image, dst, paint);
+ }
+}
+
+void SkCanvas::drawBitmap(const SkBitmap& bitmap, SkScalar dx, SkScalar dy, const SkPaint* paint) {
+ if (bitmap.drawsNothing()) {
+ return;
+ }
+ this->onDrawBitmap(bitmap, dx, dy, paint);
+}
+
+void SkCanvas::drawBitmapRect(const SkBitmap& bitmap, const SkRect& src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ if (bitmap.drawsNothing() || dst.isEmpty() || src.isEmpty()) {
+ return;
+ }
+ this->onDrawBitmapRect(bitmap, &src, dst, paint, constraint);
+}
+
+void SkCanvas::drawBitmapRect(const SkBitmap& bitmap, const SkIRect& isrc, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ this->drawBitmapRect(bitmap, SkRect::Make(isrc), dst, paint, constraint);
+}
+
+void SkCanvas::drawBitmapRect(const SkBitmap& bitmap, const SkRect& dst, const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ this->drawBitmapRect(bitmap, SkRect::MakeIWH(bitmap.width(), bitmap.height()), dst, paint,
+ constraint);
+}
+
+void SkCanvas::drawBitmapNine(const SkBitmap& bitmap, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint) {
+ if (bitmap.drawsNothing() || dst.isEmpty()) {
+ return;
+ }
+ if (SkLatticeIter::Valid(bitmap.width(), bitmap.height(), center)) {
+ this->onDrawBitmapNine(bitmap, center, dst, paint);
+ } else {
+ this->drawBitmapRect(bitmap, dst, paint);
+ }
+}
+
+void SkCanvas::drawBitmapLattice(const SkBitmap& bitmap, const Lattice& lattice, const SkRect& dst,
+ const SkPaint* paint) {
+ if (bitmap.drawsNothing() || dst.isEmpty()) {
+ return;
+ }
+
+ SkIRect bounds;
+ Lattice latticePlusBounds = lattice;
+ if (!latticePlusBounds.fBounds) {
+ bounds = SkIRect::MakeWH(bitmap.width(), bitmap.height());
+ latticePlusBounds.fBounds = &bounds;
+ }
+
+ if (SkLatticeIter::Valid(bitmap.width(), bitmap.height(), latticePlusBounds)) {
+ this->onDrawBitmapLattice(bitmap, latticePlusBounds, dst, paint);
+ } else {
+ this->drawBitmapRect(bitmap, dst, paint);
+ }
+}
+
+void SkCanvas::drawAtlas(const SkImage* atlas, const SkRSXform xform[], const SkRect tex[],
+ const SkColor colors[], int count, SkXfermode::Mode mode,
+ const SkRect* cull, const SkPaint* paint) {
+ RETURN_ON_NULL(atlas);
+ if (count <= 0) {
+ return;
+ }
+ SkASSERT(atlas);
+ SkASSERT(xform);
+ SkASSERT(tex);
+ this->onDrawAtlas(atlas, xform, tex, colors, count, mode, cull, paint);
+}
+
+void SkCanvas::drawAnnotation(const SkRect& rect, const char key[], SkData* value) {
+ if (key) {
+ this->onDrawAnnotation(rect, key, value);
+ }
+}
+
+void SkCanvas::legacy_drawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ if (src) {
+ this->drawImageRect(image, *src, dst, paint, constraint);
+ } else {
+ this->drawImageRect(image, SkRect::MakeIWH(image->width(), image->height()),
+ dst, paint, constraint);
+ }
+}
+void SkCanvas::legacy_drawBitmapRect(const SkBitmap& bitmap, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ if (src) {
+ this->drawBitmapRect(bitmap, *src, dst, paint, constraint);
+ } else {
+ this->drawBitmapRect(bitmap, SkRect::MakeIWH(bitmap.width(), bitmap.height()),
+ dst, paint, constraint);
+ }
+}
+
+void SkCanvas::temporary_internal_describeTopLayer(SkMatrix* matrix, SkIRect* clip_bounds) {
+ SkIRect layer_bounds = this->getTopLayerBounds();
+ if (matrix) {
+ *matrix = this->getTotalMatrix();
+ matrix->preTranslate(-layer_bounds.left(), -layer_bounds.top());
+ }
+ if (clip_bounds) {
+ this->getClipDeviceBounds(clip_bounds);
+ clip_bounds->offset(-layer_bounds.left(), -layer_bounds.top());
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// These are the virtual drawing methods
+//////////////////////////////////////////////////////////////////////////////
+
+void SkCanvas::onDiscard() {
+ if (fSurfaceBase) {
+ fSurfaceBase->aboutToDraw(SkSurface::kDiscard_ContentChangeMode);
+ }
+}
+
+void SkCanvas::onDrawPaint(const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawPaint()");
+ this->internalDrawPaint(paint);
+}
+
+void SkCanvas::internalDrawPaint(const SkPaint& paint) {
+ LOOPER_BEGIN_CHECK_COMPLETE_OVERWRITE(paint, SkDrawFilter::kPaint_Type, nullptr, false)
+
+ while (iter.next()) {
+ iter.fDevice->drawPaint(iter, looper.paint());
+ }
+
+ LOOPER_END
+}
+
+void SkCanvas::onDrawPoints(PointMode mode, size_t count, const SkPoint pts[],
+ const SkPaint& paint) {
+ TRACE_EVENT1("disabled-by-default-skia", "SkCanvas::drawPoints()", "count", static_cast<uint64_t>(count));
+ if ((long)count <= 0) {
+ return;
+ }
+
+ SkRect r, storage;
+ const SkRect* bounds = nullptr;
+ if (paint.canComputeFastBounds()) {
+ // special-case 2 points (common for drawing a single line)
+ if (2 == count) {
+ r.set(pts[0], pts[1]);
+ } else {
+ r.set(pts, SkToInt(count));
+ }
+ if (this->quickReject(paint.computeFastStrokeBounds(r, &storage))) {
+ return;
+ }
+ bounds = &r;
+ }
+
+ SkASSERT(pts != nullptr);
+
+ LOOPER_BEGIN(paint, SkDrawFilter::kPoint_Type, bounds)
+
+ while (iter.next()) {
+ iter.fDevice->drawPoints(iter, mode, count, pts, looper.paint());
+ }
+
+ LOOPER_END
+}
+
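+// Returns true if drawing with this paint needs the full AutoDrawLooper
+// machinery, i.e. an image filter, a (legacy) draw filter or a draw looper is
+// attached. The pointers are OR'd together so a single compare against zero
+// answers "is any of these non-null?".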
+static bool needs_autodrawlooper(SkCanvas* canvas, const SkPaint& paint) {
+ return ((intptr_t)paint.getImageFilter() |
+#ifdef SK_SUPPORT_LEGACY_DRAWFILTER
+ (intptr_t)canvas->getDrawFilter() |
+#endif
+ (intptr_t)paint.getLooper() ) != 0;
+}
+
+void SkCanvas::onDrawRect(const SkRect& r, const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawRect()");
+ SkRect storage;
+ const SkRect* bounds = nullptr;
+ if (paint.canComputeFastBounds()) {
+ // Skia will draw an inverted rect, because it explicitly "sorts" it downstream.
+ // To prevent accidental rejecting at this stage, we have to sort it before we check.
+ SkRect tmp(r);
+ tmp.sort();
+
+ if (this->quickReject(paint.computeFastBounds(tmp, &storage))) {
+ return;
+ }
+ bounds = &r;
+ }
+
+ if (needs_autodrawlooper(this, paint)) {
+ LOOPER_BEGIN_CHECK_COMPLETE_OVERWRITE(paint, SkDrawFilter::kRect_Type, bounds, false)
+
+ while (iter.next()) {
+ iter.fDevice->drawRect(iter, r, looper.paint());
+ }
+
+ LOOPER_END
+ } else {
+ this->predrawNotify(bounds, &paint, false);
+ SkDrawIter iter(this);
+ while (iter.next()) {
+ iter.fDevice->drawRect(iter, r, paint);
+ }
+ }
+}
+
+void SkCanvas::onDrawRegion(const SkRegion& region, const SkPaint& paint) {
+ SkRect storage;
+ SkRect regionRect = SkRect::Make(region.getBounds());
+ const SkRect* bounds = nullptr;
+ if (paint.canComputeFastBounds()) {
+ if (this->quickReject(paint.computeFastBounds(regionRect, &storage))) {
+ return;
+ }
+ bounds = &regionRect;
+ }
+
+ LOOPER_BEGIN(paint, SkDrawFilter::kRect_Type, bounds)
+
+ while (iter.next()) {
+ iter.fDevice->drawRegion(iter, region, looper.paint());
+ }
+
+ LOOPER_END
+}
+
+void SkCanvas::onDrawOval(const SkRect& oval, const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawOval()");
+ SkRect storage;
+ const SkRect* bounds = nullptr;
+ if (paint.canComputeFastBounds()) {
+ if (this->quickReject(paint.computeFastBounds(oval, &storage))) {
+ return;
+ }
+ bounds = &oval;
+ }
+
+ LOOPER_BEGIN(paint, SkDrawFilter::kOval_Type, bounds)
+
+ while (iter.next()) {
+ iter.fDevice->drawOval(iter, oval, looper.paint());
+ }
+
+ LOOPER_END
+}
+
+void SkCanvas::onDrawArc(const SkRect& oval, SkScalar startAngle,
+ SkScalar sweepAngle, bool useCenter,
+ const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawArc()");
+ const SkRect* bounds = nullptr;
+ if (paint.canComputeFastBounds()) {
+ SkRect storage;
+ // Note we're using the entire oval as the bounds.
+ if (this->quickReject(paint.computeFastBounds(oval, &storage))) {
+ return;
+ }
+ bounds = &oval;
+ }
+
+ LOOPER_BEGIN(paint, SkDrawFilter::kOval_Type, bounds)
+
+ while (iter.next()) {
+ iter.fDevice->drawArc(iter, oval, startAngle, sweepAngle, useCenter, looper.paint());
+ }
+
+ LOOPER_END
+}
+
+void SkCanvas::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawRRect()");
+ SkRect storage;
+ const SkRect* bounds = nullptr;
+ if (paint.canComputeFastBounds()) {
+ if (this->quickReject(paint.computeFastBounds(rrect.getBounds(), &storage))) {
+ return;
+ }
+ bounds = &rrect.getBounds();
+ }
+
+ if (rrect.isRect()) {
+ // call the non-virtual version
+ this->SkCanvas::drawRect(rrect.getBounds(), paint);
+ return;
+ } else if (rrect.isOval()) {
+ // call the non-virtual version
+ this->SkCanvas::drawOval(rrect.getBounds(), paint);
+ return;
+ }
+
+ LOOPER_BEGIN(paint, SkDrawFilter::kRRect_Type, bounds)
+
+ while (iter.next()) {
+ iter.fDevice->drawRRect(iter, rrect, looper.paint());
+ }
+
+ LOOPER_END
+}
+
+void SkCanvas::onDrawDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkPaint& paint) {
+ SkRect storage;
+ const SkRect* bounds = nullptr;
+ if (paint.canComputeFastBounds()) {
+ if (this->quickReject(paint.computeFastBounds(outer.getBounds(), &storage))) {
+ return;
+ }
+ bounds = &outer.getBounds();
+ }
+
+ LOOPER_BEGIN(paint, SkDrawFilter::kRRect_Type, bounds)
+
+ while (iter.next()) {
+ iter.fDevice->drawDRRect(iter, outer, inner, looper.paint());
+ }
+
+ LOOPER_END
+}
+
+void SkCanvas::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawPath()");
+ if (!path.isFinite()) {
+ return;
+ }
+
+ SkRect storage;
+ const SkRect* bounds = nullptr;
+ if (!path.isInverseFillType() && paint.canComputeFastBounds()) {
+ const SkRect& pathBounds = path.getBounds();
+ if (this->quickReject(paint.computeFastBounds(pathBounds, &storage))) {
+ return;
+ }
+ bounds = &pathBounds;
+ }
+
+ const SkRect& r = path.getBounds();
+ if (r.width() <= 0 && r.height() <= 0) {
+ if (path.isInverseFillType()) {
+ this->internalDrawPaint(paint);
+ return;
+ }
+ }
+
+ LOOPER_BEGIN(paint, SkDrawFilter::kPath_Type, bounds)
+
+ while (iter.next()) {
+ iter.fDevice->drawPath(iter, path, looper.paint());
+ }
+
+ LOOPER_END
+}
+
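+// Decides whether an image-filtered image/bitmap draw can be handled as a
+// device-space sprite: the paint must have an image filter, the CTM must be an
+// effectively integer translate (SkTreatAsSprite), and the current clip must
+// lie entirely inside the sprite's bounds (see skbug.com/4526 below).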
+bool SkCanvas::canDrawBitmapAsSprite(SkScalar x, SkScalar y, int w, int h, const SkPaint& paint) {
+ if (!paint.getImageFilter()) {
+ return false;
+ }
+
+ const SkMatrix& ctm = this->getTotalMatrix();
+ if (!SkTreatAsSprite(ctm, SkISize::Make(w, h), paint)) {
+ return false;
+ }
+
+ // Currently we can only use the filterSprite code if we are clipped to the bitmap's bounds.
+ // Once filters can return a result larger than their input, we should be able to remove
+ // this constraint.
+ // skbug.com/4526
+ //
+ SkPoint pt;
+ ctm.mapXY(x, y, &pt);
+ SkIRect ir = SkIRect::MakeXYWH(SkScalarRoundToInt(pt.x()), SkScalarRoundToInt(pt.y()), w, h);
+ return ir.contains(fMCRec->fRasterClip.getBounds());
+}
+
+void SkCanvas::onDrawImage(const SkImage* image, SkScalar x, SkScalar y, const SkPaint* paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawImage()");
+ SkRect bounds = SkRect::MakeXYWH(x, y,
+ SkIntToScalar(image->width()), SkIntToScalar(image->height()));
+ if (nullptr == paint || paint->canComputeFastBounds()) {
+ SkRect tmp = bounds;
+ if (paint) {
+ paint->computeFastBounds(tmp, &tmp);
+ }
+ if (this->quickReject(tmp)) {
+ return;
+ }
+ }
+
+ SkLazyPaint lazy;
+ if (nullptr == paint) {
+ paint = lazy.init();
+ }
+
+ sk_sp<SkSpecialImage> special;
+ bool drawAsSprite = this->canDrawBitmapAsSprite(x, y, image->width(), image->height(),
+ *paint);
+ if (drawAsSprite && paint->getImageFilter()) {
+ special = this->getDevice()->makeSpecial(image);
+ if (!special) {
+ drawAsSprite = false;
+ }
+ }
+
+ LOOPER_BEGIN_DRAWBITMAP(*paint, drawAsSprite, &bounds)
+
+ while (iter.next()) {
+ const SkPaint& pnt = looper.paint();
+ if (special) {
+ SkPoint pt;
+ iter.fMatrix->mapXY(x, y, &pt);
+ iter.fDevice->drawSpecial(iter, special.get(),
+ SkScalarRoundToInt(pt.fX),
+ SkScalarRoundToInt(pt.fY), pnt);
+ } else {
+ iter.fDevice->drawImage(iter, image, x, y, pnt);
+ }
+ }
+
+ LOOPER_END
+}
+
+void SkCanvas::onDrawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawImageRect()");
+ if (nullptr == paint || paint->canComputeFastBounds()) {
+ SkRect storage = dst;
+ if (paint) {
+ paint->computeFastBounds(dst, &storage);
+ }
+ if (this->quickReject(storage)) {
+ return;
+ }
+ }
+ SkLazyPaint lazy;
+ if (nullptr == paint) {
+ paint = lazy.init();
+ }
+
+ LOOPER_BEGIN_CHECK_COMPLETE_OVERWRITE(*paint, SkDrawFilter::kBitmap_Type, &dst,
+ image->isOpaque())
+
+ while (iter.next()) {
+ iter.fDevice->drawImageRect(iter, image, src, dst, looper.paint(), constraint);
+ }
+
+ LOOPER_END
+}
+
+void SkCanvas::onDrawBitmap(const SkBitmap& bitmap, SkScalar x, SkScalar y, const SkPaint* paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawBitmap()");
+ SkDEBUGCODE(bitmap.validate();)
+
+ if (bitmap.drawsNothing()) {
+ return;
+ }
+
+ SkLazyPaint lazy;
+ if (nullptr == paint) {
+ paint = lazy.init();
+ }
+
+ const SkMatrix matrix = SkMatrix::MakeTrans(x, y);
+
+ SkRect storage;
+ const SkRect* bounds = nullptr;
+ if (paint->canComputeFastBounds()) {
+ bitmap.getBounds(&storage);
+ matrix.mapRect(&storage);
+ SkRect tmp = storage;
+ if (this->quickReject(paint->computeFastBounds(tmp, &tmp))) {
+ return;
+ }
+ bounds = &storage;
+ }
+
+ sk_sp<SkSpecialImage> special;
+ bool drawAsSprite = bounds && this->canDrawBitmapAsSprite(x, y, bitmap.width(), bitmap.height(),
+ *paint);
+ if (drawAsSprite && paint->getImageFilter()) {
+ special = this->getDevice()->makeSpecial(bitmap);
+ if (!special) {
+ drawAsSprite = false;
+ }
+ }
+
+ LOOPER_BEGIN_DRAWBITMAP(*paint, drawAsSprite, bounds)
+
+ while (iter.next()) {
+ const SkPaint& pnt = looper.paint();
+ if (special) {
+ SkPoint pt;
+ iter.fMatrix->mapXY(x, y, &pt);
+ iter.fDevice->drawSpecial(iter, special.get(),
+ SkScalarRoundToInt(pt.fX),
+ SkScalarRoundToInt(pt.fY), pnt);
+ } else {
+ iter.fDevice->drawBitmap(iter, bitmap, matrix, looper.paint());
+ }
+ }
+
+ LOOPER_END
+}
+
+// this one is non-virtual, so it can be called safely by other canvas apis
+void SkCanvas::internalDrawBitmapRect(const SkBitmap& bitmap, const SkRect* src,
+ const SkRect& dst, const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ if (bitmap.drawsNothing() || dst.isEmpty()) {
+ return;
+ }
+
+ if (nullptr == paint || paint->canComputeFastBounds()) {
+ SkRect storage;
+ if (this->quickReject(paint ? paint->computeFastBounds(dst, &storage) : dst)) {
+ return;
+ }
+ }
+
+ SkLazyPaint lazy;
+ if (nullptr == paint) {
+ paint = lazy.init();
+ }
+
+ LOOPER_BEGIN_CHECK_COMPLETE_OVERWRITE(*paint, SkDrawFilter::kBitmap_Type, &dst,
+ bitmap.isOpaque())
+
+ while (iter.next()) {
+ iter.fDevice->drawBitmapRect(iter, bitmap, src, dst, looper.paint(), constraint);
+ }
+
+ LOOPER_END
+}
+
+void SkCanvas::onDrawBitmapRect(const SkBitmap& bitmap, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawBitmapRectToRect()");
+ SkDEBUGCODE(bitmap.validate();)
+ this->internalDrawBitmapRect(bitmap, src, dst, paint, constraint);
+}
+
+void SkCanvas::onDrawImageNine(const SkImage* image, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawImageNine()");
+
+ if (nullptr == paint || paint->canComputeFastBounds()) {
+ SkRect storage;
+ if (this->quickReject(paint ? paint->computeFastBounds(dst, &storage) : dst)) {
+ return;
+ }
+ }
+
+ SkLazyPaint lazy;
+ if (nullptr == paint) {
+ paint = lazy.init();
+ }
+
+ LOOPER_BEGIN(*paint, SkDrawFilter::kBitmap_Type, &dst)
+
+ while (iter.next()) {
+ iter.fDevice->drawImageNine(iter, image, center, dst, looper.paint());
+ }
+
+ LOOPER_END
+}
+
+void SkCanvas::onDrawBitmapNine(const SkBitmap& bitmap, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawBitmapNine()");
+ SkDEBUGCODE(bitmap.validate();)
+
+ if (nullptr == paint || paint->canComputeFastBounds()) {
+ SkRect storage;
+ if (this->quickReject(paint ? paint->computeFastBounds(dst, &storage) : dst)) {
+ return;
+ }
+ }
+
+ SkLazyPaint lazy;
+ if (nullptr == paint) {
+ paint = lazy.init();
+ }
+
+ LOOPER_BEGIN(*paint, SkDrawFilter::kBitmap_Type, &dst)
+
+ while (iter.next()) {
+ iter.fDevice->drawBitmapNine(iter, bitmap, center, dst, looper.paint());
+ }
+
+ LOOPER_END
+}
+
+void SkCanvas::onDrawImageLattice(const SkImage* image, const Lattice& lattice, const SkRect& dst,
+ const SkPaint* paint) {
+ if (nullptr == paint || paint->canComputeFastBounds()) {
+ SkRect storage;
+ if (this->quickReject(paint ? paint->computeFastBounds(dst, &storage) : dst)) {
+ return;
+ }
+ }
+
+ SkLazyPaint lazy;
+ if (nullptr == paint) {
+ paint = lazy.init();
+ }
+
+ LOOPER_BEGIN(*paint, SkDrawFilter::kBitmap_Type, &dst)
+
+ while (iter.next()) {
+ iter.fDevice->drawImageLattice(iter, image, lattice, dst, looper.paint());
+ }
+
+ LOOPER_END
+}
+
+void SkCanvas::onDrawBitmapLattice(const SkBitmap& bitmap, const Lattice& lattice,
+ const SkRect& dst, const SkPaint* paint) {
+ if (nullptr == paint || paint->canComputeFastBounds()) {
+ SkRect storage;
+ if (this->quickReject(paint ? paint->computeFastBounds(dst, &storage) : dst)) {
+ return;
+ }
+ }
+
+ SkLazyPaint lazy;
+ if (nullptr == paint) {
+ paint = lazy.init();
+ }
+
+ LOOPER_BEGIN(*paint, SkDrawFilter::kBitmap_Type, &dst)
+
+ while (iter.next()) {
+ iter.fDevice->drawBitmapLattice(iter, bitmap, lattice, dst, looper.paint());
+ }
+
+ LOOPER_END
+}
+
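+// Small helper that runs a paint through the device's filterTextFlags() hook.
+// It only pays for a copy (via the lazy paint) when the device actually wants
+// different flags; otherwise it aliases the caller's paint.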
+class SkDeviceFilteredPaint {
+public:
+ SkDeviceFilteredPaint(SkBaseDevice* device, const SkPaint& paint) {
+ uint32_t filteredFlags = device->filterTextFlags(paint);
+ if (filteredFlags != paint.getFlags()) {
+ SkPaint* newPaint = fLazy.set(paint);
+ newPaint->setFlags(filteredFlags);
+ fPaint = newPaint;
+ } else {
+ fPaint = &paint;
+ }
+ }
+
+ const SkPaint& paint() const { return *fPaint; }
+
+private:
+ const SkPaint* fPaint;
+ SkLazyPaint fLazy;
+};
+
+void SkCanvas::DrawRect(const SkDraw& draw, const SkPaint& paint,
+ const SkRect& r, SkScalar textSize) {
+ if (paint.getStyle() == SkPaint::kFill_Style) {
+ draw.fDevice->drawRect(draw, r, paint);
+ } else {
+ SkPaint p(paint);
+ p.setStrokeWidth(SkScalarMul(textSize, paint.getStrokeWidth()));
+ draw.fDevice->drawRect(draw, r, p);
+ }
+}
+
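+// Draws underline / strike-through rects for a run of text. The width comes
+// from measureText(), the horizontal start is adjusted for center/right
+// alignment, and the vertical placement and thickness come from the standard
+// kStdUnderline_* / kStdStrikeThru_* fractions of the text size.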
+void SkCanvas::DrawTextDecorations(const SkDraw& draw, const SkPaint& paint,
+ const char text[], size_t byteLength,
+ SkScalar x, SkScalar y) {
+ SkASSERT(byteLength == 0 || text != nullptr);
+
+ // nothing to draw
+ if (text == nullptr || byteLength == 0 ||
+ draw.fRC->isEmpty() ||
+ (paint.getAlpha() == 0 && paint.isSrcOver())) {
+ return;
+ }
+
+ SkScalar width = 0;
+ SkPoint start;
+
+ start.set(0, 0); // to avoid warning
+ if (paint.getFlags() & (SkPaint::kUnderlineText_Flag |
+ SkPaint::kStrikeThruText_Flag)) {
+ width = paint.measureText(text, byteLength);
+
+ SkScalar offsetX = 0;
+ if (paint.getTextAlign() == SkPaint::kCenter_Align) {
+ offsetX = SkScalarHalf(width);
+ } else if (paint.getTextAlign() == SkPaint::kRight_Align) {
+ offsetX = width;
+ }
+ start.set(x - offsetX, y);
+ }
+
+ if (0 == width) {
+ return;
+ }
+
+ uint32_t flags = paint.getFlags();
+
+ if (flags & (SkPaint::kUnderlineText_Flag |
+ SkPaint::kStrikeThruText_Flag)) {
+ SkScalar textSize = paint.getTextSize();
+ SkScalar height = SkScalarMul(textSize, kStdUnderline_Thickness);
+ SkRect r;
+
+ r.fLeft = start.fX;
+ r.fRight = start.fX + width;
+
+ if (flags & SkPaint::kUnderlineText_Flag) {
+ SkScalar offset = SkScalarMulAdd(textSize, kStdUnderline_Offset,
+ start.fY);
+ r.fTop = offset;
+ r.fBottom = offset + height;
+ DrawRect(draw, paint, r, 1);
+ }
+ if (flags & SkPaint::kStrikeThruText_Flag) {
+ SkScalar offset = SkScalarMulAdd(textSize, kStdStrikeThru_Offset,
+ start.fY);
+ r.fTop = offset;
+ r.fBottom = offset + height;
+ DrawRect(draw, paint, r, 1);
+ }
+ }
+}
+
+void SkCanvas::onDrawText(const void* text, size_t byteLength, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ LOOPER_BEGIN(paint, SkDrawFilter::kText_Type, nullptr)
+
+ while (iter.next()) {
+ SkDeviceFilteredPaint dfp(iter.fDevice, looper.paint());
+ iter.fDevice->drawText(iter, text, byteLength, x, y, dfp.paint());
+ DrawTextDecorations(iter, dfp.paint(),
+ static_cast<const char*>(text), byteLength, x, y);
+ }
+
+ LOOPER_END
+}
+
+void SkCanvas::onDrawPosText(const void* text, size_t byteLength, const SkPoint pos[],
+ const SkPaint& paint) {
+ SkPoint textOffset = SkPoint::Make(0, 0);
+
+ LOOPER_BEGIN(paint, SkDrawFilter::kText_Type, nullptr)
+
+ while (iter.next()) {
+ SkDeviceFilteredPaint dfp(iter.fDevice, looper.paint());
+ iter.fDevice->drawPosText(iter, text, byteLength, &pos->fX, 2, textOffset,
+ dfp.paint());
+ }
+
+ LOOPER_END
+}
+
+void SkCanvas::onDrawPosTextH(const void* text, size_t byteLength, const SkScalar xpos[],
+ SkScalar constY, const SkPaint& paint) {
+
+ SkPoint textOffset = SkPoint::Make(0, constY);
+
+ LOOPER_BEGIN(paint, SkDrawFilter::kText_Type, nullptr)
+
+ while (iter.next()) {
+ SkDeviceFilteredPaint dfp(iter.fDevice, looper.paint());
+ iter.fDevice->drawPosText(iter, text, byteLength, xpos, 1, textOffset,
+ dfp.paint());
+ }
+
+ LOOPER_END
+}
+
+void SkCanvas::onDrawTextOnPath(const void* text, size_t byteLength, const SkPath& path,
+ const SkMatrix* matrix, const SkPaint& paint) {
+ LOOPER_BEGIN(paint, SkDrawFilter::kText_Type, nullptr)
+
+ while (iter.next()) {
+ iter.fDevice->drawTextOnPath(iter, text, byteLength, path,
+ matrix, looper.paint());
+ }
+
+ LOOPER_END
+}
+
+void SkCanvas::onDrawTextRSXform(const void* text, size_t byteLength, const SkRSXform xform[],
+ const SkRect* cullRect, const SkPaint& paint) {
+ if (cullRect && this->quickReject(*cullRect)) {
+ return;
+ }
+
+ LOOPER_BEGIN(paint, SkDrawFilter::kText_Type, nullptr)
+
+ while (iter.next()) {
+ iter.fDevice->drawTextRSXform(iter, text, byteLength, xform, looper.paint());
+ }
+
+ LOOPER_END
+}
+
+void SkCanvas::onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+
+ SkRect storage;
+ const SkRect* bounds = nullptr;
+ if (paint.canComputeFastBounds()) {
+ storage = blob->bounds().makeOffset(x, y);
+ SkRect tmp;
+ if (this->quickReject(paint.computeFastBounds(storage, &tmp))) {
+ return;
+ }
+ bounds = &storage;
+ }
+
+ // We cannot filter in the looper as we normally do, because the paint is
+ // incomplete at this point (text-related attributes are embedded within blob run paints).
+ SkDrawFilter* drawFilter = fMCRec->fFilter;
+ fMCRec->fFilter = nullptr;
+
+ LOOPER_BEGIN(paint, SkDrawFilter::kText_Type, bounds)
+
+ while (iter.next()) {
+ SkDeviceFilteredPaint dfp(iter.fDevice, looper.paint());
+ iter.fDevice->drawTextBlob(iter, blob, x, y, dfp.paint(), drawFilter);
+ }
+
+ LOOPER_END
+
+ fMCRec->fFilter = drawFilter;
+}
+
+// These will become non-virtual, so they always call the (virtual) onDraw... method
+void SkCanvas::drawText(const void* text, size_t byteLength, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawText()");
+ if (byteLength) {
+ this->onDrawText(text, byteLength, x, y, paint);
+ }
+}
+void SkCanvas::drawPosText(const void* text, size_t byteLength, const SkPoint pos[],
+ const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawPosText()");
+ if (byteLength) {
+ this->onDrawPosText(text, byteLength, pos, paint);
+ }
+}
+void SkCanvas::drawPosTextH(const void* text, size_t byteLength, const SkScalar xpos[],
+ SkScalar constY, const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawPosTextH()");
+ if (byteLength) {
+ this->onDrawPosTextH(text, byteLength, xpos, constY, paint);
+ }
+}
+void SkCanvas::drawTextOnPath(const void* text, size_t byteLength, const SkPath& path,
+ const SkMatrix* matrix, const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawTextOnPath()");
+ if (byteLength) {
+ this->onDrawTextOnPath(text, byteLength, path, matrix, paint);
+ }
+}
+void SkCanvas::drawTextRSXform(const void* text, size_t byteLength, const SkRSXform xform[],
+ const SkRect* cullRect, const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawTextRSXform()");
+ if (byteLength) {
+ this->onDrawTextRSXform(text, byteLength, xform, cullRect, paint);
+ }
+}
+void SkCanvas::drawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ RETURN_ON_NULL(blob);
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawTextBlob()");
+ this->onDrawTextBlob(blob, x, y, paint);
+}
+
+void SkCanvas::onDrawVertices(VertexMode vmode, int vertexCount,
+ const SkPoint verts[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawVertices()");
+ LOOPER_BEGIN(paint, SkDrawFilter::kPath_Type, nullptr)
+
+ while (iter.next()) {
+ iter.fDevice->drawVertices(iter, vmode, vertexCount, verts, texs,
+ colors, xmode, indices, indexCount,
+ looper.paint());
+ }
+
+ LOOPER_END
+}
+
+void SkCanvas::drawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkXfermode* xmode, const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawPatch()");
+ if (nullptr == cubics) {
+ return;
+ }
+
+ this->onDrawPatch(cubics, colors, texCoords, xmode, paint);
+}
+
+void SkCanvas::onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkXfermode* xmode, const SkPaint& paint) {
+ // Since a patch is always within the convex hull of the control points, we discard it when its
+ // bounding rectangle is completely outside the current clip.
+ SkRect bounds;
+ bounds.set(cubics, SkPatchUtils::kNumCtrlPts);
+ if (this->quickReject(bounds)) {
+ return;
+ }
+
+ LOOPER_BEGIN(paint, SkDrawFilter::kPath_Type, nullptr)
+
+ while (iter.next()) {
+ iter.fDevice->drawPatch(iter, cubics, colors, texCoords, xmode, paint);
+ }
+
+ LOOPER_END
+}
+
+void SkCanvas::drawDrawable(SkDrawable* dr, SkScalar x, SkScalar y) {
+ RETURN_ON_NULL(dr);
+ if (x || y) {
+ SkMatrix matrix = SkMatrix::MakeTrans(x, y);
+ this->onDrawDrawable(dr, &matrix);
+ } else {
+ this->onDrawDrawable(dr, nullptr);
+ }
+}
+
+void SkCanvas::drawDrawable(SkDrawable* dr, const SkMatrix* matrix) {
+ RETURN_ON_NULL(dr);
+ if (matrix && matrix->isIdentity()) {
+ matrix = nullptr;
+ }
+ this->onDrawDrawable(dr, matrix);
+}
+
+void SkCanvas::onDrawDrawable(SkDrawable* dr, const SkMatrix* matrix) {
+ // drawable bounds are no longer reliable (e.g. android displaylist)
+ // so don't use them for quick-reject
+ dr->draw(this, matrix);
+}
+
+void SkCanvas::onDrawAtlas(const SkImage* atlas, const SkRSXform xform[], const SkRect tex[],
+ const SkColor colors[], int count, SkXfermode::Mode mode,
+ const SkRect* cull, const SkPaint* paint) {
+ if (cull && this->quickReject(*cull)) {
+ return;
+ }
+
+ SkPaint pnt;
+ if (paint) {
+ pnt = *paint;
+ }
+
+ LOOPER_BEGIN(pnt, SkDrawFilter::kPath_Type, nullptr)
+ while (iter.next()) {
+ iter.fDevice->drawAtlas(iter, atlas, xform, tex, colors, count, mode, pnt);
+ }
+ LOOPER_END
+}
+
+void SkCanvas::onDrawAnnotation(const SkRect& rect, const char key[], SkData* value) {
+ SkASSERT(key);
+
+ SkPaint paint;
+ LOOPER_BEGIN(paint, SkDrawFilter::kRect_Type, nullptr)
+ while (iter.next()) {
+ iter.fDevice->drawAnnotation(iter, rect, key, value);
+ }
+ LOOPER_END
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// These methods are NOT virtual, and therefore must call back into virtual
+// methods, rather than actually drawing themselves.
+//////////////////////////////////////////////////////////////////////////////
+
+void SkCanvas::drawARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b, SkBlendMode mode) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawARGB()");
+ SkPaint paint;
+
+ paint.setARGB(a, r, g, b);
+ paint.setBlendMode(mode);
+ this->drawPaint(paint);
+}
+
+void SkCanvas::drawColor(SkColor c, SkBlendMode mode) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawColor()");
+ SkPaint paint;
+
+ paint.setColor(c);
+ paint.setBlendMode(mode);
+ this->drawPaint(paint);
+}
+
+void SkCanvas::drawPoint(SkScalar x, SkScalar y, const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawPoint(SkPaint)");
+ SkPoint pt;
+
+ pt.set(x, y);
+ this->drawPoints(kPoints_PointMode, 1, &pt, paint);
+}
+
+void SkCanvas::drawPoint(SkScalar x, SkScalar y, SkColor color) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawPoint(SkColor)");
+ SkPoint pt;
+ SkPaint paint;
+
+ pt.set(x, y);
+ paint.setColor(color);
+ this->drawPoints(kPoints_PointMode, 1, &pt, paint);
+}
+
+void SkCanvas::drawLine(SkScalar x0, SkScalar y0, SkScalar x1, SkScalar y1,
+ const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawLine()");
+ SkPoint pts[2];
+
+ pts[0].set(x0, y0);
+ pts[1].set(x1, y1);
+ this->drawPoints(kLines_PointMode, 2, pts, paint);
+}
+
+void SkCanvas::drawRectCoords(SkScalar left, SkScalar top,
+ SkScalar right, SkScalar bottom,
+ const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawRectCoords()");
+ SkRect r;
+
+ r.set(left, top, right, bottom);
+ this->drawRect(r, paint);
+}
+
+void SkCanvas::drawCircle(SkScalar cx, SkScalar cy, SkScalar radius,
+ const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawCircle()");
+ if (radius < 0) {
+ radius = 0;
+ }
+
+ SkRect r;
+ r.set(cx - radius, cy - radius, cx + radius, cy + radius);
+ this->drawOval(r, paint);
+}
+
+void SkCanvas::drawRoundRect(const SkRect& r, SkScalar rx, SkScalar ry,
+ const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawRoundRect()");
+ if (rx > 0 && ry > 0) {
+ SkRRect rrect;
+ rrect.setRectXY(r, rx, ry);
+ this->drawRRect(rrect, paint);
+ } else {
+ this->drawRect(r, paint);
+ }
+}
+
+void SkCanvas::drawArc(const SkRect& oval, SkScalar startAngle,
+ SkScalar sweepAngle, bool useCenter,
+ const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawArc()");
+ if (oval.isEmpty() || !sweepAngle) {
+ return;
+ }
+ this->onDrawArc(oval, startAngle, sweepAngle, useCenter, paint);
+}
+
+void SkCanvas::drawTextOnPathHV(const void* text, size_t byteLength,
+ const SkPath& path, SkScalar hOffset,
+ SkScalar vOffset, const SkPaint& paint) {
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawTextOnPathHV()");
+ SkMatrix matrix;
+
+ matrix.setTranslate(hOffset, vOffset);
+ this->drawTextOnPath(text, byteLength, path, &matrix, paint);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * This constant is trying to balance the speed of ref'ing a subpicture into a parent picture,
+ * against the playback cost of recursing into the subpicture to get at its actual ops.
+ *
+ * For now we pick a conservatively small value, though measurement (and other heuristics like
+ * the type of ops contained) may justify changing this value.
+ */
+#define kMaxPictureOpsToUnrollInsteadOfRef 1
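+// With the value 1, only trivial (single-op) pictures are played back inline
+// by drawPicture(); anything larger is forwarded to onDrawPicture(), where a
+// recording canvas will typically just ref the picture.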
+
+void SkCanvas::drawPicture(const SkPicture* picture, const SkMatrix* matrix, const SkPaint* paint) {
+ RETURN_ON_NULL(picture);
+
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawPicture()");
+ if (matrix && matrix->isIdentity()) {
+ matrix = nullptr;
+ }
+ if (picture->approximateOpCount() <= kMaxPictureOpsToUnrollInsteadOfRef) {
+ SkAutoCanvasMatrixPaint acmp(this, matrix, paint, picture->cullRect());
+ picture->playback(this);
+ } else {
+ this->onDrawPicture(picture, matrix, paint);
+ }
+}
+
+void SkCanvas::onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint) {
+ if (!paint || paint->canComputeFastBounds()) {
+ SkRect bounds = picture->cullRect();
+ if (paint) {
+ paint->computeFastBounds(bounds, &bounds);
+ }
+ if (matrix) {
+ matrix->mapRect(&bounds);
+ }
+ if (this->quickReject(bounds)) {
+ return;
+ }
+ }
+
+ SkAutoCanvasMatrixPaint acmp(this, matrix, paint, picture->cullRect());
+ picture->playback(this);
+}
+
+#ifdef SK_EXPERIMENTAL_SHADOWING
+void SkCanvas::drawShadowedPicture(const SkPicture* picture,
+ const SkMatrix* matrix,
+ const SkPaint* paint,
+ const SkShadowParams& params) {
+ RETURN_ON_NULL(picture);
+
+ TRACE_EVENT0("disabled-by-default-skia", "SkCanvas::drawShadowedPicture()");
+
+ this->onDrawShadowedPicture(picture, matrix, paint, params);
+}
+
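+// Experimental shadow pipeline: the picture is rendered into canvas-compatible
+// surfaces twice -- once through an SkShadowPaintFilterCanvas to build a depth
+// map from the viewer's point of view, and once normally for the diffuse
+// color. Each light that does not already carry a shadow map then gets one
+// (blurred when variance shadows are requested), and the final image is a rect
+// drawn through SkShadowShader, which combines the depth, diffuse and shadow
+// maps.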
+void SkCanvas::onDrawShadowedPicture(const SkPicture* picture,
+ const SkMatrix* matrix,
+ const SkPaint* paint,
+ const SkShadowParams& params) {
+ if (!paint || paint->canComputeFastBounds()) {
+ SkRect bounds = picture->cullRect();
+ if (paint) {
+ paint->computeFastBounds(bounds, &bounds);
+ }
+ if (matrix) {
+ matrix->mapRect(&bounds);
+ }
+ if (this->quickReject(bounds)) {
+ return;
+ }
+ }
+
+ SkAutoCanvasMatrixPaint acmp(this, matrix, paint, picture->cullRect());
+
+ sk_sp<SkImage> povDepthMap;
+ sk_sp<SkImage> diffuseMap;
+
+ // povDepthMap
+ {
+ SkLights::Builder builder;
+ builder.add(SkLights::Light::MakeDirectional(SkColor3f::Make(1.0f, 1.0f, 1.0f),
+ SkVector3::Make(0.0f, 0.0f, 1.0f)));
+ sk_sp<SkLights> povLight = builder.finish();
+
+ SkImageInfo info = SkImageInfo::Make(picture->cullRect().width(),
+ picture->cullRect().height(),
+ kBGRA_8888_SkColorType,
+ kOpaque_SkAlphaType);
+
+ // Create a new surface (that matches the backend of canvas)
+ // to create the povDepthMap
+ sk_sp<SkSurface> surf(this->makeSurface(info));
+
+ // Wrap another SPFCanvas around the surface
+ sk_sp<SkShadowPaintFilterCanvas> depthMapCanvas =
+ sk_make_sp<SkShadowPaintFilterCanvas>(surf->getCanvas());
+
+ // set the depth map canvas to have the light as the user's POV
+ depthMapCanvas->setLights(std::move(povLight));
+
+ depthMapCanvas->drawPicture(picture);
+ povDepthMap = surf->makeImageSnapshot();
+ }
+
+ // diffuseMap
+ {
+ SkImageInfo info = SkImageInfo::Make(picture->cullRect().width(),
+ picture->cullRect().height(),
+ kBGRA_8888_SkColorType,
+ kOpaque_SkAlphaType);
+
+ sk_sp<SkSurface> surf(this->makeSurface(info));
+ surf->getCanvas()->drawPicture(picture);
+
+ diffuseMap = surf->makeImageSnapshot();
+ }
+
+ sk_sp<SkShader> povDepthShader = povDepthMap->makeShader(SkShader::kClamp_TileMode,
+ SkShader::kClamp_TileMode);
+ sk_sp<SkShader> diffuseShader = diffuseMap->makeShader(SkShader::kClamp_TileMode,
+ SkShader::kClamp_TileMode);
+
+ // TODO: pass the depth to the shader in vertices, or uniforms
+ // so we don't have to render depth and color separately
+ for (int i = 0; i < fLights->numLights(); ++i) {
+ // skip over ambient lights; they don't cast shadows
+ // lights that have shadow maps do not need updating (because lights are immutable)
+ sk_sp<SkImage> depthMap;
+ SkISize shMapSize;
+
+ if (fLights->light(i).getShadowMap() != nullptr) {
+ continue;
+ }
+
+ if (fLights->light(i).isRadial()) {
+ shMapSize.fHeight = 1;
+ shMapSize.fWidth = (int) picture->cullRect().width();
+
+ SkImageInfo info = SkImageInfo::Make(diffuseMap->width(), 1,
+ kBGRA_8888_SkColorType,
+ kOpaque_SkAlphaType);
+
+ // Create new surface (that matches the backend of canvas)
+ // for each shadow map
+ sk_sp<SkSurface> surf(this->makeSurface(info));
+
+ // Wrap another SPFCanvas around the surface
+ SkCanvas* depthMapCanvas = surf->getCanvas();
+
+ SkLights::Builder builder;
+ builder.add(fLights->light(i));
+ sk_sp<SkLights> curLight = builder.finish();
+
+ sk_sp<SkShader> shadowMapShader;
+ shadowMapShader = SkRadialShadowMapShader::Make(
+ povDepthShader, curLight,
+ (int) picture->cullRect().width(),
+ (int) picture->cullRect().height());
+
+ SkPaint shadowMapPaint;
+ shadowMapPaint.setShader(std::move(shadowMapShader));
+
+ depthMapCanvas->setLights(curLight);
+
+ depthMapCanvas->drawRect(SkRect::MakeIWH(diffuseMap->width(),
+ diffuseMap->height()),
+ shadowMapPaint);
+
+ depthMap = surf->makeImageSnapshot();
+
+ } else {
+ // TODO: compute the correct size of the depth map from the light properties
+ // TODO: maybe add a kDepth_8_SkColorType
+ // TODO: find actual max depth of picture
+ shMapSize = SkShadowPaintFilterCanvas::ComputeDepthMapSize(
+ fLights->light(i), 255,
+ (int) picture->cullRect().width(),
+ (int) picture->cullRect().height());
+
+ SkImageInfo info = SkImageInfo::Make(shMapSize.fWidth, shMapSize.fHeight,
+ kBGRA_8888_SkColorType,
+ kOpaque_SkAlphaType);
+
+ // Create a new surface (that matches the backend of canvas)
+ // for each shadow map
+ sk_sp<SkSurface> surf(this->makeSurface(info));
+
+ // Wrap another SPFCanvas around the surface
+ sk_sp<SkShadowPaintFilterCanvas> depthMapCanvas =
+ sk_make_sp<SkShadowPaintFilterCanvas>(surf->getCanvas());
+ depthMapCanvas->setShadowParams(params);
+
+ // set the depth map canvas to have the light we're drawing.
+ SkLights::Builder builder;
+ builder.add(fLights->light(i));
+ sk_sp<SkLights> curLight = builder.finish();
+ depthMapCanvas->setLights(std::move(curLight));
+
+ depthMapCanvas->drawPicture(picture);
+ depthMap = surf->makeImageSnapshot();
+ }
+
+ if (params.fType == SkShadowParams::kNoBlur_ShadowType) {
+ fLights->light(i).setShadowMap(std::move(depthMap));
+ } else if (params.fType == SkShadowParams::kVariance_ShadowType) {
+ // we blur the variance map
+ SkPaint blurPaint;
+ blurPaint.setImageFilter(SkImageFilter::MakeBlur(params.fShadowRadius,
+ params.fShadowRadius, nullptr));
+
+ SkImageInfo blurInfo = SkImageInfo::Make(shMapSize.fWidth, shMapSize.fHeight,
+ kBGRA_8888_SkColorType,
+ kOpaque_SkAlphaType);
+
+ sk_sp<SkSurface> blurSurf(this->makeSurface(blurInfo));
+
+ blurSurf->getCanvas()->drawImage(std::move(depthMap), 0, 0, &blurPaint);
+
+ fLights->light(i).setShadowMap(blurSurf->makeImageSnapshot());
+ }
+ }
+
+ SkPaint shadowPaint;
+ sk_sp<SkShader> shadowShader = SkShadowShader::Make(std::move(povDepthShader),
+ std::move(diffuseShader),
+ fLights,
+ diffuseMap->width(),
+ diffuseMap->height(),
+ params);
+
+ shadowPaint.setShader(shadowShader);
+
+ this->drawRect(SkRect::MakeIWH(diffuseMap->width(), diffuseMap->height()), shadowPaint);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
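+// LayerIter is a thin public wrapper around the private SkDrawIter: the
+// iterator is placement-new'd into fStorage (the static_assert below guards
+// the buffer size) and exposes the device, matrix, clip and paint of each
+// layer in turn.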
+SkCanvas::LayerIter::LayerIter(SkCanvas* canvas) {
+ static_assert(sizeof(fStorage) >= sizeof(SkDrawIter), "fStorage_too_small");
+
+ SkASSERT(canvas);
+
+ fImpl = new (fStorage) SkDrawIter(canvas);
+ fDone = !fImpl->next();
+}
+
+SkCanvas::LayerIter::~LayerIter() {
+ fImpl->~SkDrawIter();
+}
+
+void SkCanvas::LayerIter::next() {
+ fDone = !fImpl->next();
+}
+
+SkBaseDevice* SkCanvas::LayerIter::device() const {
+ return fImpl->getDevice();
+}
+
+const SkMatrix& SkCanvas::LayerIter::matrix() const {
+ return fImpl->getMatrix();
+}
+
+const SkPaint& SkCanvas::LayerIter::paint() const {
+ const SkPaint* paint = fImpl->getPaint();
+ if (nullptr == paint) {
+ paint = &fDefaultPaint;
+ }
+ return *paint;
+}
+
+const SkRasterClip& SkCanvas::LayerIter::clip() const { return fImpl->getClip(); }
+int SkCanvas::LayerIter::x() const { return fImpl->getX(); }
+int SkCanvas::LayerIter::y() const { return fImpl->getY(); }
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkCanvasClipVisitor::~SkCanvasClipVisitor() { }
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool supported_for_raster_canvas(const SkImageInfo& info) {
+ switch (info.alphaType()) {
+ case kPremul_SkAlphaType:
+ case kOpaque_SkAlphaType:
+ break;
+ default:
+ return false;
+ }
+
+ switch (info.colorType()) {
+ case kAlpha_8_SkColorType:
+ case kRGB_565_SkColorType:
+ case kN32_SkColorType:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
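+// NewRasterDirect() wraps caller-owned pixel memory; the returned canvas draws
+// straight into it and never takes ownership, and nullptr is returned for
+// unsupported configs (see supported_for_raster_canvas() above). Illustrative
+// use (names are hypothetical):
+//
+//   uint32_t pixels[64 * 64];
+//   SkImageInfo info = SkImageInfo::MakeN32Premul(64, 64);
+//   SkCanvas* canvas = SkCanvas::NewRasterDirect(info, pixels, 64 * sizeof(uint32_t));
+//   // ... draw with canvas ...
+//   delete canvas;   // the pixels stay valid and owned by the caller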
+SkCanvas* SkCanvas::NewRasterDirect(const SkImageInfo& info, void* pixels, size_t rowBytes) {
+ if (!supported_for_raster_canvas(info)) {
+ return nullptr;
+ }
+
+ SkBitmap bitmap;
+ if (!bitmap.installPixels(info, pixels, rowBytes)) {
+ return nullptr;
+ }
+ return new SkCanvas(bitmap);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
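+// RAII helper used by the picture-drawing paths above: when a paint is
+// supplied it pushes a saveLayer() whose bounds have been mapped through the
+// matrix; with only a matrix it does a plain save(); the matrix (if any) is
+// then concatenated. The destructor restores back to the recorded save count.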
+SkAutoCanvasMatrixPaint::SkAutoCanvasMatrixPaint(SkCanvas* canvas, const SkMatrix* matrix,
+ const SkPaint* paint, const SkRect& bounds)
+ : fCanvas(canvas)
+ , fSaveCount(canvas->getSaveCount())
+{
+ if (paint) {
+ SkRect newBounds = bounds;
+ if (matrix) {
+ matrix->mapRect(&newBounds);
+ }
+ canvas->saveLayer(&newBounds, paint);
+ } else if (matrix) {
+ canvas->save();
+ }
+
+ if (matrix) {
+ canvas->concat(*matrix);
+ }
+}
+
+SkAutoCanvasMatrixPaint::~SkAutoCanvasMatrixPaint() {
+ fCanvas->restoreToCount(fSaveCount);
+}
+
+#ifdef SK_SUPPORT_LEGACY_NEW_SURFACE_API
+SkSurface* SkCanvas::newSurface(const SkImageInfo& info, const SkSurfaceProps* props) {
+ return this->makeSurface(info, props).release();
+}
+#endif
+
+/////////////////////////////////
+
+const SkCanvas::ClipOp SkCanvas::kDifference_Op;
+const SkCanvas::ClipOp SkCanvas::kIntersect_Op;
+const SkCanvas::ClipOp SkCanvas::kUnion_Op;
+const SkCanvas::ClipOp SkCanvas::kXOR_Op;
+const SkCanvas::ClipOp SkCanvas::kReverseDifference_Op;
+const SkCanvas::ClipOp SkCanvas::kReplace_Op;
+
+static_assert((int)SkRegion::kDifference_Op == (int)kDifference_SkClipOp, "");
+static_assert((int)SkRegion::kIntersect_Op == (int)kIntersect_SkClipOp, "");
+static_assert((int)SkRegion::kUnion_Op == (int)kUnion_SkClipOp, "");
+static_assert((int)SkRegion::kXOR_Op == (int)kXOR_SkClipOp, "");
+static_assert((int)SkRegion::kReverseDifference_Op == (int)kReverseDifference_SkClipOp, "");
+static_assert((int)SkRegion::kReplace_Op == (int)kReplace_SkClipOp, "");
diff --git a/gfx/skia/skia/src/core/SkCanvasPriv.h b/gfx/skia/skia/src/core/SkCanvasPriv.h
new file mode 100644
index 000000000..dfae154ec
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCanvasPriv.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCanvasPriv_DEFINED
+#define SkCanvasPriv_DEFINED
+
+#include "SkCanvas.h"
+
+class SkAutoCanvasMatrixPaint : SkNoncopyable {
+public:
+ SkAutoCanvasMatrixPaint(SkCanvas*, const SkMatrix*, const SkPaint*, const SkRect& bounds);
+ ~SkAutoCanvasMatrixPaint();
+
+private:
+ SkCanvas* fCanvas;
+ int fSaveCount;
+};
+
+#endif
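
SkAutoCanvasMatrixPaint pairs with the constructor and destructor defined in SkCanvas.cpp above: it saves (or saveLayers) on construction and restores to the recorded save count on destruction. A usage sketch, with the matrix, paint and bounds values made up for illustration:

#include "SkCanvas.h"
#include "SkCanvasPriv.h"
#include "SkMatrix.h"
#include "SkPaint.h"

static void draw_scaled_with_alpha(SkCanvas* canvas) {
    SkMatrix matrix = SkMatrix::MakeScale(2.0f);
    SkPaint layerPaint;
    layerPaint.setAlpha(128);
    SkRect bounds = SkRect::MakeWH(100, 100);
    {
        // Constructor: saveLayer(bounds mapped through matrix, layerPaint), then concat(matrix).
        SkAutoCanvasMatrixPaint acmp(canvas, &matrix, &layerPaint, bounds);
        canvas->drawRect(SkRect::MakeWH(50, 50), SkPaint());
    }   // Destructor: restoreToCount() pops the layer and the matrix together.
}
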
diff --git a/gfx/skia/skia/src/core/SkChunkAlloc.cpp b/gfx/skia/skia/src/core/SkChunkAlloc.cpp
new file mode 100644
index 000000000..90650a7ad
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkChunkAlloc.cpp
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkChunkAlloc.h"
+
+// Don't malloc any chunks smaller than this
+#define MIN_CHUNKALLOC_BLOCK_SIZE 1024
+
+// Return the new min blocksize given the current value
+static size_t increase_next_size(size_t size) {
+ return size + (size >> 1);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct SkChunkAlloc::Block {
+ Block* fNext;
+ size_t fFreeSize;
+ char* fFreePtr;
+ // data[] follows
+
+ size_t blockSize() const {
+ char* start = this->startOfData();
+ size_t bytes = fFreePtr - start;
+ return fFreeSize + bytes;
+ }
+
+ void reset() {
+ fNext = nullptr;
+ fFreeSize = this->blockSize();
+ fFreePtr = this->startOfData();
+ }
+
+ char* startOfData() const {
+ return reinterpret_cast<char*>(SkAlign8(reinterpret_cast<size_t>(this + 1)));
+ }
+
+ static void FreeChain(Block* block) {
+ while (block) {
+ Block* next = block->fNext;
+ sk_free(block);
+ block = next;
+ }
+ }
+
+ bool contains(const void* addr) const {
+ const char* ptr = reinterpret_cast<const char*>(addr);
+ return ptr >= this->startOfData() && ptr < fFreePtr;
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkChunkAlloc::SkChunkAlloc(size_t minSize) {
+ if (minSize < MIN_CHUNKALLOC_BLOCK_SIZE) {
+ minSize = MIN_CHUNKALLOC_BLOCK_SIZE;
+ }
+
+ fBlock = nullptr;
+ fMinSize = minSize;
+ fChunkSize = fMinSize;
+ fTotalCapacity = 0;
+ fTotalUsed = 0;
+ SkDEBUGCODE(fTotalLost = 0;)
+ SkDEBUGCODE(fBlockCount = 0;)
+}
+
+SkChunkAlloc::~SkChunkAlloc() {
+ this->reset();
+}
+
+void SkChunkAlloc::reset() {
+ Block::FreeChain(fBlock);
+ fBlock = nullptr;
+ fChunkSize = fMinSize; // reset to our initial minSize
+ fTotalCapacity = 0;
+ fTotalUsed = 0;
+ SkDEBUGCODE(fTotalLost = 0;)
+ SkDEBUGCODE(fBlockCount = 0;)
+}
+
+void SkChunkAlloc::rewind() {
+ SkDEBUGCODE(this->validate();)
+
+ Block* largest = fBlock;
+
+ if (largest) {
+ Block* next;
+ for (Block* cur = largest->fNext; cur; cur = next) {
+ next = cur->fNext;
+ if (cur->blockSize() > largest->blockSize()) {
+ sk_free(largest);
+ largest = cur;
+ } else {
+ sk_free(cur);
+ }
+ }
+
+ largest->reset();
+ fTotalCapacity = largest->blockSize();
+ SkDEBUGCODE(fBlockCount = 1;)
+ } else {
+ fTotalCapacity = 0;
+ SkDEBUGCODE(fBlockCount = 0;)
+ }
+
+ fBlock = largest;
+ fChunkSize = fMinSize; // reset to our initial minSize
+ fTotalUsed = 0;
+ SkDEBUGCODE(fTotalLost = 0;)
+ SkDEBUGCODE(this->validate();)
+}
+
+SkChunkAlloc::Block* SkChunkAlloc::newBlock(size_t bytes, AllocFailType ftype) {
+ size_t size = bytes;
+ if (size < fChunkSize) {
+ size = fChunkSize;
+ }
+
+ Block* block = (Block*)sk_malloc_flags(SkAlign8(sizeof(Block)) + size,
+ ftype == kThrow_AllocFailType ? SK_MALLOC_THROW : 0);
+
+ if (block) {
+ block->fFreeSize = size;
+ block->fFreePtr = block->startOfData();
+
+ fTotalCapacity += size;
+ SkDEBUGCODE(fBlockCount += 1;)
+
+ fChunkSize = increase_next_size(fChunkSize);
+ }
+ return block;
+}
+
+SkChunkAlloc::Block* SkChunkAlloc::addBlockIfNecessary(size_t bytes, AllocFailType ftype) {
+ SkASSERT(SkIsAlign8(bytes));
+
+ if (!fBlock || bytes > fBlock->fFreeSize) {
+ Block* block = this->newBlock(bytes, ftype);
+ if (!block) {
+ return nullptr;
+ }
+#ifdef SK_DEBUG
+ if (fBlock) {
+ fTotalLost += fBlock->fFreeSize;
+ }
+#endif
+ block->fNext = fBlock;
+ fBlock = block;
+ }
+
+ SkASSERT(fBlock && bytes <= fBlock->fFreeSize);
+ return fBlock;
+}
+
+void* SkChunkAlloc::alloc(size_t bytes, AllocFailType ftype) {
+ SkDEBUGCODE(this->validate();)
+
+ bytes = SkAlign8(bytes);
+
+ Block* block = this->addBlockIfNecessary(bytes, ftype);
+ if (!block) {
+ return nullptr;
+ }
+
+ char* ptr = block->fFreePtr;
+
+ fTotalUsed += bytes;
+ block->fFreeSize -= bytes;
+ block->fFreePtr = ptr + bytes;
+ SkDEBUGCODE(this->validate();)
+ SkASSERT(SkIsAlign8((size_t)ptr));
+ return ptr;
+}
+
+size_t SkChunkAlloc::unalloc(void* ptr) {
+ SkDEBUGCODE(this->validate();)
+
+ size_t bytes = 0;
+ Block* block = fBlock;
+ if (block) {
+ char* cPtr = reinterpret_cast<char*>(ptr);
+ char* start = block->startOfData();
+ if (start <= cPtr && cPtr < block->fFreePtr) {
+ bytes = block->fFreePtr - cPtr;
+ fTotalUsed -= bytes;
+ block->fFreeSize += bytes;
+ block->fFreePtr = cPtr;
+ }
+ }
+ SkDEBUGCODE(this->validate();)
+ return bytes;
+}
+
+bool SkChunkAlloc::contains(const void* addr) const {
+ const Block* block = fBlock;
+ while (block) {
+ if (block->contains(addr)) {
+ return true;
+ }
+ block = block->fNext;
+ }
+ return false;
+}
+
+#ifdef SK_DEBUG
+void SkChunkAlloc::validate() {
+ int numBlocks = 0;
+ size_t totCapacity = 0;
+ size_t totUsed = 0;
+ size_t totLost = 0;
+ size_t totAvailable = 0;
+
+ for (Block* temp = fBlock; temp; temp = temp->fNext) {
+ ++numBlocks;
+ totCapacity += temp->blockSize();
+ totUsed += temp->fFreePtr - temp->startOfData();
+ if (temp == fBlock) {
+ totAvailable += temp->fFreeSize;
+ } else {
+ totLost += temp->fFreeSize;
+ }
+ }
+
+ SkASSERT(fBlockCount == numBlocks);
+ SkASSERT(fTotalCapacity == totCapacity);
+ SkASSERT(fTotalUsed == totUsed);
+ SkASSERT(fTotalLost == totLost);
+ SkASSERT(totCapacity == totUsed + totLost + totAvailable);
+}
+#endif
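
SkChunkAlloc is a bump allocator: alloc() hands out 8-byte-aligned pieces of the current block, unalloc() can only return the most recent allocation in that block, and rewind()/reset() recycle or free the whole chain. A small usage sketch with arbitrary sizes:

#include "SkChunkAlloc.h"

static void chunk_alloc_example() {
    SkChunkAlloc pool(1024);                                   // minimum block size in bytes

    void* a = pool.alloc(100, SkChunkAlloc::kThrow_AllocFailType);
    void* b = pool.alloc(200, SkChunkAlloc::kThrow_AllocFailType);

    SkASSERT(pool.contains(a) && pool.contains(b));

    pool.unalloc(b);   // gives back the bytes of the most recent allocation only
    pool.rewind();     // keep the largest block, free the rest, mark everything unused
    pool.reset();      // free every block; the pool is empty again
}
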
diff --git a/gfx/skia/skia/src/core/SkClipStack.cpp b/gfx/skia/skia/src/core/SkClipStack.cpp
new file mode 100644
index 000000000..f155b49c4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkClipStack.cpp
@@ -0,0 +1,990 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAtomics.h"
+#include "SkCanvas.h"
+#include "SkClipStack.h"
+#include "SkPath.h"
+#include "SkPathOps.h"
+
+#include <new>
+
+
+// 0-2 are reserved for invalid, empty & wide-open
+static const int32_t kFirstUnreservedGenID = 3;
+int32_t SkClipStack::gGenID = kFirstUnreservedGenID;
+
+SkClipStack::Element::Element(const Element& that) {
+ switch (that.getType()) {
+ case kEmpty_Type:
+ fRRect.setEmpty();
+ fPath.reset();
+ break;
+ case kRect_Type: // Rect uses rrect
+ case kRRect_Type:
+ fPath.reset();
+ fRRect = that.fRRect;
+ break;
+ case kPath_Type:
+ fPath.set(that.getPath());
+ break;
+ }
+
+ fSaveCount = that.fSaveCount;
+ fOp = that.fOp;
+ fType = that.fType;
+ fDoAA = that.fDoAA;
+ fFiniteBoundType = that.fFiniteBoundType;
+ fFiniteBound = that.fFiniteBound;
+ fIsIntersectionOfRects = that.fIsIntersectionOfRects;
+ fGenID = that.fGenID;
+}
+
+bool SkClipStack::Element::operator== (const Element& element) const {
+ if (this == &element) {
+ return true;
+ }
+ if (fOp != element.fOp ||
+ fType != element.fType ||
+ fDoAA != element.fDoAA ||
+ fSaveCount != element.fSaveCount) {
+ return false;
+ }
+ switch (fType) {
+ case kPath_Type:
+ return this->getPath() == element.getPath();
+ case kRRect_Type:
+ return fRRect == element.fRRect;
+ case kRect_Type:
+ return this->getRect() == element.getRect();
+ case kEmpty_Type:
+ return true;
+ default:
+ SkDEBUGFAIL("Unexpected type.");
+ return false;
+ }
+}
+
+void SkClipStack::Element::replay(SkCanvasClipVisitor* visitor) const {
+ static const SkRect kEmptyRect = { 0, 0, 0, 0 };
+
+ switch (fType) {
+ case kPath_Type:
+ visitor->clipPath(this->getPath(), this->getOp(), this->isAA());
+ break;
+ case kRRect_Type:
+ visitor->clipRRect(this->getRRect(), this->getOp(), this->isAA());
+ break;
+ case kRect_Type:
+ visitor->clipRect(this->getRect(), this->getOp(), this->isAA());
+ break;
+ case kEmpty_Type:
+ visitor->clipRect(kEmptyRect, SkCanvas::kIntersect_Op, false);
+ break;
+ }
+}
+
+void SkClipStack::Element::invertShapeFillType() {
+ switch (fType) {
+ case kRect_Type:
+ fPath.init();
+ fPath.get()->addRect(this->getRect());
+ fPath.get()->setFillType(SkPath::kInverseEvenOdd_FillType);
+ fType = kPath_Type;
+ break;
+ case kRRect_Type:
+ fPath.init();
+ fPath.get()->addRRect(fRRect);
+ fPath.get()->setFillType(SkPath::kInverseEvenOdd_FillType);
+ fType = kPath_Type;
+ break;
+ case kPath_Type:
+ fPath.get()->toggleInverseFillType();
+ break;
+ case kEmpty_Type:
+ // Should this be set to an empty, inverse-filled path?
+ break;
+ }
+}
+
+void SkClipStack::Element::initPath(int saveCount, const SkPath& path, SkCanvas::ClipOp op,
+ bool doAA) {
+ if (!path.isInverseFillType()) {
+ SkRect r;
+ if (path.isRect(&r)) {
+ this->initRect(saveCount, r, op, doAA);
+ return;
+ }
+ SkRect ovalRect;
+ if (path.isOval(&ovalRect)) {
+ SkRRect rrect;
+ rrect.setOval(ovalRect);
+ this->initRRect(saveCount, rrect, op, doAA);
+ return;
+ }
+ }
+ fPath.set(path);
+ fPath.get()->setIsVolatile(true);
+ fType = kPath_Type;
+ this->initCommon(saveCount, op, doAA);
+}
+
+void SkClipStack::Element::asPath(SkPath* path) const {
+ switch (fType) {
+ case kEmpty_Type:
+ path->reset();
+ path->setIsVolatile(true);
+ break;
+ case kRect_Type:
+ path->reset();
+ path->addRect(this->getRect());
+ path->setIsVolatile(true);
+ break;
+ case kRRect_Type:
+ path->reset();
+ path->addRRect(fRRect);
+ path->setIsVolatile(true);
+ break;
+ case kPath_Type:
+ *path = *fPath.get();
+ break;
+ }
+ path->setIsVolatile(true);
+}
+
+void SkClipStack::Element::setEmpty() {
+ fType = kEmpty_Type;
+ fFiniteBound.setEmpty();
+ fFiniteBoundType = kNormal_BoundsType;
+ fIsIntersectionOfRects = false;
+ fRRect.setEmpty();
+ fPath.reset();
+ fGenID = kEmptyGenID;
+ SkDEBUGCODE(this->checkEmpty();)
+}
+
+void SkClipStack::Element::checkEmpty() const {
+ SkASSERT(fFiniteBound.isEmpty());
+ SkASSERT(kNormal_BoundsType == fFiniteBoundType);
+ SkASSERT(!fIsIntersectionOfRects);
+ SkASSERT(kEmptyGenID == fGenID);
+ SkASSERT(fRRect.isEmpty());
+ SkASSERT(!fPath.isValid());
+}
+
+bool SkClipStack::Element::canBeIntersectedInPlace(int saveCount, SkCanvas::ClipOp op) const {
+ if (kEmpty_Type == fType &&
+ (SkCanvas::kDifference_Op == op || SkCanvas::kIntersect_Op == op)) {
+ return true;
+ }
+ // Only clips within the same save/restore frame (as captured by
+ // the save count) can be merged
+ return fSaveCount == saveCount &&
+ SkCanvas::kIntersect_Op == op &&
+ (SkCanvas::kIntersect_Op == fOp || SkCanvas::kReplace_Op == fOp);
+}
+
+bool SkClipStack::Element::rectRectIntersectAllowed(const SkRect& newR, bool newAA) const {
+ SkASSERT(kRect_Type == fType);
+
+ if (fDoAA == newAA) {
+ // if the AA setting is the same there is no issue
+ return true;
+ }
+
+ if (!SkRect::Intersects(this->getRect(), newR)) {
+ // The calling code will correctly set the result to the empty clip
+ return true;
+ }
+
+ if (this->getRect().contains(newR)) {
+ // if the new rect carves out a portion of the old one there is no
+ // issue
+ return true;
+ }
+
+ // So either the two overlap in some complex manner or newR contains oldR.
+ // In the first case, the edges will require different AA. In the second,
+ // the AA setting that would be carried forward is incorrect (e.g., oldR
+ // is AA while newR is BW; since newR contains oldR, oldR would be
+ // drawn BW) because the new AA setting will predominate.
+ return false;
+}
+
+// a mirror of combineBoundsRevDiff
+void SkClipStack::Element::combineBoundsDiff(FillCombo combination, const SkRect& prevFinite) {
+ switch (combination) {
+ case kInvPrev_InvCur_FillCombo:
+ // In this case the only pixels that can remain set
+ // are inside the current clip rect since the extensions
+ // to infinity of both clips cancel out and whatever
+ // is outside of the current clip is removed
+ fFiniteBoundType = kNormal_BoundsType;
+ break;
+ case kInvPrev_Cur_FillCombo:
+ // In this case the current op is finite so the only pixels
+ // that aren't set are whatever isn't set in the previous
+ // clip and whatever this clip carves out
+ fFiniteBound.join(prevFinite);
+ fFiniteBoundType = kInsideOut_BoundsType;
+ break;
+ case kPrev_InvCur_FillCombo:
+ // In this case everything outside of this clip's bound
+ // is erased, so the only pixels that can remain set
+ // occur w/in the intersection of the two finite bounds
+ if (!fFiniteBound.intersect(prevFinite)) {
+ fFiniteBound.setEmpty();
+ fGenID = kEmptyGenID;
+ }
+ fFiniteBoundType = kNormal_BoundsType;
+ break;
+ case kPrev_Cur_FillCombo:
+ // The most conservative result bound is that of the
+ // prior clip. This could be wildly incorrect if the
+ // second clip either exactly matches the first clip
+ // (which should yield the empty set) or reduces the
+ // size of the prior bound (e.g., if the second clip
+ // exactly matched the bottom half of the prior clip).
+ // We ignore these two possibilities.
+ fFiniteBound = prevFinite;
+ break;
+ default:
+ SkDEBUGFAIL("SkClipStack::Element::combineBoundsDiff Invalid fill combination");
+ break;
+ }
+}
+
+void SkClipStack::Element::combineBoundsXOR(int combination, const SkRect& prevFinite) {
+
+ switch (combination) {
+ case kInvPrev_Cur_FillCombo: // fall through
+ case kPrev_InvCur_FillCombo:
+ // With only one of the clips inverted the result will always
+ // extend to infinity. The only pixels that may be un-writeable
+ // lie within the union of the two finite bounds
+ fFiniteBound.join(prevFinite);
+ fFiniteBoundType = kInsideOut_BoundsType;
+ break;
+ case kInvPrev_InvCur_FillCombo:
+ // The only pixels that can survive are within the
+ // union of the two bounding boxes since the extensions
+ // to infinity of both clips cancel out
+ // fall through!
+ case kPrev_Cur_FillCombo:
+ // The most conservative bound for xor is the
+ // union of the two bounds. If the two clips exactly overlapped
+ // the xor could yield the empty set. Similarly the xor
+ // could reduce the size of the original clip's bound (e.g.,
+ // if the second clip exactly matched the bottom half of the
+ // first clip). We ignore these two cases.
+ fFiniteBound.join(prevFinite);
+ fFiniteBoundType = kNormal_BoundsType;
+ break;
+ default:
+ SkDEBUGFAIL("SkClipStack::Element::combineBoundsXOR Invalid fill combination");
+ break;
+ }
+}
+
+// a mirror of combineBoundsIntersection
+void SkClipStack::Element::combineBoundsUnion(int combination, const SkRect& prevFinite) {
+
+ switch (combination) {
+ case kInvPrev_InvCur_FillCombo:
+ if (!fFiniteBound.intersect(prevFinite)) {
+ fFiniteBound.setEmpty();
+ fGenID = kWideOpenGenID;
+ }
+ fFiniteBoundType = kInsideOut_BoundsType;
+ break;
+ case kInvPrev_Cur_FillCombo:
+ // The only pixels that won't be drawable are inside
+ // the prior clip's finite bound
+ fFiniteBound = prevFinite;
+ fFiniteBoundType = kInsideOut_BoundsType;
+ break;
+ case kPrev_InvCur_FillCombo:
+ // The only pixels that won't be drawable are inside
+ // this clip's finite bound
+ break;
+ case kPrev_Cur_FillCombo:
+ fFiniteBound.join(prevFinite);
+ break;
+ default:
+ SkDEBUGFAIL("SkClipStack::Element::combineBoundsUnion Invalid fill combination");
+ break;
+ }
+}
+
+// a mirror of combineBoundsUnion
+void SkClipStack::Element::combineBoundsIntersection(int combination, const SkRect& prevFinite) {
+
+ switch (combination) {
+ case kInvPrev_InvCur_FillCombo:
+ // The only pixels that aren't writable in this case
+ // occur in the union of the two finite bounds
+ fFiniteBound.join(prevFinite);
+ fFiniteBoundType = kInsideOut_BoundsType;
+ break;
+ case kInvPrev_Cur_FillCombo:
+ // In this case the only pixels that will remain writeable
+ // are within the current clip
+ break;
+ case kPrev_InvCur_FillCombo:
+ // In this case the only pixels that will remain writeable
+ // are within the previous clip
+ fFiniteBound = prevFinite;
+ fFiniteBoundType = kNormal_BoundsType;
+ break;
+ case kPrev_Cur_FillCombo:
+ if (!fFiniteBound.intersect(prevFinite)) {
+ this->setEmpty();
+ }
+ break;
+ default:
+ SkDEBUGFAIL("SkClipStack::Element::combineBoundsIntersection Invalid fill combination");
+ break;
+ }
+}
+
+// a mirror of combineBoundsDiff
+void SkClipStack::Element::combineBoundsRevDiff(int combination, const SkRect& prevFinite) {
+
+ switch (combination) {
+ case kInvPrev_InvCur_FillCombo:
+ // The only pixels that can survive are in the
+ // previous bound since the extensions to infinity in
+ // both clips cancel out
+ fFiniteBound = prevFinite;
+ fFiniteBoundType = kNormal_BoundsType;
+ break;
+ case kInvPrev_Cur_FillCombo:
+ if (!fFiniteBound.intersect(prevFinite)) {
+ this->setEmpty();
+ } else {
+ fFiniteBoundType = kNormal_BoundsType;
+ }
+ break;
+ case kPrev_InvCur_FillCombo:
+ fFiniteBound.join(prevFinite);
+ fFiniteBoundType = kInsideOut_BoundsType;
+ break;
+ case kPrev_Cur_FillCombo:
+ // Fall through - as with the kDifference_Op case, the
+ // most conservative result bound is the bound of the
+ // current clip. The prior clip could reduce the size of this
+ // bound (as in the kDifference_Op case) but we are ignoring
+ // those cases.
+ break;
+ default:
+ SkDEBUGFAIL("SkClipStack::Element::combineBoundsRevDiff Invalid fill combination");
+ break;
+ }
+}
+
+void SkClipStack::Element::updateBoundAndGenID(const Element* prior) {
+ // We set this first here but we may overwrite it later if we determine that the clip is
+ // either wide-open or empty.
+ fGenID = GetNextGenID();
+
+ // First, optimistically update the current Element's bound information
+ // with the current clip's bound
+ fIsIntersectionOfRects = false;
+ switch (fType) {
+ case kRect_Type:
+ fFiniteBound = this->getRect();
+ fFiniteBoundType = kNormal_BoundsType;
+
+ if (SkCanvas::kReplace_Op == fOp ||
+ (SkCanvas::kIntersect_Op == fOp && nullptr == prior) ||
+ (SkCanvas::kIntersect_Op == fOp && prior->fIsIntersectionOfRects &&
+ prior->rectRectIntersectAllowed(this->getRect(), fDoAA))) {
+ fIsIntersectionOfRects = true;
+ }
+ break;
+ case kRRect_Type:
+ fFiniteBound = fRRect.getBounds();
+ fFiniteBoundType = kNormal_BoundsType;
+ break;
+ case kPath_Type:
+ fFiniteBound = fPath.get()->getBounds();
+
+ if (fPath.get()->isInverseFillType()) {
+ fFiniteBoundType = kInsideOut_BoundsType;
+ } else {
+ fFiniteBoundType = kNormal_BoundsType;
+ }
+ break;
+ case kEmpty_Type:
+ SkDEBUGFAIL("We shouldn't get here with an empty element.");
+ break;
+ }
+
+ if (!fDoAA) {
+ fFiniteBound.set(SkScalarFloorToScalar(fFiniteBound.fLeft+0.45f),
+ SkScalarRoundToScalar(fFiniteBound.fTop),
+ SkScalarRoundToScalar(fFiniteBound.fRight),
+ SkScalarRoundToScalar(fFiniteBound.fBottom));
+ }
+
+ // Now determine the previous Element's bound information taking into
+ // account that there may be no previous clip
+ SkRect prevFinite;
+ SkClipStack::BoundsType prevType;
+
+ if (nullptr == prior) {
+ // no prior clip means the entire plane is writable
+ prevFinite.setEmpty(); // there are no pixels that cannot be drawn to
+ prevType = kInsideOut_BoundsType;
+ } else {
+ prevFinite = prior->fFiniteBound;
+ prevType = prior->fFiniteBoundType;
+ }
+
+ FillCombo combination = kPrev_Cur_FillCombo;
+ if (kInsideOut_BoundsType == fFiniteBoundType) {
+ combination = (FillCombo) (combination | 0x01);
+ }
+ if (kInsideOut_BoundsType == prevType) {
+ combination = (FillCombo) (combination | 0x02);
+ }
+
+ SkASSERT(kInvPrev_InvCur_FillCombo == combination ||
+ kInvPrev_Cur_FillCombo == combination ||
+ kPrev_InvCur_FillCombo == combination ||
+ kPrev_Cur_FillCombo == combination);
+
+ // Now integrate this clip with the prior clips
+ switch (fOp) {
+ case SkCanvas::kDifference_Op:
+ this->combineBoundsDiff(combination, prevFinite);
+ break;
+ case SkCanvas::kXOR_Op:
+ this->combineBoundsXOR(combination, prevFinite);
+ break;
+ case SkCanvas::kUnion_Op:
+ this->combineBoundsUnion(combination, prevFinite);
+ break;
+ case SkCanvas::kIntersect_Op:
+ this->combineBoundsIntersection(combination, prevFinite);
+ break;
+ case SkCanvas::kReverseDifference_Op:
+ this->combineBoundsRevDiff(combination, prevFinite);
+ break;
+ case SkCanvas::kReplace_Op:
+ // Replace just ignores everything prior
+ // The current clip's bound information is already filled in
+ // so nothing to do
+ break;
+ default:
+ SkDebugf("SkCanvas::ClipOp error\n");
+ SkASSERT(0);
+ break;
+ }
+}
+
+// This constant determines how many Elements are allocated together as a block in
+// the deque. As such it needs to balance allocating too much memory vs.
+// incurring allocation/deallocation thrashing. It should roughly correspond to
+// the deepest save/restore stack we expect to see.
+static const int kDefaultElementAllocCnt = 8;
+
+SkClipStack::SkClipStack()
+ : fDeque(sizeof(Element), kDefaultElementAllocCnt)
+ , fSaveCount(0) {
+}
+
+SkClipStack::SkClipStack(const SkClipStack& b)
+ : fDeque(sizeof(Element), kDefaultElementAllocCnt) {
+ *this = b;
+}
+
+SkClipStack::~SkClipStack() {
+ reset();
+}
+
+SkClipStack& SkClipStack::operator=(const SkClipStack& b) {
+ if (this == &b) {
+ return *this;
+ }
+ reset();
+
+ fSaveCount = b.fSaveCount;
+ SkDeque::F2BIter recIter(b.fDeque);
+ for (const Element* element = (const Element*)recIter.next();
+ element != nullptr;
+ element = (const Element*)recIter.next()) {
+ new (fDeque.push_back()) Element(*element);
+ }
+
+ return *this;
+}
+
+bool SkClipStack::operator==(const SkClipStack& b) const {
+ if (this->getTopmostGenID() == b.getTopmostGenID()) {
+ return true;
+ }
+ if (fSaveCount != b.fSaveCount ||
+ fDeque.count() != b.fDeque.count()) {
+ return false;
+ }
+ SkDeque::F2BIter myIter(fDeque);
+ SkDeque::F2BIter bIter(b.fDeque);
+ const Element* myElement = (const Element*)myIter.next();
+ const Element* bElement = (const Element*)bIter.next();
+
+ while (myElement != nullptr && bElement != nullptr) {
+ if (*myElement != *bElement) {
+ return false;
+ }
+ myElement = (const Element*)myIter.next();
+ bElement = (const Element*)bIter.next();
+ }
+ return myElement == nullptr && bElement == nullptr;
+}
+
+void SkClipStack::reset() {
+ // We used a placement new for each object in fDeque, so we're responsible
+ // for calling the destructor on each of them as well.
+ while (!fDeque.empty()) {
+ Element* element = (Element*)fDeque.back();
+ element->~Element();
+ fDeque.pop_back();
+ }
+
+ fSaveCount = 0;
+}
+
+void SkClipStack::save() {
+ fSaveCount += 1;
+}
+
+void SkClipStack::restore() {
+ fSaveCount -= 1;
+ restoreTo(fSaveCount);
+}
+
+void SkClipStack::restoreTo(int saveCount) {
+ while (!fDeque.empty()) {
+ Element* element = (Element*)fDeque.back();
+ if (element->fSaveCount <= saveCount) {
+ break;
+ }
+ element->~Element();
+ fDeque.pop_back();
+ }
+}
+
+void SkClipStack::getBounds(SkRect* canvFiniteBound,
+ BoundsType* boundType,
+ bool* isIntersectionOfRects) const {
+ SkASSERT(canvFiniteBound && boundType);
+
+ Element* element = (Element*)fDeque.back();
+
+ if (nullptr == element) {
+ // the clip is wide open - the infinite plane w/ no pixels un-writeable
+ canvFiniteBound->setEmpty();
+ *boundType = kInsideOut_BoundsType;
+ if (isIntersectionOfRects) {
+ *isIntersectionOfRects = false;
+ }
+ return;
+ }
+
+ *canvFiniteBound = element->fFiniteBound;
+ *boundType = element->fFiniteBoundType;
+ if (isIntersectionOfRects) {
+ *isIntersectionOfRects = element->fIsIntersectionOfRects;
+ }
+}
+
+bool SkClipStack::internalQuickContains(const SkRect& rect) const {
+
+ Iter iter(*this, Iter::kTop_IterStart);
+ const Element* element = iter.prev();
+ while (element != nullptr) {
+ if (SkCanvas::kIntersect_Op != element->getOp() && SkCanvas::kReplace_Op != element->getOp())
+ return false;
+ if (element->isInverseFilled()) {
+ // Part of 'rect' could be trimmed off by the inverse-filled clip element
+ if (SkRect::Intersects(element->getBounds(), rect)) {
+ return false;
+ }
+ } else {
+ if (!element->contains(rect)) {
+ return false;
+ }
+ }
+ if (SkCanvas::kReplace_Op == element->getOp()) {
+ break;
+ }
+ element = iter.prev();
+ }
+ return true;
+}
+
+bool SkClipStack::internalQuickContains(const SkRRect& rrect) const {
+
+ Iter iter(*this, Iter::kTop_IterStart);
+ const Element* element = iter.prev();
+ while (element != nullptr) {
+ if (SkCanvas::kIntersect_Op != element->getOp() && SkCanvas::kReplace_Op != element->getOp())
+ return false;
+ if (element->isInverseFilled()) {
+ // Part of 'rrect' could be trimmed off by the inverse-filled clip element
+ if (SkRect::Intersects(element->getBounds(), rrect.getBounds())) {
+ return false;
+ }
+ } else {
+ if (!element->contains(rrect)) {
+ return false;
+ }
+ }
+ if (SkCanvas::kReplace_Op == element->getOp()) {
+ break;
+ }
+ element = iter.prev();
+ }
+ return true;
+}
+
+bool SkClipStack::asPath(SkPath *path) const {
+ bool isAA = false;
+
+ path->reset();
+ path->setFillType(SkPath::kInverseEvenOdd_FillType);
+
+ SkClipStack::Iter iter(*this, SkClipStack::Iter::kBottom_IterStart);
+ while (const SkClipStack::Element* element = iter.next()) {
+ SkPath operand;
+ if (element->getType() != SkClipStack::Element::kEmpty_Type) {
+ element->asPath(&operand);
+ }
+
+ SkCanvas::ClipOp elementOp = element->getOp();
+ if (elementOp == SkCanvas::kReplace_Op) {
+ *path = operand;
+ } else {
+ Op(*path, operand, (SkPathOp)elementOp, path);
+ }
+
+ // if the prev and curr clips disagree about aa -vs- not, favor the aa request.
+ // perhaps we need an API change to avoid this sort of mixed-signals about
+ // clipping.
+ isAA = (isAA || element->isAA());
+ }
+
+ return isAA;
+}
+
+void SkClipStack::pushElement(const Element& element) {
+ // Use a reverse iterator instead of back() because the Rect case may need the previous element
+ SkDeque::Iter iter(fDeque, SkDeque::Iter::kBack_IterStart);
+ Element* prior = (Element*) iter.prev();
+
+ if (prior) {
+ if (prior->canBeIntersectedInPlace(fSaveCount, element.getOp())) {
+ switch (prior->fType) {
+ case Element::kEmpty_Type:
+ SkDEBUGCODE(prior->checkEmpty();)
+ return;
+ case Element::kRect_Type:
+ if (Element::kRect_Type == element.getType()) {
+ if (prior->rectRectIntersectAllowed(element.getRect(), element.isAA())) {
+ SkRect isectRect;
+ if (!isectRect.intersect(prior->getRect(), element.getRect())) {
+ prior->setEmpty();
+ return;
+ }
+
+ prior->fRRect.setRect(isectRect);
+ prior->fDoAA = element.isAA();
+ Element* priorPrior = (Element*) iter.prev();
+ prior->updateBoundAndGenID(priorPrior);
+ return;
+ }
+ break;
+ }
+ // fallthrough
+ default:
+ if (!SkRect::Intersects(prior->getBounds(), element.getBounds())) {
+ prior->setEmpty();
+ return;
+ }
+ break;
+ }
+ } else if (SkCanvas::kReplace_Op == element.getOp()) {
+ this->restoreTo(fSaveCount - 1);
+ prior = (Element*) fDeque.back();
+ }
+ }
+ Element* newElement = new (fDeque.push_back()) Element(element);
+ newElement->updateBoundAndGenID(prior);
+}
+
+void SkClipStack::clipRRect(const SkRRect& rrect, const SkMatrix& matrix, SkCanvas::ClipOp op,
+ bool doAA) {
+ SkRRect transformedRRect;
+ if (rrect.transform(matrix, &transformedRRect)) {
+ Element element(fSaveCount, transformedRRect, op, doAA);
+ this->pushElement(element);
+ return;
+ }
+ SkPath path;
+ path.addRRect(rrect);
+ path.setIsVolatile(true);
+ this->clipPath(path, matrix, op, doAA);
+}
+
+void SkClipStack::clipRect(const SkRect& rect, const SkMatrix& matrix, SkCanvas::ClipOp op,
+ bool doAA) {
+ if (matrix.rectStaysRect()) {
+ SkRect devRect;
+ matrix.mapRect(&devRect, rect);
+ Element element(fSaveCount, devRect, op, doAA);
+ this->pushElement(element);
+ return;
+ }
+ SkPath path;
+ path.addRect(rect);
+ path.setIsVolatile(true);
+ this->clipPath(path, matrix, op, doAA);
+}
+
+void SkClipStack::clipPath(const SkPath& path, const SkMatrix& matrix, SkCanvas::ClipOp op,
+ bool doAA) {
+ SkPath devPath;
+ path.transform(matrix, &devPath);
+
+ Element element(fSaveCount, devPath, op, doAA);
+ this->pushElement(element);
+}
+
+void SkClipStack::clipEmpty() {
+ Element* element = (Element*) fDeque.back();
+
+ if (element && element->canBeIntersectedInPlace(fSaveCount, SkCanvas::kIntersect_Op)) {
+ element->setEmpty();
+ }
+ new (fDeque.push_back()) Element(fSaveCount);
+
+ ((Element*)fDeque.back())->fGenID = kEmptyGenID;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkClipStack::Iter::Iter() : fStack(nullptr) {
+}
+
+SkClipStack::Iter::Iter(const SkClipStack& stack, IterStart startLoc)
+ : fStack(&stack) {
+ this->reset(stack, startLoc);
+}
+
+const SkClipStack::Element* SkClipStack::Iter::next() {
+ return (const SkClipStack::Element*)fIter.next();
+}
+
+const SkClipStack::Element* SkClipStack::Iter::prev() {
+ return (const SkClipStack::Element*)fIter.prev();
+}
+
+const SkClipStack::Element* SkClipStack::Iter::skipToTopmost(SkCanvas::ClipOp op) {
+
+ if (nullptr == fStack) {
+ return nullptr;
+ }
+
+ fIter.reset(fStack->fDeque, SkDeque::Iter::kBack_IterStart);
+
+ const SkClipStack::Element* element = nullptr;
+
+ for (element = (const SkClipStack::Element*) fIter.prev();
+ element;
+ element = (const SkClipStack::Element*) fIter.prev()) {
+
+ if (op == element->fOp) {
+ // The Deque's iterator is actually one pace ahead of the
+ // returned value. So while "element" is the element we want to
+ // return, the iterator is actually pointing at (and will
+ // return on the next "next" or "prev" call) the element
+ // in front of it in the deque. Bump the iterator forward a
+ // step so we get the expected result.
+ if (nullptr == fIter.next()) {
+ // The reverse iterator has run off the front of the deque
+ // (i.e., the "op" clip is the first clip) and can't
+ // recover. Reset the iterator to start at the front.
+ fIter.reset(fStack->fDeque, SkDeque::Iter::kFront_IterStart);
+ }
+ break;
+ }
+ }
+
+ if (nullptr == element) {
+ // There were no "op" clips
+ fIter.reset(fStack->fDeque, SkDeque::Iter::kFront_IterStart);
+ }
+
+ return this->next();
+}
+
+void SkClipStack::Iter::reset(const SkClipStack& stack, IterStart startLoc) {
+ fStack = &stack;
+ fIter.reset(stack.fDeque, static_cast<SkDeque::Iter::IterStart>(startLoc));
+}
+
+// helper method
+void SkClipStack::getConservativeBounds(int offsetX,
+ int offsetY,
+ int maxWidth,
+ int maxHeight,
+ SkRect* devBounds,
+ bool* isIntersectionOfRects) const {
+ SkASSERT(devBounds);
+
+ devBounds->setLTRB(0, 0,
+ SkIntToScalar(maxWidth), SkIntToScalar(maxHeight));
+
+ SkRect temp;
+ SkClipStack::BoundsType boundType;
+
+ // temp starts off in canvas space here
+ this->getBounds(&temp, &boundType, isIntersectionOfRects);
+ if (SkClipStack::kInsideOut_BoundsType == boundType) {
+ return;
+ }
+
+ // but is converted to device space here
+ temp.offset(SkIntToScalar(offsetX), SkIntToScalar(offsetY));
+
+ if (!devBounds->intersect(temp)) {
+ devBounds->setEmpty();
+ }
+}
+
+bool SkClipStack::isRRect(const SkRect& bounds, SkRRect* rrect, bool* aa) const {
+ // We limit to 5 elements. This means the back element will be bounds checked at most 4 times if
+ // it is an rrect.
+ int cnt = fDeque.count();
+ if (!cnt || cnt > 5) {
+ return false;
+ }
+ const Element* back = static_cast<const Element*>(fDeque.back());
+ if (back->getType() != SkClipStack::Element::kRect_Type &&
+ back->getType() != SkClipStack::Element::kRRect_Type) {
+ return false;
+ }
+ if (back->getOp() == SkCanvas::kReplace_Op) {
+ *rrect = back->asRRect();
+ *aa = back->isAA();
+ return true;
+ }
+
+ if (back->getOp() == SkCanvas::kIntersect_Op) {
+ SkRect backBounds;
+ if (!backBounds.intersect(bounds, back->asRRect().rect())) {
+ return false;
+ }
+ if (cnt > 1) {
+ SkDeque::Iter iter(fDeque, SkDeque::Iter::kBack_IterStart);
+ SkAssertResult(static_cast<const Element*>(iter.prev()) == back);
+ while (const Element* prior = (const Element*)iter.prev()) {
+ if ((prior->getOp() != SkCanvas::kIntersect_Op &&
+ prior->getOp() != SkCanvas::kReplace_Op) ||
+ !prior->contains(backBounds)) {
+ return false;
+ }
+ if (prior->getOp() == SkCanvas::kReplace_Op) {
+ break;
+ }
+ }
+ }
+ *rrect = back->asRRect();
+ *aa = back->isAA();
+ return true;
+ }
+ return false;
+}
+
+int32_t SkClipStack::GetNextGenID() {
+ // TODO: handle overflow.
+ return sk_atomic_inc(&gGenID);
+}
+
+int32_t SkClipStack::getTopmostGenID() const {
+ if (fDeque.empty()) {
+ return kWideOpenGenID;
+ }
+
+ const Element* back = static_cast<const Element*>(fDeque.back());
+ if (kInsideOut_BoundsType == back->fFiniteBoundType && back->fFiniteBound.isEmpty()) {
+ return kWideOpenGenID;
+ }
+
+ return back->getGenID();
+}
+
+#ifdef SK_DEBUG
+void SkClipStack::Element::dump() const {
+ static const char* kTypeStrings[] = {
+ "empty",
+ "rect",
+ "rrect",
+ "path"
+ };
+ static_assert(0 == kEmpty_Type, "type_str");
+ static_assert(1 == kRect_Type, "type_str");
+ static_assert(2 == kRRect_Type, "type_str");
+ static_assert(3 == kPath_Type, "type_str");
+ static_assert(SK_ARRAY_COUNT(kTypeStrings) == kTypeCnt, "type_str");
+
+ static const char* kOpStrings[] = {
+ "difference",
+ "intersect",
+ "union",
+ "xor",
+ "reverse-difference",
+ "replace",
+ };
+ static_assert(0 == SkCanvas::kDifference_Op, "op_str");
+ static_assert(1 == SkCanvas::kIntersect_Op, "op_str");
+ static_assert(2 == SkCanvas::kUnion_Op, "op_str");
+ static_assert(3 == SkCanvas::kXOR_Op, "op_str");
+ static_assert(4 == SkCanvas::kReverseDifference_Op, "op_str");
+ static_assert(5 == SkCanvas::kReplace_Op, "op_str");
+ static_assert(SK_ARRAY_COUNT(kOpStrings) == SkRegion::kOpCnt, "op_str");
+
+ SkDebugf("Type: %s, Op: %s, AA: %s, Save Count: %d\n", kTypeStrings[fType],
+ kOpStrings[fOp], (fDoAA ? "yes" : "no"), fSaveCount);
+ switch (fType) {
+ case kEmpty_Type:
+ SkDebugf("\n");
+ break;
+ case kRect_Type:
+ this->getRect().dump();
+ SkDebugf("\n");
+ break;
+ case kRRect_Type:
+ this->getRRect().dump();
+ SkDebugf("\n");
+ break;
+ case kPath_Type:
+ this->getPath().dump(nullptr, true, false);
+ break;
+ }
+}
+
+void SkClipStack::dump() const {
+ B2TIter iter(*this);
+ const Element* e;
+ while ((e = iter.next())) {
+ e->dump();
+ SkDebugf("\n");
+ }
+}
+#endif
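
To make the save/restore bookkeeping concrete, a short sketch that pushes an intersect clip between save() and restore() and then reads back the bound; the rectangle is illustrative:

#include "SkCanvas.h"
#include "SkClipStack.h"

static void clip_stack_example() {
    SkClipStack stack;

    stack.save();
    stack.clipRect(SkRect::MakeLTRB(10, 10, 90, 90), SkMatrix::I(),
                   SkCanvas::kIntersect_Op, false /* doAA */);

    SkRect bound;
    SkClipStack::BoundsType type;
    bool isectOfRects;
    stack.getBounds(&bound, &type, &isectOfRects);
    // type == kNormal_BoundsType, the bound matches the clipped rect,
    // and isectOfRects is true because the stack is a single intersect-rect.

    stack.restore();   // pops every element pushed since the matching save()
}
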
diff --git a/gfx/skia/skia/src/core/SkColor.cpp b/gfx/skia/skia/src/core/SkColor.cpp
new file mode 100644
index 000000000..6dacc063e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColor.cpp
@@ -0,0 +1,183 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkColor.h"
+#include "SkColorPriv.h"
+#include "SkFixed.h"
+
+SkPMColor SkPreMultiplyARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ return SkPremultiplyARGBInline(a, r, g, b);
+}
+
+SkPMColor SkPreMultiplyColor(SkColor c) {
+ return SkPremultiplyARGBInline(SkColorGetA(c), SkColorGetR(c),
+ SkColorGetG(c), SkColorGetB(c));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline SkScalar ByteToScalar(U8CPU x) {
+ SkASSERT(x <= 255);
+ return SkIntToScalar(x) / 255;
+}
+
+static inline SkScalar ByteDivToScalar(int numer, U8CPU denom) {
+ // cast to keep the answer signed
+ return SkIntToScalar(numer) / (int)denom;
+}
+
+void SkRGBToHSV(U8CPU r, U8CPU g, U8CPU b, SkScalar hsv[3]) {
+ SkASSERT(hsv);
+
+ unsigned min = SkMin32(r, SkMin32(g, b));
+ unsigned max = SkMax32(r, SkMax32(g, b));
+ unsigned delta = max - min;
+
+ SkScalar v = ByteToScalar(max);
+ SkASSERT(v >= 0 && v <= SK_Scalar1);
+
+ if (0 == delta) { // we're a shade of gray
+ hsv[0] = 0;
+ hsv[1] = 0;
+ hsv[2] = v;
+ return;
+ }
+
+ SkScalar s = ByteDivToScalar(delta, max);
+ SkASSERT(s >= 0 && s <= SK_Scalar1);
+
+ SkScalar h;
+ if (r == max) {
+ h = ByteDivToScalar(g - b, delta);
+ } else if (g == max) {
+ h = SkIntToScalar(2) + ByteDivToScalar(b - r, delta);
+ } else { // b == max
+ h = SkIntToScalar(4) + ByteDivToScalar(r - g, delta);
+ }
+
+ h *= 60;
+ if (h < 0) {
+ h += SkIntToScalar(360);
+ }
+ SkASSERT(h >= 0 && h < SkIntToScalar(360));
+
+ hsv[0] = h;
+ hsv[1] = s;
+ hsv[2] = v;
+}
+
+SkColor SkHSVToColor(U8CPU a, const SkScalar hsv[3]) {
+ SkASSERT(hsv);
+
+ U8CPU s = SkUnitScalarClampToByte(hsv[1]);
+ U8CPU v = SkUnitScalarClampToByte(hsv[2]);
+
+ if (0 == s) { // shade of gray
+ return SkColorSetARGB(a, v, v, v);
+ }
+ SkFixed hx = (hsv[0] < 0 || hsv[0] >= SkIntToScalar(360)) ? 0 : SkScalarToFixed(hsv[0]/60);
+ SkFixed f = hx & 0xFFFF;
+
+ unsigned v_scale = SkAlpha255To256(v);
+ unsigned p = SkAlphaMul(255 - s, v_scale);
+ unsigned q = SkAlphaMul(255 - (s * f >> 16), v_scale);
+ unsigned t = SkAlphaMul(255 - (s * (SK_Fixed1 - f) >> 16), v_scale);
+
+ unsigned r, g, b;
+
+ SkASSERT((unsigned)(hx >> 16) < 6);
+ switch (hx >> 16) {
+ case 0: r = v; g = t; b = p; break;
+ case 1: r = q; g = v; b = p; break;
+ case 2: r = p; g = v; b = t; break;
+ case 3: r = p; g = q; b = v; break;
+ case 4: r = t; g = p; b = v; break;
+ default: r = v; g = p; b = q; break;
+ }
+ return SkColorSetARGB(a, r, g, b);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#include "SkPM4fPriv.h"
+#include "SkHalf.h"
+
+SkPM4f SkPM4f::FromPMColor(SkPMColor c) {
+ return From4f(swizzle_rb_if_bgra(Sk4f_fromL32(c)));
+}
+
+SkColor4f SkPM4f::unpremul() const {
+ float alpha = fVec[A];
+ if (0 == alpha) {
+ return { 0, 0, 0, 0 };
+ } else {
+ float invAlpha = 1 / alpha;
+ return { fVec[R] * invAlpha, fVec[G] * invAlpha, fVec[B] * invAlpha, alpha };
+ }
+}
+
+void SkPM4f::toF16(uint16_t half[4]) const {
+ for (int i = 0; i < 4; ++i) {
+ half[i] = SkFloatToHalf(fVec[i]);
+ }
+}
+
+uint64_t SkPM4f::toF16() const {
+ uint64_t value;
+ this->toF16(reinterpret_cast<uint16_t*>(&value));
+ return value;
+}
+
+SkPM4f SkPM4f::FromF16(const uint16_t half[4]) {
+ return {{
+ SkHalfToFloat(half[0]),
+ SkHalfToFloat(half[1]),
+ SkHalfToFloat(half[2]),
+ SkHalfToFloat(half[3])
+ }};
+}
+
+#ifdef SK_DEBUG
+void SkPM4f::assertIsUnit() const {
+ auto c4 = Sk4f::Load(fVec);
+ SkASSERT((c4 >= Sk4f(0)).allTrue() && (c4 <= Sk4f(1)).allTrue());
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkColor4f SkColor4f::FromColor(SkColor bgra) {
+ SkColor4f rgba;
+ swizzle_rb(Sk4f_fromS32(bgra)).store(rgba.vec());
+ return rgba;
+}
+
+SkColor4f SkColor4f::FromColor3f(SkColor3f color3f, float a) {
+ SkColor4f rgba;
+ rgba.fR = color3f.fX;
+ rgba.fG = color3f.fY;
+ rgba.fB = color3f.fZ;
+ rgba.fA = a;
+ return rgba;
+}
+
+SkColor SkColor4f::toSkColor() const {
+ return Sk4f_toS32(swizzle_rb(Sk4f::Load(this->vec())));
+}
+
+SkColor4f SkColor4f::Pin(float r, float g, float b, float a) {
+ SkColor4f c4;
+ Sk4f::Min(Sk4f::Max(Sk4f(r, g, b, a), Sk4f(0)), Sk4f(1)).store(c4.vec());
+ return c4;
+}
+
+SkPM4f SkColor4f::premul() const {
+ auto src = Sk4f::Load(this->pin().vec());
+ float srcAlpha = src[3]; // need the pinned version of our alpha
+ src = src * Sk4f(srcAlpha, srcAlpha, srcAlpha, 1);
+
+ return SkPM4f::From4f(src);
+}
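
The HSV helpers above keep hue in degrees in [0, 360) and saturation/value in [0, 1]. A quick round-trip sketch with an arbitrary input color:

#include "SkColor.h"

static void hsv_round_trip() {
    SkColor c = SkColorSetARGB(0xFF, 0x20, 0x80, 0xC0);

    SkScalar hsv[3];
    SkRGBToHSV(SkColorGetR(c), SkColorGetG(c), SkColorGetB(c), hsv);
    // hsv[0] = hue in degrees, hsv[1] = saturation, hsv[2] = value

    SkColor back = SkHSVToColor(SkColorGetA(c), hsv);          // close to 'c', modulo rounding

    SkPMColor pm = SkPreMultiplyColor(back);                   // premultiplied form
    (void)pm;
}
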
diff --git a/gfx/skia/skia/src/core/SkColorFilter.cpp b/gfx/skia/skia/src/core/SkColorFilter.cpp
new file mode 100644
index 000000000..31c0ddb06
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorFilter.cpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkColorFilter.h"
+#include "SkReadBuffer.h"
+#include "SkRefCnt.h"
+#include "SkString.h"
+#include "SkTDArray.h"
+#include "SkUnPreMultiply.h"
+#include "SkWriteBuffer.h"
+#include "SkPM4f.h"
+#include "SkNx.h"
+
+#if SK_SUPPORT_GPU
+#include "GrFragmentProcessor.h"
+#endif
+
+bool SkColorFilter::asColorMode(SkColor* color, SkXfermode::Mode* mode) const {
+ return false;
+}
+
+bool SkColorFilter::asColorMatrix(SkScalar matrix[20]) const {
+ return false;
+}
+
+bool SkColorFilter::asComponentTable(SkBitmap*) const {
+ return false;
+}
+
+#if SK_SUPPORT_GPU
+sk_sp<GrFragmentProcessor> SkColorFilter::asFragmentProcessor(GrContext*) const {
+ return nullptr;
+}
+#endif
+
+bool SkColorFilter::appendStages(SkRasterPipeline* pipeline) const {
+ return this->onAppendStages(pipeline);
+}
+
+bool SkColorFilter::onAppendStages(SkRasterPipeline*) const {
+ return false;
+}
+
+void SkColorFilter::filterSpan4f(const SkPM4f src[], int count, SkPM4f result[]) const {
+ const int N = 128;
+ SkPMColor tmp[N];
+ while (count > 0) {
+ int n = SkTMin(count, N);
+ for (int i = 0; i < n; ++i) {
+ tmp[i] = src[i].toPMColor();
+ }
+ this->filterSpan(tmp, n, tmp);
+ for (int i = 0; i < n; ++i) {
+ result[i] = SkPM4f::FromPMColor(tmp[i]);
+ }
+ src += n;
+ result += n;
+ count -= n;
+ }
+}
+
+SkColor SkColorFilter::filterColor(SkColor c) const {
+ SkPMColor dst, src = SkPreMultiplyColor(c);
+ this->filterSpan(&src, 1, &dst);
+ return SkUnPreMultiply::PMColorToColor(dst);
+}
+
+SkColor4f SkColorFilter::filterColor4f(const SkColor4f& c) const {
+ SkPM4f dst, src = c.premul();
+ this->filterSpan4f(&src, 1, &dst);
+ return dst.unpremul();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+/*
+ * Since colorfilters may be used on the GPU backend, and in that case we may string together
+ * many GrFragmentProcessors, we might exceed some internal instruction/resource limit.
+ *
+ * Since we don't yet know *what* those limits might be when we construct the final shader,
+ * we just set an arbitrary limit during construction. If later we find smarter ways to know what
+ * the limits are, we can change this constant (or remove it).
+ */
+#define SK_MAX_COMPOSE_COLORFILTER_COUNT 4
+
+class SkComposeColorFilter : public SkColorFilter {
+public:
+ uint32_t getFlags() const override {
+ // Can only claim alpha-unchanged and SkPM4f support if both of our proxies do.
+ return fOuter->getFlags() & fInner->getFlags();
+ }
+
+ void filterSpan(const SkPMColor shader[], int count, SkPMColor result[]) const override {
+ fInner->filterSpan(shader, count, result);
+ fOuter->filterSpan(result, count, result);
+ }
+
+ void filterSpan4f(const SkPM4f shader[], int count, SkPM4f result[]) const override {
+ fInner->filterSpan4f(shader, count, result);
+ fOuter->filterSpan4f(result, count, result);
+ }
+
+#ifndef SK_IGNORE_TO_STRING
+ void toString(SkString* str) const override {
+ SkString outerS, innerS;
+ fOuter->toString(&outerS);
+ fInner->toString(&innerS);
+ str->appendf("SkComposeColorFilter: outer(%s) inner(%s)", outerS.c_str(), innerS.c_str());
+ }
+#endif
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(GrContext* context) const override {
+ sk_sp<GrFragmentProcessor> innerFP(fInner->asFragmentProcessor(context));
+ sk_sp<GrFragmentProcessor> outerFP(fOuter->asFragmentProcessor(context));
+ if (!innerFP || !outerFP) {
+ return nullptr;
+ }
+ sk_sp<GrFragmentProcessor> series[] = { std::move(innerFP), std::move(outerFP) };
+ return GrFragmentProcessor::RunInSeries(series, 2);
+ }
+#endif
+
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkComposeColorFilter)
+
+protected:
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.writeFlattenable(fOuter.get());
+ buffer.writeFlattenable(fInner.get());
+ }
+
+private:
+ SkComposeColorFilter(sk_sp<SkColorFilter> outer, sk_sp<SkColorFilter> inner,
+ int composedFilterCount)
+ : fOuter(std::move(outer))
+ , fInner(std::move(inner))
+ , fComposedFilterCount(composedFilterCount)
+ {
+ SkASSERT(composedFilterCount >= 2);
+ SkASSERT(composedFilterCount <= SK_MAX_COMPOSE_COLORFILTER_COUNT);
+ }
+
+ int privateComposedFilterCount() const override {
+ return fComposedFilterCount;
+ }
+
+ sk_sp<SkColorFilter> fOuter;
+ sk_sp<SkColorFilter> fInner;
+ const int fComposedFilterCount;
+
+ friend class SkColorFilter;
+
+ typedef SkColorFilter INHERITED;
+};
+
+sk_sp<SkFlattenable> SkComposeColorFilter::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkColorFilter> outer(buffer.readColorFilter());
+ sk_sp<SkColorFilter> inner(buffer.readColorFilter());
+ return MakeComposeFilter(std::move(outer), std::move(inner));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkColorFilter> SkColorFilter::MakeComposeFilter(sk_sp<SkColorFilter> outer,
+ sk_sp<SkColorFilter> inner) {
+ if (!outer) {
+ return inner;
+ }
+ if (!inner) {
+ return outer;
+ }
+
+ // Give the subclass a shot at a more optimal composition...
+ auto composition = outer->makeComposed(inner);
+ if (composition) {
+ return composition;
+ }
+
+ int count = inner->privateComposedFilterCount() + outer->privateComposedFilterCount();
+ if (count > SK_MAX_COMPOSE_COLORFILTER_COUNT) {
+ return nullptr;
+ }
+ return sk_sp<SkColorFilter>(new SkComposeColorFilter(std::move(outer), std::move(inner), count));
+}
+
+#include "SkModeColorFilter.h"
+
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkColorFilter)
+SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkComposeColorFilter)
+SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkModeColorFilter)
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END
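
MakeComposeFilter() returns a filter equivalent to applying inner first and outer second, lets the outer filter fold the inner one in via makeComposed(), and refuses chains longer than SK_MAX_COMPOSE_COLORFILTER_COUNT. A sketch of driving it; where the two input filters come from is left to the caller:

#include <utility>
#include "SkColorFilter.h"

// 'outer' and 'inner' are assumed to be built elsewhere; either may be null.
static SkColor apply_composed(sk_sp<SkColorFilter> outer, sk_sp<SkColorFilter> inner,
                              SkColor input) {
    sk_sp<SkColorFilter> composed =
            SkColorFilter::MakeComposeFilter(std::move(outer), std::move(inner));
    if (!composed) {
        // Either both inputs were null, or the composed chain exceeded
        // SK_MAX_COMPOSE_COLORFILTER_COUNT.
        return input;
    }
    return composed->filterColor(input);   // outer(inner(input))
}
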
diff --git a/gfx/skia/skia/src/core/SkColorFilterShader.cpp b/gfx/skia/skia/src/core/SkColorFilterShader.cpp
new file mode 100644
index 000000000..8bf82b8b1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorFilterShader.cpp
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkColorFilterShader.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+#include "SkShader.h"
+#include "SkString.h"
+
+#if SK_SUPPORT_GPU
+#include "GrFragmentProcessor.h"
+#endif
+
+SkColorFilterShader::SkColorFilterShader(sk_sp<SkShader> shader, sk_sp<SkColorFilter> filter)
+ : fShader(std::move(shader))
+ , fFilter(std::move(filter))
+{
+ SkASSERT(fShader);
+ SkASSERT(fFilter);
+}
+
+sk_sp<SkFlattenable> SkColorFilterShader::CreateProc(SkReadBuffer& buffer) {
+ auto shader = buffer.readShader();
+ auto filter = buffer.readColorFilter();
+ if (!shader || !filter) {
+ return nullptr;
+ }
+ return sk_make_sp<SkColorFilterShader>(shader, filter);
+}
+
+void SkColorFilterShader::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeFlattenable(fShader.get());
+ buffer.writeFlattenable(fFilter.get());
+}
+
+uint32_t SkColorFilterShader::FilterShaderContext::getFlags() const {
+ const SkColorFilterShader& filterShader = static_cast<const SkColorFilterShader&>(fShader);
+
+ uint32_t shaderF = fShaderContext->getFlags();
+ uint32_t filterF = filterShader.fFilter->getFlags();
+
+ // If the filter does not support a given feature, be sure to clear the corresponding flag
+ // in the shader flags.
+ //
+ if (!(filterF & SkColorFilter::kAlphaUnchanged_Flag)) {
+ shaderF &= ~SkShader::kOpaqueAlpha_Flag;
+ }
+ return shaderF;
+}
+
+SkShader::Context* SkColorFilterShader::onCreateContext(const ContextRec& rec,
+ void* storage) const {
+ char* shaderContextStorage = (char*)storage + sizeof(FilterShaderContext);
+ SkShader::Context* shaderContext = fShader->createContext(rec, shaderContextStorage);
+ if (nullptr == shaderContext) {
+ return nullptr;
+ }
+ return new (storage) FilterShaderContext(*this, shaderContext, rec);
+}
+
+size_t SkColorFilterShader::onContextSize(const ContextRec& rec) const {
+ return sizeof(FilterShaderContext) + fShader->contextSize(rec);
+}
+
+SkColorFilterShader::FilterShaderContext::FilterShaderContext(
+ const SkColorFilterShader& filterShader,
+ SkShader::Context* shaderContext,
+ const ContextRec& rec)
+ : INHERITED(filterShader, rec)
+ , fShaderContext(shaderContext)
+{}
+
+SkColorFilterShader::FilterShaderContext::~FilterShaderContext() {
+ fShaderContext->~Context();
+}
+
+void SkColorFilterShader::FilterShaderContext::shadeSpan(int x, int y, SkPMColor result[],
+ int count) {
+ const SkColorFilterShader& filterShader = static_cast<const SkColorFilterShader&>(fShader);
+
+ fShaderContext->shadeSpan(x, y, result, count);
+ filterShader.fFilter->filterSpan(result, count, result);
+}
+
+void SkColorFilterShader::FilterShaderContext::shadeSpan4f(int x, int y, SkPM4f result[],
+ int count) {
+ const SkColorFilterShader& filterShader = static_cast<const SkColorFilterShader&>(fShader);
+
+ fShaderContext->shadeSpan4f(x, y, result, count);
+ filterShader.fFilter->filterSpan4f(result, count, result);
+}
+
+#if SK_SUPPORT_GPU
+/////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> SkColorFilterShader::asFragmentProcessor(const AsFPArgs& args) const {
+
+ sk_sp<GrFragmentProcessor> fp1(fShader->asFragmentProcessor(args));
+ if (!fp1) {
+ return nullptr;
+ }
+
+ sk_sp<GrFragmentProcessor> fp2(fFilter->asFragmentProcessor(args.fContext));
+ if (!fp2) {
+ return fp1;
+ }
+
+ sk_sp<GrFragmentProcessor> fpSeries[] = { std::move(fp1), std::move(fp2) };
+ return GrFragmentProcessor::RunInSeries(fpSeries, 2);
+}
+#endif
+
+#ifndef SK_IGNORE_TO_STRING
+void SkColorFilterShader::toString(SkString* str) const {
+ str->append("SkColorFilterShader: (");
+
+ str->append("Shader: ");
+ fShader->toString(str);
+ str->append(" Filter: ");
+ // TODO: add "fFilter->toString(str);" once SkColorFilter::toString is added
+
+ this->INHERITED::toString(str);
+
+ str->append(")");
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkShader> SkShader::makeWithColorFilter(sk_sp<SkColorFilter> filter) const {
+ SkShader* base = const_cast<SkShader*>(this);
+ if (!filter) {
+ return sk_ref_sp(base);
+ }
+ return sk_make_sp<SkColorFilterShader>(sk_ref_sp(base), filter);
+}
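
makeWithColorFilter() is the public entry point that wraps an existing shader in an SkColorFilterShader. A sketch of installing the result on a paint; the shader and filter are assumed to be created elsewhere, and the sk_sp-based setShader() overload is assumed to be available:

#include <utility>
#include "SkColorFilter.h"
#include "SkPaint.h"
#include "SkShader.h"

// 'shader' and 'filter' are assumed to come from elsewhere in the program.
static void use_filtered_shader(SkPaint* paint,
                                sk_sp<SkShader> shader,
                                sk_sp<SkColorFilter> filter) {
    // Every span the shader produces is run through 'filter'
    // (see FilterShaderContext::shadeSpan above).
    paint->setShader(shader->makeWithColorFilter(std::move(filter)));
}
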
diff --git a/gfx/skia/skia/src/core/SkColorFilterShader.h b/gfx/skia/skia/src/core/SkColorFilterShader.h
new file mode 100644
index 000000000..035acd839
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorFilterShader.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorFilterShader_DEFINED
+#define SkColorFilterShader_DEFINED
+
+#include "SkColorFilter.h"
+#include "SkShader.h"
+
+class SkColorFilterShader : public SkShader {
+public:
+ SkColorFilterShader(sk_sp<SkShader> shader, sk_sp<SkColorFilter> filter);
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(const AsFPArgs&) const override;
+#endif
+
+ class FilterShaderContext : public SkShader::Context {
+ public:
+ // Takes ownership of shaderContext and calls its destructor.
+ FilterShaderContext(const SkColorFilterShader&, SkShader::Context*, const ContextRec&);
+ virtual ~FilterShaderContext();
+
+ uint32_t getFlags() const override;
+
+ void shadeSpan(int x, int y, SkPMColor[], int count) override;
+ void shadeSpan4f(int x, int y, SkPM4f[], int count) override;
+
+ void set3DMask(const SkMask* mask) override {
+ // forward to our proxy
+ fShaderContext->set3DMask(mask);
+ }
+
+ private:
+ SkShader::Context* fShaderContext;
+
+ typedef SkShader::Context INHERITED;
+ };
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkColorFilterShader)
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ size_t onContextSize(const ContextRec&) const override;
+ Context* onCreateContext(const ContextRec&, void* storage) const override;
+
+private:
+ sk_sp<SkShader> fShader;
+ sk_sp<SkColorFilter> fFilter;
+
+ typedef SkShader INHERITED;
+};
+
+#endif
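
onContextSize() and onCreateContext() in this header follow a nested placement-new pattern: the wrapper reports its own size plus the wrapped shader's context size, then constructs the inner context into the tail of the caller-provided storage. A stripped-down, Skia-free illustration of that idiom; every name below is invented for the sketch:

#include <cstddef>
#include <new>

struct InnerCtx { int state = 0; };

struct OuterCtx {
    InnerCtx* inner;
    explicit OuterCtx(void* innerStorage) : inner(new (innerStorage) InnerCtx()) {}
    ~OuterCtx() { inner->~InnerCtx(); }    // mirrors FilterShaderContext's destructor
};

static size_t contextSize() { return sizeof(OuterCtx) + sizeof(InnerCtx); }

static OuterCtx* createContext(void* storage) {
    // The inner context lives directly after the outer one in the same allocation.
    char* innerStorage = static_cast<char*>(storage) + sizeof(OuterCtx);
    return new (storage) OuterCtx(innerStorage);
}

static void use() {
    alignas(std::max_align_t) char storage[64];   // at least contextSize() bytes
    OuterCtx* ctx = createContext(storage);
    ctx->~OuterCtx();                             // explicit destruction, as callers do in Skia
}
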
diff --git a/gfx/skia/skia/src/core/SkColorMatrixFilterRowMajor255.cpp b/gfx/skia/skia/src/core/SkColorMatrixFilterRowMajor255.cpp
new file mode 100644
index 000000000..29a3f107b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorMatrixFilterRowMajor255.cpp
@@ -0,0 +1,438 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkColorMatrixFilterRowMajor255.h"
+#include "SkColorPriv.h"
+#include "SkNx.h"
+#include "SkPM4fPriv.h"
+#include "SkReadBuffer.h"
+#include "SkRefCnt.h"
+#include "SkString.h"
+#include "SkUnPreMultiply.h"
+#include "SkWriteBuffer.h"
+
+static void transpose(float dst[20], const float src[20]) {
+ const float* srcR = src + 0;
+ const float* srcG = src + 5;
+ const float* srcB = src + 10;
+ const float* srcA = src + 15;
+
+ for (int i = 0; i < 20; i += 4) {
+ dst[i + 0] = *srcR++;
+ dst[i + 1] = *srcG++;
+ dst[i + 2] = *srcB++;
+ dst[i + 3] = *srcA++;
+ }
+}
+
+void SkColorMatrixFilterRowMajor255::initState() {
+ transpose(fTranspose, fMatrix);
+
+ const float* array = fMatrix;
+
+ // check if we have to munge Alpha
+ bool changesAlpha = (array[15] || array[16] || array[17] || (array[18] - 1) || array[19]);
+ bool usesAlpha = (array[3] || array[8] || array[13]);
+
+ if (changesAlpha || usesAlpha) {
+ fFlags = changesAlpha ? 0 : kAlphaUnchanged_Flag;
+ } else {
+ fFlags = kAlphaUnchanged_Flag;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkColorMatrixFilterRowMajor255::SkColorMatrixFilterRowMajor255(const SkScalar array[20]) {
+ memcpy(fMatrix, array, 20 * sizeof(SkScalar));
+ this->initState();
+}
+
+uint32_t SkColorMatrixFilterRowMajor255::getFlags() const {
+ return this->INHERITED::getFlags() | fFlags;
+}
+
+static Sk4f scale_rgb(float scale) {
+ static_assert(SkPM4f::A == 3, "Alpha is lane 3");
+ return Sk4f(scale, scale, scale, 1);
+}
+
+static Sk4f premul(const Sk4f& x) {
+ return x * scale_rgb(x[SkPM4f::A]);
+}
+
+static Sk4f unpremul(const Sk4f& x) {
+ return x * scale_rgb(1 / x[SkPM4f::A]); // TODO: fast/approx invert?
+}
+
+static Sk4f clamp_0_1(const Sk4f& x) {
+ return Sk4f::Max(Sk4f::Min(x, Sk4f(1)), Sk4f(0));
+}
+
+static SkPMColor round(const Sk4f& x) {
+ SkPMColor c;
+ SkNx_cast<uint8_t>(x * Sk4f(255) + Sk4f(0.5f)).store(&c);
+ return c;
+}
+
+template <typename Adaptor, typename T>
+void filter_span(const float array[], const T src[], int count, T dst[]) {
+ // c0-c3 are already in [0,1].
+ const Sk4f c0 = Sk4f::Load(array + 0);
+ const Sk4f c1 = Sk4f::Load(array + 4);
+ const Sk4f c2 = Sk4f::Load(array + 8);
+ const Sk4f c3 = Sk4f::Load(array + 12);
+ // c4 (the translate vector) is in [0, 255]. Bring it back to [0,1].
+ const Sk4f c4 = Sk4f::Load(array + 16)*Sk4f(1.0f/255);
+
+ // todo: we could cache this in the constructor...
+ T matrix_translate_pmcolor = Adaptor::From4f(premul(clamp_0_1(c4)));
+
+ for (int i = 0; i < count; i++) {
+ Sk4f srcf = Adaptor::To4f(src[i]);
+ float srcA = srcf[SkPM4f::A];
+
+ if (0 == srcA) {
+ dst[i] = matrix_translate_pmcolor;
+ continue;
+ }
+ if (1 != srcA) {
+ srcf = unpremul(srcf);
+ }
+
+ Sk4f r4 = srcf[Adaptor::R];
+ Sk4f g4 = srcf[Adaptor::G];
+ Sk4f b4 = srcf[Adaptor::B];
+ Sk4f a4 = srcf[Adaptor::A];
+ // apply matrix
+ Sk4f dst4 = c0 * r4 + c1 * g4 + c2 * b4 + c3 * a4 + c4;
+
+ dst[i] = Adaptor::From4f(premul(clamp_0_1(dst4)));
+ }
+}
+
+struct SkPMColorAdaptor {
+ enum {
+ R = SK_R_INDEX,
+ G = SK_G_INDEX,
+ B = SK_B_INDEX,
+ A = SK_A_INDEX,
+ };
+ static SkPMColor From4f(const Sk4f& c4) {
+ return round(swizzle_rb_if_bgra(c4));
+ }
+ static Sk4f To4f(SkPMColor c) {
+ return Sk4f_fromL32(c);
+ }
+};
+void SkColorMatrixFilterRowMajor255::filterSpan(const SkPMColor src[], int count, SkPMColor dst[]) const {
+ filter_span<SkPMColorAdaptor>(fTranspose, src, count, dst);
+}
+
+struct SkPM4fAdaptor {
+ enum {
+ R = SkPM4f::R,
+ G = SkPM4f::G,
+ B = SkPM4f::B,
+ A = SkPM4f::A,
+ };
+ static SkPM4f From4f(const Sk4f& c4) {
+ return SkPM4f::From4f(c4);
+ }
+ static Sk4f To4f(const SkPM4f& c) {
+ return c.to4f();
+ }
+};
+void SkColorMatrixFilterRowMajor255::filterSpan4f(const SkPM4f src[], int count, SkPM4f dst[]) const {
+ filter_span<SkPM4fAdaptor>(fTranspose, src, count, dst);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkColorMatrixFilterRowMajor255::flatten(SkWriteBuffer& buffer) const {
+ SkASSERT(sizeof(fMatrix)/sizeof(SkScalar) == 20);
+ buffer.writeScalarArray(fMatrix, 20);
+}
+
+sk_sp<SkFlattenable> SkColorMatrixFilterRowMajor255::CreateProc(SkReadBuffer& buffer) {
+ SkScalar matrix[20];
+ if (buffer.readScalarArray(matrix, 20)) {
+ return sk_make_sp<SkColorMatrixFilterRowMajor255>(matrix);
+ }
+ return nullptr;
+}
+
+bool SkColorMatrixFilterRowMajor255::asColorMatrix(SkScalar matrix[20]) const {
+ if (matrix) {
+ memcpy(matrix, fMatrix, 20 * sizeof(SkScalar));
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// This code was duplicated from src/effects/SkColorMatrix.cpp in order to be used in core.
+//////
+
+// To detect if we need to apply clamping after applying a matrix, we check if
+// any output component might go outside of [0, 255] for any combination of
+// input components in [0..255].
+// Each output component is an affine transformation of the input component, so
+// the minimum and maximum values are for any combination of minimum or maximum
+// values of input components (i.e. 0 or 255).
+// E.g. if R' = x*R + y*G + z*B + w*A + t
+// Then the maximum value will be for R=255 if x>0 or R=0 if x<0, and the
+// minimum value will be for R=0 if x>0 or R=255 if x<0.
+// Same goes for all components.
+static bool component_needs_clamping(const SkScalar row[5]) {
+ SkScalar maxValue = row[4] / 255;
+ SkScalar minValue = row[4] / 255;
+ for (int i = 0; i < 4; ++i) {
+ if (row[i] > 0)
+ maxValue += row[i];
+ else
+ minValue += row[i];
+ }
+ return (maxValue > 1) || (minValue < 0);
+}
+
+static bool needs_clamping(const SkScalar matrix[20]) {
+ return component_needs_clamping(matrix)
+ || component_needs_clamping(matrix+5)
+ || component_needs_clamping(matrix+10)
+ || component_needs_clamping(matrix+15);
+}
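A concrete pass through the test above, using the row layout {r, g, b, a, translate} with the translate in 0-255 (values are illustrative only):

    const SkScalar identityRed[5] = { 1, 0, 0, 0, 0 };      // max = 1, min = 0: stays in range
    const SkScalar boostedRed[5]  = { 1.5f, 0, 0, 0, 64 };  // max = 1.5 + 64/255 > 1: can overflow
    SkASSERT(!component_needs_clamping(identityRed));
    SkASSERT(component_needs_clamping(boostedRed));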
+
+static void set_concat(SkScalar result[20], const SkScalar outer[20], const SkScalar inner[20]) {
+ int index = 0;
+ for (int j = 0; j < 20; j += 5) {
+ for (int i = 0; i < 4; i++) {
+ result[index++] = outer[j + 0] * inner[i + 0] +
+ outer[j + 1] * inner[i + 5] +
+ outer[j + 2] * inner[i + 10] +
+ outer[j + 3] * inner[i + 15];
+ }
+ result[index++] = outer[j + 0] * inner[4] +
+ outer[j + 1] * inner[9] +
+ outer[j + 2] * inner[14] +
+ outer[j + 3] * inner[19] +
+ outer[j + 4];
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// End duplication
+//////
+
+sk_sp<SkColorFilter>
+SkColorMatrixFilterRowMajor255::makeComposed(sk_sp<SkColorFilter> innerFilter) const {
+ SkScalar innerMatrix[20];
+ if (innerFilter->asColorMatrix(innerMatrix) && !needs_clamping(innerMatrix)) {
+ SkScalar concat[20];
+ set_concat(concat, fMatrix, innerMatrix);
+ return sk_make_sp<SkColorMatrixFilterRowMajor255>(concat);
+ }
+ return nullptr;
+}
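A hedged usage sketch of the composition path (the matrices below are illustrative, not from the patch): a grayscale-average inner matrix never leaves [0, 255], so needs_clamping() is false and set_concat() folds the two filters into one; an inner matrix that could overflow would make makeComposed() return nullptr and force two separate passes.

    SkScalar brighten[20] = { 1, 0, 0, 0, 16,   0, 1, 0, 0, 16,
                              0, 0, 1, 0, 16,   0, 0, 0, 1, 0 };
    SkScalar gray[20]     = { 0.33f, 0.33f, 0.33f, 0, 0,   0.33f, 0.33f, 0.33f, 0, 0,
                              0.33f, 0.33f, 0.33f, 0, 0,   0,     0,     0,     1, 0 };
    auto outer = sk_make_sp<SkColorMatrixFilterRowMajor255>(brighten);
    auto inner = sk_make_sp<SkColorMatrixFilterRowMajor255>(gray);
    sk_sp<SkColorFilter> combined = outer->makeComposed(inner);   // applies `gray` first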
+
+#if SK_SUPPORT_GPU
+#include "GrFragmentProcessor.h"
+#include "GrInvariantOutput.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+
+class ColorMatrixEffect : public GrFragmentProcessor {
+public:
+ static sk_sp<GrFragmentProcessor> Make(const SkScalar matrix[20]) {
+ return sk_sp<GrFragmentProcessor>(new ColorMatrixEffect(matrix));
+ }
+
+ const char* name() const override { return "Color Matrix"; }
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ class GLSLProcessor : public GrGLSLFragmentProcessor {
+ public:
+ // this class always generates the same code.
+ static void GenKey(const GrProcessor&, const GrGLSLCaps&, GrProcessorKeyBuilder*) {}
+
+ void emitCode(EmitArgs& args) override {
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ fMatrixHandle = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kMat44f_GrSLType, kDefault_GrSLPrecision,
+ "ColorMatrix");
+ fVectorHandle = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType, kDefault_GrSLPrecision,
+ "ColorMatrixVector");
+
+ if (nullptr == args.fInputColor) {
+ // could optimize this case, but we aren't for now.
+ args.fInputColor = "vec4(1)";
+ }
+ GrGLSLFragmentBuilder* fragBuilder = args.fFragBuilder;
+ // The max() is to guard against 0 / 0 during unpremul when the incoming color is
+ // transparent black.
+ fragBuilder->codeAppendf("\tfloat nonZeroAlpha = max(%s.a, 0.00001);\n",
+ args.fInputColor);
+ fragBuilder->codeAppendf("\t%s = %s * vec4(%s.rgb / nonZeroAlpha, nonZeroAlpha) + %s;\n",
+ args.fOutputColor,
+ uniformHandler->getUniformCStr(fMatrixHandle),
+ args.fInputColor,
+ uniformHandler->getUniformCStr(fVectorHandle));
+ fragBuilder->codeAppendf("\t%s = clamp(%s, 0.0, 1.0);\n",
+ args.fOutputColor, args.fOutputColor);
+ fragBuilder->codeAppendf("\t%s.rgb *= %s.a;\n", args.fOutputColor, args.fOutputColor);
+ }
+
+ protected:
+ void onSetData(const GrGLSLProgramDataManager& uniManager,
+ const GrProcessor& proc) override {
+ const ColorMatrixEffect& cme = proc.cast<ColorMatrixEffect>();
+ const float* m = cme.fMatrix;
+ // The GL matrix is transposed from SkColorMatrix.
+ float mt[] = {
+ m[0], m[5], m[10], m[15],
+ m[1], m[6], m[11], m[16],
+ m[2], m[7], m[12], m[17],
+ m[3], m[8], m[13], m[18],
+ };
+ static const float kScale = 1.0f / 255.0f;
+ float vec[] = {
+ m[4] * kScale, m[9] * kScale, m[14] * kScale, m[19] * kScale,
+ };
+ uniManager.setMatrix4fv(fMatrixHandle, 1, mt);
+ uniManager.set4fv(fVectorHandle, 1, vec);
+ }
+
+ private:
+ GrGLSLProgramDataManager::UniformHandle fMatrixHandle;
+ GrGLSLProgramDataManager::UniformHandle fVectorHandle;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+ };
+
+private:
+ ColorMatrixEffect(const SkScalar matrix[20]) {
+ memcpy(fMatrix, matrix, sizeof(SkScalar) * 20);
+ this->initClassID<ColorMatrixEffect>();
+ }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override {
+ return new GLSLProcessor;
+ }
+
+ virtual void onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const override {
+ GLSLProcessor::GenKey(*this, caps, b);
+ }
+
+ bool onIsEqual(const GrFragmentProcessor& s) const override {
+ const ColorMatrixEffect& cme = s.cast<ColorMatrixEffect>();
+ return 0 == memcmp(fMatrix, cme.fMatrix, sizeof(fMatrix));
+ }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ // We only bother to check whether the alpha channel will be constant. If SkColorMatrix had
+ // type flags it might be worth checking the other components.
+
+ // The matrix is defined such that the 4th row determines the output alpha. The first four
+ // columns of that row multiply the input r, g, b, and a, respectively, and the last column
+ // is the "translation".
+ static const uint32_t kRGBAFlags[] = {
+ kR_GrColorComponentFlag,
+ kG_GrColorComponentFlag,
+ kB_GrColorComponentFlag,
+ kA_GrColorComponentFlag
+ };
+ static const int kShifts[] = {
+ GrColor_SHIFT_R, GrColor_SHIFT_G, GrColor_SHIFT_B, GrColor_SHIFT_A,
+ };
+ enum {
+ kAlphaRowStartIdx = 15,
+ kAlphaRowTranslateIdx = 19,
+ };
+
+ SkScalar outputA = 0;
+ for (int i = 0; i < 4; ++i) {
+ // If any relevant component of the color to be passed through the matrix is non-const
+ // then we can't know the final result.
+ if (0 != fMatrix[kAlphaRowStartIdx + i]) {
+ if (!(inout->validFlags() & kRGBAFlags[i])) {
+ inout->setToUnknown(GrInvariantOutput::kWill_ReadInput);
+ return;
+ } else {
+ uint32_t component = (inout->color() >> kShifts[i]) & 0xFF;
+ outputA += fMatrix[kAlphaRowStartIdx + i] * component;
+ }
+ }
+ }
+ outputA += fMatrix[kAlphaRowTranslateIdx];
+ // We pin the color to [0,1]. This would happen to the *final* color output from the frag
+ // shader but currently the effect does not pin its own output. So in the case of over/
+ // underflow this may deviate from the actual result. Maybe the effect should pin its
+ // result if the matrix could over/underflow for any component?
+ inout->setToOther(kA_GrColorComponentFlag,
+ static_cast<uint8_t>(SkScalarPin(outputA, 0, 255)) << GrColor_SHIFT_A,
+ GrInvariantOutput::kWill_ReadInput);
+ }
+
+ SkScalar fMatrix[20];
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(ColorMatrixEffect);
+
+sk_sp<GrFragmentProcessor> ColorMatrixEffect::TestCreate(GrProcessorTestData* d) {
+ SkScalar colorMatrix[20];
+ for (size_t i = 0; i < SK_ARRAY_COUNT(colorMatrix); ++i) {
+ colorMatrix[i] = d->fRandom->nextSScalar1();
+ }
+ return ColorMatrixEffect::Make(colorMatrix);
+}
+
+sk_sp<GrFragmentProcessor> SkColorMatrixFilterRowMajor255::asFragmentProcessor(GrContext*) const {
+ return ColorMatrixEffect::Make(fMatrix);
+}
+
+#endif
+
+#ifndef SK_IGNORE_TO_STRING
+void SkColorMatrixFilterRowMajor255::toString(SkString* str) const {
+ str->append("SkColorMatrixFilterRowMajor255: ");
+
+ str->append("matrix: (");
+ for (int i = 0; i < 20; ++i) {
+ str->appendScalar(fMatrix[i]);
+ if (i < 19) {
+ str->append(", ");
+ }
+ }
+ str->append(")");
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkColorFilter> SkColorFilter::MakeMatrixFilterRowMajor255(const SkScalar array[20]) {
+ return sk_sp<SkColorFilter>(new SkColorMatrixFilterRowMajor255(array));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkColorFilter>
+SkColorMatrixFilterRowMajor255::MakeSingleChannelOutput(const SkScalar row[5]) {
+ SkASSERT(row);
+ auto cf = sk_make_sp<SkColorMatrixFilterRowMajor255>();
+ static_assert(sizeof(SkScalar) * 5 * 4 == sizeof(cf->fMatrix), "sizes don't match");
+ for (int i = 0; i < 4; ++i) {
+ memcpy(cf->fMatrix + 5 * i, row, sizeof(SkScalar) * 5);
+ }
+ cf->initState();
+ return cf;
+}
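As the header below notes, MakeSingleChannelOutput() copies one 5-entry row ({r, g, b, a, translate}, translate in 0-255) into all four rows, so every output channel carries the same value. A hedged sketch that visualizes the source alpha:

    const SkScalar alphaRow[5] = { 0, 0, 0, 1, 0 };
    sk_sp<SkColorFilter> showAlpha =
            SkColorMatrixFilterRowMajor255::MakeSingleChannelOutput(alphaRow);
    // R, G, B and A of every filtered pixel now equal the source alpha.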
diff --git a/gfx/skia/skia/src/core/SkColorMatrixFilterRowMajor255.h b/gfx/skia/skia/src/core/SkColorMatrixFilterRowMajor255.h
new file mode 100644
index 000000000..c1158859f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorMatrixFilterRowMajor255.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorMatrixFilter_DEFINED
+#define SkColorMatrixFilter_DEFINED
+
+#include "SkColorFilter.h"
+
+class SK_API SkColorMatrixFilterRowMajor255 : public SkColorFilter {
+public:
+ SkColorMatrixFilterRowMajor255() {}
+ explicit SkColorMatrixFilterRowMajor255(const SkScalar array[20]);
+
+ /** Creates a color matrix filter that returns the same value in all four channels. */
+ static sk_sp<SkColorFilter> MakeSingleChannelOutput(const SkScalar row[5]);
+
+ void filterSpan(const SkPMColor src[], int count, SkPMColor[]) const override;
+ void filterSpan4f(const SkPM4f src[], int count, SkPM4f[]) const override;
+ uint32_t getFlags() const override;
+ bool asColorMatrix(SkScalar matrix[20]) const override;
+ sk_sp<SkColorFilter> makeComposed(sk_sp<SkColorFilter>) const override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(GrContext*) const override;
+#endif
+
+ SK_TO_STRING_OVERRIDE()
+
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkColorMatrixFilter)
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ SkScalar fMatrix[20];
+ float fTranspose[20]; // for Sk4s
+ uint32_t fFlags;
+
+ void initState();
+
+ typedef SkColorFilter INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkColorShader.cpp b/gfx/skia/skia/src/core/SkColorShader.cpp
new file mode 100644
index 000000000..cfa071fed
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorShader.cpp
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkColorShader.h"
+#include "SkColorSpace.h"
+#include "SkReadBuffer.h"
+#include "SkUtils.h"
+
+SkColorShader::SkColorShader(SkColor c) : fColor(c) {}
+
+bool SkColorShader::isOpaque() const {
+ return SkColorGetA(fColor) == 255;
+}
+
+sk_sp<SkFlattenable> SkColorShader::CreateProc(SkReadBuffer& buffer) {
+ return sk_make_sp<SkColorShader>(buffer.readColor());
+}
+
+void SkColorShader::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeColor(fColor);
+}
+
+uint32_t SkColorShader::ColorShaderContext::getFlags() const {
+ return fFlags;
+}
+
+SkShader::Context* SkColorShader::onCreateContext(const ContextRec& rec, void* storage) const {
+ return new (storage) ColorShaderContext(*this, rec);
+}
+
+SkColorShader::ColorShaderContext::ColorShaderContext(const SkColorShader& shader,
+ const ContextRec& rec)
+ : INHERITED(shader, rec)
+{
+ SkColor color = shader.fColor;
+ unsigned a = SkAlphaMul(SkColorGetA(color), SkAlpha255To256(rec.fPaint->getAlpha()));
+
+ unsigned r = SkColorGetR(color);
+ unsigned g = SkColorGetG(color);
+ unsigned b = SkColorGetB(color);
+
+ if (a != 255) {
+ r = SkMulDiv255Round(r, a);
+ g = SkMulDiv255Round(g, a);
+ b = SkMulDiv255Round(b, a);
+ }
+ fPMColor = SkPackARGB32(a, r, g, b);
+
+ SkColor4f c4 = SkColor4f::FromColor(shader.fColor);
+ c4.fA *= rec.fPaint->getAlpha() / 255.0f;
+ fPM4f = c4.premul();
+
+ fFlags = kConstInY32_Flag;
+ if (255 == a) {
+ fFlags |= kOpaqueAlpha_Flag;
+ }
+}
+
+void SkColorShader::ColorShaderContext::shadeSpan(int x, int y, SkPMColor span[], int count) {
+ sk_memset32(span, fPMColor, count);
+}
+
+void SkColorShader::ColorShaderContext::shadeSpanAlpha(int x, int y, uint8_t alpha[], int count) {
+ memset(alpha, SkGetPackedA32(fPMColor), count);
+}
+
+void SkColorShader::ColorShaderContext::shadeSpan4f(int x, int y, SkPM4f span[], int count) {
+ for (int i = 0; i < count; ++i) {
+ span[i] = fPM4f;
+ }
+}
+
+SkShader::GradientType SkColorShader::asAGradient(GradientInfo* info) const {
+ if (info) {
+ if (info->fColors && info->fColorCount >= 1) {
+ info->fColors[0] = fColor;
+ }
+ info->fColorCount = 1;
+ info->fTileMode = SkShader::kRepeat_TileMode;
+ }
+ return kColor_GradientType;
+}
+
+#if SK_SUPPORT_GPU
+
+#include "SkGr.h"
+#include "effects/GrConstColorProcessor.h"
+sk_sp<GrFragmentProcessor> SkColorShader::asFragmentProcessor(const AsFPArgs&) const {
+ GrColor color = SkColorToPremulGrColor(fColor);
+ return GrConstColorProcessor::Make(color, GrConstColorProcessor::kModulateA_InputMode);
+}
+
+#endif
+
+#ifndef SK_IGNORE_TO_STRING
+void SkColorShader::toString(SkString* str) const {
+ str->append("SkColorShader: (");
+
+ str->append("Color: ");
+ str->appendHex(fColor);
+
+ this->INHERITED::toString(str);
+
+ str->append(")");
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static unsigned unit_to_byte(float unit) {
+ SkASSERT(unit >= 0 && unit <= 1);
+ return (unsigned)(unit * 255 + 0.5);
+}
+
+static SkColor unit_to_skcolor(const SkColor4f& unit, SkColorSpace* cs) {
+ return SkColorSetARGB(unit_to_byte(unit.fA), unit_to_byte(unit.fR),
+ unit_to_byte(unit.fG), unit_to_byte(unit.fB));
+}
+
+SkColor4Shader::SkColor4Shader(const SkColor4f& color, sk_sp<SkColorSpace> space)
+ : fColorSpace(std::move(space))
+ , fColor4(color)
+ , fCachedByteColor(unit_to_skcolor(color.pin(), space.get()))
+{}
+
+sk_sp<SkFlattenable> SkColor4Shader::CreateProc(SkReadBuffer& buffer) {
+ SkColor4f color;
+ buffer.readColor4f(&color);
+ if (buffer.readBool()) {
+ // TODO how do we unflatten colorspaces
+ }
+ return SkShader::MakeColorShader(color, nullptr);
+}
+
+void SkColor4Shader::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeColor4f(fColor4);
+ buffer.writeBool(false); // TODO how do we flatten colorspaces?
+}
+
+uint32_t SkColor4Shader::Color4Context::getFlags() const {
+ return fFlags;
+}
+
+SkShader::Context* SkColor4Shader::onCreateContext(const ContextRec& rec, void* storage) const {
+ return new (storage) Color4Context(*this, rec);
+}
+
+SkColor4Shader::Color4Context::Color4Context(const SkColor4Shader& shader,
+ const ContextRec& rec)
+: INHERITED(shader, rec)
+{
+ SkColor color = shader.fCachedByteColor;
+ unsigned a = SkAlphaMul(SkColorGetA(color), SkAlpha255To256(rec.fPaint->getAlpha()));
+
+ unsigned r = SkColorGetR(color);
+ unsigned g = SkColorGetG(color);
+ unsigned b = SkColorGetB(color);
+
+ if (a != 255) {
+ r = SkMulDiv255Round(r, a);
+ g = SkMulDiv255Round(g, a);
+ b = SkMulDiv255Round(b, a);
+ }
+ fPMColor = SkPackARGB32(a, r, g, b);
+
+ SkColor4f c4 = shader.fColor4;
+ c4.fA *= rec.fPaint->getAlpha() * (1 / 255.0f);
+ fPM4f = c4.premul();
+
+ fFlags = kConstInY32_Flag;
+ if (255 == a) {
+ fFlags |= kOpaqueAlpha_Flag;
+ }
+}
+
+void SkColor4Shader::Color4Context::shadeSpan(int x, int y, SkPMColor span[], int count) {
+ sk_memset32(span, fPMColor, count);
+}
+
+void SkColor4Shader::Color4Context::shadeSpanAlpha(int x, int y, uint8_t alpha[], int count) {
+ memset(alpha, SkGetPackedA32(fPMColor), count);
+}
+
+void SkColor4Shader::Color4Context::shadeSpan4f(int x, int y, SkPM4f span[], int count) {
+ for (int i = 0; i < count; ++i) {
+ span[i] = fPM4f;
+ }
+}
+
+// TODO: do we need an updated version of this method for color4+colorspace?
+SkShader::GradientType SkColor4Shader::asAGradient(GradientInfo* info) const {
+ if (info) {
+ if (info->fColors && info->fColorCount >= 1) {
+ info->fColors[0] = fCachedByteColor;
+ }
+ info->fColorCount = 1;
+ info->fTileMode = SkShader::kRepeat_TileMode;
+ }
+ return kColor_GradientType;
+}
+
+#if SK_SUPPORT_GPU
+
+#include "SkGr.h"
+#include "effects/GrConstColorProcessor.h"
+sk_sp<GrFragmentProcessor> SkColor4Shader::asFragmentProcessor(const AsFPArgs&) const {
+ // TODO: how to communicate color4f to Gr
+ GrColor color = SkColorToPremulGrColor(fCachedByteColor);
+ return GrConstColorProcessor::Make(color, GrConstColorProcessor::kModulateA_InputMode);
+}
+
+#endif
+
+#ifndef SK_IGNORE_TO_STRING
+void SkColor4Shader::toString(SkString* str) const {
+ str->append("SkColor4Shader: (");
+
+ str->append("RGBA:");
+ for (int i = 0; i < 4; ++i) {
+ str->appendf(" %g", fColor4.vec()[i]);
+ }
+ str->append(" )");
+}
+#endif
+
+sk_sp<SkShader> SkShader::MakeColorShader(const SkColor4f& color, sk_sp<SkColorSpace> space) {
+ if (!SkScalarsAreFinite(color.vec(), 4)) {
+ return nullptr;
+ }
+ return sk_make_sp<SkColor4Shader>(color, std::move(space));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static void D32_BlitBW(SkShader::Context::BlitState* state, int x, int y, const SkPixmap& dst,
+ int count) {
+ SkXfermode::D32Proc proc = (SkXfermode::D32Proc)state->fStorage[0];
+ const SkPM4f* src = (const SkPM4f*)state->fStorage[1];
+ proc(state->fXfer, dst.writable_addr32(x, y), src, count, nullptr);
+}
+
+static void D32_BlitAA(SkShader::Context::BlitState* state, int x, int y, const SkPixmap& dst,
+ int count, const SkAlpha aa[]) {
+ SkXfermode::D32Proc proc = (SkXfermode::D32Proc)state->fStorage[0];
+ const SkPM4f* src = (const SkPM4f*)state->fStorage[1];
+ proc(state->fXfer, dst.writable_addr32(x, y), src, count, aa);
+}
+
+static void F16_BlitBW(SkShader::Context::BlitState* state, int x, int y, const SkPixmap& dst,
+ int count) {
+ SkXfermode::F16Proc proc = (SkXfermode::F16Proc)state->fStorage[0];
+ const SkPM4f* src = (const SkPM4f*)state->fStorage[1];
+ proc(state->fXfer, dst.writable_addr64(x, y), src, count, nullptr);
+}
+
+static void F16_BlitAA(SkShader::Context::BlitState* state, int x, int y, const SkPixmap& dst,
+ int count, const SkAlpha aa[]) {
+ SkXfermode::F16Proc proc = (SkXfermode::F16Proc)state->fStorage[0];
+ const SkPM4f* src = (const SkPM4f*)state->fStorage[1];
+ proc(state->fXfer, dst.writable_addr64(x, y), src, count, aa);
+}
+
+static bool choose_blitprocs(const SkPM4f* pm4, const SkImageInfo& info,
+ SkShader::Context::BlitState* state) {
+ uint32_t flags = SkXfermode::kSrcIsSingle_D32Flag;
+ if (pm4->a() == 1) {
+ flags |= SkXfermode::kSrcIsOpaque_D32Flag;
+ }
+ switch (info.colorType()) {
+ case kN32_SkColorType:
+ if (info.gammaCloseToSRGB()) {
+ flags |= SkXfermode::kDstIsSRGB_D32Flag;
+ }
+ state->fStorage[0] = (void*)SkXfermode::GetD32Proc(state->fXfer, flags);
+ state->fStorage[1] = (void*)pm4;
+ state->fBlitBW = D32_BlitBW;
+ state->fBlitAA = D32_BlitAA;
+ return true;
+ case kRGBA_F16_SkColorType:
+ state->fStorage[0] = (void*)SkXfermode::GetF16Proc(state->fXfer, flags);
+ state->fStorage[1] = (void*)pm4;
+ state->fBlitBW = F16_BlitBW;
+ state->fBlitAA = F16_BlitAA;
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool SkColorShader::ColorShaderContext::onChooseBlitProcs(const SkImageInfo& info,
+ BlitState* state) {
+ return choose_blitprocs(&fPM4f, info, state);
+}
+
+bool SkColor4Shader::Color4Context::onChooseBlitProcs(const SkImageInfo& info, BlitState* state) {
+ return choose_blitprocs(&fPM4f, info, state);
+}
diff --git a/gfx/skia/skia/src/core/SkColorShader.h b/gfx/skia/skia/src/core/SkColorShader.h
new file mode 100644
index 000000000..0bd270222
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorShader.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorShader_DEFINED
+#define SkColorShader_DEFINED
+
+#include "SkShader.h"
+#include "SkPM4f.h"
+
+/** \class SkColorShader
+ A Shader that represents a single color. In general, this effect can be
+ accomplished by just using the color field on the paint, but if an
+ actual shader object is needed, this provides that feature.
+*/
+class SK_API SkColorShader : public SkShader {
+public:
+ /** Create a ColorShader that ignores the color in the paint, and uses the
+ specified color. Note: like all shaders, at draw time the paint's alpha
+ will be respected, and is applied to the specified color.
+ */
+ explicit SkColorShader(SkColor c);
+
+ bool isOpaque() const override;
+
+ class ColorShaderContext : public SkShader::Context {
+ public:
+ ColorShaderContext(const SkColorShader& shader, const ContextRec&);
+
+ uint32_t getFlags() const override;
+ void shadeSpan(int x, int y, SkPMColor span[], int count) override;
+ void shadeSpanAlpha(int x, int y, uint8_t alpha[], int count) override;
+ void shadeSpan4f(int x, int y, SkPM4f[], int count) override;
+
+ protected:
+ bool onChooseBlitProcs(const SkImageInfo&, BlitState*) override;
+
+ private:
+ SkPM4f fPM4f;
+ SkPMColor fPMColor;
+ uint32_t fFlags;
+
+ typedef SkShader::Context INHERITED;
+ };
+
+ GradientType asAGradient(GradientInfo* info) const override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(const AsFPArgs&) const override;
+#endif
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkColorShader)
+
+protected:
+ SkColorShader(SkReadBuffer&);
+ void flatten(SkWriteBuffer&) const override;
+ Context* onCreateContext(const ContextRec&, void* storage) const override;
+ size_t onContextSize(const ContextRec&) const override { return sizeof(ColorShaderContext); }
+ bool onAsLuminanceColor(SkColor* lum) const override {
+ *lum = fColor;
+ return true;
+ }
+
+private:
+ SkColor fColor;
+
+ typedef SkShader INHERITED;
+};
+
+class SkColor4Shader : public SkShader {
+public:
+ SkColor4Shader(const SkColor4f&, sk_sp<SkColorSpace>);
+
+ bool isOpaque() const override {
+ return SkColorGetA(fCachedByteColor) == 255;
+ }
+
+ class Color4Context : public SkShader::Context {
+ public:
+ Color4Context(const SkColor4Shader& shader, const ContextRec&);
+
+ uint32_t getFlags() const override;
+ void shadeSpan(int x, int y, SkPMColor span[], int count) override;
+ void shadeSpanAlpha(int x, int y, uint8_t alpha[], int count) override;
+ void shadeSpan4f(int x, int y, SkPM4f[], int count) override;
+
+ protected:
+ bool onChooseBlitProcs(const SkImageInfo&, BlitState*) override;
+
+ private:
+ SkPM4f fPM4f;
+ SkPMColor fPMColor;
+ uint32_t fFlags;
+
+ typedef SkShader::Context INHERITED;
+ };
+
+ GradientType asAGradient(GradientInfo* info) const override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(const AsFPArgs&) const override;
+#endif
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkColorShader)
+
+protected:
+ SkColor4Shader(SkReadBuffer&);
+ void flatten(SkWriteBuffer&) const override;
+ Context* onCreateContext(const ContextRec&, void* storage) const override;
+ size_t onContextSize(const ContextRec&) const override { return sizeof(Color4Context); }
+ bool onAsLuminanceColor(SkColor* lum) const override {
+ *lum = fCachedByteColor;
+ return true;
+ }
+
+private:
+ sk_sp<SkColorSpace> fColorSpace;
+ const SkColor4f fColor4;
+ const SkColor fCachedByteColor;
+
+ typedef SkShader INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkColorSpace.cpp b/gfx/skia/skia/src/core/SkColorSpace.cpp
new file mode 100644
index 000000000..c6bf4b943
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorSpace.cpp
@@ -0,0 +1,472 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkColorSpace.h"
+#include "SkColorSpace_Base.h"
+#include "SkColorSpacePriv.h"
+#include "SkOnce.h"
+
+SkColorSpace_Base::SkColorSpace_Base(SkGammaNamed gammaNamed, const SkMatrix44& toXYZD50)
+ : fGammaNamed(gammaNamed)
+ , fGammas(nullptr)
+ , fProfileData(nullptr)
+ , fToXYZD50(toXYZD50)
+ , fFromXYZD50(SkMatrix44::kUninitialized_Constructor)
+{}
+
+SkColorSpace_Base::SkColorSpace_Base(sk_sp<SkColorLookUpTable> colorLUT, SkGammaNamed gammaNamed,
+ sk_sp<SkGammas> gammas, const SkMatrix44& toXYZD50,
+ sk_sp<SkData> profileData)
+ : fColorLUT(std::move(colorLUT))
+ , fGammaNamed(gammaNamed)
+ , fGammas(std::move(gammas))
+ , fProfileData(std::move(profileData))
+ , fToXYZD50(toXYZD50)
+ , fFromXYZD50(SkMatrix44::kUninitialized_Constructor)
+{}
+
+static constexpr float gSRGB_toXYZD50[] {
+ 0.4358f, 0.3853f, 0.1430f, // Rx, Gx, Bx
+ 0.2224f, 0.7170f, 0.0606f, // Ry, Gy, By
+ 0.0139f, 0.0971f, 0.7139f, // Rz, Gz, Bz
+};
+
+static constexpr float gAdobeRGB_toXYZD50[] {
+ 0.6098f, 0.2052f, 0.1492f, // Rx, Gx, Bx
+ 0.3111f, 0.6257f, 0.0632f, // Ry, Gy, By
+ 0.0195f, 0.0609f, 0.7448f, // Rz, Gz, Bz
+};
+
+/**
+ * Checks if our toXYZ matrix is a close match to a known color gamut.
+ *
+ * @param toXYZD50 transformation matrix deduced from profile data
+ * @param standard 3x3 canonical transformation matrix
+ */
+static bool xyz_almost_equal(const SkMatrix44& toXYZD50, const float* standard) {
+ return color_space_almost_equal(toXYZD50.getFloat(0, 0), standard[0]) &&
+ color_space_almost_equal(toXYZD50.getFloat(0, 1), standard[1]) &&
+ color_space_almost_equal(toXYZD50.getFloat(0, 2), standard[2]) &&
+ color_space_almost_equal(toXYZD50.getFloat(1, 0), standard[3]) &&
+ color_space_almost_equal(toXYZD50.getFloat(1, 1), standard[4]) &&
+ color_space_almost_equal(toXYZD50.getFloat(1, 2), standard[5]) &&
+ color_space_almost_equal(toXYZD50.getFloat(2, 0), standard[6]) &&
+ color_space_almost_equal(toXYZD50.getFloat(2, 1), standard[7]) &&
+ color_space_almost_equal(toXYZD50.getFloat(2, 2), standard[8]) &&
+ color_space_almost_equal(toXYZD50.getFloat(0, 3), 0.0f) &&
+ color_space_almost_equal(toXYZD50.getFloat(1, 3), 0.0f) &&
+ color_space_almost_equal(toXYZD50.getFloat(2, 3), 0.0f) &&
+ color_space_almost_equal(toXYZD50.getFloat(3, 0), 0.0f) &&
+ color_space_almost_equal(toXYZD50.getFloat(3, 1), 0.0f) &&
+ color_space_almost_equal(toXYZD50.getFloat(3, 2), 0.0f) &&
+ color_space_almost_equal(toXYZD50.getFloat(3, 3), 1.0f);
+}
+
+sk_sp<SkColorSpace> SkColorSpace_Base::NewRGB(const float values[3], const SkMatrix44& toXYZD50) {
+ if (0.0f > values[0] || 0.0f > values[1] || 0.0f > values[2]) {
+ return nullptr;
+ }
+
+ SkGammaNamed gammaNamed = kNonStandard_SkGammaNamed;
+ if (color_space_almost_equal(2.2f, values[0]) &&
+ color_space_almost_equal(2.2f, values[1]) &&
+ color_space_almost_equal(2.2f, values[2])) {
+ gammaNamed = k2Dot2Curve_SkGammaNamed;
+ } else if (color_space_almost_equal(1.0f, values[0]) &&
+ color_space_almost_equal(1.0f, values[1]) &&
+ color_space_almost_equal(1.0f, values[2])) {
+ gammaNamed = kLinear_SkGammaNamed;
+ }
+
+ if (kNonStandard_SkGammaNamed == gammaNamed) {
+ sk_sp<SkGammas> gammas = sk_sp<SkGammas>(new SkGammas());
+ gammas->fRedType = SkGammas::Type::kValue_Type;
+ gammas->fGreenType = SkGammas::Type::kValue_Type;
+ gammas->fBlueType = SkGammas::Type::kValue_Type;
+ gammas->fRedData.fValue = values[0];
+ gammas->fGreenData.fValue = values[1];
+ gammas->fBlueData.fValue = values[2];
+ return sk_sp<SkColorSpace>(new SkColorSpace_Base(nullptr, kNonStandard_SkGammaNamed, gammas,
+ toXYZD50, nullptr));
+ }
+
+ return SkColorSpace_Base::NewRGB(gammaNamed, toXYZD50);
+}
+
+sk_sp<SkColorSpace> SkColorSpace_Base::NewRGB(SkGammaNamed gammaNamed, const SkMatrix44& toXYZD50) {
+ switch (gammaNamed) {
+ case kSRGB_SkGammaNamed:
+ if (xyz_almost_equal(toXYZD50, gSRGB_toXYZD50)) {
+ return SkColorSpace::NewNamed(kSRGB_Named);
+ }
+ break;
+ case k2Dot2Curve_SkGammaNamed:
+ if (xyz_almost_equal(toXYZD50, gAdobeRGB_toXYZD50)) {
+ return SkColorSpace::NewNamed(kAdobeRGB_Named);
+ }
+ break;
+ case kLinear_SkGammaNamed:
+ if (xyz_almost_equal(toXYZD50, gSRGB_toXYZD50)) {
+ return SkColorSpace::NewNamed(kSRGBLinear_Named);
+ }
+ break;
+ case kNonStandard_SkGammaNamed:
+ // This is not allowed.
+ return nullptr;
+ default:
+ break;
+ }
+
+ return sk_sp<SkColorSpace>(new SkColorSpace_Base(gammaNamed, toXYZD50));
+}
+
+sk_sp<SkColorSpace> SkColorSpace::NewRGB(RenderTargetGamma gamma, const SkMatrix44& toXYZD50) {
+ switch (gamma) {
+ case kLinear_RenderTargetGamma:
+ return SkColorSpace_Base::NewRGB(kLinear_SkGammaNamed, toXYZD50);
+ case kSRGB_RenderTargetGamma:
+ return SkColorSpace_Base::NewRGB(kSRGB_SkGammaNamed, toXYZD50);
+ default:
+ return nullptr;
+ }
+}
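Because both NewRGB() overloads funnel through SkColorSpace_Base::NewRGB(), which compares the supplied matrix against the canonical gamuts with xyz_almost_equal(), rebuilding a well-known space by hand collapses to the shared named instance. A hedged sketch (assuming the public enums are spelled as declared on SkColorSpace):

    SkMatrix44 toXYZ(SkMatrix44::kUninitialized_Constructor);
    toXYZ.set3x3RowMajorf(gSRGB_toXYZD50);
    sk_sp<SkColorSpace> cs =
            SkColorSpace::NewRGB(SkColorSpace::kSRGB_RenderTargetGamma, toXYZ);
    // cs is the same object NewNamed(kSRGB_Named) hands out, not a fresh allocation.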
+
+static SkColorSpace* gAdobeRGB;
+static SkColorSpace* gSRGB;
+static SkColorSpace* gSRGBLinear;
+
+sk_sp<SkColorSpace> SkColorSpace::NewNamed(Named named) {
+ static SkOnce sRGBOnce;
+ static SkOnce adobeRGBOnce;
+ static SkOnce sRGBLinearOnce;
+
+ switch (named) {
+ case kSRGB_Named: {
+ sRGBOnce([] {
+ SkMatrix44 srgbToxyzD50(SkMatrix44::kUninitialized_Constructor);
+ srgbToxyzD50.set3x3RowMajorf(gSRGB_toXYZD50);
+
+ // Force the mutable type mask to be computed. This avoids races.
+ (void)srgbToxyzD50.getType();
+ gSRGB = new SkColorSpace_Base(kSRGB_SkGammaNamed, srgbToxyzD50);
+ });
+ return sk_ref_sp<SkColorSpace>(gSRGB);
+ }
+ case kAdobeRGB_Named: {
+ adobeRGBOnce([] {
+ SkMatrix44 adobergbToxyzD50(SkMatrix44::kUninitialized_Constructor);
+ adobergbToxyzD50.set3x3RowMajorf(gAdobeRGB_toXYZD50);
+
+ // Force the mutable type mask to be computed. This avoids races.
+ (void)adobergbToxyzD50.getType();
+ gAdobeRGB = new SkColorSpace_Base(k2Dot2Curve_SkGammaNamed, adobergbToxyzD50);
+ });
+ return sk_ref_sp<SkColorSpace>(gAdobeRGB);
+ }
+ case kSRGBLinear_Named: {
+ sRGBLinearOnce([] {
+ SkMatrix44 srgbToxyzD50(SkMatrix44::kUninitialized_Constructor);
+ srgbToxyzD50.set3x3RowMajorf(gSRGB_toXYZD50);
+
+ // Force the mutable type mask to be computed. This avoids races.
+ (void)srgbToxyzD50.getType();
+ gSRGBLinear = new SkColorSpace_Base(kLinear_SkGammaNamed, srgbToxyzD50);
+ });
+ return sk_ref_sp<SkColorSpace>(gSRGBLinear);
+ }
+ default:
+ break;
+ }
+ return nullptr;
+}
+
+sk_sp<SkColorSpace> SkColorSpace::makeLinearGamma() {
+ if (this->gammaIsLinear()) {
+ return sk_ref_sp(this);
+ }
+ return SkColorSpace_Base::NewRGB(kLinear_SkGammaNamed, as_CSB(this)->fToXYZD50);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkColorSpace::gammaCloseToSRGB() const {
+ return kSRGB_SkGammaNamed == as_CSB(this)->fGammaNamed ||
+ k2Dot2Curve_SkGammaNamed == as_CSB(this)->fGammaNamed;
+}
+
+bool SkColorSpace::gammaIsLinear() const {
+ return kLinear_SkGammaNamed == as_CSB(this)->fGammaNamed;
+}
+
+const SkMatrix44& SkColorSpace_Base::fromXYZD50() const {
+ fFromXYZOnce([this] {
+ if (!fToXYZD50.invert(&fFromXYZD50)) {
+ // If a client gives us a dst gamut with a transform that we can't invert, we will
+ // simply give them back a transform to sRGB gamut.
+ SkDEBUGFAIL("Non-invertible XYZ matrix, defaulting to sRGB");
+ SkMatrix44 srgbToxyzD50(SkMatrix44::kUninitialized_Constructor);
+ srgbToxyzD50.set3x3RowMajorf(gSRGB_toXYZD50);
+ srgbToxyzD50.invert(&fFromXYZD50);
+ }
+ });
+ return fFromXYZD50;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+enum Version {
+ k0_Version, // Initial version, header + flags for matrix and profile
+};
+
+struct ColorSpaceHeader {
+ /**
+ * If kMatrix_Flag is set, we will write 12 floats after the header.
+ * Should not be set at the same time as the kICC_Flag or kFloatGamma_Flag.
+ */
+ static constexpr uint8_t kMatrix_Flag = 1 << 0;
+
+ /**
+ * If kICC_Flag is set, we will write an ICC profile after the header.
+ * The ICC profile will be written as a uint32 size, followed immediately
+ * by the data (padded to 4 bytes).
+ * Should not be set at the same time as the kMatrix_Flag or kFloatGamma_Flag.
+ */
+ static constexpr uint8_t kICC_Flag = 1 << 1;
+
+ /**
+ * If kFloatGamma_Flag is set, we will write 15 floats after the header.
+ * The first three are the gamma values, and the next twelve are the
+ * matrix.
+ * Should not be set at the same time as the kICC_Flag or kMatrix_Flag.
+ */
+ static constexpr uint8_t kFloatGamma_Flag = 1 << 2;
+
+ static ColorSpaceHeader Pack(Version version, uint8_t named, uint8_t gammaNamed, uint8_t flags)
+ {
+ ColorSpaceHeader header;
+
+ SkASSERT(k0_Version == version);
+ header.fVersion = (uint8_t) version;
+
+ SkASSERT(named <= SkColorSpace::kSRGBLinear_Named);
+ header.fNamed = (uint8_t) named;
+
+ SkASSERT(gammaNamed <= kNonStandard_SkGammaNamed);
+ header.fGammaNamed = (uint8_t) gammaNamed;
+
+ SkASSERT(flags <= kFloatGamma_Flag);
+ header.fFlags = flags;
+ return header;
+ }
+
+ uint8_t fVersion; // Always zero
+ uint8_t fNamed; // Must be a SkColorSpace::Named
+ uint8_t fGammaNamed; // Must be a SkGammaNamed
+ uint8_t fFlags; // Some combination of the flags listed above
+};
+
+size_t SkColorSpace::writeToMemory(void* memory) const {
+ // Start by trying the serialization fast path. If we haven't saved ICC profile data,
+ // we must have a profile that we can serialize easily.
+ if (!as_CSB(this)->fProfileData) {
+ // If we have a named profile, only write the enum.
+ if (this == gSRGB) {
+ if (memory) {
+ *((ColorSpaceHeader*) memory) =
+ ColorSpaceHeader::Pack(k0_Version, kSRGB_Named,
+ as_CSB(this)->fGammaNamed, 0);
+ }
+ return sizeof(ColorSpaceHeader);
+ } else if (this == gAdobeRGB) {
+ if (memory) {
+ *((ColorSpaceHeader*) memory) =
+ ColorSpaceHeader::Pack(k0_Version, kAdobeRGB_Named,
+ as_CSB(this)->fGammaNamed, 0);
+ }
+ return sizeof(ColorSpaceHeader);
+ } else if (this == gSRGBLinear) {
+ if (memory) {
+ *((ColorSpaceHeader*)memory) =
+ ColorSpaceHeader::Pack(k0_Version, kSRGBLinear_Named,
+ as_CSB(this)->fGammaNamed, 0);
+ }
+ return sizeof(ColorSpaceHeader);
+ }
+
+ // If we have a named gamma, write the enum and the matrix.
+ switch (as_CSB(this)->fGammaNamed) {
+ case kSRGB_SkGammaNamed:
+ case k2Dot2Curve_SkGammaNamed:
+ case kLinear_SkGammaNamed: {
+ if (memory) {
+ *((ColorSpaceHeader*) memory) =
+ ColorSpaceHeader::Pack(k0_Version, 0, as_CSB(this)->fGammaNamed,
+ ColorSpaceHeader::kMatrix_Flag);
+ memory = SkTAddOffset<void>(memory, sizeof(ColorSpaceHeader));
+ as_CSB(this)->fToXYZD50.as3x4RowMajorf((float*) memory);
+ }
+ return sizeof(ColorSpaceHeader) + 12 * sizeof(float);
+ }
+ default:
+ // Otherwise, write the gamma values and the matrix.
+ if (memory) {
+ *((ColorSpaceHeader*) memory) =
+ ColorSpaceHeader::Pack(k0_Version, 0, as_CSB(this)->fGammaNamed,
+ ColorSpaceHeader::kFloatGamma_Flag);
+ memory = SkTAddOffset<void>(memory, sizeof(ColorSpaceHeader));
+
+ const SkGammas* gammas = as_CSB(this)->gammas();
+ SkASSERT(gammas);
+ SkASSERT(SkGammas::Type::kValue_Type == gammas->fRedType &&
+ SkGammas::Type::kValue_Type == gammas->fGreenType &&
+ SkGammas::Type::kValue_Type == gammas->fBlueType);
+ *(((float*) memory) + 0) = gammas->fRedData.fValue;
+ *(((float*) memory) + 1) = gammas->fGreenData.fValue;
+ *(((float*) memory) + 2) = gammas->fBlueData.fValue;
+ memory = SkTAddOffset<void>(memory, 3 * sizeof(float));
+
+ as_CSB(this)->fToXYZD50.as3x4RowMajorf((float*) memory);
+ }
+ return sizeof(ColorSpaceHeader) + 15 * sizeof(float);
+ }
+ }
+
+ // Otherwise, serialize the ICC data.
+ size_t profileSize = as_CSB(this)->fProfileData->size();
+ if (SkAlign4(profileSize) != (uint32_t) SkAlign4(profileSize)) {
+ return 0;
+ }
+
+ if (memory) {
+ *((ColorSpaceHeader*) memory) = ColorSpaceHeader::Pack(k0_Version, 0,
+ kNonStandard_SkGammaNamed,
+ ColorSpaceHeader::kICC_Flag);
+ memory = SkTAddOffset<void>(memory, sizeof(ColorSpaceHeader));
+
+ *((uint32_t*) memory) = (uint32_t) SkAlign4(profileSize);
+ memory = SkTAddOffset<void>(memory, sizeof(uint32_t));
+
+ memcpy(memory, as_CSB(this)->fProfileData->data(), profileSize);
+ memset(SkTAddOffset<void>(memory, profileSize), 0, SkAlign4(profileSize) - profileSize);
+ }
+ return sizeof(ColorSpaceHeader) + sizeof(uint32_t) + SkAlign4(profileSize);
+}
+
+sk_sp<SkData> SkColorSpace::serialize() const {
+ size_t size = this->writeToMemory(nullptr);
+ if (0 == size) {
+ return nullptr;
+ }
+
+ sk_sp<SkData> data = SkData::MakeUninitialized(size);
+ this->writeToMemory(data->writable_data());
+ return data;
+}
+
+sk_sp<SkColorSpace> SkColorSpace::Deserialize(const void* data, size_t length) {
+ if (length < sizeof(ColorSpaceHeader)) {
+ return nullptr;
+ }
+
+ ColorSpaceHeader header = *((const ColorSpaceHeader*) data);
+ data = SkTAddOffset<const void>(data, sizeof(ColorSpaceHeader));
+ length -= sizeof(ColorSpaceHeader);
+ if (0 == header.fFlags) {
+ return NewNamed((Named) header.fNamed);
+ }
+
+ switch ((SkGammaNamed) header.fGammaNamed) {
+ case kSRGB_SkGammaNamed:
+ case k2Dot2Curve_SkGammaNamed:
+ case kLinear_SkGammaNamed: {
+ if (ColorSpaceHeader::kMatrix_Flag != header.fFlags || length < 12 * sizeof(float)) {
+ return nullptr;
+ }
+
+ SkMatrix44 toXYZ(SkMatrix44::kUninitialized_Constructor);
+ toXYZ.set3x4RowMajorf((const float*) data);
+ return SkColorSpace_Base::NewRGB((SkGammaNamed) header.fGammaNamed, toXYZ);
+ }
+ default:
+ break;
+ }
+
+ switch (header.fFlags) {
+ case ColorSpaceHeader::kICC_Flag: {
+ if (length < sizeof(uint32_t)) {
+ return nullptr;
+ }
+
+ uint32_t profileSize = *((uint32_t*) data);
+ data = SkTAddOffset<const void>(data, sizeof(uint32_t));
+ length -= sizeof(uint32_t);
+ if (length < profileSize) {
+ return nullptr;
+ }
+
+ return NewICC(data, profileSize);
+ }
+ case ColorSpaceHeader::kFloatGamma_Flag: {
+ if (length < 15 * sizeof(float)) {
+ return nullptr;
+ }
+
+ float gammas[3];
+ gammas[0] = *(((const float*) data) + 0);
+ gammas[1] = *(((const float*) data) + 1);
+ gammas[2] = *(((const float*) data) + 2);
+ data = SkTAddOffset<const void>(data, 3 * sizeof(float));
+
+ SkMatrix44 toXYZ(SkMatrix44::kUninitialized_Constructor);
+ toXYZ.set3x4RowMajorf((const float*) data);
+ return SkColorSpace_Base::NewRGB(gammas, toXYZ);
+ }
+ default:
+ return nullptr;
+ }
+}
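A hedged round-trip sketch tying serialize() and Deserialize() together: a named space with no ICC blob writes only the ColorSpaceHeader described above, and reading it back takes the header.fFlags == 0 path straight to NewNamed().

    sk_sp<SkColorSpace> srgb = SkColorSpace::NewNamed(SkColorSpace::kSRGB_Named);
    sk_sp<SkData> bytes = srgb->serialize();            // just sizeof(ColorSpaceHeader)
    sk_sp<SkColorSpace> back =
            SkColorSpace::Deserialize(bytes->data(), bytes->size());
    SkASSERT(SkColorSpace::Equals(srgb.get(), back.get()));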
+
+bool SkColorSpace::Equals(const SkColorSpace* src, const SkColorSpace* dst) {
+ if (src == dst) {
+ return true;
+ }
+
+ if (!src || !dst) {
+ return false;
+ }
+
+ SkData* srcData = as_CSB(src)->fProfileData.get();
+ SkData* dstData = as_CSB(dst)->fProfileData.get();
+ if (srcData || dstData) {
+ if (srcData && dstData) {
+ return srcData->size() == dstData->size() &&
+ 0 == memcmp(srcData->data(), dstData->data(), srcData->size());
+ }
+
+ return false;
+ }
+
+ // It's important to check fProfileData before named gammas. Some profiles may have named
+ // gammas, but also include other wacky features that cause us to save the data.
+ switch (as_CSB(src)->fGammaNamed) {
+ case kSRGB_SkGammaNamed:
+ case k2Dot2Curve_SkGammaNamed:
+ case kLinear_SkGammaNamed:
+ return (as_CSB(src)->fGammaNamed == as_CSB(dst)->fGammaNamed) &&
+ (as_CSB(src)->fToXYZD50 == as_CSB(dst)->fToXYZD50);
+ default:
+ if (as_CSB(src)->fGammaNamed != as_CSB(dst)->fGammaNamed) {
+ return false;
+ }
+
+ // It is unlikely that we will reach this case.
+ sk_sp<SkData> srcData = src->serialize();
+ sk_sp<SkData> dstData = dst->serialize();
+ return srcData->size() == dstData->size() &&
+ 0 == memcmp(srcData->data(), dstData->data(), srcData->size());
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkColorSpacePriv.h b/gfx/skia/skia/src/core/SkColorSpacePriv.h
new file mode 100644
index 000000000..e7c8aaa10
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorSpacePriv.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#define SkColorSpacePrintf(...)
+
+inline bool color_space_almost_equal(float a, float b) {
+ return SkTAbs(a - b) < 0.01f;
+}
diff --git a/gfx/skia/skia/src/core/SkColorSpaceXform.cpp b/gfx/skia/skia/src/core/SkColorSpaceXform.cpp
new file mode 100644
index 000000000..f7a0239d2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorSpaceXform.cpp
@@ -0,0 +1,1440 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkColorPriv.h"
+#include "SkColorSpace_Base.h"
+#include "SkColorSpacePriv.h"
+#include "SkColorSpaceXform.h"
+#include "SkHalf.h"
+#include "SkOpts.h"
+#include "SkSRGB.h"
+
+static constexpr float sk_linear_from_2dot2[256] = {
+ 0.000000000000000000f, 0.000005077051900662f, 0.000023328004666099f, 0.000056921765712193f,
+ 0.000107187362341244f, 0.000175123977503027f, 0.000261543754548491f, 0.000367136269815943f,
+ 0.000492503787191433f, 0.000638182842167022f, 0.000804658499513058f, 0.000992374304074325f,
+ 0.001201739522438400f, 0.001433134589671860f, 0.001686915316789280f, 0.001963416213396470f,
+ 0.002262953160706430f, 0.002585825596234170f, 0.002932318323938360f, 0.003302703032003640f,
+ 0.003697239578900130f, 0.004116177093282750f, 0.004559754922526020f, 0.005028203456855540f,
+ 0.005521744850239660f, 0.006040593654849810f, 0.006584957382581690f, 0.007155037004573030f,
+ 0.007751027397660610f, 0.008373117745148580f, 0.009021491898012130f, 0.009696328701658230f,
+ 0.010397802292555300f, 0.011126082368383200f, 0.011881334434813700f, 0.012663720031582100f,
+ 0.013473396940142600f, 0.014310519374884100f, 0.015175238159625200f, 0.016067700890886900f,
+ 0.016988052089250000f, 0.017936433339950200f, 0.018912983423721500f, 0.019917838438785700f,
+ 0.020951131914781100f, 0.022012994919336500f, 0.023103556157921400f, 0.024222942067534200f,
+ 0.025371276904734600f, 0.026548682828472900f, 0.027755279978126000f, 0.028991186547107800f,
+ 0.030256518852388700f, 0.031551391400226400f, 0.032875916948383800f, 0.034230206565082000f,
+ 0.035614369684918800f, 0.037028514161960200f, 0.038472746320194600f, 0.039947171001525600f,
+ 0.041451891611462500f, 0.042987010162657100f, 0.044552627316421400f, 0.046148842422351000f,
+ 0.047775753556170600f, 0.049433457555908000f, 0.051122050056493400f, 0.052841625522879000f,
+ 0.054592277281760300f, 0.056374097551979800f, 0.058187177473685400f, 0.060031607136313200f,
+ 0.061907475605455800f, 0.063814870948677200f, 0.065753880260330100f, 0.067724589685424300f,
+ 0.069727084442598800f, 0.071761448846239100f, 0.073827766327784600f, 0.075926119456264800f,
+ 0.078056589958101900f, 0.080219258736215100f, 0.082414205888459200f, 0.084641510725429500f,
+ 0.086901251787660300f, 0.089193506862247800f, 0.091518352998919500f, 0.093875866525577800f,
+ 0.096266123063339700f, 0.098689197541094500f, 0.101145164209600000f, 0.103634096655137000f,
+ 0.106156067812744000f, 0.108711149979039000f, 0.111299414824660000f, 0.113920933406333000f,
+ 0.116575776178572000f, 0.119264013005047000f, 0.121985713169619000f, 0.124740945387051000f,
+ 0.127529777813422000f, 0.130352278056244000f, 0.133208513184300000f, 0.136098549737202000f,
+ 0.139022453734703000f, 0.141980290685736000f, 0.144972125597231000f, 0.147998022982685000f,
+ 0.151058046870511000f, 0.154152260812165000f, 0.157280727890073000f, 0.160443510725344000f,
+ 0.163640671485290000f, 0.166872271890766000f, 0.170138373223312000f, 0.173439036332135000f,
+ 0.176774321640903000f, 0.180144289154390000f, 0.183548998464951000f, 0.186988508758844000f,
+ 0.190462878822409000f, 0.193972167048093000f, 0.197516431440340000f, 0.201095729621346000f,
+ 0.204710118836677000f, 0.208359655960767000f, 0.212044397502288000f, 0.215764399609395000f,
+ 0.219519718074868000f, 0.223310408341127000f, 0.227136525505149000f, 0.230998124323267000f,
+ 0.234895259215880000f, 0.238827984272048000f, 0.242796353254002000f, 0.246800419601550000f,
+ 0.250840236436400000f, 0.254915856566385000f, 0.259027332489606000f, 0.263174716398492000f,
+ 0.267358060183772000f, 0.271577415438375000f, 0.275832833461245000f, 0.280124365261085000f,
+ 0.284452061560024000f, 0.288815972797219000f, 0.293216149132375000f, 0.297652640449211000f,
+ 0.302125496358853000f, 0.306634766203158000f, 0.311180499057984000f, 0.315762743736397000f,
+ 0.320381548791810000f, 0.325036962521076000f, 0.329729032967515000f, 0.334457807923889000f,
+ 0.339223334935327000f, 0.344025661302187000f, 0.348864834082879000f, 0.353740900096629000f,
+ 0.358653905926199000f, 0.363603897920553000f, 0.368590922197487000f, 0.373615024646202000f,
+ 0.378676250929840000f, 0.383774646487975000f, 0.388910256539059000f, 0.394083126082829000f,
+ 0.399293299902674000f, 0.404540822567962000f, 0.409825738436323000f, 0.415148091655907000f,
+ 0.420507926167587000f, 0.425905285707146000f, 0.431340213807410000f, 0.436812753800359000f,
+ 0.442322948819202000f, 0.447870841800410000f, 0.453456475485731000f, 0.459079892424160000f,
+ 0.464741134973889000f, 0.470440245304218000f, 0.476177265397440000f, 0.481952237050698000f,
+ 0.487765201877811000f, 0.493616201311074000f, 0.499505276603030000f, 0.505432468828216000f,
+ 0.511397818884880000f, 0.517401367496673000f, 0.523443155214325000f, 0.529523222417277000f,
+ 0.535641609315311000f, 0.541798355950137000f, 0.547993502196972000f, 0.554227087766085000f,
+ 0.560499152204328000f, 0.566809734896638000f, 0.573158875067523000f, 0.579546611782525000f,
+ 0.585972983949661000f, 0.592438030320847000f, 0.598941789493296000f, 0.605484299910907000f,
+ 0.612065599865624000f, 0.618685727498780000f, 0.625344720802427000f, 0.632042617620641000f,
+ 0.638779455650817000f, 0.645555272444935000f, 0.652370105410821000f, 0.659223991813387000f,
+ 0.666116968775851000f, 0.673049073280942000f, 0.680020342172095000f, 0.687030812154625000f,
+ 0.694080519796882000f, 0.701169501531402000f, 0.708297793656032000f, 0.715465432335048000f,
+ 0.722672453600255000f, 0.729918893352071000f, 0.737204787360605000f, 0.744530171266715000f,
+ 0.751895080583051000f, 0.759299550695091000f, 0.766743616862161000f, 0.774227314218442000f,
+ 0.781750677773962000f, 0.789313742415586000f, 0.796916542907978000f, 0.804559113894567000f,
+ 0.812241489898490000f, 0.819963705323528000f, 0.827725794455034000f, 0.835527791460841000f,
+ 0.843369730392169000f, 0.851251645184515000f, 0.859173569658532000f, 0.867135537520905000f,
+ 0.875137582365205000f, 0.883179737672745000f, 0.891262036813419000f, 0.899384513046529000f,
+ 0.907547199521614000f, 0.915750129279253000f, 0.923993335251873000f, 0.932276850264543000f,
+ 0.940600707035753000f, 0.948964938178195000f, 0.957369576199527000f, 0.965814653503130000f,
+ 0.974300202388861000f, 0.982826255053791000f, 0.991392843592940000f, 1.000000000000000000f,
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static void build_table_linear_from_gamma(float* outTable, float exponent) {
+ for (float x = 0.0f; x <= 1.0f; x += (1.0f/255.0f)) {
+ *outTable++ = powf(x, exponent);
+ }
+}
+
+// Interpolating lookup in a variably sized table.
+static float interp_lut(float input, const float* table, int tableSize) {
+ float index = input * (tableSize - 1);
+ float diff = index - sk_float_floor2int(index);
+ return table[(int) sk_float_floor2int(index)] * (1.0f - diff) +
+ table[(int) sk_float_ceil2int(index)] * diff;
+}
+
+// outTable is always 256 entries, inTable may be larger or smaller.
+static void build_table_linear_from_gamma(float* outTable, const float* inTable,
+ int inTableSize) {
+ if (256 == inTableSize) {
+ memcpy(outTable, inTable, sizeof(float) * 256);
+ return;
+ }
+
+ for (float x = 0.0f; x <= 1.0f; x += (1.0f/255.0f)) {
+ *outTable++ = interp_lut(x, inTable, inTableSize);
+ }
+}
+
+static void build_table_linear_from_gamma(float* outTable, float g, float a, float b, float c,
+ float d, float e, float f) {
+ // Y = (aX + b)^g + c for X >= d
+ // Y = eX + f otherwise
+ for (float x = 0.0f; x <= 1.0f; x += (1.0f/255.0f)) {
+ if (x >= d) {
+ *outTable++ = powf(a * x + b, g) + c;
+ } else {
+ *outTable++ = e * x + f;
+ }
+ }
+}
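This parametric overload covers two-piece transfer functions such as sRGB: the parameters passed for kSRGB_SkGammaNamed later in this file reproduce the standard curve, linear = ((x + 0.055) / 1.055)^2.4 above the 0.04045 breakpoint and x / 12.92 below it. For reference:

    float srgbToLinear[256];   // the callers in this file build 256-entry tables
    build_table_linear_from_gamma(srgbToLinear, 2.4f, 1.0f / 1.055f, 0.055f / 1.055f,
                                  0.0f, 0.04045f, 1.0f / 12.92f, 0.0f);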
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Expand range from 0-1 to 0-255, then convert.
+static uint8_t clamp_normalized_float_to_byte(float v) {
+ // The ordering of the logic is a little strange here in order
+ // to make sure we convert NaNs to 0.
+ v = v * 255.0f;
+ if (v >= 254.5f) {
+ return 255;
+ } else if (v >= 0.5f) {
+ return (uint8_t) (v + 0.5f);
+ } else {
+ return 0;
+ }
+}
+
+static const int kDstGammaTableSize =
+ SkColorSpaceXform_Base<kTable_SrcGamma, kTable_DstGamma, kNone_ColorSpaceMatch>
+ ::kDstGammaTableSize;
+
+static void build_table_linear_to_gamma(uint8_t* outTable, float exponent) {
+ float toGammaExp = 1.0f / exponent;
+
+ for (int i = 0; i < kDstGammaTableSize; i++) {
+ float x = ((float) i) * (1.0f / ((float) (kDstGammaTableSize - 1)));
+ outTable[i] = clamp_normalized_float_to_byte(powf(x, toGammaExp));
+ }
+}
+
+// Inverse table lookup. Ex: what index corresponds to the input value? This will
+// have strange results when the table is non-increasing. But any sane gamma
+// function will be increasing.
+static float inverse_interp_lut(float input, const float* table, int tableSize) {
+ if (input <= table[0]) {
+ return table[0];
+ } else if (input >= table[tableSize - 1]) {
+ return 1.0f;
+ }
+
+ for (int i = 1; i < tableSize; i++) {
+ if (table[i] >= input) {
+ // We are guaranteed that input is greater than table[i - 1].
+ float diff = input - table[i - 1];
+ float distance = table[i] - table[i - 1];
+ float index = (i - 1) + diff / distance;
+ return index / (tableSize - 1);
+ }
+ }
+
+ // Should be unreachable, since we'll return before the loop if input is
+ // larger than the last entry.
+ SkASSERT(false);
+ return 0.0f;
+}
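A worked example of the inverse lookup (illustrative values): with a three-entry forward table {0, 0.25, 1.0}, an input of 0.5 lands between table[1] and table[2], so diff = 0.25, distance = 0.75, index = 1 + 0.25/0.75, and the normalized result is about 0.667.

    const float table[3] = { 0.0f, 0.25f, 1.0f };
    float x = inverse_interp_lut(0.5f, table, 3);   // ~0.6667f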
+
+static void build_table_linear_to_gamma(uint8_t* outTable, const float* inTable,
+ int inTableSize) {
+ for (int i = 0; i < kDstGammaTableSize; i++) {
+ float x = ((float) i) * (1.0f / ((float) (kDstGammaTableSize - 1)));
+ float y = inverse_interp_lut(x, inTable, inTableSize);
+ outTable[i] = clamp_normalized_float_to_byte(y);
+ }
+}
+
+static float inverse_parametric(float x, float g, float a, float b, float c, float d, float e,
+ float f) {
+ // We need to take the inverse of the following piecewise function.
+ // Y = (aX + b)^g + c for X >= d
+ // Y = eX + f otherwise
+
+ // Assume that the gamma function is continuous, or this won't make much sense anyway.
+ // Plug in |d| to the first equation to calculate the new piecewise interval.
+ // Then simply use the inverse of the original functions.
+ float interval = e * d + f;
+ if (x < interval) {
+ // X = (Y - F) / E
+ if (0.0f == e) {
+ // The gamma curve for this segment is constant, so the inverse is undefined.
+ // Since this is the lower segment, guess zero.
+ return 0.0f;
+ }
+
+ return (x - f) / e;
+ }
+
+ // X = ((Y - C)^(1 / G) - B) / A
+ if (0.0f == a || 0.0f == g) {
+ // The gamma curve for this segment is constant, so the inverse is undefined.
+ // Since this is the upper segment, guess one.
+ return 1.0f;
+ }
+
+ return (powf(x - c, 1.0f / g) - b) / a;
+}
+
+static void build_table_linear_to_gamma(uint8_t* outTable, float g, float a,
+ float b, float c, float d, float e, float f) {
+ for (int i = 0; i < kDstGammaTableSize; i++) {
+ float x = ((float) i) * (1.0f / ((float) (kDstGammaTableSize - 1)));
+ float y = inverse_parametric(x, g, a, b, c, d, e, f);
+ outTable[i] = clamp_normalized_float_to_byte(y);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+template <typename T>
+struct GammaFns {
+ const T* fSRGBTable;
+ const T* f2Dot2Table;
+ void (*fBuildFromValue)(T*, float);
+ void (*fBuildFromTable)(T*, const float*, int);
+ void (*fBuildFromParam)(T*, float, float, float, float, float, float, float);
+};
+
+static const GammaFns<float> kToLinear {
+ sk_linear_from_srgb,
+ sk_linear_from_2dot2,
+ &build_table_linear_from_gamma,
+ &build_table_linear_from_gamma,
+ &build_table_linear_from_gamma,
+};
+
+static const GammaFns<uint8_t> kFromLinear {
+ nullptr,
+ nullptr,
+ &build_table_linear_to_gamma,
+ &build_table_linear_to_gamma,
+ &build_table_linear_to_gamma,
+};
+
+// Build tables to transform src gamma to linear.
+template <typename T>
+static void build_gamma_tables(const T* outGammaTables[3], T* gammaTableStorage, int gammaTableSize,
+ SkColorSpace* space, const GammaFns<T>& fns, bool gammasAreMatching)
+{
+ switch (as_CSB(space)->gammaNamed()) {
+ case kSRGB_SkGammaNamed:
+ outGammaTables[0] = outGammaTables[1] = outGammaTables[2] = fns.fSRGBTable;
+ break;
+ case k2Dot2Curve_SkGammaNamed:
+ outGammaTables[0] = outGammaTables[1] = outGammaTables[2] = fns.f2Dot2Table;
+ break;
+ case kLinear_SkGammaNamed:
+ outGammaTables[0] = outGammaTables[1] = outGammaTables[2] = nullptr;
+ break;
+ default: {
+ const SkGammas* gammas = as_CSB(space)->gammas();
+ SkASSERT(gammas);
+
+ auto build_table = [=](int i) {
+ if (gammas->isNamed(i)) {
+ switch (gammas->data(i).fNamed) {
+ case kSRGB_SkGammaNamed:
+ (*fns.fBuildFromParam)(&gammaTableStorage[i * gammaTableSize], 2.4f,
+ (1.0f / 1.055f), (0.055f / 1.055f), 0.0f,
+ 0.04045f, (1.0f / 12.92f), 0.0f);
+ outGammaTables[i] = &gammaTableStorage[i * gammaTableSize];
+ break;
+ case k2Dot2Curve_SkGammaNamed:
+ (*fns.fBuildFromValue)(&gammaTableStorage[i * gammaTableSize], 2.2f);
+ outGammaTables[i] = &gammaTableStorage[i * gammaTableSize];
+ break;
+ case kLinear_SkGammaNamed:
+ (*fns.fBuildFromValue)(&gammaTableStorage[i * gammaTableSize], 1.0f);
+ outGammaTables[i] = &gammaTableStorage[i * gammaTableSize];
+ break;
+ default:
+ SkASSERT(false);
+ break;
+ }
+ } else if (gammas->isValue(i)) {
+ (*fns.fBuildFromValue)(&gammaTableStorage[i * gammaTableSize],
+ gammas->data(i).fValue);
+ outGammaTables[i] = &gammaTableStorage[i * gammaTableSize];
+ } else if (gammas->isTable(i)) {
+ (*fns.fBuildFromTable)(&gammaTableStorage[i * gammaTableSize], gammas->table(i),
+ gammas->data(i).fTable.fSize);
+ outGammaTables[i] = &gammaTableStorage[i * gammaTableSize];
+ } else {
+ SkASSERT(gammas->isParametric(i));
+ const SkGammas::Params& params = gammas->params(i);
+ (*fns.fBuildFromParam)(&gammaTableStorage[i * gammaTableSize], params.fG,
+ params.fA, params.fB, params.fC, params.fD, params.fE,
+ params.fF);
+ outGammaTables[i] = &gammaTableStorage[i * gammaTableSize];
+ }
+ };
+
+ if (gammasAreMatching) {
+ build_table(0);
+ outGammaTables[1] = outGammaTables[0];
+ outGammaTables[2] = outGammaTables[0];
+ } else {
+ build_table(0);
+ build_table(1);
+ build_table(2);
+ }
+
+ break;
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static inline bool is_almost_identity(const SkMatrix44& srcToDst) {
+ for (int i = 0; i < 4; i++) {
+ for (int j = 0; j < 4; j++) {
+ float expected = (i == j) ? 1.0f : 0.0f;
+ if (!color_space_almost_equal(srcToDst.getFloat(i,j), expected)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+std::unique_ptr<SkColorSpaceXform> SkColorSpaceXform::New(SkColorSpace* srcSpace,
+ SkColorSpace* dstSpace) {
+ if (!srcSpace || !dstSpace) {
+ // Invalid input
+ return nullptr;
+ }
+
+ ColorSpaceMatch csm = kNone_ColorSpaceMatch;
+ SkMatrix44 srcToDst(SkMatrix44::kUninitialized_Constructor);
+ if (SkColorSpace::Equals(srcSpace, dstSpace)) {
+ srcToDst.setIdentity();
+ csm = kFull_ColorSpaceMatch;
+ } else {
+ srcToDst.setConcat(as_CSB(dstSpace)->fromXYZD50(), as_CSB(srcSpace)->toXYZD50());
+
+ if (is_almost_identity(srcToDst)) {
+ srcToDst.setIdentity();
+ csm = kGamut_ColorSpaceMatch;
+ }
+ }
+
+ switch (csm) {
+ case kNone_ColorSpaceMatch:
+ switch (as_CSB(dstSpace)->gammaNamed()) {
+ case kSRGB_SkGammaNamed:
+ if (srcSpace->gammaIsLinear()) {
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kLinear_SrcGamma, kSRGB_DstGamma, kNone_ColorSpaceMatch>
+ (srcSpace, srcToDst, dstSpace));
+ } else {
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kTable_SrcGamma, kSRGB_DstGamma, kNone_ColorSpaceMatch>
+ (srcSpace, srcToDst, dstSpace));
+ }
+ case k2Dot2Curve_SkGammaNamed:
+ if (srcSpace->gammaIsLinear()) {
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kLinear_SrcGamma, k2Dot2_DstGamma, kNone_ColorSpaceMatch>
+ (srcSpace, srcToDst, dstSpace));
+ } else {
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kTable_SrcGamma, k2Dot2_DstGamma, kNone_ColorSpaceMatch>
+ (srcSpace, srcToDst, dstSpace));
+ }
+ case kLinear_SkGammaNamed:
+ if (srcSpace->gammaIsLinear()) {
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kLinear_SrcGamma, kLinear_DstGamma, kNone_ColorSpaceMatch>
+ (srcSpace, srcToDst, dstSpace));
+ } else {
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kTable_SrcGamma, kLinear_DstGamma, kNone_ColorSpaceMatch>
+ (srcSpace, srcToDst, dstSpace));
+ }
+ default:
+ if (srcSpace->gammaIsLinear()) {
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kLinear_SrcGamma, kTable_DstGamma, kNone_ColorSpaceMatch>
+ (srcSpace, srcToDst, dstSpace));
+ } else {
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kTable_SrcGamma, kTable_DstGamma, kNone_ColorSpaceMatch>
+ (srcSpace, srcToDst, dstSpace));
+ }
+ }
+ case kGamut_ColorSpaceMatch:
+ switch (as_CSB(dstSpace)->gammaNamed()) {
+ case kSRGB_SkGammaNamed:
+ if (srcSpace->gammaIsLinear()) {
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kLinear_SrcGamma, kSRGB_DstGamma, kGamut_ColorSpaceMatch>
+ (srcSpace, srcToDst, dstSpace));
+ } else {
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kTable_SrcGamma, kSRGB_DstGamma, kGamut_ColorSpaceMatch>
+ (srcSpace, srcToDst, dstSpace));
+ }
+ case k2Dot2Curve_SkGammaNamed:
+ if (srcSpace->gammaIsLinear()) {
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kLinear_SrcGamma, k2Dot2_DstGamma, kGamut_ColorSpaceMatch>
+ (srcSpace, srcToDst, dstSpace));
+ } else {
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kTable_SrcGamma, k2Dot2_DstGamma, kGamut_ColorSpaceMatch>
+ (srcSpace, srcToDst, dstSpace));
+ }
+ case kLinear_SkGammaNamed:
+ if (srcSpace->gammaIsLinear()) {
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kLinear_SrcGamma, kLinear_DstGamma, kGamut_ColorSpaceMatch>
+ (srcSpace, srcToDst, dstSpace));
+ } else {
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kTable_SrcGamma, kLinear_DstGamma, kGamut_ColorSpaceMatch>
+ (srcSpace, srcToDst, dstSpace));
+ }
+ default:
+ if (srcSpace->gammaIsLinear()) {
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kLinear_SrcGamma, kTable_DstGamma, kGamut_ColorSpaceMatch>
+ (srcSpace, srcToDst, dstSpace));
+ } else {
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kTable_SrcGamma, kTable_DstGamma, kGamut_ColorSpaceMatch>
+ (srcSpace, srcToDst, dstSpace));
+ }
+ }
+ case kFull_ColorSpaceMatch:
+ switch (as_CSB(dstSpace)->gammaNamed()) {
+ case kSRGB_SkGammaNamed:
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kTable_SrcGamma, kSRGB_DstGamma, kFull_ColorSpaceMatch>
+ (srcSpace, srcToDst, dstSpace));
+ case k2Dot2Curve_SkGammaNamed:
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kTable_SrcGamma, k2Dot2_DstGamma, kFull_ColorSpaceMatch>
+ (srcSpace, srcToDst, dstSpace));
+ case kLinear_SkGammaNamed:
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kLinear_SrcGamma, kLinear_DstGamma, kFull_ColorSpaceMatch>
+ (srcSpace, srcToDst, dstSpace));
+ default:
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kTable_SrcGamma, kTable_DstGamma, kFull_ColorSpaceMatch>
+ (srcSpace, srcToDst, dstSpace));
+ }
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static float byte_to_float(uint8_t byte) {
+ return ((float) byte) * (1.0f / 255.0f);
+}
+
+// Clamp to the 0-1 range.
+static float clamp_normalized_float(float v) {
+ if (v > 1.0f) {
+ return 1.0f;
+ } else if ((v < 0.0f) || sk_float_isnan(v)) {
+ return 0.0f;
+ } else {
+ return v;
+ }
+}
+
+static void interp_3d_clut(float dst[3], float src[3], const SkColorLookUpTable* colorLUT) {
+ // Call the src components x, y, and z.
+ uint8_t maxX = colorLUT->fGridPoints[0] - 1;
+ uint8_t maxY = colorLUT->fGridPoints[1] - 1;
+ uint8_t maxZ = colorLUT->fGridPoints[2] - 1;
+
+ // An approximate index into each of the three dimensions of the table.
+ float x = src[0] * maxX;
+ float y = src[1] * maxY;
+ float z = src[2] * maxZ;
+
+ // This gives us the low index for our interpolation.
+ int ix = sk_float_floor2int(x);
+ int iy = sk_float_floor2int(y);
+ int iz = sk_float_floor2int(z);
+
+ // Make sure the low index is not also the max index.
+ ix = (maxX == ix) ? ix - 1 : ix;
+ iy = (maxY == iy) ? iy - 1 : iy;
+ iz = (maxZ == iz) ? iz - 1 : iz;
+
+ // Weighting factors for the interpolation.
+ float diffX = x - ix;
+ float diffY = y - iy;
+ float diffZ = z - iz;
+
+ // Constants to help us navigate the 3D table.
+ // Ex: Assume x = a, y = b, z = c.
+ // table[a * n001 + b * n010 + c * n100] logically equals table[a][b][c].
+ const int n000 = 0;
+ const int n001 = 3 * colorLUT->fGridPoints[1] * colorLUT->fGridPoints[2];
+ const int n010 = 3 * colorLUT->fGridPoints[2];
+ const int n011 = n001 + n010;
+ const int n100 = 3;
+ const int n101 = n100 + n001;
+ const int n110 = n100 + n010;
+ const int n111 = n110 + n001;
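+    // Read the three digits of each nXYZ as (z, y, x) steps from the base corner:
+    // n001 moves one grid point in x, n010 in y, n100 in z, and the remaining
+    // constants are sums of those single steps.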
+
+ // Base ptr into the table.
+ const float* ptr = &(colorLUT->table()[ix*n001 + iy*n010 + iz*n100]);
+
+ // The code below performs a tetrahedral interpolation for each of the three
+ // dst components. Once the tetrahedron containing the interpolation point is
+ // identified, the interpolation is a weighted sum of grid values at the
+ // vertices of the tetrahedron. The claim is that tetrahedral interpolation
+ // provides a more accurate color conversion.
+ // blogs.mathworks.com/steve/2006/11/24/tetrahedral-interpolation-for-colorspace-conversion/
+ //
+ // I have one test image, and visually I can't tell the difference between
+ // tetrahedral and trilinear interpolation. In terms of computation, the
+ // tetrahedral code requires more branches but less computation. The
+ // SampleICC library provides an option for the client to choose either
+ // tetrahedral or trilinear.
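+    // The comparisons of diffX, diffY, and diffZ below order the fractional
+    // offsets, which picks one of the six tetrahedra that tile the grid cell;
+    // the result is a weighted sum of that tetrahedron's four corner values.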
+ for (int i = 0; i < 3; i++) {
+ if (diffZ < diffY) {
+ if (diffZ < diffX) {
+ dst[i] = (ptr[n000] + diffZ * (ptr[n110] - ptr[n010]) +
+ diffY * (ptr[n010] - ptr[n000]) +
+ diffX * (ptr[n111] - ptr[n110]));
+ } else if (diffY < diffX) {
+ dst[i] = (ptr[n000] + diffZ * (ptr[n111] - ptr[n011]) +
+ diffY * (ptr[n011] - ptr[n001]) +
+ diffX * (ptr[n001] - ptr[n000]));
+ } else {
+ dst[i] = (ptr[n000] + diffZ * (ptr[n111] - ptr[n011]) +
+ diffY * (ptr[n010] - ptr[n000]) +
+ diffX * (ptr[n011] - ptr[n010]));
+ }
+ } else {
+ if (diffZ < diffX) {
+ dst[i] = (ptr[n000] + diffZ * (ptr[n101] - ptr[n001]) +
+ diffY * (ptr[n111] - ptr[n101]) +
+ diffX * (ptr[n001] - ptr[n000]));
+ } else if (diffY < diffX) {
+ dst[i] = (ptr[n000] + diffZ * (ptr[n100] - ptr[n000]) +
+ diffY * (ptr[n111] - ptr[n101]) +
+ diffX * (ptr[n101] - ptr[n100]));
+ } else {
+ dst[i] = (ptr[n000] + diffZ * (ptr[n100] - ptr[n000]) +
+ diffY * (ptr[n110] - ptr[n100]) +
+ diffX * (ptr[n111] - ptr[n110]));
+ }
+ }
+
+ // Increment the table ptr in order to handle the next component.
+        // Note that this is how the table is designed: all of the nXXX
+ // variables are multiples of 3 because there are 3 output
+ // components.
+ ptr++;
+ }
+}
+
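+// Applies the color space's 3D color lookup table (when present) to each pixel
+// in 8-bit RGBA space; the result then feeds the usual matrix/gamma transform.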
+static void handle_color_lut(uint32_t* dst, const uint32_t* src, int len,
+ SkColorLookUpTable* colorLUT) {
+ while (len-- > 0) {
+ uint8_t r = (*src >> 0) & 0xFF,
+ g = (*src >> 8) & 0xFF,
+ b = (*src >> 16) & 0xFF;
+
+ float in[3];
+ float out[3];
+ in[0] = byte_to_float(r);
+ in[1] = byte_to_float(g);
+ in[2] = byte_to_float(b);
+ interp_3d_clut(out, in, colorLUT);
+
+ r = sk_float_round2int(255.0f * clamp_normalized_float(out[0]));
+ g = sk_float_round2int(255.0f * clamp_normalized_float(out[1]));
+ b = sk_float_round2int(255.0f * clamp_normalized_float(out[2]));
+ *dst = SkPackARGB_as_RGBA(0xFF, r, g, b);
+
+ src++;
+ dst++;
+ }
+}
+
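+// The matrix is stored column-major (it is filled by asColMajorf() in the
+// constructor), so each Load() below reads one column: the coefficients applied
+// to the source r, g, and b channels, plus the translation column rTgTbT.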
+static inline void load_matrix(const float matrix[16],
+ Sk4f& rXgXbX, Sk4f& rYgYbY, Sk4f& rZgZbZ, Sk4f& rTgTbT) {
+ rXgXbX = Sk4f::Load(matrix + 0);
+ rYgYbY = Sk4f::Load(matrix + 4);
+ rZgZbZ = Sk4f::Load(matrix + 8);
+ rTgTbT = Sk4f::Load(matrix + 12);
+}
+
+enum Order {
+ kRGBA_Order,
+ kBGRA_Order,
+};
+
+static inline void set_rb_shifts(Order kOrder, int* kRShift, int* kBShift) {
+ if (kRGBA_Order == kOrder) {
+ *kRShift = 0;
+ *kBShift = 16;
+ } else {
+ *kRShift = 16;
+ *kBShift = 0;
+ }
+}
+
+template <Order kOrder>
+static inline void load_rgb_from_tables(const uint32_t* src,
+ Sk4f& r, Sk4f& g, Sk4f& b, Sk4f& a,
+ const float* const srcTables[3]) {
+ int kRShift, kGShift = 8, kBShift;
+ set_rb_shifts(kOrder, &kRShift, &kBShift);
+ r = { srcTables[0][(src[0] >> kRShift) & 0xFF],
+ srcTables[0][(src[1] >> kRShift) & 0xFF],
+ srcTables[0][(src[2] >> kRShift) & 0xFF],
+ srcTables[0][(src[3] >> kRShift) & 0xFF], };
+ g = { srcTables[1][(src[0] >> kGShift) & 0xFF],
+ srcTables[1][(src[1] >> kGShift) & 0xFF],
+ srcTables[1][(src[2] >> kGShift) & 0xFF],
+ srcTables[1][(src[3] >> kGShift) & 0xFF], };
+ b = { srcTables[2][(src[0] >> kBShift) & 0xFF],
+ srcTables[2][(src[1] >> kBShift) & 0xFF],
+ srcTables[2][(src[2] >> kBShift) & 0xFF],
+ srcTables[2][(src[3] >> kBShift) & 0xFF], };
+ a = 0.0f; // Don't let the compiler complain that |a| is uninitialized.
+}
+
+template <Order kOrder>
+static inline void load_rgba_from_tables(const uint32_t* src,
+ Sk4f& r, Sk4f& g, Sk4f& b, Sk4f& a,
+ const float* const srcTables[3]) {
+ int kRShift, kGShift = 8, kBShift;
+ set_rb_shifts(kOrder, &kRShift, &kBShift);
+ r = { srcTables[0][(src[0] >> kRShift) & 0xFF],
+ srcTables[0][(src[1] >> kRShift) & 0xFF],
+ srcTables[0][(src[2] >> kRShift) & 0xFF],
+ srcTables[0][(src[3] >> kRShift) & 0xFF], };
+ g = { srcTables[1][(src[0] >> kGShift) & 0xFF],
+ srcTables[1][(src[1] >> kGShift) & 0xFF],
+ srcTables[1][(src[2] >> kGShift) & 0xFF],
+ srcTables[1][(src[3] >> kGShift) & 0xFF], };
+ b = { srcTables[2][(src[0] >> kBShift) & 0xFF],
+ srcTables[2][(src[1] >> kBShift) & 0xFF],
+ srcTables[2][(src[2] >> kBShift) & 0xFF],
+ srcTables[2][(src[3] >> kBShift) & 0xFF], };
+ a = (1.0f / 255.0f) * SkNx_cast<float>(Sk4u::Load(src) >> 24);
+}
+
+template <Order kOrder>
+static inline void load_rgb_linear(const uint32_t* src,
+ Sk4f& r, Sk4f& g, Sk4f& b, Sk4f& a,
+ const float* const[3]) {
+ int kRShift, kGShift = 8, kBShift;
+ set_rb_shifts(kOrder, &kRShift, &kBShift);
+ r = (1.0f / 255.0f) * SkNx_cast<float>((Sk4u::Load(src) >> kRShift) & 0xFF);
+ g = (1.0f / 255.0f) * SkNx_cast<float>((Sk4u::Load(src) >> kGShift) & 0xFF);
+ b = (1.0f / 255.0f) * SkNx_cast<float>((Sk4u::Load(src) >> kBShift) & 0xFF);
+ a = 0.0f; // Don't let the compiler complain that |a| is uninitialized.
+}
+
+template <Order kOrder>
+static inline void load_rgba_linear(const uint32_t* src,
+ Sk4f& r, Sk4f& g, Sk4f& b, Sk4f& a,
+ const float* const[3]) {
+ int kRShift, kGShift = 8, kBShift;
+ set_rb_shifts(kOrder, &kRShift, &kBShift);
+ r = (1.0f / 255.0f) * SkNx_cast<float>((Sk4u::Load(src) >> kRShift) & 0xFF);
+ g = (1.0f / 255.0f) * SkNx_cast<float>((Sk4u::Load(src) >> kGShift) & 0xFF);
+ b = (1.0f / 255.0f) * SkNx_cast<float>((Sk4u::Load(src) >> kBShift) & 0xFF);
+ a = (1.0f / 255.0f) * SkNx_cast<float>((Sk4u::Load(src) >> 24));
+}
+
+template <Order kOrder>
+static inline void load_rgb_from_tables_1(const uint32_t* src,
+ Sk4f& r, Sk4f& g, Sk4f& b, Sk4f&,
+ const float* const srcTables[3]) {
+ int kRShift, kGShift = 8, kBShift;
+ set_rb_shifts(kOrder, &kRShift, &kBShift);
+ r = Sk4f(srcTables[0][(*src >> kRShift) & 0xFF]);
+ g = Sk4f(srcTables[1][(*src >> kGShift) & 0xFF]);
+ b = Sk4f(srcTables[2][(*src >> kBShift) & 0xFF]);
+}
+
+template <Order kOrder>
+static inline void load_rgba_from_tables_1(const uint32_t* src,
+ Sk4f& r, Sk4f& g, Sk4f& b, Sk4f& a,
+ const float* const srcTables[3]) {
+ int kRShift, kGShift = 8, kBShift;
+ set_rb_shifts(kOrder, &kRShift, &kBShift);
+ r = Sk4f(srcTables[0][(*src >> kRShift) & 0xFF]);
+ g = Sk4f(srcTables[1][(*src >> kGShift) & 0xFF]);
+ b = Sk4f(srcTables[2][(*src >> kBShift) & 0xFF]);
+ a = (1.0f / 255.0f) * Sk4f(*src >> 24);
+}
+
+template <Order kOrder>
+static inline void load_rgb_linear_1(const uint32_t* src,
+ Sk4f& r, Sk4f& g, Sk4f& b, Sk4f&,
+ const float* const srcTables[3]) {
+ int kRShift, kGShift = 8, kBShift;
+ set_rb_shifts(kOrder, &kRShift, &kBShift);
+ r = Sk4f((1.0f / 255.0f) * ((*src >> kRShift) & 0xFF));
+ g = Sk4f((1.0f / 255.0f) * ((*src >> kGShift) & 0xFF));
+ b = Sk4f((1.0f / 255.0f) * ((*src >> kBShift) & 0xFF));
+}
+
+template <Order kOrder>
+static inline void load_rgba_linear_1(const uint32_t* src,
+ Sk4f& r, Sk4f& g, Sk4f& b, Sk4f& a,
+ const float* const srcTables[3]) {
+ int kRShift, kGShift = 8, kBShift;
+ set_rb_shifts(kOrder, &kRShift, &kBShift);
+ r = Sk4f((1.0f / 255.0f) * ((*src >> kRShift) & 0xFF));
+ g = Sk4f((1.0f / 255.0f) * ((*src >> kGShift) & 0xFF));
+ b = Sk4f((1.0f / 255.0f) * ((*src >> kBShift) & 0xFF));
+ a = Sk4f((1.0f / 255.0f) * ((*src >> 24)));
+}
+
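+// Applies the 3x3 gamut portion of the column-major matrix to four pixels at a
+// time; alpha is passed through unchanged.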
+static inline void transform_gamut(const Sk4f& r, const Sk4f& g, const Sk4f& b, const Sk4f& a,
+ const Sk4f& rXgXbX, const Sk4f& rYgYbY, const Sk4f& rZgZbZ,
+ Sk4f& dr, Sk4f& dg, Sk4f& db, Sk4f& da) {
+ dr = rXgXbX[0]*r + rYgYbY[0]*g + rZgZbZ[0]*b;
+ dg = rXgXbX[1]*r + rYgYbY[1]*g + rZgZbZ[1]*b;
+ db = rXgXbX[2]*r + rYgYbY[2]*g + rZgZbZ[2]*b;
+ da = a;
+}
+
+static inline void transform_gamut_1(const Sk4f& r, const Sk4f& g, const Sk4f& b,
+ const Sk4f& rXgXbX, const Sk4f& rYgYbY, const Sk4f& rZgZbZ,
+ Sk4f& rgba) {
+ rgba = rXgXbX*r + rYgYbY*g + rZgZbZ*b;
+}
+
+static inline void translate_gamut(const Sk4f& rTgTbT, Sk4f& dr, Sk4f& dg, Sk4f& db) {
+ dr = dr + rTgTbT[0];
+ dg = dg + rTgTbT[1];
+ db = db + rTgTbT[2];
+}
+
+static inline void translate_gamut_1(const Sk4f& rTgTbT, Sk4f& rgba) {
+ rgba = rgba + rTgTbT;
+}
+
+static inline void premultiply(Sk4f& dr, Sk4f& dg, Sk4f& db, const Sk4f& da) {
+ dr = da * dr;
+ dg = da * dg;
+ db = da * db;
+}
+
+static inline void premultiply_1(const Sk4f& a, Sk4f& rgba) {
+ rgba = a * rgba;
+}
+
+template <Order kOrder>
+static inline void store_srgb(void* dst, const uint32_t* src,
+ Sk4f& dr, Sk4f& dg, Sk4f& db, Sk4f&,
+ const uint8_t* const[3]) {
+ int kRShift, kGShift = 8, kBShift;
+ set_rb_shifts(kOrder, &kRShift, &kBShift);
+ dr = sk_linear_to_srgb_needs_trunc(dr);
+ dg = sk_linear_to_srgb_needs_trunc(dg);
+ db = sk_linear_to_srgb_needs_trunc(db);
+
+ dr = sk_clamp_0_255(dr);
+ dg = sk_clamp_0_255(dg);
+ db = sk_clamp_0_255(db);
+
+ Sk4i da = Sk4i::Load(src) & 0xFF000000;
+
+ Sk4i rgba = (SkNx_cast<int>(dr) << kRShift)
+ | (SkNx_cast<int>(dg) << kGShift)
+ | (SkNx_cast<int>(db) << kBShift)
+ | (da );
+ rgba.store(dst);
+}
+
+template <Order kOrder>
+static inline void store_srgb_1(void* dst, const uint32_t* src,
+ Sk4f& rgba, const Sk4f&,
+ const uint8_t* const[3]) {
+ rgba = sk_clamp_0_255(sk_linear_to_srgb_needs_trunc(rgba));
+
+ uint32_t tmp;
+ SkNx_cast<uint8_t>(SkNx_cast<int32_t>(rgba)).store(&tmp);
+ tmp = (*src & 0xFF000000) | (tmp & 0x00FFFFFF);
+ if (kBGRA_Order == kOrder) {
+ tmp = SkSwizzle_RB(tmp);
+ }
+
+ *(uint32_t*)dst = tmp;
+}
+
+static inline Sk4f linear_to_2dot2(const Sk4f& x) {
+ // x^(29/64) is a very good approximation of the true value, x^(1/2.2).
+ auto x2 = x.rsqrt(), // x^(-1/2)
+ x32 = x2.rsqrt().rsqrt().rsqrt().rsqrt(), // x^(-1/32)
+ x64 = x32.rsqrt(); // x^(+1/64)
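+    // Each rsqrt() halves the exponent and flips its sign, so chaining rsqrt()
+    // and invert() builds x^(29/64) from cheap operations instead of a pow().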
+
+ // 29 = 32 - 2 - 1
+ return 255.0f * x2.invert() * x32 * x64.invert();
+}
+
+template <Order kOrder>
+static inline void store_2dot2(void* dst, const uint32_t* src,
+ Sk4f& dr, Sk4f& dg, Sk4f& db, Sk4f&,
+ const uint8_t* const[3]) {
+ int kRShift, kGShift = 8, kBShift;
+ set_rb_shifts(kOrder, &kRShift, &kBShift);
+ dr = linear_to_2dot2(dr);
+ dg = linear_to_2dot2(dg);
+ db = linear_to_2dot2(db);
+
+ dr = sk_clamp_0_255(dr);
+ dg = sk_clamp_0_255(dg);
+ db = sk_clamp_0_255(db);
+
+ Sk4i da = Sk4i::Load(src) & 0xFF000000;
+
+ Sk4i rgba = (Sk4f_round(dr) << kRShift)
+ | (Sk4f_round(dg) << kGShift)
+ | (Sk4f_round(db) << kBShift)
+ | (da );
+ rgba.store(dst);
+}
+
+template <Order kOrder>
+static inline void store_2dot2_1(void* dst, const uint32_t* src,
+ Sk4f& rgba, const Sk4f&,
+ const uint8_t* const[3]) {
+ rgba = sk_clamp_0_255(linear_to_2dot2(rgba));
+
+ uint32_t tmp;
+ SkNx_cast<uint8_t>(Sk4f_round(rgba)).store(&tmp);
+ tmp = (*src & 0xFF000000) | (tmp & 0x00FFFFFF);
+ if (kBGRA_Order == kOrder) {
+ tmp = SkSwizzle_RB(tmp);
+ }
+
+ *(uint32_t*)dst = tmp;
+}
+
+template <Order kOrder>
+static inline void store_linear(void* dst, const uint32_t* src,
+ Sk4f& dr, Sk4f& dg, Sk4f& db, Sk4f&,
+ const uint8_t* const[3]) {
+ int kRShift, kGShift = 8, kBShift;
+ set_rb_shifts(kOrder, &kRShift, &kBShift);
+ dr = sk_clamp_0_255(255.0f * dr);
+ dg = sk_clamp_0_255(255.0f * dg);
+ db = sk_clamp_0_255(255.0f * db);
+
+ Sk4i da = Sk4i::Load(src) & 0xFF000000;
+
+ Sk4i rgba = (Sk4f_round(dr) << kRShift)
+ | (Sk4f_round(dg) << kGShift)
+ | (Sk4f_round(db) << kBShift)
+ | (da );
+ rgba.store(dst);
+}
+
+template <Order kOrder>
+static inline void store_linear_1(void* dst, const uint32_t* src,
+ Sk4f& rgba, const Sk4f&,
+ const uint8_t* const[3]) {
+ rgba = sk_clamp_0_255(255.0f * rgba);
+
+ uint32_t tmp;
+ SkNx_cast<uint8_t>(Sk4f_round(rgba)).store(&tmp);
+ tmp = (*src & 0xFF000000) | (tmp & 0x00FFFFFF);
+ if (kBGRA_Order == kOrder) {
+ tmp = SkSwizzle_RB(tmp);
+ }
+
+ *(uint32_t*)dst = tmp;
+}
+
+template <Order kOrder>
+static inline void store_f16(void* dst, const uint32_t* src,
+ Sk4f& dr, Sk4f& dg, Sk4f& db, Sk4f& da,
+ const uint8_t* const[3]) {
+ Sk4h_store4(dst, SkFloatToHalf_finite_ftz(dr),
+ SkFloatToHalf_finite_ftz(dg),
+ SkFloatToHalf_finite_ftz(db),
+ SkFloatToHalf_finite_ftz(da));
+}
+
+template <Order kOrder>
+static inline void store_f16_1(void* dst, const uint32_t* src,
+ Sk4f& rgba, const Sk4f& a,
+ const uint8_t* const[3]) {
+ rgba = Sk4f(rgba[0], rgba[1], rgba[2], a[3]);
+ SkFloatToHalf_finite_ftz(rgba).store((uint64_t*) dst);
+}
+
+template <Order kOrder>
+static inline void store_f32(void* dst, const uint32_t* src,
+ Sk4f& dr, Sk4f& dg, Sk4f& db, Sk4f& da,
+ const uint8_t* const[3]) {
+ Sk4f_store4(dst, dr, dg, db, da);
+}
+
+template <Order kOrder>
+static inline void store_f32_1(void* dst, const uint32_t* src,
+ Sk4f& rgba, const Sk4f& a,
+ const uint8_t* const[3]) {
+ rgba = Sk4f(rgba[0], rgba[1], rgba[2], a[3]);
+ rgba.store((float*) dst);
+}
+
+template <Order kOrder>
+static inline void store_f16_opaque(void* dst, const uint32_t* src,
+ Sk4f& dr, Sk4f& dg, Sk4f& db, Sk4f&,
+ const uint8_t* const[3]) {
+ Sk4h_store4(dst, SkFloatToHalf_finite_ftz(dr),
+ SkFloatToHalf_finite_ftz(dg),
+ SkFloatToHalf_finite_ftz(db),
+ SK_Half1);
+}
+
+template <Order kOrder>
+static inline void store_f16_1_opaque(void* dst, const uint32_t* src,
+ Sk4f& rgba, const Sk4f&,
+ const uint8_t* const[3]) {
+ uint64_t tmp;
+ SkFloatToHalf_finite_ftz(rgba).store(&tmp);
+ tmp |= static_cast<uint64_t>(SK_Half1) << 48;
+ *((uint64_t*) dst) = tmp;
+}
+
+template <Order kOrder>
+static inline void store_generic(void* dst, const uint32_t* src,
+ Sk4f& dr, Sk4f& dg, Sk4f& db, Sk4f&,
+ const uint8_t* const dstTables[3]) {
+ int kRShift, kGShift = 8, kBShift;
+ set_rb_shifts(kOrder, &kRShift, &kBShift);
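+    // The dst gamma tables hold kDstGammaTableSize (1024) entries, so scale the
+    // linear values to [0, 1023], clamp, and round them into table indices.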
+ dr = Sk4f::Min(Sk4f::Max(1023.0f * dr, 0.0f), 1023.0f);
+ dg = Sk4f::Min(Sk4f::Max(1023.0f * dg, 0.0f), 1023.0f);
+ db = Sk4f::Min(Sk4f::Max(1023.0f * db, 0.0f), 1023.0f);
+
+ Sk4i ir = Sk4f_round(dr);
+ Sk4i ig = Sk4f_round(dg);
+ Sk4i ib = Sk4f_round(db);
+
+ Sk4i da = Sk4i::Load(src) & 0xFF000000;
+
+ uint32_t* dst32 = (uint32_t*) dst;
+ dst32[0] = dstTables[0][ir[0]] << kRShift
+ | dstTables[1][ig[0]] << kGShift
+ | dstTables[2][ib[0]] << kBShift
+ | da[0];
+ dst32[1] = dstTables[0][ir[1]] << kRShift
+ | dstTables[1][ig[1]] << kGShift
+ | dstTables[2][ib[1]] << kBShift
+ | da[1];
+ dst32[2] = dstTables[0][ir[2]] << kRShift
+ | dstTables[1][ig[2]] << kGShift
+ | dstTables[2][ib[2]] << kBShift
+ | da[2];
+ dst32[3] = dstTables[0][ir[3]] << kRShift
+ | dstTables[1][ig[3]] << kGShift
+ | dstTables[2][ib[3]] << kBShift
+ | da[3];
+}
+
+template <Order kOrder>
+static inline void store_generic_1(void* dst, const uint32_t* src,
+ Sk4f& rgba, const Sk4f&,
+ const uint8_t* const dstTables[3]) {
+ int kRShift, kGShift = 8, kBShift;
+ set_rb_shifts(kOrder, &kRShift, &kBShift);
+ rgba = Sk4f::Min(Sk4f::Max(1023.0f * rgba, 0.0f), 1023.0f);
+
+ Sk4i indices = Sk4f_round(rgba);
+
+ *((uint32_t*) dst) = dstTables[0][indices[0]] << kRShift
+ | dstTables[1][indices[1]] << kGShift
+ | dstTables[2][indices[2]] << kBShift
+ | (*src & 0xFF000000);
+}
+
+typedef decltype(load_rgb_from_tables<kRGBA_Order> )* LoadFn;
+typedef decltype(load_rgb_from_tables_1<kRGBA_Order>)* Load1Fn;
+typedef decltype(store_generic<kRGBA_Order> )* StoreFn;
+typedef decltype(store_generic_1<kRGBA_Order> )* Store1Fn;
+
+template <SkAlphaType kAlphaType,
+ ColorSpaceMatch kCSM>
+static inline void do_color_xform(void* dst, const uint32_t* src, int len,
+ const float* const srcTables[3], const float matrix[16],
+ const uint8_t* const dstTables[3], LoadFn load, Load1Fn load_1,
+ StoreFn store, Store1Fn store_1, size_t sizeOfDstPixel) {
+ Sk4f rXgXbX, rYgYbY, rZgZbZ, rTgTbT;
+ load_matrix(matrix, rXgXbX, rYgYbY, rZgZbZ, rTgTbT);
+
+ if (len >= 4) {
+ // Naively this would be a loop of load-transform-store, but we found it faster to
+ // move the N+1th load ahead of the Nth store. We don't bother doing this for N<4.
+ Sk4f r, g, b, a;
+ load(src, r, g, b, a, srcTables);
+ src += 4;
+ len -= 4;
+
+ Sk4f dr, dg, db, da;
+ while (len >= 4) {
+ if (kNone_ColorSpaceMatch == kCSM) {
+ transform_gamut(r, g, b, a, rXgXbX, rYgYbY, rZgZbZ, dr, dg, db, da);
+ translate_gamut(rTgTbT, dr, dg, db);
+ } else {
+ dr = r;
+ dg = g;
+ db = b;
+ da = a;
+ }
+
+ if (kPremul_SkAlphaType == kAlphaType) {
+ premultiply(dr, dg, db, da);
+ }
+
+ load(src, r, g, b, a, srcTables);
+
+ store(dst, src - 4, dr, dg, db, da, dstTables);
+ dst = SkTAddOffset<void>(dst, 4 * sizeOfDstPixel);
+ src += 4;
+ len -= 4;
+ }
+
+ if (kNone_ColorSpaceMatch == kCSM) {
+ transform_gamut(r, g, b, a, rXgXbX, rYgYbY, rZgZbZ, dr, dg, db, da);
+ translate_gamut(rTgTbT, dr, dg, db);
+ } else {
+ dr = r;
+ dg = g;
+ db = b;
+ da = a;
+ }
+
+ if (kPremul_SkAlphaType == kAlphaType) {
+ premultiply(dr, dg, db, da);
+ }
+
+ store(dst, src - 4, dr, dg, db, da, dstTables);
+ dst = SkTAddOffset<void>(dst, 4 * sizeOfDstPixel);
+ }
+
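+    // Scalar tail: transform any remaining 0-3 pixels one at a time with the
+    // _1 variants of the load and store routines.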
+ while (len > 0) {
+ Sk4f r, g, b, a;
+ load_1(src, r, g, b, a, srcTables);
+
+ Sk4f rgba;
+ if (kNone_ColorSpaceMatch == kCSM) {
+ transform_gamut_1(r, g, b, rXgXbX, rYgYbY, rZgZbZ, rgba);
+ translate_gamut_1(rTgTbT, rgba);
+ } else {
+ rgba = Sk4f(r[0], g[0], b[0], a[0]);
+ }
+
+ if (kPremul_SkAlphaType == kAlphaType) {
+ premultiply_1(a, rgba);
+ }
+
+ store_1(dst, src, rgba, a, dstTables);
+
+ src += 1;
+ len -= 1;
+ dst = SkTAddOffset<void>(dst, sizeOfDstPixel);
+ }
+}
+
+enum SrcFormat {
+ kRGBA_8888_Linear_SrcFormat,
+ kRGBA_8888_Table_SrcFormat,
+ kBGRA_8888_Linear_SrcFormat,
+ kBGRA_8888_Table_SrcFormat,
+};
+
+enum DstFormat {
+ kRGBA_8888_Linear_DstFormat,
+ kRGBA_8888_SRGB_DstFormat,
+ kRGBA_8888_2Dot2_DstFormat,
+ kRGBA_8888_Table_DstFormat,
+ kBGRA_8888_Linear_DstFormat,
+ kBGRA_8888_SRGB_DstFormat,
+ kBGRA_8888_2Dot2_DstFormat,
+ kBGRA_8888_Table_DstFormat,
+ kF16_Linear_DstFormat,
+ kF32_Linear_DstFormat,
+};
+
+template <SrcFormat kSrc,
+ DstFormat kDst,
+ SkAlphaType kAlphaType,
+ ColorSpaceMatch kCSM>
+static void color_xform_RGBA(void* dst, const uint32_t* src, int len,
+ const float* const srcTables[3], const float matrix[16],
+ const uint8_t* const dstTables[3]) {
+ LoadFn load;
+ Load1Fn load_1;
+ static constexpr bool loadAlpha = (kPremul_SkAlphaType == kAlphaType) ||
+ (kF16_Linear_DstFormat == kDst) ||
+ (kF32_Linear_DstFormat == kDst);
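+    // Alpha only needs to be loaded when it will be premultiplied into the color
+    // channels or written to a float destination; the 8888 stores reuse the
+    // source alpha bytes directly.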
+ switch (kSrc) {
+ case kRGBA_8888_Linear_SrcFormat:
+ if (loadAlpha) {
+ load = load_rgba_linear<kRGBA_Order>;
+ load_1 = load_rgba_linear_1<kRGBA_Order>;
+ } else {
+ load = load_rgb_linear<kRGBA_Order>;
+ load_1 = load_rgb_linear_1<kRGBA_Order>;
+ }
+ break;
+ case kRGBA_8888_Table_SrcFormat:
+ if (loadAlpha) {
+ load = load_rgba_from_tables<kRGBA_Order>;
+ load_1 = load_rgba_from_tables_1<kRGBA_Order>;
+ } else {
+ load = load_rgb_from_tables<kRGBA_Order>;
+ load_1 = load_rgb_from_tables_1<kRGBA_Order>;
+ }
+ break;
+ case kBGRA_8888_Linear_SrcFormat:
+ if (loadAlpha) {
+ load = load_rgba_linear<kBGRA_Order>;
+ load_1 = load_rgba_linear_1<kBGRA_Order>;
+ } else {
+ load = load_rgb_linear<kBGRA_Order>;
+ load_1 = load_rgb_linear_1<kBGRA_Order>;
+ }
+ break;
+ case kBGRA_8888_Table_SrcFormat:
+ if (loadAlpha) {
+ load = load_rgba_from_tables<kBGRA_Order>;
+ load_1 = load_rgba_from_tables_1<kBGRA_Order>;
+ } else {
+ load = load_rgb_from_tables<kBGRA_Order>;
+ load_1 = load_rgb_from_tables_1<kBGRA_Order>;
+ }
+ break;
+ }
+
+ StoreFn store;
+ Store1Fn store_1;
+ size_t sizeOfDstPixel;
+ switch (kDst) {
+ case kRGBA_8888_Linear_DstFormat:
+ store = store_linear<kRGBA_Order>;
+ store_1 = store_linear_1<kRGBA_Order>;
+ sizeOfDstPixel = 4;
+ break;
+ case kRGBA_8888_SRGB_DstFormat:
+ store = store_srgb<kRGBA_Order>;
+ store_1 = store_srgb_1<kRGBA_Order>;
+ sizeOfDstPixel = 4;
+ break;
+ case kRGBA_8888_2Dot2_DstFormat:
+ store = store_2dot2<kRGBA_Order>;
+ store_1 = store_2dot2_1<kRGBA_Order>;
+ sizeOfDstPixel = 4;
+ break;
+ case kRGBA_8888_Table_DstFormat:
+ store = store_generic<kRGBA_Order>;
+ store_1 = store_generic_1<kRGBA_Order>;
+ sizeOfDstPixel = 4;
+ break;
+ case kBGRA_8888_Linear_DstFormat:
+ store = store_linear<kBGRA_Order>;
+ store_1 = store_linear_1<kBGRA_Order>;
+ sizeOfDstPixel = 4;
+ break;
+ case kBGRA_8888_SRGB_DstFormat:
+ store = store_srgb<kBGRA_Order>;
+ store_1 = store_srgb_1<kBGRA_Order>;
+ sizeOfDstPixel = 4;
+ break;
+ case kBGRA_8888_2Dot2_DstFormat:
+ store = store_2dot2<kBGRA_Order>;
+ store_1 = store_2dot2_1<kBGRA_Order>;
+ sizeOfDstPixel = 4;
+ break;
+ case kBGRA_8888_Table_DstFormat:
+ store = store_generic<kBGRA_Order>;
+ store_1 = store_generic_1<kBGRA_Order>;
+ sizeOfDstPixel = 4;
+ break;
+ case kF16_Linear_DstFormat:
+ store = (kOpaque_SkAlphaType == kAlphaType) ? store_f16_opaque<kRGBA_Order> :
+ store_f16<kRGBA_Order>;
+ store_1 = (kOpaque_SkAlphaType == kAlphaType) ? store_f16_1_opaque<kRGBA_Order> :
+ store_f16_1<kRGBA_Order>;
+ sizeOfDstPixel = 8;
+ break;
+ case kF32_Linear_DstFormat:
+ store = store_f32<kRGBA_Order>;
+ store_1 = store_f32_1<kRGBA_Order>;
+ sizeOfDstPixel = 16;
+ break;
+ }
+
+ do_color_xform<kAlphaType, kCSM>
+ (dst, src, len, srcTables, matrix, dstTables, load, load_1, store, store_1,
+ sizeOfDstPixel);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
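+// Returns how many per-channel gamma tables need to be built: zero when the
+// whole space uses a named curve (no per-space storage needed), one when all
+// three channels share a curve, and three otherwise.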
+static inline int num_tables(SkColorSpace* space) {
+ switch (as_CSB(space)->gammaNamed()) {
+ case kSRGB_SkGammaNamed:
+ case k2Dot2Curve_SkGammaNamed:
+ case kLinear_SkGammaNamed:
+ return 0;
+ default: {
+ const SkGammas* gammas = as_CSB(space)->gammas();
+ SkASSERT(gammas);
+
+ bool gammasAreMatching = (gammas->type(0) == gammas->type(1)) &&
+ (gammas->data(0) == gammas->data(1)) &&
+ (gammas->type(0) == gammas->type(2)) &&
+ (gammas->data(0) == gammas->data(2));
+
+ // It's likely that each component will have the same gamma. In this case,
+ // we only need to build one table.
+ return gammasAreMatching ? 1 : 3;
+ }
+ }
+}
+
+template <SrcGamma kSrc, DstGamma kDst, ColorSpaceMatch kCSM>
+SkColorSpaceXform_Base<kSrc, kDst, kCSM>
+::SkColorSpaceXform_Base(SkColorSpace* srcSpace, const SkMatrix44& srcToDst, SkColorSpace* dstSpace)
+ : fColorLUT(sk_ref_sp((SkColorLookUpTable*) as_CSB(srcSpace)->colorLUT()))
+{
+ srcToDst.asColMajorf(fSrcToDst);
+
+ const int numSrcTables = num_tables(srcSpace);
+ const int numDstTables = num_tables(dstSpace);
+ const size_t srcTableBytes = numSrcTables * 256 * sizeof(float);
+ const size_t dstTableBytes = numDstTables * kDstGammaTableSize * sizeof(uint8_t);
+ fStorage.reset(srcTableBytes + dstTableBytes);
+ float* srcStorage = (float*) fStorage.get();
+ uint8_t* dstStorage = SkTAddOffset<uint8_t>(fStorage.get(), srcTableBytes);
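+    // fStorage layout: numSrcTables float tables of 256 entries (to-linear),
+    // followed by numDstTables uint8_t tables of kDstGammaTableSize entries
+    // (from-linear).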
+
+ const bool srcGammasAreMatching = (1 >= numSrcTables);
+ const bool dstGammasAreMatching = (1 >= numDstTables);
+ build_gamma_tables(fSrcGammaTables, srcStorage, 256, srcSpace, kToLinear, srcGammasAreMatching);
+ build_gamma_tables(fDstGammaTables, dstStorage, kDstGammaTableSize, dstSpace, kFromLinear,
+ dstGammasAreMatching);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+template <SrcFormat kSrc, DstFormat kDst, ColorSpaceMatch kCSM>
+static inline void apply_set_alpha(void* dst, const uint32_t* src, int len, SkAlphaType alphaType,
+ const float* const srcTables[3], const float matrix[16],
+ const uint8_t* const dstTables[3]) {
+ switch (alphaType) {
+ case kOpaque_SkAlphaType:
+ return color_xform_RGBA<kSrc, kDst, kOpaque_SkAlphaType, kCSM>
+ (dst, src, len, srcTables, matrix, dstTables);
+ case kPremul_SkAlphaType:
+ return color_xform_RGBA<kSrc, kDst, kPremul_SkAlphaType, kCSM>
+ (dst, src, len, srcTables, matrix, dstTables);
+ case kUnpremul_SkAlphaType:
+ return color_xform_RGBA<kSrc, kDst, kUnpremul_SkAlphaType, kCSM>
+ (dst, src, len, srcTables, matrix, dstTables);
+ default:
+ SkASSERT(false);
+ return;
+ }
+}
+
+template <SrcGamma kSrc, DstFormat kDst, ColorSpaceMatch kCSM>
+static inline void apply_set_src(void* dst, const uint32_t* src, int len, SkAlphaType alphaType,
+ const float* const srcTables[3], const float matrix[16],
+ const uint8_t* const dstTables[3],
+ SkColorSpaceXform::ColorFormat srcColorFormat) {
+ switch (srcColorFormat) {
+ case SkColorSpaceXform::kRGBA_8888_ColorFormat:
+ switch (kSrc) {
+ case kLinear_SrcGamma:
+ return apply_set_alpha<kRGBA_8888_Linear_SrcFormat, kDst, kCSM>
+ (dst, src, len, alphaType, nullptr, matrix, dstTables);
+ case kTable_SrcGamma:
+ return apply_set_alpha<kRGBA_8888_Table_SrcFormat, kDst, kCSM>
+ (dst, src, len, alphaType, srcTables, matrix, dstTables);
+ }
+ case SkColorSpaceXform::kBGRA_8888_ColorFormat:
+ switch (kSrc) {
+ case kLinear_SrcGamma:
+ return apply_set_alpha<kBGRA_8888_Linear_SrcFormat, kDst, kCSM>
+ (dst, src, len, alphaType, nullptr, matrix, dstTables);
+ case kTable_SrcGamma:
+ return apply_set_alpha<kBGRA_8888_Table_SrcFormat, kDst, kCSM>
+ (dst, src, len, alphaType, srcTables, matrix, dstTables);
+ }
+ default:
+ SkASSERT(false);
+ }
+}
+
+template <SrcGamma kSrc, DstGamma kDst, ColorSpaceMatch kCSM>
+void SkColorSpaceXform_Base<kSrc, kDst, kCSM>
+::apply(void* dst, const uint32_t* src, int len, ColorFormat dstColorFormat,
+ ColorFormat srcColorFormat, SkAlphaType alphaType)
+const
+{
+ if (kFull_ColorSpaceMatch == kCSM) {
+ switch (alphaType) {
+ case kPremul_SkAlphaType:
+ // We can't skip the xform since we need to perform a premultiply in the
+ // linear space.
+ break;
+ default:
+ switch (dstColorFormat) {
+ case kRGBA_8888_ColorFormat:
+ return (void) memcpy(dst, src, len * sizeof(uint32_t));
+ case kBGRA_8888_ColorFormat:
+ return SkOpts::RGBA_to_BGRA((uint32_t*) dst, src, len);
+ case kRGBA_F16_ColorFormat:
+ case kRGBA_F32_ColorFormat:
+ // There's still work to do to xform to linear floats.
+ break;
+ default:
+ SkASSERT(false);
+ return;
+ }
+ }
+ }
+
+#if defined(GOOGLE3)
+ // Stack frame size is limited in GOOGLE3.
+ SkAutoSMalloc<256 * sizeof(uint32_t)> storage;
+#else
+ SkAutoSMalloc<1024 * sizeof(uint32_t)> storage;
+#endif
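+    // SkAutoSMalloc uses its fixed-size stack buffer for short runs and falls
+    // back to a heap allocation for longer ones.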
+ if (fColorLUT) {
+ size_t storageBytes = len * sizeof(uint32_t);
+ storage.reset(storageBytes);
+ handle_color_lut((uint32_t*) storage.get(), src, len, fColorLUT.get());
+ src = (const uint32_t*) storage.get();
+ }
+
+ switch (dstColorFormat) {
+ case kRGBA_8888_ColorFormat:
+ switch (kDst) {
+ case kLinear_DstGamma:
+ return apply_set_src<kSrc, kRGBA_8888_Linear_DstFormat, kCSM>
+ (dst, src, len, alphaType, fSrcGammaTables, fSrcToDst, nullptr,
+ srcColorFormat);
+ case kSRGB_DstGamma:
+ return apply_set_src<kSrc, kRGBA_8888_SRGB_DstFormat, kCSM>
+ (dst, src, len, alphaType, fSrcGammaTables, fSrcToDst, nullptr,
+ srcColorFormat);
+ case k2Dot2_DstGamma:
+ return apply_set_src<kSrc, kRGBA_8888_2Dot2_DstFormat, kCSM>
+ (dst, src, len, alphaType, fSrcGammaTables, fSrcToDst, nullptr,
+ srcColorFormat);
+ case kTable_DstGamma:
+ return apply_set_src<kSrc, kRGBA_8888_Table_DstFormat, kCSM>
+ (dst, src, len, alphaType, fSrcGammaTables, fSrcToDst, fDstGammaTables,
+ srcColorFormat);
+ }
+ case kBGRA_8888_ColorFormat:
+ switch (kDst) {
+ case kLinear_DstGamma:
+ return apply_set_src<kSrc, kBGRA_8888_Linear_DstFormat, kCSM>
+ (dst, src, len, alphaType, fSrcGammaTables, fSrcToDst, nullptr,
+ srcColorFormat);
+ case kSRGB_DstGamma:
+ return apply_set_src<kSrc, kBGRA_8888_SRGB_DstFormat, kCSM>
+ (dst, src, len, alphaType, fSrcGammaTables, fSrcToDst, nullptr,
+ srcColorFormat);
+ case k2Dot2_DstGamma:
+ return apply_set_src<kSrc, kBGRA_8888_2Dot2_DstFormat, kCSM>
+ (dst, src, len, alphaType, fSrcGammaTables, fSrcToDst, nullptr,
+ srcColorFormat);
+ case kTable_DstGamma:
+ return apply_set_src<kSrc, kBGRA_8888_Table_DstFormat, kCSM>
+ (dst, src, len, alphaType, fSrcGammaTables, fSrcToDst, fDstGammaTables,
+ srcColorFormat);
+ }
+ case kRGBA_F16_ColorFormat:
+ switch (kDst) {
+ case kLinear_DstGamma:
+ return apply_set_src<kSrc, kF16_Linear_DstFormat, kCSM>
+ (dst, src, len, alphaType, fSrcGammaTables, fSrcToDst, nullptr,
+ srcColorFormat);
+ default:
+ SkASSERT(false);
+ return;
+ }
+ case kRGBA_F32_ColorFormat:
+ switch (kDst) {
+ case kLinear_DstGamma:
+ return apply_set_src<kSrc, kF32_Linear_DstFormat, kCSM>
+ (dst, src, len, alphaType, fSrcGammaTables, fSrcToDst, nullptr,
+ srcColorFormat);
+ default:
+ SkASSERT(false);
+ return;
+ }
+ default:
+ SkASSERT(false);
+ return;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+std::unique_ptr<SkColorSpaceXform> SlowIdentityXform(SkColorSpace* space) {
+ return std::unique_ptr<SkColorSpaceXform>(new SkColorSpaceXform_Base
+ <kTable_SrcGamma, kTable_DstGamma, kNone_ColorSpaceMatch>
+            (space, SkMatrix44::I(), space));
+}
diff --git a/gfx/skia/skia/src/core/SkColorSpaceXform.h b/gfx/skia/skia/src/core/SkColorSpaceXform.h
new file mode 100644
index 000000000..bb99071fe
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorSpaceXform.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorSpaceXform_DEFINED
+#define SkColorSpaceXform_DEFINED
+
+#include "SkColorSpace.h"
+#include "SkColorSpace_Base.h"
+#include "SkImageInfo.h"
+
+class SkColorSpaceXform : SkNoncopyable {
+public:
+
+ /**
+ * Create an object to handle color space conversions.
+ *
+     *  @param srcSpace The color space that the source pixels are encoded in.
+ * @param dstSpace The destination color space.
+ *
+ */
+ static std::unique_ptr<SkColorSpaceXform> New(SkColorSpace* srcSpace, SkColorSpace* dstSpace);
+
+ enum ColorFormat : uint8_t {
+ kRGBA_8888_ColorFormat,
+ kBGRA_8888_ColorFormat,
+ kRGBA_F16_ColorFormat,
+ kRGBA_F32_ColorFormat,
+ };
+
+ /**
+ * Apply the color conversion to a |src| buffer, storing the output in the |dst| buffer.
+ *
+ * @param dst Stored in the format described by |dstColorFormat|
+ * @param src Stored in the format described by |srcColorFormat|
+ * @param len Number of pixels in the buffers
+ * @param dstColorFormat Describes color format of |dst|
+ * @param srcColorFormat Describes color format of |src|
+ * Must be kRGBA_8888 or kBGRA_8888
+ * @param alphaType Describes alpha properties of the |dst| (and |src|)
+ * kUnpremul preserves input alpha values
+ * kPremul performs a premultiplication and also preserves alpha values
+ * kOpaque optimization hint, |dst| alphas set to 1
+ *
+ */
+ virtual void apply(void* dst, const uint32_t* src, int len, ColorFormat dstColorFormat,
+ ColorFormat srcColorFormat, SkAlphaType alphaType) const = 0;
+
+ virtual ~SkColorSpaceXform() {}
+};
+
+enum SrcGamma {
+ kLinear_SrcGamma,
+ kTable_SrcGamma,
+};
+
+enum DstGamma {
+ kLinear_DstGamma,
+ kSRGB_DstGamma,
+ k2Dot2_DstGamma,
+ kTable_DstGamma,
+};
+
+enum ColorSpaceMatch {
+ kNone_ColorSpaceMatch,
+ kGamut_ColorSpaceMatch,
+ kFull_ColorSpaceMatch,
+};
+
+template <SrcGamma kSrc, DstGamma kDst, ColorSpaceMatch kCSM>
+class SkColorSpaceXform_Base : public SkColorSpaceXform {
+public:
+
+ void apply(void* dst, const uint32_t* src, int len, ColorFormat dstColorFormat,
+ ColorFormat srcColorFormat, SkAlphaType alphaType) const override;
+
+ static constexpr int kDstGammaTableSize = 1024;
+
+private:
+ SkColorSpaceXform_Base(SkColorSpace* srcSpace, const SkMatrix44& srcToDst,
+ SkColorSpace* dstSpace);
+
+ sk_sp<SkColorLookUpTable> fColorLUT;
+
+    // These arrays contain pointers either into fStorage or into precomputed tables.
+ const float* fSrcGammaTables[3];
+ const uint8_t* fDstGammaTables[3];
+ SkAutoMalloc fStorage;
+
+ float fSrcToDst[16];
+
+ friend class SkColorSpaceXform;
+ friend std::unique_ptr<SkColorSpaceXform> SlowIdentityXform(SkColorSpace* space);
+};
+
+// For testing. Bypasses the optimized paths normally taken when the src and dst color spaces are equal.
+std::unique_ptr<SkColorSpaceXform> SlowIdentityXform(SkColorSpace* space);
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkColorSpace_Base.h b/gfx/skia/skia/src/core/SkColorSpace_Base.h
new file mode 100644
index 000000000..07fa38325
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorSpace_Base.h
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorSpace_Base_DEFINED
+#define SkColorSpace_Base_DEFINED
+
+#include "SkColorSpace.h"
+#include "SkData.h"
+#include "SkOnce.h"
+#include "SkTemplates.h"
+
+enum SkGammaNamed : uint8_t {
+ kLinear_SkGammaNamed,
+ kSRGB_SkGammaNamed,
+ k2Dot2Curve_SkGammaNamed,
+ kNonStandard_SkGammaNamed,
+};
+
+struct SkGammas : SkRefCnt {
+
+ // There are four possible representations for gamma curves. kNone_Type is used
+ // as a placeholder until the struct is initialized. It is not a valid value.
+ enum class Type : uint8_t {
+ kNone_Type,
+ kNamed_Type,
+ kValue_Type,
+ kTable_Type,
+ kParam_Type,
+ };
+
+ // Contains information for a gamma table.
+ struct Table {
+ size_t fOffset;
+ int fSize;
+
+ const float* table(const SkGammas* base) const {
+ return SkTAddOffset<const float>(base, sizeof(SkGammas) + fOffset);
+ }
+ };
+
+ // Contains the parameters for a parametric curve.
+ struct Params {
+ // Y = (aX + b)^g + c for X >= d
+ // Y = eX + f otherwise
+ float fG;
+ float fA;
+ float fB;
+ float fC;
+ float fD;
+ float fE;
+ float fF;
+ };
+
+ // Contains the actual gamma curve information. Should be interpreted
+ // based on the type of the gamma curve.
+ union Data {
+ Data()
+ : fTable{ 0, 0 }
+ {}
+
+ inline bool operator==(const Data& that) const {
+ return this->fTable.fOffset == that.fTable.fOffset &&
+ this->fTable.fSize == that.fTable.fSize;
+ }
+
+ SkGammaNamed fNamed;
+ float fValue;
+ Table fTable;
+ size_t fParamOffset;
+
+ const Params& params(const SkGammas* base) const {
+ return *SkTAddOffset<const Params>(base, sizeof(SkGammas) + fParamOffset);
+ }
+ };
+
+ bool isNamed(int i) const {
+ return Type::kNamed_Type == this->type(i);
+ }
+
+ bool isValue(int i) const {
+ return Type::kValue_Type == this->type(i);
+ }
+
+ bool isTable(int i) const {
+ return Type::kTable_Type == this->type(i);
+ }
+
+ bool isParametric(int i) const {
+ return Type::kParam_Type == this->type(i);
+ }
+
+ const Data& data(int i) const {
+ switch (i) {
+ case 0:
+ return fRedData;
+ case 1:
+ return fGreenData;
+ case 2:
+ return fBlueData;
+ default:
+ SkASSERT(false);
+ return fRedData;
+ }
+ }
+
+ const float* table(int i) const {
+ SkASSERT(isTable(i));
+ return this->data(i).fTable.table(this);
+ }
+
+ const Params& params(int i) const {
+ SkASSERT(isParametric(i));
+ return this->data(i).params(this);
+ }
+
+ Type type(int i) const {
+ switch (i) {
+ case 0:
+ return fRedType;
+ case 1:
+ return fGreenType;
+ case 2:
+ return fBlueType;
+ default:
+ SkASSERT(false);
+ return fRedType;
+ }
+ }
+
+ SkGammas()
+ : fRedType(Type::kNone_Type)
+ , fGreenType(Type::kNone_Type)
+ , fBlueType(Type::kNone_Type)
+ {}
+
+ // These fields should only be modified when initializing the struct.
+ Data fRedData;
+ Data fGreenData;
+ Data fBlueData;
+ Type fRedType;
+ Type fGreenType;
+ Type fBlueType;
+
+ // Objects of this type are sometimes created in a custom fashion using
+ // sk_malloc_throw and therefore must be sk_freed. We overload new to
+ // also call sk_malloc_throw so that memory can be unconditionally released
+ // using sk_free in an overloaded delete. Overloading regular new means we
+ // must also overload placement new.
+ void* operator new(size_t size) { return sk_malloc_throw(size); }
+ void* operator new(size_t, void* p) { return p; }
+ void operator delete(void* p) { sk_free(p); }
+};
+
+struct SkColorLookUpTable : public SkRefCnt {
+ static constexpr uint8_t kOutputChannels = 3;
+
+ uint8_t fInputChannels;
+ uint8_t fGridPoints[3];
+
+ const float* table() const {
+ return SkTAddOffset<const float>(this, sizeof(SkColorLookUpTable));
+ }
+
+ SkColorLookUpTable(uint8_t inputChannels, uint8_t gridPoints[3])
+ : fInputChannels(inputChannels)
+ {
+ SkASSERT(3 == inputChannels);
+ memcpy(fGridPoints, gridPoints, 3 * sizeof(uint8_t));
+ }
+
+ // Objects of this type are created in a custom fashion using sk_malloc_throw
+ // and therefore must be sk_freed.
+ void* operator new(size_t size) = delete;
+ void* operator new(size_t, void* p) { return p; }
+ void operator delete(void* p) { sk_free(p); }
+};
+
+class SkColorSpace_Base : public SkColorSpace {
+public:
+
+ static sk_sp<SkColorSpace> NewRGB(const float gammas[3], const SkMatrix44& toXYZD50);
+
+ SkGammaNamed gammaNamed() const { return fGammaNamed; }
+ const SkGammas* gammas() const { return fGammas.get(); }
+
+ const SkColorLookUpTable* colorLUT() const { return fColorLUT.get(); }
+
+ const SkMatrix44& toXYZD50() const { return fToXYZD50; }
+ const SkMatrix44& fromXYZD50() const;
+
+private:
+
+ /**
+ * FIXME (msarett):
+ * Hiding this function until we can determine if we need it. Known issues include:
+ * Only writes 3x3 matrices
+ * Only writes float gammas
+ * Rejected by some parsers because the "profile description" is empty
+ */
+ sk_sp<SkData> writeToICC() const;
+
+ static sk_sp<SkColorSpace> NewRGB(SkGammaNamed gammaNamed, const SkMatrix44& toXYZD50);
+
+ SkColorSpace_Base(SkGammaNamed gammaNamed, const SkMatrix44& toXYZ);
+
+ SkColorSpace_Base(sk_sp<SkColorLookUpTable> colorLUT, SkGammaNamed gammaNamed,
+ sk_sp<SkGammas> gammas, const SkMatrix44& toXYZ, sk_sp<SkData> profileData);
+
+ sk_sp<SkColorLookUpTable> fColorLUT;
+ const SkGammaNamed fGammaNamed;
+ sk_sp<SkGammas> fGammas;
+ sk_sp<SkData> fProfileData;
+
+ const SkMatrix44 fToXYZD50;
+ mutable SkMatrix44 fFromXYZD50;
+ mutable SkOnce fFromXYZOnce;
+
+ friend class SkColorSpace;
+ friend class ColorSpaceXformTest;
+ friend class ColorSpaceTest;
+ typedef SkColorSpace INHERITED;
+};
+
+static inline SkColorSpace_Base* as_CSB(SkColorSpace* colorSpace) {
+ return static_cast<SkColorSpace_Base*>(colorSpace);
+}
+
+static inline const SkColorSpace_Base* as_CSB(const SkColorSpace* colorSpace) {
+ return static_cast<const SkColorSpace_Base*>(colorSpace);
+}
+
+static inline SkColorSpace_Base* as_CSB(const sk_sp<SkColorSpace>& colorSpace) {
+ return static_cast<SkColorSpace_Base*>(colorSpace.get());
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkColorSpace_ICC.cpp b/gfx/skia/skia/src/core/SkColorSpace_ICC.cpp
new file mode 100644
index 000000000..4ef9f2b0a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorSpace_ICC.cpp
@@ -0,0 +1,1338 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkColorSpace.h"
+#include "SkColorSpace_Base.h"
+#include "SkColorSpacePriv.h"
+#include "SkEndian.h"
+#include "SkFixed.h"
+#include "SkTemplates.h"
+
+#define return_if_false(pred, msg) \
+ do { \
+ if (!(pred)) { \
+ SkColorSpacePrintf("Invalid ICC Profile: %s.\n", (msg)); \
+ return false; \
+ } \
+ } while (0)
+
+#define return_null(msg) \
+ do { \
+ SkColorSpacePrintf("Invalid ICC Profile: %s.\n", (msg)); \
+ return nullptr; \
+ } while (0)
+
+static uint16_t read_big_endian_u16(const uint8_t* ptr) {
+ return ptr[0] << 8 | ptr[1];
+}
+
+static uint32_t read_big_endian_u32(const uint8_t* ptr) {
+ return ptr[0] << 24 | ptr[1] << 16 | ptr[2] << 8 | ptr[3];
+}
+
+static int32_t read_big_endian_i32(const uint8_t* ptr) {
+ return (int32_t) read_big_endian_u32(ptr);
+}
+
+// This is equal to the header size according to the ICC specification (128)
+// plus the size of the tag count (4). We include the tag count since we
+// always require it to be present anyway.
+static constexpr size_t kICCHeaderSize = 132;
+
+// Contains a signature (4), offset (4), and size (4).
+static constexpr size_t kICCTagTableEntrySize = 12;
+
+static constexpr uint32_t kRGB_ColorSpace = SkSetFourByteTag('R', 'G', 'B', ' ');
+static constexpr uint32_t kDisplay_Profile = SkSetFourByteTag('m', 'n', 't', 'r');
+static constexpr uint32_t kInput_Profile = SkSetFourByteTag('s', 'c', 'n', 'r');
+static constexpr uint32_t kOutput_Profile = SkSetFourByteTag('p', 'r', 't', 'r');
+static constexpr uint32_t kColorSpace_Profile = SkSetFourByteTag('s', 'p', 'a', 'c');
+static constexpr uint32_t kXYZ_PCSSpace = SkSetFourByteTag('X', 'Y', 'Z', ' ');
+static constexpr uint32_t kACSP_Signature = SkSetFourByteTag('a', 'c', 's', 'p');
+
+struct ICCProfileHeader {
+ uint32_t fSize;
+
+ // No reason to care about the preferred color management module (ex: Adobe, Apple, etc.).
+ // We're always going to use this one.
+ uint32_t fCMMType_ignored;
+
+ uint32_t fVersion;
+ uint32_t fProfileClass;
+ uint32_t fInputColorSpace;
+ uint32_t fPCS;
+ uint32_t fDateTime_ignored[3];
+ uint32_t fSignature;
+
+ // Indicates the platform that this profile was created for (ex: Apple, Microsoft). This
+ // doesn't really matter to us.
+ uint32_t fPlatformTarget_ignored;
+
+ // Flags can indicate:
+ // (1) Whether this profile was embedded in a file. This flag is consistently wrong.
+ // Ex: The profile came from a file but indicates that it did not.
+ // (2) Whether we are allowed to use the profile independently of the color data. If set,
+ // this may allow us to use the embedded profile for testing separate from the original
+ // image.
+ uint32_t fFlags_ignored;
+
+ // We support many output devices. It doesn't make sense to think about the attributes of
+ // the device in the context of the image profile.
+ uint32_t fDeviceManufacturer_ignored;
+ uint32_t fDeviceModel_ignored;
+ uint32_t fDeviceAttributes_ignored[2];
+
+ uint32_t fRenderingIntent;
+ int32_t fIlluminantXYZ[3];
+
+ // We don't care who created the profile.
+ uint32_t fCreator_ignored;
+
+ // This is an MD5 checksum. Could be useful for checking if profiles are equal.
+ uint32_t fProfileId_ignored[4];
+
+ // Reserved for future use.
+ uint32_t fReserved_ignored[7];
+
+ uint32_t fTagCount;
+
+ void init(const uint8_t* src, size_t len) {
+ SkASSERT(kICCHeaderSize == sizeof(*this));
+
+ uint32_t* dst = (uint32_t*) this;
+ for (uint32_t i = 0; i < kICCHeaderSize / 4; i++, src+=4) {
+ dst[i] = read_big_endian_u32(src);
+ }
+ }
+
+ bool valid() const {
+ return_if_false(fSize >= kICCHeaderSize, "Size is too small");
+
+ uint8_t majorVersion = fVersion >> 24;
+ return_if_false(majorVersion <= 4, "Unsupported version");
+
+ // These are the four basic classes of profiles that we might expect to see embedded
+ // in images. Additional classes exist, but they generally are used as a convenient
+ // way for CMMs to store calculated transforms.
+ return_if_false(fProfileClass == kDisplay_Profile ||
+ fProfileClass == kInput_Profile ||
+ fProfileClass == kOutput_Profile ||
+ fProfileClass == kColorSpace_Profile,
+ "Unsupported profile");
+
+ // TODO (msarett):
+ // All the profiles we've tested so far use RGB as the input color space.
+ return_if_false(fInputColorSpace == kRGB_ColorSpace, "Unsupported color space");
+
+ // TODO (msarett):
+ // All the profiles we've tested so far use XYZ as the profile connection space.
+ return_if_false(fPCS == kXYZ_PCSSpace, "Unsupported PCS space");
+
+ return_if_false(fSignature == kACSP_Signature, "Bad signature");
+
+ // TODO (msarett):
+ // Should we treat different rendering intents differently?
+ // Valid rendering intents include kPerceptual (0), kRelative (1),
+ // kSaturation (2), and kAbsolute (3).
+ if (fRenderingIntent > 3) {
+ // Warn rather than fail here. Occasionally, we see perfectly
+ // normal profiles with wacky rendering intents.
+ SkColorSpacePrintf("Warning, bad rendering intent.\n");
+ }
+
+ return_if_false(color_space_almost_equal(SkFixedToFloat(fIlluminantXYZ[0]), 0.96420f) &&
+ color_space_almost_equal(SkFixedToFloat(fIlluminantXYZ[1]), 1.00000f) &&
+ color_space_almost_equal(SkFixedToFloat(fIlluminantXYZ[2]), 0.82491f),
+ "Illuminant must be D50");
+
+ return_if_false(fTagCount <= 100, "Too many tags");
+
+ return true;
+ }
+};
+
+template <class T>
+static bool safe_add(T arg1, T arg2, size_t* result) {
+ SkASSERT(arg1 >= 0);
+ SkASSERT(arg2 >= 0);
+ if (arg1 >= 0 && arg2 <= std::numeric_limits<T>::max() - arg1) {
+ T sum = arg1 + arg2;
+ if (sum <= std::numeric_limits<size_t>::max()) {
+ *result = static_cast<size_t>(sum);
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool safe_mul(uint32_t arg1, uint32_t arg2, uint32_t* result) {
+ uint64_t product64 = (uint64_t) arg1 * (uint64_t) arg2;
+ uint32_t product32 = (uint32_t) product64;
+ if (product32 != product64) {
+ return false;
+ }
+
+ *result = product32;
+ return true;
+}
+
+struct ICCTag {
+ uint32_t fSignature;
+ uint32_t fOffset;
+ uint32_t fLength;
+
+ const uint8_t* init(const uint8_t* src) {
+ fSignature = read_big_endian_u32(src);
+ fOffset = read_big_endian_u32(src + 4);
+ fLength = read_big_endian_u32(src + 8);
+ return src + 12;
+ }
+
+ bool valid(size_t len) {
+ size_t tagEnd;
+ return_if_false(safe_add(fOffset, fLength, &tagEnd),
+ "Tag too large, overflows integer addition");
+ return_if_false(tagEnd <= len, "Tag too large for ICC profile");
+ return true;
+ }
+
+ const uint8_t* addr(const uint8_t* src) const {
+ return src + fOffset;
+ }
+
+ static const ICCTag* Find(const ICCTag tags[], int count, uint32_t signature) {
+ for (int i = 0; i < count; ++i) {
+ if (tags[i].fSignature == signature) {
+ return &tags[i];
+ }
+ }
+ return nullptr;
+ }
+};
+
+static constexpr uint32_t kTAG_rXYZ = SkSetFourByteTag('r', 'X', 'Y', 'Z');
+static constexpr uint32_t kTAG_gXYZ = SkSetFourByteTag('g', 'X', 'Y', 'Z');
+static constexpr uint32_t kTAG_bXYZ = SkSetFourByteTag('b', 'X', 'Y', 'Z');
+static constexpr uint32_t kTAG_rTRC = SkSetFourByteTag('r', 'T', 'R', 'C');
+static constexpr uint32_t kTAG_gTRC = SkSetFourByteTag('g', 'T', 'R', 'C');
+static constexpr uint32_t kTAG_bTRC = SkSetFourByteTag('b', 'T', 'R', 'C');
+static constexpr uint32_t kTAG_A2B0 = SkSetFourByteTag('A', '2', 'B', '0');
+
+static bool load_xyz(float dst[3], const uint8_t* src, size_t len) {
+ if (len < 20) {
+ SkColorSpacePrintf("XYZ tag is too small (%d bytes)", len);
+ return false;
+ }
+
+ dst[0] = SkFixedToFloat(read_big_endian_i32(src + 8));
+ dst[1] = SkFixedToFloat(read_big_endian_i32(src + 12));
+ dst[2] = SkFixedToFloat(read_big_endian_i32(src + 16));
+ SkColorSpacePrintf("XYZ %g %g %g\n", dst[0], dst[1], dst[2]);
+ return true;
+}
+
+static constexpr uint32_t kTAG_CurveType = SkSetFourByteTag('c', 'u', 'r', 'v');
+static constexpr uint32_t kTAG_ParaCurveType = SkSetFourByteTag('p', 'a', 'r', 'a');
+
+static SkGammas::Type set_gamma_value(SkGammas::Data* data, float value) {
+ if (color_space_almost_equal(2.2f, value)) {
+ data->fNamed = k2Dot2Curve_SkGammaNamed;
+ return SkGammas::Type::kNamed_Type;
+ }
+
+ if (color_space_almost_equal(1.0f, value)) {
+ data->fNamed = kLinear_SkGammaNamed;
+ return SkGammas::Type::kNamed_Type;
+ }
+
+ if (color_space_almost_equal(0.0f, value)) {
+ return SkGammas::Type::kNone_Type;
+ }
+
+ data->fValue = value;
+ return SkGammas::Type::kValue_Type;
+}
+
+static float read_big_endian_16_dot_16(const uint8_t buf[4]) {
+ // It just so happens that SkFixed is also 16.16!
+ return SkFixedToFloat(read_big_endian_i32(buf));
+}
+
+/**
+ * @param outData Set to the appropriate value on success. If we have table or
+ * parametric gamma, it is the responsibility of the caller to set
+ * fOffset.
+ * @param outParams If this is a parametric gamma, this is set to the appropriate
+ * parameters on success.
+ * @param outTagBytes Will be set to the length of the tag on success.
+ *  @param src      Pointer to tag data.
+ *  @param len      Length of tag data in bytes.
+ *
+ * @return kNone_Type on failure, otherwise the type of the gamma tag.
+ */
+static SkGammas::Type parse_gamma(SkGammas::Data* outData, SkGammas::Params* outParams,
+ size_t* outTagBytes, const uint8_t* src, size_t len) {
+ if (len < 12) {
+ SkColorSpacePrintf("gamma tag is too small (%d bytes)", len);
+ return SkGammas::Type::kNone_Type;
+ }
+
+ // In the case of consecutive gamma tags, we need to count the number of bytes in the
+ // tag, so that we can move on to the next tag.
+ size_t tagBytes;
+
+ uint32_t type = read_big_endian_u32(src);
+ // Bytes 4-7 are reserved and should be set to zero.
+ switch (type) {
+ case kTAG_CurveType: {
+ uint32_t count = read_big_endian_u32(src + 8);
+
+ // tagBytes = 12 + 2 * count
+ // We need to do safe addition here to avoid integer overflow.
+ if (!safe_add(count, count, &tagBytes) ||
+ !safe_add((size_t) 12, tagBytes, &tagBytes))
+ {
+ SkColorSpacePrintf("Invalid gamma count");
+ return SkGammas::Type::kNone_Type;
+ }
+
+ if (len < tagBytes) {
+ SkColorSpacePrintf("gamma tag is too small (%d bytes)", len);
+ return SkGammas::Type::kNone_Type;
+ }
+ *outTagBytes = tagBytes;
+
+ if (0 == count) {
+ // Some tags require a gamma curve, but the author doesn't actually want
+ // to transform the data. In this case, it is common to see a curve with
+ // a count of 0.
+ outData->fNamed = kLinear_SkGammaNamed;
+ return SkGammas::Type::kNamed_Type;
+ }
+
+ const uint16_t* table = (const uint16_t*) (src + 12);
+ if (1 == count) {
+ // The table entry is the gamma (with a bias of 256).
+                // The single entry is the gamma exponent in 8.8 fixed point (divide by 256).
+ SkColorSpacePrintf("gamma %g\n", value);
+
+ return set_gamma_value(outData, value);
+ }
+
+ // Check for frequently occurring sRGB curves.
+            // We do this by sampling a few values and seeing if they match our expectations.
+ // A more robust solution would be to compare each value in this curve against
+ // an sRGB curve to see if we remain below an error threshold. At this time,
+ // we haven't seen any images in the wild that make this kind of
+ // calculation necessary. We encounter identical gamma curves over and
+ // over again, but relatively few variations.
+ if (1024 == count) {
+ // The magic values were chosen because they match both the very common
+ // HP sRGB gamma table and the less common Canon sRGB gamma table (which use
+ // different rounding rules).
+ if (0 == read_big_endian_u16((const uint8_t*) &table[0]) &&
+ 3366 == read_big_endian_u16((const uint8_t*) &table[257]) &&
+ 14116 == read_big_endian_u16((const uint8_t*) &table[513]) &&
+ 34318 == read_big_endian_u16((const uint8_t*) &table[768]) &&
+ 65535 == read_big_endian_u16((const uint8_t*) &table[1023])) {
+ outData->fNamed = kSRGB_SkGammaNamed;
+ return SkGammas::Type::kNamed_Type;
+ }
+ }
+
+ if (26 == count) {
+ // The magic values were chosen because they match a very common LCMS sRGB
+ // gamma table.
+ if (0 == read_big_endian_u16((const uint8_t*) &table[0]) &&
+ 3062 == read_big_endian_u16((const uint8_t*) &table[6]) &&
+ 12824 == read_big_endian_u16((const uint8_t*) &table[12]) &&
+ 31237 == read_big_endian_u16((const uint8_t*) &table[18]) &&
+ 65535 == read_big_endian_u16((const uint8_t*) &table[25])) {
+ outData->fNamed = kSRGB_SkGammaNamed;
+ return SkGammas::Type::kNamed_Type;
+ }
+ }
+
+ if (4096 == count) {
+ // The magic values were chosen because they match Nikon, Epson, and
+ // LCMS sRGB gamma tables (all of which use different rounding rules).
+ if (0 == read_big_endian_u16((const uint8_t*) &table[0]) &&
+ 950 == read_big_endian_u16((const uint8_t*) &table[515]) &&
+ 3342 == read_big_endian_u16((const uint8_t*) &table[1025]) &&
+ 14079 == read_big_endian_u16((const uint8_t*) &table[2051]) &&
+ 65535 == read_big_endian_u16((const uint8_t*) &table[4095])) {
+ outData->fNamed = kSRGB_SkGammaNamed;
+ return SkGammas::Type::kNamed_Type;
+ }
+ }
+
+ // Otherwise, we will represent gamma with a table.
+ outData->fTable.fSize = count;
+ return SkGammas::Type::kTable_Type;
+ }
+ case kTAG_ParaCurveType: {
+ enum ParaCurveType {
+ kExponential_ParaCurveType = 0,
+ kGAB_ParaCurveType = 1,
+ kGABC_ParaCurveType = 2,
+ kGABDE_ParaCurveType = 3,
+ kGABCDEF_ParaCurveType = 4,
+ };
+
+ // Determine the format of the parametric curve tag.
+ uint16_t format = read_big_endian_u16(src + 8);
+ if (format > kGABCDEF_ParaCurveType) {
+ SkColorSpacePrintf("Unsupported gamma tag type %d\n", type);
+ return SkGammas::Type::kNone_Type;
+ }
+
+ if (kExponential_ParaCurveType == format) {
+ tagBytes = 12 + 4;
+ if (len < tagBytes) {
+ SkColorSpacePrintf("gamma tag is too small (%d bytes)", len);
+ return SkGammas::Type::kNone_Type;
+ }
+
+ // Y = X^g
+ float g = read_big_endian_16_dot_16(src + 12);
+
+ *outTagBytes = tagBytes;
+ return set_gamma_value(outData, g);
+ }
+
+ // Here's where the real parametric gammas start. There are many
+ // permutations of the same equations.
+ //
+ // Y = (aX + b)^g + c for X >= d
+ // Y = eX + f otherwise
+ //
+ // We will fill in with zeros as necessary to always match the above form.
+ if (len < 24) {
+ SkColorSpacePrintf("gamma tag is too small (%d bytes)", len);
+ return SkGammas::Type::kNone_Type;
+ }
+ float g = read_big_endian_16_dot_16(src + 12);
+ float a = read_big_endian_16_dot_16(src + 16);
+ float b = read_big_endian_16_dot_16(src + 20);
+ float c = 0.0f, d = 0.0f, e = 0.0f, f = 0.0f;
+ switch(format) {
+ case kGAB_ParaCurveType:
+ tagBytes = 12 + 12;
+
+ // Y = (aX + b)^g for X >= -b/a
+ // Y = 0 otherwise
+ d = -b / a;
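+ // Note: if |a| is zero this yields inf or NaN; both cases are rejected by the checks below.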
+ break;
+ case kGABC_ParaCurveType:
+ tagBytes = 12 + 16;
+ if (len < tagBytes) {
+ SkColorSpacePrintf("gamma tag is too small (%d bytes)", len);
+ return SkGammas::Type::kNone_Type;
+ }
+
+ // Y = (aX + b)^g + c for X >= -b/a
+ // Y = c otherwise
+ c = read_big_endian_16_dot_16(src + 24);
+ d = -b / a;
+ f = c;
+ break;
+ case kGABDE_ParaCurveType:
+ tagBytes = 12 + 20;
+ if (len < tagBytes) {
+ SkColorSpacePrintf("gamma tag is too small (%d bytes)", len);
+ return SkGammas::Type::kNone_Type;
+ }
+
+ // Y = (aX + b)^g for X >= d
+ // Y = eX otherwise
+ d = read_big_endian_16_dot_16(src + 28);
+
+ // Not a bug! We define |e| to always be the coefficient on X in the
+ // second equation. The spec calls this |c| in this particular equation.
+ // We don't follow their convention because then |c| would have a
+ // different meaning in each of our cases.
+ e = read_big_endian_16_dot_16(src + 24);
+ break;
+ case kGABCDEF_ParaCurveType:
+ tagBytes = 12 + 28;
+ if (len < tagBytes) {
+ SkColorSpacePrintf("gamma tag is too small (%d bytes)", len);
+ return SkGammas::Type::kNone_Type;
+ }
+
+ // Y = (aX + b)^g + c for X >= d
+ // Y = eX + f otherwise
+ // NOTE: The ICC spec writes "cX" in place of "eX" but I think
+ // it's a typo.
+ c = read_big_endian_16_dot_16(src + 24);
+ d = read_big_endian_16_dot_16(src + 28);
+ e = read_big_endian_16_dot_16(src + 32);
+ f = read_big_endian_16_dot_16(src + 36);
+ break;
+ default:
+ SkASSERT(false);
+ return SkGammas::Type::kNone_Type;
+ }
+
+ // Recognize and simplify a very common parametric representation of sRGB gamma.
+ if (color_space_almost_equal(0.9479f, a) &&
+ color_space_almost_equal(0.0521f, b) &&
+ color_space_almost_equal(0.0000f, c) &&
+ color_space_almost_equal(0.0405f, d) &&
+ color_space_almost_equal(0.0774f, e) &&
+ color_space_almost_equal(0.0000f, f) &&
+ color_space_almost_equal(2.4000f, g)) {
+ outData->fNamed = kSRGB_SkGammaNamed;
+ return SkGammas::Type::kNamed_Type;
+ }
+
+ // Fail on invalid gammas.
+ if (SkScalarIsNaN(d)) {
+ return SkGammas::Type::kNone_Type;
+ }
+
+ if (d <= 0.0f) {
+ // Y = (aX + b)^g + c for all X
+ if (0.0f == a || 0.0f == g) {
+ SkColorSpacePrintf("A or G is zero, constant gamma function "
+ "is nonsense");
+ return SkGammas::Type::kNone_Type;
+ }
+ }
+
+ if (d >= 1.0f) {
+ // Y = eX + f for all X
+ if (0.0f == e) {
+ SkColorSpacePrintf("E is zero, constant gamma function is "
+ "nonsense");
+ return SkGammas::Type::kNone_Type;
+ }
+ }
+
+ if ((0.0f == a || 0.0f == g) && 0.0f == e) {
+ SkColorSpacePrintf("A or G, and E are zero, constant gamma function "
+ "is nonsense");
+ return SkGammas::Type::kNone_Type;
+ }
+
+ *outTagBytes = tagBytes;
+
+ outParams->fG = g;
+ outParams->fA = a;
+ outParams->fB = b;
+ outParams->fC = c;
+ outParams->fD = d;
+ outParams->fE = e;
+ outParams->fF = f;
+ return SkGammas::Type::kParam_Type;
+ }
+ default:
+ SkColorSpacePrintf("Unsupported gamma tag type %d\n", type);
+ return SkGammas::Type::kNone_Type;
+ }
+}
+
+/**
+ * Returns the additional size in bytes needed to store the gamma tag.
+ */
+static size_t gamma_alloc_size(SkGammas::Type type, const SkGammas::Data& data) {
+ switch (type) {
+ case SkGammas::Type::kNamed_Type:
+ case SkGammas::Type::kValue_Type:
+ return 0;
+ case SkGammas::Type::kTable_Type:
+ return sizeof(float) * data.fTable.fSize;
+ case SkGammas::Type::kParam_Type:
+ return sizeof(SkGammas::Params);
+ default:
+ SkASSERT(false);
+ return 0;
+ }
+}
+
+/**
+ * Sets invalid gamma to the default value.
+ */
+static void handle_invalid_gamma(SkGammas::Type* type, SkGammas::Data* data) {
+ if (SkGammas::Type::kNone_Type == *type) {
+ *type = SkGammas::Type::kNamed_Type;
+
+ // Guess sRGB in the case of a malformed transfer function.
+ data->fNamed = kSRGB_SkGammaNamed;
+ }
+}
+
+/**
+ * Finish loading the gammas, now that we have allocated memory for the SkGammas struct.
+ *
+ * There's nothing to do for the simple cases, but for table gammas we need to actually
+ * read the table into heap memory. And for parametric gammas, we need to copy over the
+ * parameter values.
+ *
+ * @param memory Pointer to start of the SkGammas memory block
+ * @param offset Bytes of memory (after the SkGammas struct) that are already in use.
+ * @param data In-out variable. Will fill in the offset to the table or parameters
+ * if necessary.
+ * @param params Parameters for gamma curve. Only initialized/used when we have a
+ * parametric gamma.
+ * @param src Pointer to start of the gamma tag.
+ *
+ * @return Additional bytes of memory that are being used by this gamma curve.
+ */
+static size_t load_gammas(void* memory, size_t offset, SkGammas::Type type,
+ SkGammas::Data* data, const SkGammas::Params& params,
+ const uint8_t* src) {
+ void* storage = SkTAddOffset<void>(memory, offset + sizeof(SkGammas));
+
+ switch (type) {
+ case SkGammas::Type::kNamed_Type:
+ case SkGammas::Type::kValue_Type:
+ // Nothing to do here.
+ return 0;
+ case SkGammas::Type::kTable_Type: {
+ data->fTable.fOffset = offset;
+
+ float* outTable = (float*) storage;
+ const uint16_t* inTable = (const uint16_t*) (src + 12);
+ for (int i = 0; i < data->fTable.fSize; i++) {
+ outTable[i] = (read_big_endian_u16((const uint8_t*) &inTable[i])) / 65535.0f;
+ }
+
+ return sizeof(float) * data->fTable.fSize;
+ }
+ case SkGammas::Type::kParam_Type:
+ data->fTable.fOffset = offset;
+ memcpy(storage, &params, sizeof(SkGammas::Params));
+ return sizeof(SkGammas::Params);
+ default:
+ SkASSERT(false);
+ return 0;
+ }
+}
+
+static constexpr uint32_t kTAG_AtoBType = SkSetFourByteTag('m', 'A', 'B', ' ');
+
+static bool load_color_lut(sk_sp<SkColorLookUpTable>* colorLUT, uint32_t inputChannels,
+ const uint8_t* src, size_t len) {
+ // 16 bytes reserved for grid points, 1 for precision, 3 for padding.
+ // The color LUT data follows after this header.
+ static constexpr uint32_t kColorLUTHeaderSize = 20;
+ if (len < kColorLUTHeaderSize) {
+ SkColorSpacePrintf("Color LUT tag is too small (%d bytes).", len);
+ return false;
+ }
+ size_t dataLen = len - kColorLUTHeaderSize;
+
+ SkASSERT(3 == inputChannels);
+ uint8_t gridPoints[3];
+ uint32_t numEntries = 1;
+ for (uint32_t i = 0; i < inputChannels; i++) {
+ gridPoints[i] = src[i];
+ if (0 == src[i]) {
+ SkColorSpacePrintf("Each input channel must have at least one grid point.");
+ return false;
+ }
+
+ if (!safe_mul(numEntries, src[i], &numEntries)) {
+ SkColorSpacePrintf("Too many entries in Color LUT.");
+ return false;
+ }
+ }
+
+ if (!safe_mul(numEntries, SkColorLookUpTable::kOutputChannels, &numEntries)) {
+ SkColorSpacePrintf("Too many entries in Color LUT.");
+ return false;
+ }
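+ // numEntries is now gridPoints[0] * gridPoints[1] * gridPoints[2] * kOutputChannels.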
+
+ // Space is provided for a maximum of 16 input channels. Now we determine the precision
+ // of the table values.
+ uint8_t precision = src[16];
+ switch (precision) {
+ case 1: // 8-bit data
+ case 2: // 16-bit data
+ break;
+ default:
+ SkColorSpacePrintf("Color LUT precision must be 8-bit or 16-bit.\n");
+ return false;
+ }
+
+ uint32_t clutBytes;
+ if (!safe_mul(numEntries, precision, &clutBytes)) {
+ SkColorSpacePrintf("Too many entries in Color LUT.");
+ return false;
+ }
+
+ if (dataLen < clutBytes) {
+ SkColorSpacePrintf("Color LUT tag is too small (%d bytes).", len);
+ return false;
+ }
+
+ // The SkColorLookUpTable is placement-newed at the front of this allocation and owns it;
+ // the table data follows immediately after the struct.
+ void* memory = sk_malloc_throw(sizeof(SkColorLookUpTable) + sizeof(float) * numEntries);
+ *colorLUT = sk_sp<SkColorLookUpTable>(new (memory) SkColorLookUpTable(inputChannels,
+ gridPoints));
+
+ float* table = SkTAddOffset<float>(memory, sizeof(SkColorLookUpTable));
+ const uint8_t* ptr = src + kColorLUTHeaderSize;
+ for (uint32_t i = 0; i < numEntries; i++, ptr += precision) {
+ if (1 == precision) {
+ table[i] = ((float) *ptr) / 255.0f;
+ } else {
+ table[i] = ((float) read_big_endian_u16(ptr)) / 65535.0f;
+ }
+ }
+
+ return true;
+}
+
+static bool load_matrix(SkMatrix44* toXYZ, const uint8_t* src, size_t len) {
+ if (len < 48) {
+ SkColorSpacePrintf("Matrix tag is too small (%d bytes).", len);
+ return false;
+ }
+
+ // For this matrix to behave like our "to XYZ D50" matrices, it needs to be scaled.
+ constexpr float scale = 65535.0 / 32768.0;
+ float array[16];
+ array[ 0] = scale * SkFixedToFloat(read_big_endian_i32(src));
+ array[ 1] = scale * SkFixedToFloat(read_big_endian_i32(src + 4));
+ array[ 2] = scale * SkFixedToFloat(read_big_endian_i32(src + 8));
+ array[ 3] = scale * SkFixedToFloat(read_big_endian_i32(src + 36)); // translate R
+ array[ 4] = scale * SkFixedToFloat(read_big_endian_i32(src + 12));
+ array[ 5] = scale * SkFixedToFloat(read_big_endian_i32(src + 16));
+ array[ 6] = scale * SkFixedToFloat(read_big_endian_i32(src + 20));
+ array[ 7] = scale * SkFixedToFloat(read_big_endian_i32(src + 40)); // translate G
+ array[ 8] = scale * SkFixedToFloat(read_big_endian_i32(src + 24));
+ array[ 9] = scale * SkFixedToFloat(read_big_endian_i32(src + 28));
+ array[10] = scale * SkFixedToFloat(read_big_endian_i32(src + 32));
+ array[11] = scale * SkFixedToFloat(read_big_endian_i32(src + 44)); // translate B
+ array[12] = 0.0f;
+ array[13] = 0.0f;
+ array[14] = 0.0f;
+ array[15] = 1.0f;
+ toXYZ->setRowMajorf(array);
+ return true;
+}
+
+static inline SkGammaNamed is_named(const sk_sp<SkGammas>& gammas) {
+ if (gammas->isNamed(0) && gammas->isNamed(1) && gammas->isNamed(2) &&
+ gammas->fRedData.fNamed == gammas->fGreenData.fNamed &&
+ gammas->fRedData.fNamed == gammas->fBlueData.fNamed)
+ {
+ return gammas->fRedData.fNamed;
+ }
+
+ return kNonStandard_SkGammaNamed;
+}
+
+
+static bool load_a2b0(sk_sp<SkColorLookUpTable>* colorLUT, SkGammaNamed* gammaNamed,
+ sk_sp<SkGammas>* gammas, SkMatrix44* toXYZ, const uint8_t* src, size_t len) {
+ if (len < 32) {
+ SkColorSpacePrintf("A to B tag is too small (%d bytes).", len);
+ return false;
+ }
+
+ uint32_t type = read_big_endian_u32(src);
+ if (kTAG_AtoBType != type) {
+ // FIXME (msarett): Need to support lut8Type and lut16Type.
+ SkColorSpacePrintf("Unsupported A to B tag type.\n");
+ return false;
+ }
+
+ // Read the number of channels. The four bytes that we skipped are reserved and
+ // must be zero.
+ uint8_t inputChannels = src[8];
+ uint8_t outputChannels = src[9];
+ if (3 != inputChannels || SkColorLookUpTable::kOutputChannels != outputChannels) {
+ // We only handle (supposedly) RGB inputs and RGB outputs. The numbers of input
+ // channels and output channels both must be 3.
+ // TODO (msarett):
+ // Support different numbers of input channels. Ex: CMYK (4).
+ SkColorSpacePrintf("Input and output channels must equal 3 in A to B tag.\n");
+ return false;
+ }
+
+ // Read the offsets of each element in the A to B tag. With the exception of A curves and
+ // B curves (which we do not yet support), we will handle these elements in the order in
+ // which they should be applied (rather than the order in which they occur in the tag).
+ // If the offset is non-zero it indicates that the element is present.
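+ // Byte offsets within the 'mAB ' tag header: B curves at 12, matrix at 16, M curves at 20,
+ // CLUT at 24, A curves at 28.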
+ uint32_t offsetToACurves = read_big_endian_i32(src + 28);
+ uint32_t offsetToBCurves = read_big_endian_i32(src + 12);
+ if ((0 != offsetToACurves) || (0 != offsetToBCurves)) {
+ // FIXME (msarett): Handle A and B curves.
+ // Note that the A curve is technically required in order to have a color LUT.
+ // However, all the A curves I have seen so far are just placeholders that
+ // don't actually transform the data.
+ SkColorSpacePrintf("Ignoring A and/or B curve. Output may be wrong.\n");
+ }
+
+ uint32_t offsetToColorLUT = read_big_endian_i32(src + 24);
+ if (0 != offsetToColorLUT && offsetToColorLUT < len) {
+ if (!load_color_lut(colorLUT, inputChannels, src + offsetToColorLUT,
+ len - offsetToColorLUT)) {
+ SkColorSpacePrintf("Failed to read color LUT from A to B tag.\n");
+ }
+ }
+
+ uint32_t offsetToMCurves = read_big_endian_i32(src + 20);
+ if (0 != offsetToMCurves && offsetToMCurves < len) {
+ const uint8_t* rTagPtr = src + offsetToMCurves;
+ size_t tagLen = len - offsetToMCurves;
+
+ SkGammas::Data rData;
+ SkGammas::Params rParams;
+
+ // On an invalid first gamma, tagBytes remains set to zero. This causes the two
+ // subsequent curves to be treated as identical (which is what we want).
+ size_t tagBytes = 0;
+ SkGammas::Type rType = parse_gamma(&rData, &rParams, &tagBytes, rTagPtr, tagLen);
+ handle_invalid_gamma(&rType, &rData);
+ size_t alignedTagBytes = SkAlign4(tagBytes);
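+ // Curve elements in the tag are padded to 4-byte boundaries, so the next curve (if any)
+ // starts alignedTagBytes past the current one.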
+
+ if ((3 * alignedTagBytes <= tagLen) &&
+ !memcmp(rTagPtr, rTagPtr + 1 * alignedTagBytes, tagBytes) &&
+ !memcmp(rTagPtr, rTagPtr + 2 * alignedTagBytes, tagBytes))
+ {
+ if (SkGammas::Type::kNamed_Type == rType) {
+ *gammaNamed = rData.fNamed;
+ } else {
+ size_t allocSize = sizeof(SkGammas);
+ return_if_false(safe_add(allocSize, gamma_alloc_size(rType, rData), &allocSize),
+ "SkGammas struct is too large to allocate");
+ void* memory = sk_malloc_throw(allocSize);
+ *gammas = sk_sp<SkGammas>(new (memory) SkGammas());
+ load_gammas(memory, 0, rType, &rData, rParams, rTagPtr);
+
+ (*gammas)->fRedType = rType;
+ (*gammas)->fGreenType = rType;
+ (*gammas)->fBlueType = rType;
+
+ (*gammas)->fRedData = rData;
+ (*gammas)->fGreenData = rData;
+ (*gammas)->fBlueData = rData;
+ }
+ } else {
+ const uint8_t* gTagPtr = rTagPtr + alignedTagBytes;
+ tagLen = tagLen > alignedTagBytes ? tagLen - alignedTagBytes : 0;
+ SkGammas::Data gData;
+ SkGammas::Params gParams;
+ tagBytes = 0;
+ SkGammas::Type gType = parse_gamma(&gData, &gParams, &tagBytes, gTagPtr,
+ tagLen);
+ handle_invalid_gamma(&gType, &gData);
+
+ alignedTagBytes = SkAlign4(tagBytes);
+ const uint8_t* bTagPtr = gTagPtr + alignedTagBytes;
+ tagLen = tagLen > alignedTagBytes ? tagLen - alignedTagBytes : 0;
+ SkGammas::Data bData;
+ SkGammas::Params bParams;
+ SkGammas::Type bType = parse_gamma(&bData, &bParams, &tagBytes, bTagPtr,
+ tagLen);
+ handle_invalid_gamma(&bType, &bData);
+
+ size_t allocSize = sizeof(SkGammas);
+ return_if_false(safe_add(allocSize, gamma_alloc_size(rType, rData), &allocSize),
+ "SkGammas struct is too large to allocate");
+ return_if_false(safe_add(allocSize, gamma_alloc_size(gType, gData), &allocSize),
+ "SkGammas struct is too large to allocate");
+ return_if_false(safe_add(allocSize, gamma_alloc_size(bType, bData), &allocSize),
+ "SkGammas struct is too large to allocate");
+ void* memory = sk_malloc_throw(allocSize);
+ *gammas = sk_sp<SkGammas>(new (memory) SkGammas());
+
+ uint32_t offset = 0;
+ (*gammas)->fRedType = rType;
+ offset += load_gammas(memory, offset, rType, &rData, rParams, rTagPtr);
+
+ (*gammas)->fGreenType = gType;
+ offset += load_gammas(memory, offset, gType, &gData, gParams, gTagPtr);
+
+ (*gammas)->fBlueType = bType;
+ load_gammas(memory, offset, bType, &bData, bParams, bTagPtr);
+
+ (*gammas)->fRedData = rData;
+ (*gammas)->fGreenData = gData;
+ (*gammas)->fBlueData = bData;
+ }
+ } else {
+ // Guess sRGB if the chunk is missing a transfer function.
+ *gammaNamed = kSRGB_SkGammaNamed;
+ }
+
+ if (kNonStandard_SkGammaNamed == *gammaNamed) {
+ *gammaNamed = is_named(*gammas);
+ if (kNonStandard_SkGammaNamed != *gammaNamed) {
+ // No need to keep the gammas struct, the enum is enough.
+ *gammas = nullptr;
+ }
+ }
+
+ uint32_t offsetToMatrix = read_big_endian_i32(src + 16);
+ if (0 != offsetToMatrix && offsetToMatrix < len) {
+ if (!load_matrix(toXYZ, src + offsetToMatrix, len - offsetToMatrix)) {
+ SkColorSpacePrintf("Failed to read matrix from A to B tag.\n");
+ toXYZ->setIdentity();
+ }
+ }
+
+ return true;
+}
+
+static bool tag_equals(const ICCTag* a, const ICCTag* b, const uint8_t* base) {
+ if (!a || !b) {
+ return a == b;
+ }
+
+ if (a->fLength != b->fLength) {
+ return false;
+ }
+
+ if (a->fOffset == b->fOffset) {
+ return true;
+ }
+
+ return !memcmp(a->addr(base), b->addr(base), a->fLength);
+}
+
+sk_sp<SkColorSpace> SkColorSpace::NewICC(const void* input, size_t len) {
+ if (!input || len < kICCHeaderSize) {
+ return_null("Data is null or not large enough to contain an ICC profile");
+ }
+
+ // Create our own copy of the input.
+ void* memory = sk_malloc_throw(len);
+ memcpy(memory, input, len);
+ sk_sp<SkData> data = SkData::MakeFromMalloc(memory, len);
+ const uint8_t* base = data->bytes();
+ const uint8_t* ptr = base;
+
+ // Read the ICC profile header and check to make sure that it is valid.
+ ICCProfileHeader header;
+ header.init(ptr, len);
+ if (!header.valid()) {
+ return nullptr;
+ }
+
+ // Adjust ptr and len before reading the tags.
+ if (len < header.fSize) {
+ SkColorSpacePrintf("ICC profile might be truncated.\n");
+ } else if (len > header.fSize) {
+ SkColorSpacePrintf("Caller provided extra data beyond the end of the ICC profile.\n");
+ len = header.fSize;
+ }
+ ptr += kICCHeaderSize;
+ len -= kICCHeaderSize;
+
+ // Parse tag headers.
+ uint32_t tagCount = header.fTagCount;
+ SkColorSpacePrintf("ICC profile contains %d tags.\n", tagCount);
+ if (len < kICCTagTableEntrySize * tagCount) {
+ return_null("Not enough input data to read tag table entries");
+ }
+
+ SkAutoTArray<ICCTag> tags(tagCount);
+ for (uint32_t i = 0; i < tagCount; i++) {
+ ptr = tags[i].init(ptr);
+ SkColorSpacePrintf("[%d] %c%c%c%c %d %d\n", i, (tags[i].fSignature >> 24) & 0xFF,
+ (tags[i].fSignature >> 16) & 0xFF, (tags[i].fSignature >> 8) & 0xFF,
+ (tags[i].fSignature >> 0) & 0xFF, tags[i].fOffset, tags[i].fLength);
+
+ if (!tags[i].valid(kICCHeaderSize + len)) {
+ return_null("Tag is too large to fit in ICC profile");
+ }
+ }
+
+ switch (header.fInputColorSpace) {
+ case kRGB_ColorSpace: {
+ // Recognize the rXYZ, gXYZ, and bXYZ tags.
+ const ICCTag* r = ICCTag::Find(tags.get(), tagCount, kTAG_rXYZ);
+ const ICCTag* g = ICCTag::Find(tags.get(), tagCount, kTAG_gXYZ);
+ const ICCTag* b = ICCTag::Find(tags.get(), tagCount, kTAG_bXYZ);
+ if (r && g && b) {
+ float toXYZ[9];
+ if (!load_xyz(&toXYZ[0], r->addr(base), r->fLength) ||
+ !load_xyz(&toXYZ[3], g->addr(base), g->fLength) ||
+ !load_xyz(&toXYZ[6], b->addr(base), b->fLength))
+ {
+ return_null("Need valid rgb tags for XYZ space");
+ }
+ SkMatrix44 mat(SkMatrix44::kUninitialized_Constructor);
+ mat.set3x3(toXYZ[0], toXYZ[1], toXYZ[2],
+ toXYZ[3], toXYZ[4], toXYZ[5],
+ toXYZ[6], toXYZ[7], toXYZ[8]);
+
+ r = ICCTag::Find(tags.get(), tagCount, kTAG_rTRC);
+ g = ICCTag::Find(tags.get(), tagCount, kTAG_gTRC);
+ b = ICCTag::Find(tags.get(), tagCount, kTAG_bTRC);
+
+ // If some, but not all, of the gamma tags are missing, assume that all
+ // gammas are meant to be the same. This behavior is an arbitrary guess,
+ // but it simplifies the code below.
+ if ((!r || !g || !b) && (r || g || b)) {
+ if (!r) {
+ r = g ? g : b;
+ }
+
+ if (!g) {
+ g = r ? r : b;
+ }
+
+ if (!b) {
+ b = r ? r : g;
+ }
+ }
+
+ SkGammaNamed gammaNamed = kNonStandard_SkGammaNamed;
+ sk_sp<SkGammas> gammas = nullptr;
+ size_t tagBytes;
+ if (r && g && b) {
+ if (tag_equals(r, g, base) && tag_equals(g, b, base)) {
+ SkGammas::Data data;
+ SkGammas::Params params;
+ SkGammas::Type type =
+ parse_gamma(&data, &params, &tagBytes, r->addr(base), r->fLength);
+ handle_invalid_gamma(&type, &data);
+
+ if (SkGammas::Type::kNamed_Type == type) {
+ gammaNamed = data.fNamed;
+ } else {
+ size_t allocSize = sizeof(SkGammas);
+ if (!safe_add(allocSize, gamma_alloc_size(type, data), &allocSize)) {
+ return_null("SkGammas struct is too large to allocate");
+ }
+ void* memory = sk_malloc_throw(allocSize);
+ gammas = sk_sp<SkGammas>(new (memory) SkGammas());
+ load_gammas(memory, 0, type, &data, params, r->addr(base));
+
+ gammas->fRedType = type;
+ gammas->fGreenType = type;
+ gammas->fBlueType = type;
+
+ gammas->fRedData = data;
+ gammas->fGreenData = data;
+ gammas->fBlueData = data;
+ }
+ } else {
+ SkGammas::Data rData;
+ SkGammas::Params rParams;
+ SkGammas::Type rType =
+ parse_gamma(&rData, &rParams, &tagBytes, r->addr(base), r->fLength);
+ handle_invalid_gamma(&rType, &rData);
+
+ SkGammas::Data gData;
+ SkGammas::Params gParams;
+ SkGammas::Type gType =
+ parse_gamma(&gData, &gParams, &tagBytes, g->addr(base), g->fLength);
+ handle_invalid_gamma(&gType, &gData);
+
+ SkGammas::Data bData;
+ SkGammas::Params bParams;
+ SkGammas::Type bType =
+ parse_gamma(&bData, &bParams, &tagBytes, b->addr(base), b->fLength);
+ handle_invalid_gamma(&bType, &bData);
+
+ size_t allocSize = sizeof(SkGammas);
+ if (!safe_add(allocSize, gamma_alloc_size(rType, rData), &allocSize) ||
+ !safe_add(allocSize, gamma_alloc_size(gType, gData), &allocSize) ||
+ !safe_add(allocSize, gamma_alloc_size(bType, bData), &allocSize))
+ {
+ return_null("SkGammas struct is too large to allocate");
+ }
+ void* memory = sk_malloc_throw(allocSize);
+ gammas = sk_sp<SkGammas>(new (memory) SkGammas());
+
+ uint32_t offset = 0;
+ gammas->fRedType = rType;
+ offset += load_gammas(memory, offset, rType, &rData, rParams,
+ r->addr(base));
+
+ gammas->fGreenType = gType;
+ offset += load_gammas(memory, offset, gType, &gData, gParams,
+ g->addr(base));
+
+ gammas->fBlueType = bType;
+ load_gammas(memory, offset, bType, &bData, bParams, b->addr(base));
+
+ gammas->fRedData = rData;
+ gammas->fGreenData = gData;
+ gammas->fBlueData = bData;
+ }
+ } else {
+ // Guess sRGB if the profile is missing transfer functions.
+ gammaNamed = kSRGB_SkGammaNamed;
+ }
+
+ if (kNonStandard_SkGammaNamed == gammaNamed) {
+ // It's possible that we'll initially detect non-matching gammas, only for
+ // them to evaluate to the same named gamma curve.
+ gammaNamed = is_named(gammas);
+ if (kNonStandard_SkGammaNamed == gammaNamed) {
+ return sk_sp<SkColorSpace>(new SkColorSpace_Base(nullptr, gammaNamed,
+ std::move(gammas), mat,
+ std::move(data)));
+ }
+ }
+
+ return SkColorSpace_Base::NewRGB(gammaNamed, mat);
+ }
+
+ // Recognize color profile specified by A2B0 tag.
+ const ICCTag* a2b0 = ICCTag::Find(tags.get(), tagCount, kTAG_A2B0);
+ if (a2b0) {
+ SkGammaNamed gammaNamed = kNonStandard_SkGammaNamed;
+ sk_sp<SkGammas> gammas = nullptr;
+ sk_sp<SkColorLookUpTable> colorLUT = nullptr;
+ SkMatrix44 toXYZ(SkMatrix44::kUninitialized_Constructor);
+ if (!load_a2b0(&colorLUT, &gammaNamed, &gammas, &toXYZ, a2b0->addr(base),
+ a2b0->fLength)) {
+ return_null("Failed to parse A2B0 tag");
+ }
+
+ if (colorLUT || kNonStandard_SkGammaNamed == gammaNamed) {
+ return sk_sp<SkColorSpace>(new SkColorSpace_Base(std::move(colorLUT),
+ gammaNamed, std::move(gammas),
+ toXYZ, std::move(data)));
+ }
+
+ return SkColorSpace_Base::NewRGB(gammaNamed, toXYZ);
+ }
+ }
+ default:
+ break;
+ }
+
+ return_null("ICC profile contains unsupported colorspace");
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// We will write a profile with the minimum nine required tags.
+static constexpr uint32_t kICCNumEntries = 9;
+
+static constexpr uint32_t kTAG_desc = SkSetFourByteTag('d', 'e', 's', 'c');
+static constexpr uint32_t kTAG_desc_Bytes = 12;
+static constexpr uint32_t kTAG_desc_Offset = kICCHeaderSize + kICCNumEntries*kICCTagTableEntrySize;
+
+static constexpr uint32_t kTAG_XYZ_Bytes = 20;
+static constexpr uint32_t kTAG_rXYZ_Offset = kTAG_desc_Offset + kTAG_desc_Bytes;
+static constexpr uint32_t kTAG_gXYZ_Offset = kTAG_rXYZ_Offset + kTAG_XYZ_Bytes;
+static constexpr uint32_t kTAG_bXYZ_Offset = kTAG_gXYZ_Offset + kTAG_XYZ_Bytes;
+
+static constexpr uint32_t kTAG_TRC_Bytes = 14;
+static constexpr uint32_t kTAG_rTRC_Offset = kTAG_bXYZ_Offset + kTAG_XYZ_Bytes;
+static constexpr uint32_t kTAG_gTRC_Offset = kTAG_rTRC_Offset + SkAlign4(kTAG_TRC_Bytes);
+static constexpr uint32_t kTAG_bTRC_Offset = kTAG_gTRC_Offset + SkAlign4(kTAG_TRC_Bytes);
+
+static constexpr uint32_t kTAG_wtpt = SkSetFourByteTag('w', 't', 'p', 't');
+static constexpr uint32_t kTAG_wtpt_Offset = kTAG_bTRC_Offset + SkAlign4(kTAG_TRC_Bytes);
+
+static constexpr uint32_t kTAG_cprt = SkSetFourByteTag('c', 'p', 'r', 't');
+static constexpr uint32_t kTAG_cprt_Bytes = 12;
+static constexpr uint32_t kTAG_cprt_Offset = kTAG_wtpt_Offset + kTAG_XYZ_Bytes;
+
+static constexpr uint32_t kICCProfileSize = kTAG_cprt_Offset + kTAG_cprt_Bytes;
+
+static constexpr uint32_t gICCHeader[kICCHeaderSize / 4] {
+ SkEndian_SwapBE32(kICCProfileSize), // Size of the profile
+ 0, // Preferred CMM type (ignored)
+ SkEndian_SwapBE32(0x02100000), // Version 2.1
+ SkEndian_SwapBE32(kDisplay_Profile), // Display device profile
+ SkEndian_SwapBE32(kRGB_ColorSpace), // RGB input color space
+ SkEndian_SwapBE32(kXYZ_PCSSpace), // XYZ profile connection space
+ 0, 0, 0, // Date and time (ignored)
+ SkEndian_SwapBE32(kACSP_Signature), // Profile signature
+ 0, // Platform target (ignored)
+ 0x00000000, // Flags: not embedded, can be used independently
+ 0, // Device manufacturer (ignored)
+ 0, // Device model (ignored)
+ 0, 0, // Device attributes (ignored)
+ SkEndian_SwapBE32(1), // Relative colorimetric rendering intent
+ SkEndian_SwapBE32(0x0000f6d6), // D50 standard illuminant (X)
+ SkEndian_SwapBE32(0x00010000), // D50 standard illuminant (Y)
+ SkEndian_SwapBE32(0x0000d32d), // D50 standard illuminant (Z)
+ 0, // Profile creator (ignored)
+ 0, 0, 0, 0, // Profile id checksum (ignored)
+ 0, 0, 0, 0, 0, 0, 0, // Reserved (ignored)
+ SkEndian_SwapBE32(kICCNumEntries), // Number of tags
+};
+
+static constexpr uint32_t gICCTagTable[3 * kICCNumEntries] {
+ // Profile description
+ SkEndian_SwapBE32(kTAG_desc),
+ SkEndian_SwapBE32(kTAG_desc_Offset),
+ SkEndian_SwapBE32(kTAG_desc_Bytes),
+
+ // rXYZ
+ SkEndian_SwapBE32(kTAG_rXYZ),
+ SkEndian_SwapBE32(kTAG_rXYZ_Offset),
+ SkEndian_SwapBE32(kTAG_XYZ_Bytes),
+
+ // gXYZ
+ SkEndian_SwapBE32(kTAG_gXYZ),
+ SkEndian_SwapBE32(kTAG_gXYZ_Offset),
+ SkEndian_SwapBE32(kTAG_XYZ_Bytes),
+
+ // bXYZ
+ SkEndian_SwapBE32(kTAG_bXYZ),
+ SkEndian_SwapBE32(kTAG_bXYZ_Offset),
+ SkEndian_SwapBE32(kTAG_XYZ_Bytes),
+
+ // rTRC
+ SkEndian_SwapBE32(kTAG_rTRC),
+ SkEndian_SwapBE32(kTAG_rTRC_Offset),
+ SkEndian_SwapBE32(kTAG_TRC_Bytes),
+
+ // gTRC
+ SkEndian_SwapBE32(kTAG_gTRC),
+ SkEndian_SwapBE32(kTAG_gTRC_Offset),
+ SkEndian_SwapBE32(kTAG_TRC_Bytes),
+
+ // bTRC
+ SkEndian_SwapBE32(kTAG_bTRC),
+ SkEndian_SwapBE32(kTAG_bTRC_Offset),
+ SkEndian_SwapBE32(kTAG_TRC_Bytes),
+
+ // White point
+ SkEndian_SwapBE32(kTAG_wtpt),
+ SkEndian_SwapBE32(kTAG_wtpt_Offset),
+ SkEndian_SwapBE32(kTAG_XYZ_Bytes),
+
+ // Copyright
+ SkEndian_SwapBE32(kTAG_cprt),
+ SkEndian_SwapBE32(kTAG_cprt_Offset),
+ SkEndian_SwapBE32(kTAG_cprt_Bytes),
+};
+
+static constexpr uint32_t kTAG_TextType = SkSetFourByteTag('m', 'l', 'u', 'c');
+static constexpr uint32_t gEmptyTextTag[3] {
+ SkEndian_SwapBE32(kTAG_TextType), // Type signature
+ 0, // Reserved
+ 0, // Zero records
+};
+
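+// Writes one column of the 3x3 toXYZ matrix as a 20-byte ICC XYZType tag:
+// type signature, 4 reserved bytes, then X, Y, Z as big-endian s15Fixed16 values.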
+static void write_xyz_tag(uint32_t* ptr, const SkMatrix44& toXYZ, int col) {
+ ptr[0] = SkEndian_SwapBE32(kXYZ_PCSSpace);
+ ptr[1] = 0;
+ ptr[2] = SkEndian_SwapBE32(SkFloatToFixed(toXYZ.getFloat(0, col)));
+ ptr[3] = SkEndian_SwapBE32(SkFloatToFixed(toXYZ.getFloat(1, col)));
+ ptr[4] = SkEndian_SwapBE32(SkFloatToFixed(toXYZ.getFloat(2, col)));
+}
+
+static void write_trc_tag(uint32_t* ptr, float value) {
+ ptr[0] = SkEndian_SwapBE32(kTAG_CurveType);
+ ptr[1] = 0;
+
+ // Gamma will be specified with a single value.
+ ptr[2] = SkEndian_SwapBE32(1);
+
+ // Convert gamma to 16-bit fixed point.
+ uint16_t* ptr16 = (uint16_t*) (ptr + 3);
+ ptr16[0] = SkEndian_SwapBE16((uint16_t) (value * 256.0f));
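+ // e.g. 1.0f is written as 256 (0x0100) and 2.2f as 563 (0x0233).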
+
+ // Pad tag with zero.
+ ptr16[1] = 0;
+}
+
+sk_sp<SkData> SkColorSpace_Base::writeToICC() const {
+ // Return if this object was created from a profile, or if we have already serialized
+ // the profile.
+ if (fProfileData) {
+ return fProfileData;
+ }
+
+ // The client may create an SkColorSpace using an SkMatrix44, but currently we only
+ // support writing profiles with 3x3 matrices.
+ // TODO (msarett): Fix this!
+ if (0.0f != fToXYZD50.getFloat(3, 0) || 0.0f != fToXYZD50.getFloat(3, 1) ||
+ 0.0f != fToXYZD50.getFloat(3, 2) || 0.0f != fToXYZD50.getFloat(0, 3) ||
+ 0.0f != fToXYZD50.getFloat(1, 3) || 0.0f != fToXYZD50.getFloat(2, 3))
+ {
+ return nullptr;
+ }
+
+ SkAutoMalloc profile(kICCProfileSize);
+ uint8_t* ptr = (uint8_t*) profile.get();
+
+ // Write profile header
+ memcpy(ptr, gICCHeader, sizeof(gICCHeader));
+ ptr += sizeof(gICCHeader);
+
+ // Write tag table
+ memcpy(ptr, gICCTagTable, sizeof(gICCTagTable));
+ ptr += sizeof(gICCTagTable);
+
+ // Write profile description tag
+ memcpy(ptr, gEmptyTextTag, sizeof(gEmptyTextTag));
+ ptr += sizeof(gEmptyTextTag);
+
+ // Write XYZ tags
+ write_xyz_tag((uint32_t*) ptr, fToXYZD50, 0);
+ ptr += kTAG_XYZ_Bytes;
+ write_xyz_tag((uint32_t*) ptr, fToXYZD50, 1);
+ ptr += kTAG_XYZ_Bytes;
+ write_xyz_tag((uint32_t*) ptr, fToXYZD50, 2);
+ ptr += kTAG_XYZ_Bytes;
+
+ // Write TRC tags
+ SkGammaNamed gammaNamed = this->gammaNamed();
+ if (kNonStandard_SkGammaNamed == gammaNamed) {
+ // FIXME (msarett):
+ // Write the correct gamma representation rather than 2.2f.
+ write_trc_tag((uint32_t*) ptr, 2.2f);
+ ptr += SkAlign4(kTAG_TRC_Bytes);
+ write_trc_tag((uint32_t*) ptr, 2.2f);
+ ptr += SkAlign4(kTAG_TRC_Bytes);
+ write_trc_tag((uint32_t*) ptr, 2.2f);
+ ptr += SkAlign4(kTAG_TRC_Bytes);
+ } else {
+ switch (gammaNamed) {
+ case kSRGB_SkGammaNamed:
+ // FIXME (msarett):
+ // kSRGB cannot be represented by a value. Here we fall through to 2.2f,
+ // which is a close guess. To be more accurate, we need to represent sRGB
+ // gamma with a parametric curve.
+ case k2Dot2Curve_SkGammaNamed:
+ write_trc_tag((uint32_t*) ptr, 2.2f);
+ ptr += SkAlign4(kTAG_TRC_Bytes);
+ write_trc_tag((uint32_t*) ptr, 2.2f);
+ ptr += SkAlign4(kTAG_TRC_Bytes);
+ write_trc_tag((uint32_t*) ptr, 2.2f);
+ ptr += SkAlign4(kTAG_TRC_Bytes);
+ break;
+ case kLinear_SkGammaNamed:
+ write_trc_tag((uint32_t*) ptr, 1.0f);
+ ptr += SkAlign4(kTAG_TRC_Bytes);
+ write_trc_tag((uint32_t*) ptr, 1.0f);
+ ptr += SkAlign4(kTAG_TRC_Bytes);
+ write_trc_tag((uint32_t*) ptr, 1.0f);
+ ptr += SkAlign4(kTAG_TRC_Bytes);
+ break;
+ default:
+ SkASSERT(false);
+ break;
+ }
+ }
+
+ // Write white point tag
+ uint32_t* ptr32 = (uint32_t*) ptr;
+ ptr32[0] = SkEndian_SwapBE32(kXYZ_PCSSpace);
+ ptr32[1] = 0;
+ // TODO (msarett): These values correspond to the D65 white point. This may not always be
+ // correct.
+ ptr32[2] = SkEndian_SwapBE32(0x0000f351);
+ ptr32[3] = SkEndian_SwapBE32(0x00010000);
+ ptr32[4] = SkEndian_SwapBE32(0x000116cc);
+ ptr += kTAG_XYZ_Bytes;
+
+ // Write copyright tag
+ memcpy(ptr, gEmptyTextTag, sizeof(gEmptyTextTag));
+
+ // TODO (msarett): Should we try to hold onto the data so we can return immediately if
+ // the client calls again?
+ return SkData::MakeFromMalloc(profile.release(), kICCProfileSize);
+}
diff --git a/gfx/skia/skia/src/core/SkColorTable.cpp b/gfx/skia/skia/src/core/SkColorTable.cpp
new file mode 100644
index 000000000..296b31c36
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorTable.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkColorTable.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+#include "SkStream.h"
+#include "SkTemplates.h"
+
+void SkColorTable::init(const SkPMColor colors[], int count) {
+ SkASSERT((unsigned)count <= 256);
+
+ fCount = count;
+ fColors = reinterpret_cast<SkPMColor*>(sk_malloc_throw(count * sizeof(SkPMColor)));
+
+ memcpy(fColors, colors, count * sizeof(SkPMColor));
+}
+
+SkColorTable::SkColorTable(const SkPMColor colors[], int count) {
+ SkASSERT(0 == count || colors);
+ if (count < 0) {
+ count = 0;
+ } else if (count > 256) {
+ count = 256;
+ }
+ this->init(colors, count);
+}
+
+SkColorTable::SkColorTable(SkPMColor* colors, int count, AllocatedWithMalloc)
+ : fColors(colors)
+ , fCount(count)
+{
+ SkASSERT(count > 0 && count <= 256);
+ SkASSERT(colors);
+}
+
+SkColorTable::~SkColorTable() {
+ sk_free(fColors);
+ sk_free(f16BitCache);
+}
+
+#include "SkColorPriv.h"
+
+const uint16_t* SkColorTable::read16BitCache() const {
+ f16BitCacheOnce([this] {
+ f16BitCache = (uint16_t*)sk_malloc_throw(fCount * sizeof(uint16_t));
+ for (int i = 0; i < fCount; i++) {
+ f16BitCache[i] = SkPixel32ToPixel16_ToU16(fColors[i]);
+ }
+ });
+ return f16BitCache;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if 0
+SkColorTable::SkColorTable(SkReadBuffer& buffer) {
+ if (buffer.isVersionLT(SkReadBuffer::kRemoveColorTableAlpha_Version)) {
+ /*fAlphaType = */buffer.readUInt();
+ }
+
+ fCount = buffer.getArrayCount();
+ size_t allocSize = fCount * sizeof(SkPMColor);
+ SkDEBUGCODE(bool success = false;)
+ if (buffer.validateAvailable(allocSize)) {
+ fColors = (SkPMColor*)sk_malloc_throw(allocSize);
+ SkDEBUGCODE(success =) buffer.readColorArray(fColors, fCount);
+ } else {
+ fCount = 0;
+ fColors = nullptr;
+ }
+#ifdef SK_DEBUG
+ SkASSERT((unsigned)fCount <= 256);
+ SkASSERT(success);
+#endif
+}
+#endif
+
+void SkColorTable::writeToBuffer(SkWriteBuffer& buffer) const {
+ buffer.writeColorArray(fColors, fCount);
+}
+
+SkColorTable* SkColorTable::Create(SkReadBuffer& buffer) {
+ if (buffer.isVersionLT(SkReadBuffer::kRemoveColorTableAlpha_Version)) {
+ /*fAlphaType = */buffer.readUInt();
+ }
+
+ const int count = buffer.getArrayCount();
+ if (0 == count) {
+ return new SkColorTable(nullptr, 0);
+ }
+
+ if (count < 0 || count > 256) {
+ buffer.validate(false);
+ return nullptr;
+ }
+
+ const size_t allocSize = count * sizeof(SkPMColor);
+ SkAutoTDelete<SkPMColor> colors((SkPMColor*)sk_malloc_throw(allocSize));
+ if (!buffer.readColorArray(colors, count)) {
+ return nullptr;
+ }
+
+ return new SkColorTable(colors.release(), count, kAllocatedWithMalloc);
+}
diff --git a/gfx/skia/skia/src/core/SkComposeShader.cpp b/gfx/skia/skia/src/core/SkComposeShader.cpp
new file mode 100644
index 000000000..7696e1632
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkComposeShader.cpp
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkComposeShader.h"
+#include "SkColorFilter.h"
+#include "SkColorPriv.h"
+#include "SkColorShader.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+#include "SkXfermode.h"
+#include "SkString.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+size_t SkComposeShader::onContextSize(const ContextRec& rec) const {
+ return sizeof(ComposeShaderContext)
+ + fShaderA->contextSize(rec)
+ + fShaderB->contextSize(rec);
+}
+
+class SkAutoAlphaRestore {
+public:
+ SkAutoAlphaRestore(SkPaint* paint, uint8_t newAlpha) {
+ fAlpha = paint->getAlpha();
+ fPaint = paint;
+ paint->setAlpha(newAlpha);
+ }
+
+ ~SkAutoAlphaRestore() {
+ fPaint->setAlpha(fAlpha);
+ }
+private:
+ SkPaint* fPaint;
+ uint8_t fAlpha;
+};
+#define SkAutoAlphaRestore(...) SK_REQUIRE_LOCAL_VAR(SkAutoAlphaRestore)
+
+sk_sp<SkFlattenable> SkComposeShader::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkShader> shaderA(buffer.readShader());
+ sk_sp<SkShader> shaderB(buffer.readShader());
+ sk_sp<SkXfermode> mode(buffer.readXfermode());
+ if (!shaderA || !shaderB) {
+ return nullptr;
+ }
+ return sk_make_sp<SkComposeShader>(std::move(shaderA), std::move(shaderB), std::move(mode));
+}
+
+void SkComposeShader::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeFlattenable(fShaderA.get());
+ buffer.writeFlattenable(fShaderB.get());
+ buffer.writeFlattenable(fMode.get());
+}
+
+template <typename T> void safe_call_destructor(T* obj) {
+ if (obj) {
+ obj->~T();
+ }
+}
+
+SkShader::Context* SkComposeShader::onCreateContext(const ContextRec& rec, void* storage) const {
+ char* aStorage = (char*) storage + sizeof(ComposeShaderContext);
+ char* bStorage = aStorage + fShaderA->contextSize(rec);
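+ // The caller-provided storage is laid out as [ComposeShaderContext | context A | context B],
+ // matching the size reported by onContextSize().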
+
+ // we preconcat our localMatrix (if any) with the device matrix
+ // before calling our sub-shaders
+ SkMatrix tmpM;
+ tmpM.setConcat(*rec.fMatrix, this->getLocalMatrix());
+
+ // Our sub-shaders need to see opaque, so by combining them we don't double-alphatize the
+ // result. ComposeShader itself will respect the alpha, and post-apply it after calling the
+ // sub-shaders.
+ SkPaint opaquePaint(*rec.fPaint);
+ opaquePaint.setAlpha(0xFF);
+
+ ContextRec newRec(rec);
+ newRec.fMatrix = &tmpM;
+ newRec.fPaint = &opaquePaint;
+
+ SkShader::Context* contextA = fShaderA->createContext(newRec, aStorage);
+ SkShader::Context* contextB = fShaderB->createContext(newRec, bStorage);
+ if (!contextA || !contextB) {
+ safe_call_destructor(contextA);
+ safe_call_destructor(contextB);
+ return nullptr;
+ }
+
+ return new (storage) ComposeShaderContext(*this, rec, contextA, contextB);
+}
+
+SkComposeShader::ComposeShaderContext::ComposeShaderContext(
+ const SkComposeShader& shader, const ContextRec& rec,
+ SkShader::Context* contextA, SkShader::Context* contextB)
+ : INHERITED(shader, rec)
+ , fShaderContextA(contextA)
+ , fShaderContextB(contextB) {}
+
+SkComposeShader::ComposeShaderContext::~ComposeShaderContext() {
+ fShaderContextA->~Context();
+ fShaderContextB->~Context();
+}
+
+bool SkComposeShader::asACompose(ComposeRec* rec) const {
+ if (rec) {
+ rec->fShaderA = fShaderA.get();
+ rec->fShaderB = fShaderB.get();
+ rec->fMode = fMode.get();
+ }
+ return true;
+}
+
+
+// larger is better (fewer times we have to loop), but we shouldn't
+// take up too much stack-space (each element is 4 bytes)
+#define TMP_COLOR_COUNT 64
+
+void SkComposeShader::ComposeShaderContext::shadeSpan(int x, int y, SkPMColor result[], int count) {
+ SkShader::Context* shaderContextA = fShaderContextA;
+ SkShader::Context* shaderContextB = fShaderContextB;
+ SkXfermode* mode = static_cast<const SkComposeShader&>(fShader).fMode.get();
+ unsigned scale = SkAlpha255To256(this->getPaintAlpha());
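+ // scale is the paint alpha mapped to [0, 256], so 256 means fully opaque (no extra modulation).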
+
+ SkPMColor tmp[TMP_COLOR_COUNT];
+
+ if (nullptr == mode) { // implied SRC_OVER
+ // TODO: when we have a good test-case, should use SkBlitRow::Proc32
+ // for these loops
+ do {
+ int n = count;
+ if (n > TMP_COLOR_COUNT) {
+ n = TMP_COLOR_COUNT;
+ }
+
+ shaderContextA->shadeSpan(x, y, result, n);
+ shaderContextB->shadeSpan(x, y, tmp, n);
+
+ if (256 == scale) {
+ for (int i = 0; i < n; i++) {
+ result[i] = SkPMSrcOver(tmp[i], result[i]);
+ }
+ } else {
+ for (int i = 0; i < n; i++) {
+ result[i] = SkAlphaMulQ(SkPMSrcOver(tmp[i], result[i]),
+ scale);
+ }
+ }
+
+ result += n;
+ x += n;
+ count -= n;
+ } while (count > 0);
+ } else { // use mode for the composition
+ do {
+ int n = count;
+ if (n > TMP_COLOR_COUNT) {
+ n = TMP_COLOR_COUNT;
+ }
+
+ shaderContextA->shadeSpan(x, y, result, n);
+ shaderContextB->shadeSpan(x, y, tmp, n);
+ mode->xfer32(result, tmp, n, nullptr);
+
+ if (256 != scale) {
+ for (int i = 0; i < n; i++) {
+ result[i] = SkAlphaMulQ(result[i], scale);
+ }
+ }
+
+ result += n;
+ x += n;
+ count -= n;
+ } while (count > 0);
+ }
+}
+
+#if SK_SUPPORT_GPU
+
+#include "effects/GrConstColorProcessor.h"
+#include "effects/GrXfermodeFragmentProcessor.h"
+
+/////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> SkComposeShader::asFragmentProcessor(const AsFPArgs& args) const {
+ // Fragment processor will only support SkXfermode::Mode modes currently.
+ SkXfermode::Mode mode;
+ if (!(SkXfermode::AsMode(fMode, &mode))) {
+ return nullptr;
+ }
+
+ switch (mode) {
+ case SkXfermode::kClear_Mode:
+ return GrConstColorProcessor::Make(GrColor_TRANSPARENT_BLACK,
+ GrConstColorProcessor::kIgnore_InputMode);
+ break;
+ case SkXfermode::kSrc_Mode:
+ return fShaderB->asFragmentProcessor(args);
+ break;
+ case SkXfermode::kDst_Mode:
+ return fShaderA->asFragmentProcessor(args);
+ break;
+ default:
+ sk_sp<GrFragmentProcessor> fpA(fShaderA->asFragmentProcessor(args));
+ if (!fpA) {
+ return nullptr;
+ }
+ sk_sp<GrFragmentProcessor> fpB(fShaderB->asFragmentProcessor(args));
+ if (!fpB) {
+ return nullptr;
+ }
+ return GrXfermodeFragmentProcessor::MakeFromTwoProcessors(std::move(fpB),
+ std::move(fpA), mode);
+ }
+}
+#endif
+
+#ifndef SK_IGNORE_TO_STRING
+void SkComposeShader::toString(SkString* str) const {
+ str->append("SkComposeShader: (");
+
+ str->append("ShaderA: ");
+ fShaderA->toString(str);
+ str->append(" ShaderB: ");
+ fShaderB->toString(str);
+ if (fMode) {
+ str->append(" Xfermode: ");
+ fMode->toString(str);
+ }
+
+ this->INHERITED::toString(str);
+
+ str->append(")");
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkShader> SkShader::MakeComposeShader(sk_sp<SkShader> dst, sk_sp<SkShader> src,
+ sk_sp<SkXfermode> xfer) {
+ if (!dst || !src) {
+ return nullptr;
+ }
+ return sk_make_sp<SkComposeShader>(std::move(dst), std::move(src), std::move(xfer));
+}
+
+sk_sp<SkShader> SkShader::MakeComposeShader(sk_sp<SkShader> dst, sk_sp<SkShader> src,
+ SkXfermode::Mode mode) {
+ return MakeComposeShader(std::move(dst), std::move(src), SkXfermode::Make(mode));
+}
diff --git a/gfx/skia/skia/src/core/SkComposeShader.h b/gfx/skia/skia/src/core/SkComposeShader.h
new file mode 100644
index 000000000..d1b095ec3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkComposeShader.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkComposeShader_DEFINED
+#define SkComposeShader_DEFINED
+
+#include "SkShader.h"
+#include "SkXfermode.h"
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+/** \class SkComposeShader
+ This subclass of shader returns the composition of two other shaders, combined by
+ a xfermode.
+*/
+class SK_API SkComposeShader : public SkShader {
+public:
+ /** Create a new compose shader, given shaders A, B, and a combining xfermode mode.
+ When the xfermode is called, it will be given the result from shader A as its
+ "dst", and the result from shader B as its "src".
+ mode->xfer32(sA_result, sB_result, ...)
+ @param shaderA The colors from this shader are seen as the "dst" by the xfermode
+ @param shaderB The colors from this shader are seen as the "src" by the xfermode
+ @param mode The xfermode that combines the colors from the two shaders. If mode
+ is null, then SRC_OVER is assumed.
+ */
+ SkComposeShader(sk_sp<SkShader> sA, sk_sp<SkShader> sB, sk_sp<SkXfermode> mode)
+ : fShaderA(std::move(sA))
+ , fShaderB(std::move(sB))
+ , fMode(std::move(mode))
+ {}
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(const AsFPArgs&) const override;
+#endif
+
+ class ComposeShaderContext : public SkShader::Context {
+ public:
+ // When this object gets destroyed, it will call contextA and contextB's destructor
+ // but it will NOT free the memory.
+ ComposeShaderContext(const SkComposeShader&, const ContextRec&,
+ SkShader::Context* contextA, SkShader::Context* contextB);
+
+ SkShader::Context* getShaderContextA() const { return fShaderContextA; }
+ SkShader::Context* getShaderContextB() const { return fShaderContextB; }
+
+ virtual ~ComposeShaderContext();
+
+ void shadeSpan(int x, int y, SkPMColor[], int count) override;
+
+ private:
+ SkShader::Context* fShaderContextA;
+ SkShader::Context* fShaderContextB;
+
+ typedef SkShader::Context INHERITED;
+ };
+
+#ifdef SK_DEBUG
+ SkShader* getShaderA() { return fShaderA.get(); }
+ SkShader* getShaderB() { return fShaderB.get(); }
+#endif
+
+ bool asACompose(ComposeRec* rec) const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkComposeShader)
+
+protected:
+ SkComposeShader(SkReadBuffer&);
+ void flatten(SkWriteBuffer&) const override;
+ size_t onContextSize(const ContextRec&) const override;
+ Context* onCreateContext(const ContextRec&, void*) const override;
+
+private:
+ sk_sp<SkShader> fShaderA;
+ sk_sp<SkShader> fShaderB;
+ sk_sp<SkXfermode> fMode;
+
+ typedef SkShader INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkConfig8888.cpp b/gfx/skia/skia/src/core/SkConfig8888.cpp
new file mode 100644
index 000000000..7c3f0214e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkConfig8888.cpp
@@ -0,0 +1,369 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmap.h"
+#include "SkCanvas.h"
+#include "SkConfig8888.h"
+#include "SkColorPriv.h"
+#include "SkDither.h"
+#include "SkMathPriv.h"
+#include "SkUnPreMultiply.h"
+
+enum AlphaVerb {
+ kNothing_AlphaVerb,
+ kPremul_AlphaVerb,
+ kUnpremul_AlphaVerb,
+};
+
+template <bool doSwapRB, AlphaVerb doAlpha> uint32_t convert32(uint32_t c) {
+ if (doSwapRB) {
+ c = SkSwizzle_RB(c);
+ }
+
+ // Lucky for us, in both RGBA and BGRA, the alpha component is always in the same place, so
+ // we can perform premul or unpremul the same way without knowing the swizzles for RGB.
+ switch (doAlpha) {
+ case kNothing_AlphaVerb:
+ // no change
+ break;
+ case kPremul_AlphaVerb:
+ c = SkPreMultiplyARGB(SkGetPackedA32(c), SkGetPackedR32(c),
+ SkGetPackedG32(c), SkGetPackedB32(c));
+ break;
+ case kUnpremul_AlphaVerb:
+ c = SkUnPreMultiply::UnPreMultiplyPreservingByteOrder(c);
+ break;
+ }
+ return c;
+}
+
+template <bool doSwapRB, AlphaVerb doAlpha>
+void convert32_row(uint32_t* dst, const uint32_t* src, int count) {
+ // This has to be correct if src == dst (but not partial overlap)
+ for (int i = 0; i < count; ++i) {
+ dst[i] = convert32<doSwapRB, doAlpha>(src[i]);
+ }
+}
+
+static bool is_32bit_colortype(SkColorType ct) {
+ return kRGBA_8888_SkColorType == ct || kBGRA_8888_SkColorType == ct;
+}
+
+static AlphaVerb compute_AlphaVerb(SkAlphaType src, SkAlphaType dst) {
+ SkASSERT(kUnknown_SkAlphaType != src);
+ SkASSERT(kUnknown_SkAlphaType != dst);
+
+ if (kOpaque_SkAlphaType == src || kOpaque_SkAlphaType == dst || src == dst) {
+ return kNothing_AlphaVerb;
+ }
+ if (kPremul_SkAlphaType == dst) {
+ SkASSERT(kUnpremul_SkAlphaType == src);
+ return kPremul_AlphaVerb;
+ } else {
+ SkASSERT(kPremul_SkAlphaType == src);
+ SkASSERT(kUnpremul_SkAlphaType == dst);
+ return kUnpremul_AlphaVerb;
+ }
+}
+
+static void memcpy32_row(uint32_t* dst, const uint32_t* src, int count) {
+ memcpy(dst, src, count * 4);
+}
+
+bool SkSrcPixelInfo::convertPixelsTo(SkDstPixelInfo* dst, int width, int height) const {
+ if (width <= 0 || height <= 0) {
+ return false;
+ }
+
+ if (!is_32bit_colortype(fColorType) || !is_32bit_colortype(dst->fColorType)) {
+ return false;
+ }
+
+ void (*proc)(uint32_t* dst, const uint32_t* src, int count);
+ AlphaVerb doAlpha = compute_AlphaVerb(fAlphaType, dst->fAlphaType);
+ bool doSwapRB = fColorType != dst->fColorType;
+
+ switch (doAlpha) {
+ case kNothing_AlphaVerb:
+ if (doSwapRB) {
+ proc = convert32_row<true, kNothing_AlphaVerb>;
+ } else {
+ if (fPixels == dst->fPixels) {
+ return true;
+ }
+ proc = memcpy32_row;
+ }
+ break;
+ case kPremul_AlphaVerb:
+ if (doSwapRB) {
+ proc = convert32_row<true, kPremul_AlphaVerb>;
+ } else {
+ proc = convert32_row<false, kPremul_AlphaVerb>;
+ }
+ break;
+ case kUnpremul_AlphaVerb:
+ if (doSwapRB) {
+ proc = convert32_row<true, kUnpremul_AlphaVerb>;
+ } else {
+ proc = convert32_row<false, kUnpremul_AlphaVerb>;
+ }
+ break;
+ }
+
+ uint32_t* dstP = static_cast<uint32_t*>(dst->fPixels);
+ const uint32_t* srcP = static_cast<const uint32_t*>(fPixels);
+ size_t srcInc = fRowBytes >> 2;
+ size_t dstInc = dst->fRowBytes >> 2;
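+ // Row strides in 32-bit pixels (rowBytes / 4).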
+ for (int y = 0; y < height; ++y) {
+ proc(dstP, srcP, width);
+ dstP += dstInc;
+ srcP += srcInc;
+ }
+ return true;
+}
+
+static void copy_g8_to_32(void* dst, size_t dstRB, const void* src, size_t srcRB, int w, int h) {
+ uint32_t* dst32 = (uint32_t*)dst;
+ const uint8_t* src8 = (const uint8_t*)src;
+
+ for (int y = 0; y < h; ++y) {
+ for (int x = 0; x < w; ++x) {
+ dst32[x] = SkPackARGB32(0xFF, src8[x], src8[x], src8[x]);
+ }
+ dst32 = (uint32_t*)((char*)dst32 + dstRB);
+ src8 += srcRB;
+ }
+}
+
+static void copy_32_to_g8(void* dst, size_t dstRB, const void* src, size_t srcRB,
+ const SkImageInfo& srcInfo) {
+ uint8_t* dst8 = (uint8_t*)dst;
+ const uint32_t* src32 = (const uint32_t*)src;
+
+ const int w = srcInfo.width();
+ const int h = srcInfo.height();
+ const bool isBGRA = (kBGRA_8888_SkColorType == srcInfo.colorType());
+
+ for (int y = 0; y < h; ++y) {
+ if (isBGRA) {
+ // BGRA
+ for (int x = 0; x < w; ++x) {
+ uint32_t s = src32[x];
+ dst8[x] = SkComputeLuminance((s >> 16) & 0xFF, (s >> 8) & 0xFF, s & 0xFF);
+ }
+ } else {
+ // RGBA
+ for (int x = 0; x < w; ++x) {
+ uint32_t s = src32[x];
+ dst8[x] = SkComputeLuminance(s & 0xFF, (s >> 8) & 0xFF, (s >> 16) & 0xFF);
+ }
+ }
+ src32 = (const uint32_t*)((const char*)src32 + srcRB);
+ dst8 += dstRB;
+ }
+}
+
+static bool extract_alpha(void* dst, size_t dstRB, const void* src, size_t srcRB,
+ const SkImageInfo& srcInfo, SkColorTable* ctable) {
+ uint8_t* SK_RESTRICT dst8 = (uint8_t*)dst;
+
+ const int w = srcInfo.width();
+ const int h = srcInfo.height();
+ if (srcInfo.isOpaque()) {
+ // src is opaque, so just fill alpha with 0xFF
+ for (int y = 0; y < h; ++y) {
+ memset(dst8, 0xFF, w);
+ dst8 += dstRB;
+ }
+ return true;
+ }
+ switch (srcInfo.colorType()) {
+ case kN32_SkColorType: {
+ const SkPMColor* SK_RESTRICT src32 = (const SkPMColor*)src;
+ for (int y = 0; y < h; ++y) {
+ for (int x = 0; x < w; ++x) {
+ dst8[x] = SkGetPackedA32(src32[x]);
+ }
+ dst8 += dstRB;
+ src32 = (const SkPMColor*)((const char*)src32 + srcRB);
+ }
+ break;
+ }
+ case kARGB_4444_SkColorType: {
+ const SkPMColor16* SK_RESTRICT src16 = (const SkPMColor16*)src;
+ for (int y = 0; y < h; ++y) {
+ for (int x = 0; x < w; ++x) {
+ dst8[x] = SkPacked4444ToA32(src16[x]);
+ }
+ dst8 += dstRB;
+ src16 = (const SkPMColor16*)((const char*)src16 + srcRB);
+ }
+ break;
+ }
+ case kIndex_8_SkColorType: {
+ if (nullptr == ctable) {
+ return false;
+ }
+ const SkPMColor* SK_RESTRICT table = ctable->readColors();
+ const uint8_t* SK_RESTRICT src8 = (const uint8_t*)src;
+ for (int y = 0; y < h; ++y) {
+ for (int x = 0; x < w; ++x) {
+ dst8[x] = SkGetPackedA32(table[src8[x]]);
+ }
+ dst8 += dstRB;
+ src8 += srcRB;
+ }
+ break;
+ }
+ default:
+ return false;
+ }
+ return true;
+}
+
+bool SkPixelInfo::CopyPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRB,
+ const SkImageInfo& srcInfo, const void* srcPixels, size_t srcRB,
+ SkColorTable* ctable) {
+ if (srcInfo.dimensions() != dstInfo.dimensions()) {
+ return false;
+ }
+
+ const int width = srcInfo.width();
+ const int height = srcInfo.height();
+
+ // Do the easiest one first: both configs are equal
+ if ((srcInfo == dstInfo) && !ctable) {
+ size_t bytes = width * srcInfo.bytesPerPixel();
+ for (int y = 0; y < height; ++y) {
+ memcpy(dstPixels, srcPixels, bytes);
+ srcPixels = (const char*)srcPixels + srcRB;
+ dstPixels = (char*)dstPixels + dstRB;
+ }
+ return true;
+ }
+
+ // Handle fancy alpha swizzling if both are ARGB32
+ if (4 == srcInfo.bytesPerPixel() && 4 == dstInfo.bytesPerPixel()) {
+ SkDstPixelInfo dstPI;
+ dstPI.fColorType = dstInfo.colorType();
+ dstPI.fAlphaType = dstInfo.alphaType();
+ dstPI.fPixels = dstPixels;
+ dstPI.fRowBytes = dstRB;
+
+ SkSrcPixelInfo srcPI;
+ srcPI.fColorType = srcInfo.colorType();
+ srcPI.fAlphaType = srcInfo.alphaType();
+ srcPI.fPixels = srcPixels;
+ srcPI.fRowBytes = srcRB;
+
+ return srcPI.convertPixelsTo(&dstPI, width, height);
+ }
+
+ // If they agree on colorType and the alphaTypes are compatible, then we just memcpy.
+ // Note: we've already taken care of 32bit colortypes above.
+ if (srcInfo.colorType() == dstInfo.colorType()) {
+ switch (srcInfo.colorType()) {
+ case kRGB_565_SkColorType:
+ case kAlpha_8_SkColorType:
+ case kGray_8_SkColorType:
+ break;
+ case kIndex_8_SkColorType:
+ case kARGB_4444_SkColorType:
+ case kRGBA_F16_SkColorType:
+ if (srcInfo.alphaType() != dstInfo.alphaType()) {
+ return false;
+ }
+ break;
+ default:
+ return false;
+ }
+ SkRectMemcpy(dstPixels, dstRB, srcPixels, srcRB, width * srcInfo.bytesPerPixel(), height);
+ return true;
+ }
+
+ /*
+ * Begin section where we try to change colorTypes along the way. Not all combinations
+ * are supported.
+ */
+
+ if (kGray_8_SkColorType == srcInfo.colorType() && 4 == dstInfo.bytesPerPixel()) {
+ copy_g8_to_32(dstPixels, dstRB, srcPixels, srcRB, width, height);
+ return true;
+ }
+ if (kGray_8_SkColorType == dstInfo.colorType() && 4 == srcInfo.bytesPerPixel()) {
+ copy_32_to_g8(dstPixels, dstRB, srcPixels, srcRB, srcInfo);
+ return true;
+ }
+
+ if (kAlpha_8_SkColorType == dstInfo.colorType() &&
+ extract_alpha(dstPixels, dstRB, srcPixels, srcRB, srcInfo, ctable)) {
+ return true;
+ }
+
+ // Can no longer draw directly into 4444, but we can manually whack it for a few combinations
+ if (kARGB_4444_SkColorType == dstInfo.colorType() &&
+ (kN32_SkColorType == srcInfo.colorType() || kIndex_8_SkColorType == srcInfo.colorType())) {
+ if (srcInfo.alphaType() == kUnpremul_SkAlphaType) {
+ // Our method for converting to 4444 assumes premultiplied.
+ return false;
+ }
+
+ const SkPMColor* table = nullptr;
+ if (kIndex_8_SkColorType == srcInfo.colorType()) {
+ if (nullptr == ctable) {
+ return false;
+ }
+ table = ctable->readColors();
+ }
+
+ for (int y = 0; y < height; ++y) {
+ DITHER_4444_SCAN(y);
+ SkPMColor16* SK_RESTRICT dstRow = (SkPMColor16*)dstPixels;
+ if (table) {
+ const uint8_t* SK_RESTRICT srcRow = (const uint8_t*)srcPixels;
+ for (int x = 0; x < width; ++x) {
+ dstRow[x] = SkDitherARGB32To4444(table[srcRow[x]], DITHER_VALUE(x));
+ }
+ } else {
+ const SkPMColor* SK_RESTRICT srcRow = (const SkPMColor*)srcPixels;
+ for (int x = 0; x < width; ++x) {
+ dstRow[x] = SkDitherARGB32To4444(srcRow[x], DITHER_VALUE(x));
+ }
+ }
+ dstPixels = (char*)dstPixels + dstRB;
+ srcPixels = (const char*)srcPixels + srcRB;
+ }
+ return true;
+ }
+
+ if (dstInfo.alphaType() == kUnpremul_SkAlphaType) {
+ // We do not support drawing to unpremultiplied bitmaps.
+ return false;
+ }
+
+ // Final fall-back, draw with a canvas
+ //
+ // Always clear the dest in case one of the blitters accesses it
+ // TODO: switch the allocation of tmpDst to call sk_calloc_throw
+ {
+ SkBitmap bm;
+ if (!bm.installPixels(srcInfo, const_cast<void*>(srcPixels), srcRB, ctable, nullptr, nullptr)) {
+ return false;
+ }
+ SkAutoTUnref<SkCanvas> canvas(SkCanvas::NewRasterDirect(dstInfo, dstPixels, dstRB));
+ if (nullptr == canvas.get()) {
+ return false;
+ }
+
+ SkPaint paint;
+ paint.setDither(true);
+
+ canvas->clear(0);
+ canvas->drawBitmap(bm, 0, 0, &paint);
+ return true;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkConfig8888.h b/gfx/skia/skia/src/core/SkConfig8888.h
new file mode 100644
index 000000000..ff287267a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkConfig8888.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPixelInfo_DEFINED
+#define SkPixelInfo_DEFINED
+
+#include "SkImageInfo.h"
+
+class SkColorTable;
+
+struct SkPixelInfo {
+ SkColorType fColorType;
+ SkAlphaType fAlphaType;
+ size_t fRowBytes;
+
+ static bool CopyPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ const SkImageInfo& srcInfo, const void* srcPixels, size_t srcRowBytes,
+ SkColorTable* srcCTable = nullptr);
+};
+
+struct SkDstPixelInfo : SkPixelInfo {
+ void* fPixels;
+};
+
+struct SkSrcPixelInfo : SkPixelInfo {
+ const void* fPixels;
+
+ // Guaranteed to work even if src.fPixels and dst.fPixels are the same
+ // (but not if they overlap partially)
+ bool convertPixelsTo(SkDstPixelInfo* dst, int width, int height) const;
+};
+
+static inline void SkRectMemcpy(void* dst, size_t dstRB, const void* src, size_t srcRB,
+ size_t bytesPerRow, int rowCount) {
+ SkASSERT(bytesPerRow <= srcRB);
+ SkASSERT(bytesPerRow <= dstRB);
+ for (int i = 0; i < rowCount; ++i) {
+ memcpy(dst, src, bytesPerRow);
+ dst = (char*)dst + dstRB;
+ src = (const char*)src + srcRB;
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkConvolver.cpp b/gfx/skia/skia/src/core/SkConvolver.cpp
new file mode 100644
index 000000000..c662e2dda
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkConvolver.cpp
@@ -0,0 +1,486 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "SkConvolver.h"
+#include "SkTArray.h"
+
+namespace {
+
+ // Converts the argument to an 8-bit unsigned value by clamping to the range
+ // 0-255.
+ inline unsigned char ClampTo8(int a) {
+ if (static_cast<unsigned>(a) < 256) {
+ return a; // Avoid the extra check in the common case.
+ }
+ if (a < 0) {
+ return 0;
+ }
+ return 255;
+ }
+
+ // Stores a list of rows in a circular buffer. The usage is that you write into
+ // it by calling advanceRow(). It will keep track of which row in the buffer it
+ // should use next, and the total number of rows added.
+ class CircularRowBuffer {
+ public:
+ // The number of pixels in each row is given in |destRowPixelWidth|.
+ // The maximum number of rows needed in the buffer is |maxYFilterSize|
+ // (we only need to store enough rows for the biggest filter).
+ //
+ // We use |firstInputRow| to compute the coordinates of all of the
+ // following rows returned by advanceRow().
+ CircularRowBuffer(int destRowPixelWidth, int maxYFilterSize,
+ int firstInputRow)
+ : fRowByteWidth(destRowPixelWidth * 4),
+ fNumRows(maxYFilterSize),
+ fNextRow(0),
+ fNextRowCoordinate(firstInputRow) {
+ fBuffer.reset(fRowByteWidth * maxYFilterSize);
+ fRowAddresses.reset(fNumRows);
+ }
+
+ // Moves to the next row in the buffer, returning a pointer to the beginning
+ // of it.
+ unsigned char* advanceRow() {
+ unsigned char* row = &fBuffer[fNextRow * fRowByteWidth];
+ fNextRowCoordinate++;
+
+ // Set the pointer to the next row to use, wrapping around if necessary.
+ fNextRow++;
+ if (fNextRow == fNumRows) {
+ fNextRow = 0;
+ }
+ return row;
+ }
+
+ // Returns a pointer to an "unrolled" array of rows. These rows will start
+ // at the y coordinate placed into |*firstRowIndex| and will continue in
+ // order for the maximum number of rows in this circular buffer.
+ //
+ // The |firstRowIndex| may be negative. This means the circular buffer
+ // starts before the top of the image (it hasn't been filled yet).
+ unsigned char* const* GetRowAddresses(int* firstRowIndex) {
+ // Example for a 4-element circular buffer holding coords 6-9.
+ // Row 0 Coord 8
+ // Row 1 Coord 9
+ // Row 2 Coord 6 <- fNextRow = 2, fNextRowCoordinate = 10.
+ // Row 3 Coord 7
+ //
+ // The "next" row is also the first (lowest) coordinate. This computation
+ // may yield a negative value, but that's OK, the math will work out
+ // since the user of this buffer will compute the offset relative
+ // to the firstRowIndex and the negative rows will never be used.
+ *firstRowIndex = fNextRowCoordinate - fNumRows;
+
+ int curRow = fNextRow;
+ for (int i = 0; i < fNumRows; i++) {
+ fRowAddresses[i] = &fBuffer[curRow * fRowByteWidth];
+
+ // Advance to the next row, wrapping if necessary.
+ curRow++;
+ if (curRow == fNumRows) {
+ curRow = 0;
+ }
+ }
+ return &fRowAddresses[0];
+ }
+
+ private:
+ // The buffer storing the rows. They are packed, each one fRowByteWidth.
+ SkTArray<unsigned char> fBuffer;
+
+ // Number of bytes per row in the |buffer|.
+ int fRowByteWidth;
+
+ // The number of rows available in the buffer.
+ int fNumRows;
+
+ // The next row index we should write into. This wraps around as the
+ // circular buffer is used.
+ int fNextRow;
+
+ // The y coordinate of the |fNextRow|. This is incremented each time a
+ // new row is appended and does not wrap.
+ int fNextRowCoordinate;
+
+ // Buffer used by GetRowAddresses().
+ SkTArray<unsigned char*> fRowAddresses;
+ };
+
+// Convolves horizontally along a single row. The row data is given in
+// |srcData| and continues for the numValues() of the filter.
+template<bool hasAlpha>
+ void ConvolveHorizontally(const unsigned char* srcData,
+ const SkConvolutionFilter1D& filter,
+ unsigned char* outRow) {
+ // Loop over each pixel on this row in the output image.
+ int numValues = filter.numValues();
+ for (int outX = 0; outX < numValues; outX++) {
+ // Get the filter that determines the current output pixel.
+ int filterOffset, filterLength;
+ const SkConvolutionFilter1D::ConvolutionFixed* filterValues =
+ filter.FilterForValue(outX, &filterOffset, &filterLength);
+
+ // Compute the first pixel in this row that the filter affects. It will
+ // touch |filterLength| pixels (4 bytes each) after this.
+ const unsigned char* rowToFilter = &srcData[filterOffset * 4];
+
+ // Apply the filter to the row to get the destination pixel in |accum|.
+ int accum[4] = {0};
+ for (int filterX = 0; filterX < filterLength; filterX++) {
+ SkConvolutionFilter1D::ConvolutionFixed curFilter = filterValues[filterX];
+ accum[0] += curFilter * rowToFilter[filterX * 4 + 0];
+ accum[1] += curFilter * rowToFilter[filterX * 4 + 1];
+ accum[2] += curFilter * rowToFilter[filterX * 4 + 2];
+ if (hasAlpha) {
+ accum[3] += curFilter * rowToFilter[filterX * 4 + 3];
+ }
+ }
+
+ // Bring this value back in range. All of the filter scaling factors
+ // are in fixed point with kShiftBits bits of fractional part.
+ accum[0] >>= SkConvolutionFilter1D::kShiftBits;
+ accum[1] >>= SkConvolutionFilter1D::kShiftBits;
+ accum[2] >>= SkConvolutionFilter1D::kShiftBits;
+ if (hasAlpha) {
+ accum[3] >>= SkConvolutionFilter1D::kShiftBits;
+ }
+
+ // Store the new pixel.
+ outRow[outX * 4 + 0] = ClampTo8(accum[0]);
+ outRow[outX * 4 + 1] = ClampTo8(accum[1]);
+ outRow[outX * 4 + 2] = ClampTo8(accum[2]);
+ if (hasAlpha) {
+ outRow[outX * 4 + 3] = ClampTo8(accum[3]);
+ }
+ }
+ }
+
+ // There's a bug somewhere here with GCC autovectorization (-ftree-vectorize). We originally
+ // thought this was 32 bit only, but subsequent tests show that some 64 bit gcc compiles
+ // suffer here too.
+ //
+ // Dropping to -O2 disables -ftree-vectorize. GCC 4.6 needs noinline. https://bug.skia.org/2575
+ #if SK_HAS_ATTRIBUTE(optimize) && defined(SK_RELEASE)
+ #define SK_MAYBE_DISABLE_VECTORIZATION __attribute__((optimize("O2"), noinline))
+ #else
+ #define SK_MAYBE_DISABLE_VECTORIZATION
+ #endif
+
+ SK_MAYBE_DISABLE_VECTORIZATION
+ static void ConvolveHorizontallyAlpha(const unsigned char* srcData,
+ const SkConvolutionFilter1D& filter,
+ unsigned char* outRow) {
+ return ConvolveHorizontally<true>(srcData, filter, outRow);
+ }
+
+ SK_MAYBE_DISABLE_VECTORIZATION
+ static void ConvolveHorizontallyNoAlpha(const unsigned char* srcData,
+ const SkConvolutionFilter1D& filter,
+ unsigned char* outRow) {
+ return ConvolveHorizontally<false>(srcData, filter, outRow);
+ }
+
+ #undef SK_MAYBE_DISABLE_VECTORIZATION
+
+
+// Does vertical convolution to produce one output row. The filter values and
+// length are given in the first two parameters. These are applied to each
+// of the rows pointed to in the |sourceDataRows| array, with each row
+// being |pixelWidth| wide.
+//
+// The output must have room for |pixelWidth * 4| bytes.
+template<bool hasAlpha>
+ void ConvolveVertically(const SkConvolutionFilter1D::ConvolutionFixed* filterValues,
+ int filterLength,
+ unsigned char* const* sourceDataRows,
+ int pixelWidth,
+ unsigned char* outRow) {
+ // We go through each column in the output and do a vertical convolution,
+ // generating one output pixel each time.
+ for (int outX = 0; outX < pixelWidth; outX++) {
+ // Compute the number of bytes over in each row that the current column
+ // we're convolving starts at. The pixel will cover the next 4 bytes.
+ int byteOffset = outX * 4;
+
+ // Apply the filter to one column of pixels.
+ int accum[4] = {0};
+ for (int filterY = 0; filterY < filterLength; filterY++) {
+ SkConvolutionFilter1D::ConvolutionFixed curFilter = filterValues[filterY];
+ accum[0] += curFilter * sourceDataRows[filterY][byteOffset + 0];
+ accum[1] += curFilter * sourceDataRows[filterY][byteOffset + 1];
+ accum[2] += curFilter * sourceDataRows[filterY][byteOffset + 2];
+ if (hasAlpha) {
+ accum[3] += curFilter * sourceDataRows[filterY][byteOffset + 3];
+ }
+ }
+
+ // Bring this value back in range. All of the filter scaling factors
+ // are in fixed point with kShiftBits bits of precision.
+ accum[0] >>= SkConvolutionFilter1D::kShiftBits;
+ accum[1] >>= SkConvolutionFilter1D::kShiftBits;
+ accum[2] >>= SkConvolutionFilter1D::kShiftBits;
+ if (hasAlpha) {
+ accum[3] >>= SkConvolutionFilter1D::kShiftBits;
+ }
+
+ // Store the new pixel.
+ outRow[byteOffset + 0] = ClampTo8(accum[0]);
+ outRow[byteOffset + 1] = ClampTo8(accum[1]);
+ outRow[byteOffset + 2] = ClampTo8(accum[2]);
+ if (hasAlpha) {
+ unsigned char alpha = ClampTo8(accum[3]);
+
+ // Make sure the alpha channel doesn't come out smaller than any of the
+ // color channels. We use premultiplied alpha channels, so this should
+ // never happen, but rounding errors will cause this from time to time.
+ // These "impossible" colors will cause overflows (and hence random pixel
+ // values) when the resulting bitmap is drawn to the screen.
+ //
+ // We only need to do this when generating the final output row (here).
+ int maxColorChannel = SkTMax(outRow[byteOffset + 0],
+ SkTMax(outRow[byteOffset + 1],
+ outRow[byteOffset + 2]));
+ if (alpha < maxColorChannel) {
+ outRow[byteOffset + 3] = maxColorChannel;
+ } else {
+ outRow[byteOffset + 3] = alpha;
+ }
+ } else {
+ // No alpha channel, the image is opaque.
+ outRow[byteOffset + 3] = 0xff;
+ }
+ }
+ }
+
+ void ConvolveVertically(const SkConvolutionFilter1D::ConvolutionFixed* filterValues,
+ int filterLength,
+ unsigned char* const* sourceDataRows,
+ int pixelWidth,
+ unsigned char* outRow,
+ bool sourceHasAlpha) {
+ if (sourceHasAlpha) {
+ ConvolveVertically<true>(filterValues, filterLength,
+ sourceDataRows, pixelWidth,
+ outRow);
+ } else {
+ ConvolveVertically<false>(filterValues, filterLength,
+ sourceDataRows, pixelWidth,
+ outRow);
+ }
+ }
+
+} // namespace
+
+// SkConvolutionFilter1D ---------------------------------------------------------
+
+SkConvolutionFilter1D::SkConvolutionFilter1D()
+: fMaxFilter(0) {
+}
+
+SkConvolutionFilter1D::~SkConvolutionFilter1D() {
+}
+
+void SkConvolutionFilter1D::AddFilter(int filterOffset,
+ const ConvolutionFixed* filterValues,
+ int filterLength) {
+ // It is common for leading/trailing filter values to be zeros. In such
+ // cases it is beneficial to only store the central factors.
+ // For a scaling to 1/4th in each dimension using a Lanczos-2 filter on
+ // a 1080p image this optimization gives a ~10% speed improvement.
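+ // For example (illustrative): the values {0, 0, 3, 5, 0} added with filterOffset 10
+ // are stored as just {3, 5}, with fOffset == 12, fTrimmedLength == 2 and fLength == 5.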
+ int filterSize = filterLength;
+ int firstNonZero = 0;
+ while (firstNonZero < filterLength && filterValues[firstNonZero] == 0) {
+ firstNonZero++;
+ }
+
+ if (firstNonZero < filterLength) {
+ // Here we have at least one non-zero factor.
+ int lastNonZero = filterLength - 1;
+ while (lastNonZero >= 0 && filterValues[lastNonZero] == 0) {
+ lastNonZero--;
+ }
+
+ filterOffset += firstNonZero;
+ filterLength = lastNonZero + 1 - firstNonZero;
+ SkASSERT(filterLength > 0);
+
+ fFilterValues.append(filterLength, &filterValues[firstNonZero]);
+ } else {
+ // Here all the factors were zeroes.
+ filterLength = 0;
+ }
+
+ FilterInstance instance;
+
+ // We pushed filterLength elements onto fFilterValues
+ instance.fDataLocation = (static_cast<int>(fFilterValues.count()) -
+ filterLength);
+ instance.fOffset = filterOffset;
+ instance.fTrimmedLength = filterLength;
+ instance.fLength = filterSize;
+ fFilters.push(instance);
+
+ fMaxFilter = SkTMax(fMaxFilter, filterLength);
+}
+
+const SkConvolutionFilter1D::ConvolutionFixed* SkConvolutionFilter1D::GetSingleFilter(
+ int* specifiedFilterlength,
+ int* filterOffset,
+ int* filterLength) const {
+ const FilterInstance& filter = fFilters[0];
+ *filterOffset = filter.fOffset;
+ *filterLength = filter.fTrimmedLength;
+ *specifiedFilterlength = filter.fLength;
+ if (filter.fTrimmedLength == 0) {
+ return nullptr;
+ }
+
+ return &fFilterValues[filter.fDataLocation];
+}
+
+bool BGRAConvolve2D(const unsigned char* sourceData,
+ int sourceByteRowStride,
+ bool sourceHasAlpha,
+ const SkConvolutionFilter1D& filterX,
+ const SkConvolutionFilter1D& filterY,
+ int outputByteRowStride,
+ unsigned char* output,
+ const SkConvolutionProcs& convolveProcs,
+ bool useSimdIfPossible) {
+
+ int maxYFilterSize = filterY.maxFilter();
+
+ // The next row in the input that we will generate a horizontally
+ // convolved row for. If the filter doesn't start at the beginning of the
+ // image (this is the case when we are only resizing a subset), then we
+ // don't want to generate any output rows before that. Compute the starting
+ // row for convolution as the first pixel for the first vertical filter.
+ int filterOffset, filterLength;
+ const SkConvolutionFilter1D::ConvolutionFixed* filterValues =
+ filterY.FilterForValue(0, &filterOffset, &filterLength);
+ int nextXRow = filterOffset;
+
+ // We loop over each row in the input doing a horizontal convolution. This
+ // will result in a horizontally convolved image. We write the results into
+ // a circular buffer of convolved rows and do vertical convolution as rows
+ // are available. This prevents us from having to store the entire
+ // intermediate image and helps cache coherency.
+ // We will need four extra rows so that four horizontal convolutions can be done
+ // simultaneously. We also pad each row in the row buffer so it is aligned up to
+ // 16 bytes.
+ // TODO(jiesun): We do not use aligned load from row buffer in vertical
+ // convolution pass yet. Somehow Windows does not like it.
+ int rowBufferWidth = (filterX.numValues() + 15) & ~0xF;
+ int rowBufferHeight = maxYFilterSize +
+ (convolveProcs.fConvolve4RowsHorizontally ? 4 : 0);
+
+ // check for too-big allocation requests : crbug.com/528628
+ {
+ int64_t size = sk_64_mul(rowBufferWidth, rowBufferHeight);
+ // We need some limit to avoid a malloc that appears to succeed via overcommit
+ // but then crashes once we actually touch the memory.
+ // 100MB seems big enough to allow "normal" zoom factors and image sizes through
+ // while avoiding the crash seen in the bug (crbug.com/528628).
+ if (size > 100 * 1024 * 1024) {
+// SkDebugf("BGRAConvolve2D: tmp allocation [%lld] too big\n", size);
+ return false;
+ }
+ }
+
+ CircularRowBuffer rowBuffer(rowBufferWidth,
+ rowBufferHeight,
+ filterOffset);
+
+ // Loop over every possible output row, processing just enough horizontal
+ // convolutions to run each subsequent vertical convolution.
+ SkASSERT(outputByteRowStride >= filterX.numValues() * 4);
+ int numOutputRows = filterY.numValues();
+
+ // We need to check which is the last line to convolve before we advance 4
+ // lines in one iteration.
+ int lastFilterOffset, lastFilterLength;
+
+ // SSE2 can access up to 3 extra pixels past the end of the
+ // buffer. At the bottom of the image, we have to be careful
+ // not to access data past the end of the buffer. Normally
+ // we fall back to the C++ implementation for the last row.
+ // If the last row is less than 3 pixels wide, we may have to fall
+ // back to the C++ version for more rows. Compute how many
+ // rows we need to avoid the SSE implementation for here.
+ filterX.FilterForValue(filterX.numValues() - 1, &lastFilterOffset,
+ &lastFilterLength);
+ int avoidSimdRows = 1 + convolveProcs.fExtraHorizontalReads /
+ (lastFilterOffset + lastFilterLength);
+
+ filterY.FilterForValue(numOutputRows - 1, &lastFilterOffset,
+ &lastFilterLength);
+
+ for (int outY = 0; outY < numOutputRows; outY++) {
+ filterValues = filterY.FilterForValue(outY,
+ &filterOffset, &filterLength);
+
+ // Generate output rows until we have enough to run the current filter.
+ while (nextXRow < filterOffset + filterLength) {
+ if (convolveProcs.fConvolve4RowsHorizontally &&
+ nextXRow + 3 < lastFilterOffset + lastFilterLength -
+ avoidSimdRows) {
+ const unsigned char* src[4];
+ unsigned char* outRow[4];
+ for (int i = 0; i < 4; ++i) {
+ src[i] = &sourceData[(uint64_t)(nextXRow + i) * sourceByteRowStride];
+ outRow[i] = rowBuffer.advanceRow();
+ }
+ convolveProcs.fConvolve4RowsHorizontally(src, filterX, outRow, 4*rowBufferWidth);
+ nextXRow += 4;
+ } else {
+ // Check if we need to avoid SSE2 for this row.
+ if (convolveProcs.fConvolveHorizontally &&
+ nextXRow < lastFilterOffset + lastFilterLength -
+ avoidSimdRows) {
+ convolveProcs.fConvolveHorizontally(
+ &sourceData[(uint64_t)nextXRow * sourceByteRowStride],
+ filterX, rowBuffer.advanceRow(), sourceHasAlpha);
+ } else {
+ if (sourceHasAlpha) {
+ ConvolveHorizontallyAlpha(
+ &sourceData[(uint64_t)nextXRow * sourceByteRowStride],
+ filterX, rowBuffer.advanceRow());
+ } else {
+ ConvolveHorizontallyNoAlpha(
+ &sourceData[(uint64_t)nextXRow * sourceByteRowStride],
+ filterX, rowBuffer.advanceRow());
+ }
+ }
+ nextXRow++;
+ }
+ }
+
+ // Compute where in the output image this row of final data will go.
+ unsigned char* curOutputRow = &output[(uint64_t)outY * outputByteRowStride];
+
+ // Get the list of rows that the circular buffer has, in order.
+ int firstRowInCircularBuffer;
+ unsigned char* const* rowsToConvolve =
+ rowBuffer.GetRowAddresses(&firstRowInCircularBuffer);
+
+ // Now compute the start of the subset of those rows that the filter
+ // needs.
+ unsigned char* const* firstRowForFilter =
+ &rowsToConvolve[filterOffset - firstRowInCircularBuffer];
+
+ if (convolveProcs.fConvolveVertically) {
+ convolveProcs.fConvolveVertically(filterValues, filterLength,
+ firstRowForFilter,
+ filterX.numValues(), curOutputRow,
+ sourceHasAlpha);
+ } else {
+ ConvolveVertically(filterValues, filterLength,
+ firstRowForFilter,
+ filterX.numValues(), curOutputRow,
+ sourceHasAlpha);
+ }
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkConvolver.h b/gfx/skia/skia/src/core/SkConvolver.h
new file mode 100644
index 000000000..4e23f6cc1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkConvolver.h
@@ -0,0 +1,207 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SK_CONVOLVER_H
+#define SK_CONVOLVER_H
+
+#include "SkSize.h"
+#include "SkTDArray.h"
+
+// avoid confusion with Mac OS X's math library (Carbon)
+#if defined(__APPLE__)
+#undef FloatToConvolutionFixed
+#undef ConvolutionFixedToFloat
+#undef FloatToFixed
+#undef FixedToFloat
+#endif
+
+// Represents a filter in one dimension. Each output pixel has one entry in this
+// object for the filter values contributing to it. You build up the filter
+// list by calling AddFilter for each output pixel (in order).
+//
+// We do 2-dimensional convolution by first convolving each row by one
+// SkConvolutionFilter1D, then convolving each column by another one.
+//
+// Entries are stored in ConvolutionFixed point, shifted left by kShiftBits.
+class SkConvolutionFilter1D {
+public:
+ typedef short ConvolutionFixed;
+
+ // The number of bits that ConvolutionFixed point values are shifted by.
+ enum { kShiftBits = 14 };
+
+ SK_API SkConvolutionFilter1D();
+ SK_API ~SkConvolutionFilter1D();
+
+ // Convert between floating point and our ConvolutionFixed point representation.
+ static ConvolutionFixed FloatToFixed(float f) {
+ return static_cast<ConvolutionFixed>(f * (1 << kShiftBits));
+ }
+ static unsigned char FixedToChar(ConvolutionFixed x) {
+ return static_cast<unsigned char>(x >> kShiftBits);
+ }
+ static float FixedToFloat(ConvolutionFixed x) {
+ // The cast relies on ConvolutionFixed being a short, implying that on
+ // the platforms we care about all (16) bits will fit into
+ // the mantissa of a (32-bit) float.
+ static_assert(sizeof(ConvolutionFixed) == 2, "ConvolutionFixed_type_should_fit_in_float_mantissa");
+ float raw = static_cast<float>(x);
+ return ldexpf(raw, -kShiftBits);
+ }
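+
+ // For example (illustrative note): with kShiftBits == 14 the scale is 1 << 14 == 16384,
+ // so FloatToFixed(0.5f) == 8192 and FixedToFloat(8192) == 0.5f; a filter tap of 1.0f is
+ // stored as 16384.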
+
+ // Returns the maximum pixel span of a filter.
+ int maxFilter() const { return fMaxFilter; }
+
+ // Returns the number of filters in this filter. This is the dimension of the
+ // output image.
+ int numValues() const { return static_cast<int>(fFilters.count()); }
+
+ void reserveAdditional(int filterCount, int filterValueCount) {
+ fFilters.setReserve(fFilters.count() + filterCount);
+ fFilterValues.setReserve(fFilterValues.count() + filterValueCount);
+ }
+
+ // Appends the given list of scaling values for generating a given output
+ // pixel. |filterOffset| is the distance from the edge of the image to where
+ // the scaling factors start. The scaling factors apply to the source pixels
+ // starting from this position, and going for the next |filterLength| pixels.
+ //
+ // You will probably want to make sure your input is normalized (that is,
+ // all entries in |filterValues| sum to one) to prevent affecting the overall
+ // brightness of the image.
+ //
+ // The filterLength must be > 0.
+ void AddFilter(int filterOffset,
+ const ConvolutionFixed* filterValues,
+ int filterLength);
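+
+ // Illustrative sketch (editorial addition): building a normalized 3-tap box filter
+ // for one output pixel whose support starts at source pixel 5, then reading it back.
+ //
+ //   SkConvolutionFilter1D filter;
+ //   SkConvolutionFilter1D::ConvolutionFixed third =
+ //       SkConvolutionFilter1D::FloatToFixed(1.0f / 3.0f);
+ //   SkConvolutionFilter1D::ConvolutionFixed taps[3] = { third, third, third };
+ //   filter.AddFilter(5, taps, 3);
+ //
+ //   int offset, length;
+ //   const SkConvolutionFilter1D::ConvolutionFixed* values =
+ //       filter.FilterForValue(0, &offset, &length);
+ //   // offset == 5, length == 3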
+
+ // Retrieves a filter for the given |valueOffset|, a position in the output
+ // image in the direction we're convolving. The offset and length of the
+ // filter values are put into the corresponding out arguments (see AddFilter
+ // above for what these mean), and a pointer to the first scaling factor is
+ // returned. There will be |filterLength| values in this array.
+ inline const ConvolutionFixed* FilterForValue(int valueOffset,
+ int* filterOffset,
+ int* filterLength) const {
+ const FilterInstance& filter = fFilters[valueOffset];
+ *filterOffset = filter.fOffset;
+ *filterLength = filter.fTrimmedLength;
+ if (filter.fTrimmedLength == 0) {
+ return nullptr;
+ }
+ return &fFilterValues[filter.fDataLocation];
+ }
+
+ // Retrieves the filter for the offset 0, presumed to be the one and only.
+ // The offset and length of the filter values are put into the corresponding
+ // out arguments (see AddFilter). Note that |filterLength| and
+ // |specifiedFilterLength| may be different if leading/trailing zeros of the
+ // original floating point form were clipped.
+ // There will be |filterLength| values in the return array.
+ // Returns nullptr if the filter is 0-length (for instance when all floating
+ // point values passed to AddFilter were clipped to 0).
+ SK_API const ConvolutionFixed* GetSingleFilter(int* specifiedFilterLength,
+ int* filterOffset,
+ int* filterLength) const;
+
+ // Add another value to the fFilterValues array -- useful for
+ // SIMD padding which happens outside of this class.
+
+ void addFilterValue( ConvolutionFixed val ) {
+ fFilterValues.push( val );
+ }
+private:
+ struct FilterInstance {
+ // Offset within filterValues for this instance of the filter.
+ int fDataLocation;
+
+ // Offset from the edge of the image to where the scaling factors start, in pixels.
+ int fOffset;
+
+ // Number of values in this filter instance.
+ int fTrimmedLength;
+
+ // Filter length as specified. Note that this may be different from
+ // fTrimmedLength if leading/trailing zeros of the original floating
+ // point form were clipped differently on each tail.
+ int fLength;
+ };
+
+ // Stores the information for each filter added to this class.
+ SkTDArray<FilterInstance> fFilters;
+
+ // We store all the filter values in this flat list, indexed by
+ // |FilterInstance::fDataLocation| to avoid the mallocs required for storing
+ // each one separately.
+ SkTDArray<ConvolutionFixed> fFilterValues;
+
+ // The maximum size of any filter we've added.
+ int fMaxFilter;
+};
+
+typedef void (*SkConvolveVertically_pointer)(
+ const SkConvolutionFilter1D::ConvolutionFixed* filterValues,
+ int filterLength,
+ unsigned char* const* sourceDataRows,
+ int pixelWidth,
+ unsigned char* outRow,
+ bool hasAlpha);
+typedef void (*SkConvolve4RowsHorizontally_pointer)(
+ const unsigned char* srcData[4],
+ const SkConvolutionFilter1D& filter,
+ unsigned char* outRow[4],
+ size_t outRowBytes);
+typedef void (*SkConvolveHorizontally_pointer)(
+ const unsigned char* srcData,
+ const SkConvolutionFilter1D& filter,
+ unsigned char* outRow,
+ bool hasAlpha);
+typedef void (*SkConvolveFilterPadding_pointer)(
+ SkConvolutionFilter1D* filter);
+
+struct SkConvolutionProcs {
+ // This is how many extra pixels may be read by the
+ // convolve*horizontally functions.
+ int fExtraHorizontalReads;
+ SkConvolveVertically_pointer fConvolveVertically;
+ SkConvolve4RowsHorizontally_pointer fConvolve4RowsHorizontally;
+ SkConvolveHorizontally_pointer fConvolveHorizontally;
+ SkConvolveFilterPadding_pointer fApplySIMDPadding;
+};
+
+
+
+// Does a two-dimensional convolution on the given source image.
+//
+// It is assumed the source pixel offsets referenced in the input filters
+// reference only valid pixels, so the source image size is not required. Each
+// row of the source image starts |sourceByteRowStride| after the previous
+// one (this allows you to have rows with some padding at the end).
+//
+// The result will be put into the given output buffer. The destination image
+// size will be xfilter.numValues() * yfilter.numValues() pixels. It will be
+// in rows of exactly xfilter.numValues() * 4 bytes.
+//
+// |sourceHasAlpha| is a hint that allows us to avoid doing computations on
+// the alpha channel if the image is opaque. If you don't know, set this to
+// true and it will work properly, but setting this to false will be a few
+// percent faster if you know the image is opaque.
+//
+// The layout in memory is assumed to be 4-bytes per pixel in B-G-R-A order
+// (this is ARGB when loaded into 32-bit words on a little-endian machine).
+/**
+ * Returns false if it was unable to perform the convolution/rescale, in which case the
+ * contents of the output buffer are undefined.
+ */
+SK_API bool BGRAConvolve2D(const unsigned char* sourceData,
+ int sourceByteRowStride,
+ bool sourceHasAlpha,
+ const SkConvolutionFilter1D& xfilter,
+ const SkConvolutionFilter1D& yfilter,
+ int outputByteRowStride,
+ unsigned char* output,
+ const SkConvolutionProcs&,
+ bool useSimdIfPossible);
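+
+// Illustrative call sketch (editorial addition; |srcBGRA|, |srcWidth|, |xFilter| and
+// |yFilter| are assumed to be set up by the caller):
+//
+//   const int dstWidth  = xFilter.numValues();
+//   const int dstHeight = yFilter.numValues();
+//   std::vector<unsigned char> dst(dstWidth * dstHeight * 4);
+//   SkConvolutionProcs procs;
+//   memset(&procs, 0, sizeof(procs));  // all null: always take the portable C++ paths
+//   bool ok = BGRAConvolve2D(srcBGRA, srcWidth * 4, /*sourceHasAlpha=*/true,
+//                            xFilter, yFilter, dstWidth * 4, dst.data(),
+//                            procs, /*useSimdIfPossible=*/false);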
+
+#endif // SK_CONVOLVER_H
diff --git a/gfx/skia/skia/src/core/SkCoreBlitters.h b/gfx/skia/skia/src/core/SkCoreBlitters.h
new file mode 100644
index 000000000..46b2b7ee9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCoreBlitters.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCoreBlitters_DEFINED
+#define SkCoreBlitters_DEFINED
+
+#include "SkBitmapProcShader.h"
+#include "SkBlitter.h"
+#include "SkBlitRow.h"
+#include "SkShader.h"
+#include "SkSmallAllocator.h"
+
+class SkRasterBlitter : public SkBlitter {
+public:
+ SkRasterBlitter(const SkPixmap& device) : fDevice(device) {}
+
+protected:
+ const SkPixmap fDevice;
+
+private:
+ typedef SkBlitter INHERITED;
+};
+
+class SkShaderBlitter : public SkRasterBlitter {
+public:
+ /**
+ * The storage for shaderContext is owned by the caller, but the object itself is not.
+ * The blitter only ensures that the storage always holds a live object, but it may
+ * exchange that object.
+ */
+ SkShaderBlitter(const SkPixmap& device, const SkPaint& paint,
+ SkShader::Context* shaderContext);
+ virtual ~SkShaderBlitter();
+
+ /**
+ * Creates a new shader context and uses it instead of the old one, if successful.
+ * Will create the context at the same location as the old one (this is safe
+ * because the shader itself is unchanged).
+ */
+ bool resetShaderContext(const SkShader::ContextRec&) override;
+
+ SkShader::Context* getShaderContext() const override { return fShaderContext; }
+
+protected:
+ uint32_t fShaderFlags;
+ const SkShader* fShader;
+ SkShader::Context* fShaderContext;
+ bool fConstInY;
+
+private:
+ // illegal
+ SkShaderBlitter& operator=(const SkShaderBlitter&);
+
+ typedef SkRasterBlitter INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkA8_Coverage_Blitter : public SkRasterBlitter {
+public:
+ SkA8_Coverage_Blitter(const SkPixmap& device, const SkPaint& paint);
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitMask(const SkMask&, const SkIRect&) override;
+ const SkPixmap* justAnOpaqueColor(uint32_t*) override;
+};
+
+class SkA8_Blitter : public SkRasterBlitter {
+public:
+ SkA8_Blitter(const SkPixmap& device, const SkPaint& paint);
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitMask(const SkMask&, const SkIRect&) override;
+ const SkPixmap* justAnOpaqueColor(uint32_t*) override;
+
+private:
+ unsigned fSrcA;
+
+ // illegal
+ SkA8_Blitter& operator=(const SkA8_Blitter&);
+
+ typedef SkRasterBlitter INHERITED;
+};
+
+class SkA8_Shader_Blitter : public SkShaderBlitter {
+public:
+ SkA8_Shader_Blitter(const SkPixmap& device, const SkPaint& paint,
+ SkShader::Context* shaderContext);
+ virtual ~SkA8_Shader_Blitter();
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override;
+ void blitMask(const SkMask&, const SkIRect&) override;
+
+private:
+ SkXfermode* fXfermode;
+ SkPMColor* fBuffer;
+ uint8_t* fAAExpand;
+
+ // illegal
+ SkA8_Shader_Blitter& operator=(const SkA8_Shader_Blitter&);
+
+ typedef SkShaderBlitter INHERITED;
+};
+
+////////////////////////////////////////////////////////////////
+
+class SkARGB32_Blitter : public SkRasterBlitter {
+public:
+ SkARGB32_Blitter(const SkPixmap& device, const SkPaint& paint);
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitMask(const SkMask&, const SkIRect&) override;
+ const SkPixmap* justAnOpaqueColor(uint32_t*) override;
+ void blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) override;
+ void blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) override;
+
+protected:
+ SkColor fColor;
+ SkPMColor fPMColor;
+
+private:
+ unsigned fSrcA, fSrcR, fSrcG, fSrcB;
+
+ // illegal
+ SkARGB32_Blitter& operator=(const SkARGB32_Blitter&);
+
+ typedef SkRasterBlitter INHERITED;
+};
+
+class SkARGB32_Opaque_Blitter : public SkARGB32_Blitter {
+public:
+ SkARGB32_Opaque_Blitter(const SkPixmap& device, const SkPaint& paint)
+ : INHERITED(device, paint) { SkASSERT(paint.getAlpha() == 0xFF); }
+ void blitMask(const SkMask&, const SkIRect&) override;
+ void blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) override;
+ void blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) override;
+
+private:
+ typedef SkARGB32_Blitter INHERITED;
+};
+
+class SkARGB32_Black_Blitter : public SkARGB32_Opaque_Blitter {
+public:
+ SkARGB32_Black_Blitter(const SkPixmap& device, const SkPaint& paint)
+ : INHERITED(device, paint) {}
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override;
+ void blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) override;
+ void blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) override;
+
+private:
+ typedef SkARGB32_Opaque_Blitter INHERITED;
+};
+
+class SkARGB32_Shader_Blitter : public SkShaderBlitter {
+public:
+ SkARGB32_Shader_Blitter(const SkPixmap& device, const SkPaint& paint,
+ SkShader::Context* shaderContext);
+ virtual ~SkARGB32_Shader_Blitter();
+ void blitH(int x, int y, int width) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitAntiH(int x, int y, const SkAlpha[], const int16_t[]) override;
+ void blitMask(const SkMask&, const SkIRect&) override;
+
+private:
+ SkXfermode* fXfermode;
+ SkPMColor* fBuffer;
+ SkBlitRow::Proc32 fProc32;
+ SkBlitRow::Proc32 fProc32Blend;
+ bool fShadeDirectlyIntoDevice;
+
+ // illegal
+ SkARGB32_Shader_Blitter& operator=(const SkARGB32_Shader_Blitter&);
+
+ typedef SkShaderBlitter INHERITED;
+};
+
+SkBlitter* SkBlitter_ARGB32_Create(const SkPixmap& device, const SkPaint&, SkShader::Context*,
+ SkTBlitterAllocator*);
+
+SkBlitter* SkBlitter_F16_Create(const SkPixmap& device, const SkPaint&, SkShader::Context*,
+ SkTBlitterAllocator*);
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* These return the correct subclass of blitter for their device config.
+
+ Currently, they make the following assumptions about the state of the
+ paint:
+
+ 1. If there is an xfermode, there will also be a shader
+ 2. If there is a colorfilter, there will be a shader that itself handles
+ calling the filter, so the blitter can always ignore the colorfilter obj
+
+ These pre-conditions must be handled by the caller, in our case
+ SkBlitter::Choose(...)
+ */
+
+SkBlitter* SkBlitter_ChooseD565(const SkPixmap& device, const SkPaint& paint,
+ SkShader::Context* shaderContext,
+ SkTBlitterAllocator* allocator);
+
+
+// Returns nullptr if no SkRasterPipeline blitter can be constructed for this paint.
+SkBlitter* SkCreateRasterPipelineBlitter(const SkPixmap&, const SkPaint&, SkTBlitterAllocator*);
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkCpu.cpp b/gfx/skia/skia/src/core/SkCpu.cpp
new file mode 100644
index 000000000..9c827f3ef
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCpu.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCpu.h"
+#include "SkOnce.h"
+
+#if defined(SK_CPU_X86)
+ #if defined(SK_BUILD_FOR_WIN32)
+ #include <intrin.h>
+ static void cpuid (uint32_t abcd[4]) { __cpuid ((int*)abcd, 1); }
+ static void cpuid7(uint32_t abcd[4]) { __cpuidex((int*)abcd, 7, 0); }
+ static uint64_t xgetbv(uint32_t xcr) { return _xgetbv(xcr); }
+ #else
+ #include <cpuid.h>
+ #if !defined(__cpuid_count) // Old Mac Clang doesn't have this defined.
+ #define __cpuid_count(eax, ecx, a, b, c, d) \
+ __asm__("cpuid" : "=a"(a), "=b"(b), "=c"(c), "=d"(d) : "0"(eax), "2"(ecx))
+ #endif
+ static void cpuid (uint32_t abcd[4]) { __get_cpuid(1, abcd+0, abcd+1, abcd+2, abcd+3); }
+ static void cpuid7(uint32_t abcd[4]) {
+ __cpuid_count(7, 0, abcd[0], abcd[1], abcd[2], abcd[3]);
+ }
+ static uint64_t xgetbv(uint32_t xcr) {
+ uint32_t eax, edx;
+ __asm__ __volatile__ ( "xgetbv" : "=a"(eax), "=d"(edx) : "c"(xcr));
+ return (uint64_t)(edx) << 32 | eax;
+ }
+ #endif
+
+ static uint32_t read_cpu_features() {
+ uint32_t features = 0;
+ uint32_t abcd[4] = {0,0,0,0};
+
+ // You might want to refer to http://www.sandpile.org/x86/cpuid.htm
+
+ cpuid(abcd);
+ if (abcd[3] & (1<<25)) { features |= SkCpu:: SSE1; }
+ if (abcd[3] & (1<<26)) { features |= SkCpu:: SSE2; }
+ if (abcd[2] & (1<< 0)) { features |= SkCpu:: SSE3; }
+ if (abcd[2] & (1<< 9)) { features |= SkCpu::SSSE3; }
+ if (abcd[2] & (1<<19)) { features |= SkCpu::SSE41; }
+ if (abcd[2] & (1<<20)) { features |= SkCpu::SSE42; }
+
+ if ((abcd[2] & (3<<26)) == (3<<26) && (xgetbv(0) & 6) == 6) { // XSAVE + OSXSAVE
+ if (abcd[2] & (1<<28)) { features |= SkCpu:: AVX; }
+ if (abcd[2] & (1<<29)) { features |= SkCpu::F16C; }
+ if (abcd[2] & (1<<12)) { features |= SkCpu:: FMA; }
+
+ cpuid7(abcd);
+ if (abcd[1] & (1<<5)) { features |= SkCpu::AVX2; }
+ if (abcd[1] & (1<<3)) { features |= SkCpu::BMI1; }
+ if (abcd[1] & (1<<8)) { features |= SkCpu::BMI2; }
+ }
+ return features;
+ }
+
+#elif defined(SK_CPU_ARM32) && \
+ defined(SK_BUILD_FOR_ANDROID) && \
+ !defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
+ #ifdef MOZ_SKIA
+ #include "mozilla/arm.h"
+ static uint32_t read_cpu_features() {
+ uint32_t features = 0;
+ if (mozilla::supports_neon()) { features |= SkCpu::NEON; }
+ return features;
+ }
+ #else
+ #include "cpu-features.h"
+
+ static uint32_t read_cpu_features() {
+ uint32_t features = 0;
+
+ uint64_t android_features = android_getCpuFeatures();
+ if (android_features & ANDROID_CPU_ARM_FEATURE_NEON ) { features |= SkCpu::NEON ; }
+ if (android_features & ANDROID_CPU_ARM_FEATURE_NEON_FMA) { features |= SkCpu::NEON_FMA; }
+ if (android_features & ANDROID_CPU_ARM_FEATURE_VFP_FP16) { features |= SkCpu::VFP_FP16; }
+ return features;
+ }
+ #endif
+#elif defined(SK_CPU_ARM64) && \
+ defined(SK_BUILD_FOR_ANDROID) && \
+ !defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
+ #ifdef MOZ_SKIA
+ static uint32_t read_cpu_features() {
+ return 0;
+ }
+ #else
+ #include "cpu-features.h"
+
+ static uint32_t read_cpu_features() {
+ uint32_t features = 0;
+
+ uint64_t android_features = android_getCpuFeatures();
+ if (android_features & ANDROID_CPU_ARM64_FEATURE_CRC32) { features |= SkCpu::CRC32; }
+ return features;
+ }
+ #endif
+#else
+ static uint32_t read_cpu_features() {
+ return 0;
+ }
+
+#endif
+
+uint32_t SkCpu::gCachedFeatures = 0;
+
+void SkCpu::CacheRuntimeFeatures() {
+ static SkOnce once;
+ once([] { gCachedFeatures = read_cpu_features(); });
+}
diff --git a/gfx/skia/skia/src/core/SkCpu.h b/gfx/skia/skia/src/core/SkCpu.h
new file mode 100644
index 000000000..34c19d254
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCpu.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCpu_DEFINED
+#define SkCpu_DEFINED
+
+#include "SkTypes.h"
+
+struct SkCpu {
+ enum {
+ SSE1 = 1 << 0,
+ SSE2 = 1 << 1,
+ SSE3 = 1 << 2,
+ SSSE3 = 1 << 3,
+ SSE41 = 1 << 4,
+ SSE42 = 1 << 5,
+ AVX = 1 << 6,
+ F16C = 1 << 7,
+ FMA = 1 << 8,
+ AVX2 = 1 << 9,
+ BMI1 = 1 << 10,
+ BMI2 = 1 << 11,
+
+ // Handy alias for all the cool Haswell+ instructions.
+ HSW = AVX2 | BMI1 | BMI2 | F16C | FMA,
+ };
+ enum {
+ NEON = 1 << 0,
+ NEON_FMA = 1 << 1,
+ VFP_FP16 = 1 << 2,
+ CRC32 = 1 << 3,
+ };
+
+ static void CacheRuntimeFeatures();
+ static bool Supports(uint32_t);
+private:
+ static uint32_t gCachedFeatures;
+};
+
+inline bool SkCpu::Supports(uint32_t mask) {
+ uint32_t features = gCachedFeatures;
+
+ // If we mask in compile-time known lower limits, the compiler can
+ // often compile away this entire function.
+#if SK_CPU_X86
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
+ features |= SSE1;
+ #endif
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ features |= SSE2;
+ #endif
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE3
+ features |= SSE3;
+ #endif
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ features |= SSSE3;
+ #endif
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ features |= SSE41;
+ #endif
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE42
+ features |= SSE42;
+ #endif
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
+ features |= AVX;
+ #endif
+ // F16C goes here if we add SK_CPU_SSE_LEVEL_F16C
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ features |= AVX2;
+ #endif
+ // FMA doesn't fit neatly into this total ordering.
+ // It's available on Haswell+ just like AVX2, but it's technically a different bit.
+ // TODO: circle back on this if we find ourselves limited by lack of compile-time FMA
+
+#else
+ #if defined(SK_ARM_HAS_NEON)
+ features |= NEON;
+ #endif
+
+ #if defined(SK_CPU_ARM64)
+ features |= NEON|NEON_FMA|VFP_FP16;
+ #endif
+
+ #if defined(SK_ARM_HAS_CRC32)
+ features |= CRC32;
+ #endif
+
+#endif
+ return (features & mask) == mask;
+}
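+
+// Typical usage sketch (editorial addition):
+//
+//   SkCpu::CacheRuntimeFeatures();   // call once, early in startup
+//   if (SkCpu::Supports(SkCpu::SSE41)) {
+//       // dispatch to an SSE4.1-specialized routine
+//   } else {
+//       // portable fallback
+//   }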
+
+#endif//SkCpu_DEFINED
diff --git a/gfx/skia/skia/src/core/SkCubicClipper.cpp b/gfx/skia/skia/src/core/SkCubicClipper.cpp
new file mode 100644
index 000000000..b5b7dceab
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCubicClipper.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkCubicClipper.h"
+#include "SkGeometry.h"
+
+SkCubicClipper::SkCubicClipper() {
+ fClip.setEmpty();
+}
+
+void SkCubicClipper::setClip(const SkIRect& clip) {
+ // convert to scalars, since that's where we'll see the points
+ fClip.set(clip);
+}
+
+
+bool SkCubicClipper::ChopMonoAtY(const SkPoint pts[4], SkScalar y, SkScalar* t) {
+ SkScalar ycrv[4];
+ ycrv[0] = pts[0].fY - y;
+ ycrv[1] = pts[1].fY - y;
+ ycrv[2] = pts[2].fY - y;
+ ycrv[3] = pts[3].fY - y;
+
+#ifdef NEWTON_RAPHSON // Quadratic convergence, typically <= 3 iterations.
+ // Initial guess.
+ // TODO(turk): Check for zero denominator? Shouldn't happen unless the curve
+ // is not only monotonic but degenerate.
+ SkScalar t1 = ycrv[0] / (ycrv[0] - ycrv[3]);
+
+ // Newton's iterations.
+ const SkScalar tol = SK_Scalar1 / 16384; // This leaves 2 fixed noise bits.
+ SkScalar t0;
+ const int maxiters = 5;
+ int iters = 0;
+ bool converged;
+ do {
+ t0 = t1;
+ SkScalar y01 = SkScalarInterp(ycrv[0], ycrv[1], t0);
+ SkScalar y12 = SkScalarInterp(ycrv[1], ycrv[2], t0);
+ SkScalar y23 = SkScalarInterp(ycrv[2], ycrv[3], t0);
+ SkScalar y012 = SkScalarInterp(y01, y12, t0);
+ SkScalar y123 = SkScalarInterp(y12, y23, t0);
+ SkScalar y0123 = SkScalarInterp(y012, y123, t0);
+ SkScalar yder = (y123 - y012) * 3;
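+ // y0123 is B(t0) from the de Casteljau evaluation above, and for a cubic Bezier the
+ // derivative there is B'(t0) = 3 * (y123 - y012), so the update below is a standard
+ // Newton step t1 = t0 - B(t0) / B'(t0).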
+ // TODO(turk): check for yder==0: horizontal.
+ t1 -= y0123 / yder;
+ converged = SkScalarAbs(t1 - t0) <= tol; // NaN-safe
+ ++iters;
+ } while (!converged && (iters < maxiters));
+ *t = t1; // Return the result.
+
+ // The result might be valid, even if outside of the range [0, 1], but
+ // we never evaluate a Bezier outside this interval, so we return false.
+ if (t1 < 0 || t1 > SK_Scalar1)
+ return false; // This shouldn't happen, but check anyway.
+ return converged;
+
+#else // BISECTION // Linear convergence, typically 16 iterations.
+
+ // Check that the endpoints straddle zero.
+ SkScalar tNeg, tPos; // Negative and positive function parameters.
+ if (ycrv[0] < 0) {
+ if (ycrv[3] < 0)
+ return false;
+ tNeg = 0;
+ tPos = SK_Scalar1;
+ } else if (ycrv[0] > 0) {
+ if (ycrv[3] > 0)
+ return false;
+ tNeg = SK_Scalar1;
+ tPos = 0;
+ } else {
+ *t = 0;
+ return true;
+ }
+
+ const SkScalar tol = SK_Scalar1 / 65536; // 1 for fixed, 1e-5 for float.
+ int iters = 0;
+ do {
+ SkScalar tMid = (tPos + tNeg) / 2;
+ SkScalar y01 = SkScalarInterp(ycrv[0], ycrv[1], tMid);
+ SkScalar y12 = SkScalarInterp(ycrv[1], ycrv[2], tMid);
+ SkScalar y23 = SkScalarInterp(ycrv[2], ycrv[3], tMid);
+ SkScalar y012 = SkScalarInterp(y01, y12, tMid);
+ SkScalar y123 = SkScalarInterp(y12, y23, tMid);
+ SkScalar y0123 = SkScalarInterp(y012, y123, tMid);
+ if (y0123 == 0) {
+ *t = tMid;
+ return true;
+ }
+ if (y0123 < 0) tNeg = tMid;
+ else tPos = tMid;
+ ++iters;
+ } while (!(SkScalarAbs(tPos - tNeg) <= tol)); // NaN-safe
+
+ *t = (tNeg + tPos) / 2;
+ return true;
+#endif // BISECTION
+}
+
+
+bool SkCubicClipper::clipCubic(const SkPoint srcPts[4], SkPoint dst[4]) {
+ bool reverse;
+
+ // we need the data to be monotonically descending in Y
+ if (srcPts[0].fY > srcPts[3].fY) {
+ dst[0] = srcPts[3];
+ dst[1] = srcPts[2];
+ dst[2] = srcPts[1];
+ dst[3] = srcPts[0];
+ reverse = true;
+ } else {
+ memcpy(dst, srcPts, 4 * sizeof(SkPoint));
+ reverse = false;
+ }
+
+ // are we completely above or below
+ const SkScalar ctop = fClip.fTop;
+ const SkScalar cbot = fClip.fBottom;
+ if (dst[3].fY <= ctop || dst[0].fY >= cbot) {
+ return false;
+ }
+
+ SkScalar t;
+ SkPoint tmp[7]; // for SkChopCubicAt
+
+ // are we partially above
+ if (dst[0].fY < ctop && ChopMonoAtY(dst, ctop, &t)) {
+ SkChopCubicAt(dst, tmp, t);
+ dst[0] = tmp[3];
+ dst[1] = tmp[4];
+ dst[2] = tmp[5];
+ }
+
+ // are we partially below
+ if (dst[3].fY > cbot && ChopMonoAtY(dst, cbot, &t)) {
+ SkChopCubicAt(dst, tmp, t);
+ dst[1] = tmp[1];
+ dst[2] = tmp[2];
+ dst[3] = tmp[3];
+ }
+
+ if (reverse) {
+ SkTSwap<SkPoint>(dst[0], dst[3]);
+ SkTSwap<SkPoint>(dst[1], dst[2]);
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkCubicClipper.h b/gfx/skia/skia/src/core/SkCubicClipper.h
new file mode 100644
index 000000000..f00c09ab5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCubicClipper.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkCubicClipper_DEFINED
+#define SkCubicClipper_DEFINED
+
+#include "SkPoint.h"
+#include "SkRect.h"
+
+/** This class is initialized with a clip rectangle, and then can be fed cubics,
+ which must already be monotonic in Y.
+
+ In the future, it might return a series of segments, allowing it to clip
+ also in X, to ensure that all segments fit in a finite coordinate system.
+ */
+class SkCubicClipper {
+public:
+ SkCubicClipper();
+
+ void setClip(const SkIRect& clip);
+
+ bool SK_WARN_UNUSED_RESULT clipCubic(const SkPoint src[4], SkPoint dst[4]);
+
+ static bool SK_WARN_UNUSED_RESULT ChopMonoAtY(const SkPoint pts[4], SkScalar y, SkScalar* t);
+private:
+ SkRect fClip;
+};
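+
+// Minimal usage sketch (editorial addition; |clipBounds| and |mono|, a cubic already
+// monotonic in Y, are assumed to come from the caller):
+//
+//   SkCubicClipper clipper;
+//   clipper.setClip(clipBounds);   // SkIRect
+//   SkPoint clipped[4];
+//   if (clipper.clipCubic(mono, clipped)) {
+//       // clipped[] holds the part of the cubic inside the clip's vertical extent
+//   }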
+
+#endif // SkCubicClipper_DEFINED
diff --git a/gfx/skia/skia/src/core/SkData.cpp b/gfx/skia/skia/src/core/SkData.cpp
new file mode 100644
index 000000000..ec619d99e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkData.cpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkData.h"
+#include "SkOSFile.h"
+#include "SkOnce.h"
+#include "SkReadBuffer.h"
+#include "SkStream.h"
+#include "SkWriteBuffer.h"
+
+SkData::SkData(const void* ptr, size_t size, ReleaseProc proc, void* context) {
+ fPtr = const_cast<void*>(ptr);
+ fSize = size;
+ fReleaseProc = proc;
+ fReleaseProcContext = context;
+}
+
+// This constructor means we are inline with our fPtr's contents. Thus we set fPtr
+// to point right after this. We leave fReleaseProc as nullptr, since the contents are
+// in-place and we need to handle "delete" ourselves. See internal_dispose().
+//
+SkData::SkData(size_t size) {
+ fPtr = (char*)(this + 1); // contents are immediately after this
+ fSize = size;
+ fReleaseProc = nullptr;
+ fReleaseProcContext = nullptr;
+}
+
+SkData::~SkData() {
+ if (fReleaseProc) {
+ fReleaseProc(fPtr, fReleaseProcContext);
+ }
+}
+
+bool SkData::equals(const SkData* other) const {
+ if (nullptr == other) {
+ return false;
+ }
+
+ return fSize == other->fSize && !memcmp(fPtr, other->fPtr, fSize);
+}
+
+size_t SkData::copyRange(size_t offset, size_t length, void* buffer) const {
+ size_t available = fSize;
+ if (offset >= available || 0 == length) {
+ return 0;
+ }
+ available -= offset;
+ if (length > available) {
+ length = available;
+ }
+ SkASSERT(length > 0);
+
+ memcpy(buffer, this->bytes() + offset, length);
+ return length;
+}
+
+sk_sp<SkData> SkData::PrivateNewWithCopy(const void* srcOrNull, size_t length) {
+ if (0 == length) {
+ return SkData::MakeEmpty();
+ }
+
+ const size_t actualLength = length + sizeof(SkData);
+ if (actualLength < length) {
+ // we overflowed
+ sk_throw();
+ }
+
+ char* storage = (char*)sk_malloc_throw(actualLength);
+ SkData* data = new (storage) SkData(length);
+ if (srcOrNull) {
+ memcpy(data->writable_data(), srcOrNull, length);
+ }
+ return sk_sp<SkData>(data);
+}
+
+void SkData::DummyReleaseProc(const void*, void*) {}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkData> SkData::MakeEmpty() {
+ static SkOnce once;
+ static SkData* empty;
+
+ once([]{ empty = new SkData(nullptr, 0, nullptr, nullptr); });
+ return sk_ref_sp(empty);
+}
+
+// assumes fPtr was allocated via sk_malloc
+static void sk_free_releaseproc(const void* ptr, void*) {
+ sk_free((void*)ptr);
+}
+
+sk_sp<SkData> SkData::MakeFromMalloc(const void* data, size_t length) {
+ return sk_sp<SkData>(new SkData(data, length, sk_free_releaseproc, nullptr));
+}
+
+sk_sp<SkData> SkData::MakeWithCopy(const void* src, size_t length) {
+ SkASSERT(src);
+ return PrivateNewWithCopy(src, length);
+}
+
+sk_sp<SkData> SkData::MakeUninitialized(size_t length) {
+ return PrivateNewWithCopy(nullptr, length);
+}
+
+sk_sp<SkData> SkData::MakeWithProc(const void* ptr, size_t length, ReleaseProc proc, void* ctx) {
+ return sk_sp<SkData>(new SkData(ptr, length, proc, ctx));
+}
+
+// assumes fPtr was allocated with sk_fmmap
+static void sk_mmap_releaseproc(const void* addr, void* ctx) {
+ size_t length = reinterpret_cast<size_t>(ctx);
+ sk_fmunmap(addr, length);
+}
+
+sk_sp<SkData> SkData::MakeFromFILE(FILE* f) {
+ size_t size;
+ void* addr = sk_fmmap(f, &size);
+ if (nullptr == addr) {
+ return nullptr;
+ }
+
+ return SkData::MakeWithProc(addr, size, sk_mmap_releaseproc, reinterpret_cast<void*>(size));
+}
+
+sk_sp<SkData> SkData::MakeFromFileName(const char path[]) {
+ FILE* f = path ? sk_fopen(path, kRead_SkFILE_Flag) : nullptr;
+ if (nullptr == f) {
+ return nullptr;
+ }
+ auto data = MakeFromFILE(f);
+ sk_fclose(f);
+ return data;
+}
+
+sk_sp<SkData> SkData::MakeFromFD(int fd) {
+ size_t size;
+ void* addr = sk_fdmmap(fd, &size);
+ if (nullptr == addr) {
+ return nullptr;
+ }
+
+ return SkData::MakeWithProc(addr, size, sk_mmap_releaseproc, nullptr);
+}
+
+// assumes context is a SkData
+static void sk_dataref_releaseproc(const void*, void* context) {
+ SkData* src = reinterpret_cast<SkData*>(context);
+ src->unref();
+}
+
+sk_sp<SkData> SkData::MakeSubset(const SkData* src, size_t offset, size_t length) {
+ /*
+ We could, if we wanted/need to, just make a deep copy of src's data,
+ rather than referencing it. This would duplicate the storage (of the
+ subset amount) but would possibly allow src to go out of scope sooner.
+ */
+
+ size_t available = src->size();
+ if (offset >= available || 0 == length) {
+ return SkData::MakeEmpty();
+ }
+ available -= offset;
+ if (length > available) {
+ length = available;
+ }
+ SkASSERT(length > 0);
+
+ src->ref(); // this will be balanced in sk_dataref_releaseproc
+ return sk_sp<SkData>(new SkData(src->bytes() + offset, length, sk_dataref_releaseproc,
+ const_cast<SkData*>(src)));
+}
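+
+// Illustrative note (editorial addition): a subset shares the original's storage and keeps
+// it alive through the release proc above. |buffer| is an assumed caller-provided array of
+// at least 100 bytes.
+//
+//   sk_sp<SkData> whole = SkData::MakeWithCopy(buffer, 100);
+//   sk_sp<SkData> tail  = SkData::MakeSubset(whole.get(), 90, 10);
+//   // tail->size() == 10, and tail->bytes() points into whole's storage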
+
+sk_sp<SkData> SkData::MakeWithCString(const char cstr[]) {
+ size_t size;
+ if (nullptr == cstr) {
+ cstr = "";
+ size = 1;
+ } else {
+ size = strlen(cstr) + 1;
+ }
+ return MakeWithCopy(cstr, size);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkData> SkData::MakeFromStream(SkStream* stream, size_t size) {
+ sk_sp<SkData> data(SkData::MakeUninitialized(size));
+ if (stream->read(data->writable_data(), size) != size) {
+ return nullptr;
+ }
+ return data;
+}
diff --git a/gfx/skia/skia/src/core/SkDataTable.cpp b/gfx/skia/skia/src/core/SkDataTable.cpp
new file mode 100644
index 000000000..ea2b91b90
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDataTable.cpp
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkData.h"
+#include "SkDataTable.h"
+#include "SkOnce.h"
+
+static void malloc_freeproc(void* context) {
+ sk_free(context);
+}
+
+// Makes empty table
+SkDataTable::SkDataTable() {
+ fCount = 0;
+ fElemSize = 0; // 0 signals that we use fDir instead of fElems
+ fU.fDir = nullptr;
+ fFreeProc = nullptr;
+ fFreeProcContext = nullptr;
+}
+
+SkDataTable::SkDataTable(const void* array, size_t elemSize, int count,
+ FreeProc proc, void* context) {
+ SkASSERT(count > 0);
+
+ fCount = count;
+ fElemSize = elemSize; // non-zero signals we use fElems instead of fDir
+ fU.fElems = (const char*)array;
+ fFreeProc = proc;
+ fFreeProcContext = context;
+}
+
+SkDataTable::SkDataTable(const Dir* dir, int count, FreeProc proc, void* ctx) {
+ SkASSERT(count > 0);
+
+ fCount = count;
+ fElemSize = 0; // 0 signals that we use fDir instead of fElems
+ fU.fDir = dir;
+ fFreeProc = proc;
+ fFreeProcContext = ctx;
+}
+
+SkDataTable::~SkDataTable() {
+ if (fFreeProc) {
+ fFreeProc(fFreeProcContext);
+ }
+}
+
+size_t SkDataTable::atSize(int index) const {
+ SkASSERT((unsigned)index < (unsigned)fCount);
+
+ if (fElemSize) {
+ return fElemSize;
+ } else {
+ return fU.fDir[index].fSize;
+ }
+}
+
+const void* SkDataTable::at(int index, size_t* size) const {
+ SkASSERT((unsigned)index < (unsigned)fCount);
+
+ if (fElemSize) {
+ if (size) {
+ *size = fElemSize;
+ }
+ return fU.fElems + index * fElemSize;
+ } else {
+ if (size) {
+ *size = fU.fDir[index].fSize;
+ }
+ return fU.fDir[index].fPtr;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkDataTable> SkDataTable::MakeEmpty() {
+ static SkDataTable* singleton;
+ static SkOnce once;
+ once([]{ singleton = new SkDataTable(); });
+ return sk_ref_sp(singleton);
+}
+
+sk_sp<SkDataTable> SkDataTable::MakeCopyArrays(const void * const * ptrs,
+ const size_t sizes[], int count) {
+ if (count <= 0) {
+ return SkDataTable::MakeEmpty();
+ }
+
+ size_t dataSize = 0;
+ for (int i = 0; i < count; ++i) {
+ dataSize += sizes[i];
+ }
+
+ size_t bufferSize = count * sizeof(Dir) + dataSize;
+ void* buffer = sk_malloc_throw(bufferSize);
+
+ Dir* dir = (Dir*)buffer;
+ char* elem = (char*)(dir + count);
+ for (int i = 0; i < count; ++i) {
+ dir[i].fPtr = elem;
+ dir[i].fSize = sizes[i];
+ memcpy(elem, ptrs[i], sizes[i]);
+ elem += sizes[i];
+ }
+
+ return sk_sp<SkDataTable>(new SkDataTable(dir, count, malloc_freeproc, buffer));
+}
+
+sk_sp<SkDataTable> SkDataTable::MakeCopyArray(const void* array, size_t elemSize, int count) {
+ if (count <= 0) {
+ return SkDataTable::MakeEmpty();
+ }
+
+ size_t bufferSize = elemSize * count;
+ void* buffer = sk_malloc_throw(bufferSize);
+ memcpy(buffer, array, bufferSize);
+
+ return sk_sp<SkDataTable>(new SkDataTable(buffer, elemSize, count, malloc_freeproc, buffer));
+}
+
+sk_sp<SkDataTable> SkDataTable::MakeArrayProc(const void* array, size_t elemSize, int count,
+ FreeProc proc, void* ctx) {
+ if (count <= 0) {
+ return SkDataTable::MakeEmpty();
+ }
+ return sk_sp<SkDataTable>(new SkDataTable(array, elemSize, count, proc, ctx));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void chunkalloc_freeproc(void* context) { delete (SkChunkAlloc*)context; }
+
+SkDataTableBuilder::SkDataTableBuilder(size_t minChunkSize)
+ : fHeap(nullptr)
+ , fMinChunkSize(minChunkSize) {}
+
+SkDataTableBuilder::~SkDataTableBuilder() { this->reset(); }
+
+void SkDataTableBuilder::reset(size_t minChunkSize) {
+ fMinChunkSize = minChunkSize;
+ fDir.reset();
+ if (fHeap) {
+ delete fHeap;
+ fHeap = nullptr;
+ }
+}
+
+void SkDataTableBuilder::append(const void* src, size_t size) {
+ if (nullptr == fHeap) {
+ fHeap = new SkChunkAlloc(fMinChunkSize);
+ }
+
+ void* dst = fHeap->alloc(size, SkChunkAlloc::kThrow_AllocFailType);
+ memcpy(dst, src, size);
+
+ SkDataTable::Dir* dir = fDir.append();
+ dir->fPtr = dst;
+ dir->fSize = size;
+}
+
+sk_sp<SkDataTable> SkDataTableBuilder::detachDataTable() {
+ const int count = fDir.count();
+ if (0 == count) {
+ return SkDataTable::MakeEmpty();
+ }
+
+ // Copy the dir into the heap.
+ void* dir = fHeap->alloc(count * sizeof(SkDataTable::Dir), SkChunkAlloc::kThrow_AllocFailType);
+ memcpy(dir, fDir.begin(), count * sizeof(SkDataTable::Dir));
+
+ sk_sp<SkDataTable> table(
+ new SkDataTable((SkDataTable::Dir*)dir, count, chunkalloc_freeproc, fHeap));
+ // we have to detach our fHeap, since we are giving that to the table
+ fHeap = nullptr;
+ fDir.reset();
+ return table;
+}
diff --git a/gfx/skia/skia/src/core/SkDebug.cpp b/gfx/skia/skia/src/core/SkDebug.cpp
new file mode 100644
index 000000000..b0cb6c124
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDebug.cpp
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+
+#if defined(GOOGLE3)
+void SkDebugfForDumpStackTrace(const char* data, void* unused) {
+ SkDebugf("%s", data);
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkDeduper.h b/gfx/skia/skia/src/core/SkDeduper.h
new file mode 100644
index 000000000..f82f4fd8c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDeduper.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDeduper_DEFINED
+#define SkDeduper_DEFINED
+
+#include "SkTypes.h"
+
+class SkImage;
+class SkPicture;
+class SkTypeface;
+
+class SkDeduper {
+public:
+ virtual ~SkDeduper() {}
+
+ // These return 0 on failure
+
+ virtual int findOrDefineImage(SkImage*) = 0;
+ virtual int findOrDefinePicture(SkPicture*) = 0;
+ virtual int findOrDefineTypeface(SkTypeface*) = 0;
+ virtual int findOrDefineFactory(SkFlattenable*) = 0;
+};
+
+class SkInflator {
+public:
+ virtual ~SkInflator() {}
+
+ virtual SkImage* getImage(int) = 0;
+ virtual SkPicture* getPicture(int) = 0;
+ virtual SkTypeface* getTypeface(int) = 0;
+ virtual SkFlattenable::Factory getFactory(int) = 0;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDeque.cpp b/gfx/skia/skia/src/core/SkDeque.cpp
new file mode 100644
index 000000000..f9ab4af53
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDeque.cpp
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDeque.h"
+
+struct SkDeque::Block {
+ Block* fNext;
+ Block* fPrev;
+ char* fBegin; // start of used section in this chunk
+ char* fEnd; // end of used section in this chunk
+ char* fStop; // end of the allocated chunk
+
+ char* start() { return (char*)(this + 1); }
+ const char* start() const { return (const char*)(this + 1); }
+
+ void init(size_t size) {
+ fNext = fPrev = nullptr;
+ fBegin = fEnd = nullptr;
+ fStop = (char*)this + size;
+ }
+};
+
+SkDeque::SkDeque(size_t elemSize, int allocCount)
+ : fElemSize(elemSize)
+ , fInitialStorage(nullptr)
+ , fCount(0)
+ , fAllocCount(allocCount) {
+ SkASSERT(allocCount >= 1);
+ fFrontBlock = fBackBlock = nullptr;
+ fFront = fBack = nullptr;
+}
+
+SkDeque::SkDeque(size_t elemSize, void* storage, size_t storageSize, int allocCount)
+ : fElemSize(elemSize)
+ , fInitialStorage(storage)
+ , fCount(0)
+ , fAllocCount(allocCount) {
+ SkASSERT(storageSize == 0 || storage != nullptr);
+ SkASSERT(allocCount >= 1);
+
+ if (storageSize >= sizeof(Block) + elemSize) {
+ fFrontBlock = (Block*)storage;
+ fFrontBlock->init(storageSize);
+ } else {
+ fFrontBlock = nullptr;
+ }
+ fBackBlock = fFrontBlock;
+ fFront = fBack = nullptr;
+}
+
+SkDeque::~SkDeque() {
+ Block* head = fFrontBlock;
+ Block* initialHead = (Block*)fInitialStorage;
+
+ while (head) {
+ Block* next = head->fNext;
+ if (head != initialHead) {
+ this->freeBlock(head);
+ }
+ head = next;
+ }
+}
+
+void* SkDeque::push_front() {
+ fCount += 1;
+
+ if (nullptr == fFrontBlock) {
+ fFrontBlock = this->allocateBlock(fAllocCount);
+        fBackBlock = fFrontBlock;   // update our linked list
+ }
+
+ Block* first = fFrontBlock;
+ char* begin;
+
+ if (nullptr == first->fBegin) {
+ INIT_CHUNK:
+ first->fEnd = first->fStop;
+ begin = first->fStop - fElemSize;
+ } else {
+ begin = first->fBegin - fElemSize;
+ if (begin < first->start()) { // no more room in this chunk
+ // should we alloc more as we accumulate more elements?
+ first = this->allocateBlock(fAllocCount);
+ first->fNext = fFrontBlock;
+ fFrontBlock->fPrev = first;
+ fFrontBlock = first;
+ goto INIT_CHUNK;
+ }
+ }
+
+ first->fBegin = begin;
+
+ if (nullptr == fFront) {
+ SkASSERT(nullptr == fBack);
+ fFront = fBack = begin;
+ } else {
+ SkASSERT(fBack);
+ fFront = begin;
+ }
+
+ return begin;
+}
+
+void* SkDeque::push_back() {
+ fCount += 1;
+
+ if (nullptr == fBackBlock) {
+ fBackBlock = this->allocateBlock(fAllocCount);
+        fFrontBlock = fBackBlock;   // update our linked list
+ }
+
+ Block* last = fBackBlock;
+ char* end;
+
+ if (nullptr == last->fBegin) {
+ INIT_CHUNK:
+ last->fBegin = last->start();
+ end = last->fBegin + fElemSize;
+ } else {
+ end = last->fEnd + fElemSize;
+ if (end > last->fStop) { // no more room in this chunk
+ // should we alloc more as we accumulate more elements?
+ last = this->allocateBlock(fAllocCount);
+ last->fPrev = fBackBlock;
+ fBackBlock->fNext = last;
+ fBackBlock = last;
+ goto INIT_CHUNK;
+ }
+ }
+
+ last->fEnd = end;
+ end -= fElemSize;
+
+ if (nullptr == fBack) {
+ SkASSERT(nullptr == fFront);
+ fFront = fBack = end;
+ } else {
+ SkASSERT(fFront);
+ fBack = end;
+ }
+
+ return end;
+}
+
+void SkDeque::pop_front() {
+ SkASSERT(fCount > 0);
+ fCount -= 1;
+
+ Block* first = fFrontBlock;
+
+ SkASSERT(first != nullptr);
+
+ if (first->fBegin == nullptr) { // we were marked empty from before
+ first = first->fNext;
+ first->fPrev = nullptr;
+ this->freeBlock(fFrontBlock);
+ fFrontBlock = first;
+ SkASSERT(first != nullptr); // else we popped too far
+ }
+
+ char* begin = first->fBegin + fElemSize;
+ SkASSERT(begin <= first->fEnd);
+
+ if (begin < fFrontBlock->fEnd) {
+ first->fBegin = begin;
+ SkASSERT(first->fBegin);
+ fFront = first->fBegin;
+ } else {
+ first->fBegin = first->fEnd = nullptr; // mark as empty
+ if (nullptr == first->fNext) {
+ fFront = fBack = nullptr;
+ } else {
+ SkASSERT(first->fNext->fBegin);
+ fFront = first->fNext->fBegin;
+ }
+ }
+}
+
+void SkDeque::pop_back() {
+ SkASSERT(fCount > 0);
+ fCount -= 1;
+
+ Block* last = fBackBlock;
+
+ SkASSERT(last != nullptr);
+
+ if (last->fEnd == nullptr) { // we were marked empty from before
+ last = last->fPrev;
+ last->fNext = nullptr;
+ this->freeBlock(fBackBlock);
+ fBackBlock = last;
+ SkASSERT(last != nullptr); // else we popped too far
+ }
+
+ char* end = last->fEnd - fElemSize;
+ SkASSERT(end >= last->fBegin);
+
+ if (end > last->fBegin) {
+ last->fEnd = end;
+ SkASSERT(last->fEnd);
+ fBack = last->fEnd - fElemSize;
+ } else {
+ last->fBegin = last->fEnd = nullptr; // mark as empty
+ if (nullptr == last->fPrev) {
+ fFront = fBack = nullptr;
+ } else {
+ SkASSERT(last->fPrev->fEnd);
+ fBack = last->fPrev->fEnd - fElemSize;
+ }
+ }
+}
+
+int SkDeque::numBlocksAllocated() const {
+ int numBlocks = 0;
+
+ for (const Block* temp = fFrontBlock; temp; temp = temp->fNext) {
+ ++numBlocks;
+ }
+
+ return numBlocks;
+}
+
+SkDeque::Block* SkDeque::allocateBlock(int allocCount) {
+ Block* newBlock = (Block*)sk_malloc_throw(sizeof(Block) + allocCount * fElemSize);
+ newBlock->init(sizeof(Block) + allocCount * fElemSize);
+ return newBlock;
+}
+
+void SkDeque::freeBlock(Block* block) {
+ sk_free(block);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkDeque::Iter::Iter() : fCurBlock(nullptr), fPos(nullptr), fElemSize(0) {}
+
+SkDeque::Iter::Iter(const SkDeque& d, IterStart startLoc) {
+ this->reset(d, startLoc);
+}
+
+// Due to how reset and next work, next actually returns the current element
+// pointed to by fPos and then updates fPos to point to the next one.
+void* SkDeque::Iter::next() {
+ char* pos = fPos;
+
+ if (pos) { // if we were valid, try to move to the next setting
+ char* next = pos + fElemSize;
+ SkASSERT(next <= fCurBlock->fEnd);
+ if (next == fCurBlock->fEnd) { // exhausted this chunk, move to next
+ do {
+ fCurBlock = fCurBlock->fNext;
+ } while (fCurBlock != nullptr && fCurBlock->fBegin == nullptr);
+ next = fCurBlock ? fCurBlock->fBegin : nullptr;
+ }
+ fPos = next;
+ }
+ return pos;
+}
+
+// Like next, prev actually returns the current element pointed to by fPos and
+// then makes fPos point to the previous element.
+void* SkDeque::Iter::prev() {
+ char* pos = fPos;
+
+ if (pos) { // if we were valid, try to move to the prior setting
+ char* prev = pos - fElemSize;
+ SkASSERT(prev >= fCurBlock->fBegin - fElemSize);
+ if (prev < fCurBlock->fBegin) { // exhausted this chunk, move to prior
+ do {
+ fCurBlock = fCurBlock->fPrev;
+ } while (fCurBlock != nullptr && fCurBlock->fEnd == nullptr);
+ prev = fCurBlock ? fCurBlock->fEnd - fElemSize : nullptr;
+ }
+ fPos = prev;
+ }
+ return pos;
+}
+
+// reset works by skipping through the spare blocks at the start (or end)
+// of the doubly linked list until a non-empty one is found. The fPos
+// member is then set to the first (or last) element in the block. If
+// there are no elements in the deque, both fCurrBlock and fPos will come
+// out of this routine as nullptr.
+void SkDeque::Iter::reset(const SkDeque& d, IterStart startLoc) {
+ fElemSize = d.fElemSize;
+
+ if (kFront_IterStart == startLoc) {
+ // initialize the iterator to start at the front
+ fCurBlock = d.fFrontBlock;
+ while (fCurBlock && nullptr == fCurBlock->fBegin) {
+ fCurBlock = fCurBlock->fNext;
+ }
+ fPos = fCurBlock ? fCurBlock->fBegin : nullptr;
+ } else {
+ // initialize the iterator to start at the back
+ fCurBlock = d.fBackBlock;
+ while (fCurBlock && nullptr == fCurBlock->fEnd) {
+ fCurBlock = fCurBlock->fPrev;
+ }
+ fPos = fCurBlock ? fCurBlock->fEnd - fElemSize : nullptr;
+ }
+}
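+
+// A minimal sketch of driving the deque and its iterator, assuming int-sized
+// elements and the iterator-start enum declared in SkDeque.h; the values are
+// illustrative only.
+//
+//   SkDeque deque(sizeof(int), 16);
+//   *(int*)deque.push_back()  = 1;                   // push_* return raw element storage
+//   *(int*)deque.push_front() = 0;
+//   SkDeque::Iter iter(deque, SkDeque::Iter::kFront_IterStart);
+//   while (void* ptr = iter.next()) {                // yields 0, then 1
+//       int value = *(int*)ptr;
+//   }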
diff --git a/gfx/skia/skia/src/core/SkDescriptor.h b/gfx/skia/skia/src/core/SkDescriptor.h
new file mode 100644
index 000000000..efa02783b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDescriptor.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDescriptor_DEFINED
+#define SkDescriptor_DEFINED
+
+#include "SkOpts.h"
+#include "SkTypes.h"
+
+class SkDescriptor : SkNoncopyable {
+public:
+ static size_t ComputeOverhead(int entryCount) {
+ SkASSERT(entryCount >= 0);
+ return sizeof(SkDescriptor) + entryCount * sizeof(Entry);
+ }
+
+ static SkDescriptor* Alloc(size_t length) {
+ SkASSERT(SkAlign4(length) == length);
+ SkDescriptor* desc = (SkDescriptor*)sk_malloc_throw(length);
+ return desc;
+ }
+
+ static void Free(SkDescriptor* desc) {
+ sk_free(desc);
+ }
+
+ void init() {
+ fLength = sizeof(SkDescriptor);
+ fCount = 0;
+ }
+
+ uint32_t getLength() const { return fLength; }
+
+ void* addEntry(uint32_t tag, size_t length, const void* data = nullptr) {
+ SkASSERT(tag);
+ SkASSERT(SkAlign4(length) == length);
+ SkASSERT(this->findEntry(tag, nullptr) == nullptr);
+
+ Entry* entry = (Entry*)((char*)this + fLength);
+ entry->fTag = tag;
+ entry->fLen = SkToU32(length);
+ if (data) {
+ memcpy(entry + 1, data, length);
+ }
+
+ fCount += 1;
+ fLength = SkToU32(fLength + sizeof(Entry) + length);
+ return (entry + 1); // return its data
+ }
+
+ void computeChecksum() {
+ fChecksum = SkDescriptor::ComputeChecksum(this);
+ }
+
+#ifdef SK_DEBUG
+ void assertChecksum() const {
+ SkASSERT(SkDescriptor::ComputeChecksum(this) == fChecksum);
+ }
+#endif
+
+ const void* findEntry(uint32_t tag, uint32_t* length) const {
+ const Entry* entry = (const Entry*)(this + 1);
+ int count = fCount;
+
+ while (--count >= 0) {
+ if (entry->fTag == tag) {
+ if (length) {
+ *length = entry->fLen;
+ }
+ return entry + 1;
+ }
+ entry = (const Entry*)((const char*)(entry + 1) + entry->fLen);
+ }
+ return nullptr;
+ }
+
+ SkDescriptor* copy() const {
+ SkDescriptor* desc = SkDescriptor::Alloc(fLength);
+ memcpy(desc, this, fLength);
+ return desc;
+ }
+
+ bool operator==(const SkDescriptor& other) const {
+ // probe to see if we have a good checksum algo
+// SkASSERT(a.fChecksum != b.fChecksum || memcmp(&a, &b, a.fLength) == 0);
+
+ // the first value we should look at is the checksum, so this loop
+        // should terminate early if the descriptors are different.
+        // NOTE: if we wrote a sentinel value at the end of each, we could
+ // remove the aa < stop test in the loop...
+ const uint32_t* aa = (const uint32_t*)this;
+ const uint32_t* bb = (const uint32_t*)&other;
+ const uint32_t* stop = (const uint32_t*)((const char*)aa + fLength);
+ do {
+ if (*aa++ != *bb++)
+ return false;
+ } while (aa < stop);
+ return true;
+ }
+ bool operator!=(const SkDescriptor& other) const { return !(*this == other); }
+
+ uint32_t getChecksum() const { return fChecksum; }
+
+ struct Entry {
+ uint32_t fTag;
+ uint32_t fLen;
+ };
+
+#ifdef SK_DEBUG
+ uint32_t getCount() const { return fCount; }
+#endif
+
+private:
+ uint32_t fChecksum; // must be first
+ uint32_t fLength; // must be second
+ uint32_t fCount;
+
+ static uint32_t ComputeChecksum(const SkDescriptor* desc) {
+ const uint32_t* ptr = (const uint32_t*)desc + 1; // skip the checksum field
+ size_t len = desc->fLength - sizeof(uint32_t);
+ return SkOpts::hash(ptr, len);
+ }
+
+ // private so no one can create one except our factories
+ SkDescriptor() {}
+};
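+
+// A minimal sketch of how a descriptor is typically built and queried (roughly
+// the pattern used by the scaler-context code); the rec variable is
+// illustrative and kRec_SkDescriptorTag comes from SkScalerContext.h.
+//
+//   size_t size = SkDescriptor::ComputeOverhead(1) + sizeof(rec);
+//   SkAutoDescriptor ad(size);
+//   SkDescriptor* desc = ad.getDesc();
+//   desc->init();
+//   desc->addEntry(kRec_SkDescriptorTag, sizeof(rec), &rec);
+//   desc->computeChecksum();                         // finalize before hashing/compares
+//   uint32_t len;
+//   const void* data = desc->findEntry(kRec_SkDescriptorTag, &len);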
+
+#include "SkScalerContext.h"
+
+class SkAutoDescriptor : SkNoncopyable {
+public:
+ SkAutoDescriptor() : fDesc(nullptr) {}
+ SkAutoDescriptor(size_t size) : fDesc(nullptr) { this->reset(size); }
+ SkAutoDescriptor(const SkDescriptor& desc) : fDesc(nullptr) {
+ size_t size = desc.getLength();
+ this->reset(size);
+ memcpy(fDesc, &desc, size);
+ }
+
+ ~SkAutoDescriptor() { this->free(); }
+
+ void reset(size_t size) {
+ this->free();
+ if (size <= sizeof(fStorage)) {
+ fDesc = (SkDescriptor*)(void*)fStorage;
+ } else {
+ fDesc = SkDescriptor::Alloc(size);
+ }
+ }
+
+ SkDescriptor* getDesc() const { SkASSERT(fDesc); return fDesc; }
+private:
+ void free() {
+ if (fDesc != (SkDescriptor*)(void*)fStorage) {
+ SkDescriptor::Free(fDesc);
+ }
+ }
+
+ enum {
+ kStorageSize = sizeof(SkDescriptor)
+ + sizeof(SkDescriptor::Entry) + sizeof(SkScalerContext::Rec) // for rec
+ + sizeof(SkDescriptor::Entry) + sizeof(void*) // for typeface
+        + 32 // slop for occasional small extras
+ };
+ SkDescriptor* fDesc;
+ uint32_t fStorage[(kStorageSize + 3) >> 2];
+};
+#define SkAutoDescriptor(...) SK_REQUIRE_LOCAL_VAR(SkAutoDescriptor)
+
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDevice.cpp b/gfx/skia/skia/src/core/SkDevice.cpp
new file mode 100644
index 000000000..70eabab73
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDevice.cpp
@@ -0,0 +1,577 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkColorFilter.h"
+#include "SkDevice.h"
+#include "SkDraw.h"
+#include "SkDrawFilter.h"
+#include "SkImage_Base.h"
+#include "SkImageFilter.h"
+#include "SkImageFilterCache.h"
+#include "SkImagePriv.h"
+#include "SkLatticeIter.h"
+#include "SkMetaData.h"
+#include "SkPatchUtils.h"
+#include "SkPathPriv.h"
+#include "SkPathMeasure.h"
+#include "SkRasterClip.h"
+#include "SkRSXform.h"
+#include "SkShader.h"
+#include "SkSpecialImage.h"
+#include "SkTextBlobRunIterator.h"
+#include "SkTextToPathIter.h"
+
+SkBaseDevice::SkBaseDevice(const SkImageInfo& info, const SkSurfaceProps& surfaceProps)
+ : fInfo(info)
+ , fSurfaceProps(surfaceProps)
+{
+ fOrigin.setZero();
+ fMetaData = nullptr;
+}
+
+SkBaseDevice::~SkBaseDevice() { delete fMetaData; }
+
+SkMetaData& SkBaseDevice::getMetaData() {
+ // metadata users are rare, so we lazily allocate it. If that changes we
+ // can decide to just make it a field in the device (rather than a ptr)
+ if (nullptr == fMetaData) {
+ fMetaData = new SkMetaData;
+ }
+ return *fMetaData;
+}
+
+#ifdef SK_SUPPORT_LEGACY_ACCESSBITMAP
+const SkBitmap& SkBaseDevice::accessBitmap(bool changePixels) {
+ const SkBitmap& bitmap = this->onAccessBitmap();
+ if (changePixels) {
+ bitmap.notifyPixelsChanged();
+ }
+ return bitmap;
+}
+#endif
+
+SkPixelGeometry SkBaseDevice::CreateInfo::AdjustGeometry(const SkImageInfo& info,
+ TileUsage tileUsage,
+ SkPixelGeometry geo,
+ bool preserveLCDText) {
+ switch (tileUsage) {
+ case kPossible_TileUsage:
+ // (we think) for compatibility with old clients, we assume this layer can support LCD
+ // even though they may not have marked it as opaque... seems like we should update
+ // our callers (reed/robertphilips).
+ break;
+ case kNever_TileUsage:
+ if (!preserveLCDText) {
+ geo = kUnknown_SkPixelGeometry;
+ }
+ break;
+ }
+ return geo;
+}
+
+static inline bool is_int(float x) {
+ return x == (float) sk_float_round2int(x);
+}
+
+void SkBaseDevice::drawRegion(const SkDraw& draw, const SkRegion& region, const SkPaint& paint) {
+ bool isNonTranslate = draw.fMatrix->getType() & ~(SkMatrix::kTranslate_Mask);
+ bool complexPaint = paint.getStyle() != SkPaint::kFill_Style || paint.getMaskFilter() ||
+ paint.getPathEffect();
+ bool antiAlias = paint.isAntiAlias() && (!is_int(draw.fMatrix->getTranslateX()) ||
+ !is_int(draw.fMatrix->getTranslateY()));
+ if (isNonTranslate || complexPaint || antiAlias) {
+ SkPath path;
+ region.getBoundaryPath(&path);
+ return this->drawPath(draw, path, paint, nullptr, false);
+ }
+
+ SkRegion::Iterator it(region);
+ while (!it.done()) {
+ this->drawRect(draw, SkRect::Make(it.rect()), paint);
+ it.next();
+ }
+}
+
+void SkBaseDevice::drawArc(const SkDraw& draw, const SkRect& oval, SkScalar startAngle,
+ SkScalar sweepAngle, bool useCenter, const SkPaint& paint) {
+ SkPath path;
+ bool isFillNoPathEffect = SkPaint::kFill_Style == paint.getStyle() && !paint.getPathEffect();
+ SkPathPriv::CreateDrawArcPath(&path, oval, startAngle, sweepAngle, useCenter,
+ isFillNoPathEffect);
+ this->drawPath(draw, path, paint);
+}
+
+void SkBaseDevice::drawDRRect(const SkDraw& draw, const SkRRect& outer,
+ const SkRRect& inner, const SkPaint& paint) {
+ SkPath path;
+ path.addRRect(outer);
+ path.addRRect(inner);
+ path.setFillType(SkPath::kEvenOdd_FillType);
+ path.setIsVolatile(true);
+
+ const SkMatrix* preMatrix = nullptr;
+ const bool pathIsMutable = true;
+ this->drawPath(draw, path, paint, preMatrix, pathIsMutable);
+}
+
+void SkBaseDevice::drawPatch(const SkDraw& draw, const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkXfermode* xmode, const SkPaint& paint) {
+ SkPatchUtils::VertexData data;
+
+ SkISize lod = SkPatchUtils::GetLevelOfDetail(cubics, draw.fMatrix);
+
+ // It automatically adjusts lodX and lodY in case it exceeds the number of indices.
+ // If it fails to generate the vertices, then we do not draw.
+ if (SkPatchUtils::getVertexData(&data, cubics, colors, texCoords, lod.width(), lod.height())) {
+ this->drawVertices(draw, SkCanvas::kTriangles_VertexMode, data.fVertexCount, data.fPoints,
+ data.fTexCoords, data.fColors, xmode, data.fIndices, data.fIndexCount,
+ paint);
+ }
+}
+
+void SkBaseDevice::drawTextBlob(const SkDraw& draw, const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint &paint, SkDrawFilter* drawFilter) {
+
+ SkPaint runPaint = paint;
+
+ SkTextBlobRunIterator it(blob);
+ for (;!it.done(); it.next()) {
+ size_t textLen = it.glyphCount() * sizeof(uint16_t);
+ const SkPoint& offset = it.offset();
+ // applyFontToPaint() always overwrites the exact same attributes,
+ // so it is safe to not re-seed the paint for this reason.
+ it.applyFontToPaint(&runPaint);
+
+ if (drawFilter && !drawFilter->filter(&runPaint, SkDrawFilter::kText_Type)) {
+ // A false return from filter() means we should abort the current draw.
+ runPaint = paint;
+ continue;
+ }
+
+ runPaint.setFlags(this->filterTextFlags(runPaint));
+
+ switch (it.positioning()) {
+ case SkTextBlob::kDefault_Positioning:
+ this->drawText(draw, it.glyphs(), textLen, x + offset.x(), y + offset.y(), runPaint);
+ break;
+ case SkTextBlob::kHorizontal_Positioning:
+ this->drawPosText(draw, it.glyphs(), textLen, it.pos(), 1,
+ SkPoint::Make(x, y + offset.y()), runPaint);
+ break;
+ case SkTextBlob::kFull_Positioning:
+ this->drawPosText(draw, it.glyphs(), textLen, it.pos(), 2,
+ SkPoint::Make(x, y), runPaint);
+ break;
+ default:
+ SkFAIL("unhandled positioning mode");
+ }
+
+ if (drawFilter) {
+ // A draw filter may change the paint arbitrarily, so we must re-seed in this case.
+ runPaint = paint;
+ }
+ }
+}
+
+void SkBaseDevice::drawImage(const SkDraw& draw, const SkImage* image, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ // Default impl : turns everything into raster bitmap
+ SkBitmap bm;
+ if (as_IB(image)->getROPixels(&bm)) {
+ this->drawBitmap(draw, bm, SkMatrix::MakeTrans(x, y), paint);
+ }
+}
+
+void SkBaseDevice::drawImageRect(const SkDraw& draw, const SkImage* image, const SkRect* src,
+ const SkRect& dst, const SkPaint& paint,
+ SkCanvas::SrcRectConstraint constraint) {
+ // Default impl : turns everything into raster bitmap
+ SkBitmap bm;
+ if (as_IB(image)->getROPixels(&bm)) {
+ this->drawBitmapRect(draw, bm, src, dst, paint, constraint);
+ }
+}
+
+void SkBaseDevice::drawImageNine(const SkDraw& draw, const SkImage* image, const SkIRect& center,
+ const SkRect& dst, const SkPaint& paint) {
+ SkLatticeIter iter(image->width(), image->height(), center, dst);
+
+ SkRect srcR, dstR;
+ while (iter.next(&srcR, &dstR)) {
+ this->drawImageRect(draw, image, &srcR, dstR, paint, SkCanvas::kStrict_SrcRectConstraint);
+ }
+}
+
+void SkBaseDevice::drawBitmapNine(const SkDraw& draw, const SkBitmap& bitmap, const SkIRect& center,
+ const SkRect& dst, const SkPaint& paint) {
+ SkLatticeIter iter(bitmap.width(), bitmap.height(), center, dst);
+
+ SkRect srcR, dstR;
+ while (iter.next(&srcR, &dstR)) {
+ this->drawBitmapRect(draw, bitmap, &srcR, dstR, paint, SkCanvas::kStrict_SrcRectConstraint);
+ }
+}
+
+void SkBaseDevice::drawImageLattice(const SkDraw& draw, const SkImage* image,
+ const SkCanvas::Lattice& lattice, const SkRect& dst,
+ const SkPaint& paint) {
+ SkLatticeIter iter(lattice, dst);
+
+ SkRect srcR, dstR;
+ while (iter.next(&srcR, &dstR)) {
+ this->drawImageRect(draw, image, &srcR, dstR, paint, SkCanvas::kStrict_SrcRectConstraint);
+ }
+}
+
+void SkBaseDevice::drawBitmapLattice(const SkDraw& draw, const SkBitmap& bitmap,
+ const SkCanvas::Lattice& lattice, const SkRect& dst,
+ const SkPaint& paint) {
+ SkLatticeIter iter(lattice, dst);
+
+ SkRect srcR, dstR;
+ while (iter.next(&srcR, &dstR)) {
+ this->drawBitmapRect(draw, bitmap, &srcR, dstR, paint, SkCanvas::kStrict_SrcRectConstraint);
+ }
+}
+
+void SkBaseDevice::drawAtlas(const SkDraw& draw, const SkImage* atlas, const SkRSXform xform[],
+ const SkRect tex[], const SkColor colors[], int count,
+ SkXfermode::Mode mode, const SkPaint& paint) {
+ SkPath path;
+ path.setIsVolatile(true);
+
+ for (int i = 0; i < count; ++i) {
+ SkPoint quad[4];
+ xform[i].toQuad(tex[i].width(), tex[i].height(), quad);
+
+ SkMatrix localM;
+ localM.setRSXform(xform[i]);
+ localM.preTranslate(-tex[i].left(), -tex[i].top());
+
+ SkPaint pnt(paint);
+ sk_sp<SkShader> shader = atlas->makeShader(SkShader::kClamp_TileMode,
+ SkShader::kClamp_TileMode,
+ &localM);
+ if (!shader) {
+ break;
+ }
+ pnt.setShader(std::move(shader));
+
+ if (colors) {
+ pnt.setColorFilter(SkColorFilter::MakeModeFilter(colors[i], mode));
+ }
+
+ path.rewind();
+ path.addPoly(quad, 4, true);
+ path.setConvexity(SkPath::kConvex_Convexity);
+ this->drawPath(draw, path, pnt, nullptr, true);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkBaseDevice::drawSpecial(const SkDraw&, SkSpecialImage*, int x, int y, const SkPaint&) {}
+sk_sp<SkSpecialImage> SkBaseDevice::makeSpecial(const SkBitmap&) { return nullptr; }
+sk_sp<SkSpecialImage> SkBaseDevice::makeSpecial(const SkImage*) { return nullptr; }
+sk_sp<SkSpecialImage> SkBaseDevice::snapSpecial() { return nullptr; }
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkBaseDevice::readPixels(const SkImageInfo& info, void* dstP, size_t rowBytes, int x, int y) {
+#ifdef SK_DEBUG
+ SkASSERT(info.width() > 0 && info.height() > 0);
+ SkASSERT(dstP);
+ SkASSERT(rowBytes >= info.minRowBytes());
+ SkASSERT(x >= 0 && y >= 0);
+
+ const SkImageInfo& srcInfo = this->imageInfo();
+ SkASSERT(x + info.width() <= srcInfo.width());
+ SkASSERT(y + info.height() <= srcInfo.height());
+#endif
+ return this->onReadPixels(info, dstP, rowBytes, x, y);
+}
+
+bool SkBaseDevice::writePixels(const SkImageInfo& info, const void* pixels, size_t rowBytes,
+ int x, int y) {
+#ifdef SK_DEBUG
+ SkASSERT(info.width() > 0 && info.height() > 0);
+ SkASSERT(pixels);
+ SkASSERT(rowBytes >= info.minRowBytes());
+ SkASSERT(x >= 0 && y >= 0);
+
+ const SkImageInfo& dstInfo = this->imageInfo();
+ SkASSERT(x + info.width() <= dstInfo.width());
+ SkASSERT(y + info.height() <= dstInfo.height());
+#endif
+ return this->onWritePixels(info, pixels, rowBytes, x, y);
+}
+
+bool SkBaseDevice::onWritePixels(const SkImageInfo&, const void*, size_t, int, int) {
+ return false;
+}
+
+bool SkBaseDevice::onReadPixels(const SkImageInfo&, void*, size_t, int x, int y) {
+ return false;
+}
+
+bool SkBaseDevice::accessPixels(SkPixmap* pmap) {
+ SkPixmap tempStorage;
+ if (nullptr == pmap) {
+ pmap = &tempStorage;
+ }
+ return this->onAccessPixels(pmap);
+}
+
+bool SkBaseDevice::peekPixels(SkPixmap* pmap) {
+ SkPixmap tempStorage;
+ if (nullptr == pmap) {
+ pmap = &tempStorage;
+ }
+ return this->onPeekPixels(pmap);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+static void morphpoints(SkPoint dst[], const SkPoint src[], int count,
+ SkPathMeasure& meas, const SkMatrix& matrix) {
+ SkMatrix::MapXYProc proc = matrix.getMapXYProc();
+
+ for (int i = 0; i < count; i++) {
+ SkPoint pos;
+ SkVector tangent;
+
+ proc(matrix, src[i].fX, src[i].fY, &pos);
+ SkScalar sx = pos.fX;
+ SkScalar sy = pos.fY;
+
+ if (!meas.getPosTan(sx, &pos, &tangent)) {
+ // set to 0 if the measure failed, so that we just set dst == pos
+ tangent.set(0, 0);
+ }
+
+        /*  This is the old way (it explains our approach, but is way too slow):
+ SkMatrix matrix;
+ SkPoint pt;
+
+ pt.set(sx, sy);
+ matrix.setSinCos(tangent.fY, tangent.fX);
+ matrix.preTranslate(-sx, 0);
+ matrix.postTranslate(pos.fX, pos.fY);
+ matrix.mapPoints(&dst[i], &pt, 1);
+ */
+ dst[i].set(pos.fX - SkScalarMul(tangent.fY, sy),
+ pos.fY + SkScalarMul(tangent.fX, sy));
+ }
+}
+
+/* TODO
+
+ Need differentially more subdivisions when the follow-path is curvy. Not sure how to
+ determine that, but we need it. I guess a cheap answer is let the caller tell us,
+ but that seems like a cop-out. Another answer is to get Rob Johnson to figure it out.
+ */
+static void morphpath(SkPath* dst, const SkPath& src, SkPathMeasure& meas,
+ const SkMatrix& matrix) {
+ SkPath::Iter iter(src, false);
+ SkPoint srcP[4], dstP[3];
+ SkPath::Verb verb;
+
+ while ((verb = iter.next(srcP)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ morphpoints(dstP, srcP, 1, meas, matrix);
+ dst->moveTo(dstP[0]);
+ break;
+ case SkPath::kLine_Verb:
+ // turn lines into quads to look bendy
+ srcP[0].fX = SkScalarAve(srcP[0].fX, srcP[1].fX);
+ srcP[0].fY = SkScalarAve(srcP[0].fY, srcP[1].fY);
+ morphpoints(dstP, srcP, 2, meas, matrix);
+ dst->quadTo(dstP[0], dstP[1]);
+ break;
+ case SkPath::kQuad_Verb:
+ morphpoints(dstP, &srcP[1], 2, meas, matrix);
+ dst->quadTo(dstP[0], dstP[1]);
+ break;
+ case SkPath::kCubic_Verb:
+ morphpoints(dstP, &srcP[1], 3, meas, matrix);
+ dst->cubicTo(dstP[0], dstP[1], dstP[2]);
+ break;
+ case SkPath::kClose_Verb:
+ dst->close();
+ break;
+ default:
+ SkDEBUGFAIL("unknown verb");
+ break;
+ }
+ }
+}
+
+void SkBaseDevice::drawTextOnPath(const SkDraw& draw, const void* text, size_t byteLength,
+ const SkPath& follow, const SkMatrix* matrix,
+ const SkPaint& paint) {
+ SkASSERT(byteLength == 0 || text != nullptr);
+
+ // nothing to draw
+ if (text == nullptr || byteLength == 0 || draw.fRC->isEmpty()) {
+ return;
+ }
+
+ SkTextToPathIter iter((const char*)text, byteLength, paint, true);
+ SkPathMeasure meas(follow, false);
+ SkScalar hOffset = 0;
+
+ // need to measure first
+ if (paint.getTextAlign() != SkPaint::kLeft_Align) {
+ SkScalar pathLen = meas.getLength();
+ if (paint.getTextAlign() == SkPaint::kCenter_Align) {
+ pathLen = SkScalarHalf(pathLen);
+ }
+ hOffset += pathLen;
+ }
+
+ const SkPath* iterPath;
+ SkScalar xpos;
+ SkMatrix scaledMatrix;
+ SkScalar scale = iter.getPathScale();
+
+ scaledMatrix.setScale(scale, scale);
+
+ while (iter.next(&iterPath, &xpos)) {
+ if (iterPath) {
+ SkPath tmp;
+ SkMatrix m(scaledMatrix);
+
+ tmp.setIsVolatile(true);
+ m.postTranslate(xpos + hOffset, 0);
+ if (matrix) {
+ m.postConcat(*matrix);
+ }
+ morphpath(&tmp, *iterPath, meas, m);
+ this->drawPath(draw, tmp, iter.getPaint(), nullptr, true);
+ }
+ }
+}
+
+#include "SkUtils.h"
+typedef int (*CountTextProc)(const char* text);
+static int count_utf16(const char* text) {
+ const uint16_t* prev = (uint16_t*)text;
+ (void)SkUTF16_NextUnichar(&prev);
+ return SkToInt((const char*)prev - text);
+}
+static int return_4(const char* text) { return 4; }
+static int return_2(const char* text) { return 2; }
+
+void SkBaseDevice::drawTextRSXform(const SkDraw& draw, const void* text, size_t len,
+ const SkRSXform xform[], const SkPaint& paint) {
+ CountTextProc proc = nullptr;
+ switch (paint.getTextEncoding()) {
+ case SkPaint::kUTF8_TextEncoding:
+ proc = SkUTF8_CountUTF8Bytes;
+ break;
+ case SkPaint::kUTF16_TextEncoding:
+ proc = count_utf16;
+ break;
+ case SkPaint::kUTF32_TextEncoding:
+ proc = return_4;
+ break;
+ case SkPaint::kGlyphID_TextEncoding:
+ proc = return_2;
+ break;
+ }
+
+ SkDraw localD(draw);
+ SkMatrix localM, currM;
+ const void* stopText = (const char*)text + len;
+ while ((const char*)text < (const char*)stopText) {
+ localM.setRSXform(*xform++);
+ currM.setConcat(*draw.fMatrix, localM);
+ localD.fMatrix = &currM;
+ int subLen = proc((const char*)text);
+ this->drawText(localD, text, subLen, 0, 0, paint);
+ text = (const char*)text + subLen;
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+uint32_t SkBaseDevice::filterTextFlags(const SkPaint& paint) const {
+ uint32_t flags = paint.getFlags();
+
+ if (!paint.isLCDRenderText() || !paint.isAntiAlias()) {
+ return flags;
+ }
+
+ if (kUnknown_SkPixelGeometry == fSurfaceProps.pixelGeometry()
+ || this->onShouldDisableLCD(paint)) {
+
+ flags &= ~SkPaint::kLCDRenderText_Flag;
+ flags |= SkPaint::kGenA8FromLCD_Flag;
+ }
+
+ return flags;
+}
+
+sk_sp<SkSurface> SkBaseDevice::makeSurface(SkImageInfo const&, SkSurfaceProps const&) {
+ return nullptr;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+void SkBaseDevice::LogDrawScaleFactor(const SkMatrix& matrix, SkFilterQuality filterQuality) {
+#if SK_HISTOGRAMS_ENABLED
+ enum ScaleFactor {
+ kUpscale_ScaleFactor,
+ kNoScale_ScaleFactor,
+ kDownscale_ScaleFactor,
+ kLargeDownscale_ScaleFactor,
+
+ kLast_ScaleFactor = kLargeDownscale_ScaleFactor
+ };
+
+ float rawScaleFactor = matrix.getMinScale();
+
+ ScaleFactor scaleFactor;
+ if (rawScaleFactor < 0.5f) {
+ scaleFactor = kLargeDownscale_ScaleFactor;
+ } else if (rawScaleFactor < 1.0f) {
+ scaleFactor = kDownscale_ScaleFactor;
+ } else if (rawScaleFactor > 1.0f) {
+ scaleFactor = kUpscale_ScaleFactor;
+ } else {
+ scaleFactor = kNoScale_ScaleFactor;
+ }
+
+ switch (filterQuality) {
+ case kNone_SkFilterQuality:
+ SK_HISTOGRAM_ENUMERATION("DrawScaleFactor.NoneFilterQuality", scaleFactor,
+ kLast_ScaleFactor + 1);
+ break;
+ case kLow_SkFilterQuality:
+ SK_HISTOGRAM_ENUMERATION("DrawScaleFactor.LowFilterQuality", scaleFactor,
+ kLast_ScaleFactor + 1);
+ break;
+ case kMedium_SkFilterQuality:
+ SK_HISTOGRAM_ENUMERATION("DrawScaleFactor.MediumFilterQuality", scaleFactor,
+ kLast_ScaleFactor + 1);
+ break;
+ case kHigh_SkFilterQuality:
+ SK_HISTOGRAM_ENUMERATION("DrawScaleFactor.HighFilterQuality", scaleFactor,
+ kLast_ScaleFactor + 1);
+ break;
+ }
+
+ // Also log filter quality independent scale factor.
+ SK_HISTOGRAM_ENUMERATION("DrawScaleFactor.AnyFilterQuality", scaleFactor,
+ kLast_ScaleFactor + 1);
+
+ // Also log an overall histogram of filter quality.
+ SK_HISTOGRAM_ENUMERATION("FilterQuality", filterQuality, kLast_SkFilterQuality + 1);
+#endif
+}
+
diff --git a/gfx/skia/skia/src/core/SkDeviceLooper.cpp b/gfx/skia/skia/src/core/SkDeviceLooper.cpp
new file mode 100644
index 000000000..c4e401361
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDeviceLooper.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkDeviceLooper.h"
+
+SkDeviceLooper::SkDeviceLooper(const SkPixmap& base, const SkRasterClip& rc, const SkIRect& bounds,
+ bool aa)
+ : fBaseDst(base)
+ , fBaseRC(rc)
+ , fSubsetRC(rc.isForceConservativeRects())
+ , fDelta(aa ? kAA_Delta : kBW_Delta)
+{
+    // sentinels indicating that next() has not yet been called, and so our
+    // mapper functions should not be called either.
+ fCurrDst = nullptr;
+ fCurrRC = nullptr;
+
+ if (!rc.isEmpty()) {
+ // clip must be contained by the bitmap
+ SkASSERT(SkIRect::MakeWH(base.width(), base.height()).contains(rc.getBounds()));
+ }
+
+ if (rc.isEmpty() || !fClippedBounds.intersect(bounds, rc.getBounds())) {
+ fState = kDone_State;
+ } else if (this->fitsInDelta(fClippedBounds)) {
+ fState = kSimple_State;
+ } else {
+ // back up by 1 DX, so that next() will put us in a correct starting
+ // position.
+ fCurrOffset.set(fClippedBounds.left() - fDelta,
+ fClippedBounds.top());
+ fState = kComplex_State;
+ }
+}
+
+SkDeviceLooper::~SkDeviceLooper() {}
+
+void SkDeviceLooper::mapRect(SkRect* dst, const SkRect& src) const {
+ SkASSERT(kDone_State != fState);
+ SkASSERT(fCurrDst);
+ SkASSERT(fCurrRC);
+
+ *dst = src;
+ dst->offset(SkIntToScalar(-fCurrOffset.fX),
+ SkIntToScalar(-fCurrOffset.fY));
+}
+
+void SkDeviceLooper::mapMatrix(SkMatrix* dst, const SkMatrix& src) const {
+ SkASSERT(kDone_State != fState);
+ SkASSERT(fCurrDst);
+ SkASSERT(fCurrRC);
+
+ *dst = src;
+ dst->postTranslate(SkIntToScalar(-fCurrOffset.fX), SkIntToScalar(-fCurrOffset.fY));
+}
+
+bool SkDeviceLooper::computeCurrBitmapAndClip() {
+ SkASSERT(kComplex_State == fState);
+
+ SkIRect r = SkIRect::MakeXYWH(fCurrOffset.x(), fCurrOffset.y(),
+ fDelta, fDelta);
+ if (!fBaseDst.extractSubset(&fSubsetDst, r)) {
+ fSubsetRC.setEmpty();
+ } else {
+ fBaseRC.translate(-r.left(), -r.top(), &fSubsetRC);
+ (void)fSubsetRC.op(SkIRect::MakeWH(fDelta, fDelta), SkRegion::kIntersect_Op);
+ }
+
+ fCurrDst = &fSubsetDst;
+ fCurrRC = &fSubsetRC;
+ return !fCurrRC->isEmpty();
+}
+
+static bool next_tile(const SkIRect& boundary, int delta, SkIPoint* offset) {
+ // can we move to the right?
+ if (offset->x() + delta < boundary.right()) {
+ offset->fX += delta;
+ return true;
+ }
+
+ // reset to the left, but move down a row
+ offset->fX = boundary.left();
+ if (offset->y() + delta < boundary.bottom()) {
+ offset->fY += delta;
+ return true;
+ }
+
+ // offset is now outside of boundary, so we're done
+ return false;
+}
+
+bool SkDeviceLooper::next() {
+ switch (fState) {
+ case kDone_State:
+ // in theory, we should not get called here, since we must have
+ // previously returned false, but we check anyway.
+ break;
+
+ case kSimple_State:
+ // first time for simple
+ if (nullptr == fCurrDst) {
+ fCurrDst = &fBaseDst;
+ fCurrRC = &fBaseRC;
+ fCurrOffset.set(0, 0);
+ return true;
+ }
+ // 2nd time for simple, we are done
+ break;
+
+ case kComplex_State:
+            // need to propagate fCurrOffset through the clipped bounds
+ // left to right, until we wrap around and move down
+
+ while (next_tile(fClippedBounds, fDelta, &fCurrOffset)) {
+ if (this->computeCurrBitmapAndClip()) {
+ return true;
+ }
+ }
+ break;
+ }
+ fState = kDone_State;
+ return false;
+}
diff --git a/gfx/skia/skia/src/core/SkDeviceLooper.h b/gfx/skia/skia/src/core/SkDeviceLooper.h
new file mode 100644
index 000000000..dd346d744
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDeviceLooper.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDeviceLooper_DEFINED
+#define SkDeviceLooper_DEFINED
+
+#include "SkBitmap.h"
+#include "SkMatrix.h"
+#include "SkRasterClip.h"
+
+/**
+ *  Helper class to manage "tiling" a large coordinate space into manageable
+ *  chunks, where manageable means areas that are <= some max critical coordinate
+ *  size.
+ *
+ *  The constructor takes an antialiasing bool, which affects what this maximum
+ *  allowable size is: If we're drawing BW, then we need coordinates to stay
+ *  safely within fixed-point range (we use +- 16K, to give ourselves room to
+ *  add/subtract two fixed values and still be in range). If we're drawing AA,
+ *  then we reduce that size by the amount that the supersampler scan converter
+ *  needs (at the moment, that is 4X, so the "safe" range is +- 4K).
+ *
+ *  For performance reasons, the class first checks to see if any help is needed
+ *  at all, and if not (i.e. the specified bounds and base bitmap are already
+ *  in the safe zone), then the class does nothing (effectively).
+ */
+class SkDeviceLooper {
+public:
+ SkDeviceLooper(const SkPixmap& base, const SkRasterClip&, const SkIRect& bounds, bool aa);
+ ~SkDeviceLooper();
+
+ const SkPixmap& getPixmap() const {
+ SkASSERT(kDone_State != fState);
+ SkASSERT(fCurrDst);
+ return *fCurrDst;
+ }
+
+ const SkRasterClip& getRC() const {
+ SkASSERT(kDone_State != fState);
+ SkASSERT(fCurrRC);
+ return *fCurrRC;
+ }
+
+ void mapRect(SkRect* dst, const SkRect& src) const;
+ void mapMatrix(SkMatrix* dst, const SkMatrix& src) const;
+
+ /**
+ * Call next to setup the looper to return a valid coordinate chunk.
+ * Each time this returns true, it is safe to call mapRect() and
+ * mapMatrix(), to convert from "global" coordinate values to ones that
+ * are local to this chunk.
+ *
+ * When next() returns false, the list of chunks is done, and mapRect()
+ * and mapMatrix() should no longer be called.
+ */
+ bool next();
+
+private:
+ const SkPixmap& fBaseDst;
+ const SkRasterClip& fBaseRC;
+
+ enum State {
+ kDone_State, // iteration is complete, getters will assert
+ kSimple_State, // no translate/clip mods needed
+ kComplex_State
+ };
+
+ // storage for our tiled versions. Perhaps could use SkTLazy
+ SkPixmap fSubsetDst;
+ SkRasterClip fSubsetRC;
+
+ const SkPixmap* fCurrDst;
+ const SkRasterClip* fCurrRC;
+ SkIRect fClippedBounds;
+ SkIPoint fCurrOffset;
+ int fDelta;
+ State fState;
+
+ enum Delta {
+ kBW_Delta = 1 << 14, // 16K, gives room to spare for fixedpoint
+ kAA_Delta = kBW_Delta >> 2 // supersample 4x
+ };
+
+ bool fitsInDelta(const SkIRect& r) const {
+ return r.right() < fDelta && r.bottom() < fDelta;
+ }
+
+ bool computeCurrBitmapAndClip();
+};
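+
+// A minimal sketch of the intended driving loop, assuming a raster draw that
+// already has a destination pixmap, raster clip, device bounds, and view
+// matrix in scope; the variable names are illustrative only.
+//
+//   SkDeviceLooper looper(pixmap, rasterClip, devBounds, doAA);
+//   while (looper.next()) {
+//       SkRect localBounds;
+//       SkMatrix localMatrix;
+//       looper.mapRect(&localBounds, SkRect::Make(devBounds));
+//       looper.mapMatrix(&localMatrix, viewMatrix);
+//       // draw into looper.getPixmap(), clipped by looper.getRC()
+//   }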
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDeviceProfile.cpp b/gfx/skia/skia/src/core/SkDeviceProfile.cpp
new file mode 100644
index 000000000..e1c10c87d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDeviceProfile.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDeviceProfile.h"
+#include "SkMutex.h"
+
+#define DEFAULT_GAMMAEXP 2.2f
+#define DEFAULT_CONTRASTSCALE 0.5f
+#define DEFAULT_LCDCONFIG SkDeviceProfile::kNone_LCDConfig
+#define DEFAULT_FONTHINTLEVEL SkDeviceProfile::kSlight_FontHintLevel
+
+static float pin(float value, float min, float max) {
+ if (value < min) {
+ value = min;
+ } else if (value > max) {
+ value = max;
+ }
+ return value;
+}
+
+SkDeviceProfile::SkDeviceProfile(float gammaExp, float contrast,
+ LCDConfig config, FontHintLevel level) {
+ fGammaExponent = pin(gammaExp, 0, 10);
+ fContrastScale = pin(contrast, 0, 1);
+ fLCDConfig = config;
+ fFontHintLevel = level;
+}
+
+void SkDeviceProfile::generateTableForLuminanceByte(U8CPU lumByte,
+ uint8_t table[256]) const {
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkDeviceProfile* SkDeviceProfile::Create(float gammaExp,
+ float contrast,
+ LCDConfig config,
+ FontHintLevel level) {
+ return new SkDeviceProfile(gammaExp, contrast, config, level);
+}
+
+SK_DECLARE_STATIC_MUTEX(gMutex);
+static SkDeviceProfile* gDefaultProfile;
+static SkDeviceProfile* gGlobalProfile;
+
+SkDeviceProfile* SkDeviceProfile::GetDefault() {
+ SkAutoMutexAcquire amc(gMutex);
+
+ if (nullptr == gDefaultProfile) {
+ gDefaultProfile = SkDeviceProfile::Create(DEFAULT_GAMMAEXP,
+ DEFAULT_CONTRASTSCALE,
+ DEFAULT_LCDCONFIG,
+ DEFAULT_FONTHINTLEVEL);
+ }
+ return gDefaultProfile;
+}
+
+SkDeviceProfile* SkDeviceProfile::RefGlobal() {
+ SkAutoMutexAcquire amc(gMutex);
+
+ if (nullptr == gGlobalProfile) {
+ gGlobalProfile = SkDeviceProfile::GetDefault();
+ }
+ gGlobalProfile->ref();
+ return gGlobalProfile;
+}
+
+void SkDeviceProfile::SetGlobal(SkDeviceProfile* profile) {
+ SkAutoMutexAcquire amc(gMutex);
+
+ SkRefCnt_SafeAssign(gGlobalProfile, profile);
+}
diff --git a/gfx/skia/skia/src/core/SkDeviceProfile.h b/gfx/skia/skia/src/core/SkDeviceProfile.h
new file mode 100644
index 000000000..ed533f832
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDeviceProfile.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDeviceProfile_DEFINED
+#define SkDeviceProfile_DEFINED
+
+#include "SkRefCnt.h"
+
+class SkDeviceProfile : public SkRefCnt {
+public:
+
+
+ enum LCDConfig {
+ kNone_LCDConfig, // disables LCD text rendering, uses A8 instead
+ kRGB_Horizontal_LCDConfig,
+ kBGR_Horizontal_LCDConfig,
+ kRGB_Vertical_LCDConfig,
+ kBGR_Vertical_LCDConfig
+ };
+
+ enum FontHintLevel {
+ kNone_FontHintLevel,
+ kSlight_FontHintLevel,
+ kNormal_FontHintLevel,
+ kFull_FontHintLevel,
+ kAuto_FontHintLevel
+ };
+
+ /**
+ * gammaExp is typically between 1.0 and 2.2. For no gamma adjustment,
+ * specify 1.0
+ *
+ * contrastScale will be pinned between 0.0 and 1.0. For no contrast
+ * adjustment, specify 0.0
+ *
+ * @param config Describes the LCD layout for this device. If this is set
+ * to kNone, then all requests for LCD text will be
+ * devolved to A8 antialiasing.
+ *
+ * @param level The hinting level to be used, IF the paint specifies
+ * "default". Otherwise the paint's hinting level will be
+ * respected.
+ */
+ static SkDeviceProfile* Create(float gammaExp,
+ float contrastScale,
+ LCDConfig,
+ FontHintLevel);
+
+ /**
+ * Returns the global default profile, that is used if no global profile is
+ * specified with SetGlobal(), or if nullptr is specified to SetGlobal().
+     *  The reference count is *not* incremented, and the caller should not
+ * call unref().
+ */
+ static SkDeviceProfile* GetDefault();
+
+ /**
+ * Return the current global profile (or the default if no global had yet
+     *  been set) and increment its reference count. The caller *must* call unref()
+ * when it is done using it.
+ */
+ static SkDeviceProfile* RefGlobal();
+
+ /**
+ * Make the specified profile be the global value for all subsequently
+ * instantiated devices. Does not affect any existing devices.
+ * Increments the reference count on the profile.
+ * Specify nullptr for the "identity" profile (where there is no gamma or
+ * contrast correction).
+ */
+ static void SetGlobal(SkDeviceProfile*);
+
+ float getFontGammaExponent() const { return fGammaExponent; }
+ float getFontContrastScale() const { return fContrastScale; }
+
+ /**
+ * Given a luminance byte (0 for black, 0xFF for white), generate a table
+ * that applies the gamma/contrast settings to linear coverage values.
+ */
+ void generateTableForLuminanceByte(U8CPU lumByte, uint8_t table[256]) const;
+
+private:
+ SkDeviceProfile(float gammaExp, float contrastScale, LCDConfig,
+ FontHintLevel);
+
+ float fGammaExponent;
+ float fContrastScale;
+ LCDConfig fLCDConfig;
+ FontHintLevel fFontHintLevel;
+
+ typedef SkRefCnt INHERITED;
+};
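+
+// A minimal sketch of installing and consuming a profile, assuming an embedder
+// that knows its panel layout; the parameter values are illustrative only.
+//
+//   SkDeviceProfile* profile = SkDeviceProfile::Create(
+//           2.2f, 0.5f, SkDeviceProfile::kRGB_Horizontal_LCDConfig,
+//           SkDeviceProfile::kSlight_FontHintLevel);
+//   SkDeviceProfile::SetGlobal(profile);             // SetGlobal takes its own ref
+//   profile->unref();                                // drop the creation ref
+//   SkDeviceProfile* current = SkDeviceProfile::RefGlobal();
+//   float gamma = current->getFontGammaExponent();
+//   current->unref();                                // balance RefGlobal()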
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDiscardableMemory.h b/gfx/skia/skia/src/core/SkDiscardableMemory.h
new file mode 100644
index 000000000..8952b8df7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDiscardableMemory.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDiscardableMemory_DEFINED
+#define SkDiscardableMemory_DEFINED
+
+#include "SkRefCnt.h"
+#include "SkTypes.h"
+
+/**
+ * Interface for discardable memory. Implementation is provided by the
+ * embedder.
+ */
+class SK_API SkDiscardableMemory {
+public:
+ /**
+ * Factory method that creates, initializes and locks an SkDiscardableMemory
+     *  object. If any of these steps fails, nullptr will be returned.
+ */
+ static SkDiscardableMemory* Create(size_t bytes);
+
+ /**
+ * Factory class that creates, initializes and locks an SkDiscardableMemory
+     *  object. If any of these steps fails, nullptr will be returned.
+ */
+ class Factory : public SkRefCnt {
+ public:
+ virtual SkDiscardableMemory* create(size_t bytes) = 0;
+ private:
+ typedef SkRefCnt INHERITED;
+ };
+
+ /** Must not be called while locked.
+ */
+ virtual ~SkDiscardableMemory() {}
+
+ /**
+     *  Locks the memory, preventing it from being discarded. Once locked, you may
+ * obtain a pointer to that memory using the data() method.
+ *
+ * lock() may return false, indicating that the underlying memory was
+ * discarded and that the lock failed.
+ *
+ * Nested calls to lock are not allowed.
+ */
+ virtual bool lock() = 0;
+
+ /**
+ * Returns the current pointer for the discardable memory. This call is ONLY
+ * valid when the discardable memory object is locked.
+ */
+ virtual void* data() = 0;
+
+ /**
+ * Unlock the memory so that it can be purged by the system. Must be called
+ * after every successful lock call.
+ */
+ virtual void unlock() = 0;
+};
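+
+// A minimal sketch of the lock/use/unlock protocol described above, assuming
+// the embedder has provided a working Create(); the buffer size and the src
+// and use() names are illustrative only.
+//
+//   SkDiscardableMemory* dm = SkDiscardableMemory::Create(4096);
+//   if (dm) {                                        // returned created, initialized, and locked
+//       memcpy(dm->data(), src, 4096);
+//       dm->unlock();                                // now purgeable by the system
+//       if (dm->lock()) {                            // may fail if it was purged
+//           use(dm->data());
+//           dm->unlock();
+//       }
+//       delete dm;                                   // must not be locked at this point
+//   }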
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDistanceFieldGen.cpp b/gfx/skia/skia/src/core/SkDistanceFieldGen.cpp
new file mode 100755
index 000000000..7e4675bbc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDistanceFieldGen.cpp
@@ -0,0 +1,521 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkDistanceFieldGen.h"
+#include "SkPoint.h"
+
+struct DFData {
+ float fAlpha; // alpha value of source texel
+ float fDistSq; // distance squared to nearest (so far) edge texel
+ SkPoint fDistVector; // distance vector to nearest (so far) edge texel
+};
+
+enum NeighborFlags {
+ kLeft_NeighborFlag = 0x01,
+ kRight_NeighborFlag = 0x02,
+ kTopLeft_NeighborFlag = 0x04,
+ kTop_NeighborFlag = 0x08,
+ kTopRight_NeighborFlag = 0x10,
+ kBottomLeft_NeighborFlag = 0x20,
+ kBottom_NeighborFlag = 0x40,
+ kBottomRight_NeighborFlag = 0x80,
+ kAll_NeighborFlags = 0xff,
+
+ kNeighborFlagCount = 8
+};
+
+// We treat an "edge" as a place where we cross from >=128 to <128, or vice versa, or
+// where we have two non-zero pixels that are <128.
+// 'neighborFlags' is used to limit the directions in which we test to avoid indexing
+// outside of the image
+static bool found_edge(const unsigned char* imagePtr, int width, int neighborFlags) {
+ // the order of these should match the neighbor flags above
+ const int kNum8ConnectedNeighbors = 8;
+ const int offsets[8] = {-1, 1, -width-1, -width, -width+1, width-1, width, width+1 };
+ SkASSERT(kNum8ConnectedNeighbors == kNeighborFlagCount);
+
+ // search for an edge
+ unsigned char currVal = *imagePtr;
+ unsigned char currCheck = (currVal >> 7);
+ for (int i = 0; i < kNum8ConnectedNeighbors; ++i) {
+ unsigned char neighborVal;
+ if ((1 << i) & neighborFlags) {
+ const unsigned char* checkPtr = imagePtr + offsets[i];
+ neighborVal = *checkPtr;
+ } else {
+ neighborVal = 0;
+ }
+ unsigned char neighborCheck = (neighborVal >> 7);
+ SkASSERT(currCheck == 0 || currCheck == 1);
+ SkASSERT(neighborCheck == 0 || neighborCheck == 1);
+ // if sharp transition
+ if (currCheck != neighborCheck ||
+ // or both <128 and >0
+ (!currCheck && !neighborCheck && currVal && neighborVal)) {
+ return true;
+ }
+ }
+
+ return false;
+}
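+
+// Worked example of the test above (illustrative values): 130 next to 120 is
+// an edge via the sharp-transition test (their high bits differ); 100 next to
+// 60 is an edge via the second test (both below 128 but non-zero); 100 next
+// to 0 is not an edge.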
+
+static void init_glyph_data(DFData* data, unsigned char* edges, const unsigned char* image,
+ int dataWidth, int dataHeight,
+ int imageWidth, int imageHeight,
+ int pad) {
+ data += pad*dataWidth;
+ data += pad;
+ edges += (pad*dataWidth + pad);
+
+ for (int j = 0; j < imageHeight; ++j) {
+ for (int i = 0; i < imageWidth; ++i) {
+ if (255 == *image) {
+ data->fAlpha = 1.0f;
+ } else {
+ data->fAlpha = (*image)*0.00392156862f; // 1/255
+ }
+ int checkMask = kAll_NeighborFlags;
+ if (i == 0) {
+ checkMask &= ~(kLeft_NeighborFlag|kTopLeft_NeighborFlag|kBottomLeft_NeighborFlag);
+ }
+ if (i == imageWidth-1) {
+ checkMask &= ~(kRight_NeighborFlag|kTopRight_NeighborFlag|kBottomRight_NeighborFlag);
+ }
+ if (j == 0) {
+ checkMask &= ~(kTopLeft_NeighborFlag|kTop_NeighborFlag|kTopRight_NeighborFlag);
+ }
+ if (j == imageHeight-1) {
+ checkMask &= ~(kBottomLeft_NeighborFlag|kBottom_NeighborFlag|kBottomRight_NeighborFlag);
+ }
+ if (found_edge(image, imageWidth, checkMask)) {
+ *edges = 255; // using 255 makes for convenient debug rendering
+ }
+ ++data;
+ ++image;
+ ++edges;
+ }
+ data += 2*pad;
+ edges += 2*pad;
+ }
+}
+
+// from Gustavson (2011)
+// computes the distance to an edge given an edge normal vector and a pixel's alpha value
+// assumes that direction has been pre-normalized
+static float edge_distance(const SkPoint& direction, float alpha) {
+ float dx = direction.fX;
+ float dy = direction.fY;
+ float distance;
+ if (SkScalarNearlyZero(dx) || SkScalarNearlyZero(dy)) {
+ distance = 0.5f - alpha;
+ } else {
+ // this is easier if we treat the direction as being in the first octant
+ // (other octants are symmetrical)
+ dx = SkScalarAbs(dx);
+ dy = SkScalarAbs(dy);
+ if (dx < dy) {
+ SkTSwap(dx, dy);
+ }
+
+ // a1 = 0.5*dy/dx is the smaller fractional area chopped off by the edge
+ // to avoid the divide, we just consider the numerator
+ float a1num = 0.5f*dy;
+
+ // we now compute the approximate distance, depending where the alpha falls
+ // relative to the edge fractional area
+
+ // if 0 <= alpha < a1
+ if (alpha*dx < a1num) {
+ // TODO: find a way to do this without square roots?
+ distance = 0.5f*(dx + dy) - SkScalarSqrt(2.0f*dx*dy*alpha);
+ // if a1 <= alpha <= 1 - a1
+ } else if (alpha*dx < (dx - a1num)) {
+ distance = (0.5f - alpha)*dx;
+ // if 1 - a1 < alpha <= 1
+ } else {
+ // TODO: find a way to do this without square roots?
+ distance = -0.5f*(dx + dy) + SkScalarSqrt(2.0f*dx*dy*(1.0f - alpha));
+ }
+ }
+
+ return distance;
+}
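+
+// Worked example of the approximation above (illustrative numbers): for a unit
+// direction (dx, dy) = (0.8, 0.6), a1 = 0.5*dy/dx = 0.375, so an alpha of 0.5
+// falls in the middle branch and gives distance = (0.5 - 0.5)*dx = 0; a
+// half-covered pixel sits exactly on the edge, matching the axis-aligned case
+// distance = 0.5 - alpha.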
+
+static void init_distances(DFData* data, unsigned char* edges, int width, int height) {
+ // skip one pixel border
+ DFData* currData = data;
+ DFData* prevData = data - width;
+ DFData* nextData = data + width;
+
+ for (int j = 0; j < height; ++j) {
+ for (int i = 0; i < width; ++i) {
+ if (*edges) {
+ // we should not be in the one-pixel outside band
+ SkASSERT(i > 0 && i < width-1 && j > 0 && j < height-1);
+ // gradient will point from low to high
+ // +y is down in this case
+ // i.e., if you're outside, gradient points towards edge
+ // if you're inside, gradient points away from edge
+ SkPoint currGrad;
+ currGrad.fX = (prevData+1)->fAlpha - (prevData-1)->fAlpha
+ + SK_ScalarSqrt2*(currData+1)->fAlpha
+ - SK_ScalarSqrt2*(currData-1)->fAlpha
+ + (nextData+1)->fAlpha - (nextData-1)->fAlpha;
+ currGrad.fY = (nextData-1)->fAlpha - (prevData-1)->fAlpha
+ + SK_ScalarSqrt2*nextData->fAlpha
+ - SK_ScalarSqrt2*prevData->fAlpha
+ + (nextData+1)->fAlpha - (prevData+1)->fAlpha;
+ currGrad.setLengthFast(1.0f);
+
+ // init squared distance to edge and distance vector
+ float dist = edge_distance(currGrad, currData->fAlpha);
+ currGrad.scale(dist, &currData->fDistVector);
+ currData->fDistSq = dist*dist;
+ } else {
+ // init distance to "far away"
+ currData->fDistSq = 2000000.f;
+ currData->fDistVector.fX = 1000.f;
+ currData->fDistVector.fY = 1000.f;
+ }
+ ++currData;
+ ++prevData;
+ ++nextData;
+ ++edges;
+ }
+ }
+}
+
+// Danielsson's 8SSEDT
+
+// first stage forward pass
+// (forward in Y, forward in X)
+static void F1(DFData* curr, int width) {
+ // upper left
+ DFData* check = curr - width-1;
+ SkPoint distVec = check->fDistVector;
+ float distSq = check->fDistSq - 2.0f*(distVec.fX + distVec.fY - 1.0f);
+ if (distSq < curr->fDistSq) {
+ distVec.fX -= 1.0f;
+ distVec.fY -= 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+
+ // up
+ check = curr - width;
+ distVec = check->fDistVector;
+ distSq = check->fDistSq - 2.0f*distVec.fY + 1.0f;
+ if (distSq < curr->fDistSq) {
+ distVec.fY -= 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+
+ // upper right
+ check = curr - width+1;
+ distVec = check->fDistVector;
+ distSq = check->fDistSq + 2.0f*(distVec.fX - distVec.fY + 1.0f);
+ if (distSq < curr->fDistSq) {
+ distVec.fX += 1.0f;
+ distVec.fY -= 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+
+ // left
+ check = curr - 1;
+ distVec = check->fDistVector;
+ distSq = check->fDistSq - 2.0f*distVec.fX + 1.0f;
+ if (distSq < curr->fDistSq) {
+ distVec.fX -= 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+}
+
+// second stage forward pass
+// (forward in Y, backward in X)
+static void F2(DFData* curr, int width) {
+ // right
+ DFData* check = curr + 1;
+ SkPoint distVec = check->fDistVector;
+ float distSq = check->fDistSq + 2.0f*distVec.fX + 1.0f;
+ if (distSq < curr->fDistSq) {
+ distVec.fX += 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+}
+
+// first stage backward pass
+// (backward in Y, forward in X)
+static void B1(DFData* curr, int width) {
+ // left
+ DFData* check = curr - 1;
+ SkPoint distVec = check->fDistVector;
+ float distSq = check->fDistSq - 2.0f*distVec.fX + 1.0f;
+ if (distSq < curr->fDistSq) {
+ distVec.fX -= 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+}
+
+// second stage backward pass
+// (backward in Y, backwards in X)
+static void B2(DFData* curr, int width) {
+ // right
+ DFData* check = curr + 1;
+ SkPoint distVec = check->fDistVector;
+ float distSq = check->fDistSq + 2.0f*distVec.fX + 1.0f;
+ if (distSq < curr->fDistSq) {
+ distVec.fX += 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+
+ // bottom left
+ check = curr + width-1;
+ distVec = check->fDistVector;
+ distSq = check->fDistSq - 2.0f*(distVec.fX - distVec.fY - 1.0f);
+ if (distSq < curr->fDistSq) {
+ distVec.fX -= 1.0f;
+ distVec.fY += 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+
+ // bottom
+ check = curr + width;
+ distVec = check->fDistVector;
+ distSq = check->fDistSq + 2.0f*distVec.fY + 1.0f;
+ if (distSq < curr->fDistSq) {
+ distVec.fY += 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+
+ // bottom right
+ check = curr + width+1;
+ distVec = check->fDistVector;
+ distSq = check->fDistSq + 2.0f*(distVec.fX + distVec.fY + 1.0f);
+ if (distSq < curr->fDistSq) {
+ distVec.fX += 1.0f;
+ distVec.fY += 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+}
+
+// enable this to output edge data rather than the distance field
+#define DUMP_EDGE 0
+
+#if !DUMP_EDGE
+template <int distanceMagnitude>
+static unsigned char pack_distance_field_val(float dist) {
+    // The distance field is constructed as unsigned char values, so that the zero value is at 128.
+    // Besides 128, we have 128 values in range [0, 128), but only 127 values in range (128, 255].
+    // So we multiply distanceMagnitude by 127/128 for the latter range to avoid overflow.
+ dist = SkScalarPin(-dist, -distanceMagnitude, distanceMagnitude * 127.0f / 128.0f);
+
+ // Scale into the positive range for unsigned distance.
+ dist += distanceMagnitude;
+
+ // Scale into unsigned char range.
+ // Round to place negative and positive values as equally as possible around 128
+ // (which represents zero).
+ return (unsigned char)SkScalarRoundToInt(dist / (2 * distanceMagnitude) * 256.0f);
+}
+#endif
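+
+// Worked example of the packing above, assuming the usual magnitude of 4:
+// dist =  0 (on the edge)      -> (0 + 4) / 8 * 256 = 128
+// dist = +4 (4 texels outside) -> (-4 + 4) / 8 * 256 = 0
+// dist = -4 (4 texels inside)  -> pinned to 4*127/128, which packs to 255
+// so inside distances land in (128, 255] and outside distances in [0, 128).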
+
+// assumes a padded 8-bit image and distance field
+// width and height are the original width and height of the image
+static bool generate_distance_field_from_image(unsigned char* distanceField,
+ const unsigned char* copyPtr,
+ int width, int height) {
+ SkASSERT(distanceField);
+ SkASSERT(copyPtr);
+
+    // we expand our temp data by one more texel on each side to simplify
+    // the scanning code -- the extra border is always treated as infinitely far away
+ int pad = SK_DistanceFieldPad + 1;
+
+ // set params for distance field data
+ int dataWidth = width + 2*pad;
+ int dataHeight = height + 2*pad;
+
+ // create zeroed temp DFData+edge storage
+ SkAutoFree storage(sk_calloc_throw(dataWidth*dataHeight*(sizeof(DFData) + 1)));
+ DFData* dataPtr = (DFData*)storage.get();
+ unsigned char* edgePtr = (unsigned char*)storage.get() + dataWidth*dataHeight*sizeof(DFData);
+
+ // copy glyph into distance field storage
+ init_glyph_data(dataPtr, edgePtr, copyPtr,
+ dataWidth, dataHeight,
+ width+2, height+2, SK_DistanceFieldPad);
+
+ // create initial distance data, particularly at edges
+ init_distances(dataPtr, edgePtr, dataWidth, dataHeight);
+
+ // now perform Euclidean distance transform to propagate distances
+
+ // forwards in y
+ DFData* currData = dataPtr+dataWidth+1; // skip outer buffer
+ unsigned char* currEdge = edgePtr+dataWidth+1;
+ for (int j = 1; j < dataHeight-1; ++j) {
+ // forwards in x
+ for (int i = 1; i < dataWidth-1; ++i) {
+ // don't need to calculate distance for edge pixels
+ if (!*currEdge) {
+ F1(currData, dataWidth);
+ }
+ ++currData;
+ ++currEdge;
+ }
+
+ // backwards in x
+ --currData; // reset to end
+ --currEdge;
+ for (int i = 1; i < dataWidth-1; ++i) {
+ // don't need to calculate distance for edge pixels
+ if (!*currEdge) {
+ F2(currData, dataWidth);
+ }
+ --currData;
+ --currEdge;
+ }
+
+ currData += dataWidth+1;
+ currEdge += dataWidth+1;
+ }
+
+ // backwards in y
+ currData = dataPtr+dataWidth*(dataHeight-2) - 1; // skip outer buffer
+ currEdge = edgePtr+dataWidth*(dataHeight-2) - 1;
+ for (int j = 1; j < dataHeight-1; ++j) {
+ // forwards in x
+ for (int i = 1; i < dataWidth-1; ++i) {
+ // don't need to calculate distance for edge pixels
+ if (!*currEdge) {
+ B1(currData, dataWidth);
+ }
+ ++currData;
+ ++currEdge;
+ }
+
+ // backwards in x
+ --currData; // reset to end
+ --currEdge;
+ for (int i = 1; i < dataWidth-1; ++i) {
+ // don't need to calculate distance for edge pixels
+ if (!*currEdge) {
+ B2(currData, dataWidth);
+ }
+ --currData;
+ --currEdge;
+ }
+
+ currData -= dataWidth-1;
+ currEdge -= dataWidth-1;
+ }
+
+ // copy results to final distance field data
+ currData = dataPtr + dataWidth+1;
+ currEdge = edgePtr + dataWidth+1;
+ unsigned char *dfPtr = distanceField;
+ for (int j = 1; j < dataHeight-1; ++j) {
+ for (int i = 1; i < dataWidth-1; ++i) {
+#if DUMP_EDGE
+ float alpha = currData->fAlpha;
+ float edge = 0.0f;
+ if (*currEdge) {
+ edge = 0.25f;
+ }
+ // blend with original image
+ float result = alpha + (1.0f-alpha)*edge;
+ unsigned char val = sk_float_round2int(255*result);
+ *dfPtr++ = val;
+#else
+ float dist;
+ if (currData->fAlpha > 0.5f) {
+ dist = -SkScalarSqrt(currData->fDistSq);
+ } else {
+ dist = SkScalarSqrt(currData->fDistSq);
+ }
+ *dfPtr++ = pack_distance_field_val<SK_DistanceFieldMagnitude>(dist);
+#endif
+ ++currData;
+ ++currEdge;
+ }
+ currData += 2;
+ currEdge += 2;
+ }
+
+ return true;
+}
+
+// assumes an 8-bit image and distance field
+bool SkGenerateDistanceFieldFromA8Image(unsigned char* distanceField,
+ const unsigned char* image,
+ int width, int height, size_t rowBytes) {
+ SkASSERT(distanceField);
+ SkASSERT(image);
+
+ // create temp data
+ SkAutoSMalloc<1024> copyStorage((width+2)*(height+2)*sizeof(char));
+ unsigned char* copyPtr = (unsigned char*) copyStorage.get();
+
+ // we copy our source image into a padded copy to ensure we catch edge transitions
+ // around the outside
+ const unsigned char* currSrcScanLine = image;
+ sk_bzero(copyPtr, (width+2)*sizeof(char));
+ unsigned char* currDestPtr = copyPtr + width + 2;
+ for (int i = 0; i < height; ++i) {
+ *currDestPtr++ = 0;
+ memcpy(currDestPtr, currSrcScanLine, rowBytes);
+ currSrcScanLine += rowBytes;
+ currDestPtr += width;
+ *currDestPtr++ = 0;
+ }
+ sk_bzero(currDestPtr, (width+2)*sizeof(char));
+
+ return generate_distance_field_from_image(distanceField, copyPtr, width, height);
+}
+
+// assumes a 1-bit image and 8-bit distance field
+bool SkGenerateDistanceFieldFromBWImage(unsigned char* distanceField,
+ const unsigned char* image,
+ int width, int height, size_t rowBytes) {
+ SkASSERT(distanceField);
+ SkASSERT(image);
+
+ // create temp data
+ SkAutoSMalloc<1024> copyStorage((width+2)*(height+2)*sizeof(char));
+ unsigned char* copyPtr = (unsigned char*) copyStorage.get();
+
+ // we copy our source image into a padded copy to ensure we catch edge transitions
+ // around the outside
+ const unsigned char* currSrcScanLine = image;
+ sk_bzero(copyPtr, (width+2)*sizeof(char));
+ unsigned char* currDestPtr = copyPtr + width + 2;
+ for (int i = 0; i < height; ++i) {
+ *currDestPtr++ = 0;
+ int rowWritesLeft = width;
+ const unsigned char *maskPtr = currSrcScanLine;
+ while (rowWritesLeft > 0) {
+ unsigned mask = *maskPtr++;
+ for (int i = 7; i >= 0 && rowWritesLeft; --i, --rowWritesLeft) {
+ *currDestPtr++ = (mask & (1 << i)) ? 0xff : 0;
+ }
+ }
+ currSrcScanLine += rowBytes;
+ *currDestPtr++ = 0;
+ }
+ sk_bzero(currDestPtr, (width+2)*sizeof(char));
+
+ return generate_distance_field_from_image(distanceField, copyPtr, width, height);
+}
diff --git a/gfx/skia/skia/src/core/SkDistanceFieldGen.h b/gfx/skia/skia/src/core/SkDistanceFieldGen.h
new file mode 100644
index 000000000..5e7af52a5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDistanceFieldGen.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkDistanceFieldGen_DEFINED
+#define SkDistanceFieldGen_DEFINED
+
+#include "SkTypes.h"
+
+// the max magnitude for the distance field
+// distance values are limited to the range (-SK_DistanceFieldMagnitude, SK_DistanceFieldMagnitude]
+#define SK_DistanceFieldMagnitude 4
+// we need to pad around the original glyph to allow for our maximum distance of
+// SK_DistanceFieldMagnitude texels away from any edge
+#define SK_DistanceFieldPad 4
+// the rect we render with is inset from the distance field glyph size to allow for bilerp
+#define SK_DistanceFieldInset 2
+
+// For the fragment shader:
+// The distance field is constructed as unsigned char values,
+// so that the zero value is at 128, and the supported range of distances is [-4 * 127/128, 4].
+// Hence our multiplier (width of the range) is 4 * 255/128 and zero threshold is 128/255.
+#define SK_DistanceFieldMultiplier "7.96875"
+#define SK_DistanceFieldThreshold "0.50196078431"
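+// (sanity check: 4 * 255/128 == 7.96875 and 128/255 ~= 0.50196078431, matching the strings above)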
+
+/** Given 8-bit mask data, generate the associated distance field
+
+ * @param distanceField The distance field to be generated. Should already be allocated
+ * by the client with the padding above.
+ * @param image 8-bit mask we're using to generate the distance field.
+ * @param w Width of the original image.
+ * @param h Height of the original image.
+ * @param rowBytes Size of each row in the image, in bytes
+ */
+bool SkGenerateDistanceFieldFromA8Image(unsigned char* distanceField,
+ const unsigned char* image,
+ int w, int h, size_t rowBytes);
+
+/** Given 1-bit mask data, generate the associated distance field
+
+ * @param distanceField The distance field to be generated. Should already be allocated
+ * by the client with the padding above.
+ * @param image 1-bit mask we're using to generate the distance field.
+ * @param w Width of the original image.
+ * @param h Height of the original image.
+ * @param rowBytes Size of each row in the image, in bytes
+ */
+bool SkGenerateDistanceFieldFromBWImage(unsigned char* distanceField,
+ const unsigned char* image,
+ int w, int h, size_t rowBytes);
+
+/** Given width and height of original image, return size (in bytes) of distance field
+ * @param w Width of the original image.
+ * @param h Height of the original image.
+ */
+inline size_t SkComputeDistanceFieldSize(int w, int h) {
+ return (w + 2*SK_DistanceFieldPad) * (h + 2*SK_DistanceFieldPad) * sizeof(unsigned char);
+}
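+
+/* A minimal usage sketch (illustrative only; maskPixels and maskRowBytes stand in for the
+ caller's A8 data, and SkAutoTMalloc comes from SkTemplates.h):
+
+ size_t size = SkComputeDistanceFieldSize(w, h);
+ SkAutoTMalloc<unsigned char> df(size);
+ if (SkGenerateDistanceFieldFromA8Image(df.get(), maskPixels, w, h, maskRowBytes)) {
+ // df now holds a (w + 2*SK_DistanceFieldPad) x (h + 2*SK_DistanceFieldPad) unsigned-distance
+ // image, with 128 marking the glyph edge.
+ }
+*/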
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDither.cpp b/gfx/skia/skia/src/core/SkDither.cpp
new file mode 100644
index 000000000..d966e0e91
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDither.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkDither.h"
+
+/* The base dither matrix we use to derive optimized ones for 565 and 4444
+
+ { 0, 32, 8, 40, 2, 34, 10, 42 },
+ { 48, 16, 56, 24, 50, 18, 58, 26 },
+ { 12, 44, 4, 36, 14, 46, 6, 38 },
+ { 60, 28, 52, 20, 62, 30, 54, 22 },
+ { 3, 35, 11, 43, 1, 33, 9, 41 },
+ { 51, 19, 59, 27, 49, 17, 57, 25 },
+ { 15, 47, 7, 39, 13, 45, 5, 37 },
+ { 63, 31, 55, 23, 61, 29, 53, 21 }
+
+ The 4444 version only needs 4 bits, and given that, we can reduce its size to 4x4,
+ since the other 4x4 sub-pieces all look the same once we truncate the bits.
+
+ The 565 version only needs 3 bits for red/blue, and only 2 bits for green.
+ For simplicity, we store 3 bits, and have the dither macros for green know
+ this, and they shift the dither value down by 1 to make it 2 bits.
+ */
+
+#ifdef ENABLE_DITHER_MATRIX_4X4
+
+const uint8_t gDitherMatrix_4Bit_4X4[4][4] = {
+ { 0, 8, 2, 10 },
+ { 12, 4, 14, 6 },
+ { 3, 11, 1, 9 },
+ { 15, 7, 13, 5 }
+};
+
+const uint8_t gDitherMatrix_3Bit_4X4[4][4] = {
+ { 0, 4, 1, 5 },
+ { 6, 2, 7, 3 },
+ { 1, 5, 0, 4 },
+ { 7, 3, 6, 2 }
+};
+
+#else // use packed shorts for a scanline's worth of dither values
+
+const uint16_t gDitherMatrix_4Bit_16[4] = {
+ 0xA280, 0x6E4C, 0x91B3, 0x5D7F
+};
+
+const uint16_t gDitherMatrix_3Bit_16[4] = {
+ 0x5140, 0x3726, 0x4051, 0x2637
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDither.h b/gfx/skia/skia/src/core/SkDither.h
new file mode 100644
index 000000000..463b50403
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDither.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDither_DEFINED
+#define SkDither_DEFINED
+
+#include "SkColorPriv.h"
+
+#define SK_DitherValueMax4444 15
+#define SK_DitherValueMax565 7
+
+/* need to use macros for bit-counts for each component, and then
+ move these into SkColorPriv.h
+*/
+
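+// In each macro below the subtracted term (e.g. r >> 5 for 565, r >> 4 for 4444) compensates for
+// the largest dither that can be added, so a component that is already 255 stays at 255 and the
+// 8-bit value never overflows before it is packed down to 5/6/4 bits.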
+#define SkDITHER_R32_FOR_565_MACRO(r, d) (r + d - (r >> 5))
+#define SkDITHER_G32_FOR_565_MACRO(g, d) (g + (d >> 1) - (g >> 6))
+#define SkDITHER_B32_FOR_565_MACRO(b, d) (b + d - (b >> 5))
+
+#define SkDITHER_A32_FOR_4444_MACRO(a, d) (a + 15 - (a >> 4))
+#define SkDITHER_R32_FOR_4444_MACRO(r, d) (r + d - (r >> 4))
+#define SkDITHER_G32_FOR_4444_MACRO(g, d) (g + d - (g >> 4))
+#define SkDITHER_B32_FOR_4444_MACRO(b, d) (b + d - (b >> 4))
+
+#ifdef SK_DEBUG
+ inline unsigned SkDITHER_R32_FOR_565(unsigned r, unsigned d)
+ {
+ SkASSERT(d <= SK_DitherValueMax565);
+ SkA32Assert(r);
+ r = SkDITHER_R32_FOR_565_MACRO(r, d);
+ SkA32Assert(r);
+ return r;
+ }
+ inline unsigned SkDITHER_G32_FOR_565(unsigned g, unsigned d)
+ {
+ SkASSERT(d <= SK_DitherValueMax565);
+ SkG32Assert(g);
+ g = SkDITHER_G32_FOR_565_MACRO(g, d);
+ SkG32Assert(g);
+ return g;
+ }
+ inline unsigned SkDITHER_B32_FOR_565(unsigned b, unsigned d)
+ {
+ SkASSERT(d <= SK_DitherValueMax565);
+ SkB32Assert(b);
+ b = SkDITHER_B32_FOR_565_MACRO(b, d);
+ SkB32Assert(b);
+ return b;
+ }
+#else
+ #define SkDITHER_R32_FOR_565(r, d) SkDITHER_R32_FOR_565_MACRO(r, d)
+ #define SkDITHER_G32_FOR_565(g, d) SkDITHER_G32_FOR_565_MACRO(g, d)
+ #define SkDITHER_B32_FOR_565(b, d) SkDITHER_B32_FOR_565_MACRO(b, d)
+#endif
+
+#define SkDITHER_R32To565(r, d) SkR32ToR16(SkDITHER_R32_FOR_565(r, d))
+#define SkDITHER_G32To565(g, d) SkG32ToG16(SkDITHER_G32_FOR_565(g, d))
+#define SkDITHER_B32To565(b, d) SkB32ToB16(SkDITHER_B32_FOR_565(b, d))
+
+#define SkDITHER_A32To4444(a, d) SkA32To4444(SkDITHER_A32_FOR_4444_MACRO(a, d))
+#define SkDITHER_R32To4444(r, d) SkR32To4444(SkDITHER_R32_FOR_4444_MACRO(r, d))
+#define SkDITHER_G32To4444(g, d) SkG32To4444(SkDITHER_G32_FOR_4444_MACRO(g, d))
+#define SkDITHER_B32To4444(b, d) SkB32To4444(SkDITHER_B32_FOR_4444_MACRO(b, d))
+
+static inline SkPMColor SkDitherARGB32For565(SkPMColor c, unsigned dither)
+{
+ SkASSERT(dither <= SK_DitherValueMax565);
+
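+ // scale the dither by alpha so the premultiplied components (each <= sa) can't be pushed
+ // above alpha by the dither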
+ unsigned sa = SkGetPackedA32(c);
+ dither = SkAlphaMul(dither, SkAlpha255To256(sa));
+
+ unsigned sr = SkGetPackedR32(c);
+ unsigned sg = SkGetPackedG32(c);
+ unsigned sb = SkGetPackedB32(c);
+ sr = SkDITHER_R32_FOR_565(sr, dither);
+ sg = SkDITHER_G32_FOR_565(sg, dither);
+ sb = SkDITHER_B32_FOR_565(sb, dither);
+
+ return SkPackARGB32(sa, sr, sg, sb);
+}
+
+static inline SkPMColor SkDitherRGB32For565(SkPMColor c, unsigned dither)
+{
+ SkASSERT(dither <= SK_DitherValueMax565);
+
+ unsigned sr = SkGetPackedR32(c);
+ unsigned sg = SkGetPackedG32(c);
+ unsigned sb = SkGetPackedB32(c);
+ sr = SkDITHER_R32_FOR_565(sr, dither);
+ sg = SkDITHER_G32_FOR_565(sg, dither);
+ sb = SkDITHER_B32_FOR_565(sb, dither);
+
+ return SkPackARGB32(0xFF, sr, sg, sb);
+}
+
+static inline uint16_t SkDitherRGBTo565(U8CPU r, U8CPU g, U8CPU b,
+ unsigned dither)
+{
+ SkASSERT(dither <= SK_DitherValueMax565);
+ r = SkDITHER_R32To565(r, dither);
+ g = SkDITHER_G32To565(g, dither);
+ b = SkDITHER_B32To565(b, dither);
+ return SkPackRGB16(r, g, b);
+}
+
+static inline uint16_t SkDitherRGB32To565(SkPMColor c, unsigned dither)
+{
+ SkASSERT(dither <= SK_DitherValueMax565);
+
+ unsigned sr = SkGetPackedR32(c);
+ unsigned sg = SkGetPackedG32(c);
+ unsigned sb = SkGetPackedB32(c);
+ sr = SkDITHER_R32To565(sr, dither);
+ sg = SkDITHER_G32To565(sg, dither);
+ sb = SkDITHER_B32To565(sb, dither);
+
+ return SkPackRGB16(sr, sg, sb);
+}
+
+static inline uint16_t SkDitherARGB32To565(U8CPU sa, SkPMColor c, unsigned dither)
+{
+ SkASSERT(dither <= SK_DitherValueMax565);
+ dither = SkAlphaMul(dither, SkAlpha255To256(sa));
+
+ unsigned sr = SkGetPackedR32(c);
+ unsigned sg = SkGetPackedG32(c);
+ unsigned sb = SkGetPackedB32(c);
+ sr = SkDITHER_R32To565(sr, dither);
+ sg = SkDITHER_G32To565(sg, dither);
+ sb = SkDITHER_B32To565(sb, dither);
+
+ return SkPackRGB16(sr, sg, sb);
+}
+
+///////////////////////// 4444
+
+static inline SkPMColor16 SkDitherARGB32To4444(U8CPU a, U8CPU r, U8CPU g,
+ U8CPU b, unsigned dither)
+{
+ dither = SkAlphaMul(dither, SkAlpha255To256(a));
+
+ a = SkDITHER_A32To4444(a, dither);
+ r = SkDITHER_R32To4444(r, dither);
+ g = SkDITHER_G32To4444(g, dither);
+ b = SkDITHER_B32To4444(b, dither);
+
+ return SkPackARGB4444(a, r, g, b);
+}
+
+static inline SkPMColor16 SkDitherARGB32To4444(SkPMColor c, unsigned dither)
+{
+ unsigned a = SkGetPackedA32(c);
+ unsigned r = SkGetPackedR32(c);
+ unsigned g = SkGetPackedG32(c);
+ unsigned b = SkGetPackedB32(c);
+
+ dither = SkAlphaMul(dither, SkAlpha255To256(a));
+
+ a = SkDITHER_A32To4444(a, dither);
+ r = SkDITHER_R32To4444(r, dither);
+ g = SkDITHER_G32To4444(g, dither);
+ b = SkDITHER_B32To4444(b, dither);
+
+ return SkPackARGB4444(a, r, g, b);
+}
+
+// TODO: need dither routines for 565 -> 4444
+
+// this toggles between a 4x4 and a 1x4 array
+//#define ENABLE_DITHER_MATRIX_4X4
+
+#ifdef ENABLE_DITHER_MATRIX_4X4
+ extern const uint8_t gDitherMatrix_4Bit_4X4[4][4];
+ extern const uint8_t gDitherMatrix_3Bit_4X4[4][4];
+
+ #define DITHER_4444_SCAN(y) const uint8_t* dither_scan = gDitherMatrix_4Bit_4X4[(y) & 3]
+ #define DITHER_565_SCAN(y) const uint8_t* dither_scan = gDitherMatrix_3Bit_4X4[(y) & 3]
+
+ #define DITHER_VALUE(x) dither_scan[(x) & 3]
+#else
+ extern const uint16_t gDitherMatrix_4Bit_16[4];
+ extern const uint16_t gDitherMatrix_3Bit_16[4];
+
+ #define DITHER_4444_SCAN(y) const uint16_t dither_scan = gDitherMatrix_4Bit_16[(y) & 3]
+ #define DITHER_565_SCAN(y) const uint16_t dither_scan = gDitherMatrix_3Bit_16[(y) & 3]
+
+ #define DITHER_VALUE(x) ((dither_scan >> (((x) & 3) << 2)) & 0xF)
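+ // e.g. with DITHER_565_SCAN(0), dither_scan == 0x5140, i.e. the row {0, 4, 1, 5} packed one
+ // nibble per x: DITHER_VALUE(0..3) yields 0, 4, 1, 5, matching the first row of
+ // gDitherMatrix_3Bit_4X4 in SkDither.cpp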
+#endif
+
+#define DITHER_INC_X(x) ++(x)
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDocument.cpp b/gfx/skia/skia/src/core/SkDocument.cpp
new file mode 100644
index 000000000..29db7f05e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDocument.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkDocument.h"
+#include "SkStream.h"
+
+SkDocument::SkDocument(SkWStream* stream, void (*doneProc)(SkWStream*, bool)) {
+ fStream = stream; // we do not own this object.
+ fDoneProc = doneProc;
+ fState = kBetweenPages_State;
+}
+
+SkDocument::~SkDocument() {
+ this->close();
+}
+
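+// SkDocument is a small state machine: beginPage()/endPage() move between kBetweenPages_State
+// and kInPage_State, and close() (or abort()) moves to kClosed_State. beginPage() and close()
+// loop so that a still-open page is implicitly ended first.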
+SkCanvas* SkDocument::beginPage(SkScalar width, SkScalar height,
+ const SkRect* content) {
+ if (width <= 0 || height <= 0) {
+ return nullptr;
+ }
+
+ SkRect outer = SkRect::MakeWH(width, height);
+ SkRect inner;
+ if (content) {
+ inner = *content;
+ if (!inner.intersect(outer)) {
+ return nullptr;
+ }
+ } else {
+ inner = outer;
+ }
+
+ for (;;) {
+ switch (fState) {
+ case kBetweenPages_State:
+ fState = kInPage_State;
+ return this->onBeginPage(width, height, inner);
+ case kInPage_State:
+ this->endPage();
+ break;
+ case kClosed_State:
+ return nullptr;
+ }
+ }
+ SkDEBUGFAIL("never get here");
+ return nullptr;
+}
+
+void SkDocument::endPage() {
+ if (kInPage_State == fState) {
+ fState = kBetweenPages_State;
+ this->onEndPage();
+ }
+}
+
+void SkDocument::close() {
+ for (;;) {
+ switch (fState) {
+ case kBetweenPages_State: {
+ fState = kClosed_State;
+ this->onClose(fStream);
+
+ if (fDoneProc) {
+ fDoneProc(fStream, false);
+ }
+ // we don't own the stream, but we mark it nullptr since we can
+ // no longer write to it.
+ fStream = nullptr;
+ return;
+ }
+ case kInPage_State:
+ this->endPage();
+ break;
+ case kClosed_State:
+ return;
+ }
+ }
+}
+
+void SkDocument::abort() {
+ this->onAbort();
+
+ fState = kClosed_State;
+ if (fDoneProc) {
+ fDoneProc(fStream, true);
+ }
+ // we don't own the stream, but we mark it nullptr since we can
+ // no longer write to it.
+ fStream = nullptr;
+}
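+
+/* A minimal usage sketch (illustrative; assumes a concrete factory such as SkDocument::MakePDF,
+ which lives outside this file, and an SkWStream named outputStream):
+
+ sk_sp<SkDocument> doc = SkDocument::MakePDF(&outputStream);
+ SkCanvas* page = doc->beginPage(612, 792);
+ page->drawColor(SK_ColorWHITE); // ... draw the page content ...
+ doc->endPage();
+ doc->close(); // finishes writing to the stream
+*/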
diff --git a/gfx/skia/skia/src/core/SkDraw.cpp b/gfx/skia/skia/src/core/SkDraw.cpp
new file mode 100644
index 000000000..72b292d30
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDraw.cpp
@@ -0,0 +1,2120 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#define __STDC_LIMIT_MACROS
+
+#include "SkDraw.h"
+#include "SkBlendModePriv.h"
+#include "SkBlitter.h"
+#include "SkCanvas.h"
+#include "SkColorPriv.h"
+#include "SkDevice.h"
+#include "SkDeviceLooper.h"
+#include "SkFindAndPlaceGlyph.h"
+#include "SkFixed.h"
+#include "SkMaskFilter.h"
+#include "SkMatrix.h"
+#include "SkPaint.h"
+#include "SkPathEffect.h"
+#include "SkRasterClip.h"
+#include "SkRasterizer.h"
+#include "SkRRect.h"
+#include "SkScan.h"
+#include "SkShader.h"
+#include "SkSmallAllocator.h"
+#include "SkString.h"
+#include "SkStroke.h"
+#include "SkStrokeRec.h"
+#include "SkTemplates.h"
+#include "SkTextMapStateProc.h"
+#include "SkTLazy.h"
+#include "SkUtils.h"
+#include "SkVertState.h"
+#include "SkXfermode.h"
+
+#include "SkBitmapProcShader.h"
+#include "SkDrawProcs.h"
+#include "SkMatrixUtils.h"
+
+//#define TRACE_BITMAP_DRAWS
+
+// Helper function to fix code gen bug on ARM64.
+// See SkFindAndPlaceGlyph.h for more details.
+void FixGCC49Arm64Bug(int v) { }
+
+/** Helper for allocating small blitters on the stack.
+ */
+class SkAutoBlitterChoose : SkNoncopyable {
+public:
+ SkAutoBlitterChoose() {
+ fBlitter = nullptr;
+ }
+ SkAutoBlitterChoose(const SkPixmap& dst, const SkMatrix& matrix,
+ const SkPaint& paint, bool drawCoverage = false) {
+ fBlitter = SkBlitter::Choose(dst, matrix, paint, &fAllocator, drawCoverage);
+ }
+
+ SkBlitter* operator->() { return fBlitter; }
+ SkBlitter* get() const { return fBlitter; }
+
+ void choose(const SkPixmap& dst, const SkMatrix& matrix,
+ const SkPaint& paint, bool drawCoverage = false) {
+ SkASSERT(!fBlitter);
+ fBlitter = SkBlitter::Choose(dst, matrix, paint, &fAllocator, drawCoverage);
+ }
+
+private:
+ // Owned by fAllocator, which will handle the delete.
+ SkBlitter* fBlitter;
+ SkTBlitterAllocator fAllocator;
+};
+#define SkAutoBlitterChoose(...) SK_REQUIRE_LOCAL_VAR(SkAutoBlitterChoose)
+
+/**
+ * Since we are providing the storage for the shader (to avoid the perf cost
+ * of calling new) we insist that in our destructor we can account for all
+ * owners of the shader.
+ */
+class SkAutoBitmapShaderInstall : SkNoncopyable {
+public:
+ SkAutoBitmapShaderInstall(const SkBitmap& src, const SkPaint& paint,
+ const SkMatrix* localMatrix = nullptr)
+ : fPaint(paint) /* makes a copy of the paint */ {
+ fPaint.setShader(SkMakeBitmapShader(src, SkShader::kClamp_TileMode,
+ SkShader::kClamp_TileMode, localMatrix,
+ kNever_SkCopyPixelsMode,
+ &fAllocator));
+ // we deliberately left the shader with an owner-count of 2
+ fPaint.getShader()->ref();
+ SkASSERT(2 == fPaint.getShader()->getRefCnt());
+ }
+
+ ~SkAutoBitmapShaderInstall() {
+ // since fAllocator will destroy shader, we insist that owners == 2
+ SkASSERT(2 == fPaint.getShader()->getRefCnt());
+
+ fPaint.setShader(nullptr); // unref the shader by 1
+
+ }
+
+ // return the new paint that has the shader applied
+ const SkPaint& paintWithShader() const { return fPaint; }
+
+private:
+ // copy of caller's paint (which we then modify)
+ SkPaint fPaint;
+ // Stores the shader.
+ SkTBlitterAllocator fAllocator;
+};
+#define SkAutoBitmapShaderInstall(...) SK_REQUIRE_LOCAL_VAR(SkAutoBitmapShaderInstall)
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkDraw::SkDraw() {
+ sk_bzero(this, sizeof(*this));
+}
+
+bool SkDraw::computeConservativeLocalClipBounds(SkRect* localBounds) const {
+ if (fRC->isEmpty()) {
+ return false;
+ }
+
+ SkMatrix inverse;
+ if (!fMatrix->invert(&inverse)) {
+ return false;
+ }
+
+ SkIRect devBounds = fRC->getBounds();
+ // outset to have slop for antialiasing and hairlines
+ devBounds.outset(1, 1);
+ inverse.mapRect(localBounds, SkRect::Make(devBounds));
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef void (*BitmapXferProc)(void* pixels, size_t bytes, uint32_t data);
+
+static void D_Clear_BitmapXferProc(void* pixels, size_t bytes, uint32_t) {
+ sk_bzero(pixels, bytes);
+}
+
+static void D_Dst_BitmapXferProc(void*, size_t, uint32_t data) {}
+
+static void D32_Src_BitmapXferProc(void* pixels, size_t bytes, uint32_t data) {
+ sk_memset32((uint32_t*)pixels, data, SkToInt(bytes >> 2));
+}
+
+static void D16_Src_BitmapXferProc(void* pixels, size_t bytes, uint32_t data) {
+ sk_memset16((uint16_t*)pixels, data, SkToInt(bytes >> 1));
+}
+
+static void DA8_Src_BitmapXferProc(void* pixels, size_t bytes, uint32_t data) {
+ memset(pixels, data, bytes);
+}
+
+static BitmapXferProc ChooseBitmapXferProc(const SkPixmap& dst, const SkPaint& paint,
+ uint32_t* data) {
+ // todo: we can apply colorfilter up front if no shader, so we wouldn't
+ // need to abort this fastpath
+ if (paint.getShader() || paint.getColorFilter()) {
+ return nullptr;
+ }
+
+ SkBlendMode mode = paint.getBlendMode();
+ SkColor color = paint.getColor();
+
+ // collapse modes based on color...
+ if (SkBlendMode::kSrcOver == mode) {
+ unsigned alpha = SkColorGetA(color);
+ if (0 == alpha) {
+ mode = SkBlendMode::kDst;
+ } else if (0xFF == alpha) {
+ mode = SkBlendMode::kSrc;
+ }
+ }
+
+ switch (mode) {
+ case SkBlendMode::kClear:
+// SkDebugf("--- D_Clear_BitmapXferProc\n");
+ return D_Clear_BitmapXferProc; // ignore data
+ case SkBlendMode::kDst:
+// SkDebugf("--- D_Dst_BitmapXferProc\n");
+ return D_Dst_BitmapXferProc; // ignore data
+ case SkBlendMode::kSrc: {
+ /*
+ should I worry about dithering for the lower depths?
+ */
+ SkPMColor pmc = SkPreMultiplyColor(color);
+ switch (dst.colorType()) {
+ case kN32_SkColorType:
+ if (data) {
+ *data = pmc;
+ }
+// SkDebugf("--- D32_Src_BitmapXferProc\n");
+ return D32_Src_BitmapXferProc;
+ case kRGB_565_SkColorType:
+ if (data) {
+ *data = SkPixel32ToPixel16(pmc);
+ }
+// SkDebugf("--- D16_Src_BitmapXferProc\n");
+ return D16_Src_BitmapXferProc;
+ case kAlpha_8_SkColorType:
+ if (data) {
+ *data = SkGetPackedA32(pmc);
+ }
+// SkDebugf("--- DA8_Src_BitmapXferProc\n");
+ return DA8_Src_BitmapXferProc;
+ default:
+ break;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return nullptr;
+}
+
+static void CallBitmapXferProc(const SkPixmap& dst, const SkIRect& rect, BitmapXferProc proc,
+ uint32_t procData) {
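+ // bytes-per-pixel expressed as a shift, so widthBytes below is just rect.width() << shift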
+ int shiftPerPixel;
+ switch (dst.colorType()) {
+ case kN32_SkColorType:
+ shiftPerPixel = 2;
+ break;
+ case kRGB_565_SkColorType:
+ shiftPerPixel = 1;
+ break;
+ case kAlpha_8_SkColorType:
+ shiftPerPixel = 0;
+ break;
+ default:
+ SkDEBUGFAIL("Can't use xferproc on this config");
+ return;
+ }
+
+ uint8_t* pixels = (uint8_t*)dst.writable_addr();
+ SkASSERT(pixels);
+ const size_t rowBytes = dst.rowBytes();
+ const int widthBytes = rect.width() << shiftPerPixel;
+
+ // skip down to the first scanline and X position
+ pixels += rect.fTop * rowBytes + (rect.fLeft << shiftPerPixel);
+ for (int scans = rect.height() - 1; scans >= 0; --scans) {
+ proc(pixels, widthBytes, procData);
+ pixels += rowBytes;
+ }
+}
+
+void SkDraw::drawPaint(const SkPaint& paint) const {
+ SkDEBUGCODE(this->validate();)
+
+ if (fRC->isEmpty()) {
+ return;
+ }
+
+ SkIRect devRect;
+ devRect.set(0, 0, fDst.width(), fDst.height());
+
+ if (fRC->isBW()) {
+ /* If we don't have a shader (i.e. we're just a solid color), it may
+ be faster to operate directly on the device bitmap rather than invoking
+ a blitter. This is especially true for xfermodes, which would otherwise
+ require a redundant color shader. Since we're drawing everywhere
+ in the clip, we don't have to worry about antialiasing.
+ */
+ uint32_t procData = 0; // to avoid the warning
+ BitmapXferProc proc = ChooseBitmapXferProc(fDst, paint, &procData);
+ if (proc) {
+ if (D_Dst_BitmapXferProc == proc) { // nothing to do
+ return;
+ }
+
+ SkRegion::Iterator iter(fRC->bwRgn());
+ while (!iter.done()) {
+ CallBitmapXferProc(fDst, iter.rect(), proc, procData);
+ iter.next();
+ }
+ return;
+ }
+ }
+
+ // normal case: use a blitter
+ SkAutoBlitterChoose blitter(fDst, *fMatrix, paint);
+ SkScan::FillIRect(devRect, *fRC, blitter.get());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct PtProcRec {
+ SkCanvas::PointMode fMode;
+ const SkPaint* fPaint;
+ const SkRegion* fClip;
+ const SkRasterClip* fRC;
+
+ // computed values
+ SkFixed fRadius;
+
+ typedef void (*Proc)(const PtProcRec&, const SkPoint devPts[], int count,
+ SkBlitter*);
+
+ bool init(SkCanvas::PointMode, const SkPaint&, const SkMatrix* matrix,
+ const SkRasterClip*);
+ Proc chooseProc(SkBlitter** blitter);
+
+private:
+ SkAAClipBlitterWrapper fWrapper;
+};
+
+static void bw_pt_rect_hair_proc(const PtProcRec& rec, const SkPoint devPts[],
+ int count, SkBlitter* blitter) {
+ SkASSERT(rec.fClip->isRect());
+ const SkIRect& r = rec.fClip->getBounds();
+
+ for (int i = 0; i < count; i++) {
+ int x = SkScalarFloorToInt(devPts[i].fX);
+ int y = SkScalarFloorToInt(devPts[i].fY);
+ if (r.contains(x, y)) {
+ blitter->blitH(x, y, 1);
+ }
+ }
+}
+
+static void bw_pt_rect_16_hair_proc(const PtProcRec& rec,
+ const SkPoint devPts[], int count,
+ SkBlitter* blitter) {
+ SkASSERT(rec.fRC->isRect());
+ const SkIRect& r = rec.fRC->getBounds();
+ uint32_t value;
+ const SkPixmap* dst = blitter->justAnOpaqueColor(&value);
+ SkASSERT(dst);
+
+ uint16_t* addr = dst->writable_addr16(0, 0);
+ size_t rb = dst->rowBytes();
+
+ for (int i = 0; i < count; i++) {
+ int x = SkScalarFloorToInt(devPts[i].fX);
+ int y = SkScalarFloorToInt(devPts[i].fY);
+ if (r.contains(x, y)) {
+ ((uint16_t*)((char*)addr + y * rb))[x] = SkToU16(value);
+ }
+ }
+}
+
+static void bw_pt_rect_32_hair_proc(const PtProcRec& rec,
+ const SkPoint devPts[], int count,
+ SkBlitter* blitter) {
+ SkASSERT(rec.fRC->isRect());
+ const SkIRect& r = rec.fRC->getBounds();
+ uint32_t value;
+ const SkPixmap* dst = blitter->justAnOpaqueColor(&value);
+ SkASSERT(dst);
+
+ SkPMColor* addr = dst->writable_addr32(0, 0);
+ size_t rb = dst->rowBytes();
+
+ for (int i = 0; i < count; i++) {
+ int x = SkScalarFloorToInt(devPts[i].fX);
+ int y = SkScalarFloorToInt(devPts[i].fY);
+ if (r.contains(x, y)) {
+ ((SkPMColor*)((char*)addr + y * rb))[x] = value;
+ }
+ }
+}
+
+static void bw_pt_hair_proc(const PtProcRec& rec, const SkPoint devPts[],
+ int count, SkBlitter* blitter) {
+ for (int i = 0; i < count; i++) {
+ int x = SkScalarFloorToInt(devPts[i].fX);
+ int y = SkScalarFloorToInt(devPts[i].fY);
+ if (rec.fClip->contains(x, y)) {
+ blitter->blitH(x, y, 1);
+ }
+ }
+}
+
+static void bw_line_hair_proc(const PtProcRec& rec, const SkPoint devPts[],
+ int count, SkBlitter* blitter) {
+ for (int i = 0; i < count; i += 2) {
+ SkScan::HairLine(&devPts[i], 2, *rec.fRC, blitter);
+ }
+}
+
+static void bw_poly_hair_proc(const PtProcRec& rec, const SkPoint devPts[],
+ int count, SkBlitter* blitter) {
+ SkScan::HairLine(devPts, count, *rec.fRC, blitter);
+}
+
+// aa versions
+
+static void aa_line_hair_proc(const PtProcRec& rec, const SkPoint devPts[],
+ int count, SkBlitter* blitter) {
+ for (int i = 0; i < count; i += 2) {
+ SkScan::AntiHairLine(&devPts[i], 2, *rec.fRC, blitter);
+ }
+}
+
+static void aa_poly_hair_proc(const PtProcRec& rec, const SkPoint devPts[],
+ int count, SkBlitter* blitter) {
+ SkScan::AntiHairLine(devPts, count, *rec.fRC, blitter);
+}
+
+// square procs (strokeWidth > 0, but the matrix is square-scale (sx == sy))
+
+static void bw_square_proc(const PtProcRec& rec, const SkPoint devPts[],
+ int count, SkBlitter* blitter) {
+ const SkFixed radius = rec.fRadius;
+ for (int i = 0; i < count; i++) {
+ SkFixed x = SkScalarToFixed(devPts[i].fX);
+ SkFixed y = SkScalarToFixed(devPts[i].fY);
+
+ SkXRect r;
+ r.fLeft = x - radius;
+ r.fTop = y - radius;
+ r.fRight = x + radius;
+ r.fBottom = y + radius;
+
+ SkScan::FillXRect(r, *rec.fRC, blitter);
+ }
+}
+
+static void aa_square_proc(const PtProcRec& rec, const SkPoint devPts[],
+ int count, SkBlitter* blitter) {
+ const SkFixed radius = rec.fRadius;
+ for (int i = 0; i < count; i++) {
+ SkFixed x = SkScalarToFixed(devPts[i].fX);
+ SkFixed y = SkScalarToFixed(devPts[i].fY);
+
+ SkXRect r;
+ r.fLeft = x - radius;
+ r.fTop = y - radius;
+ r.fRight = x + radius;
+ r.fBottom = y + radius;
+
+ SkScan::AntiFillXRect(r, *rec.fRC, blitter);
+ }
+}
+
+// If this guy returns true, then chooseProc() must return a valid proc
+bool PtProcRec::init(SkCanvas::PointMode mode, const SkPaint& paint,
+ const SkMatrix* matrix, const SkRasterClip* rc) {
+ if ((unsigned)mode > (unsigned)SkCanvas::kPolygon_PointMode) {
+ return false;
+ }
+
+ if (paint.getPathEffect()) {
+ return false;
+ }
+ SkScalar width = paint.getStrokeWidth();
+ if (0 == width) {
+ fMode = mode;
+ fPaint = &paint;
+ fClip = nullptr;
+ fRC = rc;
+ fRadius = SK_FixedHalf;
+ return true;
+ }
+ if (paint.getStrokeCap() != SkPaint::kRound_Cap &&
+ matrix->isScaleTranslate() && SkCanvas::kPoints_PointMode == mode) {
+ SkScalar sx = matrix->get(SkMatrix::kMScaleX);
+ SkScalar sy = matrix->get(SkMatrix::kMScaleY);
+ if (SkScalarNearlyZero(sx - sy)) {
+ if (sx < 0) {
+ sx = -sx;
+ }
+
+ fMode = mode;
+ fPaint = &paint;
+ fClip = nullptr;
+ fRC = rc;
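+ // device-space radius: half of the stroke width scaled into device space, in 16.16 fixed point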
+ fRadius = SkScalarToFixed(SkScalarMul(width, sx)) >> 1;
+ return true;
+ }
+ }
+ return false;
+}
+
+PtProcRec::Proc PtProcRec::chooseProc(SkBlitter** blitterPtr) {
+ Proc proc = nullptr;
+
+ SkBlitter* blitter = *blitterPtr;
+ if (fRC->isBW()) {
+ fClip = &fRC->bwRgn();
+ } else {
+ fWrapper.init(*fRC, blitter);
+ fClip = &fWrapper.getRgn();
+ blitter = fWrapper.getBlitter();
+ *blitterPtr = blitter;
+ }
+
+ // for our arrays
+ SkASSERT(0 == SkCanvas::kPoints_PointMode);
+ SkASSERT(1 == SkCanvas::kLines_PointMode);
+ SkASSERT(2 == SkCanvas::kPolygon_PointMode);
+ SkASSERT((unsigned)fMode <= (unsigned)SkCanvas::kPolygon_PointMode);
+
+ if (fPaint->isAntiAlias()) {
+ if (0 == fPaint->getStrokeWidth()) {
+ static const Proc gAAProcs[] = {
+ aa_square_proc, aa_line_hair_proc, aa_poly_hair_proc
+ };
+ proc = gAAProcs[fMode];
+ } else if (fPaint->getStrokeCap() != SkPaint::kRound_Cap) {
+ SkASSERT(SkCanvas::kPoints_PointMode == fMode);
+ proc = aa_square_proc;
+ }
+ } else { // BW
+ if (fRadius <= SK_FixedHalf) { // small radii and hairline
+ if (SkCanvas::kPoints_PointMode == fMode && fClip->isRect()) {
+ uint32_t value;
+ const SkPixmap* bm = blitter->justAnOpaqueColor(&value);
+ if (bm && kRGB_565_SkColorType == bm->colorType()) {
+ proc = bw_pt_rect_16_hair_proc;
+ } else if (bm && kN32_SkColorType == bm->colorType()) {
+ proc = bw_pt_rect_32_hair_proc;
+ } else {
+ proc = bw_pt_rect_hair_proc;
+ }
+ } else {
+ static Proc gBWProcs[] = {
+ bw_pt_hair_proc, bw_line_hair_proc, bw_poly_hair_proc
+ };
+ proc = gBWProcs[fMode];
+ }
+ } else {
+ proc = bw_square_proc;
+ }
+ }
+ return proc;
+}
+
+// each of these costs 8 bytes of stack space, so don't make it too large
+// must be even for lines/polygon to work
+#define MAX_DEV_PTS 32
+
+void SkDraw::drawPoints(SkCanvas::PointMode mode, size_t count,
+ const SkPoint pts[], const SkPaint& paint,
+ bool forceUseDevice) const {
+ // if we're in lines mode, force count to be even
+ if (SkCanvas::kLines_PointMode == mode) {
+ count &= ~(size_t)1;
+ }
+
+ if ((long)count <= 0) {
+ return;
+ }
+
+ SkASSERT(pts != nullptr);
+ SkDEBUGCODE(this->validate();)
+
+ // nothing to draw
+ if (fRC->isEmpty()) {
+ return;
+ }
+
+ PtProcRec rec;
+ if (!forceUseDevice && rec.init(mode, paint, fMatrix, fRC)) {
+ SkAutoBlitterChoose blitter(fDst, *fMatrix, paint);
+
+ SkPoint devPts[MAX_DEV_PTS];
+ const SkMatrix* matrix = fMatrix;
+ SkBlitter* bltr = blitter.get();
+ PtProcRec::Proc proc = rec.chooseProc(&bltr);
+ // we have to back up subsequent passes if we're in polygon mode
+ const size_t backup = (SkCanvas::kPolygon_PointMode == mode);
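+ // (re-feeding the last point of one batch as the first point of the next keeps the polygon
+ // connected across MAX_DEV_PTS-sized batches)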
+
+ do {
+ int n = SkToInt(count);
+ if (n > MAX_DEV_PTS) {
+ n = MAX_DEV_PTS;
+ }
+ matrix->mapPoints(devPts, pts, n);
+ proc(rec, devPts, n, bltr);
+ pts += n - backup;
+ SkASSERT(SkToInt(count) >= n);
+ count -= n;
+ if (count > 0) {
+ count += backup;
+ }
+ } while (count != 0);
+ } else {
+ switch (mode) {
+ case SkCanvas::kPoints_PointMode: {
+ // temporarily mark the paint as filling.
+ SkPaint newPaint(paint);
+ newPaint.setStyle(SkPaint::kFill_Style);
+
+ SkScalar width = newPaint.getStrokeWidth();
+ SkScalar radius = SkScalarHalf(width);
+
+ if (newPaint.getStrokeCap() == SkPaint::kRound_Cap) {
+ SkPath path;
+ SkMatrix preMatrix;
+
+ path.addCircle(0, 0, radius);
+ for (size_t i = 0; i < count; i++) {
+ preMatrix.setTranslate(pts[i].fX, pts[i].fY);
+ // pass true for the last point, since we can modify
+ // the path then
+ path.setIsVolatile((count-1) == i);
+ if (fDevice) {
+ fDevice->drawPath(*this, path, newPaint, &preMatrix,
+ (count-1) == i);
+ } else {
+ this->drawPath(path, newPaint, &preMatrix,
+ (count-1) == i);
+ }
+ }
+ } else {
+ SkRect r;
+
+ for (size_t i = 0; i < count; i++) {
+ r.fLeft = pts[i].fX - radius;
+ r.fTop = pts[i].fY - radius;
+ r.fRight = r.fLeft + width;
+ r.fBottom = r.fTop + width;
+ if (fDevice) {
+ fDevice->drawRect(*this, r, newPaint);
+ } else {
+ this->drawRect(r, newPaint);
+ }
+ }
+ }
+ break;
+ }
+ case SkCanvas::kLines_PointMode:
+ if (2 == count && paint.getPathEffect()) {
+ // most likely a dashed line - see if it is one of the ones
+ // we can accelerate
+ SkStrokeRec rec(paint);
+ SkPathEffect::PointData pointData;
+
+ SkPath path;
+ path.moveTo(pts[0]);
+ path.lineTo(pts[1]);
+
+ SkRect cullRect = SkRect::Make(fRC->getBounds());
+
+ if (paint.getPathEffect()->asPoints(&pointData, path, rec,
+ *fMatrix, &cullRect)) {
+ // 'asPoints' managed to find some fast path
+
+ SkPaint newP(paint);
+ newP.setPathEffect(nullptr);
+ newP.setStyle(SkPaint::kFill_Style);
+
+ if (!pointData.fFirst.isEmpty()) {
+ if (fDevice) {
+ fDevice->drawPath(*this, pointData.fFirst, newP);
+ } else {
+ this->drawPath(pointData.fFirst, newP);
+ }
+ }
+
+ if (!pointData.fLast.isEmpty()) {
+ if (fDevice) {
+ fDevice->drawPath(*this, pointData.fLast, newP);
+ } else {
+ this->drawPath(pointData.fLast, newP);
+ }
+ }
+
+ if (pointData.fSize.fX == pointData.fSize.fY) {
+ // The rest of the dashed line can just be drawn as points
+ SkASSERT(pointData.fSize.fX == SkScalarHalf(newP.getStrokeWidth()));
+
+ if (SkPathEffect::PointData::kCircles_PointFlag & pointData.fFlags) {
+ newP.setStrokeCap(SkPaint::kRound_Cap);
+ } else {
+ newP.setStrokeCap(SkPaint::kButt_Cap);
+ }
+
+ if (fDevice) {
+ fDevice->drawPoints(*this,
+ SkCanvas::kPoints_PointMode,
+ pointData.fNumPoints,
+ pointData.fPoints,
+ newP);
+ } else {
+ this->drawPoints(SkCanvas::kPoints_PointMode,
+ pointData.fNumPoints,
+ pointData.fPoints,
+ newP,
+ forceUseDevice);
+ }
+ break;
+ } else {
+ // The rest of the dashed line must be drawn as rects
+ SkASSERT(!(SkPathEffect::PointData::kCircles_PointFlag &
+ pointData.fFlags));
+
+ SkRect r;
+
+ for (int i = 0; i < pointData.fNumPoints; ++i) {
+ r.set(pointData.fPoints[i].fX - pointData.fSize.fX,
+ pointData.fPoints[i].fY - pointData.fSize.fY,
+ pointData.fPoints[i].fX + pointData.fSize.fX,
+ pointData.fPoints[i].fY + pointData.fSize.fY);
+ if (fDevice) {
+ fDevice->drawRect(*this, r, newP);
+ } else {
+ this->drawRect(r, newP);
+ }
+ }
+ }
+
+ break;
+ }
+ }
+ // couldn't take fast path so fall through!
+ case SkCanvas::kPolygon_PointMode: {
+ count -= 1;
+ SkPath path;
+ SkPaint p(paint);
+ p.setStyle(SkPaint::kStroke_Style);
+ size_t inc = (SkCanvas::kLines_PointMode == mode) ? 2 : 1;
+ path.setIsVolatile(true);
+ for (size_t i = 0; i < count; i += inc) {
+ path.moveTo(pts[i]);
+ path.lineTo(pts[i+1]);
+ if (fDevice) {
+ fDevice->drawPath(*this, path, p, nullptr, true);
+ } else {
+ this->drawPath(path, p, nullptr, true);
+ }
+ path.rewind();
+ }
+ break;
+ }
+ }
+ }
+}
+
+static inline SkPoint compute_stroke_size(const SkPaint& paint, const SkMatrix& matrix) {
+ SkASSERT(matrix.rectStaysRect());
+ SkASSERT(SkPaint::kFill_Style != paint.getStyle());
+
+ SkVector size;
+ SkPoint pt = { paint.getStrokeWidth(), paint.getStrokeWidth() };
+ matrix.mapVectors(&size, &pt, 1);
+ return SkPoint::Make(SkScalarAbs(size.fX), SkScalarAbs(size.fY));
+}
+
+static bool easy_rect_join(const SkPaint& paint, const SkMatrix& matrix,
+ SkPoint* strokeSize) {
+ if (SkPaint::kMiter_Join != paint.getStrokeJoin() ||
+ paint.getStrokeMiter() < SK_ScalarSqrt2) {
+ return false;
+ }
+
+ *strokeSize = compute_stroke_size(paint, matrix);
+ return true;
+}
+
+SkDraw::RectType SkDraw::ComputeRectType(const SkPaint& paint,
+ const SkMatrix& matrix,
+ SkPoint* strokeSize) {
+ RectType rtype;
+ const SkScalar width = paint.getStrokeWidth();
+ const bool zeroWidth = (0 == width);
+ SkPaint::Style style = paint.getStyle();
+
+ if ((SkPaint::kStrokeAndFill_Style == style) && zeroWidth) {
+ style = SkPaint::kFill_Style;
+ }
+
+ if (paint.getPathEffect() || paint.getMaskFilter() ||
+ paint.getRasterizer() || !matrix.rectStaysRect() ||
+ SkPaint::kStrokeAndFill_Style == style) {
+ rtype = kPath_RectType;
+ } else if (SkPaint::kFill_Style == style) {
+ rtype = kFill_RectType;
+ } else if (zeroWidth) {
+ rtype = kHair_RectType;
+ } else if (easy_rect_join(paint, matrix, strokeSize)) {
+ rtype = kStroke_RectType;
+ } else {
+ rtype = kPath_RectType;
+ }
+ return rtype;
+}
+
+static const SkPoint* rect_points(const SkRect& r) {
+ return SkTCast<const SkPoint*>(&r);
+}
+
+static SkPoint* rect_points(SkRect& r) {
+ return SkTCast<SkPoint*>(&r);
+}
+
+void SkDraw::drawRect(const SkRect& prePaintRect, const SkPaint& paint,
+ const SkMatrix* paintMatrix, const SkRect* postPaintRect) const {
+ SkDEBUGCODE(this->validate();)
+
+ // nothing to draw
+ if (fRC->isEmpty()) {
+ return;
+ }
+
+ const SkMatrix* matrix;
+ SkMatrix combinedMatrixStorage;
+ if (paintMatrix) {
+ SkASSERT(postPaintRect);
+ combinedMatrixStorage.setConcat(*fMatrix, *paintMatrix);
+ matrix = &combinedMatrixStorage;
+ } else {
+ SkASSERT(!postPaintRect);
+ matrix = fMatrix;
+ }
+
+ SkPoint strokeSize;
+ RectType rtype = ComputeRectType(paint, *fMatrix, &strokeSize);
+
+ if (kPath_RectType == rtype) {
+ SkDraw draw(*this);
+ if (paintMatrix) {
+ draw.fMatrix = matrix;
+ }
+ SkPath tmp;
+ tmp.addRect(prePaintRect);
+ tmp.setFillType(SkPath::kWinding_FillType);
+ draw.drawPath(tmp, paint, nullptr, true);
+ return;
+ }
+
+ SkRect devRect;
+ const SkRect& paintRect = paintMatrix ? *postPaintRect : prePaintRect;
+ // skip the paintMatrix when transforming the rect by the CTM
+ fMatrix->mapPoints(rect_points(devRect), rect_points(paintRect), 2);
+ devRect.sort();
+
+ // look for the quick exit, before we build a blitter
+ SkRect bbox = devRect;
+ if (paint.getStyle() != SkPaint::kFill_Style) {
+ // extra space for hairlines
+ if (paint.getStrokeWidth() == 0) {
+ bbox.outset(1, 1);
+ } else {
+ // For kStroke_RectType, strokeSize is already computed.
+ const SkPoint& ssize = (kStroke_RectType == rtype)
+ ? strokeSize
+ : compute_stroke_size(paint, *fMatrix);
+ bbox.outset(SkScalarHalf(ssize.x()), SkScalarHalf(ssize.y()));
+ }
+ }
+
+ SkIRect ir = bbox.roundOut();
+ if (fRC->quickReject(ir)) {
+ return;
+ }
+
+ SkDeviceLooper looper(fDst, *fRC, ir, paint.isAntiAlias());
+ while (looper.next()) {
+ SkRect localDevRect;
+ looper.mapRect(&localDevRect, devRect);
+ SkMatrix localMatrix;
+ looper.mapMatrix(&localMatrix, *matrix);
+
+ SkAutoBlitterChoose blitterStorage(looper.getPixmap(), localMatrix, paint);
+ const SkRasterClip& clip = looper.getRC();
+ SkBlitter* blitter = blitterStorage.get();
+
+ // we want to "fill" if we are kFill or kStrokeAndFill, since in the latter
+ // case we are also hairline (if we've gotten to here), which devolves to
+ // effectively just kFill
+ switch (rtype) {
+ case kFill_RectType:
+ if (paint.isAntiAlias()) {
+ SkScan::AntiFillRect(localDevRect, clip, blitter);
+ } else {
+ SkScan::FillRect(localDevRect, clip, blitter);
+ }
+ break;
+ case kStroke_RectType:
+ if (paint.isAntiAlias()) {
+ SkScan::AntiFrameRect(localDevRect, strokeSize, clip, blitter);
+ } else {
+ SkScan::FrameRect(localDevRect, strokeSize, clip, blitter);
+ }
+ break;
+ case kHair_RectType:
+ if (paint.isAntiAlias()) {
+ SkScan::AntiHairRect(localDevRect, clip, blitter);
+ } else {
+ SkScan::HairRect(localDevRect, clip, blitter);
+ }
+ break;
+ default:
+ SkDEBUGFAIL("bad rtype");
+ }
+ }
+}
+
+void SkDraw::drawDevMask(const SkMask& srcM, const SkPaint& paint) const {
+ if (srcM.fBounds.isEmpty()) {
+ return;
+ }
+
+ const SkMask* mask = &srcM;
+
+ SkMask dstM;
+ if (paint.getMaskFilter() &&
+ paint.getMaskFilter()->filterMask(&dstM, srcM, *fMatrix, nullptr)) {
+ mask = &dstM;
+ }
+ SkAutoMaskFreeImage ami(dstM.fImage);
+
+ SkAutoBlitterChoose blitterChooser(fDst, *fMatrix, paint);
+ SkBlitter* blitter = blitterChooser.get();
+
+ SkAAClipBlitterWrapper wrapper;
+ const SkRegion* clipRgn;
+
+ if (fRC->isBW()) {
+ clipRgn = &fRC->bwRgn();
+ } else {
+ wrapper.init(*fRC, blitter);
+ clipRgn = &wrapper.getRgn();
+ blitter = wrapper.getBlitter();
+ }
+ blitter->blitMaskRegion(*mask, *clipRgn);
+}
+
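+// cheap approximation of Euclidean length: max(|x|,|y|) + min(|x|,|y|)/2 -- exact on the axes,
+// slightly high on the diagonal, which is good enough for the hairline heuristic below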
+static SkScalar fast_len(const SkVector& vec) {
+ SkScalar x = SkScalarAbs(vec.fX);
+ SkScalar y = SkScalarAbs(vec.fY);
+ if (x < y) {
+ SkTSwap(x, y);
+ }
+ return x + SkScalarHalf(y);
+}
+
+bool SkDrawTreatAAStrokeAsHairline(SkScalar strokeWidth, const SkMatrix& matrix,
+ SkScalar* coverage) {
+ SkASSERT(strokeWidth > 0);
+ // We need to try to fake a thick-stroke with a modulated hairline.
+
+ if (matrix.hasPerspective()) {
+ return false;
+ }
+
+ SkVector src[2], dst[2];
+ src[0].set(strokeWidth, 0);
+ src[1].set(0, strokeWidth);
+ matrix.mapVectors(dst, src, 2);
+ SkScalar len0 = fast_len(dst[0]);
+ SkScalar len1 = fast_len(dst[1]);
+ if (len0 <= SK_Scalar1 && len1 <= SK_Scalar1) {
+ if (coverage) {
+ *coverage = SkScalarAve(len0, len1);
+ }
+ return true;
+ }
+ return false;
+}
+
+void SkDraw::drawRRect(const SkRRect& rrect, const SkPaint& paint) const {
+ SkDEBUGCODE(this->validate());
+
+ if (fRC->isEmpty()) {
+ return;
+ }
+
+ {
+ // TODO: Investigate optimizing these options. They are in the same
+ // order as SkDraw::drawPath, which handles each case. It may be
+ // that there is no way to optimize for these using the SkRRect path.
+ SkScalar coverage;
+ if (SkDrawTreatAsHairline(paint, *fMatrix, &coverage)) {
+ goto DRAW_PATH;
+ }
+
+ if (paint.getPathEffect() || paint.getStyle() != SkPaint::kFill_Style) {
+ goto DRAW_PATH;
+ }
+
+ if (paint.getRasterizer()) {
+ goto DRAW_PATH;
+ }
+ }
+
+ if (paint.getMaskFilter()) {
+ // Transform the rrect into device space.
+ SkRRect devRRect;
+ if (rrect.transform(*fMatrix, &devRRect)) {
+ SkAutoBlitterChoose blitter(fDst, *fMatrix, paint);
+ if (paint.getMaskFilter()->filterRRect(devRRect, *fMatrix, *fRC, blitter.get())) {
+ return; // filterRRect() called the blitter, so we're done
+ }
+ }
+ }
+
+DRAW_PATH:
+ // Now fall back to the default case of using a path.
+ SkPath path;
+ path.addRRect(rrect);
+ this->drawPath(path, paint, nullptr, true);
+}
+
+SkScalar SkDraw::ComputeResScaleForStroking(const SkMatrix& matrix) {
+ if (!matrix.hasPerspective()) {
+ SkScalar sx = SkPoint::Length(matrix[SkMatrix::kMScaleX], matrix[SkMatrix::kMSkewY]);
+ SkScalar sy = SkPoint::Length(matrix[SkMatrix::kMSkewX], matrix[SkMatrix::kMScaleY]);
+ if (SkScalarsAreFinite(sx, sy)) {
+ SkScalar scale = SkTMax(sx, sy);
+ if (scale > 0) {
+ return scale;
+ }
+ }
+ }
+ return 1;
+}
+
+void SkDraw::drawDevPath(const SkPath& devPath, const SkPaint& paint, bool drawCoverage,
+ SkBlitter* customBlitter, bool doFill) const {
+ // Do a conservative quick-reject test, since a looper or other modifier may have moved us
+ // out of range.
+ if (!devPath.isInverseFillType()) {
+ // If we're an H or V line, our bounds will be empty. So we bloat here just so we don't
+ // appear empty to the intersects call. This also gives us slop in case we're antialiasing.
+ SkRect pathBounds = devPath.getBounds().makeOutset(1, 1);
+
+ if (paint.getMaskFilter()) {
+ paint.getMaskFilter()->computeFastBounds(pathBounds, &pathBounds);
+
+ // Need to outset the path to work around a bug in blurmaskfilter. When that is fixed
+ // we can remove this hack. See skbug.com/5542
+ pathBounds.outset(7, 7);
+ }
+
+ // Now compare against the clip's bounds
+ if (!SkRect::Make(fRC->getBounds()).intersects(pathBounds)) {
+ return;
+ }
+ }
+
+ SkBlitter* blitter = nullptr;
+ SkAutoBlitterChoose blitterStorage;
+ if (nullptr == customBlitter) {
+ blitterStorage.choose(fDst, *fMatrix, paint, drawCoverage);
+ blitter = blitterStorage.get();
+ } else {
+ blitter = customBlitter;
+ }
+
+ if (paint.getMaskFilter()) {
+ SkStrokeRec::InitStyle style = doFill ? SkStrokeRec::kFill_InitStyle
+ : SkStrokeRec::kHairline_InitStyle;
+ if (paint.getMaskFilter()->filterPath(devPath, *fMatrix, *fRC, blitter, style)) {
+ return; // filterPath() called the blitter, so we're done
+ }
+ }
+
+ void (*proc)(const SkPath&, const SkRasterClip&, SkBlitter*);
+ if (doFill) {
+ if (paint.isAntiAlias()) {
+ proc = SkScan::AntiFillPath;
+ } else {
+ proc = SkScan::FillPath;
+ }
+ } else { // hairline
+ if (paint.isAntiAlias()) {
+ switch (paint.getStrokeCap()) {
+ case SkPaint::kButt_Cap:
+ proc = SkScan::AntiHairPath;
+ break;
+ case SkPaint::kSquare_Cap:
+ proc = SkScan::AntiHairSquarePath;
+ break;
+ case SkPaint::kRound_Cap:
+ proc = SkScan::AntiHairRoundPath;
+ break;
+ default:
+ proc SK_INIT_TO_AVOID_WARNING;
+ SkDEBUGFAIL("unknown paint cap type");
+ }
+ } else {
+ switch (paint.getStrokeCap()) {
+ case SkPaint::kButt_Cap:
+ proc = SkScan::HairPath;
+ break;
+ case SkPaint::kSquare_Cap:
+ proc = SkScan::HairSquarePath;
+ break;
+ case SkPaint::kRound_Cap:
+ proc = SkScan::HairRoundPath;
+ break;
+ default:
+ proc SK_INIT_TO_AVOID_WARNING;
+ SkDEBUGFAIL("unknown paint cap type");
+ }
+ }
+ }
+ proc(devPath, *fRC, blitter);
+}
+
+void SkDraw::drawPath(const SkPath& origSrcPath, const SkPaint& origPaint,
+ const SkMatrix* prePathMatrix, bool pathIsMutable,
+ bool drawCoverage, SkBlitter* customBlitter) const {
+ SkDEBUGCODE(this->validate();)
+
+ // nothing to draw
+ if (fRC->isEmpty()) {
+ return;
+ }
+
+ SkPath* pathPtr = (SkPath*)&origSrcPath;
+ bool doFill = true;
+ SkPath tmpPath;
+ SkMatrix tmpMatrix;
+ const SkMatrix* matrix = fMatrix;
+ tmpPath.setIsVolatile(true);
+
+ if (prePathMatrix) {
+ if (origPaint.getPathEffect() || origPaint.getStyle() != SkPaint::kFill_Style ||
+ origPaint.getRasterizer()) {
+ SkPath* result = pathPtr;
+
+ if (!pathIsMutable) {
+ result = &tmpPath;
+ pathIsMutable = true;
+ }
+ pathPtr->transform(*prePathMatrix, result);
+ pathPtr = result;
+ } else {
+ tmpMatrix.setConcat(*matrix, *prePathMatrix);
+ matrix = &tmpMatrix;
+ }
+ }
+ // at this point we're done with prePathMatrix
+ SkDEBUGCODE(prePathMatrix = (const SkMatrix*)0x50FF8001;)
+
+ SkTCopyOnFirstWrite<SkPaint> paint(origPaint);
+
+ {
+ SkScalar coverage;
+ if (SkDrawTreatAsHairline(origPaint, *matrix, &coverage)) {
+ if (SK_Scalar1 == coverage) {
+ paint.writable()->setStrokeWidth(0);
+ } else if (SkBlendMode_SupportsCoverageAsAlpha(origPaint.getBlendMode())) {
+ U8CPU newAlpha;
+#if 0
+ newAlpha = SkToU8(SkScalarRoundToInt(coverage *
+ origPaint.getAlpha()));
+#else
+ // this is the old technique, which we preserve for now so
+ // we don't change previous results (testing)
+ // the new way seems fine, it's just (a tiny bit) different
+ int scale = (int)SkScalarMul(coverage, 256);
+ newAlpha = origPaint.getAlpha() * scale >> 8;
+#endif
+ SkPaint* writablePaint = paint.writable();
+ writablePaint->setStrokeWidth(0);
+ writablePaint->setAlpha(newAlpha);
+ }
+ }
+ }
+
+ if (paint->getPathEffect() || paint->getStyle() != SkPaint::kFill_Style) {
+ SkRect cullRect;
+ const SkRect* cullRectPtr = nullptr;
+ if (this->computeConservativeLocalClipBounds(&cullRect)) {
+ cullRectPtr = &cullRect;
+ }
+ doFill = paint->getFillPath(*pathPtr, &tmpPath, cullRectPtr,
+ ComputeResScaleForStroking(*fMatrix));
+ pathPtr = &tmpPath;
+ }
+
+ if (paint->getRasterizer()) {
+ SkMask mask;
+ if (paint->getRasterizer()->rasterize(*pathPtr, *matrix,
+ &fRC->getBounds(), paint->getMaskFilter(), &mask,
+ SkMask::kComputeBoundsAndRenderImage_CreateMode)) {
+ this->drawDevMask(mask, *paint);
+ SkMask::FreeImage(mask.fImage);
+ }
+ return;
+ }
+
+ // avoid possibly allocating a new path in transform if we can
+ SkPath* devPathPtr = pathIsMutable ? pathPtr : &tmpPath;
+
+ // transform the path into device space
+ pathPtr->transform(*matrix, devPathPtr);
+
+ this->drawDevPath(*devPathPtr, *paint, drawCoverage, customBlitter, doFill);
+}
+
+void SkDraw::drawBitmapAsMask(const SkBitmap& bitmap, const SkPaint& paint) const {
+ SkASSERT(bitmap.colorType() == kAlpha_8_SkColorType);
+
+ if (SkTreatAsSprite(*fMatrix, bitmap.dimensions(), paint)) {
+ int ix = SkScalarRoundToInt(fMatrix->getTranslateX());
+ int iy = SkScalarRoundToInt(fMatrix->getTranslateY());
+
+ SkAutoPixmapUnlock result;
+ if (!bitmap.requestLock(&result)) {
+ return;
+ }
+ const SkPixmap& pmap = result.pixmap();
+ SkMask mask;
+ mask.fBounds.set(ix, iy, ix + pmap.width(), iy + pmap.height());
+ mask.fFormat = SkMask::kA8_Format;
+ mask.fRowBytes = SkToU32(pmap.rowBytes());
+ // fImage is typed as writable, but in this case it is used read-only
+ mask.fImage = (uint8_t*)pmap.addr8(0, 0);
+
+ this->drawDevMask(mask, paint);
+ } else { // need to xform the bitmap first
+ SkRect r;
+ SkMask mask;
+
+ r.set(0, 0,
+ SkIntToScalar(bitmap.width()), SkIntToScalar(bitmap.height()));
+ fMatrix->mapRect(&r);
+ r.round(&mask.fBounds);
+
+ // set the mask's bounds to the transformed bitmap-bounds,
+ // clipped to the actual device
+ {
+ SkIRect devBounds;
+ devBounds.set(0, 0, fDst.width(), fDst.height());
+ // need intersect(l, t, r, b) on irect
+ if (!mask.fBounds.intersect(devBounds)) {
+ return;
+ }
+ }
+
+ mask.fFormat = SkMask::kA8_Format;
+ mask.fRowBytes = SkAlign4(mask.fBounds.width());
+ size_t size = mask.computeImageSize();
+ if (0 == size) {
+ // the mask is too big to be allocated, draw nothing
+ return;
+ }
+
+ // allocate (and clear) our temp buffer to hold the transformed bitmap
+ SkAutoTMalloc<uint8_t> storage(size);
+ mask.fImage = storage.get();
+ memset(mask.fImage, 0, size);
+
+ // now draw our bitmap(src) into mask(dst), transformed by the matrix
+ {
+ SkBitmap device;
+ device.installPixels(SkImageInfo::MakeA8(mask.fBounds.width(), mask.fBounds.height()),
+ mask.fImage, mask.fRowBytes);
+
+ SkCanvas c(device);
+ // need the unclipped top/left for the translate
+ c.translate(-SkIntToScalar(mask.fBounds.fLeft),
+ -SkIntToScalar(mask.fBounds.fTop));
+ c.concat(*fMatrix);
+
+ // We can't call drawBitmap, or we'll infinitely recurse. Instead
+ // we manually build a shader and draw that into our new mask
+ SkPaint tmpPaint;
+ tmpPaint.setFlags(paint.getFlags());
+ tmpPaint.setFilterQuality(paint.getFilterQuality());
+ SkAutoBitmapShaderInstall install(bitmap, tmpPaint);
+ SkRect rr;
+ rr.set(0, 0, SkIntToScalar(bitmap.width()),
+ SkIntToScalar(bitmap.height()));
+ c.drawRect(rr, install.paintWithShader());
+ }
+ this->drawDevMask(mask, paint);
+ }
+}
+
+static bool clipped_out(const SkMatrix& m, const SkRasterClip& c,
+ const SkRect& srcR) {
+ SkRect dstR;
+ m.mapRect(&dstR, srcR);
+ return c.quickReject(dstR.roundOut());
+}
+
+static bool clipped_out(const SkMatrix& matrix, const SkRasterClip& clip,
+ int width, int height) {
+ SkRect r;
+ r.set(0, 0, SkIntToScalar(width), SkIntToScalar(height));
+ return clipped_out(matrix, clip, r);
+}
+
+static bool clipHandlesSprite(const SkRasterClip& clip, int x, int y, const SkPixmap& pmap) {
+ return clip.isBW() || clip.quickContains(x, y, x + pmap.width(), y + pmap.height());
+}
+
+void SkDraw::drawBitmap(const SkBitmap& bitmap, const SkMatrix& prematrix,
+ const SkRect* dstBounds, const SkPaint& origPaint) const {
+ SkDEBUGCODE(this->validate();)
+
+ // nothing to draw
+ if (fRC->isEmpty() ||
+ bitmap.width() == 0 || bitmap.height() == 0 ||
+ bitmap.colorType() == kUnknown_SkColorType) {
+ return;
+ }
+
+ SkTCopyOnFirstWrite<SkPaint> paint(origPaint);
+ if (origPaint.getStyle() != SkPaint::kFill_Style) {
+ paint.writable()->setStyle(SkPaint::kFill_Style);
+ }
+
+ SkMatrix matrix;
+ matrix.setConcat(*fMatrix, prematrix);
+
+ if (clipped_out(matrix, *fRC, bitmap.width(), bitmap.height())) {
+ return;
+ }
+
+ if (bitmap.colorType() != kAlpha_8_SkColorType
+ && SkTreatAsSprite(matrix, bitmap.dimensions(), *paint)) {
+ //
+ // It is safe to call lock pixels now, since we know the matrix is
+ // (more or less) identity.
+ //
+ SkAutoPixmapUnlock unlocker;
+ if (!bitmap.requestLock(&unlocker)) {
+ return;
+ }
+ const SkPixmap& pmap = unlocker.pixmap();
+ int ix = SkScalarRoundToInt(matrix.getTranslateX());
+ int iy = SkScalarRoundToInt(matrix.getTranslateY());
+ if (clipHandlesSprite(*fRC, ix, iy, pmap)) {
+ SkTBlitterAllocator allocator;
+ // blitter will be owned by the allocator.
+ SkBlitter* blitter = SkBlitter::ChooseSprite(fDst, *paint, pmap, ix, iy, &allocator);
+ if (blitter) {
+ SkScan::FillIRect(SkIRect::MakeXYWH(ix, iy, pmap.width(), pmap.height()),
+ *fRC, blitter);
+ return;
+ }
+ // if !blitter, then we fall-through to the slower case
+ }
+ }
+
+ // now make a temp draw on the stack, and use it
+ //
+ SkDraw draw(*this);
+ draw.fMatrix = &matrix;
+
+ if (bitmap.colorType() == kAlpha_8_SkColorType) {
+ draw.drawBitmapAsMask(bitmap, *paint);
+ } else {
+ SkAutoBitmapShaderInstall install(bitmap, *paint);
+ const SkPaint& paintWithShader = install.paintWithShader();
+ const SkRect srcBounds = SkRect::MakeIWH(bitmap.width(), bitmap.height());
+ if (dstBounds) {
+ this->drawRect(srcBounds, paintWithShader, &prematrix, dstBounds);
+ } else {
+ draw.drawRect(srcBounds, paintWithShader);
+ }
+ }
+}
+
+void SkDraw::drawSprite(const SkBitmap& bitmap, int x, int y, const SkPaint& origPaint) const {
+ SkDEBUGCODE(this->validate();)
+
+ // nothing to draw
+ if (fRC->isEmpty() ||
+ bitmap.width() == 0 || bitmap.height() == 0 ||
+ bitmap.colorType() == kUnknown_SkColorType) {
+ return;
+ }
+
+ const SkIRect bounds = SkIRect::MakeXYWH(x, y, bitmap.width(), bitmap.height());
+
+ if (fRC->quickReject(bounds)) {
+ return; // nothing to draw
+ }
+
+ SkPaint paint(origPaint);
+ paint.setStyle(SkPaint::kFill_Style);
+
+ SkAutoPixmapUnlock unlocker;
+ if (!bitmap.requestLock(&unlocker)) {
+ return;
+ }
+ const SkPixmap& pmap = unlocker.pixmap();
+
+ if (nullptr == paint.getColorFilter() && clipHandlesSprite(*fRC, x, y, pmap)) {
+ SkTBlitterAllocator allocator;
+ // blitter will be owned by the allocator.
+ SkBlitter* blitter = SkBlitter::ChooseSprite(fDst, paint, pmap, x, y, &allocator);
+ if (blitter) {
+ SkScan::FillIRect(bounds, *fRC, blitter);
+ return;
+ }
+ }
+
+ SkMatrix matrix;
+ SkRect r;
+
+ // get a scalar version of our rect
+ r.set(bounds);
+
+ // create shader with offset
+ matrix.setTranslate(r.fLeft, r.fTop);
+ SkAutoBitmapShaderInstall install(bitmap, paint, &matrix);
+ const SkPaint& shaderPaint = install.paintWithShader();
+
+ SkDraw draw(*this);
+ matrix.reset();
+ draw.fMatrix = &matrix;
+ // call ourself with a rect
+ // is this OK if paint has a rasterizer?
+ draw.drawRect(r, shaderPaint);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkScalerContext.h"
+#include "SkGlyphCache.h"
+#include "SkTextToPathIter.h"
+#include "SkUtils.h"
+
+bool SkDraw::ShouldDrawTextAsPaths(const SkPaint& paint, const SkMatrix& ctm) {
+ // hairline glyphs are fast enough so we don't need to cache them
+ if (SkPaint::kStroke_Style == paint.getStyle() && 0 == paint.getStrokeWidth()) {
+ return true;
+ }
+
+ // we don't cache perspective
+ if (ctm.hasPerspective()) {
+ return true;
+ }
+
+ // Glyphs like Emojis can't be rendered as a path.
+ if (paint.getTypeface() && paint.getTypeface()->hasColorGlyphs()) {
+ return false;
+ }
+
+ SkMatrix textM;
+ return SkPaint::TooBigToUseCache(ctm, *paint.setTextMatrix(&textM));
+}
+
+void SkDraw::drawText_asPaths(const char text[], size_t byteLength,
+ SkScalar x, SkScalar y,
+ const SkPaint& paint) const {
+ SkDEBUGCODE(this->validate();)
+
+ SkTextToPathIter iter(text, byteLength, paint, true);
+
+ SkMatrix matrix;
+ matrix.setScale(iter.getPathScale(), iter.getPathScale());
+ matrix.postTranslate(x, y);
+
+ const SkPath* iterPath;
+ SkScalar xpos, prevXPos = 0;
+
+ while (iter.next(&iterPath, &xpos)) {
+ matrix.postTranslate(xpos - prevXPos, 0);
+ if (iterPath) {
+ const SkPaint& pnt = iter.getPaint();
+ if (fDevice) {
+ fDevice->drawPath(*this, *iterPath, pnt, &matrix, false);
+ } else {
+ this->drawPath(*iterPath, pnt, &matrix, false);
+ }
+ }
+ prevXPos = xpos;
+ }
+}
+
+// disable warning : local variable used without having been initialized
+#if defined _WIN32
+#pragma warning ( push )
+#pragma warning ( disable : 4701 )
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+class DrawOneGlyph {
+public:
+ DrawOneGlyph(const SkDraw& draw, const SkPaint& paint, SkGlyphCache* cache, SkBlitter* blitter)
+ : fUseRegionToDraw(UsingRegionToDraw(draw.fRC))
+ , fGlyphCache(cache)
+ , fBlitter(blitter)
+ , fClip(fUseRegionToDraw ? &draw.fRC->bwRgn() : nullptr)
+ , fDraw(draw)
+ , fPaint(paint)
+ , fClipBounds(PickClipBounds(draw)) { }
+
+ void operator()(const SkGlyph& glyph, SkPoint position, SkPoint rounding) {
+ position += rounding;
+ // Prevent glyphs from being drawn outside of or straddling the edge of device space.
+ // Comparisons written a little weirdly so that NaN coordinates are treated safely.
+ auto gt = [](float a, int b) { return !(a <= (float)b); };
+ auto lt = [](float a, int b) { return !(a >= (float)b); };
+ if (gt(position.fX, INT_MAX - (INT16_MAX + UINT16_MAX)) ||
+ lt(position.fX, INT_MIN - (INT16_MIN + 0 /*UINT16_MIN*/)) ||
+ gt(position.fY, INT_MAX - (INT16_MAX + UINT16_MAX)) ||
+ lt(position.fY, INT_MIN - (INT16_MIN + 0 /*UINT16_MIN*/))) {
+ return;
+ }
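+        // The margins above reserve room for glyph.fLeft/fTop (int16) plus
+        // the uint16 width/height added below, so the int arithmetic that
+        // follows cannot overflow.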
+
+ int left = SkScalarFloorToInt(position.fX);
+ int top = SkScalarFloorToInt(position.fY);
+ SkASSERT(glyph.fWidth > 0 && glyph.fHeight > 0);
+
+ left += glyph.fLeft;
+ top += glyph.fTop;
+
+ int right = left + glyph.fWidth;
+ int bottom = top + glyph.fHeight;
+
+ SkMask mask;
+ mask.fBounds.set(left, top, right, bottom);
+ SkASSERT(!mask.fBounds.isEmpty());
+
+ if (fUseRegionToDraw) {
+ SkRegion::Cliperator clipper(*fClip, mask.fBounds);
+
+ if (!clipper.done() && this->getImageData(glyph, &mask)) {
+ const SkIRect& cr = clipper.rect();
+ do {
+ this->blitMask(mask, cr);
+ clipper.next();
+ } while (!clipper.done());
+ }
+ } else {
+ SkIRect storage;
+ SkIRect* bounds = &mask.fBounds;
+
+ // this extra test is worth it, assuming that most of the time it succeeds
+ // since we can avoid writing to storage
+ if (!fClipBounds.containsNoEmptyCheck(mask.fBounds)) {
+ if (!storage.intersectNoEmptyCheck(mask.fBounds, fClipBounds))
+ return;
+ bounds = &storage;
+ }
+
+ if (this->getImageData(glyph, &mask)) {
+ this->blitMask(mask, *bounds);
+ }
+ }
+ }
+
+private:
+ static bool UsingRegionToDraw(const SkRasterClip* rClip) {
+ return rClip->isBW() && !rClip->isRect();
+ }
+
+ static SkIRect PickClipBounds(const SkDraw& draw) {
+ const SkRasterClip& rasterClip = *draw.fRC;
+
+ if (rasterClip.isBW()) {
+ return rasterClip.bwRgn().getBounds();
+ } else {
+ return rasterClip.aaRgn().getBounds();
+ }
+ }
+
+ bool getImageData(const SkGlyph& glyph, SkMask* mask) {
+ uint8_t* bits = (uint8_t*)(fGlyphCache->findImage(glyph));
+ if (nullptr == bits) {
+ return false; // can't rasterize glyph
+ }
+ mask->fImage = bits;
+ mask->fRowBytes = glyph.rowBytes();
+ mask->fFormat = static_cast<SkMask::Format>(glyph.fMaskFormat);
+ return true;
+ }
+
+ void blitMask(const SkMask& mask, const SkIRect& clip) const {
+ if (SkMask::kARGB32_Format == mask.fFormat) {
+ SkBitmap bm;
+ bm.installPixels(
+ SkImageInfo::MakeN32Premul(mask.fBounds.width(), mask.fBounds.height()),
+ (SkPMColor*)mask.fImage, mask.fRowBytes);
+
+ fDraw.drawSprite(bm, mask.fBounds.x(), mask.fBounds.y(), fPaint);
+ } else {
+ fBlitter->blitMask(mask, clip);
+ }
+ }
+
+ const bool fUseRegionToDraw;
+ SkGlyphCache * const fGlyphCache;
+ SkBlitter * const fBlitter;
+ const SkRegion* const fClip;
+ const SkDraw& fDraw;
+ const SkPaint& fPaint;
+ const SkIRect fClipBounds;
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+uint32_t SkDraw::scalerContextFlags() const {
+ uint32_t flags = SkPaint::kBoostContrast_ScalerContextFlag;
+ if (!SkImageInfoIsGammaCorrect(fDevice->imageInfo())) {
+ flags |= SkPaint::kFakeGamma_ScalerContextFlag;
+ }
+ return flags;
+}
+
+void SkDraw::drawText(const char text[], size_t byteLength,
+ SkScalar x, SkScalar y, const SkPaint& paint) const {
+ SkASSERT(byteLength == 0 || text != nullptr);
+
+ SkDEBUGCODE(this->validate();)
+
+ // nothing to draw
+ if (text == nullptr || byteLength == 0 || fRC->isEmpty()) {
+ return;
+ }
+
+    // SkScalerContextRec doesn't currently have a way of representing hairline stroke and
+ // will fill if its frame-width is 0.
+ if (ShouldDrawTextAsPaths(paint, *fMatrix)) {
+ this->drawText_asPaths(text, byteLength, x, y, paint);
+ return;
+ }
+
+ SkAutoGlyphCache cache(paint, &fDevice->surfaceProps(), this->scalerContextFlags(), fMatrix);
+
+ // The Blitter Choose needs to be live while using the blitter below.
+ SkAutoBlitterChoose blitterChooser(fDst, *fMatrix, paint);
+ SkAAClipBlitterWrapper wrapper(*fRC, blitterChooser.get());
+ DrawOneGlyph drawOneGlyph(*this, paint, cache.get(), wrapper.getBlitter());
+
+ SkFindAndPlaceGlyph::ProcessText(
+ paint.getTextEncoding(), text, byteLength,
+ {x, y}, *fMatrix, paint.getTextAlign(), cache.get(), drawOneGlyph);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+void SkDraw::drawPosText_asPaths(const char text[], size_t byteLength,
+ const SkScalar pos[], int scalarsPerPosition,
+ const SkPoint& offset, const SkPaint& origPaint) const {
+ // setup our std paint, in hopes of getting hits in the cache
+ SkPaint paint(origPaint);
+ SkScalar matrixScale = paint.setupForAsPaths();
+
+ SkMatrix matrix;
+ matrix.setScale(matrixScale, matrixScale);
+
+ // Temporarily jam in kFill, so we only ever ask for the raw outline from the cache.
+ paint.setStyle(SkPaint::kFill_Style);
+ paint.setPathEffect(nullptr);
+
+ SkPaint::GlyphCacheProc glyphCacheProc = SkPaint::GetGlyphCacheProc(paint.getTextEncoding(),
+ paint.isDevKernText(),
+ true);
+ SkAutoGlyphCache cache(paint, &fDevice->surfaceProps(), this->scalerContextFlags(), nullptr);
+
+ const char* stop = text + byteLength;
+ SkTextAlignProc alignProc(paint.getTextAlign());
+ SkTextMapStateProc tmsProc(SkMatrix::I(), offset, scalarsPerPosition);
+
+ // Now restore the original settings, so we "draw" with whatever style/stroking.
+ paint.setStyle(origPaint.getStyle());
+ paint.setPathEffect(sk_ref_sp(origPaint.getPathEffect()));
+
+ while (text < stop) {
+ const SkGlyph& glyph = glyphCacheProc(cache.get(), &text);
+ if (glyph.fWidth) {
+ const SkPath* path = cache->findPath(glyph);
+ if (path) {
+ SkPoint tmsLoc;
+ tmsProc(pos, &tmsLoc);
+ SkPoint loc;
+ alignProc(tmsLoc, glyph, &loc);
+
+ matrix[SkMatrix::kMTransX] = loc.fX;
+ matrix[SkMatrix::kMTransY] = loc.fY;
+ if (fDevice) {
+ fDevice->drawPath(*this, *path, paint, &matrix, false);
+ } else {
+ this->drawPath(*path, paint, &matrix, false);
+ }
+ }
+ }
+ pos += scalarsPerPosition;
+ }
+}
+
+void SkDraw::drawPosText(const char text[], size_t byteLength,
+ const SkScalar pos[], int scalarsPerPosition,
+ const SkPoint& offset, const SkPaint& paint) const {
+ SkASSERT(byteLength == 0 || text != nullptr);
+ SkASSERT(1 == scalarsPerPosition || 2 == scalarsPerPosition);
+
+ SkDEBUGCODE(this->validate();)
+
+ // nothing to draw
+ if (text == nullptr || byteLength == 0 || fRC->isEmpty()) {
+ return;
+ }
+
+ if (ShouldDrawTextAsPaths(paint, *fMatrix)) {
+ this->drawPosText_asPaths(text, byteLength, pos, scalarsPerPosition, offset, paint);
+ return;
+ }
+
+ SkAutoGlyphCache cache(paint, &fDevice->surfaceProps(), this->scalerContextFlags(), fMatrix);
+
+ // The Blitter Choose needs to be live while using the blitter below.
+ SkAutoBlitterChoose blitterChooser(fDst, *fMatrix, paint);
+ SkAAClipBlitterWrapper wrapper(*fRC, blitterChooser.get());
+ DrawOneGlyph drawOneGlyph(*this, paint, cache.get(), wrapper.getBlitter());
+ SkPaint::Align textAlignment = paint.getTextAlign();
+
+ SkFindAndPlaceGlyph::ProcessPosText(
+ paint.getTextEncoding(), text, byteLength,
+ offset, *fMatrix, pos, scalarsPerPosition, textAlignment, cache.get(), drawOneGlyph);
+}
+
+#if defined _WIN32
+#pragma warning ( pop )
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+static SkScan::HairRCProc ChooseHairProc(bool doAntiAlias) {
+ return doAntiAlias ? SkScan::AntiHairLine : SkScan::HairLine;
+}
+
+static bool texture_to_matrix(const VertState& state, const SkPoint verts[],
+ const SkPoint texs[], SkMatrix* matrix) {
+ SkPoint src[3], dst[3];
+
+ src[0] = texs[state.f0];
+ src[1] = texs[state.f1];
+ src[2] = texs[state.f2];
+ dst[0] = verts[state.f0];
+ dst[1] = verts[state.f1];
+ dst[2] = verts[state.f2];
+ return matrix->setPolyToPoly(src, dst, 3);
+}
+
+class SkTriColorShader : public SkShader {
+public:
+ SkTriColorShader();
+
+ class TriColorShaderContext : public SkShader::Context {
+ public:
+ TriColorShaderContext(const SkTriColorShader& shader, const ContextRec&);
+ virtual ~TriColorShaderContext();
+ void shadeSpan(int x, int y, SkPMColor dstC[], int count) override;
+
+ private:
+ bool setup(const SkPoint pts[], const SkColor colors[], int, int, int);
+
+ SkMatrix fDstToUnit;
+ SkPMColor fColors[3];
+ bool fSetup;
+
+ typedef SkShader::Context INHERITED;
+ };
+
+ struct TriColorShaderData {
+ const SkPoint* pts;
+ const SkColor* colors;
+ const VertState *state;
+ };
+
+ SK_TO_STRING_OVERRIDE()
+
+ // For serialization. This will never be called.
+ Factory getFactory() const override { sk_throw(); return nullptr; }
+
+ // Supply setup data to context from drawing setup
+ void bindSetupData(TriColorShaderData* setupData) { fSetupData = setupData; }
+
+ // Take the setup data from context when needed.
+ TriColorShaderData* takeSetupData() {
+ TriColorShaderData *data = fSetupData;
+ fSetupData = NULL;
+ return data;
+ }
+
+protected:
+ size_t onContextSize(const ContextRec&) const override;
+ Context* onCreateContext(const ContextRec& rec, void* storage) const override {
+ return new (storage) TriColorShaderContext(*this, rec);
+ }
+
+private:
+ TriColorShaderData *fSetupData;
+
+ typedef SkShader INHERITED;
+};
+
+bool SkTriColorShader::TriColorShaderContext::setup(const SkPoint pts[], const SkColor colors[],
+ int index0, int index1, int index2) {
+
+ fColors[0] = SkPreMultiplyColor(colors[index0]);
+ fColors[1] = SkPreMultiplyColor(colors[index1]);
+ fColors[2] = SkPreMultiplyColor(colors[index2]);
+
+ SkMatrix m, im;
+ m.reset();
+ m.set(0, pts[index1].fX - pts[index0].fX);
+ m.set(1, pts[index2].fX - pts[index0].fX);
+ m.set(2, pts[index0].fX);
+ m.set(3, pts[index1].fY - pts[index0].fY);
+ m.set(4, pts[index2].fY - pts[index0].fY);
+ m.set(5, pts[index0].fY);
+ if (!m.invert(&im)) {
+ return false;
+ }
+    // We can't call getTotalInverse(), because we explicitly don't want to look at the local
+    // matrix, as our iterators are intrinsically tied to the vertices, and nothing else.
+ SkMatrix ctmInv;
+ if (!this->getCTM().invert(&ctmInv)) {
+ return false;
+ }
+ // TODO replace INV(m) * INV(ctm) with INV(ctm * m)
+ fDstToUnit.setConcat(im, ctmInv);
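+    // fDstToUnit now maps device space into the triangle's unit space:
+    // index0 -> (0,0), index1 -> (1,0), index2 -> (0,1).  The mapped (x, y)
+    // of a device pixel are therefore the barycentric weights of fColors[1]
+    // and fColors[2]; shadeSpan() scales them to 0..256 and takes the
+    // remainder as the weight of fColors[0].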
+ return true;
+}
+
+#include "SkColorPriv.h"
+#include "SkComposeShader.h"
+
+static int ScalarTo256(SkScalar v) {
+ return static_cast<int>(SkScalarPin(v, 0, 1) * 256 + 0.5);
+}
+
+SkTriColorShader::SkTriColorShader()
+ : INHERITED(NULL)
+ , fSetupData(NULL) {}
+
+SkTriColorShader::TriColorShaderContext::TriColorShaderContext(const SkTriColorShader& shader,
+ const ContextRec& rec)
+ : INHERITED(shader, rec)
+ , fSetup(false) {}
+
+SkTriColorShader::TriColorShaderContext::~TriColorShaderContext() {}
+
+size_t SkTriColorShader::onContextSize(const ContextRec&) const {
+ return sizeof(TriColorShaderContext);
+}
+
+void SkTriColorShader::TriColorShaderContext::shadeSpan(int x, int y, SkPMColor dstC[], int count) {
+ SkTriColorShader* parent = static_cast<SkTriColorShader*>(const_cast<SkShader*>(&fShader));
+ TriColorShaderData* set = parent->takeSetupData();
+ if (set) {
+ fSetup = setup(set->pts, set->colors, set->state->f0, set->state->f1, set->state->f2);
+ }
+
+ if (!fSetup) {
+ // Invalid matrices. Not checked before so no need to assert.
+ return;
+ }
+
+ const int alphaScale = Sk255To256(this->getPaintAlpha());
+
+ SkPoint src;
+
+ for (int i = 0; i < count; i++) {
+ fDstToUnit.mapXY(SkIntToScalar(x), SkIntToScalar(y), &src);
+ x += 1;
+
+ int scale1 = ScalarTo256(src.fX);
+ int scale2 = ScalarTo256(src.fY);
+ int scale0 = 256 - scale1 - scale2;
+ if (scale0 < 0) {
+ if (scale1 > scale2) {
+ scale2 = 256 - scale1;
+ } else {
+ scale1 = 256 - scale2;
+ }
+ scale0 = 0;
+ }
+
+ if (256 != alphaScale) {
+ scale0 = SkAlphaMul(scale0, alphaScale);
+ scale1 = SkAlphaMul(scale1, alphaScale);
+ scale2 = SkAlphaMul(scale2, alphaScale);
+ }
+
+ dstC[i] = SkAlphaMulQ(fColors[0], scale0) +
+ SkAlphaMulQ(fColors[1], scale1) +
+ SkAlphaMulQ(fColors[2], scale2);
+ }
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkTriColorShader::toString(SkString* str) const {
+ str->append("SkTriColorShader: (");
+
+ this->INHERITED::toString(str);
+
+ str->append(")");
+}
+#endif
+
+void SkDraw::drawVertices(SkCanvas::VertexMode vmode, int count,
+ const SkPoint vertices[], const SkPoint textures[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint) const {
+ SkASSERT(0 == count || vertices);
+
+ // abort early if there is nothing to draw
+ if (count < 3 || (indices && indexCount < 3) || fRC->isEmpty()) {
+ return;
+ }
+
+    // transform our vertices into device coordinates
+ SkAutoSTMalloc<16, SkPoint> storage(count);
+ SkPoint* devVerts = storage.get();
+ fMatrix->mapPoints(devVerts, vertices, count);
+
+ /*
+ We can draw the vertices in 1 of 4 ways:
+
+ - solid color (no shader/texture[], no colors[])
+ - just colors (no shader/texture[], has colors[])
+ - just texture (has shader/texture[], no colors[])
+ - colors * texture (has shader/texture[], has colors[])
+
+ Thus for texture drawing, we need both texture[] and a shader.
+ */
+
+ auto triShader = sk_make_sp<SkTriColorShader>();
+ SkPaint p(paint);
+
+ SkShader* shader = p.getShader();
+ if (nullptr == shader) {
+ // if we have no shader, we ignore the texture coordinates
+ textures = nullptr;
+ } else if (nullptr == textures) {
+ // if we don't have texture coordinates, ignore the shader
+ p.setShader(nullptr);
+ shader = nullptr;
+ }
+
+ // setup the custom shader (if needed)
+ if (colors) {
+ if (nullptr == textures) {
+ // just colors (no texture)
+ p.setShader(triShader);
+ } else {
+ // colors * texture
+ SkASSERT(shader);
+ sk_sp<SkXfermode> xfer = xmode ? sk_ref_sp(xmode)
+ : SkXfermode::Make(SkXfermode::kModulate_Mode);
+ p.setShader(SkShader::MakeComposeShader(triShader, sk_ref_sp(shader), std::move(xfer)));
+ }
+ }
+
+ SkAutoBlitterChoose blitter(fDst, *fMatrix, p);
+ // Abort early if we failed to create a shader context.
+ if (blitter->isNullBlitter()) {
+ return;
+ }
+
+ // setup our state and function pointer for iterating triangles
+ VertState state(count, indices, indexCount);
+ VertState::Proc vertProc = state.chooseProc(vmode);
+
+ if (textures || colors) {
+ SkTriColorShader::TriColorShaderData verticesSetup = { vertices, colors, &state };
+
+ while (vertProc(&state)) {
+ if (textures) {
+ SkMatrix tempM;
+ if (texture_to_matrix(state, vertices, textures, &tempM)) {
+ SkShader::ContextRec rec(p, *fMatrix, &tempM,
+ SkBlitter::PreferredShaderDest(fDst.info()));
+ if (!blitter->resetShaderContext(rec)) {
+ continue;
+ }
+ }
+ }
+ if (colors) {
+ triShader->bindSetupData(&verticesSetup);
+ }
+
+ SkPoint tmp[] = {
+ devVerts[state.f0], devVerts[state.f1], devVerts[state.f2]
+ };
+ SkScan::FillTriangle(tmp, *fRC, blitter.get());
+ triShader->bindSetupData(NULL);
+ }
+ } else {
+ // no colors[] and no texture, stroke hairlines with paint's color.
+ SkScan::HairRCProc hairProc = ChooseHairProc(paint.isAntiAlias());
+ const SkRasterClip& clip = *fRC;
+ while (vertProc(&state)) {
+ SkPoint array[] = {
+ devVerts[state.f0], devVerts[state.f1], devVerts[state.f2], devVerts[state.f0]
+ };
+ hairProc(array, 4, clip, blitter.get());
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+
+void SkDraw::validate() const {
+ SkASSERT(fMatrix != nullptr);
+ SkASSERT(fRC != nullptr);
+
+ const SkIRect& cr = fRC->getBounds();
+ SkIRect br;
+
+ br.set(0, 0, fDst.width(), fDst.height());
+ SkASSERT(cr.isEmpty() || br.contains(cr));
+}
+
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "SkPath.h"
+#include "SkDraw.h"
+#include "SkRegion.h"
+#include "SkBlitter.h"
+
+static bool compute_bounds(const SkPath& devPath, const SkIRect* clipBounds,
+ const SkMaskFilter* filter, const SkMatrix* filterMatrix,
+ SkIRect* bounds) {
+ if (devPath.isEmpty()) {
+ return false;
+ }
+
+ // init our bounds from the path
+ *bounds = devPath.getBounds().makeOutset(SK_ScalarHalf, SK_ScalarHalf).roundOut();
+
+ SkIPoint margin = SkIPoint::Make(0, 0);
+ if (filter) {
+ SkASSERT(filterMatrix);
+
+ SkMask srcM, dstM;
+
+ srcM.fBounds = *bounds;
+ srcM.fFormat = SkMask::kA8_Format;
+ if (!filter->filterMask(&dstM, srcM, *filterMatrix, &margin)) {
+ return false;
+ }
+ }
+
+ // (possibly) trim the bounds to reflect the clip
+ // (plus whatever slop the filter needs)
+ if (clipBounds) {
+ // Ugh. Guard against gigantic margins from wacky filters. Without this
+ // check we can request arbitrary amounts of slop beyond our visible
+ // clip, and bring down the renderer (at least on finite RAM machines
+ // like handsets, etc.). Need to balance this invented value between
+ // quality of large filters like blurs, and the corresponding memory
+ // requests.
+ static const int MAX_MARGIN = 128;
+ if (!bounds->intersect(clipBounds->makeOutset(SkMin32(margin.fX, MAX_MARGIN),
+ SkMin32(margin.fY, MAX_MARGIN)))) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void draw_into_mask(const SkMask& mask, const SkPath& devPath,
+ SkStrokeRec::InitStyle style) {
+ SkDraw draw;
+ if (!draw.fDst.reset(mask)) {
+ return;
+ }
+
+ SkRasterClip clip;
+ SkMatrix matrix;
+ SkPaint paint;
+
+ clip.setRect(SkIRect::MakeWH(mask.fBounds.width(), mask.fBounds.height()));
+ matrix.setTranslate(-SkIntToScalar(mask.fBounds.fLeft),
+ -SkIntToScalar(mask.fBounds.fTop));
+
+ draw.fRC = &clip;
+ draw.fMatrix = &matrix;
+ paint.setAntiAlias(true);
+ switch (style) {
+ case SkStrokeRec::kHairline_InitStyle:
+ SkASSERT(!paint.getStrokeWidth());
+ paint.setStyle(SkPaint::kStroke_Style);
+ break;
+ case SkStrokeRec::kFill_InitStyle:
+ SkASSERT(paint.getStyle() == SkPaint::kFill_Style);
+ break;
+
+ }
+ draw.drawPath(devPath, paint);
+}
+
+bool SkDraw::DrawToMask(const SkPath& devPath, const SkIRect* clipBounds,
+ const SkMaskFilter* filter, const SkMatrix* filterMatrix,
+ SkMask* mask, SkMask::CreateMode mode,
+ SkStrokeRec::InitStyle style) {
+ if (SkMask::kJustRenderImage_CreateMode != mode) {
+ if (!compute_bounds(devPath, clipBounds, filter, filterMatrix, &mask->fBounds))
+ return false;
+ }
+
+ if (SkMask::kComputeBoundsAndRenderImage_CreateMode == mode) {
+ mask->fFormat = SkMask::kA8_Format;
+ mask->fRowBytes = mask->fBounds.width();
+ size_t size = mask->computeImageSize();
+ if (0 == size) {
+ // we're too big to allocate the mask, abort
+ return false;
+ }
+ mask->fImage = SkMask::AllocImage(size);
+ memset(mask->fImage, 0, mask->computeImageSize());
+ }
+
+ if (SkMask::kJustComputeBounds_CreateMode != mode) {
+ draw_into_mask(*mask, devPath, style);
+ }
+
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkDrawLooper.cpp b/gfx/skia/skia/src/core/SkDrawLooper.cpp
new file mode 100644
index 000000000..aa53f2e3a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDrawLooper.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkDrawLooper.h"
+#include "SkCanvas.h"
+#include "SkMatrix.h"
+#include "SkPaint.h"
+#include "SkRect.h"
+#include "SkSmallAllocator.h"
+
+bool SkDrawLooper::canComputeFastBounds(const SkPaint& paint) const {
+ SkCanvas canvas;
+ SkSmallAllocator<1, 32> allocator;
+ void* buffer = allocator.reserveT<SkDrawLooper::Context>(this->contextSize());
+
+ SkDrawLooper::Context* context = this->createContext(&canvas, buffer);
+ for (;;) {
+ SkPaint p(paint);
+ if (context->next(&canvas, &p)) {
+ p.setLooper(nullptr);
+ if (!p.canComputeFastBounds()) {
+ return false;
+ }
+ } else {
+ break;
+ }
+ }
+ return true;
+}
+
+void SkDrawLooper::computeFastBounds(const SkPaint& paint, const SkRect& s,
+ SkRect* dst) const {
+ // src and dst rects may alias and we need to keep the original src, so copy it.
+ const SkRect src = s;
+
+ SkCanvas canvas;
+ SkSmallAllocator<1, 32> allocator;
+ void* buffer = allocator.reserveT<SkDrawLooper::Context>(this->contextSize());
+
+ *dst = src; // catch case where there are no loops
+ SkDrawLooper::Context* context = this->createContext(&canvas, buffer);
+ for (bool firstTime = true;; firstTime = false) {
+ SkPaint p(paint);
+ if (context->next(&canvas, &p)) {
+ SkRect r(src);
+
+ p.setLooper(nullptr);
+ p.computeFastBounds(r, &r);
+ canvas.getTotalMatrix().mapRect(&r);
+
+ if (firstTime) {
+ *dst = r;
+ } else {
+ dst->join(r);
+ }
+ } else {
+ break;
+ }
+ }
+}
+
+bool SkDrawLooper::asABlurShadow(BlurShadowRec*) const {
+ return false;
+}
diff --git a/gfx/skia/skia/src/core/SkDrawProcs.h b/gfx/skia/skia/src/core/SkDrawProcs.h
new file mode 100644
index 000000000..15c5cf866
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDrawProcs.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDrawProcs_DEFINED
+#define SkDrawProcs_DEFINED
+
+#include "SkDraw.h"
+#include "SkGlyph.h"
+
+bool SkDrawTreatAAStrokeAsHairline(SkScalar strokeWidth, const SkMatrix&,
+ SkScalar* coverage);
+
+/**
+ * If the current paint is set to stroke and the stroke-width when applied to
+ * the matrix is <= 1.0, then this returns true, and sets coverage (simulating
+ * a stroke by drawing a hairline with partial coverage). If any of these
+ * conditions are false, then this returns false and coverage is ignored.
+ */
+inline bool SkDrawTreatAsHairline(const SkPaint& paint, const SkMatrix& matrix,
+ SkScalar* coverage) {
+ if (SkPaint::kStroke_Style != paint.getStyle()) {
+ return false;
+ }
+
+ SkScalar strokeWidth = paint.getStrokeWidth();
+ if (0 == strokeWidth) {
+ *coverage = SK_Scalar1;
+ return true;
+ }
+
+ if (!paint.isAntiAlias()) {
+ return false;
+ }
+
+ return SkDrawTreatAAStrokeAsHairline(strokeWidth, matrix, coverage);
+}
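+
+// For example, a sub-pixel-wide anti-aliased stroke that passes this test is
+// drawn as a hairline whose alpha is modulated by *coverage, instead of being
+// converted into a filled stroke outline.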
+
+class SkTextAlignProc {
+public:
+ SkTextAlignProc(SkPaint::Align align)
+ : fAlign(align) {
+ }
+
+ // Returns the glyph position, which may be rounded or not by the caller
+ // e.g. subpixel doesn't round.
+ void operator()(const SkPoint& loc, const SkGlyph& glyph, SkPoint* dst) {
+ if (SkPaint::kLeft_Align == fAlign) {
+ dst->set(loc.fX, loc.fY);
+ } else if (SkPaint::kCenter_Align == fAlign) {
+ dst->set(loc.fX - SkFloatToScalar(glyph.fAdvanceX) / 2,
+ loc.fY - SkFloatToScalar(glyph.fAdvanceY) / 2);
+ } else {
+ SkASSERT(SkPaint::kRight_Align == fAlign);
+ dst->set(loc.fX - SkFloatToScalar(glyph.fAdvanceX),
+ loc.fY - SkFloatToScalar(glyph.fAdvanceY));
+ }
+ }
+private:
+ const SkPaint::Align fAlign;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDrawable.cpp b/gfx/skia/skia/src/core/SkDrawable.cpp
new file mode 100644
index 000000000..bf6d39cfc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDrawable.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAtomics.h"
+#include "SkCanvas.h"
+#include "SkDrawable.h"
+
+static int32_t next_generation_id() {
+ static int32_t gCanvasDrawableGenerationID;
+
+ // do a loop in case our global wraps around, as we never want to
+ // return a 0
+ int32_t genID;
+ do {
+ genID = sk_atomic_inc(&gCanvasDrawableGenerationID) + 1;
+ } while (0 == genID);
+ return genID;
+}
+
+SkDrawable::SkDrawable() : fGenerationID(0) {}
+
+static void draw_bbox(SkCanvas* canvas, const SkRect& r) {
+ SkPaint paint;
+ paint.setStyle(SkPaint::kStroke_Style);
+ paint.setColor(0xFFFF7088);
+ canvas->drawRect(r, paint);
+ canvas->drawLine(r.left(), r.top(), r.right(), r.bottom(), paint);
+ canvas->drawLine(r.left(), r.bottom(), r.right(), r.top(), paint);
+}
+
+void SkDrawable::draw(SkCanvas* canvas, const SkMatrix* matrix) {
+ SkAutoCanvasRestore acr(canvas, true);
+ if (matrix) {
+ canvas->concat(*matrix);
+ }
+ this->onDraw(canvas);
+
+ if (false) {
+ draw_bbox(canvas, this->getBounds());
+ }
+}
+
+void SkDrawable::draw(SkCanvas* canvas, SkScalar x, SkScalar y) {
+ SkMatrix matrix = SkMatrix::MakeTrans(x, y);
+ this->draw(canvas, &matrix);
+}
+
+SkPicture* SkDrawable::newPictureSnapshot() {
+ return this->onNewPictureSnapshot();
+}
+
+uint32_t SkDrawable::getGenerationID() {
+ if (0 == fGenerationID) {
+ fGenerationID = next_generation_id();
+ }
+ return fGenerationID;
+}
+
+SkRect SkDrawable::getBounds() {
+ return this->onGetBounds();
+}
+
+void SkDrawable::notifyDrawingChanged() {
+ fGenerationID = 0;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////
+
+#include "SkPictureRecorder.h"
+
+SkPicture* SkDrawable::onNewPictureSnapshot() {
+ SkPictureRecorder recorder;
+
+ const SkRect bounds = this->getBounds();
+ SkCanvas* canvas = recorder.beginRecording(bounds, nullptr, 0);
+ this->draw(canvas);
+ if (false) {
+ draw_bbox(canvas, bounds);
+ }
+ return recorder.finishRecordingAsPicture().release();
+}
diff --git a/gfx/skia/skia/src/core/SkEdge.cpp b/gfx/skia/skia/src/core/SkEdge.cpp
new file mode 100644
index 000000000..d91c3e6bc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEdge.cpp
@@ -0,0 +1,479 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkEdge.h"
+#include "SkFDot6.h"
+#include "SkMathPriv.h"
+
+/*
+ In setLine, setQuadratic, setCubic, the first thing we do is to convert
+ the points into FDot6. This is modulated by the shift parameter, which
+ will either be 0, or something like 2 for antialiasing.
+
+ In the float case, we want to turn the float into .6 by saying pt * 64,
+ or pt * 256 for antialiasing. This is implemented as 1 << (shift + 6).
+
+ In the fixed case, we want to turn the fixed into .6 by saying pt >> 10,
+ or pt >> 8 for antialiasing. This is implemented as pt >> (10 - shift).
+*/
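+
+/* Worked example: with shift == 0, the float coordinate 3.25 becomes
+   int(3.25 * 64) == 208 in FDot6 (208/64 == 3.25); with shift == 2 for
+   antialiasing it becomes int(3.25 * 256) == 832. The same coordinate in
+   16.16 fixed point (212992) gives 212992 >> 10 == 208.
+*/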
+
+static inline SkFixed SkFDot6ToFixedDiv2(SkFDot6 value) {
+    // we want to return SkFDot6ToFixed(value >> 1), but we don't want to throw
+    // away the low bit of value, so we fold the divide-by-2 into a single,
+    // smaller up-shift instead
+ return SkLeftShift(value, 16 - 6 - 1);
+}
+
+/////////////////////////////////////////////////////////////////////////
+
+int SkEdge::setLine(const SkPoint& p0, const SkPoint& p1, const SkIRect* clip,
+ int shift) {
+ SkFDot6 x0, y0, x1, y1;
+
+ {
+#ifdef SK_RASTERIZE_EVEN_ROUNDING
+ x0 = SkScalarRoundToFDot6(p0.fX, shift);
+ y0 = SkScalarRoundToFDot6(p0.fY, shift);
+ x1 = SkScalarRoundToFDot6(p1.fX, shift);
+ y1 = SkScalarRoundToFDot6(p1.fY, shift);
+#else
+ float scale = float(1 << (shift + 6));
+ x0 = int(p0.fX * scale);
+ y0 = int(p0.fY * scale);
+ x1 = int(p1.fX * scale);
+ y1 = int(p1.fY * scale);
+#endif
+ }
+
+ int winding = 1;
+
+ if (y0 > y1) {
+ SkTSwap(x0, x1);
+ SkTSwap(y0, y1);
+ winding = -1;
+ }
+
+ int top = SkFDot6Round(y0);
+ int bot = SkFDot6Round(y1);
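+    // top/bot bracket the scanlines whose centers the edge crosses; below,
+    // fFirstY/fLastY store them as the inclusive range [top, bot - 1], so a
+    // segment whose rounded endpoints land on the same scanline covers no
+    // scanline centers and is dropped.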
+
+ // are we a zero-height line?
+ if (top == bot) {
+ return 0;
+ }
+ // are we completely above or below the clip?
+ if (clip && (top >= clip->fBottom || bot <= clip->fTop)) {
+ return 0;
+ }
+
+ SkFixed slope = SkFDot6Div(x1 - x0, y1 - y0);
+ const SkFDot6 dy = SkEdge_Compute_DY(top, y0);
+
+ fX = SkFDot6ToFixed(x0 + SkFixedMul(slope, dy)); // + SK_Fixed1/2
+ fDX = slope;
+ fFirstY = top;
+ fLastY = bot - 1;
+ fCurveCount = 0;
+ fWinding = SkToS8(winding);
+ fCurveShift = 0;
+
+ if (clip) {
+ this->chopLineWithClip(*clip);
+ }
+ return 1;
+}
+
+// called from a curve subclass
+int SkEdge::updateLine(SkFixed x0, SkFixed y0, SkFixed x1, SkFixed y1)
+{
+ SkASSERT(fWinding == 1 || fWinding == -1);
+ SkASSERT(fCurveCount != 0);
+// SkASSERT(fCurveShift != 0);
+
+ y0 >>= 10;
+ y1 >>= 10;
+
+ SkASSERT(y0 <= y1);
+
+ int top = SkFDot6Round(y0);
+ int bot = SkFDot6Round(y1);
+
+// SkASSERT(top >= fFirstY);
+
+ // are we a zero-height line?
+ if (top == bot)
+ return 0;
+
+ x0 >>= 10;
+ x1 >>= 10;
+
+ SkFixed slope = SkFDot6Div(x1 - x0, y1 - y0);
+ const SkFDot6 dy = SkEdge_Compute_DY(top, y0);
+
+ fX = SkFDot6ToFixed(x0 + SkFixedMul(slope, dy)); // + SK_Fixed1/2
+ fDX = slope;
+ fFirstY = top;
+ fLastY = bot - 1;
+
+ return 1;
+}
+
+void SkEdge::chopLineWithClip(const SkIRect& clip)
+{
+ int top = fFirstY;
+
+ SkASSERT(top < clip.fBottom);
+
+ // clip the line to the top
+ if (top < clip.fTop)
+ {
+ SkASSERT(fLastY >= clip.fTop);
+ fX += fDX * (clip.fTop - top);
+ fFirstY = clip.fTop;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* We store 1<<shift in a (signed) byte, so its maximum value is 1<<6 == 64.
+ Note that this limits the number of lines we use to approximate a curve.
+ If we need to increase this, we need to store fCurveCount in something
+ larger than int8_t.
+*/
+#define MAX_COEFF_SHIFT 6
+
+static inline SkFDot6 cheap_distance(SkFDot6 dx, SkFDot6 dy)
+{
+ dx = SkAbs32(dx);
+ dy = SkAbs32(dy);
+ // return max + min/2
+ if (dx > dy)
+ dx += dy >> 1;
+ else
+ dx = dy + (dx >> 1);
+ return dx;
+}
+
+static inline int diff_to_shift(SkFDot6 dx, SkFDot6 dy)
+{
+ // cheap calc of distance from center of p0-p2 to the center of the curve
+ SkFDot6 dist = cheap_distance(dx, dy);
+
+ // shift down dist (it is currently in dot6)
+ // down by 5 should give us 1/2 pixel accuracy (assuming our dist is accurate...)
+ // this is chosen by heuristic: make it as big as possible (to minimize segments)
+ // ... but small enough so that our curves still look smooth
+ dist = (dist + (1 << 4)) >> 5;
+
+ // each subdivision (shift value) cuts this dist (error) by 1/4
+ return (32 - SkCLZ(dist)) >> 1;
+}
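+
+// Worked example: if cheap_distance(dx, dy) is 512 in dot6 (the curve's
+// midpoint is ~8px off its chord), then dist = (512 + 16) >> 5 == 16 and the
+// result is (32 - SkCLZ(16)) >> 1 == (32 - 27) >> 1 == 2, so setQuadratic()
+// below chops the curve into 1 << 2 == 4 line segments.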
+
+int SkQuadraticEdge::setQuadratic(const SkPoint pts[3], int shift)
+{
+ SkFDot6 x0, y0, x1, y1, x2, y2;
+
+ {
+#ifdef SK_RASTERIZE_EVEN_ROUNDING
+ x0 = SkScalarRoundToFDot6(pts[0].fX, shift);
+ y0 = SkScalarRoundToFDot6(pts[0].fY, shift);
+ x1 = SkScalarRoundToFDot6(pts[1].fX, shift);
+ y1 = SkScalarRoundToFDot6(pts[1].fY, shift);
+ x2 = SkScalarRoundToFDot6(pts[2].fX, shift);
+ y2 = SkScalarRoundToFDot6(pts[2].fY, shift);
+#else
+ float scale = float(1 << (shift + 6));
+ x0 = int(pts[0].fX * scale);
+ y0 = int(pts[0].fY * scale);
+ x1 = int(pts[1].fX * scale);
+ y1 = int(pts[1].fY * scale);
+ x2 = int(pts[2].fX * scale);
+ y2 = int(pts[2].fY * scale);
+#endif
+ }
+
+ int winding = 1;
+ if (y0 > y2)
+ {
+ SkTSwap(x0, x2);
+ SkTSwap(y0, y2);
+ winding = -1;
+ }
+ SkASSERT(y0 <= y1 && y1 <= y2);
+
+ int top = SkFDot6Round(y0);
+ int bot = SkFDot6Round(y2);
+
+ // are we a zero-height quad (line)?
+ if (top == bot)
+ return 0;
+
+ // compute number of steps needed (1 << shift)
+ {
+ SkFDot6 dx = (SkLeftShift(x1, 1) - x0 - x2) >> 2;
+ SkFDot6 dy = (SkLeftShift(y1, 1) - y0 - y2) >> 2;
+ shift = diff_to_shift(dx, dy);
+ SkASSERT(shift >= 0);
+ }
+ // need at least 1 subdivision for our bias trick
+ if (shift == 0) {
+ shift = 1;
+ } else if (shift > MAX_COEFF_SHIFT) {
+ shift = MAX_COEFF_SHIFT;
+ }
+
+ fWinding = SkToS8(winding);
+ //fCubicDShift only set for cubics
+ fCurveCount = SkToS8(1 << shift);
+
+ /*
+ * We want to reformulate into polynomial form, to make it clear how we
+ * should forward-difference.
+ *
+ * p0 (1 - t)^2 + p1 t(1 - t) + p2 t^2 ==> At^2 + Bt + C
+ *
+ * A = p0 - 2p1 + p2
+ * B = 2(p1 - p0)
+ * C = p0
+ *
+ * Our caller must have constrained our inputs (p0..p2) to all fit into
+ * 16.16. However, as seen above, we sometimes compute values that can be
+ * larger (e.g. B = 2*(p1 - p0)). To guard against overflow, we will store
+ * A and B at 1/2 of their actual value, and just apply a 2x scale during
+ * application in updateQuadratic(). Hence we store (shift - 1) in
+ * fCurveShift.
+ */
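+
+    /*
+     * Forward differencing, concretely: sampling X(t) = At^2 + Bt + C at
+     * t_k = k*h with h = 1/2^shift gives a first difference of
+     * X(t_{k+1}) - X(t_k) = A*h^2*(2k+1) + B*h and a constant second
+     * difference of 2*A*h^2, so updateQuadratic() can step the curve with
+     * adds and shifts only. fQDx/fQDy and fQDDx/fQDDy below hold these
+     * differences in the biased form described above and in the per-line
+     * comments.
+     */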
+
+ fCurveShift = SkToU8(shift - 1);
+
+ SkFixed A = SkFDot6ToFixedDiv2(x0 - x1 - x1 + x2); // 1/2 the real value
+ SkFixed B = SkFDot6ToFixed(x1 - x0); // 1/2 the real value
+
+ fQx = SkFDot6ToFixed(x0);
+ fQDx = B + (A >> shift); // biased by shift
+ fQDDx = A >> (shift - 1); // biased by shift
+
+ A = SkFDot6ToFixedDiv2(y0 - y1 - y1 + y2); // 1/2 the real value
+ B = SkFDot6ToFixed(y1 - y0); // 1/2 the real value
+
+ fQy = SkFDot6ToFixed(y0);
+ fQDy = B + (A >> shift); // biased by shift
+ fQDDy = A >> (shift - 1); // biased by shift
+
+ fQLastX = SkFDot6ToFixed(x2);
+ fQLastY = SkFDot6ToFixed(y2);
+
+ return this->updateQuadratic();
+}
+
+int SkQuadraticEdge::updateQuadratic()
+{
+ int success;
+ int count = fCurveCount;
+ SkFixed oldx = fQx;
+ SkFixed oldy = fQy;
+ SkFixed dx = fQDx;
+ SkFixed dy = fQDy;
+ SkFixed newx, newy;
+ int shift = fCurveShift;
+
+ SkASSERT(count > 0);
+
+ do {
+ if (--count > 0)
+ {
+ newx = oldx + (dx >> shift);
+ dx += fQDDx;
+ newy = oldy + (dy >> shift);
+ dy += fQDDy;
+ }
+ else // last segment
+ {
+ newx = fQLastX;
+ newy = fQLastY;
+ }
+ success = this->updateLine(oldx, oldy, newx, newy);
+ oldx = newx;
+ oldy = newy;
+ } while (count > 0 && !success);
+
+ fQx = newx;
+ fQy = newy;
+ fQDx = dx;
+ fQDy = dy;
+ fCurveCount = SkToS8(count);
+ return success;
+}
+
+/////////////////////////////////////////////////////////////////////////
+
+static inline int SkFDot6UpShift(SkFDot6 x, int upShift) {
+ SkASSERT((SkLeftShift(x, upShift) >> upShift) == x);
+ return SkLeftShift(x, upShift);
+}
+
+/* f(1/3) = (8a + 12b + 6c + d) / 27
+ f(2/3) = (a + 6b + 12c + 8d) / 27
+
+ f(1/3)-b = (8a - 15b + 6c + d) / 27
+ f(2/3)-c = (a + 6b - 15c + 8d) / 27
+
+   use 19/512 to approximate 1/27
+*/
+static SkFDot6 cubic_delta_from_line(SkFDot6 a, SkFDot6 b, SkFDot6 c, SkFDot6 d)
+{
+ // since our parameters may be negative, we don't use << to avoid ASAN warnings
+ SkFDot6 oneThird = (a*8 - b*15 + 6*c + d) * 19 >> 9;
+ SkFDot6 twoThird = (a + 6*b - c*15 + d*8) * 19 >> 9;
+
+ return SkMax32(SkAbs32(oneThird), SkAbs32(twoThird));
+}
+
+int SkCubicEdge::setCubic(const SkPoint pts[4], int shift) {
+ SkFDot6 x0, y0, x1, y1, x2, y2, x3, y3;
+
+ {
+#ifdef SK_RASTERIZE_EVEN_ROUNDING
+ x0 = SkScalarRoundToFDot6(pts[0].fX, shift);
+ y0 = SkScalarRoundToFDot6(pts[0].fY, shift);
+ x1 = SkScalarRoundToFDot6(pts[1].fX, shift);
+ y1 = SkScalarRoundToFDot6(pts[1].fY, shift);
+ x2 = SkScalarRoundToFDot6(pts[2].fX, shift);
+ y2 = SkScalarRoundToFDot6(pts[2].fY, shift);
+ x3 = SkScalarRoundToFDot6(pts[3].fX, shift);
+ y3 = SkScalarRoundToFDot6(pts[3].fY, shift);
+#else
+ float scale = float(1 << (shift + 6));
+ x0 = int(pts[0].fX * scale);
+ y0 = int(pts[0].fY * scale);
+ x1 = int(pts[1].fX * scale);
+ y1 = int(pts[1].fY * scale);
+ x2 = int(pts[2].fX * scale);
+ y2 = int(pts[2].fY * scale);
+ x3 = int(pts[3].fX * scale);
+ y3 = int(pts[3].fY * scale);
+#endif
+ }
+
+ int winding = 1;
+ if (y0 > y3)
+ {
+ SkTSwap(x0, x3);
+ SkTSwap(x1, x2);
+ SkTSwap(y0, y3);
+ SkTSwap(y1, y2);
+ winding = -1;
+ }
+
+ int top = SkFDot6Round(y0);
+ int bot = SkFDot6Round(y3);
+
+ // are we a zero-height cubic (line)?
+ if (top == bot)
+ return 0;
+
+ // compute number of steps needed (1 << shift)
+ {
+        // Can't use (center of curve - center of baseline), since the center of
+        // the curve need not be the max delta from the baseline (it could even be
+        // coincident), so we just look at the two off-curve points instead.
+ SkFDot6 dx = cubic_delta_from_line(x0, x1, x2, x3);
+ SkFDot6 dy = cubic_delta_from_line(y0, y1, y2, y3);
+ // add 1 (by observation)
+ shift = diff_to_shift(dx, dy) + 1;
+ }
+ // need at least 1 subdivision for our bias trick
+ SkASSERT(shift > 0);
+ if (shift > MAX_COEFF_SHIFT) {
+ shift = MAX_COEFF_SHIFT;
+ }
+
+    /* Since our incoming data is initially shifted down by 10 (or 8 in
+       antialias), the most we can shift up is 8. However, we compute
+       coefficients with a 3*, so the safest up-shift is really 6.
+    */
+ int upShift = 6; // largest safe value
+ int downShift = shift + upShift - 10;
+ if (downShift < 0) {
+ downShift = 0;
+ upShift = 10 - shift;
+ }
+
+ fWinding = SkToS8(winding);
+ fCurveCount = SkToS8(SkLeftShift(-1, shift));
+ fCurveShift = SkToU8(shift);
+ fCubicDShift = SkToU8(downShift);
+
+ SkFixed B = SkFDot6UpShift(3 * (x1 - x0), upShift);
+ SkFixed C = SkFDot6UpShift(3 * (x0 - x1 - x1 + x2), upShift);
+ SkFixed D = SkFDot6UpShift(x3 + 3 * (x1 - x2) - x0, upShift);
+
+ fCx = SkFDot6ToFixed(x0);
+ fCDx = B + (C >> shift) + (D >> 2*shift); // biased by shift
+ fCDDx = 2*C + (3*D >> (shift - 1)); // biased by 2*shift
+ fCDDDx = 3*D >> (shift - 1); // biased by 2*shift
+
+ B = SkFDot6UpShift(3 * (y1 - y0), upShift);
+ C = SkFDot6UpShift(3 * (y0 - y1 - y1 + y2), upShift);
+ D = SkFDot6UpShift(y3 + 3 * (y1 - y2) - y0, upShift);
+
+ fCy = SkFDot6ToFixed(y0);
+ fCDy = B + (C >> shift) + (D >> 2*shift); // biased by shift
+ fCDDy = 2*C + (3*D >> (shift - 1)); // biased by 2*shift
+ fCDDDy = 3*D >> (shift - 1); // biased by 2*shift
+
+ fCLastX = SkFDot6ToFixed(x3);
+ fCLastY = SkFDot6ToFixed(y3);
+
+ return this->updateCubic();
+}
+
+int SkCubicEdge::updateCubic()
+{
+ int success;
+ int count = fCurveCount;
+ SkFixed oldx = fCx;
+ SkFixed oldy = fCy;
+ SkFixed newx, newy;
+ const int ddshift = fCurveShift;
+ const int dshift = fCubicDShift;
+
+ SkASSERT(count < 0);
+
+ do {
+ if (++count < 0)
+ {
+ newx = oldx + (fCDx >> dshift);
+ fCDx += fCDDx >> ddshift;
+ fCDDx += fCDDDx;
+
+ newy = oldy + (fCDy >> dshift);
+ fCDy += fCDDy >> ddshift;
+ fCDDy += fCDDDy;
+ }
+ else // last segment
+ {
+ // SkDebugf("LastX err=%d, LastY err=%d\n", (oldx + (fCDx >> shift) - fLastX), (oldy + (fCDy >> shift) - fLastY));
+ newx = fCLastX;
+ newy = fCLastY;
+ }
+
+ // we want to say SkASSERT(oldy <= newy), but our finite fixedpoint
+ // doesn't always achieve that, so we have to explicitly pin it here.
+ if (newy < oldy) {
+ newy = oldy;
+ }
+
+ success = this->updateLine(oldx, oldy, newx, newy);
+ oldx = newx;
+ oldy = newy;
+ } while (count < 0 && !success);
+
+ fCx = newx;
+ fCy = newy;
+ fCurveCount = SkToS8(count);
+ return success;
+}
diff --git a/gfx/skia/skia/src/core/SkEdge.h b/gfx/skia/skia/src/core/SkEdge.h
new file mode 100644
index 000000000..11669b4f7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEdge.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkEdge_DEFINED
+#define SkEdge_DEFINED
+
+#include "SkRect.h"
+#include "SkFDot6.h"
+#include "SkMath.h"
+
+// This correctly favors the lower-pixel when y0 is on a 1/2 pixel boundary
+#define SkEdge_Compute_DY(top, y0) (SkLeftShift(top, 6) + 32 - (y0))
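+// e.g. with y0 == 96 (1.5px in FDot6), SkFDot6Round() picks scanline 2, and
+// the macro yields (2 << 6) + 32 - 96 == 64: the FDot6 distance from y0 to
+// that scanline's center (2.5px), which is where setLine() samples fX.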
+
+struct SkEdge {
+ enum Type {
+ kLine_Type,
+ kQuad_Type,
+ kCubic_Type
+ };
+
+ SkEdge* fNext;
+ SkEdge* fPrev;
+
+ SkFixed fX;
+ SkFixed fDX;
+ int32_t fFirstY;
+ int32_t fLastY;
+ int8_t fCurveCount; // only used by kQuad(+) and kCubic(-)
+    uint8_t fCurveShift;    // applied to all Dx/DDx/DDDx except for the fCubicDShift exception
+ uint8_t fCubicDShift; // applied to fCDx and fCDy only in cubic
+ int8_t fWinding; // 1 or -1
+
+ int setLine(const SkPoint& p0, const SkPoint& p1, const SkIRect* clip, int shiftUp);
+ // call this version if you know you don't have a clip
+ inline int setLine(const SkPoint& p0, const SkPoint& p1, int shiftUp);
+ inline int updateLine(SkFixed ax, SkFixed ay, SkFixed bx, SkFixed by);
+ void chopLineWithClip(const SkIRect& clip);
+
+ inline bool intersectsClip(const SkIRect& clip) const {
+ SkASSERT(fFirstY < clip.fBottom);
+ return fLastY >= clip.fTop;
+ }
+
+#ifdef SK_DEBUG
+ void dump() const {
+ SkDebugf("edge: firstY:%d lastY:%d x:%g dx:%g w:%d\n", fFirstY, fLastY, SkFixedToFloat(fX), SkFixedToFloat(fDX), fWinding);
+ }
+
+ void validate() const {
+ SkASSERT(fPrev && fNext);
+ SkASSERT(fPrev->fNext == this);
+ SkASSERT(fNext->fPrev == this);
+
+ SkASSERT(fFirstY <= fLastY);
+ SkASSERT(SkAbs32(fWinding) == 1);
+ }
+#endif
+};
+
+struct SkQuadraticEdge : public SkEdge {
+ SkFixed fQx, fQy;
+ SkFixed fQDx, fQDy;
+ SkFixed fQDDx, fQDDy;
+ SkFixed fQLastX, fQLastY;
+
+ int setQuadratic(const SkPoint pts[3], int shiftUp);
+ int updateQuadratic();
+};
+
+struct SkCubicEdge : public SkEdge {
+ SkFixed fCx, fCy;
+ SkFixed fCDx, fCDy;
+ SkFixed fCDDx, fCDDy;
+ SkFixed fCDDDx, fCDDDy;
+ SkFixed fCLastX, fCLastY;
+
+ int setCubic(const SkPoint pts[4], int shiftUp);
+ int updateCubic();
+};
+
+int SkEdge::setLine(const SkPoint& p0, const SkPoint& p1, int shift) {
+ SkFDot6 x0, y0, x1, y1;
+
+ {
+#ifdef SK_RASTERIZE_EVEN_ROUNDING
+ x0 = SkScalarRoundToFDot6(p0.fX, shift);
+ y0 = SkScalarRoundToFDot6(p0.fY, shift);
+ x1 = SkScalarRoundToFDot6(p1.fX, shift);
+ y1 = SkScalarRoundToFDot6(p1.fY, shift);
+#else
+ float scale = float(1 << (shift + 6));
+ x0 = int(p0.fX * scale);
+ y0 = int(p0.fY * scale);
+ x1 = int(p1.fX * scale);
+ y1 = int(p1.fY * scale);
+#endif
+ }
+
+ int winding = 1;
+
+ if (y0 > y1) {
+ SkTSwap(x0, x1);
+ SkTSwap(y0, y1);
+ winding = -1;
+ }
+
+ int top = SkFDot6Round(y0);
+ int bot = SkFDot6Round(y1);
+
+ // are we a zero-height line?
+ if (top == bot) {
+ return 0;
+ }
+
+ SkFixed slope = SkFDot6Div(x1 - x0, y1 - y0);
+ const SkFDot6 dy = SkEdge_Compute_DY(top, y0);
+
+ fX = SkFDot6ToFixed(x0 + SkFixedMul(slope, dy)); // + SK_Fixed1/2
+ fDX = slope;
+ fFirstY = top;
+ fLastY = bot - 1;
+ fCurveCount = 0;
+ fWinding = SkToS8(winding);
+ fCurveShift = 0;
+ return 1;
+}
+
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkEdgeBuilder.cpp b/gfx/skia/skia/src/core/SkEdgeBuilder.cpp
new file mode 100644
index 000000000..a6b06f161
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEdgeBuilder.cpp
@@ -0,0 +1,337 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkEdgeBuilder.h"
+#include "SkPath.h"
+#include "SkEdge.h"
+#include "SkEdgeClipper.h"
+#include "SkLineClipper.h"
+#include "SkGeometry.h"
+
+template <typename T> static T* typedAllocThrow(SkChunkAlloc& alloc) {
+ return static_cast<T*>(alloc.allocThrow(sizeof(T)));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkEdgeBuilder::SkEdgeBuilder() : fAlloc(16*1024) {
+ fEdgeList = nullptr;
+}
+
+SkEdgeBuilder::Combine SkEdgeBuilder::CombineVertical(const SkEdge* edge, SkEdge* last) {
+ if (last->fCurveCount || last->fDX || edge->fX != last->fX) {
+ return kNo_Combine;
+ }
+ if (edge->fWinding == last->fWinding) {
+ if (edge->fLastY + 1 == last->fFirstY) {
+ last->fFirstY = edge->fFirstY;
+ return kPartial_Combine;
+ }
+ if (edge->fFirstY == last->fLastY + 1) {
+ last->fLastY = edge->fLastY;
+ return kPartial_Combine;
+ }
+ return kNo_Combine;
+ }
+ if (edge->fFirstY == last->fFirstY) {
+ if (edge->fLastY == last->fLastY) {
+ return kTotal_Combine;
+ }
+ if (edge->fLastY < last->fLastY) {
+ last->fFirstY = edge->fLastY + 1;
+ return kPartial_Combine;
+ }
+ last->fFirstY = last->fLastY + 1;
+ last->fLastY = edge->fLastY;
+ last->fWinding = edge->fWinding;
+ return kPartial_Combine;
+ }
+ if (edge->fLastY == last->fLastY) {
+ if (edge->fFirstY > last->fFirstY) {
+ last->fLastY = edge->fFirstY - 1;
+ return kPartial_Combine;
+ }
+ last->fLastY = last->fFirstY - 1;
+ last->fFirstY = edge->fFirstY;
+ last->fWinding = edge->fWinding;
+ return kPartial_Combine;
+ }
+ return kNo_Combine;
+}
+
+static bool vertical_line(const SkEdge* edge) {
+ return !edge->fDX && !edge->fCurveCount;
+}
+
+void SkEdgeBuilder::addLine(const SkPoint pts[]) {
+ SkEdge* edge = typedAllocThrow<SkEdge>(fAlloc);
+ if (edge->setLine(pts[0], pts[1], fShiftUp)) {
+ if (vertical_line(edge) && fList.count()) {
+ Combine combine = CombineVertical(edge, *(fList.end() - 1));
+ if (kNo_Combine != combine) {
+ if (kTotal_Combine == combine) {
+ fList.pop();
+ }
+ goto unallocate_edge;
+ }
+ }
+ fList.push(edge);
+ } else {
+unallocate_edge:
+ ;
+ // TODO: unallocate edge from storage...
+ }
+}
+
+void SkEdgeBuilder::addQuad(const SkPoint pts[]) {
+ SkQuadraticEdge* edge = typedAllocThrow<SkQuadraticEdge>(fAlloc);
+ if (edge->setQuadratic(pts, fShiftUp)) {
+ fList.push(edge);
+ } else {
+ // TODO: unallocate edge from storage...
+ }
+}
+
+void SkEdgeBuilder::addCubic(const SkPoint pts[]) {
+ SkCubicEdge* edge = typedAllocThrow<SkCubicEdge>(fAlloc);
+ if (edge->setCubic(pts, fShiftUp)) {
+ fList.push(edge);
+ } else {
+ // TODO: unallocate edge from storage...
+ }
+}
+
+void SkEdgeBuilder::addClipper(SkEdgeClipper* clipper) {
+ SkPoint pts[4];
+ SkPath::Verb verb;
+
+ while ((verb = clipper->next(pts)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kLine_Verb:
+ this->addLine(pts);
+ break;
+ case SkPath::kQuad_Verb:
+ this->addQuad(pts);
+ break;
+ case SkPath::kCubic_Verb:
+ this->addCubic(pts);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void setShiftedClip(SkRect* dst, const SkIRect& src, int shift) {
+ dst->set(SkIntToScalar(src.fLeft >> shift),
+ SkIntToScalar(src.fTop >> shift),
+ SkIntToScalar(src.fRight >> shift),
+ SkIntToScalar(src.fBottom >> shift));
+}
+
+SkEdgeBuilder::Combine SkEdgeBuilder::checkVertical(const SkEdge* edge, SkEdge** edgePtr) {
+ return !vertical_line(edge) || edgePtr <= fEdgeList ? kNo_Combine :
+ CombineVertical(edge, edgePtr[-1]);
+}
+
+int SkEdgeBuilder::buildPoly(const SkPath& path, const SkIRect* iclip, int shiftUp,
+ bool canCullToTheRight) {
+ SkPath::Iter iter(path, true);
+ SkPoint pts[4];
+ SkPath::Verb verb;
+
+ int maxEdgeCount = path.countPoints();
+ if (iclip) {
+ // clipping can turn 1 line into (up to) kMaxClippedLineSegments, since
+ // we turn portions that are clipped out on the left/right into vertical
+ // segments.
+ SkASSERT_RELEASE(maxEdgeCount <= std::numeric_limits<int>::max() / SkLineClipper::kMaxClippedLineSegments);
+ maxEdgeCount *= SkLineClipper::kMaxClippedLineSegments;
+ }
+ SkASSERT_RELEASE((size_t)maxEdgeCount <= std::numeric_limits<size_t>::max() / (sizeof(SkEdge) + sizeof(SkEdge*)));
+ size_t maxEdgeSize = maxEdgeCount * sizeof(SkEdge);
+ size_t maxEdgePtrSize = maxEdgeCount * sizeof(SkEdge*);
+
+    // let's store the edges and their pointers in the same block
+ char* storage = (char*)fAlloc.allocThrow(maxEdgeSize + maxEdgePtrSize);
+ SkEdge* edge = reinterpret_cast<SkEdge*>(storage);
+ SkEdge** edgePtr = reinterpret_cast<SkEdge**>(storage + maxEdgeSize);
+ // Record the beginning of our pointers, so we can return them to the caller
+ fEdgeList = edgePtr;
+
+ if (iclip) {
+ SkRect clip;
+ setShiftedClip(&clip, *iclip, shiftUp);
+
+ while ((verb = iter.next(pts, false)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ case SkPath::kClose_Verb:
+ // we ignore these, and just get the whole segment from
+ // the corresponding line/quad/cubic verbs
+ break;
+ case SkPath::kLine_Verb: {
+ SkPoint lines[SkLineClipper::kMaxPoints];
+ int lineCount = SkLineClipper::ClipLine(pts, clip, lines, canCullToTheRight);
+ SkASSERT(lineCount <= SkLineClipper::kMaxClippedLineSegments);
+ for (int i = 0; i < lineCount; i++) {
+ if (edge->setLine(lines[i], lines[i + 1], shiftUp)) {
+ Combine combine = checkVertical(edge, edgePtr);
+ if (kNo_Combine == combine) {
+ *edgePtr++ = edge++;
+ } else if (kTotal_Combine == combine) {
+ --edgePtr;
+ }
+ }
+ }
+ break;
+ }
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ break;
+ }
+ }
+ } else {
+ while ((verb = iter.next(pts, false)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ case SkPath::kClose_Verb:
+ // we ignore these, and just get the whole segment from
+ // the corresponding line/quad/cubic verbs
+ break;
+ case SkPath::kLine_Verb:
+ if (edge->setLine(pts[0], pts[1], shiftUp)) {
+ Combine combine = checkVertical(edge, edgePtr);
+ if (kNo_Combine == combine) {
+ *edgePtr++ = edge++;
+ } else if (kTotal_Combine == combine) {
+ --edgePtr;
+ }
+ }
+ break;
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ break;
+ }
+ }
+ }
+ SkASSERT((char*)edge <= (char*)fEdgeList);
+ SkASSERT(edgePtr - fEdgeList <= maxEdgeCount);
+ return SkToInt(edgePtr - fEdgeList);
+}
+
+static void handle_quad(SkEdgeBuilder* builder, const SkPoint pts[3]) {
+ SkPoint monoX[5];
+ int n = SkChopQuadAtYExtrema(pts, monoX);
+ for (int i = 0; i <= n; i++) {
+ builder->addQuad(&monoX[i * 2]);
+ }
+}
+
+int SkEdgeBuilder::build(const SkPath& path, const SkIRect* iclip, int shiftUp,
+ bool canCullToTheRight) {
+ fAlloc.reset();
+ fList.reset();
+ fShiftUp = shiftUp;
+
+ if (SkPath::kLine_SegmentMask == path.getSegmentMasks()) {
+ return this->buildPoly(path, iclip, shiftUp, canCullToTheRight);
+ }
+
+ SkAutoConicToQuads quadder;
+ const SkScalar conicTol = SK_Scalar1 / 4;
+
+ SkPath::Iter iter(path, true);
+ SkPoint pts[4];
+ SkPath::Verb verb;
+
+ if (iclip) {
+ SkRect clip;
+ setShiftedClip(&clip, *iclip, shiftUp);
+ SkEdgeClipper clipper(canCullToTheRight);
+
+ while ((verb = iter.next(pts, false)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ case SkPath::kClose_Verb:
+ // we ignore these, and just get the whole segment from
+ // the corresponding line/quad/cubic verbs
+ break;
+ case SkPath::kLine_Verb: {
+ SkPoint lines[SkLineClipper::kMaxPoints];
+ int lineCount = SkLineClipper::ClipLine(pts, clip, lines, canCullToTheRight);
+ for (int i = 0; i < lineCount; i++) {
+ this->addLine(&lines[i]);
+ }
+ break;
+ }
+ case SkPath::kQuad_Verb:
+ if (clipper.clipQuad(pts, clip)) {
+ this->addClipper(&clipper);
+ }
+ break;
+ case SkPath::kConic_Verb: {
+ const SkPoint* quadPts = quadder.computeQuads(
+ pts, iter.conicWeight(), conicTol);
+ for (int i = 0; i < quadder.countQuads(); ++i) {
+ if (clipper.clipQuad(quadPts, clip)) {
+ this->addClipper(&clipper);
+ }
+ quadPts += 2;
+ }
+ } break;
+ case SkPath::kCubic_Verb:
+ if (clipper.clipCubic(pts, clip)) {
+ this->addClipper(&clipper);
+ }
+ break;
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ break;
+ }
+ }
+ } else {
+ while ((verb = iter.next(pts, false)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ case SkPath::kClose_Verb:
+ // we ignore these, and just get the whole segment from
+ // the corresponding line/quad/cubic verbs
+ break;
+ case SkPath::kLine_Verb:
+ this->addLine(pts);
+ break;
+ case SkPath::kQuad_Verb: {
+ handle_quad(this, pts);
+ break;
+ }
+ case SkPath::kConic_Verb: {
+ const SkPoint* quadPts = quadder.computeQuads(
+ pts, iter.conicWeight(), conicTol);
+ for (int i = 0; i < quadder.countQuads(); ++i) {
+ handle_quad(this, quadPts);
+ quadPts += 2;
+ }
+ } break;
+ case SkPath::kCubic_Verb: {
+ SkPoint monoY[10];
+ int n = SkChopCubicAtYExtrema(pts, monoY);
+ for (int i = 0; i <= n; i++) {
+ this->addCubic(&monoY[i * 3]);
+ }
+ break;
+ }
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ break;
+ }
+ }
+ }
+ fEdgeList = fList.begin();
+ return fList.count();
+}
diff --git a/gfx/skia/skia/src/core/SkEdgeBuilder.h b/gfx/skia/skia/src/core/SkEdgeBuilder.h
new file mode 100644
index 000000000..59f62870e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEdgeBuilder.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkEdgeBuilder_DEFINED
+#define SkEdgeBuilder_DEFINED
+
+#include "SkChunkAlloc.h"
+#include "SkRect.h"
+#include "SkTDArray.h"
+
+struct SkEdge;
+class SkEdgeClipper;
+class SkPath;
+
+class SkEdgeBuilder {
+public:
+ SkEdgeBuilder();
+
+ // returns the number of built edges. The array of those edge pointers
+ // is returned from edgeList().
+ int build(const SkPath& path, const SkIRect* clip, int shiftUp, bool clipToTheRight);
+
+ SkEdge** edgeList() { return fEdgeList; }
+
+private:
+ enum Combine {
+ kNo_Combine,
+ kPartial_Combine,
+ kTotal_Combine
+ };
+
+ static Combine CombineVertical(const SkEdge* edge, SkEdge* last);
+ Combine checkVertical(const SkEdge* edge, SkEdge** edgePtr);
+
+ SkChunkAlloc fAlloc;
+ SkTDArray<SkEdge*> fList;
+
+ /*
+     *  If we're in general mode, we allocate the pointers in fList, and this
+ * will point at fList.begin(). If we're in polygon mode, fList will be
+ * empty, as we will have preallocated room for the pointers in fAlloc's
+ * block, and fEdgeList will point into that.
+ */
+ SkEdge** fEdgeList;
+
+ int fShiftUp;
+
+public:
+ void addLine(const SkPoint pts[]);
+ void addQuad(const SkPoint pts[]);
+ void addCubic(const SkPoint pts[]);
+ void addClipper(SkEdgeClipper*);
+
+ int buildPoly(const SkPath& path, const SkIRect* clip, int shiftUp, bool clipToTheRight);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkEdgeClipper.cpp b/gfx/skia/skia/src/core/SkEdgeClipper.cpp
new file mode 100644
index 000000000..b5ac27ae5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEdgeClipper.cpp
@@ -0,0 +1,507 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkEdgeClipper.h"
+#include "SkGeometry.h"
+
+static bool quick_reject(const SkRect& bounds, const SkRect& clip) {
+ return bounds.fTop >= clip.fBottom || bounds.fBottom <= clip.fTop;
+}
+
+static inline void clamp_le(SkScalar& value, SkScalar max) {
+ if (value > max) {
+ value = max;
+ }
+}
+
+static inline void clamp_ge(SkScalar& value, SkScalar min) {
+ if (value < min) {
+ value = min;
+ }
+}
+
+/* src[] must be monotonic in Y. This routine copies src into dst, and sorts
+ it to be increasing in Y. If it had to reverse the order of the points,
+ it returns true, otherwise it returns false
+ */
+static bool sort_increasing_Y(SkPoint dst[], const SkPoint src[], int count) {
+ // we need the data to be monotonically increasing in Y
+ if (src[0].fY > src[count - 1].fY) {
+ for (int i = 0; i < count; i++) {
+ dst[i] = src[count - i - 1];
+ }
+ return true;
+ } else {
+ memcpy(dst, src, count * sizeof(SkPoint));
+ return false;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool chopMonoQuadAt(SkScalar c0, SkScalar c1, SkScalar c2,
+ SkScalar target, SkScalar* t) {
+ /* Solve F(t) = y where F(t) := [0](1-t)^2 + 2[1]t(1-t) + [2]t^2
+     * We solve for t using the quadratic formula, hence we have to rearrange
+     * our coefficients to look like At^2 + Bt + C
+ */
+ SkScalar A = c0 - c1 - c1 + c2;
+ SkScalar B = 2*(c1 - c0);
+ SkScalar C = c0 - target;
+
+ SkScalar roots[2]; // we only expect one, but make room for 2 for safety
+ int count = SkFindUnitQuadRoots(A, B, C, roots);
+ if (count) {
+ *t = roots[0];
+ return true;
+ }
+ return false;
+}
+
+static bool chopMonoQuadAtY(SkPoint pts[3], SkScalar y, SkScalar* t) {
+ return chopMonoQuadAt(pts[0].fY, pts[1].fY, pts[2].fY, y, t);
+}
+
+static bool chopMonoQuadAtX(SkPoint pts[3], SkScalar x, SkScalar* t) {
+ return chopMonoQuadAt(pts[0].fX, pts[1].fX, pts[2].fX, x, t);
+}
+
+// Modify pts[] in place so that it is clipped in Y to the clip rect
+static void chop_quad_in_Y(SkPoint pts[3], const SkRect& clip) {
+ SkScalar t;
+ SkPoint tmp[5]; // for SkChopQuadAt
+
+ // are we partially above
+ if (pts[0].fY < clip.fTop) {
+ if (chopMonoQuadAtY(pts, clip.fTop, &t)) {
+ // take the 2nd chopped quad
+ SkChopQuadAt(pts, tmp, t);
+ // clamp to clean up imprecise numerics in the chop
+ tmp[2].fY = clip.fTop;
+ clamp_ge(tmp[3].fY, clip.fTop);
+
+ pts[0] = tmp[2];
+ pts[1] = tmp[3];
+ } else {
+ // if chopMonoQuadAtY failed, then we may have hit inexact numerics
+ // so we just clamp against the top
+ for (int i = 0; i < 3; i++) {
+ if (pts[i].fY < clip.fTop) {
+ pts[i].fY = clip.fTop;
+ }
+ }
+ }
+ }
+
+ // are we partially below
+ if (pts[2].fY > clip.fBottom) {
+ if (chopMonoQuadAtY(pts, clip.fBottom, &t)) {
+ SkChopQuadAt(pts, tmp, t);
+ // clamp to clean up imprecise numerics in the chop
+ clamp_le(tmp[1].fY, clip.fBottom);
+ tmp[2].fY = clip.fBottom;
+
+ pts[1] = tmp[1];
+ pts[2] = tmp[2];
+ } else {
+ // if chopMonoQuadAtY failed, then we may have hit inexact numerics
+ // so we just clamp against the bottom
+ for (int i = 0; i < 3; i++) {
+ if (pts[i].fY > clip.fBottom) {
+ pts[i].fY = clip.fBottom;
+ }
+ }
+ }
+ }
+}
+
+// srcPts[] must be monotonic in X and Y
+void SkEdgeClipper::clipMonoQuad(const SkPoint srcPts[3], const SkRect& clip) {
+ SkPoint pts[3];
+ bool reverse = sort_increasing_Y(pts, srcPts, 3);
+
+ // are we completely above or below
+ if (pts[2].fY <= clip.fTop || pts[0].fY >= clip.fBottom) {
+ return;
+ }
+
+ // Now chop so that pts is contained within clip in Y
+ chop_quad_in_Y(pts, clip);
+
+ if (pts[0].fX > pts[2].fX) {
+ SkTSwap<SkPoint>(pts[0], pts[2]);
+ reverse = !reverse;
+ }
+ SkASSERT(pts[0].fX <= pts[1].fX);
+ SkASSERT(pts[1].fX <= pts[2].fX);
+
+    // Now chop in X as needed, and record the segments
+
+ if (pts[2].fX <= clip.fLeft) { // wholly to the left
+ this->appendVLine(clip.fLeft, pts[0].fY, pts[2].fY, reverse);
+ return;
+ }
+ if (pts[0].fX >= clip.fRight) { // wholly to the right
+ if (!this->canCullToTheRight()) {
+ this->appendVLine(clip.fRight, pts[0].fY, pts[2].fY, reverse);
+ }
+ return;
+ }
+
+ SkScalar t;
+ SkPoint tmp[5]; // for SkChopQuadAt
+
+ // are we partially to the left
+ if (pts[0].fX < clip.fLeft) {
+ if (chopMonoQuadAtX(pts, clip.fLeft, &t)) {
+ SkChopQuadAt(pts, tmp, t);
+ this->appendVLine(clip.fLeft, tmp[0].fY, tmp[2].fY, reverse);
+ // clamp to clean up imprecise numerics in the chop
+ tmp[2].fX = clip.fLeft;
+ clamp_ge(tmp[3].fX, clip.fLeft);
+
+ pts[0] = tmp[2];
+ pts[1] = tmp[3];
+ } else {
+            // if chopMonoQuadAtX failed, then we may have hit inexact numerics,
+            // so we just clamp against the left
+ this->appendVLine(clip.fLeft, pts[0].fY, pts[2].fY, reverse);
+ return;
+ }
+ }
+
+ // are we partially to the right
+ if (pts[2].fX > clip.fRight) {
+ if (chopMonoQuadAtX(pts, clip.fRight, &t)) {
+ SkChopQuadAt(pts, tmp, t);
+ // clamp to clean up imprecise numerics in the chop
+ clamp_le(tmp[1].fX, clip.fRight);
+ tmp[2].fX = clip.fRight;
+
+ this->appendQuad(tmp, reverse);
+ this->appendVLine(clip.fRight, tmp[2].fY, tmp[4].fY, reverse);
+ } else {
+            // if chopMonoQuadAtX failed, then we may have hit inexact numerics,
+            // so we just clamp against the right
+ this->appendVLine(clip.fRight, pts[0].fY, pts[2].fY, reverse);
+ }
+ } else { // wholly inside the clip
+ this->appendQuad(pts, reverse);
+ }
+}
+
+bool SkEdgeClipper::clipQuad(const SkPoint srcPts[3], const SkRect& clip) {
+ fCurrPoint = fPoints;
+ fCurrVerb = fVerbs;
+
+ SkRect bounds;
+ bounds.set(srcPts, 3);
+
+ if (!quick_reject(bounds, clip)) {
+ SkPoint monoY[5];
+ int countY = SkChopQuadAtYExtrema(srcPts, monoY);
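+        // Note: SkChopQuadAtYExtrema returns the number of interior Y extrema (0 or 1)
+        // and writes countY + 1 monotonic quads into monoY; consecutive quads share an
+        // endpoint, which is why quad i starts at monoY[i * 2] below.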
+ for (int y = 0; y <= countY; y++) {
+ SkPoint monoX[5];
+ int countX = SkChopQuadAtXExtrema(&monoY[y * 2], monoX);
+ for (int x = 0; x <= countX; x++) {
+ this->clipMonoQuad(&monoX[x * 2], clip);
+ SkASSERT(fCurrVerb - fVerbs < kMaxVerbs);
+ SkASSERT(fCurrPoint - fPoints <= kMaxPoints);
+ }
+ }
+ }
+
+ *fCurrVerb = SkPath::kDone_Verb;
+ fCurrPoint = fPoints;
+ fCurrVerb = fVerbs;
+ return SkPath::kDone_Verb != fVerbs[0];
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
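+// Fallback helper for when the analytic mono-cubic chop fails: walk t with halving
+// steps (a bisection on the cubic's value) until the cubic evaluated at t is within
+// 1/4 of x, and return the best t seen. src points at interleaved SkPoint coordinates,
+// so src[0], src[2], src[4], src[6] are the four control values of one coordinate.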
+static SkScalar mono_cubic_closestT(const SkScalar src[], SkScalar x) {
+ SkScalar t = 0.5f;
+ SkScalar lastT;
+ SkScalar bestT SK_INIT_TO_AVOID_WARNING;
+ SkScalar step = 0.25f;
+ SkScalar D = src[0];
+ SkScalar A = src[6] + 3*(src[2] - src[4]) - D;
+ SkScalar B = 3*(src[4] - src[2] - src[2] + D);
+ SkScalar C = 3*(src[2] - D);
+ x -= D;
+ SkScalar closest = SK_ScalarMax;
+ do {
+ SkScalar loc = ((A * t + B) * t + C) * t;
+ SkScalar dist = SkScalarAbs(loc - x);
+ if (closest > dist) {
+ closest = dist;
+ bestT = t;
+ }
+ lastT = t;
+ t += loc < x ? step : -step;
+ step *= 0.5f;
+ } while (closest > 0.25f && lastT != t);
+ return bestT;
+}
+
+static void chop_mono_cubic_at_y(SkPoint src[4], SkScalar y, SkPoint dst[7]) {
+ if (SkChopMonoCubicAtY(src, y, dst)) {
+ return;
+ }
+ SkChopCubicAt(src, dst, mono_cubic_closestT(&src->fY, y));
+}
+
+// Modify pts[] in place so that it is clipped in Y to the clip rect
+static void chop_cubic_in_Y(SkPoint pts[4], const SkRect& clip) {
+
+ // are we partially above
+ if (pts[0].fY < clip.fTop) {
+ SkPoint tmp[7];
+ chop_mono_cubic_at_y(pts, clip.fTop, tmp);
+
+ /*
+ * For a large range in the points, we can do a poor job of chopping, such that the t
+ * we computed resulted in the lower cubic still being partly above the clip.
+ *
+ * If just the first or first 2 Y values are above the fTop, we can just smash them
+ * down. If the first 3 Ys are above fTop, we can't smash all 3, as that can really
+         *  distort the cubic. In this case, we take the lower cubic (tmp[3..6]), treat it as
+         *  a guess, and re-chop against fTop. Then we fall through to checking if we need to
+ * smash the first 1 or 2 Y values.
+ */
+ if (tmp[3].fY < clip.fTop && tmp[4].fY < clip.fTop && tmp[5].fY < clip.fTop) {
+ SkPoint tmp2[4];
+ memcpy(tmp2, &tmp[3].fX, 4 * sizeof(SkPoint));
+ chop_mono_cubic_at_y(tmp2, clip.fTop, tmp);
+ }
+
+        // tmp[3].fY and tmp[4].fY should both be at or below clip.fTop.
+ // Since we can't trust the numerics of the chopper, we force those conditions now
+ tmp[3].fY = clip.fTop;
+ clamp_ge(tmp[4].fY, clip.fTop);
+
+ pts[0] = tmp[3];
+ pts[1] = tmp[4];
+ pts[2] = tmp[5];
+ }
+
+ // are we partially below
+ if (pts[3].fY > clip.fBottom) {
+ SkPoint tmp[7];
+ chop_mono_cubic_at_y(pts, clip.fBottom, tmp);
+ tmp[3].fY = clip.fBottom;
+ clamp_le(tmp[2].fY, clip.fBottom);
+
+ pts[1] = tmp[1];
+ pts[2] = tmp[2];
+ pts[3] = tmp[3];
+ }
+}
+
+static void chop_mono_cubic_at_x(SkPoint src[4], SkScalar x, SkPoint dst[7]) {
+ if (SkChopMonoCubicAtX(src, x, dst)) {
+ return;
+ }
+ SkChopCubicAt(src, dst, mono_cubic_closestT(&src->fX, x));
+}
+
+// srcPts[] must be monotonic in X and Y
+void SkEdgeClipper::clipMonoCubic(const SkPoint src[4], const SkRect& clip) {
+ SkPoint pts[4];
+ bool reverse = sort_increasing_Y(pts, src, 4);
+
+ // are we completely above or below
+ if (pts[3].fY <= clip.fTop || pts[0].fY >= clip.fBottom) {
+ return;
+ }
+
+ // Now chop so that pts is contained within clip in Y
+ chop_cubic_in_Y(pts, clip);
+
+ if (pts[0].fX > pts[3].fX) {
+ SkTSwap<SkPoint>(pts[0], pts[3]);
+ SkTSwap<SkPoint>(pts[1], pts[2]);
+ reverse = !reverse;
+ }
+
+    // Now chop in X as needed, and record the segments
+
+ if (pts[3].fX <= clip.fLeft) { // wholly to the left
+ this->appendVLine(clip.fLeft, pts[0].fY, pts[3].fY, reverse);
+ return;
+ }
+ if (pts[0].fX >= clip.fRight) { // wholly to the right
+ if (!this->canCullToTheRight()) {
+ this->appendVLine(clip.fRight, pts[0].fY, pts[3].fY, reverse);
+ }
+ return;
+ }
+
+ // are we partially to the left
+ if (pts[0].fX < clip.fLeft) {
+ SkPoint tmp[7];
+ chop_mono_cubic_at_x(pts, clip.fLeft, tmp);
+ this->appendVLine(clip.fLeft, tmp[0].fY, tmp[3].fY, reverse);
+
+        // tmp[3].fX and tmp[4].fX should both be at or to the right of clip.fLeft.
+ // Since we can't trust the numerics of
+ // the chopper, we force those conditions now
+ tmp[3].fX = clip.fLeft;
+ clamp_ge(tmp[4].fX, clip.fLeft);
+
+ pts[0] = tmp[3];
+ pts[1] = tmp[4];
+ pts[2] = tmp[5];
+ }
+
+ // are we partially to the right
+ if (pts[3].fX > clip.fRight) {
+ SkPoint tmp[7];
+ chop_mono_cubic_at_x(pts, clip.fRight, tmp);
+ tmp[3].fX = clip.fRight;
+ clamp_le(tmp[2].fX, clip.fRight);
+
+ this->appendCubic(tmp, reverse);
+ this->appendVLine(clip.fRight, tmp[3].fY, tmp[6].fY, reverse);
+ } else { // wholly inside the clip
+ this->appendCubic(pts, reverse);
+ }
+}
+
+static bool quick_reject_in_y(const SkPoint pts[4], const SkRect& clip) {
+ Sk4s ys(pts[0].fY, pts[1].fY, pts[2].fY, pts[3].fY);
+ Sk4s t(clip.top());
+ Sk4s b(clip.bottom());
+
+ return (ys < t).allTrue() || (ys > b).allTrue();
+}
+
+bool SkEdgeClipper::clipCubic(const SkPoint srcPts[4], const SkRect& clip) {
+ fCurrPoint = fPoints;
+ fCurrVerb = fVerbs;
+
+ if (!quick_reject_in_y(srcPts, clip)) {
+ SkPoint monoY[10];
+ int countY = SkChopCubicAtYExtrema(srcPts, monoY);
+ for (int y = 0; y <= countY; y++) {
+ SkPoint monoX[10];
+ int countX = SkChopCubicAtXExtrema(&monoY[y * 3], monoX);
+ for (int x = 0; x <= countX; x++) {
+ this->clipMonoCubic(&monoX[x * 3], clip);
+ SkASSERT(fCurrVerb - fVerbs < kMaxVerbs);
+ SkASSERT(fCurrPoint - fPoints <= kMaxPoints);
+ }
+ }
+ }
+
+ *fCurrVerb = SkPath::kDone_Verb;
+ fCurrPoint = fPoints;
+ fCurrVerb = fVerbs;
+ return SkPath::kDone_Verb != fVerbs[0];
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkEdgeClipper::appendVLine(SkScalar x, SkScalar y0, SkScalar y1,
+ bool reverse) {
+ *fCurrVerb++ = SkPath::kLine_Verb;
+
+ if (reverse) {
+ SkTSwap<SkScalar>(y0, y1);
+ }
+ fCurrPoint[0].set(x, y0);
+ fCurrPoint[1].set(x, y1);
+ fCurrPoint += 2;
+}
+
+void SkEdgeClipper::appendQuad(const SkPoint pts[3], bool reverse) {
+ *fCurrVerb++ = SkPath::kQuad_Verb;
+
+ if (reverse) {
+ fCurrPoint[0] = pts[2];
+ fCurrPoint[2] = pts[0];
+ } else {
+ fCurrPoint[0] = pts[0];
+ fCurrPoint[2] = pts[2];
+ }
+ fCurrPoint[1] = pts[1];
+ fCurrPoint += 3;
+}
+
+void SkEdgeClipper::appendCubic(const SkPoint pts[4], bool reverse) {
+ *fCurrVerb++ = SkPath::kCubic_Verb;
+
+ if (reverse) {
+ for (int i = 0; i < 4; i++) {
+ fCurrPoint[i] = pts[3 - i];
+ }
+ } else {
+ memcpy(fCurrPoint, pts, 4 * sizeof(SkPoint));
+ }
+ fCurrPoint += 4;
+}
+
+SkPath::Verb SkEdgeClipper::next(SkPoint pts[]) {
+ SkPath::Verb verb = *fCurrVerb;
+
+ switch (verb) {
+ case SkPath::kLine_Verb:
+ memcpy(pts, fCurrPoint, 2 * sizeof(SkPoint));
+ fCurrPoint += 2;
+ fCurrVerb += 1;
+ break;
+ case SkPath::kQuad_Verb:
+ memcpy(pts, fCurrPoint, 3 * sizeof(SkPoint));
+ fCurrPoint += 3;
+ fCurrVerb += 1;
+ break;
+ case SkPath::kCubic_Verb:
+ memcpy(pts, fCurrPoint, 4 * sizeof(SkPoint));
+ fCurrPoint += 4;
+ fCurrVerb += 1;
+ break;
+ case SkPath::kDone_Verb:
+ break;
+ default:
+            SkDEBUGFAIL("unexpected verb in quadclipper2 iter");
+ break;
+ }
+ return verb;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+static void assert_monotonic(const SkScalar coord[], int count) {
+ if (coord[0] > coord[(count - 1) * 2]) {
+ for (int i = 1; i < count; i++) {
+ SkASSERT(coord[2 * (i - 1)] >= coord[i * 2]);
+ }
+ } else if (coord[0] < coord[(count - 1) * 2]) {
+ for (int i = 1; i < count; i++) {
+ SkASSERT(coord[2 * (i - 1)] <= coord[i * 2]);
+ }
+ } else {
+ for (int i = 1; i < count; i++) {
+ SkASSERT(coord[2 * (i - 1)] == coord[i * 2]);
+ }
+ }
+}
+
+void sk_assert_monotonic_y(const SkPoint pts[], int count) {
+ if (count > 1) {
+ assert_monotonic(&pts[0].fY, count);
+ }
+}
+
+void sk_assert_monotonic_x(const SkPoint pts[], int count) {
+ if (count > 1) {
+ assert_monotonic(&pts[0].fX, count);
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkEdgeClipper.h b/gfx/skia/skia/src/core/SkEdgeClipper.h
new file mode 100644
index 000000000..e460c1cd8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEdgeClipper.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkEdgeClipper_DEFINED
+#define SkEdgeClipper_DEFINED
+
+#include "SkPath.h"
+
+/** This is basically an iterator. It is initialized with an edge and a clip,
+ and then next() is called until it returns kDone_Verb.
+ */
+class SkEdgeClipper {
+public:
+ SkEdgeClipper(bool canCullToTheRight) : fCanCullToTheRight(canCullToTheRight) {}
+
+ bool clipQuad(const SkPoint pts[3], const SkRect& clip);
+ bool clipCubic(const SkPoint pts[4], const SkRect& clip);
+
+ SkPath::Verb next(SkPoint pts[]);
+
+ bool canCullToTheRight() const { return fCanCullToTheRight; }
+
+private:
+ SkPoint* fCurrPoint;
+ SkPath::Verb* fCurrVerb;
+ const bool fCanCullToTheRight;
+
+ enum {
+ kMaxVerbs = 13,
+ kMaxPoints = 32
+ };
+ SkPoint fPoints[kMaxPoints];
+ SkPath::Verb fVerbs[kMaxVerbs];
+
+ void clipMonoQuad(const SkPoint srcPts[3], const SkRect& clip);
+ void clipMonoCubic(const SkPoint srcPts[4], const SkRect& clip);
+ void appendVLine(SkScalar x, SkScalar y0, SkScalar y1, bool reverse);
+ void appendQuad(const SkPoint pts[3], bool reverse);
+ void appendCubic(const SkPoint pts[4], bool reverse);
+};
+
+#ifdef SK_DEBUG
+ void sk_assert_monotonic_x(const SkPoint pts[], int count);
+ void sk_assert_monotonic_y(const SkPoint pts[], int count);
+#else
+ #define sk_assert_monotonic_x(pts, count)
+ #define sk_assert_monotonic_y(pts, count)
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkEmptyShader.h b/gfx/skia/skia/src/core/SkEmptyShader.h
new file mode 100644
index 000000000..528ceeabe
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEmptyShader.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEmptyShader_DEFINED
+#define SkEmptyShader_DEFINED
+
+#include "SkShader.h"
+
+// TODO: move this to private, as there is a public factory on SkShader
+
+/**
+ * \class SkEmptyShader
+ * A Shader that always draws nothing. Its createContext always returns nullptr.
+ */
+class SK_API SkEmptyShader : public SkShader {
+public:
+ SkEmptyShader() {}
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkEmptyShader)
+
+protected:
+ size_t onContextSize(const ContextRec&) const override {
+ // Even though createContext returns nullptr we have to return a value of at least
+ // sizeof(SkShader::Context) to satisfy SkSmallAllocator.
+ return sizeof(SkShader::Context);
+ }
+
+ SkShader::Context* onCreateContext(const ContextRec&, void*) const override {
+ return nullptr;
+ }
+
+ void flatten(SkWriteBuffer& buffer) const override {
+ // Do nothing.
+ // We just don't want to fall through to SkShader::flatten(),
+ // which will write data we don't care to serialize or decode.
+ }
+
+private:
+ typedef SkShader INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkEndian.h b/gfx/skia/skia/src/core/SkEndian.h
new file mode 100644
index 000000000..39e5dd8fb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEndian.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEndian_DEFINED
+#define SkEndian_DEFINED
+
+#include "SkTypes.h"
+
+/** \file SkEndian.h
+
+    Macros and helper functions for handling 16, 32 and 64 bit values in
+ big and little endian formats.
+*/
+
+#if defined(SK_CPU_LENDIAN) && defined(SK_CPU_BENDIAN)
+ #error "can't have both LENDIAN and BENDIAN defined"
+#endif
+
+#if !defined(SK_CPU_LENDIAN) && !defined(SK_CPU_BENDIAN)
+ #error "need either LENDIAN or BENDIAN defined"
+#endif
+
+/** Swap the two bytes in the low 16 bits of the parameter.
+ e.g. 0x1234 -> 0x3412
+*/
+static inline uint16_t SkEndianSwap16(uint16_t value) {
+ return static_cast<uint16_t>((value >> 8) | (value << 8));
+}
+
+template<uint16_t N> struct SkTEndianSwap16 {
+ static const uint16_t value = static_cast<uint16_t>((N >> 8) | ((N & 0xFF) << 8));
+};
+
+/** Vector version of SkEndianSwap16(), which swaps the
+ low two bytes of each value in the array.
+*/
+static inline void SkEndianSwap16s(uint16_t array[], int count) {
+ SkASSERT(count == 0 || array != nullptr);
+
+ while (--count >= 0) {
+ *array = SkEndianSwap16(*array);
+ array += 1;
+ }
+}
+
+/** Reverse all 4 bytes in a 32bit value.
+ e.g. 0x12345678 -> 0x78563412
+*/
+static constexpr uint32_t SkEndianSwap32(uint32_t value) {
+ return ((value & 0xFF) << 24) |
+ ((value & 0xFF00) << 8) |
+ ((value & 0xFF0000) >> 8) |
+ (value >> 24);
+}
+
+template<uint32_t N> struct SkTEndianSwap32 {
+ static const uint32_t value = ((N & 0xFF) << 24) |
+ ((N & 0xFF00) << 8) |
+ ((N & 0xFF0000) >> 8) |
+ (N >> 24);
+};
+
+/** Vector version of SkEndianSwap32(), which swaps the
+ bytes of each value in the array.
+*/
+static inline void SkEndianSwap32s(uint32_t array[], int count) {
+ SkASSERT(count == 0 || array != nullptr);
+
+ while (--count >= 0) {
+ *array = SkEndianSwap32(*array);
+ array += 1;
+ }
+}
+
+/** Reverse all 8 bytes in a 64bit value.
+ e.g. 0x1122334455667788 -> 0x8877665544332211
+*/
+static inline uint64_t SkEndianSwap64(uint64_t value) {
+ return (((value & 0x00000000000000FFULL) << (8*7)) |
+ ((value & 0x000000000000FF00ULL) << (8*5)) |
+ ((value & 0x0000000000FF0000ULL) << (8*3)) |
+ ((value & 0x00000000FF000000ULL) << (8*1)) |
+ ((value & 0x000000FF00000000ULL) >> (8*1)) |
+ ((value & 0x0000FF0000000000ULL) >> (8*3)) |
+ ((value & 0x00FF000000000000ULL) >> (8*5)) |
+ ((value) >> (8*7)));
+}
+template<uint64_t N> struct SkTEndianSwap64 {
+ static const uint64_t value = (((N & 0x00000000000000FFULL) << (8*7)) |
+ ((N & 0x000000000000FF00ULL) << (8*5)) |
+ ((N & 0x0000000000FF0000ULL) << (8*3)) |
+ ((N & 0x00000000FF000000ULL) << (8*1)) |
+ ((N & 0x000000FF00000000ULL) >> (8*1)) |
+ ((N & 0x0000FF0000000000ULL) >> (8*3)) |
+ ((N & 0x00FF000000000000ULL) >> (8*5)) |
+ ((N) >> (8*7)));
+};
+
+/** Vector version of SkEndianSwap64(), which swaps the
+ bytes of each value in the array.
+*/
+static inline void SkEndianSwap64s(uint64_t array[], int count) {
+ SkASSERT(count == 0 || array != nullptr);
+
+ while (--count >= 0) {
+ *array = SkEndianSwap64(*array);
+ array += 1;
+ }
+}
+
+#ifdef SK_CPU_LENDIAN
+ #define SkEndian_SwapBE16(n) SkEndianSwap16(n)
+ #define SkEndian_SwapBE32(n) SkEndianSwap32(n)
+ #define SkEndian_SwapBE64(n) SkEndianSwap64(n)
+ #define SkEndian_SwapLE16(n) (n)
+ #define SkEndian_SwapLE32(n) (n)
+ #define SkEndian_SwapLE64(n) (n)
+
+ #define SkTEndian_SwapBE16(n) SkTEndianSwap16<n>::value
+ #define SkTEndian_SwapBE32(n) SkTEndianSwap32<n>::value
+ #define SkTEndian_SwapBE64(n) SkTEndianSwap64<n>::value
+ #define SkTEndian_SwapLE16(n) (n)
+ #define SkTEndian_SwapLE32(n) (n)
+ #define SkTEndian_SwapLE64(n) (n)
+#else // SK_CPU_BENDIAN
+ #define SkEndian_SwapBE16(n) (n)
+ #define SkEndian_SwapBE32(n) (n)
+ #define SkEndian_SwapBE64(n) (n)
+ #define SkEndian_SwapLE16(n) SkEndianSwap16(n)
+ #define SkEndian_SwapLE32(n) SkEndianSwap32(n)
+ #define SkEndian_SwapLE64(n) SkEndianSwap64(n)
+
+ #define SkTEndian_SwapBE16(n) (n)
+ #define SkTEndian_SwapBE32(n) (n)
+ #define SkTEndian_SwapBE64(n) (n)
+ #define SkTEndian_SwapLE16(n) SkTEndianSwap16<n>::value
+ #define SkTEndian_SwapLE32(n) SkTEndianSwap32<n>::value
+ #define SkTEndian_SwapLE64(n) SkTEndianSwap64<n>::value
+#endif
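+
+// For example, on a little-endian CPU SkEndian_SwapBE32(0x12345678) == 0x78563412,
+// while SkEndian_SwapLE32(0x12345678) == 0x12345678.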
+
+// When a bytestream is embedded in a 32-bit word, how far we need to
+// shift the word so that each byte lands in the low 8 bits, where it can
+// be extracted by ANDing with 0xff.
+#ifdef SK_CPU_LENDIAN
+ #define SkEndian_Byte0Shift 0
+ #define SkEndian_Byte1Shift 8
+ #define SkEndian_Byte2Shift 16
+ #define SkEndian_Byte3Shift 24
+#else // SK_CPU_BENDIAN
+ #define SkEndian_Byte0Shift 24
+ #define SkEndian_Byte1Shift 16
+ #define SkEndian_Byte2Shift 8
+ #define SkEndian_Byte3Shift 0
+#endif
+
+
+#if defined(SK_UINT8_BITFIELD_LENDIAN) && defined(SK_UINT8_BITFIELD_BENDIAN)
+ #error "can't have both bitfield LENDIAN and BENDIAN defined"
+#endif
+
+#if !defined(SK_UINT8_BITFIELD_LENDIAN) && !defined(SK_UINT8_BITFIELD_BENDIAN)
+ #ifdef SK_CPU_LENDIAN
+ #define SK_UINT8_BITFIELD_LENDIAN
+ #else
+ #define SK_UINT8_BITFIELD_BENDIAN
+ #endif
+#endif
+
+#ifdef SK_UINT8_BITFIELD_LENDIAN
+ #define SK_UINT8_BITFIELD(f0, f1, f2, f3, f4, f5, f6, f7) \
+ SK_OT_BYTE f0 : 1; \
+ SK_OT_BYTE f1 : 1; \
+ SK_OT_BYTE f2 : 1; \
+ SK_OT_BYTE f3 : 1; \
+ SK_OT_BYTE f4 : 1; \
+ SK_OT_BYTE f5 : 1; \
+ SK_OT_BYTE f6 : 1; \
+ SK_OT_BYTE f7 : 1;
+#else
+ #define SK_UINT8_BITFIELD(f0, f1, f2, f3, f4, f5, f6, f7) \
+ SK_OT_BYTE f7 : 1; \
+ SK_OT_BYTE f6 : 1; \
+ SK_OT_BYTE f5 : 1; \
+ SK_OT_BYTE f4 : 1; \
+ SK_OT_BYTE f3 : 1; \
+ SK_OT_BYTE f2 : 1; \
+ SK_OT_BYTE f1 : 1; \
+ SK_OT_BYTE f0 : 1;
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkError.cpp b/gfx/skia/skia/src/core/SkError.cpp
new file mode 100644
index 000000000..d85b5ff0c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkError.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTLS.h"
+#include "SkTypes.h"
+#include "SkError.h"
+#include "SkErrorInternals.h"
+
+#include <stdio.h>
+#include <stdarg.h>
+
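+// All of the error state below is per-thread: SkTLS::Get() lazily creates each slot
+// with the matching Create* callback and tears it down with the Delete* callback, so
+// an error set on one thread is never visible on another.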
+namespace {
+void *CreateThreadError() { return new SkError(kNoError_SkError); }
+void DeleteThreadError(void *v) { delete reinterpret_cast<SkError *>(v); }
+ #define THREAD_ERROR \
+ (*reinterpret_cast<SkError*>(SkTLS::Get(CreateThreadError, DeleteThreadError)))
+
+ void *CreateThreadErrorCallback() {
+ return new SkErrorCallbackFunction(SkErrorInternals::DefaultErrorCallback);
+ }
+ void DeleteThreadErrorCallback(void* v) {
+ delete reinterpret_cast<SkErrorCallbackFunction *>(v);
+ }
+
+ #define THREAD_ERROR_CALLBACK \
+ *(reinterpret_cast<SkErrorCallbackFunction *>(SkTLS::Get(CreateThreadErrorCallback, \
+ DeleteThreadErrorCallback)))
+
+ void *CreateThreadErrorContext() { return new void **(nullptr); }
+ void DeleteThreadErrorContext(void *v) { delete reinterpret_cast<void **>(v); }
+ #define THREAD_ERROR_CONTEXT \
+ (*reinterpret_cast<void **>(SkTLS::Get(CreateThreadErrorContext, DeleteThreadErrorContext)))
+
+ #define ERROR_STRING_LENGTH 2048
+
+ void *CreateThreadErrorString() { return new char[(ERROR_STRING_LENGTH)]; }
+ void DeleteThreadErrorString(void *v) { delete[] reinterpret_cast<char *>(v); }
+ #define THREAD_ERROR_STRING \
+ (reinterpret_cast<char *>(SkTLS::Get(CreateThreadErrorString, DeleteThreadErrorString)))
+}
+
+SkError SkGetLastError() {
+ return SkErrorInternals::GetLastError();
+}
+void SkClearLastError() {
+ SkErrorInternals::ClearError();
+}
+void SkSetErrorCallback(SkErrorCallbackFunction cb, void *context) {
+ SkErrorInternals::SetErrorCallback(cb, context);
+}
+const char *SkGetLastErrorString() {
+ return SkErrorInternals::GetLastErrorString();
+}
+
+// ------------ Private Error functions ---------
+
+void SkErrorInternals::SetErrorCallback(SkErrorCallbackFunction cb, void *context) {
+ if (cb == nullptr) {
+ THREAD_ERROR_CALLBACK = SkErrorInternals::DefaultErrorCallback;
+ } else {
+ THREAD_ERROR_CALLBACK = cb;
+ }
+ THREAD_ERROR_CONTEXT = context;
+}
+
+void SkErrorInternals::DefaultErrorCallback(SkError code, void *context) {
+ SkDebugf("Skia Error: %s\n", SkGetLastErrorString());
+}
+
+void SkErrorInternals::ClearError() {
+ SkErrorInternals::SetError( kNoError_SkError, "All is well" );
+}
+
+SkError SkErrorInternals::GetLastError() {
+ return THREAD_ERROR;
+}
+
+const char *SkErrorInternals::GetLastErrorString() {
+ return THREAD_ERROR_STRING;
+}
+
+void SkErrorInternals::SetError(SkError code, const char *fmt, ...) {
+ THREAD_ERROR = code;
+ va_list args;
+
+ char *str = THREAD_ERROR_STRING;
+ const char *error_name = nullptr;
+ switch( code ) {
+ case kNoError_SkError:
+ error_name = "No Error";
+ break;
+ case kInvalidArgument_SkError:
+ error_name = "Invalid Argument";
+ break;
+ case kInvalidOperation_SkError:
+ error_name = "Invalid Operation";
+ break;
+ case kInvalidHandle_SkError:
+ error_name = "Invalid Handle";
+ break;
+ case kInvalidPaint_SkError:
+ error_name = "Invalid Paint";
+ break;
+ case kOutOfMemory_SkError:
+ error_name = "Out Of Memory";
+ break;
+ case kParseError_SkError:
+ error_name = "Parse Error";
+ break;
+ default:
+ error_name = "Unknown error";
+ break;
+ }
+
+ sprintf( str, "%s: ", error_name );
+ int string_left = SkToInt(ERROR_STRING_LENGTH - strlen(str));
+ str += strlen(str);
+
+ va_start( args, fmt );
+ vsnprintf( str, string_left, fmt, args );
+ va_end( args );
+ SkErrorCallbackFunction fn = THREAD_ERROR_CALLBACK;
+ if (fn && code != kNoError_SkError) {
+ fn(code, THREAD_ERROR_CONTEXT);
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkErrorInternals.h b/gfx/skia/skia/src/core/SkErrorInternals.h
new file mode 100644
index 000000000..d57357609
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkErrorInternals.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkErrorInternals_DEFINED
+#define SkErrorInternals_DEFINED
+
+#include "SkError.h"
+
+class SkErrorInternals {
+
+public:
+ static void ClearError();
+ static void SetError(SkError code, const char *fmt, ...);
+ static SkError GetLastError();
+ static const char *GetLastErrorString();
+ static void SetErrorCallback(SkErrorCallbackFunction cb, void *context);
+ static void DefaultErrorCallback(SkError code, void *context);
+};
+
+
+
+#endif /* SkErrorInternals_DEFINED */
diff --git a/gfx/skia/skia/src/core/SkExchange.h b/gfx/skia/skia/src/core/SkExchange.h
new file mode 100644
index 000000000..50e2fe983
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkExchange.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkExchange_DEFINED
+#define SkExchange_DEFINED
+
+#include <utility>
+
+namespace skstd {
+
+// std::exchange is in C++14
+template<typename T, typename U = T>
+inline static T exchange(T& obj, U&& new_val) {
+ T old_val = std::move(obj);
+ obj = std::forward<U>(new_val);
+ return old_val;
+}
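+
+// Typical use: take the current value and reset the slot in one step, e.g.
+//   T* old = skstd::exchange(fPtr, nullptr);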
+
+}
+
+#endif // SkExchange_DEFINED
diff --git a/gfx/skia/skia/src/core/SkFDot6.h b/gfx/skia/skia/src/core/SkFDot6.h
new file mode 100644
index 000000000..726aa2e46
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFDot6.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkFDot6_DEFINED
+#define SkFDot6_DEFINED
+
+#include "SkFixed.h"
+#include "SkScalar.h"
+#include "SkMath.h"
+
+typedef int32_t SkFDot6;
+
+/* This uses the magic number approach suggested here:
+ * http://stereopsis.com/sree/fpu2006.html and used in
+ * _cairo_fixed_from_double. It does banker's rounding
+ * (i.e. round to nearest even)
+ */
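+/* Concretely: adding magic = 1.5 * 2^(52 - fractionalBits) forces the sum's exponent
+ * to a fixed value, so one mantissa ULP equals 2^-fractionalBits and the low 32 bits
+ * of the mantissa end up holding the input rounded to that many fractional bits
+ * (the FPU's round-to-nearest-even does the rounding).
+ */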
+inline SkFDot6 SkScalarRoundToFDot6(SkScalar x, int shift = 0)
+{
+ union {
+ double fDouble;
+ int32_t fBits[2];
+ } tmp;
+ int fractionalBits = 6 + shift;
+ double magic = (1LL << (52 - (fractionalBits))) * 1.5;
+
+ tmp.fDouble = SkScalarToDouble(x) + magic;
+#ifdef SK_CPU_BENDIAN
+ return tmp.fBits[1];
+#else
+ return tmp.fBits[0];
+#endif
+}
+
+#define SK_FDot6One (64)
+#define SK_FDot6Half (32)
+
+#ifdef SK_DEBUG
+ inline SkFDot6 SkIntToFDot6(S16CPU x) {
+ SkASSERT(SkToS16(x) == x);
+ return x << 6;
+ }
+#else
+ #define SkIntToFDot6(x) ((x) << 6)
+#endif
+
+#define SkFDot6Floor(x) ((x) >> 6)
+#define SkFDot6Ceil(x) (((x) + 63) >> 6)
+#define SkFDot6Round(x) (((x) + 32) >> 6)
+
+#define SkFixedToFDot6(x) ((x) >> 10)
+
+inline SkFixed SkFDot6ToFixed(SkFDot6 x) {
+ SkASSERT((SkLeftShift(x, 10) >> 10) == x);
+
+ return SkLeftShift(x, 10);
+}
+
+#define SkScalarToFDot6(x) (SkFDot6)((x) * 64)
+#define SkFDot6ToScalar(x) ((SkScalar)(x) * 0.015625f)
+#define SkFDot6ToFloat SkFDot6ToScalar
+
+inline SkFixed SkFDot6Div(SkFDot6 a, SkFDot6 b) {
+ SkASSERT(b != 0);
+
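+    // If a fits in 16 bits, a << 16 cannot overflow a signed 32-bit value, so a plain
+    // integer divide gives the exact 16.16 result; otherwise fall back to SkFixedDiv,
+    // which avoids the overflow.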
+ if (a == (int16_t)a) {
+ return SkLeftShift(a, 16) / b;
+ } else {
+ return SkFixedDiv(a, b);
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkFilterProc.cpp b/gfx/skia/skia/src/core/SkFilterProc.cpp
new file mode 100644
index 000000000..5049727bd
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFilterProc.cpp
@@ -0,0 +1,293 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkFilterProc.h"
+
+/* [1-x 1-y] [x 1-y]
+ [1-x y] [x y]
+*/
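+// The 16 procs below hard-code these weights for x, y in {0, 1/4, 1/2, 3/4} (two
+// fractional bits each), using small power-of-two denominators; e.g. bilerp11 is the
+// x = y = 1/4 case with weights 9/16, 3/16, 3/16 and 1/16.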
+
+static unsigned bilerp00(unsigned a00, unsigned a01, unsigned a10, unsigned a11) { return a00; }
+static unsigned bilerp01(unsigned a00, unsigned a01, unsigned a10, unsigned a11) { return (3 * a00 + a01) >> 2; }
+static unsigned bilerp02(unsigned a00, unsigned a01, unsigned a10, unsigned a11) { return (a00 + a01) >> 1; }
+static unsigned bilerp03(unsigned a00, unsigned a01, unsigned a10, unsigned a11) { return (a00 + 3 * a01) >> 2; }
+
+static unsigned bilerp10(unsigned a00, unsigned a01, unsigned a10, unsigned a11) { return (3 * a00 + a10) >> 2; }
+static unsigned bilerp11(unsigned a00, unsigned a01, unsigned a10, unsigned a11) { return (9 * a00 + 3 * (a01 + a10) + a11) >> 4; }
+static unsigned bilerp12(unsigned a00, unsigned a01, unsigned a10, unsigned a11) { return (3 * (a00 + a01) + a10 + a11) >> 3; }
+static unsigned bilerp13(unsigned a00, unsigned a01, unsigned a10, unsigned a11) { return (9 * a01 + 3 * (a00 + a11) + a10) >> 4; }
+
+static unsigned bilerp20(unsigned a00, unsigned a01, unsigned a10, unsigned a11) { return (a00 + a10) >> 1; }
+static unsigned bilerp21(unsigned a00, unsigned a01, unsigned a10, unsigned a11) { return (3 * (a00 + a10) + a01 + a11) >> 3; }
+static unsigned bilerp22(unsigned a00, unsigned a01, unsigned a10, unsigned a11) { return (a00 + a01 + a10 + a11) >> 2; }
+static unsigned bilerp23(unsigned a00, unsigned a01, unsigned a10, unsigned a11) { return (3 * (a01 + a11) + a00 + a10) >> 3; }
+
+static unsigned bilerp30(unsigned a00, unsigned a01, unsigned a10, unsigned a11) { return (a00 + 3 * a10) >> 2; }
+static unsigned bilerp31(unsigned a00, unsigned a01, unsigned a10, unsigned a11) { return (9 * a10 + 3 * (a00 + a11) + a01) >> 4; }
+static unsigned bilerp32(unsigned a00, unsigned a01, unsigned a10, unsigned a11) { return (3 * (a10 + a11) + a00 + a01) >> 3; }
+static unsigned bilerp33(unsigned a00, unsigned a01, unsigned a10, unsigned a11) { return (9 * a11 + 3 * (a01 + a10) + a00) >> 4; }
+
+static const SkFilterProc gBilerpProcs[4 * 4] = {
+ bilerp00, bilerp01, bilerp02, bilerp03,
+ bilerp10, bilerp11, bilerp12, bilerp13,
+ bilerp20, bilerp21, bilerp22, bilerp23,
+ bilerp30, bilerp31, bilerp32, bilerp33
+};
+
+const SkFilterProc* SkGetBilinearFilterProcTable()
+{
+ return gBilerpProcs;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+#define MASK 0xFF00FF
+#define LO_PAIR(x) ((x) & MASK)
+#define HI_PAIR(x) (((x) >> 8) & MASK)
+#define COMBINE(lo, hi) (((lo) & ~0xFF00) | (((hi) & ~0xFF00) << 8))
+
+///////////////////////////////////////////////////////////////////////////////
+
+static unsigned bilerp4_00(uint32_t c00, uint32_t c01, uint32_t c10, uint32_t c11) {
+ return c00;
+}
+static unsigned bilerp4_01(uint32_t c00, uint32_t c01, uint32_t c10, uint32_t c11) {
+ uint32_t lo = (3 * LO_PAIR(c00) + LO_PAIR(c01)) >> 2;
+ uint32_t hi = (3 * HI_PAIR(c00) + HI_PAIR(c01)) >> 2;
+ return COMBINE(lo, hi);
+}
+static unsigned bilerp4_02(uint32_t c00, uint32_t c01, uint32_t c10, uint32_t c11) {
+ uint32_t lo = (LO_PAIR(c00) + LO_PAIR(c01)) >> 1;
+ uint32_t hi = (HI_PAIR(c00) + HI_PAIR(c01)) >> 1;
+ return COMBINE(lo, hi);
+}
+static unsigned bilerp4_03(uint32_t c00, uint32_t c01, uint32_t c10, uint32_t c11) {
+ uint32_t lo = (LO_PAIR(c00) + 3 * LO_PAIR(c01)) >> 2;
+ uint32_t hi = (HI_PAIR(c00) + 3 * HI_PAIR(c01)) >> 2;
+ return COMBINE(lo, hi);
+}
+
+static unsigned bilerp4_10(uint32_t c00, uint32_t c01, uint32_t c10, uint32_t c11) {
+ uint32_t lo = (3 * LO_PAIR(c00) + LO_PAIR(c10)) >> 2;
+ uint32_t hi = (3 * HI_PAIR(c00) + HI_PAIR(c10)) >> 2;
+ return COMBINE(lo, hi);
+}
+static unsigned bilerp4_11(uint32_t c00, uint32_t c01, uint32_t c10, uint32_t c11) {
+ uint32_t lo = (9 * LO_PAIR(c00) + 3 * (LO_PAIR(c01) + LO_PAIR(c10)) + LO_PAIR(c11)) >> 4;
+ uint32_t hi = (9 * HI_PAIR(c00) + 3 * (HI_PAIR(c01) + HI_PAIR(c10)) + HI_PAIR(c11)) >> 4;
+ return COMBINE(lo, hi);
+}
+static unsigned bilerp4_12(uint32_t c00, uint32_t c01, uint32_t c10, uint32_t c11) {
+ uint32_t lo = (3 * (LO_PAIR(c00) + LO_PAIR(c01)) + LO_PAIR(c10) + LO_PAIR(c11)) >> 3;
+ uint32_t hi = (3 * (HI_PAIR(c00) + HI_PAIR(c01)) + HI_PAIR(c10) + HI_PAIR(c11)) >> 3;
+ return COMBINE(lo, hi);
+}
+static unsigned bilerp4_13(uint32_t c00, uint32_t c01, uint32_t c10, uint32_t c11) {
+ uint32_t lo = (9 * LO_PAIR(c01) + 3 * (LO_PAIR(c00) + LO_PAIR(c11)) + LO_PAIR(c10)) >> 4;
+ uint32_t hi = (9 * HI_PAIR(c01) + 3 * (HI_PAIR(c00) + HI_PAIR(c11)) + HI_PAIR(c10)) >> 4;
+ return COMBINE(lo, hi);
+}
+
+static unsigned bilerp4_20(uint32_t c00, uint32_t c01, uint32_t c10, uint32_t c11) {
+ uint32_t lo = (LO_PAIR(c00) + LO_PAIR(c10)) >> 1;
+ uint32_t hi = (HI_PAIR(c00) + HI_PAIR(c10)) >> 1;
+ return COMBINE(lo, hi);
+}
+static unsigned bilerp4_21(uint32_t c00, uint32_t c01, uint32_t c10, uint32_t c11) {
+ uint32_t lo = (3 * (LO_PAIR(c00) + LO_PAIR(c10)) + LO_PAIR(c01) + LO_PAIR(c11)) >> 3;
+ uint32_t hi = (3 * (HI_PAIR(c00) + HI_PAIR(c10)) + HI_PAIR(c01) + HI_PAIR(c11)) >> 3;
+ return COMBINE(lo, hi);
+}
+static unsigned bilerp4_22(uint32_t c00, uint32_t c01, uint32_t c10, uint32_t c11) {
+ uint32_t lo = (LO_PAIR(c00) + LO_PAIR(c01) + LO_PAIR(c10) + LO_PAIR(c11)) >> 2;
+ uint32_t hi = (HI_PAIR(c00) + HI_PAIR(c01) + HI_PAIR(c10) + HI_PAIR(c11)) >> 2;
+ return COMBINE(lo, hi);
+}
+static unsigned bilerp4_23(uint32_t c00, uint32_t c01, uint32_t c10, uint32_t c11) {
+ uint32_t lo = (3 * (LO_PAIR(c01) + LO_PAIR(c11)) + LO_PAIR(c00) + LO_PAIR(c10)) >> 3;
+ uint32_t hi = (3 * (HI_PAIR(c01) + HI_PAIR(c11)) + HI_PAIR(c00) + HI_PAIR(c10)) >> 3;
+ return COMBINE(lo, hi);
+}
+
+static unsigned bilerp4_30(uint32_t c00, uint32_t c01, uint32_t c10, uint32_t c11) {
+ uint32_t lo = (LO_PAIR(c00) + 3 * LO_PAIR(c10)) >> 2;
+ uint32_t hi = (HI_PAIR(c00) + 3 * HI_PAIR(c10)) >> 2;
+ return COMBINE(lo, hi);
+}
+static unsigned bilerp4_31(uint32_t c00, uint32_t c01, uint32_t c10, uint32_t c11) {
+ uint32_t lo = (9 * LO_PAIR(c10) + 3 * (LO_PAIR(c00) + LO_PAIR(c11)) + LO_PAIR(c01)) >> 4;
+ uint32_t hi = (9 * HI_PAIR(c10) + 3 * (HI_PAIR(c00) + HI_PAIR(c11)) + HI_PAIR(c01)) >> 4;
+ return COMBINE(lo, hi);
+}
+static unsigned bilerp4_32(uint32_t c00, uint32_t c01, uint32_t c10, uint32_t c11) {
+ uint32_t lo = (3 * (LO_PAIR(c10) + LO_PAIR(c11)) + LO_PAIR(c00) + LO_PAIR(c01)) >> 3;
+ uint32_t hi = (3 * (HI_PAIR(c10) + HI_PAIR(c11)) + HI_PAIR(c00) + HI_PAIR(c01)) >> 3;
+ return COMBINE(lo, hi);
+}
+static unsigned bilerp4_33(uint32_t c00, uint32_t c01, uint32_t c10, uint32_t c11) {
+ uint32_t lo = (9 * LO_PAIR(c11) + 3 * (LO_PAIR(c01) + LO_PAIR(c10)) + LO_PAIR(c00)) >> 4;
+ uint32_t hi = (9 * HI_PAIR(c11) + 3 * (HI_PAIR(c01) + HI_PAIR(c10)) + HI_PAIR(c00)) >> 4;
+ return COMBINE(lo, hi);
+}
+
+static const SkFilter32Proc gBilerp32Procs[4 * 4] = {
+ bilerp4_00, bilerp4_01, bilerp4_02, bilerp4_03,
+ bilerp4_10, bilerp4_11, bilerp4_12, bilerp4_13,
+ bilerp4_20, bilerp4_21, bilerp4_22, bilerp4_23,
+ bilerp4_30, bilerp4_31, bilerp4_32, bilerp4_33
+};
+
+const SkFilter32Proc* SkGetFilter32ProcTable()
+{
+ return gBilerp32Procs;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static uint32_t bilerptr00(const uint32_t* a00, const uint32_t* a01, const uint32_t* a10, const uint32_t* a11) {
+ return *a00;
+}
+static uint32_t bilerptr01(const uint32_t* a00, const uint32_t* a01, const uint32_t* a10, const uint32_t* a11) {
+ uint32_t c00 = *a00;
+ uint32_t c01 = *a01;
+ uint32_t lo = (3 * LO_PAIR(c00) + LO_PAIR(c01)) >> 2;
+ uint32_t hi = (3 * HI_PAIR(c00) + HI_PAIR(c01)) >> 2;
+ return COMBINE(lo, hi);
+}
+static uint32_t bilerptr02(const uint32_t* a00, const uint32_t* a01, const uint32_t* a10, const uint32_t* a11) {
+ uint32_t c00 = *a00;
+ uint32_t c01 = *a01;
+ uint32_t lo = (LO_PAIR(c00) + LO_PAIR(c01)) >> 1;
+ uint32_t hi = (HI_PAIR(c00) + HI_PAIR(c01)) >> 1;
+ return COMBINE(lo, hi);
+}
+static uint32_t bilerptr03(const uint32_t* a00, const uint32_t* a01, const uint32_t* a10, const uint32_t* a11) {
+ uint32_t c00 = *a00;
+ uint32_t c01 = *a01;
+ uint32_t lo = (LO_PAIR(c00) + 3 * LO_PAIR(c01)) >> 2;
+ uint32_t hi = (HI_PAIR(c00) + 3 * HI_PAIR(c01)) >> 2;
+ return COMBINE(lo, hi);
+}
+
+static uint32_t bilerptr10(const uint32_t* a00, const uint32_t* a01, const uint32_t* a10, const uint32_t* a11) {
+ uint32_t c00 = *a00;
+ uint32_t c10 = *a10;
+ uint32_t lo = (3 * LO_PAIR(c00) + LO_PAIR(c10)) >> 2;
+ uint32_t hi = (3 * HI_PAIR(c00) + HI_PAIR(c10)) >> 2;
+ return COMBINE(lo, hi);
+}
+static uint32_t bilerptr11(const uint32_t* a00, const uint32_t* a01, const uint32_t* a10, const uint32_t* a11) {
+ uint32_t c00 = *a00;
+ uint32_t c01 = *a01;
+ uint32_t c10 = *a10;
+ uint32_t c11 = *a11;
+ uint32_t lo = (9 * LO_PAIR(c00) + 3 * (LO_PAIR(c01) + LO_PAIR(c10)) + LO_PAIR(c11)) >> 4;
+ uint32_t hi = (9 * HI_PAIR(c00) + 3 * (HI_PAIR(c01) + HI_PAIR(c10)) + HI_PAIR(c11)) >> 4;
+ return COMBINE(lo, hi);
+}
+static uint32_t bilerptr12(const uint32_t* a00, const uint32_t* a01, const uint32_t* a10, const uint32_t* a11) {
+ uint32_t c00 = *a00;
+ uint32_t c01 = *a01;
+ uint32_t c10 = *a10;
+ uint32_t c11 = *a11;
+ uint32_t lo = (3 * (LO_PAIR(c00) + LO_PAIR(c01)) + LO_PAIR(c10) + LO_PAIR(c11)) >> 3;
+ uint32_t hi = (3 * (HI_PAIR(c00) + HI_PAIR(c01)) + HI_PAIR(c10) + HI_PAIR(c11)) >> 3;
+ return COMBINE(lo, hi);
+}
+static uint32_t bilerptr13(const uint32_t* a00, const uint32_t* a01, const uint32_t* a10, const uint32_t* a11) {
+ uint32_t c00 = *a00;
+ uint32_t c01 = *a01;
+ uint32_t c10 = *a10;
+ uint32_t c11 = *a11;
+ uint32_t lo = (9 * LO_PAIR(c01) + 3 * (LO_PAIR(c00) + LO_PAIR(c11)) + LO_PAIR(c10)) >> 4;
+ uint32_t hi = (9 * HI_PAIR(c01) + 3 * (HI_PAIR(c00) + HI_PAIR(c11)) + HI_PAIR(c10)) >> 4;
+ return COMBINE(lo, hi);
+}
+
+static uint32_t bilerptr20(const uint32_t* a00, const uint32_t* a01, const uint32_t* a10, const uint32_t* a11) {
+ uint32_t c00 = *a00;
+ uint32_t c10 = *a10;
+ uint32_t lo = (LO_PAIR(c00) + LO_PAIR(c10)) >> 1;
+ uint32_t hi = (HI_PAIR(c00) + HI_PAIR(c10)) >> 1;
+ return COMBINE(lo, hi);
+}
+static uint32_t bilerptr21(const uint32_t* a00, const uint32_t* a01, const uint32_t* a10, const uint32_t* a11) {
+ uint32_t c00 = *a00;
+ uint32_t c01 = *a01;
+ uint32_t c10 = *a10;
+ uint32_t c11 = *a11;
+ uint32_t lo = (3 * (LO_PAIR(c00) + LO_PAIR(c10)) + LO_PAIR(c01) + LO_PAIR(c11)) >> 3;
+ uint32_t hi = (3 * (HI_PAIR(c00) + HI_PAIR(c10)) + HI_PAIR(c01) + HI_PAIR(c11)) >> 3;
+ return COMBINE(lo, hi);
+}
+static uint32_t bilerptr22(const uint32_t* a00, const uint32_t* a01, const uint32_t* a10, const uint32_t* a11) {
+ uint32_t c00 = *a00;
+ uint32_t c01 = *a01;
+ uint32_t c10 = *a10;
+ uint32_t c11 = *a11;
+ uint32_t lo = (LO_PAIR(c00) + LO_PAIR(c01) + LO_PAIR(c10) + LO_PAIR(c11)) >> 2;
+ uint32_t hi = (HI_PAIR(c00) + HI_PAIR(c01) + HI_PAIR(c10) + HI_PAIR(c11)) >> 2;
+ return COMBINE(lo, hi);
+}
+static uint32_t bilerptr23(const uint32_t* a00, const uint32_t* a01, const uint32_t* a10, const uint32_t* a11) {
+ uint32_t c00 = *a00;
+ uint32_t c01 = *a01;
+ uint32_t c10 = *a10;
+ uint32_t c11 = *a11;
+ uint32_t lo = (3 * (LO_PAIR(c01) + LO_PAIR(c11)) + LO_PAIR(c00) + LO_PAIR(c10)) >> 3;
+ uint32_t hi = (3 * (HI_PAIR(c01) + HI_PAIR(c11)) + HI_PAIR(c00) + HI_PAIR(c10)) >> 3;
+ return COMBINE(lo, hi);
+}
+
+static uint32_t bilerptr30(const uint32_t* a00, const uint32_t* a01, const uint32_t* a10, const uint32_t* a11) {
+ uint32_t c00 = *a00;
+ uint32_t c10 = *a10;
+ uint32_t lo = (LO_PAIR(c00) + 3 * LO_PAIR(c10)) >> 2;
+ uint32_t hi = (HI_PAIR(c00) + 3 * HI_PAIR(c10)) >> 2;
+ return COMBINE(lo, hi);
+}
+static uint32_t bilerptr31(const uint32_t* a00, const uint32_t* a01, const uint32_t* a10, const uint32_t* a11) {
+ uint32_t c00 = *a00;
+ uint32_t c01 = *a01;
+ uint32_t c10 = *a10;
+ uint32_t c11 = *a11;
+ uint32_t lo = (9 * LO_PAIR(c10) + 3 * (LO_PAIR(c00) + LO_PAIR(c11)) + LO_PAIR(c01)) >> 4;
+ uint32_t hi = (9 * HI_PAIR(c10) + 3 * (HI_PAIR(c00) + HI_PAIR(c11)) + HI_PAIR(c01)) >> 4;
+ return COMBINE(lo, hi);
+}
+static uint32_t bilerptr32(const uint32_t* a00, const uint32_t* a01, const uint32_t* a10, const uint32_t* a11) {
+ uint32_t c00 = *a00;
+ uint32_t c01 = *a01;
+ uint32_t c10 = *a10;
+ uint32_t c11 = *a11;
+ uint32_t lo = (3 * (LO_PAIR(c10) + LO_PAIR(c11)) + LO_PAIR(c00) + LO_PAIR(c01)) >> 3;
+ uint32_t hi = (3 * (HI_PAIR(c10) + HI_PAIR(c11)) + HI_PAIR(c00) + HI_PAIR(c01)) >> 3;
+ return COMBINE(lo, hi);
+}
+static uint32_t bilerptr33(const uint32_t* a00, const uint32_t* a01, const uint32_t* a10, const uint32_t* a11) {
+ uint32_t c00 = *a00;
+ uint32_t c01 = *a01;
+ uint32_t c10 = *a10;
+ uint32_t c11 = *a11;
+ uint32_t lo = (9 * LO_PAIR(c11) + 3 * (LO_PAIR(c01) + LO_PAIR(c10)) + LO_PAIR(c00)) >> 4;
+ uint32_t hi = (9 * HI_PAIR(c11) + 3 * (HI_PAIR(c01) + HI_PAIR(c10)) + HI_PAIR(c00)) >> 4;
+ return COMBINE(lo, hi);
+}
+
+static const SkFilterPtrProc gBilerpPtrProcs[4 * 4] = {
+ bilerptr00, bilerptr01, bilerptr02, bilerptr03,
+ bilerptr10, bilerptr11, bilerptr12, bilerptr13,
+ bilerptr20, bilerptr21, bilerptr22, bilerptr23,
+ bilerptr30, bilerptr31, bilerptr32, bilerptr33
+};
+
+const SkFilterPtrProc* SkGetBilinearFilterPtrProcTable()
+{
+ return gBilerpPtrProcs;
+}
diff --git a/gfx/skia/skia/src/core/SkFilterProc.h b/gfx/skia/skia/src/core/SkFilterProc.h
new file mode 100644
index 000000000..7348967fd
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFilterProc.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkFilter_DEFINED
+#define SkFilter_DEFINED
+
+#include "SkMath.h"
+#include "SkFixed.h"
+
+typedef unsigned (*SkFilterProc)(unsigned x00, unsigned x01,
+ unsigned x10, unsigned x11);
+
+const SkFilterProc* SkGetBilinearFilterProcTable();
+
+inline SkFilterProc SkGetBilinearFilterProc(const SkFilterProc* table,
+ SkFixed x, SkFixed y)
+{
+ SkASSERT(table);
+
+ // convert to dot 2
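+    // (x << 16) drops the integer part of the 16.16 value; >> 30 then keeps the top
+    // two fractional bits, i.e. the fraction quantized to quarters.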
+ x = (unsigned)(x << 16) >> 30;
+ y = (unsigned)(y << 16) >> 30;
+ return table[(y << 2) | x];
+}
+
+inline SkFilterProc SkGetBilinearFilterProc22(const SkFilterProc* table,
+ unsigned x, unsigned y)
+{
+ SkASSERT(table);
+
+ // extract low 2 bits
+ x = x << 30 >> 30;
+ y = y << 30 >> 30;
+ return table[(y << 2) | x];
+}
+
+inline const SkFilterProc* SkGetBilinearFilterProc22Row(const SkFilterProc* table,
+ unsigned y)
+{
+ SkASSERT(table);
+ // extract low 2 bits and shift up 2
+ return &table[y << 30 >> 28];
+}
+
+inline SkFilterProc SkGetBilinearFilterProc22RowProc(const SkFilterProc* row,
+ unsigned x)
+{
+ SkASSERT(row);
+ // extract low 2 bits
+ return row[x << 30 >> 30];
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef unsigned (*SkFilter32Proc)(uint32_t x00, uint32_t x01,
+ uint32_t x10, uint32_t x11);
+
+const SkFilter32Proc* SkGetFilter32ProcTable();
+
+inline SkFilter32Proc SkGetFilter32Proc22(const SkFilter32Proc* table,
+ unsigned x, unsigned y)
+{
+ SkASSERT(table);
+
+ // extract low 2 bits
+ x = x << 30 >> 30;
+ y = y << 30 >> 30;
+ return table[(y << 2) | x];
+}
+
+inline const SkFilter32Proc* SkGetFilter32Proc22Row(const SkFilter32Proc* table,
+ unsigned y)
+{
+ SkASSERT(table);
+ // extract low 2 bits and shift up 2
+ return &table[y << 30 >> 28];
+}
+
+inline SkFilter32Proc SkGetFilter32Proc22RowProc(const SkFilter32Proc* row,
+ unsigned x)
+{
+ SkASSERT(row);
+ // extract low 2 bits
+ return row[x << 30 >> 30];
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** Special version of SkFilterProc. This takes the address of 4 ints, and combines them a byte at a
+ time. AABBCCDD.
+*/
+typedef uint32_t (*SkFilterPtrProc)(const uint32_t*, const uint32_t*, const uint32_t*, const uint32_t*);
+
+const SkFilterPtrProc* SkGetBilinearFilterPtrProcTable();
+inline SkFilterPtrProc SkGetBilinearFilterPtrProc(const SkFilterPtrProc* table, SkFixed x, SkFixed y)
+{
+ SkASSERT(table);
+
+ // convert to dot 2
+ x = (unsigned)(x << 16) >> 30;
+ y = (unsigned)(y << 16) >> 30;
+ return table[(y << 2) | x];
+}
+
+/** Given a Y value, return a subset of the proc table for that value.
+ Pass this to SkGetBilinearFilterPtrXProc with the corresponding X value to get the
+ correct proc.
+*/
+inline const SkFilterPtrProc* SkGetBilinearFilterPtrProcYTable(const SkFilterPtrProc* table, SkFixed y)
+{
+ SkASSERT(table);
+
+ y = (unsigned)(y << 16) >> 30;
+ return table + (y << 2);
+}
+
+/** Given a subtable returned by SkGetBilinearFilterPtrProcYTable(), return the proc for the
+ specified X value.
+*/
+inline SkFilterPtrProc SkGetBilinearFilterPtrXProc(const SkFilterPtrProc* table, SkFixed x)
+{
+ SkASSERT(table);
+
+ // convert to dot 2
+ x = (unsigned)(x << 16) >> 30;
+ return table[x];
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkFindAndPlaceGlyph.h b/gfx/skia/skia/src/core/SkFindAndPlaceGlyph.h
new file mode 100644
index 000000000..257528cd0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFindAndPlaceGlyph.h
@@ -0,0 +1,736 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFindAndPositionGlyph_DEFINED
+#define SkFindAndPositionGlyph_DEFINED
+
+#include "SkAutoKern.h"
+#include "SkGlyph.h"
+#include "SkGlyphCache.h"
+#include "SkPaint.h"
+#include "SkTemplates.h"
+#include "SkUtils.h"
+#include <utility>
+
+// Calculate the size of the largest of the Ts.
+// This must be top level because there is no specialization of inner classes.
+template<typename... Ts> struct SkMaxSizeOf;
+
+template<>
+struct SkMaxSizeOf<> {
+ static const size_t value = 0;
+};
+
+template<typename H, typename... Ts>
+struct SkMaxSizeOf<H, Ts...> {
+ static const size_t value =
+ sizeof(H) >= SkMaxSizeOf<Ts...>::value ? sizeof(H) : SkMaxSizeOf<Ts...>::value;
+};
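+// For example, SkMaxSizeOf<char, short, int>::value == sizeof(int).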
+
+
+// This is a temporary helper function to work around a bug in the code generation
+// for aarch64 (arm) on GCC 4.9. This bug does not show up on other platforms, so it
+// seems to be an aarch64 backend problem.
+//
+// GCC 4.9 on ARM64 does not generate the proper constructor code for PositionReader or
+// GlyphFindAndPlace. The vtable is not set properly without adding the fixme code.
+// The implementation is in SkDraw.cpp.
+extern void FixGCC49Arm64Bug(int v);
+
+class SkFindAndPlaceGlyph {
+public:
+ template<typename ProcessOneGlyph>
+ static void ProcessText(
+ SkPaint::TextEncoding, const char text[], size_t byteLength,
+ SkPoint offset, const SkMatrix& matrix, SkPaint::Align textAlignment,
+ SkGlyphCache* cache, ProcessOneGlyph&& processOneGlyph);
+    // ProcessPosText handles all cases for finding and positioning glyphs. It has a very large
+    // multiplicity. It figures out the glyph, position and rounding, and passes those parameters
+    // to processOneGlyph.
+ //
+ // The routine processOneGlyph passed in by the client has the following signature:
+ // void f(const SkGlyph& glyph, SkPoint position, SkPoint rounding);
+ //
+ // * Sub-pixel positioning (2) - use sub-pixel positioning.
+ // * Text alignment (3) - text alignment with respect to the glyph's width.
+ // * Matrix type (3) - special cases for translation and X-coordinate scaling.
+ // * Components per position (2) - the positions vector can have a common Y with different
+ // Xs, or XY-pairs.
+ // * Axis Alignment (for sub-pixel positioning) (3) - when using sub-pixel positioning, round
+ // to a whole coordinate instead of using sub-pixel positioning.
+ // The number of variations is 108 for sub-pixel and 36 for full-pixel.
+    // This routine handles all of them using an inline polymorphic variable (no heap allocation).
+ template<typename ProcessOneGlyph>
+ static void ProcessPosText(
+ SkPaint::TextEncoding, const char text[], size_t byteLength,
+ SkPoint offset, const SkMatrix& matrix, const SkScalar pos[], int scalarsPerPosition,
+ SkPaint::Align textAlignment,
+ SkGlyphCache* cache, ProcessOneGlyph&& processOneGlyph);
+
+private:
+ // UntaggedVariant is a pile of memory that can hold one of the Ts. It provides a way
+ // to initialize that memory in a typesafe way.
+ template<typename... Ts>
+ class UntaggedVariant {
+ public:
+ UntaggedVariant() { }
+
+ ~UntaggedVariant() { }
+ UntaggedVariant(const UntaggedVariant&) = delete;
+ UntaggedVariant& operator=(const UntaggedVariant&) = delete;
+ UntaggedVariant(UntaggedVariant&&) = delete;
+ UntaggedVariant& operator=(UntaggedVariant&&) = delete;
+
+ template<typename Variant, typename... Args>
+ void initialize(Args&&... args) {
+ SkASSERT(sizeof(Variant) <= sizeof(fSpace));
+ #if defined(_MSC_VER) && _MSC_VER < 1900
+ #define alignof __alignof
+ #endif
+ SkASSERT(alignof(Variant) <= alignof(Space));
+ new(&fSpace) Variant(std::forward<Args>(args)...);
+ }
+
+ private:
+ typedef SkAlignedSStorage<SkMaxSizeOf<Ts...>::value> Space;
+ Space fSpace;
+ };
+
+ // PolymorphicVariant holds subclasses of Base without slicing. Ts must be subclasses of Base.
+ template<typename Base, typename... Ts>
+ class PolymorphicVariant {
+ public:
+ typedef UntaggedVariant<Ts...> Variants;
+
+ template<typename Initializer>
+ PolymorphicVariant(Initializer&& initializer) {
+ initializer(&fVariants);
+ }
+ ~PolymorphicVariant() { get()->~Base(); }
+ Base* get() const { return reinterpret_cast<Base*>(&fVariants); }
+ Base* operator->() const { return get(); }
+ Base& operator*() const { return *get(); }
+
+ private:
+ mutable Variants fVariants;
+ };
+
+ // GlyphFinderInterface is the polymorphic base for classes that parse a stream of chars into
+    // the right UniChar (or GlyphID) and look up the glyph in the cache. The concrete
+ // implementations are: Utf8GlyphFinder, Utf16GlyphFinder, Utf32GlyphFinder,
+ // and GlyphIdGlyphFinder.
+ class GlyphFinderInterface {
+ public:
+ virtual ~GlyphFinderInterface() {}
+ virtual const SkGlyph& lookupGlyph(const char** text) = 0;
+ virtual const SkGlyph& lookupGlyphXY(const char** text, SkFixed x, SkFixed y) = 0;
+ };
+
+ class UtfNGlyphFinder : public GlyphFinderInterface {
+ public:
+ UtfNGlyphFinder(SkGlyphCache* cache) : fCache(cache) { SkASSERT(cache != nullptr); }
+
+ const SkGlyph& lookupGlyph(const char** text) override {
+ SkASSERT(text != nullptr);
+ return fCache->getUnicharMetrics(nextUnichar(text));
+ }
+ const SkGlyph& lookupGlyphXY(const char** text, SkFixed x, SkFixed y) override {
+ SkASSERT(text != nullptr);
+ return fCache->getUnicharMetrics(nextUnichar(text), x, y);
+ }
+
+ private:
+ virtual SkUnichar nextUnichar(const char** text) = 0;
+ SkGlyphCache* fCache;
+ };
+
+ class Utf8GlyphFinder final : public UtfNGlyphFinder {
+ public:
+ Utf8GlyphFinder(SkGlyphCache* cache) : UtfNGlyphFinder(cache) { }
+
+ private:
+ SkUnichar nextUnichar(const char** text) override { return SkUTF8_NextUnichar(text); }
+ };
+
+ class Utf16GlyphFinder final : public UtfNGlyphFinder {
+ public:
+ Utf16GlyphFinder(SkGlyphCache* cache) : UtfNGlyphFinder(cache) { }
+
+ private:
+ SkUnichar nextUnichar(const char** text) override {
+ return SkUTF16_NextUnichar((const uint16_t**)text);
+ }
+ };
+
+ class Utf32GlyphFinder final : public UtfNGlyphFinder {
+ public:
+ Utf32GlyphFinder(SkGlyphCache* cache) : UtfNGlyphFinder(cache) { }
+
+ private:
+ SkUnichar nextUnichar(const char** text) override {
+ const int32_t* ptr = *(const int32_t**)text;
+ SkUnichar uni = *ptr++;
+ *text = (const char*)ptr;
+ return uni;
+ }
+ };
+
+ class GlyphIdGlyphFinder final : public GlyphFinderInterface {
+ public:
+ GlyphIdGlyphFinder(SkGlyphCache* cache) : fCache(cache) { SkASSERT(cache != nullptr); }
+
+ const SkGlyph& lookupGlyph(const char** text) override {
+ return fCache->getGlyphIDMetrics(nextGlyphId(text));
+ }
+ const SkGlyph& lookupGlyphXY(const char** text, SkFixed x, SkFixed y) override {
+ return fCache->getGlyphIDMetrics(nextGlyphId(text), x, y);
+ }
+
+ private:
+ uint16_t nextGlyphId(const char** text) {
+ SkASSERT(text != nullptr);
+
+ const uint16_t* ptr = *(const uint16_t**)text;
+ uint16_t glyphID = *ptr;
+ ptr += 1;
+ *text = (const char*)ptr;
+ return glyphID;
+ }
+ SkGlyphCache* fCache;
+ };
+
+ typedef PolymorphicVariant<
+ GlyphFinderInterface,
+ Utf8GlyphFinder,
+ Utf16GlyphFinder,
+ Utf32GlyphFinder,
+ GlyphIdGlyphFinder> LookupGlyphVariant;
+
+ class LookupGlyph : public LookupGlyphVariant {
+ public:
+ LookupGlyph(SkPaint::TextEncoding encoding, SkGlyphCache* cache)
+ : LookupGlyphVariant(
+ [&](LookupGlyphVariant::Variants* to_init) {
+ switch(encoding) {
+ case SkPaint::kUTF8_TextEncoding:
+ to_init->initialize<Utf8GlyphFinder>(cache);
+ break;
+ case SkPaint::kUTF16_TextEncoding:
+ to_init->initialize<Utf16GlyphFinder>(cache);
+ break;
+ case SkPaint::kUTF32_TextEncoding:
+ to_init->initialize<Utf32GlyphFinder>(cache);
+ break;
+ case SkPaint::kGlyphID_TextEncoding:
+ to_init->initialize<GlyphIdGlyphFinder>(cache);
+ break;
+ }
+ }
+ ) { }
+ };
+
+ // PositionReaderInterface reads a point from the pos vector.
+ // * HorizontalPositions - assumes a common Y for many X values.
+ // * ArbitraryPositions - a list of (X,Y) pairs.
+ class PositionReaderInterface {
+ public:
+ virtual ~PositionReaderInterface() { }
+ virtual SkPoint nextPoint() = 0;
+ // This is only here to fix a GCC 4.9 aarch64 code gen bug.
+ // See comment at the top of the file.
+ virtual int forceUseForBug() = 0;
+ };
+
+ class HorizontalPositions final : public PositionReaderInterface {
+ public:
+ explicit HorizontalPositions(const SkScalar* positions)
+ : fPositions(positions) { }
+
+ SkPoint nextPoint() override {
+ SkScalar x = *fPositions++;
+ return {x, 0};
+ }
+
+ int forceUseForBug() override { return 1; }
+
+ private:
+ const SkScalar* fPositions;
+ };
+
+ class ArbitraryPositions final : public PositionReaderInterface {
+ public:
+ explicit ArbitraryPositions(const SkScalar* positions)
+ : fPositions(positions) { }
+
+ SkPoint nextPoint() override {
+ SkPoint to_return{fPositions[0], fPositions[1]};
+ fPositions += 2;
+ return to_return;
+ }
+
+ int forceUseForBug() override { return 2; }
+
+ private:
+ const SkScalar* fPositions;
+ };
+
+ typedef PolymorphicVariant<PositionReaderInterface, HorizontalPositions, ArbitraryPositions>
+ PositionReader;
+
+    // MapperInterface maps a given point through the matrix. There are several shortcut
+    // variants.
+    // * TranslationMapper - assumes a translation-only matrix.
+    // * XScaleMapper - assumes an X scaling and a translation.
+    // * GeneralMapper - handles all other matrices.
+ class MapperInterface {
+ public:
+ virtual ~MapperInterface() { }
+
+ virtual SkPoint map(SkPoint position) const = 0;
+ };
+
+ class TranslationMapper final : public MapperInterface {
+ public:
+ TranslationMapper(const SkMatrix& matrix, const SkPoint origin)
+ : fTranslate(matrix.mapXY(origin.fX, origin.fY)) { }
+
+ SkPoint map(SkPoint position) const override {
+ return position + fTranslate;
+ }
+
+ private:
+ const SkPoint fTranslate;
+ };
+
+ class XScaleMapper final : public MapperInterface {
+ public:
+ XScaleMapper(const SkMatrix& matrix, const SkPoint origin)
+ : fTranslate(matrix.mapXY(origin.fX, origin.fY)), fXScale(matrix.getScaleX()) { }
+
+ SkPoint map(SkPoint position) const override {
+ return {fXScale * position.fX + fTranslate.fX, fTranslate.fY};
+ }
+
+ private:
+ const SkPoint fTranslate;
+ const SkScalar fXScale;
+ };
+
+ // The caller must keep matrix alive while this class is used.
+ class GeneralMapper final : public MapperInterface {
+ public:
+ GeneralMapper(const SkMatrix& matrix, const SkPoint origin)
+ : fOrigin(origin), fMatrix(matrix), fMapProc(matrix.getMapXYProc()) { }
+
+ SkPoint map(SkPoint position) const override {
+ SkPoint result;
+ fMapProc(fMatrix, position.fX + fOrigin.fX, position.fY + fOrigin.fY, &result);
+ return result;
+ }
+
+ private:
+ const SkPoint fOrigin;
+ const SkMatrix& fMatrix;
+ const SkMatrix::MapXYProc fMapProc;
+ };
+
+ typedef PolymorphicVariant<
+ MapperInterface, TranslationMapper, XScaleMapper, GeneralMapper> Mapper;
+
+ // TextAlignmentAdjustment handles shifting the glyph based on its width.
+ static SkPoint TextAlignmentAdjustment(SkPaint::Align textAlignment, const SkGlyph& glyph) {
+ switch (textAlignment) {
+ case SkPaint::kLeft_Align:
+ return {0.0f, 0.0f};
+ case SkPaint::kCenter_Align:
+ return {SkFloatToScalar(glyph.fAdvanceX) / 2,
+ SkFloatToScalar(glyph.fAdvanceY) / 2};
+ case SkPaint::kRight_Align:
+ return {SkFloatToScalar(glyph.fAdvanceX),
+ SkFloatToScalar(glyph.fAdvanceY)};
+ }
+        // Even though the entire enum is covered above, MSVC doesn't think so. Make it happy.
+ SkFAIL("Should never get here.");
+ return {0.0f, 0.0f};
+ }
+
+ // The "call" to SkFixedToScalar is actually a macro. It's macros all the way down.
+ // Needs to be a macro because you can't have a const float unless you make it constexpr.
+ #define kSubpixelRounding (SkFixedToScalar(SkGlyph::kSubpixelRound))
+
+ // The SubpixelPositionRounding function returns a point suitable for rounding a sub-pixel
+ // positioned glyph.
+ static SkPoint SubpixelPositionRounding(SkAxisAlignment axisAlignment) {
+ switch (axisAlignment) {
+ case kX_SkAxisAlignment:
+ return {kSubpixelRounding, SK_ScalarHalf};
+ case kY_SkAxisAlignment:
+ return {SK_ScalarHalf, kSubpixelRounding};
+ case kNone_SkAxisAlignment:
+ return {kSubpixelRounding, kSubpixelRounding};
+ }
+ SkFAIL("Should not get here.");
+ return {0.0f, 0.0f};
+ }
+
+    // The SubpixelAlignment function returns a position suitable for the glyph cache to
+    // produce the correct sub-pixel alignment. If a position is aligned with an axis, a shortcut
+    // of 0 is used for the sub-pixel position.
+ static SkIPoint SubpixelAlignment(SkAxisAlignment axisAlignment, SkPoint position) {
+ // Only the fractional part of position.fX and position.fY matter, because the result of
+ // this function will just be passed to FixedToSub.
+ switch (axisAlignment) {
+ case kX_SkAxisAlignment:
+ return {SkScalarToFixed(SkScalarFraction(position.fX) + kSubpixelRounding), 0};
+ case kY_SkAxisAlignment:
+ return {0, SkScalarToFixed(SkScalarFraction(position.fY) + kSubpixelRounding)};
+ case kNone_SkAxisAlignment:
+ return {SkScalarToFixed(SkScalarFraction(position.fX) + kSubpixelRounding),
+ SkScalarToFixed(SkScalarFraction(position.fY) + kSubpixelRounding)};
+ }
+ SkFAIL("Should not get here.");
+ return {0, 0};
+ }
+
+ #undef kSubpixelRounding
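+
+    // Illustrative sketch of the rounding above (not authoritative; it assumes
+    // SkGlyph::kSubpixelRound corresponds to half of a quarter-pixel step, i.e. 0.125): a glyph
+    // at x = 10.30 with kX_SkAxisAlignment yields SkScalarFraction(10.30) + 0.125 = 0.425, which
+    // the later FixedToSub step truncates to the second of four sub-pixel positions
+    // (0.25 <= 0.425 < 0.5), while the y component is forced to 0 because the baseline is
+    // axis aligned.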
+
+    // GlyphFindAndPlaceInterface, given the text and position, finds the correct glyph and does
+    // glyph-specific position adjustment. The findAndPositionGlyph method takes text and
+    // position and calls processOneGlyph with the correct glyph, the final position, and the
+    // rounding terms. The final position is not yet rounded; that is the responsibility of
+    // processOneGlyph.
+ template<typename ProcessOneGlyph>
+ class GlyphFindAndPlaceInterface : SkNoncopyable {
+ public:
+ virtual ~GlyphFindAndPlaceInterface() { }
+
+ // findAndPositionGlyph calculates the position of the glyph, finds the glyph, and
+ // returns the position of where the next glyph will be using the glyph's advance and
+ // possibly kerning. The returned position is used by drawText, but ignored by drawPosText.
+ // The compiler should prune all this calculation if the return value is not used.
+ //
+ // This should be a pure virtual, but some versions of GCC <= 4.8 have a bug that causes a
+ // compile error.
+ // See GCC bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=60277
+ virtual SkPoint findAndPositionGlyph(
+ const char** text, SkPoint position, ProcessOneGlyph&& processOneGlyph) {
+ SkFAIL("Should never get here.");
+ return {0.0f, 0.0f};
+ }
+ };
+
+ // GlyphFindAndPlaceSubpixel handles finding and placing glyphs when sub-pixel positioning is
+ // requested. After it has found and placed the glyph it calls the templated function
+ // ProcessOneGlyph in order to actually perform an action.
+ template<typename ProcessOneGlyph, SkPaint::Align kTextAlignment,
+ SkAxisAlignment kAxisAlignment>
+ class GlyphFindAndPlaceSubpixel final : public GlyphFindAndPlaceInterface<ProcessOneGlyph> {
+ public:
+ GlyphFindAndPlaceSubpixel(LookupGlyph& glyphFinder)
+ : fGlyphFinder(glyphFinder) {
+ FixGCC49Arm64Bug(1);
+ }
+
+ SkPoint findAndPositionGlyph(
+ const char** text, SkPoint position, ProcessOneGlyph&& processOneGlyph) override {
+ SkPoint finalPosition = position;
+ if (kTextAlignment != SkPaint::kLeft_Align) {
+ // Get the width of an un-sub-pixel positioned glyph for calculating the
+                // alignment. This is not needed for kLeft_Align because its adjustment is
+ // always {0, 0}.
+ const char* tempText = *text;
+ const SkGlyph &metricGlyph = fGlyphFinder->lookupGlyph(&tempText);
+
+ if (metricGlyph.fWidth <= 0) {
+ // Exiting early, be sure to update text pointer.
+ *text = tempText;
+ return finalPosition + SkPoint{SkFloatToScalar(metricGlyph.fAdvanceX),
+ SkFloatToScalar(metricGlyph.fAdvanceY)};
+ }
+
+ // Adjust the final position by the alignment adjustment.
+ finalPosition -= TextAlignmentAdjustment(kTextAlignment, metricGlyph);
+ }
+
+ // Find the glyph.
+ SkIPoint lookupPosition = SubpixelAlignment(kAxisAlignment, finalPosition);
+ const SkGlyph& renderGlyph =
+ fGlyphFinder->lookupGlyphXY(text, lookupPosition.fX, lookupPosition.fY);
+
+ // If the glyph has no width (no pixels) then don't bother processing it.
+ if (renderGlyph.fWidth > 0) {
+ processOneGlyph(renderGlyph, finalPosition,
+ SubpixelPositionRounding(kAxisAlignment));
+ }
+ return finalPosition + SkPoint{SkFloatToScalar(renderGlyph.fAdvanceX),
+ SkFloatToScalar(renderGlyph.fAdvanceY)};
+ }
+
+ private:
+ LookupGlyph& fGlyphFinder;
+ };
+
+ enum SelectKerning {
+ kNoKerning = false,
+ kUseKerning = true
+ };
+
+ // GlyphFindAndPlaceFullPixel handles finding and placing glyphs when no sub-pixel
+ // positioning is requested. The kUseKerning argument should be true for drawText, and false
+ // for drawPosText.
+ template<typename ProcessOneGlyph, SkPaint::Align kTextAlignment, SelectKerning kUseKerning>
+ class GlyphFindAndPlaceFullPixel final : public GlyphFindAndPlaceInterface<ProcessOneGlyph> {
+ public:
+ GlyphFindAndPlaceFullPixel(LookupGlyph& glyphFinder)
+ : fGlyphFinder(glyphFinder) {
+ FixGCC49Arm64Bug(2);
+ // Kerning can only be used with SkPaint::kLeft_Align
+ static_assert(!kUseKerning || SkPaint::kLeft_Align == kTextAlignment,
+ "Kerning can only be used with left aligned text.");
+ }
+
+ SkPoint findAndPositionGlyph(
+ const char** text, SkPoint position, ProcessOneGlyph&& processOneGlyph) override {
+ SkPoint finalPosition = position;
+ const SkGlyph& glyph = fGlyphFinder->lookupGlyph(text);
+ if (kUseKerning) {
+ finalPosition += {fAutoKern.adjust(glyph), 0.0f};
+ }
+ if (glyph.fWidth > 0) {
+ finalPosition -= TextAlignmentAdjustment(kTextAlignment, glyph);
+ processOneGlyph(glyph, finalPosition, {SK_ScalarHalf, SK_ScalarHalf});
+ }
+ return finalPosition + SkPoint{SkFloatToScalar(glyph.fAdvanceX),
+ SkFloatToScalar(glyph.fAdvanceY)};
+ }
+
+ private:
+ LookupGlyph& fGlyphFinder;
+
+ SkAutoKern fAutoKern;
+ };
+
+ // GlyphFindAndPlace is a large variant that encapsulates the multiple types of finding and
+    // placing a glyph. There are three factors that go into the different variants:
+ // * Is sub-pixel positioned - a boolean that says whether to use sub-pixel positioning.
+ // * Text alignment - indicates if the glyph should be placed to the right, centered or left
+ // of a given position.
+    // * Axis alignment - indicates if the glyph's final sub-pixel position should be rounded to a
+ // whole pixel if the glyph is aligned with an axis. This is only used for sub-pixel
+ // positioning and allows the baseline to look crisp.
+ template<typename ProcessOneGlyph>
+ using GlyphFindAndPlace = PolymorphicVariant<
+ GlyphFindAndPlaceInterface<ProcessOneGlyph>,
+ // Subpixel
+ GlyphFindAndPlaceSubpixel<ProcessOneGlyph, SkPaint::kLeft_Align, kNone_SkAxisAlignment>,
+ GlyphFindAndPlaceSubpixel<ProcessOneGlyph, SkPaint::kLeft_Align, kX_SkAxisAlignment >,
+ GlyphFindAndPlaceSubpixel<ProcessOneGlyph, SkPaint::kLeft_Align, kY_SkAxisAlignment >,
+ GlyphFindAndPlaceSubpixel<ProcessOneGlyph, SkPaint::kCenter_Align, kNone_SkAxisAlignment>,
+ GlyphFindAndPlaceSubpixel<ProcessOneGlyph, SkPaint::kCenter_Align, kX_SkAxisAlignment >,
+ GlyphFindAndPlaceSubpixel<ProcessOneGlyph, SkPaint::kCenter_Align, kY_SkAxisAlignment >,
+ GlyphFindAndPlaceSubpixel<ProcessOneGlyph, SkPaint::kRight_Align, kNone_SkAxisAlignment>,
+ GlyphFindAndPlaceSubpixel<ProcessOneGlyph, SkPaint::kRight_Align, kX_SkAxisAlignment >,
+ GlyphFindAndPlaceSubpixel<ProcessOneGlyph, SkPaint::kRight_Align, kY_SkAxisAlignment >,
+ // Full pixel
+ GlyphFindAndPlaceFullPixel<ProcessOneGlyph, SkPaint::kLeft_Align, kNoKerning>,
+ GlyphFindAndPlaceFullPixel<ProcessOneGlyph, SkPaint::kCenter_Align, kNoKerning>,
+ GlyphFindAndPlaceFullPixel<ProcessOneGlyph, SkPaint::kRight_Align, kNoKerning>
+ >;
+
+ // InitSubpixel is a helper function for initializing all the variants of
+ // GlyphFindAndPlaceSubpixel.
+ template<typename ProcessOneGlyph, SkPaint::Align kTextAlignment>
+ static void InitSubpixel(
+ typename GlyphFindAndPlace<ProcessOneGlyph>::Variants* to_init,
+ SkAxisAlignment axisAlignment,
+ LookupGlyph& glyphFinder) {
+ switch (axisAlignment) {
+ case kX_SkAxisAlignment:
+ to_init->template initialize<GlyphFindAndPlaceSubpixel<
+ ProcessOneGlyph, kTextAlignment, kX_SkAxisAlignment>>(glyphFinder);
+ break;
+ case kNone_SkAxisAlignment:
+ to_init->template initialize<GlyphFindAndPlaceSubpixel<
+ ProcessOneGlyph, kTextAlignment, kNone_SkAxisAlignment>>(glyphFinder);
+ break;
+ case kY_SkAxisAlignment:
+ to_init->template initialize<GlyphFindAndPlaceSubpixel<
+ ProcessOneGlyph, kTextAlignment, kY_SkAxisAlignment>>(glyphFinder);
+ break;
+ }
+ }
+
+ static SkPoint MeasureText(LookupGlyph& glyphFinder, const char text[], size_t byteLength) {
+ SkScalar x = 0, y = 0;
+ const char* stop = text + byteLength;
+
+ SkAutoKern autokern;
+
+ while (text < stop) {
+ // don't need x, y here, since all subpixel variants will have the
+ // same advance
+ const SkGlyph& glyph = glyphFinder->lookupGlyph(&text);
+
+ x += autokern.adjust(glyph) + SkFloatToScalar(glyph.fAdvanceX);
+ y += SkFloatToScalar(glyph.fAdvanceY);
+ }
+ SkASSERT(text == stop);
+ return {x, y};
+ }
+};
+
+template<typename ProcessOneGlyph>
+inline void SkFindAndPlaceGlyph::ProcessPosText(
+ SkPaint::TextEncoding textEncoding, const char text[], size_t byteLength,
+ SkPoint offset, const SkMatrix& matrix, const SkScalar pos[], int scalarsPerPosition,
+ SkPaint::Align textAlignment,
+ SkGlyphCache* cache, ProcessOneGlyph&& processOneGlyph) {
+
+ SkAxisAlignment axisAlignment = cache->getScalerContext()->computeAxisAlignmentForHText();
+ uint32_t mtype = matrix.getType();
+ LookupGlyph glyphFinder(textEncoding, cache);
+
+    // Specialized code for handling the most common case for Blink. The while loop is
+    // completely de-virtualized.
+ if (scalarsPerPosition == 1
+ && textAlignment == SkPaint::kLeft_Align
+ && axisAlignment == kX_SkAxisAlignment
+ && cache->isSubpixel()
+ && mtype <= SkMatrix::kTranslate_Mask) {
+ typedef GlyphFindAndPlaceSubpixel<
+ ProcessOneGlyph, SkPaint::kLeft_Align, kX_SkAxisAlignment> Positioner;
+ HorizontalPositions positions{pos};
+ TranslationMapper mapper{matrix, offset};
+ Positioner positioner(glyphFinder);
+ const char* cursor = text;
+ const char* stop = text + byteLength;
+ while (cursor < stop) {
+ SkPoint mappedPoint = mapper.TranslationMapper::map(
+ positions.HorizontalPositions::nextPoint());
+ positioner.Positioner::findAndPositionGlyph(
+ &cursor, mappedPoint, std::forward<ProcessOneGlyph>(processOneGlyph));
+ }
+ return;
+ }
+
+ PositionReader positionReader{
+ [&](PositionReader::Variants* to_init) {
+ if (2 == scalarsPerPosition) {
+ to_init->initialize<ArbitraryPositions>(pos);
+ } else {
+ to_init->initialize<HorizontalPositions>(pos);
+ }
+ positionReader->forceUseForBug();
+ }
+ };
+
+ Mapper mapper{
+ [&](Mapper::Variants* to_init) {
+ if (mtype & (SkMatrix::kAffine_Mask | SkMatrix::kPerspective_Mask)
+ || scalarsPerPosition == 2) {
+ to_init->initialize<GeneralMapper>(matrix, offset);
+ } else if (mtype & SkMatrix::kScale_Mask) {
+ to_init->initialize<XScaleMapper>(matrix, offset);
+ } else {
+ to_init->initialize<TranslationMapper>(matrix, offset);
+ }
+ }
+ };
+
+ GlyphFindAndPlace<ProcessOneGlyph> findAndPosition {
+ [&](typename GlyphFindAndPlace<ProcessOneGlyph>::Variants* to_init) {
+ if (cache->isSubpixel()) {
+ switch (textAlignment) {
+ case SkPaint::kLeft_Align:
+ InitSubpixel<ProcessOneGlyph, SkPaint::kLeft_Align>(
+ to_init, axisAlignment, glyphFinder);
+ break;
+ case SkPaint::kCenter_Align:
+ InitSubpixel<ProcessOneGlyph, SkPaint::kCenter_Align>(
+ to_init, axisAlignment, glyphFinder);
+ break;
+ case SkPaint::kRight_Align:
+ InitSubpixel<ProcessOneGlyph, SkPaint::kRight_Align>(
+ to_init, axisAlignment, glyphFinder);
+ break;
+ }
+ } else {
+ switch (textAlignment) {
+ case SkPaint::kLeft_Align:
+ to_init->template initialize<
+ GlyphFindAndPlaceFullPixel<ProcessOneGlyph,
+ SkPaint::kLeft_Align, kNoKerning>>(glyphFinder);
+ break;
+ case SkPaint::kCenter_Align:
+ to_init->template initialize<
+ GlyphFindAndPlaceFullPixel<ProcessOneGlyph,
+ SkPaint::kCenter_Align, kNoKerning>>(glyphFinder);
+ break;
+ case SkPaint::kRight_Align:
+ to_init->template initialize<
+ GlyphFindAndPlaceFullPixel<ProcessOneGlyph,
+ SkPaint::kRight_Align, kNoKerning>>(glyphFinder);
+ break;
+ }
+ }
+ }
+ };
+
+ const char* stop = text + byteLength;
+ while (text < stop) {
+ SkPoint mappedPoint = mapper->map(positionReader->nextPoint());
+ findAndPosition->findAndPositionGlyph(
+ &text, mappedPoint, std::forward<ProcessOneGlyph>(processOneGlyph));
+ }
+}
+
+template<typename ProcessOneGlyph>
+inline void SkFindAndPlaceGlyph::ProcessText(
+ SkPaint::TextEncoding textEncoding, const char text[], size_t byteLength,
+ SkPoint offset, const SkMatrix& matrix, SkPaint::Align textAlignment,
+ SkGlyphCache* cache, ProcessOneGlyph&& processOneGlyph) {
+
+ // transform the starting point
+ matrix.mapPoints(&offset, 1);
+
+ LookupGlyph glyphFinder(textEncoding, cache);
+
+ // need to measure first
+ if (textAlignment != SkPaint::kLeft_Align) {
+ SkVector stop = MeasureText(glyphFinder, text, byteLength);
+
+ if (textAlignment == SkPaint::kCenter_Align) {
+ stop *= SK_ScalarHalf;
+ }
+ offset -= stop;
+ }
+
+ GlyphFindAndPlace<ProcessOneGlyph> findAndPosition{
+ [&](typename GlyphFindAndPlace<ProcessOneGlyph>::Variants* to_init) {
+ if (cache->isSubpixel()) {
+ SkAxisAlignment axisAlignment =
+ cache->getScalerContext()->computeAxisAlignmentForHText();
+ InitSubpixel<ProcessOneGlyph, SkPaint::kLeft_Align>(
+ to_init, axisAlignment, glyphFinder);
+ } else {
+ to_init->template initialize<
+ GlyphFindAndPlaceFullPixel<
+ ProcessOneGlyph, SkPaint::kLeft_Align, kUseKerning>>(glyphFinder);
+ }
+ }
+ };
+
+ const char* stop = text + byteLength;
+ SkPoint current = offset;
+ while (text < stop) {
+ current =
+ findAndPosition->findAndPositionGlyph(
+ &text, current, std::forward<ProcessOneGlyph>(processOneGlyph));
+
+ }
+}
+
+#endif // SkFindAndPositionGlyph_DEFINED
diff --git a/gfx/skia/skia/src/core/SkFlattenable.cpp b/gfx/skia/skia/src/core/SkFlattenable.cpp
new file mode 100644
index 000000000..c759a1ab3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFlattenable.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFlattenable.h"
+#include "SkPtrRecorder.h"
+#include "SkReadBuffer.h"
+
+SkNamedFactorySet::SkNamedFactorySet() : fNextAddedFactory(0) {}
+
+uint32_t SkNamedFactorySet::find(SkFlattenable::Factory factory) {
+ uint32_t index = fFactorySet.find(factory);
+ if (index > 0) {
+ return index;
+ }
+ const char* name = SkFlattenable::FactoryToName(factory);
+ if (nullptr == name) {
+ return 0;
+ }
+ *fNames.append() = name;
+ return fFactorySet.add(factory);
+}
+
+const char* SkNamedFactorySet::getNextAddedFactoryName() {
+ if (fNextAddedFactory < fNames.count()) {
+ return fNames[fNextAddedFactory++];
+ }
+ return nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkRefCntSet::~SkRefCntSet() {
+    // call this now, while our decPtr() is still in scope
+ this->reset();
+}
+
+void SkRefCntSet::incPtr(void* ptr) {
+ ((SkRefCnt*)ptr)->ref();
+}
+
+void SkRefCntSet::decPtr(void* ptr) {
+ ((SkRefCnt*)ptr)->unref();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define MAX_ENTRY_COUNT 1024
+
+struct Entry {
+ const char* fName;
+ SkFlattenable::Factory fFactory;
+ SkFlattenable::Type fType;
+};
+
+static int gCount = 0;
+static Entry gEntries[MAX_ENTRY_COUNT];
+
+void SkFlattenable::Register(const char name[], Factory factory, SkFlattenable::Type type) {
+ SkASSERT(name);
+ SkASSERT(factory);
+ SkASSERT(gCount < MAX_ENTRY_COUNT);
+
+ gEntries[gCount].fName = name;
+ gEntries[gCount].fFactory = factory;
+ gEntries[gCount].fType = type;
+ gCount += 1;
+}
+
+#ifdef SK_DEBUG
+static void report_no_entries(const char* functionName) {
+ if (!gCount) {
+ SkDebugf("%s has no registered name/factory/type entries."
+ " Call SkFlattenable::InitializeFlattenablesIfNeeded() before using gEntries",
+ functionName);
+ }
+}
+#endif
+
+SkFlattenable::Factory SkFlattenable::NameToFactory(const char name[]) {
+ InitializeFlattenablesIfNeeded();
+#ifdef SK_DEBUG
+ report_no_entries(__FUNCTION__);
+#endif
+ const Entry* entries = gEntries;
+ for (int i = gCount - 1; i >= 0; --i) {
+ if (strcmp(entries[i].fName, name) == 0) {
+ return entries[i].fFactory;
+ }
+ }
+ return nullptr;
+}
+
+bool SkFlattenable::NameToType(const char name[], SkFlattenable::Type* type) {
+ SkASSERT(type);
+ InitializeFlattenablesIfNeeded();
+#ifdef SK_DEBUG
+ report_no_entries(__FUNCTION__);
+#endif
+ const Entry* entries = gEntries;
+ for (int i = gCount - 1; i >= 0; --i) {
+ if (strcmp(entries[i].fName, name) == 0) {
+ *type = entries[i].fType;
+ return true;
+ }
+ }
+ return false;
+}
+
+const char* SkFlattenable::FactoryToName(Factory fact) {
+ InitializeFlattenablesIfNeeded();
+#ifdef SK_DEBUG
+ report_no_entries(__FUNCTION__);
+#endif
+ const Entry* entries = gEntries;
+ for (int i = gCount - 1; i >= 0; --i) {
+ if (entries[i].fFactory == fact) {
+ return entries[i].fName;
+ }
+ }
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkFlattenableSerialization.cpp b/gfx/skia/skia/src/core/SkFlattenableSerialization.cpp
new file mode 100644
index 000000000..704526673
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFlattenableSerialization.cpp
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFlattenableSerialization.h"
+
+#include "SkData.h"
+#include "SkValidatingReadBuffer.h"
+#include "SkWriteBuffer.h"
+
+SkData* SkValidatingSerializeFlattenable(SkFlattenable* flattenable) {
+ SkBinaryWriteBuffer writer;
+ writer.writeFlattenable(flattenable);
+ size_t size = writer.bytesWritten();
+ auto data = SkData::MakeUninitialized(size);
+ writer.writeToMemory(data->writable_data());
+ return data.release();
+}
+
+SkFlattenable* SkValidatingDeserializeFlattenable(const void* data, size_t size,
+ SkFlattenable::Type type) {
+ SkValidatingReadBuffer buffer(data, size);
+ return buffer.readFlattenable(type);
+}
diff --git a/gfx/skia/skia/src/core/SkFont.cpp b/gfx/skia/skia/src/core/SkFont.cpp
new file mode 100644
index 000000000..aa7fe366c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFont.cpp
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFont.h"
+#include "SkTypeface.h"
+#include "SkUtils.h"
+
+SkFont::SkFont(sk_sp<SkTypeface> face, SkScalar size, SkScalar scaleX, SkScalar skewX, MaskType mt,
+ uint32_t flags)
+ : fTypeface(face ? std::move(face) : SkTypeface::MakeDefault())
+ , fSize(size)
+ , fScaleX(scaleX)
+ , fSkewX(skewX)
+ , fFlags(flags)
+ , fMaskType(SkToU8(mt))
+{
+ SkASSERT(size > 0);
+ SkASSERT(scaleX > 0);
+ SkASSERT(SkScalarIsFinite(skewX));
+ SkASSERT(0 == (flags & ~kAllFlags));
+}
+
+sk_sp<SkFont> SkFont::Make(sk_sp<SkTypeface> face, SkScalar size, SkScalar scaleX, SkScalar skewX,
+ MaskType mt, uint32_t flags) {
+ if (size <= 0 || !SkScalarIsFinite(size)) {
+ return nullptr;
+ }
+ if (scaleX <= 0 || !SkScalarIsFinite(scaleX)) {
+ return nullptr;
+ }
+ if (!SkScalarIsFinite(skewX)) {
+ return nullptr;
+ }
+ flags &= kAllFlags;
+ return sk_sp<SkFont>(new SkFont(std::move(face), size, scaleX, skewX, mt, flags));
+}
+
+sk_sp<SkFont> SkFont::Make(sk_sp<SkTypeface> face, SkScalar size, MaskType mt, uint32_t flags) {
+ return SkFont::Make(std::move(face), size, 1, 0, mt, flags);
+}
+
+sk_sp<SkFont> SkFont::makeWithSize(SkScalar newSize) const {
+ return SkFont::Make(sk_ref_sp(this->getTypeface()), newSize, this->getScaleX(),
+ this->getSkewX(), this->getMaskType(), this->getFlags());
+}
+
+sk_sp<SkFont> SkFont::makeWithFlags(uint32_t newFlags) const {
+ return SkFont::Make(sk_ref_sp(this->getTypeface()), this->getSize(), this->getScaleX(),
+ this->getSkewX(), this->getMaskType(), newFlags);
+}
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+int SkFont::textToGlyphs(const void* text, size_t byteLength, SkTextEncoding encoding,
+ uint16_t glyphs[], int maxGlyphCount) const {
+ if (0 == byteLength) {
+ return 0;
+ }
+
+ SkASSERT(text);
+
+ int count = 0; // fix uninitialized warning (even though the switch is complete!)
+
+ switch (encoding) {
+ case kUTF8_SkTextEncoding:
+ count = SkUTF8_CountUnichars((const char*)text, byteLength);
+ break;
+ case kUTF16_SkTextEncoding:
+ count = SkUTF16_CountUnichars((const uint16_t*)text, SkToInt(byteLength >> 1));
+ break;
+ case kUTF32_SkTextEncoding:
+ count = SkToInt(byteLength >> 2);
+ break;
+ case kGlyphID_SkTextEncoding:
+ count = SkToInt(byteLength >> 1);
+ break;
+ }
+ if (!glyphs) {
+ return count;
+ }
+
+ // TODO: unify/eliminate SkTypeface::Encoding with SkTextEncoding
+ SkTypeface::Encoding typefaceEncoding;
+ switch (encoding) {
+ case kUTF8_SkTextEncoding:
+ typefaceEncoding = SkTypeface::kUTF8_Encoding;
+ break;
+ case kUTF16_SkTextEncoding:
+ typefaceEncoding = SkTypeface::kUTF16_Encoding;
+ break;
+ case kUTF32_SkTextEncoding:
+ typefaceEncoding = SkTypeface::kUTF32_Encoding;
+ break;
+ default:
+ SkASSERT(kGlyphID_SkTextEncoding == encoding);
+ // we can early exit, since we already have glyphIDs
+ memcpy(glyphs, text, count << 1);
+ return count;
+ }
+
+ (void)fTypeface->charsToGlyphs(text, typefaceEncoding, glyphs, count);
+ return count;
+}
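+
+// A hedged usage sketch (the variable names here are illustrative, not from this file): for the
+// UTF-8 string "H\xC3\xA9llo" (6 bytes, 5 unichars, because \xC3\xA9 encodes a single code
+// point),
+//   int n = font->textToGlyphs(utf8, 6, kUTF8_SkTextEncoding, nullptr, 0);
+// returns 5, and a second call with a uint16_t glyphs[5] buffer fills in the 5 glyph IDs via
+// the typeface's charsToGlyphs.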
+
+SkScalar SkFont::measureText(const void* text, size_t byteLength, SkTextEncoding encoding) const {
+ // TODO: need access to the cache
+ return -1;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "SkPaint.h"
+
+sk_sp<SkFont> SkFont::Testing_CreateFromPaint(const SkPaint& paint) {
+ uint32_t flags = 0;
+ if (paint.isVerticalText()) {
+ flags |= kVertical_Flag;
+ }
+ if (paint.isEmbeddedBitmapText()) {
+ flags |= kEmbeddedBitmaps_Flag;
+ }
+ if (paint.getFlags() & SkPaint::kGenA8FromLCD_Flag) {
+ flags |= kGenA8FromLCD_Flag;
+ }
+ if (paint.isFakeBoldText()) {
+ flags |= kEmbolden_Flag;
+ }
+
+ if (SkPaint::kFull_Hinting == paint.getHinting()) {
+ flags |= kEnableByteCodeHints_Flag;
+ }
+ if (paint.isAutohinted()) {
+ flags |= kEnableAutoHints_Flag;
+ }
+ if (paint.isSubpixelText() || paint.isLinearText()) {
+ // this is our default
+ } else {
+ flags |= kUseNonlinearMetrics_Flag;
+ }
+
+ MaskType maskType = SkFont::kBW_MaskType;
+ if (paint.isAntiAlias()) {
+ maskType = paint.isLCDRenderText() ? kLCD_MaskType : kA8_MaskType;
+ }
+
+ return Make(sk_ref_sp(paint.getTypeface()), paint.getTextSize(), paint.getTextScaleX(),
+ paint.getTextSkewX(), maskType, flags);
+}
diff --git a/gfx/skia/skia/src/core/SkFontDescriptor.cpp b/gfx/skia/skia/src/core/SkFontDescriptor.cpp
new file mode 100644
index 000000000..73ea2058c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontDescriptor.cpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFontDescriptor.h"
+#include "SkMakeUnique.h"
+#include "SkStream.h"
+#include "SkData.h"
+
+enum {
+ // these must match the sfnt 'name' enums
+ kFontFamilyName = 0x01,
+ kFullName = 0x04,
+ kPostscriptName = 0x06,
+
+ // These count backwards from 0xFF, so as not to collide with the SFNT
+ // defines for names in its 'name' table.
+ kFontAxes = 0xFC,
+ kFontIndex = 0xFD,
+ kFontFileName = 0xFE, // Remove when MIN_PICTURE_VERSION > 41
+ kSentinel = 0xFF,
+};
+
+SkFontDescriptor::SkFontDescriptor() { }
+
+static void read_string(SkStream* stream, SkString* string) {
+ const uint32_t length = SkToU32(stream->readPackedUInt());
+ if (length > 0) {
+ string->resize(length);
+ stream->read(string->writable_str(), length);
+ }
+}
+
+// Remove when MIN_PICTURE_VERSION > 41
+static void skip_string(SkStream* stream) {
+ const uint32_t length = SkToU32(stream->readPackedUInt());
+ if (length > 0) {
+ stream->skip(length);
+ }
+}
+
+static void write_string(SkWStream* stream, const SkString& string, uint32_t id) {
+ if (!string.isEmpty()) {
+ stream->writePackedUInt(id);
+ stream->writePackedUInt(string.size());
+ stream->write(string.c_str(), string.size());
+ }
+}
+
+static size_t read_uint(SkStream* stream) {
+ return stream->readPackedUInt();
+}
+
+static void write_uint(SkWStream* stream, size_t n, uint32_t id) {
+ stream->writePackedUInt(id);
+ stream->writePackedUInt(n);
+}
+
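+// Wire format note (derived from serialize() below, not a normative spec): a descriptor is a
+// packed style word, followed by a series of (packed id, payload) records - strings carry a
+// packed length plus their bytes, kFontIndex and kFontAxes carry packed integers - terminated
+// by kSentinel, and finally a packed font-data length followed by that many bytes (0 if there
+// is no embedded font data).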
+bool SkFontDescriptor::Deserialize(SkStream* stream, SkFontDescriptor* result) {
+ size_t styleBits = stream->readPackedUInt();
+ if (styleBits <= 2) {
+ // Remove this branch when MIN_PICTURE_VERSION > 45
+ result->fStyle = SkFontStyle::FromOldStyle(styleBits);
+ } else {
+ result->fStyle = SkFontStyle((styleBits >> 16) & 0xFFFF,
+ (styleBits >> 8 ) & 0xFF,
+ static_cast<SkFontStyle::Slant>(styleBits & 0xFF));
+ }
+
+ SkAutoSTMalloc<4, SkFixed> axis;
+ size_t axisCount = 0;
+ size_t index = 0;
+ for (size_t id; (id = stream->readPackedUInt()) != kSentinel;) {
+ switch (id) {
+ case kFontFamilyName:
+ read_string(stream, &result->fFamilyName);
+ break;
+ case kFullName:
+ read_string(stream, &result->fFullName);
+ break;
+ case kPostscriptName:
+ read_string(stream, &result->fPostscriptName);
+ break;
+ case kFontAxes:
+ axisCount = read_uint(stream);
+ axis.reset(axisCount);
+ for (size_t i = 0; i < axisCount; ++i) {
+ axis[i] = read_uint(stream);
+ }
+ break;
+ case kFontIndex:
+ index = read_uint(stream);
+ break;
+ case kFontFileName: // Remove when MIN_PICTURE_VERSION > 41
+ skip_string(stream);
+ break;
+ default:
+ SkDEBUGFAIL("Unknown id used by a font descriptor");
+ return false;
+ }
+ }
+
+ size_t length = stream->readPackedUInt();
+ if (length > 0) {
+ sk_sp<SkData> data(SkData::MakeUninitialized(length));
+ if (stream->read(data->writable_data(), length) == length) {
+ result->fFontData = skstd::make_unique<SkFontData>(
+ skstd::make_unique<SkMemoryStream>(data), index, axis, axisCount);
+ } else {
+ SkDEBUGFAIL("Could not read font data");
+ return false;
+ }
+ }
+ return true;
+}
+
+void SkFontDescriptor::serialize(SkWStream* stream) {
+ uint32_t styleBits = (fStyle.weight() << 16) | (fStyle.width() << 8) | (fStyle.slant());
+ stream->writePackedUInt(styleBits);
+
+ write_string(stream, fFamilyName, kFontFamilyName);
+ write_string(stream, fFullName, kFullName);
+ write_string(stream, fPostscriptName, kPostscriptName);
+ if (fFontData.get()) {
+ if (fFontData->getIndex()) {
+ write_uint(stream, fFontData->getIndex(), kFontIndex);
+ }
+ if (fFontData->getAxisCount()) {
+ write_uint(stream, fFontData->getAxisCount(), kFontAxes);
+ for (int i = 0; i < fFontData->getAxisCount(); ++i) {
+ stream->writePackedUInt(fFontData->getAxis()[i]);
+ }
+ }
+ }
+
+ stream->writePackedUInt(kSentinel);
+
+ if (fFontData.get() && fFontData->hasStream()) {
+ std::unique_ptr<SkStreamAsset> fontStream = fFontData->detachStream();
+ size_t length = fontStream->getLength();
+ stream->writePackedUInt(length);
+ stream->writeStream(fontStream.get(), length);
+ } else {
+ stream->writePackedUInt(0);
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkFontDescriptor.h b/gfx/skia/skia/src/core/SkFontDescriptor.h
new file mode 100644
index 000000000..de1462177
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontDescriptor.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontDescriptor_DEFINED
+#define SkFontDescriptor_DEFINED
+
+#include "SkFixed.h"
+#include "SkStream.h"
+#include "SkString.h"
+#include "SkTypeface.h"
+
+class SkFontData {
+public:
+ /** Makes a copy of the data in 'axis'. */
+ SkFontData(std::unique_ptr<SkStreamAsset> stream, int index, const SkFixed axis[],int axisCount)
+ : fStream(std::move(stream)), fIndex(index), fAxisCount(axisCount), fAxis(axisCount)
+ {
+ for (int i = 0; i < axisCount; ++i) {
+ fAxis[i] = axis[i];
+ }
+ }
+ SkFontData(const SkFontData& that)
+ : fStream(that.fStream->duplicate())
+ , fIndex(that.fIndex)
+ , fAxisCount(that.fAxisCount)
+ , fAxis(fAxisCount)
+ {
+ for (int i = 0; i < fAxisCount; ++i) {
+ fAxis[i] = that.fAxis[i];
+ }
+ }
+ bool hasStream() const { return fStream.get() != nullptr; }
+ std::unique_ptr<SkStreamAsset> detachStream() { return std::move(fStream); }
+ SkStreamAsset* getStream() { return fStream.get(); }
+ SkStreamAsset const* getStream() const { return fStream.get(); }
+ int getIndex() const { return fIndex; }
+ int getAxisCount() const { return fAxisCount; }
+ const SkFixed* getAxis() const { return fAxis.get(); }
+
+private:
+ std::unique_ptr<SkStreamAsset> fStream;
+ int fIndex;
+ int fAxisCount;
+ SkAutoSTMalloc<4, SkFixed> fAxis;
+};
+
+class SkFontDescriptor : SkNoncopyable {
+public:
+ SkFontDescriptor();
+ // Does not affect ownership of SkStream.
+ static bool Deserialize(SkStream*, SkFontDescriptor* result);
+
+ void serialize(SkWStream*);
+
+ SkFontStyle getStyle() { return fStyle; }
+ void setStyle(SkFontStyle style) { fStyle = style; }
+
+ const char* getFamilyName() const { return fFamilyName.c_str(); }
+ const char* getFullName() const { return fFullName.c_str(); }
+ const char* getPostscriptName() const { return fPostscriptName.c_str(); }
+ bool hasFontData() const { return fFontData.get() != nullptr; }
+ std::unique_ptr<SkFontData> detachFontData() { return std::move(fFontData); }
+
+ void setFamilyName(const char* name) { fFamilyName.set(name); }
+ void setFullName(const char* name) { fFullName.set(name); }
+ void setPostscriptName(const char* name) { fPostscriptName.set(name); }
+ /** Set the font data only if it is necessary for serialization. */
+ void setFontData(std::unique_ptr<SkFontData> data) { fFontData = std::move(data); }
+
+private:
+ SkString fFamilyName;
+ SkString fFullName;
+ SkString fPostscriptName;
+ std::unique_ptr<SkFontData> fFontData;
+
+ SkFontStyle fStyle;
+};
+
+#endif // SkFontDescriptor_DEFINED
diff --git a/gfx/skia/skia/src/core/SkFontLCDConfig.cpp b/gfx/skia/skia/src/core/SkFontLCDConfig.cpp
new file mode 100644
index 000000000..3d1f35b58
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontLCDConfig.cpp
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFontLCDConfig.h"
+
+static SkFontLCDConfig::LCDOrientation gLCDOrientation = SkFontLCDConfig::kHorizontal_LCDOrientation;
+static SkFontLCDConfig::LCDOrder gLCDOrder = SkFontLCDConfig::kRGB_LCDOrder;
+
+SkFontLCDConfig::LCDOrientation SkFontLCDConfig::GetSubpixelOrientation() {
+ return gLCDOrientation;
+}
+
+void SkFontLCDConfig::SetSubpixelOrientation(LCDOrientation orientation) {
+ gLCDOrientation = orientation;
+}
+
+SkFontLCDConfig::LCDOrder SkFontLCDConfig::GetSubpixelOrder() {
+ return gLCDOrder;
+}
+
+void SkFontLCDConfig::SetSubpixelOrder(LCDOrder order) {
+ gLCDOrder = order;
+}
diff --git a/gfx/skia/skia/src/core/SkFontMgr.cpp b/gfx/skia/skia/src/core/SkFontMgr.cpp
new file mode 100644
index 000000000..57f82b03b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontMgr.cpp
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFontDescriptor.h"
+#include "SkFontMgr.h"
+#include "SkOnce.h"
+#include "SkStream.h"
+#include "SkTypes.h"
+
+class SkFontStyle;
+class SkTypeface;
+
+class SkEmptyFontStyleSet : public SkFontStyleSet {
+public:
+ int count() override { return 0; }
+ void getStyle(int, SkFontStyle*, SkString*) override {
+ SkDEBUGFAIL("SkFontStyleSet::getStyle called on empty set");
+ }
+ SkTypeface* createTypeface(int index) override {
+ SkDEBUGFAIL("SkFontStyleSet::createTypeface called on empty set");
+ return nullptr;
+ }
+ SkTypeface* matchStyle(const SkFontStyle&) override {
+ return nullptr;
+ }
+};
+
+SkFontStyleSet* SkFontStyleSet::CreateEmpty() { return new SkEmptyFontStyleSet; }
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkEmptyFontMgr : public SkFontMgr {
+protected:
+ int onCountFamilies() const override {
+ return 0;
+ }
+ void onGetFamilyName(int index, SkString* familyName) const override {
+ SkDEBUGFAIL("onGetFamilyName called with bad index");
+ }
+ SkFontStyleSet* onCreateStyleSet(int index) const override {
+ SkDEBUGFAIL("onCreateStyleSet called with bad index");
+ return nullptr;
+ }
+ SkFontStyleSet* onMatchFamily(const char[]) const override {
+ return SkFontStyleSet::CreateEmpty();
+ }
+
+ virtual SkTypeface* onMatchFamilyStyle(const char[],
+ const SkFontStyle&) const override {
+ return nullptr;
+ }
+ virtual SkTypeface* onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle& style,
+ const char* bcp47[],
+ int bcp47Count,
+ SkUnichar character) const override {
+ return nullptr;
+ }
+ virtual SkTypeface* onMatchFaceStyle(const SkTypeface*,
+ const SkFontStyle&) const override {
+ return nullptr;
+ }
+ SkTypeface* onCreateFromData(SkData*, int) const override {
+ return nullptr;
+ }
+ SkTypeface* onCreateFromStream(SkStreamAsset* stream, int) const override {
+ delete stream;
+ return nullptr;
+ }
+ SkTypeface* onCreateFromFile(const char[], int) const override {
+ return nullptr;
+ }
+ SkTypeface* onLegacyCreateTypeface(const char [], SkFontStyle) const override {
+ return nullptr;
+ }
+};
+
+static SkFontStyleSet* emptyOnNull(SkFontStyleSet* fsset) {
+ if (nullptr == fsset) {
+ fsset = SkFontStyleSet::CreateEmpty();
+ }
+ return fsset;
+}
+
+int SkFontMgr::countFamilies() const {
+ return this->onCountFamilies();
+}
+
+void SkFontMgr::getFamilyName(int index, SkString* familyName) const {
+ this->onGetFamilyName(index, familyName);
+}
+
+SkFontStyleSet* SkFontMgr::createStyleSet(int index) const {
+ return emptyOnNull(this->onCreateStyleSet(index));
+}
+
+SkFontStyleSet* SkFontMgr::matchFamily(const char familyName[]) const {
+ return emptyOnNull(this->onMatchFamily(familyName));
+}
+
+SkTypeface* SkFontMgr::matchFamilyStyle(const char familyName[],
+ const SkFontStyle& fs) const {
+ return this->onMatchFamilyStyle(familyName, fs);
+}
+
+SkTypeface* SkFontMgr::matchFamilyStyleCharacter(const char familyName[], const SkFontStyle& style,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const {
+ return this->onMatchFamilyStyleCharacter(familyName, style, bcp47, bcp47Count, character);
+}
+
+SkTypeface* SkFontMgr::matchFaceStyle(const SkTypeface* face,
+ const SkFontStyle& fs) const {
+ return this->onMatchFaceStyle(face, fs);
+}
+
+SkTypeface* SkFontMgr::createFromData(SkData* data, int ttcIndex) const {
+ if (nullptr == data) {
+ return nullptr;
+ }
+ return this->onCreateFromData(data, ttcIndex);
+}
+
+SkTypeface* SkFontMgr::createFromStream(SkStreamAsset* stream, int ttcIndex) const {
+ if (nullptr == stream) {
+ return nullptr;
+ }
+ return this->onCreateFromStream(stream, ttcIndex);
+}
+
+SkTypeface* SkFontMgr::createFromStream(SkStreamAsset* stream, const FontParameters& params) const {
+ if (nullptr == stream) {
+ return nullptr;
+ }
+ return this->onCreateFromStream(stream, params);
+}
+
+SkTypeface* SkFontMgr::createFromFontData(std::unique_ptr<SkFontData> data) const {
+ if (nullptr == data) {
+ return nullptr;
+ }
+ return this->onCreateFromFontData(std::move(data));
+}
+
+// This implementation is temporary until it can be made pure virtual.
+SkTypeface* SkFontMgr::onCreateFromStream(SkStreamAsset* stream, const FontParameters& p) const {
+ return this->createFromStream(stream, p.getCollectionIndex());
+}
+
+// This implementation is temporary until it can be made pure virtual.
+SkTypeface* SkFontMgr::onCreateFromFontData(std::unique_ptr<SkFontData> data) const {
+ return this->createFromStream(data->detachStream().release(), data->getIndex());
+}
+
+SkTypeface* SkFontMgr::createFromFile(const char path[], int ttcIndex) const {
+ if (nullptr == path) {
+ return nullptr;
+ }
+ return this->onCreateFromFile(path, ttcIndex);
+}
+
+SkTypeface* SkFontMgr::legacyCreateTypeface(const char familyName[], SkFontStyle style) const {
+ return this->onLegacyCreateTypeface(familyName, style);
+}
+
+SkFontMgr* SkFontMgr::RefDefault() {
+ static SkOnce once;
+ static SkFontMgr* singleton;
+
+ once([]{
+ SkFontMgr* fm = SkFontMgr::Factory();
+ singleton = fm ? fm : new SkEmptyFontMgr;
+ });
+ return SkRef(singleton);
+}
+
+/**
+* Width has the greatest priority.
+* If the value of pattern.width is 5 (normal) or less,
+* narrower width values are checked first, then wider values.
+* If the value of pattern.width is greater than 5 (normal),
+* wider values are checked first, followed by narrower values.
+*
+* Italic/Oblique has the next highest priority.
+* If italic requested and there is some italic font, use it.
+* If oblique requested and there is some oblique font, use it.
+* If italic requested and there is some oblique font, use it.
+* If oblique requested and there is some italic font, use it.
+*
+* Exact match.
+* If pattern.weight < 400, weights below pattern.weight are checked
+* in descending order followed by weights above pattern.weight
+* in ascending order until a match is found.
+* If pattern.weight > 500, weights above pattern.weight are checked
+* in ascending order followed by weights below pattern.weight
+* in descending order until a match is found.
+* If pattern.weight is 400, 500 is checked first
+* and then the rule for pattern.weight < 400 is used.
+* If pattern.weight is 500, 400 is checked first
+* and then the rule for pattern.weight < 400 is used.
+*/
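+/**
+* Worked example of the weight rule (illustrative only): for pattern.weight 400 and candidate
+* faces with weights 300, 500 and 700 (same width and slant), the weight terms below score
+* 1000 - 400 + 300 = 900, 500 + (1000 - 500) = 1000 (the 450..500 boost applies), and
+* 1000 - 700 = 300 respectively, so the weight-500 face is chosen.
+*/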
+SkTypeface* SkFontStyleSet::matchStyleCSS3(const SkFontStyle& pattern) {
+ int count = this->count();
+ if (0 == count) {
+ return nullptr;
+ }
+
+ struct Score {
+ int score;
+ int index;
+ Score& operator +=(int rhs) { this->score += rhs; return *this; }
+ Score& operator <<=(int rhs) { this->score <<= rhs; return *this; }
+ bool operator <(const Score& that) { return this->score < that.score; }
+ };
+
+ Score maxScore = { 0, 0 };
+ for (int i = 0; i < count; ++i) {
+ SkFontStyle current;
+ this->getStyle(i, &current, nullptr);
+ Score currentScore = { 0, i };
+
+ // CSS stretch / SkFontStyle::Width
+ // Takes priority over everything else.
+ if (pattern.width() <= SkFontStyle::kNormal_Width) {
+ if (current.width() <= pattern.width()) {
+ currentScore += 10 - pattern.width() + current.width();
+ } else {
+ currentScore += 10 - current.width();
+ }
+ } else {
+ if (current.width() > pattern.width()) {
+ currentScore += 10 + pattern.width() - current.width();
+ } else {
+ currentScore += current.width();
+ }
+ }
+ currentScore <<= 8;
+
+ // CSS style (normal, italic, oblique) / SkFontStyle::Slant (upright, italic, oblique)
+ // Takes priority over all valid weights.
+ static_assert(SkFontStyle::kUpright_Slant == 0 &&
+ SkFontStyle::kItalic_Slant == 1 &&
+ SkFontStyle::kOblique_Slant == 2,
+ "SkFontStyle::Slant values not as required.");
+ SkASSERT(0 <= pattern.slant() && pattern.slant() <= 2 &&
+ 0 <= current.slant() && current.slant() <= 2);
+ static const int score[3][3] = {
+ /* Upright Italic Oblique [current]*/
+ /* Upright */ { 3 , 1 , 2 },
+ /* Italic */ { 1 , 3 , 2 },
+ /* Oblique */ { 1 , 2 , 3 },
+ /* [pattern] */
+ };
+ currentScore += score[pattern.slant()][current.slant()];
+ currentScore <<= 8;
+
+ // Synthetics (weight, style) [no stretch synthetic?]
+
+ // CSS weight / SkFontStyle::Weight
+ // The 'closer' to the target weight, the higher the score.
+ // 1000 is the 'heaviest' recognized weight
+ if (pattern.weight() == current.weight()) {
+ currentScore += 1000;
+ } else if (pattern.weight() <= 500) {
+ if (400 <= pattern.weight() && pattern.weight() < 450) {
+ if (450 <= current.weight() && current.weight() <= 500) {
+ // Artificially boost the 500 weight.
+ // TODO: determine correct number to use.
+ currentScore += 500;
+ }
+ }
+ if (current.weight() <= pattern.weight()) {
+ currentScore += 1000 - pattern.weight() + current.weight();
+ } else {
+ currentScore += 1000 - current.weight();
+ }
+ } else if (pattern.weight() > 500) {
+ if (current.weight() > pattern.weight()) {
+ currentScore += 1000 + pattern.weight() - current.weight();
+ } else {
+ currentScore += current.weight();
+ }
+ }
+
+ if (maxScore < currentScore) {
+ maxScore = currentScore;
+ }
+ }
+
+ return this->createTypeface(maxScore.index);
+}
diff --git a/gfx/skia/skia/src/core/SkFontStream.cpp b/gfx/skia/skia/src/core/SkFontStream.cpp
new file mode 100644
index 000000000..b2ffe8deb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontStream.cpp
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkEndian.h"
+#include "SkFontStream.h"
+#include "SkStream.h"
+
+struct SkSFNTHeader {
+ uint32_t fVersion;
+ uint16_t fNumTables;
+ uint16_t fSearchRange;
+ uint16_t fEntrySelector;
+ uint16_t fRangeShift;
+};
+
+struct SkTTCFHeader {
+ uint32_t fTag;
+ uint32_t fVersion;
+ uint32_t fNumOffsets;
+ uint32_t fOffset0; // the first of N (fNumOffsets)
+};
+
+union SkSharedTTHeader {
+ SkSFNTHeader fSingle;
+ SkTTCFHeader fCollection;
+};
+
+struct SkSFNTDirEntry {
+ uint32_t fTag;
+ uint32_t fChecksum;
+ uint32_t fOffset;
+ uint32_t fLength;
+};
+
+static bool read(SkStream* stream, void* buffer, size_t amount) {
+ return stream->read(buffer, amount) == amount;
+}
+
+static bool skip(SkStream* stream, size_t amount) {
+ return stream->skip(amount) == amount;
+}
+
+/** Return the number of tables, or if this is a TTC (collection), return the
+    number of tables in the ttcIndex'th element of the collection. In either case,
+ if offsetToDir is not-null, set it to the offset to the beginning of the
+ table headers (SkSFNTDirEntry), relative to the start of the stream.
+
+ On an error, return 0 for number of tables, and ignore offsetToDir
+ */
+static int count_tables(SkStream* stream, int ttcIndex, size_t* offsetToDir) {
+ SkASSERT(ttcIndex >= 0);
+
+ SkAutoSMalloc<1024> storage(sizeof(SkSharedTTHeader));
+ SkSharedTTHeader* header = (SkSharedTTHeader*)storage.get();
+
+ if (!read(stream, header, sizeof(SkSharedTTHeader))) {
+ return 0;
+ }
+
+ // by default, SkSFNTHeader is at the start of the stream
+ size_t offset = 0;
+
+ // if we're really a collection, the first 4-bytes will be 'ttcf'
+ uint32_t tag = SkEndian_SwapBE32(header->fCollection.fTag);
+ if (SkSetFourByteTag('t', 't', 'c', 'f') == tag) {
+ unsigned count = SkEndian_SwapBE32(header->fCollection.fNumOffsets);
+ if ((unsigned)ttcIndex >= count) {
+ return 0;
+ }
+
+ if (ttcIndex > 0) { // need to read more of the shared header
+ stream->rewind();
+ size_t amount = sizeof(SkSharedTTHeader) + ttcIndex * sizeof(uint32_t);
+ header = (SkSharedTTHeader*)storage.reset(amount);
+ if (!read(stream, header, amount)) {
+ return 0;
+ }
+ }
+ // this is the offset to the local SkSFNTHeader
+ offset = SkEndian_SwapBE32((&header->fCollection.fOffset0)[ttcIndex]);
+ stream->rewind();
+ if (!skip(stream, offset)) {
+ return 0;
+ }
+ if (!read(stream, header, sizeof(SkSFNTHeader))) {
+ return 0;
+ }
+ }
+
+ if (offsetToDir) {
+ // add the size of the header, so we will point to the DirEntries
+ *offsetToDir = offset + sizeof(SkSFNTHeader);
+ }
+ return SkEndian_SwapBE16(header->fSingle.fNumTables);
+}
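+
+// Layout sketch (informal, matching the structs above): a TTC stream starts with the 'ttcf'
+// tag, a version, a count, and then `count` 32-bit offsets; count_tables() follows the
+// ttcIndex'th offset to the embedded SkSFNTHeader and reports its fNumTables, whereas a plain
+// sfnt stream begins directly with an SkSFNTHeader at offset 0.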
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct SfntHeader {
+ SfntHeader() : fCount(0), fDir(nullptr) {}
+ ~SfntHeader() { sk_free(fDir); }
+
+ /** If it returns true, then fCount and fDir are properly initialized.
+ Note: fDir will point to the raw array of SkSFNTDirEntry values,
+ meaning they will still be in the file's native endianness (BE).
+
+ fDir will be automatically freed when this object is destroyed
+ */
+ bool init(SkStream* stream, int ttcIndex) {
+ stream->rewind();
+
+ size_t offsetToDir;
+ fCount = count_tables(stream, ttcIndex, &offsetToDir);
+ if (0 == fCount) {
+ return false;
+ }
+
+ stream->rewind();
+ if (!skip(stream, offsetToDir)) {
+ return false;
+ }
+
+ size_t size = fCount * sizeof(SkSFNTDirEntry);
+ fDir = reinterpret_cast<SkSFNTDirEntry*>(sk_malloc_throw(size));
+ return read(stream, fDir, size);
+ }
+
+ int fCount;
+ SkSFNTDirEntry* fDir;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+int SkFontStream::CountTTCEntries(SkStream* stream) {
+ stream->rewind();
+
+ SkSharedTTHeader shared;
+ if (!read(stream, &shared, sizeof(shared))) {
+ return 0;
+ }
+
+ // if we're really a collection, the first 4-bytes will be 'ttcf'
+ uint32_t tag = SkEndian_SwapBE32(shared.fCollection.fTag);
+ if (SkSetFourByteTag('t', 't', 'c', 'f') == tag) {
+ return SkEndian_SwapBE32(shared.fCollection.fNumOffsets);
+ } else {
+ return 1; // normal 'sfnt' has 1 dir entry
+ }
+}
+
+int SkFontStream::GetTableTags(SkStream* stream, int ttcIndex,
+ SkFontTableTag tags[]) {
+ SfntHeader header;
+ if (!header.init(stream, ttcIndex)) {
+ return 0;
+ }
+
+ if (tags) {
+ for (int i = 0; i < header.fCount; i++) {
+ tags[i] = SkEndian_SwapBE32(header.fDir[i].fTag);
+ }
+ }
+ return header.fCount;
+}
+
+size_t SkFontStream::GetTableData(SkStream* stream, int ttcIndex,
+ SkFontTableTag tag,
+ size_t offset, size_t length, void* data) {
+ SfntHeader header;
+ if (!header.init(stream, ttcIndex)) {
+ return 0;
+ }
+
+ for (int i = 0; i < header.fCount; i++) {
+ if (SkEndian_SwapBE32(header.fDir[i].fTag) == tag) {
+ size_t realOffset = SkEndian_SwapBE32(header.fDir[i].fOffset);
+ size_t realLength = SkEndian_SwapBE32(header.fDir[i].fLength);
+ // now sanity check the caller's offset/length
+ if (offset >= realLength) {
+ return 0;
+ }
+ // if the caller is trusting the length from the file, then a
+ // hostile file might choose a value which would overflow offset +
+ // length.
+ if (offset + length < offset) {
+ return 0;
+ }
+ if (length > realLength - offset) {
+ length = realLength - offset;
+ }
+ if (data) {
+ // skip the stream to the part of the table we want to copy from
+ stream->rewind();
+ size_t bytesToSkip = realOffset + offset;
+ if (!skip(stream, bytesToSkip)) {
+ return 0;
+ }
+ if (!read(stream, data, length)) {
+ return 0;
+ }
+ }
+ return length;
+ }
+ }
+ return 0;
+}
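+
+// Clamping example (illustrative): for a table whose real length is 100 bytes, a request with
+// offset 90 and length 50 passes the offset < realLength and overflow checks, is clamped to
+// length = 100 - 90 = 10, and copies at most 10 bytes into data.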
diff --git a/gfx/skia/skia/src/core/SkFontStream.h b/gfx/skia/skia/src/core/SkFontStream.h
new file mode 100644
index 000000000..0a2322fc0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontStream.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontStream_DEFINED
+#define SkFontStream_DEFINED
+
+class SkStream;
+
+#include "SkTypeface.h"
+
+class SkFontStream {
+public:
+ /**
+ * Return the number of shared directories inside a TTC sfnt, or return 1
+ * if the stream is a normal sfnt (ttf). If there is an error or
+ * no directory is found, return 0.
+ *
+ * Note: the stream is rewound initially, but is returned at an arbitrary
+ * read offset.
+ */
+ static int CountTTCEntries(SkStream*);
+
+ /**
+ * @param ttcIndex 0 for normal sfnts, or the index within a TTC sfnt.
+ *
+ * Note: the stream is rewound initially, but is returned at an arbitrary
+ * read offset.
+ */
+ static int GetTableTags(SkStream*, int ttcIndex, SkFontTableTag tags[]);
+
+ /**
+ * @param ttcIndex 0 for normal sfnts, or the index within a TTC sfnt.
+ *
+ * Note: the stream is rewound initially, but is returned at an arbitrary
+ * read offset.
+ */
+ static size_t GetTableData(SkStream*, int ttcIndex, SkFontTableTag tag,
+ size_t offset, size_t length, void* data);
+
+ static size_t GetTableSize(SkStream* stream, int ttcIndex, SkFontTableTag tag) {
+ return GetTableData(stream, ttcIndex, tag, 0, ~0U, nullptr);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkFontStyle.cpp b/gfx/skia/skia/src/core/SkFontStyle.cpp
new file mode 100644
index 000000000..a24e7cde2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontStyle.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFontStyle.h"
+#include "SkTypeface.h"
+#include "SkTypes.h"
+
+SkFontStyle::SkFontStyle() {
+ fUnion.fU32 = 0;
+ fUnion.fR.fWeight = kNormal_Weight;
+ fUnion.fR.fWidth = kNormal_Width;
+ fUnion.fR.fSlant = kUpright_Slant;
+}
+
+SkFontStyle::SkFontStyle(int weight, int width, Slant slant) {
+ fUnion.fU32 = 0;
+ fUnion.fR.fWeight = SkTPin<int>(weight, kInvisible_Weight, kExtraBlack_Weight);
+ fUnion.fR.fWidth = SkTPin<int>(width, kUltraCondensed_Width, kUltraExpanded_Width);
+ fUnion.fR.fSlant = SkTPin<int>(slant, kUpright_Slant, kOblique_Slant);
+}
+
+/*static*/SkFontStyle SkFontStyle::FromOldStyle(unsigned oldStyle) {
+ return SkFontStyle((oldStyle & SkTypeface::kBold) ? SkFontStyle::kBold_Weight
+ : SkFontStyle::kNormal_Weight,
+ SkFontStyle::kNormal_Width,
+ (oldStyle & SkTypeface::kItalic) ? SkFontStyle::kItalic_Slant
+ : SkFontStyle::kUpright_Slant);
+}
diff --git a/gfx/skia/skia/src/core/SkForceCPlusPlusLinking.cpp b/gfx/skia/skia/src/core/SkForceCPlusPlusLinking.cpp
new file mode 100644
index 000000000..829d0d3fe
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkForceCPlusPlusLinking.cpp
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// This file is intentionally empty. We add it to the dependencies of skia_lib
+// so that GYP detects that libskia is a C++ library (implicitly depending on
+// the standard library, -lm, etc.) from its file extension.
+//
+// If we didn't do this, GYP would link libskia.so as a C library and we'd get
+// link-time failures for simple binaries that don't themselves depend on the
+// C++ standard library.
+//
+// Even if we try hard not to depend on the standard library, say, never
+// calling new or delete, the compiler can still insert calls on our behalf
+// that make us depend on it anyway: a handler for when we call a pure
+// virtual, thread-safety guards around statics, probably other similar
+// language constructs.
diff --git a/gfx/skia/skia/src/core/SkFuzzLogging.h b/gfx/skia/skia/src/core/SkFuzzLogging.h
new file mode 100644
index 000000000..8e546e3a0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFuzzLogging.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFuzzLogging_DEFINED
+#define SkFuzzLogging_DEFINED
+
+// Utilities for Skia's fuzzer
+
+// When SK_FUZZ_LOGGING is defined, SkDebugfs relevant to image filter fuzzing
+// will be enabled. This allows the filter fuzzing code to whitelist fuzzer
+// failures based on the output logs.
+// Define this flag in your SkUserConfig.h or in your Make/Build system.
+#ifdef SK_FUZZ_LOGGING
+ #define SkFUZZF(args) SkDebugf("SkFUZZ: "); SkDebugf args
+#else
+ #define SkFUZZF(args)
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkGeometry.cpp b/gfx/skia/skia/src/core/SkGeometry.cpp
new file mode 100644
index 000000000..58b45140e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGeometry.cpp
@@ -0,0 +1,1421 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkGeometry.h"
+#include "SkMatrix.h"
+#include "SkNx.h"
+
+static SkVector to_vector(const Sk2s& x) {
+ SkVector vector;
+ x.store(&vector);
+ return vector;
+}
+
+/** If defined, this makes eval_quad and eval_cubic do more setup (sometimes
+    involving integer multiplies by 2 or 3), but fewer calls to SkScalarMul.
+ May also introduce overflow of fixed when we compute our setup.
+*/
+// #define DIRECT_EVAL_OF_POLYNOMIALS
+
+////////////////////////////////////////////////////////////////////////
+
+static int is_not_monotonic(SkScalar a, SkScalar b, SkScalar c) {
+ SkScalar ab = a - b;
+ SkScalar bc = b - c;
+ if (ab < 0) {
+ bc = -bc;
+ }
+ return ab == 0 || bc < 0;
+}
+
+////////////////////////////////////////////////////////////////////////
+
+static bool is_unit_interval(SkScalar x) {
+ return x > 0 && x < SK_Scalar1;
+}
+
+static int valid_unit_divide(SkScalar numer, SkScalar denom, SkScalar* ratio) {
+ SkASSERT(ratio);
+
+ if (numer < 0) {
+ numer = -numer;
+ denom = -denom;
+ }
+
+ if (denom == 0 || numer == 0 || numer >= denom) {
+ return 0;
+ }
+
+ SkScalar r = numer / denom;
+ if (SkScalarIsNaN(r)) {
+ return 0;
+ }
+ SkASSERTF(r >= 0 && r < SK_Scalar1, "numer %f, denom %f, r %f", numer, denom, r);
+ if (r == 0) { // catch underflow if numer <<<< denom
+ return 0;
+ }
+ *ratio = r;
+ return 1;
+}
+
+/** From Numerical Recipes in C.
+
+ Q = -1/2 (B + sign(B) sqrt[B*B - 4*A*C])
+ x1 = Q / A
+ x2 = C / Q
+*/
+int SkFindUnitQuadRoots(SkScalar A, SkScalar B, SkScalar C, SkScalar roots[2]) {
+ SkASSERT(roots);
+
+ if (A == 0) {
+ return valid_unit_divide(-C, B, roots);
+ }
+
+ SkScalar* r = roots;
+
+ SkScalar R = B*B - 4*A*C;
+ if (R < 0 || !SkScalarIsFinite(R)) { // complex roots
+        // If R is infinite, it's possible that it may still produce
+        // useful results if the operation were repeated in doubles.
+        // The flip side is determining whether the more precise answer
+        // isn't useful because surrounding machinery (e.g., subtracting
+        // the axis offset from C) already discards the extra precision.
+        // More investigation and unit tests required...
+ return 0;
+ }
+ R = SkScalarSqrt(R);
+
+ SkScalar Q = (B < 0) ? -(B-R)/2 : -(B+R)/2;
+ r += valid_unit_divide(Q, A, r);
+ r += valid_unit_divide(C, Q, r);
+ if (r - roots == 2) {
+ if (roots[0] > roots[1])
+ SkTSwap<SkScalar>(roots[0], roots[1]);
+ else if (roots[0] == roots[1]) // nearly-equal?
+ r -= 1; // skip the double root
+ }
+ return (int)(r - roots);
+}
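+
+// Worked example (illustrative): A = 1, B = -1, C = 0.21 gives R = 1 - 0.84 = 0.16,
+// sqrt(R) = 0.4, Q = -(B - R)/2 = 0.7, so the candidate roots are Q/A = 0.7 and C/Q = 0.3;
+// both lie in (0, 1), and after the sort the function returns 2 with roots[] = {0.3, 0.7}.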
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+void SkEvalQuadAt(const SkPoint src[3], SkScalar t, SkPoint* pt, SkVector* tangent) {
+ SkASSERT(src);
+ SkASSERT(t >= 0 && t <= SK_Scalar1);
+
+ if (pt) {
+ *pt = SkEvalQuadAt(src, t);
+ }
+ if (tangent) {
+ *tangent = SkEvalQuadTangentAt(src, t);
+ }
+}
+
+SkPoint SkEvalQuadAt(const SkPoint src[3], SkScalar t) {
+ return to_point(SkQuadCoeff(src).eval(t));
+}
+
+SkVector SkEvalQuadTangentAt(const SkPoint src[3], SkScalar t) {
+ // The derivative equation is 2(b - a +(a - 2b +c)t). This returns a
+ // zero tangent vector when t is 0 or 1, and the control point is equal
+ // to the end point. In this case, use the quad end points to compute the tangent.
+ if ((t == 0 && src[0] == src[1]) || (t == 1 && src[1] == src[2])) {
+ return src[2] - src[0];
+ }
+ SkASSERT(src);
+ SkASSERT(t >= 0 && t <= SK_Scalar1);
+
+ Sk2s P0 = from_point(src[0]);
+ Sk2s P1 = from_point(src[1]);
+ Sk2s P2 = from_point(src[2]);
+
+ Sk2s B = P1 - P0;
+ Sk2s A = P2 - P1 - B;
+ Sk2s T = A * Sk2s(t) + B;
+
+ return to_vector(T + T);
+}
+
+static inline Sk2s interp(const Sk2s& v0, const Sk2s& v1, const Sk2s& t) {
+ return v0 + (v1 - v0) * t;
+}
+
+void SkChopQuadAt(const SkPoint src[3], SkPoint dst[5], SkScalar t) {
+ SkASSERT(t > 0 && t < SK_Scalar1);
+
+ Sk2s p0 = from_point(src[0]);
+ Sk2s p1 = from_point(src[1]);
+ Sk2s p2 = from_point(src[2]);
+ Sk2s tt(t);
+
+ Sk2s p01 = interp(p0, p1, tt);
+ Sk2s p12 = interp(p1, p2, tt);
+
+ dst[0] = to_point(p0);
+ dst[1] = to_point(p01);
+ dst[2] = to_point(interp(p01, p12, tt));
+ dst[3] = to_point(p12);
+ dst[4] = to_point(p2);
+}
+
+void SkChopQuadAtHalf(const SkPoint src[3], SkPoint dst[5]) {
+ SkChopQuadAt(src, dst, 0.5f);
+}
+
+/** Quad'(t) = At + B, where
+ A = 2(a - 2b + c)
+ B = 2(b - a)
+ Solve for t, only if it fits between 0 < t < 1
+*/
+int SkFindQuadExtrema(SkScalar a, SkScalar b, SkScalar c, SkScalar tValue[1]) {
+ /* At + B == 0
+ t = -B / A
+ */
+ return valid_unit_divide(a - b, a - b - b + c, tValue);
+}
+
+static inline void flatten_double_quad_extrema(SkScalar coords[14]) {
+ coords[2] = coords[6] = coords[4];
+}
+
+/* Returns 0 for 1 quad, and 1 for two quads; either way the answer is
+ stored in dst[]. Guarantees that the 1/2 quads will be monotonic.
+ */
+int SkChopQuadAtYExtrema(const SkPoint src[3], SkPoint dst[5]) {
+ SkASSERT(src);
+ SkASSERT(dst);
+
+ SkScalar a = src[0].fY;
+ SkScalar b = src[1].fY;
+ SkScalar c = src[2].fY;
+
+ if (is_not_monotonic(a, b, c)) {
+ SkScalar tValue;
+ if (valid_unit_divide(a - b, a - b - b + c, &tValue)) {
+ SkChopQuadAt(src, dst, tValue);
+ flatten_double_quad_extrema(&dst[0].fY);
+ return 1;
+ }
+ // if we get here, we need to force dst to be monotonic, even though
+ // we couldn't compute a unit_divide value (probably underflow).
+ b = SkScalarAbs(a - b) < SkScalarAbs(b - c) ? a : c;
+ }
+ dst[0].set(src[0].fX, a);
+ dst[1].set(src[1].fX, b);
+ dst[2].set(src[2].fX, c);
+ return 0;
+}
+
+/* Returns 0 for 1 quad and 1 for two quads; either way the answer is
+   stored in dst[]. Guarantees that the 1 or 2 quads will be monotonic.
+ */
+int SkChopQuadAtXExtrema(const SkPoint src[3], SkPoint dst[5]) {
+ SkASSERT(src);
+ SkASSERT(dst);
+
+ SkScalar a = src[0].fX;
+ SkScalar b = src[1].fX;
+ SkScalar c = src[2].fX;
+
+ if (is_not_monotonic(a, b, c)) {
+ SkScalar tValue;
+ if (valid_unit_divide(a - b, a - b - b + c, &tValue)) {
+ SkChopQuadAt(src, dst, tValue);
+ flatten_double_quad_extrema(&dst[0].fX);
+ return 1;
+ }
+ // if we get here, we need to force dst to be monotonic, even though
+ // we couldn't compute a unit_divide value (probably underflow).
+ b = SkScalarAbs(a - b) < SkScalarAbs(b - c) ? a : c;
+ }
+ dst[0].set(a, src[0].fY);
+ dst[1].set(b, src[1].fY);
+ dst[2].set(c, src[2].fY);
+ return 0;
+}
+
+// F(t) = a (1 - t) ^ 2 + 2 b t (1 - t) + c t ^ 2
+// F'(t) = 2 (b - a) + 2 (a - 2b + c) t
+// F''(t) = 2 (a - 2b + c)
+//
+// A = 2 (b - a)
+// B = 2 (a - 2b + c)
+//
+// Maximum curvature for a quadratic means solving
+// Fx' Fx'' + Fy' Fy'' = 0
+//
+// t = - (Ax Bx + Ay By) / (Bx ^ 2 + By ^ 2)
+//
+SkScalar SkFindQuadMaxCurvature(const SkPoint src[3]) {
+ SkScalar Ax = src[1].fX - src[0].fX;
+ SkScalar Ay = src[1].fY - src[0].fY;
+ SkScalar Bx = src[0].fX - src[1].fX - src[1].fX + src[2].fX;
+ SkScalar By = src[0].fY - src[1].fY - src[1].fY + src[2].fY;
+ SkScalar t = 0; // 0 means don't chop
+
+ (void)valid_unit_divide(-(Ax * Bx + Ay * By), Bx * Bx + By * By, &t);
+ return t;
+}
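+
+// Illustrative check (example values only): for the symmetric quad
+// (0,0), (1,1), (2,0) the code above computes A = (1,1) and B = (0,-2), so
+// t = -(Ax*Bx + Ay*By) / (Bx^2 + By^2) = 2/4 = 0.5 -- the apex of the parabola,
+// where curvature is maximal. The common factor of 2 in A and B cancels in the
+// ratio, which is why it is dropped.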
+
+int SkChopQuadAtMaxCurvature(const SkPoint src[3], SkPoint dst[5]) {
+ SkScalar t = SkFindQuadMaxCurvature(src);
+ if (t == 0) {
+ memcpy(dst, src, 3 * sizeof(SkPoint));
+ return 1;
+ } else {
+ SkChopQuadAt(src, dst, t);
+ return 2;
+ }
+}
+
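+// Degree elevation: the quad {P0, P1, P2} equals the cubic
+// {P0, P0 + (2/3)(P1 - P0), P2 + (2/3)(P1 - P2), P2}; the code below builds
+// those two interior control points directly.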
+void SkConvertQuadToCubic(const SkPoint src[3], SkPoint dst[4]) {
+ Sk2s scale(SkDoubleToScalar(2.0 / 3.0));
+ Sk2s s0 = from_point(src[0]);
+ Sk2s s1 = from_point(src[1]);
+ Sk2s s2 = from_point(src[2]);
+
+ dst[0] = src[0];
+ dst[1] = to_point(s0 + (s1 - s0) * scale);
+ dst[2] = to_point(s2 + (s1 - s2) * scale);
+ dst[3] = src[2];
+}
+
+//////////////////////////////////////////////////////////////////////////////
+///// CUBICS // CUBICS // CUBICS // CUBICS // CUBICS // CUBICS // CUBICS /////
+//////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_SUPPORT_LEGACY_EVAL_CUBIC
+static SkScalar eval_cubic(const SkScalar src[], SkScalar t) {
+ SkASSERT(src);
+ SkASSERT(t >= 0 && t <= SK_Scalar1);
+
+ if (t == 0) {
+ return src[0];
+ }
+
+#ifdef DIRECT_EVAL_OF_POLYNOMIALS
+ SkScalar D = src[0];
+ SkScalar A = src[6] + 3*(src[2] - src[4]) - D;
+ SkScalar B = 3*(src[4] - src[2] - src[2] + D);
+ SkScalar C = 3*(src[2] - D);
+
+ return SkScalarMulAdd(SkScalarMulAdd(SkScalarMulAdd(A, t, B), t, C), t, D);
+#else
+ SkScalar ab = SkScalarInterp(src[0], src[2], t);
+ SkScalar bc = SkScalarInterp(src[2], src[4], t);
+ SkScalar cd = SkScalarInterp(src[4], src[6], t);
+ SkScalar abc = SkScalarInterp(ab, bc, t);
+ SkScalar bcd = SkScalarInterp(bc, cd, t);
+ return SkScalarInterp(abc, bcd, t);
+#endif
+}
+#endif
+
+static SkVector eval_cubic_derivative(const SkPoint src[4], SkScalar t) {
+ SkQuadCoeff coeff;
+ Sk2s P0 = from_point(src[0]);
+ Sk2s P1 = from_point(src[1]);
+ Sk2s P2 = from_point(src[2]);
+ Sk2s P3 = from_point(src[3]);
+
+ coeff.fA = P3 + Sk2s(3) * (P1 - P2) - P0;
+ coeff.fB = times_2(P2 - times_2(P1) + P0);
+ coeff.fC = P1 - P0;
+ return to_vector(coeff.eval(t));
+}
+
+static SkVector eval_cubic_2ndDerivative(const SkPoint src[4], SkScalar t) {
+ Sk2s P0 = from_point(src[0]);
+ Sk2s P1 = from_point(src[1]);
+ Sk2s P2 = from_point(src[2]);
+ Sk2s P3 = from_point(src[3]);
+ Sk2s A = P3 + Sk2s(3) * (P1 - P2) - P0;
+ Sk2s B = P2 - times_2(P1) + P0;
+
+ return to_vector(A * Sk2s(t) + B);
+}
+
+void SkEvalCubicAt(const SkPoint src[4], SkScalar t, SkPoint* loc,
+ SkVector* tangent, SkVector* curvature) {
+ SkASSERT(src);
+ SkASSERT(t >= 0 && t <= SK_Scalar1);
+
+ if (loc) {
+#ifdef SK_SUPPORT_LEGACY_EVAL_CUBIC
+ loc->set(eval_cubic(&src[0].fX, t), eval_cubic(&src[0].fY, t));
+#else
+ *loc = to_point(SkCubicCoeff(src).eval(t));
+#endif
+ }
+ if (tangent) {
+ // The derivative equation returns a zero tangent vector when t is 0 or 1, and the
+ // adjacent control point is equal to the end point. In this case, use the
+ // next control point or the end points to compute the tangent.
+ if ((t == 0 && src[0] == src[1]) || (t == 1 && src[2] == src[3])) {
+ if (t == 0) {
+ *tangent = src[2] - src[0];
+ } else {
+ *tangent = src[3] - src[1];
+ }
+ if (!tangent->fX && !tangent->fY) {
+ *tangent = src[3] - src[0];
+ }
+ } else {
+ *tangent = eval_cubic_derivative(src, t);
+ }
+ }
+ if (curvature) {
+ *curvature = eval_cubic_2ndDerivative(src, t);
+ }
+}
+
+/** Cubic'(t) = At^2 + Bt + C, where
+ A = 3(-a + 3(b - c) + d)
+ B = 6(a - 2b + c)
+ C = 3(b - a)
+    Solve for t, keeping only those that fit between 0 < t < 1
+*/
+int SkFindCubicExtrema(SkScalar a, SkScalar b, SkScalar c, SkScalar d,
+ SkScalar tValues[2]) {
+ // we divide A,B,C by 3 to simplify
+ SkScalar A = d - a + 3*(b - c);
+ SkScalar B = 2*(a - b - b + c);
+ SkScalar C = b - a;
+
+ return SkFindUnitQuadRoots(A, B, C, tValues);
+}
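+
+// Illustrative check (example values only): for control values
+// a = 0, b = 1, c = -1, d = 0 the scaled coefficients are A = 6, B = -6, C = 1,
+// and SkFindUnitQuadRoots reports the two extrema of F(t) = 3t(1-t)(1-2t) at
+// t = (3 +/- sqrt(3)) / 6, roughly 0.211 and 0.789.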
+
+void SkChopCubicAt(const SkPoint src[4], SkPoint dst[7], SkScalar t) {
+ SkASSERT(t > 0 && t < SK_Scalar1);
+
+ Sk2s p0 = from_point(src[0]);
+ Sk2s p1 = from_point(src[1]);
+ Sk2s p2 = from_point(src[2]);
+ Sk2s p3 = from_point(src[3]);
+ Sk2s tt(t);
+
+ Sk2s ab = interp(p0, p1, tt);
+ Sk2s bc = interp(p1, p2, tt);
+ Sk2s cd = interp(p2, p3, tt);
+ Sk2s abc = interp(ab, bc, tt);
+ Sk2s bcd = interp(bc, cd, tt);
+ Sk2s abcd = interp(abc, bcd, tt);
+
+ dst[0] = src[0];
+ dst[1] = to_point(ab);
+ dst[2] = to_point(abc);
+ dst[3] = to_point(abcd);
+ dst[4] = to_point(bcd);
+ dst[5] = to_point(cd);
+ dst[6] = src[3];
+}
+
+/* http://code.google.com/p/skia/issues/detail?id=32
+
+ This test code would fail when we didn't check the return result of
+ valid_unit_divide in SkChopCubicAt(... tValues[], int roots). The reason is
+ that after the first chop, the parameters to valid_unit_divide are equal
+ (thanks to finite float precision and rounding in the subtracts). Thus
+ even though the 2nd tValue looks < 1.0, after we renormalize it, we end
+ up with 1.0, hence the need to check and just return the last cubic as
+   a degenerate clump of 4 points in the same place.
+
+ static void test_cubic() {
+ SkPoint src[4] = {
+ { 556.25000, 523.03003 },
+ { 556.23999, 522.96002 },
+ { 556.21997, 522.89001 },
+ { 556.21997, 522.82001 }
+ };
+ SkPoint dst[10];
+ SkScalar tval[] = { 0.33333334f, 0.99999994f };
+ SkChopCubicAt(src, dst, tval, 2);
+ }
+ */
+
+void SkChopCubicAt(const SkPoint src[4], SkPoint dst[],
+ const SkScalar tValues[], int roots) {
+#ifdef SK_DEBUG
+ {
+ for (int i = 0; i < roots - 1; i++)
+ {
+ SkASSERT(is_unit_interval(tValues[i]));
+ SkASSERT(is_unit_interval(tValues[i+1]));
+ SkASSERT(tValues[i] < tValues[i+1]);
+ }
+ }
+#endif
+
+ if (dst) {
+ if (roots == 0) { // nothing to chop
+ memcpy(dst, src, 4*sizeof(SkPoint));
+ } else {
+ SkScalar t = tValues[0];
+ SkPoint tmp[4];
+
+ for (int i = 0; i < roots; i++) {
+ SkChopCubicAt(src, dst, t);
+ if (i == roots - 1) {
+ break;
+ }
+
+ dst += 3;
+ // have src point to the remaining cubic (after the chop)
+ memcpy(tmp, dst, 4 * sizeof(SkPoint));
+ src = tmp;
+
+ // watch out in case the renormalized t isn't in range
+ if (!valid_unit_divide(tValues[i+1] - tValues[i],
+ SK_Scalar1 - tValues[i], &t)) {
+ // if we can't, just create a degenerate cubic
+ dst[4] = dst[5] = dst[6] = src[3];
+ break;
+ }
+ }
+ }
+ }
+}
+
+void SkChopCubicAtHalf(const SkPoint src[4], SkPoint dst[7]) {
+ SkChopCubicAt(src, dst, 0.5f);
+}
+
+static void flatten_double_cubic_extrema(SkScalar coords[14]) {
+ coords[4] = coords[8] = coords[6];
+}
+
+/** Given 4 points on a cubic bezier, chop it into 1, 2, 3 beziers such that
+ the resulting beziers are monotonic in Y. This is called by the scan
+ converter. Depending on what is returned, dst[] is treated as follows:
+ 0 dst[0..3] is the original cubic
+ 1 dst[0..3] and dst[3..6] are the two new cubics
+ 2 dst[0..3], dst[3..6], dst[6..9] are the three new cubics
+ If dst == null, it is ignored and only the count is returned.
+*/
+int SkChopCubicAtYExtrema(const SkPoint src[4], SkPoint dst[10]) {
+ SkScalar tValues[2];
+ int roots = SkFindCubicExtrema(src[0].fY, src[1].fY, src[2].fY,
+ src[3].fY, tValues);
+
+ SkChopCubicAt(src, dst, tValues, roots);
+ if (dst && roots > 0) {
+ // we do some cleanup to ensure our Y extrema are flat
+ flatten_double_cubic_extrema(&dst[0].fY);
+ if (roots == 2) {
+ flatten_double_cubic_extrema(&dst[3].fY);
+ }
+ }
+ return roots;
+}
+
+int SkChopCubicAtXExtrema(const SkPoint src[4], SkPoint dst[10]) {
+ SkScalar tValues[2];
+ int roots = SkFindCubicExtrema(src[0].fX, src[1].fX, src[2].fX,
+ src[3].fX, tValues);
+
+ SkChopCubicAt(src, dst, tValues, roots);
+ if (dst && roots > 0) {
+        // we do some cleanup to ensure our X extrema are flat
+ flatten_double_cubic_extrema(&dst[0].fX);
+ if (roots == 2) {
+ flatten_double_cubic_extrema(&dst[3].fX);
+ }
+ }
+ return roots;
+}
+
+/** http://www.faculty.idc.ac.il/arik/quality/appendixA.html
+
+ Inflection means that curvature is zero.
+ Curvature is [F' x F''] / [F'^3]
+    So we solve F'x X F''y - F'y X F''x == 0
+ After some canceling of the cubic term, we get
+ A = b - a
+ B = c - 2b + a
+ C = d - 3c + 3b - a
+ (BxCy - ByCx)t^2 + (AxCy - AyCx)t + AxBy - AyBx == 0
+*/
+int SkFindCubicInflections(const SkPoint src[4], SkScalar tValues[]) {
+ SkScalar Ax = src[1].fX - src[0].fX;
+ SkScalar Ay = src[1].fY - src[0].fY;
+ SkScalar Bx = src[2].fX - 2 * src[1].fX + src[0].fX;
+ SkScalar By = src[2].fY - 2 * src[1].fY + src[0].fY;
+ SkScalar Cx = src[3].fX + 3 * (src[1].fX - src[2].fX) - src[0].fX;
+ SkScalar Cy = src[3].fY + 3 * (src[1].fY - src[2].fY) - src[0].fY;
+
+ return SkFindUnitQuadRoots(Bx*Cy - By*Cx,
+ Ax*Cy - Ay*Cx,
+ Ax*By - Ay*Bx,
+ tValues);
+}
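+
+// Illustrative check (example values only): the S-shaped cubic
+// (0,0), (1,1), (2,-1), (3,0) gives A = (1,1), B = (0,-3), C = (0,6), so the
+// quadratic above degenerates to 6t - 3 == 0 and the single inflection is
+// reported at t = 0.5, the curve's center of symmetry.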
+
+int SkChopCubicAtInflections(const SkPoint src[], SkPoint dst[10]) {
+ SkScalar tValues[2];
+ int count = SkFindCubicInflections(src, tValues);
+
+ if (dst) {
+ if (count == 0) {
+ memcpy(dst, src, 4 * sizeof(SkPoint));
+ } else {
+ SkChopCubicAt(src, dst, tValues, count);
+ }
+ }
+ return count + 1;
+}
+
+// See http://http.developer.nvidia.com/GPUGems3/gpugems3_ch25.html (from the book GPU Gems 3)
+// discr(I) = d0^2 * (3*d1^2 - 4*d0*d2)
+// Classification:
+// discr(I) > 0 Serpentine
+// discr(I) = 0 Cusp
+// discr(I) < 0 Loop
+// d0 = d1 = 0 Quadratic
+// d0 = d1 = d2 = 0 Line
+// p0 = p1 = p2 = p3 Point
+static SkCubicType classify_cubic(const SkPoint p[4], const SkScalar d[3]) {
+ if (p[0] == p[1] && p[0] == p[2] && p[0] == p[3]) {
+ return kPoint_SkCubicType;
+ }
+ const SkScalar discr = d[0] * d[0] * (3.f * d[1] * d[1] - 4.f * d[0] * d[2]);
+ if (discr > SK_ScalarNearlyZero) {
+ return kSerpentine_SkCubicType;
+ } else if (discr < -SK_ScalarNearlyZero) {
+ return kLoop_SkCubicType;
+ } else {
+ if (0.f == d[0] && 0.f == d[1]) {
+ return (0.f == d[2] ? kLine_SkCubicType : kQuadratic_SkCubicType);
+ } else {
+ return kCusp_SkCubicType;
+ }
+ }
+}
+
+// Assumes the third component of points is 1.
+// Calcs p0 . (p1 x p2)
+static SkScalar calc_dot_cross_cubic(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2) {
+ const SkScalar xComp = p0.fX * (p1.fY - p2.fY);
+ const SkScalar yComp = p0.fY * (p2.fX - p1.fX);
+ const SkScalar wComp = p1.fX * p2.fY - p1.fY * p2.fX;
+ return (xComp + yComp + wComp);
+}
+
+// Calc coefficients of I(s,t) where roots of I are inflection points of curve
+// I(s,t) = t*(3*d0*s^2 - 3*d1*s*t + d2*t^2)
+// d0 = a1 - 2*a2+3*a3
+// d1 = -a2 + 3*a3
+// d2 = 3*a3
+// a1 = p0 . (p3 x p2)
+// a2 = p1 . (p0 x p3)
+// a3 = p2 . (p1 x p0)
+// Places the values of d0, d1, d2 in the array d passed in
+static void calc_cubic_inflection_func(const SkPoint p[4], SkScalar d[3]) {
+ SkScalar a1 = calc_dot_cross_cubic(p[0], p[3], p[2]);
+ SkScalar a2 = calc_dot_cross_cubic(p[1], p[0], p[3]);
+ SkScalar a3 = calc_dot_cross_cubic(p[2], p[1], p[0]);
+
+    // need to scale the a values or values in later calculations will grow too large
+ SkScalar max = SkScalarAbs(a1);
+ max = SkMaxScalar(max, SkScalarAbs(a2));
+ max = SkMaxScalar(max, SkScalarAbs(a3));
+ max = 1.f/max;
+ a1 = a1 * max;
+ a2 = a2 * max;
+ a3 = a3 * max;
+
+ d[2] = 3.f * a3;
+ d[1] = d[2] - a2;
+ d[0] = d[1] - a2 + a1;
+}
+
+SkCubicType SkClassifyCubic(const SkPoint src[4], SkScalar d[3]) {
+ calc_cubic_inflection_func(src, d);
+ return classify_cubic(src, d);
+}
+
+template <typename T> void bubble_sort(T array[], int count) {
+ for (int i = count - 1; i > 0; --i)
+ for (int j = i; j > 0; --j)
+ if (array[j] < array[j-1])
+ {
+ T tmp(array[j]);
+ array[j] = array[j-1];
+ array[j-1] = tmp;
+ }
+}
+
+/**
+ * Given an array and count, remove all pair-wise duplicates from the array,
+ * keeping the existing sorting, and return the new count
+ */
+static int collaps_duplicates(SkScalar array[], int count) {
+ for (int n = count; n > 1; --n) {
+ if (array[0] == array[1]) {
+ for (int i = 1; i < n; ++i) {
+ array[i - 1] = array[i];
+ }
+ count -= 1;
+ } else {
+ array += 1;
+ }
+ }
+ return count;
+}
+
+#ifdef SK_DEBUG
+
+#define TEST_COLLAPS_ENTRY(array) array, SK_ARRAY_COUNT(array)
+
+static void test_collaps_duplicates() {
+ static bool gOnce;
+ if (gOnce) { return; }
+ gOnce = true;
+ const SkScalar src0[] = { 0 };
+ const SkScalar src1[] = { 0, 0 };
+ const SkScalar src2[] = { 0, 1 };
+ const SkScalar src3[] = { 0, 0, 0 };
+ const SkScalar src4[] = { 0, 0, 1 };
+ const SkScalar src5[] = { 0, 1, 1 };
+ const SkScalar src6[] = { 0, 1, 2 };
+ const struct {
+ const SkScalar* fData;
+ int fCount;
+ int fCollapsedCount;
+ } data[] = {
+ { TEST_COLLAPS_ENTRY(src0), 1 },
+ { TEST_COLLAPS_ENTRY(src1), 1 },
+ { TEST_COLLAPS_ENTRY(src2), 2 },
+ { TEST_COLLAPS_ENTRY(src3), 1 },
+ { TEST_COLLAPS_ENTRY(src4), 2 },
+ { TEST_COLLAPS_ENTRY(src5), 2 },
+ { TEST_COLLAPS_ENTRY(src6), 3 },
+ };
+ for (size_t i = 0; i < SK_ARRAY_COUNT(data); ++i) {
+ SkScalar dst[3];
+ memcpy(dst, data[i].fData, data[i].fCount * sizeof(dst[0]));
+ int count = collaps_duplicates(dst, data[i].fCount);
+ SkASSERT(data[i].fCollapsedCount == count);
+ for (int j = 1; j < count; ++j) {
+ SkASSERT(dst[j-1] < dst[j]);
+ }
+ }
+}
+#endif
+
+static SkScalar SkScalarCubeRoot(SkScalar x) {
+ return SkScalarPow(x, 0.3333333f);
+}
+
+/* Solve coeff(t) == 0, returning the number of roots that
+   lie within 0 < t < 1.
+ coeff[0]t^3 + coeff[1]t^2 + coeff[2]t + coeff[3]
+
+ Eliminates repeated roots (so that all tValues are distinct, and are always
+   in increasing order).
+*/
+static int solve_cubic_poly(const SkScalar coeff[4], SkScalar tValues[3]) {
+ if (SkScalarNearlyZero(coeff[0])) { // we're just a quadratic
+ return SkFindUnitQuadRoots(coeff[1], coeff[2], coeff[3], tValues);
+ }
+
+ SkScalar a, b, c, Q, R;
+
+ {
+ SkASSERT(coeff[0] != 0);
+
+ SkScalar inva = SkScalarInvert(coeff[0]);
+ a = coeff[1] * inva;
+ b = coeff[2] * inva;
+ c = coeff[3] * inva;
+ }
+ Q = (a*a - b*3) / 9;
+ R = (2*a*a*a - 9*a*b + 27*c) / 54;
+
+ SkScalar Q3 = Q * Q * Q;
+ SkScalar R2MinusQ3 = R * R - Q3;
+ SkScalar adiv3 = a / 3;
+
+ SkScalar* roots = tValues;
+ SkScalar r;
+
+ if (R2MinusQ3 < 0) { // we have 3 real roots
+        // the divide/root can, due to finite precision, be slightly outside of -1...1
+ SkScalar theta = SkScalarACos(SkScalarPin(R / SkScalarSqrt(Q3), -1, 1));
+ SkScalar neg2RootQ = -2 * SkScalarSqrt(Q);
+
+ r = neg2RootQ * SkScalarCos(theta/3) - adiv3;
+ if (is_unit_interval(r)) {
+ *roots++ = r;
+ }
+ r = neg2RootQ * SkScalarCos((theta + 2*SK_ScalarPI)/3) - adiv3;
+ if (is_unit_interval(r)) {
+ *roots++ = r;
+ }
+ r = neg2RootQ * SkScalarCos((theta - 2*SK_ScalarPI)/3) - adiv3;
+ if (is_unit_interval(r)) {
+ *roots++ = r;
+ }
+ SkDEBUGCODE(test_collaps_duplicates();)
+
+ // now sort the roots
+ int count = (int)(roots - tValues);
+ SkASSERT((unsigned)count <= 3);
+ bubble_sort(tValues, count);
+ count = collaps_duplicates(tValues, count);
+ roots = tValues + count; // so we compute the proper count below
+ } else { // we have 1 real root
+ SkScalar A = SkScalarAbs(R) + SkScalarSqrt(R2MinusQ3);
+ A = SkScalarCubeRoot(A);
+ if (R > 0) {
+ A = -A;
+ }
+ if (A != 0) {
+ A += Q / A;
+ }
+ r = A - adiv3;
+ if (is_unit_interval(r)) {
+ *roots++ = r;
+ }
+ }
+
+ return (int)(roots - tValues);
+}
+
+/* Looking for F' dot F'' == 0
+
+ A = b - a
+ B = c - 2b + a
+ C = d - 3c + 3b - a
+
+ F' = 3Ct^2 + 6Bt + 3A
+ F'' = 6Ct + 6B
+
+ F' dot F'' -> CCt^3 + 3BCt^2 + (2BB + CA)t + AB
+*/
+static void formulate_F1DotF2(const SkScalar src[], SkScalar coeff[4]) {
+ SkScalar a = src[2] - src[0];
+ SkScalar b = src[4] - 2 * src[2] + src[0];
+ SkScalar c = src[6] + 3 * (src[2] - src[4]) - src[0];
+
+ coeff[0] = c * c;
+ coeff[1] = 3 * b * c;
+ coeff[2] = 2 * b * b + c * a;
+ coeff[3] = a * b;
+}
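+
+// (Sanity note: the product (3Ct^2 + 6Bt + 3A)(6Ct + 6B) expands to 18 times
+// the cubic CCt^3 + 3BCt^2 + (2BB + CA)t + AB; the constant factor does not
+// affect the roots, so the coefficients above drop it.)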
+
+/* Looking for F' dot F'' == 0
+
+ A = b - a
+ B = c - 2b + a
+ C = d - 3c + 3b - a
+
+ F' = 3Ct^2 + 6Bt + 3A
+ F'' = 6Ct + 6B
+
+ F' dot F'' -> CCt^3 + 3BCt^2 + (2BB + CA)t + AB
+*/
+int SkFindCubicMaxCurvature(const SkPoint src[4], SkScalar tValues[3]) {
+ SkScalar coeffX[4], coeffY[4];
+ int i;
+
+ formulate_F1DotF2(&src[0].fX, coeffX);
+ formulate_F1DotF2(&src[0].fY, coeffY);
+
+ for (i = 0; i < 4; i++) {
+ coeffX[i] += coeffY[i];
+ }
+
+ SkScalar t[3];
+ int count = solve_cubic_poly(coeffX, t);
+ int maxCount = 0;
+
+ // now remove extrema where the curvature is zero (mins)
+ // !!!! need a test for this !!!!
+ for (i = 0; i < count; i++) {
+ // if (not_min_curvature())
+ if (t[i] > 0 && t[i] < SK_Scalar1) {
+ tValues[maxCount++] = t[i];
+ }
+ }
+ return maxCount;
+}
+
+int SkChopCubicAtMaxCurvature(const SkPoint src[4], SkPoint dst[13],
+ SkScalar tValues[3]) {
+ SkScalar t_storage[3];
+
+ if (tValues == nullptr) {
+ tValues = t_storage;
+ }
+
+ int count = SkFindCubicMaxCurvature(src, tValues);
+
+ if (dst) {
+ if (count == 0) {
+ memcpy(dst, src, 4 * sizeof(SkPoint));
+ } else {
+ SkChopCubicAt(src, dst, tValues, count);
+ }
+ }
+ return count + 1;
+}
+
+#include "../pathops/SkPathOpsCubic.h"
+
+typedef int (SkDCubic::*InterceptProc)(double intercept, double roots[3]) const;
+
+static bool cubic_dchop_at_intercept(const SkPoint src[4], SkScalar intercept, SkPoint dst[7],
+ InterceptProc method) {
+ SkDCubic cubic;
+ double roots[3];
+ int count = (cubic.set(src).*method)(intercept, roots);
+ if (count > 0) {
+ SkDCubicPair pair = cubic.chopAt(roots[0]);
+ for (int i = 0; i < 7; ++i) {
+ dst[i] = pair.pts[i].asSkPoint();
+ }
+ return true;
+ }
+ return false;
+}
+
+bool SkChopMonoCubicAtY(SkPoint src[4], SkScalar y, SkPoint dst[7]) {
+ return cubic_dchop_at_intercept(src, y, dst, &SkDCubic::horizontalIntersect);
+}
+
+bool SkChopMonoCubicAtX(SkPoint src[4], SkScalar x, SkPoint dst[7]) {
+ return cubic_dchop_at_intercept(src, x, dst, &SkDCubic::verticalIntersect);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// NURB representation for conics. Helpful explanations at:
+//
+// http://citeseerx.ist.psu.edu/viewdoc/
+// download?doi=10.1.1.44.5740&rep=rep1&type=ps
+// and
+// http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/NURBS/RB-conics.html
+//
+// F = (A (1 - t)^2 + C t^2 + 2 B (1 - t) t w)
+// ------------------------------------------
+// ((1 - t)^2 + t^2 + 2 (1 - t) t w)
+//
+// = {t^2 (P0 + P2 - 2 P1 w), t (-2 P0 + 2 P1 w), P0}
+// ------------------------------------------------
+// {t^2 (2 - 2 w), t (-2 + 2 w), 1}
+//
+
+// F' = 2 (C t (1 + t (-1 + w)) - A (-1 + t) (t (-1 + w) - w) + B (1 - 2 t) w)
+//
+// t^2 : (2 P0 - 2 P2 - 2 P0 w + 2 P2 w)
+// t^1 : (-2 P0 + 2 P2 + 4 P0 w - 4 P1 w)
+// t^0 : -2 P0 w + 2 P1 w
+//
+// We disregard magnitude, so we can freely ignore the denominator of F', and
+// divide the numerator by 2
+//
+// coeff[0] for t^2
+// coeff[1] for t^1
+// coeff[2] for t^0
+//
+static void conic_deriv_coeff(const SkScalar src[],
+ SkScalar w,
+ SkScalar coeff[3]) {
+ const SkScalar P20 = src[4] - src[0];
+ const SkScalar P10 = src[2] - src[0];
+ const SkScalar wP10 = w * P10;
+ coeff[0] = w * P20 - P20;
+ coeff[1] = P20 - 2 * wP10;
+ coeff[2] = wP10;
+}
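+
+// (Checking against the derivation above, with P10 = P1 - P0 and P20 = P2 - P0
+// taken one dimension at a time: halving the t^2, t^1, t^0 terms gives
+// (w - 1) * P20, P20 - 2 w * P10 and w * P10 -- exactly coeff[0..2].)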
+
+static bool conic_find_extrema(const SkScalar src[], SkScalar w, SkScalar* t) {
+ SkScalar coeff[3];
+ conic_deriv_coeff(src, w, coeff);
+
+ SkScalar tValues[2];
+ int roots = SkFindUnitQuadRoots(coeff[0], coeff[1], coeff[2], tValues);
+ SkASSERT(0 == roots || 1 == roots);
+
+ if (1 == roots) {
+ *t = tValues[0];
+ return true;
+ }
+ return false;
+}
+
+struct SkP3D {
+ SkScalar fX, fY, fZ;
+
+ void set(SkScalar x, SkScalar y, SkScalar z) {
+ fX = x; fY = y; fZ = z;
+ }
+
+ void projectDown(SkPoint* dst) const {
+ dst->set(fX / fZ, fY / fZ);
+ }
+};
+
+// We only interpolate one dimension at a time (the first, at +0, +3, +6).
+static void p3d_interp(const SkScalar src[7], SkScalar dst[7], SkScalar t) {
+ SkScalar ab = SkScalarInterp(src[0], src[3], t);
+ SkScalar bc = SkScalarInterp(src[3], src[6], t);
+ dst[0] = ab;
+ dst[3] = SkScalarInterp(ab, bc, t);
+ dst[6] = bc;
+}
+
+static void ratquad_mapTo3D(const SkPoint src[3], SkScalar w, SkP3D dst[]) {
+ dst[0].set(src[0].fX * 1, src[0].fY * 1, 1);
+ dst[1].set(src[1].fX * w, src[1].fY * w, w);
+ dst[2].set(src[2].fX * 1, src[2].fY * 1, 1);
+}
+
+// return false if infinity or NaN is generated; caller must check
+bool SkConic::chopAt(SkScalar t, SkConic dst[2]) const {
+ SkP3D tmp[3], tmp2[3];
+
+ ratquad_mapTo3D(fPts, fW, tmp);
+
+ p3d_interp(&tmp[0].fX, &tmp2[0].fX, t);
+ p3d_interp(&tmp[0].fY, &tmp2[0].fY, t);
+ p3d_interp(&tmp[0].fZ, &tmp2[0].fZ, t);
+
+ dst[0].fPts[0] = fPts[0];
+ tmp2[0].projectDown(&dst[0].fPts[1]);
+ tmp2[1].projectDown(&dst[0].fPts[2]); dst[1].fPts[0] = dst[0].fPts[2];
+ tmp2[2].projectDown(&dst[1].fPts[1]);
+ dst[1].fPts[2] = fPts[2];
+
+ // to put in "standard form", where w0 and w2 are both 1, we compute the
+    // new w1 as sqrt(w1*w1/(w0*w2))
+ // or
+ // w1 /= sqrt(w0*w2)
+ //
+ // However, in our case, we know that for dst[0]:
+ // w0 == 1, and for dst[1], w2 == 1
+ //
+ SkScalar root = SkScalarSqrt(tmp2[1].fZ);
+ dst[0].fW = tmp2[0].fZ / root;
+ dst[1].fW = tmp2[2].fZ / root;
+ SkASSERT(sizeof(dst[0]) == sizeof(SkScalar) * 7);
+ SkASSERT(0 == offsetof(SkConic, fPts[0].fX));
+ return SkScalarsAreFinite(&dst[0].fPts[0].fX, 7 * 2);
+}
+
+void SkConic::chopAt(SkScalar t1, SkScalar t2, SkConic* dst) const {
+ if (0 == t1 || 1 == t2) {
+ if (0 == t1 && 1 == t2) {
+ *dst = *this;
+ return;
+ } else {
+ SkConic pair[2];
+ if (this->chopAt(t1 ? t1 : t2, pair)) {
+ *dst = pair[SkToBool(t1)];
+ return;
+ }
+ }
+ }
+ SkConicCoeff coeff(*this);
+ Sk2s tt1(t1);
+ Sk2s aXY = coeff.fNumer.eval(tt1);
+ Sk2s aZZ = coeff.fDenom.eval(tt1);
+ Sk2s midTT((t1 + t2) / 2);
+ Sk2s dXY = coeff.fNumer.eval(midTT);
+ Sk2s dZZ = coeff.fDenom.eval(midTT);
+ Sk2s tt2(t2);
+ Sk2s cXY = coeff.fNumer.eval(tt2);
+ Sk2s cZZ = coeff.fDenom.eval(tt2);
+ Sk2s bXY = times_2(dXY) - (aXY + cXY) * Sk2s(0.5f);
+ Sk2s bZZ = times_2(dZZ) - (aZZ + cZZ) * Sk2s(0.5f);
+ dst->fPts[0] = to_point(aXY / aZZ);
+ dst->fPts[1] = to_point(bXY / bZZ);
+ dst->fPts[2] = to_point(cXY / cZZ);
+ Sk2s ww = bZZ / (aZZ * cZZ).sqrt();
+ dst->fW = ww[0];
+}
+
+SkPoint SkConic::evalAt(SkScalar t) const {
+ return to_point(SkConicCoeff(*this).eval(t));
+}
+
+SkVector SkConic::evalTangentAt(SkScalar t) const {
+ // The derivative equation returns a zero tangent vector when t is 0 or 1,
+ // and the control point is equal to the end point.
+ // In this case, use the conic endpoints to compute the tangent.
+ if ((t == 0 && fPts[0] == fPts[1]) || (t == 1 && fPts[1] == fPts[2])) {
+ return fPts[2] - fPts[0];
+ }
+ Sk2s p0 = from_point(fPts[0]);
+ Sk2s p1 = from_point(fPts[1]);
+ Sk2s p2 = from_point(fPts[2]);
+ Sk2s ww(fW);
+
+ Sk2s p20 = p2 - p0;
+ Sk2s p10 = p1 - p0;
+
+ Sk2s C = ww * p10;
+ Sk2s A = ww * p20 - p20;
+ Sk2s B = p20 - C - C;
+
+ return to_vector(SkQuadCoeff(A, B, C).eval(t));
+}
+
+void SkConic::evalAt(SkScalar t, SkPoint* pt, SkVector* tangent) const {
+ SkASSERT(t >= 0 && t <= SK_Scalar1);
+
+ if (pt) {
+ *pt = this->evalAt(t);
+ }
+ if (tangent) {
+ *tangent = this->evalTangentAt(t);
+ }
+}
+
+static SkScalar subdivide_w_value(SkScalar w) {
+ return SkScalarSqrt(SK_ScalarHalf + w * SK_ScalarHalf);
+}
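+
+// (For a circular-arc conic, w == cos(theta/2), so each subdivided half gets
+// w' = sqrt((1 + w) / 2) == cos(theta/4) by the half-angle identity.)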
+
+void SkConic::chop(SkConic * SK_RESTRICT dst) const {
+ Sk2s scale = Sk2s(SkScalarInvert(SK_Scalar1 + fW));
+ SkScalar newW = subdivide_w_value(fW);
+
+ Sk2s p0 = from_point(fPts[0]);
+ Sk2s p1 = from_point(fPts[1]);
+ Sk2s p2 = from_point(fPts[2]);
+ Sk2s ww(fW);
+
+ Sk2s wp1 = ww * p1;
+ Sk2s m = (p0 + times_2(wp1) + p2) * scale * Sk2s(0.5f);
+
+ dst[0].fPts[0] = fPts[0];
+ dst[0].fPts[1] = to_point((p0 + wp1) * scale);
+ dst[0].fPts[2] = dst[1].fPts[0] = to_point(m);
+ dst[1].fPts[1] = to_point((wp1 + p2) * scale);
+ dst[1].fPts[2] = fPts[2];
+
+ dst[0].fW = dst[1].fW = newW;
+}
+
+/*
+ * "High order approximation of conic sections by quadratic splines"
+ * by Michael Floater, 1993
+ */
+#define AS_QUAD_ERROR_SETUP \
+ SkScalar a = fW - 1; \
+ SkScalar k = a / (4 * (2 + a)); \
+ SkScalar x = k * (fPts[0].fX - 2 * fPts[1].fX + fPts[2].fX); \
+ SkScalar y = k * (fPts[0].fY - 2 * fPts[1].fY + fPts[2].fY);
+
+void SkConic::computeAsQuadError(SkVector* err) const {
+ AS_QUAD_ERROR_SETUP
+ err->set(x, y);
+}
+
+bool SkConic::asQuadTol(SkScalar tol) const {
+ AS_QUAD_ERROR_SETUP
+ return (x * x + y * y) <= tol * tol;
+}
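+
+// Illustrative check (example values only): a unit quarter-circle conic,
+// pts (1,0), (1,1), (0,1) with w = sqrt(2)/2, gives an error vector of about
+// (0.043, 0.043), i.e. magnitude ~0.06. With tol = 0.01, computeQuadPOW2()
+// below quarters that error twice before it fits, so the conic is approximated
+// by 1 << 2 = 4 quads.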
+
+// Limit the number of suggested quads to approximate a conic
+#define kMaxConicToQuadPOW2 5
+
+int SkConic::computeQuadPOW2(SkScalar tol) const {
+ if (tol < 0 || !SkScalarIsFinite(tol)) {
+ return 0;
+ }
+
+ AS_QUAD_ERROR_SETUP
+
+ SkScalar error = SkScalarSqrt(x * x + y * y);
+ int pow2;
+ for (pow2 = 0; pow2 < kMaxConicToQuadPOW2; ++pow2) {
+ if (error <= tol) {
+ break;
+ }
+ error *= 0.25f;
+ }
+ // float version -- using ceil gives the same results as the above.
+ if (false) {
+ SkScalar err = SkScalarSqrt(x * x + y * y);
+ if (err <= tol) {
+ return 0;
+ }
+ SkScalar tol2 = tol * tol;
+ if (tol2 == 0) {
+ return kMaxConicToQuadPOW2;
+ }
+ SkScalar fpow2 = SkScalarLog2((x * x + y * y) / tol2) * 0.25f;
+ int altPow2 = SkScalarCeilToInt(fpow2);
+ if (altPow2 != pow2) {
+ SkDebugf("pow2 %d altPow2 %d fbits %g err %g tol %g\n", pow2, altPow2, fpow2, err, tol);
+ }
+ pow2 = altPow2;
+ }
+ return pow2;
+}
+
+// This was originally developed and tested for pathops: see SkOpTypes.h
+// returns true if (a <= b <= c) || (a >= b >= c)
+static bool between(SkScalar a, SkScalar b, SkScalar c) {
+ return (a - b) * (c - b) <= 0;
+}
+
+static SkPoint* subdivide(const SkConic& src, SkPoint pts[], int level) {
+ SkASSERT(level >= 0);
+
+ if (0 == level) {
+ memcpy(pts, &src.fPts[1], 2 * sizeof(SkPoint));
+ return pts + 2;
+ } else {
+ SkConic dst[2];
+ src.chop(dst);
+ const SkScalar startY = src.fPts[0].fY;
+ const SkScalar endY = src.fPts[2].fY;
+ if (between(startY, src.fPts[1].fY, endY)) {
+ // If the input is monotonic and the output is not, the scan converter hangs.
+ // Ensure that the chopped conics maintain their y-order.
+ SkScalar midY = dst[0].fPts[2].fY;
+ if (!between(startY, midY, endY)) {
+ // If the computed midpoint is outside the ends, move it to the closer one.
+ SkScalar closerY = SkTAbs(midY - startY) < SkTAbs(midY - endY) ? startY : endY;
+ dst[0].fPts[2].fY = dst[1].fPts[0].fY = closerY;
+ }
+ if (!between(startY, dst[0].fPts[1].fY, dst[0].fPts[2].fY)) {
+ // If the 1st control is not between the start and end, put it at the start.
+ // This also reduces the quad to a line.
+ dst[0].fPts[1].fY = startY;
+ }
+ if (!between(dst[1].fPts[0].fY, dst[1].fPts[1].fY, endY)) {
+ // If the 2nd control is not between the start and end, put it at the end.
+ // This also reduces the quad to a line.
+ dst[1].fPts[1].fY = endY;
+ }
+ // Verify that all five points are in order.
+ SkASSERT(between(startY, dst[0].fPts[1].fY, dst[0].fPts[2].fY));
+ SkASSERT(between(dst[0].fPts[1].fY, dst[0].fPts[2].fY, dst[1].fPts[1].fY));
+ SkASSERT(between(dst[0].fPts[2].fY, dst[1].fPts[1].fY, endY));
+ }
+ --level;
+ pts = subdivide(dst[0], pts, level);
+ return subdivide(dst[1], pts, level);
+ }
+}
+
+int SkConic::chopIntoQuadsPOW2(SkPoint pts[], int pow2) const {
+ SkASSERT(pow2 >= 0);
+ *pts = fPts[0];
+ SkDEBUGCODE(SkPoint* endPts);
+ if (pow2 == kMaxConicToQuadPOW2) { // If an extreme weight generates many quads ...
+ SkConic dst[2];
+ this->chop(dst);
+ // check to see if the first chop generates a pair of lines
+ if (dst[0].fPts[1].equalsWithinTolerance(dst[0].fPts[2])
+ && dst[1].fPts[0].equalsWithinTolerance(dst[1].fPts[1])) {
+ pts[1] = pts[2] = pts[3] = dst[0].fPts[1]; // set ctrl == end to make lines
+ pts[4] = dst[1].fPts[2];
+ pow2 = 1;
+ SkDEBUGCODE(endPts = &pts[5]);
+ goto commonFinitePtCheck;
+ }
+ }
+ SkDEBUGCODE(endPts = ) subdivide(*this, pts + 1, pow2);
+commonFinitePtCheck:
+ const int quadCount = 1 << pow2;
+ const int ptCount = 2 * quadCount + 1;
+ SkASSERT(endPts - pts == ptCount);
+ if (!SkPointsAreFinite(pts, ptCount)) {
+        // if we generated a non-finite value, pin ourselves to the middle of the hull,
+ // as our first and last are already on the first/last pts of the hull.
+ for (int i = 1; i < ptCount - 1; ++i) {
+ pts[i] = fPts[1];
+ }
+ }
+ return 1 << pow2;
+}
+
+bool SkConic::findXExtrema(SkScalar* t) const {
+ return conic_find_extrema(&fPts[0].fX, fW, t);
+}
+
+bool SkConic::findYExtrema(SkScalar* t) const {
+ return conic_find_extrema(&fPts[0].fY, fW, t);
+}
+
+bool SkConic::chopAtXExtrema(SkConic dst[2]) const {
+ SkScalar t;
+ if (this->findXExtrema(&t)) {
+ if (!this->chopAt(t, dst)) {
+ // if chop can't return finite values, don't chop
+ return false;
+ }
+ // now clean-up the middle, since we know t was meant to be at
+ // an X-extrema
+ SkScalar value = dst[0].fPts[2].fX;
+ dst[0].fPts[1].fX = value;
+ dst[1].fPts[0].fX = value;
+ dst[1].fPts[1].fX = value;
+ return true;
+ }
+ return false;
+}
+
+bool SkConic::chopAtYExtrema(SkConic dst[2]) const {
+ SkScalar t;
+ if (this->findYExtrema(&t)) {
+ if (!this->chopAt(t, dst)) {
+ // if chop can't return finite values, don't chop
+ return false;
+ }
+ // now clean-up the middle, since we know t was meant to be at
+ // an Y-extrema
+ SkScalar value = dst[0].fPts[2].fY;
+ dst[0].fPts[1].fY = value;
+ dst[1].fPts[0].fY = value;
+ dst[1].fPts[1].fY = value;
+ return true;
+ }
+ return false;
+}
+
+void SkConic::computeTightBounds(SkRect* bounds) const {
+ SkPoint pts[4];
+ pts[0] = fPts[0];
+ pts[1] = fPts[2];
+ int count = 2;
+
+ SkScalar t;
+ if (this->findXExtrema(&t)) {
+ this->evalAt(t, &pts[count++]);
+ }
+ if (this->findYExtrema(&t)) {
+ this->evalAt(t, &pts[count++]);
+ }
+ bounds->set(pts, count);
+}
+
+void SkConic::computeFastBounds(SkRect* bounds) const {
+ bounds->set(fPts, 3);
+}
+
+#if 0 // unimplemented
+bool SkConic::findMaxCurvature(SkScalar* t) const {
+ // TODO: Implement me
+ return false;
+}
+#endif
+
+SkScalar SkConic::TransformW(const SkPoint pts[], SkScalar w,
+ const SkMatrix& matrix) {
+ if (!matrix.hasPerspective()) {
+ return w;
+ }
+
+ SkP3D src[3], dst[3];
+
+ ratquad_mapTo3D(pts, w, src);
+
+ matrix.mapHomogeneousPoints(&dst[0].fX, &src[0].fX, 3);
+
+ // w' = sqrt(w1*w1/w0*w2)
+ SkScalar w0 = dst[0].fZ;
+ SkScalar w1 = dst[1].fZ;
+ SkScalar w2 = dst[2].fZ;
+ w = SkScalarSqrt((w1 * w1) / (w0 * w2));
+ return w;
+}
+
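+// Usage sketch (example values only): for a half-circle with uStart = (1,0),
+// uStop = (-1,0) and kCW_SkRotationDirection, the code below lands in
+// quadrant 2, emits two 90-degree conics of weight sqrt(2)/2, finds no
+// sub-90-degree remainder, and returns 2.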
+int SkConic::BuildUnitArc(const SkVector& uStart, const SkVector& uStop, SkRotationDirection dir,
+ const SkMatrix* userMatrix, SkConic dst[kMaxConicsForArc]) {
+ // rotate by x,y so that uStart is (1.0)
+ SkScalar x = SkPoint::DotProduct(uStart, uStop);
+ SkScalar y = SkPoint::CrossProduct(uStart, uStop);
+
+ SkScalar absY = SkScalarAbs(y);
+
+ // check for (effectively) coincident vectors
+ // this can happen if our angle is nearly 0 or nearly 180 (y == 0)
+ // ... we use the dot-prod to distinguish between 0 and 180 (x > 0)
+ if (absY <= SK_ScalarNearlyZero && x > 0 && ((y >= 0 && kCW_SkRotationDirection == dir) ||
+ (y <= 0 && kCCW_SkRotationDirection == dir))) {
+ return 0;
+ }
+
+ if (dir == kCCW_SkRotationDirection) {
+ y = -y;
+ }
+
+    // We decide to use one conic per quadrant of a circle. What quadrant does [xy] lie in?
+ // 0 == [0 .. 90)
+ // 1 == [90 ..180)
+ // 2 == [180..270)
+ // 3 == [270..360)
+ //
+ int quadrant = 0;
+ if (0 == y) {
+ quadrant = 2; // 180
+ SkASSERT(SkScalarAbs(x + SK_Scalar1) <= SK_ScalarNearlyZero);
+ } else if (0 == x) {
+ SkASSERT(absY - SK_Scalar1 <= SK_ScalarNearlyZero);
+ quadrant = y > 0 ? 1 : 3; // 90 : 270
+ } else {
+ if (y < 0) {
+ quadrant += 2;
+ }
+ if ((x < 0) != (y < 0)) {
+ quadrant += 1;
+ }
+ }
+
+ const SkPoint quadrantPts[] = {
+ { 1, 0 }, { 1, 1 }, { 0, 1 }, { -1, 1 }, { -1, 0 }, { -1, -1 }, { 0, -1 }, { 1, -1 }
+ };
+ const SkScalar quadrantWeight = SK_ScalarRoot2Over2;
+
+ int conicCount = quadrant;
+ for (int i = 0; i < conicCount; ++i) {
+ dst[i].set(&quadrantPts[i * 2], quadrantWeight);
+ }
+
+    // Now compute any remaining (sub-90-degree) arc for the last conic
+ const SkPoint finalP = { x, y };
+ const SkPoint& lastQ = quadrantPts[quadrant * 2]; // will already be a unit-vector
+ const SkScalar dot = SkVector::DotProduct(lastQ, finalP);
+ if (!SkScalarIsFinite(dot)) {
+ return 0;
+ }
+ SkASSERT(0 <= dot && dot <= SK_Scalar1 + SK_ScalarNearlyZero);
+
+ if (dot < 1) {
+ SkVector offCurve = { lastQ.x() + x, lastQ.y() + y };
+ // compute the bisector vector, and then rescale to be the off-curve point.
+        // we compute its length from cos(theta/2) = 1 / length; using the half-angle identity we get
+        // length = sqrt(2 / (1 + cos(theta))). We already have cos() from when we computed the dot.
+ // This is nice, since our computed weight is cos(theta/2) as well!
+ //
+ const SkScalar cosThetaOver2 = SkScalarSqrt((1 + dot) / 2);
+ offCurve.setLength(SkScalarInvert(cosThetaOver2));
+ if (!lastQ.equalsWithinTolerance(offCurve)) {
+ dst[conicCount].set(lastQ, offCurve, finalP, cosThetaOver2);
+ conicCount += 1;
+ }
+ }
+
+ // now handle counter-clockwise and the initial unitStart rotation
+ SkMatrix matrix;
+ matrix.setSinCos(uStart.fY, uStart.fX);
+ if (dir == kCCW_SkRotationDirection) {
+ matrix.preScale(SK_Scalar1, -SK_Scalar1);
+ }
+ if (userMatrix) {
+ matrix.postConcat(*userMatrix);
+ }
+ for (int i = 0; i < conicCount; ++i) {
+ matrix.mapPoints(dst[i].fPts, 3);
+ }
+ return conicCount;
+}
diff --git a/gfx/skia/skia/src/core/SkGeometry.h b/gfx/skia/skia/src/core/SkGeometry.h
new file mode 100644
index 000000000..55d763b96
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGeometry.h
@@ -0,0 +1,409 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGeometry_DEFINED
+#define SkGeometry_DEFINED
+
+#include "SkMatrix.h"
+#include "SkNx.h"
+
+static inline Sk2s from_point(const SkPoint& point) {
+ return Sk2s::Load(&point);
+}
+
+static inline SkPoint to_point(const Sk2s& x) {
+ SkPoint point;
+ x.store(&point);
+ return point;
+}
+
+static Sk2s times_2(const Sk2s& value) {
+ return value + value;
+}
+
+/** Given a quadratic equation Ax^2 + Bx + C = 0, return 0, 1, 2 roots for the
+ equation.
+*/
+int SkFindUnitQuadRoots(SkScalar A, SkScalar B, SkScalar C, SkScalar roots[2]);
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPoint SkEvalQuadAt(const SkPoint src[3], SkScalar t);
+SkPoint SkEvalQuadTangentAt(const SkPoint src[3], SkScalar t);
+
+/** Set pt to the point on the src quadratic specified by t. t must be
+ 0 <= t <= 1.0
+*/
+void SkEvalQuadAt(const SkPoint src[3], SkScalar t, SkPoint* pt, SkVector* tangent = nullptr);
+
+/** Given a src quadratic bezier, chop it at the specified t value,
+ where 0 < t < 1, and return the two new quadratics in dst:
+ dst[0..2] and dst[2..4]
+*/
+void SkChopQuadAt(const SkPoint src[3], SkPoint dst[5], SkScalar t);
+
+/** Given a src quadratic bezier, chop it at t == 1/2.
+    The new quads are returned in dst[0..2] and dst[2..4].
+*/
+void SkChopQuadAtHalf(const SkPoint src[3], SkPoint dst[5]);
+
+/** Given the 3 coefficients for a quadratic bezier (either X or Y values), look
+ for extrema, and return the number of t-values that are found that represent
+    these extrema. If the quadratic has no extrema between (0..1) exclusive, the
+ function returns 0.
+ Returned count tValues[]
+ 0 ignored
+ 1 0 < tValues[0] < 1
+*/
+int SkFindQuadExtrema(SkScalar a, SkScalar b, SkScalar c, SkScalar tValues[1]);
+
+/** Given 3 points on a quadratic bezier, chop it into 1, 2 beziers such that
+ the resulting beziers are monotonic in Y. This is called by the scan converter.
+ Depending on what is returned, dst[] is treated as follows
+ 0 dst[0..2] is the original quad
+ 1 dst[0..2] and dst[2..4] are the two new quads
+*/
+int SkChopQuadAtYExtrema(const SkPoint src[3], SkPoint dst[5]);
+int SkChopQuadAtXExtrema(const SkPoint src[3], SkPoint dst[5]);
+
+/** Given 3 points on a quadratic bezier, if the point of maximum
+ curvature exists on the segment, returns the t value for this
+ point along the curve. Otherwise it will return a value of 0.
+*/
+SkScalar SkFindQuadMaxCurvature(const SkPoint src[3]);
+
+/** Given 3 points on a quadratic bezier, divide it into 2 quadratics
+ if the point of maximum curvature exists on the quad segment.
+ Depending on what is returned, dst[] is treated as follows
+ 1 dst[0..2] is the original quad
+ 2 dst[0..2] and dst[2..4] are the two new quads
+ If dst == null, it is ignored and only the count is returned.
+*/
+int SkChopQuadAtMaxCurvature(const SkPoint src[3], SkPoint dst[5]);
+
+/** Given 3 points on a quadratic bezier, use degree elevation to
+ convert it into the cubic fitting the same curve. The new cubic
+ curve is returned in dst[0..3].
+*/
+SK_API void SkConvertQuadToCubic(const SkPoint src[3], SkPoint dst[4]);
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** Set pt to the point on the src cubic specified by t. t must be
+ 0 <= t <= 1.0
+*/
+void SkEvalCubicAt(const SkPoint src[4], SkScalar t, SkPoint* locOrNull,
+ SkVector* tangentOrNull, SkVector* curvatureOrNull);
+
+/** Given a src cubic bezier, chop it at the specified t value,
+ where 0 < t < 1, and return the two new cubics in dst:
+ dst[0..3] and dst[3..6]
+*/
+void SkChopCubicAt(const SkPoint src[4], SkPoint dst[7], SkScalar t);
+
+/** Given a src cubic bezier, chop it at the specified t values,
+ where 0 < t < 1, and return the new cubics in dst:
+ dst[0..3],dst[3..6],...,dst[3*t_count..3*(t_count+1)]
+*/
+void SkChopCubicAt(const SkPoint src[4], SkPoint dst[], const SkScalar t[],
+ int t_count);
+
+/** Given a src cubic bezier, chop it at t == 1/2.
+    The new cubics are returned in dst[0..3] and dst[3..6].
+*/
+void SkChopCubicAtHalf(const SkPoint src[4], SkPoint dst[7]);
+
+/** Given the 4 coefficients for a cubic bezier (either X or Y values), look
+ for extrema, and return the number of t-values that are found that represent
+    these extrema. If the cubic has no extrema between (0..1) exclusive, the
+ function returns 0.
+ Returned count tValues[]
+ 0 ignored
+ 1 0 < tValues[0] < 1
+ 2 0 < tValues[0] < tValues[1] < 1
+*/
+int SkFindCubicExtrema(SkScalar a, SkScalar b, SkScalar c, SkScalar d,
+ SkScalar tValues[2]);
+
+/** Given 4 points on a cubic bezier, chop it into 1, 2, 3 beziers such that
+ the resulting beziers are monotonic in Y. This is called by the scan converter.
+ Depending on what is returned, dst[] is treated as follows
+ 0 dst[0..3] is the original cubic
+ 1 dst[0..3] and dst[3..6] are the two new cubics
+ 2 dst[0..3], dst[3..6], dst[6..9] are the three new cubics
+ If dst == null, it is ignored and only the count is returned.
+*/
+int SkChopCubicAtYExtrema(const SkPoint src[4], SkPoint dst[10]);
+int SkChopCubicAtXExtrema(const SkPoint src[4], SkPoint dst[10]);
+
+/** Given a cubic bezier, return 0, 1, or 2 t-values that represent the
+ inflection points.
+*/
+int SkFindCubicInflections(const SkPoint src[4], SkScalar tValues[2]);
+
+/** Return 1 for no chop, 2 for having chopped the cubic at a single
+ inflection point, 3 for having chopped at 2 inflection points.
+ dst will hold the resulting 1, 2, or 3 cubics.
+*/
+int SkChopCubicAtInflections(const SkPoint src[4], SkPoint dst[10]);
+
+int SkFindCubicMaxCurvature(const SkPoint src[4], SkScalar tValues[3]);
+int SkChopCubicAtMaxCurvature(const SkPoint src[4], SkPoint dst[13],
+ SkScalar tValues[3] = nullptr);
+
+bool SkChopMonoCubicAtX(SkPoint src[4], SkScalar x, SkPoint dst[7]);
+bool SkChopMonoCubicAtY(SkPoint src[4], SkScalar y, SkPoint dst[7]);
+
+enum SkCubicType {
+ kSerpentine_SkCubicType,
+ kCusp_SkCubicType,
+ kLoop_SkCubicType,
+ kQuadratic_SkCubicType,
+ kLine_SkCubicType,
+ kPoint_SkCubicType
+};
+
+/** Returns the cubic classification. Pass scratch storage for computing inflection data,
+ which can be used with additional work to find the loop intersections and so on.
+*/
+SkCubicType SkClassifyCubic(const SkPoint p[4], SkScalar inflection[3]);
+
+///////////////////////////////////////////////////////////////////////////////
+
+enum SkRotationDirection {
+ kCW_SkRotationDirection,
+ kCCW_SkRotationDirection
+};
+
+struct SkConic {
+ SkConic() {}
+ SkConic(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2, SkScalar w) {
+ fPts[0] = p0;
+ fPts[1] = p1;
+ fPts[2] = p2;
+ fW = w;
+ }
+ SkConic(const SkPoint pts[3], SkScalar w) {
+ memcpy(fPts, pts, sizeof(fPts));
+ fW = w;
+ }
+
+ SkPoint fPts[3];
+ SkScalar fW;
+
+ void set(const SkPoint pts[3], SkScalar w) {
+ memcpy(fPts, pts, 3 * sizeof(SkPoint));
+ fW = w;
+ }
+
+ void set(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2, SkScalar w) {
+ fPts[0] = p0;
+ fPts[1] = p1;
+ fPts[2] = p2;
+ fW = w;
+ }
+
+ /**
+ * Given a t-value [0...1] return its position and/or tangent.
+ * If pos is not null, return its position at the t-value.
+ * If tangent is not null, return its tangent at the t-value. NOTE the
+ * tangent value's length is arbitrary, and only its direction should
+ * be used.
+ */
+ void evalAt(SkScalar t, SkPoint* pos, SkVector* tangent = nullptr) const;
+ bool SK_WARN_UNUSED_RESULT chopAt(SkScalar t, SkConic dst[2]) const;
+ void chopAt(SkScalar t1, SkScalar t2, SkConic* dst) const;
+ void chop(SkConic dst[2]) const;
+
+ SkPoint evalAt(SkScalar t) const;
+ SkVector evalTangentAt(SkScalar t) const;
+
+ void computeAsQuadError(SkVector* err) const;
+ bool asQuadTol(SkScalar tol) const;
+
+ /**
+ * return the power-of-2 number of quads needed to approximate this conic
+ * with a sequence of quads. Will be >= 0.
+ */
+ int computeQuadPOW2(SkScalar tol) const;
+
+ /**
+     * Chop this conic into N quads, stored contiguously in pts[], where
+ * N = 1 << pow2. The amount of storage needed is (1 + 2 * N)
+ */
+ int SK_WARN_UNUSED_RESULT chopIntoQuadsPOW2(SkPoint pts[], int pow2) const;
+
+ bool findXExtrema(SkScalar* t) const;
+ bool findYExtrema(SkScalar* t) const;
+ bool chopAtXExtrema(SkConic dst[2]) const;
+ bool chopAtYExtrema(SkConic dst[2]) const;
+
+ void computeTightBounds(SkRect* bounds) const;
+ void computeFastBounds(SkRect* bounds) const;
+
+ /** Find the parameter value where the conic takes on its maximum curvature.
+ *
+ * @param t output scalar for max curvature. Will be unchanged if
+ * max curvature outside 0..1 range.
+ *
+ * @return true if max curvature found inside 0..1 range, false otherwise
+ */
+// bool findMaxCurvature(SkScalar* t) const; // unimplemented
+
+ static SkScalar TransformW(const SkPoint[3], SkScalar w, const SkMatrix&);
+
+ enum {
+ kMaxConicsForArc = 5
+ };
+ static int BuildUnitArc(const SkVector& start, const SkVector& stop, SkRotationDirection,
+ const SkMatrix*, SkConic conics[kMaxConicsForArc]);
+};
+
+// inline helpers are contained in a namespace to avoid external leakage to fragile SkNx members
+namespace {
+
+/**
+ * use for : eval(t) == A * t^2 + B * t + C
+ */
+struct SkQuadCoeff {
+ SkQuadCoeff() {}
+
+ SkQuadCoeff(const Sk2s& A, const Sk2s& B, const Sk2s& C)
+ : fA(A)
+ , fB(B)
+ , fC(C)
+ {
+ }
+
+ SkQuadCoeff(const SkPoint src[3]) {
+ fC = from_point(src[0]);
+ Sk2s P1 = from_point(src[1]);
+ Sk2s P2 = from_point(src[2]);
+ fB = times_2(P1 - fC);
+ fA = P2 - times_2(P1) + fC;
+ }
+
+ Sk2s eval(SkScalar t) {
+ Sk2s tt(t);
+ return eval(tt);
+ }
+
+ Sk2s eval(const Sk2s& tt) {
+ return (fA * tt + fB) * tt + fC;
+ }
+
+ Sk2s fA;
+ Sk2s fB;
+ Sk2s fC;
+};
+
+struct SkConicCoeff {
+ SkConicCoeff(const SkConic& conic) {
+ Sk2s p0 = from_point(conic.fPts[0]);
+ Sk2s p1 = from_point(conic.fPts[1]);
+ Sk2s p2 = from_point(conic.fPts[2]);
+ Sk2s ww(conic.fW);
+
+ Sk2s p1w = p1 * ww;
+ fNumer.fC = p0;
+ fNumer.fA = p2 - times_2(p1w) + p0;
+ fNumer.fB = times_2(p1w - p0);
+
+ fDenom.fC = Sk2s(1);
+ fDenom.fB = times_2(ww - fDenom.fC);
+ fDenom.fA = Sk2s(0) - fDenom.fB;
+ }
+
+ Sk2s eval(SkScalar t) {
+ Sk2s tt(t);
+ Sk2s numer = fNumer.eval(tt);
+ Sk2s denom = fDenom.eval(tt);
+ return numer / denom;
+ }
+
+ SkQuadCoeff fNumer;
+ SkQuadCoeff fDenom;
+};
+
+struct SkCubicCoeff {
+ SkCubicCoeff(const SkPoint src[4]) {
+ Sk2s P0 = from_point(src[0]);
+ Sk2s P1 = from_point(src[1]);
+ Sk2s P2 = from_point(src[2]);
+ Sk2s P3 = from_point(src[3]);
+ Sk2s three(3);
+ fA = P3 + three * (P1 - P2) - P0;
+ fB = three * (P2 - times_2(P1) + P0);
+ fC = three * (P1 - P0);
+ fD = P0;
+ }
+
+ Sk2s eval(SkScalar t) {
+ Sk2s tt(t);
+ return eval(tt);
+ }
+
+ Sk2s eval(const Sk2s& t) {
+ return ((fA * t + fB) * t + fC) * t + fD;
+ }
+
+ Sk2s fA;
+ Sk2s fB;
+ Sk2s fC;
+ Sk2s fD;
+};
+
+}
+
+#include "SkTemplates.h"
+
+/**
+ * Helper class to allocate storage for approximating a conic with N quads.
+ */
+class SkAutoConicToQuads {
+public:
+ SkAutoConicToQuads() : fQuadCount(0) {}
+
+ /**
+ * Given a conic and a tolerance, return the array of points for the
+ * approximating quad(s). Call countQuads() to know the number of quads
+ * represented in these points.
+ *
+ * The quads are allocated to share end-points. e.g. if there are 4 quads,
+ * there will be 9 points allocated as follows
+ * quad[0] == pts[0..2]
+ * quad[1] == pts[2..4]
+ * quad[2] == pts[4..6]
+ * quad[3] == pts[6..8]
+ */
+ const SkPoint* computeQuads(const SkConic& conic, SkScalar tol) {
+ int pow2 = conic.computeQuadPOW2(tol);
+ fQuadCount = 1 << pow2;
+ SkPoint* pts = fStorage.reset(1 + 2 * fQuadCount);
+ fQuadCount = conic.chopIntoQuadsPOW2(pts, pow2);
+ return pts;
+ }
+
+ const SkPoint* computeQuads(const SkPoint pts[3], SkScalar weight,
+ SkScalar tol) {
+ SkConic conic;
+ conic.set(pts, weight);
+ return computeQuads(conic, tol);
+ }
+
+ int countQuads() const { return fQuadCount; }
+
+private:
+ enum {
+ kQuadCount = 8, // should handle most conics
+ kPointCount = 1 + 2 * kQuadCount,
+ };
+ SkAutoSTMalloc<kPointCount, SkPoint> fStorage;
+ int fQuadCount; // #quads for current usage
+};
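+
+// Example use (sketch; the identifiers conicPts, w and tol are illustrative):
+//
+//   SkAutoConicToQuads converter;
+//   const SkPoint* quadPts = converter.computeQuads(conicPts, w, tol);
+//   for (int i = 0; i < converter.countQuads(); ++i) {
+//       // quad i is quadPts[2*i], quadPts[2*i + 1], quadPts[2*i + 2]
+//   }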
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkGlobalInitialization_core.cpp b/gfx/skia/skia/src/core/SkGlobalInitialization_core.cpp
new file mode 100644
index 000000000..21c4d16c4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGlobalInitialization_core.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapProcShader.h"
+#include "SkColorFilter.h"
+#include "SkColorFilterShader.h"
+#include "SkColorShader.h"
+#include "SkComposeShader.h"
+#include "SkEmptyShader.h"
+#include "SkFlattenable.h"
+#include "SkImageShader.h"
+#include "SkLocalMatrixShader.h"
+#include "SkMatrixImageFilter.h"
+#include "SkOnce.h"
+#include "SkPathEffect.h"
+#include "SkPictureShader.h"
+#include "SkRecordedDrawable.h"
+#include "SkXfermode.h"
+
+/*
+ * Registers all of the required effects subclasses for picture deserialization.
+ *
+ * Optional subclasses (e.g. Blur) should be registered in the ports/ version of this file,
+ * inside the InitEffects() method.
+ */
+void SkFlattenable::PrivateInitializer::InitCore() {
+ // Shader
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkColorFilterShader)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkColorShader)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkColor4Shader)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkComposeShader)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkEmptyShader)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkLocalMatrixShader)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkPictureShader)
+
+ // PathEffect
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkComposePathEffect)
+
+ // ImageFilter
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkMatrixImageFilter)
+
+ // ColorFilter
+ SkColorFilter::InitializeFlattenables();
+
+ SkShader::InitializeFlattenables();
+
+ // Xfermode
+ SkXfermode::InitializeFlattenables();
+
+ // Drawable
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkRecordedDrawable)
+
+ // Now initialize any optional/additional effects (implemented in src/ports)
+ InitEffects();
+};
+
+void SkFlattenable::InitializeFlattenablesIfNeeded() {
+ static SkOnce once;
+ once(SkFlattenable::PrivateInitializer::InitCore);
+}
diff --git a/gfx/skia/skia/src/core/SkGlyph.h b/gfx/skia/skia/src/core/SkGlyph.h
new file mode 100644
index 000000000..04f9296b5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGlyph.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGlyph_DEFINED
+#define SkGlyph_DEFINED
+
+#include "SkChecksum.h"
+#include "SkTypes.h"
+#include "SkFixed.h"
+#include "SkMask.h"
+
+class SkPath;
+class SkGlyphCache;
+
+// needs to be != to any valid SkMask::Format
+#define MASK_FORMAT_UNKNOWN (0xFF)
+#define MASK_FORMAT_JUST_ADVANCE MASK_FORMAT_UNKNOWN
+
+#define kMaxGlyphWidth (1<<13)
+
+SK_BEGIN_REQUIRE_DENSE
+class SkGlyph {
+ enum {
+ kSubBits = 2,
+ kSubMask = ((1 << kSubBits) - 1),
+ kSubShift = 24, // must be large enough for glyphs and unichars
+ kCodeMask = ((1 << kSubShift) - 1),
+ // relative offsets for X and Y subpixel bits
+ kSubShiftX = kSubBits,
+ kSubShiftY = 0
+ };
+
+ // Support horizontal and vertical skipping strike-through / underlines.
+ // The caller walks the linked list looking for a match. For a horizontal underline,
+ // the fBounds contains the top and bottom of the underline. The fInterval pair contains the
+    // beginning and end of the intersection of the bounds and the glyph's path.
+    // If interval[0] >= interval[1], no intersection was found.
+ struct Intercept {
+ Intercept* fNext;
+ SkScalar fBounds[2]; // for horz underlines, the boundaries in Y
+ SkScalar fInterval[2]; // the outside intersections of the axis and the glyph
+ };
+
+ struct PathData {
+ Intercept* fIntercept;
+ SkPath* fPath;
+ };
+
+public:
+ static const SkFixed kSubpixelRound = SK_FixedHalf >> SkGlyph::kSubBits;
+ // A value that can never be generated by MakeID.
+ static const uint32_t kImpossibleID = ~0;
+ void* fImage;
+ PathData* fPathData;
+ float fAdvanceX, fAdvanceY;
+
+ uint16_t fWidth, fHeight;
+ int16_t fTop, fLeft;
+
+ uint8_t fMaskFormat;
+ int8_t fRsbDelta, fLsbDelta; // used by auto-kerning
+ int8_t fForceBW;
+
+ void initWithGlyphID(uint32_t glyph_id) {
+ this->initCommon(MakeID(glyph_id));
+ }
+
+ void initGlyphIdFrom(const SkGlyph& glyph) {
+ this->initCommon(glyph.fID);
+ }
+
+ void initGlyphFromCombinedID(uint32_t combined_id) {
+ this->initCommon(combined_id);
+ }
+
+ /**
+ * Compute the rowbytes for the specified width and mask-format.
+ */
+ static unsigned ComputeRowBytes(unsigned width, SkMask::Format format) {
+ unsigned rb = width;
+ if (SkMask::kBW_Format == format) {
+ rb = (rb + 7) >> 3;
+ } else if (SkMask::kARGB32_Format == format) {
+ rb <<= 2;
+ } else if (SkMask::kLCD16_Format == format) {
+ rb = SkAlign4(rb << 1);
+ } else {
+ rb = SkAlign4(rb);
+ }
+ return rb;
+ }
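+    // (For example, with width == 10: kBW_Format -> 2 bytes per row,
+    // kARGB32_Format -> 40, kLCD16_Format -> 20, and the default A8 case -> 12
+    // after 4-byte alignment.)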
+
+ unsigned rowBytes() const {
+ return ComputeRowBytes(fWidth, (SkMask::Format)fMaskFormat);
+ }
+
+ bool isJustAdvance() const {
+ return MASK_FORMAT_JUST_ADVANCE == fMaskFormat;
+ }
+
+ bool isFullMetrics() const {
+ return MASK_FORMAT_JUST_ADVANCE != fMaskFormat;
+ }
+
+ uint16_t getGlyphID() const {
+ return ID2Code(fID);
+ }
+
+ unsigned getSubX() const {
+ return ID2SubX(fID);
+ }
+
+ SkFixed getSubXFixed() const {
+ return SubToFixed(ID2SubX(fID));
+ }
+
+ SkFixed getSubYFixed() const {
+ return SubToFixed(ID2SubY(fID));
+ }
+
+ size_t computeImageSize() const;
+
+ /** Call this to set all of the metrics fields to 0 (e.g. if the scaler
+ encounters an error measuring a glyph). Note: this does not alter the
+ fImage, fPath, fID, fMaskFormat fields.
+ */
+ void zeroMetrics();
+
+ void toMask(SkMask* mask) const;
+
+ class HashTraits {
+ public:
+ static uint32_t GetKey(const SkGlyph& glyph) {
+ return glyph.fID;
+ }
+ static uint32_t Hash(uint32_t glyphId) {
+ return SkChecksum::CheapMix(glyphId);
+ }
+ };
+
+ private:
+ // TODO(herb) remove friend statement after SkGlyphCache cleanup.
+ friend class SkGlyphCache;
+
+ void initCommon(uint32_t id) {
+ fID = id;
+ fImage = nullptr;
+ fPathData = nullptr;
+ fMaskFormat = MASK_FORMAT_UNKNOWN;
+ fForceBW = 0;
+ }
+
+ static unsigned ID2Code(uint32_t id) {
+ return id & kCodeMask;
+ }
+
+ static unsigned ID2SubX(uint32_t id) {
+ return id >> (kSubShift + kSubShiftX);
+ }
+
+ static unsigned ID2SubY(uint32_t id) {
+ return (id >> (kSubShift + kSubShiftY)) & kSubMask;
+ }
+
+ static unsigned FixedToSub(SkFixed n) {
+ return (n >> (16 - kSubBits)) & kSubMask;
+ }
+
+ static SkFixed SubToFixed(unsigned sub) {
+ SkASSERT(sub <= kSubMask);
+ return sub << (16 - kSubBits);
+ }
+
+ static uint32_t MakeID(unsigned code) {
+ SkASSERT(code <= kCodeMask);
+ SkASSERT(code != kImpossibleID);
+ return code;
+ }
+
+ static uint32_t MakeID(unsigned code, SkFixed x, SkFixed y) {
+ SkASSERT(code <= kCodeMask);
+ x = FixedToSub(x);
+ y = FixedToSub(y);
+ uint32_t ID = (x << (kSubShift + kSubShiftX)) |
+ (y << (kSubShift + kSubShiftY)) |
+ code;
+ SkASSERT(ID != kImpossibleID);
+ return ID;
+ }
+
+    // FIXME - This is needed because the Android framework directly
+ // accesses fID. Remove when fID accesses are cleaned up.
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ public:
+#endif
+ uint32_t fID;
+};
+SK_END_REQUIRE_DENSE
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkGlyphCache.cpp b/gfx/skia/skia/src/core/SkGlyphCache.cpp
new file mode 100644
index 000000000..a8eaa667a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGlyphCache.cpp
@@ -0,0 +1,859 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkGlyphCache.h"
+#include "SkGlyphCache_Globals.h"
+#include "SkGraphics.h"
+#include "SkOnce.h"
+#include "SkPath.h"
+#include "SkTemplates.h"
+#include "SkTraceMemoryDump.h"
+#include "SkTypeface.h"
+
+#include <cctype>
+
+//#define SPEW_PURGE_STATUS
+
+namespace {
+const char gGlyphCacheDumpName[] = "skia/sk_glyph_cache";
+} // namespace
+
+// Returns the shared globals
+static SkGlyphCache_Globals& get_globals() {
+ static SkOnce once;
+ static SkGlyphCache_Globals* globals;
+
+ once([]{ globals = new SkGlyphCache_Globals; });
+ return *globals;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// so we don't grow our arrays a lot
+#define kMinGlyphCount 16
+#define kMinGlyphImageSize (16*2)
+#define kMinAllocAmount ((sizeof(SkGlyph) + kMinGlyphImageSize) * kMinGlyphCount)
+
+SkGlyphCache::SkGlyphCache(SkTypeface* typeface, const SkDescriptor* desc, SkScalerContext* ctx)
+ : fDesc(desc->copy())
+ , fScalerContext(ctx)
+ , fGlyphAlloc(kMinAllocAmount) {
+ SkASSERT(typeface);
+ SkASSERT(desc);
+ SkASSERT(ctx);
+
+ fPrev = fNext = nullptr;
+
+ fScalerContext->getFontMetrics(&fFontMetrics);
+
+ fMemoryUsed = sizeof(*this);
+
+ fAuxProcList = nullptr;
+}
+
+SkGlyphCache::~SkGlyphCache() {
+ fGlyphMap.foreach ([](SkGlyph* g) {
+ if (g->fPathData) {
+ delete g->fPathData->fPath;
+ } } );
+ SkDescriptor::Free(fDesc);
+ delete fScalerContext;
+ this->invokeAndRemoveAuxProcs();
+}
+
+SkGlyphCache::CharGlyphRec* SkGlyphCache::getCharGlyphRec(PackedUnicharID packedUnicharID) {
+ if (nullptr == fPackedUnicharIDToPackedGlyphID.get()) {
+ // Allocate the array.
+ fPackedUnicharIDToPackedGlyphID.reset(kHashCount);
+ // Initialize array to map character and position with the impossible glyph ID. This
+ // represents no mapping.
+        for (int i = 0; i < kHashCount; ++i) {
+ fPackedUnicharIDToPackedGlyphID[i].fPackedUnicharID = SkGlyph::kImpossibleID;
+ fPackedUnicharIDToPackedGlyphID[i].fPackedGlyphID = 0;
+ }
+ }
+
+ return &fPackedUnicharIDToPackedGlyphID[SkChecksum::CheapMix(packedUnicharID) & kHashMask];
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+#define VALIDATE() AutoValidate av(this)
+#else
+#define VALIDATE()
+#endif
+
+uint16_t SkGlyphCache::unicharToGlyph(SkUnichar charCode) {
+ VALIDATE();
+ PackedUnicharID packedUnicharID = SkGlyph::MakeID(charCode);
+ CharGlyphRec* rec = this->getCharGlyphRec(packedUnicharID);
+
+ if (rec->fPackedUnicharID == packedUnicharID) {
+ // The glyph exists in the unichar to glyph mapping cache. Return it.
+ return SkGlyph::ID2Code(rec->fPackedGlyphID);
+ } else {
+ // The glyph is not in the unichar to glyph mapping cache. Insert it.
+ rec->fPackedUnicharID = packedUnicharID;
+ uint16_t glyphID = fScalerContext->charToGlyphID(charCode);
+ rec->fPackedGlyphID = SkGlyph::MakeID(glyphID);
+ return glyphID;
+ }
+}
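
getCharGlyphRec() and unicharToGlyph() above form a small direct-mapped lookaside: the packed unichar ID is hashed into one of kHashCount slots, and on a miss or collision the slot is simply recomputed and overwritten. The sketch below shows the same idea without any Skia types; mix32() is a generic integer mixer standing in for SkChecksum::CheapMix, and resolve_glyph() is a hypothetical stand-in for SkScalerContext::charToGlyphID.

    // Direct-mapped unichar -> glyph lookaside, one slot per bucket, no chaining.
    #include <array>
    #include <cstdint>

    constexpr int      kHashBits      = 8;
    constexpr int      kHashCount     = 1 << kHashBits;
    constexpr uint32_t kHashMask      = kHashCount - 1;
    constexpr uint32_t kImpossibleKey = ~0u;   // assumption: never a valid packed ID

    struct CharGlyphRec {
        uint32_t fPackedUnicharID = kImpossibleKey;
        uint16_t fGlyphID         = 0;
    };

    static uint32_t mix32(uint32_t x) {        // cheap avalanche, not Skia's exact mixer
        x ^= x >> 16; x *= 0x7feb352d;
        x ^= x >> 15; x *= 0x846ca68b;
        x ^= x >> 16; return x;
    }

    static uint16_t resolve_glyph(uint32_t unichar) {  // placeholder for the scaler context
        return static_cast<uint16_t>(unichar & 0xffff);
    }

    class UnicharToGlyphCache {
    public:
        uint16_t lookup(uint32_t packedUnicharID) {
            CharGlyphRec& rec = fSlots[mix32(packedUnicharID) & kHashMask];
            if (rec.fPackedUnicharID != packedUnicharID) {
                // Miss (or collision): recompute the mapping and overwrite the slot.
                rec.fPackedUnicharID = packedUnicharID;
                rec.fGlyphID = resolve_glyph(packedUnicharID);
            }
            return rec.fGlyphID;
        }
    private:
        std::array<CharGlyphRec, kHashCount> fSlots;
    };
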
+
+SkUnichar SkGlyphCache::glyphToUnichar(uint16_t glyphID) {
+ return fScalerContext->glyphIDToChar(glyphID);
+}
+
+unsigned SkGlyphCache::getGlyphCount() const {
+ return fScalerContext->getGlyphCount();
+}
+
+int SkGlyphCache::countCachedGlyphs() const {
+ return fGlyphMap.count();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+const SkGlyph& SkGlyphCache::getUnicharAdvance(SkUnichar charCode) {
+ VALIDATE();
+ return *this->lookupByChar(charCode, kJustAdvance_MetricsType);
+}
+
+const SkGlyph& SkGlyphCache::getGlyphIDAdvance(uint16_t glyphID) {
+ VALIDATE();
+ PackedGlyphID packedGlyphID = SkGlyph::MakeID(glyphID);
+ return *this->lookupByPackedGlyphID(packedGlyphID, kJustAdvance_MetricsType);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+const SkGlyph& SkGlyphCache::getUnicharMetrics(SkUnichar charCode) {
+ VALIDATE();
+ return *this->lookupByChar(charCode, kFull_MetricsType);
+}
+
+const SkGlyph& SkGlyphCache::getUnicharMetrics(SkUnichar charCode, SkFixed x, SkFixed y) {
+ VALIDATE();
+ return *this->lookupByChar(charCode, kFull_MetricsType, x, y);
+}
+
+const SkGlyph& SkGlyphCache::getGlyphIDMetrics(uint16_t glyphID) {
+ VALIDATE();
+ PackedGlyphID packedGlyphID = SkGlyph::MakeID(glyphID);
+ return *this->lookupByPackedGlyphID(packedGlyphID, kFull_MetricsType);
+}
+
+const SkGlyph& SkGlyphCache::getGlyphIDMetrics(uint16_t glyphID, SkFixed x, SkFixed y) {
+ VALIDATE();
+ PackedGlyphID packedGlyphID = SkGlyph::MakeID(glyphID, x, y);
+ return *this->lookupByPackedGlyphID(packedGlyphID, kFull_MetricsType);
+}
+
+SkGlyph* SkGlyphCache::lookupByChar(SkUnichar charCode, MetricsType type, SkFixed x, SkFixed y) {
+ PackedUnicharID id = SkGlyph::MakeID(charCode, x, y);
+ CharGlyphRec* rec = this->getCharGlyphRec(id);
+ if (rec->fPackedUnicharID != id) {
+ // this ID is based on the UniChar
+ rec->fPackedUnicharID = id;
+ // this ID is based on the glyph index
+ PackedGlyphID combinedID = SkGlyph::MakeID(fScalerContext->charToGlyphID(charCode), x, y);
+ rec->fPackedGlyphID = combinedID;
+ return this->lookupByPackedGlyphID(combinedID, type);
+ } else {
+ return this->lookupByPackedGlyphID(rec->fPackedGlyphID, type);
+ }
+}
+
+SkGlyph* SkGlyphCache::lookupByPackedGlyphID(PackedGlyphID packedGlyphID, MetricsType type) {
+ SkGlyph* glyph = fGlyphMap.find(packedGlyphID);
+
+ if (nullptr == glyph) {
+ glyph = this->allocateNewGlyph(packedGlyphID, type);
+ } else {
+ if (type == kFull_MetricsType && glyph->isJustAdvance()) {
+ fScalerContext->getMetrics(glyph);
+ }
+ }
+ return glyph;
+}
+
+SkGlyph* SkGlyphCache::allocateNewGlyph(PackedGlyphID packedGlyphID, MetricsType mtype) {
+ fMemoryUsed += sizeof(SkGlyph);
+
+ SkGlyph* glyphPtr;
+ {
+ SkGlyph glyph;
+ glyph.initGlyphFromCombinedID(packedGlyphID);
+ glyphPtr = fGlyphMap.set(glyph);
+ }
+
+ if (kJustAdvance_MetricsType == mtype) {
+ fScalerContext->getAdvance(glyphPtr);
+ } else {
+ SkASSERT(kFull_MetricsType == mtype);
+ fScalerContext->getMetrics(glyphPtr);
+ }
+
+ SkASSERT(glyphPtr->fID != SkGlyph::kImpossibleID);
+ return glyphPtr;
+}
+
+const void* SkGlyphCache::findImage(const SkGlyph& glyph) {
+ if (glyph.fWidth > 0 && glyph.fWidth < kMaxGlyphWidth) {
+ if (nullptr == glyph.fImage) {
+ size_t size = glyph.computeImageSize();
+ const_cast<SkGlyph&>(glyph).fImage = fGlyphAlloc.alloc(size,
+ SkChunkAlloc::kReturnNil_AllocFailType);
+ // check that alloc() actually succeeded
+ if (glyph.fImage) {
+ fScalerContext->getImage(glyph);
+ // TODO: the scaler may have changed the maskformat during
+ // getImage (e.g. from AA or LCD to BW) which means we may have
+ // overallocated the buffer. Check if the new computedImageSize
+ // is smaller, and if so, shrink the alloc size in fImageAlloc.
+ fMemoryUsed += size;
+ }
+ }
+ }
+ return glyph.fImage;
+}
+
+const SkPath* SkGlyphCache::findPath(const SkGlyph& glyph) {
+ if (glyph.fWidth) {
+ if (glyph.fPathData == nullptr) {
+ SkGlyph::PathData* pathData =
+ (SkGlyph::PathData* ) fGlyphAlloc.allocThrow(sizeof(SkGlyph::PathData));
+ const_cast<SkGlyph&>(glyph).fPathData = pathData;
+ pathData->fIntercept = nullptr;
+ SkPath* path = pathData->fPath = new SkPath;
+ fScalerContext->getPath(glyph, path);
+ fMemoryUsed += sizeof(SkPath) + path->countPoints() * sizeof(SkPoint);
+ }
+ }
+ return glyph.fPathData ? glyph.fPathData->fPath : nullptr;
+}
+
+#include "../pathops/SkPathOpsCubic.h"
+#include "../pathops/SkPathOpsQuad.h"
+
+static bool quad_in_bounds(const SkScalar* pts, const SkScalar bounds[2]) {
+ SkScalar min = SkTMin(SkTMin(pts[0], pts[2]), pts[4]);
+ if (bounds[1] < min) {
+ return false;
+ }
+ SkScalar max = SkTMax(SkTMax(pts[0], pts[2]), pts[4]);
+ return bounds[0] < max;
+}
+
+static bool cubic_in_bounds(const SkScalar* pts, const SkScalar bounds[2]) {
+ SkScalar min = SkTMin(SkTMin(SkTMin(pts[0], pts[2]), pts[4]), pts[6]);
+ if (bounds[1] < min) {
+ return false;
+ }
+ SkScalar max = SkTMax(SkTMax(SkTMax(pts[0], pts[2]), pts[4]), pts[6]);
+ return bounds[0] < max;
+}
+
+void SkGlyphCache::OffsetResults(const SkGlyph::Intercept* intercept, SkScalar scale,
+ SkScalar xPos, SkScalar* array, int* count) {
+ if (array) {
+ array += *count;
+ for (int index = 0; index < 2; index++) {
+ *array++ = intercept->fInterval[index] * scale + xPos;
+ }
+ }
+ *count += 2;
+}
+
+void SkGlyphCache::AddInterval(SkScalar val, SkGlyph::Intercept* intercept) {
+ intercept->fInterval[0] = SkTMin(intercept->fInterval[0], val);
+ intercept->fInterval[1] = SkTMax(intercept->fInterval[1], val);
+}
+
+void SkGlyphCache::AddPoints(const SkPoint* pts, int ptCount, const SkScalar bounds[2],
+ bool yAxis, SkGlyph::Intercept* intercept) {
+ for (int i = 0; i < ptCount; ++i) {
+ SkScalar val = *(&pts[i].fY - yAxis);
+ if (bounds[0] < val && val < bounds[1]) {
+ AddInterval(*(&pts[i].fX + yAxis), intercept);
+ }
+ }
+}
+
+void SkGlyphCache::AddLine(const SkPoint pts[2], SkScalar axis, bool yAxis,
+ SkGlyph::Intercept* intercept) {
+ SkScalar t = yAxis ? (axis - pts[0].fX) / (pts[1].fX - pts[0].fX)
+ : (axis - pts[0].fY) / (pts[1].fY - pts[0].fY);
+ if (0 <= t && t < 1) { // this handles divide by zero above
+ AddInterval(yAxis ? pts[0].fY + t * (pts[1].fY - pts[0].fY)
+ : pts[0].fX + t * (pts[1].fX - pts[0].fX), intercept);
+ }
+}
+
+void SkGlyphCache::AddQuad(const SkPoint pts[2], SkScalar axis, bool yAxis,
+ SkGlyph::Intercept* intercept) {
+ SkDQuad quad;
+ quad.set(pts);
+ double roots[2];
+ int count = yAxis ? quad.verticalIntersect(axis, roots)
+ : quad.horizontalIntersect(axis, roots);
+ while (--count >= 0) {
+ SkPoint pt = quad.ptAtT(roots[count]).asSkPoint();
+ AddInterval(*(&pt.fX + yAxis), intercept);
+ }
+}
+
+void SkGlyphCache::AddCubic(const SkPoint pts[3], SkScalar axis, bool yAxis,
+ SkGlyph::Intercept* intercept) {
+ SkDCubic cubic;
+ cubic.set(pts);
+ double roots[3];
+ int count = yAxis ? cubic.verticalIntersect(axis, roots)
+ : cubic.horizontalIntersect(axis, roots);
+ while (--count >= 0) {
+ SkPoint pt = cubic.ptAtT(roots[count]).asSkPoint();
+ AddInterval(*(&pt.fX + yAxis), intercept);
+ }
+}
+
+const SkGlyph::Intercept* SkGlyphCache::MatchBounds(const SkGlyph* glyph,
+ const SkScalar bounds[2]) {
+ if (!glyph->fPathData) {
+ return nullptr;
+ }
+ const SkGlyph::Intercept* intercept = glyph->fPathData->fIntercept;
+ while (intercept) {
+ if (bounds[0] == intercept->fBounds[0] && bounds[1] == intercept->fBounds[1]) {
+ return intercept;
+ }
+ intercept = intercept->fNext;
+ }
+ return nullptr;
+}
+
+void SkGlyphCache::findIntercepts(const SkScalar bounds[2], SkScalar scale, SkScalar xPos,
+ bool yAxis, SkGlyph* glyph, SkScalar* array, int* count) {
+ const SkGlyph::Intercept* match = MatchBounds(glyph, bounds);
+
+ if (match) {
+ if (match->fInterval[0] < match->fInterval[1]) {
+ OffsetResults(match, scale, xPos, array, count);
+ }
+ return;
+ }
+
+ SkGlyph::Intercept* intercept =
+ (SkGlyph::Intercept* ) fGlyphAlloc.allocThrow(sizeof(SkGlyph::Intercept));
+ intercept->fNext = glyph->fPathData->fIntercept;
+ intercept->fBounds[0] = bounds[0];
+ intercept->fBounds[1] = bounds[1];
+ intercept->fInterval[0] = SK_ScalarMax;
+ intercept->fInterval[1] = SK_ScalarMin;
+ glyph->fPathData->fIntercept = intercept;
+ const SkPath* path = glyph->fPathData->fPath;
+ const SkRect& pathBounds = path->getBounds();
+ if (*(&pathBounds.fBottom - yAxis) < bounds[0] || bounds[1] < *(&pathBounds.fTop - yAxis)) {
+ return;
+ }
+ SkPath::Iter iter(*path, false);
+ SkPoint pts[4];
+ SkPath::Verb verb;
+ while (SkPath::kDone_Verb != (verb = iter.next(pts))) {
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ break;
+ case SkPath::kLine_Verb:
+ AddLine(pts, bounds[0], yAxis, intercept);
+ AddLine(pts, bounds[1], yAxis, intercept);
+ AddPoints(pts, 2, bounds, yAxis, intercept);
+ break;
+ case SkPath::kQuad_Verb:
+ if (!quad_in_bounds(&pts[0].fY - yAxis, bounds)) {
+ break;
+ }
+ AddQuad(pts, bounds[0], yAxis, intercept);
+ AddQuad(pts, bounds[1], yAxis, intercept);
+ AddPoints(pts, 3, bounds, yAxis, intercept);
+ break;
+ case SkPath::kConic_Verb:
+ SkASSERT(0); // no support for text composed of conics
+ break;
+ case SkPath::kCubic_Verb:
+ if (!cubic_in_bounds(&pts[0].fY - yAxis, bounds)) {
+ break;
+ }
+ AddCubic(pts, bounds[0], yAxis, intercept);
+ AddCubic(pts, bounds[1], yAxis, intercept);
+ AddPoints(pts, 4, bounds, yAxis, intercept);
+ break;
+ case SkPath::kClose_Verb:
+ break;
+ default:
+ SkASSERT(0);
+ break;
+ }
+ }
+ if (intercept->fInterval[0] >= intercept->fInterval[1]) {
+ intercept->fInterval[0] = SK_ScalarMax;
+ intercept->fInterval[1] = SK_ScalarMin;
+ return;
+ }
+ OffsetResults(intercept, scale, xPos, array, count);
+}
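
findIntercepts() above caches one Intercept record per bounds pair on the glyph's path data and uses the inverted interval [SK_ScalarMax, SK_ScalarMin] as a sentinel for "the path never crosses this band". Because OffsetResults() writes into the array only when it is non-null but always advances *count, a caller can run a count-only pass first and a fill pass second. The following is a hypothetical caller sketch under that assumption; the cache and glyph are supplied by the surrounding text code, and findPath() must run first so the glyph's fPathData exists.

    // Hypothetical count-then-fill caller for findIntercepts(); not a real Skia call site.
    #include "SkGlyphCache.h"

    static void collect_intercepts(SkGlyphCache* cache, SkGlyph* glyph,
                                   const SkScalar bounds[2], SkScalar scale, SkScalar xPos,
                                   SkTDArray<SkScalar>* intervals) {
        cache->findPath(*glyph);                     // ensure glyph->fPathData is populated

        int count = 0;                               // pass 1: count only (null array)
        cache->findIntercepts(bounds, scale, xPos, /*yAxis=*/false, glyph, nullptr, &count);

        intervals->setCount(count);                  // pairs of [enter, exit] crossings
        count = 0;                                   // pass 2: fill the buffer
        cache->findIntercepts(bounds, scale, xPos, /*yAxis=*/false, glyph,
                              intervals->begin(), &count);
    }
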
+
+void SkGlyphCache::dump() const {
+ const SkTypeface* face = fScalerContext->getTypeface();
+ const SkScalerContextRec& rec = fScalerContext->getRec();
+ SkMatrix matrix;
+ rec.getSingleMatrix(&matrix);
+ matrix.preScale(SkScalarInvert(rec.fTextSize), SkScalarInvert(rec.fTextSize));
+ SkString name;
+ face->getFamilyName(&name);
+
+ SkString msg;
+ msg.printf("cache typeface:%x %25s:%d size:%2g [%g %g %g %g] lum:%02X devG:%d pntG:%d cntr:%d glyphs:%3d",
+ face->uniqueID(), name.c_str(), face->style(), rec.fTextSize,
+ matrix[SkMatrix::kMScaleX], matrix[SkMatrix::kMSkewX],
+ matrix[SkMatrix::kMSkewY], matrix[SkMatrix::kMScaleY],
+ rec.fLumBits & 0xFF, rec.fDeviceGamma, rec.fPaintGamma, rec.fContrast,
+ fGlyphMap.count());
+ SkDebugf("%s\n", msg.c_str());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkGlyphCache::getAuxProcData(void (*proc)(void*), void** dataPtr) const {
+ const AuxProcRec* rec = fAuxProcList;
+ while (rec) {
+ if (rec->fProc == proc) {
+ if (dataPtr) {
+ *dataPtr = rec->fData;
+ }
+ return true;
+ }
+ rec = rec->fNext;
+ }
+ return false;
+}
+
+void SkGlyphCache::setAuxProc(void (*proc)(void*), void* data) {
+ if (proc == nullptr) {
+ return;
+ }
+
+ AuxProcRec* rec = fAuxProcList;
+ while (rec) {
+ if (rec->fProc == proc) {
+ rec->fData = data;
+ return;
+ }
+ rec = rec->fNext;
+ }
+ // not found, create a new rec
+ rec = new AuxProcRec;
+ rec->fProc = proc;
+ rec->fData = data;
+ rec->fNext = fAuxProcList;
+ fAuxProcList = rec;
+}
+
+void SkGlyphCache::invokeAndRemoveAuxProcs() {
+ AuxProcRec* rec = fAuxProcList;
+ while (rec) {
+ rec->fProc(rec->fData);
+ AuxProcRec* next = rec->fNext;
+ delete rec;
+ rec = next;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+size_t SkGlyphCache_Globals::getTotalMemoryUsed() const {
+ SkAutoExclusive ac(fLock);
+ return fTotalMemoryUsed;
+}
+
+int SkGlyphCache_Globals::getCacheCountUsed() const {
+ SkAutoExclusive ac(fLock);
+ return fCacheCount;
+}
+
+int SkGlyphCache_Globals::getCacheCountLimit() const {
+ SkAutoExclusive ac(fLock);
+ return fCacheCountLimit;
+}
+
+size_t SkGlyphCache_Globals::setCacheSizeLimit(size_t newLimit) {
+ static const size_t minLimit = 256 * 1024;
+ if (newLimit < minLimit) {
+ newLimit = minLimit;
+ }
+
+ SkAutoExclusive ac(fLock);
+
+ size_t prevLimit = fCacheSizeLimit;
+ fCacheSizeLimit = newLimit;
+ this->internalPurge();
+ return prevLimit;
+}
+
+size_t SkGlyphCache_Globals::getCacheSizeLimit() const {
+ SkAutoExclusive ac(fLock);
+ return fCacheSizeLimit;
+}
+
+int SkGlyphCache_Globals::setCacheCountLimit(int newCount) {
+ if (newCount < 0) {
+ newCount = 0;
+ }
+
+ SkAutoExclusive ac(fLock);
+
+ int prevCount = fCacheCountLimit;
+ fCacheCountLimit = newCount;
+ this->internalPurge();
+ return prevCount;
+}
+
+void SkGlyphCache_Globals::purgeAll() {
+ SkAutoExclusive ac(fLock);
+ this->internalPurge(fTotalMemoryUsed);
+}
+
+/* This function calls the visitor while holding the mutex lock, so the visitor
+ must not:
+ - take too much time
+ - try to acquire the mutex again
+ - call a fontscaler (which might call back into the cache)
+*/
+SkGlyphCache* SkGlyphCache::VisitCache(SkTypeface* typeface,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc,
+ bool (*proc)(const SkGlyphCache*, void*),
+ void* context) {
+ if (!typeface) {
+ typeface = SkTypeface::GetDefaultTypeface();
+ }
+ SkASSERT(desc);
+
+ // Precondition: the typeface id must be the fFontID in the descriptor
+ SkDEBUGCODE(
+ uint32_t length = 0;
+ const SkScalerContext::Rec* rec = static_cast<const SkScalerContext::Rec*>(
+ desc->findEntry(kRec_SkDescriptorTag, &length));
+ SkASSERT(rec);
+ SkASSERT(length == sizeof(*rec));
+ SkASSERT(typeface->uniqueID() == rec->fFontID);
+ )
+
+ SkGlyphCache_Globals& globals = get_globals();
+ SkGlyphCache* cache;
+
+ {
+ SkAutoExclusive ac(globals.fLock);
+
+ globals.validate();
+
+ for (cache = globals.internalGetHead(); cache != nullptr; cache = cache->fNext) {
+ if (*cache->fDesc == *desc) {
+ globals.internalDetachCache(cache);
+ if (!proc(cache, context)) {
+ globals.internalAttachCacheToHead(cache);
+ cache = nullptr;
+ }
+ return cache;
+ }
+ }
+ }
+
+ // Check if we can create a scaler-context before creating the glyphcache.
+ // If not, we may have exhausted OS/font resources, so try purging the
+ // cache once and try again.
+ {
+ // pass true the first time, to notice if the scalercontext failed,
+ // so we can try the purge.
+ SkScalerContext* ctx = typeface->createScalerContext(effects, desc, true);
+ if (!ctx) {
+ get_globals().purgeAll();
+ ctx = typeface->createScalerContext(effects, desc, false);
+ SkASSERT(ctx);
+ }
+ cache = new SkGlyphCache(typeface, desc, ctx);
+ }
+
+ AutoValidate av(cache);
+
+ if (!proc(cache, context)) { // need to reattach
+ globals.attachCacheToHead(cache);
+ cache = nullptr;
+ }
+ return cache;
+}
+
+void SkGlyphCache::AttachCache(SkGlyphCache* cache) {
+ SkASSERT(cache);
+ SkASSERT(cache->fNext == nullptr);
+
+ get_globals().attachCacheToHead(cache);
+}
+
+static void dump_visitor(const SkGlyphCache& cache, void* context) {
+ int* counter = (int*)context;
+ int index = *counter;
+ *counter += 1;
+
+ const SkScalerContextRec& rec = cache.getScalerContext()->getRec();
+
+ SkDebugf("[%3d] ID %3d, glyphs %3d, size %g, scale %g, skew %g, [%g %g %g %g]\n",
+ index, rec.fFontID, cache.countCachedGlyphs(),
+ rec.fTextSize, rec.fPreScaleX, rec.fPreSkewX,
+ rec.fPost2x2[0][0], rec.fPost2x2[0][1], rec.fPost2x2[1][0], rec.fPost2x2[1][1]);
+}
+
+void SkGlyphCache::Dump() {
+ SkDebugf("GlyphCache [ used budget ]\n");
+ SkDebugf(" bytes [ %8zu %8zu ]\n",
+ SkGraphics::GetFontCacheUsed(), SkGraphics::GetFontCacheLimit());
+ SkDebugf(" count [ %8d %8d ]\n",
+ SkGraphics::GetFontCacheCountUsed(), SkGraphics::GetFontCacheCountLimit());
+
+ int counter = 0;
+ SkGlyphCache::VisitAll(dump_visitor, &counter);
+}
+
+static void sk_trace_dump_visitor(const SkGlyphCache& cache, void* context) {
+ SkTraceMemoryDump* dump = static_cast<SkTraceMemoryDump*>(context);
+
+ const SkTypeface* face = cache.getScalerContext()->getTypeface();
+ const SkScalerContextRec& rec = cache.getScalerContext()->getRec();
+
+ SkString fontName;
+ face->getFamilyName(&fontName);
+ // Replace all special characters with '_'.
+ for (size_t index = 0; index < fontName.size(); ++index) {
+ if (!std::isalnum(fontName[index])) {
+ fontName[index] = '_';
+ }
+ }
+
+ SkString dumpName = SkStringPrintf("%s/%s_%d/%p",
+ gGlyphCacheDumpName, fontName.c_str(), rec.fFontID, &cache);
+
+ dump->dumpNumericValue(dumpName.c_str(), "size", "bytes", cache.getMemoryUsed());
+ dump->dumpNumericValue(dumpName.c_str(), "glyph_count", "objects", cache.countCachedGlyphs());
+ dump->setMemoryBacking(dumpName.c_str(), "malloc", nullptr);
+}
+
+void SkGlyphCache::DumpMemoryStatistics(SkTraceMemoryDump* dump) {
+ dump->dumpNumericValue(gGlyphCacheDumpName, "size", "bytes", SkGraphics::GetFontCacheUsed());
+ dump->dumpNumericValue(gGlyphCacheDumpName, "budget_size", "bytes",
+ SkGraphics::GetFontCacheLimit());
+ dump->dumpNumericValue(gGlyphCacheDumpName, "glyph_count", "objects",
+ SkGraphics::GetFontCacheCountUsed());
+ dump->dumpNumericValue(gGlyphCacheDumpName, "budget_glyph_count", "objects",
+ SkGraphics::GetFontCacheCountLimit());
+
+ if (dump->getRequestedDetails() == SkTraceMemoryDump::kLight_LevelOfDetail) {
+ dump->setMemoryBacking(gGlyphCacheDumpName, "malloc", nullptr);
+ return;
+ }
+
+ SkGlyphCache::VisitAll(sk_trace_dump_visitor, dump);
+}
+
+void SkGlyphCache::VisitAll(Visitor visitor, void* context) {
+ SkGlyphCache_Globals& globals = get_globals();
+ SkAutoExclusive ac(globals.fLock);
+ SkGlyphCache* cache;
+
+ globals.validate();
+
+ for (cache = globals.internalGetHead(); cache != nullptr; cache = cache->fNext) {
+ visitor(*cache, context);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGlyphCache_Globals::attachCacheToHead(SkGlyphCache* cache) {
+ SkAutoExclusive ac(fLock);
+
+ this->validate();
+ cache->validate();
+
+ this->internalAttachCacheToHead(cache);
+ this->internalPurge();
+}
+
+SkGlyphCache* SkGlyphCache_Globals::internalGetTail() const {
+ SkGlyphCache* cache = fHead;
+ if (cache) {
+ while (cache->fNext) {
+ cache = cache->fNext;
+ }
+ }
+ return cache;
+}
+
+size_t SkGlyphCache_Globals::internalPurge(size_t minBytesNeeded) {
+ this->validate();
+
+ size_t bytesNeeded = 0;
+ if (fTotalMemoryUsed > fCacheSizeLimit) {
+ bytesNeeded = fTotalMemoryUsed - fCacheSizeLimit;
+ }
+ bytesNeeded = SkTMax(bytesNeeded, minBytesNeeded);
+ if (bytesNeeded) {
+ // no small purges!
+ bytesNeeded = SkTMax(bytesNeeded, fTotalMemoryUsed >> 2);
+ }
+
+ int countNeeded = 0;
+ if (fCacheCount > fCacheCountLimit) {
+ countNeeded = fCacheCount - fCacheCountLimit;
+ // no small purges!
+ countNeeded = SkMax32(countNeeded, fCacheCount >> 2);
+ }
+
+ // early exit
+ if (!countNeeded && !bytesNeeded) {
+ return 0;
+ }
+
+ size_t bytesFreed = 0;
+ int countFreed = 0;
+
+ // we start at the tail and proceed backwards, as the linked list is in LRU
+ // order, with unimportant entries at the tail.
+ SkGlyphCache* cache = this->internalGetTail();
+ while (cache != nullptr &&
+ (bytesFreed < bytesNeeded || countFreed < countNeeded)) {
+ SkGlyphCache* prev = cache->fPrev;
+ bytesFreed += cache->fMemoryUsed;
+ countFreed += 1;
+
+ this->internalDetachCache(cache);
+ delete cache;
+ cache = prev;
+ }
+
+ this->validate();
+
+#ifdef SPEW_PURGE_STATUS
+ if (countFreed) {
+ SkDebugf("purging %dK from font cache [%d entries]\n",
+ (int)(bytesFreed >> 10), countFreed);
+ }
+#endif
+
+ return bytesFreed;
+}
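
The budgeting arithmetic at the top of internalPurge() frees at least the overage against each limit, but never less than a quarter of what is currently held, so the purge loop does not keep running for tiny amounts. A standalone sketch of just that step, using plain integers rather than Skia types:

    // Purge-budget arithmetic mirroring internalPurge(): "no small purges".
    #include <algorithm>
    #include <cstddef>

    struct PurgePlan { size_t bytesNeeded; int countNeeded; };

    PurgePlan plan_purge(size_t usedBytes, size_t byteLimit,
                         int usedCount, int countLimit,
                         size_t minBytesNeeded = 0) {
        size_t bytesNeeded = usedBytes > byteLimit ? usedBytes - byteLimit : 0;
        bytesNeeded = std::max(bytesNeeded, minBytesNeeded);
        if (bytesNeeded) {
            bytesNeeded = std::max(bytesNeeded, usedBytes >> 2);   // at least 1/4 of current use
        }
        int countNeeded = 0;
        if (usedCount > countLimit) {
            countNeeded = std::max(usedCount - countLimit, usedCount >> 2);
        }
        return { bytesNeeded, countNeeded };
    }

    // Example: 3 MB used against a 2 MB limit plans to free the full 1 MB overage,
    // which already exceeds a quarter of the 3 MB in use.
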
+
+void SkGlyphCache_Globals::internalAttachCacheToHead(SkGlyphCache* cache) {
+ SkASSERT(nullptr == cache->fPrev && nullptr == cache->fNext);
+ if (fHead) {
+ fHead->fPrev = cache;
+ cache->fNext = fHead;
+ }
+ fHead = cache;
+
+ fCacheCount += 1;
+ fTotalMemoryUsed += cache->fMemoryUsed;
+}
+
+void SkGlyphCache_Globals::internalDetachCache(SkGlyphCache* cache) {
+ SkASSERT(fCacheCount > 0);
+ fCacheCount -= 1;
+ fTotalMemoryUsed -= cache->fMemoryUsed;
+
+ if (cache->fPrev) {
+ cache->fPrev->fNext = cache->fNext;
+ } else {
+ fHead = cache->fNext;
+ }
+ if (cache->fNext) {
+ cache->fNext->fPrev = cache->fPrev;
+ }
+ cache->fPrev = cache->fNext = nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+
+void SkGlyphCache::validate() const {
+#ifdef SK_DEBUG_GLYPH_CACHE
+ int count = fGlyphArray.count();
+ for (int i = 0; i < count; i++) {
+ const SkGlyph* glyph = &fGlyphArray[i];
+ SkASSERT(glyph);
+ if (glyph->fImage) {
+ SkASSERT(fGlyphAlloc.contains(glyph->fImage));
+ }
+ }
+#endif
+}
+
+void SkGlyphCache_Globals::validate() const {
+ size_t computedBytes = 0;
+ int computedCount = 0;
+
+ const SkGlyphCache* head = fHead;
+ while (head != nullptr) {
+ computedBytes += head->fMemoryUsed;
+ computedCount += 1;
+ head = head->fNext;
+ }
+
+ SkASSERTF(fCacheCount == computedCount, "fCacheCount: %d, computedCount: %d", fCacheCount,
+ computedCount);
+ SkASSERTF(fTotalMemoryUsed == computedBytes, "fTotalMemoryUsed: %zu, computedBytes: %zu",
+ fTotalMemoryUsed, computedBytes);
+}
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkTypefaceCache.h"
+
+size_t SkGraphics::GetFontCacheLimit() {
+ return get_globals().getCacheSizeLimit();
+}
+
+size_t SkGraphics::SetFontCacheLimit(size_t bytes) {
+ return get_globals().setCacheSizeLimit(bytes);
+}
+
+size_t SkGraphics::GetFontCacheUsed() {
+ return get_globals().getTotalMemoryUsed();
+}
+
+int SkGraphics::GetFontCacheCountLimit() {
+ return get_globals().getCacheCountLimit();
+}
+
+int SkGraphics::SetFontCacheCountLimit(int count) {
+ return get_globals().setCacheCountLimit(count);
+}
+
+int SkGraphics::GetFontCacheCountUsed() {
+ return get_globals().getCacheCountUsed();
+}
+
+void SkGraphics::PurgeFontCache() {
+ get_globals().purgeAll();
+ SkTypefaceCache::PurgeAll();
+}
+
+// TODO(herb): clean up TLS apis.
+size_t SkGraphics::GetTLSFontCacheLimit() { return 0; }
+void SkGraphics::SetTLSFontCacheLimit(size_t bytes) { }
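
The SkGraphics wrappers above are the public knobs for this strike cache. A minimal usage sketch limited to the functions defined in this file; the 8 MB and 512-strike figures are arbitrary examples, not recommended values.

    // Tuning and inspecting the font (strike) cache through SkGraphics.
    #include "SkGraphics.h"
    #include "SkTypes.h"

    void tune_font_cache_example() {
        size_t oldLimit = SkGraphics::SetFontCacheLimit(8 * 1024 * 1024);  // bytes; returns prior limit
        int    oldCount = SkGraphics::SetFontCacheCountLimit(512);         // max strikes; returns prior limit

        // ... rasterize some text ...

        SkDebugf("font cache: %zu of %zu bytes, %d of %d strikes\n",
                 SkGraphics::GetFontCacheUsed(), SkGraphics::GetFontCacheLimit(),
                 SkGraphics::GetFontCacheCountUsed(), SkGraphics::GetFontCacheCountLimit());

        SkGraphics::PurgeFontCache();               // drop every strike (budgets unchanged)
        SkGraphics::SetFontCacheLimit(oldLimit);    // restore the previous budgets
        SkGraphics::SetFontCacheCountLimit(oldCount);
    }
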
diff --git a/gfx/skia/skia/src/core/SkGlyphCache.h b/gfx/skia/skia/src/core/SkGlyphCache.h
new file mode 100644
index 000000000..84a32eb3e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGlyphCache.h
@@ -0,0 +1,312 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+ */
+
+#ifndef SkGlyphCache_DEFINED
+#define SkGlyphCache_DEFINED
+
+#include "SkBitmap.h"
+#include "SkChunkAlloc.h"
+#include "SkDescriptor.h"
+#include "SkGlyph.h"
+#include "SkPaint.h"
+#include "SkTHash.h"
+#include "SkScalerContext.h"
+#include "SkTemplates.h"
+#include "SkTDArray.h"
+
+class SkTraceMemoryDump;
+
+class SkGlyphCache_Globals;
+
+/** \class SkGlyphCache
+
+ This class represents a strike: a specific combination of typeface, size, matrix, etc., and
+ holds the glyphs for that strike. Calling any of the getUnichar.../getGlyphID... methods will
+ return the requested glyph, either instantly if it is already cached, or by first generating
+ it and then adding it to the strike.
+
+ The strikes are held in a global list, available to all threads. To interact with one, call
+ either VisitCache() or DetachCache().
+*/
+class SkGlyphCache {
+public:
+ /** Returns a glyph with valid fAdvance and fDevKern fields. The remaining fields may be
+ valid, but that is not guaranteed. If you require those, call getUnicharMetrics or
+ getGlyphIDMetrics instead.
+ */
+ const SkGlyph& getUnicharAdvance(SkUnichar);
+ const SkGlyph& getGlyphIDAdvance(uint16_t);
+
+ /** Returns a glyph with all fields valid except fImage and fPath, which may be null. If they
+ are null, call findImage or findPath for those. If they are not null, then they are valid.
+
+ This call is potentially slower than the matching ...Advance call. If you only need the
+ fAdvance/fDevKern fields, call those instead.
+ */
+ const SkGlyph& getUnicharMetrics(SkUnichar);
+ const SkGlyph& getGlyphIDMetrics(uint16_t);
+
+ /** These are variants that take the device position of the glyph. Call these only if you are
+ drawing in subpixel mode. Passing 0, 0 is effectively the same as calling the variants
+ w/o the extra params, though a tiny bit slower.
+ */
+ const SkGlyph& getUnicharMetrics(SkUnichar, SkFixed x, SkFixed y);
+ const SkGlyph& getGlyphIDMetrics(uint16_t, SkFixed x, SkFixed y);
+
+ /** Return the glyphID for the specified Unichar. If the char has already been seen, use the
+ existing cache entry. If not, ask the scalercontext to compute it for us.
+ */
+ uint16_t unicharToGlyph(SkUnichar);
+
+ /** Map the glyph to its Unicode equivalent. Unmappable glyphs map to a character code of zero.
+ */
+ SkUnichar glyphToUnichar(uint16_t);
+
+ /** Returns the number of glyphs for this strike.
+ */
+ unsigned getGlyphCount() const;
+
+ /** Return the number of glyphs currently cached. */
+ int countCachedGlyphs() const;
+
+ /** Return the image associated with the glyph. If it has not been generated this will
+ trigger that.
+ */
+ const void* findImage(const SkGlyph&);
+
+ /** If the advance axis intersects the glyph's path, append the positions scaled and offset
+ to the array (if non-null), and set the count to the updated array length.
+ */
+ void findIntercepts(const SkScalar bounds[2], SkScalar scale, SkScalar xPos,
+ bool yAxis, SkGlyph* , SkScalar* array, int* count);
+
+ /** Return the Path associated with the glyph. If it has not been generated this will trigger
+ that.
+ */
+ const SkPath* findPath(const SkGlyph&);
+
+ /** Return the vertical metrics for this strike.
+ */
+ const SkPaint::FontMetrics& getFontMetrics() const {
+ return fFontMetrics;
+ }
+
+ const SkDescriptor& getDescriptor() const { return *fDesc; }
+
+ SkMask::Format getMaskFormat() const {
+ return fScalerContext->getMaskFormat();
+ }
+
+ bool isSubpixel() const {
+ return fScalerContext->isSubpixel();
+ }
+
+ /** Return the approx RAM usage for this cache. */
+ size_t getMemoryUsed() const { return fMemoryUsed; }
+
+ void dump() const;
+
+ /** AuxProc/Data allow a client to associate data with this cache entry. Multiple clients can
+ use this, as their data is keyed with a function pointer. In addition to serving as a
+ key, the function pointer is called with the data when the glyphcache object is deleted,
+ so the client can clean up their data as well.
+ NOTE: the auxProc must not try to access this glyphcache in any way, since it may be in
+ the process of being deleted.
+ */
+
+ //! If the proc is found, return true and set *dataPtr to its data
+ bool getAuxProcData(void (*auxProc)(void*), void** dataPtr) const;
+
+ //! Add a proc/data pair to the glyphcache. proc should be non-null
+ void setAuxProc(void (*auxProc)(void*), void* auxData);
+
+ SkScalerContext* getScalerContext() const { return fScalerContext; }
+
+ /** Find a matching cache entry, and call proc() with it. If none is found create a new one.
+ If the proc() returns true, detach the cache and return it, otherwise leave it and return
+ nullptr.
+ */
+ static SkGlyphCache* VisitCache(SkTypeface*, const SkScalerContextEffects&, const SkDescriptor*,
+ bool (*proc)(const SkGlyphCache*, void*),
+ void* context);
+
+ /** Given a strike that was returned by either VisitCache() or DetachCache(), add it back into
+ the global cache list (after which the caller should not reference it anymore).
+ */
+ static void AttachCache(SkGlyphCache*);
+ using AttachCacheFunctor = SkFunctionWrapper<void, SkGlyphCache, AttachCache>;
+
+ /** Detach a strike from the global cache matching the specified descriptor. Once detached,
+ it can be queried/modified by the current thread, and when finished, be reattached to the
+ global cache with AttachCache(). While detached, if another request is made with the same
+ descriptor, a different strike will be generated. This is fine. It does mean we can have
+ more than 1 strike for the same descriptor, but that will eventually get purged, and the
+ win is that different threads will never block each other while a strike is being used.
+ */
+ static SkGlyphCache* DetachCache(SkTypeface* typeface, const SkScalerContextEffects& effects,
+ const SkDescriptor* desc) {
+ return VisitCache(typeface, effects, desc, DetachProc, nullptr);
+ }
+
+ static void Dump();
+
+ /** Dump memory usage statistics of all the attached caches in the process using the
+ SkTraceMemoryDump interface.
+ */
+ static void DumpMemoryStatistics(SkTraceMemoryDump* dump);
+
+ typedef void (*Visitor)(const SkGlyphCache&, void* context);
+ static void VisitAll(Visitor, void* context);
+
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+ class AutoValidate : SkNoncopyable {
+ public:
+ AutoValidate(const SkGlyphCache* cache) : fCache(cache) {
+ if (fCache) {
+ fCache->validate();
+ }
+ }
+ ~AutoValidate() {
+ if (fCache) {
+ fCache->validate();
+ }
+ }
+ void forget() {
+ fCache = nullptr;
+ }
+ private:
+ const SkGlyphCache* fCache;
+ };
+
+private:
+ friend class SkGlyphCache_Globals;
+
+ enum MetricsType {
+ kJustAdvance_MetricsType,
+ kFull_MetricsType
+ };
+
+ enum {
+ kHashBits = 8,
+ kHashCount = 1 << kHashBits,
+ kHashMask = kHashCount - 1
+ };
+
+ typedef uint32_t PackedGlyphID; // glyph-index + subpixel-pos
+ typedef uint32_t PackedUnicharID; // unichar + subpixel-pos
+
+ struct CharGlyphRec {
+ PackedUnicharID fPackedUnicharID;
+ PackedGlyphID fPackedGlyphID;
+ };
+
+ struct AuxProcRec {
+ AuxProcRec* fNext;
+ void (*fProc)(void*);
+ void* fData;
+ };
+
+ // SkGlyphCache takes ownership of the scalercontext.
+ SkGlyphCache(SkTypeface*, const SkDescriptor*, SkScalerContext*);
+ ~SkGlyphCache();
+
+ // Return the SkGlyph* associated with MakeID. The id parameter is the
+ // combined glyph/x/y id generated by MakeID. If it is just a glyph id
+ // then x and y are assumed to be zero.
+ SkGlyph* lookupByPackedGlyphID(PackedGlyphID packedGlyphID, MetricsType type);
+
+ // Return a SkGlyph* associated with unicode id and position x and y.
+ SkGlyph* lookupByChar(SkUnichar id, MetricsType type, SkFixed x = 0, SkFixed y = 0);
+
+ // Return a new SkGlyph for the glyph ID and subpixel position id. Limit the amount
+ // of work using type.
+ SkGlyph* allocateNewGlyph(PackedGlyphID packedGlyphID, MetricsType type);
+
+ static bool DetachProc(const SkGlyphCache*, void*) { return true; }
+
+ // The id arg is a combined id generated by MakeID.
+ CharGlyphRec* getCharGlyphRec(PackedUnicharID id);
+
+ void invokeAndRemoveAuxProcs();
+
+ static void OffsetResults(const SkGlyph::Intercept* intercept, SkScalar scale,
+ SkScalar xPos, SkScalar* array, int* count);
+ static void AddInterval(SkScalar val, SkGlyph::Intercept* intercept);
+ static void AddPoints(const SkPoint* pts, int ptCount, const SkScalar bounds[2],
+ bool yAxis, SkGlyph::Intercept* intercept);
+ static void AddLine(const SkPoint pts[2], SkScalar axis, bool yAxis,
+ SkGlyph::Intercept* intercept);
+ static void AddQuad(const SkPoint pts[2], SkScalar axis, bool yAxis,
+ SkGlyph::Intercept* intercept);
+ static void AddCubic(const SkPoint pts[3], SkScalar axis, bool yAxis,
+ SkGlyph::Intercept* intercept);
+ static const SkGlyph::Intercept* MatchBounds(const SkGlyph* glyph,
+ const SkScalar bounds[2]);
+
+ SkGlyphCache* fNext;
+ SkGlyphCache* fPrev;
+ SkDescriptor* const fDesc;
+ SkScalerContext* const fScalerContext;
+ SkPaint::FontMetrics fFontMetrics;
+
+ // Map from a combined GlyphID and sub-pixel position to a SkGlyph.
+ SkTHashTable<SkGlyph, PackedGlyphID, SkGlyph::HashTraits> fGlyphMap;
+
+ SkChunkAlloc fGlyphAlloc;
+
+ SkAutoTArray<CharGlyphRec> fPackedUnicharIDToPackedGlyphID;
+
+ // used to track (approx) how much RAM is tied up in this cache
+ size_t fMemoryUsed;
+
+ AuxProcRec* fAuxProcList;
+};
+
+class SkAutoGlyphCache : public std::unique_ptr<SkGlyphCache, SkGlyphCache::AttachCacheFunctor> {
+public:
+ /** deprecated: use get() */
+ SkGlyphCache* getCache() const { return this->get(); }
+ SkAutoGlyphCache() = default;
+ SkAutoGlyphCache(SkGlyphCache* cache) : INHERITED(cache) {}
+ SkAutoGlyphCache(SkTypeface* typeface, const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : INHERITED(SkGlyphCache::DetachCache(typeface, effects, desc))
+ {}
+ /** deprecated: always enables fake gamma */
+ SkAutoGlyphCache(const SkPaint& paint,
+ const SkSurfaceProps* surfaceProps,
+ const SkMatrix* matrix)
+ : INHERITED(paint.detachCache(surfaceProps,
+ SkPaint::kFakeGammaAndBoostContrast_ScalerContextFlags,
+ matrix))
+ {}
+ SkAutoGlyphCache(const SkPaint& paint,
+ const SkSurfaceProps* surfaceProps,
+ uint32_t scalerContextFlags,
+ const SkMatrix* matrix)
+ : INHERITED(paint.detachCache(surfaceProps, scalerContextFlags, matrix))
+ {}
+private:
+ using INHERITED = std::unique_ptr<SkGlyphCache, SkGlyphCache::AttachCacheFunctor>;
+};
+
+class SkAutoGlyphCacheNoGamma : public SkAutoGlyphCache {
+public:
+ SkAutoGlyphCacheNoGamma(const SkPaint& paint,
+ const SkSurfaceProps* surfaceProps,
+ const SkMatrix* matrix)
+ : SkAutoGlyphCache(paint, surfaceProps, SkPaint::kNone_ScalerContextFlags, matrix)
+ {}
+};
+#define SkAutoGlyphCache(...) SK_REQUIRE_LOCAL_VAR(SkAutoGlyphCache)
+#define SkAutoGlyphCacheNoGamma(...) SK_REQUIRE_LOCAL_VAR(SkAutoGlyphCacheNoGamma)
+
+#endif
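
The VisitCache()/DetachCache()/AttachCache() contract documented above is easiest to see through SkAutoGlyphCache, which detaches a strike on construction (via the typeface/effects/descriptor constructor, which routes through DetachCache()) and reattaches it on destruction. A hedged sketch, assuming the typeface, effects and descriptor are prepared by the surrounding code:

    // RAII detach/reattach of a strike; inputs are assumed to come from the caller.
    #include "SkGlyphCache.h"

    void measure_glyph_example(SkTypeface* typeface,
                               const SkScalerContextEffects& effects,
                               const SkDescriptor* desc) {
        SkAutoGlyphCache autoCache(typeface, effects, desc);   // DetachCache() under the hood
        SkGlyphCache* cache = autoCache.get();

        uint16_t glyphID = cache->unicharToGlyph('A');
        const SkGlyph& glyph = cache->getGlyphIDMetrics(glyphID);
        SkDebugf("glyph %d: %d x %d\n", glyphID, glyph.fWidth, glyph.fHeight);

        // While detached, only this thread may touch 'cache'; when autoCache goes out
        // of scope the strike is handed back to the global list via AttachCache().
    }
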
diff --git a/gfx/skia/skia/src/core/SkGlyphCache_Globals.h b/gfx/skia/skia/src/core/SkGlyphCache_Globals.h
new file mode 100644
index 000000000..4d7fe22d8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGlyphCache_Globals.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGlyphCache_Globals_DEFINED
+#define SkGlyphCache_Globals_DEFINED
+
+#include "SkGlyphCache.h"
+#include "SkMutex.h"
+#include "SkSpinlock.h"
+#include "SkTLS.h"
+
+#ifndef SK_DEFAULT_FONT_CACHE_COUNT_LIMIT
+ #define SK_DEFAULT_FONT_CACHE_COUNT_LIMIT 2048
+#endif
+
+#ifndef SK_DEFAULT_FONT_CACHE_LIMIT
+ #define SK_DEFAULT_FONT_CACHE_LIMIT (2 * 1024 * 1024)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkGlyphCache_Globals {
+public:
+ SkGlyphCache_Globals() {
+ fHead = nullptr;
+ fTotalMemoryUsed = 0;
+ fCacheSizeLimit = SK_DEFAULT_FONT_CACHE_LIMIT;
+ fCacheCount = 0;
+ fCacheCountLimit = SK_DEFAULT_FONT_CACHE_COUNT_LIMIT;
+ }
+
+ ~SkGlyphCache_Globals() {
+ SkGlyphCache* cache = fHead;
+ while (cache) {
+ SkGlyphCache* next = cache->fNext;
+ delete cache;
+ cache = next;
+ }
+ }
+
+ mutable SkSpinlock fLock;
+
+ SkGlyphCache* internalGetHead() const { return fHead; }
+ SkGlyphCache* internalGetTail() const;
+
+ size_t getTotalMemoryUsed() const;
+ int getCacheCountUsed() const;
+
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+ int getCacheCountLimit() const;
+ int setCacheCountLimit(int limit);
+
+ size_t getCacheSizeLimit() const;
+ size_t setCacheSizeLimit(size_t limit);
+
+ void purgeAll(); // does not change budget
+
+ // call when a glyphcache is available for caching (i.e. not in use)
+ void attachCacheToHead(SkGlyphCache*);
+
+ // can only be called when the mutex is already held
+ void internalDetachCache(SkGlyphCache*);
+ void internalAttachCacheToHead(SkGlyphCache*);
+
+private:
+ SkGlyphCache* fHead;
+ size_t fTotalMemoryUsed;
+ size_t fCacheSizeLimit;
+ int32_t fCacheCountLimit;
+ int32_t fCacheCount;
+
+ // Check the budgets, modulated by the specified min-bytes-needed-to-purge,
+ // and attempt to purge caches to match.
+ // Returns number of bytes freed.
+ size_t internalPurge(size_t minBytesNeeded = 0);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkGpuBlurUtils.cpp b/gfx/skia/skia/src/core/SkGpuBlurUtils.cpp
new file mode 100644
index 000000000..ec3b0a990
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGpuBlurUtils.cpp
@@ -0,0 +1,383 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkGpuBlurUtils.h"
+
+#include "SkRect.h"
+
+#if SK_SUPPORT_GPU
+#include "effects/GrConvolutionEffect.h"
+#include "effects/GrMatrixConvolutionEffect.h"
+#include "GrContext.h"
+#include "GrCaps.h"
+#include "GrDrawContext.h"
+#include "GrFixedClip.h"
+
+#define MAX_BLUR_SIGMA 4.0f
+
+static void scale_irect_roundout(SkIRect* rect, float xScale, float yScale) {
+ rect->fLeft = SkScalarFloorToInt(SkScalarMul(rect->fLeft, xScale));
+ rect->fTop = SkScalarFloorToInt(SkScalarMul(rect->fTop, yScale));
+ rect->fRight = SkScalarCeilToInt(SkScalarMul(rect->fRight, xScale));
+ rect->fBottom = SkScalarCeilToInt(SkScalarMul(rect->fBottom, yScale));
+}
+
+static void scale_irect(SkIRect* rect, int xScale, int yScale) {
+ rect->fLeft *= xScale;
+ rect->fTop *= yScale;
+ rect->fRight *= xScale;
+ rect->fBottom *= yScale;
+}
+
+#ifdef SK_DEBUG
+static inline int is_even(int x) { return !(x & 1); }
+#endif
+
+static void shrink_irect_by_2(SkIRect* rect, bool xAxis, bool yAxis) {
+ if (xAxis) {
+ SkASSERT(is_even(rect->fLeft) && is_even(rect->fRight));
+ rect->fLeft /= 2;
+ rect->fRight /= 2;
+ }
+ if (yAxis) {
+ SkASSERT(is_even(rect->fTop) && is_even(rect->fBottom));
+ rect->fTop /= 2;
+ rect->fBottom /= 2;
+ }
+}
+
+static float adjust_sigma(float sigma, int maxTextureSize, int *scaleFactor, int *radius) {
+ *scaleFactor = 1;
+ while (sigma > MAX_BLUR_SIGMA) {
+ *scaleFactor *= 2;
+ sigma *= 0.5f;
+ if (*scaleFactor > maxTextureSize) {
+ *scaleFactor = maxTextureSize;
+ sigma = MAX_BLUR_SIGMA;
+ }
+ }
+ *radius = static_cast<int>(ceilf(sigma * 3.0f));
+ SkASSERT(*radius <= GrConvolutionEffect::kMaxKernelRadius);
+ return sigma;
+}
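
A worked example of adjust_sigma(): a requested sigma of 10 exceeds MAX_BLUR_SIGMA, so the source is conceptually downscaled by 4 and the blur is re-expressed as sigma 2.5 with a radius-8 kernel, instead of a radius-30 kernel at full resolution. The standalone arithmetic:

    // Same halving loop as adjust_sigma(), outside of Skia.
    #include <cmath>
    #include <cstdio>

    int main() {
        float sigma = 10.0f;
        int scaleFactor = 1;
        while (sigma > 4.0f) {          // MAX_BLUR_SIGMA
            scaleFactor *= 2;
            sigma *= 0.5f;
        }
        int radius = (int)ceilf(sigma * 3.0f);
        // Prints: scale 4, sigma 2.50, radius 8 -- i.e. blur a quarter-size image with
        // an 8-texel-radius kernel, then upscale, instead of radius 30 at full size.
        printf("scale %d, sigma %.2f, radius %d\n", scaleFactor, sigma, radius);
        return 0;
    }
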
+
+static void convolve_gaussian_1d(GrDrawContext* drawContext,
+ const GrClip& clip,
+ const SkIRect& dstRect,
+ const SkIPoint& srcOffset,
+ GrTexture* texture,
+ Gr1DKernelEffect::Direction direction,
+ int radius,
+ float sigma,
+ bool useBounds,
+ float bounds[2]) {
+ GrPaint paint;
+ paint.setGammaCorrect(drawContext->isGammaCorrect());
+ sk_sp<GrFragmentProcessor> conv(GrConvolutionEffect::MakeGaussian(
+ texture, direction, radius, sigma, useBounds, bounds));
+ paint.addColorFragmentProcessor(std::move(conv));
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ SkMatrix localMatrix = SkMatrix::MakeTrans(-SkIntToScalar(srcOffset.x()),
+ -SkIntToScalar(srcOffset.y()));
+ drawContext->fillRectWithLocalMatrix(clip, paint, SkMatrix::I(),
+ SkRect::Make(dstRect), localMatrix);
+}
+
+static void convolve_gaussian_2d(GrDrawContext* drawContext,
+ const GrClip& clip,
+ const SkIRect& dstRect,
+ const SkIPoint& srcOffset,
+ GrTexture* texture,
+ int radiusX,
+ int radiusY,
+ SkScalar sigmaX,
+ SkScalar sigmaY,
+ const SkIRect* srcBounds) {
+ SkMatrix localMatrix = SkMatrix::MakeTrans(-SkIntToScalar(srcOffset.x()),
+ -SkIntToScalar(srcOffset.y()));
+ SkISize size = SkISize::Make(2 * radiusX + 1, 2 * radiusY + 1);
+ SkIPoint kernelOffset = SkIPoint::Make(radiusX, radiusY);
+ GrPaint paint;
+ paint.setGammaCorrect(drawContext->isGammaCorrect());
+ SkIRect bounds = srcBounds ? *srcBounds : SkIRect::EmptyIRect();
+
+ sk_sp<GrFragmentProcessor> conv(GrMatrixConvolutionEffect::MakeGaussian(
+ texture, bounds, size, 1.0, 0.0, kernelOffset,
+ srcBounds ? GrTextureDomain::kDecal_Mode : GrTextureDomain::kIgnore_Mode,
+ true, sigmaX, sigmaY));
+ paint.addColorFragmentProcessor(std::move(conv));
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ drawContext->fillRectWithLocalMatrix(clip, paint, SkMatrix::I(),
+ SkRect::Make(dstRect), localMatrix);
+}
+
+static void convolve_gaussian(GrDrawContext* drawContext,
+ const GrClip& clip,
+ const SkIRect& srcRect,
+ GrTexture* texture,
+ Gr1DKernelEffect::Direction direction,
+ int radius,
+ float sigma,
+ const SkIRect* srcBounds,
+ const SkIPoint& srcOffset) {
+ float bounds[2] = { 0.0f, 1.0f };
+ SkIRect dstRect = SkIRect::MakeWH(srcRect.width(), srcRect.height());
+ if (!srcBounds) {
+ convolve_gaussian_1d(drawContext, clip, dstRect, srcOffset, texture,
+ direction, radius, sigma, false, bounds);
+ return;
+ }
+ SkIRect midRect = *srcBounds, leftRect, rightRect;
+ midRect.offset(srcOffset);
+ SkIRect topRect, bottomRect;
+ if (direction == Gr1DKernelEffect::kX_Direction) {
+ bounds[0] = SkIntToFloat(srcBounds->left()) / texture->width();
+ bounds[1] = SkIntToFloat(srcBounds->right()) / texture->width();
+ topRect = SkIRect::MakeLTRB(0, 0, dstRect.right(), midRect.top());
+ bottomRect = SkIRect::MakeLTRB(0, midRect.bottom(), dstRect.right(), dstRect.bottom());
+ midRect.inset(radius, 0);
+ leftRect = SkIRect::MakeLTRB(0, midRect.top(), midRect.left(), midRect.bottom());
+ rightRect =
+ SkIRect::MakeLTRB(midRect.right(), midRect.top(), dstRect.width(), midRect.bottom());
+ dstRect.fTop = midRect.top();
+ dstRect.fBottom = midRect.bottom();
+ } else {
+ bounds[0] = SkIntToFloat(srcBounds->top()) / texture->height();
+ bounds[1] = SkIntToFloat(srcBounds->bottom()) / texture->height();
+ topRect = SkIRect::MakeLTRB(0, 0, midRect.left(), dstRect.bottom());
+ bottomRect = SkIRect::MakeLTRB(midRect.right(), 0, dstRect.right(), dstRect.bottom());
+ midRect.inset(0, radius);
+ leftRect = SkIRect::MakeLTRB(midRect.left(), 0, midRect.right(), midRect.top());
+ rightRect =
+ SkIRect::MakeLTRB(midRect.left(), midRect.bottom(), midRect.right(), dstRect.height());
+ dstRect.fLeft = midRect.left();
+ dstRect.fRight = midRect.right();
+ }
+ if (!topRect.isEmpty()) {
+ drawContext->clear(&topRect, 0, false);
+ }
+
+ if (!bottomRect.isEmpty()) {
+ drawContext->clear(&bottomRect, 0, false);
+ }
+ if (midRect.isEmpty()) {
+ // Blur radius covers srcBounds; use bounds over entire draw
+ convolve_gaussian_1d(drawContext, clip, dstRect, srcOffset, texture,
+ direction, radius, sigma, true, bounds);
+ } else {
+ // Draw right and left margins with bounds; middle without.
+ convolve_gaussian_1d(drawContext, clip, leftRect, srcOffset, texture,
+ direction, radius, sigma, true, bounds);
+ convolve_gaussian_1d(drawContext, clip, rightRect, srcOffset, texture,
+ direction, radius, sigma, true, bounds);
+ convolve_gaussian_1d(drawContext, clip, midRect, srcOffset, texture,
+ direction, radius, sigma, false, bounds);
+ }
+}
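
Both convolve_gaussian() passes rely on the separability of the Gaussian: the same 1D kernel is applied along X and then along Y, costing O(radius) per pixel per pass instead of O(radius^2) for a full 2D kernel. The sketch below shows the normalized 1D weights such a pass uses; it mirrors the idea behind GrConvolutionEffect::MakeGaussian, not its exact code.

    // Normalized 1D Gaussian weights for a separable blur pass (illustrative only).
    #include <cmath>
    #include <vector>

    std::vector<float> gaussian_kernel_1d(float sigma, int radius) {
        std::vector<float> w(2 * radius + 1);
        const float denom = 2.0f * sigma * sigma;
        float sum = 0.0f;
        for (int i = -radius; i <= radius; ++i) {
            w[i + radius] = std::exp(-(float)(i * i) / denom);
            sum += w[i + radius];
        }
        for (float& v : w) {
            v /= sum;               // normalize so the kernel preserves brightness
        }
        return w;
    }
    // Applying this kernel along X and then along Y is equivalent to one 2D Gaussian
    // convolution, which is why convolve_gaussian() above is invoked once per axis.
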
+
+namespace SkGpuBlurUtils {
+
+sk_sp<GrDrawContext> GaussianBlur(GrContext* context,
+ GrTexture* origSrc,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkIRect& dstBounds,
+ const SkIRect* srcBounds,
+ float sigmaX,
+ float sigmaY,
+ SkBackingFit fit) {
+ SkASSERT(context);
+ SkIRect clearRect;
+ int scaleFactorX, radiusX;
+ int scaleFactorY, radiusY;
+ int maxTextureSize = context->caps()->maxTextureSize();
+ sigmaX = adjust_sigma(sigmaX, maxTextureSize, &scaleFactorX, &radiusX);
+ sigmaY = adjust_sigma(sigmaY, maxTextureSize, &scaleFactorY, &radiusY);
+ SkASSERT(sigmaX || sigmaY);
+
+ SkIPoint srcOffset = SkIPoint::Make(-dstBounds.x(), -dstBounds.y());
+ SkIRect localDstBounds = SkIRect::MakeWH(dstBounds.width(), dstBounds.height());
+ SkIRect localSrcBounds;
+ SkIRect srcRect;
+ if (srcBounds) {
+ srcRect = localSrcBounds = *srcBounds;
+ srcRect.offset(srcOffset);
+ srcBounds = &localSrcBounds;
+ } else {
+ srcRect = localDstBounds;
+ }
+
+ scale_irect_roundout(&srcRect, 1.0f / scaleFactorX, 1.0f / scaleFactorY);
+ scale_irect(&srcRect, scaleFactorX, scaleFactorY);
+
+ // setup new clip
+ GrFixedClip clip(localDstBounds);
+
+ sk_sp<GrTexture> srcTexture(sk_ref_sp(origSrc));
+
+ SkASSERT(kBGRA_8888_GrPixelConfig == srcTexture->config() ||
+ kRGBA_8888_GrPixelConfig == srcTexture->config() ||
+ kSRGBA_8888_GrPixelConfig == srcTexture->config() ||
+ kSBGRA_8888_GrPixelConfig == srcTexture->config() ||
+ kRGBA_half_GrPixelConfig == srcTexture->config() ||
+ kAlpha_8_GrPixelConfig == srcTexture->config());
+
+ const int width = dstBounds.width();
+ const int height = dstBounds.height();
+ const GrPixelConfig config = srcTexture->config();
+
+ sk_sp<GrDrawContext> dstDrawContext(context->makeDrawContext(fit,
+ width, height, config, colorSpace,
+ 0, kDefault_GrSurfaceOrigin));
+ if (!dstDrawContext) {
+ return nullptr;
+ }
+
+ // For really small blurs (certainly no wider than 5x5 on desktop GPUs) it is faster to just
+ // launch a single non-separable kernel than two separate passes.
+ if (sigmaX > 0.0f && sigmaY > 0.0f &&
+ (2 * radiusX + 1) * (2 * radiusY + 1) <= MAX_KERNEL_SIZE) {
+ // We shouldn't be scaling because this is a small size blur
+ SkASSERT((1 == scaleFactorX) && (1 == scaleFactorY));
+
+ convolve_gaussian_2d(dstDrawContext.get(), clip, localDstBounds, srcOffset,
+ srcTexture.get(), radiusX, radiusY, sigmaX, sigmaY, srcBounds);
+
+ return dstDrawContext;
+ }
+
+ sk_sp<GrDrawContext> tmpDrawContext(context->makeDrawContext(fit,
+ width, height, config, colorSpace,
+ 0, kDefault_GrSurfaceOrigin));
+ if (!tmpDrawContext) {
+ return nullptr;
+ }
+
+ sk_sp<GrDrawContext> srcDrawContext;
+
+ SkASSERT(SkIsPow2(scaleFactorX) && SkIsPow2(scaleFactorY));
+
+ for (int i = 1; i < scaleFactorX || i < scaleFactorY; i *= 2) {
+ GrPaint paint;
+ paint.setGammaCorrect(dstDrawContext->isGammaCorrect());
+ SkMatrix matrix;
+ matrix.setIDiv(srcTexture->width(), srcTexture->height());
+ SkIRect dstRect(srcRect);
+ if (srcBounds && i == 1) {
+ SkRect domain;
+ matrix.mapRect(&domain, SkRect::Make(*srcBounds));
+ domain.inset((i < scaleFactorX) ? SK_ScalarHalf / srcTexture->width() : 0.0f,
+ (i < scaleFactorY) ? SK_ScalarHalf / srcTexture->height() : 0.0f);
+ sk_sp<GrFragmentProcessor> fp(GrTextureDomainEffect::Make(
+ srcTexture.get(),
+ nullptr,
+ matrix,
+ domain,
+ GrTextureDomain::kDecal_Mode,
+ GrTextureParams::kBilerp_FilterMode));
+ paint.addColorFragmentProcessor(std::move(fp));
+ srcRect.offset(-srcOffset);
+ srcOffset.set(0, 0);
+ } else {
+ GrTextureParams params(SkShader::kClamp_TileMode, GrTextureParams::kBilerp_FilterMode);
+ paint.addColorTextureProcessor(srcTexture.get(), nullptr, matrix, params);
+ }
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ shrink_irect_by_2(&dstRect, i < scaleFactorX, i < scaleFactorY);
+
+ dstDrawContext->fillRectToRect(clip, paint, SkMatrix::I(),
+ SkRect::Make(dstRect), SkRect::Make(srcRect));
+
+ srcDrawContext = dstDrawContext;
+ srcRect = dstRect;
+ srcTexture = srcDrawContext->asTexture();
+ dstDrawContext.swap(tmpDrawContext);
+ localSrcBounds = srcRect;
+ }
+
+ srcRect = localDstBounds;
+ scale_irect_roundout(&srcRect, 1.0f / scaleFactorX, 1.0f / scaleFactorY);
+ if (sigmaX > 0.0f) {
+ if (scaleFactorX > 1) {
+ SkASSERT(srcDrawContext);
+
+ // Clear out a radius to the right of the srcRect to prevent the
+ // X convolution from reading garbage.
+ clearRect = SkIRect::MakeXYWH(srcRect.fRight, srcRect.fTop,
+ radiusX, srcRect.height());
+ srcDrawContext->clear(&clearRect, 0x0, false);
+ }
+
+ convolve_gaussian(dstDrawContext.get(), clip, srcRect,
+ srcTexture.get(), Gr1DKernelEffect::kX_Direction, radiusX, sigmaX,
+ srcBounds, srcOffset);
+ srcDrawContext = dstDrawContext;
+ srcTexture = srcDrawContext->asTexture();
+ srcRect.offsetTo(0, 0);
+ dstDrawContext.swap(tmpDrawContext);
+ localSrcBounds = srcRect;
+ srcOffset.set(0, 0);
+ }
+
+ if (sigmaY > 0.0f) {
+ if (scaleFactorY > 1 || sigmaX > 0.0f) {
+ SkASSERT(srcDrawContext);
+
+ // Clear out a radius below the srcRect to prevent the Y
+ // convolution from reading garbage.
+ clearRect = SkIRect::MakeXYWH(srcRect.fLeft, srcRect.fBottom,
+ srcRect.width(), radiusY);
+ srcDrawContext->clear(&clearRect, 0x0, false);
+ }
+
+ convolve_gaussian(dstDrawContext.get(), clip, srcRect,
+ srcTexture.get(), Gr1DKernelEffect::kY_Direction, radiusY, sigmaY,
+ srcBounds, srcOffset);
+
+ srcDrawContext = dstDrawContext;
+ srcRect.offsetTo(0, 0);
+ dstDrawContext.swap(tmpDrawContext);
+ }
+
+ SkASSERT(srcDrawContext);
+ srcTexture = nullptr; // we don't use this from here on out
+
+ if (scaleFactorX > 1 || scaleFactorY > 1) {
+ // Clear one pixel to the right and below, to accommodate bilinear upsampling.
+ clearRect = SkIRect::MakeXYWH(srcRect.fLeft, srcRect.fBottom, srcRect.width() + 1, 1);
+ srcDrawContext->clear(&clearRect, 0x0, false);
+ clearRect = SkIRect::MakeXYWH(srcRect.fRight, srcRect.fTop, 1, srcRect.height());
+ srcDrawContext->clear(&clearRect, 0x0, false);
+
+ SkMatrix matrix;
+ matrix.setIDiv(srcDrawContext->width(), srcDrawContext->height());
+
+ GrPaint paint;
+ paint.setGammaCorrect(dstDrawContext->isGammaCorrect());
+ // FIXME: this should be mitchell, not bilinear.
+ GrTextureParams params(SkShader::kClamp_TileMode, GrTextureParams::kBilerp_FilterMode);
+ sk_sp<GrTexture> tex(srcDrawContext->asTexture());
+ paint.addColorTextureProcessor(tex.get(), nullptr, matrix, params);
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ SkIRect dstRect(srcRect);
+ scale_irect(&dstRect, scaleFactorX, scaleFactorY);
+
+ dstDrawContext->fillRectToRect(clip, paint, SkMatrix::I(),
+ SkRect::Make(dstRect), SkRect::Make(srcRect));
+
+ srcDrawContext = dstDrawContext;
+ srcRect = dstRect;
+ dstDrawContext.swap(tmpDrawContext);
+ }
+
+ return srcDrawContext;
+}
+
+} // namespace SkGpuBlurUtils
+
+#endif
+
diff --git a/gfx/skia/skia/src/core/SkGpuBlurUtils.h b/gfx/skia/skia/src/core/SkGpuBlurUtils.h
new file mode 100644
index 000000000..a12a08873
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGpuBlurUtils.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGpuBlurUtils_DEFINED
+#define SkGpuBlurUtils_DEFINED
+
+#if SK_SUPPORT_GPU
+#include "GrDrawContext.h"
+
+class GrContext;
+class GrTexture;
+
+struct SkRect;
+
+namespace SkGpuBlurUtils {
+ /**
+ * Applies a 2D Gaussian blur to a given texture. The blurred result is returned
+ * as a drawContext in case the caller wishes to draw further into the result.
+ * Note: one of sigmaX and sigmaY should be non-zero!
+ * @param context The GPU context
+ * @param srcTexture The source texture to be blurred.
+ * @param colorSpace Color space of the source (used for the drawContext result, too).
+ * @param dstBounds The destination bounds, relative to the source texture.
+ * @param srcBounds The source bounds, relative to the source texture. If non-null,
+ * no pixels will be sampled outside of this rectangle.
+ * @param sigmaX The blur's standard deviation in X.
+ * @param sigmaY The blur's standard deviation in Y.
+ * @param fit backing fit for the returned draw context
+ * @return The drawContext containing the blurred result.
+ */
+ sk_sp<GrDrawContext> GaussianBlur(GrContext* context,
+ GrTexture* srcTexture,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkIRect& dstBounds,
+ const SkIRect* srcBounds,
+ float sigmaX,
+ float sigmaY,
+ SkBackingFit fit = SkBackingFit::kApprox);
+};
+
+#endif
+#endif
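
A hedged call sketch for GaussianBlur() as declared above; the GrContext and GrTexture are assumed to be created by the surrounding GPU code, and error handling is reduced to a null check.

    // Blur an entire texture with equal sigma in X and Y (sketch, not production code).
    #include "SkGpuBlurUtils.h"
    #include "SkColorSpace.h"
    #if SK_SUPPORT_GPU
    #include "GrTexture.h"

    sk_sp<GrDrawContext> blur_whole_texture(GrContext* context, GrTexture* srcTexture,
                                            sk_sp<SkColorSpace> colorSpace, float sigma) {
        SkIRect dstBounds = SkIRect::MakeWH(srcTexture->width(), srcTexture->height());
        sk_sp<GrDrawContext> blurred = SkGpuBlurUtils::GaussianBlur(
                context, srcTexture, std::move(colorSpace),
                dstBounds, /*srcBounds=*/nullptr, sigma, sigma);
        if (!blurred) {
            return nullptr;   // the intermediate render targets could not be allocated
        }
        return blurred;       // the caller can keep drawing into this draw context
    }
    #endif
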
diff --git a/gfx/skia/skia/src/core/SkGraphics.cpp b/gfx/skia/skia/src/core/SkGraphics.cpp
new file mode 100644
index 000000000..01b1432ef
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGraphics.cpp
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkGraphics.h"
+
+#include "SkBlitter.h"
+#include "SkCanvas.h"
+#include "SkCpu.h"
+#include "SkGeometry.h"
+#include "SkGlyphCache.h"
+#include "SkImageFilter.h"
+#include "SkMath.h"
+#include "SkMatrix.h"
+#include "SkOpts.h"
+#include "SkPath.h"
+#include "SkPathEffect.h"
+#include "SkPixelRef.h"
+#include "SkRefCnt.h"
+#include "SkResourceCache.h"
+#include "SkScalerContext.h"
+#include "SkShader.h"
+#include "SkStream.h"
+#include "SkTSearch.h"
+#include "SkTime.h"
+#include "SkUtils.h"
+#include "SkXfermode.h"
+
+#include <stdlib.h>
+
+void SkGraphics::GetVersion(int32_t* major, int32_t* minor, int32_t* patch) {
+ if (major) {
+ *major = SKIA_VERSION_MAJOR;
+ }
+ if (minor) {
+ *minor = SKIA_VERSION_MINOR;
+ }
+ if (patch) {
+ *patch = SKIA_VERSION_PATCH;
+ }
+}
+
+void SkGraphics::Init() {
+ // SkGraphics::Init() must be thread-safe and idempotent.
+ SkCpu::CacheRuntimeFeatures();
+ SkOpts::Init();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGraphics::DumpMemoryStatistics(SkTraceMemoryDump* dump) {
+ SkResourceCache::DumpMemoryStatistics(dump);
+ SkGlyphCache::DumpMemoryStatistics(dump);
+}
+
+void SkGraphics::PurgeAllCaches() {
+ SkGraphics::PurgeFontCache();
+ SkGraphics::PurgeResourceCache();
+ SkImageFilter::PurgeCache();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static const char kFontCacheLimitStr[] = "font-cache-limit";
+static const size_t kFontCacheLimitLen = sizeof(kFontCacheLimitStr) - 1;
+
+static const struct {
+ const char* fStr;
+ size_t fLen;
+ size_t (*fFunc)(size_t);
+} gFlags[] = {
+ { kFontCacheLimitStr, kFontCacheLimitLen, SkGraphics::SetFontCacheLimit }
+};
+
+/* flags are of the form param; or param=value; */
+void SkGraphics::SetFlags(const char* flags) {
+ if (!flags) {
+ return;
+ }
+ const char* nextSemi;
+ do {
+ size_t len = strlen(flags);
+ const char* paramEnd = flags + len;
+ const char* nextEqual = strchr(flags, '=');
+ if (nextEqual && paramEnd > nextEqual) {
+ paramEnd = nextEqual;
+ }
+ nextSemi = strchr(flags, ';');
+ if (nextSemi && paramEnd > nextSemi) {
+ paramEnd = nextSemi;
+ }
+ size_t paramLen = paramEnd - flags;
+ for (int i = 0; i < (int)SK_ARRAY_COUNT(gFlags); ++i) {
+ if (paramLen != gFlags[i].fLen) {
+ continue;
+ }
+ if (strncmp(flags, gFlags[i].fStr, paramLen) == 0) {
+ size_t val = 0;
+ if (nextEqual) {
+ val = (size_t) atoi(nextEqual + 1);
+ }
+ (gFlags[i].fFunc)(val);
+ break;
+ }
+ }
+ flags = nextSemi + 1;
+ } while (nextSemi);
+}
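
SetFlags() above parses a semicolon-separated list of "param" or "param=value" tokens; the only flag registered in gFlags is font-cache-limit, whose value is fed through atoi() into SetFontCacheLimit() as a byte count. A minimal usage sketch:

    // Configuring the font cache through the string-flag interface defined above.
    #include "SkGraphics.h"

    void configure_from_flags_example() {
        SkGraphics::Init();
        // Equivalent to SkGraphics::SetFontCacheLimit(4194304); unknown flags are ignored.
        SkGraphics::SetFlags("font-cache-limit=4194304;");
    }
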
diff --git a/gfx/skia/skia/src/core/SkHalf.cpp b/gfx/skia/skia/src/core/SkHalf.cpp
new file mode 100644
index 000000000..262362e07
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkHalf.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkHalf.h"
+#include "SkFloatBits.h"
+
+uint16_t halfMantissa(SkHalf h) {
+ return h & 0x03ff;
+}
+
+uint16_t halfExponent(SkHalf h) {
+ return (h >> 10) & 0x001f;
+}
+
+uint16_t halfSign(SkHalf h) {
+ return h >> 15;
+}
+
+union FloatUIntUnion {
+ uint32_t fUInt; // this must come first for the initializations below to work
+ float fFloat;
+};
+
+// based on Fabien Giesen's float_to_half_fast3()
+// see https://gist.github.com/rygorous/2156668
+SkHalf SkFloatToHalf(float f) {
+ static const uint32_t f32infty = { 255 << 23 };
+ static const uint32_t f16infty = { 31 << 23 };
+ static const FloatUIntUnion magic = { 15 << 23 };
+ static const uint32_t sign_mask = 0x80000000u;
+ static const uint32_t round_mask = ~0xfffu;
+ SkHalf o = 0;
+
+ FloatUIntUnion floatUnion;
+ floatUnion.fFloat = f;
+
+ uint32_t sign = floatUnion.fUInt & sign_mask;
+ floatUnion.fUInt ^= sign;
+
+ // NOTE all the integer compares in this function can be safely
+ // compiled into signed compares since all operands are below
+ // 0x80000000. Important if you want fast straight SSE2 code
+ // (since there's no unsigned PCMPGTD).
+
+ // Inf or NaN (all exponent bits set)
+ if (floatUnion.fUInt >= f32infty)
+ // NaN->qNaN and Inf->Inf
+ o = (floatUnion.fUInt > f32infty) ? 0x7e00 : 0x7c00;
+ // (De)normalized number or zero
+ else {
+ floatUnion.fUInt &= round_mask;
+ floatUnion.fFloat *= magic.fFloat;
+ floatUnion.fUInt -= round_mask;
+ // Clamp to signed infinity if overflowed
+ if (floatUnion.fUInt > f16infty) {
+ floatUnion.fUInt = f16infty;
+ }
+
+ o = floatUnion.fUInt >> 13; // Take the bits!
+ }
+
+ o |= sign >> 16;
+ return o;
+}
+
+// based on Fabien Giesen's half_to_float_fast2()
+// see https://fgiesen.wordpress.com/2012/03/28/half-to-float-done-quic/
+float SkHalfToFloat(SkHalf h) {
+ static const FloatUIntUnion magic = { 126 << 23 };
+ FloatUIntUnion o;
+
+ if (halfExponent(h) == 0)
+ {
+ // Zero / Denormal
+ o.fUInt = magic.fUInt + halfMantissa(h);
+ o.fFloat -= magic.fFloat;
+ }
+ else
+ {
+ // Set mantissa
+ o.fUInt = halfMantissa(h) << 13;
+ // Set exponent
+ if (halfExponent(h) == 0x1f)
+ // Inf/NaN
+ o.fUInt |= (255 << 23);
+ else
+ o.fUInt |= ((127 - 15 + halfExponent(h)) << 23);
+ }
+
+ // Set sign
+ o.fUInt |= (halfSign(h) << 31);
+ return o.fFloat;
+}
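
A quick sanity sketch of the scalar converters, separate from the imported source: 1.0f is exactly representable in half precision, so a round trip is lossless.

    #include "SkHalf.h"
    #include <cassert>

    static void half_roundtrip_demo() {
        SkHalf h = SkFloatToHalf(1.0f);   // encodes as 0x3C00, i.e. SK_Half1
        assert(h == SK_Half1);
        float f = SkHalfToFloat(h);       // exact: 1.0 needs no rounding in half precision
        assert(f == 1.0f);
    }
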
diff --git a/gfx/skia/skia/src/core/SkHalf.h b/gfx/skia/skia/src/core/SkHalf.h
new file mode 100644
index 000000000..dd978a234
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkHalf.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkHalf_DEFINED
+#define SkHalf_DEFINED
+
+#include "SkNx.h"
+#include "SkTypes.h"
+
+// 16-bit floating point value
+// format is 1 bit sign, 5 bits exponent, 10 bits mantissa
+// only used for storage
+typedef uint16_t SkHalf;
+
+static constexpr uint16_t SK_HalfMin = 0x0400; // 2^-14 (minimum positive normal value)
+static constexpr uint16_t SK_HalfMax = 0x7bff; // 65504
+static constexpr uint16_t SK_HalfEpsilon = 0x1400; // 2^-10
+static constexpr uint16_t SK_Half1 = 0x3C00; // 1
+
+// convert between half and single precision floating point
+float SkHalfToFloat(SkHalf h);
+SkHalf SkFloatToHalf(float f);
+
+// Convert between half and single precision floating point,
+// assuming inputs and outputs are both finite, and may
+// flush values which would be denormal half floats to zero.
+static inline Sk4f SkHalfToFloat_finite_ftz(uint64_t);
+static inline Sk4h SkFloatToHalf_finite_ftz(const Sk4f&);
+
+// ~~~~~~~~~~~ impl ~~~~~~~~~~~~~~ //
+
+// Like the serial versions in SkHalf.cpp, these are based on
+// https://fgiesen.wordpress.com/2012/03/28/half-to-float-done-quic/
+
+// GCC 4.9 lacks the intrinsics to use ARMv8 f16<->f32 instructions, so we use inline assembly.
+
+static inline Sk4f SkHalfToFloat_finite_ftz(const Sk4h& hs) {
+#if !defined(SKNX_NO_SIMD) && defined(SK_CPU_ARM64)
+ float32x4_t fs;
+ asm ("fcvtl %[fs].4s, %[hs].4h \n" // vcvt_f32_f16(...)
+ : [fs] "=w" (fs) // =w: write-only NEON register
+ : [hs] "w" (hs.fVec)); // w: read-only NEON register
+ return fs;
+#else
+ Sk4i bits = SkNx_cast<int>(hs), // Expand to 32 bit.
+ sign = bits & 0x00008000, // Save the sign bit for later...
+ positive = bits ^ sign, // ...but strip it off for now.
+ is_norm = 0x03ff < positive; // Exponent > 0?
+
+ // For normal half floats, extend the mantissa by 13 zero bits,
+ // then adjust the exponent from 15 bias to 127 bias.
+ Sk4i norm = (positive << 13) + ((127 - 15) << 23);
+
+ Sk4i merged = (sign << 16) | (norm & is_norm);
+ return Sk4f::Load(&merged);
+#endif
+}
+
+static inline Sk4f SkHalfToFloat_finite_ftz(uint64_t hs) {
+ return SkHalfToFloat_finite_ftz(Sk4h::Load(&hs));
+}
+
+static inline Sk4h SkFloatToHalf_finite_ftz(const Sk4f& fs) {
+#if !defined(SKNX_NO_SIMD) && defined(SK_CPU_ARM64)
+ float32x4_t vec = fs.fVec;
+ asm ("fcvtn %[vec].4h, %[vec].4s \n" // vcvt_f16_f32(vec)
+ : [vec] "+w" (vec)); // +w: read-write NEON register
+ return vreinterpret_u16_f32(vget_low_f32(vec));
+#else
+ Sk4i bits = Sk4i::Load(&fs),
+ sign = bits & 0x80000000, // Save the sign bit for later...
+ positive = bits ^ sign, // ...but strip it off for now.
+ will_be_norm = 0x387fdfff < positive; // greater than largest denorm half?
+
+ // For normal half floats, adjust the exponent from 127 bias to 15 bias,
+ // then drop the bottom 13 mantissa bits.
+ Sk4i norm = (positive - ((127 - 15) << 23)) >> 13;
+
+ Sk4i merged = (sign >> 16) | (will_be_norm & norm);
+ return SkNx_cast<uint16_t>(merged);
+#endif
+}
+
+#endif
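
A short sketch of the vector helpers, assuming a little-endian host: the uint64_t overload treats its argument as four packed 16-bit halves, and the constant below is four copies of half 1.0, chosen purely for illustration.

    #include "SkHalf.h"

    static Sk4h half_batch_demo() {
        uint64_t four_ones = 0x3C003C003C003C00ULL;         // 4 x half(1.0), assuming little-endian lanes
        Sk4f floats = SkHalfToFloat_finite_ftz(four_ones);  // -> {1.f, 1.f, 1.f, 1.f}
        return SkFloatToHalf_finite_ftz(floats);            // back to four packed halves
    }
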
diff --git a/gfx/skia/skia/src/core/SkImageCacherator.cpp b/gfx/skia/skia/src/core/SkImageCacherator.cpp
new file mode 100644
index 000000000..496ca74d9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageCacherator.cpp
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmap.h"
+#include "SkBitmapCache.h"
+#include "SkImage_Base.h"
+#include "SkImageCacherator.h"
+#include "SkMallocPixelRef.h"
+#include "SkNextID.h"
+#include "SkPixelRef.h"
+#include "SkResourceCache.h"
+
+#if SK_SUPPORT_GPU
+#include "GrContext.h"
+#include "GrGpuResourcePriv.h"
+#include "GrImageIDTextureAdjuster.h"
+#include "GrResourceKey.h"
+#include "GrTextureParams.h"
+#include "GrYUVProvider.h"
+#include "SkGr.h"
+#include "SkGrPriv.h"
+#endif
+
+// Until we actually have codecs/etc. that can contain/support a GPU texture format
+// skip this step, since for some generators, returning their encoded data as a SkData
+// can be somewhat expensive, and this call doesn't indicate to the generator that we're
+// only interested in GPU data...
+// see skbug.com/4971, 5128, ...
+//#define SK_SUPPORT_COMPRESSED_TEXTURES_IN_CACHERATOR
+
+SkImageCacherator* SkImageCacherator::NewFromGenerator(SkImageGenerator* gen,
+ const SkIRect* subset) {
+ if (!gen) {
+ return nullptr;
+ }
+
+    // We are required to take ownership of gen, regardless of whether we return a cacherator or not
+ SkAutoTDelete<SkImageGenerator> genHolder(gen);
+
+ const SkImageInfo& info = gen->getInfo();
+ if (info.isEmpty()) {
+ return nullptr;
+ }
+
+ uint32_t uniqueID = gen->uniqueID();
+ const SkIRect bounds = SkIRect::MakeWH(info.width(), info.height());
+ if (subset) {
+ if (!bounds.contains(*subset)) {
+ return nullptr;
+ }
+ if (*subset != bounds) {
+ // we need a different uniqueID since we really are a subset of the raw generator
+ uniqueID = SkNextID::ImageID();
+ }
+ } else {
+ subset = &bounds;
+ }
+
+    // Now that we know we can hand off the generator (to be owned by the cacherator), we can
+    // release our holder. (We don't want to delete it here anymore.)
+ genHolder.release();
+
+ return new SkImageCacherator(gen, gen->getInfo().makeWH(subset->width(), subset->height()),
+ SkIPoint::Make(subset->x(), subset->y()), uniqueID);
+}
+
+SkImageCacherator::SkImageCacherator(SkImageGenerator* gen, const SkImageInfo& info,
+ const SkIPoint& origin, uint32_t uniqueID)
+ : fNotThreadSafeGenerator(gen)
+ , fInfo(info)
+ , fOrigin(origin)
+ , fUniqueID(uniqueID)
+{}
+
+SkData* SkImageCacherator::refEncoded(GrContext* ctx) {
+ ScopedGenerator generator(this);
+ return generator->refEncodedData(ctx);
+}
+
+static bool check_output_bitmap(const SkBitmap& bitmap, uint32_t expectedID) {
+ SkASSERT(bitmap.getGenerationID() == expectedID);
+ SkASSERT(bitmap.isImmutable());
+ SkASSERT(bitmap.getPixels());
+ return true;
+}
+
+// Note: this returns a new, mutable bitmap with a new genID.
+// If you want the immutable bitmap with the same ID as our cacherator, call tryLockAsBitmap().
+//
+bool SkImageCacherator::generateBitmap(SkBitmap* bitmap) {
+ SkBitmap::Allocator* allocator = SkResourceCache::GetAllocator();
+
+ ScopedGenerator generator(this);
+ const SkImageInfo& genInfo = generator->getInfo();
+ if (fInfo.dimensions() == genInfo.dimensions()) {
+ SkASSERT(fOrigin.x() == 0 && fOrigin.y() == 0);
+ // fast-case, no copy needed
+ return generator->tryGenerateBitmap(bitmap, fInfo, allocator);
+ } else {
+ // need to handle subsetting, so we first generate the full size version, and then
+ // "read" from it to get our subset. See https://bug.skia.org/4213
+
+ SkBitmap full;
+ if (!generator->tryGenerateBitmap(&full, genInfo, allocator)) {
+ return false;
+ }
+ if (!bitmap->tryAllocPixels(fInfo, nullptr, full.getColorTable())) {
+ return false;
+ }
+ return full.readPixels(bitmap->info(), bitmap->getPixels(), bitmap->rowBytes(),
+ fOrigin.x(), fOrigin.y());
+ }
+}
+
+bool SkImageCacherator::directGeneratePixels(const SkImageInfo& info, void* pixels, size_t rb,
+ int srcX, int srcY) {
+ ScopedGenerator generator(this);
+ const SkImageInfo& genInfo = generator->getInfo();
+ // Currently generators do not natively handle subsets, so check that first.
+ if (srcX || srcY || genInfo.width() != info.width() || genInfo.height() != info.height()) {
+ return false;
+ }
+ return generator->getPixels(info, pixels, rb);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkImageCacherator::lockAsBitmapOnlyIfAlreadyCached(SkBitmap* bitmap) {
+ return SkBitmapCache::Find(fUniqueID, bitmap) && check_output_bitmap(*bitmap, fUniqueID);
+}
+
+bool SkImageCacherator::tryLockAsBitmap(SkBitmap* bitmap, const SkImage* client,
+ SkImage::CachingHint chint) {
+ if (this->lockAsBitmapOnlyIfAlreadyCached(bitmap)) {
+ return true;
+ }
+ if (!this->generateBitmap(bitmap)) {
+ return false;
+ }
+
+ bitmap->pixelRef()->setImmutableWithID(fUniqueID);
+ if (SkImage::kAllow_CachingHint == chint) {
+ SkBitmapCache::Add(fUniqueID, *bitmap);
+ if (client) {
+ as_IB(client)->notifyAddedToCache();
+ }
+ }
+ return true;
+}
+
+bool SkImageCacherator::lockAsBitmap(SkBitmap* bitmap, const SkImage* client,
+ SkImage::CachingHint chint) {
+ if (this->tryLockAsBitmap(bitmap, client, chint)) {
+ return check_output_bitmap(*bitmap, fUniqueID);
+ }
+
+#if SK_SUPPORT_GPU
+ // Try to get a texture and read it back to raster (and then cache that with our ID)
+ SkAutoTUnref<GrTexture> tex;
+
+ {
+ ScopedGenerator generator(this);
+ SkIRect subset = SkIRect::MakeXYWH(fOrigin.x(), fOrigin.y(), fInfo.width(), fInfo.height());
+ tex.reset(generator->generateTexture(nullptr, &subset));
+ }
+ if (!tex) {
+ bitmap->reset();
+ return false;
+ }
+
+ if (!bitmap->tryAllocPixels(fInfo)) {
+ bitmap->reset();
+ return false;
+ }
+
+ const uint32_t pixelOpsFlags = 0;
+ if (!tex->readPixels(0, 0, bitmap->width(), bitmap->height(),
+ SkImageInfo2GrPixelConfig(fInfo, *tex->getContext()->caps()),
+ bitmap->getPixels(), bitmap->rowBytes(), pixelOpsFlags)) {
+ bitmap->reset();
+ return false;
+ }
+
+ bitmap->pixelRef()->setImmutableWithID(fUniqueID);
+ if (SkImage::kAllow_CachingHint == chint) {
+ SkBitmapCache::Add(fUniqueID, *bitmap);
+ if (client) {
+ as_IB(client)->notifyAddedToCache();
+ }
+ }
+ return check_output_bitmap(*bitmap, fUniqueID);
+#else
+ return false;
+#endif
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+#ifdef SK_SUPPORT_COMPRESSED_TEXTURES_IN_CACHERATOR
+static GrTexture* load_compressed_into_texture(GrContext* ctx, SkData* data, GrSurfaceDesc desc) {
+ const void* rawStart;
+ GrPixelConfig config = GrIsCompressedTextureDataSupported(ctx, data, desc.fWidth, desc.fHeight,
+ &rawStart);
+ if (kUnknown_GrPixelConfig == config) {
+ return nullptr;
+ }
+
+ desc.fConfig = config;
+ return ctx->textureProvider()->createTexture(desc, SkBudgeted::kYes, rawStart, 0);
+}
+#endif
+
+class Generator_GrYUVProvider : public GrYUVProvider {
+ SkImageGenerator* fGen;
+
+public:
+ Generator_GrYUVProvider(SkImageGenerator* gen) : fGen(gen) {}
+
+ uint32_t onGetID() override { return fGen->uniqueID(); }
+ bool onQueryYUV8(SkYUVSizeInfo* sizeInfo, SkYUVColorSpace* colorSpace) const override {
+ return fGen->queryYUV8(sizeInfo, colorSpace);
+ }
+ bool onGetYUV8Planes(const SkYUVSizeInfo& sizeInfo, void* planes[3]) override {
+ return fGen->getYUV8Planes(sizeInfo, planes);
+ }
+};
+
+static GrTexture* set_key_and_return(GrTexture* tex, const GrUniqueKey& key) {
+ if (key.isValid()) {
+ tex->resourcePriv().setUniqueKey(key);
+ }
+ return tex;
+}
+
+/*
+ * We have a 5 ways to try to return a texture (in sorted order)
+ *
+ * 1. Check the cache for a pre-existing one
+ * 2. Ask the generator to natively create one
+ * 3. Ask the generator to return a compressed form that the GPU might support
+ * 4. Ask the generator to return YUV planes, which the GPU can convert
+ * 5. Ask the generator to return RGB(A) data, which the GPU can convert
+ */
+GrTexture* SkImageCacherator::lockTexture(GrContext* ctx, const GrUniqueKey& key,
+ const SkImage* client, SkImage::CachingHint chint,
+ bool willBeMipped,
+ SkSourceGammaTreatment gammaTreatment) {
+ // Values representing the various texture lock paths we can take. Used for logging the path
+ // taken to a histogram.
+ enum LockTexturePath {
+ kFailure_LockTexturePath,
+ kPreExisting_LockTexturePath,
+ kNative_LockTexturePath,
+ kCompressed_LockTexturePath,
+ kYUV_LockTexturePath,
+ kRGBA_LockTexturePath,
+ };
+
+ enum { kLockTexturePathCount = kRGBA_LockTexturePath + 1 };
+
+ // 1. Check the cache for a pre-existing one
+ if (key.isValid()) {
+ if (GrTexture* tex = ctx->textureProvider()->findAndRefTextureByUniqueKey(key)) {
+ SK_HISTOGRAM_ENUMERATION("LockTexturePath", kPreExisting_LockTexturePath,
+ kLockTexturePathCount);
+ return tex;
+ }
+ }
+
+ // 2. Ask the generator to natively create one
+ {
+ ScopedGenerator generator(this);
+ SkIRect subset = SkIRect::MakeXYWH(fOrigin.x(), fOrigin.y(), fInfo.width(), fInfo.height());
+ if (GrTexture* tex = generator->generateTexture(ctx, &subset)) {
+ SK_HISTOGRAM_ENUMERATION("LockTexturePath", kNative_LockTexturePath,
+ kLockTexturePathCount);
+ return set_key_and_return(tex, key);
+ }
+ }
+
+ const GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(fInfo, *ctx->caps());
+
+#ifdef SK_SUPPORT_COMPRESSED_TEXTURES_IN_CACHERATOR
+ // 3. Ask the generator to return a compressed form that the GPU might support
+ sk_sp<SkData> data(this->refEncoded(ctx));
+ if (data) {
+ GrTexture* tex = load_compressed_into_texture(ctx, data, desc);
+ if (tex) {
+ SK_HISTOGRAM_ENUMERATION("LockTexturePath", kCompressed_LockTexturePath,
+ kLockTexturePathCount);
+ return set_key_and_return(tex, key);
+ }
+ }
+#endif
+
+ // 4. Ask the generator to return YUV planes, which the GPU can convert
+ {
+ ScopedGenerator generator(this);
+ Generator_GrYUVProvider provider(generator);
+ sk_sp<GrTexture> tex = provider.refAsTexture(ctx, desc, true);
+ if (tex) {
+ SK_HISTOGRAM_ENUMERATION("LockTexturePath", kYUV_LockTexturePath,
+ kLockTexturePathCount);
+ return set_key_and_return(tex.release(), key);
+ }
+ }
+
+ // 5. Ask the generator to return RGB(A) data, which the GPU can convert
+ SkBitmap bitmap;
+ if (this->tryLockAsBitmap(&bitmap, client, chint)) {
+ GrTexture* tex = nullptr;
+ if (willBeMipped) {
+ tex = GrGenerateMipMapsAndUploadToTexture(ctx, bitmap, gammaTreatment);
+ }
+ if (!tex) {
+ tex = GrUploadBitmapToTexture(ctx, bitmap);
+ }
+ if (tex) {
+ SK_HISTOGRAM_ENUMERATION("LockTexturePath", kRGBA_LockTexturePath,
+ kLockTexturePathCount);
+ return set_key_and_return(tex, key);
+ }
+ }
+ SK_HISTOGRAM_ENUMERATION("LockTexturePath", kFailure_LockTexturePath,
+ kLockTexturePathCount);
+ return nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+GrTexture* SkImageCacherator::lockAsTexture(GrContext* ctx, const GrTextureParams& params,
+ SkSourceGammaTreatment gammaTreatment,
+ const SkImage* client, SkImage::CachingHint chint) {
+ if (!ctx) {
+ return nullptr;
+ }
+
+ return GrImageTextureMaker(ctx, this, client, chint).refTextureForParams(params,
+ gammaTreatment);
+}
+
+#else
+
+GrTexture* SkImageCacherator::lockAsTexture(GrContext* ctx, const GrTextureParams&,
+ SkSourceGammaTreatment gammaTreatment,
+ const SkImage* client, SkImage::CachingHint) {
+ return nullptr;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkImageCacherator.h b/gfx/skia/skia/src/core/SkImageCacherator.h
new file mode 100644
index 000000000..3be69a578
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageCacherator.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageCacherator_DEFINED
+#define SkImageCacherator_DEFINED
+
+#include "SkImageGenerator.h"
+#include "SkMutex.h"
+#include "SkTemplates.h"
+
+class GrContext;
+class GrTextureParams;
+class GrUniqueKey;
+class SkBitmap;
+class SkImage;
+
+/*
+ * Internal class to manage caching the output of an ImageGenerator.
+ */
+class SkImageCacherator {
+public:
+ // Takes ownership of the generator
+ static SkImageCacherator* NewFromGenerator(SkImageGenerator*, const SkIRect* subset = nullptr);
+
+ const SkImageInfo& info() const { return fInfo; }
+ uint32_t uniqueID() const { return fUniqueID; }
+
+ /**
+ * On success (true), bitmap will point to the pixels for this generator. If this returns
+ * false, the bitmap will be reset to empty.
+ *
+ * If not NULL, the client will be notified (->notifyAddedToCache()) when resources are
+ * added to the cache on its behalf.
+ */
+ bool lockAsBitmap(SkBitmap*, const SkImage* client,
+ SkImage::CachingHint = SkImage::kAllow_CachingHint);
+
+ /**
+ * Returns a ref() on the texture produced by this generator. The caller must call unref()
+ * when it is done. Will return nullptr on failure.
+ *
+ * If not NULL, the client will be notified (->notifyAddedToCache()) when resources are
+ * added to the cache on its behalf.
+ *
+ * The caller is responsible for calling texture->unref() when they are done.
+ */
+ GrTexture* lockAsTexture(GrContext*, const GrTextureParams&,
+ SkSourceGammaTreatment gammaTreatment, const SkImage* client,
+ SkImage::CachingHint = SkImage::kAllow_CachingHint);
+
+ /**
+ * If the underlying src naturally is represented by an encoded blob (in SkData), this returns
+ * a ref to that data. If not, it returns null.
+ *
+ * If a GrContext is specified, then the caller is only interested in gpu-specific encoded
+ * formats, so others (e.g. PNG) can just return nullptr.
+ */
+ SkData* refEncoded(GrContext*);
+
+    // Only return true if the generated bitmap is already cached.
+ bool lockAsBitmapOnlyIfAlreadyCached(SkBitmap*);
+ // Call the underlying generator directly
+ bool directGeneratePixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRB,
+ int srcX, int srcY);
+
+private:
+ SkImageCacherator(SkImageGenerator*, const SkImageInfo&, const SkIPoint&, uint32_t uniqueID);
+
+ bool generateBitmap(SkBitmap*);
+ bool tryLockAsBitmap(SkBitmap*, const SkImage*, SkImage::CachingHint);
+#if SK_SUPPORT_GPU
+ // Returns the texture. If the cacherator is generating the texture and wants to cache it,
+ // it should use the passed in key (if the key is valid).
+ GrTexture* lockTexture(GrContext*, const GrUniqueKey& key, const SkImage* client,
+ SkImage::CachingHint, bool willBeMipped, SkSourceGammaTreatment);
+#endif
+
+ class ScopedGenerator {
+ SkImageCacherator* fCacher;
+ public:
+ ScopedGenerator(SkImageCacherator* cacher) : fCacher(cacher) {
+ fCacher->fMutexForGenerator.acquire();
+ }
+ ~ScopedGenerator() {
+ fCacher->fMutexForGenerator.release();
+ }
+ SkImageGenerator* operator->() const { return fCacher->fNotThreadSafeGenerator; }
+ operator SkImageGenerator*() const { return fCacher->fNotThreadSafeGenerator; }
+ };
+
+ SkMutex fMutexForGenerator;
+ SkAutoTDelete<SkImageGenerator> fNotThreadSafeGenerator;
+
+ const SkImageInfo fInfo;
+ const SkIPoint fOrigin;
+ const uint32_t fUniqueID;
+
+ friend class GrImageTextureMaker;
+};
+
+#endif
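
A sketch of the intended calling pattern, with make_generator() as a hypothetical stand-in for any decoder-backed SkImageGenerator; note that NewFromGenerator takes ownership of the generator even when it returns nullptr.

    SkImageGenerator* gen = make_generator();                              // hypothetical source of a generator
    SkImageCacherator* cacher = SkImageCacherator::NewFromGenerator(gen);  // owns gen from here on, success or not
    if (cacher) {
        SkBitmap bm;
        if (cacher->lockAsBitmap(&bm, /*client=*/nullptr)) {
            // bm is immutable and tagged with cacher->uniqueID(), so repeated locks hit SkBitmapCache
        }
    }
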
diff --git a/gfx/skia/skia/src/core/SkImageFilter.cpp b/gfx/skia/skia/src/core/SkImageFilter.cpp
new file mode 100644
index 000000000..68183cc44
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageFilter.cpp
@@ -0,0 +1,471 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkImageFilter.h"
+
+#include "SkCanvas.h"
+#include "SkFuzzLogging.h"
+#include "SkImageFilterCache.h"
+#include "SkLocalMatrixImageFilter.h"
+#include "SkMatrixImageFilter.h"
+#include "SkReadBuffer.h"
+#include "SkRect.h"
+#include "SkSpecialImage.h"
+#include "SkSpecialSurface.h"
+#include "SkValidationUtils.h"
+#include "SkWriteBuffer.h"
+#if SK_SUPPORT_GPU
+#include "GrContext.h"
+#include "GrDrawContext.h"
+#include "GrFixedClip.h"
+#include "SkGrPriv.h"
+#endif
+
+#ifndef SK_IGNORE_TO_STRING
+void SkImageFilter::CropRect::toString(SkString* str) const {
+ if (!fFlags) {
+ return;
+ }
+
+ str->appendf("cropRect (");
+ if (fFlags & CropRect::kHasLeft_CropEdge) {
+ str->appendf("%.2f, ", fRect.fLeft);
+ } else {
+ str->appendf("X, ");
+ }
+ if (fFlags & CropRect::kHasTop_CropEdge) {
+ str->appendf("%.2f, ", fRect.fTop);
+ } else {
+ str->appendf("X, ");
+ }
+ if (fFlags & CropRect::kHasWidth_CropEdge) {
+ str->appendf("%.2f, ", fRect.width());
+ } else {
+ str->appendf("X, ");
+ }
+ if (fFlags & CropRect::kHasHeight_CropEdge) {
+ str->appendf("%.2f", fRect.height());
+ } else {
+ str->appendf("X");
+ }
+ str->appendf(") ");
+}
+#endif
+
+void SkImageFilter::CropRect::applyTo(const SkIRect& imageBounds,
+ const SkMatrix& ctm,
+ bool embiggen,
+ SkIRect* cropped) const {
+ *cropped = imageBounds;
+ if (fFlags) {
+ SkRect devCropR;
+ ctm.mapRect(&devCropR, fRect);
+ SkIRect devICropR = devCropR.roundOut();
+
+ // Compute the left/top first, in case we need to modify the right/bottom for a missing edge
+ if (fFlags & kHasLeft_CropEdge) {
+ if (embiggen || devICropR.fLeft > cropped->fLeft) {
+ cropped->fLeft = devICropR.fLeft;
+ }
+ } else {
+ devICropR.fRight = cropped->fLeft + devICropR.width();
+ }
+ if (fFlags & kHasTop_CropEdge) {
+ if (embiggen || devICropR.fTop > cropped->fTop) {
+ cropped->fTop = devICropR.fTop;
+ }
+ } else {
+ devICropR.fBottom = cropped->fTop + devICropR.height();
+ }
+ if (fFlags & kHasWidth_CropEdge) {
+ if (embiggen || devICropR.fRight < cropped->fRight) {
+ cropped->fRight = devICropR.fRight;
+ }
+ }
+ if (fFlags & kHasHeight_CropEdge) {
+ if (embiggen || devICropR.fBottom < cropped->fBottom) {
+ cropped->fBottom = devICropR.fBottom;
+ }
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static int32_t next_image_filter_unique_id() {
+ static int32_t gImageFilterUniqueID;
+
+ // Never return 0.
+ int32_t id;
+ do {
+ id = sk_atomic_inc(&gImageFilterUniqueID) + 1;
+ } while (0 == id);
+ return id;
+}
+
+void SkImageFilter::Common::allocInputs(int count) {
+ fInputs.reset(count);
+}
+
+bool SkImageFilter::Common::unflatten(SkReadBuffer& buffer, int expectedCount) {
+ const int count = buffer.readInt();
+ if (!buffer.validate(count >= 0)) {
+ return false;
+ }
+ if (!buffer.validate(expectedCount < 0 || count == expectedCount)) {
+ return false;
+ }
+
+ SkFUZZF(("allocInputs: %d\n", count));
+ this->allocInputs(count);
+ for (int i = 0; i < count; i++) {
+ if (buffer.readBool()) {
+ fInputs[i] = sk_sp<SkImageFilter>(buffer.readImageFilter());
+ }
+ if (!buffer.isValid()) {
+ return false;
+ }
+ }
+ SkRect rect;
+ buffer.readRect(&rect);
+ if (!buffer.isValid() || !buffer.validate(SkIsValidRect(rect))) {
+ return false;
+ }
+
+ uint32_t flags = buffer.readUInt();
+ fCropRect = CropRect(rect, flags);
+    if (buffer.isVersionLT(SkReadBuffer::kImageFilterNoUniqueID_Version)) {
+        // Older serialized filters stored a unique ID here; read and discard it.
+        (void) buffer.readUInt();
+    }
+ return buffer.isValid();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkImageFilter::init(sk_sp<SkImageFilter>* inputs,
+ int inputCount,
+ const CropRect* cropRect) {
+ fCropRect = cropRect ? *cropRect : CropRect(SkRect(), 0x0);
+
+ fInputs.reset(inputCount);
+
+ for (int i = 0; i < inputCount; ++i) {
+ if (!inputs[i] || inputs[i]->usesSrcInput()) {
+ fUsesSrcInput = true;
+ }
+ fInputs[i] = inputs[i];
+ }
+}
+
+SkImageFilter::SkImageFilter(sk_sp<SkImageFilter>* inputs,
+ int inputCount,
+ const CropRect* cropRect)
+ : fUsesSrcInput(false)
+ , fUniqueID(next_image_filter_unique_id()) {
+ this->init(inputs, inputCount, cropRect);
+}
+
+SkImageFilter::~SkImageFilter() {
+ SkImageFilterCache::Get()->purgeByKeys(fCacheKeys.begin(), fCacheKeys.count());
+}
+
+SkImageFilter::SkImageFilter(int inputCount, SkReadBuffer& buffer)
+ : fUsesSrcInput(false)
+ , fCropRect(SkRect(), 0x0)
+ , fUniqueID(next_image_filter_unique_id()) {
+ Common common;
+ if (common.unflatten(buffer, inputCount)) {
+ this->init(common.inputs(), common.inputCount(), &common.cropRect());
+ }
+}
+
+void SkImageFilter::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeInt(fInputs.count());
+ for (int i = 0; i < fInputs.count(); i++) {
+ SkImageFilter* input = this->getInput(i);
+ buffer.writeBool(input != nullptr);
+ if (input != nullptr) {
+ buffer.writeFlattenable(input);
+ }
+ }
+ buffer.writeRect(fCropRect.rect());
+ buffer.writeUInt(fCropRect.flags());
+}
+
+sk_sp<SkSpecialImage> SkImageFilter::filterImage(SkSpecialImage* src, const Context& context,
+ SkIPoint* offset) const {
+ SkASSERT(src && offset);
+
+ uint32_t srcGenID = fUsesSrcInput ? src->uniqueID() : 0;
+ const SkIRect srcSubset = fUsesSrcInput ? src->subset() : SkIRect::MakeWH(0, 0);
+ SkImageFilterCacheKey key(fUniqueID, context.ctm(), context.clipBounds(), srcGenID, srcSubset);
+ if (context.cache()) {
+ SkSpecialImage* result = context.cache()->get(key, offset);
+ if (result) {
+ return sk_sp<SkSpecialImage>(SkRef(result));
+ }
+ }
+
+ sk_sp<SkSpecialImage> result(this->onFilterImage(src, context, offset));
+
+#if SK_SUPPORT_GPU
+ if (src->isTextureBacked() && result && !result->isTextureBacked()) {
+ // Keep the result on the GPU - this is still required for some
+ // image filters that don't support GPU in all cases
+ GrContext* context = src->getContext();
+ result = result->makeTextureImage(context);
+ }
+#endif
+
+ if (result && context.cache()) {
+ context.cache()->set(key, result.get(), *offset);
+ SkAutoMutexAcquire mutex(fMutex);
+ fCacheKeys.push_back(key);
+ }
+
+ return result;
+}
+
+SkIRect SkImageFilter::filterBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection direction) const {
+ if (kReverse_MapDirection == direction) {
+ SkIRect bounds = this->onFilterNodeBounds(src, ctm, direction);
+ return this->onFilterBounds(bounds, ctm, direction);
+ } else {
+ SkIRect bounds = this->onFilterBounds(src, ctm, direction);
+ bounds = this->onFilterNodeBounds(bounds, ctm, direction);
+ SkIRect dst;
+ this->getCropRect().applyTo(bounds, ctm, this->affectsTransparentBlack(), &dst);
+ return dst;
+ }
+}
+
+SkRect SkImageFilter::computeFastBounds(const SkRect& src) const {
+ if (0 == this->countInputs()) {
+ return src;
+ }
+ SkRect combinedBounds = this->getInput(0) ? this->getInput(0)->computeFastBounds(src) : src;
+ for (int i = 1; i < this->countInputs(); i++) {
+ SkImageFilter* input = this->getInput(i);
+ if (input) {
+ combinedBounds.join(input->computeFastBounds(src));
+ } else {
+ combinedBounds.join(src);
+ }
+ }
+ return combinedBounds;
+}
+
+bool SkImageFilter::canComputeFastBounds() const {
+ if (this->affectsTransparentBlack()) {
+ return false;
+ }
+ for (int i = 0; i < this->countInputs(); i++) {
+ SkImageFilter* input = this->getInput(i);
+ if (input && !input->canComputeFastBounds()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+#if SK_SUPPORT_GPU
+sk_sp<SkSpecialImage> SkImageFilter::DrawWithFP(GrContext* context,
+ sk_sp<GrFragmentProcessor> fp,
+ const SkIRect& bounds,
+ const OutputProperties& outputProperties) {
+ GrPaint paint;
+ paint.addColorFragmentProcessor(std::move(fp));
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ sk_sp<SkColorSpace> colorSpace = sk_ref_sp(outputProperties.colorSpace());
+ GrPixelConfig config = GrRenderableConfigForColorSpace(colorSpace.get());
+ sk_sp<GrDrawContext> drawContext(context->makeDrawContext(SkBackingFit::kApprox,
+ bounds.width(), bounds.height(),
+ config,
+ std::move(colorSpace)));
+ if (!drawContext) {
+ return nullptr;
+ }
+ paint.setGammaCorrect(drawContext->isGammaCorrect());
+
+ SkIRect dstIRect = SkIRect::MakeWH(bounds.width(), bounds.height());
+ SkRect srcRect = SkRect::Make(bounds);
+ SkRect dstRect = SkRect::MakeWH(srcRect.width(), srcRect.height());
+ GrFixedClip clip(dstIRect);
+ drawContext->fillRectToRect(clip, paint, SkMatrix::I(), dstRect, srcRect);
+
+ return SkSpecialImage::MakeFromGpu(dstIRect, kNeedNewImageUniqueID_SpecialImage,
+ drawContext->asTexture(),
+ sk_ref_sp(drawContext->getColorSpace()));
+}
+#endif
+
+bool SkImageFilter::asAColorFilter(SkColorFilter** filterPtr) const {
+ SkASSERT(nullptr != filterPtr);
+ if (!this->isColorFilterNode(filterPtr)) {
+ return false;
+ }
+ if (nullptr != this->getInput(0) || (*filterPtr)->affectsTransparentBlack()) {
+ (*filterPtr)->unref();
+ return false;
+ }
+ return true;
+}
+
+bool SkImageFilter::canHandleComplexCTM() const {
+ if (!this->onCanHandleComplexCTM()) {
+ return false;
+ }
+ const int count = this->countInputs();
+ for (int i = 0; i < count; ++i) {
+ SkImageFilter* input = this->getInput(i);
+ if (input && !input->canHandleComplexCTM()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool SkImageFilter::applyCropRect(const Context& ctx, const SkIRect& srcBounds,
+ SkIRect* dstBounds) const {
+ SkIRect temp = this->onFilterNodeBounds(srcBounds, ctx.ctm(), kForward_MapDirection);
+ fCropRect.applyTo(temp, ctx.ctm(), this->affectsTransparentBlack(), dstBounds);
+ // Intersect against the clip bounds, in case the crop rect has
+ // grown the bounds beyond the original clip. This can happen for
+ // example in tiling, where the clip is much smaller than the filtered
+ // primitive. If we didn't do this, we would be processing the filter
+ // at the full crop rect size in every tile.
+ return dstBounds->intersect(ctx.clipBounds());
+}
+
+// Return a larger (newWidth x newHeight) copy of 'src' with black padding
+// around it.
+static sk_sp<SkSpecialImage> pad_image(SkSpecialImage* src,
+ const SkImageFilter::OutputProperties& outProps,
+ int newWidth, int newHeight, int offX, int offY) {
+    // We would like to operate in the source's color space (so that we return an "identical"
+    // image, other than the padding). To achieve that, we'd create new output properties:
+ //
+ // SkImageFilter::OutputProperties outProps(src->getColorSpace());
+ //
+ // That fails in at least two ways. For formats that are texturable but not renderable (like
+ // F16 on some ES implementations), we can't create a surface to do the work. For sRGB, images
+ // may be tagged with an sRGB color space (which leads to an sRGB config in makeSurface). But
+ // the actual config of that sRGB image on a device with no sRGB support is non-sRGB.
+ //
+ // Rather than try to special case these situations, we execute the image padding in the
+ // destination color space. This should not affect the output of the DAG in (almost) any case,
+ // because the result of this call is going to be used as an input, where it would have been
+ // switched to the destination space anyway. The one exception would be a filter that expected
+ // to consume unclamped F16 data, but the padded version of the image is pre-clamped to 8888.
+ // We can revisit this logic if that ever becomes an actual problem.
+ sk_sp<SkSpecialSurface> surf(src->makeSurface(outProps, SkISize::Make(newWidth, newHeight)));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ canvas->clear(0x0);
+
+ src->draw(canvas, offX, offY, nullptr);
+
+ return surf->makeImageSnapshot();
+}
+
+sk_sp<SkSpecialImage> SkImageFilter::applyCropRect(const Context& ctx,
+ SkSpecialImage* src,
+ SkIPoint* srcOffset,
+ SkIRect* bounds) const {
+ const SkIRect srcBounds = SkIRect::MakeXYWH(srcOffset->x(), srcOffset->y(),
+ src->width(), src->height());
+
+ SkIRect dstBounds = this->onFilterNodeBounds(srcBounds, ctx.ctm(), kForward_MapDirection);
+ fCropRect.applyTo(dstBounds, ctx.ctm(), this->affectsTransparentBlack(), bounds);
+ if (!bounds->intersect(ctx.clipBounds())) {
+ return nullptr;
+ }
+
+ if (srcBounds.contains(*bounds)) {
+ return sk_sp<SkSpecialImage>(SkRef(src));
+ } else {
+ sk_sp<SkSpecialImage> img(pad_image(src, ctx.outputProperties(),
+ bounds->width(), bounds->height(),
+ srcOffset->x() - bounds->x(),
+ srcOffset->y() - bounds->y()));
+ *srcOffset = SkIPoint::Make(bounds->x(), bounds->y());
+ return img;
+ }
+}
+
+SkIRect SkImageFilter::onFilterBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection direction) const {
+ if (this->countInputs() < 1) {
+ return src;
+ }
+
+ SkIRect totalBounds;
+ for (int i = 0; i < this->countInputs(); ++i) {
+ SkImageFilter* filter = this->getInput(i);
+ SkIRect rect = filter ? filter->filterBounds(src, ctm, direction) : src;
+ if (0 == i) {
+ totalBounds = rect;
+ } else {
+ totalBounds.join(rect);
+ }
+ }
+
+ return totalBounds;
+}
+
+SkIRect SkImageFilter::onFilterNodeBounds(const SkIRect& src, const SkMatrix&, MapDirection) const {
+ return src;
+}
+
+SkImageFilter::Context SkImageFilter::mapContext(const Context& ctx) const {
+ SkIRect clipBounds = this->onFilterNodeBounds(ctx.clipBounds(), ctx.ctm(),
+ MapDirection::kReverse_MapDirection);
+ return Context(ctx.ctm(), clipBounds, ctx.cache(), ctx.outputProperties());
+}
+
+sk_sp<SkImageFilter> SkImageFilter::MakeMatrixFilter(const SkMatrix& matrix,
+ SkFilterQuality filterQuality,
+ sk_sp<SkImageFilter> input) {
+ return SkMatrixImageFilter::Make(matrix, filterQuality, std::move(input));
+}
+
+sk_sp<SkImageFilter> SkImageFilter::makeWithLocalMatrix(const SkMatrix& matrix) const {
+    // SkLocalMatrixImageFilter's factory takes a non-const SkImageFilter, but logically that
+    // parameter is *always* treated as a const ptr. Hence the const-cast here.
+ //
+ SkImageFilter* nonConstThis = const_cast<SkImageFilter*>(this);
+ return SkLocalMatrixImageFilter::Make(matrix, sk_ref_sp<SkImageFilter>(nonConstThis));
+}
+
+sk_sp<SkSpecialImage> SkImageFilter::filterInput(int index,
+ SkSpecialImage* src,
+ const Context& ctx,
+ SkIPoint* offset) const {
+ SkImageFilter* input = this->getInput(index);
+ if (!input) {
+ return sk_sp<SkSpecialImage>(SkRef(src));
+ }
+
+ sk_sp<SkSpecialImage> result(input->filterImage(src, this->mapContext(ctx), offset));
+
+ SkASSERT(!result || src->isTextureBacked() == result->isTextureBacked());
+
+ return result;
+}
+
+void SkImageFilter::PurgeCache() {
+ SkImageFilterCache::Get()->purge();
+}
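
As a sketch of the CropRect flag semantics this file relies on: a rect that only sets the left and top edges constrains those sides and lets right/bottom follow the image bounds.

    SkImageFilter::CropRect crop(SkRect::MakeXYWH(10, 10, 0, 0),
                                 SkImageFilter::CropRect::kHasLeft_CropEdge |
                                 SkImageFilter::CropRect::kHasTop_CropEdge);
    // applyTo() moves cropped->fLeft/fTop to the CTM-mapped crop edges (when that shrinks the
    // bounds, or whenever embiggen is true) and leaves fRight/fBottom at the image bounds,
    // because the width/height edges are not set.
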
diff --git a/gfx/skia/skia/src/core/SkImageFilterCache.cpp b/gfx/skia/skia/src/core/SkImageFilterCache.cpp
new file mode 100644
index 000000000..c7104def3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageFilterCache.cpp
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkImageFilterCache.h"
+
+#include "SkMutex.h"
+#include "SkOnce.h"
+#include "SkOpts.h"
+#include "SkRefCnt.h"
+#include "SkSpecialImage.h"
+#include "SkTDynamicHash.h"
+#include "SkTInternalLList.h"
+
+#ifdef SK_BUILD_FOR_IOS
+ enum { kDefaultCacheSize = 2 * 1024 * 1024 };
+#else
+ enum { kDefaultCacheSize = 128 * 1024 * 1024 };
+#endif
+
+namespace {
+
+class CacheImpl : public SkImageFilterCache {
+public:
+ typedef SkImageFilterCacheKey Key;
+ CacheImpl(size_t maxBytes) : fMaxBytes(maxBytes), fCurrentBytes(0) { }
+ ~CacheImpl() override {
+ SkTDynamicHash<Value, Key>::Iter iter(&fLookup);
+
+ while (!iter.done()) {
+ Value* v = &*iter;
+ ++iter;
+ delete v;
+ }
+ }
+ struct Value {
+ Value(const Key& key, SkSpecialImage* image, const SkIPoint& offset)
+ : fKey(key), fImage(SkRef(image)), fOffset(offset) {}
+
+ Key fKey;
+ SkAutoTUnref<SkSpecialImage> fImage;
+ SkIPoint fOffset;
+ static const Key& GetKey(const Value& v) {
+ return v.fKey;
+ }
+ static uint32_t Hash(const Key& key) {
+ return SkOpts::hash(reinterpret_cast<const uint32_t*>(&key), sizeof(Key));
+ }
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(Value);
+ };
+
+ SkSpecialImage* get(const Key& key, SkIPoint* offset) const override {
+ SkAutoMutexAcquire mutex(fMutex);
+ if (Value* v = fLookup.find(key)) {
+ *offset = v->fOffset;
+ if (v != fLRU.head()) {
+ fLRU.remove(v);
+ fLRU.addToHead(v);
+ }
+ return v->fImage;
+ }
+ return nullptr;
+ }
+
+ void set(const Key& key, SkSpecialImage* image, const SkIPoint& offset) override {
+ SkAutoMutexAcquire mutex(fMutex);
+ if (Value* v = fLookup.find(key)) {
+ this->removeInternal(v);
+ }
+ Value* v = new Value(key, image, offset);
+ fLookup.add(v);
+ fLRU.addToHead(v);
+ fCurrentBytes += image->getSize();
+ while (fCurrentBytes > fMaxBytes) {
+ Value* tail = fLRU.tail();
+ SkASSERT(tail);
+ if (tail == v) {
+ break;
+ }
+ this->removeInternal(tail);
+ }
+ }
+
+ void purge() override {
+ SkAutoMutexAcquire mutex(fMutex);
+ while (fCurrentBytes > 0) {
+ Value* tail = fLRU.tail();
+ SkASSERT(tail);
+ this->removeInternal(tail);
+ }
+ }
+
+ void purgeByKeys(const Key keys[], int count) override {
+ SkAutoMutexAcquire mutex(fMutex);
+ for (int i = 0; i < count; i++) {
+ if (Value* v = fLookup.find(keys[i])) {
+ this->removeInternal(v);
+ }
+ }
+ }
+
+ SkDEBUGCODE(int count() const override { return fLookup.count(); })
+private:
+ void removeInternal(Value* v) {
+ SkASSERT(v->fImage);
+ fCurrentBytes -= v->fImage->getSize();
+ fLRU.remove(v);
+ fLookup.remove(v->fKey);
+ delete v;
+ }
+private:
+ SkTDynamicHash<Value, Key> fLookup;
+ mutable SkTInternalLList<Value> fLRU;
+ size_t fMaxBytes;
+ size_t fCurrentBytes;
+ mutable SkMutex fMutex;
+};
+
+} // namespace
+
+SkImageFilterCache* SkImageFilterCache::Create(size_t maxBytes) {
+ return new CacheImpl(maxBytes);
+}
+
+SkImageFilterCache* SkImageFilterCache::Get() {
+ static SkOnce once;
+ static SkImageFilterCache* cache;
+
+ once([]{ cache = SkImageFilterCache::Create(kDefaultCacheSize); });
+ return cache;
+}
diff --git a/gfx/skia/skia/src/core/SkImageFilterCache.h b/gfx/skia/skia/src/core/SkImageFilterCache.h
new file mode 100644
index 000000000..a65357f69
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageFilterCache.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageFilterCache_DEFINED
+#define SkImageFilterCache_DEFINED
+
+#include "SkMatrix.h"
+#include "SkRefCnt.h"
+
+struct SkIPoint;
+class SkSpecialImage;
+
+struct SkImageFilterCacheKey {
+ SkImageFilterCacheKey(const uint32_t uniqueID, const SkMatrix& matrix,
+ const SkIRect& clipBounds, uint32_t srcGenID, const SkIRect& srcSubset)
+ : fUniqueID(uniqueID)
+ , fMatrix(matrix)
+ , fClipBounds(clipBounds)
+ , fSrcGenID(srcGenID)
+ , fSrcSubset(srcSubset) {
+ // Assert that Key is tightly-packed, since it is hashed.
+ static_assert(sizeof(SkImageFilterCacheKey) == sizeof(uint32_t) + sizeof(SkMatrix) +
+ sizeof(SkIRect) + sizeof(uint32_t) + 4 * sizeof(int32_t),
+ "image_filter_key_tight_packing");
+ fMatrix.getType(); // force initialization of type, so hashes match
+ }
+
+ uint32_t fUniqueID;
+ SkMatrix fMatrix;
+ SkIRect fClipBounds;
+ uint32_t fSrcGenID;
+ SkIRect fSrcSubset;
+
+ bool operator==(const SkImageFilterCacheKey& other) const {
+ return fUniqueID == other.fUniqueID &&
+ fMatrix == other.fMatrix &&
+ fClipBounds == other.fClipBounds &&
+ fSrcGenID == other.fSrcGenID &&
+ fSrcSubset == other.fSrcSubset;
+ }
+};
+
+// This cache maps from (filter's unique ID + CTM + clipBounds + src bitmap generation ID) to
+// (result, offset).
+class SkImageFilterCache : public SkRefCnt {
+public:
+ enum { kDefaultTransientSize = 32 * 1024 * 1024 };
+
+ virtual ~SkImageFilterCache() {}
+ static SkImageFilterCache* Create(size_t maxBytes);
+ static SkImageFilterCache* Get();
+ virtual SkSpecialImage* get(const SkImageFilterCacheKey& key, SkIPoint* offset) const = 0;
+ virtual void set(const SkImageFilterCacheKey& key, SkSpecialImage* image,
+ const SkIPoint& offset) = 0;
+ virtual void purge() = 0;
+ virtual void purgeByKeys(const SkImageFilterCacheKey[], int) = 0;
+ SkDEBUGCODE(virtual int count() const = 0;)
+};
+
+#endif
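
A usage sketch for the cache, where filterID, ctm, clipBounds, srcGenID and srcSubset are placeholders for the values a filter invocation already has on hand.

    SkImageFilterCache* cache = SkImageFilterCache::Get();   // process-wide LRU, 128MB default (2MB on iOS)
    SkImageFilterCacheKey key(filterID, ctm, clipBounds, srcGenID, srcSubset);
    SkIPoint offset;
    if (SkSpecialImage* cached = cache->get(key, &offset)) {
        // reuse the previously filtered image; 'offset' says where it lands in the destination
    }
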
diff --git a/gfx/skia/skia/src/core/SkImageGenerator.cpp b/gfx/skia/skia/src/core/SkImageGenerator.cpp
new file mode 100644
index 000000000..84d9c743e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageGenerator.cpp
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkImageGenerator.h"
+#include "SkNextID.h"
+
+SkImageGenerator::SkImageGenerator(const SkImageInfo& info, uint32_t uniqueID)
+ : fInfo(info)
+ , fUniqueID(kNeedNewImageUniqueID == uniqueID ? SkNextID::ImageID() : uniqueID)
+{}
+
+bool SkImageGenerator::getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ SkPMColor ctable[], int* ctableCount) {
+ if (kUnknown_SkColorType == info.colorType()) {
+ return false;
+ }
+ if (nullptr == pixels) {
+ return false;
+ }
+ if (rowBytes < info.minRowBytes()) {
+ return false;
+ }
+
+ if (kIndex_8_SkColorType == info.colorType()) {
+ if (nullptr == ctable || nullptr == ctableCount) {
+ return false;
+ }
+ } else {
+ if (ctableCount) {
+ *ctableCount = 0;
+ }
+ ctableCount = nullptr;
+ ctable = nullptr;
+ }
+
+ const bool success = this->onGetPixels(info, pixels, rowBytes, ctable, ctableCount);
+ if (success && ctableCount) {
+ SkASSERT(*ctableCount >= 0 && *ctableCount <= 256);
+ }
+ return success;
+}
+
+bool SkImageGenerator::getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes) {
+ SkASSERT(kIndex_8_SkColorType != info.colorType());
+ if (kIndex_8_SkColorType == info.colorType()) {
+ return false;
+ }
+ return this->getPixels(info, pixels, rowBytes, nullptr, nullptr);
+}
+
+bool SkImageGenerator::queryYUV8(SkYUVSizeInfo* sizeInfo, SkYUVColorSpace* colorSpace) const {
+ SkASSERT(sizeInfo);
+
+ return this->onQueryYUV8(sizeInfo, colorSpace);
+}
+
+bool SkImageGenerator::getYUV8Planes(const SkYUVSizeInfo& sizeInfo, void* planes[3]) {
+ SkASSERT(sizeInfo.fSizes[SkYUVSizeInfo::kY].fWidth >= 0);
+ SkASSERT(sizeInfo.fSizes[SkYUVSizeInfo::kY].fHeight >= 0);
+ SkASSERT(sizeInfo.fSizes[SkYUVSizeInfo::kU].fWidth >= 0);
+ SkASSERT(sizeInfo.fSizes[SkYUVSizeInfo::kU].fHeight >= 0);
+ SkASSERT(sizeInfo.fSizes[SkYUVSizeInfo::kV].fWidth >= 0);
+ SkASSERT(sizeInfo.fSizes[SkYUVSizeInfo::kV].fHeight >= 0);
+ SkASSERT(sizeInfo.fWidthBytes[SkYUVSizeInfo::kY] >=
+ (size_t) sizeInfo.fSizes[SkYUVSizeInfo::kY].fWidth);
+ SkASSERT(sizeInfo.fWidthBytes[SkYUVSizeInfo::kU] >=
+ (size_t) sizeInfo.fSizes[SkYUVSizeInfo::kU].fWidth);
+ SkASSERT(sizeInfo.fWidthBytes[SkYUVSizeInfo::kV] >=
+ (size_t) sizeInfo.fSizes[SkYUVSizeInfo::kV].fWidth);
+ SkASSERT(planes && planes[0] && planes[1] && planes[2]);
+
+ return this->onGetYUV8Planes(sizeInfo, planes);
+}
+
+GrTexture* SkImageGenerator::generateTexture(GrContext* ctx, const SkIRect* subset) {
+ if (subset && !SkIRect::MakeWH(fInfo.width(), fInfo.height()).contains(*subset)) {
+ return nullptr;
+ }
+ return this->onGenerateTexture(ctx, subset);
+}
+
+bool SkImageGenerator::computeScaledDimensions(SkScalar scale, SupportedSizes* sizes) {
+ if (scale > 0 && scale <= 1) {
+ return this->onComputeScaledDimensions(scale, sizes);
+ }
+ return false;
+}
+
+bool SkImageGenerator::generateScaledPixels(const SkISize& scaledSize,
+ const SkIPoint& subsetOrigin,
+ const SkPixmap& subsetPixels) {
+ if (scaledSize.width() <= 0 || scaledSize.height() <= 0) {
+ return false;
+ }
+ if (subsetPixels.width() <= 0 || subsetPixels.height() <= 0) {
+ return false;
+ }
+ const SkIRect subset = SkIRect::MakeXYWH(subsetOrigin.x(), subsetOrigin.y(),
+ subsetPixels.width(), subsetPixels.height());
+ if (!SkIRect::MakeWH(scaledSize.width(), scaledSize.height()).contains(subset)) {
+ return false;
+ }
+ return this->onGenerateScaledPixels(scaledSize, subsetOrigin, subsetPixels);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+
+SkData* SkImageGenerator::onRefEncodedData(SK_REFENCODEDDATA_CTXPARAM) {
+ return nullptr;
+}
+
+bool SkImageGenerator::onGetPixels(const SkImageInfo& info, void* dst, size_t rb,
+ SkPMColor* colors, int* colorCount) {
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "SkBitmap.h"
+#include "SkColorTable.h"
+
+static bool reset_and_return_false(SkBitmap* bitmap) {
+ bitmap->reset();
+ return false;
+}
+
+bool SkImageGenerator::tryGenerateBitmap(SkBitmap* bitmap, const SkImageInfo* infoPtr,
+ SkBitmap::Allocator* allocator) {
+ SkImageInfo info = infoPtr ? *infoPtr : this->getInfo();
+ if (0 == info.getSafeSize(info.minRowBytes())) {
+ return false;
+ }
+ if (!bitmap->setInfo(info)) {
+ return reset_and_return_false(bitmap);
+ }
+
+ SkPMColor ctStorage[256];
+ memset(ctStorage, 0xFF, sizeof(ctStorage)); // init with opaque-white for the moment
+ SkAutoTUnref<SkColorTable> ctable(new SkColorTable(ctStorage, 256));
+ if (!bitmap->tryAllocPixels(allocator, ctable)) {
+        // SkResourceCache's custom allocator can't handle ctables, so it may fail on
+        // kIndex_8_SkColorType.
+        // https://bug.skia.org/4355
+#if 1
+        // ignore the allocator, and see if we can succeed without it
+ if (!bitmap->tryAllocPixels(nullptr, ctable)) {
+ return reset_and_return_false(bitmap);
+ }
+#else
+ // this is the up-scale technique, not fully debugged, but we keep it here at the moment
+ // to remind ourselves that this might be better than ignoring the allocator.
+
+ info = SkImageInfo::MakeN32(info.width(), info.height(), info.alphaType());
+ if (!bitmap->setInfo(info)) {
+ return reset_and_return_false(bitmap);
+ }
+ // we pass nullptr for the ctable arg, since we are now explicitly N32
+ if (!bitmap->tryAllocPixels(allocator, nullptr)) {
+ return reset_and_return_false(bitmap);
+ }
+#endif
+ }
+
+ bitmap->lockPixels();
+ if (!bitmap->getPixels()) {
+ return reset_and_return_false(bitmap);
+ }
+
+ int ctCount = 0;
+ if (!this->getPixels(bitmap->info(), bitmap->getPixels(), bitmap->rowBytes(),
+ ctStorage, &ctCount)) {
+ return reset_and_return_false(bitmap);
+ }
+
+ if (ctCount > 0) {
+ SkASSERT(kIndex_8_SkColorType == bitmap->colorType());
+ // we and bitmap should be owners
+ SkASSERT(!ctable->unique());
+
+ // Now we need to overwrite the ctable we built earlier, with the correct colors.
+ // This does mean that we may have made the table too big, but that cannot be avoided
+ // until we can change SkImageGenerator's API to return us the ctable *before* we have to
+ // allocate space for all the pixels.
+ ctable->dangerous_overwriteColors(ctStorage, ctCount);
+ } else {
+ SkASSERT(kIndex_8_SkColorType != bitmap->colorType());
+ // we should be the only owner
+ SkASSERT(ctable->unique());
+ }
+ return true;
+}
+
+#include "SkGraphics.h"
+
+static SkGraphics::ImageGeneratorFromEncodedFactory gFactory;
+
+SkGraphics::ImageGeneratorFromEncodedFactory
+SkGraphics::SetImageGeneratorFromEncodedFactory(ImageGeneratorFromEncodedFactory factory)
+{
+ ImageGeneratorFromEncodedFactory prev = gFactory;
+ gFactory = factory;
+ return prev;
+}
+
+SkImageGenerator* SkImageGenerator::NewFromEncoded(SkData* data) {
+ if (nullptr == data) {
+ return nullptr;
+ }
+ if (gFactory) {
+ if (SkImageGenerator* generator = gFactory(data)) {
+ return generator;
+ }
+ }
+ return SkImageGenerator::NewFromEncodedImpl(data);
+}
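
A sketch of the factory hook at the end of this file, with my_generator_factory as a hypothetical custom decoder: once installed, NewFromEncoded() tries it before falling back to NewFromEncodedImpl.

    static SkImageGenerator* my_generator_factory(SkData* data) {
        // return a custom generator for formats we recognize, or nullptr to fall through
        return nullptr;
    }

    static void install_factory_demo() {
        SkGraphics::SetImageGeneratorFromEncodedFactory(my_generator_factory);
    }
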
diff --git a/gfx/skia/skia/src/core/SkImageGeneratorPriv.h b/gfx/skia/skia/src/core/SkImageGeneratorPriv.h
new file mode 100644
index 000000000..93114e328
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageGeneratorPriv.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageGeneratorPriv_DEFINED
+#define SkImageGeneratorPriv_DEFINED
+
+#include "SkImageGenerator.h"
+#include "SkDiscardableMemory.h"
+
+/**
+ * Takes ownership of SkImageGenerator. If this method fails for
+ * whatever reason, it will return false and immediately delete
+ * the generator. If it succeeds, it will modify destination
+ * bitmap.
+ *
+ * If generator is nullptr, will safely return false.
+ *
+ * If this fails or when the SkDiscardablePixelRef that is
+ * installed into destination is destroyed, it will call
+ * `delete` on the generator. Therefore, generator should be
+ * allocated with `new`.
+ *
+ * @param destination Upon success, this bitmap will be
+ * configured and have a pixelref installed.
+ *
+ * @param factory If not nullptr, this object will be used as a
+ * source of discardable memory when decoding. If nullptr, then
+ * SkDiscardableMemory::Create() will be called.
+ *
+ * @return true iff successful.
+ */
+bool SkDEPRECATED_InstallDiscardablePixelRef(SkImageGenerator*, const SkIRect* subset,
+ SkBitmap* destination,
+ SkDiscardableMemory::Factory* factory);
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkImageInfo.cpp b/gfx/skia/skia/src/core/SkImageInfo.cpp
new file mode 100644
index 000000000..75c6807d1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageInfo.cpp
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkImageInfo.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+
+static bool alpha_type_is_valid(SkAlphaType alphaType) {
+ return (alphaType >= 0) && (alphaType <= kLastEnum_SkAlphaType);
+}
+
+static bool color_type_is_valid(SkColorType colorType) {
+ return (colorType >= 0) && (colorType <= kLastEnum_SkColorType);
+}
+
+SkImageInfo SkImageInfo::MakeS32(int width, int height, SkAlphaType at) {
+ return SkImageInfo(width, height, kN32_SkColorType, at,
+ SkColorSpace::NewNamed(SkColorSpace::kSRGB_Named));
+}
+
+static const int kColorTypeMask = 0x0F;
+static const int kAlphaTypeMask = 0x03;
+
+void SkImageInfo::unflatten(SkReadBuffer& buffer) {
+ fWidth = buffer.read32();
+ fHeight = buffer.read32();
+
+ uint32_t packed = buffer.read32();
+ fColorType = (SkColorType)((packed >> 0) & kColorTypeMask);
+ fAlphaType = (SkAlphaType)((packed >> 8) & kAlphaTypeMask);
+ buffer.validate(alpha_type_is_valid(fAlphaType) && color_type_is_valid(fColorType));
+
+ sk_sp<SkData> data = buffer.readByteArrayAsData();
+ fColorSpace = SkColorSpace::Deserialize(data->data(), data->size());
+}
+
+void SkImageInfo::flatten(SkWriteBuffer& buffer) const {
+ buffer.write32(fWidth);
+ buffer.write32(fHeight);
+
+ SkASSERT(0 == (fAlphaType & ~kAlphaTypeMask));
+ SkASSERT(0 == (fColorType & ~kColorTypeMask));
+ uint32_t packed = (fAlphaType << 8) | fColorType;
+ buffer.write32(packed);
+
+ if (fColorSpace) {
+ sk_sp<SkData> data = fColorSpace->serialize();
+ if (data) {
+ buffer.writeDataAsByteArray(data.get());
+ } else {
+ buffer.writeByteArray(nullptr, 0);
+ }
+ } else {
+ sk_sp<SkData> data = SkData::MakeEmpty();
+ buffer.writeDataAsByteArray(data.get());
+ }
+}
+
+bool SkColorTypeValidateAlphaType(SkColorType colorType, SkAlphaType alphaType,
+ SkAlphaType* canonical) {
+ switch (colorType) {
+ case kUnknown_SkColorType:
+ alphaType = kUnknown_SkAlphaType;
+ break;
+ case kAlpha_8_SkColorType:
+ if (kUnpremul_SkAlphaType == alphaType) {
+ alphaType = kPremul_SkAlphaType;
+ }
+ // fall-through
+ case kIndex_8_SkColorType:
+ case kARGB_4444_SkColorType:
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ case kRGBA_F16_SkColorType:
+ if (kUnknown_SkAlphaType == alphaType) {
+ return false;
+ }
+ break;
+ case kRGB_565_SkColorType:
+ case kGray_8_SkColorType:
+ alphaType = kOpaque_SkAlphaType;
+ break;
+ default:
+ return false;
+ }
+ if (canonical) {
+ *canonical = alphaType;
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "SkReadPixelsRec.h"
+
+bool SkReadPixelsRec::trim(int srcWidth, int srcHeight) {
+ switch (fInfo.colorType()) {
+ case kUnknown_SkColorType:
+ case kIndex_8_SkColorType:
+ return false;
+ default:
+ break;
+ }
+ if (nullptr == fPixels || fRowBytes < fInfo.minRowBytes()) {
+ return false;
+ }
+ if (0 == fInfo.width() || 0 == fInfo.height()) {
+ return false;
+ }
+
+ int x = fX;
+ int y = fY;
+ SkIRect srcR = SkIRect::MakeXYWH(x, y, fInfo.width(), fInfo.height());
+ if (!srcR.intersect(0, 0, srcWidth, srcHeight)) {
+ return false;
+ }
+
+ // if x or y are negative, then we have to adjust pixels
+ if (x > 0) {
+ x = 0;
+ }
+ if (y > 0) {
+ y = 0;
+ }
+ // here x,y are either 0 or negative
+ fPixels = ((char*)fPixels - y * fRowBytes - x * fInfo.bytesPerPixel());
+ // the intersect may have shrunk info's logical size
+ fInfo = fInfo.makeWH(srcR.width(), srcR.height());
+ fX = srcR.x();
+ fY = srcR.y();
+
+ return true;
+}
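
A small sketch of the alpha-type canonicalization rules above: opaque formats force kOpaque_SkAlphaType, while formats that carry alpha reject kUnknown_SkAlphaType.

    SkAlphaType canonical;
    bool ok = SkColorTypeValidateAlphaType(kRGB_565_SkColorType, kPremul_SkAlphaType, &canonical);
    // ok == true, canonical == kOpaque_SkAlphaType: 565 has no alpha channel

    ok = SkColorTypeValidateAlphaType(kRGBA_8888_SkColorType, kUnknown_SkAlphaType, &canonical);
    // ok == false: 8888 needs a known alpha type
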
diff --git a/gfx/skia/skia/src/core/SkImagePriv.h b/gfx/skia/skia/src/core/SkImagePriv.h
new file mode 100644
index 000000000..8e2f5cecd
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImagePriv.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImagePriv_DEFINED
+#define SkImagePriv_DEFINED
+
+#include "SkImage.h"
+#include "SkSmallAllocator.h"
+#include "SkSurface.h"
+
+enum SkCopyPixelsMode {
+ kIfMutable_SkCopyPixelsMode, //!< only copy src pixels if they are marked mutable
+ kAlways_SkCopyPixelsMode, //!< always copy src pixels (even if they are marked immutable)
+ kNever_SkCopyPixelsMode, //!< never copy src pixels (even if they are marked mutable)
+};
+
+enum {kSkBlitterContextSize = 3332};
+
+// Commonly used allocator. It currently is only used to allocate up to 3 objects. The total
+// bytes requested is calculated using one of our large shaders, its context size plus the size of
+// an Sk3DBlitter in SkDraw.cpp
+// Note that some contexts may contain other contexts (e.g. for compose shaders), but we've not
+// yet found a situation where the size below isn't big enough.
+typedef SkSmallAllocator<3, kSkBlitterContextSize> SkTBlitterAllocator;
+
+// If alloc is non-nullptr, it will be used to allocate the returned SkShader, and MUST outlive
+// the SkShader.
+sk_sp<SkShader> SkMakeBitmapShader(const SkBitmap& src, SkShader::TileMode, SkShader::TileMode,
+ const SkMatrix* localMatrix, SkCopyPixelsMode,
+ SkTBlitterAllocator* alloc);
+
+// Call this if you explicitly want to use/share this pixelRef in the image
+extern sk_sp<SkImage> SkMakeImageFromPixelRef(const SkImageInfo&, SkPixelRef*,
+ const SkIPoint& pixelRefOrigin,
+ size_t rowBytes);
+
+/**
+ * Examines the bitmap to decide if it can share the existing pixelRef, or
+ * if it needs to make a deep-copy of the pixels.
+ *
+ * The bitmap's pixelref will be shared if either the bitmap is marked as
+ * immutable, or CopyPixelsMode allows it. Shared pixel refs are also
+ * locked when kLocked_SharedPixelRefMode is specified.
+ *
+ * Passing kLocked_SharedPixelRefMode allows the image's peekPixels() method
+ * to succeed, but it will force any lazy decodes/generators to execute if
+ * they exist on the pixelref.
+ *
+ * It is illegal to call this with a texture-backed bitmap.
+ *
+ * If the bitmap's colortype cannot be converted into a corresponding
+ * SkImageInfo, or the bitmap's pixels cannot be accessed, this will return
+ * nullptr.
+ */
+extern sk_sp<SkImage> SkMakeImageFromRasterBitmap(const SkBitmap&, SkCopyPixelsMode,
+ SkTBlitterAllocator* = nullptr);
+
+// Given an image created from SkNewImageFromBitmap, return its pixelref. This
+// may be called to see if the surface and the image share the same pixelref,
+// in which case the surface may need to perform a copy-on-write.
+extern const SkPixelRef* SkBitmapImageGetPixelRef(const SkImage* rasterImage);
+
+// When a texture is shared by a surface and an image its budgeted status is that of the
+// surface. This function is used when the surface makes a new texture for itself in order
+// for the orphaned image to determine whether the original texture counts against the
+// budget or not.
+extern void SkTextureImageApplyBudgetedDecision(SkImage* textureImage);
+
+// Update the texture wrapped by an image created with NewTexture. This
+// is called when a surface and image share the same GrTexture and the
+// surface needs to perform a copy-on-write
+extern void SkTextureImageSetTexture(SkImage* image, GrTexture* texture);
+
+/**
+ * Will attempt to upload and lock the contents of the image as a texture, so that subsequent
+ * draws to a gpu-target will come from that texture (and not by looking at the original image
+ * src). In particular this is intended to use the texture even if the image's original content
+ * changes subsequent to this call (i.e. the src is mutable!).
+ *
+ * This must be balanced by an equal number of calls to SkImage_unpinAsTexture() -- calls can be
+ * nested.
+ *
+ * Once in this "pinned" state, the image has all of the same thread restrictions that exist
+ * for a natively created gpu image (e.g. SkImage::MakeFromTexture)
+ * - all drawing, pinning, unpinning must happen in the same thread as the GrContext.
+ */
+void SkImage_pinAsTexture(const SkImage*, GrContext*);
+
+/**
+ * The balancing call to SkImage_pinAsTexture. When a balanced number of calls have been made, then
+ * the "pinned" texture is free to be purged, etc. This also means that a subsequent "pin" call
+ * will look at the original content again, and if its uniqueID/generationID has changed, then
+ * a newer texture will be uploaded/pinned.
+ *
+ * The context passed to unpin must match the one passed to pin.
+ */
+void SkImage_unpinAsTexture(const SkImage*, GrContext*);
+
+#endif
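
A balanced pin/unpin sketch for the helpers declared above, with image and grContext assumed to already exist and to be used on the GrContext's thread.

    SkImage_pinAsTexture(image, grContext);    // upload/lock the current contents as a texture
    // ... issue GPU draws that should keep sampling the pinned texture ...
    SkImage_unpinAsTexture(image, grContext);  // must balance the pin, with the same GrContext
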
diff --git a/gfx/skia/skia/src/core/SkLatticeIter.cpp b/gfx/skia/skia/src/core/SkLatticeIter.cpp
new file mode 100644
index 000000000..4fe9352d0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLatticeIter.cpp
@@ -0,0 +1,288 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkLatticeIter.h"
+#include "SkRect.h"
+
+/**
+ * Divs must be in increasing order with no duplicates.
+ */
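+// For example (illustrative values): with start = 0 and end = 10, the divs {2, 5, 7} are
+// accepted, while {5, 5} (duplicate) and {7, 5} (not increasing) are rejected.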
+static bool valid_divs(const int* divs, int count, int start, int end) {
+ int prev = start - 1;
+ for (int i = 0; i < count; i++) {
+ if (prev >= divs[i] || divs[i] >= end) {
+ return false;
+        }
+        prev = divs[i];
+    }
+
+ return true;
+}
+
+bool SkLatticeIter::Valid(int width, int height, const SkCanvas::Lattice& lattice) {
+ SkIRect totalBounds = SkIRect::MakeWH(width, height);
+ SkASSERT(lattice.fBounds);
+ const SkIRect latticeBounds = *lattice.fBounds;
+ if (!totalBounds.contains(latticeBounds)) {
+ return false;
+ }
+
+ bool zeroXDivs = lattice.fXCount <= 0 || (1 == lattice.fXCount &&
+ latticeBounds.fLeft == lattice.fXDivs[0]);
+ bool zeroYDivs = lattice.fYCount <= 0 || (1 == lattice.fYCount &&
+ latticeBounds.fTop == lattice.fYDivs[0]);
+ if (zeroXDivs && zeroYDivs) {
+ return false;
+ }
+
+ return valid_divs(lattice.fXDivs, lattice.fXCount, latticeBounds.fLeft, latticeBounds.fRight)
+ && valid_divs(lattice.fYDivs, lattice.fYCount, latticeBounds.fTop, latticeBounds.fBottom);
+}
+
+/**
+ * Count the number of pixels that are in "scalable" patches.
+ */
+static int count_scalable_pixels(const int32_t* divs, int numDivs, bool firstIsScalable,
+ int start, int end) {
+ if (0 == numDivs) {
+ return firstIsScalable ? end - start : 0;
+ }
+
+ int i;
+ int count;
+ if (firstIsScalable) {
+ count = divs[0] - start;
+ i = 1;
+ } else {
+ count = 0;
+ i = 0;
+ }
+
+ for (; i < numDivs; i += 2) {
+ // Alternatively, we could use |top| and |bottom| as variable names, instead of
+ // |left| and |right|.
+ int left = divs[i];
+ int right = (i + 1 < numDivs) ? divs[i + 1] : end;
+ count += right - left;
+ }
+
+ return count;
+}
+
+/**
+ * Set points for the src and dst rects on subsequent draw calls.
+ */
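+// Worked example (illustrative numbers): with srcFixed = 20, srcScalable = 30 and
+// dstLen = 80, the "normal" branch below gives scale = (80 - 20) / 30 = 2, so scalable
+// patches are stretched 2x while fixed patches keep their source size.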
+static void set_points(float* dst, float* src, const int* divs, int divCount, int srcFixed,
+ int srcScalable, float srcStart, float srcEnd, float dstStart, float dstEnd,
+ bool isScalable) {
+
+ float dstLen = dstEnd - dstStart;
+ float scale;
+ if (srcFixed <= dstLen) {
+ // This is the "normal" case, where we scale the "scalable" patches and leave
+ // the other patches fixed.
+ scale = (dstLen - ((float) srcFixed)) / ((float) srcScalable);
+ } else {
+ // In this case, we eliminate the "scalable" patches and scale the "fixed" patches.
+ scale = dstLen / ((float) srcFixed);
+ }
+
+ src[0] = srcStart;
+ dst[0] = dstStart;
+ for (int i = 0; i < divCount; i++) {
+ src[i + 1] = (float) (divs[i]);
+ float srcDelta = src[i + 1] - src[i];
+ float dstDelta;
+ if (srcFixed <= dstLen) {
+ dstDelta = isScalable ? scale * srcDelta : srcDelta;
+ } else {
+ dstDelta = isScalable ? 0.0f : scale * srcDelta;
+ }
+ dst[i + 1] = dst[i] + dstDelta;
+
+ // Alternate between "scalable" and "fixed" patches.
+ isScalable = !isScalable;
+ }
+
+ src[divCount + 1] = srcEnd;
+ dst[divCount + 1] = dstEnd;
+}
+
+SkLatticeIter::SkLatticeIter(const SkCanvas::Lattice& lattice, const SkRect& dst) {
+ const int* xDivs = lattice.fXDivs;
+ const int origXCount = lattice.fXCount;
+ const int* yDivs = lattice.fYDivs;
+ const int origYCount = lattice.fYCount;
+ SkASSERT(lattice.fBounds);
+ const SkIRect src = *lattice.fBounds;
+
+    // In the x-dimension, the first rectangle always starts at the left edge of the bounds
+    // and is "fixed". If xDivs[0] coincides with that edge, the first rectangle is degenerate,
+    // so the first real rectangle is "scalable" in the x-direction.
+    //
+    // The same interpretation applies to the y-dimension.
+    //
+    // As we move left to right across the image, alternating patches will be "fixed" or
+    // "scalable" in the x-direction. Similarly, as we move top to bottom, alternating
+    // patches will be "fixed" or "scalable" in the y-direction.
+ int xCount = origXCount;
+ int yCount = origYCount;
+ bool xIsScalable = (xCount > 0 && src.fLeft == xDivs[0]);
+ if (xIsScalable) {
+ // Once we've decided that the first patch is "scalable", we don't need the
+ // xDiv. It is always implied that we start at the edge of the bounds.
+ xDivs++;
+ xCount--;
+ }
+ bool yIsScalable = (yCount > 0 && src.fTop == yDivs[0]);
+ if (yIsScalable) {
+ // Once we've decided that the first patch is "scalable", we don't need the
+ // yDiv. It is always implied that we start at the edge of the bounds.
+ yDivs++;
+ yCount--;
+ }
+
+ // Count "scalable" and "fixed" pixels in each dimension.
+ int xCountScalable = count_scalable_pixels(xDivs, xCount, xIsScalable, src.fLeft, src.fRight);
+ int xCountFixed = src.width() - xCountScalable;
+ int yCountScalable = count_scalable_pixels(yDivs, yCount, yIsScalable, src.fTop, src.fBottom);
+ int yCountFixed = src.height() - yCountScalable;
+
+ fSrcX.reset(xCount + 2);
+ fDstX.reset(xCount + 2);
+ set_points(fDstX.begin(), fSrcX.begin(), xDivs, xCount, xCountFixed, xCountScalable,
+ src.fLeft, src.fRight, dst.fLeft, dst.fRight, xIsScalable);
+
+ fSrcY.reset(yCount + 2);
+ fDstY.reset(yCount + 2);
+ set_points(fDstY.begin(), fSrcY.begin(), yDivs, yCount, yCountFixed, yCountScalable,
+ src.fTop, src.fBottom, dst.fTop, dst.fBottom, yIsScalable);
+
+ fCurrX = fCurrY = 0;
+ fNumRectsInLattice = (xCount + 1) * (yCount + 1);
+ fNumRectsToDraw = fNumRectsInLattice;
+
+ if (lattice.fFlags) {
+ fFlags.push_back_n(fNumRectsInLattice);
+
+ const SkCanvas::Lattice::Flags* flags = lattice.fFlags;
+
+ bool hasPadRow = (yCount != origYCount);
+ bool hasPadCol = (xCount != origXCount);
+ if (hasPadRow) {
+            // The first row of rects is entirely empty, so skip the first row of flags.
+ flags += origXCount + 1;
+ }
+
+ int i = 0;
+ for (int y = 0; y < yCount + 1; y++) {
+ for (int x = 0; x < origXCount + 1; x++) {
+ if (0 == x && hasPadCol) {
+                    // The first column of rects is entirely empty; skip its flag.
+ flags++;
+ continue;
+ }
+
+ fFlags[i] = *flags;
+ flags++;
+ i++;
+ }
+ }
+
+ for (int j = 0; j < fFlags.count(); j++) {
+ if (SkCanvas::Lattice::kTransparent_Flags == fFlags[j]) {
+ fNumRectsToDraw--;
+ }
+ }
+ }
+}
+
+bool SkLatticeIter::Valid(int width, int height, const SkIRect& center) {
+ return !center.isEmpty() && SkIRect::MakeWH(width, height).contains(center);
+}
+
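+// Worked example (illustrative numbers) for the center-rect constructor below: with
+// w = h = 30, c = {10, 10, 20, 20} and dst = {0, 0, 60, 60}, it produces
+// fSrcX = {0, 10, 20, 30} and fDstX = {0, 10, 50, 60} (likewise in y): 10px fixed
+// borders and a center stretched from 10px to 40px.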
+SkLatticeIter::SkLatticeIter(int w, int h, const SkIRect& c, const SkRect& dst) {
+ SkASSERT(SkIRect::MakeWH(w, h).contains(c));
+
+ fSrcX.reset(4);
+ fSrcY.reset(4);
+ fDstX.reset(4);
+ fDstY.reset(4);
+
+ fSrcX[0] = 0;
+ fSrcX[1] = SkIntToScalar(c.fLeft);
+ fSrcX[2] = SkIntToScalar(c.fRight);
+ fSrcX[3] = SkIntToScalar(w);
+
+ fSrcY[0] = 0;
+ fSrcY[1] = SkIntToScalar(c.fTop);
+ fSrcY[2] = SkIntToScalar(c.fBottom);
+ fSrcY[3] = SkIntToScalar(h);
+
+ fDstX[0] = dst.fLeft;
+ fDstX[1] = dst.fLeft + SkIntToScalar(c.fLeft);
+ fDstX[2] = dst.fRight - SkIntToScalar(w - c.fRight);
+ fDstX[3] = dst.fRight;
+
+ fDstY[0] = dst.fTop;
+ fDstY[1] = dst.fTop + SkIntToScalar(c.fTop);
+ fDstY[2] = dst.fBottom - SkIntToScalar(h - c.fBottom);
+ fDstY[3] = dst.fBottom;
+
+ if (fDstX[1] > fDstX[2]) {
+ fDstX[1] = fDstX[0] + (fDstX[3] - fDstX[0]) * c.fLeft / (w - c.width());
+ fDstX[2] = fDstX[1];
+ }
+
+ if (fDstY[1] > fDstY[2]) {
+ fDstY[1] = fDstY[0] + (fDstY[3] - fDstY[0]) * c.fTop / (h - c.height());
+ fDstY[2] = fDstY[1];
+ }
+
+ fCurrX = fCurrY = 0;
+ fNumRectsInLattice = 9;
+ fNumRectsToDraw = 9;
+}
+
+bool SkLatticeIter::next(SkRect* src, SkRect* dst) {
+ int currRect = fCurrX + fCurrY * (fSrcX.count() - 1);
+ if (currRect == fNumRectsInLattice) {
+ return false;
+ }
+
+ const int x = fCurrX;
+ const int y = fCurrY;
+ SkASSERT(x >= 0 && x < fSrcX.count() - 1);
+ SkASSERT(y >= 0 && y < fSrcY.count() - 1);
+
+ if (fSrcX.count() - 1 == ++fCurrX) {
+ fCurrX = 0;
+ fCurrY += 1;
+ }
+
+ if (fFlags.count() > 0 && SkToBool(SkCanvas::Lattice::kTransparent_Flags & fFlags[currRect])) {
+ return this->next(src, dst);
+ }
+
+ src->set(fSrcX[x], fSrcY[y], fSrcX[x + 1], fSrcY[y + 1]);
+ dst->set(fDstX[x], fDstY[y], fDstX[x + 1], fDstY[y + 1]);
+ return true;
+}
+
+void SkLatticeIter::mapDstScaleTranslate(const SkMatrix& matrix) {
+ SkASSERT(matrix.isScaleTranslate());
+ SkScalar tx = matrix.getTranslateX();
+ SkScalar sx = matrix.getScaleX();
+ for (int i = 0; i < fDstX.count(); i++) {
+ fDstX[i] = fDstX[i] * sx + tx;
+ }
+
+ SkScalar ty = matrix.getTranslateY();
+ SkScalar sy = matrix.getScaleY();
+ for (int i = 0; i < fDstY.count(); i++) {
+ fDstY[i] = fDstY[i] * sy + ty;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkLatticeIter.h b/gfx/skia/skia/src/core/SkLatticeIter.h
new file mode 100644
index 000000000..f3d37e668
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLatticeIter.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLatticeIter_DEFINED
+#define SkLatticeIter_DEFINED
+
+#include "SkCanvas.h"
+#include "SkScalar.h"
+#include "SkTArray.h"
+
+struct SkIRect;
+struct SkRect;
+
+/**
+ *  Dissect a lattice request into a sequence of src-rect / dst-rect pairs
+ */
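+//
+// Typical use (illustrative sketch; `lattice`, `dstRect`, `image`, `paint` and `canvas` are
+// placeholders):
+//
+//   SkLatticeIter iter(lattice, dstRect);
+//   SkRect srcR, dstR;
+//   while (iter.next(&srcR, &dstR)) {
+//       canvas->drawImageRect(image, srcR, dstR, &paint);
+//   }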
+class SkLatticeIter {
+public:
+
+ static bool Valid(int imageWidth, int imageHeight, const SkCanvas::Lattice& lattice);
+
+ SkLatticeIter(const SkCanvas::Lattice& lattice, const SkRect& dst);
+
+ static bool Valid(int imageWidth, int imageHeight, const SkIRect& center);
+
+ SkLatticeIter(int imageWidth, int imageHeight, const SkIRect& center, const SkRect& dst);
+
+ /**
+ * While it returns true, use src/dst to draw the image/bitmap
+ */
+ bool next(SkRect* src, SkRect* dst);
+
+ /**
+ * Apply a matrix to the dst points.
+ */
+ void mapDstScaleTranslate(const SkMatrix& matrix);
+
+ /**
+ * Returns the number of rects that will actually be drawn.
+ */
+ int numRectsToDraw() const {
+ return fNumRectsToDraw;
+ }
+
+private:
+ SkTArray<SkScalar> fSrcX;
+ SkTArray<SkScalar> fSrcY;
+ SkTArray<SkScalar> fDstX;
+ SkTArray<SkScalar> fDstY;
+ SkTArray<SkCanvas::Lattice::Flags> fFlags;
+
+ int fCurrX;
+ int fCurrY;
+ int fNumRectsInLattice;
+ int fNumRectsToDraw;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkLightingShader.cpp b/gfx/skia/skia/src/core/SkLightingShader.cpp
new file mode 100644
index 000000000..9030a192b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLightingShader.cpp
@@ -0,0 +1,516 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapProcShader.h"
+#include "SkBitmapProcState.h"
+#include "SkColor.h"
+#include "SkEmptyShader.h"
+#include "SkErrorInternals.h"
+#include "SkLightingShader.h"
+#include "SkMathPriv.h"
+#include "SkNormalSource.h"
+#include "SkPoint3.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+
+////////////////////////////////////////////////////////////////////////////
+
+/*
+ SkLightingShader TODOs:
+ support different light types
+ support multiple lights
+ fix non-opaque diffuse textures
+
+ To Test:
+ A8 diffuse textures
+ down & upsampled draws
+*/
+
+
+
+/** \class SkLightingShaderImpl
+ This subclass of shader applies lighting.
+*/
+class SkLightingShaderImpl : public SkShader {
+public:
+ /** Create a new lighting shader that uses the provided normal map and
+ lights to light the diffuse bitmap.
+ @param diffuseShader the shader that provides the diffuse colors
+ @param normalSource the source of normals for lighting computation
+ @param lights the lights applied to the geometry
+ */
+ SkLightingShaderImpl(sk_sp<SkShader> diffuseShader,
+ sk_sp<SkNormalSource> normalSource,
+ sk_sp<SkLights> lights)
+ : fDiffuseShader(std::move(diffuseShader))
+ , fNormalSource(std::move(normalSource))
+ , fLights(std::move(lights)) {}
+
+ bool isOpaque() const override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(const AsFPArgs&) const override;
+#endif
+
+ class LightingShaderContext : public SkShader::Context {
+ public:
+        // The context takes ownership of the diffuse context and the normal provider. It will
+        // call their destructors and then indirectly free their memory by calling sk_free() on
+        // heapAllocated.
+ LightingShaderContext(const SkLightingShaderImpl&, const ContextRec&,
+ SkShader::Context* diffuseContext, SkNormalSource::Provider*,
+ void* heapAllocated);
+
+ ~LightingShaderContext() override;
+
+ void shadeSpan(int x, int y, SkPMColor[], int count) override;
+
+ uint32_t getFlags() const override { return fFlags; }
+
+ private:
+ SkShader::Context* fDiffuseContext;
+ SkNormalSource::Provider* fNormalProvider;
+ SkColor fPaintColor;
+ uint32_t fFlags;
+
+ void* fHeapAllocated;
+
+ typedef SkShader::Context INHERITED;
+ };
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkLightingShaderImpl)
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ size_t onContextSize(const ContextRec&) const override;
+ Context* onCreateContext(const ContextRec&, void*) const override;
+
+private:
+ sk_sp<SkShader> fDiffuseShader;
+ sk_sp<SkNormalSource> fNormalSource;
+ sk_sp<SkLights> fLights;
+
+ friend class SkLightingShader;
+
+ typedef SkShader INHERITED;
+};
+
+////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+#include "GrCoordTransform.h"
+#include "GrFragmentProcessor.h"
+#include "GrInvariantOutput.h"
+#include "GrTextureAccess.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "SkGr.h"
+#include "SkGrPriv.h"
+
+// This FP expects a premul'd color input for its diffuse color. Premul'ing of the paint's color is
+// handled by the asFragmentProcessor() factory, but shaders providing diffuse color must output it
+// premul'd.
+class LightingFP : public GrFragmentProcessor {
+public:
+ LightingFP(sk_sp<GrFragmentProcessor> normalFP, sk_sp<SkLights> lights) {
+
+ // fuse all ambient lights into a single one
+ fAmbientColor = lights->ambientLightColor();
+ for (int i = 0; i < lights->numLights(); ++i) {
+ if (SkLights::Light::kDirectional_LightType == lights->light(i).type()) {
+ fDirectionalLights.push_back(lights->light(i));
+ // TODO get the handle to the shadow map if there is one
+ } else {
+ SkDEBUGFAIL("Unimplemented Light Type passed to LightingFP");
+ }
+ }
+
+ this->registerChildProcessor(std::move(normalFP));
+ this->initClassID<LightingFP>();
+ }
+
+ class GLSLLightingFP : public GrGLSLFragmentProcessor {
+ public:
+        GLSLLightingFP() {
+            fAmbientColor = SkColor3f::Make(0.0f, 0.0f, 0.0f);
+        }
+
+ void emitCode(EmitArgs& args) override {
+
+ GrGLSLFragmentBuilder* fragBuilder = args.fFragBuilder;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ const LightingFP& lightingFP = args.fFp.cast<LightingFP>();
+
+ const char *lightDirsUniName = nullptr;
+ const char *lightColorsUniName = nullptr;
+ if (lightingFP.fDirectionalLights.count() != 0) {
+ fLightDirsUni = uniformHandler->addUniformArray(
+ kFragment_GrShaderFlag,
+ kVec3f_GrSLType,
+ kDefault_GrSLPrecision,
+ "LightDir",
+ lightingFP.fDirectionalLights.count(),
+ &lightDirsUniName);
+ fLightColorsUni = uniformHandler->addUniformArray(
+ kFragment_GrShaderFlag,
+ kVec3f_GrSLType,
+ kDefault_GrSLPrecision,
+ "LightColor",
+ lightingFP.fDirectionalLights.count(),
+ &lightColorsUniName);
+ }
+
+ const char* ambientColorUniName = nullptr;
+ fAmbientColorUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec3f_GrSLType, kDefault_GrSLPrecision,
+ "AmbientColor", &ambientColorUniName);
+
+ fragBuilder->codeAppendf("vec4 diffuseColor = %s;", args.fInputColor);
+
+ SkString dstNormalName("dstNormal");
+ this->emitChild(0, nullptr, &dstNormalName, args);
+
+ fragBuilder->codeAppendf("vec3 normal = %s.xyz;", dstNormalName.c_str());
+
+ fragBuilder->codeAppend( "vec3 result = vec3(0.0);");
+
+ // diffuse light
+ if (lightingFP.fDirectionalLights.count() != 0) {
+ fragBuilder->codeAppendf("for (int i = 0; i < %d; i++) {",
+ lightingFP.fDirectionalLights.count());
+ // TODO: modulate the contribution from each light based on the shadow map
+ fragBuilder->codeAppendf(" float NdotL = clamp(dot(normal, %s[i]), 0.0, 1.0);",
+ lightDirsUniName);
+ fragBuilder->codeAppendf(" result += %s[i]*diffuseColor.rgb*NdotL;",
+ lightColorsUniName);
+ fragBuilder->codeAppend("}");
+ }
+
+ // ambient light
+ fragBuilder->codeAppendf("result += %s * diffuseColor.rgb;", ambientColorUniName);
+
+ // Clamping to alpha (equivalent to an unpremul'd clamp to 1.0)
+ fragBuilder->codeAppendf("%s = vec4(clamp(result.rgb, 0.0, diffuseColor.a), "
+ "diffuseColor.a);", args.fOutputColor);
+ }
+
+ static void GenKey(const GrProcessor& proc, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const LightingFP& lightingFP = proc.cast<LightingFP>();
+ b->add32(lightingFP.fDirectionalLights.count());
+ }
+
+ protected:
+ void onSetData(const GrGLSLProgramDataManager& pdman, const GrProcessor& proc) override {
+ const LightingFP& lightingFP = proc.cast<LightingFP>();
+
+ const SkTArray<SkLights::Light>& directionalLights = lightingFP.directionalLights();
+ if (directionalLights != fDirectionalLights) {
+                SkTArray<SkVector3> lightDirs(directionalLights.count());
+                SkTArray<SkColor3f> lightColors(directionalLights.count());
+ for (const SkLights::Light& light : directionalLights) {
+ lightDirs.push_back(light.dir());
+ lightColors.push_back(light.color());
+ }
+
+ pdman.set3fv(fLightDirsUni, directionalLights.count(), &(lightDirs[0].fX));
+ pdman.set3fv(fLightColorsUni, directionalLights.count(), &(lightColors[0].fX));
+
+ fDirectionalLights = directionalLights;
+ }
+
+ const SkColor3f& ambientColor = lightingFP.ambientColor();
+ if (ambientColor != fAmbientColor) {
+ pdman.set3fv(fAmbientColorUni, 1, &ambientColor.fX);
+ fAmbientColor = ambientColor;
+ }
+ }
+
+ private:
+ SkTArray<SkLights::Light> fDirectionalLights;
+ GrGLSLProgramDataManager::UniformHandle fLightDirsUni;
+ GrGLSLProgramDataManager::UniformHandle fLightColorsUni;
+
+ SkColor3f fAmbientColor;
+ GrGLSLProgramDataManager::UniformHandle fAmbientColorUni;
+ };
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLLightingFP::GenKey(*this, caps, b);
+ }
+
+ const char* name() const override { return "LightingFP"; }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ inout->mulByUnknownFourComponents();
+ }
+
+ const SkTArray<SkLights::Light>& directionalLights() const { return fDirectionalLights; }
+ const SkColor3f& ambientColor() const { return fAmbientColor; }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override { return new GLSLLightingFP; }
+
+ bool onIsEqual(const GrFragmentProcessor& proc) const override {
+ const LightingFP& lightingFP = proc.cast<LightingFP>();
+ return fDirectionalLights == lightingFP.fDirectionalLights &&
+ fAmbientColor == lightingFP.fAmbientColor;
+ }
+
+ SkTArray<SkLights::Light> fDirectionalLights;
+ SkColor3f fAmbientColor;
+};
+
+////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> SkLightingShaderImpl::asFragmentProcessor(const AsFPArgs& args) const {
+ sk_sp<GrFragmentProcessor> normalFP(fNormalSource->asFragmentProcessor(args));
+ if (!normalFP) {
+ return nullptr;
+ }
+
+ if (fDiffuseShader) {
+ sk_sp<GrFragmentProcessor> fpPipeline[] = {
+ fDiffuseShader->asFragmentProcessor(args),
+ sk_make_sp<LightingFP>(std::move(normalFP), fLights)
+ };
+ if(!fpPipeline[0]) {
+ return nullptr;
+ }
+
+ sk_sp<GrFragmentProcessor> innerLightFP = GrFragmentProcessor::RunInSeries(fpPipeline, 2);
+ // FP is wrapped because paint's alpha needs to be applied to output
+ return GrFragmentProcessor::MulOutputByInputAlpha(std::move(innerLightFP));
+ } else {
+ // FP is wrapped because paint comes in unpremul'd to fragment shader, but LightingFP
+ // expects premul'd color.
+ return GrFragmentProcessor::PremulInput(sk_make_sp<LightingFP>(std::move(normalFP),
+ fLights));
+ }
+}
+
+#endif
+
+////////////////////////////////////////////////////////////////////////////
+
+bool SkLightingShaderImpl::isOpaque() const {
+ return (fDiffuseShader ? fDiffuseShader->isOpaque() : false);
+}
+
+SkLightingShaderImpl::LightingShaderContext::LightingShaderContext(
+ const SkLightingShaderImpl& shader, const ContextRec& rec,
+ SkShader::Context* diffuseContext, SkNormalSource::Provider* normalProvider,
+ void* heapAllocated)
+ : INHERITED(shader, rec)
+ , fDiffuseContext(diffuseContext)
+ , fNormalProvider(normalProvider)
+ , fHeapAllocated(heapAllocated) {
+ bool isOpaque = shader.isOpaque();
+
+ // update fFlags
+ uint32_t flags = 0;
+ if (isOpaque && (255 == this->getPaintAlpha())) {
+ flags |= kOpaqueAlpha_Flag;
+ }
+
+ fPaintColor = rec.fPaint->getColor();
+ fFlags = flags;
+}
+
+SkLightingShaderImpl::LightingShaderContext::~LightingShaderContext() {
+ // The dependencies have been created outside of the context on memory that was allocated by
+ // the onCreateContext() method. Call the destructors and free the memory.
+ if (fDiffuseContext) {
+ fDiffuseContext->~Context();
+ }
+ fNormalProvider->~Provider();
+
+ sk_free(fHeapAllocated);
+}
+
+static inline SkPMColor convert(SkColor3f color, U8CPU a) {
+ if (color.fX <= 0.0f) {
+ color.fX = 0.0f;
+ } else if (color.fX >= 255.0f) {
+ color.fX = 255.0f;
+ }
+
+ if (color.fY <= 0.0f) {
+ color.fY = 0.0f;
+ } else if (color.fY >= 255.0f) {
+ color.fY = 255.0f;
+ }
+
+ if (color.fZ <= 0.0f) {
+ color.fZ = 0.0f;
+ } else if (color.fZ >= 255.0f) {
+ color.fZ = 255.0f;
+ }
+
+ return SkPreMultiplyARGB(a, (int) color.fX, (int) color.fY, (int) color.fZ);
+}
+
+// larger is better (fewer times we have to loop), but we shouldn't
+// take up too much stack-space (each one here costs 16 bytes)
+#define BUFFER_MAX 16
+void SkLightingShaderImpl::LightingShaderContext::shadeSpan(int x, int y,
+ SkPMColor result[], int count) {
+ const SkLightingShaderImpl& lightShader = static_cast<const SkLightingShaderImpl&>(fShader);
+
+ SkPMColor diffuse[BUFFER_MAX];
+ SkPoint3 normals[BUFFER_MAX];
+
+ SkColor diffColor = fPaintColor;
+
+ do {
+ int n = SkTMin(count, BUFFER_MAX);
+
+ fNormalProvider->fillScanLine(x, y, normals, n);
+
+ if (fDiffuseContext) {
+ fDiffuseContext->shadeSpan(x, y, diffuse, n);
+ }
+
+ for (int i = 0; i < n; ++i) {
+ if (fDiffuseContext) {
+ diffColor = SkUnPreMultiply::PMColorToColor(diffuse[i]);
+ }
+
+ SkColor3f accum = SkColor3f::Make(0.0f, 0.0f, 0.0f);
+
+ // Adding ambient light
+ accum.fX += lightShader.fLights->ambientLightColor().fX * SkColorGetR(diffColor);
+ accum.fY += lightShader.fLights->ambientLightColor().fY * SkColorGetG(diffColor);
+ accum.fZ += lightShader.fLights->ambientLightColor().fZ * SkColorGetB(diffColor);
+
+ // This is all done in linear unpremul color space (each component 0..255.0f though)
+ for (int l = 0; l < lightShader.fLights->numLights(); ++l) {
+ const SkLights::Light& light = lightShader.fLights->light(l);
+
+ SkScalar illuminanceScalingFactor = 1.0f;
+
+ if (SkLights::Light::kDirectional_LightType == light.type()) {
+ illuminanceScalingFactor = normals[i].dot(light.dir());
+ if (illuminanceScalingFactor < 0.0f) {
+ illuminanceScalingFactor = 0.0f;
+ }
+ }
+
+ accum.fX += light.color().fX * SkColorGetR(diffColor) * illuminanceScalingFactor;
+ accum.fY += light.color().fY * SkColorGetG(diffColor) * illuminanceScalingFactor;
+ accum.fZ += light.color().fZ * SkColorGetB(diffColor) * illuminanceScalingFactor;
+ }
+
+ // convert() premultiplies the accumulate color with alpha
+ result[i] = convert(accum, SkColorGetA(diffColor));
+ }
+
+ result += n;
+ x += n;
+ count -= n;
+ } while (count > 0);
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+#ifndef SK_IGNORE_TO_STRING
+void SkLightingShaderImpl::toString(SkString* str) const {
+ str->appendf("LightingShader: ()");
+}
+#endif
+
+sk_sp<SkFlattenable> SkLightingShaderImpl::CreateProc(SkReadBuffer& buf) {
+
+ // Discarding SkShader flattenable params
+ bool hasLocalMatrix = buf.readBool();
+ SkAssertResult(!hasLocalMatrix);
+
+ sk_sp<SkLights> lights = SkLights::MakeFromBuffer(buf);
+
+ sk_sp<SkNormalSource> normalSource(buf.readFlattenable<SkNormalSource>());
+
+ bool hasDiffuse = buf.readBool();
+ sk_sp<SkShader> diffuseShader = nullptr;
+ if (hasDiffuse) {
+ diffuseShader = buf.readFlattenable<SkShader>();
+ }
+
+ return sk_make_sp<SkLightingShaderImpl>(std::move(diffuseShader), std::move(normalSource),
+ std::move(lights));
+}
+
+void SkLightingShaderImpl::flatten(SkWriteBuffer& buf) const {
+ this->INHERITED::flatten(buf);
+
+ fLights->flatten(buf);
+
+ buf.writeFlattenable(fNormalSource.get());
+ buf.writeBool(fDiffuseShader);
+ if (fDiffuseShader) {
+ buf.writeFlattenable(fDiffuseShader.get());
+ }
+}
+
+size_t SkLightingShaderImpl::onContextSize(const ContextRec& rec) const {
+ return sizeof(LightingShaderContext);
+}
+
+SkShader::Context* SkLightingShaderImpl::onCreateContext(const ContextRec& rec,
+ void* storage) const {
+ size_t heapRequired = (fDiffuseShader ? fDiffuseShader->contextSize(rec) : 0) +
+ fNormalSource->providerSize(rec);
+ void* heapAllocated = sk_malloc_throw(heapRequired);
+
+ void* diffuseContextStorage = heapAllocated;
+ void* normalProviderStorage = (char*) diffuseContextStorage +
+ (fDiffuseShader ? fDiffuseShader->contextSize(rec) : 0);
+
+ SkShader::Context *diffuseContext = nullptr;
+ if (fDiffuseShader) {
+ diffuseContext = fDiffuseShader->createContext(rec, diffuseContextStorage);
+ if (!diffuseContext) {
+ sk_free(heapAllocated);
+ return nullptr;
+ }
+ }
+
+ SkNormalSource::Provider* normalProvider = fNormalSource->asProvider(rec,
+ normalProviderStorage);
+    if (!normalProvider) {
+        if (diffuseContext) {
+            diffuseContext->~Context();
+        }
+        sk_free(heapAllocated);
+        return nullptr;
+    }
+
+ return new (storage) LightingShaderContext(*this, rec, diffuseContext, normalProvider,
+ heapAllocated);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkShader> SkLightingShader::Make(sk_sp<SkShader> diffuseShader,
+ sk_sp<SkNormalSource> normalSource,
+ sk_sp<SkLights> lights) {
+ SkASSERT(lights);
+ if (!normalSource) {
+ normalSource = SkNormalSource::MakeFlat();
+ }
+
+ return sk_make_sp<SkLightingShaderImpl>(std::move(diffuseShader), std::move(normalSource),
+ std::move(lights));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkLightingShader)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkLightingShaderImpl)
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END
+
+///////////////////////////////////////////////////////////////////////////////
diff --git a/gfx/skia/skia/src/core/SkLightingShader.h b/gfx/skia/skia/src/core/SkLightingShader.h
new file mode 100644
index 000000000..aa90710aa
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLightingShader.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLightingShader_DEFINED
+#define SkLightingShader_DEFINED
+
+#include "SkLights.h"
+#include "SkShader.h"
+
+class SkBitmap;
+class SkMatrix;
+class SkNormalSource;
+
+class SK_API SkLightingShader {
+public:
+ /** Returns a shader that lights the shape, colored by the diffuseShader, using the
+ normals from normalSource, with the set of lights provided.
+
+ @param diffuseShader the shader that provides the colors. If nullptr, uses the paint's
+ color.
+ @param normalSource the source for the shape's normals. If nullptr, assumes straight
+ up normals (<0,0,1>).
+ @param lights the lights applied to the normals
+
+ The lighting equation is currently:
+ result = (LightColor * dot(Normal, LightDir) + AmbientColor) * DiffuseColor
+
+ */
+ static sk_sp<SkShader> Make(sk_sp<SkShader> diffuseShader, sk_sp<SkNormalSource> normalSource,
+ sk_sp<SkLights> lights);
+
+ SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP()
+};
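+
+// Illustrative usage sketch (not part of the original header). `diffuseShader`,
+// `normalSource` and `paint` are placeholders; the SkLights builder calls mirror the
+// serialization code in SkLights.cpp.
+//
+//   SkLights::Builder builder;
+//   builder.setAmbientLightColor(SkColor3f::Make(0.2f, 0.2f, 0.2f));
+//   builder.add(SkLights::Light::MakeDirectional(SkColor3f::Make(1, 1, 1),
+//                                                SkVector3::Make(0, 0, 1), false));
+//   paint.setShader(SkLightingShader::Make(diffuseShader, normalSource, builder.finish()));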
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkLights.cpp b/gfx/skia/skia/src/core/SkLights.cpp
new file mode 100644
index 000000000..56c929943
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLights.cpp
@@ -0,0 +1,88 @@
+
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkLights.h"
+#include "SkReadBuffer.h"
+
+sk_sp<SkLights> SkLights::MakeFromBuffer(SkReadBuffer& buf) {
+ Builder builder;
+
+ SkColor3f ambColor;
+ if (!buf.readScalarArray(&ambColor.fX, 3)) {
+ return nullptr;
+ }
+
+ builder.setAmbientLightColor(ambColor);
+
+ int numLights = buf.readInt();
+
+ for (int l = 0; l < numLights; ++l) {
+ bool isPoint = buf.readBool();
+
+ SkColor3f color;
+ if (!buf.readScalarArray(&color.fX, 3)) {
+ return nullptr;
+ }
+
+ SkVector3 dirOrPos;
+ if (!buf.readScalarArray(&dirOrPos.fX, 3)) {
+ return nullptr;
+ }
+
+ sk_sp<SkImage> depthMap;
+ bool hasShadowMap = buf.readBool();
+ if (hasShadowMap) {
+ if (!(depthMap = buf.readImage())) {
+ return nullptr;
+ }
+ }
+
+ bool isRadial = buf.readBool();
+ if (isPoint) {
+ SkScalar intensity;
+ intensity = buf.readScalar();
+ Light light = Light::MakePoint(color, dirOrPos, intensity, isRadial);
+ light.setShadowMap(depthMap);
+ builder.add(light);
+ } else {
+ Light light = Light::MakeDirectional(color, dirOrPos, isRadial);
+ light.setShadowMap(depthMap);
+ builder.add(light);
+ }
+ }
+
+ return builder.finish();
+}
+
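+// Reference note (added): each light record written by flatten() below and read back by
+// MakeFromBuffer() above is laid out as:
+//   isPoint (bool), color (3 scalars), dir/pos (3 scalars), hasShadowMap (bool),
+//   [shadow map image], isRadial (bool), [intensity, point lights only].
+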
+void SkLights::flatten(SkWriteBuffer& buf) const {
+ buf.writeScalarArray(&this->ambientLightColor().fX, 3);
+
+ buf.writeInt(this->numLights());
+ for (int l = 0; l < this->numLights(); ++l) {
+ const Light& light = this->light(l);
+
+ bool isPoint = Light::kPoint_LightType == light.type();
+
+ buf.writeBool(isPoint);
+ buf.writeScalarArray(&light.color().fX, 3);
+ buf.writeScalarArray(&light.dir().fX, 3);
+
+        bool hasShadowMap = light.getShadowMap() != nullptr;
+        buf.writeBool(hasShadowMap);
+        if (hasShadowMap) {
+            buf.writeImage(light.getShadowMap());
+        }
+
+        bool isRadial = light.isRadial();
+        buf.writeBool(isRadial);
+
+        if (isPoint) {
+            buf.writeScalar(light.intensity());
+        }
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkLineClipper.cpp b/gfx/skia/skia/src/core/SkLineClipper.cpp
new file mode 100644
index 000000000..8d1065632
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLineClipper.cpp
@@ -0,0 +1,261 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkLineClipper.h"
+
+template <typename T> T pin_unsorted(T value, T limit0, T limit1) {
+ if (limit1 < limit0) {
+ SkTSwap(limit0, limit1);
+ }
+ // now the limits are sorted
+ SkASSERT(limit0 <= limit1);
+
+ if (value < limit0) {
+ value = limit0;
+ } else if (value > limit1) {
+ value = limit1;
+ }
+ return value;
+}
+
+// return X coordinate of intersection with horizontal line at Y
+static SkScalar sect_with_horizontal(const SkPoint src[2], SkScalar Y) {
+ SkScalar dy = src[1].fY - src[0].fY;
+ if (SkScalarNearlyZero(dy)) {
+ return SkScalarAve(src[0].fX, src[1].fX);
+ } else {
+ // need the extra precision so we don't compute a value that exceeds
+ // our original limits
+ double X0 = src[0].fX;
+ double Y0 = src[0].fY;
+ double X1 = src[1].fX;
+ double Y1 = src[1].fY;
+ double result = X0 + ((double)Y - Y0) * (X1 - X0) / (Y1 - Y0);
+
+ // The computed X value might still exceed [X0..X1] due to quantum flux
+ // when the doubles were added and subtracted, so we have to pin the
+ // answer :(
+ return (float)pin_unsorted(result, X0, X1);
+ }
+}
+
+// return Y coordinate of intersection with vertical line at X
+static SkScalar sect_with_vertical(const SkPoint src[2], SkScalar X) {
+ SkScalar dx = src[1].fX - src[0].fX;
+ if (SkScalarNearlyZero(dx)) {
+ return SkScalarAve(src[0].fY, src[1].fY);
+ } else {
+ // need the extra precision so we don't compute a value that exceeds
+ // our original limits
+ double X0 = src[0].fX;
+ double Y0 = src[0].fY;
+ double X1 = src[1].fX;
+ double Y1 = src[1].fY;
+ double result = Y0 + ((double)X - X0) * (Y1 - Y0) / (X1 - X0);
+ return (float)result;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline bool nestedLT(SkScalar a, SkScalar b, SkScalar dim) {
+ return a <= b && (a < b || dim > 0);
+}
+
+// returns true if outer contains inner, even if inner is empty.
+// note: outer.contains(inner) always returns false if inner is empty.
+static inline bool containsNoEmptyCheck(const SkRect& outer,
+ const SkRect& inner) {
+ return outer.fLeft <= inner.fLeft && outer.fTop <= inner.fTop &&
+ outer.fRight >= inner.fRight && outer.fBottom >= inner.fBottom;
+}
+
+bool SkLineClipper::IntersectLine(const SkPoint src[2], const SkRect& clip,
+ SkPoint dst[2]) {
+ SkRect bounds;
+
+ bounds.set(src[0], src[1]);
+ if (containsNoEmptyCheck(clip, bounds)) {
+ if (src != dst) {
+ memcpy(dst, src, 2 * sizeof(SkPoint));
+ }
+ return true;
+ }
+ // check for no overlap, and only permit coincident edges if the line
+    // and the edge are collinear
+ if (nestedLT(bounds.fRight, clip.fLeft, bounds.width()) ||
+ nestedLT(clip.fRight, bounds.fLeft, bounds.width()) ||
+ nestedLT(bounds.fBottom, clip.fTop, bounds.height()) ||
+ nestedLT(clip.fBottom, bounds.fTop, bounds.height())) {
+ return false;
+ }
+
+ int index0, index1;
+
+ if (src[0].fY < src[1].fY) {
+ index0 = 0;
+ index1 = 1;
+ } else {
+ index0 = 1;
+ index1 = 0;
+ }
+
+ SkPoint tmp[2];
+ memcpy(tmp, src, sizeof(tmp));
+
+ // now compute Y intersections
+ if (tmp[index0].fY < clip.fTop) {
+ tmp[index0].set(sect_with_horizontal(src, clip.fTop), clip.fTop);
+ }
+ if (tmp[index1].fY > clip.fBottom) {
+ tmp[index1].set(sect_with_horizontal(src, clip.fBottom), clip.fBottom);
+ }
+
+ if (tmp[0].fX < tmp[1].fX) {
+ index0 = 0;
+ index1 = 1;
+ } else {
+ index0 = 1;
+ index1 = 0;
+ }
+
+ // check for quick-reject in X again, now that we may have been chopped
+ if ((tmp[index1].fX <= clip.fLeft || tmp[index0].fX >= clip.fRight) &&
+ tmp[index0].fX < tmp[index1].fX) {
+ // only reject if we have a non-zero width
+ return false;
+ }
+
+ if (tmp[index0].fX < clip.fLeft) {
+ tmp[index0].set(clip.fLeft, sect_with_vertical(src, clip.fLeft));
+ }
+ if (tmp[index1].fX > clip.fRight) {
+ tmp[index1].set(clip.fRight, sect_with_vertical(src, clip.fRight));
+ }
+#ifdef SK_DEBUG
+ bounds.set(tmp[0], tmp[1]);
+ SkASSERT(containsNoEmptyCheck(clip, bounds));
+#endif
+ memcpy(dst, tmp, sizeof(tmp));
+ return true;
+}
+
+#ifdef SK_DEBUG
+// return value between the two limits, where the limits are either ascending
+// or descending.
+static bool is_between_unsorted(SkScalar value,
+ SkScalar limit0, SkScalar limit1) {
+ if (limit0 < limit1) {
+ return limit0 <= value && value <= limit1;
+ } else {
+ return limit1 <= value && value <= limit0;
+ }
+}
+#endif
+
+int SkLineClipper::ClipLine(const SkPoint pts[], const SkRect& clip, SkPoint lines[],
+ bool canCullToTheRight) {
+ int index0, index1;
+
+ if (pts[0].fY < pts[1].fY) {
+ index0 = 0;
+ index1 = 1;
+ } else {
+ index0 = 1;
+ index1 = 0;
+ }
+
+    // Check if we're completely clipped out in Y (above or below the clip).
+
+ if (pts[index1].fY <= clip.fTop) { // we're above the clip
+ return 0;
+ }
+ if (pts[index0].fY >= clip.fBottom) { // we're below the clip
+ return 0;
+ }
+
+ // Chop in Y to produce a single segment, stored in tmp[0..1]
+
+ SkPoint tmp[2];
+ memcpy(tmp, pts, sizeof(tmp));
+
+ // now compute intersections
+ if (pts[index0].fY < clip.fTop) {
+ tmp[index0].set(sect_with_horizontal(pts, clip.fTop), clip.fTop);
+ SkASSERT(is_between_unsorted(tmp[index0].fX, pts[0].fX, pts[1].fX));
+ }
+ if (tmp[index1].fY > clip.fBottom) {
+ tmp[index1].set(sect_with_horizontal(pts, clip.fBottom), clip.fBottom);
+ SkASSERT(is_between_unsorted(tmp[index1].fX, pts[0].fX, pts[1].fX));
+ }
+
+ // Chop it into 1..3 segments that are wholly within the clip in X.
+
+ // temp storage for up to 3 segments
+ SkPoint resultStorage[kMaxPoints];
+ SkPoint* result; // points to our results, either tmp or resultStorage
+ int lineCount = 1;
+ bool reverse;
+
+ if (pts[0].fX < pts[1].fX) {
+ index0 = 0;
+ index1 = 1;
+ reverse = false;
+ } else {
+ index0 = 1;
+ index1 = 0;
+ reverse = true;
+ }
+
+ if (tmp[index1].fX <= clip.fLeft) { // wholly to the left
+ tmp[0].fX = tmp[1].fX = clip.fLeft;
+ result = tmp;
+ reverse = false;
+ } else if (tmp[index0].fX >= clip.fRight) { // wholly to the right
+ if (canCullToTheRight) {
+ return 0;
+ }
+ tmp[0].fX = tmp[1].fX = clip.fRight;
+ result = tmp;
+ reverse = false;
+ } else {
+ result = resultStorage;
+ SkPoint* r = result;
+
+ if (tmp[index0].fX < clip.fLeft) {
+ r->set(clip.fLeft, tmp[index0].fY);
+ r += 1;
+ r->set(clip.fLeft, sect_with_vertical(tmp, clip.fLeft));
+ SkASSERT(is_between_unsorted(r->fY, tmp[0].fY, tmp[1].fY));
+ } else {
+ *r = tmp[index0];
+ }
+ r += 1;
+
+ if (tmp[index1].fX > clip.fRight) {
+ r->set(clip.fRight, sect_with_vertical(tmp, clip.fRight));
+ SkASSERT(is_between_unsorted(r->fY, tmp[0].fY, tmp[1].fY));
+ r += 1;
+ r->set(clip.fRight, tmp[index1].fY);
+ } else {
+ *r = tmp[index1];
+ }
+
+ lineCount = SkToInt(r - result);
+ }
+
+ // Now copy the results into the caller's lines[] parameter
+ if (reverse) {
+ // copy the pts in reverse order to maintain winding order
+ for (int i = 0; i <= lineCount; i++) {
+ lines[lineCount - i] = result[i];
+ }
+ } else {
+ memcpy(lines, result, (lineCount + 1) * sizeof(SkPoint));
+ }
+ return lineCount;
+}
diff --git a/gfx/skia/skia/src/core/SkLineClipper.h b/gfx/skia/skia/src/core/SkLineClipper.h
new file mode 100644
index 000000000..d2c9b5fe7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLineClipper.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkLineClipper_DEFINED
+#define SkLineClipper_DEFINED
+
+#include "SkRect.h"
+#include "SkPoint.h"
+
+class SkLineClipper {
+public:
+ enum {
+ kMaxPoints = 4,
+ kMaxClippedLineSegments = kMaxPoints - 1
+ };
+
+ /* Clip the line pts[0]...pts[1] against clip, ignoring segments that
+ lie completely above or below the clip. For portions to the left or
+ right, turn those into vertical line segments that are aligned to the
+ edge of the clip.
+
+ Return the number of line segments that result, and store the end-points
+ of those segments sequentially in lines as follows:
+ 1st segment: lines[0]..lines[1]
+ 2nd segment: lines[1]..lines[2]
+ 3rd segment: lines[2]..lines[3]
+ */
+ static int ClipLine(const SkPoint pts[2], const SkRect& clip,
+ SkPoint lines[kMaxPoints], bool canCullToTheRight);
+
+ /* Intersect the line segment against the rect. If there is a non-empty
+ resulting segment, return true and set dst[] to that segment. If not,
+ return false and ignore dst[].
+
+ ClipLine is specialized for scan-conversion, as it adds vertical
+ segments on the sides to show where the line extended beyond the
+ left or right sides. IntersectLine does not.
+ */
+ static bool IntersectLine(const SkPoint src[2], const SkRect& clip, SkPoint dst[2]);
+};
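+
+// Illustrative sketch (not part of the original header); `pts` and `clip` are placeholders
+// for the segment endpoints and the clip rect.
+//
+//   SkPoint lines[SkLineClipper::kMaxPoints];
+//   int count = SkLineClipper::ClipLine(pts, clip, lines, false);
+//   for (int i = 0; i < count; ++i) {
+//       // the i-th clipped segment runs from lines[i] to lines[i + 1]
+//   }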
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkLinearBitmapPipeline.cpp b/gfx/skia/skia/src/core/SkLinearBitmapPipeline.cpp
new file mode 100644
index 000000000..401eb41b7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLinearBitmapPipeline.cpp
@@ -0,0 +1,743 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkLinearBitmapPipeline.h"
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+#include <tuple>
+
+#include "SkLinearBitmapPipeline_core.h"
+#include "SkLinearBitmapPipeline_matrix.h"
+#include "SkLinearBitmapPipeline_tile.h"
+#include "SkLinearBitmapPipeline_sample.h"
+#include "SkNx.h"
+#include "SkOpts.h"
+#include "SkPM4f.h"
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// SkLinearBitmapPipeline::Stage
+template<typename Base, size_t kSize, typename Next>
+SkLinearBitmapPipeline::Stage<Base, kSize, Next>::~Stage() {
+ if (fIsInitialized) {
+ this->get()->~Base();
+ }
+}
+
+template<typename Base, size_t kSize, typename Next>
+template<typename Variant, typename... Args>
+void SkLinearBitmapPipeline::Stage<Base, kSize, Next>::initStage(Next* next, Args&& ... args) {
+ SkASSERTF(sizeof(Variant) <= sizeof(fSpace),
+ "Size Variant: %d, Space: %d", sizeof(Variant), sizeof(fSpace));
+
+ new (&fSpace) Variant(next, std::forward<Args>(args)...);
+ fStageCloner = [this](Next* nextClone, void* addr) {
+ new (addr) Variant(nextClone, (const Variant&)*this->get());
+ };
+ fIsInitialized = true;
+};
+
+template<typename Base, size_t kSize, typename Next>
+template<typename Variant, typename... Args>
+void SkLinearBitmapPipeline::Stage<Base, kSize, Next>::initSink(Args&& ... args) {
+ SkASSERTF(sizeof(Variant) <= sizeof(fSpace),
+ "Size Variant: %d, Space: %d", sizeof(Variant), sizeof(fSpace));
+ new (&fSpace) Variant(std::forward<Args>(args)...);
+ fIsInitialized = true;
+};
+
+template<typename Base, size_t kSize, typename Next>
+template <typename To, typename From>
+To* SkLinearBitmapPipeline::Stage<Base, kSize, Next>::getInterface() {
+ From* down = static_cast<From*>(this->get());
+ return static_cast<To*>(down);
+}
+
+template<typename Base, size_t kSize, typename Next>
+Base* SkLinearBitmapPipeline::Stage<Base, kSize, Next>::cloneStageTo(
+ Next* next, Stage* cloneToStage) const
+{
+ if (!fIsInitialized) return nullptr;
+ fStageCloner(next, &cloneToStage->fSpace);
+ return cloneToStage->get();
+}
+
+namespace {
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Matrix Stage
+// PointProcessor uses a strategy to help complete the work of the different stages. The strategy
+// must implement the following methods:
+// * processPoints(xs, ys) - must mutate the xs and ys for the stage.
+// * maybeProcessSpan(span, next) - processes a horizontal series of pixels ("span") in bulk.
+//   span - encapsulation of the span (start point, length, pixel count).
+//   next - a pointer to the next stage.
+//   maybeProcessSpan returns false if it cannot process the span and needs to fall back to
+//   point lists for processing.
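+// For illustration only (not part of the pipeline): a pass-through strategy satisfying this
+// contract could look like
+//   struct IdentityStrategy {
+//       void processPoints(Sk4s* xs, Sk4s* ys) { /* leave points unchanged */ }
+//       template <typename Next>
+//       bool maybeProcessSpan(Span span, Next* next) { next->pointSpan(span); return true; }
+//   };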
+template<typename Strategy, typename Next>
+class MatrixStage final : public SkLinearBitmapPipeline::PointProcessorInterface {
+public:
+ template <typename... Args>
+ MatrixStage(Next* next, Args&&... args)
+ : fNext{next}
+ , fStrategy{std::forward<Args>(args)...}{ }
+
+ MatrixStage(Next* next, const MatrixStage& stage)
+ : fNext{next}
+ , fStrategy{stage.fStrategy} { }
+
+ void SK_VECTORCALL pointListFew(int n, Sk4s xs, Sk4s ys) override {
+ fStrategy.processPoints(&xs, &ys);
+ fNext->pointListFew(n, xs, ys);
+ }
+
+ void SK_VECTORCALL pointList4(Sk4s xs, Sk4s ys) override {
+ fStrategy.processPoints(&xs, &ys);
+ fNext->pointList4(xs, ys);
+ }
+
+ // The span you pass must not be empty.
+ void pointSpan(Span span) override {
+ SkASSERT(!span.isEmpty());
+ if (!fStrategy.maybeProcessSpan(span, fNext)) {
+ span_fallback(span, this);
+ }
+ }
+
+private:
+ Next* const fNext;
+ Strategy fStrategy;
+};
+
+template <typename Next = SkLinearBitmapPipeline::PointProcessorInterface>
+using TranslateMatrix = MatrixStage<TranslateMatrixStrategy, Next>;
+
+template <typename Next = SkLinearBitmapPipeline::PointProcessorInterface>
+using ScaleMatrix = MatrixStage<ScaleMatrixStrategy, Next>;
+
+template <typename Next = SkLinearBitmapPipeline::PointProcessorInterface>
+using AffineMatrix = MatrixStage<AffineMatrixStrategy, Next>;
+
+template <typename Next = SkLinearBitmapPipeline::PointProcessorInterface>
+using PerspectiveMatrix = MatrixStage<PerspectiveMatrixStrategy, Next>;
+
+
+static SkLinearBitmapPipeline::PointProcessorInterface* choose_matrix(
+ SkLinearBitmapPipeline::PointProcessorInterface* next,
+ const SkMatrix& inverse,
+ SkLinearBitmapPipeline::MatrixStage* matrixProc) {
+ if (inverse.hasPerspective()) {
+ matrixProc->initStage<PerspectiveMatrix<>>(
+ next,
+ SkVector{inverse.getTranslateX(), inverse.getTranslateY()},
+ SkVector{inverse.getScaleX(), inverse.getScaleY()},
+ SkVector{inverse.getSkewX(), inverse.getSkewY()},
+ SkVector{inverse.getPerspX(), inverse.getPerspY()},
+ inverse.get(SkMatrix::kMPersp2));
+ } else if (inverse.getSkewX() != 0.0f || inverse.getSkewY() != 0.0f) {
+ matrixProc->initStage<AffineMatrix<>>(
+ next,
+ SkVector{inverse.getTranslateX(), inverse.getTranslateY()},
+ SkVector{inverse.getScaleX(), inverse.getScaleY()},
+ SkVector{inverse.getSkewX(), inverse.getSkewY()});
+ } else if (inverse.getScaleX() != 1.0f || inverse.getScaleY() != 1.0f) {
+ matrixProc->initStage<ScaleMatrix<>>(
+ next,
+ SkVector{inverse.getTranslateX(), inverse.getTranslateY()},
+ SkVector{inverse.getScaleX(), inverse.getScaleY()});
+ } else if (inverse.getTranslateX() != 0.0f || inverse.getTranslateY() != 0.0f) {
+ matrixProc->initStage<TranslateMatrix<>>(
+ next,
+ SkVector{inverse.getTranslateX(), inverse.getTranslateY()});
+ } else {
+ return next;
+ }
+ return matrixProc->get();
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Tile Stage
+
+template<typename XStrategy, typename YStrategy, typename Next>
+class CombinedTileStage final : public SkLinearBitmapPipeline::PointProcessorInterface {
+public:
+ CombinedTileStage(Next* next, SkISize dimensions)
+ : fNext{next}
+ , fXStrategy{dimensions.width()}
+ , fYStrategy{dimensions.height()}{ }
+
+ CombinedTileStage(Next* next, const CombinedTileStage& stage)
+ : fNext{next}
+ , fXStrategy{stage.fXStrategy}
+ , fYStrategy{stage.fYStrategy} { }
+
+ void SK_VECTORCALL pointListFew(int n, Sk4s xs, Sk4s ys) override {
+ fXStrategy.tileXPoints(&xs);
+ fYStrategy.tileYPoints(&ys);
+ fNext->pointListFew(n, xs, ys);
+ }
+
+ void SK_VECTORCALL pointList4(Sk4s xs, Sk4s ys) override {
+ fXStrategy.tileXPoints(&xs);
+ fYStrategy.tileYPoints(&ys);
+ fNext->pointList4(xs, ys);
+ }
+
+ // The span you pass must not be empty.
+ void pointSpan(Span span) override {
+ SkASSERT(!span.isEmpty());
+ SkPoint start; SkScalar length; int count;
+ std::tie(start, length, count) = span;
+
+ if (span.count() == 1) {
+ // DANGER:
+ // The explicit casts from float to Sk4f are not usually necessary, but are here to
+ // work around an MSVC 2015u2 c++ code generation bug. This is tracked using skia bug
+ // 5566.
+ this->pointListFew(1, Sk4f{span.startX()}, Sk4f{span.startY()});
+ return;
+ }
+
+ SkScalar x = X(start);
+ SkScalar y = fYStrategy.tileY(Y(start));
+ Span yAdjustedSpan{{x, y}, length, count};
+
+ if (!fXStrategy.maybeProcessSpan(yAdjustedSpan, fNext)) {
+ span_fallback(span, this);
+ }
+ }
+
+private:
+ Next* const fNext;
+ XStrategy fXStrategy;
+ YStrategy fYStrategy;
+};
+
+template <typename XStrategy, typename Next>
+void choose_tiler_ymode(
+ SkShader::TileMode yMode, SkFilterQuality filterQuality, SkISize dimensions,
+ Next* next,
+ SkLinearBitmapPipeline::TileStage* tileStage) {
+ switch (yMode) {
+ case SkShader::kClamp_TileMode: {
+ using Tiler = CombinedTileStage<XStrategy, YClampStrategy, Next>;
+ tileStage->initStage<Tiler>(next, dimensions);
+ break;
+ }
+ case SkShader::kRepeat_TileMode: {
+ using Tiler = CombinedTileStage<XStrategy, YRepeatStrategy, Next>;
+ tileStage->initStage<Tiler>(next, dimensions);
+ break;
+ }
+ case SkShader::kMirror_TileMode: {
+ using Tiler = CombinedTileStage<XStrategy, YMirrorStrategy, Next>;
+ tileStage->initStage<Tiler>(next, dimensions);
+ break;
+ }
+ }
+};
+
+static SkLinearBitmapPipeline::PointProcessorInterface* choose_tiler(
+ SkLinearBitmapPipeline::SampleProcessorInterface* next,
+ SkISize dimensions,
+ SkShader::TileMode xMode,
+ SkShader::TileMode yMode,
+ SkFilterQuality filterQuality,
+ SkScalar dx,
+ SkLinearBitmapPipeline::TileStage* tileStage)
+{
+ switch (xMode) {
+ case SkShader::kClamp_TileMode:
+ choose_tiler_ymode<XClampStrategy>(yMode, filterQuality, dimensions, next, tileStage);
+ break;
+ case SkShader::kRepeat_TileMode:
+ if (dx == 1.0f && filterQuality == kNone_SkFilterQuality) {
+ choose_tiler_ymode<XRepeatUnitScaleStrategy>(
+ yMode, kNone_SkFilterQuality, dimensions, next, tileStage);
+ } else {
+ choose_tiler_ymode<XRepeatStrategy>(
+ yMode, filterQuality, dimensions, next, tileStage);
+ }
+ break;
+ case SkShader::kMirror_TileMode:
+ choose_tiler_ymode<XMirrorStrategy>(yMode, filterQuality, dimensions, next, tileStage);
+ break;
+ }
+
+ return tileStage->get();
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Specialized Samplers
+
+// RGBA8888UnitRepeatSrc - A sampler that takes advantage of the fact that the src and destination
+// are in the same format and need no transformation in pixel space. Therefore, there is no
+// need to convert them to the HiFi pixel format.
+class RGBA8888UnitRepeatSrc final : public SkLinearBitmapPipeline::SampleProcessorInterface,
+ public SkLinearBitmapPipeline::DestinationInterface {
+public:
+ RGBA8888UnitRepeatSrc(const uint32_t* src, int32_t width)
+ : fSrc{src}, fWidth{width} { }
+
+ void SK_VECTORCALL pointListFew(int n, Sk4s xs, Sk4s ys) override {
+ SkASSERT(fDest + n <= fEnd);
+ // At this point xs and ys should be >= 0, so trunc is the same as floor.
+ Sk4i iXs = SkNx_cast<int>(xs);
+ Sk4i iYs = SkNx_cast<int>(ys);
+
+ if (n >= 1) *fDest++ = *this->pixelAddress(iXs[0], iYs[0]);
+ if (n >= 2) *fDest++ = *this->pixelAddress(iXs[1], iYs[1]);
+ if (n >= 3) *fDest++ = *this->pixelAddress(iXs[2], iYs[2]);
+ }
+
+ void SK_VECTORCALL pointList4(Sk4s xs, Sk4s ys) override {
+ SkASSERT(fDest + 4 <= fEnd);
+ Sk4i iXs = SkNx_cast<int>(xs);
+ Sk4i iYs = SkNx_cast<int>(ys);
+ *fDest++ = *this->pixelAddress(iXs[0], iYs[0]);
+ *fDest++ = *this->pixelAddress(iXs[1], iYs[1]);
+ *fDest++ = *this->pixelAddress(iXs[2], iYs[2]);
+ *fDest++ = *this->pixelAddress(iXs[3], iYs[3]);
+ }
+
+ void pointSpan(Span span) override {
+ SkASSERT(fDest + span.count() <= fEnd);
+ if (span.length() != 0.0f) {
+ int32_t x = SkScalarTruncToInt(span.startX());
+ int32_t y = SkScalarTruncToInt(span.startY());
+ const uint32_t* src = this->pixelAddress(x, y);
+ memmove(fDest, src, span.count() * sizeof(uint32_t));
+ fDest += span.count();
+ }
+ }
+
+ void repeatSpan(Span span, int32_t repeatCount) override {
+ SkASSERT(fDest + span.count() * repeatCount <= fEnd);
+
+ int32_t x = SkScalarTruncToInt(span.startX());
+ int32_t y = SkScalarTruncToInt(span.startY());
+ const uint32_t* src = this->pixelAddress(x, y);
+ uint32_t* dest = fDest;
+ while (repeatCount --> 0) {
+ memmove(dest, src, span.count() * sizeof(uint32_t));
+ dest += span.count();
+ }
+ fDest = dest;
+ }
+
+ void setDestination(void* dst, int count) override {
+ fDest = static_cast<uint32_t*>(dst);
+ fEnd = fDest + count;
+ }
+
+private:
+ const uint32_t* pixelAddress(int32_t x, int32_t y) {
+ return &fSrc[fWidth * y + x];
+ }
+ const uint32_t* const fSrc;
+ const int32_t fWidth;
+ uint32_t* fDest;
+ uint32_t* fEnd;
+};
+
+// RGBA8888UnitRepeatSrcOver - A sampler that takes advantage of the fact that the src and
+// destination are in the same format and need no transformation in pixel space. Therefore,
+// there is no need to convert them to the HiFi pixel format.
+class RGBA8888UnitRepeatSrcOver final : public SkLinearBitmapPipeline::SampleProcessorInterface,
+ public SkLinearBitmapPipeline::DestinationInterface {
+public:
+ RGBA8888UnitRepeatSrcOver(const uint32_t* src, int32_t width)
+ : fSrc{src}, fWidth{width} { }
+
+ void SK_VECTORCALL pointListFew(int n, Sk4s xs, Sk4s ys) override {
+ SkASSERT(fDest + n <= fEnd);
+ // At this point xs and ys should be >= 0, so trunc is the same as floor.
+ Sk4i iXs = SkNx_cast<int>(xs);
+ Sk4i iYs = SkNx_cast<int>(ys);
+
+ if (n >= 1) blendPixelAt(iXs[0], iYs[0]);
+ if (n >= 2) blendPixelAt(iXs[1], iYs[1]);
+ if (n >= 3) blendPixelAt(iXs[2], iYs[2]);
+ }
+
+ void SK_VECTORCALL pointList4(Sk4s xs, Sk4s ys) override {
+ SkASSERT(fDest + 4 <= fEnd);
+ Sk4i iXs = SkNx_cast<int>(xs);
+ Sk4i iYs = SkNx_cast<int>(ys);
+ blendPixelAt(iXs[0], iYs[0]);
+ blendPixelAt(iXs[1], iYs[1]);
+ blendPixelAt(iXs[2], iYs[2]);
+ blendPixelAt(iXs[3], iYs[3]);
+ }
+
+ void pointSpan(Span span) override {
+ if (span.length() != 0.0f) {
+ this->repeatSpan(span, 1);
+ }
+ }
+
+ void repeatSpan(Span span, int32_t repeatCount) override {
+ SkASSERT(fDest + span.count() * repeatCount <= fEnd);
+ SkASSERT(span.count() > 0);
+ SkASSERT(repeatCount > 0);
+
+ int32_t x = (int32_t)span.startX();
+ int32_t y = (int32_t)span.startY();
+ const uint32_t* beginSpan = this->pixelAddress(x, y);
+
+ SkOpts::srcover_srgb_srgb(fDest, beginSpan, span.count() * repeatCount, span.count());
+
+ fDest += span.count() * repeatCount;
+
+ SkASSERT(fDest <= fEnd);
+ }
+
+ void setDestination(void* dst, int count) override {
+ SkASSERT(count > 0);
+ fDest = static_cast<uint32_t*>(dst);
+ fEnd = fDest + count;
+ }
+
+private:
+ const uint32_t* pixelAddress(int32_t x, int32_t y) {
+ return &fSrc[fWidth * y + x];
+ }
+
+ void blendPixelAt(int32_t x, int32_t y) {
+ const uint32_t* src = this->pixelAddress(x, y);
+ SkOpts::srcover_srgb_srgb(fDest, src, 1, 1);
+ fDest += 1;
+ }
+
+ const uint32_t* const fSrc;
+ const int32_t fWidth;
+ uint32_t* fDest;
+ uint32_t* fEnd;
+};
+
+using Blender = SkLinearBitmapPipeline::BlendProcessorInterface;
+
+template <SkColorType colorType>
+static SkLinearBitmapPipeline::PixelAccessorInterface* choose_specific_accessor(
+ const SkPixmap& srcPixmap, SkLinearBitmapPipeline::Accessor* accessor)
+{
+ if (srcPixmap.info().gammaCloseToSRGB()) {
+ using PA = PixelAccessor<colorType, kSRGB_SkGammaType>;
+ accessor->init<PA>(srcPixmap);
+ return accessor->get();
+ } else {
+ using PA = PixelAccessor<colorType, kLinear_SkGammaType>;
+ accessor->init<PA>(srcPixmap);
+ return accessor->get();
+ }
+}
+
+static SkLinearBitmapPipeline::PixelAccessorInterface* choose_pixel_accessor(
+ const SkPixmap& srcPixmap,
+ const SkColor A8TintColor,
+ SkLinearBitmapPipeline::Accessor* accessor)
+{
+ const SkImageInfo& imageInfo = srcPixmap.info();
+
+ SkLinearBitmapPipeline::PixelAccessorInterface* pixelAccessor = nullptr;
+ switch (imageInfo.colorType()) {
+ case kAlpha_8_SkColorType: {
+ using PA = PixelAccessor<kAlpha_8_SkColorType, kLinear_SkGammaType>;
+ accessor->init<PA>(srcPixmap, A8TintColor);
+ pixelAccessor = accessor->get();
+ }
+ break;
+ case kARGB_4444_SkColorType:
+ pixelAccessor = choose_specific_accessor<kARGB_4444_SkColorType>(srcPixmap, accessor);
+ break;
+ case kRGB_565_SkColorType:
+ pixelAccessor = choose_specific_accessor<kRGB_565_SkColorType>(srcPixmap, accessor);
+ break;
+ case kRGBA_8888_SkColorType:
+ pixelAccessor = choose_specific_accessor<kRGBA_8888_SkColorType>(srcPixmap, accessor);
+ break;
+ case kBGRA_8888_SkColorType:
+ pixelAccessor = choose_specific_accessor<kBGRA_8888_SkColorType>(srcPixmap, accessor);
+ break;
+ case kIndex_8_SkColorType:
+ pixelAccessor = choose_specific_accessor<kIndex_8_SkColorType>(srcPixmap, accessor);
+ break;
+ case kGray_8_SkColorType:
+ pixelAccessor = choose_specific_accessor<kGray_8_SkColorType>(srcPixmap, accessor);
+ break;
+ case kRGBA_F16_SkColorType: {
+ using PA = PixelAccessor<kRGBA_F16_SkColorType, kLinear_SkGammaType>;
+ accessor->init<PA>(srcPixmap);
+ pixelAccessor = accessor->get();
+ }
+ break;
+ default:
+ SkFAIL("Not implemented. Unsupported src");
+ break;
+ }
+
+ return pixelAccessor;
+}
+
+SkLinearBitmapPipeline::SampleProcessorInterface* choose_pixel_sampler(
+ Blender* next,
+ SkFilterQuality filterQuality,
+ SkShader::TileMode xTile, SkShader::TileMode yTile,
+ const SkPixmap& srcPixmap,
+ const SkColor A8TintColor,
+ SkLinearBitmapPipeline::SampleStage* sampleStage,
+ SkLinearBitmapPipeline::Accessor* accessor) {
+ const SkImageInfo& imageInfo = srcPixmap.info();
+ SkISize dimensions = imageInfo.dimensions();
+
+ // Special case samplers with fully expanded templates
+ if (imageInfo.gammaCloseToSRGB()) {
+ if (filterQuality == kNone_SkFilterQuality) {
+ switch (imageInfo.colorType()) {
+ case kN32_SkColorType: {
+ using S =
+ NearestNeighborSampler<
+ PixelAccessor<kN32_SkColorType, kSRGB_SkGammaType>, Blender>;
+ sampleStage->initStage<S>(next, srcPixmap);
+ return sampleStage->get();
+ }
+ case kIndex_8_SkColorType: {
+ using S =
+ NearestNeighborSampler<
+ PixelAccessor<kIndex_8_SkColorType, kSRGB_SkGammaType>, Blender>;
+ sampleStage->initStage<S>(next, srcPixmap);
+ return sampleStage->get();
+ }
+ default:
+ break;
+ }
+ } else {
+ switch (imageInfo.colorType()) {
+ case kN32_SkColorType: {
+ using S =
+ BilerpSampler<
+ PixelAccessor<kN32_SkColorType, kSRGB_SkGammaType>, Blender>;
+ sampleStage->initStage<S>(next, dimensions, xTile, yTile, srcPixmap);
+ return sampleStage->get();
+ }
+ case kIndex_8_SkColorType: {
+ using S =
+ BilerpSampler<
+ PixelAccessor<kIndex_8_SkColorType, kSRGB_SkGammaType>, Blender>;
+ sampleStage->initStage<S>(next, dimensions, xTile, yTile, srcPixmap);
+ return sampleStage->get();
+ }
+ default:
+ break;
+ }
+ }
+ }
+
+ auto pixelAccessor = choose_pixel_accessor(srcPixmap, A8TintColor, accessor);
+ // General cases.
+ if (filterQuality == kNone_SkFilterQuality) {
+ using S = NearestNeighborSampler<PixelAccessorShim, Blender>;
+ sampleStage->initStage<S>(next, pixelAccessor);
+ } else {
+ using S = BilerpSampler<PixelAccessorShim, Blender>;
+ sampleStage->initStage<S>(next, dimensions, xTile, yTile, pixelAccessor);
+ }
+ return sampleStage->get();
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Pixel Blender Stage
+template <SkAlphaType alphaType>
+class SrcFPPixel final : public SkLinearBitmapPipeline::BlendProcessorInterface {
+public:
+ SrcFPPixel(float postAlpha) : fPostAlpha{postAlpha} { }
+ SrcFPPixel(const SrcFPPixel& Blender) : fPostAlpha(Blender.fPostAlpha) {}
+ void SK_VECTORCALL blendPixel(Sk4f pixel) override {
+ SkASSERT(fDst + 1 <= fEnd );
+ this->srcPixel(fDst, pixel, 0);
+ fDst += 1;
+ }
+
+ void SK_VECTORCALL blend4Pixels(Sk4f p0, Sk4f p1, Sk4f p2, Sk4f p3) override {
+ SkASSERT(fDst + 4 <= fEnd);
+ SkPM4f* dst = fDst;
+ this->srcPixel(dst, p0, 0);
+ this->srcPixel(dst, p1, 1);
+ this->srcPixel(dst, p2, 2);
+ this->srcPixel(dst, p3, 3);
+ fDst += 4;
+ }
+
+ void setDestination(void* dst, int count) override {
+ fDst = static_cast<SkPM4f*>(dst);
+ fEnd = fDst + count;
+ }
+
+private:
+ void SK_VECTORCALL srcPixel(SkPM4f* dst, Sk4f pixel, int index) {
+ check_pixel(pixel);
+
+ Sk4f newPixel = pixel;
+ if (alphaType == kUnpremul_SkAlphaType) {
+ newPixel = Premultiply(pixel);
+ }
+ newPixel = newPixel * fPostAlpha;
+ newPixel.store(dst + index);
+ }
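+    // For example, Premultiply() below maps an unpremultiplied half-transparent white
+    // {1, 1, 1, 0.5} to {0.5, 0.5, 0.5, 0.5}; the alpha channel is left unchanged by the
+    // {a, a, a, 1} factor.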
+ static Sk4f SK_VECTORCALL Premultiply(Sk4f pixel) {
+ float alpha = pixel[3];
+ return pixel * Sk4f{alpha, alpha, alpha, 1.0f};
+ }
+
+ SkPM4f* fDst;
+ SkPM4f* fEnd;
+ Sk4f fPostAlpha;
+};
+
+static SkLinearBitmapPipeline::BlendProcessorInterface* choose_blender_for_shading(
+ SkAlphaType alphaType,
+ float postAlpha,
+ SkLinearBitmapPipeline::BlenderStage* blenderStage) {
+ if (alphaType == kUnpremul_SkAlphaType) {
+ blenderStage->initSink<SrcFPPixel<kUnpremul_SkAlphaType>>(postAlpha);
+ } else {
+ // kOpaque_SkAlphaType is treated the same as kPremul_SkAlphaType
+ blenderStage->initSink<SrcFPPixel<kPremul_SkAlphaType>>(postAlpha);
+ }
+ return blenderStage->get();
+}
+
+} // namespace
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// SkLinearBitmapPipeline
+SkLinearBitmapPipeline::~SkLinearBitmapPipeline() {}
+
+SkLinearBitmapPipeline::SkLinearBitmapPipeline(
+ const SkMatrix& inverse,
+ SkFilterQuality filterQuality,
+ SkShader::TileMode xTile, SkShader::TileMode yTile,
+ SkColor paintColor,
+ const SkPixmap& srcPixmap)
+{
+ SkISize dimensions = srcPixmap.info().dimensions();
+ const SkImageInfo& srcImageInfo = srcPixmap.info();
+
+ SkMatrix adjustedInverse = inverse;
+ if (filterQuality == kNone_SkFilterQuality) {
+ if (inverse.getScaleX() >= 0.0f) {
+ adjustedInverse.setTranslateX(
+ nextafterf(inverse.getTranslateX(), std::floor(inverse.getTranslateX())));
+ }
+ if (inverse.getScaleY() >= 0.0f) {
+ adjustedInverse.setTranslateY(
+ nextafterf(inverse.getTranslateY(), std::floor(inverse.getTranslateY())));
+ }
+ }
+
+ SkScalar dx = adjustedInverse.getScaleX();
+
+ // If it is an index 8 color type, the sampler converts to unpremul for better fidelity.
+ SkAlphaType alphaType = srcImageInfo.alphaType();
+ if (srcPixmap.colorType() == kIndex_8_SkColorType) {
+ alphaType = kUnpremul_SkAlphaType;
+ }
+
+ float postAlpha = SkColorGetA(paintColor) * (1.0f / 255.0f);
+ // As the stages are built, the chooser function may skip a stage. For example, with the
+ // identity matrix, the matrix stage is skipped, and the tilerStage is the first stage.
+ auto blenderStage = choose_blender_for_shading(alphaType, postAlpha, &fBlenderStage);
+ auto samplerStage = choose_pixel_sampler(
+ blenderStage, filterQuality, xTile, yTile,
+ srcPixmap, paintColor, &fSampleStage, &fAccessor);
+ auto tilerStage = choose_tiler(samplerStage, dimensions, xTile, yTile,
+ filterQuality, dx, &fTileStage);
+ fFirstStage = choose_matrix(tilerStage, adjustedInverse, &fMatrixStage);
+ fLastStage = blenderStage;
+}
+
+bool SkLinearBitmapPipeline::ClonePipelineForBlitting(
+ SkEmbeddableLinearPipeline* pipelineStorage,
+ const SkLinearBitmapPipeline& pipeline,
+ SkMatrix::TypeMask matrixMask,
+ SkShader::TileMode xTileMode,
+ SkShader::TileMode yTileMode,
+ SkFilterQuality filterQuality,
+ const SkPixmap& srcPixmap,
+ float finalAlpha,
+ SkXfermode::Mode xferMode,
+ const SkImageInfo& dstInfo)
+{
+ if (xferMode == SkXfermode::kSrcOver_Mode
+ && srcPixmap.info().alphaType() == kOpaque_SkAlphaType) {
+ xferMode = SkXfermode::kSrc_Mode;
+ }
+
+ if (matrixMask & ~SkMatrix::kTranslate_Mask ) { return false; }
+ if (filterQuality != SkFilterQuality::kNone_SkFilterQuality) { return false; }
+ if (finalAlpha != 1.0f) { return false; }
+ if (srcPixmap.info().colorType() != kRGBA_8888_SkColorType
+ || dstInfo.colorType() != kRGBA_8888_SkColorType) { return false; }
+
+ if (!srcPixmap.info().gammaCloseToSRGB() || !dstInfo.gammaCloseToSRGB()) {
+ return false;
+ }
+
+ if (xferMode != SkXfermode::kSrc_Mode && xferMode != SkXfermode::kSrcOver_Mode) {
+ return false;
+ }
+
+ pipelineStorage->init(pipeline, srcPixmap, xferMode, dstInfo);
+
+ return true;
+}
+
+SkLinearBitmapPipeline::SkLinearBitmapPipeline(
+ const SkLinearBitmapPipeline& pipeline,
+ const SkPixmap& srcPixmap,
+ SkXfermode::Mode mode,
+ const SkImageInfo& dstInfo)
+{
+ SkASSERT(mode == SkXfermode::kSrc_Mode || mode == SkXfermode::kSrcOver_Mode);
+ SkASSERT(srcPixmap.info().colorType() == dstInfo.colorType()
+ && srcPixmap.info().colorType() == kRGBA_8888_SkColorType);
+
+ if (mode == SkXfermode::kSrc_Mode) {
+ fSampleStage.initSink<RGBA8888UnitRepeatSrc>(
+ srcPixmap.writable_addr32(0, 0), srcPixmap.rowBytes() / 4);
+ fLastStage = fSampleStage.getInterface<DestinationInterface, RGBA8888UnitRepeatSrc>();
+ } else {
+ fSampleStage.initSink<RGBA8888UnitRepeatSrcOver>(
+ srcPixmap.writable_addr32(0, 0), srcPixmap.rowBytes() / 4);
+ fLastStage = fSampleStage.getInterface<DestinationInterface, RGBA8888UnitRepeatSrcOver>();
+ }
+
+ auto sampleStage = fSampleStage.get();
+ auto tilerStage = pipeline.fTileStage.cloneStageTo(sampleStage, &fTileStage);
+ tilerStage = (tilerStage != nullptr) ? tilerStage : sampleStage;
+ auto matrixStage = pipeline.fMatrixStage.cloneStageTo(tilerStage, &fMatrixStage);
+ matrixStage = (matrixStage != nullptr) ? matrixStage : tilerStage;
+ fFirstStage = matrixStage;
+}
+
+void SkLinearBitmapPipeline::shadeSpan4f(int x, int y, SkPM4f* dst, int count) {
+ SkASSERT(count > 0);
+ this->blitSpan(x, y, dst, count);
+}
+
+void SkLinearBitmapPipeline::blitSpan(int x, int y, void* dst, int count) {
+ SkASSERT(count > 0);
+ fLastStage->setDestination(dst, count);
+
+ // The count and length arguments start out in a precise relation in order to keep the
+    // math correct through the different stages. Count is the number of pixels to produce.
+ // Since the code samples at pixel centers, length is the distance from the center of the
+ // first pixel to the center of the last pixel. This implies that length is count-1.
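+    // For example, a hypothetical call blitSpan(3, 7, dst, 4) builds Span{{3.5f, 7.5f}, 3.0f, 4},
+    // sampling the centers 3.5, 4.5, 5.5, 6.5 on the row y = 7.5.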
+ fFirstStage->pointSpan(Span{{x + 0.5f, y + 0.5f}, count - 1.0f, count});
+}
diff --git a/gfx/skia/skia/src/core/SkLinearBitmapPipeline.h b/gfx/skia/skia/src/core/SkLinearBitmapPipeline.h
new file mode 100644
index 000000000..91b573df5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLinearBitmapPipeline.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLinearBitmapPipeline_DEFINED
+#define SkLinearBitmapPipeline_DEFINED
+
+#include "SkColor.h"
+#include "SkImageInfo.h"
+#include "SkMatrix.h"
+#include "SkShader.h"
+
+class SkEmbeddableLinearPipeline;
+
+enum SkGammaType {
+ kLinear_SkGammaType,
+ kSRGB_SkGammaType,
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// SkLinearBitmapPipeline - encapsulates all the machinery for doing floating point pixel
+// processing in a linear color space.
+// Note: this class has unusual alignment requirements due to its use of SIMD instructions. The
+// class SkEmbeddableLinearPipeline below manages these requirements.
+class SkLinearBitmapPipeline {
+public:
+ SkLinearBitmapPipeline(
+ const SkMatrix& inverse,
+ SkFilterQuality filterQuality,
+ SkShader::TileMode xTile, SkShader::TileMode yTile,
+ SkColor paintColor,
+ const SkPixmap& srcPixmap);
+
+ SkLinearBitmapPipeline(
+ const SkLinearBitmapPipeline& pipeline,
+ const SkPixmap& srcPixmap,
+ SkXfermode::Mode xferMode,
+ const SkImageInfo& dstInfo);
+
+ static bool ClonePipelineForBlitting(
+ SkEmbeddableLinearPipeline* pipelineStorage,
+ const SkLinearBitmapPipeline& pipeline,
+ SkMatrix::TypeMask matrixMask,
+ SkShader::TileMode xTileMode,
+ SkShader::TileMode yTileMode,
+ SkFilterQuality filterQuality,
+ const SkPixmap& srcPixmap,
+ float finalAlpha,
+ SkXfermode::Mode xferMode,
+ const SkImageInfo& dstInfo);
+
+ ~SkLinearBitmapPipeline();
+
+ void shadeSpan4f(int x, int y, SkPM4f* dst, int count);
+ void blitSpan(int32_t x, int32_t y, void* dst, int count);
+
+ template<typename Base, size_t kSize, typename Next = void>
+ class Stage {
+ public:
+ Stage() : fIsInitialized{false} {}
+ ~Stage();
+
+ template<typename Variant, typename... Args>
+ void initStage(Next* next, Args&& ... args);
+
+ template<typename Variant, typename... Args>
+ void initSink(Args&& ... args);
+
+ template <typename To, typename From>
+ To* getInterface();
+
+ // Copy this stage to `cloneToStage` with `next` as its next stage
+    // (not necessarily the same as this stage's own next), returning `cloneToStage`.
+ // Note: There is no cloneSinkTo method because the code usually places the top part of
+ // the pipeline on a new sampler.
+ Base* cloneStageTo(Next* next, Stage* cloneToStage) const;
+
+ Base* get() const { return reinterpret_cast<Base*>(&fSpace); }
+ Base* operator->() const { return this->get(); }
+ Base& operator*() const { return *(this->get()); }
+
+ private:
+ std::function<void (Next*, void*)> fStageCloner;
+ struct SK_STRUCT_ALIGN(16) Space {
+ char space[kSize];
+ };
+ bool fIsInitialized;
+ mutable Space fSpace;
+ };
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// PolyMemory
+ template <typename Base, size_t kSize>
+ class PolyMemory {
+ public:
+ PolyMemory() : fIsInitialized{false} { }
+ ~PolyMemory() {
+ if (fIsInitialized) {
+ this->get()->~Base();
+ }
+ }
+ template<typename Variant, typename... Args>
+ void init(Args&& ... args) {
+ SkASSERTF(sizeof(Variant) <= sizeof(fSpace),
+ "Size Variant: %d, Space: %d", sizeof(Variant), sizeof(fSpace));
+
+ new (&fSpace) Variant(std::forward<Args>(args)...);
+ fIsInitialized = true;
+ }
+
+ Base* get() const { return reinterpret_cast<Base*>(&fSpace); }
+ Base* operator->() const { return this->get(); }
+ Base& operator*() const { return *(this->get()); }
+
+ private:
+ struct SK_STRUCT_ALIGN(16) Space {
+ char space[kSize];
+ };
+ mutable Space fSpace;
+ bool fIsInitialized;
+
+ };
+
+ class PointProcessorInterface;
+ class SampleProcessorInterface;
+ class BlendProcessorInterface;
+ class DestinationInterface;
+ class PixelAccessorInterface;
+
+    // These values were generated by the asserts in Stage::init{Sink|Stage}.
+ using MatrixStage = Stage<PointProcessorInterface, 160, PointProcessorInterface>;
+ using TileStage = Stage<PointProcessorInterface, 160, SampleProcessorInterface>;
+ using SampleStage = Stage<SampleProcessorInterface, 160, BlendProcessorInterface>;
+ using BlenderStage = Stage<BlendProcessorInterface, 40>;
+ using Accessor = PolyMemory<PixelAccessorInterface, 64>;
+
+private:
+ PointProcessorInterface* fFirstStage;
+ MatrixStage fMatrixStage;
+ TileStage fTileStage;
+ SampleStage fSampleStage;
+ BlenderStage fBlenderStage;
+ DestinationInterface* fLastStage;
+ Accessor fAccessor;
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// SkEmbeddableLinearPipeline - manages the stricter alignment needs of SkLinearBitmapPipeline.
+class SkEmbeddableLinearPipeline {
+public:
+ SkEmbeddableLinearPipeline() { }
+ ~SkEmbeddableLinearPipeline() {
+ if (get() != nullptr) {
+ get()->~SkLinearBitmapPipeline();
+ }
+ }
+
+ template <typename... Args>
+ void init(Args&&... args) {
+ // Ensure that our pipeline is created at a 16 byte aligned address.
+ fPipeline = (SkLinearBitmapPipeline*)SkAlign16((intptr_t)fPipelineStorage);
+ new (fPipeline) SkLinearBitmapPipeline{std::forward<Args>(args)...};
+ }
+
+ SkLinearBitmapPipeline* get() const { return fPipeline; }
+ SkLinearBitmapPipeline& operator*() const { return *this->get(); }
+ SkLinearBitmapPipeline* operator->() const { return this->get(); }
+
+private:
+ enum {
+ kActualSize = sizeof(SkLinearBitmapPipeline),
+ kPaddedSize = SkAlignPtr(kActualSize + 12),
+ };
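+    // Presumably the extra 12 bytes give init() above room to bump the pipeline up to the next
+    // 16-byte boundary: fPipelineStorage is only guaranteed pointer alignment, so up to 12 bytes
+    // of slack may be needed when the storage is merely 4-byte aligned.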
+ void* fPipelineStorage[kPaddedSize / sizeof(void*)];
+ SkLinearBitmapPipeline* fPipeline{nullptr};
+};
+
+#endif // SkLinearBitmapPipeline_DEFINED
diff --git a/gfx/skia/skia/src/core/SkLinearBitmapPipeline_core.h b/gfx/skia/skia/src/core/SkLinearBitmapPipeline_core.h
new file mode 100644
index 000000000..5ef6fcab5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLinearBitmapPipeline_core.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLinearBitmapPipeline_core_DEFINED
+#define SkLinearBitmapPipeline_core_DEFINED
+
+#include <algorithm>
+#include <cmath>
+#include "SkNx.h"
+
+// New bilerp strategy:
+// Pass through on bilerpList4 and bilerpListFew (analogs to pointList), introduce bilerpEdge
+// which takes 4 points. If the sample spans an edge, then break it into a bilerpEdge. Bilerp
+// span then becomes a normal span except in special cases where an extra Y is given. The bilerp
+// need to stay single point calculations until the tile layer.
+// TODO:
+// - edge span predicate.
+// - introduce new point API
+// - Add tile for new api.
+
+namespace {
+struct X {
+ explicit X(SkScalar val) : fVal{val} { }
+ explicit X(SkPoint pt) : fVal{pt.fX} { }
+ explicit X(SkSize s) : fVal{s.fWidth} { }
+ explicit X(SkISize s) : fVal((SkScalar)s.fWidth) { }
+ operator SkScalar () const {return fVal;}
+private:
+ SkScalar fVal;
+};
+
+struct Y {
+ explicit Y(SkScalar val) : fVal{val} { }
+ explicit Y(SkPoint pt) : fVal{pt.fY} { }
+ explicit Y(SkSize s) : fVal{s.fHeight} { }
+ explicit Y(SkISize s) : fVal((SkScalar)s.fHeight) { }
+ operator SkScalar () const {return fVal;}
+private:
+ SkScalar fVal;
+};
+
+// The Span class enables efficient processing of horizontal spans of pixels.
+// * start - the point where to start the span.
+// * length - the number of pixels to traverse in source space.
+// * count - the number of pixels to produce in destination space.
+// Both start and length are mapped through the inversion matrix to produce values in source
+// space. After the matrix operation, the tilers may break the spans up into smaller spans.
+// The tilers can produce spans that seem nonsensical.
+// * The clamp tiler can create spans with length of 0. This indicates to copy an edge pixel out
+// to the edge of the destination scan.
+// * The mirror tiler can produce spans with negative length. This indicates that the source
+// should be traversed in the opposite direction to the destination pixels.
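+// As a concrete (hypothetical) example, Span{{0.5f, 0.5f}, 3.0f, 4} describes four samples taken
+// at the source positions x = 0.5, 1.5, 2.5, 3.5 on the row y = 0.5, i.e. dx == 1.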
+class Span {
+public:
+ Span(SkPoint start, SkScalar length, int count)
+ : fStart(start)
+ , fLength(length)
+ , fCount{count} {
+ SkASSERT(std::isfinite(length));
+ }
+
+ operator std::tuple<SkPoint&, SkScalar&, int&>() {
+ return std::tie(fStart, fLength, fCount);
+ }
+
+ bool isEmpty() const { return 0 == fCount; }
+ void clear() { fCount = 0; }
+ int count() const { return fCount; }
+ SkScalar length() const { return fLength; }
+ SkScalar startX() const { return X(fStart); }
+ SkScalar endX() const { return this->startX() + this->length(); }
+ SkScalar startY() const { return Y(fStart); }
+ Span emptySpan() { return Span{{0.0, 0.0}, 0.0f, 0}; }
+
+ bool completelyWithin(SkScalar xMin, SkScalar xMax) const {
+ SkScalar sMin, sMax;
+ std::tie(sMin, sMax) = std::minmax(startX(), endX());
+ return xMin <= sMin && sMax < xMax;
+ }
+
+ void offset(SkScalar offsetX) {
+ fStart.offset(offsetX, 0.0f);
+ }
+
+ Span breakAt(SkScalar breakX, SkScalar dx) {
+ SkASSERT(std::isfinite(breakX));
+ SkASSERT(std::isfinite(dx));
+ SkASSERT(dx != 0.0f);
+
+ if (this->isEmpty()) {
+ return this->emptySpan();
+ }
+
+ int dxSteps = SkScalarFloorToInt((breakX - this->startX()) / dx);
+
+ if (dxSteps < 0) {
+ // The span is wholly after breakX.
+ return this->emptySpan();
+ } else if (dxSteps >= fCount) {
+ // The span is wholly before breakX.
+ Span answer = *this;
+ this->clear();
+ return answer;
+ }
+
+ // Calculate the values for the span to cleave off.
+ SkScalar newLength = dxSteps * dx;
+
+        // If the last (or first, if count == 1) sample lands directly on the boundary, include it
+        // when dx < 0 and exclude it when dx > 0.
+ // Reasoning:
+ // dx > 0: The sample point on the boundary is part of the next span because the entire
+ // pixel is after the boundary.
+ // dx < 0: The sample point on the boundary is part of the current span because the
+ // entire pixel is before the boundary.
+ if (this->startX() + newLength == breakX && dx > 0) {
+ if (dxSteps > 0) {
+ dxSteps -= 1;
+ newLength -= dx;
+ } else {
+ return this->emptySpan();
+ }
+ }
+
+ // Calculate new span parameters
+ SkPoint newStart = fStart;
+ int newCount = dxSteps + 1;
+ SkASSERT(newCount > 0);
+
+ // Update this span to reflect the break.
+ SkScalar lengthToStart = newLength + dx;
+ fLength -= lengthToStart;
+ fCount -= newCount;
+ fStart = {this->startX() + lengthToStart, Y(fStart)};
+
+ return Span{newStart, newLength, newCount};
+ }
+
+ void clampToSinglePixel(SkPoint pixel) {
+ fStart = pixel;
+ fLength = 0.0f;
+ }
+
+private:
+ SkPoint fStart;
+ SkScalar fLength;
+ int fCount;
+};
+
+template<typename Stage>
+void span_fallback(Span span, Stage* stage) {
+ SkPoint start;
+ SkScalar length;
+ int count;
+ std::tie(start, length, count) = span;
+ Sk4f xs{X(start)};
+ Sk4f ys{Y(start)};
+
+ // Initializing this is not needed, but some compilers can't figure this out.
+ Sk4s fourDx{0.0f};
+ if (count > 1) {
+ SkScalar dx = length / (count - 1);
+ xs = xs + Sk4f{0.0f, 1.0f, 2.0f, 3.0f} * dx;
+ // Only used if count is >= 4.
+ fourDx = Sk4f{4.0f * dx};
+ }
+
+ while (count >= 4) {
+ stage->pointList4(xs, ys);
+ xs = xs + fourDx;
+ count -= 4;
+ }
+ if (count > 0) {
+ stage->pointListFew(count, xs, ys);
+ }
+}
+
+inline Sk4f SK_VECTORCALL check_pixel(const Sk4f& pixel) {
+ SkASSERTF(0.0f <= pixel[0] && pixel[0] <= 1.0f, "pixel[0]: %f", pixel[0]);
+ SkASSERTF(0.0f <= pixel[1] && pixel[1] <= 1.0f, "pixel[1]: %f", pixel[1]);
+ SkASSERTF(0.0f <= pixel[2] && pixel[2] <= 1.0f, "pixel[2]: %f", pixel[2]);
+ SkASSERTF(0.0f <= pixel[3] && pixel[3] <= 1.0f, "pixel[3]: %f", pixel[3]);
+ return pixel;
+}
+
+} // namespace
+
+class SkLinearBitmapPipeline::PointProcessorInterface {
+public:
+ virtual ~PointProcessorInterface() { }
+ // Take the first n (where 0 < n && n < 4) items from xs and ys and sample those points. For
+    // nearest neighbor, that means just taking the floor of xs and ys. For bilerp, this means
+    // expanding the bilerp filter around the point and sampling using that filter.
+ virtual void SK_VECTORCALL pointListFew(int n, Sk4s xs, Sk4s ys) = 0;
+ // Same as pointListFew, but n = 4.
+ virtual void SK_VECTORCALL pointList4(Sk4s xs, Sk4s ys) = 0;
+ // A span is a compact form of sample points that are obtained by mapping points from
+    // destination space to source space. This is used for horizontal lines only, mainly to take
+    // advantage of memory coherence for horizontal spans.
+ virtual void pointSpan(Span span) = 0;
+};
+
+class SkLinearBitmapPipeline::SampleProcessorInterface
+ : public SkLinearBitmapPipeline::PointProcessorInterface {
+public:
+ // Used for nearest neighbor when scale factor is 1.0. The span can just be repeated with no
+ // edge pixel alignment problems. This is for handling a very common case.
+ virtual void repeatSpan(Span span, int32_t repeatCount) = 0;
+};
+
+class SkLinearBitmapPipeline::DestinationInterface {
+public:
+ virtual ~DestinationInterface() { }
+ // Count is normally not needed, but in these early stages of development it is useful to
+ // check bounds.
+ // TODO(herb): 4/6/2016 - remove count when code is stable.
+ virtual void setDestination(void* dst, int count) = 0;
+};
+
+class SkLinearBitmapPipeline::BlendProcessorInterface
+ : public SkLinearBitmapPipeline::DestinationInterface {
+public:
+ virtual void SK_VECTORCALL blendPixel(Sk4f pixel0) = 0;
+ virtual void SK_VECTORCALL blend4Pixels(Sk4f p0, Sk4f p1, Sk4f p2, Sk4f p3) = 0;
+};
+
+class SkLinearBitmapPipeline::PixelAccessorInterface {
+public:
+ virtual ~PixelAccessorInterface() { }
+ virtual void SK_VECTORCALL getFewPixels(
+ int n, Sk4i xs, Sk4i ys, Sk4f* px0, Sk4f* px1, Sk4f* px2) const = 0;
+
+ virtual void SK_VECTORCALL get4Pixels(
+ Sk4i xs, Sk4i ys, Sk4f* px0, Sk4f* px1, Sk4f* px2, Sk4f* px3) const = 0;
+
+ virtual void get4Pixels(
+ const void* src, int index, Sk4f* px0, Sk4f* px1, Sk4f* px2, Sk4f* px3) const = 0;
+
+ virtual Sk4f getPixelFromRow(const void* row, int index) const = 0;
+
+ virtual Sk4f getPixelAt(int index) const = 0;
+
+ virtual const void* row(int y) const = 0;
+};
+
+#endif // SkLinearBitmapPipeline_core_DEFINED
diff --git a/gfx/skia/skia/src/core/SkLinearBitmapPipeline_matrix.h b/gfx/skia/skia/src/core/SkLinearBitmapPipeline_matrix.h
new file mode 100644
index 000000000..d194d0729
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLinearBitmapPipeline_matrix.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLinearBitmapPipeline_matrix_DEFINED
+#define SkLinearBitmapPipeline_matrix_DEFINED
+
+#include "SkLinearBitmapPipeline_core.h"
+
+namespace {
+class TranslateMatrixStrategy {
+public:
+ TranslateMatrixStrategy(SkVector offset)
+ : fXOffset{X(offset)}
+ , fYOffset{Y(offset)} { }
+
+ void processPoints(Sk4s* xs, Sk4s* ys) {
+ *xs = *xs + fXOffset;
+ *ys = *ys + fYOffset;
+ }
+
+ template <typename Next>
+ bool maybeProcessSpan(Span span, Next* next) {
+ SkPoint start; SkScalar length; int count;
+ std::tie(start, length, count) = span;
+ next->pointSpan(Span{start + SkPoint{fXOffset[0], fYOffset[0]}, length, count});
+ return true;
+ }
+
+private:
+ const Sk4s fXOffset, fYOffset;
+};
+
+class ScaleMatrixStrategy {
+public:
+ ScaleMatrixStrategy(SkVector offset, SkVector scale)
+ : fXOffset{X(offset)}, fYOffset{Y(offset)}
+ , fXScale{X(scale)}, fYScale{Y(scale)} { }
+ void processPoints(Sk4s* xs, Sk4s* ys) {
+ *xs = *xs * fXScale + fXOffset;
+ *ys = *ys * fYScale + fYOffset;
+ }
+
+ template <typename Next>
+ bool maybeProcessSpan(Span span, Next* next) {
+ SkPoint start; SkScalar length; int count;
+ std::tie(start, length, count) = span;
+ SkPoint newStart =
+ SkPoint{X(start) * fXScale[0] + fXOffset[0], Y(start) * fYScale[0] + fYOffset[0]};
+ SkScalar newLength = length * fXScale[0];
+ next->pointSpan(Span{newStart, newLength, count});
+ return true;
+ }
+
+private:
+ const Sk4s fXOffset, fYOffset;
+ const Sk4s fXScale, fYScale;
+};
+
+class AffineMatrixStrategy {
+public:
+ AffineMatrixStrategy(SkVector offset, SkVector scale, SkVector skew)
+ : fXOffset{X(offset)}, fYOffset{Y(offset)}
+ , fXScale{X(scale)}, fYScale{Y(scale)}
+ , fXSkew{X(skew)}, fYSkew{Y(skew)} { }
+ void processPoints(Sk4s* xs, Sk4s* ys) {
+ Sk4s newXs = fXScale * *xs + fXSkew * *ys + fXOffset;
+ Sk4s newYs = fYSkew * *xs + fYScale * *ys + fYOffset;
+
+ *xs = newXs;
+ *ys = newYs;
+ }
+
+ template <typename Next>
+ bool maybeProcessSpan(Span span, Next* next) {
+ return false;
+ }
+
+private:
+ const Sk4s fXOffset, fYOffset;
+ const Sk4s fXScale, fYScale;
+ const Sk4s fXSkew, fYSkew;
+};
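+
+// For reference, AffineMatrixStrategy::processPoints above computes, four points at a time:
+//   x' = scale.x * x + skew.x * y + offset.x
+//   y' = skew.y * x + scale.y * y + offset.y
+// i.e. it maps destination coordinates into source space with the (inverse) matrix handed to
+// the pipeline.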
+
+class PerspectiveMatrixStrategy {
+public:
+ PerspectiveMatrixStrategy(SkVector offset, SkVector scale, SkVector skew,
+ SkVector zSkew, SkScalar zOffset)
+ : fXOffset{X(offset)}, fYOffset{Y(offset)}, fZOffset{zOffset}
+ , fXScale{X(scale)}, fYScale{Y(scale)}
+ , fXSkew{X(skew)}, fYSkew{Y(skew)}, fZXSkew{X(zSkew)}, fZYSkew{Y(zSkew)} { }
+ void processPoints(Sk4s* xs, Sk4s* ys) {
+ Sk4s newXs = fXScale * *xs + fXSkew * *ys + fXOffset;
+ Sk4s newYs = fYSkew * *xs + fYScale * *ys + fYOffset;
+ Sk4s newZs = fZXSkew * *xs + fZYSkew * *ys + fZOffset;
+
+ *xs = newXs / newZs;
+ *ys = newYs / newZs;
+ }
+
+ template <typename Next>
+ bool maybeProcessSpan(Span span, Next* next) {
+ return false;
+ }
+
+private:
+ const Sk4s fXOffset, fYOffset, fZOffset;
+ const Sk4s fXScale, fYScale;
+ const Sk4s fXSkew, fYSkew, fZXSkew, fZYSkew;
+};
+
+
+} // namespace
+
+#endif // SkLinearBitmapPipeline_matrix_DEFINED
diff --git a/gfx/skia/skia/src/core/SkLinearBitmapPipeline_sample.h b/gfx/skia/skia/src/core/SkLinearBitmapPipeline_sample.h
new file mode 100644
index 000000000..5f9948c64
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLinearBitmapPipeline_sample.h
@@ -0,0 +1,1036 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLinearBitmapPipeline_sampler_DEFINED
+#define SkLinearBitmapPipeline_sampler_DEFINED
+
+#include <tuple>
+
+#include "SkColor.h"
+#include "SkColorPriv.h"
+#include "SkFixed.h"
+#include "SkHalf.h"
+#include "SkLinearBitmapPipeline_core.h"
+#include "SkNx.h"
+#include "SkPM4fPriv.h"
+
+namespace {
+// Explanation of the math:
+// 1 - x x
+// +--------+--------+
+// | | |
+// 1 - y | px00 | px10 |
+// | | |
+// +--------+--------+
+// | | |
+// y | px01 | px11 |
+// | | |
+// +--------+--------+
+//
+//
+// Given the pixels pxXY above, each is multiplied by a different factor derived from the
+// fractional parts of x and y:
+// * px00 -> (1 - x)(1 - y) = 1 - x - y + xy
+// * px10 -> x(1 - y) = x - xy
+// * px01 -> (1 - x)y = y - xy
+// * px11 -> xy
+// So x * y is calculated first and then used to calculate all the other factors.
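+// Worked example (hypothetical values): for fractional offsets x = 0.25 and y = 0.5 the factors
+// are px00: 0.75 * 0.5 = 0.375, px10: 0.25 * 0.5 = 0.125, px01: 0.75 * 0.5 = 0.375, and
+// px11: 0.25 * 0.5 = 0.125, which sum to 1 as expected.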
+static Sk4s SK_VECTORCALL bilerp4(Sk4s xs, Sk4s ys, Sk4f px00, Sk4f px10,
+ Sk4f px01, Sk4f px11) {
+ // Calculate fractional xs and ys.
+ Sk4s fxs = xs - xs.floor();
+ Sk4s fys = ys - ys.floor();
+ Sk4s fxys{fxs * fys};
+ Sk4f sum = px11 * fxys;
+ sum = sum + px01 * (fys - fxys);
+ sum = sum + px10 * (fxs - fxys);
+ sum = sum + px00 * (Sk4f{1.0f} - fxs - fys + fxys);
+ return sum;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// PixelConverter is the lowest level interface to the source data. There is a PixelConverter for
+// each of the different SkColorTypes.
+template <SkColorType, SkGammaType> class PixelConverter;
+
+// Alpha handling:
+// The alpha from the paint (tintColor) is used in the blend part of the pipeline to modulate
+// the entire bitmap. So the tint color is given an alpha of 1.0 here, allowing the paint's alpha
+// to modulate this color later in the pipeline.
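+// For illustration: with a hypothetical opaque red tint {1, 0, 0, 1} and an alpha pixel of 128,
+// toSk4f() below returns roughly {0.502, 0, 0, 0.502}, i.e. a premultiplied tint scaled by
+// coverage whose alpha the blend stage can still modulate.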
+template <>
+class PixelConverter<kAlpha_8_SkColorType, kLinear_SkGammaType> {
+public:
+ using Element = uint8_t;
+ PixelConverter(const SkPixmap& srcPixmap, SkColor tintColor)
+ : fTintColor{set_alpha(Sk4f_from_SkColor(tintColor), 1.0f)} { }
+
+ Sk4f toSk4f(const Element pixel) const {
+ return fTintColor * (pixel * (1.0f/255.0f));
+ }
+
+private:
+ const Sk4f fTintColor;
+};
+
+template <SkGammaType gammaType>
+static inline Sk4f pmcolor_to_rgba(SkPMColor pixel) {
+ return swizzle_rb_if_bgra(
+ (gammaType == kSRGB_SkGammaType) ? Sk4f_fromS32(pixel)
+ : Sk4f_fromL32(pixel));
+}
+
+template <SkGammaType gammaType>
+class PixelConverter<kRGB_565_SkColorType, gammaType> {
+public:
+ using Element = uint16_t;
+ PixelConverter(const SkPixmap& srcPixmap) { }
+
+ Sk4f toSk4f(Element pixel) const {
+ return pmcolor_to_rgba<gammaType>(SkPixel16ToPixel32(pixel));
+ }
+};
+
+template <SkGammaType gammaType>
+class PixelConverter<kARGB_4444_SkColorType, gammaType> {
+public:
+ using Element = uint16_t;
+ PixelConverter(const SkPixmap& srcPixmap) { }
+
+ Sk4f toSk4f(Element pixel) const {
+ return pmcolor_to_rgba<gammaType>(SkPixel4444ToPixel32(pixel));
+ }
+};
+
+template <SkGammaType gammaType>
+class PixelConverter<kRGBA_8888_SkColorType, gammaType> {
+public:
+ using Element = uint32_t;
+ PixelConverter(const SkPixmap& srcPixmap) { }
+
+ Sk4f toSk4f(Element pixel) const {
+ return gammaType == kSRGB_SkGammaType
+ ? Sk4f_fromS32(pixel)
+ : Sk4f_fromL32(pixel);
+ }
+};
+
+template <SkGammaType gammaType>
+class PixelConverter<kBGRA_8888_SkColorType, gammaType> {
+public:
+ using Element = uint32_t;
+ PixelConverter(const SkPixmap& srcPixmap) { }
+
+ Sk4f toSk4f(Element pixel) const {
+ return swizzle_rb(
+ gammaType == kSRGB_SkGammaType ? Sk4f_fromS32(pixel) : Sk4f_fromL32(pixel));
+ }
+};
+
+template <SkGammaType gammaType>
+class PixelConverter<kIndex_8_SkColorType, gammaType> {
+public:
+ using Element = uint8_t;
+ PixelConverter(const SkPixmap& srcPixmap)
+ : fColorTableSize(srcPixmap.ctable()->count()){
+ SkColorTable* skColorTable = srcPixmap.ctable();
+ SkASSERT(skColorTable != nullptr);
+
+ fColorTable = (Sk4f*)SkAlign16((intptr_t)fColorTableStorage.get());
+ for (int i = 0; i < fColorTableSize; i++) {
+ fColorTable[i] = pmcolor_to_rgba<gammaType>((*skColorTable)[i]);
+ }
+ }
+
+ PixelConverter(const PixelConverter& strategy)
+ : fColorTableSize{strategy.fColorTableSize}{
+ fColorTable = (Sk4f*)SkAlign16((intptr_t)fColorTableStorage.get());
+ for (int i = 0; i < fColorTableSize; i++) {
+ fColorTable[i] = strategy.fColorTable[i];
+ }
+ }
+
+ Sk4f toSk4f(Element index) const {
+ return fColorTable[index];
+ }
+
+private:
+ static const size_t kColorTableSize = sizeof(Sk4f[256]) + 12;
+ const int fColorTableSize;
+ SkAutoMalloc fColorTableStorage{kColorTableSize};
+ Sk4f* fColorTable;
+};
+
+template <SkGammaType gammaType>
+class PixelConverter<kGray_8_SkColorType, gammaType> {
+public:
+ using Element = uint8_t;
+ PixelConverter(const SkPixmap& srcPixmap) { }
+
+ Sk4f toSk4f(Element pixel) const {
+ float gray = (gammaType == kSRGB_SkGammaType)
+ ? sk_linear_from_srgb[pixel]
+ : pixel * (1/255.0f);
+ return {gray, gray, gray, 1.0f};
+ }
+};
+
+template <>
+class PixelConverter<kRGBA_F16_SkColorType, kLinear_SkGammaType> {
+public:
+ using Element = uint64_t;
+ PixelConverter(const SkPixmap& srcPixmap) { }
+
+ Sk4f toSk4f(const Element pixel) const {
+ return SkHalfToFloat_finite_ftz(pixel);
+ }
+};
+
+class PixelAccessorShim {
+public:
+ explicit PixelAccessorShim(SkLinearBitmapPipeline::PixelAccessorInterface* accessor)
+ : fPixelAccessor(accessor) { }
+
+ void SK_VECTORCALL getFewPixels(
+ int n, Sk4i xs, Sk4i ys, Sk4f* px0, Sk4f* px1, Sk4f* px2) const {
+ fPixelAccessor->getFewPixels(n, xs, ys, px0, px1, px2);
+ }
+
+ void SK_VECTORCALL get4Pixels(
+ Sk4i xs, Sk4i ys, Sk4f* px0, Sk4f* px1, Sk4f* px2, Sk4f* px3) const {
+ fPixelAccessor->get4Pixels(xs, ys, px0, px1, px2, px3);
+ }
+
+ void get4Pixels(
+ const void* src, int index, Sk4f* px0, Sk4f* px1, Sk4f* px2, Sk4f* px3) const {
+ fPixelAccessor->get4Pixels(src, index, px0, px1, px2, px3);
+ }
+
+ Sk4f getPixelFromRow(const void* row, int index) const {
+ return fPixelAccessor->getPixelFromRow(row, index);
+ }
+
+ Sk4f getPixelAt(int index) const {
+ return fPixelAccessor->getPixelAt(index);
+ }
+
+ const void* row(int y) const {
+ return fPixelAccessor->row(y);
+ }
+
+private:
+ SkLinearBitmapPipeline::PixelAccessorInterface* const fPixelAccessor;
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// PixelAccessor handles the plumbing that is common to all the PixelConverters.
+template <SkColorType colorType, SkGammaType gammaType>
+class PixelAccessor final : public SkLinearBitmapPipeline::PixelAccessorInterface {
+ using Element = typename PixelConverter<colorType, gammaType>::Element;
+public:
+ template <typename... Args>
+ PixelAccessor(const SkPixmap& srcPixmap, Args&&... args)
+ : fSrc{static_cast<const Element*>(srcPixmap.addr())}
+ , fWidth{srcPixmap.rowBytesAsPixels()}
+ , fConverter{srcPixmap, std::move<Args>(args)...} { }
+
+ void SK_VECTORCALL getFewPixels (
+ int n, Sk4i xs, Sk4i ys, Sk4f* px0, Sk4f* px1, Sk4f* px2) const override {
+ Sk4i bufferLoc = ys * fWidth + xs;
+ switch (n) {
+ case 3:
+ *px2 = this->getPixelAt(bufferLoc[2]);
+ case 2:
+ *px1 = this->getPixelAt(bufferLoc[1]);
+ case 1:
+ *px0 = this->getPixelAt(bufferLoc[0]);
+ default:
+ break;
+ }
+ }
+
+ void SK_VECTORCALL get4Pixels(
+ Sk4i xs, Sk4i ys, Sk4f* px0, Sk4f* px1, Sk4f* px2, Sk4f* px3) const override {
+ Sk4i bufferLoc = ys * fWidth + xs;
+ *px0 = this->getPixelAt(bufferLoc[0]);
+ *px1 = this->getPixelAt(bufferLoc[1]);
+ *px2 = this->getPixelAt(bufferLoc[2]);
+ *px3 = this->getPixelAt(bufferLoc[3]);
+ }
+
+ void get4Pixels(
+ const void* src, int index, Sk4f* px0, Sk4f* px1, Sk4f* px2, Sk4f* px3) const override {
+ *px0 = this->getPixelFromRow(src, index + 0);
+ *px1 = this->getPixelFromRow(src, index + 1);
+ *px2 = this->getPixelFromRow(src, index + 2);
+ *px3 = this->getPixelFromRow(src, index + 3);
+ }
+
+ Sk4f getPixelFromRow(const void* row, int index) const override {
+ const Element* src = static_cast<const Element*>(row);
+ return fConverter.toSk4f(src[index]);
+ }
+
+ Sk4f getPixelAt(int index) const override {
+ return this->getPixelFromRow(fSrc, index);
+ }
+
+ const void* row(int y) const override { return fSrc + y * fWidth; }
+
+private:
+ const Element* const fSrc;
+ const int fWidth;
+ PixelConverter<colorType, gammaType> fConverter;
+};
+
+// We're moving through source space at a rate of 1 source pixel per 1 dst pixel.
+// We'll never re-use pixels, but we can at least load contiguous pixels.
+template <typename Next, typename Strategy>
+static void src_strategy_blend(Span span, Next* next, Strategy* strategy) {
+ SkPoint start;
+ SkScalar length;
+ int count;
+ std::tie(start, length, count) = span;
+ int ix = SkScalarFloorToInt(X(start));
+ const void* row = strategy->row((int)std::floor(Y(start)));
+ if (length > 0) {
+ while (count >= 4) {
+ Sk4f px0, px1, px2, px3;
+ strategy->get4Pixels(row, ix, &px0, &px1, &px2, &px3);
+ next->blend4Pixels(px0, px1, px2, px3);
+ ix += 4;
+ count -= 4;
+ }
+
+ while (count > 0) {
+ next->blendPixel(strategy->getPixelFromRow(row, ix));
+ ix += 1;
+ count -= 1;
+ }
+ } else {
+ while (count >= 4) {
+ Sk4f px0, px1, px2, px3;
+ strategy->get4Pixels(row, ix - 3, &px3, &px2, &px1, &px0);
+ next->blend4Pixels(px0, px1, px2, px3);
+ ix -= 4;
+ count -= 4;
+ }
+
+ while (count > 0) {
+ next->blendPixel(strategy->getPixelFromRow(row, ix));
+ ix -= 1;
+ count -= 1;
+ }
+ }
+}
+
+// -- NearestNeighborSampler -----------------------------------------------------------------------
+// NearestNeighborSampler - use nearest neighbor filtering to create runs of destination pixels.
+template<typename Accessor, typename Next>
+class NearestNeighborSampler : public SkLinearBitmapPipeline::SampleProcessorInterface {
+public:
+ template<typename... Args>
+ NearestNeighborSampler(SkLinearBitmapPipeline::BlendProcessorInterface* next, Args&& ... args)
+ : fNext{next}, fAccessor{std::forward<Args>(args)...} { }
+
+ NearestNeighborSampler(SkLinearBitmapPipeline::BlendProcessorInterface* next,
+ const NearestNeighborSampler& sampler)
+ : fNext{next}, fAccessor{sampler.fAccessor} { }
+
+ void SK_VECTORCALL pointListFew(int n, Sk4s xs, Sk4s ys) override {
+ SkASSERT(0 < n && n < 4);
+ Sk4f px0, px1, px2;
+ fAccessor.getFewPixels(n, SkNx_cast<int>(xs), SkNx_cast<int>(ys), &px0, &px1, &px2);
+ if (n >= 1) fNext->blendPixel(px0);
+ if (n >= 2) fNext->blendPixel(px1);
+ if (n >= 3) fNext->blendPixel(px2);
+ }
+
+ void SK_VECTORCALL pointList4(Sk4s xs, Sk4s ys) override {
+ Sk4f px0, px1, px2, px3;
+ fAccessor.get4Pixels(SkNx_cast<int>(xs), SkNx_cast<int>(ys), &px0, &px1, &px2, &px3);
+ fNext->blend4Pixels(px0, px1, px2, px3);
+ }
+
+ void pointSpan(Span span) override {
+ SkASSERT(!span.isEmpty());
+ SkPoint start;
+ SkScalar length;
+ int count;
+ std::tie(start, length, count) = span;
+ SkScalar absLength = SkScalarAbs(length);
+ if (absLength < (count - 1)) {
+ this->spanSlowRate(span);
+ } else if (absLength == (count - 1)) {
+ src_strategy_blend(span, fNext, &fAccessor);
+ } else {
+ this->spanFastRate(span);
+ }
+ }
+
+ void repeatSpan(Span span, int32_t repeatCount) override {
+ while (repeatCount > 0) {
+ this->pointSpan(span);
+ repeatCount--;
+ }
+ }
+
+private:
+ // When moving through source space more slowly than dst space (zoomed in),
+ // we'll be sampling from the same source pixel more than once.
+ void spanSlowRate(Span span) {
+ SkPoint start; SkScalar length; int count;
+ std::tie(start, length, count) = span;
+ SkScalar x = X(start);
+ SkFixed fx = SkScalarToFixed(x);
+ SkScalar dx = length / (count - 1);
+ SkFixed fdx = SkScalarToFixed(dx);
+
+ const void* row = fAccessor.row((int)std::floor(Y(start)));
+ Next* next = fNext;
+
+ int ix = SkFixedFloorToInt(fx);
+ int prevIX = ix;
+ Sk4f fpixel = fAccessor.getPixelFromRow(row, ix);
+
+ // When dx is less than one, each pixel is used more than once. Using the fixed point fx
+ // allows the code to quickly check that the same pixel is being used. The code uses this
+ // same pixel check to do the sRGB and normalization only once.
+ auto getNextPixel = [&]() {
+ if (ix != prevIX) {
+ fpixel = fAccessor.getPixelFromRow(row, ix);
+ prevIX = ix;
+ }
+ fx += fdx;
+ ix = SkFixedFloorToInt(fx);
+ return fpixel;
+ };
+
+ while (count >= 4) {
+ Sk4f px0 = getNextPixel();
+ Sk4f px1 = getNextPixel();
+ Sk4f px2 = getNextPixel();
+ Sk4f px3 = getNextPixel();
+ next->blend4Pixels(px0, px1, px2, px3);
+ count -= 4;
+ }
+ while (count > 0) {
+ next->blendPixel(getNextPixel());
+ count -= 1;
+ }
+ }
+
+ // We're moving through source space at a rate of 1 source pixel per 1 dst pixel.
+ // We'll never re-use pixels, but we can at least load contiguous pixels.
+ void spanUnitRate(Span span) {
+ src_strategy_blend(span, fNext, &fAccessor);
+ }
+
+ // We're moving through source space faster than dst (zoomed out),
+ // so we'll never reuse a source pixel or be able to do contiguous loads.
+ void spanFastRate(Span span) {
+ span_fallback(span, this);
+ }
+
+ Next* const fNext;
+ Accessor fAccessor;
+};
+
+// From an edgeType, the integer value of a pixel vs, and the integer value of the extreme edge
+// vMax, take the point which might be off the tile by one pixel and either wrap it or pin it to
+// generate the right pixel. The value vs is on the interval [-1, vMax + 1]. It produces a value
+// on the interval [0, vMax].
+// Note: vMax is not width or height, but width-1 or height-1 because it is the largest valid pixel.
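+// For example, with vMax == 7 (an 8-pixel tile): kRepeat maps vs == -1 to 7 and vs == 8 to 0,
+// while kClamp pins them to 0 and 7 respectively (kMirror is handled like kClamp here, presumably
+// because the tile stage has already reflected the coordinates).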
+static inline int adjust_edge(SkShader::TileMode edgeType, int vs, int vMax) {
+ SkASSERT(-1 <= vs && vs <= vMax + 1);
+ switch (edgeType) {
+ case SkShader::kClamp_TileMode:
+ case SkShader::kMirror_TileMode:
+ vs = std::max(vs, 0);
+ vs = std::min(vs, vMax);
+ break;
+ case SkShader::kRepeat_TileMode:
+ vs = (vs <= vMax) ? vs : 0;
+ vs = (vs >= 0) ? vs : vMax;
+ break;
+ }
+ SkASSERT(0 <= vs && vs <= vMax);
+ return vs;
+}
+
+// From a sample point on the tile, return the top or left filter value.
+// The result r should be in the range (0, 1]. Since this represents the weight given to the top
+// left element, if x == 0.5 the filter value should be 1.0.
+// The input sample point must be on the tile, therefore it must be >= 0.
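+// E.g. sample_to_filter(0.5f) == 1.0f (the sample sits exactly on a pixel center), while
+// sample_to_filter(0.25f) == 0.25f, giving the top or left column only a quarter of the weight.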
+static SkScalar sample_to_filter(SkScalar x) {
+ SkASSERT(x >= 0.0f);
+ // The usual form of the top or left edge is x - .5, but since we are working on the unit
+    // square, x + .5 works just as well. This also guarantees that v > 0.0, allowing the use
+ // of trunc.
+ SkScalar v = x + 0.5f;
+    // Produce the top or left offset, a value on the range [0, 1).
+ SkScalar f = v - SkScalarTruncToScalar(v);
+ // Produce the filter value which is on the range (0, 1].
+ SkScalar r = 1.0f - f;
+ SkASSERT(0.0f < r && r <= 1.0f);
+ return r;
+}
+
+// -- BilerpSampler --------------------------------------------------------------------------------
+// BilerpSampler - use a bilerp filter to create runs of destination pixels.
+// Note: in the code below, there are two types of points:
+// * sample points - the points passed in by pointList* and Spans.
+// * filter points - points created from a sample point to form the coordinates of the points
+//                   to use in the filter and to generate the filter values.
+template<typename Accessor, typename Next>
+class BilerpSampler : public SkLinearBitmapPipeline::SampleProcessorInterface {
+public:
+ template<typename... Args>
+ BilerpSampler(
+ SkLinearBitmapPipeline::BlendProcessorInterface* next,
+ SkISize dimensions,
+ SkShader::TileMode xTile, SkShader::TileMode yTile,
+ Args&& ... args
+ )
+ : fNext{next}
+ , fXEdgeType{xTile}
+ , fXMax{dimensions.width() - 1}
+ , fYEdgeType{yTile}
+ , fYMax{dimensions.height() - 1}
+ , fAccessor{std::forward<Args>(args)...} { }
+
+ BilerpSampler(SkLinearBitmapPipeline::BlendProcessorInterface* next,
+ const BilerpSampler& sampler)
+ : fNext{next}
+ , fXEdgeType{sampler.fXEdgeType}
+ , fXMax{sampler.fXMax}
+ , fYEdgeType{sampler.fYEdgeType}
+ , fYMax{sampler.fYMax}
+ , fAccessor{sampler.fAccessor} { }
+
+ void SK_VECTORCALL pointListFew(int n, Sk4s xs, Sk4s ys) override {
+ SkASSERT(0 < n && n < 4);
+ auto bilerpPixel = [&](int index) {
+ return this->bilerpSamplePoint(SkPoint{xs[index], ys[index]});
+ };
+
+ if (n >= 1) fNext->blendPixel(bilerpPixel(0));
+ if (n >= 2) fNext->blendPixel(bilerpPixel(1));
+ if (n >= 3) fNext->blendPixel(bilerpPixel(2));
+ }
+
+ void SK_VECTORCALL pointList4(Sk4s xs, Sk4s ys) override {
+ auto bilerpPixel = [&](int index) {
+ return this->bilerpSamplePoint(SkPoint{xs[index], ys[index]});
+ };
+ fNext->blend4Pixels(bilerpPixel(0), bilerpPixel(1), bilerpPixel(2), bilerpPixel(3));
+ }
+
+ void pointSpan(Span span) override {
+ SkASSERT(!span.isEmpty());
+ SkPoint start;
+ SkScalar length;
+ int count;
+ std::tie(start, length, count) = span;
+
+ // Nothing to do.
+ if (count == 0) {
+ return;
+ }
+
+ // Trivial case. No sample points are generated other than start.
+ if (count == 1) {
+ fNext->blendPixel(this->bilerpSamplePoint(start));
+ return;
+ }
+
+        // Note: the following code could be done in terms of dx = length / (count - 1), but that
+ // would introduce a divide that is not needed for the most common dx == 1 cases.
+ SkScalar absLength = SkScalarAbs(length);
+ if (absLength == 0.0f) {
+ // |dx| == 0
+ // length is zero, so clamp an edge pixel.
+ this->spanZeroRate(span);
+ } else if (absLength < (count - 1)) {
+ // 0 < |dx| < 1.
+ this->spanSlowRate(span);
+ } else if (absLength == (count - 1)) {
+ // |dx| == 1.
+ if (sample_to_filter(span.startX()) == 1.0f
+ && sample_to_filter(span.startY()) == 1.0f) {
+ // All the pixels are aligned with the dest; go fast.
+ src_strategy_blend(span, fNext, &fAccessor);
+ } else {
+                // There are some sub-pixel offsets, so bilerp.
+ this->spanUnitRate(span);
+ }
+ } else if (absLength < 2.0f * (count - 1)) {
+ // 1 < |dx| < 2.
+ this->spanMediumRate(span);
+ } else {
+ // |dx| >= 2.
+ this->spanFastRate(span);
+ }
+ }
+
+ void repeatSpan(Span span, int32_t repeatCount) override {
+ while (repeatCount > 0) {
+ this->pointSpan(span);
+ repeatCount--;
+ }
+ }
+
+private:
+
+ // Convert a sample point to the points used by the filter.
+ void filterPoints(SkPoint sample, Sk4i* filterXs, Sk4i* filterYs) {
+ // May be less than zero. Be careful to use Floor.
+ int x0 = adjust_edge(fXEdgeType, SkScalarFloorToInt(X(sample) - 0.5), fXMax);
+ // Always greater than zero. Use the faster Trunc.
+ int x1 = adjust_edge(fXEdgeType, SkScalarTruncToInt(X(sample) + 0.5), fXMax);
+ int y0 = adjust_edge(fYEdgeType, SkScalarFloorToInt(Y(sample) - 0.5), fYMax);
+ int y1 = adjust_edge(fYEdgeType, SkScalarTruncToInt(Y(sample) + 0.5), fYMax);
+
+ *filterXs = Sk4i{x0, x1, x0, x1};
+ *filterYs = Sk4i{y0, y0, y1, y1};
+ }
+
+ // Given a sample point, generate a color by bilerping the four filter points.
+ Sk4f bilerpSamplePoint(SkPoint sample) {
+ Sk4i iXs, iYs;
+ filterPoints(sample, &iXs, &iYs);
+ Sk4f px00, px10, px01, px11;
+ fAccessor.get4Pixels(iXs, iYs, &px00, &px10, &px01, &px11);
+ return bilerp4(Sk4f{X(sample) - 0.5f}, Sk4f{Y(sample) - 0.5f}, px00, px10, px01, px11);
+ }
+
+ // Get two pixels at x from row0 and row1.
+ void get2PixelColumn(const void* row0, const void* row1, int x, Sk4f* px0, Sk4f* px1) {
+ *px0 = fAccessor.getPixelFromRow(row0, x);
+ *px1 = fAccessor.getPixelFromRow(row1, x);
+ }
+
+ // |dx| == 0. This code assumes that length is zero.
+ void spanZeroRate(Span span) {
+ SkPoint start; SkScalar length; int count;
+ std::tie(start, length, count) = span;
+ SkASSERT(length == 0.0f);
+
+ // Filter for the blending of the top and bottom pixels.
+ SkScalar filterY = sample_to_filter(Y(start));
+
+ // Generate the four filter points from the sample point start. Generate the row* values.
+ Sk4i iXs, iYs;
+ this->filterPoints(start, &iXs, &iYs);
+ const void* const row0 = fAccessor.row(iYs[0]);
+ const void* const row1 = fAccessor.row(iYs[2]);
+
+ // Get the two pixels that make up the clamping pixel.
+ Sk4f pxTop, pxBottom;
+ this->get2PixelColumn(row0, row1, SkScalarFloorToInt(X(start)), &pxTop, &pxBottom);
+ Sk4f pixel = pxTop * filterY + (1.0f - filterY) * pxBottom;
+
+ while (count >= 4) {
+ fNext->blend4Pixels(pixel, pixel, pixel, pixel);
+ count -= 4;
+ }
+ while (count > 0) {
+ fNext->blendPixel(pixel);
+ count -= 1;
+ }
+ }
+
+ // 0 < |dx| < 1. This code reuses the calculations from previous pixels to reduce
+    // computation. In particular, several destination pixels may be generated from the same four
+ // source pixels.
+ // In the following code a "part" is a combination of two pixels from the same column of the
+ // filter.
+ void spanSlowRate(Span span) {
+ SkPoint start; SkScalar length; int count;
+ std::tie(start, length, count) = span;
+
+ // Calculate the distance between each sample point.
+ const SkScalar dx = length / (count - 1);
+ SkASSERT(-1.0f < dx && dx < 1.0f && dx != 0.0f);
+
+ // Generate the filter values for the top-left corner.
+ // Note: these values are in filter space; this has implications about how to adjust
+ // these values at each step. For example, as the sample point increases, the filter
+        // value decreases; this is because the filter and position are related by
+        // (1 - (X(sample) - .5)) % 1. The (1 - stuff) causes the filter to move in the opposite
+        // direction of the sample point, which is increasing by dx.
+ SkScalar filterX = sample_to_filter(X(start));
+ SkScalar filterY = sample_to_filter(Y(start));
+
+ // Generate the four filter points from the sample point start. Generate the row* values.
+ Sk4i iXs, iYs;
+ this->filterPoints(start, &iXs, &iYs);
+ const void* const row0 = fAccessor.row(iYs[0]);
+ const void* const row1 = fAccessor.row(iYs[2]);
+
+ // Generate part of the filter value at xColumn.
+ auto partAtColumn = [&](int xColumn) {
+ int adjustedColumn = adjust_edge(fXEdgeType, xColumn, fXMax);
+ Sk4f pxTop, pxBottom;
+ this->get2PixelColumn(row0, row1, adjustedColumn, &pxTop, &pxBottom);
+ return pxTop * filterY + (1.0f - filterY) * pxBottom;
+ };
+
+        // The leftPart is made up of two pixels from the left column of the filter; the right
+        // part is similar. The top and bottom pixels in the *Part are created as a linear blend of
+ // the top and bottom pixels using filterY. See the partAtColumn function above.
+ Sk4f leftPart = partAtColumn(iXs[0]);
+ Sk4f rightPart = partAtColumn(iXs[1]);
+
+ // Create a destination color by blending together a left and right part using filterX.
+ auto bilerp = [&](const Sk4f& leftPart, const Sk4f& rightPart) {
+ Sk4f pixel = leftPart * filterX + rightPart * (1.0f - filterX);
+ return check_pixel(pixel);
+ };
+
+ // Send the first pixel to the destination. This simplifies the loop structure so that no
+ // extra pixels are fetched for the last iteration of the loop.
+ fNext->blendPixel(bilerp(leftPart, rightPart));
+ count -= 1;
+
+ if (dx > 0.0f) {
+ // * positive direction - generate destination pixels by sliding the filter from left
+ // to right.
+ int rightPartCursor = iXs[1];
+
+ // Advance the filter from left to right. Remember that moving the top-left corner of
+ // the filter to the right actually makes the filter value smaller.
+ auto advanceFilter = [&]() {
+ filterX -= dx;
+ if (filterX <= 0.0f) {
+ filterX += 1.0f;
+ leftPart = rightPart;
+ rightPartCursor += 1;
+ rightPart = partAtColumn(rightPartCursor);
+ }
+ SkASSERT(0.0f < filterX && filterX <= 1.0f);
+
+ return bilerp(leftPart, rightPart);
+ };
+
+ while (count >= 4) {
+ Sk4f px0 = advanceFilter(),
+ px1 = advanceFilter(),
+ px2 = advanceFilter(),
+ px3 = advanceFilter();
+ fNext->blend4Pixels(px0, px1, px2, px3);
+ count -= 4;
+ }
+
+ while (count > 0) {
+ fNext->blendPixel(advanceFilter());
+ count -= 1;
+ }
+ } else {
+ // * negative direction - generate destination pixels by sliding the filter from
+ // right to left.
+ int leftPartCursor = iXs[0];
+
+ // Advance the filter from right to left. Remember that moving the top-left corner of
+ // the filter to the left actually makes the filter value larger.
+ auto advanceFilter = [&]() {
+ // Remember, dx < 0 therefore this adds |dx| to filterX.
+ filterX -= dx;
+                // At this point filterX may be > 1; it needs to be wrapped back onto the filter
+                // interval, and the next column in the filter is calculated.
+ if (filterX > 1.0f) {
+ filterX -= 1.0f;
+ rightPart = leftPart;
+ leftPartCursor -= 1;
+ leftPart = partAtColumn(leftPartCursor);
+ }
+ SkASSERT(0.0f < filterX && filterX <= 1.0f);
+
+ return bilerp(leftPart, rightPart);
+ };
+
+ while (count >= 4) {
+ Sk4f px0 = advanceFilter(),
+ px1 = advanceFilter(),
+ px2 = advanceFilter(),
+ px3 = advanceFilter();
+ fNext->blend4Pixels(px0, px1, px2, px3);
+ count -= 4;
+ }
+
+ while (count > 0) {
+ fNext->blendPixel(advanceFilter());
+ count -= 1;
+ }
+ }
+ }
+
+ // |dx| == 1. Moving through source space at a rate of 1 source pixel per 1 dst pixel.
+ // Every filter part is used for two destination pixels, and the code can bulk load four
+ // pixels at a time.
+ void spanUnitRate(Span span) {
+ SkPoint start; SkScalar length; int count;
+ std::tie(start, length, count) = span;
+ SkASSERT(SkScalarAbs(length) == (count - 1));
+
+ // Calculate the four filter points of start, and use the two different Y values to
+ // generate the row pointers.
+ Sk4i iXs, iYs;
+ filterPoints(start, &iXs, &iYs);
+ const void* row0 = fAccessor.row(iYs[0]);
+ const void* row1 = fAccessor.row(iYs[2]);
+
+ // Calculate the filter values for the top-left filter element.
+ const SkScalar filterX = sample_to_filter(X(start));
+ const SkScalar filterY = sample_to_filter(Y(start));
+
+ // Generate part of the filter value at xColumn.
+ auto partAtColumn = [&](int xColumn) {
+ int adjustedColumn = adjust_edge(fXEdgeType, xColumn, fXMax);
+ Sk4f pxTop, pxBottom;
+ this->get2PixelColumn(row0, row1, adjustedColumn, &pxTop, &pxBottom);
+ return pxTop * filterY + (1.0f - filterY) * pxBottom;
+ };
+
+ auto get4Parts = [&](int ix, Sk4f* part0, Sk4f* part1, Sk4f* part2, Sk4f* part3) {
+            // Check if the pixels needed are near the edges. If not, go fast using bulk pixel
+            // loads; otherwise, be careful.
+ if (0 <= ix && ix <= fXMax - 3) {
+ Sk4f px00, px10, px20, px30,
+ px01, px11, px21, px31;
+ fAccessor.get4Pixels(row0, ix, &px00, &px10, &px20, &px30);
+ fAccessor.get4Pixels(row1, ix, &px01, &px11, &px21, &px31);
+ *part0 = filterY * px00 + (1.0f - filterY) * px01;
+ *part1 = filterY * px10 + (1.0f - filterY) * px11;
+ *part2 = filterY * px20 + (1.0f - filterY) * px21;
+ *part3 = filterY * px30 + (1.0f - filterY) * px31;
+ } else {
+ *part0 = partAtColumn(ix + 0);
+ *part1 = partAtColumn(ix + 1);
+ *part2 = partAtColumn(ix + 2);
+ *part3 = partAtColumn(ix + 3);
+ }
+ };
+
+ auto bilerp = [&](const Sk4f& part0, const Sk4f& part1) {
+ return part0 * filterX + part1 * (1.0f - filterX);
+ };
+
+ if (length > 0) {
+ // * positive direction - generate destination pixels by sliding the filter from left
+ // to right.
+
+ // overlapPart is the filter part from the end of the previous four pixels used at
+ // the start of the next four pixels.
+ Sk4f overlapPart = partAtColumn(iXs[0]);
+ int rightColumnCursor = iXs[1];
+ while (count >= 4) {
+ Sk4f part0, part1, part2, part3;
+ get4Parts(rightColumnCursor, &part0, &part1, &part2, &part3);
+ Sk4f px0 = bilerp(overlapPart, part0);
+ Sk4f px1 = bilerp(part0, part1);
+ Sk4f px2 = bilerp(part1, part2);
+ Sk4f px3 = bilerp(part2, part3);
+ overlapPart = part3;
+ fNext->blend4Pixels(px0, px1, px2, px3);
+ rightColumnCursor += 4;
+ count -= 4;
+ }
+
+ while (count > 0) {
+ Sk4f rightPart = partAtColumn(rightColumnCursor);
+
+ fNext->blendPixel(bilerp(overlapPart, rightPart));
+ overlapPart = rightPart;
+ rightColumnCursor += 1;
+ count -= 1;
+ }
+ } else {
+ // * negative direction - generate destination pixels by sliding the filter from
+ // right to left.
+ Sk4f overlapPart = partAtColumn(iXs[1]);
+ int leftColumnCursor = iXs[0];
+
+ while (count >= 4) {
+ Sk4f part0, part1, part2, part3;
+ get4Parts(leftColumnCursor - 3, &part3, &part2, &part1, &part0);
+ Sk4f px0 = bilerp(part0, overlapPart);
+ Sk4f px1 = bilerp(part1, part0);
+ Sk4f px2 = bilerp(part2, part1);
+ Sk4f px3 = bilerp(part3, part2);
+ overlapPart = part3;
+ fNext->blend4Pixels(px0, px1, px2, px3);
+ leftColumnCursor -= 4;
+ count -= 4;
+ }
+
+ while (count > 0) {
+ Sk4f leftPart = partAtColumn(leftColumnCursor);
+
+ fNext->blendPixel(bilerp(leftPart, overlapPart));
+ overlapPart = leftPart;
+ leftColumnCursor -= 1;
+ count -= 1;
+ }
+ }
+ }
+
+ // 1 < |dx| < 2. Going through the source pixels at a faster rate than the dest pixels, but
+ // still slow enough to take advantage of previous calculations.
+ void spanMediumRate(Span span) {
+ SkPoint start; SkScalar length; int count;
+ std::tie(start, length, count) = span;
+
+ // Calculate the distance between each sample point.
+ const SkScalar dx = length / (count - 1);
+ SkASSERT((-2.0f < dx && dx < -1.0f) || (1.0f < dx && dx < 2.0f));
+
+ // Generate the filter values for the top-left corner.
+ // Note: these values are in filter space; this has implications about how to adjust
+ // these values at each step. For example, as the sample point increases, the filter
+        // value decreases; this is because the filter and position are related by
+        // (1 - (X(sample) - .5)) % 1. The (1 - stuff) causes the filter to move in the opposite
+        // direction of the sample point, which is increasing by dx.
+ SkScalar filterX = sample_to_filter(X(start));
+ SkScalar filterY = sample_to_filter(Y(start));
+
+ // Generate the four filter points from the sample point start. Generate the row* values.
+ Sk4i iXs, iYs;
+ this->filterPoints(start, &iXs, &iYs);
+ const void* const row0 = fAccessor.row(iYs[0]);
+ const void* const row1 = fAccessor.row(iYs[2]);
+
+ // Generate part of the filter value at xColumn.
+ auto partAtColumn = [&](int xColumn) {
+ int adjustedColumn = adjust_edge(fXEdgeType, xColumn, fXMax);
+ Sk4f pxTop, pxBottom;
+ this->get2PixelColumn(row0, row1, adjustedColumn, &pxTop, &pxBottom);
+ return pxTop * filterY + (1.0f - filterY) * pxBottom;
+ };
+
+        // The leftPart is made up of two pixels from the left column of the filter; the right
+        // part is similar. The top and bottom pixels in the *Part are created as a linear blend of
+        // the top and bottom pixels using filterY. See the partAtColumn function above.
+ Sk4f leftPart = partAtColumn(iXs[0]);
+ Sk4f rightPart = partAtColumn(iXs[1]);
+
+ // Create a destination color by blending together a left and right part using filterX.
+ auto bilerp = [&](const Sk4f& leftPart, const Sk4f& rightPart) {
+ Sk4f pixel = leftPart * filterX + rightPart * (1.0f - filterX);
+ return check_pixel(pixel);
+ };
+
+ // Send the first pixel to the destination. This simplifies the loop structure so that no
+ // extra pixels are fetched for the last iteration of the loop.
+ fNext->blendPixel(bilerp(leftPart, rightPart));
+ count -= 1;
+
+ if (dx > 0.0f) {
+ // * positive direction - generate destination pixels by sliding the filter from left
+ // to right.
+ int rightPartCursor = iXs[1];
+
+ // Advance the filter from left to right. Remember that moving the top-left corner of
+ // the filter to the right actually makes the filter value smaller.
+ auto advanceFilter = [&]() {
+ filterX -= dx;
+ // At this point filterX is less than zero, but might actually be less than -1.
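+                // E.g. with dx = 1.5: filterX = 0.75 becomes -0.75, so step one column and wrap
+                // back to 0.25; filterX = 0.25 becomes -1.25, so step two columns and wrap to 0.75.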
+ if (filterX > -1.0f) {
+ filterX += 1.0f;
+ leftPart = rightPart;
+ rightPartCursor += 1;
+ rightPart = partAtColumn(rightPartCursor);
+ } else {
+ filterX += 2.0f;
+ rightPartCursor += 2;
+ leftPart = partAtColumn(rightPartCursor - 1);
+ rightPart = partAtColumn(rightPartCursor);
+ }
+ SkASSERT(0.0f < filterX && filterX <= 1.0f);
+
+ return bilerp(leftPart, rightPart);
+ };
+
+ while (count >= 4) {
+ Sk4f px0 = advanceFilter(),
+ px1 = advanceFilter(),
+ px2 = advanceFilter(),
+ px3 = advanceFilter();
+ fNext->blend4Pixels(px0, px1, px2, px3);
+ count -= 4;
+ }
+
+ while (count > 0) {
+ fNext->blendPixel(advanceFilter());
+ count -= 1;
+ }
+ } else {
+ // * negative direction - generate destination pixels by sliding the filter from
+ // right to left.
+ int leftPartCursor = iXs[0];
+
+ auto advanceFilter = [&]() {
+ // Remember, dx < 0 therefore this adds |dx| to filterX.
+ filterX -= dx;
+ // At this point, filterX is greater than one, but may actually be greater than two.
+ if (filterX < 2.0f) {
+ filterX -= 1.0f;
+ rightPart = leftPart;
+ leftPartCursor -= 1;
+ leftPart = partAtColumn(leftPartCursor);
+ } else {
+ filterX -= 2.0f;
+ leftPartCursor -= 2;
+ rightPart = partAtColumn(leftPartCursor - 1);
+ leftPart = partAtColumn(leftPartCursor);
+ }
+ SkASSERT(0.0f < filterX && filterX <= 1.0f);
+ return bilerp(leftPart, rightPart);
+ };
+
+ while (count >= 4) {
+ Sk4f px0 = advanceFilter(),
+ px1 = advanceFilter(),
+ px2 = advanceFilter(),
+ px3 = advanceFilter();
+ fNext->blend4Pixels(px0, px1, px2, px3);
+ count -= 4;
+ }
+
+ while (count > 0) {
+ fNext->blendPixel(advanceFilter());
+ count -= 1;
+ }
+ }
+ }
+
+ // We're moving through source space faster than dst (zoomed out),
+ // so we'll never reuse a source pixel or be able to do contiguous loads.
+ void spanFastRate(Span span) {
+ SkPoint start; SkScalar length; int count;
+ std::tie(start, length, count) = span;
+ SkScalar x = X(start);
+ SkScalar y = Y(start);
+
+ SkScalar dx = length / (count - 1);
+ while (count > 0) {
+ fNext->blendPixel(this->bilerpSamplePoint(SkPoint{x, y}));
+ x += dx;
+ count -= 1;
+ }
+ }
+
+ Next* const fNext;
+ const SkShader::TileMode fXEdgeType;
+ const int fXMax;
+ const SkShader::TileMode fYEdgeType;
+ const int fYMax;
+ Accessor fAccessor;
+};
+
+} // namespace
+
+#endif // SkLinearBitmapPipeline_sampler_DEFINED
diff --git a/gfx/skia/skia/src/core/SkLinearBitmapPipeline_tile.h b/gfx/skia/skia/src/core/SkLinearBitmapPipeline_tile.h
new file mode 100644
index 000000000..1e07c22cf
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLinearBitmapPipeline_tile.h
@@ -0,0 +1,423 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLinearBitmapPipeline_tile_DEFINED
+#define SkLinearBitmapPipeline_tile_DEFINED
+
+#include "SkLinearBitmapPipeline_core.h"
+#include "SkPM4f.h"
+#include <algorithm>
+#include <cmath>
+#include <limits>
+
+namespace {
+class XClampStrategy {
+public:
+ XClampStrategy(int32_t max)
+ : fXsMax{SkScalar(max - 0.5f)}
+ , fXMax{SkScalar(max)} { }
+
+ void tileXPoints(Sk4s* xs) {
+ *xs = Sk4s::Min(Sk4s::Max(*xs, 0.0f), fXsMax);
+ SkASSERT(0 <= (*xs)[0] && (*xs)[0] < fXMax);
+ SkASSERT(0 <= (*xs)[1] && (*xs)[1] < fXMax);
+ SkASSERT(0 <= (*xs)[2] && (*xs)[2] < fXMax);
+ SkASSERT(0 <= (*xs)[3] && (*xs)[3] < fXMax);
+ }
+
+ template<typename Next>
+ bool maybeProcessSpan(Span originalSpan, Next* next) {
+ SkASSERT(!originalSpan.isEmpty());
+ SkPoint start; SkScalar length; int count;
+ std::tie(start, length, count) = originalSpan;
+ SkScalar x = X(start);
+ SkScalar y = Y(start);
+ Span span{{x, y}, length, count};
+
+ if (span.completelyWithin(0.0f, fXMax)) {
+ next->pointSpan(span);
+ return true;
+ }
+ if (1 == count || 0.0f == length) {
+ return false;
+ }
+
+ SkScalar dx = length / (count - 1);
+
+ // A B C
+ // +-------+-------+-------++-------+-------+-------+ +-------+-------++------
+ // | *---*|---*---|*---*--||-*---*-|---*---|*---...| |--*---*|---*---||*---*....
+ // | | | || | | | ... | | ||
+ // | | | || | | | | | ||
+ // +-------+-------+-------++-------+-------+-------+ +-------+-------++------
+ // ^ ^
+ // | xMin xMax-1 | xMax
+ //
+ // *---*---*---... - track of samples. * = sample
+ //
+ // +-+ ||
+ // | | - pixels in source space. || - tile border.
+ // +-+ ||
+ //
+ // The length from A to B is the length in source space or 4 * dx or (count - 1) * dx
+ // where dx is the distance between samples. There are 5 destination pixels
+ // corresponding to 5 samples specified in the A, B span. The distance from A to the next
+ // span starting at C is 5 * dx, so count * dx.
+ // Remember, count is the number of pixels needed for the destination and the number of
+ // samples.
+ // Overall Strategy:
+ // * Under - for portions of the span < xMin, take the color at pixel {xMin, y} and use it
+ // to fill in the 5 pixel sampled from A to B.
+ // * Middle - for the portion of the span between xMin and xMax sample normally.
+ // * Over - for the portion of the span > xMax, take the color at pixel {xMax-1, y} and
+ // use it to fill in the rest of the destination pixels.
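+        // For example, with xMax = 8: a span of 12 samples starting at x = -2 with dx = 1 splits
+        // into two samples clamped to column 0, eight samples across [0, 8), and two samples
+        // clamped to column 7.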
+ if (dx >= 0) {
+ Span leftClamped = span.breakAt(0.0f, dx);
+ if (!leftClamped.isEmpty()) {
+ leftClamped.clampToSinglePixel({0.0f, y});
+ next->pointSpan(leftClamped);
+ }
+ Span center = span.breakAt(fXMax, dx);
+ if (!center.isEmpty()) {
+ next->pointSpan(center);
+ }
+ if (!span.isEmpty()) {
+ span.clampToSinglePixel({fXMax - 1, y});
+ next->pointSpan(span);
+ }
+ } else {
+ Span rightClamped = span.breakAt(fXMax, dx);
+ if (!rightClamped.isEmpty()) {
+ rightClamped.clampToSinglePixel({fXMax - 1, y});
+ next->pointSpan(rightClamped);
+ }
+ Span center = span.breakAt(0.0f, dx);
+ if (!center.isEmpty()) {
+ next->pointSpan(center);
+ }
+ if (!span.isEmpty()) {
+ span.clampToSinglePixel({0.0f, y});
+ next->pointSpan(span);
+ }
+ }
+ return true;
+ }
+
+private:
+ const Sk4s fXsMax;
+ const SkScalar fXMax;
+};
+
+class YClampStrategy {
+public:
+ YClampStrategy(int32_t max)
+ : fYMax{SkScalar(max) - 0.5f}
+ , fYsMax{SkScalar(max) - 0.5f} { }
+
+ void tileYPoints(Sk4s* ys) {
+ *ys = Sk4s::Min(Sk4s::Max(*ys, 0.0f), fYsMax);
+ SkASSERT(0 <= (*ys)[0] && (*ys)[0] <= fYMax);
+ SkASSERT(0 <= (*ys)[1] && (*ys)[1] <= fYMax);
+ SkASSERT(0 <= (*ys)[2] && (*ys)[2] <= fYMax);
+ SkASSERT(0 <= (*ys)[3] && (*ys)[3] <= fYMax);
+ }
+
+ SkScalar tileY(SkScalar y) {
+ return std::min(std::max<SkScalar>(0.0f, y), fYMax);
+ }
+
+private:
+ const SkScalar fYMax;
+ const Sk4s fYsMax;
+};
+
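+// Positive remainder of x modulo base, e.g. tile_mod(11, 8) == 3 and tile_mod(-3, 8) == 5.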
+SkScalar tile_mod(SkScalar x, SkScalar base) {
+ return x - SkScalarFloorToScalar(x / base) * base;
+}
+
+class XRepeatStrategy {
+public:
+ XRepeatStrategy(int32_t max)
+ : fXMax{SkScalar(max)}
+ , fXsMax{SkScalar(max)}
+ , fXsCap{SkScalar(nextafterf(SkScalar(max), 0.0f))}
+ , fXsInvMax{1.0f / SkScalar(max)} { }
+
+ void tileXPoints(Sk4s* xs) {
+ Sk4s divX = *xs * fXsInvMax;
+ Sk4s modX = *xs - divX.floor() * fXsMax;
+ *xs = Sk4s::Min(fXsCap, modX);
+ SkASSERT(0 <= (*xs)[0] && (*xs)[0] < fXMax);
+ SkASSERT(0 <= (*xs)[1] && (*xs)[1] < fXMax);
+ SkASSERT(0 <= (*xs)[2] && (*xs)[2] < fXMax);
+ SkASSERT(0 <= (*xs)[3] && (*xs)[3] < fXMax);
+ }
+
+ template<typename Next>
+ bool maybeProcessSpan(Span originalSpan, Next* next) {
+ SkASSERT(!originalSpan.isEmpty());
+ SkPoint start; SkScalar length; int count;
+ std::tie(start, length, count) = originalSpan;
+ // Make x and y in range on the tile.
+ SkScalar x = tile_mod(X(start), fXMax);
+ SkScalar y = Y(start);
+ SkScalar dx = length / (count - 1);
+
+        // No need to try to go fast: the steps are larger than a tile, or there is only one point.
+ if (SkScalarAbs(dx) >= fXMax || count <= 1) {
+ return false;
+ }
+
+ // A B C D Z
+ // +-------+-------+-------++-------+-------+-------++ +-------+-------++------
+ // | | *---|*---*--||-*---*-|---*---|*---*--|| |--*---*| ||
+ // | | | || | | || ... | | ||
+ // | | | || | | || | | ||
+ // +-------+-------+-------++-------+-------+-------++ +-------+-------++------
+ // ^^ ^^ ^^
+ // xMax || xMin xMax || xMin xMax || xMin
+ //
+ // *---*---*---... - track of samples. * = sample
+ //
+ // +-+ ||
+ // | | - pixels in source space. || - tile border.
+ // +-+ ||
+ //
+ //
+ // The given span starts at A and continues on through several tiles to sample point Z.
+ // The idea is to break this into several spans one on each tile the entire span
+ // intersects. The A to B span only covers a partial tile and has a count of 3 and the
+ // distance from A to B is (count - 1) * dx or 2 * dx. The distance from A to the start of
+ // the next span is count * dx or 3 * dx. Span C to D covers an entire tile has a count
+ // of 5 and a length of 4 * dx. Remember, count is the number of pixels needed for the
+ // destination and the number of samples.
+ //
+ // Overall Strategy:
+ // While the span hangs over the edge of the tile, draw the span covering the tile then
+ // slide the span over to the next tile.
+
+ // The guard could have been count > 0, but then a bunch of math would be done in the
+ // common case.
+
+ Span span({x, y}, length, count);
+ if (dx > 0) {
+ while (!span.isEmpty() && span.endX() >= fXMax) {
+ Span toDraw = span.breakAt(fXMax, dx);
+ next->pointSpan(toDraw);
+ span.offset(-fXMax);
+ }
+ } else {
+ while (!span.isEmpty() && span.endX() < 0.0f) {
+ Span toDraw = span.breakAt(0.0f, dx);
+ next->pointSpan(toDraw);
+ span.offset(fXMax);
+ }
+ }
+
+ // All on a single tile.
+ if (!span.isEmpty()) {
+ next->pointSpan(span);
+ }
+
+ return true;
+ }
+
+private:
+ const SkScalar fXMax;
+ const Sk4s fXsMax;
+ const Sk4s fXsCap;
+ const Sk4s fXsInvMax;
+};
+
+// The XRepeatUnitScaleStrategy exploits the situation where dx = 1.0. The main advantage is that
+// the relationship between the sample points and the source pixels does not change from tile to
+// repeated tile. This allows the tiler to calculate the span once and re-use it for each
+// repeated tile. Some samplers later exploit this to avoid converting pixels to linear space,
+// allowing the use of memmove to place pixels in the destination.
+class XRepeatUnitScaleStrategy {
+public:
+ XRepeatUnitScaleStrategy(int32_t max)
+ : fXMax{SkScalar(max)}
+ , fXsMax{SkScalar(max)}
+ , fXsCap{SkScalar(nextafterf(SkScalar(max), 0.0f))}
+ , fXsInvMax{1.0f / SkScalar(max)} { }
+
+ void tileXPoints(Sk4s* xs) {
+ Sk4s divX = *xs * fXsInvMax;
+ Sk4s modX = *xs - divX.floor() * fXsMax;
+ *xs = Sk4s::Min(fXsCap, modX);
+ SkASSERT(0 <= (*xs)[0] && (*xs)[0] < fXMax);
+ SkASSERT(0 <= (*xs)[1] && (*xs)[1] < fXMax);
+ SkASSERT(0 <= (*xs)[2] && (*xs)[2] < fXMax);
+ SkASSERT(0 <= (*xs)[3] && (*xs)[3] < fXMax);
+ }
+
+ template<typename Next>
+ bool maybeProcessSpan(Span originalSpan, Next* next) {
+ SkASSERT(!originalSpan.isEmpty());
+ SkPoint start; SkScalar length; int count;
+ std::tie(start, length, count) = originalSpan;
+ // Make x and y in range on the tile.
+ SkScalar x = tile_mod(X(start), fXMax);
+ SkScalar y = Y(start);
+
+        // No need to try to go fast when the tile is a single pixel wide or there is only one point.
+ if (fXMax == 1 || count <= 1) {
+ return false;
+ }
+
+ // x should be on the tile.
+ SkASSERT(0.0f <= x && x < fXMax);
+ Span span({x, y}, length, count);
+
+ if (SkScalarFloorToScalar(x) != 0.0f) {
+ Span toDraw = span.breakAt(fXMax, 1.0f);
+ SkASSERT(0.0f <= toDraw.startX() && toDraw.endX() < fXMax);
+ next->pointSpan(toDraw);
+ span.offset(-fXMax);
+ }
+
+ // All of the span could have been on the first tile. If so, then no work to do.
+ if (span.isEmpty()) return true;
+
+ // At this point the span should be aligned to zero.
+ SkASSERT(SkScalarFloorToScalar(span.startX()) == 0.0f);
+
+ // Note: The span length has an unintuitive relation to the tile width. The tile width is
+ // a half open interval [tb, te), but the span is a closed interval [sb, se]. In order to
+ // compare the two, you need to convert the span to a half open interval. This is done by
+ // adding dx to se. So, the span becomes: [sb, se + dx). Hence the + 1.0f below.
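+        // For example, with fXMax = 8 a zero-aligned span of 16 samples (length 15 at dx = 1)
+        // gives (15 + 1) / 8 = 2 full repeats of the 8-pixel tile span with nothing left over.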
+ SkScalar div = (span.length() + 1.0f) / fXMax;
+ int32_t repeatCount = SkScalarFloorToInt(div);
+ Span repeatableSpan{{0.0f, y}, fXMax - 1.0f, SkScalarFloorToInt(fXMax)};
+
+ // Repeat the center section.
+ SkASSERT(0.0f <= repeatableSpan.startX() && repeatableSpan.endX() < fXMax);
+ if (repeatCount > 0) {
+ next->repeatSpan(repeatableSpan, repeatCount);
+ }
+
+ // Calculate the advance past the center portion.
+ SkScalar advance = SkScalar(repeatCount) * fXMax;
+
+ // There may be some of the span left over.
+ span.breakAt(advance, 1.0f);
+
+ // All on a single tile.
+ if (!span.isEmpty()) {
+ span.offset(-advance);
+ SkASSERT(0.0f <= span.startX() && span.endX() < fXMax);
+ next->pointSpan(span);
+ }
+
+ return true;
+ }
+
+private:
+ const SkScalar fXMax;
+ const Sk4s fXsMax;
+ const Sk4s fXsCap;
+ const Sk4s fXsInvMax;
+};
+
+class YRepeatStrategy {
+public:
+ YRepeatStrategy(int32_t max)
+ : fYMax{SkScalar(max)}
+ , fYsMax{SkScalar(max)}
+ , fYsInvMax{1.0f / SkScalar(max)} { }
+
+ void tileYPoints(Sk4s* ys) {
+ Sk4s divY = *ys * fYsInvMax;
+ Sk4s modY = *ys - divY.floor() * fYsMax;
+ *ys = modY;
+ SkASSERT(0 <= (*ys)[0] && (*ys)[0] < fYMax);
+ SkASSERT(0 <= (*ys)[1] && (*ys)[1] < fYMax);
+ SkASSERT(0 <= (*ys)[2] && (*ys)[2] < fYMax);
+ SkASSERT(0 <= (*ys)[3] && (*ys)[3] < fYMax);
+ }
+
+ SkScalar tileY(SkScalar y) {
+ SkScalar answer = tile_mod(y, fYMax);
+ SkASSERT(0 <= answer && answer < fYMax);
+ return answer;
+ }
+
+private:
+ const SkScalar fYMax;
+ const Sk4s fYsMax;
+ const Sk4s fYsInvMax;
+};
+// Mirror tiling reflects coordinates into [0, max). The Mathematica sketch below (using max = 40
+// as an example) is the formula both mirror strategies implement: bias by max, wrap modulo
+// 2 * max, un-bias, then take the absolute value.
+// mq2[x_] := Abs[(x - 40) - Floor[(x - 40)/80] * 80 - 40]
+class XMirrorStrategy {
+public:
+ XMirrorStrategy(int32_t max)
+ : fXsMax{SkScalar(max)}
+ , fXsCap{SkScalar(nextafterf(SkScalar(max), 0.0f))}
+ , fXsDoubleInvMax{1.0f / (2.0f * SkScalar(max))} { }
+
+ void tileXPoints(Sk4s* xs) {
+ Sk4f bias = *xs - fXsMax;
+ Sk4f div = bias * fXsDoubleInvMax;
+ Sk4f mod = bias - div.floor() * 2.0f * fXsMax;
+ Sk4f unbias = mod - fXsMax;
+ *xs = Sk4f::Min(unbias.abs(), fXsCap);
+ SkASSERT(0 <= (*xs)[0] && (*xs)[0] < fXsMax[0]);
+ SkASSERT(0 <= (*xs)[1] && (*xs)[1] < fXsMax[0]);
+ SkASSERT(0 <= (*xs)[2] && (*xs)[2] < fXsMax[0]);
+ SkASSERT(0 <= (*xs)[3] && (*xs)[3] < fXsMax[0]);
+ }
+
+ template <typename Next>
+ bool maybeProcessSpan(Span originalSpan, Next* next) { return false; }
+
+private:
+ Sk4f fXsMax;
+ Sk4f fXsCap;
+ Sk4f fXsDoubleInvMax;
+};
+
+class YMirrorStrategy {
+public:
+ YMirrorStrategy(int32_t max)
+ : fYMax{SkScalar(max)}
+ , fYsMax{SkScalar(max)}
+ , fYsCap{nextafterf(SkScalar(max), 0.0f)}
+ , fYsDoubleInvMax{1.0f / (2.0f * SkScalar(max))} { }
+
+ void tileYPoints(Sk4s* ys) {
+ Sk4f bias = *ys - fYsMax;
+ Sk4f div = bias * fYsDoubleInvMax;
+ Sk4f mod = bias - div.floor() * 2.0f * fYsMax;
+ Sk4f unbias = mod - fYsMax;
+ *ys = Sk4f::Min(unbias.abs(), fYsCap);
+ SkASSERT(0 <= (*ys)[0] && (*ys)[0] < fYMax);
+ SkASSERT(0 <= (*ys)[1] && (*ys)[1] < fYMax);
+ SkASSERT(0 <= (*ys)[2] && (*ys)[2] < fYMax);
+ SkASSERT(0 <= (*ys)[3] && (*ys)[3] < fYMax);
+ }
+
+ SkScalar tileY(SkScalar y) {
+ SkScalar bias = y - fYMax;
+ SkScalar div = bias * fYsDoubleInvMax[0];
+ SkScalar mod = bias - SkScalarFloorToScalar(div) * 2.0f * fYMax;
+ SkScalar unbias = mod - fYMax;
+ SkScalar answer = SkMinScalar(SkScalarAbs(unbias), fYsCap[0]);
+ SkASSERT(0 <= answer && answer < fYMax);
+ return answer;
+ }
+
+private:
+ SkScalar fYMax;
+ Sk4f fYsMax;
+ Sk4f fYsCap;
+ Sk4f fYsDoubleInvMax;
+};
+
+} // namespace
+#endif // SkLinearBitmapPipeline_tile_DEFINED
diff --git a/gfx/skia/skia/src/core/SkLiteDL.cpp b/gfx/skia/skia/src/core/SkLiteDL.cpp
new file mode 100644
index 000000000..ea9180bb8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLiteDL.cpp
@@ -0,0 +1,815 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCanvas.h"
+#include "SkData.h"
+#include "SkDrawFilter.h"
+#include "SkImageFilter.h"
+#include "SkLiteDL.h"
+#include "SkMath.h"
+#include "SkPicture.h"
+#include "SkRSXform.h"
+#include "SkTextBlob.h"
+
+#ifndef SKLITEDL_PAGE
+ #define SKLITEDL_PAGE 4096
+#endif
+
+// A stand-in for an optional SkRect which was not set, e.g. bounds for a saveLayer().
+static const SkRect kUnset = { SK_ScalarInfinity, 0,0,0};
+static const SkRect* maybe_unset(const SkRect& r) {
+ return r.left() == SK_ScalarInfinity ? nullptr : &r;
+}
+
+// copy_v(dst, src,n, src,n, ...) copies an arbitrary number of typed srcs into dst.
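+// E.g. copy_v(dst, pos,n, (const char*)text,bytes) packs n SkPoints followed by the raw text bytes.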
+static void copy_v(void* dst) {}
+
+template <typename S, typename... Rest>
+static void copy_v(void* dst, const S* src, int n, Rest&&... rest) {
+ SkASSERTF(((uintptr_t)dst & (alignof(S)-1)) == 0,
+ "Expected %p to be aligned for at least %zu bytes.", dst, alignof(S));
+ sk_careful_memcpy(dst, src, n*sizeof(S));
+ copy_v(SkTAddOffset<void>(dst, n*sizeof(S)), std::forward<Rest>(rest)...);
+}
+
+// Helper for getting back at arrays which have been copy_v'd together after an Op.
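+// pod<D>(op, offset) returns a D* pointing offset bytes past the end of *op, where that data was placed.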
+template <typename D, typename T>
+static D* pod(T* op, size_t offset = 0) {
+ return SkTAddOffset<D>(op+1, offset);
+}
+
+// Pre-cache lazy non-threadsafe fields on SkPath and/or SkMatrix.
+static void make_threadsafe(SkPath* path, SkMatrix* matrix) {
+ if (path) { path->updateBoundsCache(); }
+ if (matrix) { (void)matrix->getType(); }
+}
+
+namespace {
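+// X-macro: every op type is listed once here and expanded below into the Type enum and the
+// per-op function tables.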
+#define TYPES(M) \
+ M(SetDrawFilter) M(Save) M(Restore) M(SaveLayer) \
+ M(Concat) M(SetMatrix) M(Translate) M(TranslateZ) \
+ M(ClipPath) M(ClipRect) M(ClipRRect) M(ClipRegion) \
+ M(DrawPaint) M(DrawPath) M(DrawRect) M(DrawRegion) M(DrawOval) M(DrawArc) \
+ M(DrawRRect) M(DrawDRRect) M(DrawAnnotation) M(DrawDrawable) M(DrawPicture) \
+ M(DrawShadowedPicture) \
+ M(DrawImage) M(DrawImageNine) M(DrawImageRect) M(DrawImageLattice) \
+ M(DrawText) M(DrawPosText) M(DrawPosTextH) \
+ M(DrawTextOnPath) M(DrawTextRSXform) M(DrawTextBlob) \
+ M(DrawPatch) M(DrawPoints) M(DrawVertices) M(DrawAtlas)
+
+#define M(T) T,
+ enum class Type : uint8_t { TYPES(M) };
+#undef M
+
+ struct Op {
+ void makeThreadsafe() {}
+
+        uint32_t type : 8;   // Type enum value; indexes the per-op dispatch tables below.
+        uint32_t skip : 24;  // Byte size of this op plus its trailing POD data; the step to the next op.
+ };
+ static_assert(sizeof(Op) == 4, "");
+
+ struct SetDrawFilter final : Op {
+#ifdef SK_SUPPORT_LEGACY_DRAWFILTER
+ static const auto kType = Type::SetDrawFilter;
+ SetDrawFilter(SkDrawFilter* df) : drawFilter(sk_ref_sp(df)) {}
+ sk_sp<SkDrawFilter> drawFilter;
+#endif
+ void draw(SkCanvas* c, const SkMatrix&) {
+#ifdef SK_SUPPORT_LEGACY_DRAWFILTER
+ c->setDrawFilter(drawFilter.get());
+#endif
+ }
+ };
+
+ struct Save final : Op {
+ static const auto kType = Type::Save;
+ void draw(SkCanvas* c, const SkMatrix&) { c->save(); }
+ };
+ struct Restore final : Op {
+ static const auto kType = Type::Restore;
+ void draw(SkCanvas* c, const SkMatrix&) { c->restore(); }
+ };
+ struct SaveLayer final : Op {
+ static const auto kType = Type::SaveLayer;
+ SaveLayer(const SkRect* bounds, const SkPaint* paint,
+ const SkImageFilter* backdrop, SkCanvas::SaveLayerFlags flags) {
+ if (bounds) { this->bounds = *bounds; }
+ if (paint) { this->paint = *paint; }
+ this->backdrop = sk_ref_sp(backdrop);
+ this->flags = flags;
+ }
+ SkRect bounds = kUnset;
+ SkPaint paint;
+ sk_sp<const SkImageFilter> backdrop;
+ SkCanvas::SaveLayerFlags flags;
+ void draw(SkCanvas* c, const SkMatrix&) {
+ c->saveLayer({ maybe_unset(bounds), &paint, backdrop.get(), flags });
+ }
+ };
+
+ struct Concat final : Op {
+ static const auto kType = Type::Concat;
+ Concat(const SkMatrix& matrix) : matrix(matrix) {}
+ SkMatrix matrix;
+ void draw(SkCanvas* c, const SkMatrix&) { c->concat(matrix); }
+ void makeThreadsafe() { make_threadsafe(nullptr, &matrix); }
+ };
+ struct SetMatrix final : Op {
+ static const auto kType = Type::SetMatrix;
+ SetMatrix(const SkMatrix& matrix) : matrix(matrix) {}
+ SkMatrix matrix;
+ void draw(SkCanvas* c, const SkMatrix& original) {
+ c->setMatrix(SkMatrix::Concat(original, matrix));
+ }
+ void makeThreadsafe() { make_threadsafe(nullptr, &matrix); }
+ };
+ struct Translate final : Op {
+ static const auto kType = Type::Translate;
+ Translate(SkScalar dx, SkScalar dy) : dx(dx), dy(dy) {}
+ SkScalar dx,dy;
+ void draw(SkCanvas* c, const SkMatrix&) {
+ c->translate(dx, dy);
+ }
+ };
+ struct TranslateZ final : Op {
+ static const auto kType = Type::TranslateZ;
+ TranslateZ(SkScalar dz) : dz(dz) {}
+ SkScalar dz;
+ void draw(SkCanvas* c, const SkMatrix&) {
+ #ifdef SK_EXPERIMENTAL_SHADOWING
+ c->translateZ(dz);
+ #endif
+ }
+ };
+
+ struct ClipPath final : Op {
+ static const auto kType = Type::ClipPath;
+ ClipPath(const SkPath& path, SkCanvas::ClipOp op, bool aa) : path(path), op(op), aa(aa) {}
+ SkPath path;
+ SkCanvas::ClipOp op;
+ bool aa;
+ void draw(SkCanvas* c, const SkMatrix&) { c->clipPath(path, op, aa); }
+ void makeThreadsafe() { make_threadsafe(&path, nullptr); }
+ };
+ struct ClipRect final : Op {
+ static const auto kType = Type::ClipRect;
+ ClipRect(const SkRect& rect, SkCanvas::ClipOp op, bool aa) : rect(rect), op(op), aa(aa) {}
+ SkRect rect;
+ SkCanvas::ClipOp op;
+ bool aa;
+ void draw(SkCanvas* c, const SkMatrix&) { c->clipRect(rect, op, aa); }
+ };
+ struct ClipRRect final : Op {
+ static const auto kType = Type::ClipRRect;
+ ClipRRect(const SkRRect& rrect, SkCanvas::ClipOp op, bool aa) : rrect(rrect), op(op), aa(aa) {}
+ SkRRect rrect;
+ SkCanvas::ClipOp op;
+ bool aa;
+ void draw(SkCanvas* c, const SkMatrix&) { c->clipRRect(rrect, op, aa); }
+ };
+ struct ClipRegion final : Op {
+ static const auto kType = Type::ClipRegion;
+ ClipRegion(const SkRegion& region, SkCanvas::ClipOp op) : region(region), op(op) {}
+ SkRegion region;
+ SkCanvas::ClipOp op;
+ void draw(SkCanvas* c, const SkMatrix&) { c->clipRegion(region, op); }
+ };
+
+ struct DrawPaint final : Op {
+ static const auto kType = Type::DrawPaint;
+ DrawPaint(const SkPaint& paint) : paint(paint) {}
+ SkPaint paint;
+ void draw(SkCanvas* c, const SkMatrix&) { c->drawPaint(paint); }
+ };
+ struct DrawPath final : Op {
+ static const auto kType = Type::DrawPath;
+ DrawPath(const SkPath& path, const SkPaint& paint) : path(path), paint(paint) {}
+ SkPath path;
+ SkPaint paint;
+ void draw(SkCanvas* c, const SkMatrix&) { c->drawPath(path, paint); }
+ void makeThreadsafe() { make_threadsafe(&path, nullptr); }
+ };
+ struct DrawRect final : Op {
+ static const auto kType = Type::DrawRect;
+ DrawRect(const SkRect& rect, const SkPaint& paint) : rect(rect), paint(paint) {}
+ SkRect rect;
+ SkPaint paint;
+ void draw(SkCanvas* c, const SkMatrix&) { c->drawRect(rect, paint); }
+ };
+ struct DrawRegion final : Op {
+ static const auto kType = Type::DrawRegion;
+ DrawRegion(const SkRegion& region, const SkPaint& paint) : region(region), paint(paint) {}
+ SkRegion region;
+ SkPaint paint;
+ void draw(SkCanvas* c, const SkMatrix&) { c->drawRegion(region, paint); }
+ };
+ struct DrawOval final : Op {
+ static const auto kType = Type::DrawOval;
+ DrawOval(const SkRect& oval, const SkPaint& paint) : oval(oval), paint(paint) {}
+ SkRect oval;
+ SkPaint paint;
+ void draw(SkCanvas* c, const SkMatrix&) { c->drawOval(oval, paint); }
+ };
+ struct DrawArc final : Op {
+ static const auto kType = Type::DrawArc;
+ DrawArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle, bool useCenter,
+ const SkPaint& paint)
+ : oval(oval), startAngle(startAngle), sweepAngle(sweepAngle), useCenter(useCenter)
+ , paint(paint) {}
+ SkRect oval;
+ SkScalar startAngle;
+ SkScalar sweepAngle;
+ bool useCenter;
+ SkPaint paint;
+ void draw(SkCanvas* c, const SkMatrix&) { c->drawArc(oval, startAngle, sweepAngle,
+ useCenter, paint); }
+ };
+ struct DrawRRect final : Op {
+ static const auto kType = Type::DrawRRect;
+ DrawRRect(const SkRRect& rrect, const SkPaint& paint) : rrect(rrect), paint(paint) {}
+ SkRRect rrect;
+ SkPaint paint;
+ void draw(SkCanvas* c, const SkMatrix&) { c->drawRRect(rrect, paint); }
+ };
+ struct DrawDRRect final : Op {
+ static const auto kType = Type::DrawDRRect;
+ DrawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint& paint)
+ : outer(outer), inner(inner), paint(paint) {}
+ SkRRect outer, inner;
+ SkPaint paint;
+ void draw(SkCanvas* c, const SkMatrix&) { c->drawDRRect(outer, inner, paint); }
+ };
+
+ struct DrawAnnotation final : Op {
+ static const auto kType = Type::DrawAnnotation;
+ DrawAnnotation(const SkRect& rect, SkData* value) : rect(rect), value(sk_ref_sp(value)) {}
+ SkRect rect;
+ sk_sp<SkData> value;
+ void draw(SkCanvas* c, const SkMatrix&) {
+ c->drawAnnotation(rect, pod<char>(this), value.get());
+ }
+ };
+ struct DrawDrawable final : Op {
+ static const auto kType = Type::DrawDrawable;
+ DrawDrawable(SkDrawable* drawable, const SkMatrix* matrix) : drawable(sk_ref_sp(drawable)) {
+ if (matrix) { this->matrix = *matrix; }
+ }
+ sk_sp<SkDrawable> drawable;
+ sk_sp<const SkPicture> snapped;
+ SkMatrix matrix = SkMatrix::I();
+ void draw(SkCanvas* c, const SkMatrix&) {
+ snapped ? c->drawPicture(snapped.get(), &matrix, nullptr)
+ : c->drawDrawable(drawable.get(), &matrix);
+ }
+ void makeThreadsafe() {
+ snapped.reset(drawable->newPictureSnapshot());
+ make_threadsafe(nullptr, &matrix);
+ }
+ };
+ struct DrawPicture final : Op {
+ static const auto kType = Type::DrawPicture;
+ DrawPicture(const SkPicture* picture, const SkMatrix* matrix, const SkPaint* paint)
+ : picture(sk_ref_sp(picture)) {
+ if (matrix) { this->matrix = *matrix; }
+ if (paint) { this->paint = *paint; has_paint = true; }
+ }
+ sk_sp<const SkPicture> picture;
+ SkMatrix matrix = SkMatrix::I();
+ SkPaint paint;
+ bool has_paint = false; // TODO: why is a default paint not the same?
+ void draw(SkCanvas* c, const SkMatrix&) {
+ c->drawPicture(picture.get(), &matrix, has_paint ? &paint : nullptr);
+ }
+ void makeThreadsafe() { make_threadsafe(nullptr, &matrix); }
+ };
+ struct DrawShadowedPicture final : Op {
+ static const auto kType = Type::DrawShadowedPicture;
+ DrawShadowedPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint, const SkShadowParams& params)
+ : picture(sk_ref_sp(picture)) {
+ if (matrix) { this->matrix = *matrix; }
+ if (paint) { this->paint = *paint; }
+ this->params = params;
+ }
+ sk_sp<const SkPicture> picture;
+ SkMatrix matrix = SkMatrix::I();
+ SkPaint paint;
+ SkShadowParams params;
+ void draw(SkCanvas* c, const SkMatrix&) {
+ #ifdef SK_EXPERIMENTAL_SHADOWING
+ c->drawShadowedPicture(picture.get(), &matrix, &paint, params);
+ #endif
+ }
+ void makeThreadsafe() { make_threadsafe(nullptr, &matrix); }
+ };
+
+ struct DrawImage final : Op {
+ static const auto kType = Type::DrawImage;
+ DrawImage(sk_sp<const SkImage>&& image, SkScalar x, SkScalar y, const SkPaint* paint)
+ : image(std::move(image)), x(x), y(y) {
+ if (paint) { this->paint = *paint; }
+ }
+ sk_sp<const SkImage> image;
+ SkScalar x,y;
+ SkPaint paint;
+ void draw(SkCanvas* c, const SkMatrix&) { c->drawImage(image.get(), x,y, &paint); }
+ };
+ struct DrawImageNine final : Op {
+ static const auto kType = Type::DrawImageNine;
+ DrawImageNine(sk_sp<const SkImage>&& image,
+ const SkIRect& center, const SkRect& dst, const SkPaint* paint)
+ : image(std::move(image)), center(center), dst(dst) {
+ if (paint) { this->paint = *paint; }
+ }
+ sk_sp<const SkImage> image;
+ SkIRect center;
+ SkRect dst;
+ SkPaint paint;
+ void draw(SkCanvas* c, const SkMatrix&) {
+ c->drawImageNine(image.get(), center, dst, &paint);
+ }
+ };
+ struct DrawImageRect final : Op {
+ static const auto kType = Type::DrawImageRect;
+ DrawImageRect(sk_sp<const SkImage>&& image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SkCanvas::SrcRectConstraint constraint)
+ : image(std::move(image)), dst(dst), constraint(constraint) {
+ this->src = src ? *src : SkRect::MakeIWH(image->width(), image->height());
+ if (paint) { this->paint = *paint; }
+ }
+ sk_sp<const SkImage> image;
+ SkRect src, dst;
+ SkPaint paint;
+ SkCanvas::SrcRectConstraint constraint;
+ void draw(SkCanvas* c, const SkMatrix&) {
+ c->drawImageRect(image.get(), src, dst, &paint, constraint);
+ }
+ };
+ struct DrawImageLattice final : Op {
+ static const auto kType = Type::DrawImageLattice;
+ DrawImageLattice(sk_sp<const SkImage>&& image, int xs, int ys, int fs,
+ const SkIRect& src, const SkRect& dst, const SkPaint* paint)
+ : image(std::move(image)), xs(xs), ys(ys), fs(fs), src(src), dst(dst) {
+ if (paint) { this->paint = *paint; }
+ }
+ sk_sp<const SkImage> image;
+ int xs, ys, fs;
+ SkIRect src;
+ SkRect dst;
+ SkPaint paint;
+ void draw(SkCanvas* c, const SkMatrix&) {
+ auto xdivs = pod<int>(this, 0),
+ ydivs = pod<int>(this, xs*sizeof(int));
+ auto flags = (0 == fs) ? nullptr :
+ pod<SkCanvas::Lattice::Flags>(this, (xs+ys)*sizeof(int));
+ c->drawImageLattice(image.get(), {xdivs, ydivs, flags, xs, ys, &src}, dst, &paint);
+ }
+ };
+
+ struct DrawText final : Op {
+ static const auto kType = Type::DrawText;
+ DrawText(size_t bytes, SkScalar x, SkScalar y, const SkPaint& paint)
+ : bytes(bytes), x(x), y(y), paint(paint) {}
+ size_t bytes;
+ SkScalar x,y;
+ SkPaint paint;
+ void draw(SkCanvas* c, const SkMatrix&) {
+ c->drawText(pod<void>(this), bytes, x,y, paint);
+ }
+ };
+ struct DrawPosText final : Op {
+ static const auto kType = Type::DrawPosText;
+ DrawPosText(size_t bytes, const SkPaint& paint, int n)
+ : bytes(bytes), paint(paint), n(n) {}
+ size_t bytes;
+ SkPaint paint;
+ int n;
+ void draw(SkCanvas* c, const SkMatrix&) {
+ auto points = pod<SkPoint>(this);
+ auto text = pod<void>(this, n*sizeof(SkPoint));
+ c->drawPosText(text, bytes, points, paint);
+ }
+ };
+ struct DrawPosTextH final : Op {
+ static const auto kType = Type::DrawPosTextH;
+ DrawPosTextH(size_t bytes, SkScalar y, const SkPaint& paint, int n)
+ : bytes(bytes), y(y), paint(paint), n(n) {}
+ size_t bytes;
+ SkScalar y;
+ SkPaint paint;
+ int n;
+ void draw(SkCanvas* c, const SkMatrix&) {
+ auto xs = pod<SkScalar>(this);
+ auto text = pod<void>(this, n*sizeof(SkScalar));
+ c->drawPosTextH(text, bytes, xs, y, paint);
+ }
+ };
+ struct DrawTextOnPath final : Op {
+ static const auto kType = Type::DrawTextOnPath;
+ DrawTextOnPath(size_t bytes, const SkPath& path,
+ const SkMatrix* matrix, const SkPaint& paint)
+ : bytes(bytes), path(path), paint(paint) {
+ if (matrix) { this->matrix = *matrix; }
+ }
+ size_t bytes;
+ SkPath path;
+ SkMatrix matrix = SkMatrix::I();
+ SkPaint paint;
+ void draw(SkCanvas* c, const SkMatrix&) {
+ c->drawTextOnPath(pod<void>(this), bytes, path, &matrix, paint);
+ }
+ void makeThreadsafe() { make_threadsafe(&path, &matrix); }
+ };
+ struct DrawTextRSXform final : Op {
+ static const auto kType = Type::DrawTextRSXform;
+ DrawTextRSXform(size_t bytes, const SkRect* cull, const SkPaint& paint)
+ : bytes(bytes), paint(paint) {
+ if (cull) { this->cull = *cull; }
+ }
+ size_t bytes;
+ SkRect cull = kUnset;
+ SkPaint paint;
+ void draw(SkCanvas* c, const SkMatrix&) {
+ c->drawTextRSXform(pod<void>(this), bytes, pod<SkRSXform>(this, bytes),
+ maybe_unset(cull), paint);
+ }
+ };
+ struct DrawTextBlob final : Op {
+ static const auto kType = Type::DrawTextBlob;
+ DrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y, const SkPaint& paint)
+ : blob(sk_ref_sp(blob)), x(x), y(y), paint(paint) {}
+ sk_sp<const SkTextBlob> blob;
+ SkScalar x,y;
+ SkPaint paint;
+ void draw(SkCanvas* c, const SkMatrix&) {
+ c->drawTextBlob(blob.get(), x,y, paint);
+ }
+ };
+
+ struct DrawPatch final : Op {
+ static const auto kType = Type::DrawPatch;
+ DrawPatch(const SkPoint cubics[12], const SkColor colors[4], const SkPoint texs[4],
+ SkXfermode* xfermode, const SkPaint& paint)
+ : xfermode(sk_ref_sp(xfermode)), paint(paint) {
+ copy_v(this->cubics, cubics, 12);
+ if (colors) { copy_v(this->colors, colors, 4); has_colors = true; }
+ if (texs ) { copy_v(this->texs , texs , 4); has_texs = true; }
+ }
+ SkPoint cubics[12];
+ SkColor colors[4];
+ SkPoint texs[4];
+ sk_sp<SkXfermode> xfermode;
+ SkPaint paint;
+ bool has_colors = false;
+ bool has_texs = false;
+ void draw(SkCanvas* c, const SkMatrix&) {
+ c->drawPatch(cubics, has_colors ? colors : nullptr, has_texs ? texs : nullptr,
+ xfermode.get(), paint);
+ }
+ };
+ struct DrawPoints final : Op {
+ static const auto kType = Type::DrawPoints;
+ DrawPoints(SkCanvas::PointMode mode, size_t count, const SkPaint& paint)
+ : mode(mode), count(count), paint(paint) {}
+ SkCanvas::PointMode mode;
+ size_t count;
+ SkPaint paint;
+ void draw(SkCanvas* c, const SkMatrix&) {
+ c->drawPoints(mode, count, pod<SkPoint>(this), paint);
+ }
+ };
+ struct DrawVertices final : Op {
+ static const auto kType = Type::DrawVertices;
+ DrawVertices(SkCanvas::VertexMode mode, int count, SkXfermode* xfermode, int nindices,
+ const SkPaint& paint, bool has_texs, bool has_colors, bool has_indices)
+ : mode(mode), count(count), xfermode(sk_ref_sp(xfermode)), nindices(nindices)
+ , paint(paint), has_texs(has_texs), has_colors(has_colors), has_indices(has_indices) {}
+ SkCanvas::VertexMode mode;
+ int count;
+ sk_sp<SkXfermode> xfermode;
+ int nindices;
+ SkPaint paint;
+ bool has_texs;
+ bool has_colors;
+ bool has_indices;
+ void draw(SkCanvas* c, const SkMatrix&) {
+ SkPoint* vertices = pod<SkPoint>(this, 0);
+ size_t offset = count*sizeof(SkPoint);
+
+ SkPoint* texs = nullptr;
+ if (has_texs) {
+ texs = pod<SkPoint>(this, offset);
+ offset += count*sizeof(SkPoint);
+ }
+
+ SkColor* colors = nullptr;
+ if (has_colors) {
+ colors = pod<SkColor>(this, offset);
+ offset += count*sizeof(SkColor);
+ }
+
+ uint16_t* indices = nullptr;
+ if (has_indices) {
+ indices = pod<uint16_t>(this, offset);
+ }
+ c->drawVertices(mode, count, vertices, texs, colors, xfermode.get(),
+ indices, nindices, paint);
+ }
+ };
+ struct DrawAtlas final : Op {
+ static const auto kType = Type::DrawAtlas;
+ DrawAtlas(const SkImage* atlas, int count, SkXfermode::Mode xfermode,
+ const SkRect* cull, const SkPaint* paint, bool has_colors)
+ : atlas(sk_ref_sp(atlas)), count(count), xfermode(xfermode), has_colors(has_colors) {
+ if (cull) { this->cull = *cull; }
+ if (paint) { this->paint = *paint; }
+ }
+ sk_sp<const SkImage> atlas;
+ int count;
+ SkXfermode::Mode xfermode;
+ SkRect cull = kUnset;
+ SkPaint paint;
+ bool has_colors;
+ void draw(SkCanvas* c, const SkMatrix&) {
+ auto xforms = pod<SkRSXform>(this, 0);
+ auto texs = pod<SkRect>(this, count*sizeof(SkRSXform));
+ auto colors = has_colors
+ ? pod<SkColor>(this, count*(sizeof(SkRSXform) + sizeof(SkRect)))
+ : nullptr;
+ c->drawAtlas(atlas.get(), xforms, texs, colors, count, xfermode,
+ maybe_unset(cull), &paint);
+ }
+ };
+} // namespace
+
+template <typename T, typename... Args>
+void* SkLiteDL::push(size_t pod, Args&&... args) {
+ size_t skip = SkAlignPtr(sizeof(T) + pod);
+ SkASSERT(skip < (1<<24));
+ if (fUsed + skip > fReserved) {
+ static_assert(SkIsPow2(SKLITEDL_PAGE), "This math needs updating for non-pow2.");
+ // Next greater multiple of SKLITEDL_PAGE.
+ fReserved = (fUsed + skip + SKLITEDL_PAGE) & ~(SKLITEDL_PAGE-1);
+ fBytes.realloc(fReserved);
+ }
+ SkASSERT(fUsed + skip <= fReserved);
+ auto op = (T*)(fBytes.get() + fUsed);
+ fUsed += skip;
+ new (op) T{ std::forward<Args>(args)... };
+ op->type = (uint32_t)T::kType;
+ op->skip = skip;
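+    // Return a pointer to the POD area just past the op; callers with trailing data fill it via copy_v().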
+ return op+1;
+}
+
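+// Walk the op buffer, dispatching each op to the matching entry in fns[] (indexed by op->type).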
+template <typename Fn, typename... Args>
+inline void SkLiteDL::map(const Fn fns[], Args... args) {
+ auto end = fBytes.get() + fUsed;
+ for (uint8_t* ptr = fBytes.get(); ptr < end; ) {
+ auto op = (Op*)ptr;
+ auto type = op->type;
+ auto skip = op->skip;
+ if (auto fn = fns[type]) { // We replace no-op functions with nullptrs
+ fn(op, args...); // to avoid the overhead of a pointless call.
+ }
+ ptr += skip;
+ }
+}
+
+#ifdef SK_SUPPORT_LEGACY_DRAWFILTER
+void SkLiteDL::setDrawFilter(SkDrawFilter* df) {
+ this->push<SetDrawFilter>(0, df);
+}
+#endif
+
+void SkLiteDL:: save() { this->push <Save>(0); }
+void SkLiteDL::restore() { this->push<Restore>(0); }
+void SkLiteDL::saveLayer(const SkRect* bounds, const SkPaint* paint,
+ const SkImageFilter* backdrop, SkCanvas::SaveLayerFlags flags) {
+ this->push<SaveLayer>(0, bounds, paint, backdrop, flags);
+}
+
+void SkLiteDL:: concat(const SkMatrix& matrix) { this->push <Concat>(0, matrix); }
+void SkLiteDL::setMatrix(const SkMatrix& matrix) { this->push<SetMatrix>(0, matrix); }
+void SkLiteDL::translate(SkScalar dx, SkScalar dy) { this->push<Translate>(0, dx, dy); }
+void SkLiteDL::translateZ(SkScalar dz) { this->push<TranslateZ>(0, dz); }
+
+void SkLiteDL::clipPath(const SkPath& path, SkCanvas::ClipOp op, bool aa) {
+ this->push<ClipPath>(0, path, op, aa);
+}
+void SkLiteDL::clipRect(const SkRect& rect, SkCanvas::ClipOp op, bool aa) {
+ this->push<ClipRect>(0, rect, op, aa);
+}
+void SkLiteDL::clipRRect(const SkRRect& rrect, SkCanvas::ClipOp op, bool aa) {
+ this->push<ClipRRect>(0, rrect, op, aa);
+}
+void SkLiteDL::clipRegion(const SkRegion& region, SkCanvas::ClipOp op) {
+ this->push<ClipRegion>(0, region, op);
+}
+
+void SkLiteDL::drawPaint(const SkPaint& paint) {
+ this->push<DrawPaint>(0, paint);
+}
+void SkLiteDL::drawPath(const SkPath& path, const SkPaint& paint) {
+ this->push<DrawPath>(0, path, paint);
+}
+void SkLiteDL::drawRect(const SkRect& rect, const SkPaint& paint) {
+ this->push<DrawRect>(0, rect, paint);
+}
+void SkLiteDL::drawRegion(const SkRegion& region, const SkPaint& paint) {
+ this->push<DrawRegion>(0, region, paint);
+}
+void SkLiteDL::drawOval(const SkRect& oval, const SkPaint& paint) {
+ this->push<DrawOval>(0, oval, paint);
+}
+void SkLiteDL::drawArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle, bool useCenter,
+ const SkPaint& paint) {
+ this->push<DrawArc>(0, oval, startAngle, sweepAngle, useCenter, paint);
+}
+void SkLiteDL::drawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ this->push<DrawRRect>(0, rrect, paint);
+}
+void SkLiteDL::drawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint& paint) {
+ this->push<DrawDRRect>(0, outer, inner, paint);
+}
+
+void SkLiteDL::drawAnnotation(const SkRect& rect, const char* key, SkData* value) {
+ size_t bytes = strlen(key)+1;
+ void* pod = this->push<DrawAnnotation>(bytes, rect, value);
+ copy_v(pod, key,bytes);
+}
+void SkLiteDL::drawDrawable(SkDrawable* drawable, const SkMatrix* matrix) {
+ this->push<DrawDrawable>(0, drawable, matrix);
+}
+void SkLiteDL::drawPicture(const SkPicture* picture,
+ const SkMatrix* matrix, const SkPaint* paint) {
+ this->push<DrawPicture>(0, picture, matrix, paint);
+}
+void SkLiteDL::drawShadowedPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint, const SkShadowParams& params) {
+    this->push<DrawShadowedPicture>(0, picture, matrix, paint, params);
+}
+
+void SkLiteDL::drawImage(sk_sp<const SkImage> image, SkScalar x, SkScalar y, const SkPaint* paint) {
+ this->push<DrawImage>(0, std::move(image), x,y, paint);
+}
+void SkLiteDL::drawImageNine(sk_sp<const SkImage> image, const SkIRect& center,
+ const SkRect& dst, const SkPaint* paint) {
+ this->push<DrawImageNine>(0, std::move(image), center, dst, paint);
+}
+void SkLiteDL::drawImageRect(sk_sp<const SkImage> image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SkCanvas::SrcRectConstraint constraint) {
+ this->push<DrawImageRect>(0, std::move(image), src, dst, paint, constraint);
+}
+void SkLiteDL::drawImageLattice(sk_sp<const SkImage> image, const SkCanvas::Lattice& lattice,
+ const SkRect& dst, const SkPaint* paint) {
+ int xs = lattice.fXCount, ys = lattice.fYCount;
+ int fs = lattice.fFlags ? (xs + 1) * (ys + 1) : 0;
+ size_t bytes = (xs + ys) * sizeof(int) + fs * sizeof(SkCanvas::Lattice::Flags);
+ SkASSERT(lattice.fBounds);
+ void* pod = this->push<DrawImageLattice>(bytes, std::move(image), xs, ys, fs, *lattice.fBounds,
+ dst, paint);
+ copy_v(pod, lattice.fXDivs, xs,
+ lattice.fYDivs, ys,
+ lattice.fFlags, fs);
+}
+
+void SkLiteDL::drawText(const void* text, size_t bytes,
+ SkScalar x, SkScalar y, const SkPaint& paint) {
+ void* pod = this->push<DrawText>(bytes, bytes, x, y, paint);
+ copy_v(pod, (const char*)text,bytes);
+}
+void SkLiteDL::drawPosText(const void* text, size_t bytes,
+ const SkPoint pos[], const SkPaint& paint) {
+ int n = paint.countText(text, bytes);
+ void* pod = this->push<DrawPosText>(n*sizeof(SkPoint)+bytes, bytes, paint, n);
+ copy_v(pod, pos,n, (const char*)text,bytes);
+}
+void SkLiteDL::drawPosTextH(const void* text, size_t bytes,
+ const SkScalar xs[], SkScalar y, const SkPaint& paint) {
+ int n = paint.countText(text, bytes);
+ void* pod = this->push<DrawPosTextH>(n*sizeof(SkScalar)+bytes, bytes, y, paint, n);
+ copy_v(pod, xs,n, (const char*)text,bytes);
+}
+void SkLiteDL::drawTextOnPath(const void* text, size_t bytes,
+ const SkPath& path, const SkMatrix* matrix, const SkPaint& paint) {
+ void* pod = this->push<DrawTextOnPath>(bytes, bytes, path, matrix, paint);
+ copy_v(pod, (const char*)text,bytes);
+}
+void SkLiteDL::drawTextRSXform(const void* text, size_t bytes,
+ const SkRSXform xforms[], const SkRect* cull, const SkPaint& paint) {
+ int n = paint.countText(text, bytes);
+ void* pod = this->push<DrawTextRSXform>(bytes+n*sizeof(SkRSXform), bytes, cull, paint);
+ copy_v(pod, (const char*)text,bytes, xforms,n);
+}
+void SkLiteDL::drawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y, const SkPaint& paint) {
+ this->push<DrawTextBlob>(0, blob, x,y, paint);
+}
+
+void SkLiteDL::drawPatch(const SkPoint points[12], const SkColor colors[4], const SkPoint texs[4],
+ SkXfermode* xfermode, const SkPaint& paint) {
+ this->push<DrawPatch>(0, points, colors, texs, xfermode, paint);
+}
+void SkLiteDL::drawPoints(SkCanvas::PointMode mode, size_t count, const SkPoint points[],
+ const SkPaint& paint) {
+ void* pod = this->push<DrawPoints>(count*sizeof(SkPoint), mode, count, paint);
+ copy_v(pod, points,count);
+}
+void SkLiteDL::drawVertices(SkCanvas::VertexMode mode, int count, const SkPoint vertices[],
+ const SkPoint texs[], const SkColor colors[], SkXfermode* xfermode,
+ const uint16_t indices[], int nindices, const SkPaint& paint) {
+ size_t bytes = count * sizeof(SkPoint);
+ if (texs ) { bytes += count * sizeof(SkPoint); }
+ if (colors) { bytes += count * sizeof(SkColor); }
+ if (indices) { bytes += nindices * sizeof(uint16_t); }
+ void* pod = this->push<DrawVertices>(bytes, mode, count, xfermode, nindices, paint,
+ texs != nullptr, colors != nullptr, indices != nullptr);
+ copy_v(pod, vertices, count,
+ texs, texs ? count : 0,
+ colors, colors ? count : 0,
+ indices, indices ? nindices : 0);
+}
+void SkLiteDL::drawAtlas(const SkImage* atlas, const SkRSXform xforms[], const SkRect texs[],
+ const SkColor colors[], int count, SkXfermode::Mode xfermode,
+ const SkRect* cull, const SkPaint* paint) {
+ size_t bytes = count*(sizeof(SkRSXform) + sizeof(SkRect));
+ if (colors) {
+ bytes += count*sizeof(SkColor);
+ }
+ void* pod = this->push<DrawAtlas>(bytes,
+ atlas, count, xfermode, cull, paint, colors != nullptr);
+ copy_v(pod, xforms, count,
+ texs, count,
+ colors, colors ? count : 0);
+}
+
+typedef void(*draw_fn)(void*, SkCanvas*, const SkMatrix&);
+typedef void(*void_fn)(void*);
+
+// All ops implement draw().
+#define M(T) [](void* op, SkCanvas* c, const SkMatrix& original) { ((T*)op)->draw(c, original); },
+static const draw_fn draw_fns[] = { TYPES(M) };
+#undef M
+
+#define M(T) [](void* op) { ((T*)op)->makeThreadsafe(); },
+static const void_fn make_threadsafe_fns[] = { TYPES(M) };
+#undef M
+
+// Older libstdc++ has pre-standard std::has_trivial_destructor.
+#if defined(__GLIBCXX__) && (__GLIBCXX__ < 20130000)
+ template <typename T> using can_skip_destructor = std::has_trivial_destructor<T>;
+#else
+ template <typename T> using can_skip_destructor = std::is_trivially_destructible<T>;
+#endif
+
+// Most state ops (matrix, clip, save, restore) have a trivial destructor.
+#define M(T) !can_skip_destructor<T>::value ? [](void* op) { ((T*)op)->~T(); } : (void_fn)nullptr,
+static const void_fn dtor_fns[] = { TYPES(M) };
+#undef M
+
+void SkLiteDL::onDraw(SkCanvas* canvas) { this->map(draw_fns, canvas, canvas->getTotalMatrix()); }
+void SkLiteDL::makeThreadsafe() { this->map(make_threadsafe_fns); }
+
+SkRect SkLiteDL::onGetBounds() {
+ return fBounds;
+}
+
+SkLiteDL:: SkLiteDL(SkRect bounds) : fUsed(0), fReserved(0), fBounds(bounds) {}
+
+SkLiteDL::~SkLiteDL() {
+ this->reset(SkRect::MakeEmpty());
+}
+
+sk_sp<SkLiteDL> SkLiteDL::New(SkRect bounds) {
+ return sk_sp<SkLiteDL>(new SkLiteDL(bounds));
+}
+
+void SkLiteDL::reset(SkRect bounds) {
+ SkASSERT(this->unique());
+ this->map(dtor_fns);
+
+ // Leave fBytes and fReserved alone.
+ fUsed = 0;
+ fBounds = bounds;
+}
+
+void SkLiteDL::drawAsLayer(SkCanvas* canvas, const SkMatrix* matrix, const SkPaint* paint) {
+ auto fallback_plan = [&] {
+ SkRect bounds = this->getBounds();
+ canvas->saveLayer(&bounds, paint);
+ this->draw(canvas, matrix);
+ canvas->restore();
+ };
+
+ // TODO: single-draw specializations
+
+ return fallback_plan();
+}
+
+void SkLiteDL::setBounds(const SkRect& bounds) {
+ fBounds = bounds;
+}
diff --git a/gfx/skia/skia/src/core/SkLiteDL.h b/gfx/skia/skia/src/core/SkLiteDL.h
new file mode 100644
index 000000000..3e9eb5e29
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLiteDL.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLiteDL_DEFINED
+#define SkLiteDL_DEFINED
+
+#include "SkCanvas.h"
+#include "SkPaint.h"
+#include "SkPath.h"
+#include "SkDrawable.h"
+#include "SkRect.h"
+#include "SkTDArray.h"
+
+class SkLiteDL final : public SkDrawable {
+public:
+ static sk_sp<SkLiteDL> New(SkRect);
+ void reset(SkRect);
+
+ void makeThreadsafe();
+ bool empty() const { return fUsed == 0; }
+
+#ifdef SK_SUPPORT_LEGACY_DRAWFILTER
+ void setDrawFilter(SkDrawFilter*);
+#endif
+
+ // Draws as if...
+ // SkRect bounds = this->getBounds();
+ // canvas->saveLayer(&bounds, paint);
+ // this->draw(canvas, matrix);
+ // canvas->restore();
+ void drawAsLayer(SkCanvas*, const SkMatrix*, const SkPaint*);
+
+ void save();
+ void saveLayer(const SkRect*, const SkPaint*, const SkImageFilter*, SkCanvas::SaveLayerFlags);
+ void restore();
+
+ void concat (const SkMatrix&);
+ void setMatrix (const SkMatrix&);
+ void translate(SkScalar, SkScalar);
+ void translateZ(SkScalar);
+
+ void clipPath (const SkPath&, SkCanvas::ClipOp, bool aa);
+ void clipRect (const SkRect&, SkCanvas::ClipOp, bool aa);
+ void clipRRect (const SkRRect&, SkCanvas::ClipOp, bool aa);
+ void clipRegion(const SkRegion&, SkCanvas::ClipOp);
+
+ void drawPaint (const SkPaint&);
+ void drawPath (const SkPath&, const SkPaint&);
+ void drawRect (const SkRect&, const SkPaint&);
+ void drawRegion(const SkRegion&, const SkPaint&);
+ void drawOval (const SkRect&, const SkPaint&);
+ void drawArc (const SkRect&, SkScalar, SkScalar, bool, const SkPaint&);
+ void drawRRect (const SkRRect&, const SkPaint&);
+ void drawDRRect(const SkRRect&, const SkRRect&, const SkPaint&);
+
+ void drawAnnotation (const SkRect&, const char*, SkData*);
+ void drawDrawable (SkDrawable*, const SkMatrix*);
+ void drawPicture (const SkPicture*, const SkMatrix*, const SkPaint*);
+ void drawShadowedPicture(const SkPicture*, const SkMatrix*,
+ const SkPaint*, const SkShadowParams& params);
+
+ void drawText (const void*, size_t, SkScalar, SkScalar, const SkPaint&);
+ void drawPosText (const void*, size_t, const SkPoint[], const SkPaint&);
+ void drawPosTextH (const void*, size_t, const SkScalar[], SkScalar, const SkPaint&);
+ void drawTextOnPath (const void*, size_t, const SkPath&, const SkMatrix*, const SkPaint&);
+ void drawTextRSXform(const void*, size_t, const SkRSXform[], const SkRect*, const SkPaint&);
+ void drawTextBlob (const SkTextBlob*, SkScalar,SkScalar, const SkPaint&);
+
+ void drawImage (sk_sp<const SkImage>, SkScalar,SkScalar, const SkPaint*);
+ void drawImageNine(sk_sp<const SkImage>, const SkIRect&, const SkRect&, const SkPaint*);
+ void drawImageRect(sk_sp<const SkImage>, const SkRect*, const SkRect&, const SkPaint*,
+ SkCanvas::SrcRectConstraint);
+ void drawImageLattice(sk_sp<const SkImage>, const SkCanvas::Lattice&,
+ const SkRect&, const SkPaint*);
+
+ void drawPatch(const SkPoint[12], const SkColor[4], const SkPoint[4],
+ SkXfermode*, const SkPaint&);
+ void drawPoints(SkCanvas::PointMode, size_t, const SkPoint[], const SkPaint&);
+ void drawVertices(SkCanvas::VertexMode, int, const SkPoint[], const SkPoint[], const SkColor[],
+ SkXfermode*, const uint16_t[], int, const SkPaint&);
+ void drawAtlas(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[], int,
+ SkXfermode::Mode, const SkRect*, const SkPaint*);
+
+ void setBounds(const SkRect& bounds);
+
+private:
+ SkLiteDL(SkRect);
+ ~SkLiteDL();
+
+ SkRect onGetBounds() override;
+ void onDraw(SkCanvas*) override;
+
+ template <typename T, typename... Args>
+ void* push(size_t, Args&&...);
+
+ template <typename Fn, typename... Args>
+ void map(const Fn[], Args...);
+
+ SkAutoTMalloc<uint8_t> fBytes;
+ size_t fUsed;
+ size_t fReserved;
+ SkRect fBounds;
+};
+
+#endif // SkLiteDL_DEFINED
diff --git a/gfx/skia/skia/src/core/SkLiteRecorder.cpp b/gfx/skia/skia/src/core/SkLiteRecorder.cpp
new file mode 100644
index 000000000..49b1984c4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLiteRecorder.cpp
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkLiteDL.h"
+#include "SkLiteRecorder.h"
+#include "SkSurface.h"
+
+SkLiteRecorder::SkLiteRecorder()
+ : SkCanvas({0,0,1,1}, SkCanvas::kConservativeRasterClip_InitFlag)
+ , fDL(nullptr) {}
+
+void SkLiteRecorder::reset(SkLiteDL* dl) {
+ this->resetForNextPicture(dl->getBounds().roundOut());
+ fDL = dl;
+}
+
+sk_sp<SkSurface> SkLiteRecorder::onNewSurface(const SkImageInfo&, const SkSurfaceProps&) {
+ return nullptr;
+}
+
+#ifdef SK_SUPPORT_LEGACY_DRAWFILTER
+SkDrawFilter* SkLiteRecorder::setDrawFilter(SkDrawFilter* df) {
+ fDL->setDrawFilter(df);
+ return SkCanvas::setDrawFilter(df);
+}
+#endif
+
+void SkLiteRecorder::willSave() { fDL->save(); }
+SkCanvas::SaveLayerStrategy SkLiteRecorder::getSaveLayerStrategy(const SaveLayerRec& rec) {
+ fDL->saveLayer(rec.fBounds, rec.fPaint, rec.fBackdrop, rec.fSaveLayerFlags);
+ return SkCanvas::kNoLayer_SaveLayerStrategy;
+}
+void SkLiteRecorder::willRestore() { fDL->restore(); }
+
+void SkLiteRecorder::didConcat (const SkMatrix& matrix) { fDL-> concat(matrix); }
+void SkLiteRecorder::didSetMatrix(const SkMatrix& matrix) { fDL->setMatrix(matrix); }
+void SkLiteRecorder::didTranslate(SkScalar dx, SkScalar dy) { fDL->translate(dx, dy); }
+
+void SkLiteRecorder::onClipRect(const SkRect& rect, ClipOp op, ClipEdgeStyle style) {
+ fDL->clipRect(rect, op, style==kSoft_ClipEdgeStyle);
+ SkCanvas::onClipRect(rect, op, style);
+}
+void SkLiteRecorder::onClipRRect(const SkRRect& rrect, ClipOp op, ClipEdgeStyle style) {
+ fDL->clipRRect(rrect, op, style==kSoft_ClipEdgeStyle);
+ SkCanvas::onClipRRect(rrect, op, style);
+}
+void SkLiteRecorder::onClipPath(const SkPath& path, ClipOp op, ClipEdgeStyle style) {
+ fDL->clipPath(path, op, style==kSoft_ClipEdgeStyle);
+ SkCanvas::onClipPath(path, op, style);
+}
+void SkLiteRecorder::onClipRegion(const SkRegion& region, ClipOp op) {
+ fDL->clipRegion(region, op);
+ SkCanvas::onClipRegion(region, op);
+}
+
+void SkLiteRecorder::onDrawPaint(const SkPaint& paint) {
+ fDL->drawPaint(paint);
+}
+void SkLiteRecorder::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ fDL->drawPath(path, paint);
+}
+void SkLiteRecorder::onDrawRect(const SkRect& rect, const SkPaint& paint) {
+ fDL->drawRect(rect, paint);
+}
+void SkLiteRecorder::onDrawRegion(const SkRegion& region, const SkPaint& paint) {
+ fDL->drawRegion(region, paint);
+}
+void SkLiteRecorder::onDrawOval(const SkRect& oval, const SkPaint& paint) {
+ fDL->drawOval(oval, paint);
+}
+void SkLiteRecorder::onDrawArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) {
+ fDL->drawArc(oval, startAngle, sweepAngle, useCenter, paint);
+}
+void SkLiteRecorder::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ fDL->drawRRect(rrect, paint);
+}
+void SkLiteRecorder::onDrawDRRect(const SkRRect& out, const SkRRect& in, const SkPaint& paint) {
+ fDL->drawDRRect(out, in, paint);
+}
+
+void SkLiteRecorder::onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix) {
+ fDL->drawDrawable(drawable, matrix);
+}
+void SkLiteRecorder::onDrawPicture(const SkPicture* picture,
+ const SkMatrix* matrix,
+ const SkPaint* paint) {
+ fDL->drawPicture(picture, matrix, paint);
+}
+void SkLiteRecorder::onDrawAnnotation(const SkRect& rect, const char key[], SkData* val) {
+ fDL->drawAnnotation(rect, key, val);
+}
+
+void SkLiteRecorder::onDrawText(const void* text, size_t bytes,
+ SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ fDL->drawText(text, bytes, x, y, paint);
+}
+void SkLiteRecorder::onDrawPosText(const void* text, size_t bytes,
+ const SkPoint pos[],
+ const SkPaint& paint) {
+ fDL->drawPosText(text, bytes, pos, paint);
+}
+void SkLiteRecorder::onDrawPosTextH(const void* text, size_t bytes,
+ const SkScalar xs[], SkScalar y,
+ const SkPaint& paint) {
+ fDL->drawPosTextH(text, bytes, xs, y, paint);
+}
+void SkLiteRecorder::onDrawTextOnPath(const void* text, size_t bytes,
+ const SkPath& path, const SkMatrix* matrix,
+ const SkPaint& paint) {
+ fDL->drawTextOnPath(text, bytes, path, matrix, paint);
+}
+void SkLiteRecorder::onDrawTextRSXform(const void* text, size_t bytes,
+ const SkRSXform xform[], const SkRect* cull,
+ const SkPaint& paint) {
+ fDL->drawTextRSXform(text, bytes, xform, cull, paint);
+}
+void SkLiteRecorder::onDrawTextBlob(const SkTextBlob* blob,
+ SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ fDL->drawTextBlob(blob, x,y, paint);
+}
+
+void SkLiteRecorder::onDrawBitmap(const SkBitmap& bm,
+ SkScalar x, SkScalar y,
+ const SkPaint* paint) {
+ fDL->drawImage(SkImage::MakeFromBitmap(bm), x,y, paint);
+}
+void SkLiteRecorder::onDrawBitmapNine(const SkBitmap& bm,
+ const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint) {
+ fDL->drawImageNine(SkImage::MakeFromBitmap(bm), center, dst, paint);
+}
+void SkLiteRecorder::onDrawBitmapRect(const SkBitmap& bm,
+ const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ fDL->drawImageRect(SkImage::MakeFromBitmap(bm), src, dst, paint, constraint);
+}
+void SkLiteRecorder::onDrawBitmapLattice(const SkBitmap& bm,
+ const SkCanvas::Lattice& lattice, const SkRect& dst,
+ const SkPaint* paint) {
+ fDL->drawImageLattice(SkImage::MakeFromBitmap(bm), lattice, dst, paint);
+}
+
+void SkLiteRecorder::onDrawImage(const SkImage* img,
+ SkScalar x, SkScalar y,
+ const SkPaint* paint) {
+ fDL->drawImage(sk_ref_sp(img), x,y, paint);
+}
+void SkLiteRecorder::onDrawImageNine(const SkImage* img,
+ const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint) {
+ fDL->drawImageNine(sk_ref_sp(img), center, dst, paint);
+}
+void SkLiteRecorder::onDrawImageRect(const SkImage* img,
+ const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ fDL->drawImageRect(sk_ref_sp(img), src, dst, paint, constraint);
+}
+void SkLiteRecorder::onDrawImageLattice(const SkImage* img,
+ const SkCanvas::Lattice& lattice, const SkRect& dst,
+ const SkPaint* paint) {
+ fDL->drawImageLattice(sk_ref_sp(img), lattice, dst, paint);
+}
+
+
+void SkLiteRecorder::onDrawPatch(const SkPoint cubics[12],
+ const SkColor colors[4], const SkPoint texCoords[4],
+ SkXfermode* xfermode, const SkPaint& paint) {
+ fDL->drawPatch(cubics, colors, texCoords, xfermode, paint);
+}
+void SkLiteRecorder::onDrawPoints(SkCanvas::PointMode mode,
+ size_t count, const SkPoint pts[],
+ const SkPaint& paint) {
+ fDL->drawPoints(mode, count, pts, paint);
+}
+void SkLiteRecorder::onDrawVertices(SkCanvas::VertexMode mode,
+ int count, const SkPoint vertices[],
+ const SkPoint texs[], const SkColor colors[],
+ SkXfermode* xfermode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint) {
+ fDL->drawVertices(mode, count, vertices, texs, colors, xfermode, indices, indexCount, paint);
+}
+void SkLiteRecorder::onDrawAtlas(const SkImage* atlas,
+ const SkRSXform xforms[],
+ const SkRect texs[],
+ const SkColor colors[],
+ int count,
+ SkXfermode::Mode xfermode,
+ const SkRect* cull,
+ const SkPaint* paint) {
+ fDL->drawAtlas(atlas, xforms, texs, colors, count, xfermode, cull, paint);
+}
+
+void SkLiteRecorder::didTranslateZ(SkScalar dz) {
+ fDL->translateZ(dz);
+}
+void SkLiteRecorder::onDrawShadowedPicture(const SkPicture* picture,
+ const SkMatrix* matrix,
+ const SkPaint* paint,
+ const SkShadowParams& params) {
+ fDL->drawShadowedPicture(picture, matrix, paint, params);
+}
diff --git a/gfx/skia/skia/src/core/SkLiteRecorder.h b/gfx/skia/skia/src/core/SkLiteRecorder.h
new file mode 100644
index 000000000..be278549a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLiteRecorder.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLiteRecorder_DEFINED
+#define SkLiteRecorder_DEFINED
+
+#include "SkCanvas.h"
+
+class SkLiteDL;
+
+class SkLiteRecorder final : public SkCanvas {
+public:
+ SkLiteRecorder();
+ void reset(SkLiteDL*);
+
+ sk_sp<SkSurface> onNewSurface(const SkImageInfo&, const SkSurfaceProps&) override;
+
+#ifdef SK_SUPPORT_LEGACY_DRAWFILTER
+ SkDrawFilter* setDrawFilter(SkDrawFilter*) override;
+#endif
+
+ void willSave() override;
+ SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec&) override;
+ void willRestore() override;
+
+ void didConcat(const SkMatrix&) override;
+ void didSetMatrix(const SkMatrix&) override;
+ void didTranslate(SkScalar, SkScalar) override;
+
+ void onClipRect (const SkRect&, ClipOp, ClipEdgeStyle) override;
+ void onClipRRect (const SkRRect&, ClipOp, ClipEdgeStyle) override;
+ void onClipPath (const SkPath&, ClipOp, ClipEdgeStyle) override;
+ void onClipRegion(const SkRegion&, ClipOp) override;
+
+ void onDrawPaint (const SkPaint&) override;
+ void onDrawPath (const SkPath&, const SkPaint&) override;
+ void onDrawRect (const SkRect&, const SkPaint&) override;
+ void onDrawRegion(const SkRegion&, const SkPaint&) override;
+ void onDrawOval (const SkRect&, const SkPaint&) override;
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override;
+ void onDrawRRect (const SkRRect&, const SkPaint&) override;
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override;
+
+ void onDrawDrawable(SkDrawable*, const SkMatrix*) override;
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override;
+ void onDrawAnnotation(const SkRect&, const char[], SkData*) override;
+
+ void onDrawText (const void*, size_t, SkScalar x, SkScalar y, const SkPaint&) override;
+ void onDrawPosText (const void*, size_t, const SkPoint[], const SkPaint&) override;
+ void onDrawPosTextH (const void*, size_t, const SkScalar[], SkScalar, const SkPaint&) override;
+ void onDrawTextOnPath(const void*, size_t,
+ const SkPath&, const SkMatrix*, const SkPaint&) override;
+ void onDrawTextRSXform(const void*, size_t,
+ const SkRSXform[], const SkRect*, const SkPaint&) override;
+ void onDrawTextBlob(const SkTextBlob*, SkScalar, SkScalar, const SkPaint&) override;
+
+ void onDrawBitmap(const SkBitmap&, SkScalar, SkScalar, const SkPaint*) override;
+ void onDrawBitmapLattice(const SkBitmap&, const Lattice&, const SkRect&,
+ const SkPaint*) override;
+ void onDrawBitmapNine(const SkBitmap&, const SkIRect&, const SkRect&, const SkPaint*) override;
+ void onDrawBitmapRect(const SkBitmap&, const SkRect*, const SkRect&, const SkPaint*,
+ SrcRectConstraint) override;
+
+ void onDrawImage(const SkImage*, SkScalar, SkScalar, const SkPaint*) override;
+ void onDrawImageLattice(const SkImage*, const Lattice&, const SkRect&, const SkPaint*) override;
+ void onDrawImageNine(const SkImage*, const SkIRect&, const SkRect&, const SkPaint*) override;
+ void onDrawImageRect(const SkImage*, const SkRect*, const SkRect&, const SkPaint*,
+ SrcRectConstraint) override;
+
+ void onDrawPatch(const SkPoint[12], const SkColor[4],
+ const SkPoint[4], SkXfermode*, const SkPaint&) override;
+ void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&) override;
+ void onDrawVertices(VertexMode, int, const SkPoint[], const SkPoint[], const SkColor[],
+ SkXfermode*, const uint16_t[], int, const SkPaint&) override;
+ void onDrawAtlas(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[],
+ int, SkXfermode::Mode, const SkRect*, const SkPaint*) override;
+
+#ifdef SK_EXPERIMENTAL_SHADOWING
+ void didTranslateZ(SkScalar) override;
+ void onDrawShadowedPicture(const SkPicture*, const SkMatrix*,
+ const SkPaint*, const SkShadowParams& params) override;
+#else
+ void didTranslateZ(SkScalar);
+ void onDrawShadowedPicture(const SkPicture*, const SkMatrix*,
+ const SkPaint*, const SkShadowParams& params);
+#endif
+
+private:
+ SkLiteDL* fDL;
+};
+
+#endif//SkLiteRecorder_DEFINED
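Usage sketch (editorial, not part of the patch): SkLiteRecorder is a thin SkCanvas subclass that forwards every draw call into the SkLiteDL supplied to reset(). The SkLiteDL::New factory used below is an assumption based on the companion SkLiteDL header, which is not included in this hunk.

    #include "SkLiteDL.h"
    #include "SkLiteRecorder.h"

    // Record a few draws into a lightweight display list (SkLiteDL::New assumed).
    sk_sp<SkLiteDL> dl = SkLiteDL::New(SkRect::MakeWH(100, 100));
    SkLiteRecorder recorder;
    recorder.reset(dl.get());                          // recorder now writes into dl
    SkPaint paint;
    recorder.drawRect(SkRect::MakeWH(50, 50), paint);  // forwarded to dl->drawRect(...)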
diff --git a/gfx/skia/skia/src/core/SkLocalMatrixImageFilter.cpp b/gfx/skia/skia/src/core/SkLocalMatrixImageFilter.cpp
new file mode 100644
index 000000000..864b24b0f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLocalMatrixImageFilter.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkLocalMatrixImageFilter.h"
+#include "SkReadBuffer.h"
+#include "SkSpecialImage.h"
+#include "SkString.h"
+
+sk_sp<SkImageFilter> SkLocalMatrixImageFilter::Make(const SkMatrix& localM,
+ sk_sp<SkImageFilter> input) {
+ if (!input) {
+ return nullptr;
+ }
+ if (localM.getType() & (SkMatrix::kAffine_Mask | SkMatrix::kPerspective_Mask)) {
+ return nullptr;
+ }
+ if (localM.isIdentity()) {
+ return input;
+ }
+ return sk_sp<SkImageFilter>(new SkLocalMatrixImageFilter(localM, input));
+}
+
+SkLocalMatrixImageFilter::SkLocalMatrixImageFilter(const SkMatrix& localM,
+ sk_sp<SkImageFilter> input)
+ : INHERITED(&input, 1, nullptr)
+ , fLocalM(localM) {
+}
+
+sk_sp<SkFlattenable> SkLocalMatrixImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkMatrix lm;
+ buffer.readMatrix(&lm);
+ return SkLocalMatrixImageFilter::Make(lm, common.getInput(0));
+}
+
+void SkLocalMatrixImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeMatrix(fLocalM);
+}
+
+sk_sp<SkSpecialImage> SkLocalMatrixImageFilter::onFilterImage(SkSpecialImage* source,
+ const Context& ctx,
+ SkIPoint* offset) const {
+ Context localCtx(SkMatrix::Concat(ctx.ctm(), fLocalM), ctx.clipBounds(), ctx.cache(),
+ ctx.outputProperties());
+ return this->filterInput(0, source, localCtx, offset);
+}
+
+SkIRect SkLocalMatrixImageFilter::onFilterBounds(const SkIRect& src, const SkMatrix& matrix,
+ MapDirection direction) const {
+ return this->getInput(0)->filterBounds(src, SkMatrix::Concat(matrix, fLocalM), direction);
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkLocalMatrixImageFilter::toString(SkString* str) const {
+ str->append("SkLocalMatrixImageFilter: (");
+ str->append(")");
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkLocalMatrixImageFilter.h b/gfx/skia/skia/src/core/SkLocalMatrixImageFilter.h
new file mode 100644
index 000000000..3ec50384b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLocalMatrixImageFilter.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLocalMatrixImageFilter_DEFINED
+#define SkLocalMatrixImageFilter_DEFINED
+
+#include "SkImageFilter.h"
+
+/**
+ * Wraps another imagefilter + matrix, such that using this filter will give the same result
+ * as using the wrapped filter with the matrix applied to its context.
+ */
+class SkLocalMatrixImageFilter : public SkImageFilter {
+public:
+ static sk_sp<SkImageFilter> Make(const SkMatrix& localM, sk_sp<SkImageFilter> input);
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkLocalMatrixImageFilter)
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* Create(const SkMatrix& localM, SkImageFilter* input) {
+ return Make(localM, sk_sp<SkImageFilter>(SkSafeRef(input))).release();
+ }
+#endif
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* source, const Context&,
+ SkIPoint* offset) const override;
+ SkIRect onFilterBounds(const SkIRect& src, const SkMatrix&, MapDirection) const override;
+
+private:
+ SkLocalMatrixImageFilter(const SkMatrix& localM, sk_sp<SkImageFilter> input);
+
+ SkMatrix fLocalM;
+
+ typedef SkImageFilter INHERITED;
+};
+
+#endif
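Usage sketch (editorial): Make() wraps an existing filter so that it is evaluated with the local matrix pre-concatenated onto its context CTM; it returns the input unchanged for an identity matrix and nullptr for matrices with affine or perspective components. SkBlurImageFilter is used here only as a convenient input and is an assumption, not part of this file.

    #include "SkBlurImageFilter.h"
    #include "SkLocalMatrixImageFilter.h"

    // Evaluate a blur as if its context were translated by (10, 20).
    sk_sp<SkImageFilter> blur = SkBlurImageFilter::Make(3.0f, 3.0f, nullptr);
    sk_sp<SkImageFilter> shifted =
            SkLocalMatrixImageFilter::Make(SkMatrix::MakeTrans(10, 20), std::move(blur));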
diff --git a/gfx/skia/skia/src/core/SkLocalMatrixShader.cpp b/gfx/skia/skia/src/core/SkLocalMatrixShader.cpp
new file mode 100644
index 000000000..cdd753327
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLocalMatrixShader.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkLocalMatrixShader.h"
+
+#if SK_SUPPORT_GPU
+#include "GrFragmentProcessor.h"
+#endif
+
+#if SK_SUPPORT_GPU
+sk_sp<GrFragmentProcessor> SkLocalMatrixShader::asFragmentProcessor(const AsFPArgs& args) const {
+ SkMatrix tmp = this->getLocalMatrix();
+ if (args.fLocalMatrix) {
+ tmp.preConcat(*args.fLocalMatrix);
+ }
+ return fProxyShader->asFragmentProcessor(AsFPArgs(
+ args.fContext, args.fViewMatrix, &tmp, args.fFilterQuality, args.fDstColorSpace,
+ args.fGammaTreatment));
+}
+#endif
+
+sk_sp<SkFlattenable> SkLocalMatrixShader::CreateProc(SkReadBuffer& buffer) {
+ SkMatrix lm;
+ buffer.readMatrix(&lm);
+ auto baseShader(buffer.readShader());
+ if (!baseShader) {
+ return nullptr;
+ }
+ return baseShader->makeWithLocalMatrix(lm);
+}
+
+void SkLocalMatrixShader::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeMatrix(this->getLocalMatrix());
+ buffer.writeFlattenable(fProxyShader.get());
+}
+
+SkShader::Context* SkLocalMatrixShader::onCreateContext(const ContextRec& rec,
+ void* storage) const {
+ ContextRec newRec(rec);
+ SkMatrix tmp;
+ if (rec.fLocalMatrix) {
+ tmp.setConcat(*rec.fLocalMatrix, this->getLocalMatrix());
+ newRec.fLocalMatrix = &tmp;
+ } else {
+ newRec.fLocalMatrix = &this->getLocalMatrix();
+ }
+ return fProxyShader->createContext(newRec, storage);
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkLocalMatrixShader::toString(SkString* str) const {
+ str->append("SkLocalMatrixShader: (");
+
+ fProxyShader->toString(str);
+
+ this->INHERITED::toString(str);
+
+ str->append(")");
+}
+#endif
+
+sk_sp<SkShader> SkShader::makeWithLocalMatrix(const SkMatrix& localMatrix) const {
+ if (localMatrix.isIdentity()) {
+ return sk_ref_sp(const_cast<SkShader*>(this));
+ }
+
+ const SkMatrix* lm = &localMatrix;
+
+ SkShader* baseShader = const_cast<SkShader*>(this);
+ SkMatrix otherLocalMatrix;
+ SkAutoTUnref<SkShader> proxy(this->refAsALocalMatrixShader(&otherLocalMatrix));
+ if (proxy) {
+ otherLocalMatrix.preConcat(localMatrix);
+ lm = &otherLocalMatrix;
+ baseShader = proxy.get();
+ }
+
+ return sk_make_sp<SkLocalMatrixShader>(baseShader, *lm);
+}
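Usage sketch (editorial), assuming 'shader' is any pre-existing sk_sp<SkShader>: makeWithLocalMatrix() returns the shader itself for an identity matrix and otherwise wraps it, collapsing an existing local-matrix wrapper via refAsALocalMatrixShader() so only one level of nesting remains.

    // Scale the shader's local coordinate space by 2x.
    sk_sp<SkShader> scaled = shader->makeWithLocalMatrix(SkMatrix::MakeScale(2, 2));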
diff --git a/gfx/skia/skia/src/core/SkLocalMatrixShader.h b/gfx/skia/skia/src/core/SkLocalMatrixShader.h
new file mode 100644
index 000000000..849d9af91
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLocalMatrixShader.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLocalMatrixShader_DEFINED
+#define SkLocalMatrixShader_DEFINED
+
+#include "SkShader.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+
+class GrFragmentProcessor;
+
+class SkLocalMatrixShader : public SkShader {
+public:
+ SkLocalMatrixShader(SkShader* proxy, const SkMatrix& localMatrix)
+ : INHERITED(&localMatrix)
+ , fProxyShader(SkRef(proxy))
+ {}
+
+ GradientType asAGradient(GradientInfo* info) const override {
+ return fProxyShader->asAGradient(info);
+ }
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(const AsFPArgs&) const override;
+#endif
+
+ SkShader* refAsALocalMatrixShader(SkMatrix* localMatrix) const override {
+ if (localMatrix) {
+ *localMatrix = this->getLocalMatrix();
+ }
+ return SkRef(fProxyShader.get());
+ }
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkLocalMatrixShader)
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ Context* onCreateContext(const ContextRec&, void*) const override;
+
+ size_t onContextSize(const ContextRec& rec) const override {
+ return fProxyShader->contextSize(rec);
+ }
+
+ SkImage* onIsAImage(SkMatrix* matrix, TileMode* mode) const override {
+ return fProxyShader->isAImage(matrix, mode);
+ }
+
+#ifdef SK_SUPPORT_LEGACY_SHADER_ISABITMAP
+ bool onIsABitmap(SkBitmap* bitmap, SkMatrix* matrix, TileMode* mode) const override {
+ return fProxyShader->isABitmap(bitmap, matrix, mode);
+ }
+#endif
+
+private:
+ SkAutoTUnref<SkShader> fProxyShader;
+
+ typedef SkShader INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMD5.cpp b/gfx/skia/skia/src/core/SkMD5.cpp
new file mode 100644
index 000000000..c0c3c31fb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMD5.cpp
@@ -0,0 +1,258 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * The following code is based on the description in RFC 1321.
+ * http://www.ietf.org/rfc/rfc1321.txt
+ */
+
+//The following macros can be defined to affect the MD5 code generated.
+//SK_MD5_CLEAR_DATA causes all intermediate state to be overwritten with 0's.
+//SK_CPU_LENDIAN allows 32 bit <=> 8 bit conversions without copies (if aligned).
+//SK_CPU_FAST_UNALIGNED_ACCESS allows 32 bit <=> 8 bit conversions without copies if SK_CPU_LENDIAN.
+
+#include "SkMD5.h"
+#include <string.h>
+
+/** MD5 basic transformation. Transforms state based on block. */
+static void transform(uint32_t state[4], const uint8_t block[64]);
+
+/** Encodes input into output (4 little endian 32 bit values). */
+static void encode(uint8_t output[16], const uint32_t input[4]);
+
+/** Encodes input into output (little endian 64 bit value). */
+static void encode(uint8_t output[8], const uint64_t input);
+
+/** Decodes input (16 little endian 32 bit values) into storage, if required. */
+static const uint32_t* decode(uint32_t storage[16], const uint8_t input[64]);
+
+SkMD5::SkMD5() : byteCount(0) {
+ // These are magic numbers from the specification.
+ this->state[0] = 0x67452301;
+ this->state[1] = 0xefcdab89;
+ this->state[2] = 0x98badcfe;
+ this->state[3] = 0x10325476;
+}
+
+bool SkMD5::write(const void* buf, size_t inputLength) {
+ const uint8_t* input = reinterpret_cast<const uint8_t*>(buf);
+ unsigned int bufferIndex = (unsigned int)(this->byteCount & 0x3F);
+ unsigned int bufferAvailable = 64 - bufferIndex;
+
+ unsigned int inputIndex;
+ if (inputLength >= bufferAvailable) {
+ if (bufferIndex) {
+ memcpy(&this->buffer[bufferIndex], input, bufferAvailable);
+ transform(this->state, this->buffer);
+ inputIndex = bufferAvailable;
+ } else {
+ inputIndex = 0;
+ }
+
+ for (; inputIndex + 63 < inputLength; inputIndex += 64) {
+ transform(this->state, &input[inputIndex]);
+ }
+
+ bufferIndex = 0;
+ } else {
+ inputIndex = 0;
+ }
+
+ memcpy(&this->buffer[bufferIndex], &input[inputIndex], inputLength - inputIndex);
+
+ this->byteCount += inputLength;
+ return true;
+}
+
+void SkMD5::finish(Digest& digest) {
+ // Get the number of bits before padding.
+ uint8_t bits[8];
+ encode(bits, this->byteCount << 3);
+
+ // Pad out to 56 mod 64.
+ unsigned int bufferIndex = (unsigned int)(this->byteCount & 0x3F);
+ unsigned int paddingLength = (bufferIndex < 56) ? (56 - bufferIndex) : (120 - bufferIndex);
+ static uint8_t PADDING[64] = {
+ 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ };
+ (void)this->write(PADDING, paddingLength);
+
+ // Append length (length before padding, will cause final update).
+ (void)this->write(bits, 8);
+
+ // Write out digest.
+ encode(digest.data, this->state);
+
+#if defined(SK_MD5_CLEAR_DATA)
+ // Clear state.
+ memset(this, 0, sizeof(*this));
+#endif
+}
+
+struct F { uint32_t operator()(uint32_t x, uint32_t y, uint32_t z) {
+ //return (x & y) | ((~x) & z);
+ return ((y ^ z) & x) ^ z; //equivalent but faster
+}};
+
+struct G { uint32_t operator()(uint32_t x, uint32_t y, uint32_t z) {
+ return (x & z) | (y & (~z));
+ //return ((x ^ y) & z) ^ y; //equivalent but slower
+}};
+
+struct H { uint32_t operator()(uint32_t x, uint32_t y, uint32_t z) {
+ return x ^ y ^ z;
+}};
+
+struct I { uint32_t operator()(uint32_t x, uint32_t y, uint32_t z) {
+ return y ^ (x | (~z));
+}};
+
+/** Rotates x left n bits. */
+static inline uint32_t rotate_left(uint32_t x, uint8_t n) {
+ return (x << n) | (x >> (32 - n));
+}
+
+template <typename T>
+static inline void operation(T operation, uint32_t& a, uint32_t b, uint32_t c, uint32_t d,
+ uint32_t x, uint8_t s, uint32_t t) {
+ a = b + rotate_left(a + operation(b, c, d) + x + t, s);
+}
+
+static void transform(uint32_t state[4], const uint8_t block[64]) {
+ uint32_t a = state[0], b = state[1], c = state[2], d = state[3];
+
+ uint32_t storage[16];
+ const uint32_t* X = decode(storage, block);
+
+ // Round 1
+ operation(F(), a, b, c, d, X[ 0], 7, 0xd76aa478); // 1
+ operation(F(), d, a, b, c, X[ 1], 12, 0xe8c7b756); // 2
+ operation(F(), c, d, a, b, X[ 2], 17, 0x242070db); // 3
+ operation(F(), b, c, d, a, X[ 3], 22, 0xc1bdceee); // 4
+ operation(F(), a, b, c, d, X[ 4], 7, 0xf57c0faf); // 5
+ operation(F(), d, a, b, c, X[ 5], 12, 0x4787c62a); // 6
+ operation(F(), c, d, a, b, X[ 6], 17, 0xa8304613); // 7
+ operation(F(), b, c, d, a, X[ 7], 22, 0xfd469501); // 8
+ operation(F(), a, b, c, d, X[ 8], 7, 0x698098d8); // 9
+ operation(F(), d, a, b, c, X[ 9], 12, 0x8b44f7af); // 10
+ operation(F(), c, d, a, b, X[10], 17, 0xffff5bb1); // 11
+ operation(F(), b, c, d, a, X[11], 22, 0x895cd7be); // 12
+ operation(F(), a, b, c, d, X[12], 7, 0x6b901122); // 13
+ operation(F(), d, a, b, c, X[13], 12, 0xfd987193); // 14
+ operation(F(), c, d, a, b, X[14], 17, 0xa679438e); // 15
+ operation(F(), b, c, d, a, X[15], 22, 0x49b40821); // 16
+
+ // Round 2
+ operation(G(), a, b, c, d, X[ 1], 5, 0xf61e2562); // 17
+ operation(G(), d, a, b, c, X[ 6], 9, 0xc040b340); // 18
+ operation(G(), c, d, a, b, X[11], 14, 0x265e5a51); // 19
+ operation(G(), b, c, d, a, X[ 0], 20, 0xe9b6c7aa); // 20
+ operation(G(), a, b, c, d, X[ 5], 5, 0xd62f105d); // 21
+ operation(G(), d, a, b, c, X[10], 9, 0x2441453); // 22
+ operation(G(), c, d, a, b, X[15], 14, 0xd8a1e681); // 23
+ operation(G(), b, c, d, a, X[ 4], 20, 0xe7d3fbc8); // 24
+ operation(G(), a, b, c, d, X[ 9], 5, 0x21e1cde6); // 25
+ operation(G(), d, a, b, c, X[14], 9, 0xc33707d6); // 26
+ operation(G(), c, d, a, b, X[ 3], 14, 0xf4d50d87); // 27
+ operation(G(), b, c, d, a, X[ 8], 20, 0x455a14ed); // 28
+ operation(G(), a, b, c, d, X[13], 5, 0xa9e3e905); // 29
+ operation(G(), d, a, b, c, X[ 2], 9, 0xfcefa3f8); // 30
+ operation(G(), c, d, a, b, X[ 7], 14, 0x676f02d9); // 31
+ operation(G(), b, c, d, a, X[12], 20, 0x8d2a4c8a); // 32
+
+ // Round 3
+ operation(H(), a, b, c, d, X[ 5], 4, 0xfffa3942); // 33
+ operation(H(), d, a, b, c, X[ 8], 11, 0x8771f681); // 34
+ operation(H(), c, d, a, b, X[11], 16, 0x6d9d6122); // 35
+ operation(H(), b, c, d, a, X[14], 23, 0xfde5380c); // 36
+ operation(H(), a, b, c, d, X[ 1], 4, 0xa4beea44); // 37
+ operation(H(), d, a, b, c, X[ 4], 11, 0x4bdecfa9); // 38
+ operation(H(), c, d, a, b, X[ 7], 16, 0xf6bb4b60); // 39
+ operation(H(), b, c, d, a, X[10], 23, 0xbebfbc70); // 40
+ operation(H(), a, b, c, d, X[13], 4, 0x289b7ec6); // 41
+ operation(H(), d, a, b, c, X[ 0], 11, 0xeaa127fa); // 42
+ operation(H(), c, d, a, b, X[ 3], 16, 0xd4ef3085); // 43
+ operation(H(), b, c, d, a, X[ 6], 23, 0x4881d05); // 44
+ operation(H(), a, b, c, d, X[ 9], 4, 0xd9d4d039); // 45
+ operation(H(), d, a, b, c, X[12], 11, 0xe6db99e5); // 46
+ operation(H(), c, d, a, b, X[15], 16, 0x1fa27cf8); // 47
+ operation(H(), b, c, d, a, X[ 2], 23, 0xc4ac5665); // 48
+
+ // Round 4
+ operation(I(), a, b, c, d, X[ 0], 6, 0xf4292244); // 49
+ operation(I(), d, a, b, c, X[ 7], 10, 0x432aff97); // 50
+ operation(I(), c, d, a, b, X[14], 15, 0xab9423a7); // 51
+ operation(I(), b, c, d, a, X[ 5], 21, 0xfc93a039); // 52
+ operation(I(), a, b, c, d, X[12], 6, 0x655b59c3); // 53
+ operation(I(), d, a, b, c, X[ 3], 10, 0x8f0ccc92); // 54
+ operation(I(), c, d, a, b, X[10], 15, 0xffeff47d); // 55
+ operation(I(), b, c, d, a, X[ 1], 21, 0x85845dd1); // 56
+ operation(I(), a, b, c, d, X[ 8], 6, 0x6fa87e4f); // 57
+ operation(I(), d, a, b, c, X[15], 10, 0xfe2ce6e0); // 58
+ operation(I(), c, d, a, b, X[ 6], 15, 0xa3014314); // 59
+ operation(I(), b, c, d, a, X[13], 21, 0x4e0811a1); // 60
+ operation(I(), a, b, c, d, X[ 4], 6, 0xf7537e82); // 61
+ operation(I(), d, a, b, c, X[11], 10, 0xbd3af235); // 62
+ operation(I(), c, d, a, b, X[ 2], 15, 0x2ad7d2bb); // 63
+ operation(I(), b, c, d, a, X[ 9], 21, 0xeb86d391); // 64
+
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+
+#if defined(SK_MD5_CLEAR_DATA)
+ // Clear sensitive information.
+ if (X == storage) {
+ memset(storage, 0, sizeof(storage));
+ }
+#endif
+}
+
+static void encode(uint8_t output[16], const uint32_t input[4]) {
+ for (size_t i = 0, j = 0; i < 4; i++, j += 4) {
+ output[j ] = (uint8_t) (input[i] & 0xff);
+ output[j+1] = (uint8_t)((input[i] >> 8) & 0xff);
+ output[j+2] = (uint8_t)((input[i] >> 16) & 0xff);
+ output[j+3] = (uint8_t)((input[i] >> 24) & 0xff);
+ }
+}
+
+static void encode(uint8_t output[8], const uint64_t input) {
+ output[0] = (uint8_t) (input & 0xff);
+ output[1] = (uint8_t)((input >> 8) & 0xff);
+ output[2] = (uint8_t)((input >> 16) & 0xff);
+ output[3] = (uint8_t)((input >> 24) & 0xff);
+ output[4] = (uint8_t)((input >> 32) & 0xff);
+ output[5] = (uint8_t)((input >> 40) & 0xff);
+ output[6] = (uint8_t)((input >> 48) & 0xff);
+ output[7] = (uint8_t)((input >> 56) & 0xff);
+}
+
+static inline bool is_aligned(const void *pointer, size_t byte_count) {
+ return reinterpret_cast<uintptr_t>(pointer) % byte_count == 0;
+}
+
+static const uint32_t* decode(uint32_t storage[16], const uint8_t input[64]) {
+#if defined(SK_CPU_LENDIAN) && defined(SK_CPU_FAST_UNALIGNED_ACCESS)
+ return reinterpret_cast<const uint32_t*>(input);
+#else
+#if defined(SK_CPU_LENDIAN)
+ if (is_aligned(input, 4)) {
+ return reinterpret_cast<const uint32_t*>(input);
+ }
+#endif
+ for (size_t i = 0, j = 0; j < 64; i++, j += 4) {
+ storage[i] = ((uint32_t)input[j ]) |
+ (((uint32_t)input[j+1]) << 8) |
+ (((uint32_t)input[j+2]) << 16) |
+ (((uint32_t)input[j+3]) << 24);
+ }
+ return storage;
+#endif
+}
diff --git a/gfx/skia/skia/src/core/SkMD5.h b/gfx/skia/skia/src/core/SkMD5.h
new file mode 100644
index 000000000..8838d47ac
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMD5.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMD5_DEFINED
+#define SkMD5_DEFINED
+
+#include "SkStream.h"
+
+/* Calculate a 128-bit MD5 message-digest of the bytes sent to this stream. */
+class SkMD5 : public SkWStream {
+public:
+ SkMD5();
+
+ /** Processes input, adding it to the digest.
+ Calling this after finish is undefined. */
+ bool write(const void* buffer, size_t size) final;
+
+ size_t bytesWritten() const final { return SkToSizeT(this->byteCount); }
+
+ struct Digest {
+ uint8_t data[16];
+ bool operator ==(Digest const& other) const {
+ return 0 == memcmp(data, other.data, sizeof(data));
+ }
+ bool operator !=(Digest const& other) const { return !(*this == other); }
+ };
+
+ /** Computes and returns the digest. */
+ void finish(Digest& digest);
+
+private:
+ uint64_t byteCount; // number of bytes, modulo 2^64
+ uint32_t state[4]; // state (ABCD)
+ uint8_t buffer[64]; // input buffer
+};
+
+#endif
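Usage sketch (editorial), assuming 'buffer' and 'length' describe an existing byte buffer:

    #include "SkMD5.h"

    SkMD5 md5;
    md5.write(buffer, length);   // may be called repeatedly before finish()
    SkMD5::Digest digest;
    md5.finish(digest);          // digest.data now holds the 16-byte MD5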
diff --git a/gfx/skia/skia/src/core/SkMSAN.h b/gfx/skia/skia/src/core/SkMSAN.h
new file mode 100644
index 000000000..1f32e5356
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMSAN.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMSAN_DEFINED
+#define SkMSAN_DEFINED
+
+#include "SkTypes.h"
+
+// Typically declared in LLVM's msan_interface.h. Easier for us to just re-declare.
+extern "C" {
+ void __msan_check_mem_is_initialized(const volatile void*, size_t);
+ void __msan_unpoison (const volatile void*, size_t);
+}
+
+// Code that requires initialized inputs can call this to make it clear that
+// the blame for use of uninitialized data belongs further up the call stack.
+static inline void sk_msan_assert_initialized(const void* begin, const void* end) {
+#if defined(__has_feature)
+ #if __has_feature(memory_sanitizer)
+ __msan_check_mem_is_initialized(begin, (const char*)end - (const char*)begin);
+ #endif
+#endif
+}
+
+// Lie to MSAN that this range of memory is initialized.
+// This can hide serious problems if overused. Every use of this should refer to a bug.
+static inline void sk_msan_mark_initialized(const void* begin, const void* end, const char* skbug) {
+ SkASSERT(skbug && 0 != strcmp(skbug, ""));
+#if defined(__has_feature)
+ #if __has_feature(memory_sanitizer)
+ __msan_unpoison(begin, (const char*)end - (const char*)begin);
+ #endif
+#endif
+}
+
+#endif//SkMSAN_DEFINED
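Usage sketch (editorial): the producer function below is a placeholder. sk_msan_assert_initialized() shifts MSAN blame for uninitialized reads onto the producer of the data; sk_msan_mark_initialized() should only be used with a real skbug reference, as the comment above requires.

    #include "SkMSAN.h"

    char scratch[64];
    fill_scratch(scratch, sizeof(scratch));   // hypothetical producer
    sk_msan_assert_initialized(scratch, scratch + sizeof(scratch));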
diff --git a/gfx/skia/skia/src/core/SkMakeUnique.h b/gfx/skia/skia/src/core/SkMakeUnique.h
new file mode 100644
index 000000000..188eb05ff
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMakeUnique.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMakeUnique_DEFINED
+#define SkMakeUnique_DEFINED
+
+#include <memory>
+
+namespace skstd {
+
+// std::make_unique is in C++14
+template<typename T, typename... Args>
+std::unique_ptr<T> make_unique(Args&&... args) {
+ return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
+}
+
+}
+
+#endif // SkMakeUnique_DEFINED
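Usage sketch (editorial):

    #include "SkMakeUnique.h"
    #include "SkPaint.h"

    // Same semantics as std::make_unique, available before C++14.
    std::unique_ptr<SkPaint> paint = skstd::make_unique<SkPaint>();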
diff --git a/gfx/skia/skia/src/core/SkMallocPixelRef.cpp b/gfx/skia/skia/src/core/SkMallocPixelRef.cpp
new file mode 100644
index 000000000..fffc04484
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMallocPixelRef.cpp
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMallocPixelRef.h"
+#include "SkBitmap.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+
+// assumes ptr was allocated via sk_malloc
+static void sk_free_releaseproc(void* ptr, void*) {
+ sk_free(ptr);
+}
+
+static bool is_valid(const SkImageInfo& info, SkColorTable* ctable) {
+ if (info.width() < 0 || info.height() < 0 ||
+ (unsigned)info.colorType() > (unsigned)kLastEnum_SkColorType ||
+ (unsigned)info.alphaType() > (unsigned)kLastEnum_SkAlphaType)
+ {
+ return false;
+ }
+
+ // these seem like good checks, but currently we have (at least) tests
+ // that expect the pixelref to succeed even when there is a mismatch
+ // with colortables. fix?
+#if 0
+ if (kIndex8_SkColorType == info.fColorType && nullptr == ctable) {
+ return false;
+ }
+ if (kIndex8_SkColorType != info.fColorType && ctable) {
+ return false;
+ }
+#endif
+ return true;
+}
+
+SkMallocPixelRef* SkMallocPixelRef::NewDirect(const SkImageInfo& info,
+ void* addr,
+ size_t rowBytes,
+ SkColorTable* ctable) {
+ if (!is_valid(info, ctable)) {
+ return nullptr;
+ }
+ return new SkMallocPixelRef(info, addr, rowBytes, ctable, nullptr, nullptr);
+}
+
+
+ SkMallocPixelRef* SkMallocPixelRef::NewUsing(void*(*alloc)(size_t),
+ const SkImageInfo& info,
+ size_t requestedRowBytes,
+ SkColorTable* ctable) {
+ if (!is_valid(info, ctable)) {
+ return nullptr;
+ }
+
+ // only want to permit 31 bits of rowBytes
+ int64_t minRB = (int64_t)info.minRowBytes64();
+ if (minRB < 0 || !sk_64_isS32(minRB)) {
+ return nullptr; // allocation will be too large
+ }
+ if (requestedRowBytes > 0 && (int32_t)requestedRowBytes < minRB) {
+ return nullptr; // cannot meet requested rowbytes
+ }
+
+ int32_t rowBytes;
+ if (requestedRowBytes) {
+ rowBytes = SkToS32(requestedRowBytes);
+ } else {
+ rowBytes = minRB;
+ }
+
+ int64_t bigSize = (int64_t)info.height() * rowBytes;
+ if (!sk_64_isS32(bigSize)) {
+ return nullptr;
+ }
+
+ size_t size = sk_64_asS32(bigSize);
+ SkASSERT(size >= info.getSafeSize(rowBytes));
+ void* addr = alloc(size);
+ if (nullptr == addr) {
+ return nullptr;
+ }
+
+ return new SkMallocPixelRef(info, addr, rowBytes, ctable, sk_free_releaseproc, nullptr);
+}
+
+SkMallocPixelRef* SkMallocPixelRef::NewAllocate(const SkImageInfo& info,
+ size_t rowBytes,
+ SkColorTable* ctable) {
+ auto sk_malloc_nothrow = [](size_t size) { return sk_malloc_flags(size, 0); };
+ return NewUsing(sk_malloc_nothrow, info, rowBytes, ctable);
+}
+
+SkMallocPixelRef* SkMallocPixelRef::NewZeroed(const SkImageInfo& info,
+ size_t rowBytes,
+ SkColorTable* ctable) {
+ return NewUsing(sk_calloc, info, rowBytes, ctable);
+}
+
+SkMallocPixelRef* SkMallocPixelRef::NewWithProc(const SkImageInfo& info,
+ size_t rowBytes,
+ SkColorTable* ctable,
+ void* addr,
+ SkMallocPixelRef::ReleaseProc proc,
+ void* context) {
+ if (!is_valid(info, ctable)) {
+ return nullptr;
+ }
+ return new SkMallocPixelRef(info, addr, rowBytes, ctable, proc, context);
+}
+
+static void sk_data_releaseproc(void*, void* dataPtr) {
+ (static_cast<SkData*>(dataPtr))->unref();
+}
+
+SkMallocPixelRef* SkMallocPixelRef::NewWithData(const SkImageInfo& info,
+ size_t rowBytes,
+ SkColorTable* ctable,
+ SkData* data) {
+ SkASSERT(data != nullptr);
+ if (!is_valid(info, ctable)) {
+ return nullptr;
+ }
+ if ((rowBytes < info.minRowBytes())
+ || (data->size() < info.getSafeSize(rowBytes))) {
+ return nullptr;
+ }
+ data->ref();
+ SkMallocPixelRef* pr =
+ new SkMallocPixelRef(info, const_cast<void*>(data->data()), rowBytes, ctable,
+ sk_data_releaseproc, static_cast<void*>(data));
+ SkASSERT(pr != nullptr);
+ // We rely on the immutability of the pixels to make the
+ // const_cast okay.
+ pr->setImmutable();
+ return pr;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkMallocPixelRef::SkMallocPixelRef(const SkImageInfo& info, void* storage,
+ size_t rowBytes, SkColorTable* ctable,
+ bool ownsPixels)
+ : INHERITED(info)
+ , fReleaseProc(ownsPixels ? sk_free_releaseproc : nullptr)
+ , fReleaseProcContext(nullptr) {
+ // This constructor is now DEPRECATED.
+ SkASSERT(is_valid(info, ctable));
+ SkASSERT(rowBytes >= info.minRowBytes());
+
+ if (kIndex_8_SkColorType != info.colorType()) {
+ ctable = nullptr;
+ }
+
+ fStorage = storage;
+ fCTable = ctable;
+ fRB = rowBytes;
+ SkSafeRef(ctable);
+
+ this->setPreLocked(fStorage, rowBytes, fCTable);
+}
+
+SkMallocPixelRef::SkMallocPixelRef(const SkImageInfo& info, void* storage,
+ size_t rowBytes, SkColorTable* ctable,
+ SkMallocPixelRef::ReleaseProc proc,
+ void* context)
+ : INHERITED(info)
+ , fReleaseProc(proc)
+ , fReleaseProcContext(context)
+{
+ SkASSERT(is_valid(info, ctable));
+ SkASSERT(rowBytes >= info.minRowBytes());
+
+ if (kIndex_8_SkColorType != info.colorType()) {
+ ctable = nullptr;
+ }
+
+ fStorage = storage;
+ fCTable = ctable;
+ fRB = rowBytes;
+ SkSafeRef(ctable);
+
+ this->setPreLocked(fStorage, rowBytes, fCTable);
+}
+
+
+SkMallocPixelRef::~SkMallocPixelRef() {
+ SkSafeUnref(fCTable);
+ if (fReleaseProc != nullptr) {
+ fReleaseProc(fStorage, fReleaseProcContext);
+ }
+}
+
+bool SkMallocPixelRef::onNewLockPixels(LockRec* rec) {
+ rec->fPixels = fStorage;
+ rec->fRowBytes = fRB;
+ rec->fColorTable = fCTable;
+ return true;
+}
+
+void SkMallocPixelRef::onUnlockPixels() {
+ // nothing to do
+}
+
+size_t SkMallocPixelRef::getAllocatedSizeInBytes() const {
+ return this->info().getSafeSize(fRB);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPixelRef* SkMallocPixelRef::PRFactory::create(const SkImageInfo& info, size_t rowBytes,
+ SkColorTable* ctable) {
+ return SkMallocPixelRef::NewAllocate(info, rowBytes, ctable);
+}
+
+SkPixelRef* SkMallocPixelRef::ZeroedPRFactory::create(const SkImageInfo& info, size_t rowBytes,
+ SkColorTable* ctable) {
+ return SkMallocPixelRef::NewZeroed(info, rowBytes, ctable);
+}
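Usage sketch (editorial): the New* factories return a bare pointer that the caller owns and must unref.

    #include "SkMallocPixelRef.h"

    // Allocate zero-initialized N32 pixels managed by a pixel ref.
    SkImageInfo info = SkImageInfo::MakeN32Premul(64, 64);
    SkMallocPixelRef* pr = SkMallocPixelRef::NewZeroed(info, info.minRowBytes(), nullptr);
    SkSafeUnref(pr);   // release the caller's reference when done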
diff --git a/gfx/skia/skia/src/core/SkMask.cpp b/gfx/skia/skia/src/core/SkMask.cpp
new file mode 100644
index 000000000..111508074
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMask.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMask.h"
+
+//#define TRACK_SKMASK_LIFETIME
+
+/** returns the product if it is positive and fits in 31 bits. Otherwise this
+ returns 0.
+ */
+static int32_t safeMul32(int32_t a, int32_t b) {
+ int64_t size = sk_64_mul(a, b);
+ if (size > 0 && sk_64_isS32(size)) {
+ return sk_64_asS32(size);
+ }
+ return 0;
+}
+
+size_t SkMask::computeImageSize() const {
+ return safeMul32(fBounds.height(), fRowBytes);
+}
+
+size_t SkMask::computeTotalImageSize() const {
+ size_t size = this->computeImageSize();
+ if (fFormat == SkMask::k3D_Format) {
+ size = safeMul32(SkToS32(size), 3);
+ }
+ return size;
+}
+
+#ifdef TRACK_SKMASK_LIFETIME
+ static int gCounter;
+#endif
+
+/** We explicitly use this allocator for SkBitmap pixels, so that we can
+ freely assign memory allocated by one class to the other.
+*/
+uint8_t* SkMask::AllocImage(size_t size) {
+#ifdef TRACK_SKMASK_LIFETIME
+ SkDebugf("SkMask::AllocImage %d\n", gCounter++);
+#endif
+ return (uint8_t*)sk_malloc_throw(SkAlign4(size));
+}
+
+/** We explicitly use this allocator for SkBitmap pixels, so that we can
+ freely assign memory allocated by one class to the other.
+*/
+void SkMask::FreeImage(void* image) {
+#ifdef TRACK_SKMASK_LIFETIME
+ if (image) {
+ SkDebugf("SkMask::FreeImage %d\n", --gCounter);
+ }
+#endif
+ sk_free(image);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static const int gMaskFormatToShift[] = {
+ ~0, // BW -- not supported
+ 0, // A8
+ 0, // 3D
+ 2, // ARGB32
+ 1, // LCD16
+};
+
+static int maskFormatToShift(SkMask::Format format) {
+ SkASSERT((unsigned)format < SK_ARRAY_COUNT(gMaskFormatToShift));
+ SkASSERT(SkMask::kBW_Format != format);
+ return gMaskFormatToShift[format];
+}
+
+void* SkMask::getAddr(int x, int y) const {
+ SkASSERT(kBW_Format != fFormat);
+ SkASSERT(fBounds.contains(x, y));
+ SkASSERT(fImage);
+
+ char* addr = (char*)fImage;
+ addr += (y - fBounds.fTop) * fRowBytes;
+ addr += (x - fBounds.fLeft) << maskFormatToShift(fFormat);
+ return addr;
+}
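Usage sketch (editorial): AllocImage()/FreeImage() pair with computeImageSize() to manage a mask's backing store; getAddr8() is declared in SkMask.h, which is not shown in this hunk.

    SkMask m;
    m.fBounds   = SkIRect::MakeWH(16, 16);
    m.fRowBytes = 16;
    m.fFormat   = SkMask::kA8_Format;
    m.fImage    = SkMask::AllocImage(m.computeImageSize());
    *m.getAddr8(3, 5) = 0xFF;          // write one pixel; (3,5) is inside fBounds
    SkMask::FreeImage(m.fImage);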
diff --git a/gfx/skia/skia/src/core/SkMaskCache.cpp b/gfx/skia/skia/src/core/SkMaskCache.cpp
new file mode 100644
index 000000000..eb7c8f094
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskCache.cpp
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMaskCache.h"
+
+#define CHECK_LOCAL(localCache, localName, globalName, ...) \
+ ((localCache) ? localCache->localName(__VA_ARGS__) : SkResourceCache::globalName(__VA_ARGS__))
+
+struct MaskValue {
+ SkMask fMask;
+ SkCachedData* fData;
+};
+
+namespace {
+static unsigned gRRectBlurKeyNamespaceLabel;
+
+struct RRectBlurKey : public SkResourceCache::Key {
+public:
+ RRectBlurKey(SkScalar sigma, const SkRRect& rrect, SkBlurStyle style, SkBlurQuality quality)
+ : fSigma(sigma)
+ , fStyle(style)
+ , fQuality(quality)
+ , fRRect(rrect)
+ {
+ this->init(&gRRectBlurKeyNamespaceLabel, 0,
+ sizeof(fSigma) + sizeof(fStyle) + sizeof(fQuality) + sizeof(fRRect));
+ }
+
+ SkScalar fSigma;
+ int32_t fStyle;
+ int32_t fQuality;
+ SkRRect fRRect;
+};
+
+struct RRectBlurRec : public SkResourceCache::Rec {
+ RRectBlurRec(RRectBlurKey key, const SkMask& mask, SkCachedData* data)
+ : fKey(key)
+ {
+ fValue.fMask = mask;
+ fValue.fData = data;
+ fValue.fData->attachToCacheAndRef();
+ }
+ ~RRectBlurRec() {
+ fValue.fData->detachFromCacheAndUnref();
+ }
+
+ RRectBlurKey fKey;
+ MaskValue fValue;
+
+ const Key& getKey() const override { return fKey; }
+ size_t bytesUsed() const override { return sizeof(*this) + fValue.fData->size(); }
+ const char* getCategory() const override { return "rrect-blur"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override {
+ return fValue.fData->diagnostic_only_getDiscardable();
+ }
+
+ static bool Visitor(const SkResourceCache::Rec& baseRec, void* contextData) {
+ const RRectBlurRec& rec = static_cast<const RRectBlurRec&>(baseRec);
+ MaskValue* result = (MaskValue*)contextData;
+
+ SkCachedData* tmpData = rec.fValue.fData;
+ tmpData->ref();
+ if (nullptr == tmpData->data()) {
+ tmpData->unref();
+ return false;
+ }
+ *result = rec.fValue;
+ return true;
+ }
+};
+} // namespace
+
+SkCachedData* SkMaskCache::FindAndRef(SkScalar sigma, SkBlurStyle style, SkBlurQuality quality,
+ const SkRRect& rrect, SkMask* mask, SkResourceCache* localCache) {
+ MaskValue result;
+ RRectBlurKey key(sigma, rrect, style, quality);
+ if (!CHECK_LOCAL(localCache, find, Find, key, RRectBlurRec::Visitor, &result)) {
+ return nullptr;
+ }
+
+ *mask = result.fMask;
+ mask->fImage = (uint8_t*)(result.fData->data());
+ return result.fData;
+}
+
+void SkMaskCache::Add(SkScalar sigma, SkBlurStyle style, SkBlurQuality quality,
+ const SkRRect& rrect, const SkMask& mask, SkCachedData* data,
+ SkResourceCache* localCache) {
+ RRectBlurKey key(sigma, rrect, style, quality);
+ return CHECK_LOCAL(localCache, add, Add, new RRectBlurRec(key, mask, data));
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+static unsigned gRectsBlurKeyNamespaceLabel;
+
+struct RectsBlurKey : public SkResourceCache::Key {
+public:
+ RectsBlurKey(SkScalar sigma, SkBlurStyle style, SkBlurQuality quality,
+ const SkRect rects[], int count)
+ : fSigma(sigma)
+ , fStyle(style)
+ , fQuality(quality)
+ {
+ SkASSERT(1 == count || 2 == count);
+ SkIRect ir;
+ rects[0].roundOut(&ir);
+ fSizes[0] = SkSize::Make(rects[0].width(), rects[0].height());
+ if (2 == count) {
+ fSizes[1] = SkSize::Make(rects[1].width(), rects[1].height());
+ fSizes[2] = SkSize::Make(rects[0].x() - rects[1].x(), rects[0].y() - rects[1].y());
+ } else {
+ fSizes[1] = SkSize::Make(0, 0);
+ fSizes[2] = SkSize::Make(0, 0);
+ }
+ fSizes[3] = SkSize::Make(rects[0].x() - ir.x(), rects[0].y() - ir.y());
+
+ this->init(&gRectsBlurKeyNamespaceLabel, 0,
+ sizeof(fSigma) + sizeof(fStyle) + sizeof(fQuality) + sizeof(fSizes));
+ }
+
+ SkScalar fSigma;
+ int32_t fStyle;
+ int32_t fQuality;
+ SkSize fSizes[4];
+};
+
+struct RectsBlurRec : public SkResourceCache::Rec {
+ RectsBlurRec(RectsBlurKey key, const SkMask& mask, SkCachedData* data)
+ : fKey(key)
+ {
+ fValue.fMask = mask;
+ fValue.fData = data;
+ fValue.fData->attachToCacheAndRef();
+ }
+ ~RectsBlurRec() {
+ fValue.fData->detachFromCacheAndUnref();
+ }
+
+ RectsBlurKey fKey;
+ MaskValue fValue;
+
+ const Key& getKey() const override { return fKey; }
+ size_t bytesUsed() const override { return sizeof(*this) + fValue.fData->size(); }
+ const char* getCategory() const override { return "rects-blur"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override {
+ return fValue.fData->diagnostic_only_getDiscardable();
+ }
+
+ static bool Visitor(const SkResourceCache::Rec& baseRec, void* contextData) {
+ const RectsBlurRec& rec = static_cast<const RectsBlurRec&>(baseRec);
+ MaskValue* result = static_cast<MaskValue*>(contextData);
+
+ SkCachedData* tmpData = rec.fValue.fData;
+ tmpData->ref();
+ if (nullptr == tmpData->data()) {
+ tmpData->unref();
+ return false;
+ }
+ *result = rec.fValue;
+ return true;
+ }
+};
+} // namespace
+
+SkCachedData* SkMaskCache::FindAndRef(SkScalar sigma, SkBlurStyle style, SkBlurQuality quality,
+ const SkRect rects[], int count, SkMask* mask,
+ SkResourceCache* localCache) {
+ MaskValue result;
+ RectsBlurKey key(sigma, style, quality, rects, count);
+ if (!CHECK_LOCAL(localCache, find, Find, key, RectsBlurRec::Visitor, &result)) {
+ return nullptr;
+ }
+
+ *mask = result.fMask;
+ mask->fImage = (uint8_t*)(result.fData->data());
+ return result.fData;
+}
+
+void SkMaskCache::Add(SkScalar sigma, SkBlurStyle style, SkBlurQuality quality,
+ const SkRect rects[], int count, const SkMask& mask, SkCachedData* data,
+ SkResourceCache* localCache) {
+ RectsBlurKey key(sigma, style, quality, rects, count);
+ return CHECK_LOCAL(localCache, add, Add, new RectsBlurRec(key, mask, data));
+}
diff --git a/gfx/skia/skia/src/core/SkMaskCache.h b/gfx/skia/skia/src/core/SkMaskCache.h
new file mode 100644
index 000000000..9df1bf860
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskCache.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMaskCache_DEFINED
+#define SkMaskCache_DEFINED
+
+#include "SkBlurTypes.h"
+#include "SkCachedData.h"
+#include "SkMask.h"
+#include "SkRect.h"
+#include "SkResourceCache.h"
+#include "SkRRect.h"
+
+class SkMaskCache {
+public:
+ /**
+ * On success, return a ref to the SkCachedData that holds the pixels, and have mask
+ * already point to that memory.
+ *
+ * On failure, return nullptr.
+ */
+ static SkCachedData* FindAndRef(SkScalar sigma, SkBlurStyle style, SkBlurQuality quality,
+ const SkRRect& rrect, SkMask* mask,
+ SkResourceCache* localCache = nullptr);
+ static SkCachedData* FindAndRef(SkScalar sigma, SkBlurStyle style, SkBlurQuality quality,
+ const SkRect rects[], int count, SkMask* mask,
+ SkResourceCache* localCache = nullptr);
+
+ /**
+ * Add a mask and its pixel-data to the cache.
+ */
+ static void Add(SkScalar sigma, SkBlurStyle style, SkBlurQuality quality,
+ const SkRRect& rrect, const SkMask& mask, SkCachedData* data,
+ SkResourceCache* localCache = nullptr);
+ static void Add(SkScalar sigma, SkBlurStyle style, SkBlurQuality quality,
+ const SkRect rects[], int count, const SkMask& mask, SkCachedData* data,
+ SkResourceCache* localCache = nullptr);
+};
+
+#endif
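Usage sketch (editorial): 'sigma', 'rrect', and build_blurred_mask() are placeholders; the pattern is look up first, build and publish on a miss, and unref the returned SkCachedData once the pixels are no longer needed.

    SkMask mask;
    SkCachedData* data = SkMaskCache::FindAndRef(sigma, kNormal_SkBlurStyle,
                                                 kHigh_SkBlurQuality, rrect, &mask);
    if (!data) {
        data = build_blurred_mask(sigma, rrect, &mask);   // hypothetical builder
        SkMaskCache::Add(sigma, kNormal_SkBlurStyle, kHigh_SkBlurQuality,
                         rrect, mask, data);
    }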
diff --git a/gfx/skia/skia/src/core/SkMaskFilter.cpp b/gfx/skia/skia/src/core/SkMaskFilter.cpp
new file mode 100644
index 000000000..62cfcf964
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskFilter.cpp
@@ -0,0 +1,359 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkMaskFilter.h"
+#include "SkBlitter.h"
+#include "SkDraw.h"
+#include "SkCachedData.h"
+#include "SkPath.h"
+#include "SkRasterClip.h"
+#include "SkRRect.h"
+#include "SkTypes.h"
+
+#if SK_SUPPORT_GPU
+#include "GrTexture.h"
+#include "SkGr.h"
+#endif
+
+SkMaskFilter::NinePatch::~NinePatch() {
+ if (fCache) {
+ SkASSERT((const void*)fMask.fImage == fCache->data());
+ fCache->unref();
+ } else {
+ SkMask::FreeImage(fMask.fImage);
+ }
+}
+
+bool SkMaskFilter::filterMask(SkMask*, const SkMask&, const SkMatrix&,
+ SkIPoint*) const {
+ return false;
+}
+
+bool SkMaskFilter::asABlur(BlurRec*) const {
+ return false;
+}
+
+static void extractMaskSubset(const SkMask& src, SkMask* dst) {
+ SkASSERT(src.fBounds.contains(dst->fBounds));
+
+ const int dx = dst->fBounds.left() - src.fBounds.left();
+ const int dy = dst->fBounds.top() - src.fBounds.top();
+ dst->fImage = src.fImage + dy * src.fRowBytes + dx;
+ dst->fRowBytes = src.fRowBytes;
+ dst->fFormat = src.fFormat;
+}
+
+static void blitClippedMask(SkBlitter* blitter, const SkMask& mask,
+ const SkIRect& bounds, const SkIRect& clipR) {
+ SkIRect r;
+ if (r.intersect(bounds, clipR)) {
+ blitter->blitMask(mask, r);
+ }
+}
+
+static void blitClippedRect(SkBlitter* blitter, const SkIRect& rect, const SkIRect& clipR) {
+ SkIRect r;
+ if (r.intersect(rect, clipR)) {
+ blitter->blitRect(r.left(), r.top(), r.width(), r.height());
+ }
+}
+
+#if 0
+static void dump(const SkMask& mask) {
+ for (int y = mask.fBounds.top(); y < mask.fBounds.bottom(); ++y) {
+ for (int x = mask.fBounds.left(); x < mask.fBounds.right(); ++x) {
+ SkDebugf("%02X", *mask.getAddr8(x, y));
+ }
+ SkDebugf("\n");
+ }
+ SkDebugf("\n");
+}
+#endif
+
+static void draw_nine_clipped(const SkMask& mask, const SkIRect& outerR,
+ const SkIPoint& center, bool fillCenter,
+ const SkIRect& clipR, SkBlitter* blitter) {
+ int cx = center.x();
+ int cy = center.y();
+ SkMask m;
+
+ // top-left
+ m.fBounds = mask.fBounds;
+ m.fBounds.fRight = cx;
+ m.fBounds.fBottom = cy;
+ if (m.fBounds.width() > 0 && m.fBounds.height() > 0) {
+ extractMaskSubset(mask, &m);
+ m.fBounds.offsetTo(outerR.left(), outerR.top());
+ blitClippedMask(blitter, m, m.fBounds, clipR);
+ }
+
+ // top-right
+ m.fBounds = mask.fBounds;
+ m.fBounds.fLeft = cx + 1;
+ m.fBounds.fBottom = cy;
+ if (m.fBounds.width() > 0 && m.fBounds.height() > 0) {
+ extractMaskSubset(mask, &m);
+ m.fBounds.offsetTo(outerR.right() - m.fBounds.width(), outerR.top());
+ blitClippedMask(blitter, m, m.fBounds, clipR);
+ }
+
+ // bottom-left
+ m.fBounds = mask.fBounds;
+ m.fBounds.fRight = cx;
+ m.fBounds.fTop = cy + 1;
+ if (m.fBounds.width() > 0 && m.fBounds.height() > 0) {
+ extractMaskSubset(mask, &m);
+ m.fBounds.offsetTo(outerR.left(), outerR.bottom() - m.fBounds.height());
+ blitClippedMask(blitter, m, m.fBounds, clipR);
+ }
+
+ // bottom-right
+ m.fBounds = mask.fBounds;
+ m.fBounds.fLeft = cx + 1;
+ m.fBounds.fTop = cy + 1;
+ if (m.fBounds.width() > 0 && m.fBounds.height() > 0) {
+ extractMaskSubset(mask, &m);
+ m.fBounds.offsetTo(outerR.right() - m.fBounds.width(),
+ outerR.bottom() - m.fBounds.height());
+ blitClippedMask(blitter, m, m.fBounds, clipR);
+ }
+
+ SkIRect innerR;
+ innerR.set(outerR.left() + cx - mask.fBounds.left(),
+ outerR.top() + cy - mask.fBounds.top(),
+ outerR.right() + (cx + 1 - mask.fBounds.right()),
+ outerR.bottom() + (cy + 1 - mask.fBounds.bottom()));
+ if (fillCenter) {
+ blitClippedRect(blitter, innerR, clipR);
+ }
+
+ const int innerW = innerR.width();
+ size_t storageSize = (innerW + 1) * (sizeof(int16_t) + sizeof(uint8_t));
+ SkAutoSMalloc<4*1024> storage(storageSize);
+ int16_t* runs = (int16_t*)storage.get();
+ uint8_t* alpha = (uint8_t*)(runs + innerW + 1);
+
+ SkIRect r;
+ // top
+ r.set(innerR.left(), outerR.top(), innerR.right(), innerR.top());
+ if (r.intersect(clipR)) {
+ int startY = SkMax32(0, r.top() - outerR.top());
+ int stopY = startY + r.height();
+ int width = r.width();
+ for (int y = startY; y < stopY; ++y) {
+ runs[0] = width;
+ runs[width] = 0;
+ alpha[0] = *mask.getAddr8(cx, mask.fBounds.top() + y);
+ blitter->blitAntiH(r.left(), outerR.top() + y, alpha, runs);
+ }
+ }
+ // bottom
+ r.set(innerR.left(), innerR.bottom(), innerR.right(), outerR.bottom());
+ if (r.intersect(clipR)) {
+ int startY = outerR.bottom() - r.bottom();
+ int stopY = startY + r.height();
+ int width = r.width();
+ for (int y = startY; y < stopY; ++y) {
+ runs[0] = width;
+ runs[width] = 0;
+ alpha[0] = *mask.getAddr8(cx, mask.fBounds.bottom() - y - 1);
+ blitter->blitAntiH(r.left(), outerR.bottom() - y - 1, alpha, runs);
+ }
+ }
+ // left
+ r.set(outerR.left(), innerR.top(), innerR.left(), innerR.bottom());
+ if (r.intersect(clipR)) {
+ int startX = r.left() - outerR.left();
+ int stopX = startX + r.width();
+ int height = r.height();
+ for (int x = startX; x < stopX; ++x) {
+ blitter->blitV(outerR.left() + x, r.top(), height,
+ *mask.getAddr8(mask.fBounds.left() + x, mask.fBounds.top() + cy));
+ }
+ }
+ // right
+ r.set(innerR.right(), innerR.top(), outerR.right(), innerR.bottom());
+ if (r.intersect(clipR)) {
+ int startX = outerR.right() - r.right();
+ int stopX = startX + r.width();
+ int height = r.height();
+ for (int x = startX; x < stopX; ++x) {
+ blitter->blitV(outerR.right() - x - 1, r.top(), height,
+ *mask.getAddr8(mask.fBounds.right() - x - 1, mask.fBounds.top() + cy));
+ }
+ }
+}
+
+static void draw_nine(const SkMask& mask, const SkIRect& outerR, const SkIPoint& center,
+ bool fillCenter, const SkRasterClip& clip, SkBlitter* blitter) {
+ // if we get here, we need to (possibly) resolve the clip and blitter
+ SkAAClipBlitterWrapper wrapper(clip, blitter);
+ blitter = wrapper.getBlitter();
+
+ SkRegion::Cliperator clipper(wrapper.getRgn(), outerR);
+
+ if (!clipper.done()) {
+ const SkIRect& cr = clipper.rect();
+ do {
+ draw_nine_clipped(mask, outerR, center, fillCenter, cr, blitter);
+ clipper.next();
+ } while (!clipper.done());
+ }
+}
+
+static int countNestedRects(const SkPath& path, SkRect rects[2]) {
+ if (path.isNestedFillRects(rects)) {
+ return 2;
+ }
+ return path.isRect(&rects[0]);
+}
+
+bool SkMaskFilter::filterRRect(const SkRRect& devRRect, const SkMatrix& matrix,
+ const SkRasterClip& clip, SkBlitter* blitter) const {
+ // Attempt to speed up drawing by creating a nine patch. If a nine patch
+ // cannot be used, return false to allow our caller to recover and perform
+ // the drawing another way.
+ NinePatch patch;
+ patch.fMask.fImage = nullptr;
+ if (kTrue_FilterReturn != this->filterRRectToNine(devRRect, matrix,
+ clip.getBounds(),
+ &patch)) {
+ SkASSERT(nullptr == patch.fMask.fImage);
+ return false;
+ }
+ draw_nine(patch.fMask, patch.fOuterRect, patch.fCenter, true, clip, blitter);
+ return true;
+}
+
+bool SkMaskFilter::filterPath(const SkPath& devPath, const SkMatrix& matrix,
+ const SkRasterClip& clip, SkBlitter* blitter,
+ SkStrokeRec::InitStyle style) const {
+ SkRect rects[2];
+ int rectCount = 0;
+ if (SkStrokeRec::kFill_InitStyle == style) {
+ rectCount = countNestedRects(devPath, rects);
+ }
+ if (rectCount > 0) {
+ NinePatch patch;
+
+ switch (this->filterRectsToNine(rects, rectCount, matrix, clip.getBounds(), &patch)) {
+ case kFalse_FilterReturn:
+ SkASSERT(nullptr == patch.fMask.fImage);
+ return false;
+
+ case kTrue_FilterReturn:
+ draw_nine(patch.fMask, patch.fOuterRect, patch.fCenter, 1 == rectCount, clip,
+ blitter);
+ return true;
+
+ case kUnimplemented_FilterReturn:
+ SkASSERT(nullptr == patch.fMask.fImage);
+ // fall through
+ break;
+ }
+ }
+
+ SkMask srcM, dstM;
+
+ if (!SkDraw::DrawToMask(devPath, &clip.getBounds(), this, &matrix, &srcM,
+ SkMask::kComputeBoundsAndRenderImage_CreateMode,
+ style)) {
+ return false;
+ }
+ SkAutoMaskFreeImage autoSrc(srcM.fImage);
+
+ if (!this->filterMask(&dstM, srcM, matrix, nullptr)) {
+ return false;
+ }
+ SkAutoMaskFreeImage autoDst(dstM.fImage);
+
+ // if we get here, we need to (possibly) resolve the clip and blitter
+ SkAAClipBlitterWrapper wrapper(clip, blitter);
+ blitter = wrapper.getBlitter();
+
+ SkRegion::Cliperator clipper(wrapper.getRgn(), dstM.fBounds);
+
+ if (!clipper.done()) {
+ const SkIRect& cr = clipper.rect();
+ do {
+ blitter->blitMask(dstM, cr);
+ clipper.next();
+ } while (!clipper.done());
+ }
+
+ return true;
+}
+
+SkMaskFilter::FilterReturn
+SkMaskFilter::filterRRectToNine(const SkRRect&, const SkMatrix&,
+ const SkIRect& clipBounds, NinePatch*) const {
+ return kUnimplemented_FilterReturn;
+}
+
+SkMaskFilter::FilterReturn
+SkMaskFilter::filterRectsToNine(const SkRect[], int count, const SkMatrix&,
+ const SkIRect& clipBounds, NinePatch*) const {
+ return kUnimplemented_FilterReturn;
+}
+
+#if SK_SUPPORT_GPU
+bool SkMaskFilter::asFragmentProcessor(GrFragmentProcessor**, GrTexture*, const SkMatrix&) const {
+ return false;
+}
+
+bool SkMaskFilter::canFilterMaskGPU(const SkRRect& devRRect,
+ const SkIRect& clipBounds,
+ const SkMatrix& ctm,
+ SkRect* maskRect) const {
+ return false;
+}
+
+ bool SkMaskFilter::directFilterMaskGPU(GrTextureProvider* texProvider,
+ GrDrawContext* drawContext,
+ GrPaint* grp,
+ const GrClip&,
+ const SkMatrix& viewMatrix,
+ const SkStrokeRec& strokeRec,
+ const SkPath& path) const {
+ return false;
+}
+
+
+bool SkMaskFilter::directFilterRRectMaskGPU(GrContext*,
+ GrDrawContext* drawContext,
+ GrPaint* grp,
+ const GrClip&,
+ const SkMatrix& viewMatrix,
+ const SkStrokeRec& strokeRec,
+ const SkRRect& rrect,
+ const SkRRect& devRRect) const {
+ return false;
+}
+
+bool SkMaskFilter::filterMaskGPU(GrTexture* src,
+ const SkMatrix& ctm,
+ const SkIRect& maskRect,
+ GrTexture** result) const {
+ return false;
+}
+#endif
+
+void SkMaskFilter::computeFastBounds(const SkRect& src, SkRect* dst) const {
+ SkMask srcM, dstM;
+
+ srcM.fBounds = src.roundOut();
+ srcM.fRowBytes = 0;
+ srcM.fFormat = SkMask::kA8_Format;
+
+ SkIPoint margin; // ignored
+ if (this->filterMask(&dstM, srcM, SkMatrix::I(), &margin)) {
+ dst->set(dstM.fBounds);
+ } else {
+ dst->set(srcM.fBounds);
+ }
+}
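Usage sketch (editorial): mask filters are normally attached to a paint rather than called directly; SkBlurMaskFilter::Make is assumed from include/effects and is not part of this file. The raster pipeline then calls filterRRect()/filterPath() above, taking the nine-patch fast path when filterRRectToNine()/filterRectsToNine() succeed.

    #include "SkBlurMaskFilter.h"

    SkPaint paint;
    paint.setMaskFilter(SkBlurMaskFilter::Make(kNormal_SkBlurStyle, 4.0f));
    canvas->drawRRect(rrect, paint);   // 'canvas' and 'rrect' assumed to exist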
diff --git a/gfx/skia/skia/src/core/SkMaskGamma.cpp b/gfx/skia/skia/src/core/SkMaskGamma.cpp
new file mode 100644
index 000000000..bbe72c4e3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskGamma.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+
+#include "SkColor.h"
+#include "SkFloatingPoint.h"
+#include "SkMaskGamma.h"
+
+class SkLinearColorSpaceLuminance : public SkColorSpaceLuminance {
+ SkScalar toLuma(SkScalar SkDEBUGCODE(gamma), SkScalar luminance) const override {
+ SkASSERT(SK_Scalar1 == gamma);
+ return luminance;
+ }
+ SkScalar fromLuma(SkScalar SkDEBUGCODE(gamma), SkScalar luma) const override {
+ SkASSERT(SK_Scalar1 == gamma);
+ return luma;
+ }
+};
+
+class SkGammaColorSpaceLuminance : public SkColorSpaceLuminance {
+ SkScalar toLuma(SkScalar gamma, SkScalar luminance) const override {
+ return SkScalarPow(luminance, gamma);
+ }
+ SkScalar fromLuma(SkScalar gamma, SkScalar luma) const override {
+ return SkScalarPow(luma, SkScalarInvert(gamma));
+ }
+};
+
+class SkSRGBColorSpaceLuminance : public SkColorSpaceLuminance {
+ SkScalar toLuma(SkScalar SkDEBUGCODE(gamma), SkScalar luminance) const override {
+ SkASSERT(0 == gamma);
+ //The magic numbers are derived from the sRGB specification.
+ //See http://www.color.org/chardata/rgb/srgb.xalter .
+ if (luminance <= 0.04045f) {
+ return luminance / 12.92f;
+ }
+ return SkScalarPow((luminance + 0.055f) / 1.055f,
+ 2.4f);
+ }
+ SkScalar fromLuma(SkScalar SkDEBUGCODE(gamma), SkScalar luma) const override {
+ SkASSERT(0 == gamma);
+ //The magic numbers are derived from the sRGB specification.
+ //See http://www.color.org/chardata/rgb/srgb.xalter .
+ if (luma <= 0.0031308f) {
+ return luma * 12.92f;
+ }
+ return 1.055f * SkScalarPow(luma, SkScalarInvert(2.4f))
+ - 0.055f;
+ }
+};
+
+/*static*/ const SkColorSpaceLuminance& SkColorSpaceLuminance::Fetch(SkScalar gamma) {
+ static SkLinearColorSpaceLuminance gSkLinearColorSpaceLuminance;
+ static SkGammaColorSpaceLuminance gSkGammaColorSpaceLuminance;
+ static SkSRGBColorSpaceLuminance gSkSRGBColorSpaceLuminance;
+
+ if (0 == gamma) {
+ return gSkSRGBColorSpaceLuminance;
+ } else if (SK_Scalar1 == gamma) {
+ return gSkLinearColorSpaceLuminance;
+ } else {
+ return gSkGammaColorSpaceLuminance;
+ }
+}
+
+static float apply_contrast(float srca, float contrast) {
+ return srca + ((1.0f - srca) * contrast * srca);
+}
+
+void SkTMaskGamma_build_correcting_lut(uint8_t table[256], U8CPU srcI, SkScalar contrast,
+ const SkColorSpaceLuminance& srcConvert, SkScalar srcGamma,
+ const SkColorSpaceLuminance& dstConvert, SkScalar dstGamma) {
+ const float src = (float)srcI / 255.0f;
+ const float linSrc = srcConvert.toLuma(srcGamma, src);
+ //Guess at the dst. The perceptual inverse provides smaller visual
+ //discontinuities when slight changes to desaturated colors cause a channel
+ //to map to a different correcting lut with neighboring srcI.
+ //See https://code.google.com/p/chromium/issues/detail?id=141425#c59 .
+ const float dst = 1.0f - src;
+ const float linDst = dstConvert.toLuma(dstGamma, dst);
+
+ //Contrast value tapers off to 0 as the src luminance becomes white
+ const float adjustedContrast = SkScalarToFloat(contrast) * linDst;
+
+ //Remove discontinuity and instability when src is close to dst.
+ //The value 1/256 is arbitrary and appears to contain the instability.
+ if (fabs(src - dst) < (1.0f / 256.0f)) {
+ float ii = 0.0f;
+ for (int i = 0; i < 256; ++i, ii += 1.0f) {
+ float rawSrca = ii / 255.0f;
+ float srca = apply_contrast(rawSrca, adjustedContrast);
+ table[i] = SkToU8(sk_float_round2int(255.0f * srca));
+ }
+ } else {
+ // Avoid slow int to float conversion.
+ float ii = 0.0f;
+ for (int i = 0; i < 256; ++i, ii += 1.0f) {
+ // 'rawSrca += 1.0f / 255.0f' and even
+ // 'rawSrca = i * (1.0f / 255.0f)' can add up to more than 1.0f.
+ // When this happens the table[255] == 0x0 instead of 0xff.
+ // See http://code.google.com/p/chromium/issues/detail?id=146466
+ float rawSrca = ii / 255.0f;
+ float srca = apply_contrast(rawSrca, adjustedContrast);
+ SkASSERT(srca <= 1.0f);
+ float dsta = 1.0f - srca;
+
+ //Calculate the output we want.
+ float linOut = (linSrc * srca + dsta * linDst);
+ SkASSERT(linOut <= 1.0f);
+ float out = dstConvert.fromLuma(dstGamma, linOut);
+
+ //Undo what the blit blend will do.
+ float result = (out - dst) / (src - dst);
+ SkASSERT(sk_float_round2int(255.0f * result) <= 255);
+
+ table[i] = SkToU8(sk_float_round2int(255.0f * result));
+ }
+ }
+}
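As an illustrative aside (not part of the patch itself), here is a minimal standalone sketch of the correction the table above encodes, assuming only the sRGB transfer constants quoted in SkSRGBColorSpaceLuminance. The blitter computes dst + coverage * (src - dst) in the encoded space, so the table stores the coverage that makes that blend land on the result of blending in linear light.

    #include <cmath>
    #include <cstdio>

    // Same constants as SkSRGBColorSpaceLuminance::toLuma()/fromLuma() above.
    static float srgb_to_linear(float v) {
        return v <= 0.04045f ? v / 12.92f : std::pow((v + 0.055f) / 1.055f, 2.4f);
    }
    static float linear_to_srgb(float v) {
        return v <= 0.0031308f ? v * 12.92f : 1.055f * std::pow(v, 1.0f / 2.4f) - 0.055f;
    }

    int main() {
        const float src = 0.2f;            // paint luminance (srcI / 255)
        const float dst = 1.0f - src;      // the "guess at the dst" used above
        const float coverage = 0.5f;       // linear mask alpha, before correction

        // Desired result: blend in linear light, then re-encode.
        const float linOut = srgb_to_linear(src) * coverage +
                             srgb_to_linear(dst) * (1.0f - coverage);
        const float want = linear_to_srgb(linOut);

        // The blitter will compute dst + a * (src - dst), so the corrected
        // coverage is (want - dst) / (src - dst), exactly as in the table build.
        const float corrected = (want - dst) / (src - dst);
        std::printf("coverage %.3f -> corrected %.3f\n", coverage, corrected);
        return 0;
    }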
diff --git a/gfx/skia/skia/src/core/SkMaskGamma.h b/gfx/skia/skia/src/core/SkMaskGamma.h
new file mode 100644
index 000000000..94219d42a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskGamma.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMaskGamma_DEFINED
+#define SkMaskGamma_DEFINED
+
+#include "SkTypes.h"
+#include "SkColor.h"
+#include "SkColorPriv.h"
+#include "SkRefCnt.h"
+
+/**
+ * SkColorSpaceLuminance is used to convert luminances to and from linear and
+ * perceptual color spaces.
+ *
+ * Luma is used to specify a linear luminance value [0.0, 1.0].
+ * Luminance is used to specify a luminance value in an arbitrary color space [0.0, 1.0].
+ */
+class SkColorSpaceLuminance : SkNoncopyable {
+public:
+ virtual ~SkColorSpaceLuminance() { }
+
+ /** Converts a color component luminance in the color space to a linear luma. */
+ virtual SkScalar toLuma(SkScalar gamma, SkScalar luminance) const = 0;
+ /** Converts a linear luma to a color component luminance in the color space. */
+ virtual SkScalar fromLuma(SkScalar gamma, SkScalar luma) const = 0;
+
+ /** Converts a color to a luminance value. */
+ static U8CPU computeLuminance(SkScalar gamma, SkColor c) {
+ const SkColorSpaceLuminance& luminance = Fetch(gamma);
+ SkScalar r = luminance.toLuma(gamma, SkIntToScalar(SkColorGetR(c)) / 255);
+ SkScalar g = luminance.toLuma(gamma, SkIntToScalar(SkColorGetG(c)) / 255);
+ SkScalar b = luminance.toLuma(gamma, SkIntToScalar(SkColorGetB(c)) / 255);
+ SkScalar luma = r * SK_LUM_COEFF_R +
+ g * SK_LUM_COEFF_G +
+ b * SK_LUM_COEFF_B;
+ SkASSERT(luma <= SK_Scalar1);
+ return SkScalarRoundToInt(luminance.fromLuma(gamma, luma) * 255);
+ }
+
+ /** Retrieves the SkColorSpaceLuminance for the given gamma. */
+ static const SkColorSpaceLuminance& Fetch(SkScalar gamma);
+};
+
+///@{
+/**
+ * Scales base <= 2^N-1 to 2^8-1
+ * @param N [1, 8] the number of bits used by base.
+ * @param base the number to be scaled to [0, 255].
+ */
+template<U8CPU N> static inline U8CPU sk_t_scale255(U8CPU base) {
+ base <<= (8 - N);
+ U8CPU lum = base;
+ for (unsigned int i = N; i < 8; i += N) {
+ lum |= base >> i;
+ }
+ return lum;
+}
+template<> /*static*/ inline U8CPU sk_t_scale255<1>(U8CPU base) {
+ return base * 0xFF;
+}
+template<> /*static*/ inline U8CPU sk_t_scale255<2>(U8CPU base) {
+ return base * 0x55;
+}
+template<> /*static*/ inline U8CPU sk_t_scale255<4>(U8CPU base) {
+ return base * 0x11;
+}
+template<> /*static*/ inline U8CPU sk_t_scale255<8>(U8CPU base) {
+ return base;
+}
+///@}
+
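A quick standalone check (illustrative only, not part of the patch) that the specializations above agree with the bit replication performed by the generic template; the 4-bit case and the test values are chosen arbitrarily.

    #include <cassert>
    #include <cstdint>

    // Mirrors sk_t_scale255<4>: multiplying by 0x11 replicates the low nibble
    // into both nibbles, which is exactly what the loop in the generic template does.
    static uint8_t scale255_4bit(uint8_t base) {
        return static_cast<uint8_t>(base * 0x11);
    }

    int main() {
        assert(scale255_4bit(0x0) == 0x00);
        assert(scale255_4bit(0x8) == 0x88);
        assert(scale255_4bit(0xF) == 0xFF);   // a full 4-bit value maps to a full byte
        return 0;
    }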
+template <int R_LUM_BITS, int G_LUM_BITS, int B_LUM_BITS> class SkTMaskPreBlend;
+
+void SkTMaskGamma_build_correcting_lut(uint8_t table[256], U8CPU srcI, SkScalar contrast,
+ const SkColorSpaceLuminance& srcConvert, SkScalar srcGamma,
+ const SkColorSpaceLuminance& dstConvert, SkScalar dstGamma);
+
+/**
+ * A regular mask contains linear alpha values. A gamma correcting mask
+ * contains non-linear alpha values in an attempt to create gamma correct blits
+ * in the presence of a gamma incorrect (linear) blend in the blitter.
+ *
+ * SkMaskGamma creates and maintains tables which convert linear alpha values
+ * to gamma correcting alpha values.
+ * @param R The number of luminance bits to use [1, 8] from the red channel.
+ * @param G The number of luminance bits to use [1, 8] from the green channel.
+ * @param B The number of luminance bits to use [1, 8] from the blue channel.
+ */
+template <int R_LUM_BITS, int G_LUM_BITS, int B_LUM_BITS> class SkTMaskGamma : public SkRefCnt {
+
+public:
+
+ /** Creates a linear SkTMaskGamma. */
+ SkTMaskGamma() : fIsLinear(true) { }
+
+ /**
+ * Creates tables to convert linear alpha values to gamma correcting alpha
+ * values.
+ *
+ * @param contrast A value in the range [0.0, 1.0] which indicates the
+ * amount of artificial contrast to add.
+ * @param paint The color space in which the paint color was chosen.
+ * @param device The color space of the target device.
+ */
+ SkTMaskGamma(SkScalar contrast, SkScalar paintGamma, SkScalar deviceGamma) : fIsLinear(false) {
+ const SkColorSpaceLuminance& paintConvert = SkColorSpaceLuminance::Fetch(paintGamma);
+ const SkColorSpaceLuminance& deviceConvert = SkColorSpaceLuminance::Fetch(deviceGamma);
+ for (U8CPU i = 0; i < (1 << MAX_LUM_BITS); ++i) {
+ U8CPU lum = sk_t_scale255<MAX_LUM_BITS>(i);
+ SkTMaskGamma_build_correcting_lut(fGammaTables[i], lum, contrast,
+ paintConvert, paintGamma,
+ deviceConvert, deviceGamma);
+ }
+ }
+
+ /** Given a color, returns the closest canonical color. */
+ static SkColor CanonicalColor(SkColor color) {
+ return SkColorSetRGB(
+ sk_t_scale255<R_LUM_BITS>(SkColorGetR(color) >> (8 - R_LUM_BITS)),
+ sk_t_scale255<G_LUM_BITS>(SkColorGetG(color) >> (8 - G_LUM_BITS)),
+ sk_t_scale255<B_LUM_BITS>(SkColorGetB(color) >> (8 - B_LUM_BITS)));
+ }
+
+ /** The type of the mask pre-blend which will be returned from preBlend(SkColor). */
+ typedef SkTMaskPreBlend<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS> PreBlend;
+
+ /**
+ * Provides access to the tables appropriate for converting linear alpha
+ * values into gamma correcting alpha values when drawing the given color
+ * through the mask. The destination color will be approximated.
+ */
+ PreBlend preBlend(SkColor color) const;
+
+ /**
+ * Get dimensions for the full table set, so it can be allocated as a block.
+ */
+ void getGammaTableDimensions(int* tableWidth, int* numTables) const {
+ *tableWidth = 256;
+ *numTables = (1 << MAX_LUM_BITS);
+ }
+
+ /**
+ * Provides direct access to the full table set, so it can be uploaded
+ * into a texture.
+ */
+ const uint8_t* getGammaTables() const {
+ return (const uint8_t*) fGammaTables;
+ }
+
+private:
+ static const int MAX_LUM_BITS =
+ B_LUM_BITS > (R_LUM_BITS > G_LUM_BITS ? R_LUM_BITS : G_LUM_BITS)
+ ? B_LUM_BITS : (R_LUM_BITS > G_LUM_BITS ? R_LUM_BITS : G_LUM_BITS);
+ uint8_t fGammaTables[1 << MAX_LUM_BITS][256];
+ bool fIsLinear;
+
+ typedef SkRefCnt INHERITED;
+};
+
+
+/**
+ * SkTMaskPreBlend is a tear-off of SkTMaskGamma. It provides the tables to
+ * convert a linear alpha value for a given channel to a gamma correcting alpha
+ * value for that channel. This class is immutable.
+ *
+ * If fR, fG, or fB is nullptr, all of them will be. This indicates that no mask
+ * pre blend should be applied. SkTMaskPreBlend::isApplicable() is provided as
+ * a convenience function to test for the absence of this case.
+ */
+template <int R_LUM_BITS, int G_LUM_BITS, int B_LUM_BITS> class SkTMaskPreBlend {
+private:
+ SkTMaskPreBlend(const SkTMaskGamma<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>* parent,
+ const uint8_t* r, const uint8_t* g, const uint8_t* b)
+ : fParent(SkSafeRef(parent)), fR(r), fG(g), fB(b) { }
+
+ SkAutoTUnref<const SkTMaskGamma<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS> > fParent;
+ friend class SkTMaskGamma<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>;
+public:
+ /** Creates a non applicable SkTMaskPreBlend. */
+ SkTMaskPreBlend() : fParent(), fR(nullptr), fG(nullptr), fB(nullptr) { }
+
+ /**
+ * This copy constructor exists for correctness, but should never be called
+ * when return value optimization is enabled.
+ */
+ SkTMaskPreBlend(const SkTMaskPreBlend<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>& that)
+ : fParent(SkSafeRef(that.fParent.get())), fR(that.fR), fG(that.fG), fB(that.fB) { }
+
+ ~SkTMaskPreBlend() { }
+
+ /** True if this PreBlend should be applied. When false, fR, fG, and fB are nullptr. */
+ bool isApplicable() const { return SkToBool(this->fG); }
+
+ const uint8_t* fR;
+ const uint8_t* fG;
+ const uint8_t* fB;
+};
+
+template <int R_LUM_BITS, int G_LUM_BITS, int B_LUM_BITS>
+SkTMaskPreBlend<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>
+SkTMaskGamma<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>::preBlend(SkColor color) const {
+ return fIsLinear ? SkTMaskPreBlend<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>()
+ : SkTMaskPreBlend<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>(this,
+ fGammaTables[SkColorGetR(color) >> (8 - MAX_LUM_BITS)],
+ fGammaTables[SkColorGetG(color) >> (8 - MAX_LUM_BITS)],
+ fGammaTables[SkColorGetB(color) >> (8 - MAX_LUM_BITS)]);
+}
+
+///@{
+/**
+ * If APPLY_LUT is false, returns component unchanged.
+ * If APPLY_LUT is true, returns lut[component].
+ * @param APPLY_LUT whether or not the look-up table should be applied to component.
+ * @param component the initial component.
+ * @param lut a look-up table which transforms the component.
+ */
+template<bool APPLY_LUT> static inline U8CPU sk_apply_lut_if(U8CPU component, const uint8_t*) {
+ return component;
+}
+template<> /*static*/ inline U8CPU sk_apply_lut_if<true>(U8CPU component, const uint8_t* lut) {
+ return lut[component];
+}
+///@}
+
+#endif
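An illustrative usage sketch of the classes declared above (not part of the patch; it assumes it is built inside Skia where this header is available, and the 3/3/3 instantiation is chosen arbitrarily for the example).

    #include "SkMaskGamma.h"
    #include <cstdint>

    // Apply gamma correction to one row of A8 coverage for a given paint color.
    static void correct_row(uint8_t coverage[], int width, SkColor paintColor,
                            const SkTMaskGamma<3, 3, 3>& maskGamma) {
        SkTMaskGamma<3, 3, 3>::PreBlend lut = maskGamma.preBlend(paintColor);
        if (!lut.isApplicable()) {
            return;  // linear case: leave the coverage untouched
        }
        for (int x = 0; x < width; ++x) {
            // A full blitter would use fR/fG/fB per channel for LCD masks; a
            // single-channel mask can use the green table, as isApplicable() hints.
            coverage[x] = sk_apply_lut_if<true>(coverage[x], lut.fG);
        }
    }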
diff --git a/gfx/skia/skia/src/core/SkMath.cpp b/gfx/skia/skia/src/core/SkMath.cpp
new file mode 100644
index 000000000..6eff790c8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMath.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMathPriv.h"
+#include "SkFixed.h"
+#include "SkFloatBits.h"
+#include "SkFloatingPoint.h"
+#include "SkScalar.h"
+
+#define sub_shift(zeros, x, n) \
+ zeros -= n; \
+ x >>= n
+
+int SkCLZ_portable(uint32_t x) {
+ if (x == 0) {
+ return 32;
+ }
+
+ int zeros = 31;
+ if (x & 0xFFFF0000) {
+ sub_shift(zeros, x, 16);
+ }
+ if (x & 0xFF00) {
+ sub_shift(zeros, x, 8);
+ }
+ if (x & 0xF0) {
+ sub_shift(zeros, x, 4);
+ }
+ if (x & 0xC) {
+ sub_shift(zeros, x, 2);
+ }
+ if (x & 0x2) {
+ sub_shift(zeros, x, 1);
+ }
+
+ return zeros;
+}
+
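A standalone sanity check (not part of the patch) that the binary-search fallback above agrees with the compiler builtin used by SkCLZ on GCC/Clang; the availability of __builtin_clz is an assumption about the toolchain.

    #include <cassert>
    #include <cstdint>

    // Same algorithm as SkCLZ_portable above.
    static int clz_portable(uint32_t x) {
        if (x == 0) return 32;
        int zeros = 31;
        if (x & 0xFFFF0000) { zeros -= 16; x >>= 16; }
        if (x & 0xFF00)     { zeros -=  8; x >>=  8; }
        if (x & 0xF0)       { zeros -=  4; x >>=  4; }
        if (x & 0xC)        { zeros -=  2; x >>=  2; }
        if (x & 0x2)        { zeros -=  1; x >>=  1; }
        return zeros;
    }

    int main() {
        const uint32_t samples[] = { 1, 2, 3, 0x80u, 0x00010000u, 0x80000000u, 0xFFFFFFFFu };
        for (uint32_t v : samples) {
            assert(clz_portable(v) == __builtin_clz(v));   // builtin is defined for v != 0
        }
        assert(clz_portable(0) == 32);                     // portable version also handles 0
        return 0;
    }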
+///////////////////////////////////////////////////////////////////////////////
+
+/* www.worldserver.com/turk/computergraphics/FixedSqrt.pdf
+*/
+int32_t SkSqrtBits(int32_t x, int count) {
+ SkASSERT(x >= 0 && count > 0 && (unsigned)count <= 30);
+
+ uint32_t root = 0;
+ uint32_t remHi = 0;
+ uint32_t remLo = x;
+
+ do {
+ root <<= 1;
+
+ remHi = (remHi<<2) | (remLo>>30);
+ remLo <<= 2;
+
+ uint32_t testDiv = (root << 1) + 1;
+ if (remHi >= testDiv) {
+ remHi -= testDiv;
+ root++;
+ }
+ } while (--count >= 0);
+
+ return root;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+float SkScalarSinCos(float radians, float* cosValue) {
+ float sinValue = sk_float_sin(radians);
+
+ if (cosValue) {
+ *cosValue = sk_float_cos(radians);
+ if (SkScalarNearlyZero(*cosValue)) {
+ *cosValue = 0;
+ }
+ }
+
+ if (SkScalarNearlyZero(sinValue)) {
+ sinValue = 0;
+ }
+ return sinValue;
+}
diff --git a/gfx/skia/skia/src/core/SkMathPriv.h b/gfx/skia/skia/src/core/SkMathPriv.h
new file mode 100644
index 000000000..fca09f3ff
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMathPriv.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMathPriv_DEFINED
+#define SkMathPriv_DEFINED
+
+#include "SkMath.h"
+
+#if defined(SK_BUILD_FOR_IOS) && (defined(SK_BUILD_FOR_ARM32) || defined(SK_BUILD_FOR_ARM64))
+// iOS on ARM starts processes with the Flush-To-Zero (FTZ) and
+// Denormals-Are-Zero (DAZ) bits in the fpscr register set.
+// Algorithms that rely on denormalized numbers need alternative implementations.
+// This can also be controlled in SSE with the MXCSR register,
+// x87 with FSTCW/FLDCW, and mips with FCSR. This should be detected at runtime,
+// or the library built one way or the other more generally (by the build).
+#define SK_CPU_FLUSH_TO_ZERO
+#endif
+
+/** Returns -1 if n < 0, else returns 0
+ */
+#define SkExtractSign(n) ((int32_t)(n) >> 31)
+
+/** If sign == -1, returns -n, else sign must be 0, and returns n.
+ Typically used in conjunction with SkExtractSign().
+ */
+static inline int32_t SkApplySign(int32_t n, int32_t sign) {
+ SkASSERT(sign == 0 || sign == -1);
+ return (n ^ sign) - sign;
+}
+
+/** Return x with the sign of y */
+static inline int32_t SkCopySign32(int32_t x, int32_t y) {
+ return SkApplySign(x, SkExtractSign(x ^ y));
+}
+
+/** Given a positive value and a positive max, return the value
+ pinned against max.
+ Note: only works as long as max - value doesn't wrap around
+ @return max if value >= max, else value
+ */
+static inline unsigned SkClampUMax(unsigned value, unsigned max) {
+ if (value > max) {
+ value = max;
+ }
+ return value;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** Return a*b/255, truncating away any fractional bits. Only valid if both
+ a and b are 0..255
+ */
+static inline U8CPU SkMulDiv255Trunc(U8CPU a, U8CPU b) {
+ SkASSERT((uint8_t)a == a);
+ SkASSERT((uint8_t)b == b);
+ unsigned prod = a*b + 1;
+ return (prod + (prod >> 8)) >> 8;
+}
+
+/** Return (a*b)/255, taking the ceiling of any fractional bits. Only valid if
+ both a and b are 0..255. The expected result equals (a * b + 254) / 255.
+ */
+static inline U8CPU SkMulDiv255Ceiling(U8CPU a, U8CPU b) {
+ SkASSERT((uint8_t)a == a);
+ SkASSERT((uint8_t)b == b);
+ unsigned prod = a*b + 255;
+ return (prod + (prod >> 8)) >> 8;
+}
+
+/** Just the rounding step in SkDiv255Round: round(value / 255)
+ */
+static inline unsigned SkDiv255Round(unsigned prod) {
+ prod += 128;
+ return (prod + (prod >> 8)) >> 8;
+}
+
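A standalone check (not part of the patch) of the add-and-shift trick used by the helpers above: with the +128 bias it reproduces the correctly rounded a*b/255 for every 8-bit pair, which is why the full division can be avoided.

    #include <cassert>

    int main() {
        for (unsigned a = 0; a <= 255; ++a) {
            for (unsigned b = 0; b <= 255; ++b) {
                unsigned prod  = a * b + 128;                 // bias for rounding
                unsigned fast  = (prod + (prod >> 8)) >> 8;   // the trick above
                unsigned exact = (a * b + 127) / 255;         // correctly rounded a*b/255
                assert(fast == exact);
            }
        }
        return 0;
    }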
+static inline float SkPinToUnitFloat(float x) {
+ return SkTMin(SkTMax(x, 0.0f), 1.0f);
+}
+
+/**
+ * Swap byte order of a 4-byte value, e.g. 0xaarrggbb -> 0xbbggrraa.
+ */
+#if defined(_MSC_VER)
+ #include <intrin.h>
+ static inline uint32_t SkBSwap32(uint32_t v) { return _byteswap_ulong(v); }
+#else
+ static inline uint32_t SkBSwap32(uint32_t v) { return __builtin_bswap32(v); }
+#endif
+
+//! Returns the number of leading zero bits (0...32)
+int SkCLZ_portable(uint32_t);
+
+#ifndef SkCLZ
+ #if defined(SK_BUILD_FOR_WIN32)
+ #include <intrin.h>
+
+ static inline int SkCLZ(uint32_t mask) {
+ if (mask) {
+ unsigned long index;
+ _BitScanReverse(&index, mask);
+ // Suppress this bogus /analyze warning. The check for non-zero
+ // guarantees that _BitScanReverse will succeed.
+#pragma warning(suppress : 6102) // Using 'index' from failed function call
+ return index ^ 0x1F;
+ } else {
+ return 32;
+ }
+ }
+ #elif defined(SK_CPU_ARM32) || defined(__GNUC__) || defined(__clang__)
+ static inline int SkCLZ(uint32_t mask) {
+ // __builtin_clz(0) is undefined, so we have to detect that case.
+ return mask ? __builtin_clz(mask) : 32;
+ }
+ #else
+ #define SkCLZ(x) SkCLZ_portable(x)
+ #endif
+#endif
+
+/**
+ * Returns the smallest power-of-2 that is >= the specified value. If value
+ * is already a power of 2, then it is returned unchanged. It is undefined
+ * if value is <= 0.
+ */
+static inline int SkNextPow2(int value) {
+ SkASSERT(value > 0);
+ return 1 << (32 - SkCLZ(value - 1));
+}
+
+/**
+ * Returns the log2 of the specified value, were that value to be rounded up
+ * to the next power of 2. It is undefined to pass 0. Examples:
+ * SkNextLog2(1) -> 0
+ * SkNextLog2(2) -> 1
+ * SkNextLog2(3) -> 2
+ * SkNextLog2(4) -> 2
+ * SkNextLog2(5) -> 3
+ */
+static inline int SkNextLog2(uint32_t value) {
+ SkASSERT(value != 0);
+ return 32 - SkCLZ(value - 1);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Return the next power of 2 >= n.
+ */
+static inline uint32_t GrNextPow2(uint32_t n) {
+ return n ? (1 << (32 - SkCLZ(n - 1))) : 1;
+}
+
+static inline int GrNextPow2(int n) {
+ SkASSERT(n >= 0); // this impl only works for non-neg.
+ return n ? (1 << (32 - SkCLZ(n - 1))) : 1;
+}
+#endif
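A standalone sketch (not part of the patch) of the CLZ-based next-power-of-two idiom used by SkNextPow2/GrNextPow2 above; __builtin_clz is assumed to be available (GCC/Clang), and inputs are kept at or below 2^31 so the shift stays in range.

    #include <cassert>
    #include <cstdint>

    // Like SkCLZ: also defined for 0 (the builtin is not).
    static int clz32(uint32_t x) { return x ? __builtin_clz(x) : 32; }

    // Same shape as GrNextPow2(uint32_t): round n up to a power of two.
    static uint32_t next_pow2(uint32_t n) {
        return n ? (1u << (32 - clz32(n - 1))) : 1u;
    }

    int main() {
        assert(next_pow2(0) == 1);
        assert(next_pow2(1) == 1);
        assert(next_pow2(2) == 2);
        assert(next_pow2(3) == 4);
        assert(next_pow2(5) == 8);
        assert(next_pow2(1000) == 1024);
        assert(next_pow2(1u << 20) == (1u << 20));   // powers of two are unchanged
        return 0;
    }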
diff --git a/gfx/skia/skia/src/core/SkMatrix.cpp b/gfx/skia/skia/src/core/SkMatrix.cpp
new file mode 100644
index 000000000..a05ae0a17
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMatrix.cpp
@@ -0,0 +1,1908 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFloatBits.h"
+#include "SkMatrix.h"
+#include "SkNx.h"
+#include "SkPaint.h"
+#include "SkRSXform.h"
+#include "SkString.h"
+#include <stddef.h>
+
+static void normalize_perspective(SkScalar mat[9]) {
+ // If it was interesting to never store the last element, we could divide all 8 other
+ // elements here by the 9th, making it 1.0...
+ //
+ // When SkScalar was SkFixed, we would sometimes rescale the entire matrix to keep its
+ // component values from getting too large. This is not a concern when using floats/doubles,
+ // so we do nothing now.
+
+ // Disable this for now, but it could be enabled.
+#if 0
+ if (0 == mat[SkMatrix::kMPersp0] && 0 == mat[SkMatrix::kMPersp1]) {
+ SkScalar p2 = mat[SkMatrix::kMPersp2];
+ if (p2 != 0 && p2 != 1) {
+ double inv = 1.0 / p2;
+ for (int i = 0; i < 6; ++i) {
+ mat[i] = SkDoubleToScalar(mat[i] * inv);
+ }
+ mat[SkMatrix::kMPersp2] = 1;
+ }
+ }
+#endif
+}
+
+// In a few places, we performed the following
+// a * b + c * d + e
+// as
+// a * b + (c * d + e)
+//
+// sdot and scross are intended to capture these compound operations into a
+// function, with an eye toward considering upscaling the intermediates to
+// doubles for more precision (as we do in concat and invert).
+//
+// However, the few lines that performed the last add before the "dot" cause
+// tiny image differences, so we guard that change until we see the impact on
+// Chrome's layout tests.
+//
+#define SK_LEGACY_MATRIX_MATH_ORDER
+
+static inline float SkDoubleToFloat(double x) {
+ return static_cast<float>(x);
+}
+
+/* [scale-x skew-x trans-x] [X] [X']
+ [skew-y scale-y trans-y] * [Y] = [Y']
+ [persp-0 persp-1 persp-2] [1] [1 ]
+*/
+
+void SkMatrix::reset() {
+ fMat[kMScaleX] = fMat[kMScaleY] = fMat[kMPersp2] = 1;
+ fMat[kMSkewX] = fMat[kMSkewY] =
+ fMat[kMTransX] = fMat[kMTransY] =
+ fMat[kMPersp0] = fMat[kMPersp1] = 0;
+ this->setTypeMask(kIdentity_Mask | kRectStaysRect_Mask);
+}
+
+void SkMatrix::set9(const SkScalar buffer[]) {
+ memcpy(fMat, buffer, 9 * sizeof(SkScalar));
+ normalize_perspective(fMat);
+ this->setTypeMask(kUnknown_Mask);
+}
+
+void SkMatrix::setAffine(const SkScalar buffer[]) {
+ fMat[kMScaleX] = buffer[kAScaleX];
+ fMat[kMSkewX] = buffer[kASkewX];
+ fMat[kMTransX] = buffer[kATransX];
+ fMat[kMSkewY] = buffer[kASkewY];
+ fMat[kMScaleY] = buffer[kAScaleY];
+ fMat[kMTransY] = buffer[kATransY];
+ fMat[kMPersp0] = 0;
+ fMat[kMPersp1] = 0;
+ fMat[kMPersp2] = 1;
+ this->setTypeMask(kUnknown_Mask);
+}
+
+// this guy aligns with the masks, so we can compute a mask from a variable 0/1
+enum {
+ kTranslate_Shift,
+ kScale_Shift,
+ kAffine_Shift,
+ kPerspective_Shift,
+ kRectStaysRect_Shift
+};
+
+static const int32_t kScalar1Int = 0x3f800000;
+
+uint8_t SkMatrix::computePerspectiveTypeMask() const {
+ // Benchmarking suggests that replacing this set of SkScalarAs2sCompliment
+ // checks with direct float comparisons is a win, but replacing those below
+ // is not. We don't yet understand that result.
+ if (fMat[kMPersp0] != 0 || fMat[kMPersp1] != 0 || fMat[kMPersp2] != 1) {
+ // If this is a perspective transform, we return true for all other
+ // transform flags - this does not disable any optimizations, respects
+ // the rule that the type mask must be conservative, and speeds up
+ // type mask computation.
+ return SkToU8(kORableMasks);
+ }
+
+ return SkToU8(kOnlyPerspectiveValid_Mask | kUnknown_Mask);
+}
+
+uint8_t SkMatrix::computeTypeMask() const {
+ unsigned mask = 0;
+
+ if (fMat[kMPersp0] != 0 || fMat[kMPersp1] != 0 || fMat[kMPersp2] != 1) {
+ // Once it is determined that this is a perspective transform,
+ // all other flags are moot as far as optimizations are concerned.
+ return SkToU8(kORableMasks);
+ }
+
+ if (fMat[kMTransX] != 0 || fMat[kMTransY] != 0) {
+ mask |= kTranslate_Mask;
+ }
+
+ int m00 = SkScalarAs2sCompliment(fMat[SkMatrix::kMScaleX]);
+ int m01 = SkScalarAs2sCompliment(fMat[SkMatrix::kMSkewX]);
+ int m10 = SkScalarAs2sCompliment(fMat[SkMatrix::kMSkewY]);
+ int m11 = SkScalarAs2sCompliment(fMat[SkMatrix::kMScaleY]);
+
+ if (m01 | m10) {
+ // The skew components may be scale-inducing, unless we are dealing
+ // with a pure rotation. Testing for a pure rotation is expensive,
+ // so we opt for being conservative by always setting the scale bit
+ // along with the affine bit.
+ // By doing this, we are also ensuring that matrices have the same
+ // type masks as their inverses.
+ mask |= kAffine_Mask | kScale_Mask;
+
+ // For rectStaysRect, in the affine case, we only need to check that
+ // the primary diagonal is all zeros and that the secondary diagonal
+ // is all non-zero.
+
+ // map non-zero to 1
+ m01 = m01 != 0;
+ m10 = m10 != 0;
+
+ int dp0 = 0 == (m00 | m11) ; // true if both are 0
+ int ds1 = m01 & m10; // true if both are 1
+
+ mask |= (dp0 & ds1) << kRectStaysRect_Shift;
+ } else {
+ // Only test for scale explicitly if not affine, since affine sets the
+ // scale bit.
+ if ((m00 ^ kScalar1Int) | (m11 ^ kScalar1Int)) {
+ mask |= kScale_Mask;
+ }
+
+ // Not affine, therefore we already know secondary diagonal is
+ // all zeros, so we just need to check that primary diagonal is
+ // all non-zero.
+
+ // map non-zero to 1
+ m00 = m00 != 0;
+ m11 = m11 != 0;
+
+ // record if the (p)rimary diagonal is all non-zero
+ mask |= (m00 & m11) << kRectStaysRect_Shift;
+ }
+
+ return SkToU8(mask);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool operator==(const SkMatrix& a, const SkMatrix& b) {
+ const SkScalar* SK_RESTRICT ma = a.fMat;
+ const SkScalar* SK_RESTRICT mb = b.fMat;
+
+ return ma[0] == mb[0] && ma[1] == mb[1] && ma[2] == mb[2] &&
+ ma[3] == mb[3] && ma[4] == mb[4] && ma[5] == mb[5] &&
+ ma[6] == mb[6] && ma[7] == mb[7] && ma[8] == mb[8];
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// helper function to determine if upper-left 2x2 of matrix is degenerate
+static inline bool is_degenerate_2x2(SkScalar scaleX, SkScalar skewX,
+ SkScalar skewY, SkScalar scaleY) {
+ SkScalar perp_dot = scaleX*scaleY - skewX*skewY;
+ return SkScalarNearlyZero(perp_dot, SK_ScalarNearlyZero*SK_ScalarNearlyZero);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkMatrix::isSimilarity(SkScalar tol) const {
+ // if identity or translate matrix
+ TypeMask mask = this->getType();
+ if (mask <= kTranslate_Mask) {
+ return true;
+ }
+ if (mask & kPerspective_Mask) {
+ return false;
+ }
+
+ SkScalar mx = fMat[kMScaleX];
+ SkScalar my = fMat[kMScaleY];
+ // if no skew, can just compare scale factors
+ if (!(mask & kAffine_Mask)) {
+ return !SkScalarNearlyZero(mx) && SkScalarNearlyEqual(SkScalarAbs(mx), SkScalarAbs(my));
+ }
+ SkScalar sx = fMat[kMSkewX];
+ SkScalar sy = fMat[kMSkewY];
+
+ if (is_degenerate_2x2(mx, sx, sy, my)) {
+ return false;
+ }
+
+ // upper 2x2 is rotation/reflection + uniform scale if basis vectors
+ // are 90 degree rotations of each other
+ return (SkScalarNearlyEqual(mx, my, tol) && SkScalarNearlyEqual(sx, -sy, tol))
+ || (SkScalarNearlyEqual(mx, -my, tol) && SkScalarNearlyEqual(sx, sy, tol));
+}
+
+bool SkMatrix::preservesRightAngles(SkScalar tol) const {
+ TypeMask mask = this->getType();
+
+ if (mask <= kTranslate_Mask) {
+ // identity, translate and/or scale
+ return true;
+ }
+ if (mask & kPerspective_Mask) {
+ return false;
+ }
+
+ SkASSERT(mask & (kAffine_Mask | kScale_Mask));
+
+ SkScalar mx = fMat[kMScaleX];
+ SkScalar my = fMat[kMScaleY];
+ SkScalar sx = fMat[kMSkewX];
+ SkScalar sy = fMat[kMSkewY];
+
+ if (is_degenerate_2x2(mx, sx, sy, my)) {
+ return false;
+ }
+
+ // upper 2x2 is scale + rotation/reflection if basis vectors are orthogonal
+ SkVector vec[2];
+ vec[0].set(mx, sy);
+ vec[1].set(sx, my);
+
+ return SkScalarNearlyZero(vec[0].dot(vec[1]), SkScalarSquare(tol));
+}
+
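A standalone sketch (not part of the patch) of the basis-vector test that isSimilarity() reduces to once perspective and the degenerate 2x2 case are ruled out; the tolerance and test values are arbitrary.

    #include <cassert>
    #include <cmath>

    // (scaleX, skewY) and (skewX, scaleY) are the images of the x and y axes; the
    // matrix is a similarity exactly when they are 90-degree rotations of each other.
    static bool is_similarity_2x2(float mx, float sx, float sy, float my,
                                  float tol = 1e-6f) {
        return (std::fabs(mx - my) <= tol && std::fabs(sx + sy) <= tol) ||
               (std::fabs(mx + my) <= tol && std::fabs(sx - sy) <= tol);
    }

    int main() {
        const float c = std::cos(0.3f), s = std::sin(0.3f), k = 2.0f;
        // Rotation followed by a uniform scale of k: still a similarity.
        assert(is_similarity_2x2(k * c, -k * s, k * s, k * c));
        // Anisotropic scale: not a similarity.
        assert(!is_similarity_2x2(2.0f, 0.0f, 0.0f, 3.0f));
        return 0;
    }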
+///////////////////////////////////////////////////////////////////////////////
+
+static inline SkScalar sdot(SkScalar a, SkScalar b, SkScalar c, SkScalar d) {
+ return a * b + c * d;
+}
+
+static inline SkScalar sdot(SkScalar a, SkScalar b, SkScalar c, SkScalar d,
+ SkScalar e, SkScalar f) {
+ return a * b + c * d + e * f;
+}
+
+static inline SkScalar scross(SkScalar a, SkScalar b, SkScalar c, SkScalar d) {
+ return a * b - c * d;
+}
+
+void SkMatrix::setTranslate(SkScalar dx, SkScalar dy) {
+ if (dx || dy) {
+ fMat[kMTransX] = dx;
+ fMat[kMTransY] = dy;
+
+ fMat[kMScaleX] = fMat[kMScaleY] = fMat[kMPersp2] = 1;
+ fMat[kMSkewX] = fMat[kMSkewY] =
+ fMat[kMPersp0] = fMat[kMPersp1] = 0;
+
+ this->setTypeMask(kTranslate_Mask | kRectStaysRect_Mask);
+ } else {
+ this->reset();
+ }
+}
+
+void SkMatrix::preTranslate(SkScalar dx, SkScalar dy) {
+ if (!dx && !dy) {
+ return;
+ }
+
+ if (fTypeMask <= kTranslate_Mask) {
+ fMat[kMTransX] += dx;
+ fMat[kMTransY] += dy;
+ this->setTypeMask((fMat[kMTransX] != 0 || fMat[kMTransY] != 0) ? kTranslate_Mask
+ : kIdentity_Mask);
+ } else if (this->hasPerspective()) {
+ SkMatrix m;
+ m.setTranslate(dx, dy);
+ this->preConcat(m);
+ } else {
+ fMat[kMTransX] += sdot(fMat[kMScaleX], dx, fMat[kMSkewX], dy);
+ fMat[kMTransY] += sdot(fMat[kMSkewY], dx, fMat[kMScaleY], dy);
+ this->setTypeMask(kUnknown_Mask | kOnlyPerspectiveValid_Mask);
+ }
+}
+
+void SkMatrix::postTranslate(SkScalar dx, SkScalar dy) {
+ if (!dx && !dy) {
+ return;
+ }
+
+ if (this->hasPerspective()) {
+ SkMatrix m;
+ m.setTranslate(dx, dy);
+ this->postConcat(m);
+ } else {
+ fMat[kMTransX] += dx;
+ fMat[kMTransY] += dy;
+ this->setTypeMask(kUnknown_Mask | kOnlyPerspectiveValid_Mask);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix::setScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py) {
+ if (1 == sx && 1 == sy) {
+ this->reset();
+ } else {
+ this->setScaleTranslate(sx, sy, px - sx * px, py - sy * py);
+ }
+}
+
+void SkMatrix::setScale(SkScalar sx, SkScalar sy) {
+ if (1 == sx && 1 == sy) {
+ this->reset();
+ } else {
+ fMat[kMScaleX] = sx;
+ fMat[kMScaleY] = sy;
+ fMat[kMPersp2] = 1;
+
+ fMat[kMTransX] = fMat[kMTransY] =
+ fMat[kMSkewX] = fMat[kMSkewY] =
+ fMat[kMPersp0] = fMat[kMPersp1] = 0;
+
+ this->setTypeMask(kScale_Mask | kRectStaysRect_Mask);
+ }
+}
+
+bool SkMatrix::setIDiv(int divx, int divy) {
+ if (!divx || !divy) {
+ return false;
+ }
+ this->setScale(SkScalarInvert(divx), SkScalarInvert(divy));
+ return true;
+}
+
+void SkMatrix::preScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py) {
+ if (1 == sx && 1 == sy) {
+ return;
+ }
+
+ SkMatrix m;
+ m.setScale(sx, sy, px, py);
+ this->preConcat(m);
+}
+
+void SkMatrix::preScale(SkScalar sx, SkScalar sy) {
+ if (1 == sx && 1 == sy) {
+ return;
+ }
+
+ // the assumption is that these multiplies are very cheap, and that
+ // a full concat and/or just computing the matrix type is more expensive.
+ // Also, the fixed-point case checks for overflow, but the float case doesn't,
+ // so we can get away with these blind multiplies.
+
+ fMat[kMScaleX] *= sx;
+ fMat[kMSkewY] *= sx;
+ fMat[kMPersp0] *= sx;
+
+ fMat[kMSkewX] *= sy;
+ fMat[kMScaleY] *= sy;
+ fMat[kMPersp1] *= sy;
+
+ // Attempt to simplify our type when applying an inverse scale.
+ // TODO: The persp/affine preconditions are in place to keep the mask consistent with
+ // what computeTypeMask() would produce (persp/skew always implies kScale).
+ // We should investigate whether these flag dependencies are truly needed.
+ if (fMat[kMScaleX] == 1 && fMat[kMScaleY] == 1
+ && !(fTypeMask & (kPerspective_Mask | kAffine_Mask))) {
+ this->clearTypeMask(kScale_Mask);
+ } else {
+ this->orTypeMask(kScale_Mask);
+ }
+}
+
+void SkMatrix::postScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py) {
+ if (1 == sx && 1 == sy) {
+ return;
+ }
+ SkMatrix m;
+ m.setScale(sx, sy, px, py);
+ this->postConcat(m);
+}
+
+void SkMatrix::postScale(SkScalar sx, SkScalar sy) {
+ if (1 == sx && 1 == sy) {
+ return;
+ }
+ SkMatrix m;
+ m.setScale(sx, sy);
+ this->postConcat(m);
+}
+
+// this guy perhaps can go away, if we have a fract/high-precision way to
+// scale matrices
+bool SkMatrix::postIDiv(int divx, int divy) {
+ if (divx == 0 || divy == 0) {
+ return false;
+ }
+
+ const float invX = 1.f / divx;
+ const float invY = 1.f / divy;
+
+ fMat[kMScaleX] *= invX;
+ fMat[kMSkewX] *= invX;
+ fMat[kMTransX] *= invX;
+
+ fMat[kMScaleY] *= invY;
+ fMat[kMSkewY] *= invY;
+ fMat[kMTransY] *= invY;
+
+ this->setTypeMask(kUnknown_Mask);
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix::setSinCos(SkScalar sinV, SkScalar cosV, SkScalar px, SkScalar py) {
+ const SkScalar oneMinusCosV = 1 - cosV;
+
+ fMat[kMScaleX] = cosV;
+ fMat[kMSkewX] = -sinV;
+ fMat[kMTransX] = sdot(sinV, py, oneMinusCosV, px);
+
+ fMat[kMSkewY] = sinV;
+ fMat[kMScaleY] = cosV;
+ fMat[kMTransY] = sdot(-sinV, px, oneMinusCosV, py);
+
+ fMat[kMPersp0] = fMat[kMPersp1] = 0;
+ fMat[kMPersp2] = 1;
+
+ this->setTypeMask(kUnknown_Mask | kOnlyPerspectiveValid_Mask);
+}
+
+SkMatrix& SkMatrix::setRSXform(const SkRSXform& xform) {
+ fMat[kMScaleX] = xform.fSCos;
+ fMat[kMSkewX] = -xform.fSSin;
+ fMat[kMTransX] = xform.fTx;
+
+ fMat[kMSkewY] = xform.fSSin;
+ fMat[kMScaleY] = xform.fSCos;
+ fMat[kMTransY] = xform.fTy;
+
+ fMat[kMPersp0] = fMat[kMPersp1] = 0;
+ fMat[kMPersp2] = 1;
+
+ this->setTypeMask(kUnknown_Mask | kOnlyPerspectiveValid_Mask);
+ return *this;
+}
+
+void SkMatrix::setSinCos(SkScalar sinV, SkScalar cosV) {
+ fMat[kMScaleX] = cosV;
+ fMat[kMSkewX] = -sinV;
+ fMat[kMTransX] = 0;
+
+ fMat[kMSkewY] = sinV;
+ fMat[kMScaleY] = cosV;
+ fMat[kMTransY] = 0;
+
+ fMat[kMPersp0] = fMat[kMPersp1] = 0;
+ fMat[kMPersp2] = 1;
+
+ this->setTypeMask(kUnknown_Mask | kOnlyPerspectiveValid_Mask);
+}
+
+void SkMatrix::setRotate(SkScalar degrees, SkScalar px, SkScalar py) {
+ SkScalar sinV, cosV;
+ sinV = SkScalarSinCos(SkDegreesToRadians(degrees), &cosV);
+ this->setSinCos(sinV, cosV, px, py);
+}
+
+void SkMatrix::setRotate(SkScalar degrees) {
+ SkScalar sinV, cosV;
+ sinV = SkScalarSinCos(SkDegreesToRadians(degrees), &cosV);
+ this->setSinCos(sinV, cosV);
+}
+
+void SkMatrix::preRotate(SkScalar degrees, SkScalar px, SkScalar py) {
+ SkMatrix m;
+ m.setRotate(degrees, px, py);
+ this->preConcat(m);
+}
+
+void SkMatrix::preRotate(SkScalar degrees) {
+ SkMatrix m;
+ m.setRotate(degrees);
+ this->preConcat(m);
+}
+
+void SkMatrix::postRotate(SkScalar degrees, SkScalar px, SkScalar py) {
+ SkMatrix m;
+ m.setRotate(degrees, px, py);
+ this->postConcat(m);
+}
+
+void SkMatrix::postRotate(SkScalar degrees) {
+ SkMatrix m;
+ m.setRotate(degrees);
+ this->postConcat(m);
+}
+
+////////////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix::setSkew(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py) {
+ fMat[kMScaleX] = 1;
+ fMat[kMSkewX] = sx;
+ fMat[kMTransX] = -sx * py;
+
+ fMat[kMSkewY] = sy;
+ fMat[kMScaleY] = 1;
+ fMat[kMTransY] = -sy * px;
+
+ fMat[kMPersp0] = fMat[kMPersp1] = 0;
+ fMat[kMPersp2] = 1;
+
+ this->setTypeMask(kUnknown_Mask | kOnlyPerspectiveValid_Mask);
+}
+
+void SkMatrix::setSkew(SkScalar sx, SkScalar sy) {
+ fMat[kMScaleX] = 1;
+ fMat[kMSkewX] = sx;
+ fMat[kMTransX] = 0;
+
+ fMat[kMSkewY] = sy;
+ fMat[kMScaleY] = 1;
+ fMat[kMTransY] = 0;
+
+ fMat[kMPersp0] = fMat[kMPersp1] = 0;
+ fMat[kMPersp2] = 1;
+
+ this->setTypeMask(kUnknown_Mask | kOnlyPerspectiveValid_Mask);
+}
+
+void SkMatrix::preSkew(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py) {
+ SkMatrix m;
+ m.setSkew(sx, sy, px, py);
+ this->preConcat(m);
+}
+
+void SkMatrix::preSkew(SkScalar sx, SkScalar sy) {
+ SkMatrix m;
+ m.setSkew(sx, sy);
+ this->preConcat(m);
+}
+
+void SkMatrix::postSkew(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py) {
+ SkMatrix m;
+ m.setSkew(sx, sy, px, py);
+ this->postConcat(m);
+}
+
+void SkMatrix::postSkew(SkScalar sx, SkScalar sy) {
+ SkMatrix m;
+ m.setSkew(sx, sy);
+ this->postConcat(m);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkMatrix::setRectToRect(const SkRect& src, const SkRect& dst, ScaleToFit align) {
+ if (src.isEmpty()) {
+ this->reset();
+ return false;
+ }
+
+ if (dst.isEmpty()) {
+ sk_bzero(fMat, 8 * sizeof(SkScalar));
+ fMat[kMPersp2] = 1;
+ this->setTypeMask(kScale_Mask | kRectStaysRect_Mask);
+ } else {
+ SkScalar tx, sx = dst.width() / src.width();
+ SkScalar ty, sy = dst.height() / src.height();
+ bool xLarger = false;
+
+ if (align != kFill_ScaleToFit) {
+ if (sx > sy) {
+ xLarger = true;
+ sx = sy;
+ } else {
+ sy = sx;
+ }
+ }
+
+ tx = dst.fLeft - src.fLeft * sx;
+ ty = dst.fTop - src.fTop * sy;
+ if (align == kCenter_ScaleToFit || align == kEnd_ScaleToFit) {
+ SkScalar diff;
+
+ if (xLarger) {
+ diff = dst.width() - src.width() * sy;
+ } else {
+ diff = dst.height() - src.height() * sy;
+ }
+
+ if (align == kCenter_ScaleToFit) {
+ diff = SkScalarHalf(diff);
+ }
+
+ if (xLarger) {
+ tx += diff;
+ } else {
+ ty += diff;
+ }
+ }
+
+ this->setScaleTranslate(sx, sy, tx, ty);
+ }
+ return true;
+}
+
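A standalone worked example (not part of the patch) of the kCenter_ScaleToFit branch above: fitting a 100x50 source into a 200x200 destination keeps the smaller scale factor and centers along the leftover axis. Both rectangles are assumed to start at the origin to keep the arithmetic short.

    #include <cassert>

    int main() {
        float sx = 200.f / 100.f;                 // dst.width()  / src.width()  = 2
        float sy = 200.f /  50.f;                 // dst.height() / src.height() = 4
        bool xLarger = false;
        if (sx > sy) { xLarger = true; sx = sy; } else { sy = sx; }   // keep the smaller scale

        float tx = 0.f, ty = 0.f;                 // dst.fLeft - src.fLeft * sx, etc.
        float diff = xLarger ? 200.f - 100.f * sy : 200.f - 50.f * sy;  // leftover space
        diff *= 0.5f;                             // kCenter_ScaleToFit splits it evenly
        if (xLarger) { tx += diff; } else { ty += diff; }

        assert(sx == 2.f && sy == 2.f);           // uniform scale of 2
        assert(tx == 0.f && ty == 50.f);          // centered vertically
        return 0;
    }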
+///////////////////////////////////////////////////////////////////////////////
+
+static inline float muladdmul(float a, float b, float c, float d) {
+ return SkDoubleToFloat((double)a * b + (double)c * d);
+}
+
+static inline float rowcol3(const float row[], const float col[]) {
+ return row[0] * col[0] + row[1] * col[3] + row[2] * col[6];
+}
+
+static bool only_scale_and_translate(unsigned mask) {
+ return 0 == (mask & (SkMatrix::kAffine_Mask | SkMatrix::kPerspective_Mask));
+}
+
+void SkMatrix::setConcat(const SkMatrix& a, const SkMatrix& b) {
+ TypeMask aType = a.getType();
+ TypeMask bType = b.getType();
+
+ if (a.isTriviallyIdentity()) {
+ *this = b;
+ } else if (b.isTriviallyIdentity()) {
+ *this = a;
+ } else if (only_scale_and_translate(aType | bType)) {
+ this->setScaleTranslate(a.fMat[kMScaleX] * b.fMat[kMScaleX],
+ a.fMat[kMScaleY] * b.fMat[kMScaleY],
+ a.fMat[kMScaleX] * b.fMat[kMTransX] + a.fMat[kMTransX],
+ a.fMat[kMScaleY] * b.fMat[kMTransY] + a.fMat[kMTransY]);
+ } else {
+ SkMatrix tmp;
+
+ if ((aType | bType) & kPerspective_Mask) {
+ tmp.fMat[kMScaleX] = rowcol3(&a.fMat[0], &b.fMat[0]);
+ tmp.fMat[kMSkewX] = rowcol3(&a.fMat[0], &b.fMat[1]);
+ tmp.fMat[kMTransX] = rowcol3(&a.fMat[0], &b.fMat[2]);
+ tmp.fMat[kMSkewY] = rowcol3(&a.fMat[3], &b.fMat[0]);
+ tmp.fMat[kMScaleY] = rowcol3(&a.fMat[3], &b.fMat[1]);
+ tmp.fMat[kMTransY] = rowcol3(&a.fMat[3], &b.fMat[2]);
+ tmp.fMat[kMPersp0] = rowcol3(&a.fMat[6], &b.fMat[0]);
+ tmp.fMat[kMPersp1] = rowcol3(&a.fMat[6], &b.fMat[1]);
+ tmp.fMat[kMPersp2] = rowcol3(&a.fMat[6], &b.fMat[2]);
+
+ normalize_perspective(tmp.fMat);
+ tmp.setTypeMask(kUnknown_Mask);
+ } else {
+ tmp.fMat[kMScaleX] = muladdmul(a.fMat[kMScaleX],
+ b.fMat[kMScaleX],
+ a.fMat[kMSkewX],
+ b.fMat[kMSkewY]);
+
+ tmp.fMat[kMSkewX] = muladdmul(a.fMat[kMScaleX],
+ b.fMat[kMSkewX],
+ a.fMat[kMSkewX],
+ b.fMat[kMScaleY]);
+
+ tmp.fMat[kMTransX] = muladdmul(a.fMat[kMScaleX],
+ b.fMat[kMTransX],
+ a.fMat[kMSkewX],
+ b.fMat[kMTransY]) + a.fMat[kMTransX];
+
+ tmp.fMat[kMSkewY] = muladdmul(a.fMat[kMSkewY],
+ b.fMat[kMScaleX],
+ a.fMat[kMScaleY],
+ b.fMat[kMSkewY]);
+
+ tmp.fMat[kMScaleY] = muladdmul(a.fMat[kMSkewY],
+ b.fMat[kMSkewX],
+ a.fMat[kMScaleY],
+ b.fMat[kMScaleY]);
+
+ tmp.fMat[kMTransY] = muladdmul(a.fMat[kMSkewY],
+ b.fMat[kMTransX],
+ a.fMat[kMScaleY],
+ b.fMat[kMTransY]) + a.fMat[kMTransY];
+
+ tmp.fMat[kMPersp0] = 0;
+ tmp.fMat[kMPersp1] = 0;
+ tmp.fMat[kMPersp2] = 1;
+ //SkDebugf("Concat mat non-persp type: %d\n", tmp.getType());
+ //SkASSERT(!(tmp.getType() & kPerspective_Mask));
+ tmp.setTypeMask(kUnknown_Mask | kOnlyPerspectiveValid_Mask);
+ }
+ *this = tmp;
+ }
+}
+
+void SkMatrix::preConcat(const SkMatrix& mat) {
+ // check for identity first, so we don't do a needless copy of ourselves
+ // to ourselves inside setConcat()
+ if(!mat.isIdentity()) {
+ this->setConcat(*this, mat);
+ }
+}
+
+void SkMatrix::postConcat(const SkMatrix& mat) {
+ // check for identity first, so we don't do a needless copy of ourselves
+ // to ourselves inside setConcat()
+ if (!mat.isIdentity()) {
+ this->setConcat(mat, *this);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* Matrix inversion is very expensive, but also the place where keeping
+ precision may be most important (here and matrix concat). Hence to avoid
+ bitmap blitting artifacts when walking the inverse, we use doubles for
+ the intermediate math, even though we know that is more expensive.
+ */
+
+static inline SkScalar scross_dscale(SkScalar a, SkScalar b,
+ SkScalar c, SkScalar d, double scale) {
+ return SkDoubleToScalar(scross(a, b, c, d) * scale);
+}
+
+static inline double dcross(double a, double b, double c, double d) {
+ return a * b - c * d;
+}
+
+static inline SkScalar dcross_dscale(double a, double b,
+ double c, double d, double scale) {
+ return SkDoubleToScalar(dcross(a, b, c, d) * scale);
+}
+
+static double sk_inv_determinant(const float mat[9], int isPerspective) {
+ double det;
+
+ if (isPerspective) {
+ det = mat[SkMatrix::kMScaleX] *
+ dcross(mat[SkMatrix::kMScaleY], mat[SkMatrix::kMPersp2],
+ mat[SkMatrix::kMTransY], mat[SkMatrix::kMPersp1])
+ +
+ mat[SkMatrix::kMSkewX] *
+ dcross(mat[SkMatrix::kMTransY], mat[SkMatrix::kMPersp0],
+ mat[SkMatrix::kMSkewY], mat[SkMatrix::kMPersp2])
+ +
+ mat[SkMatrix::kMTransX] *
+ dcross(mat[SkMatrix::kMSkewY], mat[SkMatrix::kMPersp1],
+ mat[SkMatrix::kMScaleY], mat[SkMatrix::kMPersp0]);
+ } else {
+ det = dcross(mat[SkMatrix::kMScaleX], mat[SkMatrix::kMScaleY],
+ mat[SkMatrix::kMSkewX], mat[SkMatrix::kMSkewY]);
+ }
+
+ // Since the determinant is on the order of the cube of the matrix members,
+ // compare to the cube of the default nearly-zero constant (although an
+ // estimate of the condition number would be better if it wasn't so expensive).
+ if (SkScalarNearlyZero((float)det, SK_ScalarNearlyZero * SK_ScalarNearlyZero * SK_ScalarNearlyZero)) {
+ return 0;
+ }
+ return 1.0 / det;
+}
+
+void SkMatrix::SetAffineIdentity(SkScalar affine[6]) {
+ affine[kAScaleX] = 1;
+ affine[kASkewY] = 0;
+ affine[kASkewX] = 0;
+ affine[kAScaleY] = 1;
+ affine[kATransX] = 0;
+ affine[kATransY] = 0;
+}
+
+bool SkMatrix::asAffine(SkScalar affine[6]) const {
+ if (this->hasPerspective()) {
+ return false;
+ }
+ if (affine) {
+ affine[kAScaleX] = this->fMat[kMScaleX];
+ affine[kASkewY] = this->fMat[kMSkewY];
+ affine[kASkewX] = this->fMat[kMSkewX];
+ affine[kAScaleY] = this->fMat[kMScaleY];
+ affine[kATransX] = this->fMat[kMTransX];
+ affine[kATransY] = this->fMat[kMTransY];
+ }
+ return true;
+}
+
+void SkMatrix::ComputeInv(SkScalar dst[9], const SkScalar src[9], double invDet, bool isPersp) {
+ SkASSERT(src != dst);
+ SkASSERT(src && dst);
+
+ if (isPersp) {
+ dst[kMScaleX] = scross_dscale(src[kMScaleY], src[kMPersp2], src[kMTransY], src[kMPersp1], invDet);
+ dst[kMSkewX] = scross_dscale(src[kMTransX], src[kMPersp1], src[kMSkewX], src[kMPersp2], invDet);
+ dst[kMTransX] = scross_dscale(src[kMSkewX], src[kMTransY], src[kMTransX], src[kMScaleY], invDet);
+
+ dst[kMSkewY] = scross_dscale(src[kMTransY], src[kMPersp0], src[kMSkewY], src[kMPersp2], invDet);
+ dst[kMScaleY] = scross_dscale(src[kMScaleX], src[kMPersp2], src[kMTransX], src[kMPersp0], invDet);
+ dst[kMTransY] = scross_dscale(src[kMTransX], src[kMSkewY], src[kMScaleX], src[kMTransY], invDet);
+
+ dst[kMPersp0] = scross_dscale(src[kMSkewY], src[kMPersp1], src[kMScaleY], src[kMPersp0], invDet);
+ dst[kMPersp1] = scross_dscale(src[kMSkewX], src[kMPersp0], src[kMScaleX], src[kMPersp1], invDet);
+ dst[kMPersp2] = scross_dscale(src[kMScaleX], src[kMScaleY], src[kMSkewX], src[kMSkewY], invDet);
+ } else { // not perspective
+ dst[kMScaleX] = SkDoubleToScalar(src[kMScaleY] * invDet);
+ dst[kMSkewX] = SkDoubleToScalar(-src[kMSkewX] * invDet);
+ dst[kMTransX] = dcross_dscale(src[kMSkewX], src[kMTransY], src[kMScaleY], src[kMTransX], invDet);
+
+ dst[kMSkewY] = SkDoubleToScalar(-src[kMSkewY] * invDet);
+ dst[kMScaleY] = SkDoubleToScalar(src[kMScaleX] * invDet);
+ dst[kMTransY] = dcross_dscale(src[kMSkewY], src[kMTransX], src[kMScaleX], src[kMTransY], invDet);
+
+ dst[kMPersp0] = 0;
+ dst[kMPersp1] = 0;
+ dst[kMPersp2] = 1;
+ }
+}
+
+bool SkMatrix::invertNonIdentity(SkMatrix* inv) const {
+ SkASSERT(!this->isIdentity());
+
+ TypeMask mask = this->getType();
+
+ if (0 == (mask & ~(kScale_Mask | kTranslate_Mask))) {
+ bool invertible = true;
+ if (inv) {
+ if (mask & kScale_Mask) {
+ SkScalar invX = fMat[kMScaleX];
+ SkScalar invY = fMat[kMScaleY];
+ if (0 == invX || 0 == invY) {
+ return false;
+ }
+ invX = SkScalarInvert(invX);
+ invY = SkScalarInvert(invY);
+
+ // Must be careful when writing to inv, since it may be the
+ // same memory as this.
+
+ inv->fMat[kMSkewX] = inv->fMat[kMSkewY] =
+ inv->fMat[kMPersp0] = inv->fMat[kMPersp1] = 0;
+
+ inv->fMat[kMScaleX] = invX;
+ inv->fMat[kMScaleY] = invY;
+ inv->fMat[kMPersp2] = 1;
+ inv->fMat[kMTransX] = -fMat[kMTransX] * invX;
+ inv->fMat[kMTransY] = -fMat[kMTransY] * invY;
+
+ inv->setTypeMask(mask | kRectStaysRect_Mask);
+ } else {
+ // translate only
+ inv->setTranslate(-fMat[kMTransX], -fMat[kMTransY]);
+ }
+ } else { // inv is nullptr, just check if we're invertible
+ if (!fMat[kMScaleX] || !fMat[kMScaleY]) {
+ invertible = false;
+ }
+ }
+ return invertible;
+ }
+
+ int isPersp = mask & kPerspective_Mask;
+ double invDet = sk_inv_determinant(fMat, isPersp);
+
+ if (invDet == 0) { // underflow
+ return false;
+ }
+
+ bool applyingInPlace = (inv == this);
+
+ SkMatrix* tmp = inv;
+
+ SkMatrix storage;
+ if (applyingInPlace || nullptr == tmp) {
+ tmp = &storage; // we either need to avoid trampling memory or have no memory
+ }
+
+ ComputeInv(tmp->fMat, fMat, invDet, isPersp);
+ if (!tmp->isFinite()) {
+ return false;
+ }
+
+ tmp->setTypeMask(fTypeMask);
+
+ if (applyingInPlace) {
+ *inv = storage; // need to copy answer back
+ }
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix::Identity_pts(const SkMatrix& m, SkPoint dst[], const SkPoint src[], int count) {
+ SkASSERT(m.getType() == 0);
+
+ if (dst != src && count > 0) {
+ memcpy(dst, src, count * sizeof(SkPoint));
+ }
+}
+
+void SkMatrix::Trans_pts(const SkMatrix& m, SkPoint dst[], const SkPoint src[], int count) {
+ SkASSERT(m.getType() <= SkMatrix::kTranslate_Mask);
+ if (count > 0) {
+ SkScalar tx = m.getTranslateX();
+ SkScalar ty = m.getTranslateY();
+ if (count & 1) {
+ dst->fX = src->fX + tx;
+ dst->fY = src->fY + ty;
+ src += 1;
+ dst += 1;
+ }
+ Sk4s trans4(tx, ty, tx, ty);
+ count >>= 1;
+ if (count & 1) {
+ (Sk4s::Load(src) + trans4).store(dst);
+ src += 2;
+ dst += 2;
+ }
+ count >>= 1;
+ for (int i = 0; i < count; ++i) {
+ (Sk4s::Load(src+0) + trans4).store(dst+0);
+ (Sk4s::Load(src+2) + trans4).store(dst+2);
+ src += 4;
+ dst += 4;
+ }
+ }
+}
+
+void SkMatrix::Scale_pts(const SkMatrix& m, SkPoint dst[], const SkPoint src[], int count) {
+ SkASSERT(m.getType() <= (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask));
+ if (count > 0) {
+ SkScalar tx = m.getTranslateX();
+ SkScalar ty = m.getTranslateY();
+ SkScalar sx = m.getScaleX();
+ SkScalar sy = m.getScaleY();
+ if (count & 1) {
+ dst->fX = src->fX * sx + tx;
+ dst->fY = src->fY * sy + ty;
+ src += 1;
+ dst += 1;
+ }
+ Sk4s trans4(tx, ty, tx, ty);
+ Sk4s scale4(sx, sy, sx, sy);
+ count >>= 1;
+ if (count & 1) {
+ (Sk4s::Load(src) * scale4 + trans4).store(dst);
+ src += 2;
+ dst += 2;
+ }
+ count >>= 1;
+ for (int i = 0; i < count; ++i) {
+ (Sk4s::Load(src+0) * scale4 + trans4).store(dst+0);
+ (Sk4s::Load(src+2) * scale4 + trans4).store(dst+2);
+ src += 4;
+ dst += 4;
+ }
+ }
+}
+
+void SkMatrix::Persp_pts(const SkMatrix& m, SkPoint dst[],
+ const SkPoint src[], int count) {
+ SkASSERT(m.hasPerspective());
+
+ if (count > 0) {
+ do {
+ SkScalar sy = src->fY;
+ SkScalar sx = src->fX;
+ src += 1;
+
+ SkScalar x = sdot(sx, m.fMat[kMScaleX], sy, m.fMat[kMSkewX]) + m.fMat[kMTransX];
+ SkScalar y = sdot(sx, m.fMat[kMSkewY], sy, m.fMat[kMScaleY]) + m.fMat[kMTransY];
+#ifdef SK_LEGACY_MATRIX_MATH_ORDER
+ SkScalar z = sx * m.fMat[kMPersp0] + (sy * m.fMat[kMPersp1] + m.fMat[kMPersp2]);
+#else
+ SkScalar z = sdot(sx, m.fMat[kMPersp0], sy, m.fMat[kMPersp1]) + m.fMat[kMPersp2];
+#endif
+ if (z) {
+ z = SkScalarFastInvert(z);
+ }
+
+ dst->fY = y * z;
+ dst->fX = x * z;
+ dst += 1;
+ } while (--count);
+ }
+}
+
+void SkMatrix::Affine_vpts(const SkMatrix& m, SkPoint dst[], const SkPoint src[], int count) {
+ SkASSERT(m.getType() != SkMatrix::kPerspective_Mask);
+ if (count > 0) {
+ SkScalar tx = m.getTranslateX();
+ SkScalar ty = m.getTranslateY();
+ SkScalar sx = m.getScaleX();
+ SkScalar sy = m.getScaleY();
+ SkScalar kx = m.getSkewX();
+ SkScalar ky = m.getSkewY();
+ if (count & 1) {
+ dst->set(src->fX * sx + src->fY * kx + tx,
+ src->fX * ky + src->fY * sy + ty);
+ src += 1;
+ dst += 1;
+ }
+ Sk4s trans4(tx, ty, tx, ty);
+ Sk4s scale4(sx, sy, sx, sy);
+ Sk4s skew4(kx, ky, kx, ky); // applied to swizzle of src4
+ count >>= 1;
+ for (int i = 0; i < count; ++i) {
+ Sk4s src4 = Sk4s::Load(src);
+ Sk4s swz4 = SkNx_shuffle<1,0,3,2>(src4); // y0 x0, y1 x1
+ (src4 * scale4 + swz4 * skew4 + trans4).store(dst);
+ src += 2;
+ dst += 2;
+ }
+ }
+}
+
+const SkMatrix::MapPtsProc SkMatrix::gMapPtsProcs[] = {
+ SkMatrix::Identity_pts, SkMatrix::Trans_pts,
+ SkMatrix::Scale_pts, SkMatrix::Scale_pts,
+ SkMatrix::Affine_vpts, SkMatrix::Affine_vpts,
+ SkMatrix::Affine_vpts, SkMatrix::Affine_vpts,
+ // repeat the persp proc 8 times
+ SkMatrix::Persp_pts, SkMatrix::Persp_pts,
+ SkMatrix::Persp_pts, SkMatrix::Persp_pts,
+ SkMatrix::Persp_pts, SkMatrix::Persp_pts,
+ SkMatrix::Persp_pts, SkMatrix::Persp_pts
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix::mapHomogeneousPoints(SkScalar dst[], const SkScalar src[], int count) const {
+ SkASSERT((dst && src && count > 0) || 0 == count);
+ // no partial overlap
+ SkASSERT(src == dst || &dst[3*count] <= &src[0] || &src[3*count] <= &dst[0]);
+
+ if (count > 0) {
+ if (this->isIdentity()) {
+ memcpy(dst, src, 3*count*sizeof(SkScalar));
+ return;
+ }
+ do {
+ SkScalar sx = src[0];
+ SkScalar sy = src[1];
+ SkScalar sw = src[2];
+ src += 3;
+
+ SkScalar x = sdot(sx, fMat[kMScaleX], sy, fMat[kMSkewX], sw, fMat[kMTransX]);
+ SkScalar y = sdot(sx, fMat[kMSkewY], sy, fMat[kMScaleY], sw, fMat[kMTransY]);
+ SkScalar w = sdot(sx, fMat[kMPersp0], sy, fMat[kMPersp1], sw, fMat[kMPersp2]);
+
+ dst[0] = x;
+ dst[1] = y;
+ dst[2] = w;
+ dst += 3;
+ } while (--count);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix::mapVectors(SkPoint dst[], const SkPoint src[], int count) const {
+ if (this->hasPerspective()) {
+ SkPoint origin;
+
+ MapXYProc proc = this->getMapXYProc();
+ proc(*this, 0, 0, &origin);
+
+ for (int i = count - 1; i >= 0; --i) {
+ SkPoint tmp;
+
+ proc(*this, src[i].fX, src[i].fY, &tmp);
+ dst[i].set(tmp.fX - origin.fX, tmp.fY - origin.fY);
+ }
+ } else {
+ SkMatrix tmp = *this;
+
+ tmp.fMat[kMTransX] = tmp.fMat[kMTransY] = 0;
+ tmp.clearTypeMask(kTranslate_Mask);
+ tmp.mapPoints(dst, src, count);
+ }
+}
+
+static Sk4f sort_as_rect(const Sk4f& ltrb) {
+ Sk4f rblt(ltrb[2], ltrb[3], ltrb[0], ltrb[1]);
+ Sk4f min = Sk4f::Min(ltrb, rblt);
+ Sk4f max = Sk4f::Max(ltrb, rblt);
+ // We can extract either pair [0,1] or [2,3] from min and max and be correct, but on
+ // ARM this sequence generates the fastest (a single instruction).
+ return Sk4f(min[2], min[3], max[0], max[1]);
+}
+
+void SkMatrix::mapRectScaleTranslate(SkRect* dst, const SkRect& src) const {
+ SkASSERT(dst);
+ SkASSERT(this->isScaleTranslate());
+
+ SkScalar sx = fMat[kMScaleX];
+ SkScalar sy = fMat[kMScaleY];
+ SkScalar tx = fMat[kMTransX];
+ SkScalar ty = fMat[kMTransY];
+ Sk4f scale(sx, sy, sx, sy);
+ Sk4f trans(tx, ty, tx, ty);
+ sort_as_rect(Sk4f::Load(&src.fLeft) * scale + trans).store(&dst->fLeft);
+}
+
+bool SkMatrix::mapRect(SkRect* dst, const SkRect& src) const {
+ SkASSERT(dst);
+
+ if (this->getType() <= kTranslate_Mask) {
+ SkScalar tx = fMat[kMTransX];
+ SkScalar ty = fMat[kMTransY];
+ Sk4f trans(tx, ty, tx, ty);
+ sort_as_rect(Sk4f::Load(&src.fLeft) + trans).store(&dst->fLeft);
+ return true;
+ }
+ if (this->isScaleTranslate()) {
+ this->mapRectScaleTranslate(dst, src);
+ return true;
+ } else {
+ SkPoint quad[4];
+
+ src.toQuad(quad);
+ this->mapPoints(quad, quad, 4);
+ dst->set(quad, 4);
+ return false;
+ }
+}
+
+SkScalar SkMatrix::mapRadius(SkScalar radius) const {
+ SkVector vec[2];
+
+ vec[0].set(radius, 0);
+ vec[1].set(0, radius);
+ this->mapVectors(vec, 2);
+
+ SkScalar d0 = vec[0].length();
+ SkScalar d1 = vec[1].length();
+
+ // return geometric mean
+ return SkScalarSqrt(d0 * d1);
+}
+
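A standalone worked example (not part of the patch) of the geometric mean used by mapRadius() above: under a pure scale of (2, 8) the two mapped radius vectors have lengths 2r and 8r, so the returned radius is 4r. The scale factors are chosen so the result is exact in float.

    #include <cassert>
    #include <cmath>

    int main() {
        const float r  = 1.0f;
        const float d0 = 2.0f * r;                // length of the mapped vector (r, 0)
        const float d1 = 8.0f * r;                // length of the mapped vector (0, r)
        assert(std::sqrt(d0 * d1) == 4.0f);       // geometric mean, exact for powers of two
        return 0;
    }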
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix::Persp_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+ SkPoint* pt) {
+ SkASSERT(m.hasPerspective());
+
+ SkScalar x = sdot(sx, m.fMat[kMScaleX], sy, m.fMat[kMSkewX]) + m.fMat[kMTransX];
+ SkScalar y = sdot(sx, m.fMat[kMSkewY], sy, m.fMat[kMScaleY]) + m.fMat[kMTransY];
+ SkScalar z = sdot(sx, m.fMat[kMPersp0], sy, m.fMat[kMPersp1]) + m.fMat[kMPersp2];
+ if (z) {
+ z = SkScalarFastInvert(z);
+ }
+ pt->fX = x * z;
+ pt->fY = y * z;
+}
+
+void SkMatrix::RotTrans_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+ SkPoint* pt) {
+ SkASSERT((m.getType() & (kAffine_Mask | kPerspective_Mask)) == kAffine_Mask);
+
+#ifdef SK_LEGACY_MATRIX_MATH_ORDER
+ pt->fX = sx * m.fMat[kMScaleX] + (sy * m.fMat[kMSkewX] + m.fMat[kMTransX]);
+ pt->fY = sx * m.fMat[kMSkewY] + (sy * m.fMat[kMScaleY] + m.fMat[kMTransY]);
+#else
+ pt->fX = sdot(sx, m.fMat[kMScaleX], sy, m.fMat[kMSkewX]) + m.fMat[kMTransX];
+ pt->fY = sdot(sx, m.fMat[kMSkewY], sy, m.fMat[kMScaleY]) + m.fMat[kMTransY];
+#endif
+}
+
+void SkMatrix::Rot_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+ SkPoint* pt) {
+ SkASSERT((m.getType() & (kAffine_Mask | kPerspective_Mask))== kAffine_Mask);
+ SkASSERT(0 == m.fMat[kMTransX]);
+ SkASSERT(0 == m.fMat[kMTransY]);
+
+#ifdef SK_LEGACY_MATRIX_MATH_ORDER
+ pt->fX = sx * m.fMat[kMScaleX] + (sy * m.fMat[kMSkewX] + m.fMat[kMTransX]);
+ pt->fY = sx * m.fMat[kMSkewY] + (sy * m.fMat[kMScaleY] + m.fMat[kMTransY]);
+#else
+ pt->fX = sdot(sx, m.fMat[kMScaleX], sy, m.fMat[kMSkewX]) + m.fMat[kMTransX];
+ pt->fY = sdot(sx, m.fMat[kMSkewY], sy, m.fMat[kMScaleY]) + m.fMat[kMTransY];
+#endif
+}
+
+void SkMatrix::ScaleTrans_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+ SkPoint* pt) {
+ SkASSERT((m.getType() & (kScale_Mask | kAffine_Mask | kPerspective_Mask))
+ == kScale_Mask);
+
+ pt->fX = sx * m.fMat[kMScaleX] + m.fMat[kMTransX];
+ pt->fY = sy * m.fMat[kMScaleY] + m.fMat[kMTransY];
+}
+
+void SkMatrix::Scale_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+ SkPoint* pt) {
+ SkASSERT((m.getType() & (kScale_Mask | kAffine_Mask | kPerspective_Mask))
+ == kScale_Mask);
+ SkASSERT(0 == m.fMat[kMTransX]);
+ SkASSERT(0 == m.fMat[kMTransY]);
+
+ pt->fX = sx * m.fMat[kMScaleX];
+ pt->fY = sy * m.fMat[kMScaleY];
+}
+
+void SkMatrix::Trans_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+ SkPoint* pt) {
+ SkASSERT(m.getType() == kTranslate_Mask);
+
+ pt->fX = sx + m.fMat[kMTransX];
+ pt->fY = sy + m.fMat[kMTransY];
+}
+
+void SkMatrix::Identity_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+ SkPoint* pt) {
+ SkASSERT(0 == m.getType());
+
+ pt->fX = sx;
+ pt->fY = sy;
+}
+
+const SkMatrix::MapXYProc SkMatrix::gMapXYProcs[] = {
+ SkMatrix::Identity_xy, SkMatrix::Trans_xy,
+ SkMatrix::Scale_xy, SkMatrix::ScaleTrans_xy,
+ SkMatrix::Rot_xy, SkMatrix::RotTrans_xy,
+ SkMatrix::Rot_xy, SkMatrix::RotTrans_xy,
+ // repeat the persp proc 8 times
+ SkMatrix::Persp_xy, SkMatrix::Persp_xy,
+ SkMatrix::Persp_xy, SkMatrix::Persp_xy,
+ SkMatrix::Persp_xy, SkMatrix::Persp_xy,
+ SkMatrix::Persp_xy, SkMatrix::Persp_xy
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+// if it's nearly zero (the 26 is just made up; perhaps it should be bigger or smaller)
+#define PerspNearlyZero(x) SkScalarNearlyZero(x, (1.0f / (1 << 26)))
+
+bool SkMatrix::isFixedStepInX() const {
+ return PerspNearlyZero(fMat[kMPersp0]);
+}
+
+SkVector SkMatrix::fixedStepInX(SkScalar y) const {
+ SkASSERT(PerspNearlyZero(fMat[kMPersp0]));
+ if (PerspNearlyZero(fMat[kMPersp1]) &&
+ PerspNearlyZero(fMat[kMPersp2] - 1)) {
+ return SkVector::Make(fMat[kMScaleX], fMat[kMSkewY]);
+ } else {
+ SkScalar z = y * fMat[kMPersp1] + fMat[kMPersp2];
+ return SkVector::Make(fMat[kMScaleX] / z, fMat[kMSkewY] / z);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkPerspIter.h"
+
+SkPerspIter::SkPerspIter(const SkMatrix& m, SkScalar x0, SkScalar y0, int count)
+ : fMatrix(m), fSX(x0), fSY(y0), fCount(count) {
+ SkPoint pt;
+
+ SkMatrix::Persp_xy(m, x0, y0, &pt);
+ fX = SkScalarToFixed(pt.fX);
+ fY = SkScalarToFixed(pt.fY);
+}
+
+int SkPerspIter::next() {
+ int n = fCount;
+
+ if (0 == n) {
+ return 0;
+ }
+ SkPoint pt;
+ SkFixed x = fX;
+ SkFixed y = fY;
+ SkFixed dx, dy;
+
+ if (n >= kCount) {
+ n = kCount;
+ fSX += SkIntToScalar(kCount);
+ SkMatrix::Persp_xy(fMatrix, fSX, fSY, &pt);
+ fX = SkScalarToFixed(pt.fX);
+ fY = SkScalarToFixed(pt.fY);
+ dx = (fX - x) >> kShift;
+ dy = (fY - y) >> kShift;
+ } else {
+ fSX += SkIntToScalar(n);
+ SkMatrix::Persp_xy(fMatrix, fSX, fSY, &pt);
+ fX = SkScalarToFixed(pt.fX);
+ fY = SkScalarToFixed(pt.fY);
+ dx = (fX - x) / n;
+ dy = (fY - y) / n;
+ }
+
+ SkFixed* p = fStorage;
+ for (int i = 0; i < n; i++) {
+ *p++ = x; x += dx;
+ *p++ = y; y += dy;
+ }
+
+ fCount -= n;
+ return n;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline bool checkForZero(float x) {
+ return x*x == 0;
+}
+
+static inline bool poly_to_point(SkPoint* pt, const SkPoint poly[], int count) {
+ float x = 1, y = 1;
+ SkPoint pt1, pt2;
+
+ if (count > 1) {
+ pt1.fX = poly[1].fX - poly[0].fX;
+ pt1.fY = poly[1].fY - poly[0].fY;
+ y = SkPoint::Length(pt1.fX, pt1.fY);
+ if (checkForZero(y)) {
+ return false;
+ }
+ switch (count) {
+ case 2:
+ break;
+ case 3:
+ pt2.fX = poly[0].fY - poly[2].fY;
+ pt2.fY = poly[2].fX - poly[0].fX;
+ goto CALC_X;
+ default:
+ pt2.fX = poly[0].fY - poly[3].fY;
+ pt2.fY = poly[3].fX - poly[0].fX;
+ CALC_X:
+ x = sdot(pt1.fX, pt2.fX, pt1.fY, pt2.fY) / y;
+ break;
+ }
+ }
+ pt->set(x, y);
+ return true;
+}
+
+bool SkMatrix::Poly2Proc(const SkPoint srcPt[], SkMatrix* dst,
+ const SkPoint& scale) {
+ float invScale = 1 / scale.fY;
+
+ dst->fMat[kMScaleX] = (srcPt[1].fY - srcPt[0].fY) * invScale;
+ dst->fMat[kMSkewY] = (srcPt[0].fX - srcPt[1].fX) * invScale;
+ dst->fMat[kMPersp0] = 0;
+ dst->fMat[kMSkewX] = (srcPt[1].fX - srcPt[0].fX) * invScale;
+ dst->fMat[kMScaleY] = (srcPt[1].fY - srcPt[0].fY) * invScale;
+ dst->fMat[kMPersp1] = 0;
+ dst->fMat[kMTransX] = srcPt[0].fX;
+ dst->fMat[kMTransY] = srcPt[0].fY;
+ dst->fMat[kMPersp2] = 1;
+ dst->setTypeMask(kUnknown_Mask);
+ return true;
+}
+
+bool SkMatrix::Poly3Proc(const SkPoint srcPt[], SkMatrix* dst,
+ const SkPoint& scale) {
+ float invScale = 1 / scale.fX;
+ dst->fMat[kMScaleX] = (srcPt[2].fX - srcPt[0].fX) * invScale;
+ dst->fMat[kMSkewY] = (srcPt[2].fY - srcPt[0].fY) * invScale;
+ dst->fMat[kMPersp0] = 0;
+
+ invScale = 1 / scale.fY;
+ dst->fMat[kMSkewX] = (srcPt[1].fX - srcPt[0].fX) * invScale;
+ dst->fMat[kMScaleY] = (srcPt[1].fY - srcPt[0].fY) * invScale;
+ dst->fMat[kMPersp1] = 0;
+
+ dst->fMat[kMTransX] = srcPt[0].fX;
+ dst->fMat[kMTransY] = srcPt[0].fY;
+ dst->fMat[kMPersp2] = 1;
+ dst->setTypeMask(kUnknown_Mask);
+ return true;
+}
+
+bool SkMatrix::Poly4Proc(const SkPoint srcPt[], SkMatrix* dst,
+ const SkPoint& scale) {
+ float a1, a2;
+ float x0, y0, x1, y1, x2, y2;
+
+ x0 = srcPt[2].fX - srcPt[0].fX;
+ y0 = srcPt[2].fY - srcPt[0].fY;
+ x1 = srcPt[2].fX - srcPt[1].fX;
+ y1 = srcPt[2].fY - srcPt[1].fY;
+ x2 = srcPt[2].fX - srcPt[3].fX;
+ y2 = srcPt[2].fY - srcPt[3].fY;
+
+ /* check if abs(x2) > abs(y2) */
+ if ( x2 > 0 ? y2 > 0 ? x2 > y2 : x2 > -y2 : y2 > 0 ? -x2 > y2 : x2 < y2) {
+ float denom = SkScalarMulDiv(x1, y2, x2) - y1;
+ if (checkForZero(denom)) {
+ return false;
+ }
+ a1 = (SkScalarMulDiv(x0 - x1, y2, x2) - y0 + y1) / denom;
+ } else {
+ float denom = x1 - SkScalarMulDiv(y1, x2, y2);
+ if (checkForZero(denom)) {
+ return false;
+ }
+ a1 = (x0 - x1 - SkScalarMulDiv(y0 - y1, x2, y2)) / denom;
+ }
+
+ /* check if abs(x1) > abs(y1) */
+ if ( x1 > 0 ? y1 > 0 ? x1 > y1 : x1 > -y1 : y1 > 0 ? -x1 > y1 : x1 < y1) {
+ float denom = y2 - SkScalarMulDiv(x2, y1, x1);
+ if (checkForZero(denom)) {
+ return false;
+ }
+ a2 = (y0 - y2 - SkScalarMulDiv(x0 - x2, y1, x1)) / denom;
+ } else {
+ float denom = SkScalarMulDiv(y2, x1, y1) - x2;
+ if (checkForZero(denom)) {
+ return false;
+ }
+ a2 = (SkScalarMulDiv(y0 - y2, x1, y1) - x0 + x2) / denom;
+ }
+
+ float invScale = SkScalarInvert(scale.fX);
+ dst->fMat[kMScaleX] = (a2 * srcPt[3].fX + srcPt[3].fX - srcPt[0].fX) * invScale;
+ dst->fMat[kMSkewY] = (a2 * srcPt[3].fY + srcPt[3].fY - srcPt[0].fY) * invScale;
+ dst->fMat[kMPersp0] = a2 * invScale;
+
+ invScale = SkScalarInvert(scale.fY);
+ dst->fMat[kMSkewX] = (a1 * srcPt[1].fX + srcPt[1].fX - srcPt[0].fX) * invScale;
+ dst->fMat[kMScaleY] = (a1 * srcPt[1].fY + srcPt[1].fY - srcPt[0].fY) * invScale;
+ dst->fMat[kMPersp1] = a1 * invScale;
+
+ dst->fMat[kMTransX] = srcPt[0].fX;
+ dst->fMat[kMTransY] = srcPt[0].fY;
+ dst->fMat[kMPersp2] = 1;
+ dst->setTypeMask(kUnknown_Mask);
+ return true;
+}
+
+typedef bool (*PolyMapProc)(const SkPoint[], SkMatrix*, const SkPoint&);
+
+/* Taken from Rob Johnson's original sample code in QuickDraw GX
+*/
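+/* Illustrative usage (an informal sketch; the points are made up for the example):
+
+       SkPoint src[4] = {{0, 0}, {1, 0}, {1, 1}, {0, 1}};
+       SkPoint dst[4] = {{10, 10}, {90, 20}, {80, 90}, {5, 70}};
+       SkMatrix m;
+       if (m.setPolyToPoly(src, dst, 4)) {
+           // m now maps src[i] onto dst[i] for i = 0..3
+       }
+
+   count == 4 yields a full perspective mapping, count == 3 the unique affine map,
+   count == 2 a similarity (rotate + uniform scale + translate), and count == 1 a
+   pure translation.
+*/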
+bool SkMatrix::setPolyToPoly(const SkPoint src[], const SkPoint dst[],
+ int count) {
+ if ((unsigned)count > 4) {
+ SkDebugf("--- SkMatrix::setPolyToPoly count out of range %d\n", count);
+ return false;
+ }
+
+ if (0 == count) {
+ this->reset();
+ return true;
+ }
+ if (1 == count) {
+ this->setTranslate(dst[0].fX - src[0].fX, dst[0].fY - src[0].fY);
+ return true;
+ }
+
+ SkPoint scale;
+ if (!poly_to_point(&scale, src, count) ||
+ SkScalarNearlyZero(scale.fX) ||
+ SkScalarNearlyZero(scale.fY)) {
+ return false;
+ }
+
+ static const PolyMapProc gPolyMapProcs[] = {
+ SkMatrix::Poly2Proc, SkMatrix::Poly3Proc, SkMatrix::Poly4Proc
+ };
+ PolyMapProc proc = gPolyMapProcs[count - 2];
+
+ SkMatrix tempMap, result;
+ tempMap.setTypeMask(kUnknown_Mask);
+
+ if (!proc(src, &tempMap, scale)) {
+ return false;
+ }
+ if (!tempMap.invert(&result)) {
+ return false;
+ }
+ if (!proc(dst, &tempMap, scale)) {
+ return false;
+ }
+ this->setConcat(tempMap, result);
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+enum MinMaxOrBoth {
+ kMin_MinMaxOrBoth,
+ kMax_MinMaxOrBoth,
+ kBoth_MinMaxOrBoth
+};
+
+template <MinMaxOrBoth MIN_MAX_OR_BOTH> bool get_scale_factor(SkMatrix::TypeMask typeMask,
+ const SkScalar m[9],
+ SkScalar results[/*1 or 2*/]) {
+ if (typeMask & SkMatrix::kPerspective_Mask) {
+ return false;
+ }
+ if (SkMatrix::kIdentity_Mask == typeMask) {
+ results[0] = SK_Scalar1;
+ if (kBoth_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+ results[1] = SK_Scalar1;
+ }
+ return true;
+ }
+ if (!(typeMask & SkMatrix::kAffine_Mask)) {
+ if (kMin_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+ results[0] = SkMinScalar(SkScalarAbs(m[SkMatrix::kMScaleX]),
+ SkScalarAbs(m[SkMatrix::kMScaleY]));
+ } else if (kMax_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+ results[0] = SkMaxScalar(SkScalarAbs(m[SkMatrix::kMScaleX]),
+ SkScalarAbs(m[SkMatrix::kMScaleY]));
+ } else {
+ results[0] = SkScalarAbs(m[SkMatrix::kMScaleX]);
+ results[1] = SkScalarAbs(m[SkMatrix::kMScaleY]);
+ if (results[0] > results[1]) {
+ SkTSwap(results[0], results[1]);
+ }
+ }
+ return true;
+ }
+ // ignore the translation part of the matrix, just look at 2x2 portion.
+ // compute singular values, take largest or smallest abs value.
+ // [a b; b c] = A^T*A
+ SkScalar a = sdot(m[SkMatrix::kMScaleX], m[SkMatrix::kMScaleX],
+ m[SkMatrix::kMSkewY], m[SkMatrix::kMSkewY]);
+ SkScalar b = sdot(m[SkMatrix::kMScaleX], m[SkMatrix::kMSkewX],
+ m[SkMatrix::kMScaleY], m[SkMatrix::kMSkewY]);
+ SkScalar c = sdot(m[SkMatrix::kMSkewX], m[SkMatrix::kMSkewX],
+ m[SkMatrix::kMScaleY], m[SkMatrix::kMScaleY]);
+ // eigenvalues of A^T*A are the squared singular values of A.
+    // characteristic equation is det((A^T*A) - l*I) = 0, i.e.
+    //   l^2 - (a + c)l + (ac - b^2) = 0.
+    // Solve with the quadratic formula (the divisor is non-zero since the l^2
+    // coefficient is 1, and the roots are guaranteed to be real and non-negative).
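+    // Concretely the two roots are
+    //   l = (a + c)/2 +/- sqrt(((a - c)/2)^2 + b^2),
+    // computed below as apluscdiv2 +/- x before the final square roots are taken.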
+ SkScalar bSqd = b * b;
+ // if upper left 2x2 is orthogonal save some math
+ if (bSqd <= SK_ScalarNearlyZero*SK_ScalarNearlyZero) {
+ if (kMin_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+ results[0] = SkMinScalar(a, c);
+ } else if (kMax_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+ results[0] = SkMaxScalar(a, c);
+ } else {
+ results[0] = a;
+ results[1] = c;
+ if (results[0] > results[1]) {
+ SkTSwap(results[0], results[1]);
+ }
+ }
+ } else {
+ SkScalar aminusc = a - c;
+ SkScalar apluscdiv2 = SkScalarHalf(a + c);
+ SkScalar x = SkScalarHalf(SkScalarSqrt(aminusc * aminusc + 4 * bSqd));
+ if (kMin_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+ results[0] = apluscdiv2 - x;
+ } else if (kMax_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+ results[0] = apluscdiv2 + x;
+ } else {
+ results[0] = apluscdiv2 - x;
+ results[1] = apluscdiv2 + x;
+ }
+ }
+ if (!SkScalarIsFinite(results[0])) {
+ return false;
+ }
+    // Due to floating-point inaccuracy, a, b, and c (computed by sdot) may carry small
+    // errors that are amplified by the subsequent arithmetic. Therefore, we tolerate
+    // nearly-zero negative values here and clamp them to zero.
+ SkASSERT(results[0] >= -SK_ScalarNearlyZero);
+ if (results[0] < 0) {
+ results[0] = 0;
+ }
+ results[0] = SkScalarSqrt(results[0]);
+ if (kBoth_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+ if (!SkScalarIsFinite(results[1])) {
+ return false;
+ }
+ SkASSERT(results[1] >= -SK_ScalarNearlyZero);
+ if (results[1] < 0) {
+ results[1] = 0;
+ }
+ results[1] = SkScalarSqrt(results[1]);
+ }
+ return true;
+}
+
+SkScalar SkMatrix::getMinScale() const {
+ SkScalar factor;
+ if (get_scale_factor<kMin_MinMaxOrBoth>(this->getType(), fMat, &factor)) {
+ return factor;
+ } else {
+ return -1;
+ }
+}
+
+SkScalar SkMatrix::getMaxScale() const {
+ SkScalar factor;
+ if (get_scale_factor<kMax_MinMaxOrBoth>(this->getType(), fMat, &factor)) {
+ return factor;
+ } else {
+ return -1;
+ }
+}
+
+bool SkMatrix::getMinMaxScales(SkScalar scaleFactors[2]) const {
+ return get_scale_factor<kBoth_MinMaxOrBoth>(this->getType(), fMat, scaleFactors);
+}
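+
+// Illustrative sketch (informal; the concrete matrix is assumed for the example): for
+//     SkMatrix m; m.setRotate(30); m.preScale(2, 3);
+// getMinMaxScales() reports {2, 3}, since rotation does not change the singular values,
+// while any perspective matrix makes getMinScale()/getMaxScale() return -1 and
+// getMinMaxScales() return false.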
+
+namespace {
+
+// SkMatrix is C++11 POD (trivial and standard-layout), but not aggregate (it has private fields).
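+// Using an aggregate lets the identity and "invalid" singletons below be constant-
+// initialized, without running a constructor or registering a static initializer.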
+struct AggregateMatrix {
+ SkScalar matrix[9];
+ uint32_t typemask;
+
+ const SkMatrix& asSkMatrix() const { return *reinterpret_cast<const SkMatrix*>(this); }
+};
+static_assert(sizeof(AggregateMatrix) == sizeof(SkMatrix), "AggregateMatrix size mismatch.");
+
+} // namespace
+
+const SkMatrix& SkMatrix::I() {
+ static_assert(offsetof(SkMatrix,fMat) == offsetof(AggregateMatrix,matrix), "fMat");
+ static_assert(offsetof(SkMatrix,fTypeMask) == offsetof(AggregateMatrix,typemask), "fTypeMask");
+
+ static const AggregateMatrix identity = { {SK_Scalar1, 0, 0,
+ 0, SK_Scalar1, 0,
+ 0, 0, SK_Scalar1 },
+ kIdentity_Mask | kRectStaysRect_Mask};
+ SkASSERT(identity.asSkMatrix().isIdentity());
+ return identity.asSkMatrix();
+}
+
+const SkMatrix& SkMatrix::InvalidMatrix() {
+ static_assert(offsetof(SkMatrix,fMat) == offsetof(AggregateMatrix,matrix), "fMat");
+ static_assert(offsetof(SkMatrix,fTypeMask) == offsetof(AggregateMatrix,typemask), "fTypeMask");
+
+ static const AggregateMatrix invalid =
+ { {SK_ScalarMax, SK_ScalarMax, SK_ScalarMax,
+ SK_ScalarMax, SK_ScalarMax, SK_ScalarMax,
+ SK_ScalarMax, SK_ScalarMax, SK_ScalarMax },
+ kTranslate_Mask | kScale_Mask | kAffine_Mask | kPerspective_Mask };
+ return invalid.asSkMatrix();
+}
+
+bool SkMatrix::decomposeScale(SkSize* scale, SkMatrix* remaining) const {
+ if (this->hasPerspective()) {
+ return false;
+ }
+
+ const SkScalar sx = SkVector::Length(this->getScaleX(), this->getSkewY());
+ const SkScalar sy = SkVector::Length(this->getSkewX(), this->getScaleY());
+ if (!SkScalarIsFinite(sx) || !SkScalarIsFinite(sy) ||
+ SkScalarNearlyZero(sx) || SkScalarNearlyZero(sy)) {
+ return false;
+ }
+
+ if (scale) {
+ scale->set(sx, sy);
+ }
+ if (remaining) {
+ *remaining = *this;
+ remaining->postScale(SkScalarInvert(sx), SkScalarInvert(sy));
+ }
+ return true;
+}
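+
+// Given Skia's post-concat convention, the postScale() above leaves
+// remaining == Scale(1/sx, 1/sy) * M, i.e. the original matrix applies `remaining`
+// first and the extracted scale second.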
+
+///////////////////////////////////////////////////////////////////////////////
+
+size_t SkMatrix::writeToMemory(void* buffer) const {
+ // TODO write less for simple matrices
+ static const size_t sizeInMemory = 9 * sizeof(SkScalar);
+ if (buffer) {
+ memcpy(buffer, fMat, sizeInMemory);
+ }
+ return sizeInMemory;
+}
+
+size_t SkMatrix::readFromMemory(const void* buffer, size_t length) {
+ static const size_t sizeInMemory = 9 * sizeof(SkScalar);
+ if (length < sizeInMemory) {
+ return 0;
+ }
+ if (buffer) {
+ memcpy(fMat, buffer, sizeInMemory);
+ this->setTypeMask(kUnknown_Mask);
+ }
+ return sizeInMemory;
+}
+
+void SkMatrix::dump() const {
+ SkString str;
+ this->toString(&str);
+ SkDebugf("%s\n", str.c_str());
+}
+
+void SkMatrix::toString(SkString* str) const {
+ str->appendf("[%8.4f %8.4f %8.4f][%8.4f %8.4f %8.4f][%8.4f %8.4f %8.4f]",
+ fMat[0], fMat[1], fMat[2], fMat[3], fMat[4], fMat[5],
+ fMat[6], fMat[7], fMat[8]);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkMatrixUtils.h"
+
+bool SkTreatAsSprite(const SkMatrix& mat, const SkISize& size, const SkPaint& paint) {
+ // Our path aa is 2-bits, and our rect aa is 8, so we could use 8,
+ // but in practice 4 seems enough (still looks smooth) and allows
+ // more slightly fractional cases to fall into the fast (sprite) case.
+ static const unsigned kAntiAliasSubpixelBits = 4;
+
+ const unsigned subpixelBits = paint.isAntiAlias() ? kAntiAliasSubpixelBits : 0;
+
+ // quick reject on affine or perspective
+ if (mat.getType() & ~(SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask)) {
+ return false;
+ }
+
+ // quick success check
+ if (!subpixelBits && !(mat.getType() & ~SkMatrix::kTranslate_Mask)) {
+ return true;
+ }
+
+ // mapRect supports negative scales, so we eliminate those first
+ if (mat.getScaleX() < 0 || mat.getScaleY() < 0) {
+ return false;
+ }
+
+ SkRect dst;
+ SkIRect isrc = SkIRect::MakeSize(size);
+
+ {
+ SkRect src;
+ src.set(isrc);
+ mat.mapRect(&dst, src);
+ }
+
+ // just apply the translate to isrc
+ isrc.offset(SkScalarRoundToInt(mat.getTranslateX()),
+ SkScalarRoundToInt(mat.getTranslateY()));
+
+ if (subpixelBits) {
+ isrc.fLeft = SkLeftShift(isrc.fLeft, subpixelBits);
+ isrc.fTop = SkLeftShift(isrc.fTop, subpixelBits);
+ isrc.fRight = SkLeftShift(isrc.fRight, subpixelBits);
+ isrc.fBottom = SkLeftShift(isrc.fBottom, subpixelBits);
+
+ const float scale = 1 << subpixelBits;
+ dst.fLeft *= scale;
+ dst.fTop *= scale;
+ dst.fRight *= scale;
+ dst.fBottom *= scale;
+ }
+
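+    // Compare the translated source rect against the rounded, mapped destination rect at
+    // 1/2^subpixelBits pixel resolution; only an exact match qualifies for the sprite path.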
+ SkIRect idst;
+ dst.round(&idst);
+ return isrc == idst;
+}
+
+// A square matrix M can be decomposed (via polar decomposition) into two matrices --
+// an orthogonal matrix Q and a symmetric matrix S. In turn we can decompose S into U*W*U^T,
+// where U is another orthogonal matrix and W is a scale matrix. These can be recombined
+// to give M = (Q*U)*W*U^T, i.e., the product of two orthogonal matrices and a scale matrix.
+//
+// The one wrinkle is that traditionally Q may contain a reflection -- the
+// calculation has been rejiggered to put that reflection into W.
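+//
+// Concretely, writing R(p) for the rotation [[p.fX, -p.fY], [p.fY, p.fX]], the upper-left
+// 2x2 of `matrix` factors as R(*rotation2) * diag(scale->fX, scale->fY) * R(*rotation1).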
+bool SkDecomposeUpper2x2(const SkMatrix& matrix,
+ SkPoint* rotation1,
+ SkPoint* scale,
+ SkPoint* rotation2) {
+
+ SkScalar A = matrix[SkMatrix::kMScaleX];
+ SkScalar B = matrix[SkMatrix::kMSkewX];
+ SkScalar C = matrix[SkMatrix::kMSkewY];
+ SkScalar D = matrix[SkMatrix::kMScaleY];
+
+ if (is_degenerate_2x2(A, B, C, D)) {
+ return false;
+ }
+
+ double w1, w2;
+ SkScalar cos1, sin1;
+ SkScalar cos2, sin2;
+
+ // do polar decomposition (M = Q*S)
+ SkScalar cosQ, sinQ;
+ double Sa, Sb, Sd;
+ // if M is already symmetric (i.e., M = I*S)
+ if (SkScalarNearlyEqual(B, C)) {
+ cosQ = 1;
+ sinQ = 0;
+
+ Sa = A;
+ Sb = B;
+ Sd = D;
+ } else {
+ cosQ = A + D;
+ sinQ = C - B;
+ SkScalar reciplen = SkScalarInvert(SkScalarSqrt(cosQ*cosQ + sinQ*sinQ));
+ cosQ *= reciplen;
+ sinQ *= reciplen;
+
+ // S = Q^-1*M
+ // we don't calc Sc since it's symmetric
+ Sa = A*cosQ + C*sinQ;
+ Sb = B*cosQ + D*sinQ;
+ Sd = -B*sinQ + D*cosQ;
+ }
+
+ // Now we need to compute eigenvalues of S (our scale factors)
+ // and eigenvectors (bases for our rotation)
+ // From this, should be able to reconstruct S as U*W*U^T
+ if (SkScalarNearlyZero(SkDoubleToScalar(Sb))) {
+ // already diagonalized
+ cos1 = 1;
+ sin1 = 0;
+ w1 = Sa;
+ w2 = Sd;
+ cos2 = cosQ;
+ sin2 = sinQ;
+ } else {
+ double diff = Sa - Sd;
+ double discriminant = sqrt(diff*diff + 4.0*Sb*Sb);
+ double trace = Sa + Sd;
+ if (diff > 0) {
+ w1 = 0.5*(trace + discriminant);
+ w2 = 0.5*(trace - discriminant);
+ } else {
+ w1 = 0.5*(trace - discriminant);
+ w2 = 0.5*(trace + discriminant);
+ }
+
+ cos1 = SkDoubleToScalar(Sb); sin1 = SkDoubleToScalar(w1 - Sa);
+ SkScalar reciplen = SkScalarInvert(SkScalarSqrt(cos1*cos1 + sin1*sin1));
+ cos1 *= reciplen;
+ sin1 *= reciplen;
+
+ // rotation 2 is composition of Q and U
+ cos2 = cos1*cosQ - sin1*sinQ;
+ sin2 = sin1*cosQ + cos1*sinQ;
+
+ // rotation 1 is U^T
+ sin1 = -sin1;
+ }
+
+ if (scale) {
+ scale->fX = SkDoubleToScalar(w1);
+ scale->fY = SkDoubleToScalar(w2);
+ }
+ if (rotation1) {
+ rotation1->fX = cos1;
+ rotation1->fY = sin1;
+ }
+ if (rotation2) {
+ rotation2->fX = cos2;
+ rotation2->fY = sin2;
+ }
+
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkRSXform::toQuad(SkScalar width, SkScalar height, SkPoint quad[4]) const {
+#if 0
+ // This is the slow way, but it documents what we're doing
+ quad[0].set(0, 0);
+ quad[1].set(width, 0);
+ quad[2].set(width, height);
+ quad[3].set(0, height);
+ SkMatrix m;
+ m.setRSXform(*this).mapPoints(quad, quad, 4);
+#else
+ const SkScalar m00 = fSCos;
+ const SkScalar m01 = -fSSin;
+ const SkScalar m02 = fTx;
+ const SkScalar m10 = -m01;
+ const SkScalar m11 = m00;
+ const SkScalar m12 = fTy;
+
+ quad[0].set(m02, m12);
+ quad[1].set(m00 * width + m02, m10 * width + m12);
+ quad[2].set(m00 * width + m01 * height + m02, m10 * width + m11 * height + m12);
+ quad[3].set(m01 * height + m02, m11 * height + m12);
+#endif
+}
diff --git a/gfx/skia/skia/src/core/SkMatrix44.cpp b/gfx/skia/skia/src/core/SkMatrix44.cpp
new file mode 100644
index 000000000..818f23af5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMatrix44.cpp
@@ -0,0 +1,1018 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMatrix44.h"
+
+static inline bool eq4(const SkMScalar* SK_RESTRICT a,
+ const SkMScalar* SK_RESTRICT b) {
+ return (a[0] == b[0]) & (a[1] == b[1]) & (a[2] == b[2]) & (a[3] == b[3]);
+}
+
+bool SkMatrix44::operator==(const SkMatrix44& other) const {
+ if (this == &other) {
+ return true;
+ }
+
+ if (this->isTriviallyIdentity() && other.isTriviallyIdentity()) {
+ return true;
+ }
+
+ const SkMScalar* SK_RESTRICT a = &fMat[0][0];
+ const SkMScalar* SK_RESTRICT b = &other.fMat[0][0];
+
+#if 0
+ for (int i = 0; i < 16; ++i) {
+ if (a[i] != b[i]) {
+ return false;
+ }
+ }
+ return true;
+#else
+ // to reduce branch instructions, we compare 4 at a time.
+ // see bench/Matrix44Bench.cpp for test.
+ if (!eq4(&a[0], &b[0])) {
+ return false;
+ }
+ if (!eq4(&a[4], &b[4])) {
+ return false;
+ }
+ if (!eq4(&a[8], &b[8])) {
+ return false;
+ }
+ return eq4(&a[12], &b[12]);
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+int SkMatrix44::computeTypeMask() const {
+ unsigned mask = 0;
+
+ if (0 != perspX() || 0 != perspY() || 0 != perspZ() || 1 != fMat[3][3]) {
+ return kTranslate_Mask | kScale_Mask | kAffine_Mask | kPerspective_Mask;
+ }
+
+ if (0 != transX() || 0 != transY() || 0 != transZ()) {
+ mask |= kTranslate_Mask;
+ }
+
+ if (1 != scaleX() || 1 != scaleY() || 1 != scaleZ()) {
+ mask |= kScale_Mask;
+ }
+
+ if (0 != fMat[1][0] || 0 != fMat[0][1] || 0 != fMat[0][2] ||
+ 0 != fMat[2][0] || 0 != fMat[1][2] || 0 != fMat[2][1]) {
+ mask |= kAffine_Mask;
+ }
+
+ return mask;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
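+// Note: fMat is indexed as fMat[col][row] (column-major storage), which is why the
+// row-major accessors below transpose as they copy.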
+void SkMatrix44::asColMajorf(float dst[]) const {
+ const SkMScalar* src = &fMat[0][0];
+#ifdef SK_MSCALAR_IS_DOUBLE
+ for (int i = 0; i < 16; ++i) {
+ dst[i] = SkMScalarToFloat(src[i]);
+ }
+#elif defined SK_MSCALAR_IS_FLOAT
+ memcpy(dst, src, 16 * sizeof(float));
+#endif
+}
+
+void SkMatrix44::as3x4RowMajorf(float dst[]) const {
+ dst[0] = fMat[0][0]; dst[1] = fMat[1][0]; dst[2] = fMat[2][0]; dst[3] = fMat[3][0];
+ dst[4] = fMat[0][1]; dst[5] = fMat[1][1]; dst[6] = fMat[2][1]; dst[7] = fMat[3][1];
+ dst[8] = fMat[0][2]; dst[9] = fMat[1][2]; dst[10] = fMat[2][2]; dst[11] = fMat[3][2];
+}
+
+void SkMatrix44::asColMajord(double dst[]) const {
+ const SkMScalar* src = &fMat[0][0];
+#ifdef SK_MSCALAR_IS_DOUBLE
+ memcpy(dst, src, 16 * sizeof(double));
+#elif defined SK_MSCALAR_IS_FLOAT
+ for (int i = 0; i < 16; ++i) {
+ dst[i] = SkMScalarToDouble(src[i]);
+ }
+#endif
+}
+
+void SkMatrix44::asRowMajorf(float dst[]) const {
+ const SkMScalar* src = &fMat[0][0];
+ for (int i = 0; i < 4; ++i) {
+ dst[0] = SkMScalarToFloat(src[0]);
+ dst[4] = SkMScalarToFloat(src[1]);
+ dst[8] = SkMScalarToFloat(src[2]);
+ dst[12] = SkMScalarToFloat(src[3]);
+ src += 4;
+ dst += 1;
+ }
+}
+
+void SkMatrix44::asRowMajord(double dst[]) const {
+ const SkMScalar* src = &fMat[0][0];
+ for (int i = 0; i < 4; ++i) {
+ dst[0] = SkMScalarToDouble(src[0]);
+ dst[4] = SkMScalarToDouble(src[1]);
+ dst[8] = SkMScalarToDouble(src[2]);
+ dst[12] = SkMScalarToDouble(src[3]);
+ src += 4;
+ dst += 1;
+ }
+}
+
+void SkMatrix44::setColMajorf(const float src[]) {
+ SkMScalar* dst = &fMat[0][0];
+#ifdef SK_MSCALAR_IS_DOUBLE
+ for (int i = 0; i < 16; ++i) {
+ dst[i] = SkMScalarToFloat(src[i]);
+ }
+#elif defined SK_MSCALAR_IS_FLOAT
+ memcpy(dst, src, 16 * sizeof(float));
+#endif
+
+ this->dirtyTypeMask();
+}
+
+void SkMatrix44::setColMajord(const double src[]) {
+ SkMScalar* dst = &fMat[0][0];
+#ifdef SK_MSCALAR_IS_DOUBLE
+ memcpy(dst, src, 16 * sizeof(double));
+#elif defined SK_MSCALAR_IS_FLOAT
+ for (int i = 0; i < 16; ++i) {
+ dst[i] = SkDoubleToMScalar(src[i]);
+ }
+#endif
+
+ this->dirtyTypeMask();
+}
+
+void SkMatrix44::setRowMajorf(const float src[]) {
+ SkMScalar* dst = &fMat[0][0];
+ for (int i = 0; i < 4; ++i) {
+ dst[0] = SkMScalarToFloat(src[0]);
+ dst[4] = SkMScalarToFloat(src[1]);
+ dst[8] = SkMScalarToFloat(src[2]);
+ dst[12] = SkMScalarToFloat(src[3]);
+ src += 4;
+ dst += 1;
+ }
+ this->dirtyTypeMask();
+}
+
+void SkMatrix44::setRowMajord(const double src[]) {
+ SkMScalar* dst = &fMat[0][0];
+ for (int i = 0; i < 4; ++i) {
+ dst[0] = SkDoubleToMScalar(src[0]);
+ dst[4] = SkDoubleToMScalar(src[1]);
+ dst[8] = SkDoubleToMScalar(src[2]);
+ dst[12] = SkDoubleToMScalar(src[3]);
+ src += 4;
+ dst += 1;
+ }
+ this->dirtyTypeMask();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+const SkMatrix44& SkMatrix44::I() {
+ static constexpr SkMatrix44 gIdentity44(kIdentity_Constructor);
+ return gIdentity44;
+}
+
+void SkMatrix44::setIdentity() {
+ fMat[0][0] = 1;
+ fMat[0][1] = 0;
+ fMat[0][2] = 0;
+ fMat[0][3] = 0;
+ fMat[1][0] = 0;
+ fMat[1][1] = 1;
+ fMat[1][2] = 0;
+ fMat[1][3] = 0;
+ fMat[2][0] = 0;
+ fMat[2][1] = 0;
+ fMat[2][2] = 1;
+ fMat[2][3] = 0;
+ fMat[3][0] = 0;
+ fMat[3][1] = 0;
+ fMat[3][2] = 0;
+ fMat[3][3] = 1;
+ this->setTypeMask(kIdentity_Mask);
+}
+
+void SkMatrix44::set3x3(SkMScalar m00, SkMScalar m01, SkMScalar m02,
+ SkMScalar m10, SkMScalar m11, SkMScalar m12,
+ SkMScalar m20, SkMScalar m21, SkMScalar m22) {
+ fMat[0][0] = m00; fMat[0][1] = m01; fMat[0][2] = m02; fMat[0][3] = 0;
+ fMat[1][0] = m10; fMat[1][1] = m11; fMat[1][2] = m12; fMat[1][3] = 0;
+ fMat[2][0] = m20; fMat[2][1] = m21; fMat[2][2] = m22; fMat[2][3] = 0;
+ fMat[3][0] = 0; fMat[3][1] = 0; fMat[3][2] = 0; fMat[3][3] = 1;
+ this->dirtyTypeMask();
+}
+
+void SkMatrix44::set3x3RowMajorf(const float src[]) {
+ fMat[0][0] = src[0]; fMat[0][1] = src[3]; fMat[0][2] = src[6]; fMat[0][3] = 0;
+ fMat[1][0] = src[1]; fMat[1][1] = src[4]; fMat[1][2] = src[7]; fMat[1][3] = 0;
+ fMat[2][0] = src[2]; fMat[2][1] = src[5]; fMat[2][2] = src[8]; fMat[2][3] = 0;
+ fMat[3][0] = 0; fMat[3][1] = 0; fMat[3][2] = 0; fMat[3][3] = 1;
+ this->dirtyTypeMask();
+}
+
+void SkMatrix44::set3x4RowMajorf(const float src[]) {
+ fMat[0][0] = src[0]; fMat[1][0] = src[1]; fMat[2][0] = src[2]; fMat[3][0] = src[3];
+ fMat[0][1] = src[4]; fMat[1][1] = src[5]; fMat[2][1] = src[6]; fMat[3][1] = src[7];
+ fMat[0][2] = src[8]; fMat[1][2] = src[9]; fMat[2][2] = src[10]; fMat[3][2] = src[11];
+ fMat[0][3] = 0; fMat[1][3] = 0; fMat[2][3] = 0; fMat[3][3] = 1;
+ this->dirtyTypeMask();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix44::setTranslate(SkMScalar dx, SkMScalar dy, SkMScalar dz) {
+ this->setIdentity();
+
+ if (!dx && !dy && !dz) {
+ return;
+ }
+
+ fMat[3][0] = dx;
+ fMat[3][1] = dy;
+ fMat[3][2] = dz;
+ this->setTypeMask(kTranslate_Mask);
+}
+
+void SkMatrix44::preTranslate(SkMScalar dx, SkMScalar dy, SkMScalar dz) {
+ if (!dx && !dy && !dz) {
+ return;
+ }
+
+ for (int i = 0; i < 4; ++i) {
+ fMat[3][i] = fMat[0][i] * dx + fMat[1][i] * dy + fMat[2][i] * dz + fMat[3][i];
+ }
+ this->dirtyTypeMask();
+}
+
+void SkMatrix44::postTranslate(SkMScalar dx, SkMScalar dy, SkMScalar dz) {
+ if (!dx && !dy && !dz) {
+ return;
+ }
+
+ if (this->getType() & kPerspective_Mask) {
+ for (int i = 0; i < 4; ++i) {
+ fMat[i][0] += fMat[i][3] * dx;
+ fMat[i][1] += fMat[i][3] * dy;
+ fMat[i][2] += fMat[i][3] * dz;
+ }
+ } else {
+ fMat[3][0] += dx;
+ fMat[3][1] += dy;
+ fMat[3][2] += dz;
+ this->dirtyTypeMask();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix44::setScale(SkMScalar sx, SkMScalar sy, SkMScalar sz) {
+ this->setIdentity();
+
+ if (1 == sx && 1 == sy && 1 == sz) {
+ return;
+ }
+
+ fMat[0][0] = sx;
+ fMat[1][1] = sy;
+ fMat[2][2] = sz;
+ this->setTypeMask(kScale_Mask);
+}
+
+void SkMatrix44::preScale(SkMScalar sx, SkMScalar sy, SkMScalar sz) {
+ if (1 == sx && 1 == sy && 1 == sz) {
+ return;
+ }
+
+    // The product matrix * pureScale can be shortcut: the pureScale components
+    // simply scale the columns of the original matrix.
+ for (int i = 0; i < 4; i++) {
+ fMat[0][i] *= sx;
+ fMat[1][i] *= sy;
+ fMat[2][i] *= sz;
+ }
+ this->dirtyTypeMask();
+}
+
+void SkMatrix44::postScale(SkMScalar sx, SkMScalar sy, SkMScalar sz) {
+ if (1 == sx && 1 == sy && 1 == sz) {
+ return;
+ }
+
+ for (int i = 0; i < 4; i++) {
+ fMat[i][0] *= sx;
+ fMat[i][1] *= sy;
+ fMat[i][2] *= sz;
+ }
+ this->dirtyTypeMask();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix44::setRotateAbout(SkMScalar x, SkMScalar y, SkMScalar z,
+ SkMScalar radians) {
+ double len2 = (double)x * x + (double)y * y + (double)z * z;
+ if (1 != len2) {
+ if (0 == len2) {
+ this->setIdentity();
+ return;
+ }
+ double scale = 1 / sqrt(len2);
+ x = SkDoubleToMScalar(x * scale);
+ y = SkDoubleToMScalar(y * scale);
+ z = SkDoubleToMScalar(z * scale);
+ }
+ this->setRotateAboutUnit(x, y, z, radians);
+}
+
+void SkMatrix44::setRotateAboutUnit(SkMScalar x, SkMScalar y, SkMScalar z,
+ SkMScalar radians) {
+ double c = cos(radians);
+ double s = sin(radians);
+ double C = 1 - c;
+ double xs = x * s;
+ double ys = y * s;
+ double zs = z * s;
+ double xC = x * C;
+ double yC = y * C;
+ double zC = z * C;
+ double xyC = x * yC;
+ double yzC = y * zC;
+ double zxC = z * xC;
+
+ // if you're looking at wikipedia, remember that we're column major.
+ this->set3x3(SkDoubleToMScalar(x * xC + c), // scale x
+ SkDoubleToMScalar(xyC + zs), // skew x
+ SkDoubleToMScalar(zxC - ys), // trans x
+
+ SkDoubleToMScalar(xyC - zs), // skew y
+ SkDoubleToMScalar(y * yC + c), // scale y
+ SkDoubleToMScalar(yzC + xs), // trans y
+
+ SkDoubleToMScalar(zxC + ys), // persp x
+ SkDoubleToMScalar(yzC - xs), // persp y
+ SkDoubleToMScalar(z * zC + c)); // persp 2
+}
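+
+// The matrix built above is the standard axis-angle (Rodrigues) rotation
+//     R = c*I + s*[u]x + (1 - c)*u*u^T
+// for the unit axis u = (x, y, z), written out in Skia's column-major element order.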
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool bits_isonly(int value, int mask) {
+ return 0 == (value & ~mask);
+}
+
+void SkMatrix44::setConcat(const SkMatrix44& a, const SkMatrix44& b) {
+ const SkMatrix44::TypeMask a_mask = a.getType();
+ const SkMatrix44::TypeMask b_mask = b.getType();
+
+ if (kIdentity_Mask == a_mask) {
+ *this = b;
+ return;
+ }
+ if (kIdentity_Mask == b_mask) {
+ *this = a;
+ return;
+ }
+
+ bool useStorage = (this == &a || this == &b);
+ SkMScalar storage[16];
+ SkMScalar* result = useStorage ? storage : &fMat[0][0];
+
+ // Both matrices are at most scale+translate
+ if (bits_isonly(a_mask | b_mask, kScale_Mask | kTranslate_Mask)) {
+ result[0] = a.fMat[0][0] * b.fMat[0][0];
+ result[1] = result[2] = result[3] = result[4] = 0;
+ result[5] = a.fMat[1][1] * b.fMat[1][1];
+ result[6] = result[7] = result[8] = result[9] = 0;
+ result[10] = a.fMat[2][2] * b.fMat[2][2];
+ result[11] = 0;
+ result[12] = a.fMat[0][0] * b.fMat[3][0] + a.fMat[3][0];
+ result[13] = a.fMat[1][1] * b.fMat[3][1] + a.fMat[3][1];
+ result[14] = a.fMat[2][2] * b.fMat[3][2] + a.fMat[3][2];
+ result[15] = 1;
+ } else {
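+        // General case: full 4x4 product this = a * b, accumulated in doubles and
+        // written out in column-major order.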
+ for (int j = 0; j < 4; j++) {
+ for (int i = 0; i < 4; i++) {
+ double value = 0;
+ for (int k = 0; k < 4; k++) {
+ value += SkMScalarToDouble(a.fMat[k][i]) * b.fMat[j][k];
+ }
+ *result++ = SkDoubleToMScalar(value);
+ }
+ }
+ }
+
+ if (useStorage) {
+ memcpy(fMat, storage, sizeof(storage));
+ }
+ this->dirtyTypeMask();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** We always perform the calculation in doubles, to avoid prematurely losing
+ precision along the way. This relies on the compiler automatically
+ promoting our SkMScalar values to double (if needed).
+ */
+double SkMatrix44::determinant() const {
+ if (this->isIdentity()) {
+ return 1;
+ }
+ if (this->isScaleTranslate()) {
+ return fMat[0][0] * fMat[1][1] * fMat[2][2] * fMat[3][3];
+ }
+
+ double a00 = fMat[0][0];
+ double a01 = fMat[0][1];
+ double a02 = fMat[0][2];
+ double a03 = fMat[0][3];
+ double a10 = fMat[1][0];
+ double a11 = fMat[1][1];
+ double a12 = fMat[1][2];
+ double a13 = fMat[1][3];
+ double a20 = fMat[2][0];
+ double a21 = fMat[2][1];
+ double a22 = fMat[2][2];
+ double a23 = fMat[2][3];
+ double a30 = fMat[3][0];
+ double a31 = fMat[3][1];
+ double a32 = fMat[3][2];
+ double a33 = fMat[3][3];
+
+ double b00 = a00 * a11 - a01 * a10;
+ double b01 = a00 * a12 - a02 * a10;
+ double b02 = a00 * a13 - a03 * a10;
+ double b03 = a01 * a12 - a02 * a11;
+ double b04 = a01 * a13 - a03 * a11;
+ double b05 = a02 * a13 - a03 * a12;
+ double b06 = a20 * a31 - a21 * a30;
+ double b07 = a20 * a32 - a22 * a30;
+ double b08 = a20 * a33 - a23 * a30;
+ double b09 = a21 * a32 - a22 * a31;
+ double b10 = a21 * a33 - a23 * a31;
+ double b11 = a22 * a33 - a23 * a32;
+
+ // Calculate the determinant
+ return b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - b04 * b07 + b05 * b06;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
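+// Returns true iff every element is finite: multiplying a zero accumulator by each
+// element leaves it at zero unless some element is NaN or +/-infinity (0 * inf == NaN).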
+static bool is_matrix_finite(const SkMatrix44& matrix) {
+ SkMScalar accumulator = 0;
+ for (int row = 0; row < 4; ++row) {
+ for (int col = 0; col < 4; ++col) {
+ accumulator *= matrix.get(row, col);
+ }
+ }
+ return accumulator == 0;
+}
+
+bool SkMatrix44::invert(SkMatrix44* storage) const {
+ if (this->isIdentity()) {
+ if (storage) {
+ storage->setIdentity();
+ }
+ return true;
+ }
+
+ if (this->isTranslate()) {
+ if (storage) {
+ storage->setTranslate(-fMat[3][0], -fMat[3][1], -fMat[3][2]);
+ }
+ return true;
+ }
+
+ SkMatrix44 tmp(kUninitialized_Constructor);
+ // Use storage if it's available and distinct from this matrix.
+ SkMatrix44* inverse = (storage && storage != this) ? storage : &tmp;
+ if (this->isScaleTranslate()) {
+ if (0 == fMat[0][0] * fMat[1][1] * fMat[2][2]) {
+ return false;
+ }
+
+ double invXScale = 1 / fMat[0][0];
+ double invYScale = 1 / fMat[1][1];
+ double invZScale = 1 / fMat[2][2];
+
+ inverse->fMat[0][0] = SkDoubleToMScalar(invXScale);
+ inverse->fMat[0][1] = 0;
+ inverse->fMat[0][2] = 0;
+ inverse->fMat[0][3] = 0;
+
+ inverse->fMat[1][0] = 0;
+ inverse->fMat[1][1] = SkDoubleToMScalar(invYScale);
+ inverse->fMat[1][2] = 0;
+ inverse->fMat[1][3] = 0;
+
+ inverse->fMat[2][0] = 0;
+ inverse->fMat[2][1] = 0;
+ inverse->fMat[2][2] = SkDoubleToMScalar(invZScale);
+ inverse->fMat[2][3] = 0;
+
+ inverse->fMat[3][0] = SkDoubleToMScalar(-fMat[3][0] * invXScale);
+ inverse->fMat[3][1] = SkDoubleToMScalar(-fMat[3][1] * invYScale);
+ inverse->fMat[3][2] = SkDoubleToMScalar(-fMat[3][2] * invZScale);
+ inverse->fMat[3][3] = 1;
+
+ inverse->setTypeMask(this->getType());
+
+ if (!is_matrix_finite(*inverse)) {
+ return false;
+ }
+ if (storage && inverse != storage) {
+ *storage = *inverse;
+ }
+ return true;
+ }
+
+ double a00 = fMat[0][0];
+ double a01 = fMat[0][1];
+ double a02 = fMat[0][2];
+ double a03 = fMat[0][3];
+ double a10 = fMat[1][0];
+ double a11 = fMat[1][1];
+ double a12 = fMat[1][2];
+ double a13 = fMat[1][3];
+ double a20 = fMat[2][0];
+ double a21 = fMat[2][1];
+ double a22 = fMat[2][2];
+ double a23 = fMat[2][3];
+ double a30 = fMat[3][0];
+ double a31 = fMat[3][1];
+ double a32 = fMat[3][2];
+ double a33 = fMat[3][3];
+
+ if (!(this->getType() & kPerspective_Mask)) {
+ // If we know the matrix has no perspective, then the perspective
+ // component is (0, 0, 0, 1). We can use this information to save a lot
+ // of arithmetic that would otherwise be spent to compute the inverse
+ // of a general matrix.
+
+ SkASSERT(a03 == 0);
+ SkASSERT(a13 == 0);
+ SkASSERT(a23 == 0);
+ SkASSERT(a33 == 1);
+
+ double b00 = a00 * a11 - a01 * a10;
+ double b01 = a00 * a12 - a02 * a10;
+ double b03 = a01 * a12 - a02 * a11;
+ double b06 = a20 * a31 - a21 * a30;
+ double b07 = a20 * a32 - a22 * a30;
+ double b08 = a20;
+ double b09 = a21 * a32 - a22 * a31;
+ double b10 = a21;
+ double b11 = a22;
+
+ // Calculate the determinant
+ double det = b00 * b11 - b01 * b10 + b03 * b08;
+
+ double invdet = 1.0 / det;
+ // If det is zero, we want to return false. However, we also want to return false
+ // if 1/det overflows to infinity (i.e. det is denormalized). Both of these are
+ // handled by checking that 1/det is finite.
+ if (!sk_float_isfinite(invdet)) {
+ return false;
+ }
+
+ b00 *= invdet;
+ b01 *= invdet;
+ b03 *= invdet;
+ b06 *= invdet;
+ b07 *= invdet;
+ b08 *= invdet;
+ b09 *= invdet;
+ b10 *= invdet;
+ b11 *= invdet;
+
+ inverse->fMat[0][0] = SkDoubleToMScalar(a11 * b11 - a12 * b10);
+ inverse->fMat[0][1] = SkDoubleToMScalar(a02 * b10 - a01 * b11);
+ inverse->fMat[0][2] = SkDoubleToMScalar(b03);
+ inverse->fMat[0][3] = 0;
+ inverse->fMat[1][0] = SkDoubleToMScalar(a12 * b08 - a10 * b11);
+ inverse->fMat[1][1] = SkDoubleToMScalar(a00 * b11 - a02 * b08);
+ inverse->fMat[1][2] = SkDoubleToMScalar(-b01);
+ inverse->fMat[1][3] = 0;
+ inverse->fMat[2][0] = SkDoubleToMScalar(a10 * b10 - a11 * b08);
+ inverse->fMat[2][1] = SkDoubleToMScalar(a01 * b08 - a00 * b10);
+ inverse->fMat[2][2] = SkDoubleToMScalar(b00);
+ inverse->fMat[2][3] = 0;
+ inverse->fMat[3][0] = SkDoubleToMScalar(a11 * b07 - a10 * b09 - a12 * b06);
+ inverse->fMat[3][1] = SkDoubleToMScalar(a00 * b09 - a01 * b07 + a02 * b06);
+ inverse->fMat[3][2] = SkDoubleToMScalar(a31 * b01 - a30 * b03 - a32 * b00);
+ inverse->fMat[3][3] = 1;
+
+ inverse->setTypeMask(this->getType());
+ if (!is_matrix_finite(*inverse)) {
+ return false;
+ }
+ if (storage && inverse != storage) {
+ *storage = *inverse;
+ }
+ return true;
+ }
+
+ double b00 = a00 * a11 - a01 * a10;
+ double b01 = a00 * a12 - a02 * a10;
+ double b02 = a00 * a13 - a03 * a10;
+ double b03 = a01 * a12 - a02 * a11;
+ double b04 = a01 * a13 - a03 * a11;
+ double b05 = a02 * a13 - a03 * a12;
+ double b06 = a20 * a31 - a21 * a30;
+ double b07 = a20 * a32 - a22 * a30;
+ double b08 = a20 * a33 - a23 * a30;
+ double b09 = a21 * a32 - a22 * a31;
+ double b10 = a21 * a33 - a23 * a31;
+ double b11 = a22 * a33 - a23 * a32;
+
+ // Calculate the determinant
+ double det = b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - b04 * b07 + b05 * b06;
+
+ double invdet = 1.0 / det;
+ // If det is zero, we want to return false. However, we also want to return false
+ // if 1/det overflows to infinity (i.e. det is denormalized). Both of these are
+ // handled by checking that 1/det is finite.
+ if (!sk_float_isfinite(invdet)) {
+ return false;
+ }
+
+ b00 *= invdet;
+ b01 *= invdet;
+ b02 *= invdet;
+ b03 *= invdet;
+ b04 *= invdet;
+ b05 *= invdet;
+ b06 *= invdet;
+ b07 *= invdet;
+ b08 *= invdet;
+ b09 *= invdet;
+ b10 *= invdet;
+ b11 *= invdet;
+
+ inverse->fMat[0][0] = SkDoubleToMScalar(a11 * b11 - a12 * b10 + a13 * b09);
+ inverse->fMat[0][1] = SkDoubleToMScalar(a02 * b10 - a01 * b11 - a03 * b09);
+ inverse->fMat[0][2] = SkDoubleToMScalar(a31 * b05 - a32 * b04 + a33 * b03);
+ inverse->fMat[0][3] = SkDoubleToMScalar(a22 * b04 - a21 * b05 - a23 * b03);
+ inverse->fMat[1][0] = SkDoubleToMScalar(a12 * b08 - a10 * b11 - a13 * b07);
+ inverse->fMat[1][1] = SkDoubleToMScalar(a00 * b11 - a02 * b08 + a03 * b07);
+ inverse->fMat[1][2] = SkDoubleToMScalar(a32 * b02 - a30 * b05 - a33 * b01);
+ inverse->fMat[1][3] = SkDoubleToMScalar(a20 * b05 - a22 * b02 + a23 * b01);
+ inverse->fMat[2][0] = SkDoubleToMScalar(a10 * b10 - a11 * b08 + a13 * b06);
+ inverse->fMat[2][1] = SkDoubleToMScalar(a01 * b08 - a00 * b10 - a03 * b06);
+ inverse->fMat[2][2] = SkDoubleToMScalar(a30 * b04 - a31 * b02 + a33 * b00);
+ inverse->fMat[2][3] = SkDoubleToMScalar(a21 * b02 - a20 * b04 - a23 * b00);
+ inverse->fMat[3][0] = SkDoubleToMScalar(a11 * b07 - a10 * b09 - a12 * b06);
+ inverse->fMat[3][1] = SkDoubleToMScalar(a00 * b09 - a01 * b07 + a02 * b06);
+ inverse->fMat[3][2] = SkDoubleToMScalar(a31 * b01 - a30 * b03 - a32 * b00);
+ inverse->fMat[3][3] = SkDoubleToMScalar(a20 * b03 - a21 * b01 + a22 * b00);
+ inverse->dirtyTypeMask();
+
+ inverse->setTypeMask(this->getType());
+ if (!is_matrix_finite(*inverse)) {
+ return false;
+ }
+ if (storage && inverse != storage) {
+ *storage = *inverse;
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix44::transpose() {
+ SkTSwap(fMat[0][1], fMat[1][0]);
+ SkTSwap(fMat[0][2], fMat[2][0]);
+ SkTSwap(fMat[0][3], fMat[3][0]);
+ SkTSwap(fMat[1][2], fMat[2][1]);
+ SkTSwap(fMat[1][3], fMat[3][1]);
+ SkTSwap(fMat[2][3], fMat[3][2]);
+
+ if (!this->isTriviallyIdentity()) {
+ this->dirtyTypeMask();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix44::mapScalars(const SkScalar src[4], SkScalar dst[4]) const {
+ SkScalar storage[4];
+ SkScalar* result = (src == dst) ? storage : dst;
+
+ for (int i = 0; i < 4; i++) {
+ SkMScalar value = 0;
+ for (int j = 0; j < 4; j++) {
+ value += fMat[j][i] * src[j];
+ }
+ result[i] = SkMScalarToScalar(value);
+ }
+
+ if (storage == result) {
+ memcpy(dst, storage, sizeof(storage));
+ }
+}
+
+#ifdef SK_MSCALAR_IS_DOUBLE
+
+void SkMatrix44::mapMScalars(const SkMScalar src[4], SkMScalar dst[4]) const {
+ SkMScalar storage[4];
+ SkMScalar* result = (src == dst) ? storage : dst;
+
+ for (int i = 0; i < 4; i++) {
+ SkMScalar value = 0;
+ for (int j = 0; j < 4; j++) {
+ value += fMat[j][i] * src[j];
+ }
+ result[i] = value;
+ }
+
+ if (storage == result) {
+ memcpy(dst, storage, sizeof(storage));
+ }
+}
+
+#endif
+
+typedef void (*Map2Procf)(const SkMScalar mat[][4], const float src2[], int count, float dst4[]);
+typedef void (*Map2Procd)(const SkMScalar mat[][4], const double src2[], int count, double dst4[]);
+
+static void map2_if(const SkMScalar mat[][4], const float* SK_RESTRICT src2,
+ int count, float* SK_RESTRICT dst4) {
+ for (int i = 0; i < count; ++i) {
+ dst4[0] = src2[0];
+ dst4[1] = src2[1];
+ dst4[2] = 0;
+ dst4[3] = 1;
+ src2 += 2;
+ dst4 += 4;
+ }
+}
+
+static void map2_id(const SkMScalar mat[][4], const double* SK_RESTRICT src2,
+ int count, double* SK_RESTRICT dst4) {
+ for (int i = 0; i < count; ++i) {
+ dst4[0] = src2[0];
+ dst4[1] = src2[1];
+ dst4[2] = 0;
+ dst4[3] = 1;
+ src2 += 2;
+ dst4 += 4;
+ }
+}
+
+static void map2_tf(const SkMScalar mat[][4], const float* SK_RESTRICT src2,
+ int count, float* SK_RESTRICT dst4) {
+ const float mat30 = SkMScalarToFloat(mat[3][0]);
+ const float mat31 = SkMScalarToFloat(mat[3][1]);
+ const float mat32 = SkMScalarToFloat(mat[3][2]);
+ for (int n = 0; n < count; ++n) {
+ dst4[0] = src2[0] + mat30;
+ dst4[1] = src2[1] + mat31;
+ dst4[2] = mat32;
+ dst4[3] = 1;
+ src2 += 2;
+ dst4 += 4;
+ }
+}
+
+static void map2_td(const SkMScalar mat[][4], const double* SK_RESTRICT src2,
+ int count, double* SK_RESTRICT dst4) {
+ for (int n = 0; n < count; ++n) {
+ dst4[0] = src2[0] + mat[3][0];
+ dst4[1] = src2[1] + mat[3][1];
+ dst4[2] = mat[3][2];
+ dst4[3] = 1;
+ src2 += 2;
+ dst4 += 4;
+ }
+}
+
+static void map2_sf(const SkMScalar mat[][4], const float* SK_RESTRICT src2,
+ int count, float* SK_RESTRICT dst4) {
+ const float mat32 = SkMScalarToFloat(mat[3][2]);
+ for (int n = 0; n < count; ++n) {
+ dst4[0] = SkMScalarToFloat(mat[0][0] * src2[0] + mat[3][0]);
+ dst4[1] = SkMScalarToFloat(mat[1][1] * src2[1] + mat[3][1]);
+ dst4[2] = mat32;
+ dst4[3] = 1;
+ src2 += 2;
+ dst4 += 4;
+ }
+}
+
+static void map2_sd(const SkMScalar mat[][4], const double* SK_RESTRICT src2,
+ int count, double* SK_RESTRICT dst4) {
+ for (int n = 0; n < count; ++n) {
+ dst4[0] = mat[0][0] * src2[0] + mat[3][0];
+ dst4[1] = mat[1][1] * src2[1] + mat[3][1];
+ dst4[2] = mat[3][2];
+ dst4[3] = 1;
+ src2 += 2;
+ dst4 += 4;
+ }
+}
+
+static void map2_af(const SkMScalar mat[][4], const float* SK_RESTRICT src2,
+ int count, float* SK_RESTRICT dst4) {
+ SkMScalar r;
+ for (int n = 0; n < count; ++n) {
+ SkMScalar sx = SkFloatToMScalar(src2[0]);
+ SkMScalar sy = SkFloatToMScalar(src2[1]);
+ r = mat[0][0] * sx + mat[1][0] * sy + mat[3][0];
+ dst4[0] = SkMScalarToFloat(r);
+ r = mat[0][1] * sx + mat[1][1] * sy + mat[3][1];
+ dst4[1] = SkMScalarToFloat(r);
+ r = mat[0][2] * sx + mat[1][2] * sy + mat[3][2];
+ dst4[2] = SkMScalarToFloat(r);
+ dst4[3] = 1;
+ src2 += 2;
+ dst4 += 4;
+ }
+}
+
+static void map2_ad(const SkMScalar mat[][4], const double* SK_RESTRICT src2,
+ int count, double* SK_RESTRICT dst4) {
+ for (int n = 0; n < count; ++n) {
+ double sx = src2[0];
+ double sy = src2[1];
+ dst4[0] = mat[0][0] * sx + mat[1][0] * sy + mat[3][0];
+ dst4[1] = mat[0][1] * sx + mat[1][1] * sy + mat[3][1];
+ dst4[2] = mat[0][2] * sx + mat[1][2] * sy + mat[3][2];
+ dst4[3] = 1;
+ src2 += 2;
+ dst4 += 4;
+ }
+}
+
+static void map2_pf(const SkMScalar mat[][4], const float* SK_RESTRICT src2,
+ int count, float* SK_RESTRICT dst4) {
+ SkMScalar r;
+ for (int n = 0; n < count; ++n) {
+ SkMScalar sx = SkFloatToMScalar(src2[0]);
+ SkMScalar sy = SkFloatToMScalar(src2[1]);
+ for (int i = 0; i < 4; i++) {
+ r = mat[0][i] * sx + mat[1][i] * sy + mat[3][i];
+ dst4[i] = SkMScalarToFloat(r);
+ }
+ src2 += 2;
+ dst4 += 4;
+ }
+}
+
+static void map2_pd(const SkMScalar mat[][4], const double* SK_RESTRICT src2,
+ int count, double* SK_RESTRICT dst4) {
+ for (int n = 0; n < count; ++n) {
+ double sx = src2[0];
+ double sy = src2[1];
+ for (int i = 0; i < 4; i++) {
+ dst4[i] = mat[0][i] * sx + mat[1][i] * sy + mat[3][i];
+ }
+ src2 += 2;
+ dst4 += 4;
+ }
+}
+
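+// The tables below are indexed by the matrix's type mask, with perspective dispatched
+// separately: identity, translate, scale (with or without translate), then the affine
+// variants. Each proc expands (x, y) pairs into mapped homogeneous 4-vectors.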
+void SkMatrix44::map2(const float src2[], int count, float dst4[]) const {
+ static const Map2Procf gProc[] = {
+ map2_if, map2_tf, map2_sf, map2_sf, map2_af, map2_af, map2_af, map2_af
+ };
+
+ TypeMask mask = this->getType();
+ Map2Procf proc = (mask & kPerspective_Mask) ? map2_pf : gProc[mask];
+ proc(fMat, src2, count, dst4);
+}
+
+void SkMatrix44::map2(const double src2[], int count, double dst4[]) const {
+ static const Map2Procd gProc[] = {
+ map2_id, map2_td, map2_sd, map2_sd, map2_ad, map2_ad, map2_ad, map2_ad
+ };
+
+ TypeMask mask = this->getType();
+ Map2Procd proc = (mask & kPerspective_Mask) ? map2_pd : gProc[mask];
+ proc(fMat, src2, count, dst4);
+}
+
+bool SkMatrix44::preserves2dAxisAlignment(SkMScalar epsilon) const {
+
+ // Can't check (mask & kPerspective_Mask) because Z isn't relevant here.
+ if (0 != perspX() || 0 != perspY()) return false;
+
+    // A matrix with two non-zero-ish values in any row or column of the
+    // upper-left 2x2 will skew or rotate by an arbitrary angle. If at most one
+    // value in each of those rows and columns is non-zero-ish, we have a scale
+    // plus perhaps a 90-degree rotation.
+ int col0 = 0;
+ int col1 = 0;
+ int row0 = 0;
+ int row1 = 0;
+
+ // Must test against epsilon, not 0, because we can get values
+ // around 6e-17 in the matrix that "should" be 0.
+
+ if (SkMScalarAbs(fMat[0][0]) > epsilon) {
+ col0++;
+ row0++;
+ }
+ if (SkMScalarAbs(fMat[0][1]) > epsilon) {
+ col1++;
+ row0++;
+ }
+ if (SkMScalarAbs(fMat[1][0]) > epsilon) {
+ col0++;
+ row1++;
+ }
+ if (SkMScalarAbs(fMat[1][1]) > epsilon) {
+ col1++;
+ row1++;
+ }
+ if (col0 > 1 || col1 > 1 || row0 > 1 || row1 > 1) {
+ return false;
+ }
+
+ return true;
+}
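+
+// For example, a rotation by a multiple of 90 degrees leaves exactly one non-zero-ish
+// entry per row and column of that 2x2 and passes, while any other rotation or a skew
+// produces two in some row or column and fails.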
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix44::dump() const {
+ static const char* format =
+ "[%g %g %g %g][%g %g %g %g][%g %g %g %g][%g %g %g %g]\n";
+#if 0
+ SkDebugf(format,
+ fMat[0][0], fMat[1][0], fMat[2][0], fMat[3][0],
+ fMat[0][1], fMat[1][1], fMat[2][1], fMat[3][1],
+ fMat[0][2], fMat[1][2], fMat[2][2], fMat[3][2],
+ fMat[0][3], fMat[1][3], fMat[2][3], fMat[3][3]);
+#else
+ SkDebugf(format,
+ fMat[0][0], fMat[0][1], fMat[0][2], fMat[0][3],
+ fMat[1][0], fMat[1][1], fMat[1][2], fMat[1][3],
+ fMat[2][0], fMat[2][1], fMat[2][2], fMat[2][3],
+ fMat[3][0], fMat[3][1], fMat[3][2], fMat[3][3]);
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void initFromMatrix(SkMScalar dst[4][4], const SkMatrix& src) {
+ dst[0][0] = SkScalarToMScalar(src[SkMatrix::kMScaleX]);
+ dst[1][0] = SkScalarToMScalar(src[SkMatrix::kMSkewX]);
+ dst[2][0] = 0;
+ dst[3][0] = SkScalarToMScalar(src[SkMatrix::kMTransX]);
+ dst[0][1] = SkScalarToMScalar(src[SkMatrix::kMSkewY]);
+ dst[1][1] = SkScalarToMScalar(src[SkMatrix::kMScaleY]);
+ dst[2][1] = 0;
+ dst[3][1] = SkScalarToMScalar(src[SkMatrix::kMTransY]);
+ dst[0][2] = 0;
+ dst[1][2] = 0;
+ dst[2][2] = 1;
+ dst[3][2] = 0;
+ dst[0][3] = SkScalarToMScalar(src[SkMatrix::kMPersp0]);
+ dst[1][3] = SkScalarToMScalar(src[SkMatrix::kMPersp1]);
+ dst[2][3] = 0;
+ dst[3][3] = SkScalarToMScalar(src[SkMatrix::kMPersp2]);
+}
+
+SkMatrix44::SkMatrix44(const SkMatrix& src) {
+ this->operator=(src);
+}
+
+SkMatrix44& SkMatrix44::operator=(const SkMatrix& src) {
+ initFromMatrix(fMat, src);
+
+ if (src.isIdentity()) {
+ this->setTypeMask(kIdentity_Mask);
+ } else {
+ this->dirtyTypeMask();
+ }
+ return *this;
+}
+
+SkMatrix44::operator SkMatrix() const {
+ SkMatrix dst;
+
+ dst[SkMatrix::kMScaleX] = SkMScalarToScalar(fMat[0][0]);
+ dst[SkMatrix::kMSkewX] = SkMScalarToScalar(fMat[1][0]);
+ dst[SkMatrix::kMTransX] = SkMScalarToScalar(fMat[3][0]);
+
+ dst[SkMatrix::kMSkewY] = SkMScalarToScalar(fMat[0][1]);
+ dst[SkMatrix::kMScaleY] = SkMScalarToScalar(fMat[1][1]);
+ dst[SkMatrix::kMTransY] = SkMScalarToScalar(fMat[3][1]);
+
+ dst[SkMatrix::kMPersp0] = SkMScalarToScalar(fMat[0][3]);
+ dst[SkMatrix::kMPersp1] = SkMScalarToScalar(fMat[1][3]);
+ dst[SkMatrix::kMPersp2] = SkMScalarToScalar(fMat[3][3]);
+
+ return dst;
+}
diff --git a/gfx/skia/skia/src/core/SkMatrixImageFilter.cpp b/gfx/skia/skia/src/core/SkMatrixImageFilter.cpp
new file mode 100644
index 000000000..0a3328041
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMatrixImageFilter.cpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMatrixImageFilter.h"
+
+#include "SkCanvas.h"
+#include "SkReadBuffer.h"
+#include "SkSpecialImage.h"
+#include "SkSpecialSurface.h"
+#include "SkWriteBuffer.h"
+#include "SkRect.h"
+
+SkMatrixImageFilter::SkMatrixImageFilter(const SkMatrix& transform,
+ SkFilterQuality filterQuality,
+ sk_sp<SkImageFilter> input)
+ : INHERITED(&input, 1, nullptr)
+ , fTransform(transform)
+ , fFilterQuality(filterQuality) {
+}
+
+sk_sp<SkImageFilter> SkMatrixImageFilter::Make(const SkMatrix& transform,
+ SkFilterQuality filterQuality,
+ sk_sp<SkImageFilter> input) {
+ return sk_sp<SkImageFilter>(new SkMatrixImageFilter(transform,
+ filterQuality,
+ std::move(input)));
+}
+
+sk_sp<SkFlattenable> SkMatrixImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkMatrix matrix;
+ buffer.readMatrix(&matrix);
+ SkFilterQuality quality = static_cast<SkFilterQuality>(buffer.readInt());
+ return Make(matrix, quality, common.getInput(0));
+}
+
+void SkMatrixImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeMatrix(fTransform);
+ buffer.writeInt(fFilterQuality);
+}
+
+sk_sp<SkSpecialImage> SkMatrixImageFilter::onFilterImage(SkSpecialImage* source,
+ const Context& ctx,
+ SkIPoint* offset) const {
+
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, source, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
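+    // Conjugate the user transform by the CTM (postConcat multiplies on the left), giving
+    // matrix = ctm * fTransform * ctm^-1, so fTransform is applied in the filter's
+    // local, pre-CTM space.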
+ SkMatrix matrix;
+ if (!ctx.ctm().invert(&matrix)) {
+ return nullptr;
+ }
+ matrix.postConcat(fTransform);
+ matrix.postConcat(ctx.ctm());
+
+ const SkIRect srcBounds = SkIRect::MakeXYWH(inputOffset.x(), inputOffset.y(),
+ input->width(), input->height());
+ const SkRect srcRect = SkRect::Make(srcBounds);
+
+ SkRect dstRect;
+ matrix.mapRect(&dstRect, srcRect);
+ SkIRect dstBounds;
+ dstRect.roundOut(&dstBounds);
+
+ sk_sp<SkSpecialSurface> surf(input->makeSurface(ctx.outputProperties(), dstBounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ canvas->clear(0x0);
+
+ canvas->translate(-SkIntToScalar(dstBounds.x()), -SkIntToScalar(dstBounds.y()));
+ canvas->concat(matrix);
+
+ SkPaint paint;
+ paint.setAntiAlias(true);
+ paint.setBlendMode(SkBlendMode::kSrc);
+ paint.setFilterQuality(fFilterQuality);
+
+ input->draw(canvas, srcRect.x(), srcRect.y(), &paint);
+
+ offset->fX = dstBounds.fLeft;
+ offset->fY = dstBounds.fTop;
+ return surf->makeImageSnapshot();
+}
+
+SkRect SkMatrixImageFilter::computeFastBounds(const SkRect& src) const {
+ SkRect bounds = this->getInput(0) ? this->getInput(0)->computeFastBounds(src) : src;
+ SkRect dst;
+ fTransform.mapRect(&dst, bounds);
+ return dst;
+}
+
+SkIRect SkMatrixImageFilter::onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection direction) const {
+ SkMatrix matrix;
+ if (!ctm.invert(&matrix)) {
+ return src;
+ }
+ if (kForward_MapDirection == direction) {
+ matrix.postConcat(fTransform);
+ } else {
+ SkMatrix transformInverse;
+ if (!fTransform.invert(&transformInverse)) {
+ return src;
+ }
+ matrix.postConcat(transformInverse);
+ }
+ matrix.postConcat(ctm);
+ SkRect floatBounds;
+ matrix.mapRect(&floatBounds, SkRect::Make(src));
+ return floatBounds.roundOut();
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkMatrixImageFilter::toString(SkString* str) const {
+ str->appendf("SkMatrixImageFilter: (");
+
+ str->appendf("transform: (%f %f %f %f %f %f %f %f %f)",
+ fTransform[SkMatrix::kMScaleX],
+ fTransform[SkMatrix::kMSkewX],
+ fTransform[SkMatrix::kMTransX],
+ fTransform[SkMatrix::kMSkewY],
+ fTransform[SkMatrix::kMScaleY],
+ fTransform[SkMatrix::kMTransY],
+ fTransform[SkMatrix::kMPersp0],
+ fTransform[SkMatrix::kMPersp1],
+ fTransform[SkMatrix::kMPersp2]);
+
+ str->append("<dt>FilterLevel:</dt><dd>");
+ static const char* gFilterLevelStrings[] = { "None", "Low", "Medium", "High" };
+ str->append(gFilterLevelStrings[fFilterQuality]);
+ str->append("</dd>");
+
+ str->appendf(")");
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkMatrixImageFilter.h b/gfx/skia/skia/src/core/SkMatrixImageFilter.h
new file mode 100644
index 000000000..e02541c12
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMatrixImageFilter.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMatrixImageFilter_DEFINED
+#define SkMatrixImageFilter_DEFINED
+
+#include "SkImageFilter.h"
+#include "SkMatrix.h"
+
+/*! \class SkMatrixImageFilter
+ Matrix transformation image filter. This filter draws its source
+ input transformed by the given matrix.
+ */
+
+class SK_API SkMatrixImageFilter : public SkImageFilter {
+public:
+ /** Construct a 2D transformation image filter.
+ * @param transform The matrix to apply when drawing the src bitmap
+ * @param filterQuality The quality of filtering to apply when scaling.
+ * @param input The input image filter. If nullptr, the src bitmap
+ * passed to filterImage() is used instead.
+ */
+
+ static sk_sp<SkImageFilter> Make(const SkMatrix& transform,
+ SkFilterQuality filterQuality,
+ sk_sp<SkImageFilter> input);
+
+ SkRect computeFastBounds(const SkRect&) const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkMatrixImageFilter)
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFILTER_PTR
+ static SkImageFilter* Create(const SkMatrix& transform,
+ SkFilterQuality filterQuality,
+ SkImageFilter* input = nullptr) {
+ return Make(transform, filterQuality, sk_ref_sp<SkImageFilter>(input)).release();
+ }
+#endif
+
+protected:
+ SkMatrixImageFilter(const SkMatrix& transform,
+ SkFilterQuality,
+ sk_sp<SkImageFilter> input);
+ void flatten(SkWriteBuffer&) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* source, const Context&,
+ SkIPoint* offset) const override;
+ SkIRect onFilterNodeBounds(const SkIRect& src, const SkMatrix&, MapDirection) const override;
+
+private:
+ SkMatrix fTransform;
+ SkFilterQuality fFilterQuality;
+ typedef SkImageFilter INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMatrixPriv.h b/gfx/skia/skia/src/core/SkMatrixPriv.h
new file mode 100644
index 000000000..844901c01
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMatrixPriv.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMatrixPriv_DEFINED
+#define SkMatrixPriv_DEFINED
+
+#include "SkMatrix.h"
+#include "SkNx.h"
+
+class SkMatrixPriv {
+public:
+ /**
+ * Attempt to map the rect through the inverse of the matrix. If it is not invertible,
+ * then this returns false and dst is unchanged.
+ */
+ static bool SK_WARN_UNUSED_RESULT InverseMapRect(const SkMatrix& mx,
+ SkRect* dst, const SkRect& src) {
+ if (mx.getType() <= SkMatrix::kTranslate_Mask) {
+ SkScalar tx = mx.getTranslateX();
+ SkScalar ty = mx.getTranslateY();
+ Sk4f trans(tx, ty, tx, ty);
+ (Sk4f::Load(&src.fLeft) - trans).store(&dst->fLeft);
+ return true;
+ }
+ // Insert other special-cases here (e.g. scale+translate)
+
+ // general case
+ SkMatrix inverse;
+ if (mx.invert(&inverse)) {
+ inverse.mapRect(dst, src);
+ return true;
+ }
+ return false;
+ }
+
+ static void MapPointsWithStride(const SkMatrix& mx, SkPoint pts[], size_t stride, int count) {
+ SkASSERT(stride >= sizeof(SkPoint));
+ SkASSERT(0 == stride % sizeof(SkScalar));
+
+ SkMatrix::TypeMask tm = mx.getType();
+
+ if (SkMatrix::kIdentity_Mask == tm) {
+ return;
+ }
+ if (SkMatrix::kTranslate_Mask == tm) {
+ const SkScalar tx = mx.getTranslateX();
+ const SkScalar ty = mx.getTranslateY();
+ Sk2s trans(tx, ty);
+ for (int i = 0; i < count; ++i) {
+ (Sk2s::Load(&pts->fX) + trans).store(&pts->fX);
+ pts = (SkPoint*)((intptr_t)pts + stride);
+ }
+ return;
+ }
+ // Insert other special-cases here (e.g. scale+translate)
+
+ // general case
+ SkMatrix::MapXYProc proc = mx.getMapXYProc();
+ for (int i = 0; i < count; ++i) {
+ proc(mx, pts->fX, pts->fY, pts);
+ pts = (SkPoint*)((intptr_t)pts + stride);
+ }
+ }
+
+ static void SetMappedRectFan(const SkMatrix& mx, const SkRect& rect, SkPoint quad[4]) {
+ SkMatrix::TypeMask tm = mx.getType();
+ SkScalar l = rect.fLeft;
+ SkScalar t = rect.fTop;
+ SkScalar r = rect.fRight;
+ SkScalar b = rect.fBottom;
+ if (tm <= (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask)) {
+ const SkScalar tx = mx.getTranslateX();
+ const SkScalar ty = mx.getTranslateY();
+ if (tm <= SkMatrix::kTranslate_Mask) {
+ l += tx;
+ t += ty;
+ r += tx;
+ b += ty;
+ } else {
+ const SkScalar sx = mx.getScaleX();
+ const SkScalar sy = mx.getScaleY();
+ l = sx * l + tx;
+ t = sy * t + ty;
+ r = sx * r + tx;
+ b = sy * b + ty;
+ }
+ quad[0].set(l, t);
+ quad[1].set(l, b);
+ quad[2].set(r, b);
+ quad[3].set(r, t);
+ } else {
+ quad[0].setRectFan(l, t, r, b);
+ mx.mapPoints(quad, quad, 4);
+ }
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMatrixUtils.h b/gfx/skia/skia/src/core/SkMatrixUtils.h
new file mode 100644
index 000000000..0e01fbe95
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMatrixUtils.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMatrixUtils_DEFINED
+#define SkMatrixUtils_DEFINED
+
+#include "SkSize.h"
+
+class SkMatrix;
+class SkPaint;
+
+/**
+ * Given a matrix, size and paint, return true if the computed dst-rect would
+ * align such that there is a 1-to-1 correspondence between src and dst pixels.
+ * This can be called by drawing code to see if drawBitmap can be turned into
+ * drawSprite (which is faster).
+ *
+ * The src-rect is defined to be { 0, 0, size.width(), size.height() }
+ */
+bool SkTreatAsSprite(const SkMatrix&, const SkISize& size, const SkPaint& paint);
+
+/** Decomposes the upper-left 2x2 of the matrix into a rotation (represented by
+ the cosine and sine of the rotation angle), followed by a non-uniform scale,
+ followed by another rotation. If there is a reflection, one of the scale
+ factors will be negative.
+ Returns true if successful. Returns false if the matrix is degenerate.
+ */
+bool SkDecomposeUpper2x2(const SkMatrix& matrix,
+ SkPoint* rotation1,
+ SkPoint* scale,
+ SkPoint* rotation2);
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMessageBus.h b/gfx/skia/skia/src/core/SkMessageBus.h
new file mode 100644
index 000000000..79f5c026d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMessageBus.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMessageBus_DEFINED
+#define SkMessageBus_DEFINED
+
+#include "SkMutex.h"
+#include "SkOnce.h"
+#include "SkTArray.h"
+#include "SkTDArray.h"
+#include "SkTypes.h"
+
+template <typename Message>
+class SkMessageBus : SkNoncopyable {
+public:
+ // Post a message to be received by all Inboxes for this Message type. Threadsafe.
+ static void Post(const Message& m);
+
+ class Inbox {
+ public:
+ Inbox();
+ ~Inbox();
+
+ // Overwrite out with all the messages we've received since the last call. Threadsafe.
+ void poll(SkTArray<Message>* out);
+
+ private:
+ SkTArray<Message> fMessages;
+ SkMutex fMessagesMutex;
+
+ friend class SkMessageBus;
+ void receive(const Message& m); // SkMessageBus is a friend only to call this.
+ };
+
+private:
+ SkMessageBus();
+ static SkMessageBus* Get();
+
+ SkTDArray<Inbox*> fInboxes;
+ SkMutex fInboxesMutex;
+};
+
+// This must go in a single .cpp file, not some .h, or we risk creating more than one global
+// SkMessageBus per type when using shared libraries. NOTE: at most one per file will compile.
+#define DECLARE_SKMESSAGEBUS_MESSAGE(Message) \
+ template <> \
+ SkMessageBus<Message>* SkMessageBus<Message>::Get() { \
+ static SkOnce once; \
+ static SkMessageBus<Message>* bus; \
+ once([] { bus = new SkMessageBus<Message>(); }); \
+ return bus; \
+ }
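+
+// Typical usage (an informal sketch; the message type and values are illustrative):
+//
+//     struct MyMsg { int fId; };
+//     DECLARE_SKMESSAGEBUS_MESSAGE(MyMsg)        // in exactly one .cpp file
+//
+//     SkMessageBus<MyMsg>::Inbox inbox;          // registers itself with the bus
+//     SkMessageBus<MyMsg>::Post(MyMsg{42});      // delivered to every live Inbox
+//
+//     SkTArray<MyMsg> msgs;
+//     inbox.poll(&msgs);                         // drains everything received so far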
+
+// ----------------------- Implementation of SkMessageBus::Inbox -----------------------
+
+template<typename Message>
+SkMessageBus<Message>::Inbox::Inbox() {
+ // Register ourselves with the corresponding message bus.
+ SkMessageBus<Message>* bus = SkMessageBus<Message>::Get();
+ SkAutoMutexAcquire lock(bus->fInboxesMutex);
+ bus->fInboxes.push(this);
+}
+
+template<typename Message>
+SkMessageBus<Message>::Inbox::~Inbox() {
+ // Remove ourselves from the corresponding message bus.
+ SkMessageBus<Message>* bus = SkMessageBus<Message>::Get();
+ SkAutoMutexAcquire lock(bus->fInboxesMutex);
+ // This is a cheaper fInboxes.remove(fInboxes.find(this)) when order doesn't matter.
+ for (int i = 0; i < bus->fInboxes.count(); i++) {
+ if (this == bus->fInboxes[i]) {
+ bus->fInboxes.removeShuffle(i);
+ break;
+ }
+ }
+}
+
+template<typename Message>
+void SkMessageBus<Message>::Inbox::receive(const Message& m) {
+ SkAutoMutexAcquire lock(fMessagesMutex);
+ fMessages.push_back(m);
+}
+
+template<typename Message>
+void SkMessageBus<Message>::Inbox::poll(SkTArray<Message>* messages) {
+ SkASSERT(messages);
+ messages->reset();
+ SkAutoMutexAcquire lock(fMessagesMutex);
+ fMessages.swap(messages);
+}
+
+// ----------------------- Implementation of SkMessageBus -----------------------
+
+template <typename Message>
+SkMessageBus<Message>::SkMessageBus() {}
+
+template <typename Message>
+/*static*/ void SkMessageBus<Message>::Post(const Message& m) {
+ SkMessageBus<Message>* bus = SkMessageBus<Message>::Get();
+ SkAutoMutexAcquire lock(bus->fInboxesMutex);
+ for (int i = 0; i < bus->fInboxes.count(); i++) {
+ bus->fInboxes[i]->receive(m);
+ }
+}
+
+#endif // SkMessageBus_DEFINED
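
For reference, a minimal sketch of how the header above might be used. CachePurgeMsg, its field, and the value 42 are invented for illustration; only the Post/Inbox/poll calls and the DECLARE_SKMESSAGEBUS_MESSAGE macro come from the file itself.

    #include "SkMessageBus.h"
    #include "SkTArray.h"

    struct CachePurgeMsg {
        uint32_t fGenID;
    };

    // Per the comment above, this macro must appear in exactly one .cpp file.
    DECLARE_SKMESSAGEBUS_MESSAGE(CachePurgeMsg)

    static void message_bus_example() {
        SkMessageBus<CachePurgeMsg>::Inbox inbox;    // registers with the per-type bus
        SkMessageBus<CachePurgeMsg>::Post({ 42 });   // delivered to every live Inbox

        SkTArray<CachePurgeMsg> received;
        inbox.poll(&received);                       // drains messages posted so far
        // received now holds the { 42 } message.
    }
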
diff --git a/gfx/skia/skia/src/core/SkMetaData.cpp b/gfx/skia/skia/src/core/SkMetaData.cpp
new file mode 100644
index 000000000..6e1f5e2a3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMetaData.cpp
@@ -0,0 +1,335 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkMetaData.h"
+#include "SkRefCnt.h"
+
+struct PtrPair {
+ void* fPtr;
+ SkMetaData::PtrProc fProc;
+};
+
+void* SkMetaData::RefCntProc(void* ptr, bool doRef) {
+ SkASSERT(ptr);
+ SkRefCnt* refcnt = reinterpret_cast<SkRefCnt*>(ptr);
+
+ if (doRef) {
+ refcnt->ref();
+ } else {
+ refcnt->unref();
+ }
+ return ptr;
+}
+
+SkMetaData::SkMetaData() : fRec(nullptr)
+{
+}
+
+SkMetaData::SkMetaData(const SkMetaData& src) : fRec(nullptr)
+{
+ *this = src;
+}
+
+SkMetaData::~SkMetaData()
+{
+ this->reset();
+}
+
+void SkMetaData::reset()
+{
+ Rec* rec = fRec;
+ while (rec) {
+ if (kPtr_Type == rec->fType) {
+ PtrPair* pair = (PtrPair*)rec->data();
+ if (pair->fProc && pair->fPtr) {
+ pair->fPtr = pair->fProc(pair->fPtr, false);
+ }
+ }
+ Rec* next = rec->fNext;
+ Rec::Free(rec);
+ rec = next;
+ }
+ fRec = nullptr;
+}
+
+SkMetaData& SkMetaData::operator=(const SkMetaData& src)
+{
+ this->reset();
+
+ const Rec* rec = src.fRec;
+ while (rec)
+ {
+ this->set(rec->name(), rec->data(), rec->fDataLen, (Type)rec->fType, rec->fDataCount);
+ rec = rec->fNext;
+ }
+ return *this;
+}
+
+void SkMetaData::setS32(const char name[], int32_t value)
+{
+ (void)this->set(name, &value, sizeof(int32_t), kS32_Type, 1);
+}
+
+void SkMetaData::setScalar(const char name[], SkScalar value)
+{
+ (void)this->set(name, &value, sizeof(SkScalar), kScalar_Type, 1);
+}
+
+SkScalar* SkMetaData::setScalars(const char name[], int count, const SkScalar values[])
+{
+ SkASSERT(count > 0);
+ if (count > 0)
+ return (SkScalar*)this->set(name, values, sizeof(SkScalar), kScalar_Type, count);
+ return nullptr;
+}
+
+void SkMetaData::setString(const char name[], const char value[])
+{
+ (void)this->set(name, value, sizeof(char), kString_Type, SkToInt(strlen(value) + 1));
+}
+
+void SkMetaData::setPtr(const char name[], void* ptr, PtrProc proc) {
+ PtrPair pair = { ptr, proc };
+ (void)this->set(name, &pair, sizeof(PtrPair), kPtr_Type, 1);
+}
+
+void SkMetaData::setBool(const char name[], bool value)
+{
+ (void)this->set(name, &value, sizeof(bool), kBool_Type, 1);
+}
+
+void SkMetaData::setData(const char name[], const void* data, size_t byteCount) {
+ (void)this->set(name, data, sizeof(char), kData_Type, SkToInt(byteCount));
+}
+
+void* SkMetaData::set(const char name[], const void* data, size_t dataSize, Type type, int count)
+{
+ SkASSERT(name);
+ SkASSERT(dataSize);
+ SkASSERT(count > 0);
+
+ (void)this->remove(name, type);
+
+ size_t len = strlen(name);
+ Rec* rec = Rec::Alloc(sizeof(Rec) + dataSize * count + len + 1);
+
+#ifndef SK_DEBUG
+ rec->fType = SkToU8(type);
+#else
+ rec->fType = type;
+#endif
+ rec->fDataLen = SkToU8(dataSize);
+ rec->fDataCount = SkToU16(count);
+ if (data)
+ memcpy(rec->data(), data, dataSize * count);
+ memcpy(rec->name(), name, len + 1);
+
+ if (kPtr_Type == type) {
+ PtrPair* pair = (PtrPair*)rec->data();
+ if (pair->fProc && pair->fPtr) {
+ pair->fPtr = pair->fProc(pair->fPtr, true);
+ }
+ }
+
+ rec->fNext = fRec;
+ fRec = rec;
+ return rec->data();
+}
+
+bool SkMetaData::findS32(const char name[], int32_t* value) const
+{
+ const Rec* rec = this->find(name, kS32_Type);
+ if (rec)
+ {
+ SkASSERT(rec->fDataCount == 1);
+ if (value)
+ *value = *(const int32_t*)rec->data();
+ return true;
+ }
+ return false;
+}
+
+bool SkMetaData::findScalar(const char name[], SkScalar* value) const
+{
+ const Rec* rec = this->find(name, kScalar_Type);
+ if (rec)
+ {
+ SkASSERT(rec->fDataCount == 1);
+ if (value)
+ *value = *(const SkScalar*)rec->data();
+ return true;
+ }
+ return false;
+}
+
+const SkScalar* SkMetaData::findScalars(const char name[], int* count, SkScalar values[]) const
+{
+ const Rec* rec = this->find(name, kScalar_Type);
+ if (rec)
+ {
+ if (count)
+ *count = rec->fDataCount;
+ if (values)
+ memcpy(values, rec->data(), rec->fDataCount * rec->fDataLen);
+ return (const SkScalar*)rec->data();
+ }
+ return nullptr;
+}
+
+bool SkMetaData::findPtr(const char name[], void** ptr, PtrProc* proc) const {
+ const Rec* rec = this->find(name, kPtr_Type);
+ if (rec) {
+ SkASSERT(rec->fDataCount == 1);
+ const PtrPair* pair = (const PtrPair*)rec->data();
+ if (ptr) {
+ *ptr = pair->fPtr;
+ }
+ if (proc) {
+ *proc = pair->fProc;
+ }
+ return true;
+ }
+ return false;
+}
+
+const char* SkMetaData::findString(const char name[]) const
+{
+ const Rec* rec = this->find(name, kString_Type);
+ SkASSERT(rec == nullptr || rec->fDataLen == sizeof(char));
+ return rec ? (const char*)rec->data() : nullptr;
+}
+
+bool SkMetaData::findBool(const char name[], bool* value) const
+{
+ const Rec* rec = this->find(name, kBool_Type);
+ if (rec)
+ {
+ SkASSERT(rec->fDataCount == 1);
+ if (value)
+ *value = *(const bool*)rec->data();
+ return true;
+ }
+ return false;
+}
+
+const void* SkMetaData::findData(const char name[], size_t* length) const {
+ const Rec* rec = this->find(name, kData_Type);
+ if (rec) {
+ SkASSERT(rec->fDataLen == sizeof(char));
+ if (length) {
+ *length = rec->fDataCount;
+ }
+ return rec->data();
+ }
+ return nullptr;
+}
+
+const SkMetaData::Rec* SkMetaData::find(const char name[], Type type) const
+{
+ const Rec* rec = fRec;
+ while (rec)
+ {
+ if (rec->fType == type && !strcmp(rec->name(), name))
+ return rec;
+ rec = rec->fNext;
+ }
+ return nullptr;
+}
+
+bool SkMetaData::remove(const char name[], Type type) {
+ Rec* rec = fRec;
+ Rec* prev = nullptr;
+ while (rec) {
+ Rec* next = rec->fNext;
+ if (rec->fType == type && !strcmp(rec->name(), name)) {
+ if (prev) {
+ prev->fNext = next;
+ } else {
+ fRec = next;
+ }
+
+ if (kPtr_Type == type) {
+ PtrPair* pair = (PtrPair*)rec->data();
+ if (pair->fProc && pair->fPtr) {
+ (void)pair->fProc(pair->fPtr, false);
+ }
+ }
+ Rec::Free(rec);
+ return true;
+ }
+ prev = rec;
+ rec = next;
+ }
+ return false;
+}
+
+bool SkMetaData::removeS32(const char name[])
+{
+ return this->remove(name, kS32_Type);
+}
+
+bool SkMetaData::removeScalar(const char name[])
+{
+ return this->remove(name, kScalar_Type);
+}
+
+bool SkMetaData::removeString(const char name[])
+{
+ return this->remove(name, kString_Type);
+}
+
+bool SkMetaData::removePtr(const char name[])
+{
+ return this->remove(name, kPtr_Type);
+}
+
+bool SkMetaData::removeBool(const char name[])
+{
+ return this->remove(name, kBool_Type);
+}
+
+bool SkMetaData::removeData(const char name[]) {
+ return this->remove(name, kData_Type);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkMetaData::Iter::Iter(const SkMetaData& metadata) {
+ fRec = metadata.fRec;
+}
+
+void SkMetaData::Iter::reset(const SkMetaData& metadata) {
+ fRec = metadata.fRec;
+}
+
+const char* SkMetaData::Iter::next(SkMetaData::Type* t, int* count) {
+ const char* name = nullptr;
+
+ if (fRec) {
+ if (t) {
+ *t = (SkMetaData::Type)fRec->fType;
+ }
+ if (count) {
+ *count = fRec->fDataCount;
+ }
+ name = fRec->name();
+
+ fRec = fRec->fNext;
+ }
+ return name;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkMetaData::Rec* SkMetaData::Rec::Alloc(size_t size) {
+ return (Rec*)sk_malloc_throw(size);
+}
+
+void SkMetaData::Rec::Free(Rec* rec) {
+ sk_free(rec);
+}
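
For reference, a minimal sketch of the typed set/find/iterate API implemented above. The key names and values are arbitrary; the calls themselves mirror the functions in this file.

    #include <cstdio>
    #include "SkMetaData.h"

    static void metadata_example() {
        SkMetaData md;
        md.setS32("frame", 12);
        md.setScalar("opacity", 0.5f);
        md.setString("label", "thumbnail");

        int32_t frame;
        if (md.findS32("frame", &frame)) {
            printf("frame = %d\n", (int)frame);
        }

        // next() reports each record's name, type, and element count.
        SkMetaData::Iter iter(md);
        SkMetaData::Type type;
        int count;
        while (const char* name = iter.next(&type, &count)) {
            printf("%s: type=%d count=%d\n", name, (int)type, count);
        }

        md.removeS32("frame");   // returns true if the entry existed
    }
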
diff --git a/gfx/skia/skia/src/core/SkMiniRecorder.cpp b/gfx/skia/skia/src/core/SkMiniRecorder.cpp
new file mode 100644
index 000000000..51ff5acbb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMiniRecorder.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCanvas.h"
+#include "SkTLazy.h"
+#include "SkMiniRecorder.h"
+#include "SkOnce.h"
+#include "SkPicture.h"
+#include "SkPictureCommon.h"
+#include "SkRecordDraw.h"
+#include "SkTextBlob.h"
+
+using namespace SkRecords;
+
+class SkEmptyPicture final : public SkPicture {
+public:
+ void playback(SkCanvas*, AbortCallback*) const override { }
+
+ size_t approximateBytesUsed() const override { return sizeof(*this); }
+ int approximateOpCount() const override { return 0; }
+ SkRect cullRect() const override { return SkRect::MakeEmpty(); }
+ int numSlowPaths() const override { return 0; }
+ bool willPlayBackBitmaps() const override { return false; }
+};
+
+template <typename T>
+class SkMiniPicture final : public SkPicture {
+public:
+ SkMiniPicture(SkRect cull, T* op) : fCull(cull) {
+ memcpy(&fOp, op, sizeof(fOp)); // We take ownership of op's guts.
+ }
+
+ void playback(SkCanvas* c, AbortCallback*) const override {
+ SkRecords::Draw(c, nullptr, nullptr, 0, nullptr)(fOp);
+ }
+
+ size_t approximateBytesUsed() const override { return sizeof(*this); }
+ int approximateOpCount() const override { return 1; }
+ SkRect cullRect() const override { return fCull; }
+ bool willPlayBackBitmaps() const override { return SkBitmapHunter()(fOp); }
+ int numSlowPaths() const override {
+ SkPathCounter counter;
+ counter(fOp);
+ return counter.fNumSlowPathsAndDashEffects;
+ }
+
+private:
+ SkRect fCull;
+ T fOp;
+};
+
+
+SkMiniRecorder::SkMiniRecorder() : fState(State::kEmpty) {}
+SkMiniRecorder::~SkMiniRecorder() {
+ if (fState != State::kEmpty) {
+ // We have internal state pending.
+ // Detaching then deleting a picture is an easy way to clean up.
+ (void)this->detachAsPicture(SkRect::MakeEmpty());
+ }
+ SkASSERT(fState == State::kEmpty);
+}
+
+#define TRY_TO_STORE(Type, ...) \
+ if (fState != State::kEmpty) { return false; } \
+ fState = State::k##Type; \
+ new (fBuffer.get()) Type{__VA_ARGS__}; \
+ return true
+
+bool SkMiniRecorder::drawRect(const SkRect& rect, const SkPaint& paint) {
+ TRY_TO_STORE(DrawRect, paint, rect);
+}
+
+bool SkMiniRecorder::drawPath(const SkPath& path, const SkPaint& paint) {
+ TRY_TO_STORE(DrawPath, paint, path);
+}
+
+bool SkMiniRecorder::drawTextBlob(const SkTextBlob* b, SkScalar x, SkScalar y, const SkPaint& p) {
+ TRY_TO_STORE(DrawTextBlob, p, sk_ref_sp(b), x, y);
+}
+#undef TRY_TO_STORE
+
+
+sk_sp<SkPicture> SkMiniRecorder::detachAsPicture(const SkRect& cull) {
+#define CASE(Type) \
+ case State::k##Type: \
+ fState = State::kEmpty; \
+ return sk_make_sp<SkMiniPicture<Type>>(cull, reinterpret_cast<Type*>(fBuffer.get()))
+
+ static SkOnce once;
+ static SkPicture* empty;
+
+ switch (fState) {
+ case State::kEmpty:
+ once([]{ empty = new SkEmptyPicture; });
+ return sk_ref_sp(empty);
+ CASE(DrawPath);
+ CASE(DrawRect);
+ CASE(DrawTextBlob);
+ }
+ SkASSERT(false);
+ return nullptr;
+#undef CASE
+}
+
+void SkMiniRecorder::flushAndReset(SkCanvas* canvas) {
+#define CASE(Type) \
+ case State::k##Type: { \
+ fState = State::kEmpty; \
+ Type* op = reinterpret_cast<Type*>(fBuffer.get()); \
+ SkRecords::Draw(canvas, nullptr, nullptr, 0, nullptr)(*op); \
+ op->~Type(); \
+ } return
+
+ switch (fState) {
+ case State::kEmpty: return;
+ CASE(DrawPath);
+ CASE(DrawRect);
+ CASE(DrawTextBlob);
+ }
+ SkASSERT(false);
+#undef CASE
+}
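
For reference, a minimal sketch of the single-op recording flow above. The rect, paint, and cull bounds are arbitrary choices for the sketch.

    #include "SkMiniRecorder.h"
    #include "SkPaint.h"
    #include "SkPicture.h"
    #include "SkRect.h"

    static sk_sp<SkPicture> record_one_rect() {
        SkMiniRecorder recorder;
        SkPaint paint;
        if (!recorder.drawRect(SkRect::MakeWH(10, 20), paint)) {
            // Already holding an op; a real caller would fall back to a full recorder.
            return nullptr;
        }
        // Wraps the stored op in a one-op SkMiniPicture (or returns the shared
        // SkEmptyPicture when nothing was recorded).
        return recorder.detachAsPicture(SkRect::MakeWH(10, 20));
    }
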
diff --git a/gfx/skia/skia/src/core/SkMipMap.cpp b/gfx/skia/skia/src/core/SkMipMap.cpp
new file mode 100644
index 000000000..c5bd3ac3b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMipMap.cpp
@@ -0,0 +1,636 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMipMap.h"
+#include "SkBitmap.h"
+#include "SkColorPriv.h"
+#include "SkHalf.h"
+#include "SkMathPriv.h"
+#include "SkNx.h"
+#include "SkPM4fPriv.h"
+#include "SkTypes.h"
+
+//
+// ColorTypeFilter is the "Type" we pass to some downsample template functions.
+// It controls how we expand a pixel into a larger type, with space between each component,
+// so we can then perform our simple filter (either box or triangle) and store the intermediates
+// in the expanded type.
+//
+
+struct ColorTypeFilter_8888 {
+ typedef uint32_t Type;
+#if defined(SKNX_IS_FAST)
+ static Sk4h Expand(uint32_t x) {
+ return SkNx_cast<uint16_t>(Sk4b::Load(&x));
+ }
+ static uint32_t Compact(const Sk4h& x) {
+ uint32_t r;
+ SkNx_cast<uint8_t>(x).store(&r);
+ return r;
+ }
+#else
+ static uint64_t Expand(uint32_t x) {
+ return (x & 0xFF00FF) | ((uint64_t)(x & 0xFF00FF00) << 24);
+ }
+ static uint32_t Compact(uint64_t x) {
+ return (uint32_t)((x & 0xFF00FF) | ((x >> 24) & 0xFF00FF00));
+ }
+#endif
+};
+
+struct ColorTypeFilter_S32 {
+ typedef uint32_t Type;
+ static Sk4f Expand(uint32_t x) {
+ return Sk4f_fromS32(x);
+ }
+ static uint32_t Compact(const Sk4f& x) {
+ return Sk4f_toS32(x);
+ }
+};
+
+struct ColorTypeFilter_565 {
+ typedef uint16_t Type;
+ static uint32_t Expand(uint16_t x) {
+ return (x & ~SK_G16_MASK_IN_PLACE) | ((x & SK_G16_MASK_IN_PLACE) << 16);
+ }
+ static uint16_t Compact(uint32_t x) {
+ return (x & ~SK_G16_MASK_IN_PLACE) | ((x >> 16) & SK_G16_MASK_IN_PLACE);
+ }
+};
+
+struct ColorTypeFilter_4444 {
+ typedef uint16_t Type;
+ static uint32_t Expand(uint16_t x) {
+ return (x & 0xF0F) | ((x & ~0xF0F) << 12);
+ }
+ static uint16_t Compact(uint32_t x) {
+ return (x & 0xF0F) | ((x >> 12) & ~0xF0F);
+ }
+};
+
+struct ColorTypeFilter_8 {
+ typedef uint8_t Type;
+ static unsigned Expand(unsigned x) {
+ return x;
+ }
+ static uint8_t Compact(unsigned x) {
+ return (uint8_t)x;
+ }
+};
+
+struct ColorTypeFilter_F16 {
+ typedef uint64_t Type; // SkHalf x4
+ static Sk4f Expand(uint64_t x) {
+ return SkHalfToFloat_finite_ftz(x);
+ }
+ static uint64_t Compact(const Sk4f& x) {
+ uint64_t r;
+ SkFloatToHalf_finite_ftz(x).store(&r);
+ return r;
+ }
+};
+
+template <typename T> T add_121(const T& a, const T& b, const T& c) {
+ return a + b + b + c;
+}
+
+template <typename T> T shift_right(const T& x, int bits) {
+ return x >> bits;
+}
+
+Sk4f shift_right(const Sk4f& x, int bits) {
+ return x * (1.0f / (1 << bits));
+}
+
+template <typename T> T shift_left(const T& x, int bits) {
+ return x << bits;
+}
+
+Sk4f shift_left(const Sk4f& x, int bits) {
+ return x * (1 << bits);
+}
+
+//
+// To produce each mip level, we need to filter down by 1/2 (e.g. 100x100 -> 50x50).
+// If the starting dimension is odd, we floor the size of the lower level (e.g. 101 -> 50).
+// In those (odd) cases, we use a triangle filter, with 1-pixel overlap between samplings;
+// in the even cases, we just use a 2x box filter.
+//
+// This produces 4 possible isotropic filters: 2x2 2x3 3x2 3x3 where WxH indicates the number of
+// src pixels we need to sample in each dimension to produce 1 dst pixel.
+//
+// OpenGL expects a full mipmap stack to contain anisotropic space as well.
+// This means a 100x1 image would continue down to a 50x1 image, 25x1 image...
+// Because of this, we need 4 more anisotropic filters: 1x2, 1x3, 2x1, 3x1.
+
+template <typename F> void downsample_1_2(void* dst, const void* src, size_t srcRB, int count) {
+ SkASSERT(count > 0);
+ auto p0 = static_cast<const typename F::Type*>(src);
+ auto p1 = (const typename F::Type*)((const char*)p0 + srcRB);
+ auto d = static_cast<typename F::Type*>(dst);
+
+ for (int i = 0; i < count; ++i) {
+ auto c00 = F::Expand(p0[0]);
+ auto c10 = F::Expand(p1[0]);
+
+ auto c = c00 + c10;
+ d[i] = F::Compact(shift_right(c, 1));
+ p0 += 2;
+ p1 += 2;
+ }
+}
+
+template <typename F> void downsample_1_3(void* dst, const void* src, size_t srcRB, int count) {
+ SkASSERT(count > 0);
+ auto p0 = static_cast<const typename F::Type*>(src);
+ auto p1 = (const typename F::Type*)((const char*)p0 + srcRB);
+ auto p2 = (const typename F::Type*)((const char*)p1 + srcRB);
+ auto d = static_cast<typename F::Type*>(dst);
+
+ for (int i = 0; i < count; ++i) {
+ auto c00 = F::Expand(p0[0]);
+ auto c10 = F::Expand(p1[0]);
+ auto c20 = F::Expand(p2[0]);
+
+ auto c = add_121(c00, c10, c20);
+ d[i] = F::Compact(shift_right(c, 2));
+ p0 += 2;
+ p1 += 2;
+ p2 += 2;
+ }
+}
+
+template <typename F> void downsample_2_1(void* dst, const void* src, size_t srcRB, int count) {
+ SkASSERT(count > 0);
+ auto p0 = static_cast<const typename F::Type*>(src);
+ auto d = static_cast<typename F::Type*>(dst);
+
+ for (int i = 0; i < count; ++i) {
+ auto c00 = F::Expand(p0[0]);
+ auto c01 = F::Expand(p0[1]);
+
+ auto c = c00 + c01;
+ d[i] = F::Compact(shift_right(c, 1));
+ p0 += 2;
+ }
+}
+
+template <typename F> void downsample_2_2(void* dst, const void* src, size_t srcRB, int count) {
+ SkASSERT(count > 0);
+ auto p0 = static_cast<const typename F::Type*>(src);
+ auto p1 = (const typename F::Type*)((const char*)p0 + srcRB);
+ auto d = static_cast<typename F::Type*>(dst);
+
+ for (int i = 0; i < count; ++i) {
+ auto c00 = F::Expand(p0[0]);
+ auto c01 = F::Expand(p0[1]);
+ auto c10 = F::Expand(p1[0]);
+ auto c11 = F::Expand(p1[1]);
+
+ auto c = c00 + c10 + c01 + c11;
+ d[i] = F::Compact(shift_right(c, 2));
+ p0 += 2;
+ p1 += 2;
+ }
+}
+
+template <typename F> void downsample_2_3(void* dst, const void* src, size_t srcRB, int count) {
+ SkASSERT(count > 0);
+ auto p0 = static_cast<const typename F::Type*>(src);
+ auto p1 = (const typename F::Type*)((const char*)p0 + srcRB);
+ auto p2 = (const typename F::Type*)((const char*)p1 + srcRB);
+ auto d = static_cast<typename F::Type*>(dst);
+
+ for (int i = 0; i < count; ++i) {
+ auto c00 = F::Expand(p0[0]);
+ auto c01 = F::Expand(p0[1]);
+ auto c10 = F::Expand(p1[0]);
+ auto c11 = F::Expand(p1[1]);
+ auto c20 = F::Expand(p2[0]);
+ auto c21 = F::Expand(p2[1]);
+
+ auto c = add_121(c00, c10, c20) + add_121(c01, c11, c21);
+ d[i] = F::Compact(shift_right(c, 3));
+ p0 += 2;
+ p1 += 2;
+ p2 += 2;
+ }
+}
+
+template <typename F> void downsample_3_1(void* dst, const void* src, size_t srcRB, int count) {
+ SkASSERT(count > 0);
+ auto p0 = static_cast<const typename F::Type*>(src);
+ auto d = static_cast<typename F::Type*>(dst);
+
+ auto c02 = F::Expand(p0[0]);
+ for (int i = 0; i < count; ++i) {
+ auto c00 = c02;
+ auto c01 = F::Expand(p0[1]);
+ c02 = F::Expand(p0[2]);
+
+ auto c = add_121(c00, c01, c02);
+ d[i] = F::Compact(shift_right(c, 2));
+ p0 += 2;
+ }
+}
+
+template <typename F> void downsample_3_2(void* dst, const void* src, size_t srcRB, int count) {
+ SkASSERT(count > 0);
+ auto p0 = static_cast<const typename F::Type*>(src);
+ auto p1 = (const typename F::Type*)((const char*)p0 + srcRB);
+ auto d = static_cast<typename F::Type*>(dst);
+
+ auto c02 = F::Expand(p0[0]);
+ auto c12 = F::Expand(p1[0]);
+ for (int i = 0; i < count; ++i) {
+ auto c00 = c02;
+ auto c01 = F::Expand(p0[1]);
+ c02 = F::Expand(p0[2]);
+ auto c10 = c12;
+ auto c11 = F::Expand(p1[1]);
+ c12 = F::Expand(p1[2]);
+
+ auto c = add_121(c00, c01, c02) + add_121(c10, c11, c12);
+ d[i] = F::Compact(shift_right(c, 3));
+ p0 += 2;
+ p1 += 2;
+ }
+}
+
+template <typename F> void downsample_3_3(void* dst, const void* src, size_t srcRB, int count) {
+ SkASSERT(count > 0);
+ auto p0 = static_cast<const typename F::Type*>(src);
+ auto p1 = (const typename F::Type*)((const char*)p0 + srcRB);
+ auto p2 = (const typename F::Type*)((const char*)p1 + srcRB);
+ auto d = static_cast<typename F::Type*>(dst);
+
+ auto c02 = F::Expand(p0[0]);
+ auto c12 = F::Expand(p1[0]);
+ auto c22 = F::Expand(p2[0]);
+ for (int i = 0; i < count; ++i) {
+ auto c00 = c02;
+ auto c01 = F::Expand(p0[1]);
+ c02 = F::Expand(p0[2]);
+ auto c10 = c12;
+ auto c11 = F::Expand(p1[1]);
+ c12 = F::Expand(p1[2]);
+ auto c20 = c22;
+ auto c21 = F::Expand(p2[1]);
+ c22 = F::Expand(p2[2]);
+
+ auto c =
+ add_121(c00, c01, c02) +
+ shift_left(add_121(c10, c11, c12), 1) +
+ add_121(c20, c21, c22);
+ d[i] = F::Compact(shift_right(c, 4));
+ p0 += 2;
+ p1 += 2;
+ p2 += 2;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+size_t SkMipMap::AllocLevelsSize(int levelCount, size_t pixelSize) {
+ if (levelCount < 0) {
+ return 0;
+ }
+ int64_t size = sk_64_mul(levelCount + 1, sizeof(Level)) + pixelSize;
+ if (!sk_64_isS32(size)) {
+ return 0;
+ }
+ return sk_64_asS32(size);
+}
+
+SkMipMap* SkMipMap::Build(const SkPixmap& src, SkSourceGammaTreatment treatment,
+ SkDiscardableFactoryProc fact) {
+ typedef void FilterProc(void*, const void* srcPtr, size_t srcRB, int count);
+
+ FilterProc* proc_1_2 = nullptr;
+ FilterProc* proc_1_3 = nullptr;
+ FilterProc* proc_2_1 = nullptr;
+ FilterProc* proc_2_2 = nullptr;
+ FilterProc* proc_2_3 = nullptr;
+ FilterProc* proc_3_1 = nullptr;
+ FilterProc* proc_3_2 = nullptr;
+ FilterProc* proc_3_3 = nullptr;
+
+ const SkColorType ct = src.colorType();
+ const SkAlphaType at = src.alphaType();
+ const bool srgbGamma = (SkSourceGammaTreatment::kRespect == treatment)
+ && src.info().gammaCloseToSRGB();
+
+ switch (ct) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ if (srgbGamma) {
+ proc_1_2 = downsample_1_2<ColorTypeFilter_S32>;
+ proc_1_3 = downsample_1_3<ColorTypeFilter_S32>;
+ proc_2_1 = downsample_2_1<ColorTypeFilter_S32>;
+ proc_2_2 = downsample_2_2<ColorTypeFilter_S32>;
+ proc_2_3 = downsample_2_3<ColorTypeFilter_S32>;
+ proc_3_1 = downsample_3_1<ColorTypeFilter_S32>;
+ proc_3_2 = downsample_3_2<ColorTypeFilter_S32>;
+ proc_3_3 = downsample_3_3<ColorTypeFilter_S32>;
+ } else {
+ proc_1_2 = downsample_1_2<ColorTypeFilter_8888>;
+ proc_1_3 = downsample_1_3<ColorTypeFilter_8888>;
+ proc_2_1 = downsample_2_1<ColorTypeFilter_8888>;
+ proc_2_2 = downsample_2_2<ColorTypeFilter_8888>;
+ proc_2_3 = downsample_2_3<ColorTypeFilter_8888>;
+ proc_3_1 = downsample_3_1<ColorTypeFilter_8888>;
+ proc_3_2 = downsample_3_2<ColorTypeFilter_8888>;
+ proc_3_3 = downsample_3_3<ColorTypeFilter_8888>;
+ }
+ break;
+ case kRGB_565_SkColorType:
+ proc_1_2 = downsample_1_2<ColorTypeFilter_565>;
+ proc_1_3 = downsample_1_3<ColorTypeFilter_565>;
+ proc_2_1 = downsample_2_1<ColorTypeFilter_565>;
+ proc_2_2 = downsample_2_2<ColorTypeFilter_565>;
+ proc_2_3 = downsample_2_3<ColorTypeFilter_565>;
+ proc_3_1 = downsample_3_1<ColorTypeFilter_565>;
+ proc_3_2 = downsample_3_2<ColorTypeFilter_565>;
+ proc_3_3 = downsample_3_3<ColorTypeFilter_565>;
+ break;
+ case kARGB_4444_SkColorType:
+ proc_1_2 = downsample_1_2<ColorTypeFilter_4444>;
+ proc_1_3 = downsample_1_3<ColorTypeFilter_4444>;
+ proc_2_1 = downsample_2_1<ColorTypeFilter_4444>;
+ proc_2_2 = downsample_2_2<ColorTypeFilter_4444>;
+ proc_2_3 = downsample_2_3<ColorTypeFilter_4444>;
+ proc_3_1 = downsample_3_1<ColorTypeFilter_4444>;
+ proc_3_2 = downsample_3_2<ColorTypeFilter_4444>;
+ proc_3_3 = downsample_3_3<ColorTypeFilter_4444>;
+ break;
+ case kAlpha_8_SkColorType:
+ case kGray_8_SkColorType:
+ proc_1_2 = downsample_1_2<ColorTypeFilter_8>;
+ proc_1_3 = downsample_1_3<ColorTypeFilter_8>;
+ proc_2_1 = downsample_2_1<ColorTypeFilter_8>;
+ proc_2_2 = downsample_2_2<ColorTypeFilter_8>;
+ proc_2_3 = downsample_2_3<ColorTypeFilter_8>;
+ proc_3_1 = downsample_3_1<ColorTypeFilter_8>;
+ proc_3_2 = downsample_3_2<ColorTypeFilter_8>;
+ proc_3_3 = downsample_3_3<ColorTypeFilter_8>;
+ break;
+ case kRGBA_F16_SkColorType:
+ proc_1_2 = downsample_1_2<ColorTypeFilter_F16>;
+ proc_1_3 = downsample_1_3<ColorTypeFilter_F16>;
+ proc_2_1 = downsample_2_1<ColorTypeFilter_F16>;
+ proc_2_2 = downsample_2_2<ColorTypeFilter_F16>;
+ proc_2_3 = downsample_2_3<ColorTypeFilter_F16>;
+ proc_3_1 = downsample_3_1<ColorTypeFilter_F16>;
+ proc_3_2 = downsample_3_2<ColorTypeFilter_F16>;
+ proc_3_3 = downsample_3_3<ColorTypeFilter_F16>;
+ break;
+ default:
+ // TODO: We could build miplevels for kIndex8 if the levels were in 8888.
+ // Means using more ram, but the quality would be fine.
+ return nullptr;
+ }
+
+ if (src.width() <= 1 && src.height() <= 1) {
+ return nullptr;
+ }
+ // whip through our loop to compute the exact size needed
+ size_t size = 0;
+ int countLevels = ComputeLevelCount(src.width(), src.height());
+ for (int currentMipLevel = countLevels; currentMipLevel >= 0; currentMipLevel--) {
+ SkISize mipSize = ComputeLevelSize(src.width(), src.height(), currentMipLevel);
+ size += SkColorTypeMinRowBytes(ct, mipSize.fWidth) * mipSize.fHeight;
+ }
+
+ size_t storageSize = SkMipMap::AllocLevelsSize(countLevels, size);
+ if (0 == storageSize) {
+ return nullptr;
+ }
+
+ SkMipMap* mipmap;
+ if (fact) {
+ SkDiscardableMemory* dm = fact(storageSize);
+ if (nullptr == dm) {
+ return nullptr;
+ }
+ mipmap = new SkMipMap(storageSize, dm);
+ } else {
+ mipmap = new SkMipMap(sk_malloc_throw(storageSize), storageSize);
+ }
+
+ // init
+ mipmap->fCS = sk_ref_sp(src.info().colorSpace());
+ mipmap->fCount = countLevels;
+ mipmap->fLevels = (Level*)mipmap->writable_data();
+ SkASSERT(mipmap->fLevels);
+
+ Level* levels = mipmap->fLevels;
+ uint8_t* baseAddr = (uint8_t*)&levels[countLevels];
+ uint8_t* addr = baseAddr;
+ int width = src.width();
+ int height = src.height();
+ uint32_t rowBytes;
+ SkPixmap srcPM(src);
+
+ for (int i = 0; i < countLevels; ++i) {
+ FilterProc* proc;
+ if (height & 1) {
+ if (height == 1) { // src-height is 1
+ if (width & 1) { // src-width is 3
+ proc = proc_3_1;
+ } else { // src-width is 2
+ proc = proc_2_1;
+ }
+ } else { // src-height is 3
+ if (width & 1) {
+ if (width == 1) { // src-width is 1
+ proc = proc_1_3;
+ } else { // src-width is 3
+ proc = proc_3_3;
+ }
+ } else { // src-width is 2
+ proc = proc_2_3;
+ }
+ }
+ } else { // src-height is 2
+ if (width & 1) {
+ if (width == 1) { // src-width is 1
+ proc = proc_1_2;
+ } else { // src-width is 3
+ proc = proc_3_2;
+ }
+ } else { // src-width is 2
+ proc = proc_2_2;
+ }
+ }
+ width = SkTMax(1, width >> 1);
+ height = SkTMax(1, height >> 1);
+ rowBytes = SkToU32(SkColorTypeMinRowBytes(ct, width));
+
+ // We make the Info w/o any colorspace, since that storage is not under our control, and
+ // will not be deleted in a controlled fashion. When the caller is given the pixmap for
+ // a given level, we augment this pixmap with fCS (which we do manage).
+ new (&levels[i].fPixmap) SkPixmap(SkImageInfo::Make(width, height, ct, at), addr, rowBytes);
+ levels[i].fScale = SkSize::Make(SkIntToScalar(width) / src.width(),
+ SkIntToScalar(height) / src.height());
+
+ const SkPixmap& dstPM = levels[i].fPixmap;
+ const void* srcBasePtr = srcPM.addr();
+ void* dstBasePtr = dstPM.writable_addr();
+
+ const size_t srcRB = srcPM.rowBytes();
+ for (int y = 0; y < height; y++) {
+ proc(dstBasePtr, srcBasePtr, srcRB, width);
+ srcBasePtr = (char*)srcBasePtr + srcRB * 2; // jump two rows
+ dstBasePtr = (char*)dstBasePtr + dstPM.rowBytes();
+ }
+ srcPM = dstPM;
+ addr += height * rowBytes;
+ }
+ SkASSERT(addr == baseAddr + size);
+
+ SkASSERT(mipmap->fLevels);
+ return mipmap;
+}
+
+int SkMipMap::ComputeLevelCount(int baseWidth, int baseHeight) {
+ if (baseWidth < 1 || baseHeight < 1) {
+ return 0;
+ }
+
+ // OpenGL's spec requires that each mipmap level have height/width equal to
+ // max(1, floor(original_height / 2^i))
+ // (or original_width) where i is the mipmap level.
+ // Continue scaling down until both axes are size 1.
+
+ const int largestAxis = SkTMax(baseWidth, baseHeight);
+ if (largestAxis < 2) {
+ // SkMipMap::Build requires a minimum size of 2.
+ return 0;
+ }
+ const int leadingZeros = SkCLZ(static_cast<uint32_t>(largestAxis));
+ // If the value 00011010 has 3 leading 0s then it has 5 significant bits
+ // (the bits which are not leading zeros)
+ const int significantBits = (sizeof(uint32_t) * 8) - leadingZeros;
+ // This assumes that a byte is 8 bits and that sizeof(uint32_t) is 4.
+ int mipLevelCount = significantBits;
+
+ // SkMipMap does not include the base mip level.
+ // For example, it contains levels 1-x instead of 0-x.
+ // This is because the image used to create SkMipMap is the base level.
+ // So subtract 1 from the mip level count.
+ if (mipLevelCount > 0) {
+ --mipLevelCount;
+ }
+
+ return mipLevelCount;
+}
+
+SkISize SkMipMap::ComputeLevelSize(int baseWidth, int baseHeight, int level) {
+ if (baseWidth < 1 || baseHeight < 1) {
+ return SkISize::Make(0, 0);
+ }
+
+ int maxLevelCount = ComputeLevelCount(baseWidth, baseHeight);
+ if (level >= maxLevelCount || level < 0) {
+ return SkISize::Make(0, 0);
+ }
+ // OpenGL's spec requires that each mipmap level have height/width equal to
+ // max(1, floor(original_height / 2^i))
+ // (or original_width) where i is the mipmap level.
+
+ // SkMipMap does not include the base mip level.
+ // For example, it contains levels 1-x instead of 0-x.
+ // This is because the image used to create SkMipMap is the base level.
+ // So subtract 1 from the mip level to get the index stored by SkMipMap.
+ int width = SkTMax(1, baseWidth >> (level + 1));
+ int height = SkTMax(1, baseHeight >> (level + 1));
+
+ return SkISize::Make(width, height);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkMipMap::extractLevel(const SkSize& scaleSize, Level* levelPtr) const {
+ if (nullptr == fLevels) {
+ return false;
+ }
+
+ SkASSERT(scaleSize.width() >= 0 && scaleSize.height() >= 0);
+
+#ifndef SK_SUPPORT_LEGACY_ANISOTROPIC_MIPMAP_SCALE
+ // Use the smallest scale to match the GPU impl.
+ const SkScalar scale = SkTMin(scaleSize.width(), scaleSize.height());
+#else
+ // Ideally we'd pick the smaller scale, to match Ganesh. But ignoring one of the
+ // scales can produce some atrocious results, so for now we use the geometric mean.
+ // (https://bugs.chromium.org/p/skia/issues/detail?id=4863)
+ const SkScalar scale = SkScalarSqrt(scaleSize.width() * scaleSize.height());
+#endif
+
+ if (scale >= SK_Scalar1 || scale <= 0 || !SkScalarIsFinite(scale)) {
+ return false;
+ }
+
+ SkScalar L = -SkScalarLog2(scale);
+ if (!SkScalarIsFinite(L)) {
+ return false;
+ }
+ SkASSERT(L >= 0);
+ int level = SkScalarFloorToInt(L);
+
+ SkASSERT(level >= 0);
+ if (level <= 0) {
+ return false;
+ }
+
+ if (level > fCount) {
+ level = fCount;
+ }
+ if (levelPtr) {
+ *levelPtr = fLevels[level - 1];
+ // need to augment with our colorspace
+ levelPtr->fPixmap.setColorSpace(fCS);
+ }
+ return true;
+}
+
+// Helper which extracts a pixmap from the src bitmap
+//
+SkMipMap* SkMipMap::Build(const SkBitmap& src, SkSourceGammaTreatment treatment,
+ SkDiscardableFactoryProc fact) {
+ SkAutoPixmapUnlock srcUnlocker;
+ if (!src.requestLock(&srcUnlocker)) {
+ return nullptr;
+ }
+ const SkPixmap& srcPixmap = srcUnlocker.pixmap();
+ // Try to catch where we might have returned nullptr for src crbug.com/492818
+ if (nullptr == srcPixmap.addr()) {
+ sk_throw();
+ }
+ return Build(srcPixmap, treatment, fact);
+}
+
+int SkMipMap::countLevels() const {
+ return fCount;
+}
+
+bool SkMipMap::getLevel(int index, Level* levelPtr) const {
+ if (nullptr == fLevels) {
+ return false;
+ }
+ if (index < 0) {
+ return false;
+ }
+ if (index > fCount - 1) {
+ return false;
+ }
+ if (levelPtr) {
+ *levelPtr = fLevels[index];
+ }
+ return true;
+}
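
To make the level math above concrete, a small worked example assuming a hypothetical 100x80 base image; the numbers follow directly from ComputeLevelCount and ComputeLevelSize as written.

    #include "SkMipMap.h"

    static void level_math_example() {
        // largestAxis = 100 = 0b1100100 has 7 significant bits, so the count is 7 - 1 = 6.
        SkASSERT(6 == SkMipMap::ComputeLevelCount(100, 80));
        // Level i halves the base (i + 1) times, flooring and clamping to 1:
        //   level 0: 50x40, 1: 25x20, 2: 12x10, 3: 6x5, 4: 3x2, 5: 1x1
        SkASSERT(SkISize::Make(6, 5) == SkMipMap::ComputeLevelSize(100, 80, 3));
    }
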
diff --git a/gfx/skia/skia/src/core/SkMipMap.h b/gfx/skia/skia/src/core/SkMipMap.h
new file mode 100644
index 000000000..0f31a9f70
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMipMap.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMipMap_DEFINED
+#define SkMipMap_DEFINED
+
+#include "SkCachedData.h"
+#include "SkPixmap.h"
+#include "SkScalar.h"
+#include "SkSize.h"
+#include "SkShader.h"
+
+class SkBitmap;
+class SkDiscardableMemory;
+
+typedef SkDiscardableMemory* (*SkDiscardableFactoryProc)(size_t bytes);
+
+/*
+ * SkMipMap will generate mipmap levels when given a base mipmap level image.
+ *
+ * Any function which deals with mipmap level indices will treat index 0 as
+ * the first mipmap level that was generated. Said another way, it does
+ * not include the base level in its range.
+ */
+class SkMipMap : public SkCachedData {
+public:
+ static SkMipMap* Build(const SkPixmap& src, SkSourceGammaTreatment, SkDiscardableFactoryProc);
+ static SkMipMap* Build(const SkBitmap& src, SkSourceGammaTreatment, SkDiscardableFactoryProc);
+
+ static SkSourceGammaTreatment DeduceTreatment(const SkShader::ContextRec& rec) {
+ return (SkShader::ContextRec::kPMColor_DstType == rec.fPreferredDstType) ?
+ SkSourceGammaTreatment::kIgnore : SkSourceGammaTreatment::kRespect;
+ }
+
+ // Determines how many levels a SkMipMap will have without creating that mipmap.
+ // This does not include the base mipmap level that the user provided when
+ // creating the SkMipMap.
+ static int ComputeLevelCount(int baseWidth, int baseHeight);
+
+ // Determines the size of a given mipmap level.
+ // |level| is an index into the generated mipmap levels. It does not include
+ // the base level. So index 0 represents mipmap level 1.
+ static SkISize ComputeLevelSize(int baseWidth, int baseHeight, int level);
+
+ struct Level {
+ SkPixmap fPixmap;
+ SkSize fScale; // < 1.0
+ };
+
+ bool extractLevel(const SkSize& scale, Level*) const;
+
+ // countLevels returns the number of mipmap levels generated (which does not
+ // include the base mipmap level).
+ int countLevels() const;
+
+ // |index| is an index into the generated mipmap levels. It does not include
+ // the base level. So index 0 represents mipmap level 1.
+ bool getLevel(int index, Level*) const;
+
+protected:
+ void onDataChange(void* oldData, void* newData) override {
+ fLevels = (Level*)newData; // could be nullptr
+ }
+
+private:
+ sk_sp<SkColorSpace> fCS;
+ Level* fLevels; // managed by the base class; may be null after onDataChange.
+ int fCount;
+
+ SkMipMap(void* malloc, size_t size) : INHERITED(malloc, size) {}
+ SkMipMap(size_t size, SkDiscardableMemory* dm) : INHERITED(size, dm) {}
+
+ static size_t AllocLevelsSize(int levelCount, size_t pixelSize);
+
+ typedef SkCachedData INHERITED;
+};
+
+#endif
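
For reference, a minimal sketch of building a mip chain from a bitmap and extracting the level for a 1/4 draw scale. The gamma treatment, the null discardable factory, and the 0.25 scale are arbitrary choices for the sketch, and ownership handling via SkCachedData is deliberately omitted.

    #include "SkBitmap.h"
    #include "SkMipMap.h"

    static void mipmap_example(const SkBitmap& bm) {
        SkMipMap* mm = SkMipMap::Build(bm, SkSourceGammaTreatment::kIgnore, nullptr);
        if (!mm) {
            return;   // e.g. a 1x1 source or an unsupported color type
        }
        SkMipMap::Level level;
        if (mm->extractLevel(SkSize::Make(0.25f, 0.25f), &level)) {
            // level.fPixmap is roughly a quarter of the base dimensions; fScale < 1.
        }
        // (Lifetime is managed through SkCachedData and is left out of this sketch.)
    }
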
diff --git a/gfx/skia/skia/src/core/SkModeColorFilter.cpp b/gfx/skia/skia/src/core/SkModeColorFilter.cpp
new file mode 100644
index 000000000..ba3b50d76
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkModeColorFilter.cpp
@@ -0,0 +1,192 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBlitRow.h"
+#include "SkColorFilter.h"
+#include "SkColorPriv.h"
+#include "SkModeColorFilter.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+#include "SkUtils.h"
+#include "SkString.h"
+#include "SkValidationUtils.h"
+#include "SkPM4f.h"
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifndef SK_IGNORE_TO_STRING
+void SkModeColorFilter::toString(SkString* str) const {
+ str->append("SkModeColorFilter: color: 0x");
+ str->appendHex(fColor);
+ str->append(" mode: ");
+ str->append(SkXfermode::ModeName(fMode));
+}
+#endif
+
+bool SkModeColorFilter::asColorMode(SkColor* color, SkXfermode::Mode* mode) const {
+ if (color) {
+ *color = fColor;
+ }
+ if (mode) {
+ *mode = fMode;
+ }
+ return true;
+}
+
+uint32_t SkModeColorFilter::getFlags() const {
+ uint32_t flags = 0;
+ switch (fMode) {
+ case SkXfermode::kDst_Mode: //!< [Da, Dc]
+ case SkXfermode::kSrcATop_Mode: //!< [Da, Sc * Da + (1 - Sa) * Dc]
+ flags |= kAlphaUnchanged_Flag;
+ default:
+ break;
+ }
+ return flags;
+}
+
+void SkModeColorFilter::filterSpan(const SkPMColor shader[], int count, SkPMColor result[]) const {
+ SkPMColor color = fPMColor;
+ SkXfermodeProc proc = fProc;
+
+ for (int i = 0; i < count; i++) {
+ result[i] = proc(color, shader[i]);
+ }
+}
+
+void SkModeColorFilter::filterSpan4f(const SkPM4f shader[], int count, SkPM4f result[]) const {
+ SkPM4f color = SkPM4f::FromPMColor(fPMColor);
+ SkXfermodeProc4f proc = SkXfermode::GetProc4f(fMode);
+
+ for (int i = 0; i < count; i++) {
+ result[i] = proc(color, shader[i]);
+ }
+}
+
+void SkModeColorFilter::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeColor(fColor);
+ buffer.writeUInt(fMode);
+}
+
+void SkModeColorFilter::updateCache() {
+ fPMColor = SkPreMultiplyColor(fColor);
+ fProc = SkXfermode::GetProc(fMode);
+}
+
+sk_sp<SkFlattenable> SkModeColorFilter::CreateProc(SkReadBuffer& buffer) {
+ SkColor color = buffer.readColor();
+ SkXfermode::Mode mode = (SkXfermode::Mode)buffer.readUInt();
+ return SkColorFilter::MakeModeFilter(color, mode);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+#if SK_SUPPORT_GPU
+#include "GrBlend.h"
+#include "GrInvariantOutput.h"
+#include "effects/GrXfermodeFragmentProcessor.h"
+#include "effects/GrConstColorProcessor.h"
+#include "SkGr.h"
+
+sk_sp<GrFragmentProcessor> SkModeColorFilter::asFragmentProcessor(GrContext*) const {
+ if (SkXfermode::kDst_Mode == fMode) {
+ return nullptr;
+ }
+
+ sk_sp<GrFragmentProcessor> constFP(
+ GrConstColorProcessor::Make(SkColorToPremulGrColor(fColor),
+ GrConstColorProcessor::kIgnore_InputMode));
+ sk_sp<GrFragmentProcessor> fp(
+ GrXfermodeFragmentProcessor::MakeFromSrcProcessor(std::move(constFP), fMode));
+ if (!fp) {
+ return nullptr;
+ }
+#ifdef SK_DEBUG
+ // With a solid color input this should always be able to compute the blended color
+ // (at least for coeff modes)
+ if (fMode <= SkXfermode::kLastCoeffMode) {
+ static SkRandom gRand;
+ GrInvariantOutput io(GrPremulColor(gRand.nextU()), kRGBA_GrColorComponentFlags,
+ false);
+ fp->computeInvariantOutput(&io);
+ SkASSERT(io.validFlags() == kRGBA_GrColorComponentFlags);
+ }
+#endif
+ return fp;
+}
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+class Src_SkModeColorFilter final : public SkModeColorFilter {
+public:
+ Src_SkModeColorFilter(SkColor color) : INHERITED(color, SkXfermode::kSrc_Mode) {}
+
+ void filterSpan(const SkPMColor shader[], int count, SkPMColor result[]) const override {
+ sk_memset32(result, this->getPMColor(), count);
+ }
+
+private:
+ typedef SkModeColorFilter INHERITED;
+};
+
+class SrcOver_SkModeColorFilter final : public SkModeColorFilter {
+public:
+ SrcOver_SkModeColorFilter(SkColor color) : INHERITED(color, SkXfermode::kSrcOver_Mode) { }
+
+ void filterSpan(const SkPMColor shader[], int count, SkPMColor result[]) const override {
+ SkBlitRow::Color32(result, shader, count, this->getPMColor());
+ }
+
+private:
+ typedef SkModeColorFilter INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkColorFilter> SkColorFilter::MakeModeFilter(SkColor color, SkXfermode::Mode mode) {
+ if (!SkIsValidMode(mode)) {
+ return nullptr;
+ }
+
+ unsigned alpha = SkColorGetA(color);
+
+ // first collapse some modes if possible
+
+ if (SkXfermode::kClear_Mode == mode) {
+ color = 0;
+ mode = SkXfermode::kSrc_Mode;
+ } else if (SkXfermode::kSrcOver_Mode == mode) {
+ if (0 == alpha) {
+ mode = SkXfermode::kDst_Mode;
+ } else if (255 == alpha) {
+ mode = SkXfermode::kSrc_Mode;
+ }
+ // else just stay srcover
+ }
+
+ // weed out combinations that are noops, and just return null
+ if (SkXfermode::kDst_Mode == mode ||
+ (0 == alpha && (SkXfermode::kSrcOver_Mode == mode ||
+ SkXfermode::kDstOver_Mode == mode ||
+ SkXfermode::kDstOut_Mode == mode ||
+ SkXfermode::kSrcATop_Mode == mode ||
+ SkXfermode::kXor_Mode == mode ||
+ SkXfermode::kDarken_Mode == mode)) ||
+ (0xFF == alpha && SkXfermode::kDstIn_Mode == mode)) {
+ return nullptr;
+ }
+
+ switch (mode) {
+ case SkXfermode::kSrc_Mode:
+ return sk_make_sp<Src_SkModeColorFilter>(color);
+ case SkXfermode::kSrcOver_Mode:
+ return sk_make_sp<SrcOver_SkModeColorFilter>(color);
+ default:
+ return SkModeColorFilter::Make(color, mode);
+ }
+}
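
For reference, a minimal sketch of the collapsing and no-op weeding performed by MakeModeFilter above; the color and modes are arbitrary.

    #include "SkColorFilter.h"
    #include "SkXfermode.h"

    static void mode_filter_example() {
        // An opaque color with kSrcOver collapses to kSrc, i.e. the specialized Src filter.
        sk_sp<SkColorFilter> src =
                SkColorFilter::MakeModeFilter(SK_ColorRED, SkXfermode::kSrcOver_Mode);

        // kDst never changes the input, so the factory returns nullptr.
        sk_sp<SkColorFilter> noop =
                SkColorFilter::MakeModeFilter(SK_ColorRED, SkXfermode::kDst_Mode);
        SkASSERT(!noop);
    }
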
diff --git a/gfx/skia/skia/src/core/SkModeColorFilter.h b/gfx/skia/skia/src/core/SkModeColorFilter.h
new file mode 100644
index 000000000..7d4d4cc3e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkModeColorFilter.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkColorFilter.h"
+#include "SkXfermode.h"
+
+#ifndef SkModeColorFilter_DEFINED
+#define SkModeColorFilter_DEFINED
+
+class SkModeColorFilter : public SkColorFilter {
+public:
+ static sk_sp<SkColorFilter> Make(SkColor color, SkXfermode::Mode mode) {
+ return sk_sp<SkColorFilter>(new SkModeColorFilter(color, mode));
+ }
+#ifdef SK_SUPPORT_LEGACY_COLORFILTER_PTR
+ static SkColorFilter* Create(SkColor color, SkXfermode::Mode mode) {
+ return Make(color, mode).release();
+ }
+#endif
+
+ SkColor getColor() const { return fColor; }
+ SkXfermode::Mode getMode() const { return fMode; }
+ SkPMColor getPMColor() const { return fPMColor; }
+
+ bool asColorMode(SkColor*, SkXfermode::Mode*) const override;
+ uint32_t getFlags() const override;
+ void filterSpan(const SkPMColor shader[], int count, SkPMColor result[]) const override;
+ void filterSpan4f(const SkPM4f shader[], int count, SkPM4f result[]) const override;
+
+#ifndef SK_IGNORE_TO_STRING
+ void toString(SkString* str) const override;
+#endif
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(GrContext*) const override;
+#endif
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkModeColorFilter)
+
+protected:
+ SkModeColorFilter(SkColor color, SkXfermode::Mode mode) {
+ fColor = color;
+ fMode = mode;
+ this->updateCache();
+ }
+
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ SkColor fColor;
+ SkXfermode::Mode fMode;
+ // cache
+ SkPMColor fPMColor;
+ SkXfermodeProc fProc;
+
+ void updateCache();
+
+ friend class SkColorFilter;
+
+ typedef SkColorFilter INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMultiPictureDraw.cpp b/gfx/skia/skia/src/core/SkMultiPictureDraw.cpp
new file mode 100644
index 000000000..b3c636876
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMultiPictureDraw.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCanvas.h"
+#include "SkCanvasPriv.h"
+#include "SkMultiPictureDraw.h"
+#include "SkPicture.h"
+#include "SkTaskGroup.h"
+
+void SkMultiPictureDraw::DrawData::draw() {
+ fCanvas->drawPicture(fPicture, &fMatrix, fPaint);
+}
+
+void SkMultiPictureDraw::DrawData::init(SkCanvas* canvas, const SkPicture* picture,
+ const SkMatrix* matrix, const SkPaint* paint) {
+ fPicture = SkRef(picture);
+ fCanvas = SkRef(canvas);
+ if (matrix) {
+ fMatrix = *matrix;
+ } else {
+ fMatrix.setIdentity();
+ }
+ if (paint) {
+ fPaint = new SkPaint(*paint);
+ } else {
+ fPaint = nullptr;
+ }
+}
+
+void SkMultiPictureDraw::DrawData::Reset(SkTDArray<DrawData>& data) {
+ for (int i = 0; i < data.count(); ++i) {
+ data[i].fPicture->unref();
+ data[i].fCanvas->unref();
+ delete data[i].fPaint;
+ }
+ data.rewind();
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+
+SkMultiPictureDraw::SkMultiPictureDraw(int reserve) {
+ if (reserve > 0) {
+ fGPUDrawData.setReserve(reserve);
+ fThreadSafeDrawData.setReserve(reserve);
+ }
+}
+
+void SkMultiPictureDraw::reset() {
+ DrawData::Reset(fGPUDrawData);
+ DrawData::Reset(fThreadSafeDrawData);
+}
+
+void SkMultiPictureDraw::add(SkCanvas* canvas,
+ const SkPicture* picture,
+ const SkMatrix* matrix,
+ const SkPaint* paint) {
+ if (nullptr == canvas || nullptr == picture) {
+ SkDEBUGFAIL("parameters to SkMultiPictureDraw::add should be non-nullptr");
+ return;
+ }
+
+ SkTDArray<DrawData>& array = canvas->getGrContext() ? fGPUDrawData : fThreadSafeDrawData;
+ array.append()->init(canvas, picture, matrix, paint);
+}
+
+class AutoMPDReset : SkNoncopyable {
+ SkMultiPictureDraw* fMPD;
+public:
+ AutoMPDReset(SkMultiPictureDraw* mpd) : fMPD(mpd) {}
+ ~AutoMPDReset() { fMPD->reset(); }
+};
+
+//#define FORCE_SINGLE_THREAD_DRAWING_FOR_TESTING
+
+void SkMultiPictureDraw::draw(bool flush) {
+ AutoMPDReset mpdreset(this);
+
+#ifdef FORCE_SINGLE_THREAD_DRAWING_FOR_TESTING
+ for (int i = 0; i < fThreadSafeDrawData.count(); ++i) {
+ fThreadSafeDrawData[i].draw();
+ }
+#else
+ SkTaskGroup().batch(fThreadSafeDrawData.count(), [&](int i) {
+ fThreadSafeDrawData[i].draw();
+ });
+#endif
+
+ // N.B. we could get going on any GPU work from this main thread while the CPU work runs.
+ // But in practice, we've either got GPU work or CPU work, not both.
+
+ const int count = fGPUDrawData.count();
+ if (0 == count) {
+ return;
+ }
+
+ for (int i = 0; i < count; ++i) {
+ const DrawData& data = fGPUDrawData[i];
+ SkCanvas* canvas = data.fCanvas;
+ const SkPicture* picture = data.fPicture;
+
+ canvas->drawPicture(picture, &data.fMatrix, data.fPaint);
+ if (flush) {
+ canvas->flush();
+ }
+ }
+}
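
For reference, a minimal sketch of the add()/draw() protocol above; the two canvases and the picture are assumed to come from elsewhere, and the reserve count is arbitrary.

    #include "SkCanvas.h"
    #include "SkMultiPictureDraw.h"
    #include "SkPicture.h"

    static void draw_both(SkCanvas* a, SkCanvas* b, const SkPicture* pic) {
        SkMultiPictureDraw mpd(2);
        // Draws queued for non-GPU canvases run on SkTaskGroup threads; draws for
        // GPU-backed canvases are replayed serially on this thread.
        mpd.add(a, pic, nullptr, nullptr);
        mpd.add(b, pic, nullptr, nullptr);
        mpd.draw(true);   // flush each GPU canvas; draw() also resets the queued data
    }
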
diff --git a/gfx/skia/skia/src/core/SkNextID.h b/gfx/skia/skia/src/core/SkNextID.h
new file mode 100644
index 000000000..570fd946d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkNextID.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNextID_DEFINED
+#define SkNextID_DEFINED
+
+#include "SkTypes.h"
+
+class SkNextID {
+public:
+ /**
+ * Shared between SkPixelRef's generationID and SkImage's uniqueID
+ */
+ static uint32_t ImageID();
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkNormalBevelSource.cpp b/gfx/skia/skia/src/core/SkNormalBevelSource.cpp
new file mode 100644
index 000000000..4a08728a9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkNormalBevelSource.cpp
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkNormalBevelSource.h"
+
+#include "SkNormalSource.h"
+#include "SkNormalSourcePriv.h"
+#include "SkPoint3.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+
+#if SK_SUPPORT_GPU
+#include "GrInvariantOutput.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "SkGr.h"
+
+/** \class NormalBevelFP
+ *
+ * Fragment processor for the SkNormalBevelSource.
+ *
+ * @param bevelType type of the bevel
+ * @param bevelWidth width of the bevel in device space
+ * @param bevelHeight height of the bevel in device space
+ */
+class NormalBevelFP : public GrFragmentProcessor {
+public:
+ NormalBevelFP(SkNormalSource::BevelType bevelType, SkScalar bevelWidth, SkScalar bevelHeight)
+ : fBevelType(bevelType)
+ , fBevelWidth(bevelWidth)
+ , fBevelHeight(bevelHeight) {
+ this->initClassID<NormalBevelFP>();
+
+ fUsesDistanceVectorField = true;
+ }
+
+ class GLSLNormalBevelFP : public GLSLNormalFP {
+ public:
+ GLSLNormalBevelFP() {
+ fPrevWidth = SkFloatToScalar(0.0f);
+ fPrevHeight = SkFloatToScalar(0.0f);
+ }
+
+ void onEmitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const NormalBevelFP& fp = args.fFp.cast<NormalBevelFP>();
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // Determining necessary uniforms and initializing them
+ bool needWidth = true;
+ bool needHeight = (fp.fBevelType == SkNormalSource::BevelType::kRoundedOut ||
+ fp.fBevelType == SkNormalSource::BevelType::kRoundedIn);
+ bool needNormalized = (fp.fBevelType == SkNormalSource::BevelType::kLinear);
+
+ const char *widthUniName = nullptr;
+ if (needWidth) {
+ fWidthUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kFloat_GrSLType,
+ kDefault_GrSLPrecision, "Width",
+ &widthUniName);
+ }
+
+ const char* heightUniName = nullptr;
+ if (needHeight) {
+ fHeightUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kFloat_GrSLType,
+ kDefault_GrSLPrecision, "Height",
+ &heightUniName);
+ }
+
+ const char* normalizedWidthUniName = nullptr;
+ const char* normalizedHeightUniName = nullptr;
+ if (needNormalized) {
+ fNormalizedWidthUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType,
+ kDefault_GrSLPrecision,
+ "NormalizedWidth",
+ &normalizedWidthUniName);
+ fNormalizedHeightUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType,
+ kDefault_GrSLPrecision,
+ "NormalizedHeight",
+ &normalizedHeightUniName);
+ }
+
+ // Here we are splitting the distance vector into length and normalized direction
+ fragBuilder->codeAppendf("float dv_length = %s.z;",
+ fragBuilder->distanceVectorName());
+ fragBuilder->codeAppendf("vec2 dv_norm = %s.xy;",
+ fragBuilder->distanceVectorName());
+
+ // Asserting presence of necessary uniforms
+ SkASSERT(widthUniName);
+
+ fragBuilder->codeAppend( "vec3 normal;");
+ fragBuilder->codeAppendf("if (dv_length >= %s) {", widthUniName);
+ fragBuilder->codeAppend( " normal = vec3(0.0, 0.0, 1.0);");
+ fragBuilder->codeAppend( "} else {");
+ this->emitMath(fragBuilder, fp.fBevelType, widthUniName, heightUniName,
+ normalizedWidthUniName, normalizedHeightUniName);
+ fragBuilder->codeAppend( "}");
+ fragBuilder->codeAppendf("%s = vec4(normal, 0.0);", args.fOutputColor);
+ }
+
+ static void GenKey(const GrProcessor& proc, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const NormalBevelFP& fp = proc.cast<NormalBevelFP>();
+ b->add32(static_cast<int>(fp.fBevelType));
+ }
+
+ protected:
+ void setNormalData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& proc) override {
+ const NormalBevelFP& normalBevelFP = proc.cast<NormalBevelFP>();
+
+ // Updating uniform if bevel type requires it and data has changed
+
+ bool needWidth = true;
+ bool needHeight = (normalBevelFP.fBevelType == SkNormalSource::BevelType::kRoundedOut ||
+ normalBevelFP.fBevelType == SkNormalSource::BevelType::kRoundedIn);
+ bool needNormalized = (normalBevelFP.fBevelType == SkNormalSource::BevelType::kLinear);
+
+ bool dirtyWidth = (fPrevWidth != normalBevelFP.fBevelWidth);
+ bool dirtyHeight = (fPrevHeight != normalBevelFP.fBevelHeight);
+ bool dirtyNormalized = (dirtyHeight || dirtyWidth);
+
+
+ if (needWidth && dirtyWidth) {
+ pdman.set1f(fWidthUni, normalBevelFP.fBevelWidth);
+ fPrevWidth = normalBevelFP.fBevelWidth;
+ }
+ if (needHeight && dirtyHeight) {
+ pdman.set1f(fHeightUni, normalBevelFP.fBevelHeight);
+ fPrevHeight = normalBevelFP.fBevelHeight;
+ }
+ if (needNormalized && dirtyNormalized) {
+ SkScalar height = normalBevelFP.fBevelHeight;
+ SkScalar width = normalBevelFP.fBevelWidth;
+
+ SkScalar length = SkScalarSqrt(SkScalarSquare(height) + SkScalarSquare(width));
+ pdman.set1f(fNormalizedHeightUni, height/length);
+ pdman.set1f(fNormalizedWidthUni, width/length);
+ }
+ }
+
+ // This method emits the code that calculates the normal orthogonal to the simulated beveled
+ // surface. In the comments inside the function, the math involved is described. For this
+ // purpose, the d-axis is defined to be the axis co-linear to the distance vector, where the
+ // origin is the end of the bevel inside the shape.
+ void emitMath(GrGLSLFPFragmentBuilder* fb, SkNormalSource::BevelType type,
+ const char* width, const char* height, const char* normalizedWidth,
+ const char* normalizedHeight) {
+ switch (type) {
+ case SkNormalSource::BevelType::kLinear:
+ // Asserting presence of necessary uniforms
+ SkASSERT(normalizedHeight);
+ SkASSERT(normalizedWidth);
+
+ // Because the slope of the bevel is -height/width, the vector
+ // normalized(vec2(height, width)) is the d- and z-components of the normal
+ // vector that is orthogonal to the linear bevel. Multiplying the d-component
+ // by the normalized distance vector splits it into x- and y-components.
+ fb->codeAppendf("normal = vec3(%s * dv_norm, %s);",
+ normalizedHeight, normalizedWidth);
+ break;
+ case SkNormalSource::BevelType::kRoundedOut:
+ // Fall through
+ case SkNormalSource::BevelType::kRoundedIn:
+ // Asserting presence of necessary uniforms
+ SkASSERT(height);
+ SkASSERT(width);
+
+ // If the bevel is rounded in, the current position along the d-axis is set to the
+ // distance from the end of the bevel rather than from the beginning, essentially
+ // flipping the bevel calculations.
+ if ( type == SkNormalSource::BevelType::kRoundedIn ) {
+ fb->codeAppendf("float currentPos_d = %s - dv_length;", width);
+ } else if (type == SkNormalSource::BevelType::kRoundedOut) {
+ fb->codeAppendf("float currentPos_d = dv_length;");
+ }
+
+ fb->codeAppendf("float rootDOverW = sqrt(currentPos_d/%s);", width);
+
+ // Calculating the d- and z-components of the normal, where 'd' is the axis
+ // co-linear to the distance vector. Equation was derived from the formula for
+ // a bezier curve by solving the parametric equation for d(t) and z(t), then
+ // with those, calculate d'(t), z'(t) and t(d), and from these, d'(d) and z'(d).
+ // z'(d)/d'(d) results in the slope of the bevel at d, so we construct an
+ // orthogonal vector of slope -d'(d)/z'(d) and length 1.
+ fb->codeAppendf("vec2 unnormalizedNormal_dz = vec2(%s*(1.0-rootDOverW), "
+ "%s*rootDOverW);",
+ height, width);
+ fb->codeAppendf("vec2 normal_dz = normalize(unnormalizedNormal_dz);");
+
+ // Multiplying the d-component by the normalized distance vector splits it into
+ // x- and y-components.
+ fb->codeAppendf("normal = vec3(normal_dz.x*dv_norm, normal_dz.y);");
+
+ break;
+ default:
+ SkDEBUGFAIL("Invalid bevel type passed to emitMath");
+ }
+ }
+
+ private:
+ SkScalar fPrevWidth;
+ GrGLSLProgramDataManager::UniformHandle fWidthUni;
+
+ SkScalar fPrevHeight;
+ GrGLSLProgramDataManager::UniformHandle fHeightUni;
+
+ // width / length(<width,height>)
+ GrGLSLProgramDataManager::UniformHandle fNormalizedWidthUni;
+ // height / length(<width,height>)
+ GrGLSLProgramDataManager::UniformHandle fNormalizedHeightUni;
+ };
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLNormalBevelFP::GenKey(*this, caps, b);
+ }
+
+ const char* name() const override { return "NormalBevelFP"; }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ inout->setToUnknown(GrInvariantOutput::ReadInput::kWillNot_ReadInput);
+ }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override { return new GLSLNormalBevelFP; }
+
+ bool onIsEqual(const GrFragmentProcessor& proc) const override {
+ const NormalBevelFP& normalBevelFP = proc.cast<NormalBevelFP>();
+ return fBevelType == normalBevelFP.fBevelType &&
+ fBevelWidth == normalBevelFP.fBevelWidth &&
+ fBevelHeight == normalBevelFP.fBevelHeight;
+ }
+
+ SkNormalSource::BevelType fBevelType;
+ SkScalar fBevelWidth;
+ SkScalar fBevelHeight;
+};
+
+sk_sp<GrFragmentProcessor> SkNormalBevelSourceImpl::asFragmentProcessor(
+ const SkShader::AsFPArgs& args) const {
+
+ // This assumes a uniform scale. Anisotropic scaling might not be handled gracefully.
+ SkScalar maxScale = args.fViewMatrix->getMaxScale();
+
+ // Providing device-space width and height
+ return sk_make_sp<NormalBevelFP>(fType, maxScale * fWidth, maxScale * fHeight);
+}
+
+#endif // SK_SUPPORT_GPU
+
+////////////////////////////////////////////////////////////////////////////
+
+SkNormalBevelSourceImpl::Provider::Provider() {}
+
+SkNormalBevelSourceImpl::Provider::~Provider() {}
+
+SkNormalSource::Provider* SkNormalBevelSourceImpl::asProvider(const SkShader::ContextRec &rec,
+ void *storage) const {
+ return new (storage) Provider();
+}
+
+size_t SkNormalBevelSourceImpl::providerSize(const SkShader::ContextRec&) const {
+ return sizeof(Provider);
+}
+
+// TODO Implement feature for the CPU pipeline
+void SkNormalBevelSourceImpl::Provider::fillScanLine(int x, int y, SkPoint3 output[],
+ int count) const {
+ for (int i = 0; i < count; i++) {
+ output[i] = {0.0f, 0.0f, 1.0f};
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkNormalBevelSourceImpl::CreateProc(SkReadBuffer& buf) {
+
+ auto type = static_cast<SkNormalSource::BevelType>(buf.readInt());
+ SkScalar width = buf.readScalar();
+ SkScalar height = buf.readScalar();
+
+ return sk_make_sp<SkNormalBevelSourceImpl>(type, width, height);
+}
+
+void SkNormalBevelSourceImpl::flatten(SkWriteBuffer& buf) const {
+ this->INHERITED::flatten(buf);
+
+ buf.writeInt(static_cast<int>(fType));
+ buf.writeScalar(fWidth);
+ buf.writeScalar(fHeight);
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkNormalSource> SkNormalSource::MakeBevel(BevelType type, SkScalar width, SkScalar height) {
+ /* TODO make sure these checks are tolerant enough to account for precision loss when GPUs
+ use 16-bit float types. We don't want to assume stuff is non-zero on the GPU and be wrong.*/
+ SkASSERT(width > 0.0f && !SkScalarNearlyZero(width));
+ if (SkScalarNearlyZero(height)) {
+ return SkNormalSource::MakeFlat();
+ }
+
+ return sk_make_sp<SkNormalBevelSourceImpl>(type, width, height);
+}
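As a minimal usage sketch of the factory above (the helper name choose_normal_source and the kRoundedOut choice are illustrative, not part of this patch), the zero-height fallback can be made explicit at the call site; MakeBevel() performs the same check internally:

    #include "SkNormalSource.h"
    #include "SkScalar.h"

    // Illustrative helper: pick a bevel normal source, falling back to the flat
    // source exactly as MakeBevel() does when the height is nearly zero.
    static sk_sp<SkNormalSource> choose_normal_source(SkScalar width, SkScalar height) {
        if (SkScalarNearlyZero(height)) {
            return SkNormalSource::MakeFlat();   // same fallback MakeBevel() takes
        }
        return SkNormalSource::MakeBevel(SkNormalSource::BevelType::kRoundedOut,
                                         width, height);
    }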
diff --git a/gfx/skia/skia/src/core/SkNormalBevelSource.h b/gfx/skia/skia/src/core/SkNormalBevelSource.h
new file mode 100644
index 000000000..d133738bc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkNormalBevelSource.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNormalBevelSource_DEFINED
+#define SkNormalBevelSource_DEFINED
+
+#include "SkNormalSource.h"
+
+class SK_API SkNormalBevelSourceImpl : public SkNormalSource {
+public:
+ SkNormalBevelSourceImpl(BevelType type, SkScalar width, SkScalar height)
+ : fType(type)
+ , fWidth(width)
+ , fHeight(height) {}
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(const SkShader::AsFPArgs&) const override;
+#endif
+
+ SkNormalSource::Provider* asProvider(const SkShader::ContextRec& rec,
+ void* storage) const override;
+ size_t providerSize(const SkShader::ContextRec& rec) const override;
+
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkNormalBevelSourceImpl)
+
+protected:
+ void flatten(SkWriteBuffer& buf) const override;
+
+private:
+ class Provider : public SkNormalSource::Provider {
+ public:
+ Provider();
+
+ virtual ~Provider();
+
+ void fillScanLine(int x, int y, SkPoint3 output[], int count) const override;
+
+ private:
+ typedef SkNormalSource::Provider INHERITED;
+
+ };
+
+ SkNormalSource::BevelType fType;
+ SkScalar fWidth;
+ SkScalar fHeight;
+
+ friend class SkNormalSource;
+
+ typedef SkNormalSource INHERITED;
+};
+
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkNormalFlatSource.cpp b/gfx/skia/skia/src/core/SkNormalFlatSource.cpp
new file mode 100644
index 000000000..c30cca14d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkNormalFlatSource.cpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkNormalFlatSource.h"
+
+#include "SkNormalSource.h"
+#include "SkNormalSourcePriv.h"
+#include "SkPoint3.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+
+#if SK_SUPPORT_GPU
+#include "GrInvariantOutput.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+
+class NormalFlatFP : public GrFragmentProcessor {
+public:
+ NormalFlatFP() {
+ this->initClassID<NormalFlatFP>();
+ }
+
+ class GLSLNormalFlatFP : public GLSLNormalFP {
+ public:
+ GLSLNormalFlatFP() {}
+
+ void onEmitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ fragBuilder->codeAppendf("%s = vec4(0, 0, 1, 0);", args.fOutputColor);
+ }
+
+ static void GenKey(const GrProcessor& proc, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ b->add32(0x0);
+ }
+
+ protected:
+ void setNormalData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& proc) override {}
+ };
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLNormalFlatFP::GenKey(*this, caps, b);
+ }
+
+ const char* name() const override { return "NormalFlatFP"; }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ inout->setToUnknown(GrInvariantOutput::ReadInput::kWillNot_ReadInput);
+ }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override { return new GLSLNormalFlatFP; }
+
+ bool onIsEqual(const GrFragmentProcessor&) const override { return true; }
+};
+
+sk_sp<GrFragmentProcessor> SkNormalFlatSourceImpl::asFragmentProcessor(
+ const SkShader::AsFPArgs&) const {
+
+ return sk_make_sp<NormalFlatFP>();
+}
+
+#endif // SK_SUPPORT_GPU
+
+////////////////////////////////////////////////////////////////////////////
+
+SkNormalFlatSourceImpl::Provider::Provider() {}
+
+SkNormalFlatSourceImpl::Provider::~Provider() {}
+
+SkNormalSource::Provider* SkNormalFlatSourceImpl::asProvider(const SkShader::ContextRec &rec,
+ void *storage) const {
+ return new (storage) Provider();
+}
+
+size_t SkNormalFlatSourceImpl::providerSize(const SkShader::ContextRec&) const {
+ return sizeof(Provider);
+}
+
+void SkNormalFlatSourceImpl::Provider::fillScanLine(int x, int y, SkPoint3 output[],
+ int count) const {
+ for (int i = 0; i < count; i++) {
+ output[i] = {0.0f, 0.0f, 1.0f};
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkNormalFlatSourceImpl::CreateProc(SkReadBuffer& buf) {
+ return sk_make_sp<SkNormalFlatSourceImpl>();
+}
+
+void SkNormalFlatSourceImpl::flatten(SkWriteBuffer& buf) const {
+ this->INHERITED::flatten(buf);
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkNormalSource> SkNormalSource::MakeFlat() {
+ return sk_make_sp<SkNormalFlatSourceImpl>();
+}
diff --git a/gfx/skia/skia/src/core/SkNormalFlatSource.h b/gfx/skia/skia/src/core/SkNormalFlatSource.h
new file mode 100644
index 000000000..e1295596b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkNormalFlatSource.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNormalFlatSource_DEFINED
+#define SkNormalFlatSource_DEFINED
+
+#include "SkNormalSource.h"
+
+class SK_API SkNormalFlatSourceImpl : public SkNormalSource {
+public:
+ SkNormalFlatSourceImpl(){}
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(const SkShader::AsFPArgs&) const override;
+#endif
+
+ SkNormalSource::Provider* asProvider(const SkShader::ContextRec& rec,
+ void* storage) const override;
+ size_t providerSize(const SkShader::ContextRec& rec) const override;
+
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkNormalFlatSourceImpl)
+
+protected:
+ void flatten(SkWriteBuffer& buf) const override;
+
+private:
+ class Provider : public SkNormalSource::Provider {
+ public:
+ Provider();
+
+ virtual ~Provider();
+
+ void fillScanLine(int x, int y, SkPoint3 output[], int count) const override;
+
+ private:
+ typedef SkNormalSource::Provider INHERITED;
+ };
+
+ friend class SkNormalSource;
+
+ typedef SkNormalSource INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkNormalMapSource.cpp b/gfx/skia/skia/src/core/SkNormalMapSource.cpp
new file mode 100644
index 000000000..25c533cdb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkNormalMapSource.cpp
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkNormalMapSource.h"
+
+#include "SkLightingShader.h"
+#include "SkMatrix.h"
+#include "SkNormalSource.h"
+#include "SkNormalSourcePriv.h"
+#include "SkPM4f.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+
+#if SK_SUPPORT_GPU
+#include "GrCoordTransform.h"
+#include "GrInvariantOutput.h"
+#include "GrTextureParams.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "SkGr.h"
+
+class NormalMapFP : public GrFragmentProcessor {
+public:
+ NormalMapFP(sk_sp<GrFragmentProcessor> mapFP, const SkMatrix& invCTM)
+ : fInvCTM(invCTM) {
+ this->registerChildProcessor(mapFP);
+
+ this->initClassID<NormalMapFP>();
+ }
+
+ class GLSLNormalMapFP : public GLSLNormalFP {
+ public:
+ GLSLNormalMapFP()
+ : fColumnMajorInvCTM22{0.0f} {}
+
+ void onEmitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // add uniform
+ const char* xformUniName = nullptr;
+ fXformUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kMat22f_GrSLType,
+ kDefault_GrSLPrecision, "Xform", &xformUniName);
+
+ SkString dstNormalColorName("dstNormalColor");
+ this->emitChild(0, nullptr, &dstNormalColorName, args);
+ fragBuilder->codeAppendf("vec3 normal = normalize(%s.rgb - vec3(0.5));",
+ dstNormalColorName.c_str());
+
+ // If there are no x & y components, return (0, 0, +/- 1) instead to avoid division by 0
+ fragBuilder->codeAppend( "if (abs(normal.z) > 0.999) {");
+ fragBuilder->codeAppendf(" %s = normalize(vec4(0.0, 0.0, normal.z, 0.0));",
+ args.fOutputColor);
+ // Else, normalize the transformed X and Y while keeping both Z and the vector's
+ // angle in the XY plane constant. This maintains the "slope" for the surface while
+ // appropriately rotating the normal regardless of any anisotropic scaling that occurs.
+ // Here, the 'scaling factor' is the number that must divide the transformed X and Y so
+ // that the normal's length remains equal to 1.
+ fragBuilder->codeAppend( "} else {");
+ fragBuilder->codeAppendf(" vec2 transformed = %s * normal.xy;",
+ xformUniName);
+ fragBuilder->codeAppend( " float scalingFactorSquared = "
+ "( (transformed.x * transformed.x) "
+ "+ (transformed.y * transformed.y) )"
+ "/(1.0 - (normal.z * normal.z));");
+ fragBuilder->codeAppendf(" %s = vec4(transformed*inversesqrt(scalingFactorSquared),"
+ "normal.z, 0.0);",
+ args.fOutputColor);
+ fragBuilder->codeAppend( "}");
+ }
+
+ static void GenKey(const GrProcessor&, const GrGLSLCaps&, GrProcessorKeyBuilder* b) {
+ b->add32(0x0);
+ }
+
+ protected:
+ void setNormalData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& proc) override {
+ const NormalMapFP& normalMapFP = proc.cast<NormalMapFP>();
+
+ const SkMatrix& invCTM = normalMapFP.invCTM();
+ fColumnMajorInvCTM22[0] = invCTM.get(SkMatrix::kMScaleX);
+ fColumnMajorInvCTM22[1] = invCTM.get(SkMatrix::kMSkewY);
+ fColumnMajorInvCTM22[2] = invCTM.get(SkMatrix::kMSkewX);
+ fColumnMajorInvCTM22[3] = invCTM.get(SkMatrix::kMScaleY);
+ pdman.setMatrix2f(fXformUni, fColumnMajorInvCTM22);
+ }
+
+ private:
+ // Upper-right 2x2 corner of the inverse of the CTM in column-major form
+ float fColumnMajorInvCTM22[4];
+ GrGLSLProgramDataManager::UniformHandle fXformUni;
+ };
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLNormalMapFP::GenKey(*this, caps, b);
+ }
+
+ const char* name() const override { return "NormalMapFP"; }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ inout->setToUnknown(GrInvariantOutput::ReadInput::kWillNot_ReadInput);
+ }
+
+ const SkMatrix& invCTM() const { return fInvCTM; }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override { return new GLSLNormalMapFP; }
+
+ bool onIsEqual(const GrFragmentProcessor& proc) const override {
+ const NormalMapFP& normalMapFP = proc.cast<NormalMapFP>();
+ return fInvCTM == normalMapFP.fInvCTM;
+ }
+
+ SkMatrix fInvCTM;
+};
+
+sk_sp<GrFragmentProcessor> SkNormalMapSourceImpl::asFragmentProcessor(
+ const SkShader::AsFPArgs& args) const {
+ sk_sp<GrFragmentProcessor> mapFP = fMapShader->asFragmentProcessor(args);
+ if (!mapFP) {
+ return nullptr;
+ }
+
+ return sk_make_sp<NormalMapFP>(std::move(mapFP), fInvCTM);
+}
+
+#endif // SK_SUPPORT_GPU
+
+////////////////////////////////////////////////////////////////////////////
+
+SkNormalMapSourceImpl::Provider::Provider(const SkNormalMapSourceImpl& source,
+ SkShader::Context* mapContext,
+ SkPaint* overridePaint)
+ : fSource(source)
+ , fMapContext(mapContext)
+ , fOverridePaint(overridePaint) {}
+
+SkNormalMapSourceImpl::Provider::~Provider() {
+ fMapContext->~Context();
+ fOverridePaint->~SkPaint();
+}
+
+SkNormalSource::Provider* SkNormalMapSourceImpl::asProvider(const SkShader::ContextRec &rec,
+ void *storage) const {
+ SkMatrix normTotalInv;
+ if (!this->computeNormTotalInverse(rec, &normTotalInv)) {
+ return nullptr;
+ }
+
+ // Overriding paint's alpha because we need the normal map's RGB channels to be unpremul'd
+ void* paintStorage = (char*)storage + sizeof(Provider);
+ SkPaint* overridePaint = new (paintStorage) SkPaint(*(rec.fPaint));
+ overridePaint->setAlpha(0xFF);
+ SkShader::ContextRec overrideRec(*overridePaint, *(rec.fMatrix), rec.fLocalMatrix,
+ rec.fPreferredDstType);
+
+ void* mapContextStorage = (char*) paintStorage + sizeof(SkPaint);
+ SkShader::Context* context = fMapShader->createContext(overrideRec, mapContextStorage);
+ if (!context) {
+ return nullptr;
+ }
+
+ return new (storage) Provider(*this, context, overridePaint);
+}
+
+size_t SkNormalMapSourceImpl::providerSize(const SkShader::ContextRec& rec) const {
+ return sizeof(Provider) + sizeof(SkPaint) + fMapShader->contextSize(rec);
+}
+
+bool SkNormalMapSourceImpl::computeNormTotalInverse(const SkShader::ContextRec& rec,
+ SkMatrix* normTotalInverse) const {
+ SkMatrix total;
+ total.setConcat(*rec.fMatrix, fMapShader->getLocalMatrix());
+
+ const SkMatrix* m = &total;
+ if (rec.fLocalMatrix) {
+ total.setConcat(*m, *rec.fLocalMatrix);
+ m = &total;
+ }
+ return m->invert(normTotalInverse);
+}
+
+#define BUFFER_MAX 16
+void SkNormalMapSourceImpl::Provider::fillScanLine(int x, int y, SkPoint3 output[],
+ int count) const {
+ SkPMColor tmpNormalColors[BUFFER_MAX];
+
+ do {
+ int n = SkTMin(count, BUFFER_MAX);
+
+ fMapContext->shadeSpan(x, y, tmpNormalColors, n);
+
+ for (int i = 0; i < n; i++) {
+ SkPoint3 tempNorm;
+
+ tempNorm.set(SkIntToScalar(SkGetPackedR32(tmpNormalColors[i])) - 127.0f,
+ SkIntToScalar(SkGetPackedG32(tmpNormalColors[i])) - 127.0f,
+ SkIntToScalar(SkGetPackedB32(tmpNormalColors[i])) - 127.0f);
+
+ tempNorm.normalize();
+
+
+ if (!SkScalarNearlyEqual(SkScalarAbs(tempNorm.fZ), 1.0f)) {
+ SkVector transformed = fSource.fInvCTM.mapVector(tempNorm.fX, tempNorm.fY);
+
+ // Normalize the transformed X and Y while keeping both Z and the vector's
+ // angle in the XY plane constant. This maintains the "slope" for the surface while
+ // appropriately rotating the normal for any anisotropic scaling that occurs.
+ // Here, the 'scaling factor' is the number that must divide the transformed X and Y
+ // so that the normal's length remains equal to 1.
+ SkScalar scalingFactorSquared =
+ (SkScalarSquare(transformed.fX) + SkScalarSquare(transformed.fY))
+ / (1.0f - SkScalarSquare(tempNorm.fZ));
+ SkScalar invScalingFactor = SkScalarInvert(SkScalarSqrt(scalingFactorSquared));
+
+ output[i].fX = transformed.fX * invScalingFactor;
+ output[i].fY = transformed.fY * invScalingFactor;
+ output[i].fZ = tempNorm.fZ;
+ } else {
+ output[i] = {0.0f, 0.0f, tempNorm.fZ};
+ output[i].normalize();
+ }
+
+ SkASSERT(SkScalarNearlyEqual(output[i].length(), 1.0f));
+ }
+
+ output += n;
+ x += n;
+ count -= n;
+ } while (count > 0);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkNormalMapSourceImpl::CreateProc(SkReadBuffer& buf) {
+
+ sk_sp<SkShader> mapShader = buf.readFlattenable<SkShader>();
+
+ SkMatrix invCTM;
+ buf.readMatrix(&invCTM);
+
+ return sk_make_sp<SkNormalMapSourceImpl>(std::move(mapShader), invCTM);
+}
+
+void SkNormalMapSourceImpl::flatten(SkWriteBuffer& buf) const {
+ this->INHERITED::flatten(buf);
+
+ buf.writeFlattenable(fMapShader.get());
+ buf.writeMatrix(fInvCTM);
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkNormalSource> SkNormalSource::MakeFromNormalMap(sk_sp<SkShader> map, const SkMatrix& ctm) {
+ SkMatrix invCTM;
+
+ if (!ctm.invert(&invCTM) || !map) {
+ return nullptr;
+ }
+
+ return sk_make_sp<SkNormalMapSourceImpl>(std::move(map), invCTM);
+}
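The renormalization in Provider::fillScanLine() above can be restated with plain floats. This is a sketch under the assumption |nz| < 1, which the surrounding code already guards with its SkScalarNearlyEqual check; the matrix entries m00..m11 stand in for the 2x2 part of the inverse CTM:

    #include <math.h>

    // Map (nx, ny) through the 2x2 inverse CTM, then rescale x and y so the vector
    // is unit length again while z (and therefore the surface "slope") is kept.
    static void rotate_normal(float nx, float ny, float nz,
                              float m00, float m01, float m10, float m11,
                              float out[3]) {
        float tx = m00 * nx + m01 * ny;
        float ty = m10 * nx + m11 * ny;
        float scaleSq = (tx * tx + ty * ty) / (1.0f - nz * nz);  // assumes |nz| < 1
        float inv = 1.0f / sqrtf(scaleSq);
        out[0] = tx * inv;
        out[1] = ty * inv;
        out[2] = nz;
    }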
diff --git a/gfx/skia/skia/src/core/SkNormalMapSource.h b/gfx/skia/skia/src/core/SkNormalMapSource.h
new file mode 100644
index 000000000..5908369fc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkNormalMapSource.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNormalMapSource_DEFINED
+#define SkNormalMapSource_DEFINED
+
+#include "SkNormalSource.h"
+
+class SkNormalMapSourceImpl : public SkNormalSource {
+public:
+ SkNormalMapSourceImpl(sk_sp<SkShader> mapShader, const SkMatrix& invCTM)
+ : fMapShader(std::move(mapShader))
+ , fInvCTM(invCTM) {}
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(const SkShader::AsFPArgs&) const override;
+#endif
+
+ SkNormalSource::Provider* asProvider(const SkShader::ContextRec& rec,
+ void* storage) const override;
+ size_t providerSize(const SkShader::ContextRec& rec) const override;
+
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkNormalMapSourceImpl)
+
+protected:
+ void flatten(SkWriteBuffer& buf) const override;
+
+ bool computeNormTotalInverse(const SkShader::ContextRec& rec, SkMatrix* normTotalInverse) const;
+
+private:
+ class Provider : public SkNormalSource::Provider {
+ public:
+ Provider(const SkNormalMapSourceImpl& source, SkShader::Context* mapContext,
+ SkPaint* overridePaint);
+
+ virtual ~Provider() override;
+
+ void fillScanLine(int x, int y, SkPoint3 output[], int count) const override;
+
+ private:
+ const SkNormalMapSourceImpl& fSource;
+ SkShader::Context* fMapContext;
+
+ SkPaint* fOverridePaint;
+
+ typedef SkNormalSource::Provider INHERITED;
+ };
+
+ sk_sp<SkShader> fMapShader;
+ SkMatrix fInvCTM; // Inverse of the canvas total matrix, used for rotating normals.
+
+ friend class SkNormalSource;
+
+ typedef SkNormalSource INHERITED;
+};
+
+#endif
+
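Since asProvider() placement-constructs into caller-supplied storage (a Provider, then an SkPaint, then the map shader's context, per providerSize() above), a CPU consumer sizes the buffer itself and destroys the provider explicitly. A rough sketch of that contract; SkAutoSMalloc and the function name are used here only for illustration:

    #include "SkNormalSource.h"
    #include "SkPoint3.h"
    #include "SkTemplates.h"

    // Illustrative driver: allocate providerSize() bytes, build the provider in
    // place, fill one span of normals, then run the destructor by hand because
    // the object was placement-constructed.
    static void shade_row_normals(const SkNormalSource& source,
                                  const SkShader::ContextRec& rec,
                                  int x, int y, SkPoint3 normals[], int count) {
        SkAutoSMalloc<128> storage(source.providerSize(rec));
        SkNormalSource::Provider* provider = source.asProvider(rec, storage.get());
        if (!provider) {
            return;   // e.g. the map shader failed to create a context
        }
        provider->fillScanLine(x, y, normals, count);
        provider->~Provider();
    }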
diff --git a/gfx/skia/skia/src/core/SkNormalSource.cpp b/gfx/skia/skia/src/core/SkNormalSource.cpp
new file mode 100644
index 000000000..2bea7baf6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkNormalSource.cpp
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkNormalBevelSource.h"
+#include "SkNormalFlatSource.h"
+#include "SkNormalMapSource.h"
+#include "SkNormalSource.h"
+
+// Generating vtable
+SkNormalSource::~SkNormalSource() {}
+
+////////////////////////////////////////////////////////////////////////////
+
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkNormalSource)
+SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkNormalMapSourceImpl)
+SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkNormalFlatSourceImpl)
+SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkNormalBevelSourceImpl)
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END
+
+////////////////////////////////////////////////////////////////////////////
diff --git a/gfx/skia/skia/src/core/SkNormalSource.h b/gfx/skia/skia/src/core/SkNormalSource.h
new file mode 100644
index 000000000..32ef08ce5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkNormalSource.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNormalSource_DEFINED
+#define SkNormalSource_DEFINED
+
+#include "SkFlattenable.h"
+#include "SkShader.h"
+
+class SkMatrix;
+struct SkPoint3;
+
+#if SK_SUPPORT_GPU
+class GrFragmentProcessor;
+#endif
+
+/** Abstract class that generates or reads in normals for use by SkLightingShader.
+*/
+class SK_API SkNormalSource : public SkFlattenable {
+public:
+ virtual ~SkNormalSource() override;
+
+#if SK_SUPPORT_GPU
+ /** Returns a fragment processor that takes no input and outputs a normal (already rotated)
+ as its output color. To be used as a child fragment processor.
+ */
+ virtual sk_sp<GrFragmentProcessor> asFragmentProcessor(const SkShader::AsFPArgs&) const = 0;
+#endif
+
+ class Provider {
+ public:
+ virtual ~Provider() {}
+
+ /** Called for each span of the object being drawn on the CPU. Your subclass should set
+ the appropriate normals that correspond to the specified device coordinates.
+ */
+ virtual void fillScanLine(int x, int y, SkPoint3 output[], int count) const = 0;
+ };
+
+ /** Returns an instance of 'Provider' that provides normals for the CPU pipeline. The
+ necessary data will be initialized in place at 'storage'.
+ */
+ virtual Provider* asProvider(const SkShader::ContextRec&, void* storage) const = 0;
+
+ /** Amount of memory needed to store a provider object and its dependencies.
+ */
+ virtual size_t providerSize(const SkShader::ContextRec&) const = 0;
+
+ /** Returns a normal source that provides normals sourced from the normal map argument.
+
+ @param map a shader that outputs the normal map
+ @param ctm the current canvas' total matrix, used to rotate normals when necessary.
+
+ nullptr will be returned if 'map' is null
+
+ The normal map is currently assumed to be an 8888 image where the normal at a texel
+ is retrieved by:
+ N.x = R-127;
+ N.y = G-127;
+ N.z = B-127;
+ N.normalize();
+ The +Z axis is thus encoded in RGB as (127, 127, 255) while the -Z axis is
+ (127, 127, 0).
+ */
+ static sk_sp<SkNormalSource> MakeFromNormalMap(sk_sp<SkShader> map, const SkMatrix& ctm);
+
+ /** Returns a normal source that only provides straight-up normals, i.e. <0, 0, 1>.
+ */
+ static sk_sp<SkNormalSource> MakeFlat();
+
+ /** This enum specifies the shape of the bevel. All bevels output <0, 0, 1> as the surface
+ * normal for any point more than 'width' away from any edge.
+ *
+ * Mathematical details:
+ * For the purpose of describing the shape of the bevel, we define 'w' to be the given width of
+ * the bevel, and 'h' to be the given height. We will assume the shape is rotated such that the
+ * point being shaded as well as the closest point on the shape's edge to that point are on the
+ * x-axis, and the shape is translated so that the aforementioned point on the edge is at
+ * coordinates (w, 0, 0) and the end of the bevel is at (0, 0, h).
+ *
+ */
+ enum class BevelType {
+ /* This bevel simulates a surface that is slanted from the shape's edges inwards, linearly.
+ *
+ * Mathematical details:
+ * This bevel follows a straight line from (w, 0, 0) to (0, 0, h).
+ */
+ kLinear,
+ /* This bevel simulates a surface that rounds off at the shape's edges, smoothly becoming
+ * perpendicular to the x-y plane.
+ *
+ * Mathematical details:
+ * This bevel follows the only quadratic bezier curve whose start point is at (w, 0, 0),
+ * control point is at (w, 0, h), and end point is at (0, 0, h).
+ */
+ kRoundedOut,
+ /* This bevel simulates a surface that sharply becomes perpendicular to the x-y plane when
+ * at 'width' units from the nearest edge, and then rounds off towards the shape's
+ * edge, smoothly becoming parallel to the x-y plane.
+ *
+ * Mathematical details:
+ * This bevel follows the only quadratic bezier curve whose start point is at (w, 0, 0),
+ * control point is at (0, 0, 0), and end point is at (0, 0, h).
+ */
+ kRoundedIn
+ };
+
+ /** Returns a normal source that generates a bevel for the shape being drawn. Currently this is
+ not implemented for CPU rendering. On the GPU it currently only works for anti-aliased circles
+ and rectangles.
+
+ @param type the type of bevel to add.
+ @param width the width of the bevel, in source space. Must be positive.
+ @param height the height of the plateau, in source space. Can be positive, negative,
+ or zero. A negative height means the simulated bevels slope downwards.
+ */
+ static sk_sp<SkNormalSource> MakeBevel(BevelType, SkScalar width, SkScalar height);
+
+ SK_DEFINE_FLATTENABLE_TYPE(SkNormalSource)
+ SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP()
+};
+
+#endif
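The texel convention documented for MakeFromNormalMap() can be checked with a small decode sketch (the function name is illustrative): an 8888 texel of (127, 127, 255) comes out as the +Z normal <0, 0, 1>, and (127, 127, 0) as -Z.

    #include "SkPoint3.h"
    #include "SkTypes.h"

    // Decode one normal-map texel: N = normalize(RGB - 127), as documented above.
    static SkPoint3 decode_normal_texel(U8CPU r, U8CPU g, U8CPU b) {
        SkPoint3 n = SkPoint3::Make(SkIntToScalar(r) - 127.0f,
                                    SkIntToScalar(g) - 127.0f,
                                    SkIntToScalar(b) - 127.0f);
        n.normalize();
        return n;
    }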
diff --git a/gfx/skia/skia/src/core/SkNormalSourcePriv.h b/gfx/skia/skia/src/core/SkNormalSourcePriv.h
new file mode 100644
index 000000000..ce8baf61e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkNormalSourcePriv.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNormalSourcePriv_DEFINED
+#define SkNormalSourcePriv_DEFINED
+
+#if SK_SUPPORT_GPU
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+
+/* GLSLFragmentProcessors for NormalSourceImpls must subclass this class and override onEmitCode
+ * and setNormalData, as well as all the other calls FPs normally override, except for the two
+ * defined in this superclass.
+ * This class exists to intercept emitCode calls and emit <0, 0, 1> if the FP requires a distance
+ * vector but the GP doesn't provide it. onSetData calls need to be intercepted too because
+ * uniform handles will be invalid in subclasses where onEmitCode isn't called.
+ * We don't need to adjust the key here since the use of a given GP (through its class ID, already
+ * in the key) will determine what code gets emitted here.
+ */
+class GLSLNormalFP : public GrGLSLFragmentProcessor {
+public:
+ GLSLNormalFP()
+ : fDidIntercept(false) {}
+
+ void emitCode(EmitArgs& args) final override {
+ if (args.fFp.usesDistanceVectorField() && !args.fGpImplementsDistanceVector) {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ fragBuilder->codeAppendf("// GLSLNormalFP intercepted emitCode call, GP does not "
+ "implement required distance vector feature\n");
+ fragBuilder->codeAppendf("%s = vec4(0, 0, 1, 0);", args.fOutputColor);
+
+ fDidIntercept = true;
+ } else {
+ this->onEmitCode(args);
+ }
+ }
+
+ void onSetData(const GrGLSLProgramDataManager& pdman, const GrProcessor& proc) final override {
+ if (!fDidIntercept) {
+ this->setNormalData(pdman, proc);
+ }
+ }
+
+protected:
+ virtual void onEmitCode(EmitArgs& args) = 0;
+ virtual void setNormalData(const GrGLSLProgramDataManager& pdman, const GrProcessor& proc) = 0;
+
+private:
+ bool fDidIntercept;
+};
+#endif
+
+#endif
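A conforming subclass mirrors GLSLNormalFlatFP from earlier in this patch: it overrides onEmitCode()/setNormalData() rather than emitCode()/onSetData(), so the interception above can short-circuit when the GP lacks the distance-vector feature. A minimal sketch with an illustrative class name, GPU-only like the base class:

    // Sketch of the expected subclass shape; emits a constant straight-up normal.
    class GLSLConstantUpNormalFP : public GLSLNormalFP {
    public:
        void onEmitCode(EmitArgs& args) override {
            args.fFragBuilder->codeAppendf("%s = vec4(0, 0, 1, 0);", args.fOutputColor);
        }
    protected:
        void setNormalData(const GrGLSLProgramDataManager&, const GrProcessor&) override {}
    };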
diff --git a/gfx/skia/skia/src/core/SkNx.h b/gfx/skia/skia/src/core/SkNx.h
new file mode 100644
index 000000000..6bca856d8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkNx.h
@@ -0,0 +1,365 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNx_DEFINED
+#define SkNx_DEFINED
+
+//#define SKNX_NO_SIMD
+
+#include "SkScalar.h"
+#include "SkTypes.h"
+#include <limits>
+#include <math.h>
+#include <type_traits>
+
+#define SI static inline
+
+// The default SkNx<N,T> just proxies down to a pair of SkNx<N/2, T>.
+template <int N, typename T>
+struct SkNx {
+ typedef SkNx<N/2, T> Half;
+
+ Half fLo, fHi;
+
+ SkNx() = default;
+ SkNx(const Half& lo, const Half& hi) : fLo(lo), fHi(hi) {}
+
+ SkNx(T v) : fLo(v), fHi(v) {}
+
+ SkNx(T a, T b) : fLo(a) , fHi(b) { static_assert(N==2, ""); }
+ SkNx(T a, T b, T c, T d) : fLo(a,b), fHi(c,d) { static_assert(N==4, ""); }
+ SkNx(T a, T b, T c, T d, T e, T f, T g, T h) : fLo(a,b,c,d), fHi(e,f,g,h) {
+ static_assert(N==8, "");
+ }
+ SkNx(T a, T b, T c, T d, T e, T f, T g, T h,
+ T i, T j, T k, T l, T m, T n, T o, T p) : fLo(a,b,c,d, e,f,g,h), fHi(i,j,k,l, m,n,o,p) {
+ static_assert(N==16, "");
+ }
+
+ T operator[](int k) const {
+ SkASSERT(0 <= k && k < N);
+ return k < N/2 ? fLo[k] : fHi[k-N/2];
+ }
+
+ static SkNx Load(const void* vptr) {
+ auto ptr = (const char*)vptr;
+ return { Half::Load(ptr), Half::Load(ptr + N/2*sizeof(T)) };
+ }
+ void store(void* vptr) const {
+ auto ptr = (char*)vptr;
+ fLo.store(ptr);
+ fHi.store(ptr + N/2*sizeof(T));
+ }
+
+ bool anyTrue() const { return fLo.anyTrue() || fHi.anyTrue(); }
+ bool allTrue() const { return fLo.allTrue() && fHi.allTrue(); }
+
+ SkNx abs() const { return { fLo. abs(), fHi. abs() }; }
+ SkNx sqrt() const { return { fLo. sqrt(), fHi. sqrt() }; }
+ SkNx rsqrt() const { return { fLo. rsqrt(), fHi. rsqrt() }; }
+ SkNx floor() const { return { fLo. floor(), fHi. floor() }; }
+ SkNx invert() const { return { fLo.invert(), fHi.invert() }; }
+
+ SkNx operator!() const { return { !fLo, !fHi }; }
+ SkNx operator-() const { return { -fLo, -fHi }; }
+ SkNx operator~() const { return { ~fLo, ~fHi }; }
+
+ SkNx operator<<(int bits) const { return { fLo << bits, fHi << bits }; }
+ SkNx operator>>(int bits) const { return { fLo >> bits, fHi >> bits }; }
+
+ SkNx operator+(const SkNx& y) const { return { fLo + y.fLo, fHi + y.fHi }; }
+ SkNx operator-(const SkNx& y) const { return { fLo - y.fLo, fHi - y.fHi }; }
+ SkNx operator*(const SkNx& y) const { return { fLo * y.fLo, fHi * y.fHi }; }
+ SkNx operator/(const SkNx& y) const { return { fLo / y.fLo, fHi / y.fHi }; }
+
+ SkNx operator&(const SkNx& y) const { return { fLo & y.fLo, fHi & y.fHi }; }
+ SkNx operator|(const SkNx& y) const { return { fLo | y.fLo, fHi | y.fHi }; }
+ SkNx operator^(const SkNx& y) const { return { fLo ^ y.fLo, fHi ^ y.fHi }; }
+
+ SkNx operator==(const SkNx& y) const { return { fLo == y.fLo, fHi == y.fHi }; }
+ SkNx operator!=(const SkNx& y) const { return { fLo != y.fLo, fHi != y.fHi }; }
+ SkNx operator<=(const SkNx& y) const { return { fLo <= y.fLo, fHi <= y.fHi }; }
+ SkNx operator>=(const SkNx& y) const { return { fLo >= y.fLo, fHi >= y.fHi }; }
+ SkNx operator< (const SkNx& y) const { return { fLo < y.fLo, fHi < y.fHi }; }
+ SkNx operator> (const SkNx& y) const { return { fLo > y.fLo, fHi > y.fHi }; }
+
+ SkNx saturatedAdd(const SkNx& y) const {
+ return { fLo.saturatedAdd(y.fLo), fHi.saturatedAdd(y.fHi) };
+ }
+ SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return { fLo.thenElse(t.fLo, e.fLo), fHi.thenElse(t.fHi, e.fHi) };
+ }
+
+ static SkNx Min(const SkNx& x, const SkNx& y) {
+ return { Half::Min(x.fLo, y.fLo), Half::Min(x.fHi, y.fHi) };
+ }
+ static SkNx Max(const SkNx& x, const SkNx& y) {
+ return { Half::Max(x.fLo, y.fLo), Half::Max(x.fHi, y.fHi) };
+ }
+};
+
+// The N -> N/2 recursion bottoms out at N == 1, a scalar value.
+template <typename T>
+struct SkNx<1,T> {
+ T fVal;
+
+ SkNx() = default;
+ SkNx(T v) : fVal(v) {}
+
+ // Android complains about unused parameters, so we guard it
+ T operator[](int SkDEBUGCODE(k)) const {
+ SkASSERT(k == 0);
+ return fVal;
+ }
+
+ static SkNx Load(const void* ptr) {
+ SkNx v;
+ memcpy(&v, ptr, sizeof(T));
+ return v;
+ }
+ void store(void* ptr) const { memcpy(ptr, &fVal, sizeof(T)); }
+
+ bool anyTrue() const { return fVal != 0; }
+ bool allTrue() const { return fVal != 0; }
+
+ SkNx abs() const { return Abs(fVal); }
+ SkNx sqrt() const { return Sqrt(fVal); }
+ SkNx rsqrt() const { return T(1) / this->sqrt(); }
+ SkNx floor() const { return Floor(fVal); }
+ SkNx invert() const { return T(1) / *this; }
+
+ SkNx operator!() const { return !fVal; }
+ SkNx operator-() const { return -fVal; }
+ SkNx operator~() const { return FromBits(~ToBits(fVal)); }
+
+ SkNx operator<<(int bits) const { return fVal << bits; }
+ SkNx operator>>(int bits) const { return fVal >> bits; }
+
+ SkNx operator+(const SkNx& y) const { return fVal + y.fVal; }
+ SkNx operator-(const SkNx& y) const { return fVal - y.fVal; }
+ SkNx operator*(const SkNx& y) const { return fVal * y.fVal; }
+ SkNx operator/(const SkNx& y) const { return fVal / y.fVal; }
+
+ SkNx operator&(const SkNx& y) const { return FromBits(ToBits(fVal) & ToBits(y.fVal)); }
+ SkNx operator|(const SkNx& y) const { return FromBits(ToBits(fVal) | ToBits(y.fVal)); }
+ SkNx operator^(const SkNx& y) const { return FromBits(ToBits(fVal) ^ ToBits(y.fVal)); }
+
+ SkNx operator==(const SkNx& y) const { return FromBits(fVal == y.fVal ? ~0 : 0); }
+ SkNx operator!=(const SkNx& y) const { return FromBits(fVal != y.fVal ? ~0 : 0); }
+ SkNx operator<=(const SkNx& y) const { return FromBits(fVal <= y.fVal ? ~0 : 0); }
+ SkNx operator>=(const SkNx& y) const { return FromBits(fVal >= y.fVal ? ~0 : 0); }
+ SkNx operator< (const SkNx& y) const { return FromBits(fVal < y.fVal ? ~0 : 0); }
+ SkNx operator> (const SkNx& y) const { return FromBits(fVal > y.fVal ? ~0 : 0); }
+
+ static SkNx Min(const SkNx& x, const SkNx& y) { return x.fVal < y.fVal ? x : y; }
+ static SkNx Max(const SkNx& x, const SkNx& y) { return x.fVal > y.fVal ? x : y; }
+
+ SkNx saturatedAdd(const SkNx& y) const {
+ static_assert(std::is_unsigned<T>::value, "");
+ T sum = fVal + y.fVal;
+ return sum < fVal ? std::numeric_limits<T>::max() : sum;
+ }
+
+ SkNx thenElse(const SkNx& t, const SkNx& e) const { return fVal != 0 ? t : e; }
+
+private:
+ // Helper functions to choose the right float/double methods. (In <cmath> madness lies...)
+ static float Abs(float val) { return ::fabsf(val); }
+ static float Sqrt(float val) { return ::sqrtf(val); }
+ static float Floor(float val) { return ::floorf(val); }
+
+ static double Abs(double val) { return ::fabs(val); }
+ static double Sqrt(double val) { return ::sqrt(val); }
+ static double Floor(double val) { return ::floor(val); }
+
+ // Helper functions for working with floats/doubles as bit patterns.
+ template <typename U> static U ToBits(U v) { return v; }
+ static int32_t ToBits(float v) { int32_t bits; memcpy(&bits, &v, sizeof(v)); return bits; }
+ static int64_t ToBits(double v) { int64_t bits; memcpy(&bits, &v, sizeof(v)); return bits; }
+
+ template <typename Bits> static T FromBits(Bits bits) {
+ static_assert(std::is_pod<T >::value &&
+ std::is_pod<Bits>::value &&
+ sizeof(T) <= sizeof(Bits), "");
+ T val;
+ memcpy(&val, &bits, sizeof(T));
+ return val;
+ }
+};
+
+// Allow scalars on the left or right of binary operators, and things like +=, &=, etc.
+#define V template <int N, typename T> SI SkNx<N,T>
+ V operator+ (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) + y; }
+ V operator- (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) - y; }
+ V operator* (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) * y; }
+ V operator/ (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) / y; }
+ V operator& (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) & y; }
+ V operator| (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) | y; }
+ V operator^ (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) ^ y; }
+ V operator==(T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) == y; }
+ V operator!=(T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) != y; }
+ V operator<=(T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) <= y; }
+ V operator>=(T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) >= y; }
+ V operator< (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) < y; }
+ V operator> (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) > y; }
+
+ V operator+ (const SkNx<N,T>& x, T y) { return x + SkNx<N,T>(y); }
+ V operator- (const SkNx<N,T>& x, T y) { return x - SkNx<N,T>(y); }
+ V operator* (const SkNx<N,T>& x, T y) { return x * SkNx<N,T>(y); }
+ V operator/ (const SkNx<N,T>& x, T y) { return x / SkNx<N,T>(y); }
+ V operator& (const SkNx<N,T>& x, T y) { return x & SkNx<N,T>(y); }
+ V operator| (const SkNx<N,T>& x, T y) { return x | SkNx<N,T>(y); }
+ V operator^ (const SkNx<N,T>& x, T y) { return x ^ SkNx<N,T>(y); }
+ V operator==(const SkNx<N,T>& x, T y) { return x == SkNx<N,T>(y); }
+ V operator!=(const SkNx<N,T>& x, T y) { return x != SkNx<N,T>(y); }
+ V operator<=(const SkNx<N,T>& x, T y) { return x <= SkNx<N,T>(y); }
+ V operator>=(const SkNx<N,T>& x, T y) { return x >= SkNx<N,T>(y); }
+ V operator< (const SkNx<N,T>& x, T y) { return x < SkNx<N,T>(y); }
+ V operator> (const SkNx<N,T>& x, T y) { return x > SkNx<N,T>(y); }
+
+ V& operator<<=(SkNx<N,T>& x, int bits) { return (x = x << bits); }
+ V& operator>>=(SkNx<N,T>& x, int bits) { return (x = x >> bits); }
+
+ V& operator +=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x + y); }
+ V& operator -=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x - y); }
+ V& operator *=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x * y); }
+ V& operator /=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x / y); }
+ V& operator &=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x & y); }
+ V& operator |=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x | y); }
+ V& operator ^=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x ^ y); }
+
+ V& operator +=(SkNx<N,T>& x, T y) { return (x = x + SkNx<N,T>(y)); }
+ V& operator -=(SkNx<N,T>& x, T y) { return (x = x - SkNx<N,T>(y)); }
+ V& operator *=(SkNx<N,T>& x, T y) { return (x = x * SkNx<N,T>(y)); }
+ V& operator /=(SkNx<N,T>& x, T y) { return (x = x / SkNx<N,T>(y)); }
+ V& operator &=(SkNx<N,T>& x, T y) { return (x = x & SkNx<N,T>(y)); }
+ V& operator |=(SkNx<N,T>& x, T y) { return (x = x | SkNx<N,T>(y)); }
+ V& operator ^=(SkNx<N,T>& x, T y) { return (x = x ^ SkNx<N,T>(y)); }
+#undef V
+
+// SkNx<N,T> ~~> SkNx<N/2,T> + SkNx<N/2,T>
+template <int N, typename T>
+SI void SkNx_split(const SkNx<N,T>& v, SkNx<N/2,T>* lo, SkNx<N/2,T>* hi) {
+ *lo = v.fLo;
+ *hi = v.fHi;
+}
+
+// SkNx<N/2,T> + SkNx<N/2,T> ~~> SkNx<N,T>
+template <int N, typename T>
+SI SkNx<N*2,T> SkNx_join(const SkNx<N,T>& lo, const SkNx<N,T>& hi) {
+ return { lo, hi };
+}
+
+// A very generic shuffle. Can reorder, duplicate, contract, expand...
+// Sk4f v = { R,G,B,A };
+// SkNx_shuffle<2,1,0,3>(v) ~~> {B,G,R,A}
+// SkNx_shuffle<2,1>(v) ~~> {B,G}
+// SkNx_shuffle<2,1,2,1,2,1,2,1>(v) ~~> {B,G,B,G,B,G,B,G}
+// SkNx_shuffle<3,3,3,3>(v) ~~> {A,A,A,A}
+template <int... Ix, int N, typename T>
+SI SkNx<sizeof...(Ix),T> SkNx_shuffle(const SkNx<N,T>& v) {
+ return { v[Ix]... };
+}
+
+// Cast from SkNx<N, Src> to SkNx<N, Dst>, as if you called static_cast<Dst>(Src).
+template <typename Dst, typename Src, int N>
+SI SkNx<N,Dst> SkNx_cast(const SkNx<N,Src>& v) {
+ return { SkNx_cast<Dst>(v.fLo), SkNx_cast<Dst>(v.fHi) };
+}
+template <typename Dst, typename Src>
+SI SkNx<1,Dst> SkNx_cast(const SkNx<1,Src>& v) {
+ return static_cast<Dst>(v.fVal);
+}
+
+typedef SkNx<2, float> Sk2f;
+typedef SkNx<4, float> Sk4f;
+typedef SkNx<8, float> Sk8f;
+typedef SkNx<16, float> Sk16f;
+
+typedef SkNx<2, SkScalar> Sk2s;
+typedef SkNx<4, SkScalar> Sk4s;
+typedef SkNx<8, SkScalar> Sk8s;
+typedef SkNx<16, SkScalar> Sk16s;
+
+typedef SkNx<4, uint8_t> Sk4b;
+typedef SkNx<8, uint8_t> Sk8b;
+typedef SkNx<16, uint8_t> Sk16b;
+
+typedef SkNx<4, uint16_t> Sk4h;
+typedef SkNx<8, uint16_t> Sk8h;
+typedef SkNx<16, uint16_t> Sk16h;
+
+typedef SkNx<4, int32_t> Sk4i;
+typedef SkNx<4, uint32_t> Sk4u;
+
+// Include platform specific specializations if available.
+#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include "../opts/SkNx_sse.h"
+#elif !defined(SKNX_NO_SIMD) && defined(SK_ARM_HAS_NEON)
+ #include "../opts/SkNx_neon.h"
+#else
+
+SI Sk4i Sk4f_round(const Sk4f& x) {
+ return { (int) lrintf (x[0]),
+ (int) lrintf (x[1]),
+ (int) lrintf (x[2]),
+ (int) lrintf (x[3]), };
+}
+
+// Load 4 Sk4h and transpose them (256 bits total).
+SI void Sk4h_load4(const void* vptr, Sk4h* r, Sk4h* g, Sk4h* b, Sk4h* a) {
+ const uint64_t* ptr = (const uint64_t*)vptr;
+ auto p0 = Sk4h::Load(ptr+0),
+ p1 = Sk4h::Load(ptr+1),
+ p2 = Sk4h::Load(ptr+2),
+ p3 = Sk4h::Load(ptr+3);
+ *r = { p0[0], p1[0], p2[0], p3[0] };
+ *g = { p0[1], p1[1], p2[1], p3[1] };
+ *b = { p0[2], p1[2], p2[2], p3[2] };
+ *a = { p0[3], p1[3], p2[3], p3[3] };
+}
+
+// Transpose 4 Sk4h and store (256 bits total).
+SI void Sk4h_store4(void* dst, const Sk4h& r, const Sk4h& g, const Sk4h& b, const Sk4h& a) {
+ uint64_t* dst64 = (uint64_t*) dst;
+ Sk4h(r[0], g[0], b[0], a[0]).store(dst64 + 0);
+ Sk4h(r[1], g[1], b[1], a[1]).store(dst64 + 1);
+ Sk4h(r[2], g[2], b[2], a[2]).store(dst64 + 2);
+ Sk4h(r[3], g[3], b[3], a[3]).store(dst64 + 3);
+}
+
+// Load 4 Sk4f and transpose them (512 bits total).
+SI void Sk4f_load4(const void* vptr, Sk4f* r, Sk4f* g, Sk4f* b, Sk4f* a) {
+ const float* ptr = (const float*) vptr;
+ auto p0 = Sk4f::Load(ptr + 0),
+ p1 = Sk4f::Load(ptr + 4),
+ p2 = Sk4f::Load(ptr + 8),
+ p3 = Sk4f::Load(ptr + 12);
+ *r = { p0[0], p1[0], p2[0], p3[0] };
+ *g = { p0[1], p1[1], p2[1], p3[1] };
+ *b = { p0[2], p1[2], p2[2], p3[2] };
+ *a = { p0[3], p1[3], p2[3], p3[3] };
+}
+
+// Transpose 4 Sk4f and store (512 bits total).
+SI void Sk4f_store4(void* vdst, const Sk4f& r, const Sk4f& g, const Sk4f& b, const Sk4f& a) {
+ float* dst = (float*) vdst;
+ Sk4f(r[0], g[0], b[0], a[0]).store(dst + 0);
+ Sk4f(r[1], g[1], b[1], a[1]).store(dst + 4);
+ Sk4f(r[2], g[2], b[2], a[2]).store(dst + 8);
+ Sk4f(r[3], g[3], b[3], a[3]).store(dst + 12);
+}
+
+#endif
+
+SI void Sk4f_ToBytes(uint8_t p[16], const Sk4f& a, const Sk4f& b, const Sk4f& c, const Sk4f& d) {
+ SkNx_cast<uint8_t>(SkNx_join(SkNx_join(a,b), SkNx_join(c,d))).store(p);
+}
+
+#undef SI
+
+#endif//SkNx_DEFINED
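The wrappers above are meant to be used generically: the same client code compiles against the recursive fallback in this header or against the SSE/NEON specializations pulled in at the bottom. A small sketch using the Sk4f alias and SkNx_shuffle (the function name is illustrative):

    #include "SkNx.h"

    // Scale a 4-float RGBA pixel and swap its R and B channels.
    static void scale_and_swap_rb(const float rgba[4], float out[4]) {
        Sk4f px = Sk4f::Load(rgba);           // load 4 floats
        px = px * 0.5f + 0.25f;               // element-wise math via the operators above
        px = SkNx_shuffle<2, 1, 0, 3>(px);    // reorder lanes: B, G, R, A
        px.store(out);
    }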
diff --git a/gfx/skia/skia/src/core/SkOpts.cpp b/gfx/skia/skia/src/core/SkOpts.cpp
new file mode 100644
index 000000000..c9bb48b55
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkOpts.cpp
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCpu.h"
+#include "SkHalf.h"
+#include "SkOnce.h"
+#include "SkOpts.h"
+
+#if defined(SK_ARM_HAS_NEON)
+ #if defined(SK_ARM_HAS_CRC32)
+ #define SK_OPTS_NS neon_and_crc32
+ #else
+ #define SK_OPTS_NS neon
+ #endif
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ #define SK_OPTS_NS avx2
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
+ #define SK_OPTS_NS avx
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE42
+ #define SK_OPTS_NS sse42
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ #define SK_OPTS_NS sse41
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ #define SK_OPTS_NS ssse3
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE3
+ #define SK_OPTS_NS sse3
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #define SK_OPTS_NS sse2
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
+ #define SK_OPTS_NS sse
+#else
+ #define SK_OPTS_NS portable
+#endif
+
+#include "SkBlend_opts.h"
+#include "SkBlitMask_opts.h"
+#include "SkBlitRow_opts.h"
+#include "SkBlurImageFilter_opts.h"
+#include "SkChecksum_opts.h"
+#include "SkColorCubeFilter_opts.h"
+#include "SkMorphologyImageFilter_opts.h"
+#include "SkRasterPipeline_opts.h"
+#include "SkSwizzler_opts.h"
+#include "SkTextureCompressor_opts.h"
+#include "SkXfermode_opts.h"
+
+namespace SkOpts {
+ // Define default function pointer values here...
+ // If our global compile options are set high enough, these defaults might even be
+ // CPU-specialized, e.g. a typical x86-64 machine might start with SSE2 defaults.
+ // They'll still get a chance to be replaced with even better ones, e.g. using SSE4.1.
+#define DEFINE_DEFAULT(name) decltype(name) name = SK_OPTS_NS::name
+ DEFINE_DEFAULT(create_xfermode);
+ DEFINE_DEFAULT(color_cube_filter_span);
+
+ DEFINE_DEFAULT(box_blur_xx);
+ DEFINE_DEFAULT(box_blur_xy);
+ DEFINE_DEFAULT(box_blur_yx);
+
+ DEFINE_DEFAULT(dilate_x);
+ DEFINE_DEFAULT(dilate_y);
+ DEFINE_DEFAULT( erode_x);
+ DEFINE_DEFAULT( erode_y);
+
+ DEFINE_DEFAULT(texture_compressor);
+ DEFINE_DEFAULT(fill_block_dimensions);
+
+ DEFINE_DEFAULT(blit_mask_d32_a8);
+
+ DEFINE_DEFAULT(blit_row_color32);
+ DEFINE_DEFAULT(blit_row_s32a_opaque);
+
+ DEFINE_DEFAULT(RGBA_to_BGRA);
+ DEFINE_DEFAULT(RGBA_to_rgbA);
+ DEFINE_DEFAULT(RGBA_to_bgrA);
+ DEFINE_DEFAULT(RGB_to_RGB1);
+ DEFINE_DEFAULT(RGB_to_BGR1);
+ DEFINE_DEFAULT(gray_to_RGB1);
+ DEFINE_DEFAULT(grayA_to_RGBA);
+ DEFINE_DEFAULT(grayA_to_rgbA);
+ DEFINE_DEFAULT(inverted_CMYK_to_RGB1);
+ DEFINE_DEFAULT(inverted_CMYK_to_BGR1);
+
+ DEFINE_DEFAULT(srcover_srgb_srgb);
+
+ DEFINE_DEFAULT(hash_fn);
+#undef DEFINE_DEFAULT
+
+ // TODO: might be nice to only create one instance of tail-insensitive stages.
+
+ SkRasterPipeline::Fn stages_4[] = {
+ stage_4<SK_OPTS_NS::store_565 , false>,
+ stage_4<SK_OPTS_NS::store_srgb, false>,
+ stage_4<SK_OPTS_NS::store_f16 , false>,
+
+ stage_4<SK_OPTS_NS::load_s_565 , true>,
+ stage_4<SK_OPTS_NS::load_s_srgb, true>,
+ stage_4<SK_OPTS_NS::load_s_f16 , true>,
+
+ stage_4<SK_OPTS_NS::load_d_565 , true>,
+ stage_4<SK_OPTS_NS::load_d_srgb, true>,
+ stage_4<SK_OPTS_NS::load_d_f16 , true>,
+
+ stage_4<SK_OPTS_NS::scale_u8, true>,
+
+ stage_4<SK_OPTS_NS::lerp_u8 , true>,
+ stage_4<SK_OPTS_NS::lerp_565 , true>,
+ stage_4<SK_OPTS_NS::lerp_constant_float, true>,
+
+ stage_4<SK_OPTS_NS::constant_color, true>,
+
+ SK_OPTS_NS::dst,
+ SK_OPTS_NS::dstatop,
+ SK_OPTS_NS::dstin,
+ SK_OPTS_NS::dstout,
+ SK_OPTS_NS::dstover,
+ SK_OPTS_NS::srcatop,
+ SK_OPTS_NS::srcin,
+ SK_OPTS_NS::srcout,
+ SK_OPTS_NS::srcover,
+ SK_OPTS_NS::clear,
+ SK_OPTS_NS::modulate,
+ SK_OPTS_NS::multiply,
+ SK_OPTS_NS::plus_,
+ SK_OPTS_NS::screen,
+ SK_OPTS_NS::xor_,
+ SK_OPTS_NS::colorburn,
+ SK_OPTS_NS::colordodge,
+ SK_OPTS_NS::darken,
+ SK_OPTS_NS::difference,
+ SK_OPTS_NS::exclusion,
+ SK_OPTS_NS::hardlight,
+ SK_OPTS_NS::lighten,
+ SK_OPTS_NS::overlay,
+ SK_OPTS_NS::softlight,
+ };
+ static_assert(SK_ARRAY_COUNT(stages_4) == SkRasterPipeline::kNumStockStages, "");
+
+ SkRasterPipeline::Fn stages_1_3[] = {
+ stage_1_3<SK_OPTS_NS::store_565 , false>,
+ stage_1_3<SK_OPTS_NS::store_srgb, false>,
+ stage_1_3<SK_OPTS_NS::store_f16 , false>,
+
+ stage_1_3<SK_OPTS_NS::load_s_565 , true>,
+ stage_1_3<SK_OPTS_NS::load_s_srgb, true>,
+ stage_1_3<SK_OPTS_NS::load_s_f16 , true>,
+
+ stage_1_3<SK_OPTS_NS::load_d_565 , true>,
+ stage_1_3<SK_OPTS_NS::load_d_srgb, true>,
+ stage_1_3<SK_OPTS_NS::load_d_f16 , true>,
+
+ stage_1_3<SK_OPTS_NS::scale_u8, true>,
+
+ stage_1_3<SK_OPTS_NS::lerp_u8 , true>,
+ stage_1_3<SK_OPTS_NS::lerp_565 , true>,
+ stage_1_3<SK_OPTS_NS::lerp_constant_float, true>,
+
+ stage_1_3<SK_OPTS_NS::constant_color, true>,
+
+ SK_OPTS_NS::dst,
+ SK_OPTS_NS::dstatop,
+ SK_OPTS_NS::dstin,
+ SK_OPTS_NS::dstout,
+ SK_OPTS_NS::dstover,
+ SK_OPTS_NS::srcatop,
+ SK_OPTS_NS::srcin,
+ SK_OPTS_NS::srcout,
+ SK_OPTS_NS::srcover,
+ SK_OPTS_NS::clear,
+ SK_OPTS_NS::modulate,
+ SK_OPTS_NS::multiply,
+ SK_OPTS_NS::plus_,
+ SK_OPTS_NS::screen,
+ SK_OPTS_NS::xor_,
+ SK_OPTS_NS::colorburn,
+ SK_OPTS_NS::colordodge,
+ SK_OPTS_NS::darken,
+ SK_OPTS_NS::difference,
+ SK_OPTS_NS::exclusion,
+ SK_OPTS_NS::hardlight,
+ SK_OPTS_NS::lighten,
+ SK_OPTS_NS::overlay,
+ SK_OPTS_NS::softlight,
+ };
+ static_assert(SK_ARRAY_COUNT(stages_1_3) == SkRasterPipeline::kNumStockStages, "");
+
+ // Each Init_foo() is defined in src/opts/SkOpts_foo.cpp.
+ void Init_ssse3();
+ void Init_sse41();
+ void Init_sse42();
+ void Init_avx();
+ void Init_hsw();
+ void Init_crc32() {}
+ void Init_neon();
+
+ static void init() {
+#if !defined(SK_BUILD_NO_OPTS)
+ #if defined(SK_CPU_X86)
+ if (SkCpu::Supports(SkCpu::SSSE3)) { Init_ssse3(); }
+ if (SkCpu::Supports(SkCpu::SSE41)) { Init_sse41(); }
+ if (SkCpu::Supports(SkCpu::SSE42)) { Init_sse42(); }
+ if (SkCpu::Supports(SkCpu::AVX )) { Init_avx(); }
+ if (SkCpu::Supports(SkCpu::HSW )) { Init_hsw(); }
+
+ #elif defined(SK_CPU_ARM64)
+ if (SkCpu::Supports(SkCpu::CRC32)) { Init_crc32(); }
+
+ #elif defined(SK_CPU_ARM32) && !defined(SK_ARM_HAS_NEON) && defined(SK_ARM_HAS_OPTIONAL_NEON)
+ if (SkCpu::Supports(SkCpu::NEON)) { Init_neon(); }
+
+ #endif
+#endif
+ }
+
+ void Init() {
+ static SkOnce once;
+ once(init);
+ }
+} // namespace SkOpts
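The pattern in this file (portable defaults assigned at static-init time, then optionally overwritten once SkCpu has been queried, all behind an SkOnce) is easy to restate in miniature. The names below are hypothetical and only illustrate the shape of the mechanism, not any real SkOpts entry point:

    #include <stddef.h>
    #include <stdint.h>

    namespace tiny_opts {
        // Portable default: a trivial byte hash, always available.
        static uint32_t hash_portable(const void* data, size_t len, uint32_t seed) {
            const uint8_t* p = (const uint8_t*)data;
            uint32_t h = seed;
            for (size_t i = 0; i < len; i++) { h = h*31 + p[i]; }
            return h;
        }

        // The function pointer starts at the portable default...
        uint32_t (*hash)(const void*, size_t, uint32_t) = hash_portable;

        // ...and Init() may later swap in a CPU-specific version, e.g.
        //   if (SkCpu::Supports(SkCpu::SSE42)) { hash = hash_sse42; }
        void Init() {}
    }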
diff --git a/gfx/skia/skia/src/core/SkOpts.h b/gfx/skia/skia/src/core/SkOpts.h
new file mode 100644
index 000000000..50de9c45e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkOpts.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOpts_DEFINED
+#define SkOpts_DEFINED
+
+#include "SkRasterPipeline.h"
+#include "SkTextureCompressor.h"
+#include "SkTypes.h"
+#include "SkXfermode.h"
+
+struct ProcCoeff;
+
+namespace SkOpts {
+ // Call to replace pointers to portable functions with pointers to CPU-specific functions.
+ // Thread-safe and idempotent.
+ // Called by SkGraphics::Init().
+ void Init();
+
+ // Declare function pointers here...
+
+ // May return nullptr if we haven't specialized the given Mode.
+ extern SkXfermode* (*create_xfermode)(const ProcCoeff&, SkXfermode::Mode);
+
+ typedef void (*BoxBlur)(const SkPMColor*, int, const SkIRect& srcBounds, SkPMColor*, int, int, int, int, int);
+ extern BoxBlur box_blur_xx, box_blur_xy, box_blur_yx;
+
+ typedef void (*Morph)(const SkPMColor*, SkPMColor*, int, int, int, int, int);
+ extern Morph dilate_x, dilate_y, erode_x, erode_y;
+
+ typedef bool (*TextureCompressor)(uint8_t* dst, const uint8_t* src,
+ int width, int height, size_t rowBytes);
+ extern TextureCompressor (*texture_compressor)(SkColorType, SkTextureCompressor::Format);
+ extern bool (*fill_block_dimensions)(SkTextureCompressor::Format, int* x, int* y);
+
+ extern void (*blit_mask_d32_a8)(SkPMColor*, size_t, const SkAlpha*, size_t, SkColor, int, int);
+ extern void (*blit_row_color32)(SkPMColor*, const SkPMColor*, int, SkPMColor);
+ extern void (*blit_row_s32a_opaque)(SkPMColor*, const SkPMColor*, int, U8CPU);
+
+ // This function is an optimized version of SkColorCubeFilter::filterSpan
+ extern void (*color_cube_filter_span)(const SkPMColor[],
+ int,
+ SkPMColor[],
+ const int * [2],
+ const SkScalar * [2],
+ int,
+ const SkColor*);
+
+ // Swizzle input into some sort of 8888 pixel, {premul,unpremul} x {rgba,bgra}.
+ typedef void (*Swizzle_8888)(uint32_t*, const void*, int);
+ extern Swizzle_8888 RGBA_to_BGRA, // i.e. just swap RB
+ RGBA_to_rgbA, // i.e. just premultiply
+ RGBA_to_bgrA, // i.e. swap RB and premultiply
+ RGB_to_RGB1, // i.e. insert an opaque alpha
+ RGB_to_BGR1, // i.e. swap RB and insert an opaque alpha
+ gray_to_RGB1, // i.e. expand to color channels + an opaque alpha
+ grayA_to_RGBA, // i.e. expand to color channels
+ grayA_to_rgbA, // i.e. expand to color channels and premultiply
+ inverted_CMYK_to_RGB1, // i.e. convert color space
+ inverted_CMYK_to_BGR1; // i.e. convert color space
+
+ // Blend ndst src pixels over dst, where both src and dst point to sRGB pixels (RGBA or BGRA).
+ // If nsrc < ndst, we loop over src to create a pattern.
+ extern void (*srcover_srgb_srgb)(uint32_t* dst, const uint32_t* src, int ndst, int nsrc);
+
+ // The fastest high quality 32-bit hash we can provide on this platform.
+ extern uint32_t (*hash_fn)(const void*, size_t, uint32_t seed);
+ static inline uint32_t hash(const void* data, size_t bytes, uint32_t seed=0) {
+ return hash_fn(data, bytes, seed);
+ }
+
+ extern SkRasterPipeline::Fn stages_4 [SkRasterPipeline::kNumStockStages],
+ stages_1_3[SkRasterPipeline::kNumStockStages];
+}
+
+#endif//SkOpts_DEFINED
diff --git a/gfx/skia/skia/src/core/SkOrderedReadBuffer.h b/gfx/skia/skia/src/core/SkOrderedReadBuffer.h
new file mode 100644
index 000000000..3286f6b9d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkOrderedReadBuffer.h
@@ -0,0 +1,9 @@
+// Temporary shim to keep a couple dependencies working in Chromium.
+#ifndef SkOrderedReadBuffer_DEFINED
+#define SkOrderedReadBuffer_DEFINED
+
+#include "SkReadBuffer.h"
+
+typedef SkReadBuffer SkOrderedReadBuffer;
+
+#endif//SkOrderedReadBuffer_DEFINED
diff --git a/gfx/skia/skia/src/core/SkPM4f.h b/gfx/skia/skia/src/core/SkPM4f.h
new file mode 100644
index 000000000..f983101f4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPM4f.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPM4f_DEFINED
+#define SkPM4f_DEFINED
+
+#include "SkColorPriv.h"
+#include "SkNx.h"
+
+static inline Sk4f swizzle_rb(const Sk4f& x) {
+ return SkNx_shuffle<2, 1, 0, 3>(x);
+}
+
+static inline Sk4f swizzle_rb_if_bgra(const Sk4f& x) {
+#ifdef SK_PMCOLOR_IS_BGRA
+ return swizzle_rb(x);
+#else
+ return x;
+#endif
+}
+
+/*
+ * The float values are 0...1 premultiplied in RGBA order (regardless of SkPMColor order)
+ */
+struct SkPM4f {
+ enum {
+ R, G, B, A,
+ };
+ float fVec[4];
+
+ float r() const { return fVec[R]; }
+ float g() const { return fVec[G]; }
+ float b() const { return fVec[B]; }
+ float a() const { return fVec[A]; }
+
+ static SkPM4f From4f(const Sk4f& x) {
+ SkPM4f pm;
+ x.store(pm.fVec);
+ return pm;
+ }
+ static SkPM4f FromF16(const uint16_t[4]);
+ static SkPM4f FromPMColor(SkPMColor);
+
+ Sk4f to4f() const { return Sk4f::Load(fVec); }
+ Sk4f to4f_rgba() const { return this->to4f(); }
+ Sk4f to4f_bgra() const { return swizzle_rb(this->to4f()); }
+ Sk4f to4f_pmorder() const { return swizzle_rb_if_bgra(this->to4f()); }
+
+ SkPMColor toPMColor() const {
+ Sk4f value = swizzle_rb_if_bgra(this->to4f());
+ SkPMColor result;
+ SkNx_cast<uint8_t>(value * Sk4f(255) + Sk4f(0.5f)).store(&result);
+ return result;
+ }
+
+ void toF16(uint16_t[4]) const;
+ uint64_t toF16() const; // 4 float16 values packed into uint64_t
+
+ SkColor4f unpremul() const;
+
+#ifdef SK_DEBUG
+ void assertIsUnit() const;
+#else
+ void assertIsUnit() const {}
+#endif
+};
+
+typedef SkPM4f (*SkXfermodeProc4f)(const SkPM4f& src, const SkPM4f& dst);
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPM4fPriv.h b/gfx/skia/skia/src/core/SkPM4fPriv.h
new file mode 100644
index 000000000..89a0caeb7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPM4fPriv.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPM4fPriv_DEFINED
+#define SkPM4fPriv_DEFINED
+
+#include "SkColorPriv.h"
+#include "SkPM4f.h"
+#include "SkSRGB.h"
+
+static inline Sk4f set_alpha(const Sk4f& px, float alpha) {
+ return { px[0], px[1], px[2], alpha };
+}
+
+static inline float get_alpha(const Sk4f& px) {
+ return px[3];
+}
+
+
+static inline Sk4f Sk4f_fromL32(uint32_t px) {
+ return SkNx_cast<float>(Sk4b::Load(&px)) * (1/255.0f);
+}
+
+static inline Sk4f Sk4f_fromS32(uint32_t px) {
+ return { sk_linear_from_srgb[(px >> 0) & 0xff],
+ sk_linear_from_srgb[(px >> 8) & 0xff],
+ sk_linear_from_srgb[(px >> 16) & 0xff],
+ (1/255.0f) * (px >> 24) };
+}
+
+static inline uint32_t Sk4f_toL32(const Sk4f& px) {
+ uint32_t l32;
+ SkNx_cast<uint8_t>(Sk4f_round(px * 255.0f)).store(&l32);
+ return l32;
+}
+
+static inline uint32_t Sk4f_toS32(const Sk4f& px) {
+ Sk4i rgb = sk_linear_to_srgb(px),
+ srgb = { rgb[0], rgb[1], rgb[2], (int)(255.0f * px[3] + 0.5f) };
+
+ uint32_t s32;
+ SkNx_cast<uint8_t>(srgb).store(&s32);
+ return s32;
+}
+
+
+// SkColor handling:
+// SkColor has an ordering of (b, g, r, a) if cast to an Sk4f, so the code swizzles r and b to
+// produce the needed (r, g, b, a) ordering.
+static inline Sk4f Sk4f_from_SkColor(SkColor color) {
+ return swizzle_rb(Sk4f_fromS32(color));
+}
+
+static inline void assert_unit(float x) {
+ SkASSERT(0 <= x && x <= 1);
+}
+
+static inline float exact_srgb_to_linear(float srgb) {
+ assert_unit(srgb);
+ float linear;
+ if (srgb <= 0.04045) {
+ linear = srgb / 12.92f;
+ } else {
+ linear = powf((srgb + 0.055f) / 1.055f, 2.4f);
+ }
+ assert_unit(linear);
+ return linear;
+}
+
+#endif
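As a quick numeric check of exact_srgb_to_linear() above (a throwaway sketch, not part of the patch): an sRGB value of 0.5 maps to roughly 0.214 in linear space, while values at or below 0.04045 stay on the linear toe.

    #include <stdio.h>
    #include "SkPM4fPriv.h"

    static void print_srgb_samples() {
        const float samples[] = { 0.0f, 0.04045f, 0.5f, 1.0f };
        for (float s : samples) {
            printf("srgb %.5f -> linear %.5f\n", s, exact_srgb_to_linear(s));
        }
    }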
diff --git a/gfx/skia/skia/src/core/SkPaint.cpp b/gfx/skia/skia/src/core/SkPaint.cpp
new file mode 100644
index 000000000..17843496b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPaint.cpp
@@ -0,0 +1,2406 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPaint.h"
+#include "SkAutoKern.h"
+#include "SkColorFilter.h"
+#include "SkData.h"
+#include "SkDraw.h"
+#include "SkFontDescriptor.h"
+#include "SkGlyphCache.h"
+#include "SkImageFilter.h"
+#include "SkMaskFilter.h"
+#include "SkMaskGamma.h"
+#include "SkMutex.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+#include "SkOpts.h"
+#include "SkPaintDefaults.h"
+#include "SkPathEffect.h"
+#include "SkRasterizer.h"
+#include "SkScalar.h"
+#include "SkScalerContext.h"
+#include "SkShader.h"
+#include "SkStringUtils.h"
+#include "SkStroke.h"
+#include "SkStrokeRec.h"
+#include "SkSurfacePriv.h"
+#include "SkTextBlob.h"
+#include "SkTextBlobRunIterator.h"
+#include "SkTextFormatParams.h"
+#include "SkTextToPathIter.h"
+#include "SkTLazy.h"
+#include "SkTypeface.h"
+#include "SkXfermode.h"
+
+static inline uint32_t set_clear_mask(uint32_t bits, bool cond, uint32_t mask) {
+ return cond ? bits | mask : bits & ~mask;
+}
+
+// define this to get a printf for out-of-range parameter in setters
+// e.g. setTextSize(-1)
+//#define SK_REPORT_API_RANGE_CHECK
+
+SkPaint::SkPaint() {
+ fTextSize = SkPaintDefaults_TextSize;
+ fTextScaleX = SK_Scalar1;
+ fTextSkewX = 0;
+ fColor = SK_ColorBLACK;
+ fWidth = 0;
+ fMiterLimit = SkPaintDefaults_MiterLimit;
+ fBlendMode = (unsigned)SkBlendMode::kSrcOver;
+
+ // Zero all bitfields, then set some non-zero defaults.
+ fBitfieldsUInt = 0;
+ fBitfields.fFlags = SkPaintDefaults_Flags;
+ fBitfields.fCapType = kDefault_Cap;
+ fBitfields.fJoinType = kDefault_Join;
+ fBitfields.fTextAlign = kLeft_Align;
+ fBitfields.fStyle = kFill_Style;
+ fBitfields.fTextEncoding = kUTF8_TextEncoding;
+ fBitfields.fHinting = SkPaintDefaults_Hinting;
+}
+
+SkPaint::SkPaint(const SkPaint& src)
+#define COPY(field) field(src.field)
+ : COPY(fTypeface)
+ , COPY(fPathEffect)
+ , COPY(fShader)
+ , COPY(fMaskFilter)
+ , COPY(fColorFilter)
+ , COPY(fRasterizer)
+ , COPY(fDrawLooper)
+ , COPY(fImageFilter)
+ , COPY(fTextSize)
+ , COPY(fTextScaleX)
+ , COPY(fTextSkewX)
+ , COPY(fColor)
+ , COPY(fWidth)
+ , COPY(fMiterLimit)
+ , COPY(fBlendMode)
+ , COPY(fBitfields)
+#undef COPY
+{}
+
+SkPaint::SkPaint(SkPaint&& src) {
+#define MOVE(field) field = std::move(src.field)
+ MOVE(fTypeface);
+ MOVE(fPathEffect);
+ MOVE(fShader);
+ MOVE(fMaskFilter);
+ MOVE(fColorFilter);
+ MOVE(fRasterizer);
+ MOVE(fDrawLooper);
+ MOVE(fImageFilter);
+ MOVE(fTextSize);
+ MOVE(fTextScaleX);
+ MOVE(fTextSkewX);
+ MOVE(fColor);
+ MOVE(fWidth);
+ MOVE(fMiterLimit);
+ MOVE(fBlendMode);
+ MOVE(fBitfields);
+#undef MOVE
+}
+
+SkPaint::~SkPaint() {}
+
+SkPaint& SkPaint::operator=(const SkPaint& src) {
+ if (this == &src) {
+ return *this;
+ }
+
+#define ASSIGN(field) field = src.field
+ ASSIGN(fTypeface);
+ ASSIGN(fPathEffect);
+ ASSIGN(fShader);
+ ASSIGN(fMaskFilter);
+ ASSIGN(fColorFilter);
+ ASSIGN(fRasterizer);
+ ASSIGN(fDrawLooper);
+ ASSIGN(fImageFilter);
+ ASSIGN(fTextSize);
+ ASSIGN(fTextScaleX);
+ ASSIGN(fTextSkewX);
+ ASSIGN(fColor);
+ ASSIGN(fWidth);
+ ASSIGN(fMiterLimit);
+ ASSIGN(fBlendMode);
+ ASSIGN(fBitfields);
+#undef ASSIGN
+
+ return *this;
+}
+
+SkPaint& SkPaint::operator=(SkPaint&& src) {
+ if (this == &src) {
+ return *this;
+ }
+
+#define MOVE(field) field = std::move(src.field)
+ MOVE(fTypeface);
+ MOVE(fPathEffect);
+ MOVE(fShader);
+ MOVE(fMaskFilter);
+ MOVE(fColorFilter);
+ MOVE(fRasterizer);
+ MOVE(fDrawLooper);
+ MOVE(fImageFilter);
+ MOVE(fTextSize);
+ MOVE(fTextScaleX);
+ MOVE(fTextSkewX);
+ MOVE(fColor);
+ MOVE(fWidth);
+ MOVE(fMiterLimit);
+ MOVE(fBlendMode);
+ MOVE(fBitfields);
+#undef MOVE
+
+ return *this;
+}
+
+bool operator==(const SkPaint& a, const SkPaint& b) {
+#define EQUAL(field) (a.field == b.field)
+ return EQUAL(fTypeface)
+ && EQUAL(fPathEffect)
+ && EQUAL(fShader)
+ && EQUAL(fMaskFilter)
+ && EQUAL(fColorFilter)
+ && EQUAL(fRasterizer)
+ && EQUAL(fDrawLooper)
+ && EQUAL(fImageFilter)
+ && EQUAL(fTextSize)
+ && EQUAL(fTextScaleX)
+ && EQUAL(fTextSkewX)
+ && EQUAL(fColor)
+ && EQUAL(fWidth)
+ && EQUAL(fMiterLimit)
+ && EQUAL(fBlendMode)
+ && EQUAL(fBitfieldsUInt)
+ ;
+#undef EQUAL
+}
+
+void SkPaint::reset() {
+ SkPaint init;
+ *this = init;
+}
+
+void SkPaint::setFilterQuality(SkFilterQuality quality) {
+ fBitfields.fFilterQuality = quality;
+}
+
+void SkPaint::setHinting(Hinting hintingLevel) {
+ fBitfields.fHinting = hintingLevel;
+}
+
+void SkPaint::setFlags(uint32_t flags) {
+ fBitfields.fFlags = flags;
+}
+
+void SkPaint::setAntiAlias(bool doAA) {
+ this->setFlags(set_clear_mask(fBitfields.fFlags, doAA, kAntiAlias_Flag));
+}
+
+void SkPaint::setDither(bool doDither) {
+ this->setFlags(set_clear_mask(fBitfields.fFlags, doDither, kDither_Flag));
+}
+
+void SkPaint::setSubpixelText(bool doSubpixel) {
+ this->setFlags(set_clear_mask(fBitfields.fFlags, doSubpixel, kSubpixelText_Flag));
+}
+
+void SkPaint::setLCDRenderText(bool doLCDRender) {
+ this->setFlags(set_clear_mask(fBitfields.fFlags, doLCDRender, kLCDRenderText_Flag));
+}
+
+void SkPaint::setEmbeddedBitmapText(bool doEmbeddedBitmapText) {
+ this->setFlags(set_clear_mask(fBitfields.fFlags, doEmbeddedBitmapText, kEmbeddedBitmapText_Flag));
+}
+
+void SkPaint::setAutohinted(bool useAutohinter) {
+ this->setFlags(set_clear_mask(fBitfields.fFlags, useAutohinter, kAutoHinting_Flag));
+}
+
+void SkPaint::setLinearText(bool doLinearText) {
+ this->setFlags(set_clear_mask(fBitfields.fFlags, doLinearText, kLinearText_Flag));
+}
+
+void SkPaint::setVerticalText(bool doVertical) {
+ this->setFlags(set_clear_mask(fBitfields.fFlags, doVertical, kVerticalText_Flag));
+}
+
+void SkPaint::setUnderlineText(bool doUnderline) {
+ this->setFlags(set_clear_mask(fBitfields.fFlags, doUnderline, kUnderlineText_Flag));
+}
+
+void SkPaint::setStrikeThruText(bool doStrikeThru) {
+ this->setFlags(set_clear_mask(fBitfields.fFlags, doStrikeThru, kStrikeThruText_Flag));
+}
+
+void SkPaint::setFakeBoldText(bool doFakeBold) {
+ this->setFlags(set_clear_mask(fBitfields.fFlags, doFakeBold, kFakeBoldText_Flag));
+}
+
+void SkPaint::setDevKernText(bool doDevKern) {
+ this->setFlags(set_clear_mask(fBitfields.fFlags, doDevKern, kDevKernText_Flag));
+}
+
+void SkPaint::setStyle(Style style) {
+ if ((unsigned)style < kStyleCount) {
+ fBitfields.fStyle = style;
+ } else {
+#ifdef SK_REPORT_API_RANGE_CHECK
+ SkDebugf("SkPaint::setStyle(%d) out of range\n", style);
+#endif
+ }
+}
+
+void SkPaint::setColor(SkColor color) {
+ fColor = color;
+}
+
+void SkPaint::setAlpha(U8CPU a) {
+ this->setColor(SkColorSetARGB(a, SkColorGetR(fColor),
+ SkColorGetG(fColor), SkColorGetB(fColor)));
+}
+
+void SkPaint::setARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ this->setColor(SkColorSetARGB(a, r, g, b));
+}
+
+void SkPaint::setStrokeWidth(SkScalar width) {
+ if (width >= 0) {
+ fWidth = width;
+ } else {
+#ifdef SK_REPORT_API_RANGE_CHECK
+ SkDebugf("SkPaint::setStrokeWidth() called with negative value\n");
+#endif
+ }
+}
+
+void SkPaint::setStrokeMiter(SkScalar limit) {
+ if (limit >= 0) {
+ fMiterLimit = limit;
+ } else {
+#ifdef SK_REPORT_API_RANGE_CHECK
+ SkDebugf("SkPaint::setStrokeMiter() called with negative value\n");
+#endif
+ }
+}
+
+void SkPaint::setStrokeCap(Cap ct) {
+ if ((unsigned)ct < kCapCount) {
+ fBitfields.fCapType = SkToU8(ct);
+ } else {
+#ifdef SK_REPORT_API_RANGE_CHECK
+ SkDebugf("SkPaint::setStrokeCap(%d) out of range\n", ct);
+#endif
+ }
+}
+
+void SkPaint::setStrokeJoin(Join jt) {
+ if ((unsigned)jt < kJoinCount) {
+ fBitfields.fJoinType = SkToU8(jt);
+ } else {
+#ifdef SK_REPORT_API_RANGE_CHECK
+ SkDebugf("SkPaint::setStrokeJoin(%d) out of range\n", jt);
+#endif
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkPaint::setTextAlign(Align align) {
+ if ((unsigned)align < kAlignCount) {
+ fBitfields.fTextAlign = SkToU8(align);
+ } else {
+#ifdef SK_REPORT_API_RANGE_CHECK
+ SkDebugf("SkPaint::setTextAlign(%d) out of range\n", align);
+#endif
+ }
+}
+
+void SkPaint::setTextSize(SkScalar ts) {
+ if (ts >= 0) {
+ fTextSize = ts;
+ } else {
+#ifdef SK_REPORT_API_RANGE_CHECK
+ SkDebugf("SkPaint::setTextSize() called with negative value\n");
+#endif
+ }
+}
+
+void SkPaint::setTextScaleX(SkScalar scaleX) {
+ fTextScaleX = scaleX;
+}
+
+void SkPaint::setTextSkewX(SkScalar skewX) {
+ fTextSkewX = skewX;
+}
+
+void SkPaint::setTextEncoding(TextEncoding encoding) {
+ if ((unsigned)encoding <= kGlyphID_TextEncoding) {
+ fBitfields.fTextEncoding = encoding;
+ } else {
+#ifdef SK_REPORT_API_RANGE_CHECK
+ SkDebugf("SkPaint::setTextEncoding(%d) out of range\n", encoding);
+#endif
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define MOVE_FIELD(Field) void SkPaint::set##Field(sk_sp<Sk##Field> f) { f##Field = std::move(f); }
+MOVE_FIELD(Typeface)
+MOVE_FIELD(Rasterizer)
+MOVE_FIELD(ImageFilter)
+MOVE_FIELD(Shader)
+MOVE_FIELD(ColorFilter)
+MOVE_FIELD(PathEffect)
+MOVE_FIELD(MaskFilter)
+MOVE_FIELD(DrawLooper)
+#undef MOVE_FIELD
+void SkPaint::setLooper(sk_sp<SkDrawLooper> looper) { fDrawLooper = std::move(looper); }
+
+#define SET_PTR(Field) \
+ Sk##Field* SkPaint::set##Field(Sk##Field* f) { \
+ this->f##Field.reset(SkSafeRef(f)); \
+ return f; \
+ }
+#ifdef SK_SUPPORT_LEGACY_TYPEFACE_PTR
+SET_PTR(Typeface)
+#endif
+#ifdef SK_SUPPORT_LEGACY_MINOR_EFFECT_PTR
+SET_PTR(Rasterizer)
+#endif
+SET_PTR(ImageFilter)
+#ifdef SK_SUPPORT_LEGACY_CREATESHADER_PTR
+SET_PTR(Shader)
+#endif
+#ifdef SK_SUPPORT_LEGACY_COLORFILTER_PTR
+SET_PTR(ColorFilter)
+#endif
+#ifdef SK_SUPPORT_LEGACY_XFERMODE_PTR
+SkXfermode* SkPaint::setXfermode(SkXfermode* xfer) {
+ this->setBlendMode(xfer ? xfer->blend() : SkBlendMode::kSrcOver);
+ return this->getXfermode();
+}
+#endif
+#ifdef SK_SUPPORT_LEGACY_PATHEFFECT_PTR
+SET_PTR(PathEffect)
+#endif
+#ifdef SK_SUPPORT_LEGACY_MASKFILTER_PTR
+SET_PTR(MaskFilter)
+#endif
+#undef SET_PTR
+
+#ifdef SK_SUPPORT_LEGACY_MINOR_EFFECT_PTR
+SkDrawLooper* SkPaint::setLooper(SkDrawLooper* looper) {
+ fDrawLooper.reset(SkSafeRef(looper));
+ return looper;
+}
+#endif
+
+#ifdef SK_SUPPORT_LEGACY_XFERMODE_OBJECT
+void SkPaint::setXfermode(sk_sp<SkXfermode> mode) {
+ this->setBlendMode(mode ? mode->blend() : SkBlendMode::kSrcOver);
+}
+SkXfermode* SkPaint::getXfermode() const {
+ return SkXfermode::Peek((SkBlendMode)fBlendMode);
+}
+SkXfermode* SkPaint::setXfermodeMode(SkXfermode::Mode mode) {
+ this->setBlendMode((SkBlendMode)mode);
+ return SkXfermode::Peek((SkBlendMode)mode);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+static SkScalar mag2(SkScalar x, SkScalar y) {
+ return x * x + y * y;
+}
+
+static bool tooBig(const SkMatrix& m, SkScalar ma2max) {
+ return mag2(m[SkMatrix::kMScaleX], m[SkMatrix::kMSkewY]) > ma2max
+ ||
+ mag2(m[SkMatrix::kMSkewX], m[SkMatrix::kMScaleY]) > ma2max;
+}
+
+bool SkPaint::TooBigToUseCache(const SkMatrix& ctm, const SkMatrix& textM) {
+ SkASSERT(!ctm.hasPerspective());
+ SkASSERT(!textM.hasPerspective());
+
+ SkMatrix matrix;
+ matrix.setConcat(ctm, textM);
+ return tooBig(matrix, MaxCacheSize2());
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkGlyphCache.h"
+#include "SkUtils.h"
+
+static void DetachDescProc(SkTypeface* typeface, const SkScalerContextEffects& effects,
+ const SkDescriptor* desc, void* context) {
+ *((SkGlyphCache**)context) = SkGlyphCache::DetachCache(typeface, effects, desc);
+}
+
+int SkPaint::textToGlyphs(const void* textData, size_t byteLength, uint16_t glyphs[]) const {
+ if (byteLength == 0) {
+ return 0;
+ }
+
+ SkASSERT(textData != nullptr);
+
+ if (nullptr == glyphs) {
+ switch (this->getTextEncoding()) {
+ case kUTF8_TextEncoding:
+ return SkUTF8_CountUnichars((const char*)textData, byteLength);
+ case kUTF16_TextEncoding:
+ return SkUTF16_CountUnichars((const uint16_t*)textData, SkToInt(byteLength >> 1));
+ case kUTF32_TextEncoding:
+ return SkToInt(byteLength >> 2);
+ case kGlyphID_TextEncoding:
+ return SkToInt(byteLength >> 1);
+ default:
+ SkDEBUGFAIL("unknown text encoding");
+ }
+ return 0;
+ }
+
+ // if we get here, we have a valid glyphs[] array, so time to fill it in
+
+ // handle this encoding before the setup for the glyphcache
+ if (this->getTextEncoding() == kGlyphID_TextEncoding) {
+ // we want to ignore the low bit of byteLength
+ memcpy(glyphs, textData, byteLength >> 1 << 1);
+ return SkToInt(byteLength >> 1);
+ }
+
+ SkAutoGlyphCache autoCache(*this, nullptr, nullptr);
+ SkGlyphCache* cache = autoCache.getCache();
+
+ const char* text = (const char*)textData;
+ const char* stop = text + byteLength;
+ uint16_t* gptr = glyphs;
+
+ switch (this->getTextEncoding()) {
+ case SkPaint::kUTF8_TextEncoding:
+ while (text < stop) {
+ *gptr++ = cache->unicharToGlyph(SkUTF8_NextUnichar(&text));
+ }
+ break;
+ case SkPaint::kUTF16_TextEncoding: {
+ const uint16_t* text16 = (const uint16_t*)text;
+ const uint16_t* stop16 = (const uint16_t*)stop;
+ while (text16 < stop16) {
+ *gptr++ = cache->unicharToGlyph(SkUTF16_NextUnichar(&text16));
+ }
+ break;
+ }
+ case kUTF32_TextEncoding: {
+ const int32_t* text32 = (const int32_t*)text;
+ const int32_t* stop32 = (const int32_t*)stop;
+ while (text32 < stop32) {
+ *gptr++ = cache->unicharToGlyph(*text32++);
+ }
+ break;
+ }
+ default:
+ SkDEBUGFAIL("unknown text encoding");
+ }
+ return SkToInt(gptr - glyphs);
+}
+
+bool SkPaint::containsText(const void* textData, size_t byteLength) const {
+ if (0 == byteLength) {
+ return true;
+ }
+
+ SkASSERT(textData != nullptr);
+
+ // handle this encoding before the setup for the glyphcache
+ if (this->getTextEncoding() == kGlyphID_TextEncoding) {
+ const uint16_t* glyphID = static_cast<const uint16_t*>(textData);
+ size_t count = byteLength >> 1;
+ for (size_t i = 0; i < count; i++) {
+ if (0 == glyphID[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ SkAutoGlyphCache autoCache(*this, nullptr, nullptr);
+ SkGlyphCache* cache = autoCache.getCache();
+
+ switch (this->getTextEncoding()) {
+ case SkPaint::kUTF8_TextEncoding: {
+ const char* text = static_cast<const char*>(textData);
+ const char* stop = text + byteLength;
+ while (text < stop) {
+ if (0 == cache->unicharToGlyph(SkUTF8_NextUnichar(&text))) {
+ return false;
+ }
+ }
+ break;
+ }
+ case SkPaint::kUTF16_TextEncoding: {
+ const uint16_t* text = static_cast<const uint16_t*>(textData);
+ const uint16_t* stop = text + (byteLength >> 1);
+ while (text < stop) {
+ if (0 == cache->unicharToGlyph(SkUTF16_NextUnichar(&text))) {
+ return false;
+ }
+ }
+ break;
+ }
+ case SkPaint::kUTF32_TextEncoding: {
+ const int32_t* text = static_cast<const int32_t*>(textData);
+ const int32_t* stop = text + (byteLength >> 2);
+ while (text < stop) {
+ if (0 == cache->unicharToGlyph(*text++)) {
+ return false;
+ }
+ }
+ break;
+ }
+ default:
+ SkDEBUGFAIL("unknown text encoding");
+ return false;
+ }
+ return true;
+}
+
+void SkPaint::glyphsToUnichars(const uint16_t glyphs[], int count, SkUnichar textData[]) const {
+ if (count <= 0) {
+ return;
+ }
+
+ SkASSERT(glyphs != nullptr);
+ SkASSERT(textData != nullptr);
+
+ SkSurfaceProps props(0, kUnknown_SkPixelGeometry);
+ SkAutoGlyphCache autoCache(*this, &props, nullptr);
+ SkGlyphCache* cache = autoCache.getCache();
+
+ for (int index = 0; index < count; index++) {
+ textData[index] = cache->glyphToUnichar(glyphs[index]);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static const SkGlyph& sk_getMetrics_utf8_next(SkGlyphCache* cache,
+ const char** text) {
+ SkASSERT(cache != nullptr);
+ SkASSERT(text != nullptr);
+
+ return cache->getUnicharMetrics(SkUTF8_NextUnichar(text));
+}
+
+static const SkGlyph& sk_getMetrics_utf16_next(SkGlyphCache* cache,
+ const char** text) {
+ SkASSERT(cache != nullptr);
+ SkASSERT(text != nullptr);
+
+ return cache->getUnicharMetrics(SkUTF16_NextUnichar((const uint16_t**)text));
+}
+
+static const SkGlyph& sk_getMetrics_utf32_next(SkGlyphCache* cache,
+ const char** text) {
+ SkASSERT(cache != nullptr);
+ SkASSERT(text != nullptr);
+
+ const int32_t* ptr = *(const int32_t**)text;
+ SkUnichar uni = *ptr++;
+ *text = (const char*)ptr;
+ return cache->getUnicharMetrics(uni);
+}
+
+static const SkGlyph& sk_getMetrics_glyph_next(SkGlyphCache* cache,
+ const char** text) {
+ SkASSERT(cache != nullptr);
+ SkASSERT(text != nullptr);
+
+ const uint16_t* ptr = *(const uint16_t**)text;
+ unsigned glyphID = *ptr;
+ ptr += 1;
+ *text = (const char*)ptr;
+ return cache->getGlyphIDMetrics(glyphID);
+}
+
+static const SkGlyph& sk_getAdvance_utf8_next(SkGlyphCache* cache,
+ const char** text) {
+ SkASSERT(cache != nullptr);
+ SkASSERT(text != nullptr);
+
+ return cache->getUnicharAdvance(SkUTF8_NextUnichar(text));
+}
+
+static const SkGlyph& sk_getAdvance_utf16_next(SkGlyphCache* cache,
+ const char** text) {
+ SkASSERT(cache != nullptr);
+ SkASSERT(text != nullptr);
+
+ return cache->getUnicharAdvance(SkUTF16_NextUnichar((const uint16_t**)text));
+}
+
+static const SkGlyph& sk_getAdvance_utf32_next(SkGlyphCache* cache,
+ const char** text) {
+ SkASSERT(cache != nullptr);
+ SkASSERT(text != nullptr);
+
+ const int32_t* ptr = *(const int32_t**)text;
+ SkUnichar uni = *ptr++;
+ *text = (const char*)ptr;
+ return cache->getUnicharAdvance(uni);
+}
+
+static const SkGlyph& sk_getAdvance_glyph_next(SkGlyphCache* cache,
+ const char** text) {
+ SkASSERT(cache != nullptr);
+ SkASSERT(text != nullptr);
+
+ const uint16_t* ptr = *(const uint16_t**)text;
+ unsigned glyphID = *ptr;
+ ptr += 1;
+ *text = (const char*)ptr;
+ return cache->getGlyphIDAdvance(glyphID);
+}
+
+SkPaint::GlyphCacheProc SkPaint::GetGlyphCacheProc(TextEncoding encoding,
+ bool isDevKern,
+ bool needFullMetrics) {
+ static const GlyphCacheProc gGlyphCacheProcs[] = {
+ sk_getMetrics_utf8_next,
+ sk_getMetrics_utf16_next,
+ sk_getMetrics_utf32_next,
+ sk_getMetrics_glyph_next,
+
+ sk_getAdvance_utf8_next,
+ sk_getAdvance_utf16_next,
+ sk_getAdvance_utf32_next,
+ sk_getAdvance_glyph_next,
+ };
+
+ unsigned index = encoding;
+
+ if (!needFullMetrics && !isDevKern) {
+ index += 4;
+ }
+
+ SkASSERT(index < SK_ARRAY_COUNT(gGlyphCacheProcs));
+ return gGlyphCacheProcs[index];
+}
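+
+/* Illustrative note (editor's sketch, not part of Skia): the proc table above holds four
+   full-metrics entries followed by four advance-only entries, in text-encoding order
+   (UTF-8, UTF-16, UTF-32, glyph IDs). So, for example:
+
+       // kUTF16_TextEncoding == 1; no dev-kern, no full metrics -> index 1 + 4 == 5
+       auto proc = SkPaint::GetGlyphCacheProc(SkPaint::kUTF16_TextEncoding,
+                                              false, false);  // sk_getAdvance_utf16_next
+*/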
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define TEXT_AS_PATHS_PAINT_FLAGS_TO_IGNORE ( \
+SkPaint::kDevKernText_Flag | \
+SkPaint::kLinearText_Flag | \
+SkPaint::kLCDRenderText_Flag | \
+SkPaint::kEmbeddedBitmapText_Flag | \
+SkPaint::kAutoHinting_Flag | \
+SkPaint::kGenA8FromLCD_Flag )
+
+SkScalar SkPaint::setupForAsPaths() {
+ uint32_t flags = this->getFlags();
+ // clear the flags we don't care about
+ flags &= ~TEXT_AS_PATHS_PAINT_FLAGS_TO_IGNORE;
+ // set the flags we do care about
+ flags |= SkPaint::kSubpixelText_Flag;
+
+ this->setFlags(flags);
+ this->setHinting(SkPaint::kNo_Hinting);
+
+ SkScalar textSize = fTextSize;
+ this->setTextSize(kCanonicalTextSizeForPaths);
+ return textSize / kCanonicalTextSizeForPaths;
+}
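+
+/* Illustrative note (editor's sketch): setupForAsPaths() normalizes the paint to a
+   canonical text size and returns the factor needed to undo that. Assuming
+   kCanonicalTextSizeForPaths is 64, a paint with text size 16 would be measured at
+   size 64, and the caller would scale results by the returned 16/64 == 0.25.
+*/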
+
+class SkCanonicalizePaint {
+public:
+ SkCanonicalizePaint(const SkPaint& paint) : fPaint(&paint), fScale(0) {
+ if (paint.isLinearText() || SkDraw::ShouldDrawTextAsPaths(paint, SkMatrix::I())) {
+ SkPaint* p = fLazy.set(paint);
+ fScale = p->setupForAsPaths();
+ fPaint = p;
+ }
+ }
+
+ const SkPaint& getPaint() const { return *fPaint; }
+
+ /**
+ * Returns 0 if the paint was unmodified, or the scale factor needed to map
+ * measurements back to the original textSize.
+ */
+ SkScalar getScale() const { return fScale; }
+
+private:
+ const SkPaint* fPaint;
+ SkScalar fScale;
+ SkTLazy<SkPaint> fLazy;
+};
+
+static void set_bounds(const SkGlyph& g, SkRect* bounds) {
+ bounds->set(SkIntToScalar(g.fLeft),
+ SkIntToScalar(g.fTop),
+ SkIntToScalar(g.fLeft + g.fWidth),
+ SkIntToScalar(g.fTop + g.fHeight));
+}
+
+static void join_bounds_x(const SkGlyph& g, SkRect* bounds, SkScalar dx) {
+ bounds->join(SkIntToScalar(g.fLeft) + dx,
+ SkIntToScalar(g.fTop),
+ SkIntToScalar(g.fLeft + g.fWidth) + dx,
+ SkIntToScalar(g.fTop + g.fHeight));
+}
+
+static void join_bounds_y(const SkGlyph& g, SkRect* bounds, SkScalar dy) {
+ bounds->join(SkIntToScalar(g.fLeft),
+ SkIntToScalar(g.fTop) + dy,
+ SkIntToScalar(g.fLeft + g.fWidth),
+ SkIntToScalar(g.fTop + g.fHeight) + dy);
+}
+
+typedef void (*JoinBoundsProc)(const SkGlyph&, SkRect*, SkScalar);
+
+// xyIndex is 0 for fAdvanceX or 1 for fAdvanceY
+static SkScalar advance(const SkGlyph& glyph, int xyIndex) {
+ SkASSERT(0 == xyIndex || 1 == xyIndex);
+ return SkFloatToScalar((&glyph.fAdvanceX)[xyIndex]);
+}
+
+SkScalar SkPaint::measure_text(SkGlyphCache* cache,
+ const char* text, size_t byteLength,
+ int* count, SkRect* bounds) const {
+ SkASSERT(count);
+ if (byteLength == 0) {
+ *count = 0;
+ if (bounds) {
+ bounds->setEmpty();
+ }
+ return 0;
+ }
+
+ GlyphCacheProc glyphCacheProc = SkPaint::GetGlyphCacheProc(this->getTextEncoding(),
+ this->isDevKernText(),
+ nullptr != bounds);
+
+ int xyIndex;
+ JoinBoundsProc joinBoundsProc;
+ if (this->isVerticalText()) {
+ xyIndex = 1;
+ joinBoundsProc = join_bounds_y;
+ } else {
+ xyIndex = 0;
+ joinBoundsProc = join_bounds_x;
+ }
+
+ int n = 1;
+ const char* stop = (const char*)text + byteLength;
+ const SkGlyph* g = &glyphCacheProc(cache, &text);
+ SkScalar x = advance(*g, xyIndex);
+
+ if (nullptr == bounds) {
+ if (this->isDevKernText()) {
+ for (; text < stop; n++) {
+ const int rsb = g->fRsbDelta;
+ g = &glyphCacheProc(cache, &text);
+ x += SkAutoKern_Adjust(rsb, g->fLsbDelta) + advance(*g, xyIndex);
+ }
+ } else {
+ for (; text < stop; n++) {
+ x += advance(glyphCacheProc(cache, &text), xyIndex);
+ }
+ }
+ } else {
+ set_bounds(*g, bounds);
+ if (this->isDevKernText()) {
+ for (; text < stop; n++) {
+ const int rsb = g->fRsbDelta;
+ g = &glyphCacheProc(cache, &text);
+ x += SkAutoKern_Adjust(rsb, g->fLsbDelta);
+ joinBoundsProc(*g, bounds, x);
+ x += advance(*g, xyIndex);
+ }
+ } else {
+ for (; text < stop; n++) {
+ g = &glyphCacheProc(cache, &text);
+ joinBoundsProc(*g, bounds, x);
+ x += advance(*g, xyIndex);
+ }
+ }
+ }
+ SkASSERT(text == stop);
+
+ *count = n;
+ return x;
+}
+
+SkScalar SkPaint::measureText(const void* textData, size_t length, SkRect* bounds) const {
+ const char* text = (const char*)textData;
+ SkASSERT(text != nullptr || length == 0);
+
+ SkCanonicalizePaint canon(*this);
+ const SkPaint& paint = canon.getPaint();
+ SkScalar scale = canon.getScale();
+
+ SkAutoGlyphCache autoCache(paint, nullptr, nullptr);
+ SkGlyphCache* cache = autoCache.getCache();
+
+ SkScalar width = 0;
+
+ if (length > 0) {
+ int tempCount;
+
+ width = paint.measure_text(cache, text, length, &tempCount, bounds);
+ if (scale) {
+ width = SkScalarMul(width, scale);
+ if (bounds) {
+ bounds->fLeft = SkScalarMul(bounds->fLeft, scale);
+ bounds->fTop = SkScalarMul(bounds->fTop, scale);
+ bounds->fRight = SkScalarMul(bounds->fRight, scale);
+ bounds->fBottom = SkScalarMul(bounds->fBottom, scale);
+ }
+ }
+ } else if (bounds) {
+ // ensure that even if we don't measure_text we still update the bounds
+ bounds->setEmpty();
+ }
+ return width;
+}
+
+size_t SkPaint::breakText(const void* textD, size_t length, SkScalar maxWidth,
+ SkScalar* measuredWidth) const {
+ if (0 == length || 0 >= maxWidth) {
+ if (measuredWidth) {
+ *measuredWidth = 0;
+ }
+ return 0;
+ }
+
+ if (0 == fTextSize) {
+ if (measuredWidth) {
+ *measuredWidth = 0;
+ }
+ return length;
+ }
+
+ SkASSERT(textD != nullptr);
+ const char* text = (const char*)textD;
+ const char* stop = text + length;
+
+ SkCanonicalizePaint canon(*this);
+ const SkPaint& paint = canon.getPaint();
+ SkScalar scale = canon.getScale();
+
+ // adjust max in case we changed the textSize in paint
+ if (scale) {
+ maxWidth /= scale;
+ }
+
+ SkAutoGlyphCache autoCache(paint, nullptr, nullptr);
+ SkGlyphCache* cache = autoCache.getCache();
+
+ GlyphCacheProc glyphCacheProc = SkPaint::GetGlyphCacheProc(paint.getTextEncoding(),
+ paint.isDevKernText(),
+ false);
+ const int xyIndex = paint.isVerticalText() ? 1 : 0;
+ SkScalar width = 0;
+
+ if (this->isDevKernText()) {
+ int rsb = 0;
+ while (text < stop) {
+ const char* curr = text;
+ const SkGlyph& g = glyphCacheProc(cache, &text);
+ SkScalar x = SkAutoKern_Adjust(rsb, g.fLsbDelta) + advance(g, xyIndex);
+ if ((width += x) > maxWidth) {
+ width -= x;
+ text = curr;
+ break;
+ }
+ rsb = g.fRsbDelta;
+ }
+ } else {
+ while (text < stop) {
+ const char* curr = text;
+ SkScalar x = advance(glyphCacheProc(cache, &text), xyIndex);
+ if ((width += x) > maxWidth) {
+ width -= x;
+ text = curr;
+ break;
+ }
+ }
+ }
+
+ if (measuredWidth) {
+ if (scale) {
+ width *= scale;
+ }
+ *measuredWidth = width;
+ }
+
+ // return the number of bytes measured
+ return text - stop + length;
+}
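+
+/* Illustrative usage (editor's sketch, not part of Skia): breakText() reports how many
+   leading bytes of the text fit within maxWidth, optionally returning the width actually
+   consumed. Hypothetically:
+
+       SkScalar used = 0;
+       size_t fitBytes = paint.breakText(utf8, strlen(utf8), 120.0f, &used);
+       // draw or wrap only the first 'fitBytes' bytes; 'used' <= 120.
+*/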
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool FontMetricsCacheProc(const SkGlyphCache* cache, void* context) {
+ *(SkPaint::FontMetrics*)context = cache->getFontMetrics();
+ return false; // don't detach the cache
+}
+
+static void FontMetricsDescProc(SkTypeface* typeface, const SkScalerContextEffects& effects,
+ const SkDescriptor* desc, void* context) {
+ SkGlyphCache::VisitCache(typeface, effects, desc, FontMetricsCacheProc, context);
+}
+
+SkScalar SkPaint::getFontMetrics(FontMetrics* metrics, SkScalar zoom) const {
+ SkCanonicalizePaint canon(*this);
+ const SkPaint& paint = canon.getPaint();
+ SkScalar scale = canon.getScale();
+
+ SkMatrix zoomMatrix, *zoomPtr = nullptr;
+ if (zoom) {
+ zoomMatrix.setScale(zoom, zoom);
+ zoomPtr = &zoomMatrix;
+ }
+
+ FontMetrics storage;
+ if (nullptr == metrics) {
+ metrics = &storage;
+ }
+
+ paint.descriptorProc(nullptr, kNone_ScalerContextFlags, zoomPtr, FontMetricsDescProc, metrics);
+
+ if (scale) {
+ metrics->fTop = SkScalarMul(metrics->fTop, scale);
+ metrics->fAscent = SkScalarMul(metrics->fAscent, scale);
+ metrics->fDescent = SkScalarMul(metrics->fDescent, scale);
+ metrics->fBottom = SkScalarMul(metrics->fBottom, scale);
+ metrics->fLeading = SkScalarMul(metrics->fLeading, scale);
+ metrics->fAvgCharWidth = SkScalarMul(metrics->fAvgCharWidth, scale);
+ metrics->fXMin = SkScalarMul(metrics->fXMin, scale);
+ metrics->fXMax = SkScalarMul(metrics->fXMax, scale);
+ metrics->fXHeight = SkScalarMul(metrics->fXHeight, scale);
+ metrics->fUnderlineThickness = SkScalarMul(metrics->fUnderlineThickness, scale);
+ metrics->fUnderlinePosition = SkScalarMul(metrics->fUnderlinePosition, scale);
+ }
+ return metrics->fDescent - metrics->fAscent + metrics->fLeading;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void set_bounds(const SkGlyph& g, SkRect* bounds, SkScalar scale) {
+ bounds->set(g.fLeft * scale,
+ g.fTop * scale,
+ (g.fLeft + g.fWidth) * scale,
+ (g.fTop + g.fHeight) * scale);
+}
+
+int SkPaint::getTextWidths(const void* textData, size_t byteLength,
+ SkScalar widths[], SkRect bounds[]) const {
+ if (0 == byteLength) {
+ return 0;
+ }
+
+ SkASSERT(textData);
+
+ if (nullptr == widths && nullptr == bounds) {
+ return this->countText(textData, byteLength);
+ }
+
+ SkCanonicalizePaint canon(*this);
+ const SkPaint& paint = canon.getPaint();
+ SkScalar scale = canon.getScale();
+
+ SkAutoGlyphCache autoCache(paint, nullptr, nullptr);
+ SkGlyphCache* cache = autoCache.getCache();
+ GlyphCacheProc glyphCacheProc = SkPaint::GetGlyphCacheProc(paint.getTextEncoding(),
+ paint.isDevKernText(),
+ nullptr != bounds);
+
+ const char* text = (const char*)textData;
+ const char* stop = text + byteLength;
+ int count = 0;
+ const int xyIndex = paint.isVerticalText() ? 1 : 0;
+
+ if (this->isDevKernText()) {
+ // we adjust the widths returned here through auto-kerning
+ SkAutoKern autokern;
+ SkScalar prevWidth = 0;
+
+ if (scale) {
+ while (text < stop) {
+ const SkGlyph& g = glyphCacheProc(cache, &text);
+ if (widths) {
+ SkScalar adjust = autokern.adjust(g);
+
+ if (count > 0) {
+ *widths++ = SkScalarMul(prevWidth + adjust, scale);
+ }
+ prevWidth = advance(g, xyIndex);
+ }
+ if (bounds) {
+ set_bounds(g, bounds++, scale);
+ }
+ ++count;
+ }
+ if (count > 0 && widths) {
+ *widths = SkScalarMul(prevWidth, scale);
+ }
+ } else {
+ while (text < stop) {
+ const SkGlyph& g = glyphCacheProc(cache, &text);
+ if (widths) {
+ SkScalar adjust = autokern.adjust(g);
+
+ if (count > 0) {
+ *widths++ = prevWidth + adjust;
+ }
+ prevWidth = advance(g, xyIndex);
+ }
+ if (bounds) {
+ set_bounds(g, bounds++);
+ }
+ ++count;
+ }
+ if (count > 0 && widths) {
+ *widths = prevWidth;
+ }
+ }
+ } else { // no devkern
+ if (scale) {
+ while (text < stop) {
+ const SkGlyph& g = glyphCacheProc(cache, &text);
+ if (widths) {
+ *widths++ = SkScalarMul(advance(g, xyIndex),
+ scale);
+ }
+ if (bounds) {
+ set_bounds(g, bounds++, scale);
+ }
+ ++count;
+ }
+ } else {
+ while (text < stop) {
+ const SkGlyph& g = glyphCacheProc(cache, &text);
+ if (widths) {
+ *widths++ = advance(g, xyIndex);
+ }
+ if (bounds) {
+ set_bounds(g, bounds++);
+ }
+ ++count;
+ }
+ }
+ }
+
+ SkASSERT(text == stop);
+ return count;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkDraw.h"
+
+void SkPaint::getTextPath(const void* textData, size_t length,
+ SkScalar x, SkScalar y, SkPath* path) const {
+ SkASSERT(length == 0 || textData != nullptr);
+
+ const char* text = (const char*)textData;
+ if (text == nullptr || length == 0 || path == nullptr) {
+ return;
+ }
+
+ SkTextToPathIter iter(text, length, *this, false);
+ SkMatrix matrix;
+ SkScalar prevXPos = 0;
+
+ matrix.setScale(iter.getPathScale(), iter.getPathScale());
+ matrix.postTranslate(x, y);
+ path->reset();
+
+ SkScalar xpos;
+ const SkPath* iterPath;
+ while (iter.next(&iterPath, &xpos)) {
+ matrix.postTranslate(xpos - prevXPos, 0);
+ if (iterPath) {
+ path->addPath(*iterPath, matrix);
+ }
+ prevXPos = xpos;
+ }
+}
+
+void SkPaint::getPosTextPath(const void* textData, size_t length,
+ const SkPoint pos[], SkPath* path) const {
+ SkASSERT(length == 0 || textData != nullptr);
+
+ const char* text = (const char*)textData;
+ if (text == nullptr || length == 0 || path == nullptr) {
+ return;
+ }
+
+ SkTextToPathIter iter(text, length, *this, false);
+ SkMatrix matrix;
+ SkPoint prevPos;
+ prevPos.set(0, 0);
+
+ matrix.setScale(iter.getPathScale(), iter.getPathScale());
+ path->reset();
+
+ unsigned int i = 0;
+ const SkPath* iterPath;
+ while (iter.next(&iterPath, nullptr)) {
+ matrix.postTranslate(pos[i].fX - prevPos.fX, pos[i].fY - prevPos.fY);
+ if (iterPath) {
+ path->addPath(*iterPath, matrix);
+ }
+ prevPos = pos[i];
+ i++;
+ }
+}
+
+template <SkTextInterceptsIter::TextType TextType, typename Func>
+int GetTextIntercepts(const SkPaint& paint, const void* text, size_t length,
+ const SkScalar bounds[2], SkScalar* array, Func posMaker) {
+ SkASSERT(length == 0 || text != nullptr);
+ if (!length) {
+ return 0;
+ }
+
+ const SkPoint pos0 = posMaker(0);
+ SkTextInterceptsIter iter(static_cast<const char*>(text), length, paint, bounds,
+ pos0.x(), pos0.y(), TextType);
+
+ int i = 0;
+ int count = 0;
+ while (iter.next(array, &count)) {
+ if (TextType == SkTextInterceptsIter::TextType::kPosText) {
+ const SkPoint pos = posMaker(++i);
+ iter.setPosition(pos.x(), pos.y());
+ }
+ }
+
+ return count;
+}
+
+int SkPaint::getTextIntercepts(const void* textData, size_t length,
+ SkScalar x, SkScalar y, const SkScalar bounds[2],
+ SkScalar* array) const {
+
+ return GetTextIntercepts<SkTextInterceptsIter::TextType::kText>(
+ *this, textData, length, bounds, array, [&x, &y] (int) -> SkPoint {
+ return SkPoint::Make(x, y);
+ });
+}
+
+int SkPaint::getPosTextIntercepts(const void* textData, size_t length, const SkPoint pos[],
+ const SkScalar bounds[2], SkScalar* array) const {
+
+ return GetTextIntercepts<SkTextInterceptsIter::TextType::kPosText>(
+ *this, textData, length, bounds, array, [&pos] (int i) -> SkPoint {
+ return pos[i];
+ });
+}
+
+int SkPaint::getPosTextHIntercepts(const void* textData, size_t length, const SkScalar xpos[],
+ SkScalar constY, const SkScalar bounds[2],
+ SkScalar* array) const {
+
+ return GetTextIntercepts<SkTextInterceptsIter::TextType::kPosText>(
+ *this, textData, length, bounds, array, [&xpos, &constY] (int i) -> SkPoint {
+ return SkPoint::Make(xpos[i], constY);
+ });
+}
+
+int SkPaint::getTextBlobIntercepts(const SkTextBlob* blob, const SkScalar bounds[2],
+ SkScalar* intervals) const {
+ int count = 0;
+ SkPaint runPaint(*this);
+
+ SkTextBlobRunIterator it(blob);
+ while (!it.done()) {
+ it.applyFontToPaint(&runPaint);
+ const size_t runByteCount = it.glyphCount() * sizeof(SkGlyphID);
+ SkScalar* runIntervals = intervals ? intervals + count : nullptr;
+
+ switch (it.positioning()) {
+ case SkTextBlob::kDefault_Positioning:
+ count += runPaint.getTextIntercepts(it.glyphs(), runByteCount, it.offset().x(),
+ it.offset().y(), bounds, runIntervals);
+ break;
+ case SkTextBlob::kHorizontal_Positioning:
+ count += runPaint.getPosTextHIntercepts(it.glyphs(), runByteCount, it.pos(),
+ it.offset().y(), bounds, runIntervals);
+ break;
+ case SkTextBlob::kFull_Positioning:
+ count += runPaint.getPosTextIntercepts(it.glyphs(), runByteCount,
+ reinterpret_cast<const SkPoint*>(it.pos()),
+ bounds, runIntervals);
+ break;
+ }
+
+ it.next();
+ }
+
+ return count;
+}
+
+SkRect SkPaint::getFontBounds() const {
+ SkMatrix m;
+ m.setScale(fTextSize * fTextScaleX, fTextSize);
+ m.postSkew(fTextSkewX, 0);
+
+ SkTypeface* typeface = this->getTypeface();
+ if (nullptr == typeface) {
+ typeface = SkTypeface::GetDefaultTypeface();
+ }
+
+ SkRect bounds;
+ m.mapRect(&bounds, typeface->getBounds());
+ return bounds;
+}
+
+static void add_flattenable(SkDescriptor* desc, uint32_t tag,
+ SkBinaryWriteBuffer* buffer) {
+ buffer->writeToMemory(desc->addEntry(tag, buffer->bytesWritten(), nullptr));
+}
+
+static SkMask::Format compute_mask_format(const SkPaint& paint) {
+ uint32_t flags = paint.getFlags();
+
+ // Antialiasing being disabled trumps all other settings.
+ if (!(flags & SkPaint::kAntiAlias_Flag)) {
+ return SkMask::kBW_Format;
+ }
+
+ if (flags & SkPaint::kLCDRenderText_Flag) {
+ return SkMask::kLCD16_Format;
+ }
+
+ return SkMask::kA8_Format;
+}
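+
+/* Illustrative note (editor's sketch): the mask format falls out of the paint flags in
+   priority order -- no antialiasing forces kBW_Format, otherwise the LCD flag selects
+   kLCD16_Format, and everything else gets kA8_Format. MakeRec() below may still
+   downgrade LCD16 to A8, e.g. for oversized text or an unknown pixel geometry.
+*/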
+
+// if linear-text is on, then we force hinting to be off (since that's sort of
+// the point of linear-text).
+static SkPaint::Hinting computeHinting(const SkPaint& paint) {
+ SkPaint::Hinting h = paint.getHinting();
+ if (paint.isLinearText()) {
+ h = SkPaint::kNo_Hinting;
+ }
+ return h;
+}
+
+// return true if the paint is just a single color (i.e. not a shader). If it's
+// a shader, then we can't compute a const luminance for it :(
+static bool justAColor(const SkPaint& paint, SkColor* color) {
+ SkColor c = paint.getColor();
+
+ SkShader* shader = paint.getShader();
+ if (shader && !shader->asLuminanceColor(&c)) {
+ return false;
+ }
+ if (paint.getColorFilter()) {
+ c = paint.getColorFilter()->filterColor(c);
+ }
+ if (color) {
+ *color = c;
+ }
+ return true;
+}
+
+SkColor SkPaint::computeLuminanceColor() const {
+ SkColor c;
+ if (!justAColor(*this, &c)) {
+ c = SkColorSetRGB(0x7F, 0x80, 0x7F);
+ }
+ return c;
+}
+
+#define assert_byte(x) SkASSERT(0 == ((x) >> 8))
+
+// Beyond this size, LCD doesn't appreciably improve quality, but it always
+// costs more RAM and draws slower, so we set a cap.
+#ifndef SK_MAX_SIZE_FOR_LCDTEXT
+ #define SK_MAX_SIZE_FOR_LCDTEXT 48
+#endif
+
+const SkScalar gMaxSize2ForLCDText = SK_MAX_SIZE_FOR_LCDTEXT * SK_MAX_SIZE_FOR_LCDTEXT;
+
+static bool too_big_for_lcd(const SkScalerContext::Rec& rec, bool checkPost2x2) {
+ if (checkPost2x2) {
+ SkScalar area = rec.fPost2x2[0][0] * rec.fPost2x2[1][1] -
+ rec.fPost2x2[1][0] * rec.fPost2x2[0][1];
+ area *= rec.fTextSize * rec.fTextSize;
+ return area > gMaxSize2ForLCDText;
+ } else {
+ return rec.fTextSize > SK_MAX_SIZE_FOR_LCDTEXT;
+ }
+}
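+
+/* Illustrative note (editor's sketch): with the default SK_MAX_SIZE_FOR_LCDTEXT of 48,
+   an identity post-2x2 matrix (determinant 1) and a text size of 50 gives
+   area = 1 * 50 * 50 = 2500 > 2304, so the rec falls back from LCD16 to A8, while a
+   text size of 40 (area 1600) keeps LCD rendering.
+*/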
+
+/*
+ * Return the scalar with only limited fractional precision. Used to consolidate matrices
+ * that vary only slightly when we create our key into the font cache, since the font scaler
+ * typically returns the same-looking results for tiny changes in the matrix.
+ */
+static SkScalar sk_relax(SkScalar x) {
+ SkScalar n = SkScalarRoundToScalar(x * 1024);
+ return n / 1024.0f;
+}
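+
+/* Illustrative note (editor's sketch): sk_relax() quantizes a scalar to multiples of
+   1/1024, so near-identical device matrices hash to the same font-cache key. For
+   example, sk_relax(0.12345f) rounds 0.12345f * 1024 = 126.41 to 126, giving
+   126/1024 == 0.123046875f.
+*/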
+
+void SkScalerContext::MakeRec(const SkPaint& paint,
+ const SkSurfaceProps* surfaceProps,
+ const SkMatrix* deviceMatrix,
+ Rec* rec) {
+ SkASSERT(deviceMatrix == nullptr || !deviceMatrix->hasPerspective());
+
+ SkTypeface* typeface = paint.getTypeface();
+ if (nullptr == typeface) {
+ typeface = SkTypeface::GetDefaultTypeface();
+ }
+ rec->fFontID = typeface->uniqueID();
+ rec->fTextSize = paint.getTextSize();
+ rec->fPreScaleX = paint.getTextScaleX();
+ rec->fPreSkewX = paint.getTextSkewX();
+
+ bool checkPost2x2 = false;
+
+ if (deviceMatrix) {
+ const SkMatrix::TypeMask mask = deviceMatrix->getType();
+ if (mask & SkMatrix::kScale_Mask) {
+ rec->fPost2x2[0][0] = sk_relax(deviceMatrix->getScaleX());
+ rec->fPost2x2[1][1] = sk_relax(deviceMatrix->getScaleY());
+ checkPost2x2 = true;
+ } else {
+ rec->fPost2x2[0][0] = rec->fPost2x2[1][1] = SK_Scalar1;
+ }
+ if (mask & SkMatrix::kAffine_Mask) {
+ rec->fPost2x2[0][1] = sk_relax(deviceMatrix->getSkewX());
+ rec->fPost2x2[1][0] = sk_relax(deviceMatrix->getSkewY());
+ checkPost2x2 = true;
+ } else {
+ rec->fPost2x2[0][1] = rec->fPost2x2[1][0] = 0;
+ }
+ } else {
+ rec->fPost2x2[0][0] = rec->fPost2x2[1][1] = SK_Scalar1;
+ rec->fPost2x2[0][1] = rec->fPost2x2[1][0] = 0;
+ }
+
+ SkPaint::Style style = paint.getStyle();
+ SkScalar strokeWidth = paint.getStrokeWidth();
+
+ unsigned flags = 0;
+
+ if (paint.isFakeBoldText()) {
+#ifdef SK_USE_FREETYPE_EMBOLDEN
+ flags |= SkScalerContext::kEmbolden_Flag;
+#else
+ SkScalar fakeBoldScale = SkScalarInterpFunc(paint.getTextSize(),
+ kStdFakeBoldInterpKeys,
+ kStdFakeBoldInterpValues,
+ kStdFakeBoldInterpLength);
+ SkScalar extra = SkScalarMul(paint.getTextSize(), fakeBoldScale);
+
+ if (style == SkPaint::kFill_Style) {
+ style = SkPaint::kStrokeAndFill_Style;
+ strokeWidth = extra; // ignore paint's strokeWidth if it was "fill"
+ } else {
+ strokeWidth += extra;
+ }
+#endif
+ }
+
+ if (paint.isDevKernText()) {
+ flags |= SkScalerContext::kDevKernText_Flag;
+ }
+
+ if (style != SkPaint::kFill_Style && strokeWidth > 0) {
+ rec->fFrameWidth = strokeWidth;
+ rec->fMiterLimit = paint.getStrokeMiter();
+ rec->fStrokeJoin = SkToU8(paint.getStrokeJoin());
+ rec->fStrokeCap = SkToU8(paint.getStrokeCap());
+
+ if (style == SkPaint::kStrokeAndFill_Style) {
+ flags |= SkScalerContext::kFrameAndFill_Flag;
+ }
+ } else {
+ rec->fFrameWidth = 0;
+ rec->fMiterLimit = 0;
+ rec->fStrokeJoin = 0;
+ rec->fStrokeCap = 0;
+ }
+
+ rec->fMaskFormat = SkToU8(compute_mask_format(paint));
+
+ if (SkMask::kLCD16_Format == rec->fMaskFormat) {
+ if (too_big_for_lcd(*rec, checkPost2x2)) {
+ rec->fMaskFormat = SkMask::kA8_Format;
+ flags |= SkScalerContext::kGenA8FromLCD_Flag;
+ } else {
+ SkPixelGeometry geometry = surfaceProps
+ ? surfaceProps->pixelGeometry()
+ : SkSurfacePropsDefaultPixelGeometry();
+ switch (geometry) {
+ case kUnknown_SkPixelGeometry:
+ // eeek, can't support LCD
+ rec->fMaskFormat = SkMask::kA8_Format;
+ flags |= SkScalerContext::kGenA8FromLCD_Flag;
+ break;
+ case kRGB_H_SkPixelGeometry:
+ // our default, do nothing.
+ break;
+ case kBGR_H_SkPixelGeometry:
+ flags |= SkScalerContext::kLCD_BGROrder_Flag;
+ break;
+ case kRGB_V_SkPixelGeometry:
+ flags |= SkScalerContext::kLCD_Vertical_Flag;
+ break;
+ case kBGR_V_SkPixelGeometry:
+ flags |= SkScalerContext::kLCD_Vertical_Flag;
+ flags |= SkScalerContext::kLCD_BGROrder_Flag;
+ break;
+ }
+ }
+ }
+
+ if (paint.isEmbeddedBitmapText()) {
+ flags |= SkScalerContext::kEmbeddedBitmapText_Flag;
+ }
+ if (paint.isSubpixelText()) {
+ flags |= SkScalerContext::kSubpixelPositioning_Flag;
+ }
+ if (paint.isAutohinted()) {
+ flags |= SkScalerContext::kForceAutohinting_Flag;
+ }
+ if (paint.isVerticalText()) {
+ flags |= SkScalerContext::kVertical_Flag;
+ }
+ if (paint.getFlags() & SkPaint::kGenA8FromLCD_Flag) {
+ flags |= SkScalerContext::kGenA8FromLCD_Flag;
+ }
+ rec->fFlags = SkToU16(flags);
+
+ // these modify fFlags, so do them after assigning fFlags
+ rec->setHinting(computeHinting(paint));
+
+ rec->setLuminanceColor(paint.computeLuminanceColor());
+
+ // For now, always set the paint gamma equal to the device gamma.
+ // The math in SkMaskGamma can handle them being different, but it requires
+ // superluminous masks when, e.g., deviceGamma(x) < paintGamma(x) and x is
+ // sufficiently large.
+ rec->setDeviceGamma(SK_GAMMA_EXPONENT);
+ rec->setPaintGamma(SK_GAMMA_EXPONENT);
+
+#ifdef SK_GAMMA_CONTRAST
+ rec->setContrast(SK_GAMMA_CONTRAST);
+#else
+ /**
+ * A value of 0.5 for SK_GAMMA_CONTRAST appears to be a good compromise.
+ * With lower values small text appears washed out (though correctly so).
+ * With higher values LCD fringing is worse and the smoothing effect of
+ * partial coverage is diminished.
+ */
+ rec->setContrast(0.5f);
+#endif
+
+ rec->fReservedAlign = 0;
+
+ /* Allow the fonthost to modify our rec before we use it as a key into the
+ cache. This way, if we're asking for something that it will ignore, it can
+ modify our rec up front, so we don't create duplicate cache entries.
+ */
+ typeface->onFilterRec(rec);
+
+ // be sure to call PostMakeRec(rec) before you actually use it!
+}
+
+/**
+ * In order to call cachedDeviceLuminance, cachedPaintLuminance, or
+ * cachedMaskGamma the caller must hold the gMaskGammaCacheMutex and continue
+ * to hold it until the returned pointer is refed or forgotten.
+ */
+SK_DECLARE_STATIC_MUTEX(gMaskGammaCacheMutex);
+
+static SkMaskGamma* gLinearMaskGamma = nullptr;
+static SkMaskGamma* gMaskGamma = nullptr;
+static SkScalar gContrast = SK_ScalarMin;
+static SkScalar gPaintGamma = SK_ScalarMin;
+static SkScalar gDeviceGamma = SK_ScalarMin;
+/**
+ * The caller must hold the gMaskGammaCacheMutex and continue to hold it until
+ * the returned SkMaskGamma pointer is refed or forgotten.
+ */
+static const SkMaskGamma& cachedMaskGamma(SkScalar contrast, SkScalar paintGamma, SkScalar deviceGamma) {
+ gMaskGammaCacheMutex.assertHeld();
+ if (0 == contrast && SK_Scalar1 == paintGamma && SK_Scalar1 == deviceGamma) {
+ if (nullptr == gLinearMaskGamma) {
+ gLinearMaskGamma = new SkMaskGamma;
+ }
+ return *gLinearMaskGamma;
+ }
+ if (gContrast != contrast || gPaintGamma != paintGamma || gDeviceGamma != deviceGamma) {
+ SkSafeUnref(gMaskGamma);
+ gMaskGamma = new SkMaskGamma(contrast, paintGamma, deviceGamma);
+ gContrast = contrast;
+ gPaintGamma = paintGamma;
+ gDeviceGamma = deviceGamma;
+ }
+ return *gMaskGamma;
+}
+
+/**
+ * We ensure that the rec is self-consistent and efficient (where possible)
+ */
+void SkScalerContext::PostMakeRec(const SkPaint&, SkScalerContext::Rec* rec) {
+ /**
+ * If we're asking for A8, we force the colorlum to be gray, since that
+ * limits the number of unique entries, and the scaler will only look at
+ * the lum of one of them.
+ */
+ switch (rec->fMaskFormat) {
+ case SkMask::kLCD16_Format: {
+ // filter down the luminance color to a finite number of bits
+ SkColor color = rec->getLuminanceColor();
+ rec->setLuminanceColor(SkMaskGamma::CanonicalColor(color));
+ break;
+ }
+ case SkMask::kA8_Format: {
+ // filter down the luminance to a single component, since A8 can't
+ // use per-component information
+ SkColor color = rec->getLuminanceColor();
+ U8CPU lum = SkComputeLuminance(SkColorGetR(color),
+ SkColorGetG(color),
+ SkColorGetB(color));
+ // reduce to our finite number of bits
+ color = SkColorSetRGB(lum, lum, lum);
+ rec->setLuminanceColor(SkMaskGamma::CanonicalColor(color));
+ break;
+ }
+ case SkMask::kBW_Format:
+ // No need to differentiate gamma or apply contrast if we're BW
+ rec->ignorePreBlend();
+ break;
+ }
+}
+
+#define MIN_SIZE_FOR_EFFECT_BUFFER 1024
+
+#ifdef SK_DEBUG
+ #define TEST_DESC
+#endif
+
+static void write_out_descriptor(SkDescriptor* desc, const SkScalerContext::Rec& rec,
+ const SkPathEffect* pe, SkBinaryWriteBuffer* peBuffer,
+ const SkMaskFilter* mf, SkBinaryWriteBuffer* mfBuffer,
+ const SkRasterizer* ra, SkBinaryWriteBuffer* raBuffer,
+ size_t descSize) {
+ desc->init();
+ desc->addEntry(kRec_SkDescriptorTag, sizeof(rec), &rec);
+
+ if (pe) {
+ add_flattenable(desc, kPathEffect_SkDescriptorTag, peBuffer);
+ }
+ if (mf) {
+ add_flattenable(desc, kMaskFilter_SkDescriptorTag, mfBuffer);
+ }
+ if (ra) {
+ add_flattenable(desc, kRasterizer_SkDescriptorTag, raBuffer);
+ }
+
+ desc->computeChecksum();
+}
+
+static size_t fill_out_rec(const SkPaint& paint, SkScalerContext::Rec* rec,
+ const SkSurfaceProps* surfaceProps,
+ bool fakeGamma, bool boostContrast,
+ const SkMatrix* deviceMatrix,
+ const SkPathEffect* pe, SkBinaryWriteBuffer* peBuffer,
+ const SkMaskFilter* mf, SkBinaryWriteBuffer* mfBuffer,
+ const SkRasterizer* ra, SkBinaryWriteBuffer* raBuffer) {
+ SkScalerContext::MakeRec(paint, surfaceProps, deviceMatrix, rec);
+ if (!fakeGamma) {
+ rec->ignoreGamma();
+ }
+ if (!boostContrast) {
+ rec->setContrast(0);
+ }
+
+ int entryCount = 1;
+ size_t descSize = sizeof(*rec);
+
+ if (pe) {
+ pe->flatten(*peBuffer);
+ descSize += peBuffer->bytesWritten();
+ entryCount += 1;
+ rec->fMaskFormat = SkMask::kA8_Format; // force antialiasing when we do the scan conversion
+ // seems like we could support kLCD as well at this point...
+ }
+ if (mf) {
+ mf->flatten(*mfBuffer);
+ descSize += mfBuffer->bytesWritten();
+ entryCount += 1;
+ rec->fMaskFormat = SkMask::kA8_Format; // force antialiasing with maskfilters
+ /* Pre-blend is not currently applied to filtered text.
+ The primary filter is blur, for which contrast makes no sense,
+ and for which the destination guess error is more visible.
+ Also, all existing users of blur have calibrated for linear. */
+ rec->ignorePreBlend();
+ }
+ if (ra) {
+ ra->flatten(*raBuffer);
+ descSize += raBuffer->bytesWritten();
+ entryCount += 1;
+ rec->fMaskFormat = SkMask::kA8_Format; // force antialiasing when we do the scan conversion
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Now that we're done tweaking the rec, call the PostMakeRec cleanup
+ SkScalerContext::PostMakeRec(paint, rec);
+
+ descSize += SkDescriptor::ComputeOverhead(entryCount);
+ return descSize;
+}
+
+#ifdef TEST_DESC
+static void test_desc(const SkScalerContext::Rec& rec,
+ const SkPathEffect* pe, SkBinaryWriteBuffer* peBuffer,
+ const SkMaskFilter* mf, SkBinaryWriteBuffer* mfBuffer,
+ const SkRasterizer* ra, SkBinaryWriteBuffer* raBuffer,
+ const SkDescriptor* desc, size_t descSize) {
+ // Check that we completely write the bytes in desc (our key), and that
+ // there are no uninitialized bytes. If there were, then we would get
+ // false-misses (or worse, false-hits) in our fontcache.
+ //
+ // We do this by filling two other descriptors, one with 0s and the other with 1s,
+ // building them the same way, and then checking that all three are identical.
+ SkAutoDescriptor ad1(descSize);
+ SkAutoDescriptor ad2(descSize);
+ SkDescriptor* desc1 = ad1.getDesc();
+ SkDescriptor* desc2 = ad2.getDesc();
+
+ memset(desc1, 0x00, descSize);
+ memset(desc2, 0xFF, descSize);
+
+ desc1->init();
+ desc2->init();
+ desc1->addEntry(kRec_SkDescriptorTag, sizeof(rec), &rec);
+ desc2->addEntry(kRec_SkDescriptorTag, sizeof(rec), &rec);
+
+ if (pe) {
+ add_flattenable(desc1, kPathEffect_SkDescriptorTag, peBuffer);
+ add_flattenable(desc2, kPathEffect_SkDescriptorTag, peBuffer);
+ }
+ if (mf) {
+ add_flattenable(desc1, kMaskFilter_SkDescriptorTag, mfBuffer);
+ add_flattenable(desc2, kMaskFilter_SkDescriptorTag, mfBuffer);
+ }
+ if (ra) {
+ add_flattenable(desc1, kRasterizer_SkDescriptorTag, raBuffer);
+ add_flattenable(desc2, kRasterizer_SkDescriptorTag, raBuffer);
+ }
+
+ SkASSERT(descSize == desc1->getLength());
+ SkASSERT(descSize == desc2->getLength());
+ desc1->computeChecksum();
+ desc2->computeChecksum();
+ SkASSERT(!memcmp(desc, desc1, descSize));
+ SkASSERT(!memcmp(desc, desc2, descSize));
+}
+#endif
+
+/* see the note on ignoreGamma on descriptorProc */
+void SkPaint::getScalerContextDescriptor(SkScalerContextEffects* effects,
+ SkAutoDescriptor* ad,
+ const SkSurfaceProps& surfaceProps,
+ uint32_t scalerContextFlags,
+ const SkMatrix* deviceMatrix) const {
+ SkScalerContext::Rec rec;
+
+ SkPathEffect* pe = this->getPathEffect();
+ SkMaskFilter* mf = this->getMaskFilter();
+ SkRasterizer* ra = this->getRasterizer();
+
+ SkBinaryWriteBuffer peBuffer, mfBuffer, raBuffer;
+ size_t descSize = fill_out_rec(*this, &rec, &surfaceProps,
+ SkToBool(scalerContextFlags & kFakeGamma_ScalerContextFlag),
+ SkToBool(scalerContextFlags & kBoostContrast_ScalerContextFlag),
+ deviceMatrix, pe, &peBuffer, mf, &mfBuffer, ra, &raBuffer);
+
+ ad->reset(descSize);
+ SkDescriptor* desc = ad->getDesc();
+
+ write_out_descriptor(desc, rec, pe, &peBuffer, mf, &mfBuffer, ra, &raBuffer, descSize);
+
+ SkASSERT(descSize == desc->getLength());
+
+#ifdef TEST_DESC
+ test_desc(rec, pe, &peBuffer, mf, &mfBuffer, ra, &raBuffer, desc, descSize);
+#endif
+
+ effects->fPathEffect = pe;
+ effects->fMaskFilter = mf;
+ effects->fRasterizer = ra;
+}
+
+/*
+ * ignoreGamma tells us that the caller just wants metrics that are unaffected
+ * by gamma correction, so we set the rec to ignore preblend: i.e. gamma = 1,
+ * contrast = 0, luminanceColor = transparent black.
+ */
+void SkPaint::descriptorProc(const SkSurfaceProps* surfaceProps,
+ uint32_t scalerContextFlags,
+ const SkMatrix* deviceMatrix,
+ void (*proc)(SkTypeface*, const SkScalerContextEffects&,
+ const SkDescriptor*, void*),
+ void* context) const {
+ SkScalerContext::Rec rec;
+
+ SkPathEffect* pe = this->getPathEffect();
+ SkMaskFilter* mf = this->getMaskFilter();
+ SkRasterizer* ra = this->getRasterizer();
+
+ SkBinaryWriteBuffer peBuffer, mfBuffer, raBuffer;
+ size_t descSize = fill_out_rec(*this, &rec, surfaceProps,
+ SkToBool(scalerContextFlags & kFakeGamma_ScalerContextFlag),
+ SkToBool(scalerContextFlags & kBoostContrast_ScalerContextFlag),
+ deviceMatrix, pe, &peBuffer, mf, &mfBuffer, ra, &raBuffer);
+
+ SkAutoDescriptor ad(descSize);
+ SkDescriptor* desc = ad.getDesc();
+
+ write_out_descriptor(desc, rec, pe, &peBuffer, mf, &mfBuffer, ra, &raBuffer, descSize);
+
+ SkASSERT(descSize == desc->getLength());
+
+#ifdef TEST_DESC
+ test_desc(rec, pe, &peBuffer, mf, &mfBuffer, ra, &raBuffer, desc, descSize);
+#endif
+
+ proc(fTypeface.get(), { pe, mf, ra }, desc, context);
+}
+
+SkGlyphCache* SkPaint::detachCache(const SkSurfaceProps* surfaceProps,
+ uint32_t scalerContextFlags,
+ const SkMatrix* deviceMatrix) const {
+ SkGlyphCache* cache;
+ this->descriptorProc(surfaceProps, scalerContextFlags, deviceMatrix, DetachDescProc, &cache);
+ return cache;
+}
+
+/**
+ * Expands fDeviceGamma, fPaintGamma, fContrast, and fLumBits into a mask pre-blend.
+ */
+//static
+SkMaskGamma::PreBlend SkScalerContext::GetMaskPreBlend(const SkScalerContext::Rec& rec) {
+ SkAutoMutexAcquire ama(gMaskGammaCacheMutex);
+ const SkMaskGamma& maskGamma = cachedMaskGamma(rec.getContrast(),
+ rec.getPaintGamma(),
+ rec.getDeviceGamma());
+ return maskGamma.preBlend(rec.getLuminanceColor());
+}
+
+size_t SkScalerContext::GetGammaLUTSize(SkScalar contrast, SkScalar paintGamma,
+ SkScalar deviceGamma, int* width, int* height) {
+ SkAutoMutexAcquire ama(gMaskGammaCacheMutex);
+ const SkMaskGamma& maskGamma = cachedMaskGamma(contrast,
+ paintGamma,
+ deviceGamma);
+
+ maskGamma.getGammaTableDimensions(width, height);
+ size_t size = (*width)*(*height)*sizeof(uint8_t);
+
+ return size;
+}
+
+void SkScalerContext::GetGammaLUTData(SkScalar contrast, SkScalar paintGamma, SkScalar deviceGamma,
+ void* data) {
+ SkAutoMutexAcquire ama(gMaskGammaCacheMutex);
+ const SkMaskGamma& maskGamma = cachedMaskGamma(contrast,
+ paintGamma,
+ deviceGamma);
+ int width, height;
+ maskGamma.getGammaTableDimensions(&width, &height);
+ size_t size = width*height*sizeof(uint8_t);
+ const uint8_t* gammaTables = maskGamma.getGammaTables();
+ memcpy(data, gammaTables, size);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkStream.h"
+
+static uintptr_t asint(const void* p) {
+ return reinterpret_cast<uintptr_t>(p);
+}
+
+static uint32_t pack_4(unsigned a, unsigned b, unsigned c, unsigned d) {
+ SkASSERT(a == (uint8_t)a);
+ SkASSERT(b == (uint8_t)b);
+ SkASSERT(c == (uint8_t)c);
+ SkASSERT(d == (uint8_t)d);
+ return (a << 24) | (b << 16) | (c << 8) | d;
+}
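+
+/* Illustrative note (editor's sketch): pack_4() packs four byte-sized values,
+   most significant first: pack_4(0x12, 0x34, 0x56, 0x78) == 0x12345678.
+*/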
+
+#ifdef SK_DEBUG
+ static void ASSERT_FITS_IN(uint32_t value, int bitCount) {
+ SkASSERT(bitCount > 0 && bitCount <= 32);
+ uint32_t mask = ~0U;
+ mask >>= (32 - bitCount);
+ SkASSERT(0 == (value & ~mask));
+ }
+#else
+ #define ASSERT_FITS_IN(value, bitcount)
+#endif
+
+enum FlatFlags {
+ kHasTypeface_FlatFlag = 0x1,
+ kHasEffects_FlatFlag = 0x2,
+
+ kFlatFlagMask = 0x3,
+};
+
+enum BitsPerField {
+ kFlags_BPF = 16,
+ kHint_BPF = 2,
+ kAlign_BPF = 2,
+ kFilter_BPF = 2,
+ kFlatFlags_BPF = 3,
+};
+
+static inline int BPF_Mask(int bits) {
+ return (1 << bits) - 1;
+}
+
+static uint32_t pack_paint_flags(unsigned flags, unsigned hint, unsigned align,
+ unsigned filter, unsigned flatFlags) {
+ ASSERT_FITS_IN(flags, kFlags_BPF);
+ ASSERT_FITS_IN(hint, kHint_BPF);
+ ASSERT_FITS_IN(align, kAlign_BPF);
+ ASSERT_FITS_IN(filter, kFilter_BPF);
+ ASSERT_FITS_IN(flatFlags, kFlatFlags_BPF);
+
+ // left-align the fields of "known" size, and right-align the last (flatFlags) so we can easily
+ // add more bits in the future.
+ return (flags << 16) | (hint << 14) | (align << 12) | (filter << 10) | flatFlags;
+}
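+
+/* Illustrative note (editor's sketch): the packed layout produced above is
+
+       bits 31..16  paint flags
+       bits 15..14  hinting
+       bits 13..12  text align
+       bits 11..10  filter quality
+       bits  9..3   reserved (currently zero)
+       bits  2..0   flatFlags
+
+   and unpack_paint_flags() below reverses it field by field.
+*/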
+
+static FlatFlags unpack_paint_flags(SkPaint* paint, uint32_t packed) {
+ paint->setFlags(packed >> 16);
+ paint->setHinting((SkPaint::Hinting)((packed >> 14) & BPF_Mask(kHint_BPF)));
+ paint->setTextAlign((SkPaint::Align)((packed >> 12) & BPF_Mask(kAlign_BPF)));
+ paint->setFilterQuality((SkFilterQuality)((packed >> 10) & BPF_Mask(kFilter_BPF)));
+ return (FlatFlags)(packed & kFlatFlagMask);
+}
+
+/* To save space/time, we analyze the paint, and write a truncated version of
+ it if there are no tricky elements like shaders, etc.
+ */
+void SkPaint::flatten(SkWriteBuffer& buffer) const {
+ uint8_t flatFlags = 0;
+ if (this->getTypeface()) {
+ flatFlags |= kHasTypeface_FlatFlag;
+ }
+ if (asint(this->getPathEffect()) |
+ asint(this->getShader()) |
+ asint(this->getMaskFilter()) |
+ asint(this->getColorFilter()) |
+ asint(this->getRasterizer()) |
+ asint(this->getLooper()) |
+ asint(this->getImageFilter())) {
+ flatFlags |= kHasEffects_FlatFlag;
+ }
+
+ buffer.writeScalar(this->getTextSize());
+ buffer.writeScalar(this->getTextScaleX());
+ buffer.writeScalar(this->getTextSkewX());
+ buffer.writeScalar(this->getStrokeWidth());
+ buffer.writeScalar(this->getStrokeMiter());
+ buffer.writeColor(this->getColor());
+
+ buffer.writeUInt(pack_paint_flags(this->getFlags(), this->getHinting(), this->getTextAlign(),
+ this->getFilterQuality(), flatFlags));
+ buffer.writeUInt(pack_4(this->getStrokeCap(), this->getStrokeJoin(),
+ (this->getStyle() << 4) | this->getTextEncoding(),
+ fBlendMode));
+
+ // The fixed-size fields above are done; if we need to write additional
+ // fields, use the buffer directly.
+ if (flatFlags & kHasTypeface_FlatFlag) {
+ buffer.writeTypeface(this->getTypeface());
+ }
+ if (flatFlags & kHasEffects_FlatFlag) {
+ buffer.writeFlattenable(this->getPathEffect());
+ buffer.writeFlattenable(this->getShader());
+ buffer.writeFlattenable(this->getMaskFilter());
+ buffer.writeFlattenable(this->getColorFilter());
+ buffer.writeFlattenable(this->getRasterizer());
+ buffer.writeFlattenable(this->getLooper());
+ buffer.writeFlattenable(this->getImageFilter());
+ }
+}
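+
+/* Illustrative note (editor's sketch): the serialized form written above is, in order,
+   five scalars (text size, text scale X, text skew X, stroke width, stroke miter), the
+   color, the packed paint-flags word, the pack_4() word (cap, join, style/encoding,
+   blend mode), then an optional typeface and the seven optional effect flattenables
+   when the corresponding flat flags are set. unflatten() below reads them back in the
+   same order.
+*/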
+
+void SkPaint::unflatten(SkReadBuffer& buffer) {
+ this->setTextSize(buffer.readScalar());
+ this->setTextScaleX(buffer.readScalar());
+ this->setTextSkewX(buffer.readScalar());
+ this->setStrokeWidth(buffer.readScalar());
+ this->setStrokeMiter(buffer.readScalar());
+ this->setColor(buffer.readColor());
+
+ unsigned flatFlags = unpack_paint_flags(this, buffer.readUInt());
+
+ uint32_t tmp = buffer.readUInt();
+ this->setStrokeCap(static_cast<Cap>((tmp >> 24) & 0xFF));
+ this->setStrokeJoin(static_cast<Join>((tmp >> 16) & 0xFF));
+ if (buffer.isVersionLT(SkReadBuffer::kXfermodeToBlendMode_Version)) {
+ this->setStyle(static_cast<Style>((tmp >> 8) & 0xFF));
+ this->setTextEncoding(static_cast<TextEncoding>((tmp >> 0) & 0xFF));
+ } else {
+ this->setStyle(static_cast<Style>((tmp >> 12) & 0xF));
+ this->setTextEncoding(static_cast<TextEncoding>((tmp >> 8) & 0xF));
+ this->setBlendMode((SkBlendMode)(tmp & 0xFF));
+ }
+
+ if (flatFlags & kHasTypeface_FlatFlag) {
+ this->setTypeface(buffer.readTypeface());
+ } else {
+ this->setTypeface(nullptr);
+ }
+
+ if (flatFlags & kHasEffects_FlatFlag) {
+ this->setPathEffect(buffer.readPathEffect());
+ this->setShader(buffer.readShader());
+ if (buffer.isVersionLT(SkReadBuffer::kXfermodeToBlendMode_Version)) {
+ sk_sp<SkXfermode> xfer = buffer.readXfermode();
+ this->setBlendMode(xfer ? xfer->blend() : SkBlendMode::kSrcOver);
+ }
+ this->setMaskFilter(buffer.readMaskFilter());
+ this->setColorFilter(buffer.readColorFilter());
+ this->setRasterizer(buffer.readRasterizer());
+ this->setLooper(buffer.readDrawLooper());
+ this->setImageFilter(buffer.readImageFilter());
+
+ if (buffer.isVersionLT(SkReadBuffer::kAnnotationsMovedToCanvas_Version)) {
+ // We used to store annotations here (string+skdata) if this bool was true
+ if (buffer.readBool()) {
+ // Annotations have moved to drawAnnotation, so we just drop this one on the floor.
+ SkString key;
+ buffer.readString(&key);
+ (void)buffer.readByteArrayAsData();
+ }
+ }
+ } else {
+ this->setPathEffect(nullptr);
+ this->setShader(nullptr);
+ this->setMaskFilter(nullptr);
+ this->setColorFilter(nullptr);
+ this->setRasterizer(nullptr);
+ this->setLooper(nullptr);
+ this->setImageFilter(nullptr);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkPaint::getFillPath(const SkPath& src, SkPath* dst, const SkRect* cullRect,
+ SkScalar resScale) const {
+ SkStrokeRec rec(*this, resScale);
+
+ const SkPath* srcPtr = &src;
+ SkPath tmpPath;
+
+ if (fPathEffect && fPathEffect->filterPath(&tmpPath, src, &rec, cullRect)) {
+ srcPtr = &tmpPath;
+ }
+
+ if (!rec.applyToPath(dst, *srcPtr)) {
+ if (srcPtr == &tmpPath) {
+ // If paths were copy-on-write, this trick would not be needed.
+ // As it is, we want to save making a deep-copy from tmpPath -> dst
+ // since we know we're just going to delete tmpPath when we return,
+ // so the swap saves that copy.
+ dst->swap(tmpPath);
+ } else {
+ *dst = *srcPtr;
+ }
+ }
+ return !rec.isHairlineStyle();
+}
+
+bool SkPaint::canComputeFastBounds() const {
+ if (this->getLooper()) {
+ return this->getLooper()->canComputeFastBounds(*this);
+ }
+ if (this->getImageFilter() && !this->getImageFilter()->canComputeFastBounds()) {
+ return false;
+ }
+ return !this->getRasterizer();
+}
+
+const SkRect& SkPaint::doComputeFastBounds(const SkRect& origSrc,
+ SkRect* storage,
+ Style style) const {
+ SkASSERT(storage);
+
+ const SkRect* src = &origSrc;
+
+ if (this->getLooper()) {
+ SkASSERT(this->getLooper()->canComputeFastBounds(*this));
+ this->getLooper()->computeFastBounds(*this, *src, storage);
+ return *storage;
+ }
+
+ SkRect tmpSrc;
+ if (this->getPathEffect()) {
+ this->getPathEffect()->computeFastBounds(&tmpSrc, origSrc);
+ src = &tmpSrc;
+ }
+
+ SkScalar radius = SkStrokeRec::GetInflationRadius(*this, style);
+ *storage = src->makeOutset(radius, radius);
+
+ if (this->getMaskFilter()) {
+ this->getMaskFilter()->computeFastBounds(*storage, storage);
+ }
+
+ if (this->getImageFilter()) {
+ *storage = this->getImageFilter()->computeFastBounds(*storage);
+ }
+
+ return *storage;
+}
+
+#ifndef SK_IGNORE_TO_STRING
+
+void SkPaint::toString(SkString* str) const {
+ str->append("<dl><dt>SkPaint:</dt><dd><dl>");
+
+ SkTypeface* typeface = this->getTypeface();
+ if (typeface) {
+ SkDynamicMemoryWStream ostream;
+ typeface->serialize(&ostream);
+ SkAutoTDelete<SkStreamAsset> istream(ostream.detachAsStream());
+
+ SkFontDescriptor descriptor;
+ if (!SkFontDescriptor::Deserialize(istream, &descriptor)) {
+ str->append("<dt>FontDescriptor deserialization failed</dt>");
+ } else {
+ str->append("<dt>Font Family Name:</dt><dd>");
+ str->append(descriptor.getFamilyName());
+ str->append("</dd><dt>Font Full Name:</dt><dd>");
+ str->append(descriptor.getFullName());
+ str->append("</dd><dt>Font PS Name:</dt><dd>");
+ str->append(descriptor.getPostscriptName());
+ str->append("</dd>");
+ }
+ }
+
+ str->append("<dt>TextSize:</dt><dd>");
+ str->appendScalar(this->getTextSize());
+ str->append("</dd>");
+
+ str->append("<dt>TextScaleX:</dt><dd>");
+ str->appendScalar(this->getTextScaleX());
+ str->append("</dd>");
+
+ str->append("<dt>TextSkewX:</dt><dd>");
+ str->appendScalar(this->getTextSkewX());
+ str->append("</dd>");
+
+ SkPathEffect* pathEffect = this->getPathEffect();
+ if (pathEffect) {
+ str->append("<dt>PathEffect:</dt><dd>");
+ pathEffect->toString(str);
+ str->append("</dd>");
+ }
+
+ SkShader* shader = this->getShader();
+ if (shader) {
+ str->append("<dt>Shader:</dt><dd>");
+ shader->toString(str);
+ str->append("</dd>");
+ }
+
+ if (!this->isSrcOver()) {
+ str->appendf("<dt>Xfermode:</dt><dd>%d</dd>", fBlendMode);
+ }
+
+ SkMaskFilter* maskFilter = this->getMaskFilter();
+ if (maskFilter) {
+ str->append("<dt>MaskFilter:</dt><dd>");
+ maskFilter->toString(str);
+ str->append("</dd>");
+ }
+
+ SkColorFilter* colorFilter = this->getColorFilter();
+ if (colorFilter) {
+ str->append("<dt>ColorFilter:</dt><dd>");
+ colorFilter->toString(str);
+ str->append("</dd>");
+ }
+
+ SkRasterizer* rasterizer = this->getRasterizer();
+ if (rasterizer) {
+ str->append("<dt>Rasterizer:</dt><dd>");
+ str->append("</dd>");
+ }
+
+ SkDrawLooper* looper = this->getLooper();
+ if (looper) {
+ str->append("<dt>DrawLooper:</dt><dd>");
+ looper->toString(str);
+ str->append("</dd>");
+ }
+
+ SkImageFilter* imageFilter = this->getImageFilter();
+ if (imageFilter) {
+ str->append("<dt>ImageFilter:</dt><dd>");
+ imageFilter->toString(str);
+ str->append("</dd>");
+ }
+
+ str->append("<dt>Color:</dt><dd>0x");
+ SkColor color = this->getColor();
+ str->appendHex(color);
+ str->append("</dd>");
+
+ str->append("<dt>Stroke Width:</dt><dd>");
+ str->appendScalar(this->getStrokeWidth());
+ str->append("</dd>");
+
+ str->append("<dt>Stroke Miter:</dt><dd>");
+ str->appendScalar(this->getStrokeMiter());
+ str->append("</dd>");
+
+ str->append("<dt>Flags:</dt><dd>(");
+ if (this->getFlags()) {
+ bool needSeparator = false;
+ SkAddFlagToString(str, this->isAntiAlias(), "AntiAlias", &needSeparator);
+ SkAddFlagToString(str, this->isDither(), "Dither", &needSeparator);
+ SkAddFlagToString(str, this->isUnderlineText(), "UnderlineText", &needSeparator);
+ SkAddFlagToString(str, this->isStrikeThruText(), "StrikeThruText", &needSeparator);
+ SkAddFlagToString(str, this->isFakeBoldText(), "FakeBoldText", &needSeparator);
+ SkAddFlagToString(str, this->isLinearText(), "LinearText", &needSeparator);
+ SkAddFlagToString(str, this->isSubpixelText(), "SubpixelText", &needSeparator);
+ SkAddFlagToString(str, this->isDevKernText(), "DevKernText", &needSeparator);
+ SkAddFlagToString(str, this->isLCDRenderText(), "LCDRenderText", &needSeparator);
+ SkAddFlagToString(str, this->isEmbeddedBitmapText(),
+ "EmbeddedBitmapText", &needSeparator);
+ SkAddFlagToString(str, this->isAutohinted(), "Autohinted", &needSeparator);
+ SkAddFlagToString(str, this->isVerticalText(), "VerticalText", &needSeparator);
+ SkAddFlagToString(str, SkToBool(this->getFlags() & SkPaint::kGenA8FromLCD_Flag),
+ "GenA8FromLCD", &needSeparator);
+ } else {
+ str->append("None");
+ }
+ str->append(")</dd>");
+
+ str->append("<dt>FilterLevel:</dt><dd>");
+ static const char* gFilterQualityStrings[] = { "None", "Low", "Medium", "High" };
+ str->append(gFilterQualityStrings[this->getFilterQuality()]);
+ str->append("</dd>");
+
+ str->append("<dt>TextAlign:</dt><dd>");
+ static const char* gTextAlignStrings[SkPaint::kAlignCount] = { "Left", "Center", "Right" };
+ str->append(gTextAlignStrings[this->getTextAlign()]);
+ str->append("</dd>");
+
+ str->append("<dt>CapType:</dt><dd>");
+ static const char* gStrokeCapStrings[SkPaint::kCapCount] = { "Butt", "Round", "Square" };
+ str->append(gStrokeCapStrings[this->getStrokeCap()]);
+ str->append("</dd>");
+
+ str->append("<dt>JoinType:</dt><dd>");
+ static const char* gJoinStrings[SkPaint::kJoinCount] = { "Miter", "Round", "Bevel" };
+ str->append(gJoinStrings[this->getStrokeJoin()]);
+ str->append("</dd>");
+
+ str->append("<dt>Style:</dt><dd>");
+ static const char* gStyleStrings[SkPaint::kStyleCount] = { "Fill", "Stroke", "StrokeAndFill" };
+ str->append(gStyleStrings[this->getStyle()]);
+ str->append("</dd>");
+
+ str->append("<dt>TextEncoding:</dt><dd>");
+ static const char* gTextEncodingStrings[] = { "UTF8", "UTF16", "UTF32", "GlyphID" };
+ str->append(gTextEncodingStrings[this->getTextEncoding()]);
+ str->append("</dd>");
+
+ str->append("<dt>Hinting:</dt><dd>");
+ static const char* gHintingStrings[] = { "None", "Slight", "Normal", "Full" };
+ str->append(gHintingStrings[this->getHinting()]);
+ str->append("</dd>");
+
+ str->append("</dd></dl></dl>");
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool has_thick_frame(const SkPaint& paint) {
+ return paint.getStrokeWidth() > 0 &&
+ paint.getStyle() != SkPaint::kFill_Style;
+}
+
+SkTextBaseIter::SkTextBaseIter(const char text[], size_t length,
+ const SkPaint& paint,
+ bool applyStrokeAndPathEffects)
+ : fPaint(paint) {
+ fGlyphCacheProc = SkPaint::GetGlyphCacheProc(paint.getTextEncoding(),
+ paint.isDevKernText(),
+ true);
+
+ fPaint.setLinearText(true);
+ fPaint.setMaskFilter(nullptr); // don't want this affecting our path-cache lookup
+
+ if (fPaint.getPathEffect() == nullptr && !has_thick_frame(fPaint)) {
+ applyStrokeAndPathEffects = false;
+ }
+
+ // can't use our canonical size if we need to apply patheffects
+ if (fPaint.getPathEffect() == nullptr) {
+ fPaint.setTextSize(SkIntToScalar(SkPaint::kCanonicalTextSizeForPaths));
+ fScale = paint.getTextSize() / SkPaint::kCanonicalTextSizeForPaths;
+ if (has_thick_frame(fPaint)) {
+ fPaint.setStrokeWidth(fPaint.getStrokeWidth() / fScale);
+ }
+ } else {
+ fScale = SK_Scalar1;
+ }
+
+ if (!applyStrokeAndPathEffects) {
+ fPaint.setStyle(SkPaint::kFill_Style);
+ fPaint.setPathEffect(nullptr);
+ }
+
+ // SRGBTODO: Is this correct?
+ fCache = fPaint.detachCache(nullptr, SkPaint::kFakeGammaAndBoostContrast_ScalerContextFlags,
+ nullptr);
+
+ SkPaint::Style style = SkPaint::kFill_Style;
+ sk_sp<SkPathEffect> pe;
+
+ if (!applyStrokeAndPathEffects) {
+ style = paint.getStyle(); // restore
+ pe = sk_ref_sp(paint.getPathEffect()); // restore
+ }
+ fPaint.setStyle(style);
+ fPaint.setPathEffect(pe);
+ fPaint.setMaskFilter(sk_ref_sp(paint.getMaskFilter())); // restore
+
+ // now compute fXOffset if needed
+
+ SkScalar xOffset = 0;
+ if (paint.getTextAlign() != SkPaint::kLeft_Align) { // need to measure first
+ int count;
+ SkScalar width = SkScalarMul(fPaint.measure_text(fCache, text, length,
+ &count, nullptr), fScale);
+ if (paint.getTextAlign() == SkPaint::kCenter_Align) {
+ width = SkScalarHalf(width);
+ }
+ xOffset = -width;
+ }
+ fXPos = xOffset;
+ fPrevAdvance = 0;
+
+ fText = text;
+ fStop = text + length;
+
+ fXYIndex = paint.isVerticalText() ? 1 : 0;
+}
+
+SkTextBaseIter::~SkTextBaseIter() {
+ SkGlyphCache::AttachCache(fCache);
+}
+
+bool SkTextToPathIter::next(const SkPath** path, SkScalar* xpos) {
+ if (fText < fStop) {
+ const SkGlyph& glyph = fGlyphCacheProc(fCache, &fText);
+
+ fXPos += SkScalarMul(fPrevAdvance + fAutoKern.adjust(glyph), fScale);
+ fPrevAdvance = advance(glyph, fXYIndex); // + fPaint.getTextTracking();
+
+ if (glyph.fWidth) {
+ if (path) {
+ *path = fCache->findPath(glyph);
+ }
+ } else {
+ if (path) {
+ *path = nullptr;
+ }
+ }
+ if (xpos) {
+ *xpos = fXPos;
+ }
+ return true;
+ }
+ return false;
+}
+
+bool SkTextInterceptsIter::next(SkScalar* array, int* count) {
+ const SkGlyph& glyph = fGlyphCacheProc(fCache, &fText);
+ fXPos += SkScalarMul(fPrevAdvance + fAutoKern.adjust(glyph), fScale);
+ fPrevAdvance = advance(glyph, fXYIndex); // + fPaint.getTextTracking();
+ if (fCache->findPath(glyph)) {
+ fCache->findIntercepts(fBounds, fScale, fXPos, SkToBool(fXYIndex),
+ const_cast<SkGlyph*>(&glyph), array, count);
+ }
+ return fText < fStop;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// return true if the filter exists, and may affect alpha
+static bool affects_alpha(const SkColorFilter* cf) {
+ return cf && !(cf->getFlags() & SkColorFilter::kAlphaUnchanged_Flag);
+}
+
+// return true if the filter exists, and may affect alpha
+static bool affects_alpha(const SkImageFilter* imf) {
+ // TODO: check if we should allow imagefilters to broadcast that they don't affect alpha
+ // ala colorfilters
+ return imf != nullptr;
+}
+
+bool SkPaint::nothingToDraw() const {
+ if (fDrawLooper) {
+ return false;
+ }
+ switch ((SkBlendMode)fBlendMode) {
+ case SkBlendMode::kSrcOver:
+ case SkBlendMode::kSrcATop:
+ case SkBlendMode::kDstOut:
+ case SkBlendMode::kDstOver:
+ case SkBlendMode::kPlus:
+ if (0 == this->getAlpha()) {
+ return !affects_alpha(fColorFilter.get()) && !affects_alpha(fImageFilter.get());
+ }
+ break;
+ case SkBlendMode::kDst:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+uint32_t SkPaint::getHash() const {
+    // We're going to hash 8 pointers and 7 32-bit values, finishing up with fBitfields,
+    // so fBitfields should be 8 pointers and 7 32-bit values from the start.
+ static_assert(offsetof(SkPaint, fBitfields) == 8 * sizeof(void*) + 7 * sizeof(uint32_t),
+ "SkPaint_notPackedTightly");
+ return SkOpts::hash(reinterpret_cast<const uint32_t*>(this),
+ offsetof(SkPaint, fBitfields) + sizeof(fBitfields));
+}
diff --git a/gfx/skia/skia/src/core/SkPaintDefaults.h b/gfx/skia/skia/src/core/SkPaintDefaults.h
new file mode 100644
index 000000000..3ea1cd305
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPaintDefaults.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPaintDefaults_DEFINED
+#define SkPaintDefaults_DEFINED
+
+#include "SkPaint.h"
+
+/**
+ * Any of these can be specified by the build system (or SkUserConfig.h)
+ * to change the default values for a SkPaint. This file should not be
+ * edited directly.
+ */
+
+#ifndef SkPaintDefaults_Flags
+ #define SkPaintDefaults_Flags 0
+#endif
+
+#ifndef SkPaintDefaults_TextSize
+ #define SkPaintDefaults_TextSize SkIntToScalar(12)
+#endif
+
+#ifndef SkPaintDefaults_Hinting
+ #define SkPaintDefaults_Hinting SkPaint::kNormal_Hinting
+#endif
+
+#ifndef SkPaintDefaults_MiterLimit
+ #define SkPaintDefaults_MiterLimit SkIntToScalar(4)
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPaintPriv.cpp b/gfx/skia/skia/src/core/SkPaintPriv.cpp
new file mode 100644
index 000000000..cbe2558c2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPaintPriv.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmap.h"
+#include "SkColorFilter.h"
+#include "SkPaintPriv.h"
+#include "SkImage.h"
+#include "SkPaint.h"
+#include "SkShader.h"
+
+static bool changes_alpha(const SkPaint& paint) {
+ SkColorFilter* cf = paint.getColorFilter();
+ return cf && !(cf->getFlags() & SkColorFilter::kAlphaUnchanged_Flag);
+}
+
+bool SkPaintPriv::Overwrites(const SkPaint* paint, ShaderOverrideOpacity overrideOpacity) {
+ if (!paint) {
+ // No paint means we default to SRC_OVER, so we overwrite iff our shader-override
+ // is opaque, or we don't have one.
+ return overrideOpacity != kNotOpaque_ShaderOverrideOpacity;
+ }
+
+ SkXfermode::SrcColorOpacity opacityType = SkXfermode::kUnknown_SrcColorOpacity;
+
+ if (!changes_alpha(*paint)) {
+ const unsigned paintAlpha = paint->getAlpha();
+ if (0xff == paintAlpha && overrideOpacity != kNotOpaque_ShaderOverrideOpacity &&
+ (!paint->getShader() || paint->getShader()->isOpaque()))
+ {
+ opacityType = SkXfermode::kOpaque_SrcColorOpacity;
+ } else if (0 == paintAlpha) {
+ if (overrideOpacity == kNone_ShaderOverrideOpacity && !paint->getShader()) {
+ opacityType = SkXfermode::kTransparentBlack_SrcColorOpacity;
+ } else {
+ opacityType = SkXfermode::kTransparentAlpha_SrcColorOpacity;
+ }
+ }
+ }
+
+ return SkXfermode::IsOpaque(paint->getBlendMode(), opacityType);
+}
+
+bool SkPaintPriv::Overwrites(const SkBitmap& bitmap, const SkPaint* paint) {
+ return Overwrites(paint, bitmap.isOpaque() ? kOpaque_ShaderOverrideOpacity
+ : kNotOpaque_ShaderOverrideOpacity);
+}
+
+bool SkPaintPriv::Overwrites(const SkImage* image, const SkPaint* paint) {
+ return Overwrites(paint, image->isOpaque() ? kOpaque_ShaderOverrideOpacity
+ : kNotOpaque_ShaderOverrideOpacity);
+}
diff --git a/gfx/skia/skia/src/core/SkPaintPriv.h b/gfx/skia/skia/src/core/SkPaintPriv.h
new file mode 100644
index 000000000..1cf404075
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPaintPriv.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPaintPriv_DEFINED
+#define SkPaintPriv_DEFINED
+
+#include "SkTypes.h"
+
+class SkBitmap;
+class SkImage;
+class SkPaint;
+
+class SkPaintPriv {
+public:
+ enum ShaderOverrideOpacity {
+ kNone_ShaderOverrideOpacity, //!< there is no overriding shader (bitmap or image)
+ kOpaque_ShaderOverrideOpacity, //!< the overriding shader is opaque
+ kNotOpaque_ShaderOverrideOpacity, //!< the overriding shader may not be opaque
+ };
+
+ /**
+     * Returns true if drawing with this paint (or nullptr) will overwrite all affected pixels.
+ *
+     * Note: the result is conservative, meaning it may return false even though the paint might
+ * in fact overwrite its pixels.
+ */
+ static bool Overwrites(const SkPaint* paint, ShaderOverrideOpacity);
+
+ static bool Overwrites(const SkPaint& paint) {
+ return Overwrites(&paint, kNone_ShaderOverrideOpacity);
+ }
+
+ /**
+     * Returns true if drawing this bitmap with this paint (or nullptr) will overwrite all affected
+ * pixels.
+ */
+ static bool Overwrites(const SkBitmap&, const SkPaint* paint);
+
+ /**
+     * Returns true if drawing this image with this paint (or nullptr) will overwrite all affected
+ * pixels.
+ */
+ static bool Overwrites(const SkImage*, const SkPaint* paint);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPath.cpp b/gfx/skia/skia/src/core/SkPath.cpp
new file mode 100644
index 000000000..a2ef54620
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPath.cpp
@@ -0,0 +1,3388 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <cmath>
+#include "SkBuffer.h"
+#include "SkCubicClipper.h"
+#include "SkErrorInternals.h"
+#include "SkGeometry.h"
+#include "SkMath.h"
+#include "SkPathPriv.h"
+#include "SkPathRef.h"
+#include "SkRRect.h"
+
+////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Path.bounds is defined to be the bounds of all the control points.
+ * If we called bounds.join(r) we would skip r if r was empty, which breaks
+ * our promise. Hence we have a custom joiner that doesn't look at emptiness
+ */
+static void joinNoEmptyChecks(SkRect* dst, const SkRect& src) {
+ dst->fLeft = SkMinScalar(dst->fLeft, src.fLeft);
+ dst->fTop = SkMinScalar(dst->fTop, src.fTop);
+ dst->fRight = SkMaxScalar(dst->fRight, src.fRight);
+ dst->fBottom = SkMaxScalar(dst->fBottom, src.fBottom);
+}
+
+static bool is_degenerate(const SkPath& path) {
+ SkPath::Iter iter(path, false);
+ SkPoint pts[4];
+ return SkPath::kDone_Verb == iter.next(pts);
+}
+
+class SkAutoDisableDirectionCheck {
+public:
+ SkAutoDisableDirectionCheck(SkPath* path) : fPath(path) {
+ fSaved = static_cast<SkPathPriv::FirstDirection>(fPath->fFirstDirection.load());
+ }
+
+ ~SkAutoDisableDirectionCheck() {
+ fPath->fFirstDirection = fSaved;
+ }
+
+private:
+ SkPath* fPath;
+ SkPathPriv::FirstDirection fSaved;
+};
+#define SkAutoDisableDirectionCheck(...) SK_REQUIRE_LOCAL_VAR(SkAutoDisableDirectionCheck)
+
+/* This guy's constructor/destructor bracket a path editing operation. It is
+ used when we know the bounds of the amount we are going to add to the path
+ (usually a new contour, but not required).
+
+ It captures some state about the path up front (i.e. if it already has a
+ cached bounds), and then if it can, it updates the cache bounds explicitly,
+ avoiding the need to revisit all of the points in getBounds().
+
+ It also notes if the path was originally degenerate, and if so, sets
+ isConvex to true. Thus it can only be used if the contour being added is
+ convex.
+ */
+class SkAutoPathBoundsUpdate {
+public:
+ SkAutoPathBoundsUpdate(SkPath* path, const SkRect& r) : fRect(r) {
+ this->init(path);
+ }
+
+ SkAutoPathBoundsUpdate(SkPath* path, SkScalar left, SkScalar top,
+ SkScalar right, SkScalar bottom) {
+ fRect.set(left, top, right, bottom);
+ this->init(path);
+ }
+
+ ~SkAutoPathBoundsUpdate() {
+ fPath->setConvexity(fDegenerate ? SkPath::kConvex_Convexity
+ : SkPath::kUnknown_Convexity);
+ if (fEmpty || fHasValidBounds) {
+ fPath->setBounds(fRect);
+ }
+ }
+
+private:
+ SkPath* fPath;
+ SkRect fRect;
+ bool fHasValidBounds;
+ bool fDegenerate;
+ bool fEmpty;
+
+ void init(SkPath* path) {
+ // Cannot use fRect for our bounds unless we know it is sorted
+ fRect.sort();
+ fPath = path;
+ // Mark the path's bounds as dirty if (1) they are, or (2) the path
+ // is non-finite, and therefore its bounds are not meaningful
+ fHasValidBounds = path->hasComputedBounds() && path->isFinite();
+ fEmpty = path->isEmpty();
+ if (fHasValidBounds && !fEmpty) {
+ joinNoEmptyChecks(&fRect, fPath->getBounds());
+ }
+ fDegenerate = is_degenerate(*path);
+ }
+};
+#define SkAutoPathBoundsUpdate(...) SK_REQUIRE_LOCAL_VAR(SkAutoPathBoundsUpdate)
+
+////////////////////////////////////////////////////////////////////////////
+
+/*
+ Stores the verbs and points as they are given to us, with exceptions:
+      - we only record "Close" if it was immediately preceded by Move | Line | Quad | Cubic
+ - we insert a Move(0,0) if Line | Quad | Cubic is our first command
+
+ The iterator does more cleanup, especially if forceClose == true
+ 1. If we encounter degenerate segments, remove them
+ 2. if we encounter Close, return a cons'd up Line() first (if the curr-pt != start-pt)
+    3. if we encounter Move without a preceding Close, and forceClose is true, goto #2
+ 4. if we encounter Line | Quad | Cubic after Close, cons up a Move
+*/
+
+////////////////////////////////////////////////////////////////////////////
+
+// flag to require a moveTo if we begin with something else, like lineTo etc.
+#define INITIAL_LASTMOVETOINDEX_VALUE ~0
+
+SkPath::SkPath()
+ : fPathRef(SkPathRef::CreateEmpty()) {
+ this->resetFields();
+ fIsVolatile = false;
+}
+
+void SkPath::resetFields() {
+ //fPathRef is assumed to have been emptied by the caller.
+ fLastMoveToIndex = INITIAL_LASTMOVETOINDEX_VALUE;
+ fFillType = kWinding_FillType;
+ fConvexity = kUnknown_Convexity;
+ fFirstDirection = SkPathPriv::kUnknown_FirstDirection;
+
+ // We don't touch Android's fSourcePath. It's used to track texture garbage collection, so we
+ // don't want to muck with it if it's been set to something non-nullptr.
+}
+
+SkPath::SkPath(const SkPath& that)
+ : fPathRef(SkRef(that.fPathRef.get())) {
+ this->copyFields(that);
+ SkDEBUGCODE(that.validate();)
+}
+
+SkPath::~SkPath() {
+ SkDEBUGCODE(this->validate();)
+}
+
+SkPath& SkPath::operator=(const SkPath& that) {
+ SkDEBUGCODE(that.validate();)
+
+ if (this != &that) {
+ fPathRef.reset(SkRef(that.fPathRef.get()));
+ this->copyFields(that);
+ }
+ SkDEBUGCODE(this->validate();)
+ return *this;
+}
+
+void SkPath::copyFields(const SkPath& that) {
+ //fPathRef is assumed to have been set by the caller.
+ fLastMoveToIndex = that.fLastMoveToIndex;
+ fFillType = that.fFillType;
+ fConvexity = that.fConvexity;
+ // Simulate fFirstDirection = that.fFirstDirection;
+ fFirstDirection.store(that.fFirstDirection.load());
+ fIsVolatile = that.fIsVolatile;
+}
+
+bool operator==(const SkPath& a, const SkPath& b) {
+ // note: don't need to look at isConvex or bounds, since just comparing the
+ // raw data is sufficient.
+ return &a == &b ||
+ (a.fFillType == b.fFillType && *a.fPathRef.get() == *b.fPathRef.get());
+}
+
+void SkPath::swap(SkPath& that) {
+ if (this != &that) {
+ fPathRef.swap(that.fPathRef);
+ SkTSwap<int>(fLastMoveToIndex, that.fLastMoveToIndex);
+ SkTSwap<uint8_t>(fFillType, that.fFillType);
+ SkTSwap<uint8_t>(fConvexity, that.fConvexity);
+ // Simulate SkTSwap<uint8_t>(fFirstDirection, that.fFirstDirection);
+ uint8_t temp = fFirstDirection;
+ fFirstDirection.store(that.fFirstDirection.load());
+ that.fFirstDirection.store(temp);
+ SkTSwap<SkBool8>(fIsVolatile, that.fIsVolatile);
+ }
+}
+
+bool SkPath::isInterpolatable(const SkPath& compare) const {
+ int count = fPathRef->countVerbs();
+ if (count != compare.fPathRef->countVerbs()) {
+ return false;
+ }
+ if (!count) {
+ return true;
+ }
+ if (memcmp(fPathRef->verbsMemBegin(), compare.fPathRef->verbsMemBegin(),
+ count)) {
+ return false;
+ }
+ return !fPathRef->countWeights() ||
+ !SkToBool(memcmp(fPathRef->conicWeights(), compare.fPathRef->conicWeights(),
+ fPathRef->countWeights() * sizeof(*fPathRef->conicWeights())));
+}
+
+bool SkPath::interpolate(const SkPath& ending, SkScalar weight, SkPath* out) const {
+ int verbCount = fPathRef->countVerbs();
+ if (verbCount != ending.fPathRef->countVerbs()) {
+ return false;
+ }
+ if (!verbCount) {
+ return true;
+ }
+ out->reset();
+ out->addPath(*this);
+ fPathRef->interpolate(*ending.fPathRef, weight, out->fPathRef);
+ return true;
+}
+
+static inline bool check_edge_against_rect(const SkPoint& p0,
+ const SkPoint& p1,
+ const SkRect& rect,
+ SkPathPriv::FirstDirection dir) {
+ const SkPoint* edgeBegin;
+ SkVector v;
+ if (SkPathPriv::kCW_FirstDirection == dir) {
+ v = p1 - p0;
+ edgeBegin = &p0;
+ } else {
+ v = p0 - p1;
+ edgeBegin = &p1;
+ }
+ if (v.fX || v.fY) {
+ // check the cross product of v with the vec from edgeBegin to each rect corner
+ SkScalar yL = SkScalarMul(v.fY, rect.fLeft - edgeBegin->fX);
+ SkScalar xT = SkScalarMul(v.fX, rect.fTop - edgeBegin->fY);
+ SkScalar yR = SkScalarMul(v.fY, rect.fRight - edgeBegin->fX);
+ SkScalar xB = SkScalarMul(v.fX, rect.fBottom - edgeBegin->fY);
+ if ((xT < yL) || (xT < yR) || (xB < yL) || (xB < yR)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool SkPath::conservativelyContainsRect(const SkRect& rect) const {
+ // This only handles non-degenerate convex paths currently.
+ if (kConvex_Convexity != this->getConvexity()) {
+ return false;
+ }
+
+ SkPathPriv::FirstDirection direction;
+ if (!SkPathPriv::CheapComputeFirstDirection(*this, &direction)) {
+ return false;
+ }
+
+ SkPoint firstPt;
+ SkPoint prevPt;
+ SkPath::Iter iter(*this, true);
+ SkPath::Verb verb;
+ SkPoint pts[4];
+ SkDEBUGCODE(int moveCnt = 0;)
+ SkDEBUGCODE(int segmentCount = 0;)
+ SkDEBUGCODE(int closeCount = 0;)
+
+ while ((verb = iter.next(pts, true, true)) != kDone_Verb) {
+ int nextPt = -1;
+ switch (verb) {
+ case kMove_Verb:
+ SkASSERT(!segmentCount && !closeCount);
+ SkDEBUGCODE(++moveCnt);
+ firstPt = prevPt = pts[0];
+ break;
+ case kLine_Verb:
+ nextPt = 1;
+ SkASSERT(moveCnt && !closeCount);
+ SkDEBUGCODE(++segmentCount);
+ break;
+ case kQuad_Verb:
+ case kConic_Verb:
+ SkASSERT(moveCnt && !closeCount);
+ SkDEBUGCODE(++segmentCount);
+ nextPt = 2;
+ break;
+ case kCubic_Verb:
+ SkASSERT(moveCnt && !closeCount);
+ SkDEBUGCODE(++segmentCount);
+ nextPt = 3;
+ break;
+ case kClose_Verb:
+ SkDEBUGCODE(++closeCount;)
+ break;
+ default:
+ SkDEBUGFAIL("unknown verb");
+ }
+ if (-1 != nextPt) {
+ if (SkPath::kConic_Verb == verb) {
+ SkConic orig;
+ orig.set(pts, iter.conicWeight());
+ SkPoint quadPts[5];
+ int count = orig.chopIntoQuadsPOW2(quadPts, 1);
+ SkASSERT_RELEASE(2 == count);
+
+ if (!check_edge_against_rect(quadPts[0], quadPts[2], rect, direction)) {
+ return false;
+ }
+ if (!check_edge_against_rect(quadPts[2], quadPts[4], rect, direction)) {
+ return false;
+ }
+ } else {
+ if (!check_edge_against_rect(prevPt, pts[nextPt], rect, direction)) {
+ return false;
+ }
+ }
+ prevPt = pts[nextPt];
+ }
+ }
+
+ return check_edge_against_rect(prevPt, firstPt, rect, direction);
+}
+
+uint32_t SkPath::getGenerationID() const {
+ uint32_t genID = fPathRef->genID();
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ SkASSERT((unsigned)fFillType < (1 << (32 - kPathRefGenIDBitCnt)));
+ genID |= static_cast<uint32_t>(fFillType) << kPathRefGenIDBitCnt;
+#endif
+ return genID;
+}
+
+void SkPath::reset() {
+ SkDEBUGCODE(this->validate();)
+
+ fPathRef.reset(SkPathRef::CreateEmpty());
+ this->resetFields();
+}
+
+void SkPath::rewind() {
+ SkDEBUGCODE(this->validate();)
+
+ SkPathRef::Rewind(&fPathRef);
+ this->resetFields();
+}
+
+bool SkPath::isLastContourClosed() const {
+ int verbCount = fPathRef->countVerbs();
+ if (0 == verbCount) {
+ return false;
+ }
+ return kClose_Verb == fPathRef->atVerb(verbCount - 1);
+}
+
+bool SkPath::isLine(SkPoint line[2]) const {
+ int verbCount = fPathRef->countVerbs();
+
+ if (2 == verbCount) {
+ SkASSERT(kMove_Verb == fPathRef->atVerb(0));
+ if (kLine_Verb == fPathRef->atVerb(1)) {
+ SkASSERT(2 == fPathRef->countPoints());
+ if (line) {
+ const SkPoint* pts = fPathRef->points();
+ line[0] = pts[0];
+ line[1] = pts[1];
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ Determines if path is a rect by keeping track of changes in direction
+ and looking for a loop either clockwise or counterclockwise.
+
+ The direction is computed such that:
+ 0: vertical up
+ 1: horizontal left
+ 2: vertical down
+ 3: horizontal right
+
+A rectangle cycles up/right/down/left or up/left/down/right.
+
+The test fails if:
+ The path is closed, and followed by a line.
+ A second move creates a new endpoint.
+ A diagonal line is parsed.
+   There are more than four changes of direction.
+   There's a discontinuity on the line (e.g., a move in the middle).
+ The line reverses direction.
+ The path contains a quadratic or cubic.
+ The path contains fewer than four points.
+ *The rectangle doesn't complete a cycle.
+ *The final point isn't equal to the first point.
+
+ *These last two conditions we relax if we have a 3-edge path that would
+ form a rectangle if it were closed (as we do when we fill a path)
+
+It's OK if the path has:
+ Several colinear line segments composing a rectangle side.
+ Single points on the rectangle side.
+
+The direction takes advantage of the corners found since opposite sides
+must travel in opposite directions.
+
+FIXME: Allow colinear quads and cubics to be treated like lines.
+FIXME: If the API passes fill-only, return true if the filled stroke
+ is a rectangle, though the caller failed to close the path.
+
+ first,last,next direction state-machine:
+ 0x1 is set if the segment is horizontal
+ 0x2 is set if the segment is moving to the right or down
+ thus:
+ two directions are opposites iff (dirA ^ dirB) == 0x2
+ two directions are perpendicular iff (dirA ^ dirB) == 0x1
+
+ */
+static int rect_make_dir(SkScalar dx, SkScalar dy) {
+ return ((0 != dx) << 0) | ((dx > 0 || dy > 0) << 1);
+}
+bool SkPath::isRectContour(bool allowPartial, int* currVerb, const SkPoint** ptsPtr,
+ bool* isClosed, Direction* direction) const {
+ int corners = 0;
+ SkPoint first, last;
+ const SkPoint* pts = *ptsPtr;
+ const SkPoint* savePts = nullptr;
+ first.set(0, 0);
+ last.set(0, 0);
+ int firstDirection = 0;
+ int lastDirection = 0;
+ int nextDirection = 0;
+ bool closedOrMoved = false;
+ bool autoClose = false;
+ bool insertClose = false;
+ int verbCnt = fPathRef->countVerbs();
+ while (*currVerb < verbCnt && (!allowPartial || !autoClose)) {
+ uint8_t verb = insertClose ? (uint8_t) kClose_Verb : fPathRef->atVerb(*currVerb);
+ switch (verb) {
+ case kClose_Verb:
+ savePts = pts;
+ pts = *ptsPtr;
+ autoClose = true;
+ insertClose = false;
+ case kLine_Verb: {
+ SkScalar left = last.fX;
+ SkScalar top = last.fY;
+ SkScalar right = pts->fX;
+ SkScalar bottom = pts->fY;
+ ++pts;
+ if (left != right && top != bottom) {
+ return false; // diagonal
+ }
+ if (left == right && top == bottom) {
+ break; // single point on side OK
+ }
+ nextDirection = rect_make_dir(right - left, bottom - top);
+ if (0 == corners) {
+ firstDirection = nextDirection;
+ first = last;
+ last = pts[-1];
+ corners = 1;
+ closedOrMoved = false;
+ break;
+ }
+ if (closedOrMoved) {
+ return false; // closed followed by a line
+ }
+ if (autoClose && nextDirection == firstDirection) {
+ break; // colinear with first
+ }
+ closedOrMoved = autoClose;
+ if (lastDirection != nextDirection) {
+ if (++corners > 4) {
+ return false; // too many direction changes
+ }
+ }
+ last = pts[-1];
+ if (lastDirection == nextDirection) {
+ break; // colinear segment
+ }
+ // Possible values for corners are 2, 3, and 4.
+ // When corners == 3, nextDirection opposes firstDirection.
+ // Otherwise, nextDirection at corner 2 opposes corner 4.
+ int turn = firstDirection ^ (corners - 1);
+ int directionCycle = 3 == corners ? 0 : nextDirection ^ turn;
+ if ((directionCycle ^ turn) != nextDirection) {
+ return false; // direction didn't follow cycle
+ }
+ break;
+ }
+ case kQuad_Verb:
+ case kConic_Verb:
+ case kCubic_Verb:
+ return false; // quadratic, cubic not allowed
+ case kMove_Verb:
+ if (allowPartial && !autoClose && firstDirection) {
+ insertClose = true;
+ *currVerb -= 1; // try move again afterwards
+ goto addMissingClose;
+ }
+ if (pts != *ptsPtr) {
+ return false;
+ }
+ last = *pts++;
+ closedOrMoved = true;
+ break;
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ break;
+ }
+ *currVerb += 1;
+ lastDirection = nextDirection;
+addMissingClose:
+ ;
+ }
+ // Success if 4 corners and first point equals last
+ bool result = 4 == corners && (first == last || autoClose);
+ if (!result) {
+ // check if we are just an incomplete rectangle, in which case we can
+ // return true, but not claim to be closed.
+ // e.g.
+ // 3 sided rectangle
+ // 4 sided but the last edge is not long enough to reach the start
+ //
+ SkScalar closeX = first.x() - last.x();
+ SkScalar closeY = first.y() - last.y();
+ if (closeX && closeY) {
+ return false; // we're diagonal, abort (can we ever reach this?)
+ }
+ int closeDirection = rect_make_dir(closeX, closeY);
+ // make sure the close-segment doesn't double-back on itself
+ if (3 == corners || (4 == corners && closeDirection == lastDirection)) {
+ result = true;
+ autoClose = false; // we are not closed
+ }
+ }
+ if (savePts) {
+ *ptsPtr = savePts;
+ }
+ if (result && isClosed) {
+ *isClosed = autoClose;
+ }
+ if (result && direction) {
+ *direction = firstDirection == ((lastDirection + 1) & 3) ? kCCW_Direction : kCW_Direction;
+ }
+ return result;
+}
+
+bool SkPath::isRect(SkRect* rect, bool* isClosed, Direction* direction) const {
+ SkDEBUGCODE(this->validate();)
+ int currVerb = 0;
+ const SkPoint* pts = fPathRef->points();
+ const SkPoint* first = pts;
+ if (!this->isRectContour(false, &currVerb, &pts, isClosed, direction)) {
+ return false;
+ }
+ if (rect) {
+ int32_t num = SkToS32(pts - first);
+ if (num) {
+ rect->set(first, num);
+ } else {
+ // 'pts' isn't updated for open rects
+ *rect = this->getBounds();
+ }
+ }
+ return true;
+}
+
+bool SkPath::isNestedFillRects(SkRect rects[2], Direction dirs[2]) const {
+ SkDEBUGCODE(this->validate();)
+ int currVerb = 0;
+ const SkPoint* pts = fPathRef->points();
+ const SkPoint* first = pts;
+ Direction testDirs[2];
+ if (!isRectContour(true, &currVerb, &pts, nullptr, &testDirs[0])) {
+ return false;
+ }
+ const SkPoint* last = pts;
+ SkRect testRects[2];
+ bool isClosed;
+ if (isRectContour(false, &currVerb, &pts, &isClosed, &testDirs[1])) {
+ testRects[0].set(first, SkToS32(last - first));
+ if (!isClosed) {
+ pts = fPathRef->points() + fPathRef->countPoints();
+ }
+ testRects[1].set(last, SkToS32(pts - last));
+ if (testRects[0].contains(testRects[1])) {
+ if (rects) {
+ rects[0] = testRects[0];
+ rects[1] = testRects[1];
+ }
+ if (dirs) {
+ dirs[0] = testDirs[0];
+ dirs[1] = testDirs[1];
+ }
+ return true;
+ }
+ if (testRects[1].contains(testRects[0])) {
+ if (rects) {
+ rects[0] = testRects[1];
+ rects[1] = testRects[0];
+ }
+ if (dirs) {
+ dirs[0] = testDirs[1];
+ dirs[1] = testDirs[0];
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+int SkPath::countPoints() const {
+ return fPathRef->countPoints();
+}
+
+int SkPath::getPoints(SkPoint dst[], int max) const {
+ SkDEBUGCODE(this->validate();)
+
+ SkASSERT(max >= 0);
+ SkASSERT(!max || dst);
+ int count = SkMin32(max, fPathRef->countPoints());
+ sk_careful_memcpy(dst, fPathRef->points(), count * sizeof(SkPoint));
+ return fPathRef->countPoints();
+}
+
+SkPoint SkPath::getPoint(int index) const {
+ if ((unsigned)index < (unsigned)fPathRef->countPoints()) {
+ return fPathRef->atPoint(index);
+ }
+ return SkPoint::Make(0, 0);
+}
+
+int SkPath::countVerbs() const {
+ return fPathRef->countVerbs();
+}
+
+static inline void copy_verbs_reverse(uint8_t* inorderDst,
+ const uint8_t* reversedSrc,
+ int count) {
+ for (int i = 0; i < count; ++i) {
+ inorderDst[i] = reversedSrc[~i];
+ }
+}
+
+int SkPath::getVerbs(uint8_t dst[], int max) const {
+ SkDEBUGCODE(this->validate();)
+
+ SkASSERT(max >= 0);
+ SkASSERT(!max || dst);
+ int count = SkMin32(max, fPathRef->countVerbs());
+ copy_verbs_reverse(dst, fPathRef->verbs(), count);
+ return fPathRef->countVerbs();
+}
+
+bool SkPath::getLastPt(SkPoint* lastPt) const {
+ SkDEBUGCODE(this->validate();)
+
+ int count = fPathRef->countPoints();
+ if (count > 0) {
+ if (lastPt) {
+ *lastPt = fPathRef->atPoint(count - 1);
+ }
+ return true;
+ }
+ if (lastPt) {
+ lastPt->set(0, 0);
+ }
+ return false;
+}
+
+void SkPath::setPt(int index, SkScalar x, SkScalar y) {
+ SkDEBUGCODE(this->validate();)
+
+ int count = fPathRef->countPoints();
+ if (count <= index) {
+ return;
+ } else {
+ SkPathRef::Editor ed(&fPathRef);
+ ed.atPoint(index)->set(x, y);
+ }
+}
+
+void SkPath::setLastPt(SkScalar x, SkScalar y) {
+ SkDEBUGCODE(this->validate();)
+
+ int count = fPathRef->countPoints();
+ if (count == 0) {
+ this->moveTo(x, y);
+ } else {
+ SkPathRef::Editor ed(&fPathRef);
+ ed.atPoint(count-1)->set(x, y);
+ }
+}
+
+void SkPath::setConvexity(Convexity c) {
+ if (fConvexity != c) {
+ fConvexity = c;
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// Construction methods
+
+#define DIRTY_AFTER_EDIT \
+ do { \
+ fConvexity = kUnknown_Convexity; \
+ fFirstDirection = SkPathPriv::kUnknown_FirstDirection; \
+ } while (0)
+
+void SkPath::incReserve(U16CPU inc) {
+ SkDEBUGCODE(this->validate();)
+ SkPathRef::Editor(&fPathRef, inc, inc);
+ SkDEBUGCODE(this->validate();)
+}
+
+void SkPath::moveTo(SkScalar x, SkScalar y) {
+ SkDEBUGCODE(this->validate();)
+
+ SkPathRef::Editor ed(&fPathRef);
+
+ // remember our index
+ fLastMoveToIndex = fPathRef->countPoints();
+
+ ed.growForVerb(kMove_Verb)->set(x, y);
+
+ DIRTY_AFTER_EDIT;
+}
+
+void SkPath::rMoveTo(SkScalar x, SkScalar y) {
+ SkPoint pt;
+ this->getLastPt(&pt);
+ this->moveTo(pt.fX + x, pt.fY + y);
+}
+
+void SkPath::injectMoveToIfNeeded() {
+ if (fLastMoveToIndex < 0) {
+ SkScalar x, y;
+ if (fPathRef->countVerbs() == 0) {
+ x = y = 0;
+ } else {
+ const SkPoint& pt = fPathRef->atPoint(~fLastMoveToIndex);
+ x = pt.fX;
+ y = pt.fY;
+ }
+ this->moveTo(x, y);
+ }
+}
+
+void SkPath::lineTo(SkScalar x, SkScalar y) {
+ SkDEBUGCODE(this->validate();)
+
+ this->injectMoveToIfNeeded();
+
+ SkPathRef::Editor ed(&fPathRef);
+ ed.growForVerb(kLine_Verb)->set(x, y);
+
+ DIRTY_AFTER_EDIT;
+}
+
+void SkPath::rLineTo(SkScalar x, SkScalar y) {
+ this->injectMoveToIfNeeded(); // This can change the result of this->getLastPt().
+ SkPoint pt;
+ this->getLastPt(&pt);
+ this->lineTo(pt.fX + x, pt.fY + y);
+}
+
+void SkPath::quadTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2) {
+ SkDEBUGCODE(this->validate();)
+
+ this->injectMoveToIfNeeded();
+
+ SkPathRef::Editor ed(&fPathRef);
+ SkPoint* pts = ed.growForVerb(kQuad_Verb);
+ pts[0].set(x1, y1);
+ pts[1].set(x2, y2);
+
+ DIRTY_AFTER_EDIT;
+}
+
+void SkPath::rQuadTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2) {
+ this->injectMoveToIfNeeded(); // This can change the result of this->getLastPt().
+ SkPoint pt;
+ this->getLastPt(&pt);
+ this->quadTo(pt.fX + x1, pt.fY + y1, pt.fX + x2, pt.fY + y2);
+}
+
+void SkPath::conicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2,
+ SkScalar w) {
+ // check for <= 0 or NaN with this test
+ if (!(w > 0)) {
+ this->lineTo(x2, y2);
+ } else if (!SkScalarIsFinite(w)) {
+ this->lineTo(x1, y1);
+ this->lineTo(x2, y2);
+ } else if (SK_Scalar1 == w) {
+ this->quadTo(x1, y1, x2, y2);
+ } else {
+ SkDEBUGCODE(this->validate();)
+
+ this->injectMoveToIfNeeded();
+
+ SkPathRef::Editor ed(&fPathRef);
+ SkPoint* pts = ed.growForVerb(kConic_Verb, w);
+ pts[0].set(x1, y1);
+ pts[1].set(x2, y2);
+
+ DIRTY_AFTER_EDIT;
+ }
+}
+
+void SkPath::rConicTo(SkScalar dx1, SkScalar dy1, SkScalar dx2, SkScalar dy2,
+ SkScalar w) {
+ this->injectMoveToIfNeeded(); // This can change the result of this->getLastPt().
+ SkPoint pt;
+ this->getLastPt(&pt);
+ this->conicTo(pt.fX + dx1, pt.fY + dy1, pt.fX + dx2, pt.fY + dy2, w);
+}
+
+void SkPath::cubicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2,
+ SkScalar x3, SkScalar y3) {
+ SkDEBUGCODE(this->validate();)
+
+ this->injectMoveToIfNeeded();
+
+ SkPathRef::Editor ed(&fPathRef);
+ SkPoint* pts = ed.growForVerb(kCubic_Verb);
+ pts[0].set(x1, y1);
+ pts[1].set(x2, y2);
+ pts[2].set(x3, y3);
+
+ DIRTY_AFTER_EDIT;
+}
+
+void SkPath::rCubicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2,
+ SkScalar x3, SkScalar y3) {
+ this->injectMoveToIfNeeded(); // This can change the result of this->getLastPt().
+ SkPoint pt;
+ this->getLastPt(&pt);
+ this->cubicTo(pt.fX + x1, pt.fY + y1, pt.fX + x2, pt.fY + y2,
+ pt.fX + x3, pt.fY + y3);
+}
+
+void SkPath::close() {
+ SkDEBUGCODE(this->validate();)
+
+ int count = fPathRef->countVerbs();
+ if (count > 0) {
+ switch (fPathRef->atVerb(count - 1)) {
+ case kLine_Verb:
+ case kQuad_Verb:
+ case kConic_Verb:
+ case kCubic_Verb:
+ case kMove_Verb: {
+ SkPathRef::Editor ed(&fPathRef);
+ ed.growForVerb(kClose_Verb);
+ break;
+ }
+ case kClose_Verb:
+ // don't add a close if it's the first verb or a repeat
+ break;
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ break;
+ }
+ }
+
+ // signal that we need a moveTo to follow us (unless we're done)
+#if 0
+ if (fLastMoveToIndex >= 0) {
+ fLastMoveToIndex = ~fLastMoveToIndex;
+ }
+#else
+ fLastMoveToIndex ^= ~fLastMoveToIndex >> (8 * sizeof(fLastMoveToIndex) - 1);
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+template <unsigned N>
+class PointIterator {
+public:
+ PointIterator(SkPath::Direction dir, unsigned startIndex)
+ : fCurrent(startIndex % N)
+ , fAdvance(dir == SkPath::kCW_Direction ? 1 : N - 1) { }
+
+ const SkPoint& current() const {
+ SkASSERT(fCurrent < N);
+ return fPts[fCurrent];
+ }
+
+ const SkPoint& next() {
+ fCurrent = (fCurrent + fAdvance) % N;
+ return this->current();
+ }
+
+protected:
+ SkPoint fPts[N];
+
+private:
+ unsigned fCurrent;
+ unsigned fAdvance;
+};
+
+class RectPointIterator : public PointIterator<4> {
+public:
+ RectPointIterator(const SkRect& rect, SkPath::Direction dir, unsigned startIndex)
+ : PointIterator(dir, startIndex) {
+
+ fPts[0] = SkPoint::Make(rect.fLeft, rect.fTop);
+ fPts[1] = SkPoint::Make(rect.fRight, rect.fTop);
+ fPts[2] = SkPoint::Make(rect.fRight, rect.fBottom);
+ fPts[3] = SkPoint::Make(rect.fLeft, rect.fBottom);
+ }
+};
+
+class OvalPointIterator : public PointIterator<4> {
+public:
+ OvalPointIterator(const SkRect& oval, SkPath::Direction dir, unsigned startIndex)
+ : PointIterator(dir, startIndex) {
+
+ const SkScalar cx = oval.centerX();
+ const SkScalar cy = oval.centerY();
+
+ fPts[0] = SkPoint::Make(cx, oval.fTop);
+ fPts[1] = SkPoint::Make(oval.fRight, cy);
+ fPts[2] = SkPoint::Make(cx, oval.fBottom);
+ fPts[3] = SkPoint::Make(oval.fLeft, cy);
+ }
+};
+
+class RRectPointIterator : public PointIterator<8> {
+public:
+ RRectPointIterator(const SkRRect& rrect, SkPath::Direction dir, unsigned startIndex)
+ : PointIterator(dir, startIndex) {
+
+ const SkRect& bounds = rrect.getBounds();
+ const SkScalar L = bounds.fLeft;
+ const SkScalar T = bounds.fTop;
+ const SkScalar R = bounds.fRight;
+ const SkScalar B = bounds.fBottom;
+
+ fPts[0] = SkPoint::Make(L + rrect.radii(SkRRect::kUpperLeft_Corner).fX, T);
+ fPts[1] = SkPoint::Make(R - rrect.radii(SkRRect::kUpperRight_Corner).fX, T);
+ fPts[2] = SkPoint::Make(R, T + rrect.radii(SkRRect::kUpperRight_Corner).fY);
+ fPts[3] = SkPoint::Make(R, B - rrect.radii(SkRRect::kLowerRight_Corner).fY);
+ fPts[4] = SkPoint::Make(R - rrect.radii(SkRRect::kLowerRight_Corner).fX, B);
+ fPts[5] = SkPoint::Make(L + rrect.radii(SkRRect::kLowerLeft_Corner).fX, B);
+ fPts[6] = SkPoint::Make(L, B - rrect.radii(SkRRect::kLowerLeft_Corner).fY);
+ fPts[7] = SkPoint::Make(L, T + rrect.radii(SkRRect::kUpperLeft_Corner).fY);
+ }
+};
+
+} // anonymous namespace
+
+static void assert_known_direction(int dir) {
+ SkASSERT(SkPath::kCW_Direction == dir || SkPath::kCCW_Direction == dir);
+}
+
+void SkPath::addRect(const SkRect& rect, Direction dir) {
+ this->addRect(rect, dir, 0);
+}
+
+void SkPath::addRect(SkScalar left, SkScalar top, SkScalar right,
+ SkScalar bottom, Direction dir) {
+ this->addRect(SkRect::MakeLTRB(left, top, right, bottom), dir, 0);
+}
+
+void SkPath::addRect(const SkRect &rect, Direction dir, unsigned startIndex) {
+ assert_known_direction(dir);
+ fFirstDirection = this->hasOnlyMoveTos() ?
+ (SkPathPriv::FirstDirection)dir : SkPathPriv::kUnknown_FirstDirection;
+ SkAutoDisableDirectionCheck addc(this);
+ SkAutoPathBoundsUpdate apbu(this, rect);
+
+ SkDEBUGCODE(int initialVerbCount = this->countVerbs());
+
+ const int kVerbs = 5; // moveTo + 3x lineTo + close
+ this->incReserve(kVerbs);
+
+ RectPointIterator iter(rect, dir, startIndex);
+
+ this->moveTo(iter.current());
+ this->lineTo(iter.next());
+ this->lineTo(iter.next());
+ this->lineTo(iter.next());
+ this->close();
+
+ SkASSERT(this->countVerbs() == initialVerbCount + kVerbs);
+}
+
+void SkPath::addPoly(const SkPoint pts[], int count, bool close) {
+ SkDEBUGCODE(this->validate();)
+ if (count <= 0) {
+ return;
+ }
+
+ fLastMoveToIndex = fPathRef->countPoints();
+
+ // +close makes room for the extra kClose_Verb
+ SkPathRef::Editor ed(&fPathRef, count+close, count);
+
+ ed.growForVerb(kMove_Verb)->set(pts[0].fX, pts[0].fY);
+ if (count > 1) {
+ SkPoint* p = ed.growForRepeatedVerb(kLine_Verb, count - 1);
+ memcpy(p, &pts[1], (count-1) * sizeof(SkPoint));
+ }
+
+ if (close) {
+ ed.growForVerb(kClose_Verb);
+ fLastMoveToIndex ^= ~fLastMoveToIndex >> (8 * sizeof(fLastMoveToIndex) - 1);
+ }
+
+ DIRTY_AFTER_EDIT;
+ SkDEBUGCODE(this->validate();)
+}
+
+#include "SkGeometry.h"
+
+static bool arc_is_lone_point(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle,
+ SkPoint* pt) {
+ if (0 == sweepAngle && (0 == startAngle || SkIntToScalar(360) == startAngle)) {
+ // Chrome uses this path to move into and out of ovals. If not
+ // treated as a special case the moves can distort the oval's
+ // bounding box (and break the circle special case).
+ pt->set(oval.fRight, oval.centerY());
+ return true;
+ } else if (0 == oval.width() && 0 == oval.height()) {
+ // Chrome will sometimes create 0 radius round rects. Having degenerate
+ // quad segments in the path prevents the path from being recognized as
+ // a rect.
+ // TODO: optimizing the case where only one of width or height is zero
+ // should also be considered. This case, however, doesn't seem to be
+ // as common as the single point case.
+ pt->set(oval.fRight, oval.fTop);
+ return true;
+ }
+ return false;
+}
+
+// Return the unit vectors pointing at the start/stop points for the given start/sweep angles
+//
+static void angles_to_unit_vectors(SkScalar startAngle, SkScalar sweepAngle,
+ SkVector* startV, SkVector* stopV, SkRotationDirection* dir) {
+ startV->fY = SkScalarSinCos(SkDegreesToRadians(startAngle), &startV->fX);
+ stopV->fY = SkScalarSinCos(SkDegreesToRadians(startAngle + sweepAngle), &stopV->fX);
+
+ /* If the sweep angle is nearly (but less than) 360, then due to precision
+ loss in radians-conversion and/or sin/cos, we may end up with coincident
+ vectors, which will fool SkBuildQuadArc into doing nothing (bad) instead
+ of drawing a nearly complete circle (good).
+ e.g. canvas.drawArc(0, 359.99, ...)
+ -vs- canvas.drawArc(0, 359.9, ...)
+ We try to detect this edge case, and tweak the stop vector
+ */
+ if (*startV == *stopV) {
+ SkScalar sw = SkScalarAbs(sweepAngle);
+ if (sw < SkIntToScalar(360) && sw > SkIntToScalar(359)) {
+ SkScalar stopRad = SkDegreesToRadians(startAngle + sweepAngle);
+ // make a guess at a tiny angle (in radians) to tweak by
+ SkScalar deltaRad = SkScalarCopySign(SK_Scalar1/512, sweepAngle);
+ // not sure how much will be enough, so we use a loop
+ do {
+ stopRad -= deltaRad;
+ stopV->fY = SkScalarSinCos(stopRad, &stopV->fX);
+ } while (*startV == *stopV);
+ }
+ }
+ *dir = sweepAngle > 0 ? kCW_SkRotationDirection : kCCW_SkRotationDirection;
+}
+
+/**
+ * If this returns 0, then the caller should just line-to the singlePt, else it should
+ * ignore singlePt and append the specified number of conics.
+ */
+static int build_arc_conics(const SkRect& oval, const SkVector& start, const SkVector& stop,
+ SkRotationDirection dir, SkConic conics[SkConic::kMaxConicsForArc],
+ SkPoint* singlePt) {
+ SkMatrix matrix;
+
+ matrix.setScale(SkScalarHalf(oval.width()), SkScalarHalf(oval.height()));
+ matrix.postTranslate(oval.centerX(), oval.centerY());
+
+ int count = SkConic::BuildUnitArc(start, stop, dir, &matrix, conics);
+ if (0 == count) {
+ matrix.mapXY(start.x(), start.y(), singlePt);
+ }
+ return count;
+}
+
+void SkPath::addRoundRect(const SkRect& rect, const SkScalar radii[],
+ Direction dir) {
+ SkRRect rrect;
+ rrect.setRectRadii(rect, (const SkVector*) radii);
+ this->addRRect(rrect, dir);
+}
+
+void SkPath::addRRect(const SkRRect& rrect, Direction dir) {
+    // legacy start indices: 6 (CW) and 7 (CCW)
+ this->addRRect(rrect, dir, dir == kCW_Direction ? 6 : 7);
+}
+
+void SkPath::addRRect(const SkRRect &rrect, Direction dir, unsigned startIndex) {
+ assert_known_direction(dir);
+
+ if (rrect.isEmpty()) {
+ return;
+ }
+
+ bool isRRect = hasOnlyMoveTos();
+ const SkRect& bounds = rrect.getBounds();
+
+ if (rrect.isRect()) {
+ // degenerate(rect) => radii points are collapsing
+ this->addRect(bounds, dir, (startIndex + 1) / 2);
+ } else if (rrect.isOval()) {
+ // degenerate(oval) => line points are collapsing
+ this->addOval(bounds, dir, startIndex / 2);
+ } else {
+ fFirstDirection = this->hasOnlyMoveTos() ?
+ (SkPathPriv::FirstDirection)dir : SkPathPriv::kUnknown_FirstDirection;
+
+ SkAutoPathBoundsUpdate apbu(this, bounds);
+ SkAutoDisableDirectionCheck addc(this);
+
+ // we start with a conic on odd indices when moving CW vs. even indices when moving CCW
+ const bool startsWithConic = ((startIndex & 1) == (dir == kCW_Direction));
+ const SkScalar weight = SK_ScalarRoot2Over2;
+
+ SkDEBUGCODE(int initialVerbCount = this->countVerbs());
+ const int kVerbs = startsWithConic
+ ? 9 // moveTo + 4x conicTo + 3x lineTo + close
+ : 10; // moveTo + 4x lineTo + 4x conicTo + close
+ this->incReserve(kVerbs);
+
+ RRectPointIterator rrectIter(rrect, dir, startIndex);
+ // Corner iterator indices follow the collapsed radii model,
+ // adjusted such that the start pt is "behind" the radii start pt.
+ const unsigned rectStartIndex = startIndex / 2 + (dir == kCW_Direction ? 0 : 1);
+ RectPointIterator rectIter(bounds, dir, rectStartIndex);
+
+ this->moveTo(rrectIter.current());
+ if (startsWithConic) {
+ for (unsigned i = 0; i < 3; ++i) {
+ this->conicTo(rectIter.next(), rrectIter.next(), weight);
+ this->lineTo(rrectIter.next());
+ }
+ this->conicTo(rectIter.next(), rrectIter.next(), weight);
+ // final lineTo handled by close().
+ } else {
+ for (unsigned i = 0; i < 4; ++i) {
+ this->lineTo(rrectIter.next());
+ this->conicTo(rectIter.next(), rrectIter.next(), weight);
+ }
+ }
+ this->close();
+
+ SkPathRef::Editor ed(&fPathRef);
+ ed.setIsRRect(isRRect, dir, startIndex % 8);
+
+ SkASSERT(this->countVerbs() == initialVerbCount + kVerbs);
+ }
+
+ SkDEBUGCODE(fPathRef->validate();)
+}
+
+bool SkPath::hasOnlyMoveTos() const {
+ int count = fPathRef->countVerbs();
+ const uint8_t* verbs = const_cast<const SkPathRef*>(fPathRef.get())->verbsMemBegin();
+ for (int i = 0; i < count; ++i) {
+ if (*verbs == kLine_Verb ||
+ *verbs == kQuad_Verb ||
+ *verbs == kConic_Verb ||
+ *verbs == kCubic_Verb) {
+ return false;
+ }
+ ++verbs;
+ }
+ return true;
+}
+
+bool SkPath::isZeroLength() const {
+ int count = fPathRef->countPoints();
+ if (count < 2) {
+ return true;
+ }
+ const SkPoint* pts = fPathRef.get()->points();
+ const SkPoint& first = *pts;
+ for (int index = 1; index < count; ++index) {
+ if (first != pts[index]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void SkPath::addRoundRect(const SkRect& rect, SkScalar rx, SkScalar ry,
+ Direction dir) {
+ assert_known_direction(dir);
+
+ if (rx < 0 || ry < 0) {
+ SkErrorInternals::SetError( kInvalidArgument_SkError,
+ "I got %f and %f as radii to SkPath::AddRoundRect, "
+ "but negative radii are not allowed.",
+ SkScalarToDouble(rx), SkScalarToDouble(ry) );
+ return;
+ }
+
+ SkRRect rrect;
+ rrect.setRectXY(rect, rx, ry);
+ this->addRRect(rrect, dir);
+}
+
+void SkPath::addOval(const SkRect& oval, Direction dir) {
+ // legacy start index: 1
+ this->addOval(oval, dir, 1);
+}
+
+void SkPath::addOval(const SkRect &oval, Direction dir, unsigned startPointIndex) {
+ assert_known_direction(dir);
+
+    /* If addOval() is called after a previous moveTo(),
+       this path is still marked as an oval. This is used to
+       fit into WebKit's calling sequences.
+       We can't simply check isEmpty() in this case, as an additional
+       moveTo() would mark the path non-empty.
+ */
+ bool isOval = hasOnlyMoveTos();
+ if (isOval) {
+ fFirstDirection = (SkPathPriv::FirstDirection)dir;
+ } else {
+ fFirstDirection = SkPathPriv::kUnknown_FirstDirection;
+ }
+
+ SkAutoDisableDirectionCheck addc(this);
+ SkAutoPathBoundsUpdate apbu(this, oval);
+
+ SkDEBUGCODE(int initialVerbCount = this->countVerbs());
+ const int kVerbs = 6; // moveTo + 4x conicTo + close
+ this->incReserve(kVerbs);
+
+ OvalPointIterator ovalIter(oval, dir, startPointIndex);
+ // The corner iterator pts are tracking "behind" the oval/radii pts.
+ RectPointIterator rectIter(oval, dir, startPointIndex + (dir == kCW_Direction ? 0 : 1));
+ const SkScalar weight = SK_ScalarRoot2Over2;
+
+ this->moveTo(ovalIter.current());
+ for (unsigned i = 0; i < 4; ++i) {
+ this->conicTo(rectIter.next(), ovalIter.next(), weight);
+ }
+ this->close();
+
+ SkASSERT(this->countVerbs() == initialVerbCount + kVerbs);
+
+ SkPathRef::Editor ed(&fPathRef);
+
+ ed.setIsOval(isOval, kCCW_Direction == dir, startPointIndex % 4);
+}
+
+void SkPath::addCircle(SkScalar x, SkScalar y, SkScalar r, Direction dir) {
+ if (r > 0) {
+ this->addOval(SkRect::MakeLTRB(x - r, y - r, x + r, y + r), dir);
+ }
+}
+
+void SkPath::arcTo(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle,
+ bool forceMoveTo) {
+ if (oval.width() < 0 || oval.height() < 0) {
+ return;
+ }
+
+ if (fPathRef->countVerbs() == 0) {
+ forceMoveTo = true;
+ }
+
+ SkPoint lonePt;
+ if (arc_is_lone_point(oval, startAngle, sweepAngle, &lonePt)) {
+ forceMoveTo ? this->moveTo(lonePt) : this->lineTo(lonePt);
+ return;
+ }
+
+ SkVector startV, stopV;
+ SkRotationDirection dir;
+ angles_to_unit_vectors(startAngle, sweepAngle, &startV, &stopV, &dir);
+
+ SkPoint singlePt;
+ SkConic conics[SkConic::kMaxConicsForArc];
+ int count = build_arc_conics(oval, startV, stopV, dir, conics, &singlePt);
+ if (count) {
+ this->incReserve(count * 2 + 1);
+ const SkPoint& pt = conics[0].fPts[0];
+ forceMoveTo ? this->moveTo(pt) : this->lineTo(pt);
+ for (int i = 0; i < count; ++i) {
+ this->conicTo(conics[i].fPts[1], conics[i].fPts[2], conics[i].fW);
+ }
+ } else {
+ forceMoveTo ? this->moveTo(singlePt) : this->lineTo(singlePt);
+ }
+}
+
+// This converts the SVG arc to conics.
+// Partly adapted from Niko's code in kdelibs/kdecore/svgicons.
+// Then transcribed from webkit/chrome's SVGPathNormalizer::decomposeArcToCubic()
+// See also SVG implementation notes:
+// http://www.w3.org/TR/SVG/implnote.html#ArcConversionEndpointToCenter
+// Note that arcSweep bool value is flipped from the original implementation.
+void SkPath::arcTo(SkScalar rx, SkScalar ry, SkScalar angle, SkPath::ArcSize arcLarge,
+ SkPath::Direction arcSweep, SkScalar x, SkScalar y) {
+ this->injectMoveToIfNeeded();
+ SkPoint srcPts[2];
+ this->getLastPt(&srcPts[0]);
+ // If rx = 0 or ry = 0 then this arc is treated as a straight line segment (a "lineto")
+ // joining the endpoints.
+ // http://www.w3.org/TR/SVG/implnote.html#ArcOutOfRangeParameters
+ if (!rx || !ry) {
+ this->lineTo(x, y);
+ return;
+ }
+ // If the current point and target point for the arc are identical, it should be treated as a
+ // zero length path. This ensures continuity in animations.
+ srcPts[1].set(x, y);
+ if (srcPts[0] == srcPts[1]) {
+ this->lineTo(x, y);
+ return;
+ }
+ rx = SkScalarAbs(rx);
+ ry = SkScalarAbs(ry);
+ SkVector midPointDistance = srcPts[0] - srcPts[1];
+ midPointDistance *= 0.5f;
+
+ SkMatrix pointTransform;
+ pointTransform.setRotate(-angle);
+
+ SkPoint transformedMidPoint;
+ pointTransform.mapPoints(&transformedMidPoint, &midPointDistance, 1);
+ SkScalar squareRx = rx * rx;
+ SkScalar squareRy = ry * ry;
+ SkScalar squareX = transformedMidPoint.fX * transformedMidPoint.fX;
+ SkScalar squareY = transformedMidPoint.fY * transformedMidPoint.fY;
+
+ // Check if the radii are big enough to draw the arc, scale radii if not.
+ // http://www.w3.org/TR/SVG/implnote.html#ArcCorrectionOutOfRangeRadii
+ SkScalar radiiScale = squareX / squareRx + squareY / squareRy;
+ if (radiiScale > 1) {
+ radiiScale = SkScalarSqrt(radiiScale);
+ rx *= radiiScale;
+ ry *= radiiScale;
+ }
+
+ pointTransform.setScale(1 / rx, 1 / ry);
+ pointTransform.preRotate(-angle);
+
+ SkPoint unitPts[2];
+ pointTransform.mapPoints(unitPts, srcPts, (int) SK_ARRAY_COUNT(unitPts));
+ SkVector delta = unitPts[1] - unitPts[0];
+
+ SkScalar d = delta.fX * delta.fX + delta.fY * delta.fY;
+ SkScalar scaleFactorSquared = SkTMax(1 / d - 0.25f, 0.f);
+
+ SkScalar scaleFactor = SkScalarSqrt(scaleFactorSquared);
+ if (SkToBool(arcSweep) != SkToBool(arcLarge)) { // flipped from the original implementation
+ scaleFactor = -scaleFactor;
+ }
+ delta.scale(scaleFactor);
+ SkPoint centerPoint = unitPts[0] + unitPts[1];
+ centerPoint *= 0.5f;
+ centerPoint.offset(-delta.fY, delta.fX);
+ unitPts[0] -= centerPoint;
+ unitPts[1] -= centerPoint;
+ SkScalar theta1 = SkScalarATan2(unitPts[0].fY, unitPts[0].fX);
+ SkScalar theta2 = SkScalarATan2(unitPts[1].fY, unitPts[1].fX);
+ SkScalar thetaArc = theta2 - theta1;
+ if (thetaArc < 0 && !arcSweep) { // arcSweep flipped from the original implementation
+ thetaArc += SK_ScalarPI * 2;
+ } else if (thetaArc > 0 && arcSweep) { // arcSweep flipped from the original implementation
+ thetaArc -= SK_ScalarPI * 2;
+ }
+ pointTransform.setRotate(angle);
+ pointTransform.preScale(rx, ry);
+
+ int segments = SkScalarCeilToInt(SkScalarAbs(thetaArc / (SK_ScalarPI / 2)));
+ SkScalar thetaWidth = thetaArc / segments;
+ SkScalar t = SkScalarTan(0.5f * thetaWidth);
+ if (!SkScalarIsFinite(t)) {
+ return;
+ }
+ SkScalar startTheta = theta1;
+ SkScalar w = SkScalarSqrt(SK_ScalarHalf + SkScalarCos(thetaWidth) * SK_ScalarHalf);
+ for (int i = 0; i < segments; ++i) {
+ SkScalar endTheta = startTheta + thetaWidth;
+ SkScalar cosEndTheta, sinEndTheta = SkScalarSinCos(endTheta, &cosEndTheta);
+
+ unitPts[1].set(cosEndTheta, sinEndTheta);
+ unitPts[1] += centerPoint;
+ unitPts[0] = unitPts[1];
+ unitPts[0].offset(t * sinEndTheta, -t * cosEndTheta);
+ SkPoint mapped[2];
+ pointTransform.mapPoints(mapped, unitPts, (int) SK_ARRAY_COUNT(unitPts));
+ this->conicTo(mapped[0], mapped[1], w);
+ startTheta = endTheta;
+ }
+}
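+
+// A minimal usage sketch (illustrative, not part of this file): the overload
+// above consumes the same seven parameters as an SVG "A" command. The
+// coordinate and radius values below are arbitrary.
+//
+//   SkPath p;
+//   p.moveTo(10, 10);
+//   p.arcTo(30, 20, 0, SkPath::kSmall_ArcSize, SkPath::kCW_Direction, 60, 10);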
+
+void SkPath::rArcTo(SkScalar rx, SkScalar ry, SkScalar xAxisRotate, SkPath::ArcSize largeArc,
+ SkPath::Direction sweep, SkScalar dx, SkScalar dy) {
+ SkPoint currentPoint;
+ this->getLastPt(&currentPoint);
+ this->arcTo(rx, ry, xAxisRotate, largeArc, sweep, currentPoint.fX + dx, currentPoint.fY + dy);
+}
+
+void SkPath::addArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle) {
+ if (oval.isEmpty() || 0 == sweepAngle) {
+ return;
+ }
+
+ const SkScalar kFullCircleAngle = SkIntToScalar(360);
+
+ if (sweepAngle >= kFullCircleAngle || sweepAngle <= -kFullCircleAngle) {
+ // We can treat the arc as an oval if it begins at one of our legal starting positions.
+ // See SkPath::addOval() docs.
+ SkScalar startOver90 = startAngle / 90.f;
+ SkScalar startOver90I = SkScalarRoundToScalar(startOver90);
+ SkScalar error = startOver90 - startOver90I;
+ if (SkScalarNearlyEqual(error, 0)) {
+ // Index 1 is at startAngle == 0.
+ SkScalar startIndex = std::fmod(startOver90I + 1.f, 4.f);
+ startIndex = startIndex < 0 ? startIndex + 4.f : startIndex;
+ this->addOval(oval, sweepAngle > 0 ? kCW_Direction : kCCW_Direction,
+ (unsigned) startIndex);
+ return;
+ }
+ }
+ this->arcTo(oval, startAngle, sweepAngle, true);
+}
+
+/*
+ Need to handle the case when the angle is sharp, and our computed end-points
+    for the arc go behind pt1 and/or pt2...
+*/
+void SkPath::arcTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2, SkScalar radius) {
+ if (radius == 0) {
+ this->lineTo(x1, y1);
+ return;
+ }
+
+ SkVector before, after;
+
+ // need to know our prev pt so we can construct tangent vectors
+ {
+ SkPoint start;
+ this->getLastPt(&start);
+ // Handle degenerate cases by adding a line to the first point and
+ // bailing out.
+ before.setNormalize(x1 - start.fX, y1 - start.fY);
+ after.setNormalize(x2 - x1, y2 - y1);
+ }
+
+ SkScalar cosh = SkPoint::DotProduct(before, after);
+ SkScalar sinh = SkPoint::CrossProduct(before, after);
+
+ if (SkScalarNearlyZero(sinh)) { // angle is too tight
+ this->lineTo(x1, y1);
+ return;
+ }
+
+ SkScalar dist = SkScalarAbs(SkScalarMulDiv(radius, SK_Scalar1 - cosh, sinh));
+
+ SkScalar xx = x1 - SkScalarMul(dist, before.fX);
+ SkScalar yy = y1 - SkScalarMul(dist, before.fY);
+ after.setLength(dist);
+ this->lineTo(xx, yy);
+ SkScalar weight = SkScalarSqrt(SK_ScalarHalf + cosh * SK_ScalarHalf);
+ this->conicTo(x1, y1, x1 + after.fX, y1 + after.fY, weight);
+}
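+
+// Illustrative sketch (not part of the library): the tangent form above is the
+// classic "rounded corner" helper. For example, to round the corner at (50, 0)
+// with a radius of 10:
+//
+//   SkPath p;
+//   p.moveTo(0, 0);
+//   p.arcTo(50, 0, 50, 50, 10);  // line toward (50, 0), then a radius-10 arc
+//   p.lineTo(50, 50);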
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkPath::addPath(const SkPath& path, SkScalar dx, SkScalar dy, AddPathMode mode) {
+ SkMatrix matrix;
+
+ matrix.setTranslate(dx, dy);
+ this->addPath(path, matrix, mode);
+}
+
+void SkPath::addPath(const SkPath& path, const SkMatrix& matrix, AddPathMode mode) {
+ SkPathRef::Editor(&fPathRef, path.countVerbs(), path.countPoints());
+
+ RawIter iter(path);
+ SkPoint pts[4];
+ Verb verb;
+
+ SkMatrix::MapPtsProc proc = matrix.getMapPtsProc();
+ bool firstVerb = true;
+ while ((verb = iter.next(pts)) != kDone_Verb) {
+ switch (verb) {
+ case kMove_Verb:
+ proc(matrix, &pts[0], &pts[0], 1);
+ if (firstVerb && mode == kExtend_AddPathMode && !isEmpty()) {
+ injectMoveToIfNeeded(); // In case last contour is closed
+ this->lineTo(pts[0]);
+ } else {
+ this->moveTo(pts[0]);
+ }
+ break;
+ case kLine_Verb:
+ proc(matrix, &pts[1], &pts[1], 1);
+ this->lineTo(pts[1]);
+ break;
+ case kQuad_Verb:
+ proc(matrix, &pts[1], &pts[1], 2);
+ this->quadTo(pts[1], pts[2]);
+ break;
+ case kConic_Verb:
+ proc(matrix, &pts[1], &pts[1], 2);
+ this->conicTo(pts[1], pts[2], iter.conicWeight());
+ break;
+ case kCubic_Verb:
+ proc(matrix, &pts[1], &pts[1], 3);
+ this->cubicTo(pts[1], pts[2], pts[3]);
+ break;
+ case kClose_Verb:
+ this->close();
+ break;
+ default:
+ SkDEBUGFAIL("unknown verb");
+ }
+ firstVerb = false;
+ }
+}
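+
+// Usage sketch (illustrative; dstPath/srcPath are placeholder paths):
+// kAppend_AddPathMode keeps srcPath's contours separate, while
+// kExtend_AddPathMode joins srcPath's first contour onto dstPath's current
+// contour with a lineTo, as handled in the kMove_Verb case above.
+//
+//   dstPath.addPath(srcPath, SkMatrix::MakeTrans(100, 0),
+//                   SkPath::kExtend_AddPathMode);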
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int pts_in_verb(unsigned verb) {
+ static const uint8_t gPtsInVerb[] = {
+ 1, // kMove
+ 1, // kLine
+ 2, // kQuad
+ 2, // kConic
+ 3, // kCubic
+ 0, // kClose
+ 0 // kDone
+ };
+
+ SkASSERT(verb < SK_ARRAY_COUNT(gPtsInVerb));
+ return gPtsInVerb[verb];
+}
+
+// ignore the last point of the 1st contour
+void SkPath::reversePathTo(const SkPath& path) {
+ int i, vcount = path.fPathRef->countVerbs();
+ // exit early if the path is empty, or just has a moveTo.
+ if (vcount < 2) {
+ return;
+ }
+
+ SkPathRef::Editor(&fPathRef, vcount, path.countPoints());
+
+ const uint8_t* verbs = path.fPathRef->verbs();
+ const SkPoint* pts = path.fPathRef->points();
+ const SkScalar* conicWeights = path.fPathRef->conicWeights();
+
+ SkASSERT(verbs[~0] == kMove_Verb);
+ for (i = 1; i < vcount; ++i) {
+ unsigned v = verbs[~i];
+ int n = pts_in_verb(v);
+ if (n == 0) {
+ break;
+ }
+ pts += n;
+ conicWeights += (SkPath::kConic_Verb == v);
+ }
+
+ while (--i > 0) {
+ switch (verbs[~i]) {
+ case kLine_Verb:
+ this->lineTo(pts[-1].fX, pts[-1].fY);
+ break;
+ case kQuad_Verb:
+ this->quadTo(pts[-1].fX, pts[-1].fY, pts[-2].fX, pts[-2].fY);
+ break;
+ case kConic_Verb:
+ this->conicTo(pts[-1], pts[-2], *--conicWeights);
+ break;
+ case kCubic_Verb:
+ this->cubicTo(pts[-1].fX, pts[-1].fY, pts[-2].fX, pts[-2].fY,
+ pts[-3].fX, pts[-3].fY);
+ break;
+ default:
+ SkDEBUGFAIL("bad verb");
+ break;
+ }
+ pts -= pts_in_verb(verbs[~i]);
+ }
+}
+
+void SkPath::reverseAddPath(const SkPath& src) {
+ SkPathRef::Editor ed(&fPathRef, src.fPathRef->countPoints(), src.fPathRef->countVerbs());
+
+ const SkPoint* pts = src.fPathRef->pointsEnd();
+    // we will iterate through src's verbs backwards
+ const uint8_t* verbs = src.fPathRef->verbsMemBegin(); // points at the last verb
+ const uint8_t* verbsEnd = src.fPathRef->verbs(); // points just past the first verb
+ const SkScalar* conicWeights = src.fPathRef->conicWeightsEnd();
+
+ bool needMove = true;
+ bool needClose = false;
+ while (verbs < verbsEnd) {
+ uint8_t v = *(verbs++);
+ int n = pts_in_verb(v);
+
+ if (needMove) {
+ --pts;
+ this->moveTo(pts->fX, pts->fY);
+ needMove = false;
+ }
+ pts -= n;
+ switch (v) {
+ case kMove_Verb:
+ if (needClose) {
+ this->close();
+ needClose = false;
+ }
+ needMove = true;
+ pts += 1; // so we see the point in "if (needMove)" above
+ break;
+ case kLine_Verb:
+ this->lineTo(pts[0]);
+ break;
+ case kQuad_Verb:
+ this->quadTo(pts[1], pts[0]);
+ break;
+ case kConic_Verb:
+ this->conicTo(pts[1], pts[0], *--conicWeights);
+ break;
+ case kCubic_Verb:
+ this->cubicTo(pts[2], pts[1], pts[0]);
+ break;
+ case kClose_Verb:
+ needClose = true;
+ break;
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkPath::offset(SkScalar dx, SkScalar dy, SkPath* dst) const {
+ SkMatrix matrix;
+
+ matrix.setTranslate(dx, dy);
+ this->transform(matrix, dst);
+}
+
+static void subdivide_cubic_to(SkPath* path, const SkPoint pts[4],
+ int level = 2) {
+ if (--level >= 0) {
+ SkPoint tmp[7];
+
+ SkChopCubicAtHalf(pts, tmp);
+ subdivide_cubic_to(path, &tmp[0], level);
+ subdivide_cubic_to(path, &tmp[3], level);
+ } else {
+ path->cubicTo(pts[1], pts[2], pts[3]);
+ }
+}
+
+void SkPath::transform(const SkMatrix& matrix, SkPath* dst) const {
+ SkDEBUGCODE(this->validate();)
+ if (dst == nullptr) {
+ dst = (SkPath*)this;
+ }
+
+ if (matrix.hasPerspective()) {
+ SkPath tmp;
+ tmp.fFillType = fFillType;
+
+ SkPath::Iter iter(*this, false);
+ SkPoint pts[4];
+ SkPath::Verb verb;
+
+ while ((verb = iter.next(pts, false)) != kDone_Verb) {
+ switch (verb) {
+ case kMove_Verb:
+ tmp.moveTo(pts[0]);
+ break;
+ case kLine_Verb:
+ tmp.lineTo(pts[1]);
+ break;
+ case kQuad_Verb:
+ // promote the quad to a conic
+ tmp.conicTo(pts[1], pts[2],
+ SkConic::TransformW(pts, SK_Scalar1, matrix));
+ break;
+ case kConic_Verb:
+ tmp.conicTo(pts[1], pts[2],
+ SkConic::TransformW(pts, iter.conicWeight(), matrix));
+ break;
+ case kCubic_Verb:
+ subdivide_cubic_to(&tmp, pts);
+ break;
+ case kClose_Verb:
+ tmp.close();
+ break;
+ default:
+ SkDEBUGFAIL("unknown verb");
+ break;
+ }
+ }
+
+ dst->swap(tmp);
+ SkPathRef::Editor ed(&dst->fPathRef);
+ matrix.mapPoints(ed.points(), ed.pathRef()->countPoints());
+ dst->fFirstDirection = SkPathPriv::kUnknown_FirstDirection;
+ } else {
+ SkPathRef::CreateTransformedCopy(&dst->fPathRef, *fPathRef.get(), matrix);
+
+ if (this != dst) {
+ dst->fFillType = fFillType;
+ dst->fConvexity = fConvexity;
+ dst->fIsVolatile = fIsVolatile;
+ }
+
+ if (SkPathPriv::kUnknown_FirstDirection == fFirstDirection) {
+ dst->fFirstDirection = SkPathPriv::kUnknown_FirstDirection;
+ } else {
+ SkScalar det2x2 =
+ SkScalarMul(matrix.get(SkMatrix::kMScaleX), matrix.get(SkMatrix::kMScaleY)) -
+ SkScalarMul(matrix.get(SkMatrix::kMSkewX), matrix.get(SkMatrix::kMSkewY));
+ if (det2x2 < 0) {
+ dst->fFirstDirection = SkPathPriv::OppositeFirstDirection(
+ (SkPathPriv::FirstDirection)fFirstDirection.load());
+ } else if (det2x2 > 0) {
+ dst->fFirstDirection = fFirstDirection.load();
+ } else {
+ dst->fConvexity = kUnknown_Convexity;
+ dst->fFirstDirection = SkPathPriv::kUnknown_FirstDirection;
+ }
+ }
+
+ SkDEBUGCODE(dst->validate();)
+ }
+}
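+
+// Sketch (illustrative): transform() can write in place (dst == nullptr) or
+// into a second path; perspective matrices take the conic-promoting branch
+// above, all other matrices take the transformed-copy branch.
+//
+//   SkMatrix rot;
+//   rot.setRotate(45);
+//   SkPath rotated;
+//   path.transform(rot, &rotated);  // 'path' itself is left untouched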
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+enum SegmentState {
+ kEmptyContour_SegmentState, // The current contour is empty. We may be
+ // starting processing or we may have just
+ // closed a contour.
+ kAfterMove_SegmentState, // We have seen a move, but nothing else.
+ kAfterPrimitive_SegmentState // We have seen a primitive but not yet
+ // closed the path. Also the initial state.
+};
+
+SkPath::Iter::Iter() {
+#ifdef SK_DEBUG
+ fPts = nullptr;
+ fConicWeights = nullptr;
+ fMoveTo.fX = fMoveTo.fY = fLastPt.fX = fLastPt.fY = 0;
+ fForceClose = fCloseLine = false;
+ fSegmentState = kEmptyContour_SegmentState;
+#endif
+ // need to init enough to make next() harmlessly return kDone_Verb
+ fVerbs = nullptr;
+ fVerbStop = nullptr;
+ fNeedClose = false;
+}
+
+SkPath::Iter::Iter(const SkPath& path, bool forceClose) {
+ this->setPath(path, forceClose);
+}
+
+void SkPath::Iter::setPath(const SkPath& path, bool forceClose) {
+ fPts = path.fPathRef->points();
+ fVerbs = path.fPathRef->verbs();
+ fVerbStop = path.fPathRef->verbsMemBegin();
+ fConicWeights = path.fPathRef->conicWeights();
+ if (fConicWeights) {
+ fConicWeights -= 1; // begin one behind
+ }
+ fLastPt.fX = fLastPt.fY = 0;
+ fMoveTo.fX = fMoveTo.fY = 0;
+ fForceClose = SkToU8(forceClose);
+ fNeedClose = false;
+ fSegmentState = kEmptyContour_SegmentState;
+}
+
+bool SkPath::Iter::isClosedContour() const {
+ if (fVerbs == nullptr || fVerbs == fVerbStop) {
+ return false;
+ }
+ if (fForceClose) {
+ return true;
+ }
+
+ const uint8_t* verbs = fVerbs;
+ const uint8_t* stop = fVerbStop;
+
+ if (kMove_Verb == *(verbs - 1)) {
+ verbs -= 1; // skip the initial moveto
+ }
+
+ while (verbs > stop) {
+ // verbs points one beyond the current verb, decrement first.
+ unsigned v = *(--verbs);
+ if (kMove_Verb == v) {
+ break;
+ }
+ if (kClose_Verb == v) {
+ return true;
+ }
+ }
+ return false;
+}
+
+SkPath::Verb SkPath::Iter::autoClose(SkPoint pts[2]) {
+ SkASSERT(pts);
+ if (fLastPt != fMoveTo) {
+        // A special case: if both points are NaN, SkPoint::operator== returns
+        // false, but the iterator expects that they are treated as the same.
+        // (SkPoint is a 2-dimensional float point.)
+ if (SkScalarIsNaN(fLastPt.fX) || SkScalarIsNaN(fLastPt.fY) ||
+ SkScalarIsNaN(fMoveTo.fX) || SkScalarIsNaN(fMoveTo.fY)) {
+ return kClose_Verb;
+ }
+
+ pts[0] = fLastPt;
+ pts[1] = fMoveTo;
+ fLastPt = fMoveTo;
+ fCloseLine = true;
+ return kLine_Verb;
+ } else {
+ pts[0] = fMoveTo;
+ return kClose_Verb;
+ }
+}
+
+const SkPoint& SkPath::Iter::cons_moveTo() {
+ if (fSegmentState == kAfterMove_SegmentState) {
+ // Set the first return pt to the move pt
+ fSegmentState = kAfterPrimitive_SegmentState;
+ return fMoveTo;
+ } else {
+ SkASSERT(fSegmentState == kAfterPrimitive_SegmentState);
+ // Set the first return pt to the last pt of the previous primitive.
+ return fPts[-1];
+ }
+}
+
+void SkPath::Iter::consumeDegenerateSegments(bool exact) {
+ // We need to step over anything that will not move the current draw point
+ // forward before the next move is seen
+ const uint8_t* lastMoveVerb = 0;
+ const SkPoint* lastMovePt = 0;
+ const SkScalar* lastMoveWeight = nullptr;
+ SkPoint lastPt = fLastPt;
+ while (fVerbs != fVerbStop) {
+ unsigned verb = *(fVerbs - 1); // fVerbs is one beyond the current verb
+ switch (verb) {
+ case kMove_Verb:
+ // Keep a record of this most recent move
+ lastMoveVerb = fVerbs;
+ lastMovePt = fPts;
+ lastMoveWeight = fConicWeights;
+ lastPt = fPts[0];
+ fVerbs--;
+ fPts++;
+ break;
+
+ case kClose_Verb:
+ // A close when we are in a segment is always valid except when it
+ // follows a move which follows a segment.
+ if (fSegmentState == kAfterPrimitive_SegmentState && !lastMoveVerb) {
+ return;
+ }
+ // A close at any other time must be ignored
+ fVerbs--;
+ break;
+
+ case kLine_Verb:
+ if (!IsLineDegenerate(lastPt, fPts[0], exact)) {
+ if (lastMoveVerb) {
+ fVerbs = lastMoveVerb;
+ fPts = lastMovePt;
+ fConicWeights = lastMoveWeight;
+ return;
+ }
+ return;
+ }
+ // Ignore this line and continue
+ fVerbs--;
+ fPts++;
+ break;
+
+ case kConic_Verb:
+ case kQuad_Verb:
+ if (!IsQuadDegenerate(lastPt, fPts[0], fPts[1], exact)) {
+ if (lastMoveVerb) {
+ fVerbs = lastMoveVerb;
+ fPts = lastMovePt;
+ fConicWeights = lastMoveWeight;
+ return;
+ }
+ return;
+ }
+                // Ignore this curve and continue
+ fVerbs--;
+ fPts += 2;
+ fConicWeights += (kConic_Verb == verb);
+ break;
+
+ case kCubic_Verb:
+ if (!IsCubicDegenerate(lastPt, fPts[0], fPts[1], fPts[2], exact)) {
+ if (lastMoveVerb) {
+ fVerbs = lastMoveVerb;
+ fPts = lastMovePt;
+ fConicWeights = lastMoveWeight;
+ return;
+ }
+ return;
+ }
+                // Ignore this curve and continue
+ fVerbs--;
+ fPts += 3;
+ break;
+
+ default:
+ SkDEBUGFAIL("Should never see kDone_Verb");
+ }
+ }
+}
+
+SkPath::Verb SkPath::Iter::doNext(SkPoint ptsParam[4]) {
+ SkASSERT(ptsParam);
+
+ if (fVerbs == fVerbStop) {
+ // Close the curve if requested and if there is some curve to close
+ if (fNeedClose && fSegmentState == kAfterPrimitive_SegmentState) {
+ if (kLine_Verb == this->autoClose(ptsParam)) {
+ return kLine_Verb;
+ }
+ fNeedClose = false;
+ return kClose_Verb;
+ }
+ return kDone_Verb;
+ }
+
+ // fVerbs is one beyond the current verb, decrement first
+ unsigned verb = *(--fVerbs);
+ const SkPoint* SK_RESTRICT srcPts = fPts;
+ SkPoint* SK_RESTRICT pts = ptsParam;
+
+ switch (verb) {
+ case kMove_Verb:
+ if (fNeedClose) {
+ fVerbs++; // move back one verb
+ verb = this->autoClose(pts);
+ if (verb == kClose_Verb) {
+ fNeedClose = false;
+ }
+ return (Verb)verb;
+ }
+ if (fVerbs == fVerbStop) { // might be a trailing moveto
+ return kDone_Verb;
+ }
+ fMoveTo = *srcPts;
+ pts[0] = *srcPts;
+ srcPts += 1;
+ fSegmentState = kAfterMove_SegmentState;
+ fLastPt = fMoveTo;
+ fNeedClose = fForceClose;
+ break;
+ case kLine_Verb:
+ pts[0] = this->cons_moveTo();
+ pts[1] = srcPts[0];
+ fLastPt = srcPts[0];
+ fCloseLine = false;
+ srcPts += 1;
+ break;
+ case kConic_Verb:
+ fConicWeights += 1;
+ // fall-through
+ case kQuad_Verb:
+ pts[0] = this->cons_moveTo();
+ memcpy(&pts[1], srcPts, 2 * sizeof(SkPoint));
+ fLastPt = srcPts[1];
+ srcPts += 2;
+ break;
+ case kCubic_Verb:
+ pts[0] = this->cons_moveTo();
+ memcpy(&pts[1], srcPts, 3 * sizeof(SkPoint));
+ fLastPt = srcPts[2];
+ srcPts += 3;
+ break;
+ case kClose_Verb:
+ verb = this->autoClose(pts);
+ if (verb == kLine_Verb) {
+ fVerbs++; // move back one verb
+ } else {
+ fNeedClose = false;
+ fSegmentState = kEmptyContour_SegmentState;
+ }
+ fLastPt = fMoveTo;
+ break;
+ }
+ fPts = srcPts;
+ return (Verb)verb;
+}
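+
+// Typical consumption pattern for the iterator (sketch, assuming an existing
+// SkPath named 'path'); next() fills pts[] according to the verb it returns:
+//
+//   SkPath::Iter iter(path, false /*forceClose*/);
+//   SkPoint pts[4];
+//   SkPath::Verb verb;
+//   while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+//       // kMove uses pts[0]; kLine pts[0..1]; kQuad/kConic pts[0..2];
+//       // kCubic pts[0..3]; a conic's weight comes from iter.conicWeight().
+//   }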
+
+///////////////////////////////////////////////////////////////////////////////
+
+/*
+ Format in compressed buffer: [ptCount, verbCount, pts[], verbs[]]
+*/
+
+size_t SkPath::writeToMemory(void* storage) const {
+ SkDEBUGCODE(this->validate();)
+
+ if (nullptr == storage) {
+ const int byteCount = sizeof(int32_t) * 2 + fPathRef->writeSize();
+ return SkAlign4(byteCount);
+ }
+
+ SkWBuffer buffer(storage);
+
+ int32_t packed = (fConvexity << kConvexity_SerializationShift) |
+ (fFillType << kFillType_SerializationShift) |
+ (fFirstDirection << kDirection_SerializationShift) |
+ (fIsVolatile << kIsVolatile_SerializationShift) |
+ kCurrent_Version;
+
+ buffer.write32(packed);
+ buffer.write32(fLastMoveToIndex);
+
+ fPathRef->writeToBuffer(&buffer);
+
+ buffer.padToAlign4();
+ return buffer.pos();
+}
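+
+// Sketch of the usual two-pass round trip (illustrative; any suitably sized
+// buffer works in place of SkAutoMalloc):
+//
+//   size_t size = path.writeToMemory(nullptr);  // first pass: query the size
+//   SkAutoMalloc storage(size);
+//   path.writeToMemory(storage.get());          // second pass: serialize
+//   SkPath copy;
+//   copy.readFromMemory(storage.get(), size);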
+
+size_t SkPath::readFromMemory(const void* storage, size_t length) {
+ SkRBufferWithSizeCheck buffer(storage, length);
+
+ int32_t packed;
+ if (!buffer.readS32(&packed)) {
+ return 0;
+ }
+
+ unsigned version = packed & 0xFF;
+ if (version >= kPathPrivLastMoveToIndex_Version && !buffer.readS32(&fLastMoveToIndex)) {
+ return 0;
+ }
+
+ fConvexity = (packed >> kConvexity_SerializationShift) & 0xFF;
+ fFillType = (packed >> kFillType_SerializationShift) & 0x3;
+ uint8_t dir = (packed >> kDirection_SerializationShift) & 0x3;
+ fIsVolatile = (packed >> kIsVolatile_SerializationShift) & 0x1;
+ SkPathRef* pathRef = SkPathRef::CreateFromBuffer(&buffer);
+ if (!pathRef) {
+ return 0;
+ }
+
+ fPathRef.reset(pathRef);
+ SkDEBUGCODE(this->validate();)
+ buffer.skipToAlign4();
+
+ // compatibility check
+ if (version < kPathPrivFirstDirection_Version) {
+ switch (dir) { // old values
+ case 0:
+ fFirstDirection = SkPathPriv::kUnknown_FirstDirection;
+ break;
+ case 1:
+ fFirstDirection = SkPathPriv::kCW_FirstDirection;
+ break;
+ case 2:
+ fFirstDirection = SkPathPriv::kCCW_FirstDirection;
+ break;
+ default:
+ SkASSERT(false);
+ }
+ } else {
+ fFirstDirection = dir;
+ }
+
+ return buffer.pos();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkStringUtils.h"
+#include "SkStream.h"
+
+static void append_params(SkString* str, const char label[], const SkPoint pts[],
+ int count, SkScalarAsStringType strType, SkScalar conicWeight = -1) {
+ str->append(label);
+ str->append("(");
+
+ const SkScalar* values = &pts[0].fX;
+ count *= 2;
+
+ for (int i = 0; i < count; ++i) {
+ SkAppendScalar(str, values[i], strType);
+ if (i < count - 1) {
+ str->append(", ");
+ }
+ }
+ if (conicWeight >= 0) {
+ str->append(", ");
+ SkAppendScalar(str, conicWeight, strType);
+ }
+ str->append(");");
+ if (kHex_SkScalarAsStringType == strType) {
+ str->append(" // ");
+ for (int i = 0; i < count; ++i) {
+ SkAppendScalarDec(str, values[i]);
+ if (i < count - 1) {
+ str->append(", ");
+ }
+ }
+ if (conicWeight >= 0) {
+ str->append(", ");
+ SkAppendScalarDec(str, conicWeight);
+ }
+ }
+ str->append("\n");
+}
+
+void SkPath::dump(SkWStream* wStream, bool forceClose, bool dumpAsHex) const {
+ SkScalarAsStringType asType = dumpAsHex ? kHex_SkScalarAsStringType : kDec_SkScalarAsStringType;
+ Iter iter(*this, forceClose);
+ SkPoint pts[4];
+ Verb verb;
+
+ if (!wStream) {
+ SkDebugf("path: forceClose=%s\n", forceClose ? "true" : "false");
+ }
+ SkString builder;
+
+ while ((verb = iter.next(pts, false)) != kDone_Verb) {
+ switch (verb) {
+ case kMove_Verb:
+ append_params(&builder, "path.moveTo", &pts[0], 1, asType);
+ break;
+ case kLine_Verb:
+ append_params(&builder, "path.lineTo", &pts[1], 1, asType);
+ break;
+ case kQuad_Verb:
+ append_params(&builder, "path.quadTo", &pts[1], 2, asType);
+ break;
+ case kConic_Verb:
+ append_params(&builder, "path.conicTo", &pts[1], 2, asType, iter.conicWeight());
+ break;
+ case kCubic_Verb:
+ append_params(&builder, "path.cubicTo", &pts[1], 3, asType);
+ break;
+ case kClose_Verb:
+ builder.append("path.close();\n");
+ break;
+ default:
+ SkDebugf(" path: UNKNOWN VERB %d, aborting dump...\n", verb);
+ verb = kDone_Verb; // stop the loop
+ break;
+ }
+ if (!wStream && builder.size()) {
+ SkDebugf("%s", builder.c_str());
+ builder.reset();
+ }
+ }
+ if (wStream) {
+ wStream->writeText(builder.c_str());
+ }
+}
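+
+// Example of the emitted text (illustrative): dumping a path that holds a
+// single line segment produces statements that can be pasted back into C++:
+//
+//   path.moveTo(0, 0);
+//   path.lineTo(10, 20);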
+
+void SkPath::dump() const {
+ this->dump(nullptr, false, false);
+}
+
+void SkPath::dumpHex() const {
+ this->dump(nullptr, false, true);
+}
+
+#ifdef SK_DEBUG
+void SkPath::validate() const {
+ SkASSERT((fFillType & ~3) == 0);
+
+#ifdef SK_DEBUG_PATH
+ if (!fBoundsIsDirty) {
+ SkRect bounds;
+
+ bool isFinite = compute_pt_bounds(&bounds, *fPathRef.get());
+ SkASSERT(SkToBool(fIsFinite) == isFinite);
+
+ if (fPathRef->countPoints() <= 1) {
+            // if we're empty, fBounds may be empty but translated, so we can't
+            // necessarily compare to bounds directly.
+            // Try path.addOval(SkRect::MakeLTRB(2, 2, 2, 2)), which is empty,
+            // but the bounds will be [2, 2, 2, 2].
+ SkASSERT(bounds.isEmpty());
+ SkASSERT(fBounds.isEmpty());
+ } else {
+ if (bounds.isEmpty()) {
+ SkASSERT(fBounds.isEmpty());
+ } else {
+ if (!fBounds.isEmpty()) {
+ SkASSERT(fBounds.contains(bounds));
+ }
+ }
+ }
+ }
+#endif // SK_DEBUG_PATH
+}
+#endif // SK_DEBUG
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int sign(SkScalar x) { return x < 0; }
+#define kValueNeverReturnedBySign 2
+
+enum DirChange {
+ kLeft_DirChange,
+ kRight_DirChange,
+ kStraight_DirChange,
+ kBackwards_DirChange,
+
+ kInvalid_DirChange
+};
+
+
+static bool almost_equal(SkScalar compA, SkScalar compB) {
+    // The error epsilon was empirically derived; worst-case round rects
+    // with a midpoint outset by 2x float epsilon in tests had an error
+    // of 12.
+ const int epsilon = 16;
+ if (!SkScalarIsFinite(compA) || !SkScalarIsFinite(compB)) {
+ return false;
+ }
+ // no need to check for small numbers because SkPath::Iter has removed degenerate values
+ int aBits = SkFloatAs2sCompliment(compA);
+ int bBits = SkFloatAs2sCompliment(compB);
+ return aBits < bBits + epsilon && bBits < aBits + epsilon;
+}
+
+static bool approximately_zero_when_compared_to(double x, double y) {
+ return x == 0 || fabs(x) < fabs(y * FLT_EPSILON);
+}
+
+
+// only valid for a single contour
+struct Convexicator {
+ Convexicator()
+ : fPtCount(0)
+ , fConvexity(SkPath::kConvex_Convexity)
+ , fFirstDirection(SkPathPriv::kUnknown_FirstDirection)
+ , fIsFinite(true)
+ , fIsCurve(false) {
+ fExpectedDir = kInvalid_DirChange;
+        // initialized only to quiet compiler warnings
+ fPriorPt.set(0,0);
+ fLastPt.set(0, 0);
+ fCurrPt.set(0, 0);
+ fLastVec.set(0, 0);
+ fFirstVec.set(0, 0);
+
+ fDx = fDy = 0;
+ fSx = fSy = kValueNeverReturnedBySign;
+ }
+
+ SkPath::Convexity getConvexity() const { return fConvexity; }
+
+ /** The direction returned is only valid if the path is determined convex */
+ SkPathPriv::FirstDirection getFirstDirection() const { return fFirstDirection; }
+
+ void addPt(const SkPoint& pt) {
+ if (SkPath::kConcave_Convexity == fConvexity || !fIsFinite) {
+ return;
+ }
+
+ if (0 == fPtCount) {
+ fCurrPt = pt;
+ ++fPtCount;
+ } else {
+ SkVector vec = pt - fCurrPt;
+ SkScalar lengthSqd = vec.lengthSqd();
+ if (!SkScalarIsFinite(lengthSqd)) {
+ fIsFinite = false;
+ } else if (lengthSqd) {
+ fPriorPt = fLastPt;
+ fLastPt = fCurrPt;
+ fCurrPt = pt;
+ if (++fPtCount == 2) {
+ fFirstVec = fLastVec = vec;
+ } else {
+ SkASSERT(fPtCount > 2);
+ this->addVec(vec);
+ }
+
+ int sx = sign(vec.fX);
+ int sy = sign(vec.fY);
+ fDx += (sx != fSx);
+ fDy += (sy != fSy);
+ fSx = sx;
+ fSy = sy;
+
+ if (fDx > 3 || fDy > 3) {
+ fConvexity = SkPath::kConcave_Convexity;
+ }
+ }
+ }
+ }
+
+ void close() {
+ if (fPtCount > 2) {
+ this->addVec(fFirstVec);
+ }
+ }
+
+ DirChange directionChange(const SkVector& curVec) {
+ SkScalar cross = SkPoint::CrossProduct(fLastVec, curVec);
+
+ SkScalar smallest = SkTMin(fCurrPt.fX, SkTMin(fCurrPt.fY, SkTMin(fLastPt.fX, fLastPt.fY)));
+ SkScalar largest = SkTMax(fCurrPt.fX, SkTMax(fCurrPt.fY, SkTMax(fLastPt.fX, fLastPt.fY)));
+ largest = SkTMax(largest, -smallest);
+
+ if (!almost_equal(largest, largest + cross)) {
+ int sign = SkScalarSignAsInt(cross);
+ if (sign) {
+ return (1 == sign) ? kRight_DirChange : kLeft_DirChange;
+ }
+ }
+
+ if (cross) {
+ double dLastVecX = SkScalarToDouble(fLastPt.fX) - SkScalarToDouble(fPriorPt.fX);
+ double dLastVecY = SkScalarToDouble(fLastPt.fY) - SkScalarToDouble(fPriorPt.fY);
+ double dCurrVecX = SkScalarToDouble(fCurrPt.fX) - SkScalarToDouble(fLastPt.fX);
+ double dCurrVecY = SkScalarToDouble(fCurrPt.fY) - SkScalarToDouble(fLastPt.fY);
+ double dCross = dLastVecX * dCurrVecY - dLastVecY * dCurrVecX;
+ if (!approximately_zero_when_compared_to(dCross, SkScalarToDouble(largest))) {
+ int sign = SkScalarSignAsInt(SkDoubleToScalar(dCross));
+ if (sign) {
+ return (1 == sign) ? kRight_DirChange : kLeft_DirChange;
+ }
+ }
+ }
+
+ if (!SkScalarNearlyZero(fLastVec.lengthSqd(), SK_ScalarNearlyZero*SK_ScalarNearlyZero) &&
+ !SkScalarNearlyZero(curVec.lengthSqd(), SK_ScalarNearlyZero*SK_ScalarNearlyZero) &&
+ fLastVec.dot(curVec) < 0.0f) {
+ return kBackwards_DirChange;
+ }
+
+ return kStraight_DirChange;
+ }
+
+
+ bool isFinite() const {
+ return fIsFinite;
+ }
+
+ void setCurve(bool isCurve) {
+ fIsCurve = isCurve;
+ }
+
+private:
+ void addVec(const SkVector& vec) {
+ SkASSERT(vec.fX || vec.fY);
+ DirChange dir = this->directionChange(vec);
+ switch (dir) {
+ case kLeft_DirChange: // fall through
+ case kRight_DirChange:
+ if (kInvalid_DirChange == fExpectedDir) {
+ fExpectedDir = dir;
+ fFirstDirection = (kRight_DirChange == dir) ? SkPathPriv::kCW_FirstDirection
+ : SkPathPriv::kCCW_FirstDirection;
+ } else if (dir != fExpectedDir) {
+ fConvexity = SkPath::kConcave_Convexity;
+ fFirstDirection = SkPathPriv::kUnknown_FirstDirection;
+ }
+ fLastVec = vec;
+ break;
+ case kStraight_DirChange:
+ break;
+ case kBackwards_DirChange:
+ if (fIsCurve) {
+ fConvexity = SkPath::kConcave_Convexity;
+ fFirstDirection = SkPathPriv::kUnknown_FirstDirection;
+ }
+ fLastVec = vec;
+ break;
+ case kInvalid_DirChange:
+ SkFAIL("Use of invalid direction change flag");
+ break;
+ }
+ }
+
+ SkPoint fPriorPt;
+ SkPoint fLastPt;
+ SkPoint fCurrPt;
+ // fLastVec does not necessarily start at fLastPt. We only advance it when the cross product
+ // value with the current vec is deemed to be of a significant value.
+ SkVector fLastVec, fFirstVec;
+ int fPtCount; // non-degenerate points
+ DirChange fExpectedDir;
+ SkPath::Convexity fConvexity;
+ SkPathPriv::FirstDirection fFirstDirection;
+ int fDx, fDy, fSx, fSy;
+ bool fIsFinite;
+ bool fIsCurve;
+};
+
+SkPath::Convexity SkPath::internalGetConvexity() const {
+ SkASSERT(kUnknown_Convexity == fConvexity);
+ SkPoint pts[4];
+ SkPath::Verb verb;
+ SkPath::Iter iter(*this, true);
+
+ int contourCount = 0;
+ int count;
+ Convexicator state;
+
+ if (!isFinite()) {
+ return kUnknown_Convexity;
+ }
+ while ((verb = iter.next(pts, true, true)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case kMove_Verb:
+ if (++contourCount > 1) {
+ fConvexity = kConcave_Convexity;
+ return kConcave_Convexity;
+ }
+ pts[1] = pts[0];
+ // fall through
+ case kLine_Verb:
+ count = 1;
+ state.setCurve(false);
+ break;
+ case kQuad_Verb:
+ // fall through
+ case kConic_Verb:
+ // fall through
+ case kCubic_Verb:
+ count = 2 + (kCubic_Verb == verb);
+ // As an additional enhancement, this could set curve true only
+ // if the curve is nonlinear
+ state.setCurve(true);
+ break;
+ case kClose_Verb:
+ state.setCurve(false);
+ state.close();
+ count = 0;
+ break;
+ default:
+ SkDEBUGFAIL("bad verb");
+ fConvexity = kConcave_Convexity;
+ return kConcave_Convexity;
+ }
+
+ for (int i = 1; i <= count; i++) {
+ state.addPt(pts[i]);
+ }
+ // early exit
+ if (!state.isFinite()) {
+ return kUnknown_Convexity;
+ }
+ if (kConcave_Convexity == state.getConvexity()) {
+ fConvexity = kConcave_Convexity;
+ return kConcave_Convexity;
+ }
+ }
+ fConvexity = state.getConvexity();
+ if (kConvex_Convexity == fConvexity && SkPathPriv::kUnknown_FirstDirection == fFirstDirection) {
+ fFirstDirection = state.getFirstDirection();
+ }
+ return static_cast<Convexity>(fConvexity);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class ContourIter {
+public:
+ ContourIter(const SkPathRef& pathRef);
+
+ bool done() const { return fDone; }
+ // if !done() then these may be called
+ int count() const { return fCurrPtCount; }
+ const SkPoint* pts() const { return fCurrPt; }
+ void next();
+
+private:
+ int fCurrPtCount;
+ const SkPoint* fCurrPt;
+ const uint8_t* fCurrVerb;
+ const uint8_t* fStopVerbs;
+ const SkScalar* fCurrConicWeight;
+ bool fDone;
+ SkDEBUGCODE(int fContourCounter;)
+};
+
+ContourIter::ContourIter(const SkPathRef& pathRef) {
+ fStopVerbs = pathRef.verbsMemBegin();
+ fDone = false;
+ fCurrPt = pathRef.points();
+ fCurrVerb = pathRef.verbs();
+ fCurrConicWeight = pathRef.conicWeights();
+ fCurrPtCount = 0;
+ SkDEBUGCODE(fContourCounter = 0;)
+ this->next();
+}
+
+void ContourIter::next() {
+ if (fCurrVerb <= fStopVerbs) {
+ fDone = true;
+ }
+ if (fDone) {
+ return;
+ }
+
+ // skip pts of prev contour
+ fCurrPt += fCurrPtCount;
+
+ SkASSERT(SkPath::kMove_Verb == fCurrVerb[~0]);
+ int ptCount = 1; // moveTo
+ const uint8_t* verbs = fCurrVerb;
+
+ for (--verbs; verbs > fStopVerbs; --verbs) {
+ switch (verbs[~0]) {
+ case SkPath::kMove_Verb:
+ goto CONTOUR_END;
+ case SkPath::kLine_Verb:
+ ptCount += 1;
+ break;
+ case SkPath::kConic_Verb:
+ fCurrConicWeight += 1;
+ // fall-through
+ case SkPath::kQuad_Verb:
+ ptCount += 2;
+ break;
+ case SkPath::kCubic_Verb:
+ ptCount += 3;
+ break;
+ case SkPath::kClose_Verb:
+ break;
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ break;
+ }
+ }
+CONTOUR_END:
+ fCurrPtCount = ptCount;
+ fCurrVerb = verbs;
+ SkDEBUGCODE(++fContourCounter;)
+}
+
+// returns cross product of (p1 - p0) and (p2 - p0)
+static SkScalar cross_prod(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2) {
+ SkScalar cross = SkPoint::CrossProduct(p1 - p0, p2 - p0);
+    // We may get 0 when the subtractions above underflow. We expect this to be
+    // very rare, so we lazily promote to double.
+ if (0 == cross) {
+ double p0x = SkScalarToDouble(p0.fX);
+ double p0y = SkScalarToDouble(p0.fY);
+
+ double p1x = SkScalarToDouble(p1.fX);
+ double p1y = SkScalarToDouble(p1.fY);
+
+ double p2x = SkScalarToDouble(p2.fX);
+ double p2y = SkScalarToDouble(p2.fY);
+
+ cross = SkDoubleToScalar((p1x - p0x) * (p2y - p0y) -
+ (p1y - p0y) * (p2x - p0x));
+
+ }
+ return cross;
+}
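+
+// Worked example (illustrative): with p0 = (0,0), p1 = (1,0), p2 = (0,1) the
+// cross product is (1)(1) - (0)(0) = 1. A positive value maps to
+// kCW_FirstDirection in crossToDir() below, matching Skia's y-down convention.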
+
+// Returns the first pt with the maximum Y coordinate
+static int find_max_y(const SkPoint pts[], int count) {
+ SkASSERT(count > 0);
+ SkScalar max = pts[0].fY;
+ int firstIndex = 0;
+ for (int i = 1; i < count; ++i) {
+ SkScalar y = pts[i].fY;
+ if (y > max) {
+ max = y;
+ firstIndex = i;
+ }
+ }
+ return firstIndex;
+}
+
+static int find_diff_pt(const SkPoint pts[], int index, int n, int inc) {
+ int i = index;
+ for (;;) {
+ i = (i + inc) % n;
+ if (i == index) { // we wrapped around, so abort
+ break;
+ }
+ if (pts[index] != pts[i]) { // found a different point, success!
+ break;
+ }
+ }
+ return i;
+}
+
+/**
+ * Starting at index, and moving forward (incrementing), find the xmin and
+ * xmax of the contiguous points that have the same Y.
+ */
+static int find_min_max_x_at_y(const SkPoint pts[], int index, int n,
+ int* maxIndexPtr) {
+ const SkScalar y = pts[index].fY;
+ SkScalar min = pts[index].fX;
+ SkScalar max = min;
+ int minIndex = index;
+ int maxIndex = index;
+ for (int i = index + 1; i < n; ++i) {
+ if (pts[i].fY != y) {
+ break;
+ }
+ SkScalar x = pts[i].fX;
+ if (x < min) {
+ min = x;
+ minIndex = i;
+ } else if (x > max) {
+ max = x;
+ maxIndex = i;
+ }
+ }
+ *maxIndexPtr = maxIndex;
+ return minIndex;
+}
+
+static void crossToDir(SkScalar cross, SkPathPriv::FirstDirection* dir) {
+ *dir = cross > 0 ? SkPathPriv::kCW_FirstDirection : SkPathPriv::kCCW_FirstDirection;
+}
+
+/*
+ * We loop through all contours, and keep the computed cross-product of the
+ * contour that contained the global y-max. If we just look at the first
+ * contour, we may find one that is wound the opposite way (correctly) since
+ * it is the interior of a hole (e.g. 'o'). Thus we must find the contour
+ * that is outermost (or at least has the global y-max) before we can consider
+ * its cross product.
+ */
+bool SkPathPriv::CheapComputeFirstDirection(const SkPath& path, FirstDirection* dir) {
+ if (kUnknown_FirstDirection != path.fFirstDirection.load()) {
+ *dir = static_cast<FirstDirection>(path.fFirstDirection.load());
+ return true;
+ }
+
+    // We don't want to pay the cost of computing convexity here if it is still
+    // unknown, so we check getConvexityOrUnknown() instead of calling isConvex().
+ if (SkPath::kConvex_Convexity == path.getConvexityOrUnknown()) {
+ SkASSERT(kUnknown_FirstDirection == path.fFirstDirection);
+ *dir = static_cast<FirstDirection>(path.fFirstDirection.load());
+ return false;
+ }
+
+ ContourIter iter(*path.fPathRef.get());
+
+ // initialize with our logical y-min
+ SkScalar ymax = path.getBounds().fTop;
+ SkScalar ymaxCross = 0;
+
+ for (; !iter.done(); iter.next()) {
+ int n = iter.count();
+ if (n < 3) {
+ continue;
+ }
+
+ const SkPoint* pts = iter.pts();
+ SkScalar cross = 0;
+ int index = find_max_y(pts, n);
+ if (pts[index].fY < ymax) {
+ continue;
+ }
+
+ // If there is more than 1 distinct point at the y-max, we take the
+ // x-min and x-max of them and just subtract to compute the dir.
+ if (pts[(index + 1) % n].fY == pts[index].fY) {
+ int maxIndex;
+ int minIndex = find_min_max_x_at_y(pts, index, n, &maxIndex);
+ if (minIndex == maxIndex) {
+ goto TRY_CROSSPROD;
+ }
+ SkASSERT(pts[minIndex].fY == pts[index].fY);
+ SkASSERT(pts[maxIndex].fY == pts[index].fY);
+ SkASSERT(pts[minIndex].fX <= pts[maxIndex].fX);
+ // we just subtract the indices, and let that auto-convert to
+ // SkScalar, since we just want - or + to signal the direction.
+ cross = minIndex - maxIndex;
+ } else {
+ TRY_CROSSPROD:
+ // Find a next and prev index to use for the cross-product test,
+ // but we try to find pts that form non-zero vectors from pts[index]
+ //
+            // It's possible that we can't find two non-degenerate vectors, so
+ // we have to guard our search (e.g. all the pts could be in the
+ // same place).
+
+ // we pass n - 1 instead of -1 so we don't foul up % operator by
+ // passing it a negative LH argument.
+ int prev = find_diff_pt(pts, index, n, n - 1);
+ if (prev == index) {
+ // completely degenerate, skip to next contour
+ continue;
+ }
+ int next = find_diff_pt(pts, index, n, 1);
+ SkASSERT(next != index);
+ cross = cross_prod(pts[prev], pts[index], pts[next]);
+ // if we get a zero and the points are horizontal, then we look at the spread in
+ // x-direction. We really should continue to walk away from the degeneracy until
+ // there is a divergence.
+ if (0 == cross && pts[prev].fY == pts[index].fY && pts[next].fY == pts[index].fY) {
+ // construct the subtract so we get the correct Direction below
+ cross = pts[index].fX - pts[next].fX;
+ }
+ }
+
+ if (cross) {
+ // record our best guess so far
+ ymax = pts[index].fY;
+ ymaxCross = cross;
+ }
+ }
+ if (ymaxCross) {
+ crossToDir(ymaxCross, dir);
+ path.fFirstDirection = *dir;
+ return true;
+ } else {
+ return false;
+ }
+}
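+
+// Usage sketch (illustrative): a y-down triangle wound to the right reports
+// clockwise; the expected result is noted as an assumption, not asserted here.
+//
+//   SkPath tri;
+//   tri.moveTo(0, 0);
+//   tri.lineTo(10, 0);
+//   tri.lineTo(10, 10);
+//   tri.close();
+//   SkPathPriv::FirstDirection dir;
+//   bool known = SkPathPriv::CheapComputeFirstDirection(tri, &dir);
+//   // expected: known == true, dir == SkPathPriv::kCW_FirstDirection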
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool between(SkScalar a, SkScalar b, SkScalar c) {
+ SkASSERT(((a <= b && b <= c) || (a >= b && b >= c)) == ((a - b) * (c - b) <= 0)
+ || (SkScalarNearlyZero(a) && SkScalarNearlyZero(b) && SkScalarNearlyZero(c)));
+ return (a - b) * (c - b) <= 0;
+}
+
+static SkScalar eval_cubic_coeff(SkScalar A, SkScalar B, SkScalar C,
+ SkScalar D, SkScalar t) {
+ return SkScalarMulAdd(SkScalarMulAdd(SkScalarMulAdd(A, t, B), t, C), t, D);
+}
+
+static SkScalar eval_cubic_pts(SkScalar c0, SkScalar c1, SkScalar c2, SkScalar c3,
+ SkScalar t) {
+ SkScalar A = c3 + 3*(c1 - c2) - c0;
+ SkScalar B = 3*(c2 - c1 - c1 + c0);
+ SkScalar C = 3*(c1 - c0);
+ SkScalar D = c0;
+ return eval_cubic_coeff(A, B, C, D, t);
+}
+
+template <size_t N> static void find_minmax(const SkPoint pts[],
+ SkScalar* minPtr, SkScalar* maxPtr) {
+ SkScalar min, max;
+ min = max = pts[0].fX;
+ for (size_t i = 1; i < N; ++i) {
+ min = SkMinScalar(min, pts[i].fX);
+ max = SkMaxScalar(max, pts[i].fX);
+ }
+ *minPtr = min;
+ *maxPtr = max;
+}
+
+static bool checkOnCurve(SkScalar x, SkScalar y, const SkPoint& start, const SkPoint& end) {
+ if (start.fY == end.fY) {
+ return between(start.fX, x, end.fX) && x != end.fX;
+ } else {
+ return x == start.fX && y == start.fY;
+ }
+}
+
+static int winding_mono_cubic(const SkPoint pts[], SkScalar x, SkScalar y, int* onCurveCount) {
+ SkScalar y0 = pts[0].fY;
+ SkScalar y3 = pts[3].fY;
+
+ int dir = 1;
+ if (y0 > y3) {
+ SkTSwap(y0, y3);
+ dir = -1;
+ }
+ if (y < y0 || y > y3) {
+ return 0;
+ }
+ if (checkOnCurve(x, y, pts[0], pts[3])) {
+ *onCurveCount += 1;
+ return 0;
+ }
+ if (y == y3) {
+ return 0;
+ }
+
+ // quickreject or quickaccept
+ SkScalar min, max;
+ find_minmax<4>(pts, &min, &max);
+ if (x < min) {
+ return 0;
+ }
+ if (x > max) {
+ return dir;
+ }
+
+ // compute the actual x(t) value
+ SkScalar t;
+ if (!SkCubicClipper::ChopMonoAtY(pts, y, &t)) {
+ return 0;
+ }
+ SkScalar xt = eval_cubic_pts(pts[0].fX, pts[1].fX, pts[2].fX, pts[3].fX, t);
+ if (SkScalarNearlyEqual(xt, x)) {
+ if (x != pts[3].fX || y != pts[3].fY) { // don't test end points; they're start points
+ *onCurveCount += 1;
+ return 0;
+ }
+ }
+ return xt < x ? dir : 0;
+}
+
+static int winding_cubic(const SkPoint pts[], SkScalar x, SkScalar y, int* onCurveCount) {
+ SkPoint dst[10];
+ int n = SkChopCubicAtYExtrema(pts, dst);
+ int w = 0;
+ for (int i = 0; i <= n; ++i) {
+ w += winding_mono_cubic(&dst[i * 3], x, y, onCurveCount);
+ }
+ return w;
+}
+
+static double conic_eval_numerator(const SkScalar src[], SkScalar w, SkScalar t) {
+ SkASSERT(src);
+ SkASSERT(t >= 0 && t <= 1);
+ SkScalar src2w = src[2] * w;
+ SkScalar C = src[0];
+ SkScalar A = src[4] - 2 * src2w + C;
+ SkScalar B = 2 * (src2w - C);
+ return (A * t + B) * t + C;
+}
+
+
+static double conic_eval_denominator(SkScalar w, SkScalar t) {
+ SkScalar B = 2 * (w - 1);
+ SkScalar C = 1;
+ SkScalar A = -B;
+ return (A * t + B) * t + C;
+}
+
+static int winding_mono_conic(const SkConic& conic, SkScalar x, SkScalar y, int* onCurveCount) {
+ const SkPoint* pts = conic.fPts;
+ SkScalar y0 = pts[0].fY;
+ SkScalar y2 = pts[2].fY;
+
+ int dir = 1;
+ if (y0 > y2) {
+ SkTSwap(y0, y2);
+ dir = -1;
+ }
+ if (y < y0 || y > y2) {
+ return 0;
+ }
+ if (checkOnCurve(x, y, pts[0], pts[2])) {
+ *onCurveCount += 1;
+ return 0;
+ }
+ if (y == y2) {
+ return 0;
+ }
+
+ SkScalar roots[2];
+ SkScalar A = pts[2].fY;
+ SkScalar B = pts[1].fY * conic.fW - y * conic.fW + y;
+ SkScalar C = pts[0].fY;
+ A += C - 2 * B; // A = a + c - 2*(b*w - yCept*w + yCept)
+ B -= C; // B = b*w - w * yCept + yCept - a
+ C -= y;
+ int n = SkFindUnitQuadRoots(A, 2 * B, C, roots);
+ SkASSERT(n <= 1);
+ SkScalar xt;
+ if (0 == n) {
+ // zero roots are returned only when y0 == y
+ // Need [0] if dir == 1
+ // and [2] if dir == -1
+ xt = pts[1 - dir].fX;
+ } else {
+ SkScalar t = roots[0];
+ xt = conic_eval_numerator(&pts[0].fX, conic.fW, t) / conic_eval_denominator(conic.fW, t);
+ }
+ if (SkScalarNearlyEqual(xt, x)) {
+ if (x != pts[2].fX || y != pts[2].fY) { // don't test end points; they're start points
+ *onCurveCount += 1;
+ return 0;
+ }
+ }
+ return xt < x ? dir : 0;
+}
+
+static bool is_mono_quad(SkScalar y0, SkScalar y1, SkScalar y2) {
+ // return SkScalarSignAsInt(y0 - y1) + SkScalarSignAsInt(y1 - y2) != 0;
+ if (y0 == y1) {
+ return true;
+ }
+ if (y0 < y1) {
+ return y1 <= y2;
+ } else {
+ return y1 >= y2;
+ }
+}
+
+static int winding_conic(const SkPoint pts[], SkScalar x, SkScalar y, SkScalar weight,
+ int* onCurveCount) {
+ SkConic conic(pts, weight);
+ SkConic chopped[2];
+    // If the data points are very large, the conic may not be monotonic yet still
+    // fail to chop; in that case the chopper does not split the original conic in two.
+ bool isMono = is_mono_quad(pts[0].fY, pts[1].fY, pts[2].fY) || !conic.chopAtYExtrema(chopped);
+ int w = winding_mono_conic(isMono ? conic : chopped[0], x, y, onCurveCount);
+ if (!isMono) {
+ w += winding_mono_conic(chopped[1], x, y, onCurveCount);
+ }
+ return w;
+}
+
+static int winding_mono_quad(const SkPoint pts[], SkScalar x, SkScalar y, int* onCurveCount) {
+ SkScalar y0 = pts[0].fY;
+ SkScalar y2 = pts[2].fY;
+
+ int dir = 1;
+ if (y0 > y2) {
+ SkTSwap(y0, y2);
+ dir = -1;
+ }
+ if (y < y0 || y > y2) {
+ return 0;
+ }
+ if (checkOnCurve(x, y, pts[0], pts[2])) {
+ *onCurveCount += 1;
+ return 0;
+ }
+ if (y == y2) {
+ return 0;
+ }
+ // bounds check on X (not required. is it faster?)
+#if 0
+ if (pts[0].fX > x && pts[1].fX > x && pts[2].fX > x) {
+ return 0;
+ }
+#endif
+
+ SkScalar roots[2];
+ int n = SkFindUnitQuadRoots(pts[0].fY - 2 * pts[1].fY + pts[2].fY,
+ 2 * (pts[1].fY - pts[0].fY),
+ pts[0].fY - y,
+ roots);
+ SkASSERT(n <= 1);
+ SkScalar xt;
+ if (0 == n) {
+ // zero roots are returned only when y0 == y
+ // Need [0] if dir == 1
+ // and [2] if dir == -1
+ xt = pts[1 - dir].fX;
+ } else {
+ SkScalar t = roots[0];
+ SkScalar C = pts[0].fX;
+ SkScalar A = pts[2].fX - 2 * pts[1].fX + C;
+ SkScalar B = 2 * (pts[1].fX - C);
+ xt = SkScalarMulAdd(SkScalarMulAdd(A, t, B), t, C);
+ }
+ if (SkScalarNearlyEqual(xt, x)) {
+ if (x != pts[2].fX || y != pts[2].fY) { // don't test end points; they're start points
+ *onCurveCount += 1;
+ return 0;
+ }
+ }
+ return xt < x ? dir : 0;
+}
+
+static int winding_quad(const SkPoint pts[], SkScalar x, SkScalar y, int* onCurveCount) {
+ SkPoint dst[5];
+ int n = 0;
+
+ if (!is_mono_quad(pts[0].fY, pts[1].fY, pts[2].fY)) {
+ n = SkChopQuadAtYExtrema(pts, dst);
+ pts = dst;
+ }
+ int w = winding_mono_quad(pts, x, y, onCurveCount);
+ if (n > 0) {
+ w += winding_mono_quad(&pts[2], x, y, onCurveCount);
+ }
+ return w;
+}
+
+static int winding_line(const SkPoint pts[], SkScalar x, SkScalar y, int* onCurveCount) {
+ SkScalar x0 = pts[0].fX;
+ SkScalar y0 = pts[0].fY;
+ SkScalar x1 = pts[1].fX;
+ SkScalar y1 = pts[1].fY;
+
+ SkScalar dy = y1 - y0;
+
+ int dir = 1;
+ if (y0 > y1) {
+ SkTSwap(y0, y1);
+ dir = -1;
+ }
+ if (y < y0 || y > y1) {
+ return 0;
+ }
+ if (checkOnCurve(x, y, pts[0], pts[1])) {
+ *onCurveCount += 1;
+ return 0;
+ }
+ if (y == y1) {
+ return 0;
+ }
+ SkScalar cross = SkScalarMul(x1 - x0, y - pts[0].fY) - SkScalarMul(dy, x - x0);
+
+ if (!cross) {
+ // zero cross means the point is on the line, and since the case where
+ // y of the query point is at the end point is handled above, we can be
+ // sure that we're on the line (excluding the end point) here
+ if (x != x1 || y != pts[1].fY) {
+ *onCurveCount += 1;
+ }
+ dir = 0;
+ } else if (SkScalarSignAsInt(cross) == dir) {
+ dir = 0;
+ }
+ return dir;
+}
+
+static void tangent_cubic(const SkPoint pts[], SkScalar x, SkScalar y,
+ SkTDArray<SkVector>* tangents) {
+ if (!between(pts[0].fY, y, pts[1].fY) && !between(pts[1].fY, y, pts[2].fY)
+ && !between(pts[2].fY, y, pts[3].fY)) {
+ return;
+ }
+ if (!between(pts[0].fX, x, pts[1].fX) && !between(pts[1].fX, x, pts[2].fX)
+ && !between(pts[2].fX, x, pts[3].fX)) {
+ return;
+ }
+ SkPoint dst[10];
+ int n = SkChopCubicAtYExtrema(pts, dst);
+ for (int i = 0; i <= n; ++i) {
+ SkPoint* c = &dst[i * 3];
+ SkScalar t;
+ if (!SkCubicClipper::ChopMonoAtY(c, y, &t)) {
+ continue;
+ }
+ SkScalar xt = eval_cubic_pts(c[0].fX, c[1].fX, c[2].fX, c[3].fX, t);
+ if (!SkScalarNearlyEqual(x, xt)) {
+ continue;
+ }
+ SkVector tangent;
+ SkEvalCubicAt(c, t, nullptr, &tangent, nullptr);
+ tangents->push(tangent);
+ }
+}
+
+static void tangent_conic(const SkPoint pts[], SkScalar x, SkScalar y, SkScalar w,
+ SkTDArray<SkVector>* tangents) {
+ if (!between(pts[0].fY, y, pts[1].fY) && !between(pts[1].fY, y, pts[2].fY)) {
+ return;
+ }
+ if (!between(pts[0].fX, x, pts[1].fX) && !between(pts[1].fX, x, pts[2].fX)) {
+ return;
+ }
+ SkScalar roots[2];
+ SkScalar A = pts[2].fY;
+ SkScalar B = pts[1].fY * w - y * w + y;
+ SkScalar C = pts[0].fY;
+ A += C - 2 * B; // A = a + c - 2*(b*w - yCept*w + yCept)
+ B -= C; // B = b*w - w * yCept + yCept - a
+ C -= y;
+ int n = SkFindUnitQuadRoots(A, 2 * B, C, roots);
+ for (int index = 0; index < n; ++index) {
+ SkScalar t = roots[index];
+ SkScalar xt = conic_eval_numerator(&pts[0].fX, w, t) / conic_eval_denominator(w, t);
+ if (!SkScalarNearlyEqual(x, xt)) {
+ continue;
+ }
+ SkConic conic(pts, w);
+ tangents->push(conic.evalTangentAt(t));
+ }
+}
+
+static void tangent_quad(const SkPoint pts[], SkScalar x, SkScalar y,
+ SkTDArray<SkVector>* tangents) {
+ if (!between(pts[0].fY, y, pts[1].fY) && !between(pts[1].fY, y, pts[2].fY)) {
+ return;
+ }
+ if (!between(pts[0].fX, x, pts[1].fX) && !between(pts[1].fX, x, pts[2].fX)) {
+ return;
+ }
+ SkScalar roots[2];
+ int n = SkFindUnitQuadRoots(pts[0].fY - 2 * pts[1].fY + pts[2].fY,
+ 2 * (pts[1].fY - pts[0].fY),
+ pts[0].fY - y,
+ roots);
+ for (int index = 0; index < n; ++index) {
+ SkScalar t = roots[index];
+ SkScalar C = pts[0].fX;
+ SkScalar A = pts[2].fX - 2 * pts[1].fX + C;
+ SkScalar B = 2 * (pts[1].fX - C);
+ SkScalar xt = (A * t + B) * t + C;
+ if (!SkScalarNearlyEqual(x, xt)) {
+ continue;
+ }
+ tangents->push(SkEvalQuadTangentAt(pts, t));
+ }
+}
+
+static void tangent_line(const SkPoint pts[], SkScalar x, SkScalar y,
+ SkTDArray<SkVector>* tangents) {
+ SkScalar y0 = pts[0].fY;
+ SkScalar y1 = pts[1].fY;
+ if (!between(y0, y, y1)) {
+ return;
+ }
+ SkScalar x0 = pts[0].fX;
+ SkScalar x1 = pts[1].fX;
+ if (!between(x0, x, x1)) {
+ return;
+ }
+ SkScalar dx = x1 - x0;
+ SkScalar dy = y1 - y0;
+ if (!SkScalarNearlyEqual((x - x0) * dy, dx * (y - y0))) {
+ return;
+ }
+ SkVector v;
+ v.set(dx, dy);
+ tangents->push(v);
+}
+
+static bool contains_inclusive(const SkRect& r, SkScalar x, SkScalar y) {
+ return r.fLeft <= x && x <= r.fRight && r.fTop <= y && y <= r.fBottom;
+}
+
+bool SkPath::contains(SkScalar x, SkScalar y) const {
+ bool isInverse = this->isInverseFillType();
+ if (this->isEmpty()) {
+ return isInverse;
+ }
+
+ if (!contains_inclusive(this->getBounds(), x, y)) {
+ return isInverse;
+ }
+
+ SkPath::Iter iter(*this, true);
+ bool done = false;
+ int w = 0;
+ int onCurveCount = 0;
+ do {
+ SkPoint pts[4];
+ switch (iter.next(pts, false)) {
+ case SkPath::kMove_Verb:
+ case SkPath::kClose_Verb:
+ break;
+ case SkPath::kLine_Verb:
+ w += winding_line(pts, x, y, &onCurveCount);
+ break;
+ case SkPath::kQuad_Verb:
+ w += winding_quad(pts, x, y, &onCurveCount);
+ break;
+ case SkPath::kConic_Verb:
+ w += winding_conic(pts, x, y, iter.conicWeight(), &onCurveCount);
+ break;
+ case SkPath::kCubic_Verb:
+ w += winding_cubic(pts, x, y, &onCurveCount);
+ break;
+ case SkPath::kDone_Verb:
+ done = true;
+ break;
+ }
+ } while (!done);
+ bool evenOddFill = SkPath::kEvenOdd_FillType == this->getFillType()
+ || SkPath::kInverseEvenOdd_FillType == this->getFillType();
+ if (evenOddFill) {
+ w &= 1;
+ }
+ if (w) {
+ return !isInverse;
+ }
+ if (onCurveCount <= 1) {
+ return SkToBool(onCurveCount) ^ isInverse;
+ }
+ if ((onCurveCount & 1) || evenOddFill) {
+ return SkToBool(onCurveCount & 1) ^ isInverse;
+ }
+ // If the point touches an even number of curves, and the fill is winding, check for
+ // coincidence. Count coincidence as places where the on curve points have identical tangents.
+ iter.setPath(*this, true);
+ done = false;
+ SkTDArray<SkVector> tangents;
+ do {
+ SkPoint pts[4];
+ int oldCount = tangents.count();
+ switch (iter.next(pts, false)) {
+ case SkPath::kMove_Verb:
+ case SkPath::kClose_Verb:
+ break;
+ case SkPath::kLine_Verb:
+ tangent_line(pts, x, y, &tangents);
+ break;
+ case SkPath::kQuad_Verb:
+ tangent_quad(pts, x, y, &tangents);
+ break;
+ case SkPath::kConic_Verb:
+ tangent_conic(pts, x, y, iter.conicWeight(), &tangents);
+ break;
+ case SkPath::kCubic_Verb:
+ tangent_cubic(pts, x, y, &tangents);
+ break;
+ case SkPath::kDone_Verb:
+ done = true;
+ break;
+ }
+ if (tangents.count() > oldCount) {
+ int last = tangents.count() - 1;
+ const SkVector& tangent = tangents[last];
+ if (SkScalarNearlyZero(tangent.lengthSqd())) {
+ tangents.remove(last);
+ } else {
+ for (int index = 0; index < last; ++index) {
+ const SkVector& test = tangents[index];
+ if (SkScalarNearlyZero(test.cross(tangent))
+ && SkScalarSignAsInt(tangent.fX * test.fX) <= 0
+ && SkScalarSignAsInt(tangent.fY * test.fY) <= 0) {
+ tangents.remove(last);
+ tangents.removeShuffle(index);
+ break;
+ }
+ }
+ }
+ }
+ } while (!done);
+ return SkToBool(tangents.count()) ^ isInverse;
+}
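+
+// Sketch of a point-in-path query (illustrative): a 100x100 rectangle contains
+// its center but not a point outside its bounds; inverse fill types flip the
+// answer, as handled at the top of contains() above.
+//
+//   SkPath r;
+//   r.addRect(SkRect::MakeWH(100, 100));
+//   bool inside  = r.contains(50, 50);   // expected: true
+//   bool outside = r.contains(150, 50);  // expected: false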
+
+int SkPath::ConvertConicToQuads(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2,
+ SkScalar w, SkPoint pts[], int pow2) {
+ const SkConic conic(p0, p1, p2, w);
+ return conic.chopIntoQuadsPOW2(pts, pow2);
+}
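+
+// Sketch (illustrative; p0/p1/p2 are placeholder control points): pow2 = 1
+// requests 2^1 = 2 quads, which needs 1 + 2 * 2 = 5 points of storage.
+//
+//   SkPoint quads[1 + 2 * (1 << 1)];
+//   int numQuads = SkPath::ConvertConicToQuads(p0, p1, p2, 0.707f,
+//                                              quads, 1);  // expected: 2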
+
+bool SkPathPriv::IsSimpleClosedRect(const SkPath& path, SkRect* rect, SkPath::Direction* direction,
+ unsigned* start) {
+ if (path.getSegmentMasks() != SkPath::kLine_SegmentMask) {
+ return false;
+ }
+ SkPath::RawIter iter(path);
+ SkPoint verbPts[4];
+ SkPath::Verb v;
+ SkPoint rectPts[5];
+ int rectPtCnt = 0;
+ while ((v = iter.next(verbPts)) != SkPath::kDone_Verb) {
+ switch (v) {
+ case SkPath::kMove_Verb:
+ if (0 != rectPtCnt) {
+ return false;
+ }
+ rectPts[0] = verbPts[0];
+ ++rectPtCnt;
+ break;
+ case SkPath::kLine_Verb:
+ if (5 == rectPtCnt) {
+ return false;
+ }
+ rectPts[rectPtCnt] = verbPts[1];
+ ++rectPtCnt;
+ break;
+ case SkPath::kClose_Verb:
+ if (4 == rectPtCnt) {
+ rectPts[4] = rectPts[0];
+ rectPtCnt = 5;
+ }
+ break;
+ default:
+ return false;
+ }
+ }
+ if (rectPtCnt < 5) {
+ return false;
+ }
+ if (rectPts[0] != rectPts[4]) {
+ return false;
+ }
+    // Check for two cases of rectangles: pts 0 and 3 form a vertical edge or a
+    // horizontal edge (and pts 1 and 2 the opposite vertical or horizontal edge).
+ bool vec03IsVertical;
+ if (rectPts[0].fX == rectPts[3].fX && rectPts[1].fX == rectPts[2].fX &&
+ rectPts[0].fY == rectPts[1].fY && rectPts[3].fY == rectPts[2].fY) {
+ // Make sure it has non-zero width and height
+ if (rectPts[0].fX == rectPts[1].fX || rectPts[0].fY == rectPts[3].fY) {
+ return false;
+ }
+ vec03IsVertical = true;
+ } else if (rectPts[0].fY == rectPts[3].fY && rectPts[1].fY == rectPts[2].fY &&
+ rectPts[0].fX == rectPts[1].fX && rectPts[3].fX == rectPts[2].fX) {
+ // Make sure it has non-zero width and height
+ if (rectPts[0].fY == rectPts[1].fY || rectPts[0].fX == rectPts[3].fX) {
+ return false;
+ }
+ vec03IsVertical = false;
+ } else {
+ return false;
+ }
+ // Set sortFlags so that it has the low bit set if pt index 0 is on right edge and second bit
+ // set if it is on the bottom edge.
+ unsigned sortFlags =
+ ((rectPts[0].fX < rectPts[2].fX) ? 0b00 : 0b01) |
+ ((rectPts[0].fY < rectPts[2].fY) ? 0b00 : 0b10);
+ switch (sortFlags) {
+ case 0b00:
+ rect->set(rectPts[0].fX, rectPts[0].fY, rectPts[2].fX, rectPts[2].fY);
+ *direction = vec03IsVertical ? SkPath::kCW_Direction : SkPath::kCCW_Direction;
+ *start = 0;
+ break;
+ case 0b01:
+ rect->set(rectPts[2].fX, rectPts[0].fY, rectPts[0].fX, rectPts[2].fY);
+ *direction = vec03IsVertical ? SkPath::kCCW_Direction : SkPath::kCW_Direction;
+ *start = 1;
+ break;
+ case 0b10:
+ rect->set(rectPts[0].fX, rectPts[2].fY, rectPts[2].fX, rectPts[0].fY);
+ *direction = vec03IsVertical ? SkPath::kCCW_Direction : SkPath::kCW_Direction;
+ *start = 3;
+ break;
+ case 0b11:
+ rect->set(rectPts[2].fX, rectPts[2].fY, rectPts[0].fX, rectPts[0].fY);
+ *direction = vec03IsVertical ? SkPath::kCW_Direction : SkPath::kCCW_Direction;
+ *start = 2;
+ break;
+ }
+ return true;
+}
+
+void SkPathPriv::CreateDrawArcPath(SkPath* path, const SkRect& oval, SkScalar startAngle,
+ SkScalar sweepAngle, bool useCenter, bool isFillNoPathEffect) {
+ SkASSERT(!oval.isEmpty());
+ SkASSERT(sweepAngle);
+
+ path->reset();
+ path->setIsVolatile(true);
+ path->setFillType(SkPath::kWinding_FillType);
+ if (isFillNoPathEffect && SkScalarAbs(sweepAngle) >= 360.f) {
+ path->addOval(oval);
+ return;
+ }
+ if (useCenter) {
+ path->moveTo(oval.centerX(), oval.centerY());
+ }
+    // arcTo() mods the sweep angle at 360, but drawArc is not supposed to.
+ bool forceMoveTo = !useCenter;
+ while (sweepAngle <= -360.f) {
+ path->arcTo(oval, startAngle, -180.f, forceMoveTo);
+ startAngle -= 180.f;
+ path->arcTo(oval, startAngle, -180.f, false);
+ startAngle -= 180.f;
+ forceMoveTo = false;
+ sweepAngle += 360.f;
+ }
+ while (sweepAngle >= 360.f) {
+ path->arcTo(oval, startAngle, 180.f, forceMoveTo);
+ startAngle += 180.f;
+ path->arcTo(oval, startAngle, 180.f, false);
+ startAngle += 180.f;
+ forceMoveTo = false;
+ sweepAngle -= 360.f;
+ }
+ path->arcTo(oval, startAngle, sweepAngle, forceMoveTo);
+ if (useCenter) {
+ path->close();
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkPathEffect.cpp b/gfx/skia/skia/src/core/SkPathEffect.cpp
new file mode 100644
index 000000000..1178348af
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPathEffect.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPathEffect.h"
+#include "SkPath.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkPathEffect::computeFastBounds(SkRect* dst, const SkRect& src) const {
+ *dst = src;
+}
+
+bool SkPathEffect::asPoints(PointData* results, const SkPath& src,
+ const SkStrokeRec&, const SkMatrix&, const SkRect*) const {
+ return false;
+}
+
+SkPathEffect::DashType SkPathEffect::asADash(DashInfo* info) const {
+ return kNone_DashType;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPairPathEffect::SkPairPathEffect(sk_sp<SkPathEffect> pe0, sk_sp<SkPathEffect> pe1)
+ : fPE0(std::move(pe0)), fPE1(std::move(pe1))
+{
+ SkASSERT(fPE0.get());
+ SkASSERT(fPE1.get());
+}
+
+/*
+    Format: [pe0-factory][pe1-factory][pe0-size][pe0-data][pe1-data]
+*/
+void SkPairPathEffect::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeFlattenable(fPE0.get());
+ buffer.writeFlattenable(fPE1.get());
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkPairPathEffect::toString(SkString* str) const {
+ str->appendf("first: ");
+ if (fPE0) {
+ fPE0->toString(str);
+ }
+ str->appendf(" second: ");
+ if (fPE1) {
+ fPE1->toString(str);
+ }
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkComposePathEffect::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkPathEffect> pe0(buffer.readPathEffect());
+ sk_sp<SkPathEffect> pe1(buffer.readPathEffect());
+ return SkComposePathEffect::Make(std::move(pe0), std::move(pe1));
+}
+
+bool SkComposePathEffect::filterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec* rec, const SkRect* cullRect) const {
+ SkPath tmp;
+ const SkPath* ptr = &src;
+
+ if (fPE1->filterPath(&tmp, src, rec, cullRect)) {
+ ptr = &tmp;
+ }
+ return fPE0->filterPath(dst, *ptr, rec, cullRect);
+}
+
+
+#ifndef SK_IGNORE_TO_STRING
+void SkComposePathEffect::toString(SkString* str) const {
+ str->appendf("SkComposePathEffect: (");
+ this->INHERITED::toString(str);
+ str->appendf(")");
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkSumPathEffect::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkPathEffect> pe0(buffer.readPathEffect());
+ sk_sp<SkPathEffect> pe1(buffer.readPathEffect());
+ return SkSumPathEffect::Make(pe0, pe1);
+}
+
+bool SkSumPathEffect::filterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec* rec, const SkRect* cullRect) const {
+ // use bit-or so that we always call both, even if the first one succeeds
+ return fPE0->filterPath(dst, src, rec, cullRect) |
+ fPE1->filterPath(dst, src, rec, cullRect);
+}
+
+
+#ifndef SK_IGNORE_TO_STRING
+void SkSumPathEffect::toString(SkString* str) const {
+ str->appendf("SkSumPathEffect: (");
+ this->INHERITED::toString(str);
+ str->appendf(")");
+}
+#endif
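+
+// Usage sketch (illustrative; assumes the dash and corner effects from
+// SkDashPathEffect.h / SkCornerPathEffect.h are available): the second factory
+// argument is applied first, per SkComposePathEffect::filterPath() above.
+//
+//   const SkScalar intervals[] = { 10, 5 };
+//   sk_sp<SkPathEffect> dash = SkDashPathEffect::Make(intervals, 2, 0);
+//   sk_sp<SkPathEffect> corner = SkCornerPathEffect::Make(4);
+//   sk_sp<SkPathEffect> composed = SkComposePathEffect::Make(corner, dash);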
diff --git a/gfx/skia/skia/src/core/SkPathMeasure.cpp b/gfx/skia/skia/src/core/SkPathMeasure.cpp
new file mode 100644
index 000000000..643ffe356
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPathMeasure.cpp
@@ -0,0 +1,710 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkPathMeasure.h"
+#include "SkPathMeasurePriv.h"
+#include "SkGeometry.h"
+#include "SkPath.h"
+#include "SkTSearch.h"
+
+#define kMaxTValue 0x3FFFFFFF
+
+static inline SkScalar tValue2Scalar(int t) {
+ SkASSERT((unsigned)t <= kMaxTValue);
+ const SkScalar kMaxTReciprocal = 1.0f / kMaxTValue;
+ return t * kMaxTReciprocal;
+}
+
+SkScalar SkPathMeasure::Segment::getScalarT() const {
+ return tValue2Scalar(fTValue);
+}
+
+const SkPathMeasure::Segment* SkPathMeasure::NextSegment(const Segment* seg) {
+ unsigned ptIndex = seg->fPtIndex;
+
+ do {
+ ++seg;
+ } while (seg->fPtIndex == ptIndex);
+ return seg;
+}
+
+void SkPathMeasure_segTo(const SkPoint pts[], unsigned segType,
+ SkScalar startT, SkScalar stopT, SkPath* dst) {
+ SkASSERT(startT >= 0 && startT <= SK_Scalar1);
+ SkASSERT(stopT >= 0 && stopT <= SK_Scalar1);
+ SkASSERT(startT <= stopT);
+
+ if (startT == stopT) {
+ /* if the dash has a zero-length "on" segment, add a corresponding zero-length line.
+ The stroke code will add end caps to zero-length lines as appropriate */
+ SkPoint lastPt;
+ SkAssertResult(dst->getLastPt(&lastPt));
+ dst->lineTo(lastPt);
+ return;
+ }
+
+ SkPoint tmp0[7], tmp1[7];
+
+ switch (segType) {
+ case kLine_SegType:
+ if (SK_Scalar1 == stopT) {
+ dst->lineTo(pts[1]);
+ } else {
+ dst->lineTo(SkScalarInterp(pts[0].fX, pts[1].fX, stopT),
+ SkScalarInterp(pts[0].fY, pts[1].fY, stopT));
+ }
+ break;
+ case kQuad_SegType:
+ if (0 == startT) {
+ if (SK_Scalar1 == stopT) {
+ dst->quadTo(pts[1], pts[2]);
+ } else {
+ SkChopQuadAt(pts, tmp0, stopT);
+ dst->quadTo(tmp0[1], tmp0[2]);
+ }
+ } else {
+ SkChopQuadAt(pts, tmp0, startT);
+ if (SK_Scalar1 == stopT) {
+ dst->quadTo(tmp0[3], tmp0[4]);
+ } else {
+ SkChopQuadAt(&tmp0[2], tmp1, (stopT - startT) / (1 - startT));
+ dst->quadTo(tmp1[1], tmp1[2]);
+ }
+ }
+ break;
+ case kConic_SegType: {
+ SkConic conic(pts[0], pts[2], pts[3], pts[1].fX);
+
+ if (0 == startT) {
+ if (SK_Scalar1 == stopT) {
+ dst->conicTo(conic.fPts[1], conic.fPts[2], conic.fW);
+ } else {
+ SkConic tmp[2];
+ if (conic.chopAt(stopT, tmp)) {
+ dst->conicTo(tmp[0].fPts[1], tmp[0].fPts[2], tmp[0].fW);
+ }
+ }
+ } else {
+ if (SK_Scalar1 == stopT) {
+ SkConic tmp1[2];
+ if (conic.chopAt(startT, tmp1)) {
+ dst->conicTo(tmp1[1].fPts[1], tmp1[1].fPts[2], tmp1[1].fW);
+ }
+ } else {
+ SkConic tmp;
+ conic.chopAt(startT, stopT, &tmp);
+ dst->conicTo(tmp.fPts[1], tmp.fPts[2], tmp.fW);
+ }
+ }
+ } break;
+ case kCubic_SegType:
+ if (0 == startT) {
+ if (SK_Scalar1 == stopT) {
+ dst->cubicTo(pts[1], pts[2], pts[3]);
+ } else {
+ SkChopCubicAt(pts, tmp0, stopT);
+ dst->cubicTo(tmp0[1], tmp0[2], tmp0[3]);
+ }
+ } else {
+ SkChopCubicAt(pts, tmp0, startT);
+ if (SK_Scalar1 == stopT) {
+ dst->cubicTo(tmp0[4], tmp0[5], tmp0[6]);
+ } else {
+ SkChopCubicAt(&tmp0[3], tmp1, (stopT - startT) / (1 - startT));
+ dst->cubicTo(tmp1[1], tmp1[2], tmp1[3]);
+ }
+ }
+ break;
+ default:
+ SkDEBUGFAIL("unknown segType");
+ sk_throw();
+ }
+}
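+
+// A note on the chops above: when startT > 0 the curve is first chopped at startT, and the
+// trailing piece is then chopped at (stopT - startT) / (1 - startT), which is simply stopT
+// re-expressed in the parameterization of that trailing piece.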
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline int tspan_big_enough(int tspan) {
+ SkASSERT((unsigned)tspan <= kMaxTValue);
+ return tspan >> 10;
+}
+
+// can't use tangents, since we need [0..1..................2] to be seen
+// as definitely not a line (it is when drawn, but not parametrically)
+// so we compare midpoints
+#define CHEAP_DIST_LIMIT (SK_Scalar1/2) // just made this value up
+
+bool SkPathMeasure::quad_too_curvy(const SkPoint pts[3]) {
+ // diff = (a/4 + b/2 + c/4) - (a/2 + c/2)
+ // diff = -a/4 + b/2 - c/4
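+ // i.e. the first term is the curve's midpoint B(1/2) = (a + 2b + c)/4 and the second is the
+ // chord midpoint (a + c)/2, so |diff| measures how far the quad bulges away from its chord.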
+ SkScalar dx = SkScalarHalf(pts[1].fX) -
+ SkScalarHalf(SkScalarHalf(pts[0].fX + pts[2].fX));
+ SkScalar dy = SkScalarHalf(pts[1].fY) -
+ SkScalarHalf(SkScalarHalf(pts[0].fY + pts[2].fY));
+
+ SkScalar dist = SkMaxScalar(SkScalarAbs(dx), SkScalarAbs(dy));
+ return dist > fTolerance;
+}
+
+bool SkPathMeasure::conic_too_curvy(const SkPoint& firstPt, const SkPoint& midTPt,
+ const SkPoint& lastPt) {
+ SkPoint midEnds = firstPt + lastPt;
+ midEnds *= 0.5f;
+ SkVector dxy = midTPt - midEnds;
+ SkScalar dist = SkMaxScalar(SkScalarAbs(dxy.fX), SkScalarAbs(dxy.fY));
+ return dist > fTolerance;
+}
+
+bool SkPathMeasure::cheap_dist_exceeds_limit(const SkPoint& pt,
+ SkScalar x, SkScalar y) {
+ SkScalar dist = SkMaxScalar(SkScalarAbs(x - pt.fX), SkScalarAbs(y - pt.fY));
+ // the 1/2 default (CHEAP_DIST_LIMIT) was just made up
+ return dist > fTolerance;
+}
+
+bool SkPathMeasure::cubic_too_curvy(const SkPoint pts[4]) {
+ return cheap_dist_exceeds_limit(pts[1],
+ SkScalarInterp(pts[0].fX, pts[3].fX, SK_Scalar1/3),
+ SkScalarInterp(pts[0].fY, pts[3].fY, SK_Scalar1/3))
+ ||
+ cheap_dist_exceeds_limit(pts[2],
+ SkScalarInterp(pts[0].fX, pts[3].fX, SK_Scalar1*2/3),
+ SkScalarInterp(pts[0].fY, pts[3].fY, SK_Scalar1*2/3));
+}
+
+static SkScalar quad_folded_len(const SkPoint pts[3]) {
+ SkScalar t = SkFindQuadMaxCurvature(pts);
+ SkPoint pt = SkEvalQuadAt(pts, t);
+ SkVector a = pts[2] - pt;
+ SkScalar result = a.length();
+ if (0 != t) {
+ SkVector b = pts[0] - pt;
+ result += b.length();
+ }
+ SkASSERT(SkScalarIsFinite(result));
+ return result;
+}
+
+/* from http://www.malczak.linuxpl.com/blog/quadratic-bezier-curve-length/ */
+/* This works -- more needs to be done to see if it is performant on all platforms.
+ To use this to measure parts of quads requires recomputing everything -- perhaps
+ a chop-like interface can start from a larger measurement and get two new measurements
+ with one call here.
+ */
+static SkScalar compute_quad_len(const SkPoint pts[3]) {
+ SkPoint a,b;
+ a.fX = pts[0].fX - 2 * pts[1].fX + pts[2].fX;
+ a.fY = pts[0].fY - 2 * pts[1].fY + pts[2].fY;
+ SkScalar A = 4 * (a.fX * a.fX + a.fY * a.fY);
+ if (0 == A) {
+ a = pts[2] - pts[0];
+ return a.length();
+ }
+ b.fX = 2 * (pts[1].fX - pts[0].fX);
+ b.fY = 2 * (pts[1].fY - pts[0].fY);
+ SkScalar B = 4 * (a.fX * b.fX + a.fY * b.fY);
+ SkScalar C = b.fX * b.fX + b.fY * b.fY;
+ SkScalar Sabc = 2 * SkScalarSqrt(A + B + C);
+ SkScalar A_2 = SkScalarSqrt(A);
+ SkScalar A_32 = 2 * A * A_2;
+ SkScalar C_2 = 2 * SkScalarSqrt(C);
+ SkScalar BA = B / A_2;
+ if (0 == BA + C_2) {
+ return quad_folded_len(pts);
+ }
+ SkScalar J = A_32 * Sabc + A_2 * B * (Sabc - C_2);
+ SkScalar K = 4 * C * A - B * B;
+ SkScalar L = (2 * A_2 + BA + Sabc) / (BA + C_2);
+ if (L <= 0) {
+ return quad_folded_len(pts);
+ }
+ SkScalar M = SkScalarLog(L);
+ SkScalar result = (J + K * M) / (4 * A_32);
+ SkASSERT(SkScalarIsFinite(result));
+ return result;
+}
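+
+// Sketch of the math above, using the same names as the code: with a = p0 - 2*p1 + p2 and
+// b = 2*(p1 - p0), the quad's derivative is B'(t) = 2*a*t + b, so |B'(t)|^2 = A*t^2 + B*t + C
+// with A = 4*|a|^2, B = 4*(a.b), C = |b|^2. The length is the integral of sqrt(A*t^2 + B*t + C)
+// over [0,1]; its standard closed form (a square-root term plus a logarithmic term) is what the
+// J, K, L and M expressions assemble, and quad_folded_len() handles the degenerate/folded cases
+// where the log argument would not be positive.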
+
+SkScalar SkPathMeasure::compute_quad_segs(const SkPoint pts[3],
+ SkScalar distance, int mint, int maxt, int ptIndex) {
+ if (tspan_big_enough(maxt - mint) && quad_too_curvy(pts)) {
+ SkPoint tmp[5];
+ int halft = (mint + maxt) >> 1;
+
+ SkChopQuadAtHalf(pts, tmp);
+ distance = this->compute_quad_segs(tmp, distance, mint, halft, ptIndex);
+ distance = this->compute_quad_segs(&tmp[2], distance, halft, maxt, ptIndex);
+ } else {
+ SkScalar d = SkPoint::Distance(pts[0], pts[2]);
+ SkScalar prevD = distance;
+ distance += d;
+ if (distance > prevD) {
+ Segment* seg = fSegments.append();
+ seg->fDistance = distance;
+ seg->fPtIndex = ptIndex;
+ seg->fType = kQuad_SegType;
+ seg->fTValue = maxt;
+ }
+ }
+ return distance;
+}
+
+SkScalar SkPathMeasure::compute_conic_segs(const SkConic& conic, SkScalar distance,
+ int mint, const SkPoint& minPt,
+ int maxt, const SkPoint& maxPt, int ptIndex) {
+ int halft = (mint + maxt) >> 1;
+ SkPoint halfPt = conic.evalAt(tValue2Scalar(halft));
+ if (tspan_big_enough(maxt - mint) && conic_too_curvy(minPt, halfPt, maxPt)) {
+ distance = this->compute_conic_segs(conic, distance, mint, minPt, halft, halfPt, ptIndex);
+ distance = this->compute_conic_segs(conic, distance, halft, halfPt, maxt, maxPt, ptIndex);
+ } else {
+ SkScalar d = SkPoint::Distance(minPt, maxPt);
+ SkScalar prevD = distance;
+ distance += d;
+ if (distance > prevD) {
+ Segment* seg = fSegments.append();
+ seg->fDistance = distance;
+ seg->fPtIndex = ptIndex;
+ seg->fType = kConic_SegType;
+ seg->fTValue = maxt;
+ }
+ }
+ return distance;
+}
+
+SkScalar SkPathMeasure::compute_cubic_segs(const SkPoint pts[4],
+ SkScalar distance, int mint, int maxt, int ptIndex) {
+ if (tspan_big_enough(maxt - mint) && cubic_too_curvy(pts)) {
+ SkPoint tmp[7];
+ int halft = (mint + maxt) >> 1;
+
+ SkChopCubicAtHalf(pts, tmp);
+ distance = this->compute_cubic_segs(tmp, distance, mint, halft, ptIndex);
+ distance = this->compute_cubic_segs(&tmp[3], distance, halft, maxt, ptIndex);
+ } else {
+ SkScalar d = SkPoint::Distance(pts[0], pts[3]);
+ SkScalar prevD = distance;
+ distance += d;
+ if (distance > prevD) {
+ Segment* seg = fSegments.append();
+ seg->fDistance = distance;
+ seg->fPtIndex = ptIndex;
+ seg->fType = kCubic_SegType;
+ seg->fTValue = maxt;
+ }
+ }
+ return distance;
+}
+
+void SkPathMeasure::buildSegments() {
+ SkPoint pts[4];
+ int ptIndex = fFirstPtIndex;
+ SkScalar distance = 0;
+ bool isClosed = fForceClosed;
+ bool firstMoveTo = ptIndex < 0;
+ Segment* seg;
+
+ /* Note:
+ * as we accumulate distance, we have to check that the result of +=
+ * actually made it larger, since a very small delta might be > 0, but
+ * still have no effect on distance (if distance >>> delta).
+ *
+ * We do this check below, and in compute_quad_segs and compute_cubic_segs
+ */
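+ /* For example, in single precision 1e8f + 1e-3f == 1e8f, so a tiny segment appended late in a
+  * very long contour could otherwise be recorded with the same fDistance as its predecessor,
+  * breaking the strictly-increasing ordering that distanceToSegment() relies on.
+  */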
+ fSegments.reset();
+ bool done = false;
+ do {
+ switch (fIter.next(pts)) {
+ case SkPath::kMove_Verb:
+ ptIndex += 1;
+ fPts.append(1, pts);
+ if (!firstMoveTo) {
+ done = true;
+ break;
+ }
+ firstMoveTo = false;
+ break;
+
+ case SkPath::kLine_Verb: {
+ SkScalar d = SkPoint::Distance(pts[0], pts[1]);
+ SkASSERT(d >= 0);
+ SkScalar prevD = distance;
+ distance += d;
+ if (distance > prevD) {
+ seg = fSegments.append();
+ seg->fDistance = distance;
+ seg->fPtIndex = ptIndex;
+ seg->fType = kLine_SegType;
+ seg->fTValue = kMaxTValue;
+ fPts.append(1, pts + 1);
+ ptIndex++;
+ }
+ } break;
+
+ case SkPath::kQuad_Verb: {
+ SkScalar prevD = distance;
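+ // Note: the closed-form length path below is compiled out via if (false), so quads
+ // currently always take the recursive-subdivision branch.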
+ if (false) {
+ SkScalar length = compute_quad_len(pts);
+ if (length) {
+ distance += length;
+ Segment* seg = fSegments.append();
+ seg->fDistance = distance;
+ seg->fPtIndex = ptIndex;
+ seg->fType = kQuad_SegType;
+ seg->fTValue = kMaxTValue;
+ }
+ } else {
+ distance = this->compute_quad_segs(pts, distance, 0, kMaxTValue, ptIndex);
+ }
+ if (distance > prevD) {
+ fPts.append(2, pts + 1);
+ ptIndex += 2;
+ }
+ } break;
+
+ case SkPath::kConic_Verb: {
+ const SkConic conic(pts, fIter.conicWeight());
+ SkScalar prevD = distance;
+ distance = this->compute_conic_segs(conic, distance, 0, conic.fPts[0],
+ kMaxTValue, conic.fPts[2], ptIndex);
+ if (distance > prevD) {
+ // we store the conic weight in our next point, followed by the last 2 pts
+ // thus to reconstitute a conic, you'd need to say
+ // SkConic(pts[0], pts[2], pts[3], weight = pts[1].fX)
+ fPts.append()->set(conic.fW, 0);
+ fPts.append(2, pts + 1);
+ ptIndex += 3;
+ }
+ } break;
+
+ case SkPath::kCubic_Verb: {
+ SkScalar prevD = distance;
+ distance = this->compute_cubic_segs(pts, distance, 0, kMaxTValue, ptIndex);
+ if (distance > prevD) {
+ fPts.append(3, pts + 1);
+ ptIndex += 3;
+ }
+ } break;
+
+ case SkPath::kClose_Verb:
+ isClosed = true;
+ break;
+
+ case SkPath::kDone_Verb:
+ done = true;
+ break;
+ }
+ } while (!done);
+
+ fLength = distance;
+ fIsClosed = isClosed;
+ fFirstPtIndex = ptIndex;
+
+#ifdef SK_DEBUG
+#ifndef SK_DISABLE_SLOW_DEBUG_VALIDATION
+ {
+ const Segment* seg = fSegments.begin();
+ const Segment* stop = fSegments.end();
+ unsigned ptIndex = 0;
+ SkScalar distance = 0;
+ // limit the loop to a reasonable number; pathological cases can run for minutes
+ int maxChecks = 10000000; // set to INT_MAX to defeat the check
+ while (seg < stop) {
+ SkASSERT(seg->fDistance > distance);
+ SkASSERT(seg->fPtIndex >= ptIndex);
+ SkASSERT(seg->fTValue > 0);
+
+ const Segment* s = seg;
+ while (s < stop - 1 && s[0].fPtIndex == s[1].fPtIndex && --maxChecks > 0) {
+ SkASSERT(s[0].fType == s[1].fType);
+ SkASSERT(s[0].fTValue < s[1].fTValue);
+ s += 1;
+ }
+
+ distance = seg->fDistance;
+ ptIndex = seg->fPtIndex;
+ seg += 1;
+ }
+ // SkDebugf("\n");
+ }
+#endif
+#endif
+}
+
+static void compute_pos_tan(const SkPoint pts[], unsigned segType,
+ SkScalar t, SkPoint* pos, SkVector* tangent) {
+ switch (segType) {
+ case kLine_SegType:
+ if (pos) {
+ pos->set(SkScalarInterp(pts[0].fX, pts[1].fX, t),
+ SkScalarInterp(pts[0].fY, pts[1].fY, t));
+ }
+ if (tangent) {
+ tangent->setNormalize(pts[1].fX - pts[0].fX, pts[1].fY - pts[0].fY);
+ }
+ break;
+ case kQuad_SegType:
+ SkEvalQuadAt(pts, t, pos, tangent);
+ if (tangent) {
+ tangent->normalize();
+ }
+ break;
+ case kConic_SegType: {
+ SkConic(pts[0], pts[2], pts[3], pts[1].fX).evalAt(t, pos, tangent);
+ if (tangent) {
+ tangent->normalize();
+ }
+ } break;
+ case kCubic_SegType:
+ SkEvalCubicAt(pts, t, pos, tangent, nullptr);
+ if (tangent) {
+ tangent->normalize();
+ }
+ break;
+ default:
+ SkDEBUGFAIL("unknown segType");
+ }
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+
+SkPathMeasure::SkPathMeasure() {
+ fPath = nullptr;
+ fTolerance = CHEAP_DIST_LIMIT;
+ fLength = -1; // signal we need to compute it
+ fForceClosed = false;
+ fFirstPtIndex = -1;
+}
+
+SkPathMeasure::SkPathMeasure(const SkPath& path, bool forceClosed, SkScalar resScale) {
+ fPath = &path;
+ fTolerance = CHEAP_DIST_LIMIT * SkScalarInvert(resScale);
+ fLength = -1; // signal we need to compute it
+ fForceClosed = forceClosed;
+ fFirstPtIndex = -1;
+
+ fIter.setPath(path, forceClosed);
+}
+
+SkPathMeasure::~SkPathMeasure() {}
+
+/** Assign a new path, or null to have none.
+*/
+void SkPathMeasure::setPath(const SkPath* path, bool forceClosed) {
+ fPath = path;
+ fLength = -1; // signal we need to compute it
+ fForceClosed = forceClosed;
+ fFirstPtIndex = -1;
+
+ if (path) {
+ fIter.setPath(*path, forceClosed);
+ }
+ fSegments.reset();
+ fPts.reset();
+}
+
+SkScalar SkPathMeasure::getLength() {
+ if (fPath == nullptr) {
+ return 0;
+ }
+ if (fLength < 0) {
+ this->buildSegments();
+ }
+ SkASSERT(fLength >= 0);
+ return fLength;
+}
+
+template <typename T, typename K>
+int SkTKSearch(const T base[], int count, const K& key) {
+ SkASSERT(count >= 0);
+ if (count <= 0) {
+ return ~0;
+ }
+
+ SkASSERT(base != nullptr); // base may only be nullptr when count is zero, and that case returned above
+
+ int lo = 0;
+ int hi = count - 1;
+
+ while (lo < hi) {
+ int mid = (hi + lo) >> 1;
+ if (base[mid].fDistance < key) {
+ lo = mid + 1;
+ } else {
+ hi = mid;
+ }
+ }
+
+ if (base[hi].fDistance < key) {
+ hi += 1;
+ hi = ~hi;
+ } else if (key < base[hi].fDistance) {
+ hi = ~hi;
+ }
+ return hi;
+}
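+
+// SkTKSearch returns the index of the Segment whose fDistance equals key, or the bitwise
+// complement (~index) of the insertion point when there is no exact match. For example, with
+// distances {10, 20, 30} a key of 20 returns 1, while a key of 15 returns ~1 (i.e. -2).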
+
+const SkPathMeasure::Segment* SkPathMeasure::distanceToSegment(
+ SkScalar distance, SkScalar* t) {
+ SkDEBUGCODE(SkScalar length = ) this->getLength();
+ SkASSERT(distance >= 0 && distance <= length);
+
+ const Segment* seg = fSegments.begin();
+ int count = fSegments.count();
+
+ int index = SkTKSearch<Segment, SkScalar>(seg, count, distance);
+ // don't care if we hit an exact match or not, so we xor index if it is negative
+ index ^= (index >> 31);
+ seg = &seg[index];
+
+ // now interpolate t-values with the prev segment (if possible)
+ SkScalar startT = 0, startD = 0;
+ // check if the prev segment is legal, and references the same set of points
+ if (index > 0) {
+ startD = seg[-1].fDistance;
+ if (seg[-1].fPtIndex == seg->fPtIndex) {
+ SkASSERT(seg[-1].fType == seg->fType);
+ startT = seg[-1].getScalarT();
+ }
+ }
+
+ SkASSERT(seg->getScalarT() > startT);
+ SkASSERT(distance >= startD);
+ SkASSERT(seg->fDistance > startD);
+
+ *t = startT + SkScalarMulDiv(seg->getScalarT() - startT,
+ distance - startD,
+ seg->fDistance - startD);
+ return seg;
+}
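+
+// The interpolation above is linear in distance: e.g. if the previous segment for the same points
+// ended at (t = 0.25, d = 10) and this segment ends at (t = 0.5, d = 20), a query distance of 15
+// yields t = 0.25 + 0.25 * (15 - 10) / (20 - 10) = 0.375.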
+
+bool SkPathMeasure::getPosTan(SkScalar distance, SkPoint* pos,
+ SkVector* tangent) {
+ if (nullptr == fPath) {
+ return false;
+ }
+
+ SkScalar length = this->getLength(); // call this to force computing it
+ int count = fSegments.count();
+
+ if (count == 0 || length == 0) {
+ return false;
+ }
+
+ // pin the distance to a legal range
+ if (distance < 0) {
+ distance = 0;
+ } else if (distance > length) {
+ distance = length;
+ }
+
+ SkScalar t;
+ const Segment* seg = this->distanceToSegment(distance, &t);
+
+ compute_pos_tan(&fPts[seg->fPtIndex], seg->fType, t, pos, tangent);
+ return true;
+}
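+
+// Example of the query API (an illustrative sketch built only from the methods defined here):
+//     SkPathMeasure meas(path, false, 1.0f);
+//     SkScalar len = meas.getLength();
+//     SkPoint pos; SkVector tan;
+//     if (meas.getPosTan(len * 0.5f, &pos, &tan)) {
+//         // pos and tan now describe the point halfway along the current contour
+//     }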
+
+bool SkPathMeasure::getMatrix(SkScalar distance, SkMatrix* matrix,
+ MatrixFlags flags) {
+ if (nullptr == fPath) {
+ return false;
+ }
+
+ SkPoint position;
+ SkVector tangent;
+
+ if (this->getPosTan(distance, &position, &tangent)) {
+ if (matrix) {
+ if (flags & kGetTangent_MatrixFlag) {
+ matrix->setSinCos(tangent.fY, tangent.fX, 0, 0);
+ } else {
+ matrix->reset();
+ }
+ if (flags & kGetPosition_MatrixFlag) {
+ matrix->postTranslate(position.fX, position.fY);
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
+bool SkPathMeasure::getSegment(SkScalar startD, SkScalar stopD, SkPath* dst,
+ bool startWithMoveTo) {
+ SkASSERT(dst);
+
+ SkScalar length = this->getLength(); // ensure we have built our segments
+
+ if (startD < 0) {
+ startD = 0;
+ }
+ if (stopD > length) {
+ stopD = length;
+ }
+ if (startD > stopD) {
+ return false;
+ }
+ if (!fSegments.count()) {
+ return false;
+ }
+
+ SkPoint p;
+ SkScalar startT, stopT;
+ const Segment* seg = this->distanceToSegment(startD, &startT);
+ const Segment* stopSeg = this->distanceToSegment(stopD, &stopT);
+ SkASSERT(seg <= stopSeg);
+
+ if (startWithMoveTo) {
+ compute_pos_tan(&fPts[seg->fPtIndex], seg->fType, startT, &p, nullptr);
+ dst->moveTo(p);
+ }
+
+ if (seg->fPtIndex == stopSeg->fPtIndex) {
+ SkPathMeasure_segTo(&fPts[seg->fPtIndex], seg->fType, startT, stopT, dst);
+ } else {
+ do {
+ SkPathMeasure_segTo(&fPts[seg->fPtIndex], seg->fType, startT, SK_Scalar1, dst);
+ seg = SkPathMeasure::NextSegment(seg);
+ startT = 0;
+ } while (seg->fPtIndex < stopSeg->fPtIndex);
+ SkPathMeasure_segTo(&fPts[seg->fPtIndex], seg->fType, 0, stopT, dst);
+ }
+ return true;
+}
+
+bool SkPathMeasure::isClosed() {
+ (void)this->getLength();
+ return fIsClosed;
+}
+
+/** Move to the next contour in the path. Return true if one exists, or false if
+ we're done with the path.
+*/
+bool SkPathMeasure::nextContour() {
+ fLength = -1;
+ return this->getLength() > 0;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+
+void SkPathMeasure::dump() {
+ SkDebugf("pathmeas: length=%g, segs=%d\n", fLength, fSegments.count());
+
+ for (int i = 0; i < fSegments.count(); i++) {
+ const Segment* seg = &fSegments[i];
+ SkDebugf("pathmeas: seg[%d] distance=%g, point=%d, t=%g, type=%d\n",
+ i, seg->fDistance, seg->fPtIndex, seg->getScalarT(),
+ seg->fType);
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPathMeasurePriv.h b/gfx/skia/skia/src/core/SkPathMeasurePriv.h
new file mode 100644
index 000000000..8dcf717ba
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPathMeasurePriv.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathMeasurePriv_DEFINED
+#define SkPathMeasurePriv_DEFINED
+
+#include "SkPath.h"
+#include "SkPoint.h"
+#include "SkGeometry.h"
+
+// Used in the Segment struct defined in SkPathMeasure.h
+// It is used as a 2-bit field so if you add to this
+// you must increase the size of the bitfield there.
+enum SkSegType {
+ kLine_SegType,
+ kQuad_SegType,
+ kCubic_SegType,
+ kConic_SegType,
+};
+
+
+void SkPathMeasure_segTo(const SkPoint pts[], unsigned segType,
+ SkScalar startT, SkScalar stopT, SkPath* dst);
+
+#endif // SkPathMeasurePriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkPathPriv.h b/gfx/skia/skia/src/core/SkPathPriv.h
new file mode 100644
index 000000000..029cb759d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPathPriv.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathPriv_DEFINED
+#define SkPathPriv_DEFINED
+
+#include "SkPath.h"
+
+class SkPathPriv {
+public:
+ enum FirstDirection {
+ kCW_FirstDirection, // == SkPath::kCW_Direction
+ kCCW_FirstDirection, // == SkPath::kCCW_Direction
+ kUnknown_FirstDirection,
+ };
+
+ static FirstDirection AsFirstDirection(SkPath::Direction dir) {
+ // since we agree numerically for the values in Direction, we can just cast.
+ return (FirstDirection)dir;
+ }
+
+ /**
+ * Return the opposite of the specified direction. kUnknown is its own
+ * opposite.
+ */
+ static FirstDirection OppositeFirstDirection(FirstDirection dir) {
+ static const FirstDirection gOppositeDir[] = {
+ kCCW_FirstDirection, kCW_FirstDirection, kUnknown_FirstDirection,
+ };
+ return gOppositeDir[dir];
+ }
+
+ /**
+ * Tries to quickly compute the direction of the first non-degenerate
+ * contour. If it can be computed, return true and set dir to that
+ * direction. If it cannot be (quickly) determined, return false and ignore
+ * the dir parameter. If the direction was determined, it is cached to make
+ * subsequent calls return quickly.
+ */
+ static bool CheapComputeFirstDirection(const SkPath&, FirstDirection* dir);
+
+ /**
+ * Returns true if the path's direction can be computed via
+ * CheapComputeFirstDirection() and if that computed direction matches the
+ * specified direction. If dir is kUnknown, returns true if the direction
+ * cannot be computed.
+ */
+ static bool CheapIsFirstDirection(const SkPath& path, FirstDirection dir) {
+ FirstDirection computedDir = kUnknown_FirstDirection;
+ (void)CheapComputeFirstDirection(path, &computedDir);
+ return computedDir == dir;
+ }
+
+ static bool IsClosedSingleContour(const SkPath& path) {
+ int verbCount = path.countVerbs();
+ if (verbCount == 0)
+ return false;
+ int moveCount = 0;
+ auto verbs = path.fPathRef->verbs();
+ for (int i = 0; i < verbCount; i++) {
+ switch (verbs[~i]) { // verbs are stored backwards; we use [~i] to get the i'th verb
+ case SkPath::Verb::kMove_Verb:
+ moveCount += 1;
+ if (moveCount > 1) {
+ return false;
+ }
+ break;
+ case SkPath::Verb::kClose_Verb:
+ if (i == verbCount - 1) {
+ return true;
+ }
+ return false;
+ default: break;
+ }
+ }
+ return false;
+ }
+
+ static void AddGenIDChangeListener(const SkPath& path, SkPathRef::GenIDChangeListener* listener) {
+ path.fPathRef->addGenIDChangeListener(listener);
+ }
+
+ /**
+ * This returns true for a rect that begins and ends at the same corner and has either a move
+ * followed by four lines or a move followed by 3 lines and a close. None of the parameters are
+ * optional. This does not permit degenerate line or point rectangles.
+ */
+ static bool IsSimpleClosedRect(const SkPath& path, SkRect* rect, SkPath::Direction* direction,
+ unsigned* start);
+
+ /**
+ * Creates a path from arc params using the semantics of SkCanvas::drawArc. This function
+ * assumes empty ovals and zero sweeps have already been filtered out.
+ */
+ static void CreateDrawArcPath(SkPath* path, const SkRect& oval, SkScalar startAngle,
+ SkScalar sweepAngle, bool useCenter, bool isFillNoPathEffect);
+
+ /**
+ * Returns a pointer to the verb data. Note that the verbs are stored backwards in memory and
+ * thus the returned pointer is the last verb.
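+ * (For a path with N verbs, VerbData(path)[0] is therefore the last verb in path order and
+ * VerbData(path)[N - 1] is the first, i.e. the initial move.)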
+ */
+ static const uint8_t* VerbData(const SkPath& path) {
+ return path.fPathRef->verbsMemBegin();
+ }
+
+ /** Returns a raw pointer to the path points */
+ static const SkPoint* PointData(const SkPath& path) {
+ return path.fPathRef->points();
+ }
+
+ /** Returns the number of conic weights in the path */
+ static int ConicWeightCnt(const SkPath& path) {
+ return path.fPathRef->countWeights();
+ }
+
+ /** Returns a raw pointer to the path conic weights. */
+ static const SkScalar* ConicWeightData(const SkPath& path) {
+ return path.fPathRef->conicWeights();
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPathRef.cpp b/gfx/skia/skia/src/core/SkPathRef.cpp
new file mode 100644
index 000000000..2f212cde1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPathRef.cpp
@@ -0,0 +1,770 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBuffer.h"
+#include "SkOnce.h"
+#include "SkPath.h"
+#include "SkPathRef.h"
+#include <limits>
+
+//////////////////////////////////////////////////////////////////////////////
+SkPathRef::Editor::Editor(SkAutoTUnref<SkPathRef>* pathRef,
+ int incReserveVerbs,
+ int incReservePoints)
+{
+ if ((*pathRef)->unique()) {
+ (*pathRef)->incReserve(incReserveVerbs, incReservePoints);
+ } else {
+ SkPathRef* copy = new SkPathRef;
+ copy->copy(**pathRef, incReserveVerbs, incReservePoints);
+ pathRef->reset(copy);
+ }
+ fPathRef = *pathRef;
+ fPathRef->callGenIDChangeListeners();
+ fPathRef->fGenerationID = 0;
+ SkDEBUGCODE(sk_atomic_inc(&fPathRef->fEditorsAttached);)
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+SkPathRef::~SkPathRef() {
+ this->callGenIDChangeListeners();
+ SkDEBUGCODE(this->validate();)
+ sk_free(fPoints);
+
+ SkDEBUGCODE(fPoints = nullptr;)
+ SkDEBUGCODE(fVerbs = nullptr;)
+ SkDEBUGCODE(fVerbCnt = 0x9999999;)
+ SkDEBUGCODE(fPointCnt = 0xAAAAAAA;)
+ SkDEBUGCODE(fPointCnt = 0xBBBBBBB;)
+ SkDEBUGCODE(fGenerationID = 0xEEEEEEEE;)
+ SkDEBUGCODE(fEditorsAttached = 0x7777777;)
+}
+
+static SkPathRef* gEmpty = nullptr;
+
+SkPathRef* SkPathRef::CreateEmpty() {
+ static SkOnce once;
+ once([]{
+ gEmpty = new SkPathRef;
+ gEmpty->computeBounds(); // Avoids races later to be the first to do this.
+ });
+ return SkRef(gEmpty);
+}
+
+static void transform_dir_and_start(const SkMatrix& matrix, bool isRRect, bool* isCCW,
+ unsigned* start) {
+ int inStart = *start;
+ int rm = 0;
+ if (isRRect) {
+ // Reduce rrect indices to oval indices and remember the remainder.
+ // Ovals have one index per side whereas rrects have two.
+ rm = inStart & 0b1;
+ inStart /= 2;
+ }
+ // Is the antidiagonal non-zero (otherwise the diagonal is zero)
+ int antiDiag;
+ // Is the non-zero value in the top row (either kMScaleX or kMSkewX) negative
+ int topNeg;
+ // Are the two non-zero diagonal or antidiagonal values the same sign.
+ int sameSign;
+ if (matrix.get(SkMatrix::kMScaleX) != 0) {
+ antiDiag = 0b00;
+ if (matrix.get(SkMatrix::kMScaleX) > 0) {
+ topNeg = 0b00;
+ sameSign = matrix.get(SkMatrix::kMScaleY) > 0 ? 0b01 : 0b00;
+ } else {
+ topNeg = 0b10;
+ sameSign = matrix.get(SkMatrix::kMScaleY) > 0 ? 0b00 : 0b01;
+ }
+ } else {
+ antiDiag = 0b01;
+ if (matrix.get(SkMatrix::kMSkewX) > 0) {
+ topNeg = 0b00;
+ sameSign = matrix.get(SkMatrix::kMSkewY) > 0 ? 0b01 : 0b00;
+ } else {
+ topNeg = 0b10;
+ sameSign = matrix.get(SkMatrix::kMSkewY) > 0 ? 0b00 : 0b01;
+ }
+ }
+ if (sameSign != antiDiag) {
+ // This is a rotation (and maybe scale). The direction is unchanged.
+ // Trust me on the start computation (or draw yourself some pictures)
+ *start = (inStart + 4 - (topNeg | antiDiag)) % 4;
+ SkASSERT(*start < 4);
+ if (isRRect) {
+ *start = 2 * *start + rm;
+ }
+ } else {
+ // This is a mirror (and maybe scale). The direction is reversed.
+ *isCCW = !*isCCW;
+ // Trust me on the start computation (or draw yourself some pictures)
+ *start = (6 + (topNeg | antiDiag) - inStart) % 4;
+ SkASSERT(*start < 4);
+ if (isRRect) {
+ *start = 2 * *start + (rm ? 0 : 1);
+ }
+ }
+}
+
+void SkPathRef::CreateTransformedCopy(SkAutoTUnref<SkPathRef>* dst,
+ const SkPathRef& src,
+ const SkMatrix& matrix) {
+ SkDEBUGCODE(src.validate();)
+ if (matrix.isIdentity()) {
+ if (*dst != &src) {
+ src.ref();
+ dst->reset(const_cast<SkPathRef*>(&src));
+ SkDEBUGCODE((*dst)->validate();)
+ }
+ return;
+ }
+
+ if (!(*dst)->unique()) {
+ dst->reset(new SkPathRef);
+ }
+
+ if (*dst != &src) {
+ (*dst)->resetToSize(src.fVerbCnt, src.fPointCnt, src.fConicWeights.count());
+ sk_careful_memcpy((*dst)->verbsMemWritable(), src.verbsMemBegin(),
+ src.fVerbCnt * sizeof(uint8_t));
+ (*dst)->fConicWeights = src.fConicWeights;
+ }
+
+ SkASSERT((*dst)->countPoints() == src.countPoints());
+ SkASSERT((*dst)->countVerbs() == src.countVerbs());
+ SkASSERT((*dst)->fConicWeights.count() == src.fConicWeights.count());
+
+ // Need to check this here in case (&src == dst)
+ bool canXformBounds = !src.fBoundsIsDirty && matrix.rectStaysRect() && src.countPoints() > 1;
+
+ matrix.mapPoints((*dst)->fPoints, src.points(), src.fPointCnt);
+
+ /*
+ * Here we optimize the bounds computation, by noting if the bounds are
+ * already known, and if so, we just transform those as well and mark
+ * them as "known", rather than force the transformed path to have to
+ * recompute them.
+ *
+ * Special gotchas if the path is effectively empty (<= 1 point) or
+ * if it is non-finite. In those cases bounds need to stay empty,
+ * regardless of the matrix.
+ */
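+ // rectStaysRect() is true only for matrices that map rectangles to rectangles (axis-aligned
+ // scale/translate and 90-degree rotations), which is what keeps the mapRect() call below an
+ // exact bounds transform rather than a conservative one.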
+ if (canXformBounds) {
+ (*dst)->fBoundsIsDirty = false;
+ if (src.fIsFinite) {
+ matrix.mapRect(&(*dst)->fBounds, src.fBounds);
+ if (!((*dst)->fIsFinite = (*dst)->fBounds.isFinite())) {
+ (*dst)->fBounds.setEmpty();
+ } else if (src.fPointCnt & 1) {
+ /* Matrix optimizations may cause the first point to use slightly different
+ * math for its transform, which can lead to it being outside the transformed
+ * bounds. Include it in the bounds just in case.
+ */
+ (*dst)->fBounds.growToInclude((*dst)->fPoints[0].fX, (*dst)->fPoints[0].fY);
+ }
+ } else {
+ (*dst)->fIsFinite = false;
+ (*dst)->fBounds.setEmpty();
+ }
+ } else {
+ (*dst)->fBoundsIsDirty = true;
+ }
+
+ (*dst)->fSegmentMask = src.fSegmentMask;
+
+ // It's an oval only if it stays a rect.
+ bool rectStaysRect = matrix.rectStaysRect();
+ (*dst)->fIsOval = src.fIsOval && rectStaysRect;
+ (*dst)->fIsRRect = src.fIsRRect && rectStaysRect;
+ if ((*dst)->fIsOval || (*dst)->fIsRRect) {
+ unsigned start = src.fRRectOrOvalStartIdx;
+ bool isCCW = SkToBool(src.fRRectOrOvalIsCCW);
+ transform_dir_and_start(matrix, (*dst)->fIsRRect, &isCCW, &start);
+ (*dst)->fRRectOrOvalIsCCW = isCCW;
+ (*dst)->fRRectOrOvalStartIdx = start;
+ }
+
+ SkDEBUGCODE((*dst)->validate();)
+}
+
+SkPathRef* SkPathRef::CreateFromBuffer(SkRBuffer* buffer) {
+ SkPathRef* ref = new SkPathRef;
+
+ int32_t packed;
+ if (!buffer->readS32(&packed)) {
+ delete ref;
+ return nullptr;
+ }
+
+ ref->fIsFinite = (packed >> kIsFinite_SerializationShift) & 1;
+ uint8_t segmentMask = (packed >> kSegmentMask_SerializationShift) & 0xF;
+ bool isOval = (packed >> kIsOval_SerializationShift) & 1;
+ bool isRRect = (packed >> kIsRRect_SerializationShift) & 1;
+ bool rrectOrOvalIsCCW = (packed >> kRRectOrOvalIsCCW_SerializationShift) & 1;
+ unsigned rrectOrOvalStartIdx = (packed >> kRRectOrOvalStartIdx_SerializationShift) & 0x7;
+
+ int32_t verbCount, pointCount, conicCount;
+ ptrdiff_t maxPtrDiff = std::numeric_limits<ptrdiff_t>::max();
+ if (!buffer->readU32(&(ref->fGenerationID)) ||
+ !buffer->readS32(&verbCount) ||
+ verbCount < 0 ||
+ static_cast<uint32_t>(verbCount) > maxPtrDiff/sizeof(uint8_t) ||
+ !buffer->readS32(&pointCount) ||
+ pointCount < 0 ||
+ static_cast<uint32_t>(pointCount) > maxPtrDiff/sizeof(SkPoint) ||
+ sizeof(uint8_t) * verbCount + sizeof(SkPoint) * pointCount >
+ static_cast<size_t>(maxPtrDiff) ||
+ !buffer->readS32(&conicCount) ||
+ conicCount < 0) {
+ delete ref;
+ return nullptr;
+ }
+
+ ref->resetToSize(verbCount, pointCount, conicCount);
+ SkASSERT(verbCount == ref->countVerbs());
+ SkASSERT(pointCount == ref->countPoints());
+ SkASSERT(conicCount == ref->fConicWeights.count());
+
+ if (!buffer->read(ref->verbsMemWritable(), verbCount * sizeof(uint8_t)) ||
+ !buffer->read(ref->fPoints, pointCount * sizeof(SkPoint)) ||
+ !buffer->read(ref->fConicWeights.begin(), conicCount * sizeof(SkScalar)) ||
+ !buffer->read(&ref->fBounds, sizeof(SkRect))) {
+ delete ref;
+ return nullptr;
+ }
+ ref->fBoundsIsDirty = false;
+
+ // resetToSize clears fSegmentMask and fIsOval
+ ref->fSegmentMask = segmentMask;
+ ref->fIsOval = isOval;
+ ref->fIsRRect = isRRect;
+ ref->fRRectOrOvalIsCCW = rrectOrOvalIsCCW;
+ ref->fRRectOrOvalStartIdx = rrectOrOvalStartIdx;
+ return ref;
+}
+
+void SkPathRef::Rewind(SkAutoTUnref<SkPathRef>* pathRef) {
+ if ((*pathRef)->unique()) {
+ SkDEBUGCODE((*pathRef)->validate();)
+ (*pathRef)->callGenIDChangeListeners();
+ (*pathRef)->fBoundsIsDirty = true; // this also invalidates fIsFinite
+ (*pathRef)->fVerbCnt = 0;
+ (*pathRef)->fPointCnt = 0;
+ (*pathRef)->fFreeSpace = (*pathRef)->currSize();
+ (*pathRef)->fGenerationID = 0;
+ (*pathRef)->fConicWeights.rewind();
+ (*pathRef)->fSegmentMask = 0;
+ (*pathRef)->fIsOval = false;
+ (*pathRef)->fIsRRect = false;
+ SkDEBUGCODE((*pathRef)->validate();)
+ } else {
+ int oldVCnt = (*pathRef)->countVerbs();
+ int oldPCnt = (*pathRef)->countPoints();
+ pathRef->reset(new SkPathRef);
+ (*pathRef)->resetToSize(0, 0, 0, oldVCnt, oldPCnt);
+ }
+}
+
+bool SkPathRef::operator== (const SkPathRef& ref) const {
+ SkDEBUGCODE(this->validate();)
+ SkDEBUGCODE(ref.validate();)
+
+ // We explicitly check fSegmentMask as a quick-reject. We could skip it,
+ // since it is only a cache of info in fVerbs, but it's a fast way to
+ // notice a difference.
+ if (fSegmentMask != ref.fSegmentMask) {
+ return false;
+ }
+
+ bool genIDMatch = fGenerationID && fGenerationID == ref.fGenerationID;
+#ifdef SK_RELEASE
+ if (genIDMatch) {
+ return true;
+ }
+#endif
+ if (fPointCnt != ref.fPointCnt ||
+ fVerbCnt != ref.fVerbCnt) {
+ SkASSERT(!genIDMatch);
+ return false;
+ }
+ if (0 == ref.fVerbCnt) {
+ SkASSERT(0 == ref.fPointCnt);
+ return true;
+ }
+ SkASSERT(this->verbsMemBegin() && ref.verbsMemBegin());
+ if (0 != memcmp(this->verbsMemBegin(),
+ ref.verbsMemBegin(),
+ ref.fVerbCnt * sizeof(uint8_t))) {
+ SkASSERT(!genIDMatch);
+ return false;
+ }
+ SkASSERT(this->points() && ref.points());
+ if (0 != memcmp(this->points(),
+ ref.points(),
+ ref.fPointCnt * sizeof(SkPoint))) {
+ SkASSERT(!genIDMatch);
+ return false;
+ }
+ if (fConicWeights != ref.fConicWeights) {
+ SkASSERT(!genIDMatch);
+ return false;
+ }
+ return true;
+}
+
+void SkPathRef::writeToBuffer(SkWBuffer* buffer) const {
+ SkDEBUGCODE(this->validate();)
+ SkDEBUGCODE(size_t beforePos = buffer->pos();)
+
+ // Call getBounds() to ensure (as a side-effect) that fBounds
+ // and fIsFinite are computed.
+ const SkRect& bounds = this->getBounds();
+
+ int32_t packed = ((fRRectOrOvalStartIdx & 7) << kRRectOrOvalStartIdx_SerializationShift) |
+ ((fRRectOrOvalIsCCW & 1) << kRRectOrOvalIsCCW_SerializationShift) |
+ ((fIsFinite & 1) << kIsFinite_SerializationShift) |
+ ((fIsOval & 1) << kIsOval_SerializationShift) |
+ ((fIsRRect & 1) << kIsRRect_SerializationShift) |
+ (fSegmentMask << kSegmentMask_SerializationShift);
+ buffer->write32(packed);
+
+ // TODO: write gen ID here. Problem: We don't know if we're cross process or not from
+ // SkWBuffer. Until this is fixed we write 0.
+ buffer->write32(0);
+ buffer->write32(fVerbCnt);
+ buffer->write32(fPointCnt);
+ buffer->write32(fConicWeights.count());
+ buffer->write(verbsMemBegin(), fVerbCnt * sizeof(uint8_t));
+ buffer->write(fPoints, fPointCnt * sizeof(SkPoint));
+ buffer->write(fConicWeights.begin(), fConicWeights.bytes());
+ buffer->write(&bounds, sizeof(bounds));
+
+ SkASSERT(buffer->pos() - beforePos == (size_t) this->writeSize());
+}
+
+uint32_t SkPathRef::writeSize() const {
+ return uint32_t(5 * sizeof(uint32_t) +
+ fVerbCnt * sizeof(uint8_t) +
+ fPointCnt * sizeof(SkPoint) +
+ fConicWeights.bytes() +
+ sizeof(SkRect));
+}
+
+void SkPathRef::copy(const SkPathRef& ref,
+ int additionalReserveVerbs,
+ int additionalReservePoints) {
+ SkDEBUGCODE(this->validate();)
+ this->resetToSize(ref.fVerbCnt, ref.fPointCnt, ref.fConicWeights.count(),
+ additionalReserveVerbs, additionalReservePoints);
+ sk_careful_memcpy(this->verbsMemWritable(), ref.verbsMemBegin(), ref.fVerbCnt*sizeof(uint8_t));
+ sk_careful_memcpy(this->fPoints, ref.fPoints, ref.fPointCnt * sizeof(SkPoint));
+ fConicWeights = ref.fConicWeights;
+ fBoundsIsDirty = ref.fBoundsIsDirty;
+ if (!fBoundsIsDirty) {
+ fBounds = ref.fBounds;
+ fIsFinite = ref.fIsFinite;
+ }
+ fSegmentMask = ref.fSegmentMask;
+ fIsOval = ref.fIsOval;
+ fIsRRect = ref.fIsRRect;
+ fRRectOrOvalIsCCW = ref.fRRectOrOvalIsCCW;
+ fRRectOrOvalStartIdx = ref.fRRectOrOvalStartIdx;
+ SkDEBUGCODE(this->validate();)
+}
+
+
+void SkPathRef::interpolate(const SkPathRef& ending, SkScalar weight, SkPathRef* out) const {
+ const SkScalar* inValues = &ending.getPoints()->fX;
+ SkScalar* outValues = &out->getPoints()->fX;
+ int count = out->countPoints() * 2;
+ for (int index = 0; index < count; ++index) {
+ outValues[index] = outValues[index] * weight + inValues[index] * (1 - weight);
+ }
+ out->fBoundsIsDirty = true;
+ out->fIsOval = false;
+ out->fIsRRect = false;
+}
+
+SkPoint* SkPathRef::growForRepeatedVerb(int /*SkPath::Verb*/ verb,
+ int numVbs,
+ SkScalar** weights) {
+ // This value is just made-up for now. When count is 4, calling memset was much
+ // slower than just writing the loop. This seems odd, and hopefully in the
+ // future this will appear to have been a fluke...
+ static const unsigned int kMIN_COUNT_FOR_MEMSET_TO_BE_FAST = 16;
+
+ SkDEBUGCODE(this->validate();)
+ int pCnt;
+ bool dirtyAfterEdit = true;
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ pCnt = numVbs;
+ dirtyAfterEdit = false;
+ break;
+ case SkPath::kLine_Verb:
+ fSegmentMask |= SkPath::kLine_SegmentMask;
+ pCnt = numVbs;
+ break;
+ case SkPath::kQuad_Verb:
+ fSegmentMask |= SkPath::kQuad_SegmentMask;
+ pCnt = 2 * numVbs;
+ break;
+ case SkPath::kConic_Verb:
+ fSegmentMask |= SkPath::kConic_SegmentMask;
+ pCnt = 2 * numVbs;
+ break;
+ case SkPath::kCubic_Verb:
+ fSegmentMask |= SkPath::kCubic_SegmentMask;
+ pCnt = 3 * numVbs;
+ break;
+ case SkPath::kClose_Verb:
+ SkDEBUGFAIL("growForRepeatedVerb called for kClose_Verb");
+ pCnt = 0;
+ dirtyAfterEdit = false;
+ break;
+ case SkPath::kDone_Verb:
+ SkDEBUGFAIL("growForRepeatedVerb called for kDone");
+ // fall through
+ default:
+ SkDEBUGFAIL("default should not be reached");
+ pCnt = 0;
+ dirtyAfterEdit = false;
+ }
+
+ size_t space = numVbs * sizeof(uint8_t) + pCnt * sizeof (SkPoint);
+ this->makeSpace(space);
+
+ SkPoint* ret = fPoints + fPointCnt;
+ uint8_t* vb = fVerbs - fVerbCnt;
+
+ // cast to unsigned, so if kMIN_COUNT_FOR_MEMSET_TO_BE_FAST is defined to
+ // be 0, the compiler will remove the test/branch entirely.
+ if ((unsigned)numVbs >= kMIN_COUNT_FOR_MEMSET_TO_BE_FAST) {
+ memset(vb - numVbs, verb, numVbs);
+ } else {
+ for (int i = 0; i < numVbs; ++i) {
+ vb[~i] = verb;
+ }
+ }
+
+ fVerbCnt += numVbs;
+ fPointCnt += pCnt;
+ fFreeSpace -= space;
+ fBoundsIsDirty = true; // this also invalidates fIsFinite
+ if (dirtyAfterEdit) {
+ fIsOval = false;
+ fIsRRect = false;
+ }
+
+ if (SkPath::kConic_Verb == verb) {
+ SkASSERT(weights);
+ *weights = fConicWeights.append(numVbs);
+ }
+
+ SkDEBUGCODE(this->validate();)
+ return ret;
+}
+
+SkPoint* SkPathRef::growForVerb(int /* SkPath::Verb*/ verb, SkScalar weight) {
+ SkDEBUGCODE(this->validate();)
+ int pCnt;
+ bool dirtyAfterEdit = true;
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ pCnt = 1;
+ dirtyAfterEdit = false;
+ break;
+ case SkPath::kLine_Verb:
+ fSegmentMask |= SkPath::kLine_SegmentMask;
+ pCnt = 1;
+ break;
+ case SkPath::kQuad_Verb:
+ fSegmentMask |= SkPath::kQuad_SegmentMask;
+ pCnt = 2;
+ break;
+ case SkPath::kConic_Verb:
+ fSegmentMask |= SkPath::kConic_SegmentMask;
+ pCnt = 2;
+ break;
+ case SkPath::kCubic_Verb:
+ fSegmentMask |= SkPath::kCubic_SegmentMask;
+ pCnt = 3;
+ break;
+ case SkPath::kClose_Verb:
+ pCnt = 0;
+ dirtyAfterEdit = false;
+ break;
+ case SkPath::kDone_Verb:
+ SkDEBUGFAIL("growForVerb called for kDone");
+ // fall through
+ default:
+ SkDEBUGFAIL("default is not reached");
+ dirtyAfterEdit = false;
+ pCnt = 0;
+ }
+ size_t space = sizeof(uint8_t) + pCnt * sizeof (SkPoint);
+ this->makeSpace(space);
+ this->fVerbs[~fVerbCnt] = verb;
+ SkPoint* ret = fPoints + fPointCnt;
+ fVerbCnt += 1;
+ fPointCnt += pCnt;
+ fFreeSpace -= space;
+ fBoundsIsDirty = true; // this also invalidates fIsFinite
+ if (dirtyAfterEdit) {
+ fIsOval = false;
+ fIsRRect = false;
+ }
+
+ if (SkPath::kConic_Verb == verb) {
+ *fConicWeights.append() = weight;
+ }
+
+ SkDEBUGCODE(this->validate();)
+ return ret;
+}
+
+uint32_t SkPathRef::genID() const {
+ SkASSERT(!fEditorsAttached);
+ static const uint32_t kMask = (static_cast<int64_t>(1) << SkPath::kPathRefGenIDBitCnt) - 1;
+ if (!fGenerationID) {
+ if (0 == fPointCnt && 0 == fVerbCnt) {
+ fGenerationID = kEmptyGenID;
+ } else {
+ static int32_t gPathRefGenerationID;
+ // do a loop in case our global wraps around, as we never want to return a 0 or the
+ // empty ID
+ do {
+ fGenerationID = (sk_atomic_inc(&gPathRefGenerationID) + 1) & kMask;
+ } while (fGenerationID <= kEmptyGenID);
+ }
+ }
+ return fGenerationID;
+}
+
+void SkPathRef::addGenIDChangeListener(GenIDChangeListener* listener) {
+ if (nullptr == listener || this == gEmpty) {
+ delete listener;
+ return;
+ }
+ *fGenIDChangeListeners.append() = listener;
+}
+
+// This must be called *before* the genID gets changed or zeroed.
+void SkPathRef::callGenIDChangeListeners() {
+ for (int i = 0; i < fGenIDChangeListeners.count(); i++) {
+ fGenIDChangeListeners[i]->onChange();
+ }
+
+ // Listeners get at most one shot, so whether these triggered or not, blow them away.
+ fGenIDChangeListeners.deleteAll();
+}
+
+SkRRect SkPathRef::getRRect() const {
+ const SkRect& bounds = this->getBounds();
+ SkVector radii[4] = {{0, 0}, {0, 0}, {0, 0}, {0, 0}};
+ Iter iter(*this);
+ SkPoint pts[4];
+ uint8_t verb = iter.next(pts);
+ SkASSERT(SkPath::kMove_Verb == verb);
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+ if (SkPath::kConic_Verb == verb) {
+ SkVector v1_0 = pts[1] - pts[0];
+ SkVector v2_1 = pts[2] - pts[1];
+ SkVector dxdy;
+ if (v1_0.fX) {
+ SkASSERT(!v2_1.fX && !v1_0.fY);
+ dxdy.set(SkScalarAbs(v1_0.fX), SkScalarAbs(v2_1.fY));
+ } else if (!v1_0.fY) {
+ SkASSERT(!v2_1.fX || !v2_1.fY);
+ dxdy.set(SkScalarAbs(v2_1.fX), SkScalarAbs(v2_1.fY));
+ } else {
+ SkASSERT(!v2_1.fY);
+ dxdy.set(SkScalarAbs(v2_1.fX), SkScalarAbs(v1_0.fY));
+ }
+ SkRRect::Corner corner =
+ pts[1].fX == bounds.fLeft ?
+ pts[1].fY == bounds.fTop ?
+ SkRRect::kUpperLeft_Corner : SkRRect::kLowerLeft_Corner :
+ pts[1].fY == bounds.fTop ?
+ SkRRect::kUpperRight_Corner : SkRRect::kLowerRight_Corner;
+ SkASSERT(!radii[corner].fX && !radii[corner].fY);
+ radii[corner] = dxdy;
+ } else {
+ SkASSERT((verb == SkPath::kLine_Verb
+ && (!(pts[1].fX - pts[0].fX) || !(pts[1].fY - pts[0].fY)))
+ || verb == SkPath::kClose_Verb);
+ }
+ }
+ SkRRect rrect;
+ rrect.setRectRadii(bounds, radii);
+ return rrect;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPathRef::Iter::Iter() {
+#ifdef SK_DEBUG
+ fPts = nullptr;
+ fConicWeights = nullptr;
+#endif
+ // need to init enough to make next() harmlessly return kDone_Verb
+ fVerbs = nullptr;
+ fVerbStop = nullptr;
+}
+
+SkPathRef::Iter::Iter(const SkPathRef& path) {
+ this->setPathRef(path);
+}
+
+void SkPathRef::Iter::setPathRef(const SkPathRef& path) {
+ fPts = path.points();
+ fVerbs = path.verbs();
+ fVerbStop = path.verbsMemBegin();
+ fConicWeights = path.conicWeights();
+ if (fConicWeights) {
+ fConicWeights -= 1; // begin one behind
+ }
+}
+
+uint8_t SkPathRef::Iter::next(SkPoint pts[4]) {
+ SkASSERT(pts);
+ if (fVerbs == fVerbStop) {
+ return (uint8_t) SkPath::kDone_Verb;
+ }
+
+ // fVerbs points one beyond next verb so decrement first.
+ unsigned verb = *(--fVerbs);
+ const SkPoint* srcPts = fPts;
+
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ pts[0] = srcPts[0];
+ srcPts += 1;
+ break;
+ case SkPath::kLine_Verb:
+ pts[0] = srcPts[-1];
+ pts[1] = srcPts[0];
+ srcPts += 1;
+ break;
+ case SkPath::kConic_Verb:
+ fConicWeights += 1;
+ // fall-through
+ case SkPath::kQuad_Verb:
+ pts[0] = srcPts[-1];
+ pts[1] = srcPts[0];
+ pts[2] = srcPts[1];
+ srcPts += 2;
+ break;
+ case SkPath::kCubic_Verb:
+ pts[0] = srcPts[-1];
+ pts[1] = srcPts[0];
+ pts[2] = srcPts[1];
+ pts[3] = srcPts[2];
+ srcPts += 3;
+ break;
+ case SkPath::kClose_Verb:
+ break;
+ case SkPath::kDone_Verb:
+ SkASSERT(fVerbs == fVerbStop);
+ break;
+ }
+ fPts = srcPts;
+ return (uint8_t) verb;
+}
+
+uint8_t SkPathRef::Iter::peek() const {
+ const uint8_t* next = fVerbs - 1;
+ return next <= fVerbStop ? (uint8_t) SkPath::kDone_Verb : *next;
+}
+
+#ifdef SK_DEBUG
+
+#include "SkNx.h"
+
+void SkPathRef::validate() const {
+ SkASSERT(static_cast<ptrdiff_t>(fFreeSpace) >= 0);
+ SkASSERT(reinterpret_cast<intptr_t>(fVerbs) - reinterpret_cast<intptr_t>(fPoints) >= 0);
+ SkASSERT((nullptr == fPoints) == (nullptr == fVerbs));
+ SkASSERT(!(nullptr == fPoints && 0 != fFreeSpace));
+ SkASSERT(!(nullptr == fPoints && 0 != fFreeSpace));
+ SkASSERT(!(nullptr == fPoints && fPointCnt));
+ SkASSERT(!(nullptr == fVerbs && fVerbCnt));
+ SkASSERT(this->currSize() ==
+ fFreeSpace + sizeof(SkPoint) * fPointCnt + sizeof(uint8_t) * fVerbCnt);
+
+ if (fIsOval || fIsRRect) {
+ // Currently we don't allow both of these to be set, even though ovals are round rects.
+ SkASSERT(fIsOval != fIsRRect);
+ if (fIsOval) {
+ SkASSERT(fRRectOrOvalStartIdx < 4);
+ } else {
+ SkASSERT(fRRectOrOvalStartIdx < 8);
+ }
+ }
+
+ if (!fBoundsIsDirty && !fBounds.isEmpty()) {
+ bool isFinite = true;
+ Sk2s leftTop = Sk2s(fBounds.fLeft, fBounds.fTop);
+ Sk2s rightBot = Sk2s(fBounds.fRight, fBounds.fBottom);
+ for (int i = 0; i < fPointCnt; ++i) {
+ Sk2s point = Sk2s(fPoints[i].fX, fPoints[i].fY);
+#ifdef SK_DEBUG
+ if (fPoints[i].isFinite() &&
+ ((point < leftTop).anyTrue() || (point > rightBot).anyTrue())) {
+ SkDebugf("bounds: %f %f %f %f\n",
+ fBounds.fLeft, fBounds.fTop, fBounds.fRight, fBounds.fBottom);
+ for (int j = 0; j < fPointCnt; ++j) {
+ if (i == j) {
+ SkDebugf("*");
+ }
+ SkDebugf("%f %f\n", fPoints[j].fX, fPoints[j].fY);
+ }
+ }
+#endif
+
+ SkASSERT(!fPoints[i].isFinite() ||
+ (!(point < leftTop).anyTrue() && !(point > rightBot).anyTrue()));
+ if (!fPoints[i].isFinite()) {
+ isFinite = false;
+ }
+ }
+ SkASSERT(SkToBool(fIsFinite) == isFinite);
+ }
+
+#ifdef SK_DEBUG_PATH
+ uint32_t mask = 0;
+ for (int i = 0; i < fVerbCnt; ++i) {
+ switch (fVerbs[~i]) {
+ case SkPath::kMove_Verb:
+ break;
+ case SkPath::kLine_Verb:
+ mask |= SkPath::kLine_SegmentMask;
+ break;
+ case SkPath::kQuad_Verb:
+ mask |= SkPath::kQuad_SegmentMask;
+ break;
+ case SkPath::kConic_Verb:
+ mask |= SkPath::kConic_SegmentMask;
+ break;
+ case SkPath::kCubic_Verb:
+ mask |= SkPath::kCubic_SegmentMask;
+ break;
+ case SkPath::kClose_Verb:
+ break;
+ case SkPath::kDone_Verb:
+ SkDEBUGFAIL("Done verb shouldn't be recorded.");
+ break;
+ default:
+ SkDEBUGFAIL("Unknown Verb");
+ break;
+ }
+ }
+ SkASSERT(mask == fSegmentMask);
+#endif // SK_DEBUG_PATH
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkPerspIter.h b/gfx/skia/skia/src/core/SkPerspIter.h
new file mode 100644
index 000000000..c0a9083be
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPerspIter.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPerspIter_DEFINED
+#define SkPerspIter_DEFINED
+
+#include "SkFixed.h"
+#include "SkMatrix.h"
+
+class SkPerspIter {
+public:
+ /** Iterate a line through the matrix [x,y] ... [x+count-1, y].
+ @param m The matrix we will be iterating a line through
+ @param x The initial X coordinate to be mapped through the matrix
+ @param y The initial Y coordinate to be mapped through the matrix
+ @param count The number of points (x,y) (x+1,y) (x+2,y) ... we will eventually map
+ */
+ SkPerspIter(const SkMatrix& m, SkScalar x, SkScalar y, int count);
+
+ /** Return the buffer of [x,y] fixed point values we will be filling.
+ This always returns the same value, so it can be saved across calls to
+ next().
+ */
+ const SkFixed* getXY() const { return fStorage; }
+
+ /** Return the number of [x,y] pairs that have been filled in the getXY() buffer.
+ When this returns 0, the iterator is finished.
+ */
+ int next();
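+
+ /** Example usage (an illustrative sketch):
+         SkPerspIter iter(matrix, x, y, count);
+         while (int n = iter.next()) {
+             const SkFixed* xy = iter.getXY();
+             // consume n (x,y) fixed-point pairs starting at xy
+         }
+ */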
+
+private:
+ enum {
+ kShift = 4,
+ kCount = (1 << kShift)
+ };
+ const SkMatrix& fMatrix;
+ SkFixed fStorage[kCount * 2];
+ SkFixed fX, fY;
+ SkScalar fSX, fSY;
+ int fCount;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPicture.cpp b/gfx/skia/skia/src/core/SkPicture.cpp
new file mode 100644
index 000000000..88da70aac
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPicture.cpp
@@ -0,0 +1,280 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAtomics.h"
+#include "SkImageDeserializer.h"
+#include "SkImageGenerator.h"
+#include "SkMessageBus.h"
+#include "SkPicture.h"
+#include "SkPictureData.h"
+#include "SkPicturePlayback.h"
+#include "SkPictureRecord.h"
+#include "SkPictureRecorder.h"
+
+#if defined(SK_DISALLOW_CROSSPROCESS_PICTUREIMAGEFILTERS) || \
+ defined(SK_ENABLE_PICTURE_IO_SECURITY_PRECAUTIONS)
+static bool g_AllPictureIOSecurityPrecautionsEnabled = true;
+#else
+static bool g_AllPictureIOSecurityPrecautionsEnabled = false;
+#endif
+
+DECLARE_SKMESSAGEBUS_MESSAGE(SkPicture::DeletionMessage);
+
+#ifdef SK_SUPPORT_LEGACY_PICTUREINSTALLPIXELREF
+class InstallProcImageDeserializer : public SkImageDeserializer {
+ SkPicture::InstallPixelRefProc fProc;
+public:
+ InstallProcImageDeserializer(SkPicture::InstallPixelRefProc proc) : fProc(proc) {}
+
+ sk_sp<SkImage> makeFromMemory(const void* data, size_t length, const SkIRect* subset) override {
+ SkBitmap bitmap;
+ if (fProc(data, length, &bitmap)) {
+ bitmap.setImmutable();
+ return SkImage::MakeFromBitmap(bitmap);
+ }
+ return nullptr;
+ }
+ sk_sp<SkImage> makeFromData(SkData* data, const SkIRect* subset) override {
+ return this->makeFromMemory(data->data(), data->size(), subset);
+ }
+};
+
+sk_sp<SkPicture> SkPicture::MakeFromStream(SkStream* stream, InstallPixelRefProc proc) {
+ InstallProcImageDeserializer deserializer(proc);
+ return MakeFromStream(stream, &deserializer);
+}
+#endif
+
+/* SkPicture impl. This handles generic responsibilities like unique IDs and serialization. */
+
+SkPicture::SkPicture() : fUniqueID(0) {}
+
+SkPicture::~SkPicture() {
+ // TODO: move this to ~SkBigPicture() only?
+
+ // If the ID is still zero, no one has read it, so no need to send this message.
+ uint32_t id = sk_atomic_load(&fUniqueID, sk_memory_order_relaxed);
+ if (id != 0) {
+ SkPicture::DeletionMessage msg = { (int32_t)id };
+ SkMessageBus<SkPicture::DeletionMessage>::Post(msg);
+ }
+}
+
+uint32_t SkPicture::uniqueID() const {
+ static uint32_t gNextID = 1;
+ uint32_t id = sk_atomic_load(&fUniqueID, sk_memory_order_relaxed);
+ while (id == 0) {
+ uint32_t next = sk_atomic_fetch_add(&gNextID, 1u);
+ if (sk_atomic_compare_exchange(&fUniqueID, &id, next,
+ sk_memory_order_relaxed,
+ sk_memory_order_relaxed)) {
+ id = next;
+ } else {
+ // sk_atomic_compare_exchange replaced id with the current value of fUniqueID.
+ }
+ }
+ return id;
+}
+
+static const char kMagic[] = { 's', 'k', 'i', 'a', 'p', 'i', 'c', 't' };
+
+SkPictInfo SkPicture::createHeader() const {
+ SkPictInfo info;
+ // Copy magic bytes at the beginning of the header
+ static_assert(sizeof(kMagic) == 8, "");
+ static_assert(sizeof(kMagic) == sizeof(info.fMagic), "");
+ memcpy(info.fMagic, kMagic, sizeof(kMagic));
+
+ // Set picture info after magic bytes in the header
+ info.setVersion(CURRENT_PICTURE_VERSION);
+ info.fCullRect = this->cullRect();
+ info.fFlags = SkPictInfo::kCrossProcess_Flag;
+ // TODO: remove this flag, since we're always float (now)
+ info.fFlags |= SkPictInfo::kScalarIsFloat_Flag;
+
+ if (8 == sizeof(void*)) {
+ info.fFlags |= SkPictInfo::kPtrIs64Bit_Flag;
+ }
+ return info;
+}
+
+bool SkPicture::IsValidPictInfo(const SkPictInfo& info) {
+ if (0 != memcmp(info.fMagic, kMagic, sizeof(kMagic))) {
+ return false;
+ }
+ if (info.getVersion() < MIN_PICTURE_VERSION || info.getVersion() > CURRENT_PICTURE_VERSION) {
+ return false;
+ }
+ return true;
+}
+
+bool SkPicture::InternalOnly_StreamIsSKP(SkStream* stream, SkPictInfo* pInfo) {
+ if (!stream) {
+ return false;
+ }
+
+ SkPictInfo info;
+ SkASSERT(sizeof(kMagic) == sizeof(info.fMagic));
+ if (!stream->read(&info.fMagic, sizeof(kMagic))) {
+ return false;
+ }
+
+ info.setVersion( stream->readU32());
+ info.fCullRect.fLeft = stream->readScalar();
+ info.fCullRect.fTop = stream->readScalar();
+ info.fCullRect.fRight = stream->readScalar();
+ info.fCullRect.fBottom = stream->readScalar();
+ info.fFlags = stream->readU32();
+
+ if (IsValidPictInfo(info)) {
+ if (pInfo) { *pInfo = info; }
+ return true;
+ }
+ return false;
+}
+
+bool SkPicture::InternalOnly_BufferIsSKP(SkReadBuffer* buffer, SkPictInfo* pInfo) {
+ SkPictInfo info;
+ SkASSERT(sizeof(kMagic) == sizeof(info.fMagic));
+ if (!buffer->readByteArray(&info.fMagic, sizeof(kMagic))) {
+ return false;
+ }
+
+ info.setVersion(buffer->readUInt());
+ buffer->readRect(&info.fCullRect);
+ info.fFlags = buffer->readUInt();
+
+ if (IsValidPictInfo(info)) {
+ if (pInfo) { *pInfo = info; }
+ return true;
+ }
+ return false;
+}
+
+sk_sp<SkPicture> SkPicture::Forwardport(const SkPictInfo& info,
+ const SkPictureData* data,
+ SkReadBuffer* buffer) {
+ if (!data) {
+ return nullptr;
+ }
+ SkPicturePlayback playback(data);
+ SkPictureRecorder r;
+ playback.draw(r.beginRecording(info.fCullRect), nullptr/*no callback*/, buffer);
+ return r.finishRecordingAsPicture();
+}
+
+sk_sp<SkPicture> SkPicture::MakeFromStream(SkStream* stream, SkImageDeserializer* factory) {
+ return MakeFromStream(stream, factory, nullptr);
+}
+
+sk_sp<SkPicture> SkPicture::MakeFromStream(SkStream* stream) {
+ SkImageDeserializer factory;
+ return MakeFromStream(stream, &factory);
+}
+
+sk_sp<SkPicture> SkPicture::MakeFromData(const void* data, size_t size,
+ SkImageDeserializer* factory) {
+ SkMemoryStream stream(data, size);
+ return MakeFromStream(&stream, factory, nullptr);
+}
+
+sk_sp<SkPicture> SkPicture::MakeFromData(const SkData* data, SkImageDeserializer* factory) {
+ if (!data) {
+ return nullptr;
+ }
+ SkMemoryStream stream(data->data(), data->size());
+ return MakeFromStream(&stream, factory, nullptr);
+}
+
+sk_sp<SkPicture> SkPicture::MakeFromStream(SkStream* stream, SkImageDeserializer* factory,
+ SkTypefacePlayback* typefaces) {
+ SkPictInfo info;
+ if (!InternalOnly_StreamIsSKP(stream, &info) || !stream->readBool()) {
+ return nullptr;
+ }
+ SkAutoTDelete<SkPictureData> data(
+ SkPictureData::CreateFromStream(stream, info, factory, typefaces));
+ return Forwardport(info, data, nullptr);
+}
+
+sk_sp<SkPicture> SkPicture::MakeFromBuffer(SkReadBuffer& buffer) {
+ SkPictInfo info;
+ if (!InternalOnly_BufferIsSKP(&buffer, &info) || !buffer.readBool()) {
+ return nullptr;
+ }
+ SkAutoTDelete<SkPictureData> data(SkPictureData::CreateFromBuffer(buffer, info));
+ return Forwardport(info, data, &buffer);
+}
+
+SkPictureData* SkPicture::backport() const {
+ SkPictInfo info = this->createHeader();
+ SkPictureRecord rec(SkISize::Make(info.fCullRect.width(), info.fCullRect.height()), 0/*flags*/);
+ rec.beginRecording();
+ this->playback(&rec);
+ rec.endRecording();
+ return new SkPictureData(rec, info);
+}
+
+void SkPicture::serialize(SkWStream* stream, SkPixelSerializer* pixelSerializer) const {
+ this->serialize(stream, pixelSerializer, nullptr);
+}
+
+sk_sp<SkData> SkPicture::serialize(SkPixelSerializer* pixelSerializer) const {
+ SkDynamicMemoryWStream stream;
+ this->serialize(&stream, pixelSerializer, nullptr);
+ return stream.detachAsData();
+}
+
+void SkPicture::serialize(SkWStream* stream,
+ SkPixelSerializer* pixelSerializer,
+ SkRefCntSet* typefaceSet) const {
+ SkPictInfo info = this->createHeader();
+ SkAutoTDelete<SkPictureData> data(this->backport());
+
+ stream->write(&info, sizeof(info));
+ if (data) {
+ stream->writeBool(true);
+ data->serialize(stream, pixelSerializer, typefaceSet);
+ } else {
+ stream->writeBool(false);
+ }
+}
+
+void SkPicture::flatten(SkWriteBuffer& buffer) const {
+ SkPictInfo info = this->createHeader();
+ SkAutoTDelete<SkPictureData> data(this->backport());
+
+ buffer.writeByteArray(&info.fMagic, sizeof(info.fMagic));
+ buffer.writeUInt(info.getVersion());
+ buffer.writeRect(info.fCullRect);
+ buffer.writeUInt(info.fFlags);
+ if (data) {
+ buffer.writeBool(true);
+ data->flatten(buffer);
+ } else {
+ buffer.writeBool(false);
+ }
+}
+
+#ifdef SK_SUPPORT_LEGACY_PICTURE_GPUVETO
+bool SkPicture::suitableForGpuRasterization(GrContext*, const char** whyNot) const {
+ if (this->numSlowPaths() > 5) {
+ if (whyNot) { *whyNot = "Too many slow paths (either concave or dashed)."; }
+ return false;
+ }
+ return true;
+}
+#endif
+
+// Global setting to disable security precautions for serialization.
+void SkPicture::SetPictureIOSecurityPrecautionsEnabled_Dangerous(bool set) {
+ g_AllPictureIOSecurityPrecautionsEnabled = set;
+}
+
+bool SkPicture::PictureIOSecurityPrecautionsEnabled() {
+ return g_AllPictureIOSecurityPrecautionsEnabled;
+}
diff --git a/gfx/skia/skia/src/core/SkPictureAnalyzer.cpp b/gfx/skia/skia/src/core/SkPictureAnalyzer.cpp
new file mode 100644
index 000000000..a7a4d9449
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureAnalyzer.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPath.h"
+#include "SkPicture.h"
+#include "SkPictureAnalyzer.h"
+#include "SkPictureCommon.h"
+#include "SkRecords.h"
+
+#if SK_SUPPORT_GPU
+
+namespace {
+
+inline bool veto_predicate(uint32_t numSlowPaths) {
+ return numSlowPaths > 5;
+}
+
+} // anonymous namespace
+
+SkPictureGpuAnalyzer::SkPictureGpuAnalyzer(sk_sp<GrContextThreadSafeProxy> /* unused ATM */)
+ : fNumSlowPaths(0) { }
+
+SkPictureGpuAnalyzer::SkPictureGpuAnalyzer(const sk_sp<SkPicture>& picture,
+ sk_sp<GrContextThreadSafeProxy> ctx)
+ : SkPictureGpuAnalyzer(std::move(ctx)) {
+ this->analyzePicture(picture.get());
+}
+
+void SkPictureGpuAnalyzer::analyzePicture(const SkPicture* picture) {
+ if (!picture) {
+ return;
+ }
+
+ fNumSlowPaths += picture->numSlowPaths();
+}
+
+void SkPictureGpuAnalyzer::analyzeClipPath(const SkPath& path, SkCanvas::ClipOp op, bool doAntiAlias) {
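+    // Wrap the clip in a synthetic ClipPath record so SkPathCounter can apply
+    // the same slow-path heuristic used for recorded pictures.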
+ const SkRecords::ClipPath clipOp = {
+        SkIRect::MakeEmpty(), // device bounds are not used by SkPathCounter.
+ path,
+ SkRecords::ClipOpAndAA(op, doAntiAlias)
+ };
+
+ SkPathCounter counter;
+ counter(clipOp);
+ fNumSlowPaths += counter.fNumSlowPathsAndDashEffects;
+}
+
+void SkPictureGpuAnalyzer::reset() {
+ fNumSlowPaths = 0;
+}
+
+bool SkPictureGpuAnalyzer::suitableForGpuRasterization(const char** whyNot) const {
+    if (veto_predicate(fNumSlowPaths)) {
+ if (whyNot) { *whyNot = "Too many slow paths (either concave or dashed)."; }
+ return false;
+ }
+ return true;
+}
+
+#endif // SK_SUPPORT_GPU
diff --git a/gfx/skia/skia/src/core/SkPictureCommon.h b/gfx/skia/skia/src/core/SkPictureCommon.h
new file mode 100644
index 000000000..9b0a2f7c0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureCommon.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Some shared code used by both SkBigPicture and SkMiniPicture.
+// SkBitmapHunter -- SkRecord visitor that returns true when the op draws a bitmap or image.
+// SkPathCounter -- SkRecord visitor that counts paths that draw slowly on the GPU.
+
+#include "SkPathEffect.h"
+#include "SkRecords.h"
+#include "SkTLogic.h"
+
+// N.B. This name is slightly historical: hunting season is now open for SkImages too.
+struct SkBitmapHunter {
+ // Some ops have a paint, some have an optional paint. Either way, get back a pointer.
+ static const SkPaint* AsPtr(const SkPaint& p) { return &p; }
+ static const SkPaint* AsPtr(const SkRecords::Optional<SkPaint>& p) { return p; }
+
+ // Main entry for visitor:
+ // If the op is a DrawPicture, recurse.
+ // If the op has a bitmap or image directly, return true.
+ // If the op has a paint and the paint has a bitmap, return true.
+ // Otherwise, return false.
+ bool operator()(const SkRecords::DrawPicture& op) { return op.picture->willPlayBackBitmaps(); }
+ bool operator()(const SkRecords::DrawDrawable&) { /*TODO*/ return false; }
+
+ template <typename T>
+ bool operator()(const T& op) { return CheckBitmap(op); }
+
+ // If the op is tagged as having an image, return true.
+ template <typename T>
+ static SK_WHEN(T::kTags & SkRecords::kHasImage_Tag, bool) CheckBitmap(const T&) {
+ return true;
+ }
+
+ // If not, look for one in its paint (if it has a paint).
+ template <typename T>
+ static SK_WHEN(!(T::kTags & SkRecords::kHasImage_Tag), bool) CheckBitmap(const T& op) {
+ return CheckPaint(op);
+ }
+
+ // Most draws-type ops have paints.
+ template <typename T>
+ static SK_WHEN(T::kTags & SkRecords::kHasPaint_Tag, bool) CheckPaint(const T& op) {
+ return PaintHasBitmap(AsPtr(op.paint));
+ }
+
+ template <typename T>
+ static SK_WHEN(!(T::kTags & SkRecords::kHasPaint_Tag), bool) CheckPaint(const T&) {
+ return false;
+ }
+
+private:
+ static bool PaintHasBitmap(const SkPaint* paint) {
+ if (paint) {
+ const SkShader* shader = paint->getShader();
+ if (shader && shader->isAImage()) {
+ return true;
+ }
+ }
+ return false;
+ }
+};
+
+// TODO: might be nicer to have operator() return an int (the number of slow paths) ?
+struct SkPathCounter {
+ // Some ops have a paint, some have an optional paint. Either way, get back a pointer.
+ static const SkPaint* AsPtr(const SkPaint& p) { return &p; }
+ static const SkPaint* AsPtr(const SkRecords::Optional<SkPaint>& p) { return p; }
+
+ SkPathCounter() : fNumSlowPathsAndDashEffects(0) {}
+
+ // Recurse into nested pictures.
+ void operator()(const SkRecords::DrawPicture& op) {
+ fNumSlowPathsAndDashEffects += op.picture->numSlowPaths();
+ }
+ void operator()(const SkRecords::DrawDrawable&) { /* TODO */ }
+
+ void checkPaint(const SkPaint* paint) {
+ if (paint && paint->getPathEffect()) {
+ // Initially assume it's slow.
+ fNumSlowPathsAndDashEffects++;
+ }
+ }
+
+ void operator()(const SkRecords::DrawPoints& op) {
+ this->checkPaint(&op.paint);
+ const SkPathEffect* effect = op.paint.getPathEffect();
+ if (effect) {
+ SkPathEffect::DashInfo info;
+ SkPathEffect::DashType dashType = effect->asADash(&info);
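+            // A two-point line drawn with a two-interval dash and non-round caps
+            // can take the GPU fast path, so undo the slow-path count that
+            // checkPaint() added above.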
+ if (2 == op.count && SkPaint::kRound_Cap != op.paint.getStrokeCap() &&
+ SkPathEffect::kDash_DashType == dashType && 2 == info.fCount) {
+ fNumSlowPathsAndDashEffects--;
+ }
+ }
+ }
+
+ void operator()(const SkRecords::DrawPath& op) {
+ this->checkPaint(&op.paint);
+ if (op.paint.isAntiAlias() && !op.path.isConvex()) {
+ SkPaint::Style paintStyle = op.paint.getStyle();
+ const SkRect& pathBounds = op.path.getBounds();
+ if (SkPaint::kStroke_Style == paintStyle &&
+ 0 == op.paint.getStrokeWidth()) {
+ // AA hairline concave path is not slow.
+ } else if (SkPaint::kFill_Style == paintStyle && pathBounds.width() < 64.f &&
+ pathBounds.height() < 64.f && !op.path.isVolatile()) {
+ // AADF eligible concave path is not slow.
+ } else {
+ fNumSlowPathsAndDashEffects++;
+ }
+ }
+ }
+
+ void operator()(const SkRecords::ClipPath& op) {
+ // TODO: does the SkRegion op matter?
+ if (op.opAA.aa && !op.path.isConvex()) {
+ fNumSlowPathsAndDashEffects++;
+ }
+ }
+
+ void operator()(const SkRecords::SaveLayer& op) {
+ this->checkPaint(AsPtr(op.paint));
+ }
+
+ template <typename T>
+ SK_WHEN(T::kTags & SkRecords::kDraw_Tag, void) operator()(const T& op) {
+ this->checkPaint(AsPtr(op.paint));
+ }
+
+ template <typename T>
+ SK_WHEN(!(T::kTags & SkRecords::kDraw_Tag), void) operator()(const T& op) { /* do nothing */ }
+
+ int fNumSlowPathsAndDashEffects;
+};
diff --git a/gfx/skia/skia/src/core/SkPictureContentInfo.cpp b/gfx/skia/skia/src/core/SkPictureContentInfo.cpp
new file mode 100644
index 000000000..42d42c408
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureContentInfo.cpp
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPaint.h"
+#include "SkPathEffect.h"
+#include "SkPictureContentInfo.h"
+
+bool SkPictureContentInfo::suitableForGpuRasterization(GrContext* context, const char **reason,
+ int sampleCount) const {
+ // TODO: the heuristic used here needs to be refined
+ static const int kNumPaintWithPathEffectUsesTol = 1;
+ static const int kNumAAConcavePaths = 5;
+
+ SkASSERT(fNumAAHairlineConcavePaths <= fNumAAConcavePaths);
+
+ int numNonDashedPathEffects = fNumPaintWithPathEffectUses -
+ fNumFastPathDashEffects;
+
+ bool suitableForDash = (0 == fNumPaintWithPathEffectUses) ||
+ (numNonDashedPathEffects < kNumPaintWithPathEffectUsesTol
+ && 0 == sampleCount);
+
+ bool ret = suitableForDash &&
+ (fNumAAConcavePaths - fNumAAHairlineConcavePaths - fNumAADFEligibleConcavePaths)
+ < kNumAAConcavePaths;
+ if (!ret && reason) {
+ if (!suitableForDash) {
+ if (0 != sampleCount) {
+ *reason = "Can't use multisample on dash effect.";
+ } else {
+ *reason = "Too many non dashed path effects.";
+ }
+ } else if ((fNumAAConcavePaths - fNumAAHairlineConcavePaths - fNumAADFEligibleConcavePaths)
+ >= kNumAAConcavePaths) {
+ *reason = "Too many anti-aliased concave paths.";
+ } else {
+ *reason = "Unknown reason for GPU unsuitability.";
+ }
+ }
+ return ret;
+}
+
+void SkPictureContentInfo::onDrawPoints(size_t count, const SkPaint& paint) {
+ if (paint.getPathEffect() != nullptr) {
+ SkPathEffect::DashInfo info;
+ SkPathEffect::DashType dashType = paint.getPathEffect()->asADash(&info);
+ if (2 == count && SkPaint::kRound_Cap != paint.getStrokeCap() &&
+ SkPathEffect::kDash_DashType == dashType && 2 == info.fCount) {
+ ++fNumFastPathDashEffects;
+ }
+ }
+}
+
+void SkPictureContentInfo::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ if (paint.isAntiAlias() && !path.isConvex()) {
+ ++fNumAAConcavePaths;
+
+ SkPaint::Style paintStyle = paint.getStyle();
+ const SkRect& pathBounds = path.getBounds();
+ if (SkPaint::kStroke_Style == paint.getStyle() && 0 == paint.getStrokeWidth()) {
+ ++fNumAAHairlineConcavePaths;
+ } else if (SkPaint::kFill_Style == paintStyle && pathBounds.width() < 64.f &&
+ pathBounds.height() < 64.f && !path.isVolatile()) {
+ ++fNumAADFEligibleConcavePaths;
+ }
+ }
+}
+
+void SkPictureContentInfo::onAddPaintPtr(const SkPaint* paint) {
+ if (paint && paint->getPathEffect()) {
+ ++fNumPaintWithPathEffectUses;
+ }
+}
+
+void SkPictureContentInfo::onSaveLayer() {
+ *fSaveStack.append() = kSaveLayer_Flag;
+}
+
+void SkPictureContentInfo::onSave() {
+ *fSaveStack.append() = kSave_Flag;
+}
+
+void SkPictureContentInfo::onRestore() {
+ SkASSERT(fSaveStack.count() > 0);
+
+ bool containedSaveLayer = fSaveStack.top() & kContainedSaveLayer_Flag;
+
+ if (fSaveStack.top() & kSaveLayer_Flag) {
+ ++fNumLayers;
+ if (containedSaveLayer) {
+ ++fNumInteriorLayers;
+ } else {
+ ++fNumLeafLayers;
+ }
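+        // A saveLayer itself counts as a contained layer for its parent entry
+        // on the save stack.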
+ containedSaveLayer = true;
+ }
+
+ fSaveStack.pop();
+
+ if (containedSaveLayer && fSaveStack.count() > 0) {
+ fSaveStack.top() |= kContainedSaveLayer_Flag;
+ }
+}
+
+void SkPictureContentInfo::rescindLastSave() {
+ SkASSERT(fSaveStack.count() > 0);
+ SkASSERT(fSaveStack.top() & kSave_Flag);
+
+ bool containedSaveLayer = fSaveStack.top() & kContainedSaveLayer_Flag;
+
+ fSaveStack.pop();
+
+ if (containedSaveLayer && fSaveStack.count() > 0) {
+ fSaveStack.top() |= kContainedSaveLayer_Flag;
+ }
+}
+
+void SkPictureContentInfo::rescindLastSaveLayer() {
+ SkASSERT(fSaveStack.count() > 0);
+ SkASSERT(fSaveStack.top() & kSaveLayer_Flag);
+
+ bool containedSaveLayer = fSaveStack.top() & kContainedSaveLayer_Flag;
+
+ fSaveStack.pop();
+
+ if (containedSaveLayer && fSaveStack.count() > 0) {
+ fSaveStack.top() |= kContainedSaveLayer_Flag;
+ }
+}
+
+void SkPictureContentInfo::set(const SkPictureContentInfo& src) {
+ fNumOperations = src.fNumOperations;
+ fNumTexts = src.fNumTexts;
+ fNumPaintWithPathEffectUses = src.fNumPaintWithPathEffectUses;
+ fNumFastPathDashEffects = src.fNumFastPathDashEffects;
+ fNumAAConcavePaths = src.fNumAAConcavePaths;
+ fNumAAHairlineConcavePaths = src.fNumAAHairlineConcavePaths;
+ fNumAADFEligibleConcavePaths = src.fNumAADFEligibleConcavePaths;
+ fNumLayers = src.fNumLayers;
+ fNumInteriorLayers = src.fNumInteriorLayers;
+ fNumLeafLayers = src.fNumLeafLayers;
+ fSaveStack = src.fSaveStack;
+}
+
+void SkPictureContentInfo::reset() {
+ fNumOperations = 0;
+ fNumTexts = 0;
+ fNumPaintWithPathEffectUses = 0;
+ fNumFastPathDashEffects = 0;
+ fNumAAConcavePaths = 0;
+ fNumAAHairlineConcavePaths = 0;
+ fNumAADFEligibleConcavePaths = 0;
+ fNumLayers = 0;
+ fNumInteriorLayers = 0;
+ fNumLeafLayers = 0;
+ fSaveStack.rewind();
+}
+
+void SkPictureContentInfo::swap(SkPictureContentInfo* other) {
+ SkTSwap(fNumOperations, other->fNumOperations);
+ SkTSwap(fNumTexts, other->fNumTexts);
+ SkTSwap(fNumPaintWithPathEffectUses, other->fNumPaintWithPathEffectUses);
+ SkTSwap(fNumFastPathDashEffects, other->fNumFastPathDashEffects);
+ SkTSwap(fNumAAConcavePaths, other->fNumAAConcavePaths);
+ SkTSwap(fNumAAHairlineConcavePaths, other->fNumAAHairlineConcavePaths);
+ SkTSwap(fNumAADFEligibleConcavePaths, other->fNumAADFEligibleConcavePaths);
+ SkTSwap(fNumLayers, other->fNumLayers);
+ SkTSwap(fNumInteriorLayers, other->fNumInteriorLayers);
+ SkTSwap(fNumLeafLayers, other->fNumLeafLayers);
+ fSaveStack.swap(other->fSaveStack);
+}
diff --git a/gfx/skia/skia/src/core/SkPictureContentInfo.h b/gfx/skia/skia/src/core/SkPictureContentInfo.h
new file mode 100644
index 000000000..81c8a274e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureContentInfo.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPictureContentInfo_DEFINED
+#define SkPictureContentInfo_DEFINED
+
+#include "SkTDArray.h"
+
+class GrContext;
+
+class SkPictureContentInfo {
+public:
+ SkPictureContentInfo() { this->reset(); }
+ SkPictureContentInfo(const SkPictureContentInfo& src) { this->set(src); }
+
+ int numOperations() const { return fNumOperations; }
+ bool hasText() const { return fNumTexts > 0; }
+
+ int numLayers() const { return fNumLayers; }
+ int numInteriorLayers() const { return fNumInteriorLayers; }
+ int numLeafLayers() const { return fNumLeafLayers; }
+
+ bool suitableForGpuRasterization(GrContext* context, const char **reason,
+ int sampleCount) const;
+
+ void addOperation() { ++fNumOperations; }
+
+ void onDrawPoints(size_t count, const SkPaint& paint);
+ void onDrawPath(const SkPath& path, const SkPaint& paint);
+ void onAddPaintPtr(const SkPaint* paint);
+ void onDrawText() { ++fNumTexts; }
+
+ void onSaveLayer();
+ void onSave();
+ void onRestore();
+ void rescindLastSave();
+ void rescindLastSaveLayer();
+
+ void set(const SkPictureContentInfo& src);
+ void reset();
+ void swap(SkPictureContentInfo* other);
+
+private:
+ // Raw count of operations in the picture
+ int fNumOperations;
+ // Count of all forms of drawText
+ int fNumTexts;
+
+ // This field is incremented every time a paint with a path effect is
+ // used (i.e., it is not a de-duplicated count)
+ int fNumPaintWithPathEffectUses;
+    // This field is incremented every time a paint with a dashed path effect is
+    // used to draw a line that can take the GPU fast path
+ int fNumFastPathDashEffects;
+ // This field is incremented every time an anti-aliased drawPath call is
+ // issued with a concave path
+ int fNumAAConcavePaths;
+ // This field is incremented every time a drawPath call is
+ // issued for a hairline stroked concave path.
+ int fNumAAHairlineConcavePaths;
+ // This field is incremented every time a drawPath call is
+ // issued for a concave path that can be rendered with distance fields
+ int fNumAADFEligibleConcavePaths;
+ // These fields track the different layer flavors. fNumLayers is just
+ // a count of all saveLayers, fNumInteriorLayers is the number of layers
+ // with a layer inside them, fNumLeafLayers is the number of layers with
+ // no layer inside them.
+ int fNumLayers;
+ int fNumInteriorLayers;
+ int fNumLeafLayers;
+
+ enum Flags {
+ kSave_Flag = 0x1,
+ kSaveLayer_Flag = 0x2,
+
+        // Did the current save or saveLayer contain another saveLayer?
+ // Percolated back down the save stack.
+ kContainedSaveLayer_Flag = 0x4
+ };
+
+ // Stack of save vs saveLayer information to track nesting
+ SkTDArray<uint32_t> fSaveStack;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPictureData.cpp b/gfx/skia/skia/src/core/SkPictureData.cpp
new file mode 100644
index 000000000..68789acdd
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureData.cpp
@@ -0,0 +1,658 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include <new>
+#include "SkImageGenerator.h"
+#include "SkPictureData.h"
+#include "SkPictureRecord.h"
+#include "SkReadBuffer.h"
+#include "SkTextBlob.h"
+#include "SkTypeface.h"
+#include "SkWriteBuffer.h"
+
+#if SK_SUPPORT_GPU
+#include "GrContext.h"
+#endif
+
+template <typename T> int SafeCount(const T* obj) {
+ return obj ? obj->count() : 0;
+}
+
+SkPictureData::SkPictureData(const SkPictInfo& info)
+ : fInfo(info) {
+ this->init();
+}
+
+void SkPictureData::initForPlayback() const {
+ // ensure that the paths bounds are pre-computed
+ for (int i = 0; i < fPaths.count(); i++) {
+ fPaths[i].updateBoundsCache();
+ }
+}
+
+SkPictureData::SkPictureData(const SkPictureRecord& record,
+ const SkPictInfo& info)
+ : fInfo(info) {
+
+ this->init();
+
+ fOpData = record.opData();
+
+ fContentInfo.set(record.fContentInfo);
+
+ fPaints = record.fPaints;
+
+ fPaths.reset(record.fPaths.count());
+ record.fPaths.foreach([this](const SkPath& path, int n) {
+ // These indices are logically 1-based, but we need to serialize them
+ // 0-based to keep the deserializing SkPictureData::getPath() working.
+ fPaths[n-1] = path;
+ });
+
+ this->initForPlayback();
+
+ const SkTDArray<const SkPicture* >& pictures = record.getPictureRefs();
+ fPictureCount = pictures.count();
+ if (fPictureCount > 0) {
+ fPictureRefs = new const SkPicture* [fPictureCount];
+ for (int i = 0; i < fPictureCount; i++) {
+ fPictureRefs[i] = pictures[i];
+ fPictureRefs[i]->ref();
+ }
+ }
+
+ const SkTDArray<SkDrawable* >& drawables = record.getDrawableRefs();
+ fDrawableCount = drawables.count();
+ if (fDrawableCount > 0) {
+ fDrawableRefs = new SkDrawable* [fDrawableCount];
+ for (int i = 0; i < fDrawableCount; i++) {
+ fDrawableRefs[i] = drawables[i];
+ fDrawableRefs[i]->ref();
+ }
+ }
+
+ // templatize to consolidate with similar picture logic?
+ const SkTDArray<const SkTextBlob*>& blobs = record.getTextBlobRefs();
+ fTextBlobCount = blobs.count();
+ if (fTextBlobCount > 0) {
+ fTextBlobRefs = new const SkTextBlob* [fTextBlobCount];
+ for (int i = 0; i < fTextBlobCount; ++i) {
+ fTextBlobRefs[i] = SkRef(blobs[i]);
+ }
+ }
+
+ const SkTDArray<const SkImage*>& imgs = record.getImageRefs();
+ fImageCount = imgs.count();
+ if (fImageCount > 0) {
+ fImageRefs = new const SkImage* [fImageCount];
+ for (int i = 0; i < fImageCount; ++i) {
+ fImageRefs[i] = SkRef(imgs[i]);
+ }
+ }
+}
+
+void SkPictureData::init() {
+ fPictureRefs = nullptr;
+ fPictureCount = 0;
+ fDrawableRefs = nullptr;
+ fDrawableCount = 0;
+ fTextBlobRefs = nullptr;
+ fTextBlobCount = 0;
+ fImageRefs = nullptr;
+ fImageCount = 0;
+ fFactoryPlayback = nullptr;
+}
+
+SkPictureData::~SkPictureData() {
+ for (int i = 0; i < fPictureCount; i++) {
+ fPictureRefs[i]->unref();
+ }
+ delete[] fPictureRefs;
+
+ for (int i = 0; i < fDrawableCount; i++) {
+ fDrawableRefs[i]->unref();
+ }
+ if (fDrawableCount > 0) {
+ SkASSERT(fDrawableRefs);
+ delete[] fDrawableRefs;
+ }
+
+ for (int i = 0; i < fTextBlobCount; i++) {
+ fTextBlobRefs[i]->unref();
+ }
+ delete[] fTextBlobRefs;
+
+ for (int i = 0; i < fImageCount; i++) {
+ fImageRefs[i]->unref();
+ }
+ delete[] fImageRefs;
+
+ delete fFactoryPlayback;
+}
+
+bool SkPictureData::containsBitmaps() const {
+ if (fBitmapImageCount > 0 || fImageCount > 0) {
+ return true;
+ }
+ for (int i = 0; i < fPictureCount; ++i) {
+ if (fPictureRefs[i]->willPlayBackBitmaps()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkStream.h"
+
+static size_t compute_chunk_size(SkFlattenable::Factory* array, int count) {
+ size_t size = 4; // for 'count'
+
+ for (int i = 0; i < count; i++) {
+ const char* name = SkFlattenable::FactoryToName(array[i]);
+ if (nullptr == name || 0 == *name) {
+ size += SkWStream::SizeOfPackedUInt(0);
+ } else {
+ size_t len = strlen(name);
+ size += SkWStream::SizeOfPackedUInt(len);
+ size += len;
+ }
+ }
+
+ return size;
+}
+
+static void write_tag_size(SkWriteBuffer& buffer, uint32_t tag, size_t size) {
+ buffer.writeUInt(tag);
+ buffer.writeUInt(SkToU32(size));
+}
+
+static void write_tag_size(SkWStream* stream, uint32_t tag, size_t size) {
+ stream->write32(tag);
+ stream->write32(SkToU32(size));
+}
+
+void SkPictureData::WriteFactories(SkWStream* stream, const SkFactorySet& rec) {
+ int count = rec.count();
+
+ SkAutoSTMalloc<16, SkFlattenable::Factory> storage(count);
+ SkFlattenable::Factory* array = (SkFlattenable::Factory*)storage.get();
+ rec.copyToArray(array);
+
+ size_t size = compute_chunk_size(array, count);
+
+ // TODO: write_tag_size should really take a size_t
+ write_tag_size(stream, SK_PICT_FACTORY_TAG, (uint32_t) size);
+ SkDEBUGCODE(size_t start = stream->bytesWritten());
+ stream->write32(count);
+
+ for (int i = 0; i < count; i++) {
+ const char* name = SkFlattenable::FactoryToName(array[i]);
+ if (nullptr == name || 0 == *name) {
+ stream->writePackedUInt(0);
+ } else {
+ size_t len = strlen(name);
+ stream->writePackedUInt(len);
+ stream->write(name, len);
+ }
+ }
+
+ SkASSERT(size == (stream->bytesWritten() - start));
+}
+
+void SkPictureData::WriteTypefaces(SkWStream* stream, const SkRefCntSet& rec) {
+ int count = rec.count();
+
+ write_tag_size(stream, SK_PICT_TYPEFACE_TAG, count);
+
+ SkAutoSTMalloc<16, SkTypeface*> storage(count);
+ SkTypeface** array = (SkTypeface**)storage.get();
+ rec.copyToArray((SkRefCnt**)array);
+
+ for (int i = 0; i < count; i++) {
+ array[i]->serialize(stream);
+ }
+}
+
+void SkPictureData::flattenToBuffer(SkWriteBuffer& buffer) const {
+ int i, n;
+
+ if ((n = fPaints.count()) > 0) {
+ write_tag_size(buffer, SK_PICT_PAINT_BUFFER_TAG, n);
+ for (i = 0; i < n; i++) {
+ buffer.writePaint(fPaints[i]);
+ }
+ }
+
+ if ((n = fPaths.count()) > 0) {
+ write_tag_size(buffer, SK_PICT_PATH_BUFFER_TAG, n);
+ buffer.writeInt(n);
+ for (int i = 0; i < n; i++) {
+ buffer.writePath(fPaths[i]);
+ }
+ }
+
+ if (fTextBlobCount > 0) {
+ write_tag_size(buffer, SK_PICT_TEXTBLOB_BUFFER_TAG, fTextBlobCount);
+ for (i = 0; i < fTextBlobCount; ++i) {
+ fTextBlobRefs[i]->flatten(buffer);
+ }
+ }
+
+ if (fImageCount > 0) {
+ write_tag_size(buffer, SK_PICT_IMAGE_BUFFER_TAG, fImageCount);
+ for (i = 0; i < fImageCount; ++i) {
+ buffer.writeImage(fImageRefs[i]);
+ }
+ }
+}
+
+void SkPictureData::serialize(SkWStream* stream,
+ SkPixelSerializer* pixelSerializer,
+ SkRefCntSet* topLevelTypeFaceSet) const {
+    // The op data can be written at any point in the stream, so write it first.
+ write_tag_size(stream, SK_PICT_READER_TAG, fOpData->size());
+ stream->write(fOpData->bytes(), fOpData->size());
+
+ // We serialize all typefaces into the typeface section of the top-level picture.
+ SkRefCntSet localTypefaceSet;
+ SkRefCntSet* typefaceSet = topLevelTypeFaceSet ? topLevelTypeFaceSet : &localTypefaceSet;
+
+ // We delay serializing the bulk of our data until after we've serialized
+ // factories and typefaces by first serializing to an in-memory write buffer.
+ SkFactorySet factSet; // buffer refs factSet, so factSet must come first.
+ SkBinaryWriteBuffer buffer(SkBinaryWriteBuffer::kCrossProcess_Flag);
+ buffer.setFactoryRecorder(&factSet);
+ buffer.setPixelSerializer(pixelSerializer);
+ buffer.setTypefaceRecorder(typefaceSet);
+ this->flattenToBuffer(buffer);
+
+ // Dummy serialize our sub-pictures for the side effect of filling
+ // typefaceSet with typefaces from sub-pictures.
+ struct DevNull: public SkWStream {
+ DevNull() : fBytesWritten(0) {}
+ size_t fBytesWritten;
+ bool write(const void*, size_t size) override { fBytesWritten += size; return true; }
+ size_t bytesWritten() const override { return fBytesWritten; }
+ } devnull;
+ for (int i = 0; i < fPictureCount; i++) {
+ fPictureRefs[i]->serialize(&devnull, pixelSerializer, typefaceSet);
+ }
+
+ // We need to write factories before we write the buffer.
+ // We need to write typefaces before we write the buffer or any sub-picture.
+ WriteFactories(stream, factSet);
+ if (typefaceSet == &localTypefaceSet) {
+ WriteTypefaces(stream, *typefaceSet);
+ }
+
+ // Write the buffer.
+ write_tag_size(stream, SK_PICT_BUFFER_SIZE_TAG, buffer.bytesWritten());
+ buffer.writeToStream(stream);
+
+ // Write sub-pictures by calling serialize again.
+ if (fPictureCount > 0) {
+ write_tag_size(stream, SK_PICT_PICTURE_TAG, fPictureCount);
+ for (int i = 0; i < fPictureCount; i++) {
+ fPictureRefs[i]->serialize(stream, pixelSerializer, typefaceSet);
+ }
+ }
+
+ stream->write32(SK_PICT_EOF_TAG);
+}
+
+void SkPictureData::flatten(SkWriteBuffer& buffer) const {
+ write_tag_size(buffer, SK_PICT_READER_TAG, fOpData->size());
+ buffer.writeByteArray(fOpData->bytes(), fOpData->size());
+
+ if (fPictureCount > 0) {
+ write_tag_size(buffer, SK_PICT_PICTURE_TAG, fPictureCount);
+ for (int i = 0; i < fPictureCount; i++) {
+ fPictureRefs[i]->flatten(buffer);
+ }
+ }
+
+ if (fDrawableCount > 0) {
+ write_tag_size(buffer, SK_PICT_DRAWABLE_TAG, fDrawableCount);
+ for (int i = 0; i < fDrawableCount; i++) {
+ buffer.writeFlattenable(fDrawableRefs[i]);
+ }
+ }
+
+ // Write this picture playback's data into a writebuffer
+ this->flattenToBuffer(buffer);
+ buffer.write32(SK_PICT_EOF_TAG);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Return the corresponding SkReadBuffer flags, given a set of
+ * SkPictInfo flags.
+ */
+static uint32_t pictInfoFlagsToReadBufferFlags(uint32_t pictInfoFlags) {
+ static const struct {
+ uint32_t fSrc;
+ uint32_t fDst;
+ } gSD[] = {
+ { SkPictInfo::kCrossProcess_Flag, SkReadBuffer::kCrossProcess_Flag },
+ { SkPictInfo::kScalarIsFloat_Flag, SkReadBuffer::kScalarIsFloat_Flag },
+ { SkPictInfo::kPtrIs64Bit_Flag, SkReadBuffer::kPtrIs64Bit_Flag },
+ };
+
+ uint32_t rbMask = 0;
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gSD); ++i) {
+ if (pictInfoFlags & gSD[i].fSrc) {
+ rbMask |= gSD[i].fDst;
+ }
+ }
+ return rbMask;
+}
+
+bool SkPictureData::parseStreamTag(SkStream* stream,
+ uint32_t tag,
+ uint32_t size,
+ SkImageDeserializer* factory,
+ SkTypefacePlayback* topLevelTFPlayback) {
+ /*
+ * By the time we encounter BUFFER_SIZE_TAG, we need to have already seen
+     * its dependents: FACTORY_TAG and TYPEFACE_TAG. These two are not required,
+     * but if they are present, they need to have been seen before the buffer.
+     *
+     * We assert that when we see either of these, we have not yet seen the
+     * buffer tag, because if we have, then it is too late to deal with the
+     * factories or typefaces.
+ */
+ SkDEBUGCODE(bool haveBuffer = false;)
+
+ switch (tag) {
+ case SK_PICT_READER_TAG:
+ SkASSERT(nullptr == fOpData);
+ fOpData = SkData::MakeFromStream(stream, size);
+ if (!fOpData) {
+ return false;
+ }
+ break;
+ case SK_PICT_FACTORY_TAG: {
+ SkASSERT(!haveBuffer);
+ size = stream->readU32();
+ fFactoryPlayback = new SkFactoryPlayback(size);
+ for (size_t i = 0; i < size; i++) {
+ SkString str;
+ const size_t len = stream->readPackedUInt();
+ str.resize(len);
+ if (stream->read(str.writable_str(), len) != len) {
+ return false;
+ }
+ fFactoryPlayback->base()[i] = SkFlattenable::NameToFactory(str.c_str());
+ }
+ } break;
+ case SK_PICT_TYPEFACE_TAG: {
+ SkASSERT(!haveBuffer);
+ const int count = SkToInt(size);
+ fTFPlayback.setCount(count);
+ for (int i = 0; i < count; i++) {
+ sk_sp<SkTypeface> tf(SkTypeface::MakeDeserialize(stream));
+ if (!tf.get()) { // failed to deserialize
+ // fTFPlayback asserts it never has a null, so we plop in
+ // the default here.
+ tf = SkTypeface::MakeDefault();
+ }
+ fTFPlayback.set(i, tf.get());
+ }
+ } break;
+ case SK_PICT_PICTURE_TAG: {
+ fPictureCount = 0;
+ fPictureRefs = new const SkPicture* [size];
+ for (uint32_t i = 0; i < size; i++) {
+ fPictureRefs[i] = SkPicture::MakeFromStream(stream, factory, topLevelTFPlayback).release();
+ if (!fPictureRefs[i]) {
+ return false;
+ }
+ fPictureCount++;
+ }
+ } break;
+ case SK_PICT_BUFFER_SIZE_TAG: {
+ SkAutoMalloc storage(size);
+ if (stream->read(storage.get(), size) != size) {
+ return false;
+ }
+
+ /* Should we use SkValidatingReadBuffer instead? */
+ SkReadBuffer buffer(storage.get(), size);
+ buffer.setFlags(pictInfoFlagsToReadBufferFlags(fInfo.fFlags));
+ buffer.setVersion(fInfo.getVersion());
+
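+            // SK_PICT_FACTORY_TAG must have been parsed already; without it we
+            // cannot resolve the flattenable factories referenced by the buffer.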
+ if (!fFactoryPlayback) {
+ return false;
+ }
+ fFactoryPlayback->setupBuffer(buffer);
+ buffer.setImageDeserializer(factory);
+
+ if (fTFPlayback.count() > 0) {
+ // .skp files <= v43 have typefaces serialized with each sub picture.
+ fTFPlayback.setupBuffer(buffer);
+ } else {
+ // Newer .skp files serialize all typefaces with the top picture.
+ topLevelTFPlayback->setupBuffer(buffer);
+ }
+
+ while (!buffer.eof() && buffer.isValid()) {
+ tag = buffer.readUInt();
+ size = buffer.readUInt();
+ if (!this->parseBufferTag(buffer, tag, size)) {
+ return false;
+ }
+ }
+ if (!buffer.isValid()) {
+ return false;
+ }
+ SkDEBUGCODE(haveBuffer = true;)
+ } break;
+ }
+ return true; // success
+}
+
+static const SkImage* create_image_from_buffer(SkReadBuffer& buffer) {
+ return buffer.readImage().release();
+}
+
+static const SkImage* create_bitmap_image_from_buffer(SkReadBuffer& buffer) {
+ return buffer.readBitmapAsImage().release();
+}
+
+// Need a shallow wrapper to return const SkPicture* to match the other factories,
+// as SkPicture::MakeFromBuffer() returns sk_sp<SkPicture>
+static const SkPicture* create_picture_from_buffer(SkReadBuffer& buffer) {
+ return SkPicture::MakeFromBuffer(buffer).release();
+}
+
+static const SkDrawable* create_drawable_from_buffer(SkReadBuffer& buffer) {
+ return (SkDrawable*) buffer.readFlattenable(SkFlattenable::kSkDrawable_Type);
+}
+
+template <typename T>
+bool new_array_from_buffer(SkReadBuffer& buffer, uint32_t inCount,
+ const T*** array, int* outCount, const T* (*factory)(SkReadBuffer&)) {
+ if (!buffer.validate((0 == *outCount) && (nullptr == *array))) {
+ return false;
+ }
+ if (0 == inCount) {
+ return true;
+ }
+ *outCount = inCount;
+ *array = new const T* [*outCount];
+ bool success = true;
+ int i = 0;
+ for (; i < *outCount; i++) {
+ (*array)[i] = factory(buffer);
+ if (nullptr == (*array)[i]) {
+ success = false;
+ break;
+ }
+ }
+ if (!success) {
+        // Unref all of the objects that were already created (up to but excluding i):
+        for (int j = 0; j < i; j++) {
+            (*array)[j]->unref();
+        }
+        // Delete the array
+        delete[] *array;
+ *array = nullptr;
+ *outCount = 0;
+ return false;
+ }
+ return true;
+}
+
+bool SkPictureData::parseBufferTag(SkReadBuffer& buffer, uint32_t tag, uint32_t size) {
+ switch (tag) {
+ case SK_PICT_BITMAP_BUFFER_TAG:
+ if (!new_array_from_buffer(buffer, size, &fBitmapImageRefs, &fBitmapImageCount,
+ create_bitmap_image_from_buffer)) {
+ return false;
+ }
+ break;
+ case SK_PICT_PAINT_BUFFER_TAG: {
+ const int count = SkToInt(size);
+ fPaints.reset(count);
+ for (int i = 0; i < count; ++i) {
+ buffer.readPaint(&fPaints[i]);
+ }
+ } break;
+ case SK_PICT_PATH_BUFFER_TAG:
+ if (size > 0) {
+ const int count = buffer.readInt();
+ fPaths.reset(count);
+ for (int i = 0; i < count; i++) {
+ buffer.readPath(&fPaths[i]);
+ }
+ } break;
+ case SK_PICT_TEXTBLOB_BUFFER_TAG:
+ if (!new_array_from_buffer(buffer, size, &fTextBlobRefs, &fTextBlobCount,
+ SkTextBlob::CreateFromBuffer)) {
+ return false;
+ }
+ break;
+ case SK_PICT_IMAGE_BUFFER_TAG:
+ if (!new_array_from_buffer(buffer, size, &fImageRefs, &fImageCount,
+ create_image_from_buffer)) {
+ return false;
+ }
+ break;
+ case SK_PICT_READER_TAG: {
+ auto data(SkData::MakeUninitialized(size));
+ if (!buffer.readByteArray(data->writable_data(), size) ||
+ !buffer.validate(nullptr == fOpData)) {
+ return false;
+ }
+ SkASSERT(nullptr == fOpData);
+ fOpData = std::move(data);
+ } break;
+ case SK_PICT_PICTURE_TAG:
+ if (!new_array_from_buffer(buffer, size, &fPictureRefs, &fPictureCount,
+ create_picture_from_buffer)) {
+ return false;
+ }
+ break;
+ case SK_PICT_DRAWABLE_TAG:
+ if (!new_array_from_buffer(buffer, size, (const SkDrawable***)&fDrawableRefs,
+ &fDrawableCount, create_drawable_from_buffer)) {
+ return false;
+ }
+ break;
+ default:
+ // The tag was invalid.
+ return false;
+ }
+ return true; // success
+}
+
+SkPictureData* SkPictureData::CreateFromStream(SkStream* stream,
+ const SkPictInfo& info,
+ SkImageDeserializer* factory,
+ SkTypefacePlayback* topLevelTFPlayback) {
+ SkAutoTDelete<SkPictureData> data(new SkPictureData(info));
+ if (!topLevelTFPlayback) {
+ topLevelTFPlayback = &data->fTFPlayback;
+ }
+
+ if (!data->parseStream(stream, factory, topLevelTFPlayback)) {
+ return nullptr;
+ }
+ return data.release();
+}
+
+SkPictureData* SkPictureData::CreateFromBuffer(SkReadBuffer& buffer,
+ const SkPictInfo& info) {
+ SkAutoTDelete<SkPictureData> data(new SkPictureData(info));
+ buffer.setVersion(info.getVersion());
+
+ if (!data->parseBuffer(buffer)) {
+ return nullptr;
+ }
+ return data.release();
+}
+
+bool SkPictureData::parseStream(SkStream* stream,
+ SkImageDeserializer* factory,
+ SkTypefacePlayback* topLevelTFPlayback) {
+ for (;;) {
+ uint32_t tag = stream->readU32();
+ if (SK_PICT_EOF_TAG == tag) {
+ break;
+ }
+
+ uint32_t size = stream->readU32();
+ if (!this->parseStreamTag(stream, tag, size, factory, topLevelTFPlayback)) {
+ return false; // we're invalid
+ }
+ }
+ return true;
+}
+
+bool SkPictureData::parseBuffer(SkReadBuffer& buffer) {
+ for (;;) {
+ uint32_t tag = buffer.readUInt();
+ if (SK_PICT_EOF_TAG == tag) {
+ break;
+ }
+
+ uint32_t size = buffer.readUInt();
+ if (!this->parseBufferTag(buffer, tag, size)) {
+ return false; // we're invalid
+ }
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+bool SkPictureData::suitableForGpuRasterization(GrContext* context, const char **reason,
+ int sampleCount) const {
+ return fContentInfo.suitableForGpuRasterization(context, reason, sampleCount);
+}
+
+bool SkPictureData::suitableForGpuRasterization(GrContext* context, const char **reason,
+ GrPixelConfig config, SkScalar dpi) const {
+
+ if (context != nullptr) {
+ return this->suitableForGpuRasterization(context, reason,
+ context->getRecommendedSampleCount(config, dpi));
+ } else {
+ return this->suitableForGpuRasterization(nullptr, reason);
+ }
+}
+
+bool SkPictureData::suitableForLayerOptimization() const {
+ return fContentInfo.numLayers() > 0;
+}
+#endif
+///////////////////////////////////////////////////////////////////////////////
diff --git a/gfx/skia/skia/src/core/SkPictureData.h b/gfx/skia/skia/src/core/SkPictureData.h
new file mode 100644
index 000000000..332b79963
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureData.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPictureData_DEFINED
+#define SkPictureData_DEFINED
+
+#include "SkBitmap.h"
+#include "SkDrawable.h"
+#include "SkPicture.h"
+#include "SkPictureContentInfo.h"
+#include "SkPictureFlat.h"
+
+class SkData;
+class SkPictureRecord;
+class SkPixelSerializer;
+class SkReader32;
+class SkStream;
+class SkWStream;
+class SkBBoxHierarchy;
+class SkMatrix;
+class SkPaint;
+class SkPath;
+class SkReadBuffer;
+class SkTextBlob;
+
+struct SkPictInfo {
+ enum Flags {
+ kCrossProcess_Flag = 1 << 0,
+ kScalarIsFloat_Flag = 1 << 1,
+ kPtrIs64Bit_Flag = 1 << 2,
+ };
+
+ SkPictInfo() : fVersion(~0U) {}
+
+ uint32_t getVersion() const {
+ SkASSERT(fVersion != ~0U);
+ return fVersion;
+ }
+
+ void setVersion(uint32_t version) {
+ SkASSERT(version != ~0U);
+ fVersion = version;
+ }
+
+public:
+ char fMagic[8];
+private:
+ uint32_t fVersion;
+public:
+ SkRect fCullRect;
+ uint32_t fFlags;
+};
+
+#define SK_PICT_READER_TAG SkSetFourByteTag('r', 'e', 'a', 'd')
+#define SK_PICT_FACTORY_TAG SkSetFourByteTag('f', 'a', 'c', 't')
+#define SK_PICT_TYPEFACE_TAG SkSetFourByteTag('t', 'p', 'f', 'c')
+#define SK_PICT_PICTURE_TAG SkSetFourByteTag('p', 'c', 't', 'r')
+#define SK_PICT_DRAWABLE_TAG SkSetFourByteTag('d', 'r', 'a', 'w')
+
+// This tag specifies the size of the ReadBuffer, needed for the following tags
+#define SK_PICT_BUFFER_SIZE_TAG SkSetFourByteTag('a', 'r', 'a', 'y')
+// these are all inside the ARRAYS tag
+#define SK_PICT_BITMAP_BUFFER_TAG SkSetFourByteTag('b', 't', 'm', 'p')
+#define SK_PICT_PAINT_BUFFER_TAG SkSetFourByteTag('p', 'n', 't', ' ')
+#define SK_PICT_PATH_BUFFER_TAG SkSetFourByteTag('p', 't', 'h', ' ')
+#define SK_PICT_TEXTBLOB_BUFFER_TAG SkSetFourByteTag('b', 'l', 'o', 'b')
+#define SK_PICT_IMAGE_BUFFER_TAG SkSetFourByteTag('i', 'm', 'a', 'g')
+
+// Always write this guy last (with no length field afterwards)
+#define SK_PICT_EOF_TAG SkSetFourByteTag('e', 'o', 'f', ' ')
+
+class SkPictureData {
+public:
+ SkPictureData(const SkPictureRecord& record, const SkPictInfo&);
+ // Does not affect ownership of SkStream.
+ static SkPictureData* CreateFromStream(SkStream*,
+ const SkPictInfo&,
+ SkImageDeserializer*,
+ SkTypefacePlayback*);
+ static SkPictureData* CreateFromBuffer(SkReadBuffer&, const SkPictInfo&);
+
+ virtual ~SkPictureData();
+
+ void serialize(SkWStream*, SkPixelSerializer*, SkRefCntSet*) const;
+ void flatten(SkWriteBuffer&) const;
+
+ bool containsBitmaps() const;
+
+ bool hasText() const { return fContentInfo.hasText(); }
+
+ int opCount() const { return fContentInfo.numOperations(); }
+
+ const sk_sp<SkData>& opData() const { return fOpData; }
+
+protected:
+ explicit SkPictureData(const SkPictInfo& info);
+
+ // Does not affect ownership of SkStream.
+ bool parseStream(SkStream*, SkImageDeserializer*, SkTypefacePlayback*);
+ bool parseBuffer(SkReadBuffer& buffer);
+
+public:
+ const SkImage* getBitmapAsImage(SkReadBuffer* reader) const {
+ const int index = reader->readInt();
+ return reader->validateIndex(index, fBitmapImageCount) ? fBitmapImageRefs[index] : nullptr;
+ }
+
+ const SkImage* getImage(SkReadBuffer* reader) const {
+ const int index = reader->readInt();
+ return reader->validateIndex(index, fImageCount) ? fImageRefs[index] : nullptr;
+ }
+
+ const SkPath& getPath(SkReadBuffer* reader) const {
+ const int index = reader->readInt() - 1;
+ return reader->validateIndex(index, fPaths.count()) ? fPaths[index] : fEmptyPath;
+ }
+
+ const SkPicture* getPicture(SkReadBuffer* reader) const {
+ const int index = reader->readInt() - 1;
+ return reader->validateIndex(index, fPictureCount) ? fPictureRefs[index] : nullptr;
+ }
+
+ SkDrawable* getDrawable(SkReadBuffer* reader) const {
+ int index = reader->readInt();
+ SkASSERT(index > 0 && index <= fDrawableCount);
+ return fDrawableRefs[index - 1];
+ }
+
+ const SkPaint* getPaint(SkReadBuffer* reader) const {
+ const int index = reader->readInt() - 1;
+ if (index == -1) { // recorder wrote a zero for no paint (likely drawimage)
+ return nullptr;
+ }
+ return reader->validateIndex(index, fPaints.count()) ? &fPaints[index] : nullptr;
+ }
+
+ const SkTextBlob* getTextBlob(SkReadBuffer* reader) const {
+ const int index = reader->readInt() - 1;
+ return reader->validateIndex(index, fTextBlobCount) ? fTextBlobRefs[index] : nullptr;
+ }
+
+#if SK_SUPPORT_GPU
+ /**
+ * sampleCount is the number of samples-per-pixel or zero if non-MSAA.
+ * It is defaulted to be zero.
+ */
+ bool suitableForGpuRasterization(GrContext* context, const char **reason,
+ int sampleCount = 0) const;
+
+ /**
+ * Calls getRecommendedSampleCount with GrPixelConfig and dpi to calculate sampleCount
+ * and then calls the above version of suitableForGpuRasterization
+ */
+ bool suitableForGpuRasterization(GrContext* context, const char **reason,
+ GrPixelConfig config, SkScalar dpi) const;
+
+ bool suitableForLayerOptimization() const;
+#endif
+
+private:
+ void init();
+
+ // these help us with reading/writing
+ // Does not affect ownership of SkStream.
+ bool parseStreamTag(SkStream*, uint32_t tag, uint32_t size,
+ SkImageDeserializer*, SkTypefacePlayback*);
+ bool parseBufferTag(SkReadBuffer&, uint32_t tag, uint32_t size);
+ void flattenToBuffer(SkWriteBuffer&) const;
+
+ SkTArray<SkPaint> fPaints;
+ SkTArray<SkPath> fPaths;
+
+ sk_sp<SkData> fOpData; // opcodes and parameters
+
+ const SkPath fEmptyPath;
+ const SkBitmap fEmptyBitmap;
+
+ const SkPicture** fPictureRefs;
+ int fPictureCount;
+ SkDrawable** fDrawableRefs;
+ int fDrawableCount;
+ const SkTextBlob** fTextBlobRefs;
+ int fTextBlobCount;
+ const SkImage** fImageRefs;
+ int fImageCount;
+ const SkImage** fBitmapImageRefs;
+ int fBitmapImageCount;
+
+ SkPictureContentInfo fContentInfo;
+
+ SkTypefacePlayback fTFPlayback;
+ SkFactoryPlayback* fFactoryPlayback;
+
+ const SkPictInfo fInfo;
+
+ static void WriteFactories(SkWStream* stream, const SkFactorySet& rec);
+ static void WriteTypefaces(SkWStream* stream, const SkRefCntSet& rec);
+
+ void initForPlayback() const;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPictureFlat.cpp b/gfx/skia/skia/src/core/SkPictureFlat.cpp
new file mode 100644
index 000000000..013bc7f98
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureFlat.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkPictureFlat.h"
+
+#include "SkChecksum.h"
+#include "SkColorFilter.h"
+#include "SkDrawLooper.h"
+#include "SkMaskFilter.h"
+#include "SkRasterizer.h"
+#include "SkShader.h"
+#include "SkTypeface.h"
+#include "SkXfermode.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkTypefacePlayback::SkTypefacePlayback() : fCount(0), fArray(nullptr) {}
+
+SkTypefacePlayback::~SkTypefacePlayback() {
+ this->reset(nullptr);
+}
+
+void SkTypefacePlayback::reset(const SkRefCntSet* rec) {
+ for (int i = 0; i < fCount; i++) {
+ SkASSERT(fArray[i]);
+ fArray[i]->unref();
+ }
+ delete[] fArray;
+
+    if (rec != nullptr && rec->count() > 0) {
+ fCount = rec->count();
+ fArray = new SkRefCnt* [fCount];
+ rec->copyToArray(fArray);
+ for (int i = 0; i < fCount; i++) {
+ fArray[i]->ref();
+ }
+ } else {
+ fCount = 0;
+ fArray = nullptr;
+ }
+}
+
+void SkTypefacePlayback::setCount(int count) {
+ this->reset(nullptr);
+
+ fCount = count;
+ fArray = new SkRefCnt* [count];
+ sk_bzero(fArray, count * sizeof(SkRefCnt*));
+}
+
+SkRefCnt* SkTypefacePlayback::set(int index, SkRefCnt* obj) {
+ SkASSERT((unsigned)index < (unsigned)fCount);
+ SkRefCnt_SafeAssign(fArray[index], obj);
+ return obj;
+}
diff --git a/gfx/skia/skia/src/core/SkPictureFlat.h b/gfx/skia/skia/src/core/SkPictureFlat.h
new file mode 100644
index 000000000..beb2dd833
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureFlat.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPictureFlat_DEFINED
+#define SkPictureFlat_DEFINED
+
+#include "SkCanvas.h"
+#include "SkChecksum.h"
+#include "SkChunkAlloc.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+#include "SkPaint.h"
+#include "SkPicture.h"
+#include "SkPtrRecorder.h"
+#include "SkTDynamicHash.h"
+
+/*
+ * Note: When adding new DrawTypes, add them to the end of this list and update
+ *       LAST_DRAWTYPE_ENUM; otherwise the code may misread older .skps
+ *       (which can cause segfaults).
+ *
+ * Reordering can be done during version updates.
+ */
+enum DrawType {
+ UNUSED,
+ CLIP_PATH,
+ CLIP_REGION,
+ CLIP_RECT,
+ CLIP_RRECT,
+ CONCAT,
+ DRAW_BITMAP,
+ DRAW_BITMAP_MATRIX, // deprecated, M41 was last Chromium version to write this to an .skp
+ DRAW_BITMAP_NINE,
+ DRAW_BITMAP_RECT,
+ DRAW_CLEAR,
+ DRAW_DATA,
+ DRAW_OVAL,
+ DRAW_PAINT,
+ DRAW_PATH,
+ DRAW_PICTURE,
+ DRAW_POINTS,
+ DRAW_POS_TEXT,
+ DRAW_POS_TEXT_TOP_BOTTOM, // fast variant of DRAW_POS_TEXT
+ DRAW_POS_TEXT_H,
+ DRAW_POS_TEXT_H_TOP_BOTTOM, // fast variant of DRAW_POS_TEXT_H
+ DRAW_RECT,
+ DRAW_RRECT,
+ DRAW_SPRITE,
+ DRAW_TEXT,
+ DRAW_TEXT_ON_PATH,
+ DRAW_TEXT_TOP_BOTTOM, // fast variant of DRAW_TEXT
+ DRAW_VERTICES,
+ RESTORE,
+ ROTATE,
+ SAVE,
+ SAVE_LAYER_SAVEFLAGS_DEPRECATED,
+ SCALE,
+ SET_MATRIX,
+ SKEW,
+ TRANSLATE,
+ NOOP,
+ BEGIN_COMMENT_GROUP, // deprecated (M44)
+ COMMENT, // deprecated (M44)
+ END_COMMENT_GROUP, // deprecated (M44)
+
+ // new ops -- feel free to re-alphabetize on next version bump
+ DRAW_DRRECT,
+ PUSH_CULL, // deprecated, M41 was last Chromium version to write this to an .skp
+ POP_CULL, // deprecated, M41 was last Chromium version to write this to an .skp
+
+    DRAW_PATCH, // could not add in alphabetical order
+ DRAW_PICTURE_MATRIX_PAINT,
+ DRAW_TEXT_BLOB,
+ DRAW_IMAGE,
+ DRAW_IMAGE_RECT_STRICT, // deprecated (M45)
+ DRAW_ATLAS,
+ DRAW_IMAGE_NINE,
+ DRAW_IMAGE_RECT,
+
+ SAVE_LAYER_SAVELAYERFLAGS_DEPRECATED_JAN_2016,
+ SAVE_LAYER_SAVELAYERREC,
+
+ DRAW_ANNOTATION,
+ DRAW_DRAWABLE,
+ DRAW_DRAWABLE_MATRIX,
+ DRAW_TEXT_RSXFORM,
+
+ TRANSLATE_Z,
+
+ DRAW_SHADOWED_PICTURE_LIGHTS,
+ DRAW_IMAGE_LATTICE,
+ DRAW_ARC,
+ DRAW_REGION,
+
+ LAST_DRAWTYPE_ENUM = DRAW_REGION
+};
+
+// In the 'match' method, this constant will match any flavor of DRAW_BITMAP*
+static const int kDRAW_BITMAP_FLAVOR = LAST_DRAWTYPE_ENUM+1;
+
+enum DrawVertexFlags {
+ DRAW_VERTICES_HAS_TEXS = 0x01,
+ DRAW_VERTICES_HAS_COLORS = 0x02,
+ DRAW_VERTICES_HAS_INDICES = 0x04,
+ DRAW_VERTICES_HAS_XFER = 0x08,
+};
+
+enum DrawAtlasFlags {
+ DRAW_ATLAS_HAS_COLORS = 1 << 0,
+ DRAW_ATLAS_HAS_CULL = 1 << 1,
+};
+
+enum DrawTextRSXformFlags {
+ DRAW_TEXT_RSXFORM_HAS_CULL = 1 << 0,
+};
+
+enum SaveLayerRecFlatFlags {
+ SAVELAYERREC_HAS_BOUNDS = 1 << 0,
+ SAVELAYERREC_HAS_PAINT = 1 << 1,
+ SAVELAYERREC_HAS_BACKDROP = 1 << 2,
+ SAVELAYERREC_HAS_FLAGS = 1 << 3,
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// clipparams are packed in 5 bits
+// doAA:1 | clipOp:4
+
+static inline uint32_t ClipParams_pack(SkCanvas::ClipOp op, bool doAA) {
+ unsigned doAABit = doAA ? 1 : 0;
+ return (doAABit << 4) | op;
+}
+
+static inline SkCanvas::ClipOp ClipParams_unpackRegionOp(uint32_t packed) {
+ return (SkCanvas::ClipOp)(packed & 0xF);
+}
+
+static inline bool ClipParams_unpackDoAA(uint32_t packed) {
+ return SkToBool((packed >> 4) & 1);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkTypefacePlayback {
+public:
+ SkTypefacePlayback();
+ virtual ~SkTypefacePlayback();
+
+ int count() const { return fCount; }
+
+ void reset(const SkRefCntSet*);
+
+ void setCount(int count);
+ SkRefCnt* set(int index, SkRefCnt*);
+
+ void setupBuffer(SkReadBuffer& buffer) const {
+ buffer.setTypefaceArray((SkTypeface**)fArray, fCount);
+ }
+
+protected:
+ int fCount;
+ SkRefCnt** fArray;
+};
+
+class SkFactoryPlayback {
+public:
+ SkFactoryPlayback(int count) : fCount(count) { fArray = new SkFlattenable::Factory[count]; }
+
+ ~SkFactoryPlayback() { delete[] fArray; }
+
+ SkFlattenable::Factory* base() const { return fArray; }
+
+ void setupBuffer(SkReadBuffer& buffer) const {
+ buffer.setFactoryPlayback(fArray, fCount);
+ }
+
+private:
+ int fCount;
+ SkFlattenable::Factory* fArray;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPictureImageGenerator.cpp b/gfx/skia/skia/src/core/SkPictureImageGenerator.cpp
new file mode 100644
index 000000000..6f4ffa1f5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureImageGenerator.cpp
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkImage_Base.h"
+#include "SkImageGenerator.h"
+#include "SkCanvas.h"
+#include "SkMatrix.h"
+#include "SkPaint.h"
+#include "SkPicture.h"
+#include "SkSurface.h"
+#include "SkTLazy.h"
+
+class SkPictureImageGenerator : SkImageGenerator {
+public:
+ static SkImageGenerator* Create(const SkISize&, const SkPicture*, const SkMatrix*,
+ const SkPaint*);
+
+protected:
+ bool onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes, SkPMColor ctable[],
+ int* ctableCount) override;
+ bool onComputeScaledDimensions(SkScalar scale, SupportedSizes*) override;
+ bool onGenerateScaledPixels(const SkISize&, const SkIPoint&, const SkPixmap&) override;
+
+#if SK_SUPPORT_GPU
+ GrTexture* onGenerateTexture(GrContext*, const SkIRect*) override;
+#endif
+
+private:
+ SkPictureImageGenerator(const SkISize&, const SkPicture*, const SkMatrix*, const SkPaint*);
+
+ SkAutoTUnref<const SkPicture> fPicture;
+ SkMatrix fMatrix;
+ SkTLazy<SkPaint> fPaint;
+
+ typedef SkImageGenerator INHERITED;
+};
+
+SkImageGenerator* SkPictureImageGenerator::Create(const SkISize& size, const SkPicture* picture,
+ const SkMatrix* matrix, const SkPaint* paint) {
+ if (!picture || size.isEmpty()) {
+ return nullptr;
+ }
+
+ return new SkPictureImageGenerator(size, picture, matrix, paint);
+}
+
+SkPictureImageGenerator::SkPictureImageGenerator(const SkISize& size, const SkPicture* picture,
+ const SkMatrix* matrix, const SkPaint* paint)
+ : INHERITED(SkImageInfo::MakeN32Premul(size))
+ , fPicture(SkRef(picture)) {
+
+ if (matrix) {
+ fMatrix = *matrix;
+ } else {
+ fMatrix.reset();
+ }
+
+ if (paint) {
+ fPaint.set(*paint);
+ }
+}
+
+bool SkPictureImageGenerator::onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ SkPMColor ctable[], int* ctableCount) {
+ if (info != getInfo() || ctable || ctableCount) {
+ return false;
+ }
+
+ SkBitmap bitmap;
+ if (!bitmap.installPixels(info, pixels, rowBytes)) {
+ return false;
+ }
+
+ bitmap.eraseColor(SK_ColorTRANSPARENT);
+ SkCanvas canvas(bitmap, SkSurfaceProps(0, kUnknown_SkPixelGeometry));
+ canvas.drawPicture(fPicture, &fMatrix, fPaint.getMaybeNull());
+
+ return true;
+}
+
+bool SkPictureImageGenerator::onComputeScaledDimensions(SkScalar scale,
+ SupportedSizes* sizes) {
+ SkASSERT(scale > 0 && scale <= 1);
+ const int w = this->getInfo().width();
+ const int h = this->getInfo().height();
+ const int sw = SkScalarRoundToInt(scale * w);
+ const int sh = SkScalarRoundToInt(scale * h);
+ if (sw > 0 && sh > 0) {
+ sizes->fSizes[0].set(sw, sh);
+ sizes->fSizes[1].set(sw, sh);
+ return true;
+ }
+ return false;
+}
+
+bool SkPictureImageGenerator::onGenerateScaledPixels(const SkISize& scaledSize,
+ const SkIPoint& scaledOrigin,
+ const SkPixmap& scaledPixels) {
+ int w = scaledSize.width();
+ int h = scaledSize.height();
+
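+    // Map the full-size picture onto the requested tile: scale it to the target
+    // size, then shift so scaledOrigin lands at (0, 0). fMatrix is applied first
+    // via preConcat() below.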
+ const SkScalar scaleX = SkIntToScalar(w) / this->getInfo().width();
+ const SkScalar scaleY = SkIntToScalar(h) / this->getInfo().height();
+ SkMatrix matrix = SkMatrix::MakeScale(scaleX, scaleY);
+ matrix.postTranslate(-SkIntToScalar(scaledOrigin.x()), -SkIntToScalar(scaledOrigin.y()));
+
+ SkBitmap bitmap;
+ if (!bitmap.installPixels(scaledPixels)) {
+ return false;
+ }
+
+ bitmap.eraseColor(SK_ColorTRANSPARENT);
+ SkCanvas canvas(bitmap, SkSurfaceProps(0, kUnknown_SkPixelGeometry));
+ matrix.preConcat(fMatrix);
+ canvas.drawPicture(fPicture, &matrix, fPaint.getMaybeNull());
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkImageGenerator* SkImageGenerator::NewFromPicture(const SkISize& size, const SkPicture* picture,
+ const SkMatrix* matrix, const SkPaint* paint) {
+ return SkPictureImageGenerator::Create(size, picture, matrix, paint);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+#include "GrTexture.h"
+
+GrTexture* SkPictureImageGenerator::onGenerateTexture(GrContext* ctx, const SkIRect* subset) {
+ const SkImageInfo& info = this->getInfo();
+ SkImageInfo surfaceInfo = subset ? info.makeWH(subset->width(), subset->height()) : info;
+
+ //
+ // TODO: respect the usage, by possibly creating a different (pow2) surface
+ //
+ sk_sp<SkSurface> surface(SkSurface::MakeRenderTarget(ctx, SkBudgeted::kYes, surfaceInfo));
+ if (!surface) {
+ return nullptr;
+ }
+
+ SkMatrix matrix = fMatrix;
+ if (subset) {
+ matrix.postTranslate(-subset->x(), -subset->y());
+ }
+ surface->getCanvas()->clear(0); // does NewRenderTarget promise to do this for us?
+ surface->getCanvas()->drawPicture(fPicture, &matrix, fPaint.getMaybeNull());
+ sk_sp<SkImage> image(surface->makeImageSnapshot());
+ if (!image) {
+ return nullptr;
+ }
+ return SkSafeRef(as_IB(image)->peekTexture());
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkPicturePlayback.cpp b/gfx/skia/skia/src/core/SkPicturePlayback.cpp
new file mode 100644
index 000000000..a246a306e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPicturePlayback.cpp
@@ -0,0 +1,682 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCanvas.h"
+#include "SkPatchUtils.h"
+#include "SkPictureData.h"
+#include "SkPicturePlayback.h"
+#include "SkPictureRecord.h"
+#include "SkReadBuffer.h"
+#include "SkRSXform.h"
+#include "SkTextBlob.h"
+#include "SkTDArray.h"
+#include "SkTypes.h"
+
+// matches old SkCanvas::SaveFlags
+enum LegacySaveFlags {
+ kHasAlphaLayer_LegacySaveFlags = 0x04,
+ kClipToLayer_LegacySaveFlags = 0x10,
+};
+
+SkCanvas::SaveLayerFlags SkCanvas::LegacySaveFlagsToSaveLayerFlags(uint32_t flags) {
+ uint32_t layerFlags = 0;
+
+ if (0 == (flags & kClipToLayer_LegacySaveFlags)) {
+ layerFlags |= SkCanvas::kDontClipToLayer_PrivateSaveLayerFlag;
+ }
+ if (0 == (flags & kHasAlphaLayer_LegacySaveFlags)) {
+ layerFlags |= kIsOpaque_SaveLayerFlag;
+ }
+ return layerFlags;
+}
+
+/*
+ * Read the next op code and chunk size from 'reader'. The returned size
+ * is the entire size of the chunk (including the opcode). Thus, the
+ * offset just prior to calling ReadOpAndSize + 'size' is the offset
+ * to the next chunk's op code. This also means that the size of a chunk
+ * with no arguments (just an opcode) will be 4.
+ */
+DrawType SkPicturePlayback::ReadOpAndSize(SkReadBuffer* reader, uint32_t* size) {
+ uint32_t temp = reader->readInt();
+ uint32_t op;
+ if (((uint8_t)temp) == temp) {
+ // old skp file - no size information
+ op = temp;
+ *size = 0;
+ } else {
+ UNPACK_8_24(temp, op, *size);
+ if (MASK_24 == *size) {
+ *size = reader->readInt();
+ }
+ }
+ return (DrawType)op;
+}
+
+
+static const SkRect* get_rect_ptr(SkReadBuffer* reader, SkRect* storage) {
+ if (reader->readBool()) {
+ reader->readRect(storage);
+ return storage;
+ } else {
+ return nullptr;
+ }
+}
+
+class TextContainer {
+public:
+ size_t length() { return fByteLength; }
+ const void* text() { return (const void*)fText; }
+ size_t fByteLength;
+ const char* fText;
+};
+
+void get_text(SkReadBuffer* reader, TextContainer* text) {
+ size_t length = text->fByteLength = reader->readInt();
+ text->fText = (const char*)reader->skip(length);
+}
+
+void SkPicturePlayback::draw(SkCanvas* canvas,
+ SkPicture::AbortCallback* callback,
+ SkReadBuffer* buffer) {
+ AutoResetOpID aroi(this);
+ SkASSERT(0 == fCurOffset);
+
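+    // When called with a parent SkReadBuffer (nested pictures), clone it over
+    // the op data so the parent's settings carry over; otherwise build a fresh
+    // reader over the op data.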
+ SkAutoTDelete<SkReadBuffer> reader;
+ if (buffer) {
+ reader.reset(buffer->clone(fPictureData->opData()->bytes(),
+ fPictureData->opData()->size()));
+ } else {
+ reader.reset(new SkReadBuffer(fPictureData->opData()->bytes(),
+ fPictureData->opData()->size()));
+ }
+
+ // Record this, so we can concat w/ it if we encounter a setMatrix()
+ SkMatrix initialMatrix = canvas->getTotalMatrix();
+
+ SkAutoCanvasRestore acr(canvas, false);
+
+ while (!reader->eof()) {
+ if (callback && callback->abort()) {
+ return;
+ }
+
+ fCurOffset = reader->offset();
+ uint32_t size;
+ DrawType op = ReadOpAndSize(reader, &size);
+ if (!reader->validate(op > UNUSED && op <= LAST_DRAWTYPE_ENUM)) {
+ return;
+ }
+
+ this->handleOp(reader, op, size, canvas, initialMatrix);
+ }
+
+ // need to propagate invalid state to the parent reader
+ if (buffer) {
+ buffer->validate(reader->isValid());
+ }
+}
+
+void SkPicturePlayback::handleOp(SkReadBuffer* reader,
+ DrawType op,
+ uint32_t size,
+ SkCanvas* canvas,
+ const SkMatrix& initialMatrix) {
+ switch (op) {
+ case NOOP: {
+ SkASSERT(size >= 4);
+ reader->skip(size - 4);
+ } break;
+ case CLIP_PATH: {
+ const SkPath& path = fPictureData->getPath(reader);
+ uint32_t packed = reader->readInt();
+ SkCanvas::ClipOp clipOp = ClipParams_unpackRegionOp(packed);
+ bool doAA = ClipParams_unpackDoAA(packed);
+ size_t offsetToRestore = reader->readInt();
+ SkASSERT(!offsetToRestore || offsetToRestore >= reader->offset());
+ canvas->clipPath(path, clipOp, doAA);
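+            // If the clip is now empty, nothing up to the matching restore can
+            // draw, so skip ahead to the recorded restore offset.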
+ if (canvas->isClipEmpty() && offsetToRestore) {
+ reader->skip(offsetToRestore - reader->offset());
+ }
+ } break;
+ case CLIP_REGION: {
+ SkRegion region;
+ reader->readRegion(&region);
+ uint32_t packed = reader->readInt();
+ SkCanvas::ClipOp clipOp = ClipParams_unpackRegionOp(packed);
+ size_t offsetToRestore = reader->readInt();
+ SkASSERT(!offsetToRestore || offsetToRestore >= reader->offset());
+ canvas->clipRegion(region, clipOp);
+ if (canvas->isClipEmpty() && offsetToRestore) {
+ reader->skip(offsetToRestore - reader->offset());
+ }
+ } break;
+ case CLIP_RECT: {
+ SkRect rect;
+ reader->readRect(&rect);
+ uint32_t packed = reader->readInt();
+ SkCanvas::ClipOp clipOp = ClipParams_unpackRegionOp(packed);
+ bool doAA = ClipParams_unpackDoAA(packed);
+ size_t offsetToRestore = reader->readInt();
+ SkASSERT(!offsetToRestore || offsetToRestore >= reader->offset());
+ canvas->clipRect(rect, clipOp, doAA);
+ if (canvas->isClipEmpty() && offsetToRestore) {
+ reader->skip(offsetToRestore - reader->offset());
+ }
+ } break;
+ case CLIP_RRECT: {
+ SkRRect rrect;
+ reader->readRRect(&rrect);
+ uint32_t packed = reader->readInt();
+ SkCanvas::ClipOp clipOp = ClipParams_unpackRegionOp(packed);
+ bool doAA = ClipParams_unpackDoAA(packed);
+ size_t offsetToRestore = reader->readInt();
+ SkASSERT(!offsetToRestore || offsetToRestore >= reader->offset());
+ canvas->clipRRect(rrect, clipOp, doAA);
+ if (canvas->isClipEmpty() && offsetToRestore) {
+ reader->skip(offsetToRestore - reader->offset());
+ }
+ } break;
+ case PUSH_CULL: break; // Deprecated, safe to ignore both push and pop.
+ case POP_CULL: break;
+ case CONCAT: {
+ SkMatrix matrix;
+ reader->readMatrix(&matrix);
+ canvas->concat(matrix);
+ break;
+ }
+ case DRAW_ANNOTATION: {
+ SkRect rect;
+ reader->readRect(&rect);
+ SkString key;
+ reader->readString(&key);
+ canvas->drawAnnotation(rect, key.c_str(), reader->readByteArrayAsData().get());
+ } break;
+ case DRAW_ARC: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ SkRect rect;
+ reader->readRect(&rect);
+ SkScalar startAngle = reader->readScalar();
+ SkScalar sweepAngle = reader->readScalar();
+ int useCenter = reader->readInt();
+ if (paint) {
+ canvas->drawArc(rect, startAngle, sweepAngle, SkToBool(useCenter), *paint);
+ }
+ } break;
+ case DRAW_ATLAS: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ const SkImage* atlas = fPictureData->getImage(reader);
+ const uint32_t flags = reader->readUInt();
+ const int count = reader->readUInt();
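+            // skip() returns a pointer to the in-place array data and advances
+            // the reader past it.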
+ const SkRSXform* xform = (const SkRSXform*)reader->skip(count * sizeof(SkRSXform));
+ const SkRect* tex = (const SkRect*)reader->skip(count * sizeof(SkRect));
+ const SkColor* colors = nullptr;
+ SkXfermode::Mode mode = SkXfermode::kDst_Mode;
+ if (flags & DRAW_ATLAS_HAS_COLORS) {
+ colors = (const SkColor*)reader->skip(count * sizeof(SkColor));
+ mode = (SkXfermode::Mode)reader->readUInt();
+ }
+ const SkRect* cull = nullptr;
+ if (flags & DRAW_ATLAS_HAS_CULL) {
+ cull = (const SkRect*)reader->skip(sizeof(SkRect));
+ }
+ canvas->drawAtlas(atlas, xform, tex, colors, count, mode, cull, paint);
+ } break;
+ case DRAW_BITMAP: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ const SkImage* image = fPictureData->getBitmapAsImage(reader);
+ SkPoint loc;
+ reader->readPoint(&loc);
+ canvas->drawImage(image, loc.fX, loc.fY, paint);
+ } break;
+ case DRAW_BITMAP_RECT: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ const SkImage* image = fPictureData->getBitmapAsImage(reader);
+ SkRect storage;
+ const SkRect* src = get_rect_ptr(reader, &storage); // may be null
+ SkRect dst;
+ reader->readRect(&dst); // required
+ SkCanvas::SrcRectConstraint constraint = (SkCanvas::SrcRectConstraint)reader->readInt();
+ if (src) {
+ canvas->drawImageRect(image, *src, dst, paint, constraint);
+ } else {
+ canvas->drawImageRect(image, dst, paint, constraint);
+ }
+ } break;
+ case DRAW_BITMAP_MATRIX: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ const SkImage* image = fPictureData->getBitmapAsImage(reader);
+ SkMatrix matrix;
+ reader->readMatrix(&matrix);
+
+ SkAutoCanvasRestore acr(canvas, true);
+ canvas->concat(matrix);
+ canvas->drawImage(image, 0, 0, paint);
+ } break;
+ case DRAW_BITMAP_NINE: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ const SkImage* image = fPictureData->getBitmapAsImage(reader);
+ SkIRect src;
+ reader->readIRect(&src);
+ SkRect dst;
+ reader->readRect(&dst);
+ canvas->drawImageNine(image, src, dst, paint);
+ } break;
+ case DRAW_CLEAR:
+ canvas->clear(reader->readInt());
+ break;
+ case DRAW_DATA: {
+            // This opcode is now dead; we just need to skip its payload for
+            // backwards compatibility.
+            size_t length = reader->readInt();
+            (void)reader->skip(length);
+            // skip() handles padding the read out to a multiple of 4
+ } break;
+ case DRAW_DRAWABLE:
+ canvas->drawDrawable(fPictureData->getDrawable(reader));
+ break;
+ case DRAW_DRAWABLE_MATRIX: {
+ SkMatrix matrix;
+ reader->readMatrix(&matrix);
+ SkDrawable* drawable = fPictureData->getDrawable(reader);
+ canvas->drawDrawable(drawable, &matrix);
+ } break;
+ case DRAW_DRRECT: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ SkRRect outer, inner;
+ reader->readRRect(&outer);
+ reader->readRRect(&inner);
+ if (paint) {
+ canvas->drawDRRect(outer, inner, *paint);
+ }
+ } break;
+ case BEGIN_COMMENT_GROUP: {
+ SkString tmp;
+ reader->readString(&tmp);
+ // deprecated (M44)
+ break;
+ }
+ case COMMENT: {
+ SkString tmp;
+ reader->readString(&tmp);
+ reader->readString(&tmp);
+ // deprecated (M44)
+ break;
+ }
+ case END_COMMENT_GROUP:
+ // deprecated (M44)
+ break;
+ case DRAW_IMAGE: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ const SkImage* image = fPictureData->getImage(reader);
+ SkPoint loc;
+ reader->readPoint(&loc);
+ canvas->drawImage(image, loc.fX, loc.fY, paint);
+ } break;
+ case DRAW_IMAGE_LATTICE: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ const SkImage* image = fPictureData->getImage(reader);
+ SkCanvas::Lattice lattice;
+ lattice.fXCount = reader->readInt();
+ lattice.fXDivs = (const int*) reader->skip(lattice.fXCount * sizeof(int32_t));
+ lattice.fYCount = reader->readInt();
+ lattice.fYDivs = (const int*) reader->skip(lattice.fYCount * sizeof(int32_t));
+ int flagCount = reader->readInt();
+ lattice.fFlags = (0 == flagCount) ? nullptr : (const SkCanvas::Lattice::Flags*)
+ reader->skip(SkAlign4(flagCount * sizeof(SkCanvas::Lattice::Flags)));
+ SkIRect src;
+ reader->readIRect(&src);
+ lattice.fBounds = &src;
+ SkRect dst;
+ reader->readRect(&dst);
+ canvas->drawImageLattice(image, lattice, dst, paint);
+ } break;
+ case DRAW_IMAGE_NINE: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ const SkImage* image = fPictureData->getImage(reader);
+ SkIRect center;
+ reader->readIRect(&center);
+ SkRect dst;
+ reader->readRect(&dst);
+ canvas->drawImageNine(image, center, dst, paint);
+ } break;
+ case DRAW_IMAGE_RECT_STRICT:
+ case DRAW_IMAGE_RECT: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ const SkImage* image = fPictureData->getImage(reader);
+ SkRect storage;
+ const SkRect* src = get_rect_ptr(reader, &storage); // may be null
+ SkRect dst;
+ reader->readRect(&dst); // required
+ // DRAW_IMAGE_RECT_STRICT assumes this constraint, and doesn't store it
+ SkCanvas::SrcRectConstraint constraint = SkCanvas::kStrict_SrcRectConstraint;
+ if (DRAW_IMAGE_RECT == op) {
+ // newer op-code stores the constraint explicitly
+ constraint = (SkCanvas::SrcRectConstraint)reader->readInt();
+ }
+ canvas->legacy_drawImageRect(image, src, dst, paint, constraint);
+ } break;
+ case DRAW_OVAL: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ SkRect rect;
+ reader->readRect(&rect);
+ if (paint) {
+ canvas->drawOval(rect, *paint);
+ }
+ } break;
+ case DRAW_PAINT: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ if (paint) {
+ canvas->drawPaint(*paint);
+ }
+ } break;
+ case DRAW_PATCH: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+
+ const SkPoint* cubics = (const SkPoint*)reader->skip(SkPatchUtils::kNumCtrlPts *
+ sizeof(SkPoint));
+ uint32_t flag = reader->readInt();
+ const SkColor* colors = nullptr;
+ if (flag & DRAW_VERTICES_HAS_COLORS) {
+ colors = (const SkColor*)reader->skip(SkPatchUtils::kNumCorners * sizeof(SkColor));
+ }
+ const SkPoint* texCoords = nullptr;
+ if (flag & DRAW_VERTICES_HAS_TEXS) {
+ texCoords = (const SkPoint*)reader->skip(SkPatchUtils::kNumCorners *
+ sizeof(SkPoint));
+ }
+ sk_sp<SkXfermode> xfer;
+ if (flag & DRAW_VERTICES_HAS_XFER) {
+ int mode = reader->readInt();
+ if (mode < 0 || mode > SkXfermode::kLastMode) {
+ mode = SkXfermode::kModulate_Mode;
+ }
+ xfer = SkXfermode::Make((SkXfermode::Mode)mode);
+ }
+ if (paint) {
+ canvas->drawPatch(cubics, colors, texCoords, std::move(xfer), *paint);
+ }
+ } break;
+ case DRAW_PATH: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ if (paint) {
+ canvas->drawPath(fPictureData->getPath(reader), *paint);
+ }
+ } break;
+ case DRAW_PICTURE:
+ canvas->drawPicture(fPictureData->getPicture(reader));
+ break;
+ case DRAW_PICTURE_MATRIX_PAINT: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ SkMatrix matrix;
+ reader->readMatrix(&matrix);
+ const SkPicture* pic = fPictureData->getPicture(reader);
+ canvas->drawPicture(pic, &matrix, paint);
+ } break;
+ case DRAW_POINTS: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ SkCanvas::PointMode mode = (SkCanvas::PointMode)reader->readInt();
+ size_t count = reader->readInt();
+ const SkPoint* pts = (const SkPoint*)reader->skip(sizeof(SkPoint)* count);
+ if (paint) {
+ canvas->drawPoints(mode, count, pts, *paint);
+ }
+ } break;
+ case DRAW_POS_TEXT: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ TextContainer text;
+ get_text(reader, &text);
+ size_t points = reader->readInt();
+ const SkPoint* pos = (const SkPoint*)reader->skip(points * sizeof(SkPoint));
+ if (paint && text.text()) {
+ canvas->drawPosText(text.text(), text.length(), pos, *paint);
+ }
+ } break;
+ case DRAW_POS_TEXT_TOP_BOTTOM: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ TextContainer text;
+ get_text(reader, &text);
+ size_t points = reader->readInt();
+ const SkPoint* pos = (const SkPoint*)reader->skip(points * sizeof(SkPoint));
+ const SkScalar top = reader->readScalar();
+ const SkScalar bottom = reader->readScalar();
+ SkRect clip;
+ canvas->getClipBounds(&clip);
+ if (top < clip.fBottom && bottom > clip.fTop && paint && text.text()) {
+ canvas->drawPosText(text.text(), text.length(), pos, *paint);
+ }
+ } break;
+ case DRAW_POS_TEXT_H: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ TextContainer text;
+ get_text(reader, &text);
+ size_t xCount = reader->readInt();
+ const SkScalar constY = reader->readScalar();
+ const SkScalar* xpos = (const SkScalar*)reader->skip(xCount * sizeof(SkScalar));
+ if (paint && text.text()) {
+ canvas->drawPosTextH(text.text(), text.length(), xpos, constY, *paint);
+ }
+ } break;
+ case DRAW_POS_TEXT_H_TOP_BOTTOM: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ TextContainer text;
+ get_text(reader, &text);
+ size_t xCount = reader->readInt();
+ const SkScalar* xpos = (const SkScalar*)reader->skip((3 + xCount) * sizeof(SkScalar));
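+            // The first three scalars in this block are top, bottom and constY;
+            // the remaining xCount scalars are the per-glyph x positions.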
+ const SkScalar top = *xpos++;
+ const SkScalar bottom = *xpos++;
+ const SkScalar constY = *xpos++;
+ SkRect clip;
+ canvas->getClipBounds(&clip);
+ if (top < clip.fBottom && bottom > clip.fTop && paint && text.text()) {
+ canvas->drawPosTextH(text.text(), text.length(), xpos, constY, *paint);
+ }
+ } break;
+ case DRAW_RECT: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ SkRect rect;
+ reader->readRect(&rect);
+ if (paint) {
+ canvas->drawRect(rect, *paint);
+ }
+ } break;
+ case DRAW_REGION: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ SkRegion region;
+ reader->readRegion(&region);
+ if (paint) {
+ canvas->drawRegion(region, *paint);
+ }
+ } break;
+ case DRAW_RRECT: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ SkRRect rrect;
+ reader->readRRect(&rrect);
+ if (paint) {
+ canvas->drawRRect(rrect, *paint);
+ }
+ } break;
+ case DRAW_SPRITE: {
+ /* const SkPaint* paint = */ fPictureData->getPaint(reader);
+ /* const SkImage* image = */ fPictureData->getBitmapAsImage(reader);
+ /* int left = */ reader->readInt();
+ /* int top = */ reader->readInt();
+ // drawSprite removed dec-2015
+ } break;
+ case DRAW_TEXT: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ TextContainer text;
+ get_text(reader, &text);
+ SkScalar x = reader->readScalar();
+ SkScalar y = reader->readScalar();
+ if (paint && text.text()) {
+ canvas->drawText(text.text(), text.length(), x, y, *paint);
+ }
+ } break;
+ case DRAW_TEXT_BLOB: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ const SkTextBlob* blob = fPictureData->getTextBlob(reader);
+ SkScalar x = reader->readScalar();
+ SkScalar y = reader->readScalar();
+ if (paint) {
+ canvas->drawTextBlob(blob, x, y, *paint);
+ }
+ } break;
+ case DRAW_TEXT_TOP_BOTTOM: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ TextContainer text;
+ get_text(reader, &text);
+ const SkScalar* ptr = (const SkScalar*)reader->skip(4 * sizeof(SkScalar));
+ // ptr[0] == x
+ // ptr[1] == y
+ // ptr[2] == top
+ // ptr[3] == bottom
+ SkRect clip;
+ canvas->getClipBounds(&clip);
+ float top = ptr[2];
+ float bottom = ptr[3];
+ if (top < clip.fBottom && bottom > clip.fTop && paint && text.text()) {
+ canvas->drawText(text.text(), text.length(), ptr[0], ptr[1], *paint);
+ }
+ } break;
+ case DRAW_TEXT_ON_PATH: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ TextContainer text;
+ get_text(reader, &text);
+ const SkPath& path = fPictureData->getPath(reader);
+ SkMatrix matrix;
+ reader->readMatrix(&matrix);
+ if (paint && text.text()) {
+ canvas->drawTextOnPath(text.text(), text.length(), path, &matrix, *paint);
+ }
+ } break;
+ case DRAW_TEXT_RSXFORM: {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ int count = reader->readInt();
+ uint32_t flags = reader->read32();
+ TextContainer text;
+ get_text(reader, &text);
+ const SkRSXform* xform = (const SkRSXform*)reader->skip(count * sizeof(SkRSXform));
+ const SkRect* cull = nullptr;
+ if (flags & DRAW_TEXT_RSXFORM_HAS_CULL) {
+ cull = (const SkRect*)reader->skip(sizeof(SkRect));
+ }
+ if (text.text()) {
+ canvas->drawTextRSXform(text.text(), text.length(), xform, cull, *paint);
+ }
+ } break;
+ case DRAW_VERTICES: {
+ sk_sp<SkXfermode> xfer;
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ DrawVertexFlags flags = (DrawVertexFlags)reader->readInt();
+ SkCanvas::VertexMode vmode = (SkCanvas::VertexMode)reader->readInt();
+ int vCount = reader->readInt();
+ const SkPoint* verts = (const SkPoint*)reader->skip(vCount * sizeof(SkPoint));
+ const SkPoint* texs = nullptr;
+ const SkColor* colors = nullptr;
+ const uint16_t* indices = nullptr;
+ int iCount = 0;
+ if (flags & DRAW_VERTICES_HAS_TEXS) {
+ texs = (const SkPoint*)reader->skip(vCount * sizeof(SkPoint));
+ }
+ if (flags & DRAW_VERTICES_HAS_COLORS) {
+ colors = (const SkColor*)reader->skip(vCount * sizeof(SkColor));
+ }
+ if (flags & DRAW_VERTICES_HAS_INDICES) {
+ iCount = reader->readInt();
+ indices = (const uint16_t*)reader->skip(iCount * sizeof(uint16_t));
+ }
+ if (flags & DRAW_VERTICES_HAS_XFER) {
+ int mode = reader->readInt();
+ if (mode < 0 || mode > SkXfermode::kLastMode) {
+ mode = SkXfermode::kModulate_Mode;
+ }
+ xfer = SkXfermode::Make((SkXfermode::Mode)mode);
+ }
+ if (paint) {
+ canvas->drawVertices(vmode, vCount, verts, texs, colors,
+ xfer, indices, iCount, *paint);
+ }
+ } break;
+ case RESTORE:
+ canvas->restore();
+ break;
+ case ROTATE:
+ canvas->rotate(reader->readScalar());
+ break;
+ case SAVE:
+ // SKPs with version < 29 also store a SaveFlags param.
+ if (size > 4) {
+ if (reader->validate(8 == size)) {
+ reader->readInt();
+ }
+ }
+ canvas->save();
+ break;
+ case SAVE_LAYER_SAVEFLAGS_DEPRECATED: {
+ SkRect storage;
+ const SkRect* boundsPtr = get_rect_ptr(reader, &storage);
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ auto flags = SkCanvas::LegacySaveFlagsToSaveLayerFlags(reader->readInt());
+ canvas->saveLayer(SkCanvas::SaveLayerRec(boundsPtr, paint, flags));
+ } break;
+ case SAVE_LAYER_SAVELAYERFLAGS_DEPRECATED_JAN_2016: {
+ SkRect storage;
+ const SkRect* boundsPtr = get_rect_ptr(reader, &storage);
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ canvas->saveLayer(SkCanvas::SaveLayerRec(boundsPtr, paint, reader->readInt()));
+ } break;
+ case SAVE_LAYER_SAVELAYERREC: {
+ SkCanvas::SaveLayerRec rec(nullptr, nullptr, nullptr, 0);
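+            // flatFlags records which optional SaveLayerRec fields were
+            // serialized (see SkPictureRecord::recordSaveLayer).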
+ const uint32_t flatFlags = reader->readInt();
+ SkRect bounds;
+ if (flatFlags & SAVELAYERREC_HAS_BOUNDS) {
+ reader->readRect(&bounds);
+ rec.fBounds = &bounds;
+ }
+ if (flatFlags & SAVELAYERREC_HAS_PAINT) {
+ rec.fPaint = fPictureData->getPaint(reader);
+ }
+ if (flatFlags & SAVELAYERREC_HAS_BACKDROP) {
+ const SkPaint* paint = fPictureData->getPaint(reader);
+ rec.fBackdrop = paint->getImageFilter();
+ }
+ if (flatFlags & SAVELAYERREC_HAS_FLAGS) {
+ rec.fSaveLayerFlags = reader->readInt();
+ }
+ canvas->saveLayer(rec);
+ } break;
+ case SCALE: {
+ SkScalar sx = reader->readScalar();
+ SkScalar sy = reader->readScalar();
+ canvas->scale(sx, sy);
+ } break;
+ case SET_MATRIX: {
+ SkMatrix matrix;
+ reader->readMatrix(&matrix);
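+            // The recorded matrix is relative to the picture's coordinate
+            // space, so re-base it onto the canvas matrix captured at the
+            // start of playback instead of replacing that matrix outright.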
+ matrix.postConcat(initialMatrix);
+ canvas->setMatrix(matrix);
+ } break;
+ case SKEW: {
+ SkScalar sx = reader->readScalar();
+ SkScalar sy = reader->readScalar();
+ canvas->skew(sx, sy);
+ } break;
+ case TRANSLATE: {
+ SkScalar dx = reader->readScalar();
+ SkScalar dy = reader->readScalar();
+ canvas->translate(dx, dy);
+ } break;
+ case TRANSLATE_Z: {
+#ifdef SK_EXPERIMENTAL_SHADOWING
+ SkScalar dz = reader->readScalar();
+ canvas->translateZ(dz);
+#endif
+ } break;
+ default:
+ SkASSERTF(false, "Unknown draw type: %d", op);
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkPicturePlayback.h b/gfx/skia/skia/src/core/SkPicturePlayback.h
new file mode 100644
index 000000000..6bc13bf74
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPicturePlayback.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPicturePlayback_DEFINED
+#define SkPicturePlayback_DEFINED
+
+#include "SkPictureFlat.h" // for DrawType
+
+class SkBitmap;
+class SkCanvas;
+class SkPaint;
+class SkPictureData;
+
+// The basic picture playback class replays the provided picture into a canvas.
+class SkPicturePlayback final : SkNoncopyable {
+public:
+ SkPicturePlayback(const SkPictureData* data)
+ : fPictureData(data)
+ , fCurOffset(0) {
+ }
+
+ void draw(SkCanvas* canvas, SkPicture::AbortCallback*, SkReadBuffer* buffer);
+
+ // TODO: remove the curOp calls after cleaning up GrGatherDevice
+ // Return the ID of the operation currently being executed when playing
+ // back. 0 indicates no call is active.
+ size_t curOpID() const { return fCurOffset; }
+ void resetOpID() { fCurOffset = 0; }
+
+protected:
+ const SkPictureData* fPictureData;
+
+ // The offset of the current operation when within the draw method
+ size_t fCurOffset;
+
+ void handleOp(SkReadBuffer* reader,
+ DrawType op,
+ uint32_t size,
+ SkCanvas* canvas,
+ const SkMatrix& initialMatrix);
+
+ static DrawType ReadOpAndSize(SkReadBuffer* reader, uint32_t* size);
+
+ class AutoResetOpID {
+ public:
+ AutoResetOpID(SkPicturePlayback* playback) : fPlayback(playback) { }
+ ~AutoResetOpID() {
+ if (fPlayback) {
+ fPlayback->resetOpID();
+ }
+ }
+
+ private:
+ SkPicturePlayback* fPlayback;
+ };
+
+private:
+ typedef SkNoncopyable INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPictureRecord.cpp b/gfx/skia/skia/src/core/SkPictureRecord.cpp
new file mode 100644
index 000000000..6325edae0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureRecord.cpp
@@ -0,0 +1,996 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPictureRecord.h"
+#include "SkImage_Base.h"
+#include "SkPatchUtils.h"
+#include "SkPixelRef.h"
+#include "SkRRect.h"
+#include "SkRSXform.h"
+#include "SkTextBlob.h"
+#include "SkTSearch.h"
+
+#define HEAP_BLOCK_SIZE 4096
+
+enum {
+ // just need a value that save or getSaveCount would never return
+ kNoInitialSave = -1,
+};
+
+// A lot of basic types get stored as a uint32_t: bools, ints, paint indices, etc.
+static int const kUInt32Size = 4;
+
+SkPictureRecord::SkPictureRecord(const SkISize& dimensions, uint32_t flags)
+ : INHERITED(dimensions.width(), dimensions.height())
+ , fRecordFlags(flags)
+ , fInitialSaveCount(kNoInitialSave) {
+}
+
+SkPictureRecord::~SkPictureRecord() {
+ fImageRefs.unrefAll();
+ fPictureRefs.unrefAll();
+ fDrawableRefs.unrefAll();
+ fTextBlobRefs.unrefAll();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkPictureRecord::willSave() {
+ // record the offset to us, making it non-positive to distinguish a save
+ // from a clip entry.
+ fRestoreOffsetStack.push(-(int32_t)fWriter.bytesWritten());
+ this->recordSave();
+
+ this->INHERITED::willSave();
+}
+
+void SkPictureRecord::recordSave() {
+ fContentInfo.onSave();
+
+ // op only
+ size_t size = sizeof(kUInt32Size);
+ size_t initialOffset = this->addDraw(SAVE, &size);
+
+ this->validate(initialOffset, size);
+}
+
+SkCanvas::SaveLayerStrategy SkPictureRecord::getSaveLayerStrategy(const SaveLayerRec& rec) {
+ // record the offset to us, making it non-positive to distinguish a save
+ // from a clip entry.
+ fRestoreOffsetStack.push(-(int32_t)fWriter.bytesWritten());
+ this->recordSaveLayer(rec);
+
+ (void)this->INHERITED::getSaveLayerStrategy(rec);
+    /* There is no need for a (potentially very big) layer at this time, and we
+        may not even be able to afford one: during record our clip starts out
+        the size of the picture, which is often much larger than the size of
+        the actual device we'll use during playback.
+    */
+ return kNoLayer_SaveLayerStrategy;
+}
+
+void SkPictureRecord::recordSaveLayer(const SaveLayerRec& rec) {
+ fContentInfo.onSaveLayer();
+
+ // op + flatflags
+ size_t size = 2 * kUInt32Size;
+ uint32_t flatFlags = 0;
+
+ if (rec.fBounds) {
+ flatFlags |= SAVELAYERREC_HAS_BOUNDS;
+ size += sizeof(*rec.fBounds);
+ }
+ if (rec.fPaint) {
+ flatFlags |= SAVELAYERREC_HAS_PAINT;
+ size += sizeof(uint32_t); // index
+ }
+ if (rec.fBackdrop) {
+ flatFlags |= SAVELAYERREC_HAS_BACKDROP;
+ size += sizeof(uint32_t); // (paint) index
+ }
+ if (rec.fSaveLayerFlags) {
+ flatFlags |= SAVELAYERREC_HAS_FLAGS;
+ size += sizeof(uint32_t);
+ }
+
+ const size_t initialOffset = this->addDraw(SAVE_LAYER_SAVELAYERREC, &size);
+ this->addInt(flatFlags);
+ if (flatFlags & SAVELAYERREC_HAS_BOUNDS) {
+ this->addRect(*rec.fBounds);
+ }
+ if (flatFlags & SAVELAYERREC_HAS_PAINT) {
+ this->addPaintPtr(rec.fPaint);
+ }
+ if (flatFlags & SAVELAYERREC_HAS_BACKDROP) {
+        // Overkill, but we don't separately track single flattenables, so we
+        // wrap the backdrop filter in a paint for serialization.
+ SkPaint paint;
+ paint.setImageFilter(const_cast<SkImageFilter*>(rec.fBackdrop));
+ this->addPaint(paint);
+ }
+ if (flatFlags & SAVELAYERREC_HAS_FLAGS) {
+ this->addInt(rec.fSaveLayerFlags);
+ }
+ this->validate(initialOffset, size);
+}
+
+#ifdef SK_DEBUG
+/*
+ * Read the op code from 'offset' in 'writer' and extract the size too.
+ */
+static DrawType peek_op_and_size(SkWriter32* writer, size_t offset, uint32_t* size) {
+ uint32_t peek = writer->readTAt<uint32_t>(offset);
+
+ uint32_t op;
+ UNPACK_8_24(peek, op, *size);
+ if (MASK_24 == *size) {
+ // size required its own slot right after the op code
+ *size = writer->readTAt<uint32_t>(offset + kUInt32Size);
+ }
+ return (DrawType) op;
+}
+#endif // SK_DEBUG
+
+void SkPictureRecord::willRestore() {
+#if 0
+ SkASSERT(fRestoreOffsetStack.count() > 1);
+#endif
+
+ // check for underflow
+ if (fRestoreOffsetStack.count() == 0) {
+ return;
+ }
+
+ this->recordRestore();
+
+ fRestoreOffsetStack.pop();
+
+ this->INHERITED::willRestore();
+}
+
+void SkPictureRecord::recordRestore(bool fillInSkips) {
+ fContentInfo.onRestore();
+
+ if (fillInSkips) {
+ this->fillRestoreOffsetPlaceholdersForCurrentStackLevel((uint32_t)fWriter.bytesWritten());
+ }
+ size_t size = 1 * kUInt32Size; // RESTORE consists solely of 1 op code
+ size_t initialOffset = this->addDraw(RESTORE, &size);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::recordTranslate(const SkMatrix& m) {
+ SkASSERT(SkMatrix::kTranslate_Mask == m.getType());
+
+ // op + dx + dy
+ size_t size = 1 * kUInt32Size + 2 * sizeof(SkScalar);
+ size_t initialOffset = this->addDraw(TRANSLATE, &size);
+ this->addScalar(m.getTranslateX());
+ this->addScalar(m.getTranslateY());
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::recordScale(const SkMatrix& m) {
+ SkASSERT(SkMatrix::kScale_Mask == m.getType());
+
+ // op + sx + sy
+ size_t size = 1 * kUInt32Size + 2 * sizeof(SkScalar);
+ size_t initialOffset = this->addDraw(SCALE, &size);
+ this->addScalar(m.getScaleX());
+ this->addScalar(m.getScaleY());
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::didConcat(const SkMatrix& matrix) {
+ switch (matrix.getType()) {
+ case SkMatrix::kTranslate_Mask:
+ this->recordTranslate(matrix);
+ break;
+ case SkMatrix::kScale_Mask:
+ this->recordScale(matrix);
+ break;
+ default:
+ this->recordConcat(matrix);
+ break;
+ }
+ this->INHERITED::didConcat(matrix);
+}
+
+void SkPictureRecord::recordConcat(const SkMatrix& matrix) {
+ this->validate(fWriter.bytesWritten(), 0);
+ // op + matrix
+ size_t size = kUInt32Size + matrix.writeToMemory(nullptr);
+ size_t initialOffset = this->addDraw(CONCAT, &size);
+ this->addMatrix(matrix);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::didSetMatrix(const SkMatrix& matrix) {
+ this->validate(fWriter.bytesWritten(), 0);
+ // op + matrix
+ size_t size = kUInt32Size + matrix.writeToMemory(nullptr);
+ size_t initialOffset = this->addDraw(SET_MATRIX, &size);
+ this->addMatrix(matrix);
+ this->validate(initialOffset, size);
+ this->INHERITED::didSetMatrix(matrix);
+}
+
+void SkPictureRecord::didTranslateZ(SkScalar z) {
+#ifdef SK_EXPERIMENTAL_SHADOWING
+ this->validate(fWriter.bytesWritten(), 0);
+ // op + scalar
+ size_t size = 1 * kUInt32Size + 1 * sizeof(SkScalar);
+ size_t initialOffset = this->addDraw(TRANSLATE_Z, &size);
+ this->addScalar(z);
+ this->validate(initialOffset, size);
+ this->INHERITED::didTranslateZ(z);
+#endif
+}
+
+static bool clipOpExpands(SkCanvas::ClipOp op) {
+ switch (op) {
+ case SkCanvas::kUnion_Op:
+ case SkCanvas::kXOR_Op:
+ case SkCanvas::kReverseDifference_Op:
+ case SkCanvas::kReplace_Op:
+ return true;
+ case SkCanvas::kIntersect_Op:
+ case SkCanvas::kDifference_Op:
+ return false;
+ default:
+ SkDEBUGFAIL("unknown region op");
+ return false;
+ }
+}
+
+void SkPictureRecord::fillRestoreOffsetPlaceholdersForCurrentStackLevel(uint32_t restoreOffset) {
+ int32_t offset = fRestoreOffsetStack.top();
+ while (offset > 0) {
+ uint32_t peek = fWriter.readTAt<uint32_t>(offset);
+ fWriter.overwriteTAt(offset, restoreOffset);
+ offset = peek;
+ }
+
+#ifdef SK_DEBUG
+ // offset of 0 has been disabled, so we skip it
+ if (offset > 0) {
+ // assert that the final offset value points to a save verb
+ uint32_t opSize;
+ DrawType drawOp = peek_op_and_size(&fWriter, -offset, &opSize);
+ SkASSERT(SAVE_LAYER_SAVEFLAGS_DEPRECATED != drawOp);
+ SkASSERT(SAVE_LAYER_SAVELAYERFLAGS_DEPRECATED_JAN_2016 != drawOp);
+ SkASSERT(SAVE == drawOp || SAVE_LAYER_SAVELAYERREC == drawOp);
+ }
+#endif
+}
+
+void SkPictureRecord::beginRecording() {
+    // We have to call this *after* our constructor to ensure that it gets
+    // recorded. It is balanced by the restoreToCount() call from endRecording(),
+    // which in turn calls our overridden restore(), so those get recorded too.
+ fInitialSaveCount = this->save();
+}
+
+void SkPictureRecord::endRecording() {
+ SkASSERT(kNoInitialSave != fInitialSaveCount);
+ this->restoreToCount(fInitialSaveCount);
+}
+
+size_t SkPictureRecord::recordRestoreOffsetPlaceholder(ClipOp op) {
+ if (fRestoreOffsetStack.isEmpty()) {
+ return -1;
+ }
+
+ // The RestoreOffset field is initially filled with a placeholder
+ // value that points to the offset of the previous RestoreOffset
+ // in the current stack level, thus forming a linked list so that
+ // the restore offsets can be filled in when the corresponding
+ // restore command is recorded.
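+    // For example, two clips recorded at the same save level chain together:
+    // the second placeholder stores the offset of the first, and both get
+    // overwritten with the real restore offset when the restore is recorded.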
+ int32_t prevOffset = fRestoreOffsetStack.top();
+
+ if (clipOpExpands(op)) {
+        // Run back through any previous clip ops and mark their offsets as 0,
+        // disabling their ability to trigger a jump-to-restore; otherwise they
+        // could hide this clip's ability to expand the clip (i.e. go from
+        // empty to non-empty).
+ this->fillRestoreOffsetPlaceholdersForCurrentStackLevel(0);
+
+ // Reset the pointer back to the previous clip so that subsequent
+ // restores don't overwrite the offsets we just cleared.
+ prevOffset = 0;
+ }
+
+ size_t offset = fWriter.bytesWritten();
+ this->addInt(prevOffset);
+ fRestoreOffsetStack.top() = SkToU32(offset);
+ return offset;
+}
+
+void SkPictureRecord::onClipRect(const SkRect& rect, SkCanvas::ClipOp op, ClipEdgeStyle edgeStyle) {
+ this->recordClipRect(rect, op, kSoft_ClipEdgeStyle == edgeStyle);
+ this->INHERITED::onClipRect(rect, op, edgeStyle);
+}
+
+size_t SkPictureRecord::recordClipRect(const SkRect& rect, SkCanvas::ClipOp op, bool doAA) {
+ // id + rect + clip params
+ size_t size = 1 * kUInt32Size + sizeof(rect) + 1 * kUInt32Size;
+ // recordRestoreOffsetPlaceholder doesn't always write an offset
+ if (!fRestoreOffsetStack.isEmpty()) {
+ // + restore offset
+ size += kUInt32Size;
+ }
+ size_t initialOffset = this->addDraw(CLIP_RECT, &size);
+ this->addRect(rect);
+ this->addInt(ClipParams_pack(op, doAA));
+ size_t offset = this->recordRestoreOffsetPlaceholder(op);
+
+ this->validate(initialOffset, size);
+ return offset;
+}
+
+void SkPictureRecord::onClipRRect(const SkRRect& rrect, SkCanvas::ClipOp op, ClipEdgeStyle edgeStyle) {
+ this->recordClipRRect(rrect, op, kSoft_ClipEdgeStyle == edgeStyle);
+ this->INHERITED::onClipRRect(rrect, op, edgeStyle);
+}
+
+size_t SkPictureRecord::recordClipRRect(const SkRRect& rrect, SkCanvas::ClipOp op, bool doAA) {
+ // op + rrect + clip params
+ size_t size = 1 * kUInt32Size + SkRRect::kSizeInMemory + 1 * kUInt32Size;
+ // recordRestoreOffsetPlaceholder doesn't always write an offset
+ if (!fRestoreOffsetStack.isEmpty()) {
+ // + restore offset
+ size += kUInt32Size;
+ }
+ size_t initialOffset = this->addDraw(CLIP_RRECT, &size);
+ this->addRRect(rrect);
+ this->addInt(ClipParams_pack(op, doAA));
+ size_t offset = recordRestoreOffsetPlaceholder(op);
+ this->validate(initialOffset, size);
+ return offset;
+}
+
+void SkPictureRecord::onClipPath(const SkPath& path, SkCanvas::ClipOp op, ClipEdgeStyle edgeStyle) {
+ int pathID = this->addPathToHeap(path);
+ this->recordClipPath(pathID, op, kSoft_ClipEdgeStyle == edgeStyle);
+ this->INHERITED::onClipPath(path, op, edgeStyle);
+}
+
+size_t SkPictureRecord::recordClipPath(int pathID, SkCanvas::ClipOp op, bool doAA) {
+ // op + path index + clip params
+ size_t size = 3 * kUInt32Size;
+ // recordRestoreOffsetPlaceholder doesn't always write an offset
+ if (!fRestoreOffsetStack.isEmpty()) {
+ // + restore offset
+ size += kUInt32Size;
+ }
+ size_t initialOffset = this->addDraw(CLIP_PATH, &size);
+ this->addInt(pathID);
+ this->addInt(ClipParams_pack(op, doAA));
+ size_t offset = recordRestoreOffsetPlaceholder(op);
+ this->validate(initialOffset, size);
+ return offset;
+}
+
+void SkPictureRecord::onClipRegion(const SkRegion& region, ClipOp op) {
+ this->recordClipRegion(region, op);
+ this->INHERITED::onClipRegion(region, op);
+}
+
+size_t SkPictureRecord::recordClipRegion(const SkRegion& region, ClipOp op) {
+ // op + clip params + region
+ size_t size = 2 * kUInt32Size + region.writeToMemory(nullptr);
+ // recordRestoreOffsetPlaceholder doesn't always write an offset
+ if (!fRestoreOffsetStack.isEmpty()) {
+ // + restore offset
+ size += kUInt32Size;
+ }
+ size_t initialOffset = this->addDraw(CLIP_REGION, &size);
+ this->addRegion(region);
+ this->addInt(ClipParams_pack(op, false));
+ size_t offset = this->recordRestoreOffsetPlaceholder(op);
+
+ this->validate(initialOffset, size);
+ return offset;
+}
+
+void SkPictureRecord::onDrawPaint(const SkPaint& paint) {
+ // op + paint index
+ size_t size = 2 * kUInt32Size;
+ size_t initialOffset = this->addDraw(DRAW_PAINT, &size);
+ this->addPaint(paint);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawPoints(PointMode mode, size_t count, const SkPoint pts[],
+ const SkPaint& paint) {
+ fContentInfo.onDrawPoints(count, paint);
+
+ // op + paint index + mode + count + point data
+ size_t size = 4 * kUInt32Size + count * sizeof(SkPoint);
+ size_t initialOffset = this->addDraw(DRAW_POINTS, &size);
+ this->addPaint(paint);
+
+ this->addInt(mode);
+ this->addInt(SkToInt(count));
+ fWriter.writeMul4(pts, count * sizeof(SkPoint));
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawOval(const SkRect& oval, const SkPaint& paint) {
+ // op + paint index + rect
+ size_t size = 2 * kUInt32Size + sizeof(oval);
+ size_t initialOffset = this->addDraw(DRAW_OVAL, &size);
+ this->addPaint(paint);
+ this->addRect(oval);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) {
+ // op + paint index + rect + start + sweep + bool (as int)
+ size_t size = 2 * kUInt32Size + sizeof(oval) + sizeof(startAngle) + sizeof(sweepAngle) +
+ sizeof(int);
+ size_t initialOffset = this->addDraw(DRAW_ARC, &size);
+ this->addPaint(paint);
+ this->addRect(oval);
+ this->addScalar(startAngle);
+ this->addScalar(sweepAngle);
+ this->addInt(useCenter);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawRect(const SkRect& rect, const SkPaint& paint) {
+ // op + paint index + rect
+ size_t size = 2 * kUInt32Size + sizeof(rect);
+ size_t initialOffset = this->addDraw(DRAW_RECT, &size);
+ this->addPaint(paint);
+ this->addRect(rect);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawRegion(const SkRegion& region, const SkPaint& paint) {
+ // op + paint index + region
+ size_t regionBytes = region.writeToMemory(nullptr);
+ size_t size = 2 * kUInt32Size + regionBytes;
+ size_t initialOffset = this->addDraw(DRAW_REGION, &size);
+ this->addPaint(paint);
+ fWriter.writeRegion(region);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ // op + paint index + rrect
+ size_t size = 2 * kUInt32Size + SkRRect::kSizeInMemory;
+ size_t initialOffset = this->addDraw(DRAW_RRECT, &size);
+ this->addPaint(paint);
+ this->addRRect(rrect);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkPaint& paint) {
+ // op + paint index + rrects
+ size_t size = 2 * kUInt32Size + SkRRect::kSizeInMemory * 2;
+ size_t initialOffset = this->addDraw(DRAW_DRRECT, &size);
+ this->addPaint(paint);
+ this->addRRect(outer);
+ this->addRRect(inner);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ fContentInfo.onDrawPath(path, paint);
+
+ // op + paint index + path index
+ size_t size = 3 * kUInt32Size;
+ size_t initialOffset = this->addDraw(DRAW_PATH, &size);
+ this->addPaint(paint);
+ this->addPath(path);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawImage(const SkImage* image, SkScalar x, SkScalar y,
+ const SkPaint* paint) {
+ // op + paint_index + image_index + x + y
+ size_t size = 3 * kUInt32Size + 2 * sizeof(SkScalar);
+ size_t initialOffset = this->addDraw(DRAW_IMAGE, &size);
+ this->addPaintPtr(paint);
+ this->addImage(image);
+ this->addScalar(x);
+ this->addScalar(y);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ // id + paint_index + image_index + bool_for_src + constraint
+ size_t size = 5 * kUInt32Size;
+ if (src) {
+ size += sizeof(*src); // + rect
+ }
+ size += sizeof(dst); // + rect
+
+ size_t initialOffset = this->addDraw(DRAW_IMAGE_RECT, &size);
+ this->addPaintPtr(paint);
+ this->addImage(image);
+ this->addRectPtr(src); // may be null
+ this->addRect(dst);
+ this->addInt(constraint);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawImageNine(const SkImage* img, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint) {
+ // id + paint_index + image_index + center + dst
+ size_t size = 3 * kUInt32Size + sizeof(SkIRect) + sizeof(SkRect);
+
+ size_t initialOffset = this->addDraw(DRAW_IMAGE_NINE, &size);
+ this->addPaintPtr(paint);
+ this->addImage(img);
+ this->addIRect(center);
+ this->addRect(dst);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawImageLattice(const SkImage* image, const Lattice& lattice,
+ const SkRect& dst, const SkPaint* paint) {
+    // xCount + xDivs + yCount + yDivs + flagCount + flags + bounds
+ int flagCount = (nullptr == lattice.fFlags) ? 0 : (lattice.fXCount + 1) * (lattice.fYCount + 1);
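+    // When flags are present there is one entry per lattice cell, i.e.
+    // (xCount + 1) * (yCount + 1) of them.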
+ size_t latticeSize = (1 + lattice.fXCount + 1 + lattice.fYCount + 1) * kUInt32Size +
+ SkAlign4(flagCount * sizeof(SkCanvas::Lattice::Flags)) + sizeof(SkIRect);
+
+ // op + paint index + image index + lattice + dst rect
+ size_t size = 3 * kUInt32Size + latticeSize + sizeof(dst);
+ size_t initialOffset = this->addDraw(DRAW_IMAGE_LATTICE, &size);
+ this->addPaintPtr(paint);
+ this->addImage(image);
+ this->addInt(lattice.fXCount);
+ fWriter.writePad(lattice.fXDivs, lattice.fXCount * kUInt32Size);
+ this->addInt(lattice.fYCount);
+ fWriter.writePad(lattice.fYDivs, lattice.fYCount * kUInt32Size);
+ this->addInt(flagCount);
+ fWriter.writePad(lattice.fFlags, flagCount * sizeof(SkCanvas::Lattice::Flags));
+ SkASSERT(lattice.fBounds);
+ this->addIRect(*lattice.fBounds);
+ this->addRect(dst);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawText(const void* text, size_t byteLength, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ // op + paint index + length + 'length' worth of chars + x + y
+ size_t size = 3 * kUInt32Size + SkAlign4(byteLength) + 2 * sizeof(SkScalar);
+
+ DrawType op = DRAW_TEXT;
+ size_t initialOffset = this->addDraw(op, &size);
+ this->addPaint(paint);
+ this->addText(text, byteLength);
+ this->addScalar(x);
+ this->addScalar(y);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawPosText(const void* text, size_t byteLength, const SkPoint pos[],
+ const SkPaint& paint) {
+ int points = paint.countText(text, byteLength);
+
+ // op + paint index + length + 'length' worth of data + num points + x&y point data
+ size_t size = 3 * kUInt32Size + SkAlign4(byteLength) + kUInt32Size + points * sizeof(SkPoint);
+
+ DrawType op = DRAW_POS_TEXT;
+
+ size_t initialOffset = this->addDraw(op, &size);
+ this->addPaint(paint);
+ this->addText(text, byteLength);
+ this->addInt(points);
+ fWriter.writeMul4(pos, points * sizeof(SkPoint));
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawPosTextH(const void* text, size_t byteLength, const SkScalar xpos[],
+ SkScalar constY, const SkPaint& paint) {
+ int points = paint.countText(text, byteLength);
+
+ // op + paint index + length + 'length' worth of data + num points
+ size_t size = 3 * kUInt32Size + SkAlign4(byteLength) + 1 * kUInt32Size;
+ // + y + the actual points
+ size += 1 * kUInt32Size + points * sizeof(SkScalar);
+
+ size_t initialOffset = this->addDraw(DRAW_POS_TEXT_H, &size);
+ this->addPaint(paint);
+ this->addText(text, byteLength);
+ this->addInt(points);
+ this->addScalar(constY);
+ fWriter.writeMul4(xpos, points * sizeof(SkScalar));
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawTextOnPath(const void* text, size_t byteLength, const SkPath& path,
+ const SkMatrix* matrix, const SkPaint& paint) {
+ // op + paint index + length + 'length' worth of data + path index + matrix
+ const SkMatrix& m = matrix ? *matrix : SkMatrix::I();
+ size_t size = 3 * kUInt32Size + SkAlign4(byteLength) + kUInt32Size + m.writeToMemory(nullptr);
+ size_t initialOffset = this->addDraw(DRAW_TEXT_ON_PATH, &size);
+ this->addPaint(paint);
+ this->addText(text, byteLength);
+ this->addPath(path);
+ this->addMatrix(m);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawTextRSXform(const void* text, size_t byteLength,
+ const SkRSXform xform[], const SkRect* cull,
+ const SkPaint& paint) {
+ const int count = paint.countText(text, byteLength);
+ // [op + paint-index + count + flags + length] + [text] + [xform] + cull
+ size_t size = 5 * kUInt32Size + SkAlign4(byteLength) + count * sizeof(SkRSXform);
+ uint32_t flags = 0;
+ if (cull) {
+ flags |= DRAW_TEXT_RSXFORM_HAS_CULL;
+ size += sizeof(SkRect);
+ }
+
+ size_t initialOffset = this->addDraw(DRAW_TEXT_RSXFORM, &size);
+ this->addPaint(paint);
+ this->addInt(count);
+ this->addInt(flags);
+ this->addText(text, byteLength);
+ fWriter.write(xform, count * sizeof(SkRSXform));
+ if (cull) {
+ fWriter.write(cull, sizeof(SkRect));
+ }
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+
+ // op + paint index + blob index + x/y
+ size_t size = 3 * kUInt32Size + 2 * sizeof(SkScalar);
+ size_t initialOffset = this->addDraw(DRAW_TEXT_BLOB, &size);
+
+ this->addPaint(paint);
+ this->addTextBlob(blob);
+ this->addScalar(x);
+ this->addScalar(y);
+
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint) {
+ // op + picture index
+ size_t size = 2 * kUInt32Size;
+ size_t initialOffset;
+
+ if (nullptr == matrix && nullptr == paint) {
+ initialOffset = this->addDraw(DRAW_PICTURE, &size);
+ this->addPicture(picture);
+ } else {
+ const SkMatrix& m = matrix ? *matrix : SkMatrix::I();
+ size += m.writeToMemory(nullptr) + kUInt32Size; // matrix + paint
+ initialOffset = this->addDraw(DRAW_PICTURE_MATRIX_PAINT, &size);
+ this->addPaintPtr(paint);
+ this->addMatrix(m);
+ this->addPicture(picture);
+ }
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawShadowedPicture(const SkPicture* picture,
+ const SkMatrix* matrix,
+ const SkPaint* paint,
+ const SkShadowParams& params) {
+ // op + picture index
+ size_t size = 2 * kUInt32Size;
+ size_t initialOffset;
+
+ // TODO: handle recording params.
+ if (nullptr == matrix && nullptr == paint) {
+ initialOffset = this->addDraw(DRAW_PICTURE, &size);
+ this->addPicture(picture);
+ } else {
+ const SkMatrix& m = matrix ? *matrix : SkMatrix::I();
+ size += m.writeToMemory(nullptr) + kUInt32Size; // matrix + paint
+ initialOffset = this->addDraw(DRAW_PICTURE_MATRIX_PAINT, &size);
+ this->addPaintPtr(paint);
+ this->addMatrix(m);
+ this->addPicture(picture);
+ }
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix) {
+ // op + drawable index
+ size_t size = 2 * kUInt32Size;
+ size_t initialOffset;
+
+ if (nullptr == matrix) {
+ initialOffset = this->addDraw(DRAW_DRAWABLE, &size);
+ this->addDrawable(drawable);
+ } else {
+ size += matrix->writeToMemory(nullptr); // matrix
+ initialOffset = this->addDraw(DRAW_DRAWABLE_MATRIX, &size);
+ this->addMatrix(*matrix);
+ this->addDrawable(drawable);
+ }
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawVertices(VertexMode vmode, int vertexCount,
+ const SkPoint vertices[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xfer,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint) {
+ uint32_t flags = 0;
+ if (texs) {
+ flags |= DRAW_VERTICES_HAS_TEXS;
+ }
+ if (colors) {
+ flags |= DRAW_VERTICES_HAS_COLORS;
+ }
+ if (indexCount > 0) {
+ flags |= DRAW_VERTICES_HAS_INDICES;
+ }
+ if (xfer) {
+ SkXfermode::Mode mode;
+ if (xfer->asMode(&mode) && SkXfermode::kModulate_Mode != mode) {
+ flags |= DRAW_VERTICES_HAS_XFER;
+ }
+ }
+
+ // op + paint index + flags + vmode + vCount + vertices
+ size_t size = 5 * kUInt32Size + vertexCount * sizeof(SkPoint);
+ if (flags & DRAW_VERTICES_HAS_TEXS) {
+ size += vertexCount * sizeof(SkPoint); // + uvs
+ }
+ if (flags & DRAW_VERTICES_HAS_COLORS) {
+ size += vertexCount * sizeof(SkColor); // + vert colors
+ }
+ if (flags & DRAW_VERTICES_HAS_INDICES) {
+ // + num indices + indices
+ size += 1 * kUInt32Size + SkAlign4(indexCount * sizeof(uint16_t));
+ }
+ if (flags & DRAW_VERTICES_HAS_XFER) {
+ size += kUInt32Size; // mode enum
+ }
+
+ size_t initialOffset = this->addDraw(DRAW_VERTICES, &size);
+ this->addPaint(paint);
+ this->addInt(flags);
+ this->addInt(vmode);
+ this->addInt(vertexCount);
+ this->addPoints(vertices, vertexCount);
+ if (flags & DRAW_VERTICES_HAS_TEXS) {
+ this->addPoints(texs, vertexCount);
+ }
+ if (flags & DRAW_VERTICES_HAS_COLORS) {
+ fWriter.writeMul4(colors, vertexCount * sizeof(SkColor));
+ }
+ if (flags & DRAW_VERTICES_HAS_INDICES) {
+ this->addInt(indexCount);
+ fWriter.writePad(indices, indexCount * sizeof(uint16_t));
+ }
+ if (flags & DRAW_VERTICES_HAS_XFER) {
+ SkXfermode::Mode mode = SkXfermode::kModulate_Mode;
+ (void)xfer->asMode(&mode);
+ this->addInt(mode);
+ }
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkXfermode* xmode,
+ const SkPaint& paint) {
+ // op + paint index + patch 12 control points + flag + patch 4 colors + 4 texture coordinates
+ size_t size = 2 * kUInt32Size + SkPatchUtils::kNumCtrlPts * sizeof(SkPoint) + kUInt32Size;
+ uint32_t flag = 0;
+ if (colors) {
+ flag |= DRAW_VERTICES_HAS_COLORS;
+ size += SkPatchUtils::kNumCorners * sizeof(SkColor);
+ }
+ if (texCoords) {
+ flag |= DRAW_VERTICES_HAS_TEXS;
+ size += SkPatchUtils::kNumCorners * sizeof(SkPoint);
+ }
+ if (xmode) {
+ SkXfermode::Mode mode;
+ if (xmode->asMode(&mode) && SkXfermode::kModulate_Mode != mode) {
+ flag |= DRAW_VERTICES_HAS_XFER;
+ size += kUInt32Size;
+ }
+ }
+
+ size_t initialOffset = this->addDraw(DRAW_PATCH, &size);
+ this->addPaint(paint);
+ this->addPatch(cubics);
+ this->addInt(flag);
+
+ // write optional parameters
+ if (colors) {
+ fWriter.write(colors, SkPatchUtils::kNumCorners * sizeof(SkColor));
+ }
+ if (texCoords) {
+ fWriter.write(texCoords, SkPatchUtils::kNumCorners * sizeof(SkPoint));
+ }
+ if (flag & DRAW_VERTICES_HAS_XFER) {
+ SkXfermode::Mode mode = SkXfermode::kModulate_Mode;
+ xmode->asMode(&mode);
+ this->addInt(mode);
+ }
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawAtlas(const SkImage* atlas, const SkRSXform xform[], const SkRect tex[],
+ const SkColor colors[], int count, SkXfermode::Mode mode,
+ const SkRect* cull, const SkPaint* paint) {
+ // [op + paint-index + atlas-index + flags + count] + [xform] + [tex] + [*colors + mode] + cull
+ size_t size = 5 * kUInt32Size + count * sizeof(SkRSXform) + count * sizeof(SkRect);
+ uint32_t flags = 0;
+ if (colors) {
+ flags |= DRAW_ATLAS_HAS_COLORS;
+ size += count * sizeof(SkColor);
+ size += sizeof(uint32_t); // xfermode::mode
+ }
+ if (cull) {
+ flags |= DRAW_ATLAS_HAS_CULL;
+ size += sizeof(SkRect);
+ }
+
+ size_t initialOffset = this->addDraw(DRAW_ATLAS, &size);
+ this->addPaintPtr(paint);
+ this->addImage(atlas);
+ this->addInt(flags);
+ this->addInt(count);
+ fWriter.write(xform, count * sizeof(SkRSXform));
+ fWriter.write(tex, count * sizeof(SkRect));
+
+ // write optional parameters
+ if (colors) {
+ fWriter.write(colors, count * sizeof(SkColor));
+ this->addInt(mode);
+ }
+ if (cull) {
+ fWriter.write(cull, sizeof(SkRect));
+ }
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawAnnotation(const SkRect& rect, const char key[], SkData* value) {
+ size_t keyLen = fWriter.WriteStringSize(key);
+ size_t valueLen = fWriter.WriteDataSize(value);
+ size_t size = 4 + sizeof(SkRect) + keyLen + valueLen;
+
+ size_t initialOffset = this->addDraw(DRAW_ANNOTATION, &size);
+ this->addRect(rect);
+ fWriter.writeString(key);
+ fWriter.writeData(value);
+ this->validate(initialOffset, size);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+template <typename T> int find_or_append_uniqueID(SkTDArray<const T*>& array, const T* obj) {
+ int index = array.select([&](const T* elem) {
+ return elem->uniqueID() == obj->uniqueID();
+ });
+ if (index < 0) {
+ index = array.count();
+ *array.append() = SkRef(obj);
+ }
+ return index;
+}
+
+sk_sp<SkSurface> SkPictureRecord::onNewSurface(const SkImageInfo& info, const SkSurfaceProps&) {
+ return nullptr;
+}
+
+void SkPictureRecord::addImage(const SkImage* image) {
+ // convention for images is 0-based index
+ this->addInt(find_or_append_uniqueID(fImageRefs, image));
+}
+
+void SkPictureRecord::addMatrix(const SkMatrix& matrix) {
+ fWriter.writeMatrix(matrix);
+}
+
+void SkPictureRecord::addPaintPtr(const SkPaint* paint) {
+ fContentInfo.onAddPaintPtr(paint);
+
+ if (paint) {
+ fPaints.push_back(*paint);
+ this->addInt(fPaints.count());
+ } else {
+ this->addInt(0);
+ }
+}
+
+int SkPictureRecord::addPathToHeap(const SkPath& path) {
+ if (int* n = fPaths.find(path)) {
+ return *n;
+ }
+ int n = fPaths.count() + 1; // 0 is reserved for null / error.
+ fPaths.set(path, n);
+ return n;
+}
+
+void SkPictureRecord::addPath(const SkPath& path) {
+ this->addInt(this->addPathToHeap(path));
+}
+
+void SkPictureRecord::addPatch(const SkPoint cubics[12]) {
+ fWriter.write(cubics, SkPatchUtils::kNumCtrlPts * sizeof(SkPoint));
+}
+
+void SkPictureRecord::addPicture(const SkPicture* picture) {
+ // follow the convention of recording a 1-based index
+ this->addInt(find_or_append_uniqueID(fPictureRefs, picture) + 1);
+}
+
+void SkPictureRecord::addDrawable(SkDrawable* drawable) {
+ int index = fDrawableRefs.find(drawable);
+ if (index < 0) { // not found
+ index = fDrawableRefs.count();
+ *fDrawableRefs.append() = drawable;
+ drawable->ref();
+ }
+ // follow the convention of recording a 1-based index
+ this->addInt(index + 1);
+}
+
+void SkPictureRecord::addPoint(const SkPoint& point) {
+ fWriter.writePoint(point);
+}
+
+void SkPictureRecord::addPoints(const SkPoint pts[], int count) {
+ fWriter.writeMul4(pts, count * sizeof(SkPoint));
+}
+
+void SkPictureRecord::addNoOp() {
+ size_t size = kUInt32Size; // op
+ this->addDraw(NOOP, &size);
+}
+
+void SkPictureRecord::addRect(const SkRect& rect) {
+ fWriter.writeRect(rect);
+}
+
+void SkPictureRecord::addRectPtr(const SkRect* rect) {
+ if (fWriter.writeBool(rect != nullptr)) {
+ fWriter.writeRect(*rect);
+ }
+}
+
+void SkPictureRecord::addIRect(const SkIRect& rect) {
+ fWriter.write(&rect, sizeof(rect));
+}
+
+void SkPictureRecord::addIRectPtr(const SkIRect* rect) {
+ if (fWriter.writeBool(rect != nullptr)) {
+ *(SkIRect*)fWriter.reserve(sizeof(SkIRect)) = *rect;
+ }
+}
+
+void SkPictureRecord::addRRect(const SkRRect& rrect) {
+ fWriter.writeRRect(rrect);
+}
+
+void SkPictureRecord::addRegion(const SkRegion& region) {
+ fWriter.writeRegion(region);
+}
+
+void SkPictureRecord::addText(const void* text, size_t byteLength) {
+ fContentInfo.onDrawText();
+ addInt(SkToInt(byteLength));
+ fWriter.writePad(text, byteLength);
+}
+
+void SkPictureRecord::addTextBlob(const SkTextBlob* blob) {
+ // follow the convention of recording a 1-based index
+ this->addInt(find_or_append_uniqueID(fTextBlobRefs, blob) + 1);
+}
+
+///////////////////////////////////////////////////////////////////////////////
diff --git a/gfx/skia/skia/src/core/SkPictureRecord.h b/gfx/skia/skia/src/core/SkPictureRecord.h
new file mode 100644
index 000000000..6d75609db
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureRecord.h
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPictureRecord_DEFINED
+#define SkPictureRecord_DEFINED
+
+#include "SkCanvas.h"
+#include "SkFlattenable.h"
+#include "SkPicture.h"
+#include "SkPictureData.h"
+#include "SkTArray.h"
+#include "SkTDArray.h"
+#include "SkTHash.h"
+#include "SkWriter32.h"
+
+// These macros help with packing and unpacking a single-byte value and
+// a 3-byte value into/out of a uint32_t.
+#define MASK_24 0x00FFFFFF
+#define UNPACK_8_24(combined, small, large) \
+ small = (combined >> 24) & 0xFF; \
+ large = combined & MASK_24;
+#define PACK_8_24(small, large) ((small << 24) | large)
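+// For example, PACK_8_24(DRAW_RECT, 24) stores the op in the top byte and the
+// 24-byte chunk size in the low 24 bits; UNPACK_8_24 splits such a word back
+// into its op and size.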
+
+
+class SkPictureRecord : public SkCanvas {
+public:
+ SkPictureRecord(const SkISize& dimensions, uint32_t recordFlags);
+ virtual ~SkPictureRecord();
+
+ const SkTDArray<const SkPicture* >& getPictureRefs() const {
+ return fPictureRefs;
+ }
+
+ const SkTDArray<SkDrawable* >& getDrawableRefs() const {
+ return fDrawableRefs;
+ }
+
+ const SkTDArray<const SkTextBlob* >& getTextBlobRefs() const {
+ return fTextBlobRefs;
+ }
+
+ const SkTDArray<const SkImage* >& getImageRefs() const {
+ return fImageRefs;
+ }
+
+ sk_sp<SkData> opData() const {
+ this->validate(fWriter.bytesWritten(), 0);
+
+ if (fWriter.bytesWritten() == 0) {
+ return SkData::MakeEmpty();
+ }
+ return fWriter.snapshotAsData();
+ }
+
+ const SkPictureContentInfo& contentInfo() const {
+ return fContentInfo;
+ }
+
+ void setFlags(uint32_t recordFlags) {
+ fRecordFlags = recordFlags;
+ }
+
+ const SkWriter32& writeStream() const {
+ return fWriter;
+ }
+
+ void beginRecording();
+ void endRecording();
+
+protected:
+ void addNoOp();
+
+private:
+ void handleOptimization(int opt);
+ size_t recordRestoreOffsetPlaceholder(SkCanvas::ClipOp);
+ void fillRestoreOffsetPlaceholdersForCurrentStackLevel(uint32_t restoreOffset);
+
+ SkTDArray<int32_t> fRestoreOffsetStack;
+
+ SkTDArray<uint32_t> fCullOffsetStack;
+
+ /*
+ * Write the 'drawType' operation and chunk size to the skp. 'size'
+ * can potentially be increased if the chunk size needs its own storage
+ * location (i.e., it overflows 24 bits).
+ * Returns the start offset of the chunk. This is the location at which
+ * the opcode & size are stored.
+ * TODO: since we are handing the size into here we could call reserve
+ * and then return a pointer to the memory storage. This could decrease
+ * allocation overhead but could lead to more wasted space (the tail
+ * end of blocks could go unused). Possibly add a second addDraw that
+ * operates in this manner.
+ */
+ size_t addDraw(DrawType drawType, size_t* size) {
+ size_t offset = fWriter.bytesWritten();
+
+ this->predrawNotify();
+ fContentInfo.addOperation();
+
+ SkASSERT(0 != *size);
+ SkASSERT(((uint8_t) drawType) == drawType);
+
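+        // If the chunk size doesn't fit in 24 bits (or equals the MASK_24
+        // sentinel), write MASK_24 in the packed word and store the real size
+        // in the following uint32_t.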
+ if (0 != (*size & ~MASK_24) || *size == MASK_24) {
+ fWriter.writeInt(PACK_8_24(drawType, MASK_24));
+ *size += 1;
+ fWriter.writeInt(SkToU32(*size));
+ } else {
+ fWriter.writeInt(PACK_8_24(drawType, SkToU32(*size)));
+ }
+
+ return offset;
+ }
+
+ void addInt(int value) {
+ fWriter.writeInt(value);
+ }
+ void addScalar(SkScalar scalar) {
+ fWriter.writeScalar(scalar);
+ }
+
+ void addImage(const SkImage*);
+ void addMatrix(const SkMatrix& matrix);
+ void addPaint(const SkPaint& paint) { this->addPaintPtr(&paint); }
+ void addPaintPtr(const SkPaint* paint);
+ void addPatch(const SkPoint cubics[12]);
+ void addPath(const SkPath& path);
+ void addPicture(const SkPicture* picture);
+ void addDrawable(SkDrawable* picture);
+ void addPoint(const SkPoint& point);
+ void addPoints(const SkPoint pts[], int count);
+ void addRect(const SkRect& rect);
+ void addRectPtr(const SkRect* rect);
+ void addIRect(const SkIRect& rect);
+ void addIRectPtr(const SkIRect* rect);
+ void addRRect(const SkRRect&);
+ void addRegion(const SkRegion& region);
+ void addText(const void* text, size_t byteLength);
+ void addTextBlob(const SkTextBlob* blob);
+
+ int find(const SkBitmap& bitmap);
+
+protected:
+ void validate(size_t initialOffset, size_t size) const {
+ SkASSERT(fWriter.bytesWritten() == initialOffset + size);
+ }
+
+ sk_sp<SkSurface> onNewSurface(const SkImageInfo&, const SkSurfaceProps&) override;
+ bool onPeekPixels(SkPixmap*) override { return false; }
+
+ void willSave() override;
+ SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec&) override;
+ void willRestore() override;
+
+ void didConcat(const SkMatrix&) override;
+ void didSetMatrix(const SkMatrix&) override;
+
+#ifdef SK_EXPERIMENTAL_SHADOWING
+ void didTranslateZ(SkScalar) override;
+#else
+ void didTranslateZ(SkScalar);
+#endif
+
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override;
+
+ void onDrawText(const void* text, size_t, SkScalar x, SkScalar y, const SkPaint&) override;
+ void onDrawPosText(const void* text, size_t, const SkPoint pos[], const SkPaint&) override;
+ void onDrawPosTextH(const void* text, size_t, const SkScalar xpos[], SkScalar constY,
+ const SkPaint&) override;
+ void onDrawTextOnPath(const void* text, size_t byteLength, const SkPath& path,
+ const SkMatrix* matrix, const SkPaint&) override;
+ void onDrawTextRSXform(const void* text, size_t byteLength, const SkRSXform xform[],
+ const SkRect* cull, const SkPaint&) override;
+ void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) override;
+
+ void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkXfermode* xmode,
+ const SkPaint& paint) override;
+ void onDrawAtlas(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[], int,
+ SkXfermode::Mode, const SkRect*, const SkPaint*) override;
+
+ void onDrawPaint(const SkPaint&) override;
+ void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&) override;
+ void onDrawRect(const SkRect&, const SkPaint&) override;
+ void onDrawRegion(const SkRegion&, const SkPaint&) override;
+ void onDrawOval(const SkRect&, const SkPaint&) override;
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override;
+ void onDrawRRect(const SkRRect&, const SkPaint&) override;
+ void onDrawPath(const SkPath&, const SkPaint&) override;
+ void onDrawImage(const SkImage*, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawImageRect(const SkImage*, const SkRect* src, const SkRect& dst,
+ const SkPaint*, SrcRectConstraint) override;
+ void onDrawImageNine(const SkImage*, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawImageLattice(const SkImage*, const SkCanvas::Lattice& lattice, const SkRect& dst,
+ const SkPaint*) override;
+
+ void onDrawVertices(VertexMode vmode, int vertexCount,
+ const SkPoint vertices[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint&) override;
+
+ void onClipRect(const SkRect&, ClipOp, ClipEdgeStyle) override;
+ void onClipRRect(const SkRRect&, ClipOp, ClipEdgeStyle) override;
+ void onClipPath(const SkPath&, ClipOp, ClipEdgeStyle) override;
+ void onClipRegion(const SkRegion&, ClipOp) override;
+
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override;
+
+#ifdef SK_EXPERIMENTAL_SHADOWING
+ void onDrawShadowedPicture(const SkPicture*, const SkMatrix*,
+ const SkPaint*, const SkShadowParams& params) override;
+#else
+ void onDrawShadowedPicture(const SkPicture*, const SkMatrix*,
+ const SkPaint*, const SkShadowParams& params);
+#endif
+
+ void onDrawDrawable(SkDrawable*, const SkMatrix*) override;
+ void onDrawAnnotation(const SkRect&, const char[], SkData*) override;
+
+ int addPathToHeap(const SkPath& path); // does not write to ops stream
+
+ // These entry points allow the writing of matrices, clips, saves &
+ // restores to be deferred (e.g., if the MC state is being collapsed and
+ // only written out as needed).
+ void recordConcat(const SkMatrix& matrix);
+ void recordTranslate(const SkMatrix& matrix);
+ void recordScale(const SkMatrix& matrix);
+ size_t recordClipRect(const SkRect& rect, SkCanvas::ClipOp op, bool doAA);
+ size_t recordClipRRect(const SkRRect& rrect, SkCanvas::ClipOp op, bool doAA);
+ size_t recordClipPath(int pathID, SkCanvas::ClipOp op, bool doAA);
+ size_t recordClipRegion(const SkRegion& region, SkCanvas::ClipOp op);
+ void recordSave();
+ void recordSaveLayer(const SaveLayerRec&);
+ void recordRestore(bool fillInSkips = true);
+
+ // SHOULD NEVER BE CALLED
+ void onDrawBitmap(const SkBitmap&, SkScalar left, SkScalar top, const SkPaint*) override {
+ sk_throw();
+ }
+ void onDrawBitmapRect(const SkBitmap&, const SkRect* src, const SkRect& dst, const SkPaint*,
+ SrcRectConstraint) override {
+ sk_throw();
+ }
+ void onDrawBitmapNine(const SkBitmap&, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override {
+ sk_throw();
+ }
+ void onDrawBitmapLattice(const SkBitmap&, const SkCanvas::Lattice& lattice, const SkRect& dst,
+ const SkPaint*) override {
+ sk_throw();
+ }
+
+private:
+ SkPictureContentInfo fContentInfo;
+
+ SkTArray<SkPaint> fPaints;
+
+ struct PathHash {
+ uint32_t operator()(const SkPath& p) { return p.getGenerationID(); }
+ };
+ SkTHashMap<SkPath, int, PathHash> fPaths;
+
+ SkWriter32 fWriter;
+
+ // we ref each item in these arrays
+ SkTDArray<const SkImage*> fImageRefs;
+ SkTDArray<const SkPicture*> fPictureRefs;
+ SkTDArray<SkDrawable*> fDrawableRefs;
+ SkTDArray<const SkTextBlob*> fTextBlobRefs;
+
+ uint32_t fRecordFlags;
+ int fInitialSaveCount;
+
+ friend class SkPictureData; // for SkPictureData's SkPictureRecord-based constructor
+
+ typedef SkCanvas INHERITED;
+};
+
+#endif
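The addDraw() helper above packs the draw opcode into the top byte of a 32-bit word and the chunk size into the low 24 bits, spilling the real size into a following word when it does not fit. The sketch below is only a hedged illustration of that layout; kMask24, pack_op_and_size, and unpack_op are placeholder names standing in for Skia's actual PACK_8_24/MASK_24 machinery, which is defined elsewhere in the tree.

    #include <cstdint>

    static const uint32_t kMask24 = 0x00FFFFFF;

    // Opcode in the top 8 bits, size (if it fits) in the low 24 bits.
    static uint32_t pack_op_and_size(uint8_t op, uint32_t size) {
        return (uint32_t(op) << 24) | (size & kMask24);
    }

    // Reader side: recover the opcode and report whether the real size was too
    // large for 24 bits and therefore follows as a separate 32-bit word.
    static uint8_t unpack_op(uint32_t packed, uint32_t* size24, bool* sizeFollows) {
        *size24 = packed & kMask24;
        *sizeFollows = (*size24 == kMask24);
        return uint8_t(packed >> 24);
    }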
diff --git a/gfx/skia/skia/src/core/SkPictureRecorder.cpp b/gfx/skia/skia/src/core/SkPictureRecorder.cpp
new file mode 100644
index 000000000..5631a081d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureRecorder.cpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBigPicture.h"
+#include "SkData.h"
+#include "SkDrawable.h"
+#include "SkPictureRecorder.h"
+#include "SkPictureUtils.h"
+#include "SkRecord.h"
+#include "SkRecordDraw.h"
+#include "SkRecordOpts.h"
+#include "SkRecordedDrawable.h"
+#include "SkRecorder.h"
+#include "SkTypes.h"
+
+SkPictureRecorder::SkPictureRecorder() {
+ fActivelyRecording = false;
+ fRecorder.reset(new SkRecorder(nullptr, SkRect::MakeWH(0, 0), &fMiniRecorder));
+}
+
+SkPictureRecorder::~SkPictureRecorder() {}
+
+SkCanvas* SkPictureRecorder::beginRecording(const SkRect& cullRect,
+ SkBBHFactory* bbhFactory /* = nullptr */,
+ uint32_t recordFlags /* = 0 */) {
+ fCullRect = cullRect;
+ fFlags = recordFlags;
+
+ if (bbhFactory) {
+ fBBH.reset((*bbhFactory)(cullRect));
+ SkASSERT(fBBH.get());
+ }
+
+ if (!fRecord) {
+ fRecord.reset(new SkRecord);
+ }
+ SkRecorder::DrawPictureMode dpm = (recordFlags & kPlaybackDrawPicture_RecordFlag)
+ ? SkRecorder::Playback_DrawPictureMode
+ : SkRecorder::Record_DrawPictureMode;
+ fRecorder->reset(fRecord.get(), cullRect, dpm, &fMiniRecorder);
+ fActivelyRecording = true;
+ return this->getRecordingCanvas();
+}
+
+SkCanvas* SkPictureRecorder::getRecordingCanvas() {
+ return fActivelyRecording ? fRecorder.get() : nullptr;
+}
+
+sk_sp<SkPicture> SkPictureRecorder::finishRecordingAsPicture(uint32_t finishFlags) {
+ fActivelyRecording = false;
+ fRecorder->restoreToCount(1); // If we were missing any restores, add them now.
+
+ if (fRecord->count() == 0) {
+ if (finishFlags & kReturnNullForEmpty_FinishFlag) {
+ return nullptr;
+ }
+ return fMiniRecorder.detachAsPicture(fCullRect);
+ }
+
+ // TODO: delay as much of this work until just before first playback?
+ SkRecordOptimize(fRecord);
+
+ if (fRecord->count() == 0) {
+ if (finishFlags & kReturnNullForEmpty_FinishFlag) {
+ return nullptr;
+ }
+ }
+
+ SkDrawableList* drawableList = fRecorder->getDrawableList();
+ SkBigPicture::SnapshotArray* pictList =
+ drawableList ? drawableList->newDrawableSnapshot() : nullptr;
+
+ if (fBBH.get()) {
+ SkAutoTMalloc<SkRect> bounds(fRecord->count());
+ SkRecordFillBounds(fCullRect, *fRecord, bounds);
+ fBBH->insert(bounds, fRecord->count());
+
+ // Now that we've calculated content bounds, we can update fCullRect, often trimming it.
+ // TODO: get updated fCullRect from bounds instead of forcing the BBH to return it?
+ SkRect bbhBound = fBBH->getRootBound();
+ SkASSERT((bbhBound.isEmpty() || fCullRect.contains(bbhBound))
+ || (bbhBound.isEmpty() && fCullRect.isEmpty()));
+ fCullRect = bbhBound;
+ }
+
+ size_t subPictureBytes = fRecorder->approxBytesUsedBySubPictures();
+ for (int i = 0; pictList && i < pictList->count(); i++) {
+ subPictureBytes += SkPictureUtils::ApproximateBytesUsed(pictList->begin()[i]);
+ }
+ return sk_make_sp<SkBigPicture>(fCullRect, fRecord.release(), pictList, fBBH.release(),
+ subPictureBytes);
+}
+
+sk_sp<SkPicture> SkPictureRecorder::finishRecordingAsPictureWithCull(const SkRect& cullRect,
+ uint32_t finishFlags) {
+ fCullRect = cullRect;
+ return this->finishRecordingAsPicture(finishFlags);
+}
+
+
+void SkPictureRecorder::partialReplay(SkCanvas* canvas) const {
+ if (nullptr == canvas) {
+ return;
+ }
+
+ int drawableCount = 0;
+ SkDrawable* const* drawables = nullptr;
+ SkDrawableList* drawableList = fRecorder->getDrawableList();
+ if (drawableList) {
+ drawableCount = drawableList->count();
+ drawables = drawableList->begin();
+ }
+ SkRecordDraw(*fRecord, canvas, nullptr, drawables, drawableCount, nullptr/*bbh*/, nullptr/*callback*/);
+}
+
+sk_sp<SkDrawable> SkPictureRecorder::finishRecordingAsDrawable(uint32_t finishFlags) {
+ fActivelyRecording = false;
+ fRecorder->flushMiniRecorder();
+ fRecorder->restoreToCount(1); // If we were missing any restores, add them now.
+
+ SkRecordOptimize(fRecord);
+
+ if (fRecord->count() == 0) {
+ if (finishFlags & kReturnNullForEmpty_FinishFlag) {
+ return nullptr;
+ }
+ }
+
+ if (fBBH.get()) {
+ SkAutoTMalloc<SkRect> bounds(fRecord->count());
+ SkRecordFillBounds(fCullRect, *fRecord, bounds);
+ fBBH->insert(bounds, fRecord->count());
+ }
+
+ sk_sp<SkDrawable> drawable =
+ sk_make_sp<SkRecordedDrawable>(fRecord, fBBH, fRecorder->detachDrawableList(), fCullRect);
+
+ // release our refs now, so only the drawable will be the owner.
+ fRecord.reset(nullptr);
+ fBBH.reset(nullptr);
+
+ return drawable;
+}
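For context on how the recorder implemented above is driven: a caller begins recording with a cull rect, draws into the returned canvas, and snaps the result as a picture. A minimal usage sketch, using only the SkPictureRecorder and SkCanvas calls that appear in this file; the function name, rect sizes, and color are arbitrary.

    #include "SkCanvas.h"
    #include "SkPaint.h"
    #include "SkPicture.h"
    #include "SkPictureRecorder.h"
    #include "SkRect.h"

    static sk_sp<SkPicture> record_red_square() {
        SkPictureRecorder recorder;
        SkCanvas* canvas = recorder.beginRecording(SkRect::MakeWH(100, 100));

        SkPaint paint;
        paint.setColor(SK_ColorRED);
        canvas->drawRect(SkRect::MakeWH(50, 50), paint);

        // Ownership of the recorded ops moves into the returned picture; the
        // recording canvas should not be used after this call.
        return recorder.finishRecordingAsPicture(0);
    }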
diff --git a/gfx/skia/skia/src/core/SkPictureShader.cpp b/gfx/skia/skia/src/core/SkPictureShader.cpp
new file mode 100644
index 000000000..3349558b7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureShader.cpp
@@ -0,0 +1,335 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPictureShader.h"
+
+#include "SkBitmap.h"
+#include "SkBitmapProcShader.h"
+#include "SkCanvas.h"
+#include "SkImage.h"
+#include "SkMatrixUtils.h"
+#include "SkPicture.h"
+#include "SkReadBuffer.h"
+#include "SkResourceCache.h"
+
+#if SK_SUPPORT_GPU
+#include "GrContext.h"
+#include "GrCaps.h"
+#endif
+
+namespace {
+static unsigned gBitmapShaderKeyNamespaceLabel;
+
+struct BitmapShaderKey : public SkResourceCache::Key {
+public:
+ BitmapShaderKey(uint32_t pictureID,
+ const SkRect& tile,
+ SkShader::TileMode tmx,
+ SkShader::TileMode tmy,
+ const SkSize& scale,
+ const SkMatrix& localMatrix)
+ : fPictureID(pictureID)
+ , fTile(tile)
+ , fTmx(tmx)
+ , fTmy(tmy)
+ , fScale(scale) {
+
+ for (int i = 0; i < 9; ++i) {
+ fLocalMatrixStorage[i] = localMatrix[i];
+ }
+
+ static const size_t keySize = sizeof(fPictureID) +
+ sizeof(fTile) +
+ sizeof(fTmx) + sizeof(fTmy) +
+ sizeof(fScale) +
+ sizeof(fLocalMatrixStorage);
+ // This better be packed.
+ SkASSERT(sizeof(uint32_t) * (&fEndOfStruct - &fPictureID) == keySize);
+        this->init(&gBitmapShaderKeyNamespaceLabel, 0, keySize);
+ }
+
+private:
+ uint32_t fPictureID;
+ SkRect fTile;
+ SkShader::TileMode fTmx, fTmy;
+ SkSize fScale;
+ SkScalar fLocalMatrixStorage[9];
+
+ SkDEBUGCODE(uint32_t fEndOfStruct;)
+};
+
+struct BitmapShaderRec : public SkResourceCache::Rec {
+ BitmapShaderRec(const BitmapShaderKey& key, SkShader* tileShader, size_t bitmapBytes)
+ : fKey(key)
+ , fShader(SkRef(tileShader))
+ , fBitmapBytes(bitmapBytes) {}
+
+ BitmapShaderKey fKey;
+ SkAutoTUnref<SkShader> fShader;
+ size_t fBitmapBytes;
+
+ const Key& getKey() const override { return fKey; }
+ size_t bytesUsed() const override {
+ return sizeof(fKey) + sizeof(SkShader) + fBitmapBytes;
+ }
+ const char* getCategory() const override { return "bitmap-shader"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override { return nullptr; }
+
+ static bool Visitor(const SkResourceCache::Rec& baseRec, void* contextShader) {
+ const BitmapShaderRec& rec = static_cast<const BitmapShaderRec&>(baseRec);
+ SkAutoTUnref<SkShader>* result = reinterpret_cast<SkAutoTUnref<SkShader>*>(contextShader);
+
+ result->reset(SkRef(rec.fShader.get()));
+
+ // The bitmap shader is backed by an image generator, thus it can always re-generate its
+ // pixels if discarded.
+ return true;
+ }
+};
+
+} // namespace
+
+SkPictureShader::SkPictureShader(sk_sp<SkPicture> picture, TileMode tmx, TileMode tmy,
+ const SkMatrix* localMatrix, const SkRect* tile)
+ : INHERITED(localMatrix)
+ , fPicture(std::move(picture))
+ , fTile(tile ? *tile : fPicture->cullRect())
+ , fTmx(tmx)
+ , fTmy(tmy) {
+}
+
+sk_sp<SkShader> SkPictureShader::Make(sk_sp<SkPicture> picture, TileMode tmx, TileMode tmy,
+ const SkMatrix* localMatrix, const SkRect* tile) {
+ if (!picture || picture->cullRect().isEmpty() || (tile && tile->isEmpty())) {
+ return SkShader::MakeEmptyShader();
+ }
+ return sk_sp<SkShader>(new SkPictureShader(std::move(picture), tmx, tmy, localMatrix, tile));
+}
+
+sk_sp<SkFlattenable> SkPictureShader::CreateProc(SkReadBuffer& buffer) {
+ SkMatrix lm;
+ buffer.readMatrix(&lm);
+ TileMode mx = (TileMode)buffer.read32();
+ TileMode my = (TileMode)buffer.read32();
+ SkRect tile;
+ buffer.readRect(&tile);
+
+ sk_sp<SkPicture> picture;
+
+ if (buffer.isCrossProcess() && SkPicture::PictureIOSecurityPrecautionsEnabled()) {
+ if (buffer.isVersionLT(SkReadBuffer::kPictureShaderHasPictureBool_Version)) {
+ // Older code blindly serialized pictures. We don't trust them.
+ buffer.validate(false);
+ return nullptr;
+ }
+ // Newer code won't serialize pictures in disallow-cross-process-picture mode.
+ // Assert that they didn't serialize anything except a false here.
+ buffer.validate(!buffer.readBool());
+ } else {
+ // Old code always serialized the picture. New code writes a 'true' first if it did.
+ if (buffer.isVersionLT(SkReadBuffer::kPictureShaderHasPictureBool_Version) ||
+ buffer.readBool()) {
+ picture = SkPicture::MakeFromBuffer(buffer);
+ }
+ }
+ return SkPictureShader::Make(picture, mx, my, &lm, &tile);
+}
+
+void SkPictureShader::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeMatrix(this->getLocalMatrix());
+ buffer.write32(fTmx);
+ buffer.write32(fTmy);
+ buffer.writeRect(fTile);
+
+ // The deserialization code won't trust that our serialized picture is safe to deserialize.
+ // So write a 'false' telling it that we're not serializing a picture.
+ if (buffer.isCrossProcess() && SkPicture::PictureIOSecurityPrecautionsEnabled()) {
+ buffer.writeBool(false);
+ } else {
+ buffer.writeBool(true);
+ fPicture->flatten(buffer);
+ }
+}
+
+sk_sp<SkShader> SkPictureShader::refBitmapShader(const SkMatrix& viewMatrix, const SkMatrix* localM,
+ const int maxTextureSize) const {
+ SkASSERT(fPicture && !fPicture->cullRect().isEmpty());
+
+ SkMatrix m;
+ m.setConcat(viewMatrix, this->getLocalMatrix());
+ if (localM) {
+ m.preConcat(*localM);
+ }
+
+ // Use a rotation-invariant scale
+ SkPoint scale;
+ //
+ // TODO: replace this with decomposeScale() -- but beware LayoutTest rebaselines!
+ //
+ if (!SkDecomposeUpper2x2(m, nullptr, &scale, nullptr)) {
+ // Decomposition failed, use an approximation.
+ scale.set(SkScalarSqrt(m.getScaleX() * m.getScaleX() + m.getSkewX() * m.getSkewX()),
+ SkScalarSqrt(m.getScaleY() * m.getScaleY() + m.getSkewY() * m.getSkewY()));
+ }
+ SkSize scaledSize = SkSize::Make(SkScalarAbs(scale.x() * fTile.width()),
+ SkScalarAbs(scale.y() * fTile.height()));
+
+ // Clamp the tile size to about 4M pixels
+ static const SkScalar kMaxTileArea = 2048 * 2048;
+ SkScalar tileArea = SkScalarMul(scaledSize.width(), scaledSize.height());
+ if (tileArea > kMaxTileArea) {
+ SkScalar clampScale = SkScalarSqrt(kMaxTileArea / tileArea);
+ scaledSize.set(SkScalarMul(scaledSize.width(), clampScale),
+ SkScalarMul(scaledSize.height(), clampScale));
+ }
+#if SK_SUPPORT_GPU
+    // Scale down the tile size if it is larger than maxTextureSize for the GPU path;
+    // otherwise texture creation will fail.
+ if (maxTextureSize) {
+ if (scaledSize.width() > maxTextureSize || scaledSize.height() > maxTextureSize) {
+ SkScalar downScale = maxTextureSize / SkMaxScalar(scaledSize.width(), scaledSize.height());
+ scaledSize.set(SkScalarFloorToScalar(SkScalarMul(scaledSize.width(), downScale)),
+ SkScalarFloorToScalar(SkScalarMul(scaledSize.height(), downScale)));
+ }
+ }
+#endif
+
+#ifdef SK_SUPPORT_LEGACY_PICTURESHADER_ROUNDING
+ const SkISize tileSize = scaledSize.toRound();
+#else
+ const SkISize tileSize = scaledSize.toCeil();
+#endif
+ if (tileSize.isEmpty()) {
+ return SkShader::MakeEmptyShader();
+ }
+
+ // The actual scale, compensating for rounding & clamping.
+ const SkSize tileScale = SkSize::Make(SkIntToScalar(tileSize.width()) / fTile.width(),
+ SkIntToScalar(tileSize.height()) / fTile.height());
+
+ sk_sp<SkShader> tileShader;
+ BitmapShaderKey key(fPicture->uniqueID(),
+ fTile,
+ fTmx,
+ fTmy,
+ tileScale,
+ this->getLocalMatrix());
+
+ if (!SkResourceCache::Find(key, BitmapShaderRec::Visitor, &tileShader)) {
+ SkMatrix tileMatrix;
+ tileMatrix.setRectToRect(fTile, SkRect::MakeIWH(tileSize.width(), tileSize.height()),
+ SkMatrix::kFill_ScaleToFit);
+
+ sk_sp<SkImage> tileImage(
+ SkImage::MakeFromPicture(fPicture, tileSize, &tileMatrix, nullptr));
+ if (!tileImage) {
+ return nullptr;
+ }
+
+ SkMatrix shaderMatrix = this->getLocalMatrix();
+ shaderMatrix.preScale(1 / tileScale.width(), 1 / tileScale.height());
+ tileShader = tileImage->makeShader(fTmx, fTmy, &shaderMatrix);
+
+ const SkImageInfo tileInfo = SkImageInfo::MakeN32Premul(tileSize);
+ SkResourceCache::Add(new BitmapShaderRec(key, tileShader.get(),
+ tileInfo.getSafeSize(tileInfo.minRowBytes())));
+ }
+
+ return tileShader;
+}
+
+size_t SkPictureShader::onContextSize(const ContextRec&) const {
+ return sizeof(PictureShaderContext);
+}
+
+SkShader::Context* SkPictureShader::onCreateContext(const ContextRec& rec, void* storage) const {
+ sk_sp<SkShader> bitmapShader(this->refBitmapShader(*rec.fMatrix, rec.fLocalMatrix));
+ if (!bitmapShader) {
+ return nullptr;
+ }
+ return PictureShaderContext::Create(storage, *this, rec, bitmapShader);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////
+
+SkShader::Context* SkPictureShader::PictureShaderContext::Create(void* storage,
+ const SkPictureShader& shader, const ContextRec& rec,
+ sk_sp<SkShader> bitmapShader) {
+ PictureShaderContext* ctx = new (storage) PictureShaderContext(shader, rec,
+ std::move(bitmapShader));
+ if (nullptr == ctx->fBitmapShaderContext) {
+ ctx->~PictureShaderContext();
+ ctx = nullptr;
+ }
+ return ctx;
+}
+
+SkPictureShader::PictureShaderContext::PictureShaderContext(
+ const SkPictureShader& shader, const ContextRec& rec, sk_sp<SkShader> bitmapShader)
+ : INHERITED(shader, rec)
+ , fBitmapShader(std::move(bitmapShader))
+{
+ fBitmapShaderContextStorage = sk_malloc_throw(fBitmapShader->contextSize(rec));
+ fBitmapShaderContext = fBitmapShader->createContext(rec, fBitmapShaderContextStorage);
+    // If fBitmapShaderContext is null, we are invalid.
+}
+
+SkPictureShader::PictureShaderContext::~PictureShaderContext() {
+ if (fBitmapShaderContext) {
+ fBitmapShaderContext->~Context();
+ }
+ sk_free(fBitmapShaderContextStorage);
+}
+
+uint32_t SkPictureShader::PictureShaderContext::getFlags() const {
+ SkASSERT(fBitmapShaderContext);
+ return fBitmapShaderContext->getFlags();
+}
+
+SkShader::Context::ShadeProc SkPictureShader::PictureShaderContext::asAShadeProc(void** ctx) {
+ SkASSERT(fBitmapShaderContext);
+ return fBitmapShaderContext->asAShadeProc(ctx);
+}
+
+void SkPictureShader::PictureShaderContext::shadeSpan(int x, int y, SkPMColor dstC[], int count) {
+ SkASSERT(fBitmapShaderContext);
+ fBitmapShaderContext->shadeSpan(x, y, dstC, count);
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkPictureShader::toString(SkString* str) const {
+ static const char* gTileModeName[SkShader::kTileModeCount] = {
+ "clamp", "repeat", "mirror"
+ };
+
+ str->appendf("PictureShader: [%f:%f:%f:%f] ",
+ fPicture->cullRect().fLeft,
+ fPicture->cullRect().fTop,
+ fPicture->cullRect().fRight,
+ fPicture->cullRect().fBottom);
+
+ str->appendf("(%s, %s)", gTileModeName[fTmx], gTileModeName[fTmy]);
+
+ this->INHERITED::toString(str);
+}
+#endif
+
+#if SK_SUPPORT_GPU
+sk_sp<GrFragmentProcessor> SkPictureShader::asFragmentProcessor(const AsFPArgs& args) const {
+ int maxTextureSize = 0;
+ if (args.fContext) {
+ maxTextureSize = args.fContext->caps()->maxTextureSize();
+ }
+ sk_sp<SkShader> bitmapShader(this->refBitmapShader(*args.fViewMatrix, args.fLocalMatrix,
+ maxTextureSize));
+ if (!bitmapShader) {
+ return nullptr;
+ }
+ return bitmapShader->asFragmentProcessor(SkShader::AsFPArgs(
+ args.fContext, args.fViewMatrix, nullptr, args.fFilterQuality, args.fDstColorSpace,
+ args.fGammaTreatment));
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkPictureShader.h b/gfx/skia/skia/src/core/SkPictureShader.h
new file mode 100644
index 000000000..f2927a032
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureShader.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPictureShader_DEFINED
+#define SkPictureShader_DEFINED
+
+#include "SkShader.h"
+
+class SkBitmap;
+class SkPicture;
+
+/*
+ * An SkPictureShader can be used to draw SkPicture-based patterns.
+ *
+ * The SkPicture is first rendered into a tile, which is then used to shade the area according
+ * to specified tiling rules.
+ */
+class SkPictureShader : public SkShader {
+public:
+ static sk_sp<SkShader> Make(sk_sp<SkPicture>, TileMode, TileMode, const SkMatrix*,
+ const SkRect*);
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkPictureShader)
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(const AsFPArgs&) const override;
+#endif
+
+protected:
+ SkPictureShader(SkReadBuffer&);
+ void flatten(SkWriteBuffer&) const override;
+ size_t onContextSize(const ContextRec&) const override;
+ Context* onCreateContext(const ContextRec&, void* storage) const override;
+
+private:
+ SkPictureShader(sk_sp<SkPicture>, TileMode, TileMode, const SkMatrix*, const SkRect*);
+
+ sk_sp<SkShader> refBitmapShader(const SkMatrix&, const SkMatrix* localMatrix,
+ const int maxTextureSize = 0) const;
+
+ sk_sp<SkPicture> fPicture;
+ SkRect fTile;
+ TileMode fTmx, fTmy;
+
+ class PictureShaderContext : public SkShader::Context {
+ public:
+ static Context* Create(void* storage, const SkPictureShader&, const ContextRec&,
+ sk_sp<SkShader> bitmapShader);
+
+ virtual ~PictureShaderContext();
+
+ uint32_t getFlags() const override;
+
+ ShadeProc asAShadeProc(void** ctx) override;
+ void shadeSpan(int x, int y, SkPMColor dstC[], int count) override;
+
+ private:
+ PictureShaderContext(const SkPictureShader&, const ContextRec&,
+ sk_sp<SkShader> bitmapShader);
+
+ sk_sp<SkShader> fBitmapShader;
+ SkShader::Context* fBitmapShaderContext;
+ void* fBitmapShaderContextStorage;
+
+ typedef SkShader::Context INHERITED;
+ };
+
+ typedef SkShader INHERITED;
+};
+
+#endif // SkPictureShader_DEFINED
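As a rough illustration of the header above: a picture shader is built from a previously recorded picture and installed on a paint, after which any geometry drawn with that paint is filled by tiling the picture. This is only a sketch using SkPictureShader::Make as declared here; make_tiled_paint is a placeholder name, the picture is assumed to come from elsewhere, and callers typically reach this class through SkShader's public factory instead.

    #include "SkPaint.h"
    #include "SkPicture.h"
    #include "SkPictureShader.h"

    #include <utility>

    // Fills whatever is drawn with the returned paint by repeating 'picture' in both axes.
    static SkPaint make_tiled_paint(sk_sp<SkPicture> picture) {
        sk_sp<SkShader> shader = SkPictureShader::Make(std::move(picture),
                                                       SkShader::kRepeat_TileMode,
                                                       SkShader::kRepeat_TileMode,
                                                       /*localMatrix=*/nullptr,
                                                       /*tile=*/nullptr);
        SkPaint paint;
        paint.setShader(std::move(shader));
        return paint;
    }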
diff --git a/gfx/skia/skia/src/core/SkPipe.h b/gfx/skia/skia/src/core/SkPipe.h
new file mode 100644
index 000000000..04c3ae2dc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPipe.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPipe_DEFINED
+#define SkPipe_DEFINED
+
+#include "SkData.h"
+
+class SkCanvas;
+class SkImage;
+class SkPicture;
+class SkTypefaceSerializer;
+class SkTypefaceDeserializer;
+class SkWStream;
+
+class SkPipeSerializer {
+public:
+ SkPipeSerializer();
+ ~SkPipeSerializer();
+
+    // Ownership is not transferred, so the caller must keep the serializer alive
+ void setTypefaceSerializer(SkTypefaceSerializer*);
+
+ void resetCache();
+
+ sk_sp<SkData> writeImage(SkImage*);
+ sk_sp<SkData> writePicture(SkPicture*);
+
+ void writeImage(SkImage*, SkWStream*);
+ void writePicture(SkPicture*, SkWStream*);
+
+ SkCanvas* beginWrite(const SkRect& cullBounds, SkWStream*);
+ void endWrite();
+
+private:
+ class Impl;
+ std::unique_ptr<Impl> fImpl;
+};
+
+class SkPipeDeserializer {
+public:
+ SkPipeDeserializer();
+ ~SkPipeDeserializer();
+
+    // Ownership is not transferred, so the caller must keep the deserializer alive
+ void setTypefaceDeserializer(SkTypefaceDeserializer*);
+
+ sk_sp<SkImage> readImage(const SkData* data) {
+ if (!data) {
+ return nullptr;
+ }
+ return this->readImage(data->data(), data->size());
+ }
+
+ sk_sp<SkPicture> readPicture(const SkData* data) {
+ if (!data) {
+ return nullptr;
+ }
+ return this->readPicture(data->data(), data->size());
+ }
+
+ sk_sp<SkImage> readImage(const void*, size_t);
+ sk_sp<SkPicture> readPicture(const void*, size_t);
+
+ bool playback(const void*, size_t, SkCanvas*);
+
+private:
+ class Impl;
+ std::unique_ptr<Impl> fImpl;
+};
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+class SkTypefaceSerializer {
+public:
+ virtual ~SkTypefaceSerializer() {}
+
+ virtual sk_sp<SkData> serialize(SkTypeface*) = 0;
+};
+
+class SkTypefaceDeserializer {
+public:
+ virtual ~SkTypefaceDeserializer() {}
+
+ virtual sk_sp<SkTypeface> deserialize(const void* data, size_t size) = 0;
+};
+
+#endif
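A small sketch of how the serializer/deserializer pair declared above can round-trip a picture through an in-memory blob. It only exercises the declared entry points (writePicture returning SkData, readPicture taking SkData); round_trip is an illustrative name, and error handling and the typeface hooks are omitted.

    #include "SkData.h"
    #include "SkPicture.h"
    #include "SkPipe.h"

    static sk_sp<SkPicture> round_trip(SkPicture* src) {
        SkPipeSerializer serializer;
        sk_sp<SkData> blob = serializer.writePicture(src);   // one-shot encode

        SkPipeDeserializer deserializer;
        return deserializer.readPicture(blob.get());          // decode it back
    }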
diff --git a/gfx/skia/skia/src/core/SkPixelRef.cpp b/gfx/skia/skia/src/core/SkPixelRef.cpp
new file mode 100644
index 000000000..cdc318b2a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPixelRef.cpp
@@ -0,0 +1,348 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapCache.h"
+#include "SkMutex.h"
+#include "SkPixelRef.h"
+#include "SkTraceEvent.h"
+
+//#define SK_SUPPORT_LEGACY_UNBALANCED_PIXELREF_LOCKCOUNT
+//#define SK_TRACE_PIXELREF_LIFETIME
+
+#include "SkNextID.h"
+
+uint32_t SkNextID::ImageID() {
+ static uint32_t gID = 0;
+ uint32_t id;
+ // Loop in case our global wraps around, as we never want to return a 0.
+ do {
+ id = sk_atomic_fetch_add(&gID, 2u) + 2; // Never set the low bit.
+ } while (0 == id);
+ return id;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// just need a > 0 value, so pick a funny one to aid in debugging
+#define SKPIXELREF_PRELOCKED_LOCKCOUNT 123456789
+
+static SkImageInfo validate_info(const SkImageInfo& info) {
+ SkAlphaType newAlphaType = info.alphaType();
+ SkAssertResult(SkColorTypeValidateAlphaType(info.colorType(), info.alphaType(), &newAlphaType));
+ return info.makeAlphaType(newAlphaType);
+}
+
+#ifdef SK_TRACE_PIXELREF_LIFETIME
+ static int32_t gInstCounter;
+#endif
+
+SkPixelRef::SkPixelRef(const SkImageInfo& info)
+ : fInfo(validate_info(info))
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ , fStableID(SkNextID::ImageID())
+#endif
+
+{
+#ifdef SK_TRACE_PIXELREF_LIFETIME
+ SkDebugf(" pixelref %d\n", sk_atomic_inc(&gInstCounter));
+#endif
+ fRec.zero();
+ fLockCount = 0;
+ this->needsNewGenID();
+ fMutability = kMutable;
+ fPreLocked = false;
+ fAddedToCache.store(false);
+}
+
+SkPixelRef::~SkPixelRef() {
+#ifndef SK_SUPPORT_LEGACY_UNBALANCED_PIXELREF_LOCKCOUNT
+ SkASSERT(SKPIXELREF_PRELOCKED_LOCKCOUNT == fLockCount || 0 == fLockCount);
+#endif
+
+#ifdef SK_TRACE_PIXELREF_LIFETIME
+ SkDebugf("~pixelref %d\n", sk_atomic_dec(&gInstCounter) - 1);
+#endif
+ this->callGenIDChangeListeners();
+}
+
+void SkPixelRef::needsNewGenID() {
+ fTaggedGenID.store(0);
+ SkASSERT(!this->genIDIsUnique()); // This method isn't threadsafe, so the assert should be fine.
+}
+
+void SkPixelRef::cloneGenID(const SkPixelRef& that) {
+ // This is subtle. We must call that.getGenerationID() to make sure its genID isn't 0.
+ uint32_t genID = that.getGenerationID();
+
+ // Neither ID is unique any more.
+ // (These & ~1u are actually redundant. that.getGenerationID() just did it for us.)
+ this->fTaggedGenID.store(genID & ~1u);
+ that. fTaggedGenID.store(genID & ~1u);
+
+ // This method isn't threadsafe, so these asserts should be fine.
+ SkASSERT(!this->genIDIsUnique());
+ SkASSERT(!that. genIDIsUnique());
+}
+
+static void validate_pixels_ctable(const SkImageInfo& info, const SkColorTable* ctable) {
+ if (info.isEmpty()) {
+ return; // can't require ctable if the dimensions are empty
+ }
+ if (kIndex_8_SkColorType == info.colorType()) {
+ SkASSERT(ctable);
+ } else {
+ SkASSERT(nullptr == ctable);
+ }
+}
+
+void SkPixelRef::setPreLocked(void* pixels, size_t rowBytes, SkColorTable* ctable) {
+ SkASSERT(pixels);
+ validate_pixels_ctable(fInfo, ctable);
+ // only call me in your constructor, otherwise fLockCount tracking can get
+ // out of sync.
+ fRec.fPixels = pixels;
+ fRec.fColorTable = ctable;
+ fRec.fRowBytes = rowBytes;
+ fLockCount = SKPIXELREF_PRELOCKED_LOCKCOUNT;
+ fPreLocked = true;
+}
+
+// Increments fLockCount only on success
+bool SkPixelRef::lockPixelsInsideMutex() {
+ fMutex.assertHeld();
+
+ if (1 == ++fLockCount) {
+ SkASSERT(fRec.isZero());
+ if (!this->onNewLockPixels(&fRec)) {
+ fRec.zero();
+ fLockCount -= 1; // we return fLockCount unchanged if we fail.
+ return false;
+ }
+ }
+ if (fRec.fPixels) {
+ validate_pixels_ctable(fInfo, fRec.fColorTable);
+ return true;
+ }
+ // no pixels, so we failed (somehow)
+ --fLockCount;
+ return false;
+}
+
+// For historical reasons, we always inc fLockCount, even if we return false.
+// It would be nice to change this (it seems), and only inc if we actually succeed...
+bool SkPixelRef::lockPixels() {
+ SkASSERT(!fPreLocked || SKPIXELREF_PRELOCKED_LOCKCOUNT == fLockCount);
+
+ if (!fPreLocked) {
+ TRACE_EVENT_BEGIN0("skia", "SkPixelRef::lockPixelsMutex");
+ SkAutoMutexAcquire ac(fMutex);
+ TRACE_EVENT_END0("skia", "SkPixelRef::lockPixelsMutex");
+ SkDEBUGCODE(int oldCount = fLockCount;)
+ bool success = this->lockPixelsInsideMutex();
+ // lockPixelsInsideMutex only increments the count if it succeeds.
+ SkASSERT(oldCount + (int)success == fLockCount);
+
+ if (!success) {
+ // For compatibility with SkBitmap calling lockPixels, we still want to increment
+ // fLockCount even if we failed. If we updated SkBitmap we could remove this oddity.
+ fLockCount += 1;
+ return false;
+ }
+ }
+ if (fRec.fPixels) {
+ validate_pixels_ctable(fInfo, fRec.fColorTable);
+ return true;
+ }
+ return false;
+}
+
+bool SkPixelRef::lockPixels(LockRec* rec) {
+ if (this->lockPixels()) {
+ *rec = fRec;
+ return true;
+ }
+ return false;
+}
+
+void SkPixelRef::unlockPixels() {
+ SkASSERT(!fPreLocked || SKPIXELREF_PRELOCKED_LOCKCOUNT == fLockCount);
+
+ if (!fPreLocked) {
+ SkAutoMutexAcquire ac(fMutex);
+
+ SkASSERT(fLockCount > 0);
+ if (0 == --fLockCount) {
+ // don't call onUnlockPixels unless onLockPixels succeeded
+ if (fRec.fPixels) {
+ this->onUnlockPixels();
+ fRec.zero();
+ } else {
+ SkASSERT(fRec.isZero());
+ }
+ }
+ }
+}
+
+bool SkPixelRef::requestLock(const LockRequest& request, LockResult* result) {
+ SkASSERT(result);
+ if (request.fSize.isEmpty()) {
+ return false;
+ }
+ // until we support subsets, we have to check this...
+ if (request.fSize.width() != fInfo.width() || request.fSize.height() != fInfo.height()) {
+ return false;
+ }
+
+ if (fPreLocked) {
+ result->fUnlockProc = nullptr;
+ result->fUnlockContext = nullptr;
+ result->fCTable = fRec.fColorTable;
+ result->fPixels = fRec.fPixels;
+ result->fRowBytes = fRec.fRowBytes;
+ result->fSize.set(fInfo.width(), fInfo.height());
+ } else {
+ SkAutoMutexAcquire ac(fMutex);
+ if (!this->onRequestLock(request, result)) {
+ return false;
+ }
+ }
+ if (result->fPixels) {
+ validate_pixels_ctable(fInfo, result->fCTable);
+ return true;
+ }
+ return false;
+}
+
+bool SkPixelRef::lockPixelsAreWritable() const {
+ return this->onLockPixelsAreWritable();
+}
+
+bool SkPixelRef::onLockPixelsAreWritable() const {
+ return true;
+}
+
+uint32_t SkPixelRef::getGenerationID() const {
+ uint32_t id = fTaggedGenID.load();
+ if (0 == id) {
+ uint32_t next = SkNextID::ImageID() | 1u;
+ if (fTaggedGenID.compare_exchange(&id, next)) {
+ id = next; // There was no race or we won the race. fTaggedGenID is next now.
+ } else {
+ // We lost a race to set fTaggedGenID. compare_exchange() filled id with the winner.
+ }
+ // We can't quite SkASSERT(this->genIDIsUnique()). It could be non-unique
+ // if we got here via the else path (pretty unlikely, but possible).
+ }
+ return id & ~1u; // Mask off bottom unique bit.
+}
+
+void SkPixelRef::addGenIDChangeListener(GenIDChangeListener* listener) {
+ if (nullptr == listener || !this->genIDIsUnique()) {
+ // No point in tracking this if we're not going to call it.
+ delete listener;
+ return;
+ }
+ *fGenIDChangeListeners.append() = listener;
+}
+
+// we need to be called *before* the genID gets changed or zeroed
+void SkPixelRef::callGenIDChangeListeners() {
+ // We don't invalidate ourselves if we think another SkPixelRef is sharing our genID.
+ if (this->genIDIsUnique()) {
+ for (int i = 0; i < fGenIDChangeListeners.count(); i++) {
+ fGenIDChangeListeners[i]->onChange();
+ }
+
+ // TODO: SkAtomic could add "old_value = atomic.xchg(new_value)" to make this clearer.
+ if (fAddedToCache.load()) {
+ SkNotifyBitmapGenIDIsStale(this->getGenerationID());
+ fAddedToCache.store(false);
+ }
+ }
+ // Listeners get at most one shot, so whether these triggered or not, blow them away.
+ fGenIDChangeListeners.deleteAll();
+}
+
+void SkPixelRef::notifyPixelsChanged() {
+#ifdef SK_DEBUG
+ if (this->isImmutable()) {
+ SkDebugf("========== notifyPixelsChanged called on immutable pixelref");
+ }
+#endif
+ this->callGenIDChangeListeners();
+ this->needsNewGenID();
+ this->onNotifyPixelsChanged();
+}
+
+void SkPixelRef::changeAlphaType(SkAlphaType at) {
+ *const_cast<SkImageInfo*>(&fInfo) = fInfo.makeAlphaType(at);
+}
+
+void SkPixelRef::setImmutable() {
+ fMutability = kImmutable;
+}
+
+void SkPixelRef::setImmutableWithID(uint32_t genID) {
+ /*
+ * We are forcing the genID to match an external value. The caller must ensure that this
+ * value does not conflict with other content.
+ *
+ * One use is to force this pixelref's id to match an SkImage's id
+ */
+ fMutability = kImmutable;
+ fTaggedGenID.store(genID);
+}
+
+void SkPixelRef::setTemporarilyImmutable() {
+ SkASSERT(fMutability != kImmutable);
+ fMutability = kTemporarilyImmutable;
+}
+
+void SkPixelRef::restoreMutability() {
+ SkASSERT(fMutability != kImmutable);
+ fMutability = kMutable;
+}
+
+bool SkPixelRef::readPixels(SkBitmap* dst, SkColorType ct, const SkIRect* subset) {
+ return this->onReadPixels(dst, ct, subset);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkPixelRef::onReadPixels(SkBitmap* dst, SkColorType, const SkIRect* subset) {
+ return false;
+}
+
+void SkPixelRef::onNotifyPixelsChanged() { }
+
+SkData* SkPixelRef::onRefEncodedData() {
+ return nullptr;
+}
+
+size_t SkPixelRef::getAllocatedSizeInBytes() const {
+ return 0;
+}
+
+static void unlock_legacy_result(void* ctx) {
+ SkPixelRef* pr = (SkPixelRef*)ctx;
+ pr->unlockPixels();
+    pr->unref(); // balancing the ref taken in onRequestLock
+}
+
+bool SkPixelRef::onRequestLock(const LockRequest& request, LockResult* result) {
+ if (!this->lockPixelsInsideMutex()) {
+ return false;
+ }
+
+ result->fUnlockProc = unlock_legacy_result;
+ result->fUnlockContext = SkRef(this); // this is balanced in our fUnlockProc
+ result->fCTable = fRec.fColorTable;
+ result->fPixels = fRec.fPixels;
+ result->fRowBytes = fRec.fRowBytes;
+ result->fSize.set(fInfo.width(), fInfo.height());
+ return true;
+}
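The generation-ID code above keeps a tagged value: IDs handed out by SkNextID::ImageID() are always even, the low bit is set while this pixelref is the sole user of the ID, and the public ID is that value with the bit masked off (cloneGenID clears it on both sides). A tiny standalone sketch of the convention, with placeholder helper names that are not part of Skia's API:

    #include <cstdint>

    // Low bit set   => the ID has not been shared with another pixelref yet.
    // Low bit clear => the ID is shared, or (when the whole value is 0) unassigned.
    static inline uint32_t tag_as_unique(uint32_t evenId) { return evenId | 1u; }
    static inline uint32_t public_id(uint32_t tagged)     { return tagged & ~1u; }
    static inline bool     is_unique(uint32_t tagged)     { return (tagged & 1u) != 0; }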
diff --git a/gfx/skia/skia/src/core/SkPixmap.cpp b/gfx/skia/skia/src/core/SkPixmap.cpp
new file mode 100644
index 000000000..108c87757
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPixmap.cpp
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkColorPriv.h"
+#include "SkConfig8888.h"
+#include "SkData.h"
+#include "SkMask.h"
+#include "SkPixmap.h"
+#include "SkUtils.h"
+#include "SkPM4f.h"
+
+void SkAutoPixmapUnlock::reset(const SkPixmap& pm, void (*unlock)(void*), void* ctx) {
+ SkASSERT(pm.addr() != nullptr);
+
+ this->unlock();
+ fPixmap = pm;
+ fUnlockProc = unlock;
+ fUnlockContext = ctx;
+ fIsLocked = true;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkPixmap::reset() {
+ fPixels = nullptr;
+ fCTable = nullptr;
+ fRowBytes = 0;
+ fInfo = SkImageInfo::MakeUnknown();
+}
+
+void SkPixmap::reset(const SkImageInfo& info, const void* addr, size_t rowBytes, SkColorTable* ct) {
+ if (addr) {
+ SkASSERT(info.validRowBytes(rowBytes));
+ }
+ fPixels = addr;
+ fCTable = ct;
+ fRowBytes = rowBytes;
+ fInfo = info;
+}
+
+bool SkPixmap::reset(const SkMask& src) {
+ if (SkMask::kA8_Format == src.fFormat) {
+ this->reset(SkImageInfo::MakeA8(src.fBounds.width(), src.fBounds.height()),
+ src.fImage, src.fRowBytes, nullptr);
+ return true;
+ }
+ this->reset();
+ return false;
+}
+
+void SkPixmap::setColorSpace(sk_sp<SkColorSpace> cs) {
+ fInfo = fInfo.makeColorSpace(std::move(cs));
+}
+
+bool SkPixmap::extractSubset(SkPixmap* result, const SkIRect& subset) const {
+ SkIRect srcRect, r;
+ srcRect.set(0, 0, this->width(), this->height());
+ if (!r.intersect(srcRect, subset)) {
+ return false; // r is empty (i.e. no intersection)
+ }
+
+    // If the upper left of the rectangle was outside the bounds of this pixmap, we should have
+ // exited above.
+ SkASSERT(static_cast<unsigned>(r.fLeft) < static_cast<unsigned>(this->width()));
+ SkASSERT(static_cast<unsigned>(r.fTop) < static_cast<unsigned>(this->height()));
+
+ const void* pixels = nullptr;
+ if (fPixels) {
+ const size_t bpp = fInfo.bytesPerPixel();
+ pixels = (const uint8_t*)fPixels + r.fTop * fRowBytes + r.fLeft * bpp;
+ }
+ result->reset(fInfo.makeWH(r.width(), r.height()), pixels, fRowBytes, fCTable);
+ return true;
+}
+
+bool SkPixmap::readPixels(const SkImageInfo& requestedDstInfo, void* dstPixels, size_t dstRB,
+ int x, int y) const {
+ if (kUnknown_SkColorType == requestedDstInfo.colorType()) {
+ return false;
+ }
+ if (nullptr == dstPixels || dstRB < requestedDstInfo.minRowBytes()) {
+ return false;
+ }
+ if (0 == requestedDstInfo.width() || 0 == requestedDstInfo.height()) {
+ return false;
+ }
+
+ SkIRect srcR = SkIRect::MakeXYWH(x, y, requestedDstInfo.width(), requestedDstInfo.height());
+ if (!srcR.intersect(0, 0, this->width(), this->height())) {
+ return false;
+ }
+
+ // the intersect may have shrunk info's logical size
+ const SkImageInfo dstInfo = requestedDstInfo.makeWH(srcR.width(), srcR.height());
+
+ // if x or y are negative, then we have to adjust pixels
+ if (x > 0) {
+ x = 0;
+ }
+ if (y > 0) {
+ y = 0;
+ }
+ // here x,y are either 0 or negative
+ dstPixels = ((char*)dstPixels - y * dstRB - x * dstInfo.bytesPerPixel());
+
+ const SkImageInfo srcInfo = this->info().makeWH(dstInfo.width(), dstInfo.height());
+ const void* srcPixels = this->addr(srcR.x(), srcR.y());
+ return SkPixelInfo::CopyPixels(dstInfo, dstPixels, dstRB,
+ srcInfo, srcPixels, this->rowBytes(), this->ctable());
+}
+
+static uint16_t pack_8888_to_4444(unsigned a, unsigned r, unsigned g, unsigned b) {
+ unsigned pixel = (SkA32To4444(a) << SK_A4444_SHIFT) |
+ (SkR32To4444(r) << SK_R4444_SHIFT) |
+ (SkG32To4444(g) << SK_G4444_SHIFT) |
+ (SkB32To4444(b) << SK_B4444_SHIFT);
+ return SkToU16(pixel);
+}
+
+bool SkPixmap::erase(SkColor color, const SkIRect& inArea) const {
+ if (nullptr == fPixels) {
+ return false;
+ }
+ SkIRect area;
+ if (!area.intersect(this->bounds(), inArea)) {
+ return false;
+ }
+
+ U8CPU a = SkColorGetA(color);
+ U8CPU r = SkColorGetR(color);
+ U8CPU g = SkColorGetG(color);
+ U8CPU b = SkColorGetB(color);
+
+ int height = area.height();
+ const int width = area.width();
+ const int rowBytes = this->rowBytes();
+
+ switch (this->colorType()) {
+ case kGray_8_SkColorType: {
+ if (255 != a) {
+ r = SkMulDiv255Round(r, a);
+ g = SkMulDiv255Round(g, a);
+ b = SkMulDiv255Round(b, a);
+ }
+ int gray = SkComputeLuminance(r, g, b);
+ uint8_t* p = this->writable_addr8(area.fLeft, area.fTop);
+ while (--height >= 0) {
+ memset(p, gray, width);
+ p += rowBytes;
+ }
+ break;
+ }
+ case kAlpha_8_SkColorType: {
+ uint8_t* p = this->writable_addr8(area.fLeft, area.fTop);
+ while (--height >= 0) {
+ memset(p, a, width);
+ p += rowBytes;
+ }
+ break;
+ }
+ case kARGB_4444_SkColorType:
+ case kRGB_565_SkColorType: {
+ uint16_t* p = this->writable_addr16(area.fLeft, area.fTop);
+ uint16_t v;
+
+ // make rgb premultiplied
+ if (255 != a) {
+ r = SkMulDiv255Round(r, a);
+ g = SkMulDiv255Round(g, a);
+ b = SkMulDiv255Round(b, a);
+ }
+
+ if (kARGB_4444_SkColorType == this->colorType()) {
+ v = pack_8888_to_4444(a, r, g, b);
+ } else {
+ v = SkPackRGB16(r >> (8 - SK_R16_BITS),
+ g >> (8 - SK_G16_BITS),
+ b >> (8 - SK_B16_BITS));
+ }
+ while (--height >= 0) {
+ sk_memset16(p, v, width);
+ p = (uint16_t*)((char*)p + rowBytes);
+ }
+ break;
+ }
+ case kBGRA_8888_SkColorType:
+ case kRGBA_8888_SkColorType: {
+ uint32_t* p = this->writable_addr32(area.fLeft, area.fTop);
+
+ if (255 != a && kPremul_SkAlphaType == this->alphaType()) {
+ r = SkMulDiv255Round(r, a);
+ g = SkMulDiv255Round(g, a);
+ b = SkMulDiv255Round(b, a);
+ }
+ uint32_t v = kRGBA_8888_SkColorType == this->colorType()
+ ? SkPackARGB_as_RGBA(a, r, g, b)
+ : SkPackARGB_as_BGRA(a, r, g, b);
+
+ while (--height >= 0) {
+ sk_memset32(p, v, width);
+ p = (uint32_t*)((char*)p + rowBytes);
+ }
+ break;
+ }
+ default:
+ return false; // no change, so don't call notifyPixelsChanged()
+ }
+ return true;
+}
+
+#include "SkNx.h"
+#include "SkHalf.h"
+
+bool SkPixmap::erase(const SkColor4f& origColor, const SkIRect* subset) const {
+ SkPixmap pm;
+ if (subset) {
+ if (!this->extractSubset(&pm, *subset)) {
+ return false;
+ }
+ } else {
+ pm = *this;
+ }
+
+ const SkColor4f color = origColor.pin();
+
+ if (kRGBA_F16_SkColorType != pm.colorType()) {
+ return pm.erase(color.toSkColor());
+ }
+
+ const uint64_t half4 = color.premul().toF16();
+ for (int y = 0; y < pm.height(); ++y) {
+ sk_memset64(pm.writable_addr64(0, y), half4, pm.width());
+ }
+ return true;
+}
+
+#include "SkBitmap.h"
+#include "SkCanvas.h"
+#include "SkSurface.h"
+#include "SkXfermode.h"
+
+bool SkPixmap::scalePixels(const SkPixmap& dst, SkFilterQuality quality) const {
+    // Can't do anything with an empty src or dst
+ if (this->width() <= 0 || this->height() <= 0 || dst.width() <= 0 || dst.height() <= 0) {
+ return false;
+ }
+
+ // no scaling involved?
+ if (dst.width() == this->width() && dst.height() == this->height()) {
+ return this->readPixels(dst);
+ }
+
+ SkBitmap bitmap;
+ if (!bitmap.installPixels(*this)) {
+ return false;
+ }
+ bitmap.setIsVolatile(true); // so we don't try to cache it
+
+ auto surface(SkSurface::MakeRasterDirect(dst.info(), dst.writable_addr(), dst.rowBytes()));
+ if (!surface) {
+ return false;
+ }
+
+ SkPaint paint;
+ paint.setFilterQuality(quality);
+ paint.setBlendMode(SkBlendMode::kSrc);
+ surface->getCanvas()->drawBitmapRect(bitmap, SkRect::MakeIWH(dst.width(), dst.height()),
+ &paint);
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
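To show the pixmap entry points implemented in this file in one place, here is a hedged sketch that wraps caller-owned memory, erases a sub-rectangle, and reads a single pixel back out. fill_and_read_back, the dimensions, and the color are arbitrary; the calls themselves (reset, erase, readPixels) match the implementations above.

    #include "SkColor.h"
    #include "SkImageInfo.h"
    #include "SkPixmap.h"

    #include <vector>

    static bool fill_and_read_back() {
        const SkImageInfo info = SkImageInfo::MakeN32Premul(16, 16);
        std::vector<uint32_t> storage(16 * 16);

        SkPixmap pm;
        pm.reset(info, storage.data(), info.minRowBytes(), nullptr);

        // erase() performs the per-color-type packing shown above.
        if (!pm.erase(SK_ColorGREEN, SkIRect::MakeWH(8, 8))) {
            return false;
        }

        uint32_t pixel = 0;
        const SkImageInfo oneByOne = SkImageInfo::MakeN32Premul(1, 1);
        return pm.readPixels(oneByOne, &pixel, sizeof(pixel), 0, 0);
    }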
diff --git a/gfx/skia/skia/src/core/SkPoint.cpp b/gfx/skia/skia/src/core/SkPoint.cpp
new file mode 100644
index 000000000..162c62aca
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPoint.cpp
@@ -0,0 +1,264 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkMathPriv.h"
+#include "SkPoint.h"
+
+void SkIPoint::rotateCW(SkIPoint* dst) const {
+ SkASSERT(dst);
+
+ // use a tmp in case this == dst
+ int32_t tmp = fX;
+ dst->fX = -fY;
+ dst->fY = tmp;
+}
+
+void SkIPoint::rotateCCW(SkIPoint* dst) const {
+ SkASSERT(dst);
+
+ // use a tmp in case this == dst
+ int32_t tmp = fX;
+ dst->fX = fY;
+ dst->fY = -tmp;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkPoint::setIRectFan(int l, int t, int r, int b, size_t stride) {
+ SkASSERT(stride >= sizeof(SkPoint));
+
+ ((SkPoint*)((intptr_t)this + 0 * stride))->set(SkIntToScalar(l),
+ SkIntToScalar(t));
+ ((SkPoint*)((intptr_t)this + 1 * stride))->set(SkIntToScalar(l),
+ SkIntToScalar(b));
+ ((SkPoint*)((intptr_t)this + 2 * stride))->set(SkIntToScalar(r),
+ SkIntToScalar(b));
+ ((SkPoint*)((intptr_t)this + 3 * stride))->set(SkIntToScalar(r),
+ SkIntToScalar(t));
+}
+
+void SkPoint::rotateCW(SkPoint* dst) const {
+ SkASSERT(dst);
+
+ // use a tmp in case this == dst
+ SkScalar tmp = fX;
+ dst->fX = -fY;
+ dst->fY = tmp;
+}
+
+void SkPoint::rotateCCW(SkPoint* dst) const {
+ SkASSERT(dst);
+
+ // use a tmp in case this == dst
+ SkScalar tmp = fX;
+ dst->fX = fY;
+ dst->fY = -tmp;
+}
+
+void SkPoint::scale(SkScalar scale, SkPoint* dst) const {
+ SkASSERT(dst);
+ dst->set(SkScalarMul(fX, scale), SkScalarMul(fY, scale));
+}
+
+bool SkPoint::normalize() {
+ return this->setLength(fX, fY, SK_Scalar1);
+}
+
+bool SkPoint::setNormalize(SkScalar x, SkScalar y) {
+ return this->setLength(x, y, SK_Scalar1);
+}
+
+bool SkPoint::setLength(SkScalar length) {
+ return this->setLength(fX, fY, length);
+}
+
+// Returns the square of the Euclidean distance to (dx,dy).
+static inline float getLengthSquared(float dx, float dy) {
+ return dx * dx + dy * dy;
+}
+
+// Calculates the square of the Euclidean distance to (dx,dy) and stores it in
+// *lengthSquared. Returns true if the distance is judged to be "nearly zero".
+//
+// This logic is encapsulated in a helper method to make it explicit that we
+// always perform this check in the same manner, to avoid inconsistencies
+// (see http://code.google.com/p/skia/issues/detail?id=560 ).
+static inline bool is_length_nearly_zero(float dx, float dy,
+ float *lengthSquared) {
+ *lengthSquared = getLengthSquared(dx, dy);
+ return *lengthSquared <= (SK_ScalarNearlyZero * SK_ScalarNearlyZero);
+}
+
+SkScalar SkPoint::Normalize(SkPoint* pt) {
+ float x = pt->fX;
+ float y = pt->fY;
+ float mag2;
+ if (is_length_nearly_zero(x, y, &mag2)) {
+ pt->set(0, 0);
+ return 0;
+ }
+
+ float mag, scale;
+ if (SkScalarIsFinite(mag2)) {
+ mag = sk_float_sqrt(mag2);
+ scale = 1 / mag;
+ } else {
+ // our mag2 step overflowed to infinity, so use doubles instead.
+        // much slower, but needed when x or y are very large, otherwise we
+        // divide by inf. and return a (0,0) vector.
+ double xx = x;
+ double yy = y;
+ double magmag = sqrt(xx * xx + yy * yy);
+ mag = (float)magmag;
+ // we perform the divide with the double magmag, to stay exactly the
+ // same as setLength. It would be faster to perform the divide with
+ // mag, but it is possible that mag has overflowed to inf. but still
+ // have a non-zero value for scale (thanks to denormalized numbers).
+ scale = (float)(1 / magmag);
+ }
+ pt->set(x * scale, y * scale);
+ return mag;
+}
+
+SkScalar SkPoint::Length(SkScalar dx, SkScalar dy) {
+ float mag2 = dx * dx + dy * dy;
+ if (SkScalarIsFinite(mag2)) {
+ return sk_float_sqrt(mag2);
+ } else {
+ double xx = dx;
+ double yy = dy;
+ return (float)sqrt(xx * xx + yy * yy);
+ }
+}
+
+/*
+ * We have to worry about 2 tricky conditions:
+ * 1. underflow of mag2 (compared against nearlyzero^2)
+ * 2. overflow of mag2 (compared w/ isfinite)
+ *
+ * If we underflow, we return false. If we overflow, we compute again using
+ * doubles, which is much slower (3x in a desktop test) but will not overflow.
+ */
+bool SkPoint::setLength(float x, float y, float length) {
+ float mag2;
+ if (is_length_nearly_zero(x, y, &mag2)) {
+ this->set(0, 0);
+ return false;
+ }
+
+ float scale;
+ if (SkScalarIsFinite(mag2)) {
+ scale = length / sk_float_sqrt(mag2);
+ } else {
+ // our mag2 step overflowed to infinity, so use doubles instead.
+        // much slower, but needed when x or y are very large, otherwise we
+        // divide by inf. and return a (0,0) vector.
+ double xx = x;
+ double yy = y;
+ #ifdef SK_CPU_FLUSH_TO_ZERO
+ // The iOS ARM processor discards small denormalized numbers to go faster.
+ // Casting this to a float would cause the scale to go to zero. Keeping it
+ // as a double for the multiply keeps the scale non-zero.
+ double dscale = length / sqrt(xx * xx + yy * yy);
+ fX = x * dscale;
+ fY = y * dscale;
+ return true;
+ #else
+ scale = (float)(length / sqrt(xx * xx + yy * yy));
+ #endif
+ }
+ fX = x * scale;
+ fY = y * scale;
+ return true;
+}
+
+bool SkPoint::setLengthFast(float length) {
+ return this->setLengthFast(fX, fY, length);
+}
+
+bool SkPoint::setLengthFast(float x, float y, float length) {
+ float mag2;
+ if (is_length_nearly_zero(x, y, &mag2)) {
+ this->set(0, 0);
+ return false;
+ }
+
+ float scale;
+ if (SkScalarIsFinite(mag2)) {
+ scale = length * sk_float_rsqrt(mag2); // <--- this is the difference
+ } else {
+ // our mag2 step overflowed to infinity, so use doubles instead.
+        // much slower, but needed when x or y are very large, otherwise we
+        // divide by inf. and return a (0,0) vector.
+ double xx = x;
+ double yy = y;
+ scale = (float)(length / sqrt(xx * xx + yy * yy));
+ }
+ fX = x * scale;
+ fY = y * scale;
+ return true;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkScalar SkPoint::distanceToLineBetweenSqd(const SkPoint& a,
+ const SkPoint& b,
+ Side* side) const {
+
+ SkVector u = b - a;
+ SkVector v = *this - a;
+
+ SkScalar uLengthSqd = u.lengthSqd();
+ SkScalar det = u.cross(v);
+ if (side) {
+ SkASSERT(-1 == SkPoint::kLeft_Side &&
+ 0 == SkPoint::kOn_Side &&
+ 1 == kRight_Side);
+ *side = (Side) SkScalarSignAsInt(det);
+ }
+ SkScalar temp = det / uLengthSqd;
+ temp *= det;
+ return temp;
+}
+
+SkScalar SkPoint::distanceToLineSegmentBetweenSqd(const SkPoint& a,
+ const SkPoint& b) const {
+    // See the comments for distanceToLineBetweenSqd. If the projection of c onto
+    // u falls between a and b then this returns the same result as that
+    // function. Otherwise, it returns the distance to the closer of a and
+    // b. Let the projection of v onto u be v'. There are three cases:
+    //  1. v' points opposite to u. c is not between a and b and is closer
+    //      to a than to b.
+    //  2. v' points along u and has magnitude less than |u|. c is between
+    //      a and b and the distance to the segment is the same as the distance
+    //      to the line ab.
+    //  3. v' points along u and has magnitude greater than |u|. c is not
+    //      between a and b and is closer to b than to a.
+    //  v' = (u dot v) * u / |u|^2, so |v'| = (u dot v) / |u|. If (u dot v)/|u| is
+    //  less than zero we're in case 1. If (u dot v)/|u| is > |u| we are in case 3.
+    //  Otherwise we're in case 2. We actually compare (u dot v) to 0 and to |u|^2
+    //  so we can avoid the sqrt needed to compute |u|.
+
+ SkVector u = b - a;
+ SkVector v = *this - a;
+
+ SkScalar uLengthSqd = u.lengthSqd();
+ SkScalar uDotV = SkPoint::DotProduct(u, v);
+
+ if (uDotV <= 0) {
+ return v.lengthSqd();
+ } else if (uDotV > uLengthSqd) {
+ return b.distanceToSqd(*this);
+ } else {
+ SkScalar det = u.cross(v);
+ SkScalar temp = det / uLengthSqd;
+ temp *= det;
+ return temp;
+ }
+}
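To make the three cases in the comment above concrete, here is a small standalone restatement of the same classification using plain floats rather than Skia types; it is an illustration of the logic only, not code used by Skia, and dist_sq_to_segment is an arbitrary name.

    // Squared distance from point (px,py) to the segment from (ax,ay) to (bx,by).
    static float dist_sq_to_segment(float px, float py,
                                    float ax, float ay,
                                    float bx, float by) {
        const float ux = bx - ax, uy = by - ay;   // u = b - a
        const float vx = px - ax, vy = py - ay;   // v = p - a
        const float uLenSq = ux * ux + uy * uy;
        const float uDotV  = ux * vx + uy * vy;

        if (uDotV <= 0) {                         // case 1: closest to a
            return vx * vx + vy * vy;
        }
        if (uDotV > uLenSq) {                     // case 3: closest to b
            const float wx = px - bx, wy = py - by;
            return wx * wx + wy * wy;
        }
        const float det = ux * vy - uy * vx;      // case 2: perpendicular distance
        return det * det / uLenSq;
    }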
diff --git a/gfx/skia/skia/src/core/SkPoint3.cpp b/gfx/skia/skia/src/core/SkPoint3.cpp
new file mode 100644
index 000000000..3b5586b06
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPoint3.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPoint3.h"
+
+// Returns the square of the Euclidean distance to (x,y,z).
+static inline float get_length_squared(float x, float y, float z) {
+ return x * x + y * y + z * z;
+}
+
+// Calculates the square of the Euclidean distance to (x,y,z) and stores it in
+// *lengthSquared. Returns true if the distance is judged to be "nearly zero".
+//
+// This logic is encapsulated in a helper method to make it explicit that we
+// always perform this check in the same manner, to avoid inconsistencies
+// (see http://code.google.com/p/skia/issues/detail?id=560 ).
+static inline bool is_length_nearly_zero(float x, float y, float z, float *lengthSquared) {
+ *lengthSquared = get_length_squared(x, y, z);
+ return *lengthSquared <= (SK_ScalarNearlyZero * SK_ScalarNearlyZero);
+}
+
+SkScalar SkPoint3::Length(SkScalar x, SkScalar y, SkScalar z) {
+ float magSq = get_length_squared(x, y, z);
+ if (SkScalarIsFinite(magSq)) {
+ return sk_float_sqrt(magSq);
+ } else {
+ double xx = x;
+ double yy = y;
+ double zz = z;
+ return (float)sqrt(xx * xx + yy * yy + zz * zz);
+ }
+}
+
+/*
+ * We have to worry about 2 tricky conditions:
+ * 1. underflow of magSq (compared against nearlyzero^2)
+ * 2. overflow of magSq (compared w/ isfinite)
+ *
+ * If we underflow, we return false. If we overflow, we compute again using
+ * doubles, which is much slower (3x in a desktop test) but will not overflow.
+ */
+bool SkPoint3::normalize() {
+ float magSq;
+ if (is_length_nearly_zero(fX, fY, fZ, &magSq)) {
+ this->set(0, 0, 0);
+ return false;
+ }
+
+ float scale;
+ if (SkScalarIsFinite(magSq)) {
+ scale = 1.0f / sk_float_sqrt(magSq);
+ } else {
+ // our magSq step overflowed to infinity, so use doubles instead.
+ // much slower, but needed when x, y or z is very large, otherwise we
+ // divide by inf. and return (0,0,0) vector.
+ double xx = fX;
+ double yy = fY;
+ double zz = fZ;
+#ifdef SK_CPU_FLUSH_TO_ZERO
+ // The iOS ARM processor discards small denormalized numbers to go faster.
+ // Casting this to a float would cause the scale to go to zero. Keeping it
+ // as a double for the multiply keeps the scale non-zero.
+        double dscale = 1.0f / sqrt(xx * xx + yy * yy + zz * zz);
+        fX = (float)(xx * dscale);
+        fY = (float)(yy * dscale);
+        fZ = (float)(zz * dscale);
+ return true;
+#else
+ scale = (float)(1.0f / sqrt(xx * xx + yy * yy + zz * zz));
+#endif
+ }
+ fX *= scale;
+ fY *= scale;
+ fZ *= scale;
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkPtrRecorder.cpp b/gfx/skia/skia/src/core/SkPtrRecorder.cpp
new file mode 100644
index 000000000..dd73a7c29
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPtrRecorder.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkPtrRecorder.h"
+#include "SkTSearch.h"
+
+void SkPtrSet::reset() {
+ Pair* p = fList.begin();
+ Pair* stop = fList.end();
+ while (p < stop) {
+ this->decPtr(p->fPtr);
+ p += 1;
+ }
+ fList.reset();
+}
+
+bool SkPtrSet::Less(const Pair& a, const Pair& b) {
+ return (char*)a.fPtr < (char*)b.fPtr;
+}
+
+uint32_t SkPtrSet::find(void* ptr) const {
+ if (nullptr == ptr) {
+ return 0;
+ }
+
+ int count = fList.count();
+ Pair pair;
+ pair.fPtr = ptr;
+
+ int index = SkTSearch<Pair, Less>(fList.begin(), count, pair, sizeof(pair));
+ if (index < 0) {
+ return 0;
+ }
+ return fList[index].fIndex;
+}
+
+uint32_t SkPtrSet::add(void* ptr) {
+ if (nullptr == ptr) {
+ return 0;
+ }
+
+ int count = fList.count();
+ Pair pair;
+ pair.fPtr = ptr;
+
+ int index = SkTSearch<Pair, Less>(fList.begin(), count, pair, sizeof(pair));
+ if (index < 0) {
+ index = ~index; // turn it back into an index for insertion
+ this->incPtr(ptr);
+ pair.fIndex = count + 1;
+ *fList.insert(index) = pair;
+ return count + 1;
+ } else {
+ return fList[index].fIndex;
+ }
+}
+
+void SkPtrSet::copyToArray(void* array[]) const {
+ int count = fList.count();
+ if (count > 0) {
+ SkASSERT(array);
+ const Pair* p = fList.begin();
+ // p->fIndex is base-1, so we need to subtract to find its slot
+ for (int i = 0; i < count; i++) {
+ int index = p[i].fIndex - 1;
+ SkASSERT((unsigned)index < (unsigned)count);
+ array[index] = p[i].fPtr;
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkPtrRecorder.h b/gfx/skia/skia/src/core/SkPtrRecorder.h
new file mode 100644
index 000000000..402278e9b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPtrRecorder.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPtrSet_DEFINED
+#define SkPtrSet_DEFINED
+
+#include "SkRefCnt.h"
+#include "SkFlattenable.h"
+#include "SkTDArray.h"
+
+/**
+ * Maintains a set of ptrs, assigning each a unique ID [1...N]. Duplicate ptrs
+ * return the same ID (since it's a set). Subclasses can override incPtr()
+ * and decPtr(). incPtr() is called each time a unique ptr is added to the
+ * set. decPtr() is called on each ptr when the set is destroyed or reset.
+ */
+class SkPtrSet : public SkRefCnt {
+public:
+
+
+ /**
+ * Search for the specified ptr in the set. If it is found, return its
+ * 32bit ID [1..N], or if not found, return 0. Always returns 0 for nullptr.
+ */
+ uint32_t find(void*) const;
+
+ /**
+ * Add the specified ptr to the set, returning a unique 32bit ID for it
+ * [1...N]. Duplicate ptrs will return the same ID.
+ *
+ * If the ptr is nullptr, it is not added, and 0 is returned.
+ */
+ uint32_t add(void*);
+
+ /**
+ * Return the number of (non-null) ptrs in the set.
+ */
+ int count() const { return fList.count(); }
+
+ /**
+ * Copy the ptrs in the set into the specified array (allocated by the
+     *  caller). The ptrs are assigned to the array based on their corresponding
+ * ID. e.g. array[ptr.ID - 1] = ptr.
+ *
+ * incPtr() and decPtr() are not called during this operation.
+ */
+ void copyToArray(void* array[]) const;
+
+ /**
+     *  Call decPtr() on each ptr in the set, and then reset the size of the set
+ * to 0.
+ */
+ void reset();
+
+ /**
+ * Set iterator.
+ */
+ class Iter {
+ public:
+ Iter(const SkPtrSet& set)
+ : fSet(set)
+ , fIndex(0) {}
+
+ /**
+ * Return the next ptr in the set or null if the end was reached.
+ */
+ void* next() {
+ return fIndex < fSet.fList.count() ? fSet.fList[fIndex++].fPtr : nullptr;
+ }
+
+ private:
+ const SkPtrSet& fSet;
+ int fIndex;
+ };
+
+protected:
+ virtual void incPtr(void*) {}
+ virtual void decPtr(void*) {}
+
+private:
+ struct Pair {
+ void* fPtr; // never nullptr
+ uint32_t fIndex; // 1...N
+ };
+
+ // we store the ptrs in sorted-order (using Less) so that we can efficiently
+ // detect duplicates when add() is called. Hence we need to store the
+ // ptr and its ID/fIndex explicitly, since the ptr's position in the array
+ // is not related to its "index".
+ SkTDArray<Pair> fList;
+
+ static bool Less(const Pair& a, const Pair& b);
+
+ typedef SkRefCnt INHERITED;
+};
+
+/**
+ * Templated wrapper for SkPtrSet, just meant to automate typecasting
+ * parameters to and from void* (which the base class expects).
+ */
+template <typename T> class SkTPtrSet : public SkPtrSet {
+public:
+ uint32_t find(T ptr) {
+ return this->INHERITED::find((void*)ptr);
+ }
+ uint32_t add(T ptr) {
+ return this->INHERITED::add((void*)ptr);
+ }
+
+ void copyToArray(T* array) const {
+ this->INHERITED::copyToArray((void**)array);
+ }
+
+private:
+ typedef SkPtrSet INHERITED;
+};
+
+/**
+ * Subclass of SkTPtrSet specialized to call ref() and unref() when the
+ * base class's incPtr() and decPtr() are called. This makes it a valid owner
+ * of each ptr, which is released when the set is reset or destroyed.
+ */
+class SkRefCntSet : public SkTPtrSet<SkRefCnt*> {
+public:
+ virtual ~SkRefCntSet();
+
+protected:
+ // overrides
+ virtual void incPtr(void*);
+ virtual void decPtr(void*);
+};
+
+class SkFactorySet : public SkTPtrSet<SkFlattenable::Factory> {};
+
+/**
+ * Similar to SkFactorySet, but only allows Factorys that have registered names.
+ * Also has a function to return the next added Factory's name.
+ */
+class SkNamedFactorySet : public SkRefCnt {
+public:
+
+
+ SkNamedFactorySet();
+
+ /**
+ * Find the specified Factory in the set. If it is not already in the set,
+ * and has registered its name, add it to the set, and return its index.
+ * If the Factory has no registered name, return 0.
+ */
+ uint32_t find(SkFlattenable::Factory);
+
+ /**
+ * If new Factorys have been added to the set, return the name of the first
+ * Factory added after the Factory name returned by the last call to this
+ * function.
+ */
+ const char* getNextAddedFactoryName();
+private:
+ int fNextAddedFactory;
+ SkFactorySet fFactorySet;
+ SkTDArray<const char*> fNames;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
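A minimal usage sketch for the SkPtrSet/SkTPtrSet API declared above (it assumes the in-tree headers are on the include path; the function name and arguments are illustrative only):

// Sketch: IDs are 1-based and stable per unique ptr.
#include "SkPtrRecorder.h"

static void example_ptr_set(void* a, void* b) {   // assumes a != b and both non-null
    SkTPtrSet<void*> set;
    uint32_t idA  = set.add(a);   // first unique ptr gets ID 1
    uint32_t idA2 = set.add(a);   // duplicate ptr returns the same ID
    uint32_t idB  = set.add(b);   // next unique ptr gets ID 2
    SkASSERT(idA == 1 && idA2 == 1 && idB == 2);

    // copyToArray() places each ptr at slot ID - 1.
    void* array[2];
    set.copyToArray(array);
    SkASSERT(array[idA - 1] == a && array[idB - 1] == b);
}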
diff --git a/gfx/skia/skia/src/core/SkQuadClipper.cpp b/gfx/skia/skia/src/core/SkQuadClipper.cpp
new file mode 100644
index 000000000..fcde929ea
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkQuadClipper.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkQuadClipper.h"
+#include "SkGeometry.h"
+
+SkQuadClipper::SkQuadClipper() {
+ fClip.setEmpty();
+}
+
+void SkQuadClipper::setClip(const SkIRect& clip) {
+ // convert to scalars, since that's where we'll see the points
+ fClip.set(clip);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool chopMonoQuadAt(SkScalar c0, SkScalar c1, SkScalar c2,
+ SkScalar target, SkScalar* t) {
+ /* Solve F(t) = y where F(t) := [0](1-t)^2 + 2[1]t(1-t) + [2]t^2
+ * We solve for t using the quadratic formula, hence we have to rearrange
+ * our coefficients to look like At^2 + Bt + C
+ */
+ SkScalar A = c0 - c1 - c1 + c2;
+ SkScalar B = 2*(c1 - c0);
+ SkScalar C = c0 - target;
+
+ SkScalar roots[2]; // we only expect one, but make room for 2 for safety
+ int count = SkFindUnitQuadRoots(A, B, C, roots);
+ if (count) {
+ *t = roots[0];
+ return true;
+ }
+ return false;
+}
+
+static bool chopMonoQuadAtY(SkPoint pts[3], SkScalar y, SkScalar* t) {
+ return chopMonoQuadAt(pts[0].fY, pts[1].fY, pts[2].fY, y, t);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* If we somehow returned the fact that we had to flip the pts in Y, we could
+ communicate that to setQuadratic, and then avoid having to flip it back
+ here (only to have setQuadratic do the flip again)
+ */
+bool SkQuadClipper::clipQuad(const SkPoint srcPts[3], SkPoint dst[3]) {
+ bool reverse;
+
+ // we need the data to be monotonically increasing in Y
+ if (srcPts[0].fY > srcPts[2].fY) {
+ dst[0] = srcPts[2];
+ dst[1] = srcPts[1];
+ dst[2] = srcPts[0];
+ reverse = true;
+ } else {
+ memcpy(dst, srcPts, 3 * sizeof(SkPoint));
+ reverse = false;
+ }
+
+ // are we completely above or below
+ const SkScalar ctop = fClip.fTop;
+ const SkScalar cbot = fClip.fBottom;
+ if (dst[2].fY <= ctop || dst[0].fY >= cbot) {
+ return false;
+ }
+
+ SkScalar t;
+ SkPoint tmp[5]; // for SkChopQuadAt
+
+ // are we partially above
+ if (dst[0].fY < ctop) {
+ if (chopMonoQuadAtY(dst, ctop, &t)) {
+ // take the 2nd chopped quad
+ SkChopQuadAt(dst, tmp, t);
+ dst[0] = tmp[2];
+ dst[1] = tmp[3];
+ } else {
+ // if chopMonoQuadAtY failed, then we may have hit inexact numerics
+ // so we just clamp against the top
+ for (int i = 0; i < 3; i++) {
+ if (dst[i].fY < ctop) {
+ dst[i].fY = ctop;
+ }
+ }
+ }
+ }
+
+ // are we partially below
+ if (dst[2].fY > cbot) {
+ if (chopMonoQuadAtY(dst, cbot, &t)) {
+ SkChopQuadAt(dst, tmp, t);
+ dst[1] = tmp[1];
+ dst[2] = tmp[2];
+ } else {
+ // if chopMonoQuadAtY failed, then we may have hit inexact numerics
+ // so we just clamp against the bottom
+ for (int i = 0; i < 3; i++) {
+ if (dst[i].fY > cbot) {
+ dst[i].fY = cbot;
+ }
+ }
+ }
+ }
+
+ if (reverse) {
+ SkTSwap<SkPoint>(dst[0], dst[2]);
+ }
+ return true;
+}
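For reference, the rearrangement chopMonoQuadAt() relies on can be written as a standalone sketch that uses the plain quadratic formula instead of Skia's SkFindUnitQuadRoots() (names are illustrative):

// Solve c0*(1-t)^2 + 2*c1*t*(1-t) + c2*t^2 == target for t in [0, 1].
#include <cmath>
#include <initializer_list>

static bool solve_mono_quad(double c0, double c1, double c2,
                            double target, double* t) {
    double A = c0 - 2 * c1 + c2;   // == c0 - c1 - c1 + c2
    double B = 2 * (c1 - c0);
    double C = c0 - target;

    if (A == 0) {                  // the quad degenerates to a line
        if (B == 0) {
            return false;
        }
        double root = -C / B;
        if (root >= 0 && root <= 1) { *t = root; return true; }
        return false;
    }

    double disc = B * B - 4 * A * C;
    if (disc < 0) {
        return false;
    }
    double sq = std::sqrt(disc);
    for (double root : { (-B + sq) / (2 * A), (-B - sq) / (2 * A) }) {
        if (root >= 0 && root <= 1) {  // only roots inside the curve's domain count
            *t = root;
            return true;
        }
    }
    return false;
}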
diff --git a/gfx/skia/skia/src/core/SkQuadClipper.h b/gfx/skia/skia/src/core/SkQuadClipper.h
new file mode 100644
index 000000000..1dd8576b0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkQuadClipper.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkQuadClipper_DEFINED
+#define SkQuadClipper_DEFINED
+
+#include "SkPath.h"
+
+/** This class is initialized with a clip rectangle, and then can be fed quads,
+ which must already be monotonic in Y.
+
+ In the future, it might return a series of segments, allowing it to clip
+ also in X, to ensure that all segments fit in a finite coordinate system.
+ */
+class SkQuadClipper {
+public:
+ SkQuadClipper();
+
+ void setClip(const SkIRect& clip);
+
+ bool clipQuad(const SkPoint src[3], SkPoint dst[3]);
+
+private:
+ SkRect fClip;
+};
+
+/** Iterator that returns the clipped segments of a quad clipped to a rect.
+ The segments will be either lines or quads (based on SkPath::Verb), and
+ will all be monotonic in Y
+ */
+class SkQuadClipper2 {
+public:
+ bool clipQuad(const SkPoint pts[3], const SkRect& clip);
+ bool clipCubic(const SkPoint pts[4], const SkRect& clip);
+
+ SkPath::Verb next(SkPoint pts[]);
+
+private:
+ SkPoint* fCurrPoint;
+ SkPath::Verb* fCurrVerb;
+
+ enum {
+ kMaxVerbs = 13,
+ kMaxPoints = 32
+ };
+ SkPoint fPoints[kMaxPoints];
+ SkPath::Verb fVerbs[kMaxVerbs];
+
+ void clipMonoQuad(const SkPoint srcPts[3], const SkRect& clip);
+ void clipMonoCubic(const SkPoint srcPts[4], const SkRect& clip);
+ void appendVLine(SkScalar x, SkScalar y0, SkScalar y1, bool reverse);
+ void appendQuad(const SkPoint pts[3], bool reverse);
+ void appendCubic(const SkPoint pts[4], bool reverse);
+};
+
+#ifdef SK_DEBUG
+ void sk_assert_monotonic_x(const SkPoint pts[], int count);
+ void sk_assert_monotonic_y(const SkPoint pts[], int count);
+#else
+ #define sk_assert_monotonic_x(pts, count)
+ #define sk_assert_monotonic_y(pts, count)
+#endif
+
+#endif
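A rough usage sketch for SkQuadClipper as declared above (in-tree headers assumed; the clip and control points are arbitrary):

#include "SkQuadClipper.h"

static void example_clip_quad() {
    SkQuadClipper clipper;
    clipper.setClip(SkIRect::MakeLTRB(0, 0, 100, 100));

    // Control points must already be monotonic in Y for clipQuad().
    SkPoint src[3] = { { 10, -20 }, { 50, 60 }, { 90, 140 } };
    SkPoint dst[3];
    if (clipper.clipQuad(src, dst)) {
        // dst now holds the chopped quad, clamped to the clip's Y range.
    }
}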
diff --git a/gfx/skia/skia/src/core/SkRRect.cpp b/gfx/skia/skia/src/core/SkRRect.cpp
new file mode 100644
index 000000000..1188989cd
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRRect.cpp
@@ -0,0 +1,585 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <cmath>
+#include "SkRRect.h"
+#include "SkMatrix.h"
+#include "SkScaleToSides.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkRRect::setRectXY(const SkRect& rect, SkScalar xRad, SkScalar yRad) {
+ fRect = rect;
+ fRect.sort();
+
+ if (fRect.isEmpty() || !fRect.isFinite()) {
+ this->setEmpty();
+ return;
+ }
+
+ if (!SkScalarsAreFinite(xRad, yRad)) {
+ xRad = yRad = 0; // devolve into a simple rect
+ }
+ if (xRad <= 0 || yRad <= 0) {
+ // all corners are square in this case
+ this->setRect(rect);
+ return;
+ }
+
+ if (fRect.width() < xRad+xRad || fRect.height() < yRad+yRad) {
+ SkScalar scale = SkMinScalar(fRect.width() / (xRad + xRad), fRect.height() / (yRad + yRad));
+ SkASSERT(scale < SK_Scalar1);
+ xRad = SkScalarMul(xRad, scale);
+ yRad = SkScalarMul(yRad, scale);
+ }
+
+ for (int i = 0; i < 4; ++i) {
+ fRadii[i].set(xRad, yRad);
+ }
+ fType = kSimple_Type;
+ if (xRad >= SkScalarHalf(fRect.width()) && yRad >= SkScalarHalf(fRect.height())) {
+ fType = kOval_Type;
+ // TODO: assert that all the x&y radii are already W/2 & H/2
+ }
+
+ SkASSERT(this->isValid());
+}
+
+void SkRRect::setNinePatch(const SkRect& rect, SkScalar leftRad, SkScalar topRad,
+ SkScalar rightRad, SkScalar bottomRad) {
+ fRect = rect;
+ fRect.sort();
+
+ if (fRect.isEmpty() || !fRect.isFinite()) {
+ this->setEmpty();
+ return;
+ }
+
+ const SkScalar array[4] = { leftRad, topRad, rightRad, bottomRad };
+ if (!SkScalarsAreFinite(array, 4)) {
+ this->setRect(rect); // devolve into a simple rect
+ return;
+ }
+
+ leftRad = SkMaxScalar(leftRad, 0);
+ topRad = SkMaxScalar(topRad, 0);
+ rightRad = SkMaxScalar(rightRad, 0);
+ bottomRad = SkMaxScalar(bottomRad, 0);
+
+ SkScalar scale = SK_Scalar1;
+ if (leftRad + rightRad > fRect.width()) {
+ scale = fRect.width() / (leftRad + rightRad);
+ }
+ if (topRad + bottomRad > fRect.height()) {
+ scale = SkMinScalar(scale, fRect.height() / (topRad + bottomRad));
+ }
+
+ if (scale < SK_Scalar1) {
+ leftRad = SkScalarMul(leftRad, scale);
+ topRad = SkScalarMul(topRad, scale);
+ rightRad = SkScalarMul(rightRad, scale);
+ bottomRad = SkScalarMul(bottomRad, scale);
+ }
+
+ if (leftRad == rightRad && topRad == bottomRad) {
+ if (leftRad >= SkScalarHalf(fRect.width()) && topRad >= SkScalarHalf(fRect.height())) {
+ fType = kOval_Type;
+ } else if (0 == leftRad || 0 == topRad) {
+ // If the left and (by equality check above) right radii are zero then it is a rect.
+ // Same goes for top/bottom.
+ fType = kRect_Type;
+ leftRad = 0;
+ topRad = 0;
+ rightRad = 0;
+ bottomRad = 0;
+ } else {
+ fType = kSimple_Type;
+ }
+ } else {
+ fType = kNinePatch_Type;
+ }
+
+ fRadii[kUpperLeft_Corner].set(leftRad, topRad);
+ fRadii[kUpperRight_Corner].set(rightRad, topRad);
+ fRadii[kLowerRight_Corner].set(rightRad, bottomRad);
+ fRadii[kLowerLeft_Corner].set(leftRad, bottomRad);
+
+ SkASSERT(this->isValid());
+}
+
+// These parameters are intentionally double. Apropos crbug.com/463920, if one of the
+// radii is huge while the other is small, single precision math can completely
+// miss the fact that a scale is required.
+static double compute_min_scale(double rad1, double rad2, double limit, double curMin) {
+ if ((rad1 + rad2) > limit) {
+ return SkTMin(curMin, limit / (rad1 + rad2));
+ }
+ return curMin;
+}
+
+void SkRRect::setRectRadii(const SkRect& rect, const SkVector radii[4]) {
+ fRect = rect;
+ fRect.sort();
+
+ if (fRect.isEmpty() || !fRect.isFinite()) {
+ this->setEmpty();
+ return;
+ }
+
+ if (!SkScalarsAreFinite(&radii[0].fX, 8)) {
+ this->setRect(rect); // devolve into a simple rect
+ return;
+ }
+
+ memcpy(fRadii, radii, sizeof(fRadii));
+
+ bool allCornersSquare = true;
+
+ // Clamp negative radii to zero
+ for (int i = 0; i < 4; ++i) {
+ if (fRadii[i].fX <= 0 || fRadii[i].fY <= 0) {
+ // In this case we are being a little fast & loose. Since one of
+ // the radii is 0 the corner is square. However, the other radii
+ // could still be non-zero and play in the global scale factor
+ // computation.
+ fRadii[i].fX = 0;
+ fRadii[i].fY = 0;
+ } else {
+ allCornersSquare = false;
+ }
+ }
+
+ if (allCornersSquare) {
+ this->setRect(rect);
+ return;
+ }
+
+ this->scaleRadii();
+}
+
+void SkRRect::scaleRadii() {
+
+ // Proportionally scale down all radii to fit. Find the minimum ratio
+ // of a side and the radii on that side (for all four sides) and use
+ // that to scale down _all_ the radii. This algorithm is from the
+ // W3 spec (http://www.w3.org/TR/css3-background/) section 5.5 - Overlapping
+ // Curves:
+ // "Let f = min(Li/Si), where i is one of { top, right, bottom, left },
+ // Si is the sum of the two corresponding radii of the corners on side i,
+ // and Ltop = Lbottom = the width of the box,
+ // and Lleft = Lright = the height of the box.
+ // If f < 1, then all corner radii are reduced by multiplying them by f."
+ double scale = 1.0;
+
+ // The sides of the rectangle may be larger than a float can represent.
+ double width = (double)fRect.fRight - (double)fRect.fLeft;
+ double height = (double)fRect.fBottom - (double)fRect.fTop;
+ scale = compute_min_scale(fRadii[0].fX, fRadii[1].fX, width, scale);
+ scale = compute_min_scale(fRadii[1].fY, fRadii[2].fY, height, scale);
+ scale = compute_min_scale(fRadii[2].fX, fRadii[3].fX, width, scale);
+ scale = compute_min_scale(fRadii[3].fY, fRadii[0].fY, height, scale);
+
+ if (scale < 1.0) {
+ SkScaleToSides::AdjustRadii(width, scale, &fRadii[0].fX, &fRadii[1].fX);
+ SkScaleToSides::AdjustRadii(height, scale, &fRadii[1].fY, &fRadii[2].fY);
+ SkScaleToSides::AdjustRadii(width, scale, &fRadii[2].fX, &fRadii[3].fX);
+ SkScaleToSides::AdjustRadii(height, scale, &fRadii[3].fY, &fRadii[0].fY);
+ }
+
+ // At this point we're either oval, simple, or complex (not empty or rect).
+ this->computeType();
+
+ SkASSERT(this->isValid());
+}
+
+// This method determines if a point known to be inside the RRect's bounds is
+// inside all the corners.
+bool SkRRect::checkCornerContainment(SkScalar x, SkScalar y) const {
+ SkPoint canonicalPt; // (x,y) translated to one of the quadrants
+ int index;
+
+ if (kOval_Type == this->type()) {
+ canonicalPt.set(x - fRect.centerX(), y - fRect.centerY());
+ index = kUpperLeft_Corner; // any corner will do in this case
+ } else {
+ if (x < fRect.fLeft + fRadii[kUpperLeft_Corner].fX &&
+ y < fRect.fTop + fRadii[kUpperLeft_Corner].fY) {
+ // UL corner
+ index = kUpperLeft_Corner;
+ canonicalPt.set(x - (fRect.fLeft + fRadii[kUpperLeft_Corner].fX),
+ y - (fRect.fTop + fRadii[kUpperLeft_Corner].fY));
+ SkASSERT(canonicalPt.fX < 0 && canonicalPt.fY < 0);
+ } else if (x < fRect.fLeft + fRadii[kLowerLeft_Corner].fX &&
+ y > fRect.fBottom - fRadii[kLowerLeft_Corner].fY) {
+ // LL corner
+ index = kLowerLeft_Corner;
+ canonicalPt.set(x - (fRect.fLeft + fRadii[kLowerLeft_Corner].fX),
+ y - (fRect.fBottom - fRadii[kLowerLeft_Corner].fY));
+ SkASSERT(canonicalPt.fX < 0 && canonicalPt.fY > 0);
+ } else if (x > fRect.fRight - fRadii[kUpperRight_Corner].fX &&
+ y < fRect.fTop + fRadii[kUpperRight_Corner].fY) {
+ // UR corner
+ index = kUpperRight_Corner;
+ canonicalPt.set(x - (fRect.fRight - fRadii[kUpperRight_Corner].fX),
+ y - (fRect.fTop + fRadii[kUpperRight_Corner].fY));
+ SkASSERT(canonicalPt.fX > 0 && canonicalPt.fY < 0);
+ } else if (x > fRect.fRight - fRadii[kLowerRight_Corner].fX &&
+ y > fRect.fBottom - fRadii[kLowerRight_Corner].fY) {
+ // LR corner
+ index = kLowerRight_Corner;
+ canonicalPt.set(x - (fRect.fRight - fRadii[kLowerRight_Corner].fX),
+ y - (fRect.fBottom - fRadii[kLowerRight_Corner].fY));
+ SkASSERT(canonicalPt.fX > 0 && canonicalPt.fY > 0);
+ } else {
+ // not in any of the corners
+ return true;
+ }
+ }
+
+ // A point is in an ellipse (in standard position) if:
+ // x^2 y^2
+ // ----- + ----- <= 1
+ // a^2 b^2
+ // or :
+ // b^2*x^2 + a^2*y^2 <= (ab)^2
+ SkScalar dist = SkScalarMul(SkScalarSquare(canonicalPt.fX), SkScalarSquare(fRadii[index].fY)) +
+ SkScalarMul(SkScalarSquare(canonicalPt.fY), SkScalarSquare(fRadii[index].fX));
+ return dist <= SkScalarSquare(SkScalarMul(fRadii[index].fX, fRadii[index].fY));
+}
+
+bool SkRRect::allCornersCircular() const {
+ return fRadii[0].fX == fRadii[0].fY &&
+ fRadii[1].fX == fRadii[1].fY &&
+ fRadii[2].fX == fRadii[2].fY &&
+ fRadii[3].fX == fRadii[3].fY;
+}
+
+bool SkRRect::contains(const SkRect& rect) const {
+ if (!this->getBounds().contains(rect)) {
+ // If 'rect' isn't contained by the RR's bounds then the
+ // RR definitely doesn't contain it
+ return false;
+ }
+
+ if (this->isRect()) {
+ // the prior test was sufficient
+ return true;
+ }
+
+ // At this point we know all four corners of 'rect' are inside the
+ // bounds of this RR. Check to make sure all the corners are inside
+ // all the curves
+ return this->checkCornerContainment(rect.fLeft, rect.fTop) &&
+ this->checkCornerContainment(rect.fRight, rect.fTop) &&
+ this->checkCornerContainment(rect.fRight, rect.fBottom) &&
+ this->checkCornerContainment(rect.fLeft, rect.fBottom);
+}
+
+static bool radii_are_nine_patch(const SkVector radii[4]) {
+ return radii[SkRRect::kUpperLeft_Corner].fX == radii[SkRRect::kLowerLeft_Corner].fX &&
+ radii[SkRRect::kUpperLeft_Corner].fY == radii[SkRRect::kUpperRight_Corner].fY &&
+ radii[SkRRect::kUpperRight_Corner].fX == radii[SkRRect::kLowerRight_Corner].fX &&
+ radii[SkRRect::kLowerLeft_Corner].fY == radii[SkRRect::kLowerRight_Corner].fY;
+}
+
+// There is a simplified version of this method in setRectXY
+void SkRRect::computeType() {
+ struct Validator {
+ Validator(const SkRRect* r) : fR(r) {}
+ ~Validator() { SkASSERT(fR->isValid()); }
+ const SkRRect* fR;
+ } autoValidate(this);
+
+ if (fRect.isEmpty()) {
+ fType = kEmpty_Type;
+ return;
+ }
+
+ bool allRadiiEqual = true; // are all x radii equal to each other, and likewise all y radii?
+ bool allCornersSquare = 0 == fRadii[0].fX || 0 == fRadii[0].fY;
+
+ for (int i = 1; i < 4; ++i) {
+ if (0 != fRadii[i].fX && 0 != fRadii[i].fY) {
+ // if either radius is zero the corner is square so both have to
+ // be non-zero to have a rounded corner
+ allCornersSquare = false;
+ }
+ if (fRadii[i].fX != fRadii[i-1].fX || fRadii[i].fY != fRadii[i-1].fY) {
+ allRadiiEqual = false;
+ }
+ }
+
+ if (allCornersSquare) {
+ fType = kRect_Type;
+ return;
+ }
+
+ if (allRadiiEqual) {
+ if (fRadii[0].fX >= SkScalarHalf(fRect.width()) &&
+ fRadii[0].fY >= SkScalarHalf(fRect.height())) {
+ fType = kOval_Type;
+ } else {
+ fType = kSimple_Type;
+ }
+ return;
+ }
+
+ if (radii_are_nine_patch(fRadii)) {
+ fType = kNinePatch_Type;
+ } else {
+ fType = kComplex_Type;
+ }
+}
+
+static bool matrix_only_scale_and_translate(const SkMatrix& matrix) {
+ const SkMatrix::TypeMask m = (SkMatrix::TypeMask) (SkMatrix::kAffine_Mask
+ | SkMatrix::kPerspective_Mask);
+ return (matrix.getType() & m) == 0;
+}
+
+bool SkRRect::transform(const SkMatrix& matrix, SkRRect* dst) const {
+ if (nullptr == dst) {
+ return false;
+ }
+
+ // Assert that the caller is not trying to do this in place, which
+ // would violate const-ness. Do not return false though, so that
+ // if they know what they're doing and want to violate it they can.
+ SkASSERT(dst != this);
+
+ if (matrix.isIdentity()) {
+ *dst = *this;
+ return true;
+ }
+
+ // If transform supported 90 degree rotations (which it could), we could
+ // use SkMatrix::rectStaysRect() to check for a valid transformation.
+ if (!matrix_only_scale_and_translate(matrix)) {
+ return false;
+ }
+
+ SkRect newRect;
+ if (!matrix.mapRect(&newRect, fRect)) {
+ return false;
+ }
+
+ // The matrix may have scaled us to zero (or, due to float madness, we may now have
+ // collapsed some dimension of the rect), so we need to check for that.
+ if (newRect.isEmpty()) {
+ dst->setEmpty();
+ return true;
+ }
+
+ // At this point, this is guaranteed to succeed, so we can modify dst.
+ dst->fRect = newRect;
+
+ // Since the only transforms that were allowed are scale and translate, the type
+ // remains unchanged.
+ dst->fType = fType;
+
+ if (kOval_Type == fType) {
+ for (int i = 0; i < 4; ++i) {
+ dst->fRadii[i].fX = SkScalarHalf(newRect.width());
+ dst->fRadii[i].fY = SkScalarHalf(newRect.height());
+ }
+ SkASSERT(dst->isValid());
+ return true;
+ }
+
+ // Now scale each corner
+ SkScalar xScale = matrix.getScaleX();
+ const bool flipX = xScale < 0;
+ if (flipX) {
+ xScale = -xScale;
+ }
+ SkScalar yScale = matrix.getScaleY();
+ const bool flipY = yScale < 0;
+ if (flipY) {
+ yScale = -yScale;
+ }
+
+ // Scale the radii without respecting the flip.
+ for (int i = 0; i < 4; ++i) {
+ dst->fRadii[i].fX = SkScalarMul(fRadii[i].fX, xScale);
+ dst->fRadii[i].fY = SkScalarMul(fRadii[i].fY, yScale);
+ }
+
+ // Now swap as necessary.
+ if (flipX) {
+ if (flipY) {
+ // Swap with opposite corners
+ SkTSwap(dst->fRadii[kUpperLeft_Corner], dst->fRadii[kLowerRight_Corner]);
+ SkTSwap(dst->fRadii[kUpperRight_Corner], dst->fRadii[kLowerLeft_Corner]);
+ } else {
+ // Only swap in x
+ SkTSwap(dst->fRadii[kUpperRight_Corner], dst->fRadii[kUpperLeft_Corner]);
+ SkTSwap(dst->fRadii[kLowerRight_Corner], dst->fRadii[kLowerLeft_Corner]);
+ }
+ } else if (flipY) {
+ // Only swap in y
+ SkTSwap(dst->fRadii[kUpperLeft_Corner], dst->fRadii[kLowerLeft_Corner]);
+ SkTSwap(dst->fRadii[kUpperRight_Corner], dst->fRadii[kLowerRight_Corner]);
+ }
+
+ dst->scaleRadii();
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkRRect::inset(SkScalar dx, SkScalar dy, SkRRect* dst) const {
+ const SkRect r = fRect.makeInset(dx, dy);
+
+ if (r.isEmpty()) {
+ dst->setEmpty();
+ return;
+ }
+
+ SkVector radii[4];
+ memcpy(radii, fRadii, sizeof(radii));
+ for (int i = 0; i < 4; ++i) {
+ if (radii[i].fX) {
+ radii[i].fX -= dx;
+ }
+ if (radii[i].fY) {
+ radii[i].fY -= dy;
+ }
+ }
+ dst->setRectRadii(r, radii);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+size_t SkRRect::writeToMemory(void* buffer) const {
+ SkASSERT(kSizeInMemory == sizeof(SkRect) + sizeof(fRadii));
+
+ memcpy(buffer, &fRect, sizeof(SkRect));
+ memcpy((char*)buffer + sizeof(SkRect), fRadii, sizeof(fRadii));
+ return kSizeInMemory;
+}
+
+size_t SkRRect::readFromMemory(const void* buffer, size_t length) {
+ if (length < kSizeInMemory) {
+ return 0;
+ }
+
+ SkScalar storage[12];
+ SkASSERT(sizeof(storage) == kSizeInMemory);
+
+ // we make a local copy, to ensure alignment before we cast
+ memcpy(storage, buffer, kSizeInMemory);
+
+ this->setRectRadii(*(const SkRect*)&storage[0],
+ (const SkVector*)&storage[4]);
+ return kSizeInMemory;
+}
+
+#include "SkString.h"
+#include "SkStringUtils.h"
+
+void SkRRect::dump(bool asHex) const {
+ SkScalarAsStringType asType = asHex ? kHex_SkScalarAsStringType : kDec_SkScalarAsStringType;
+
+ fRect.dump(asHex);
+ SkString line("const SkPoint corners[] = {\n");
+ for (int i = 0; i < 4; ++i) {
+ SkString strX, strY;
+ SkAppendScalar(&strX, fRadii[i].x(), asType);
+ SkAppendScalar(&strY, fRadii[i].y(), asType);
+ line.appendf(" { %s, %s },", strX.c_str(), strY.c_str());
+ if (asHex) {
+ line.appendf(" /* %f %f */", fRadii[i].x(), fRadii[i].y());
+ }
+ line.append("\n");
+ }
+ line.append("};");
+ SkDebugf("%s\n", line.c_str());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * We need all combinations of predicates to be true to have a "safe" radius value.
+ */
+static bool are_radius_check_predicates_valid(SkScalar rad, SkScalar min, SkScalar max) {
+ return (min <= max) && (rad <= max - min) && (min + rad <= max) && (max - rad >= min);
+}
+
+bool SkRRect::isValid() const {
+ bool allRadiiZero = (0 == fRadii[0].fX && 0 == fRadii[0].fY);
+ bool allCornersSquare = (0 == fRadii[0].fX || 0 == fRadii[0].fY);
+ bool allRadiiSame = true;
+
+ for (int i = 1; i < 4; ++i) {
+ if (0 != fRadii[i].fX || 0 != fRadii[i].fY) {
+ allRadiiZero = false;
+ }
+
+ if (fRadii[i].fX != fRadii[i-1].fX || fRadii[i].fY != fRadii[i-1].fY) {
+ allRadiiSame = false;
+ }
+
+ if (0 != fRadii[i].fX && 0 != fRadii[i].fY) {
+ allCornersSquare = false;
+ }
+ }
+ bool patchesOfNine = radii_are_nine_patch(fRadii);
+
+ switch (fType) {
+ case kEmpty_Type:
+ if (!fRect.isEmpty() || !allRadiiZero || !allRadiiSame || !allCornersSquare) {
+ return false;
+ }
+ break;
+ case kRect_Type:
+ if (fRect.isEmpty() || !allRadiiZero || !allRadiiSame || !allCornersSquare) {
+ return false;
+ }
+ break;
+ case kOval_Type:
+ if (fRect.isEmpty() || allRadiiZero || !allRadiiSame || allCornersSquare) {
+ return false;
+ }
+
+ for (int i = 0; i < 4; ++i) {
+ if (!SkScalarNearlyEqual(fRadii[i].fX, SkScalarHalf(fRect.width())) ||
+ !SkScalarNearlyEqual(fRadii[i].fY, SkScalarHalf(fRect.height()))) {
+ return false;
+ }
+ }
+ break;
+ case kSimple_Type:
+ if (fRect.isEmpty() || allRadiiZero || !allRadiiSame || allCornersSquare) {
+ return false;
+ }
+ break;
+ case kNinePatch_Type:
+ if (fRect.isEmpty() || allRadiiZero || allRadiiSame || allCornersSquare ||
+ !patchesOfNine) {
+ return false;
+ }
+ break;
+ case kComplex_Type:
+ if (fRect.isEmpty() || allRadiiZero || allRadiiSame || allCornersSquare ||
+ patchesOfNine) {
+ return false;
+ }
+ break;
+ }
+
+ for (int i = 0; i < 4; ++i) {
+ if (!are_radius_check_predicates_valid(fRadii[i].fX, fRect.fLeft, fRect.fRight) ||
+ !are_radius_check_predicates_valid(fRadii[i].fY, fRect.fTop, fRect.fBottom)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
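The scaling rule used by scaleRadii() above reduces to the following standalone sketch (placeholder types; the real code also works in double to guard against float overflow and then fixes up the sides with SkScaleToSides::AdjustRadii):

// W3 "overlapping curves" rule: f = min over the four sides of
// (side length / sum of that side's two radii); if f < 1, scale every radius by f.
#include <algorithm>

struct CornerRadii { double x, y; };   // stand-in for SkVector, in UL/UR/LR/LL order

static double compute_radii_scale(double width, double height, const CornerRadii r[4]) {
    double scale = 1.0;
    auto upd = [&scale](double rad1, double rad2, double limit) {
        if (rad1 + rad2 > limit) {
            scale = std::min(scale, limit / (rad1 + rad2));
        }
    };
    upd(r[0].x, r[1].x, width);   // top edge
    upd(r[1].y, r[2].y, height);  // right edge
    upd(r[2].x, r[3].x, width);   // bottom edge
    upd(r[3].y, r[0].y, height);  // left edge
    return scale;                 // multiply all eight radii by this if < 1
}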
diff --git a/gfx/skia/skia/src/core/SkRTree.cpp b/gfx/skia/skia/src/core/SkRTree.cpp
new file mode 100644
index 000000000..bae2fdce3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRTree.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkRTree.h"
+
+SkRTree::SkRTree(SkScalar aspectRatio) : fCount(0), fAspectRatio(aspectRatio) {}
+
+SkRect SkRTree::getRootBound() const {
+ if (fCount) {
+ return fRoot.fBounds;
+ } else {
+ return SkRect::MakeEmpty();
+ }
+}
+
+void SkRTree::insert(const SkRect boundsArray[], int N) {
+ SkASSERT(0 == fCount);
+
+ SkTDArray<Branch> branches;
+ branches.setReserve(N);
+
+ for (int i = 0; i < N; i++) {
+ const SkRect& bounds = boundsArray[i];
+ if (bounds.isEmpty()) {
+ continue;
+ }
+
+ Branch* b = branches.push();
+ b->fBounds = bounds;
+ b->fOpIndex = i;
+ }
+
+ fCount = branches.count();
+ if (fCount) {
+ if (1 == fCount) {
+ fNodes.setReserve(1);
+ Node* n = this->allocateNodeAtLevel(0);
+ n->fNumChildren = 1;
+ n->fChildren[0] = branches[0];
+ fRoot.fSubtree = n;
+ fRoot.fBounds = branches[0].fBounds;
+ } else {
+ fNodes.setReserve(CountNodes(fCount, fAspectRatio));
+ fRoot = this->bulkLoad(&branches);
+ }
+ }
+}
+
+SkRTree::Node* SkRTree::allocateNodeAtLevel(uint16_t level) {
+ SkDEBUGCODE(Node* p = fNodes.begin());
+ Node* out = fNodes.push();
+ SkASSERT(fNodes.begin() == p); // If this fails, we didn't setReserve() enough.
+ out->fNumChildren = 0;
+ out->fLevel = level;
+ return out;
+}
+
+// This function parallels bulkLoad, but just counts how many nodes bulkLoad would allocate.
+int SkRTree::CountNodes(int branches, SkScalar aspectRatio) {
+ if (branches == 1) {
+ return 1;
+ }
+ int numBranches = branches / kMaxChildren;
+ int remainder = branches % kMaxChildren;
+ if (remainder > 0) {
+ numBranches++;
+ if (remainder >= kMinChildren) {
+ remainder = 0;
+ } else {
+ remainder = kMinChildren - remainder;
+ }
+ }
+ int numStrips = SkScalarCeilToInt(SkScalarSqrt(SkIntToScalar(numBranches) / aspectRatio));
+ int numTiles = SkScalarCeilToInt(SkIntToScalar(numBranches) / SkIntToScalar(numStrips));
+ int currentBranch = 0;
+ int nodes = 0;
+ for (int i = 0; i < numStrips; ++i) {
+ for (int j = 0; j < numTiles && currentBranch < branches; ++j) {
+ int incrementBy = kMaxChildren;
+ if (remainder != 0) {
+ if (remainder <= kMaxChildren - kMinChildren) {
+ incrementBy -= remainder;
+ remainder = 0;
+ } else {
+ incrementBy = kMinChildren;
+ remainder -= kMaxChildren - kMinChildren;
+ }
+ }
+ nodes++;
+ currentBranch++;
+ for (int k = 1; k < incrementBy && currentBranch < branches; ++k) {
+ currentBranch++;
+ }
+ }
+ }
+ return nodes + CountNodes(nodes, aspectRatio);
+}
+
+SkRTree::Branch SkRTree::bulkLoad(SkTDArray<Branch>* branches, int level) {
+ if (branches->count() == 1) { // Only one branch. It will be the root.
+ return (*branches)[0];
+ }
+
+ // We might sort our branches here, but we expect Blink to give us a reasonable x,y order.
+ // Skipping a call to sort (in Y) here resulted in a 17% win for recording with negligible
+ // difference in playback speed.
+ int numBranches = branches->count() / kMaxChildren;
+ int remainder = branches->count() % kMaxChildren;
+ int newBranches = 0;
+
+ if (remainder > 0) {
+ ++numBranches;
+ // If the remainder isn't enough to fill a node, we'll add fewer nodes to other branches.
+ if (remainder >= kMinChildren) {
+ remainder = 0;
+ } else {
+ remainder = kMinChildren - remainder;
+ }
+ }
+
+ int numStrips = SkScalarCeilToInt(SkScalarSqrt(SkIntToScalar(numBranches) / fAspectRatio));
+ int numTiles = SkScalarCeilToInt(SkIntToScalar(numBranches) / SkIntToScalar(numStrips));
+ int currentBranch = 0;
+
+ for (int i = 0; i < numStrips; ++i) {
+ // Might be worth sorting by X here too.
+ for (int j = 0; j < numTiles && currentBranch < branches->count(); ++j) {
+ int incrementBy = kMaxChildren;
+ if (remainder != 0) {
+ // if need be, omit some nodes to make up for remainder
+ if (remainder <= kMaxChildren - kMinChildren) {
+ incrementBy -= remainder;
+ remainder = 0;
+ } else {
+ incrementBy = kMinChildren;
+ remainder -= kMaxChildren - kMinChildren;
+ }
+ }
+ Node* n = allocateNodeAtLevel(level);
+ n->fNumChildren = 1;
+ n->fChildren[0] = (*branches)[currentBranch];
+ Branch b;
+ b.fBounds = (*branches)[currentBranch].fBounds;
+ b.fSubtree = n;
+ ++currentBranch;
+ for (int k = 1; k < incrementBy && currentBranch < branches->count(); ++k) {
+ b.fBounds.join((*branches)[currentBranch].fBounds);
+ n->fChildren[k] = (*branches)[currentBranch];
+ ++n->fNumChildren;
+ ++currentBranch;
+ }
+ (*branches)[newBranches] = b;
+ ++newBranches;
+ }
+ }
+ branches->setCount(newBranches);
+ return this->bulkLoad(branches, level + 1);
+}
+
+void SkRTree::search(const SkRect& query, SkTDArray<int>* results) const {
+ if (fCount > 0 && SkRect::Intersects(fRoot.fBounds, query)) {
+ this->search(fRoot.fSubtree, query, results);
+ }
+}
+
+void SkRTree::search(Node* node, const SkRect& query, SkTDArray<int>* results) const {
+ for (int i = 0; i < node->fNumChildren; ++i) {
+ if (SkRect::Intersects(node->fChildren[i].fBounds, query)) {
+ if (0 == node->fLevel) {
+ results->push(node->fChildren[i].fOpIndex);
+ } else {
+ this->search(node->fChildren[i].fSubtree, query, results);
+ }
+ }
+ }
+}
+
+size_t SkRTree::bytesUsed() const {
+ size_t byteCount = sizeof(SkRTree);
+
+ byteCount += fNodes.reserved() * sizeof(Node);
+
+ return byteCount;
+}
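The grid math that CountNodes() and bulkLoad() share comes down to the following standalone sketch (it deliberately omits the kMinChildren remainder redistribution that the real code performs):

// STR tiling: pack branches kMaxChildren at a time into nodes, then lay the
// nodes out as a numStrips x numTiles grid shaped roughly like aspectRatio.
#include <cmath>

static void str_grid(int branches, double aspectRatio, int kMaxChildren,
                     int* numNodes, int* numStrips, int* numTiles) {
    *numNodes  = (branches + kMaxChildren - 1) / kMaxChildren;          // ceil division
    *numStrips = (int)std::ceil(std::sqrt(*numNodes / aspectRatio));
    *numTiles  = (int)std::ceil((double)*numNodes / *numStrips);
}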
diff --git a/gfx/skia/skia/src/core/SkRTree.h b/gfx/skia/skia/src/core/SkRTree.h
new file mode 100644
index 000000000..499f7a5d7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRTree.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRTree_DEFINED
+#define SkRTree_DEFINED
+
+#include "SkBBoxHierarchy.h"
+#include "SkRect.h"
+#include "SkTDArray.h"
+
+/**
+ * An R-Tree implementation. In short, it is a balanced n-ary tree containing a hierarchy of
+ * bounding rectangles.
+ *
+ * It only supports bulk-loading, i.e. creation from a batch of bounding rectangles.
+ * This performs a bottom-up bulk load using the STR (sort-tile-recursive) algorithm.
+ *
+ * TODO: Experiment with other bulk-load algorithms (in particular the Hilbert pack variant,
+ * which groups rects by position on the Hilbert curve, is probably worth a look). There also
+ * exist top-down bulk load variants (VAMSplit, TopDownGreedy, etc).
+ *
+ * For more details see:
+ *
+ * Beckmann, N.; Kriegel, H. P.; Schneider, R.; Seeger, B. (1990). "The R*-tree:
+ * an efficient and robust access method for points and rectangles"
+ */
+class SkRTree : public SkBBoxHierarchy {
+public:
+
+
+ /**
+ * If you have some prior information about the distribution of bounds you're expecting, you
+ * can provide an optional aspect ratio parameter. This allows the bulk-load algorithm to
+ * create better proportioned tiles of rectangles.
+ */
+ explicit SkRTree(SkScalar aspectRatio = 1);
+ virtual ~SkRTree() {}
+
+ void insert(const SkRect[], int N) override;
+ void search(const SkRect& query, SkTDArray<int>* results) const override;
+ size_t bytesUsed() const override;
+
+ // Methods and constants below here are only public for tests.
+
+ // Return the depth of the tree structure.
+ int getDepth() const { return fCount ? fRoot.fSubtree->fLevel + 1 : 0; }
+ // Insertion count (not overall node count, which may be greater).
+ int getCount() const { return fCount; }
+
+ // Get the root bound.
+ SkRect getRootBound() const override;
+
+ // These values were empirically determined to produce reasonable performance in most cases.
+ static const int kMinChildren = 6,
+ kMaxChildren = 11;
+
+private:
+ struct Node;
+
+ struct Branch {
+ union {
+ Node* fSubtree;
+ int fOpIndex;
+ };
+ SkRect fBounds;
+ };
+
+ struct Node {
+ uint16_t fNumChildren;
+ uint16_t fLevel;
+ Branch fChildren[kMaxChildren];
+ };
+
+ void search(Node* root, const SkRect& query, SkTDArray<int>* results) const;
+
+ // Consumes the input array.
+ Branch bulkLoad(SkTDArray<Branch>* branches, int level = 0);
+
+ // How many times will bulkLoad() call allocateNodeAtLevel()?
+ static int CountNodes(int branches, SkScalar aspectRatio);
+
+ Node* allocateNodeAtLevel(uint16_t level);
+
+ // This is the count of data elements (rather than total nodes in the tree)
+ int fCount;
+ SkScalar fAspectRatio;
+ Branch fRoot;
+ SkTDArray<Node> fNodes;
+
+ typedef SkBBoxHierarchy INHERITED;
+};
+
+#endif
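A rough usage sketch of the public API above (in-tree headers assumed; the rectangles are arbitrary):

#include "SkRTree.h"

static void example_rtree() {
    SkRect bounds[3] = {
        SkRect::MakeLTRB(0,   0,  10,  10),
        SkRect::MakeLTRB(5,   5,  20,  20),
        SkRect::MakeLTRB(50, 50, 100, 100),
    };

    SkRTree tree;             // default aspect ratio of 1
    tree.insert(bounds, 3);   // one-shot bulk load; results refer to indices 0..N-1

    SkTDArray<int> hits;
    tree.search(SkRect::MakeLTRB(0, 0, 8, 8), &hits);
    // hits now holds the indices of every input rect intersecting the query
    // (here: 0 and 1).
}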
diff --git a/gfx/skia/skia/src/core/SkRWBuffer.cpp b/gfx/skia/skia/src/core/SkRWBuffer.cpp
new file mode 100644
index 000000000..e69070b5f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRWBuffer.cpp
@@ -0,0 +1,360 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAtomics.h"
+#include "SkRWBuffer.h"
+#include "SkStream.h"
+
+// Force small chunks to be a page's worth
+static const size_t kMinAllocSize = 4096;
+
+struct SkBufferBlock {
+ SkBufferBlock* fNext; // updated by the writer
+ size_t fUsed; // updated by the writer
+ const size_t fCapacity;
+
+ SkBufferBlock(size_t capacity) : fNext(nullptr), fUsed(0), fCapacity(capacity) {}
+
+ const void* startData() const { return this + 1; }
+
+ size_t avail() const { return fCapacity - fUsed; }
+ void* availData() { return (char*)this->startData() + fUsed; }
+
+ static SkBufferBlock* Alloc(size_t length) {
+ size_t capacity = LengthToCapacity(length);
+ void* buffer = sk_malloc_throw(sizeof(SkBufferBlock) + capacity);
+ return new (buffer) SkBufferBlock(capacity);
+ }
+
+ // Return number of bytes actually appended. Important that we always completely fill this block
+ // before spilling into the next, since the reader uses fCapacity to know how many it can read.
+ //
+ size_t append(const void* src, size_t length) {
+ this->validate();
+ size_t amount = SkTMin(this->avail(), length);
+ memcpy(this->availData(), src, amount);
+ fUsed += amount;
+ this->validate();
+ return amount;
+ }
+
+ // Do not call in the reader thread, since the writer may be updating fUsed.
+ // (The assertion is still true, but TSAN still may complain about its raciness.)
+ void validate() const {
+#ifdef SK_DEBUG
+ SkASSERT(fCapacity > 0);
+ SkASSERT(fUsed <= fCapacity);
+#endif
+ }
+
+private:
+ static size_t LengthToCapacity(size_t length) {
+ const size_t minSize = kMinAllocSize - sizeof(SkBufferBlock);
+ return SkTMax(length, minSize);
+ }
+};
+
+struct SkBufferHead {
+ mutable int32_t fRefCnt;
+ SkBufferBlock fBlock;
+
+ SkBufferHead(size_t capacity) : fRefCnt(1), fBlock(capacity) {}
+
+ static size_t LengthToCapacity(size_t length) {
+ const size_t minSize = kMinAllocSize - sizeof(SkBufferHead);
+ return SkTMax(length, minSize);
+ }
+
+ static SkBufferHead* Alloc(size_t length) {
+ size_t capacity = LengthToCapacity(length);
+ size_t size = sizeof(SkBufferHead) + capacity;
+ void* buffer = sk_malloc_throw(size);
+ return new (buffer) SkBufferHead(capacity);
+ }
+
+ void ref() const {
+ SkASSERT(fRefCnt > 0);
+ sk_atomic_inc(&fRefCnt);
+ }
+
+ void unref() const {
+ SkASSERT(fRefCnt > 0);
+ // A release here acts in place of all releases we "should" have been doing in ref().
+ if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) {
+ // Like unique(), the acquire is only needed on success.
+ SkBufferBlock* block = fBlock.fNext;
+ sk_free((void*)this);
+ while (block) {
+ SkBufferBlock* next = block->fNext;
+ sk_free(block);
+ block = next;
+ }
+ }
+ }
+
+ void validate(size_t minUsed, const SkBufferBlock* tail = nullptr) const {
+#ifdef SK_DEBUG
+ SkASSERT(fRefCnt > 0);
+ size_t totalUsed = 0;
+ const SkBufferBlock* block = &fBlock;
+ const SkBufferBlock* lastBlock = block;
+ while (block) {
+ block->validate();
+ totalUsed += block->fUsed;
+ lastBlock = block;
+ block = block->fNext;
+ }
+ SkASSERT(minUsed <= totalUsed);
+ if (tail) {
+ SkASSERT(tail == lastBlock);
+ }
+#endif
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// The reader can only access block.fCapacity (which never changes), and cannot access
+// block.fUsed, which may be updated by the writer.
+//
+SkROBuffer::SkROBuffer(const SkBufferHead* head, size_t available, const SkBufferBlock* tail)
+ : fHead(head), fAvailable(available), fTail(tail)
+{
+ if (head) {
+ fHead->ref();
+ SkASSERT(available > 0);
+ head->validate(available, tail);
+ } else {
+ SkASSERT(0 == available);
+ SkASSERT(!tail);
+ }
+}
+
+SkROBuffer::~SkROBuffer() {
+ if (fHead) {
+ fHead->unref();
+ }
+}
+
+SkROBuffer::Iter::Iter(const SkROBuffer* buffer) {
+ this->reset(buffer);
+}
+
+void SkROBuffer::Iter::reset(const SkROBuffer* buffer) {
+ fBuffer = buffer;
+ if (buffer && buffer->fHead) {
+ fBlock = &buffer->fHead->fBlock;
+ fRemaining = buffer->fAvailable;
+ } else {
+ fBlock = nullptr;
+ fRemaining = 0;
+ }
+}
+
+const void* SkROBuffer::Iter::data() const {
+ return fRemaining ? fBlock->startData() : nullptr;
+}
+
+size_t SkROBuffer::Iter::size() const {
+ if (!fBlock) {
+ return 0;
+ }
+ return SkTMin(fBlock->fCapacity, fRemaining);
+}
+
+bool SkROBuffer::Iter::next() {
+ if (fRemaining) {
+ fRemaining -= this->size();
+ if (fBuffer->fTail == fBlock) {
+ // There are more blocks, but fBuffer does not know about them.
+ SkASSERT(0 == fRemaining);
+ fBlock = nullptr;
+ } else {
+ fBlock = fBlock->fNext;
+ }
+ }
+ return fRemaining != 0;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkRWBuffer::SkRWBuffer(size_t initialCapacity) : fHead(nullptr), fTail(nullptr), fTotalUsed(0) {
+ if (initialCapacity) {
+ fHead = SkBufferHead::Alloc(initialCapacity);
+ fTail = &fHead->fBlock;
+ }
+}
+
+SkRWBuffer::~SkRWBuffer() {
+ this->validate();
+ if (fHead) {
+ fHead->unref();
+ }
+}
+
+// It is important that we always completely fill the current block before spilling over to the
+// next, since our reader will be using fCapacity (min'd against its total available) to know how
+// many bytes to read from a given block.
+//
+void SkRWBuffer::append(const void* src, size_t length, size_t reserve) {
+ this->validate();
+ if (0 == length) {
+ return;
+ }
+
+ fTotalUsed += length;
+
+ if (nullptr == fHead) {
+ fHead = SkBufferHead::Alloc(length + reserve);
+ fTail = &fHead->fBlock;
+ }
+
+ size_t written = fTail->append(src, length);
+ SkASSERT(written <= length);
+ src = (const char*)src + written;
+ length -= written;
+
+ if (length) {
+ SkBufferBlock* block = SkBufferBlock::Alloc(length + reserve);
+ fTail->fNext = block;
+ fTail = block;
+ written = fTail->append(src, length);
+ SkASSERT(written == length);
+ }
+ this->validate();
+}
+
+#ifdef SK_DEBUG
+void SkRWBuffer::validate() const {
+ if (fHead) {
+ fHead->validate(fTotalUsed, fTail);
+ } else {
+ SkASSERT(nullptr == fTail);
+ SkASSERT(0 == fTotalUsed);
+ }
+}
+#endif
+
+SkROBuffer* SkRWBuffer::newRBufferSnapshot() const {
+ return new SkROBuffer(fHead, fTotalUsed, fTail);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+class SkROBufferStreamAsset : public SkStreamAsset {
+ void validate() const {
+#ifdef SK_DEBUG
+ SkASSERT(fGlobalOffset <= fBuffer->size());
+ SkASSERT(fLocalOffset <= fIter.size());
+ SkASSERT(fLocalOffset <= fGlobalOffset);
+#endif
+ }
+
+#ifdef SK_DEBUG
+ class AutoValidate {
+ SkROBufferStreamAsset* fStream;
+ public:
+ AutoValidate(SkROBufferStreamAsset* stream) : fStream(stream) { stream->validate(); }
+ ~AutoValidate() { fStream->validate(); }
+ };
+ #define AUTO_VALIDATE AutoValidate av(this);
+#else
+ #define AUTO_VALIDATE
+#endif
+
+public:
+ SkROBufferStreamAsset(const SkROBuffer* buffer) : fBuffer(SkRef(buffer)), fIter(buffer) {
+ fGlobalOffset = fLocalOffset = 0;
+ }
+
+ virtual ~SkROBufferStreamAsset() { fBuffer->unref(); }
+
+ size_t getLength() const override { return fBuffer->size(); }
+
+ bool rewind() override {
+ AUTO_VALIDATE
+ fIter.reset(fBuffer);
+ fGlobalOffset = fLocalOffset = 0;
+ return true;
+ }
+
+ size_t read(void* dst, size_t request) override {
+ AUTO_VALIDATE
+ size_t bytesRead = 0;
+ for (;;) {
+ size_t size = fIter.size();
+ SkASSERT(fLocalOffset <= size);
+ size_t avail = SkTMin(size - fLocalOffset, request - bytesRead);
+ if (dst) {
+ memcpy(dst, (const char*)fIter.data() + fLocalOffset, avail);
+ dst = (char*)dst + avail;
+ }
+ bytesRead += avail;
+ fLocalOffset += avail;
+ SkASSERT(bytesRead <= request);
+ if (bytesRead == request) {
+ break;
+ }
+ // If we get here, we've exhausted the current iter
+ SkASSERT(fLocalOffset == size);
+ fLocalOffset = 0;
+ if (!fIter.next()) {
+ break; // ran out of data
+ }
+ }
+ fGlobalOffset += bytesRead;
+ SkASSERT(fGlobalOffset <= fBuffer->size());
+ return bytesRead;
+ }
+
+ bool isAtEnd() const override {
+ return fBuffer->size() == fGlobalOffset;
+ }
+
+ SkStreamAsset* duplicate() const override { return new SkROBufferStreamAsset(fBuffer); }
+
+ size_t getPosition() const override {
+ return fGlobalOffset;
+ }
+
+ bool seek(size_t position) override {
+ AUTO_VALIDATE
+ if (position < fGlobalOffset) {
+ this->rewind();
+ }
+ (void)this->skip(position - fGlobalOffset);
+ return true;
+ }
+
+ bool move(long offset) override{
+ AUTO_VALIDATE
+ offset += fGlobalOffset;
+ if (offset <= 0) {
+ this->rewind();
+ } else {
+ (void)this->seek(SkToSizeT(offset));
+ }
+ return true;
+ }
+
+ SkStreamAsset* fork() const override {
+ SkStreamAsset* clone = this->duplicate();
+ clone->seek(this->getPosition());
+ return clone;
+ }
+
+
+private:
+ const SkROBuffer* fBuffer;
+ SkROBuffer::Iter fIter;
+ size_t fLocalOffset;
+ size_t fGlobalOffset;
+};
+
+SkStreamAsset* SkRWBuffer::newStreamSnapshot() const {
+ SkAutoTUnref<SkROBuffer> buffer(this->newRBufferSnapshot());
+ return new SkROBufferStreamAsset(buffer);
+}
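A sketch of the intended writer/reader flow (in-tree headers assumed; the explicit reserve argument of 0 is just for illustration):

#include "SkRWBuffer.h"

static size_t example_rwbuffer(const void* data, size_t len) {
    SkRWBuffer writer(0);          // no initial capacity; the first append allocates
    writer.append(data, len, 0);   // third arg reserves extra room for later appends

    // Snapshots are read-only and only see data appended before they were taken.
    SkAutoTUnref<SkROBuffer> snapshot(writer.newRBufferSnapshot());

    size_t total = 0;
    SkROBuffer::Iter iter(snapshot.get());
    do {
        total += iter.size();      // iter.data() / iter.size() expose one block at a time
    } while (iter.next());
    return total;                  // == len
}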
diff --git a/gfx/skia/skia/src/core/SkRadialShadowMapShader.cpp b/gfx/skia/skia/src/core/SkRadialShadowMapShader.cpp
new file mode 100644
index 000000000..29773a63b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRadialShadowMapShader.cpp
@@ -0,0 +1,431 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkLights.h"
+#include "SkPoint3.h"
+#include "SkRadialShadowMapShader.h"
+
+////////////////////////////////////////////////////////////////////////////
+#ifdef SK_EXPERIMENTAL_SHADOWING
+
+
+/** \class SkRadialShadowMapShaderImpl
+ This subclass of shader applies shadowing radially around a light
+*/
+class SkRadialShadowMapShaderImpl : public SkShader {
+public:
+ /** Create a new shadowing shader that shadows radially around a light
+ */
+ SkRadialShadowMapShaderImpl(sk_sp<SkShader> occluderShader,
+ sk_sp<SkLights> lights,
+ int diffuseWidth, int diffuseHeight)
+ : fOccluderShader(std::move(occluderShader))
+ , fLight(std::move(lights))
+ , fWidth(diffuseWidth)
+ , fHeight(diffuseHeight) { }
+
+ bool isOpaque() const override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(const AsFPArgs&) const override;
+#endif
+
+ class ShadowMapRadialShaderContext : public SkShader::Context {
+ public:
+ // The context takes ownership of the states. It will call their destructors
+ // but will NOT free the memory.
+ ShadowMapRadialShaderContext(const SkRadialShadowMapShaderImpl&, const ContextRec&,
+ SkShader::Context* occluderContext,
+ void* heapAllocated);
+
+ ~ShadowMapRadialShaderContext() override;
+
+ void shadeSpan(int x, int y, SkPMColor[], int count) override;
+
+ uint32_t getFlags() const override { return fFlags; }
+
+ private:
+ SkShader::Context* fOccluderContext;
+ uint32_t fFlags;
+
+ void* fHeapAllocated;
+
+ typedef SkShader::Context INHERITED;
+ };
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkRadialShadowMapShaderImpl)
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ size_t onContextSize(const ContextRec&) const override;
+ Context* onCreateContext(const ContextRec&, void*) const override;
+
+private:
+ sk_sp<SkShader> fOccluderShader;
+ sk_sp<SkLights> fLight;
+
+ int fWidth;
+ int fHeight;
+
+ friend class SkRadialShadowMapShader;
+
+ typedef SkShader INHERITED;
+};
+
+////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+#include "GrContext.h"
+#include "GrCoordTransform.h"
+#include "GrFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "SkGr.h"
+#include "SkGrPriv.h"
+#include "SkImage_Base.h"
+#include "GrInvariantOutput.h"
+#include "SkSpecialImage.h"
+
+class RadialShadowMapFP : public GrFragmentProcessor {
+public:
+ RadialShadowMapFP(sk_sp<GrFragmentProcessor> occluder,
+ sk_sp<SkLights> light,
+ int diffuseWidth, int diffuseHeight,
+ GrContext* context) {
+ fLightPos = light->light(0).pos();
+
+ fWidth = diffuseWidth;
+ fHeight = diffuseHeight;
+
+ this->registerChildProcessor(std::move(occluder));
+ this->initClassID<RadialShadowMapFP>();
+ }
+
+ class GLSLRadialShadowMapFP : public GrGLSLFragmentProcessor {
+ public:
+ GLSLRadialShadowMapFP() { }
+
+ void emitCode(EmitArgs& args) override {
+
+ GrGLSLFragmentBuilder* fragBuilder = args.fFragBuilder;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ const char* lightPosUniName = nullptr;
+
+ fLightPosUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec3f_GrSLType,
+ kDefault_GrSLPrecision,
+ "lightPos",
+ &lightPosUniName);
+
+ const char* widthUniName = nullptr;
+ const char* heightUniName = nullptr;
+
+ fWidthUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kInt_GrSLType,
+ kDefault_GrSLPrecision,
+ "width", &widthUniName);
+ fHeightUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kInt_GrSLType,
+ kDefault_GrSLPrecision,
+ "height", &heightUniName);
+
+
+ SkString occluder("occluder");
+ this->emitChild(0, nullptr, &occluder, args);
+
+ // Modify the input texture coordinates to index into our 1D output
+ fragBuilder->codeAppend("float distHere;");
+
+ // we use a max shadow distance of 2 times the max of width/height
+ fragBuilder->codeAppend("float closestDistHere = 2;");
+ fragBuilder->codeAppend("vec2 coords = vMatrixCoord_0_0_Stage0;");
+ fragBuilder->codeAppend("coords.y = 0;");
+ fragBuilder->codeAppend("vec2 destCoords = vec2(0,0);");
+ fragBuilder->codeAppendf("float step = 1.0 / %s;", heightUniName);
+
+ // assume that we are at 0, 0 light pos
+ // TODO use correct light positions
+
+ // this goes through each depth value in the final output buffer,
+ // basically raycasting outwards, and finding the first collision.
+ // we also let coords.y run up to 2 instead of 1 so our shadows stretch the whole screen.
+ fragBuilder->codeAppendf("for (coords.y = 0; coords.y <= 2; coords.y += step) {");
+
+ fragBuilder->codeAppend("float theta = (coords.x * 2.0 - 1.0) * 3.1415;");
+ fragBuilder->codeAppend("float r = coords.y;");
+ fragBuilder->codeAppend("destCoords = "
+ "vec2(r * cos(theta), - r * sin(theta)) /2.0 + 0.5;");
+ fragBuilder->codeAppendf("vec2 lightOffset = (vec2(%s)/vec2(%s,%s) - 0.5)"
+ "* vec2(1.0, 1.0);",
+ lightPosUniName, widthUniName, heightUniName);
+
+ fragBuilder->codeAppend("distHere = texture(uTextureSampler0_Stage1,"
+ "destCoords + lightOffset).b;");
+ fragBuilder->codeAppend("if (distHere > 0.0) {"
+ "closestDistHere = coords.y;"
+ "break;}");
+ fragBuilder->codeAppend("}");
+
+ fragBuilder->codeAppendf("%s = vec4(vec3(closestDistHere / 2.0),1);", args.fOutputColor);
+ }
+
+ static void GenKey(const GrProcessor& proc, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ b->add32(0); // nothing to add here
+ }
+
+ protected:
+ void onSetData(const GrGLSLProgramDataManager& pdman, const GrProcessor& proc) override {
+ const RadialShadowMapFP &radialShadowMapFP = proc.cast<RadialShadowMapFP>();
+
+ const SkVector3& lightPos = radialShadowMapFP.lightPos();
+ if (lightPos != fLightPos) {
+ pdman.set3fv(fLightPosUni, 1, &lightPos.fX);
+ fLightPos = lightPos;
+ }
+
+ int width = radialShadowMapFP.width();
+ if (width != fWidth) {
+ pdman.set1i(fWidthUni, width);
+ fWidth = width;
+ }
+ int height = radialShadowMapFP.height();
+ if (height != fHeight) {
+ pdman.set1i(fHeightUni, height);
+ fHeight = height;
+ }
+ }
+
+ private:
+ SkVector3 fLightPos;
+ GrGLSLProgramDataManager::UniformHandle fLightPosUni;
+
+ int fWidth;
+ GrGLSLProgramDataManager::UniformHandle fWidthUni;
+ int fHeight;
+ GrGLSLProgramDataManager::UniformHandle fHeightUni;
+ };
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLRadialShadowMapFP::GenKey(*this, caps, b);
+ }
+
+ const char* name() const override { return "RadialShadowMapFP"; }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ inout->mulByUnknownFourComponents();
+ }
+ const SkVector3& lightPos() const {
+ return fLightPos;
+ }
+
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override {
+ return new GLSLRadialShadowMapFP;
+ }
+
+ bool onIsEqual(const GrFragmentProcessor& proc) const override {
+ const RadialShadowMapFP& radialShadowMapFP = proc.cast<RadialShadowMapFP>();
+
+ if (fWidth != radialShadowMapFP.fWidth || fHeight != radialShadowMapFP.fHeight) {
+ return false;
+ }
+
+ if (fLightPos != radialShadowMapFP.fLightPos) {
+ return false;
+ }
+
+ return true;
+ }
+
+ SkVector3 fLightPos;
+
+ int fHeight;
+ int fWidth;
+};
+
+////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> SkRadialShadowMapShaderImpl::asFragmentProcessor
+ (const AsFPArgs& fpargs) const {
+
+ sk_sp<GrFragmentProcessor> occluderFP = fOccluderShader->asFragmentProcessor(fpargs);
+
+ sk_sp<GrFragmentProcessor> shadowFP = sk_make_sp<RadialShadowMapFP>(std::move(occluderFP),
+ fLight, fWidth, fHeight,
+ fpargs.fContext);
+ return shadowFP;
+}
+
+#endif
+
+////////////////////////////////////////////////////////////////////////////
+
+bool SkRadialShadowMapShaderImpl::isOpaque() const {
+ return fOccluderShader->isOpaque();
+}
+
+SkRadialShadowMapShaderImpl::ShadowMapRadialShaderContext::ShadowMapRadialShaderContext(
+ const SkRadialShadowMapShaderImpl& shader, const ContextRec& rec,
+ SkShader::Context* occluderContext,
+ void* heapAllocated)
+ : INHERITED(shader, rec)
+ , fOccluderContext(occluderContext)
+ , fHeapAllocated(heapAllocated) {
+ bool isOpaque = shader.isOpaque();
+
+ // update fFlags
+ uint32_t flags = 0;
+ if (isOpaque && (255 == this->getPaintAlpha())) {
+ flags |= kOpaqueAlpha_Flag;
+ }
+
+ fFlags = flags;
+}
+
+SkRadialShadowMapShaderImpl::ShadowMapRadialShaderContext::~ShadowMapRadialShaderContext() {
+ // The dependencies have been created outside of the context on memory that was allocated by
+ // the onCreateContext() method. Call the destructors and free the memory.
+ fOccluderContext->~Context();
+
+ sk_free(fHeapAllocated);
+}
+
+static inline SkPMColor convert(SkColor3f color, U8CPU a) {
+ if (color.fX <= 0.0f) {
+ color.fX = 0.0f;
+ } else if (color.fX >= 255.0f) {
+ color.fX = 255.0f;
+ }
+
+ if (color.fY <= 0.0f) {
+ color.fY = 0.0f;
+ } else if (color.fY >= 255.0f) {
+ color.fY = 255.0f;
+ }
+
+ if (color.fZ <= 0.0f) {
+ color.fZ = 0.0f;
+ } else if (color.fZ >= 255.0f) {
+ color.fZ = 255.0f;
+ }
+
+ return SkPreMultiplyARGB(a, (int) color.fX, (int) color.fY, (int) color.fZ);
+}
+
+// larger is better (fewer times we have to loop), but we shouldn't
+// take up too much stack-space (each one here costs 16 bytes)
+#define BUFFER_MAX 16
+void SkRadialShadowMapShaderImpl::ShadowMapRadialShaderContext::shadeSpan
+ (int x, int y, SkPMColor result[], int count) {
+ do {
+ int n = SkTMin(count, BUFFER_MAX);
+
+ // just fill with white for now
+ SkPMColor accum = convert(SkColor3f::Make(1.0f, 1.0f, 1.0f), 0xFF);
+
+ for (int i = 0; i < n; ++i) {
+ result[i] = accum;
+ }
+
+ result += n;
+ x += n;
+ count -= n;
+ } while (count > 0);
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+#ifndef SK_IGNORE_TO_STRING
+void SkRadialShadowMapShaderImpl::toString(SkString* str) const {
+ str->appendf("RadialShadowMapShader: ()");
+}
+#endif
+
+sk_sp<SkFlattenable> SkRadialShadowMapShaderImpl::CreateProc(SkReadBuffer& buf) {
+
+ // Discarding SkShader flattenable params
+ bool hasLocalMatrix = buf.readBool();
+ SkAssertResult(!hasLocalMatrix);
+
+ sk_sp<SkLights> light = SkLights::MakeFromBuffer(buf);
+
+ int diffuseWidth = buf.readInt();
+ int diffuseHeight = buf.readInt();
+
+ sk_sp<SkShader> occluderShader(buf.readFlattenable<SkShader>());
+
+ return sk_make_sp<SkRadialShadowMapShaderImpl>(std::move(occluderShader),
+ std::move(light),
+ diffuseWidth, diffuseHeight);
+}
+
+void SkRadialShadowMapShaderImpl::flatten(SkWriteBuffer& buf) const {
+ this->INHERITED::flatten(buf);
+
+ fLight->flatten(buf);
+
+ buf.writeInt(fWidth);
+ buf.writeInt(fHeight);
+
+ buf.writeFlattenable(fOccluderShader.get());
+}
+
+size_t SkRadialShadowMapShaderImpl::onContextSize(const ContextRec& rec) const {
+ return sizeof(ShadowMapRadialShaderContext);
+}
+
+SkShader::Context* SkRadialShadowMapShaderImpl::onCreateContext(const ContextRec& rec,
+ void* storage) const {
+ size_t heapRequired = fOccluderShader->contextSize(rec);
+
+ void* heapAllocated = sk_malloc_throw(heapRequired);
+
+ void* occluderContextStorage = heapAllocated;
+
+ SkShader::Context* occluderContext =
+ fOccluderShader->createContext(rec, occluderContextStorage);
+
+ if (!occluderContext) {
+ sk_free(heapAllocated);
+ return nullptr;
+ }
+
+ return new (storage) ShadowMapRadialShaderContext(*this, rec, occluderContext, heapAllocated);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkShader> SkRadialShadowMapShader::Make(sk_sp<SkShader> occluderShader,
+ sk_sp<SkLights> light,
+ int diffuseWidth, int diffuseHeight) {
+ if (!occluderShader) {
+ // TODO: Use paint's color in absence of a diffuseShader
+ // TODO: Use a default implementation of normalSource instead
+ return nullptr;
+ }
+
+ return sk_make_sp<SkRadialShadowMapShaderImpl>(std::move(occluderShader),
+ std::move(light),
+ diffuseWidth, diffuseHeight);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkRadialShadowMapShader)
+SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkRadialShadowMapShaderImpl)
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END
+
+///////////////////////////////////////////////////////////////////////////////
+
+#endif
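The clamp-and-premultiply step in convert() above, as a standalone sketch (packed here as A8R8G8B8 purely for illustration; the real SkPMColor channel order is configured per platform):

#include <algorithm>
#include <cstdint>

static uint32_t pack_premul_argb(float r, float g, float b, unsigned a) {
    auto clamp255 = [](float v) -> unsigned {
        return (unsigned)std::min(std::max(v, 0.0f), 255.0f);
    };
    unsigned R = clamp255(r), G = clamp255(g), B = clamp255(b);
    // premultiply: scale each color channel by alpha/255
    R = R * a / 255;
    G = G * a / 255;
    B = B * a / 255;
    return (uint32_t(a) << 24) | (R << 16) | (G << 8) | B;
}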
diff --git a/gfx/skia/skia/src/core/SkRadialShadowMapShader.h b/gfx/skia/skia/src/core/SkRadialShadowMapShader.h
new file mode 100644
index 000000000..4d6956ca8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRadialShadowMapShader.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkReadBuffer.h"
+
+#ifndef SkRadialShadowMapShader_DEFINED
+#define SkRadialShadowMapShader_DEFINED
+
+#ifdef SK_EXPERIMENTAL_SHADOWING
+
+class SkLights;
+class SkShader;
+
+class SK_API SkRadialShadowMapShader {
+public:
+ /** This shader creates a 1D strip depth map for radial lights.
+ * It can only take in 1 light to generate one shader at a time.
+ */
+ static sk_sp<SkShader> Make(sk_sp<SkShader> occluderShader,
+ sk_sp<SkLights> light,
+ int diffuseWidth, int diffuseHeight);
+
+ SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP()
+};
+
+#endif
+#endif
diff --git a/gfx/skia/skia/src/core/SkRasterClip.cpp b/gfx/skia/skia/src/core/SkRasterClip.cpp
new file mode 100644
index 000000000..1090c66f3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRasterClip.cpp
@@ -0,0 +1,483 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkRasterClip.h"
+#include "SkPath.h"
+
+SkRasterClip::SkRasterClip(const SkRasterClip& src) {
+ AUTO_RASTERCLIP_VALIDATE(src);
+
+ fForceConservativeRects = src.fForceConservativeRects;
+ fIsBW = src.fIsBW;
+ if (fIsBW) {
+ fBW = src.fBW;
+ } else {
+ fAA = src.fAA;
+ }
+
+ fIsEmpty = src.isEmpty();
+ fIsRect = src.isRect();
+ SkDEBUGCODE(this->validate();)
+}
+
+SkRasterClip::SkRasterClip(const SkRegion& rgn) : fBW(rgn) {
+ fForceConservativeRects = false;
+ fIsBW = true;
+ fIsEmpty = this->computeIsEmpty(); // bounds might be empty, so compute
+ fIsRect = !fIsEmpty;
+ SkDEBUGCODE(this->validate();)
+}
+
+SkRasterClip::SkRasterClip(const SkIRect& bounds, bool forceConservativeRects) : fBW(bounds) {
+ fForceConservativeRects = forceConservativeRects;
+ fIsBW = true;
+ fIsEmpty = this->computeIsEmpty(); // bounds might be empty, so compute
+ fIsRect = !fIsEmpty;
+ SkDEBUGCODE(this->validate();)
+}
+
+SkRasterClip::SkRasterClip(bool forceConservativeRects) {
+ fForceConservativeRects = forceConservativeRects;
+ fIsBW = true;
+ fIsEmpty = true;
+ fIsRect = false;
+ SkDEBUGCODE(this->validate();)
+}
+
+SkRasterClip::~SkRasterClip() {
+ SkDEBUGCODE(this->validate();)
+}
+
+bool SkRasterClip::operator==(const SkRasterClip& other) const {
+    // This impl only compares the current clip state; it doesn't care whether fForceConservativeRects matches.
+
+ if (fIsBW != other.fIsBW) {
+ return false;
+ }
+ bool isEqual = fIsBW ? fBW == other.fBW : fAA == other.fAA;
+#ifdef SK_DEBUG
+ if (isEqual) {
+ SkASSERT(fIsEmpty == other.fIsEmpty);
+ SkASSERT(fIsRect == other.fIsRect);
+ }
+#endif
+ return isEqual;
+}
+
+bool SkRasterClip::isComplex() const {
+ return fIsBW ? fBW.isComplex() : !fAA.isEmpty();
+}
+
+const SkIRect& SkRasterClip::getBounds() const {
+ return fIsBW ? fBW.getBounds() : fAA.getBounds();
+}
+
+bool SkRasterClip::setEmpty() {
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ fIsBW = true;
+ fBW.setEmpty();
+ fAA.setEmpty();
+ fIsEmpty = true;
+ fIsRect = false;
+ return false;
+}
+
+bool SkRasterClip::setRect(const SkIRect& rect) {
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ fIsBW = true;
+ fAA.setEmpty();
+ fIsRect = fBW.setRect(rect);
+ fIsEmpty = !fIsRect;
+ return fIsRect;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+
+bool SkRasterClip::setConservativeRect(const SkRect& r, const SkIRect& clipR, bool isInverse) {
+ SkRegion::Op op;
+ if (isInverse) {
+ op = SkRegion::kDifference_Op;
+ } else {
+ op = SkRegion::kIntersect_Op;
+ }
+ fBW.setRect(clipR);
+ fBW.op(r.roundOut(), op);
+ return this->updateCacheAndReturnNonEmpty();
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+
+enum MutateResult {
+ kDoNothing_MutateResult,
+ kReplaceClippedAgainstGlobalBounds_MutateResult,
+ kContinue_MutateResult,
+};
+
+static MutateResult mutate_conservative_op(SkRegion::Op* op, bool inverseFilled) {
+ if (inverseFilled) {
+ switch (*op) {
+ case SkRegion::kIntersect_Op:
+ case SkRegion::kDifference_Op:
+ // These ops can only shrink the current clip. So leaving
+ // the clip unchanged conservatively respects the contract.
+ return kDoNothing_MutateResult;
+ case SkRegion::kUnion_Op:
+ case SkRegion::kReplace_Op:
+ case SkRegion::kReverseDifference_Op:
+ case SkRegion::kXOR_Op: {
+ // These ops can grow the current clip up to the extents of
+ // the input clip, which is inverse filled, so we just set
+ // the current clip to the device bounds.
+ *op = SkRegion::kReplace_Op;
+ return kReplaceClippedAgainstGlobalBounds_MutateResult;
+ }
+ }
+ } else {
+ // Not inverse filled
+ switch (*op) {
+ case SkRegion::kIntersect_Op:
+ case SkRegion::kUnion_Op:
+ case SkRegion::kReplace_Op:
+ return kContinue_MutateResult;
+ case SkRegion::kDifference_Op:
+ // Difference can only shrink the current clip.
+                // Leaving the clip unchanged conservatively fulfills the contract.
+ return kDoNothing_MutateResult;
+ case SkRegion::kReverseDifference_Op:
+                // Reverse difference can leave at most the geometry itself, so conservatively
+                // replace the clip with the geometry's device bounds via a replace op.
+ *op = SkRegion::kReplace_Op;
+ return kContinue_MutateResult;
+ case SkRegion::kXOR_Op:
+                // Be conservative: (A XOR B) is always included in (A union B),
+                // which is always included in (bounds(A) union bounds(B)).
+ *op = SkRegion::kUnion_Op;
+ return kContinue_MutateResult;
+ }
+ }
+ SkFAIL("should not get here");
+ return kDoNothing_MutateResult;
+}
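+
+// Editor's note, not part of the original patch: a small worked example of why the
+// XOR -> union substitution above is conservative. (A XOR B) is always contained in
+// (A union B), so the mutated clip can only be too large, never too small:
+//
+//   SkRegion a(SkIRect::MakeLTRB(0, 0, 10, 10));
+//   SkRegion b(SkIRect::MakeLTRB(5, 5, 15, 15));
+//   SkRegion x; x.op(a, b, SkRegion::kXOR_Op);    // everything except the overlap [5,5,10,10]
+//   SkRegion u; u.op(a, b, SkRegion::kUnion_Op);  // all of a and b together
+//   SkASSERT(u.contains(x));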
+
+bool SkRasterClip::setPath(const SkPath& path, const SkRegion& clip, bool doAA) {
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ if (fForceConservativeRects) {
+ return this->setConservativeRect(path.getBounds(), clip.getBounds(), path.isInverseFillType());
+ }
+
+ if (this->isBW() && !doAA) {
+ (void)fBW.setPath(path, clip);
+ } else {
+ // TODO: since we are going to over-write fAA completely (aren't we?)
+ // we should just clear our BW data (if any) and set fIsAA=true
+ if (this->isBW()) {
+ this->convertToAA();
+ }
+ (void)fAA.setPath(path, &clip, doAA);
+ }
+ return this->updateCacheAndReturnNonEmpty();
+}
+
+bool SkRasterClip::op(const SkRRect& rrect, const SkMatrix& matrix, const SkIRect& bounds,
+ SkRegion::Op op, bool doAA) {
+ if (fForceConservativeRects) {
+ return this->op(rrect.getBounds(), matrix, bounds, op, doAA);
+ }
+
+ SkPath path;
+ path.addRRect(rrect);
+
+ return this->op(path, matrix, bounds, op, doAA);
+}
+
+bool SkRasterClip::op(const SkPath& path, const SkMatrix& matrix, const SkIRect& bounds,
+ SkRegion::Op op, bool doAA) {
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ if (fForceConservativeRects) {
+ SkIRect ir;
+ switch (mutate_conservative_op(&op, path.isInverseFillType())) {
+ case kDoNothing_MutateResult:
+ return !this->isEmpty();
+ case kReplaceClippedAgainstGlobalBounds_MutateResult:
+ ir = bounds;
+ break;
+ case kContinue_MutateResult: {
+ SkRect bounds = path.getBounds();
+ matrix.mapRect(&bounds);
+ ir = bounds.roundOut();
+ break;
+ }
+ }
+ return this->op(ir, op);
+ }
+
+ // base is used to limit the size (and therefore memory allocation) of the
+ // region that results from scan converting devPath.
+ SkRegion base;
+
+ SkPath devPath;
+ if (matrix.isIdentity()) {
+ devPath = path;
+ } else {
+ path.transform(matrix, &devPath);
+ devPath.setIsVolatile(true);
+ }
+ if (SkRegion::kIntersect_Op == op) {
+        // Since the op is intersect, we can do better (tighter) with the current clip's
+        // bounds than just using the device bounds. However, if the current clip is
+        // complex, our region blitter may choke, so we handle that case in two steps.
+ if (this->isRect()) {
+ // FIXME: we should also be able to do this when this->isBW(),
+ // but relaxing the test above triggers GM asserts in
+ // SkRgnBuilder::blitH(). We need to investigate what's going on.
+ return this->setPath(devPath, this->bwRgn(), doAA);
+ } else {
+ base.setRect(this->getBounds());
+ SkRasterClip clip(fForceConservativeRects);
+ clip.setPath(devPath, base, doAA);
+ return this->op(clip, op);
+ }
+ } else {
+ base.setRect(bounds);
+
+ if (SkRegion::kReplace_Op == op) {
+ return this->setPath(devPath, base, doAA);
+ } else {
+ SkRasterClip clip(fForceConservativeRects);
+ clip.setPath(devPath, base, doAA);
+ return this->op(clip, op);
+ }
+ }
+}
+
+bool SkRasterClip::setPath(const SkPath& path, const SkIRect& clip, bool doAA) {
+ SkRegion tmp;
+ tmp.setRect(clip);
+ return this->setPath(path, tmp, doAA);
+}
+
+bool SkRasterClip::op(const SkIRect& rect, SkRegion::Op op) {
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ fIsBW ? fBW.op(rect, op) : fAA.op(rect, op);
+ return this->updateCacheAndReturnNonEmpty();
+}
+
+bool SkRasterClip::op(const SkRegion& rgn, SkRegion::Op op) {
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ if (fIsBW) {
+ (void)fBW.op(rgn, op);
+ } else {
+ SkAAClip tmp;
+ tmp.setRegion(rgn);
+ (void)fAA.op(tmp, op);
+ }
+ return this->updateCacheAndReturnNonEmpty();
+}
+
+bool SkRasterClip::op(const SkRasterClip& clip, SkRegion::Op op) {
+ AUTO_RASTERCLIP_VALIDATE(*this);
+ clip.validate();
+
+ if (this->isBW() && clip.isBW()) {
+ (void)fBW.op(clip.fBW, op);
+ } else {
+ SkAAClip tmp;
+ const SkAAClip* other;
+
+ if (this->isBW()) {
+ this->convertToAA();
+ }
+ if (clip.isBW()) {
+ tmp.setRegion(clip.bwRgn());
+ other = &tmp;
+ } else {
+ other = &clip.aaRgn();
+ }
+ (void)fAA.op(*other, op);
+ }
+ return this->updateCacheAndReturnNonEmpty();
+}
+
+/**
+ * Our antialiasing currently has a granularity of 1/4 of a pixel along each
+ * axis. Thus we can treat an axis coordinate as an integer if it differs
+ * from its nearest int by less than half of that value (1/8 in this case).
+ */
+static bool nearly_integral(SkScalar x) {
+ static const SkScalar domain = SK_Scalar1 / 4;
+ static const SkScalar halfDomain = domain / 2;
+
+ x += halfDomain;
+ return x - SkScalarFloorToScalar(x) < domain;
+}
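+
+// Editor's note, not part of the original patch: plugging numbers into the threshold above,
+// with domain = 1/4 and halfDomain = 1/8. For x = 10.06: 10.06 + 0.125 = 10.185, and
+// 10.185 - 10 = 0.185 < 0.25, so the coordinate counts as integral (it is within 1/8 of 10).
+// For x = 10.20: 10.325 - 10 = 0.325 >= 0.25, so it does not, and the rect keeps its AA edge.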
+
+bool SkRasterClip::op(const SkRect& localRect, const SkMatrix& matrix, const SkIRect& bounds,
+ SkRegion::Op op, bool doAA) {
+ AUTO_RASTERCLIP_VALIDATE(*this);
+ SkRect devRect;
+
+ if (fForceConservativeRects) {
+ SkIRect ir;
+ switch (mutate_conservative_op(&op, false)) {
+ case kDoNothing_MutateResult:
+ return !this->isEmpty();
+ case kReplaceClippedAgainstGlobalBounds_MutateResult:
+ ir = bounds;
+ break;
+ case kContinue_MutateResult:
+ matrix.mapRect(&devRect, localRect);
+ ir = devRect.roundOut();
+ break;
+ }
+ return this->op(ir, op);
+ }
+ const bool isScaleTrans = matrix.isScaleTranslate();
+ if (!isScaleTrans) {
+ SkPath path;
+ path.addRect(localRect);
+ path.setIsVolatile(true);
+ return this->op(path, matrix, bounds, op, doAA);
+ }
+
+ matrix.mapRect(&devRect, localRect);
+
+ if (fIsBW && doAA) {
+        // check whether the rect really needs AA, or whether it is close enough to
+        // integer boundaries that we can just treat it as a BW rect
+ if (nearly_integral(devRect.fLeft) && nearly_integral(devRect.fTop) &&
+ nearly_integral(devRect.fRight) && nearly_integral(devRect.fBottom)) {
+ doAA = false;
+ }
+ }
+
+ if (fIsBW && !doAA) {
+ SkIRect ir;
+ devRect.round(&ir);
+ (void)fBW.op(ir, op);
+ } else {
+ if (fIsBW) {
+ this->convertToAA();
+ }
+ (void)fAA.op(devRect, op, doAA);
+ }
+ return this->updateCacheAndReturnNonEmpty();
+}
+
+void SkRasterClip::translate(int dx, int dy, SkRasterClip* dst) const {
+ if (nullptr == dst) {
+ return;
+ }
+
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ if (this->isEmpty()) {
+ dst->setEmpty();
+ return;
+ }
+ if (0 == (dx | dy)) {
+ *dst = *this;
+ return;
+ }
+
+ dst->fIsBW = fIsBW;
+ if (fIsBW) {
+ fBW.translate(dx, dy, &dst->fBW);
+ dst->fAA.setEmpty();
+ } else {
+ fAA.translate(dx, dy, &dst->fAA);
+ dst->fBW.setEmpty();
+ }
+ dst->updateCacheAndReturnNonEmpty();
+}
+
+bool SkRasterClip::quickContains(const SkIRect& ir) const {
+ return fIsBW ? fBW.quickContains(ir) : fAA.quickContains(ir);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+const SkRegion& SkRasterClip::forceGetBW() {
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ if (!fIsBW) {
+ fBW.setRect(fAA.getBounds());
+ }
+ return fBW;
+}
+
+void SkRasterClip::convertToAA() {
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ SkASSERT(!fForceConservativeRects);
+
+ SkASSERT(fIsBW);
+ fAA.setRegion(fBW);
+ fIsBW = false;
+
+ // since we are being explicitly asked to convert-to-aa, we pass false so we don't "optimize"
+ // ourselves back to BW.
+ (void)this->updateCacheAndReturnNonEmpty(false);
+}
+
+#ifdef SK_DEBUG
+void SkRasterClip::validate() const {
+ // can't ever assert that fBW is empty, since we may have called forceGetBW
+ if (fIsBW) {
+ SkASSERT(fAA.isEmpty());
+ }
+
+ fBW.validate();
+ fAA.validate();
+
+ SkASSERT(this->computeIsEmpty() == fIsEmpty);
+ SkASSERT(this->computeIsRect() == fIsRect);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkAAClipBlitterWrapper::SkAAClipBlitterWrapper() {
+ SkDEBUGCODE(fClipRgn = nullptr;)
+ SkDEBUGCODE(fBlitter = nullptr;)
+}
+
+SkAAClipBlitterWrapper::SkAAClipBlitterWrapper(const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ this->init(clip, blitter);
+}
+
+SkAAClipBlitterWrapper::SkAAClipBlitterWrapper(const SkAAClip* aaclip,
+ SkBlitter* blitter) {
+ SkASSERT(blitter);
+ SkASSERT(aaclip);
+ fBWRgn.setRect(aaclip->getBounds());
+ fAABlitter.init(blitter, aaclip);
+ // now our return values
+ fClipRgn = &fBWRgn;
+ fBlitter = &fAABlitter;
+}
+
+void SkAAClipBlitterWrapper::init(const SkRasterClip& clip, SkBlitter* blitter) {
+ SkASSERT(blitter);
+ if (clip.isBW()) {
+ fClipRgn = &clip.bwRgn();
+ fBlitter = blitter;
+ } else {
+ const SkAAClip& aaclip = clip.aaRgn();
+ fBWRgn.setRect(aaclip.getBounds());
+ fAABlitter.init(blitter, &aaclip);
+ // now our return values
+ fClipRgn = &fBWRgn;
+ fBlitter = &fAABlitter;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkRasterClip.h b/gfx/skia/skia/src/core/SkRasterClip.h
new file mode 100644
index 000000000..4b462479c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRasterClip.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRasterClip_DEFINED
+#define SkRasterClip_DEFINED
+
+#include "SkRegion.h"
+#include "SkAAClip.h"
+
+class SkRRect;
+
+/**
+ * Wraps a SkRegion and SkAAClip, so we have a single object that can represent either our
+ * BW or antialiased clips.
+ *
+ * This class is optimized for the raster backend of canvas, but can be expensive to keep up to date,
+ * so it supports a runtime option (force-conservative-rects) to turn it into a super-fast
+ * rect-only tracker. The gpu backend uses this since it does not need the result (it uses
+ * SkClipStack instead).
+ */
+class SkRasterClip {
+public:
+ SkRasterClip(bool forceConservativeRects = false);
+ SkRasterClip(const SkIRect&, bool forceConservativeRects = false);
+ SkRasterClip(const SkRegion&);
+ SkRasterClip(const SkRasterClip&);
+ ~SkRasterClip();
+
+ // Only compares the current state. Does not compare isForceConservativeRects(), so that field
+ // could be different but this could still return true.
+ bool operator==(const SkRasterClip&) const;
+ bool operator!=(const SkRasterClip& other) const {
+ return !(*this == other);
+ }
+
+ bool isForceConservativeRects() const { return fForceConservativeRects; }
+
+ bool isBW() const { return fIsBW; }
+ bool isAA() const { return !fIsBW; }
+ const SkRegion& bwRgn() const { SkASSERT(fIsBW); return fBW; }
+ const SkAAClip& aaRgn() const { SkASSERT(!fIsBW); return fAA; }
+
+ bool isEmpty() const {
+ SkASSERT(this->computeIsEmpty() == fIsEmpty);
+ return fIsEmpty;
+ }
+
+ bool isRect() const {
+ SkASSERT(this->computeIsRect() == fIsRect);
+ return fIsRect;
+ }
+
+ bool isComplex() const;
+ const SkIRect& getBounds() const;
+
+ bool setEmpty();
+ bool setRect(const SkIRect&);
+
+ bool op(const SkIRect&, SkRegion::Op);
+ bool op(const SkRegion&, SkRegion::Op);
+ bool op(const SkRect&, const SkMatrix& matrix, const SkIRect&, SkRegion::Op, bool doAA);
+ bool op(const SkRRect&, const SkMatrix& matrix, const SkIRect&, SkRegion::Op, bool doAA);
+ bool op(const SkPath&, const SkMatrix& matrix, const SkIRect&, SkRegion::Op, bool doAA);
+
+ void translate(int dx, int dy, SkRasterClip* dst) const;
+ void translate(int dx, int dy) {
+ this->translate(dx, dy, this);
+ }
+
+ bool quickContains(const SkIRect& rect) const;
+ bool quickContains(int left, int top, int right, int bottom) const {
+ return quickContains(SkIRect::MakeLTRB(left, top, right, bottom));
+ }
+
+ /**
+ * Return true if this region is empty, or if the specified rectangle does
+ * not intersect the region. Returning false is not a guarantee that they
+ * intersect, but returning true is a guarantee that they do not.
+ */
+ bool quickReject(const SkIRect& rect) const {
+ return !SkIRect::Intersects(this->getBounds(), rect);
+ }
+
+ // hack for SkCanvas::getTotalClip
+ const SkRegion& forceGetBW();
+
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+private:
+ SkRegion fBW;
+ SkAAClip fAA;
+ bool fForceConservativeRects;
+ bool fIsBW;
+    // these two are caches, computed from whichever of fBW or fAA is active per fIsBW
+ bool fIsEmpty;
+ bool fIsRect;
+
+ bool computeIsEmpty() const {
+ return fIsBW ? fBW.isEmpty() : fAA.isEmpty();
+ }
+
+ bool computeIsRect() const {
+ return fIsBW ? fBW.isRect() : fAA.isRect();
+ }
+
+ bool updateCacheAndReturnNonEmpty(bool detectAARect = true) {
+ fIsEmpty = this->computeIsEmpty();
+
+ // detect that our computed AA is really just a (hard-edged) rect
+ if (detectAARect && !fIsEmpty && !fIsBW && fAA.isRect()) {
+ fBW.setRect(fAA.getBounds());
+ fAA.setEmpty(); // don't need this guy anymore
+ fIsBW = true;
+ }
+
+ fIsRect = this->computeIsRect();
+ return !fIsEmpty;
+ }
+
+ void convertToAA();
+
+ bool setPath(const SkPath& path, const SkRegion& clip, bool doAA);
+ bool setPath(const SkPath& path, const SkIRect& clip, bool doAA);
+ bool op(const SkRasterClip&, SkRegion::Op);
+ bool setConservativeRect(const SkRect& r, const SkIRect& clipR, bool isInverse);
+};
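+
+// Editor's illustrative sketch, not part of the original patch: rect-only clips stay on the
+// BW (SkRegion) side and never allocate an SkAAClip.
+//
+//   SkRasterClip rc;
+//   rc.setRect(SkIRect::MakeWH(100, 100));
+//   rc.op(SkIRect::MakeLTRB(25, 25, 75, 75), SkRegion::kIntersect_Op);
+//   SkASSERT(rc.isBW() && rc.isRect());
+//   SkASSERT(rc.quickContains(SkIRect::MakeLTRB(30, 30, 40, 40)));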
+
+class SkAutoRasterClipValidate : SkNoncopyable {
+public:
+ SkAutoRasterClipValidate(const SkRasterClip& rc) : fRC(rc) {
+ fRC.validate();
+ }
+ ~SkAutoRasterClipValidate() {
+ fRC.validate();
+ }
+private:
+ const SkRasterClip& fRC;
+};
+#define SkAutoRasterClipValidate(...) SK_REQUIRE_LOCAL_VAR(SkAutoRasterClipValidate)
+
+#ifdef SK_DEBUG
+ #define AUTO_RASTERCLIP_VALIDATE(rc) SkAutoRasterClipValidate arcv(rc)
+#else
+ #define AUTO_RASTERCLIP_VALIDATE(rc)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Encapsulates the logic of deciding if we need to change/wrap the blitter
+ * for AA clipping. If so, getRgn and getBlitter return modified values. If
+ * not, they return the raw blitter and (bw) clip region.
+ *
+ * We need to keep the constructor/destructor cost as small as possible, so we
+ * can freely put this guy on the stack, and not pay too much for the case when
+ * we're really BW anyway.
+ */
+class SkAAClipBlitterWrapper {
+public:
+ SkAAClipBlitterWrapper();
+ SkAAClipBlitterWrapper(const SkRasterClip&, SkBlitter*);
+ SkAAClipBlitterWrapper(const SkAAClip*, SkBlitter*);
+
+ void init(const SkRasterClip&, SkBlitter*);
+
+ const SkIRect& getBounds() const {
+ SkASSERT(fClipRgn);
+ return fClipRgn->getBounds();
+ }
+ const SkRegion& getRgn() const {
+ SkASSERT(fClipRgn);
+ return *fClipRgn;
+ }
+ SkBlitter* getBlitter() {
+ SkASSERT(fBlitter);
+ return fBlitter;
+ }
+
+private:
+ SkRegion fBWRgn;
+ SkAAClipBlitter fAABlitter;
+ // what we return
+ const SkRegion* fClipRgn;
+ SkBlitter* fBlitter;
+};
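+
+// Editor's illustrative sketch, not part of the original patch: typical stack usage. The
+// wrapper only interposes when the clip is antialiased; otherwise it hands back the raw
+// blitter and BW region untouched. rasterClip and blitter are hypothetical locals.
+//
+//   SkAAClipBlitterWrapper wrapper(rasterClip, blitter);
+//   const SkRegion& clipRgn   = wrapper.getRgn();
+//   SkBlitter*      effective = wrapper.getBlitter();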
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkRasterPipeline.cpp b/gfx/skia/skia/src/core/SkRasterPipeline.cpp
new file mode 100644
index 000000000..72d5b7b96
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRasterPipeline.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkOpts.h"
+#include "SkRasterPipeline.h"
+
+SkRasterPipeline::SkRasterPipeline() {}
+
+void SkRasterPipeline::append(SkRasterPipeline::Fn body,
+ SkRasterPipeline::Fn tail,
+ void* ctx) {
+ // Each stage holds its own context and the next function to call.
+ // So the pipeline itself has to hold onto the first function that starts the pipeline.
+ (fBody.empty() ? fBodyStart : fBody.back().fNext) = body;
+ (fTail.empty() ? fTailStart : fTail.back().fNext) = tail;
+
+ // Each last stage starts with its next function set to JustReturn as a safety net.
+ // It'll be overwritten by the next call to append().
+ fBody.push_back({ &JustReturn, ctx });
+ fTail.push_back({ &JustReturn, ctx });
+}
+
+void SkRasterPipeline::append(StockStage stage, void* ctx) {
+ this->append(SkOpts::stages_4[stage], SkOpts::stages_1_3[stage], ctx);
+}
+
+void SkRasterPipeline::extend(const SkRasterPipeline& src) {
+ SkASSERT(src.fBody.count() == src.fTail.count());
+
+ Fn body = src.fBodyStart,
+ tail = src.fTailStart;
+ for (int i = 0; i < src.fBody.count(); i++) {
+ SkASSERT(src.fBody[i].fCtx == src.fTail[i].fCtx);
+ this->append(body, tail, src.fBody[i].fCtx);
+ body = src.fBody[i].fNext;
+ tail = src.fTail[i].fNext;
+ }
+}
+
+void SkRasterPipeline::run(size_t x, size_t n) {
+ // It's fastest to start uninitialized if the compilers all let us. If not, next fastest is 0.
+ Sk4f v;
+
+ while (n >= 4) {
+ fBodyStart(fBody.begin(), x,0, v,v,v,v, v,v,v,v);
+ x += 4;
+ n -= 4;
+ }
+ if (n > 0) {
+ fTailStart(fTail.begin(), x,n, v,v,v,v, v,v,v,v);
+ }
+}
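+
+// Editor's note, not part of the original patch: for run(x = 0, n = 10) the loop above makes
+// two full body passes (x = 0 and x = 4, tail = 0) and one tail pass (x = 8, tail = 2).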
+
+void SK_VECTORCALL SkRasterPipeline::JustReturn(Stage*, size_t, size_t, Sk4f,Sk4f,Sk4f,Sk4f,
+ Sk4f,Sk4f,Sk4f,Sk4f) {}
diff --git a/gfx/skia/skia/src/core/SkRasterPipeline.h b/gfx/skia/skia/src/core/SkRasterPipeline.h
new file mode 100644
index 000000000..996c7838e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRasterPipeline.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRasterPipeline_DEFINED
+#define SkRasterPipeline_DEFINED
+
+#include "SkNx.h"
+#include "SkTArray.h"
+#include "SkTypes.h"
+
+/**
+ * SkRasterPipeline provides a cheap way to chain together a pixel processing pipeline.
+ *
+ * It's particularly designed for situations where the potential pipeline is extremely
+ * combinatorial: {N dst formats} x {M source formats} x {K mask formats} x {C transfer modes} ...
+ * No one wants to write specialized routines for all those combinations, and if we did, we'd
+ * end up bloating our code size dramatically. SkRasterPipeline stages can be chained together
+ * at runtime, so we can scale this problem linearly rather than combinatorially.
+ *
+ * Each stage is represented by a function conforming to a common interface, SkRasterPipeline::Fn,
+ * and by an arbitrary context pointer. Fn's arguments, and sometimes custom calling convention,
+ * are designed to maximize the amount of data we can pass along the pipeline cheaply.
+ * On many machines all arguments stay in registers the entire time.
+ *
+ * The meanings of the arguments to Fn are sometimes fixed:
+ * - The Stage* always represents the current stage, mainly providing access to ctx().
+ * - The first size_t is always the destination x coordinate.
+ * (If you need y, put it in your context.)
+ * - The second size_t is always tail: 0 when working on a full 4-pixel slab,
+ * or 1..3 when using only the bottom 1..3 lanes of each register.
+ * - By the time the shader's done, the first four vectors should hold source red,
+ * green, blue, and alpha, up to 4 pixels' worth each.
+ *
+ * Sometimes arguments are flexible:
+ * - In the shader, the first four vectors can be used for anything, e.g. sample coordinates.
+ * - The last four vectors are scratch registers that can be used to communicate between
+ * stages; transfer modes use these to hold the original destination pixel components.
+ *
+ * On some platforms the last four vectors are slower to work with than the other arguments.
+ *
+ * When done mutating its arguments and/or context, a stage can either:
+ * 1) call st->next() with its mutated arguments, chaining to the next stage of the pipeline; or
+ * 2) return, indicating the pipeline is complete for these pixels.
+ *
+ * Some stages that typically return are those that write a color to a destination pointer,
+ * but any stage can short-circuit the rest of the pipeline by returning instead of calling next().
+ */
+
+// TODO: There may be a better place to stuff tail, e.g. in the bottom alignment bits of
+// the Stage*. This mostly matters on 64-bit Windows where every register is precious.
+
+class SkRasterPipeline {
+public:
+ struct Stage;
+ using Fn = void(SK_VECTORCALL *)(Stage*, size_t, size_t, Sk4f,Sk4f,Sk4f,Sk4f,
+ Sk4f,Sk4f,Sk4f,Sk4f);
+ struct Stage {
+ template <typename T>
+ T ctx() { return static_cast<T>(fCtx); }
+
+ void SK_VECTORCALL next(size_t x, size_t tail, Sk4f v0, Sk4f v1, Sk4f v2, Sk4f v3,
+ Sk4f v4, Sk4f v5, Sk4f v6, Sk4f v7) {
+ // Stages are logically a pipeline, and physically are contiguous in an array.
+ // To get to the next stage, we just increment our pointer to the next array element.
+ fNext(this+1, x,tail, v0,v1,v2,v3, v4,v5,v6,v7);
+ }
+
+ // It makes next() a good bit cheaper if we hold the next function to call here,
+        // rather than the logically simpler choice of the function implementing this stage.
+ Fn fNext;
+ void* fCtx;
+ };
+
+
+ SkRasterPipeline();
+
+ // Run the pipeline constructed with append(), walking x through [x,x+n),
+ // generally in 4-pixel steps, with perhaps one jagged tail step.
+ void run(size_t x, size_t n);
+ void run(size_t n) { this->run(0, n); }
+
+ enum StockStage {
+ store_565,
+ store_srgb,
+ store_f16,
+
+ load_s_565,
+ load_s_srgb,
+ load_s_f16,
+
+ load_d_565,
+ load_d_srgb,
+ load_d_f16,
+
+ scale_u8,
+
+ lerp_u8,
+ lerp_565,
+ lerp_constant_float,
+
+ constant_color,
+
+ dst,
+ dstatop,
+ dstin,
+ dstout,
+ dstover,
+ srcatop,
+ srcin,
+ srcout,
+ srcover,
+ clear,
+ modulate,
+ multiply,
+ plus_,
+ screen,
+ xor_,
+ colorburn,
+ colordodge,
+ darken,
+ difference,
+ exclusion,
+ hardlight,
+ lighten,
+ overlay,
+ softlight,
+
+ kNumStockStages,
+ };
+ void append(StockStage, void* = nullptr);
+ void append(StockStage stage, const void* ctx) { this->append(stage, const_cast<void*>(ctx)); }
+
+
+ // Append all stages to this pipeline.
+ void extend(const SkRasterPipeline&);
+
+private:
+ using Stages = SkSTArray<10, Stage, /*MEM_COPY=*/true>;
+
+ void append(Fn body, Fn tail, void*);
+
+ // This no-op default makes fBodyStart and fTailStart unconditionally safe to call,
+ // and is always the last stage's fNext as a sort of safety net to make sure even a
+ // buggy pipeline can't walk off its own end.
+ static void SK_VECTORCALL JustReturn(Stage*, size_t, size_t, Sk4f,Sk4f,Sk4f,Sk4f,
+ Sk4f,Sk4f,Sk4f,Sk4f);
+ Stages fBody,
+ fTail;
+ Fn fBodyStart = &JustReturn,
+ fTailStart = &JustReturn;
+};
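+
+// Editor's illustrative sketch, not part of the original patch: chaining stock stages for one
+// row of sRGB pixels, roughly what SkRasterPipelineBlitter::blitH() builds. paintColor, dstRow,
+// x and width are hypothetical locals.
+//
+//   SkPM4f    paintColor = ...;   // premultiplied source color
+//   uint32_t* dstRow     = ...;   // destination scanline
+//   SkRasterPipeline p;
+//   p.append(SkRasterPipeline::constant_color, &paintColor);
+//   p.append(SkRasterPipeline::load_d_srgb,    dstRow);
+//   p.append(SkRasterPipeline::srcover);
+//   p.append(SkRasterPipeline::store_srgb,     dstRow);
+//   p.run(x, width);              // walks [x, x+width) in 4-pixel slabs plus one jagged tail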
+
+#endif//SkRasterPipeline_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRasterPipelineBlitter.cpp b/gfx/skia/skia/src/core/SkRasterPipelineBlitter.cpp
new file mode 100644
index 000000000..91d60bee3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRasterPipelineBlitter.cpp
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBlitter.h"
+#include "SkColor.h"
+#include "SkColorFilter.h"
+#include "SkOpts.h"
+#include "SkPM4f.h"
+#include "SkRasterPipeline.h"
+#include "SkShader.h"
+#include "SkXfermode.h"
+
+
+class SkRasterPipelineBlitter : public SkBlitter {
+public:
+ static SkBlitter* Create(const SkPixmap&, const SkPaint&, SkTBlitterAllocator*);
+
+ SkRasterPipelineBlitter(SkPixmap dst,
+ SkRasterPipeline shader,
+ SkRasterPipeline colorFilter,
+ SkRasterPipeline xfermode,
+ SkPM4f paintColor)
+ : fDst(dst)
+ , fShader(shader)
+ , fColorFilter(colorFilter)
+ , fXfermode(xfermode)
+ , fPaintColor(paintColor)
+ {}
+
+ void blitH (int x, int y, int w) override;
+ void blitAntiH(int x, int y, const SkAlpha[], const int16_t[]) override;
+ void blitMask (const SkMask&, const SkIRect& clip) override;
+
+ // TODO: The default implementations of the other blits look fine,
+ // but some of them like blitV could probably benefit from custom
+ // blits using something like a SkRasterPipeline::runFew() method.
+
+private:
+ void append_load_d(SkRasterPipeline*, const void*) const;
+ void append_store (SkRasterPipeline*, void*) const;
+
+ SkPixmap fDst;
+ SkRasterPipeline fShader, fColorFilter, fXfermode;
+ SkPM4f fPaintColor;
+
+ typedef SkBlitter INHERITED;
+};
+
+SkBlitter* SkCreateRasterPipelineBlitter(const SkPixmap& dst,
+ const SkPaint& paint,
+ SkTBlitterAllocator* alloc) {
+ return SkRasterPipelineBlitter::Create(dst, paint, alloc);
+}
+
+static bool supported(const SkImageInfo& info) {
+ switch (info.colorType()) {
+ case kN32_SkColorType: return info.gammaCloseToSRGB();
+ case kRGBA_F16_SkColorType: return true;
+ case kRGB_565_SkColorType: return true;
+ default: return false;
+ }
+}
+
+template <typename Effect>
+static bool append_effect_stages(const Effect* effect, SkRasterPipeline* pipeline) {
+ return !effect || effect->appendStages(pipeline);
+}
+
+
+SkBlitter* SkRasterPipelineBlitter::Create(const SkPixmap& dst,
+ const SkPaint& paint,
+ SkTBlitterAllocator* alloc) {
+ if (!supported(dst.info())) {
+ return nullptr;
+ }
+ if (paint.getShader()) {
+ return nullptr; // TODO: need to work out how shaders and their contexts work
+ }
+
+ SkRasterPipeline shader, colorFilter, xfermode;
+ if (!append_effect_stages(paint.getColorFilter(), &colorFilter) ||
+ !append_effect_stages(SkXfermode::Peek(paint.getBlendMode()), &xfermode )) {
+ return nullptr;
+ }
+
+ uint32_t paintColor = paint.getColor();
+
+ SkColor4f color;
+ if (SkImageInfoIsGammaCorrect(dst.info())) {
+ color = SkColor4f::FromColor(paintColor);
+ } else {
+ swizzle_rb(SkNx_cast<float>(Sk4b::Load(&paintColor)) * (1/255.0f)).store(&color);
+ }
+
+ auto blitter = alloc->createT<SkRasterPipelineBlitter>(
+ dst,
+ shader, colorFilter, xfermode,
+ color.premul());
+
+ if (!paint.getShader()) {
+ blitter->fShader.append(SkRasterPipeline::constant_color, &blitter->fPaintColor);
+ }
+ if (paint.isSrcOver()) {
+ blitter->fXfermode.append(SkRasterPipeline::srcover);
+ }
+
+ return blitter;
+}
+
+void SkRasterPipelineBlitter::append_load_d(SkRasterPipeline* p, const void* dst) const {
+ SkASSERT(supported(fDst.info()));
+
+ switch (fDst.info().colorType()) {
+ case kN32_SkColorType:
+ if (fDst.info().gammaCloseToSRGB()) {
+ p->append(SkRasterPipeline::load_d_srgb, dst);
+ }
+ break;
+ case kRGBA_F16_SkColorType:
+ p->append(SkRasterPipeline::load_d_f16, dst);
+ break;
+ case kRGB_565_SkColorType:
+ p->append(SkRasterPipeline::load_d_565, dst);
+ break;
+ default: break;
+ }
+}
+
+void SkRasterPipelineBlitter::append_store(SkRasterPipeline* p, void* dst) const {
+ SkASSERT(supported(fDst.info()));
+
+ switch (fDst.info().colorType()) {
+ case kN32_SkColorType:
+ if (fDst.info().gammaCloseToSRGB()) {
+ p->append(SkRasterPipeline::store_srgb, dst);
+ }
+ break;
+ case kRGBA_F16_SkColorType:
+ p->append(SkRasterPipeline::store_f16, dst);
+ break;
+ case kRGB_565_SkColorType:
+ p->append(SkRasterPipeline::store_565, dst);
+ break;
+ default: break;
+ }
+}
+
+void SkRasterPipelineBlitter::blitH(int x, int y, int w) {
+ auto dst = fDst.writable_addr(0,y);
+
+ SkRasterPipeline p;
+ p.extend(fShader);
+ p.extend(fColorFilter);
+ this->append_load_d(&p, dst);
+ p.extend(fXfermode);
+ this->append_store(&p, dst);
+
+ p.run(x, w);
+}
+
+void SkRasterPipelineBlitter::blitAntiH(int x, int y, const SkAlpha aa[], const int16_t runs[]) {
+ auto dst = fDst.writable_addr(0,y);
+ float coverage;
+
+ SkRasterPipeline p;
+ p.extend(fShader);
+ p.extend(fColorFilter);
+ this->append_load_d(&p, dst);
+ p.extend(fXfermode);
+ p.append(SkRasterPipeline::lerp_constant_float, &coverage);
+ this->append_store(&p, dst);
+
+ for (int16_t run = *runs; run > 0; run = *runs) {
+ coverage = *aa * (1/255.0f);
+ p.run(x, run);
+
+ x += run;
+ runs += run;
+ aa += run;
+ }
+}
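+
+// Editor's note, not part of the original patch: in blitAntiH() above the pipeline is built
+// once, but `coverage` is appended by address, so each iteration only rewrites that one float
+// before re-running the same pipeline over the next `run` pixels.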
+
+void SkRasterPipelineBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ if (mask.fFormat == SkMask::kBW_Format) {
+ // TODO: native BW masks?
+ return INHERITED::blitMask(mask, clip);
+ }
+
+ int x = clip.left();
+ for (int y = clip.top(); y < clip.bottom(); y++) {
+ auto dst = fDst.writable_addr(0,y);
+
+ SkRasterPipeline p;
+ p.extend(fShader);
+ p.extend(fColorFilter);
+ this->append_load_d(&p, dst);
+ p.extend(fXfermode);
+ switch (mask.fFormat) {
+ case SkMask::kA8_Format:
+ p.append(SkRasterPipeline::lerp_u8, mask.getAddr8(x,y)-x);
+ break;
+ case SkMask::kLCD16_Format:
+ p.append(SkRasterPipeline::lerp_565, mask.getAddrLCD16(x,y)-x);
+ break;
+ default: break;
+ }
+ this->append_store(&p, dst);
+
+ p.run(x, clip.width());
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkRasterizer.cpp b/gfx/skia/skia/src/core/SkRasterizer.cpp
new file mode 100644
index 000000000..994fb7f4b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRasterizer.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkRasterizer.h"
+#include "SkDraw.h"
+#include "SkMaskFilter.h"
+#include "SkPath.h"
+#include "SkStrokeRec.h"
+
+bool SkRasterizer::rasterize(const SkPath& fillPath, const SkMatrix& matrix,
+ const SkIRect* clipBounds, SkMaskFilter* filter,
+ SkMask* mask, SkMask::CreateMode mode) const {
+ SkIRect storage;
+
+ if (clipBounds && filter && SkMask::kJustRenderImage_CreateMode != mode) {
+ SkIPoint margin;
+ SkMask srcM, dstM;
+
+ srcM.fFormat = SkMask::kA8_Format;
+ srcM.fBounds.set(0, 0, 1, 1);
+ if (!filter->filterMask(&dstM, srcM, matrix, &margin)) {
+ return false;
+ }
+ storage = clipBounds->makeOutset(margin.fX, margin.fY);
+ clipBounds = &storage;
+ }
+
+ return this->onRasterize(fillPath, matrix, clipBounds, mask, mode);
+}
+
+/* Our default implementation of the virtual method just scan converts
+*/
+bool SkRasterizer::onRasterize(const SkPath& fillPath, const SkMatrix& matrix,
+ const SkIRect* clipBounds,
+ SkMask* mask, SkMask::CreateMode mode) const {
+ SkPath devPath;
+
+ fillPath.transform(matrix, &devPath);
+ return SkDraw::DrawToMask(devPath, clipBounds, nullptr, nullptr, mask, mode,
+ SkStrokeRec::kFill_InitStyle);
+}
diff --git a/gfx/skia/skia/src/core/SkReadBuffer.cpp b/gfx/skia/skia/src/core/SkReadBuffer.cpp
new file mode 100644
index 000000000..7679622c5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkReadBuffer.cpp
@@ -0,0 +1,399 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmap.h"
+#include "SkDeduper.h"
+#include "SkErrorInternals.h"
+#include "SkImage.h"
+#include "SkImageDeserializer.h"
+#include "SkImageGenerator.h"
+#include "SkReadBuffer.h"
+#include "SkStream.h"
+#include "SkTypeface.h"
+
+namespace {
+
+ // This generator intentionally should always fail on all attempts to get its pixels,
+ // simulating a bad or empty codec stream.
+ class EmptyImageGenerator final : public SkImageGenerator {
+ public:
+ EmptyImageGenerator(const SkImageInfo& info) : INHERITED(info) { }
+
+ private:
+ typedef SkImageGenerator INHERITED;
+ };
+
+ static sk_sp<SkImage> MakeEmptyImage(int width, int height) {
+ return SkImage::MakeFromGenerator(
+ new EmptyImageGenerator(SkImageInfo::MakeN32Premul(width, height)));
+ }
+
+} // anonymous namespace
+
+
+static uint32_t default_flags() {
+ uint32_t flags = 0;
+ flags |= SkReadBuffer::kScalarIsFloat_Flag;
+ if (8 == sizeof(void*)) {
+ flags |= SkReadBuffer::kPtrIs64Bit_Flag;
+ }
+ return flags;
+}
+
+// This has an empty constructor and destructor, and is thread-safe, so we can use a singleton.
+static SkImageDeserializer gDefaultImageDeserializer;
+
+SkReadBuffer::SkReadBuffer() {
+ fFlags = default_flags();
+ fVersion = 0;
+ fMemoryPtr = nullptr;
+
+ fTFArray = nullptr;
+ fTFCount = 0;
+
+ fFactoryArray = nullptr;
+ fFactoryCount = 0;
+ fImageDeserializer = &gDefaultImageDeserializer;
+#ifdef DEBUG_NON_DETERMINISTIC_ASSERT
+ fDecodedBitmapIndex = -1;
+#endif // DEBUG_NON_DETERMINISTIC_ASSERT
+}
+
+SkReadBuffer::SkReadBuffer(const void* data, size_t size) {
+ fFlags = default_flags();
+ fVersion = 0;
+ fReader.setMemory(data, size);
+ fMemoryPtr = nullptr;
+
+ fTFArray = nullptr;
+ fTFCount = 0;
+
+ fFactoryArray = nullptr;
+ fFactoryCount = 0;
+ fImageDeserializer = &gDefaultImageDeserializer;
+#ifdef DEBUG_NON_DETERMINISTIC_ASSERT
+ fDecodedBitmapIndex = -1;
+#endif // DEBUG_NON_DETERMINISTIC_ASSERT
+}
+
+SkReadBuffer::SkReadBuffer(SkStream* stream) {
+ fFlags = default_flags();
+ fVersion = 0;
+ const size_t length = stream->getLength();
+ fMemoryPtr = sk_malloc_throw(length);
+ stream->read(fMemoryPtr, length);
+ fReader.setMemory(fMemoryPtr, length);
+
+ fTFArray = nullptr;
+ fTFCount = 0;
+
+ fFactoryArray = nullptr;
+ fFactoryCount = 0;
+ fImageDeserializer = &gDefaultImageDeserializer;
+#ifdef DEBUG_NON_DETERMINISTIC_ASSERT
+ fDecodedBitmapIndex = -1;
+#endif // DEBUG_NON_DETERMINISTIC_ASSERT
+}
+
+SkReadBuffer::~SkReadBuffer() {
+ sk_free(fMemoryPtr);
+}
+
+void SkReadBuffer::setImageDeserializer(SkImageDeserializer* deserializer) {
+ fImageDeserializer = deserializer ? deserializer : &gDefaultImageDeserializer;
+}
+
+bool SkReadBuffer::readBool() {
+ return fReader.readBool();
+}
+
+SkColor SkReadBuffer::readColor() {
+ return fReader.readInt();
+}
+
+int32_t SkReadBuffer::readInt() {
+ return fReader.readInt();
+}
+
+SkScalar SkReadBuffer::readScalar() {
+ return fReader.readScalar();
+}
+
+uint32_t SkReadBuffer::readUInt() {
+ return fReader.readU32();
+}
+
+int32_t SkReadBuffer::read32() {
+ return fReader.readInt();
+}
+
+uint8_t SkReadBuffer::peekByte() {
+ SkASSERT(fReader.available() > 0);
+ return *((uint8_t*) fReader.peek());
+}
+
+void SkReadBuffer::readString(SkString* string) {
+ size_t len;
+ const char* strContents = fReader.readString(&len);
+ string->set(strContents, len);
+}
+
+void SkReadBuffer::readColor4f(SkColor4f* color) {
+ memcpy(color, fReader.skip(sizeof(SkColor4f)), sizeof(SkColor4f));
+}
+
+void SkReadBuffer::readPoint(SkPoint* point) {
+ point->fX = fReader.readScalar();
+ point->fY = fReader.readScalar();
+}
+
+void SkReadBuffer::readMatrix(SkMatrix* matrix) {
+ fReader.readMatrix(matrix);
+}
+
+void SkReadBuffer::readIRect(SkIRect* rect) {
+ memcpy(rect, fReader.skip(sizeof(SkIRect)), sizeof(SkIRect));
+}
+
+void SkReadBuffer::readRect(SkRect* rect) {
+ memcpy(rect, fReader.skip(sizeof(SkRect)), sizeof(SkRect));
+}
+
+void SkReadBuffer::readRRect(SkRRect* rrect) {
+ fReader.readRRect(rrect);
+}
+
+void SkReadBuffer::readRegion(SkRegion* region) {
+ fReader.readRegion(region);
+}
+
+void SkReadBuffer::readPath(SkPath* path) {
+ fReader.readPath(path);
+}
+
+bool SkReadBuffer::readArray(void* value, size_t size, size_t elementSize) {
+ const size_t count = this->getArrayCount();
+ if (count == size) {
+ (void)fReader.skip(sizeof(uint32_t)); // Skip array count
+ const size_t byteLength = count * elementSize;
+ memcpy(value, fReader.skip(SkAlign4(byteLength)), byteLength);
+ return true;
+ }
+ SkASSERT(false);
+ fReader.skip(fReader.available());
+ return false;
+}
+
+bool SkReadBuffer::readByteArray(void* value, size_t size) {
+ return readArray(static_cast<unsigned char*>(value), size, sizeof(unsigned char));
+}
+
+bool SkReadBuffer::readColorArray(SkColor* colors, size_t size) {
+ return readArray(colors, size, sizeof(SkColor));
+}
+
+bool SkReadBuffer::readColor4fArray(SkColor4f* colors, size_t size) {
+ return readArray(colors, size, sizeof(SkColor4f));
+}
+
+bool SkReadBuffer::readIntArray(int32_t* values, size_t size) {
+ return readArray(values, size, sizeof(int32_t));
+}
+
+bool SkReadBuffer::readPointArray(SkPoint* points, size_t size) {
+ return readArray(points, size, sizeof(SkPoint));
+}
+
+bool SkReadBuffer::readScalarArray(SkScalar* values, size_t size) {
+ return readArray(values, size, sizeof(SkScalar));
+}
+
+uint32_t SkReadBuffer::getArrayCount() {
+ return *(uint32_t*)fReader.peek();
+}
+
+sk_sp<SkImage> SkReadBuffer::readBitmapAsImage() {
+ const int width = this->readInt();
+ const int height = this->readInt();
+
+ // The writer stored a boolean value to determine whether an SkBitmapHeap was used during
+ // writing. That feature is deprecated.
+ if (this->readBool()) {
+ this->readUInt(); // Bitmap index
+ this->readUInt(); // Bitmap generation ID
+ SkErrorInternals::SetError(kParseError_SkError, "SkWriteBuffer::writeBitmap "
+ "stored the SkBitmap in an SkBitmapHeap, but "
+ "that feature is no longer supported.");
+ } else {
+ // The writer stored false, meaning the SkBitmap was not stored in an SkBitmapHeap.
+ const size_t length = this->readUInt();
+ if (length > 0) {
+#ifdef DEBUG_NON_DETERMINISTIC_ASSERT
+ fDecodedBitmapIndex++;
+#endif // DEBUG_NON_DETERMINISTIC_ASSERT
+ // A non-zero size means the SkBitmap was encoded. Read the data and pixel
+ // offset.
+ const void* data = this->skip(length);
+ const int32_t xOffset = this->readInt();
+ const int32_t yOffset = this->readInt();
+ SkIRect subset = SkIRect::MakeXYWH(xOffset, yOffset, width, height);
+ sk_sp<SkImage> image = fImageDeserializer->makeFromMemory(data, length, &subset);
+ if (image) {
+ return image;
+ }
+
+ // This bitmap was encoded when written, but we are unable to decode, possibly due to
+ // not having a decoder.
+ SkErrorInternals::SetError(kParseError_SkError,
+ "Could not decode bitmap. Resulting bitmap will be empty.");
+            // Even though we weren't able to decode the pixels, the read buffer should still be
+            // intact, so we return an empty image rather than forcing an abort of the larger
+            // deserialization.
+ return MakeEmptyImage(width, height);
+ } else {
+ SkBitmap bitmap;
+ if (SkBitmap::ReadRawPixels(this, &bitmap)) {
+ bitmap.setImmutable();
+ return SkImage::MakeFromBitmap(bitmap);
+ }
+ }
+ }
+ // Could not read the SkBitmap. Use a placeholder bitmap.
+ return nullptr;
+}
+
+sk_sp<SkImage> SkReadBuffer::readImage() {
+ if (fInflator) {
+ SkImage* img = fInflator->getImage(this->read32());
+ return img ? sk_ref_sp(img) : nullptr;
+ }
+
+ int width = this->read32();
+ int height = this->read32();
+ if (width <= 0 || height <= 0) { // SkImage never has a zero dimension
+ this->validate(false);
+ return nullptr;
+ }
+
+ uint32_t encoded_size = this->getArrayCount();
+ if (encoded_size == 0) {
+ // The image could not be encoded at serialization time - return an empty placeholder.
+ (void)this->readUInt(); // Swallow that encoded_size == 0 sentinel.
+ return MakeEmptyImage(width, height);
+ }
+ if (encoded_size == 1) {
+ // We had to encode the image as raw pixels via SkBitmap.
+ (void)this->readUInt(); // Swallow that encoded_size == 1 sentinel.
+ SkBitmap bm;
+ if (SkBitmap::ReadRawPixels(this, &bm)) {
+ return SkImage::MakeFromBitmap(bm);
+ }
+ return MakeEmptyImage(width, height);
+ }
+
+ // The SkImage encoded itself.
+ sk_sp<SkData> encoded(this->readByteArrayAsData());
+
+ int originX = this->read32();
+ int originY = this->read32();
+ if (originX < 0 || originY < 0) {
+ this->validate(false);
+ return nullptr;
+ }
+
+ const SkIRect subset = SkIRect::MakeXYWH(originX, originY, width, height);
+
+ sk_sp<SkImage> image = fImageDeserializer->makeFromData(encoded.get(), &subset);
+ return image ? image : MakeEmptyImage(width, height);
+}
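+
+// Editor's note, not part of the original patch: a summary of the cases readImage() handles,
+// as inferred from the reads above (the matching writer lives in SkWriteBuffer):
+//   width, height      both must be positive
+//   byte count == 0    the image could not be encoded; an empty placeholder is returned
+//   byte count == 1    raw pixels follow, read back via SkBitmap::ReadRawPixels()
+//   otherwise          encoded bytes follow, then an (originX, originY) subset origin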
+
+sk_sp<SkTypeface> SkReadBuffer::readTypeface() {
+ if (fInflator) {
+ return sk_ref_sp(fInflator->getTypeface(this->read32()));
+ }
+
+ uint32_t index = fReader.readU32();
+ if (0 == index || index > (unsigned)fTFCount) {
+ return nullptr;
+ } else {
+ SkASSERT(fTFArray);
+ return sk_ref_sp(fTFArray[index - 1]);
+ }
+}
+
+SkFlattenable* SkReadBuffer::readFlattenable(SkFlattenable::Type ft) {
+ //
+ // TODO: confirm that ft matches the factory we decide to use
+ //
+
+ SkFlattenable::Factory factory = nullptr;
+
+ if (fInflator) {
+ factory = fInflator->getFactory(this->read32());
+ if (!factory) {
+ return nullptr;
+ }
+ } else if (fFactoryCount > 0) {
+ int32_t index = fReader.readU32();
+ if (0 == index) {
+ return nullptr; // writer failed to give us the flattenable
+ }
+ index -= 1; // we stored the index-base-1
+ if ((unsigned)index >= (unsigned)fFactoryCount) {
+ this->validate(false);
+ return nullptr;
+ }
+ factory = fFactoryArray[index];
+ } else {
+ SkString name;
+ if (this->peekByte()) {
+ // If the first byte is non-zero, the flattenable is specified by a string.
+ this->readString(&name);
+
+ // Add the string to the dictionary.
+ fFlattenableDict.set(fFlattenableDict.count() + 1, name);
+ } else {
+ // Read the index. We are guaranteed that the first byte
+ // is zeroed, so we must shift down a byte.
+ uint32_t index = fReader.readU32() >> 8;
+ if (0 == index) {
+ return nullptr; // writer failed to give us the flattenable
+ }
+
+ SkString* namePtr = fFlattenableDict.find(index);
+ SkASSERT(namePtr);
+ name = *namePtr;
+ }
+
+ // Check if a custom Factory has been specified for this flattenable.
+ if (!(factory = this->getCustomFactory(name))) {
+ // If there is no custom Factory, check for a default.
+ if (!(factory = SkFlattenable::NameToFactory(name.c_str()))) {
+ return nullptr; // writer failed to give us the flattenable
+ }
+ }
+ }
+
+ // if we get here, factory may still be null, but if that is the case, the
+ // failure was ours, not the writer.
+ sk_sp<SkFlattenable> obj;
+ uint32_t sizeRecorded = fReader.readU32();
+ if (factory) {
+ size_t offset = fReader.offset();
+ obj = (*factory)(*this);
+ // check that we read the amount we expected
+ size_t sizeRead = fReader.offset() - offset;
+ if (sizeRecorded != sizeRead) {
+ this->validate(false);
+ return nullptr;
+ }
+ } else {
+ // we must skip the remaining data
+ fReader.skip(sizeRecorded);
+ }
+ return obj.release();
+}
diff --git a/gfx/skia/skia/src/core/SkReadBuffer.h b/gfx/skia/skia/src/core/SkReadBuffer.h
new file mode 100644
index 000000000..6b0d332e0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkReadBuffer.h
@@ -0,0 +1,275 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkReadBuffer_DEFINED
+#define SkReadBuffer_DEFINED
+
+#include "SkColorFilter.h"
+#include "SkData.h"
+#include "SkDrawLooper.h"
+#include "SkImageFilter.h"
+#include "SkMaskFilter.h"
+#include "SkPath.h"
+#include "SkPathEffect.h"
+#include "SkPicture.h"
+#include "SkRasterizer.h"
+#include "SkReadBuffer.h"
+#include "SkReader32.h"
+#include "SkRefCnt.h"
+#include "SkShader.h"
+#include "SkTHash.h"
+#include "SkWriteBuffer.h"
+#include "SkXfermode.h"
+
+class SkBitmap;
+class SkImage;
+class SkInflator;
+
+#if defined(SK_DEBUG) && defined(SK_BUILD_FOR_MAC)
+ #define DEBUG_NON_DETERMINISTIC_ASSERT
+#endif
+
+class SkReadBuffer {
+public:
+ SkReadBuffer();
+ SkReadBuffer(const void* data, size_t size);
+ SkReadBuffer(SkStream* stream);
+ virtual ~SkReadBuffer();
+
+ virtual SkReadBuffer* clone(const void* data, size_t size) const {
+ return new SkReadBuffer(data, size);
+ }
+
+ enum Version {
+ /*
+ kFilterLevelIsEnum_Version = 23,
+ kGradientFlippedFlag_Version = 24,
+ kDashWritesPhaseIntervals_Version = 25,
+ kColorShaderNoBool_Version = 26,
+ kNoUnitMappers_Version = 27,
+ kNoMoreBitmapFlatten_Version = 28,
+ kSimplifyLocalMatrix_Version = 30,
+ kImageFilterUniqueID_Version = 31,
+ kRemoveAndroidPaintOpts_Version = 32,
+ kFlattenCreateProc_Version = 33,
+ */
+ kRemoveColorTableAlpha_Version = 36,
+ kDropShadowMode_Version = 37,
+ kPictureImageFilterResolution_Version = 38,
+ kPictureImageFilterLevel_Version = 39,
+ kImageFilterNoUniqueID_Version = 40,
+ kBitmapSourceFilterQuality_Version = 41,
+ kPictureShaderHasPictureBool_Version = 42,
+ kHasDrawImageOpCodes_Version = 43,
+ kAnnotationsMovedToCanvas_Version = 44,
+ kLightingShaderWritesInvNormRotation = 45,
+ kBlurMaskFilterWritesOccluder = 47,
+ kGradientShaderFloatColor_Version = 49,
+ kXfermodeToBlendMode_Version = 50,
+ };
+
+ /**
+ * Returns true IFF the version is older than the specified version.
+ */
+ bool isVersionLT(Version targetVersion) const {
+ SkASSERT(targetVersion > 0);
+ return fVersion > 0 && fVersion < targetVersion;
+ }
+
+ uint32_t getVersion() const { return fVersion; }
+
+ /** This may be called at most once; most clients of SkReadBuffer should not mess with it. */
+ void setVersion(int version) {
+ SkASSERT(0 == fVersion || version == fVersion);
+ fVersion = version;
+ }
+
+ enum Flags {
+ kCrossProcess_Flag = 1 << 0,
+ kScalarIsFloat_Flag = 1 << 1,
+ kPtrIs64Bit_Flag = 1 << 2,
+ kValidation_Flag = 1 << 3,
+ };
+
+ void setFlags(uint32_t flags) { fFlags = flags; }
+ uint32_t getFlags() const { return fFlags; }
+
+ bool isCrossProcess() const {
+ return this->isValidating() || SkToBool(fFlags & kCrossProcess_Flag);
+ }
+ bool isScalarFloat() const { return SkToBool(fFlags & kScalarIsFloat_Flag); }
+ bool isPtr64Bit() const { return SkToBool(fFlags & kPtrIs64Bit_Flag); }
+ bool isValidating() const { return SkToBool(fFlags & kValidation_Flag); }
+
+ SkReader32* getReader32() { return &fReader; }
+
+ size_t size() { return fReader.size(); }
+ size_t offset() { return fReader.offset(); }
+ bool eof() { return fReader.eof(); }
+ virtual const void* skip(size_t size) { return fReader.skip(size); }
+
+ // primitives
+ virtual bool readBool();
+ virtual SkColor readColor();
+ virtual int32_t readInt();
+ virtual SkScalar readScalar();
+ virtual uint32_t readUInt();
+ virtual int32_t read32();
+
+ // peek
+ virtual uint8_t peekByte();
+
+ // strings -- the caller is responsible for freeing the string contents
+ virtual void readString(SkString* string);
+
+ // common data structures
+ virtual void readColor4f(SkColor4f* color);
+ virtual void readPoint(SkPoint* point);
+ SkPoint readPoint() { SkPoint p; this->readPoint(&p); return p; }
+ virtual void readMatrix(SkMatrix* matrix);
+ virtual void readIRect(SkIRect* rect);
+ virtual void readRect(SkRect* rect);
+ virtual void readRRect(SkRRect* rrect);
+ virtual void readRegion(SkRegion* region);
+
+ virtual void readPath(SkPath* path);
+ virtual void readPaint(SkPaint* paint) { paint->unflatten(*this); }
+
+ virtual SkFlattenable* readFlattenable(SkFlattenable::Type);
+ template <typename T> sk_sp<T> readFlattenable() {
+ return sk_sp<T>((T*)this->readFlattenable(T::GetFlattenableType()));
+ }
+ sk_sp<SkColorFilter> readColorFilter() { return this->readFlattenable<SkColorFilter>(); }
+ sk_sp<SkDrawLooper> readDrawLooper() { return this->readFlattenable<SkDrawLooper>(); }
+ sk_sp<SkImageFilter> readImageFilter() { return this->readFlattenable<SkImageFilter>(); }
+ sk_sp<SkMaskFilter> readMaskFilter() { return this->readFlattenable<SkMaskFilter>(); }
+ sk_sp<SkPathEffect> readPathEffect() { return this->readFlattenable<SkPathEffect>(); }
+ sk_sp<SkRasterizer> readRasterizer() { return this->readFlattenable<SkRasterizer>(); }
+ sk_sp<SkShader> readShader() { return this->readFlattenable<SkShader>(); }
+ sk_sp<SkXfermode> readXfermode() { return this->readFlattenable<SkXfermode>(); }
+
+ // binary data and arrays
+ virtual bool readByteArray(void* value, size_t size);
+ virtual bool readColorArray(SkColor* colors, size_t size);
+ virtual bool readColor4fArray(SkColor4f* colors, size_t size);
+ virtual bool readIntArray(int32_t* values, size_t size);
+ virtual bool readPointArray(SkPoint* points, size_t size);
+ virtual bool readScalarArray(SkScalar* values, size_t size);
+
+ sk_sp<SkData> readByteArrayAsData() {
+ size_t len = this->getArrayCount();
+ if (!this->validateAvailable(len)) {
+ return SkData::MakeEmpty();
+ }
+ void* buffer = sk_malloc_throw(len);
+ this->readByteArray(buffer, len);
+ return SkData::MakeFromMalloc(buffer, len);
+ }
+
+ // helpers to get info about arrays and binary data
+ virtual uint32_t getArrayCount();
+
+ sk_sp<SkImage> readBitmapAsImage();
+ sk_sp<SkImage> readImage();
+ virtual sk_sp<SkTypeface> readTypeface();
+
+ void setTypefaceArray(SkTypeface* array[], int count) {
+ fTFArray = array;
+ fTFCount = count;
+ }
+
+ /**
+ * Call this with a pre-loaded array of Factories, in the same order as
+ * were created/written by the writer. SkPicture uses this.
+ */
+ void setFactoryPlayback(SkFlattenable::Factory array[], int count) {
+ fFactoryArray = array;
+ fFactoryCount = count;
+ }
+
+ /**
+ * For an input flattenable (specified by name), set a custom factory proc
+ * to use when unflattening. Will make a copy of |name|.
+ *
+ * If the global registry already has a default factory for the flattenable,
+ * this will override that factory. If a custom factory has already been
+ * set for the flattenable, this will override that factory.
+ *
+ * Custom factories can be removed by calling setCustomFactory("...", nullptr).
+ */
+ void setCustomFactory(const SkString& name, SkFlattenable::Factory factory) {
+ fCustomFactory.set(name, factory);
+ }
+
+ // If nullptr is passed, then the default deserializer will be used
+ // which calls SkImage::MakeFromEncoded()
+ void setImageDeserializer(SkImageDeserializer* factory);
+
+    // Default implementations don't check anything.
+ virtual bool validate(bool isValid) { return isValid; }
+ virtual bool isValid() const { return true; }
+ virtual bool validateAvailable(size_t size) { return true; }
+ bool validateIndex(int index, int count) {
+ return this->validate(index >= 0 && index < count);
+ }
+
+ SkInflator* getInflator() const { return fInflator; }
+ void setInflator(SkInflator* inf) { fInflator = inf; }
+
+// sk_sp<SkImage> inflateImage();
+
+protected:
+ /**
+ * Allows subclass to check if we are using factories for expansion
+ * of flattenables.
+ */
+ int factoryCount() { return fFactoryCount; }
+
+ /**
+ * Checks if a custom factory has been set for a given flattenable.
+ * Returns the custom factory if it exists, or nullptr otherwise.
+ */
+ SkFlattenable::Factory getCustomFactory(const SkString& name) {
+ SkFlattenable::Factory* factoryPtr = fCustomFactory.find(name);
+ return factoryPtr ? *factoryPtr : nullptr;
+ }
+
+ SkReader32 fReader;
+
+ // Only used if we do not have an fFactoryArray.
+ SkTHashMap<uint32_t, SkString> fFlattenableDict;
+
+private:
+ bool readArray(void* value, size_t size, size_t elementSize);
+
+ uint32_t fFlags;
+ int fVersion;
+
+ void* fMemoryPtr;
+
+ SkTypeface** fTFArray;
+ int fTFCount;
+
+ SkFlattenable::Factory* fFactoryArray;
+ int fFactoryCount;
+
+ // Only used if we do not have an fFactoryArray.
+ SkTHashMap<SkString, SkFlattenable::Factory> fCustomFactory;
+
+ // We do not own this ptr, we just use it (guaranteed to never be null)
+ SkImageDeserializer* fImageDeserializer;
+
+#ifdef DEBUG_NON_DETERMINISTIC_ASSERT
+ // Debugging counter to keep track of how many bitmaps we
+ // have decoded.
+ int fDecodedBitmapIndex;
+#endif // DEBUG_NON_DETERMINISTIC_ASSERT
+
+ SkInflator* fInflator = nullptr;
+};
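+
+// Editor's illustrative sketch, not part of the original patch: reading values back in the
+// same order some writer emitted them -- that symmetry with SkWriteBuffer is the whole
+// contract. data and size are hypothetical inputs.
+//
+//   SkReadBuffer reader(data, size);
+//   SkScalar strokeWidth = reader.readScalar();
+//   SkRect   cull;   reader.readRect(&cull);
+//   SkMatrix localM; reader.readMatrix(&localM);
+//   sk_sp<SkShader> shader = reader.readShader();   // dispatches through readFlattenable()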
+
+#endif // SkReadBuffer_DEFINED
diff --git a/gfx/skia/skia/src/core/SkReader32.h b/gfx/skia/skia/src/core/SkReader32.h
new file mode 100644
index 000000000..7e31fb9e2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkReader32.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkReader32_DEFINED
+#define SkReader32_DEFINED
+
+#include "SkData.h"
+#include "SkMatrix.h"
+#include "SkPath.h"
+#include "SkRegion.h"
+#include "SkRRect.h"
+#include "SkScalar.h"
+
+class SkString;
+
+class SkReader32 : SkNoncopyable {
+public:
+ SkReader32() : fCurr(nullptr), fStop(nullptr), fBase(nullptr) {}
+ SkReader32(const void* data, size_t size) {
+ this->setMemory(data, size);
+ }
+
+ void setMemory(const void* data, size_t size) {
+ SkASSERT(ptr_align_4(data));
+ SkASSERT(SkAlign4(size) == size);
+
+ fBase = fCurr = (const char*)data;
+ fStop = (const char*)data + size;
+ }
+
+ size_t size() const { return fStop - fBase; }
+ size_t offset() const { return fCurr - fBase; }
+ bool eof() const { return fCurr >= fStop; }
+ const void* base() const { return fBase; }
+ const void* peek() const { return fCurr; }
+
+ size_t available() const { return fStop - fCurr; }
+ bool isAvailable(size_t size) const { return size <= this->available(); }
+
+ void rewind() { fCurr = fBase; }
+
+ void setOffset(size_t offset) {
+ SkASSERT(SkAlign4(offset) == offset);
+ SkASSERT(offset <= this->size());
+ fCurr = fBase + offset;
+ }
+
+ bool readBool() { return this->readInt() != 0; }
+
+ int32_t readInt() {
+ SkASSERT(ptr_align_4(fCurr));
+ int32_t value = *(const int32_t*)fCurr;
+ fCurr += sizeof(value);
+ SkASSERT(fCurr <= fStop);
+ return value;
+ }
+
+ void* readPtr() {
+ void* ptr;
+ // we presume this "if" is resolved at compile-time
+ if (4 == sizeof(void*)) {
+ ptr = *(void**)fCurr;
+ } else {
+ memcpy(&ptr, fCurr, sizeof(void*));
+ }
+ fCurr += sizeof(void*);
+ return ptr;
+ }
+
+ SkScalar readScalar() {
+ SkASSERT(ptr_align_4(fCurr));
+ SkScalar value = *(const SkScalar*)fCurr;
+ fCurr += sizeof(value);
+ SkASSERT(fCurr <= fStop);
+ return value;
+ }
+
+ const void* skip(size_t size) {
+ SkASSERT(ptr_align_4(fCurr));
+ const void* addr = fCurr;
+ fCurr += SkAlign4(size);
+ SkASSERT(fCurr <= fStop);
+ return addr;
+ }
+
+ template <typename T> const T& skipT() {
+ SkASSERT(SkAlign4(sizeof(T)) == sizeof(T));
+ return *(const T*)this->skip(sizeof(T));
+ }
+
+ void read(void* dst, size_t size) {
+ SkASSERT(0 == size || dst != nullptr);
+ SkASSERT(ptr_align_4(fCurr));
+ memcpy(dst, fCurr, size);
+ fCurr += SkAlign4(size);
+ SkASSERT(fCurr <= fStop);
+ }
+
+ uint8_t readU8() { return (uint8_t)this->readInt(); }
+ uint16_t readU16() { return (uint16_t)this->readInt(); }
+ int32_t readS32() { return this->readInt(); }
+ uint32_t readU32() { return this->readInt(); }
+
+ bool readPath(SkPath* path) {
+ return this->readObjectFromMemory(path);
+ }
+
+ bool readMatrix(SkMatrix* matrix) {
+ return this->readObjectFromMemory(matrix);
+ }
+
+ bool readRRect(SkRRect* rrect) {
+ return this->readObjectFromMemory(rrect);
+ }
+
+ bool readRegion(SkRegion* rgn) {
+ return this->readObjectFromMemory(rgn);
+ }
+
+ /**
+ * Read the length of a string (written by SkWriter32::writeString) into
+ * len (if len is not nullptr) and return the address of the null-terminated
+ * string within the reader's buffer.
+ */
+ const char* readString(size_t* len = nullptr);
+
+ /**
+ * Read the string (written by SkWriter32::writeString) and return it in
+ * copy (if copy is not null). Return the length of the string.
+ */
+ size_t readIntoString(SkString* copy);
+
+ sk_sp<SkData> readData() {
+ uint32_t byteLength = this->readU32();
+ if (0 == byteLength) {
+ return SkData::MakeEmpty();
+ }
+ return SkData::MakeWithCopy(this->skip(byteLength), byteLength);
+ }
+
+private:
+ template <typename T> bool readObjectFromMemory(T* obj) {
+ size_t size = obj->readFromMemory(this->peek(), this->available());
+ // If readFromMemory() fails (which means that available() was too small), it returns 0
+ bool success = (size > 0) && (size <= this->available()) && (SkAlign4(size) == size);
+ // In case of failure, we want to skip to the end
+ (void)this->skip(success ? size : this->available());
+ return success;
+ }
+
+ // these are always 4-byte aligned
+ const char* fCurr; // current position within buffer
+ const char* fStop; // end of buffer
+ const char* fBase; // beginning of buffer
+
+#ifdef SK_DEBUG
+ static bool ptr_align_4(const void* ptr) {
+ return (((const char*)ptr - (const char*)nullptr) & 3) == 0;
+ }
+#endif
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkRecord.cpp b/gfx/skia/skia/src/core/SkRecord.cpp
new file mode 100644
index 000000000..3685b2d16
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecord.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkRecord.h"
+#include <algorithm>
+
+SkRecord::~SkRecord() {
+ Destroyer destroyer;
+ for (int i = 0; i < this->count(); i++) {
+ this->mutate(i, destroyer);
+ }
+}
+
+void SkRecord::grow() {
+ SkASSERT(fCount == fReserved);
+ SkASSERT(fReserved > 0);
+ fReserved *= 2;
+ fRecords.realloc(fReserved);
+}
+
+size_t SkRecord::bytesUsed() const {
+ size_t bytes = fAlloc.approxBytesAllocated() + sizeof(SkRecord);
+ // If fReserved <= kInlineRecords, we've already accounted for fRecords with sizeof(SkRecord).
+ // When we go over that limit, they're allocated on the heap (and the inline space is wasted).
+ if (fReserved > kInlineRecords) {
+ bytes += fReserved * sizeof(Record);
+ }
+ return bytes;
+}
+
+void SkRecord::defrag() {
+ // Remove all the NoOps, preserving the order of other ops, e.g.
+ // Save, ClipRect, NoOp, DrawRect, NoOp, NoOp, Restore
+ // -> Save, ClipRect, DrawRect, Restore
+ Record* noops = std::remove_if(fRecords.get(), fRecords.get() + fCount,
+ [](Record op) { return op.type() == SkRecords::NoOp_Type; });
+ fCount = noops - fRecords.get();
+}
diff --git a/gfx/skia/skia/src/core/SkRecord.h b/gfx/skia/skia/src/core/SkRecord.h
new file mode 100644
index 000000000..79fe5232f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecord.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRecord_DEFINED
+#define SkRecord_DEFINED
+
+#include "SkRecords.h"
+#include "SkTLogic.h"
+#include "SkTemplates.h"
+#include "SkVarAlloc.h"
+
+// SkRecord represents a sequence of SkCanvas calls, saved for future use.
+// These future uses may include: replay, optimization, serialization, or combinations of those.
+//
+// Though an enterprising user may find calling alloc(), append(), visit(), and mutate() enough to
+// work with SkRecord, you probably want to look at SkRecorder which presents an SkCanvas interface
+// for creating an SkRecord, and SkRecordDraw which plays an SkRecord back into another SkCanvas.
+//
+// SkRecord often looks like it's compatible with any type T, but really it's compatible with any
+// type T which has a static const SkRecords::Type kType. That is to say, SkRecord is compatible
+// only with SkRecords::* structs defined in SkRecords.h. Your compiler will helpfully yell if you
+// get this wrong.
+
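+// A minimal end-to-end sketch (illustrative only; assumes a valid SkCanvas* named canvas):
+//
+//   SkRecord record;
+//   SkRecorder recorder(&record, 100, 100, nullptr);          // SkCanvas interface that fills record
+//   recorder.drawRect(SkRect::MakeWH(50, 50), SkPaint());
+//   SkRecordDraw(record, canvas, nullptr /*drawablePicts*/, nullptr /*drawables*/, 0,
+//                nullptr /*bbh*/, nullptr /*callback*/);      // play record back into canvas
+//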
+class SkRecord : public SkNVRefCnt<SkRecord> {
+ enum {
+ // TODO: tune these two constants.
+ kInlineRecords = 4, // Ideally our lower limit on recorded ops per picture.
+ kInlineAllocLgBytes = 8, // 1<<8 == 256 bytes inline, then SkVarAlloc starting at 512 bytes.
+ };
+public:
+ SkRecord()
+ : fCount(0)
+ , fReserved(kInlineRecords)
+ , fAlloc(kInlineAllocLgBytes+1, // First malloc'd block is 2x as large as fInlineAlloc.
+ fInlineAlloc, sizeof(fInlineAlloc)) {}
+ ~SkRecord();
+
+ // Returns the number of canvas commands in this SkRecord.
+ int count() const { return fCount; }
+
+ // Visit the i-th canvas command with a functor matching this interface:
+ // template <typename T>
+ // R operator()(const T& record) { ... }
+ // This operator() must be defined for at least all SkRecords::*.
+ template <typename F>
+ auto visit(int i, F&& f) const -> decltype(f(SkRecords::NoOp())) {
+ return fRecords[i].visit(f);
+ }
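+ // For example, a sketch of a functor that counts DrawRect commands (assumes an SkRecord
+ // named record; any return type works as long as every operator() agrees on it):
+ //
+ //   struct CountRects {
+ //       int n = 0;
+ //       void operator()(const SkRecords::DrawRect&) { n++; }
+ //       template <typename T> void operator()(const T&) {}   // ignore every other op
+ //   };
+ //   CountRects counter;
+ //   for (int i = 0; i < record.count(); i++) { record.visit(i, counter); }
+ //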
+
+ // Mutate the i-th canvas command with a functor matching this interface:
+ // template <typename T>
+ // R operator()(T* record) { ... }
+ // This operator() must be defined for at least all SkRecords::*.
+ template <typename F>
+ auto mutate(int i, F&& f) -> decltype(f((SkRecords::NoOp*)nullptr)) {
+ return fRecords[i].mutate(f);
+ }
+
+ // Allocate contiguous space for count Ts, to be freed when the SkRecord is destroyed.
+ // Here T can be any class, not just those from SkRecords. Throws on failure.
+ template <typename T>
+ T* alloc(size_t count = 1) {
+ return (T*)fAlloc.alloc(sizeof(T) * count);
+ }
+
+ // Add a new command of type T to the end of this SkRecord.
+ // You are expected to placement new an object of type T onto this pointer.
+ template <typename T>
+ T* append() {
+ if (fCount == fReserved) {
+ this->grow();
+ }
+ return fRecords[fCount++].set(this->allocCommand<T>());
+ }
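+ // For example, a sketch of appending a DrawRect to an SkRecord* named record (constructor
+ // argument order follows the RECORD() definition in SkRecords.h):
+ //
+ //   SkPaint paint;
+ //   SkRect  rect = SkRect::MakeWH(10, 10);
+ //   new (record->append<SkRecords::DrawRect>()) SkRecords::DrawRect(paint, rect);
+ //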
+
+ // Replace the i-th command with a new command of type T.
+ // You are expected to placement new an object of type T onto this pointer.
+ // References to the original command are invalidated.
+ template <typename T>
+ T* replace(int i) {
+ SkASSERT(i < this->count());
+
+ Destroyer destroyer;
+ this->mutate(i, destroyer);
+
+ return fRecords[i].set(this->allocCommand<T>());
+ }
+
+ // Replace the i-th command with a new command of type T.
+ // You are expected to placement new an object of type T onto this pointer.
+ // You must show proof that you've already adopted the existing command.
+ template <typename T, typename Existing>
+ T* replace(int i, const SkRecords::Adopted<Existing>& proofOfAdoption) {
+ SkASSERT(i < this->count());
+
+ SkASSERT(Existing::kType == fRecords[i].type());
+ SkASSERT(proofOfAdoption == fRecords[i].ptr());
+
+ return fRecords[i].set(this->allocCommand<T>());
+ }
+
+ // Does not include the bytes of any objects the Records point to; callers
+ // need to iterate with a visitor to measure those they care about.
+ size_t bytesUsed() const;
+
+ // Rearrange and resize this record to eliminate any NoOps.
+ // May change count() and the indices of ops, but preserves their order.
+ void defrag();
+
+private:
+ // An SkRecord is structured as an array of pointers into a big chunk of memory where
+ // records representing each canvas draw call are stored:
+ //
+ // fRecords: [*][*][*]...
+ // | | |
+ // | | |
+ // | | +---------------------------------------+
+ // | +-----------------+ |
+ // | | |
+ // v v v
+ // fAlloc: [SkRecords::DrawRect][SkRecords::DrawPosTextH][SkRecords::DrawRect]...
+ //
+ // We store the types of each of the pointers alongside the pointer.
+ // The cost to append a T to this structure is 8 + sizeof(T) bytes.
+
+ // A mutator that can be used with replace to destroy canvas commands.
+ struct Destroyer {
+ template <typename T>
+ void operator()(T* record) { record->~T(); }
+ };
+
+ template <typename T>
+ SK_WHEN(std::is_empty<T>::value, T*) allocCommand() {
+ static T singleton = {};
+ return &singleton;
+ }
+
+ template <typename T>
+ SK_WHEN(!std::is_empty<T>::value, T*) allocCommand() { return this->alloc<T>(); }
+
+ void grow();
+
+ // A typed pointer to some bytes in fAlloc. visit() and mutate() allow polymorphic dispatch.
+ struct Record {
+ // On 32-bit machines we store type in 4 bytes, followed by a pointer. Simple.
+ // On 64-bit machines we store a pointer with the type slotted into two top (unused) bytes.
+ // FWIW, SkRecords::Type is tiny. It can easily fit in one byte.
+ uint64_t fTypeAndPtr;
+ static const int kTypeShift = sizeof(void*) == 4 ? 32 : 48;
+
+ // Point this record to its data in fAlloc. Returns ptr for convenience.
+ template <typename T>
+ T* set(T* ptr) {
+ fTypeAndPtr = ((uint64_t)T::kType) << kTypeShift | (uintptr_t)ptr;
+ SkASSERT(this->ptr() == ptr && this->type() == T::kType);
+ return ptr;
+ }
+
+ SkRecords::Type type() const { return (SkRecords::Type)(fTypeAndPtr >> kTypeShift); }
+ void* ptr() const { return (void*)(fTypeAndPtr & ((1ull<<kTypeShift)-1)); }
+
+ // Visit this record with functor F (see public API above).
+ template <typename F>
+ auto visit(F&& f) const -> decltype(f(SkRecords::NoOp())) {
+ #define CASE(T) case SkRecords::T##_Type: return f(*(const SkRecords::T*)this->ptr());
+ switch(this->type()) { SK_RECORD_TYPES(CASE) }
+ #undef CASE
+ SkDEBUGFAIL("Unreachable");
+ return f(SkRecords::NoOp());
+ }
+
+ // Mutate this record with functor F (see public API above).
+ template <typename F>
+ auto mutate(F&& f) -> decltype(f((SkRecords::NoOp*)nullptr)) {
+ #define CASE(T) case SkRecords::T##_Type: return f((SkRecords::T*)this->ptr());
+ switch(this->type()) { SK_RECORD_TYPES(CASE) }
+ #undef CASE
+ SkDEBUGFAIL("Unreachable");
+ return f((SkRecords::NoOp*)nullptr);
+ }
+ };
+
+ // fRecords needs to be a data structure that can append fixed-length data, and needs to
+ // support efficient random access and forward iteration. (It doesn't need to be contiguous.)
+ int fCount, fReserved;
+ SkAutoSTMalloc<kInlineRecords, Record> fRecords;
+
+ // fAlloc needs to be a data structure which can append variable length data in contiguous
+ // chunks, returning a stable handle to that data for later retrieval.
+ SkVarAlloc fAlloc;
+ char fInlineAlloc[1 << kInlineAllocLgBytes];
+};
+
+#endif//SkRecord_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRecordDraw.cpp b/gfx/skia/skia/src/core/SkRecordDraw.cpp
new file mode 100644
index 000000000..dca19df10
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordDraw.cpp
@@ -0,0 +1,633 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkRecordDraw.h"
+#include "SkPatchUtils.h"
+
+void SkRecordDraw(const SkRecord& record,
+ SkCanvas* canvas,
+ SkPicture const* const drawablePicts[],
+ SkDrawable* const drawables[],
+ int drawableCount,
+ const SkBBoxHierarchy* bbh,
+ SkPicture::AbortCallback* callback) {
+ SkAutoCanvasRestore saveRestore(canvas, true /*save now, restore at exit*/);
+
+ if (bbh) {
+ // Draw only ops that affect pixels in the canvas's current clip.
+ // The SkRecord and BBH were recorded in identity space. This canvas
+ // is not necessarily in that same space. getClipBounds() returns us
+ // this canvas' clip bounds transformed back into identity space, which
+ // lets us query the BBH.
+ SkRect query;
+ if (!canvas->getClipBounds(&query)) {
+ query.setEmpty();
+ }
+
+ SkTDArray<int> ops;
+ bbh->search(query, &ops);
+
+ SkRecords::Draw draw(canvas, drawablePicts, drawables, drawableCount);
+ for (int i = 0; i < ops.count(); i++) {
+ if (callback && callback->abort()) {
+ return;
+ }
+ // This visit call uses the SkRecords::Draw::operator() to call
+ // methods on the |canvas|, wrapped by methods defined with the
+ // DRAW() macro.
+ record.visit(ops[i], draw);
+ }
+ } else {
+ // Draw all ops.
+ SkRecords::Draw draw(canvas, drawablePicts, drawables, drawableCount);
+ for (int i = 0; i < record.count(); i++) {
+ if (callback && callback->abort()) {
+ return;
+ }
+ // This visit call uses the SkRecords::Draw::operator() to call
+ // methods on the |canvas|, wrapped by methods defined with the
+ // DRAW() macro.
+ record.visit(i, draw);
+ }
+ }
+}
+
+void SkRecordPartialDraw(const SkRecord& record, SkCanvas* canvas,
+ SkPicture const* const drawablePicts[], int drawableCount,
+ int start, int stop,
+ const SkMatrix& initialCTM) {
+ SkAutoCanvasRestore saveRestore(canvas, true /*save now, restore at exit*/);
+
+ stop = SkTMin(stop, record.count());
+ SkRecords::Draw draw(canvas, drawablePicts, nullptr, drawableCount, &initialCTM);
+ for (int i = start; i < stop; i++) {
+ record.visit(i, draw);
+ }
+}
+
+namespace SkRecords {
+
+// NoOps draw nothing.
+template <> void Draw::draw(const NoOp&) {}
+
+#define DRAW(T, call) template <> void Draw::draw(const T& r) { fCanvas->call; }
+DRAW(Restore, restore());
+DRAW(Save, save());
+DRAW(SaveLayer, saveLayer(SkCanvas::SaveLayerRec(r.bounds,
+ r.paint,
+ r.backdrop.get(),
+ r.saveLayerFlags)));
+DRAW(SetMatrix, setMatrix(SkMatrix::Concat(fInitialCTM, r.matrix)));
+DRAW(Concat, concat(r.matrix));
+DRAW(Translate, translate(r.dx, r.dy));
+
+DRAW(ClipPath, clipPath(r.path, r.opAA.op, r.opAA.aa));
+DRAW(ClipRRect, clipRRect(r.rrect, r.opAA.op, r.opAA.aa));
+DRAW(ClipRect, clipRect(r.rect, r.opAA.op, r.opAA.aa));
+DRAW(ClipRegion, clipRegion(r.region, r.op));
+
+#ifdef SK_EXPERIMENTAL_SHADOWING
+DRAW(TranslateZ, SkCanvas::translateZ(r.z));
+#else
+template <> void Draw::draw(const TranslateZ& r) { }
+#endif
+
+DRAW(DrawArc, drawArc(r.oval, r.startAngle, r.sweepAngle, r.useCenter, r.paint));
+DRAW(DrawDRRect, drawDRRect(r.outer, r.inner, r.paint));
+DRAW(DrawImage, drawImage(r.image.get(), r.left, r.top, r.paint));
+
+template <> void Draw::draw(const DrawImageLattice& r) {
+ SkCanvas::Lattice lattice;
+ lattice.fXCount = r.xCount;
+ lattice.fXDivs = r.xDivs;
+ lattice.fYCount = r.yCount;
+ lattice.fYDivs = r.yDivs;
+ lattice.fFlags = (0 == r.flagCount) ? nullptr : r.flags;
+ lattice.fBounds = &r.src;
+ fCanvas->drawImageLattice(r.image.get(), lattice, r.dst, r.paint);
+}
+
+DRAW(DrawImageRect, legacy_drawImageRect(r.image.get(), r.src, r.dst, r.paint, r.constraint));
+DRAW(DrawImageNine, drawImageNine(r.image.get(), r.center, r.dst, r.paint));
+DRAW(DrawOval, drawOval(r.oval, r.paint));
+DRAW(DrawPaint, drawPaint(r.paint));
+DRAW(DrawPath, drawPath(r.path, r.paint));
+DRAW(DrawPatch, drawPatch(r.cubics, r.colors, r.texCoords, r.xmode, r.paint));
+DRAW(DrawPicture, drawPicture(r.picture.get(), &r.matrix, r.paint));
+
+#ifdef SK_EXPERIMENTAL_SHADOWING
+DRAW(DrawShadowedPicture, drawShadowedPicture(r.picture.get(), &r.matrix, r.paint, r.params));
+#else
+template <> void Draw::draw(const DrawShadowedPicture& r) { }
+#endif
+
+DRAW(DrawPoints, drawPoints(r.mode, r.count, r.pts, r.paint));
+DRAW(DrawPosText, drawPosText(r.text, r.byteLength, r.pos, r.paint));
+DRAW(DrawPosTextH, drawPosTextH(r.text, r.byteLength, r.xpos, r.y, r.paint));
+DRAW(DrawRRect, drawRRect(r.rrect, r.paint));
+DRAW(DrawRect, drawRect(r.rect, r.paint));
+DRAW(DrawRegion, drawRegion(r.region, r.paint));
+DRAW(DrawText, drawText(r.text, r.byteLength, r.x, r.y, r.paint));
+DRAW(DrawTextBlob, drawTextBlob(r.blob.get(), r.x, r.y, r.paint));
+DRAW(DrawTextOnPath, drawTextOnPath(r.text, r.byteLength, r.path, &r.matrix, r.paint));
+DRAW(DrawTextRSXform, drawTextRSXform(r.text, r.byteLength, r.xforms, r.cull, r.paint));
+DRAW(DrawAtlas, drawAtlas(r.atlas.get(),
+ r.xforms, r.texs, r.colors, r.count, r.mode, r.cull, r.paint));
+DRAW(DrawVertices, drawVertices(r.vmode, r.vertexCount, r.vertices, r.texs, r.colors,
+ r.xmode, r.indices, r.indexCount, r.paint));
+DRAW(DrawAnnotation, drawAnnotation(r.rect, r.key.c_str(), r.value.get()));
+#undef DRAW
+
+template <> void Draw::draw(const DrawDrawable& r) {
+ SkASSERT(r.index >= 0);
+ SkASSERT(r.index < fDrawableCount);
+ if (fDrawables) {
+ SkASSERT(nullptr == fDrawablePicts);
+ fCanvas->drawDrawable(fDrawables[r.index], r.matrix);
+ } else {
+ fCanvas->drawPicture(fDrawablePicts[r.index], r.matrix, nullptr);
+ }
+}
+
+// This is an SkRecord visitor that fills an SkBBoxHierarchy.
+//
+// The interesting part here is how to calculate bounds for ops which don't
+// have intrinsic bounds. What is the bounds of a Save or a Translate?
+//
+// We answer this by thinking about a particular definition of bounds: if I
+// don't execute this op, pixels in this rectangle might draw incorrectly. So
+// the bounds of a Save, a Translate, a Restore, etc. are the union of the
+// bounds of Draw* ops that they might have an effect on. For any given
+// Save/Restore block, the bounds of the Save, the Restore, and any other
+// non-drawing ("control") ops inside are exactly the union of the bounds of
+// the drawing ops inside that block.
+//
+// To implement this, we keep a stack of active Save blocks. As we consume ops
+// inside the Save/Restore block, drawing ops are unioned with the bounds of
+// the block, and control ops are stashed away for later. When we finish the
+// block with a Restore, our bounds are complete, and we go back and fill them
+// in for all the control ops we stashed away.
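+//
+// For example (a sketch), given the record
+//     Save, ClipRect, DrawRect A, DrawRect B, Restore
+// A and B get their own clip- and paint-adjusted bounds, while Save, ClipRect, and Restore all
+// end up with the union of A's and B's bounds once the Restore is popped.
+//
+// A caller typically drives this visitor via SkRecordFillBounds() (defined at the bottom of
+// this file), roughly (cullRect and record assumed in scope):
+//
+//   SkAutoTMalloc<SkRect> bounds(record.count());
+//   SkRecordFillBounds(cullRect, record, bounds.get());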
+class FillBounds : SkNoncopyable {
+public:
+ FillBounds(const SkRect& cullRect, const SkRecord& record, SkRect bounds[])
+ : fNumRecords(record.count())
+ , fCullRect(cullRect)
+ , fBounds(bounds) {
+ fCTM = SkMatrix::I();
+ fCurrentClipBounds = fCullRect;
+ }
+
+ void cleanUp() {
+ // If we have any lingering unpaired Saves, simulate restores to make
+ // sure all ops in those Save blocks have their bounds calculated.
+ while (!fSaveStack.isEmpty()) {
+ this->popSaveBlock();
+ }
+
+ // Any control ops not part of any Save/Restore block draw everywhere.
+ while (!fControlIndices.isEmpty()) {
+ this->popControl(fCullRect);
+ }
+ }
+
+ void setCurrentOp(int currentOp) { fCurrentOp = currentOp; }
+
+
+ template <typename T> void operator()(const T& op) {
+ this->updateCTM(op);
+ this->updateClipBounds(op);
+ this->trackBounds(op);
+ }
+
+ // In this file, SkRects are in local coordinates; Bounds are translated back to identity space.
+ typedef SkRect Bounds;
+
+ int currentOp() const { return fCurrentOp; }
+ const SkMatrix& ctm() const { return fCTM; }
+ const Bounds& getBounds(int index) const { return fBounds[index]; }
+
+ // Adjust rect for all paints that may affect its geometry, then map it to identity space.
+ Bounds adjustAndMap(SkRect rect, const SkPaint* paint) const {
+ // Inverted rectangles really confuse our BBHs.
+ rect.sort();
+
+ // Adjust the rect for its own paint.
+ if (!AdjustForPaint(paint, &rect)) {
+ // The paint could do anything to our bounds. The only safe answer is the current clip.
+ return fCurrentClipBounds;
+ }
+
+ // Adjust rect for all the paints from the SaveLayers we're inside.
+ if (!this->adjustForSaveLayerPaints(&rect)) {
+ // Same deal as above.
+ return fCurrentClipBounds;
+ }
+
+ // Map the rect back to identity space.
+ fCTM.mapRect(&rect);
+
+ // Nothing can draw outside the current clip.
+ if (!rect.intersect(fCurrentClipBounds)) {
+ return Bounds::MakeEmpty();
+ }
+
+ return rect;
+ }
+
+private:
+ struct SaveBounds {
+ int controlOps; // Number of control ops in this Save block, including the Save.
+ Bounds bounds; // Bounds of everything in the block.
+ const SkPaint* paint; // Unowned. If set, adjusts the bounds of all ops in this block.
+ SkMatrix ctm;
+ };
+
+ // Only Restore, SetMatrix, Concat, and Translate change the CTM.
+ template <typename T> void updateCTM(const T&) {}
+ void updateCTM(const Restore& op) { fCTM = op.matrix; }
+ void updateCTM(const SetMatrix& op) { fCTM = op.matrix; }
+ void updateCTM(const Concat& op) { fCTM.preConcat(op.matrix); }
+ void updateCTM(const Translate& op) { fCTM.preTranslate(op.dx, op.dy); }
+
+ // Most ops don't change the clip.
+ template <typename T> void updateClipBounds(const T&) {}
+
+ // Clip{Path,RRect,Rect,Region} obviously change the clip. They all know their bounds already.
+ void updateClipBounds(const ClipPath& op) { this->updateClipBoundsForClipOp(op.devBounds); }
+ void updateClipBounds(const ClipRRect& op) { this->updateClipBoundsForClipOp(op.devBounds); }
+ void updateClipBounds(const ClipRect& op) { this->updateClipBoundsForClipOp(op.devBounds); }
+ void updateClipBounds(const ClipRegion& op) { this->updateClipBoundsForClipOp(op.devBounds); }
+
+ // The bounds of clip ops need to be adjusted for the paints of saveLayers they're inside.
+ void updateClipBoundsForClipOp(const SkIRect& devBounds) {
+ Bounds clip = SkRect::Make(devBounds);
+ // We don't call adjustAndMap() because as its last step it would intersect the adjusted
+ // clip bounds with the previous clip, exactly what we can't do when the clip grows.
+ if (this->adjustForSaveLayerPaints(&clip)) {
+ fCurrentClipBounds = clip.intersect(fCullRect) ? clip : Bounds::MakeEmpty();
+ } else {
+ fCurrentClipBounds = fCullRect;
+ }
+ }
+
+ // Restore holds the devBounds for the clip after the {save,saveLayer}/restore block completes.
+ void updateClipBounds(const Restore& op) {
+ // This is just like the clip ops above, but we need to skip the effects (if any) of our
+ // paired saveLayer (if it is one); it has not yet been popped off the save stack. Our
+ // devBounds reflect the state of the world after the saveLayer/restore block is done,
+ // so they are not affected by the saveLayer's paint.
+ const int kSavesToIgnore = 1;
+ Bounds clip = SkRect::Make(op.devBounds);
+ if (this->adjustForSaveLayerPaints(&clip, kSavesToIgnore)) {
+ fCurrentClipBounds = clip.intersect(fCullRect) ? clip : Bounds::MakeEmpty();
+ } else {
+ fCurrentClipBounds = fCullRect;
+ }
+ }
+
+ // We also take advantage of SaveLayer bounds when present to further cut the clip down.
+ void updateClipBounds(const SaveLayer& op) {
+ if (op.bounds) {
+ // adjustAndMap() intersects these layer bounds with the previous clip for us.
+ fCurrentClipBounds = this->adjustAndMap(*op.bounds, op.paint);
+ }
+ }
+
+ // The bounds of these ops must be calculated when we hit the Restore
+ // from the bounds of the ops in the same Save block.
+ void trackBounds(const Save&) { this->pushSaveBlock(nullptr); }
+ void trackBounds(const SaveLayer& op) { this->pushSaveBlock(op.paint); }
+ void trackBounds(const Restore&) { fBounds[fCurrentOp] = this->popSaveBlock(); }
+
+ void trackBounds(const SetMatrix&) { this->pushControl(); }
+ void trackBounds(const Concat&) { this->pushControl(); }
+ void trackBounds(const Translate&) { this->pushControl(); }
+ void trackBounds(const TranslateZ&) { this->pushControl(); }
+ void trackBounds(const ClipRect&) { this->pushControl(); }
+ void trackBounds(const ClipRRect&) { this->pushControl(); }
+ void trackBounds(const ClipPath&) { this->pushControl(); }
+ void trackBounds(const ClipRegion&) { this->pushControl(); }
+
+
+ // For all other ops, we can calculate and store the bounds directly now.
+ template <typename T> void trackBounds(const T& op) {
+ fBounds[fCurrentOp] = this->bounds(op);
+ this->updateSaveBounds(fBounds[fCurrentOp]);
+ }
+
+ void pushSaveBlock(const SkPaint* paint) {
+ // Starting a new Save block. Push a new entry to represent that.
+ SaveBounds sb;
+ sb.controlOps = 0;
+ // If the paint affects transparent black, the bounds shouldn't be smaller
+ // than the current clip bounds.
+ sb.bounds =
+ PaintMayAffectTransparentBlack(paint) ? fCurrentClipBounds : Bounds::MakeEmpty();
+ sb.paint = paint;
+ sb.ctm = this->fCTM;
+
+ fSaveStack.push(sb);
+ this->pushControl();
+ }
+
+ static bool PaintMayAffectTransparentBlack(const SkPaint* paint) {
+ if (paint) {
+ // FIXME: this is very conservative
+ if (paint->getImageFilter() || paint->getColorFilter()) {
+ return true;
+ }
+
+ // Unusual blend modes require us to process a saved layer
+ // even for operations outside the clip.
+ // For example, DstIn is used by masking layers.
+ // https://code.google.com/p/skia/issues/detail?id=1291
+ // https://crbug.com/401593
+ switch (paint->getBlendMode()) {
+ // For each of the following transfer modes, if the source
+ // alpha is zero (our transparent black), the resulting
+ // blended alpha is not necessarily equal to the original
+ // destination alpha.
+ case SkBlendMode::kClear:
+ case SkBlendMode::kSrc:
+ case SkBlendMode::kSrcIn:
+ case SkBlendMode::kDstIn:
+ case SkBlendMode::kSrcOut:
+ case SkBlendMode::kDstATop:
+ case SkBlendMode::kModulate:
+ return true;
+ break;
+ default:
+ break;
+ }
+ }
+ return false;
+ }
+
+ Bounds popSaveBlock() {
+ // We're done with the Save block. Apply the block's bounds to all control ops inside it.
+ SaveBounds sb;
+ fSaveStack.pop(&sb);
+
+ while (sb.controlOps --> 0) {
+ this->popControl(sb.bounds);
+ }
+
+ // This whole Save block may be part of another Save block.
+ this->updateSaveBounds(sb.bounds);
+
+ // If called from a real Restore (not a phony one for balance), it'll need the bounds.
+ return sb.bounds;
+ }
+
+ void pushControl() {
+ fControlIndices.push(fCurrentOp);
+ if (!fSaveStack.isEmpty()) {
+ fSaveStack.top().controlOps++;
+ }
+ }
+
+ void popControl(const Bounds& bounds) {
+ fBounds[fControlIndices.top()] = bounds;
+ fControlIndices.pop();
+ }
+
+ void updateSaveBounds(const Bounds& bounds) {
+ // If we're in a Save block, expand its bounds to cover these bounds too.
+ if (!fSaveStack.isEmpty()) {
+ fSaveStack.top().bounds.join(bounds);
+ }
+ }
+
+ // FIXME: this method could use better bounds
+ Bounds bounds(const DrawText&) const { return fCurrentClipBounds; }
+
+ Bounds bounds(const DrawPaint&) const { return fCurrentClipBounds; }
+ Bounds bounds(const NoOp&) const { return Bounds::MakeEmpty(); } // NoOps don't draw.
+
+ Bounds bounds(const DrawRect& op) const { return this->adjustAndMap(op.rect, &op.paint); }
+ Bounds bounds(const DrawRegion& op) const {
+ SkRect rect = SkRect::Make(op.region.getBounds());
+ return this->adjustAndMap(rect, &op.paint);
+ }
+ Bounds bounds(const DrawOval& op) const { return this->adjustAndMap(op.oval, &op.paint); }
+ // Tighter arc bounds?
+ Bounds bounds(const DrawArc& op) const { return this->adjustAndMap(op.oval, &op.paint); }
+ Bounds bounds(const DrawRRect& op) const {
+ return this->adjustAndMap(op.rrect.rect(), &op.paint);
+ }
+ Bounds bounds(const DrawDRRect& op) const {
+ return this->adjustAndMap(op.outer.rect(), &op.paint);
+ }
+ Bounds bounds(const DrawImage& op) const {
+ const SkImage* image = op.image.get();
+ SkRect rect = SkRect::MakeXYWH(op.left, op.top, image->width(), image->height());
+
+ return this->adjustAndMap(rect, op.paint);
+ }
+ Bounds bounds(const DrawImageLattice& op) const {
+ return this->adjustAndMap(op.dst, op.paint);
+ }
+ Bounds bounds(const DrawImageRect& op) const {
+ return this->adjustAndMap(op.dst, op.paint);
+ }
+ Bounds bounds(const DrawImageNine& op) const {
+ return this->adjustAndMap(op.dst, op.paint);
+ }
+ Bounds bounds(const DrawPath& op) const {
+ return op.path.isInverseFillType() ? fCurrentClipBounds
+ : this->adjustAndMap(op.path.getBounds(), &op.paint);
+ }
+ Bounds bounds(const DrawPoints& op) const {
+ SkRect dst;
+ dst.set(op.pts, op.count);
+
+ // Pad the bounding box a little to make sure hairline points' bounds aren't empty.
+ SkScalar stroke = SkMaxScalar(op.paint.getStrokeWidth(), 0.01f);
+ dst.outset(stroke/2, stroke/2);
+
+ return this->adjustAndMap(dst, &op.paint);
+ }
+ Bounds bounds(const DrawPatch& op) const {
+ SkRect dst;
+ dst.set(op.cubics, SkPatchUtils::kNumCtrlPts);
+ return this->adjustAndMap(dst, &op.paint);
+ }
+ Bounds bounds(const DrawVertices& op) const {
+ SkRect dst;
+ dst.set(op.vertices, op.vertexCount);
+ return this->adjustAndMap(dst, &op.paint);
+ }
+
+ Bounds bounds(const DrawAtlas& op) const {
+ if (op.cull) {
+ // TODO: <reed> can we pass nullptr for the paint? Isn't cull already "correct"
+ // for the paint (by the caller)?
+ return this->adjustAndMap(*op.cull, op.paint);
+ } else {
+ return fCurrentClipBounds;
+ }
+ }
+
+ Bounds bounds(const DrawPicture& op) const {
+ SkRect dst = op.picture->cullRect();
+ op.matrix.mapRect(&dst);
+ return this->adjustAndMap(dst, op.paint);
+ }
+
+ Bounds bounds(const DrawShadowedPicture& op) const {
+ SkRect dst = op.picture->cullRect();
+ op.matrix.mapRect(&dst);
+ return this->adjustAndMap(dst, op.paint);
+ }
+
+ Bounds bounds(const DrawPosText& op) const {
+ const int N = op.paint.countText(op.text, op.byteLength);
+ if (N == 0) {
+ return Bounds::MakeEmpty();
+ }
+
+ SkRect dst;
+ dst.set(op.pos, N);
+ AdjustTextForFontMetrics(&dst, op.paint);
+ return this->adjustAndMap(dst, &op.paint);
+ }
+ Bounds bounds(const DrawPosTextH& op) const {
+ const int N = op.paint.countText(op.text, op.byteLength);
+ if (N == 0) {
+ return Bounds::MakeEmpty();
+ }
+
+ SkScalar left = op.xpos[0], right = op.xpos[0];
+ for (int i = 1; i < N; i++) {
+ left = SkMinScalar(left, op.xpos[i]);
+ right = SkMaxScalar(right, op.xpos[i]);
+ }
+ SkRect dst = { left, op.y, right, op.y };
+ AdjustTextForFontMetrics(&dst, op.paint);
+ return this->adjustAndMap(dst, &op.paint);
+ }
+ Bounds bounds(const DrawTextOnPath& op) const {
+ SkRect dst = op.path.getBounds();
+
+ // Pad all sides by the maximum padding in any direction we'd normally apply.
+ SkRect pad = { 0, 0, 0, 0};
+ AdjustTextForFontMetrics(&pad, op.paint);
+
+ // That maximum padding happens to always be the right pad today.
+ SkASSERT(pad.fLeft == -pad.fRight);
+ SkASSERT(pad.fTop == -pad.fBottom);
+ SkASSERT(pad.fRight > pad.fBottom);
+ dst.outset(pad.fRight, pad.fRight);
+
+ return this->adjustAndMap(dst, &op.paint);
+ }
+
+ Bounds bounds(const DrawTextRSXform& op) const {
+ if (op.cull) {
+ return this->adjustAndMap(*op.cull, nullptr);
+ } else {
+ return fCurrentClipBounds;
+ }
+ }
+
+ Bounds bounds(const DrawTextBlob& op) const {
+ SkRect dst = op.blob->bounds();
+ dst.offset(op.x, op.y);
+ return this->adjustAndMap(dst, &op.paint);
+ }
+
+ Bounds bounds(const DrawDrawable& op) const {
+ return this->adjustAndMap(op.worstCaseBounds, nullptr);
+ }
+
+ Bounds bounds(const DrawAnnotation& op) const {
+ return this->adjustAndMap(op.rect, nullptr);
+ }
+
+ static void AdjustTextForFontMetrics(SkRect* rect, const SkPaint& paint) {
+#ifdef SK_DEBUG
+ SkRect correct = *rect;
+#endif
+ // crbug.com/373785 ~~> xPad = 4x yPad
+ // crbug.com/424824 ~~> bump yPad from 2x text size to 2.5x
+ const SkScalar yPad = 2.5f * paint.getTextSize(),
+ xPad = 4.0f * yPad;
+ rect->outset(xPad, yPad);
+#ifdef SK_DEBUG
+ SkPaint::FontMetrics metrics;
+ paint.getFontMetrics(&metrics);
+ correct.fLeft += metrics.fXMin;
+ correct.fTop += metrics.fTop;
+ correct.fRight += metrics.fXMax;
+ correct.fBottom += metrics.fBottom;
+ // See skia:2862 for why we ignore small text sizes.
+ SkASSERTF(paint.getTextSize() < 0.001f || rect->contains(correct),
+ "%f %f %f %f vs. %f %f %f %f\n",
+ -xPad, -yPad, +xPad, +yPad,
+ metrics.fXMin, metrics.fTop, metrics.fXMax, metrics.fBottom);
+#endif
+ }
+
+ // Returns true if rect was meaningfully adjusted for the effects of paint,
+ // false if the paint could affect the rect in unknown ways.
+ static bool AdjustForPaint(const SkPaint* paint, SkRect* rect) {
+ if (paint) {
+ if (paint->canComputeFastBounds()) {
+ *rect = paint->computeFastBounds(*rect, rect);
+ return true;
+ }
+ return false;
+ }
+ return true;
+ }
+
+ bool adjustForSaveLayerPaints(SkRect* rect, int savesToIgnore = 0) const {
+ for (int i = fSaveStack.count() - 1 - savesToIgnore; i >= 0; i--) {
+ SkMatrix inverse;
+ if (!fSaveStack[i].ctm.invert(&inverse)) {
+ return false;
+ }
+ inverse.mapRect(rect);
+ if (!AdjustForPaint(fSaveStack[i].paint, rect)) {
+ return false;
+ }
+ fSaveStack[i].ctm.mapRect(rect);
+ }
+ return true;
+ }
+
+ const int fNumRecords;
+
+ // We do not guarantee anything for operations outside of the cull rect
+ const SkRect fCullRect;
+
+ // Conservative identity-space bounds for each op in the SkRecord.
+ Bounds* fBounds;
+
+ // We walk fCurrentOp through the SkRecord, as we go using updateCTM()
+ // and updateClipBounds() to maintain the exact CTM (fCTM) and conservative
+ // identity-space bounds of the current clip (fCurrentClipBounds).
+ int fCurrentOp;
+ SkMatrix fCTM;
+ Bounds fCurrentClipBounds;
+
+ // Used to track the bounds of Save/Restore blocks and the control ops inside them.
+ SkTDArray<SaveBounds> fSaveStack;
+ SkTDArray<int> fControlIndices;
+};
+
+} // namespace SkRecords
+
+void SkRecordFillBounds(const SkRect& cullRect, const SkRecord& record, SkRect bounds[]) {
+ SkRecords::FillBounds visitor(cullRect, record, bounds);
+ for (int curOp = 0; curOp < record.count(); curOp++) {
+ visitor.setCurrentOp(curOp);
+ record.visit(curOp, visitor);
+ }
+ visitor.cleanUp();
+}
+
diff --git a/gfx/skia/skia/src/core/SkRecordDraw.h b/gfx/skia/skia/src/core/SkRecordDraw.h
new file mode 100644
index 000000000..fdf98824a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordDraw.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRecordDraw_DEFINED
+#define SkRecordDraw_DEFINED
+
+#include "SkBBoxHierarchy.h"
+#include "SkBigPicture.h"
+#include "SkCanvas.h"
+#include "SkMatrix.h"
+#include "SkRecord.h"
+
+class SkDrawable;
+class SkLayerInfo;
+
+// Calculate conservative identity space bounds for each op in the record.
+void SkRecordFillBounds(const SkRect& cullRect, const SkRecord&, SkRect bounds[]);
+
+// Like SkRecordFillBounds(), but also gathers information about saveLayers and stores it for later
+// use (e.g., layer hoisting). The gathered information is sufficient to determine
+// where each saveLayer will land and which ops in the picture it represents.
+void SkRecordComputeLayers(const SkRect& cullRect, const SkRecord&, SkRect bounds[],
+ const SkBigPicture::SnapshotArray*, SkLayerInfo* data);
+
+// Draw an SkRecord into an SkCanvas. A convenience wrapper around SkRecords::Draw.
+void SkRecordDraw(const SkRecord&, SkCanvas*, SkPicture const* const drawablePicts[],
+ SkDrawable* const drawables[], int drawableCount,
+ const SkBBoxHierarchy*, SkPicture::AbortCallback*);
+
+// Draw a portion of an SkRecord into an SkCanvas.
+// When drawing a portion of an SkRecord, the CTM on the passed-in canvas must be
+// the composition of the replay matrix with the record-time CTM (for the portion
+// of the record that is being replayed). For setMatrix calls to behave correctly,
+// the initialCTM parameter must be set to just the replay matrix.
+void SkRecordPartialDraw(const SkRecord&, SkCanvas*,
+ SkPicture const* const drawablePicts[], int drawableCount,
+ int start, int stop, const SkMatrix& initialCTM);
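+//
+// A sketch of replaying ops [start, stop) of a record under a replay matrix replayCTM
+// (record-time CTM assumed to be identity over that span):
+//
+//   canvas->setMatrix(replayCTM);
+//   SkRecordPartialDraw(record, canvas, nullptr /*drawablePicts*/, 0 /*drawableCount*/,
+//                       start, stop, replayCTM);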
+
+namespace SkRecords {
+
+// This is an SkRecord visitor that will draw that SkRecord to an SkCanvas.
+class Draw : SkNoncopyable {
+public:
+ explicit Draw(SkCanvas* canvas, SkPicture const* const drawablePicts[],
+ SkDrawable* const drawables[], int drawableCount,
+ const SkMatrix* initialCTM = nullptr)
+ : fInitialCTM(initialCTM ? *initialCTM : canvas->getTotalMatrix())
+ , fCanvas(canvas)
+ , fDrawablePicts(drawablePicts)
+ , fDrawables(drawables)
+ , fDrawableCount(drawableCount)
+ {}
+
+ // This operator calls methods on the |canvas|. The various draw() wrapper
+ // methods around SkCanvas are defined by the DRAW() macro in
+ // SkRecordDraw.cpp.
+ template <typename T> void operator()(const T& r) {
+ this->draw(r);
+ }
+
+protected:
+ SkPicture const* const* drawablePicts() const { return fDrawablePicts; }
+ int drawableCount() const { return fDrawableCount; }
+
+private:
+ // There is no base case, so the compiler checks that we implement all possibilities.
+ template <typename T> void draw(const T&);
+
+ const SkMatrix fInitialCTM;
+ SkCanvas* fCanvas;
+ SkPicture const* const* fDrawablePicts;
+ SkDrawable* const* fDrawables;
+ int fDrawableCount;
+};
+
+} // namespace SkRecords
+
+#endif//SkRecordDraw_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRecordOpts.cpp b/gfx/skia/skia/src/core/SkRecordOpts.cpp
new file mode 100644
index 000000000..a7feec1fb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordOpts.cpp
@@ -0,0 +1,309 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkRecordOpts.h"
+
+#include "SkRecordPattern.h"
+#include "SkRecords.h"
+#include "SkTDArray.h"
+#include "SkXfermode.h"
+
+using namespace SkRecords;
+
+// Most of the optimizations in this file are pattern-based. These are all defined as structs with:
+// - a Match typedef
+// - a bool onMatch(SkRecord*, Match*, int begin, int end) method,
+// which returns true if it made changes and false if not.
+
+// Run a pattern-based optimization once across the SkRecord, returning true if it made any changes.
+// It looks for spans which match Pass::Match, and when found calls onMatch() with that pattern,
+// record, and [begin,end) span of the commands that matched.
+template <typename Pass>
+static bool apply(Pass* pass, SkRecord* record) {
+ typename Pass::Match match;
+ bool changed = false;
+ int begin, end = 0;
+
+ while (match.search(record, &begin, &end)) {
+ changed |= pass->onMatch(record, &match, begin, end);
+ }
+ return changed;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static void multiple_set_matrices(SkRecord* record) {
+ struct {
+ typedef Pattern<Is<SetMatrix>,
+ Greedy<Is<NoOp>>,
+ Is<SetMatrix> >
+ Match;
+
+ bool onMatch(SkRecord* record, Match* pattern, int begin, int end) {
+ record->replace<NoOp>(begin); // first SetMatrix
+ return true;
+ }
+ } pass;
+ while (apply(&pass, record));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if 0 // experimental, but needs knowledge of previous matrix to operate correctly
+static void apply_matrix_to_draw_params(SkRecord* record) {
+ struct {
+ typedef Pattern<Is<SetMatrix>,
+ Greedy<Is<NoOp>>,
+ Is<SetMatrix> >
+ Pattern;
+
+ bool onMatch(SkRecord* record, Pattern* pattern, int begin, int end) {
+ record->replace<NoOp>(begin); // first SetMatrix
+ return true;
+ }
+ } pass;
+ // No need to loop, as we never "open up" opportunities for more of this type of optimization.
+ apply(&pass, record);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Turns the logical NoOp Save and Restore in Save-Draw*-Restore patterns into actual NoOps.
+struct SaveOnlyDrawsRestoreNooper {
+ typedef Pattern<Is<Save>,
+ Greedy<Or<Is<NoOp>, IsDraw>>,
+ Is<Restore>>
+ Match;
+
+ bool onMatch(SkRecord* record, Match*, int begin, int end) {
+ record->replace<NoOp>(begin); // Save
+ record->replace<NoOp>(end-1); // Restore
+ return true;
+ }
+};
+
+static bool fold_opacity_layer_color_to_paint(const SkPaint* layerPaint,
+ bool isSaveLayer,
+ SkPaint* paint) {
+ // We assume layerPaint is always from a saveLayer. If isSaveLayer is
+ // true, we assume paint is too.
+
+ // The alpha folding can proceed if the filter layer paint does not have properties which cause
+ // the resulting filter layer to be "blended" in complex ways into the parent layer. For example,
+ // a looper that draws the unmodulated filter layer twice and then modulates the result produces
+ // a different image than drawing the modulated filter layer twice.
+ // TODO: most likely the looper and only some xfer modes are the hard constraints
+ if (!paint->isSrcOver() || paint->getLooper()) {
+ return false;
+ }
+
+ if (!isSaveLayer && paint->getImageFilter()) {
+ // For normal draws, the paint color is used as one input to the color of the draw. The image
+ // filter operates on the result, and thus we cannot change the input.
+ // For layer saves, the image filter is applied to the layer contents. The layer is then
+ // modulated with the paint color, so it's fine to proceed with the fold for saveLayer
+ // paints with image filters.
+ return false;
+ }
+
+ if (paint->getColorFilter()) {
+ // Filter input depends on the paint color.
+
+ // Here we could filter the color if we knew the draw was going to be a uniform color. This
+ // should be detectable: drawPath/drawRect/... without a shader is uniform, while
+ // drawBitmap/drawSprite, or anything with a shader, is non-uniform. However, the current
+ // matchers don't expose the command type easily, so we do not optimize this case for now.
+ return false;
+ }
+
+ if (layerPaint) {
+ const uint32_t layerColor = layerPaint->getColor();
+ // The layer paint color must have only alpha component.
+ if (SK_ColorTRANSPARENT != SkColorSetA(layerColor, SK_AlphaTRANSPARENT)) {
+ return false;
+ }
+
+ // The layer paint can not have any effects.
+ if (layerPaint->getPathEffect() ||
+ layerPaint->getShader() ||
+ !layerPaint->isSrcOver() ||
+ layerPaint->getMaskFilter() ||
+ layerPaint->getColorFilter() ||
+ layerPaint->getRasterizer() ||
+ layerPaint->getLooper() ||
+ layerPaint->getImageFilter()) {
+ return false;
+ }
+ paint->setAlpha(SkMulDiv255Round(paint->getAlpha(), SkColorGetA(layerColor)));
+ }
+
+ return true;
+}
+
+// Turns logical no-op Save-[non-drawing command]*-Restore patterns into actual no-ops.
+struct SaveNoDrawsRestoreNooper {
+ // Greedy matches greedily, so we also have to exclude Save and Restore.
+ // Nested SaveLayers need to be excluded, or we'll match their Restore!
+ typedef Pattern<Is<Save>,
+ Greedy<Not<Or<Is<Save>,
+ Is<SaveLayer>,
+ Is<Restore>,
+ IsDraw>>>,
+ Is<Restore>>
+ Match;
+
+ bool onMatch(SkRecord* record, Match*, int begin, int end) {
+ // The entire span between Save and Restore (inclusively) does nothing.
+ for (int i = begin; i < end; i++) {
+ record->replace<NoOp>(i);
+ }
+ return true;
+ }
+};
+void SkRecordNoopSaveRestores(SkRecord* record) {
+ SaveOnlyDrawsRestoreNooper onlyDraws;
+ SaveNoDrawsRestoreNooper noDraws;
+
+ // Run until they stop changing things.
+ while (apply(&onlyDraws, record) || apply(&noDraws, record));
+}
+
+static bool effectively_srcover(const SkPaint* paint) {
+ if (!paint || paint->isSrcOver()) {
+ return true;
+ }
+ // src-mode with opaque and no effects (which might change opaqueness) is ok too.
+ return !paint->getShader() && !paint->getColorFilter() && !paint->getImageFilter() &&
+ 0xFF == paint->getAlpha() && paint->getBlendMode() == SkBlendMode::kSrc;
+}
+
+// For some SaveLayer-[drawing command]-Restore patterns, merge the SaveLayer's alpha into the
+// draw, and no-op the SaveLayer and Restore.
+struct SaveLayerDrawRestoreNooper {
+ typedef Pattern<Is<SaveLayer>, IsDraw, Is<Restore>> Match;
+
+ bool onMatch(SkRecord* record, Match* match, int begin, int end) {
+ if (match->first<SaveLayer>()->backdrop) {
+ // can't throw away the layer if we have a backdrop
+ return false;
+ }
+
+ // A SaveLayer's bounds field is just a hint, so we should be free to ignore it.
+ SkPaint* layerPaint = match->first<SaveLayer>()->paint;
+ SkPaint* drawPaint = match->second<SkPaint>();
+
+ if (nullptr == layerPaint && effectively_srcover(drawPaint)) {
+ // There wasn't really any point to this SaveLayer at all.
+ return KillSaveLayerAndRestore(record, begin);
+ }
+
+ if (drawPaint == nullptr) {
+ // We can just give the draw the SaveLayer's paint.
+ // TODO(mtklein): figure out how to do this clearly
+ return false;
+ }
+
+ if (!fold_opacity_layer_color_to_paint(layerPaint, false /*isSaveLayer*/, drawPaint)) {
+ return false;
+ }
+
+ return KillSaveLayerAndRestore(record, begin);
+ }
+
+ static bool KillSaveLayerAndRestore(SkRecord* record, int saveLayerIndex) {
+ record->replace<NoOp>(saveLayerIndex); // SaveLayer
+ record->replace<NoOp>(saveLayerIndex+2); // Restore
+ return true;
+ }
+};
+void SkRecordNoopSaveLayerDrawRestores(SkRecord* record) {
+ SaveLayerDrawRestoreNooper pass;
+ apply(&pass, record);
+}
+
+
+/* For SVG generated:
+ SaveLayer (non-opaque, typically for CSS opacity)
+ Save
+ ClipRect
+ SaveLayer (typically for SVG filter)
+ Restore
+ Restore
+ Restore
+*/
+struct SvgOpacityAndFilterLayerMergePass {
+ typedef Pattern<Is<SaveLayer>, Is<Save>, Is<ClipRect>, Is<SaveLayer>,
+ Is<Restore>, Is<Restore>, Is<Restore>> Match;
+
+ bool onMatch(SkRecord* record, Match* match, int begin, int end) {
+ if (match->first<SaveLayer>()->backdrop) {
+ // can't throw away the layer if we have a backdrop
+ return false;
+ }
+
+ SkPaint* opacityPaint = match->first<SaveLayer>()->paint;
+ if (nullptr == opacityPaint) {
+ // There wasn't really any point to this SaveLayer at all.
+ return KillSaveLayerAndRestore(record, begin);
+ }
+
+ // This layer typically contains a filter, but this should work for layers with other
+ // purposes too.
+ SkPaint* filterLayerPaint = match->fourth<SaveLayer>()->paint;
+ if (filterLayerPaint == nullptr) {
+ // We can just give the inner SaveLayer the paint of the outer SaveLayer.
+ // TODO(mtklein): figure out how to do this clearly
+ return false;
+ }
+
+ if (!fold_opacity_layer_color_to_paint(opacityPaint, true /*isSaveLayer*/,
+ filterLayerPaint)) {
+ return false;
+ }
+
+ return KillSaveLayerAndRestore(record, begin);
+ }
+
+ static bool KillSaveLayerAndRestore(SkRecord* record, int saveLayerIndex) {
+ record->replace<NoOp>(saveLayerIndex); // SaveLayer
+ record->replace<NoOp>(saveLayerIndex + 6); // Restore
+ return true;
+ }
+};
+
+void SkRecordMergeSvgOpacityAndFilterLayers(SkRecord* record) {
+ SvgOpacityAndFilterLayerMergePass pass;
+ apply(&pass, record);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkRecordOptimize(SkRecord* record) {
+ // This might be useful as a first pass in the future if we want to weed
+ // out junk for other optimization passes. Right now, nothing needs it,
+ // and the bounding box hierarchy will do the work of skipping no-op
+ // Save-NoDraw-Restore sequences better than we can here.
+ // As there is a known problem with this peephole and drawAnnotation, it is disabled here.
+ // If we want to enable it, we must first fix this bug:
+ // https://bugs.chromium.org/p/skia/issues/detail?id=5548
+// SkRecordNoopSaveRestores(record);
+
+ SkRecordNoopSaveLayerDrawRestores(record);
+ SkRecordMergeSvgOpacityAndFilterLayers(record);
+
+ record->defrag();
+}
+
+void SkRecordOptimize2(SkRecord* record) {
+ multiple_set_matrices(record);
+ SkRecordNoopSaveRestores(record);
+ SkRecordNoopSaveLayerDrawRestores(record);
+ SkRecordMergeSvgOpacityAndFilterLayers(record);
+
+ record->defrag();
+}
diff --git a/gfx/skia/skia/src/core/SkRecordOpts.h b/gfx/skia/skia/src/core/SkRecordOpts.h
new file mode 100644
index 000000000..d6531b522
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordOpts.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRecordOpts_DEFINED
+#define SkRecordOpts_DEFINED
+
+#include "SkRecord.h"
+
+// Run all optimizations in recommended order.
+void SkRecordOptimize(SkRecord*);
+
+// Turns logical no-op Save-[non-drawing command]*-Restore patterns into actual no-ops.
+void SkRecordNoopSaveRestores(SkRecord*);
+
+// For some SaveLayer-[drawing command]-Restore patterns, merge the SaveLayer's alpha into the
+// draw, and no-op the SaveLayer and Restore.
+void SkRecordNoopSaveLayerDrawRestores(SkRecord*);
+
+// For SVG-generated SaveLayer-Save-ClipRect-SaveLayer-3xRestore patterns, merge
+// the alpha of the first SaveLayer into the second SaveLayer.
+void SkRecordMergeSvgOpacityAndFilterLayers(SkRecord*);
+
+// Experimental optimizers
+void SkRecordOptimize2(SkRecord*);
+
+#endif//SkRecordOpts_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRecordPattern.h b/gfx/skia/skia/src/core/SkRecordPattern.h
new file mode 100644
index 000000000..45f45724c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordPattern.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRecordPattern_DEFINED
+#define SkRecordPattern_DEFINED
+
+#include "SkTLogic.h"
+
+namespace SkRecords {
+
+// First, some matchers. These match a single command in the SkRecord,
+// and may hang onto some data from it. If so, you can get the data by calling .get().
+
+// Matches a command of type T, and stores that command.
+template <typename T>
+class Is {
+public:
+ Is() : fPtr(nullptr) {}
+
+ typedef T type;
+ type* get() { return fPtr; }
+
+ bool operator()(T* ptr) {
+ fPtr = ptr;
+ return true;
+ }
+
+ template <typename U>
+ bool operator()(U*) {
+ fPtr = nullptr;
+ return false;
+ }
+
+private:
+ type* fPtr;
+};
+
+// Matches any command that draws, and stores its paint.
+class IsDraw {
+public:
+ IsDraw() : fPaint(nullptr) {}
+
+ typedef SkPaint type;
+ type* get() { return fPaint; }
+
+ template <typename T>
+ SK_WHEN(T::kTags & kDraw_Tag, bool) operator()(T* draw) {
+ fPaint = AsPtr(draw->paint);
+ return true;
+ }
+
+ bool operator()(DrawDrawable*) {
+ static_assert(DrawDrawable::kTags & kDraw_Tag, "");
+ fPaint = nullptr;
+ return true;
+ }
+
+ template <typename T>
+ SK_WHEN(!(T::kTags & kDraw_Tag), bool) operator()(T* draw) {
+ fPaint = nullptr;
+ return false;
+ }
+
+private:
+ // Abstracts away whether the paint is always part of the command or optional.
+ template <typename T> static T* AsPtr(SkRecords::Optional<T>& x) { return x; }
+ template <typename T> static T* AsPtr(T& x) { return &x; }
+
+ type* fPaint;
+};
+
+// Matches if Matcher doesn't. Stores nothing.
+template <typename Matcher>
+struct Not {
+ template <typename T>
+ bool operator()(T* ptr) { return !Matcher()(ptr); }
+};
+
+// Matches if any of First or Rest... does. Stores nothing.
+template <typename First, typename... Rest>
+struct Or {
+ template <typename T>
+ bool operator()(T* ptr) { return First()(ptr) || Or<Rest...>()(ptr); }
+};
+template <typename First>
+struct Or<First> {
+ template <typename T>
+ bool operator()(T* ptr) { return First()(ptr); }
+};
+
+
+// Greedy is a special matcher that greedily matches Matcher 0 or more times. Stores nothing.
+template <typename Matcher>
+struct Greedy {
+ template <typename T>
+ bool operator()(T* ptr) { return Matcher()(ptr); }
+};
+
+// Pattern matches each of its matchers in order.
+//
+// This is the main entry point to pattern matching, and so provides a couple of extra API bits:
+// - search scans through the record to look for matches;
+// - first, second, third, ... return the data stored by their respective matchers in the pattern.
+
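+// For example, a sketch of the shape used by SaveLayerDrawRestoreNooper in SkRecordOpts.cpp
+// (record is an SkRecord*):
+//
+//   Pattern<Is<SaveLayer>, IsDraw, Is<Restore>> pattern;
+//   int begin, end = 0;
+//   while (pattern.search(record, &begin, &end)) {
+//       SaveLayer* sl    = pattern.first<SaveLayer>();   // stored by Is<SaveLayer>
+//       SkPaint*   paint = pattern.second<SkPaint>();    // stored by IsDraw (may be nullptr)
+//       // ... inspect or rewrite the ops in [begin, end) using sl and paint ...
+//   }
+//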
+template <typename... Matchers> class Pattern;
+
+template <> class Pattern<> {
+public:
+ // Bottoms out the recursion. Just return whatever index the matchers in front decided on.
+ int match(SkRecord*, int i) { return i; }
+};
+
+template <typename First, typename... Rest>
+class Pattern<First, Rest...> {
+public:
+ // If this pattern matches the SkRecord starting from i,
+ // return the index just past the end of the pattern, otherwise return 0.
+ SK_ALWAYS_INLINE int match(SkRecord* record, int i) {
+ i = this->matchFirst(&fFirst, record, i);
+ return i > 0 ? fRest.match(record, i) : 0;
+ }
+
+ // Starting from *end, walk through the SkRecord to find the first span matching this pattern.
+ // If there is no such span, return false. If there is, return true and set [*begin, *end).
+ SK_ALWAYS_INLINE bool search(SkRecord* record, int* begin, int* end) {
+ for (*begin = *end; *begin < record->count(); ++(*begin)) {
+ *end = this->match(record, *begin);
+ if (*end != 0) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // TODO: some sort of smart get<i>()
+ template <typename T> T* first() { return fFirst.get(); }
+ template <typename T> T* second() { return fRest.template first<T>(); }
+ template <typename T> T* third() { return fRest.template second<T>(); }
+ template <typename T> T* fourth() { return fRest.template third<T>(); }
+
+private:
+ // If first isn't a Greedy, try to match at i once.
+ template <typename T>
+ int matchFirst(T* first, SkRecord* record, int i) {
+ if (i < record->count()) {
+ if (record->mutate(i, *first)) {
+ return i+1;
+ }
+ }
+ return 0;
+ }
+
+ // If first is a Greedy, walk i until it doesn't match.
+ template <typename T>
+ int matchFirst(Greedy<T>* first, SkRecord* record, int i) {
+ while (i < record->count()) {
+ if (!record->mutate(i, *first)) {
+ return i;
+ }
+ i++;
+ }
+ return 0;
+ }
+
+ First fFirst;
+ Pattern<Rest...> fRest;
+};
+
+} // namespace SkRecords
+
+#endif//SkRecordPattern_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRecordedDrawable.cpp b/gfx/skia/skia/src/core/SkRecordedDrawable.cpp
new file mode 100644
index 000000000..9e68be1d2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordedDrawable.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMatrix.h"
+#include "SkPictureData.h"
+#include "SkPicturePlayback.h"
+#include "SkPictureRecord.h"
+#include "SkPictureRecorder.h"
+#include "SkPictureUtils.h"
+#include "SkRecordedDrawable.h"
+#include "SkRecordDraw.h"
+
+void SkRecordedDrawable::onDraw(SkCanvas* canvas) {
+ SkDrawable* const* drawables = nullptr;
+ int drawableCount = 0;
+ if (fDrawableList) {
+ drawables = fDrawableList->begin();
+ drawableCount = fDrawableList->count();
+ }
+ SkRecordDraw(*fRecord, canvas, nullptr, drawables, drawableCount, fBBH, nullptr/*callback*/);
+}
+
+SkPicture* SkRecordedDrawable::onNewPictureSnapshot() {
+ SkBigPicture::SnapshotArray* pictList = nullptr;
+ if (fDrawableList) {
+ // TODO: should we plumb-down the BBHFactory and recordFlags from our host
+ // PictureRecorder?
+ pictList = fDrawableList->newDrawableSnapshot();
+ }
+
+ size_t subPictureBytes = 0;
+ for (int i = 0; pictList && i < pictList->count(); i++) {
+ subPictureBytes += SkPictureUtils::ApproximateBytesUsed(pictList->begin()[i]);
+ }
+ // SkBigPicture will take ownership of a ref on both fRecord and fBBH.
+ // We're not willing to give up our ownership, so we must ref them for SkPicture.
+ return new SkBigPicture(fBounds, SkRef(fRecord.get()), pictList, SkSafeRef(fBBH.get()),
+ subPictureBytes);
+}
+
+void SkRecordedDrawable::flatten(SkWriteBuffer& buffer) const {
+ // Write the bounds.
+ buffer.writeRect(fBounds);
+
+ // Create an SkPictureRecord to record the draw commands.
+ SkPictInfo info;
+ SkPictureRecord pictureRecord(SkISize::Make(fBounds.width(), fBounds.height()), 0);
+
+ // If the query contains the whole picture, don't bother with the bounding box hierarchy.
+ SkRect clipBounds;
+ pictureRecord.getClipBounds(&clipBounds);
+ SkBBoxHierarchy* bbh;
+ if (clipBounds.contains(fBounds)) {
+ bbh = nullptr;
+ } else {
+ bbh = fBBH.get();
+ }
+
+ // Record the draw commands.
+ pictureRecord.beginRecording();
+ SkRecordDraw(*fRecord, &pictureRecord, nullptr, fDrawableList->begin(), fDrawableList->count(),
+ bbh, nullptr);
+ pictureRecord.endRecording();
+
+ // Flatten the recorded commands and drawables.
+ SkPictureData pictureData(pictureRecord, info);
+ pictureData.flatten(buffer);
+}
+
+sk_sp<SkFlattenable> SkRecordedDrawable::CreateProc(SkReadBuffer& buffer) {
+ // Read the bounds.
+ SkRect bounds;
+ buffer.readRect(&bounds);
+
+ // Unflatten into a SkPictureData.
+ SkPictInfo info;
+ info.setVersion(buffer.getVersion());
+ info.fCullRect = bounds;
+ info.fFlags = 0; // ???
+ SkAutoTDelete<SkPictureData> pictureData(SkPictureData::CreateFromBuffer(buffer, info));
+ if (!pictureData) {
+ return nullptr;
+ }
+
+ // Create a drawable.
+ SkPicturePlayback playback(pictureData);
+ SkPictureRecorder recorder;
+ playback.draw(recorder.beginRecording(bounds), nullptr, &buffer);
+ return recorder.finishRecordingAsDrawable();
+}
diff --git a/gfx/skia/skia/src/core/SkRecordedDrawable.h b/gfx/skia/skia/src/core/SkRecordedDrawable.h
new file mode 100644
index 000000000..7e2d9bc59
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordedDrawable.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBBoxHierarchy.h"
+#include "SkDrawable.h"
+#include "SkRecord.h"
+#include "SkRecorder.h"
+
+class SkRecordedDrawable : public SkDrawable {
+public:
+ SkRecordedDrawable(SkRecord* record, SkBBoxHierarchy* bbh, SkDrawableList* drawableList,
+ const SkRect& bounds)
+ : fRecord(SkRef(record))
+ , fBBH(SkSafeRef(bbh))
+ , fDrawableList(drawableList) // we take ownership
+ , fBounds(bounds)
+ {}
+
+ void flatten(SkWriteBuffer& buffer) const override;
+
+ static sk_sp<SkFlattenable> CreateProc(SkReadBuffer& buffer);
+
+ Factory getFactory() const override { return CreateProc; }
+
+protected:
+ SkRect onGetBounds() override { return fBounds; }
+
+ void onDraw(SkCanvas* canvas) override;
+
+ SkPicture* onNewPictureSnapshot() override;
+
+private:
+ SkAutoTUnref<SkRecord> fRecord;
+ SkAutoTUnref<SkBBoxHierarchy> fBBH;
+ SkAutoTDelete<SkDrawableList> fDrawableList;
+ const SkRect fBounds;
+};
diff --git a/gfx/skia/skia/src/core/SkRecorder.cpp b/gfx/skia/skia/src/core/SkRecorder.cpp
new file mode 100644
index 000000000..08c73700d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecorder.cpp
@@ -0,0 +1,431 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBigPicture.h"
+#include "SkCanvasPriv.h"
+#include "SkImage.h"
+#include "SkPatchUtils.h"
+#include "SkPicture.h"
+#include "SkPictureUtils.h"
+#include "SkRecorder.h"
+#include "SkSurface.h"
+
+SkDrawableList::~SkDrawableList() {
+ fArray.unrefAll();
+}
+
+SkBigPicture::SnapshotArray* SkDrawableList::newDrawableSnapshot() {
+ const int count = fArray.count();
+ if (0 == count) {
+ return nullptr;
+ }
+ SkAutoTMalloc<const SkPicture*> pics(count);
+ for (int i = 0; i < count; ++i) {
+ pics[i] = fArray[i]->newPictureSnapshot();
+ }
+ return new SkBigPicture::SnapshotArray(pics.release(), count);
+}
+
+void SkDrawableList::append(SkDrawable* drawable) {
+ *fArray.append() = SkRef(drawable);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////
+
+SkRecorder::SkRecorder(SkRecord* record, int width, int height, SkMiniRecorder* mr)
+ : SkCanvas(SkIRect::MakeWH(width, height), SkCanvas::kConservativeRasterClip_InitFlag)
+ , fDrawPictureMode(Record_DrawPictureMode)
+ , fApproxBytesUsedBySubPictures(0)
+ , fRecord(record)
+ , fMiniRecorder(mr) {}
+
+SkRecorder::SkRecorder(SkRecord* record, const SkRect& bounds, SkMiniRecorder* mr)
+ : SkCanvas(bounds.roundOut(), SkCanvas::kConservativeRasterClip_InitFlag)
+ , fDrawPictureMode(Record_DrawPictureMode)
+ , fApproxBytesUsedBySubPictures(0)
+ , fRecord(record)
+ , fMiniRecorder(mr) {}
+
+void SkRecorder::reset(SkRecord* record, const SkRect& bounds,
+ DrawPictureMode dpm, SkMiniRecorder* mr) {
+ this->forgetRecord();
+ fDrawPictureMode = dpm;
+ fRecord = record;
+ this->resetForNextPicture(bounds.roundOut());
+ fMiniRecorder = mr;
+}
+
+void SkRecorder::forgetRecord() {
+ fDrawableList.reset(nullptr);
+ fApproxBytesUsedBySubPictures = 0;
+ fRecord = nullptr;
+}
+
+// To make appending to fRecord a little less verbose.
+#define APPEND(T, ...) \
+ if (fMiniRecorder) { \
+ this->flushMiniRecorder(); \
+ } \
+ new (fRecord->append<SkRecords::T>()) SkRecords::T{__VA_ARGS__}
+
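+// For illustration (editorial note, not upstream Skia commentary): a call such as
+//   APPEND(DrawRect, paint, rect);
+// first flushes any attached SkMiniRecorder, then placement-news an
+// SkRecords::DrawRect{paint, rect} into storage handed out by fRecord->append().
+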
+#define TRY_MINIRECORDER(method, ...) \
+ if (fMiniRecorder && fMiniRecorder->method(__VA_ARGS__)) { return; }
+
+// For methods which must call back into SkCanvas.
+#define INHERITED(method, ...) this->SkCanvas::method(__VA_ARGS__)
+
+// Use copy() only for optional arguments, to be copied if present or skipped if not.
+// (For most types we just pass by value and let copy constructors do their thing.)
+template <typename T>
+T* SkRecorder::copy(const T* src) {
+ if (nullptr == src) {
+ return nullptr;
+ }
+ return new (fRecord->alloc<T>()) T(*src);
+}
+
+// This copy() is for arrays.
+// It will work with POD or non-POD, though currently we only use it for POD.
+template <typename T>
+T* SkRecorder::copy(const T src[], size_t count) {
+ if (nullptr == src) {
+ return nullptr;
+ }
+ T* dst = fRecord->alloc<T>(count);
+ for (size_t i = 0; i < count; i++) {
+ new (dst + i) T(src[i]);
+ }
+ return dst;
+}
+
+// Specialization for copying strings, using memcpy.
+// This measured around 2x faster for copying code points,
+// but I found no corresponding speedup for other arrays.
+template <>
+char* SkRecorder::copy(const char src[], size_t count) {
+ if (nullptr == src) {
+ return nullptr;
+ }
+ char* dst = fRecord->alloc<char>(count);
+ memcpy(dst, src, count);
+ return dst;
+}
+
+// As above, assuming and copying a terminating \0.
+template <>
+char* SkRecorder::copy(const char* src) {
+ return this->copy(src, strlen(src)+1);
+}
+
+void SkRecorder::flushMiniRecorder() {
+ if (fMiniRecorder) {
+ SkMiniRecorder* mr = fMiniRecorder;
+ fMiniRecorder = nullptr; // Needs to happen before flushAndReset() or we recurse forever.
+ mr->flushAndReset(this);
+ }
+}
+
+void SkRecorder::onDrawPaint(const SkPaint& paint) {
+ APPEND(DrawPaint, paint);
+}
+
+void SkRecorder::onDrawPoints(PointMode mode,
+ size_t count,
+ const SkPoint pts[],
+ const SkPaint& paint) {
+ APPEND(DrawPoints, paint, mode, SkToUInt(count), this->copy(pts, count));
+}
+
+void SkRecorder::onDrawRect(const SkRect& rect, const SkPaint& paint) {
+ TRY_MINIRECORDER(drawRect, rect, paint);
+ APPEND(DrawRect, paint, rect);
+}
+
+void SkRecorder::onDrawRegion(const SkRegion& region, const SkPaint& paint) {
+ APPEND(DrawRegion, paint, region);
+}
+
+void SkRecorder::onDrawOval(const SkRect& oval, const SkPaint& paint) {
+ APPEND(DrawOval, paint, oval);
+}
+
+void SkRecorder::onDrawArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) {
+ APPEND(DrawArc, paint, oval, startAngle, sweepAngle, useCenter);
+}
+
+void SkRecorder::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ APPEND(DrawRRect, paint, rrect);
+}
+
+void SkRecorder::onDrawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint& paint) {
+ APPEND(DrawDRRect, paint, outer, inner);
+}
+
+void SkRecorder::onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix) {
+ if (fDrawPictureMode == Record_DrawPictureMode) {
+ if (!fDrawableList) {
+ fDrawableList.reset(new SkDrawableList);
+ }
+ fDrawableList->append(drawable);
+ APPEND(DrawDrawable, this->copy(matrix), drawable->getBounds(), fDrawableList->count() - 1);
+ } else {
+ SkASSERT(fDrawPictureMode == Playback_DrawPictureMode);
+ drawable->draw(this, matrix);
+ }
+}
+
+void SkRecorder::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ TRY_MINIRECORDER(drawPath, path, paint);
+ APPEND(DrawPath, paint, path);
+}
+
+void SkRecorder::onDrawBitmap(const SkBitmap& bitmap,
+ SkScalar left,
+ SkScalar top,
+ const SkPaint* paint) {
+ sk_sp<SkImage> image = SkImage::MakeFromBitmap(bitmap);
+ if (image) {
+ this->onDrawImage(image.get(), left, top, paint);
+ }
+}
+
+void SkRecorder::onDrawBitmapRect(const SkBitmap& bitmap,
+ const SkRect* src,
+ const SkRect& dst,
+ const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ sk_sp<SkImage> image = SkImage::MakeFromBitmap(bitmap);
+ if (image) {
+ this->onDrawImageRect(image.get(), src, dst, paint, constraint);
+ }
+}
+
+void SkRecorder::onDrawBitmapNine(const SkBitmap& bitmap,
+ const SkIRect& center,
+ const SkRect& dst,
+ const SkPaint* paint) {
+ sk_sp<SkImage> image = SkImage::MakeFromBitmap(bitmap);
+ if (image) {
+ this->onDrawImageNine(image.get(), center, dst, paint);
+ }
+}
+
+void SkRecorder::onDrawBitmapLattice(const SkBitmap& bitmap, const Lattice& lattice,
+ const SkRect& dst, const SkPaint* paint) {
+ sk_sp<SkImage> image = SkImage::MakeFromBitmap(bitmap);
+ this->onDrawImageLattice(image.get(), lattice, dst, paint);
+}
+
+void SkRecorder::onDrawImage(const SkImage* image, SkScalar left, SkScalar top,
+ const SkPaint* paint) {
+ APPEND(DrawImage, this->copy(paint), sk_ref_sp(image), left, top);
+}
+
+void SkRecorder::onDrawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ APPEND(DrawImageRect, this->copy(paint), sk_ref_sp(image), this->copy(src), dst, constraint);
+}
+
+void SkRecorder::onDrawImageNine(const SkImage* image, const SkIRect& center,
+ const SkRect& dst, const SkPaint* paint) {
+ APPEND(DrawImageNine, this->copy(paint), sk_ref_sp(image), center, dst);
+}
+
+void SkRecorder::onDrawImageLattice(const SkImage* image, const Lattice& lattice, const SkRect& dst,
+ const SkPaint* paint) {
+ int flagCount = lattice.fFlags ? (lattice.fXCount + 1) * (lattice.fYCount + 1) : 0;
+ SkASSERT(lattice.fBounds);
+ APPEND(DrawImageLattice, this->copy(paint), sk_ref_sp(image),
+ lattice.fXCount, this->copy(lattice.fXDivs, lattice.fXCount),
+ lattice.fYCount, this->copy(lattice.fYDivs, lattice.fYCount),
+ flagCount, this->copy(lattice.fFlags, flagCount), *lattice.fBounds, dst);
+}
+
+void SkRecorder::onDrawText(const void* text, size_t byteLength,
+ SkScalar x, SkScalar y, const SkPaint& paint) {
+ APPEND(DrawText,
+ paint, this->copy((const char*)text, byteLength), byteLength, x, y);
+}
+
+void SkRecorder::onDrawPosText(const void* text, size_t byteLength,
+ const SkPoint pos[], const SkPaint& paint) {
+ const int points = paint.countText(text, byteLength);
+ APPEND(DrawPosText,
+ paint,
+ this->copy((const char*)text, byteLength),
+ byteLength,
+ this->copy(pos, points));
+}
+
+void SkRecorder::onDrawPosTextH(const void* text, size_t byteLength,
+ const SkScalar xpos[], SkScalar constY, const SkPaint& paint) {
+ const int points = paint.countText(text, byteLength);
+ APPEND(DrawPosTextH,
+ paint,
+ this->copy((const char*)text, byteLength),
+ SkToUInt(byteLength),
+ constY,
+ this->copy(xpos, points));
+}
+
+void SkRecorder::onDrawTextOnPath(const void* text, size_t byteLength, const SkPath& path,
+ const SkMatrix* matrix, const SkPaint& paint) {
+ APPEND(DrawTextOnPath,
+ paint,
+ this->copy((const char*)text, byteLength),
+ byteLength,
+ path,
+ matrix ? *matrix : SkMatrix::I());
+}
+
+void SkRecorder::onDrawTextRSXform(const void* text, size_t byteLength, const SkRSXform xform[],
+ const SkRect* cull, const SkPaint& paint) {
+ APPEND(DrawTextRSXform,
+ paint,
+ this->copy((const char*)text, byteLength),
+ byteLength,
+ this->copy(xform, paint.countText(text, byteLength)),
+ this->copy(cull));
+}
+
+void SkRecorder::onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ TRY_MINIRECORDER(drawTextBlob, blob, x, y, paint);
+ APPEND(DrawTextBlob, paint, sk_ref_sp(blob), x, y);
+}
+
+void SkRecorder::onDrawPicture(const SkPicture* pic, const SkMatrix* matrix, const SkPaint* paint) {
+ if (fDrawPictureMode == Record_DrawPictureMode) {
+ fApproxBytesUsedBySubPictures += SkPictureUtils::ApproximateBytesUsed(pic);
+ APPEND(DrawPicture, this->copy(paint), sk_ref_sp(pic), matrix ? *matrix : SkMatrix::I());
+ } else {
+ SkASSERT(fDrawPictureMode == Playback_DrawPictureMode);
+ SkAutoCanvasMatrixPaint acmp(this, matrix, paint, pic->cullRect());
+ pic->playback(this);
+ }
+}
+
+void SkRecorder::onDrawShadowedPicture(const SkPicture* pic, const SkMatrix* matrix,
+ const SkPaint* paint, const SkShadowParams& params) {
+ if (fDrawPictureMode == Record_DrawPictureMode) {
+ fApproxBytesUsedBySubPictures += SkPictureUtils::ApproximateBytesUsed(pic);
+ APPEND(DrawShadowedPicture, this->copy(paint),
+ sk_ref_sp(pic),
+ matrix ? *matrix : SkMatrix::I(),
+ params);
+ } else {
+ // TODO update pic->playback(this) to draw the shadowed pic
+ SkASSERT(fDrawPictureMode == Playback_DrawPictureMode);
+ SkAutoCanvasMatrixPaint acmp(this, matrix, paint, pic->cullRect());
+ pic->playback(this);
+ }
+}
+
+
+void SkRecorder::onDrawVertices(VertexMode vmode,
+ int vertexCount, const SkPoint vertices[],
+ const SkPoint texs[], const SkColor colors[],
+ SkXfermode* xmode,
+ const uint16_t indices[], int indexCount, const SkPaint& paint) {
+ APPEND(DrawVertices, paint,
+ vmode,
+ vertexCount,
+ this->copy(vertices, vertexCount),
+ texs ? this->copy(texs, vertexCount) : nullptr,
+ colors ? this->copy(colors, vertexCount) : nullptr,
+ sk_ref_sp(xmode),
+ this->copy(indices, indexCount),
+ indexCount);
+}
+
+void SkRecorder::onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkXfermode* xmode, const SkPaint& paint) {
+ APPEND(DrawPatch, paint,
+ cubics ? this->copy(cubics, SkPatchUtils::kNumCtrlPts) : nullptr,
+ colors ? this->copy(colors, SkPatchUtils::kNumCorners) : nullptr,
+ texCoords ? this->copy(texCoords, SkPatchUtils::kNumCorners) : nullptr,
+ sk_ref_sp(xmode));
+}
+
+void SkRecorder::onDrawAtlas(const SkImage* atlas, const SkRSXform xform[], const SkRect tex[],
+ const SkColor colors[], int count, SkXfermode::Mode mode,
+ const SkRect* cull, const SkPaint* paint) {
+ APPEND(DrawAtlas, this->copy(paint),
+ sk_ref_sp(atlas),
+ this->copy(xform, count),
+ this->copy(tex, count),
+ this->copy(colors, count),
+ count,
+ mode,
+ this->copy(cull));
+}
+
+void SkRecorder::onDrawAnnotation(const SkRect& rect, const char key[], SkData* value) {
+ APPEND(DrawAnnotation, rect, SkString(key), sk_ref_sp(value));
+}
+
+void SkRecorder::willSave() {
+ APPEND(Save);
+}
+
+SkCanvas::SaveLayerStrategy SkRecorder::getSaveLayerStrategy(const SaveLayerRec& rec) {
+ APPEND(SaveLayer, this->copy(rec.fBounds)
+ , this->copy(rec.fPaint)
+ , sk_ref_sp(rec.fBackdrop)
+ , rec.fSaveLayerFlags);
+ return SkCanvas::kNoLayer_SaveLayerStrategy;
+}
+
+void SkRecorder::didRestore() {
+ APPEND(Restore, this->devBounds(), this->getTotalMatrix());
+}
+
+void SkRecorder::didConcat(const SkMatrix& matrix) {
+ APPEND(Concat, matrix);
+}
+
+void SkRecorder::didSetMatrix(const SkMatrix& matrix) {
+ APPEND(SetMatrix, matrix);
+}
+
+void SkRecorder::didTranslate(SkScalar dx, SkScalar dy) {
+ APPEND(Translate, dx, dy);
+}
+
+void SkRecorder::didTranslateZ(SkScalar z) {
+#ifdef SK_EXPERIMENTAL_SHADOWING
+ APPEND(TranslateZ, z);
+#endif
+}
+
+void SkRecorder::onClipRect(const SkRect& rect, ClipOp op, ClipEdgeStyle edgeStyle) {
+ INHERITED(onClipRect, rect, op, edgeStyle);
+ SkRecords::ClipOpAndAA opAA(op, kSoft_ClipEdgeStyle == edgeStyle);
+ APPEND(ClipRect, this->devBounds(), rect, opAA);
+}
+
+void SkRecorder::onClipRRect(const SkRRect& rrect, ClipOp op, ClipEdgeStyle edgeStyle) {
+ INHERITED(onClipRRect, rrect, op, edgeStyle);
+ SkRecords::ClipOpAndAA opAA(op, kSoft_ClipEdgeStyle == edgeStyle);
+ APPEND(ClipRRect, this->devBounds(), rrect, opAA);
+}
+
+void SkRecorder::onClipPath(const SkPath& path, ClipOp op, ClipEdgeStyle edgeStyle) {
+ INHERITED(onClipPath, path, op, edgeStyle);
+ SkRecords::ClipOpAndAA opAA(op, kSoft_ClipEdgeStyle == edgeStyle);
+ APPEND(ClipPath, this->devBounds(), path, opAA);
+}
+
+void SkRecorder::onClipRegion(const SkRegion& deviceRgn, ClipOp op) {
+ INHERITED(onClipRegion, deviceRgn, op);
+ APPEND(ClipRegion, this->devBounds(), deviceRgn, op);
+}
+
+sk_sp<SkSurface> SkRecorder::onNewSurface(const SkImageInfo&, const SkSurfaceProps&) {
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkRecorder.h b/gfx/skia/skia/src/core/SkRecorder.h
new file mode 100644
index 000000000..8efae17c6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecorder.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRecorder_DEFINED
+#define SkRecorder_DEFINED
+
+#include "SkBigPicture.h"
+#include "SkCanvas.h"
+#include "SkMiniRecorder.h"
+#include "SkRecord.h"
+#include "SkRecords.h"
+#include "SkTDArray.h"
+
+class SkBBHFactory;
+
+class SkDrawableList : SkNoncopyable {
+public:
+ SkDrawableList() {}
+ ~SkDrawableList();
+
+ int count() const { return fArray.count(); }
+ SkDrawable* const* begin() const { return fArray.begin(); }
+
+ void append(SkDrawable* drawable);
+
+ // Return a new or ref'd array of pictures that were snapped from our drawables.
+ SkBigPicture::SnapshotArray* newDrawableSnapshot();
+
+private:
+ SkTDArray<SkDrawable*> fArray;
+};
+
+// SkRecorder provides an SkCanvas interface for recording into an SkRecord.
+
+class SkRecorder : public SkCanvas {
+public:
+ // Does not take ownership of the SkRecord.
+ SkRecorder(SkRecord*, int width, int height, SkMiniRecorder* = nullptr); // legacy version
+ SkRecorder(SkRecord*, const SkRect& bounds, SkMiniRecorder* = nullptr);
+
+ enum DrawPictureMode { Record_DrawPictureMode, Playback_DrawPictureMode };
+ void reset(SkRecord*, const SkRect& bounds, DrawPictureMode, SkMiniRecorder* = nullptr);
+
+ size_t approxBytesUsedBySubPictures() const { return fApproxBytesUsedBySubPictures; }
+
+ SkDrawableList* getDrawableList() const { return fDrawableList.get(); }
+ SkDrawableList* detachDrawableList() { return fDrawableList.release(); }
+
+ // Make SkRecorder forget entirely about its SkRecord*; all calls to SkRecorder will fail.
+ void forgetRecord();
+
+ void willSave() override;
+ SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec&) override;
+ void willRestore() override {}
+ void didRestore() override;
+
+ void didConcat(const SkMatrix&) override;
+ void didSetMatrix(const SkMatrix&) override;
+ void didTranslate(SkScalar, SkScalar) override;
+
+#ifdef SK_EXPERIMENTAL_SHADOWING
+ void didTranslateZ(SkScalar) override;
+#else
+ void didTranslateZ(SkScalar);
+#endif
+
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override;
+ void onDrawDrawable(SkDrawable*, const SkMatrix*) override;
+ void onDrawText(const void* text,
+ size_t byteLength,
+ SkScalar x,
+ SkScalar y,
+ const SkPaint& paint) override;
+ void onDrawPosText(const void* text,
+ size_t byteLength,
+ const SkPoint pos[],
+ const SkPaint& paint) override;
+ void onDrawPosTextH(const void* text,
+ size_t byteLength,
+ const SkScalar xpos[],
+ SkScalar constY,
+ const SkPaint& paint) override;
+ void onDrawTextOnPath(const void* text,
+ size_t byteLength,
+ const SkPath& path,
+ const SkMatrix* matrix,
+ const SkPaint& paint) override;
+ void onDrawTextRSXform(const void* text,
+ size_t byteLength,
+ const SkRSXform[],
+ const SkRect* cull,
+ const SkPaint& paint) override;
+ void onDrawTextBlob(const SkTextBlob* blob,
+ SkScalar x,
+ SkScalar y,
+ const SkPaint& paint) override;
+ void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkXfermode* xmode,
+ const SkPaint& paint) override;
+
+ void onDrawPaint(const SkPaint&) override;
+ void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&) override;
+ void onDrawRect(const SkRect&, const SkPaint&) override;
+ void onDrawRegion(const SkRegion&, const SkPaint&) override;
+ void onDrawOval(const SkRect&, const SkPaint&) override;
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override;
+ void onDrawRRect(const SkRRect&, const SkPaint&) override;
+ void onDrawPath(const SkPath&, const SkPaint&) override;
+ void onDrawBitmap(const SkBitmap&, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawBitmapRect(const SkBitmap&, const SkRect* src, const SkRect& dst, const SkPaint*,
+ SrcRectConstraint) override;
+ void onDrawImage(const SkImage*, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawImageRect(const SkImage*, const SkRect* src, const SkRect& dst,
+ const SkPaint*, SrcRectConstraint) override;
+ void onDrawImageNine(const SkImage*, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawBitmapNine(const SkBitmap&, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawImageLattice(const SkImage*, const Lattice& lattice, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawBitmapLattice(const SkBitmap&, const Lattice& lattice, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawVertices(VertexMode vmode, int vertexCount,
+ const SkPoint vertices[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint&) override;
+ void onDrawAtlas(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[],
+ int count, SkXfermode::Mode, const SkRect* cull, const SkPaint*) override;
+
+ void onClipRect(const SkRect& rect, ClipOp, ClipEdgeStyle) override;
+ void onClipRRect(const SkRRect& rrect, ClipOp, ClipEdgeStyle) override;
+ void onClipPath(const SkPath& path, ClipOp, ClipEdgeStyle) override;
+ void onClipRegion(const SkRegion& deviceRgn, ClipOp) override;
+
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override;
+
+#ifdef SK_EXPERIMENTAL_SHADOWING
+ void onDrawShadowedPicture(const SkPicture*,
+ const SkMatrix*,
+ const SkPaint*,
+ const SkShadowParams& params) override;
+#else
+ void onDrawShadowedPicture(const SkPicture*,
+ const SkMatrix*,
+ const SkPaint*,
+ const SkShadowParams& params);
+#endif
+
+ void onDrawAnnotation(const SkRect&, const char[], SkData*) override;
+
+ sk_sp<SkSurface> onNewSurface(const SkImageInfo&, const SkSurfaceProps&) override;
+
+ void flushMiniRecorder();
+
+private:
+ template <typename T>
+ T* copy(const T*);
+
+ template <typename T>
+ T* copy(const T[], size_t count);
+
+ SkIRect devBounds() const {
+ SkIRect devBounds;
+ this->getClipDeviceBounds(&devBounds);
+ return devBounds;
+ }
+
+ DrawPictureMode fDrawPictureMode;
+ size_t fApproxBytesUsedBySubPictures;
+ SkRecord* fRecord;
+ SkAutoTDelete<SkDrawableList> fDrawableList;
+
+ SkMiniRecorder* fMiniRecorder;
+};
+
+#endif//SkRecorder_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRecords.cpp b/gfx/skia/skia/src/core/SkRecords.cpp
new file mode 100644
index 000000000..81dd92f9e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecords.cpp
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPathPriv.h"
+#include "SkRecords.h"
+
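+// (Editorial note: the two constructors below exist to warm lazily-computed
+// caches at record time -- updateBoundsCache() for the path bounds and
+// getType() for the matrix type mask -- presumably so that later playback does
+// not have to compute them.)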
+namespace SkRecords {
+ PreCachedPath::PreCachedPath(const SkPath& path) : SkPath(path) {
+ this->updateBoundsCache();
+#if 0 // Disabled to see if we ever really race on this. It costs time, chromium:496982.
+ SkPathPriv::FirstDirection junk;
+ (void)SkPathPriv::CheapComputeFirstDirection(*this, &junk);
+#endif
+ }
+
+ TypedMatrix::TypedMatrix(const SkMatrix& matrix) : SkMatrix(matrix) {
+ (void)this->getType();
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkRect.cpp b/gfx/skia/skia/src/core/SkRect.cpp
new file mode 100644
index 000000000..0b2723ab0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRect.cpp
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkRect.h"
+
+void SkIRect::join(int32_t left, int32_t top, int32_t right, int32_t bottom) {
+ // do nothing if the params are empty
+ if (left >= right || top >= bottom) {
+ return;
+ }
+
+ // if we are empty, just assign
+ if (fLeft >= fRight || fTop >= fBottom) {
+ this->set(left, top, right, bottom);
+ } else {
+ if (left < fLeft) fLeft = left;
+ if (top < fTop) fTop = top;
+ if (right > fRight) fRight = right;
+ if (bottom > fBottom) fBottom = bottom;
+ }
+}
+
+void SkIRect::sort() {
+ if (fLeft > fRight) {
+ SkTSwap<int32_t>(fLeft, fRight);
+ }
+ if (fTop > fBottom) {
+ SkTSwap<int32_t>(fTop, fBottom);
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+
+void SkRect::toQuad(SkPoint quad[4]) const {
+ SkASSERT(quad);
+
+ quad[0].set(fLeft, fTop);
+ quad[1].set(fRight, fTop);
+ quad[2].set(fRight, fBottom);
+ quad[3].set(fLeft, fBottom);
+}
+
+#include "SkNx.h"
+
+static inline bool is_finite(const Sk4s& value) {
+ auto finite = value * Sk4s(0) == Sk4s(0);
+ return finite.allTrue();
+}
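+// (Editorial note: the multiply-by-zero trick above relies on IEEE 754
+// semantics: x * 0 == 0 only when x is finite, while inf * 0 and NaN * 0 both
+// yield NaN, which fails the equality test in those lanes.)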
+
+bool SkRect::setBoundsCheck(const SkPoint pts[], int count) {
+ SkASSERT((pts && count > 0) || count == 0);
+
+ bool isFinite = true;
+
+ if (count <= 0) {
+ sk_bzero(this, sizeof(SkRect));
+ } else {
+ Sk4s min, max, accum;
+
+ if (count & 1) {
+ min = Sk4s(pts[0].fX, pts[0].fY, pts[0].fX, pts[0].fY);
+ pts += 1;
+ count -= 1;
+ } else {
+ min = Sk4s::Load(pts);
+ pts += 2;
+ count -= 2;
+ }
+ accum = max = min;
+ accum = accum * Sk4s(0);
+
+ count >>= 1;
+ for (int i = 0; i < count; ++i) {
+ Sk4s xy = Sk4s::Load(pts);
+ accum = accum * xy;
+ min = Sk4s::Min(min, xy);
+ max = Sk4s::Max(max, xy);
+ pts += 2;
+ }
+
+ /**
+ * With some trickery, we may be able to use Min/Max to also propagate non-finites,
+ * in which case we could eliminate accum entirely, and just check min and max for
+ * "is_finite".
+ */
+ if (is_finite(accum)) {
+ float minArray[4], maxArray[4];
+ min.store(minArray);
+ max.store(maxArray);
+ this->set(SkTMin(minArray[0], minArray[2]), SkTMin(minArray[1], minArray[3]),
+ SkTMax(maxArray[0], maxArray[2]), SkTMax(maxArray[1], maxArray[3]));
+ } else {
+ // we hit a non-finite value, so zero everything and return false
+ this->setEmpty();
+ isFinite = false;
+ }
+ }
+ return isFinite;
+}
+
+#define CHECK_INTERSECT(al, at, ar, ab, bl, bt, br, bb) \
+ SkScalar L = SkMaxScalar(al, bl); \
+ SkScalar R = SkMinScalar(ar, br); \
+ SkScalar T = SkMaxScalar(at, bt); \
+ SkScalar B = SkMinScalar(ab, bb); \
+ do { if (L >= R || T >= B) return false; } while (0)
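+// (Editorial note: CHECK_INTERSECT deliberately leaves L, T, R, and B in the
+// caller's scope so the intersect() overloads below can hand them straight to
+// setLTRB() after the early-out.)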
+
+bool SkRect::intersect(SkScalar left, SkScalar top, SkScalar right, SkScalar bottom) {
+ CHECK_INTERSECT(left, top, right, bottom, fLeft, fTop, fRight, fBottom);
+ this->setLTRB(L, T, R, B);
+ return true;
+}
+
+bool SkRect::intersect(const SkRect& r) {
+ return this->intersect(r.fLeft, r.fTop, r.fRight, r.fBottom);
+}
+
+bool SkRect::intersect(const SkRect& a, const SkRect& b) {
+ CHECK_INTERSECT(a.fLeft, a.fTop, a.fRight, a.fBottom, b.fLeft, b.fTop, b.fRight, b.fBottom);
+ this->setLTRB(L, T, R, B);
+ return true;
+}
+
+void SkRect::join(SkScalar left, SkScalar top, SkScalar right, SkScalar bottom) {
+ // do nothing if the params are empty
+ if (left >= right || top >= bottom) {
+ return;
+ }
+
+ // if we are empty, just assign
+ if (fLeft >= fRight || fTop >= fBottom) {
+ this->set(left, top, right, bottom);
+ } else {
+ fLeft = SkMinScalar(fLeft, left);
+ fTop = SkMinScalar(fTop, top);
+ fRight = SkMaxScalar(fRight, right);
+ fBottom = SkMaxScalar(fBottom, bottom);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "SkString.h"
+#include "SkStringUtils.h"
+
+static const char* set_scalar(SkString* storage, SkScalar value, SkScalarAsStringType asType) {
+ storage->reset();
+ SkAppendScalar(storage, value, asType);
+ return storage->c_str();
+}
+
+void SkRect::dump(bool asHex) const {
+ SkScalarAsStringType asType = asHex ? kHex_SkScalarAsStringType : kDec_SkScalarAsStringType;
+
+ SkString line;
+ if (asHex) {
+ SkString tmp;
+ line.printf( "SkRect::MakeLTRB(%s, /* %f */\n", set_scalar(&tmp, fLeft, asType), fLeft);
+ line.appendf(" %s, /* %f */\n", set_scalar(&tmp, fTop, asType), fTop);
+ line.appendf(" %s, /* %f */\n", set_scalar(&tmp, fRight, asType), fRight);
+ line.appendf(" %s /* %f */);", set_scalar(&tmp, fBottom, asType), fBottom);
+ } else {
+ SkString strL, strT, strR, strB;
+ SkAppendScalarDec(&strL, fLeft);
+ SkAppendScalarDec(&strT, fTop);
+ SkAppendScalarDec(&strR, fRight);
+ SkAppendScalarDec(&strB, fBottom);
+ line.printf("SkRect::MakeLTRB(%s, %s, %s, %s);",
+ strL.c_str(), strT.c_str(), strR.c_str(), strB.c_str());
+ }
+ SkDebugf("%s\n", line.c_str());
+}
diff --git a/gfx/skia/skia/src/core/SkRefDict.cpp b/gfx/skia/skia/src/core/SkRefDict.cpp
new file mode 100644
index 000000000..74071888a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRefDict.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkRefDict.h"
+#include "SkString.h"
+
+struct SkRefDict::Impl {
+ Impl* fNext;
+ SkString fName;
+ SkRefCnt* fData;
+};
+
+SkRefDict::SkRefDict() : fImpl(nullptr) {}
+
+SkRefDict::~SkRefDict() {
+ this->removeAll();
+}
+
+SkRefCnt* SkRefDict::find(const char name[]) const {
+ if (nullptr == name) {
+ return nullptr;
+ }
+
+ Impl* rec = fImpl;
+ while (rec) {
+ if (rec->fName.equals(name)) {
+ return rec->fData;
+ }
+ rec = rec->fNext;
+ }
+ return nullptr;
+}
+
+void SkRefDict::set(const char name[], SkRefCnt* data) {
+ if (nullptr == name) {
+ return;
+ }
+
+ Impl* rec = fImpl;
+ Impl* prev = nullptr;
+ while (rec) {
+ if (rec->fName.equals(name)) {
+ if (data) {
+ // replace
+ data->ref();
+ rec->fData->unref();
+ rec->fData = data;
+ } else {
+ // remove
+ rec->fData->unref();
+ if (prev) {
+ prev->fNext = rec->fNext;
+ } else {
+ fImpl = rec->fNext;
+ }
+ delete rec;
+ }
+ return;
+ }
+ prev = rec;
+ rec = rec->fNext;
+ }
+
+ // if we get here, name was not found, so add it
+ data->ref();
+ rec = new Impl;
+ rec->fName.set(name);
+ rec->fData = data;
+ // prepend to the head of our list
+ rec->fNext = fImpl;
+ fImpl = rec;
+}
+
+void SkRefDict::removeAll() {
+ Impl* rec = fImpl;
+ while (rec) {
+ Impl* next = rec->fNext;
+ rec->fData->unref();
+ delete rec;
+ rec = next;
+ }
+ fImpl = nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkRefDict.h b/gfx/skia/skia/src/core/SkRefDict.h
new file mode 100644
index 000000000..fec49ceb7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRefDict.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkRefDict_DEFINED
+#define SkRefDict_DEFINED
+
+#include "SkRefCnt.h"
+
+/**
+ * A dictionary of string,refcnt pairs. The dictionary is also an owner of the
+ * refcnt objects while they are contained.
+ */
+class SK_API SkRefDict : SkNoncopyable {
+public:
+ SkRefDict();
+ ~SkRefDict();
+
+ /**
+ * Return the data associated with name[], or nullptr if no matching entry
+ * is found. The reference-count of the entry is not affected.
+ */
+ SkRefCnt* find(const char name[]) const;
+
+ /**
+ * If data is nullptr, remove (if present) the entry matching name and call
+ * prev_data->unref() on the data for the matching entry.
+ * If data is non-null, replace the data of any existing entry matching
+ * name (calling prev_data->unref() on the old data), or add a new entry.
+ * In either case, data->ref() is called.
+ */
+ void set(const char name[], SkRefCnt* data);
+
+ /**
+ * Remove the matching entry (if found) and unref its data.
+ */
+ void remove(const char name[]) { this->set(name, nullptr); }
+
+ /**
+ * Remove all entries, and unref() their associated data.
+ */
+ void removeAll();
+
+private:
+ struct Impl;
+ Impl* fImpl;
+};
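+
+/* Usage sketch (editorial illustration; fooPtr is any ref-counted SkRefCnt*):
+ *
+ *   SkRefDict dict;
+ *   dict.set("thumbnail", fooPtr);          // dict takes a ref on fooPtr
+ *   SkRefCnt* f = dict.find("thumbnail");   // lookup; no ref is added
+ *   dict.remove("thumbnail");               // unrefs fooPtr
+ */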
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkRegion.cpp b/gfx/skia/skia/src/core/SkRegion.cpp
new file mode 100644
index 000000000..a50425afd
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRegion.cpp
@@ -0,0 +1,1479 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkAtomics.h"
+#include "SkRegionPriv.h"
+#include "SkTemplates.h"
+#include "SkUtils.h"
+
+/* Region Layout
+ *
+ * TOP
+ *
+ * [ Bottom, X-Intervals, [Left, Right]..., X-Sentinel ]
+ * ...
+ *
+ * Y-Sentinel
+ */
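+
+/* For illustration (editorial note): the region consisting of the single rect
+ * (L=10, T=20, R=30, B=40) is stored as the 7-value run array
+ *
+ *   { 20, 40, 1, 10, 30, Sentinel, Sentinel }
+ *
+ * i.e. Top, then one scanline [Bottom, interval-count, Left, Right,
+ * X-Sentinel], then the trailing Y-Sentinel; see BuildRectRuns() below.
+ */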
+
+SkDEBUGCODE(int32_t gRgnAllocCounter;)
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+/* Pass in a pointer to the beginning of the intervals.
+ * We back up 1 to read the interval-count.
+ * Return the beginning of the next scanline (i.e. the next Y-value)
+ */
+static SkRegion::RunType* skip_intervals(const SkRegion::RunType runs[]) {
+ int intervals = runs[-1];
+#ifdef SK_DEBUG
+ if (intervals > 0) {
+ SkASSERT(runs[0] < runs[1]);
+ SkASSERT(runs[1] < SkRegion::kRunTypeSentinel);
+ } else {
+ SkASSERT(0 == intervals);
+ SkASSERT(SkRegion::kRunTypeSentinel == runs[0]);
+ }
+#endif
+ runs += intervals * 2 + 1;
+ return const_cast<SkRegion::RunType*>(runs);
+}
+
+bool SkRegion::RunsAreARect(const SkRegion::RunType runs[], int count,
+ SkIRect* bounds) {
+ assert_sentinel(runs[0], false); // top
+ SkASSERT(count >= kRectRegionRuns);
+
+ if (count == kRectRegionRuns) {
+ assert_sentinel(runs[1], false); // bottom
+ SkASSERT(1 == runs[2]);
+ assert_sentinel(runs[3], false); // left
+ assert_sentinel(runs[4], false); // right
+ assert_sentinel(runs[5], true);
+ assert_sentinel(runs[6], true);
+
+ SkASSERT(runs[0] < runs[1]); // valid height
+ SkASSERT(runs[3] < runs[4]); // valid width
+
+ bounds->set(runs[3], runs[0], runs[4], runs[1]);
+ return true;
+ }
+ return false;
+}
+
+//////////////////////////////////////////////////////////////////////////
+
+SkRegion::SkRegion() {
+ fBounds.set(0, 0, 0, 0);
+ fRunHead = SkRegion_gEmptyRunHeadPtr;
+}
+
+SkRegion::SkRegion(const SkRegion& src) {
+ fRunHead = SkRegion_gEmptyRunHeadPtr; // just need a value that won't trigger sk_free(fRunHead)
+ this->setRegion(src);
+}
+
+SkRegion::SkRegion(const SkIRect& rect) {
+ fRunHead = SkRegion_gEmptyRunHeadPtr; // just need a value that won't trigger sk_free(fRunHead)
+ this->setRect(rect);
+}
+
+SkRegion::~SkRegion() {
+ this->freeRuns();
+}
+
+void SkRegion::freeRuns() {
+ if (this->isComplex()) {
+ SkASSERT(fRunHead->fRefCnt >= 1);
+ if (sk_atomic_dec(&fRunHead->fRefCnt) == 1) {
+ //SkASSERT(gRgnAllocCounter > 0);
+ //SkDEBUGCODE(sk_atomic_dec(&gRgnAllocCounter));
+ //SkDEBUGF(("************** gRgnAllocCounter::free %d\n", gRgnAllocCounter));
+ sk_free(fRunHead);
+ }
+ }
+}
+
+void SkRegion::allocateRuns(int count, int ySpanCount, int intervalCount) {
+ fRunHead = RunHead::Alloc(count, ySpanCount, intervalCount);
+}
+
+void SkRegion::allocateRuns(int count) {
+ fRunHead = RunHead::Alloc(count);
+}
+
+void SkRegion::allocateRuns(const RunHead& head) {
+ fRunHead = RunHead::Alloc(head.fRunCount,
+ head.getYSpanCount(),
+ head.getIntervalCount());
+}
+
+SkRegion& SkRegion::operator=(const SkRegion& src) {
+ (void)this->setRegion(src);
+ return *this;
+}
+
+void SkRegion::swap(SkRegion& other) {
+ SkTSwap<SkIRect>(fBounds, other.fBounds);
+ SkTSwap<RunHead*>(fRunHead, other.fRunHead);
+}
+
+int SkRegion::computeRegionComplexity() const {
+ if (this->isEmpty()) {
+ return 0;
+ } else if (this->isRect()) {
+ return 1;
+ }
+ return fRunHead->getIntervalCount();
+}
+
+bool SkRegion::setEmpty() {
+ this->freeRuns();
+ fBounds.set(0, 0, 0, 0);
+ fRunHead = SkRegion_gEmptyRunHeadPtr;
+ return false;
+}
+
+bool SkRegion::setRect(int32_t left, int32_t top,
+ int32_t right, int32_t bottom) {
+ if (left >= right || top >= bottom) {
+ return this->setEmpty();
+ }
+ this->freeRuns();
+ fBounds.set(left, top, right, bottom);
+ fRunHead = SkRegion_gRectRunHeadPtr;
+ return true;
+}
+
+bool SkRegion::setRect(const SkIRect& r) {
+ return this->setRect(r.fLeft, r.fTop, r.fRight, r.fBottom);
+}
+
+bool SkRegion::setRegion(const SkRegion& src) {
+ if (this != &src) {
+ this->freeRuns();
+
+ fBounds = src.fBounds;
+ fRunHead = src.fRunHead;
+ if (this->isComplex()) {
+ sk_atomic_inc(&fRunHead->fRefCnt);
+ }
+ }
+ return fRunHead != SkRegion_gEmptyRunHeadPtr;
+}
+
+bool SkRegion::op(const SkIRect& rect, const SkRegion& rgn, Op op) {
+ SkRegion tmp(rect);
+
+ return this->op(tmp, rgn, op);
+}
+
+bool SkRegion::op(const SkRegion& rgn, const SkIRect& rect, Op op) {
+ SkRegion tmp(rect);
+
+ return this->op(rgn, tmp, op);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_BUILD_FOR_ANDROID
+#include <stdio.h>
+char* SkRegion::toString() {
+ Iterator iter(*this);
+ int count = 0;
+ while (!iter.done()) {
+ count++;
+ iter.next();
+ }
+ // 4 ints, up to 10 digits each plus sign, 3 commas, '(', ')', SkRegion() and '\0'
+ const int max = (count*((11*4)+5))+11+1;
+ char* result = (char*)sk_malloc_throw(max);
+ if (result == nullptr) {
+ return nullptr;
+ }
+ count = sprintf(result, "SkRegion(");
+ iter.reset(*this);
+ while (!iter.done()) {
+ const SkIRect& r = iter.rect();
+ count += sprintf(result+count, "(%d,%d,%d,%d)", r.fLeft, r.fTop, r.fRight, r.fBottom);
+ iter.next();
+ }
+ count += sprintf(result+count, ")");
+ return result;
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+int SkRegion::count_runtype_values(int* itop, int* ibot) const {
+ int maxT;
+
+ if (this->isRect()) {
+ maxT = 2;
+ } else {
+ SkASSERT(this->isComplex());
+ maxT = fRunHead->getIntervalCount() * 2;
+ }
+ *itop = fBounds.fTop;
+ *ibot = fBounds.fBottom;
+ return maxT;
+}
+
+static bool isRunCountEmpty(int count) {
+ return count <= 2;
+}
+
+bool SkRegion::setRuns(RunType runs[], int count) {
+ SkDEBUGCODE(this->validate();)
+ SkASSERT(count > 0);
+
+ if (isRunCountEmpty(count)) {
+ // SkDEBUGF(("setRuns: empty\n"));
+ assert_sentinel(runs[count-1], true);
+ return this->setEmpty();
+ }
+
+ // trim off any empty spans from the top and bottom
+ // it's odd that this should be needed; perhaps op() could be smarter...
+ if (count > kRectRegionRuns) {
+ RunType* stop = runs + count;
+ assert_sentinel(runs[0], false); // top
+ assert_sentinel(runs[1], false); // bottom
+ // runs[2] is uncomputed intervalCount
+
+ if (runs[3] == SkRegion::kRunTypeSentinel) { // should be first left...
+ runs += 3; // skip empty initial span
+ runs[0] = runs[-2]; // set new top to prev bottom
+ assert_sentinel(runs[1], false); // bot: a sentinel would mean two in a row
+ assert_sentinel(runs[2], false); // intervalcount
+ assert_sentinel(runs[3], false); // left
+ assert_sentinel(runs[4], false); // right
+ }
+
+ assert_sentinel(stop[-1], true);
+ assert_sentinel(stop[-2], true);
+
+ // now check for a trailing empty span
+ if (stop[-5] == SkRegion::kRunTypeSentinel) { // eek, stop[-4] was a bottom with no x-runs
+ stop[-4] = SkRegion::kRunTypeSentinel; // kill empty last span
+ stop -= 3;
+ assert_sentinel(stop[-1], true); // last y-sentinel
+ assert_sentinel(stop[-2], true); // last x-sentinel
+ assert_sentinel(stop[-3], false); // last right
+ assert_sentinel(stop[-4], false); // last left
+ assert_sentinel(stop[-5], false); // last interval-count
+ assert_sentinel(stop[-6], false); // last bottom
+ }
+ count = (int)(stop - runs);
+ }
+
+ SkASSERT(count >= kRectRegionRuns);
+
+ if (SkRegion::RunsAreARect(runs, count, &fBounds)) {
+ return this->setRect(fBounds);
+ }
+
+ // if we get here, we need to become a complex region
+
+ if (!this->isComplex() || fRunHead->fRunCount != count) {
+ this->freeRuns();
+ this->allocateRuns(count);
+ }
+
+ // must call this before we can write directly into runs()
+ // in case we are sharing the buffer with another region (copy on write)
+ fRunHead = fRunHead->ensureWritable();
+ memcpy(fRunHead->writable_runs(), runs, count * sizeof(RunType));
+ fRunHead->computeRunBounds(&fBounds);
+
+ SkDEBUGCODE(this->validate();)
+
+ return true;
+}
+
+void SkRegion::BuildRectRuns(const SkIRect& bounds,
+ RunType runs[kRectRegionRuns]) {
+ runs[0] = bounds.fTop;
+ runs[1] = bounds.fBottom;
+ runs[2] = 1; // 1 interval for this scanline
+ runs[3] = bounds.fLeft;
+ runs[4] = bounds.fRight;
+ runs[5] = kRunTypeSentinel;
+ runs[6] = kRunTypeSentinel;
+}
+
+bool SkRegion::contains(int32_t x, int32_t y) const {
+ SkDEBUGCODE(this->validate();)
+
+ if (!fBounds.contains(x, y)) {
+ return false;
+ }
+ if (this->isRect()) {
+ return true;
+ }
+ SkASSERT(this->isComplex());
+
+ const RunType* runs = fRunHead->findScanline(y);
+
+ // Skip the Bottom and IntervalCount
+ runs += 2;
+
+ // Just walk this scanline, checking each interval. The X-sentinel will
+ // appear as a left-inteval (runs[0]) and should abort the search.
+ //
+ // We could do a bsearch, using interval-count (runs[1]), but need to time
+ // when that would be worthwhile.
+ //
+ for (;;) {
+ if (x < runs[0]) {
+ break;
+ }
+ if (x < runs[1]) {
+ return true;
+ }
+ runs += 2;
+ }
+ return false;
+}
+
+static SkRegion::RunType scanline_bottom(const SkRegion::RunType runs[]) {
+ return runs[0];
+}
+
+static const SkRegion::RunType* scanline_next(const SkRegion::RunType runs[]) {
+ // skip [B N [L R]... S]
+ return runs + 2 + runs[1] * 2 + 1;
+}
+
+static bool scanline_contains(const SkRegion::RunType runs[],
+ SkRegion::RunType L, SkRegion::RunType R) {
+ runs += 2; // skip Bottom and IntervalCount
+ for (;;) {
+ if (L < runs[0]) {
+ break;
+ }
+ if (R <= runs[1]) {
+ return true;
+ }
+ runs += 2;
+ }
+ return false;
+}
+
+bool SkRegion::contains(const SkIRect& r) const {
+ SkDEBUGCODE(this->validate();)
+
+ if (!fBounds.contains(r)) {
+ return false;
+ }
+ if (this->isRect()) {
+ return true;
+ }
+ SkASSERT(this->isComplex());
+
+ const RunType* scanline = fRunHead->findScanline(r.fTop);
+ for (;;) {
+ if (!scanline_contains(scanline, r.fLeft, r.fRight)) {
+ return false;
+ }
+ if (r.fBottom <= scanline_bottom(scanline)) {
+ break;
+ }
+ scanline = scanline_next(scanline);
+ }
+ return true;
+}
+
+bool SkRegion::contains(const SkRegion& rgn) const {
+ SkDEBUGCODE(this->validate();)
+ SkDEBUGCODE(rgn.validate();)
+
+ if (this->isEmpty() || rgn.isEmpty() || !fBounds.contains(rgn.fBounds)) {
+ return false;
+ }
+ if (this->isRect()) {
+ return true;
+ }
+ if (rgn.isRect()) {
+ return this->contains(rgn.getBounds());
+ }
+
+ /*
+ * A contains B is equivalent to
+ * B - A == 0
+ */
+ return !Oper(rgn, *this, kDifference_Op, nullptr);
+}
+
+const SkRegion::RunType* SkRegion::getRuns(RunType tmpStorage[],
+ int* intervals) const {
+ SkASSERT(tmpStorage && intervals);
+ const RunType* runs = tmpStorage;
+
+ if (this->isEmpty()) {
+ tmpStorage[0] = kRunTypeSentinel;
+ *intervals = 0;
+ } else if (this->isRect()) {
+ BuildRectRuns(fBounds, tmpStorage);
+ *intervals = 1;
+ } else {
+ runs = fRunHead->readonly_runs();
+ *intervals = fRunHead->getIntervalCount();
+ }
+ return runs;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool scanline_intersects(const SkRegion::RunType runs[],
+ SkRegion::RunType L, SkRegion::RunType R) {
+ runs += 2; // skip Bottom and IntervalCount
+ for (;;) {
+ if (R <= runs[0]) {
+ break;
+ }
+ if (L < runs[1]) {
+ return true;
+ }
+ runs += 2;
+ }
+ return false;
+}
+
+bool SkRegion::intersects(const SkIRect& r) const {
+ SkDEBUGCODE(this->validate();)
+
+ if (this->isEmpty() || r.isEmpty()) {
+ return false;
+ }
+
+ SkIRect sect;
+ if (!sect.intersect(fBounds, r)) {
+ return false;
+ }
+ if (this->isRect()) {
+ return true;
+ }
+ SkASSERT(this->isComplex());
+
+ const RunType* scanline = fRunHead->findScanline(sect.fTop);
+ for (;;) {
+ if (scanline_intersects(scanline, sect.fLeft, sect.fRight)) {
+ return true;
+ }
+ if (sect.fBottom <= scanline_bottom(scanline)) {
+ break;
+ }
+ scanline = scanline_next(scanline);
+ }
+ return false;
+}
+
+bool SkRegion::intersects(const SkRegion& rgn) const {
+ if (this->isEmpty() || rgn.isEmpty()) {
+ return false;
+ }
+
+ if (!SkIRect::Intersects(fBounds, rgn.fBounds)) {
+ return false;
+ }
+
+ bool weAreARect = this->isRect();
+ bool theyAreARect = rgn.isRect();
+
+ if (weAreARect && theyAreARect) {
+ return true;
+ }
+ if (weAreARect) {
+ return rgn.intersects(this->getBounds());
+ }
+ if (theyAreARect) {
+ return this->intersects(rgn.getBounds());
+ }
+
+ // both of us are complex
+ return Oper(*this, rgn, kIntersect_Op, nullptr);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkRegion::operator==(const SkRegion& b) const {
+ SkDEBUGCODE(validate();)
+ SkDEBUGCODE(b.validate();)
+
+ if (this == &b) {
+ return true;
+ }
+ if (fBounds != b.fBounds) {
+ return false;
+ }
+
+ const SkRegion::RunHead* ah = fRunHead;
+ const SkRegion::RunHead* bh = b.fRunHead;
+
+ // this catches empties and rects being equal
+ if (ah == bh) {
+ return true;
+ }
+ // now we insist that both are complex (but different ptrs)
+ if (!this->isComplex() || !b.isComplex()) {
+ return false;
+ }
+ return ah->fRunCount == bh->fRunCount &&
+ !memcmp(ah->readonly_runs(), bh->readonly_runs(),
+ ah->fRunCount * sizeof(SkRegion::RunType));
+}
+
+void SkRegion::translate(int dx, int dy, SkRegion* dst) const {
+ SkDEBUGCODE(this->validate();)
+
+ if (nullptr == dst) {
+ return;
+ }
+ if (this->isEmpty()) {
+ dst->setEmpty();
+ } else if (this->isRect()) {
+ dst->setRect(fBounds.fLeft + dx, fBounds.fTop + dy,
+ fBounds.fRight + dx, fBounds.fBottom + dy);
+ } else {
+ if (this == dst) {
+ dst->fRunHead = dst->fRunHead->ensureWritable();
+ } else {
+ SkRegion tmp;
+ tmp.allocateRuns(*fRunHead);
+ tmp.fBounds = fBounds;
+ dst->swap(tmp);
+ }
+
+ dst->fBounds.offset(dx, dy);
+
+ const RunType* sruns = fRunHead->readonly_runs();
+ RunType* druns = dst->fRunHead->writable_runs();
+
+ *druns++ = (SkRegion::RunType)(*sruns++ + dy); // top
+ for (;;) {
+ int bottom = *sruns++;
+ if (bottom == kRunTypeSentinel) {
+ break;
+ }
+ *druns++ = (SkRegion::RunType)(bottom + dy); // bottom;
+ *druns++ = *sruns++; // copy intervalCount;
+ for (;;) {
+ int x = *sruns++;
+ if (x == kRunTypeSentinel) {
+ break;
+ }
+ *druns++ = (SkRegion::RunType)(x + dx);
+ *druns++ = (SkRegion::RunType)(*sruns++ + dx);
+ }
+ *druns++ = kRunTypeSentinel; // x sentinel
+ }
+ *druns++ = kRunTypeSentinel; // y sentinel
+
+ SkASSERT(sruns - fRunHead->readonly_runs() == fRunHead->fRunCount);
+ SkASSERT(druns - dst->fRunHead->readonly_runs() == dst->fRunHead->fRunCount);
+ }
+
+ SkDEBUGCODE(this->validate();)
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkRegion::setRects(const SkIRect rects[], int count) {
+ if (0 == count) {
+ this->setEmpty();
+ } else {
+ this->setRect(rects[0]);
+ for (int i = 1; i < count; i++) {
+ this->op(rects[i], kUnion_Op);
+ }
+ }
+ return !this->isEmpty();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if defined _WIN32 // disable warning : local variable used without having been initialized
+#pragma warning ( push )
+#pragma warning ( disable : 4701 )
+#endif
+
+#ifdef SK_DEBUG
+static void assert_valid_pair(int left, int rite)
+{
+ SkASSERT(left == SkRegion::kRunTypeSentinel || left < rite);
+}
+#else
+ #define assert_valid_pair(left, rite)
+#endif
+
+struct spanRec {
+ const SkRegion::RunType* fA_runs;
+ const SkRegion::RunType* fB_runs;
+ int fA_left, fA_rite, fB_left, fB_rite;
+ int fLeft, fRite, fInside;
+
+ void init(const SkRegion::RunType a_runs[],
+ const SkRegion::RunType b_runs[]) {
+ fA_left = *a_runs++;
+ fA_rite = *a_runs++;
+ fB_left = *b_runs++;
+ fB_rite = *b_runs++;
+
+ fA_runs = a_runs;
+ fB_runs = b_runs;
+ }
+
+ bool done() const {
+ SkASSERT(fA_left <= SkRegion::kRunTypeSentinel);
+ SkASSERT(fB_left <= SkRegion::kRunTypeSentinel);
+ return fA_left == SkRegion::kRunTypeSentinel &&
+ fB_left == SkRegion::kRunTypeSentinel;
+ }
+
+ void next() {
+ assert_valid_pair(fA_left, fA_rite);
+ assert_valid_pair(fB_left, fB_rite);
+
+ int inside, left, rite SK_INIT_TO_AVOID_WARNING;
+ bool a_flush = false;
+ bool b_flush = false;
+
+ int a_left = fA_left;
+ int a_rite = fA_rite;
+ int b_left = fB_left;
+ int b_rite = fB_rite;
+
+ if (a_left < b_left) {
+ inside = 1;
+ left = a_left;
+ if (a_rite <= b_left) { // [...] <...>
+ rite = a_rite;
+ a_flush = true;
+ } else { // [...<..]...> or [...<...>...]
+ rite = a_left = b_left;
+ }
+ } else if (b_left < a_left) {
+ inside = 2;
+ left = b_left;
+ if (b_rite <= a_left) { // [...] <...>
+ rite = b_rite;
+ b_flush = true;
+ } else { // [...<..]...> or [...<...>...]
+ rite = b_left = a_left;
+ }
+ } else { // a_left == b_left
+ inside = 3;
+ left = a_left; // or b_left
+ if (a_rite <= b_rite) {
+ rite = b_left = a_rite;
+ a_flush = true;
+ }
+ if (b_rite <= a_rite) {
+ rite = a_left = b_rite;
+ b_flush = true;
+ }
+ }
+
+ if (a_flush) {
+ a_left = *fA_runs++;
+ a_rite = *fA_runs++;
+ }
+ if (b_flush) {
+ b_left = *fB_runs++;
+ b_rite = *fB_runs++;
+ }
+
+ SkASSERT(left <= rite);
+
+ // now update our state
+ fA_left = a_left;
+ fA_rite = a_rite;
+ fB_left = b_left;
+ fB_rite = b_rite;
+
+ fLeft = left;
+ fRite = rite;
+ fInside = inside;
+ }
+};
+
+static SkRegion::RunType* operate_on_span(const SkRegion::RunType a_runs[],
+ const SkRegion::RunType b_runs[],
+ SkRegion::RunType dst[],
+ int min, int max) {
+ spanRec rec;
+ bool firstInterval = true;
+
+ rec.init(a_runs, b_runs);
+
+ while (!rec.done()) {
+ rec.next();
+
+ int left = rec.fLeft;
+ int rite = rec.fRite;
+
+ // add left,rite to our dst buffer (checking for coincidence)
+ if ((unsigned)(rec.fInside - min) <= (unsigned)(max - min) &&
+ left < rite) { // skip if equal
+ if (firstInterval || dst[-1] < left) {
+ *dst++ = (SkRegion::RunType)(left);
+ *dst++ = (SkRegion::RunType)(rite);
+ firstInterval = false;
+ } else {
+ // update the right edge
+ dst[-1] = (SkRegion::RunType)(rite);
+ }
+ }
+ }
+
+ *dst++ = SkRegion::kRunTypeSentinel;
+ return dst;
+}
+
+#if defined _WIN32
+#pragma warning ( pop )
+#endif
+
+static const struct {
+ uint8_t fMin;
+ uint8_t fMax;
+} gOpMinMax[] = {
+ { 1, 1 }, // Difference
+ { 3, 3 }, // Intersection
+ { 1, 3 }, // Union
+ { 1, 2 } // XOR
+};
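+// (Editorial note: fInside, computed in spanRec::next(), is 1 when only region
+// A covers a span, 2 when only B does, and 3 when both do; operate_on_span()
+// keeps a span when fMin <= fInside <= fMax, which is how the table above
+// encodes each op.)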
+
+class RgnOper {
+public:
+ RgnOper(int top, SkRegion::RunType dst[], SkRegion::Op op) {
+ // need to ensure that the op enum lines up with our minmax array
+ SkASSERT(SkRegion::kDifference_Op == 0);
+ SkASSERT(SkRegion::kIntersect_Op == 1);
+ SkASSERT(SkRegion::kUnion_Op == 2);
+ SkASSERT(SkRegion::kXOR_Op == 3);
+ SkASSERT((unsigned)op <= 3);
+
+ fStartDst = dst;
+ fPrevDst = dst + 1;
+ fPrevLen = 0; // will never match a length from operate_on_span
+ fTop = (SkRegion::RunType)(top); // just a first guess, we might update this
+
+ fMin = gOpMinMax[op].fMin;
+ fMax = gOpMinMax[op].fMax;
+ }
+
+ void addSpan(int bottom, const SkRegion::RunType a_runs[],
+ const SkRegion::RunType b_runs[]) {
+ // skip X values and slots for the next Y+intervalCount
+ SkRegion::RunType* start = fPrevDst + fPrevLen + 2;
+ // start points to beginning of dst interval
+ SkRegion::RunType* stop = operate_on_span(a_runs, b_runs, start, fMin, fMax);
+ size_t len = stop - start;
+ SkASSERT(len >= 1 && (len & 1) == 1);
+ SkASSERT(SkRegion::kRunTypeSentinel == stop[-1]);
+
+ if (fPrevLen == len &&
+ (1 == len || !memcmp(fPrevDst, start,
+ (len - 1) * sizeof(SkRegion::RunType)))) {
+ // update Y value
+ fPrevDst[-2] = (SkRegion::RunType)(bottom);
+ } else { // accept the new span
+ if (len == 1 && fPrevLen == 0) {
+ fTop = (SkRegion::RunType)(bottom); // just update our bottom
+ } else {
+ start[-2] = (SkRegion::RunType)(bottom);
+ start[-1] = SkToS32(len >> 1);
+ fPrevDst = start;
+ fPrevLen = len;
+ }
+ }
+ }
+
+ int flush() {
+ fStartDst[0] = fTop;
+ fPrevDst[fPrevLen] = SkRegion::kRunTypeSentinel;
+ return (int)(fPrevDst - fStartDst + fPrevLen + 1);
+ }
+
+ bool isEmpty() const { return 0 == fPrevLen; }
+
+ uint8_t fMin, fMax;
+
+private:
+ SkRegion::RunType* fStartDst;
+ SkRegion::RunType* fPrevDst;
+ size_t fPrevLen;
+ SkRegion::RunType fTop;
+};
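+// (Editorial note: RgnOper::addSpan() run-length compresses vertically -- when
+// a new scanline's intervals exactly match the previous scanline's, it only
+// extends that scanline's Bottom value instead of emitting a new one.)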
+
+// want a unique value to signal that we exited due to quickExit
+#define QUICK_EXIT_TRUE_COUNT (-1)
+
+static int operate(const SkRegion::RunType a_runs[],
+ const SkRegion::RunType b_runs[],
+ SkRegion::RunType dst[],
+ SkRegion::Op op,
+ bool quickExit) {
+ const SkRegion::RunType gEmptyScanline[] = {
+ 0, // dummy bottom value
+ 0, // zero intervals
+ SkRegion::kRunTypeSentinel,
+ // We just need a 2nd value, since spanRec.init() reads 2 values even
+ // when the first value is the sentinel (in which case it ignores the
+ // 2nd). Without the 2nd value here we might read uninitialized memory.
+ // This matters when we use gSentinel, which points at our sentinel
+ // value.
+ 0
+ };
+ const SkRegion::RunType* const gSentinel = &gEmptyScanline[2];
+
+ int a_top = *a_runs++;
+ int a_bot = *a_runs++;
+ int b_top = *b_runs++;
+ int b_bot = *b_runs++;
+
+ a_runs += 1; // skip the intervalCount;
+ b_runs += 1; // skip the intervalCount;
+
+ // Now a_runs and b_runs point to their intervals (or sentinels)
+
+ assert_sentinel(a_top, false);
+ assert_sentinel(a_bot, false);
+ assert_sentinel(b_top, false);
+ assert_sentinel(b_bot, false);
+
+ RgnOper oper(SkMin32(a_top, b_top), dst, op);
+
+ int prevBot = SkRegion::kRunTypeSentinel; // so we fail the first test
+
+ while (a_bot < SkRegion::kRunTypeSentinel ||
+ b_bot < SkRegion::kRunTypeSentinel) {
+ int top, bot SK_INIT_TO_AVOID_WARNING;
+ const SkRegion::RunType* run0 = gSentinel;
+ const SkRegion::RunType* run1 = gSentinel;
+ bool a_flush = false;
+ bool b_flush = false;
+
+ if (a_top < b_top) {
+ top = a_top;
+ run0 = a_runs;
+ if (a_bot <= b_top) { // [...] <...>
+ bot = a_bot;
+ a_flush = true;
+ } else { // [...<..]...> or [...<...>...]
+ bot = a_top = b_top;
+ }
+ } else if (b_top < a_top) {
+ top = b_top;
+ run1 = b_runs;
+ if (b_bot <= a_top) { // [...] <...>
+ bot = b_bot;
+ b_flush = true;
+ } else { // [...<..]...> or [...<...>...]
+ bot = b_top = a_top;
+ }
+ } else { // a_top == b_top
+ top = a_top; // or b_top
+ run0 = a_runs;
+ run1 = b_runs;
+ if (a_bot <= b_bot) {
+ bot = b_top = a_bot;
+ a_flush = true;
+ }
+ if (b_bot <= a_bot) {
+ bot = a_top = b_bot;
+ b_flush = true;
+ }
+ }
+
+ if (top > prevBot) {
+ oper.addSpan(top, gSentinel, gSentinel);
+ }
+ oper.addSpan(bot, run0, run1);
+
+ if (quickExit && !oper.isEmpty()) {
+ return QUICK_EXIT_TRUE_COUNT;
+ }
+
+ if (a_flush) {
+ a_runs = skip_intervals(a_runs);
+ a_top = a_bot;
+ a_bot = *a_runs++;
+ a_runs += 1; // skip uninitialized intervalCount
+ if (a_bot == SkRegion::kRunTypeSentinel) {
+ a_top = a_bot;
+ }
+ }
+ if (b_flush) {
+ b_runs = skip_intervals(b_runs);
+ b_top = b_bot;
+ b_bot = *b_runs++;
+ b_runs += 1; // skip uninitialized intervalCount
+ if (b_bot == SkRegion::kRunTypeSentinel) {
+ b_top = b_bot;
+ }
+ }
+
+ prevBot = bot;
+ }
+ return oper.flush();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* Given count RunTypes in a complex region, return the worst case number of
+ logical intervals that represents (i.e. number of rects that would be
+ returned from the iterator).
+
+ We could just return count/2, since there must be at least 2 values per
+ interval, but we can first trim off the constant overhead of the initial TOP
+ value, plus the final BOTTOM + 2 sentinels.
+ */
+#if 0 // UNUSED
+static int count_to_intervals(int count) {
+ SkASSERT(count >= 6); // a single rect is 6 values
+ return (count - 4) >> 1;
+}
+#endif
+
+/* Given a number of intervals, what is the worst case representation of that
+ many intervals?
+
+ Worst case (from a storage perspective), is a vertical stack of single
+ intervals: TOP + N * (BOTTOM INTERVALCOUNT LEFT RIGHT SENTINEL) + SENTINEL
+ */
+static int intervals_to_count(int intervals) {
+ return 1 + intervals * 5 + 1;
+}
+
+/* Given the intervalCounts of RunTypes in two regions, return the worst-case number
+ of RunTypes needed to store the result after a region-op.
+ */
+static int compute_worst_case_count(int a_intervals, int b_intervals) {
+ // Our heuristic worst case is ai * (bi + 1) + bi * (ai + 1)
+ int intervals = 2 * a_intervals * b_intervals + a_intervals + b_intervals;
+ // convert back to number of RunType values
+ return intervals_to_count(intervals);
+}
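+// Worked example (editorial, for illustration only): with a_intervals = 2 and
+// b_intervals = 3 the heuristic gives 2*2*3 + 2 + 3 = 17 intervals, and
+// intervals_to_count(17) = 1 + 17*5 + 1 = 87 RunType values of scratch space.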
+
+static bool setEmptyCheck(SkRegion* result) {
+ return result ? result->setEmpty() : false;
+}
+
+static bool setRectCheck(SkRegion* result, const SkIRect& rect) {
+ return result ? result->setRect(rect) : !rect.isEmpty();
+}
+
+static bool setRegionCheck(SkRegion* result, const SkRegion& rgn) {
+ return result ? result->setRegion(rgn) : !rgn.isEmpty();
+}
+
+bool SkRegion::Oper(const SkRegion& rgnaOrig, const SkRegion& rgnbOrig, Op op,
+ SkRegion* result) {
+ SkASSERT((unsigned)op < kOpCount);
+
+ if (kReplace_Op == op) {
+ return setRegionCheck(result, rgnbOrig);
+ }
+
+ // switch to using pointers, so we can swap them as needed
+ const SkRegion* rgna = &rgnaOrig;
+ const SkRegion* rgnb = &rgnbOrig;
+ // after this point, do not refer to rgnaOrig or rgnbOrig!!!
+
+ // collapse difference and reverse-difference into just difference
+ if (kReverseDifference_Op == op) {
+ SkTSwap<const SkRegion*>(rgna, rgnb);
+ op = kDifference_Op;
+ }
+
+ SkIRect bounds;
+ bool a_empty = rgna->isEmpty();
+ bool b_empty = rgnb->isEmpty();
+ bool a_rect = rgna->isRect();
+ bool b_rect = rgnb->isRect();
+
+ switch (op) {
+ case kDifference_Op:
+ if (a_empty) {
+ return setEmptyCheck(result);
+ }
+ if (b_empty || !SkIRect::IntersectsNoEmptyCheck(rgna->fBounds,
+ rgnb->fBounds)) {
+ return setRegionCheck(result, *rgna);
+ }
+ if (b_rect && rgnb->fBounds.containsNoEmptyCheck(rgna->fBounds)) {
+ return setEmptyCheck(result);
+ }
+ break;
+
+ case kIntersect_Op:
+ if ((a_empty | b_empty)
+ || !bounds.intersect(rgna->fBounds, rgnb->fBounds)) {
+ return setEmptyCheck(result);
+ }
+ if (a_rect & b_rect) {
+ return setRectCheck(result, bounds);
+ }
+ if (a_rect && rgna->fBounds.contains(rgnb->fBounds)) {
+ return setRegionCheck(result, *rgnb);
+ }
+ if (b_rect && rgnb->fBounds.contains(rgna->fBounds)) {
+ return setRegionCheck(result, *rgna);
+ }
+ break;
+
+ case kUnion_Op:
+ if (a_empty) {
+ return setRegionCheck(result, *rgnb);
+ }
+ if (b_empty) {
+ return setRegionCheck(result, *rgna);
+ }
+ if (a_rect && rgna->fBounds.contains(rgnb->fBounds)) {
+ return setRegionCheck(result, *rgna);
+ }
+ if (b_rect && rgnb->fBounds.contains(rgna->fBounds)) {
+ return setRegionCheck(result, *rgnb);
+ }
+ break;
+
+ case kXOR_Op:
+ if (a_empty) {
+ return setRegionCheck(result, *rgnb);
+ }
+ if (b_empty) {
+ return setRegionCheck(result, *rgna);
+ }
+ break;
+ default:
+ SkDEBUGFAIL("unknown region op");
+ return false;
+ }
+
+ RunType tmpA[kRectRegionRuns];
+ RunType tmpB[kRectRegionRuns];
+
+ int a_intervals, b_intervals;
+ const RunType* a_runs = rgna->getRuns(tmpA, &a_intervals);
+ const RunType* b_runs = rgnb->getRuns(tmpB, &b_intervals);
+
+ int dstCount = compute_worst_case_count(a_intervals, b_intervals);
+ SkAutoSTMalloc<256, RunType> array(dstCount);
+
+#ifdef SK_DEBUG
+// Sometimes helpful to seed everything with a known value when debugging
+// sk_memset32((uint32_t*)array.get(), 0x7FFFFFFF, dstCount);
+#endif
+
+ int count = operate(a_runs, b_runs, array.get(), op, nullptr == result);
+ SkASSERT(count <= dstCount);
+
+ if (result) {
+ SkASSERT(count >= 0);
+ return result->setRuns(array.get(), count);
+ } else {
+ return (QUICK_EXIT_TRUE_COUNT == count) || !isRunCountEmpty(count);
+ }
+}
+
+bool SkRegion::op(const SkRegion& rgna, const SkRegion& rgnb, Op op) {
+ SkDEBUGCODE(this->validate();)
+ return SkRegion::Oper(rgna, rgnb, op, this);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkBuffer.h"
+
+size_t SkRegion::writeToMemory(void* storage) const {
+ if (nullptr == storage) {
+ size_t size = sizeof(int32_t); // -1 (empty), 0 (rect), runCount
+ if (!this->isEmpty()) {
+ size += sizeof(fBounds);
+ if (this->isComplex()) {
+ size += 2 * sizeof(int32_t); // ySpanCount + intervalCount
+ size += fRunHead->fRunCount * sizeof(RunType);
+ }
+ }
+ return size;
+ }
+
+ SkWBuffer buffer(storage);
+
+ if (this->isEmpty()) {
+ buffer.write32(-1);
+ } else {
+ bool isRect = this->isRect();
+
+ buffer.write32(isRect ? 0 : fRunHead->fRunCount);
+ buffer.write(&fBounds, sizeof(fBounds));
+
+ if (!isRect) {
+ buffer.write32(fRunHead->getYSpanCount());
+ buffer.write32(fRunHead->getIntervalCount());
+ buffer.write(fRunHead->readonly_runs(),
+ fRunHead->fRunCount * sizeof(RunType));
+ }
+ }
+ return buffer.pos();
+}
+
+size_t SkRegion::readFromMemory(const void* storage, size_t length) {
+ SkRBufferWithSizeCheck buffer(storage, length);
+ SkRegion tmp;
+ int32_t count;
+
+ if (buffer.readS32(&count) && (count >= 0) && buffer.read(&tmp.fBounds, sizeof(tmp.fBounds))) {
+ if (count == 0) {
+ tmp.fRunHead = SkRegion_gRectRunHeadPtr;
+ } else {
+ int32_t ySpanCount, intervalCount;
+ if (buffer.readS32(&ySpanCount) && buffer.readS32(&intervalCount) &&
+ intervalCount > 1) {
+ tmp.allocateRuns(count, ySpanCount, intervalCount);
+ buffer.read(tmp.fRunHead->writable_runs(), count * sizeof(RunType));
+ }
+ }
+ }
+ size_t sizeRead = 0;
+ if (buffer.isValid()) {
+ this->swap(tmp);
+ sizeRead = buffer.pos();
+ }
+ return sizeRead;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+const SkRegion& SkRegion::GetEmptyRegion() {
+ static SkRegion gEmpty;
+ return gEmpty;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+
+// Starts with first X-interval, and returns a ptr to the X-sentinel
+static const SkRegion::RunType* skip_intervals_slow(const SkRegion::RunType runs[]) {
+ // want to track that our intervals are all disjoint, such that
+ // prev-right < next-left. We rely on this optimization in places such as
+ // contains().
+ //
+ SkRegion::RunType prevR = -SkRegion::kRunTypeSentinel;
+
+ while (runs[0] < SkRegion::kRunTypeSentinel) {
+ SkASSERT(prevR < runs[0]);
+ SkASSERT(runs[0] < runs[1]);
+ SkASSERT(runs[1] < SkRegion::kRunTypeSentinel);
+ prevR = runs[1];
+ runs += 2;
+ }
+ return runs;
+}
+
+static void compute_bounds(const SkRegion::RunType runs[],
+ SkIRect* bounds, int* ySpanCountPtr,
+ int* intervalCountPtr) {
+ assert_sentinel(runs[0], false); // top
+
+ int left = SK_MaxS32;
+ int rite = SK_MinS32;
+ int bot;
+ int ySpanCount = 0;
+ int intervalCount = 0;
+
+ bounds->fTop = *runs++;
+ do {
+ bot = *runs++;
+ SkASSERT(SkRegion::kRunTypeSentinel > bot);
+
+ ySpanCount += 1;
+
+ runs += 1; // skip intervalCount for now
+ if (*runs < SkRegion::kRunTypeSentinel) {
+ if (left > *runs) {
+ left = *runs;
+ }
+
+ const SkRegion::RunType* prev = runs;
+ runs = skip_intervals_slow(runs);
+ int intervals = SkToInt((runs - prev) >> 1);
+ SkASSERT(prev[-1] == intervals);
+ intervalCount += intervals;
+
+ if (rite < runs[-1]) {
+ rite = runs[-1];
+ }
+ } else {
+ SkASSERT(0 == runs[-1]); // no intervals
+ }
+ SkASSERT(SkRegion::kRunTypeSentinel == *runs);
+ runs += 1;
+ } while (SkRegion::kRunTypeSentinel != *runs);
+
+ bounds->fLeft = left;
+ bounds->fRight = rite;
+ bounds->fBottom = bot;
+ *ySpanCountPtr = ySpanCount;
+ *intervalCountPtr = intervalCount;
+}
+
+void SkRegion::validate() const {
+ if (this->isEmpty()) {
+ // check for explicit empty (the zero rect), so we can compare rects to know when
+ // two regions are equal (i.e. emptyRectA == emptyRectB)
+ // this is stricter than just asserting fBounds.isEmpty()
+ SkASSERT(fBounds.fLeft == 0 && fBounds.fTop == 0 && fBounds.fRight == 0 && fBounds.fBottom == 0);
+ } else {
+ SkASSERT(!fBounds.isEmpty());
+ if (!this->isRect()) {
+ SkASSERT(fRunHead->fRefCnt >= 1);
+ SkASSERT(fRunHead->fRunCount > kRectRegionRuns);
+
+ const RunType* run = fRunHead->readonly_runs();
+
+ // check that our bounds match our runs
+ {
+ SkIRect bounds;
+ int ySpanCount, intervalCount;
+ compute_bounds(run, &bounds, &ySpanCount, &intervalCount);
+
+ SkASSERT(bounds == fBounds);
+ SkASSERT(ySpanCount > 0);
+ SkASSERT(fRunHead->getYSpanCount() == ySpanCount);
+ // SkASSERT(intervalCount > 1);
+ SkASSERT(fRunHead->getIntervalCount() == intervalCount);
+ }
+ }
+ }
+}
+
+void SkRegion::dump() const {
+ if (this->isEmpty()) {
+ SkDebugf(" rgn: empty\n");
+ } else {
+ SkDebugf(" rgn: [%d %d %d %d]", fBounds.fLeft, fBounds.fTop, fBounds.fRight, fBounds.fBottom);
+ if (this->isComplex()) {
+ const RunType* runs = fRunHead->readonly_runs();
+ for (int i = 0; i < fRunHead->fRunCount; i++)
+ SkDebugf(" %d", runs[i]);
+ }
+ SkDebugf("\n");
+ }
+}
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkRegion::Iterator::Iterator(const SkRegion& rgn) {
+ this->reset(rgn);
+}
+
+bool SkRegion::Iterator::rewind() {
+ if (fRgn) {
+ this->reset(*fRgn);
+ return true;
+ }
+ return false;
+}
+
+void SkRegion::Iterator::reset(const SkRegion& rgn) {
+ fRgn = &rgn;
+ if (rgn.isEmpty()) {
+ fDone = true;
+ } else {
+ fDone = false;
+ if (rgn.isRect()) {
+ fRect = rgn.fBounds;
+ fRuns = nullptr;
+ } else {
+ fRuns = rgn.fRunHead->readonly_runs();
+ fRect.set(fRuns[3], fRuns[0], fRuns[4], fRuns[1]);
+ fRuns += 5;
+ // Now fRuns points to the 2nd interval (or x-sentinel)
+ }
+ }
+}
+
+void SkRegion::Iterator::next() {
+ if (fDone) {
+ return;
+ }
+
+ if (fRuns == nullptr) { // rect case
+ fDone = true;
+ return;
+ }
+
+ const RunType* runs = fRuns;
+
+ if (runs[0] < kRunTypeSentinel) { // valid X value
+ fRect.fLeft = runs[0];
+ fRect.fRight = runs[1];
+ runs += 2;
+ } else { // we're at the end of a line
+ runs += 1;
+ if (runs[0] < kRunTypeSentinel) { // valid Y value
+ int intervals = runs[1];
+ if (0 == intervals) { // empty line
+ fRect.fTop = runs[0];
+ runs += 3;
+ } else {
+ fRect.fTop = fRect.fBottom;
+ }
+
+ fRect.fBottom = runs[0];
+ assert_sentinel(runs[2], false);
+ assert_sentinel(runs[3], false);
+ fRect.fLeft = runs[2];
+ fRect.fRight = runs[3];
+ runs += 4;
+ } else { // end of rgn
+ fDone = true;
+ }
+ }
+ fRuns = runs;
+}
+
+SkRegion::Cliperator::Cliperator(const SkRegion& rgn, const SkIRect& clip)
+ : fIter(rgn), fClip(clip), fDone(true) {
+ const SkIRect& r = fIter.rect();
+
+ while (!fIter.done()) {
+ if (r.fTop >= clip.fBottom) {
+ break;
+ }
+ if (fRect.intersect(clip, r)) {
+ fDone = false;
+ break;
+ }
+ fIter.next();
+ }
+}
+
+void SkRegion::Cliperator::next() {
+ if (fDone) {
+ return;
+ }
+
+ const SkIRect& r = fIter.rect();
+
+ fDone = true;
+ fIter.next();
+ while (!fIter.done()) {
+ if (r.fTop >= fClip.fBottom) {
+ break;
+ }
+ if (fRect.intersect(fClip, r)) {
+ fDone = false;
+ break;
+ }
+ fIter.next();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkRegion::Spanerator::Spanerator(const SkRegion& rgn, int y, int left,
+ int right) {
+ SkDEBUGCODE(rgn.validate();)
+
+ const SkIRect& r = rgn.getBounds();
+
+ fDone = true;
+ if (!rgn.isEmpty() && y >= r.fTop && y < r.fBottom &&
+ right > r.fLeft && left < r.fRight) {
+ if (rgn.isRect()) {
+ if (left < r.fLeft) {
+ left = r.fLeft;
+ }
+ if (right > r.fRight) {
+ right = r.fRight;
+ }
+ fLeft = left;
+ fRight = right;
+ fRuns = nullptr; // means we're a rect, not a rgn
+ fDone = false;
+ } else {
+ const SkRegion::RunType* runs = rgn.fRunHead->findScanline(y);
+ runs += 2; // skip Bottom and IntervalCount
+ for (;;) {
+ // runs[0..1] is to the right of the span, so we're done
+ if (runs[0] >= right) {
+ break;
+ }
+ // runs[0..1] is to the left of the span, so continue
+ if (runs[1] <= left) {
+ runs += 2;
+ continue;
+ }
+ // runs[0..1] intersects the span
+ fRuns = runs;
+ fLeft = left;
+ fRight = right;
+ fDone = false;
+ break;
+ }
+ }
+ }
+}
+
+bool SkRegion::Spanerator::next(int* left, int* right) {
+ if (fDone) {
+ return false;
+ }
+
+ if (fRuns == nullptr) { // we're a rect
+ fDone = true; // ok, now we're done
+ if (left) {
+ *left = fLeft;
+ }
+ if (right) {
+ *right = fRight;
+ }
+ return true; // this interval is legal
+ }
+
+ const SkRegion::RunType* runs = fRuns;
+
+ if (runs[0] >= fRight) {
+ fDone = true;
+ return false;
+ }
+
+ SkASSERT(runs[1] > fLeft);
+
+ if (left) {
+ *left = SkMax32(fLeft, runs[0]);
+ }
+ if (right) {
+ *right = SkMin32(fRight, runs[1]);
+ }
+ fRuns = runs + 2;
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+
+bool SkRegion::debugSetRuns(const RunType runs[], int count) {
+ // we need to make a copy, since the real method may modify the array, and
+ // so it cannot be const.
+
+ SkAutoTArray<RunType> storage(count);
+ memcpy(storage.get(), runs, count * sizeof(RunType));
+ return this->setRuns(storage.get(), count);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkRegionPriv.h b/gfx/skia/skia/src/core/SkRegionPriv.h
new file mode 100644
index 000000000..a4cf77b7f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRegionPriv.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkRegionPriv_DEFINED
+#define SkRegionPriv_DEFINED
+
+#include "SkRegion.h"
+#include "SkAtomics.h"
+
+#define assert_sentinel(value, isSentinel) \
+ SkASSERT(((value) == SkRegion::kRunTypeSentinel) == isSentinel)
+
+//SkDEBUGCODE(extern int32_t gRgnAllocCounter;)
+
+#ifdef SK_DEBUG
+// Given the first interval (just past the interval-count), compute the
+// interval count, by searching for the x-sentinel
+//
+static int compute_intervalcount(const SkRegion::RunType runs[]) {
+ const SkRegion::RunType* curr = runs;
+ while (*curr < SkRegion::kRunTypeSentinel) {
+ SkASSERT(curr[0] < curr[1]);
+ SkASSERT(curr[1] < SkRegion::kRunTypeSentinel);
+ curr += 2;
+ }
+ return SkToInt((curr - runs) >> 1);
+}
+#endif
+
+struct SkRegion::RunHead {
+private:
+
+public:
+ int32_t fRefCnt;
+ int32_t fRunCount;
+
+ /**
+ * Number of spans with different Y values. This does not count the initial
+ * Top value, nor does it count the final Y-Sentinel value. In the logical
+ * case of a rectangle, this would return 1, and an empty region would
+ * return 0.
+ */
+ int getYSpanCount() const {
+ return fYSpanCount;
+ }
+
+ /**
+ * Number of intervals in the entire region. This equals the number of
+ * rects that would be returned by the Iterator. In the logical case of
+ * a rect, this would return 1, and an empty region would return 0.
+ */
+ int getIntervalCount() const {
+ return fIntervalCount;
+ }
+
+ static RunHead* Alloc(int count) {
+ //SkDEBUGCODE(sk_atomic_inc(&gRgnAllocCounter);)
+ //SkDEBUGF(("************** gRgnAllocCounter::alloc %d\n", gRgnAllocCounter));
+
+ SkASSERT(count >= SkRegion::kRectRegionRuns);
+
+ const int64_t size = sk_64_mul(count, sizeof(RunType)) + sizeof(RunHead);
+ if (count < 0 || !sk_64_isS32(size)) { SK_ABORT("Invalid Size"); }
+
+ RunHead* head = (RunHead*)sk_malloc_throw(size);
+ head->fRefCnt = 1;
+ head->fRunCount = count;
+ // these must be filled in later, otherwise we will be invalid
+ head->fYSpanCount = 0;
+ head->fIntervalCount = 0;
+ return head;
+ }
+
+ static RunHead* Alloc(int count, int yspancount, int intervalCount) {
+ SkASSERT(yspancount > 0);
+ SkASSERT(intervalCount > 1);
+
+ RunHead* head = Alloc(count);
+ head->fYSpanCount = yspancount;
+ head->fIntervalCount = intervalCount;
+ return head;
+ }
+
+ SkRegion::RunType* writable_runs() {
+ SkASSERT(fRefCnt == 1);
+ return (SkRegion::RunType*)(this + 1);
+ }
+
+ const SkRegion::RunType* readonly_runs() const {
+ return (const SkRegion::RunType*)(this + 1);
+ }
+
+ RunHead* ensureWritable() {
+ RunHead* writable = this;
+ if (fRefCnt > 1) {
+ // We need to alloc & copy the current region before we call
+ // sk_atomic_dec, because otherwise it could be freed in the
+ // meantime.
+ writable = Alloc(fRunCount, fYSpanCount, fIntervalCount);
+ memcpy(writable->writable_runs(), this->readonly_runs(),
+ fRunCount * sizeof(RunType));
+
+ // fRefCnt might have changed since we last checked.
+ // If we own the last reference at this point, we need to
+ // free the memory.
+ if (sk_atomic_dec(&fRefCnt) == 1) {
+ sk_free(this);
+ }
+ }
+ return writable;
+ }
+
+ /**
+ * Given a scanline (including its Bottom value at runs[0]), return the next
+ * scanline. Asserts that there is one (i.e. runs[0] < Sentinel)
+ */
+ static SkRegion::RunType* SkipEntireScanline(const SkRegion::RunType runs[]) {
+ // we are not the Y Sentinel
+ SkASSERT(runs[0] < SkRegion::kRunTypeSentinel);
+
+ const int intervals = runs[1];
+ SkASSERT(runs[2 + intervals * 2] == SkRegion::kRunTypeSentinel);
+#ifdef SK_DEBUG
+ {
+ int n = compute_intervalcount(&runs[2]);
+ SkASSERT(n == intervals);
+ }
+#endif
+
+ // skip the entire line [B N [L R] S]
+ runs += 1 + 1 + intervals * 2 + 1;
+ return const_cast<SkRegion::RunType*>(runs);
+ }
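+ // Editorial illustration (not part of the upstream Skia source): a scanline
+ // stored as { Bottom=20, N=2, 1,3, 5,7, SENTINEL } is skipped by advancing
+ // 1 + 1 + 2*2 + 1 == 7 RunType values, landing on the next scanline's Bottom
+ // (or on the trailing Y-sentinel).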
+
+
+ /**
+ * Return the scanline that contains the Y value. This requires that the Y
+ * value is already known to be contained within the bounds of the region,
+ * and so this routine never returns nullptr.
+ *
+ * It returns the beginning of the scanline, starting with its Bottom value.
+ */
+ SkRegion::RunType* findScanline(int y) const {
+ const RunType* runs = this->readonly_runs();
+
+ // if the top-check fails, we didn't do a quick check on the bounds
+ SkASSERT(y >= runs[0]);
+
+ runs += 1; // skip top-Y
+ for (;;) {
+ int bottom = runs[0];
+ // If we hit this, we've walked off the region, and our bounds check
+ // failed.
+ SkASSERT(bottom < SkRegion::kRunTypeSentinel);
+ if (y < bottom) {
+ break;
+ }
+ runs = SkipEntireScanline(runs);
+ }
+ return const_cast<SkRegion::RunType*>(runs);
+ }
+
+ // Copy src runs into us, computing interval counts and bounds along the way
+ void computeRunBounds(SkIRect* bounds) {
+ RunType* runs = this->writable_runs();
+ bounds->fTop = *runs++;
+
+ int bot;
+ int ySpanCount = 0;
+ int intervalCount = 0;
+ int left = SK_MaxS32;
+ int rite = SK_MinS32;
+
+ do {
+ bot = *runs++;
+ SkASSERT(bot < SkRegion::kRunTypeSentinel);
+ ySpanCount += 1;
+
+ const int intervals = *runs++;
+ SkASSERT(intervals >= 0);
+ SkASSERT(intervals < SkRegion::kRunTypeSentinel);
+
+ if (intervals > 0) {
+#ifdef SK_DEBUG
+ {
+ int n = compute_intervalcount(runs);
+ SkASSERT(n == intervals);
+ }
+#endif
+ RunType L = runs[0];
+ SkASSERT(L < SkRegion::kRunTypeSentinel);
+ if (left > L) {
+ left = L;
+ }
+
+ runs += intervals * 2;
+ RunType R = runs[-1];
+ SkASSERT(R < SkRegion::kRunTypeSentinel);
+ if (rite < R) {
+ rite = R;
+ }
+
+ intervalCount += intervals;
+ }
+ SkASSERT(SkRegion::kRunTypeSentinel == *runs);
+ runs += 1; // skip x-sentinel
+
+ // test Y-sentinel
+ } while (SkRegion::kRunTypeSentinel > *runs);
+
+#ifdef SK_DEBUG
+ // +1 to skip the last Y-sentinel
+ int runCount = SkToInt(runs - this->writable_runs() + 1);
+ SkASSERT(runCount == fRunCount);
+#endif
+
+ fYSpanCount = ySpanCount;
+ fIntervalCount = intervalCount;
+
+ bounds->fLeft = left;
+ bounds->fRight = rite;
+ bounds->fBottom = bot;
+ }
+
+private:
+ int32_t fYSpanCount;
+ int32_t fIntervalCount;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkRegion_path.cpp b/gfx/skia/skia/src/core/SkRegion_path.cpp
new file mode 100644
index 000000000..47df8f6d7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRegion_path.cpp
@@ -0,0 +1,540 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkRegionPriv.h"
+#include "SkBlitter.h"
+#include "SkScan.h"
+#include "SkTSort.h"
+#include "SkTDArray.h"
+#include "SkPath.h"
+
+// The rgnbuilder caller *seems* to pass short counts, and possibly often sees early failure, so
+// we may not want to promote this to a "std" routine just yet.
+static bool sk_memeq32(const int32_t* SK_RESTRICT a, const int32_t* SK_RESTRICT b, int count) {
+ for (int i = 0; i < count; ++i) {
+ if (a[i] != b[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+class SkRgnBuilder : public SkBlitter {
+public:
+ SkRgnBuilder();
+ virtual ~SkRgnBuilder();
+
+ // returns true if it could allocate the working storage needed
+ bool init(int maxHeight, int maxTransitions, bool pathIsInverse);
+
+ void done() {
+ if (fCurrScanline != nullptr) {
+ fCurrScanline->fXCount = (SkRegion::RunType)((int)(fCurrXPtr - fCurrScanline->firstX()));
+ if (!this->collapsWithPrev()) { // flush the last line
+ fCurrScanline = fCurrScanline->nextScanline();
+ }
+ }
+ }
+
+ int computeRunCount() const;
+ void copyToRect(SkIRect*) const;
+ void copyToRgn(SkRegion::RunType runs[]) const;
+
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override {
+ SkDEBUGFAIL("blitAntiH not implemented");
+ }
+
+#ifdef SK_DEBUG
+ void dump() const {
+ SkDebugf("SkRgnBuilder: Top = %d\n", fTop);
+ const Scanline* line = (Scanline*)fStorage;
+ while (line < fCurrScanline) {
+ SkDebugf("SkRgnBuilder::Scanline: LastY=%d, fXCount=%d", line->fLastY, line->fXCount);
+ for (int i = 0; i < line->fXCount; i++) {
+ SkDebugf(" %d", line->firstX()[i]);
+ }
+ SkDebugf("\n");
+
+ line = line->nextScanline();
+ }
+ }
+#endif
+private:
+ /*
+ * Scanline mimics a row in the region, nearly. A row in a region is:
+ * [Bottom IntervalCount [L R]... Sentinel]
+ * while a Scanline is
+ * [LastY XCount [L R]... uninitialized]
+ * The two are the same length (which is good), but we have to transmute
+ * the scanline a little when we convert it to a region-row.
+ *
+ * Potentially we could recode this to exactly match the row format, in
+ * which case copyToRgn() could be a single memcpy. Not sure that is worth
+ * the effort.
+ */
+ struct Scanline {
+ SkRegion::RunType fLastY;
+ SkRegion::RunType fXCount;
+
+ SkRegion::RunType* firstX() const { return (SkRegion::RunType*)(this + 1); }
+ Scanline* nextScanline() const {
+ // add final +1 for the x-sentinel
+ return (Scanline*)((SkRegion::RunType*)(this + 1) + fXCount + 1);
+ }
+ };
+ SkRegion::RunType* fStorage;
+ Scanline* fCurrScanline;
+ Scanline* fPrevScanline;
+ // points at the next available x[] in fCurrScanline
+ SkRegion::RunType* fCurrXPtr;
+ SkRegion::RunType fTop; // first Y value
+
+ int fStorageCount;
+
+ bool collapsWithPrev() {
+ if (fPrevScanline != nullptr &&
+ fPrevScanline->fLastY + 1 == fCurrScanline->fLastY &&
+ fPrevScanline->fXCount == fCurrScanline->fXCount &&
+ sk_memeq32(fPrevScanline->firstX(), fCurrScanline->firstX(), fCurrScanline->fXCount))
+ {
+ // update the height of fPrevScanline
+ fPrevScanline->fLastY = fCurrScanline->fLastY;
+ return true;
+ }
+ return false;
+ }
+};
+
+SkRgnBuilder::SkRgnBuilder()
+ : fStorage(nullptr) {
+}
+
+SkRgnBuilder::~SkRgnBuilder() {
+ sk_free(fStorage);
+}
+
+bool SkRgnBuilder::init(int maxHeight, int maxTransitions, bool pathIsInverse) {
+ if ((maxHeight | maxTransitions) < 0) {
+ return false;
+ }
+
+ if (pathIsInverse) {
+ // allow for additional X transitions to "invert" each scanline
+ // [ L' ... normal transitions ... R' ]
+ //
+ maxTransitions += 2;
+ }
+
+ // compute the count with +1 and +3 slop for the working buffer
+ int64_t count = sk_64_mul(maxHeight + 1, 3 + maxTransitions);
+
+ if (pathIsInverse) {
+ // allow for two "empty" rows for the top and bottom
+ // [ Y, 1, L, R, S] == 5 (*2 for top and bottom)
+ count += 10;
+ }
+
+ if (count < 0 || !sk_64_isS32(count)) {
+ return false;
+ }
+ fStorageCount = sk_64_asS32(count);
+
+ int64_t size = sk_64_mul(fStorageCount, sizeof(SkRegion::RunType));
+ if (size < 0 || !sk_64_isS32(size)) {
+ return false;
+ }
+
+ fStorage = (SkRegion::RunType*)sk_malloc_flags(sk_64_asS32(size), 0);
+ if (nullptr == fStorage) {
+ return false;
+ }
+
+ fCurrScanline = nullptr; // signal empty collection
+ fPrevScanline = nullptr; // signal first scanline
+ return true;
+}
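+// Editorial illustration (not part of the upstream Skia source): for a
+// non-inverse path with maxHeight == 100 and maxTransitions == 8, the working
+// buffer above is sized to (100 + 1) * (3 + 8) == 1111 RunType slots.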
+
+void SkRgnBuilder::blitH(int x, int y, int width) {
+ if (fCurrScanline == nullptr) { // first time
+ fTop = (SkRegion::RunType)(y);
+ fCurrScanline = (Scanline*)fStorage;
+ fCurrScanline->fLastY = (SkRegion::RunType)(y);
+ fCurrXPtr = fCurrScanline->firstX();
+ } else {
+ SkASSERT(y >= fCurrScanline->fLastY);
+
+ if (y > fCurrScanline->fLastY) {
+ // if we get here, we're done with fCurrScanline
+ fCurrScanline->fXCount = (SkRegion::RunType)((int)(fCurrXPtr - fCurrScanline->firstX()));
+
+ int prevLastY = fCurrScanline->fLastY;
+ if (!this->collapsWithPrev()) {
+ fPrevScanline = fCurrScanline;
+ fCurrScanline = fCurrScanline->nextScanline();
+
+ }
+ if (y - 1 > prevLastY) { // insert empty run
+ fCurrScanline->fLastY = (SkRegion::RunType)(y - 1);
+ fCurrScanline->fXCount = 0;
+ fCurrScanline = fCurrScanline->nextScanline();
+ }
+ // setup for the new curr line
+ fCurrScanline->fLastY = (SkRegion::RunType)(y);
+ fCurrXPtr = fCurrScanline->firstX();
+ }
+ }
+ // check if we should extend the current run, or add a new one
+ if (fCurrXPtr > fCurrScanline->firstX() && fCurrXPtr[-1] == x) {
+ fCurrXPtr[-1] = (SkRegion::RunType)(x + width);
+ } else {
+ fCurrXPtr[0] = (SkRegion::RunType)(x);
+ fCurrXPtr[1] = (SkRegion::RunType)(x + width);
+ fCurrXPtr += 2;
+ }
+ SkASSERT(fCurrXPtr - fStorage < fStorageCount);
+}
+
+int SkRgnBuilder::computeRunCount() const {
+ if (fCurrScanline == nullptr) {
+ return 0;
+ }
+
+ const SkRegion::RunType* line = fStorage;
+ const SkRegion::RunType* stop = (const SkRegion::RunType*)fCurrScanline;
+
+ return 2 + (int)(stop - line);
+}
+
+void SkRgnBuilder::copyToRect(SkIRect* r) const {
+ SkASSERT(fCurrScanline != nullptr);
+ // A rect's scanline is [bottom intervals left right sentinel] == 5
+ SkASSERT((const SkRegion::RunType*)fCurrScanline - fStorage == 5);
+
+ const Scanline* line = (const Scanline*)fStorage;
+ SkASSERT(line->fXCount == 2);
+
+ r->set(line->firstX()[0], fTop, line->firstX()[1], line->fLastY + 1);
+}
+
+void SkRgnBuilder::copyToRgn(SkRegion::RunType runs[]) const {
+ SkASSERT(fCurrScanline != nullptr);
+ SkASSERT((const SkRegion::RunType*)fCurrScanline - fStorage > 4);
+
+ const Scanline* line = (const Scanline*)fStorage;
+ const Scanline* stop = fCurrScanline;
+
+ *runs++ = fTop;
+ do {
+ *runs++ = (SkRegion::RunType)(line->fLastY + 1);
+ int count = line->fXCount;
+ *runs++ = count >> 1; // intervalCount
+ if (count) {
+ memcpy(runs, line->firstX(), count * sizeof(SkRegion::RunType));
+ runs += count;
+ }
+ *runs++ = SkRegion::kRunTypeSentinel;
+ line = line->nextScanline();
+ } while (line < stop);
+ SkASSERT(line == stop);
+ *runs = SkRegion::kRunTypeSentinel;
+}
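+// Editorial illustration (not part of the upstream Skia source): a Scanline
+// { fLastY = 9, fXCount = 2, X = [1, 3] } is emitted above as the region row
+// { Bottom = 10, intervalCount = 1, 1, 3, SENTINEL }: fLastY becomes an
+// exclusive Bottom and fXCount is halved into an interval count.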
+
+static unsigned verb_to_initial_last_index(unsigned verb) {
+ static const uint8_t gPathVerbToInitialLastIndex[] = {
+ 0, // kMove_Verb
+ 1, // kLine_Verb
+ 2, // kQuad_Verb
+ 2, // kConic_Verb
+ 3, // kCubic_Verb
+ 0, // kClose_Verb
+ 0 // kDone_Verb
+ };
+ SkASSERT((unsigned)verb < SK_ARRAY_COUNT(gPathVerbToInitialLastIndex));
+ return gPathVerbToInitialLastIndex[verb];
+}
+
+static unsigned verb_to_max_edges(unsigned verb) {
+ static const uint8_t gPathVerbToMaxEdges[] = {
+ 0, // kMove_Verb
+ 1, // kLine_Verb
+ 2, // kQuad_Verb
+ 2, // kConic_Verb
+ 3, // kCubic_Verb
+ 0, // kClose_Verb
+ 0 // kDone_Verb
+ };
+ SkASSERT((unsigned)verb < SK_ARRAY_COUNT(gPathVerbToMaxEdges));
+ return gPathVerbToMaxEdges[verb];
+}
+
+// If this returns 0, ignore itop and ibot
+static int count_path_runtype_values(const SkPath& path, int* itop, int* ibot) {
+ SkPath::Iter iter(path, true);
+ SkPoint pts[4];
+ SkPath::Verb verb;
+
+ int maxEdges = 0;
+ SkScalar top = SkIntToScalar(SK_MaxS16);
+ SkScalar bot = SkIntToScalar(SK_MinS16);
+
+ while ((verb = iter.next(pts, false)) != SkPath::kDone_Verb) {
+ maxEdges += verb_to_max_edges(verb);
+
+ int lastIndex = verb_to_initial_last_index(verb);
+ if (lastIndex > 0) {
+ for (int i = 1; i <= lastIndex; i++) {
+ if (top > pts[i].fY) {
+ top = pts[i].fY;
+ } else if (bot < pts[i].fY) {
+ bot = pts[i].fY;
+ }
+ }
+ } else if (SkPath::kMove_Verb == verb) {
+ if (top > pts[0].fY) {
+ top = pts[0].fY;
+ } else if (bot < pts[0].fY) {
+ bot = pts[0].fY;
+ }
+ }
+ }
+ if (0 == maxEdges) {
+ return 0; // we have only moves+closes
+ }
+
+ SkASSERT(top <= bot);
+ *itop = SkScalarRoundToInt(top);
+ *ibot = SkScalarRoundToInt(bot);
+ return maxEdges;
+}
+
+static bool check_inverse_on_empty_return(SkRegion* dst, const SkPath& path, const SkRegion& clip) {
+ if (path.isInverseFillType()) {
+ return dst->set(clip);
+ } else {
+ return dst->setEmpty();
+ }
+}
+
+bool SkRegion::setPath(const SkPath& path, const SkRegion& clip) {
+ SkDEBUGCODE(this->validate();)
+
+ if (clip.isEmpty()) {
+ return this->setEmpty();
+ }
+
+ if (path.isEmpty()) {
+ return check_inverse_on_empty_return(this, path, clip);
+ }
+
+ // compute worst-case rgn-size for the path
+ int pathTop, pathBot;
+ int pathTransitions = count_path_runtype_values(path, &pathTop, &pathBot);
+ if (0 == pathTransitions) {
+ return check_inverse_on_empty_return(this, path, clip);
+ }
+
+ int clipTop, clipBot;
+ int clipTransitions = clip.count_runtype_values(&clipTop, &clipBot);
+
+ int top = SkMax32(pathTop, clipTop);
+ int bot = SkMin32(pathBot, clipBot);
+ if (top >= bot) {
+ return check_inverse_on_empty_return(this, path, clip);
+ }
+
+ SkRgnBuilder builder;
+
+ if (!builder.init(bot - top,
+ SkMax32(pathTransitions, clipTransitions),
+ path.isInverseFillType())) {
+ // can't allocate working space, so return false
+ return this->setEmpty();
+ }
+
+ SkScan::FillPath(path, clip, &builder);
+ builder.done();
+
+ int count = builder.computeRunCount();
+ if (count == 0) {
+ return this->setEmpty();
+ } else if (count == kRectRegionRuns) {
+ builder.copyToRect(&fBounds);
+ this->setRect(fBounds);
+ } else {
+ SkRegion tmp;
+
+ tmp.fRunHead = RunHead::Alloc(count);
+ builder.copyToRgn(tmp.fRunHead->writable_runs());
+ tmp.fRunHead->computeRunBounds(&tmp.fBounds);
+ this->swap(tmp);
+ }
+ SkDEBUGCODE(this->validate();)
+ return true;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+struct Edge {
+ enum {
+ kY0Link = 0x01,
+ kY1Link = 0x02,
+
+ kCompleteLink = (kY0Link | kY1Link)
+ };
+
+ SkRegion::RunType fX;
+ SkRegion::RunType fY0, fY1;
+ uint8_t fFlags;
+ Edge* fNext;
+
+ void set(int x, int y0, int y1) {
+ SkASSERT(y0 != y1);
+
+ fX = (SkRegion::RunType)(x);
+ fY0 = (SkRegion::RunType)(y0);
+ fY1 = (SkRegion::RunType)(y1);
+ fFlags = 0;
+ SkDEBUGCODE(fNext = nullptr;)
+ }
+
+ int top() const {
+ return SkFastMin32(fY0, fY1);
+ }
+};
+
+static void find_link(Edge* base, Edge* stop) {
+ SkASSERT(base < stop);
+
+ if (base->fFlags == Edge::kCompleteLink) {
+ SkASSERT(base->fNext);
+ return;
+ }
+
+ SkASSERT(base + 1 < stop);
+
+ int y0 = base->fY0;
+ int y1 = base->fY1;
+
+ Edge* e = base;
+ if ((base->fFlags & Edge::kY0Link) == 0) {
+ for (;;) {
+ e += 1;
+ if ((e->fFlags & Edge::kY1Link) == 0 && y0 == e->fY1) {
+ SkASSERT(nullptr == e->fNext);
+ e->fNext = base;
+ e->fFlags = SkToU8(e->fFlags | Edge::kY1Link);
+ break;
+ }
+ }
+ }
+
+ e = base;
+ if ((base->fFlags & Edge::kY1Link) == 0) {
+ for (;;) {
+ e += 1;
+ if ((e->fFlags & Edge::kY0Link) == 0 && y1 == e->fY0) {
+ SkASSERT(nullptr == base->fNext);
+ base->fNext = e;
+ e->fFlags = SkToU8(e->fFlags | Edge::kY0Link);
+ break;
+ }
+ }
+ }
+
+ base->fFlags = Edge::kCompleteLink;
+}
+
+static int extract_path(Edge* edge, Edge* stop, SkPath* path) {
+ while (0 == edge->fFlags) {
+ edge++; // skip over "used" edges
+ }
+
+ SkASSERT(edge < stop);
+
+ Edge* base = edge;
+ Edge* prev = edge;
+ edge = edge->fNext;
+ SkASSERT(edge != base);
+
+ int count = 1;
+ path->moveTo(SkIntToScalar(prev->fX), SkIntToScalar(prev->fY0));
+ prev->fFlags = 0;
+ do {
+ if (prev->fX != edge->fX || prev->fY1 != edge->fY0) { // skip collinear
+ path->lineTo(SkIntToScalar(prev->fX), SkIntToScalar(prev->fY1)); // V
+ path->lineTo(SkIntToScalar(edge->fX), SkIntToScalar(edge->fY0)); // H
+ }
+ prev = edge;
+ edge = edge->fNext;
+ count += 1;
+ prev->fFlags = 0;
+ } while (edge != base);
+ path->lineTo(SkIntToScalar(prev->fX), SkIntToScalar(prev->fY1)); // V
+ path->close();
+ return count;
+}
+
+struct EdgeLT {
+ bool operator()(const Edge& a, const Edge& b) const {
+ return (a.fX == b.fX) ? a.top() < b.top() : a.fX < b.fX;
+ }
+};
+
+bool SkRegion::getBoundaryPath(SkPath* path) const {
+ // path could safely be nullptr if we're empty, but the caller shouldn't
+ // *know* that
+ SkASSERT(path);
+
+ if (this->isEmpty()) {
+ return false;
+ }
+
+ const SkIRect& bounds = this->getBounds();
+
+ if (this->isRect()) {
+ SkRect r;
+ r.set(bounds); // this converts the ints to scalars
+ path->addRect(r);
+ return true;
+ }
+
+ SkRegion::Iterator iter(*this);
+ SkTDArray<Edge> edges;
+
+ for (const SkIRect& r = iter.rect(); !iter.done(); iter.next()) {
+ Edge* edge = edges.append(2);
+ edge[0].set(r.fLeft, r.fBottom, r.fTop);
+ edge[1].set(r.fRight, r.fTop, r.fBottom);
+ }
+
+ int count = edges.count();
+ Edge* start = edges.begin();
+ Edge* stop = start + count;
+ SkTQSort<Edge>(start, stop - 1, EdgeLT());
+
+ Edge* e;
+ for (e = start; e != stop; e++) {
+ find_link(e, stop);
+ }
+
+#ifdef SK_DEBUG
+ for (e = start; e != stop; e++) {
+ SkASSERT(e->fNext != nullptr);
+ SkASSERT(e->fFlags == Edge::kCompleteLink);
+ }
+#endif
+
+ path->incReserve(count << 1);
+ do {
+ SkASSERT(count > 1);
+ count -= extract_path(start, stop, path);
+ } while (count > 0);
+
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkResourceCache.cpp b/gfx/skia/skia/src/core/SkResourceCache.cpp
new file mode 100644
index 000000000..4bdc8dd9e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkResourceCache.cpp
@@ -0,0 +1,699 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMessageBus.h"
+#include "SkMipMap.h"
+#include "SkMutex.h"
+#include "SkOpts.h"
+#include "SkPixelRef.h"
+#include "SkResourceCache.h"
+#include "SkTraceMemoryDump.h"
+
+#include <stddef.h>
+#include <stdlib.h>
+
+DECLARE_SKMESSAGEBUS_MESSAGE(SkResourceCache::PurgeSharedIDMessage)
+
+// This can be defined by the caller's build system
+//#define SK_USE_DISCARDABLE_SCALEDIMAGECACHE
+
+#ifndef SK_DISCARDABLEMEMORY_SCALEDIMAGECACHE_COUNT_LIMIT
+# define SK_DISCARDABLEMEMORY_SCALEDIMAGECACHE_COUNT_LIMIT 1024
+#endif
+
+#ifndef SK_DEFAULT_IMAGE_CACHE_LIMIT
+ #define SK_DEFAULT_IMAGE_CACHE_LIMIT (32 * 1024 * 1024)
+#endif
+
+void SkResourceCache::Key::init(void* nameSpace, uint64_t sharedID, size_t dataSize) {
+ SkASSERT(SkAlign4(dataSize) == dataSize);
+
+ // fCount32 and fHash are not hashed
+ static const int kUnhashedLocal32s = 2; // fCount32 + fHash
+ static const int kSharedIDLocal32s = 2; // fSharedID_lo + fSharedID_hi
+ static const int kHashedLocal32s = kSharedIDLocal32s + (sizeof(fNamespace) >> 2);
+ static const int kLocal32s = kUnhashedLocal32s + kHashedLocal32s;
+
+ static_assert(sizeof(Key) == (kLocal32s << 2), "unaccounted_key_locals");
+ static_assert(sizeof(Key) == offsetof(Key, fNamespace) + sizeof(fNamespace),
+ "namespace_field_must_be_last");
+
+ fCount32 = SkToS32(kLocal32s + (dataSize >> 2));
+ fSharedID_lo = (uint32_t)sharedID;
+ fSharedID_hi = (uint32_t)(sharedID >> 32);
+ fNamespace = nameSpace;
+ // skip unhashed fields when computing the hash
+ fHash = SkOpts::hash(this->as32() + kUnhashedLocal32s,
+ (fCount32 - kUnhashedLocal32s) << 2);
+}
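+// Editorial illustration (not part of the upstream Skia source): on a 64-bit
+// build, sizeof(fNamespace) >> 2 == 2, so kHashedLocal32s == 4 and kLocal32s == 6.
+// A subclass passing dataSize == 8 then gets fCount32 == 8, and the hash covers
+// (8 - 2) << 2 == 24 bytes: the shared ID, the namespace pointer, and the
+// subclass's 8 bytes of data, skipping fCount32 and fHash.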
+
+#include "SkTDynamicHash.h"
+
+class SkResourceCache::Hash :
+ public SkTDynamicHash<SkResourceCache::Rec, SkResourceCache::Key> {};
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkResourceCache::init() {
+ fHead = nullptr;
+ fTail = nullptr;
+ fHash = new Hash;
+ fTotalBytesUsed = 0;
+ fCount = 0;
+ fSingleAllocationByteLimit = 0;
+ fAllocator = nullptr;
+
+ // One of these should be explicitly set by the caller after we return.
+ fTotalByteLimit = 0;
+ fDiscardableFactory = nullptr;
+}
+
+#include "SkDiscardableMemory.h"
+
+class SkOneShotDiscardablePixelRef : public SkPixelRef {
+public:
+
+ // Ownership of the discardable memory is transferred to the pixel ref.
+ // The pixel ref will ref() the color table (if not NULL), and unref() it in the destructor.
+ SkOneShotDiscardablePixelRef(const SkImageInfo&, SkDiscardableMemory*, size_t rowBytes,
+ SkColorTable*);
+ ~SkOneShotDiscardablePixelRef();
+
+protected:
+ bool onNewLockPixels(LockRec*) override;
+ void onUnlockPixels() override;
+ size_t getAllocatedSizeInBytes() const override;
+
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override { return fDM; }
+
+private:
+ SkDiscardableMemory* fDM;
+ size_t fRB;
+ bool fFirstTime;
+ SkColorTable* fCTable;
+
+ typedef SkPixelRef INHERITED;
+};
+
+SkOneShotDiscardablePixelRef::SkOneShotDiscardablePixelRef(const SkImageInfo& info,
+ SkDiscardableMemory* dm,
+ size_t rowBytes,
+ SkColorTable* ctable)
+ : INHERITED(info)
+ , fDM(dm)
+ , fRB(rowBytes)
+ , fCTable(ctable)
+{
+ SkASSERT(dm->data());
+ fFirstTime = true;
+ SkSafeRef(ctable);
+}
+
+SkOneShotDiscardablePixelRef::~SkOneShotDiscardablePixelRef() {
+ delete fDM;
+ SkSafeUnref(fCTable);
+}
+
+bool SkOneShotDiscardablePixelRef::onNewLockPixels(LockRec* rec) {
+ if (fFirstTime) {
+ // we're already locked
+ SkASSERT(fDM->data());
+ fFirstTime = false;
+ goto SUCCESS;
+ }
+
+ // A previous call to onUnlock may have deleted our DM, so check for that
+ if (nullptr == fDM) {
+ return false;
+ }
+
+ if (!fDM->lock()) {
+ // since it failed, we delete it now, to free-up the resource
+ delete fDM;
+ fDM = nullptr;
+ return false;
+ }
+
+SUCCESS:
+ rec->fPixels = fDM->data();
+ rec->fColorTable = fCTable;
+ rec->fRowBytes = fRB;
+ return true;
+}
+
+void SkOneShotDiscardablePixelRef::onUnlockPixels() {
+ SkASSERT(!fFirstTime);
+ fDM->unlock();
+}
+
+size_t SkOneShotDiscardablePixelRef::getAllocatedSizeInBytes() const {
+ return this->info().getSafeSize(fRB);
+}
+
+class SkResourceCacheDiscardableAllocator : public SkBitmap::Allocator {
+public:
+ SkResourceCacheDiscardableAllocator(SkResourceCache::DiscardableFactory factory) {
+ SkASSERT(factory);
+ fFactory = factory;
+ }
+
+ bool allocPixelRef(SkBitmap*, SkColorTable*) override;
+
+private:
+ SkResourceCache::DiscardableFactory fFactory;
+};
+
+bool SkResourceCacheDiscardableAllocator::allocPixelRef(SkBitmap* bitmap, SkColorTable* ctable) {
+ size_t size = bitmap->getSize();
+ uint64_t size64 = bitmap->computeSize64();
+ if (0 == size || size64 > (uint64_t)size) {
+ return false;
+ }
+
+ if (kIndex_8_SkColorType == bitmap->colorType()) {
+ if (!ctable) {
+ return false;
+ }
+ } else {
+ ctable = nullptr;
+ }
+
+ SkDiscardableMemory* dm = fFactory(size);
+ if (nullptr == dm) {
+ return false;
+ }
+
+ SkImageInfo info = bitmap->info();
+ bitmap->setPixelRef(new SkOneShotDiscardablePixelRef(info, dm, bitmap->rowBytes(),
+ ctable))->unref();
+ bitmap->lockPixels();
+ return bitmap->readyToDraw();
+}
+
+SkResourceCache::SkResourceCache(DiscardableFactory factory) {
+ this->init();
+ fDiscardableFactory = factory;
+
+ fAllocator = new SkResourceCacheDiscardableAllocator(factory);
+}
+
+SkResourceCache::SkResourceCache(size_t byteLimit) {
+ this->init();
+ fTotalByteLimit = byteLimit;
+}
+
+SkResourceCache::~SkResourceCache() {
+ SkSafeUnref(fAllocator);
+
+ Rec* rec = fHead;
+ while (rec) {
+ Rec* next = rec->fNext;
+ delete rec;
+ rec = next;
+ }
+ delete fHash;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool SkResourceCache::find(const Key& key, FindVisitor visitor, void* context) {
+ this->checkMessages();
+
+ Rec* rec = fHash->find(key);
+ if (rec) {
+ if (visitor(*rec, context)) {
+ this->moveToHead(rec); // for our LRU
+ return true;
+ } else {
+ this->remove(rec); // stale
+ return false;
+ }
+ }
+ return false;
+}
+
+static void make_size_str(size_t size, SkString* str) {
+ const char suffix[] = { 'b', 'k', 'm', 'g', 't', 0 };
+ int i = 0;
+ while (suffix[i] && (size > 1024)) {
+ i += 1;
+ size >>= 10;
+ }
+ str->printf("%zu%c", size, suffix[i]);
+}
+
+static bool gDumpCacheTransactions;
+
+void SkResourceCache::add(Rec* rec) {
+ this->checkMessages();
+
+ SkASSERT(rec);
+ // See if we already have this key (racy inserts, etc.)
+ Rec* existing = fHash->find(rec->getKey());
+ if (existing) {
+ delete rec;
+ return;
+ }
+
+ this->addToHead(rec);
+ fHash->add(rec);
+
+ if (gDumpCacheTransactions) {
+ SkString bytesStr, totalStr;
+ make_size_str(rec->bytesUsed(), &bytesStr);
+ make_size_str(fTotalBytesUsed, &totalStr);
+ SkDebugf("RC: add %5s %12p key %08x -- total %5s, count %d\n",
+ bytesStr.c_str(), rec, rec->getHash(), totalStr.c_str(), fCount);
+ }
+
+ // since the new rec may push us over-budget, we perform a purge check now
+ this->purgeAsNeeded();
+}
+
+void SkResourceCache::remove(Rec* rec) {
+ size_t used = rec->bytesUsed();
+ SkASSERT(used <= fTotalBytesUsed);
+
+ this->release(rec);
+ fHash->remove(rec->getKey());
+
+ fTotalBytesUsed -= used;
+ fCount -= 1;
+
+ if (gDumpCacheTransactions) {
+ SkString bytesStr, totalStr;
+ make_size_str(used, &bytesStr);
+ make_size_str(fTotalBytesUsed, &totalStr);
+ SkDebugf("RC: remove %5s %12p key %08x -- total %5s, count %d\n",
+ bytesStr.c_str(), rec, rec->getHash(), totalStr.c_str(), fCount);
+ }
+
+ delete rec;
+}
+
+void SkResourceCache::purgeAsNeeded(bool forcePurge) {
+ size_t byteLimit;
+ int countLimit;
+
+ if (fDiscardableFactory) {
+ countLimit = SK_DISCARDABLEMEMORY_SCALEDIMAGECACHE_COUNT_LIMIT;
+ byteLimit = SK_MaxU32; // no limit based on bytes
+ } else {
+ countLimit = SK_MaxS32; // no limit based on count
+ byteLimit = fTotalByteLimit;
+ }
+
+ Rec* rec = fTail;
+ while (rec) {
+ if (!forcePurge && fTotalBytesUsed < byteLimit && fCount < countLimit) {
+ break;
+ }
+
+ Rec* prev = rec->fPrev;
+ this->remove(rec);
+ rec = prev;
+ }
+}
+
+//#define SK_TRACK_PURGE_SHAREDID_HITRATE
+
+#ifdef SK_TRACK_PURGE_SHAREDID_HITRATE
+static int gPurgeCallCounter;
+static int gPurgeHitCounter;
+#endif
+
+void SkResourceCache::purgeSharedID(uint64_t sharedID) {
+ if (0 == sharedID) {
+ return;
+ }
+
+#ifdef SK_TRACK_PURGE_SHAREDID_HITRATE
+ gPurgeCallCounter += 1;
+ bool found = false;
+#endif
+ // go backwards, just like purgeAsNeeded, just to make the code similar.
+ // could iterate either direction and still be correct.
+ Rec* rec = fTail;
+ while (rec) {
+ Rec* prev = rec->fPrev;
+ if (rec->getKey().getSharedID() == sharedID) {
+// SkDebugf("purgeSharedID id=%llx rec=%p\n", sharedID, rec);
+ this->remove(rec);
+#ifdef SK_TRACK_PURGE_SHAREDID_HITRATE
+ found = true;
+#endif
+ }
+ rec = prev;
+ }
+
+#ifdef SK_TRACK_PURGE_SHAREDID_HITRATE
+ if (found) {
+ gPurgeHitCounter += 1;
+ }
+
+ SkDebugf("PurgeShared calls=%d hits=%d rate=%g\n", gPurgeCallCounter, gPurgeHitCounter,
+ gPurgeHitCounter * 100.0 / gPurgeCallCounter);
+#endif
+}
+
+void SkResourceCache::visitAll(Visitor visitor, void* context) {
+ // go backwards, just like purgeAsNeeded, just to make the code similar.
+ // could iterate either direction and still be correct.
+ Rec* rec = fTail;
+ while (rec) {
+ visitor(*rec, context);
+ rec = rec->fPrev;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+size_t SkResourceCache::setTotalByteLimit(size_t newLimit) {
+ size_t prevLimit = fTotalByteLimit;
+ fTotalByteLimit = newLimit;
+ if (newLimit < prevLimit) {
+ this->purgeAsNeeded();
+ }
+ return prevLimit;
+}
+
+SkCachedData* SkResourceCache::newCachedData(size_t bytes) {
+ this->checkMessages();
+
+ if (fDiscardableFactory) {
+ SkDiscardableMemory* dm = fDiscardableFactory(bytes);
+ return dm ? new SkCachedData(bytes, dm) : nullptr;
+ } else {
+ return new SkCachedData(sk_malloc_throw(bytes), bytes);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkResourceCache::release(Rec* rec) {
+ Rec* prev = rec->fPrev;
+ Rec* next = rec->fNext;
+
+ if (!prev) {
+ SkASSERT(fHead == rec);
+ fHead = next;
+ } else {
+ prev->fNext = next;
+ }
+
+ if (!next) {
+ fTail = prev;
+ } else {
+ next->fPrev = prev;
+ }
+
+ rec->fNext = rec->fPrev = nullptr;
+}
+
+void SkResourceCache::moveToHead(Rec* rec) {
+ if (fHead == rec) {
+ return;
+ }
+
+ SkASSERT(fHead);
+ SkASSERT(fTail);
+
+ this->validate();
+
+ this->release(rec);
+
+ fHead->fPrev = rec;
+ rec->fNext = fHead;
+ fHead = rec;
+
+ this->validate();
+}
+
+void SkResourceCache::addToHead(Rec* rec) {
+ this->validate();
+
+ rec->fPrev = nullptr;
+ rec->fNext = fHead;
+ if (fHead) {
+ fHead->fPrev = rec;
+ }
+ fHead = rec;
+ if (!fTail) {
+ fTail = rec;
+ }
+ fTotalBytesUsed += rec->bytesUsed();
+ fCount += 1;
+
+ this->validate();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+void SkResourceCache::validate() const {
+ if (nullptr == fHead) {
+ SkASSERT(nullptr == fTail);
+ SkASSERT(0 == fTotalBytesUsed);
+ return;
+ }
+
+ if (fHead == fTail) {
+ SkASSERT(nullptr == fHead->fPrev);
+ SkASSERT(nullptr == fHead->fNext);
+ SkASSERT(fHead->bytesUsed() == fTotalBytesUsed);
+ return;
+ }
+
+ SkASSERT(nullptr == fHead->fPrev);
+ SkASSERT(fHead->fNext);
+ SkASSERT(nullptr == fTail->fNext);
+ SkASSERT(fTail->fPrev);
+
+ size_t used = 0;
+ int count = 0;
+ const Rec* rec = fHead;
+ while (rec) {
+ count += 1;
+ used += rec->bytesUsed();
+ SkASSERT(used <= fTotalBytesUsed);
+ rec = rec->fNext;
+ }
+ SkASSERT(fCount == count);
+
+ rec = fTail;
+ while (rec) {
+ SkASSERT(count > 0);
+ count -= 1;
+ SkASSERT(used >= rec->bytesUsed());
+ used -= rec->bytesUsed();
+ rec = rec->fPrev;
+ }
+
+ SkASSERT(0 == count);
+ SkASSERT(0 == used);
+}
+#endif
+
+void SkResourceCache::dump() const {
+ this->validate();
+
+ SkDebugf("SkResourceCache: count=%d bytes=%d %s\n",
+ fCount, fTotalBytesUsed, fDiscardableFactory ? "discardable" : "malloc");
+}
+
+size_t SkResourceCache::setSingleAllocationByteLimit(size_t newLimit) {
+ size_t oldLimit = fSingleAllocationByteLimit;
+ fSingleAllocationByteLimit = newLimit;
+ return oldLimit;
+}
+
+size_t SkResourceCache::getSingleAllocationByteLimit() const {
+ return fSingleAllocationByteLimit;
+}
+
+size_t SkResourceCache::getEffectiveSingleAllocationByteLimit() const {
+ // fSingleAllocationByteLimit == 0 means the caller is asking for our default
+ size_t limit = fSingleAllocationByteLimit;
+
+ // if we're not discardable (i.e. we are fixed-budget) then cap the single-limit
+ // to our budget.
+ if (nullptr == fDiscardableFactory) {
+ if (0 == limit) {
+ limit = fTotalByteLimit;
+ } else {
+ limit = SkTMin(limit, fTotalByteLimit);
+ }
+ }
+ return limit;
+}
+
+void SkResourceCache::checkMessages() {
+ SkTArray<PurgeSharedIDMessage> msgs;
+ fPurgeSharedIDInbox.poll(&msgs);
+ for (int i = 0; i < msgs.count(); ++i) {
+ this->purgeSharedID(msgs[i].fSharedID);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SK_DECLARE_STATIC_MUTEX(gMutex);
+static SkResourceCache* gResourceCache = nullptr;
+
+/** Must hold gMutex when calling. */
+static SkResourceCache* get_cache() {
+ // gMutex is always held when this is called, so we don't need to be fancy in here.
+ gMutex.assertHeld();
+ if (nullptr == gResourceCache) {
+#ifdef SK_USE_DISCARDABLE_SCALEDIMAGECACHE
+ gResourceCache = new SkResourceCache(SkDiscardableMemory::Create);
+#else
+ gResourceCache = new SkResourceCache(SK_DEFAULT_IMAGE_CACHE_LIMIT);
+#endif
+ }
+ return gResourceCache;
+}
+
+size_t SkResourceCache::GetTotalBytesUsed() {
+ SkAutoMutexAcquire am(gMutex);
+ return get_cache()->getTotalBytesUsed();
+}
+
+size_t SkResourceCache::GetTotalByteLimit() {
+ SkAutoMutexAcquire am(gMutex);
+ return get_cache()->getTotalByteLimit();
+}
+
+size_t SkResourceCache::SetTotalByteLimit(size_t newLimit) {
+ SkAutoMutexAcquire am(gMutex);
+ return get_cache()->setTotalByteLimit(newLimit);
+}
+
+SkResourceCache::DiscardableFactory SkResourceCache::GetDiscardableFactory() {
+ SkAutoMutexAcquire am(gMutex);
+ return get_cache()->discardableFactory();
+}
+
+SkBitmap::Allocator* SkResourceCache::GetAllocator() {
+ SkAutoMutexAcquire am(gMutex);
+ return get_cache()->allocator();
+}
+
+SkCachedData* SkResourceCache::NewCachedData(size_t bytes) {
+ SkAutoMutexAcquire am(gMutex);
+ return get_cache()->newCachedData(bytes);
+}
+
+void SkResourceCache::Dump() {
+ SkAutoMutexAcquire am(gMutex);
+ get_cache()->dump();
+}
+
+size_t SkResourceCache::SetSingleAllocationByteLimit(size_t size) {
+ SkAutoMutexAcquire am(gMutex);
+ return get_cache()->setSingleAllocationByteLimit(size);
+}
+
+size_t SkResourceCache::GetSingleAllocationByteLimit() {
+ SkAutoMutexAcquire am(gMutex);
+ return get_cache()->getSingleAllocationByteLimit();
+}
+
+size_t SkResourceCache::GetEffectiveSingleAllocationByteLimit() {
+ SkAutoMutexAcquire am(gMutex);
+ return get_cache()->getEffectiveSingleAllocationByteLimit();
+}
+
+void SkResourceCache::PurgeAll() {
+ SkAutoMutexAcquire am(gMutex);
+ return get_cache()->purgeAll();
+}
+
+bool SkResourceCache::Find(const Key& key, FindVisitor visitor, void* context) {
+ SkAutoMutexAcquire am(gMutex);
+ return get_cache()->find(key, visitor, context);
+}
+
+void SkResourceCache::Add(Rec* rec) {
+ SkAutoMutexAcquire am(gMutex);
+ get_cache()->add(rec);
+}
+
+void SkResourceCache::VisitAll(Visitor visitor, void* context) {
+ SkAutoMutexAcquire am(gMutex);
+ get_cache()->visitAll(visitor, context);
+}
+
+void SkResourceCache::PostPurgeSharedID(uint64_t sharedID) {
+ if (sharedID) {
+ SkMessageBus<PurgeSharedIDMessage>::Post(PurgeSharedIDMessage(sharedID));
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkGraphics.h"
+#include "SkImageFilter.h"
+
+size_t SkGraphics::GetResourceCacheTotalBytesUsed() {
+ return SkResourceCache::GetTotalBytesUsed();
+}
+
+size_t SkGraphics::GetResourceCacheTotalByteLimit() {
+ return SkResourceCache::GetTotalByteLimit();
+}
+
+size_t SkGraphics::SetResourceCacheTotalByteLimit(size_t newLimit) {
+ return SkResourceCache::SetTotalByteLimit(newLimit);
+}
+
+size_t SkGraphics::GetResourceCacheSingleAllocationByteLimit() {
+ return SkResourceCache::GetSingleAllocationByteLimit();
+}
+
+size_t SkGraphics::SetResourceCacheSingleAllocationByteLimit(size_t newLimit) {
+ return SkResourceCache::SetSingleAllocationByteLimit(newLimit);
+}
+
+void SkGraphics::PurgeResourceCache() {
+ SkImageFilter::PurgeCache();
+ return SkResourceCache::PurgeAll();
+}
+
+/////////////
+
+static void dump_visitor(const SkResourceCache::Rec& rec, void*) {
+ SkDebugf("RC: %12s bytes %9lu discardable %p\n",
+ rec.getCategory(), rec.bytesUsed(), rec.diagnostic_only_getDiscardable());
+}
+
+void SkResourceCache::TestDumpMemoryStatistics() {
+ VisitAll(dump_visitor, nullptr);
+}
+
+static void sk_trace_dump_visitor(const SkResourceCache::Rec& rec, void* context) {
+ SkTraceMemoryDump* dump = static_cast<SkTraceMemoryDump*>(context);
+ SkString dumpName = SkStringPrintf("skia/sk_resource_cache/%s_%p", rec.getCategory(), &rec);
+ SkDiscardableMemory* discardable = rec.diagnostic_only_getDiscardable();
+ if (discardable) {
+ dump->setDiscardableMemoryBacking(dumpName.c_str(), *discardable);
+
+ // The discardable memory size will be calculated by the dumper, but we also dump what we
+ // think the size of the object in memory is, irrespective of whether the object is live or dead.
+ dump->dumpNumericValue(dumpName.c_str(), "discardable_size", "bytes", rec.bytesUsed());
+ } else {
+ dump->dumpNumericValue(dumpName.c_str(), "size", "bytes", rec.bytesUsed());
+ dump->setMemoryBacking(dumpName.c_str(), "malloc", nullptr);
+ }
+}
+
+void SkResourceCache::DumpMemoryStatistics(SkTraceMemoryDump* dump) {
+ // Since a resource could be backed by malloc or discardable memory, the cache always dumps
+ // detailed stats to be accurate.
+ VisitAll(sk_trace_dump_visitor, dump);
+}
diff --git a/gfx/skia/skia/src/core/SkResourceCache.h b/gfx/skia/skia/src/core/SkResourceCache.h
new file mode 100644
index 000000000..591933631
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkResourceCache.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkResourceCache_DEFINED
+#define SkResourceCache_DEFINED
+
+#include "SkBitmap.h"
+#include "SkMessageBus.h"
+#include "SkTDArray.h"
+
+class SkCachedData;
+class SkDiscardableMemory;
+class SkTraceMemoryDump;
+
+/**
+ * Cache object for bitmaps (with a possible scale in X and Y as part of the key).
+ *
+ * Multiple caches can be instantiated, but each instance is not implicitly
+ * thread-safe, so if a given instance is to be shared across threads, the
+ * caller must manage the access itself (e.g. via a mutex).
+ *
+ * As a convenience, a global instance is also defined, which can be safely
+ * accessed across threads via the static methods (e.g. FindAndLock, etc.).
+ */
+class SkResourceCache {
+public:
+ struct Key {
+ /** Key subclasses must call this after their own fields and data are initialized.
+ * All fields and data must be tightly packed.
+ * @param nameSpace must be unique per Key subclass.
+ * @param sharedID == 0 means ignore this field, does not support group purging.
+ * @param dataSize is size of fields and data of the subclass, must be a multiple of 4.
+ */
+ void init(void* nameSpace, uint64_t sharedID, size_t dataSize);
+
+ /** Returns the size of this key. */
+ size_t size() const {
+ return fCount32 << 2;
+ }
+
+ void* getNamespace() const { return fNamespace; }
+ uint64_t getSharedID() const { return ((uint64_t)fSharedID_hi << 32) | fSharedID_lo; }
+
+ // This is only valid after having called init().
+ uint32_t hash() const { return fHash; }
+
+ bool operator==(const Key& other) const {
+ const uint32_t* a = this->as32();
+ const uint32_t* b = other.as32();
+ for (int i = 0; i < fCount32; ++i) { // (This checks fCount == other.fCount first.)
+ if (a[i] != b[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ private:
+ int32_t fCount32; // local + user contents count32
+ uint32_t fHash;
+ // split uint64_t into hi and lo so we don't force ourselves to pad on 32bit machines.
+ uint32_t fSharedID_lo;
+ uint32_t fSharedID_hi;
+ void* fNamespace; // A unique namespace tag. This is hashed.
+ /* uint32_t fContents32[] */
+
+ const uint32_t* as32() const { return (const uint32_t*)this; }
+ };
+
+ struct Rec {
+ typedef SkResourceCache::Key Key;
+
+ Rec() {}
+ virtual ~Rec() {}
+
+ uint32_t getHash() const { return this->getKey().hash(); }
+
+ virtual const Key& getKey() const = 0;
+ virtual size_t bytesUsed() const = 0;
+
+ // for memory usage diagnostics
+ virtual const char* getCategory() const = 0;
+ virtual SkDiscardableMemory* diagnostic_only_getDiscardable() const { return nullptr; }
+
+ // for SkTDynamicHash::Traits
+ static uint32_t Hash(const Key& key) { return key.hash(); }
+ static const Key& GetKey(const Rec& rec) { return rec.getKey(); }
+
+ private:
+ Rec* fNext;
+ Rec* fPrev;
+
+ friend class SkResourceCache;
+ };
+
+ // Used with SkMessageBus
+ struct PurgeSharedIDMessage {
+ PurgeSharedIDMessage(uint64_t sharedID) : fSharedID(sharedID) {}
+
+ uint64_t fSharedID;
+ };
+
+ typedef const Rec* ID;
+
+ /**
+ * Callback function for find(). If called, the cache will have found a match for the
+ * specified Key, and will pass in the corresponding Rec, along with a caller-specified
+ * context. The function can read the data in Rec, and copy whatever it likes into context
+ * (casting context to whatever it really is).
+ *
+ * The return value determines what the cache will do with the Rec. If the function returns
+ * true, then the Rec is considered "valid". If false is returned, the Rec will be considered
+ * "stale" and will be purged from the cache.
+ */
+ typedef bool (*FindVisitor)(const Rec&, void* context);
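+ // Editorial illustration (not part of the upstream Skia header): a typical
+ // visitor downcasts the Rec to the caller's own subclass, copies the cached
+ // payload out through `context`, and returns true to keep the entry. MyRec,
+ // MyPayload, and fPayload are hypothetical names.
+ //
+ // static bool MyVisitor(const SkResourceCache::Rec& baseRec, void* context) {
+ // const MyRec& rec = static_cast<const MyRec&>(baseRec);
+ // *static_cast<MyPayload*>(context) = rec.fPayload;
+ // return true; // returning false would mark the Rec stale and purge it
+ // }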
+
+ /**
+ * Returns a locked/pinned SkDiscardableMemory instance for the specified
+ * number of bytes, or nullptr on failure.
+ */
+ typedef SkDiscardableMemory* (*DiscardableFactory)(size_t bytes);
+
+ /*
+ * The following static methods are thread-safe wrappers around a global
+ * instance of this cache.
+ */
+
+ /**
+ * Returns true if the visitor was called on a matching Key, and the visitor returned true.
+ *
+ * Find() will search the cache for the specified Key. If no match is found, return false and
+ * do not call the FindVisitor. If a match is found, return whatever the visitor returns.
+ * Its return value is interpreted to mean:
+ * true : Rec is valid
+ * false : Rec is "stale" -- the cache will purge it.
+ */
+ static bool Find(const Key& key, FindVisitor, void* context);
+ static void Add(Rec*);
+
+ typedef void (*Visitor)(const Rec&, void* context);
+ // Call the visitor for every Rec in the cache.
+ static void VisitAll(Visitor, void* context);
+
+ static size_t GetTotalBytesUsed();
+ static size_t GetTotalByteLimit();
+ static size_t SetTotalByteLimit(size_t newLimit);
+
+ static size_t SetSingleAllocationByteLimit(size_t);
+ static size_t GetSingleAllocationByteLimit();
+ static size_t GetEffectiveSingleAllocationByteLimit();
+
+ static void PurgeAll();
+
+ static void TestDumpMemoryStatistics();
+
+ /** Dump memory usage statistics of every Rec in the cache using the
+ SkTraceMemoryDump interface.
+ */
+ static void DumpMemoryStatistics(SkTraceMemoryDump* dump);
+
+ /**
+ * Returns the DiscardableFactory used by the global cache, or nullptr.
+ */
+ static DiscardableFactory GetDiscardableFactory();
+
+ /**
+ * Use this allocator for bitmaps, so they can use ashmem when available.
+ * Returns nullptr if the ResourceCache has not been initialized with a DiscardableFactory.
+ */
+ static SkBitmap::Allocator* GetAllocator();
+
+ static SkCachedData* NewCachedData(size_t bytes);
+
+ static void PostPurgeSharedID(uint64_t sharedID);
+
+ /**
+ * Call SkDebugf() with diagnostic information about the state of the cache
+ */
+ static void Dump();
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ /**
+ * Construct the cache to call DiscardableFactory when it
+ * allocates memory for the pixels. In this mode, the cache has
+ * no explicit budget, and so methods like getTotalBytesUsed()
+ * and getTotalByteLimit() will return 0, and setTotalByteLimit
+ * will ignore its argument and return 0.
+ */
+ SkResourceCache(DiscardableFactory);
+
+ /**
+ * Construct the cache, allocating memory with malloc, and respect the
+ * byteLimit, purging automatically when a new image is added to the cache
+ * that pushes the total bytesUsed over the limit. Note: The limit can be
+ * changed at runtime with setTotalByteLimit.
+ */
+ explicit SkResourceCache(size_t byteLimit);
+ ~SkResourceCache();
+
+ /**
+ * Returns true if the visitor was called on a matching Key, and the visitor returned true.
+ *
+ * find() will search the cache for the specified Key. If no match is found, return false and
+ * do not call the FindVisitor. If a match is found, return whatever the visitor returns.
+ * Its return value is interpreted to mean:
+ * true : Rec is valid
+ * false : Rec is "stale" -- the cache will purge it.
+ */
+ bool find(const Key&, FindVisitor, void* context);
+ void add(Rec*);
+ void visitAll(Visitor, void* context);
+
+ size_t getTotalBytesUsed() const { return fTotalBytesUsed; }
+ size_t getTotalByteLimit() const { return fTotalByteLimit; }
+
+ /**
+ * This is respected by SkBitmapProcState::possiblyScaleImage.
+ * 0 is no maximum at all; this is the default.
+ * setSingleAllocationByteLimit() returns the previous value.
+ */
+ size_t setSingleAllocationByteLimit(size_t maximumAllocationSize);
+ size_t getSingleAllocationByteLimit() const;
+ // returns the logical single allocation size (pinning against the budget when the cache
+ // is not backed by discardable memory).
+ size_t getEffectiveSingleAllocationByteLimit() const;
+
+ /**
+ * Set the maximum number of bytes available to this cache. If the current
+ * cache exceeds this new value, it will be purged to try to fit within
+ * this new limit.
+ */
+ size_t setTotalByteLimit(size_t newLimit);
+
+ void purgeSharedID(uint64_t sharedID);
+
+ void purgeAll() {
+ this->purgeAsNeeded(true);
+ }
+
+ DiscardableFactory discardableFactory() const { return fDiscardableFactory; }
+ SkBitmap::Allocator* allocator() const { return fAllocator; }
+
+ SkCachedData* newCachedData(size_t bytes);
+
+ /**
+ * Call SkDebugf() with diagnostic information about the state of the cache
+ */
+ void dump() const;
+
+private:
+ Rec* fHead;
+ Rec* fTail;
+
+ class Hash;
+ Hash* fHash;
+
+ DiscardableFactory fDiscardableFactory;
+ // the allocator is nullptr or one that matches discardables
+ SkBitmap::Allocator* fAllocator;
+
+ size_t fTotalBytesUsed;
+ size_t fTotalByteLimit;
+ size_t fSingleAllocationByteLimit;
+ int fCount;
+
+ SkMessageBus<PurgeSharedIDMessage>::Inbox fPurgeSharedIDInbox;
+
+ void checkMessages();
+ void purgeAsNeeded(bool forcePurge = false);
+
+ // linked-list management
+ void moveToHead(Rec*);
+ void addToHead(Rec*);
+ void release(Rec*);
+ void remove(Rec*);
+
+ void init(); // called by constructors
+
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+};
+#endif
diff --git a/gfx/skia/skia/src/core/SkSRGB.cpp b/gfx/skia/skia/src/core/SkSRGB.cpp
new file mode 100644
index 000000000..57d2ae037
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSRGB.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkSRGB.h"
+
+const float sk_linear_from_srgb[256] = {
+ 0.000000000000000000f, 0.000303526983548838f, 0.000607053967097675f, 0.000910580950646513f,
+ 0.001214107934195350f, 0.001517634917744190f, 0.001821161901293030f, 0.002124688884841860f,
+ 0.002428215868390700f, 0.002731742851939540f, 0.003034518678424960f, 0.003346535763899160f,
+ 0.003676507324047440f, 0.004024717018496310f, 0.004391442037410290f, 0.004776953480693730f,
+ 0.005181516702338390f, 0.005605391624202720f, 0.006048833022857060f, 0.006512090792594470f,
+ 0.006995410187265390f, 0.007499032043226180f, 0.008023192985384990f, 0.008568125618069310f,
+ 0.009134058702220790f, 0.009721217320237850f, 0.010329823029626900f, 0.010960094006488200f,
+ 0.011612245179743900f, 0.012286488356915900f, 0.012983032342173000f, 0.013702083047289700f,
+ 0.014443843596092500f, 0.015208514422912700f, 0.015996293365509600f, 0.016807375752887400f,
+ 0.017641954488384100f, 0.018500220128379700f, 0.019382360956935700f, 0.020288563056652400f,
+ 0.021219010376003600f, 0.022173884793387400f, 0.023153366178110400f, 0.024157632448504800f,
+ 0.025186859627361600f, 0.026241221894849900f, 0.027320891639074900f, 0.028426039504420800f,
+ 0.029556834437808800f, 0.030713443732993600f, 0.031896033073011500f, 0.033104766570885100f,
+ 0.034339806808682200f, 0.035601314875020300f, 0.036889450401100000f, 0.038204371595346500f,
+ 0.039546235276732800f, 0.040915196906853200f, 0.042311410620809700f, 0.043735029256973500f,
+ 0.045186204385675500f, 0.046665086336880100f, 0.048171824226889400f, 0.049706565984127200f,
+ 0.051269458374043200f, 0.052860647023180200f, 0.054480276442442400f, 0.056128490049600100f,
+ 0.057805430191067200f, 0.059511238162981200f, 0.061246054231617600f, 0.063010017653167700f,
+ 0.064803266692905800f, 0.066625938643772900f, 0.068478169844400200f, 0.070360095696595900f,
+ 0.072271850682317500f, 0.074213568380149600f, 0.076185381481307900f, 0.078187421805186300f,
+ 0.080219820314468300f, 0.082282707129814800f, 0.084376211544148800f, 0.086500462036549800f,
+ 0.088655586285772900f, 0.090841711183407700f, 0.093058962846687500f, 0.095307466630964700f,
+ 0.097587347141862500f, 0.099898728247113900f, 0.102241733088101000f, 0.104616484091104000f,
+ 0.107023102978268000f, 0.109461710778299000f, 0.111932427836906000f, 0.114435373826974000f,
+ 0.116970667758511000f, 0.119538427988346000f, 0.122138772229602000f, 0.124771817560950000f,
+ 0.127437680435647000f, 0.130136476690364000f, 0.132868321553818000f, 0.135633329655206000f,
+ 0.138431615032452000f, 0.141263291140272000f, 0.144128470858058000f, 0.147027266497595000f,
+ 0.149959789810609000f, 0.152926151996150000f, 0.155926463707827000f, 0.158960835060880000f,
+ 0.162029375639111000f, 0.165132194501668000f, 0.168269400189691000f, 0.171441100732823000f,
+ 0.174647403655585000f, 0.177888415983629000f, 0.181164244249860000f, 0.184474994500441000f,
+ 0.187820772300678000f, 0.191201682740791000f, 0.194617830441576000f, 0.198069319559949000f,
+ 0.201556253794397000f, 0.205078736390317000f, 0.208636870145256000f, 0.212230757414055000f,
+ 0.215860500113899000f, 0.219526199729269000f, 0.223227957316809000f, 0.226965873510098000f,
+ 0.230740048524349000f, 0.234550582161005000f, 0.238397573812271000f, 0.242281122465555000f,
+ 0.246201326707835000f, 0.250158284729953000f, 0.254152094330827000f, 0.258182852921596000f,
+ 0.262250657529696000f, 0.266355604802862000f, 0.270497791013066000f, 0.274677312060385000f,
+ 0.278894263476810000f, 0.283148740429992000f, 0.287440837726918000f, 0.291770649817536000f,
+ 0.296138270798321000f, 0.300543794415777000f, 0.304987314069886000f, 0.309468922817509000f,
+ 0.313988713375718000f, 0.318546778125092000f, 0.323143209112951000f, 0.327778098056542000f,
+ 0.332451536346179000f, 0.337163615048330000f, 0.341914424908661000f, 0.346704056355030000f,
+ 0.351532599500439000f, 0.356400144145944000f, 0.361306779783510000f, 0.366252595598840000f,
+ 0.371237680474149000f, 0.376262122990906000f, 0.381326011432530000f, 0.386429433787049000f,
+ 0.391572477749723000f, 0.396755230725627000f, 0.401977779832196000f, 0.407240211901737000f,
+ 0.412542613483904000f, 0.417885070848138000f, 0.423267669986072000f, 0.428690496613907000f,
+ 0.434153636174749000f, 0.439657173840919000f, 0.445201194516228000f, 0.450785782838223000f,
+ 0.456411023180405000f, 0.462076999654407000f, 0.467783796112159000f, 0.473531496148010000f,
+ 0.479320183100827000f, 0.485149940056070000f, 0.491020849847836000f, 0.496932995060870000f,
+ 0.502886458032569000f, 0.508881320854934000f, 0.514917665376521000f, 0.520995573204354000f,
+ 0.527115125705813000f, 0.533276404010505000f, 0.539479489012107000f, 0.545724461370187000f,
+ 0.552011401512000000f, 0.558340389634268000f, 0.564711505704929000f, 0.571124829464873000f,
+ 0.577580440429651000f, 0.584078417891164000f, 0.590618840919337000f, 0.597201788363763000f,
+ 0.603827338855338000f, 0.610495570807865000f, 0.617206562419651000f, 0.623960391675076000f,
+ 0.630757136346147000f, 0.637596873994033000f, 0.644479681970582000f, 0.651405637419824000f,
+ 0.658374817279448000f, 0.665387298282272000f, 0.672443156957688000f, 0.679542469633094000f,
+ 0.686685312435314000f, 0.693871761291990000f, 0.701101891932973000f, 0.708375779891687000f,
+ 0.715693500506481000f, 0.723055128921969000f, 0.730460740090354000f, 0.737910408772731000f,
+ 0.745404209540387000f, 0.752942216776078000f, 0.760524504675292000f, 0.768151147247507000f,
+ 0.775822218317423000f, 0.783537791526194000f, 0.791297940332630000f, 0.799102738014409000f,
+ 0.806952257669252000f, 0.814846572216101000f, 0.822785754396284000f, 0.830769876774655000f,
+ 0.838799011740740000f, 0.846873231509858000f, 0.854992608124234000f, 0.863157213454102000f,
+ 0.871367119198797000f, 0.879622396887832000f, 0.887923117881966000f, 0.896269353374266000f,
+ 0.904661174391149000f, 0.913098651793419000f, 0.921581856277295000f, 0.930110858375424000f,
+ 0.938685728457888000f, 0.947306536733200000f, 0.955973353249286000f, 0.964686247894465000f,
+ 0.973445290398413000f, 0.982250550333117000f, 0.991102097113830000f, 1.000000000000000000f,
+};
diff --git a/gfx/skia/skia/src/core/SkSRGB.h b/gfx/skia/skia/src/core/SkSRGB.h
new file mode 100644
index 000000000..e60e28886
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSRGB.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSRGB_DEFINED
+#define SkSRGB_DEFINED
+
+#include "SkNx.h"
+
+/** Components for building our canonical sRGB -> linear and linear -> sRGB transformations.
+ *
+ * Current best practices:
+ * - for sRGB -> linear, lookup R,G,B in sk_linear_from_srgb;
+ * - for linear -> sRGB, call sk_linear_to_srgb() for R,G,B;
+ * - the alpha channel is linear in both formats, needing at most *(1/255.0f) or *255.0f.
+ *
+ * sk_linear_to_srgb() will run a little faster than usual when compiled with SSE4.1+.
+ */
+
+extern const float sk_linear_from_srgb[256];
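+
+// Usage sketch (illustrative only): convert one 8-bit sRGB pixel to linear floats,
+// operate there, then convert back.  Alpha is already linear and stays out of the curve.
+//
+//     uint8_t r, g, b;                     // sRGB-encoded channels
+//     Sk4f linear(sk_linear_from_srgb[r],
+//                 sk_linear_from_srgb[g],
+//                 sk_linear_from_srgb[b],
+//                 0.0f);                   // alpha handled separately (just *255.0f)
+//     // ... blend or filter in linear space ...
+//     Sk4i srgb = sk_linear_to_srgb(linear);   // r,g,b back as integers in [0,255]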
+
+static inline Sk4f sk_clamp_0_255(const Sk4f& x) {
+ // The order of the arguments is important here. We want to make sure that NaN
+ // clamps to zero. Note that max(NaN, 0) = 0, while max(0, NaN) = NaN.
+ return Sk4f::Min(Sk4f::Max(x, 0.0f), 255.0f);
+}
+
+// This should probably only be called from sk_linear_to_srgb() or sk_linear_to_srgb_noclamp().
+// It generally doesn't make sense to work with sRGB floats.
+static inline Sk4f sk_linear_to_srgb_needs_trunc(const Sk4f& x) {
+ // Approximation of the sRGB gamma curve (within 1 when scaled to 8-bit pixels).
+ //
+ // Constants tuned by brute force to minimize (in order of importance) after truncation:
+ // 1) the number of bytes that fail to round trip (0 of 256);
+ // 2) the number of points in [FLT_MIN, 1.0f] that are non-monotonic (0 of ~1 billion);
+ // 3) the number of points halfway between bytes that hit the wrong byte (131 of 255).
+ auto rsqrt = x.rsqrt(),
+ sqrt = rsqrt.invert(),
+ ftrt = rsqrt.rsqrt();
+
+ auto lo = (13.0471f * 255.0f) * x;
+
+ auto hi = (-0.0974983f * 255.0f)
+ + (+0.687999f * 255.0f) * sqrt
+ + (+0.412999f * 255.0f) * ftrt;
+ return (x < 0.0048f).thenElse(lo, hi);
+}
+
+static inline Sk4i sk_linear_to_srgb(const Sk4f& x) {
+ Sk4f f = sk_linear_to_srgb_needs_trunc(x);
+ return SkNx_cast<int>(sk_clamp_0_255(f));
+}
+
+static inline Sk4i sk_linear_to_srgb_noclamp(const Sk4f& x) {
+ Sk4f f = sk_linear_to_srgb_needs_trunc(x);
+ for (int i = 0; i < 4; i++) {
+ SkASSERTF(0.0f <= f[i] && f[i] < 256.0f, "f[%d] was %g, outside [0,256)\n", i, f[i]);
+ }
+ return SkNx_cast<int>(f);
+}
+
+// sRGB -> linear, using math instead of table lookups, scaling better to larger SIMD vectors.
+static inline Sk4f sk_linear_from_srgb_math(const Sk4i& s) {
+ auto x = SkNx_cast<float>(s);
+
+ const float u = 1/255.0f; // x is [0,255], so x^n needs scaling by u^n.
+
+ // Non-linear segment of sRGB curve approximated by
+ // l = 0.0025 + 0.6975x^2 + 0.3x^3
+ const float k0 = 0.0025f,
+ k2 = 0.6975f * u*u,
+ k3 = 0.3000f * u*u*u;
+ auto hi = k0 + (k2 + k3*x) * (x*x);
+
+ // Linear segment of sRGB curve: the normal slope, extended a little further than normal.
+ auto lo = x * (u/12.92f);
+
+ return (x < 14.025f).thenElse(lo, hi);
+}
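+
+// Quick check on the cubic above (not in the original source): at s = 255 the scaled terms
+// sum to 0.0025 + 0.6975 + 0.3 = 1.0 exactly, so sRGB white maps to linear 1.0, and the
+// 14.025/255 ~= 0.055 cutover sits just above sRGB's nominal 0.04045 linear threshold,
+// matching the "extended a little further than normal" remark.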
+
+#endif//SkSRGB_DEFINED
diff --git a/gfx/skia/skia/src/core/SkScalar.cpp b/gfx/skia/skia/src/core/SkScalar.cpp
new file mode 100644
index 000000000..87a5073e1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScalar.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkMath.h"
+#include "SkScalar.h"
+
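+// Piecewise-linear lookup: keys[] must be sorted ascending; searchKey is clamped to the
+// table's range and otherwise interpolated between its two neighbours.  For example
+// (numbers are illustrative, not from the original source):
+//     keys = { 0, 10 }, values = { 100, 200 }
+//     SkScalarInterpFunc(2.5f, keys, values, 2) == 125    // 100 + 0.25 * (200 - 100)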
+SkScalar SkScalarInterpFunc(SkScalar searchKey, const SkScalar keys[],
+ const SkScalar values[], int length) {
+ SkASSERT(length > 0);
+ SkASSERT(keys != nullptr);
+ SkASSERT(values != nullptr);
+#ifdef SK_DEBUG
+ for (int i = 1; i < length; i++)
+ SkASSERT(keys[i] >= keys[i-1]);
+#endif
+ int right = 0;
+ while (right < length && searchKey > keys[right])
+ right++;
+ // Could use sentinel values to eliminate conditionals, but since the
+ // tables are taken as input, a simpler format is better.
+ if (length == right)
+ return values[length-1];
+ if (0 == right)
+ return values[0];
+ // Otherwise, interpolate between right - 1 and right.
+ SkScalar rightKey = keys[right];
+ SkScalar leftKey = keys[right-1];
+ SkScalar fract = (searchKey - leftKey) / (rightKey - leftKey);
+ return SkScalarInterp(values[right-1], values[right], fract);
+}
diff --git a/gfx/skia/skia/src/core/SkScaleToSides.h b/gfx/skia/skia/src/core/SkScaleToSides.h
new file mode 100644
index 000000000..c70089106
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScaleToSides.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkScaleToSides_DEFINED
+#define SkScaleToSides_DEFINED
+
+#include <cmath>
+#include "SkScalar.h"
+#include "SkTypes.h"
+
+class SkScaleToSides {
+public:
+ // This code assumes that a and b fit in a float, and therefore the resulting smaller value
+ // of a and b will fit in a float. The side of the rectangle (limit) may be too large to fit in a float.
+ // Scale must be less than or equal to the ratio limit / (*a + *b).
+ // This code assumes that NaN and Inf are never passed in.
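+ //
+ // Worked example (numbers are illustrative only): for a side of length limit = 10 with
+ // radii a = 8 and b = 6 (sum 14 > 10), the caller passes scale = 10/14.  The radii scale
+ // to roughly 5.714 and 4.286; the code below then nudges the larger one down by an ULP
+ // or two if float rounding pushed the sum above 10.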
+ static void AdjustRadii(double limit, double scale, SkScalar* a, SkScalar* b) {
+ SkASSERTF(scale < 1.0 && scale > 0.0, "scale: %g", scale);
+
+ *a = (float)((double)*a * scale);
+ *b = (float)((double)*b * scale);
+
+ if (*a + *b > limit) {
+ float* minRadius = a;
+ float* maxRadius = b;
+
+ // Force minRadius to be the smaller of the two.
+ if (*minRadius > *maxRadius) {
+ SkTSwap(minRadius, maxRadius);
+ }
+
+ // newMinRadius must be float in order to give the actual value of the radius.
+ // The newMinRadius will always be smaller than limit. The largest that minRadius can be
+ // is 1/2 the ratio of minRadius : (minRadius + maxRadius), therefore in the resulting
+ // division, minRadius can be no larger than 1/2 limit + ULP. The newMinRadius can be
+ // 1/2 a ULP off at this point.
+ float newMinRadius = *minRadius;
+
+ // Because newMaxRadius is the result of a double to float conversion, it can be larger
+ // than limit, but only by one ULP.
+ float newMaxRadius = (float)(limit - newMinRadius);
+
+ // The total sum of newMinRadius and newMaxRadius can be up to 1.5 ULPs off. If the
+ // sum is greater than the limit then newMaxRadius may have to be reduced twice.
+ // Note: nextafterf is a c99 call and should be std::nextafter, but this is not
+ // implemented in the GCC ARM compiler.
+ if (newMaxRadius + newMinRadius > limit) {
+ newMaxRadius = nextafterf(newMaxRadius, 0.0f);
+ if (newMaxRadius + newMinRadius > limit) {
+ newMaxRadius = nextafterf(newMaxRadius, 0.0f);
+ }
+ }
+ *maxRadius = newMaxRadius;
+ }
+
+ SkASSERTF(*a >= 0.0f && *b >= 0.0f, "a: %g, b: %g, limit: %g, scale: %g", *a, *b, limit,
+ scale);
+
+ SkASSERTF(*a + *b <= limit,
+ "\nlimit: %.17f, sum: %.17f, a: %.10f, b: %.10f, scale: %.20f",
+ limit, *a + *b, *a, *b, scale);
+ }
+};
+#endif // SkScaleToSides_DEFINED
diff --git a/gfx/skia/skia/src/core/SkScalerContext.cpp b/gfx/skia/skia/src/core/SkScalerContext.cpp
new file mode 100644
index 000000000..ecac82a34
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScalerContext.cpp
@@ -0,0 +1,871 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkScalerContext.h"
+#include "SkAutoPixmapStorage.h"
+#include "SkColorPriv.h"
+#include "SkDescriptor.h"
+#include "SkDraw.h"
+#include "SkGlyph.h"
+#include "SkMaskFilter.h"
+#include "SkMaskGamma.h"
+#include "SkMatrix22.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+#include "SkPathEffect.h"
+#include "SkRasterizer.h"
+#include "SkRasterClip.h"
+#include "SkStroke.h"
+#include "SkStrokeRec.h"
+
+#define ComputeBWRowBytes(width) (((unsigned)(width) + 7) >> 3)
+
+void SkGlyph::toMask(SkMask* mask) const {
+ SkASSERT(mask);
+
+ mask->fImage = (uint8_t*)fImage;
+ mask->fBounds.set(fLeft, fTop, fLeft + fWidth, fTop + fHeight);
+ mask->fRowBytes = this->rowBytes();
+ mask->fFormat = static_cast<SkMask::Format>(fMaskFormat);
+}
+
+size_t SkGlyph::computeImageSize() const {
+ const size_t size = this->rowBytes() * fHeight;
+
+ switch (fMaskFormat) {
+ case SkMask::k3D_Format:
+ return 3 * size;
+ default:
+ return size;
+ }
+}
+
+void SkGlyph::zeroMetrics() {
+ fAdvanceX = 0;
+ fAdvanceY = 0;
+ fWidth = 0;
+ fHeight = 0;
+ fTop = 0;
+ fLeft = 0;
+ fRsbDelta = 0;
+ fLsbDelta = 0;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+ #define DUMP_RECx
+#endif
+
+SkScalerContext::SkScalerContext(SkTypeface* typeface, const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : fRec(*static_cast<const Rec*>(desc->findEntry(kRec_SkDescriptorTag, nullptr)))
+
+ , fTypeface(sk_ref_sp(typeface))
+ , fPathEffect(sk_ref_sp(effects.fPathEffect))
+ , fMaskFilter(sk_ref_sp(effects.fMaskFilter))
+ , fRasterizer(sk_ref_sp(effects.fRasterizer))
+ // Initialize based on our settings. Subclasses can also force this.
+ , fGenerateImageFromPath(fRec.fFrameWidth > 0 || fPathEffect != nullptr || fRasterizer != nullptr)
+
+ , fPreBlend(fMaskFilter ? SkMaskGamma::PreBlend() : SkScalerContext::GetMaskPreBlend(fRec))
+ , fPreBlendForFilter(fMaskFilter ? SkScalerContext::GetMaskPreBlend(fRec)
+ : SkMaskGamma::PreBlend())
+{
+#ifdef DUMP_REC
+ desc->assertChecksum();
+ SkDebugf("SkScalerContext checksum %x count %d length %d\n",
+ desc->getChecksum(), desc->getCount(), desc->getLength());
+ SkDebugf(" textsize %g prescale %g preskew %g post [%g %g %g %g]\n",
+ rec->fTextSize, rec->fPreScaleX, rec->fPreSkewX, rec->fPost2x2[0][0],
+ rec->fPost2x2[0][1], rec->fPost2x2[1][0], rec->fPost2x2[1][1]);
+ SkDebugf(" frame %g miter %g hints %d framefill %d format %d join %d cap %d\n",
+ rec->fFrameWidth, rec->fMiterLimit, rec->fHints, rec->fFrameAndFill,
+ rec->fMaskFormat, rec->fStrokeJoin, rec->fStrokeCap);
+ SkDebugf(" pathEffect %x maskFilter %x\n",
+ desc->findEntry(kPathEffect_SkDescriptorTag, nullptr),
+ desc->findEntry(kMaskFilter_SkDescriptorTag, nullptr));
+#endif
+}
+
+SkScalerContext::~SkScalerContext() {}
+
+void SkScalerContext::getAdvance(SkGlyph* glyph) {
+ // mark us as just having a valid advance
+ glyph->fMaskFormat = MASK_FORMAT_JUST_ADVANCE;
+ // we mark the format before making the call, in case the impl
+ // internally ends up calling its generateMetrics, which is OK
+ // albeit slower than strictly necessary
+ generateAdvance(glyph);
+}
+
+void SkScalerContext::getMetrics(SkGlyph* glyph) {
+ generateMetrics(glyph);
+
+ // for now we have separate cache entries for devkerning on and off;
+ // in the future we might share caches, but make our measure/draw
+ // code make the distinction. Thus we zap the values if the caller
+ // has not asked for them.
+ if ((fRec.fFlags & SkScalerContext::kDevKernText_Flag) == 0) {
+ // no devkern, so zap the fields
+ glyph->fLsbDelta = glyph->fRsbDelta = 0;
+ }
+
+ // if either dimension is empty, zap the image bounds of the glyph
+ if (0 == glyph->fWidth || 0 == glyph->fHeight) {
+ glyph->fWidth = 0;
+ glyph->fHeight = 0;
+ glyph->fTop = 0;
+ glyph->fLeft = 0;
+ glyph->fMaskFormat = 0;
+ return;
+ }
+
+ if (fGenerateImageFromPath) {
+ SkPath devPath, fillPath;
+ SkMatrix fillToDevMatrix;
+
+ this->internalGetPath(*glyph, &fillPath, &devPath, &fillToDevMatrix);
+
+ if (fRasterizer) {
+ SkMask mask;
+
+ if (fRasterizer->rasterize(fillPath, fillToDevMatrix, nullptr,
+ fMaskFilter.get(), &mask,
+ SkMask::kJustComputeBounds_CreateMode)) {
+ glyph->fLeft = mask.fBounds.fLeft;
+ glyph->fTop = mask.fBounds.fTop;
+ glyph->fWidth = SkToU16(mask.fBounds.width());
+ glyph->fHeight = SkToU16(mask.fBounds.height());
+ } else {
+ goto SK_ERROR;
+ }
+ } else {
+ // just use devPath
+ const SkIRect ir = devPath.getBounds().roundOut();
+
+ if (ir.isEmpty() || !ir.is16Bit()) {
+ goto SK_ERROR;
+ }
+ glyph->fLeft = ir.fLeft;
+ glyph->fTop = ir.fTop;
+ glyph->fWidth = SkToU16(ir.width());
+ glyph->fHeight = SkToU16(ir.height());
+
+ if (glyph->fWidth > 0) {
+ switch (fRec.fMaskFormat) {
+ case SkMask::kLCD16_Format:
+ glyph->fWidth += 2;
+ glyph->fLeft -= 1;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+
+ if (SkMask::kARGB32_Format != glyph->fMaskFormat) {
+ glyph->fMaskFormat = fRec.fMaskFormat;
+ }
+
+ // If we are going to create the mask, then we cannot keep the color
+ if ((fGenerateImageFromPath || fMaskFilter) &&
+ SkMask::kARGB32_Format == glyph->fMaskFormat) {
+ glyph->fMaskFormat = SkMask::kA8_Format;
+ }
+
+ if (fMaskFilter) {
+ SkMask src, dst;
+ SkMatrix matrix;
+
+ glyph->toMask(&src);
+ fRec.getMatrixFrom2x2(&matrix);
+
+ src.fImage = nullptr; // only want the bounds from the filter
+ if (fMaskFilter->filterMask(&dst, src, matrix, nullptr)) {
+ if (dst.fBounds.isEmpty() || !dst.fBounds.is16Bit()) {
+ goto SK_ERROR;
+ }
+ SkASSERT(dst.fImage == nullptr);
+ glyph->fLeft = dst.fBounds.fLeft;
+ glyph->fTop = dst.fBounds.fTop;
+ glyph->fWidth = SkToU16(dst.fBounds.width());
+ glyph->fHeight = SkToU16(dst.fBounds.height());
+ glyph->fMaskFormat = dst.fFormat;
+ }
+ }
+ return;
+
+SK_ERROR:
+ // draw nothing 'cause we failed
+ glyph->fLeft = 0;
+ glyph->fTop = 0;
+ glyph->fWidth = 0;
+ glyph->fHeight = 0;
+ // put a valid value here, in case it was earlier set to
+ // MASK_FORMAT_JUST_ADVANCE
+ glyph->fMaskFormat = fRec.fMaskFormat;
+}
+
+#define SK_SHOW_TEXT_BLIT_COVERAGE 0
+
+static void applyLUTToA8Mask(const SkMask& mask, const uint8_t* lut) {
+ uint8_t* SK_RESTRICT dst = (uint8_t*)mask.fImage;
+ unsigned rowBytes = mask.fRowBytes;
+
+ for (int y = mask.fBounds.height() - 1; y >= 0; --y) {
+ for (int x = mask.fBounds.width() - 1; x >= 0; --x) {
+ dst[x] = lut[dst[x]];
+ }
+ dst += rowBytes;
+ }
+}
+
+template<bool APPLY_PREBLEND>
+static void pack4xHToLCD16(const SkPixmap& src, const SkMask& dst,
+ const SkMaskGamma::PreBlend& maskPreBlend) {
+#define SAMPLES_PER_PIXEL 4
+#define LCD_PER_PIXEL 3
+ SkASSERT(kAlpha_8_SkColorType == src.colorType());
+ SkASSERT(SkMask::kLCD16_Format == dst.fFormat);
+
+ const int sample_width = src.width();
+ const int height = src.height();
+
+ uint16_t* dstP = (uint16_t*)dst.fImage;
+ size_t dstRB = dst.fRowBytes;
+ // An N tap FIR is defined by
+ // out[n] = coeff[0]*x[n] + coeff[1]*x[n-1] + ... + coeff[N]*x[n-N]
+ // or
+ // out[n] = sum(i, 0, N, coeff[i]*x[n-i])
+
+ // The strategy is to use one FIR (different coefficients) for each of r, g, and b.
+ // This means using every 4th FIR output value of each FIR and discarding the rest.
+ // The FIRs are aligned, and the coefficients reach 5 samples to each side of their 'center'.
+ // (For r and b this is technically incorrect, but the coeffs outside round to zero anyway.)
+
+ // These are in some fixed-point representation.
+ // Adding up to more than one simulates ink spread.
+ // For implementation reasons, these should never add up to more than two.
+
+ // Coefficients determined by a gaussian where 5 samples = 3 std deviations (0x110 'contrast').
+ // Calculated using tools/generate_fir_coeff.py
+ // With this one almost no fringing is ever seen, but it is imperceptibly blurry.
+ // The lcd smoothed text is almost imperceptibly different from gray,
+ // but is still sharper on small stems and small rounded corners than gray.
+ // This also seems to be about as wide as one can get and only have a three pixel kernel.
+ // TODO: calculate these at runtime so parameters can be adjusted (esp. contrast).
+ static const unsigned int coefficients[LCD_PER_PIXEL][SAMPLES_PER_PIXEL*3] = {
+ //The red subpixel is centered inside the first sample (at 1/6 pixel), and is shifted.
+ { 0x03, 0x0b, 0x1c, 0x33, 0x40, 0x39, 0x24, 0x10, 0x05, 0x01, 0x00, 0x00, },
+ //The green subpixel is centered between two samples (at 1/2 pixel), so is symmetric
+ { 0x00, 0x02, 0x08, 0x16, 0x2b, 0x3d, 0x3d, 0x2b, 0x16, 0x08, 0x02, 0x00, },
+ //The blue subpixel is centered inside the last sample (at 5/6 pixel), and is shifted.
+ { 0x00, 0x00, 0x01, 0x05, 0x10, 0x24, 0x39, 0x40, 0x33, 0x1c, 0x0b, 0x03, },
+ };
+
+ for (int y = 0; y < height; ++y) {
+ const uint8_t* srcP = src.addr8(0, y);
+
+ // TODO: this FIR filter implementation is straightforward, but slow.
+ // It should be possible to make it much faster.
+ for (int sample_x = -4, pixel_x = 0; sample_x < sample_width + 4; sample_x += 4, ++pixel_x) {
+ int fir[LCD_PER_PIXEL] = { 0 };
+ for (int sample_index = SkMax32(0, sample_x - 4), coeff_index = sample_index - (sample_x - 4)
+ ; sample_index < SkMin32(sample_x + 8, sample_width)
+ ; ++sample_index, ++coeff_index)
+ {
+ int sample_value = srcP[sample_index];
+ for (int subpxl_index = 0; subpxl_index < LCD_PER_PIXEL; ++subpxl_index) {
+ fir[subpxl_index] += coefficients[subpxl_index][coeff_index] * sample_value;
+ }
+ }
+ for (int subpxl_index = 0; subpxl_index < LCD_PER_PIXEL; ++subpxl_index) {
+ fir[subpxl_index] /= 0x100;
+ fir[subpxl_index] = SkMin32(fir[subpxl_index], 255);
+ }
+
+ U8CPU r = sk_apply_lut_if<APPLY_PREBLEND>(fir[0], maskPreBlend.fR);
+ U8CPU g = sk_apply_lut_if<APPLY_PREBLEND>(fir[1], maskPreBlend.fG);
+ U8CPU b = sk_apply_lut_if<APPLY_PREBLEND>(fir[2], maskPreBlend.fB);
+#if SK_SHOW_TEXT_BLIT_COVERAGE
+ r = SkMax32(r, 10); g = SkMax32(g, 10); b = SkMax32(b, 10);
+#endif
+ dstP[pixel_x] = SkPack888ToRGB16(r, g, b);
+ }
+ dstP = (uint16_t*)((char*)dstP + dstRB);
+ }
+}
+
+static inline int convert_8_to_1(unsigned byte) {
+ SkASSERT(byte <= 0xFF);
+ return byte >> 7;
+}
+
+static uint8_t pack_8_to_1(const uint8_t alpha[8]) {
+ unsigned bits = 0;
+ for (int i = 0; i < 8; ++i) {
+ bits <<= 1;
+ bits |= convert_8_to_1(alpha[i]);
+ }
+ return SkToU8(bits);
+}
+
+static void packA8ToA1(const SkMask& mask, const uint8_t* src, size_t srcRB) {
+ const int height = mask.fBounds.height();
+ const int width = mask.fBounds.width();
+ const int octs = width >> 3;
+ const int leftOverBits = width & 7;
+
+ uint8_t* dst = mask.fImage;
+ const int dstPad = mask.fRowBytes - SkAlign8(width)/8;
+ SkASSERT(dstPad >= 0);
+
+ SkASSERT(width >= 0);
+ SkASSERT(srcRB >= (size_t)width);
+ const size_t srcPad = srcRB - width;
+
+ for (int y = 0; y < height; ++y) {
+ for (int i = 0; i < octs; ++i) {
+ *dst++ = pack_8_to_1(src);
+ src += 8;
+ }
+ if (leftOverBits > 0) {
+ unsigned bits = 0;
+ int shift = 7;
+ for (int i = 0; i < leftOverBits; ++i, --shift) {
+ bits |= convert_8_to_1(*src++) << shift;
+ }
+ *dst++ = bits;
+ }
+ src += srcPad;
+ dst += dstPad;
+ }
+}
+
+static void generateMask(const SkMask& mask, const SkPath& path,
+ const SkMaskGamma::PreBlend& maskPreBlend) {
+ SkPaint paint;
+
+ int srcW = mask.fBounds.width();
+ int srcH = mask.fBounds.height();
+ int dstW = srcW;
+ int dstH = srcH;
+ int dstRB = mask.fRowBytes;
+
+ SkMatrix matrix;
+ matrix.setTranslate(-SkIntToScalar(mask.fBounds.fLeft),
+ -SkIntToScalar(mask.fBounds.fTop));
+
+ paint.setAntiAlias(SkMask::kBW_Format != mask.fFormat);
+ switch (mask.fFormat) {
+ case SkMask::kBW_Format:
+ dstRB = 0; // signals we need a copy
+ break;
+ case SkMask::kA8_Format:
+ break;
+ case SkMask::kLCD16_Format:
+ // TODO: trigger off LCD orientation
+ dstW = 4*dstW - 8;
+ matrix.setTranslate(-SkIntToScalar(mask.fBounds.fLeft + 1),
+ -SkIntToScalar(mask.fBounds.fTop));
+ matrix.postScale(SkIntToScalar(4), SK_Scalar1);
+ dstRB = 0; // signals we need a copy
+ break;
+ default:
+ SkDEBUGFAIL("unexpected mask format");
+ }
+
+ SkRasterClip clip;
+ clip.setRect(SkIRect::MakeWH(dstW, dstH));
+
+ const SkImageInfo info = SkImageInfo::MakeA8(dstW, dstH);
+ SkAutoPixmapStorage dst;
+
+ if (0 == dstRB) {
+ if (!dst.tryAlloc(info)) {
+ // can't allocate offscreen, so empty the mask and return
+ sk_bzero(mask.fImage, mask.computeImageSize());
+ return;
+ }
+ } else {
+ dst.reset(info, mask.fImage, dstRB);
+ }
+ sk_bzero(dst.writable_addr(), dst.getSafeSize());
+
+ SkDraw draw;
+ draw.fDst = dst;
+ draw.fRC = &clip;
+ draw.fMatrix = &matrix;
+ draw.drawPath(path, paint);
+
+ switch (mask.fFormat) {
+ case SkMask::kBW_Format:
+ packA8ToA1(mask, dst.addr8(0, 0), dst.rowBytes());
+ break;
+ case SkMask::kA8_Format:
+ if (maskPreBlend.isApplicable()) {
+ applyLUTToA8Mask(mask, maskPreBlend.fG);
+ }
+ break;
+ case SkMask::kLCD16_Format:
+ if (maskPreBlend.isApplicable()) {
+ pack4xHToLCD16<true>(dst, mask, maskPreBlend);
+ } else {
+ pack4xHToLCD16<false>(dst, mask, maskPreBlend);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void extract_alpha(const SkMask& dst,
+ const SkPMColor* srcRow, size_t srcRB) {
+ int width = dst.fBounds.width();
+ int height = dst.fBounds.height();
+ int dstRB = dst.fRowBytes;
+ uint8_t* dstRow = dst.fImage;
+
+ for (int y = 0; y < height; ++y) {
+ for (int x = 0; x < width; ++x) {
+ dstRow[x] = SkGetPackedA32(srcRow[x]);
+ }
+ // zero any padding on each row
+ for (int x = width; x < dstRB; ++x) {
+ dstRow[x] = 0;
+ }
+ dstRow += dstRB;
+ srcRow = (const SkPMColor*)((const char*)srcRow + srcRB);
+ }
+}
+
+void SkScalerContext::getImage(const SkGlyph& origGlyph) {
+ const SkGlyph* glyph = &origGlyph;
+ SkGlyph tmpGlyph;
+
+ // in case we need to call generateImage on a mask-format that is different
+ // (i.e. larger) than what our caller allocated by looking at origGlyph.
+ SkAutoMalloc tmpGlyphImageStorage;
+
+ // If we are going to draw-from-path, then we cannot generate color, since
+ // the path only makes a mask. This case should have been caught up in
+ // generateMetrics().
+ SkASSERT(!fGenerateImageFromPath ||
+ SkMask::kARGB32_Format != origGlyph.fMaskFormat);
+
+ if (fMaskFilter) { // restore the prefilter bounds
+ tmpGlyph.initGlyphIdFrom(origGlyph);
+
+ // need the original bounds, sans our maskfilter
+ SkMaskFilter* mf = fMaskFilter.release(); // temp disable
+ this->getMetrics(&tmpGlyph);
+ fMaskFilter = sk_sp<SkMaskFilter>(mf); // restore
+
+ // we need the prefilter bounds to be <= filter bounds
+ SkASSERT(tmpGlyph.fWidth <= origGlyph.fWidth);
+ SkASSERT(tmpGlyph.fHeight <= origGlyph.fHeight);
+
+ if (tmpGlyph.fMaskFormat == origGlyph.fMaskFormat) {
+ tmpGlyph.fImage = origGlyph.fImage;
+ } else {
+ tmpGlyphImageStorage.reset(tmpGlyph.computeImageSize());
+ tmpGlyph.fImage = tmpGlyphImageStorage.get();
+ }
+ glyph = &tmpGlyph;
+ }
+
+ if (fGenerateImageFromPath) {
+ SkPath devPath, fillPath;
+ SkMatrix fillToDevMatrix;
+ SkMask mask;
+
+ this->internalGetPath(*glyph, &fillPath, &devPath, &fillToDevMatrix);
+ glyph->toMask(&mask);
+
+ if (fRasterizer) {
+ mask.fFormat = SkMask::kA8_Format;
+ sk_bzero(glyph->fImage, mask.computeImageSize());
+
+ if (!fRasterizer->rasterize(fillPath, fillToDevMatrix, nullptr,
+ fMaskFilter.get(), &mask,
+ SkMask::kJustRenderImage_CreateMode)) {
+ return;
+ }
+ if (fPreBlend.isApplicable()) {
+ applyLUTToA8Mask(mask, fPreBlend.fG);
+ }
+ } else {
+ SkASSERT(SkMask::kARGB32_Format != mask.fFormat);
+ generateMask(mask, devPath, fPreBlend);
+ }
+ } else {
+ generateImage(*glyph);
+ }
+
+ if (fMaskFilter) {
+ SkMask srcM, dstM;
+ SkMatrix matrix;
+
+ // the src glyph image shouldn't be 3D
+ SkASSERT(SkMask::k3D_Format != glyph->fMaskFormat);
+
+ SkAutoSMalloc<32*32> a8storage;
+ glyph->toMask(&srcM);
+ if (SkMask::kARGB32_Format == srcM.fFormat) {
+ // now we need to extract the alpha-channel from the glyph's image
+ // and copy it into a temp buffer, and then point srcM at that temp.
+ srcM.fFormat = SkMask::kA8_Format;
+ srcM.fRowBytes = SkAlign4(srcM.fBounds.width());
+ size_t size = srcM.computeImageSize();
+ a8storage.reset(size);
+ srcM.fImage = (uint8_t*)a8storage.get();
+ extract_alpha(srcM,
+ (const SkPMColor*)glyph->fImage, glyph->rowBytes());
+ }
+
+ fRec.getMatrixFrom2x2(&matrix);
+
+ if (fMaskFilter->filterMask(&dstM, srcM, matrix, nullptr)) {
+ int width = SkFastMin32(origGlyph.fWidth, dstM.fBounds.width());
+ int height = SkFastMin32(origGlyph.fHeight, dstM.fBounds.height());
+ int dstRB = origGlyph.rowBytes();
+ int srcRB = dstM.fRowBytes;
+
+ const uint8_t* src = (const uint8_t*)dstM.fImage;
+ uint8_t* dst = (uint8_t*)origGlyph.fImage;
+
+ if (SkMask::k3D_Format == dstM.fFormat) {
+ // we have to copy 3 times as much
+ height *= 3;
+ }
+
+ // clean out our glyph, since it may be larger than dstM
+ //sk_bzero(dst, height * dstRB);
+
+ while (--height >= 0) {
+ memcpy(dst, src, width);
+ src += srcRB;
+ dst += dstRB;
+ }
+ SkMask::FreeImage(dstM.fImage);
+
+ if (fPreBlendForFilter.isApplicable()) {
+ applyLUTToA8Mask(srcM, fPreBlendForFilter.fG);
+ }
+ }
+ }
+}
+
+void SkScalerContext::getPath(const SkGlyph& glyph, SkPath* path) {
+ this->internalGetPath(glyph, nullptr, path, nullptr);
+}
+
+void SkScalerContext::getFontMetrics(SkPaint::FontMetrics* fm) {
+ this->generateFontMetrics(fm);
+}
+
+SkUnichar SkScalerContext::generateGlyphToChar(uint16_t glyph) {
+ return 0;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkScalerContext::internalGetPath(const SkGlyph& glyph, SkPath* fillPath,
+ SkPath* devPath, SkMatrix* fillToDevMatrix) {
+ SkPath path;
+ generatePath(glyph, &path);
+
+ if (fRec.fFlags & SkScalerContext::kSubpixelPositioning_Flag) {
+ SkFixed dx = glyph.getSubXFixed();
+ SkFixed dy = glyph.getSubYFixed();
+ if (dx | dy) {
+ path.offset(SkFixedToScalar(dx), SkFixedToScalar(dy));
+ }
+ }
+
+ if (fRec.fFrameWidth > 0 || fPathEffect != nullptr) {
+ // need the path in user-space, with only the point-size applied
+ // so that our stroking and effects will operate the same way they
+ // would if the user had extracted the path themselves, and then
+ // called drawPath
+ SkPath localPath;
+ SkMatrix matrix, inverse;
+
+ fRec.getMatrixFrom2x2(&matrix);
+ if (!matrix.invert(&inverse)) {
+ // assume fillPath and devPath are already empty.
+ return;
+ }
+ path.transform(inverse, &localPath);
+ // now localPath is only affected by the paint settings, and not the canvas matrix
+
+ SkStrokeRec rec(SkStrokeRec::kFill_InitStyle);
+
+ if (fRec.fFrameWidth > 0) {
+ rec.setStrokeStyle(fRec.fFrameWidth,
+ SkToBool(fRec.fFlags & kFrameAndFill_Flag));
+ // glyphs are always closed contours, so cap type is ignored,
+ // and we just pass something.
+ rec.setStrokeParams((SkPaint::Cap)fRec.fStrokeCap,
+ (SkPaint::Join)fRec.fStrokeJoin,
+ fRec.fMiterLimit);
+ }
+
+ if (fPathEffect) {
+ SkPath effectPath;
+ if (fPathEffect->filterPath(&effectPath, localPath, &rec, nullptr)) {
+ localPath.swap(effectPath);
+ }
+ }
+
+ if (rec.needToApply()) {
+ SkPath strokePath;
+ if (rec.applyToPath(&strokePath, localPath)) {
+ localPath.swap(strokePath);
+ }
+ }
+
+ // now return stuff to the caller
+ if (fillToDevMatrix) {
+ *fillToDevMatrix = matrix;
+ }
+ if (devPath) {
+ localPath.transform(matrix, devPath);
+ }
+ if (fillPath) {
+ fillPath->swap(localPath);
+ }
+ } else { // nothing tricky to do
+ if (fillToDevMatrix) {
+ fillToDevMatrix->reset();
+ }
+ if (devPath) {
+ if (fillPath == nullptr) {
+ devPath->swap(path);
+ } else {
+ *devPath = path;
+ }
+ }
+
+ if (fillPath) {
+ fillPath->swap(path);
+ }
+ }
+
+ if (devPath) {
+ devPath->updateBoundsCache();
+ }
+ if (fillPath) {
+ fillPath->updateBoundsCache();
+ }
+}
+
+
+void SkScalerContextRec::getMatrixFrom2x2(SkMatrix* dst) const {
+ dst->setAll(fPost2x2[0][0], fPost2x2[0][1], 0,
+ fPost2x2[1][0], fPost2x2[1][1], 0,
+ 0, 0, 1);
+}
+
+void SkScalerContextRec::getLocalMatrix(SkMatrix* m) const {
+ SkPaint::SetTextMatrix(m, fTextSize, fPreScaleX, fPreSkewX);
+}
+
+void SkScalerContextRec::getSingleMatrix(SkMatrix* m) const {
+ this->getLocalMatrix(m);
+
+ // now concat the device matrix
+ SkMatrix deviceMatrix;
+ this->getMatrixFrom2x2(&deviceMatrix);
+ m->postConcat(deviceMatrix);
+}
+
+bool SkScalerContextRec::computeMatrices(PreMatrixScale preMatrixScale, SkVector* s, SkMatrix* sA,
+ SkMatrix* GsA, SkMatrix* G_inv, SkMatrix* A_out)
+{
+ // A is the 'total' matrix.
+ SkMatrix A;
+ this->getSingleMatrix(&A);
+
+ // The caller may find the 'total' matrix useful when dealing directly with EM sizes.
+ if (A_out) {
+ *A_out = A;
+ }
+
+ // If the 'total' matrix is singular, set the 'scale' to something finite and zero the matrices.
+ // All underlying ports have issues with a zero text size, so keep the scale finite and rely on the zeroed matrices instead.
+
+ // Map the vectors [0,1], [1,0], [1,1] and [1,-1] (the EM) through the 'total' matrix.
+ // If the length of one of these vectors is less than 1/256 then an EM filling square will
+ // never affect any pixels.
+ SkVector diag[4] = { { A.getScaleX() , A.getSkewY() },
+ { A.getSkewX(), A.getScaleY() },
+ { A.getScaleX() + A.getSkewX(), A.getScaleY() + A.getSkewY() },
+ { A.getScaleX() - A.getSkewX(), A.getScaleY() - A.getSkewY() }, };
+ if (diag[0].lengthSqd() <= SK_ScalarNearlyZero * SK_ScalarNearlyZero ||
+ diag[1].lengthSqd() <= SK_ScalarNearlyZero * SK_ScalarNearlyZero ||
+ diag[2].lengthSqd() <= SK_ScalarNearlyZero * SK_ScalarNearlyZero ||
+ diag[3].lengthSqd() <= SK_ScalarNearlyZero * SK_ScalarNearlyZero)
+ {
+ s->fX = SK_Scalar1;
+ s->fY = SK_Scalar1;
+ sA->setScale(0, 0);
+ if (GsA) {
+ GsA->setScale(0, 0);
+ }
+ if (G_inv) {
+ G_inv->reset();
+ }
+ return false;
+ }
+
+ // GA is the matrix A with rotation removed.
+ SkMatrix GA;
+ bool skewedOrFlipped = A.getSkewX() || A.getSkewY() || A.getScaleX() < 0 || A.getScaleY() < 0;
+ if (skewedOrFlipped) {
+ // h is where A maps the horizontal baseline.
+ SkPoint h = SkPoint::Make(SK_Scalar1, 0);
+ A.mapPoints(&h, 1);
+
+ // G is the Givens Matrix for A (rotational matrix where GA[0][1] == 0).
+ SkMatrix G;
+ SkComputeGivensRotation(h, &G);
+
+ GA = G;
+ GA.preConcat(A);
+
+ // The 'remainingRotation' is G inverse, which is fairly simple since G is 2x2 rotational.
+ if (G_inv) {
+ G_inv->setAll(
+ G.get(SkMatrix::kMScaleX), -G.get(SkMatrix::kMSkewX), G.get(SkMatrix::kMTransX),
+ -G.get(SkMatrix::kMSkewY), G.get(SkMatrix::kMScaleY), G.get(SkMatrix::kMTransY),
+ G.get(SkMatrix::kMPersp0), G.get(SkMatrix::kMPersp1), G.get(SkMatrix::kMPersp2));
+ }
+ } else {
+ GA = A;
+ if (G_inv) {
+ G_inv->reset();
+ }
+ }
+
+ // At this point, given GA, create s.
+ switch (preMatrixScale) {
+ case kFull_PreMatrixScale:
+ s->fX = SkScalarAbs(GA.get(SkMatrix::kMScaleX));
+ s->fY = SkScalarAbs(GA.get(SkMatrix::kMScaleY));
+ break;
+ case kVertical_PreMatrixScale: {
+ SkScalar yScale = SkScalarAbs(GA.get(SkMatrix::kMScaleY));
+ s->fX = yScale;
+ s->fY = yScale;
+ break;
+ }
+ case kVerticalInteger_PreMatrixScale: {
+ SkScalar realYScale = SkScalarAbs(GA.get(SkMatrix::kMScaleY));
+ SkScalar intYScale = SkScalarRoundToScalar(realYScale);
+ if (intYScale == 0) {
+ intYScale = SK_Scalar1;
+ }
+ s->fX = intYScale;
+ s->fY = intYScale;
+ break;
+ }
+ }
+
+ // The 'remaining' matrix sA is the total matrix A without the scale.
+ if (!skewedOrFlipped && (
+ (kFull_PreMatrixScale == preMatrixScale) ||
+ (kVertical_PreMatrixScale == preMatrixScale && A.getScaleX() == A.getScaleY())))
+ {
+ // If GA == A and kFull_PreMatrixScale, sA is identity.
+ // If GA == A and kVertical_PreMatrixScale and A.scaleX == A.scaleY, sA is identity.
+ sA->reset();
+ } else if (!skewedOrFlipped && kVertical_PreMatrixScale == preMatrixScale) {
+ // If GA == A and kVertical_PreMatrixScale, sA.scaleY is SK_Scalar1.
+ sA->reset();
+ sA->setScaleX(A.getScaleX() / s->fY);
+ } else {
+ // TODO: like kVertical_PreMatrixScale, kVerticalInteger_PreMatrixScale with int scales.
+ *sA = A;
+ sA->preScale(SkScalarInvert(s->fX), SkScalarInvert(s->fY));
+ }
+
+ // The 'remainingWithoutRotation' matrix GsA is the non-rotational part of A without the scale.
+ if (GsA) {
+ *GsA = GA;
+ // G is rotational so reorders with the scale.
+ GsA->preScale(SkScalarInvert(s->fX), SkScalarInvert(s->fY));
+ }
+
+ return true;
+}
+
+SkAxisAlignment SkScalerContext::computeAxisAlignmentForHText() {
+ // Why fPost2x2 can be used here.
+ // getSingleMatrix multiplies in getLocalMatrix, which consists of
+ // * fTextSize (a scale, which has no effect)
+ // * fPreScaleX (a scale in x, which has no effect)
+ // * fPreSkewX (has no effect, but would on vertical text alignment).
+ // In other words, making the text bigger, stretching it along the
+ // horizontal axis, or fake italicizing it does not move the baseline.
+
+ if (0 == fRec.fPost2x2[1][0]) {
+ // The x axis is mapped onto the x axis.
+ return kX_SkAxisAlignment;
+ }
+ if (0 == fRec.fPost2x2[0][0]) {
+ // The x axis is mapped onto the y axis.
+ return kY_SkAxisAlignment;
+ }
+ return kNone_SkAxisAlignment;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkScalerContext_Empty : public SkScalerContext {
+public:
+ SkScalerContext_Empty(SkTypeface* typeface, const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : SkScalerContext(typeface, effects, desc) {}
+
+protected:
+ unsigned generateGlyphCount() override {
+ return 0;
+ }
+ uint16_t generateCharToGlyph(SkUnichar uni) override {
+ return 0;
+ }
+ void generateAdvance(SkGlyph* glyph) override {
+ glyph->zeroMetrics();
+ }
+ void generateMetrics(SkGlyph* glyph) override {
+ glyph->zeroMetrics();
+ }
+ void generateImage(const SkGlyph& glyph) override {}
+ void generatePath(const SkGlyph& glyph, SkPath* path) override {}
+ void generateFontMetrics(SkPaint::FontMetrics* metrics) override {
+ if (metrics) {
+ sk_bzero(metrics, sizeof(*metrics));
+ }
+ }
+};
+
+extern SkScalerContext* SkCreateColorScalerContext(const SkDescriptor* desc);
+
+SkScalerContext* SkTypeface::createScalerContext(const SkScalerContextEffects& effects,
+ const SkDescriptor* desc,
+ bool allowFailure) const {
+ SkScalerContext* c = this->onCreateScalerContext(effects, desc);
+
+ if (!c && !allowFailure) {
+ c = new SkScalerContext_Empty(const_cast<SkTypeface*>(this), effects, desc);
+ }
+ return c;
+}
diff --git a/gfx/skia/skia/src/core/SkScalerContext.h b/gfx/skia/skia/src/core/SkScalerContext.h
new file mode 100644
index 000000000..48ec0624c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScalerContext.h
@@ -0,0 +1,390 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkScalerContext_DEFINED
+#define SkScalerContext_DEFINED
+
+#include "SkMask.h"
+#include "SkMaskGamma.h"
+#include "SkMatrix.h"
+#include "SkPaint.h"
+#include "SkTypeface.h"
+
+class SkGlyph;
+class SkDescriptor;
+class SkMaskFilter;
+class SkPathEffect;
+class SkRasterizer;
+
+struct SkScalerContextEffects {
+ SkScalerContextEffects() : fPathEffect(nullptr), fMaskFilter(nullptr), fRasterizer(nullptr) {}
+ SkScalerContextEffects(SkPathEffect* pe, SkMaskFilter* mf, SkRasterizer* ra)
+ : fPathEffect(pe), fMaskFilter(mf), fRasterizer(ra) {}
+
+ SkPathEffect* fPathEffect;
+ SkMaskFilter* fMaskFilter;
+ SkRasterizer* fRasterizer;
+};
+
+enum SkAxisAlignment {
+ kNone_SkAxisAlignment,
+ kX_SkAxisAlignment,
+ kY_SkAxisAlignment
+};
+
+/*
+ * To allow this to be forward-declared, it must be its own typename, rather
+ * than a nested struct inside SkScalerContext (where it started).
+ */
+struct SkScalerContextRec {
+ uint32_t fFontID;
+ SkScalar fTextSize, fPreScaleX, fPreSkewX;
+ SkScalar fPost2x2[2][2];
+ SkScalar fFrameWidth, fMiterLimit;
+
+ //These describe the parameters to create (uniquely identify) the pre-blend.
+ uint32_t fLumBits;
+ uint8_t fDeviceGamma; //2.6 fixed point, gamma in (0.0, 4.0); 0.0 for sRGB
+ uint8_t fPaintGamma; //2.6 fixed point, gamma in (0.0, 4.0); 0.0 for sRGB
+ uint8_t fContrast; //0.8 fixed point (255 == 1.0), [0.0, 1.0] artificial contrast
+ uint8_t fReservedAlign;
+
+ SkScalar getDeviceGamma() const {
+ return SkIntToScalar(fDeviceGamma) / (1 << 6);
+ }
+ void setDeviceGamma(SkScalar dg) {
+ SkASSERT(0 <= dg && dg < SkIntToScalar(4));
+ fDeviceGamma = SkScalarFloorToInt(dg * (1 << 6));
+ }
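+ // For example (illustrative only): a device gamma of 2.2 is stored as
+ // SkScalarFloorToInt(2.2 * 64) == 140 and reads back as 140 / 64.0 == 2.1875,
+ // so nearby gammas quantize to the same stored value.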
+
+ SkScalar getPaintGamma() const {
+ return SkIntToScalar(fPaintGamma) / (1 << 6);
+ }
+ void setPaintGamma(SkScalar pg) {
+ SkASSERT(0 <= pg && pg < SkIntToScalar(4));
+ fPaintGamma = SkScalarFloorToInt(pg * (1 << 6));
+ }
+
+ SkScalar getContrast() const {
+ return SkIntToScalar(fContrast) / ((1 << 8) - 1);
+ }
+ void setContrast(SkScalar c) {
+ SkASSERT(0 <= c && c <= SK_Scalar1);
+ fContrast = SkScalarRoundToInt(c * ((1 << 8) - 1));
+ }
+
+ /**
+ * Causes the luminance color to be ignored, and the paint and device
+ * gamma to be effectively 1.0
+ */
+ void ignoreGamma() {
+ setLuminanceColor(SK_ColorTRANSPARENT);
+ setPaintGamma(SK_Scalar1);
+ setDeviceGamma(SK_Scalar1);
+ }
+
+ /**
+ * Causes the luminance color and contrast to be ignored, and the
+ * paint and device gamma to be effectively 1.0.
+ */
+ void ignorePreBlend() {
+ ignoreGamma();
+ setContrast(0);
+ }
+
+ uint8_t fMaskFormat;
+ uint8_t fStrokeJoin : 4;
+ uint8_t fStrokeCap : 4;
+ uint16_t fFlags;
+ // Warning: when adding members note that the size of this structure
+ // must be a multiple of 4. SkDescriptor requires that its arguments be
+ // multiples of four and this structure is put in an SkDescriptor in
+ // SkPaint::MakeRec.
+
+ void getMatrixFrom2x2(SkMatrix*) const;
+ void getLocalMatrix(SkMatrix*) const;
+ void getSingleMatrix(SkMatrix*) const;
+
+ /** The kind of scale which will be applied by the underlying port (pre-matrix). */
+ enum PreMatrixScale {
+ kFull_PreMatrixScale, // The underlying port can apply both x and y scale.
+ kVertical_PreMatrixScale, // The underlying port can only apply a y scale.
+ kVerticalInteger_PreMatrixScale // The underlying port can only apply an integer y scale.
+ };
+ /**
+ * Compute useful matrices for use with sizing in underlying libraries.
+ *
+ * There are two kinds of text size, a 'requested/logical size' which is like asking for size
+ * '12' and a 'real' size which is the size after the matrix is applied. The matrices produced
+ * by this method are based on the 'real' size. This method effectively finds the total device
+ * matrix and decomposes it in various ways.
+ *
+ * The most useful decomposition is into 'scale' and 'remaining'. The 'scale' is applied first
+ * and then the 'remaining' to fully apply the total matrix. This decomposition is useful when
+ * the text size ('scale') may have meaning apart from the total matrix. This is true when
+ * hinting, and sometimes true for other properties as well.
+ *
+ * The second (optional) decomposition is of 'remaining' into a non-rotational part
+ * 'remainingWithoutRotation' and a rotational part 'remainingRotation'. The 'scale' is applied
+ * first, then 'remainingWithoutRotation', then 'remainingRotation' to fully apply the total
+ * matrix. This decomposition is helpful when only horizontal metrics can be trusted, so the
+ * 'scale' and 'remainingWithoutRotation' will be handled by the underlying library, but
+ * the final rotation 'remainingRotation' will be handled manually.
+ *
+ * The 'total' matrix is also (optionally) available. This is useful in cases where the
+ * underlying library will not be used, often when working directly with font data.
+ *
+ * The parameters 'scale' and 'remaining' are required, the other pointers may be nullptr.
+ *
+ * @param preMatrixScale the kind of scale to extract from the total matrix.
+ * @param scale the scale extracted from the total matrix (both values positive).
+ * @param remaining apply after scale to apply the total matrix.
+ * @param remainingWithoutRotation apply after scale to apply the total matrix sans rotation.
+ * @param remainingRotation apply after remainingWithoutRotation to apply the total matrix.
+ * @param total the total matrix.
+ * @return false if the matrix was singular. The output will be valid but not invertible.
+ */
+ bool computeMatrices(PreMatrixScale preMatrixScale,
+ SkVector* scale, SkMatrix* remaining,
+ SkMatrix* remainingWithoutRotation = nullptr,
+ SkMatrix* remainingRotation = nullptr,
+ SkMatrix* total = nullptr);
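+
+ /*
+ * Typical call pattern (sketch only; the 'rec' instance and variable names are illustrative):
+ *
+ *     SkVector scale;
+ *     SkMatrix remaining;
+ *     if (!rec.computeMatrices(SkScalerContextRec::kVertical_PreMatrixScale,
+ *                              &scale, &remaining)) {
+ *         // singular total matrix: scale is (1,1) and 'remaining' is zeroed
+ *     }
+ *     // size the face from scale.fY, then apply 'remaining' to outlines and metrics
+ */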
+
+ inline SkPaint::Hinting getHinting() const;
+ inline void setHinting(SkPaint::Hinting);
+
+ SkMask::Format getFormat() const {
+ return static_cast<SkMask::Format>(fMaskFormat);
+ }
+
+ SkColor getLuminanceColor() const {
+ return fLumBits;
+ }
+
+ void setLuminanceColor(SkColor c) {
+ fLumBits = c;
+ }
+};
+
+//The following typedef hides from the rest of the implementation the number of
+//most significant bits to consider when creating mask gamma tables. Two bits
+//per channel was chosen as a balance between fidelity (more bits) and cache
+//sizes (fewer bits). Three bits per channel was chosen when #303942 (used by
+//the Chrome UI) turned out too green.
+typedef SkTMaskGamma<3, 3, 3> SkMaskGamma;
+
+class SkScalerContext {
+public:
+ typedef SkScalerContextRec Rec;
+
+ enum Flags {
+ kFrameAndFill_Flag = 0x0001,
+ kDevKernText_Flag = 0x0002,
+ kEmbeddedBitmapText_Flag = 0x0004,
+ kEmbolden_Flag = 0x0008,
+ kSubpixelPositioning_Flag = 0x0010,
+ kForceAutohinting_Flag = 0x0020, // Use auto instead of bytecode hinting if hinting.
+ kVertical_Flag = 0x0040,
+
+ // together, these two flags result in a two-bit value which matches
+ // up with the SkPaint::Hinting enum.
+ kHinting_Shift = 7, // to shift into the other flags above
+ kHintingBit1_Flag = 0x0080,
+ kHintingBit2_Flag = 0x0100,
+
+ // Pixel geometry information.
+ // only meaningful if fMaskFormat is kLCD16
+ kLCD_Vertical_Flag = 0x0200, // else Horizontal
+ kLCD_BGROrder_Flag = 0x0400, // else RGB order
+
+ // Generate A8 from LCD source (for GDI and CoreGraphics).
+ // only meaningful if fMaskFormat is kA8
+ kGenA8FromLCD_Flag = 0x0800, // could be 0x200 (bit meaning dependent on fMaskFormat)
+ };
+
+ // computed values
+ enum {
+ kHinting_Mask = kHintingBit1_Flag | kHintingBit2_Flag,
+ };
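+
+ // e.g. SkPaint::kSlight_Hinting (1) packs to kHintingBit1_Flag (0x0080) and
+ // SkPaint::kFull_Hinting (3) to both bits (0x0180); see get/setHinting() below.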
+
+ SkScalerContext(SkTypeface*, const SkScalerContextEffects&, const SkDescriptor*);
+ virtual ~SkScalerContext();
+
+ SkTypeface* getTypeface() const { return fTypeface.get(); }
+
+ SkMask::Format getMaskFormat() const {
+ return (SkMask::Format)fRec.fMaskFormat;
+ }
+
+ bool isSubpixel() const {
+ return SkToBool(fRec.fFlags & kSubpixelPositioning_Flag);
+ }
+
+ bool isVertical() const {
+ return SkToBool(fRec.fFlags & kVertical_Flag);
+ }
+
+ /** Return the corresponding glyph for the specified unichar. Since contexts
+ may be chained (under the hood), the glyphID that is returned may in
+ fact correspond to a different font/context. In that case, we use the
+ base-glyph-count to know how to translate back into local glyph space.
+ */
+ uint16_t charToGlyphID(SkUnichar uni) {
+ return generateCharToGlyph(uni);
+ }
+
+ /** Map the glyphID to its glyph index, and then to its char code. Unmapped
+ glyphs return zero.
+ */
+ SkUnichar glyphIDToChar(uint16_t glyphID) {
+ return (glyphID < getGlyphCount()) ? generateGlyphToChar(glyphID) : 0;
+ }
+
+ unsigned getGlyphCount() { return this->generateGlyphCount(); }
+ void getAdvance(SkGlyph*);
+ void getMetrics(SkGlyph*);
+ void getImage(const SkGlyph&);
+ void getPath(const SkGlyph&, SkPath*);
+ void getFontMetrics(SkPaint::FontMetrics*);
+
+ /** Return the size in bytes of the associated gamma lookup table
+ */
+ static size_t GetGammaLUTSize(SkScalar contrast, SkScalar paintGamma, SkScalar deviceGamma,
+ int* width, int* height);
+
+ /** Get the associated gamma lookup table. The 'data' pointer must point to pre-allocated
+ memory, with size in bytes greater than or equal to the return value of getGammaLUTSize().
+ */
+ static void GetGammaLUTData(SkScalar contrast, SkScalar paintGamma, SkScalar deviceGamma,
+ void* data);
+
+ static void MakeRec(const SkPaint&, const SkSurfaceProps* surfaceProps,
+ const SkMatrix*, Rec* rec);
+ static inline void PostMakeRec(const SkPaint&, Rec*);
+
+ static SkMaskGamma::PreBlend GetMaskPreBlend(const Rec& rec);
+
+ const Rec& getRec() const { return fRec; }
+
+ SkScalerContextEffects getEffects() const {
+ return { fPathEffect.get(), fMaskFilter.get(), fRasterizer.get() };
+ }
+
+ /**
+ * Return the axis (if any) that the baseline for horizontal text should land on.
+ * As an example, the identity matrix will return kX_SkAxisAlignment
+ */
+ SkAxisAlignment computeAxisAlignmentForHText();
+
+protected:
+ Rec fRec;
+
+ /** Generates the contents of glyph.fAdvanceX and glyph.fAdvanceY.
+ * May call getMetrics if that would be just as fast.
+ */
+ virtual void generateAdvance(SkGlyph* glyph) = 0;
+
+ /** Generates the contents of glyph.fWidth, fHeight, fTop, fLeft,
+ * as well as fAdvanceX and fAdvanceY if not already set.
+ *
+ * TODO: fMaskFormat is set by getMetrics later; cannot be set here.
+ */
+ virtual void generateMetrics(SkGlyph* glyph) = 0;
+
+ /** Generates the contents of glyph.fImage.
+ * When called, glyph.fImage will be pointing to a pre-allocated,
+ * uninitialized region of memory of size glyph.computeImageSize().
+ * This method may change glyph.fMaskFormat if the new image size is
+ * less than or equal to the old image size.
+ *
+ * Because glyph.computeImageSize() will determine the size of fImage,
+ * generateMetrics will be called before generateImage.
+ */
+ virtual void generateImage(const SkGlyph& glyph) = 0;
+
+ /** Sets the passed path to the glyph outline.
+ * If this cannot be done the path is set to empty;
+ * this is indistinguishable from a glyph with an empty path.
+ * This does not set glyph.fPath.
+ *
+ * TODO: path is always glyph.fPath, no reason to pass separately.
+ */
+ virtual void generatePath(const SkGlyph& glyph, SkPath* path) = 0;
+
+ /** Retrieves font metrics. */
+ virtual void generateFontMetrics(SkPaint::FontMetrics*) = 0;
+
+ /** Returns the number of glyphs in the font. */
+ virtual unsigned generateGlyphCount() = 0;
+
+ /** Returns the glyph id for the given unichar.
+ * If there is no 1:1 mapping from the unichar to a glyph id, returns 0.
+ */
+ virtual uint16_t generateCharToGlyph(SkUnichar unichar) = 0;
+
+ /** Returns the unichar for the given glyph id.
+ * If there is no 1:1 mapping from the glyph id to a unichar, returns 0.
+ * The default implementation always returns 0, indicating failure.
+ */
+ virtual SkUnichar generateGlyphToChar(uint16_t glyphId);
+
+ void forceGenerateImageFromPath() { fGenerateImageFromPath = true; }
+ void forceOffGenerateImageFromPath() { fGenerateImageFromPath = false; }
+
+private:
+ friend class SkRandomScalerContext; // For debug purposes
+
+ // never null
+ sk_sp<SkTypeface> fTypeface;
+
+ // optional objects, which may be null
+ sk_sp<SkPathEffect> fPathEffect;
+ sk_sp<SkMaskFilter> fMaskFilter;
+ sk_sp<SkRasterizer> fRasterizer;
+
+ // if this is set, we draw the image from a path, rather than
+ // calling generateImage.
+ bool fGenerateImageFromPath;
+
+ void internalGetPath(const SkGlyph& glyph, SkPath* fillPath,
+ SkPath* devPath, SkMatrix* fillToDevMatrix);
+
+ // returns the right context from our linked list for this char. If no match
+ // is found it returns nullptr. If a match is found then the glyphID param is
+ // set to the glyphID that maps to the provided char.
+ SkScalerContext* getContextFromChar(SkUnichar uni, uint16_t* glyphID);
+
+ // SkMaskGamma::PreBlend converts linear masks to gamma correcting masks.
+protected:
+ // Visible to subclasses so that generateImage can apply the pre-blend directly.
+ const SkMaskGamma::PreBlend fPreBlend;
+private:
+ // When there is a filter, previous steps must create a linear mask
+ // and the pre-blend applied as a final step.
+ const SkMaskGamma::PreBlend fPreBlendForFilter;
+};
+
+#define kRec_SkDescriptorTag SkSetFourByteTag('s', 'r', 'e', 'c')
+#define kPathEffect_SkDescriptorTag SkSetFourByteTag('p', 't', 'h', 'e')
+#define kMaskFilter_SkDescriptorTag SkSetFourByteTag('m', 's', 'k', 'f')
+#define kRasterizer_SkDescriptorTag SkSetFourByteTag('r', 'a', 's', 't')
+
+///////////////////////////////////////////////////////////////////////////////
+
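+// The hinting level is packed into the Rec's fFlags using kHinting_Shift and kHinting_Mask.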
+SkPaint::Hinting SkScalerContextRec::getHinting() const {
+ unsigned hint = (fFlags & SkScalerContext::kHinting_Mask) >>
+ SkScalerContext::kHinting_Shift;
+ return static_cast<SkPaint::Hinting>(hint);
+}
+
+void SkScalerContextRec::setHinting(SkPaint::Hinting hinting) {
+ fFlags = (fFlags & ~SkScalerContext::kHinting_Mask) |
+ (hinting << SkScalerContext::kHinting_Shift);
+}
+
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkScan.cpp b/gfx/skia/skia/src/core/SkScan.cpp
new file mode 100644
index 000000000..7fce3f172
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkScan.h"
+#include "SkBlitter.h"
+#include "SkRasterClip.h"
+
+static inline void blitrect(SkBlitter* blitter, const SkIRect& r) {
+ blitter->blitRect(r.fLeft, r.fTop, r.width(), r.height());
+}
+
+void SkScan::FillIRect(const SkIRect& r, const SkRegion* clip,
+ SkBlitter* blitter) {
+ if (!r.isEmpty()) {
+ if (clip) {
+ if (clip->isRect()) {
+ const SkIRect& clipBounds = clip->getBounds();
+
+ if (clipBounds.contains(r)) {
+ blitrect(blitter, r);
+ } else {
+ SkIRect rr = r;
+ if (rr.intersect(clipBounds)) {
+ blitrect(blitter, rr);
+ }
+ }
+ } else {
+ SkRegion::Cliperator cliper(*clip, r);
+ const SkIRect& rr = cliper.rect();
+
+ while (!cliper.done()) {
+ blitrect(blitter, rr);
+ cliper.next();
+ }
+ }
+ } else {
+ blitrect(blitter, r);
+ }
+ }
+}
+
+void SkScan::FillXRect(const SkXRect& xr, const SkRegion* clip,
+ SkBlitter* blitter) {
+ SkIRect r;
+
+ XRect_round(xr, &r);
+ SkScan::FillIRect(r, clip, blitter);
+}
+
+void SkScan::FillRect(const SkRect& r, const SkRegion* clip,
+ SkBlitter* blitter) {
+ SkIRect ir;
+
+ r.round(&ir);
+ SkScan::FillIRect(ir, clip, blitter);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkScan::FillIRect(const SkIRect& r, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isEmpty() || r.isEmpty()) {
+ return;
+ }
+
+ if (clip.isBW()) {
+ FillIRect(r, &clip.bwRgn(), blitter);
+ return;
+ }
+
+ SkAAClipBlitterWrapper wrapper(clip, blitter);
+ FillIRect(r, &wrapper.getRgn(), wrapper.getBlitter());
+}
+
+void SkScan::FillXRect(const SkXRect& xr, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isEmpty() || xr.isEmpty()) {
+ return;
+ }
+
+ if (clip.isBW()) {
+ FillXRect(xr, &clip.bwRgn(), blitter);
+ return;
+ }
+
+ SkAAClipBlitterWrapper wrapper(clip, blitter);
+ FillXRect(xr, &wrapper.getRgn(), wrapper.getBlitter());
+}
+
+void SkScan::FillRect(const SkRect& r, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isEmpty() || r.isEmpty()) {
+ return;
+ }
+
+ if (clip.isBW()) {
+ FillRect(r, &clip.bwRgn(), blitter);
+ return;
+ }
+
+ SkAAClipBlitterWrapper wrapper(clip, blitter);
+ FillRect(r, &wrapper.getRgn(), wrapper.getBlitter());
+}
diff --git a/gfx/skia/skia/src/core/SkScan.h b/gfx/skia/skia/src/core/SkScan.h
new file mode 100644
index 000000000..4aa8e4439
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkScan_DEFINED
+#define SkScan_DEFINED
+
+#include "SkFixed.h"
+#include "SkRect.h"
+
+class SkRasterClip;
+class SkRegion;
+class SkBlitter;
+class SkPath;
+
+/** Defines a fixed-point rectangle, identical to the integer SkIRect, but its
+ coordinates are treated as SkFixed rather than int32_t.
+*/
+typedef SkIRect SkXRect;
+
+class SkScan {
+public:
+ /*
+ * Draws count-1 line segments, one at a time:
+ * line(pts[0], pts[1])
+ * line(pts[1], pts[2])
+ * line(......, pts[count - 1])
+ */
+ typedef void (*HairRgnProc)(const SkPoint[], int count, const SkRegion*, SkBlitter*);
+ typedef void (*HairRCProc)(const SkPoint[], int count, const SkRasterClip&, SkBlitter*);
+
+ static void FillPath(const SkPath&, const SkIRect&, SkBlitter*);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // rasterclip
+
+ static void FillIRect(const SkIRect&, const SkRasterClip&, SkBlitter*);
+ static void FillXRect(const SkXRect&, const SkRasterClip&, SkBlitter*);
+ static void FillRect(const SkRect&, const SkRasterClip&, SkBlitter*);
+ static void AntiFillRect(const SkRect&, const SkRasterClip&, SkBlitter*);
+ static void AntiFillXRect(const SkXRect&, const SkRasterClip&, SkBlitter*);
+ static void FillPath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void AntiFillPath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void FrameRect(const SkRect&, const SkPoint& strokeSize,
+ const SkRasterClip&, SkBlitter*);
+ static void AntiFrameRect(const SkRect&, const SkPoint& strokeSize,
+ const SkRasterClip&, SkBlitter*);
+ static void FillTriangle(const SkPoint pts[], const SkRasterClip&, SkBlitter*);
+ static void HairLine(const SkPoint[], int count, const SkRasterClip&, SkBlitter*);
+ static void AntiHairLine(const SkPoint[], int count, const SkRasterClip&, SkBlitter*);
+ static void HairRect(const SkRect&, const SkRasterClip&, SkBlitter*);
+ static void AntiHairRect(const SkRect&, const SkRasterClip&, SkBlitter*);
+ static void HairPath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void AntiHairPath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void HairSquarePath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void AntiHairSquarePath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void HairRoundPath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void AntiHairRoundPath(const SkPath&, const SkRasterClip&, SkBlitter*);
+
+private:
+ friend class SkAAClip;
+ friend class SkRegion;
+
+ static void FillIRect(const SkIRect&, const SkRegion* clip, SkBlitter*);
+ static void FillXRect(const SkXRect&, const SkRegion* clip, SkBlitter*);
+ static void FillRect(const SkRect&, const SkRegion* clip, SkBlitter*);
+ static void AntiFillRect(const SkRect&, const SkRegion* clip, SkBlitter*);
+ static void AntiFillXRect(const SkXRect&, const SkRegion*, SkBlitter*);
+ static void FillPath(const SkPath&, const SkRegion& clip, SkBlitter*);
+ static void AntiFillPath(const SkPath&, const SkRegion& clip, SkBlitter*,
+ bool forceRLE = false);
+ static void FillTriangle(const SkPoint pts[], const SkRegion*, SkBlitter*);
+
+ static void AntiFrameRect(const SkRect&, const SkPoint& strokeSize,
+ const SkRegion*, SkBlitter*);
+ static void HairLineRgn(const SkPoint[], int count, const SkRegion*, SkBlitter*);
+ static void AntiHairLineRgn(const SkPoint[], int count, const SkRegion*, SkBlitter*);
+};
+
+/** Assign an SkXRect from a SkIRect, by promoting the src rect's coordinates
+ from int to SkFixed. Does not check for overflow if the src coordinates
+ exceed 32K.
+*/
+static inline void XRect_set(SkXRect* xr, const SkIRect& src) {
+ xr->fLeft = SkIntToFixed(src.fLeft);
+ xr->fTop = SkIntToFixed(src.fTop);
+ xr->fRight = SkIntToFixed(src.fRight);
+ xr->fBottom = SkIntToFixed(src.fBottom);
+}
+
+/** Assign an SkXRect from a SkRect, by promoting the src rect's coordinates
+ from SkScalar to SkFixed. Does not check for overflow if the src coordinates
+ exceed 32K.
+*/
+static inline void XRect_set(SkXRect* xr, const SkRect& src) {
+ xr->fLeft = SkScalarToFixed(src.fLeft);
+ xr->fTop = SkScalarToFixed(src.fTop);
+ xr->fRight = SkScalarToFixed(src.fRight);
+ xr->fBottom = SkScalarToFixed(src.fBottom);
+}
+
+/** Round the SkXRect coordinates, and store the result in the SkIRect.
+*/
+static inline void XRect_round(const SkXRect& xr, SkIRect* dst) {
+ dst->fLeft = SkFixedRoundToInt(xr.fLeft);
+ dst->fTop = SkFixedRoundToInt(xr.fTop);
+ dst->fRight = SkFixedRoundToInt(xr.fRight);
+ dst->fBottom = SkFixedRoundToInt(xr.fBottom);
+}
+
+/** Round the SkXRect coordinates out (i.e. use floor for left/top, and ceiling
+ for right/bottom), and store the result in the SkIRect.
+*/
+static inline void XRect_roundOut(const SkXRect& xr, SkIRect* dst) {
+ dst->fLeft = SkFixedFloorToInt(xr.fLeft);
+ dst->fTop = SkFixedFloorToInt(xr.fTop);
+ dst->fRight = SkFixedCeilToInt(xr.fRight);
+ dst->fBottom = SkFixedCeilToInt(xr.fBottom);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkScanPriv.h b/gfx/skia/skia/src/core/SkScanPriv.h
new file mode 100644
index 000000000..798cae6d0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScanPriv.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkScanPriv_DEFINED
+#define SkScanPriv_DEFINED
+
+#include "SkScan.h"
+#include "SkBlitter.h"
+
+class SkScanClipper {
+public:
+ SkScanClipper(SkBlitter* blitter, const SkRegion* clip, const SkIRect& bounds,
+ bool skipRejectTest = false);
+
+ SkBlitter* getBlitter() const { return fBlitter; }
+ const SkIRect* getClipRect() const { return fClipRect; }
+
+private:
+ SkRectClipBlitter fRectBlitter;
+ SkRgnClipBlitter fRgnBlitter;
+ SkBlitter* fBlitter;
+ const SkIRect* fClipRect;
+};
+
+// clipRect == null means path is entirely inside the clip
+void sk_fill_path(const SkPath& path, const SkIRect* clipRect,
+ SkBlitter* blitter, int start_y, int stop_y, int shiftEdgesUp,
+ const SkRegion& clipRgn);
+
+// blit the rects above and below avoid, clipped to clip
+void sk_blit_above(SkBlitter*, const SkIRect& avoid, const SkRegion& clip);
+void sk_blit_below(SkBlitter*, const SkIRect& avoid, const SkRegion& clip);
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkScan_AntiPath.cpp b/gfx/skia/skia/src/core/SkScan_AntiPath.cpp
new file mode 100644
index 000000000..b41a99c89
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan_AntiPath.cpp
@@ -0,0 +1,767 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkScanPriv.h"
+#include "SkPath.h"
+#include "SkMatrix.h"
+#include "SkBlitter.h"
+#include "SkRegion.h"
+#include "SkAntiRun.h"
+
+#define SHIFT 2
+#define SCALE (1 << SHIFT)
+#define MASK (SCALE - 1)
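+// With SHIFT == 2, each destination pixel is covered by a 4x4 grid of
+// supersamples (SCALE samples per axis).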
+
+/** @file
+ We have two techniques for capturing the output of the supersampler:
+ - SUPERMASK, which records a large mask-bitmap
+ this is often faster for small, complex objects
+ - RLE, which records a rle-encoded scanline
+ this is often faster for large objects with big spans
+
+ These blitters use two coordinate systems:
+ - destination coordinates, scale equal to the output - often
+ abbreviated with 'i' or 'I' in variable names
+ - supersampled coordinates, scale equal to the output * SCALE
+ */
+
+//#define FORCE_SUPERMASK
+//#define FORCE_RLE
+
+///////////////////////////////////////////////////////////////////////////////
+
+/// Base class for a single-pass supersampled blitter.
+class BaseSuperBlitter : public SkBlitter {
+public:
+ BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
+ const SkRegion& clip, bool isInverse);
+
+ /// Must be explicitly defined on subclasses.
+ virtual void blitAntiH(int x, int y, const SkAlpha antialias[],
+ const int16_t runs[]) override {
+ SkDEBUGFAIL("How did I get here?");
+ }
+ /// May not be called on BaseSuperBlitter because it blits out of order.
+ void blitV(int x, int y, int height, SkAlpha alpha) override {
+ SkDEBUGFAIL("How did I get here?");
+ }
+
+protected:
+ SkBlitter* fRealBlitter;
+ /// Current y coordinate, in destination coordinates.
+ int fCurrIY;
+ /// Widest row of region to be blitted, in destination coordinates.
+ int fWidth;
+ /// Leftmost x coordinate in any row, in destination coordinates.
+ int fLeft;
+ /// Leftmost x coordinate in any row, in supersampled coordinates.
+ int fSuperLeft;
+
+ SkDEBUGCODE(int fCurrX;)
+ /// Current y coordinate in supersampled coordinates.
+ int fCurrY;
+ /// Initial y coordinate (top of bounds).
+ int fTop;
+
+ SkIRect fSectBounds;
+};
+
+BaseSuperBlitter::BaseSuperBlitter(SkBlitter* realBlit, const SkIRect& ir, const SkRegion& clip,
+ bool isInverse) {
+ fRealBlitter = realBlit;
+
+ SkIRect sectBounds;
+ if (isInverse) {
+ // We use the clip bounds instead of the ir, since we may be asked to
+ // draw outside of the rect when we're an inverse filltype
+ sectBounds = clip.getBounds();
+ } else {
+ if (!sectBounds.intersect(ir, clip.getBounds())) {
+ sectBounds.setEmpty();
+ }
+ }
+
+ const int left = sectBounds.left();
+ const int right = sectBounds.right();
+
+ fLeft = left;
+ fSuperLeft = SkLeftShift(left, SHIFT);
+ fWidth = right - left;
+ fTop = sectBounds.top();
+ fCurrIY = fTop - 1;
+ fCurrY = SkLeftShift(fTop, SHIFT) - 1;
+
+ SkDEBUGCODE(fCurrX = -1;)
+}
+
+/// Run-length-encoded supersampling antialiased blitter.
+class SuperBlitter : public BaseSuperBlitter {
+public:
+ SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion& clip, bool isInverse);
+
+ virtual ~SuperBlitter() {
+ this->flush();
+ }
+
+ /// Once fRuns contains a complete supersampled row, flush() blits
+ /// it out through the wrapped blitter.
+ void flush();
+
+ /// Blits a row of pixels, with location and width specified
+ /// in supersampled coordinates.
+ void blitH(int x, int y, int width) override;
+ /// Blits a rectangle of pixels, with location and size specified
+ /// in supersampled coordinates.
+ void blitRect(int x, int y, int width, int height) override;
+
+private:
+ // The next three variables are used to track a circular buffer that
+ // contains the values used in SkAlphaRuns. These variables should only
+ // ever be updated in advanceRuns(), and fRuns should always point to
+ // a valid SkAlphaRuns...
+ int fRunsToBuffer;
+ void* fRunsBuffer;
+ int fCurrentRun;
+ SkAlphaRuns fRuns;
+
+ // extra one to store the zero at the end
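+ // Layout: (fWidth + 1) int16_t run entries, then (fWidth + 1) alpha bytes
+ // rounded up to whole int16_t units, which is the (fWidth + 2)/2 term.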
+ int getRunsSz() const { return (fWidth + 1 + (fWidth + 2)/2) * sizeof(int16_t); }
+
+ // This function updates the fRuns variable to point to the next buffer space
+ // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
+ // and resets fRuns to point to an empty scanline.
+ void advanceRuns() {
+ const size_t kRunsSz = this->getRunsSz();
+ fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
+ fRuns.fRuns = reinterpret_cast<int16_t*>(
+ reinterpret_cast<uint8_t*>(fRunsBuffer) + fCurrentRun * kRunsSz);
+ fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
+ fRuns.reset(fWidth);
+ }
+
+ int fOffsetX;
+};
+
+SuperBlitter::SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion& clip,
+ bool isInverse)
+ : BaseSuperBlitter(realBlitter, ir, clip, isInverse)
+{
+ fRunsToBuffer = realBlitter->requestRowsPreserved();
+ fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
+ fCurrentRun = -1;
+
+ this->advanceRuns();
+
+ fOffsetX = 0;
+}
+
+void SuperBlitter::flush() {
+ if (fCurrIY >= fTop) {
+
+ SkASSERT(fCurrentRun < fRunsToBuffer);
+ if (!fRuns.empty()) {
+ // SkDEBUGCODE(fRuns.dump();)
+ fRealBlitter->blitAntiH(fLeft, fCurrIY, fRuns.fAlpha, fRuns.fRuns);
+ this->advanceRuns();
+ fOffsetX = 0;
+ }
+
+ fCurrIY = fTop - 1;
+ SkDEBUGCODE(fCurrX = -1;)
+ }
+}
+
+/** coverage_to_partial_alpha() is being used by SkAlphaRuns, which
+ *accumulates* SCALE pixels worth of "alpha" in [0,(256/SCALE)]
+ to produce a final value in [0, 255] and handles clamping 256->255
+ itself, with the same (alpha - (alpha >> 8)) correction as
+ coverage_to_exact_alpha().
+*/
+static inline int coverage_to_partial_alpha(int aa) {
+ aa <<= 8 - 2*SHIFT;
+ return aa;
+}
+
+/** coverage_to_exact_alpha() is being used by our blitter, which wants
+ a final value in [0, 255].
+*/
+static inline int coverage_to_exact_alpha(int aa) {
+ int alpha = (256 >> SHIFT) * aa;
+ // clamp 256->255
+ return alpha - (alpha >> 8);
+}
+
+void SuperBlitter::blitH(int x, int y, int width) {
+ SkASSERT(width > 0);
+
+ int iy = y >> SHIFT;
+ SkASSERT(iy >= fCurrIY);
+
+ x -= fSuperLeft;
+ // hack, until I figure out why my cubics (I think) go beyond the bounds
+ if (x < 0) {
+ width += x;
+ x = 0;
+ }
+
+#ifdef SK_DEBUG
+ SkASSERT(y != fCurrY || x >= fCurrX);
+#endif
+ SkASSERT(y >= fCurrY);
+ if (fCurrY != y) {
+ fOffsetX = 0;
+ fCurrY = y;
+ }
+
+ if (iy != fCurrIY) { // new scanline
+ this->flush();
+ fCurrIY = iy;
+ }
+
+ int start = x;
+ int stop = x + width;
+
+ SkASSERT(start >= 0 && stop > start);
+ // integer-pixel-aligned ends of blit, rounded out
+ int fb = start & MASK;
+ int fe = stop & MASK;
+ int n = (stop >> SHIFT) - (start >> SHIFT) - 1;
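+ // fb/fe are the supersample offsets within the first/last destination pixels;
+ // n counts the fully covered pixels in between; all three are adjusted below
+ // before being converted into coverage values.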
+
+ if (n < 0) {
+ fb = fe - fb;
+ n = 0;
+ fe = 0;
+ } else {
+ if (fb == 0) {
+ n += 1;
+ } else {
+ fb = SCALE - fb;
+ }
+ }
+
+ fOffsetX = fRuns.add(x >> SHIFT, coverage_to_partial_alpha(fb),
+ n, coverage_to_partial_alpha(fe),
+ (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT),
+ fOffsetX);
+
+#ifdef SK_DEBUG
+ fRuns.assertValid(y & MASK, (1 << (8 - SHIFT)));
+ fCurrX = x + width;
+#endif
+}
+
+#if 0 // UNUSED
+static void set_left_rite_runs(SkAlphaRuns& runs, int ileft, U8CPU leftA,
+ int n, U8CPU riteA) {
+ SkASSERT(leftA <= 0xFF);
+ SkASSERT(riteA <= 0xFF);
+
+ int16_t* run = runs.fRuns;
+ uint8_t* aa = runs.fAlpha;
+
+ if (ileft > 0) {
+ run[0] = ileft;
+ aa[0] = 0;
+ run += ileft;
+ aa += ileft;
+ }
+
+ SkASSERT(leftA < 0xFF);
+ if (leftA > 0) {
+ *run++ = 1;
+ *aa++ = leftA;
+ }
+
+ if (n > 0) {
+ run[0] = n;
+ aa[0] = 0xFF;
+ run += n;
+ aa += n;
+ }
+
+ SkASSERT(riteA < 0xFF);
+ if (riteA > 0) {
+ *run++ = 1;
+ *aa++ = riteA;
+ }
+ run[0] = 0;
+}
+#endif
+
+void SuperBlitter::blitRect(int x, int y, int width, int height) {
+ SkASSERT(width > 0);
+ SkASSERT(height > 0);
+
+ // blit leading rows
+ while ((y & MASK)) {
+ this->blitH(x, y++, width);
+ if (--height <= 0) {
+ return;
+ }
+ }
+ SkASSERT(height > 0);
+
+ // Since this is a rect, instead of blitting supersampled rows one at a
+ // time and then resolving to the destination canvas, we can blit
+ // directly to the destination canvas one row per SCALE supersampled rows.
+ int start_y = y >> SHIFT;
+ int stop_y = (y + height) >> SHIFT;
+ int count = stop_y - start_y;
+ if (count > 0) {
+ y += count << SHIFT;
+ height -= count << SHIFT;
+
+ // save original X for our tail blitH() loop at the bottom
+ int origX = x;
+
+ x -= fSuperLeft;
+ // hack, until I figure out why my cubics (I think) go beyond the bounds
+ if (x < 0) {
+ width += x;
+ x = 0;
+ }
+
+ // There is always a left column, a middle, and a right column.
+ // ileft is the destination x of the first pixel of the entire rect.
+ // xleft is (SCALE - # of covered supersampled pixels) in that
+ // destination pixel.
+ int ileft = x >> SHIFT;
+ int xleft = x & MASK;
+ // irite is the destination x of the last pixel of the OPAQUE section.
+ // xrite is the number of supersampled pixels extending beyond irite;
+ // xrite/SCALE should give us alpha.
+ int irite = (x + width) >> SHIFT;
+ int xrite = (x + width) & MASK;
+ if (!xrite) {
+ xrite = SCALE;
+ irite--;
+ }
+
+ // Need to call flush() to clean up pending draws before we
+ // even consider blitV(), since otherwise it can look nonmonotonic.
+ SkASSERT(start_y > fCurrIY);
+ this->flush();
+
+ int n = irite - ileft - 1;
+ if (n < 0) {
+ // If n < 0, we'll only have a single partially-transparent column
+ // of pixels to render.
+ xleft = xrite - xleft;
+ SkASSERT(xleft <= SCALE);
+ SkASSERT(xleft > 0);
+ fRealBlitter->blitV(ileft + fLeft, start_y, count,
+ coverage_to_exact_alpha(xleft));
+ } else {
+ // With n = 0, we have two possibly-transparent columns of pixels
+ // to render; with n > 0, we have opaque columns between them.
+
+ xleft = SCALE - xleft;
+
+ // Using coverage_to_exact_alpha is not consistent with blitH()
+ const int coverageL = coverage_to_exact_alpha(xleft);
+ const int coverageR = coverage_to_exact_alpha(xrite);
+
+ SkASSERT(coverageL > 0 || n > 0 || coverageR > 0);
+ SkASSERT((coverageL != 0) + n + (coverageR != 0) <= fWidth);
+
+ fRealBlitter->blitAntiRect(ileft + fLeft, start_y, n, count,
+ coverageL, coverageR);
+ }
+
+ // preamble for our next call to blitH()
+ fCurrIY = stop_y - 1;
+ fOffsetX = 0;
+ fCurrY = y - 1;
+ fRuns.reset(fWidth);
+ x = origX;
+ }
+
+ // catch any remaining few rows
+ SkASSERT(height <= MASK);
+ while (--height >= 0) {
+ this->blitH(x, y++, width);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/// Masked supersampling antialiased blitter.
+class MaskSuperBlitter : public BaseSuperBlitter {
+public:
+ MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion&, bool isInverse);
+ virtual ~MaskSuperBlitter() {
+ fRealBlitter->blitMask(fMask, fClipRect);
+ }
+
+ void blitH(int x, int y, int width) override;
+
+ static bool CanHandleRect(const SkIRect& bounds) {
+#ifdef FORCE_RLE
+ return false;
+#endif
+ int width = bounds.width();
+ int64_t rb = SkAlign4(width);
+ // use 64bits to detect overflow
+ int64_t storage = rb * bounds.height();
+
+ return (width <= MaskSuperBlitter::kMAX_WIDTH) &&
+ (storage <= MaskSuperBlitter::kMAX_STORAGE);
+ }
+
+private:
+ enum {
+#ifdef FORCE_SUPERMASK
+ kMAX_WIDTH = 2048,
+ kMAX_STORAGE = 1024 * 1024 * 2
+#else
+ kMAX_WIDTH = 32, // so we don't try to do very wide things, where the RLE blitter would be faster
+ kMAX_STORAGE = 1024
+#endif
+ };
+
+ SkMask fMask;
+ SkIRect fClipRect;
+ // we add 1 because add_aa_span can write (unchanged) 1 extra byte at the end, rather than
+ // perform a test to see if stopAlpha != 0
+ uint32_t fStorage[(kMAX_STORAGE >> 2) + 1];
+};
+
+MaskSuperBlitter::MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion& clip,
+ bool isInverse)
+ : BaseSuperBlitter(realBlitter, ir, clip, isInverse)
+{
+ SkASSERT(CanHandleRect(ir));
+ SkASSERT(!isInverse);
+
+ fMask.fImage = (uint8_t*)fStorage;
+ fMask.fBounds = ir;
+ fMask.fRowBytes = ir.width();
+ fMask.fFormat = SkMask::kA8_Format;
+
+ fClipRect = ir;
+ if (!fClipRect.intersect(clip.getBounds())) {
+ SkASSERT(0);
+ fClipRect.setEmpty();
+ }
+
+ // For valgrind, write 1 extra byte at the end so we don't read
+ // uninitialized memory. See comment in add_aa_span and fStorage[].
+ memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 1);
+}
+
+static void add_aa_span(uint8_t* alpha, U8CPU startAlpha) {
+ /* I should be able to just add alpha[x] + startAlpha.
+ However, if the trailing edge of the previous span and the leading
+ edge of the current span round to the same super-sampled x value,
+ I might overflow to 256 with this add, hence the funny subtract.
+ */
+ unsigned tmp = *alpha + startAlpha;
+ SkASSERT(tmp <= 256);
+ *alpha = SkToU8(tmp - (tmp >> 8));
+}
+
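+// Replicate a byte into all four lanes of a 32-bit word so the quad loop in
+// add_aa_span() can bump four alpha values with a single add.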
+static inline uint32_t quadplicate_byte(U8CPU value) {
+ uint32_t pair = (value << 8) | value;
+ return (pair << 16) | pair;
+}
+
+// Perform this tricky subtract, to avoid overflowing to 256. Our caller should
+// only ever call us with at most enough to hit 256 (never larger), so it is
+// enough to just subtract the high-bit. Actually clamping with a branch would
+// be slower (e.g. if (tmp > 255) tmp = 255;)
+//
+static inline void saturated_add(uint8_t* ptr, U8CPU add) {
+ unsigned tmp = *ptr + add;
+ SkASSERT(tmp <= 256);
+ *ptr = SkToU8(tmp - (tmp >> 8));
+}
+
+// minimum count before we want to setup an inner loop, adding 4-at-a-time
+#define MIN_COUNT_FOR_QUAD_LOOP 16
+
+static void add_aa_span(uint8_t* alpha, U8CPU startAlpha, int middleCount,
+ U8CPU stopAlpha, U8CPU maxValue) {
+ SkASSERT(middleCount >= 0);
+
+ saturated_add(alpha, startAlpha);
+ alpha += 1;
+
+ if (middleCount >= MIN_COUNT_FOR_QUAD_LOOP) {
+ // loop until we're quad-byte aligned
+ while (SkTCast<intptr_t>(alpha) & 0x3) {
+ alpha[0] = SkToU8(alpha[0] + maxValue);
+ alpha += 1;
+ middleCount -= 1;
+ }
+
+ int bigCount = middleCount >> 2;
+ uint32_t* qptr = reinterpret_cast<uint32_t*>(alpha);
+ uint32_t qval = quadplicate_byte(maxValue);
+ do {
+ *qptr++ += qval;
+ } while (--bigCount > 0);
+
+ middleCount &= 3;
+ alpha = reinterpret_cast<uint8_t*> (qptr);
+ // fall through to the following while-loop
+ }
+
+ while (--middleCount >= 0) {
+ alpha[0] = SkToU8(alpha[0] + maxValue);
+ alpha += 1;
+ }
+
+ // potentially this can be off the end of our "legal" alpha values, but that
+ // only happens if stopAlpha is also 0. Rather than test for stopAlpha != 0
+ // every time (slow), we just do it, and ensure that we've allocated extra space
+ // (see the + 1 comment in fStorage[]).
+ saturated_add(alpha, stopAlpha);
+}
+
+void MaskSuperBlitter::blitH(int x, int y, int width) {
+ int iy = (y >> SHIFT);
+
+ SkASSERT(iy >= fMask.fBounds.fTop && iy < fMask.fBounds.fBottom);
+ iy -= fMask.fBounds.fTop; // make it relative to 0
+
+ // This should never happen, but it does. Until the true cause is
+ // discovered, let's skip this span instead of crashing.
+ // See http://crbug.com/17569.
+ if (iy < 0) {
+ return;
+ }
+
+#ifdef SK_DEBUG
+ {
+ int ix = x >> SHIFT;
+ SkASSERT(ix >= fMask.fBounds.fLeft && ix < fMask.fBounds.fRight);
+ }
+#endif
+
+ x -= SkLeftShift(fMask.fBounds.fLeft, SHIFT);
+
+ // hack, until I figure out why my cubics (I think) go beyond the bounds
+ if (x < 0) {
+ width += x;
+ x = 0;
+ }
+
+ uint8_t* row = fMask.fImage + iy * fMask.fRowBytes + (x >> SHIFT);
+
+ int start = x;
+ int stop = x + width;
+
+ SkASSERT(start >= 0 && stop > start);
+ int fb = start & MASK;
+ int fe = stop & MASK;
+ int n = (stop >> SHIFT) - (start >> SHIFT) - 1;
+
+
+ if (n < 0) {
+ SkASSERT(row >= fMask.fImage);
+ SkASSERT(row < fMask.fImage + kMAX_STORAGE + 1);
+ add_aa_span(row, coverage_to_partial_alpha(fe - fb));
+ } else {
+ fb = SCALE - fb;
+ SkASSERT(row >= fMask.fImage);
+ SkASSERT(row + n + 1 < fMask.fImage + kMAX_STORAGE + 1);
+ add_aa_span(row, coverage_to_partial_alpha(fb),
+ n, coverage_to_partial_alpha(fe),
+ (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT));
+ }
+
+#ifdef SK_DEBUG
+ fCurrX = x + width;
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool fitsInsideLimit(const SkRect& r, SkScalar max) {
+ const SkScalar min = -max;
+ return r.fLeft > min && r.fTop > min &&
+ r.fRight < max && r.fBottom < max;
+}
+
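+// Returns nonzero if (value << shift) cannot be represented as a signed 16-bit value.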
+static int overflows_short_shift(int value, int shift) {
+ const int s = 16 + shift;
+ return (SkLeftShift(value, s) >> s) - value;
+}
+
+/**
+ Would any of the coordinates of this rectangle not fit in a short,
+ when left-shifted by shift?
+*/
+static int rect_overflows_short_shift(SkIRect rect, int shift) {
+ SkASSERT(!overflows_short_shift(8191, SHIFT));
+ SkASSERT(overflows_short_shift(8192, SHIFT));
+ SkASSERT(!overflows_short_shift(32767, 0));
+ SkASSERT(overflows_short_shift(32768, 0));
+
+ // Since we expect these to succeed, we bit-or together
+ // for a tiny extra bit of speed.
+ return overflows_short_shift(rect.fLeft, SHIFT) |
+ overflows_short_shift(rect.fRight, SHIFT) |
+ overflows_short_shift(rect.fTop, SHIFT) |
+ overflows_short_shift(rect.fBottom, SHIFT);
+}
+
+static bool safeRoundOut(const SkRect& src, SkIRect* dst, int32_t maxInt) {
+ const SkScalar maxScalar = SkIntToScalar(maxInt);
+
+ if (fitsInsideLimit(src, maxScalar)) {
+ src.roundOut(dst);
+ return true;
+ }
+ return false;
+}
+
+void SkScan::AntiFillPath(const SkPath& path, const SkRegion& origClip,
+ SkBlitter* blitter, bool forceRLE) {
+ if (origClip.isEmpty()) {
+ return;
+ }
+
+ const bool isInverse = path.isInverseFillType();
+ SkIRect ir;
+
+ if (!safeRoundOut(path.getBounds(), &ir, SK_MaxS32 >> SHIFT)) {
+#if 0
+ const SkRect& r = path.getBounds();
+ SkDebugf("--- bounds can't fit in SkIRect: %g %g %g %g\n", r.fLeft, r.fTop, r.fRight, r.fBottom);
+#endif
+ return;
+ }
+ if (ir.isEmpty()) {
+ if (isInverse) {
+ blitter->blitRegion(origClip);
+ }
+ return;
+ }
+
+ // If the intersection of the path bounds and the clip bounds
+ // will overflow 32767 when << by SHIFT, we can't supersample,
+ // so draw without antialiasing.
+ SkIRect clippedIR;
+ if (isInverse) {
+ // If the path is an inverse fill, it's going to fill the entire
+ // clip, and we care whether the entire clip exceeds our limits.
+ clippedIR = origClip.getBounds();
+ } else {
+ if (!clippedIR.intersect(ir, origClip.getBounds())) {
+ return;
+ }
+ }
+ if (rect_overflows_short_shift(clippedIR, SHIFT)) {
+ SkScan::FillPath(path, origClip, blitter);
+ return;
+ }
+
+ // Our antialiasing can't handle a clip larger than 32767, so we restrict
+ // the clip to that limit here (the runs[] uses int16_t for its index).
+ //
+ // A more general solution (one that could also eliminate the need to
+ // disable aa based on ir bounds (see overflows_short_shift)) would be
+ // to tile the clip/target...
+ SkRegion tmpClipStorage;
+ const SkRegion* clipRgn = &origClip;
+ {
+ static const int32_t kMaxClipCoord = 32767;
+ const SkIRect& bounds = origClip.getBounds();
+ if (bounds.fRight > kMaxClipCoord || bounds.fBottom > kMaxClipCoord) {
+ SkIRect limit = { 0, 0, kMaxClipCoord, kMaxClipCoord };
+ tmpClipStorage.op(origClip, limit, SkRegion::kIntersect_Op);
+ clipRgn = &tmpClipStorage;
+ }
+ }
+ // for here down, use clipRgn, not origClip
+
+ SkScanClipper clipper(blitter, clipRgn, ir);
+ const SkIRect* clipRect = clipper.getClipRect();
+
+ if (clipper.getBlitter() == nullptr) { // clipped out
+ if (isInverse) {
+ blitter->blitRegion(*clipRgn);
+ }
+ return;
+ }
+
+ // now use the (possibly wrapped) blitter
+ blitter = clipper.getBlitter();
+
+ if (isInverse) {
+ sk_blit_above(blitter, ir, *clipRgn);
+ }
+
+ SkIRect superRect, *superClipRect = nullptr;
+
+ if (clipRect) {
+ superRect.set(SkLeftShift(clipRect->fLeft, SHIFT),
+ SkLeftShift(clipRect->fTop, SHIFT),
+ SkLeftShift(clipRect->fRight, SHIFT),
+ SkLeftShift(clipRect->fBottom, SHIFT));
+ superClipRect = &superRect;
+ }
+
+ SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);
+
+ // MaskSuperBlitter can't handle drawing outside of ir, so we can't use it
+ // if we're an inverse filltype
+ if (!isInverse && MaskSuperBlitter::CanHandleRect(ir) && !forceRLE) {
+ MaskSuperBlitter superBlit(blitter, ir, *clipRgn, isInverse);
+ SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);
+ sk_fill_path(path, superClipRect, &superBlit, ir.fTop, ir.fBottom, SHIFT, *clipRgn);
+ } else {
+ SuperBlitter superBlit(blitter, ir, *clipRgn, isInverse);
+ sk_fill_path(path, superClipRect, &superBlit, ir.fTop, ir.fBottom, SHIFT, *clipRgn);
+ }
+
+ if (isInverse) {
+ sk_blit_below(blitter, ir, *clipRgn);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkRasterClip.h"
+
+void SkScan::FillPath(const SkPath& path, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isEmpty()) {
+ return;
+ }
+
+ if (clip.isBW()) {
+ FillPath(path, clip.bwRgn(), blitter);
+ } else {
+ SkRegion tmp;
+ SkAAClipBlitter aaBlitter;
+
+ tmp.setRect(clip.getBounds());
+ aaBlitter.init(blitter, &clip.aaRgn());
+ SkScan::FillPath(path, tmp, &aaBlitter);
+ }
+}
+
+void SkScan::AntiFillPath(const SkPath& path, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isEmpty()) {
+ return;
+ }
+
+ if (clip.isBW()) {
+ AntiFillPath(path, clip.bwRgn(), blitter);
+ } else {
+ SkRegion tmp;
+ SkAAClipBlitter aaBlitter;
+
+ tmp.setRect(clip.getBounds());
+ aaBlitter.init(blitter, &clip.aaRgn());
+ SkScan::AntiFillPath(path, tmp, &aaBlitter, true);
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkScan_Antihair.cpp b/gfx/skia/skia/src/core/SkScan_Antihair.cpp
new file mode 100644
index 000000000..b3770eb64
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan_Antihair.cpp
@@ -0,0 +1,1006 @@
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkScan.h"
+#include "SkBlitter.h"
+#include "SkColorPriv.h"
+#include "SkLineClipper.h"
+#include "SkRasterClip.h"
+#include "SkFDot6.h"
+
+/* Our attempt to compute the worst case "bounds" for the horizontal and
+ vertical cases has some numerical bug in it, and we sometimes undervalue
+ our extents. The bug is that when this happens, we will set the clip to
+ nullptr (for speed), and thus draw outside of the clip by a pixel, which might
+ only look bad, but it might also access memory outside of the valid range
+ allocated for the device bitmap.
+
+ This define enables our fix to outset our "bounds" by 1, thus avoiding the
+ chance of the bug, but at the cost of sometimes taking the rectblitter
+ case (i.e. not setting the clip to nullptr) when we might not actually need
+ to. If we can improve/fix the actual calculations, then we can remove this
+ step.
+ */
+#define OUTSET_BEFORE_CLIP_TEST true
+
+#define HLINE_STACK_BUFFER 100
+
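+// Scale 'value' by dot6/64, where dot6 is a coverage fraction in [0, 64].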
+static inline int SmallDot6Scale(int value, int dot6) {
+ SkASSERT((int16_t)value == value);
+ SkASSERT((unsigned)dot6 <= 64);
+ return (value * dot6) >> 6;
+}
+
+//#define TEST_GAMMA
+
+#ifdef TEST_GAMMA
+ static uint8_t gGammaTable[256];
+ #define ApplyGamma(table, alpha) (table)[alpha]
+
+ static void build_gamma_table() {
+ static bool gInit = false;
+
+ if (gInit == false) {
+ for (int i = 0; i < 256; i++) {
+ SkFixed n = i * 257;
+ n += n >> 15;
+ SkASSERT(n >= 0 && n <= SK_Fixed1);
+ n = SkFixedSqrt(n);
+ n = n * 255 >> 16;
+ // SkDebugf("morph %d -> %d\n", i, n);
+ gGammaTable[i] = SkToU8(n);
+ }
+ gInit = true;
+ }
+ }
+#else
+ #define ApplyGamma(table, alpha) SkToU8(alpha)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
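+// Blit a horizontal run of 'count' pixels at a constant alpha, feeding the
+// blitter in chunks that fit the stack-allocated run buffers.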
+static void call_hline_blitter(SkBlitter* blitter, int x, int y, int count,
+ U8CPU alpha) {
+ SkASSERT(count > 0);
+
+ int16_t runs[HLINE_STACK_BUFFER + 1];
+ uint8_t aa[HLINE_STACK_BUFFER];
+
+ aa[0] = ApplyGamma(gGammaTable, alpha);
+ do {
+ int n = count;
+ if (n > HLINE_STACK_BUFFER) {
+ n = HLINE_STACK_BUFFER;
+ }
+ runs[0] = SkToS16(n);
+ runs[n] = 0;
+ blitter->blitAntiH(x, y, aa, runs);
+ x += n;
+ count -= n;
+ } while (count > 0);
+}
+
+class SkAntiHairBlitter {
+public:
+ SkAntiHairBlitter() : fBlitter(nullptr) {}
+ virtual ~SkAntiHairBlitter() {}
+
+ SkBlitter* getBlitter() const { return fBlitter; }
+
+ void setup(SkBlitter* blitter) {
+ fBlitter = blitter;
+ }
+
+ virtual SkFixed drawCap(int x, SkFixed fy, SkFixed slope, int mod64) = 0;
+ virtual SkFixed drawLine(int x, int stopx, SkFixed fy, SkFixed slope) = 0;
+
+private:
+ SkBlitter* fBlitter;
+};
+
+class HLine_SkAntiHairBlitter : public SkAntiHairBlitter {
+public:
+ SkFixed drawCap(int x, SkFixed fy, SkFixed slope, int mod64) override {
+ fy += SK_Fixed1/2;
+
+ int y = fy >> 16;
+ uint8_t a = (uint8_t)(fy >> 8);
+
+ // lower line
+ unsigned ma = SmallDot6Scale(a, mod64);
+ if (ma) {
+ call_hline_blitter(this->getBlitter(), x, y, 1, ma);
+ }
+
+ // upper line
+ ma = SmallDot6Scale(255 - a, mod64);
+ if (ma) {
+ call_hline_blitter(this->getBlitter(), x, y - 1, 1, ma);
+ }
+
+ return fy - SK_Fixed1/2;
+ }
+
+ virtual SkFixed drawLine(int x, int stopx, SkFixed fy,
+ SkFixed slope) override {
+ SkASSERT(x < stopx);
+ int count = stopx - x;
+ fy += SK_Fixed1/2;
+
+ int y = fy >> 16;
+ uint8_t a = (uint8_t)(fy >> 8);
+
+ // lower line
+ if (a) {
+ call_hline_blitter(this->getBlitter(), x, y, count, a);
+ }
+
+ // upper line
+ a = 255 - a;
+ if (a) {
+ call_hline_blitter(this->getBlitter(), x, y - 1, count, a);
+ }
+
+ return fy - SK_Fixed1/2;
+ }
+};
+
+class Horish_SkAntiHairBlitter : public SkAntiHairBlitter {
+public:
+ SkFixed drawCap(int x, SkFixed fy, SkFixed dy, int mod64) override {
+ fy += SK_Fixed1/2;
+
+ int lower_y = fy >> 16;
+ uint8_t a = (uint8_t)(fy >> 8);
+ unsigned a0 = SmallDot6Scale(255 - a, mod64);
+ unsigned a1 = SmallDot6Scale(a, mod64);
+ this->getBlitter()->blitAntiV2(x, lower_y - 1, a0, a1);
+
+ return fy + dy - SK_Fixed1/2;
+ }
+
+ SkFixed drawLine(int x, int stopx, SkFixed fy, SkFixed dy) override {
+ SkASSERT(x < stopx);
+
+ fy += SK_Fixed1/2;
+ SkBlitter* blitter = this->getBlitter();
+ do {
+ int lower_y = fy >> 16;
+ uint8_t a = (uint8_t)(fy >> 8);
+ blitter->blitAntiV2(x, lower_y - 1, 255 - a, a);
+ fy += dy;
+ } while (++x < stopx);
+
+ return fy - SK_Fixed1/2;
+ }
+};
+
+class VLine_SkAntiHairBlitter : public SkAntiHairBlitter {
+public:
+ SkFixed drawCap(int y, SkFixed fx, SkFixed dx, int mod64) override {
+ SkASSERT(0 == dx);
+ fx += SK_Fixed1/2;
+
+ int x = fx >> 16;
+ int a = (uint8_t)(fx >> 8);
+
+ unsigned ma = SmallDot6Scale(a, mod64);
+ if (ma) {
+ this->getBlitter()->blitV(x, y, 1, ma);
+ }
+ ma = SmallDot6Scale(255 - a, mod64);
+ if (ma) {
+ this->getBlitter()->blitV(x - 1, y, 1, ma);
+ }
+
+ return fx - SK_Fixed1/2;
+ }
+
+ SkFixed drawLine(int y, int stopy, SkFixed fx, SkFixed dx) override {
+ SkASSERT(y < stopy);
+ SkASSERT(0 == dx);
+ fx += SK_Fixed1/2;
+
+ int x = fx >> 16;
+ int a = (uint8_t)(fx >> 8);
+
+ if (a) {
+ this->getBlitter()->blitV(x, y, stopy - y, a);
+ }
+ a = 255 - a;
+ if (a) {
+ this->getBlitter()->blitV(x - 1, y, stopy - y, a);
+ }
+
+ return fx - SK_Fixed1/2;
+ }
+};
+
+class Vertish_SkAntiHairBlitter : public SkAntiHairBlitter {
+public:
+ SkFixed drawCap(int y, SkFixed fx, SkFixed dx, int mod64) override {
+ fx += SK_Fixed1/2;
+
+ int x = fx >> 16;
+ uint8_t a = (uint8_t)(fx >> 8);
+ this->getBlitter()->blitAntiH2(x - 1, y,
+ SmallDot6Scale(255 - a, mod64), SmallDot6Scale(a, mod64));
+
+ return fx + dx - SK_Fixed1/2;
+ }
+
+ SkFixed drawLine(int y, int stopy, SkFixed fx, SkFixed dx) override {
+ SkASSERT(y < stopy);
+ fx += SK_Fixed1/2;
+ do {
+ int x = fx >> 16;
+ uint8_t a = (uint8_t)(fx >> 8);
+ this->getBlitter()->blitAntiH2(x - 1, y, 255 - a, a);
+ fx += dx;
+ } while (++y < stopy);
+
+ return fx - SK_Fixed1/2;
+ }
+};
+
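+// Divide two SkFDot6 values and return the quotient as a 16.16 SkFixed.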
+static inline SkFixed fastfixdiv(SkFDot6 a, SkFDot6 b) {
+ SkASSERT((SkLeftShift(a, 16) >> 16) == a);
+ SkASSERT(b != 0);
+ return SkLeftShift(a, 16) / b;
+}
+
+#define SkBITCOUNT(x) (sizeof(x) << 3)
+
+#if 1
+// returns high-bit set iff x==0x8000...
+static inline int bad_int(int x) {
+ return x & -x;
+}
+
+static int any_bad_ints(int a, int b, int c, int d) {
+ return (bad_int(a) | bad_int(b) | bad_int(c) | bad_int(d)) >> (SkBITCOUNT(int) - 1);
+}
+#else
+static inline int good_int(int x) {
+ return x ^ (1 << (SkBITCOUNT(x) - 1));
+}
+
+static int any_bad_ints(int a, int b, int c, int d) {
+ return !(good_int(a) & good_int(b) & good_int(c) & good_int(d));
+}
+#endif
+
+#ifdef SK_DEBUG
+static bool canConvertFDot6ToFixed(SkFDot6 x) {
+ const int maxDot6 = SK_MaxS32 >> (16 - 6);
+ return SkAbs32(x) <= maxDot6;
+}
+#endif
+
+/*
+ * We want the fractional part of ordinate, but we want multiples of 64 to
+ * return 64, not 0, so we can't just say (ordinate & 63).
+ * We basically want to compute those bits, and if they're 0, return 64.
+ * We can do that w/o a branch with an extra sub and add.
+ */
+static int contribution_64(SkFDot6 ordinate) {
+#if 0
+ int result = ordinate & 63;
+ if (0 == result) {
+ result = 64;
+ }
+#else
+ int result = ((ordinate - 1) & 63) + 1;
+#endif
+ SkASSERT(result > 0 && result <= 64);
+ return result;
+}
+
+static void do_anti_hairline(SkFDot6 x0, SkFDot6 y0, SkFDot6 x1, SkFDot6 y1,
+ const SkIRect* clip, SkBlitter* blitter) {
+ // check for integer NaN (0x80000000) which we can't handle (can't negate it)
+ // It appears typically from a huge float (inf or nan) being converted to int.
+ // If we see it, just don't draw.
+ if (any_bad_ints(x0, y0, x1, y1)) {
+ return;
+ }
+
+ // The caller must clip the line to [-32767.0 ... 32767.0] ahead of time
+ // (in dot6 format)
+ SkASSERT(canConvertFDot6ToFixed(x0));
+ SkASSERT(canConvertFDot6ToFixed(y0));
+ SkASSERT(canConvertFDot6ToFixed(x1));
+ SkASSERT(canConvertFDot6ToFixed(y1));
+
+ if (SkAbs32(x1 - x0) > SkIntToFDot6(511) || SkAbs32(y1 - y0) > SkIntToFDot6(511)) {
+ /* instead of (x0 + x1) >> 1, we shift each separately. This is less
+ precise, but avoids overflowing the intermediate result if the
+ values are huge. A better fix might be to clip the original pts
+ directly (i.e. do the divide), so we don't spend time subdividing
+ huge lines at all.
+ */
+ int hx = (x0 >> 1) + (x1 >> 1);
+ int hy = (y0 >> 1) + (y1 >> 1);
+ do_anti_hairline(x0, y0, hx, hy, clip, blitter);
+ do_anti_hairline(hx, hy, x1, y1, clip, blitter);
+ return;
+ }
+
+ int scaleStart, scaleStop;
+ int istart, istop;
+ SkFixed fstart, slope;
+
+ HLine_SkAntiHairBlitter hline_blitter;
+ Horish_SkAntiHairBlitter horish_blitter;
+ VLine_SkAntiHairBlitter vline_blitter;
+ Vertish_SkAntiHairBlitter vertish_blitter;
+ SkAntiHairBlitter* hairBlitter = nullptr;
+
+ if (SkAbs32(x1 - x0) > SkAbs32(y1 - y0)) { // mostly horizontal
+ if (x0 > x1) { // we want to go left-to-right
+ SkTSwap<SkFDot6>(x0, x1);
+ SkTSwap<SkFDot6>(y0, y1);
+ }
+
+ istart = SkFDot6Floor(x0);
+ istop = SkFDot6Ceil(x1);
+ fstart = SkFDot6ToFixed(y0);
+ if (y0 == y1) { // completely horizontal, take fast case
+ slope = 0;
+ hairBlitter = &hline_blitter;
+ } else {
+ slope = fastfixdiv(y1 - y0, x1 - x0);
+ SkASSERT(slope >= -SK_Fixed1 && slope <= SK_Fixed1);
+ fstart += (slope * (32 - (x0 & 63)) + 32) >> 6;
+ hairBlitter = &horish_blitter;
+ }
+
+ SkASSERT(istop > istart);
+ if (istop - istart == 1) {
+ // we are within a single pixel
+ scaleStart = x1 - x0;
+ SkASSERT(scaleStart >= 0 && scaleStart <= 64);
+ scaleStop = 0;
+ } else {
+ scaleStart = 64 - (x0 & 63);
+ scaleStop = x1 & 63;
+ }
+
+ if (clip) {
+ if (istart >= clip->fRight || istop <= clip->fLeft) {
+ return;
+ }
+ if (istart < clip->fLeft) {
+ fstart += slope * (clip->fLeft - istart);
+ istart = clip->fLeft;
+ scaleStart = 64;
+ if (istop - istart == 1) {
+ // we are within a single pixel
+ scaleStart = contribution_64(x1);
+ scaleStop = 0;
+ }
+ }
+ if (istop > clip->fRight) {
+ istop = clip->fRight;
+ scaleStop = 0; // so we don't draw this last column
+ }
+
+ SkASSERT(istart <= istop);
+ if (istart == istop) {
+ return;
+ }
+ // now test if our Y values are completely inside the clip
+ int top, bottom;
+ if (slope >= 0) { // T2B
+ top = SkFixedFloorToInt(fstart - SK_FixedHalf);
+ bottom = SkFixedCeilToInt(fstart + (istop - istart - 1) * slope + SK_FixedHalf);
+ } else { // B2T
+ bottom = SkFixedCeilToInt(fstart + SK_FixedHalf);
+ top = SkFixedFloorToInt(fstart + (istop - istart - 1) * slope - SK_FixedHalf);
+ }
+#ifdef OUTSET_BEFORE_CLIP_TEST
+ top -= 1;
+ bottom += 1;
+#endif
+ if (top >= clip->fBottom || bottom <= clip->fTop) {
+ return;
+ }
+ if (clip->fTop <= top && clip->fBottom >= bottom) {
+ clip = nullptr;
+ }
+ }
+ } else { // mostly vertical
+ if (y0 > y1) { // we want to go top-to-bottom
+ SkTSwap<SkFDot6>(x0, x1);
+ SkTSwap<SkFDot6>(y0, y1);
+ }
+
+ istart = SkFDot6Floor(y0);
+ istop = SkFDot6Ceil(y1);
+ fstart = SkFDot6ToFixed(x0);
+ if (x0 == x1) {
+ if (y0 == y1) { // are we zero length?
+ return; // nothing to do
+ }
+ slope = 0;
+ hairBlitter = &vline_blitter;
+ } else {
+ slope = fastfixdiv(x1 - x0, y1 - y0);
+ SkASSERT(slope <= SK_Fixed1 && slope >= -SK_Fixed1);
+ fstart += (slope * (32 - (y0 & 63)) + 32) >> 6;
+ hairBlitter = &vertish_blitter;
+ }
+
+ SkASSERT(istop > istart);
+ if (istop - istart == 1) {
+ // we are within a single pixel
+ scaleStart = y1 - y0;
+ SkASSERT(scaleStart >= 0 && scaleStart <= 64);
+ scaleStop = 0;
+ } else {
+ scaleStart = 64 - (y0 & 63);
+ scaleStop = y1 & 63;
+ }
+
+ if (clip) {
+ if (istart >= clip->fBottom || istop <= clip->fTop) {
+ return;
+ }
+ if (istart < clip->fTop) {
+ fstart += slope * (clip->fTop - istart);
+ istart = clip->fTop;
+ scaleStart = 64;
+ if (istop - istart == 1) {
+ // we are within a single pixel
+ scaleStart = contribution_64(y1);
+ scaleStop = 0;
+ }
+ }
+ if (istop > clip->fBottom) {
+ istop = clip->fBottom;
+ scaleStop = 0; // so we don't draw this last row
+ }
+
+ SkASSERT(istart <= istop);
+ if (istart == istop) {
+ return;
+ }
+
+ // now test if our X values are completely inside the clip
+ int left, right;
+ if (slope >= 0) { // L2R
+ left = SkFixedFloorToInt(fstart - SK_FixedHalf);
+ right = SkFixedCeilToInt(fstart + (istop - istart - 1) * slope + SK_FixedHalf);
+ } else { // R2L
+ right = SkFixedCeilToInt(fstart + SK_FixedHalf);
+ left = SkFixedFloorToInt(fstart + (istop - istart - 1) * slope - SK_FixedHalf);
+ }
+#ifdef OUTSET_BEFORE_CLIP_TEST
+ left -= 1;
+ right += 1;
+#endif
+ if (left >= clip->fRight || right <= clip->fLeft) {
+ return;
+ }
+ if (clip->fLeft <= left && clip->fRight >= right) {
+ clip = nullptr;
+ }
+ }
+ }
+
+ SkRectClipBlitter rectClipper;
+ if (clip) {
+ rectClipper.init(blitter, *clip);
+ blitter = &rectClipper;
+ }
+
+ SkASSERT(hairBlitter);
+ hairBlitter->setup(blitter);
+
+#ifdef SK_DEBUG
+ if (scaleStart > 0 && scaleStop > 0) {
+ // be sure we don't draw twice in the same pixel
+ SkASSERT(istart < istop - 1);
+ }
+#endif
+
+ fstart = hairBlitter->drawCap(istart, fstart, slope, scaleStart);
+ istart += 1;
+ int fullSpans = istop - istart - (scaleStop > 0);
+ if (fullSpans > 0) {
+ fstart = hairBlitter->drawLine(istart, istart + fullSpans, fstart, slope);
+ }
+ if (scaleStop > 0) {
+ hairBlitter->drawCap(istop - 1, fstart, slope, scaleStop);
+ }
+}
+
+void SkScan::AntiHairLineRgn(const SkPoint array[], int arrayCount, const SkRegion* clip,
+ SkBlitter* blitter) {
+ if (clip && clip->isEmpty()) {
+ return;
+ }
+
+ SkASSERT(clip == nullptr || !clip->getBounds().isEmpty());
+
+#ifdef TEST_GAMMA
+ build_gamma_table();
+#endif
+
+ const SkScalar max = SkIntToScalar(32767);
+ const SkRect fixedBounds = SkRect::MakeLTRB(-max, -max, max, max);
+
+ SkRect clipBounds;
+ if (clip) {
+ clipBounds.set(clip->getBounds());
+ /* We perform integral clipping later on, but we do a scalar clip first
+ to ensure that our coordinates are expressible in fixed/integers.
+
+ antialiased hairlines can draw up to 1/2 of a pixel outside of
+ their bounds, so we need to outset the clip before calling the
+ clipper. To make the numerics safer, we outset by a whole pixel:
+ since the 1/2 pixel boundary is important to the antihair blitter,
+ we don't want to tempt numerical fate by chopping on that edge.
+ */
+ clipBounds.outset(SK_Scalar1, SK_Scalar1);
+ }
+
+ for (int i = 0; i < arrayCount - 1; ++i) {
+ SkPoint pts[2];
+
+ // We have to pre-clip the line to fit in a SkFixed, so we just chop
+ // the line. TODO find a way to actually draw beyond that range.
+ if (!SkLineClipper::IntersectLine(&array[i], fixedBounds, pts)) {
+ continue;
+ }
+
+ if (clip && !SkLineClipper::IntersectLine(pts, clipBounds, pts)) {
+ continue;
+ }
+
+ SkFDot6 x0 = SkScalarToFDot6(pts[0].fX);
+ SkFDot6 y0 = SkScalarToFDot6(pts[0].fY);
+ SkFDot6 x1 = SkScalarToFDot6(pts[1].fX);
+ SkFDot6 y1 = SkScalarToFDot6(pts[1].fY);
+
+ if (clip) {
+ SkFDot6 left = SkMin32(x0, x1);
+ SkFDot6 top = SkMin32(y0, y1);
+ SkFDot6 right = SkMax32(x0, x1);
+ SkFDot6 bottom = SkMax32(y0, y1);
+ SkIRect ir;
+
+ ir.set( SkFDot6Floor(left) - 1,
+ SkFDot6Floor(top) - 1,
+ SkFDot6Ceil(right) + 1,
+ SkFDot6Ceil(bottom) + 1);
+
+ if (clip->quickReject(ir)) {
+ continue;
+ }
+ if (!clip->quickContains(ir)) {
+ SkRegion::Cliperator iter(*clip, ir);
+ const SkIRect* r = &iter.rect();
+
+ while (!iter.done()) {
+ do_anti_hairline(x0, y0, x1, y1, r, blitter);
+ iter.next();
+ }
+ continue;
+ }
+ // fall through to no-clip case
+ }
+ do_anti_hairline(x0, y0, x1, y1, nullptr, blitter);
+ }
+}
+
+void SkScan::AntiHairRect(const SkRect& rect, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ SkPoint pts[5];
+
+ pts[0].set(rect.fLeft, rect.fTop);
+ pts[1].set(rect.fRight, rect.fTop);
+ pts[2].set(rect.fRight, rect.fBottom);
+ pts[3].set(rect.fLeft, rect.fBottom);
+ pts[4] = pts[0];
+ SkScan::AntiHairLine(pts, 5, clip, blitter);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef int FDot8; // 24.8 integer fixed point
+
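+// Convert a 16.16 SkFixed to 24.8 FDot8, rounding to nearest.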
+static inline FDot8 SkFixedToFDot8(SkFixed x) {
+ return (x + 0x80) >> 8;
+}
+
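+// Blit one scanline at row 'top' spanning [L, R) in 24.8 coordinates at the given
+// alpha, attenuating the partially covered edge pixels.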
+static void do_scanline(FDot8 L, int top, FDot8 R, U8CPU alpha,
+ SkBlitter* blitter) {
+ SkASSERT(L < R);
+
+ if ((L >> 8) == ((R - 1) >> 8)) { // 1x1 pixel
+ blitter->blitV(L >> 8, top, 1, SkAlphaMul(alpha, R - L));
+ return;
+ }
+
+ int left = L >> 8;
+
+ if (L & 0xFF) {
+ blitter->blitV(left, top, 1, SkAlphaMul(alpha, 256 - (L & 0xFF)));
+ left += 1;
+ }
+
+ int rite = R >> 8;
+ int width = rite - left;
+ if (width > 0) {
+ call_hline_blitter(blitter, left, top, width, alpha);
+ }
+ if (R & 0xFF) {
+ blitter->blitV(rite, top, 1, SkAlphaMul(alpha, R & 0xFF));
+ }
+}
+
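+// Antialias-fill the 24.8 rect [L, T, R, B]. When fillInner is false the fully
+// covered interior is skipped, which is how AntiFrameRect strokes the outer hull.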
+static void antifilldot8(FDot8 L, FDot8 T, FDot8 R, FDot8 B, SkBlitter* blitter,
+ bool fillInner) {
+ // check for empty now that we're in our reduced precision space
+ if (L >= R || T >= B) {
+ return;
+ }
+ int top = T >> 8;
+ if (top == ((B - 1) >> 8)) { // just one scanline high
+ do_scanline(L, top, R, B - T - 1, blitter);
+ return;
+ }
+
+ if (T & 0xFF) {
+ do_scanline(L, top, R, 256 - (T & 0xFF), blitter);
+ top += 1;
+ }
+
+ int bot = B >> 8;
+ int height = bot - top;
+ if (height > 0) {
+ int left = L >> 8;
+ if (left == ((R - 1) >> 8)) { // just 1-pixel wide
+ blitter->blitV(left, top, height, R - L - 1);
+ } else {
+ if (L & 0xFF) {
+ blitter->blitV(left, top, height, 256 - (L & 0xFF));
+ left += 1;
+ }
+ int rite = R >> 8;
+ int width = rite - left;
+ if (width > 0 && fillInner) {
+ blitter->blitRect(left, top, width, height);
+ }
+ if (R & 0xFF) {
+ blitter->blitV(rite, top, height, R & 0xFF);
+ }
+ }
+ }
+
+ if (B & 0xFF) {
+ do_scanline(L, bot, R, B & 0xFF, blitter);
+ }
+}
+
+static void antifillrect(const SkXRect& xr, SkBlitter* blitter) {
+ antifilldot8(SkFixedToFDot8(xr.fLeft), SkFixedToFDot8(xr.fTop),
+ SkFixedToFDot8(xr.fRight), SkFixedToFDot8(xr.fBottom),
+ blitter, true);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkScan::AntiFillXRect(const SkXRect& xr, const SkRegion* clip,
+ SkBlitter* blitter) {
+ if (nullptr == clip) {
+ antifillrect(xr, blitter);
+ } else {
+ SkIRect outerBounds;
+ XRect_roundOut(xr, &outerBounds);
+
+ if (clip->isRect()) {
+ const SkIRect& clipBounds = clip->getBounds();
+
+ if (clipBounds.contains(outerBounds)) {
+ antifillrect(xr, blitter);
+ } else {
+ SkXRect tmpR;
+ // this keeps our original edges fractional
+ XRect_set(&tmpR, clipBounds);
+ if (tmpR.intersect(xr)) {
+ antifillrect(tmpR, blitter);
+ }
+ }
+ } else {
+ SkRegion::Cliperator clipper(*clip, outerBounds);
+ const SkIRect& rr = clipper.rect();
+
+ while (!clipper.done()) {
+ SkXRect tmpR;
+
+ // this keeps our original edges fractional
+ XRect_set(&tmpR, rr);
+ if (tmpR.intersect(xr)) {
+ antifillrect(tmpR, blitter);
+ }
+ clipper.next();
+ }
+ }
+ }
+}
+
+void SkScan::AntiFillXRect(const SkXRect& xr, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isBW()) {
+ AntiFillXRect(xr, &clip.bwRgn(), blitter);
+ } else {
+ SkIRect outerBounds;
+ XRect_roundOut(xr, &outerBounds);
+
+ if (clip.quickContains(outerBounds)) {
+ AntiFillXRect(xr, nullptr, blitter);
+ } else {
+ SkAAClipBlitterWrapper wrapper(clip, blitter);
+ blitter = wrapper.getBlitter();
+
+ AntiFillXRect(xr, &wrapper.getRgn(), wrapper.getBlitter());
+ }
+ }
+}
+
+/* This guy takes a float-rect, but with the key improvement that it has
+ already been clipped, so we know that it is safe to convert it into an
+ XRect (fixed point), as it won't overflow.
+*/
+static void antifillrect(const SkRect& r, SkBlitter* blitter) {
+ SkXRect xr;
+
+ XRect_set(&xr, r);
+ antifillrect(xr, blitter);
+}
+
+/* We repeat the clipping logic of AntiFillXRect because the float rect might
+ overflow if we blindly converted it to an XRect. It sucks that we have to
+ repeat the clipping logic, but I don't see how to share the code/logic.
+
+ We clip r (as needed) into one or more (smaller) float rects, and then pass
+ those to our version of antifillrect, which converts it into an XRect and
+ then calls the blit.
+*/
+void SkScan::AntiFillRect(const SkRect& origR, const SkRegion* clip,
+ SkBlitter* blitter) {
+ if (clip) {
+ SkRect newR;
+ newR.set(clip->getBounds());
+ if (!newR.intersect(origR)) {
+ return;
+ }
+
+ const SkIRect outerBounds = newR.roundOut();
+
+ if (clip->isRect()) {
+ antifillrect(newR, blitter);
+ } else {
+ SkRegion::Cliperator clipper(*clip, outerBounds);
+ while (!clipper.done()) {
+ newR.set(clipper.rect());
+ if (newR.intersect(origR)) {
+ antifillrect(newR, blitter);
+ }
+ clipper.next();
+ }
+ }
+ } else {
+ antifillrect(origR, blitter);
+ }
+}
+
+void SkScan::AntiFillRect(const SkRect& r, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isBW()) {
+ AntiFillRect(r, &clip.bwRgn(), blitter);
+ } else {
+ SkAAClipBlitterWrapper wrap(clip, blitter);
+ AntiFillRect(r, &wrap.getRgn(), wrap.getBlitter());
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define SkAlphaMulRound(a, b) SkMulDiv255Round(a, b)
+
+// calls blitRect() if the rectangle is non-empty
+static void fillcheckrect(int L, int T, int R, int B, SkBlitter* blitter) {
+ if (L < R && T < B) {
+ blitter->blitRect(L, T, R - L, B - T);
+ }
+}
+
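+// Convert a SkScalar to 24.8 FDot8 (truncating toward zero).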
+static inline FDot8 SkScalarToFDot8(SkScalar x) {
+ return (int)(x * 256);
+}
+
+static inline int FDot8Floor(FDot8 x) {
+ return x >> 8;
+}
+
+static inline int FDot8Ceil(FDot8 x) {
+ return (x + 0xFF) >> 8;
+}
+
+// 1 - (1 - a)*(1 - b)
+static inline U8CPU InvAlphaMul(U8CPU a, U8CPU b) {
+ // need precise rounding (not just SkAlphaMul) so that values like
+ // a=228, b=252 don't overflow the result
+ return SkToU8(a + b - SkAlphaMulRound(a, b));
+}
+
+static void inner_scanline(FDot8 L, int top, FDot8 R, U8CPU alpha,
+ SkBlitter* blitter) {
+ SkASSERT(L < R);
+
+ if ((L >> 8) == ((R - 1) >> 8)) { // 1x1 pixel
+ FDot8 widClamp = R - L;
+ // border case clamp 256 to 255 instead of going through call_hline_blitter
+ // see skbug/4406
+ widClamp = widClamp - (widClamp >> 8);
+ blitter->blitV(L >> 8, top, 1, InvAlphaMul(alpha, widClamp));
+ return;
+ }
+
+ int left = L >> 8;
+ if (L & 0xFF) {
+ blitter->blitV(left, top, 1, InvAlphaMul(alpha, L & 0xFF));
+ left += 1;
+ }
+
+ int rite = R >> 8;
+ int width = rite - left;
+ if (width > 0) {
+ call_hline_blitter(blitter, left, top, width, alpha);
+ }
+
+ if (R & 0xFF) {
+ blitter->blitV(rite, top, 1, InvAlphaMul(alpha, ~R & 0xFF));
+ }
+}
+
+static void innerstrokedot8(FDot8 L, FDot8 T, FDot8 R, FDot8 B,
+ SkBlitter* blitter) {
+ SkASSERT(L < R && T < B);
+
+ int top = T >> 8;
+ if (top == ((B - 1) >> 8)) { // just one scanline high
+ // We want the inverse of B-T, since we're the inner-stroke
+ int alpha = 256 - (B - T);
+ if (alpha) {
+ inner_scanline(L, top, R, alpha, blitter);
+ }
+ return;
+ }
+
+ if (T & 0xFF) {
+ inner_scanline(L, top, R, T & 0xFF, blitter);
+ top += 1;
+ }
+
+ int bot = B >> 8;
+ int height = bot - top;
+ if (height > 0) {
+ if (L & 0xFF) {
+ blitter->blitV(L >> 8, top, height, L & 0xFF);
+ }
+ if (R & 0xFF) {
+ blitter->blitV(R >> 8, top, height, ~R & 0xFF);
+ }
+ }
+
+ if (B & 0xFF) {
+ inner_scanline(L, bot, R, ~B & 0xFF, blitter);
+ }
+}
+
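+// If both edges fall in the same pixel, snap edge1 down to the pixel boundary and
+// shift edge2 by the same amount, so their separation (the coverage) is preserved.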
+static inline void align_thin_stroke(FDot8& edge1, FDot8& edge2) {
+ SkASSERT(edge1 <= edge2);
+
+ if (FDot8Floor(edge1) == FDot8Floor(edge2)) {
+ edge2 -= (edge1 & 0xFF);
+ edge1 &= ~0xFF;
+ }
+}
+
+void SkScan::AntiFrameRect(const SkRect& r, const SkPoint& strokeSize,
+ const SkRegion* clip, SkBlitter* blitter) {
+ SkASSERT(strokeSize.fX >= 0 && strokeSize.fY >= 0);
+
+ SkScalar rx = SkScalarHalf(strokeSize.fX);
+ SkScalar ry = SkScalarHalf(strokeSize.fY);
+
+ // outset by the radius
+ FDot8 outerL = SkScalarToFDot8(r.fLeft - rx);
+ FDot8 outerT = SkScalarToFDot8(r.fTop - ry);
+ FDot8 outerR = SkScalarToFDot8(r.fRight + rx);
+ FDot8 outerB = SkScalarToFDot8(r.fBottom + ry);
+
+ SkIRect outer;
+ // set outer to the outer rect of the outer section
+ outer.set(FDot8Floor(outerL), FDot8Floor(outerT), FDot8Ceil(outerR), FDot8Ceil(outerB));
+
+ SkBlitterClipper clipper;
+ if (clip) {
+ if (clip->quickReject(outer)) {
+ return;
+ }
+ if (!clip->contains(outer)) {
+ blitter = clipper.apply(blitter, clip, &outer);
+ }
+ // now we can ignore clip for the rest of the function
+ }
+
+ // in case we lost a bit with diameter/2
+ rx = strokeSize.fX - rx;
+ ry = strokeSize.fY - ry;
+
+ // inset by the radius
+ FDot8 innerL = SkScalarToFDot8(r.fLeft + rx);
+ FDot8 innerT = SkScalarToFDot8(r.fTop + ry);
+ FDot8 innerR = SkScalarToFDot8(r.fRight - rx);
+ FDot8 innerB = SkScalarToFDot8(r.fBottom - ry);
+
+ // For sub-unit strokes, tweak the hulls such that one of the edges coincides with the pixel
+ // edge. This ensures that the general rect stroking logic below
+ // a) doesn't blit the same scanline twice
+ // b) computes the correct coverage when both edges fall within the same pixel
+ if (strokeSize.fX < 1 || strokeSize.fY < 1) {
+ align_thin_stroke(outerL, innerL);
+ align_thin_stroke(outerT, innerT);
+ align_thin_stroke(innerR, outerR);
+ align_thin_stroke(innerB, outerB);
+ }
+
+ // stroke the outer hull
+ antifilldot8(outerL, outerT, outerR, outerB, blitter, false);
+
+ // set outer to the outer rect of the middle section
+ outer.set(FDot8Ceil(outerL), FDot8Ceil(outerT), FDot8Floor(outerR), FDot8Floor(outerB));
+
+ if (innerL >= innerR || innerT >= innerB) {
+ fillcheckrect(outer.fLeft, outer.fTop, outer.fRight, outer.fBottom,
+ blitter);
+ } else {
+ SkIRect inner;
+ // set inner to the inner rect of the middle section
+ inner.set(FDot8Floor(innerL), FDot8Floor(innerT), FDot8Ceil(innerR), FDot8Ceil(innerB));
+
+ // draw the frame in 4 pieces
+ fillcheckrect(outer.fLeft, outer.fTop, outer.fRight, inner.fTop,
+ blitter);
+ fillcheckrect(outer.fLeft, inner.fTop, inner.fLeft, inner.fBottom,
+ blitter);
+ fillcheckrect(inner.fRight, inner.fTop, outer.fRight, inner.fBottom,
+ blitter);
+ fillcheckrect(outer.fLeft, inner.fBottom, outer.fRight, outer.fBottom,
+ blitter);
+
+ // now stroke the inner rect, which is similar to antifilldot8() except that
+ // it treats the fractional coordinates with the inverse bias (since it's the
+ // inner edge).
+ innerstrokedot8(innerL, innerT, innerR, innerB, blitter);
+ }
+}
+
+void SkScan::AntiFrameRect(const SkRect& r, const SkPoint& strokeSize,
+ const SkRasterClip& clip, SkBlitter* blitter) {
+ if (clip.isBW()) {
+ AntiFrameRect(r, strokeSize, &clip.bwRgn(), blitter);
+ } else {
+ SkAAClipBlitterWrapper wrap(clip, blitter);
+ AntiFrameRect(r, strokeSize, &wrap.getRgn(), wrap.getBlitter());
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkScan_Hairline.cpp b/gfx/skia/skia/src/core/SkScan_Hairline.cpp
new file mode 100644
index 000000000..ce3fe2b60
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan_Hairline.cpp
@@ -0,0 +1,705 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkScan.h"
+#include "SkBlitter.h"
+#include "SkMathPriv.h"
+#include "SkRasterClip.h"
+#include "SkFDot6.h"
+#include "SkLineClipper.h"
+
+static void horiline(int x, int stopx, SkFixed fy, SkFixed dy,
+ SkBlitter* blitter) {
+ SkASSERT(x < stopx);
+
+ do {
+ blitter->blitH(x, fy >> 16, 1);
+ fy += dy;
+ } while (++x < stopx);
+}
+
+static void vertline(int y, int stopy, SkFixed fx, SkFixed dx,
+ SkBlitter* blitter) {
+ SkASSERT(y < stopy);
+
+ do {
+ blitter->blitH(fx >> 16, y, 1);
+ fx += dx;
+ } while (++y < stopy);
+}
+
+#ifdef SK_DEBUG
+static bool canConvertFDot6ToFixed(SkFDot6 x) {
+ const int maxDot6 = SK_MaxS32 >> (16 - 6);
+ return SkAbs32(x) <= maxDot6;
+}
+#endif
+
+void SkScan::HairLineRgn(const SkPoint array[], int arrayCount, const SkRegion* clip,
+ SkBlitter* origBlitter) {
+ SkBlitterClipper clipper;
+ SkIRect clipR, ptsR;
+
+ const SkScalar max = SkIntToScalar(32767);
+ const SkRect fixedBounds = SkRect::MakeLTRB(-max, -max, max, max);
+
+ SkRect clipBounds;
+ if (clip) {
+ clipBounds.set(clip->getBounds());
+ }
+
+ for (int i = 0; i < arrayCount - 1; ++i) {
+ SkBlitter* blitter = origBlitter;
+
+ SkPoint pts[2];
+
+ // We have to pre-clip the line to fit in a SkFixed, so we just chop
+ // the line. TODO find a way to actually draw beyond that range.
+ if (!SkLineClipper::IntersectLine(&array[i], fixedBounds, pts)) {
+ continue;
+ }
+
+ // Perform a clip in scalar space, so we catch huge values which might
+ // be missed after we convert to SkFDot6 (overflow)
+ if (clip && !SkLineClipper::IntersectLine(pts, clipBounds, pts)) {
+ continue;
+ }
+
+ SkFDot6 x0 = SkScalarToFDot6(pts[0].fX);
+ SkFDot6 y0 = SkScalarToFDot6(pts[0].fY);
+ SkFDot6 x1 = SkScalarToFDot6(pts[1].fX);
+ SkFDot6 y1 = SkScalarToFDot6(pts[1].fY);
+
+ SkASSERT(canConvertFDot6ToFixed(x0));
+ SkASSERT(canConvertFDot6ToFixed(y0));
+ SkASSERT(canConvertFDot6ToFixed(x1));
+ SkASSERT(canConvertFDot6ToFixed(y1));
+
+ if (clip) {
+ // now perform clipping again, as the rounding to dot6 can wiggle the coordinates.
+ // Our rects are really dot6 rects, but since we've already used the line clipper,
+ // we know they will fit in 32 bits (26.6).
+ const SkIRect& bounds = clip->getBounds();
+
+ clipR.set(SkIntToFDot6(bounds.fLeft), SkIntToFDot6(bounds.fTop),
+ SkIntToFDot6(bounds.fRight), SkIntToFDot6(bounds.fBottom));
+ ptsR.set(x0, y0, x1, y1);
+ ptsR.sort();
+
+ // outset the right and bottom, to account for how hairlines are
+ // actually drawn, which may hit the pixel to the right of or below
+ // the coordinate
+ ptsR.fRight += SK_FDot6One;
+ ptsR.fBottom += SK_FDot6One;
+
+ if (!SkIRect::Intersects(ptsR, clipR)) {
+ continue;
+ }
+ if (!clip->isRect() || !clipR.contains(ptsR)) {
+ blitter = clipper.apply(origBlitter, clip);
+ }
+ }
+
+ SkFDot6 dx = x1 - x0;
+ SkFDot6 dy = y1 - y0;
+
+ if (SkAbs32(dx) > SkAbs32(dy)) { // mostly horizontal
+ if (x0 > x1) { // we want to go left-to-right
+ SkTSwap<SkFDot6>(x0, x1);
+ SkTSwap<SkFDot6>(y0, y1);
+ }
+ int ix0 = SkFDot6Round(x0);
+ int ix1 = SkFDot6Round(x1);
+ if (ix0 == ix1) {// too short to draw
+ continue;
+ }
+
+ SkFixed slope = SkFixedDiv(dy, dx);
+ SkFixed startY = SkFDot6ToFixed(y0) + (slope * ((32 - x0) & 63) >> 6);
+
+ horiline(ix0, ix1, startY, slope, blitter);
+ } else { // mostly vertical
+ if (y0 > y1) { // we want to go top-to-bottom
+ SkTSwap<SkFDot6>(x0, x1);
+ SkTSwap<SkFDot6>(y0, y1);
+ }
+ int iy0 = SkFDot6Round(y0);
+ int iy1 = SkFDot6Round(y1);
+ if (iy0 == iy1) { // too short to draw
+ continue;
+ }
+
+ SkFixed slope = SkFixedDiv(dx, dy);
+ SkFixed startX = SkFDot6ToFixed(x0) + (slope * ((32 - y0) & 63) >> 6);
+
+ vertline(iy0, iy1, startX, slope, blitter);
+ }
+ }
+}
+
+// we don't just draw 4 lines, 'cause that can leave a gap in the bottom-right
+// and double-hit the top-left.
+// TODO: handle huge coordinates on rect (before calling SkScalarToFixed)
+void SkScan::HairRect(const SkRect& rect, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ SkAAClipBlitterWrapper wrapper;
+ SkBlitterClipper clipper;
+ SkIRect r;
+
+ r.set(SkScalarToFixed(rect.fLeft) >> 16,
+ SkScalarToFixed(rect.fTop) >> 16,
+ (SkScalarToFixed(rect.fRight) >> 16) + 1,
+ (SkScalarToFixed(rect.fBottom) >> 16) + 1);
+
+ if (clip.quickReject(r)) {
+ return;
+ }
+ if (!clip.quickContains(r)) {
+ const SkRegion* clipRgn;
+ if (clip.isBW()) {
+ clipRgn = &clip.bwRgn();
+ } else {
+ wrapper.init(clip, blitter);
+ clipRgn = &wrapper.getRgn();
+ blitter = wrapper.getBlitter();
+ }
+ blitter = clipper.apply(blitter, clipRgn);
+ }
+
+ int width = r.width();
+ int height = r.height();
+
+ if ((width | height) == 0) {
+ return;
+ }
+ if (width <= 2 || height <= 2) {
+ blitter->blitRect(r.fLeft, r.fTop, width, height);
+ return;
+ }
+ // if we get here, we know we have 4 segments to draw
+ blitter->blitH(r.fLeft, r.fTop, width); // top
+ blitter->blitRect(r.fLeft, r.fTop + 1, 1, height - 2); // left
+ blitter->blitRect(r.fRight - 1, r.fTop + 1, 1, height - 2); // right
+ blitter->blitH(r.fLeft, r.fBottom - 1, width); // bottom
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkPath.h"
+#include "SkGeometry.h"
+#include "SkNx.h"
+
+#define kMaxCubicSubdivideLevel 9
+#define kMaxQuadSubdivideLevel 5
+
+static int compute_int_quad_dist(const SkPoint pts[3]) {
+ // compute the vector between the control point ([1]) and the middle of the
+ // line connecting the start and end ([0] and [2])
+ SkScalar dx = SkScalarHalf(pts[0].fX + pts[2].fX) - pts[1].fX;
+ SkScalar dy = SkScalarHalf(pts[0].fY + pts[2].fY) - pts[1].fY;
+ // we want everyone to be positive
+ dx = SkScalarAbs(dx);
+ dy = SkScalarAbs(dy);
+ // convert to whole pixel values (use ceiling to be conservative)
+ int idx = SkScalarCeilToInt(dx);
+ int idy = SkScalarCeilToInt(dy);
+ // use the cheap approx for distance
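+ // e.g. idx = 4, idy = 2 -> 4 + (2 >> 1) = 5, versus a true length of sqrt(20) ~= 4.47;
+ // max + min/2 never under-estimates sqrt(max^2 + min^2), so the error only leads to a
+ // slightly higher subdivision count (illustrative numbers).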
+ if (idx > idy) {
+ return idx + (idy >> 1);
+ } else {
+ return idy + (idx >> 1);
+ }
+}
+
+static void hair_quad(const SkPoint pts[3], const SkRegion* clip,
+ SkBlitter* blitter, int level, SkScan::HairRgnProc lineproc) {
+ SkASSERT(level <= kMaxQuadSubdivideLevel);
+
+ SkQuadCoeff coeff(pts);
+
+ const int lines = 1 << level;
+ Sk2s t(0);
+ Sk2s dt(SK_Scalar1 / lines);
+
+ SkPoint tmp[(1 << kMaxQuadSubdivideLevel) + 1];
+ SkASSERT((unsigned)lines < SK_ARRAY_COUNT(tmp));
+
+ tmp[0] = pts[0];
+ Sk2s A = coeff.fA;
+ Sk2s B = coeff.fB;
+ Sk2s C = coeff.fC;
+ for (int i = 1; i < lines; ++i) {
+ t = t + dt;
+ ((A * t + B) * t + C).store(&tmp[i]);
+ }
+ tmp[lines] = pts[2];
+ lineproc(tmp, lines + 1, clip, blitter);
+}
+
+static SkRect compute_nocheck_quad_bounds(const SkPoint pts[3]) {
+ SkASSERT(SkScalarsAreFinite(&pts[0].fX, 6));
+
+ Sk2s min = Sk2s::Load(pts);
+ Sk2s max = min;
+ for (int i = 1; i < 3; ++i) {
+ Sk2s pair = Sk2s::Load(pts+i);
+ min = Sk2s::Min(min, pair);
+ max = Sk2s::Max(max, pair);
+ }
+ return { min[0], min[1], max[0], max[1] };
+}
+
+static bool is_inverted(const SkRect& r) {
+ return r.fLeft > r.fRight || r.fTop > r.fBottom;
+}
+
+// Can't call SkRect::intersects, since it cares about empty, and we don't: since we're tracking
+// something to be stroked, an empty rect can still draw something (e.g. a horizontal line).
+static bool geometric_overlap(const SkRect& a, const SkRect& b) {
+ SkASSERT(!is_inverted(a) && !is_inverted(b));
+ return a.fLeft < b.fRight && b.fLeft < a.fRight &&
+ a.fTop < b.fBottom && b.fTop < a.fBottom;
+}
+
+// Can't call SkRect::contains, since it cares about empty, and we don't: since we're tracking
+// something to be stroked, an empty rect can still draw something (e.g. a horizontal line).
+static bool geometric_contains(const SkRect& outer, const SkRect& inner) {
+ SkASSERT(!is_inverted(outer) && !is_inverted(inner));
+ return inner.fRight <= outer.fRight && inner.fLeft >= outer.fLeft &&
+ inner.fBottom <= outer.fBottom && inner.fTop >= outer.fTop;
+}
+
+static inline void hairquad(const SkPoint pts[3], const SkRegion* clip, const SkRect* insetClip, const SkRect* outsetClip,
+ SkBlitter* blitter, int level, SkScan::HairRgnProc lineproc) {
+ if (insetClip) {
+ SkASSERT(outsetClip);
+ SkRect bounds = compute_nocheck_quad_bounds(pts);
+ if (!geometric_overlap(*outsetClip, bounds)) {
+ return;
+ } else if (geometric_contains(*insetClip, bounds)) {
+ clip = nullptr;
+ }
+ }
+
+ hair_quad(pts, clip, blitter, level, lineproc);
+}
+
+static inline Sk2s abs(const Sk2s& value) {
+ return Sk2s::Max(value, Sk2s(0)-value);
+}
+
+static inline SkScalar max_component(const Sk2s& value) {
+ SkScalar components[2];
+ value.store(components);
+ return SkTMax(components[0], components[1]);
+}
+
+static inline int compute_cubic_segs(const SkPoint pts[4]) {
+ Sk2s p0 = from_point(pts[0]);
+ Sk2s p1 = from_point(pts[1]);
+ Sk2s p2 = from_point(pts[2]);
+ Sk2s p3 = from_point(pts[3]);
+
+ const Sk2s oneThird(1.0f / 3.0f);
+ const Sk2s twoThird(2.0f / 3.0f);
+
+ Sk2s p13 = oneThird * p3 + twoThird * p0;
+ Sk2s p23 = oneThird * p0 + twoThird * p3;
+
+ SkScalar diff = max_component(Sk2s::Max(abs(p1 - p13), abs(p2 - p23)));
+ SkScalar tol = SK_Scalar1 / 8;
+
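+ // e.g. a worst-case deviation of 2.0 is tested against tolerances 1/8, 1/2, 2, 8 and
+ // passes at i == 3, giving 1 << 3 == 8 segments; tol grows 4x per level because halving
+ // the parameter range roughly quarters the deviation from the chord (illustrative trace).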
+ for (int i = 0; i < kMaxCubicSubdivideLevel; ++i) {
+ if (diff < tol) {
+ return 1 << i;
+ }
+ tol *= 4;
+ }
+ return 1 << kMaxCubicSubdivideLevel;
+}
+
+static bool lt_90(SkPoint p0, SkPoint pivot, SkPoint p2) {
+ return SkVector::DotProduct(p0 - pivot, p2 - pivot) >= 0;
+}
+
+// The off-curve points are "inside" the limits of the on-curve pts
+static bool quick_cubic_niceness_check(const SkPoint pts[4]) {
+ return lt_90(pts[1], pts[0], pts[3]) &&
+ lt_90(pts[2], pts[0], pts[3]) &&
+ lt_90(pts[1], pts[3], pts[0]) &&
+ lt_90(pts[2], pts[3], pts[0]);
+}
+
+static void hair_cubic(const SkPoint pts[4], const SkRegion* clip, SkBlitter* blitter,
+ SkScan::HairRgnProc lineproc) {
+ const int lines = compute_cubic_segs(pts);
+ SkASSERT(lines > 0);
+ if (1 == lines) {
+ SkPoint tmp[2] = { pts[0], pts[3] };
+ lineproc(tmp, 2, clip, blitter);
+ return;
+ }
+
+ SkCubicCoeff coeff(pts);
+
+ const Sk2s dt(SK_Scalar1 / lines);
+ Sk2s t(0);
+
+ SkPoint tmp[(1 << kMaxCubicSubdivideLevel) + 1];
+ SkASSERT((unsigned)lines < SK_ARRAY_COUNT(tmp));
+
+ tmp[0] = pts[0];
+ Sk2s A = coeff.fA;
+ Sk2s B = coeff.fB;
+ Sk2s C = coeff.fC;
+ Sk2s D = coeff.fD;
+ for (int i = 1; i < lines; ++i) {
+ t = t + dt;
+ (((A * t + B) * t + C) * t + D).store(&tmp[i]);
+ }
+ tmp[lines] = pts[3];
+ lineproc(tmp, lines + 1, clip, blitter);
+}
+
+static SkRect compute_nocheck_cubic_bounds(const SkPoint pts[4]) {
+ SkASSERT(SkScalarsAreFinite(&pts[0].fX, 8));
+
+ Sk2s min = Sk2s::Load(pts);
+ Sk2s max = min;
+ for (int i = 1; i < 4; ++i) {
+ Sk2s pair = Sk2s::Load(pts+i);
+ min = Sk2s::Min(min, pair);
+ max = Sk2s::Max(max, pair);
+ }
+ return { min[0], min[1], max[0], max[1] };
+}
+
+static inline void haircubic(const SkPoint pts[4], const SkRegion* clip, const SkRect* insetClip, const SkRect* outsetClip,
+ SkBlitter* blitter, int level, SkScan::HairRgnProc lineproc) {
+ if (insetClip) {
+ SkASSERT(outsetClip);
+ SkRect bounds = compute_nocheck_cubic_bounds(pts);
+ if (!geometric_overlap(*outsetClip, bounds)) {
+ return;
+ } else if (geometric_contains(*insetClip, bounds)) {
+ clip = nullptr;
+ }
+ }
+
+ if (quick_cubic_niceness_check(pts)) {
+ hair_cubic(pts, clip, blitter, lineproc);
+ } else {
+ SkPoint tmp[13];
+ SkScalar tValues[3];
+
+ int count = SkChopCubicAtMaxCurvature(pts, tmp, tValues);
+ for (int i = 0; i < count; i++) {
+ hair_cubic(&tmp[i * 3], clip, blitter, lineproc);
+ }
+ }
+}
+
+static int compute_quad_level(const SkPoint pts[3]) {
+ int d = compute_int_quad_dist(pts);
+ /* quadratics approach the line connecting their start and end points
+ 4x closer with each subdivision, so we compute the number of
+ subdivisions as the minimum needed to get that distance to be less
+ than a pixel.
+ */
+ int level = (33 - SkCLZ(d)) >> 1;
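+ // e.g. d == 30: SkCLZ(30) == 27, so level == (33 - 27) >> 1 == 3, and 4^3 == 64 >= 30
+ // while 4^2 == 16 < 30, so 3 subdivisions are indeed the minimum (illustrative numbers).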
+ // sanity check on level (from the previous version)
+ if (level > kMaxQuadSubdivideLevel) {
+ level = kMaxQuadSubdivideLevel;
+ }
+ return level;
+}
+
+/* Extend the points in the direction of the starting or ending tangent by 1/2 unit to
+ account for a round or square cap. If there's no distance between the end point and
+ the control point, use the next control point to create a tangent. If the curve
+ is degenerate, move the cap out 1/2 unit horizontally. */
+template <SkPaint::Cap capStyle>
+void extend_pts(SkPath::Verb prevVerb, SkPath::Verb nextVerb, SkPoint* pts, int ptCount) {
+ SkASSERT(SkPaint::kSquare_Cap == capStyle || SkPaint::kRound_Cap == capStyle);
+ // The area of a circle is PI*R*R. For a circle of unit diameter, R = 1/2, and the cap covers half of that area.
+ const SkScalar capOutset = SkPaint::kSquare_Cap == capStyle ? 0.5f : SK_ScalarPI / 8;
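+ // i.e. a round cap covers (PI * (1/2)^2) / 2 = PI/8 of a pixel, so extending the 1-px-wide
+ // hairline by PI/8 adds the same area as the cap it stands in for; a square cap simply
+ // extends by half the stroke width, 0.5 (illustrative reasoning for the constants above).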
+ if (SkPath::kMove_Verb == prevVerb) {
+ SkPoint* first = pts;
+ SkPoint* ctrl = first;
+ int controls = ptCount - 1;
+ SkVector tangent;
+ do {
+ tangent = *first - *++ctrl;
+ } while (tangent.isZero() && --controls > 0);
+ if (tangent.isZero()) {
+ tangent.set(1, 0);
+ controls = ptCount - 1; // If all points are equal, move all but one
+ } else {
+ tangent.normalize();
+ }
+ do { // If the end point and control points are equal, loop to move them in tandem.
+ first->fX += tangent.fX * capOutset;
+ first->fY += tangent.fY * capOutset;
+ ++first;
+ } while (++controls < ptCount);
+ }
+ if (SkPath::kMove_Verb == nextVerb || SkPath::kDone_Verb == nextVerb) {
+ SkPoint* last = &pts[ptCount - 1];
+ SkPoint* ctrl = last;
+ int controls = ptCount - 1;
+ SkVector tangent;
+ do {
+ tangent = *last - *--ctrl;
+ } while (tangent.isZero() && --controls > 0);
+ if (tangent.isZero()) {
+ tangent.set(-1, 0);
+ controls = ptCount - 1;
+ } else {
+ tangent.normalize();
+ }
+ do {
+ last->fX += tangent.fX * capOutset;
+ last->fY += tangent.fY * capOutset;
+ --last;
+ } while (++controls < ptCount);
+ }
+}
+
+template <SkPaint::Cap capStyle>
+void hair_path(const SkPath& path, const SkRasterClip& rclip, SkBlitter* blitter,
+ SkScan::HairRgnProc lineproc) {
+ if (path.isEmpty()) {
+ return;
+ }
+
+ SkAAClipBlitterWrapper wrap;
+ const SkRegion* clip = nullptr;
+ SkRect insetStorage, outsetStorage;
+ const SkRect* insetClip = nullptr;
+ const SkRect* outsetClip = nullptr;
+
+ {
+ const int capOut = SkPaint::kButt_Cap == capStyle ? 1 : 2;
+ const SkIRect ibounds = path.getBounds().roundOut().makeOutset(capOut, capOut);
+ if (rclip.quickReject(ibounds)) {
+ return;
+ }
+ if (!rclip.quickContains(ibounds)) {
+ if (rclip.isBW()) {
+ clip = &rclip.bwRgn();
+ } else {
+ wrap.init(rclip, blitter);
+ blitter = wrap.getBlitter();
+ clip = &wrap.getRgn();
+ }
+
+ /*
+ * We now cache two scalar rects, to use for culling per-segment (e.g. cubic).
+ * Since we're hairlining, the "bounds" of the control points isn't necessarily the
+ * limit of where a segment can draw (it might draw up to 1 pixel beyond in aa-hairs).
+ *
+ * Computing the pt-bounds per segment is easy, so we do that, and then inversely adjust
+ * the culling bounds so we can just do a straight compare per segment.
+ *
+ * insetClip is used for quick-accept (i.e. the segment is not clipped), so we inset
+ * it from the clip-bounds (since segment bounds can be off by 1).
+ *
+ * outsetClip is used for quick-reject (i.e. the segment is entirely outside), so we
+ * outset it from the clip-bounds.
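+ *
+ * For example (illustrative numbers): a clip of [10, 10, 100, 100] gives insetStorage =
+ * [11, 11, 99, 99] and outsetStorage = [9, 9, 101, 101]; a segment whose point-bounds fit
+ * inside the former needs no per-span clipping, while one that misses the latter entirely
+ * can be rejected without blitting.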
+ */
+ insetStorage.set(clip->getBounds());
+ outsetStorage = insetStorage.makeOutset(1, 1);
+ insetStorage.inset(1, 1);
+ if (is_inverted(insetStorage)) {
+ /*
+ * our bounds checks assume the rects are never inverted. If insetting has
+ * created that, we assume that the area is too small to safely perform a
+ * quick-accept, so we just mark the rect as empty (so the quick-accept check
+ * will always fail).
+ */
+ insetStorage.setEmpty(); // just so we don't pass an inverted rect
+ }
+ if (rclip.isRect()) {
+ insetClip = &insetStorage;
+ }
+ outsetClip = &outsetStorage;
+ }
+ }
+
+ SkPath::RawIter iter(path);
+ SkPoint pts[4], firstPt, lastPt;
+ SkPath::Verb verb, prevVerb;
+ SkAutoConicToQuads converter;
+
+ if (SkPaint::kButt_Cap != capStyle) {
+ prevVerb = SkPath::kDone_Verb;
+ }
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ firstPt = lastPt = pts[0];
+ break;
+ case SkPath::kLine_Verb:
+ if (SkPaint::kButt_Cap != capStyle) {
+ extend_pts<capStyle>(prevVerb, iter.peek(), pts, 2);
+ }
+ lineproc(pts, 2, clip, blitter);
+ lastPt = pts[1];
+ break;
+ case SkPath::kQuad_Verb:
+ if (SkPaint::kButt_Cap != capStyle) {
+ extend_pts<capStyle>(prevVerb, iter.peek(), pts, 3);
+ }
+ hairquad(pts, clip, insetClip, outsetClip, blitter, compute_quad_level(pts), lineproc);
+ lastPt = pts[2];
+ break;
+ case SkPath::kConic_Verb: {
+ if (SkPaint::kButt_Cap != capStyle) {
+ extend_pts<capStyle>(prevVerb, iter.peek(), pts, 3);
+ }
+ // how close should the quads be to the original conic?
+ const SkScalar tol = SK_Scalar1 / 4;
+ const SkPoint* quadPts = converter.computeQuads(pts,
+ iter.conicWeight(), tol);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ int level = compute_quad_level(quadPts);
+ hairquad(quadPts, clip, insetClip, outsetClip, blitter, level, lineproc);
+ quadPts += 2;
+ }
+ lastPt = pts[2];
+ break;
+ }
+ case SkPath::kCubic_Verb: {
+ if (SkPaint::kButt_Cap != capStyle) {
+ extend_pts<capStyle>(prevVerb, iter.peek(), pts, 4);
+ }
+ haircubic(pts, clip, insetClip, outsetClip, blitter, kMaxCubicSubdivideLevel, lineproc);
+ lastPt = pts[3];
+ } break;
+ case SkPath::kClose_Verb:
+ pts[0] = lastPt;
+ pts[1] = firstPt;
+ if (SkPaint::kButt_Cap != capStyle && prevVerb == SkPath::kMove_Verb) {
+ // cap moveTo/close to match svg expectations for degenerate segments
+ extend_pts<capStyle>(prevVerb, iter.peek(), pts, 2);
+ }
+ lineproc(pts, 2, clip, blitter);
+ break;
+ case SkPath::kDone_Verb:
+ break;
+ }
+ if (SkPaint::kButt_Cap != capStyle) {
+ if (prevVerb == SkPath::kMove_Verb &&
+ verb >= SkPath::kLine_Verb && verb <= SkPath::kCubic_Verb) {
+ firstPt = pts[0]; // the curve moved the initial point, so close to it instead
+ }
+ prevVerb = verb;
+ }
+ }
+}
+
+void SkScan::HairPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ hair_path<SkPaint::kButt_Cap>(path, clip, blitter, SkScan::HairLineRgn);
+}
+
+void SkScan::AntiHairPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ hair_path<SkPaint::kButt_Cap>(path, clip, blitter, SkScan::AntiHairLineRgn);
+}
+
+void SkScan::HairSquarePath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ hair_path<SkPaint::kSquare_Cap>(path, clip, blitter, SkScan::HairLineRgn);
+}
+
+void SkScan::AntiHairSquarePath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ hair_path<SkPaint::kSquare_Cap>(path, clip, blitter, SkScan::AntiHairLineRgn);
+}
+
+void SkScan::HairRoundPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ hair_path<SkPaint::kRound_Cap>(path, clip, blitter, SkScan::HairLineRgn);
+}
+
+void SkScan::AntiHairRoundPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ hair_path<SkPaint::kRound_Cap>(path, clip, blitter, SkScan::AntiHairLineRgn);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkScan::FrameRect(const SkRect& r, const SkPoint& strokeSize,
+ const SkRasterClip& clip, SkBlitter* blitter) {
+ SkASSERT(strokeSize.fX >= 0 && strokeSize.fY >= 0);
+
+ if (strokeSize.fX < 0 || strokeSize.fY < 0) {
+ return;
+ }
+
+ const SkScalar dx = strokeSize.fX;
+ const SkScalar dy = strokeSize.fY;
+ SkScalar rx = SkScalarHalf(dx);
+ SkScalar ry = SkScalarHalf(dy);
+ SkRect outer, tmp;
+
+ outer.set(r.fLeft - rx, r.fTop - ry,
+ r.fRight + rx, r.fBottom + ry);
+
+ if (r.width() <= dx || r.height() <= dy) {
+ SkScan::FillRect(outer, clip, blitter);
+ return;
+ }
+
+ tmp.set(outer.fLeft, outer.fTop, outer.fRight, outer.fTop + dy);
+ SkScan::FillRect(tmp, clip, blitter);
+ tmp.fTop = outer.fBottom - dy;
+ tmp.fBottom = outer.fBottom;
+ SkScan::FillRect(tmp, clip, blitter);
+
+ tmp.set(outer.fLeft, outer.fTop + dy, outer.fLeft + dx, outer.fBottom - dy);
+ SkScan::FillRect(tmp, clip, blitter);
+ tmp.fLeft = outer.fRight - dx;
+ tmp.fRight = outer.fRight;
+ SkScan::FillRect(tmp, clip, blitter);
+}
+
+void SkScan::HairLine(const SkPoint pts[], int count, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isBW()) {
+ HairLineRgn(pts, count, &clip.bwRgn(), blitter);
+ } else {
+ const SkRegion* clipRgn = nullptr;
+
+ SkRect r;
+ r.set(pts, count);
+ r.outset(SK_ScalarHalf, SK_ScalarHalf);
+
+ SkAAClipBlitterWrapper wrap;
+ if (!clip.quickContains(r.roundOut())) {
+ wrap.init(clip, blitter);
+ blitter = wrap.getBlitter();
+ clipRgn = &wrap.getRgn();
+ }
+ HairLineRgn(pts, count, clipRgn, blitter);
+ }
+}
+
+void SkScan::AntiHairLine(const SkPoint pts[], int count, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isBW()) {
+ AntiHairLineRgn(pts, count, &clip.bwRgn(), blitter);
+ } else {
+ const SkRegion* clipRgn = nullptr;
+
+ SkRect r;
+ r.set(pts, count);
+
+ SkAAClipBlitterWrapper wrap;
+ if (!clip.quickContains(r.roundOut().makeOutset(1, 1))) {
+ wrap.init(clip, blitter);
+ blitter = wrap.getBlitter();
+ clipRgn = &wrap.getRgn();
+ }
+ AntiHairLineRgn(pts, count, clipRgn, blitter);
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkScan_Path.cpp b/gfx/skia/skia/src/core/SkScan_Path.cpp
new file mode 100644
index 000000000..5b80492cf
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan_Path.cpp
@@ -0,0 +1,808 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkScanPriv.h"
+#include "SkBlitter.h"
+#include "SkEdge.h"
+#include "SkEdgeBuilder.h"
+#include "SkGeometry.h"
+#include "SkPath.h"
+#include "SkQuadClipper.h"
+#include "SkRasterClip.h"
+#include "SkRegion.h"
+#include "SkTemplates.h"
+#include "SkTSort.h"
+
+#define kEDGE_HEAD_Y SK_MinS32
+#define kEDGE_TAIL_Y SK_MaxS32
+
+#ifdef SK_DEBUG
+ static void validate_sort(const SkEdge* edge) {
+ int y = kEDGE_HEAD_Y;
+
+ while (edge->fFirstY != SK_MaxS32) {
+ edge->validate();
+ SkASSERT(y <= edge->fFirstY);
+
+ y = edge->fFirstY;
+ edge = edge->fNext;
+ }
+ }
+#else
+ #define validate_sort(edge)
+#endif
+
+static inline void remove_edge(SkEdge* edge) {
+ edge->fPrev->fNext = edge->fNext;
+ edge->fNext->fPrev = edge->fPrev;
+}
+
+static inline void insert_edge_after(SkEdge* edge, SkEdge* afterMe) {
+ edge->fPrev = afterMe;
+ edge->fNext = afterMe->fNext;
+ afterMe->fNext->fPrev = edge;
+ afterMe->fNext = edge;
+}
+
+static void backward_insert_edge_based_on_x(SkEdge* edge SkDECLAREPARAM(int, curr_y)) {
+ SkFixed x = edge->fX;
+
+ SkEdge* prev = edge->fPrev;
+ while (prev->fX > x) {
+ prev = prev->fPrev;
+ }
+ if (prev->fNext != edge) {
+ remove_edge(edge);
+ insert_edge_after(edge, prev);
+ }
+}
+
+// Start from the right side, searching backwards for the point to begin the new edge list
+// insertion, marching forwards from here. The implementation could have started from the left
+// of the prior insertion, and searched to the right, or, with some additional caching, binary
+// search the starting point. More work could be done to determine optimal new edge insertion.
+static SkEdge* backward_insert_start(SkEdge* prev, SkFixed x) {
+ while (prev->fX > x) {
+ prev = prev->fPrev;
+ }
+ return prev;
+}
+
+static void insert_new_edges(SkEdge* newEdge, int curr_y) {
+ if (newEdge->fFirstY != curr_y) {
+ return;
+ }
+ SkEdge* prev = newEdge->fPrev;
+ if (prev->fX <= newEdge->fX) {
+ return;
+ }
+ // find first x pos to insert
+ SkEdge* start = backward_insert_start(prev, newEdge->fX);
+ // insert the lot, fixing up the links as we go
+ do {
+ SkEdge* next = newEdge->fNext;
+ do {
+ if (start->fNext == newEdge) {
+ goto nextEdge;
+ }
+ SkEdge* after = start->fNext;
+ if (after->fX >= newEdge->fX) {
+ break;
+ }
+ start = after;
+ } while (true);
+ remove_edge(newEdge);
+ insert_edge_after(newEdge, start);
+nextEdge:
+ start = newEdge;
+ newEdge = next;
+ } while (newEdge->fFirstY == curr_y);
+}
+
+#ifdef SK_DEBUG
+static void validate_edges_for_y(const SkEdge* edge, int curr_y) {
+ while (edge->fFirstY <= curr_y) {
+ SkASSERT(edge->fPrev && edge->fNext);
+ SkASSERT(edge->fPrev->fNext == edge);
+ SkASSERT(edge->fNext->fPrev == edge);
+ SkASSERT(edge->fFirstY <= edge->fLastY);
+
+ SkASSERT(edge->fPrev->fX <= edge->fX);
+ edge = edge->fNext;
+ }
+}
+#else
+ #define validate_edges_for_y(edge, curr_y)
+#endif
+
+#if defined _WIN32 // disable warning : local variable used without having been initialized
+#pragma warning ( push )
+#pragma warning ( disable : 4701 )
+#endif
+
+typedef void (*PrePostProc)(SkBlitter* blitter, int y, bool isStartOfScanline);
+#define PREPOST_START true
+#define PREPOST_END false
+
+static void walk_edges(SkEdge* prevHead, SkPath::FillType fillType,
+ SkBlitter* blitter, int start_y, int stop_y,
+ PrePostProc proc, int rightClip) {
+ validate_sort(prevHead->fNext);
+
+ int curr_y = start_y;
+ // the winding mask is 1 for even-odd fills, -1 for winding fills, regardless of inverse-ness
+ int windingMask = (fillType & 1) ? 1 : -1;
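+ // with even-odd, (w & 1) flips between inside and outside at every edge crossing; with
+ // winding, (w & -1) is just w, so a span only closes once the accumulated winding count
+ // returns to zero.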
+
+ for (;;) {
+ int w = 0;
+ int left SK_INIT_TO_AVOID_WARNING;
+ bool in_interval = false;
+ SkEdge* currE = prevHead->fNext;
+ SkFixed prevX = prevHead->fX;
+
+ validate_edges_for_y(currE, curr_y);
+
+ if (proc) {
+ proc(blitter, curr_y, PREPOST_START); // pre-proc
+ }
+
+ while (currE->fFirstY <= curr_y) {
+ SkASSERT(currE->fLastY >= curr_y);
+
+ int x = SkFixedRoundToInt(currE->fX);
+ w += currE->fWinding;
+ if ((w & windingMask) == 0) { // we finished an interval
+ SkASSERT(in_interval);
+ int width = x - left;
+ SkASSERT(width >= 0);
+ if (width)
+ blitter->blitH(left, curr_y, width);
+ in_interval = false;
+ } else if (!in_interval) {
+ left = x;
+ in_interval = true;
+ }
+
+ SkEdge* next = currE->fNext;
+ SkFixed newX;
+
+ if (currE->fLastY == curr_y) { // are we done with this edge?
+ if (currE->fCurveCount < 0) {
+ if (((SkCubicEdge*)currE)->updateCubic()) {
+ SkASSERT(currE->fFirstY == curr_y + 1);
+
+ newX = currE->fX;
+ goto NEXT_X;
+ }
+ } else if (currE->fCurveCount > 0) {
+ if (((SkQuadraticEdge*)currE)->updateQuadratic()) {
+ newX = currE->fX;
+ goto NEXT_X;
+ }
+ }
+ remove_edge(currE);
+ } else {
+ SkASSERT(currE->fLastY > curr_y);
+ newX = currE->fX + currE->fDX;
+ currE->fX = newX;
+ NEXT_X:
+ if (newX < prevX) { // ripple currE backwards until it is x-sorted
+ backward_insert_edge_based_on_x(currE SkPARAM(curr_y));
+ } else {
+ prevX = newX;
+ }
+ }
+ currE = next;
+ SkASSERT(currE);
+ }
+
+ // was our right-edge culled away?
+ if (in_interval) {
+ int width = rightClip - left;
+ if (width > 0) {
+ blitter->blitH(left, curr_y, width);
+ }
+ }
+
+ if (proc) {
+ proc(blitter, curr_y, PREPOST_END); // post-proc
+ }
+
+ curr_y += 1;
+ if (curr_y >= stop_y) {
+ break;
+ }
+ // now currE points to the first edge with a Yint larger than curr_y
+ insert_new_edges(currE, curr_y);
+ }
+}
+
+// return true if we're done with this edge
+static bool update_edge(SkEdge* edge, int last_y) {
+ SkASSERT(edge->fLastY >= last_y);
+ if (last_y == edge->fLastY) {
+ if (edge->fCurveCount < 0) {
+ if (((SkCubicEdge*)edge)->updateCubic()) {
+ SkASSERT(edge->fFirstY == last_y + 1);
+ return false;
+ }
+ } else if (edge->fCurveCount > 0) {
+ if (((SkQuadraticEdge*)edge)->updateQuadratic()) {
+ SkASSERT(edge->fFirstY == last_y + 1);
+ return false;
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
+static void walk_convex_edges(SkEdge* prevHead, SkPath::FillType,
+ SkBlitter* blitter, int start_y, int stop_y,
+ PrePostProc proc) {
+ validate_sort(prevHead->fNext);
+
+ SkEdge* leftE = prevHead->fNext;
+ SkEdge* riteE = leftE->fNext;
+ SkEdge* currE = riteE->fNext;
+
+#if 0
+ int local_top = leftE->fFirstY;
+ SkASSERT(local_top == riteE->fFirstY);
+#else
+ // our edge choppers for curves can result in the initial edges
+ // not lining up, so we take the max.
+ int local_top = SkMax32(leftE->fFirstY, riteE->fFirstY);
+#endif
+ SkASSERT(local_top >= start_y);
+
+ for (;;) {
+ SkASSERT(leftE->fFirstY <= stop_y);
+ SkASSERT(riteE->fFirstY <= stop_y);
+
+ if (leftE->fX > riteE->fX || (leftE->fX == riteE->fX &&
+ leftE->fDX > riteE->fDX)) {
+ SkTSwap(leftE, riteE);
+ }
+
+ int local_bot = SkMin32(leftE->fLastY, riteE->fLastY);
+ local_bot = SkMin32(local_bot, stop_y - 1);
+ SkASSERT(local_top <= local_bot);
+
+ SkFixed left = leftE->fX;
+ SkFixed dLeft = leftE->fDX;
+ SkFixed rite = riteE->fX;
+ SkFixed dRite = riteE->fDX;
+ int count = local_bot - local_top;
+ SkASSERT(count >= 0);
+ if (0 == (dLeft | dRite)) {
+ int L = SkFixedRoundToInt(left);
+ int R = SkFixedRoundToInt(rite);
+ if (L < R) {
+ count += 1;
+ blitter->blitRect(L, local_top, R - L, count);
+ }
+ local_top = local_bot + 1;
+ } else {
+ do {
+ int L = SkFixedRoundToInt(left);
+ int R = SkFixedRoundToInt(rite);
+ if (L < R) {
+ blitter->blitH(L, local_top, R - L);
+ }
+ left += dLeft;
+ rite += dRite;
+ local_top += 1;
+ } while (--count >= 0);
+ }
+
+ leftE->fX = left;
+ riteE->fX = rite;
+
+ if (update_edge(leftE, local_bot)) {
+ if (currE->fFirstY >= stop_y) {
+ break;
+ }
+ leftE = currE;
+ currE = currE->fNext;
+ }
+ if (update_edge(riteE, local_bot)) {
+ if (currE->fFirstY >= stop_y) {
+ break;
+ }
+ riteE = currE;
+ currE = currE->fNext;
+ }
+
+ SkASSERT(leftE);
+ SkASSERT(riteE);
+
+ // check our bottom clip
+ SkASSERT(local_top == local_bot + 1);
+ if (local_top >= stop_y) {
+ break;
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// This blitter overrides blitH, and will call its proxy blitter with the inverse
+// of the spans it is given (clipped to the left/right of the clip rect).
+//
+// It is used to implement inverse fill types on paths.
+//
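+// Illustrative walk-through: with the clip spanning x in [0, 100), prepost(y, true) resets
+// fPrevX to 0; a call to blitH(30, y, 20) forwards the span [0, 30) and records fPrevX = 50;
+// prepost(y, false) then forwards [50, 100), i.e. exactly the complement of the given span.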
+class InverseBlitter : public SkBlitter {
+public:
+ void setBlitter(SkBlitter* blitter, const SkIRect& clip, int shift) {
+ fBlitter = blitter;
+ fFirstX = clip.fLeft << shift;
+ fLastX = clip.fRight << shift;
+ }
+ void prepost(int y, bool isStart) {
+ if (isStart) {
+ fPrevX = fFirstX;
+ } else {
+ int invWidth = fLastX - fPrevX;
+ if (invWidth > 0) {
+ fBlitter->blitH(fPrevX, y, invWidth);
+ }
+ }
+ }
+
+ // overrides
+ void blitH(int x, int y, int width) override {
+ int invWidth = x - fPrevX;
+ if (invWidth > 0) {
+ fBlitter->blitH(fPrevX, y, invWidth);
+ }
+ fPrevX = x + width;
+ }
+
+ // we do not expect to get called with these entrypoints
+ void blitAntiH(int, int, const SkAlpha[], const int16_t runs[]) override {
+ SkDEBUGFAIL("blitAntiH unexpected");
+ }
+ void blitV(int x, int y, int height, SkAlpha alpha) override {
+ SkDEBUGFAIL("blitV unexpected");
+ }
+ void blitRect(int x, int y, int width, int height) override {
+ SkDEBUGFAIL("blitRect unexpected");
+ }
+ void blitMask(const SkMask&, const SkIRect& clip) override {
+ SkDEBUGFAIL("blitMask unexpected");
+ }
+ const SkPixmap* justAnOpaqueColor(uint32_t* value) override {
+ SkDEBUGFAIL("justAnOpaqueColor unexpected");
+ return nullptr;
+ }
+
+private:
+ SkBlitter* fBlitter;
+ int fFirstX, fLastX, fPrevX;
+};
+
+static void PrePostInverseBlitterProc(SkBlitter* blitter, int y, bool isStart) {
+ ((InverseBlitter*)blitter)->prepost(y, isStart);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if defined _WIN32
+#pragma warning ( pop )
+#endif
+
+static bool operator<(const SkEdge& a, const SkEdge& b) {
+ int valuea = a.fFirstY;
+ int valueb = b.fFirstY;
+
+ if (valuea == valueb) {
+ valuea = a.fX;
+ valueb = b.fX;
+ }
+
+ return valuea < valueb;
+}
+
+static SkEdge* sort_edges(SkEdge* list[], int count, SkEdge** last) {
+ SkTQSort(list, list + count - 1);
+
+ // now make the edges linked in sorted order
+ for (int i = 1; i < count; i++) {
+ list[i - 1]->fNext = list[i];
+ list[i]->fPrev = list[i - 1];
+ }
+
+ *last = list[count - 1];
+ return list[0];
+}
+
+// clipRect may be null, even though we always have a clip. This indicates that
+// the path is contained in the clip, and so we can ignore it during the blit.
+//
+// clipRect (if not null) has already been shifted up.
+//
+void sk_fill_path(const SkPath& path, const SkIRect* clipRect, SkBlitter* blitter,
+ int start_y, int stop_y, int shiftEdgesUp, const SkRegion& clipRgn) {
+ SkASSERT(blitter);
+
+ SkEdgeBuilder builder;
+
+ // If we're convex, then we need both edges, even if the right edge is past the clip
+ const bool canCullToTheRight = !path.isConvex();
+
+ int count = builder.build(path, clipRect, shiftEdgesUp, canCullToTheRight);
+ SkASSERT(count >= 0);
+
+ SkEdge** list = builder.edgeList();
+
+ if (0 == count) {
+ if (path.isInverseFillType()) {
+ /*
+ * Since we are in inverse-fill, our caller has already drawn above
+ * our top (start_y) and will draw below our bottom (stop_y). Thus
+ * we need to restrict our drawing to the intersection of the clip
+ * and those two limits.
+ */
+ SkIRect rect = clipRgn.getBounds();
+ if (rect.fTop < start_y) {
+ rect.fTop = start_y;
+ }
+ if (rect.fBottom > stop_y) {
+ rect.fBottom = stop_y;
+ }
+ if (!rect.isEmpty()) {
+ blitter->blitRect(rect.fLeft << shiftEdgesUp,
+ rect.fTop << shiftEdgesUp,
+ rect.width() << shiftEdgesUp,
+ rect.height() << shiftEdgesUp);
+ }
+ }
+ return;
+ }
+
+ SkEdge headEdge, tailEdge, *last;
+ // this returns the first and last edge after they're sorted into a doubly-linked list
+ SkEdge* edge = sort_edges(list, count, &last);
+
+ headEdge.fPrev = nullptr;
+ headEdge.fNext = edge;
+ headEdge.fFirstY = kEDGE_HEAD_Y;
+ headEdge.fX = SK_MinS32;
+ edge->fPrev = &headEdge;
+
+ tailEdge.fPrev = last;
+ tailEdge.fNext = nullptr;
+ tailEdge.fFirstY = kEDGE_TAIL_Y;
+ last->fNext = &tailEdge;
+
+ // now edge is the head of the sorted linked list
+
+ start_y = SkLeftShift(start_y, shiftEdgesUp);
+ stop_y = SkLeftShift(stop_y, shiftEdgesUp);
+ if (clipRect && start_y < clipRect->fTop) {
+ start_y = clipRect->fTop;
+ }
+ if (clipRect && stop_y > clipRect->fBottom) {
+ stop_y = clipRect->fBottom;
+ }
+
+ InverseBlitter ib;
+ PrePostProc proc = nullptr;
+
+ if (path.isInverseFillType()) {
+ ib.setBlitter(blitter, clipRgn.getBounds(), shiftEdgesUp);
+ blitter = &ib;
+ proc = PrePostInverseBlitterProc;
+ }
+
+ if (path.isConvex() && (nullptr == proc)) {
+ SkASSERT(count >= 2); // convex walker does not handle missing right edges
+ walk_convex_edges(&headEdge, path.getFillType(), blitter, start_y, stop_y, nullptr);
+ } else {
+ int rightEdge;
+ if (clipRect) {
+ rightEdge = clipRect->right();
+ } else {
+ rightEdge = SkScalarRoundToInt(path.getBounds().right()) << shiftEdgesUp;
+ }
+
+ walk_edges(&headEdge, path.getFillType(), blitter, start_y, stop_y, proc, rightEdge);
+ }
+}
+
+void sk_blit_above(SkBlitter* blitter, const SkIRect& ir, const SkRegion& clip) {
+ const SkIRect& cr = clip.getBounds();
+ SkIRect tmp;
+
+ tmp.fLeft = cr.fLeft;
+ tmp.fRight = cr.fRight;
+ tmp.fTop = cr.fTop;
+ tmp.fBottom = ir.fTop;
+ if (!tmp.isEmpty()) {
+ blitter->blitRectRegion(tmp, clip);
+ }
+}
+
+void sk_blit_below(SkBlitter* blitter, const SkIRect& ir, const SkRegion& clip) {
+ const SkIRect& cr = clip.getBounds();
+ SkIRect tmp;
+
+ tmp.fLeft = cr.fLeft;
+ tmp.fRight = cr.fRight;
+ tmp.fTop = ir.fBottom;
+ tmp.fBottom = cr.fBottom;
+ if (!tmp.isEmpty()) {
+ blitter->blitRectRegion(tmp, clip);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * If the caller is drawing an inverse-fill path, then it passes true for
+ * skipRejectTest, so we don't abort drawing just because the src bounds (ir)
+ * are outside of the clip.
+ */
+SkScanClipper::SkScanClipper(SkBlitter* blitter, const SkRegion* clip,
+ const SkIRect& ir, bool skipRejectTest) {
+ fBlitter = nullptr; // null means blit nothing
+ fClipRect = nullptr;
+
+ if (clip) {
+ fClipRect = &clip->getBounds();
+ if (!skipRejectTest && !SkIRect::Intersects(*fClipRect, ir)) { // completely clipped out
+ return;
+ }
+
+ if (clip->isRect()) {
+ if (fClipRect->contains(ir)) {
+ fClipRect = nullptr;
+ } else {
+ // only need a wrapper blitter if we're horizontally clipped
+ if (fClipRect->fLeft > ir.fLeft || fClipRect->fRight < ir.fRight) {
+ fRectBlitter.init(blitter, *fClipRect);
+ blitter = &fRectBlitter;
+ }
+ }
+ } else {
+ fRgnBlitter.init(blitter, clip);
+ blitter = &fRgnBlitter;
+ }
+ }
+ fBlitter = blitter;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool clip_to_limit(const SkRegion& orig, SkRegion* reduced) {
+ const int32_t limit = 32767;
+
+ SkIRect limitR;
+ limitR.set(-limit, -limit, limit, limit);
+ if (limitR.contains(orig.getBounds())) {
+ return false;
+ }
+ reduced->op(orig, limitR, SkRegion::kIntersect_Op);
+ return true;
+}
+
+/**
+ * Variants of SkScalarRoundToInt, identical to SkDScalarRoundToInt except when the input fraction
+ * is 0.5. When SK_RASTERIZE_EVEN_ROUNDING is enabled, we must bias the result before rounding to
+ * account for potential FDot6 rounding edge-cases.
+ */
+#ifdef SK_RASTERIZE_EVEN_ROUNDING
+static const double kRoundBias = 0.5 / SK_FDot6One;
+#else
+static const double kRoundBias = 0.0;
+#endif
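+// With SK_RASTERIZE_EVEN_ROUNDING, kRoundBias == 0.5 / 64 ~= 0.0078, so for example
+// round_up_to_int(0.499) == floor(0.499 + 0.5078) == 1 rather than 0, mirroring the fact
+// that 0.499 becomes 0.5 (32/64) once converted to FDot6 (illustrative arithmetic).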
+
+/**
+ * Round the value down. This is used to round the top and left of a rectangle,
+ * and corresponds to the way the scan converter treats the top and left edges.
+ */
+static inline int round_down_to_int(SkScalar x) {
+ double xx = x;
+ xx -= 0.5 + kRoundBias;
+ return (int)ceil(xx);
+}
+
+/**
+ * Round the value up. This is used to round the bottom and right of a rectangle,
+ * and corresponds to the way the scan converter treats the bottom and right edges.
+ */
+static inline int round_up_to_int(SkScalar x) {
+ double xx = x;
+ xx += 0.5 + kRoundBias;
+ return (int)floor(xx);
+}
+
+/**
+ * Variant of SkRect::round() that explicitly performs the rounding step (i.e. floor(x + 0.5))
+ * using double instead of SkScalar (float). It does this by calling SkDScalarRoundToInt(),
+ * which may be slower than calling SkScalarRoundToInt(), but gives slightly more accurate
+ * results. Also rounds top and left using double, flooring when the fraction is exactly 0.5f.
+ *
+ * e.g.
+ * SkScalar left = 0.5f;
+ * int ileft = SkScalarRoundToInt(left);
+ * SkASSERT(0 == ileft); // <--- fails
+ * ileft = round_down_to_int(left);
+ * SkASSERT(0 == ileft); // <--- succeeds
+ * SkScalar right = 0.49999997f;
+ * int iright = SkScalarRoundToInt(right);
+ * SkASSERT(0 == iright); // <--- fails
+ * iright = SkDScalarRoundToInt(right);
+ * SkASSERT(0 == iright); // <--- succeeds
+ *
+ *
+ * If using SK_RASTERIZE_EVEN_ROUNDING, we need to ensure we account for edges bounded by this
+ * rect being rounded to FDot6 format before being later rounded to an integer. For example, a
+ * value like 0.499 can be below 0.5, but round to 0.5 as FDot6, which would finally round to
+ * the integer 1, instead of just rounding to 0.
+ *
+ * To handle this, a small bias of half an FDot6 increment is added before actually rounding to
+ * an integer value. This simulates the rounding of SkScalarRoundToFDot6 without incurring the
+ * range loss of converting to FDot6 format first, preserving the integer range for the SkIRect.
+ * Thus, bottom and right are rounded in this manner (biased up), ensuring the rect is large
+ * enough.
+ */
+static void round_asymmetric_to_int(const SkRect& src, SkIRect* dst) {
+ SkASSERT(dst);
+ dst->set(round_down_to_int(src.fLeft), round_down_to_int(src.fTop),
+ round_up_to_int(src.fRight), round_up_to_int(src.fBottom));
+}
+
+void SkScan::FillPath(const SkPath& path, const SkRegion& origClip,
+ SkBlitter* blitter) {
+ if (origClip.isEmpty()) {
+ return;
+ }
+
+ // Our edges are fixed-point, and don't like the bounds of the clip to
+ // exceed that. Here we trim the clip just so we don't overflow later on
+ const SkRegion* clipPtr = &origClip;
+ SkRegion finiteClip;
+ if (clip_to_limit(origClip, &finiteClip)) {
+ if (finiteClip.isEmpty()) {
+ return;
+ }
+ clipPtr = &finiteClip;
+ }
+ // don't reference "origClip" any more, just use clipPtr
+
+ SkIRect ir;
+ // We deliberately call round_asymmetric_to_int() instead of round(), since we can't afford
+ // to generate a bounds that is tighter than the corresponding SkEdges. The edge code basically
+ // converts the floats to fixed, and then "rounds". If we called round() instead of
+ // round_asymmetric_to_int() here, we could generate the wrong ir for values like 0.4999997.
+ round_asymmetric_to_int(path.getBounds(), &ir);
+ if (ir.isEmpty()) {
+ if (path.isInverseFillType()) {
+ blitter->blitRegion(*clipPtr);
+ }
+ return;
+ }
+
+ SkScanClipper clipper(blitter, clipPtr, ir, path.isInverseFillType());
+
+ blitter = clipper.getBlitter();
+ if (blitter) {
+ // we have to keep our calls to blitter in sorted order, so we
+ // must blit the above section first, then the middle, then the bottom.
+ if (path.isInverseFillType()) {
+ sk_blit_above(blitter, ir, *clipPtr);
+ }
+ sk_fill_path(path, clipper.getClipRect(), blitter, ir.fTop, ir.fBottom,
+ 0, *clipPtr);
+ if (path.isInverseFillType()) {
+ sk_blit_below(blitter, ir, *clipPtr);
+ }
+ } else {
+ // what does it mean to not have a blitter if path.isInverseFillType???
+ }
+}
+
+void SkScan::FillPath(const SkPath& path, const SkIRect& ir,
+ SkBlitter* blitter) {
+ SkRegion rgn(ir);
+ FillPath(path, rgn, blitter);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int build_tri_edges(SkEdge edge[], const SkPoint pts[],
+ const SkIRect* clipRect, SkEdge* list[]) {
+ SkEdge** start = list;
+
+ if (edge->setLine(pts[0], pts[1], clipRect, 0)) {
+ *list++ = edge;
+ edge = (SkEdge*)((char*)edge + sizeof(SkEdge));
+ }
+ if (edge->setLine(pts[1], pts[2], clipRect, 0)) {
+ *list++ = edge;
+ edge = (SkEdge*)((char*)edge + sizeof(SkEdge));
+ }
+ if (edge->setLine(pts[2], pts[0], clipRect, 0)) {
+ *list++ = edge;
+ }
+ return (int)(list - start);
+}
+
+
+static void sk_fill_triangle(const SkPoint pts[], const SkIRect* clipRect,
+ SkBlitter* blitter, const SkIRect& ir) {
+ SkASSERT(pts && blitter);
+
+ SkEdge edgeStorage[3];
+ SkEdge* list[3];
+
+ int count = build_tri_edges(edgeStorage, pts, clipRect, list);
+ if (count < 2) {
+ return;
+ }
+
+ SkEdge headEdge, tailEdge, *last;
+
+ // this returns the first and last edge after they're sorted into a doubly-linked list
+ SkEdge* edge = sort_edges(list, count, &last);
+
+ headEdge.fPrev = nullptr;
+ headEdge.fNext = edge;
+ headEdge.fFirstY = kEDGE_HEAD_Y;
+ headEdge.fX = SK_MinS32;
+ edge->fPrev = &headEdge;
+
+ tailEdge.fPrev = last;
+ tailEdge.fNext = nullptr;
+ tailEdge.fFirstY = kEDGE_TAIL_Y;
+ last->fNext = &tailEdge;
+
+ // now edge is the head of the sorted linked list
+ int stop_y = ir.fBottom;
+ if (clipRect && stop_y > clipRect->fBottom) {
+ stop_y = clipRect->fBottom;
+ }
+ int start_y = ir.fTop;
+ if (clipRect && start_y < clipRect->fTop) {
+ start_y = clipRect->fTop;
+ }
+ walk_convex_edges(&headEdge, SkPath::kEvenOdd_FillType, blitter, start_y, stop_y, nullptr);
+// walk_edges(&headEdge, SkPath::kEvenOdd_FillType, blitter, start_y, stop_y, nullptr);
+}
+
+void SkScan::FillTriangle(const SkPoint pts[], const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isEmpty()) {
+ return;
+ }
+
+ SkRect r;
+ SkIRect ir;
+ r.set(pts, 3);
+ r.round(&ir);
+ if (ir.isEmpty() || !SkIRect::Intersects(ir, clip.getBounds())) {
+ return;
+ }
+
+ SkAAClipBlitterWrapper wrap;
+ const SkRegion* clipRgn;
+ if (clip.isBW()) {
+ clipRgn = &clip.bwRgn();
+ } else {
+ wrap.init(clip, blitter);
+ clipRgn = &wrap.getRgn();
+ blitter = wrap.getBlitter();
+ }
+
+ SkScanClipper clipper(blitter, clipRgn, ir);
+ blitter = clipper.getBlitter();
+ if (blitter) {
+ sk_fill_triangle(pts, clipper.getClipRect(), blitter, ir);
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkSemaphore.cpp b/gfx/skia/skia/src/core/SkSemaphore.cpp
new file mode 100644
index 000000000..1ebe51b15
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSemaphore.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "../private/SkLeanWindows.h"
+#include "../private/SkSemaphore.h"
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+ #include <mach/mach.h>
+ struct SkBaseSemaphore::OSSemaphore {
+ semaphore_t fSemaphore;
+
+ OSSemaphore() {
+ semaphore_create(mach_task_self(), &fSemaphore, SYNC_POLICY_LIFO, 0/*initial count*/);
+ }
+ ~OSSemaphore() { semaphore_destroy(mach_task_self(), fSemaphore); }
+
+ void signal(int n) { while (n --> 0) { semaphore_signal(fSemaphore); } }
+ void wait() { semaphore_wait(fSemaphore); }
+ };
+#elif defined(SK_BUILD_FOR_WIN32)
+ struct SkBaseSemaphore::OSSemaphore {
+ HANDLE fSemaphore;
+
+ OSSemaphore() {
+ fSemaphore = CreateSemaphore(nullptr /*security attributes, optional*/,
+ 0 /*initial count*/,
+ MAXLONG /*max count*/,
+ nullptr /*name, optional*/);
+ }
+ ~OSSemaphore() { CloseHandle(fSemaphore); }
+
+ void signal(int n) {
+ ReleaseSemaphore(fSemaphore, n, nullptr/*returns previous count, optional*/);
+ }
+ void wait() { WaitForSingleObject(fSemaphore, INFINITE/*timeout in ms*/); }
+ };
+#else
+ // It's important we test for Mach before this. This code will compile but not work there.
+ #include <errno.h>
+ #include <semaphore.h>
+ struct SkBaseSemaphore::OSSemaphore {
+ sem_t fSemaphore;
+
+ OSSemaphore() { sem_init(&fSemaphore, 0/*cross process?*/, 0/*initial count*/); }
+ ~OSSemaphore() { sem_destroy(&fSemaphore); }
+
+ void signal(int n) { while (n --> 0) { sem_post(&fSemaphore); } }
+ void wait() {
+ // Try until we're not interrupted.
+ while(sem_wait(&fSemaphore) == -1 && errno == EINTR);
+ }
+ };
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkBaseSemaphore::osSignal(int n) {
+ fOSSemaphoreOnce([this] { fOSSemaphore = new OSSemaphore; });
+ fOSSemaphore->signal(n);
+}
+
+void SkBaseSemaphore::osWait() {
+ fOSSemaphoreOnce([this] { fOSSemaphore = new OSSemaphore; });
+ fOSSemaphore->wait();
+}
+
+void SkBaseSemaphore::cleanup() {
+ delete fOSSemaphore;
+}
diff --git a/gfx/skia/skia/src/core/SkShader.cpp b/gfx/skia/skia/src/core/SkShader.cpp
new file mode 100644
index 000000000..56011c578
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkShader.cpp
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAtomics.h"
+#include "SkBitmapProcShader.h"
+#include "SkColorShader.h"
+#include "SkEmptyShader.h"
+#include "SkMallocPixelRef.h"
+#include "SkPaint.h"
+#include "SkPicture.h"
+#include "SkPictureShader.h"
+#include "SkReadBuffer.h"
+#include "SkScalar.h"
+#include "SkShader.h"
+#include "SkWriteBuffer.h"
+
+#if SK_SUPPORT_GPU
+#include "GrFragmentProcessor.h"
+#endif
+
+//#define SK_TRACK_SHADER_LIFETIME
+
+#ifdef SK_TRACK_SHADER_LIFETIME
+ static int32_t gShaderCounter;
+#endif
+
+static inline void inc_shader_counter() {
+#ifdef SK_TRACK_SHADER_LIFETIME
+ int32_t prev = sk_atomic_inc(&gShaderCounter);
+ SkDebugf("+++ shader counter %d\n", prev + 1);
+#endif
+}
+static inline void dec_shader_counter() {
+#ifdef SK_TRACK_SHADER_LIFETIME
+ int32_t prev = sk_atomic_dec(&gShaderCounter);
+ SkDebugf("--- shader counter %d\n", prev - 1);
+#endif
+}
+
+SkShader::SkShader(const SkMatrix* localMatrix) {
+ inc_shader_counter();
+ if (localMatrix) {
+ fLocalMatrix = *localMatrix;
+ } else {
+ fLocalMatrix.reset();
+ }
+ // Pre-cache so future calls to fLocalMatrix.getType() are threadsafe.
+ (void)fLocalMatrix.getType();
+}
+
+SkShader::~SkShader() {
+ dec_shader_counter();
+}
+
+void SkShader::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ bool hasLocalM = !fLocalMatrix.isIdentity();
+ buffer.writeBool(hasLocalM);
+ if (hasLocalM) {
+ buffer.writeMatrix(fLocalMatrix);
+ }
+}
+
+bool SkShader::computeTotalInverse(const ContextRec& rec, SkMatrix* totalInverse) const {
+ SkMatrix total;
+ total.setConcat(*rec.fMatrix, fLocalMatrix);
+
+ const SkMatrix* m = &total;
+ if (rec.fLocalMatrix) {
+ total.setConcat(*m, *rec.fLocalMatrix);
+ m = &total;
+ }
+ return m->invert(totalInverse);
+}
+
+bool SkShader::asLuminanceColor(SkColor* colorPtr) const {
+ SkColor storage;
+ if (nullptr == colorPtr) {
+ colorPtr = &storage;
+ }
+ if (this->onAsLuminanceColor(colorPtr)) {
+ *colorPtr = SkColorSetA(*colorPtr, 0xFF); // we only return opaque
+ return true;
+ }
+ return false;
+}
+
+SkShader::Context* SkShader::createContext(const ContextRec& rec, void* storage) const {
+ if (!this->computeTotalInverse(rec, nullptr)) {
+ return nullptr;
+ }
+ return this->onCreateContext(rec, storage);
+}
+
+SkShader::Context* SkShader::onCreateContext(const ContextRec& rec, void*) const {
+ return nullptr;
+}
+
+size_t SkShader::contextSize(const ContextRec& rec) const {
+ return this->onContextSize(rec);
+}
+
+size_t SkShader::onContextSize(const ContextRec&) const {
+ return 0;
+}
+
+SkShader::Context::Context(const SkShader& shader, const ContextRec& rec)
+ : fShader(shader), fCTM(*rec.fMatrix)
+{
+ // Because the context parameters must be valid at this point, we know that the matrix is
+ // invertible.
+ SkAssertResult(fShader.computeTotalInverse(rec, &fTotalInverse));
+ fTotalInverseClass = (uint8_t)ComputeMatrixClass(fTotalInverse);
+
+ fPaintAlpha = rec.fPaint->getAlpha();
+}
+
+SkShader::Context::~Context() {}
+
+SkShader::Context::ShadeProc SkShader::Context::asAShadeProc(void** ctx) {
+ return nullptr;
+}
+
+void SkShader::Context::shadeSpan4f(int x, int y, SkPM4f dst[], int count) {
+ const int N = 128;
+ SkPMColor tmp[N];
+ while (count > 0) {
+ int n = SkTMin(count, N);
+ this->shadeSpan(x, y, tmp, n);
+ for (int i = 0; i < n; ++i) {
+ dst[i] = SkPM4f::FromPMColor(tmp[i]);
+ }
+ dst += n;
+ x += n;
+ count -= n;
+ }
+}
+
+#include "SkColorPriv.h"
+
+#define kTempColorQuadCount 6 // balance between speed (larger) and saving stack-space
+#define kTempColorCount (kTempColorQuadCount << 2)
+
+#ifdef SK_CPU_BENDIAN
+ #define SkU32BitShiftToByteOffset(shift) (3 - ((shift) >> 3))
+#else
+ #define SkU32BitShiftToByteOffset(shift) ((shift) >> 3)
+#endif
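+// e.g. with the common configuration SK_A32_SHIFT == 24 on a little-endian CPU, the macro
+// yields byte offset 3, so the alpha bytes sit at offsets 3, 7, 11, ... within the packed
+// SkPMColor array read below (assumed layout; other configurations just shift the offset).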
+
+void SkShader::Context::shadeSpanAlpha(int x, int y, uint8_t alpha[], int count) {
+ SkASSERT(count > 0);
+
+ SkPMColor colors[kTempColorCount];
+
+ while ((count -= kTempColorCount) >= 0) {
+ this->shadeSpan(x, y, colors, kTempColorCount);
+ x += kTempColorCount;
+
+ const uint8_t* srcA = (const uint8_t*)colors + SkU32BitShiftToByteOffset(SK_A32_SHIFT);
+ int quads = kTempColorQuadCount;
+ do {
+ U8CPU a0 = srcA[0];
+ U8CPU a1 = srcA[4];
+ U8CPU a2 = srcA[8];
+ U8CPU a3 = srcA[12];
+ srcA += 4*4;
+ *alpha++ = SkToU8(a0);
+ *alpha++ = SkToU8(a1);
+ *alpha++ = SkToU8(a2);
+ *alpha++ = SkToU8(a3);
+ } while (--quads != 0);
+ }
+ SkASSERT(count < 0);
+ SkASSERT(count + kTempColorCount >= 0);
+ if (count += kTempColorCount) {
+ this->shadeSpan(x, y, colors, count);
+
+ const uint8_t* srcA = (const uint8_t*)colors + SkU32BitShiftToByteOffset(SK_A32_SHIFT);
+ do {
+ *alpha++ = *srcA;
+ srcA += 4;
+ } while (--count != 0);
+ }
+#if 0
+ do {
+ int n = count;
+ if (n > kTempColorCount)
+ n = kTempColorCount;
+ SkASSERT(n > 0);
+
+ this->shadeSpan(x, y, colors, n);
+ x += n;
+ count -= n;
+
+ const uint8_t* srcA = (const uint8_t*)colors + SkU32BitShiftToByteOffset(SK_A32_SHIFT);
+ do {
+ *alpha++ = *srcA;
+ srcA += 4;
+ } while (--n != 0);
+ } while (count > 0);
+#endif
+}
+
+SkShader::Context::MatrixClass SkShader::Context::ComputeMatrixClass(const SkMatrix& mat) {
+ MatrixClass mc = kLinear_MatrixClass;
+
+ if (mat.hasPerspective()) {
+ if (mat.isFixedStepInX()) {
+ mc = kFixedStepInX_MatrixClass;
+ } else {
+ mc = kPerspective_MatrixClass;
+ }
+ }
+ return mc;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+SkShader::GradientType SkShader::asAGradient(GradientInfo* info) const {
+ return kNone_GradientType;
+}
+
+#if SK_SUPPORT_GPU
+sk_sp<GrFragmentProcessor> SkShader::asFragmentProcessor(const AsFPArgs&) const {
+ return nullptr;
+}
+#endif
+
+SkShader* SkShader::refAsALocalMatrixShader(SkMatrix*) const {
+ return nullptr;
+}
+
+sk_sp<SkShader> SkShader::MakeEmptyShader() { return sk_make_sp<SkEmptyShader>(); }
+
+sk_sp<SkShader> SkShader::MakeColorShader(SkColor color) { return sk_make_sp<SkColorShader>(color); }
+
+sk_sp<SkShader> SkShader::MakeBitmapShader(const SkBitmap& src, TileMode tmx, TileMode tmy,
+ const SkMatrix* localMatrix) {
+ return SkMakeBitmapShader(src, tmx, tmy, localMatrix, kIfMutable_SkCopyPixelsMode, nullptr);
+}
+
+sk_sp<SkShader> SkShader::MakePictureShader(sk_sp<SkPicture> src, TileMode tmx, TileMode tmy,
+ const SkMatrix* localMatrix, const SkRect* tile) {
+ return SkPictureShader::Make(std::move(src), tmx, tmy, localMatrix, tile);
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkShader::toString(SkString* str) const {
+ if (!fLocalMatrix.isIdentity()) {
+ str->append(" ");
+ fLocalMatrix.toString(str);
+ }
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkEmptyShader::CreateProc(SkReadBuffer&) {
+ return SkShader::MakeEmptyShader();
+}
+
+#ifndef SK_IGNORE_TO_STRING
+#include "SkEmptyShader.h"
+
+void SkEmptyShader::toString(SkString* str) const {
+ str->append("SkEmptyShader: (");
+
+ this->INHERITED::toString(str);
+
+ str->append(")");
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_SUPPORT_LEGACY_CREATESHADER_PTR
+SkShader* SkShader::CreateComposeShader(SkShader* dst, SkShader* src, SkXfermode::Mode mode) {
+ return MakeComposeShader(sk_ref_sp(dst), sk_ref_sp(src), mode).release();
+}
+SkShader* SkShader::CreateComposeShader(SkShader* dst, SkShader* src, SkXfermode* xfer) {
+ return MakeComposeShader(sk_ref_sp(dst), sk_ref_sp(src), xfer).release();
+}
+SkShader* SkShader::CreatePictureShader(const SkPicture* src, TileMode tmx, TileMode tmy,
+ const SkMatrix* localMatrix, const SkRect* tile) {
+ return MakePictureShader(sk_ref_sp(const_cast<SkPicture*>(src)), tmx, tmy,
+ localMatrix, tile).release();
+}
+SkShader* SkShader::newWithColorFilter(SkColorFilter* filter) const {
+ return this->makeWithColorFilter(sk_ref_sp(filter)).release();
+}
+#endif
+
+#ifdef SK_SUPPORT_LEGACY_XFERMODE_PTR
+#include "SkXfermode.h"
+sk_sp<SkShader> SkShader::MakeComposeShader(sk_sp<SkShader> dst, sk_sp<SkShader> src,
+ SkXfermode* xfer) {
+ return MakeComposeShader(std::move(dst), std::move(src), sk_ref_sp(xfer));
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkShadowShader.cpp b/gfx/skia/skia/src/core/SkShadowShader.cpp
new file mode 100644
index 000000000..c3ede8081
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkShadowShader.cpp
@@ -0,0 +1,957 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCanvas.h"
+#include "SkReadBuffer.h"
+#include "SkShadowShader.h"
+
+////////////////////////////////////////////////////////////////////////////
+#ifdef SK_EXPERIMENTAL_SHADOWING
+
+
+/** \class SkShadowShaderImpl
+    This subclass of SkShader applies shadowing.
+*/
+class SkShadowShaderImpl : public SkShader {
+public:
+ /** Create a new shadowing shader that shadows
+ @param to do to do
+ */
+ SkShadowShaderImpl(sk_sp<SkShader> povDepthShader,
+ sk_sp<SkShader> diffuseShader,
+ sk_sp<SkLights> lights,
+ int diffuseWidth, int diffuseHeight,
+ const SkShadowParams& params)
+ : fPovDepthShader(std::move(povDepthShader))
+ , fDiffuseShader(std::move(diffuseShader))
+ , fLights(std::move(lights))
+ , fDiffuseWidth(diffuseWidth)
+ , fDiffuseHeight(diffuseHeight)
+ , fShadowParams(params) { }
+
+ bool isOpaque() const override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(const AsFPArgs&) const override;
+#endif
+
+ class ShadowShaderContext : public SkShader::Context {
+ public:
+ // The context takes ownership of the states. It will call their destructors
+ // but will NOT free the memory.
+ ShadowShaderContext(const SkShadowShaderImpl&, const ContextRec&,
+ SkShader::Context* povDepthContext,
+ SkShader::Context* diffuseContext,
+ void* heapAllocated);
+
+ ~ShadowShaderContext() override;
+
+ void shadeSpan(int x, int y, SkPMColor[], int count) override;
+
+ uint32_t getFlags() const override { return fFlags; }
+
+ private:
+ SkShader::Context* fPovDepthContext;
+ SkShader::Context* fDiffuseContext;
+ uint32_t fFlags;
+
+ void* fHeapAllocated;
+
+ int fNonAmbLightCnt;
+ SkPixmap* fShadowMapPixels;
+
+
+ typedef SkShader::Context INHERITED;
+ };
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkShadowShaderImpl)
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ size_t onContextSize(const ContextRec&) const override;
+ Context* onCreateContext(const ContextRec&, void*) const override;
+
+private:
+ sk_sp<SkShader> fPovDepthShader;
+ sk_sp<SkShader> fDiffuseShader;
+ sk_sp<SkLights> fLights;
+
+ int fDiffuseWidth;
+ int fDiffuseHeight;
+
+ SkShadowParams fShadowParams;
+
+ friend class SkShadowShader;
+
+ typedef SkShader INHERITED;
+};
+
+////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+#include "GrCoordTransform.h"
+#include "GrFragmentProcessor.h"
+#include "GrInvariantOutput.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "SkGr.h"
+#include "SkGrPriv.h"
+#include "SkSpecialImage.h"
+#include "SkImage_Base.h"
+#include "GrContext.h"
+
+class ShadowFP : public GrFragmentProcessor {
+public:
+ ShadowFP(sk_sp<GrFragmentProcessor> povDepth,
+ sk_sp<GrFragmentProcessor> diffuse,
+ sk_sp<SkLights> lights,
+ int diffuseWidth, int diffuseHeight,
+ const SkShadowParams& params,
+ GrContext* context) {
+
+ fAmbientColor = lights->ambientLightColor();
+
+ fNumNonAmbLights = 0; // count of non-ambient lights
+ for (int i = 0; i < lights->numLights(); ++i) {
+ if (fNumNonAmbLights < SkShadowShader::kMaxNonAmbientLights) {
+ fLightColor[fNumNonAmbLights] = lights->light(i).color();
+
+ if (SkLights::Light::kPoint_LightType == lights->light(i).type()) {
+ fLightDirOrPos[fNumNonAmbLights] = lights->light(i).pos();
+ fLightColor[fNumNonAmbLights].scale(lights->light(i).intensity());
+ } else {
+ fLightDirOrPos[fNumNonAmbLights] = lights->light(i).dir();
+ }
+
+ fIsPointLight[fNumNonAmbLights] =
+ SkLights::Light::kPoint_LightType == lights->light(i).type();
+
+ fIsRadialLight[fNumNonAmbLights] = lights->light(i).isRadial();
+
+ SkImage_Base* shadowMap = ((SkImage_Base*)lights->light(i).getShadowMap());
+
+ // gets deleted when the ShadowFP is destroyed, and frees the GrTexture*
+ fTexture[fNumNonAmbLights] = sk_sp<GrTexture>(shadowMap->asTextureRef(context,
+ GrTextureParams::ClampNoFilter(),
+ SkSourceGammaTreatment::kIgnore));
+ fDepthMapAccess[fNumNonAmbLights].reset(fTexture[fNumNonAmbLights].get());
+ this->addTextureAccess(&fDepthMapAccess[fNumNonAmbLights]);
+
+ fDepthMapHeight[fNumNonAmbLights] = shadowMap->height();
+ fDepthMapWidth[fNumNonAmbLights] = shadowMap->width();
+
+ fNumNonAmbLights++;
+ }
+ }
+
+ fWidth = diffuseWidth;
+ fHeight = diffuseHeight;
+
+ fShadowParams = params;
+
+ this->registerChildProcessor(std::move(povDepth));
+ this->registerChildProcessor(std::move(diffuse));
+ this->initClassID<ShadowFP>();
+ }
+
+ class GLSLShadowFP : public GrGLSLFragmentProcessor {
+ public:
+ GLSLShadowFP() { }
+
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFragmentBuilder* fragBuilder = args.fFragBuilder;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ const ShadowFP& shadowFP = args.fFp.cast<ShadowFP>();
+
+ SkASSERT(shadowFP.fNumNonAmbLights <= SkShadowShader::kMaxNonAmbientLights);
+
+ // add uniforms
+ int32_t numLights = shadowFP.fNumNonAmbLights;
+ SkASSERT(numLights <= SkShadowShader::kMaxNonAmbientLights);
+
+ int blurAlgorithm = shadowFP.fShadowParams.fType;
+
+ const char* lightDirOrPosUniName[SkShadowShader::kMaxNonAmbientLights] = {nullptr};
+ const char* lightColorUniName[SkShadowShader::kMaxNonAmbientLights] = {nullptr};
+ const char* ambientColorUniName = nullptr;
+
+ const char* depthMapWidthUniName[SkShadowShader::kMaxNonAmbientLights] = {nullptr};
+ const char* depthMapHeightUniName[SkShadowShader::kMaxNonAmbientLights] = {nullptr};
+ const char* widthUniName = nullptr; // dimensions of povDepth
+ const char* heightUniName = nullptr;
+
+ const char* shBiasUniName = nullptr;
+ const char* minVarianceUniName = nullptr;
+
+ // setting uniforms
+ for (int i = 0; i < shadowFP.fNumNonAmbLights; i++) {
+ SkString lightDirOrPosUniNameStr("lightDir");
+ lightDirOrPosUniNameStr.appendf("%d", i);
+ SkString lightColorUniNameStr("lightColor");
+ lightColorUniNameStr.appendf("%d", i);
+ SkString lightIntensityUniNameStr("lightIntensity");
+ lightIntensityUniNameStr.appendf("%d", i);
+
+ SkString depthMapWidthUniNameStr("dmapWidth");
+ depthMapWidthUniNameStr.appendf("%d", i);
+ SkString depthMapHeightUniNameStr("dmapHeight");
+ depthMapHeightUniNameStr.appendf("%d", i);
+
+ fLightDirOrPosUni[i] = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec3f_GrSLType,
+ kDefault_GrSLPrecision,
+ lightDirOrPosUniNameStr.c_str(),
+ &lightDirOrPosUniName[i]);
+ fLightColorUni[i] = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec3f_GrSLType,
+ kDefault_GrSLPrecision,
+ lightColorUniNameStr.c_str(),
+ &lightColorUniName[i]);
+
+ fDepthMapWidthUni[i] = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kInt_GrSLType,
+ kDefault_GrSLPrecision,
+ depthMapWidthUniNameStr.c_str(),
+ &depthMapWidthUniName[i]);
+ fDepthMapHeightUni[i] = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kInt_GrSLType,
+ kDefault_GrSLPrecision,
+ depthMapHeightUniNameStr.c_str(),
+ &depthMapHeightUniName[i]);
+ }
+
+ fBiasingConstantUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType,
+ kDefault_GrSLPrecision,
+ "shadowBias", &shBiasUniName);
+ fMinVarianceUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType,
+ kDefault_GrSLPrecision,
+ "minVariance", &minVarianceUniName);
+
+ fWidthUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kInt_GrSLType,
+ kDefault_GrSLPrecision,
+ "width", &widthUniName);
+ fHeightUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kInt_GrSLType,
+ kDefault_GrSLPrecision,
+ "height", &heightUniName);
+
+ fAmbientColorUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec3f_GrSLType, kDefault_GrSLPrecision,
+ "AmbientColor", &ambientColorUniName);
+
+ SkString povDepthSampler("_povDepth");
+ SkString povDepth("povDepth");
+ this->emitChild(0, nullptr, &povDepthSampler, args);
+ fragBuilder->codeAppendf("vec4 %s = %s;", povDepth.c_str(), povDepthSampler.c_str());
+
+ SkString diffuseColorSampler("_inDiffuseColor");
+ SkString diffuseColor("inDiffuseColor");
+ this->emitChild(1, nullptr, &diffuseColorSampler, args);
+ fragBuilder->codeAppendf("vec4 %s = %s;", diffuseColor.c_str(),
+ diffuseColorSampler.c_str());
+
+ SkString depthMaps[SkShadowShader::kMaxNonAmbientLights];
+
+ fragBuilder->codeAppendf("vec4 resultDiffuseColor = %s;", diffuseColor.c_str());
+ fragBuilder->codeAppend ("vec3 totalLightColor = vec3(0);");
+
+ // probability that a fragment is lit. For each light, we multiply this by the
+ // light's color to get its contribution to totalLightColor.
+ fragBuilder->codeAppend ("float lightProbability;");
+
+ // coordinates of current fragment in world space
+ fragBuilder->codeAppend ("vec3 worldCor;");
+
+ // Multiply by 255 to transform from sampler coordinates to world
+ // coordinates (since 1 channel is 0xFF)
+ // Note: vMatrixCoord_0_1_Stage0 is the texture sampler coordinates.
+ fragBuilder->codeAppendf("worldCor = vec3(vMatrixCoord_0_1_Stage0 * "
+ "vec2(%s, %s), %s.b * 255);",
+ widthUniName, heightUniName, povDepth.c_str());
+
+ // Applies the offset indexing that goes from our view space into the light's space.
+ for (int i = 0; i < shadowFP.fNumNonAmbLights; i++) {
+ SkString povCoord("povCoord");
+ povCoord.appendf("%d", i);
+
+ SkString offset("offset");
+ offset.appendf("%d", i);
+ fragBuilder->codeAppendf("vec2 %s;", offset.c_str());
+
+ if (shadowFP.fIsPointLight[i]) {
+ fragBuilder->codeAppendf("vec3 fragToLight%d = %s - worldCor;",
+ i, lightDirOrPosUniName[i]);
+ fragBuilder->codeAppendf("float dist%d = length(fragToLight%d);",
+ i, i);
+ fragBuilder->codeAppendf("%s = vec2(-fragToLight%d) * povDepth.b;",
+ offset.c_str(), i);
+ fragBuilder->codeAppendf("fragToLight%d = normalize(fragToLight%d);",
+ i, i);
+ }
+
+ if (shadowFP.fIsRadialLight[i]) {
+ fragBuilder->codeAppendf("vec2 %s = vec2(vMatrixCoord_0_1_Stage0.x, "
+ "1 - vMatrixCoord_0_1_Stage0.y);\n",
+ povCoord.c_str());
+
+ fragBuilder->codeAppendf("%s = (%s) * 2.0 - 1.0 + (vec2(%s)/vec2(%s,%s) - 0.5)"
+ "* vec2(-2.0, 2.0);\n",
+ povCoord.c_str(), povCoord.c_str(),
+ lightDirOrPosUniName[i],
+ widthUniName, heightUniName);
+
+ fragBuilder->codeAppendf("float theta = atan(%s.y, %s.x);",
+ povCoord.c_str(), povCoord.c_str());
+ fragBuilder->codeAppendf("float r = length(%s);", povCoord.c_str());
+
+ // map output of atan to [0, 1]
+ fragBuilder->codeAppendf("%s.x = (theta + 3.1415) / (2.0 * 3.1415);",
+ povCoord.c_str());
+ fragBuilder->codeAppendf("%s.y = 0.0;", povCoord.c_str());
+ } else {
+ // note that we flip the y-coord of the offset and then later add
+ // a value just to the y-coord of povCoord. This is to account for
+ // the shifted origins from switching from raster into GPU.
+ if (shadowFP.fIsPointLight[i]) {
+                // the 0.375s are precalculated transform values, given that the depth
+                // maps for point lights are 4x the size (linearly) of the diffuse maps.
+ // The vec2(0.375, -0.375) is used to transform us to
+ // the center of the map.
+ fragBuilder->codeAppendf("vec2 %s = ((vec2(%s, %s) *"
+ "vMatrixCoord_0_1_Stage0 +"
+ "vec2(0,%s - %s)"
+ "+ %s) / (vec2(%s, %s))) +"
+ "vec2(0.375, -0.375);",
+ povCoord.c_str(),
+ widthUniName, heightUniName,
+ depthMapHeightUniName[i], heightUniName,
+ offset.c_str(),
+ depthMapWidthUniName[i],
+ depthMapWidthUniName[i]);
+ } else {
+ fragBuilder->codeAppendf("%s = vec2(%s) * povDepth.b * "
+ "vec2(255.0, -255.0);",
+ offset.c_str(), lightDirOrPosUniName[i]);
+
+ fragBuilder->codeAppendf("vec2 %s = ((vec2(%s, %s) *"
+ "vMatrixCoord_0_1_Stage0 +"
+ "vec2(0,%s - %s)"
+ "+ %s) / vec2(%s, %s));",
+ povCoord.c_str(),
+ widthUniName, heightUniName,
+ depthMapHeightUniName[i], heightUniName,
+ offset.c_str(),
+ depthMapWidthUniName[i],
+ depthMapWidthUniName[i]);
+ }
+ }
+
+ fragBuilder->appendTextureLookup(&depthMaps[i], args.fTexSamplers[i],
+ povCoord.c_str(),
+ kVec2f_GrSLType);
+ }
+
+ // helper variables for calculating shadowing
+
+ // variance of depth at this fragment in the context of surrounding area
+ // (area size and weighting dependent on blur size and type)
+ fragBuilder->codeAppendf("float variance;");
+
+ // the difference in depth between the user POV and light POV.
+ fragBuilder->codeAppendf("float d;");
+
+ // add up light contributions from all lights to totalLightColor
+ for (int i = 0; i < numLights; i++) {
+ fragBuilder->codeAppendf("lightProbability = 1;");
+
+ if (shadowFP.fIsRadialLight[i]) {
+ fragBuilder->codeAppend("totalLightColor = vec3(0);");
+
+ fragBuilder->codeAppend("vec2 tc = vec2(povCoord0.x, 0.0);");
+ fragBuilder->codeAppend("float depth = texture(uTextureSampler0_Stage1,"
+ "povCoord0).b * 2.0;");
+
+ fragBuilder->codeAppendf("lightProbability = step(r, depth);");
+
+                    // 2 is the maximum depth. If this is reached, we probably have
+                    // not intersected anything, so values at this depth should be unshadowed.
+ fragBuilder->codeAppendf("if (%s.b != 0 || depth == 2) {"
+ "lightProbability = 1.0; }",
+ povDepth.c_str());
+ } else {
+ // 1/512 == .00195... is less than half a pixel; imperceptible
+ fragBuilder->codeAppendf("if (%s.b <= %s.b + .001953125) {",
+ povDepth.c_str(), depthMaps[i].c_str());
+ if (blurAlgorithm == SkShadowParams::kVariance_ShadowType) {
+ // We mess with depth and depth^2 in their given scales.
+ // (i.e. between 0 and 1)
+ fragBuilder->codeAppendf("vec2 moments%d = vec2(%s.b, %s.g);",
+ i, depthMaps[i].c_str(), depthMaps[i].c_str());
+
+ // variance biasing lessens light bleeding
+ fragBuilder->codeAppendf("variance = max(moments%d.y - "
+ "(moments%d.x * moments%d.x),"
+ "%s);", i, i, i,
+ minVarianceUniName);
+
+ fragBuilder->codeAppendf("d = (%s.b) - moments%d.x;",
+ povDepth.c_str(), i);
+ fragBuilder->codeAppendf("lightProbability = "
+ "(variance / (variance + d * d));");
+
+ SkString clamp("clamp");
+ clamp.appendf("%d", i);
+
+ // choosing between light artifacts or correct shape shadows
+ // linstep
+ fragBuilder->codeAppendf("float %s = clamp((lightProbability - %s) /"
+ "(1 - %s), 0, 1);",
+ clamp.c_str(), shBiasUniName, shBiasUniName);
+
+ fragBuilder->codeAppendf("lightProbability = %s;", clamp.c_str());
+ } else {
+ fragBuilder->codeAppendf("if (%s.b >= %s.b) {",
+ povDepth.c_str(), depthMaps[i].c_str());
+ fragBuilder->codeAppendf("lightProbability = 1;");
+ fragBuilder->codeAppendf("} else { lightProbability = 0; }");
+ }
+
+ // VSM: The curved shadows near plane edges are artifacts from blurring
+ // lightDir.z is equal to the lightDir dot the surface normal.
+ fragBuilder->codeAppendf("}");
+ }
+
+ if (shadowFP.isPointLight(i)) {
+ fragBuilder->codeAppendf("totalLightColor += max(fragToLight%d.z, 0) * %s /"
+ "(1 + dist%d) * lightProbability;",
+ i, lightColorUniName[i], i);
+ } else {
+ fragBuilder->codeAppendf("totalLightColor += %s.z * %s * lightProbability;",
+ lightDirOrPosUniName[i],
+ lightColorUniName[i]);
+ }
+
+ fragBuilder->codeAppendf("totalLightColor += %s;", ambientColorUniName);
+ fragBuilder->codeAppendf("%s = resultDiffuseColor * vec4(totalLightColor, 1);",
+ args.fOutputColor);
+ }
+
+ }
+
+ static void GenKey(const GrProcessor& proc, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const ShadowFP& shadowFP = proc.cast<ShadowFP>();
+ b->add32(shadowFP.fNumNonAmbLights);
+ int isPLR = 0;
+ for (int i = 0; i < SkShadowShader::kMaxNonAmbientLights; i++) {
+ isPLR = isPLR | ((shadowFP.fIsPointLight[i] ? 1 : 0) << i);
+ isPLR = isPLR | ((shadowFP.fIsRadialLight[i] ? 1 : 0) << (i+4));
+ }
+ b->add32(isPLR);
+ b->add32(shadowFP.fShadowParams.fType);
+ }
+
+ protected:
+ void onSetData(const GrGLSLProgramDataManager& pdman, const GrProcessor& proc) override {
+ const ShadowFP &shadowFP = proc.cast<ShadowFP>();
+
+ for (int i = 0; i < shadowFP.numLights(); i++) {
+ const SkVector3& lightDirOrPos = shadowFP.lightDirOrPos(i);
+ if (lightDirOrPos != fLightDirOrPos[i]) {
+ pdman.set3fv(fLightDirOrPosUni[i], 1, &lightDirOrPos.fX);
+ fLightDirOrPos[i] = lightDirOrPos;
+ }
+
+ const SkColor3f& lightColor = shadowFP.lightColor(i);
+ if (lightColor != fLightColor[i]) {
+ pdman.set3fv(fLightColorUni[i], 1, &lightColor.fX);
+ fLightColor[i] = lightColor;
+ }
+
+ int depthMapWidth = shadowFP.depthMapWidth(i);
+ if (depthMapWidth != fDepthMapWidth[i]) {
+ pdman.set1i(fDepthMapWidthUni[i], depthMapWidth);
+ fDepthMapWidth[i] = depthMapWidth;
+ }
+ int depthMapHeight = shadowFP.depthMapHeight(i);
+ if (depthMapHeight != fDepthMapHeight[i]) {
+ pdman.set1i(fDepthMapHeightUni[i], depthMapHeight);
+ fDepthMapHeight[i] = depthMapHeight;
+ }
+ }
+
+ SkScalar biasingConstant = shadowFP.shadowParams().fBiasingConstant;
+ if (biasingConstant != fBiasingConstant) {
+ pdman.set1f(fBiasingConstantUni, biasingConstant);
+ fBiasingConstant = biasingConstant;
+ }
+
+ SkScalar minVariance = shadowFP.shadowParams().fMinVariance;
+ if (minVariance != fMinVariance) {
+ // transform variance from pixel-scale to normalized scale
+ pdman.set1f(fMinVarianceUni, minVariance / 65536.0f);
+ fMinVariance = minVariance / 65536.0f;
+ }
+
+ int width = shadowFP.width();
+ if (width != fWidth) {
+ pdman.set1i(fWidthUni, width);
+ fWidth = width;
+ }
+ int height = shadowFP.height();
+ if (height != fHeight) {
+ pdman.set1i(fHeightUni, height);
+ fHeight = height;
+ }
+
+ const SkColor3f& ambientColor = shadowFP.ambientColor();
+ if (ambientColor != fAmbientColor) {
+ pdman.set3fv(fAmbientColorUni, 1, &ambientColor.fX);
+ fAmbientColor = ambientColor;
+ }
+ }
+
+ private:
+ SkVector3 fLightDirOrPos[SkShadowShader::kMaxNonAmbientLights];
+ GrGLSLProgramDataManager::UniformHandle
+ fLightDirOrPosUni[SkShadowShader::kMaxNonAmbientLights];
+
+ SkColor3f fLightColor[SkShadowShader::kMaxNonAmbientLights];
+ GrGLSLProgramDataManager::UniformHandle
+ fLightColorUni[SkShadowShader::kMaxNonAmbientLights];
+
+ int fDepthMapWidth[SkShadowShader::kMaxNonAmbientLights];
+ GrGLSLProgramDataManager::UniformHandle
+ fDepthMapWidthUni[SkShadowShader::kMaxNonAmbientLights];
+
+ int fDepthMapHeight[SkShadowShader::kMaxNonAmbientLights];
+ GrGLSLProgramDataManager::UniformHandle
+ fDepthMapHeightUni[SkShadowShader::kMaxNonAmbientLights];
+
+ int fWidth;
+ GrGLSLProgramDataManager::UniformHandle fWidthUni;
+ int fHeight;
+ GrGLSLProgramDataManager::UniformHandle fHeightUni;
+
+ SkScalar fBiasingConstant;
+ GrGLSLProgramDataManager::UniformHandle fBiasingConstantUni;
+ SkScalar fMinVariance;
+ GrGLSLProgramDataManager::UniformHandle fMinVarianceUni;
+
+ SkColor3f fAmbientColor;
+ GrGLSLProgramDataManager::UniformHandle fAmbientColorUni;
+ };
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLShadowFP::GenKey(*this, caps, b);
+ }
+
+ const char* name() const override { return "shadowFP"; }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ inout->mulByUnknownFourComponents();
+ }
+ int32_t numLights() const { return fNumNonAmbLights; }
+ const SkColor3f& ambientColor() const { return fAmbientColor; }
+ bool isPointLight(int i) const {
+ SkASSERT(i < fNumNonAmbLights);
+ return fIsPointLight[i];
+ }
+ bool isRadialLight(int i) const {
+ SkASSERT(i < fNumNonAmbLights);
+ return fIsRadialLight[i];
+ }
+ const SkVector3& lightDirOrPos(int i) const {
+ SkASSERT(i < fNumNonAmbLights);
+ return fLightDirOrPos[i];
+ }
+ const SkVector3& lightColor(int i) const {
+ SkASSERT(i < fNumNonAmbLights);
+ return fLightColor[i];
+ }
+ int depthMapWidth(int i) const {
+ SkASSERT(i < fNumNonAmbLights);
+ return fDepthMapWidth[i];
+ }
+ int depthMapHeight(int i) const {
+ SkASSERT(i < fNumNonAmbLights);
+ return fDepthMapHeight[i];
+ }
+ int width() const {return fWidth; }
+ int height() const {return fHeight; }
+
+ const SkShadowParams& shadowParams() const {return fShadowParams; }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override { return new GLSLShadowFP; }
+
+ bool onIsEqual(const GrFragmentProcessor& proc) const override {
+ const ShadowFP& shadowFP = proc.cast<ShadowFP>();
+ if (fAmbientColor != shadowFP.fAmbientColor ||
+ fNumNonAmbLights != shadowFP.fNumNonAmbLights) {
+ return false;
+ }
+
+ if (fWidth != shadowFP.fWidth || fHeight != shadowFP.fHeight) {
+ return false;
+ }
+
+ for (int i = 0; i < fNumNonAmbLights; i++) {
+ if (fLightDirOrPos[i] != shadowFP.fLightDirOrPos[i] ||
+ fLightColor[i] != shadowFP.fLightColor[i] ||
+ fIsPointLight[i] != shadowFP.fIsPointLight[i] ||
+ fIsRadialLight[i] != shadowFP.fIsRadialLight[i]) {
+ return false;
+ }
+
+ if (fDepthMapWidth[i] != shadowFP.fDepthMapWidth[i] ||
+ fDepthMapHeight[i] != shadowFP.fDepthMapHeight[i]) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ int fNumNonAmbLights;
+
+ bool fIsPointLight[SkShadowShader::kMaxNonAmbientLights];
+ bool fIsRadialLight[SkShadowShader::kMaxNonAmbientLights];
+ SkVector3 fLightDirOrPos[SkShadowShader::kMaxNonAmbientLights];
+ SkColor3f fLightColor[SkShadowShader::kMaxNonAmbientLights];
+ GrTextureAccess fDepthMapAccess[SkShadowShader::kMaxNonAmbientLights];
+ sk_sp<GrTexture> fTexture[SkShadowShader::kMaxNonAmbientLights];
+
+ int fDepthMapWidth[SkShadowShader::kMaxNonAmbientLights];
+ int fDepthMapHeight[SkShadowShader::kMaxNonAmbientLights];
+
+ int fHeight;
+ int fWidth;
+
+ SkShadowParams fShadowParams;
+
+ SkColor3f fAmbientColor;
+};
+
+////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> SkShadowShaderImpl::asFragmentProcessor(const AsFPArgs& fpargs) const {
+
+ sk_sp<GrFragmentProcessor> povDepthFP = fPovDepthShader->asFragmentProcessor(fpargs);
+
+ sk_sp<GrFragmentProcessor> diffuseFP = fDiffuseShader->asFragmentProcessor(fpargs);
+
+ sk_sp<GrFragmentProcessor> shadowfp = sk_make_sp<ShadowFP>(std::move(povDepthFP),
+ std::move(diffuseFP),
+ std::move(fLights),
+ fDiffuseWidth, fDiffuseHeight,
+ fShadowParams, fpargs.fContext);
+ return shadowfp;
+}
+
+
+#endif
+
+////////////////////////////////////////////////////////////////////////////
+
+bool SkShadowShaderImpl::isOpaque() const {
+ return fDiffuseShader->isOpaque();
+}
+
+SkShadowShaderImpl::ShadowShaderContext::ShadowShaderContext(
+ const SkShadowShaderImpl& shader, const ContextRec& rec,
+ SkShader::Context* povDepthContext,
+ SkShader::Context* diffuseContext,
+ void* heapAllocated)
+ : INHERITED(shader, rec)
+ , fPovDepthContext(povDepthContext)
+ , fDiffuseContext(diffuseContext)
+ , fHeapAllocated(heapAllocated) {
+ bool isOpaque = shader.isOpaque();
+
+ // update fFlags
+ uint32_t flags = 0;
+ if (isOpaque && (255 == this->getPaintAlpha())) {
+ flags |= kOpaqueAlpha_Flag;
+ }
+
+ fFlags = flags;
+
+ const SkShadowShaderImpl& lightShader = static_cast<const SkShadowShaderImpl&>(fShader);
+
+ fNonAmbLightCnt = lightShader.fLights->numLights();
+ fShadowMapPixels = new SkPixmap[fNonAmbLightCnt];
+
+ for (int i = 0; i < fNonAmbLightCnt; i++) {
+ if (lightShader.fLights->light(i).type() == SkLights::Light::kDirectional_LightType) {
+ lightShader.fLights->light(i).getShadowMap()->
+ peekPixels(&fShadowMapPixels[i]);
+ }
+ }
+}
+
+SkShadowShaderImpl::ShadowShaderContext::~ShadowShaderContext() {
+ delete[] fShadowMapPixels;
+
+ // The dependencies have been created outside of the context on memory that was allocated by
+ // the onCreateContext() method. Call the destructors and free the memory.
+ fPovDepthContext->~Context();
+ fDiffuseContext->~Context();
+
+ sk_free(fHeapAllocated);
+}
+
+static inline SkPMColor convert(SkColor3f color, U8CPU a) {
+ if (color.fX <= 0.0f) {
+ color.fX = 0.0f;
+ } else if (color.fX >= 255.0f) {
+ color.fX = 255.0f;
+ }
+
+ if (color.fY <= 0.0f) {
+ color.fY = 0.0f;
+ } else if (color.fY >= 255.0f) {
+ color.fY = 255.0f;
+ }
+
+ if (color.fZ <= 0.0f) {
+ color.fZ = 0.0f;
+ } else if (color.fZ >= 255.0f) {
+ color.fZ = 255.0f;
+ }
+
+ return SkPreMultiplyARGB(a, (int) color.fX, (int) color.fY, (int) color.fZ);
+}
+
+// larger is better (fewer times we have to loop), but we shouldn't
+// take up too much stack-space (each one here costs 16 bytes)
+#define BUFFER_MAX 16
+void SkShadowShaderImpl::ShadowShaderContext::shadeSpan(int x, int y,
+ SkPMColor result[], int count) {
+ const SkShadowShaderImpl& lightShader = static_cast<const SkShadowShaderImpl&>(fShader);
+
+ SkPMColor diffuse[BUFFER_MAX];
+ SkPMColor povDepth[BUFFER_MAX];
+
+ do {
+ int n = SkTMin(count, BUFFER_MAX);
+
+ fDiffuseContext->shadeSpan(x, y, diffuse, n);
+ fPovDepthContext->shadeSpan(x, y, povDepth, n);
+
+ for (int i = 0; i < n; ++i) {
+ SkColor diffColor = SkUnPreMultiply::PMColorToColor(diffuse[i]);
+ SkColor povDepthColor = povDepth[i];
+
+ SkColor3f totalLight = lightShader.fLights->ambientLightColor();
+ // This is all done in linear unpremul color space (each component 0..255.0f though)
+
+ for (int l = 0; l < lightShader.fLights->numLights(); ++l) {
+ const SkLights::Light& light = lightShader.fLights->light(l);
+
+ int pvDepth = SkColorGetB(povDepthColor); // depth stored in blue channel
+
+ if (light.type() == SkLights::Light::kDirectional_LightType) {
+
+ int xOffset = SkScalarRoundToInt(light.dir().fX * pvDepth);
+ int yOffset = SkScalarRoundToInt(light.dir().fY * pvDepth);
+
+ int shX = SkClampMax(x + i + xOffset, light.getShadowMap()->width() - 1);
+ int shY = SkClampMax(y + yOffset, light.getShadowMap()->height() - 1);
+
+ int shDepth = 0;
+ int shDepthsq = 0;
+
+ // pixmaps that point to things have nonzero heights
+ if (fShadowMapPixels[l].height() > 0) {
+ uint32_t pix = *fShadowMapPixels[l].addr32(shX, shY);
+ SkColor shColor(pix);
+
+ shDepth = SkColorGetB(shColor);
+ shDepthsq = SkColorGetG(shColor) * 256;
+ } else {
+ // Make lights w/o a shadow map receive the full light contribution
+ shDepth = pvDepth;
+ }
+
+ SkScalar lightProb = 1.0f;
+ if (pvDepth < shDepth) {
+ if (lightShader.fShadowParams.fType ==
+ SkShadowParams::ShadowType::kVariance_ShadowType) {
+ int variance = SkMaxScalar(shDepthsq - shDepth * shDepth,
+ lightShader.fShadowParams.fMinVariance);
+ int d = pvDepth - shDepth;
+
+ lightProb = (SkScalar) variance / ((SkScalar) (variance + d * d));
+
+ SkScalar bias = lightShader.fShadowParams.fBiasingConstant;
+
+ lightProb = SkMaxScalar((lightProb - bias) / (1.0f - bias), 0.0f);
+ } else {
+ lightProb = 0.0f;
+ }
+ }
+
+ // assume object normals are pointing straight up
+ totalLight.fX += light.dir().fZ * light.color().fX * lightProb;
+ totalLight.fY += light.dir().fZ * light.color().fY * lightProb;
+ totalLight.fZ += light.dir().fZ * light.color().fZ * lightProb;
+
+ } else {
+ // right now we only expect directional and point light types.
+ SkASSERT(light.type() == SkLights::Light::kPoint_LightType);
+
+ int height = lightShader.fDiffuseHeight;
+
+ SkVector3 fragToLight = SkVector3::Make(light.pos().fX - x - i,
+ light.pos().fY - (height - y),
+ light.pos().fZ - pvDepth);
+
+ SkScalar dist = fragToLight.length();
+ SkScalar normalizedZ = fragToLight.fZ / dist;
+
+ SkScalar distAttenuation = light.intensity() / (1.0f + dist);
+
+ // assume object normals are pointing straight up
+ totalLight.fX += normalizedZ * light.color().fX * distAttenuation;
+ totalLight.fY += normalizedZ * light.color().fY * distAttenuation;
+ totalLight.fZ += normalizedZ * light.color().fZ * distAttenuation;
+ }
+ }
+
+ SkColor3f totalColor = SkColor3f::Make(SkColorGetR(diffColor) * totalLight.fX,
+ SkColorGetG(diffColor) * totalLight.fY,
+ SkColorGetB(diffColor) * totalLight.fZ);
+
+ result[i] = convert(totalColor, SkColorGetA(diffColor));
+ }
+
+ result += n;
+ x += n;
+ count -= n;
+ } while (count > 0);
+}
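+
+// Informational summary of the shadowing math above (not part of the upstream file): for
+// variance shadow maps, given the shadow-map moments E[d] and E[d^2] and the fragment depth t,
+// the probability that the fragment is lit is approximated by Chebyshev's one-sided bound
+//     P(lit) ~= sigma^2 / (sigma^2 + (t - E[d])^2),   sigma^2 = E[d^2] - E[d]^2,
+// which is what both the GLSL path and the raster shadeSpan() above compute, followed by a
+// linear step against the biasing constant to reduce light bleeding.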
+
+////////////////////////////////////////////////////////////////////////////
+
+#ifndef SK_IGNORE_TO_STRING
+void SkShadowShaderImpl::toString(SkString* str) const {
+ str->appendf("ShadowShader: ()");
+}
+#endif
+
+sk_sp<SkFlattenable> SkShadowShaderImpl::CreateProc(SkReadBuffer& buf) {
+
+ // Discarding SkShader flattenable params
+ bool hasLocalMatrix = buf.readBool();
+ SkAssertResult(!hasLocalMatrix);
+
+ sk_sp<SkLights> lights = SkLights::MakeFromBuffer(buf);
+
+ SkShadowParams params;
+ params.fMinVariance = buf.readScalar();
+ params.fBiasingConstant = buf.readScalar();
+ params.fType = (SkShadowParams::ShadowType) buf.readInt();
+ params.fShadowRadius = buf.readScalar();
+
+ int diffuseWidth = buf.readInt();
+ int diffuseHeight = buf.readInt();
+
+ sk_sp<SkShader> povDepthShader(buf.readFlattenable<SkShader>());
+ sk_sp<SkShader> diffuseShader(buf.readFlattenable<SkShader>());
+
+ return sk_make_sp<SkShadowShaderImpl>(std::move(povDepthShader),
+ std::move(diffuseShader),
+ std::move(lights),
+ diffuseWidth, diffuseHeight,
+ params);
+}
+
+void SkShadowShaderImpl::flatten(SkWriteBuffer& buf) const {
+ this->INHERITED::flatten(buf);
+
+ fLights->flatten(buf);
+
+ buf.writeScalar(fShadowParams.fMinVariance);
+ buf.writeScalar(fShadowParams.fBiasingConstant);
+ buf.writeInt(fShadowParams.fType);
+ buf.writeScalar(fShadowParams.fShadowRadius);
+
+ buf.writeInt(fDiffuseWidth);
+ buf.writeInt(fDiffuseHeight);
+
+ buf.writeFlattenable(fPovDepthShader.get());
+ buf.writeFlattenable(fDiffuseShader.get());
+}
+
+size_t SkShadowShaderImpl::onContextSize(const ContextRec& rec) const {
+ return sizeof(ShadowShaderContext);
+}
+
+SkShader::Context* SkShadowShaderImpl::onCreateContext(const ContextRec& rec,
+ void* storage) const {
+ size_t heapRequired = fPovDepthShader->contextSize(rec) +
+ fDiffuseShader->contextSize(rec);
+
+ void* heapAllocated = sk_malloc_throw(heapRequired);
+
+ void* povDepthContextStorage = heapAllocated;
+
+ SkShader::Context* povDepthContext =
+ fPovDepthShader->createContext(rec, povDepthContextStorage);
+
+ if (!povDepthContext) {
+ sk_free(heapAllocated);
+ return nullptr;
+ }
+
+ void* diffuseContextStorage = (char*)heapAllocated + fPovDepthShader->contextSize(rec);
+
+ SkShader::Context* diffuseContext = fDiffuseShader->createContext(rec, diffuseContextStorage);
+ if (!diffuseContext) {
+ sk_free(heapAllocated);
+ return nullptr;
+ }
+
+ return new (storage) ShadowShaderContext(*this, rec, povDepthContext, diffuseContext,
+ heapAllocated);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkShader> SkShadowShader::Make(sk_sp<SkShader> povDepthShader,
+ sk_sp<SkShader> diffuseShader,
+ sk_sp<SkLights> lights,
+ int diffuseWidth, int diffuseHeight,
+ const SkShadowParams& params) {
+ if (!povDepthShader || !diffuseShader) {
+ // TODO: Use paint's color in absence of a diffuseShader
+ // TODO: Use a default implementation of normalSource instead
+ return nullptr;
+ }
+
+ return sk_make_sp<SkShadowShaderImpl>(std::move(povDepthShader),
+ std::move(diffuseShader),
+ std::move(lights),
+ diffuseWidth, diffuseHeight,
+ params);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkShadowShader)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkShadowShaderImpl)
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END
+
+///////////////////////////////////////////////////////////////////////////////
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkShadowShader.h b/gfx/skia/skia/src/core/SkShadowShader.h
new file mode 100644
index 000000000..ea05ccae8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkShadowShader.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkShadowShader_DEFINED
+#define SkShadowShader_DEFINED
+
+#ifdef SK_EXPERIMENTAL_SHADOWING
+
+class SkLights;
+class SkShader;
+
+class SK_API SkShadowShader {
+public:
+ /** This shader combines the diffuse color in 'diffuseShader' with the shadows
+ * determined by the 'povDepthShader' and the shadow maps stored in each of the
+ * lights in 'lights'
+ *
+ * Please note that the shadow shader is required to be in Stage0, otherwise
+ * the texture coords will be wrong within the shader.
+ */
+ static sk_sp<SkShader> Make(sk_sp<SkShader> povDepthShader,
+ sk_sp<SkShader> diffuseShader,
+ sk_sp<SkLights> lights,
+ int diffuseWidth, int diffuseHeight,
+ const SkShadowParams& params);
+
+ // The shadow shader supports any number of ambient lights, but only
+ // 4 non-ambient lights (currently just refers to directional lights).
+ static constexpr int kMaxNonAmbientLights = 4;
+
+ SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP()
+};
+
+#endif
+#endif
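+
+// Illustrative usage sketch (hypothetical caller, not part of the upstream header), assuming
+// depthShader, diffuseShader, lights, diffuseW and diffuseH have been built elsewhere:
+//
+//   SkShadowParams params;
+//   params.fType = SkShadowParams::kVariance_ShadowType;
+//   params.fMinVariance = 1024.0f;       // pixel-scale variance floor (example value)
+//   params.fBiasingConstant = 0.3f;      // trades light bleeding against shadow shape
+//   sk_sp<SkShader> shadow = SkShadowShader::Make(depthShader, diffuseShader, lights,
+//                                                 diffuseW, diffuseH, params);
+//   if (!shadow) {
+//       // Make() returns nullptr if either input shader is missing.
+//   }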
diff --git a/gfx/skia/skia/src/core/SkSharedMutex.cpp b/gfx/skia/skia/src/core/SkSharedMutex.cpp
new file mode 100644
index 000000000..17714a718
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSharedMutex.cpp
@@ -0,0 +1,354 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkSharedMutex.h"
+
+#include "SkAtomics.h"
+#include "SkTypes.h"
+#include "SkSemaphore.h"
+
+#if !defined(__has_feature)
+ #define __has_feature(x) 0
+#endif
+
+#if __has_feature(thread_sanitizer)
+
+ /* Report that a lock has been created at address "lock". */
+ #define ANNOTATE_RWLOCK_CREATE(lock) \
+ AnnotateRWLockCreate(__FILE__, __LINE__, lock)
+
+ /* Report that the lock at address "lock" is about to be destroyed. */
+ #define ANNOTATE_RWLOCK_DESTROY(lock) \
+ AnnotateRWLockDestroy(__FILE__, __LINE__, lock)
+
+ /* Report that the lock at address "lock" has been acquired.
+ is_w=1 for writer lock, is_w=0 for reader lock. */
+ #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
+ AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)
+
+ /* Report that the lock at address "lock" is about to be released. */
+ #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
+ AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)
+
+ #if defined(DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK)
+ #if defined(__GNUC__)
+ #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak))
+ #else
+ /* TODO(glider): for Windows support we may want to change this macro in order
+ to prepend __declspec(selectany) to the annotations' declarations. */
+ #error weak annotations are not supported for your compiler
+ #endif
+ #else
+ #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
+ #endif
+
+ extern "C" {
+ void AnnotateRWLockCreate(
+ const char *file, int line,
+ const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+ void AnnotateRWLockDestroy(
+ const char *file, int line,
+ const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+ void AnnotateRWLockAcquired(
+ const char *file, int line,
+ const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+ void AnnotateRWLockReleased(
+ const char *file, int line,
+ const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+ }
+
+#else
+
+ #define ANNOTATE_RWLOCK_CREATE(lock)
+ #define ANNOTATE_RWLOCK_DESTROY(lock)
+ #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)
+ #define ANNOTATE_RWLOCK_RELEASED(lock, is_w)
+
+#endif
+
+#ifdef SK_DEBUG
+
+ #include "SkThreadID.h"
+ #include "SkTDArray.h"
+
+ class SkSharedMutex::ThreadIDSet {
+ public:
+ // Returns true if threadID is in the set.
+ bool find(SkThreadID threadID) const {
+ for (auto& t : fThreadIDs) {
+ if (t == threadID) return true;
+ }
+ return false;
+ }
+
+ // Returns true if did not already exist.
+ bool tryAdd(SkThreadID threadID) {
+ for (auto& t : fThreadIDs) {
+ if (t == threadID) return false;
+ }
+ fThreadIDs.append(1, &threadID);
+ return true;
+ }
+        // Returns true if the threadID already existed in the set (it is removed if found).
+ bool tryRemove(SkThreadID threadID) {
+ for (int i = 0; i < fThreadIDs.count(); ++i) {
+ if (fThreadIDs[i] == threadID) {
+ fThreadIDs.remove(i);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void swap(ThreadIDSet& other) {
+ fThreadIDs.swap(other.fThreadIDs);
+ }
+
+ int count() const {
+ return fThreadIDs.count();
+ }
+
+ private:
+ SkTDArray<SkThreadID> fThreadIDs;
+ };
+
+ SkSharedMutex::SkSharedMutex()
+ : fCurrentShared(new ThreadIDSet)
+ , fWaitingExclusive(new ThreadIDSet)
+ , fWaitingShared(new ThreadIDSet){
+ ANNOTATE_RWLOCK_CREATE(this);
+ }
+
+ SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); }
+
+ void SkSharedMutex::acquire() {
+ SkThreadID threadID(SkGetThreadID());
+ int currentSharedCount;
+ int waitingExclusiveCount;
+ {
+ SkAutoMutexAcquire l(&fMu);
+
+ if (!fWaitingExclusive->tryAdd(threadID)) {
+ SkDEBUGFAILF("Thread %lx already has an exclusive lock\n", threadID);
+ }
+
+ currentSharedCount = fCurrentShared->count();
+ waitingExclusiveCount = fWaitingExclusive->count();
+ }
+
+ if (currentSharedCount > 0 || waitingExclusiveCount > 1) {
+ fExclusiveQueue.wait();
+ }
+
+ ANNOTATE_RWLOCK_ACQUIRED(this, 1);
+ }
+
+ // Implementation Detail:
+    // The shared threads need two separate queues to keep the threads that were added after the
+ // exclusive lock separate from the threads added before.
+ void SkSharedMutex::release() {
+ ANNOTATE_RWLOCK_RELEASED(this, 1);
+ SkThreadID threadID(SkGetThreadID());
+ int sharedWaitingCount;
+ int exclusiveWaitingCount;
+ int sharedQueueSelect;
+ {
+ SkAutoMutexAcquire l(&fMu);
+ SkASSERT(0 == fCurrentShared->count());
+ if (!fWaitingExclusive->tryRemove(threadID)) {
+ SkDEBUGFAILF("Thread %lx did not have the lock held.\n", threadID);
+ }
+ exclusiveWaitingCount = fWaitingExclusive->count();
+ sharedWaitingCount = fWaitingShared->count();
+ fWaitingShared.swap(fCurrentShared);
+ sharedQueueSelect = fSharedQueueSelect;
+ if (sharedWaitingCount > 0) {
+ fSharedQueueSelect = 1 - fSharedQueueSelect;
+ }
+ }
+
+ if (sharedWaitingCount > 0) {
+ fSharedQueue[sharedQueueSelect].signal(sharedWaitingCount);
+ } else if (exclusiveWaitingCount > 0) {
+ fExclusiveQueue.signal();
+ }
+ }
+
+ void SkSharedMutex::assertHeld() const {
+ SkThreadID threadID(SkGetThreadID());
+ SkAutoMutexAcquire l(&fMu);
+ SkASSERT(0 == fCurrentShared->count());
+ SkASSERT(fWaitingExclusive->find(threadID));
+ }
+
+ void SkSharedMutex::acquireShared() {
+ SkThreadID threadID(SkGetThreadID());
+ int exclusiveWaitingCount;
+ int sharedQueueSelect;
+ {
+ SkAutoMutexAcquire l(&fMu);
+ exclusiveWaitingCount = fWaitingExclusive->count();
+ if (exclusiveWaitingCount > 0) {
+ if (!fWaitingShared->tryAdd(threadID)) {
+ SkDEBUGFAILF("Thread %lx was already waiting!\n", threadID);
+ }
+ } else {
+ if (!fCurrentShared->tryAdd(threadID)) {
+ SkDEBUGFAILF("Thread %lx already holds a shared lock!\n", threadID);
+ }
+ }
+ sharedQueueSelect = fSharedQueueSelect;
+ }
+
+ if (exclusiveWaitingCount > 0) {
+ fSharedQueue[sharedQueueSelect].wait();
+ }
+
+ ANNOTATE_RWLOCK_ACQUIRED(this, 0);
+ }
+
+ void SkSharedMutex::releaseShared() {
+ ANNOTATE_RWLOCK_RELEASED(this, 0);
+ SkThreadID threadID(SkGetThreadID());
+
+ int currentSharedCount;
+ int waitingExclusiveCount;
+ {
+ SkAutoMutexAcquire l(&fMu);
+ if (!fCurrentShared->tryRemove(threadID)) {
+ SkDEBUGFAILF("Thread %lx does not hold a shared lock.\n", threadID);
+ }
+ currentSharedCount = fCurrentShared->count();
+ waitingExclusiveCount = fWaitingExclusive->count();
+ }
+
+ if (0 == currentSharedCount && waitingExclusiveCount > 0) {
+ fExclusiveQueue.signal();
+ }
+ }
+
+ void SkSharedMutex::assertHeldShared() const {
+ SkThreadID threadID(SkGetThreadID());
+ SkAutoMutexAcquire l(&fMu);
+ SkASSERT(fCurrentShared->find(threadID));
+ }
+
+#else
+
+    // The fQueueCounts field holds three counts packed into a single int32_t so that they can
+    // be managed atomically. The counts must all be the same size, so each gets 10 bits,
+    // allowing a maximum count of 1024 per field.
+ //
+ // The three counts held in fQueueCounts are:
+ // * Shared - the number of shared lock holders currently running.
+ // * WaitingExclusive - the number of threads waiting for an exclusive lock.
+ // * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread
+ // to finish.
+ static const int kLogThreadCount = 10;
+
+ enum {
+ kSharedOffset = (0 * kLogThreadCount),
+ kWaitingExlusiveOffset = (1 * kLogThreadCount),
+ kWaitingSharedOffset = (2 * kLogThreadCount),
+ kSharedMask = ((1 << kLogThreadCount) - 1) << kSharedOffset,
+ kWaitingExclusiveMask = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset,
+ kWaitingSharedMask = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset,
+ };
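+
+    // Worked example of the packing (illustrative): with 2 shared holders, 1 waiting
+    // exclusive thread and 3 waiting shared threads, fQueueCounts holds
+    //     (3 << kWaitingSharedOffset) | (1 << kWaitingExlusiveOffset) | (2 << kSharedOffset)
+    // which is 0x00300402.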
+
+ SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { ANNOTATE_RWLOCK_CREATE(this); }
+ SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); }
+ void SkSharedMutex::acquire() {
+ // Increment the count of exclusive queue waiters.
+ int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExlusiveOffset,
+ sk_memory_order_acquire);
+
+ // If there are no other exclusive waiters and no shared threads are running then run
+ // else wait.
+ if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) {
+ fExclusiveQueue.wait();
+ }
+ ANNOTATE_RWLOCK_ACQUIRED(this, 1);
+ }
+
+ void SkSharedMutex::release() {
+ ANNOTATE_RWLOCK_RELEASED(this, 1);
+
+ int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
+ int32_t waitingShared;
+ int32_t newQueueCounts;
+ do {
+ newQueueCounts = oldQueueCounts;
+
+ // Decrement exclusive waiters.
+ newQueueCounts -= 1 << kWaitingExlusiveOffset;
+
+ // The number of threads waiting to acquire a shared lock.
+ waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset;
+
+ // If there are any move the counts of all the shared waiters to actual shared. They are
+ // going to run next.
+ if (waitingShared > 0) {
+
+ // Set waiting shared to zero.
+ newQueueCounts &= ~kWaitingSharedMask;
+
+ // Because this is the exclusive release, then there are zero readers. So, the bits
+ // for shared locks should be zero. Since those bits are zero, we can just |= in the
+ // waitingShared count instead of clearing with an &= and then |= the count.
+ newQueueCounts |= waitingShared << kSharedOffset;
+ }
+
+ } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
+ sk_memory_order_release, sk_memory_order_relaxed));
+
+ if (waitingShared > 0) {
+ // Run all the shared.
+ fSharedQueue.signal(waitingShared);
+ } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
+ // Run a single exclusive waiter.
+ fExclusiveQueue.signal();
+ }
+ }
+
+ void SkSharedMutex::acquireShared() {
+ int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
+ int32_t newQueueCounts;
+ do {
+ newQueueCounts = oldQueueCounts;
+ // If there are waiting exclusives then this shared lock waits else it runs.
+ if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
+ newQueueCounts += 1 << kWaitingSharedOffset;
+ } else {
+ newQueueCounts += 1 << kSharedOffset;
+ }
+ } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
+ sk_memory_order_acquire, sk_memory_order_relaxed));
+
+ // If there are waiting exclusives, then this shared waits until after it runs.
+ if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
+ fSharedQueue.wait();
+ }
+ ANNOTATE_RWLOCK_ACQUIRED(this, 0);
+
+ }
+
+ void SkSharedMutex::releaseShared() {
+ ANNOTATE_RWLOCK_RELEASED(this, 0);
+
+ // Decrement the shared count.
+ int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset,
+ sk_memory_order_release);
+
+ // If shared count is going to zero (because the old count == 1) and there are exclusive
+ // waiters, then run a single exclusive waiter.
+ if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1
+ && (oldQueueCounts & kWaitingExclusiveMask) > 0) {
+ fExclusiveQueue.signal();
+ }
+ }
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkSharedMutex.h b/gfx/skia/skia/src/core/SkSharedMutex.h
new file mode 100644
index 000000000..21c9f46d6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSharedMutex.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSharedLock_DEFINED
+#define SkSharedLock_DEFINED
+
+#include "SkAtomics.h"
+#include "SkSemaphore.h"
+#include "SkTypes.h"
+
+#ifdef SK_DEBUG
+ #include "SkMutex.h"
+ #include <memory>
+#endif // SK_DEBUG
+
+// There are two shared lock implementations: one for debug builds and one for high performance.
+// Both implement an interface similar to pthreads rwlocks. The high-performance
+// implementation is cribbed from Preshing's article:
+// http://preshing.com/20150316/semaphores-are-surprisingly-versatile/
+//
+// This lock does not obey strict queue ordering. It will always alternate between readers and
+// a single writer.
+class SkSharedMutex {
+public:
+ SkSharedMutex();
+ ~SkSharedMutex();
+ // Acquire lock for exclusive use.
+ void acquire();
+
+ // Release lock for exclusive use.
+ void release();
+
+ // Fail if exclusive is not held.
+ void assertHeld() const;
+
+ // Acquire lock for shared use.
+ void acquireShared();
+
+ // Release lock for shared use.
+ void releaseShared();
+
+ // Fail if shared lock not held.
+ void assertHeldShared() const;
+
+private:
+#ifdef SK_DEBUG
+ class ThreadIDSet;
+ std::unique_ptr<ThreadIDSet> fCurrentShared;
+ std::unique_ptr<ThreadIDSet> fWaitingExclusive;
+ std::unique_ptr<ThreadIDSet> fWaitingShared;
+ int fSharedQueueSelect{0};
+ mutable SkMutex fMu;
+ SkSemaphore fSharedQueue[2];
+ SkSemaphore fExclusiveQueue;
+#else
+ SkAtomic<int32_t> fQueueCounts;
+ SkSemaphore fSharedQueue;
+ SkSemaphore fExclusiveQueue;
+#endif // SK_DEBUG
+};
+
+#ifndef SK_DEBUG
+inline void SkSharedMutex::assertHeld() const {};
+inline void SkSharedMutex::assertHeldShared() const {};
+#endif // SK_DEBUG
+
+class SkAutoSharedMutexShared {
+public:
+ SkAutoSharedMutexShared(SkSharedMutex& lock) : fLock(lock) { lock.acquireShared(); }
+ ~SkAutoSharedMutexShared() { fLock.releaseShared(); }
+private:
+ SkSharedMutex& fLock;
+};
+
+#define SkAutoSharedMutexShared(...) SK_REQUIRE_LOCAL_VAR(SkAutoSharedMutexShared)
+
+#endif // SkSharedLock_DEFINED
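+
+// Illustrative usage sketch (hypothetical caller, not part of the upstream header); lookUp()
+// and insert() are placeholders for the data being protected:
+//
+//   SkSharedMutex gCacheMutex;
+//
+//   int read_entry(int key) {
+//       SkAutoSharedMutexShared scoped(gCacheMutex);   // shared (reader) lock for this scope
+//       return lookUp(key);
+//   }
+//
+//   void write_entry(int key, int value) {
+//       gCacheMutex.acquire();                         // exclusive (writer) lock
+//       insert(key, value);
+//       gCacheMutex.release();
+//   }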
diff --git a/gfx/skia/skia/src/core/SkSinglyLinkedList.h b/gfx/skia/skia/src/core/SkSinglyLinkedList.h
new file mode 100644
index 000000000..b01120461
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSinglyLinkedList.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkSinglyLinkedList_DEFINED
+#define SkSinglyLinkedList_DEFINED
+
+#include <utility>
+
+#include "SkTypes.h"
+
+template <typename T> class SkSinglyLinkedList {
+ struct Node;
+public:
+ SkSinglyLinkedList() : fHead(nullptr), fTail(nullptr) {}
+ ~SkSinglyLinkedList() { this->reset(); }
+ void reset() {
+ SkASSERT(fHead != nullptr || nullptr == fTail);
+ // Use a while loop rather than recursion to avoid stack overflow.
+ Node* node = fHead;
+ while (node) {
+ Node* next = node->fNext;
+ SkASSERT(next != nullptr || node == fTail);
+ delete node;
+ node = next;
+ }
+ fHead = nullptr;
+ fTail = nullptr;
+ }
+ T* back() { return fTail ? &fTail->fData : nullptr; }
+ T* front() { return fHead ? &fHead->fData : nullptr; }
+ bool empty() const { return fHead == nullptr; }
+ #ifdef SK_DEBUG
+ int count() { // O(n), debug only.
+ int count = 0;
+ for (Node* node = fHead; node; node = node->fNext) {
+ ++count;
+ }
+ return count;
+ }
+ #endif
+ void pop_front() {
+ if (Node* node = fHead) {
+ fHead = node->fNext;
+ delete node;
+ if (fHead == nullptr) {
+ fTail = nullptr;
+ }
+ }
+ }
+ template <class... Args> T* emplace_front(Args&&... args) {
+ Node* n = new Node(std::forward<Args>(args)...);
+ n->fNext = fHead;
+ if (!fTail) {
+ fTail = n;
+ SkASSERT(!fHead);
+ }
+ fHead = n;
+ return &n->fData;
+ }
+ template <class... Args> T* emplace_back(Args&&... args) {
+ Node* n = new Node(std::forward<Args>(args)...);
+ if (fTail) {
+ fTail->fNext = n;
+ } else {
+ fHead = n;
+ }
+ fTail = n;
+ return &n->fData;
+ }
+ class ConstIter {
+ public:
+ void operator++() { fNode = fNode->fNext; }
+ const T& operator*() const { return fNode->fData; }
+ bool operator!=(const ConstIter& rhs) const { return fNode != rhs.fNode; }
+ ConstIter(const Node* n) : fNode(n) {}
+ private:
+ const Node* fNode;
+ };
+ ConstIter begin() const { return ConstIter(fHead); }
+ ConstIter end() const { return ConstIter(nullptr); }
+
+private:
+ struct Node {
+ T fData;
+ Node* fNext;
+ template <class... Args>
+ Node(Args&&... args) : fData(std::forward<Args>(args)...), fNext(nullptr) {}
+ };
+ Node* fHead;
+ Node* fTail;
+ SkSinglyLinkedList(const SkSinglyLinkedList<T>&) = delete;
+ SkSinglyLinkedList& operator=(const SkSinglyLinkedList<T>&) = delete;
+};
+#endif // SkSinglyLinkedList_DEFINED
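+
+// Illustrative usage sketch (hypothetical caller, not part of the upstream header):
+//
+//   SkSinglyLinkedList<SkString> list;
+//   list.emplace_back("first");          // constructed in place at the tail
+//   list.emplace_front("zeroth");        // constructed in place at the head
+//   for (const SkString& s : list) {     // ConstIter supports range-based for
+//       SkDebugf("%s\n", s.c_str());
+//   }
+//   list.pop_front();                    // removes "zeroth"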
diff --git a/gfx/skia/skia/src/core/SkSmallAllocator.h b/gfx/skia/skia/src/core/SkSmallAllocator.h
new file mode 100644
index 000000000..9095fa57f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSmallAllocator.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2014 Google, Inc
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSmallAllocator_DEFINED
+#define SkSmallAllocator_DEFINED
+
+#include "SkTDArray.h"
+#include "SkTypes.h"
+
+#include <new>
+
+/*
+ * Template class for allocating small objects without additional heap memory
+ * allocations. kMaxObjects is a hard limit on the number of objects that can
+ * be allocated using this class. After that, attempts to create more objects
+ * with this class will assert and return nullptr.
+ *
+ * kTotalBytes is the total number of bytes provided for storage for all
+ * objects created by this allocator. If an object to be created is larger
+ * than the storage (minus storage already used), it will be allocated on the
+ * heap. This class's destructor will handle calling the destructor for each
+ * object it allocated and freeing its memory.
+ *
+ * Currently the class always aligns each allocation to 16 bytes to be safe, but in the
+ * future this may be reduced to only the alignment that is required per alloc.
+ */
+template<uint32_t kMaxObjects, size_t kTotalBytes>
+class SkSmallAllocator : SkNoncopyable {
+public:
+ SkSmallAllocator()
+ : fStorageUsed(0)
+ , fNumObjects(0)
+ {}
+
+ ~SkSmallAllocator() {
+ // Destruct in reverse order, in case an earlier object points to a
+ // later object.
+ while (fNumObjects > 0) {
+ fNumObjects--;
+ Rec* rec = &fRecs[fNumObjects];
+ rec->fKillProc(rec->fObj);
+ // Safe to do if fObj is in fStorage, since fHeapStorage will
+ // point to nullptr.
+ sk_free(rec->fHeapStorage);
+ }
+ }
+
+ /*
+ * Create a new object of type T. Its lifetime will be handled by this
+ * SkSmallAllocator.
+ * Note: If kMaxObjects have been created by this SkSmallAllocator, nullptr
+ * will be returned.
+ */
+ template<typename T, typename... Args>
+ T* createT(const Args&... args) {
+ void* buf = this->reserveT<T>();
+ if (nullptr == buf) {
+ return nullptr;
+ }
+ return new (buf) T(args...);
+ }
+
+ /*
+ * Reserve a specified amount of space (must be enough space for one T).
+ * The space will be in fStorage if there is room, or on the heap otherwise.
+ * Either way, this class will call ~T() in its destructor and free the heap
+ * allocation if necessary.
+ * Unlike createT(), this method will not call the constructor of T.
+ */
+ template<typename T> void* reserveT(size_t storageRequired = sizeof(T)) {
+ SkASSERT(fNumObjects < kMaxObjects);
+ SkASSERT(storageRequired >= sizeof(T));
+ if (kMaxObjects == fNumObjects) {
+ return nullptr;
+ }
+ const size_t storageRemaining = sizeof(fStorage) - fStorageUsed;
+ Rec* rec = &fRecs[fNumObjects];
+ if (storageRequired > storageRemaining) {
+ // Allocate on the heap. Ideally we want to avoid this situation.
+
+ // With the gm composeshader_bitmap2, storage required is 4476
+ // and storage remaining is 3392. Increasing the base storage
+ // causes google 3 tests to fail.
+
+ rec->fStorageSize = 0;
+ rec->fHeapStorage = sk_malloc_throw(storageRequired);
+ rec->fObj = static_cast<void*>(rec->fHeapStorage);
+ } else {
+ // There is space in fStorage.
+ rec->fStorageSize = storageRequired;
+ rec->fHeapStorage = nullptr;
+ rec->fObj = static_cast<void*>(fStorage.fBytes + fStorageUsed);
+ fStorageUsed += storageRequired;
+ }
+ rec->fKillProc = DestroyT<T>;
+ fNumObjects++;
+ return rec->fObj;
+ }
+
+ /*
+ * Free the memory reserved last without calling the destructor.
+ * Can be used in a nested way, i.e. after reserving A and B, calling
+ * freeLast once will free B and calling it again will free A.
+ */
+ void freeLast() {
+ SkASSERT(fNumObjects > 0);
+ Rec* rec = &fRecs[fNumObjects - 1];
+ sk_free(rec->fHeapStorage);
+ fStorageUsed -= rec->fStorageSize;
+
+ fNumObjects--;
+ }
+
+private:
+ struct Rec {
+ size_t fStorageSize; // 0 if allocated on heap
+ void* fObj;
+ void* fHeapStorage;
+ void (*fKillProc)(void*);
+ };
+
+ // Used to call the destructor for allocated objects.
+ template<typename T>
+ static void DestroyT(void* ptr) {
+ static_cast<T*>(ptr)->~T();
+ }
+
+ struct SK_STRUCT_ALIGN(16) Storage {
+ // we add kMaxObjects * 15 to account for the worst-case slop, where each allocation wasted
+ // 15 bytes (due to forcing each to be 16-byte aligned)
+ char fBytes[kTotalBytes + kMaxObjects * 15];
+ };
+
+ Storage fStorage;
+ // Number of bytes used so far.
+ size_t fStorageUsed;
+ uint32_t fNumObjects;
+ Rec fRecs[kMaxObjects];
+};
+
+#endif // SkSmallAllocator_DEFINED
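+
+// Illustrative usage sketch (hypothetical caller, not part of the upstream header); Foo, arg1
+// and arg2 are placeholders for any small object whose lifetime should end with the allocator:
+//
+//   SkSmallAllocator<4, 1024> allocator;            // up to 4 objects in 1024 bytes of storage
+//   Foo* foo = allocator.createT<Foo>(arg1, arg2);
+//   if (!foo) {
+//       // createT() returns nullptr once kMaxObjects objects have been created.
+//   }
+//   // The allocator's destructor calls ~Foo() and frees any heap spill-over.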
diff --git a/gfx/skia/skia/src/core/SkSpanProcs.cpp b/gfx/skia/skia/src/core/SkSpanProcs.cpp
new file mode 100644
index 000000000..32237d5f0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpanProcs.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkColorFilter.h"
+#include "SkHalf.h"
+#include "SkNx.h"
+#include "SkPaint.h"
+#include "SkPixmap.h"
+#include "SkPM4f.h"
+#include "SkPM4fPriv.h"
+#include "SkSpanProcs.h"
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static void load_l32(const SkPixmap& src, int x, int y, SkPM4f span[], int count) {
+ SkASSERT(count > 0);
+ const uint32_t* addr = src.addr32(x, y);
+ SkASSERT(src.addr32(x + count - 1, y));
+
+ for (int i = 0; i < count; ++i) {
+ swizzle_rb_if_bgra(Sk4f_fromL32(addr[i])).store(span[i].fVec);
+ }
+}
+
+static void load_s32(const SkPixmap& src, int x, int y, SkPM4f span[], int count) {
+ SkASSERT(count > 0);
+ const uint32_t* addr = src.addr32(x, y);
+ SkASSERT(src.addr32(x + count - 1, y));
+
+ for (int i = 0; i < count; ++i) {
+ swizzle_rb_if_bgra(Sk4f_fromS32(addr[i])).store(span[i].fVec);
+ }
+}
+
+static void load_f16(const SkPixmap& src, int x, int y, SkPM4f span[], int count) {
+ SkASSERT(count > 0);
+ const uint64_t* addr = src.addr64(x, y);
+ SkASSERT(src.addr64(x + count - 1, y));
+
+ for (int i = 0; i < count; ++i) {
+ SkHalfToFloat_finite_ftz(addr[i]).store(span[i].fVec);
+ }
+}
+
+SkLoadSpanProc SkLoadSpanProc_Choose(const SkImageInfo& info) {
+ switch (info.colorType()) {
+ case kN32_SkColorType:
+ return info.gammaCloseToSRGB() ? load_s32 : load_l32;
+ case kRGBA_F16_SkColorType:
+ return load_f16;
+ default:
+ return nullptr;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static void noop_filterspan(const SkPaint& paint, SkPM4f[], int) {
+ SkASSERT(!paint.getColorFilter());
+ SkASSERT(0xFF == paint.getAlpha());
+}
+
+static void alpha_filterspan(const SkPaint& paint, SkPM4f span[], int count) {
+ SkASSERT(!paint.getColorFilter());
+ SkASSERT(0xFF != paint.getAlpha());
+ const Sk4f scale = Sk4f(paint.getAlpha() * (1.0f/255));
+ for (int i = 0; i < count; ++i) {
+ (Sk4f::Load(span[i].fVec) * scale).store(span[i].fVec);
+ }
+}
+
+static void colorfilter_filterspan(const SkPaint& paint, SkPM4f span[], int count) {
+ SkASSERT(paint.getColorFilter());
+ SkASSERT(0xFF == paint.getAlpha());
+ paint.getColorFilter()->filterSpan4f(span, count, span);
+}
+
+static void colorfilter_alpha_filterspan(const SkPaint& paint, SkPM4f span[], int count) {
+ SkASSERT(paint.getColorFilter());
+ SkASSERT(0xFF != paint.getAlpha());
+ alpha_filterspan(paint, span, count);
+ paint.getColorFilter()->filterSpan4f(span, count, span);
+}
+
+SkFilterSpanProc SkFilterSpanProc_Choose(const SkPaint& paint) {
+ if (paint.getColorFilter()) {
+ return 0xFF == paint.getAlpha() ? colorfilter_filterspan : colorfilter_alpha_filterspan;
+ } else {
+ return 0xFF == paint.getAlpha() ? noop_filterspan : alpha_filterspan;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkSpanProcs.h b/gfx/skia/skia/src/core/SkSpanProcs.h
new file mode 100644
index 000000000..891f4e2d8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpanProcs.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSpanProcs_DEFINED
+#define SkSpanProcs_DEFINED
+
+#include "SkPM4f.h"
+
+struct SkImageInfo;
+class SkPaint;
+class SkPixmap;
+struct SkPM4f;
+
+typedef void (*SkLoadSpanProc)(const SkPixmap&, int x, int y, SkPM4f span[], int count);
+typedef void (*SkFilterSpanProc)(const SkPaint& paint, SkPM4f span[], int count);
+
+SkLoadSpanProc SkLoadSpanProc_Choose(const SkImageInfo&);
+SkFilterSpanProc SkFilterSpanProc_Choose(const SkPaint&);
+
+#endif
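+
+// Illustrative usage sketch (hypothetical caller, not part of the upstream header), assuming
+// pixmap, paint, x and y are already set up:
+//
+//   SkPM4f span[64];
+//   SkLoadSpanProc load = SkLoadSpanProc_Choose(pixmap.info());
+//   SkFilterSpanProc filter = SkFilterSpanProc_Choose(paint);
+//   if (load) {                                  // nullptr for unsupported color types
+//       load(pixmap, x, y, span, 64);
+//       filter(paint, span, 64);
+//   }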
diff --git a/gfx/skia/skia/src/core/SkSpecialImage.cpp b/gfx/skia/skia/src/core/SkSpecialImage.cpp
new file mode 100644
index 000000000..5d62c6bc6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpecialImage.cpp
@@ -0,0 +1,490 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkSpecialImage.h"
+#include "SkBitmap.h"
+#include "SkImage.h"
+#include "SkBitmapCache.h"
+#include "SkCanvas.h"
+#include "SkImage_Base.h"
+#include "SkSpecialSurface.h"
+#include "SkSurfacePriv.h"
+#include "SkPixelRef.h"
+
+#if SK_SUPPORT_GPU
+#include "GrContext.h"
+#include "GrTexture.h"
+#include "GrTextureParams.h"
+#include "SkGr.h"
+#include "SkGrPriv.h"
+#endif
+
+// Currently the raster imagefilters can only handle certain imageinfos. Call this to know if
+// a given info is supported.
+static bool valid_for_imagefilters(const SkImageInfo& info) {
+ // no support for other swizzles/depths yet
+ return info.colorType() == kN32_SkColorType;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+class SkSpecialImage_Base : public SkSpecialImage {
+public:
+ SkSpecialImage_Base(const SkIRect& subset, uint32_t uniqueID, const SkSurfaceProps* props)
+ : INHERITED(subset, uniqueID, props) {
+ }
+ ~SkSpecialImage_Base() override { }
+
+ virtual void onDraw(SkCanvas*, SkScalar x, SkScalar y, const SkPaint*) const = 0;
+
+ virtual bool onGetROPixels(SkBitmap*) const = 0;
+
+ virtual GrTexture* onPeekTexture() const { return nullptr; }
+
+ virtual SkColorSpace* onGetColorSpace() const = 0;
+
+#if SK_SUPPORT_GPU
+ virtual sk_sp<GrTexture> onAsTextureRef(GrContext* context) const = 0;
+#endif
+
+ virtual sk_sp<SkSpecialImage> onMakeSubset(const SkIRect& subset) const = 0;
+
+ virtual sk_sp<SkSpecialSurface> onMakeSurface(const SkImageFilter::OutputProperties& outProps,
+ const SkISize& size, SkAlphaType at) const = 0;
+
+ virtual sk_sp<SkImage> onMakeTightSubset(const SkIRect& subset) const = 0;
+
+ virtual sk_sp<SkSurface> onMakeTightSurface(const SkImageFilter::OutputProperties& outProps,
+ const SkISize& size, SkAlphaType at) const = 0;
+
+private:
+ typedef SkSpecialImage INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+static inline const SkSpecialImage_Base* as_SIB(const SkSpecialImage* image) {
+ return static_cast<const SkSpecialImage_Base*>(image);
+}
+
+SkSpecialImage::SkSpecialImage(const SkIRect& subset,
+ uint32_t uniqueID,
+ const SkSurfaceProps* props)
+ : fProps(SkSurfacePropsCopyOrDefault(props))
+ , fSubset(subset)
+ , fUniqueID(kNeedNewImageUniqueID_SpecialImage == uniqueID ? SkNextID::ImageID() : uniqueID) {
+}
+
+sk_sp<SkSpecialImage> SkSpecialImage::makeTextureImage(GrContext* context) {
+#if SK_SUPPORT_GPU
+ if (!context) {
+ return nullptr;
+ }
+ if (GrTexture* peek = as_SIB(this)->onPeekTexture()) {
+ return peek->getContext() == context ? sk_sp<SkSpecialImage>(SkRef(this)) : nullptr;
+ }
+
+ SkBitmap bmp;
+ // At this point, we are definitely not texture-backed, so we must be raster or generator
+ // backed. If we remove the special-wrapping-an-image subclass, we may be able to assert that
+ // we are strictly raster-backed (i.e. generator images become raster when they are specialized)
+ // in which case getROPixels could turn into peekPixels...
+ if (!this->getROPixels(&bmp)) {
+ return nullptr;
+ }
+
+ if (bmp.empty()) {
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeEmpty(), bmp, &this->props());
+ }
+
+ sk_sp<GrTexture> resultTex(GrRefCachedBitmapTexture(context,
+ bmp,
+ GrTextureParams::ClampNoFilter(),
+ SkSourceGammaTreatment::kRespect));
+ if (!resultTex) {
+ return nullptr;
+ }
+
+ return SkSpecialImage::MakeFromGpu(SkIRect::MakeWH(resultTex->width(), resultTex->height()),
+ this->uniqueID(),
+ resultTex, sk_ref_sp(this->getColorSpace()), &this->props(),
+ this->alphaType());
+#else
+ return nullptr;
+#endif
+}
+
+void SkSpecialImage::draw(SkCanvas* canvas, SkScalar x, SkScalar y, const SkPaint* paint) const {
+ return as_SIB(this)->onDraw(canvas, x, y, paint);
+}
+
+bool SkSpecialImage::getROPixels(SkBitmap* bm) const {
+ return as_SIB(this)->onGetROPixels(bm);
+}
+
+bool SkSpecialImage::isTextureBacked() const {
+#if SK_SUPPORT_GPU
+ return as_SIB(this)->onPeekTexture() && as_SIB(this)->onPeekTexture()->getContext();
+#else
+ return false;
+#endif
+}
+
+GrContext* SkSpecialImage::getContext() const {
+#if SK_SUPPORT_GPU
+ GrTexture* texture = as_SIB(this)->onPeekTexture();
+
+ if (texture) {
+ return texture->getContext();
+ }
+#endif
+ return nullptr;
+}
+
+SkColorSpace* SkSpecialImage::getColorSpace() const {
+ return as_SIB(this)->onGetColorSpace();
+}
+
+#if SK_SUPPORT_GPU
+sk_sp<GrTexture> SkSpecialImage::asTextureRef(GrContext* context) const {
+ return as_SIB(this)->onAsTextureRef(context);
+}
+#endif
+
+sk_sp<SkSpecialSurface> SkSpecialImage::makeSurface(const SkImageFilter::OutputProperties& outProps,
+ const SkISize& size, SkAlphaType at) const {
+ return as_SIB(this)->onMakeSurface(outProps, size, at);
+}
+
+sk_sp<SkSurface> SkSpecialImage::makeTightSurface(const SkImageFilter::OutputProperties& outProps,
+ const SkISize& size, SkAlphaType at) const {
+ return as_SIB(this)->onMakeTightSurface(outProps, size, at);
+}
+
+sk_sp<SkSpecialImage> SkSpecialImage::makeSubset(const SkIRect& subset) const {
+ return as_SIB(this)->onMakeSubset(subset);
+}
+
+sk_sp<SkImage> SkSpecialImage::makeTightSubset(const SkIRect& subset) const {
+ return as_SIB(this)->onMakeTightSubset(subset);
+}
+
+#ifdef SK_DEBUG
+static bool rect_fits(const SkIRect& rect, int width, int height) {
+ if (0 == width && 0 == height) {
+ SkASSERT(0 == rect.fLeft && 0 == rect.fRight && 0 == rect.fTop && 0 == rect.fBottom);
+ return true;
+ }
+
+ return rect.fLeft >= 0 && rect.fLeft < width && rect.fLeft < rect.fRight &&
+ rect.fRight >= 0 && rect.fRight <= width &&
+ rect.fTop >= 0 && rect.fTop < height && rect.fTop < rect.fBottom &&
+ rect.fBottom >= 0 && rect.fBottom <= height;
+}
+#endif
+
+sk_sp<SkSpecialImage> SkSpecialImage::MakeFromImage(const SkIRect& subset,
+ sk_sp<SkImage> image,
+ const SkSurfaceProps* props) {
+ SkASSERT(rect_fits(subset, image->width(), image->height()));
+
+#if SK_SUPPORT_GPU
+ if (GrTexture* texture = as_IB(image)->peekTexture()) {
+ return MakeFromGpu(subset, image->uniqueID(), sk_ref_sp(texture),
+ sk_ref_sp(as_IB(image)->onImageInfo().colorSpace()), props);
+ } else
+#endif
+ {
+ SkBitmap bm;
+ if (as_IB(image)->getROPixels(&bm)) {
+ return MakeFromRaster(subset, bm, props);
+ }
+ }
+ return nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkSpecialImage_Raster : public SkSpecialImage_Base {
+public:
+ SkSpecialImage_Raster(const SkIRect& subset, const SkBitmap& bm, const SkSurfaceProps* props)
+ : INHERITED(subset, bm.getGenerationID(), props)
+ , fBitmap(bm)
+ {
+ SkASSERT(bm.pixelRef());
+
+ // We have to lock now, while bm is still in scope, since it may have come from our
+ // cache, which means we need to keep it locked until we (the special) are done, since
+ // we cannot re-generate the cache entry (if bm came from a generator).
+ fBitmap.lockPixels();
+ SkASSERT(fBitmap.getPixels());
+ }
+
+ SkAlphaType alphaType() const override { return fBitmap.alphaType(); }
+
+ size_t getSize() const override { return fBitmap.getSize(); }
+
+ void onDraw(SkCanvas* canvas, SkScalar x, SkScalar y, const SkPaint* paint) const override {
+ SkRect dst = SkRect::MakeXYWH(x, y,
+ this->subset().width(), this->subset().height());
+
+ canvas->drawBitmapRect(fBitmap, this->subset(),
+ dst, paint, SkCanvas::kStrict_SrcRectConstraint);
+ }
+
+ bool onGetROPixels(SkBitmap* bm) const override {
+ *bm = fBitmap;
+ return true;
+ }
+
+ SkColorSpace* onGetColorSpace() const override {
+ return fBitmap.colorSpace();
+ }
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrTexture> onAsTextureRef(GrContext* context) const override {
+ if (context) {
+ return sk_ref_sp(GrRefCachedBitmapTexture(context,
+ fBitmap,
+ GrTextureParams::ClampNoFilter(),
+ SkSourceGammaTreatment::kRespect));
+ }
+
+ return nullptr;
+ }
+#endif
+
+// TODO: The raster implementations of image filters all currently assume that the pixels are
+// legacy N32. Until they actually check the format and operate on sRGB or F16 data appropriately,
+// we can't enable this. (They will continue to produce incorrect results, but less so).
+#define RASTER_IMAGE_FILTERS_SUPPORT_SRGB_AND_F16 0
+
+ sk_sp<SkSpecialSurface> onMakeSurface(const SkImageFilter::OutputProperties& outProps,
+ const SkISize& size, SkAlphaType at) const override {
+#if RASTER_IMAGE_FILTERS_SUPPORT_SRGB_AND_F16
+ SkColorSpace* colorSpace = outProps.colorSpace();
+#else
+ SkColorSpace* colorSpace = nullptr;
+#endif
+ SkColorType colorType = colorSpace && colorSpace->gammaIsLinear()
+ ? kRGBA_F16_SkColorType : kN32_SkColorType;
+ SkImageInfo info = SkImageInfo::Make(size.width(), size.height(), colorType, at,
+ sk_ref_sp(colorSpace));
+ return SkSpecialSurface::MakeRaster(info, nullptr);
+ }
+
+ sk_sp<SkSpecialImage> onMakeSubset(const SkIRect& subset) const override {
+ SkBitmap subsetBM;
+
+ if (!fBitmap.extractSubset(&subsetBM, subset)) {
+ return nullptr;
+ }
+
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(subset.width(), subset.height()),
+ subsetBM,
+ &this->props());
+ }
+
+ sk_sp<SkImage> onMakeTightSubset(const SkIRect& subset) const override {
+ SkBitmap subsetBM;
+
+ if (!fBitmap.extractSubset(&subsetBM, subset)) {
+ return nullptr;
+ }
+
+ return SkImage::MakeFromBitmap(subsetBM);
+ }
+
+ sk_sp<SkSurface> onMakeTightSurface(const SkImageFilter::OutputProperties& outProps,
+ const SkISize& size, SkAlphaType at) const override {
+#if RASTER_IMAGE_FILTERS_SUPPORT_SRGB_AND_F16
+ SkColorSpace* colorSpace = outProps.colorSpace();
+#else
+ SkColorSpace* colorSpace = nullptr;
+#endif
+ SkColorType colorType = colorSpace && colorSpace->gammaIsLinear()
+ ? kRGBA_F16_SkColorType : kN32_SkColorType;
+ SkImageInfo info = SkImageInfo::Make(size.width(), size.height(), colorType, at,
+ sk_ref_sp(colorSpace));
+ return SkSurface::MakeRaster(info);
+ }
+
+private:
+ SkBitmap fBitmap;
+
+ typedef SkSpecialImage_Base INHERITED;
+};
+
+sk_sp<SkSpecialImage> SkSpecialImage::MakeFromRaster(const SkIRect& subset,
+ const SkBitmap& bm,
+ const SkSurfaceProps* props) {
+ SkASSERT(rect_fits(subset, bm.width(), bm.height()));
+
+ if (!bm.pixelRef()) {
+ return nullptr;
+ }
+
+ const SkBitmap* srcBM = &bm;
+ SkBitmap tmpStorage;
+ // ImageFilters only handle N32 at the moment, so force our src to be that
+ if (!valid_for_imagefilters(bm.info())) {
+ if (!bm.copyTo(&tmpStorage, kN32_SkColorType)) {
+ return nullptr;
+ }
+ srcBM = &tmpStorage;
+ }
+ return sk_make_sp<SkSpecialImage_Raster>(subset, *srcBM, props);
+}
+
+#if SK_SUPPORT_GPU
+///////////////////////////////////////////////////////////////////////////////
+#include "GrTexture.h"
+#include "SkImage_Gpu.h"
+
+class SkSpecialImage_Gpu : public SkSpecialImage_Base {
+public:
+ SkSpecialImage_Gpu(const SkIRect& subset,
+ uint32_t uniqueID, sk_sp<GrTexture> tex, SkAlphaType at,
+ sk_sp<SkColorSpace> colorSpace, const SkSurfaceProps* props)
+ : INHERITED(subset, uniqueID, props)
+ , fTexture(std::move(tex))
+ , fAlphaType(at)
+ , fColorSpace(std::move(colorSpace))
+ , fAddedRasterVersionToCache(false) {
+ }
+
+ ~SkSpecialImage_Gpu() override {
+ if (fAddedRasterVersionToCache.load()) {
+ SkNotifyBitmapGenIDIsStale(this->uniqueID());
+ }
+ }
+
+ SkAlphaType alphaType() const override { return fAlphaType; }
+
+ size_t getSize() const override { return fTexture->gpuMemorySize(); }
+
+ void onDraw(SkCanvas* canvas, SkScalar x, SkScalar y, const SkPaint* paint) const override {
+ SkRect dst = SkRect::MakeXYWH(x, y,
+ this->subset().width(), this->subset().height());
+
+ auto img = sk_sp<SkImage>(new SkImage_Gpu(fTexture->width(), fTexture->height(),
+ this->uniqueID(), fAlphaType, fTexture.get(),
+ fColorSpace, SkBudgeted::kNo));
+
+ canvas->drawImageRect(img, this->subset(),
+ dst, paint, SkCanvas::kStrict_SrcRectConstraint);
+ }
+
+ GrTexture* onPeekTexture() const override { return fTexture.get(); }
+
+ sk_sp<GrTexture> onAsTextureRef(GrContext*) const override { return fTexture; }
+
+ bool onGetROPixels(SkBitmap* dst) const override {
+ if (SkBitmapCache::Find(this->uniqueID(), dst)) {
+ SkASSERT(dst->getGenerationID() == this->uniqueID());
+ SkASSERT(dst->isImmutable());
+ SkASSERT(dst->getPixels());
+ return true;
+ }
+
+ SkImageInfo info = SkImageInfo::MakeN32(this->width(), this->height(),
+ this->alphaType(), fColorSpace);
+
+ if (!dst->tryAllocPixels(info)) {
+ return false;
+ }
+
+ if (!fTexture->readPixels(0, 0, dst->width(), dst->height(), kSkia8888_GrPixelConfig,
+ dst->getPixels(), dst->rowBytes())) {
+ return false;
+ }
+
+ dst->pixelRef()->setImmutableWithID(this->uniqueID());
+ SkBitmapCache::Add(this->uniqueID(), *dst);
+ fAddedRasterVersionToCache.store(true);
+ return true;
+ }
+
+ SkColorSpace* onGetColorSpace() const override {
+ return fColorSpace.get();
+ }
+
+ sk_sp<SkSpecialSurface> onMakeSurface(const SkImageFilter::OutputProperties& outProps,
+ const SkISize& size, SkAlphaType at) const override {
+ if (!fTexture->getContext()) {
+ return nullptr;
+ }
+
+ SkColorSpace* colorSpace = outProps.colorSpace();
+ return SkSpecialSurface::MakeRenderTarget(
+ fTexture->getContext(), size.width(), size.height(),
+ GrRenderableConfigForColorSpace(colorSpace), sk_ref_sp(colorSpace));
+ }
+
+ sk_sp<SkSpecialImage> onMakeSubset(const SkIRect& subset) const override {
+ return SkSpecialImage::MakeFromGpu(subset,
+ this->uniqueID(),
+ fTexture,
+ fColorSpace,
+ &this->props(),
+ fAlphaType);
+ }
+
+ sk_sp<SkImage> onMakeTightSubset(const SkIRect& subset) const override {
+ if (0 == subset.fLeft && 0 == subset.fTop &&
+ fTexture->width() == subset.width() &&
+ fTexture->height() == subset.height()) {
+ // The existing GrTexture is already tight so reuse it in the SkImage
+ return sk_make_sp<SkImage_Gpu>(fTexture->width(), fTexture->height(),
+ kNeedNewImageUniqueID,
+ fAlphaType, fTexture.get(), fColorSpace,
+ SkBudgeted::kYes);
+ }
+
+ GrContext* ctx = fTexture->getContext();
+ GrSurfaceDesc desc = fTexture->desc();
+ desc.fWidth = subset.width();
+ desc.fHeight = subset.height();
+
+ sk_sp<GrTexture> subTx(ctx->textureProvider()->createTexture(desc, SkBudgeted::kYes));
+ if (!subTx) {
+ return nullptr;
+ }
+ ctx->copySurface(subTx.get(), fTexture.get(), subset, SkIPoint::Make(0, 0));
+ return sk_make_sp<SkImage_Gpu>(desc.fWidth, desc.fHeight, kNeedNewImageUniqueID,
+ fAlphaType, subTx.get(), fColorSpace, SkBudgeted::kYes);
+ }
+
+ sk_sp<SkSurface> onMakeTightSurface(const SkImageFilter::OutputProperties& outProps,
+ const SkISize& size, SkAlphaType at) const override {
+ SkColorSpace* colorSpace = outProps.colorSpace();
+ SkColorType colorType = colorSpace && colorSpace->gammaIsLinear()
+ ? kRGBA_F16_SkColorType : kRGBA_8888_SkColorType;
+ SkImageInfo info = SkImageInfo::Make(size.width(), size.height(), colorType, at,
+ sk_ref_sp(colorSpace));
+ return SkSurface::MakeRenderTarget(fTexture->getContext(), SkBudgeted::kYes, info);
+ }
+
+private:
+ sk_sp<GrTexture> fTexture;
+ const SkAlphaType fAlphaType;
+ sk_sp<SkColorSpace> fColorSpace;
+ mutable SkAtomic<bool> fAddedRasterVersionToCache;
+
+ typedef SkSpecialImage_Base INHERITED;
+};
+
+sk_sp<SkSpecialImage> SkSpecialImage::MakeFromGpu(const SkIRect& subset,
+ uint32_t uniqueID,
+ sk_sp<GrTexture> tex,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* props,
+ SkAlphaType at) {
+ SkASSERT(rect_fits(subset, tex->width(), tex->height()));
+ return sk_make_sp<SkSpecialImage_Gpu>(subset, uniqueID, std::move(tex), at,
+ std::move(colorSpace), props);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkSpecialImage.h b/gfx/skia/skia/src/core/SkSpecialImage.h
new file mode 100644
index 000000000..c1f3791ed
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpecialImage.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSpecialImage_DEFINED
+#define SkSpecialImage_DEFINED
+
+#include "SkNextID.h"
+#include "SkRefCnt.h"
+#include "SkSurfaceProps.h"
+
+#include "SkImageFilter.h" // for OutputProperties
+#include "SkImageInfo.h" // for SkAlphaType
+
+class GrContext;
+class GrTexture;
+class SkBitmap;
+class SkCanvas;
+class SkImage;
+struct SkImageInfo;
+class SkPaint;
+class SkPixmap;
+class SkSpecialSurface;
+class SkSurface;
+
+enum {
+ kNeedNewImageUniqueID_SpecialImage = 0
+};
+
+/**
+ * This is a restricted form of SkImage solely intended for internal use. It
+ * differs from SkImage in that:
+ * - it can only be backed by raster or gpu (no generators)
+ * - it can be backed by a GrTexture larger than its nominal bounds
+ * - it can't be drawn tiled
+ * - it can't be drawn with MIPMAPs
+ * It is similar to SkImage in that it abstracts how the pixels are stored/represented.
+ *
+ * Note: the contents of the backing storage outside of the subset rect are undefined.
+ */
+class SkSpecialImage : public SkRefCnt {
+public:
+ typedef void* ReleaseContext;
+ typedef void(*RasterReleaseProc)(void* pixels, ReleaseContext);
+
+ const SkSurfaceProps& props() const { return fProps; }
+
+ int width() const { return fSubset.width(); }
+ int height() const { return fSubset.height(); }
+ const SkIRect& subset() const { return fSubset; }
+ SkColorSpace* getColorSpace() const;
+
+ uint32_t uniqueID() const { return fUniqueID; }
+ virtual SkAlphaType alphaType() const = 0;
+ virtual size_t getSize() const = 0;
+
+ /**
+ * Ensures that a special image is backed by a texture (when GrContext is non-null). If no
+ * transformation is required, the returned image may be the same as this special image.
+ * If this special image is from a different GrContext, this will fail.
+ */
+ sk_sp<SkSpecialImage> makeTextureImage(GrContext*);
+
+ /**
+ * Draw this SpecialImage into the canvas.
+ */
+ void draw(SkCanvas*, SkScalar x, SkScalar y, const SkPaint*) const;
+
+ static sk_sp<SkSpecialImage> MakeFromImage(const SkIRect& subset,
+ sk_sp<SkImage>,
+ const SkSurfaceProps* = nullptr);
+ static sk_sp<SkSpecialImage> MakeFromRaster(const SkIRect& subset,
+ const SkBitmap&,
+ const SkSurfaceProps* = nullptr);
+#if SK_SUPPORT_GPU
+ static sk_sp<SkSpecialImage> MakeFromGpu(const SkIRect& subset,
+ uint32_t uniqueID,
+ sk_sp<GrTexture>,
+ sk_sp<SkColorSpace>,
+ const SkSurfaceProps* = nullptr,
+ SkAlphaType at = kPremul_SkAlphaType);
+#endif
+
+ /**
+ * Create a new special surface with a backend that is compatible with this special image.
+ */
+ sk_sp<SkSpecialSurface> makeSurface(const SkImageFilter::OutputProperties& outProps,
+ const SkISize& size,
+ SkAlphaType at = kPremul_SkAlphaType) const;
+
+ /**
+ * Create a new surface with a backend that is compatible with this special image.
+ * TODO: switch this to makeSurface once we've resolved the naming issue
+ */
+ sk_sp<SkSurface> makeTightSurface(const SkImageFilter::OutputProperties& outProps,
+ const SkISize& size,
+ SkAlphaType at = kPremul_SkAlphaType) const;
+
+ /**
+ * Extract a subset of this special image and return it as a special image.
+ * It may or may not point to the same backing memory.
+ */
+ sk_sp<SkSpecialImage> makeSubset(const SkIRect& subset) const;
+
+ /**
+ * Extract a subset of this special image and return it as an SkImage.
+ * It may or may not point to the same backing memory.
+ * TODO: switch this to makeSurface once we've resolved the naming issue
+ */
+ sk_sp<SkImage> makeTightSubset(const SkIRect& subset) const;
+
+ // TODO: hide this when GrLayerHoister uses SkSpecialImages more fully (see skbug.com/5063)
+ /**
+ * If the SpecialImage is backed by a gpu texture, return true.
+ */
+ bool isTextureBacked() const;
+
+ /**
+ * Return the GrContext if the SkSpecialImage is GrTexture-backed
+ */
+ GrContext* getContext() const;
+
+#if SK_SUPPORT_GPU
+ /**
+ * Regardless of the underlying backing store, return the contents as a GrTexture.
+ * The active portion of the texture can be retrieved via 'subset'.
+ */
+ sk_sp<GrTexture> asTextureRef(GrContext*) const;
+#endif
+
+ // TODO: hide this when the imagefilters all have a consistent draw path (see skbug.com/5063)
+ /**
+ * Regardless of the underlying backing store, return the contents as an SkBitmap
+ *
+ * The returned ImageInfo represents the backing memory. Use 'subset'
+ * to get the active portion's dimensions.
+ */
+ bool getROPixels(SkBitmap*) const;
+
+protected:
+ SkSpecialImage(const SkIRect& subset, uint32_t uniqueID, const SkSurfaceProps*);
+
+private:
+ const SkSurfaceProps fProps;
+ const SkIRect fSubset;
+ const uint32_t fUniqueID;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
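
A rough sketch of the round trip this header enables (wrap source pixels, draw into a compatible scratch surface, snap the result). 'blit_offset' and its error handling are illustrative; 'outProps' would come from the calling image filter.

    static sk_sp<SkSpecialImage> blit_offset(const SkBitmap& srcBM,
                                             const SkImageFilter::OutputProperties& outProps,
                                             int dx, int dy) {
        sk_sp<SkSpecialImage> src = SkSpecialImage::MakeFromRaster(
                SkIRect::MakeWH(srcBM.width(), srcBM.height()), srcBM);
        if (!src) {
            return nullptr;
        }
        sk_sp<SkSpecialSurface> surf = src->makeSurface(
                outProps, SkISize::Make(src->width(), src->height()));
        if (!surf) {
            return nullptr;
        }
        // Draw the source translated by (dx, dy) and hand back the snapshot.
        src->draw(surf->getCanvas(), SkIntToScalar(dx), SkIntToScalar(dy), nullptr);
        return surf->makeImageSnapshot();
    }
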
diff --git a/gfx/skia/skia/src/core/SkSpecialSurface.cpp b/gfx/skia/skia/src/core/SkSpecialSurface.cpp
new file mode 100644
index 000000000..85bb61a3e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpecialSurface.cpp
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCanvas.h"
+#include "SkSpecialImage.h"
+#include "SkSpecialSurface.h"
+#include "SkSurfacePriv.h"
+
+///////////////////////////////////////////////////////////////////////////////
+class SkSpecialSurface_Base : public SkSpecialSurface {
+public:
+ SkSpecialSurface_Base(const SkIRect& subset, const SkSurfaceProps* props)
+ : INHERITED(subset, props)
+ , fCanvas(nullptr) {
+ }
+
+ virtual ~SkSpecialSurface_Base() { }
+
+ // reset is called after an SkSpecialImage has been snapped
+ void reset() { fCanvas.reset(); }
+
+ // This can return nullptr if reset has already been called or something went wrong in the ctor
+ SkCanvas* onGetCanvas() { return fCanvas; }
+
+ virtual sk_sp<SkSpecialImage> onMakeImageSnapshot() = 0;
+
+protected:
+ SkAutoTUnref<SkCanvas> fCanvas; // initialized by derived classes in ctors
+
+private:
+ typedef SkSpecialSurface INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+static SkSpecialSurface_Base* as_SB(SkSpecialSurface* surface) {
+ return static_cast<SkSpecialSurface_Base*>(surface);
+}
+
+SkSpecialSurface::SkSpecialSurface(const SkIRect& subset,
+ const SkSurfaceProps* props)
+ : fProps(SkSurfacePropsCopyOrDefault(props).flags(), kUnknown_SkPixelGeometry)
+ , fSubset(subset) {
+ SkASSERT(fSubset.width() > 0);
+ SkASSERT(fSubset.height() > 0);
+}
+
+SkCanvas* SkSpecialSurface::getCanvas() {
+ return as_SB(this)->onGetCanvas();
+}
+
+sk_sp<SkSpecialImage> SkSpecialSurface::makeImageSnapshot() {
+ sk_sp<SkSpecialImage> image(as_SB(this)->onMakeImageSnapshot());
+ as_SB(this)->reset();
+ return image; // the caller gets the creation ref
+}
+
+///////////////////////////////////////////////////////////////////////////////
+#include "SkMallocPixelRef.h"
+
+class SkSpecialSurface_Raster : public SkSpecialSurface_Base {
+public:
+ SkSpecialSurface_Raster(SkPixelRef* pr,
+ const SkIRect& subset,
+ const SkSurfaceProps* props)
+ : INHERITED(subset, props) {
+ const SkImageInfo& info = pr->info();
+
+ fBitmap.setInfo(info, info.minRowBytes());
+ fBitmap.setPixelRef(pr);
+
+ fCanvas.reset(new SkCanvas(fBitmap, this->props()));
+ fCanvas->clipRect(SkRect::Make(subset));
+#ifdef SK_IS_BOT
+ fCanvas->clear(SK_ColorRED); // catch any imageFilter sloppiness
+#endif
+ }
+
+ ~SkSpecialSurface_Raster() override { }
+
+ sk_sp<SkSpecialImage> onMakeImageSnapshot() override {
+ return SkSpecialImage::MakeFromRaster(this->subset(), fBitmap, &this->props());
+ }
+
+private:
+ SkBitmap fBitmap;
+
+ typedef SkSpecialSurface_Base INHERITED;
+};
+
+sk_sp<SkSpecialSurface> SkSpecialSurface::MakeFromBitmap(const SkIRect& subset, SkBitmap& bm,
+ const SkSurfaceProps* props) {
+ return sk_make_sp<SkSpecialSurface_Raster>(bm.pixelRef(), subset, props);
+}
+
+sk_sp<SkSpecialSurface> SkSpecialSurface::MakeRaster(const SkImageInfo& info,
+ const SkSurfaceProps* props) {
+ SkAutoTUnref<SkPixelRef> pr(SkMallocPixelRef::NewZeroed(info, 0, nullptr));
+ if (nullptr == pr.get()) {
+ return nullptr;
+ }
+
+ const SkIRect subset = SkIRect::MakeWH(pr->info().width(), pr->info().height());
+
+ return sk_make_sp<SkSpecialSurface_Raster>(pr, subset, props);
+}
+
+#if SK_SUPPORT_GPU
+///////////////////////////////////////////////////////////////////////////////
+#include "GrContext.h"
+#include "SkGpuDevice.h"
+
+class SkSpecialSurface_Gpu : public SkSpecialSurface_Base {
+public:
+ SkSpecialSurface_Gpu(sk_sp<GrDrawContext> drawContext,
+ int width, int height,
+ const SkIRect& subset)
+ : INHERITED(subset, &drawContext->surfaceProps())
+ , fDrawContext(std::move(drawContext)) {
+
+ sk_sp<SkBaseDevice> device(SkGpuDevice::Make(fDrawContext, width, height,
+ SkGpuDevice::kUninit_InitContents));
+ if (!device) {
+ return;
+ }
+
+ fCanvas.reset(new SkCanvas(device.get()));
+ fCanvas->clipRect(SkRect::Make(subset));
+#ifdef SK_IS_BOT
+ fCanvas->clear(SK_ColorRED); // catch any imageFilter sloppiness
+#endif
+ }
+
+ ~SkSpecialSurface_Gpu() override { }
+
+ sk_sp<SkSpecialImage> onMakeImageSnapshot() override {
+ sk_sp<SkSpecialImage> tmp(SkSpecialImage::MakeFromGpu(
+ this->subset(),
+ kNeedNewImageUniqueID_SpecialImage,
+ fDrawContext->asTexture(),
+ sk_ref_sp(fDrawContext->getColorSpace()),
+ &this->props()));
+ fDrawContext = nullptr;
+ return tmp;
+ }
+
+private:
+ sk_sp<GrDrawContext> fDrawContext;
+
+ typedef SkSpecialSurface_Base INHERITED;
+};
+
+sk_sp<SkSpecialSurface> SkSpecialSurface::MakeRenderTarget(GrContext* context,
+ int width, int height,
+ GrPixelConfig config,
+ sk_sp<SkColorSpace> colorSpace) {
+ if (!context) {
+ return nullptr;
+ }
+
+ sk_sp<GrDrawContext> drawContext(context->makeDrawContext(SkBackingFit::kApprox,
+ width, height, config,
+ std::move(colorSpace)));
+ if (!drawContext) {
+ return nullptr;
+ }
+
+ const SkIRect subset = SkIRect::MakeWH(width, height);
+
+ return sk_make_sp<SkSpecialSurface_Gpu>(std::move(drawContext), width, height, subset);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkSpecialSurface.h b/gfx/skia/skia/src/core/SkSpecialSurface.h
new file mode 100644
index 000000000..2aa03dd4d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpecialSurface.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSpecialSurface_DEFINED
+#define SkSpecialSurface_DEFINED
+
+#include "SkRefCnt.h"
+#include "SkSurfaceProps.h"
+
+class GrContext;
+struct GrSurfaceDesc;
+class SkCanvas;
+struct SkImageInfo;
+class SkSpecialImage;
+
+/**
+ * SkSpecialSurface is a restricted form of SkSurface solely for internal use. It differs
+ * from SkSurface in that:
+ * - it can be backed by GrTextures larger than [ fWidth, fHeight ]
+ * - it can't be used for tiling
+ * - it becomes inactive once a snapshot of it is taken (i.e., no copy-on-write)
+ * - it has no generation ID
+ */
+class SkSpecialSurface : public SkRefCnt {
+public:
+ const SkSurfaceProps& props() const { return fProps; }
+
+ int width() const { return fSubset.width(); }
+ int height() const { return fSubset.height(); }
+
+ /**
+ * Return a canvas that will draw into this surface. This will always
+ * return the same canvas for a given surface, and is managed/owned by the
+ * surface.
+ *
+ * The canvas will be invalid after 'makeImageSnapshot' is called.
+ */
+ SkCanvas* getCanvas();
+
+ /**
+ * Returns an image of the current state of the surface pixels up to this
+ * point. The canvas returned by 'getCanvas' becomes invalidated by this
+ * call and no more drawing to this surface is allowed.
+ *
+ * Note: the caller inherits a ref from this call that must be balanced
+ */
+ sk_sp<SkSpecialImage> makeImageSnapshot();
+
+#if SK_SUPPORT_GPU
+ /**
+ * Allocate a new GPU-backed SkSpecialSurface. If the requested surface cannot
+ * be created, nullptr will be returned.
+ */
+ static sk_sp<SkSpecialSurface> MakeRenderTarget(GrContext*,
+ int width, int height,
+ GrPixelConfig config,
+ sk_sp<SkColorSpace> colorSpace);
+#endif
+
+ /**
+ * Use an existing SkBitmap as the backing store.
+ */
+ static sk_sp<SkSpecialSurface> MakeFromBitmap(const SkIRect& subset, SkBitmap& bm,
+ const SkSurfaceProps* = nullptr);
+
+ /**
+ * Return a new CPU-backed surface, with the memory for the pixels automatically
+ * allocated.
+ *
+ * If the requested surface cannot be created, or the request is not a
+ * supported configuration, nullptr will be returned.
+ */
+ static sk_sp<SkSpecialSurface> MakeRaster(const SkImageInfo&,
+ const SkSurfaceProps* = nullptr);
+
+protected:
+ SkSpecialSurface(const SkIRect& subset, const SkSurfaceProps*);
+
+ // For testing only
+ friend class TestingSpecialSurfaceAccess;
+ const SkIRect& subset() const { return fSubset; }
+
+private:
+ const SkSurfaceProps fProps;
+ const SkIRect fSubset;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
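
A minimal raster sketch of the one-shot contract described above (the helper name is illustrative): once makeImageSnapshot() is called, the canvas must not be touched again.

    static sk_sp<SkSpecialImage> make_solid(const SkImageInfo& info, SkColor color) {
        sk_sp<SkSpecialSurface> surf = SkSpecialSurface::MakeRaster(info);
        if (!surf) {
            return nullptr;
        }
        surf->getCanvas()->clear(color);
        // The surface becomes inactive here; getCanvas() may no longer be used.
        return surf->makeImageSnapshot();
    }
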
diff --git a/gfx/skia/skia/src/core/SkSpinlock.cpp b/gfx/skia/skia/src/core/SkSpinlock.cpp
new file mode 100644
index 000000000..eb9d6330a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpinlock.cpp
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkSpinlock.h"
+
+void SkSpinlock::contendedAcquire() {
+ // To act as a mutex, we need an acquire barrier when we acquire the lock.
+ while (fLocked.exchange(true, std::memory_order_acquire)) {
+ /*spin*/
+ }
+}
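
SkSpinlock.h is not part of this hunk, so the shape of the fast path below is an assumption; it only illustrates the usual pairing with the contended slow path above (an acquire exchange on lock, a release store on unlock).

    #include <atomic>

    // Illustrative shape only -- not the actual SkSpinlock.h contents.
    class SpinlockSketch {
    public:
        void acquire() {
            // Uncontended case: a single acquire exchange takes the lock.
            if (fLocked.exchange(true, std::memory_order_acquire)) {
                this->contendedAcquire();       // spin, as in the .cpp above
            }
        }
        void release() {
            fLocked.store(false, std::memory_order_release);
        }
    private:
        void contendedAcquire();                // out-of-line slow path
        std::atomic<bool> fLocked{false};
    };
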
diff --git a/gfx/skia/skia/src/core/SkSpriteBlitter.h b/gfx/skia/skia/src/core/SkSpriteBlitter.h
new file mode 100644
index 000000000..3a6286086
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpriteBlitter.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSpriteBlitter_DEFINED
+#define SkSpriteBlitter_DEFINED
+
+#include "SkBlitter.h"
+#include "SkPixmap.h"
+#include "SkShader.h"
+#include "SkSmallAllocator.h"
+
+class SkPaint;
+
+// SkSpriteBlitter specializes SkBlitter for moving large rectangles of pixels around.
+// Because of this use, the main primitive shifts from blitH-style calls to the more
+// efficient blitRect.
+class SkSpriteBlitter : public SkBlitter {
+public:
+ SkSpriteBlitter(const SkPixmap& source);
+
+ virtual void setup(const SkPixmap& dst, int left, int top, const SkPaint&);
+
+ // blitH, blitAntiH, blitV and blitMask should not be called on an SkSpriteBlitter.
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitMask(const SkMask&, const SkIRect& clip) override;
+
+ // A SkSpriteBlitter must implement blitRect.
+ void blitRect(int x, int y, int width, int height) override = 0;
+
+ static SkSpriteBlitter* ChooseD16(const SkPixmap& source, const SkPaint&, SkTBlitterAllocator*);
+ static SkSpriteBlitter* ChooseL32(const SkPixmap& source, const SkPaint&, SkTBlitterAllocator*);
+ static SkSpriteBlitter* ChooseS32(const SkPixmap& source, const SkPaint&, SkTBlitterAllocator*);
+ static SkSpriteBlitter* ChooseF16(const SkPixmap& source, const SkPaint&, SkTBlitterAllocator*);
+
+protected:
+ SkPixmap fDst;
+ const SkPixmap fSource;
+ int fLeft, fTop;
+ const SkPaint* fPaint;
+
+private:
+ typedef SkBlitter INHERITED;
+};
+
+#endif
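
A sketch of how a caller is expected to drive one of these blitters (the real driver lives in the blitter-choosing code, which is not part of this hunk); the helper name is illustrative.

    static bool blit_sprite(const SkPixmap& dst, const SkPixmap& src,
                            int left, int top, const SkPaint& paint,
                            SkTBlitterAllocator* allocator) {
        SkSpriteBlitter* blitter = SkSpriteBlitter::ChooseL32(src, paint, allocator);
        if (!blitter) {
            return false;               // caller falls back to a slower path
        }
        blitter->setup(dst, left, top, paint);
        // One call covers the whole sprite; blitH/blitV/blitMask are never used here.
        blitter->blitRect(left, top, src.width(), src.height());
        return true;
    }
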
diff --git a/gfx/skia/skia/src/core/SkSpriteBlitter4f.cpp b/gfx/skia/skia/src/core/SkSpriteBlitter4f.cpp
new file mode 100644
index 000000000..38ec7394d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpriteBlitter4f.cpp
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkSpriteBlitter.h"
+#include "SkSpanProcs.h"
+#include "SkTemplates.h"
+#include "SkXfermode.h"
+
+class Sprite_4f : public SkSpriteBlitter {
+public:
+ Sprite_4f(const SkPixmap& src, const SkPaint& paint) : INHERITED(src) {
+ fXfer = SkXfermode::Peek(paint.getBlendMode());
+ fLoader = SkLoadSpanProc_Choose(src.info());
+ fFilter = SkFilterSpanProc_Choose(paint);
+ fBuffer.reset(src.width());
+ }
+
+protected:
+ SkXfermode* fXfer;
+ SkLoadSpanProc fLoader;
+ SkFilterSpanProc fFilter;
+ SkAutoTMalloc<SkPM4f> fBuffer;
+
+private:
+ typedef SkSpriteBlitter INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+class Sprite_F16 : public Sprite_4f {
+public:
+ Sprite_F16(const SkPixmap& src, const SkPaint& paint) : INHERITED(src, paint) {
+ uint32_t flags = 0;
+ if (src.isOpaque()) {
+ flags |= SkXfermode::kSrcIsOpaque_F16Flag;
+ }
+ fWriter = SkXfermode::GetF16Proc(fXfer, flags);
+ }
+
+ void blitRect(int x, int y, int width, int height) override {
+ SkASSERT(width > 0 && height > 0);
+ uint64_t* SK_RESTRICT dst = fDst.writable_addr64(x, y);
+ size_t dstRB = fDst.rowBytes();
+
+ for (int bottom = y + height; y < bottom; ++y) {
+ fLoader(fSource, x - fLeft, y - fTop, fBuffer, width);
+ fFilter(*fPaint, fBuffer, width);
+ fWriter(fXfer, dst, fBuffer, width, nullptr);
+ dst = (uint64_t* SK_RESTRICT)((char*)dst + dstRB);
+ }
+ }
+
+private:
+ SkXfermode::F16Proc fWriter;
+
+ typedef Sprite_4f INHERITED;
+};
+
+
+SkSpriteBlitter* SkSpriteBlitter::ChooseF16(const SkPixmap& source, const SkPaint& paint,
+ SkTBlitterAllocator* allocator) {
+ SkASSERT(allocator != nullptr);
+
+ if (paint.getMaskFilter() != nullptr) {
+ return nullptr;
+ }
+
+ switch (source.colorType()) {
+ case kN32_SkColorType:
+ case kRGBA_F16_SkColorType:
+ return allocator->createT<Sprite_F16>(source, paint);
+ default:
+ return nullptr;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+class Sprite_sRGB : public Sprite_4f {
+public:
+ Sprite_sRGB(const SkPixmap& src, const SkPaint& paint) : INHERITED(src, paint) {
+ uint32_t flags = SkXfermode::kDstIsSRGB_D32Flag;
+ if (src.isOpaque()) {
+ flags |= SkXfermode::kSrcIsOpaque_D32Flag;
+ }
+ fWriter = SkXfermode::GetD32Proc(fXfer, flags);
+ }
+
+ void blitRect(int x, int y, int width, int height) override {
+ SkASSERT(width > 0 && height > 0);
+ uint32_t* SK_RESTRICT dst = fDst.writable_addr32(x, y);
+ size_t dstRB = fDst.rowBytes();
+
+ for (int bottom = y + height; y < bottom; ++y) {
+ fLoader(fSource, x - fLeft, y - fTop, fBuffer, width);
+ fFilter(*fPaint, fBuffer, width);
+ fWriter(fXfer, dst, fBuffer, width, nullptr);
+ dst = (uint32_t* SK_RESTRICT)((char*)dst + dstRB);
+ }
+ }
+
+protected:
+ SkXfermode::D32Proc fWriter;
+
+private:
+ typedef Sprite_4f INHERITED;
+};
+
+
+SkSpriteBlitter* SkSpriteBlitter::ChooseS32(const SkPixmap& source, const SkPaint& paint,
+ SkTBlitterAllocator* allocator) {
+ SkASSERT(allocator != nullptr);
+
+ if (paint.getMaskFilter() != nullptr) {
+ return nullptr;
+ }
+
+ switch (source.colorType()) {
+ case kN32_SkColorType:
+ case kRGBA_F16_SkColorType:
+ return allocator->createT<Sprite_sRGB>(source, paint);
+ default:
+ return nullptr;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkSpriteBlitterTemplate.h b/gfx/skia/skia/src/core/SkSpriteBlitterTemplate.h
new file mode 100644
index 000000000..36d3852fb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpriteBlitterTemplate.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+class SkSPRITE_CLASSNAME : public SkSpriteBlitter {
+public:
+ SkSPRITE_CLASSNAME(const SkPixmap& source SkSPRITE_ARGS) : SkSpriteBlitter(source) {
+ SkSPRITE_INIT
+ }
+
+ virtual void blitRect(int x, int y, int width, int height) {
+ SkASSERT(width > 0 && height > 0);
+ int srcX = x - fLeft;
+ int srcY = y - fTop;
+ SkSPRITE_DST_TYPE* SK_RESTRICT dst = fDst.SkSPRITE_DST_GETADDR(x, y);
+ const SkSPRITE_SRC_TYPE* SK_RESTRICT src = fSource.SkSPRITE_SRC_GETADDR(srcX, srcY);
+ size_t dstRB = fDst.rowBytes();
+ size_t srcRB = fSource.rowBytes();
+
+ SkDEBUGCODE((void)fDst.SkSPRITE_DST_GETADDR(x + width - 1, y + height - 1);)
+ SkDEBUGCODE((void)fSource.SkSPRITE_SRC_GETADDR(srcX + width - 1, srcY + height - 1);)
+
+ SkSPRITE_PREAMBLE(fSource, srcX, srcY);
+
+ do {
+ SkSPRITE_DST_TYPE* d = dst;
+ const SkSPRITE_SRC_TYPE* s = src;
+#ifdef SkSPRITE_BEGIN_ROW
+ SkSPRITE_BEGIN_ROW
+#endif
+
+#ifdef SkSPRITE_ROW_PROC
+ SkSPRITE_ROW_PROC(d, s, width, x, y);
+#else
+ int w = width;
+ do {
+ SkSPRITE_SRC_TYPE sc = *s++;
+ SkSPRITE_BLIT_PIXEL(d, sc);
+ d += 1;
+ } while (--w != 0);
+#endif
+ dst = (SkSPRITE_DST_TYPE* SK_RESTRICT)((char*)dst + dstRB);
+ src = (const SkSPRITE_SRC_TYPE* SK_RESTRICT)((const char*)src + srcRB);
+ SkSPRITE_NEXT_ROW
+#ifdef SkSPRITE_ROW_PROC
+ y += 1;
+#endif
+ } while (--height != 0);
+
+ SkSPRITE_POSTAMBLE((*fSource));
+ }
+
+private:
+ SkSPRITE_FIELDS
+};
+
+#undef SkSPRITE_BLIT_PIXEL
+#undef SkSPRITE_CLASSNAME
+#undef SkSPRITE_DST_TYPE
+#undef SkSPRITE_SRC_TYPE
+#undef SkSPRITE_DST_GETADDR
+#undef SkSPRITE_SRC_GETADDR
+#undef SkSPRITE_PREAMBLE
+#undef SkSPRITE_POSTAMBLE
+#undef SkSPRITE_ARGS
+#undef SkSPRITE_FIELDS
+#undef SkSPRITE_INIT
+#undef SkSPRITE_NEXT_ROW
+#undef SkSPRITE_BEGIN_ROW
+
+#ifdef SkSPRITE_ROW_PROC
+ #undef SkSPRITE_ROW_PROC
+#endif
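
This header is an X-macro template: a client defines the SkSPRITE_* macros and then includes the file to stamp out a blitRect loop (SkSpriteBlitter_RGB16.cpp below does exactly that). A minimal hypothetical instantiation, a straight 16-bit copy, would look like:

    // Hypothetical instantiation -- not part of this patch.
    #define SkSPRITE_CLASSNAME              Sprite_D16_S16_Copy
    #define SkSPRITE_ARGS
    #define SkSPRITE_FIELDS
    #define SkSPRITE_INIT
    #define SkSPRITE_DST_TYPE               uint16_t
    #define SkSPRITE_SRC_TYPE               uint16_t
    #define SkSPRITE_DST_GETADDR            writable_addr16
    #define SkSPRITE_SRC_GETADDR            addr16
    #define SkSPRITE_PREAMBLE(srcBM, x, y)
    #define SkSPRITE_BLIT_PIXEL(dst, src)   (*dst = src)
    #define SkSPRITE_NEXT_ROW
    #define SkSPRITE_POSTAMBLE(srcBM)
    #include "SkSpriteBlitterTemplate.h"
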
diff --git a/gfx/skia/skia/src/core/SkSpriteBlitter_ARGB32.cpp b/gfx/skia/skia/src/core/SkSpriteBlitter_ARGB32.cpp
new file mode 100644
index 000000000..1a76b1b2f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpriteBlitter_ARGB32.cpp
@@ -0,0 +1,296 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkSpriteBlitter.h"
+#include "SkBlitRow.h"
+#include "SkColorFilter.h"
+#include "SkColorPriv.h"
+#include "SkTemplates.h"
+#include "SkUtils.h"
+#include "SkXfermode.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+class Sprite_D32_S32 : public SkSpriteBlitter {
+public:
+ Sprite_D32_S32(const SkPixmap& src, U8CPU alpha) : INHERITED(src) {
+ SkASSERT(src.colorType() == kN32_SkColorType);
+
+ unsigned flags32 = 0;
+ if (255 != alpha) {
+ flags32 |= SkBlitRow::kGlobalAlpha_Flag32;
+ }
+ if (!src.isOpaque()) {
+ flags32 |= SkBlitRow::kSrcPixelAlpha_Flag32;
+ }
+
+ fProc32 = SkBlitRow::Factory32(flags32);
+ fAlpha = alpha;
+ }
+
+ void blitRect(int x, int y, int width, int height) override {
+ SkASSERT(width > 0 && height > 0);
+ uint32_t* SK_RESTRICT dst = fDst.writable_addr32(x, y);
+ const uint32_t* SK_RESTRICT src = fSource.addr32(x - fLeft, y - fTop);
+ size_t dstRB = fDst.rowBytes();
+ size_t srcRB = fSource.rowBytes();
+ SkBlitRow::Proc32 proc = fProc32;
+ U8CPU alpha = fAlpha;
+
+ do {
+ proc(dst, src, width, alpha);
+ dst = (uint32_t* SK_RESTRICT)((char*)dst + dstRB);
+ src = (const uint32_t* SK_RESTRICT)((const char*)src + srcRB);
+ } while (--height != 0);
+ }
+
+private:
+ SkBlitRow::Proc32 fProc32;
+ U8CPU fAlpha;
+
+ typedef SkSpriteBlitter INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class Sprite_D32_XferFilter : public SkSpriteBlitter {
+public:
+ Sprite_D32_XferFilter(const SkPixmap& source, const SkPaint& paint) : SkSpriteBlitter(source) {
+ fColorFilter = paint.getColorFilter();
+ SkSafeRef(fColorFilter);
+
+ fXfermode = SkXfermode::Peek(paint.getBlendMode());
+
+ fBufferSize = 0;
+ fBuffer = nullptr;
+
+ unsigned flags32 = 0;
+ if (255 != paint.getAlpha()) {
+ flags32 |= SkBlitRow::kGlobalAlpha_Flag32;
+ }
+ if (!source.isOpaque()) {
+ flags32 |= SkBlitRow::kSrcPixelAlpha_Flag32;
+ }
+
+ fProc32 = SkBlitRow::Factory32(flags32);
+ fAlpha = paint.getAlpha();
+ }
+
+ virtual ~Sprite_D32_XferFilter() {
+ delete[] fBuffer;
+ SkSafeUnref(fColorFilter);
+ }
+
+ void setup(const SkPixmap& dst, int left, int top, const SkPaint& paint) override {
+ this->INHERITED::setup(dst, left, top, paint);
+
+ int width = dst.width();
+ if (width > fBufferSize) {
+ fBufferSize = width;
+ delete[] fBuffer;
+ fBuffer = new SkPMColor[width];
+ }
+ }
+
+protected:
+ SkColorFilter* fColorFilter;
+ SkXfermode* fXfermode;
+ int fBufferSize;
+ SkPMColor* fBuffer;
+ SkBlitRow::Proc32 fProc32;
+ U8CPU fAlpha;
+
+private:
+ typedef SkSpriteBlitter INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class Sprite_D32_S32A_XferFilter : public Sprite_D32_XferFilter {
+public:
+ Sprite_D32_S32A_XferFilter(const SkPixmap& source, const SkPaint& paint)
+ : Sprite_D32_XferFilter(source, paint) {}
+
+ void blitRect(int x, int y, int width, int height) override {
+ SkASSERT(width > 0 && height > 0);
+ uint32_t* SK_RESTRICT dst = fDst.writable_addr32(x, y);
+ const uint32_t* SK_RESTRICT src = fSource.addr32(x - fLeft, y - fTop);
+ size_t dstRB = fDst.rowBytes();
+ size_t srcRB = fSource.rowBytes();
+ SkColorFilter* colorFilter = fColorFilter;
+ SkXfermode* xfermode = fXfermode;
+
+ do {
+ const SkPMColor* tmp = src;
+
+ if (colorFilter) {
+ colorFilter->filterSpan(src, width, fBuffer);
+ tmp = fBuffer;
+ }
+
+ if (xfermode) {
+ xfermode->xfer32(dst, tmp, width, nullptr);
+ } else {
+ fProc32(dst, tmp, width, fAlpha);
+ }
+
+ dst = (uint32_t* SK_RESTRICT)((char*)dst + dstRB);
+ src = (const uint32_t* SK_RESTRICT)((const char*)src + srcRB);
+ } while (--height != 0);
+ }
+
+private:
+ typedef Sprite_D32_XferFilter INHERITED;
+};
+
+static void fillbuffer(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor16* SK_RESTRICT src, int count) {
+ SkASSERT(count > 0);
+
+ do {
+ *dst++ = SkPixel4444ToPixel32(*src++);
+ } while (--count != 0);
+}
+
+class Sprite_D32_S4444_XferFilter : public Sprite_D32_XferFilter {
+public:
+ Sprite_D32_S4444_XferFilter(const SkPixmap& source, const SkPaint& paint)
+ : Sprite_D32_XferFilter(source, paint) {}
+
+ void blitRect(int x, int y, int width, int height) override {
+ SkASSERT(width > 0 && height > 0);
+ SkPMColor* SK_RESTRICT dst = fDst.writable_addr32(x, y);
+ const SkPMColor16* SK_RESTRICT src = fSource.addr16(x - fLeft, y - fTop);
+ size_t dstRB = fDst.rowBytes();
+ size_t srcRB = fSource.rowBytes();
+ SkPMColor* SK_RESTRICT buffer = fBuffer;
+ SkColorFilter* colorFilter = fColorFilter;
+ SkXfermode* xfermode = fXfermode;
+
+ do {
+ fillbuffer(buffer, src, width);
+
+ if (colorFilter) {
+ colorFilter->filterSpan(buffer, width, buffer);
+ }
+ if (xfermode) {
+ xfermode->xfer32(dst, buffer, width, nullptr);
+ } else {
+ fProc32(dst, buffer, width, fAlpha);
+ }
+
+ dst = (SkPMColor* SK_RESTRICT)((char*)dst + dstRB);
+ src = (const SkPMColor16* SK_RESTRICT)((const char*)src + srcRB);
+ } while (--height != 0);
+ }
+
+private:
+ typedef Sprite_D32_XferFilter INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void src_row(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor16* SK_RESTRICT src, int count) {
+ do {
+ *dst = SkPixel4444ToPixel32(*src);
+ src += 1;
+ dst += 1;
+ } while (--count != 0);
+}
+
+class Sprite_D32_S4444_Opaque : public SkSpriteBlitter {
+public:
+ Sprite_D32_S4444_Opaque(const SkPixmap& source) : SkSpriteBlitter(source) {}
+
+ void blitRect(int x, int y, int width, int height) override {
+ SkASSERT(width > 0 && height > 0);
+ SkPMColor* SK_RESTRICT dst = fDst.writable_addr32(x, y);
+ const SkPMColor16* SK_RESTRICT src = fSource.addr16(x - fLeft, y - fTop);
+ size_t dstRB = fDst.rowBytes();
+ size_t srcRB = fSource.rowBytes();
+
+ do {
+ src_row(dst, src, width);
+ dst = (SkPMColor* SK_RESTRICT)((char*)dst + dstRB);
+ src = (const SkPMColor16* SK_RESTRICT)((const char*)src + srcRB);
+ } while (--height != 0);
+ }
+};
+
+static void srcover_row(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor16* SK_RESTRICT src, int count) {
+ do {
+ *dst = SkPMSrcOver(SkPixel4444ToPixel32(*src), *dst);
+ src += 1;
+ dst += 1;
+ } while (--count != 0);
+}
+
+class Sprite_D32_S4444 : public SkSpriteBlitter {
+public:
+ Sprite_D32_S4444(const SkPixmap& source) : SkSpriteBlitter(source) {}
+
+ void blitRect(int x, int y, int width, int height) override {
+ SkASSERT(width > 0 && height > 0);
+ SkPMColor* SK_RESTRICT dst = fDst.writable_addr32(x, y);
+ const SkPMColor16* SK_RESTRICT src = fSource.addr16(x - fLeft, y - fTop);
+ size_t dstRB = fDst.rowBytes();
+ size_t srcRB = fSource.rowBytes();
+
+ do {
+ srcover_row(dst, src, width);
+ dst = (SkPMColor* SK_RESTRICT)((char*)dst + dstRB);
+ src = (const SkPMColor16* SK_RESTRICT)((const char*)src + srcRB);
+ } while (--height != 0);
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkSpriteBlitter* SkSpriteBlitter::ChooseL32(const SkPixmap& source, const SkPaint& paint,
+ SkTBlitterAllocator* allocator) {
+ SkASSERT(allocator != nullptr);
+
+ if (paint.getMaskFilter() != nullptr) {
+ return nullptr;
+ }
+
+ U8CPU alpha = paint.getAlpha();
+ bool isSrcOver = paint.isSrcOver();
+ SkColorFilter* filter = paint.getColorFilter();
+ SkSpriteBlitter* blitter = nullptr;
+
+ switch (source.colorType()) {
+ case kARGB_4444_SkColorType:
+ if (alpha != 0xFF) {
+ return nullptr; // we only have opaque sprites
+ }
+ if (!isSrcOver || filter) {
+ blitter = allocator->createT<Sprite_D32_S4444_XferFilter>(source, paint);
+ } else if (source.isOpaque()) {
+ blitter = allocator->createT<Sprite_D32_S4444_Opaque>(source);
+ } else {
+ blitter = allocator->createT<Sprite_D32_S4444>(source);
+ }
+ break;
+ case kN32_SkColorType:
+ if (!isSrcOver || filter) {
+ if (255 == alpha) {
+ // this can handle xfermode or filter, but not alpha
+ blitter = allocator->createT<Sprite_D32_S32A_XferFilter>(source, paint);
+ }
+ } else {
+ // this can handle alpha, but not xfermode or filter
+ blitter = allocator->createT<Sprite_D32_S32>(source, alpha);
+ }
+ break;
+ default:
+ break;
+ }
+ return blitter;
+}
diff --git a/gfx/skia/skia/src/core/SkSpriteBlitter_RGB16.cpp b/gfx/skia/skia/src/core/SkSpriteBlitter_RGB16.cpp
new file mode 100644
index 000000000..9df7dab48
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpriteBlitter_RGB16.cpp
@@ -0,0 +1,373 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkSpriteBlitter.h"
+#include "SkBlitRow.h"
+#include "SkTemplates.h"
+#include "SkUtils.h"
+#include "SkColorPriv.h"
+
+#define D16_S32A_Opaque_Pixel(dst, sc) \
+do { \
+ if (sc) { \
+ *dst = SkSrcOver32To16(sc, *dst); \
+ } \
+} while (0)
+
+static inline void D16_S32A_Blend_Pixel_helper(uint16_t* dst, SkPMColor sc,
+ unsigned src_scale) {
+ uint16_t dc = *dst;
+ unsigned sa = SkGetPackedA32(sc);
+ unsigned dr, dg, db;
+
+ if (255 == sa) {
+ dr = SkAlphaBlend(SkPacked32ToR16(sc), SkGetPackedR16(dc), src_scale);
+ dg = SkAlphaBlend(SkPacked32ToG16(sc), SkGetPackedG16(dc), src_scale);
+ db = SkAlphaBlend(SkPacked32ToB16(sc), SkGetPackedB16(dc), src_scale);
+ } else {
+#ifdef SK_SUPPORT_LEGACY_BROKEN_LERP
+ unsigned dst_scale = 255 - SkAlphaMul(sa, src_scale);
+#else
+ unsigned dst_scale = SkAlphaMulInv256(sa, src_scale);
+#endif
+ dr = (SkPacked32ToR16(sc) * src_scale + SkGetPackedR16(dc) * dst_scale) >> 8;
+ dg = (SkPacked32ToG16(sc) * src_scale + SkGetPackedG16(dc) * dst_scale) >> 8;
+ db = (SkPacked32ToB16(sc) * src_scale + SkGetPackedB16(dc) * dst_scale) >> 8;
+ }
+ *dst = SkPackRGB16(dr, dg, db);
+}
+
+#define D16_S32A_Blend_Pixel(dst, sc, src_scale) \
+ do { if (sc) D16_S32A_Blend_Pixel_helper(dst, sc, src_scale); } while (0)
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+class Sprite_D16_S16_Opaque : public SkSpriteBlitter {
+public:
+ Sprite_D16_S16_Opaque(const SkPixmap& source) : SkSpriteBlitter(source) {}
+
+ // overrides
+ void blitRect(int x, int y, int width, int height) override {
+ uint16_t* SK_RESTRICT dst = fDst.writable_addr16(x, y);
+ const uint16_t* SK_RESTRICT src = fSource.addr16(x - fLeft, y - fTop);
+ size_t dstRB = fDst.rowBytes();
+ size_t srcRB = fSource.rowBytes();
+
+ while (--height >= 0) {
+ memcpy(dst, src, width << 1);
+ dst = (uint16_t*)((char*)dst + dstRB);
+ src = (const uint16_t*)((const char*)src + srcRB);
+ }
+ }
+};
+
+#define D16_S16_Blend_Pixel(dst, sc, scale) \
+ do { \
+ uint16_t dc = *dst; \
+ *dst = SkBlendRGB16(sc, dc, scale); \
+ } while (0)
+
+#define SkSPRITE_CLASSNAME Sprite_D16_S16_Blend
+#define SkSPRITE_ARGS , uint8_t alpha
+#define SkSPRITE_FIELDS uint8_t fSrcAlpha;
+#define SkSPRITE_INIT fSrcAlpha = alpha;
+#define SkSPRITE_DST_TYPE uint16_t
+#define SkSPRITE_SRC_TYPE uint16_t
+#define SkSPRITE_DST_GETADDR writable_addr16
+#define SkSPRITE_SRC_GETADDR addr16
+#define SkSPRITE_PREAMBLE(srcBM, x, y) int scale = SkAlpha255To256(fSrcAlpha);
+#define SkSPRITE_BLIT_PIXEL(dst, src) D16_S16_Blend_Pixel(dst, src, scale)
+#define SkSPRITE_NEXT_ROW
+#define SkSPRITE_POSTAMBLE(srcBM)
+#include "SkSpriteBlitterTemplate.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define D16_S4444_Opaque(dst, sc) \
+ do { \
+ uint16_t dc = *dst; \
+ *dst = SkSrcOver4444To16(sc, dc); \
+ } while (0)
+
+#define SkSPRITE_CLASSNAME Sprite_D16_S4444_Opaque
+#define SkSPRITE_ARGS
+#define SkSPRITE_FIELDS
+#define SkSPRITE_INIT
+#define SkSPRITE_DST_TYPE uint16_t
+#define SkSPRITE_SRC_TYPE SkPMColor16
+#define SkSPRITE_DST_GETADDR writable_addr16
+#define SkSPRITE_SRC_GETADDR addr16
+#define SkSPRITE_PREAMBLE(srcBM, x, y)
+#define SkSPRITE_BLIT_PIXEL(dst, src) D16_S4444_Opaque(dst, src)
+#define SkSPRITE_NEXT_ROW
+#define SkSPRITE_POSTAMBLE(srcBM)
+#include "SkSpriteBlitterTemplate.h"
+
+#define D16_S4444_Blend(dst, sc, scale16) \
+ do { \
+ uint16_t dc = *dst; \
+ *dst = SkBlend4444To16(sc, dc, scale16); \
+ } while (0)
+
+
+#define SkSPRITE_CLASSNAME Sprite_D16_S4444_Blend
+#define SkSPRITE_ARGS , uint8_t alpha
+#define SkSPRITE_FIELDS uint8_t fSrcAlpha;
+#define SkSPRITE_INIT fSrcAlpha = alpha;
+#define SkSPRITE_DST_TYPE uint16_t
+#define SkSPRITE_SRC_TYPE uint16_t
+#define SkSPRITE_DST_GETADDR writable_addr16
+#define SkSPRITE_SRC_GETADDR addr16
+#define SkSPRITE_PREAMBLE(srcBM, x, y) int scale = SkAlpha15To16(fSrcAlpha);
+#define SkSPRITE_BLIT_PIXEL(dst, src) D16_S4444_Blend(dst, src, scale)
+#define SkSPRITE_NEXT_ROW
+#define SkSPRITE_POSTAMBLE(srcBM)
+#include "SkSpriteBlitterTemplate.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define SkSPRITE_CLASSNAME Sprite_D16_SIndex8A_Opaque
+#define SkSPRITE_ARGS
+#define SkSPRITE_FIELDS
+#define SkSPRITE_INIT
+#define SkSPRITE_DST_TYPE uint16_t
+#define SkSPRITE_SRC_TYPE uint8_t
+#define SkSPRITE_DST_GETADDR writable_addr16
+#define SkSPRITE_SRC_GETADDR addr8
+#define SkSPRITE_PREAMBLE(srcBM, x, y) const SkPMColor* ctable = srcBM.ctable()->readColors()
+#define SkSPRITE_BLIT_PIXEL(dst, src) D16_S32A_Opaque_Pixel(dst, ctable[src])
+#define SkSPRITE_NEXT_ROW
+#define SkSPRITE_POSTAMBLE(srcBM)
+#include "SkSpriteBlitterTemplate.h"
+
+#define SkSPRITE_CLASSNAME Sprite_D16_SIndex8A_Blend
+#define SkSPRITE_ARGS , uint8_t alpha
+#define SkSPRITE_FIELDS uint8_t fSrcAlpha;
+#define SkSPRITE_INIT fSrcAlpha = alpha;
+#define SkSPRITE_DST_TYPE uint16_t
+#define SkSPRITE_SRC_TYPE uint8_t
+#define SkSPRITE_DST_GETADDR writable_addr16
+#define SkSPRITE_SRC_GETADDR addr8
+#define SkSPRITE_PREAMBLE(srcBM, x, y) const SkPMColor* ctable = srcBM.ctable()->readColors(); unsigned src_scale = SkAlpha255To256(fSrcAlpha);
+#define SkSPRITE_BLIT_PIXEL(dst, src) D16_S32A_Blend_Pixel(dst, ctable[src], src_scale)
+#define SkSPRITE_NEXT_ROW
+#define SkSPRITE_POSTAMBLE(srcBM)
+#include "SkSpriteBlitterTemplate.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+static intptr_t asint(const void* ptr) {
+ return reinterpret_cast<const char*>(ptr) - (const char*)0;
+}
+
+static void blitrow_d16_si8(uint16_t* SK_RESTRICT dst,
+ const uint8_t* SK_RESTRICT src, int count,
+ const uint16_t* SK_RESTRICT ctable) {
+ if (count <= 8) {
+ do {
+ *dst++ = ctable[*src++];
+ } while (--count);
+ return;
+ }
+
+ // eat src until we're on a 4-byte boundary
+ while (asint(src) & 3) {
+ *dst++ = ctable[*src++];
+ count -= 1;
+ }
+
+ int qcount = count >> 2;
+ SkASSERT(qcount > 0);
+ const uint32_t* qsrc = reinterpret_cast<const uint32_t*>(src);
+ if (asint(dst) & 2) {
+ do {
+ uint32_t s4 = *qsrc++;
+#ifdef SK_CPU_LENDIAN
+ *dst++ = ctable[s4 & 0xFF];
+ *dst++ = ctable[(s4 >> 8) & 0xFF];
+ *dst++ = ctable[(s4 >> 16) & 0xFF];
+ *dst++ = ctable[s4 >> 24];
+#else // BENDIAN
+ *dst++ = ctable[s4 >> 24];
+ *dst++ = ctable[(s4 >> 16) & 0xFF];
+ *dst++ = ctable[(s4 >> 8) & 0xFF];
+ *dst++ = ctable[s4 & 0xFF];
+#endif
+ } while (--qcount);
+ } else { // dst is on a 4-byte boundary
+ uint32_t* ddst = reinterpret_cast<uint32_t*>(dst);
+ do {
+ uint32_t s4 = *qsrc++;
+#ifdef SK_CPU_LENDIAN
+ *ddst++ = (ctable[(s4 >> 8) & 0xFF] << 16) | ctable[s4 & 0xFF];
+ *ddst++ = (ctable[s4 >> 24] << 16) | ctable[(s4 >> 16) & 0xFF];
+#else // BENDIAN
+ *ddst++ = (ctable[s4 >> 24] << 16) | ctable[(s4 >> 16) & 0xFF];
+ *ddst++ = (ctable[(s4 >> 8) & 0xFF] << 16) | ctable[s4 & 0xFF];
+#endif
+ } while (--qcount);
+ dst = reinterpret_cast<uint16_t*>(ddst);
+ }
+ src = reinterpret_cast<const uint8_t*>(qsrc);
+ count &= 3;
+ // catch any remaining (will be < 4)
+ while (--count >= 0) {
+ *dst++ = ctable[*src++];
+ }
+}
+
+#define SkSPRITE_ROW_PROC(d, s, n, x, y) blitrow_d16_si8(d, s, n, ctable)
+
+#define SkSPRITE_CLASSNAME Sprite_D16_SIndex8_Opaque
+#define SkSPRITE_ARGS
+#define SkSPRITE_FIELDS
+#define SkSPRITE_INIT
+#define SkSPRITE_DST_TYPE uint16_t
+#define SkSPRITE_SRC_TYPE uint8_t
+#define SkSPRITE_DST_GETADDR writable_addr16
+#define SkSPRITE_SRC_GETADDR addr8
+#define SkSPRITE_PREAMBLE(srcBM, x, y) const uint16_t* ctable = srcBM.ctable()->read16BitCache()
+#define SkSPRITE_BLIT_PIXEL(dst, src) *dst = ctable[src]
+#define SkSPRITE_NEXT_ROW
+#define SkSPRITE_POSTAMBLE(srcBM)
+#include "SkSpriteBlitterTemplate.h"
+
+#define SkSPRITE_CLASSNAME Sprite_D16_SIndex8_Blend
+#define SkSPRITE_ARGS , uint8_t alpha
+#define SkSPRITE_FIELDS uint8_t fSrcAlpha;
+#define SkSPRITE_INIT fSrcAlpha = alpha;
+#define SkSPRITE_DST_TYPE uint16_t
+#define SkSPRITE_SRC_TYPE uint8_t
+#define SkSPRITE_DST_GETADDR writable_addr16
+#define SkSPRITE_SRC_GETADDR addr8
+#define SkSPRITE_PREAMBLE(srcBM, x, y) const uint16_t* ctable = srcBM.ctable()->read16BitCache(); unsigned src_scale = SkAlpha255To256(fSrcAlpha);
+#define SkSPRITE_BLIT_PIXEL(dst, src) D16_S16_Blend_Pixel(dst, ctable[src], src_scale)
+#define SkSPRITE_NEXT_ROW
+#define SkSPRITE_POSTAMBLE(srcBM)
+#include "SkSpriteBlitterTemplate.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+class Sprite_D16_S32_BlitRowProc : public SkSpriteBlitter {
+public:
+ Sprite_D16_S32_BlitRowProc(const SkPixmap& source) : SkSpriteBlitter(source) {}
+
+ void setup(const SkPixmap& dst, int left, int top, const SkPaint& paint) override {
+ this->INHERITED::setup(dst, left, top, paint);
+
+ unsigned flags = 0;
+
+ if (paint.getAlpha() < 0xFF) {
+ flags |= SkBlitRow::kGlobalAlpha_Flag;
+ }
+ if (!fSource.isOpaque()) {
+ flags |= SkBlitRow::kSrcPixelAlpha_Flag;
+ }
+ if (paint.isDither()) {
+ flags |= SkBlitRow::kDither_Flag;
+ }
+ fProc = SkBlitRow::Factory16(flags);
+ }
+
+ void blitRect(int x, int y, int width, int height) override {
+ uint16_t* SK_RESTRICT dst = fDst.writable_addr16(x, y);
+ const SkPMColor* SK_RESTRICT src = fSource.addr32(x - fLeft, y - fTop);
+ size_t dstRB = fDst.rowBytes();
+ size_t srcRB = fSource.rowBytes();
+ SkBlitRow::Proc16 proc = fProc;
+ U8CPU alpha = fPaint->getAlpha();
+
+ while (--height >= 0) {
+ proc(dst, src, width, alpha, x, y);
+ y += 1;
+ dst = (uint16_t* SK_RESTRICT)((char*)dst + dstRB);
+ src = (const SkPMColor* SK_RESTRICT)((const char*)src + srcRB);
+ }
+ }
+
+private:
+ SkBlitRow::Proc16 fProc;
+
+ typedef SkSpriteBlitter INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkSpriteBlitter* SkSpriteBlitter::ChooseD16(const SkPixmap& source, const SkPaint& paint,
+ SkTBlitterAllocator* allocator) {
+
+ SkASSERT(allocator != nullptr);
+
+ if (paint.getMaskFilter() != nullptr) { // may add cases for this
+ return nullptr;
+ }
+ if (!paint.isSrcOver()) { // may add cases for this
+ return nullptr;
+ }
+ if (paint.getColorFilter() != nullptr) { // may add cases for this
+ return nullptr;
+ }
+
+ const SkAlphaType at = source.alphaType();
+
+ SkSpriteBlitter* blitter = nullptr;
+ unsigned alpha = paint.getAlpha();
+
+ switch (source.colorType()) {
+ case kN32_SkColorType: {
+ if (kPremul_SkAlphaType != at && kOpaque_SkAlphaType != at) {
+ break;
+ }
+ blitter = allocator->createT<Sprite_D16_S32_BlitRowProc>(source);
+ break;
+ }
+ case kARGB_4444_SkColorType:
+ if (kPremul_SkAlphaType != at && kOpaque_SkAlphaType != at) {
+ break;
+ }
+ if (255 == alpha) {
+ blitter = allocator->createT<Sprite_D16_S4444_Opaque>(source);
+ } else {
+ blitter = allocator->createT<Sprite_D16_S4444_Blend>(source, alpha >> 4);
+ }
+ break;
+ case kRGB_565_SkColorType:
+ if (255 == alpha) {
+ blitter = allocator->createT<Sprite_D16_S16_Opaque>(source);
+ } else {
+ blitter = allocator->createT<Sprite_D16_S16_Blend>(source, alpha);
+ }
+ break;
+ case kIndex_8_SkColorType:
+ if (kPremul_SkAlphaType != at && kOpaque_SkAlphaType != at) {
+ break;
+ }
+ if (paint.isDither()) {
+ // we don't support dither yet in these special cases
+ break;
+ }
+ if (source.isOpaque()) {
+ if (255 == alpha) {
+ blitter = allocator->createT<Sprite_D16_SIndex8_Opaque>(source);
+ } else {
+ blitter = allocator->createT<Sprite_D16_SIndex8_Blend>(source, alpha);
+ }
+ } else {
+ if (255 == alpha) {
+ blitter = allocator->createT<Sprite_D16_SIndex8A_Opaque>(source);
+ } else {
+ blitter = allocator->createT<Sprite_D16_SIndex8A_Blend>(source, alpha);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ return blitter;
+}
diff --git a/gfx/skia/skia/src/core/SkStream.cpp b/gfx/skia/skia/src/core/SkStream.cpp
new file mode 100644
index 000000000..e7b3a7a7e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStream.cpp
@@ -0,0 +1,910 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkStream.h"
+#include "SkStreamPriv.h"
+#include "SkData.h"
+#include "SkFixed.h"
+#include "SkMakeUnique.h"
+#include "SkString.h"
+#include "SkOSFile.h"
+#include "SkTypes.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+
+int8_t SkStream::readS8() {
+ int8_t value;
+ SkDEBUGCODE(size_t len =) this->read(&value, 1);
+ SkASSERT(1 == len);
+ return value;
+}
+
+int16_t SkStream::readS16() {
+ int16_t value;
+ SkDEBUGCODE(size_t len =) this->read(&value, 2);
+ SkASSERT(2 == len);
+ return value;
+}
+
+int32_t SkStream::readS32() {
+ int32_t value;
+ SkDEBUGCODE(size_t len =) this->read(&value, 4);
+ SkASSERT(4 == len);
+ return value;
+}
+
+SkScalar SkStream::readScalar() {
+ SkScalar value;
+ SkDEBUGCODE(size_t len =) this->read(&value, sizeof(SkScalar));
+ SkASSERT(sizeof(SkScalar) == len);
+ return value;
+}
+
+#define SK_MAX_BYTE_FOR_U8 0xFD
+#define SK_BYTE_SENTINEL_FOR_U16 0xFE
+#define SK_BYTE_SENTINEL_FOR_U32 0xFF
+
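+// Packed uints use a variable-length encoding: values up to 0xFD are stored in a
+// single byte, 0xFE flags a following 16-bit value, and 0xFF flags a following
+// 32-bit value (see SkWStream::writePackedUInt for the writer side).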
+size_t SkStream::readPackedUInt() {
+ uint8_t byte;
+ if (!this->read(&byte, 1)) {
+ return 0;
+ }
+ if (SK_BYTE_SENTINEL_FOR_U16 == byte) {
+ return this->readU16();
+ } else if (SK_BYTE_SENTINEL_FOR_U32 == byte) {
+ return this->readU32();
+ } else {
+ return byte;
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+
+SkWStream::~SkWStream()
+{
+}
+
+void SkWStream::newline()
+{
+ this->write("\n", 1);
+}
+
+void SkWStream::flush()
+{
+}
+
+bool SkWStream::writeDecAsText(int32_t dec)
+{
+ char buffer[SkStrAppendS32_MaxSize];
+ char* stop = SkStrAppendS32(buffer, dec);
+ return this->write(buffer, stop - buffer);
+}
+
+bool SkWStream::writeBigDecAsText(int64_t dec, int minDigits)
+{
+ char buffer[SkStrAppendU64_MaxSize];
+ char* stop = SkStrAppendU64(buffer, dec, minDigits);
+ return this->write(buffer, stop - buffer);
+}
+
+bool SkWStream::writeHexAsText(uint32_t hex, int digits)
+{
+ SkString tmp;
+ tmp.appendHex(hex, digits);
+ return this->write(tmp.c_str(), tmp.size());
+}
+
+bool SkWStream::writeScalarAsText(SkScalar value)
+{
+ char buffer[SkStrAppendScalar_MaxSize];
+ char* stop = SkStrAppendScalar(buffer, value);
+ return this->write(buffer, stop - buffer);
+}
+
+bool SkWStream::write8(U8CPU value) {
+ uint8_t v = SkToU8(value);
+ return this->write(&v, 1);
+}
+
+bool SkWStream::write16(U16CPU value) {
+ uint16_t v = SkToU16(value);
+ return this->write(&v, 2);
+}
+
+bool SkWStream::write32(uint32_t value) {
+ return this->write(&value, 4);
+}
+
+bool SkWStream::writeScalar(SkScalar value) {
+ return this->write(&value, sizeof(value));
+}
+
+int SkWStream::SizeOfPackedUInt(size_t value) {
+ if (value <= SK_MAX_BYTE_FOR_U8) {
+ return 1;
+ } else if (value <= 0xFFFF) {
+ return 3;
+ }
+ return 5;
+}
+
+bool SkWStream::writePackedUInt(size_t value) {
+ uint8_t data[5];
+ size_t len = 1;
+ if (value <= SK_MAX_BYTE_FOR_U8) {
+ data[0] = value;
+ len = 1;
+ } else if (value <= 0xFFFF) {
+ uint16_t value16 = value;
+ data[0] = SK_BYTE_SENTINEL_FOR_U16;
+ memcpy(&data[1], &value16, 2);
+ len = 3;
+ } else {
+ uint32_t value32 = SkToU32(value);
+ data[0] = SK_BYTE_SENTINEL_FOR_U32;
+ memcpy(&data[1], &value32, 4);
+ len = 5;
+ }
+ return this->write(data, len);
+}
+
+bool SkWStream::writeStream(SkStream* stream, size_t length) {
+ char scratch[1024];
+ const size_t MAX = sizeof(scratch);
+
+ while (length != 0) {
+ size_t n = length;
+ if (n > MAX) {
+ n = MAX;
+ }
+ stream->read(scratch, n);
+ if (!this->write(scratch, n)) {
+ return false;
+ }
+ length -= n;
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkFILEStream::SkFILEStream(const char file[]) : fName(file), fOwnership(kCallerPasses_Ownership) {
+ fFILE = file ? sk_fopen(fName.c_str(), kRead_SkFILE_Flag) : nullptr;
+}
+
+SkFILEStream::SkFILEStream(FILE* file, Ownership ownership)
+ : fFILE(file)
+ , fOwnership(ownership) {
+}
+
+SkFILEStream::~SkFILEStream() {
+ if (fFILE && fOwnership != kCallerRetains_Ownership) {
+ sk_fclose(fFILE);
+ }
+}
+
+void SkFILEStream::setPath(const char path[]) {
+ fName.set(path);
+ if (fFILE) {
+ sk_fclose(fFILE);
+ fFILE = nullptr;
+ }
+ if (path) {
+ fFILE = sk_fopen(fName.c_str(), kRead_SkFILE_Flag);
+ }
+}
+
+size_t SkFILEStream::read(void* buffer, size_t size) {
+ if (fFILE) {
+ return sk_fread(buffer, size, fFILE);
+ }
+ return 0;
+}
+
+bool SkFILEStream::isAtEnd() const {
+ return sk_feof(fFILE);
+}
+
+bool SkFILEStream::rewind() {
+ if (fFILE) {
+ if (sk_frewind(fFILE)) {
+ return true;
+ }
+ // we hit an error
+ sk_fclose(fFILE);
+ fFILE = nullptr;
+ }
+ return false;
+}
+
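+// duplicate() avoids copying when it can: it reuses already-cached fData, or reopens
+// the same file by name (verified with sk_fidentical); otherwise it reads the whole
+// FILE into an SkData and returns a memory stream over that.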
+SkStreamAsset* SkFILEStream::duplicate() const {
+ if (nullptr == fFILE) {
+ return new SkMemoryStream();
+ }
+
+ if (fData.get()) {
+ return new SkMemoryStream(fData);
+ }
+
+ if (!fName.isEmpty()) {
+ SkAutoTDelete<SkFILEStream> that(new SkFILEStream(fName.c_str()));
+ if (sk_fidentical(that->fFILE, this->fFILE)) {
+ return that.release();
+ }
+ }
+
+ fData = SkData::MakeFromFILE(fFILE);
+ if (nullptr == fData) {
+ return nullptr;
+ }
+ return new SkMemoryStream(fData);
+}
+
+size_t SkFILEStream::getPosition() const {
+ return sk_ftell(fFILE);
+}
+
+bool SkFILEStream::seek(size_t position) {
+ return sk_fseek(fFILE, position);
+}
+
+bool SkFILEStream::move(long offset) {
+ return sk_fmove(fFILE, offset);
+}
+
+SkStreamAsset* SkFILEStream::fork() const {
+ SkAutoTDelete<SkStreamAsset> that(this->duplicate());
+ that->seek(this->getPosition());
+ return that.release();
+}
+
+size_t SkFILEStream::getLength() const {
+ return sk_fgetsize(fFILE);
+}
+
+const void* SkFILEStream::getMemoryBase() {
+ if (nullptr == fData.get()) {
+ return nullptr;
+ }
+ return fData->data();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static sk_sp<SkData> newFromParams(const void* src, size_t size, bool copyData) {
+ if (copyData) {
+ return SkData::MakeWithCopy(src, size);
+ } else {
+ return SkData::MakeWithoutCopy(src, size);
+ }
+}
+
+SkMemoryStream::SkMemoryStream() {
+ fData = SkData::MakeEmpty();
+ fOffset = 0;
+}
+
+SkMemoryStream::SkMemoryStream(size_t size) {
+ fData = SkData::MakeUninitialized(size);
+ fOffset = 0;
+}
+
+SkMemoryStream::SkMemoryStream(const void* src, size_t size, bool copyData) {
+ fData = newFromParams(src, size, copyData);
+ fOffset = 0;
+}
+
+SkMemoryStream::SkMemoryStream(sk_sp<SkData> data) : fData(std::move(data)) {
+ if (nullptr == fData) {
+ fData = SkData::MakeEmpty();
+ }
+ fOffset = 0;
+}
+
+#ifdef SK_SUPPORT_LEGACY_STREAM_DATA
+SkMemoryStream::SkMemoryStream(SkData* data) {
+ if (nullptr == data) {
+ fData = SkData::MakeEmpty();
+ } else {
+ fData = sk_ref_sp(data);
+ }
+ fOffset = 0;
+}
+#endif
+
+void SkMemoryStream::setMemoryOwned(const void* src, size_t size) {
+ fData = SkData::MakeFromMalloc(src, size);
+ fOffset = 0;
+}
+
+void SkMemoryStream::setMemory(const void* src, size_t size, bool copyData) {
+ fData = newFromParams(src, size, copyData);
+ fOffset = 0;
+}
+
+void SkMemoryStream::setData(sk_sp<SkData> data) {
+ if (nullptr == data) {
+ fData = SkData::MakeEmpty();
+ } else {
+ fData = data;
+ }
+ fOffset = 0;
+}
+
+void SkMemoryStream::skipToAlign4() {
+ // cast to remove unary-minus warning
+ fOffset += -(int)fOffset & 0x03;
+}
+
+size_t SkMemoryStream::read(void* buffer, size_t size) {
+ size_t dataSize = fData->size();
+
+ if (size > dataSize - fOffset) {
+ size = dataSize - fOffset;
+ }
+ if (buffer) {
+ memcpy(buffer, fData->bytes() + fOffset, size);
+ }
+ fOffset += size;
+ return size;
+}
+
+size_t SkMemoryStream::peek(void* buffer, size_t size) const {
+ SkASSERT(buffer != nullptr);
+
+ const size_t currentOffset = fOffset;
+ SkMemoryStream* nonConstThis = const_cast<SkMemoryStream*>(this);
+ const size_t bytesRead = nonConstThis->read(buffer, size);
+ nonConstThis->fOffset = currentOffset;
+ return bytesRead;
+}
+
+bool SkMemoryStream::isAtEnd() const {
+ return fOffset == fData->size();
+}
+
+bool SkMemoryStream::rewind() {
+ fOffset = 0;
+ return true;
+}
+
+SkMemoryStream* SkMemoryStream::duplicate() const { return new SkMemoryStream(fData); }
+
+size_t SkMemoryStream::getPosition() const {
+ return fOffset;
+}
+
+bool SkMemoryStream::seek(size_t position) {
+ fOffset = position > fData->size()
+ ? fData->size()
+ : position;
+ return true;
+}
+
+bool SkMemoryStream::move(long offset) {
+ return this->seek(fOffset + offset);
+}
+
+SkMemoryStream* SkMemoryStream::fork() const {
+ SkAutoTDelete<SkMemoryStream> that(this->duplicate());
+ that->seek(fOffset);
+ return that.release();
+}
+
+size_t SkMemoryStream::getLength() const {
+ return fData->size();
+}
+
+const void* SkMemoryStream::getMemoryBase() {
+ return fData->data();
+}
+
+const void* SkMemoryStream::getAtPos() {
+ return fData->bytes() + fOffset;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkFILEWStream::SkFILEWStream(const char path[])
+{
+ fFILE = sk_fopen(path, kWrite_SkFILE_Flag);
+}
+
+SkFILEWStream::~SkFILEWStream()
+{
+ if (fFILE) {
+ sk_fclose(fFILE);
+ }
+}
+
+size_t SkFILEWStream::bytesWritten() const {
+ return sk_ftell(fFILE);
+}
+
+bool SkFILEWStream::write(const void* buffer, size_t size)
+{
+ if (fFILE == nullptr) {
+ return false;
+ }
+
+ if (sk_fwrite(buffer, size, fFILE) != size)
+ {
+ SkDEBUGCODE(SkDebugf("SkFILEWStream failed writing %d bytes\n", size);)
+ sk_fclose(fFILE);
+ fFILE = nullptr;
+ return false;
+ }
+ return true;
+}
+
+void SkFILEWStream::flush()
+{
+ if (fFILE) {
+ sk_fflush(fFILE);
+ }
+}
+
+void SkFILEWStream::fsync()
+{
+ flush();
+ if (fFILE) {
+ sk_fsync(fFILE);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////
+
+SkMemoryWStream::SkMemoryWStream(void* buffer, size_t size)
+ : fBuffer((char*)buffer), fMaxLength(size), fBytesWritten(0)
+{
+}
+
+bool SkMemoryWStream::write(const void* buffer, size_t size) {
+ size = SkTMin(size, fMaxLength - fBytesWritten);
+ if (size > 0) {
+ memcpy(fBuffer + fBytesWritten, buffer, size);
+ fBytesWritten += size;
+ return true;
+ }
+ return false;
+}
+
+////////////////////////////////////////////////////////////////////////
+
+#define SkDynamicMemoryWStream_MinBlockSize 256
+
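+// Each Block is a single sk_malloc allocation: this header followed immediately by
+// its payload. start() points just past the header, fCurr is the write cursor, and
+// fStop marks the end of the payload.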
+struct SkDynamicMemoryWStream::Block {
+ Block* fNext;
+ char* fCurr;
+ char* fStop;
+
+ const char* start() const { return (const char*)(this + 1); }
+ char* start() { return (char*)(this + 1); }
+ size_t avail() const { return fStop - fCurr; }
+ size_t written() const { return fCurr - this->start(); }
+
+ void init(size_t size)
+ {
+ fNext = nullptr;
+ fCurr = this->start();
+ fStop = this->start() + size;
+ }
+
+ const void* append(const void* data, size_t size)
+ {
+ SkASSERT((size_t)(fStop - fCurr) >= size);
+ memcpy(fCurr, data, size);
+ fCurr += size;
+ return (const void*)((const char*)data + size);
+ }
+};
+
+SkDynamicMemoryWStream::SkDynamicMemoryWStream()
+ : fHead(nullptr), fTail(nullptr), fBytesWritten(0)
+{}
+
+SkDynamicMemoryWStream::~SkDynamicMemoryWStream()
+{
+ reset();
+}
+
+void SkDynamicMemoryWStream::reset()
+{
+ this->invalidateCopy();
+
+ Block* block = fHead;
+
+ while (block != nullptr) {
+ Block* next = block->fNext;
+ sk_free(block);
+ block = next;
+ }
+ fHead = fTail = nullptr;
+ fBytesWritten = 0;
+}
+
+bool SkDynamicMemoryWStream::write(const void* buffer, size_t count)
+{
+ if (count > 0) {
+ this->invalidateCopy();
+
+ fBytesWritten += count;
+
+ size_t size;
+
+ if (fTail != nullptr && fTail->avail() > 0) {
+ size = SkTMin(fTail->avail(), count);
+ buffer = fTail->append(buffer, size);
+ SkASSERT(count >= size);
+ count -= size;
+ if (count == 0)
+ return true;
+ }
+
+ size = SkTMax<size_t>(count, SkDynamicMemoryWStream_MinBlockSize);
+ Block* block = (Block*)sk_malloc_throw(sizeof(Block) + size);
+ block->init(size);
+ block->append(buffer, count);
+
+ if (fTail != nullptr)
+ fTail->fNext = block;
+ else
+ fHead = fTail = block;
+ fTail = block;
+ }
+ return true;
+}
+
+bool SkDynamicMemoryWStream::write(const void* buffer, size_t offset, size_t count)
+{
+ if (offset + count > fBytesWritten) {
+ return false; // test does not partially modify
+ }
+
+ this->invalidateCopy();
+
+ Block* block = fHead;
+ while (block != nullptr) {
+ size_t size = block->written();
+ if (offset < size) {
+ size_t part = offset + count > size ? size - offset : count;
+ memcpy(block->start() + offset, buffer, part);
+ if (count <= part)
+ return true;
+ count -= part;
+ buffer = (const void*) ((char* ) buffer + part);
+ }
+ offset = offset > size ? offset - size : 0;
+ block = block->fNext;
+ }
+ return false;
+}
+
+bool SkDynamicMemoryWStream::read(void* buffer, size_t offset, size_t count)
+{
+ if (offset + count > fBytesWritten)
+ return false; // test does not partially modify
+ Block* block = fHead;
+ while (block != nullptr) {
+ size_t size = block->written();
+ if (offset < size) {
+ size_t part = offset + count > size ? size - offset : count;
+ memcpy(buffer, block->start() + offset, part);
+ if (count <= part)
+ return true;
+ count -= part;
+ buffer = (void*) ((char* ) buffer + part);
+ }
+ offset = offset > size ? offset - size : 0;
+ block = block->fNext;
+ }
+ return false;
+}
+
+void SkDynamicMemoryWStream::copyTo(void* dst) const
+{
+ if (fCopy) {
+ memcpy(dst, fCopy->data(), fBytesWritten);
+ } else {
+ Block* block = fHead;
+
+ while (block != nullptr) {
+ size_t size = block->written();
+ memcpy(dst, block->start(), size);
+ dst = (void*)((char*)dst + size);
+ block = block->fNext;
+ }
+ }
+}
+
+void SkDynamicMemoryWStream::writeToStream(SkWStream* dst) const {
+ for (Block* block = fHead; block != nullptr; block = block->fNext) {
+ dst->write(block->start(), block->written());
+ }
+}
+
+void SkDynamicMemoryWStream::padToAlign4()
+{
+ // cast to remove unary-minus warning
+ int padBytes = -(int)fBytesWritten & 0x03;
+ if (padBytes == 0)
+ return;
+ int zero = 0;
+ write(&zero, padBytes);
+}
+
+sk_sp<SkData> SkDynamicMemoryWStream::snapshotAsData() const {
+ if (nullptr == fCopy) {
+ auto data = SkData::MakeUninitialized(fBytesWritten);
+ // be sure to call copyTo() before we assign to fCopy
+ this->copyTo(data->writable_data());
+ fCopy = std::move(data);
+ }
+ return fCopy;
+}
+
+sk_sp<SkData> SkDynamicMemoryWStream::detachAsData() {
+ sk_sp<SkData> data = this->snapshotAsData();
+ this->reset();
+ return data;
+}
+
+void SkDynamicMemoryWStream::invalidateCopy() {
+ fCopy = nullptr;
+}
+
+class SkBlockMemoryRefCnt : public SkRefCnt {
+public:
+ explicit SkBlockMemoryRefCnt(SkDynamicMemoryWStream::Block* head) : fHead(head) { }
+
+ virtual ~SkBlockMemoryRefCnt() {
+ SkDynamicMemoryWStream::Block* block = fHead;
+ while (block != nullptr) {
+ SkDynamicMemoryWStream::Block* next = block->fNext;
+ sk_free(block);
+ block = next;
+ }
+ }
+
+ SkDynamicMemoryWStream::Block* const fHead;
+};
+
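+// A read-only stream over a chain of SkDynamicMemoryWStream blocks. Ownership of the
+// chain is shared through SkBlockMemoryRefCnt, so duplicates and forks stay valid
+// after the originating writer has been reset.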
+class SkBlockMemoryStream : public SkStreamAsset {
+public:
+ SkBlockMemoryStream(SkDynamicMemoryWStream::Block* head, size_t size)
+ : fBlockMemory(new SkBlockMemoryRefCnt(head))
+ , fCurrent(head)
+ , fSize(size)
+ , fOffset(0)
+ , fCurrentOffset(0) {}
+
+ SkBlockMemoryStream(SkBlockMemoryRefCnt* headRef, size_t size)
+ : fBlockMemory(SkRef(headRef)), fCurrent(fBlockMemory->fHead)
+ , fSize(size) , fOffset(0), fCurrentOffset(0) { }
+
+ size_t read(void* buffer, size_t rawCount) override {
+ size_t count = rawCount;
+ if (fOffset + count > fSize) {
+ count = fSize - fOffset;
+ }
+ size_t bytesLeftToRead = count;
+ while (fCurrent != nullptr) {
+ size_t bytesLeftInCurrent = fCurrent->written() - fCurrentOffset;
+ size_t bytesFromCurrent = SkTMin(bytesLeftToRead, bytesLeftInCurrent);
+ if (buffer) {
+ memcpy(buffer, fCurrent->start() + fCurrentOffset, bytesFromCurrent);
+ buffer = SkTAddOffset<void>(buffer, bytesFromCurrent);
+ }
+ if (bytesLeftToRead <= bytesFromCurrent) {
+ fCurrentOffset += bytesFromCurrent;
+ fOffset += count;
+ return count;
+ }
+ bytesLeftToRead -= bytesFromCurrent;
+ fCurrent = fCurrent->fNext;
+ fCurrentOffset = 0;
+ }
+ SkASSERT(false);
+ return 0;
+ }
+
+ bool isAtEnd() const override {
+ return fOffset == fSize;
+ }
+
+ size_t peek(void* buff, size_t bytesToPeek) const override {
+ SkASSERT(buff != nullptr);
+
+ bytesToPeek = SkTMin(bytesToPeek, fSize - fOffset);
+
+ size_t bytesLeftToPeek = bytesToPeek;
+ char* buffer = static_cast<char*>(buff);
+ const SkDynamicMemoryWStream::Block* current = fCurrent;
+ size_t currentOffset = fCurrentOffset;
+ while (bytesLeftToPeek) {
+ SkASSERT(current);
+ size_t bytesFromCurrent =
+ SkTMin(current->written() - currentOffset, bytesLeftToPeek);
+ memcpy(buffer, current->start() + currentOffset, bytesFromCurrent);
+ bytesLeftToPeek -= bytesFromCurrent;
+ buffer += bytesFromCurrent;
+ current = current->fNext;
+ currentOffset = 0;
+ }
+ return bytesToPeek;
+ }
+
+ bool rewind() override {
+ fCurrent = fBlockMemory->fHead;
+ fOffset = 0;
+ fCurrentOffset = 0;
+ return true;
+ }
+
+ SkBlockMemoryStream* duplicate() const override {
+ return new SkBlockMemoryStream(fBlockMemory.get(), fSize);
+ }
+
+ size_t getPosition() const override {
+ return fOffset;
+ }
+
+ bool seek(size_t position) override {
+ // If possible, skip forward.
+ if (position >= fOffset) {
+ size_t skipAmount = position - fOffset;
+ return this->skip(skipAmount) == skipAmount;
+ }
+ // If possible, move backward within the current block.
+ size_t moveBackAmount = fOffset - position;
+ if (moveBackAmount <= fCurrentOffset) {
+ fCurrentOffset -= moveBackAmount;
+ fOffset -= moveBackAmount;
+ return true;
+ }
+ // Otherwise rewind and move forward.
+ return this->rewind() && this->skip(position) == position;
+ }
+
+ bool move(long offset) override {
+ return seek(fOffset + offset);
+ }
+
+ SkBlockMemoryStream* fork() const override {
+ SkAutoTDelete<SkBlockMemoryStream> that(this->duplicate());
+ that->fCurrent = this->fCurrent;
+ that->fOffset = this->fOffset;
+ that->fCurrentOffset = this->fCurrentOffset;
+ return that.release();
+ }
+
+ size_t getLength() const override {
+ return fSize;
+ }
+
+ const void* getMemoryBase() override {
+ if (nullptr != fBlockMemory->fHead &&
+ nullptr == fBlockMemory->fHead->fNext) {
+ return fBlockMemory->fHead->start();
+ }
+ return nullptr;
+ }
+
+private:
+ SkAutoTUnref<SkBlockMemoryRefCnt> const fBlockMemory;
+ SkDynamicMemoryWStream::Block const * fCurrent;
+ size_t const fSize;
+ size_t fOffset;
+ size_t fCurrentOffset;
+};
+
+SkStreamAsset* SkDynamicMemoryWStream::detachAsStream() {
+ if (fCopy) {
+ SkMemoryStream* stream = new SkMemoryStream(fCopy);
+ this->reset();
+ return stream;
+ }
+ SkBlockMemoryStream* stream = new SkBlockMemoryStream(fHead, fBytesWritten);
+ fHead = 0;
+ this->reset();
+ return stream;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkDebugWStream::newline()
+{
+#if defined(SK_DEBUG)
+ SkDebugf("\n");
+ fBytesWritten++;
+#endif
+}
+
+bool SkDebugWStream::write(const void* buffer, size_t size)
+{
+#if defined(SK_DEBUG)
+ char* s = new char[size+1];
+ memcpy(s, buffer, size);
+ s[size] = 0;
+ SkDebugf("%s", s);
+ delete[] s;
+ fBytesWritten += size;
+#endif
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+
+static sk_sp<SkData> mmap_filename(const char path[]) {
+ FILE* file = sk_fopen(path, kRead_SkFILE_Flag);
+ if (nullptr == file) {
+ return nullptr;
+ }
+
+ auto data = SkData::MakeFromFILE(file);
+ sk_fclose(file);
+ return data;
+}
+
+std::unique_ptr<SkStreamAsset> SkStream::MakeFromFile(const char path[]) {
+ auto data(mmap_filename(path));
+ if (data) {
+ return skstd::make_unique<SkMemoryStream>(std::move(data));
+ }
+
+ // If we get here, then our attempt at using mmap failed, so try normal file access.
+ auto stream = skstd::make_unique<SkFILEStream>(path);
+ if (!stream->isValid()) {
+ return nullptr;
+ }
+ return std::move(stream);
+}
+
+// Declared in SkStreamPriv.h:
+sk_sp<SkData> SkCopyStreamToData(SkStream* stream) {
+ SkASSERT(stream != nullptr);
+
+ if (stream->hasLength()) {
+ return SkData::MakeFromStream(stream, stream->getLength());
+ }
+
+ SkDynamicMemoryWStream tempStream;
+ const size_t bufferSize = 4096;
+ char buffer[bufferSize];
+ do {
+ size_t bytesRead = stream->read(buffer, bufferSize);
+ tempStream.write(buffer, bytesRead);
+ } while (!stream->isAtEnd());
+ return tempStream.detachAsData();
+}
+
+bool SkStreamCopy(SkWStream* out, SkStream* input) {
+ const char* base = static_cast<const char*>(input->getMemoryBase());
+ if (base && input->hasPosition() && input->hasLength()) {
+ // Shortcut that avoids the while loop.
+ size_t position = input->getPosition();
+ size_t length = input->getLength();
+ SkASSERT(length >= position);
+ return out->write(&base[position], length - position);
+ }
+ char scratch[4096];
+ size_t count;
+ while (true) {
+ count = input->read(scratch, sizeof(scratch));
+ if (0 == count) {
+ return true;
+ }
+ if (!out->write(scratch, count)) {
+ return false;
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkStreamPriv.h b/gfx/skia/skia/src/core/SkStreamPriv.h
new file mode 100644
index 000000000..ac822c111
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStreamPriv.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStreamPriv_DEFINED
+#define SkStreamPriv_DEFINED
+
+#include "SkRefCnt.h"
+
+class SkData;
+class SkStream;
+class SkWStream;
+
+/**
+ * Copy the provided stream into an SkData.
+ *
+ * Note: assumes the stream is at the beginning. If it has a length
+ * but is not at the beginning, this call will fail (return nullptr).
+ *
+ * @param stream SkStream to be copied into data.
+ * @return the resulting SkData, or nullptr on failure.
+ */
+sk_sp<SkData> SkCopyStreamToData(SkStream* stream);
+
+/**
+ * Copies the input stream from the current position to the end.
+ * Does not rewind the input stream.
+ */
+bool SkStreamCopy(SkWStream* out, SkStream* input);
+
+#endif // SkStreamPriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkString.cpp b/gfx/skia/skia/src/core/SkString.cpp
new file mode 100644
index 000000000..6b04672fa
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkString.cpp
@@ -0,0 +1,680 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkAtomics.h"
+#include "SkString.h"
+#include "SkUtils.h"
+#include <stdarg.h>
+#include <stdio.h>
+
+// number of bytes (on the stack) to receive the printf result
+static const size_t kBufferSize = 1024;
+
+#if defined(_MSC_VER) && _MSC_VER < 1900
+ #define VSNPRINTF(buffer, size, format, args) \
+ _vsnprintf_s(buffer, size, _TRUNCATE, format, args)
+ #define SNPRINTF _snprintf
+#else
+ #define VSNPRINTF vsnprintf
+ #define SNPRINTF snprintf
+#endif
+
+#define ARGS_TO_BUFFER(format, buffer, size, written) \
+ do { \
+ va_list args; \
+ va_start(args, format); \
+ written = VSNPRINTF(buffer, size, format, args); \
+ SkASSERT(written >= 0 && written < SkToInt(size)); \
+ va_end(args); \
+ } while (0)
+
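+// Both variants format into a fixed stack buffer first; if the output was truncated,
+// they measure the full length and reformat into a heap allocation of exactly that size.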
+#if defined(_MSC_VER) && _MSC_VER < 1900
+#define V_SKSTRING_PRINTF(output, format) \
+ do { \
+ va_list args; \
+ va_start(args, format); \
+ char buffer[kBufferSize]; \
+ int length = _vsnprintf_s(buffer, sizeof(buffer), \
+ _TRUNCATE, format, args); \
+ va_end(args); \
+ if (length >= 0 && length < (int)sizeof(buffer)) { \
+ output.set(buffer, length); \
+ break; \
+ } \
+ va_start(args, format); \
+ length = _vscprintf(format, args); \
+ va_end(args); \
+ SkAutoTMalloc<char> autoTMalloc((size_t)length + 1); \
+ va_start(args, format); \
+ SkDEBUGCODE(int check = ) _vsnprintf_s(autoTMalloc.get(), \
+ length + 1, _TRUNCATE, \
+ format, args); \
+ va_end(args); \
+ SkASSERT(check == length); \
+ output.set(autoTMalloc.get(), length); \
+ SkASSERT(output[length] == '\0'); \
+ } while (false)
+#else
+#define V_SKSTRING_PRINTF(output, format) \
+ do { \
+ va_list args; \
+ va_start(args, format); \
+ char buffer[kBufferSize]; \
+ int length = vsnprintf(buffer, sizeof(buffer), format, args); \
+ va_end(args); \
+ if (length < 0) { \
+ break; \
+ } \
+ if (length < (int)sizeof(buffer)) { \
+ output.set(buffer, length); \
+ break; \
+ } \
+ SkAutoTMalloc<char> autoTMalloc((size_t)length + 1); \
+ va_start(args, format); \
+ SkDEBUGCODE(int check = ) vsnprintf(autoTMalloc.get(), \
+ length + 1, format, args); \
+ va_end(args); \
+ SkASSERT(check == length); \
+ output.set(autoTMalloc.get(), length); \
+ SkASSERT(output[length] == '\0'); \
+ } while (false)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkStrEndsWith(const char string[], const char suffixStr[]) {
+ SkASSERT(string);
+ SkASSERT(suffixStr);
+ size_t strLen = strlen(string);
+ size_t suffixLen = strlen(suffixStr);
+ return strLen >= suffixLen &&
+ !strncmp(string + strLen - suffixLen, suffixStr, suffixLen);
+}
+
+bool SkStrEndsWith(const char string[], const char suffixChar) {
+ SkASSERT(string);
+ size_t strLen = strlen(string);
+ if (0 == strLen) {
+ return false;
+ } else {
+ return (suffixChar == string[strLen-1]);
+ }
+}
+
+int SkStrStartsWithOneOf(const char string[], const char prefixes[]) {
+ int index = 0;
+ do {
+ const char* limit = strchr(prefixes, '\0');
+ if (!strncmp(string, prefixes, limit - prefixes)) {
+ return index;
+ }
+ prefixes = limit + 1;
+ index++;
+ } while (prefixes[0]);
+ return -1;
+}
+
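+// The SkStrAppend* helpers write digits into string[] without a terminating zero and
+// return a pointer just past the last character written; callers reserve buffers of
+// the matching SkStrAppend*_MaxSize.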
+char* SkStrAppendU32(char string[], uint32_t dec) {
+ SkDEBUGCODE(char* start = string;)
+
+ char buffer[SkStrAppendU32_MaxSize];
+ char* p = buffer + sizeof(buffer);
+
+ do {
+ *--p = SkToU8('0' + dec % 10);
+ dec /= 10;
+ } while (dec != 0);
+
+ SkASSERT(p >= buffer);
+ char* stop = buffer + sizeof(buffer);
+ while (p < stop) {
+ *string++ = *p++;
+ }
+ SkASSERT(string - start <= SkStrAppendU32_MaxSize);
+ return string;
+}
+
+char* SkStrAppendS32(char string[], int32_t dec) {
+ uint32_t udec = dec;
+ if (dec < 0) {
+ *string++ = '-';
+ udec = ~udec + 1; // udec = -udec, but silences some warnings that are trying to be helpful
+ }
+ return SkStrAppendU32(string, udec);
+}
+
+char* SkStrAppendU64(char string[], uint64_t dec, int minDigits) {
+ SkDEBUGCODE(char* start = string;)
+
+ char buffer[SkStrAppendU64_MaxSize];
+ char* p = buffer + sizeof(buffer);
+
+ do {
+ *--p = SkToU8('0' + (int32_t) (dec % 10));
+ dec /= 10;
+ minDigits--;
+ } while (dec != 0);
+
+ while (minDigits > 0) {
+ *--p = '0';
+ minDigits--;
+ }
+
+ SkASSERT(p >= buffer);
+ size_t cp_len = buffer + sizeof(buffer) - p;
+ memcpy(string, p, cp_len);
+ string += cp_len;
+
+ SkASSERT(string - start <= SkStrAppendU64_MaxSize);
+ return string;
+}
+
+char* SkStrAppendS64(char string[], int64_t dec, int minDigits) {
+ uint64_t udec = dec;
+ if (dec < 0) {
+ *string++ = '-';
+ udec = ~udec + 1; // udec = -udec, but silences some warnings that are trying to be helpful
+ }
+ return SkStrAppendU64(string, udec, minDigits);
+}
+
+char* SkStrAppendFloat(char string[], float value) {
+ // since floats have at most 8 significant digits, we limit our %g to that.
+ static const char gFormat[] = "%.8g";
+ // make it 1 larger for the terminating 0
+ char buffer[SkStrAppendScalar_MaxSize + 1];
+ int len = SNPRINTF(buffer, sizeof(buffer), gFormat, value);
+ memcpy(string, buffer, len);
+ SkASSERT(len <= SkStrAppendScalar_MaxSize);
+ return string + len;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// the 3 values are [length] [refcnt] [terminating zero data]
+const SkString::Rec SkString::gEmptyRec = { 0, 0, 0 };
+
+#define SizeOfRec() (gEmptyRec.data() - (const char*)&gEmptyRec)
+
+static uint32_t trim_size_t_to_u32(size_t value) {
+ if (sizeof(size_t) > sizeof(uint32_t)) {
+ if (value > SK_MaxU32) {
+ value = SK_MaxU32;
+ }
+ }
+ return (uint32_t)value;
+}
+
+static size_t check_add32(size_t base, size_t extra) {
+ SkASSERT(base <= SK_MaxU32);
+ if (sizeof(size_t) > sizeof(uint32_t)) {
+ if (base + extra > SK_MaxU32) {
+ extra = SK_MaxU32 - base;
+ }
+ }
+ return extra;
+}
+
+SkString::Rec* SkString::AllocRec(const char text[], size_t len) {
+ Rec* rec;
+
+ if (0 == len) {
+ rec = const_cast<Rec*>(&gEmptyRec);
+ } else {
+ len = trim_size_t_to_u32(len);
+
+ // add 1 for terminating 0, then align4 so we can have some slop when growing the string
+ rec = (Rec*)sk_malloc_throw(SizeOfRec() + SkAlign4(len + 1));
+ rec->fLength = SkToU32(len);
+ rec->fRefCnt = 1;
+ if (text) {
+ memcpy(rec->data(), text, len);
+ }
+ rec->data()[len] = 0;
+ }
+ return rec;
+}
+
+SkString::Rec* SkString::RefRec(Rec* src) {
+ if (src != &gEmptyRec) {
+ sk_atomic_inc(&src->fRefCnt);
+ }
+ return src;
+}
+
+#ifdef SK_DEBUG
+void SkString::validate() const {
+ // make sure no one has written over our global
+ SkASSERT(0 == gEmptyRec.fLength);
+ SkASSERT(0 == gEmptyRec.fRefCnt);
+ SkASSERT(0 == gEmptyRec.data()[0]);
+
+ if (fRec != &gEmptyRec) {
+ SkASSERT(fRec->fLength > 0);
+ SkASSERT(fRec->fRefCnt > 0);
+ SkASSERT(0 == fRec->data()[fRec->fLength]);
+ }
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkString::SkString() : fRec(const_cast<Rec*>(&gEmptyRec)) {
+}
+
+SkString::SkString(size_t len) {
+ fRec = AllocRec(nullptr, len);
+}
+
+SkString::SkString(const char text[]) {
+ size_t len = text ? strlen(text) : 0;
+
+ fRec = AllocRec(text, len);
+}
+
+SkString::SkString(const char text[], size_t len) {
+ fRec = AllocRec(text, len);
+}
+
+SkString::SkString(const SkString& src) {
+ src.validate();
+
+ fRec = RefRec(src.fRec);
+}
+
+SkString::SkString(SkString&& src) {
+ src.validate();
+
+ fRec = src.fRec;
+ src.fRec = const_cast<Rec*>(&gEmptyRec);
+}
+
+SkString::~SkString() {
+ this->validate();
+
+ if (fRec->fLength) {
+ SkASSERT(fRec->fRefCnt > 0);
+ if (sk_atomic_dec(&fRec->fRefCnt) == 1) {
+ sk_free(fRec);
+ }
+ }
+}
+
+bool SkString::equals(const SkString& src) const {
+ return fRec == src.fRec || this->equals(src.c_str(), src.size());
+}
+
+bool SkString::equals(const char text[]) const {
+ return this->equals(text, text ? strlen(text) : 0);
+}
+
+bool SkString::equals(const char text[], size_t len) const {
+ SkASSERT(len == 0 || text != nullptr);
+
+ return fRec->fLength == len && !memcmp(fRec->data(), text, len);
+}
+
+SkString& SkString::operator=(const SkString& src) {
+ this->validate();
+
+ if (fRec != src.fRec) {
+ SkString tmp(src);
+ this->swap(tmp);
+ }
+ return *this;
+}
+
+SkString& SkString::operator=(SkString&& src) {
+ this->validate();
+
+ if (fRec != src.fRec) {
+ this->swap(src);
+ }
+ return *this;
+}
+
+SkString& SkString::operator=(const char text[]) {
+ this->validate();
+
+ SkString tmp(text);
+ this->swap(tmp);
+
+ return *this;
+}
+
+void SkString::reset() {
+ this->validate();
+
+ if (fRec->fLength) {
+ SkASSERT(fRec->fRefCnt > 0);
+ if (sk_atomic_dec(&fRec->fRefCnt) == 1) {
+ sk_free(fRec);
+ }
+ }
+
+ fRec = const_cast<Rec*>(&gEmptyRec);
+}
+
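+// Copy-on-write: if the Rec is shared, clone it first so other SkStrings that still
+// reference the old Rec are unaffected by the caller's writes.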
+char* SkString::writable_str() {
+ this->validate();
+
+ if (fRec->fLength) {
+ if (fRec->fRefCnt > 1) {
+ Rec* rec = AllocRec(fRec->data(), fRec->fLength);
+ if (sk_atomic_dec(&fRec->fRefCnt) == 1) {
+ // In this case after our check of fRefCnt > 1, we suddenly
+ // did become the only owner, so now we have two copies of the
+ // data (fRec and rec), so we need to delete one of them.
+ sk_free(fRec);
+ }
+ fRec = rec;
+ }
+ }
+ return fRec->data();
+}
+
+void SkString::set(const char text[]) {
+ this->set(text, text ? strlen(text) : 0);
+}
+
+void SkString::set(const char text[], size_t len) {
+ len = trim_size_t_to_u32(len);
+
+ if (0 == len) {
+ this->reset();
+ } else if (1 == fRec->fRefCnt && len <= fRec->fLength) {
+ // should we resize if len <<<< fLength, to save RAM? (e.g. len < (fLength>>1))?
+ // just use less of the buffer without allocating a smaller one
+ char* p = this->writable_str();
+ if (text) {
+ memcpy(p, text, len);
+ }
+ p[len] = 0;
+ fRec->fLength = SkToU32(len);
+ } else if (1 == fRec->fRefCnt && (fRec->fLength >> 2) == (len >> 2)) {
+ // we have spare room in the current allocation, so don't alloc a larger one
+ char* p = this->writable_str();
+ if (text) {
+ memcpy(p, text, len);
+ }
+ p[len] = 0;
+ fRec->fLength = SkToU32(len);
+ } else {
+ SkString tmp(text, len);
+ this->swap(tmp);
+ }
+}
+
+void SkString::setUTF16(const uint16_t src[]) {
+ int count = 0;
+
+ while (src[count]) {
+ count += 1;
+ }
+ this->setUTF16(src, count);
+}
+
+void SkString::setUTF16(const uint16_t src[], size_t count) {
+ count = trim_size_t_to_u32(count);
+
+ if (0 == count) {
+ this->reset();
+ } else if (count <= fRec->fLength) {
+ // should we resize if len <<<< fLength, to save RAM? (e.g. len < (fLength>>1))
+ if (count < fRec->fLength) {
+ this->resize(count);
+ }
+ char* p = this->writable_str();
+ for (size_t i = 0; i < count; i++) {
+ p[i] = SkToU8(src[i]);
+ }
+ p[count] = 0;
+ } else {
+ SkString tmp(count); // puts a null terminator at the end of the string
+ char* p = tmp.writable_str();
+
+ for (size_t i = 0; i < count; i++) {
+ p[i] = SkToU8(src[i]);
+ }
+ this->swap(tmp);
+ }
+}
+
+void SkString::insert(size_t offset, const char text[]) {
+ this->insert(offset, text, text ? strlen(text) : 0);
+}
+
+void SkString::insert(size_t offset, const char text[], size_t len) {
+ if (len) {
+ size_t length = fRec->fLength;
+ if (offset > length) {
+ offset = length;
+ }
+
+ // If length + len would exceed 32 bits, trim len
+ len = check_add32(length, len);
+ if (0 == len) {
+ return;
+ }
+
+ /* If we're the only owner, and we have room in our allocation for the insert,
+ do it in place, rather than allocating a new buffer.
+
+ To know we have room, compare the allocated sizes
+ beforeAlloc = SkAlign4(length + 1)
+ afterAlloc = SkAlign4(length + 1 + len)
+ but SkAlign4(x) is (x + 3) >> 2 << 2
+ which is equivalent for testing to (length + 1 + 3) >> 2 == (length + 1 + 3 + len) >> 2
+ and we can then eliminate the +1+3 since that doesn't affect the answer
+ */
+ if (1 == fRec->fRefCnt && (length >> 2) == ((length + len) >> 2)) {
+ char* dst = this->writable_str();
+
+ if (offset < length) {
+ memmove(dst + offset + len, dst + offset, length - offset);
+ }
+ memcpy(dst + offset, text, len);
+
+ dst[length + len] = 0;
+ fRec->fLength = SkToU32(length + len);
+ } else {
+ /* Seems we should use realloc here, since that is safe if it fails
+ (we have the original data), and might be faster than alloc/copy/free.
+ */
+ SkString tmp(fRec->fLength + len);
+ char* dst = tmp.writable_str();
+
+ if (offset > 0) {
+ memcpy(dst, fRec->data(), offset);
+ }
+ memcpy(dst + offset, text, len);
+ if (offset < fRec->fLength) {
+ memcpy(dst + offset + len, fRec->data() + offset,
+ fRec->fLength - offset);
+ }
+
+ this->swap(tmp);
+ }
+ }
+}
+
+void SkString::insertUnichar(size_t offset, SkUnichar uni) {
+ char buffer[kMaxBytesInUTF8Sequence];
+ size_t len = SkUTF8_FromUnichar(uni, buffer);
+
+ if (len) {
+ this->insert(offset, buffer, len);
+ }
+}
+
+void SkString::insertS32(size_t offset, int32_t dec) {
+ char buffer[SkStrAppendS32_MaxSize];
+ char* stop = SkStrAppendS32(buffer, dec);
+ this->insert(offset, buffer, stop - buffer);
+}
+
+void SkString::insertS64(size_t offset, int64_t dec, int minDigits) {
+ char buffer[SkStrAppendS64_MaxSize];
+ char* stop = SkStrAppendS64(buffer, dec, minDigits);
+ this->insert(offset, buffer, stop - buffer);
+}
+
+void SkString::insertU32(size_t offset, uint32_t dec) {
+ char buffer[SkStrAppendU32_MaxSize];
+ char* stop = SkStrAppendU32(buffer, dec);
+ this->insert(offset, buffer, stop - buffer);
+}
+
+void SkString::insertU64(size_t offset, uint64_t dec, int minDigits) {
+ char buffer[SkStrAppendU64_MaxSize];
+ char* stop = SkStrAppendU64(buffer, dec, minDigits);
+ this->insert(offset, buffer, stop - buffer);
+}
+
+void SkString::insertHex(size_t offset, uint32_t hex, int minDigits) {
+ minDigits = SkTPin(minDigits, 0, 8);
+
+ static const char gHex[] = "0123456789ABCDEF";
+
+ char buffer[8];
+ char* p = buffer + sizeof(buffer);
+
+ do {
+ *--p = gHex[hex & 0xF];
+ hex >>= 4;
+ minDigits -= 1;
+ } while (hex != 0);
+
+ while (--minDigits >= 0) {
+ *--p = '0';
+ }
+
+ SkASSERT(p >= buffer);
+ this->insert(offset, p, buffer + sizeof(buffer) - p);
+}
+
+void SkString::insertScalar(size_t offset, SkScalar value) {
+ char buffer[SkStrAppendScalar_MaxSize];
+ char* stop = SkStrAppendScalar(buffer, value);
+ this->insert(offset, buffer, stop - buffer);
+}
+
+void SkString::printf(const char format[], ...) {
+ V_SKSTRING_PRINTF((*this), format);
+}
+
+void SkString::appendf(const char format[], ...) {
+ char buffer[kBufferSize];
+ int length;
+ ARGS_TO_BUFFER(format, buffer, kBufferSize, length);
+
+ this->append(buffer, length);
+}
+
+void SkString::appendVAList(const char format[], va_list args) {
+ char buffer[kBufferSize];
+ int length = VSNPRINTF(buffer, kBufferSize, format, args);
+ SkASSERT(length >= 0 && length < SkToInt(kBufferSize));
+
+ this->append(buffer, length);
+}
+
+void SkString::prependf(const char format[], ...) {
+ char buffer[kBufferSize];
+ int length;
+ ARGS_TO_BUFFER(format, buffer, kBufferSize, length);
+
+ this->prepend(buffer, length);
+}
+
+void SkString::prependVAList(const char format[], va_list args) {
+ char buffer[kBufferSize];
+ int length = VSNPRINTF(buffer, kBufferSize, format, args);
+ SkASSERT(length >= 0 && length < SkToInt(kBufferSize));
+
+ this->prepend(buffer, length);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkString::remove(size_t offset, size_t length) {
+ size_t size = this->size();
+
+ if (offset < size) {
+ if (length > size - offset) {
+ length = size - offset;
+ }
+ SkASSERT(length <= size);
+ SkASSERT(offset <= size - length);
+ if (length > 0) {
+ SkString tmp(size - length);
+ char* dst = tmp.writable_str();
+ const char* src = this->c_str();
+
+ if (offset) {
+ memcpy(dst, src, offset);
+ }
+ size_t tail = size - (offset + length);
+ if (tail) {
+ memcpy(dst + offset, src + (offset + length), tail);
+ }
+ SkASSERT(dst[tmp.size()] == 0);
+ this->swap(tmp);
+ }
+ }
+}
+
+void SkString::swap(SkString& other) {
+ this->validate();
+ other.validate();
+
+ SkTSwap<Rec*>(fRec, other.fRec);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkString SkStringPrintf(const char* format, ...) {
+ SkString formattedOutput;
+ V_SKSTRING_PRINTF(formattedOutput, format);
+ return formattedOutput;
+}
+
+void SkStrSplit(const char* str, const char* delimiters, SkStrSplitMode splitMode,
+ SkTArray<SkString>* out) {
+ if (splitMode == kCoalesce_SkStrSplitMode) {
+ // Skip any delimiters.
+ str += strspn(str, delimiters);
+ }
+ if (!*str) {
+ return;
+ }
+
+ while (true) {
+ // Find a token.
+ const size_t len = strcspn(str, delimiters);
+ if (splitMode == kStrict_SkStrSplitMode || len > 0) {
+ out->push_back().set(str, len);
+ str += len;
+ }
+
+ if (!*str) {
+ return;
+ }
+ if (splitMode == kCoalesce_SkStrSplitMode) {
+ // Skip any delimiters.
+ str += strspn(str, delimiters);
+ } else {
+ // Skip one delimiter.
+ str += 1;
+ }
+ }
+}
+
+#undef VSNPRINTF
+#undef SNPRINTF
diff --git a/gfx/skia/skia/src/core/SkStringUtils.cpp b/gfx/skia/skia/src/core/SkStringUtils.cpp
new file mode 100644
index 000000000..6f5699c80
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStringUtils.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkString.h"
+#include "SkStringUtils.h"
+
+void SkAddFlagToString(SkString* string, bool flag, const char* flagStr, bool* needSeparator) {
+ if (flag) {
+ if (*needSeparator) {
+ string->append("|");
+ }
+ string->append(flagStr);
+ *needSeparator = true;
+ }
+}
+
+void SkAppendScalar(SkString* str, SkScalar value, SkScalarAsStringType asType) {
+ switch (asType) {
+ case kHex_SkScalarAsStringType:
+ str->appendf("SkBits2Float(0x%08x)", SkFloat2Bits(value));
+ break;
+ case kDec_SkScalarAsStringType: {
+ SkString tmp;
+ tmp.printf("%g", value);
+ if (tmp.contains('.')) {
+ tmp.appendUnichar('f');
+ }
+ str->append(tmp);
+ break;
+ }
+ }
+}
+
+SkString SkTabString(const SkString& string, int tabCnt) {
+ if (tabCnt <= 0) {
+ return string;
+ }
+ SkString tabs;
+ for (int i = 0; i < tabCnt; ++i) {
+ tabs.append("\t");
+ }
+ SkString result;
+ static const char newline[] = "\n";
+ const char* input = string.c_str();
+ int nextNL = SkStrFind(input, newline);
+ while (nextNL >= 0) {
+ if (nextNL > 0) {
+ result.append(tabs);
+ }
+ result.append(input, nextNL + 1);
+ input += nextNL + 1;
+ nextNL = SkStrFind(input, newline);
+ }
+ if (*input != '\0') {
+ result.append(tabs);
+ result.append(input);
+ }
+ return result;
+}
diff --git a/gfx/skia/skia/src/core/SkStringUtils.h b/gfx/skia/skia/src/core/SkStringUtils.h
new file mode 100644
index 000000000..fd158c30b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStringUtils.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStringUtils_DEFINED
+#define SkStringUtils_DEFINED
+
+class SkString;
+
+/**
+ * Add 'flagStr' to 'string' and set 'needSeparator' to true only if 'flag' is
+ * true. If 'needSeparator' is true append a '|' before 'flagStr'. This method
+ * is used to streamline the creation of ASCII flag strings within the toString
+ * methods.
+ */
+void SkAddFlagToString(SkString* string, bool flag,
+ const char* flagStr, bool* needSeparator);
+
+
+enum SkScalarAsStringType {
+ kDec_SkScalarAsStringType,
+ kHex_SkScalarAsStringType,
+};
+
+void SkAppendScalar(SkString*, SkScalar, SkScalarAsStringType);
+
+static inline void SkAppendScalarDec(SkString* str, SkScalar value) {
+ SkAppendScalar(str, value, kDec_SkScalarAsStringType);
+}
+
+static inline void SkAppendScalarHex(SkString* str, SkScalar value) {
+ SkAppendScalar(str, value, kHex_SkScalarAsStringType);
+}
+
+/** Indents every non-empty line of the string by tabCnt tabs */
+SkString SkTabString(const SkString& string, int tabCnt);
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkStroke.cpp b/gfx/skia/skia/src/core/SkStroke.cpp
new file mode 100644
index 000000000..8ff7910bd
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStroke.cpp
@@ -0,0 +1,1554 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkStrokerPriv.h"
+#include "SkGeometry.h"
+#include "SkPathPriv.h"
+
+enum {
+ kTangent_RecursiveLimit,
+ kCubic_RecursiveLimit,
+ kConic_RecursiveLimit,
+ kQuad_RecursiveLimit
+};
+
+// quads with extreme widths (e.g. (0,1) (1,6) (0,3) width=5e7) recurse to point of failure
+// largest seen for normal cubics : 5, 26
+// largest seen for normal quads : 11
+static const int kRecursiveLimits[] = { 5*3, 26*3, 11*3, 11*3 }; // 3x limits seen in practice
+
+static_assert(0 == kTangent_RecursiveLimit, "cubic_stroke_relies_on_tangent_equalling_zero");
+static_assert(1 == kCubic_RecursiveLimit, "cubic_stroke_relies_on_cubic_equalling_one");
+static_assert(SK_ARRAY_COUNT(kRecursiveLimits) == kQuad_RecursiveLimit + 1,
+ "recursive_limits_mismatch");
+
+#ifdef SK_DEBUG
+ int gMaxRecursion[SK_ARRAY_COUNT(kRecursiveLimits)] = { 0 };
+#endif
+#ifndef DEBUG_QUAD_STROKER
+ #define DEBUG_QUAD_STROKER 0
+#endif
+
+#if DEBUG_QUAD_STROKER
+ /* Enable to show the decisions made in subdividing the curve -- helpful when the resulting
+ stroke has more than the optimal number of quadratics and lines */
+ #define STROKER_RESULT(resultType, depth, quadPts, format, ...) \
+ SkDebugf("[%d] %s " format "\n", depth, __FUNCTION__, __VA_ARGS__), \
+ SkDebugf(" " #resultType " t=(%g,%g)\n", quadPts->fStartT, quadPts->fEndT), \
+ resultType
+ #define STROKER_DEBUG_PARAMS(...) , __VA_ARGS__
+#else
+ #define STROKER_RESULT(resultType, depth, quadPts, format, ...) \
+ resultType
+ #define STROKER_DEBUG_PARAMS(...)
+#endif
+
+static inline bool degenerate_vector(const SkVector& v) {
+ return !SkPoint::CanNormalize(v.fX, v.fY);
+}
+
+static bool set_normal_unitnormal(const SkPoint& before, const SkPoint& after, SkScalar scale,
+ SkScalar radius,
+ SkVector* normal, SkVector* unitNormal) {
+ if (!unitNormal->setNormalize((after.fX - before.fX) * scale,
+ (after.fY - before.fY) * scale)) {
+ return false;
+ }
+ unitNormal->rotateCCW();
+ unitNormal->scale(radius, normal);
+ return true;
+}
+
+static bool set_normal_unitnormal(const SkVector& vec,
+ SkScalar radius,
+ SkVector* normal, SkVector* unitNormal) {
+ if (!unitNormal->setNormalize(vec.fX, vec.fY)) {
+ return false;
+ }
+ unitNormal->rotateCCW();
+ unitNormal->scale(radius, normal);
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct SkQuadConstruct { // The state of the quad stroke under construction.
+ SkPoint fQuad[3]; // the stroked quad parallel to the original curve
+ SkPoint fTangentStart; // a point tangent to fQuad[0]
+ SkPoint fTangentEnd; // a point tangent to fQuad[2]
+ SkScalar fStartT; // a segment of the original curve
+ SkScalar fMidT; // "
+ SkScalar fEndT; // "
+ bool fStartSet; // state to share common points across structs
+ bool fEndSet; // "
+ bool fOppositeTangents; // set if coincident tangents have opposite directions
+
+ // return false if start and end are too close to have a unique middle
+ bool init(SkScalar start, SkScalar end) {
+ fStartT = start;
+ fMidT = (start + end) * SK_ScalarHalf;
+ fEndT = end;
+ fStartSet = fEndSet = false;
+ return fStartT < fMidT && fMidT < fEndT;
+ }
+
+ bool initWithStart(SkQuadConstruct* parent) {
+ if (!init(parent->fStartT, parent->fMidT)) {
+ return false;
+ }
+ fQuad[0] = parent->fQuad[0];
+ fTangentStart = parent->fTangentStart;
+ fStartSet = true;
+ return true;
+ }
+
+ bool initWithEnd(SkQuadConstruct* parent) {
+ if (!init(parent->fMidT, parent->fEndT)) {
+ return false;
+ }
+ fQuad[2] = parent->fQuad[2];
+ fTangentEnd = parent->fTangentEnd;
+ fEndSet = true;
+ return true;
+ }
+};
+
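+// Walks the source path and builds the stroke as two offset contours: fOuter on one
+// side of the centerline and fInner on the other, each displaced by the stroke radius,
+// joined at vertices and capped (or closed) at contour ends.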
+class SkPathStroker {
+public:
+ SkPathStroker(const SkPath& src,
+ SkScalar radius, SkScalar miterLimit, SkPaint::Cap,
+ SkPaint::Join, SkScalar resScale,
+ bool canIgnoreCenter);
+
+ bool hasOnlyMoveTo() const { return 0 == fSegmentCount; }
+ SkPoint moveToPt() const { return fFirstPt; }
+
+ void moveTo(const SkPoint&);
+ void lineTo(const SkPoint&, const SkPath::Iter* iter = nullptr);
+ void quadTo(const SkPoint&, const SkPoint&);
+ void conicTo(const SkPoint&, const SkPoint&, SkScalar weight);
+ void cubicTo(const SkPoint&, const SkPoint&, const SkPoint&);
+ void close(bool isLine) { this->finishContour(true, isLine); }
+
+ void done(SkPath* dst, bool isLine) {
+ this->finishContour(false, isLine);
+ fOuter.addPath(fExtra);
+ dst->swap(fOuter);
+ }
+
+ SkScalar getResScale() const { return fResScale; }
+
+ bool isZeroLength() const {
+ return fInner.isZeroLength() && fOuter.isZeroLength();
+ }
+
+private:
+ SkScalar fRadius;
+ SkScalar fInvMiterLimit;
+ SkScalar fResScale;
+ SkScalar fInvResScale;
+ SkScalar fInvResScaleSquared;
+
+ SkVector fFirstNormal, fPrevNormal, fFirstUnitNormal, fPrevUnitNormal;
+ SkPoint fFirstPt, fPrevPt; // on original path
+ SkPoint fFirstOuterPt;
+ int fSegmentCount;
+ bool fPrevIsLine;
+ bool fCanIgnoreCenter;
+
+ SkStrokerPriv::CapProc fCapper;
+ SkStrokerPriv::JoinProc fJoiner;
+
+ SkPath fInner, fOuter; // outer is our working answer, inner is temp
+ SkPath fExtra; // added as extra complete contours
+
+ enum StrokeType {
+ kOuter_StrokeType = 1, // use sign-opposite values later to flip perpendicular axis
+ kInner_StrokeType = -1
+ } fStrokeType;
+
+ enum ResultType {
+ kSplit_ResultType, // the caller should split the quad stroke in two
+ kDegenerate_ResultType, // the caller should add a line
+ kQuad_ResultType, // the caller should (continue to try to) add a quad stroke
+ };
+
+ enum ReductionType {
+ kPoint_ReductionType, // all curve points are practically identical
+ kLine_ReductionType, // the control point is on the line between the ends
+ kQuad_ReductionType, // the control point is outside the line between the ends
+ kDegenerate_ReductionType, // the control point is on the line but outside the ends
+ kDegenerate2_ReductionType, // two control points are on the line but outside ends (cubic)
+ kDegenerate3_ReductionType, // three areas of max curvature found (for cubic)
+ };
+
+ enum IntersectRayType {
+ kCtrlPt_RayType,
+ kResultType_RayType,
+ };
+
+ int fRecursionDepth; // track stack depth to abort if numerics run amok
+ bool fFoundTangents; // do less work until tangents meet (cubic)
+ bool fJoinCompleted; // previous join was not degenerate
+
+ void addDegenerateLine(const SkQuadConstruct* );
+ static ReductionType CheckConicLinear(const SkConic& , SkPoint* reduction);
+ static ReductionType CheckCubicLinear(const SkPoint cubic[4], SkPoint reduction[3],
+ const SkPoint** tanPtPtr);
+ static ReductionType CheckQuadLinear(const SkPoint quad[3], SkPoint* reduction);
+ ResultType compareQuadConic(const SkConic& , SkQuadConstruct* ) const;
+ ResultType compareQuadCubic(const SkPoint cubic[4], SkQuadConstruct* );
+ ResultType compareQuadQuad(const SkPoint quad[3], SkQuadConstruct* );
+ void conicPerpRay(const SkConic& , SkScalar t, SkPoint* tPt, SkPoint* onPt,
+ SkPoint* tangent) const;
+ void conicQuadEnds(const SkConic& , SkQuadConstruct* ) const;
+ bool conicStroke(const SkConic& , SkQuadConstruct* );
+ bool cubicMidOnLine(const SkPoint cubic[4], const SkQuadConstruct* ) const;
+ void cubicPerpRay(const SkPoint cubic[4], SkScalar t, SkPoint* tPt, SkPoint* onPt,
+ SkPoint* tangent) const;
+ void cubicQuadEnds(const SkPoint cubic[4], SkQuadConstruct* );
+ void cubicQuadMid(const SkPoint cubic[4], const SkQuadConstruct* , SkPoint* mid) const;
+ bool cubicStroke(const SkPoint cubic[4], SkQuadConstruct* );
+ void init(StrokeType strokeType, SkQuadConstruct* , SkScalar tStart, SkScalar tEnd);
+ ResultType intersectRay(SkQuadConstruct* , IntersectRayType STROKER_DEBUG_PARAMS(int) ) const;
+ bool ptInQuadBounds(const SkPoint quad[3], const SkPoint& pt) const;
+ void quadPerpRay(const SkPoint quad[3], SkScalar t, SkPoint* tPt, SkPoint* onPt,
+ SkPoint* tangent) const;
+ bool quadStroke(const SkPoint quad[3], SkQuadConstruct* );
+ void setConicEndNormal(const SkConic& ,
+ const SkVector& normalAB, const SkVector& unitNormalAB,
+ SkVector* normalBC, SkVector* unitNormalBC);
+ void setCubicEndNormal(const SkPoint cubic[4],
+ const SkVector& normalAB, const SkVector& unitNormalAB,
+ SkVector* normalCD, SkVector* unitNormalCD);
+ void setQuadEndNormal(const SkPoint quad[3],
+ const SkVector& normalAB, const SkVector& unitNormalAB,
+ SkVector* normalBC, SkVector* unitNormalBC);
+ void setRayPts(const SkPoint& tPt, SkVector* dxy, SkPoint* onPt, SkPoint* tangent) const;
+ static bool SlightAngle(SkQuadConstruct* );
+ ResultType strokeCloseEnough(const SkPoint stroke[3], const SkPoint ray[2],
+ SkQuadConstruct* STROKER_DEBUG_PARAMS(int depth) ) const;
+ ResultType tangentsMeet(const SkPoint cubic[4], SkQuadConstruct* );
+
+ void finishContour(bool close, bool isLine);
+ bool preJoinTo(const SkPoint&, SkVector* normal, SkVector* unitNormal,
+ bool isLine);
+ void postJoinTo(const SkPoint&, const SkVector& normal,
+ const SkVector& unitNormal);
+
+ void line_to(const SkPoint& currPt, const SkVector& normal);
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
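+// Computes the normal and unit normal for the segment ending at currPt. For the first
+// segment this starts the outer and inner contours; for later segments it emits the
+// join against the previous segment. Returns false only for zero-length butt-capped
+// segments, which add nothing to the stroke.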
+bool SkPathStroker::preJoinTo(const SkPoint& currPt, SkVector* normal,
+ SkVector* unitNormal, bool currIsLine) {
+ SkASSERT(fSegmentCount >= 0);
+
+ SkScalar prevX = fPrevPt.fX;
+ SkScalar prevY = fPrevPt.fY;
+
+ if (!set_normal_unitnormal(fPrevPt, currPt, fResScale, fRadius, normal, unitNormal)) {
+ if (SkStrokerPriv::CapFactory(SkPaint::kButt_Cap) == fCapper) {
+ return false;
+ }
+ /* Square caps and round caps draw even if the segment length is zero.
+ Since the zero length segment has no direction, set the orientation
+ to upright as the default orientation */
+ normal->set(fRadius, 0);
+ unitNormal->set(1, 0);
+ }
+
+ if (fSegmentCount == 0) {
+ fFirstNormal = *normal;
+ fFirstUnitNormal = *unitNormal;
+ fFirstOuterPt.set(prevX + normal->fX, prevY + normal->fY);
+
+ fOuter.moveTo(fFirstOuterPt.fX, fFirstOuterPt.fY);
+ fInner.moveTo(prevX - normal->fX, prevY - normal->fY);
+ } else { // we have a previous segment
+ fJoiner(&fOuter, &fInner, fPrevUnitNormal, fPrevPt, *unitNormal,
+ fRadius, fInvMiterLimit, fPrevIsLine, currIsLine);
+ }
+ fPrevIsLine = currIsLine;
+ return true;
+}
+
+void SkPathStroker::postJoinTo(const SkPoint& currPt, const SkVector& normal,
+ const SkVector& unitNormal) {
+ fJoinCompleted = true;
+ fPrevPt = currPt;
+ fPrevUnitNormal = unitNormal;
+ fPrevNormal = normal;
+ fSegmentCount += 1;
+}
+
+void SkPathStroker::finishContour(bool close, bool currIsLine) {
+ if (fSegmentCount > 0) {
+ SkPoint pt;
+
+ if (close) {
+ fJoiner(&fOuter, &fInner, fPrevUnitNormal, fPrevPt,
+ fFirstUnitNormal, fRadius, fInvMiterLimit,
+ fPrevIsLine, currIsLine);
+ fOuter.close();
+
+ if (fCanIgnoreCenter) {
+ if (!fOuter.getBounds().contains(fInner.getBounds())) {
+ SkASSERT(fInner.getBounds().contains(fOuter.getBounds()));
+ fInner.swap(fOuter);
+ }
+ } else {
+ // now add fInner as its own contour
+ fInner.getLastPt(&pt);
+ fOuter.moveTo(pt.fX, pt.fY);
+ fOuter.reversePathTo(fInner);
+ fOuter.close();
+ }
+ } else { // add caps to start and end
+ // cap the end
+ fInner.getLastPt(&pt);
+ fCapper(&fOuter, fPrevPt, fPrevNormal, pt,
+ currIsLine ? &fInner : nullptr);
+ fOuter.reversePathTo(fInner);
+ // cap the start
+ fCapper(&fOuter, fFirstPt, -fFirstNormal, fFirstOuterPt,
+ fPrevIsLine ? &fInner : nullptr);
+ fOuter.close();
+ }
+ }
+ // since we may re-use fInner, we rewind instead of reset, to save on
+ // reallocating its internal storage.
+ fInner.rewind();
+ fSegmentCount = -1;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPathStroker::SkPathStroker(const SkPath& src,
+ SkScalar radius, SkScalar miterLimit,
+ SkPaint::Cap cap, SkPaint::Join join, SkScalar resScale,
+ bool canIgnoreCenter)
+ : fRadius(radius)
+ , fResScale(resScale)
+ , fCanIgnoreCenter(canIgnoreCenter) {
+
+ /* This is only used when join is miter_join, but we initialize it here
+ so that it is always defined, to fix valgrind warnings.
+ */
+ fInvMiterLimit = 0;
+
+ if (join == SkPaint::kMiter_Join) {
+ if (miterLimit <= SK_Scalar1) {
+ join = SkPaint::kBevel_Join;
+ } else {
+ fInvMiterLimit = SkScalarInvert(miterLimit);
+ }
+ }
+ fCapper = SkStrokerPriv::CapFactory(cap);
+ fJoiner = SkStrokerPriv::JoinFactory(join);
+ fSegmentCount = -1;
+ fPrevIsLine = false;
+
+ // Need some estimate of how large our final result (fOuter)
+ // and our per-contour temp (fInner) will be, so we don't spend
+ // extra time repeatedly growing these arrays.
+ //
+ // 3x for result == inner + outer + join (swag)
+ // 1x for inner == 'wag' (worst contour length would be better guess)
+ fOuter.incReserve(src.countPoints() * 3);
+ fOuter.setIsVolatile(true);
+ fInner.incReserve(src.countPoints());
+ fInner.setIsVolatile(true);
+ // TODO : write a common error function used by stroking and filling
+ // The '4' below matches the fill scan converter's error term
+ fInvResScale = SkScalarInvert(resScale * 4);
+ fInvResScaleSquared = fInvResScale * fInvResScale;
+ fRecursionDepth = 0;
+}
+
+void SkPathStroker::moveTo(const SkPoint& pt) {
+ if (fSegmentCount > 0) {
+ this->finishContour(false, false);
+ }
+ fSegmentCount = 0;
+ fFirstPt = fPrevPt = pt;
+ fJoinCompleted = false;
+}
+
+void SkPathStroker::line_to(const SkPoint& currPt, const SkVector& normal) {
+ fOuter.lineTo(currPt.fX + normal.fX, currPt.fY + normal.fY);
+ fInner.lineTo(currPt.fX - normal.fX, currPt.fY - normal.fY);
+}
+
+static bool has_valid_tangent(const SkPath::Iter* iter) {
+ SkPath::Iter copy = *iter;
+ SkPath::Verb verb;
+ SkPoint pts[4];
+ while ((verb = copy.next(pts))) {
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ return false;
+ case SkPath::kLine_Verb:
+ if (pts[0] == pts[1]) {
+ continue;
+ }
+ return true;
+ case SkPath::kQuad_Verb:
+ case SkPath::kConic_Verb:
+ if (pts[0] == pts[1] && pts[0] == pts[2]) {
+ continue;
+ }
+ return true;
+ case SkPath::kCubic_Verb:
+ if (pts[0] == pts[1] && pts[0] == pts[2] && pts[0] == pts[3]) {
+ continue;
+ }
+ return true;
+ case SkPath::kClose_Verb:
+ case SkPath::kDone_Verb:
+ return false;
+ }
+ }
+ return false;
+}
+
+void SkPathStroker::lineTo(const SkPoint& currPt, const SkPath::Iter* iter) {
+ bool teenyLine = fPrevPt.equalsWithinTolerance(currPt, SK_ScalarNearlyZero * fInvResScale);
+ if (SkStrokerPriv::CapFactory(SkPaint::kButt_Cap) == fCapper && teenyLine) {
+ return;
+ }
+ if (teenyLine && (fJoinCompleted || (iter && has_valid_tangent(iter)))) {
+ return;
+ }
+ SkVector normal, unitNormal;
+
+ if (!this->preJoinTo(currPt, &normal, &unitNormal, true)) {
+ return;
+ }
+ this->line_to(currPt, normal);
+ this->postJoinTo(currPt, normal, unitNormal);
+}
+
+void SkPathStroker::setQuadEndNormal(const SkPoint quad[3], const SkVector& normalAB,
+ const SkVector& unitNormalAB, SkVector* normalBC, SkVector* unitNormalBC) {
+ if (!set_normal_unitnormal(quad[1], quad[2], fResScale, fRadius, normalBC, unitNormalBC)) {
+ *normalBC = normalAB;
+ *unitNormalBC = unitNormalAB;
+ }
+}
+
+void SkPathStroker::setConicEndNormal(const SkConic& conic, const SkVector& normalAB,
+ const SkVector& unitNormalAB, SkVector* normalBC, SkVector* unitNormalBC) {
+ setQuadEndNormal(conic.fPts, normalAB, unitNormalAB, normalBC, unitNormalBC);
+}
+
+void SkPathStroker::setCubicEndNormal(const SkPoint cubic[4], const SkVector& normalAB,
+ const SkVector& unitNormalAB, SkVector* normalCD, SkVector* unitNormalCD) {
+ SkVector ab = cubic[1] - cubic[0];
+ SkVector cd = cubic[3] - cubic[2];
+
+ bool degenerateAB = degenerate_vector(ab);
+ bool degenerateCD = degenerate_vector(cd);
+
+ if (degenerateAB && degenerateCD) {
+ goto DEGENERATE_NORMAL;
+ }
+
+ if (degenerateAB) {
+ ab = cubic[2] - cubic[0];
+ degenerateAB = degenerate_vector(ab);
+ }
+ if (degenerateCD) {
+ cd = cubic[3] - cubic[1];
+ degenerateCD = degenerate_vector(cd);
+ }
+ if (degenerateAB || degenerateCD) {
+DEGENERATE_NORMAL:
+ *normalCD = normalAB;
+ *unitNormalCD = unitNormalAB;
+ return;
+ }
+ SkAssertResult(set_normal_unitnormal(cd, fRadius, normalCD, unitNormalCD));
+}
+
+void SkPathStroker::init(StrokeType strokeType, SkQuadConstruct* quadPts, SkScalar tStart,
+ SkScalar tEnd) {
+ fStrokeType = strokeType;
+ fFoundTangents = false;
+ quadPts->init(tStart, tEnd);
+}
+
+// returns the distance squared from the point to the line
+static SkScalar pt_to_line(const SkPoint& pt, const SkPoint& lineStart, const SkPoint& lineEnd) {
+ SkVector dxy = lineEnd - lineStart;
+ if (degenerate_vector(dxy)) {
+ return pt.distanceToSqd(lineStart);
+ }
+ SkVector ab0 = pt - lineStart;
+ SkScalar numer = dxy.dot(ab0);
+ SkScalar denom = dxy.dot(dxy);
+ SkScalar t = numer / denom;
+ SkPoint hit;
+ hit.fX = lineStart.fX * (1 - t) + lineEnd.fX * t;
+ hit.fY = lineStart.fY * (1 - t) + lineEnd.fY * t;
+ return hit.distanceToSqd(pt);
+}
+
+/* Given a cubic, determine if all four points are in a line.
+   Return true if the inner points are close to a line connecting the outermost points.
+
+ Find the outermost point by looking for the largest difference in X or Y.
+ Given the indices of the outermost points, and that outer_1 is greater than outer_2,
+ this table shows the index of the smaller of the remaining points:
+
+ outer_2
+ 0 1 2 3
+ outer_1 ----------------
+ 0 | - 2 1 1
+ 1 | - - 0 0
+ 2 | - - - 0
+ 3 | - - - -
+
+ If outer_1 == 0 and outer_2 == 1, the smaller of the remaining indices (2 and 3) is 2.
+
+ This table can be collapsed to: (1 + (2 >> outer_2)) >> outer_1
+
+ Given three indices (outer_1 outer_2 mid_1) from 0..3, the remaining index is:
+
+ mid_2 == (outer_1 ^ outer_2 ^ mid_1)
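+
+   As a check, with outer_1 == 1 and outer_2 == 3: (1 + (2 >> 3)) >> 1 == 0, matching the table,
+   and mid_2 == 1 ^ 3 ^ 0 == 2, the one index left over.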
+ */
+static bool cubic_in_line(const SkPoint cubic[4]) {
+ SkScalar ptMax = -1;
+ int outer1 SK_INIT_TO_AVOID_WARNING;
+ int outer2 SK_INIT_TO_AVOID_WARNING;
+ for (int index = 0; index < 3; ++index) {
+ for (int inner = index + 1; inner < 4; ++inner) {
+ SkVector testDiff = cubic[inner] - cubic[index];
+ SkScalar testMax = SkTMax(SkScalarAbs(testDiff.fX), SkScalarAbs(testDiff.fY));
+ if (ptMax < testMax) {
+ outer1 = index;
+ outer2 = inner;
+ ptMax = testMax;
+ }
+ }
+ }
+ SkASSERT(outer1 >= 0 && outer1 <= 2);
+ SkASSERT(outer2 >= 1 && outer2 <= 3);
+ SkASSERT(outer1 < outer2);
+ int mid1 = (1 + (2 >> outer2)) >> outer1;
+ SkASSERT(mid1 >= 0 && mid1 <= 2);
+ SkASSERT(outer1 != mid1 && outer2 != mid1);
+ int mid2 = outer1 ^ outer2 ^ mid1;
+ SkASSERT(mid2 >= 1 && mid2 <= 3);
+ SkASSERT(mid2 != outer1 && mid2 != outer2 && mid2 != mid1);
+ SkASSERT(((1 << outer1) | (1 << outer2) | (1 << mid1) | (1 << mid2)) == 0x0f);
+ SkScalar lineSlop = ptMax * ptMax * 0.00001f; // this multiplier is pulled out of the air
+ return pt_to_line(cubic[mid1], cubic[outer1], cubic[outer2]) <= lineSlop
+ && pt_to_line(cubic[mid2], cubic[outer1], cubic[outer2]) <= lineSlop;
+}
+
+/* Given a quad, see if all three points are in a line.
+ Return true if the inside point is close to a line connecting the outermost points.
+
+ Find the outermost point by looking for the largest difference in X or Y.
+ Since the XOR of the indices is 3 (0 ^ 1 ^ 2)
+ the missing index equals: outer_1 ^ outer_2 ^ 3
+ */
+static bool quad_in_line(const SkPoint quad[3]) {
+ SkScalar ptMax = -1;
+ int outer1 SK_INIT_TO_AVOID_WARNING;
+ int outer2 SK_INIT_TO_AVOID_WARNING;
+ for (int index = 0; index < 2; ++index) {
+ for (int inner = index + 1; inner < 3; ++inner) {
+ SkVector testDiff = quad[inner] - quad[index];
+ SkScalar testMax = SkTMax(SkScalarAbs(testDiff.fX), SkScalarAbs(testDiff.fY));
+ if (ptMax < testMax) {
+ outer1 = index;
+ outer2 = inner;
+ ptMax = testMax;
+ }
+ }
+ }
+ SkASSERT(outer1 >= 0 && outer1 <= 1);
+ SkASSERT(outer2 >= 1 && outer2 <= 2);
+ SkASSERT(outer1 < outer2);
+ int mid = outer1 ^ outer2 ^ 3;
+ SkScalar lineSlop = ptMax * ptMax * 0.00001f; // this multiplier is pulled out of the air
+ return pt_to_line(quad[mid], quad[outer1], quad[outer2]) <= lineSlop;
+}
+
+static bool conic_in_line(const SkConic& conic) {
+ return quad_in_line(conic.fPts);
+}
+
+SkPathStroker::ReductionType SkPathStroker::CheckCubicLinear(const SkPoint cubic[4],
+ SkPoint reduction[3], const SkPoint** tangentPtPtr) {
+ bool degenerateAB = degenerate_vector(cubic[1] - cubic[0]);
+ bool degenerateBC = degenerate_vector(cubic[2] - cubic[1]);
+ bool degenerateCD = degenerate_vector(cubic[3] - cubic[2]);
+ if (degenerateAB & degenerateBC & degenerateCD) {
+ return kPoint_ReductionType;
+ }
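+    // exactly two degenerate edges leaves a single non-degenerate edge, so the four control
+    // points span (nearly) a single segment and the cubic reduces to a line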
+ if (degenerateAB + degenerateBC + degenerateCD == 2) {
+ return kLine_ReductionType;
+ }
+ if (!cubic_in_line(cubic)) {
+ *tangentPtPtr = degenerateAB ? &cubic[2] : &cubic[1];
+ return kQuad_ReductionType;
+ }
+ SkScalar tValues[3];
+ int count = SkFindCubicMaxCurvature(cubic, tValues);
+ if (count == 0) {
+ return kLine_ReductionType;
+ }
+ for (int index = 0; index < count; ++index) {
+ SkScalar t = tValues[index];
+ SkEvalCubicAt(cubic, t, &reduction[index], nullptr, nullptr);
+ }
+ static_assert(kQuad_ReductionType + 1 == kDegenerate_ReductionType, "enum_out_of_whack");
+ static_assert(kQuad_ReductionType + 2 == kDegenerate2_ReductionType, "enum_out_of_whack");
+ static_assert(kQuad_ReductionType + 3 == kDegenerate3_ReductionType, "enum_out_of_whack");
+
+ return (ReductionType) (kQuad_ReductionType + count);
+}
+
+SkPathStroker::ReductionType SkPathStroker::CheckConicLinear(const SkConic& conic,
+ SkPoint* reduction) {
+ bool degenerateAB = degenerate_vector(conic.fPts[1] - conic.fPts[0]);
+ bool degenerateBC = degenerate_vector(conic.fPts[2] - conic.fPts[1]);
+ if (degenerateAB & degenerateBC) {
+ return kPoint_ReductionType;
+ }
+ if (degenerateAB | degenerateBC) {
+ return kLine_ReductionType;
+ }
+ if (!conic_in_line(conic)) {
+ return kQuad_ReductionType;
+ }
+#if 0 // once findMaxCurvature is implemented, this will be a better solution
+ SkScalar t;
+ if (!conic.findMaxCurvature(&t) || 0 == t) {
+ return kLine_ReductionType;
+ }
+#else // but for now, use extrema instead
+ SkScalar xT = 0, yT = 0;
+ (void) conic.findXExtrema(&xT);
+ (void) conic.findYExtrema(&yT);
+ SkScalar t = SkTMax(xT, yT);
+ if (0 == t) {
+ return kLine_ReductionType;
+ }
+#endif
+ conic.evalAt(t, reduction, nullptr);
+ return kDegenerate_ReductionType;
+}
+
+SkPathStroker::ReductionType SkPathStroker::CheckQuadLinear(const SkPoint quad[3],
+ SkPoint* reduction) {
+ bool degenerateAB = degenerate_vector(quad[1] - quad[0]);
+ bool degenerateBC = degenerate_vector(quad[2] - quad[1]);
+ if (degenerateAB & degenerateBC) {
+ return kPoint_ReductionType;
+ }
+ if (degenerateAB | degenerateBC) {
+ return kLine_ReductionType;
+ }
+ if (!quad_in_line(quad)) {
+ return kQuad_ReductionType;
+ }
+ SkScalar t = SkFindQuadMaxCurvature(quad);
+ if (0 == t) {
+ return kLine_ReductionType;
+ }
+ *reduction = SkEvalQuadAt(quad, t);
+ return kDegenerate_ReductionType;
+}
+
+void SkPathStroker::conicTo(const SkPoint& pt1, const SkPoint& pt2, SkScalar weight) {
+ const SkConic conic(fPrevPt, pt1, pt2, weight);
+ SkPoint reduction;
+ ReductionType reductionType = CheckConicLinear(conic, &reduction);
+ if (kPoint_ReductionType == reductionType) {
+ /* If the stroke consists of a moveTo followed by a degenerate curve, treat it
+ as if it were followed by a zero-length line. Lines without length
+ can have square and round end caps. */
+ this->lineTo(pt2);
+ return;
+ }
+ if (kLine_ReductionType == reductionType) {
+ this->lineTo(pt2);
+ return;
+ }
+ if (kDegenerate_ReductionType == reductionType) {
+ this->lineTo(reduction);
+ SkStrokerPriv::JoinProc saveJoiner = fJoiner;
+ fJoiner = SkStrokerPriv::JoinFactory(SkPaint::kRound_Join);
+ this->lineTo(pt2);
+ fJoiner = saveJoiner;
+ return;
+ }
+ SkASSERT(kQuad_ReductionType == reductionType);
+ SkVector normalAB, unitAB, normalBC, unitBC;
+ if (!this->preJoinTo(pt1, &normalAB, &unitAB, false)) {
+ this->lineTo(pt2);
+ return;
+ }
+ SkQuadConstruct quadPts;
+ this->init(kOuter_StrokeType, &quadPts, 0, 1);
+ (void) this->conicStroke(conic, &quadPts);
+ this->init(kInner_StrokeType, &quadPts, 0, 1);
+ (void) this->conicStroke(conic, &quadPts);
+ this->setConicEndNormal(conic, normalAB, unitAB, &normalBC, &unitBC);
+ this->postJoinTo(pt2, normalBC, unitBC);
+}
+
+void SkPathStroker::quadTo(const SkPoint& pt1, const SkPoint& pt2) {
+ const SkPoint quad[3] = { fPrevPt, pt1, pt2 };
+ SkPoint reduction;
+ ReductionType reductionType = CheckQuadLinear(quad, &reduction);
+ if (kPoint_ReductionType == reductionType) {
+ /* If the stroke consists of a moveTo followed by a degenerate curve, treat it
+ as if it were followed by a zero-length line. Lines without length
+ can have square and round end caps. */
+ this->lineTo(pt2);
+ return;
+ }
+ if (kLine_ReductionType == reductionType) {
+ this->lineTo(pt2);
+ return;
+ }
+ if (kDegenerate_ReductionType == reductionType) {
+ this->lineTo(reduction);
+ SkStrokerPriv::JoinProc saveJoiner = fJoiner;
+ fJoiner = SkStrokerPriv::JoinFactory(SkPaint::kRound_Join);
+ this->lineTo(pt2);
+ fJoiner = saveJoiner;
+ return;
+ }
+ SkASSERT(kQuad_ReductionType == reductionType);
+ SkVector normalAB, unitAB, normalBC, unitBC;
+ if (!this->preJoinTo(pt1, &normalAB, &unitAB, false)) {
+ this->lineTo(pt2);
+ return;
+ }
+ SkQuadConstruct quadPts;
+ this->init(kOuter_StrokeType, &quadPts, 0, 1);
+ (void) this->quadStroke(quad, &quadPts);
+ this->init(kInner_StrokeType, &quadPts, 0, 1);
+ (void) this->quadStroke(quad, &quadPts);
+ this->setQuadEndNormal(quad, normalAB, unitAB, &normalBC, &unitBC);
+
+ this->postJoinTo(pt2, normalBC, unitBC);
+}
+
+// Given a point on the curve and its derivative, scale the derivative by the radius, and
+// compute the perpendicular point and its tangent.
+void SkPathStroker::setRayPts(const SkPoint& tPt, SkVector* dxy, SkPoint* onPt,
+ SkPoint* tangent) const {
+ SkPoint oldDxy = *dxy;
+ if (!dxy->setLength(fRadius)) { // consider moving double logic into SkPoint::setLength
+ double xx = oldDxy.fX;
+ double yy = oldDxy.fY;
+ double dscale = fRadius / sqrt(xx * xx + yy * yy);
+ dxy->fX = SkDoubleToScalar(xx * dscale);
+ dxy->fY = SkDoubleToScalar(yy * dscale);
+ }
+ SkScalar axisFlip = SkIntToScalar(fStrokeType); // go opposite ways for outer, inner
+ onPt->fX = tPt.fX + axisFlip * dxy->fY;
+ onPt->fY = tPt.fY - axisFlip * dxy->fX;
+ if (tangent) {
+ tangent->fX = onPt->fX + dxy->fX;
+ tangent->fY = onPt->fY + dxy->fY;
+ }
+}
+
+// Given a conic and t, return the point on curve, its perpendicular, and the perpendicular tangent.
+// If the derivative collapses to 0, the chord between the conic's end points is used instead.
+void SkPathStroker::conicPerpRay(const SkConic& conic, SkScalar t, SkPoint* tPt, SkPoint* onPt,
+ SkPoint* tangent) const {
+ SkVector dxy;
+ conic.evalAt(t, tPt, &dxy);
+ if (dxy.fX == 0 && dxy.fY == 0) {
+ dxy = conic.fPts[2] - conic.fPts[0];
+ }
+ this->setRayPts(*tPt, &dxy, onPt, tangent);
+}
+
+// Given a conic and a t range, find the start and end if they haven't been found already.
+void SkPathStroker::conicQuadEnds(const SkConic& conic, SkQuadConstruct* quadPts) const {
+ if (!quadPts->fStartSet) {
+ SkPoint conicStartPt;
+ this->conicPerpRay(conic, quadPts->fStartT, &conicStartPt, &quadPts->fQuad[0],
+ &quadPts->fTangentStart);
+ quadPts->fStartSet = true;
+ }
+ if (!quadPts->fEndSet) {
+ SkPoint conicEndPt;
+ this->conicPerpRay(conic, quadPts->fEndT, &conicEndPt, &quadPts->fQuad[2],
+ &quadPts->fTangentEnd);
+ quadPts->fEndSet = true;
+ }
+}
+
+// Given a cubic and t, return the point on curve, its perpendicular, and the perpendicular tangent.
+void SkPathStroker::cubicPerpRay(const SkPoint cubic[4], SkScalar t, SkPoint* tPt, SkPoint* onPt,
+ SkPoint* tangent) const {
+ SkVector dxy;
+ SkPoint chopped[7];
+ SkEvalCubicAt(cubic, t, tPt, &dxy, nullptr);
+ if (dxy.fX == 0 && dxy.fY == 0) {
+ const SkPoint* cPts = cubic;
+ if (SkScalarNearlyZero(t)) {
+ dxy = cubic[2] - cubic[0];
+ } else if (SkScalarNearlyZero(1 - t)) {
+ dxy = cubic[3] - cubic[1];
+ } else {
+ // If the cubic inflection falls on the cusp, subdivide the cubic
+ // to find the tangent at that point.
+ SkChopCubicAt(cubic, chopped, t);
+ dxy = chopped[3] - chopped[2];
+ if (dxy.fX == 0 && dxy.fY == 0) {
+ dxy = chopped[3] - chopped[1];
+ cPts = chopped;
+ }
+ }
+ if (dxy.fX == 0 && dxy.fY == 0) {
+ dxy = cPts[3] - cPts[0];
+ }
+ }
+ setRayPts(*tPt, &dxy, onPt, tangent);
+}
+
+// Given a cubic and a t range, find the start and end if they haven't been found already.
+void SkPathStroker::cubicQuadEnds(const SkPoint cubic[4], SkQuadConstruct* quadPts) {
+ if (!quadPts->fStartSet) {
+ SkPoint cubicStartPt;
+ this->cubicPerpRay(cubic, quadPts->fStartT, &cubicStartPt, &quadPts->fQuad[0],
+ &quadPts->fTangentStart);
+ quadPts->fStartSet = true;
+ }
+ if (!quadPts->fEndSet) {
+ SkPoint cubicEndPt;
+ this->cubicPerpRay(cubic, quadPts->fEndT, &cubicEndPt, &quadPts->fQuad[2],
+ &quadPts->fTangentEnd);
+ quadPts->fEndSet = true;
+ }
+}
+
+void SkPathStroker::cubicQuadMid(const SkPoint cubic[4], const SkQuadConstruct* quadPts,
+ SkPoint* mid) const {
+ SkPoint cubicMidPt;
+ this->cubicPerpRay(cubic, quadPts->fMidT, &cubicMidPt, mid, nullptr);
+}
+
+// Given a quad and t, return the point on curve, its perpendicular, and the perpendicular tangent.
+void SkPathStroker::quadPerpRay(const SkPoint quad[3], SkScalar t, SkPoint* tPt, SkPoint* onPt,
+ SkPoint* tangent) const {
+ SkVector dxy;
+ SkEvalQuadAt(quad, t, tPt, &dxy);
+ if (dxy.fX == 0 && dxy.fY == 0) {
+ dxy = quad[2] - quad[0];
+ }
+ setRayPts(*tPt, &dxy, onPt, tangent);
+}
+
+// Find the intersection of the stroke tangents to construct a stroke quad.
+// Return whether the stroke is a degenerate (a line), a quad, or must be split.
+// Optionally compute the quad's control point.
+SkPathStroker::ResultType SkPathStroker::intersectRay(SkQuadConstruct* quadPts,
+ IntersectRayType intersectRayType STROKER_DEBUG_PARAMS(int depth)) const {
+ const SkPoint& start = quadPts->fQuad[0];
+ const SkPoint& end = quadPts->fQuad[2];
+ SkVector aLen = quadPts->fTangentStart - start;
+ SkVector bLen = quadPts->fTangentEnd - end;
+ /* Slopes match when denom goes to zero:
+ axLen / ayLen == bxLen / byLen
+ (ayLen * byLen) * axLen / ayLen == (ayLen * byLen) * bxLen / byLen
+ byLen * axLen == ayLen * bxLen
+ byLen * axLen - ayLen * bxLen ( == denom )
+ */
+ SkScalar denom = aLen.cross(bLen);
+ if (denom == 0 || !SkScalarIsFinite(denom)) {
+ quadPts->fOppositeTangents = aLen.dot(bLen) < 0;
+ return STROKER_RESULT(kDegenerate_ResultType, depth, quadPts, "denom == 0");
+ }
+ quadPts->fOppositeTangents = false;
+ SkVector ab0 = start - end;
+ SkScalar numerA = bLen.cross(ab0);
+ SkScalar numerB = aLen.cross(ab0);
+ if ((numerA >= 0) == (numerB >= 0)) { // if the control point is outside the quad ends
+ // if the perpendicular distances from the quad points to the opposite tangent line
+ // are small, a straight line is good enough
+ SkScalar dist1 = pt_to_line(start, end, quadPts->fTangentEnd);
+ SkScalar dist2 = pt_to_line(end, start, quadPts->fTangentStart);
+ if (SkTMax(dist1, dist2) <= fInvResScaleSquared) {
+ return STROKER_RESULT(kDegenerate_ResultType, depth, quadPts,
+ "SkTMax(dist1=%g, dist2=%g) <= fInvResScaleSquared", dist1, dist2);
+ }
+ return STROKER_RESULT(kSplit_ResultType, depth, quadPts,
+ "(numerA=%g >= 0) == (numerB=%g >= 0)", numerA, numerB);
+ }
+ // check to see if the denominator is teeny relative to the numerator
+ // if the offset by one will be lost, the ratio is too large
+ numerA /= denom;
+ bool validDivide = numerA > numerA - 1;
+ if (validDivide) {
+ if (kCtrlPt_RayType == intersectRayType) {
+ SkPoint* ctrlPt = &quadPts->fQuad[1];
+ // the intersection of the tangents need not be on the tangent segment
+ // so 0 <= numerA <= 1 is not necessarily true
+ ctrlPt->fX = start.fX * (1 - numerA) + quadPts->fTangentStart.fX * numerA;
+ ctrlPt->fY = start.fY * (1 - numerA) + quadPts->fTangentStart.fY * numerA;
+ }
+ return STROKER_RESULT(kQuad_ResultType, depth, quadPts,
+ "(numerA=%g >= 0) != (numerB=%g >= 0)", numerA, numerB);
+ }
+ quadPts->fOppositeTangents = aLen.dot(bLen) < 0;
+ // if the lines are parallel, straight line is good enough
+ return STROKER_RESULT(kDegenerate_ResultType, depth, quadPts,
+ "SkScalarNearlyZero(denom=%g)", denom);
+}
+
+// Given a cubic and a t-range, determine if the stroke can be described by a quadratic.
+SkPathStroker::ResultType SkPathStroker::tangentsMeet(const SkPoint cubic[4],
+ SkQuadConstruct* quadPts) {
+ this->cubicQuadEnds(cubic, quadPts);
+ return this->intersectRay(quadPts, kResultType_RayType STROKER_DEBUG_PARAMS(fRecursionDepth));
+}
+
+// Intersect the line with the quad and return the t values on the quad where the line crosses.
+static int intersect_quad_ray(const SkPoint line[2], const SkPoint quad[3], SkScalar roots[2]) {
+ SkVector vec = line[1] - line[0];
+ SkScalar r[3];
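+    // r[n] is the cross product of the line direction with (quad[n] - line[0]), i.e. proportional
+    // to quad[n]'s signed distance from the line; interpolating r as a quadratic in t and finding
+    // its roots below locates where the quad crosses the line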
+ for (int n = 0; n < 3; ++n) {
+ r[n] = (quad[n].fY - line[0].fY) * vec.fX - (quad[n].fX - line[0].fX) * vec.fY;
+ }
+ SkScalar A = r[2];
+ SkScalar B = r[1];
+ SkScalar C = r[0];
+ A += C - 2 * B; // A = a - 2*b + c
+ B -= C; // B = -(b - c)
+ return SkFindUnitQuadRoots(A, 2 * B, C, roots);
+}
+
+// Return true if the point is close to the bounds of the quad. This is used as a quick reject.
+bool SkPathStroker::ptInQuadBounds(const SkPoint quad[3], const SkPoint& pt) const {
+ SkScalar xMin = SkTMin(SkTMin(quad[0].fX, quad[1].fX), quad[2].fX);
+ if (pt.fX + fInvResScale < xMin) {
+ return false;
+ }
+ SkScalar xMax = SkTMax(SkTMax(quad[0].fX, quad[1].fX), quad[2].fX);
+ if (pt.fX - fInvResScale > xMax) {
+ return false;
+ }
+ SkScalar yMin = SkTMin(SkTMin(quad[0].fY, quad[1].fY), quad[2].fY);
+ if (pt.fY + fInvResScale < yMin) {
+ return false;
+ }
+ SkScalar yMax = SkTMax(SkTMax(quad[0].fY, quad[1].fY), quad[2].fY);
+ if (pt.fY - fInvResScale > yMax) {
+ return false;
+ }
+ return true;
+}
+
+static bool points_within_dist(const SkPoint& nearPt, const SkPoint& farPt, SkScalar limit) {
+ return nearPt.distanceToSqd(farPt) <= limit * limit;
+}
+
+static bool sharp_angle(const SkPoint quad[3]) {
+ SkVector smaller = quad[1] - quad[0];
+ SkVector larger = quad[1] - quad[2];
+ SkScalar smallerLen = smaller.lengthSqd();
+ SkScalar largerLen = larger.lengthSqd();
+ if (smallerLen > largerLen) {
+ SkTSwap(smaller, larger);
+ largerLen = smallerLen;
+ }
+ if (!smaller.setLength(largerLen)) {
+ return false;
+ }
+ SkScalar dot = smaller.dot(larger);
+ return dot > 0;
+}
+
+SkPathStroker::ResultType SkPathStroker::strokeCloseEnough(const SkPoint stroke[3],
+ const SkPoint ray[2], SkQuadConstruct* quadPts STROKER_DEBUG_PARAMS(int depth)) const {
+ SkPoint strokeMid = SkEvalQuadAt(stroke, SK_ScalarHalf);
+ // measure the distance from the curve to the quad-stroke midpoint, compare to radius
+ if (points_within_dist(ray[0], strokeMid, fInvResScale)) { // if the difference is small
+ if (sharp_angle(quadPts->fQuad)) {
+ return STROKER_RESULT(kSplit_ResultType, depth, quadPts,
+ "sharp_angle (1) =%g,%g, %g,%g, %g,%g",
+ quadPts->fQuad[0].fX, quadPts->fQuad[0].fY,
+ quadPts->fQuad[1].fX, quadPts->fQuad[1].fY,
+ quadPts->fQuad[2].fX, quadPts->fQuad[2].fY);
+ }
+ return STROKER_RESULT(kQuad_ResultType, depth, quadPts,
+ "points_within_dist(ray[0]=%g,%g, strokeMid=%g,%g, fInvResScale=%g)",
+ ray[0].fX, ray[0].fY, strokeMid.fX, strokeMid.fY, fInvResScale);
+ }
+ // measure the distance to quad's bounds (quick reject)
+ // an alternative : look for point in triangle
+ if (!ptInQuadBounds(stroke, ray[0])) { // if far, subdivide
+ return STROKER_RESULT(kSplit_ResultType, depth, quadPts,
+ "!pt_in_quad_bounds(stroke=(%g,%g %g,%g %g,%g), ray[0]=%g,%g)",
+ stroke[0].fX, stroke[0].fY, stroke[1].fX, stroke[1].fY, stroke[2].fX, stroke[2].fY,
+ ray[0].fX, ray[0].fY);
+ }
+ // measure the curve ray distance to the quad-stroke
+ SkScalar roots[2];
+ int rootCount = intersect_quad_ray(ray, stroke, roots);
+ if (rootCount != 1) {
+ return STROKER_RESULT(kSplit_ResultType, depth, quadPts,
+ "rootCount=%d != 1", rootCount);
+ }
+ SkPoint quadPt = SkEvalQuadAt(stroke, roots[0]);
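+    // allow the most error at the middle of the quad (t == 0.5), tapering linearly to zero error
+    // at either end so the stroke stays tight near the endpoints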
+ SkScalar error = fInvResScale * (SK_Scalar1 - SkScalarAbs(roots[0] - 0.5f) * 2);
+ if (points_within_dist(ray[0], quadPt, error)) { // if the difference is small, we're done
+ if (sharp_angle(quadPts->fQuad)) {
+ return STROKER_RESULT(kSplit_ResultType, depth, quadPts,
+ "sharp_angle (2) =%g,%g, %g,%g, %g,%g",
+ quadPts->fQuad[0].fX, quadPts->fQuad[0].fY,
+ quadPts->fQuad[1].fX, quadPts->fQuad[1].fY,
+ quadPts->fQuad[2].fX, quadPts->fQuad[2].fY);
+ }
+ return STROKER_RESULT(kQuad_ResultType, depth, quadPts,
+ "points_within_dist(ray[0]=%g,%g, quadPt=%g,%g, error=%g)",
+ ray[0].fX, ray[0].fY, quadPt.fX, quadPt.fY, error);
+ }
+ // otherwise, subdivide
+ return STROKER_RESULT(kSplit_ResultType, depth, quadPts, "%s", "fall through");
+}
+
+SkPathStroker::ResultType SkPathStroker::compareQuadCubic(const SkPoint cubic[4],
+ SkQuadConstruct* quadPts) {
+ // get the quadratic approximation of the stroke
+ this->cubicQuadEnds(cubic, quadPts);
+ ResultType resultType = this->intersectRay(quadPts, kCtrlPt_RayType
+ STROKER_DEBUG_PARAMS(fRecursionDepth) );
+ if (resultType != kQuad_ResultType) {
+ return resultType;
+ }
+ // project a ray from the curve to the stroke
+ SkPoint ray[2]; // points near midpoint on quad, midpoint on cubic
+ this->cubicPerpRay(cubic, quadPts->fMidT, &ray[1], &ray[0], nullptr);
+ return this->strokeCloseEnough(quadPts->fQuad, ray, quadPts
+ STROKER_DEBUG_PARAMS(fRecursionDepth));
+}
+
+SkPathStroker::ResultType SkPathStroker::compareQuadConic(const SkConic& conic,
+ SkQuadConstruct* quadPts) const {
+ // get the quadratic approximation of the stroke
+ this->conicQuadEnds(conic, quadPts);
+ ResultType resultType = this->intersectRay(quadPts, kCtrlPt_RayType
+ STROKER_DEBUG_PARAMS(fRecursionDepth) );
+ if (resultType != kQuad_ResultType) {
+ return resultType;
+ }
+ // project a ray from the curve to the stroke
+ SkPoint ray[2]; // points near midpoint on quad, midpoint on conic
+ this->conicPerpRay(conic, quadPts->fMidT, &ray[1], &ray[0], nullptr);
+ return this->strokeCloseEnough(quadPts->fQuad, ray, quadPts
+ STROKER_DEBUG_PARAMS(fRecursionDepth));
+}
+
+SkPathStroker::ResultType SkPathStroker::compareQuadQuad(const SkPoint quad[3],
+ SkQuadConstruct* quadPts) {
+ // get the quadratic approximation of the stroke
+ if (!quadPts->fStartSet) {
+ SkPoint quadStartPt;
+ this->quadPerpRay(quad, quadPts->fStartT, &quadStartPt, &quadPts->fQuad[0],
+ &quadPts->fTangentStart);
+ quadPts->fStartSet = true;
+ }
+ if (!quadPts->fEndSet) {
+ SkPoint quadEndPt;
+ this->quadPerpRay(quad, quadPts->fEndT, &quadEndPt, &quadPts->fQuad[2],
+ &quadPts->fTangentEnd);
+ quadPts->fEndSet = true;
+ }
+ ResultType resultType = this->intersectRay(quadPts, kCtrlPt_RayType
+ STROKER_DEBUG_PARAMS(fRecursionDepth));
+ if (resultType != kQuad_ResultType) {
+ return resultType;
+ }
+ // project a ray from the curve to the stroke
+ SkPoint ray[2];
+ this->quadPerpRay(quad, quadPts->fMidT, &ray[1], &ray[0], nullptr);
+ return this->strokeCloseEnough(quadPts->fQuad, ray, quadPts
+ STROKER_DEBUG_PARAMS(fRecursionDepth));
+}
+
+void SkPathStroker::addDegenerateLine(const SkQuadConstruct* quadPts) {
+ const SkPoint* quad = quadPts->fQuad;
+ SkPath* path = fStrokeType == kOuter_StrokeType ? &fOuter : &fInner;
+ path->lineTo(quad[2].fX, quad[2].fY);
+}
+
+bool SkPathStroker::cubicMidOnLine(const SkPoint cubic[4], const SkQuadConstruct* quadPts) const {
+ SkPoint strokeMid;
+ this->cubicQuadMid(cubic, quadPts, &strokeMid);
+ SkScalar dist = pt_to_line(strokeMid, quadPts->fQuad[0], quadPts->fQuad[2]);
+ return dist < fInvResScaleSquared;
+}
+
+bool SkPathStroker::cubicStroke(const SkPoint cubic[4], SkQuadConstruct* quadPts) {
+ if (!fFoundTangents) {
+ ResultType resultType = this->tangentsMeet(cubic, quadPts);
+ if (kQuad_ResultType != resultType) {
+ if ((kDegenerate_ResultType == resultType
+ || points_within_dist(quadPts->fQuad[0], quadPts->fQuad[2],
+ fInvResScale)) && cubicMidOnLine(cubic, quadPts)) {
+ addDegenerateLine(quadPts);
+ return true;
+ }
+ } else {
+ fFoundTangents = true;
+ }
+ }
+ if (fFoundTangents) {
+ ResultType resultType = this->compareQuadCubic(cubic, quadPts);
+ if (kQuad_ResultType == resultType) {
+ SkPath* path = fStrokeType == kOuter_StrokeType ? &fOuter : &fInner;
+ const SkPoint* stroke = quadPts->fQuad;
+ path->quadTo(stroke[1].fX, stroke[1].fY, stroke[2].fX, stroke[2].fY);
+ return true;
+ }
+ if (kDegenerate_ResultType == resultType) {
+ if (!quadPts->fOppositeTangents) {
+ addDegenerateLine(quadPts);
+ return true;
+ }
+ }
+ }
+ if (!SkScalarIsFinite(quadPts->fQuad[2].fX) || !SkScalarIsFinite(quadPts->fQuad[2].fY)) {
+ return false; // just abort if projected quad isn't representable
+ }
+ SkDEBUGCODE(gMaxRecursion[fFoundTangents] = SkTMax(gMaxRecursion[fFoundTangents],
+ fRecursionDepth + 1));
+ if (++fRecursionDepth > kRecursiveLimits[fFoundTangents]) {
+        return false;  // just abort if we've exceeded the recursion limit
+ }
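+    // subdivide the t range and stroke each half recursively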
+ SkQuadConstruct half;
+ if (!half.initWithStart(quadPts)) {
+ addDegenerateLine(quadPts);
+ return true;
+ }
+ if (!this->cubicStroke(cubic, &half)) {
+ return false;
+ }
+ if (!half.initWithEnd(quadPts)) {
+ addDegenerateLine(quadPts);
+ return true;
+ }
+ if (!this->cubicStroke(cubic, &half)) {
+ return false;
+ }
+ --fRecursionDepth;
+ return true;
+}
+
+bool SkPathStroker::conicStroke(const SkConic& conic, SkQuadConstruct* quadPts) {
+ ResultType resultType = this->compareQuadConic(conic, quadPts);
+ if (kQuad_ResultType == resultType) {
+ const SkPoint* stroke = quadPts->fQuad;
+ SkPath* path = fStrokeType == kOuter_StrokeType ? &fOuter : &fInner;
+ path->quadTo(stroke[1].fX, stroke[1].fY, stroke[2].fX, stroke[2].fY);
+ return true;
+ }
+ if (kDegenerate_ResultType == resultType) {
+ addDegenerateLine(quadPts);
+ return true;
+ }
+ SkDEBUGCODE(gMaxRecursion[kConic_RecursiveLimit] = SkTMax(gMaxRecursion[kConic_RecursiveLimit],
+ fRecursionDepth + 1));
+ if (++fRecursionDepth > kRecursiveLimits[kConic_RecursiveLimit]) {
+        return false;  // just abort if we've exceeded the recursion limit
+ }
+ SkQuadConstruct half;
+ (void) half.initWithStart(quadPts);
+ if (!this->conicStroke(conic, &half)) {
+ return false;
+ }
+ (void) half.initWithEnd(quadPts);
+ if (!this->conicStroke(conic, &half)) {
+ return false;
+ }
+ --fRecursionDepth;
+ return true;
+}
+
+bool SkPathStroker::quadStroke(const SkPoint quad[3], SkQuadConstruct* quadPts) {
+ ResultType resultType = this->compareQuadQuad(quad, quadPts);
+ if (kQuad_ResultType == resultType) {
+ const SkPoint* stroke = quadPts->fQuad;
+ SkPath* path = fStrokeType == kOuter_StrokeType ? &fOuter : &fInner;
+ path->quadTo(stroke[1].fX, stroke[1].fY, stroke[2].fX, stroke[2].fY);
+ return true;
+ }
+ if (kDegenerate_ResultType == resultType) {
+ addDegenerateLine(quadPts);
+ return true;
+ }
+ SkDEBUGCODE(gMaxRecursion[kQuad_RecursiveLimit] = SkTMax(gMaxRecursion[kQuad_RecursiveLimit],
+ fRecursionDepth + 1));
+ if (++fRecursionDepth > kRecursiveLimits[kQuad_RecursiveLimit]) {
+        return false;  // just abort if we've exceeded the recursion limit
+ }
+ SkQuadConstruct half;
+ (void) half.initWithStart(quadPts);
+ if (!this->quadStroke(quad, &half)) {
+ return false;
+ }
+ (void) half.initWithEnd(quadPts);
+ if (!this->quadStroke(quad, &half)) {
+ return false;
+ }
+ --fRecursionDepth;
+ return true;
+}
+
+void SkPathStroker::cubicTo(const SkPoint& pt1, const SkPoint& pt2,
+ const SkPoint& pt3) {
+ const SkPoint cubic[4] = { fPrevPt, pt1, pt2, pt3 };
+ SkPoint reduction[3];
+ const SkPoint* tangentPt;
+ ReductionType reductionType = CheckCubicLinear(cubic, reduction, &tangentPt);
+ if (kPoint_ReductionType == reductionType) {
+ /* If the stroke consists of a moveTo followed by a degenerate curve, treat it
+ as if it were followed by a zero-length line. Lines without length
+ can have square and round end caps. */
+ this->lineTo(pt3);
+ return;
+ }
+ if (kLine_ReductionType == reductionType) {
+ this->lineTo(pt3);
+ return;
+ }
+ if (kDegenerate_ReductionType <= reductionType && kDegenerate3_ReductionType >= reductionType) {
+ this->lineTo(reduction[0]);
+ SkStrokerPriv::JoinProc saveJoiner = fJoiner;
+ fJoiner = SkStrokerPriv::JoinFactory(SkPaint::kRound_Join);
+ if (kDegenerate2_ReductionType <= reductionType) {
+ this->lineTo(reduction[1]);
+ }
+ if (kDegenerate3_ReductionType == reductionType) {
+ this->lineTo(reduction[2]);
+ }
+ this->lineTo(pt3);
+ fJoiner = saveJoiner;
+ return;
+ }
+ SkASSERT(kQuad_ReductionType == reductionType);
+ SkVector normalAB, unitAB, normalCD, unitCD;
+ if (!this->preJoinTo(*tangentPt, &normalAB, &unitAB, false)) {
+ this->lineTo(pt3);
+ return;
+ }
+ SkScalar tValues[2];
+ int count = SkFindCubicInflections(cubic, tValues);
+ SkScalar lastT = 0;
+ for (int index = 0; index <= count; ++index) {
+ SkScalar nextT = index < count ? tValues[index] : 1;
+ SkQuadConstruct quadPts;
+ this->init(kOuter_StrokeType, &quadPts, lastT, nextT);
+ (void) this->cubicStroke(cubic, &quadPts);
+ this->init(kInner_StrokeType, &quadPts, lastT, nextT);
+ (void) this->cubicStroke(cubic, &quadPts);
+ lastT = nextT;
+ }
+ // emit the join even if one stroke succeeded but the last one failed
+ // this avoids reversing an inner stroke with a partial path followed by another moveto
+ this->setCubicEndNormal(cubic, normalAB, unitAB, &normalCD, &unitCD);
+
+ this->postJoinTo(pt3, normalCD, unitCD);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkPaintDefaults.h"
+
+SkStroke::SkStroke() {
+ fWidth = SK_Scalar1;
+ fMiterLimit = SkPaintDefaults_MiterLimit;
+ fResScale = 1;
+ fCap = SkPaint::kDefault_Cap;
+ fJoin = SkPaint::kDefault_Join;
+ fDoFill = false;
+}
+
+SkStroke::SkStroke(const SkPaint& p) {
+ fWidth = p.getStrokeWidth();
+ fMiterLimit = p.getStrokeMiter();
+ fResScale = 1;
+ fCap = (uint8_t)p.getStrokeCap();
+ fJoin = (uint8_t)p.getStrokeJoin();
+ fDoFill = SkToU8(p.getStyle() == SkPaint::kStrokeAndFill_Style);
+}
+
+SkStroke::SkStroke(const SkPaint& p, SkScalar width) {
+ fWidth = width;
+ fMiterLimit = p.getStrokeMiter();
+ fResScale = 1;
+ fCap = (uint8_t)p.getStrokeCap();
+ fJoin = (uint8_t)p.getStrokeJoin();
+ fDoFill = SkToU8(p.getStyle() == SkPaint::kStrokeAndFill_Style);
+}
+
+void SkStroke::setWidth(SkScalar width) {
+ SkASSERT(width >= 0);
+ fWidth = width;
+}
+
+void SkStroke::setMiterLimit(SkScalar miterLimit) {
+ SkASSERT(miterLimit >= 0);
+ fMiterLimit = miterLimit;
+}
+
+void SkStroke::setCap(SkPaint::Cap cap) {
+ SkASSERT((unsigned)cap < SkPaint::kCapCount);
+ fCap = SkToU8(cap);
+}
+
+void SkStroke::setJoin(SkPaint::Join join) {
+ SkASSERT((unsigned)join < SkPaint::kJoinCount);
+ fJoin = SkToU8(join);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// If src==dst, then we use a tmp path to record the stroke, and then swap
+// its contents with src when we're done.
+class AutoTmpPath {
+public:
+ AutoTmpPath(const SkPath& src, SkPath** dst) : fSrc(src) {
+ if (&src == *dst) {
+ *dst = &fTmpDst;
+ fSwapWithSrc = true;
+ } else {
+ (*dst)->reset();
+ fSwapWithSrc = false;
+ }
+ }
+
+ ~AutoTmpPath() {
+ if (fSwapWithSrc) {
+ fTmpDst.swap(*const_cast<SkPath*>(&fSrc));
+ }
+ }
+
+private:
+ SkPath fTmpDst;
+ const SkPath& fSrc;
+ bool fSwapWithSrc;
+};
+
+void SkStroke::strokePath(const SkPath& src, SkPath* dst) const {
+ SkASSERT(dst);
+
+ SkScalar radius = SkScalarHalf(fWidth);
+
+ AutoTmpPath tmp(src, &dst);
+
+ if (radius <= 0) {
+ return;
+ }
+
+ // If src is really a rect, call our specialty strokeRect() method
+ {
+ SkRect rect;
+ bool isClosed;
+ SkPath::Direction dir;
+ if (src.isRect(&rect, &isClosed, &dir) && isClosed) {
+ this->strokeRect(rect, dst, dir);
+ // our answer should preserve the inverseness of the src
+ if (src.isInverseFillType()) {
+ SkASSERT(!dst->isInverseFillType());
+ dst->toggleInverseFillType();
+ }
+ return;
+ }
+ }
+
+ // We can always ignore centers for stroke and fill convex line-only paths
+ // TODO: remove the line-only restriction
+ bool ignoreCenter = fDoFill && (src.getSegmentMasks() == SkPath::kLine_SegmentMask) &&
+ src.isLastContourClosed() && src.isConvex();
+
+ SkPathStroker stroker(src, radius, fMiterLimit, this->getCap(), this->getJoin(),
+ fResScale, ignoreCenter);
+ SkPath::Iter iter(src, false);
+ SkPath::Verb lastSegment = SkPath::kMove_Verb;
+
+ for (;;) {
+ SkPoint pts[4];
+ switch (iter.next(pts, false)) {
+ case SkPath::kMove_Verb:
+ stroker.moveTo(pts[0]);
+ break;
+ case SkPath::kLine_Verb:
+ stroker.lineTo(pts[1], &iter);
+ lastSegment = SkPath::kLine_Verb;
+ break;
+ case SkPath::kQuad_Verb:
+ stroker.quadTo(pts[1], pts[2]);
+ lastSegment = SkPath::kQuad_Verb;
+ break;
+ case SkPath::kConic_Verb: {
+ stroker.conicTo(pts[1], pts[2], iter.conicWeight());
+ lastSegment = SkPath::kConic_Verb;
+ break;
+            }
+ case SkPath::kCubic_Verb:
+ stroker.cubicTo(pts[1], pts[2], pts[3]);
+ lastSegment = SkPath::kCubic_Verb;
+ break;
+ case SkPath::kClose_Verb:
+ if (SkPaint::kButt_Cap != this->getCap()) {
+ /* If the stroke consists of a moveTo followed by a close, treat it
+ as if it were followed by a zero-length line. Lines without length
+ can have square and round end caps. */
+ if (stroker.hasOnlyMoveTo()) {
+ stroker.lineTo(stroker.moveToPt());
+ goto ZERO_LENGTH;
+ }
+ /* If the stroke consists of a moveTo followed by one or more zero-length
+                    verbs, then followed by a close, treat it as if it were followed by a
+ zero-length line. Lines without length can have square & round end caps. */
+ if (stroker.isZeroLength()) {
+ ZERO_LENGTH:
+ lastSegment = SkPath::kLine_Verb;
+ break;
+ }
+ }
+ stroker.close(lastSegment == SkPath::kLine_Verb);
+ break;
+ case SkPath::kDone_Verb:
+ goto DONE;
+ }
+ }
+DONE:
+ stroker.done(dst, lastSegment == SkPath::kLine_Verb);
+
+ if (fDoFill && !ignoreCenter) {
+ if (SkPathPriv::CheapIsFirstDirection(src, SkPathPriv::kCCW_FirstDirection)) {
+ dst->reverseAddPath(src);
+ } else {
+ dst->addPath(src);
+ }
+ } else {
+ // Seems like we can assume that a 2-point src would always result in
+ // a convex stroke, but testing has proved otherwise.
+ // TODO: fix the stroker to make this assumption true (without making
+        // it slower than the work that will be done in computeConvexity())
+#if 0
+ // this test results in a non-convex stroke :(
+ static void test(SkCanvas* canvas) {
+ SkPoint pts[] = { 146.333328, 192.333328, 300.333344, 293.333344 };
+ SkPaint paint;
+ paint.setStrokeWidth(7);
+ paint.setStrokeCap(SkPaint::kRound_Cap);
+ canvas->drawLine(pts[0].fX, pts[0].fY, pts[1].fX, pts[1].fY, paint);
+ }
+#endif
+#if 0
+ if (2 == src.countPoints()) {
+ dst->setIsConvex(true);
+ }
+#endif
+ }
+
+ // our answer should preserve the inverseness of the src
+ if (src.isInverseFillType()) {
+ SkASSERT(!dst->isInverseFillType());
+ dst->toggleInverseFillType();
+ }
+}
+
+static SkPath::Direction reverse_direction(SkPath::Direction dir) {
+ static const SkPath::Direction gOpposite[] = { SkPath::kCCW_Direction, SkPath::kCW_Direction };
+ return gOpposite[dir];
+}
+
+static void addBevel(SkPath* path, const SkRect& r, const SkRect& outer, SkPath::Direction dir) {
+ SkPoint pts[8];
+
+ if (SkPath::kCW_Direction == dir) {
+ pts[0].set(r.fLeft, outer.fTop);
+ pts[1].set(r.fRight, outer.fTop);
+ pts[2].set(outer.fRight, r.fTop);
+ pts[3].set(outer.fRight, r.fBottom);
+ pts[4].set(r.fRight, outer.fBottom);
+ pts[5].set(r.fLeft, outer.fBottom);
+ pts[6].set(outer.fLeft, r.fBottom);
+ pts[7].set(outer.fLeft, r.fTop);
+ } else {
+ pts[7].set(r.fLeft, outer.fTop);
+ pts[6].set(r.fRight, outer.fTop);
+ pts[5].set(outer.fRight, r.fTop);
+ pts[4].set(outer.fRight, r.fBottom);
+ pts[3].set(r.fRight, outer.fBottom);
+ pts[2].set(r.fLeft, outer.fBottom);
+ pts[1].set(outer.fLeft, r.fBottom);
+ pts[0].set(outer.fLeft, r.fTop);
+ }
+ path->addPoly(pts, 8, true);
+}
+
+void SkStroke::strokeRect(const SkRect& origRect, SkPath* dst,
+ SkPath::Direction dir) const {
+ SkASSERT(dst != nullptr);
+ dst->reset();
+
+ SkScalar radius = SkScalarHalf(fWidth);
+ if (radius <= 0) {
+ return;
+ }
+
+ SkScalar rw = origRect.width();
+ SkScalar rh = origRect.height();
+ if ((rw < 0) ^ (rh < 0)) {
+ dir = reverse_direction(dir);
+ }
+ SkRect rect(origRect);
+ rect.sort();
+ // reassign these, now that we know they'll be >= 0
+ rw = rect.width();
+ rh = rect.height();
+
+ SkRect r(rect);
+ r.outset(radius, radius);
+
+ SkPaint::Join join = (SkPaint::Join)fJoin;
+ if (SkPaint::kMiter_Join == join && fMiterLimit < SK_ScalarSqrt2) {
+ join = SkPaint::kBevel_Join;
+ }
+
+ switch (join) {
+ case SkPaint::kMiter_Join:
+ dst->addRect(r, dir);
+ break;
+ case SkPaint::kBevel_Join:
+ addBevel(dst, rect, r, dir);
+ break;
+ case SkPaint::kRound_Join:
+ dst->addRoundRect(r, radius, radius, dir);
+ break;
+ default:
+ break;
+ }
+
+ if (fWidth < SkMinScalar(rw, rh) && !fDoFill) {
+ r = rect;
+ r.inset(radius, radius);
+ dst->addRect(r, reverse_direction(dir));
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkStroke.h b/gfx/skia/skia/src/core/SkStroke.h
new file mode 100644
index 000000000..f93efdee0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStroke.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStroke_DEFINED
+#define SkStroke_DEFINED
+
+#include "SkPath.h"
+#include "SkPoint.h"
+#include "SkPaint.h"
+#include "SkStrokerPriv.h"
+
+#ifdef SK_DEBUG
+extern bool gDebugStrokerErrorSet;
+extern SkScalar gDebugStrokerError;
+extern int gMaxRecursion[];
+#endif
+
+/** \class SkStroke
+ SkStroke is the utility class that constructs paths by stroking
+ geometries (lines, rects, ovals, roundrects, paths). This is
+ invoked when a geometry or text is drawn in a canvas with the
+ kStroke_Mask bit set in the paint.
+*/
+class SkStroke {
+public:
+ SkStroke();
+ SkStroke(const SkPaint&);
+ SkStroke(const SkPaint&, SkScalar width); // width overrides paint.getStrokeWidth()
+
+ SkPaint::Cap getCap() const { return (SkPaint::Cap)fCap; }
+ void setCap(SkPaint::Cap);
+
+ SkPaint::Join getJoin() const { return (SkPaint::Join)fJoin; }
+ void setJoin(SkPaint::Join);
+
+ void setMiterLimit(SkScalar);
+ void setWidth(SkScalar);
+
+ bool getDoFill() const { return SkToBool(fDoFill); }
+ void setDoFill(bool doFill) { fDoFill = SkToU8(doFill); }
+
+ /**
+ * ResScale is the "intended" resolution for the output.
+ * Default is 1.0.
+ * Larger values (res > 1) indicate that the result should be more precise, since it will
+ * be zoomed up, and small errors will be magnified.
+ * Smaller values (0 < res < 1) indicate that the result can be less precise, since it will
+ * be zoomed down, and small errors may be invisible.
+ */
+ SkScalar getResScale() const { return fResScale; }
+ void setResScale(SkScalar rs) {
+ SkASSERT(rs > 0 && SkScalarIsFinite(rs));
+ fResScale = rs;
+ }
+
+ /**
+     * Stroke the specified rect, winding it in the specified direction.
+ */
+ void strokeRect(const SkRect& rect, SkPath* result,
+ SkPath::Direction = SkPath::kCW_Direction) const;
+ void strokePath(const SkPath& path, SkPath*) const;
+
+ ////////////////////////////////////////////////////////////////
+
+private:
+ SkScalar fWidth, fMiterLimit;
+ SkScalar fResScale;
+ uint8_t fCap, fJoin;
+ SkBool8 fDoFill;
+
+ friend class SkPaint;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkStrokeRec.cpp b/gfx/skia/skia/src/core/SkStrokeRec.cpp
new file mode 100644
index 000000000..f3cca1653
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrokeRec.cpp
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkStrokeRec.h"
+#include "SkPaintDefaults.h"
+
+// must be < 0, since ==0 means hairline, and >0 means normal stroke
+#define kStrokeRec_FillStyleWidth (-SK_Scalar1)
+
+SkStrokeRec::SkStrokeRec(InitStyle s) {
+ fResScale = 1;
+ fWidth = (kFill_InitStyle == s) ? kStrokeRec_FillStyleWidth : 0;
+ fMiterLimit = SkPaintDefaults_MiterLimit;
+ fCap = SkPaint::kDefault_Cap;
+ fJoin = SkPaint::kDefault_Join;
+ fStrokeAndFill = false;
+}
+
+SkStrokeRec::SkStrokeRec(const SkPaint& paint, SkScalar resScale) {
+ this->init(paint, paint.getStyle(), resScale);
+}
+
+SkStrokeRec::SkStrokeRec(const SkPaint& paint, SkPaint::Style styleOverride, SkScalar resScale) {
+ this->init(paint, styleOverride, resScale);
+}
+
+void SkStrokeRec::init(const SkPaint& paint, SkPaint::Style style, SkScalar resScale) {
+ fResScale = resScale;
+
+ switch (style) {
+ case SkPaint::kFill_Style:
+ fWidth = kStrokeRec_FillStyleWidth;
+ fStrokeAndFill = false;
+ break;
+ case SkPaint::kStroke_Style:
+ fWidth = paint.getStrokeWidth();
+ fStrokeAndFill = false;
+ break;
+ case SkPaint::kStrokeAndFill_Style:
+ if (0 == paint.getStrokeWidth()) {
+ // hairline+fill == fill
+ fWidth = kStrokeRec_FillStyleWidth;
+ fStrokeAndFill = false;
+ } else {
+ fWidth = paint.getStrokeWidth();
+ fStrokeAndFill = true;
+ }
+ break;
+ default:
+ SkDEBUGFAIL("unknown paint style");
+ // fall back on just fill
+ fWidth = kStrokeRec_FillStyleWidth;
+ fStrokeAndFill = false;
+ break;
+ }
+
+ // copy these from the paint, regardless of our "style"
+ fMiterLimit = paint.getStrokeMiter();
+ fCap = paint.getStrokeCap();
+ fJoin = paint.getStrokeJoin();
+}
+
+SkStrokeRec::Style SkStrokeRec::getStyle() const {
+ if (fWidth < 0) {
+ return kFill_Style;
+ } else if (0 == fWidth) {
+ return kHairline_Style;
+ } else {
+ return fStrokeAndFill ? kStrokeAndFill_Style : kStroke_Style;
+ }
+}
+
+void SkStrokeRec::setFillStyle() {
+ fWidth = kStrokeRec_FillStyleWidth;
+ fStrokeAndFill = false;
+}
+
+void SkStrokeRec::setHairlineStyle() {
+ fWidth = 0;
+ fStrokeAndFill = false;
+}
+
+void SkStrokeRec::setStrokeStyle(SkScalar width, bool strokeAndFill) {
+ if (strokeAndFill && (0 == width)) {
+ // hairline+fill == fill
+ this->setFillStyle();
+ } else {
+ fWidth = width;
+ fStrokeAndFill = strokeAndFill;
+ }
+}
+
+#include "SkStroke.h"
+
+#ifdef SK_DEBUG
+ // enables tweaking these values at runtime from SampleApp
+ bool gDebugStrokerErrorSet = false;
+ SkScalar gDebugStrokerError;
+#endif
+
+bool SkStrokeRec::applyToPath(SkPath* dst, const SkPath& src) const {
+ if (fWidth <= 0) { // hairline or fill
+ return false;
+ }
+
+ SkStroke stroker;
+ stroker.setCap((SkPaint::Cap)fCap);
+ stroker.setJoin((SkPaint::Join)fJoin);
+ stroker.setMiterLimit(fMiterLimit);
+ stroker.setWidth(fWidth);
+ stroker.setDoFill(fStrokeAndFill);
+#ifdef SK_DEBUG
+ stroker.setResScale(gDebugStrokerErrorSet ? gDebugStrokerError : fResScale);
+#else
+ stroker.setResScale(fResScale);
+#endif
+ stroker.strokePath(src, dst);
+ return true;
+}
+
+void SkStrokeRec::applyToPaint(SkPaint* paint) const {
+ if (fWidth < 0) { // fill
+ paint->setStyle(SkPaint::kFill_Style);
+ return;
+ }
+
+ paint->setStyle(fStrokeAndFill ? SkPaint::kStrokeAndFill_Style : SkPaint::kStroke_Style);
+ paint->setStrokeWidth(fWidth);
+ paint->setStrokeMiter(fMiterLimit);
+ paint->setStrokeCap((SkPaint::Cap)fCap);
+ paint->setStrokeJoin((SkPaint::Join)fJoin);
+}
+
+static inline SkScalar get_inflation_bounds(SkPaint::Join join,
+ SkScalar strokeWidth,
+ SkScalar miterLimit) {
+ if (strokeWidth < 0) { // fill
+ return 0;
+ } else if (0 == strokeWidth) {
+ return SK_Scalar1;
+ }
+ // since we're stroked, outset the rect by the radius (and join type)
+ SkScalar radius = SkScalarHalf(strokeWidth);
+ if (SkPaint::kMiter_Join == join) {
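+        // a miter tip can extend as far as miterLimit * radius beyond the join point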
+ if (miterLimit > SK_Scalar1) {
+ radius = SkScalarMul(miterLimit, radius);
+ }
+ }
+ return radius;
+}
+
+SkScalar SkStrokeRec::getInflationRadius() const {
+ return get_inflation_bounds((SkPaint::Join)fJoin, fWidth, fMiterLimit);
+}
+
+SkScalar SkStrokeRec::GetInflationRadius(const SkPaint& paint, SkPaint::Style style) {
+ SkScalar width = SkPaint::kFill_Style == style ? -SK_Scalar1 : paint.getStrokeWidth();
+ return get_inflation_bounds(paint.getStrokeJoin(), width, paint.getStrokeMiter());
+}
diff --git a/gfx/skia/skia/src/core/SkStrokerPriv.cpp b/gfx/skia/skia/src/core/SkStrokerPriv.cpp
new file mode 100644
index 000000000..840f96198
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrokerPriv.cpp
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkStrokerPriv.h"
+#include "SkGeometry.h"
+#include "SkPath.h"
+
+static void ButtCapper(SkPath* path, const SkPoint& pivot,
+ const SkVector& normal, const SkPoint& stop,
+ SkPath*)
+{
+ path->lineTo(stop.fX, stop.fY);
+}
+
+static void RoundCapper(SkPath* path, const SkPoint& pivot,
+ const SkVector& normal, const SkPoint& stop,
+ SkPath*)
+{
+ SkVector parallel;
+ normal.rotateCW(&parallel);
+
+ SkPoint projectedCenter = pivot + parallel;
+
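+    // two conic (rational quadratic) quarter arcs with weight sqrt(2)/2 trace the semicircular
+    // cap: out around the projected tip, then back in to the opposite edge of the stroke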
+ path->conicTo(projectedCenter + normal, projectedCenter, SK_ScalarRoot2Over2);
+ path->conicTo(projectedCenter - normal, stop, SK_ScalarRoot2Over2);
+}
+
+static void SquareCapper(SkPath* path, const SkPoint& pivot,
+ const SkVector& normal, const SkPoint& stop,
+ SkPath* otherPath)
+{
+ SkVector parallel;
+ normal.rotateCW(&parallel);
+
+ if (otherPath)
+ {
+ path->setLastPt(pivot.fX + normal.fX + parallel.fX, pivot.fY + normal.fY + parallel.fY);
+ path->lineTo(pivot.fX - normal.fX + parallel.fX, pivot.fY - normal.fY + parallel.fY);
+ }
+ else
+ {
+ path->lineTo(pivot.fX + normal.fX + parallel.fX, pivot.fY + normal.fY + parallel.fY);
+ path->lineTo(pivot.fX - normal.fX + parallel.fX, pivot.fY - normal.fY + parallel.fY);
+ path->lineTo(stop.fX, stop.fY);
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+
+static bool is_clockwise(const SkVector& before, const SkVector& after)
+{
+ return SkScalarMul(before.fX, after.fY) - SkScalarMul(before.fY, after.fX) > 0;
+}
+
+enum AngleType {
+ kNearly180_AngleType,
+ kSharp_AngleType,
+ kShallow_AngleType,
+ kNearlyLine_AngleType
+};
+
+static AngleType Dot2AngleType(SkScalar dot)
+{
+// need more precise fixed normalization
+// SkASSERT(SkScalarAbs(dot) <= SK_Scalar1 + SK_ScalarNearlyZero);
+
+ if (dot >= 0) // shallow or line
+ return SkScalarNearlyZero(SK_Scalar1 - dot) ? kNearlyLine_AngleType : kShallow_AngleType;
+ else // sharp or 180
+ return SkScalarNearlyZero(SK_Scalar1 + dot) ? kNearly180_AngleType : kSharp_AngleType;
+}
+
+static void HandleInnerJoin(SkPath* inner, const SkPoint& pivot, const SkVector& after)
+{
+#if 1
+ /* In the degenerate case that the stroke radius is larger than our segments
+ just connecting the two inner segments may "show through" as a funny
+ diagonal. To pseudo-fix this, we go through the pivot point. This adds
+ an extra point/edge, but I can't see a cheap way to know when this is
+ not needed :(
+ */
+ inner->lineTo(pivot.fX, pivot.fY);
+#endif
+
+ inner->lineTo(pivot.fX - after.fX, pivot.fY - after.fY);
+}
+
+static void BluntJoiner(SkPath* outer, SkPath* inner, const SkVector& beforeUnitNormal,
+ const SkPoint& pivot, const SkVector& afterUnitNormal,
+ SkScalar radius, SkScalar invMiterLimit, bool, bool)
+{
+ SkVector after;
+ afterUnitNormal.scale(radius, &after);
+
+ if (!is_clockwise(beforeUnitNormal, afterUnitNormal))
+ {
+ SkTSwap<SkPath*>(outer, inner);
+ after.negate();
+ }
+
+ outer->lineTo(pivot.fX + after.fX, pivot.fY + after.fY);
+ HandleInnerJoin(inner, pivot, after);
+}
+
+static void RoundJoiner(SkPath* outer, SkPath* inner, const SkVector& beforeUnitNormal,
+ const SkPoint& pivot, const SkVector& afterUnitNormal,
+ SkScalar radius, SkScalar invMiterLimit, bool, bool)
+{
+ SkScalar dotProd = SkPoint::DotProduct(beforeUnitNormal, afterUnitNormal);
+ AngleType angleType = Dot2AngleType(dotProd);
+
+ if (angleType == kNearlyLine_AngleType)
+ return;
+
+ SkVector before = beforeUnitNormal;
+ SkVector after = afterUnitNormal;
+ SkRotationDirection dir = kCW_SkRotationDirection;
+
+ if (!is_clockwise(before, after))
+ {
+ SkTSwap<SkPath*>(outer, inner);
+ before.negate();
+ after.negate();
+ dir = kCCW_SkRotationDirection;
+ }
+
+ SkMatrix matrix;
+ matrix.setScale(radius, radius);
+ matrix.postTranslate(pivot.fX, pivot.fY);
+ SkConic conics[SkConic::kMaxConicsForArc];
+ int count = SkConic::BuildUnitArc(before, after, dir, &matrix, conics);
+ if (count > 0) {
+ for (int i = 0; i < count; ++i) {
+ outer->conicTo(conics[i].fPts[1], conics[i].fPts[2], conics[i].fW);
+ }
+ after.scale(radius);
+ HandleInnerJoin(inner, pivot, after);
+ }
+}
+
+#define kOneOverSqrt2 (0.707106781f)
+
+static void MiterJoiner(SkPath* outer, SkPath* inner, const SkVector& beforeUnitNormal,
+ const SkPoint& pivot, const SkVector& afterUnitNormal,
+ SkScalar radius, SkScalar invMiterLimit,
+ bool prevIsLine, bool currIsLine)
+{
+    // dotProd is built from normals, so its sign is opposite that of cos(angle between segments)
+ SkScalar dotProd = SkPoint::DotProduct(beforeUnitNormal, afterUnitNormal);
+ AngleType angleType = Dot2AngleType(dotProd);
+ SkVector before = beforeUnitNormal;
+ SkVector after = afterUnitNormal;
+ SkVector mid;
+ SkScalar sinHalfAngle;
+ bool ccw;
+
+ if (angleType == kNearlyLine_AngleType)
+ return;
+ if (angleType == kNearly180_AngleType)
+ {
+ currIsLine = false;
+ goto DO_BLUNT;
+ }
+
+ ccw = !is_clockwise(before, after);
+ if (ccw)
+ {
+ SkTSwap<SkPath*>(outer, inner);
+ before.negate();
+ after.negate();
+ }
+
+ /* Before we enter the world of square-roots and divides,
+ check if we're trying to join an upright right angle
+ (common case for stroking rectangles). If so, special case
+       that (for speed and accuracy).
+ Note: we only need to check one normal if dot==0
+ */
+ if (0 == dotProd && invMiterLimit <= kOneOverSqrt2)
+ {
+ mid.set(SkScalarMul(before.fX + after.fX, radius),
+ SkScalarMul(before.fY + after.fY, radius));
+ goto DO_MITER;
+ }
+
+ /* midLength = radius / sinHalfAngle
+ if (midLength > miterLimit * radius) abort
+ if (radius / sinHalf > miterLimit * radius) abort
+ if (1 / sinHalf > miterLimit) abort
+ if (1 / miterLimit > sinHalf) abort
+ My dotProd is opposite sign, since it is built from normals and not tangents
+ hence 1 + dot instead of 1 - dot in the formula
+ */
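+    // half-angle identity: sin(phi/2) == sqrt((1 - cos(phi)) / 2), where phi is the angle between
+    // the two segments at the pivot; dotProd has the opposite sign of cos(phi), hence 1 + dotProd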
+ sinHalfAngle = SkScalarSqrt(SkScalarHalf(SK_Scalar1 + dotProd));
+ if (sinHalfAngle < invMiterLimit)
+ {
+ currIsLine = false;
+ goto DO_BLUNT;
+ }
+
+ // choose the most accurate way to form the initial mid-vector
+ if (angleType == kSharp_AngleType)
+ {
+ mid.set(after.fY - before.fY, before.fX - after.fX);
+ if (ccw)
+ mid.negate();
+ }
+ else
+ mid.set(before.fX + after.fX, before.fY + after.fY);
+
+ mid.setLength(radius / sinHalfAngle);
+DO_MITER:
+ if (prevIsLine)
+ outer->setLastPt(pivot.fX + mid.fX, pivot.fY + mid.fY);
+ else
+ outer->lineTo(pivot.fX + mid.fX, pivot.fY + mid.fY);
+
+DO_BLUNT:
+ after.scale(radius);
+ if (!currIsLine)
+ outer->lineTo(pivot.fX + after.fX, pivot.fY + after.fY);
+ HandleInnerJoin(inner, pivot, after);
+}
+
+/////////////////////////////////////////////////////////////////////////////
+
+SkStrokerPriv::CapProc SkStrokerPriv::CapFactory(SkPaint::Cap cap)
+{
+ static const SkStrokerPriv::CapProc gCappers[] = {
+ ButtCapper, RoundCapper, SquareCapper
+ };
+
+ SkASSERT((unsigned)cap < SkPaint::kCapCount);
+ return gCappers[cap];
+}
+
+SkStrokerPriv::JoinProc SkStrokerPriv::JoinFactory(SkPaint::Join join)
+{
+ static const SkStrokerPriv::JoinProc gJoiners[] = {
+ MiterJoiner, RoundJoiner, BluntJoiner
+ };
+
+ SkASSERT((unsigned)join < SkPaint::kJoinCount);
+ return gJoiners[join];
+}
diff --git a/gfx/skia/skia/src/core/SkStrokerPriv.h b/gfx/skia/skia/src/core/SkStrokerPriv.h
new file mode 100644
index 000000000..d008efa11
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrokerPriv.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkStrokerPriv_DEFINED
+#define SkStrokerPriv_DEFINED
+
+#include "SkStroke.h"
+
+#define CWX(x, y) (-y)
+#define CWY(x, y) (x)
+#define CCWX(x, y) (y)
+#define CCWY(x, y) (-x)
+
+#define CUBIC_ARC_FACTOR ((SK_ScalarSqrt2 - SK_Scalar1) * 4 / 3)
+
+class SkStrokerPriv {
+public:
+ typedef void (*CapProc)(SkPath* path,
+ const SkPoint& pivot,
+ const SkVector& normal,
+ const SkPoint& stop,
+ SkPath* otherPath);
+
+ typedef void (*JoinProc)(SkPath* outer, SkPath* inner,
+ const SkVector& beforeUnitNormal,
+ const SkPoint& pivot,
+ const SkVector& afterUnitNormal,
+ SkScalar radius, SkScalar invMiterLimit,
+ bool prevIsLine, bool currIsLine);
+
+ static CapProc CapFactory(SkPaint::Cap);
+ static JoinProc JoinFactory(SkPaint::Join);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkSurfacePriv.h b/gfx/skia/skia/src/core/SkSurfacePriv.h
new file mode 100644
index 000000000..74d19a6df
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSurfacePriv.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSurfacePriv_DEFINED
+#define SkSurfacePriv_DEFINED
+
+#include "SkSurfaceProps.h"
+
+static inline SkSurfaceProps SkSurfacePropsCopyOrDefault(const SkSurfaceProps* props) {
+ if (props) {
+ return *props;
+ } else {
+ return SkSurfaceProps(SkSurfaceProps::kLegacyFontHost_InitType);
+ }
+}
+
+static inline SkPixelGeometry SkSurfacePropsDefaultPixelGeometry() {
+ return SkSurfaceProps(SkSurfaceProps::kLegacyFontHost_InitType).pixelGeometry();
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkSwizzle.cpp b/gfx/skia/skia/src/core/SkSwizzle.cpp
new file mode 100644
index 000000000..f080b83a8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSwizzle.cpp
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkSwizzle.h"
+
+#include "SkOpts.h"
+
+void SkSwapRB(uint32_t* dest, const uint32_t* src, int count) {
+ SkOpts::RGBA_to_BGRA(dest, src, count);
+}
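+
+// A minimal usage sketch (illustrative only; the pixel buffers, width and height are hypothetical):
+//
+//   // Swizzle a width*height buffer of RGBA pixels into BGRA order (or vice versa).
+//   SkSwapRB(bgraPixels, rgbaPixels, width * height);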
diff --git a/gfx/skia/skia/src/core/SkTDPQueue.h b/gfx/skia/skia/src/core/SkTDPQueue.h
new file mode 100644
index 000000000..294c9f410
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTDPQueue.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTDPQueue_DEFINED
+#define SkTDPQueue_DEFINED
+
+#include "SkTDArray.h"
+
+/**
+ * This class implements a priority queue. T is the type of the elements in the queue. LESS is a
+ * function that compares two Ts and returns true if the first is higher priority than the second.
+ *
+ * Optionally objects may know their index into the priority queue. The queue will update the index
+ * as the objects move through the queue. This is enabled by using a non-nullptr function for INDEX.
+ * When an INDEX function is provided, random deletes from the queue are allowed using remove().
+ * Additionally, the priority is allowed to change as long as priorityDidChange() is called
+ * afterwards. In debug builds the index will be set to -1 before an element is removed from the
+ * queue.
+ */
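+//
+// A minimal usage sketch (illustrative only; Job, JobLess, JobIndex and 'job' are hypothetical):
+//
+//   struct Job {
+//       int fPriority;
+//       int fQueueIndex;  // maintained by the queue through JobIndex()
+//   };
+//   bool JobLess(Job* const& a, Job* const& b) { return a->fPriority < b->fPriority; }
+//   int* JobIndex(Job* const& job) { return &job->fQueueIndex; }
+//
+//   SkTDPQueue<Job*, JobLess, JobIndex> queue;
+//   queue.insert(job);             // job->fQueueIndex is kept up to date from here on
+//   job->fPriority = 7;
+//   queue.priorityDidChange(job);  // required after changing the priority
+//   queue.remove(job);             // random removal is allowed because INDEX is provided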
+template <typename T,
+ bool (*LESS)(const T&, const T&),
+ int* (*INDEX)(const T&) = (int* (*)(const T&))nullptr>
+class SkTDPQueue : public SkNoncopyable {
+public:
+ SkTDPQueue() {}
+
+ /** Number of items in the queue. */
+ int count() const { return fArray.count(); }
+
+ /** Gets the next item in the queue without popping it. */
+ const T& peek() const { return fArray[0]; }
+ T& peek() { return fArray[0]; }
+
+ /** Removes the next item. */
+ void pop() {
+ this->validate();
+ SkDEBUGCODE(if (SkToBool(INDEX)) { *INDEX(fArray[0]) = -1; })
+ if (1 == fArray.count()) {
+ fArray.pop();
+ return;
+ }
+
+ fArray[0] = fArray[fArray.count() - 1];
+ this->setIndex(0);
+ fArray.pop();
+ this->percolateDownIfNecessary(0);
+
+ this->validate();
+ }
+
+ /** Inserts a new item in the queue based on its priority. */
+ void insert(T entry) {
+ this->validate();
+ int index = fArray.count();
+ *fArray.append() = entry;
+ this->setIndex(fArray.count() - 1);
+ this->percolateUpIfNecessary(index);
+ this->validate();
+ }
+
+ /** Random access removal. This requires that the INDEX function is non-nullptr. */
+ void remove(T entry) {
+ SkASSERT(nullptr != INDEX);
+ int index = *INDEX(entry);
+ SkASSERT(index >= 0 && index < fArray.count());
+ this->validate();
+ SkDEBUGCODE(*INDEX(fArray[index]) = -1;)
+ if (index == fArray.count() - 1) {
+ fArray.pop();
+ return;
+ }
+ fArray[index] = fArray[fArray.count() - 1];
+ fArray.pop();
+ this->setIndex(index);
+ this->percolateUpOrDown(index);
+ this->validate();
+ }
+
+ /** Notification that the priority of an entry has changed. This must be called after an
+ item's priority is changed to maintain correct ordering. Changing the priority is only
+ allowed if an INDEX function is provided. */
+ void priorityDidChange(T entry) {
+ SkASSERT(nullptr != INDEX);
+ int index = *INDEX(entry);
+ SkASSERT(index >= 0 && index < fArray.count());
+ this->validate(index);
+ this->percolateUpOrDown(index);
+ this->validate();
+ }
+
+ /** Gets the item at index i in the priority queue (for i < this->count()). at(0) is equivalent
+ to peek(). Otherwise, there is no guarantee about ordering of elements in the queue. */
+ T at(int i) const { return fArray[i]; }
+
+private:
+ static int LeftOf(int x) { SkASSERT(x >= 0); return 2 * x + 1; }
+ static int ParentOf(int x) { SkASSERT(x > 0); return (x - 1) >> 1; }
+
+ void percolateUpOrDown(int index) {
+ SkASSERT(index >= 0);
+ if (!percolateUpIfNecessary(index)) {
+ this->validate(index);
+ this->percolateDownIfNecessary(index);
+ }
+ }
+
+ bool percolateUpIfNecessary(int index) {
+ SkASSERT(index >= 0);
+ bool percolated = false;
+ do {
+ if (0 == index) {
+ this->setIndex(index);
+ return percolated;
+ }
+ int p = ParentOf(index);
+ if (LESS(fArray[index], fArray[p])) {
+ SkTSwap(fArray[index], fArray[p]);
+ this->setIndex(index);
+ index = p;
+ percolated = true;
+ } else {
+ this->setIndex(index);
+ return percolated;
+ }
+ this->validate(index);
+ } while (true);
+ }
+
+ void percolateDownIfNecessary(int index) {
+ SkASSERT(index >= 0);
+ do {
+ int child = LeftOf(index);
+
+ if (child >= fArray.count()) {
+ // We're a leaf.
+ this->setIndex(index);
+ return;
+ }
+
+ if (child + 1 >= fArray.count()) {
+ // We only have a left child.
+ if (LESS(fArray[child], fArray[index])) {
+ SkTSwap(fArray[child], fArray[index]);
+ this->setIndex(child);
+ this->setIndex(index);
+ return;
+ }
+ } else if (LESS(fArray[child + 1], fArray[child])) {
+ // The right child is the one we should swap with, if we swap.
+ child++;
+ }
+
+ // Check if we need to swap.
+ if (LESS(fArray[child], fArray[index])) {
+ SkTSwap(fArray[child], fArray[index]);
+ this->setIndex(index);
+ index = child;
+ } else {
+ // We're less than both our children.
+ this->setIndex(index);
+ return;
+ }
+ this->validate(index);
+ } while (true);
+ }
+
+ void setIndex(int index) {
+ SkASSERT(index < fArray.count());
+ if (SkToBool(INDEX)) {
+ *INDEX(fArray[index]) = index;
+ }
+ }
+
+ void validate(int excludedIndex = -1) const {
+#ifdef SK_DEBUG
+ for (int i = 1; i < fArray.count(); ++i) {
+ int p = ParentOf(i);
+ if (excludedIndex != p && excludedIndex != i) {
+ SkASSERT(!(LESS(fArray[i], fArray[p])));
+ SkASSERT(!SkToBool(INDEX) || *INDEX(fArray[i]) == i);
+ }
+ }
+#endif
+ }
+
+ SkTDArray<T> fArray;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTDynamicHash.h b/gfx/skia/skia/src/core/SkTDynamicHash.h
new file mode 100644
index 000000000..b144d18d4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTDynamicHash.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTDynamicHash_DEFINED
+#define SkTDynamicHash_DEFINED
+
+#include "SkMath.h"
+#include "SkTemplates.h"
+#include "SkTypes.h"
+
+// Traits requires:
+// static const Key& GetKey(const T&) { ... }
+// static uint32_t Hash(const Key&) { ... }
+// We'll look on T for these by default, or you can pass a custom Traits type.
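+//
+// A minimal usage sketch (illustrative only; Entry is a hypothetical type and 'entry' a pointer to one):
+//
+//   struct Entry {
+//       uint32_t fID;
+//       static const uint32_t& GetKey(const Entry& e) { return e.fID; }
+//       static uint32_t Hash(const uint32_t& key) { return key * 2654435761u; }
+//   };
+//
+//   SkTDynamicHash<Entry, uint32_t> hash;
+//   hash.add(entry);                      // the hash stores the pointer; it does not copy
+//   Entry* found = hash.find(entry->fID);
+//   hash.remove(entry->fID);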
+template <typename T,
+ typename Key,
+ typename Traits = T,
+ int kGrowPercent = 75> // Larger -> more memory efficient, but slower.
+class SkTDynamicHash {
+public:
+ SkTDynamicHash() : fCount(0), fDeleted(0), fCapacity(0), fArray(nullptr) {
+ SkASSERT(this->validate());
+ }
+
+ ~SkTDynamicHash() {
+ sk_free(fArray);
+ }
+
+ class Iter {
+ public:
+ explicit Iter(SkTDynamicHash* hash) : fHash(hash), fCurrentIndex(-1) {
+ SkASSERT(hash);
+ ++(*this);
+ }
+ bool done() const {
+ SkASSERT(fCurrentIndex <= fHash->fCapacity);
+ return fCurrentIndex == fHash->fCapacity;
+ }
+ T& operator*() const {
+ SkASSERT(!this->done());
+ return *this->current();
+ }
+ void operator++() {
+ do {
+ fCurrentIndex++;
+ } while (!this->done() && (this->current() == Empty() || this->current() == Deleted()));
+ }
+
+ private:
+ T* current() const { return fHash->fArray[fCurrentIndex]; }
+
+ SkTDynamicHash* fHash;
+ int fCurrentIndex;
+ };
+
+ class ConstIter {
+ public:
+ explicit ConstIter(const SkTDynamicHash* hash) : fHash(hash), fCurrentIndex(-1) {
+ SkASSERT(hash);
+ ++(*this);
+ }
+ bool done() const {
+ SkASSERT(fCurrentIndex <= fHash->fCapacity);
+ return fCurrentIndex == fHash->fCapacity;
+ }
+ const T& operator*() const {
+ SkASSERT(!this->done());
+ return *this->current();
+ }
+ void operator++() {
+ do {
+ fCurrentIndex++;
+ } while (!this->done() && (this->current() == Empty() || this->current() == Deleted()));
+ }
+
+ private:
+ const T* current() const { return fHash->fArray[fCurrentIndex]; }
+
+ const SkTDynamicHash* fHash;
+ int fCurrentIndex;
+ };
+
+ int count() const { return fCount; }
+
+ // Return the entry with this key if we have it, otherwise nullptr.
+ T* find(const Key& key) const {
+ int index = this->firstIndex(key);
+ for (int round = 0; round < fCapacity; round++) {
+ SkASSERT(index >= 0 && index < fCapacity);
+ T* candidate = fArray[index];
+ if (Empty() == candidate) {
+ return nullptr;
+ }
+ if (Deleted() != candidate && GetKey(*candidate) == key) {
+ return candidate;
+ }
+ index = this->nextIndex(index, round);
+ }
+ SkASSERT(fCapacity == 0);
+ return nullptr;
+ }
+
+ // Add an entry with this key. We require that no entry with newEntry's key is already present.
+ void add(T* newEntry) {
+ SkASSERT(nullptr == this->find(GetKey(*newEntry)));
+ this->maybeGrow();
+ this->innerAdd(newEntry);
+ SkASSERT(this->validate());
+ }
+
+ // Remove the entry with this key. We require that an entry with this key is present.
+ void remove(const Key& key) {
+ SkASSERT(this->find(key));
+ this->innerRemove(key);
+ SkASSERT(this->validate());
+ }
+
+ void rewind() {
+ if (fArray) {
+ sk_bzero(fArray, sizeof(T*) * fCapacity);
+ }
+ fCount = 0;
+ fDeleted = 0;
+ }
+
+ void reset() {
+ fCount = 0;
+ fDeleted = 0;
+ fCapacity = 0;
+ sk_free(fArray);
+ fArray = nullptr;
+ }
+
+protected:
+ // These methods are used by tests only.
+
+ int capacity() const { return fCapacity; }
+
+ // How many collisions do we go through before finding where this entry should be inserted?
+ int countCollisions(const Key& key) const {
+ int index = this->firstIndex(key);
+ for (int round = 0; round < fCapacity; round++) {
+ SkASSERT(index >= 0 && index < fCapacity);
+ const T* candidate = fArray[index];
+ if (Empty() == candidate || Deleted() == candidate || GetKey(*candidate) == key) {
+ return round;
+ }
+ index = this->nextIndex(index, round);
+ }
+ SkASSERT(fCapacity == 0);
+ return 0;
+ }
+
+private:
+ // We have two special values to indicate an empty or deleted entry.
+ static T* Empty() { return reinterpret_cast<T*>(0); } // i.e. nullptr
+ static T* Deleted() { return reinterpret_cast<T*>(1); } // Also an invalid pointer.
+
+ bool validate() const {
+ #define SKTDYNAMICHASH_CHECK(x) SkASSERT(x); if (!(x)) return false
+ static const int kLarge = 50; // Arbitrary, tweak to suit your patience.
+
+ // O(1) checks, always done.
+ // Is capacity sane?
+ SKTDYNAMICHASH_CHECK(SkIsPow2(fCapacity));
+
+ // O(N) checks, skipped when very large.
+ if (fCount < kLarge * kLarge) {
+ // Are fCount and fDeleted correct, and are all elements findable?
+ int count = 0, deleted = 0;
+ for (int i = 0; i < fCapacity; i++) {
+ if (Deleted() == fArray[i]) {
+ deleted++;
+ } else if (Empty() != fArray[i]) {
+ count++;
+ SKTDYNAMICHASH_CHECK(this->find(GetKey(*fArray[i])));
+ }
+ }
+ SKTDYNAMICHASH_CHECK(count == fCount);
+ SKTDYNAMICHASH_CHECK(deleted == fDeleted);
+ }
+
+ // O(N^2) checks, skipped when large.
+ if (fCount < kLarge) {
+ // Are all entries unique?
+ for (int i = 0; i < fCapacity; i++) {
+ if (Empty() == fArray[i] || Deleted() == fArray[i]) {
+ continue;
+ }
+ for (int j = i+1; j < fCapacity; j++) {
+ if (Empty() == fArray[j] || Deleted() == fArray[j]) {
+ continue;
+ }
+ SKTDYNAMICHASH_CHECK(fArray[i] != fArray[j]);
+ SKTDYNAMICHASH_CHECK(!(GetKey(*fArray[i]) == GetKey(*fArray[j])));
+ }
+ }
+ }
+ #undef SKTDYNAMICHASH_CHECK
+ return true;
+ }
+
+ void innerAdd(T* newEntry) {
+ const Key& key = GetKey(*newEntry);
+ int index = this->firstIndex(key);
+ for (int round = 0; round < fCapacity; round++) {
+ SkASSERT(index >= 0 && index < fCapacity);
+ const T* candidate = fArray[index];
+ if (Empty() == candidate || Deleted() == candidate) {
+ if (Deleted() == candidate) {
+ fDeleted--;
+ }
+ fCount++;
+ fArray[index] = newEntry;
+ return;
+ }
+ index = this->nextIndex(index, round);
+ }
+ SkASSERT(fCapacity == 0);
+ }
+
+ void innerRemove(const Key& key) {
+ const int firstIndex = this->firstIndex(key);
+ int index = firstIndex;
+ for (int round = 0; round < fCapacity; round++) {
+ SkASSERT(index >= 0 && index < fCapacity);
+ const T* candidate = fArray[index];
+ if (Deleted() != candidate && GetKey(*candidate) == key) {
+ fDeleted++;
+ fCount--;
+ fArray[index] = Deleted();
+ return;
+ }
+ index = this->nextIndex(index, round);
+ }
+ SkASSERT(fCapacity == 0);
+ }
+
+ void maybeGrow() {
+ if (100 * (fCount + fDeleted + 1) > fCapacity * kGrowPercent) {
+ this->resize(fCapacity > 0 ? fCapacity * 2 : 4);
+ }
+ }
+
+ void resize(int newCapacity) {
+ SkDEBUGCODE(int oldCount = fCount;)
+ int oldCapacity = fCapacity;
+ SkAutoTMalloc<T*> oldArray(fArray);
+
+ fCount = fDeleted = 0;
+ fCapacity = newCapacity;
+ fArray = (T**)sk_calloc_throw(sizeof(T*) * fCapacity);
+
+ for (int i = 0; i < oldCapacity; i++) {
+ T* entry = oldArray[i];
+ if (Empty() != entry && Deleted() != entry) {
+ this->innerAdd(entry);
+ }
+ }
+ SkASSERT(oldCount == fCount);
+ }
+
+ // fCapacity is always a power of 2, so this masks the correct low bits to index into our hash.
+ uint32_t hashMask() const { return fCapacity - 1; }
+
+ int firstIndex(const Key& key) const {
+ return Hash(key) & this->hashMask();
+ }
+
+ // Given index at round N, what is the index to check at N+1? round should start at 0.
+ int nextIndex(int index, int round) const {
+ // This will search a power-of-two array fully without repeating an index.
+ return (index + round + 1) & this->hashMask();
+ }
+
+ static const Key& GetKey(const T& t) { return Traits::GetKey(t); }
+ static uint32_t Hash(const Key& key) { return Traits::Hash(key); }
+
+ int fCount; // Number of non Empty(), non Deleted() entries in fArray.
+ int fDeleted; // Number of Deleted() entries in fArray.
+ int fCapacity; // Number of entries in fArray. Always a power of 2.
+ T** fArray;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTInternalLList.h b/gfx/skia/skia/src/core/SkTInternalLList.h
new file mode 100644
index 000000000..1aa1a1220
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTInternalLList.h
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTInternalLList_DEFINED
+#define SkTInternalLList_DEFINED
+
+#include "SkTypes.h"
+
+/**
+ * Helper class that wraps the pointers used by the doubly linked list so they are automatically
+ * initialized to NULL.
+ */
+template <typename T> class SkPtrWrapper {
+ public:
+ SkPtrWrapper() : fPtr(NULL) {}
+ SkPtrWrapper& operator =(T* ptr) { fPtr = ptr; return *this; }
+ operator T*() const { return fPtr; }
+ T* operator->() { return fPtr; }
+ private:
+ T* fPtr;
+};
+
+
+/**
+ * This macro creates the member variables required by the SkTInternalLList class. It should be
+ * placed in the private section of any class that will be stored in a doubly linked list.
+ */
+#define SK_DECLARE_INTERNAL_LLIST_INTERFACE(ClassName) \
+ friend class SkTInternalLList<ClassName>; \
+ /* back pointer to the owning list - for debugging */ \
+ SkDEBUGCODE(SkPtrWrapper<SkTInternalLList<ClassName> > fList;) \
+ SkPtrWrapper<ClassName> fPrev; \
+ SkPtrWrapper<ClassName> fNext
+
+/**
+ * This class implements a templated internal doubly linked list data structure.
+ */
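+//
+// A minimal usage sketch (illustrative only; Item, 'a' and 'b' are hypothetical):
+//
+//   class Item {
+//   public:
+//       int fValue;
+//   private:
+//       SK_DECLARE_INTERNAL_LLIST_INTERFACE(Item);   // adds fPrev/fNext (and fList in debug)
+//   };
+//
+//   SkTInternalLList<Item> list;
+//   list.addToHead(&a);          // the list does not own the entries
+//   list.addToTail(&b);
+//   list.remove(&a);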
+template <class T> class SkTInternalLList : SkNoncopyable {
+public:
+ SkTInternalLList()
+ : fHead(NULL)
+ , fTail(NULL) {
+ }
+
+ void remove(T* entry) {
+ SkASSERT(fHead && fTail);
+ SkASSERT(this->isInList(entry));
+
+ T* prev = entry->fPrev;
+ T* next = entry->fNext;
+
+ if (prev) {
+ prev->fNext = next;
+ } else {
+ fHead = next;
+ }
+ if (next) {
+ next->fPrev = prev;
+ } else {
+ fTail = prev;
+ }
+
+ entry->fPrev = NULL;
+ entry->fNext = NULL;
+
+#ifdef SK_DEBUG
+ entry->fList = NULL;
+#endif
+ }
+
+ void addToHead(T* entry) {
+ SkASSERT(NULL == entry->fPrev && NULL == entry->fNext);
+ SkASSERT(NULL == entry->fList);
+
+ entry->fPrev = NULL;
+ entry->fNext = fHead;
+ if (fHead) {
+ fHead->fPrev = entry;
+ }
+ fHead = entry;
+ if (NULL == fTail) {
+ fTail = entry;
+ }
+
+#ifdef SK_DEBUG
+ entry->fList = this;
+#endif
+ }
+
+ void addToTail(T* entry) {
+ SkASSERT(NULL == entry->fPrev && NULL == entry->fNext);
+ SkASSERT(NULL == entry->fList);
+
+ entry->fPrev = fTail;
+ entry->fNext = NULL;
+ if (fTail) {
+ fTail->fNext = entry;
+ }
+ fTail = entry;
+ if (NULL == fHead) {
+ fHead = entry;
+ }
+
+#ifdef SK_DEBUG
+ entry->fList = this;
+#endif
+ }
+
+ /**
+ * Inserts a new list entry before an existing list entry. The new entry must not already be
+ * a member of this or any other list. If existingEntry is NULL then the new entry is added
+ * at the tail.
+ */
+ void addBefore(T* newEntry, T* existingEntry) {
+ SkASSERT(newEntry);
+
+ if (NULL == existingEntry) {
+ this->addToTail(newEntry);
+ return;
+ }
+
+ SkASSERT(this->isInList(existingEntry));
+ newEntry->fNext = existingEntry;
+ T* prev = existingEntry->fPrev;
+ existingEntry->fPrev = newEntry;
+ newEntry->fPrev = prev;
+ if (NULL == prev) {
+ SkASSERT(fHead == existingEntry);
+ fHead = newEntry;
+ } else {
+ prev->fNext = newEntry;
+ }
+#ifdef SK_DEBUG
+ newEntry->fList = this;
+#endif
+ }
+
+ /**
+ * Inserts a new list entry after an existing list entry. The new entry must not already be
+ * a member of this or any other list. If existingEntry is NULL then the new entry is added
+ * at the head.
+ */
+ void addAfter(T* newEntry, T* existingEntry) {
+ SkASSERT(newEntry);
+
+ if (NULL == existingEntry) {
+ this->addToHead(newEntry);
+ return;
+ }
+
+ SkASSERT(this->isInList(existingEntry));
+ newEntry->fPrev = existingEntry;
+ T* next = existingEntry->fNext;
+ existingEntry->fNext = newEntry;
+ newEntry->fNext = next;
+ if (NULL == next) {
+ SkASSERT(fTail == existingEntry);
+ fTail = newEntry;
+ } else {
+ next->fPrev = newEntry;
+ }
+#ifdef SK_DEBUG
+ newEntry->fList = this;
+#endif
+ }
+
+ bool isEmpty() const {
+ return NULL == fHead && NULL == fTail;
+ }
+
+ T* head() { return fHead; }
+ T* tail() { return fTail; }
+
+ class Iter {
+ public:
+ enum IterStart {
+ kHead_IterStart,
+ kTail_IterStart
+ };
+
+ Iter() : fCurr(NULL) {}
+ Iter(const Iter& iter) : fCurr(iter.fCurr) {}
+ Iter& operator= (const Iter& iter) { fCurr = iter.fCurr; return *this; }
+
+ T* init(const SkTInternalLList& list, IterStart startLoc) {
+ if (kHead_IterStart == startLoc) {
+ fCurr = list.fHead;
+ } else {
+ SkASSERT(kTail_IterStart == startLoc);
+ fCurr = list.fTail;
+ }
+
+ return fCurr;
+ }
+
+ T* get() { return fCurr; }
+
+ /**
+ * Return the next/previous element in the list or NULL if at the end.
+ */
+ T* next() {
+ if (NULL == fCurr) {
+ return NULL;
+ }
+
+ fCurr = fCurr->fNext;
+ return fCurr;
+ }
+
+ T* prev() {
+ if (NULL == fCurr) {
+ return NULL;
+ }
+
+ fCurr = fCurr->fPrev;
+ return fCurr;
+ }
+
+ private:
+ T* fCurr;
+ };
+
+#ifdef SK_DEBUG
+ void validate() const {
+ SkASSERT(!fHead == !fTail);
+ Iter iter;
+ for (T* item = iter.init(*this, Iter::kHead_IterStart); item; item = iter.next()) {
+ SkASSERT(this->isInList(item));
+ if (NULL == item->fPrev) {
+ SkASSERT(fHead == item);
+ } else {
+ SkASSERT(item->fPrev->fNext == item);
+ }
+ if (NULL == item->fNext) {
+ SkASSERT(fTail == item);
+ } else {
+ SkASSERT(item->fNext->fPrev == item);
+ }
+ }
+ }
+
+ /**
+ * Debugging-only method that uses the list back pointer to check if 'entry' is indeed in 'this'
+ * list.
+ */
+ bool isInList(const T* entry) const {
+ return entry->fList == this;
+ }
+
+ /**
+ * Debugging-only method that laboriously counts the list entries.
+ */
+ int countEntries() const {
+ int count = 0;
+ for (T* entry = fHead; entry; entry = entry->fNext) {
+ ++count;
+ }
+ return count;
+ }
+#endif // SK_DEBUG
+
+private:
+ T* fHead;
+ T* fTail;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTLList.h b/gfx/skia/skia/src/core/SkTLList.h
new file mode 100644
index 000000000..58fa3f4aa
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTLList.h
@@ -0,0 +1,348 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTLList_DEFINED
+#define SkTLList_DEFINED
+
+#include "SkTInternalLList.h"
+#include "SkTypes.h"
+#include <utility>
+
+/** Doubly-linked list of objects. The objects' lifetimes are controlled by the list. That is,
+ the list creates the objects and they are deleted upon removal. This class block-allocates
+ space for entries based on the template parameter N.
+
+ Elements of the list are constructed in place via addToHead(), addToTail(), addBefore(), and
+ addAfter(), which forward their arguments to T's constructor. addBefore() and addAfter() take
+ an iterator naming the insertion point.
+
+ N is the number of objects to allocate as a group. In the worst case of fragmentation,
+ each object uses the space required for N unfragmented objects.
+*/
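+//
+// A minimal usage sketch (illustrative only; Shape is a hypothetical type):
+//
+//   SkTLList<Shape, 16> list;              // block-allocates 16 nodes at a time
+//   Shape* s = list.addToTail(/* Shape constructor args */);
+//   list.remove(s);                        // the list destroys the Shape it created
+//   list.reset();                          // destroys any remaining elements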
+template <typename T, unsigned int N> class SkTLList : SkNoncopyable {
+private:
+ struct Block;
+ struct Node {
+ char fObj[sizeof(T)];
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(Node);
+ Block* fBlock; // owning block.
+ };
+ typedef SkTInternalLList<Node> NodeList;
+
+public:
+ class Iter;
+
+ // Having fCount initialized to -1 indicates that the first time we attempt to grab a free node
+ // all the nodes in the pre-allocated first block need to be inserted into the free list. This
+ // allows us to skip that loop in instances when the list is never populated.
+ SkTLList() : fCount(-1) {}
+
+ ~SkTLList() {
+ this->validate();
+ typename NodeList::Iter iter;
+ Node* node = iter.init(fList, Iter::kHead_IterStart);
+ while (node) {
+ SkTCast<T*>(node->fObj)->~T();
+ Block* block = node->fBlock;
+ node = iter.next();
+ if (0 == --block->fNodesInUse) {
+ for (unsigned int i = 0; i < N; ++i) {
+ block->fNodes[i].~Node();
+ }
+ if (block != &fFirstBlock) {
+ sk_free(block);
+ }
+ }
+ }
+ }
+
+ /** Adds a new element to the list at the head. */
+ template <typename... Args> T* addToHead(Args&&... args) {
+ this->validate();
+ Node* node = this->createNode();
+ fList.addToHead(node);
+ this->validate();
+ return new (node->fObj) T(std::forward<Args>(args)...);
+ }
+
+ /** Adds a new element to the list at the tail. */
+ template <typename... Args> T* addToTail(Args&&... args) {
+ this->validate();
+ Node* node = this->createNode();
+ fList.addToTail(node);
+ this->validate();
+ return new (node->fObj) T(std::forward<Args>(args)...);
+ }
+
+ /** Adds a new element to the list before the location indicated by the iterator. If the
+ iterator refers to a nullptr location then the new element is added at the tail */
+ template <typename... Args> T* addBefore(Iter location, Args&&... args) {
+ this->validate();
+ Node* node = this->createNode();
+ fList.addBefore(node, location.getNode());
+ this->validate();
+ return new (node->fObj) T(std::forward<Args>(args)...);
+ }
+
+ /** Adds a new element to the list after the location indicated by the iterator. If the
+ iterator refers to a nullptr location then the new element is added at the head */
+ template <typename... Args> T* addAfter(Iter location, Args&&... args) {
+ this->validate();
+ Node* node = this->createNode();
+ fList.addAfter(node, location.getNode());
+ this->validate();
+ return new (node->fObj) T(std::forward<Args>(args)...);
+ }
+
+ /** Convenience methods for getting an iterator initialized to the head/tail of the list. */
+ Iter headIter() const { return Iter(*this, Iter::kHead_IterStart); }
+ Iter tailIter() const { return Iter(*this, Iter::kTail_IterStart); }
+
+ T* head() { return Iter(*this, Iter::kHead_IterStart).get(); }
+ T* tail() { return Iter(*this, Iter::kTail_IterStart).get(); }
+ const T* head() const { return Iter(*this, Iter::kHead_IterStart).get(); }
+ const T* tail() const { return Iter(*this, Iter::kTail_IterStart).get(); }
+
+ void popHead() {
+ this->validate();
+ Node* node = fList.head();
+ if (node) {
+ this->removeNode(node);
+ }
+ this->validate();
+ }
+
+ void popTail() {
+ this->validate();
+ Node* node = fList.tail();
+ if (node) {
+ this->removeNode(node);
+ }
+ this->validate();
+ }
+
+ void remove(T* t) {
+ this->validate();
+ Node* node = reinterpret_cast<Node*>(t);
+ SkASSERT(reinterpret_cast<T*>(node->fObj) == t);
+ this->removeNode(node);
+ this->validate();
+ }
+
+ void reset() {
+ this->validate();
+ Iter iter(*this, Iter::kHead_IterStart);
+ while (iter.get()) {
+ Iter next = iter;
+ next.next();
+ this->remove(iter.get());
+ iter = next;
+ }
+ SkASSERT(0 == fCount || -1 == fCount);
+ this->validate();
+ }
+
+ int count() const { return SkTMax(fCount, 0); }
+ bool isEmpty() const { this->validate(); return 0 == fCount || -1 == fCount; }
+
+ bool operator== (const SkTLList& list) const {
+ if (this == &list) {
+ return true;
+ }
+ // Call count() rather than use fCount because an empty list may have fCount = 0 or -1.
+ if (this->count() != list.count()) {
+ return false;
+ }
+ for (Iter a(*this, Iter::kHead_IterStart), b(list, Iter::kHead_IterStart);
+ a.get();
+ a.next(), b.next()) {
+ SkASSERT(b.get()); // already checked that counts match.
+ if (!(*a.get() == *b.get())) {
+ return false;
+ }
+ }
+ return true;
+ }
+ bool operator!= (const SkTLList& list) const { return !(*this == list); }
+
+ /** The iterator becomes invalid if the element it refers to is removed from the list. */
+ class Iter : private NodeList::Iter {
+ private:
+ typedef typename NodeList::Iter INHERITED;
+
+ public:
+ typedef typename INHERITED::IterStart IterStart;
+ //!< Start the iterator at the head of the list.
+ static const IterStart kHead_IterStart = INHERITED::kHead_IterStart;
+ //!< Start the iterator at the tail of the list.
+ static const IterStart kTail_IterStart = INHERITED::kTail_IterStart;
+
+ Iter() {}
+
+ Iter(const SkTLList& list, IterStart start = kHead_IterStart) {
+ INHERITED::init(list.fList, start);
+ }
+
+ T* init(const SkTLList& list, IterStart start = kHead_IterStart) {
+ return this->nodeToObj(INHERITED::init(list.fList, start));
+ }
+
+ T* get() { return this->nodeToObj(INHERITED::get()); }
+
+ T* next() { return this->nodeToObj(INHERITED::next()); }
+
+ T* prev() { return this->nodeToObj(INHERITED::prev()); }
+
+ Iter& operator= (const Iter& iter) { INHERITED::operator=(iter); return *this; }
+
+ private:
+ friend class SkTLList;
+ Node* getNode() { return INHERITED::get(); }
+
+ T* nodeToObj(Node* node) {
+ if (node) {
+ return reinterpret_cast<T*>(node->fObj);
+ } else {
+ return nullptr;
+ }
+ }
+ };
+
+private:
+ struct Block {
+ int fNodesInUse;
+ Node fNodes[N];
+ };
+
+ void delayedInit() {
+ SkASSERT(-1 == fCount);
+ fFirstBlock.fNodesInUse = 0;
+ for (unsigned int i = 0; i < N; ++i) {
+ fFreeList.addToHead(fFirstBlock.fNodes + i);
+ fFirstBlock.fNodes[i].fBlock = &fFirstBlock;
+ }
+ fCount = 0;
+ this->validate();
+ }
+
+ Node* createNode() {
+ if (-1 == fCount) {
+ this->delayedInit();
+ }
+ Node* node = fFreeList.head();
+ if (node) {
+ fFreeList.remove(node);
+ ++node->fBlock->fNodesInUse;
+ } else {
+ // Should not get here when count == 0 because we always have the preallocated first
+ // block.
+ SkASSERT(fCount > 0);
+ Block* block = reinterpret_cast<Block*>(sk_malloc_throw(sizeof(Block)));
+ node = &block->fNodes[0];
+ new (node) Node;
+ node->fBlock = block;
+ block->fNodesInUse = 1;
+ for (unsigned int i = 1; i < N; ++i) {
+ new (block->fNodes + i) Node;
+ fFreeList.addToHead(block->fNodes + i);
+ block->fNodes[i].fBlock = block;
+ }
+ }
+ ++fCount;
+ return node;
+ }
+
+ void removeNode(Node* node) {
+ SkASSERT(node);
+ fList.remove(node);
+ SkTCast<T*>(node->fObj)->~T();
+ Block* block = node->fBlock;
+ // Don't ever release the first block, just add its nodes to the free list
+ if (0 == --block->fNodesInUse && block != &fFirstBlock) {
+ for (unsigned int i = 0; i < N; ++i) {
+ if (block->fNodes + i != node) {
+ fFreeList.remove(block->fNodes + i);
+ }
+ block->fNodes[i].~Node();
+ }
+ sk_free(block);
+ } else {
+ fFreeList.addToHead(node);
+ }
+ --fCount;
+ this->validate();
+ }
+
+ void validate() const {
+#ifdef SK_DEBUG
+ bool isEmpty = false;
+ if (-1 == fCount) {
+ // We should not yet have initialized the free list.
+ SkASSERT(fFreeList.isEmpty());
+ isEmpty = true;
+ } else if (0 == fCount) {
+ // Should only have the nodes from the first block in the free list.
+ SkASSERT(fFreeList.countEntries() == N);
+ isEmpty = true;
+ }
+ SkASSERT(isEmpty == fList.isEmpty());
+ fList.validate();
+ fFreeList.validate();
+ typename NodeList::Iter iter;
+ Node* freeNode = iter.init(fFreeList, Iter::kHead_IterStart);
+ while (freeNode) {
+ SkASSERT(fFreeList.isInList(freeNode));
+ Block* block = freeNode->fBlock;
+ // Only the first block is allowed to have all its nodes in the free list.
+ SkASSERT(block->fNodesInUse > 0 || block == &fFirstBlock);
+ SkASSERT((unsigned)block->fNodesInUse < N);
+ int activeCnt = 0;
+ int freeCnt = 0;
+ for (unsigned int i = 0; i < N; ++i) {
+ bool free = fFreeList.isInList(block->fNodes + i);
+ bool active = fList.isInList(block->fNodes + i);
+ SkASSERT(free != active);
+ activeCnt += active;
+ freeCnt += free;
+ }
+ SkASSERT(activeCnt == block->fNodesInUse);
+ freeNode = iter.next();
+ }
+
+ int count = 0;
+ Node* activeNode = iter.init(fList, Iter::kHead_IterStart);
+ while (activeNode) {
+ ++count;
+ SkASSERT(fList.isInList(activeNode));
+ Block* block = activeNode->fBlock;
+ SkASSERT(block->fNodesInUse > 0 && (unsigned)block->fNodesInUse <= N);
+
+ int activeCnt = 0;
+ int freeCnt = 0;
+ for (unsigned int i = 0; i < N; ++i) {
+ bool free = fFreeList.isInList(block->fNodes + i);
+ bool active = fList.isInList(block->fNodes + i);
+ SkASSERT(free != active);
+ activeCnt += active;
+ freeCnt += free;
+ }
+ SkASSERT(activeCnt == block->fNodesInUse);
+ activeNode = iter.next();
+ }
+ SkASSERT(count == fCount || (0 == count && -1 == fCount));
+#endif
+ }
+
+ NodeList fList;
+ NodeList fFreeList;
+ Block fFirstBlock;
+ int fCount;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTLS.cpp b/gfx/skia/skia/src/core/SkTLS.cpp
new file mode 100755
index 000000000..a47dc14b5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTLS.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTLS.h"
+
+// enable to help debug TLS storage
+//#define SK_TRACE_TLS_LIFETIME
+
+
+#ifdef SK_TRACE_TLS_LIFETIME
+ #include "SkAtomics.h"
+ static int32_t gTLSRecCount;
+#endif
+
+struct SkTLSRec {
+ SkTLSRec* fNext;
+ void* fData;
+ SkTLS::CreateProc fCreateProc;
+ SkTLS::DeleteProc fDeleteProc;
+
+#ifdef SK_TRACE_TLS_LIFETIME
+ SkTLSRec() {
+ int n = sk_atomic_inc(&gTLSRecCount);
+ SkDebugf(" SkTLSRec[%d]\n", n);
+ }
+#endif
+
+ ~SkTLSRec() {
+ if (fDeleteProc) {
+ fDeleteProc(fData);
+ }
+ // else we leak fData, or it will be managed by the caller
+
+#ifdef SK_TRACE_TLS_LIFETIME
+ int n = sk_atomic_dec(&gTLSRecCount);
+ SkDebugf("~SkTLSRec[%d]\n", n - 1);
+#endif
+ }
+};
+
+void SkTLS::Destructor(void* ptr) {
+#ifdef SK_TRACE_TLS_LIFETIME
+ SkDebugf("SkTLS::Destructor(%p)\n", ptr);
+#endif
+
+ SkTLSRec* rec = (SkTLSRec*)ptr;
+ do {
+ SkTLSRec* next = rec->fNext;
+ delete rec;
+ rec = next;
+ } while (rec);
+}
+
+void* SkTLS::Get(CreateProc createProc, DeleteProc deleteProc) {
+ if (nullptr == createProc) {
+ return nullptr;
+ }
+
+ void* ptr = SkTLS::PlatformGetSpecific(true);
+
+ if (ptr) {
+ const SkTLSRec* rec = (const SkTLSRec*)ptr;
+ do {
+ if (rec->fCreateProc == createProc) {
+ SkASSERT(rec->fDeleteProc == deleteProc);
+ return rec->fData;
+ }
+ } while ((rec = rec->fNext) != nullptr);
+ // not found, so create a new one
+ }
+
+ // add a new head to our chain
+ SkTLSRec* rec = new SkTLSRec;
+ rec->fNext = (SkTLSRec*)ptr;
+
+ SkTLS::PlatformSetSpecific(rec);
+
+ rec->fData = createProc();
+ rec->fCreateProc = createProc;
+ rec->fDeleteProc = deleteProc;
+ return rec->fData;
+}
+
+void* SkTLS::Find(CreateProc createProc) {
+ if (nullptr == createProc) {
+ return nullptr;
+ }
+
+ void* ptr = SkTLS::PlatformGetSpecific(false);
+
+ if (ptr) {
+ const SkTLSRec* rec = (const SkTLSRec*)ptr;
+ do {
+ if (rec->fCreateProc == createProc) {
+ return rec->fData;
+ }
+ } while ((rec = rec->fNext) != nullptr);
+ }
+ return nullptr;
+}
+
+void SkTLS::Delete(CreateProc createProc) {
+ if (nullptr == createProc) {
+ return;
+ }
+
+ void* ptr = SkTLS::PlatformGetSpecific(false);
+
+ SkTLSRec* curr = (SkTLSRec*)ptr;
+ SkTLSRec* prev = nullptr;
+ while (curr) {
+ SkTLSRec* next = curr->fNext;
+ if (curr->fCreateProc == createProc) {
+ if (prev) {
+ prev->fNext = next;
+ } else {
+ // we have a new head of our chain
+ SkTLS::PlatformSetSpecific(next);
+ }
+ delete curr;
+ break;
+ }
+ prev = curr;
+ curr = next;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkTLS.h b/gfx/skia/skia/src/core/SkTLS.h
new file mode 100644
index 000000000..ace2de59e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTLS.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTLS_DEFINED
+#define SkTLS_DEFINED
+
+#include "SkTypes.h"
+
+/**
+ * Maintains a per-thread cache, using a CreateProc as the key into that cache.
+ */
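+//
+// A minimal usage sketch (illustrative only; FooCache is a hypothetical per-thread type):
+//
+//   static void* create_foo_cache() { return new FooCache; }
+//   static void delete_foo_cache(void* ptr) { delete (FooCache*)ptr; }
+//
+//   FooCache* cache = (FooCache*)SkTLS::Get(create_foo_cache, delete_foo_cache);
+//   // 'cache' is created once per thread and deleted when the thread exits
+//   // (or when SkTLS::Delete(create_foo_cache) is called on that thread).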
+class SkTLS {
+public:
+ typedef void* (*CreateProc)();
+ typedef void (*DeleteProc)(void*);
+
+ /**
+ * If Get() has previously been called with this CreateProc, then this
+ * returns its cached data, otherwise it returns nullptr. The CreateProc is
+ * never invoked in Find, it is only used as a key for searching the
+ * cache.
+ */
+ static void* Find(CreateProc);
+
+ /**
+ * Return the cached data that was returned by the CreateProc. This proc
+ * is only called the first time Get is called, and thereafter it is
+ * cached (per-thread), using the CreateProc as a key to look it up.
+ *
+ * When this thread exits, or Delete is called, the cached data is removed, and
+ * if a DeleteProc was specified, it is passed the pointer to the cached
+ * data.
+ */
+ static void* Get(CreateProc, DeleteProc);
+
+ /**
+ * Remove (optionally calling the DeleteProc if it was specified in Get)
+ * the cached data associated with this CreateProc. If no associated cached
+ * data is found, do nothing.
+ */
+ static void Delete(CreateProc);
+
+private:
+ // Our implementation requires only 1 TLS slot, as we manage multiple values
+ // ourselves in a list, with the platform specific value as our head.
+
+ /**
+ * Implemented by the platform, to return the value of our (one) slot per-thread
+ *
+ * If forceCreateTheSlot is true, then we must have created the "slot" for
+ * our TLS, even though we know that the return value will be nullptr in that
+ * case (i.e. no-slot and first-time-slot both return nullptr). This ensures
+ * that after calling GetSpecific, we know that we can legally call
+ * SetSpecific.
+ *
+ * If forceCreateTheSlot is false, then the impl can either create the
+ * slot or not.
+ */
+ static void* PlatformGetSpecific(bool forceCreateTheSlot);
+
+ /**
+ * Implemented by the platform, to set the value for our (one) slot per-thread
+ *
+ * The implementation can rely on GetSpecific(true) having been previously
+ * called before SetSpecific is called.
+ */
+ static void PlatformSetSpecific(void*);
+
+public:
+ /**
+ * Will delete our internal list. To be called by the platform if/when its
+ * TLS slot is deleted (often at thread shutdown).
+ *
+ * Public *only* for the platform's use, not to be called by a client.
+ */
+ static void Destructor(void* ptr);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTMultiMap.h b/gfx/skia/skia/src/core/SkTMultiMap.h
new file mode 100644
index 000000000..dc521debc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTMultiMap.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTMultiMap_DEFINED
+#define SkTMultiMap_DEFINED
+
+#include "GrTypes.h"
+#include "SkTDynamicHash.h"
+
+/** A set that contains pointers to instances of T. Instances can be looked up with key Key.
+ * Multiple (possibly same) values can have the same key.
+ */
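+//
+// A minimal usage sketch (illustrative only; Resource is a hypothetical type that provides
+// GetKey() and Hash() in the SkTDynamicHash style; 'key', 'resourceA' and 'resourceB' are
+// hypothetical as well):
+//
+//   SkTMultiMap<Resource, uint32_t> map;
+//   map.insert(key, resourceA);
+//   map.insert(key, resourceB);            // the same key may map to several values
+//   Resource* any = map.find(key);         // returns one of the values stored for 'key'
+//   map.remove(key, resourceA);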
+template <typename T,
+ typename Key,
+ typename HashTraits=T>
+class SkTMultiMap {
+ struct ValueList {
+ explicit ValueList(T* value) : fValue(value), fNext(nullptr) {}
+
+ static const Key& GetKey(const ValueList& e) { return HashTraits::GetKey(*e.fValue); }
+ static uint32_t Hash(const Key& key) { return HashTraits::Hash(key); }
+ T* fValue;
+ ValueList* fNext;
+ };
+public:
+ SkTMultiMap() : fCount(0) {}
+
+ ~SkTMultiMap() {
+ SkASSERT(fCount == 0);
+ SkASSERT(fHash.count() == 0);
+ }
+
+ void insert(const Key& key, T* value) {
+ ValueList* list = fHash.find(key);
+ if (list) {
+ // The new ValueList entry is inserted as the second element in the
+ // linked list, and it will contain the value of the first element.
+ ValueList* newEntry = new ValueList(list->fValue);
+ newEntry->fNext = list->fNext;
+ // The existing first ValueList entry is updated to contain the
+ // inserted value.
+ list->fNext = newEntry;
+ list->fValue = value;
+ } else {
+ fHash.add(new ValueList(value));
+ }
+
+ ++fCount;
+ }
+
+ void remove(const Key& key, const T* value) {
+ ValueList* list = fHash.find(key);
+ // Since we expect the caller to be fully aware of what is stored, just
+ // assert that the caller removes an existing value.
+ SkASSERT(list);
+ ValueList* prev = nullptr;
+ while (list->fValue != value) {
+ prev = list;
+ list = list->fNext;
+ }
+
+ if (list->fNext) {
+ ValueList* next = list->fNext;
+ list->fValue = next->fValue;
+ list->fNext = next->fNext;
+ delete next;
+ } else if (prev) {
+ prev->fNext = nullptr;
+ delete list;
+ } else {
+ fHash.remove(key);
+ delete list;
+ }
+
+ --fCount;
+ }
+
+ T* find(const Key& key) const {
+ ValueList* list = fHash.find(key);
+ if (list) {
+ return list->fValue;
+ }
+ return nullptr;
+ }
+
+ template<class FindPredicate>
+ T* find(const Key& key, const FindPredicate f) {
+ ValueList* list = fHash.find(key);
+ while (list) {
+ if (f(list->fValue)){
+ return list->fValue;
+ }
+ list = list->fNext;
+ }
+ return nullptr;
+ }
+
+ int count() const { return fCount; }
+
+#ifdef SK_DEBUG
+ class ConstIter {
+ public:
+ explicit ConstIter(const SkTMultiMap* mmap)
+ : fIter(&(mmap->fHash))
+ , fList(nullptr) {
+ if (!fIter.done()) {
+ fList = &(*fIter);
+ }
+ }
+
+ bool done() const {
+ return fIter.done();
+ }
+
+ const T* operator*() {
+ SkASSERT(fList);
+ return fList->fValue;
+ }
+
+ void operator++() {
+ if (fList) {
+ fList = fList->fNext;
+ }
+ if (!fList) {
+ ++fIter;
+ if (!fIter.done()) {
+ fList = &(*fIter);
+ }
+ }
+ }
+
+ private:
+ typename SkTDynamicHash<ValueList, Key>::ConstIter fIter;
+ const ValueList* fList;
+ };
+
+ bool has(const T* value, const Key& key) const {
+ for (ValueList* list = fHash.find(key); list; list = list->fNext) {
+ if (list->fValue == value) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // This is not particularly fast and only used for validation, so debug only.
+ int countForKey(const Key& key) const {
+ int count = 0;
+ ValueList* list = fHash.find(key);
+ while (list) {
+ list = list->fNext;
+ ++count;
+ }
+ return count;
+ }
+#endif
+
+private:
+ SkTDynamicHash<ValueList, Key> fHash;
+ int fCount;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTSearch.cpp b/gfx/skia/skia/src/core/SkTSearch.cpp
new file mode 100644
index 000000000..9ff977763
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTSearch.cpp
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkTSearch.h"
+#include <ctype.h>
+
+static inline const char* index_into_base(const char*const* base, int index,
+ size_t elemSize)
+{
+ return *(const char*const*)((const char*)base + index * elemSize);
+}
+
+int SkStrSearch(const char*const* base, int count, const char target[],
+ size_t target_len, size_t elemSize)
+{
+ if (count <= 0)
+ return ~0;
+
+ SkASSERT(base != nullptr);
+
+ int lo = 0;
+ int hi = count - 1;
+
+ while (lo < hi)
+ {
+ int mid = (hi + lo) >> 1;
+ const char* elem = index_into_base(base, mid, elemSize);
+
+ int cmp = strncmp(elem, target, target_len);
+ if (cmp < 0)
+ lo = mid + 1;
+ else if (cmp > 0 || strlen(elem) > target_len)
+ hi = mid;
+ else
+ return mid;
+ }
+
+ const char* elem = index_into_base(base, hi, elemSize);
+ int cmp = strncmp(elem, target, target_len);
+ if (cmp || strlen(elem) > target_len)
+ {
+ if (cmp < 0)
+ hi += 1;
+ hi = ~hi;
+ }
+ return hi;
+}
+
+int SkStrSearch(const char*const* base, int count, const char target[],
+ size_t elemSize)
+{
+ return SkStrSearch(base, count, target, strlen(target), elemSize);
+}
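+
+// A minimal usage sketch (illustrative only; gNames is a hypothetical sorted table):
+//
+//   static const char* gNames[] = { "alpha", "beta", "gamma" };   // must be sorted
+//   int index = SkStrSearch(gNames, SK_ARRAY_COUNT(gNames), "beta", sizeof(gNames[0]));
+//   if (index < 0) {
+//       int insertAt = ~index;   // bitwise-not of the index where "beta" would be inserted
+//   }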
+
+int SkStrLCSearch(const char*const* base, int count, const char target[],
+ size_t len, size_t elemSize)
+{
+ SkASSERT(target);
+
+ SkAutoAsciiToLC tolc(target, len);
+
+ return SkStrSearch(base, count, tolc.lc(), len, elemSize);
+}
+
+int SkStrLCSearch(const char*const* base, int count, const char target[],
+ size_t elemSize)
+{
+ return SkStrLCSearch(base, count, target, strlen(target), elemSize);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+SkAutoAsciiToLC::SkAutoAsciiToLC(const char str[], size_t len)
+{
+ // see if we need to compute the length
+ if ((long)len < 0) {
+ len = strlen(str);
+ }
+ fLength = len;
+
+ // assign lc to our preallocated storage if len is small enough, or allocate
+ // it on the heap
+ char* lc;
+ if (len <= STORAGE) {
+ lc = fStorage;
+ } else {
+ lc = (char*)sk_malloc_throw(len + 1);
+ }
+ fLC = lc;
+
+ // convert any ascii to lower-case. we let non-ascii (utf8) chars pass
+ // through unchanged
+ for (int i = (int)(len - 1); i >= 0; --i) {
+ int c = str[i];
+ if ((c & 0x80) == 0) { // is just ascii
+ c = tolower(c);
+ }
+ lc[i] = c;
+ }
+ lc[len] = 0;
+}
+
+SkAutoAsciiToLC::~SkAutoAsciiToLC()
+{
+ if (fLC != fStorage) {
+ sk_free(fLC);
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkTSort.h b/gfx/skia/skia/src/core/SkTSort.h
new file mode 100644
index 000000000..7101bab9b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTSort.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTSort_DEFINED
+#define SkTSort_DEFINED
+
+#include "SkTypes.h"
+#include "SkMathPriv.h"
+
+/* A comparison functor which performs the comparison 'a < b'. */
+template <typename T> struct SkTCompareLT {
+ bool operator()(const T a, const T b) const { return a < b; }
+};
+
+/* A comparison functor which performs the comparison '*a < *b'. */
+template <typename T> struct SkTPointerCompareLT {
+ bool operator()(const T* a, const T* b) const { return *a < *b; }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* Sifts a broken heap. The input array is a heap from root to bottom
+ * except that the root entry may be out of place.
+ *
+ * Sinks a hole from array[root] to leaf and then sifts the original array[root] element
+ * from the leaf level up.
+ *
+ * This version does extra work, in that it copies child to parent on the way down,
+ * then copies parent to child on the way back up. When copies are inexpensive,
+ * this is an optimization as this sift variant should only be used when
+ * the potentially out of place root entry value is expected to be small.
+ *
+ * @param root the one based index into array of the out-of-place root of the heap.
+ * @param bottom the one based index in the array of the last entry in the heap.
+ */
+template <typename T, typename C>
+void SkTHeapSort_SiftUp(T array[], size_t root, size_t bottom, C lessThan) {
+ T x = array[root-1];
+ size_t start = root;
+ size_t j = root << 1;
+ while (j <= bottom) {
+ if (j < bottom && lessThan(array[j-1], array[j])) {
+ ++j;
+ }
+ array[root-1] = array[j-1];
+ root = j;
+ j = root << 1;
+ }
+ j = root >> 1;
+ while (j >= start) {
+ if (lessThan(array[j-1], x)) {
+ array[root-1] = array[j-1];
+ root = j;
+ j = root >> 1;
+ } else {
+ break;
+ }
+ }
+ array[root-1] = x;
+}
+
+/* Sifts a broken heap. The input array is a heap from root to bottom
+ * except that the root entry may be out of place.
+ *
+ * Sifts the array[root] element from the root down.
+ *
+ * @param root the one based index into array of the out-of-place root of the heap.
+ * @param bottom the one based index in the array of the last entry in the heap.
+ */
+template <typename T, typename C>
+void SkTHeapSort_SiftDown(T array[], size_t root, size_t bottom, C lessThan) {
+ T x = array[root-1];
+ size_t child = root << 1;
+ while (child <= bottom) {
+ if (child < bottom && lessThan(array[child-1], array[child])) {
+ ++child;
+ }
+ if (lessThan(x, array[child-1])) {
+ array[root-1] = array[child-1];
+ root = child;
+ child = root << 1;
+ } else {
+ break;
+ }
+ }
+ array[root-1] = x;
+}
+
+/** Sorts the array of size count using comparator lessThan using a Heap Sort algorithm. Be sure to
+ * specialize SkTSwap if T has an efficient swap operation.
+ *
+ * @param array the array to be sorted.
+ * @param count the number of elements in the array.
+ * @param lessThan a functor with bool operator()(T a, T b) which returns true if a comes before b.
+ */
+template <typename T, typename C> void SkTHeapSort(T array[], size_t count, C lessThan) {
+ for (size_t i = count >> 1; i > 0; --i) {
+ SkTHeapSort_SiftDown(array, i, count, lessThan);
+ }
+
+ for (size_t i = count - 1; i > 0; --i) {
+ SkTSwap<T>(array[0], array[i]);
+ SkTHeapSort_SiftUp(array, 1, i, lessThan);
+ }
+}
+
+/** Sorts the array of size count using comparator '<' using a Heap Sort algorithm. */
+template <typename T> void SkTHeapSort(T array[], size_t count) {
+ SkTHeapSort(array, count, SkTCompareLT<T>());
+}
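+
+// A minimal usage sketch (illustrative only):
+//
+//   int values[] = { 3, 1, 4, 1, 5 };
+//   SkTHeapSort(values, SK_ARRAY_COUNT(values));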
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** Sorts the array of size count using comparator lessThan using an Insertion Sort algorithm. */
+template <typename T, typename C> static void SkTInsertionSort(T* left, T* right, C lessThan) {
+ for (T* next = left + 1; next <= right; ++next) {
+ T insert = *next;
+ T* hole = next;
+ while (left < hole && lessThan(insert, *(hole - 1))) {
+ *hole = *(hole - 1);
+ --hole;
+ }
+ *hole = insert;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+template <typename T, typename C>
+static T* SkTQSort_Partition(T* left, T* right, T* pivot, C lessThan) {
+ T pivotValue = *pivot;
+ SkTSwap(*pivot, *right);
+ T* newPivot = left;
+ while (left < right) {
+ if (lessThan(*left, pivotValue)) {
+ SkTSwap(*left, *newPivot);
+ newPivot += 1;
+ }
+ left += 1;
+ }
+ SkTSwap(*newPivot, *right);
+ return newPivot;
+}
+
+/* Intro Sort is a modified Quick Sort.
+ * When the region to be sorted is a small constant size it uses Insertion Sort.
+ * When depth becomes zero, it switches over to Heap Sort.
+ * This implementation recurses on the left region after pivoting and loops on the right;
+ * we already limit the stack depth by switching to heap sort,
+ * and cache locality on the data appears more important than saving a few stack frames.
+ *
+ * @param depth at this recursion depth, switch to Heap Sort.
+ * @param left the beginning of the region to be sorted.
+ * @param right the end of the region to be sorted (inclusive).
+ * @param lessThan a functor with bool operator()(T a, T b) which returns true if a comes before b.
+ */
+template <typename T, typename C> void SkTIntroSort(int depth, T* left, T* right, C lessThan) {
+ while (true) {
+ if (right - left < 32) {
+ SkTInsertionSort(left, right, lessThan);
+ return;
+ }
+
+ if (depth == 0) {
+ SkTHeapSort<T>(left, right - left + 1, lessThan);
+ return;
+ }
+ --depth;
+
+ T* pivot = left + ((right - left) >> 1);
+ pivot = SkTQSort_Partition(left, right, pivot, lessThan);
+
+ SkTIntroSort(depth, left, pivot - 1, lessThan);
+ left = pivot + 1;
+ }
+}
+
+/** Sorts the region from left to right using comparator lessThan using a Quick Sort algorithm. Be
+ * sure to specialize SkTSwap if T has an efficient swap operation.
+ *
+ * @param left the beginning of the region to be sorted.
+ * @param right the end of the region to be sorted (inclusive).
+ * @param lessThan a functor with bool operator()(T a, T b) which returns true if a comes before b.
+ */
+template <typename T, typename C> void SkTQSort(T* left, T* right, C lessThan) {
+ if (left >= right) {
+ return;
+ }
+ // Limit Intro Sort recursion depth to no more than 2 * ceil(log2(n)).
+ int depth = 2 * SkNextLog2(SkToU32(right - left));
+ SkTIntroSort(depth, left, right, lessThan);
+}
+
+/** Sorts the region from left to right using comparator '<' using a Quick Sort algorithm. */
+template <typename T> void SkTQSort(T* left, T* right) {
+ SkTQSort(left, right, SkTCompareLT<T>());
+}
+
+/** Sorts the region from left to right using comparator '* < *' using a Quick Sort algorithm. */
+template <typename T> void SkTQSort(T** left, T** right) {
+ SkTQSort(left, right, SkTPointerCompareLT<T>());
+}
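+
+// A minimal usage sketch (illustrative only). Note that 'right' is inclusive: it points at the
+// last element, not one past it.
+//
+//   int values[] = { 3, 1, 4, 1, 5 };
+//   SkTQSort(values, values + SK_ARRAY_COUNT(values) - 1);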
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTTopoSort.h b/gfx/skia/skia/src/core/SkTTopoSort.h
new file mode 100644
index 000000000..35b85eeb8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTTopoSort.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTTopoSort_DEFINED
+#define SkTTopoSort_DEFINED
+
+#include "SkTDArray.h"
+
+#ifdef SK_DEBUG
+template <typename T, typename Traits = T>
+void SkTTopoSort_CheckAllUnmarked(const SkTDArray<T*>& graph) {
+ for (int i = 0; i < graph.count(); ++i) {
+ SkASSERT(!Traits::IsTempMarked(graph[i]));
+ SkASSERT(!Traits::WasOutput(graph[i]));
+ }
+}
+
+template <typename T, typename Traits = T>
+void SkTTopoSort_CleanExit(const SkTDArray<T*>& graph) {
+ for (int i = 0; i < graph.count(); ++i) {
+ SkASSERT(!Traits::IsTempMarked(graph[i]));
+ SkASSERT(Traits::WasOutput(graph[i]));
+ }
+}
+#endif
+
+// Recursively visit a node and all the other nodes it depends on.
+// Return false if there is a loop.
+template <typename T, typename Traits = T>
+bool SkTTopoSort_Visit(T* node, SkTDArray<T*>* result) {
+ if (Traits::IsTempMarked(node)) {
+ // There is a loop.
+ return false;
+ }
+
+ // If the node under consideration has already been output it means it
+ // (and all the nodes it depends on) are already in 'result'.
+ if (!Traits::WasOutput(node)) {
+ // This node hasn't been output yet. Recursively assess all the
+ // nodes it depends on, outputting them first.
+ Traits::SetTempMark(node);
+ for (int i = 0; i < Traits::NumDependencies(node); ++i) {
+ if (!SkTTopoSort_Visit<T, Traits>(Traits::Dependency(node, i), result)) {
+ return false;
+ }
+ }
+ Traits::Output(node, result->count()); // mark this node as output
+ Traits::ResetTempMark(node);
+
+ *result->append() = node;
+ }
+
+ return true;
+}
+
+// Topologically sort the nodes in 'graph'. For this sort, when node 'i' depends
+// on node 'j' it means node 'j' must appear in the result before node 'i'.
+// A false return value means there was a loop and the contents of 'graph' will
+// be in some arbitrary state.
+//
+// Traits requires:
+// static void Output(T* t, int index) { ... } // 'index' is 't's position in the result
+// static bool WasOutput(const T* t) { ... }
+//
+// static void SetTempMark(T* t) { ... } // transiently used during toposort
+// static void ResetTempMark(T* t) { ... }
+// static bool IsTempMarked(const T* t) { ... }
+//
+// static int NumDependencies(const T* t) { ... } // 't' will be output after all the other -
+// static T* Dependency(T* t, int index) { ... } // nodes on which it depends
+// We'll look on T for these by default, or you can pass a custom Traits type.
+//
+// TODO: potentially add a version that takes a seed node and just outputs that
+// node and all the nodes on which it depends. This could be used to partially
+// flush a drawTarget DAG.
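+//
+// A minimal sketch of a conforming node type (illustrative only; TopoNode is hypothetical):
+//
+//   struct TopoNode {
+//       SkTDArray<TopoNode*> fDependencies;
+//       int  fIndex = -1;          // position in the sorted output, -1 if not yet output
+//       bool fTempMarked = false;
+//
+//       static void Output(TopoNode* t, int index) { t->fIndex = index; }
+//       static bool WasOutput(const TopoNode* t) { return t->fIndex >= 0; }
+//       static void SetTempMark(TopoNode* t) { t->fTempMarked = true; }
+//       static void ResetTempMark(TopoNode* t) { t->fTempMarked = false; }
+//       static bool IsTempMarked(const TopoNode* t) { return t->fTempMarked; }
+//       static int NumDependencies(const TopoNode* t) { return t->fDependencies.count(); }
+//       static TopoNode* Dependency(TopoNode* t, int i) { return t->fDependencies[i]; }
+//   };
+//
+//   SkTDArray<TopoNode*> graph = ...;      // filled in elsewhere
+//   bool ok = SkTTopoSort(&graph);         // false means the graph contains a cycle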
+template <typename T, typename Traits = T>
+bool SkTTopoSort(SkTDArray<T*>* graph) {
+ SkTDArray<T*> result;
+
+#ifdef SK_DEBUG
+ SkTTopoSort_CheckAllUnmarked<T, Traits>(*graph);
+#endif
+
+ result.setReserve(graph->count());
+
+ for (int i = 0; i < graph->count(); ++i) {
+ if (Traits::WasOutput((*graph)[i])) {
+ // This node was depended on by some earlier node and has already
+ // been output
+ continue;
+ }
+
+ // Output this node after all the nodes it depends on have been output.
+ if (!SkTTopoSort_Visit<T, Traits>((*graph)[i], &result)) {
+ return false;
+ }
+ }
+
+ SkASSERT(graph->count() == result.count());
+ graph->swap(result);
+
+#ifdef SK_DEBUG
+ SkTTopoSort_CleanExit<T, Traits>(*graph);
+#endif
+ return true;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTaskGroup.cpp b/gfx/skia/skia/src/core/SkTaskGroup.cpp
new file mode 100644
index 000000000..d151510cf
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTaskGroup.cpp
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkLeanWindows.h"
+#include "SkOnce.h"
+#include "SkSemaphore.h"
+#include "SkSpinlock.h"
+#include "SkTArray.h"
+#include "SkTDArray.h"
+#include "SkTaskGroup.h"
+#include "SkThreadUtils.h"
+
+#if defined(SK_BUILD_FOR_WIN32)
+ static void query_num_cores(int* cores) {
+ SYSTEM_INFO sysinfo;
+ GetNativeSystemInfo(&sysinfo);
+ *cores = sysinfo.dwNumberOfProcessors;
+ }
+#else
+ #include <unistd.h>
+ static void query_num_cores(int* cores) {
+ *cores = (int)sysconf(_SC_NPROCESSORS_ONLN);
+ }
+#endif
+
+static int num_cores() {
+ // We cache num_cores() so we only query the OS once.
+ static int cores = 0;
+ static SkOnce once;
+ once(query_num_cores, &cores);
+ SkASSERT(cores > 0);
+ return cores;
+}
+
+namespace {
+
+class ThreadPool : SkNoncopyable {
+public:
+ static void Add(std::function<void(void)> fn, SkAtomic<int32_t>* pending) {
+ if (!gGlobal) {
+ return fn();
+ }
+ gGlobal->add(fn, pending);
+ }
+
+ static void Batch(int N, std::function<void(int)> fn, SkAtomic<int32_t>* pending) {
+ if (!gGlobal) {
+ for (int i = 0; i < N; i++) { fn(i); }
+ return;
+ }
+ gGlobal->batch(N, fn, pending);
+ }
+
+ static void Wait(SkAtomic<int32_t>* pending) {
+ if (!gGlobal) { // If we have no threads, the work must already be done.
+ SkASSERT(pending->load(sk_memory_order_relaxed) == 0);
+ return;
+ }
+ // Acquire pairs with decrement release here or in Loop.
+ while (pending->load(sk_memory_order_acquire) > 0) {
+ // Lend a hand until our SkTaskGroup of interest is done.
+ Work work;
+ {
+ // We're stealing work opportunistically,
+ // so we never call fWorkAvailable.wait(), which could sleep us if there's no work.
+ // This means fWorkAvailable is only an upper bound on fWork.count().
+ AutoLock lock(&gGlobal->fWorkLock);
+ if (gGlobal->fWork.empty()) {
+ // Someone has picked up all the work (including ours). How nice of them!
+ // (They may still be working on it, so we can't assert *pending == 0 here.)
+ continue;
+ }
+ work = gGlobal->fWork.back();
+ gGlobal->fWork.pop_back();
+ }
+ // This Work isn't necessarily part of our SkTaskGroup of interest, but that's fine.
+ // We threads gotta stick together. We're always making forward progress.
+ work.fn();
+ work.pending->fetch_add(-1, sk_memory_order_release); // Pairs with load above.
+ }
+ }
+
+private:
+ struct AutoLock {
+ AutoLock(SkSpinlock* lock) : fLock(lock) { fLock->acquire(); }
+ ~AutoLock() { fLock->release(); }
+ private:
+ SkSpinlock* fLock;
+ };
+
+ struct Work {
+ std::function<void(void)> fn; // A function to call
+ SkAtomic<int32_t>* pending; // then decrement pending afterwards.
+ };
+
+ explicit ThreadPool(int threads) {
+ if (threads == -1) {
+ threads = num_cores();
+ }
+ for (int i = 0; i < threads; i++) {
+ fThreads.push(new SkThread(&ThreadPool::Loop, this));
+ fThreads.top()->start();
+ }
+ }
+
+ ~ThreadPool() {
+ SkASSERT(fWork.empty()); // All SkTaskGroups should be destroyed by now.
+
+ // Send a poison pill to each thread.
+ SkAtomic<int> dummy(0);
+ for (int i = 0; i < fThreads.count(); i++) {
+ this->add(nullptr, &dummy);
+ }
+ // Wait for them all to swallow the pill and die.
+ for (int i = 0; i < fThreads.count(); i++) {
+ fThreads[i]->join();
+ }
+ SkASSERT(fWork.empty()); // Can't hurt to double check.
+ fThreads.deleteAll();
+ }
+
+ void add(std::function<void(void)> fn, SkAtomic<int32_t>* pending) {
+ Work work = { fn, pending };
+ pending->fetch_add(+1, sk_memory_order_relaxed); // No barrier needed.
+ {
+ AutoLock lock(&fWorkLock);
+ fWork.push_back(work);
+ }
+ fWorkAvailable.signal(1);
+ }
+
+ void batch(int N, std::function<void(int)> fn, SkAtomic<int32_t>* pending) {
+ pending->fetch_add(+N, sk_memory_order_relaxed); // No barrier needed.
+ {
+ AutoLock lock(&fWorkLock);
+ for (int i = 0; i < N; i++) {
+ Work work = { [i, fn]() { fn(i); }, pending };
+ fWork.push_back(work);
+ }
+ }
+ fWorkAvailable.signal(N);
+ }
+
+ static void Loop(void* arg) {
+ ThreadPool* pool = (ThreadPool*)arg;
+ Work work;
+ while (true) {
+ // Sleep until there's work available, and claim one unit of Work as we wake.
+ pool->fWorkAvailable.wait();
+ {
+ AutoLock lock(&pool->fWorkLock);
+ if (pool->fWork.empty()) {
+ // Someone in Wait() stole our work (fWorkAvailable is an upper bound).
+ // Well, that's fine, back to sleep for us.
+ continue;
+ }
+ work = pool->fWork.back();
+ pool->fWork.pop_back();
+ }
+ if (!work.fn) {
+ return; // Poison pill. Time... to die.
+ }
+ work.fn();
+ work.pending->fetch_add(-1, sk_memory_order_release); // Pairs with load in Wait().
+ }
+ }
+
+ // fWorkLock must be held when reading or modifying fWork.
+ SkSpinlock fWorkLock;
+ SkTArray<Work> fWork;
+
+ // A thread-safe upper bound for fWork.count().
+ //
+ // We'd have it be an exact count but for the loop in Wait():
+ // we never want that to block, so it can't call fWorkAvailable.wait(),
+ // and that's the only way to decrement fWorkAvailable.
+    // So fWorkAvailable may overcount the work actually available.
+ // We make do, but this means some worker threads may wake spuriously.
+ SkSemaphore fWorkAvailable;
+
+ // These are only changed in a single-threaded context.
+ SkTDArray<SkThread*> fThreads;
+ static ThreadPool* gGlobal;
+
+ friend struct SkTaskGroup::Enabler;
+};
+ThreadPool* ThreadPool::gGlobal = nullptr;
+
+} // namespace
+
+SkTaskGroup::Enabler::Enabler(int threads) {
+ SkASSERT(ThreadPool::gGlobal == nullptr);
+ if (threads != 0) {
+ ThreadPool::gGlobal = new ThreadPool(threads);
+ }
+}
+
+SkTaskGroup::Enabler::~Enabler() { delete ThreadPool::gGlobal; }
+
+SkTaskGroup::SkTaskGroup() : fPending(0) {}
+
+void SkTaskGroup::wait() { ThreadPool::Wait(&fPending); }
+void SkTaskGroup::add(std::function<void(void)> fn) { ThreadPool::Add(fn, &fPending); }
+void SkTaskGroup::batch(int N, std::function<void(int)> fn) {
+ ThreadPool::Batch(N, fn, &fPending);
+}
diff --git a/gfx/skia/skia/src/core/SkTaskGroup.h b/gfx/skia/skia/src/core/SkTaskGroup.h
new file mode 100644
index 000000000..0f793f31e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTaskGroup.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTaskGroup_DEFINED
+#define SkTaskGroup_DEFINED
+
+#include <functional>
+
+#include "SkTypes.h"
+#include "SkAtomics.h"
+#include "SkTemplates.h"
+
+class SkTaskGroup : SkNoncopyable {
+public:
+ // Create one of these in main() to enable SkTaskGroups globally.
+ struct Enabler : SkNoncopyable {
+ explicit Enabler(int threads = -1); // Default is system-reported core count.
+ ~Enabler();
+ };
+
+ SkTaskGroup();
+ ~SkTaskGroup() { this->wait(); }
+
+ // Add a task to this SkTaskGroup. It will likely run on another thread.
+ void add(std::function<void(void)> fn);
+
+ // Add a batch of N tasks, all calling fn with different arguments.
+ void batch(int N, std::function<void(int)> fn);
+
+ // Block until all Tasks previously add()ed to this SkTaskGroup have run.
+ // You may safely reuse this SkTaskGroup after wait() returns.
+ void wait();
+
+private:
+ SkAtomic<int32_t> fPending;
+};
+
+#endif//SkTaskGroup_DEFINED
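A minimal usage sketch of the SkTaskGroup API declared above (hypothetical call site; any lambda convertible to std::function works):

    // Once, near startup: spin up the shared pool (threads = -1 => one per core).
    SkTaskGroup::Enabler enabler;

    // Fan work out and wait for it.
    SkTaskGroup tg;
    tg.add([] { /* an independent task */ });
    tg.batch(100, [](int i) { /* process item i */ });
    tg.wait();   // returns once every task added above has run

If no Enabler has been created, add() and batch() simply run the work synchronously on the calling thread, and wait() returns immediately.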
diff --git a/gfx/skia/skia/src/core/SkTextBlob.cpp b/gfx/skia/skia/src/core/SkTextBlob.cpp
new file mode 100644
index 000000000..ca75a2e03
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTextBlob.cpp
@@ -0,0 +1,779 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTextBlobRunIterator.h"
+
+#include "SkReadBuffer.h"
+#include "SkTypeface.h"
+#include "SkWriteBuffer.h"
+
+namespace {
+
+// TODO(fmalita): replace with SkFont.
+class RunFont : SkNoncopyable {
+public:
+ RunFont(const SkPaint& paint)
+ : fSize(paint.getTextSize())
+ , fScaleX(paint.getTextScaleX())
+ , fTypeface(SkSafeRef(paint.getTypeface()))
+ , fSkewX(paint.getTextSkewX())
+ , fAlign(paint.getTextAlign())
+ , fHinting(paint.getHinting())
+ , fFlags(paint.getFlags() & kFlagsMask) { }
+
+ void applyToPaint(SkPaint* paint) const {
+ paint->setTextEncoding(SkPaint::kGlyphID_TextEncoding);
+ paint->setTypeface(fTypeface);
+ paint->setTextSize(fSize);
+ paint->setTextScaleX(fScaleX);
+ paint->setTextSkewX(fSkewX);
+ paint->setTextAlign(static_cast<SkPaint::Align>(fAlign));
+ paint->setHinting(static_cast<SkPaint::Hinting>(fHinting));
+
+ paint->setFlags((paint->getFlags() & ~kFlagsMask) | fFlags);
+ }
+
+ bool operator==(const RunFont& other) const {
+ return fTypeface == other.fTypeface
+ && fSize == other.fSize
+ && fScaleX == other.fScaleX
+ && fSkewX == other.fSkewX
+ && fAlign == other.fAlign
+ && fHinting == other.fHinting
+ && fFlags == other.fFlags;
+ }
+
+ bool operator!=(const RunFont& other) const {
+ return !(*this == other);
+ }
+
+ uint32_t flags() const { return fFlags; }
+
+private:
+ const static uint32_t kFlagsMask =
+ SkPaint::kAntiAlias_Flag |
+ SkPaint::kUnderlineText_Flag |
+ SkPaint::kStrikeThruText_Flag |
+ SkPaint::kFakeBoldText_Flag |
+ SkPaint::kLinearText_Flag |
+ SkPaint::kSubpixelText_Flag |
+ SkPaint::kDevKernText_Flag |
+ SkPaint::kLCDRenderText_Flag |
+ SkPaint::kEmbeddedBitmapText_Flag |
+ SkPaint::kAutoHinting_Flag |
+ SkPaint::kVerticalText_Flag |
+ SkPaint::kGenA8FromLCD_Flag;
+
+ SkScalar fSize;
+ SkScalar fScaleX;
+
+    // Keep this sk_sp off the first position, to avoid interfering with SkNoncopyable's
+    // empty baseclass optimization (http://code.google.com/p/skia/issues/detail?id=3694).
+ sk_sp<SkTypeface> fTypeface;
+ SkScalar fSkewX;
+
+ static_assert(SkPaint::kAlignCount < 4, "insufficient_align_bits");
+ uint32_t fAlign : 2;
+ static_assert(SkPaint::kFull_Hinting < 4, "insufficient_hinting_bits");
+ uint32_t fHinting : 2;
+ static_assert((kFlagsMask & 0xffff) == kFlagsMask, "insufficient_flags_bits");
+ uint32_t fFlags : 16;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+struct RunFontStorageEquivalent {
+ SkScalar fSize, fScaleX;
+ void* fTypeface;
+ SkScalar fSkewX;
+ uint32_t fFlags;
+};
+static_assert(sizeof(RunFont) == sizeof(RunFontStorageEquivalent), "runfont_should_stay_packed");
+
+} // anonymous namespace
+
+//
+// Textblob data is laid out into externally-managed storage as follows:
+//
+// -----------------------------------------------------------------------------
+// | SkTextBlob | RunRecord | Glyphs[] | Pos[] | RunRecord | Glyphs[] | Pos[] | ...
+// -----------------------------------------------------------------------------
+//
+// Each run record describes a text blob run, and can be used to determine the (implicit)
+// location of the following record.
+//
+// Extended Textblob runs have more data after the Pos[] array:
+//
+// -------------------------------------------------------------------------
+// ... | RunRecord | Glyphs[] | Pos[] | TextSize | Clusters[] | Text[] | ...
+// -------------------------------------------------------------------------
+//
+// To determine the length of the extended run data, the TextSize must be read.
+//
+// Extended Textblob runs may be mixed with non-extended runs.
+
+SkDEBUGCODE(static const unsigned kRunRecordMagic = 0xb10bcafe;)
+
+namespace {
+struct RunRecordStorageEquivalent {
+ RunFont fFont;
+ SkPoint fOffset;
+ uint32_t fCount;
+ uint32_t fFlags;
+ SkDEBUGCODE(unsigned fMagic;)
+};
+}
+
+class SkTextBlob::RunRecord {
+public:
+ RunRecord(uint32_t count, uint32_t textSize, const SkPoint& offset, const SkPaint& font, GlyphPositioning pos)
+ : fFont(font)
+ , fCount(count)
+ , fOffset(offset)
+ , fPositioning(pos)
+ , fExtended(textSize > 0) {
+ SkDEBUGCODE(fMagic = kRunRecordMagic);
+ if (textSize > 0) {
+ *this->textSizePtr() = textSize;
+ }
+ }
+
+ uint32_t glyphCount() const {
+ return fCount;
+ }
+
+ const SkPoint& offset() const {
+ return fOffset;
+ }
+
+ const RunFont& font() const {
+ return fFont;
+ }
+
+ GlyphPositioning positioning() const {
+ return fPositioning;
+ }
+
+ uint16_t* glyphBuffer() const {
+ static_assert(SkIsAlignPtr(sizeof(RunRecord)), "");
+ // Glyphs are stored immediately following the record.
+ return reinterpret_cast<uint16_t*>(const_cast<RunRecord*>(this) + 1);
+ }
+
+ SkScalar* posBuffer() const {
+ // Position scalars follow the (aligned) glyph buffer.
+ return reinterpret_cast<SkScalar*>(reinterpret_cast<uint8_t*>(this->glyphBuffer()) +
+ SkAlign4(fCount * sizeof(uint16_t)));
+ }
+
+ uint32_t textSize() const { return fExtended ? *this->textSizePtr() : 0; }
+
+ uint32_t* clusterBuffer() const {
+ // clusters follow the textSize.
+ return fExtended ? 1 + this->textSizePtr() : nullptr;
+ }
+
+ char* textBuffer() const {
+ if (!fExtended) { return nullptr; }
+ return reinterpret_cast<char*>(this->clusterBuffer() + fCount);
+ }
+
+ static size_t StorageSize(int glyphCount, int textSize,
+ SkTextBlob::GlyphPositioning positioning) {
+ static_assert(SkIsAlign4(sizeof(SkScalar)), "SkScalar size alignment");
+ // RunRecord object + (aligned) glyph buffer + position buffer
+ size_t size = sizeof(SkTextBlob::RunRecord)
+ + SkAlign4(glyphCount* sizeof(uint16_t))
+ + PosCount(glyphCount, positioning) * sizeof(SkScalar);
+ if (textSize > 0) { // Extended run.
+ size += sizeof(uint32_t)
+ + sizeof(uint32_t) * glyphCount
+ + textSize;
+ }
+ return SkAlignPtr(size);
+ }
+
+ static const RunRecord* First(const SkTextBlob* blob) {
+ // The first record (if present) is stored following the blob object.
+ return reinterpret_cast<const RunRecord*>(blob + 1);
+ }
+
+ static const RunRecord* Next(const RunRecord* run) {
+ return reinterpret_cast<const RunRecord*>(
+ reinterpret_cast<const uint8_t*>(run)
+ + StorageSize(run->glyphCount(), run->textSize(), run->positioning()));
+ }
+
+ void validate(const uint8_t* storageTop) const {
+ SkASSERT(kRunRecordMagic == fMagic);
+ SkASSERT((uint8_t*)Next(this) <= storageTop);
+
+ SkASSERT(glyphBuffer() + fCount <= (uint16_t*)posBuffer());
+ SkASSERT(posBuffer() + fCount * ScalarsPerGlyph(fPositioning) <= (SkScalar*)Next(this));
+ if (fExtended) {
+ SkASSERT(textSize() > 0);
+ SkASSERT(textSizePtr() < (uint32_t*)Next(this));
+ SkASSERT(clusterBuffer() < (uint32_t*)Next(this));
+ SkASSERT(textBuffer() + textSize() <= (char*)Next(this));
+ }
+ static_assert(sizeof(SkTextBlob::RunRecord) == sizeof(RunRecordStorageEquivalent),
+ "runrecord_should_stay_packed");
+ }
+
+private:
+ friend class SkTextBlobBuilder;
+
+ static size_t PosCount(int glyphCount,
+ SkTextBlob::GlyphPositioning positioning) {
+ return glyphCount * ScalarsPerGlyph(positioning);
+ }
+
+ uint32_t* textSizePtr() const {
+ // textSize follows the position buffer.
+ SkASSERT(fExtended);
+ return (uint32_t*)(&this->posBuffer()[PosCount(fCount, fPositioning)]);
+ }
+
+ void grow(uint32_t count) {
+ SkScalar* initialPosBuffer = posBuffer();
+ uint32_t initialCount = fCount;
+ fCount += count;
+
+ // Move the initial pos scalars to their new location.
+ size_t copySize = initialCount * sizeof(SkScalar) * ScalarsPerGlyph(fPositioning);
+ SkASSERT((uint8_t*)posBuffer() + copySize <= (uint8_t*)Next(this));
+
+ // memmove, as the buffers may overlap
+ memmove(posBuffer(), initialPosBuffer, copySize);
+ }
+
+ RunFont fFont;
+ uint32_t fCount;
+ SkPoint fOffset;
+ GlyphPositioning fPositioning;
+ bool fExtended;
+
+ SkDEBUGCODE(unsigned fMagic;)
+};
+
+static int32_t gNextID = 1;
+static int32_t next_id() {
+ int32_t id;
+ do {
+ id = sk_atomic_inc(&gNextID);
+ } while (id == SK_InvalidGenID);
+ return id;
+}
+
+SkTextBlob::SkTextBlob(int runCount, const SkRect& bounds)
+ : fRunCount(runCount)
+ , fBounds(bounds)
+ , fUniqueID(next_id()) {
+}
+
+SkTextBlob::~SkTextBlob() {
+ const RunRecord* run = RunRecord::First(this);
+ for (int i = 0; i < fRunCount; ++i) {
+ const RunRecord* nextRun = RunRecord::Next(run);
+ SkDEBUGCODE(run->validate((uint8_t*)this + fStorageSize);)
+ run->~RunRecord();
+ run = nextRun;
+ }
+}
+
+namespace {
+union PositioningAndExtended {
+ int32_t intValue;
+ struct {
+ SkTextBlob::GlyphPositioning positioning;
+ bool extended;
+ uint16_t padding;
+ };
+};
+} // namespace
+
+void SkTextBlob::flatten(SkWriteBuffer& buffer) const {
+ int runCount = fRunCount;
+
+ buffer.write32(runCount);
+ buffer.writeRect(fBounds);
+
+ SkPaint runPaint;
+ SkTextBlobRunIterator it(this);
+ while (!it.done()) {
+ SkASSERT(it.glyphCount() > 0);
+
+ buffer.write32(it.glyphCount());
+ PositioningAndExtended pe;
+ pe.intValue = 0;
+ pe.positioning = it.positioning();
+ SkASSERT((int32_t)it.positioning() == pe.intValue); // backwards compat.
+
+ uint32_t textSize = it.textSize();
+ pe.extended = textSize > 0;
+ buffer.write32(pe.intValue);
+ if (pe.extended) {
+ buffer.write32(textSize);
+ }
+ buffer.writePoint(it.offset());
+ // This should go away when switching to SkFont
+ it.applyFontToPaint(&runPaint);
+ buffer.writePaint(runPaint);
+
+ buffer.writeByteArray(it.glyphs(), it.glyphCount() * sizeof(uint16_t));
+ buffer.writeByteArray(it.pos(),
+ it.glyphCount() * sizeof(SkScalar) * ScalarsPerGlyph(it.positioning()));
+ if (pe.extended) {
+ buffer.writeByteArray(it.clusters(), sizeof(uint32_t) * it.glyphCount());
+ buffer.writeByteArray(it.text(), it.textSize());
+ }
+
+ it.next();
+ SkDEBUGCODE(runCount--);
+ }
+ SkASSERT(0 == runCount);
+}
+
+sk_sp<SkTextBlob> SkTextBlob::MakeFromBuffer(SkReadBuffer& reader) {
+ int runCount = reader.read32();
+ if (runCount < 0) {
+ return nullptr;
+ }
+
+ SkRect bounds;
+ reader.readRect(&bounds);
+
+ SkTextBlobBuilder blobBuilder;
+ for (int i = 0; i < runCount; ++i) {
+ int glyphCount = reader.read32();
+
+ PositioningAndExtended pe;
+ pe.intValue = reader.read32();
+ GlyphPositioning pos = pe.positioning;
+ if (glyphCount <= 0 || pos > kFull_Positioning) {
+ return nullptr;
+ }
+ uint32_t textSize = pe.extended ? (uint32_t)reader.read32() : 0;
+
+ SkPoint offset;
+ reader.readPoint(&offset);
+ SkPaint font;
+ reader.readPaint(&font);
+
+ const SkTextBlobBuilder::RunBuffer* buf = nullptr;
+ switch (pos) {
+ case kDefault_Positioning:
+ buf = &blobBuilder.allocRunText(font, glyphCount, offset.x(), offset.y(),
+ textSize, SkString(), &bounds);
+ break;
+ case kHorizontal_Positioning:
+ buf = &blobBuilder.allocRunTextPosH(font, glyphCount, offset.y(),
+ textSize, SkString(), &bounds);
+ break;
+ case kFull_Positioning:
+ buf = &blobBuilder.allocRunTextPos(font, glyphCount, textSize, SkString(), &bounds);
+ break;
+ default:
+ return nullptr;
+ }
+
+ if (!reader.readByteArray(buf->glyphs, glyphCount * sizeof(uint16_t)) ||
+ !reader.readByteArray(buf->pos,
+ glyphCount * sizeof(SkScalar) * ScalarsPerGlyph(pos))) {
+ return nullptr;
+ }
+
+ if (pe.extended) {
+ if (!reader.readByteArray(buf->clusters, glyphCount * sizeof(uint32_t)) ||
+ !reader.readByteArray(buf->utf8text, textSize)) {
+ return nullptr;
+ }
+ }
+ }
+
+ return blobBuilder.make();
+}
+
+unsigned SkTextBlob::ScalarsPerGlyph(GlyphPositioning pos) {
+ // GlyphPositioning values are directly mapped to scalars-per-glyph.
+ SkASSERT(pos <= 2);
+ return pos;
+}
+
+SkTextBlobRunIterator::SkTextBlobRunIterator(const SkTextBlob* blob)
+ : fCurrentRun(SkTextBlob::RunRecord::First(blob))
+ , fRemainingRuns(blob->fRunCount) {
+ SkDEBUGCODE(fStorageTop = (uint8_t*)blob + blob->fStorageSize;)
+}
+
+bool SkTextBlobRunIterator::done() const {
+ return fRemainingRuns <= 0;
+}
+
+void SkTextBlobRunIterator::next() {
+ SkASSERT(!this->done());
+
+ if (!this->done()) {
+ SkDEBUGCODE(fCurrentRun->validate(fStorageTop);)
+ fCurrentRun = SkTextBlob::RunRecord::Next(fCurrentRun);
+ fRemainingRuns--;
+ }
+}
+
+uint32_t SkTextBlobRunIterator::glyphCount() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->glyphCount();
+}
+
+const uint16_t* SkTextBlobRunIterator::glyphs() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->glyphBuffer();
+}
+
+const SkScalar* SkTextBlobRunIterator::pos() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->posBuffer();
+}
+
+const SkPoint& SkTextBlobRunIterator::offset() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->offset();
+}
+
+SkTextBlob::GlyphPositioning SkTextBlobRunIterator::positioning() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->positioning();
+}
+
+void SkTextBlobRunIterator::applyFontToPaint(SkPaint* paint) const {
+ SkASSERT(!this->done());
+
+ fCurrentRun->font().applyToPaint(paint);
+}
+
+uint32_t* SkTextBlobRunIterator::clusters() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->clusterBuffer();
+}
+uint32_t SkTextBlobRunIterator::textSize() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->textSize();
+}
+char* SkTextBlobRunIterator::text() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->textBuffer();
+}
+
+
+bool SkTextBlobRunIterator::isLCD() const {
+ return SkToBool(fCurrentRun->font().flags() & SkPaint::kLCDRenderText_Flag);
+}
+
+SkTextBlobBuilder::SkTextBlobBuilder()
+ : fStorageSize(0)
+ , fStorageUsed(0)
+ , fRunCount(0)
+ , fDeferredBounds(false)
+ , fLastRun(0) {
+ fBounds.setEmpty();
+}
+
+SkTextBlobBuilder::~SkTextBlobBuilder() {
+ if (nullptr != fStorage.get()) {
+ // We are abandoning runs and must destruct the associated font data.
+ // The easiest way to accomplish that is to use the blob destructor.
+ this->make();
+ }
+}
+
+SkRect SkTextBlobBuilder::TightRunBounds(const SkTextBlob::RunRecord& run) {
+ SkRect bounds;
+ SkPaint paint;
+ run.font().applyToPaint(&paint);
+
+ if (SkTextBlob::kDefault_Positioning == run.positioning()) {
+ paint.measureText(run.glyphBuffer(), run.glyphCount() * sizeof(uint16_t), &bounds);
+ return bounds.makeOffset(run.offset().x(), run.offset().y());
+ }
+
+ SkAutoSTArray<16, SkRect> glyphBounds(run.glyphCount());
+ paint.getTextWidths(run.glyphBuffer(),
+ run.glyphCount() * sizeof(uint16_t),
+ NULL,
+ glyphBounds.get());
+
+ SkASSERT(SkTextBlob::kFull_Positioning == run.positioning() ||
+ SkTextBlob::kHorizontal_Positioning == run.positioning());
+ // kFull_Positioning => [ x, y, x, y... ]
+ // kHorizontal_Positioning => [ x, x, x... ]
+ // (const y applied by runBounds.offset(run->offset()) later)
+ const SkScalar horizontalConstY = 0;
+ const SkScalar* glyphPosX = run.posBuffer();
+ const SkScalar* glyphPosY = (run.positioning() == SkTextBlob::kFull_Positioning) ?
+ glyphPosX + 1 : &horizontalConstY;
+ const unsigned posXInc = SkTextBlob::ScalarsPerGlyph(run.positioning());
+ const unsigned posYInc = (run.positioning() == SkTextBlob::kFull_Positioning) ?
+ posXInc : 0;
+
+ bounds.setEmpty();
+ for (unsigned i = 0; i < run.glyphCount(); ++i) {
+ bounds.join(glyphBounds[i].makeOffset(*glyphPosX, *glyphPosY));
+ glyphPosX += posXInc;
+ glyphPosY += posYInc;
+ }
+
+ SkASSERT((void*)glyphPosX <= SkTextBlob::RunRecord::Next(&run));
+
+ return bounds.makeOffset(run.offset().x(), run.offset().y());
+}
+
+SkRect SkTextBlobBuilder::ConservativeRunBounds(const SkTextBlob::RunRecord& run) {
+ SkASSERT(run.glyphCount() > 0);
+ SkASSERT(SkTextBlob::kFull_Positioning == run.positioning() ||
+ SkTextBlob::kHorizontal_Positioning == run.positioning());
+
+ SkPaint paint;
+ run.font().applyToPaint(&paint);
+ const SkRect fontBounds = paint.getFontBounds();
+ if (fontBounds.isEmpty()) {
+        // Empty font bounds are likely a font bug. TightRunBounds has a better chance of
+ // producing useful results in this case.
+ return TightRunBounds(run);
+ }
+
+ // Compute the glyph position bbox.
+ SkRect bounds;
+ switch (run.positioning()) {
+ case SkTextBlob::kHorizontal_Positioning: {
+ const SkScalar* glyphPos = run.posBuffer();
+ SkASSERT((void*)(glyphPos + run.glyphCount()) <= SkTextBlob::RunRecord::Next(&run));
+
+ SkScalar minX = *glyphPos;
+ SkScalar maxX = *glyphPos;
+ for (unsigned i = 1; i < run.glyphCount(); ++i) {
+ SkScalar x = glyphPos[i];
+ minX = SkMinScalar(x, minX);
+ maxX = SkMaxScalar(x, maxX);
+ }
+
+ bounds.setLTRB(minX, 0, maxX, 0);
+ } break;
+ case SkTextBlob::kFull_Positioning: {
+ const SkPoint* glyphPosPts = reinterpret_cast<const SkPoint*>(run.posBuffer());
+ SkASSERT((void*)(glyphPosPts + run.glyphCount()) <= SkTextBlob::RunRecord::Next(&run));
+
+ bounds.setBounds(glyphPosPts, run.glyphCount());
+ } break;
+ default:
+ SkFAIL("unsupported positioning mode");
+ }
+
+ // Expand by typeface glyph bounds.
+ bounds.fLeft += fontBounds.left();
+ bounds.fTop += fontBounds.top();
+ bounds.fRight += fontBounds.right();
+ bounds.fBottom += fontBounds.bottom();
+
+ // Offset by run position.
+ return bounds.makeOffset(run.offset().x(), run.offset().y());
+}
+
+void SkTextBlobBuilder::updateDeferredBounds() {
+ SkASSERT(!fDeferredBounds || fRunCount > 0);
+
+ if (!fDeferredBounds) {
+ return;
+ }
+
+ SkASSERT(fLastRun >= sizeof(SkTextBlob));
+ SkTextBlob::RunRecord* run = reinterpret_cast<SkTextBlob::RunRecord*>(fStorage.get() +
+ fLastRun);
+
+ // FIXME: we should also use conservative bounds for kDefault_Positioning.
+ SkRect runBounds = SkTextBlob::kDefault_Positioning == run->positioning() ?
+ TightRunBounds(*run) : ConservativeRunBounds(*run);
+ fBounds.join(runBounds);
+ fDeferredBounds = false;
+}
+
+void SkTextBlobBuilder::reserve(size_t size) {
+ // We don't currently pre-allocate, but maybe someday...
+ if (fStorageUsed + size <= fStorageSize) {
+ return;
+ }
+
+ if (0 == fRunCount) {
+ SkASSERT(nullptr == fStorage.get());
+ SkASSERT(0 == fStorageSize);
+ SkASSERT(0 == fStorageUsed);
+
+ // the first allocation also includes blob storage
+ fStorageUsed += sizeof(SkTextBlob);
+ }
+
+ fStorageSize = fStorageUsed + size;
+ // FYI: This relies on everything we store being relocatable, particularly SkPaint.
+ fStorage.realloc(fStorageSize);
+}
+
+bool SkTextBlobBuilder::mergeRun(const SkPaint &font, SkTextBlob::GlyphPositioning positioning,
+ int count, SkPoint offset) {
+ if (0 == fLastRun) {
+ SkASSERT(0 == fRunCount);
+ return false;
+ }
+
+ SkASSERT(fLastRun >= sizeof(SkTextBlob));
+ SkTextBlob::RunRecord* run = reinterpret_cast<SkTextBlob::RunRecord*>(fStorage.get() +
+ fLastRun);
+ SkASSERT(run->glyphCount() > 0);
+
+ if (run->textSize() != 0) {
+ return false;
+ }
+
+ if (run->positioning() != positioning
+ || run->font() != font
+ || (run->glyphCount() + count < run->glyphCount())) {
+ return false;
+ }
+
+ // we can merge same-font/same-positioning runs in the following cases:
+ // * fully positioned run following another fully positioned run
+    //  * horizontally positioned run following another horizontally positioned run with the same
+ // y-offset
+ if (SkTextBlob::kFull_Positioning != positioning
+ && (SkTextBlob::kHorizontal_Positioning != positioning
+ || run->offset().y() != offset.y())) {
+ return false;
+ }
+
+ size_t sizeDelta = SkTextBlob::RunRecord::StorageSize(run->glyphCount() + count, 0, positioning) -
+ SkTextBlob::RunRecord::StorageSize(run->glyphCount(), 0, positioning);
+ this->reserve(sizeDelta);
+
+ // reserve may have realloced
+ run = reinterpret_cast<SkTextBlob::RunRecord*>(fStorage.get() + fLastRun);
+ uint32_t preMergeCount = run->glyphCount();
+ run->grow(count);
+
+    // Callers expect the buffers to point at the newly added slice, and not at the beginning.
+ fCurrentRunBuffer.glyphs = run->glyphBuffer() + preMergeCount;
+ fCurrentRunBuffer.pos = run->posBuffer()
+ + preMergeCount * SkTextBlob::ScalarsPerGlyph(positioning);
+
+ fStorageUsed += sizeDelta;
+
+ SkASSERT(fStorageUsed <= fStorageSize);
+ run->validate(fStorage.get() + fStorageUsed);
+
+ return true;
+}
+
+void SkTextBlobBuilder::allocInternal(const SkPaint &font,
+ SkTextBlob::GlyphPositioning positioning,
+ int count, int textSize, SkPoint offset, const SkRect* bounds) {
+ SkASSERT(count > 0);
+ SkASSERT(textSize >= 0);
+ SkASSERT(SkPaint::kGlyphID_TextEncoding == font.getTextEncoding());
+ if (textSize != 0 || !this->mergeRun(font, positioning, count, offset)) {
+ this->updateDeferredBounds();
+
+ size_t runSize = SkTextBlob::RunRecord::StorageSize(count, textSize, positioning);
+ this->reserve(runSize);
+
+ SkASSERT(fStorageUsed >= sizeof(SkTextBlob));
+ SkASSERT(fStorageUsed + runSize <= fStorageSize);
+
+ SkTextBlob::RunRecord* run = new (fStorage.get() + fStorageUsed)
+ SkTextBlob::RunRecord(count, textSize, offset, font, positioning);
+ fCurrentRunBuffer.glyphs = run->glyphBuffer();
+ fCurrentRunBuffer.pos = run->posBuffer();
+ fCurrentRunBuffer.utf8text = run->textBuffer();
+ fCurrentRunBuffer.clusters = run->clusterBuffer();
+
+ fLastRun = fStorageUsed;
+ fStorageUsed += runSize;
+ fRunCount++;
+
+ SkASSERT(fStorageUsed <= fStorageSize);
+ run->validate(fStorage.get() + fStorageUsed);
+ }
+ SkASSERT(textSize > 0 || nullptr == fCurrentRunBuffer.utf8text);
+ SkASSERT(textSize > 0 || nullptr == fCurrentRunBuffer.clusters);
+ if (!fDeferredBounds) {
+ if (bounds) {
+ fBounds.join(*bounds);
+ } else {
+ fDeferredBounds = true;
+ }
+ }
+}
+
+const SkTextBlobBuilder::RunBuffer& SkTextBlobBuilder::allocRunText(const SkPaint& font, int count,
+ SkScalar x, SkScalar y,
+ int textByteCount,
+ SkString lang,
+ const SkRect* bounds) {
+ this->allocInternal(font, SkTextBlob::kDefault_Positioning, count, textByteCount, SkPoint::Make(x, y), bounds);
+ return fCurrentRunBuffer;
+}
+
+const SkTextBlobBuilder::RunBuffer& SkTextBlobBuilder::allocRunTextPosH(const SkPaint& font, int count,
+ SkScalar y,
+ int textByteCount,
+ SkString lang,
+ const SkRect* bounds) {
+ this->allocInternal(font, SkTextBlob::kHorizontal_Positioning, count, textByteCount, SkPoint::Make(0, y),
+ bounds);
+
+ return fCurrentRunBuffer;
+}
+
+const SkTextBlobBuilder::RunBuffer& SkTextBlobBuilder::allocRunTextPos(const SkPaint& font, int count,
+ int textByteCount,
+ SkString lang,
+ const SkRect *bounds) {
+ this->allocInternal(font, SkTextBlob::kFull_Positioning, count, textByteCount, SkPoint::Make(0, 0), bounds);
+
+ return fCurrentRunBuffer;
+}
+
+sk_sp<SkTextBlob> SkTextBlobBuilder::make() {
+ SkASSERT((fRunCount > 0) == (nullptr != fStorage.get()));
+
+ this->updateDeferredBounds();
+
+ if (0 == fRunCount) {
+ SkASSERT(nullptr == fStorage.get());
+ fStorageUsed = sizeof(SkTextBlob);
+ fStorage.realloc(fStorageUsed);
+ }
+
+ SkTextBlob* blob = new (fStorage.release()) SkTextBlob(fRunCount, fBounds);
+ SkDEBUGCODE(const_cast<SkTextBlob*>(blob)->fStorageSize = fStorageSize;)
+
+ SkDEBUGCODE(
+ size_t validateSize = sizeof(SkTextBlob);
+ const SkTextBlob::RunRecord* run = SkTextBlob::RunRecord::First(blob);
+ for (int i = 0; i < fRunCount; ++i) {
+ validateSize += SkTextBlob::RunRecord::StorageSize(
+ run->fCount, run->textSize(), run->fPositioning);
+ run->validate(reinterpret_cast<const uint8_t*>(blob) + fStorageUsed);
+ run = SkTextBlob::RunRecord::Next(run);
+ }
+ SkASSERT(validateSize == fStorageUsed);
+ )
+
+ fStorageUsed = 0;
+ fStorageSize = 0;
+ fRunCount = 0;
+ fLastRun = 0;
+ fBounds.setEmpty();
+
+ return sk_sp<SkTextBlob>(blob);
+}
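As a worked example of the storage layout described above: for a non-extended run of 3 fully-positioned glyphs (ScalarsPerGlyph(kFull_Positioning) == 2) and assuming 4-byte SkScalars, RunRecord::StorageSize computes

    sizeof(RunRecord)                   // the run header itself
    + SkAlign4(3 * sizeof(uint16_t))    // glyph IDs: 6 bytes, padded to 8
    + 3 * 2 * sizeof(SkScalar)          // interleaved x,y positions: 24 bytes
    = sizeof(RunRecord) + 32, rounded up to pointer alignment by SkAlignPtr().

An extended run would additionally append the 4-byte TextSize, one uint32_t cluster per glyph, and the UTF-8 text bytes.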
diff --git a/gfx/skia/skia/src/core/SkTextBlobRunIterator.h b/gfx/skia/skia/src/core/SkTextBlobRunIterator.h
new file mode 100644
index 000000000..2f1477bf0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTextBlobRunIterator.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkTextBlobRunIterator_DEFINED
+#define SkTextBlobRunIterator_DEFINED
+
+#include "SkTextBlob.h"
+
+/**
+ * Iterate through all of the text runs of the text blob. For example:
+ * for (SkTextBlobRunIterator it(blob); !it.done(); it.next()) {
+ * .....
+ * }
+ */
+class SkTextBlobRunIterator {
+public:
+ SkTextBlobRunIterator(const SkTextBlob* blob);
+
+ bool done() const;
+ void next();
+
+ uint32_t glyphCount() const;
+ const uint16_t* glyphs() const;
+ const SkScalar* pos() const;
+ const SkPoint& offset() const;
+ void applyFontToPaint(SkPaint*) const;
+ SkTextBlob::GlyphPositioning positioning() const;
+ uint32_t* clusters() const;
+ uint32_t textSize() const;
+ char* text() const;
+
+ bool isLCD() const;
+
+private:
+ const SkTextBlob::RunRecord* fCurrentRun;
+ int fRemainingRuns;
+
+ SkDEBUGCODE(uint8_t* fStorageTop;)
+};
+
+#endif // SkTextBlobRunIterator_DEFINED
diff --git a/gfx/skia/skia/src/core/SkTextFormatParams.h b/gfx/skia/skia/src/core/SkTextFormatParams.h
new file mode 100644
index 000000000..ef22c848b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTextFormatParams.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTextFormatParams_DEFINES
+#define SkTextFormatParams_DEFINES
+
+#include "SkScalar.h"
+#include "SkTypes.h"
+
+// Fraction of the text size to lower a strike through line below the baseline.
+#define kStdStrikeThru_Offset (-SK_Scalar1 * 6 / 21)
+// Fraction of the text size to lower an underline below the baseline.
+#define kStdUnderline_Offset (SK_Scalar1 / 9)
+// Fraction of the text size to use for a strike-through or underline.
+#define kStdUnderline_Thickness (SK_Scalar1 / 18)
+
+// The fraction of the text size by which fake bold text is emboldened scales with the text size.
+// At 9 points or below, the stroke width is increased by text size / 24.
+// At 36 points and above, it is increased by text size / 32. In between,
+// it is interpolated between those values.
+static const SkScalar kStdFakeBoldInterpKeys[] = {
+ SK_Scalar1*9,
+ SK_Scalar1*36,
+};
+static const SkScalar kStdFakeBoldInterpValues[] = {
+ SK_Scalar1/24,
+ SK_Scalar1/32,
+};
+static_assert(SK_ARRAY_COUNT(kStdFakeBoldInterpKeys) == SK_ARRAY_COUNT(kStdFakeBoldInterpValues),
+ "mismatched_array_size");
+static const int kStdFakeBoldInterpLength = SK_ARRAY_COUNT(kStdFakeBoldInterpKeys);
+
+#endif //SkTextFormatParams_DEFINES
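Assuming the two tables above are consumed by linear interpolation between the keys, an 18 pt text size sits a third of the way from 9 pt to 36 pt, so the emboldening fraction works out to

    1/24 + (1/32 - 1/24) * (18 - 9) / (36 - 9) = 11/288 ≈ 0.038

and the stroke width would be increased by roughly 18 * 0.038 ≈ 0.69 points.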
diff --git a/gfx/skia/skia/src/core/SkTextMapStateProc.h b/gfx/skia/skia/src/core/SkTextMapStateProc.h
new file mode 100644
index 000000000..944050917
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTextMapStateProc.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTextMapStateProc_DEFINED
+#define SkTextMapStateProc_DEFINED
+
+#include "SkPoint.h"
+#include "SkMatrix.h"
+
+class SkTextMapStateProc {
+public:
+ SkTextMapStateProc(const SkMatrix& matrix, const SkPoint& offset, int scalarsPerPosition)
+ : fMatrix(matrix)
+ , fProc(matrix.getMapXYProc())
+ , fOffset(offset)
+ , fScaleX(fMatrix.getScaleX()) {
+ SkASSERT(1 == scalarsPerPosition || 2 == scalarsPerPosition);
+ if (1 == scalarsPerPosition) {
+ unsigned mtype = fMatrix.getType();
+ if (mtype & (SkMatrix::kAffine_Mask | SkMatrix::kPerspective_Mask)) {
+ fMapCase = kX;
+ } else {
+ // Bake the matrix scale/translation components into fOffset,
+ // to expedite proc computations.
+ fOffset.set(SkScalarMul(offset.x(), fMatrix.getScaleX()) + fMatrix.getTranslateX(),
+ SkScalarMul(offset.y(), fMatrix.getScaleY()) + fMatrix.getTranslateY());
+
+ if (mtype & SkMatrix::kScale_Mask) {
+ fMapCase = kOnlyScaleX;
+ } else {
+ fMapCase = kOnlyTransX;
+ }
+ }
+ } else {
+ fMapCase = kXY;
+ }
+ }
+
+ void operator()(const SkScalar pos[], SkPoint* loc) const;
+
+private:
+ const SkMatrix& fMatrix;
+ enum {
+ kXY,
+ kOnlyScaleX,
+ kOnlyTransX,
+ kX
+ } fMapCase;
+ const SkMatrix::MapXYProc fProc;
+ SkPoint fOffset; // In kOnly* mode, this includes the matrix translation component.
+ SkScalar fScaleX; // This is only used by kOnly... cases.
+};
+
+inline void SkTextMapStateProc::operator()(const SkScalar pos[], SkPoint* loc) const {
+ switch(fMapCase) {
+ case kXY:
+ fProc(fMatrix, pos[0] + fOffset.x(), pos[1] + fOffset.y(), loc);
+ break;
+ case kOnlyScaleX:
+ loc->set(SkScalarMul(fScaleX, *pos) + fOffset.x(), fOffset.y());
+ break;
+ case kOnlyTransX:
+ loc->set(*pos + fOffset.x(), fOffset.y());
+ break;
+ default:
+ SkASSERT(false);
+ case kX:
+ fProc(fMatrix, *pos + fOffset.x(), fOffset.y(), loc);
+ break;
+ }
+}
+
+#endif
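A worked example of the single-scalar fast path above (hypothetical values): with a matrix of scale(2, 3) then translate(10, 20), an offset of (5, 7), and scalarsPerPosition == 1, the constructor bakes fOffset to (5*2 + 10, 7*3 + 20) = (20, 41) and selects kOnlyScaleX. For pos = 4 the proc yields loc = (2*4 + 20, 41) = (28, 41), which matches mapping the unbaked point (4 + 5, 7) through the full matrix: (2*9 + 10, 3*7 + 20) = (28, 41).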
diff --git a/gfx/skia/skia/src/core/SkTextToPathIter.h b/gfx/skia/skia/src/core/SkTextToPathIter.h
new file mode 100644
index 000000000..dcd5a01cd
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTextToPathIter.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTextToPathIter_DEFINED
+#define SkTextToPathIter_DEFINED
+
+#include "SkAutoKern.h"
+#include "SkPaint.h"
+
+class SkGlyphCache;
+
+class SkTextBaseIter {
+protected:
+ SkTextBaseIter(const char text[], size_t length, const SkPaint& paint,
+ bool applyStrokeAndPathEffects);
+ ~SkTextBaseIter();
+
+ SkGlyphCache* fCache;
+ SkPaint fPaint;
+ SkScalar fScale;
+ SkScalar fPrevAdvance;
+ const char* fText;
+ const char* fStop;
+ SkPaint::GlyphCacheProc fGlyphCacheProc;
+
+ SkScalar fXPos; // accumulated xpos, returned in next
+ SkAutoKern fAutoKern;
+ int fXYIndex; // cache for horizontal -vs- vertical text
+};
+
+class SkTextToPathIter : SkTextBaseIter {
+public:
+ SkTextToPathIter(const char text[], size_t length, const SkPaint& paint,
+ bool applyStrokeAndPathEffects)
+ : SkTextBaseIter(text, length, paint, applyStrokeAndPathEffects) {
+ }
+
+ const SkPaint& getPaint() const { return fPaint; }
+ SkScalar getPathScale() const { return fScale; }
+
+ /**
+ * Returns false when all of the text has been consumed
+ */
+ bool next(const SkPath** path, SkScalar* xpos);
+};
+
+class SkTextInterceptsIter : SkTextBaseIter {
+public:
+ enum class TextType {
+ kText,
+ kPosText
+ };
+
+ SkTextInterceptsIter(const char text[], size_t length, const SkPaint& paint,
+ const SkScalar bounds[2], SkScalar x, SkScalar y, TextType textType)
+ : SkTextBaseIter(text, length, paint, false)
+ , fTextType(textType) {
+ fBoundsBase[0] = bounds[0];
+ fBoundsBase[1] = bounds[1];
+ this->setPosition(x, y);
+ }
+
+ /**
+ * Returns false when all of the text has been consumed
+ */
+ bool next(SkScalar* array, int* count);
+
+ void setPosition(SkScalar x, SkScalar y) {
+ SkScalar xOffset = TextType::kText == fTextType && fXYIndex ? fXPos : 0;
+ if (TextType::kPosText == fTextType
+ && fPaint.getTextAlign() != SkPaint::kLeft_Align) { // need to measure first
+ const char* text = fText;
+ const SkGlyph& glyph = fGlyphCacheProc(fCache, &text);
+ SkScalar width = SkScalarMul(SkFloatToScalar((&glyph.fAdvanceX)[0]), fScale);
+ if (fPaint.getTextAlign() == SkPaint::kCenter_Align) {
+ width = SkScalarHalf(width);
+ }
+ xOffset = width;
+ }
+
+ for (int i = 0; i < (int) SK_ARRAY_COUNT(fBounds); ++i) {
+ SkScalar bound = fBoundsBase[i] - (fXYIndex ? x : y);
+ if (fXYIndex) {
+ bound += xOffset;
+ }
+ fBounds[i] = bound / fScale;
+ }
+
+ fXPos = xOffset + (fXYIndex ? y : x);
+ fPrevAdvance = 0;
+ }
+
+private:
+ SkScalar fBounds[2];
+ SkScalar fBoundsBase[2];
+ TextType fTextType;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkThreadID.cpp b/gfx/skia/skia/src/core/SkThreadID.cpp
new file mode 100644
index 000000000..948884d0c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkThreadID.cpp
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkLeanWindows.h"
+#include "SkThreadID.h"
+
+#ifdef SK_BUILD_FOR_WIN
+ SkThreadID SkGetThreadID() { return GetCurrentThreadId(); }
+#else
+ #include <pthread.h>
+ SkThreadID SkGetThreadID() { return (int64_t)pthread_self(); }
+#endif
diff --git a/gfx/skia/skia/src/core/SkTime.cpp b/gfx/skia/skia/src/core/SkTime.cpp
new file mode 100644
index 000000000..a496a22b5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTime.cpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkLeanWindows.h"
+#include "SkString.h"
+#include "SkTime.h"
+#include "SkTypes.h"
+
+void SkTime::DateTime::toISO8601(SkString* dst) const {
+ if (dst) {
+ int timeZoneMinutes = SkToInt(fTimeZoneMinutes);
+ char timezoneSign = timeZoneMinutes >= 0 ? '+' : '-';
+ int timeZoneHours = SkTAbs(timeZoneMinutes) / 60;
+ timeZoneMinutes = SkTAbs(timeZoneMinutes) % 60;
+ dst->printf("%04u-%02u-%02uT%02u:%02u:%02u%c%02d:%02d",
+ static_cast<unsigned>(fYear), static_cast<unsigned>(fMonth),
+ static_cast<unsigned>(fDay), static_cast<unsigned>(fHour),
+ static_cast<unsigned>(fMinute),
+ static_cast<unsigned>(fSecond), timezoneSign, timeZoneHours,
+ timeZoneMinutes);
+ }
+}
+
+#ifdef SK_BUILD_FOR_WIN32
+
+void SkTime::GetDateTime(DateTime* dt) {
+ if (dt) {
+ SYSTEMTIME st;
+ GetSystemTime(&st);
+ dt->fTimeZoneMinutes = 0;
+ dt->fYear = st.wYear;
+ dt->fMonth = SkToU8(st.wMonth);
+ dt->fDayOfWeek = SkToU8(st.wDayOfWeek);
+ dt->fDay = SkToU8(st.wDay);
+ dt->fHour = SkToU8(st.wHour);
+ dt->fMinute = SkToU8(st.wMinute);
+ dt->fSecond = SkToU8(st.wSecond);
+ }
+}
+
+#else // SK_BUILD_FOR_WIN32
+
+#include <time.h>
+void SkTime::GetDateTime(DateTime* dt) {
+ if (dt) {
+ time_t m_time;
+ time(&m_time);
+ struct tm* tstruct;
+ tstruct = gmtime(&m_time);
+ dt->fTimeZoneMinutes = 0;
+ dt->fYear = tstruct->tm_year + 1900;
+ dt->fMonth = SkToU8(tstruct->tm_mon + 1);
+ dt->fDayOfWeek = SkToU8(tstruct->tm_wday);
+ dt->fDay = SkToU8(tstruct->tm_mday);
+ dt->fHour = SkToU8(tstruct->tm_hour);
+ dt->fMinute = SkToU8(tstruct->tm_min);
+ dt->fSecond = SkToU8(tstruct->tm_sec);
+ }
+}
+#endif // SK_BUILD_FOR_WIN32
+
+#if defined(SK_BUILD_FOR_UNIX) || defined(SK_BUILD_FOR_ANDROID)
+#include <time.h>
+double SkTime::GetNSecs() {
+ struct timespec ts;
+ if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
+ return 0.0;
+ }
+ return ts.tv_sec * 1e9 + ts.tv_nsec;
+}
+#else
+#include <chrono>
+double SkTime::GetNSecs() {
+ auto now = std::chrono::high_resolution_clock::now();
+ std::chrono::duration<double, std::nano> ns = now.time_since_epoch();
+ return ns.count();
+}
+#endif
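For example (hypothetical field values), a DateTime of 2017-03-09 14:05:07 with fTimeZoneMinutes = -480 serializes via toISO8601() as "2017-03-09T14:05:07-08:00"; GetDateTime() always reports UTC (fTimeZoneMinutes = 0), so its output ends in "+00:00".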
diff --git a/gfx/skia/skia/src/core/SkTraceEvent.h b/gfx/skia/skia/src/core/SkTraceEvent.h
new file mode 100644
index 000000000..05dc340fa
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTraceEvent.h
@@ -0,0 +1,534 @@
+// Copyright (c) 2014 Google Inc.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header file defines implementation details of how the trace macros in
+// SkTraceEventCommon.h collect and store trace events. Anything not
+// implementation-specific should go in SkTraceEventCommon.h instead of here.
+
+#ifndef SkTraceEvent_DEFINED
+#define SkTraceEvent_DEFINED
+
+#include "SkAtomics.h"
+#include "SkEventTracer.h"
+#include "SkTraceEventCommon.h"
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementation specific tracing API definitions.
+
+// By default, const char* argument values are assumed to have long-lived scope
+// and will not be copied. Use this macro to force a const char* to be copied.
+#define TRACE_STR_COPY(str) \
+ skia::tracing_internals::TraceStringWithCopy(str)
+
+// By default, uint64 ID argument values are not mangled with the Process ID in
+// TRACE_EVENT_ASYNC macros. Use this macro to force Process ID mangling.
+#define TRACE_ID_MANGLE(id) \
+ skia::tracing_internals::TraceID::ForceMangle(id)
+
+// By default, pointers are mangled with the Process ID in TRACE_EVENT_ASYNC
+// macros. Use this macro to prevent Process ID mangling.
+#define TRACE_ID_DONT_MANGLE(id) \
+ skia::tracing_internals::TraceID::DontMangle(id)
+
+// Sets the current sample state to the given category and name (both must be
+// constant strings). These states are intended for a sampling profiler.
+// Implementation note: we store category and name together because we don't
+// want the inconsistency/expense of storing two pointers.
+// |thread_bucket| is [0..2] and is used to statically isolate samples in one
+// thread from others.
+#define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET( \
+ bucket_number, category, name) \
+ skia::tracing_internals:: \
+ TraceEventSamplingStateScope<bucket_number>::Set(category "\0" name)
+
+// Returns a current sampling state of the given bucket.
+#define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \
+ skia::tracing_internals::TraceEventSamplingStateScope<bucket_number>::Current()
+
+// Creates a scope of a sampling state of the given bucket.
+//
+// { // The sampling state is set within this scope.
+// TRACE_EVENT_SAMPLING_STATE_SCOPE_FOR_BUCKET(0, "category", "name");
+// ...;
+// }
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET( \
+ bucket_number, category, name) \
+ skia::tracing_internals::TraceEventSamplingStateScope<bucket_number> \
+ traceEventSamplingScope(category "\0" name);
+
+
+#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
+ *INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
+ (SkEventTracer::kEnabledForRecording_CategoryGroupEnabledFlags | \
+ SkEventTracer::kEnabledForEventCallback_CategoryGroupEnabledFlags)
+
+// Get a pointer to the enabled state of the given trace category. Only
+// long-lived literal strings should be given as the category group. The
+// returned pointer can be held permanently in a local static for example. If
+// the unsigned char is non-zero, tracing is enabled. If tracing is enabled,
+// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled
+// between the load of the tracing state and the call to
+// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out
+// for best performance when tracing is disabled.
+// const uint8_t*
+// TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(const char* category_group)
+#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED \
+ SkEventTracer::GetInstance()->getCategoryGroupEnabled
+
+// Get the number of times traces have been recorded. This is used to implement
+// the TRACE_EVENT_IS_NEW_TRACE facility.
+// unsigned int TRACE_EVENT_API_GET_NUM_TRACES_RECORDED()
+#define TRACE_EVENT_API_GET_NUM_TRACES_RECORDED \
+ SkEventTracer::GetInstance()->getNumTracesRecorded
+
+// Add a trace event to the platform tracing system.
+// SkEventTracer::Handle TRACE_EVENT_API_ADD_TRACE_EVENT(
+// char phase,
+// const uint8_t* category_group_enabled,
+// const char* name,
+// uint64_t id,
+// int num_args,
+// const char** arg_names,
+// const uint8_t* arg_types,
+// const uint64_t* arg_values,
+// unsigned char flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT \
+ SkEventTracer::GetInstance()->addTraceEvent
+
+// Set the duration field of a COMPLETE trace event.
+// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+// const uint8_t* category_group_enabled,
+// const char* name,
+// SkEventTracer::Handle id)
+#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \
+ SkEventTracer::GetInstance()->updateTraceEventDuration
+
+#define TRACE_EVENT_API_ATOMIC_WORD intptr_t
+#define TRACE_EVENT_API_ATOMIC_LOAD(var) sk_atomic_load(&var, sk_memory_order_relaxed)
+#define TRACE_EVENT_API_ATOMIC_STORE(var, value) \
+ sk_atomic_store(&var, value, sk_memory_order_relaxed)
+
+// Defines visibility for classes in trace_event.h
+#define TRACE_EVENT_API_CLASS_EXPORT SK_API
+
+// The thread buckets for the sampling profiler.
+TRACE_EVENT_API_CLASS_EXPORT extern \
+ TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
+
+#define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket) \
+ g_trace_state[thread_bucket]
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Implementation detail: trace event macros create temporary variables
+// to keep instrumentation overhead low. These macros give each temporary
+// variable a unique name based on the line number to prevent name collisions.
+#define INTERNAL_TRACE_EVENT_UID3(a,b) \
+ trace_event_unique_##a##b
+#define INTERNAL_TRACE_EVENT_UID2(a,b) \
+ INTERNAL_TRACE_EVENT_UID3(a,b)
+#define INTERNAL_TRACE_EVENT_UID(name_prefix) \
+ INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__)
+
+// Implementation detail: internal macro to create static category.
+// No barriers are needed, because this code is designed to operate safely
+// even when the unsigned char* points to garbage data (which may be the case
+// on processors without cache coherency).
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
+ category_group, atomic, category_group_enabled) \
+ category_group_enabled = \
+ reinterpret_cast<const uint8_t*>(TRACE_EVENT_API_ATOMIC_LOAD( \
+ atomic)); \
+ if (!category_group_enabled) { \
+ category_group_enabled = \
+ TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group); \
+ TRACE_EVENT_API_ATOMIC_STORE(atomic, \
+ reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( \
+ category_group_enabled)); \
+ }
+
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group) \
+ static TRACE_EVENT_API_ATOMIC_WORD INTERNAL_TRACE_EVENT_UID(atomic) = 0; \
+ const uint8_t* INTERNAL_TRACE_EVENT_UID(category_group_enabled); \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(category_group, \
+ INTERNAL_TRACE_EVENT_UID(atomic), \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled));
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ skia::tracing_internals::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ skia::tracing_internals::kNoEventId, flags, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Implementation detail: internal macro to create static category and add begin
+// event if the category is enabled. Also adds the end event when the scope
+// ends.
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ skia::tracing_internals::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ SkEventTracer::Handle h = skia::tracing_internals::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
+ name, skia::tracing_internals::kNoEventId, \
+ TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \
+ INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
+ }
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
+ flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ unsigned char trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
+ skia::tracing_internals::TraceID trace_event_trace_id( \
+ id, &trace_event_flags); \
+ skia::tracing_internals::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
+ name, trace_event_trace_id.data(), trace_event_flags, \
+ ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(phase, \
+ category_group, name, id, thread_id, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ unsigned char trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
+ skia::tracing_internals::TraceID trace_event_trace_id( \
+ id, &trace_event_flags); \
+ skia::tracing_internals::AddTraceEventWithThreadIdAndTimestamp( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
+ name, trace_event_trace_id.data(), \
+ thread_id, base::TimeTicks::FromInternalValue(timestamp), \
+ trace_event_flags, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+#define INTERNAL_TRACE_MEMORY(category, name)
+
+namespace skia {
+namespace tracing_internals {
+
+// Specify these values when the corresponding argument of AddTraceEvent is not
+// used.
+const int kZeroNumArgs = 0;
+const uint64_t kNoEventId = 0;
+
+// TraceID encapsulates an ID that can either be an integer or pointer. Pointers
+// are by default mangled with the Process ID so that they are unlikely to
+// collide when the same pointer is used on different processes.
+class TraceID {
+ public:
+ class DontMangle {
+ public:
+ explicit DontMangle(const void* id)
+ : data_(static_cast<uint64_t>(
+ reinterpret_cast<uintptr_t>(id))) {}
+ explicit DontMangle(uint64_t id) : data_(id) {}
+ explicit DontMangle(unsigned int id) : data_(id) {}
+ explicit DontMangle(unsigned short id) : data_(id) {}
+ explicit DontMangle(unsigned char id) : data_(id) {}
+ explicit DontMangle(long long id)
+ : data_(static_cast<uint64_t>(id)) {}
+ explicit DontMangle(long id)
+ : data_(static_cast<uint64_t>(id)) {}
+ explicit DontMangle(int id)
+ : data_(static_cast<uint64_t>(id)) {}
+ explicit DontMangle(short id)
+ : data_(static_cast<uint64_t>(id)) {}
+ explicit DontMangle(signed char id)
+ : data_(static_cast<uint64_t>(id)) {}
+ uint64_t data() const { return data_; }
+ private:
+ uint64_t data_;
+ };
+
+ class ForceMangle {
+ public:
+ explicit ForceMangle(uint64_t id) : data_(id) {}
+ explicit ForceMangle(unsigned int id) : data_(id) {}
+ explicit ForceMangle(unsigned short id) : data_(id) {}
+ explicit ForceMangle(unsigned char id) : data_(id) {}
+ explicit ForceMangle(long long id)
+ : data_(static_cast<uint64_t>(id)) {}
+ explicit ForceMangle(long id)
+ : data_(static_cast<uint64_t>(id)) {}
+ explicit ForceMangle(int id)
+ : data_(static_cast<uint64_t>(id)) {}
+ explicit ForceMangle(short id)
+ : data_(static_cast<uint64_t>(id)) {}
+ explicit ForceMangle(signed char id)
+ : data_(static_cast<uint64_t>(id)) {}
+ uint64_t data() const { return data_; }
+ private:
+ uint64_t data_;
+ };
+
+ TraceID(const void* id, unsigned char* flags)
+ : data_(static_cast<uint64_t>(
+ reinterpret_cast<uintptr_t>(id))) {
+ *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+ }
+ TraceID(ForceMangle id, unsigned char* flags) : data_(id.data()) {
+ *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+ }
+ TraceID(DontMangle id, unsigned char* flags) : data_(id.data()) {
+ }
+ TraceID(uint64_t id, unsigned char* flags)
+ : data_(id) { (void)flags; }
+ TraceID(unsigned int id, unsigned char* flags)
+ : data_(id) { (void)flags; }
+ TraceID(unsigned short id, unsigned char* flags)
+ : data_(id) { (void)flags; }
+ TraceID(unsigned char id, unsigned char* flags)
+ : data_(id) { (void)flags; }
+ TraceID(long long id, unsigned char* flags)
+ : data_(static_cast<uint64_t>(id)) { (void)flags; }
+ TraceID(long id, unsigned char* flags)
+ : data_(static_cast<uint64_t>(id)) { (void)flags; }
+ TraceID(int id, unsigned char* flags)
+ : data_(static_cast<uint64_t>(id)) { (void)flags; }
+ TraceID(short id, unsigned char* flags)
+ : data_(static_cast<uint64_t>(id)) { (void)flags; }
+ TraceID(signed char id, unsigned char* flags)
+ : data_(static_cast<uint64_t>(id)) { (void)flags; }
+
+ uint64_t data() const { return data_; }
+
+ private:
+ uint64_t data_;
+};
+
+// Simple union to store various types as uint64_t.
+union TraceValueUnion {
+ bool as_bool;
+ uint64_t as_uint;
+ long long as_int;
+ double as_double;
+ const void* as_pointer;
+ const char* as_string;
+};
+
+// Simple container for const char* that should be copied instead of retained.
+class TraceStringWithCopy {
+ public:
+ explicit TraceStringWithCopy(const char* str) : str_(str) {}
+ operator const char* () const { return str_; }
+ private:
+ const char* str_;
+};
+
+// Define SetTraceValue for each allowed type. It stores the type and
+// value in the return arguments. This allows this API to avoid declaring any
+// structures so that it is portable to third_party libraries.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, \
+ union_member, \
+ value_type_id) \
+ static inline void SetTraceValue( \
+ actual_type arg, \
+ unsigned char* type, \
+ uint64_t* value) { \
+ TraceValueUnion type_value; \
+ type_value.union_member = arg; \
+ *type = value_type_id; \
+ *value = type_value.as_uint; \
+ }
+// Simpler form for int types that can be safely cast.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, \
+ value_type_id) \
+ static inline void SetTraceValue( \
+ actual_type arg, \
+ unsigned char* type, \
+ uint64_t* value) { \
+ *type = value_type_id; \
+ *value = static_cast<uint64_t>(arg); \
+ }
+
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint64_t, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned short, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long long, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(short, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE(bool, as_bool, TRACE_VALUE_TYPE_BOOL)
+INTERNAL_DECLARE_SET_TRACE_VALUE(double, as_double, TRACE_VALUE_TYPE_DOUBLE)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, as_pointer,
+ TRACE_VALUE_TYPE_POINTER)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, as_string,
+ TRACE_VALUE_TYPE_STRING)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, as_string,
+ TRACE_VALUE_TYPE_COPY_STRING)
+
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
+
+// These AddTraceEvent template
+// functions are defined here instead of in the macro, because the arg_values
+// could be temporary objects, such as std::string. In order to store
+// pointers to the internal c_str and pass through to the tracing API,
+// the arg_values must live throughout these procedures.
+
+static inline SkEventTracer::Handle
+AddTraceEvent(
+ char phase,
+ const uint8_t* category_group_enabled,
+ const char* name,
+ uint64_t id,
+ unsigned char flags) {
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(
+ phase, category_group_enabled, name, id,
+ kZeroNumArgs, nullptr, nullptr, nullptr, flags);
+}
+
+template<class ARG1_TYPE>
+static inline SkEventTracer::Handle
+AddTraceEvent(
+ char phase,
+ const uint8_t* category_group_enabled,
+ const char* name,
+ uint64_t id,
+ unsigned char flags,
+ const char* arg1_name,
+ const ARG1_TYPE& arg1_val) {
+ const int num_args = 1;
+ uint8_t arg_types[1];
+ uint64_t arg_values[1];
+ SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(
+ phase, category_group_enabled, name, id,
+ num_args, &arg1_name, arg_types, arg_values, flags);
+}
+
+template<class ARG1_TYPE, class ARG2_TYPE>
+static inline SkEventTracer::Handle
+AddTraceEvent(
+ char phase,
+ const uint8_t* category_group_enabled,
+ const char* name,
+ uint64_t id,
+ unsigned char flags,
+ const char* arg1_name,
+ const ARG1_TYPE& arg1_val,
+ const char* arg2_name,
+ const ARG2_TYPE& arg2_val) {
+ const int num_args = 2;
+ const char* arg_names[2] = { arg1_name, arg2_name };
+ unsigned char arg_types[2];
+ uint64_t arg_values[2];
+ SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+ SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(
+ phase, category_group_enabled, name, id,
+ num_args, arg_names, arg_types, arg_values, flags);
+}
+
+// Used by TRACE_EVENTx macros. Do not use directly.
+class TRACE_EVENT_API_CLASS_EXPORT ScopedTracer {
+ public:
+ // Note: members of data_ intentionally left uninitialized. See Initialize.
+ ScopedTracer() : p_data_(nullptr) {}
+
+ ~ScopedTracer() {
+ if (p_data_ && *data_.category_group_enabled)
+ TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+ data_.category_group_enabled, data_.name, data_.event_handle);
+ }
+
+ void Initialize(const uint8_t* category_group_enabled,
+ const char* name,
+ SkEventTracer::Handle event_handle) {
+ data_.category_group_enabled = category_group_enabled;
+ data_.name = name;
+ data_.event_handle = event_handle;
+ p_data_ = &data_;
+ }
+
+ private:
+  // This Data struct workaround avoids initializing all the members of Data
+  // during construction of this object, since this object is always
+  // constructed, even when tracing is disabled. If the members of Data were
+  // members of this class instead, the compiler would warn about potential
+  // uninitialized accesses.
+ struct Data {
+ const uint8_t* category_group_enabled;
+ const char* name;
+ SkEventTracer::Handle event_handle;
+ };
+ Data* p_data_;
+ Data data_;
+};
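+
+// For illustration only, a rough sketch of how a TRACE_EVENTx macro is
+// expected to drive this class (this is not the actual
+// INTERNAL_TRACE_EVENT_ADD_SCOPED definition; the id value 0 is an assumption
+// made for the sketch):
+//   skia::tracing_internals::ScopedTracer tracer;
+//   if (*category_group_enabled) {
+//     SkEventTracer::Handle h = skia::tracing_internals::AddTraceEvent(
+//         TRACE_EVENT_PHASE_COMPLETE, category_group_enabled, name,
+//         /*id=*/0, TRACE_EVENT_FLAG_NONE);
+//     tracer.Initialize(category_group_enabled, name, h);
+//   }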
+
+// Used by TRACE_EVENT_BINARY_EFFICIENTx macro. Do not use directly.
+class TRACE_EVENT_API_CLASS_EXPORT ScopedTraceBinaryEfficient {
+ public:
+ ScopedTraceBinaryEfficient(const char* category_group, const char* name);
+ ~ScopedTraceBinaryEfficient();
+
+ private:
+ const uint8_t* category_group_enabled_;
+ const char* name_;
+ SkEventTracer::Handle event_handle_;
+};
+
+// This macro generates less code than TRACE_EVENT0 but is also
+// slower to execute when tracing is off. It should generally only be
+// used with code that is seldom executed or conditionally executed
+// when debugging.
+// For now the category_group must be "gpu".
+#define TRACE_EVENT_BINARY_EFFICIENT0(category_group, name) \
+ skia::tracing_internals::ScopedTraceBinaryEfficient \
+ INTERNAL_TRACE_EVENT_UID(scoped_trace)(category_group, name);
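+
+// For illustration only, a minimal call-site sketch of the macro above (the
+// function name below is a made-up example, not part of this header):
+//   void ExampleRarelyCalledGpuHelper() {
+//     TRACE_EVENT_BINARY_EFFICIENT0("gpu", "ExampleRarelyCalledGpuHelper");
+//     // ... seldom-executed work ...
+//   }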
+
+// TraceEventSamplingStateScope records the current sampling state
+// and sets a new sampling state. When the scope exits, it restores
+// the previously recorded sampling state.
+template<size_t BucketNumber>
+class TraceEventSamplingStateScope {
+ public:
+ TraceEventSamplingStateScope(const char* category_and_name) {
+ previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current();
+ TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name);
+ }
+
+ ~TraceEventSamplingStateScope() {
+ TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_);
+ }
+
+ static inline const char* Current() {
+ return reinterpret_cast<const char*>(TRACE_EVENT_API_ATOMIC_LOAD(
+ g_trace_state[BucketNumber]));
+ }
+
+ static inline void Set(const char* category_and_name) {
+ TRACE_EVENT_API_ATOMIC_STORE(
+ g_trace_state[BucketNumber],
+ reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(
+ const_cast<char*>(category_and_name)));
+ }
+
+ private:
+ const char* previous_state_;
+};
+
+} // namespace tracing_internals
+} // namespace skia
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTraceEventCommon.h b/gfx/skia/skia/src/core/SkTraceEventCommon.h
new file mode 100644
index 000000000..57f67775e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTraceEventCommon.h
@@ -0,0 +1,1039 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header file defines the set of trace_event macros without specifying
+// how the events actually get collected and stored. If you need to expose trace
+// events to some other universe, you can copy-and-paste this file as well as
+// trace_event.h, modifying the macros contained there as necessary for the
+// target platform. The end result is that multiple libraries can funnel events
+// through to a shared trace event collector.
+
+// IMPORTANT: To avoid conflicts, if you need to modify this file for a library,
+// land your change in base/ first, and then copy-and-paste it.
+
+// Trace events are for tracking application performance and resource usage.
+// Macros are provided to track:
+// Begin and end of function calls
+// Counters
+//
+// Events are issued against categories. Whereas LOG's
+// categories are statically defined, TRACE categories are created
+// implicitly with a string. For example:
+// TRACE_EVENT_INSTANT0("MY_SUBSYSTEM", "SomeImportantEvent",
+// TRACE_EVENT_SCOPE_THREAD)
+//
+// It is often the case that one trace may belong in multiple categories at the
+// same time. The first argument to the trace can be a comma-separated list of
+// categories, forming a category group, like:
+//
+// TRACE_EVENT_INSTANT0("input,views", "OnMouseOver", TRACE_EVENT_SCOPE_THREAD)
+//
+// We can enable/disable tracing of OnMouseOver by enabling/disabling either
+// category.
+//
+// Events can be INSTANT, or can be pairs of BEGIN and END in the same scope:
+// TRACE_EVENT_BEGIN0("MY_SUBSYSTEM", "SomethingCostly")
+// doSomethingCostly()
+// TRACE_EVENT_END0("MY_SUBSYSTEM", "SomethingCostly")
+// Note: our tools can't always determine the correct BEGIN/END pairs unless
+// these are used in the same scope. Use ASYNC_BEGIN/ASYNC_END macros if you
+// need them to be in separate scopes.
+//
+// A common use case is to trace entire function scopes. This
+// issues a trace BEGIN and END automatically:
+// void doSomethingCostly() {
+// TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly");
+// ...
+// }
+//
+// Additional parameters can be associated with an event:
+// void doSomethingCostly2(int howMuch) {
+// TRACE_EVENT1("MY_SUBSYSTEM", "doSomethingCostly",
+// "howMuch", howMuch);
+// ...
+// }
+//
+// The trace system will automatically add to this information the
+// current process id, thread id, and a timestamp in microseconds.
+//
+// To trace an asynchronous procedure such as an IPC send/receive, use
+// ASYNC_BEGIN and ASYNC_END:
+// [single threaded sender code]
+// static int send_count = 0;
+// ++send_count;
+// TRACE_EVENT_ASYNC_BEGIN0("ipc", "message", send_count);
+// Send(new MyMessage(send_count));
+// [receive code]
+// void OnMyMessage(send_count) {
+// TRACE_EVENT_ASYNC_END0("ipc", "message", send_count);
+// }
+// The third parameter is a unique ID to match ASYNC_BEGIN/ASYNC_END pairs.
+// ASYNC_BEGIN and ASYNC_END can occur on any thread of any traced process.
+// Pointers can be used for the ID parameter, and they will be mangled
+// internally so that the same pointer on two different processes will not
+// match. For example:
+// class MyTracedClass {
+// public:
+// MyTracedClass() {
+// TRACE_EVENT_ASYNC_BEGIN0("category", "MyTracedClass", this);
+// }
+// ~MyTracedClass() {
+// TRACE_EVENT_ASYNC_END0("category", "MyTracedClass", this);
+// }
+// }
+//
+// Trace events also support counters, which are a way to track a quantity
+// as it varies over time. Counters are created with the following macro:
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter", g_myCounterValue);
+//
+// Counters are process-specific. The macro itself can be issued from any
+// thread, however.
+//
+// Sometimes, you want to track two counters at once. You can do this with two
+// counter macros:
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter0", g_myCounterValue[0]);
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter1", g_myCounterValue[1]);
+// Or you can do it with a combined macro:
+// TRACE_COUNTER2("MY_SUBSYSTEM", "myCounter",
+// "bytesPinned", g_myCounterValue[0],
+// "bytesAllocated", g_myCounterValue[1]);
+// This indicates to the tracing UI that these counters should be displayed
+// in a single graph, as a summed area chart.
+//
+// Since counters are in a global namespace, you may want to disambiguate with a
+// unique ID, by using the TRACE_COUNTER_ID* variations.
+//
+// By default, trace collection is compiled in, but turned off at runtime.
+// Collecting trace data is the responsibility of the embedding
+// application. In Chrome's case, navigating to about:tracing will turn on
+// tracing and display data collected across all active processes.
+//
+//
+// Memory scoping note:
+// Tracing copies the pointers, not the string content, of the strings passed
+// in for category_group, name, and arg_names. Thus, the following code will
+// cause problems:
+// char* str = strdup("importantName");
+//     TRACE_EVENT_INSTANT0("SUBSYSTEM", str, TRACE_EVENT_SCOPE_THREAD);  // BAD!
+// free(str); // Trace system now has dangling pointer
+//
+// To avoid this issue with the |name| and |arg_name| parameters, use the
+// TRACE_EVENT_COPY_XXX overloads of the macros at the cost of additional
+// runtime overhead.
+// Notes: The category must always be in a long-lived char* (i.e. static const).
+// The |arg_values|, when used, are always deep copied with the _COPY
+// macros.
+//
+// When are string argument values copied:
+// const char* arg_values are only referenced by default:
+// TRACE_EVENT1("category", "name",
+// "arg1", "literal string is only referenced");
+// Use TRACE_STR_COPY to force copying of a const char*:
+// TRACE_EVENT1("category", "name",
+// "arg1", TRACE_STR_COPY("string will be copied"));
+// std::string arg_values are always copied:
+// TRACE_EVENT1("category", "name",
+// "arg1", std::string("string will be copied"));
+//
+//
+// Convertable notes:
+// Converting a large data type to a string can be costly. To help with this,
+// the trace framework provides an interface ConvertableToTraceFormat. If you
+// inherit from it and implement the AppendAsTraceFormat method, the trace
+// framework will call back to your object to convert it at trace output time.
+// This means that if the category for the event is disabled, the conversion
+// will not happen.
+//
+// class MyData : public base::trace_event::ConvertableToTraceFormat {
+// public:
+// MyData() {}
+// void AppendAsTraceFormat(std::string* out) const override {
+// out->append("{\"foo\":1}");
+// }
+// private:
+// ~MyData() override {}
+// DISALLOW_COPY_AND_ASSIGN(MyData);
+// };
+//
+// TRACE_EVENT1("foo", "bar", "data",
+// scoped_refptr<ConvertableToTraceFormat>(new MyData()));
+//
+// The trace framework will take ownership of the passed pointer, and it will
+// be freed when the trace buffer is flushed.
+//
+// Note, we only do the conversion when the buffer is flushed, so the provided
+// data object should not be modified after it's passed to the trace framework.
+//
+//
+// Thread Safety:
+// A thread safe singleton and mutex are used for thread safety. Category
+// enabled flags are used to limit the performance impact when the system
+// is not enabled.
+//
+// TRACE_EVENT macros first cache a pointer to a category. The categories are
+// statically allocated and safe at all times, even after exit. Fetching a
+// category is protected by the TraceLog::lock_. Multiple threads initializing
+// the static variable is safe, as they will be serialized by the lock and
+// multiple calls will return the same pointer to the category.
+//
+// Then the category_group_enabled flag is checked. This is an unsigned char, and
+// not intended to be multithread safe. It optimizes access to AddTraceEvent
+// which is threadsafe internally via TraceLog::lock_. The enabled flag may
+// cause some threads to incorrectly call or skip calling AddTraceEvent near
+// the time of the system being enabled or disabled. This is acceptable as
+// we tolerate some data loss while the system is being enabled/disabled and
+// because AddTraceEvent is threadsafe internally and checks the enabled state
+// again under lock.
+//
+// Without the use of these static category pointers and enabled flags all
+// trace points would carry a significant performance cost of acquiring a lock
+// and resolving the category.
+
+#if defined(TRACE_EVENT0)
+#error "Another copy of this file has already been included."
+#endif
+
+// This marks a trace category as disabled by default. The user will need
+// to explicitly enable such categories when starting a trace.
+#define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name
+
+// Records a pair of begin and end events called "name" for the current
+// scope, with 0, 1 or 2 associated arguments. If the category is not
+// enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT0(category_group, name) \
+ INTERNAL_TRACE_MEMORY(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name)
+#define TRACE_EVENT_WITH_FLOW0(category_group, name, bind_id, flow_flags) \
+ INTERNAL_TRACE_MEMORY(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+ flow_flags)
+#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_MEMORY(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val)
+#define TRACE_EVENT_WITH_FLOW1(category_group, name, bind_id, flow_flags, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_MEMORY(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+ flow_flags, arg1_name, arg1_val)
+#define TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, \
+ arg2_val) \
+ INTERNAL_TRACE_MEMORY(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_WITH_FLOW2(category_group, name, bind_id, flow_flags, \
+ arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_MEMORY(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+ flow_flags, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
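+
+// For illustration only, a sketch of the flow-annotated variant above (the
+// category, name and bind_id variable are made-up examples):
+//   void RunQueuedWork(uint64_t bind_id) {
+//     TRACE_EVENT_WITH_FLOW0("MY_SUBSYSTEM", "RunQueuedWork", bind_id,
+//                            TRACE_EVENT_FLAG_FLOW_IN);
+//     // ... the event is linked to the producer that used the same bind_id ...
+//   }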
+
+// Records events like TRACE_EVENT2 but uses |memory_tag| for memory tracing.
+// Use this where |name| is too generic to accurately aggregate allocations.
+#define TRACE_EVENT_WITH_MEMORY_TAG2(category, name, memory_tag, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_MEMORY(category, memory_tag) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// UNSHIPPED_TRACE_EVENT* are like TRACE_EVENT* except that they are not
+// included in official builds.
+
+#if OFFICIAL_BUILD
+#undef TRACING_IS_OFFICIAL_BUILD
+#define TRACING_IS_OFFICIAL_BUILD 1
+#elif !defined(TRACING_IS_OFFICIAL_BUILD)
+#define TRACING_IS_OFFICIAL_BUILD 0
+#endif
+
+#if TRACING_IS_OFFICIAL_BUILD
+#define UNSHIPPED_TRACE_EVENT0(category_group, name) (void)0
+#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ (void)0
+#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
+ arg1_val) \
+ (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ (void)0
+#else
+#define UNSHIPPED_TRACE_EVENT0(category_group, name) \
+ TRACE_EVENT0(category_group, name)
+#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
+#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
+#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) \
+ TRACE_EVENT_INSTANT0(category_group, name, scope)
+#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
+ arg1_val) \
+ TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val)
+#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#endif
+
+// Records a single event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_INSTANT0(category_group, name, scope) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope)
+#define TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val)
+#define TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_INSTANT0(category_group, name, scope) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_COPY | scope)
+#define TRACE_EVENT_COPY_INSTANT1(category_group, name, scope, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_INSTANT2(category_group, name, scope, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// Syntactic sugar for sampling tracing on the main thread (bucket 0).
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \
+ TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name)
+#define TRACE_EVENT_GET_SAMPLING_STATE() \
+ TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0)
+#define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \
+ TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name)
+#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(categoryAndName) \
+ TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(0, categoryAndName)
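+
+// For illustration only, a usage sketch of the scoped form above (the function
+// name is a made-up example):
+//   void HandleMainThreadTask() {
+//     TRACE_EVENT_SCOPED_SAMPLING_STATE("MY_SUBSYSTEM", "HandleMainThreadTask");
+//     // ... the previous sampling state is restored when this scope exits ...
+//   }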
+
+// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_BEGIN0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_BEGIN1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_BEGIN2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_BEGIN0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_BEGIN1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_BEGINx but with a custom |at| timestamp provided.
+// - |id| is used to match the _BEGIN event with the _END event.
+// Events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+#define TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \
+ thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0( \
+ category_group, name, id, thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP1( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP2( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, \
+ arg2_val)
+
+// Records a single END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_END0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_END1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_END2(category_group, name, arg1_name, arg1_val, arg2_name, \
+ arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_END0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_END1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+#define TRACE_EVENT_MARK(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_MARK, category_group, name, \
+ TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_MARK_WITH_TIMESTAMP(category_group, name, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(TRACE_EVENT_PHASE_MARK, \
+ category_group, name, timestamp, \
+ TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_COPY_MARK(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_MARK, category_group, name, \
+ TRACE_EVENT_FLAG_COPY)
+
+#define TRACE_EVENT_COPY_MARK_WITH_TIMESTAMP(category_group, name, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(TRACE_EVENT_PHASE_MARK, \
+ category_group, name, timestamp, \
+ TRACE_EVENT_FLAG_COPY)
+
+// Similar to TRACE_EVENT_ENDx but with a custom |at| timestamp provided.
+// - |id| is used to match the _BEGIN event with the _END event.
+// Events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+#define TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \
+ thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0( \
+ category_group, name, id, thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP1( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP2( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, \
+ arg2_val)
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_COUNTER1(category_group, name, value) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, "value", \
+ static_cast<int>(value))
+#define TRACE_COPY_COUNTER1(category_group, name, value) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, "value", \
+ static_cast<int>(value))
+
+// Records the values of a multi-part counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_COUNTER2(category_group, name, value1_name, value1_val, \
+ value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, value1_name, \
+ static_cast<int>(value1_val), value2_name, \
+ static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER2(category_group, name, value1_name, value1_val, \
+ value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, value1_name, \
+ static_cast<int>(value1_val), value2_name, \
+ static_cast<int>(value2_val))
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+// will be xored with a hash of the process ID so that the same pointer on
+// two different processes will not collide.
+#define TRACE_COUNTER_ID1(category_group, name, id, value) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, "value", \
+ static_cast<int>(value))
+#define TRACE_COPY_COUNTER_ID1(category_group, name, id, value) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY, "value", \
+ static_cast<int>(value))
+
+// Records the values of a multi-part counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+// will be xored with a hash of the process ID so that the same pointer on
+// two different processes will not collide.
+#define TRACE_COUNTER_ID2(category_group, name, id, value1_name, value1_val, \
+ value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, \
+ value1_name, static_cast<int>(value1_val), \
+ value2_name, static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER_ID2(category_group, name, id, value1_name, \
+ value1_val, value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY, \
+ value1_name, static_cast<int>(value1_val), \
+ value2_name, static_cast<int>(value2_val))
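+
+// For illustration only, a sketch of a per-object counter using the macros
+// above (the member name is a made-up example); using |this| as the id keeps
+// counters from different instances separate:
+//   TRACE_COUNTER_ID1("MY_SUBSYSTEM", "queueLength", this, fQueue.count());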
+
+// TRACE_EVENT_SAMPLE_* events are injected by the sampling profiler.
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP0(category_group, name, \
+ thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+ TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP1( \
+ category_group, name, thread_id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP2(category_group, name, \
+ thread_id, timestamp, \
+ arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// ASYNC_STEP_* APIs should only be used by legacy code. New code should
+// consider using NESTABLE_ASYNC_* APIs to describe substeps within an async
+// event.
+// Records a single ASYNC_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to match the ASYNC_BEGIN event with the ASYNC_END event. ASYNC
+// events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+//
+// An asynchronous operation can consist of multiple phases. The first phase is
+// defined by the ASYNC_BEGIN calls. Additional phases can be defined using the
+// ASYNC_STEP_INTO or ASYNC_STEP_PAST macros. The ASYNC_STEP_INTO macro will
+// annotate the block following the call. The ASYNC_STEP_PAST macro will
+// annotate the block prior to the call. Note that any particular event must use
+// only STEP_INTO or STEP_PAST macros; they can not mix and match. When the
+// operation completes, call ASYNC_END.
+//
+// An ASYNC trace typically occurs on a single thread (if not, it will only be
+// drawn on the thread defined in the ASYNC_BEGIN event), but all events in that
+// operation must use the same |name| and |id|. Each step can have its own
+// args.
+#define TRACE_EVENT_ASYNC_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_ASYNC_BEGINx but with a custom |at| timestamp
+// provided.
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+
+// Records a single ASYNC_STEP_INTO event for |step| immediately. If the
+// category is not enabled, then this does nothing. The |name| and |id| must
+// match the ASYNC_BEGIN event above. The |step| param identifies this step
+// within the async event. This should be called at the beginning of the next
+// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
+// ASYNC_STEP_PAST events.
+#define TRACE_EVENT_ASYNC_STEP_INTO0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP_INTO1(category_group, name, id, step, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_STEP_INTO, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+
+// Similar to TRACE_EVENT_ASYNC_STEP_INTOx but with a custom |at| timestamp
+// provided.
+#define TRACE_EVENT_ASYNC_STEP_INTO_WITH_TIMESTAMP0(category_group, name, id, \
+ step, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_STEP_INTO, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ "step", step)
+
+// Records a single ASYNC_STEP_PAST event for |step| immediately. If the
+// category is not enabled, then this does nothing. The |name| and |id| must
+// match the ASYNC_BEGIN event above. The |step| param identifies this step
+// within the async event. This should be called at the beginning of the next
+// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
+// ASYNC_STEP_INTO events.
+#define TRACE_EVENT_ASYNC_STEP_PAST0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP_PAST1(category_group, name, id, step, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_STEP_PAST, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+
+// Records a single ASYNC_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_ASYNC_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_END1(category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END2(category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_END1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_END2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_ASYNC_ENDx but with a custom |at| timestamp provided.
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP0(category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
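+
+// For illustration only, a sketch of a multi-phase async operation using the
+// macros above (the category, names and request_id are made-up examples):
+//   TRACE_EVENT_ASYNC_BEGIN0("MY_SUBSYSTEM", "Download", request_id);
+//   // ... first phase ...
+//   TRACE_EVENT_ASYNC_STEP_INTO0("MY_SUBSYSTEM", "Download", request_id,
+//                                "Decode");
+//   // ... decode phase ...
+//   TRACE_EVENT_ASYNC_END0("MY_SUBSYSTEM", "Download", request_id);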
+
+// NESTABLE_ASYNC_* APIs are used to describe an async operation, which can
+// be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC
+// events.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - A pair of NESTABLE_ASYNC_BEGIN event and NESTABLE_ASYNC_END event is
+// considered as a match if their category_group, name and id all match.
+// - |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+// - |id| is used to match a child NESTABLE_ASYNC event with its parent
+// NESTABLE_ASYNC event. Therefore, events in the same nested event tree must
+// be logged using the same id and category_group.
+//
+// Unmatched NESTABLE_ASYNC_END event will be parsed as an event that starts
+// at the first NESTABLE_ASYNC event of that id, and unmatched
+// NESTABLE_ASYNC_BEGIN event will be parsed as an event that ends at the last
+// NESTABLE_ASYNC event of that id. Corresponding warning messages for
+// unmatched events will be shown in the analysis view.
+
+// Records a single NESTABLE_ASYNC_BEGIN event called "name" immediately, with
+// 0, 1 or 2 associated arguments. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+// Records a single NESTABLE_ASYNC_END event called "name" immediately, with 0
+// or 2 associated arguments. If the category is not enabled, then this does
+// nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+// Records a single NESTABLE_ASYNC_END event called "name" immediately, with 1
+// associated argument. If the category is not enabled, then this does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_END1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_END2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
+// with none, one or two associated arguments. If the category is not enabled,
+// then this does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT1(category_group, name, id, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2( \
+ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TTS2( \
+ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TTS2( \
+ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_NESTABLE_ASYNC_{BEGIN,END}x but with a custom
+// |timestamp| provided.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, \
+ id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(category_group, name, \
+ id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0( \
+ category_group, name, id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TIMESTAMP0( \
+ category_group, name, id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ timestamp, TRACE_EVENT_FLAG_COPY)
+
+// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
+// with 2 associated arguments. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2( \
+ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single FLOW_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to match the FLOW_BEGIN event with the FLOW_END event. FLOW
+// events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+// FLOW events are different from ASYNC events in how they are drawn by the
+// tracing UI. A FLOW defines asynchronous data flow, such as posting a task
+// (FLOW_BEGIN) and later executing that task (FLOW_END). Expect FLOWs to be
+// drawn as lines or arrows from FLOW_BEGIN scopes to FLOW_END scopes. Similar
+// to ASYNC, a FLOW can consist of multiple phases. The first phase is defined
+// by the FLOW_BEGIN calls. Additional phases can be defined using the FLOW_STEP
+// macros. When the operation completes, call FLOW_END. An async operation can
+// span threads and processes, but all events in that operation must use the
+// same |name| and |id|. Each event can have its own args.
+#define TRACE_EVENT_FLOW_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_FLOW_BEGIN1(category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_FLOW_BEGIN2(category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_FLOW_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single FLOW_STEP event for |step| immediately. If the category
+// is not enabled, then this does nothing. The |name| and |id| must match the
+// FLOW_BEGIN event above. The |step| param identifies this step within the
+// async event. This should be called at the beginning of the next phase of an
+// asynchronous operation.
+#define TRACE_EVENT_FLOW_STEP0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_FLOW_STEP1(category_group, name, id, step, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_STEP0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, "step", step)
+#define TRACE_EVENT_COPY_FLOW_STEP1(category_group, name, id, step, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, "step", step, arg1_name, arg1_val)
+
+// Records a single FLOW_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_FLOW_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_FLOW_END_BIND_TO_ENCLOSING0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, \
+ TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
+#define TRACE_EVENT_FLOW_END1(category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
+ arg1_val)
+#define TRACE_EVENT_FLOW_END2(category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
+ arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_FLOW_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_FLOW_END1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
+ arg1_val)
+#define TRACE_EVENT_COPY_FLOW_END2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
+ arg1_val, arg2_name, arg2_val)
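+
+// For illustration only, a sketch of linking a posted task to its later
+// execution with the macros above (PostTask and the other names are made-up
+// examples):
+//   // Producer side:
+//   TRACE_EVENT_FLOW_BEGIN0("MY_SUBSYSTEM", "PendingTask", task_id);
+//   PostTask(task);
+//   // Consumer side, when the task runs:
+//   TRACE_EVENT_FLOW_END0("MY_SUBSYSTEM", "PendingTask", task_id);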
+
+// Macros to track the life time and value of arbitrary client objects.
+// See also TraceTrackableObject.
+#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_CREATE_OBJECT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, \
+ snapshot) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID_AND_TIMESTAMP( \
+ category_group, name, id, timestamp, snapshot) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, \
+ TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+
+#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
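+
+// For illustration only, a sketch of tracking an object's lifetime with the
+// macros above (MyTracedObject is a made-up example class):
+//   MyTracedObject::MyTracedObject() {
+//     TRACE_EVENT_OBJECT_CREATED_WITH_ID("MY_SUBSYSTEM", "MyTracedObject", this);
+//   }
+//   MyTracedObject::~MyTracedObject() {
+//     TRACE_EVENT_OBJECT_DELETED_WITH_ID("MY_SUBSYSTEM", "MyTracedObject", this);
+//   }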
+
+// Macro to efficiently determine if a given category group is enabled.
+#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ *ret = true; \
+ } else { \
+ *ret = false; \
+ } \
+ } while (0)
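+
+// For illustration only, a typical use of the macro above:
+//   bool gpu_tracing_enabled;
+//   TRACE_EVENT_CATEGORY_GROUP_ENABLED("gpu", &gpu_tracing_enabled);
+//   if (gpu_tracing_enabled) {
+//     // ... build expensive trace arguments only when they will be recorded ...
+//   }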
+
+// Macro to explicitly warm up a given category group. This could be useful in
+// cases where we want to initialize a category group before any trace events
+// for that category group are reported. For example, to have a category group
+// always show up in the "record categories" list for manually selecting
+// settings in about://tracing.
+#define TRACE_EVENT_WARMUP_CATEGORY(category_group) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group)
+
+// Macro to efficiently determine, through polling, if a new trace has begun.
+#define TRACE_EVENT_IS_NEW_TRACE(ret) \
+ do { \
+ static int INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = 0; \
+ int num_traces_recorded = TRACE_EVENT_API_GET_NUM_TRACES_RECORDED(); \
+ if (num_traces_recorded != -1 && \
+ num_traces_recorded != \
+ INTERNAL_TRACE_EVENT_UID(lastRecordingNumber)) { \
+ INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = num_traces_recorded; \
+ *ret = true; \
+ } else { \
+ *ret = false; \
+ } \
+ } while (0)
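+
+// For illustration only, a polling sketch using the macro above:
+//   bool is_new_trace;
+//   TRACE_EVENT_IS_NEW_TRACE(&is_new_trace);
+//   if (is_new_trace) {
+//     // ... re-emit any one-time metadata for the new trace ...
+//   }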
+
+// Notes regarding the following definitions:
+// New values can be added and propagated to third party libraries, but existing
+// definitions must never be changed, because third party libraries may use old
+// definitions.
+
+// Phase indicates the nature of an event entry. E.g. part of a begin/end pair.
+#define TRACE_EVENT_PHASE_BEGIN ('B')
+#define TRACE_EVENT_PHASE_END ('E')
+#define TRACE_EVENT_PHASE_COMPLETE ('X')
+#define TRACE_EVENT_PHASE_INSTANT ('I')
+#define TRACE_EVENT_PHASE_ASYNC_BEGIN ('S')
+#define TRACE_EVENT_PHASE_ASYNC_STEP_INTO ('T')
+#define TRACE_EVENT_PHASE_ASYNC_STEP_PAST ('p')
+#define TRACE_EVENT_PHASE_ASYNC_END ('F')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN ('b')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_END ('e')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT ('n')
+#define TRACE_EVENT_PHASE_FLOW_BEGIN ('s')
+#define TRACE_EVENT_PHASE_FLOW_STEP ('t')
+#define TRACE_EVENT_PHASE_FLOW_END ('f')
+#define TRACE_EVENT_PHASE_METADATA ('M')
+#define TRACE_EVENT_PHASE_COUNTER ('C')
+#define TRACE_EVENT_PHASE_SAMPLE ('P')
+#define TRACE_EVENT_PHASE_CREATE_OBJECT ('N')
+#define TRACE_EVENT_PHASE_SNAPSHOT_OBJECT ('O')
+#define TRACE_EVENT_PHASE_DELETE_OBJECT ('D')
+#define TRACE_EVENT_PHASE_MEMORY_DUMP ('v')
+#define TRACE_EVENT_PHASE_MARK ('R')
+
+// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
+#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
+#define TRACE_EVENT_FLAG_COPY (static_cast<unsigned int>(1 << 0))
+#define TRACE_EVENT_FLAG_HAS_ID (static_cast<unsigned int>(1 << 1))
+#define TRACE_EVENT_FLAG_MANGLE_ID (static_cast<unsigned int>(1 << 2))
+#define TRACE_EVENT_FLAG_SCOPE_OFFSET (static_cast<unsigned int>(1 << 3))
+#define TRACE_EVENT_FLAG_SCOPE_EXTRA (static_cast<unsigned int>(1 << 4))
+#define TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP (static_cast<unsigned int>(1 << 5))
+#define TRACE_EVENT_FLAG_ASYNC_TTS (static_cast<unsigned int>(1 << 6))
+#define TRACE_EVENT_FLAG_BIND_TO_ENCLOSING (static_cast<unsigned int>(1 << 7))
+#define TRACE_EVENT_FLAG_FLOW_IN (static_cast<unsigned int>(1 << 8))
+#define TRACE_EVENT_FLAG_FLOW_OUT (static_cast<unsigned int>(1 << 9))
+#define TRACE_EVENT_FLAG_HAS_CONTEXT_ID (static_cast<unsigned int>(1 << 10))
+
+#define TRACE_EVENT_FLAG_SCOPE_MASK \
+ (static_cast<unsigned int>(TRACE_EVENT_FLAG_SCOPE_OFFSET | \
+ TRACE_EVENT_FLAG_SCOPE_EXTRA))
+
+// Type values for identifying types in the TraceValue union.
+#define TRACE_VALUE_TYPE_BOOL (static_cast<unsigned char>(1))
+#define TRACE_VALUE_TYPE_UINT (static_cast<unsigned char>(2))
+#define TRACE_VALUE_TYPE_INT (static_cast<unsigned char>(3))
+#define TRACE_VALUE_TYPE_DOUBLE (static_cast<unsigned char>(4))
+#define TRACE_VALUE_TYPE_POINTER (static_cast<unsigned char>(5))
+#define TRACE_VALUE_TYPE_STRING (static_cast<unsigned char>(6))
+#define TRACE_VALUE_TYPE_COPY_STRING (static_cast<unsigned char>(7))
+#define TRACE_VALUE_TYPE_CONVERTABLE (static_cast<unsigned char>(8))
+
+// Enum reflecting the scope of an INSTANT event. Must fit within
+// TRACE_EVENT_FLAG_SCOPE_MASK.
+#define TRACE_EVENT_SCOPE_GLOBAL (static_cast<unsigned char>(0 << 3))
+#define TRACE_EVENT_SCOPE_PROCESS (static_cast<unsigned char>(1 << 3))
+#define TRACE_EVENT_SCOPE_THREAD (static_cast<unsigned char>(2 << 3))
+
+#define TRACE_EVENT_SCOPE_NAME_GLOBAL ('g')
+#define TRACE_EVENT_SCOPE_NAME_PROCESS ('p')
+#define TRACE_EVENT_SCOPE_NAME_THREAD ('t')
diff --git a/gfx/skia/skia/src/core/SkTypeface.cpp b/gfx/skia/skia/src/core/SkTypeface.cpp
new file mode 100644
index 000000000..8747c6e54
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTypeface.cpp
@@ -0,0 +1,358 @@
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAdvancedTypefaceMetrics.h"
+#include "SkEndian.h"
+#include "SkFontDescriptor.h"
+#include "SkFontMgr.h"
+#include "SkMakeUnique.h"
+#include "SkMutex.h"
+#include "SkOTTable_OS_2.h"
+#include "SkOnce.h"
+#include "SkStream.h"
+#include "SkTypeface.h"
+#include "SkTypefaceCache.h"
+
+SkTypeface::SkTypeface(const SkFontStyle& style, bool isFixedPitch)
+ : fUniqueID(SkTypefaceCache::NewFontID()), fStyle(style), fIsFixedPitch(isFixedPitch) { }
+
+SkTypeface::~SkTypeface() { }
+
+#ifdef SK_WHITELIST_SERIALIZED_TYPEFACES
+extern void WhitelistSerializeTypeface(const SkTypeface*, SkWStream* );
+#define SK_TYPEFACE_DELEGATE WhitelistSerializeTypeface
+#else
+#define SK_TYPEFACE_DELEGATE nullptr
+#endif
+
+sk_sp<SkTypeface> (*gCreateTypefaceDelegate)(const char[], SkFontStyle) = nullptr;
+
+void (*gSerializeTypefaceDelegate)(const SkTypeface*, SkWStream* ) = SK_TYPEFACE_DELEGATE;
+sk_sp<SkTypeface> (*gDeserializeTypefaceDelegate)(SkStream* ) = nullptr;
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+class SkEmptyTypeface : public SkTypeface {
+public:
+ static SkEmptyTypeface* Create() { return new SkEmptyTypeface; }
+protected:
+ SkEmptyTypeface() : SkTypeface(SkFontStyle(), true) { }
+
+ SkStreamAsset* onOpenStream(int* ttcIndex) const override { return nullptr; }
+ SkScalerContext* onCreateScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor*) const override {
+ return nullptr;
+ }
+ void onFilterRec(SkScalerContextRec*) const override { }
+ virtual SkAdvancedTypefaceMetrics* onGetAdvancedTypefaceMetrics(
+ PerGlyphInfo,
+ const uint32_t*, uint32_t) const override { return nullptr; }
+ void onGetFontDescriptor(SkFontDescriptor*, bool*) const override { }
+ virtual int onCharsToGlyphs(const void* chars, Encoding encoding,
+ uint16_t glyphs[], int glyphCount) const override {
+ if (glyphs && glyphCount > 0) {
+ sk_bzero(glyphs, glyphCount * sizeof(glyphs[0]));
+ }
+ return 0;
+ }
+ int onCountGlyphs() const override { return 0; }
+ int onGetUPEM() const override { return 0; }
+ class EmptyLocalizedStrings : public SkTypeface::LocalizedStrings {
+ public:
+ bool next(SkTypeface::LocalizedString*) override { return false; }
+ };
+ void onGetFamilyName(SkString* familyName) const override {
+ familyName->reset();
+ }
+ SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override {
+ return new EmptyLocalizedStrings;
+ }
+ int onGetTableTags(SkFontTableTag tags[]) const override { return 0; }
+ size_t onGetTableData(SkFontTableTag, size_t, size_t, void*) const override {
+ return 0;
+ }
+};
+
+}
+
+SkTypeface* SkTypeface::GetDefaultTypeface(Style style) {
+ static SkOnce once[4];
+ static SkTypeface* defaults[4];
+
+ SkASSERT((int)style < 4);
+ once[style]([style] {
+ SkAutoTUnref<SkFontMgr> fm(SkFontMgr::RefDefault());
+ SkTypeface* t = fm->legacyCreateTypeface(nullptr, SkFontStyle::FromOldStyle(style));
+ defaults[style] = t ? t : SkEmptyTypeface::Create();
+ });
+ return defaults[style];
+}
+
+sk_sp<SkTypeface> SkTypeface::MakeDefault(Style style) {
+ return sk_ref_sp(GetDefaultTypeface(style));
+}
+
+uint32_t SkTypeface::UniqueID(const SkTypeface* face) {
+ if (nullptr == face) {
+ face = GetDefaultTypeface();
+ }
+ return face->uniqueID();
+}
+
+bool SkTypeface::Equal(const SkTypeface* facea, const SkTypeface* faceb) {
+ return facea == faceb || SkTypeface::UniqueID(facea) == SkTypeface::UniqueID(faceb);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkTypeface> SkTypeface::MakeFromName(const char name[],
+ SkFontStyle fontStyle) {
+ if (gCreateTypefaceDelegate) {
+ sk_sp<SkTypeface> result = (*gCreateTypefaceDelegate)(name, fontStyle);
+ if (result) {
+ return result;
+ }
+ }
+ if (nullptr == name && (fontStyle.slant() == SkFontStyle::kItalic_Slant ||
+ fontStyle.slant() == SkFontStyle::kUpright_Slant) &&
+ (fontStyle.weight() == SkFontStyle::kBold_Weight ||
+ fontStyle.weight() == SkFontStyle::kNormal_Weight)) {
+ return MakeDefault(static_cast<SkTypeface::Style>(
+ (fontStyle.slant() == SkFontStyle::kItalic_Slant ? SkTypeface::kItalic :
+ SkTypeface::kNormal) |
+ (fontStyle.weight() == SkFontStyle::kBold_Weight ? SkTypeface::kBold :
+ SkTypeface::kNormal)));
+ }
+ SkAutoTUnref<SkFontMgr> fm(SkFontMgr::RefDefault());
+ return sk_sp<SkTypeface>(fm->legacyCreateTypeface(name, fontStyle));
+}
+
+sk_sp<SkTypeface> SkTypeface::MakeFromTypeface(SkTypeface* family, Style s) {
+ if (!family) {
+ return SkTypeface::MakeDefault(s);
+ }
+
+ if (family->style() == s) {
+ return sk_ref_sp(family);
+ }
+
+ SkAutoTUnref<SkFontMgr> fm(SkFontMgr::RefDefault());
+ return sk_sp<SkTypeface>(fm->matchFaceStyle(family, SkFontStyle::FromOldStyle(s)));
+}
+
+sk_sp<SkTypeface> SkTypeface::MakeFromStream(SkStreamAsset* stream, int index) {
+ SkAutoTUnref<SkFontMgr> fm(SkFontMgr::RefDefault());
+ return sk_sp<SkTypeface>(fm->createFromStream(stream, index));
+}
+
+sk_sp<SkTypeface> SkTypeface::MakeFromFontData(std::unique_ptr<SkFontData> data) {
+ SkAutoTUnref<SkFontMgr> fm(SkFontMgr::RefDefault());
+ return sk_sp<SkTypeface>(fm->createFromFontData(std::move(data)));
+}
+
+sk_sp<SkTypeface> SkTypeface::MakeFromFile(const char path[], int index) {
+ SkAutoTUnref<SkFontMgr> fm(SkFontMgr::RefDefault());
+ return sk_sp<SkTypeface>(fm->createFromFile(path, index));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkTypeface::serialize(SkWStream* wstream) const {
+ if (gSerializeTypefaceDelegate) {
+ (*gSerializeTypefaceDelegate)(this, wstream);
+ return;
+ }
+ bool isLocal = false;
+ SkFontDescriptor desc;
+ this->onGetFontDescriptor(&desc, &isLocal);
+
+ // Embed font data if it's a local font.
+ if (isLocal && !desc.hasFontData()) {
+ desc.setFontData(this->onMakeFontData());
+ }
+ desc.serialize(wstream);
+}
+
+sk_sp<SkTypeface> SkTypeface::MakeDeserialize(SkStream* stream) {
+ if (gDeserializeTypefaceDelegate) {
+ return (*gDeserializeTypefaceDelegate)(stream);
+ }
+
+ SkFontDescriptor desc;
+ if (!SkFontDescriptor::Deserialize(stream, &desc)) {
+ return nullptr;
+ }
+
+ std::unique_ptr<SkFontData> data = desc.detachFontData();
+ if (data) {
+ sk_sp<SkTypeface> typeface(SkTypeface::MakeFromFontData(std::move(data)));
+ if (typeface) {
+ return typeface;
+ }
+ }
+
+ return SkTypeface::MakeFromName(desc.getFamilyName(), desc.getStyle());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+int SkTypeface::countTables() const {
+ return this->onGetTableTags(nullptr);
+}
+
+int SkTypeface::getTableTags(SkFontTableTag tags[]) const {
+ return this->onGetTableTags(tags);
+}
+
+size_t SkTypeface::getTableSize(SkFontTableTag tag) const {
+ return this->onGetTableData(tag, 0, ~0U, nullptr);
+}
+
+size_t SkTypeface::getTableData(SkFontTableTag tag, size_t offset, size_t length,
+ void* data) const {
+ return this->onGetTableData(tag, offset, length, data);
+}
+
+SkStreamAsset* SkTypeface::openStream(int* ttcIndex) const {
+ int ttcIndexStorage;
+ if (nullptr == ttcIndex) {
+ // So our subclasses don't need to check for null param
+ ttcIndex = &ttcIndexStorage;
+ }
+ return this->onOpenStream(ttcIndex);
+}
+
+std::unique_ptr<SkFontData> SkTypeface::makeFontData() const {
+ return this->onMakeFontData();
+}
+
+// This implementation is temporary until this method can be made pure virtual.
+std::unique_ptr<SkFontData> SkTypeface::onMakeFontData() const {
+ int index;
+ std::unique_ptr<SkStreamAsset> stream(this->onOpenStream(&index));
+ return skstd::make_unique<SkFontData>(std::move(stream), index, nullptr, 0);
+}
+
+int SkTypeface::charsToGlyphs(const void* chars, Encoding encoding,
+ uint16_t glyphs[], int glyphCount) const {
+ if (glyphCount <= 0) {
+ return 0;
+ }
+ if (nullptr == chars || (unsigned)encoding > kUTF32_Encoding) {
+ if (glyphs) {
+ sk_bzero(glyphs, glyphCount * sizeof(glyphs[0]));
+ }
+ return 0;
+ }
+ return this->onCharsToGlyphs(chars, encoding, glyphs, glyphCount);
+}
+
+int SkTypeface::countGlyphs() const {
+ return this->onCountGlyphs();
+}
+
+int SkTypeface::getUnitsPerEm() const {
+ // should we try to cache this in the base-class?
+ return this->onGetUPEM();
+}
+
+bool SkTypeface::getKerningPairAdjustments(const uint16_t glyphs[], int count,
+ int32_t adjustments[]) const {
+ SkASSERT(count >= 0);
+    // Check for the only legal way to pass nullptrs: everything is 0, in which
+    // case the caller just wants to know whether this face can possibly support
+    // kerning (true) or never (false).
+ if (nullptr == glyphs || nullptr == adjustments) {
+ SkASSERT(nullptr == glyphs);
+ SkASSERT(0 == count);
+ SkASSERT(nullptr == adjustments);
+ }
+ return this->onGetKerningPairAdjustments(glyphs, count, adjustments);
+}
+
+SkTypeface::LocalizedStrings* SkTypeface::createFamilyNameIterator() const {
+ return this->onCreateFamilyNameIterator();
+}
+
+void SkTypeface::getFamilyName(SkString* name) const {
+ SkASSERT(name);
+ this->onGetFamilyName(name);
+}
+
+SkAdvancedTypefaceMetrics* SkTypeface::getAdvancedTypefaceMetrics(
+ PerGlyphInfo info,
+ const uint32_t* glyphIDs,
+ uint32_t glyphIDsCount) const {
+ SkAdvancedTypefaceMetrics* result =
+ this->onGetAdvancedTypefaceMetrics(info, glyphIDs, glyphIDsCount);
+ if (result && result->fType == SkAdvancedTypefaceMetrics::kTrueType_Font) {
+ SkOTTableOS2::Version::V2::Type::Field fsType;
+ constexpr SkFontTableTag os2Tag = SkTEndian_SwapBE32(SkOTTableOS2::TAG);
+ constexpr size_t fsTypeOffset = offsetof(SkOTTableOS2::Version::V2, fsType);
+ if (this->getTableData(os2Tag, fsTypeOffset, sizeof(fsType), &fsType) == sizeof(fsType)) {
+ if (fsType.Bitmap || (fsType.Restricted && !(fsType.PreviewPrint || fsType.Editable))) {
+ result->fFlags |= SkAdvancedTypefaceMetrics::kNotEmbeddable_FontFlag;
+ }
+ if (fsType.NoSubsetting) {
+ result->fFlags |= SkAdvancedTypefaceMetrics::kNotSubsettable_FontFlag;
+ }
+ }
+ }
+ return result;
+}
+
+bool SkTypeface::onGetKerningPairAdjustments(const uint16_t glyphs[], int count,
+ int32_t adjustments[]) const {
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkDescriptor.h"
+#include "SkPaint.h"
+
+SkRect SkTypeface::getBounds() const {
+ fBoundsOnce([this] {
+ if (!this->onComputeBounds(&fBounds)) {
+ fBounds.setEmpty();
+ }
+ });
+ return fBounds;
+}
+
+bool SkTypeface::onComputeBounds(SkRect* bounds) const {
+    // We use a big text size to get plenty of significant bits from the scaler
+    // context, then scale back down to return our final answer (at 1 pt).
+ const SkScalar textSize = 2048;
+ const SkScalar invTextSize = 1 / textSize;
+
+ SkPaint paint;
+ paint.setTypeface(sk_ref_sp(const_cast<SkTypeface*>(this)));
+ paint.setTextSize(textSize);
+ paint.setLinearText(true);
+
+ SkScalerContext::Rec rec;
+ SkScalerContext::MakeRec(paint, nullptr, nullptr, &rec);
+
+ SkAutoDescriptor ad(sizeof(rec) + SkDescriptor::ComputeOverhead(1));
+ SkDescriptor* desc = ad.getDesc();
+ desc->init();
+ desc->addEntry(kRec_SkDescriptorTag, sizeof(rec), &rec);
+
+ SkScalerContextEffects noeffects;
+ SkAutoTDelete<SkScalerContext> ctx(this->createScalerContext(noeffects, desc, true));
+ if (ctx.get()) {
+ SkPaint::FontMetrics fm;
+ ctx->getFontMetrics(&fm);
+ bounds->set(fm.fXMin * invTextSize, fm.fTop * invTextSize,
+ fm.fXMax * invTextSize, fm.fBottom * invTextSize);
+ return true;
+ }
+ return false;
+}
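
// Illustrative sketch, not part of the patch above: exercising the factory and
// query entry points that SkTypeface.cpp routes through the on*() virtuals.
// The family name "Arial" is only a placeholder; an unknown name falls back to
// the default typeface via SkFontMgr::legacyCreateTypeface().
#include "SkString.h"
#include "SkTypeface.h"

static void dump_typeface_info() {
    sk_sp<SkTypeface> face = SkTypeface::MakeFromName("Arial", SkFontStyle());

    SkString family;
    face->getFamilyName(&family);          // funnels into onGetFamilyName()

    SkDebugf("family=%s upem=%d tables=%d\n",
             family.c_str(), face->getUnitsPerEm(), face->countTables());
}
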
diff --git a/gfx/skia/skia/src/core/SkTypefaceCache.cpp b/gfx/skia/skia/src/core/SkTypefaceCache.cpp
new file mode 100644
index 000000000..05a7a8eb5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTypefaceCache.cpp
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#include "SkTypefaceCache.h"
+#include "SkAtomics.h"
+#include "SkMutex.h"
+
+#define TYPEFACE_CACHE_LIMIT 1024
+
+SkTypefaceCache::SkTypefaceCache() {}
+
+void SkTypefaceCache::add(SkTypeface* face) {
+ if (fTypefaces.count() >= TYPEFACE_CACHE_LIMIT) {
+ this->purge(TYPEFACE_CACHE_LIMIT >> 2);
+ }
+
+ fTypefaces.emplace_back(SkRef(face));
+}
+
+SkTypeface* SkTypefaceCache::findByProcAndRef(FindProc proc, void* ctx) const {
+ for (const sk_sp<SkTypeface>& typeface : fTypefaces) {
+ if (proc(typeface.get(), ctx)) {
+ return SkRef(typeface.get());
+ }
+ }
+ return nullptr;
+}
+
+void SkTypefaceCache::purge(int numToPurge) {
+ int count = fTypefaces.count();
+ int i = 0;
+ while (i < count) {
+ if (fTypefaces[i]->unique()) {
+ fTypefaces.removeShuffle(i);
+ --count;
+ if (--numToPurge == 0) {
+ return;
+ }
+ } else {
+ ++i;
+ }
+ }
+}
+
+void SkTypefaceCache::purgeAll() {
+ this->purge(fTypefaces.count());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkTypefaceCache& SkTypefaceCache::Get() {
+ static SkTypefaceCache gCache;
+ return gCache;
+}
+
+SkFontID SkTypefaceCache::NewFontID() {
+ static int32_t gFontID;
+ return sk_atomic_inc(&gFontID) + 1;
+}
+
+SK_DECLARE_STATIC_MUTEX(gMutex);
+
+void SkTypefaceCache::Add(SkTypeface* face) {
+ SkAutoMutexAcquire ama(gMutex);
+ Get().add(face);
+}
+
+SkTypeface* SkTypefaceCache::FindByProcAndRef(FindProc proc, void* ctx) {
+ SkAutoMutexAcquire ama(gMutex);
+ return Get().findByProcAndRef(proc, ctx);
+}
+
+void SkTypefaceCache::PurgeAll() {
+ SkAutoMutexAcquire ama(gMutex);
+ Get().purgeAll();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+static bool DumpProc(SkTypeface* face, void* ctx) {
+ SkString n;
+ face->getFamilyName(&n);
+ SkFontStyle s = face->fontStyle();
+ SkFontID id = face->uniqueID();
+ SkDebugf("SkTypefaceCache: face %p fontID %d weight %d width %d style %d refcnt %d name %s\n",
+ face, id, s.weight(), s.width(), s.slant(), face->getRefCnt(), n.c_str());
+ return false;
+}
+#endif
+
+void SkTypefaceCache::Dump() {
+#ifdef SK_DEBUG
+ (void)Get().findByProcAndRef(DumpProc, nullptr);
+#endif
+}
diff --git a/gfx/skia/skia/src/core/SkTypefaceCache.h b/gfx/skia/skia/src/core/SkTypefaceCache.h
new file mode 100644
index 000000000..bf90dae3e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTypefaceCache.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef SkTypefaceCache_DEFINED
+#define SkTypefaceCache_DEFINED
+
+#include "SkRefCnt.h"
+#include "SkTypeface.h"
+#include "SkTArray.h"
+
+class SkTypefaceCache {
+public:
+ SkTypefaceCache();
+
+ /**
+     * Callback for findByProcAndRef(). Returns true if the given typeface is a match
+ * for the given context. The passed typeface is owned by the cache and is
+ * not additionally ref()ed. The typeface may be in the disposed state.
+ */
+ typedef bool(*FindProc)(SkTypeface*, void* context);
+
+ /**
+ * Add a typeface to the cache. This ref()s the typeface, so that the
+ * cache is also an owner. Later, if we need to purge the cache, typefaces
+ * whose refcnt is 1 (meaning only the cache is an owner) will be
+ * unref()ed.
+ */
+ void add(SkTypeface*);
+
+ /**
+ * Iterate through the cache, calling proc(typeface, ctx) with each
+ * typeface. If proc returns true, then we return that typeface (this
+ * ref()s the typeface). If it never returns true, we return nullptr.
+ */
+ SkTypeface* findByProcAndRef(FindProc proc, void* ctx) const;
+
+ /**
+ * This will unref all of the typefaces in the cache for which the cache
+ * is the only owner. Normally this is handled automatically as needed.
+ * This function is exposed for clients that explicitly want to purge the
+ * cache (e.g. to look for leaks).
+ */
+ void purgeAll();
+
+ /**
+ * Helper: returns a unique fontID to pass to the constructor of
+ * your subclass of SkTypeface
+ */
+ static SkFontID NewFontID();
+
+ // These are static wrappers around a global instance of a cache.
+
+ static void Add(SkTypeface*);
+ static SkTypeface* FindByProcAndRef(FindProc proc, void* ctx);
+ static void PurgeAll();
+
+ /**
+ * Debugging only: dumps the status of the typefaces in the cache
+ */
+ static void Dump();
+
+private:
+ static SkTypefaceCache& Get();
+
+ void purge(int count);
+
+ SkTArray<sk_sp<SkTypeface>> fTypefaces;
+};
+
+#endif
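
// Illustrative sketch, not part of the patch above: the FindProc pattern the
// cache is built around. Matching on uniqueID() is just for demonstration;
// real font backends match on a platform-specific descriptor passed via ctx.
#include "SkTypefaceCache.h"

static bool find_by_id(SkTypeface* face, void* ctx) {
    return face->uniqueID() == *static_cast<SkFontID*>(ctx);
}

static void register_typeface(SkTypeface* face) {
    SkFontID id = face->uniqueID();
    // FindByProcAndRef returns an already-ref()ed typeface, so hand it to sk_sp.
    sk_sp<SkTypeface> cached(SkTypefaceCache::FindByProcAndRef(find_by_id, &id));
    if (!cached) {
        SkTypefaceCache::Add(face);        // add() takes its own ref (see the .cpp)
    }
}
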
diff --git a/gfx/skia/skia/src/core/SkTypefacePriv.h b/gfx/skia/skia/src/core/SkTypefacePriv.h
new file mode 100644
index 000000000..f8d7e63ef
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTypefacePriv.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTypefacePriv_DEFINED
+#define SkTypefacePriv_DEFINED
+
+#include "SkTypeface.h"
+
+/**
+ * Return a ref'd typeface, which must later be unref'd
+ *
+ * If the parameter is non-null, it will be ref'd and returned, otherwise
+ * it will be the default typeface.
+ */
+static inline sk_sp<SkTypeface> ref_or_default(SkTypeface* face) {
+ return face ? sk_ref_sp(face) : SkTypeface::MakeDefault();
+}
+
+/**
+ * Always resolves to a non-null typeface, either the value passed to its
+ * constructor, or the default typeface if null was passed.
+ */
+class SkAutoResolveDefaultTypeface : public sk_sp<SkTypeface> {
+public:
+ SkAutoResolveDefaultTypeface() : INHERITED(SkTypeface::MakeDefault()) {}
+
+ SkAutoResolveDefaultTypeface(SkTypeface* face)
+ : INHERITED(ref_or_default(face)) {}
+
+private:
+ typedef sk_sp<SkTypeface> INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkUnPreMultiply.cpp b/gfx/skia/skia/src/core/SkUnPreMultiply.cpp
new file mode 100644
index 000000000..a15e7d13a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkUnPreMultiply.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkUnPreMultiply.h"
+#include "SkColorPriv.h"
+
+SkColor SkUnPreMultiply::PMColorToColor(SkPMColor c) {
+ const unsigned a = SkGetPackedA32(c);
+ const Scale scale = GetScale(a);
+ return SkColorSetARGB(a,
+ ApplyScale(scale, SkGetPackedR32(c)),
+ ApplyScale(scale, SkGetPackedG32(c)),
+ ApplyScale(scale, SkGetPackedB32(c)));
+}
+
+uint32_t SkUnPreMultiply::UnPreMultiplyPreservingByteOrder(SkPMColor c) {
+ const U8CPU a = SkGetPackedA32(c);
+ const Scale scale = GetScale(a);
+ return SkPackARGB32NoCheck(a,
+ ApplyScale(scale, SkGetPackedR32(c)),
+ ApplyScale(scale, SkGetPackedG32(c)),
+ ApplyScale(scale, SkGetPackedB32(c)));
+}
+
+const uint32_t SkUnPreMultiply::gTable[] = {
+ 0x00000000, 0xFF000000, 0x7F800000, 0x55000000, 0x3FC00000, 0x33000000, 0x2A800000, 0x246DB6DB,
+ 0x1FE00000, 0x1C555555, 0x19800000, 0x172E8BA3, 0x15400000, 0x139D89D9, 0x1236DB6E, 0x11000000,
+ 0x0FF00000, 0x0F000000, 0x0E2AAAAB, 0x0D6BCA1B, 0x0CC00000, 0x0C249249, 0x0B9745D1, 0x0B1642C8,
+ 0x0AA00000, 0x0A333333, 0x09CEC4EC, 0x0971C71C, 0x091B6DB7, 0x08CB08D4, 0x08800000, 0x0839CE74,
+ 0x07F80000, 0x07BA2E8C, 0x07800000, 0x07492492, 0x07155555, 0x06E45307, 0x06B5E50D, 0x0689D89E,
+ 0x06600000, 0x063831F4, 0x06124925, 0x05EE23B9, 0x05CBA2E9, 0x05AAAAAB, 0x058B2164, 0x056CEFA9,
+ 0x05500000, 0x05343EB2, 0x0519999A, 0x05000000, 0x04E76276, 0x04CFB2B8, 0x04B8E38E, 0x04A2E8BA,
+ 0x048DB6DB, 0x0479435E, 0x0465846A, 0x045270D0, 0x04400000, 0x042E29F8, 0x041CE73A, 0x040C30C3,
+ 0x03FC0000, 0x03EC4EC5, 0x03DD1746, 0x03CE540F, 0x03C00000, 0x03B21643, 0x03A49249, 0x03976FC6,
+ 0x038AAAAB, 0x037E3F20, 0x03722983, 0x03666666, 0x035AF287, 0x034FCACE, 0x0344EC4F, 0x033A5441,
+ 0x03300000, 0x0325ED09, 0x031C18FA, 0x0312818B, 0x03092492, 0x03000000, 0x02F711DC, 0x02EE5847,
+ 0x02E5D174, 0x02DD7BAF, 0x02D55555, 0x02CD5CD6, 0x02C590B2, 0x02BDEF7C, 0x02B677D4, 0x02AF286C,
+ 0x02A80000, 0x02A0FD5C, 0x029A1F59, 0x029364D9, 0x028CCCCD, 0x0286562E, 0x02800000, 0x0279C952,
+ 0x0273B13B, 0x026DB6DB, 0x0267D95C, 0x026217ED, 0x025C71C7, 0x0256E62A, 0x0251745D, 0x024C1BAD,
+ 0x0246DB6E, 0x0241B2F9, 0x023CA1AF, 0x0237A6F5, 0x0232C235, 0x022DF2DF, 0x02293868, 0x02249249,
+ 0x02200000, 0x021B810F, 0x021714FC, 0x0212BB51, 0x020E739D, 0x020A3D71, 0x02061862, 0x02020408,
+ 0x01FE0000, 0x01FA0BE8, 0x01F62762, 0x01F25214, 0x01EE8BA3, 0x01EAD3BB, 0x01E72A08, 0x01E38E39,
+ 0x01E00000, 0x01DC7F11, 0x01D90B21, 0x01D5A3EA, 0x01D24925, 0x01CEFA8E, 0x01CBB7E3, 0x01C880E5,
+ 0x01C55555, 0x01C234F7, 0x01BF1F90, 0x01BC14E6, 0x01B914C2, 0x01B61EED, 0x01B33333, 0x01B05161,
+ 0x01AD7943, 0x01AAAAAB, 0x01A7E567, 0x01A5294A, 0x01A27627, 0x019FCBD2, 0x019D2A20, 0x019A90E8,
+ 0x01980000, 0x01957741, 0x0192F685, 0x01907DA5, 0x018E0C7D, 0x018BA2E9, 0x018940C5, 0x0186E5F1,
+ 0x01849249, 0x018245AE, 0x01800000, 0x017DC11F, 0x017B88EE, 0x0179574E, 0x01772C23, 0x01750750,
+ 0x0172E8BA, 0x0170D045, 0x016EBDD8, 0x016CB157, 0x016AAAAB, 0x0168A9B9, 0x0166AE6B, 0x0164B8A8,
+ 0x0162C859, 0x0160DD68, 0x015EF7BE, 0x015D1746, 0x015B3BEA, 0x01596596, 0x01579436, 0x0155C7B5,
+ 0x01540000, 0x01523D04, 0x01507EAE, 0x014EC4EC, 0x014D0FAC, 0x014B5EDD, 0x0149B26D, 0x01480A4B,
+ 0x01466666, 0x0144C6B0, 0x01432B17, 0x0141938C, 0x01400000, 0x013E7064, 0x013CE4A9, 0x013B5CC1,
+ 0x0139D89E, 0x01385831, 0x0136DB6E, 0x01356246, 0x0133ECAE, 0x01327A97, 0x01310BF6, 0x012FA0BF,
+ 0x012E38E4, 0x012CD45A, 0x012B7315, 0x012A150B, 0x0128BA2F, 0x01276276, 0x01260DD6, 0x0124BC45,
+ 0x01236DB7, 0x01222222, 0x0120D97D, 0x011F93BC, 0x011E50D8, 0x011D10C5, 0x011BD37A, 0x011A98EF,
+ 0x0119611A, 0x01182BF3, 0x0116F970, 0x0115C988, 0x01149C34, 0x0113716B, 0x01124925, 0x01112359,
+ 0x01100000, 0x010EDF12, 0x010DC087, 0x010CA458, 0x010B8A7E, 0x010A72F0, 0x01095DA9, 0x01084AA0,
+ 0x010739CE, 0x01062B2E, 0x01051EB8, 0x01041466, 0x01030C31, 0x01020612, 0x01010204, 0x01000000
+};
+
+#ifdef BUILD_DIVIDE_TABLE
+void SkUnPreMultiply_BuildTable() {
+ for (unsigned i = 0; i <= 255; i++) {
+ uint32_t scale;
+
+ if (0 == i) {
+ scale = 0;
+ } else {
+ scale = ((255 << 24) + (i >> 1)) / i;
+ }
+
+ SkDebugf(" 0x%08X,", scale);
+ if ((i & 7) == 7) {
+ SkDebugf("\n");
+ }
+
+ // test the result
+ for (int j = 1; j <= i; j++) {
+ uint32_t test = (j * scale + (1 << 23)) >> 24;
+ uint32_t div = roundf(j * 255.0f / i);
+ int diff = SkAbs32(test - div);
+ SkASSERT(diff <= 1 && test <= 255);
+ }
+ }
+}
+#endif
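
// Illustrative sketch, not part of the patch above: one channel pushed through
// the fixed-point divide table. For alpha = 128 the table entry is
// ((255 << 24) + 64) / 128, and ApplyScale(scale, 64) == round(64 * 255 / 128)
// == 128; the table is built to stay within 1 LSB of the exact quotient.
#include "SkUnPreMultiply.h"

static void unpremultiply_one_channel() {
    const U8CPU alpha = 128, premulRed = 64;
    SkUnPreMultiply::Scale scale = SkUnPreMultiply::GetScale(alpha);
    U8CPU red = SkUnPreMultiply::ApplyScale(scale, premulRed);
    SkDebugf("unpremultiplied red = %d (expect 128)\n", red);
}
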
diff --git a/gfx/skia/skia/src/core/SkUtils.cpp b/gfx/skia/skia/src/core/SkUtils.cpp
new file mode 100644
index 000000000..635d1b173
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkUtils.cpp
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkUtils.h"
+
+/* 0xxxxxxx 1 total
+ 10xxxxxx // never a leading byte
+ 110xxxxx 2 total
+ 1110xxxx 3 total
+ 11110xxx 4 total
+
+ 11 10 01 01 xx xx xx xx 0...
+ 0xE5XX0000
+ 0xE5 << 24
+*/
+
+#ifdef SK_DEBUG
+ static void assert_utf8_leadingbyte(unsigned c) {
+ SkASSERT(c <= 0xF7); // otherwise leading byte is too big (more than 4 bytes)
+ SkASSERT((c & 0xC0) != 0x80); // can't begin with a middle char
+ }
+
+ int SkUTF8_LeadByteToCount(unsigned c) {
+ assert_utf8_leadingbyte(c);
+ return (((0xE5 << 24) >> (c >> 4 << 1)) & 3) + 1;
+ }
+#else
+ #define assert_utf8_leadingbyte(c)
+#endif
+
+int SkUTF8_CountUnichars(const char utf8[]) {
+ SkASSERT(utf8);
+
+ int count = 0;
+
+ for (;;) {
+ int c = *(const uint8_t*)utf8;
+ if (c == 0) {
+ break;
+ }
+ utf8 += SkUTF8_LeadByteToCount(c);
+ count += 1;
+ }
+ return count;
+}
+
+int SkUTF8_CountUnichars(const char utf8[], size_t byteLength) {
+ SkASSERT(utf8 || 0 == byteLength);
+
+ int count = 0;
+ const char* stop = utf8 + byteLength;
+
+ while (utf8 < stop) {
+ utf8 += SkUTF8_LeadByteToCount(*(const uint8_t*)utf8);
+ count += 1;
+ }
+ return count;
+}
+
+SkUnichar SkUTF8_ToUnichar(const char utf8[]) {
+ SkASSERT(utf8);
+
+ const uint8_t* p = (const uint8_t*)utf8;
+ int c = *p;
+ int hic = c << 24;
+
+ assert_utf8_leadingbyte(c);
+
+ if (hic < 0) {
+ uint32_t mask = (uint32_t)~0x3F;
+ hic = SkLeftShift(hic, 1);
+ do {
+ c = (c << 6) | (*++p & 0x3F);
+ mask <<= 5;
+ } while ((hic = SkLeftShift(hic, 1)) < 0);
+ c &= ~mask;
+ }
+ return c;
+}
+
+SkUnichar SkUTF8_NextUnichar(const char** ptr) {
+ SkASSERT(ptr && *ptr);
+
+ const uint8_t* p = (const uint8_t*)*ptr;
+ int c = *p;
+ int hic = c << 24;
+
+ assert_utf8_leadingbyte(c);
+
+ if (hic < 0) {
+ uint32_t mask = (uint32_t)~0x3F;
+ hic = SkLeftShift(hic, 1);
+ do {
+ c = (c << 6) | (*++p & 0x3F);
+ mask <<= 5;
+ } while ((hic = SkLeftShift(hic, 1)) < 0);
+ c &= ~mask;
+ }
+ *ptr = (char*)p + 1;
+ return c;
+}
+
+SkUnichar SkUTF8_PrevUnichar(const char** ptr) {
+ SkASSERT(ptr && *ptr);
+
+ const char* p = *ptr;
+
+ if (*--p & 0x80) {
+ while (*--p & 0x40) {
+ ;
+ }
+ }
+
+ *ptr = (char*)p;
+ return SkUTF8_NextUnichar(&p);
+}
+
+size_t SkUTF8_FromUnichar(SkUnichar uni, char utf8[]) {
+ if ((uint32_t)uni > 0x10FFFF) {
+ SkDEBUGFAIL("bad unichar");
+ return 0;
+ }
+
+ if (uni <= 127) {
+ if (utf8) {
+ *utf8 = (char)uni;
+ }
+ return 1;
+ }
+
+ char tmp[4];
+ char* p = tmp;
+ size_t count = 1;
+
+ SkDEBUGCODE(SkUnichar orig = uni;)
+
+ while (uni > 0x7F >> count) {
+ *p++ = (char)(0x80 | (uni & 0x3F));
+ uni >>= 6;
+ count += 1;
+ }
+
+ if (utf8) {
+ p = tmp;
+ utf8 += count;
+ while (p < tmp + count - 1) {
+ *--utf8 = *p++;
+ }
+ *--utf8 = (char)(~(0xFF >> count) | uni);
+ }
+
+ SkASSERT(utf8 == nullptr || orig == SkUTF8_ToUnichar(utf8));
+ return count;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+int SkUTF16_CountUnichars(const uint16_t src[]) {
+ SkASSERT(src);
+
+ int count = 0;
+ unsigned c;
+ while ((c = *src++) != 0) {
+ SkASSERT(!SkUTF16_IsLowSurrogate(c));
+ if (SkUTF16_IsHighSurrogate(c)) {
+ c = *src++;
+ SkASSERT(SkUTF16_IsLowSurrogate(c));
+ }
+ count += 1;
+ }
+ return count;
+}
+
+int SkUTF16_CountUnichars(const uint16_t src[], int numberOf16BitValues) {
+ SkASSERT(src);
+
+ const uint16_t* stop = src + numberOf16BitValues;
+ int count = 0;
+ while (src < stop) {
+ unsigned c = *src++;
+ SkASSERT(!SkUTF16_IsLowSurrogate(c));
+ if (SkUTF16_IsHighSurrogate(c)) {
+ SkASSERT(src < stop);
+ c = *src++;
+ SkASSERT(SkUTF16_IsLowSurrogate(c));
+ }
+ count += 1;
+ }
+ return count;
+}
+
+SkUnichar SkUTF16_NextUnichar(const uint16_t** srcPtr) {
+ SkASSERT(srcPtr && *srcPtr);
+
+ const uint16_t* src = *srcPtr;
+ SkUnichar c = *src++;
+
+ SkASSERT(!SkUTF16_IsLowSurrogate(c));
+ if (SkUTF16_IsHighSurrogate(c)) {
+ unsigned c2 = *src++;
+ SkASSERT(SkUTF16_IsLowSurrogate(c2));
+
+ // c = ((c & 0x3FF) << 10) + (c2 & 0x3FF) + 0x10000
+ // c = (((c & 0x3FF) + 64) << 10) + (c2 & 0x3FF)
+ c = (c << 10) + c2 + (0x10000 - (0xD800 << 10) - 0xDC00);
+ }
+ *srcPtr = src;
+ return c;
+}
+
+SkUnichar SkUTF16_PrevUnichar(const uint16_t** srcPtr) {
+ SkASSERT(srcPtr && *srcPtr);
+
+ const uint16_t* src = *srcPtr;
+ SkUnichar c = *--src;
+
+ SkASSERT(!SkUTF16_IsHighSurrogate(c));
+ if (SkUTF16_IsLowSurrogate(c)) {
+ unsigned c2 = *--src;
+ SkASSERT(SkUTF16_IsHighSurrogate(c2));
+ c = (c2 << 10) + c + (0x10000 - (0xD800 << 10) - 0xDC00);
+ }
+ *srcPtr = src;
+ return c;
+}
+
+size_t SkUTF16_FromUnichar(SkUnichar uni, uint16_t dst[]) {
+ SkASSERT((unsigned)uni <= 0x10FFFF);
+
+ int extra = (uni > 0xFFFF);
+
+ if (dst) {
+ if (extra) {
+ // dst[0] = SkToU16(0xD800 | ((uni - 0x10000) >> 10));
+ // dst[0] = SkToU16(0xD800 | ((uni >> 10) - 64));
+ dst[0] = SkToU16((0xD800 - 64) + (uni >> 10));
+ dst[1] = SkToU16(0xDC00 | (uni & 0x3FF));
+
+ SkASSERT(SkUTF16_IsHighSurrogate(dst[0]));
+ SkASSERT(SkUTF16_IsLowSurrogate(dst[1]));
+ } else {
+ dst[0] = SkToU16(uni);
+ SkASSERT(!SkUTF16_IsHighSurrogate(dst[0]));
+ SkASSERT(!SkUTF16_IsLowSurrogate(dst[0]));
+ }
+ }
+ return 1 + extra;
+}
+
+size_t SkUTF16_ToUTF8(const uint16_t utf16[], int numberOf16BitValues,
+ char utf8[]) {
+ SkASSERT(numberOf16BitValues >= 0);
+ if (numberOf16BitValues <= 0) {
+ return 0;
+ }
+
+ SkASSERT(utf16 != nullptr);
+
+ const uint16_t* stop = utf16 + numberOf16BitValues;
+ size_t size = 0;
+
+ if (utf8 == nullptr) { // just count
+ while (utf16 < stop) {
+ size += SkUTF8_FromUnichar(SkUTF16_NextUnichar(&utf16), nullptr);
+ }
+ } else {
+ char* start = utf8;
+ while (utf16 < stop) {
+ utf8 += SkUTF8_FromUnichar(SkUTF16_NextUnichar(&utf16), utf8);
+ }
+ size = utf8 - start;
+ }
+ return size;
+}
diff --git a/gfx/skia/skia/src/core/SkUtils.h b/gfx/skia/skia/src/core/SkUtils.h
new file mode 100644
index 000000000..26f19e690
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkUtils.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkUtils_DEFINED
+#define SkUtils_DEFINED
+
+#include "SkTypes.h"
+
+/** Similar to memset(), but it assigns a 16bit value into the buffer.
+ @param buffer The memory to have value copied into it
+ @param value The 16bit value to be copied into buffer
+ @param count The number of times value should be copied into the buffer.
+*/
+static inline void sk_memset16(uint16_t buffer[], uint16_t value, int count) {
+ for (int i = 0; i < count; i++) {
+ buffer[i] = value;
+ }
+}
+
+/** Similar to memset(), but it assigns a 32bit value into the buffer.
+ @param buffer The memory to have value copied into it
+ @param value The 32bit value to be copied into buffer
+ @param count The number of times value should be copied into the buffer.
+*/
+static inline void sk_memset32(uint32_t buffer[], uint32_t value, int count) {
+ for (int i = 0; i < count; i++) {
+ buffer[i] = value;
+ }
+}
+
+/** Similar to memset(), but it assigns a 64bit value into the buffer.
+ @param buffer The memory to have value copied into it
+ @param value The 64bit value to be copied into buffer
+ @param count The number of times value should be copied into the buffer.
+*/
+static inline void sk_memset64(uint64_t buffer[], uint64_t value, int count) {
+ for (int i = 0; i < count; i++) {
+ buffer[i] = value;
+ }
+}
+///////////////////////////////////////////////////////////////////////////////
+
+#define kMaxBytesInUTF8Sequence 4
+
+#ifdef SK_DEBUG
+ int SkUTF8_LeadByteToCount(unsigned c);
+#else
+ #define SkUTF8_LeadByteToCount(c) ((((0xE5 << 24) >> ((unsigned)c >> 4 << 1)) & 3) + 1)
+#endif
+
+inline int SkUTF8_CountUTF8Bytes(const char utf8[]) {
+ SkASSERT(utf8);
+ return SkUTF8_LeadByteToCount(*(const uint8_t*)utf8);
+}
+
+int SkUTF8_CountUnichars(const char utf8[]);
+int SkUTF8_CountUnichars(const char utf8[], size_t byteLength);
+SkUnichar SkUTF8_ToUnichar(const char utf8[]);
+SkUnichar SkUTF8_NextUnichar(const char**);
+SkUnichar SkUTF8_PrevUnichar(const char**);
+
+/** Return the number of bytes needed to convert a unichar
+ into a utf8 sequence. Will be 1..kMaxBytesInUTF8Sequence,
+ or 0 if uni is illegal.
+*/
+size_t SkUTF8_FromUnichar(SkUnichar uni, char utf8[] = NULL);
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define SkUTF16_IsHighSurrogate(c) (((c) & 0xFC00) == 0xD800)
+#define SkUTF16_IsLowSurrogate(c) (((c) & 0xFC00) == 0xDC00)
+
+int SkUTF16_CountUnichars(const uint16_t utf16[]);
+int SkUTF16_CountUnichars(const uint16_t utf16[], int numberOf16BitValues);
+// returns the current unichar and then moves past it (*p++)
+SkUnichar SkUTF16_NextUnichar(const uint16_t**);
+// backs up to the previous unichar value, and returns it (*--p)
+SkUnichar SkUTF16_PrevUnichar(const uint16_t**);
+size_t SkUTF16_FromUnichar(SkUnichar uni, uint16_t utf16[] = NULL);
+
+size_t SkUTF16_ToUTF8(const uint16_t utf16[], int numberOf16BitValues,
+ char utf8[] = NULL);
+
+inline bool SkUnichar_IsVariationSelector(SkUnichar uni) {
+/* The 'true' ranges are:
+ * 0x180B <= uni <= 0x180D
+ * 0xFE00 <= uni <= 0xFE0F
+ * 0xE0100 <= uni <= 0xE01EF
+ */
+ if (uni < 0x180B || uni > 0xE01EF) {
+ return false;
+ }
+ if ((uni > 0x180D && uni < 0xFE00) || (uni > 0xFE0F && uni < 0xE0100)) {
+ return false;
+ }
+ return true;
+}
+
+#endif
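
// Illustrative sketch, not part of the patch above: encoding one code point
// with the helpers declared here. U+1F600 needs four UTF-8 bytes
// (F0 9F 98 80) and a UTF-16 surrogate pair (D83D DE00).
#include "SkUtils.h"

static void utf_encode_example() {
    char     utf8[kMaxBytesInUTF8Sequence];
    uint16_t utf16[2];

    size_t utf8Len  = SkUTF8_FromUnichar(0x1F600, utf8);
    size_t utf16Len = SkUTF16_FromUnichar(0x1F600, utf16);

    SkASSERT(4 == utf8Len && 2 == utf16Len);
    SkASSERT(SkUTF16_IsHighSurrogate(utf16[0]) && SkUTF16_IsLowSurrogate(utf16[1]));
    SkDebugf("U+1F600 -> %d UTF-8 bytes, %d UTF-16 units\n",
             (int)utf8Len, (int)utf16Len);
}
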
diff --git a/gfx/skia/skia/src/core/SkUtilsArm.cpp b/gfx/skia/skia/src/core/SkUtilsArm.cpp
new file mode 100644
index 000000000..c29938fdf
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkUtilsArm.cpp
@@ -0,0 +1,8 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// This file no longer needs to exist, but it's still referenced by Chrome's GYP / GN builds.
diff --git a/gfx/skia/skia/src/core/SkUtilsArm.h b/gfx/skia/skia/src/core/SkUtilsArm.h
new file mode 100644
index 000000000..7cb34e2ee
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkUtilsArm.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkUtilsArm_DEFINED
+#define SkUtilsArm_DEFINED
+
+#include "SkCpu.h"
+
+#if defined(SK_ARM_HAS_OPTIONAL_NEON)
+ #define SK_ARM_NEON_WRAP(x) (SkCpu::Supports(SkCpu::NEON) ? x ## _neon : x)
+#elif defined(SK_ARM_HAS_NEON)
+ #define SK_ARM_NEON_WRAP(x) (x ## _neon)
+#else
+ #define SK_ARM_NEON_WRAP(x) (x)
+#endif
+
+#endif // SkUtilsArm_DEFINED
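
// Illustrative sketch, not part of the patch above: how SK_ARM_NEON_WRAP is
// meant to be used. Both fill functions here are hypothetical stand-ins; the
// macro resolves to the _neon name at compile time, to a runtime
// SkCpu::Supports() check, or to the plain name, depending on the
// SK_ARM_HAS_*NEON defines.
#include "SkUtilsArm.h"

static void fill16(uint16_t* dst, uint16_t value, int count) {
    while (count-- > 0) { *dst++ = value; }
}
static void fill16_neon(uint16_t* dst, uint16_t value, int count) {
    fill16(dst, value, count);   // stand-in; a real build would use NEON here
}

static void fill_row(uint16_t* dst, uint16_t value, int count) {
    SK_ARM_NEON_WRAP(fill16)(dst, value, count);
}
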
diff --git a/gfx/skia/skia/src/core/SkValidatingReadBuffer.cpp b/gfx/skia/skia/src/core/SkValidatingReadBuffer.cpp
new file mode 100644
index 000000000..e1b84d597
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkValidatingReadBuffer.cpp
@@ -0,0 +1,297 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmap.h"
+#include "SkErrorInternals.h"
+#include "SkValidatingReadBuffer.h"
+#include "SkStream.h"
+#include "SkTypeface.h"
+
+SkValidatingReadBuffer::SkValidatingReadBuffer(const void* data, size_t size) :
+ fError(false) {
+ this->setMemory(data, size);
+ this->setFlags(SkReadBuffer::kValidation_Flag);
+}
+
+SkValidatingReadBuffer::~SkValidatingReadBuffer() {
+}
+
+bool SkValidatingReadBuffer::validate(bool isValid) {
+ if (!fError && !isValid) {
+ // When an error is found, send the read cursor to the end of the stream
+ fReader.skip(fReader.available());
+ fError = true;
+ }
+ return !fError;
+}
+
+bool SkValidatingReadBuffer::isValid() const {
+ return !fError;
+}
+
+void SkValidatingReadBuffer::setMemory(const void* data, size_t size) {
+ this->validate(IsPtrAlign4(data) && (SkAlign4(size) == size));
+ if (!fError) {
+ fReader.setMemory(data, size);
+ }
+}
+
+const void* SkValidatingReadBuffer::skip(size_t size) {
+ size_t inc = SkAlign4(size);
+ const void* addr = fReader.peek();
+ this->validate(IsPtrAlign4(addr) && fReader.isAvailable(inc));
+ if (fError) {
+ return nullptr;
+ }
+
+ fReader.skip(size);
+ return addr;
+}
+
+// All the methods in this file funnel down into either readInt(), readScalar() or skip(),
+// followed by a memcpy. So we've got all our validation in readInt(), readScalar() and skip();
+// if they fail they'll return a zero value or skip nothing, respectively, and set fError to
+// true, which the caller should check to see if an error occurred during the read operation.
+
+bool SkValidatingReadBuffer::readBool() {
+ uint32_t value = this->readInt();
+ // Boolean value should be either 0 or 1
+ this->validate(!(value & ~1));
+ return value != 0;
+}
+
+SkColor SkValidatingReadBuffer::readColor() {
+ return this->readInt();
+}
+
+int32_t SkValidatingReadBuffer::readInt() {
+ const size_t inc = sizeof(int32_t);
+ this->validate(IsPtrAlign4(fReader.peek()) && fReader.isAvailable(inc));
+ return fError ? 0 : fReader.readInt();
+}
+
+SkScalar SkValidatingReadBuffer::readScalar() {
+ const size_t inc = sizeof(SkScalar);
+ this->validate(IsPtrAlign4(fReader.peek()) && fReader.isAvailable(inc));
+ return fError ? 0 : fReader.readScalar();
+}
+
+uint32_t SkValidatingReadBuffer::readUInt() {
+ return this->readInt();
+}
+
+int32_t SkValidatingReadBuffer::read32() {
+ return this->readInt();
+}
+
+uint8_t SkValidatingReadBuffer::peekByte() {
+ if (fReader.available() <= 0) {
+ fError = true;
+ return 0;
+ }
+ return *((uint8_t*) fReader.peek());
+}
+
+void SkValidatingReadBuffer::readString(SkString* string) {
+ const size_t len = this->readUInt();
+ const void* ptr = fReader.peek();
+ const char* cptr = (const char*)ptr;
+
+ // skip over the string + '\0' and then pad to a multiple of 4
+ const size_t alignedSize = SkAlign4(len + 1);
+ this->skip(alignedSize);
+ if (!fError) {
+ this->validate(cptr[len] == '\0');
+ }
+ if (!fError) {
+ string->set(cptr, len);
+ }
+}
+
+void SkValidatingReadBuffer::readColor4f(SkColor4f* color) {
+ const void* ptr = this->skip(sizeof(SkColor4f));
+ if (!fError) {
+ memcpy(color, ptr, sizeof(SkColor4f));
+ }
+}
+
+void SkValidatingReadBuffer::readPoint(SkPoint* point) {
+ point->fX = this->readScalar();
+ point->fY = this->readScalar();
+}
+
+void SkValidatingReadBuffer::readMatrix(SkMatrix* matrix) {
+ size_t size = 0;
+ if (!fError) {
+ size = matrix->readFromMemory(fReader.peek(), fReader.available());
+ this->validate((SkAlign4(size) == size) && (0 != size));
+ }
+ if (!fError) {
+ (void)this->skip(size);
+ }
+}
+
+void SkValidatingReadBuffer::readIRect(SkIRect* rect) {
+ const void* ptr = this->skip(sizeof(SkIRect));
+ if (!fError) {
+ memcpy(rect, ptr, sizeof(SkIRect));
+ }
+}
+
+void SkValidatingReadBuffer::readRect(SkRect* rect) {
+ const void* ptr = this->skip(sizeof(SkRect));
+ if (!fError) {
+ memcpy(rect, ptr, sizeof(SkRect));
+ }
+}
+
+void SkValidatingReadBuffer::readRRect(SkRRect* rrect) {
+ const void* ptr = this->skip(sizeof(SkRRect));
+ if (!fError) {
+ memcpy(rrect, ptr, sizeof(SkRRect));
+ this->validate(rrect->isValid());
+ }
+
+ if (fError) {
+ rrect->setEmpty();
+ }
+}
+
+void SkValidatingReadBuffer::readRegion(SkRegion* region) {
+ size_t size = 0;
+ if (!fError) {
+ size = region->readFromMemory(fReader.peek(), fReader.available());
+ this->validate((SkAlign4(size) == size) && (0 != size));
+ }
+ if (!fError) {
+ (void)this->skip(size);
+ }
+}
+
+void SkValidatingReadBuffer::readPath(SkPath* path) {
+ size_t size = 0;
+ if (!fError) {
+ size = path->readFromMemory(fReader.peek(), fReader.available());
+ this->validate((SkAlign4(size) == size) && (0 != size));
+ }
+ if (!fError) {
+ (void)this->skip(size);
+ }
+}
+
+bool SkValidatingReadBuffer::readArray(void* value, size_t size, size_t elementSize) {
+ const uint32_t count = this->getArrayCount();
+ this->validate(size == count);
+ (void)this->skip(sizeof(uint32_t)); // Skip array count
+ const uint64_t byteLength64 = sk_64_mul(count, elementSize);
+ const size_t byteLength = count * elementSize;
+ this->validate(byteLength == byteLength64);
+ const void* ptr = this->skip(SkAlign4(byteLength));
+ if (!fError) {
+ memcpy(value, ptr, byteLength);
+ return true;
+ }
+ return false;
+}
+
+bool SkValidatingReadBuffer::readByteArray(void* value, size_t size) {
+ return readArray(static_cast<unsigned char*>(value), size, sizeof(unsigned char));
+}
+
+bool SkValidatingReadBuffer::readColorArray(SkColor* colors, size_t size) {
+ return readArray(colors, size, sizeof(SkColor));
+}
+
+bool SkValidatingReadBuffer::readColor4fArray(SkColor4f* colors, size_t size) {
+ return readArray(colors, size, sizeof(SkColor4f));
+}
+
+bool SkValidatingReadBuffer::readIntArray(int32_t* values, size_t size) {
+ return readArray(values, size, sizeof(int32_t));
+}
+
+bool SkValidatingReadBuffer::readPointArray(SkPoint* points, size_t size) {
+ return readArray(points, size, sizeof(SkPoint));
+}
+
+bool SkValidatingReadBuffer::readScalarArray(SkScalar* values, size_t size) {
+ return readArray(values, size, sizeof(SkScalar));
+}
+
+uint32_t SkValidatingReadBuffer::getArrayCount() {
+ const size_t inc = sizeof(uint32_t);
+ fError = fError || !IsPtrAlign4(fReader.peek()) || !fReader.isAvailable(inc);
+ return fError ? 0 : *(uint32_t*)fReader.peek();
+}
+
+bool SkValidatingReadBuffer::validateAvailable(size_t size) {
+ return this->validate((size <= SK_MaxU32) && fReader.isAvailable(static_cast<uint32_t>(size)));
+}
+
+SkFlattenable* SkValidatingReadBuffer::readFlattenable(SkFlattenable::Type type) {
+ // The validating read buffer always uses strings and string-indices for unflattening.
+ SkASSERT(0 == this->factoryCount());
+
+ uint8_t firstByte = this->peekByte();
+ if (fError) {
+ return nullptr;
+ }
+
+ SkString name;
+ if (firstByte) {
+ // If the first byte is non-zero, the flattenable is specified by a string.
+ this->readString(&name);
+ if (fError) {
+ return nullptr;
+ }
+
+ // Add the string to the dictionary.
+ fFlattenableDict.set(fFlattenableDict.count() + 1, name);
+ } else {
+ // Read the index. We are guaranteed that the first byte
+ // is zeroed, so we must shift down a byte.
+ uint32_t index = fReader.readU32() >> 8;
+ if (0 == index) {
+ return nullptr; // writer failed to give us the flattenable
+ }
+
+ SkString* namePtr = fFlattenableDict.find(index);
+ if (!namePtr) {
+ return nullptr;
+ }
+ name = *namePtr;
+ }
+
+    // Is this the type we wanted?
+ const char* cname = name.c_str();
+ SkFlattenable::Type baseType;
+ if (!SkFlattenable::NameToType(cname, &baseType) || (baseType != type)) {
+ return nullptr;
+ }
+
+ // Get the factory for this flattenable.
+ SkFlattenable::Factory factory = this->getCustomFactory(name);
+ if (!factory) {
+ factory = SkFlattenable::NameToFactory(cname);
+ if (!factory) {
+ return nullptr; // writer failed to give us the flattenable
+ }
+ }
+
+ // If we get here, the factory is non-null.
+ sk_sp<SkFlattenable> obj;
+ uint32_t sizeRecorded = this->readUInt();
+ size_t offset = fReader.offset();
+ obj = (*factory)(*this);
+ // check that we read the amount we expected
+ size_t sizeRead = fReader.offset() - offset;
+ this->validate(sizeRecorded == sizeRead);
+ if (fError) {
+ obj = nullptr;
+ }
+ return obj.release();
+}
diff --git a/gfx/skia/skia/src/core/SkValidatingReadBuffer.h b/gfx/skia/skia/src/core/SkValidatingReadBuffer.h
new file mode 100644
index 000000000..825c4b9af
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkValidatingReadBuffer.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkValidatingReadBuffer_DEFINED
+#define SkValidatingReadBuffer_DEFINED
+
+#include "SkRefCnt.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+#include "SkPath.h"
+#include "SkPicture.h"
+#include "SkReader32.h"
+
+class SkBitmap;
+
+class SkValidatingReadBuffer : public SkReadBuffer {
+public:
+ SkValidatingReadBuffer(const void* data, size_t size);
+ ~SkValidatingReadBuffer() override;
+
+ SkReadBuffer* clone(const void* data, size_t size) const override {
+ return new SkValidatingReadBuffer(data, size);
+ }
+
+ const void* skip(size_t size) override;
+
+ // primitives
+ bool readBool() override;
+ SkColor readColor() override;
+ int32_t readInt() override;
+ SkScalar readScalar() override;
+ uint32_t readUInt() override;
+ int32_t read32() override;
+
+ // peek
+ uint8_t peekByte() override;
+
+ // strings -- the caller is responsible for freeing the string contents
+ void readString(SkString* string) override;
+
+ // common data structures
+ SkFlattenable* readFlattenable(SkFlattenable::Type type) override;
+ void readColor4f(SkColor4f* color) override;
+ void readPoint(SkPoint* point) override;
+ void readMatrix(SkMatrix* matrix) override;
+ void readIRect(SkIRect* rect) override;
+ void readRect(SkRect* rect) override;
+ void readRRect(SkRRect* rrect) override;
+ void readRegion(SkRegion* region) override;
+ void readPath(SkPath* path) override;
+
+ // binary data and arrays
+ bool readByteArray(void* value, size_t size) override;
+ bool readColorArray(SkColor* colors, size_t size) override;
+ bool readColor4fArray(SkColor4f* colors, size_t size) override;
+ bool readIntArray(int32_t* values, size_t size) override;
+ bool readPointArray(SkPoint* points, size_t size) override;
+ bool readScalarArray(SkScalar* values, size_t size) override;
+
+ // helpers to get info about arrays and binary data
+ uint32_t getArrayCount() override;
+
+ bool validate(bool isValid) override;
+ bool isValid() const override;
+
+ bool validateAvailable(size_t size) override;
+
+private:
+ bool readArray(void* value, size_t size, size_t elementSize);
+
+ void setMemory(const void* data, size_t size);
+
+ static bool IsPtrAlign4(const void* ptr) {
+ return SkIsAlign4((uintptr_t)ptr);
+ }
+
+ bool fError;
+
+ typedef SkReadBuffer INHERITED;
+};
+
+#endif // SkValidatingReadBuffer_DEFINED
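
// Illustrative sketch, not part of the patch above: the read-then-check
// pattern this buffer supports. A short or misaligned input trips validate(),
// later reads return zeros, and one isValid() check at the end catches it all.
#include "SkRect.h"
#include "SkValidatingReadBuffer.h"

static bool read_two_rects(const void* data, size_t size, SkRect out[2]) {
    SkValidatingReadBuffer buffer(data, size);
    buffer.readRect(&out[0]);
    buffer.readRect(&out[1]);
    return buffer.isValid();
}
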
diff --git a/gfx/skia/skia/src/core/SkValidationUtils.h b/gfx/skia/skia/src/core/SkValidationUtils.h
new file mode 100644
index 000000000..e9e59866c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkValidationUtils.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkValidationUtils_DEFINED
+#define SkValidationUtils_DEFINED
+
+#include "SkBitmap.h"
+#include "SkXfermode.h"
+
+/** Returns true if coeff's value is in the SkXfermode::Coeff enum.
+ */
+static inline bool SkIsValidCoeff(SkXfermode::Coeff coeff) {
+ return coeff >= 0 && coeff < SkXfermode::kCoeffCount;
+}
+
+/** Returns true if mode's value is in the SkXfermode::Mode enum.
+ */
+static inline bool SkIsValidMode(SkXfermode::Mode mode) {
+ return (mode >= 0) && (mode <= SkXfermode::kLastMode);
+}
+
+/** Returns true if the rect's dimensions are between 0 and SK_MaxS32
+ */
+static inline bool SkIsValidIRect(const SkIRect& rect) {
+ return rect.width() >= 0 && rect.height() >= 0;
+}
+
+/** Returns true if the rect's dimensions are between 0 and SK_ScalarMax
+ */
+static inline bool SkIsValidRect(const SkRect& rect) {
+ return (rect.fLeft <= rect.fRight) &&
+ (rect.fTop <= rect.fBottom) &&
+ SkScalarIsFinite(rect.width()) &&
+ SkScalarIsFinite(rect.height());
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkVarAlloc.cpp b/gfx/skia/skia/src/core/SkVarAlloc.cpp
new file mode 100644
index 000000000..ea0524b67
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkVarAlloc.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkVarAlloc.h"
+
+struct SkVarAlloc::Block {
+ Block* prev;
+ char* data() { return (char*)(this + 1); }
+
+ static Block* Alloc(Block* prev, size_t size) {
+ SkASSERT(size >= sizeof(Block));
+ Block* b = (Block*)sk_malloc_throw(size);
+ b->prev = prev;
+ return b;
+ }
+};
+
+SkVarAlloc::SkVarAlloc(size_t minLgSize)
+ : fBytesAllocated(0)
+ , fByte(nullptr)
+ , fRemaining(0)
+ , fLgSize(minLgSize)
+ , fBlock(nullptr) {}
+
+SkVarAlloc::SkVarAlloc(size_t minLgSize, char* storage, size_t len)
+ : fBytesAllocated(0)
+ , fByte(storage)
+ , fRemaining(len)
+ , fLgSize(minLgSize)
+ , fBlock(nullptr) {}
+
+SkVarAlloc::~SkVarAlloc() {
+ Block* b = fBlock;
+ while (b) {
+ Block* prev = b->prev;
+ sk_free(b);
+ b = prev;
+ }
+}
+
+void SkVarAlloc::makeSpace(size_t bytes) {
+ SkASSERT(SkIsAlignPtr(bytes));
+
+ size_t alloc = static_cast<size_t>(1)<<fLgSize++;
+ while (alloc < bytes + sizeof(Block)) {
+ alloc *= 2;
+ }
+ fBytesAllocated += alloc;
+ fBlock = Block::Alloc(fBlock, alloc);
+ fByte = fBlock->data();
+ fRemaining = alloc - sizeof(Block);
+}
diff --git a/gfx/skia/skia/src/core/SkVarAlloc.h b/gfx/skia/skia/src/core/SkVarAlloc.h
new file mode 100644
index 000000000..3729bad10
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkVarAlloc.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkVarAlloc_DEFINED
+#define SkVarAlloc_DEFINED
+
+#include "SkTypes.h"
+
+class SkVarAlloc : SkNoncopyable {
+public:
+ // Smallest block we'll allocate is 2**N bytes.
+ explicit SkVarAlloc(size_t minLgSize);
+ // Same as above, but first uses up to len bytes from storage.
+ SkVarAlloc(size_t minLgSize, char* storage, size_t len);
+
+ ~SkVarAlloc();
+
+ // Returns contiguous bytes aligned at least for pointers.
+ char* alloc(size_t bytes) {
+ bytes = SkAlignPtr(bytes);
+
+ if (bytes > fRemaining) {
+ this->makeSpace(bytes);
+ }
+ SkASSERT(bytes <= fRemaining);
+
+ char* ptr = fByte;
+ fByte += bytes;
+ fRemaining = SkToU32(fRemaining - bytes);
+ return ptr;
+ }
+
+ // Returns our best estimate of the number of bytes we've allocated.
+ // (We may not track this precisely to save space.)
+ size_t approxBytesAllocated() const { return fBytesAllocated; }
+
+private:
+ void makeSpace(size_t bytes);
+
+ size_t fBytesAllocated;
+
+ char* fByte;
+ unsigned fRemaining;
+ unsigned fLgSize;
+
+ struct Block;
+ Block* fBlock;
+};
+static_assert(sizeof(SkVarAlloc) <= 32, "SkVarAllocSize");
+
+#endif//SkVarAlloc_DEFINED
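
// Illustrative sketch, not part of the patch above: bump allocation with
// power-of-two block growth. minLgSize = 6 means the first heap block is at
// least 64 bytes; a request that does not fit triggers makeSpace(), which
// doubles the block size until the request (plus the Block header) fits.
#include "SkVarAlloc.h"

static void bump_alloc_demo() {
    SkVarAlloc alloc(6);              // smallest block: 2^6 = 64 bytes
    char* small = alloc.alloc(24);    // served from the first 64-byte block
    char* large = alloc.alloc(200);   // forces a new, larger block
    sk_bzero(small, 24);
    sk_bzero(large, 200);
    SkDebugf("approx bytes allocated: %d\n", (int)alloc.approxBytesAllocated());
}
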
diff --git a/gfx/skia/skia/src/core/SkVertState.cpp b/gfx/skia/skia/src/core/SkVertState.cpp
new file mode 100644
index 000000000..7c3047ec4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkVertState.cpp
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkVertState.h"
+
+bool VertState::Triangles(VertState* state) {
+ int index = state->fCurrIndex;
+ if (index + 3 > state->fCount) {
+ return false;
+ }
+ state->f0 = index + 0;
+ state->f1 = index + 1;
+ state->f2 = index + 2;
+ state->fCurrIndex = index + 3;
+ return true;
+}
+
+bool VertState::TrianglesX(VertState* state) {
+ const uint16_t* indices = state->fIndices;
+ int index = state->fCurrIndex;
+ if (index + 3 > state->fCount) {
+ return false;
+ }
+ state->f0 = indices[index + 0];
+ state->f1 = indices[index + 1];
+ state->f2 = indices[index + 2];
+ state->fCurrIndex = index + 3;
+ return true;
+}
+
+bool VertState::TriangleStrip(VertState* state) {
+ int index = state->fCurrIndex;
+ if (index + 3 > state->fCount) {
+ return false;
+ }
+ state->f2 = index + 2;
+ if (index & 1) {
+ state->f0 = index + 1;
+ state->f1 = index + 0;
+ } else {
+ state->f0 = index + 0;
+ state->f1 = index + 1;
+ }
+ state->fCurrIndex = index + 1;
+ return true;
+}
+
+bool VertState::TriangleStripX(VertState* state) {
+ const uint16_t* indices = state->fIndices;
+ int index = state->fCurrIndex;
+ if (index + 3 > state->fCount) {
+ return false;
+ }
+ state->f2 = indices[index + 2];
+ if (index & 1) {
+ state->f0 = indices[index + 1];
+ state->f1 = indices[index + 0];
+ } else {
+ state->f0 = indices[index + 0];
+ state->f1 = indices[index + 1];
+ }
+ state->fCurrIndex = index + 1;
+ return true;
+}
+
+bool VertState::TriangleFan(VertState* state) {
+ int index = state->fCurrIndex;
+ if (index + 3 > state->fCount) {
+ return false;
+ }
+ state->f0 = 0;
+ state->f1 = index + 1;
+ state->f2 = index + 2;
+ state->fCurrIndex = index + 1;
+ return true;
+}
+
+bool VertState::TriangleFanX(VertState* state) {
+ const uint16_t* indices = state->fIndices;
+ int index = state->fCurrIndex;
+ if (index + 3 > state->fCount) {
+ return false;
+ }
+ state->f0 = indices[0];
+ state->f1 = indices[index + 1];
+ state->f2 = indices[index + 2];
+ state->fCurrIndex = index + 1;
+ return true;
+}
+
+VertState::Proc VertState::chooseProc(SkCanvas::VertexMode mode) {
+ switch (mode) {
+ case SkCanvas::kTriangles_VertexMode:
+ return fIndices ? TrianglesX : Triangles;
+ case SkCanvas::kTriangleStrip_VertexMode:
+ return fIndices ? TriangleStripX : TriangleStrip;
+ case SkCanvas::kTriangleFan_VertexMode:
+ return fIndices ? TriangleFanX : TriangleFan;
+ default:
+ return nullptr;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkVertState.h b/gfx/skia/skia/src/core/SkVertState.h
new file mode 100644
index 000000000..ab794521b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkVertState.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkVertState_DEFINED
+#define SkVertState_DEFINED
+
+#include "SkCanvas.h"
+
+/** \struct VertState
+ This is a helper for drawVertices(). It is used to iterate over the triangles
+ that are to be rendered based on an SkCanvas::VertexMode and (optionally) an
+ index array. It does not copy the index array and the client must ensure it
+ remains valid for the lifetime of the VertState object.
+*/
+
+struct VertState {
+ int f0, f1, f2;
+
+ /**
+ * Construct a VertState from a vertex count, index array, and index count.
+ * If the vertices are unindexed pass nullptr for indices.
+ */
+ VertState(int vCount, const uint16_t indices[], int indexCount)
+ : fIndices(indices) {
+ fCurrIndex = 0;
+ if (indices) {
+ fCount = indexCount;
+ } else {
+ fCount = vCount;
+ }
+ }
+
+ typedef bool (*Proc)(VertState*);
+
+ /**
+ * Choose an appropriate function to traverse the vertices.
+ * @param mode Specifies the SkCanvas::VertexMode.
+ */
+ Proc chooseProc(SkCanvas::VertexMode mode);
+
+private:
+ int fCount;
+ int fCurrIndex;
+ const uint16_t* fIndices;
+
+ static bool Triangles(VertState*);
+ static bool TrianglesX(VertState*);
+ static bool TriangleStrip(VertState*);
+ static bool TriangleStripX(VertState*);
+ static bool TriangleFan(VertState*);
+ static bool TriangleFanX(VertState*);
+};
+
+#endif
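
// Illustrative sketch, not part of the patch above: the intended iteration
// pattern. chooseProc() picks the traversal routine for the vertex mode, and
// each successful call fills f0/f1/f2 with one triangle's vertex indices.
#include "SkVertState.h"

static int count_triangles(SkCanvas::VertexMode mode, int vertexCount,
                           const uint16_t indices[], int indexCount) {
    VertState state(vertexCount, indices, indexCount);
    VertState::Proc proc = state.chooseProc(mode);
    int triangles = 0;
    while (proc && proc(&state)) {
        ++triangles;   // current triangle is (state.f0, state.f1, state.f2)
    }
    return triangles;
}
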
diff --git a/gfx/skia/skia/src/core/SkWriteBuffer.cpp b/gfx/skia/skia/src/core/SkWriteBuffer.cpp
new file mode 100644
index 000000000..019bc247b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkWriteBuffer.cpp
@@ -0,0 +1,304 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkWriteBuffer.h"
+#include "SkBitmap.h"
+#include "SkData.h"
+#include "SkDeduper.h"
+#include "SkPixelRef.h"
+#include "SkPtrRecorder.h"
+#include "SkStream.h"
+#include "SkTypeface.h"
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkBinaryWriteBuffer::SkBinaryWriteBuffer(uint32_t flags)
+ : fFlags(flags)
+ , fFactorySet(nullptr)
+ , fTFSet(nullptr) {
+}
+
+SkBinaryWriteBuffer::SkBinaryWriteBuffer(void* storage, size_t storageSize, uint32_t flags)
+ : fFlags(flags)
+ , fFactorySet(nullptr)
+ , fWriter(storage, storageSize)
+ , fTFSet(nullptr) {
+}
+
+SkBinaryWriteBuffer::~SkBinaryWriteBuffer() {
+ SkSafeUnref(fFactorySet);
+ SkSafeUnref(fTFSet);
+}
+
+void SkBinaryWriteBuffer::writeByteArray(const void* data, size_t size) {
+ fWriter.write32(SkToU32(size));
+ fWriter.writePad(data, size);
+}
+
+void SkBinaryWriteBuffer::writeBool(bool value) {
+ fWriter.writeBool(value);
+}
+
+void SkBinaryWriteBuffer::writeScalar(SkScalar value) {
+ fWriter.writeScalar(value);
+}
+
+void SkBinaryWriteBuffer::writeScalarArray(const SkScalar* value, uint32_t count) {
+ fWriter.write32(count);
+ fWriter.write(value, count * sizeof(SkScalar));
+}
+
+void SkBinaryWriteBuffer::writeInt(int32_t value) {
+ fWriter.write32(value);
+}
+
+void SkBinaryWriteBuffer::writeIntArray(const int32_t* value, uint32_t count) {
+ fWriter.write32(count);
+ fWriter.write(value, count * sizeof(int32_t));
+}
+
+void SkBinaryWriteBuffer::writeUInt(uint32_t value) {
+ fWriter.write32(value);
+}
+
+void SkBinaryWriteBuffer::writeString(const char* value) {
+ fWriter.writeString(value);
+}
+
+void SkBinaryWriteBuffer::writeColor(SkColor color) {
+ fWriter.write32(color);
+}
+
+void SkBinaryWriteBuffer::writeColorArray(const SkColor* color, uint32_t count) {
+ fWriter.write32(count);
+ fWriter.write(color, count * sizeof(SkColor));
+}
+
+void SkBinaryWriteBuffer::writeColor4f(const SkColor4f& color) {
+ fWriter.write(&color, sizeof(SkColor4f));
+}
+
+void SkBinaryWriteBuffer::writeColor4fArray(const SkColor4f* color, uint32_t count) {
+ fWriter.write32(count);
+ fWriter.write(color, count * sizeof(SkColor4f));
+}
+
+void SkBinaryWriteBuffer::writePoint(const SkPoint& point) {
+ fWriter.writeScalar(point.fX);
+ fWriter.writeScalar(point.fY);
+}
+
+void SkBinaryWriteBuffer::writePointArray(const SkPoint* point, uint32_t count) {
+ fWriter.write32(count);
+ fWriter.write(point, count * sizeof(SkPoint));
+}
+
+void SkBinaryWriteBuffer::writeMatrix(const SkMatrix& matrix) {
+ fWriter.writeMatrix(matrix);
+}
+
+void SkBinaryWriteBuffer::writeIRect(const SkIRect& rect) {
+ fWriter.write(&rect, sizeof(SkIRect));
+}
+
+void SkBinaryWriteBuffer::writeRect(const SkRect& rect) {
+ fWriter.writeRect(rect);
+}
+
+void SkBinaryWriteBuffer::writeRegion(const SkRegion& region) {
+ fWriter.writeRegion(region);
+}
+
+void SkBinaryWriteBuffer::writePath(const SkPath& path) {
+ fWriter.writePath(path);
+}
+
+size_t SkBinaryWriteBuffer::writeStream(SkStream* stream, size_t length) {
+ fWriter.write32(SkToU32(length));
+ size_t bytesWritten = fWriter.readFromStream(stream, length);
+ if (bytesWritten < length) {
+ fWriter.reservePad(length - bytesWritten);
+ }
+ return bytesWritten;
+}
+
+bool SkBinaryWriteBuffer::writeToStream(SkWStream* stream) {
+ return fWriter.writeToStream(stream);
+}
+
+static void write_encoded_bitmap(SkBinaryWriteBuffer* buffer, SkData* data,
+ const SkIPoint& origin) {
+ buffer->writeDataAsByteArray(data);
+ buffer->write32(origin.fX);
+ buffer->write32(origin.fY);
+}
+
+void SkBinaryWriteBuffer::writeBitmap(const SkBitmap& bitmap) {
+ // Record the width and height. This way if readBitmap fails a dummy bitmap can be drawn at the
+ // right size.
+ this->writeInt(bitmap.width());
+ this->writeInt(bitmap.height());
+
+ // Record information about the bitmap in one of two ways, in order of priority:
+ // 1. If there is a function for encoding bitmaps, use it to write an encoded version of the
+ // bitmap. After writing a boolean value of false, signifying that a heap was not used, write
+ // the size of the encoded data. A non-zero size signifies that encoded data was written.
+ // 2. Call SkBitmap::flatten. After writing a boolean value of false, signifying that a heap was
+ // not used, write a zero to signify that the data was not encoded.
+
+ // Write a bool to indicate that we did not use an SkBitmapHeap. That feature is deprecated.
+ this->writeBool(false);
+
+ SkPixelRef* pixelRef = bitmap.pixelRef();
+ if (pixelRef) {
+ // see if the pixelref already has an encoded version
+ sk_sp<SkData> existingData(pixelRef->refEncodedData());
+ if (existingData) {
+ // Assumes that if the client did not set a serializer, they are
+ // happy to get the encoded data.
+ if (!fPixelSerializer || fPixelSerializer->useEncodedData(existingData->data(),
+ existingData->size())) {
+ write_encoded_bitmap(this, existingData.get(), bitmap.pixelRefOrigin());
+ return;
+ }
+ }
+
+ // see if the caller wants to manually encode
+ SkAutoPixmapUnlock result;
+ if (fPixelSerializer && bitmap.requestLock(&result)) {
+ sk_sp<SkData> data(fPixelSerializer->encode(result.pixmap()));
+ if (data) {
+ // if we have to "encode" the bitmap, then we assume there is no
+ // offset to share, since we are effectively creating a new pixelref
+ write_encoded_bitmap(this, data.get(), SkIPoint::Make(0, 0));
+ return;
+ }
+ }
+ }
+
+ this->writeUInt(0); // signal raw pixels
+ SkBitmap::WriteRawPixels(this, bitmap);
+}
+
+void SkBinaryWriteBuffer::writeImage(const SkImage* image) {
+ if (fDeduper) {
+ this->write32(fDeduper->findOrDefineImage(const_cast<SkImage*>(image)));
+ return;
+ }
+
+ this->writeInt(image->width());
+ this->writeInt(image->height());
+
+ sk_sp<SkData> encoded(image->encode(this->getPixelSerializer()));
+ if (encoded && encoded->size() > 0) {
+ write_encoded_bitmap(this, encoded.get(), SkIPoint::Make(0, 0));
+ return;
+ }
+
+ SkBitmap bm;
+ if (image->asLegacyBitmap(&bm, SkImage::kRO_LegacyBitmapMode)) {
+ this->writeUInt(1); // signal raw pixels.
+ SkBitmap::WriteRawPixels(this, bm);
+ return;
+ }
+
+ this->writeUInt(0); // signal no pixels (in place of the size of the encoded data)
+}
+
+void SkBinaryWriteBuffer::writeTypeface(SkTypeface* obj) {
+ if (fDeduper) {
+ this->write32(fDeduper->findOrDefineTypeface(obj));
+ return;
+ }
+
+ if (nullptr == obj || nullptr == fTFSet) {
+ fWriter.write32(0);
+ } else {
+ fWriter.write32(fTFSet->add(obj));
+ }
+}
+
+void SkBinaryWriteBuffer::writePaint(const SkPaint& paint) {
+ paint.flatten(*this);
+}
+
+SkFactorySet* SkBinaryWriteBuffer::setFactoryRecorder(SkFactorySet* rec) {
+ SkRefCnt_SafeAssign(fFactorySet, rec);
+ return rec;
+}
+
+SkRefCntSet* SkBinaryWriteBuffer::setTypefaceRecorder(SkRefCntSet* rec) {
+ SkRefCnt_SafeAssign(fTFSet, rec);
+ return rec;
+}
+
+void SkBinaryWriteBuffer::setPixelSerializer(SkPixelSerializer* serializer) {
+ fPixelSerializer.reset(serializer);
+ if (serializer) {
+ serializer->ref();
+ }
+}
+
+void SkBinaryWriteBuffer::writeFlattenable(const SkFlattenable* flattenable) {
+ if (nullptr == flattenable) {
+ this->write32(0);
+ return;
+ }
+
+ if (fDeduper) {
+ this->write32(fDeduper->findOrDefineFactory(const_cast<SkFlattenable*>(flattenable)));
+ } else {
+ /*
+ * We can write 1 of 2 versions of the flattenable:
+ * 1. index into fFactorySet : This assumes the writer will later
+ * resolve the function-ptrs into strings for its reader. SkPicture
+ * does exactly this, by writing a table of names (matching the indices)
+ * up front in its serialized form.
+ * 2. string name of the flattenable or index into fFlattenableDict: We
+ * store the string to allow the reader to specify its own factories
+ * after write time. In order to improve compression, if we have
+ * already written the string, we write its index instead.
+ */
+ if (fFactorySet) {
+ SkFlattenable::Factory factory = flattenable->getFactory();
+ SkASSERT(factory);
+ this->write32(fFactorySet->add(factory));
+ } else {
+ const char* name = flattenable->getTypeName();
+ SkASSERT(name);
+ SkString key(name);
+ if (uint32_t* indexPtr = fFlattenableDict.find(key)) {
+ // We will write the index as a 32-bit int. We want the first byte
+ // that we send to be zero - this will act as a sentinel that we
+ // have an index (not a string). This means that we will send
+ // the index shifted left by 8. The remaining 24 bits should be
+ // plenty to store the index. Note that this strategy depends on
+ // being little endian.
+ SkASSERT(0 == *indexPtr >> 24);
+ this->write32(*indexPtr << 8);
+ } else {
+ // Otherwise write the string. Clients should not use the empty
+ // string as a name, or we will have a problem.
+ SkASSERT(strcmp("", name));
+ this->writeString(name);
+
+ // Add key to dictionary.
+ fFlattenableDict.set(key, fFlattenableDict.count() + 1);
+ }
+ }
+ }
+
+ // make room for the size of the flattened object
+ (void)fWriter.reserve(sizeof(uint32_t));
+ // record the current size, so we can subtract after the object writes.
+ size_t offset = fWriter.bytesWritten();
+ // now flatten the object
+ flattenable->flatten(*this);
+ size_t objSize = fWriter.bytesWritten() - offset;
+ // record the obj's size
+ fWriter.overwriteTAt(offset - sizeof(uint32_t), SkToU32(objSize));
+}
diff --git a/gfx/skia/skia/src/core/SkWriter32.cpp b/gfx/skia/skia/src/core/SkWriter32.cpp
new file mode 100644
index 000000000..d328644f6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkWriter32.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkReader32.h"
+#include "SkString.h"
+#include "SkWriter32.h"
+
+/*
+ * Strings are stored as: length[4-bytes] + string_data + '\0' + pad_to_mul_4
+ */
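+// For example: writeString("hi", 2) emits the 4-byte length 2, the bytes 'h' 'i' '\0',
+// and one byte of zero padding -- 8 bytes in total, matching
+// WriteStringSize("hi", 2) == SkAlign4(4 + 2 + 1) == 8.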
+
+const char* SkReader32::readString(size_t* outLen) {
+ size_t len = this->readU32();
+ const void* ptr = this->peek();
+
+ // skip over the string + '\0' and then pad to a multiple of 4
+ size_t alignedSize = SkAlign4(len + 1);
+ this->skip(alignedSize);
+
+ if (outLen) {
+ *outLen = len;
+ }
+ return (const char*)ptr;
+}
+
+size_t SkReader32::readIntoString(SkString* copy) {
+ size_t len;
+ const char* ptr = this->readString(&len);
+ if (copy) {
+ copy->set(ptr, len);
+ }
+ return len;
+}
+
+void SkWriter32::writeString(const char str[], size_t len) {
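+ // a len that looks negative when cast to long (typically (size_t)-1) means:
+ // compute the length with strlen() below.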
+ if (nullptr == str) {
+ str = "";
+ len = 0;
+ }
+ if ((long)len < 0) {
+ len = strlen(str);
+ }
+
+ // [ 4 byte len ] [ str ... ] [1 - 4 \0s]
+ uint32_t* ptr = this->reservePad(sizeof(uint32_t) + len + 1);
+ *ptr = SkToU32(len);
+ char* chars = (char*)(ptr + 1);
+ memcpy(chars, str, len);
+ chars[len] = '\0';
+}
+
+size_t SkWriter32::WriteStringSize(const char* str, size_t len) {
+ if ((long)len < 0) {
+ SkASSERT(str);
+ len = strlen(str);
+ }
+ const size_t lenBytes = 4; // we use 4 bytes to record the length
+ // add 1 since we also write a terminating 0
+ return SkAlign4(lenBytes + len + 1);
+}
+
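+// Grow the internal storage to hold at least 'size' bytes: take the larger of the request
+// and 1.5x the current capacity, add 4K of slack, and copy any caller-supplied external
+// bytes into the newly reallocated internal block.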
+void SkWriter32::growToAtLeast(size_t size) {
+ const bool wasExternal = (fExternal != nullptr) && (fData == fExternal);
+
+ fCapacity = 4096 + SkTMax(size, fCapacity + (fCapacity / 2));
+ fInternal.realloc(fCapacity);
+ fData = fInternal.get();
+
+ if (wasExternal) {
+ // we were external, so copy in the data
+ memcpy(fData, fExternal, fUsed);
+ }
+}
+
+sk_sp<SkData> SkWriter32::snapshotAsData() const {
+ return SkData::MakeWithCopy(fData, fUsed);
+}
diff --git a/gfx/skia/skia/src/core/SkXfermode.cpp b/gfx/skia/skia/src/core/SkXfermode.cpp
new file mode 100644
index 000000000..38a160870
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkXfermode.cpp
@@ -0,0 +1,1535 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkXfermode.h"
+#include "SkXfermode_proccoeff.h"
+#include "SkColorPriv.h"
+#include "SkMathPriv.h"
+#include "SkOnce.h"
+#include "SkOpts.h"
+#include "SkRasterPipeline.h"
+#include "SkReadBuffer.h"
+#include "SkString.h"
+#include "SkWriteBuffer.h"
+#include "SkPM4f.h"
+
+#if SK_SUPPORT_GPU
+#include "GrFragmentProcessor.h"
+#include "effects/GrCustomXfermode.h"
+#include "effects/GrPorterDuffXferProcessor.h"
+#include "effects/GrXfermodeFragmentProcessor.h"
+#endif
+
+#define SkAlphaMulAlpha(a, b) SkMulDiv255Round(a, b)
+
+static inline unsigned saturated_add(unsigned a, unsigned b) {
+ SkASSERT(a <= 255);
+ SkASSERT(b <= 255);
+ unsigned sum = a + b;
+ if (sum > 255) {
+ sum = 255;
+ }
+ return sum;
+}
+
+static inline int clamp_signed_byte(int n) {
+ if (n < 0) {
+ n = 0;
+ } else if (n > 255) {
+ n = 255;
+ }
+ return n;
+}
+
+static inline int clamp_div255round(int prod) {
+ if (prod <= 0) {
+ return 0;
+ } else if (prod >= 255*255) {
+ return 255;
+ } else {
+ return SkDiv255Round(prod);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+#include "SkNx.h"
+
+static Sk4f alpha(const Sk4f& color) { return Sk4f(color[3]); }
+static Sk4f inv_alpha(const Sk4f& color) { return Sk4f(1 - color[3]); }
+static Sk4f pin_1(const Sk4f& value) { return Sk4f::Min(value, Sk4f(1)); }
+
+static Sk4f color_alpha(const Sk4f& color, float newAlpha) {
+ return Sk4f(color[0], color[1], color[2], newAlpha);
+}
+static Sk4f color_alpha(const Sk4f& color, const Sk4f& newAlpha) {
+ return color_alpha(color, newAlpha[3]);
+}
+
+static Sk4f set_argb(float a, float r, float g, float b) {
+ if (0 == SkPM4f::R) {
+ return Sk4f(r, g, b, a);
+ } else {
+ return Sk4f(b, g, r, a);
+ }
+}
+
+static Sk4f clear_4f(const Sk4f& s, const Sk4f& d) { return Sk4f(0); }
+static Sk4f src_4f(const Sk4f& s, const Sk4f& d) { return s; }
+static Sk4f dst_4f(const Sk4f& s, const Sk4f& d) { return d; }
+static Sk4f srcover_4f(const Sk4f& s, const Sk4f& d) { return s + inv_alpha(s) * d; }
+static Sk4f dstover_4f(const Sk4f& s, const Sk4f& d) { return d + inv_alpha(d) * s; }
+static Sk4f srcin_4f(const Sk4f& s, const Sk4f& d) { return s * alpha(d); }
+static Sk4f dstin_4f(const Sk4f& s, const Sk4f& d) { return d * alpha(s); }
+static Sk4f srcout_4f(const Sk4f& s, const Sk4f& d) { return s * inv_alpha(d); }
+static Sk4f dstout_4f(const Sk4f& s, const Sk4f& d) { return d * inv_alpha(s); }
+static Sk4f srcatop_4f(const Sk4f& s, const Sk4f& d) { return s * alpha(d) + d * inv_alpha(s); }
+static Sk4f dstatop_4f(const Sk4f& s, const Sk4f& d) { return d * alpha(s) + s * inv_alpha(d); }
+static Sk4f xor_4f(const Sk4f& s, const Sk4f& d) { return s * inv_alpha(d) + d * inv_alpha(s);}
+static Sk4f plus_4f(const Sk4f& s, const Sk4f& d) { return pin_1(s + d); }
+static Sk4f modulate_4f(const Sk4f& s, const Sk4f& d) { return s * d; }
+static Sk4f screen_4f(const Sk4f& s, const Sk4f& d) { return s + d - s * d; }
+
+static Sk4f multiply_4f(const Sk4f& s, const Sk4f& d) {
+ return s * inv_alpha(d) + d * inv_alpha(s) + s * d;
+}
+
+static Sk4f overlay_4f(const Sk4f& s, const Sk4f& d) {
+ Sk4f sa = alpha(s);
+ Sk4f da = alpha(d);
+ Sk4f two = Sk4f(2);
+ Sk4f rc = (two * d <= da).thenElse(two * s * d,
+ sa * da - two * (da - d) * (sa - s));
+ return pin_1(s + d - s * da + color_alpha(rc - d * sa, 0));
+}
+
+static Sk4f hardlight_4f(const Sk4f& s, const Sk4f& d) {
+ return overlay_4f(d, s);
+}
+
+static Sk4f darken_4f(const Sk4f& s, const Sk4f& d) {
+ Sk4f sa = alpha(s);
+ Sk4f da = alpha(d);
+ return s + d - Sk4f::Max(s * da, d * sa);
+}
+
+static Sk4f lighten_4f(const Sk4f& s, const Sk4f& d) {
+ Sk4f sa = alpha(s);
+ Sk4f da = alpha(d);
+ return s + d - Sk4f::Min(s * da, d * sa);
+}
+
+static Sk4f colordodge_4f(const Sk4f& s, const Sk4f& d) {
+ Sk4f sa = alpha(s);
+ Sk4f da = alpha(d);
+ Sk4f isa = Sk4f(1) - sa;
+ Sk4f ida = Sk4f(1) - da;
+
+ Sk4f srcover = s + d * isa;
+ Sk4f dstover = d + s * ida;
+ Sk4f otherwise = sa * Sk4f::Min(da, (d * sa) / (sa - s)) + s * ida + d * isa;
+
+ // Order matters here, preferring d==0 over s==sa.
+ auto colors = (d == Sk4f(0)).thenElse(dstover,
+ (s == sa).thenElse(srcover,
+ otherwise));
+ return color_alpha(colors, srcover);
+}
+
+static Sk4f colorburn_4f(const Sk4f& s, const Sk4f& d) {
+ Sk4f sa = alpha(s);
+ Sk4f da = alpha(d);
+ Sk4f isa = Sk4f(1) - sa;
+ Sk4f ida = Sk4f(1) - da;
+
+ Sk4f srcover = s + d * isa;
+ Sk4f dstover = d + s * ida;
+ Sk4f otherwise = sa * (da - Sk4f::Min(da, (da - d) * sa / s)) + s * ida + d * isa;
+
+ // Order matters here, preferring d==da over s==0.
+ auto colors = (d == da).thenElse(dstover,
+ (s == Sk4f(0)).thenElse(srcover,
+ otherwise));
+ return color_alpha(colors, srcover);
+}
+
+static Sk4f softlight_4f(const Sk4f& s, const Sk4f& d) {
+ Sk4f sa = alpha(s);
+ Sk4f da = alpha(d);
+ Sk4f isa = Sk4f(1) - sa;
+ Sk4f ida = Sk4f(1) - da;
+
+ // Some common terms.
+ Sk4f m = (da > Sk4f(0)).thenElse(d / da, Sk4f(0));
+ Sk4f s2 = Sk4f(2) * s;
+ Sk4f m4 = Sk4f(4) * m;
+
+ // The logic forks three ways:
+ // 1. dark src?
+ // 2. light src, dark dst?
+ // 3. light src, light dst?
+ Sk4f darkSrc = d * (sa + (s2 - sa) * (Sk4f(1) - m)); // Used in case 1.
+ Sk4f darkDst = (m4 * m4 + m4) * (m - Sk4f(1)) + Sk4f(7) * m; // Used in case 2.
+ Sk4f liteDst = m.sqrt() - m; // Used in case 3.
+ Sk4f liteSrc = d * sa + da * (s2 - sa) * (Sk4f(4) * d <= da).thenElse(darkDst,
+ liteDst); // Case 2 or 3?
+
+ return color_alpha(s * ida + d * isa + (s2 <= sa).thenElse(darkSrc, liteSrc), // Case 1 or 2/3?
+ s + d * isa);
+}
+
+static Sk4f difference_4f(const Sk4f& s, const Sk4f& d) {
+ Sk4f min = Sk4f::Min(s * alpha(d), d * alpha(s));
+ return s + d - min - color_alpha(min, 0);
+}
+
+static Sk4f exclusion_4f(const Sk4f& s, const Sk4f& d) {
+ Sk4f product = s * d;
+ return s + d - product - color_alpha(product, 0);
+}
+
+////////////////////////////////////////////////////
+
+// The CSS compositing spec introduces the following formulas:
+// (See https://dvcs.w3.org/hg/FXTF/rawfile/tip/compositing/index.html#blendingnonseparable)
+// SkComputeLuminance is similar to this formula, but it uses the newer definition from Rec. 709,
+// while PDF and CG use the one from Rec. 601.
+// See http://www.glennchan.info/articles/technical/hd-versus-sd-color-space/hd-versus-sd-color-space.htm
+static inline float Lum(float r, float g, float b) {
+ return r * 0.2126f + g * 0.7152f + b * 0.0722f;
+}
+
+static inline float max(float a, float b, float c) {
+ return SkTMax(a, SkTMax(b, c));
+}
+
+static inline float min(float a, float b, float c) {
+ return SkTMin(a, SkTMin(b, c));
+}
+
+static inline float Sat(float r, float g, float b) {
+ return max(r, g, b) - min(r, g, b);
+}
+
+static inline void setSaturationComponents(float* Cmin, float* Cmid, float* Cmax, float s) {
+ if(*Cmax > *Cmin) {
+ *Cmid = (*Cmid - *Cmin) * s / (*Cmax - *Cmin);
+ *Cmax = s;
+ } else {
+ *Cmax = 0;
+ *Cmid = 0;
+ }
+ *Cmin = 0;
+}
+
+static inline void SetSat(float* r, float* g, float* b, float s) {
+ if(*r <= *g) {
+ if(*g <= *b) {
+ setSaturationComponents(r, g, b, s);
+ } else if(*r <= *b) {
+ setSaturationComponents(r, b, g, s);
+ } else {
+ setSaturationComponents(b, r, g, s);
+ }
+ } else if(*r <= *b) {
+ setSaturationComponents(g, r, b, s);
+ } else if(*g <= *b) {
+ setSaturationComponents(g, b, r, s);
+ } else {
+ setSaturationComponents(b, g, r, s);
+ }
+}
+
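+// After a luminance shift some channels can fall outside [0, alpha]; scale the out-of-range
+// channels back toward the luminance L so the color stays representable while its luminance
+// is preserved (this mirrors the ClipColor step of the CSS compositing spec).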
+static inline void clipColor(float* r, float* g, float* b, float a) {
+ float L = Lum(*r, *g, *b);
+ float n = min(*r, *g, *b);
+ float x = max(*r, *g, *b);
+ float denom;
+ if ((n < 0) && (denom = L - n)) { // Compute denom and make sure it's non zero
+ float scale = L / denom;
+ *r = L + (*r - L) * scale;
+ *g = L + (*g - L) * scale;
+ *b = L + (*b - L) * scale;
+ }
+
+ if ((x > a) && (denom = x - L)) { // Compute denom and make sure it's non zero
+ float scale = (a - L) / denom;
+ *r = L + (*r - L) * scale;
+ *g = L + (*g - L) * scale;
+ *b = L + (*b - L) * scale;
+ }
+}
+
+static inline void SetLum(float* r, float* g, float* b, float a, float l) {
+ float d = l - Lum(*r, *g, *b);
+ *r += d;
+ *g += d;
+ *b += d;
+ clipColor(r, g, b, a);
+}
+
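+// Non-separable modes (hue / saturation / color / luminosity): float counterparts of the
+// CSS formulas referenced above. They operate on premultiplied components, so the Sat()
+// and Lum() targets are pre-scaled by the appropriate alpha before SetSat()/SetLum().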
+static Sk4f hue_4f(const Sk4f& s, const Sk4f& d) {
+ float sa = s[SkPM4f::A];
+ float sr = s[SkPM4f::R];
+ float sg = s[SkPM4f::G];
+ float sb = s[SkPM4f::B];
+
+ float da = d[SkPM4f::A];
+ float dr = d[SkPM4f::R];
+ float dg = d[SkPM4f::G];
+ float db = d[SkPM4f::B];
+
+ float Sr = sr;
+ float Sg = sg;
+ float Sb = sb;
+ SetSat(&Sr, &Sg, &Sb, Sat(dr, dg, db) * sa);
+ SetLum(&Sr, &Sg, &Sb, sa * da, Lum(dr, dg, db) * sa);
+
+ return color_alpha(s * inv_alpha(d) + d * inv_alpha(s) + set_argb(0, Sr, Sg, Sb),
+ sa + da - sa * da);
+}
+
+static Sk4f saturation_4f(const Sk4f& s, const Sk4f& d) {
+ float sa = s[SkPM4f::A];
+ float sr = s[SkPM4f::R];
+ float sg = s[SkPM4f::G];
+ float sb = s[SkPM4f::B];
+
+ float da = d[SkPM4f::A];
+ float dr = d[SkPM4f::R];
+ float dg = d[SkPM4f::G];
+ float db = d[SkPM4f::B];
+
+ float Dr = dr;
+ float Dg = dg;
+ float Db = db;
+ SetSat(&Dr, &Dg, &Db, Sat(sr, sg, sb) * da);
+ SetLum(&Dr, &Dg, &Db, sa * da, Lum(dr, dg, db) * sa);
+
+ return color_alpha(s * inv_alpha(d) + d * inv_alpha(s) + set_argb(0, Dr, Dg, Db),
+ sa + da - sa * da);
+}
+
+static Sk4f color_4f(const Sk4f& s, const Sk4f& d) {
+ float sa = s[SkPM4f::A];
+ float sr = s[SkPM4f::R];
+ float sg = s[SkPM4f::G];
+ float sb = s[SkPM4f::B];
+
+ float da = d[SkPM4f::A];
+ float dr = d[SkPM4f::R];
+ float dg = d[SkPM4f::G];
+ float db = d[SkPM4f::B];
+
+ float Sr = sr;
+ float Sg = sg;
+ float Sb = sb;
+ SetLum(&Sr, &Sg, &Sb, sa * da, Lum(dr, dg, db) * sa);
+
+ Sk4f res = color_alpha(s * inv_alpha(d) + d * inv_alpha(s) + set_argb(0, Sr, Sg, Sb),
+ sa + da - sa * da);
+ // Can return tiny negative values ...
+ return Sk4f::Max(res, Sk4f(0));
+}
+
+static Sk4f luminosity_4f(const Sk4f& s, const Sk4f& d) {
+ float sa = s[SkPM4f::A];
+ float sr = s[SkPM4f::R];
+ float sg = s[SkPM4f::G];
+ float sb = s[SkPM4f::B];
+
+ float da = d[SkPM4f::A];
+ float dr = d[SkPM4f::R];
+ float dg = d[SkPM4f::G];
+ float db = d[SkPM4f::B];
+
+ float Dr = dr;
+ float Dg = dg;
+ float Db = db;
+ SetLum(&Dr, &Dg, &Db, sa * da, Lum(sr, sg, sb) * da);
+
+ Sk4f res = color_alpha(s * inv_alpha(d) + d * inv_alpha(s) + set_argb(0, Dr, Dg, Db),
+ sa + da - sa * da);
+ // Can return tiny negative values ...
+ return Sk4f::Max(res, Sk4f(0));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// kClear_Mode, //!< [0, 0]
+static SkPMColor clear_modeproc(SkPMColor src, SkPMColor dst) {
+ return 0;
+}
+
+// kSrc_Mode, //!< [Sa, Sc]
+static SkPMColor src_modeproc(SkPMColor src, SkPMColor dst) {
+ return src;
+}
+
+// kDst_Mode, //!< [Da, Dc]
+static SkPMColor dst_modeproc(SkPMColor src, SkPMColor dst) {
+ return dst;
+}
+
+// kSrcOver_Mode, //!< [Sa + Da - Sa*Da, Sc + (1 - Sa)*Dc]
+static SkPMColor srcover_modeproc(SkPMColor src, SkPMColor dst) {
+#if 0
+ // this is the old, more-correct way, but it doesn't guarantee that dst==255
+ // will always stay opaque
+ return src + SkAlphaMulQ(dst, SkAlpha255To256(255 - SkGetPackedA32(src)));
+#else
+ // this is slightly faster, but more importantly guarantees that dst==255
+ // will always stay opaque
+ return src + SkAlphaMulQ(dst, 256 - SkGetPackedA32(src));
+#endif
+}
+
+// kDstOver_Mode, //!< [Sa + Da - Sa*Da, Dc + (1 - Da)*Sc]
+static SkPMColor dstover_modeproc(SkPMColor src, SkPMColor dst) {
+ // this is the reverse of srcover, just flipping src and dst
+ // see srcover's comment about the 256 for opaqueness guarantees
+ return dst + SkAlphaMulQ(src, 256 - SkGetPackedA32(dst));
+}
+
+// kSrcIn_Mode, //!< [Sa * Da, Sc * Da]
+static SkPMColor srcin_modeproc(SkPMColor src, SkPMColor dst) {
+ return SkAlphaMulQ(src, SkAlpha255To256(SkGetPackedA32(dst)));
+}
+
+// kDstIn_Mode, //!< [Sa * Da, Sa * Dc]
+static SkPMColor dstin_modeproc(SkPMColor src, SkPMColor dst) {
+ return SkAlphaMulQ(dst, SkAlpha255To256(SkGetPackedA32(src)));
+}
+
+// kSrcOut_Mode, //!< [Sa * (1 - Da), Sc * (1 - Da)]
+static SkPMColor srcout_modeproc(SkPMColor src, SkPMColor dst) {
+ return SkAlphaMulQ(src, SkAlpha255To256(255 - SkGetPackedA32(dst)));
+}
+
+// kDstOut_Mode, //!< [Da * (1 - Sa), Dc * (1 - Sa)]
+static SkPMColor dstout_modeproc(SkPMColor src, SkPMColor dst) {
+ return SkAlphaMulQ(dst, SkAlpha255To256(255 - SkGetPackedA32(src)));
+}
+
+// kSrcATop_Mode, //!< [Da, Sc * Da + (1 - Sa) * Dc]
+static SkPMColor srcatop_modeproc(SkPMColor src, SkPMColor dst) {
+ unsigned sa = SkGetPackedA32(src);
+ unsigned da = SkGetPackedA32(dst);
+ unsigned isa = 255 - sa;
+
+ return SkPackARGB32(da,
+ SkAlphaMulAlpha(da, SkGetPackedR32(src)) +
+ SkAlphaMulAlpha(isa, SkGetPackedR32(dst)),
+ SkAlphaMulAlpha(da, SkGetPackedG32(src)) +
+ SkAlphaMulAlpha(isa, SkGetPackedG32(dst)),
+ SkAlphaMulAlpha(da, SkGetPackedB32(src)) +
+ SkAlphaMulAlpha(isa, SkGetPackedB32(dst)));
+}
+
+// kDstATop_Mode, //!< [Sa, Sa * Dc + Sc * (1 - Da)]
+static SkPMColor dstatop_modeproc(SkPMColor src, SkPMColor dst) {
+ unsigned sa = SkGetPackedA32(src);
+ unsigned da = SkGetPackedA32(dst);
+ unsigned ida = 255 - da;
+
+ return SkPackARGB32(sa,
+ SkAlphaMulAlpha(ida, SkGetPackedR32(src)) +
+ SkAlphaMulAlpha(sa, SkGetPackedR32(dst)),
+ SkAlphaMulAlpha(ida, SkGetPackedG32(src)) +
+ SkAlphaMulAlpha(sa, SkGetPackedG32(dst)),
+ SkAlphaMulAlpha(ida, SkGetPackedB32(src)) +
+ SkAlphaMulAlpha(sa, SkGetPackedB32(dst)));
+}
+
+// kXor_Mode [Sa + Da - 2 * Sa * Da, Sc * (1 - Da) + (1 - Sa) * Dc]
+static SkPMColor xor_modeproc(SkPMColor src, SkPMColor dst) {
+ unsigned sa = SkGetPackedA32(src);
+ unsigned da = SkGetPackedA32(dst);
+ unsigned isa = 255 - sa;
+ unsigned ida = 255 - da;
+
+ return SkPackARGB32(sa + da - (SkAlphaMulAlpha(sa, da) << 1),
+ SkAlphaMulAlpha(ida, SkGetPackedR32(src)) +
+ SkAlphaMulAlpha(isa, SkGetPackedR32(dst)),
+ SkAlphaMulAlpha(ida, SkGetPackedG32(src)) +
+ SkAlphaMulAlpha(isa, SkGetPackedG32(dst)),
+ SkAlphaMulAlpha(ida, SkGetPackedB32(src)) +
+ SkAlphaMulAlpha(isa, SkGetPackedB32(dst)));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// kPlus_Mode
+static SkPMColor plus_modeproc(SkPMColor src, SkPMColor dst) {
+ unsigned b = saturated_add(SkGetPackedB32(src), SkGetPackedB32(dst));
+ unsigned g = saturated_add(SkGetPackedG32(src), SkGetPackedG32(dst));
+ unsigned r = saturated_add(SkGetPackedR32(src), SkGetPackedR32(dst));
+ unsigned a = saturated_add(SkGetPackedA32(src), SkGetPackedA32(dst));
+ return SkPackARGB32(a, r, g, b);
+}
+
+// kModulate_Mode
+static SkPMColor modulate_modeproc(SkPMColor src, SkPMColor dst) {
+ int a = SkAlphaMulAlpha(SkGetPackedA32(src), SkGetPackedA32(dst));
+ int r = SkAlphaMulAlpha(SkGetPackedR32(src), SkGetPackedR32(dst));
+ int g = SkAlphaMulAlpha(SkGetPackedG32(src), SkGetPackedG32(dst));
+ int b = SkAlphaMulAlpha(SkGetPackedB32(src), SkGetPackedB32(dst));
+ return SkPackARGB32(a, r, g, b);
+}
+
+static inline int srcover_byte(int a, int b) {
+ return a + b - SkAlphaMulAlpha(a, b);
+}
+
+// kMultiply_Mode
+// B(Cb, Cs) = Cb x Cs
+// multiply uses its own version of blendfunc_byte because sa and da are not needed
+static int blendfunc_multiply_byte(int sc, int dc, int sa, int da) {
+ return clamp_div255round(sc * (255 - da) + dc * (255 - sa) + sc * dc);
+}
+
+static SkPMColor multiply_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+ int r = blendfunc_multiply_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+ int g = blendfunc_multiply_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+ int b = blendfunc_multiply_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+ return SkPackARGB32(a, r, g, b);
+}
+
+// kScreen_Mode
+static SkPMColor screen_modeproc(SkPMColor src, SkPMColor dst) {
+ int a = srcover_byte(SkGetPackedA32(src), SkGetPackedA32(dst));
+ int r = srcover_byte(SkGetPackedR32(src), SkGetPackedR32(dst));
+ int g = srcover_byte(SkGetPackedG32(src), SkGetPackedG32(dst));
+ int b = srcover_byte(SkGetPackedB32(src), SkGetPackedB32(dst));
+ return SkPackARGB32(a, r, g, b);
+}
+
+// kOverlay_Mode
+static inline int overlay_byte(int sc, int dc, int sa, int da) {
+ int tmp = sc * (255 - da) + dc * (255 - sa);
+ int rc;
+ if (2 * dc <= da) {
+ rc = 2 * sc * dc;
+ } else {
+ rc = sa * da - 2 * (da - dc) * (sa - sc);
+ }
+ return clamp_div255round(rc + tmp);
+}
+static SkPMColor overlay_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+ int r = overlay_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+ int g = overlay_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+ int b = overlay_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+ return SkPackARGB32(a, r, g, b);
+}
+
+// kDarken_Mode
+static inline int darken_byte(int sc, int dc, int sa, int da) {
+ int sd = sc * da;
+ int ds = dc * sa;
+ if (sd < ds) {
+ // srcover
+ return sc + dc - SkDiv255Round(ds);
+ } else {
+ // dstover
+ return dc + sc - SkDiv255Round(sd);
+ }
+}
+static SkPMColor darken_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+ int r = darken_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+ int g = darken_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+ int b = darken_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+ return SkPackARGB32(a, r, g, b);
+}
+
+// kLighten_Mode
+static inline int lighten_byte(int sc, int dc, int sa, int da) {
+ int sd = sc * da;
+ int ds = dc * sa;
+ if (sd > ds) {
+ // srcover
+ return sc + dc - SkDiv255Round(ds);
+ } else {
+ // dstover
+ return dc + sc - SkDiv255Round(sd);
+ }
+}
+static SkPMColor lighten_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+ int r = lighten_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+ int g = lighten_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+ int b = lighten_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+ return SkPackARGB32(a, r, g, b);
+}
+
+// kColorDodge_Mode
+static inline int colordodge_byte(int sc, int dc, int sa, int da) {
+ int diff = sa - sc;
+ int rc;
+ if (0 == dc) {
+ return SkAlphaMulAlpha(sc, 255 - da);
+ } else if (0 == diff) {
+ rc = sa * da + sc * (255 - da) + dc * (255 - sa);
+ } else {
+ diff = dc * sa / diff;
+ rc = sa * ((da < diff) ? da : diff) + sc * (255 - da) + dc * (255 - sa);
+ }
+ return clamp_div255round(rc);
+}
+static SkPMColor colordodge_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+ int r = colordodge_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+ int g = colordodge_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+ int b = colordodge_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+ return SkPackARGB32(a, r, g, b);
+}
+
+// kColorBurn_Mode
+static inline int colorburn_byte(int sc, int dc, int sa, int da) {
+ int rc;
+ if (dc == da) {
+ rc = sa * da + sc * (255 - da) + dc * (255 - sa);
+ } else if (0 == sc) {
+ return SkAlphaMulAlpha(dc, 255 - sa);
+ } else {
+ int tmp = (da - dc) * sa / sc;
+ rc = sa * (da - ((da < tmp) ? da : tmp))
+ + sc * (255 - da) + dc * (255 - sa);
+ }
+ return clamp_div255round(rc);
+}
+static SkPMColor colorburn_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+ int r = colorburn_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+ int g = colorburn_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+ int b = colorburn_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+ return SkPackARGB32(a, r, g, b);
+}
+
+// kHardLight_Mode
+static inline int hardlight_byte(int sc, int dc, int sa, int da) {
+ int rc;
+ if (2 * sc <= sa) {
+ rc = 2 * sc * dc;
+ } else {
+ rc = sa * da - 2 * (da - dc) * (sa - sc);
+ }
+ return clamp_div255round(rc + sc * (255 - da) + dc * (255 - sa));
+}
+static SkPMColor hardlight_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+ int r = hardlight_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+ int g = hardlight_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+ int b = hardlight_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+ return SkPackARGB32(a, r, g, b);
+}
+
+// returns 255 * sqrt(n/255)
+static U8CPU sqrt_unit_byte(U8CPU n) {
+ return SkSqrtBits(n, 15+4);
+}
+
+// kSoftLight_Mode
+static inline int softlight_byte(int sc, int dc, int sa, int da) {
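+ // m is dc/da scaled to 0..256 (the unpremultiplied dst channel in 1/256 units), or 0 if da is 0.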
+ int m = da ? dc * 256 / da : 0;
+ int rc;
+ if (2 * sc <= sa) {
+ rc = dc * (sa + ((2 * sc - sa) * (256 - m) >> 8));
+ } else if (4 * dc <= da) {
+ int tmp = (4 * m * (4 * m + 256) * (m - 256) >> 16) + 7 * m;
+ rc = dc * sa + (da * (2 * sc - sa) * tmp >> 8);
+ } else {
+ int tmp = sqrt_unit_byte(m) - m;
+ rc = dc * sa + (da * (2 * sc - sa) * tmp >> 8);
+ }
+ return clamp_div255round(rc + sc * (255 - da) + dc * (255 - sa));
+}
+static SkPMColor softlight_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+ int r = softlight_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+ int g = softlight_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+ int b = softlight_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+ return SkPackARGB32(a, r, g, b);
+}
+
+// kDifference_Mode
+static inline int difference_byte(int sc, int dc, int sa, int da) {
+ int tmp = SkMin32(sc * da, dc * sa);
+ return clamp_signed_byte(sc + dc - 2 * SkDiv255Round(tmp));
+}
+static SkPMColor difference_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+ int r = difference_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+ int g = difference_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+ int b = difference_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+ return SkPackARGB32(a, r, g, b);
+}
+
+// kExclusion_Mode
+static inline int exclusion_byte(int sc, int dc, int, int) {
+ // this equation is wacky; wait for SVG to confirm it
+ //int r = sc * da + dc * sa - 2 * sc * dc + sc * (255 - da) + dc * (255 - sa);
+
+ // The above equation can be simplified as follows
+ int r = 255*(sc + dc) - 2 * sc * dc;
+ return clamp_div255round(r);
+}
+static SkPMColor exclusion_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+ int r = exclusion_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+ int g = exclusion_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+ int b = exclusion_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+ return SkPackARGB32(a, r, g, b);
+}
+
+// The CSS compositing spec introduces the following formulas:
+// (See https://dvcs.w3.org/hg/FXTF/rawfile/tip/compositing/index.html#blendingnonseparable)
+// SkComputeLuminance is similar to this formula, but it uses the newer definition from Rec. 709,
+// while PDF and CG use the one from Rec. 601.
+// See http://www.glennchan.info/articles/technical/hd-versus-sd-color-space/hd-versus-sd-color-space.htm
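+// The integer helpers below mirror the float versions above but work on 8-bit channels;
+// note that this Lum() uses 77/150/28 weights (approximately Rec. 601), unlike the float
+// version's Rec. 709 weights.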
+static inline int Lum(int r, int g, int b)
+{
+ return SkDiv255Round(r * 77 + g * 150 + b * 28);
+}
+
+static inline int min2(int a, int b) { return a < b ? a : b; }
+static inline int max2(int a, int b) { return a > b ? a : b; }
+#define minimum(a, b, c) min2(min2(a, b), c)
+#define maximum(a, b, c) max2(max2(a, b), c)
+
+static inline int Sat(int r, int g, int b) {
+ return maximum(r, g, b) - minimum(r, g, b);
+}
+
+static inline void setSaturationComponents(int* Cmin, int* Cmid, int* Cmax, int s) {
+ if(*Cmax > *Cmin) {
+ *Cmid = SkMulDiv(*Cmid - *Cmin, s, *Cmax - *Cmin);
+ *Cmax = s;
+ } else {
+ *Cmax = 0;
+ *Cmid = 0;
+ }
+
+ *Cmin = 0;
+}
+
+static inline void SetSat(int* r, int* g, int* b, int s) {
+ if(*r <= *g) {
+ if(*g <= *b) {
+ setSaturationComponents(r, g, b, s);
+ } else if(*r <= *b) {
+ setSaturationComponents(r, b, g, s);
+ } else {
+ setSaturationComponents(b, r, g, s);
+ }
+ } else if(*r <= *b) {
+ setSaturationComponents(g, r, b, s);
+ } else if(*g <= *b) {
+ setSaturationComponents(g, b, r, s);
+ } else {
+ setSaturationComponents(b, g, r, s);
+ }
+}
+
+static inline void clipColor(int* r, int* g, int* b, int a) {
+ int L = Lum(*r, *g, *b);
+ int n = minimum(*r, *g, *b);
+ int x = maximum(*r, *g, *b);
+ int denom;
+ if ((n < 0) && (denom = L - n)) { // Compute denom and make sure it's non zero
+ *r = L + SkMulDiv(*r - L, L, denom);
+ *g = L + SkMulDiv(*g - L, L, denom);
+ *b = L + SkMulDiv(*b - L, L, denom);
+ }
+
+ if ((x > a) && (denom = x - L)) { // Compute denom and make sure it's non zero
+ int numer = a - L;
+ *r = L + SkMulDiv(*r - L, numer, denom);
+ *g = L + SkMulDiv(*g - L, numer, denom);
+ *b = L + SkMulDiv(*b - L, numer, denom);
+ }
+}
+
+static inline void SetLum(int* r, int* g, int* b, int a, int l) {
+ int d = l - Lum(*r, *g, *b);
+ *r += d;
+ *g += d;
+ *b += d;
+
+ clipColor(r, g, b, a);
+}
+
+// non-separable blend modes are done in non-premultiplied alpha
+#define blendfunc_nonsep_byte(sc, dc, sa, da, blendval) \
+ clamp_div255round(sc * (255 - da) + dc * (255 - sa) + blendval)
+
+// kHue_Mode
+// B(Cb, Cs) = SetLum(SetSat(Cs, Sat(Cb)), Lum(Cb))
+// Create a color with the hue of the source color and the saturation and luminosity of the backdrop color.
+static SkPMColor hue_modeproc(SkPMColor src, SkPMColor dst) {
+ int sr = SkGetPackedR32(src);
+ int sg = SkGetPackedG32(src);
+ int sb = SkGetPackedB32(src);
+ int sa = SkGetPackedA32(src);
+
+ int dr = SkGetPackedR32(dst);
+ int dg = SkGetPackedG32(dst);
+ int db = SkGetPackedB32(dst);
+ int da = SkGetPackedA32(dst);
+ int Sr, Sg, Sb;
+
+ if(sa && da) {
+ Sr = sr * sa;
+ Sg = sg * sa;
+ Sb = sb * sa;
+ SetSat(&Sr, &Sg, &Sb, Sat(dr, dg, db) * sa);
+ SetLum(&Sr, &Sg, &Sb, sa * da, Lum(dr, dg, db) * sa);
+ } else {
+ Sr = 0;
+ Sg = 0;
+ Sb = 0;
+ }
+
+ int a = srcover_byte(sa, da);
+ int r = blendfunc_nonsep_byte(sr, dr, sa, da, Sr);
+ int g = blendfunc_nonsep_byte(sg, dg, sa, da, Sg);
+ int b = blendfunc_nonsep_byte(sb, db, sa, da, Sb);
+ return SkPackARGB32(a, r, g, b);
+}
+
+// kSaturation_Mode
+// B(Cb, Cs) = SetLum(SetSat(Cb, Sat(Cs)), Lum(Cb))
+// Create a color with the saturation of the source color and the hue and luminosity of the backdrop color.
+static SkPMColor saturation_modeproc(SkPMColor src, SkPMColor dst) {
+ int sr = SkGetPackedR32(src);
+ int sg = SkGetPackedG32(src);
+ int sb = SkGetPackedB32(src);
+ int sa = SkGetPackedA32(src);
+
+ int dr = SkGetPackedR32(dst);
+ int dg = SkGetPackedG32(dst);
+ int db = SkGetPackedB32(dst);
+ int da = SkGetPackedA32(dst);
+ int Dr, Dg, Db;
+
+ if(sa && da) {
+ Dr = dr * sa;
+ Dg = dg * sa;
+ Db = db * sa;
+ SetSat(&Dr, &Dg, &Db, Sat(sr, sg, sb) * da);
+ SetLum(&Dr, &Dg, &Db, sa * da, Lum(dr, dg, db) * sa);
+ } else {
+ Dr = 0;
+ Dg = 0;
+ Db = 0;
+ }
+
+ int a = srcover_byte(sa, da);
+ int r = blendfunc_nonsep_byte(sr, dr, sa, da, Dr);
+ int g = blendfunc_nonsep_byte(sg, dg, sa, da, Dg);
+ int b = blendfunc_nonsep_byte(sb, db, sa, da, Db);
+ return SkPackARGB32(a, r, g, b);
+}
+
+// kColor_Mode
+// B(Cb, Cs) = SetLum(Cs, Lum(Cb))
+// Create a color with the hue and saturation of the source color and the luminosity of the backdrop color.
+static SkPMColor color_modeproc(SkPMColor src, SkPMColor dst) {
+ int sr = SkGetPackedR32(src);
+ int sg = SkGetPackedG32(src);
+ int sb = SkGetPackedB32(src);
+ int sa = SkGetPackedA32(src);
+
+ int dr = SkGetPackedR32(dst);
+ int dg = SkGetPackedG32(dst);
+ int db = SkGetPackedB32(dst);
+ int da = SkGetPackedA32(dst);
+ int Sr, Sg, Sb;
+
+ if(sa && da) {
+ Sr = sr * da;
+ Sg = sg * da;
+ Sb = sb * da;
+ SetLum(&Sr, &Sg, &Sb, sa * da, Lum(dr, dg, db) * sa);
+ } else {
+ Sr = 0;
+ Sg = 0;
+ Sb = 0;
+ }
+
+ int a = srcover_byte(sa, da);
+ int r = blendfunc_nonsep_byte(sr, dr, sa, da, Sr);
+ int g = blendfunc_nonsep_byte(sg, dg, sa, da, Sg);
+ int b = blendfunc_nonsep_byte(sb, db, sa, da, Sb);
+ return SkPackARGB32(a, r, g, b);
+}
+
+// kLuminosity_Mode
+// B(Cb, Cs) = SetLum(Cb, Lum(Cs))
+// Create a color with the luminosity of the source color and the hue and saturation of the backdrop color.
+static SkPMColor luminosity_modeproc(SkPMColor src, SkPMColor dst) {
+ int sr = SkGetPackedR32(src);
+ int sg = SkGetPackedG32(src);
+ int sb = SkGetPackedB32(src);
+ int sa = SkGetPackedA32(src);
+
+ int dr = SkGetPackedR32(dst);
+ int dg = SkGetPackedG32(dst);
+ int db = SkGetPackedB32(dst);
+ int da = SkGetPackedA32(dst);
+ int Dr, Dg, Db;
+
+ if(sa && da) {
+ Dr = dr * sa;
+ Dg = dg * sa;
+ Db = db * sa;
+ SetLum(&Dr, &Dg, &Db, sa * da, Lum(sr, sg, sb) * da);
+ } else {
+ Dr = 0;
+ Dg = 0;
+ Db = 0;
+ }
+
+ int a = srcover_byte(sa, da);
+ int r = blendfunc_nonsep_byte(sr, dr, sa, da, Dr);
+ int g = blendfunc_nonsep_byte(sg, dg, sa, da, Dg);
+ int b = blendfunc_nonsep_byte(sb, db, sa, da, Db);
+ return SkPackARGB32(a, r, g, b);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static SkPM4f as_pm4f(const Sk4f& x) {
+ SkPM4f pm4;
+ x.store(pm4.fVec);
+ return pm4;
+}
+
+static Sk4f as_4f(const SkPM4f& pm4) {
+ return Sk4f::Load(pm4.fVec);
+}
+
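+// Debug-only sanity check: every component of a premultiplied 4f color should lie in
+// [0, 1], give or take a small epsilon.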
+static void assert_unit(const SkPM4f& r) {
+#ifdef SK_DEBUG
+ const float eps = 0.00001f;
+ const float min = 0 - eps;
+ const float max = 1 + eps;
+ for (int i = 0; i < 4; ++i) {
+ SkASSERT(r.fVec[i] >= min && r.fVec[i] <= max);
+ }
+#endif
+}
+
+template <Sk4f (blend)(const Sk4f&, const Sk4f&)> SkPM4f proc_4f(const SkPM4f& s, const SkPM4f& d) {
+ assert_unit(s);
+ assert_unit(d);
+ SkPM4f r = as_pm4f(blend(as_4f(s), as_4f(d)));
+ assert_unit(r);
+ return r;
+}
+
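+// One entry per SkXfermode::Mode, in enum order; the table is indexed directly by mode,
+// so its layout must match the Mode enum (see the SK_ARRAY_COUNT asserts below).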
+const ProcCoeff gProcCoeffs[] = {
+ { clear_modeproc, proc_4f<clear_4f>, SkXfermode::kZero_Coeff, SkXfermode::kZero_Coeff },
+ { src_modeproc, proc_4f<src_4f>, SkXfermode::kOne_Coeff, SkXfermode::kZero_Coeff },
+ { dst_modeproc, proc_4f<dst_4f>, SkXfermode::kZero_Coeff, SkXfermode::kOne_Coeff },
+ { srcover_modeproc, proc_4f<srcover_4f>, SkXfermode::kOne_Coeff, SkXfermode::kISA_Coeff },
+ { dstover_modeproc, proc_4f<dstover_4f>, SkXfermode::kIDA_Coeff, SkXfermode::kOne_Coeff },
+ { srcin_modeproc, proc_4f<srcin_4f>, SkXfermode::kDA_Coeff, SkXfermode::kZero_Coeff },
+ { dstin_modeproc, proc_4f<dstin_4f>, SkXfermode::kZero_Coeff, SkXfermode::kSA_Coeff },
+ { srcout_modeproc, proc_4f<srcout_4f>, SkXfermode::kIDA_Coeff, SkXfermode::kZero_Coeff },
+ { dstout_modeproc, proc_4f<dstout_4f>, SkXfermode::kZero_Coeff, SkXfermode::kISA_Coeff },
+ { srcatop_modeproc, proc_4f<srcatop_4f>, SkXfermode::kDA_Coeff, SkXfermode::kISA_Coeff },
+ { dstatop_modeproc, proc_4f<dstatop_4f>, SkXfermode::kIDA_Coeff, SkXfermode::kSA_Coeff },
+ { xor_modeproc, proc_4f<xor_4f>, SkXfermode::kIDA_Coeff, SkXfermode::kISA_Coeff },
+
+ { plus_modeproc, proc_4f<plus_4f>, SkXfermode::kOne_Coeff, SkXfermode::kOne_Coeff },
+ { modulate_modeproc, proc_4f<modulate_4f>, SkXfermode::kZero_Coeff, SkXfermode::kSC_Coeff },
+ { screen_modeproc, proc_4f<screen_4f>, SkXfermode::kOne_Coeff, SkXfermode::kISC_Coeff },
+ { overlay_modeproc, proc_4f<overlay_4f>, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { darken_modeproc, proc_4f<darken_4f>, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { lighten_modeproc, proc_4f<lighten_4f>, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { colordodge_modeproc, proc_4f<colordodge_4f>, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { colorburn_modeproc, proc_4f<colorburn_4f>, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { hardlight_modeproc, proc_4f<hardlight_4f>, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { softlight_modeproc, proc_4f<softlight_4f>, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { difference_modeproc, proc_4f<difference_4f>, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { exclusion_modeproc, proc_4f<exclusion_4f>, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { multiply_modeproc, proc_4f<multiply_4f>, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { hue_modeproc, proc_4f<hue_4f>, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { saturation_modeproc, proc_4f<saturation_4f>, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { color_modeproc, proc_4f<color_4f>, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { luminosity_modeproc, proc_4f<luminosity_4f>, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkXfermode::asMode(Mode* mode) const {
+ return false;
+}
+
+#if SK_SUPPORT_GPU
+sk_sp<GrFragmentProcessor> SkXfermode::makeFragmentProcessorForImageFilter(
+ sk_sp<GrFragmentProcessor>) const {
+ // This should never be called.
+ // TODO: make pure virtual in SkXfermode once Android update lands
+ SkASSERT(0);
+ return nullptr;
+}
+
+sk_sp<GrXPFactory> SkXfermode::asXPFactory() const {
+ // This should never be called.
+ // TODO: make pure virtual in SkXfermode once Android update lands
+ SkASSERT(0);
+ return nullptr;
+}
+#endif
+
+SkPMColor SkXfermode::xferColor(SkPMColor src, SkPMColor dst) const{
+ // no-op. subclasses should override this
+ return dst;
+}
+
+void SkXfermode::xfer32(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src, int count,
+ const SkAlpha* SK_RESTRICT aa) const {
+ SkASSERT(dst && src && count >= 0);
+
+ if (nullptr == aa) {
+ for (int i = count - 1; i >= 0; --i) {
+ dst[i] = this->xferColor(src[i], dst[i]);
+ }
+ } else {
+ for (int i = count - 1; i >= 0; --i) {
+ unsigned a = aa[i];
+ if (0 != a) {
+ SkPMColor dstC = dst[i];
+ SkPMColor C = this->xferColor(src[i], dstC);
+ if (0xFF != a) {
+ C = SkFourByteInterp(C, dstC, a);
+ }
+ dst[i] = C;
+ }
+ }
+ }
+}
+
+void SkXfermode::xfer16(uint16_t* dst,
+ const SkPMColor* SK_RESTRICT src, int count,
+ const SkAlpha* SK_RESTRICT aa) const {
+ SkASSERT(dst && src && count >= 0);
+
+ if (nullptr == aa) {
+ for (int i = count - 1; i >= 0; --i) {
+ SkPMColor dstC = SkPixel16ToPixel32(dst[i]);
+ dst[i] = SkPixel32ToPixel16_ToU16(this->xferColor(src[i], dstC));
+ }
+ } else {
+ for (int i = count - 1; i >= 0; --i) {
+ unsigned a = aa[i];
+ if (0 != a) {
+ SkPMColor dstC = SkPixel16ToPixel32(dst[i]);
+ SkPMColor C = this->xferColor(src[i], dstC);
+ if (0xFF != a) {
+ C = SkFourByteInterp(C, dstC, a);
+ }
+ dst[i] = SkPixel32ToPixel16_ToU16(C);
+ }
+ }
+ }
+}
+
+void SkXfermode::xferA8(SkAlpha* SK_RESTRICT dst,
+ const SkPMColor src[], int count,
+ const SkAlpha* SK_RESTRICT aa) const {
+ SkASSERT(dst && src && count >= 0);
+
+ if (nullptr == aa) {
+ for (int i = count - 1; i >= 0; --i) {
+ SkPMColor res = this->xferColor(src[i], (dst[i] << SK_A32_SHIFT));
+ dst[i] = SkToU8(SkGetPackedA32(res));
+ }
+ } else {
+ for (int i = count - 1; i >= 0; --i) {
+ unsigned a = aa[i];
+ if (0 != a) {
+ SkAlpha dstA = dst[i];
+ unsigned A = SkGetPackedA32(this->xferColor(src[i],
+ (SkPMColor)(dstA << SK_A32_SHIFT)));
+ if (0xFF != a) {
+ A = SkAlphaBlend(A, dstA, SkAlpha255To256(a));
+ }
+ dst[i] = SkToU8(A);
+ }
+ }
+ }
+}
+
+bool SkXfermode::supportsCoverageAsAlpha() const {
+ return false;
+}
+
+bool SkXfermode::isOpaque(SkXfermode::SrcColorOpacity opacityType) const {
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFlattenable> SkProcCoeffXfermode::CreateProc(SkReadBuffer& buffer) {
+ uint32_t mode32 = buffer.read32();
+ if (!buffer.validate(mode32 < SK_ARRAY_COUNT(gProcCoeffs))) {
+ return nullptr;
+ }
+ return SkXfermode::Make((SkXfermode::Mode)mode32);
+}
+
+void SkProcCoeffXfermode::flatten(SkWriteBuffer& buffer) const {
+ buffer.write32(fMode);
+}
+
+bool SkProcCoeffXfermode::asMode(Mode* mode) const {
+ if (mode) {
+ *mode = fMode;
+ }
+ return true;
+}
+
+bool SkProcCoeffXfermode::supportsCoverageAsAlpha() const {
+ if (CANNOT_USE_COEFF == fSrcCoeff) {
+ return false;
+ }
+
+ switch (fDstCoeff) {
+ case SkXfermode::kOne_Coeff:
+ case SkXfermode::kISA_Coeff:
+ case SkXfermode::kISC_Coeff:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool SkProcCoeffXfermode::isOpaque(SkXfermode::SrcColorOpacity opacityType) const {
+ if (CANNOT_USE_COEFF == fSrcCoeff) {
+ return false;
+ }
+
+ if (SkXfermode::kDA_Coeff == fSrcCoeff || SkXfermode::kDC_Coeff == fSrcCoeff ||
+ SkXfermode::kIDA_Coeff == fSrcCoeff || SkXfermode::kIDC_Coeff == fSrcCoeff) {
+ return false;
+ }
+
+ switch (fDstCoeff) {
+ case SkXfermode::kZero_Coeff:
+ return true;
+ case SkXfermode::kISA_Coeff:
+ return SkXfermode::kOpaque_SrcColorOpacity == opacityType;
+ case SkXfermode::kSA_Coeff:
+ return SkXfermode::kTransparentBlack_SrcColorOpacity == opacityType ||
+ SkXfermode::kTransparentAlpha_SrcColorOpacity == opacityType;
+ case SkXfermode::kSC_Coeff:
+ return SkXfermode::kTransparentBlack_SrcColorOpacity == opacityType;
+ default:
+ return false;
+ }
+
+}
+
+void SkProcCoeffXfermode::xfer32(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src, int count,
+ const SkAlpha* SK_RESTRICT aa) const {
+ SkASSERT(dst && src && count >= 0);
+
+ SkXfermodeProc proc = fProc;
+
+ if (proc) {
+ if (nullptr == aa) {
+ for (int i = count - 1; i >= 0; --i) {
+ dst[i] = proc(src[i], dst[i]);
+ }
+ } else {
+ for (int i = count - 1; i >= 0; --i) {
+ unsigned a = aa[i];
+ if (0 != a) {
+ SkPMColor dstC = dst[i];
+ SkPMColor C = proc(src[i], dstC);
+ if (a != 0xFF) {
+ C = SkFourByteInterp(C, dstC, a);
+ }
+ dst[i] = C;
+ }
+ }
+ }
+ }
+}
+
+void SkProcCoeffXfermode::xfer16(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src, int count,
+ const SkAlpha* SK_RESTRICT aa) const {
+ SkASSERT(dst && src && count >= 0);
+
+ SkXfermodeProc proc = fProc;
+
+ if (proc) {
+ if (nullptr == aa) {
+ for (int i = count - 1; i >= 0; --i) {
+ SkPMColor dstC = SkPixel16ToPixel32(dst[i]);
+ dst[i] = SkPixel32ToPixel16_ToU16(proc(src[i], dstC));
+ }
+ } else {
+ for (int i = count - 1; i >= 0; --i) {
+ unsigned a = aa[i];
+ if (0 != a) {
+ SkPMColor dstC = SkPixel16ToPixel32(dst[i]);
+ SkPMColor C = proc(src[i], dstC);
+ if (0xFF != a) {
+ C = SkFourByteInterp(C, dstC, a);
+ }
+ dst[i] = SkPixel32ToPixel16_ToU16(C);
+ }
+ }
+ }
+ }
+}
+
+void SkProcCoeffXfermode::xferA8(SkAlpha* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src, int count,
+ const SkAlpha* SK_RESTRICT aa) const {
+ SkASSERT(dst && src && count >= 0);
+
+ SkXfermodeProc proc = fProc;
+
+ if (proc) {
+ if (nullptr == aa) {
+ for (int i = count - 1; i >= 0; --i) {
+ SkPMColor res = proc(src[i], dst[i] << SK_A32_SHIFT);
+ dst[i] = SkToU8(SkGetPackedA32(res));
+ }
+ } else {
+ for (int i = count - 1; i >= 0; --i) {
+ unsigned a = aa[i];
+ if (0 != a) {
+ SkAlpha dstA = dst[i];
+ SkPMColor res = proc(src[i], dstA << SK_A32_SHIFT);
+ unsigned A = SkGetPackedA32(res);
+ if (0xFF != a) {
+ A = SkAlphaBlend(A, dstA, SkAlpha255To256(a));
+ }
+ dst[i] = SkToU8(A);
+ }
+ }
+ }
+ }
+}
+
+#if SK_SUPPORT_GPU
+sk_sp<GrFragmentProcessor> SkProcCoeffXfermode::makeFragmentProcessorForImageFilter(
+ sk_sp<GrFragmentProcessor> dst) const {
+ SkASSERT(dst);
+ return GrXfermodeFragmentProcessor::MakeFromDstProcessor(std::move(dst), fMode);
+}
+
+sk_sp<GrXPFactory> SkProcCoeffXfermode::asXPFactory() const {
+ if (CANNOT_USE_COEFF != fSrcCoeff) {
+ sk_sp<GrXPFactory> result(GrPorterDuffXPFactory::Make(fMode));
+ SkASSERT(result);
+ return result;
+ }
+
+ SkASSERT(GrCustomXfermode::IsSupportedMode(fMode));
+ return GrCustomXfermode::MakeXPFactory(fMode);
+}
+#endif
+
+const char* SkXfermode::ModeName(Mode mode) {
+ SkASSERT((unsigned) mode <= (unsigned)kLastMode);
+ const char* gModeStrings[] = {
+ "Clear", "Src", "Dst", "SrcOver", "DstOver", "SrcIn", "DstIn",
+ "SrcOut", "DstOut", "SrcATop", "DstATop", "Xor", "Plus",
+ "Modulate", "Screen", "Overlay", "Darken", "Lighten", "ColorDodge",
+ "ColorBurn", "HardLight", "SoftLight", "Difference", "Exclusion",
+ "Multiply", "Hue", "Saturation", "Color", "Luminosity"
+ };
+ static_assert(SK_ARRAY_COUNT(gModeStrings) == kLastMode + 1, "mode_count");
+ return gModeStrings[mode];
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkProcCoeffXfermode::toString(SkString* str) const {
+ str->append("SkProcCoeffXfermode: ");
+
+ str->append("mode: ");
+ str->append(ModeName(fMode));
+
+ static const char* gCoeffStrings[kCoeffCount] = {
+ "Zero", "One", "SC", "ISC", "DC", "IDC", "SA", "ISA", "DA", "IDA"
+ };
+
+ str->append(" src: ");
+ if (CANNOT_USE_COEFF == fSrcCoeff) {
+ str->append("can't use");
+ } else {
+ str->append(gCoeffStrings[fSrcCoeff]);
+ }
+
+ str->append(" dst: ");
+ if (CANNOT_USE_COEFF == fDstCoeff) {
+ str->append("can't use");
+ } else {
+ str->append(gCoeffStrings[fDstCoeff]);
+ }
+}
+#endif
+
+
+sk_sp<SkXfermode> SkXfermode::Make(Mode mode) {
+ if ((unsigned)mode >= kModeCount) {
+ // report error
+ return nullptr;
+ }
+
+ // Skia's "default" mode is srcover. nullptr in SkPaint is interpreted as srcover
+ // so we can just return nullptr from the factory.
+ if (kSrcOver_Mode == mode) {
+ return nullptr;
+ }
+
+ SkASSERT(SK_ARRAY_COUNT(gProcCoeffs) == kModeCount);
+
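+ // Build each mode's xfermode at most once and cache it for the lifetime of the process,
+ // preferring an SkOpts-specialized implementation over the portable SkProcCoeffXfermode.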
+ static SkOnce once[SkXfermode::kLastMode+1];
+ static SkXfermode* cached[SkXfermode::kLastMode+1];
+
+ once[mode]([mode] {
+ ProcCoeff rec = gProcCoeffs[mode];
+ if (auto xfermode = SkOpts::create_xfermode(rec, mode)) {
+ cached[mode] = xfermode;
+ } else {
+ cached[mode] = new SkProcCoeffXfermode(rec, mode);
+ }
+ });
+ return sk_ref_sp(cached[mode]);
+}
+
+SkXfermodeProc SkXfermode::GetProc(Mode mode) {
+ SkXfermodeProc proc = nullptr;
+ if ((unsigned)mode < kModeCount) {
+ proc = gProcCoeffs[mode].fProc;
+ }
+ return proc;
+}
+
+SkXfermodeProc4f SkXfermode::GetProc4f(Mode mode) {
+ SkXfermodeProc4f proc = nullptr;
+ if ((unsigned)mode < kModeCount) {
+ proc = gProcCoeffs[mode].fProc4f;
+ }
+ return proc;
+}
+
+static SkPM4f missing_proc4f(const SkPM4f& src, const SkPM4f& dst) {
+ return src;
+}
+
+SkXfermodeProc4f SkXfermode::getProc4f() const {
+ Mode mode;
+ return this->asMode(&mode) ? GetProc4f(mode) : missing_proc4f;
+}
+
+bool SkXfermode::ModeAsCoeff(Mode mode, Coeff* src, Coeff* dst) {
+ SkASSERT(SK_ARRAY_COUNT(gProcCoeffs) == kModeCount);
+
+ if ((unsigned)mode >= (unsigned)kModeCount) {
+ // illegal mode parameter
+ return false;
+ }
+
+ const ProcCoeff& rec = gProcCoeffs[mode];
+
+ if (CANNOT_USE_COEFF == rec.fSC) {
+ return false;
+ }
+
+ SkASSERT(CANNOT_USE_COEFF != rec.fDC);
+ if (src) {
+ *src = rec.fSC;
+ }
+ if (dst) {
+ *dst = rec.fDC;
+ }
+ return true;
+}
+
+bool SkXfermode::AsMode(const SkXfermode* xfer, Mode* mode) {
+ if (nullptr == xfer) {
+ if (mode) {
+ *mode = kSrcOver_Mode;
+ }
+ return true;
+ }
+ return xfer->asMode(mode);
+}
+
+bool SkXfermode::IsMode(const SkXfermode* xfer, Mode mode) {
+ // if xfer==null then the mode is srcover
+ Mode m = kSrcOver_Mode;
+ if (xfer && !xfer->asMode(&m)) {
+ return false;
+ }
+ return mode == m;
+}
+
+bool SkXfermode::SupportsCoverageAsAlpha(const SkXfermode* xfer) {
+ // if xfer is nullptr we treat it as srcOver which always supports coverageAsAlpha
+ if (!xfer) {
+ return true;
+ }
+
+ return xfer->supportsCoverageAsAlpha();
+}
+
+bool SkXfermode::IsOpaque(const SkXfermode* xfer, SrcColorOpacity opacityType) {
+ // if xfer is nullptr we treat it as srcOver which is opaque if our src is opaque
+ if (!xfer) {
+ return SkXfermode::kOpaque_SrcColorOpacity == opacityType;
+ }
+
+ return xfer->isOpaque(opacityType);
+}
+
+bool SkXfermode::appendStages(SkRasterPipeline* pipeline) const {
+ return this->onAppendStages(pipeline);
+}
+
+bool SkXfermode::onAppendStages(SkRasterPipeline*) const {
+ return false;
+}
+
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkXfermode)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkProcCoeffXfermode)
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END
+
+
+bool SkProcCoeffXfermode::onAppendStages(SkRasterPipeline* p) const {
+ switch (fMode) {
+ case kSrc_Mode: /*This stage is a no-op.*/ return true;
+ case kDst_Mode: p->append(SkRasterPipeline::dst); return true;
+ case kSrcATop_Mode: p->append(SkRasterPipeline::srcatop); return true;
+ case kDstATop_Mode: p->append(SkRasterPipeline::dstatop); return true;
+ case kSrcIn_Mode: p->append(SkRasterPipeline::srcin); return true;
+ case kDstIn_Mode: p->append(SkRasterPipeline::dstin); return true;
+ case kSrcOut_Mode: p->append(SkRasterPipeline::srcout); return true;
+ case kDstOut_Mode: p->append(SkRasterPipeline::dstout); return true;
+ case kSrcOver_Mode: p->append(SkRasterPipeline::srcover); return true;
+ case kDstOver_Mode: p->append(SkRasterPipeline::dstover); return true;
+
+ case kClear_Mode: p->append(SkRasterPipeline::clear); return true;
+ case kModulate_Mode: p->append(SkRasterPipeline::modulate); return true;
+ case kMultiply_Mode: p->append(SkRasterPipeline::multiply); return true;
+ case kPlus_Mode: p->append(SkRasterPipeline::plus_); return true;
+ case kScreen_Mode: p->append(SkRasterPipeline::screen); return true;
+ case kXor_Mode: p->append(SkRasterPipeline::xor_); return true;
+
+ case kColorBurn_Mode: p->append(SkRasterPipeline::colorburn); return true;
+ case kColorDodge_Mode: p->append(SkRasterPipeline::colordodge); return true;
+ case kDarken_Mode: p->append(SkRasterPipeline::darken); return true;
+ case kDifference_Mode: p->append(SkRasterPipeline::difference); return true;
+ case kExclusion_Mode: p->append(SkRasterPipeline::exclusion); return true;
+ case kHardLight_Mode: p->append(SkRasterPipeline::hardlight); return true;
+ case kLighten_Mode: p->append(SkRasterPipeline::lighten); return true;
+ case kOverlay_Mode: p->append(SkRasterPipeline::overlay); return true;
+ case kSoftLight_Mode: p->append(SkRasterPipeline::softlight); return true;
+
+ // TODO
+ case kColor_Mode: return false;
+ case kHue_Mode: return false;
+ case kLuminosity_Mode: return false;
+ case kSaturation_Mode: return false;
+ }
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkBlendMode_SupportsCoverageAsAlpha(SkBlendMode mode) {
+ switch (mode) {
+ case SkBlendMode::kDst:
+ case SkBlendMode::kSrcOver:
+ case SkBlendMode::kDstOver:
+ case SkBlendMode::kDstOut:
+ case SkBlendMode::kSrcATop:
+ case SkBlendMode::kXor:
+ case SkBlendMode::kPlus:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+bool SkXfermode::IsOpaque(SkBlendMode mode, SrcColorOpacity opacityType) {
+ const ProcCoeff rec = gProcCoeffs[(int)mode];
+
+ switch (rec.fSC) {
+ case kDA_Coeff:
+ case kDC_Coeff:
+ case kIDA_Coeff:
+ case kIDC_Coeff:
+ return false;
+ default:
+ break;
+ }
+
+ switch (rec.fDC) {
+ case kZero_Coeff:
+ return true;
+ case kISA_Coeff:
+ return kOpaque_SrcColorOpacity == opacityType;
+ case kSA_Coeff:
+ return kTransparentBlack_SrcColorOpacity == opacityType ||
+ kTransparentAlpha_SrcColorOpacity == opacityType;
+ case kSC_Coeff:
+ return kTransparentBlack_SrcColorOpacity == opacityType;
+ default:
+ return false;
+ }
+ return false;
+}
+
+#if SK_SUPPORT_GPU
+sk_sp<GrXPFactory> SkBlendMode_AsXPFactory(SkBlendMode mode) {
+ const ProcCoeff rec = gProcCoeffs[(int)mode];
+ if (CANNOT_USE_COEFF != rec.fSC) {
+ sk_sp<GrXPFactory> result(GrPorterDuffXPFactory::Make(mode));
+ SkASSERT(result);
+ return result;
+ }
+
+ SkASSERT(GrCustomXfermode::IsSupportedMode((SkXfermode::Mode)mode));
+ return GrCustomXfermode::MakeXPFactory((SkXfermode::Mode)mode);
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkXfermode4f.cpp b/gfx/skia/skia/src/core/SkXfermode4f.cpp
new file mode 100644
index 000000000..1a9b58e21
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkXfermode4f.cpp
@@ -0,0 +1,474 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPM4fPriv.h"
+#include "SkUtils.h"
+#include "SkXfermode.h"
+#include "Sk4x4f.h"
+
+static SkPM4f rgba_to_pmcolor_order(const SkPM4f& x) {
+#ifdef SK_PMCOLOR_IS_BGRA
+ return {{ x.fVec[2], x.fVec[1], x.fVec[0], x.fVec[3] }};
+#else
+ return x;
+#endif
+}
+
+enum DstType {
+ kLinear_Dst,
+ kSRGB_Dst,
+};
+
+static Sk4f scale_by_coverage(const Sk4f& x4, uint8_t coverage) {
+ return x4 * Sk4f(coverage * (1/255.0f));
+}
+
+static Sk4f lerp(const Sk4f& src, const Sk4f& dst, uint8_t srcCoverage) {
+ return dst + (src - dst) * Sk4f(srcCoverage * (1/255.0f));
+}
+
+template <DstType D> Sk4f load_dst(SkPMColor dstC) {
+ return (D == kSRGB_Dst) ? Sk4f_fromS32(dstC) : Sk4f_fromL32(dstC);
+}
+
+template <DstType D> uint32_t store_dst(const Sk4f& x4) {
+ return (D == kSRGB_Dst) ? Sk4f_toS32(x4) : Sk4f_toL32(x4);
+}
+
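+// Transfer-function helpers: expand four packed sRGB pixels into planar linear floats via
+// the sk_linear_from_srgb lookup table, and re-encode (rounding alpha) on the way back out.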
+static Sk4x4f load_4_srgb(const void* vptr) {
+ auto ptr = (const uint32_t*)vptr;
+
+ Sk4x4f rgba;
+
+ rgba.r = { sk_linear_from_srgb[(ptr[0] >> 0) & 0xff],
+ sk_linear_from_srgb[(ptr[1] >> 0) & 0xff],
+ sk_linear_from_srgb[(ptr[2] >> 0) & 0xff],
+ sk_linear_from_srgb[(ptr[3] >> 0) & 0xff] };
+
+ rgba.g = { sk_linear_from_srgb[(ptr[0] >> 8) & 0xff],
+ sk_linear_from_srgb[(ptr[1] >> 8) & 0xff],
+ sk_linear_from_srgb[(ptr[2] >> 8) & 0xff],
+ sk_linear_from_srgb[(ptr[3] >> 8) & 0xff] };
+
+ rgba.b = { sk_linear_from_srgb[(ptr[0] >> 16) & 0xff],
+ sk_linear_from_srgb[(ptr[1] >> 16) & 0xff],
+ sk_linear_from_srgb[(ptr[2] >> 16) & 0xff],
+ sk_linear_from_srgb[(ptr[3] >> 16) & 0xff] };
+
+ rgba.a = SkNx_cast<float>((Sk4i::Load(ptr) >> 24) & 0xff) * (1/255.0f);
+
+ return rgba;
+}
+
+static void store_4_srgb(void* ptr, const Sk4x4f& p) {
+ ( sk_linear_to_srgb(p.r) << 0
+ | sk_linear_to_srgb(p.g) << 8
+ | sk_linear_to_srgb(p.b) << 16
+ | Sk4f_round(255.0f*p.a) << 24).store(ptr);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
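+// Generic fallbacks that invoke the mode's SkXfermodeProc4f per pixel: general_1 reuses a
+// single src color for the whole span, while general_n reads one src color per pixel. The
+// template parameter selects whether the packed dst is decoded/encoded as sRGB or linear.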
+template <DstType D> void general_1(const SkXfermode* xfer, uint32_t dst[],
+ const SkPM4f* src, int count, const SkAlpha aa[]) {
+ const SkPM4f s = rgba_to_pmcolor_order(*src);
+ SkXfermodeProc4f proc = xfer->getProc4f();
+ SkPM4f d;
+ if (aa) {
+ for (int i = 0; i < count; ++i) {
+ Sk4f d4 = load_dst<D>(dst[i]);
+ d4.store(d.fVec);
+ Sk4f r4 = Sk4f::Load(proc(s, d).fVec);
+ dst[i] = store_dst<D>(lerp(r4, d4, aa[i]));
+ }
+ } else {
+ for (int i = 0; i < count; ++i) {
+ load_dst<D>(dst[i]).store(d.fVec);
+ Sk4f r4 = Sk4f::Load(proc(s, d).fVec);
+ dst[i] = store_dst<D>(r4);
+ }
+ }
+}
+
+template <DstType D> void general_n(const SkXfermode* xfer, uint32_t dst[],
+ const SkPM4f src[], int count, const SkAlpha aa[]) {
+ SkXfermodeProc4f proc = xfer->getProc4f();
+ SkPM4f d;
+ if (aa) {
+ for (int i = 0; i < count; ++i) {
+ Sk4f d4 = load_dst<D>(dst[i]);
+ d4.store(d.fVec);
+ Sk4f r4 = Sk4f::Load(proc(rgba_to_pmcolor_order(src[i]), d).fVec);
+ dst[i] = store_dst<D>(lerp(r4, d4, aa[i]));
+ }
+ } else {
+ for (int i = 0; i < count; ++i) {
+ load_dst<D>(dst[i]).store(d.fVec);
+ Sk4f r4 = Sk4f::Load(proc(rgba_to_pmcolor_order(src[i]), d).fVec);
+ dst[i] = store_dst<D>(r4);
+ }
+ }
+}
+
+const SkXfermode::D32Proc gProcs_General[] = {
+ general_n<kLinear_Dst>, general_n<kLinear_Dst>,
+ general_1<kLinear_Dst>, general_1<kLinear_Dst>,
+ general_n<kSRGB_Dst>, general_n<kSRGB_Dst>,
+ general_1<kSRGB_Dst>, general_1<kSRGB_Dst>,
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static void clear_linear(const SkXfermode*, uint32_t dst[], const SkPM4f[],
+ int count, const SkAlpha aa[]) {
+ if (aa) {
+ for (int i = 0; i < count; ++i) {
+ unsigned a = aa[i];
+ if (a) {
+ SkPMColor dstC = dst[i];
+ SkPMColor C = 0;
+ if (0xFF != a) {
+ C = SkFourByteInterp(C, dstC, a);
+ }
+ dst[i] = C;
+ }
+ }
+ } else {
+ sk_memset32(dst, 0, count);
+ }
+}
+
+static void clear_srgb(const SkXfermode*, uint32_t dst[], const SkPM4f[],
+ int count, const SkAlpha aa[]) {
+ if (aa) {
+ for (int i = 0; i < count; ++i) {
+ if (aa[i]) {
+ Sk4f d = Sk4f_fromS32(dst[i]) * Sk4f((255 - aa[i]) * (1/255.0f));
+ dst[i] = Sk4f_toS32(d);
+ }
+ }
+ } else {
+ sk_memset32(dst, 0, count);
+ }
+}
+
+const SkXfermode::D32Proc gProcs_Clear[] = {
+ clear_linear, clear_linear,
+ clear_linear, clear_linear,
+ clear_srgb, clear_srgb,
+ clear_srgb, clear_srgb,
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+template <DstType D> void src_n(const SkXfermode*, uint32_t dst[],
+ const SkPM4f src[], int count, const SkAlpha aa[]) {
+ for (int i = 0; i < count; ++i) {
+ unsigned a = 0xFF;
+ if (aa) {
+ a = aa[i];
+ if (0 == a) {
+ continue;
+ }
+ }
+ Sk4f r4 = src[i].to4f_pmorder();
+ if (a != 0xFF) {
+ Sk4f d4 = load_dst<D>(dst[i]);
+ r4 = lerp(r4, d4, a);
+ }
+ dst[i] = store_dst<D>(r4);
+ }
+}
+
+static Sk4f lerp(const Sk4f& src, const Sk4f& dst, const Sk4f& src_scale) {
+ return dst + (src - dst) * src_scale;
+}
+
+template <DstType D> void src_1(const SkXfermode*, uint32_t dst[],
+ const SkPM4f* src, int count, const SkAlpha aa[]) {
+ const Sk4f s4 = src->to4f_pmorder();
+
+ if (aa) {
+ SkPMColor srcColor = store_dst<D>(s4);
+ while (count-- > 0) {
+ SkAlpha cover = *aa++;
+ switch (cover) {
+ case 0xFF: {
+ *dst++ = srcColor;
+ break;
+ }
+ case 0x00: {
+ dst++;
+ break;
+ }
+ default: {
+ Sk4f d4 = load_dst<D>(*dst);
+ *dst++ = store_dst<D>(lerp(s4, d4, cover));
+ }
+ }
+ }
+ } else {
+ sk_memset32(dst, store_dst<D>(s4), count);
+ }
+}
+
+const SkXfermode::D32Proc gProcs_Src[] = {
+ src_n<kLinear_Dst>, src_n<kLinear_Dst>,
+ src_1<kLinear_Dst>, src_1<kLinear_Dst>,
+ src_n<kSRGB_Dst>, src_n<kSRGB_Dst>,
+ src_1<kSRGB_Dst>, src_1<kSRGB_Dst>,
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static void dst(const SkXfermode*, uint32_t dst[], const SkPM4f[], int count, const SkAlpha aa[]) {}
+
+const SkXfermode::D32Proc gProcs_Dst[] = {
+ dst, dst, dst, dst, dst, dst, dst, dst,
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+template <DstType D> void srcover_n(const SkXfermode*, uint32_t dst[],
+ const SkPM4f src[], int count, const SkAlpha aa[]) {
+ if (aa) {
+ for (int i = 0; i < count; ++i) {
+ unsigned a = aa[i];
+ if (0 == a) {
+ continue;
+ }
+ Sk4f s4 = src[i].to4f_pmorder();
+ Sk4f d4 = load_dst<D>(dst[i]);
+ if (a != 0xFF) {
+ s4 = scale_by_coverage(s4, a);
+ }
+ Sk4f r4 = s4 + d4 * Sk4f(1 - get_alpha(s4));
+ dst[i] = store_dst<D>(r4);
+ }
+ } else {
+ while (count >= 4 && D == kSRGB_Dst) {
+ auto d = load_4_srgb(dst);
+ auto s = Sk4x4f::Transpose(src->fVec);
+ #if defined(SK_PMCOLOR_IS_BGRA)
+ SkTSwap(s.r, s.b);
+ #endif
+ auto invSA = 1.0f - s.a;
+ auto r = s.r + d.r * invSA,
+ g = s.g + d.g * invSA,
+ b = s.b + d.b * invSA,
+ a = s.a + d.a * invSA;
+ store_4_srgb(dst, Sk4x4f{r,g,b,a});
+ count -= 4;
+ dst += 4;
+ src += 4;
+ }
+ for (int i = 0; i < count; ++i) {
+ Sk4f s4 = src[i].to4f_pmorder();
+ Sk4f d4 = load_dst<D>(dst[i]);
+ Sk4f r4 = s4 + d4 * Sk4f(1 - get_alpha(s4));
+ dst[i] = store_dst<D>(r4);
+ }
+ }
+}
+
+static void srcover_linear_dst_1(const SkXfermode*, uint32_t dst[],
+ const SkPM4f* src, int count, const SkAlpha aa[]) {
+ const Sk4f s4 = src->to4f_pmorder();
+ const Sk4f dst_scale = Sk4f(1 - get_alpha(s4));
+
+ if (aa) {
+ for (int i = 0; i < count; ++i) {
+ unsigned a = aa[i];
+ if (0 == a) {
+ continue;
+ }
+ Sk4f d4 = Sk4f_fromL32(dst[i]);
+ Sk4f r4;
+ if (a != 0xFF) {
+ Sk4f s4_aa = scale_by_coverage(s4, a);
+ r4 = s4_aa + d4 * Sk4f(1 - get_alpha(s4_aa));
+ } else {
+ r4 = s4 + d4 * dst_scale;
+ }
+ dst[i] = Sk4f_toL32(r4);
+ }
+ } else {
+ for (int i = 0; i < count; ++i) {
+ Sk4f d4 = Sk4f_fromL32(dst[i]);
+ dst[i] = Sk4f_toL32(s4 + d4 * dst_scale);
+ }
+ }
+}
+
+static void srcover_srgb_dst_1(const SkXfermode*, uint32_t dst[],
+ const SkPM4f* src, int count, const SkAlpha aa[]) {
+ Sk4f s4 = src->to4f_pmorder();
+ Sk4f dst_scale = Sk4f(1 - get_alpha(s4));
+
+ if (aa) {
+ for (int i = 0; i < count; ++i) {
+ unsigned a = aa[i];
+ if (0 == a) {
+ continue;
+ }
+
+ Sk4f d4 = Sk4f_fromS32(dst[i]);
+ Sk4f r4;
+ if (a != 0xFF) {
+ const Sk4f s4_aa = scale_by_coverage(s4, a);
+ r4 = s4_aa + d4 * Sk4f(1 - get_alpha(s4_aa));
+ } else {
+ r4 = s4 + d4 * dst_scale;
+ }
+ dst[i] = Sk4f_toS32(r4);
+ }
+ } else {
+ while (count >= 4) {
+ auto d = load_4_srgb(dst);
+ auto s = Sk4x4f{{ src->r() }, { src->g() }, { src->b() }, { src->a() }};
+ #if defined(SK_PMCOLOR_IS_BGRA)
+ SkTSwap(s.r, s.b);
+ #endif
+ auto invSA = 1.0f - s.a;
+ auto r = s.r + d.r * invSA,
+ g = s.g + d.g * invSA,
+ b = s.b + d.b * invSA,
+ a = s.a + d.a * invSA;
+ store_4_srgb(dst, Sk4x4f{r,g,b,a});
+ count -= 4;
+ dst += 4;
+ }
+ for (int i = 0; i < count; ++i) {
+ Sk4f d4 = Sk4f_fromS32(dst[i]);
+ dst[i] = Sk4f_toS32(s4 + d4 * dst_scale);
+ }
+ }
+}
+
+const SkXfermode::D32Proc gProcs_SrcOver[] = {
+ srcover_n<kLinear_Dst>, src_n<kLinear_Dst>,
+ srcover_linear_dst_1, src_1<kLinear_Dst>,
+
+ srcover_n<kSRGB_Dst>, src_n<kSRGB_Dst>,
+ srcover_srgb_dst_1, src_1<kSRGB_Dst>,
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static SkXfermode::D32Proc find_proc(SkXfermode::Mode mode, uint32_t flags) {
+ SkASSERT(0 == (flags & ~7));
+ flags &= 7;
+
+ switch (mode) {
+ case SkXfermode::kClear_Mode: return gProcs_Clear[flags];
+ case SkXfermode::kSrc_Mode: return gProcs_Src[flags];
+ case SkXfermode::kDst_Mode: return gProcs_Dst[flags];
+ case SkXfermode::kSrcOver_Mode: return gProcs_SrcOver[flags];
+ default:
+ break;
+ }
+ return gProcs_General[flags];
+}
+
+SkXfermode::D32Proc SkXfermode::onGetD32Proc(uint32_t flags) const {
+ SkASSERT(0 == (flags & ~7));
+ flags &= 7;
+
+ Mode mode;
+ return this->asMode(&mode) ? find_proc(mode, flags) : gProcs_General[flags];
+}
+
+SkXfermode::D32Proc SkXfermode::GetD32Proc(SkXfermode* xfer, uint32_t flags) {
+ return xfer ? xfer->onGetD32Proc(flags) : find_proc(SkXfermode::kSrcOver_Mode, flags);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#include "SkColorPriv.h"
+
+static Sk4f lcd16_to_unit_4f(uint16_t rgb) {
+#ifdef SK_PMCOLOR_IS_RGBA
+ Sk4i rgbi = Sk4i(SkGetPackedR16(rgb), SkGetPackedG16(rgb), SkGetPackedB16(rgb), 0);
+#else
+ Sk4i rgbi = Sk4i(SkGetPackedB16(rgb), SkGetPackedG16(rgb), SkGetPackedR16(rgb), 0);
+#endif
+ return SkNx_cast<float>(rgbi) * Sk4f(1.0f/31, 1.0f/63, 1.0f/31, 0);
+}
+
+template <DstType D>
+void src_1_lcd(uint32_t dst[], const SkPM4f* src, int count, const uint16_t lcd[]) {
+ const Sk4f s4 = src->to4f_pmorder();
+
+ for (int i = 0; i < count; ++i) {
+ uint16_t rgb = lcd[i];
+ if (0 == rgb) {
+ continue;
+ }
+ Sk4f d4 = load_dst<D>(dst[i]);
+ dst[i] = store_dst<D>(lerp(s4, d4, lcd16_to_unit_4f(rgb))) | (SK_A32_MASK << SK_A32_SHIFT);
+ }
+}
+
+template <DstType D>
+void src_n_lcd(uint32_t dst[], const SkPM4f src[], int count, const uint16_t lcd[]) {
+ for (int i = 0; i < count; ++i) {
+ uint16_t rgb = lcd[i];
+ if (0 == rgb) {
+ continue;
+ }
+ Sk4f s4 = src[i].to4f_pmorder();
+ Sk4f d4 = load_dst<D>(dst[i]);
+ dst[i] = store_dst<D>(lerp(s4, d4, lcd16_to_unit_4f(rgb))) | (SK_A32_MASK << SK_A32_SHIFT);
+ }
+}
+
+template <DstType D>
+void srcover_1_lcd(uint32_t dst[], const SkPM4f* src, int count, const uint16_t lcd[]) {
+ const Sk4f s4 = src->to4f_pmorder();
+ Sk4f dst_scale = Sk4f(1 - get_alpha(s4));
+
+ for (int i = 0; i < count; ++i) {
+ uint16_t rgb = lcd[i];
+ if (0 == rgb) {
+ continue;
+ }
+ Sk4f d4 = load_dst<D>(dst[i]);
+ Sk4f r4 = s4 + d4 * dst_scale;
+ r4 = lerp(r4, d4, lcd16_to_unit_4f(rgb));
+ dst[i] = store_dst<D>(r4) | (SK_A32_MASK << SK_A32_SHIFT);
+ }
+}
+
+template <DstType D>
+void srcover_n_lcd(uint32_t dst[], const SkPM4f src[], int count, const uint16_t lcd[]) {
+ for (int i = 0; i < count; ++i) {
+ uint16_t rgb = lcd[i];
+ if (0 == rgb) {
+ continue;
+ }
+ Sk4f s4 = src[i].to4f_pmorder();
+ Sk4f dst_scale = Sk4f(1 - get_alpha(s4));
+ Sk4f d4 = load_dst<D>(dst[i]);
+ Sk4f r4 = s4 + d4 * dst_scale;
+ r4 = lerp(r4, d4, lcd16_to_unit_4f(rgb));
+ dst[i] = store_dst<D>(r4) | (SK_A32_MASK << SK_A32_SHIFT);
+ }
+}
+
+SkXfermode::LCD32Proc SkXfermode::GetLCD32Proc(uint32_t flags) {
+ SkASSERT((flags & ~7) == 0);
+ flags &= 7;
+
+ const LCD32Proc procs[] = {
+ srcover_n_lcd<kLinear_Dst>, src_n_lcd<kLinear_Dst>,
+ srcover_1_lcd<kLinear_Dst>, src_1_lcd<kLinear_Dst>,
+
+ srcover_n_lcd<kSRGB_Dst>, src_n_lcd<kSRGB_Dst>,
+ srcover_1_lcd<kSRGB_Dst>, src_1_lcd<kSRGB_Dst>,
+ };
+ return procs[flags];
+}
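
The vectorized procs in this file reduce to two scalar building blocks: Porter-Duff src-over on premultiplied values, and a lerp toward the destination by AA coverage. A minimal scalar restatement of lerp() and of the src-over formula used by srcover_n():

    #include <cstdint>

    // One channel of the blend, written out in scalars for reference.
    static float srcover_channel(float s, float srcAlpha, float d) {
        return s + d * (1.0f - srcAlpha);            // Porter-Duff src-over
    }
    static float lerp_by_coverage(float src, float dst, uint8_t cover) {
        return dst + (src - dst) * (cover / 255.0f); // mirrors lerp() above
    }
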
diff --git a/gfx/skia/skia/src/core/SkXfermodeF16.cpp b/gfx/skia/skia/src/core/SkXfermodeF16.cpp
new file mode 100644
index 000000000..9cf7254d1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkXfermodeF16.cpp
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkHalf.h"
+#include "SkPM4fPriv.h"
+#include "SkUtils.h"
+#include "SkXfermode.h"
+
+static Sk4f lerp_by_coverage(const Sk4f& src, const Sk4f& dst, uint8_t srcCoverage) {
+ return dst + (src - dst) * Sk4f(srcCoverage * (1/255.0f));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static void xfer_1(const SkXfermode* xfer, uint64_t dst[], const SkPM4f* src, int count,
+ const SkAlpha aa[]) {
+ SkXfermodeProc4f proc = xfer->getProc4f();
+ SkPM4f d;
+ if (aa) {
+ for (int i = 0; i < count; ++i) {
+ Sk4f d4 = SkHalfToFloat_finite_ftz(dst[i]);
+ d4.store(d.fVec);
+ Sk4f r4 = Sk4f::Load(proc(*src, d).fVec);
+ SkFloatToHalf_finite_ftz(lerp_by_coverage(r4, d4, aa[i])).store(&dst[i]);
+ }
+ } else {
+ for (int i = 0; i < count; ++i) {
+ SkHalfToFloat_finite_ftz(dst[i]).store(d.fVec);
+ Sk4f r4 = Sk4f::Load(proc(*src, d).fVec);
+ SkFloatToHalf_finite_ftz(r4).store(&dst[i]);
+ }
+ }
+}
+
+static void xfer_n(const SkXfermode* xfer, uint64_t dst[], const SkPM4f src[], int count,
+ const SkAlpha aa[]) {
+ SkXfermodeProc4f proc = xfer->getProc4f();
+ SkPM4f d;
+ if (aa) {
+ for (int i = 0; i < count; ++i) {
+ Sk4f d4 = SkHalfToFloat_finite_ftz(dst[i]);
+ d4.store(d.fVec);
+ Sk4f r4 = Sk4f::Load(proc(src[i], d).fVec);
+ SkFloatToHalf_finite_ftz(lerp_by_coverage(r4, d4, aa[i])).store(&dst[i]);
+ }
+ } else {
+ for (int i = 0; i < count; ++i) {
+ SkHalfToFloat_finite_ftz(dst[i]).store(d.fVec);
+ Sk4f r4 = Sk4f::Load(proc(src[i], d).fVec);
+ SkFloatToHalf_finite_ftz(r4).store(&dst[i]);
+ }
+ }
+}
+
+const SkXfermode::F16Proc gProcs_General[] = { xfer_n, xfer_n, xfer_1, xfer_1 };
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static void clear(const SkXfermode*, uint64_t dst[], const SkPM4f*, int count, const SkAlpha aa[]) {
+ if (aa) {
+ for (int i = 0; i < count; ++i) {
+ if (aa[i]) {
+ const Sk4f d4 = SkHalfToFloat_finite_ftz(dst[i]);
+ SkFloatToHalf_finite_ftz(d4 * Sk4f((255 - aa[i]) * 1.0f/255)).store(&dst[i]);
+ }
+ }
+ } else {
+ sk_memset64(dst, 0, count);
+ }
+}
+
+const SkXfermode::F16Proc gProcs_Clear[] = { clear, clear, clear, clear };
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static void src_1(const SkXfermode*, uint64_t dst[], const SkPM4f* src, int count,
+ const SkAlpha aa[]) {
+ const Sk4f s4 = Sk4f::Load(src->fVec);
+ if (aa) {
+ for (int i = 0; i < count; ++i) {
+ const Sk4f d4 = SkHalfToFloat_finite_ftz(dst[i]);
+ SkFloatToHalf_finite_ftz(lerp_by_coverage(s4, d4, aa[i])).store(&dst[i]);
+ }
+ } else {
+ uint64_t s4h;
+ SkFloatToHalf_finite_ftz(s4).store(&s4h);
+ sk_memset64(dst, s4h, count);
+ }
+}
+
+static void src_n(const SkXfermode*, uint64_t dst[], const SkPM4f src[], int count,
+ const SkAlpha aa[]) {
+ if (aa) {
+ for (int i = 0; i < count; ++i) {
+ const Sk4f s4 = Sk4f::Load(src[i].fVec);
+ const Sk4f d4 = SkHalfToFloat_finite_ftz(dst[i]);
+ SkFloatToHalf_finite_ftz(lerp_by_coverage(s4, d4, aa[i])).store(&dst[i]);
+ }
+ } else {
+ for (int i = 0; i < count; ++i) {
+ const Sk4f s4 = Sk4f::Load(src[i].fVec);
+ SkFloatToHalf_finite_ftz(s4).store(&dst[i]);
+ }
+ }
+}
+
+const SkXfermode::F16Proc gProcs_Src[] = { src_n, src_n, src_1, src_1 };
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static void dst(const SkXfermode*, uint64_t*, const SkPM4f*, int count, const SkAlpha[]) {}
+
+const SkXfermode::F16Proc gProcs_Dst[] = { dst, dst, dst, dst };
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static void srcover_1(const SkXfermode*, uint64_t dst[], const SkPM4f* src, int count,
+ const SkAlpha aa[]) {
+ const Sk4f s4 = Sk4f::Load(src->fVec);
+ const Sk4f dst_scale = Sk4f(1 - get_alpha(s4));
+ for (int i = 0; i < count; ++i) {
+ const Sk4f d4 = SkHalfToFloat_finite_ftz(dst[i]);
+ const Sk4f r4 = s4 + d4 * dst_scale;
+ if (aa) {
+ SkFloatToHalf_finite_ftz(lerp_by_coverage(r4, d4, aa[i])).store(&dst[i]);
+ } else {
+ SkFloatToHalf_finite_ftz(r4).store(&dst[i]);
+ }
+ }
+}
+
+static void srcover_n(const SkXfermode*, uint64_t dst[], const SkPM4f src[], int count,
+ const SkAlpha aa[]) {
+ for (int i = 0; i < count; ++i) {
+ Sk4f s = Sk4f::Load(src+i),
+ d = SkHalfToFloat_finite_ftz(dst[i]),
+ r = s + d*(1.0f - SkNx_shuffle<3,3,3,3>(s));
+ if (aa) {
+ r = lerp_by_coverage(r, d, aa[i]);
+ }
+ SkFloatToHalf_finite_ftz(r).store(&dst[i]);
+ }
+}
+
+const SkXfermode::F16Proc gProcs_SrcOver[] = { srcover_n, src_n, srcover_1, src_1 };
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static SkXfermode::F16Proc find_proc(SkXfermode::Mode mode, uint32_t flags) {
+ SkASSERT(0 == (flags & ~3));
+ flags &= 3;
+
+ switch (mode) {
+ case SkXfermode::kClear_Mode: return gProcs_Clear[flags];
+ case SkXfermode::kSrc_Mode: return gProcs_Src[flags];
+ case SkXfermode::kDst_Mode: return gProcs_Dst[flags];
+ case SkXfermode::kSrcOver_Mode: return gProcs_SrcOver[flags];
+ default:
+ break;
+ }
+ return gProcs_General[flags];
+}
+
+SkXfermode::F16Proc SkXfermode::onGetF16Proc(uint32_t flags) const {
+ SkASSERT(0 == (flags & ~3));
+ flags &= 3;
+
+ Mode mode;
+ return this->asMode(&mode) ? find_proc(mode, flags) : gProcs_General[flags];
+}
+
+SkXfermode::F16Proc SkXfermode::GetF16Proc(SkXfermode* xfer, uint32_t flags) {
+ return xfer ? xfer->onGetF16Proc(flags) : find_proc(SkXfermode::kSrcOver_Mode, flags);
+}
diff --git a/gfx/skia/skia/src/core/SkXfermodeInterpretation.cpp b/gfx/skia/skia/src/core/SkXfermodeInterpretation.cpp
new file mode 100644
index 000000000..3a1da368d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkXfermodeInterpretation.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkXfermodeInterpretation.h"
+#include "SkPaint.h"
+
+static bool just_solid_color(const SkPaint& p) {
+ return SK_AlphaOPAQUE == p.getAlpha() && !p.getColorFilter() && !p.getShader();
+}
+
+SkXfermodeInterpretation SkInterpretXfermode(const SkPaint& paint, bool dstIsOpaque) {
+ switch (paint.getBlendMode()) {
+ case SkBlendMode::kSrcOver:
+ return kSrcOver_SkXfermodeInterpretation;
+ case SkBlendMode::kSrc:
+ if (just_solid_color(paint)) {
+ return kSrcOver_SkXfermodeInterpretation;
+ }
+ return kNormal_SkXfermodeInterpretation;
+ case SkBlendMode::kDst:
+ return kSkipDrawing_SkXfermodeInterpretation;
+ case SkBlendMode::kDstOver:
+ if (dstIsOpaque) {
+ return kSkipDrawing_SkXfermodeInterpretation;
+ }
+ return kNormal_SkXfermodeInterpretation;
+ case SkBlendMode::kSrcIn:
+ if (dstIsOpaque && just_solid_color(paint)) {
+ return kSrcOver_SkXfermodeInterpretation;
+ }
+ return kNormal_SkXfermodeInterpretation;
+ case SkBlendMode::kDstIn:
+ if (just_solid_color(paint)) {
+ return kSkipDrawing_SkXfermodeInterpretation;
+ }
+ return kNormal_SkXfermodeInterpretation;
+ default:
+ return kNormal_SkXfermodeInterpretation;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkXfermodeInterpretation.h b/gfx/skia/skia/src/core/SkXfermodeInterpretation.h
new file mode 100644
index 000000000..d0a420f38
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkXfermodeInterpretation.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkXfermodeInterpretation_DEFINED
+#define SkXfermodeInterpretation_DEFINED
+
+class SkPaint;
+
+/**
+ * By analyzing the paint, we may decide we can take special
+ * action. This enum lists our possible actions.
+ */
+enum SkXfermodeInterpretation {
+ kNormal_SkXfermodeInterpretation, //< draw normally
+ kSrcOver_SkXfermodeInterpretation, //< draw as if in srcover mode
+ kSkipDrawing_SkXfermodeInterpretation //< draw nothing
+};
+
+/**
+ * Given a paint, determine whether the paint's transfer mode can be
+ * replaced with kSrcOver_Mode or not drawn at all. This is used by
+ * SkBlitter and SkPDFDevice.
+ */
+SkXfermodeInterpretation SkInterpretXfermode(const SkPaint&, bool dstIsOpaque);
+
+#endif // SkXfermodeInterpretation_DEFINED
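
A short usage sketch of SkInterpretXfermode(); SkPaint::setBlendMode is assumed here as the setter that pairs with the getBlendMode() call used in the .cpp above:

    #include "SkPaint.h"
    #include "SkXfermodeInterpretation.h"

    // A blitter can bail out early when the interpretation says the draw is a no-op.
    static bool should_skip_draw(const SkPaint& paint, bool dstIsOpaque) {
        return kSkipDrawing_SkXfermodeInterpretation == SkInterpretXfermode(paint, dstIsOpaque);
    }

    // For example, kDstOver onto an already-opaque destination draws nothing:
    //   SkPaint p;
    //   p.setBlendMode(SkBlendMode::kDstOver);      // assumed setter
    //   should_skip_draw(p, /*dstIsOpaque=*/true);  // -> true
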
diff --git a/gfx/skia/skia/src/core/SkXfermode_proccoeff.h b/gfx/skia/skia/src/core/SkXfermode_proccoeff.h
new file mode 100644
index 000000000..1e6cc482a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkXfermode_proccoeff.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkXfermode_proccoeff_DEFINED
+#define SkXfermode_proccoeff_DEFINED
+
+#include "SkXfermode.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+
+struct ProcCoeff {
+ SkXfermodeProc fProc;
+ SkXfermodeProc4f fProc4f;
+ SkXfermode::Coeff fSC;
+ SkXfermode::Coeff fDC;
+};
+
+#define CANNOT_USE_COEFF SkXfermode::Coeff(-1)
+
+class SK_API SkProcCoeffXfermode : public SkXfermode {
+public:
+ SkProcCoeffXfermode(const ProcCoeff& rec, Mode mode) {
+ fMode = mode;
+ fProc = rec.fProc;
+ // these may be valid, or may be CANNOT_USE_COEFF
+ fSrcCoeff = rec.fSC;
+ fDstCoeff = rec.fDC;
+ }
+
+ void xfer32(SkPMColor dst[], const SkPMColor src[], int count,
+ const SkAlpha aa[]) const override;
+ void xfer16(uint16_t dst[], const SkPMColor src[], int count,
+ const SkAlpha aa[]) const override;
+ void xferA8(SkAlpha dst[], const SkPMColor src[], int count,
+ const SkAlpha aa[]) const override;
+
+ bool asMode(Mode* mode) const override;
+
+ bool supportsCoverageAsAlpha() const override;
+
+ bool isOpaque(SkXfermode::SrcColorOpacity opacityType) const override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> makeFragmentProcessorForImageFilter(
+ sk_sp<GrFragmentProcessor>) const override;
+ sk_sp<GrXPFactory> asXPFactory() const override;
+#endif
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkProcCoeffXfermode)
+
+ bool onAppendStages(SkRasterPipeline*) const override;
+
+protected:
+ void flatten(SkWriteBuffer& buffer) const override;
+
+ Mode getMode() const { return fMode; }
+
+ SkXfermodeProc getProc() const { return fProc; }
+
+private:
+ SkXfermodeProc fProc;
+ Mode fMode;
+ Coeff fSrcCoeff, fDstCoeff;
+
+ friend class SkXfermode;
+
+ typedef SkXfermode INHERITED;
+};
+
+#endif // #ifndef SkXfermode_proccoeff_DEFINED
diff --git a/gfx/skia/skia/src/core/SkYUVPlanesCache.cpp b/gfx/skia/skia/src/core/SkYUVPlanesCache.cpp
new file mode 100644
index 000000000..07a07c698
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkYUVPlanesCache.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapCache.h"
+#include "SkResourceCache.h"
+#include "SkYUVPlanesCache.h"
+
+#define CHECK_LOCAL(localCache, localName, globalName, ...) \
+ ((localCache) ? localCache->localName(__VA_ARGS__) : SkResourceCache::globalName(__VA_ARGS__))
+
+namespace {
+static unsigned gYUVPlanesKeyNamespaceLabel;
+
+struct YUVValue {
+ SkYUVPlanesCache::Info fInfo;
+ SkCachedData* fData;
+};
+
+struct YUVPlanesKey : public SkResourceCache::Key {
+ YUVPlanesKey(uint32_t genID)
+ : fGenID(genID)
+ {
+ this->init(&gYUVPlanesKeyNamespaceLabel, SkMakeResourceCacheSharedIDForBitmap(genID),
+ sizeof(genID));
+ }
+
+ uint32_t fGenID;
+};
+
+struct YUVPlanesRec : public SkResourceCache::Rec {
+ YUVPlanesRec(YUVPlanesKey key, SkCachedData* data, SkYUVPlanesCache::Info* info)
+ : fKey(key)
+ {
+ fValue.fData = data;
+ fValue.fInfo = *info;
+ fValue.fData->attachToCacheAndRef();
+ }
+ ~YUVPlanesRec() {
+ fValue.fData->detachFromCacheAndUnref();
+ }
+
+ YUVPlanesKey fKey;
+ YUVValue fValue;
+
+ const Key& getKey() const override { return fKey; }
+ size_t bytesUsed() const override { return sizeof(*this) + fValue.fData->size(); }
+ const char* getCategory() const override { return "yuv-planes"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override {
+ return fValue.fData->diagnostic_only_getDiscardable();
+ }
+
+ static bool Visitor(const SkResourceCache::Rec& baseRec, void* contextData) {
+ const YUVPlanesRec& rec = static_cast<const YUVPlanesRec&>(baseRec);
+ YUVValue* result = static_cast<YUVValue*>(contextData);
+
+ SkCachedData* tmpData = rec.fValue.fData;
+ tmpData->ref();
+ if (nullptr == tmpData->data()) {
+ tmpData->unref();
+ return false;
+ }
+ result->fData = tmpData;
+ result->fInfo = rec.fValue.fInfo;
+ return true;
+ }
+};
+} // namespace
+
+SkCachedData* SkYUVPlanesCache::FindAndRef(uint32_t genID, Info* info,
+ SkResourceCache* localCache) {
+ YUVValue result;
+ YUVPlanesKey key(genID);
+ if (!CHECK_LOCAL(localCache, find, Find, key, YUVPlanesRec::Visitor, &result)) {
+ return nullptr;
+ }
+
+ *info = result.fInfo;
+ return result.fData;
+}
+
+void SkYUVPlanesCache::Add(uint32_t genID, SkCachedData* data, Info* info,
+ SkResourceCache* localCache) {
+ YUVPlanesKey key(genID);
+ return CHECK_LOCAL(localCache, add, Add, new YUVPlanesRec(key, data, info));
+}
diff --git a/gfx/skia/skia/src/core/SkYUVPlanesCache.h b/gfx/skia/skia/src/core/SkYUVPlanesCache.h
new file mode 100644
index 000000000..1c866a2d2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkYUVPlanesCache.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkYUVPlanesCache_DEFINED
+#define SkYUVPlanesCache_DEFINED
+
+#include "SkCachedData.h"
+#include "SkImageInfo.h"
+#include "SkYUVSizeInfo.h"
+
+class SkResourceCache;
+
+class SkYUVPlanesCache {
+public:
+ /**
+ * The Info struct contains data about the 3 Y, U and V planes of memory stored
+ * contiguously, in that order, as a single block of memory within SkYUVPlanesCache.
+ *
+ * fSizeInfo: fWidth, fHeight, and fWidthBytes of each of the Y, U, and V planes.
+ * fColorSpace: color space that will be used for the YUV -> RGB conversion.
+ */
+ struct Info {
+ SkYUVSizeInfo fSizeInfo;
+ SkYUVColorSpace fColorSpace;
+ };
+ /**
+ * On success, return a ref to the SkCachedData that holds the pixels.
+ *
+ * On failure, return nullptr.
+ */
+ static SkCachedData* FindAndRef(uint32_t genID, Info* info,
+ SkResourceCache* localCache = nullptr);
+
+ /**
+ * Add a pixelRef ID and its YUV planes data to the cache.
+ */
+ static void Add(uint32_t genID, SkCachedData* data, Info* info,
+ SkResourceCache* localCache = nullptr);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/effects/GrAlphaThresholdFragmentProcessor.cpp b/gfx/skia/skia/src/effects/GrAlphaThresholdFragmentProcessor.cpp
new file mode 100644
index 000000000..442acd012
--- /dev/null
+++ b/gfx/skia/skia/src/effects/GrAlphaThresholdFragmentProcessor.cpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrAlphaThresholdFragmentProcessor.h"
+
+#if SK_SUPPORT_GPU
+
+#include "GrInvariantOutput.h"
+#include "GrTextureAccess.h"
+#include "SkRefCnt.h"
+
+#include "glsl/GrGLSLColorSpaceXformHelper.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLUniformHandler.h"
+
+sk_sp<GrFragmentProcessor> GrAlphaThresholdFragmentProcessor::Make(
+ GrTexture* texture,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ GrTexture* maskTexture,
+ float innerThreshold,
+ float outerThreshold,
+ const SkIRect& bounds) {
+ return sk_sp<GrFragmentProcessor>(new GrAlphaThresholdFragmentProcessor(
+ texture, std::move(colorSpaceXform),
+ maskTexture,
+ innerThreshold, outerThreshold,
+ bounds));
+}
+
+static SkMatrix make_div_and_translate_matrix(GrTexture* texture, int x, int y) {
+ SkMatrix matrix = GrCoordTransform::MakeDivByTextureWHMatrix(texture);
+ matrix.preTranslate(SkIntToScalar(x), SkIntToScalar(y));
+ return matrix;
+}
+
+GrAlphaThresholdFragmentProcessor::GrAlphaThresholdFragmentProcessor(
+ GrTexture* texture,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ GrTexture* maskTexture,
+ float innerThreshold,
+ float outerThreshold,
+ const SkIRect& bounds)
+ : fInnerThreshold(innerThreshold)
+ , fOuterThreshold(outerThreshold)
+ , fImageCoordTransform(GrCoordTransform::MakeDivByTextureWHMatrix(texture), texture,
+ GrTextureParams::kNone_FilterMode)
+ , fImageTextureAccess(texture)
+ , fColorSpaceXform(std::move(colorSpaceXform))
+ , fMaskCoordTransform(make_div_and_translate_matrix(maskTexture, -bounds.x(), -bounds.y()),
+ maskTexture,
+ GrTextureParams::kNone_FilterMode)
+ , fMaskTextureAccess(maskTexture) {
+ this->initClassID<GrAlphaThresholdFragmentProcessor>();
+ this->addCoordTransform(&fImageCoordTransform);
+ this->addTextureAccess(&fImageTextureAccess);
+ this->addCoordTransform(&fMaskCoordTransform);
+ this->addTextureAccess(&fMaskTextureAccess);
+}
+
+bool GrAlphaThresholdFragmentProcessor::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrAlphaThresholdFragmentProcessor& s = sBase.cast<GrAlphaThresholdFragmentProcessor>();
+ return (this->fInnerThreshold == s.fInnerThreshold &&
+ this->fOuterThreshold == s.fOuterThreshold);
+}
+
+void GrAlphaThresholdFragmentProcessor::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ if (GrPixelConfigIsAlphaOnly(this->texture(0)->config())) {
+ inout->mulByUnknownSingleComponent();
+ } else if (GrPixelConfigIsOpaque(this->texture(0)->config()) && fOuterThreshold >= 1.f) {
+ inout->mulByUnknownOpaqueFourComponents();
+ } else {
+ inout->mulByUnknownFourComponents();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLAlphaThresholdFragmentProcessor : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor& effect, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrAlphaThresholdFragmentProcessor& atfp =
+ effect.cast<GrAlphaThresholdFragmentProcessor>();
+ b->add32(GrColorSpaceXform::XformKey(atfp.colorSpaceXform()));
+ }
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fInnerThresholdVar;
+ GrGLSLProgramDataManager::UniformHandle fOuterThresholdVar;
+ GrGLSLProgramDataManager::UniformHandle fColorSpaceXformVar;
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GrGLAlphaThresholdFragmentProcessor::emitCode(EmitArgs& args) {
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ fInnerThresholdVar = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType, kDefault_GrSLPrecision,
+ "inner_threshold");
+ fOuterThresholdVar = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType, kDefault_GrSLPrecision,
+ "outer_threshold");
+
+ const GrAlphaThresholdFragmentProcessor& atfp =
+ args.fFp.cast<GrAlphaThresholdFragmentProcessor>();
+ GrGLSLColorSpaceXformHelper colorSpaceHelper(uniformHandler, atfp.colorSpaceXform(),
+ &fColorSpaceXformVar);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString coords2D = fragBuilder->ensureCoords2D(args.fTransformedCoords[0]);
+ SkString maskCoords2D = fragBuilder->ensureCoords2D(args.fTransformedCoords[1]);
+
+ fragBuilder->codeAppendf("vec2 coord = %s;", coords2D.c_str());
+ fragBuilder->codeAppendf("vec2 mask_coord = %s;", maskCoords2D.c_str());
+ fragBuilder->codeAppend("vec4 input_color = ");
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], "coord", kVec2f_GrSLType,
+ &colorSpaceHelper);
+ fragBuilder->codeAppend(";");
+ fragBuilder->codeAppend("vec4 mask_color = ");
+ fragBuilder->appendTextureLookup(args.fTexSamplers[1], "mask_coord");
+ fragBuilder->codeAppend(";");
+
+ fragBuilder->codeAppendf("float inner_thresh = %s;",
+ uniformHandler->getUniformCStr(fInnerThresholdVar));
+ fragBuilder->codeAppendf("float outer_thresh = %s;",
+ uniformHandler->getUniformCStr(fOuterThresholdVar));
+ fragBuilder->codeAppend("float mask = mask_color.a;");
+
+ fragBuilder->codeAppend("vec4 color = input_color;");
+ fragBuilder->codeAppend("if (mask < 0.5) {"
+ "if (color.a > outer_thresh) {"
+ "float scale = outer_thresh / color.a;"
+ "color.rgb *= scale;"
+ "color.a = outer_thresh;"
+ "}"
+ "} else if (color.a < inner_thresh) {"
+ "float scale = inner_thresh / max(0.001, color.a);"
+ "color.rgb *= scale;"
+ "color.a = inner_thresh;"
+ "}");
+
+ fragBuilder->codeAppendf("%s = %s;", args.fOutputColor,
+ (GrGLSLExpr4(args.fInputColor) * GrGLSLExpr4("color")).c_str());
+}
+
+void GrGLAlphaThresholdFragmentProcessor::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& proc) {
+ const GrAlphaThresholdFragmentProcessor& atfp = proc.cast<GrAlphaThresholdFragmentProcessor>();
+ pdman.set1f(fInnerThresholdVar, atfp.innerThreshold());
+ pdman.set1f(fOuterThresholdVar, atfp.outerThreshold());
+ if (SkToBool(atfp.colorSpaceXform())) {
+ pdman.setSkMatrix44(fColorSpaceXformVar, atfp.colorSpaceXform()->srcToDst());
+ }
+}
+
+/////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrAlphaThresholdFragmentProcessor);
+
+sk_sp<GrFragmentProcessor> GrAlphaThresholdFragmentProcessor::TestCreate(GrProcessorTestData* d) {
+ GrTexture* bmpTex = d->fTextures[GrProcessorUnitTest::kSkiaPMTextureIdx];
+ GrTexture* maskTex = d->fTextures[GrProcessorUnitTest::kAlphaTextureIdx];
+ float innerThresh = d->fRandom->nextUScalar1();
+ float outerThresh = d->fRandom->nextUScalar1();
+ const int kMaxWidth = 1000;
+ const int kMaxHeight = 1000;
+ uint32_t width = d->fRandom->nextULessThan(kMaxWidth);
+ uint32_t height = d->fRandom->nextULessThan(kMaxHeight);
+ uint32_t x = d->fRandom->nextULessThan(kMaxWidth - width);
+ uint32_t y = d->fRandom->nextULessThan(kMaxHeight - height);
+ SkIRect bounds = SkIRect::MakeXYWH(x, y, width, height);
+ auto colorSpaceXform = GrTest::TestColorXform(d->fRandom);
+ return GrAlphaThresholdFragmentProcessor::Make(bmpTex, colorSpaceXform, maskTex,
+ innerThresh, outerThresh,
+ bounds);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrAlphaThresholdFragmentProcessor::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLAlphaThresholdFragmentProcessor::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrAlphaThresholdFragmentProcessor::onCreateGLSLInstance() const {
+ return new GrGLAlphaThresholdFragmentProcessor;
+}
+
+#endif
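
The GLSL emitted by emitCode() clamps the image alpha against two thresholds chosen by the mask. A minimal CPU-side sketch of the same per-pixel rule (a hypothetical helper, for illustration only):

    #include <algorithm>

    struct Color4f { float r, g, b, a; };  // premultiplied, 0..1

    // Mirrors the shader: outside the mask (mask < 0.5) alpha is clamped down to
    // outerThreshold; inside the mask it is raised up to innerThreshold, with RGB
    // rescaled so the color stays premultiplied.
    static Color4f alpha_threshold(Color4f c, float mask,
                                   float innerThreshold, float outerThreshold) {
        if (mask < 0.5f) {
            if (c.a > outerThreshold) {
                float scale = outerThreshold / c.a;
                c.r *= scale; c.g *= scale; c.b *= scale;
                c.a = outerThreshold;
            }
        } else if (c.a < innerThreshold) {
            float scale = innerThreshold / std::max(0.001f, c.a);
            c.r *= scale; c.g *= scale; c.b *= scale;
            c.a = innerThreshold;
        }
        return c;
    }
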
diff --git a/gfx/skia/skia/src/effects/GrAlphaThresholdFragmentProcessor.h b/gfx/skia/skia/src/effects/GrAlphaThresholdFragmentProcessor.h
new file mode 100644
index 000000000..c5b8d4ede
--- /dev/null
+++ b/gfx/skia/skia/src/effects/GrAlphaThresholdFragmentProcessor.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAlphaThresholdFragmentProcessor_DEFINED
+#define GrAlphaThresholdFragmentProcessor_DEFINED
+
+#include "SkTypes.h"
+
+#if SK_SUPPORT_GPU
+
+#include "GrColorSpaceXform.h"
+#include "GrCoordTransform.h"
+#include "GrFragmentProcessor.h"
+#include "GrProcessorUnitTest.h"
+
+class GrAlphaThresholdFragmentProcessor : public GrFragmentProcessor {
+
+public:
+ static sk_sp<GrFragmentProcessor> Make(GrTexture* texture,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ GrTexture* maskTexture,
+ float innerThreshold,
+ float outerThreshold,
+ const SkIRect& bounds);
+
+ const char* name() const override { return "Alpha Threshold"; }
+
+ float innerThreshold() const { return fInnerThreshold; }
+ float outerThreshold() const { return fOuterThreshold; }
+
+ GrColorSpaceXform* colorSpaceXform() const { return fColorSpaceXform.get(); }
+
+private:
+ GrAlphaThresholdFragmentProcessor(GrTexture* texture,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ GrTexture* maskTexture,
+ float innerThreshold,
+ float outerThreshold,
+ const SkIRect& bounds);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ float fInnerThreshold;
+ float fOuterThreshold;
+ GrCoordTransform fImageCoordTransform;
+ GrTextureAccess fImageTextureAccess;
+ // Color space transform is for the image (not the mask)
+ sk_sp<GrColorSpaceXform> fColorSpaceXform;
+ GrCoordTransform fMaskCoordTransform;
+ GrTextureAccess fMaskTextureAccess;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+#endif
+#endif
diff --git a/gfx/skia/skia/src/effects/GrCircleBlurFragmentProcessor.cpp b/gfx/skia/skia/src/effects/GrCircleBlurFragmentProcessor.cpp
new file mode 100644
index 000000000..3e80ef517
--- /dev/null
+++ b/gfx/skia/skia/src/effects/GrCircleBlurFragmentProcessor.cpp
@@ -0,0 +1,358 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrCircleBlurFragmentProcessor.h"
+
+#if SK_SUPPORT_GPU
+
+#include "GrContext.h"
+#include "GrInvariantOutput.h"
+#include "GrTextureProvider.h"
+
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+
+#include "SkFixed.h"
+
+class GrCircleBlurFragmentProcessor::GLSLProcessor : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fDataUniform;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GrCircleBlurFragmentProcessor::GLSLProcessor::emitCode(EmitArgs& args) {
+ const char *dataName;
+
+ // The data is formatted as:
+ // x,y - the center of the circle
+ // z - inner radius that should map to 0th entry in the texture.
+ // w - the inverse of the distance over which the texture is stretched.
+ fDataUniform = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType,
+ kDefault_GrSLPrecision,
+ "data",
+ &dataName);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const char *fragmentPos = fragBuilder->fragmentPosition();
+
+ if (args.fInputColor) {
+ fragBuilder->codeAppendf("vec4 src=%s;", args.fInputColor);
+ } else {
+ fragBuilder->codeAppendf("vec4 src=vec4(1);");
+ }
+
+ // We just want to compute "(length(vec) - %s.z + 0.5) * %s.w" but need to rearrange
+ // for precision.
+ fragBuilder->codeAppendf("vec2 vec = vec2( (%s.x - %s.x) * %s.w , (%s.y - %s.y) * %s.w );",
+ fragmentPos, dataName, dataName,
+ fragmentPos, dataName, dataName);
+ fragBuilder->codeAppendf("float dist = length(vec) + (0.5 - %s.z) * %s.w;",
+ dataName, dataName);
+
+ fragBuilder->codeAppendf("float intensity = ");
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], "vec2(dist, 0.5)");
+ fragBuilder->codeAppend(".a;");
+
+ fragBuilder->codeAppendf("%s = src * intensity;\n", args.fOutputColor );
+}
+
+void GrCircleBlurFragmentProcessor::GLSLProcessor::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& proc) {
+ const GrCircleBlurFragmentProcessor& cbfp = proc.cast<GrCircleBlurFragmentProcessor>();
+ const SkRect& circle = cbfp.fCircle;
+
+ // The data is formatted as:
+ // x,y - the center of the circle
+ // z - inner radius that should map to 0th entry in the texture.
+ // w - the inverse of the distance over which the profile texture is stretched.
+ pdman.set4f(fDataUniform, circle.centerX(), circle.centerY(), cbfp.fSolidRadius,
+ 1.f / cbfp.fTextureRadius);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrCircleBlurFragmentProcessor::GrCircleBlurFragmentProcessor(const SkRect& circle,
+ float textureRadius,
+ float solidRadius,
+ GrTexture* blurProfile)
+ : fCircle(circle)
+ , fSolidRadius(solidRadius)
+ , fTextureRadius(textureRadius)
+ , fBlurProfileAccess(blurProfile, GrTextureParams::kBilerp_FilterMode) {
+ this->initClassID<GrCircleBlurFragmentProcessor>();
+ this->addTextureAccess(&fBlurProfileAccess);
+ this->setWillReadFragmentPosition();
+}
+
+GrGLSLFragmentProcessor* GrCircleBlurFragmentProcessor::onCreateGLSLInstance() const {
+ return new GLSLProcessor;
+}
+
+void GrCircleBlurFragmentProcessor::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ // The code for this processor is always the same so there is nothing to add to the key.
+ return;
+}
+
+void GrCircleBlurFragmentProcessor::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ inout->mulByUnknownSingleComponent();
+}
+
+// Computes an unnormalized half kernel (right side). Returns the summation of all the half kernel
+// values.
+static float make_unnormalized_half_kernel(float* halfKernel, int halfKernelSize, float sigma) {
+ const float invSigma = 1.f / sigma;
+ const float b = -0.5f * invSigma * invSigma;
+ float tot = 0.0f;
+ // Compute half kernel values at half pixel steps out from the center.
+ float t = 0.5f;
+ for (int i = 0; i < halfKernelSize; ++i) {
+ float value = expf(t * t * b);
+ tot += value;
+ halfKernel[i] = value;
+ t += 1.f;
+ }
+ return tot;
+}
+
+// Create a Gaussian half-kernel (right side) and a summed area table given a sigma and number of
+// discrete steps. The half kernel is normalized to sum to 0.5.
+static void make_half_kernel_and_summed_table(float* halfKernel, float* summedHalfKernel,
+ int halfKernelSize, float sigma) {
+ // The half kernel should sum to 0.5 not 1.0.
+ const float tot = 2.f * make_unnormalized_half_kernel(halfKernel, halfKernelSize, sigma);
+ float sum = 0.f;
+ for (int i = 0; i < halfKernelSize; ++i) {
+ halfKernel[i] /= tot;
+ sum += halfKernel[i];
+ summedHalfKernel[i] = sum;
+ }
+}
+
+// Applies the 1D half kernel vertically at points along the x axis to a circle centered at the
+// origin with radius circleR.
+void apply_kernel_in_y(float* results, int numSteps, float firstX, float circleR,
+ int halfKernelSize, const float* summedHalfKernelTable) {
+ float x = firstX;
+ for (int i = 0; i < numSteps; ++i, x += 1.f) {
+ if (x < -circleR || x > circleR) {
+ results[i] = 0;
+ continue;
+ }
+ float y = sqrtf(circleR * circleR - x * x);
+        // In the column at x we exit the circle at +y and -y.
+        // The summed table entry j actually reflects an offset of j + 0.5.
+ y -= 0.5f;
+ int yInt = SkScalarFloorToInt(y);
+ SkASSERT(yInt >= -1);
+ if (y < 0) {
+ results[i] = (y + 0.5f) * summedHalfKernelTable[0];
+ } else if (yInt >= halfKernelSize - 1) {
+ results[i] = 0.5f;
+ } else {
+ float yFrac = y - yInt;
+ results[i] = (1.f - yFrac) * summedHalfKernelTable[yInt] +
+ yFrac * summedHalfKernelTable[yInt + 1];
+ }
+ }
+}
+
+// Apply a Gaussian at point (evalX, 0) to a circle centered at the origin with radius circleR.
+// This relies on having a half kernel computed for the Gaussian and a table of applications of
+// the half kernel in y to columns at (evalX - halfKernel, evalX - halfKernel + 1, ..., evalX +
+// halfKernel) passed in as yKernelEvaluations.
+static uint8_t eval_at(float evalX, float circleR, const float* halfKernel, int halfKernelSize,
+ const float* yKernelEvaluations) {
+ float acc = 0;
+
+ float x = evalX - halfKernelSize;
+ for (int i = 0; i < halfKernelSize; ++i, x += 1.f) {
+ if (x < -circleR || x > circleR) {
+ continue;
+ }
+ float verticalEval = yKernelEvaluations[i];
+ acc += verticalEval * halfKernel[halfKernelSize - i - 1];
+ }
+ for (int i = 0; i < halfKernelSize; ++i, x += 1.f) {
+ if (x < -circleR || x > circleR) {
+ continue;
+ }
+ float verticalEval = yKernelEvaluations[i + halfKernelSize];
+ acc += verticalEval * halfKernel[i];
+ }
+ // Since we applied a half kernel in y we multiply acc by 2 (the circle is symmetric about the
+ // x axis).
+ return SkUnitScalarClampToByte(2.f * acc);
+}
+
+// This function creates a profile of a blurred circle. It does this by computing a kernel for
+// half the Gaussian and a matching summed area table. The summed area table is used to compute
+// an array of vertical applications of the half kernel to the circle along the x axis. The table
+// of y evaluations has 2 * k + n entries where k is the size of the half kernel and n is the size
+// of the profile being computed. Then for each of the n profile entries we walk out k steps in each
+// horizontal direction multiplying the corresponding y evaluation by the half kernel entry and
+// sum these values to compute the profile entry.
+static uint8_t* create_circle_profile(float sigma, float circleR, int profileTextureWidth) {
+ const int numSteps = profileTextureWidth;
+ uint8_t* weights = new uint8_t[numSteps];
+
+ // The full kernel is 6 sigmas wide.
+ int halfKernelSize = SkScalarCeilToInt(6.0f*sigma);
+ // round up to next multiple of 2 and then divide by 2
+ halfKernelSize = ((halfKernelSize + 1) & ~1) >> 1;
+
+ // Number of x steps at which to apply kernel in y to cover all the profile samples in x.
+ int numYSteps = numSteps + 2 * halfKernelSize;
+
+ SkAutoTArray<float> bulkAlloc(halfKernelSize + halfKernelSize + numYSteps);
+ float* halfKernel = bulkAlloc.get();
+ float* summedKernel = bulkAlloc.get() + halfKernelSize;
+ float* yEvals = bulkAlloc.get() + 2 * halfKernelSize;
+ make_half_kernel_and_summed_table(halfKernel, summedKernel, halfKernelSize, sigma);
+
+ float firstX = -halfKernelSize + 0.5f;
+ apply_kernel_in_y(yEvals, numYSteps, firstX, circleR, halfKernelSize, summedKernel);
+
+ for (int i = 0; i < numSteps - 1; ++i) {
+ float evalX = i + 0.5f;
+ weights[i] = eval_at(evalX, circleR, halfKernel, halfKernelSize, yEvals + i);
+ }
+ // Ensure the tail of the Gaussian goes to zero.
+ weights[numSteps - 1] = 0;
+ return weights;
+}
+
+static uint8_t* create_half_plane_profile(int profileWidth) {
+ SkASSERT(!(profileWidth & 0x1));
+ // The full kernel is 6 sigmas wide.
+ float sigma = profileWidth / 6.f;
+ int halfKernelSize = profileWidth / 2;
+
+ SkAutoTArray<float> halfKernel(halfKernelSize);
+ uint8_t* profile = new uint8_t[profileWidth];
+
+ // The half kernel should sum to 0.5.
+ const float tot = 2.f * make_unnormalized_half_kernel(halfKernel.get(), halfKernelSize, sigma);
+ float sum = 0.f;
+ // Populate the profile from the right edge to the middle.
+ for (int i = 0; i < halfKernelSize; ++i) {
+ halfKernel[halfKernelSize - i - 1] /= tot;
+ sum += halfKernel[halfKernelSize - i - 1];
+ profile[profileWidth - i - 1] = SkUnitScalarClampToByte(sum);
+ }
+ // Populate the profile from the middle to the left edge (by flipping the half kernel and
+ // continuing the summation).
+ for (int i = 0; i < halfKernelSize; ++i) {
+ sum += halfKernel[i];
+ profile[halfKernelSize - i - 1] = SkUnitScalarClampToByte(sum);
+ }
+ // Ensure tail goes to 0.
+ profile[profileWidth - 1] = 0;
+ return profile;
+}
+
+static GrTexture* create_profile_texture(GrTextureProvider* textureProvider, const SkRect& circle,
+ float sigma, float* solidRadius, float* textureRadius) {
+ float circleR = circle.width() / 2.0f;
+ // Profile textures are cached by the ratio of sigma to circle radius and by the size of the
+ // profile texture (binned by powers of 2).
+ SkScalar sigmaToCircleRRatio = sigma / circleR;
+    // When sigma is really small this becomes equivalent to convolving a Gaussian with a half-
+    // plane. Similarly, in the extreme high-ratio cases the circle becomes a point WRT the
+    // Gaussian and the profile texture is just a Gaussian evaluation. However, we haven't yet
+    // implemented this latter optimization.
+ sigmaToCircleRRatio = SkTMin(sigmaToCircleRRatio, 8.f);
+ SkFixed sigmaToCircleRRatioFixed;
+ static const SkScalar kHalfPlaneThreshold = 0.1f;
+ bool useHalfPlaneApprox = false;
+ if (sigmaToCircleRRatio <= kHalfPlaneThreshold) {
+ useHalfPlaneApprox = true;
+ sigmaToCircleRRatioFixed = 0;
+ *solidRadius = circleR - 3 * sigma;
+ *textureRadius = 6 * sigma;
+ } else {
+ // Convert to fixed point for the key.
+ sigmaToCircleRRatioFixed = SkScalarToFixed(sigmaToCircleRRatio);
+ // We shave off some bits to reduce the number of unique entries. We could probably shave
+ // off more than we do.
+ sigmaToCircleRRatioFixed &= ~0xff;
+ sigmaToCircleRRatio = SkFixedToScalar(sigmaToCircleRRatioFixed);
+ sigma = circleR * sigmaToCircleRRatio;
+ *solidRadius = 0;
+ *textureRadius = circleR + 3 * sigma;
+ }
+
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey key;
+ GrUniqueKey::Builder builder(&key, kDomain, 1);
+ builder[0] = sigmaToCircleRRatioFixed;
+ builder.finish();
+
+ GrTexture *blurProfile = textureProvider->findAndRefTextureByUniqueKey(key);
+ if (!blurProfile) {
+ static constexpr int kProfileTextureWidth = 512;
+ GrSurfaceDesc texDesc;
+ texDesc.fWidth = kProfileTextureWidth;
+ texDesc.fHeight = 1;
+ texDesc.fConfig = kAlpha_8_GrPixelConfig;
+
+ SkAutoTDeleteArray<uint8_t> profile(nullptr);
+ if (useHalfPlaneApprox) {
+ profile.reset(create_half_plane_profile(kProfileTextureWidth));
+ } else {
+ // Rescale params to the size of the texture we're creating.
+ SkScalar scale = kProfileTextureWidth / *textureRadius;
+ profile.reset(create_circle_profile(sigma * scale, circleR * scale,
+ kProfileTextureWidth));
+ }
+
+ blurProfile = textureProvider->createTexture(texDesc, SkBudgeted::kYes, profile.get(), 0);
+ if (blurProfile) {
+ textureProvider->assignUniqueKeyToTexture(key, blurProfile);
+ }
+ }
+
+ return blurProfile;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> GrCircleBlurFragmentProcessor::Make(GrTextureProvider* textureProvider,
+ const SkRect& circle, float sigma) {
+ float solidRadius;
+ float textureRadius;
+ SkAutoTUnref<GrTexture> profile(create_profile_texture(textureProvider, circle, sigma,
+ &solidRadius, &textureRadius));
+ if (!profile) {
+ return nullptr;
+ }
+ return sk_sp<GrFragmentProcessor>(new GrCircleBlurFragmentProcessor(circle, textureRadius,
+ solidRadius, profile));
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrCircleBlurFragmentProcessor);
+
+sk_sp<GrFragmentProcessor> GrCircleBlurFragmentProcessor::TestCreate(GrProcessorTestData* d) {
+ SkScalar wh = d->fRandom->nextRangeScalar(100.f, 1000.f);
+ SkScalar sigma = d->fRandom->nextRangeF(1.f,10.f);
+ SkRect circle = SkRect::MakeWH(wh, wh);
+ return GrCircleBlurFragmentProcessor::Make(d->fContext->textureProvider(), circle, sigma);
+}
+
+#endif
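
create_circle_profile() approximates, at each texel, the fraction of a 2D Gaussian centered at distance d from the circle's center that falls inside the circle. A brute-force reference for that quantity (illustration only; far too slow for real use):

    #include <cmath>

    // Integrate a 2D Gaussian (std dev 'sigma') centered at (d, 0) over the disk
    // of radius 'circleR', normalized by the full Gaussian integral.
    static float brute_force_circle_profile(float d, float circleR, float sigma,
                                            int samples = 400) {
        double inside = 0, total = 0;
        const float span = 6 * sigma;  // +/- 3 sigma covers essentially all the mass
        for (int iy = 0; iy < samples; ++iy) {
            float y = -0.5f * span + span * (iy + 0.5f) / samples;
            for (int ix = 0; ix < samples; ++ix) {
                float x = -0.5f * span + span * (ix + 0.5f) / samples;
                double w = std::exp(-(x * x + y * y) / (2.0 * sigma * sigma));
                total += w;
                float px = d + x, py = y;  // sample position relative to the circle center
                if (px * px + py * py <= circleR * circleR) {
                    inside += w;
                }
            }
        }
        return float(inside / total);
    }
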
diff --git a/gfx/skia/skia/src/effects/GrCircleBlurFragmentProcessor.h b/gfx/skia/skia/src/effects/GrCircleBlurFragmentProcessor.h
new file mode 100644
index 000000000..66072887d
--- /dev/null
+++ b/gfx/skia/skia/src/effects/GrCircleBlurFragmentProcessor.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCircleBlurFragmentProcessor_DEFINED
+#define GrCircleBlurFragmentProcessor_DEFINED
+
+#include "SkString.h"
+#include "SkTypes.h"
+
+#if SK_SUPPORT_GPU
+
+#include "GrFragmentProcessor.h"
+#include "GrProcessorUnitTest.h"
+
+class GrTextureProvider;
+
+// This FP handles the special case of a blurred circle. It uses a 1D
+// profile that is just rotated about the origin of the circle.
+class GrCircleBlurFragmentProcessor : public GrFragmentProcessor {
+public:
+ ~GrCircleBlurFragmentProcessor() override {}
+
+ const char* name() const override { return "CircleBlur"; }
+
+ SkString dumpInfo() const override {
+ SkString str;
+ str.appendf("Rect [L: %.2f, T: %.2f, R: %.2f, B: %.2f], solidR: %.2f, textureR: %.2f",
+ fCircle.fLeft, fCircle.fTop, fCircle.fRight, fCircle.fBottom,
+ fSolidRadius, fTextureRadius);
+ return str;
+ }
+
+    static sk_sp<GrFragmentProcessor> Make(GrTextureProvider* textureProvider,
+ const SkRect& circle, float sigma);
+
+private:
+ // This nested GLSL processor implementation is defined in the cpp file.
+ class GLSLProcessor;
+
+ /**
+     * blurProfile is a 1D profile texture (height 1) created for the circle and sigma. The x
+     * texture coord should map from 0 to 1 across the radius range of solidRadius to
+     * solidRadius + textureRadius.
+     */
+    GrCircleBlurFragmentProcessor(const SkRect& circle, float textureRadius, float solidRadius,
+                                  GrTexture* blurProfile);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ bool onIsEqual(const GrFragmentProcessor& other) const override {
+ const GrCircleBlurFragmentProcessor& cbfp = other.cast<GrCircleBlurFragmentProcessor>();
+ return fCircle == cbfp.fCircle && fSolidRadius == cbfp.fSolidRadius &&
+ fTextureRadius == cbfp.fTextureRadius;
+ }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ SkRect fCircle;
+ SkScalar fSolidRadius;
+ float fTextureRadius;
+ GrTextureAccess fBlurProfileAccess;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+#endif
+#endif
diff --git a/gfx/skia/skia/src/effects/Sk1DPathEffect.cpp b/gfx/skia/skia/src/effects/Sk1DPathEffect.cpp
new file mode 100644
index 000000000..26cd046aa
--- /dev/null
+++ b/gfx/skia/skia/src/effects/Sk1DPathEffect.cpp
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "Sk1DPathEffect.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+#include "SkPathMeasure.h"
+#include "SkStrokeRec.h"
+
+bool Sk1DPathEffect::filterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec*, const SkRect*) const {
+ SkPathMeasure meas(src, false);
+ do {
+ SkScalar length = meas.getLength();
+ SkScalar distance = this->begin(length);
+ while (distance < length) {
+ SkScalar delta = this->next(dst, distance, meas);
+ if (delta <= 0) {
+ break;
+ }
+ distance += delta;
+ }
+ } while (meas.nextContour());
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPath1DPathEffect::SkPath1DPathEffect(const SkPath& path, SkScalar advance,
+ SkScalar phase, Style style) : fPath(path)
+{
+ SkASSERT(advance > 0 && !path.isEmpty());
+    // Clean up the phase parameter, inverting it so that it becomes an
+    // offset along the path (to match the interpretation in PostScript).
+ if (phase < 0) {
+ phase = -phase;
+ if (phase > advance) {
+ phase = SkScalarMod(phase, advance);
+ }
+ } else {
+ if (phase > advance) {
+ phase = SkScalarMod(phase, advance);
+ }
+ phase = advance - phase;
+ }
+ // now catch the edge case where phase == advance (within epsilon)
+ if (phase >= advance) {
+ phase = 0;
+ }
+ SkASSERT(phase >= 0);
+
+ fAdvance = advance;
+ fInitialOffset = phase;
+
+ if ((unsigned)style > kMorph_Style) {
+ SkDEBUGF(("SkPath1DPathEffect style enum out of range %d\n", style));
+ }
+ fStyle = style;
+}
+
+bool SkPath1DPathEffect::filterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec* rec, const SkRect* cullRect) const {
+ if (fAdvance > 0) {
+ rec->setFillStyle();
+ return this->INHERITED::filterPath(dst, src, rec, cullRect);
+ }
+ return false;
+}
+
+static bool morphpoints(SkPoint dst[], const SkPoint src[], int count,
+ SkPathMeasure& meas, SkScalar dist) {
+ for (int i = 0; i < count; i++) {
+ SkPoint pos;
+ SkVector tangent;
+
+ SkScalar sx = src[i].fX;
+ SkScalar sy = src[i].fY;
+
+ if (!meas.getPosTan(dist + sx, &pos, &tangent)) {
+ return false;
+ }
+
+ SkMatrix matrix;
+ SkPoint pt;
+
+ pt.set(sx, sy);
+ matrix.setSinCos(tangent.fY, tangent.fX, 0, 0);
+ matrix.preTranslate(-sx, 0);
+ matrix.postTranslate(pos.fX, pos.fY);
+ matrix.mapPoints(&dst[i], &pt, 1);
+ }
+ return true;
+}
+
+/* TODO
+
+Need differentially more subdivisions when the follow-path is curvy. Not sure how to
+determine that, but we need it. I guess a cheap answer is to let the caller tell us,
+but that seems like a cop-out. Another answer is to get Rob Johnson to figure it out.
+*/
+static void morphpath(SkPath* dst, const SkPath& src, SkPathMeasure& meas,
+ SkScalar dist) {
+ SkPath::Iter iter(src, false);
+ SkPoint srcP[4], dstP[3];
+ SkPath::Verb verb;
+
+ while ((verb = iter.next(srcP)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ if (morphpoints(dstP, srcP, 1, meas, dist)) {
+ dst->moveTo(dstP[0]);
+ }
+ break;
+ case SkPath::kLine_Verb:
+ srcP[2] = srcP[1];
+ srcP[1].set(SkScalarAve(srcP[0].fX, srcP[2].fX),
+ SkScalarAve(srcP[0].fY, srcP[2].fY));
+ // fall through to quad
+ case SkPath::kQuad_Verb:
+ if (morphpoints(dstP, &srcP[1], 2, meas, dist)) {
+ dst->quadTo(dstP[0], dstP[1]);
+ }
+ break;
+ case SkPath::kCubic_Verb:
+ if (morphpoints(dstP, &srcP[1], 3, meas, dist)) {
+ dst->cubicTo(dstP[0], dstP[1], dstP[2]);
+ }
+ break;
+ case SkPath::kClose_Verb:
+ dst->close();
+ break;
+ default:
+ SkDEBUGFAIL("unknown verb");
+ break;
+ }
+ }
+}
+
+SkScalar SkPath1DPathEffect::begin(SkScalar contourLength) const {
+ return fInitialOffset;
+}
+
+sk_sp<SkFlattenable> SkPath1DPathEffect::CreateProc(SkReadBuffer& buffer) {
+ SkScalar advance = buffer.readScalar();
+ if (advance > 0) {
+ SkPath path;
+ buffer.readPath(&path);
+ SkScalar phase = buffer.readScalar();
+ Style style = (Style)buffer.readUInt();
+ return SkPath1DPathEffect::Make(path, advance, phase, style);
+ }
+ return nullptr;
+}
+
+void SkPath1DPathEffect::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeScalar(fAdvance);
+ if (fAdvance > 0) {
+ buffer.writePath(fPath);
+ buffer.writeScalar(fInitialOffset);
+ buffer.writeUInt(fStyle);
+ }
+}
+
+SkScalar SkPath1DPathEffect::next(SkPath* dst, SkScalar distance,
+ SkPathMeasure& meas) const {
+ switch (fStyle) {
+ case kTranslate_Style: {
+ SkPoint pos;
+ if (meas.getPosTan(distance, &pos, nullptr)) {
+ dst->addPath(fPath, pos.fX, pos.fY);
+ }
+ } break;
+ case kRotate_Style: {
+ SkMatrix matrix;
+ if (meas.getMatrix(distance, &matrix)) {
+ dst->addPath(fPath, matrix);
+ }
+ } break;
+ case kMorph_Style:
+ morphpath(dst, fPath, meas, distance);
+ break;
+ default:
+ SkDEBUGFAIL("unknown Style enum");
+ break;
+ }
+ return fAdvance;
+}
+
+
+#ifndef SK_IGNORE_TO_STRING
+void SkPath1DPathEffect::toString(SkString* str) const {
+ str->appendf("SkPath1DPathEffect: (");
+ // TODO: add path and style
+ str->appendf("advance: %.2f phase %.2f", fAdvance, fInitialOffset);
+ str->appendf(")");
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkPathEffect> SkPath1DPathEffect::Make(const SkPath& path, SkScalar advance, SkScalar phase,
+ Style style) {
+ if (advance <= 0 || path.isEmpty()) {
+ return nullptr;
+ }
+ return sk_sp<SkPathEffect>(new SkPath1DPathEffect(path, advance, phase, style));
+}
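+
+// Illustrative usage sketch (not part of the upstream file; 'canvas' and the
+// literal values are assumptions of the example). Stamping a small triangle
+// every 20 units along a contour, rotating each stamp to follow the contour's
+// direction:
+//
+//   SkPath stamp;
+//   stamp.moveTo(0, -5);
+//   stamp.lineTo(10, 0);
+//   stamp.lineTo(0, 5);
+//   stamp.close();
+//
+//   SkPaint paint;
+//   paint.setAntiAlias(true);
+//   paint.setPathEffect(
+//       SkPath1DPathEffect::Make(stamp, /*advance=*/20, /*phase=*/0,
+//                                SkPath1DPathEffect::kRotate_Style));
+//   canvas->drawCircle(128, 128, 80, paint);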
diff --git a/gfx/skia/skia/src/effects/Sk2DPathEffect.cpp b/gfx/skia/skia/src/effects/Sk2DPathEffect.cpp
new file mode 100644
index 000000000..e7ef54b6f
--- /dev/null
+++ b/gfx/skia/skia/src/effects/Sk2DPathEffect.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "Sk2DPathEffect.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+#include "SkPath.h"
+#include "SkRegion.h"
+#include "SkStrokeRec.h"
+
+Sk2DPathEffect::Sk2DPathEffect(const SkMatrix& mat) : fMatrix(mat) {
+ // Calling invert will set the type mask on both matrices, making them thread safe.
+ fMatrixIsInvertible = fMatrix.invert(&fInverse);
+}
+
+bool Sk2DPathEffect::filterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec*, const SkRect*) const {
+ if (!fMatrixIsInvertible) {
+ return false;
+ }
+
+ SkPath tmp;
+ SkIRect ir;
+
+ src.transform(fInverse, &tmp);
+ tmp.getBounds().round(&ir);
+ if (!ir.isEmpty()) {
+ this->begin(ir, dst);
+
+ SkRegion rgn;
+ rgn.setPath(tmp, SkRegion(ir));
+ SkRegion::Iterator iter(rgn);
+ for (; !iter.done(); iter.next()) {
+ const SkIRect& rect = iter.rect();
+ for (int y = rect.fTop; y < rect.fBottom; ++y) {
+ this->nextSpan(rect.fLeft, y, rect.width(), dst);
+ }
+ }
+
+ this->end(dst);
+ }
+ return true;
+}
+
+void Sk2DPathEffect::nextSpan(int x, int y, int count, SkPath* path) const {
+ if (!fMatrixIsInvertible) {
+ return;
+ }
+
+ const SkMatrix& mat = this->getMatrix();
+ SkPoint src, dst;
+
+ src.set(SkIntToScalar(x) + SK_ScalarHalf, SkIntToScalar(y) + SK_ScalarHalf);
+ do {
+ mat.mapPoints(&dst, &src, 1);
+ this->next(dst, x++, y, path);
+ src.fX += SK_Scalar1;
+ } while (--count > 0);
+}
+
+void Sk2DPathEffect::begin(const SkIRect& uvBounds, SkPath* dst) const {}
+void Sk2DPathEffect::next(const SkPoint& loc, int u, int v, SkPath* dst) const {}
+void Sk2DPathEffect::end(SkPath* dst) const {}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void Sk2DPathEffect::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeMatrix(fMatrix);
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void Sk2DPathEffect::toString(SkString* str) const {
+ str->appendf("(matrix: %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f)",
+ fMatrix[SkMatrix::kMScaleX], fMatrix[SkMatrix::kMSkewX], fMatrix[SkMatrix::kMTransX],
+ fMatrix[SkMatrix::kMSkewY], fMatrix[SkMatrix::kMScaleY], fMatrix[SkMatrix::kMTransY],
+ fMatrix[SkMatrix::kMPersp0], fMatrix[SkMatrix::kMPersp1], fMatrix[SkMatrix::kMPersp2]);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkLine2DPathEffect::filterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec* rec, const SkRect* cullRect) const {
+ if (this->INHERITED::filterPath(dst, src, rec, cullRect)) {
+ rec->setStrokeStyle(fWidth);
+ return true;
+ }
+ return false;
+}
+
+void SkLine2DPathEffect::nextSpan(int u, int v, int ucount, SkPath* dst) const {
+ if (ucount > 1) {
+ SkPoint src[2], dstP[2];
+
+ src[0].set(SkIntToScalar(u) + SK_ScalarHalf, SkIntToScalar(v) + SK_ScalarHalf);
+ src[1].set(SkIntToScalar(u+ucount) + SK_ScalarHalf, SkIntToScalar(v) + SK_ScalarHalf);
+ this->getMatrix().mapPoints(dstP, src, 2);
+
+ dst->moveTo(dstP[0]);
+ dst->lineTo(dstP[1]);
+ }
+}
+
+sk_sp<SkFlattenable> SkLine2DPathEffect::CreateProc(SkReadBuffer& buffer) {
+ SkMatrix matrix;
+ buffer.readMatrix(&matrix);
+ SkScalar width = buffer.readScalar();
+ return SkLine2DPathEffect::Make(width, matrix);
+}
+
+void SkLine2DPathEffect::flatten(SkWriteBuffer &buffer) const {
+ buffer.writeMatrix(this->getMatrix());
+ buffer.writeScalar(fWidth);
+}
+
+
+#ifndef SK_IGNORE_TO_STRING
+void SkLine2DPathEffect::toString(SkString* str) const {
+ str->appendf("SkLine2DPathEffect: (");
+ this->INHERITED::toString(str);
+ str->appendf("width: %f", fWidth);
+ str->appendf(")");
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPath2DPathEffect::SkPath2DPathEffect(const SkMatrix& m, const SkPath& p)
+ : INHERITED(m), fPath(p) {
+}
+
+sk_sp<SkFlattenable> SkPath2DPathEffect::CreateProc(SkReadBuffer& buffer) {
+ SkMatrix matrix;
+ buffer.readMatrix(&matrix);
+ SkPath path;
+ buffer.readPath(&path);
+ return SkPath2DPathEffect::Make(matrix, path);
+}
+
+void SkPath2DPathEffect::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeMatrix(this->getMatrix());
+ buffer.writePath(fPath);
+}
+
+void SkPath2DPathEffect::next(const SkPoint& loc, int u, int v,
+ SkPath* dst) const {
+ dst->addPath(fPath, loc.fX, loc.fY);
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkPath2DPathEffect::toString(SkString* str) const {
+ str->appendf("SkPath2DPathEffect: (");
+ this->INHERITED::toString(str);
+ // TODO: print out path information
+ str->appendf(")");
+}
+#endif
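+
+// Illustrative usage sketch (not part of the upstream file; 'canvas' and the
+// literal values are assumptions of the example). The lattice matrix defines
+// the cell size, and SkPath2DPathEffect stamps 'dot' once per lattice cell
+// covering the filled area:
+//
+//   SkPath dot;
+//   dot.addCircle(0, 0, 3);
+//
+//   SkMatrix lattice;
+//   lattice.setScale(12, 12);   // one stamp every 12 units in x and y
+//
+//   SkPaint paint;
+//   paint.setAntiAlias(true);
+//   paint.setPathEffect(SkPath2DPathEffect::Make(lattice, dot));
+//   canvas->drawRect(SkRect::MakeWH(256, 256), paint);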
diff --git a/gfx/skia/skia/src/effects/SkAlphaThresholdFilter.cpp b/gfx/skia/skia/src/effects/SkAlphaThresholdFilter.cpp
new file mode 100644
index 000000000..bbae2e155
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkAlphaThresholdFilter.cpp
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAlphaThresholdFilter.h"
+
+#include "SkBitmap.h"
+#include "SkReadBuffer.h"
+#include "SkSpecialImage.h"
+#include "SkWriteBuffer.h"
+#include "SkRegion.h"
+
+#if SK_SUPPORT_GPU
+#include "GrAlphaThresholdFragmentProcessor.h"
+#include "GrContext.h"
+#include "GrDrawContext.h"
+#include "GrFixedClip.h"
+#endif
+
+class SK_API SkAlphaThresholdFilterImpl : public SkImageFilter {
+public:
+ SkAlphaThresholdFilterImpl(const SkRegion& region, SkScalar innerThreshold,
+ SkScalar outerThreshold, sk_sp<SkImageFilter> input,
+ const CropRect* cropRect = nullptr);
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkAlphaThresholdFilterImpl)
+ friend void SkAlphaThresholdFilter::InitializeFlattenables();
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* source, const Context&,
+ SkIPoint* offset) const override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrTexture> createMaskTexture(GrContext*, const SkMatrix&, const SkIRect& bounds) const;
+#endif
+
+private:
+ SkRegion fRegion;
+ SkScalar fInnerThreshold;
+ SkScalar fOuterThreshold;
+ typedef SkImageFilter INHERITED;
+};
+
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkAlphaThresholdFilter)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkAlphaThresholdFilterImpl)
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END
+
+static SkScalar pin_0_1(SkScalar x) {
+ return SkMinScalar(SkMaxScalar(x, 0), 1);
+}
+
+sk_sp<SkImageFilter> SkAlphaThresholdFilter::Make(const SkRegion& region,
+ SkScalar innerThreshold,
+ SkScalar outerThreshold,
+ sk_sp<SkImageFilter> input,
+ const SkImageFilter::CropRect* cropRect) {
+ innerThreshold = pin_0_1(innerThreshold);
+ outerThreshold = pin_0_1(outerThreshold);
+ if (!SkScalarIsFinite(innerThreshold) || !SkScalarIsFinite(outerThreshold)) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkAlphaThresholdFilterImpl(region, innerThreshold,
+ outerThreshold,
+ std::move(input),
+ cropRect));
+}
+
+sk_sp<SkFlattenable> SkAlphaThresholdFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkScalar inner = buffer.readScalar();
+ SkScalar outer = buffer.readScalar();
+ SkRegion rgn;
+ buffer.readRegion(&rgn);
+ return SkAlphaThresholdFilter::Make(rgn, inner, outer, common.getInput(0),
+ &common.cropRect());
+}
+
+SkAlphaThresholdFilterImpl::SkAlphaThresholdFilterImpl(const SkRegion& region,
+ SkScalar innerThreshold,
+ SkScalar outerThreshold,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fRegion(region)
+ , fInnerThreshold(innerThreshold)
+ , fOuterThreshold(outerThreshold) {
+}
+
+#if SK_SUPPORT_GPU
+sk_sp<GrTexture> SkAlphaThresholdFilterImpl::createMaskTexture(GrContext* context,
+ const SkMatrix& inMatrix,
+ const SkIRect& bounds) const {
+
+ sk_sp<GrDrawContext> drawContext(context->makeDrawContextWithFallback(SkBackingFit::kApprox,
+ bounds.width(),
+ bounds.height(),
+ kAlpha_8_GrPixelConfig,
+ nullptr));
+ if (!drawContext) {
+ return nullptr;
+ }
+
+ GrPaint grPaint;
+ grPaint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ SkRegion::Iterator iter(fRegion);
+ drawContext->clear(nullptr, 0x0, true);
+
+ GrFixedClip clip(SkIRect::MakeWH(bounds.width(), bounds.height()));
+ while (!iter.done()) {
+ SkRect rect = SkRect::Make(iter.rect());
+ drawContext->drawRect(clip, grPaint, inMatrix, rect);
+ iter.next();
+ }
+
+ return drawContext->asTexture();
+}
+#endif
+
+void SkAlphaThresholdFilterImpl::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeScalar(fInnerThreshold);
+ buffer.writeScalar(fOuterThreshold);
+ buffer.writeRegion(fRegion);
+}
+
+sk_sp<SkSpecialImage> SkAlphaThresholdFilterImpl::onFilterImage(SkSpecialImage* source,
+ const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, source, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ const SkIRect inputBounds = SkIRect::MakeXYWH(inputOffset.x(), inputOffset.y(),
+ input->width(), input->height());
+
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, inputBounds, &bounds)) {
+ return nullptr;
+ }
+
+#if SK_SUPPORT_GPU
+ if (source->isTextureBacked()) {
+ GrContext* context = source->getContext();
+
+ sk_sp<GrTexture> inputTexture(input->asTextureRef(context));
+ SkASSERT(inputTexture);
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+
+ bounds.offset(-inputOffset);
+
+ SkMatrix matrix(ctx.ctm());
+ matrix.postTranslate(SkIntToScalar(-bounds.left()), SkIntToScalar(-bounds.top()));
+
+ sk_sp<GrTexture> maskTexture(this->createMaskTexture(context, matrix, bounds));
+ if (!maskTexture) {
+ return nullptr;
+ }
+
+ const OutputProperties& outProps = ctx.outputProperties();
+ sk_sp<GrColorSpaceXform> colorSpaceXform = GrColorSpaceXform::Make(input->getColorSpace(),
+ outProps.colorSpace());
+ sk_sp<GrFragmentProcessor> fp(GrAlphaThresholdFragmentProcessor::Make(
+ inputTexture.get(),
+ std::move(colorSpaceXform),
+ maskTexture.get(),
+ fInnerThreshold,
+ fOuterThreshold,
+ bounds));
+ if (!fp) {
+ return nullptr;
+ }
+
+ return DrawWithFP(context, std::move(fp), bounds, outProps);
+ }
+#endif
+
+ SkBitmap inputBM;
+
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if (inputBM.colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+
+ SkAutoLockPixels inputLock(inputBM);
+
+ if (!inputBM.getPixels() || inputBM.width() <= 0 || inputBM.height() <= 0) {
+ return nullptr;
+ }
+
+
+ SkMatrix localInverse;
+ if (!ctx.ctm().invert(&localInverse)) {
+ return nullptr;
+ }
+
+ SkImageInfo info = SkImageInfo::MakeN32(bounds.width(), bounds.height(),
+ kPremul_SkAlphaType);
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ SkAutoLockPixels dstLock(dst);
+
+ U8CPU innerThreshold = (U8CPU)(fInnerThreshold * 0xFF);
+ U8CPU outerThreshold = (U8CPU)(fOuterThreshold * 0xFF);
+ SkColor* dptr = dst.getAddr32(0, 0);
+ int dstWidth = dst.width(), dstHeight = dst.height();
+ for (int y = 0; y < dstHeight; ++y) {
+ const SkColor* sptr = inputBM.getAddr32(bounds.fLeft, bounds.fTop+y);
+
+ for (int x = 0; x < dstWidth; ++x) {
+ const SkColor& source = sptr[x];
+ SkColor outputColor(source);
+ SkPoint position;
+ localInverse.mapXY((SkScalar)x + bounds.fLeft, (SkScalar)y + bounds.fTop, &position);
+ if (fRegion.contains((int32_t)position.x(), (int32_t)position.y())) {
+ if (SkColorGetA(source) < innerThreshold) {
+ U8CPU alpha = SkColorGetA(source);
+ if (alpha == 0) {
+ alpha = 1;
+ }
+ float scale = (float)innerThreshold / alpha;
+ outputColor = SkColorSetARGB(innerThreshold,
+ (U8CPU)(SkColorGetR(source) * scale),
+ (U8CPU)(SkColorGetG(source) * scale),
+ (U8CPU)(SkColorGetB(source) * scale));
+ }
+ } else {
+ if (SkColorGetA(source) > outerThreshold) {
+ float scale = (float)outerThreshold / SkColorGetA(source);
+ outputColor = SkColorSetARGB(outerThreshold,
+ (U8CPU)(SkColorGetR(source) * scale),
+ (U8CPU)(SkColorGetG(source) * scale),
+ (U8CPU)(SkColorGetB(source) * scale));
+ }
+ }
+ dptr[y * dstWidth + x] = outputColor;
+ }
+ }
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(bounds.width(), bounds.height()),
+ dst);
+}
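+
+// Summary of the raster loop above (added commentary, not upstream text), for a
+// premultiplied source pixel with 8-bit alpha A:
+//
+//   inside fRegion:   if A < innerThreshold, scale the pixel by
+//                     innerThreshold / A so its alpha is raised to exactly
+//                     innerThreshold (RGB is scaled too, staying premultiplied);
+//   outside fRegion:  if A > outerThreshold, scale the pixel by
+//                     outerThreshold / A so its alpha is lowered to outerThreshold.
+//
+// Example: with an 8-bit innerThreshold of 128, a pixel inside the region with
+// premultiplied (A=64, R=32, G=16, B=8) is scaled by 128/64 = 2, giving
+// (A=128, R=64, G=32, B=16). Pixels already within the thresholds pass through.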
+
+#ifndef SK_IGNORE_TO_STRING
+void SkAlphaThresholdFilterImpl::toString(SkString* str) const {
+ str->appendf("SkAlphaThresholdImageFilter: (");
+ str->appendf("inner: %f outer: %f", fInnerThreshold, fOuterThreshold);
+ str->append(")");
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/SkArcToPathEffect.cpp b/gfx/skia/skia/src/effects/SkArcToPathEffect.cpp
new file mode 100644
index 000000000..a6273db09
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkArcToPathEffect.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkArcToPathEffect.h"
+#include "SkPath.h"
+#include "SkPoint.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+
+SkArcToPathEffect::SkArcToPathEffect(SkScalar radius) : fRadius(radius) {}
+
+bool SkArcToPathEffect::filterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec*, const SkRect*) const {
+ SkPath::Iter iter(src, false);
+ SkPath::Verb verb;
+ SkPoint pts[4];
+
+ SkPoint lastCorner = { 0, 0 }; // avoid warning
+ SkPath::Verb prevVerb = SkPath::kMove_Verb;
+
+ for (;;) {
+ switch (verb = iter.next(pts, false)) {
+ case SkPath::kMove_Verb:
+ if (SkPath::kLine_Verb == prevVerb) {
+ dst->lineTo(lastCorner);
+ }
+ dst->moveTo(pts[0]);
+ break;
+ case SkPath::kLine_Verb:
+ if (prevVerb == SkPath::kLine_Verb) {
+ dst->arcTo(pts[0], pts[1], fRadius);
+ }
+ lastCorner = pts[1];
+ break;
+ case SkPath::kQuad_Verb:
+ dst->quadTo(pts[1], pts[2]);
+ lastCorner = pts[2];
+ break;
+ case SkPath::kConic_Verb:
+ dst->conicTo(pts[1], pts[2], iter.conicWeight());
+ lastCorner = pts[2];
+ break;
+ case SkPath::kCubic_Verb:
+ dst->cubicTo(pts[1], pts[2], pts[3]);
+ lastCorner = pts[3];
+ break;
+ case SkPath::kClose_Verb:
+ dst->lineTo(lastCorner);
+ break;
+ case SkPath::kDone_Verb:
+ dst->lineTo(lastCorner);
+ goto DONE;
+ }
+ prevVerb = verb;
+ }
+DONE:
+ return true;
+}
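+
+// Behaviour summary (added commentary, not upstream text): only corners formed
+// by two consecutive kLine_Verb segments are rounded, via
+// dst->arcTo(corner, nextEndPoint, fRadius). Each line's own end point is
+// deferred in 'lastCorner' and is only emitted as a lineTo() when the contour
+// ends (kClose_Verb, kDone_Verb) or a new contour starts (kMove_Verb), so that
+// the following segment gets a chance to round the corner first. Quads, conics
+// and cubics are passed through unchanged.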
+
+sk_sp<SkFlattenable> SkArcToPathEffect::CreateProc(SkReadBuffer& buffer) {
+ return SkArcToPathEffect::Make(buffer.readScalar());
+}
+
+void SkArcToPathEffect::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeScalar(fRadius);
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkArcToPathEffect::toString(SkString* str) const {
+ str->appendf("SkArcToPathEffect: (");
+ str->appendf("radius: %f", fRadius);
+ str->appendf(")");
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/SkArithmeticMode.cpp b/gfx/skia/skia/src/effects/SkArithmeticMode.cpp
new file mode 100644
index 000000000..62595cf1e
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkArithmeticMode.cpp
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkArithmeticModePriv.h"
+#include "SkColorPriv.h"
+#include "SkNx.h"
+#include "SkRasterPipeline.h"
+#include "SkReadBuffer.h"
+#include "SkString.h"
+#include "SkUnPreMultiply.h"
+#include "SkWriteBuffer.h"
+#if SK_SUPPORT_GPU
+#include "SkArithmeticMode_gpu.h"
+#endif
+
+class SkArithmeticMode_scalar : public SkXfermode {
+public:
+ SkArithmeticMode_scalar(SkScalar k1, SkScalar k2, SkScalar k3, SkScalar k4,
+ bool enforcePMColor) {
+ fK[0] = k1;
+ fK[1] = k2;
+ fK[2] = k3;
+ fK[3] = k4;
+ fEnforcePMColor = enforcePMColor;
+ }
+
+ void xfer32(SkPMColor[], const SkPMColor[], int count, const SkAlpha[]) const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkArithmeticMode_scalar)
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> makeFragmentProcessorForImageFilter(
+ sk_sp<GrFragmentProcessor> dst) const override;
+ sk_sp<GrXPFactory> asXPFactory() const override;
+#endif
+
+ bool isArithmetic(SkArithmeticParams* params) const override {
+ if (params) {
+ memcpy(params->fK, fK, 4 * sizeof(float));
+ params->fEnforcePMColor = fEnforcePMColor;
+ }
+ return true;
+ }
+
+private:
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.writeScalar(fK[0]);
+ buffer.writeScalar(fK[1]);
+ buffer.writeScalar(fK[2]);
+ buffer.writeScalar(fK[3]);
+ buffer.writeBool(fEnforcePMColor);
+ }
+
+ SkScalar fK[4];
+ bool fEnforcePMColor;
+
+ friend class SkArithmeticMode;
+
+ typedef SkXfermode INHERITED;
+};
+
+sk_sp<SkFlattenable> SkArithmeticMode_scalar::CreateProc(SkReadBuffer& buffer) {
+ const SkScalar k1 = buffer.readScalar();
+ const SkScalar k2 = buffer.readScalar();
+ const SkScalar k3 = buffer.readScalar();
+ const SkScalar k4 = buffer.readScalar();
+ const bool enforcePMColor = buffer.readBool();
+ return SkArithmeticMode::Make(k1, k2, k3, k4, enforcePMColor);
+}
+
+void SkArithmeticMode_scalar::xfer32(SkPMColor dst[], const SkPMColor src[],
+ int count, const SkAlpha aaCoverage[]) const {
+ const Sk4f k1 = fK[0] * (1/255.0f),
+ k2 = fK[1],
+ k3 = fK[2],
+ k4 = fK[3] * 255.0f + 0.5f;
+
+ auto pin = [](float min, const Sk4f& val, float max) {
+ return Sk4f::Max(min, Sk4f::Min(val, max));
+ };
+
+ for (int i = 0; i < count; i++) {
+ if (aaCoverage && aaCoverage[i] == 0) {
+ continue;
+ }
+
+ Sk4f s = SkNx_cast<float>(Sk4b::Load(src+i)),
+ d = SkNx_cast<float>(Sk4b::Load(dst+i)),
+ r = pin(0, k1*s*d + k2*s + k3*d + k4, 255);
+
+ if (fEnforcePMColor) {
+ Sk4f a = SkNx_shuffle<3,3,3,3>(r);
+ r = Sk4f::Min(a, r);
+ }
+
+ if (aaCoverage && aaCoverage[i] != 255) {
+ Sk4f c = aaCoverage[i] * (1/255.0f);
+ r = d + (r-d)*c;
+ }
+
+ SkNx_cast<uint8_t>(r).store(dst+i);
+ }
+}
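+
+// Scalar reference for the SIMD loop above (added commentary, not upstream
+// text). Per channel, with s and d in [0, 255], the blend is
+//
+//     r = clamp(k1 * s * d / 255  +  k2 * s  +  k3 * d  +  k4 * 255,  0, 255)
+//
+// The constants at the top of the function fold the 1/255 into k1 and the
+// * 255 (plus 0.5 for rounding) into k4 so the inner loop is a pure
+// multiply-add. Example with fK = {0, 0.5, 0.5, 0}: s = 100, d = 200 gives
+// r = 0.5*100 + 0.5*200 = 150. With fEnforcePMColor set, each RGB channel is
+// then clamped to the result's alpha to keep the pixel premultiplied.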
+
+#ifndef SK_IGNORE_TO_STRING
+void SkArithmeticMode_scalar::toString(SkString* str) const {
+ str->append("SkArithmeticMode_scalar: ");
+ for (int i = 0; i < 4; ++i) {
+ str->appendScalar(fK[i]);
+ str->append(" ");
+ }
+ str->appendS32(fEnforcePMColor ? 1 : 0);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkXfermode> SkArithmeticMode::Make(SkScalar k1, SkScalar k2, SkScalar k3, SkScalar k4,
+ bool enforcePMColor) {
+ if (SkScalarNearlyZero(k1) && SkScalarNearlyEqual(k2, SK_Scalar1) &&
+ SkScalarNearlyZero(k3) && SkScalarNearlyZero(k4)) {
+ return SkXfermode::Make(SkXfermode::kSrc_Mode);
+ } else if (SkScalarNearlyZero(k1) && SkScalarNearlyZero(k2) &&
+ SkScalarNearlyEqual(k3, SK_Scalar1) && SkScalarNearlyZero(k4)) {
+ return SkXfermode::Make(SkXfermode::kDst_Mode);
+ }
+ return sk_make_sp<SkArithmeticMode_scalar>(k1, k2, k3, k4, enforcePMColor);
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+sk_sp<GrFragmentProcessor> SkArithmeticMode_scalar::makeFragmentProcessorForImageFilter(
+ sk_sp<GrFragmentProcessor> dst) const {
+ return GrArithmeticFP::Make(SkScalarToFloat(fK[0]),
+ SkScalarToFloat(fK[1]),
+ SkScalarToFloat(fK[2]),
+ SkScalarToFloat(fK[3]),
+ fEnforcePMColor,
+ std::move(dst));
+}
+
+sk_sp<GrXPFactory> SkArithmeticMode_scalar::asXPFactory() const {
+ return GrArithmeticXPFactory::Make(SkScalarToFloat(fK[0]),
+ SkScalarToFloat(fK[1]),
+ SkScalarToFloat(fK[2]),
+ SkScalarToFloat(fK[3]),
+ fEnforcePMColor);
+}
+
+#endif
+
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkArithmeticMode)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkArithmeticMode_scalar)
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END
diff --git a/gfx/skia/skia/src/effects/SkArithmeticModePriv.h b/gfx/skia/skia/src/effects/SkArithmeticModePriv.h
new file mode 100644
index 000000000..e619274d2
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkArithmeticModePriv.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkArithmeticModePriv_DEFINED
+#define SkArithmeticModePriv_DEFINED
+
+#include "SkArithmeticMode.h"
+
+struct SkArithmeticParams {
+ float fK[4];
+ bool fEnforcePMColor;
+};
+
+#ifndef SK_SUPPORT_LEGACY_ARITHMETICMODE
+
+class SK_API SkArithmeticMode {
+public:
+ /**
+ * result = clamp[k1 * src * dst + k2 * src + k3 * dst + k4]
+ *
+ * k1=k2=k3=0, k4=1.0 results in returning opaque white
+ * k1=k3=k4=0, k2=1.0 results in returning the src
+ * k1=k2=k4=0, k3=1.0 results in returning the dst
+ */
+ static sk_sp<SkXfermode> Make(SkScalar k1, SkScalar k2, SkScalar k3, SkScalar k4,
+ bool enforcePMColor = true);
+#ifdef SK_SUPPORT_LEGACY_XFERMODE_PTR
+ static SkXfermode* Create(SkScalar k1, SkScalar k2,
+ SkScalar k3, SkScalar k4,
+ bool enforcePMColor = true) {
+ return Make(k1, k2, k3, k4, enforcePMColor).release();
+ }
+#endif
+
+ SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP();
+
+private:
+ SkArithmeticMode(); // can't be instantiated
+};
+
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/effects/SkArithmeticMode_gpu.cpp b/gfx/skia/skia/src/effects/SkArithmeticMode_gpu.cpp
new file mode 100644
index 000000000..d20ebbe42
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkArithmeticMode_gpu.cpp
@@ -0,0 +1,296 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkArithmeticMode_gpu.h"
+
+#if SK_SUPPORT_GPU
+#include "GrContext.h"
+#include "GrFragmentProcessor.h"
+#include "GrInvariantOutput.h"
+#include "GrProcessor.h"
+#include "GrTexture.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "glsl/GrGLSLXferProcessor.h"
+
+static void add_arithmetic_code(GrGLSLFragmentBuilder* fragBuilder,
+ const char* srcColor,
+ const char* dstColor,
+ const char* outputColor,
+ const char* kUni,
+ bool enforcePMColor) {
+ // We don't try to optimize for this case at all
+ if (nullptr == srcColor) {
+ fragBuilder->codeAppend("const vec4 src = vec4(1);");
+ } else {
+ fragBuilder->codeAppendf("vec4 src = %s;", srcColor);
+ }
+
+ fragBuilder->codeAppendf("vec4 dst = %s;", dstColor);
+ fragBuilder->codeAppendf("%s = %s.x * src * dst + %s.y * src + %s.z * dst + %s.w;",
+ outputColor, kUni, kUni, kUni, kUni);
+ fragBuilder->codeAppendf("%s = clamp(%s, 0.0, 1.0);\n", outputColor, outputColor);
+ if (enforcePMColor) {
+ fragBuilder->codeAppendf("%s.rgb = min(%s.rgb, %s.a);",
+ outputColor, outputColor, outputColor);
+ }
+}
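+
+// For reference (added commentary, not upstream text), with enforcePMColor set
+// the snippet appended above expands to roughly:
+//
+//     vec4 src = <srcColor>;              // or "const vec4 src = vec4(1);"
+//     vec4 dst = <dstColor>;
+//     <out> = k.x * src * dst + k.y * src + k.z * dst + k.w;
+//     <out> = clamp(<out>, 0.0, 1.0);
+//     <out>.rgb = min(<out>.rgb, <out>.a);   // only when enforcePMColor
+//
+// where k stands for the vec4 uniform declared by the callers, holding
+// (k1, k2, k3, k4).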
+
+class GLArithmeticFP : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs& args) override {
+ const GrArithmeticFP& arith = args.fFp.cast<GrArithmeticFP>();
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString dstColor("dstColor");
+ this->emitChild(0, nullptr, &dstColor, args);
+
+ fKUni = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType, kDefault_GrSLPrecision,
+ "k");
+ const char* kUni = args.fUniformHandler->getUniformCStr(fKUni);
+
+ add_arithmetic_code(fragBuilder,
+ args.fInputColor,
+ dstColor.c_str(),
+ args.fOutputColor,
+ kUni,
+ arith.enforcePMColor());
+ }
+
+ static void GenKey(const GrProcessor& proc, const GrGLSLCaps&, GrProcessorKeyBuilder* b) {
+ const GrArithmeticFP& arith = proc.cast<GrArithmeticFP>();
+ uint32_t key = arith.enforcePMColor() ? 1 : 0;
+ b->add32(key);
+ }
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager& pdman, const GrProcessor& proc) override {
+ const GrArithmeticFP& arith = proc.cast<GrArithmeticFP>();
+ pdman.set4f(fKUni, arith.k1(), arith.k2(), arith.k3(), arith.k4());
+ }
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fKUni;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrArithmeticFP::GrArithmeticFP(float k1, float k2, float k3, float k4, bool enforcePMColor,
+ sk_sp<GrFragmentProcessor> dst)
+ : fK1(k1), fK2(k2), fK3(k3), fK4(k4), fEnforcePMColor(enforcePMColor) {
+ this->initClassID<GrArithmeticFP>();
+
+ SkASSERT(dst);
+ SkDEBUGCODE(int dstIndex = )this->registerChildProcessor(std::move(dst));
+ SkASSERT(0 == dstIndex);
+}
+
+void GrArithmeticFP::onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const {
+ GLArithmeticFP::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrArithmeticFP::onCreateGLSLInstance() const {
+ return new GLArithmeticFP;
+}
+
+bool GrArithmeticFP::onIsEqual(const GrFragmentProcessor& fpBase) const {
+ const GrArithmeticFP& fp = fpBase.cast<GrArithmeticFP>();
+ return fK1 == fp.fK1 &&
+ fK2 == fp.fK2 &&
+ fK3 == fp.fK3 &&
+ fK4 == fp.fK4 &&
+ fEnforcePMColor == fp.fEnforcePMColor;
+}
+
+void GrArithmeticFP::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ // TODO: optimize this
+ inout->setToUnknown(GrInvariantOutput::kWill_ReadInput);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> GrArithmeticFP::TestCreate(GrProcessorTestData* d) {
+ float k1 = d->fRandom->nextF();
+ float k2 = d->fRandom->nextF();
+ float k3 = d->fRandom->nextF();
+ float k4 = d->fRandom->nextF();
+ bool enforcePMColor = d->fRandom->nextBool();
+
+ sk_sp<GrFragmentProcessor> dst(GrProcessorUnitTest::MakeChildFP(d));
+ return GrArithmeticFP::Make(k1, k2, k3, k4, enforcePMColor, std::move(dst));
+}
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrArithmeticFP);
+
+///////////////////////////////////////////////////////////////////////////////
+// Xfer Processor
+///////////////////////////////////////////////////////////////////////////////
+
+class ArithmeticXP : public GrXferProcessor {
+public:
+ ArithmeticXP(const DstTexture*, bool hasMixedSamples,
+ float k1, float k2, float k3, float k4, bool enforcePMColor);
+
+ const char* name() const override { return "Arithmetic"; }
+
+ GrGLSLXferProcessor* createGLSLInstance() const override;
+
+ float k1() const { return fK1; }
+ float k2() const { return fK2; }
+ float k3() const { return fK3; }
+ float k4() const { return fK4; }
+ bool enforcePMColor() const { return fEnforcePMColor; }
+
+private:
+ GrXferProcessor::OptFlags onGetOptimizations(const GrPipelineOptimizations& optimizations,
+ bool doesStencilWrite,
+ GrColor* overrideColor,
+ const GrCaps& caps) const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ bool onIsEqual(const GrXferProcessor& xpBase) const override {
+ const ArithmeticXP& xp = xpBase.cast<ArithmeticXP>();
+ if (fK1 != xp.fK1 ||
+ fK2 != xp.fK2 ||
+ fK3 != xp.fK3 ||
+ fK4 != xp.fK4 ||
+ fEnforcePMColor != xp.fEnforcePMColor) {
+ return false;
+ }
+ return true;
+ }
+
+ float fK1, fK2, fK3, fK4;
+ bool fEnforcePMColor;
+
+ typedef GrXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GLArithmeticXP : public GrGLSLXferProcessor {
+public:
+ GLArithmeticXP(const ArithmeticXP& arithmeticXP)
+ : fEnforcePMColor(arithmeticXP.enforcePMColor()) {
+ }
+
+ ~GLArithmeticXP() override {}
+
+ static void GenKey(const GrProcessor& processor, const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) {
+ const ArithmeticXP& arith = processor.cast<ArithmeticXP>();
+ uint32_t key = arith.enforcePMColor() ? 1 : 0;
+ b->add32(key);
+ }
+
+private:
+ void emitBlendCodeForDstRead(GrGLSLXPFragmentBuilder* fragBuilder,
+ GrGLSLUniformHandler* uniformHandler,
+ const char* srcColor,
+ const char* srcCoverage,
+ const char* dstColor,
+ const char* outColor,
+ const char* outColorSecondary,
+ const GrXferProcessor& proc) override {
+ fKUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType, kDefault_GrSLPrecision,
+ "k");
+ const char* kUni = uniformHandler->getUniformCStr(fKUni);
+
+ add_arithmetic_code(fragBuilder, srcColor, dstColor, outColor, kUni, fEnforcePMColor);
+
+ // Apply coverage.
+ INHERITED::DefaultCoverageModulation(fragBuilder, srcCoverage, dstColor, outColor,
+ outColorSecondary, proc);
+ }
+
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrXferProcessor& processor) override {
+ const ArithmeticXP& arith = processor.cast<ArithmeticXP>();
+ pdman.set4f(fKUni, arith.k1(), arith.k2(), arith.k3(), arith.k4());
+ fEnforcePMColor = arith.enforcePMColor();
+ }
+
+ GrGLSLProgramDataManager::UniformHandle fKUni;
+ bool fEnforcePMColor;
+
+ typedef GrGLSLXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+ArithmeticXP::ArithmeticXP(const DstTexture* dstTexture, bool hasMixedSamples,
+ float k1, float k2, float k3, float k4, bool enforcePMColor)
+ : INHERITED(dstTexture, true, hasMixedSamples)
+ , fK1(k1)
+ , fK2(k2)
+ , fK3(k3)
+ , fK4(k4)
+ , fEnforcePMColor(enforcePMColor) {
+ this->initClassID<ArithmeticXP>();
+}
+
+void ArithmeticXP::onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const {
+ GLArithmeticXP::GenKey(*this, caps, b);
+}
+
+GrGLSLXferProcessor* ArithmeticXP::createGLSLInstance() const { return new GLArithmeticXP(*this); }
+
+GrXferProcessor::OptFlags ArithmeticXP::onGetOptimizations(
+ const GrPipelineOptimizations& optimizations,
+ bool doesStencilWrite,
+ GrColor* overrideColor,
+ const GrCaps& caps) const {
+ return GrXferProcessor::kNone_OptFlags;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrArithmeticXPFactory::GrArithmeticXPFactory(float k1, float k2, float k3, float k4,
+ bool enforcePMColor)
+ : fK1(k1), fK2(k2), fK3(k3), fK4(k4), fEnforcePMColor(enforcePMColor) {
+ this->initClassID<GrArithmeticXPFactory>();
+}
+
+GrXferProcessor*
+GrArithmeticXPFactory::onCreateXferProcessor(const GrCaps& caps,
+ const GrPipelineOptimizations& optimizations,
+ bool hasMixedSamples,
+ const DstTexture* dstTexture) const {
+ return new ArithmeticXP(dstTexture, hasMixedSamples, fK1, fK2, fK3, fK4, fEnforcePMColor);
+}
+
+
+void GrArithmeticXPFactory::getInvariantBlendedColor(const GrProcOptInfo& colorPOI,
+ InvariantBlendedColor* blendedColor) const {
+ blendedColor->fWillBlendWithDst = true;
+
+ // TODO: We could try to optimize this more. For example if fK1 and fK3 are zero, then we won't
+ // be blending the color with dst at all so we can know what the output color is (up to the
+ // valid color components passed in).
+ blendedColor->fKnownColorFlags = kNone_GrColorComponentFlags;
+}
+
+GR_DEFINE_XP_FACTORY_TEST(GrArithmeticXPFactory);
+
+sk_sp<GrXPFactory> GrArithmeticXPFactory::TestCreate(GrProcessorTestData* d) {
+ float k1 = d->fRandom->nextF();
+ float k2 = d->fRandom->nextF();
+ float k3 = d->fRandom->nextF();
+ float k4 = d->fRandom->nextF();
+ bool enforcePMColor = d->fRandom->nextBool();
+
+ return GrArithmeticXPFactory::Make(k1, k2, k3, k4, enforcePMColor);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/effects/SkArithmeticMode_gpu.h b/gfx/skia/skia/src/effects/SkArithmeticMode_gpu.h
new file mode 100644
index 000000000..4704399d2
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkArithmeticMode_gpu.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkArithmeticMode_gpu_DEFINED
+#define SkArithmeticMode_gpu_DEFINED
+
+#include "SkTypes.h"
+
+#if SK_SUPPORT_GPU
+
+#include "GrCaps.h"
+#include "GrCoordTransform.h"
+#include "GrFragmentProcessor.h"
+#include "GrTextureAccess.h"
+#include "GrTypes.h"
+#include "GrXferProcessor.h"
+
+class GrInvariantOutput;
+class GrProcOptInfo;
+class GrTexture;
+
+///////////////////////////////////////////////////////////////////////////////
+// Fragment Processor
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLArtithmeticFP;
+
+class GrArithmeticFP : public GrFragmentProcessor {
+public:
+ static sk_sp<GrFragmentProcessor> Make(float k1, float k2, float k3, float k4,
+ bool enforcePMColor, sk_sp<GrFragmentProcessor> dst) {
+ return sk_sp<GrFragmentProcessor>(new GrArithmeticFP(k1, k2, k3, k4, enforcePMColor,
+ std::move(dst)));
+ }
+
+ ~GrArithmeticFP() override {}
+
+ const char* name() const override { return "Arithmetic"; }
+
+ SkString dumpInfo() const override {
+ SkString str;
+ str.appendf("K1: %.2f K2: %.2f K3: %.2f K4: %.2f", fK1, fK2, fK3, fK4);
+ return str;
+ }
+
+ float k1() const { return fK1; }
+ float k2() const { return fK2; }
+ float k3() const { return fK3; }
+ float k4() const { return fK4; }
+ bool enforcePMColor() const { return fEnforcePMColor; }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ GrArithmeticFP(float k1, float k2, float k3, float k4, bool enforcePMColor,
+ sk_sp<GrFragmentProcessor> dst);
+
+ float fK1, fK2, fK3, fK4;
+ bool fEnforcePMColor;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+ typedef GrFragmentProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// Xfer Processor
+///////////////////////////////////////////////////////////////////////////////
+
+class GrArithmeticXPFactory : public GrXPFactory {
+public:
+ static sk_sp<GrXPFactory> Make(float k1, float k2, float k3, float k4, bool enforcePMColor) {
+ return sk_sp<GrXPFactory>(new GrArithmeticXPFactory(k1, k2, k3, k4, enforcePMColor));
+ }
+
+ void getInvariantBlendedColor(const GrProcOptInfo& colorPOI,
+ GrXPFactory::InvariantBlendedColor*) const override;
+
+private:
+ GrArithmeticXPFactory(float k1, float k2, float k3, float k4, bool enforcePMColor);
+
+ GrXferProcessor* onCreateXferProcessor(const GrCaps& caps,
+ const GrPipelineOptimizations& optimizations,
+ bool hasMixedSamples,
+ const DstTexture*) const override;
+
+ bool onWillReadDstColor(const GrCaps&, const GrPipelineOptimizations&) const override {
+ return true;
+ }
+
+ bool onIsEqual(const GrXPFactory& xpfBase) const override {
+ const GrArithmeticXPFactory& xpf = xpfBase.cast<GrArithmeticXPFactory>();
+ if (fK1 != xpf.fK1 ||
+ fK2 != xpf.fK2 ||
+ fK3 != xpf.fK3 ||
+ fK4 != xpf.fK4 ||
+ fEnforcePMColor != xpf.fEnforcePMColor) {
+ return false;
+ }
+ return true;
+ }
+
+ GR_DECLARE_XP_FACTORY_TEST;
+
+ float fK1, fK2, fK3, fK4;
+ bool fEnforcePMColor;
+
+ typedef GrXPFactory INHERITED;
+};
+
+#endif
+#endif
diff --git a/gfx/skia/skia/src/effects/SkBlurDrawLooper.cpp b/gfx/skia/skia/src/effects/SkBlurDrawLooper.cpp
new file mode 100644
index 000000000..30583747c
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkBlurDrawLooper.cpp
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBlurDrawLooper.h"
+#include "SkBlurMask.h" // just for SkBlurMask::ConvertRadiusToSigma
+#include "SkBlurMaskFilter.h"
+#include "SkCanvas.h"
+#include "SkColorFilter.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+#include "SkMaskFilter.h"
+#include "SkPaint.h"
+#include "SkString.h"
+#include "SkStringUtils.h"
+
+SkBlurDrawLooper::SkBlurDrawLooper(SkColor color, SkScalar sigma,
+ SkScalar dx, SkScalar dy, uint32_t flags) {
+ this->init(sigma, dx, dy, color, flags);
+}
+
+// only call from constructor
+void SkBlurDrawLooper::initEffects() {
+ SkASSERT(fBlurFlags <= kAll_BlurFlag);
+ if (fSigma > 0) {
+ uint32_t flags = fBlurFlags & kIgnoreTransform_BlurFlag ?
+ SkBlurMaskFilter::kIgnoreTransform_BlurFlag :
+ SkBlurMaskFilter::kNone_BlurFlag;
+
+ flags |= fBlurFlags & kHighQuality_BlurFlag ?
+ SkBlurMaskFilter::kHighQuality_BlurFlag :
+ SkBlurMaskFilter::kNone_BlurFlag;
+
+ fBlur = SkBlurMaskFilter::Make(kNormal_SkBlurStyle, fSigma, flags);
+ } else {
+ fBlur = nullptr;
+ }
+
+ if (fBlurFlags & kOverrideColor_BlurFlag) {
+ // Force the override color to full alpha (255), since transparency is
+ // already baked into the blurred mask.
+ SkColor opaqueColor = SkColorSetA(fBlurColor, 255);
+ // The SrcIn xfer mode will multiply 'color' by the incoming alpha.
+ fColorFilter = SkColorFilter::MakeModeFilter(opaqueColor, SkXfermode::kSrcIn_Mode);
+ } else {
+ fColorFilter = nullptr;
+ }
+}
+
+void SkBlurDrawLooper::init(SkScalar sigma, SkScalar dx, SkScalar dy,
+ SkColor color, uint32_t flags) {
+ fSigma = sigma;
+ fDx = dx;
+ fDy = dy;
+ fBlurColor = color;
+ fBlurFlags = flags;
+
+ this->initEffects();
+}
+
+sk_sp<SkFlattenable> SkBlurDrawLooper::CreateProc(SkReadBuffer& buffer) {
+ const SkColor color = buffer.readColor();
+ const SkScalar sigma = buffer.readScalar();
+ const SkScalar dx = buffer.readScalar();
+ const SkScalar dy = buffer.readScalar();
+ const uint32_t flags = buffer.read32();
+ return Make(color, sigma, dx, dy, flags);
+}
+
+void SkBlurDrawLooper::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeColor(fBlurColor);
+ buffer.writeScalar(fSigma);
+ buffer.writeScalar(fDx);
+ buffer.writeScalar(fDy);
+ buffer.write32(fBlurFlags);
+}
+
+bool SkBlurDrawLooper::asABlurShadow(BlurShadowRec* rec) const {
+ if (fSigma <= 0 || (fBlurFlags & kIgnoreTransform_BlurFlag)) {
+ return false;
+ }
+
+ if (rec) {
+ rec->fSigma = fSigma;
+ rec->fColor = fBlurColor;
+ rec->fOffset.set(fDx, fDy);
+ rec->fStyle = kNormal_SkBlurStyle;
+ rec->fQuality = (fBlurFlags & kHighQuality_BlurFlag) ?
+ kHigh_SkBlurQuality : kLow_SkBlurQuality;
+ }
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+
+SkDrawLooper::Context* SkBlurDrawLooper::createContext(SkCanvas*, void* storage) const {
+ return new (storage) BlurDrawLooperContext(this);
+}
+
+SkBlurDrawLooper::BlurDrawLooperContext::BlurDrawLooperContext(
+ const SkBlurDrawLooper* looper)
+ : fLooper(looper), fState(SkBlurDrawLooper::kBeforeEdge) {}
+
+bool SkBlurDrawLooper::BlurDrawLooperContext::next(SkCanvas* canvas,
+ SkPaint* paint) {
+ switch (fState) {
+ case kBeforeEdge:
+ // we do nothing if a maskfilter is already installed
+ if (paint->getMaskFilter()) {
+ fState = kDone;
+ return false;
+ }
+#ifdef SK_BUILD_FOR_ANDROID
+ SkColor blurColor;
+ blurColor = fLooper->fBlurColor;
+ if (SkColorGetA(blurColor) == 255) {
+ blurColor = SkColorSetA(blurColor, paint->getAlpha());
+ }
+ paint->setColor(blurColor);
+#else
+ paint->setColor(fLooper->fBlurColor);
+#endif
+ paint->setMaskFilter(fLooper->fBlur);
+ paint->setColorFilter(fLooper->fColorFilter);
+ canvas->save();
+ if (fLooper->fBlurFlags & kIgnoreTransform_BlurFlag) {
+ SkMatrix transform(canvas->getTotalMatrix());
+ transform.postTranslate(fLooper->fDx, fLooper->fDy);
+ canvas->setMatrix(transform);
+ } else {
+ canvas->translate(fLooper->fDx, fLooper->fDy);
+ }
+ fState = kAfterEdge;
+ return true;
+ case kAfterEdge:
+ canvas->restore();
+ fState = kDone;
+ return true;
+ default:
+ SkASSERT(kDone == fState);
+ return false;
+ }
+}
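+
+// Two-pass behaviour in plain words (added commentary, not upstream text): the
+// first next() call (kBeforeEdge) installs the blur mask filter and the
+// optional color filter, translates the canvas by (fDx, fDy) -- or rebuilds the
+// matrix when kIgnoreTransform_BlurFlag is set -- and lets the caller draw the
+// shadow. The second call (kAfterEdge) restores the canvas and lets the caller
+// draw the original, unmodified geometry on top. The third call returns false,
+// ending the loop.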
+
+#ifndef SK_IGNORE_TO_STRING
+void SkBlurDrawLooper::toString(SkString* str) const {
+ str->append("SkBlurDrawLooper: ");
+
+ str->append("dx: ");
+ str->appendScalar(fDx);
+
+ str->append(" dy: ");
+ str->appendScalar(fDy);
+
+ str->append(" color: ");
+ str->appendHex(fBlurColor);
+
+ str->append(" flags: (");
+ if (kNone_BlurFlag == fBlurFlags) {
+ str->append("None");
+ } else {
+ bool needsSeparator = false;
+ SkAddFlagToString(str, SkToBool(kIgnoreTransform_BlurFlag & fBlurFlags), "IgnoreTransform",
+ &needsSeparator);
+ SkAddFlagToString(str, SkToBool(kOverrideColor_BlurFlag & fBlurFlags), "OverrideColor",
+ &needsSeparator);
+ SkAddFlagToString(str, SkToBool(kHighQuality_BlurFlag & fBlurFlags), "HighQuality",
+ &needsSeparator);
+ }
+ str->append(")");
+
+ // TODO: add optional "fBlurFilter->toString(str);" when SkMaskFilter::toString is added
+ // alternatively we could cache the radius in SkBlurDrawLooper and just add it here
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/SkBlurMask.cpp b/gfx/skia/skia/src/effects/SkBlurMask.cpp
new file mode 100644
index 000000000..d22881aa9
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkBlurMask.cpp
@@ -0,0 +1,993 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkBlurMask.h"
+#include "SkMath.h"
+#include "SkTemplates.h"
+#include "SkEndian.h"
+
+
+// This constant approximates the scaling done in the software path's
+// "high quality" mode, in SkBlurMask::Blur() (1 / sqrt(3)).
+// IMHO, it actually should be 1: we blur less than we should
+// according to the CSS and canvas specs, simply because Safari does the same.
+// Firefox used to do the same too, until 4.0, where they fixed it. So at some
+// point we should probably get rid of these scaling constants and rebaseline
+// all the blur tests.
+static const SkScalar kBLUR_SIGMA_SCALE = 0.57735f;
+
+SkScalar SkBlurMask::ConvertRadiusToSigma(SkScalar radius) {
+ return radius > 0 ? kBLUR_SIGMA_SCALE * radius + 0.5f : 0.0f;
+}
+
+SkScalar SkBlurMask::ConvertSigmaToRadius(SkScalar sigma) {
+ return sigma > 0.5f ? (sigma - 0.5f) / kBLUR_SIGMA_SCALE : 0.0f;
+}
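+
+// Worked numbers (added commentary, not upstream text): with
+// kBLUR_SIGMA_SCALE = 0.57735 (~ 1/sqrt(3)),
+//
+//     ConvertRadiusToSigma(10)   = 0.57735 * 10 + 0.5     ~= 6.27
+//     ConvertSigmaToRadius(6.27) = (6.27 - 0.5) / 0.57735 ~= 10.0
+//
+// so the two functions are (approximate) inverses of each other, and a radius
+// of 0 maps to a sigma of 0.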
+
+#define UNROLL_SEPARABLE_LOOPS
+
+/**
+ * This function performs a box blur in X, of the given radius. If the
+ * "transpose" parameter is true, it will transpose the pixels on write,
+ * such that X and Y are swapped. Reads are always performed from contiguous
+ * memory in X, for speed. The destination buffer (dst) must be at least
+ * (width + leftRadius + rightRadius) * height bytes in size.
+ *
+ * This is what the inner loop looks like before unrolling, and with the two
+ * cases broken out separately (width < diameter, width >= diameter):
+ *
+ * if (width < diameter) {
+ * for (int x = 0; x < width; ++x) {
+ * sum += *right++;
+ * *dptr = (sum * scale + half) >> 24;
+ * dptr += dst_x_stride;
+ * }
+ * for (int x = width; x < diameter; ++x) {
+ * *dptr = (sum * scale + half) >> 24;
+ * dptr += dst_x_stride;
+ * }
+ * for (int x = 0; x < width; ++x) {
+ * *dptr = (sum * scale + half) >> 24;
+ * sum -= *left++;
+ * dptr += dst_x_stride;
+ * }
+ * } else {
+ * for (int x = 0; x < diameter; ++x) {
+ * sum += *right++;
+ * *dptr = (sum * scale + half) >> 24;
+ * dptr += dst_x_stride;
+ * }
+ * for (int x = diameter; x < width; ++x) {
+ * sum += *right++;
+ * *dptr = (sum * scale + half) >> 24;
+ * sum -= *left++;
+ * dptr += dst_x_stride;
+ * }
+ * for (int x = 0; x < diameter; ++x) {
+ * *dptr = (sum * scale + half) >> 24;
+ * sum -= *left++;
+ * dptr += dst_x_stride;
+ * }
+ * }
+ */
+static int boxBlur(const uint8_t* src, int src_y_stride, uint8_t* dst,
+ int leftRadius, int rightRadius, int width, int height,
+ bool transpose)
+{
+ int diameter = leftRadius + rightRadius;
+ int kernelSize = diameter + 1;
+ int border = SkMin32(width, diameter);
+ uint32_t scale = (1 << 24) / kernelSize;
+ int new_width = width + SkMax32(leftRadius, rightRadius) * 2;
+ int dst_x_stride = transpose ? height : 1;
+ int dst_y_stride = transpose ? 1 : new_width;
+ uint32_t half = 1 << 23;
+ for (int y = 0; y < height; ++y) {
+ uint32_t sum = 0;
+ uint8_t* dptr = dst + y * dst_y_stride;
+ const uint8_t* right = src + y * src_y_stride;
+ const uint8_t* left = right;
+ for (int x = 0; x < rightRadius - leftRadius; x++) {
+ *dptr = 0;
+ dptr += dst_x_stride;
+ }
+#define LEFT_BORDER_ITER \
+ sum += *right++; \
+ *dptr = (sum * scale + half) >> 24; \
+ dptr += dst_x_stride;
+
+ int x = 0;
+#ifdef UNROLL_SEPARABLE_LOOPS
+ for (; x < border - 16; x += 16) {
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ }
+#endif
+ for (; x < border; ++x) {
+ LEFT_BORDER_ITER
+ }
+#undef LEFT_BORDER_ITER
+#define TRIVIAL_ITER \
+ *dptr = (sum * scale + half) >> 24; \
+ dptr += dst_x_stride;
+ x = width;
+#ifdef UNROLL_SEPARABLE_LOOPS
+ for (; x < diameter - 16; x += 16) {
+ TRIVIAL_ITER
+ TRIVIAL_ITER
+ TRIVIAL_ITER
+ TRIVIAL_ITER
+ TRIVIAL_ITER
+ TRIVIAL_ITER
+ TRIVIAL_ITER
+ TRIVIAL_ITER
+ TRIVIAL_ITER
+ TRIVIAL_ITER
+ TRIVIAL_ITER
+ TRIVIAL_ITER
+ TRIVIAL_ITER
+ TRIVIAL_ITER
+ TRIVIAL_ITER
+ TRIVIAL_ITER
+ }
+#endif
+ for (; x < diameter; ++x) {
+ TRIVIAL_ITER
+ }
+#undef TRIVIAL_ITER
+#define CENTER_ITER \
+ sum += *right++; \
+ *dptr = (sum * scale + half) >> 24; \
+ sum -= *left++; \
+ dptr += dst_x_stride;
+
+ x = diameter;
+#ifdef UNROLL_SEPARABLE_LOOPS
+ for (; x < width - 16; x += 16) {
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ }
+#endif
+ for (; x < width; ++x) {
+ CENTER_ITER
+ }
+#undef CENTER_ITER
+#define RIGHT_BORDER_ITER \
+ *dptr = (sum * scale + half) >> 24; \
+ sum -= *left++; \
+ dptr += dst_x_stride;
+
+ x = 0;
+#ifdef UNROLL_SEPARABLE_LOOPS
+ for (; x < border - 16; x += 16) {
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ }
+#endif
+ for (; x < border; ++x) {
+ RIGHT_BORDER_ITER
+ }
+#undef RIGHT_BORDER_ITER
+ for (int x = 0; x < leftRadius - rightRadius; ++x) {
+ *dptr = 0;
+ dptr += dst_x_stride;
+ }
+ SkASSERT(sum == 0);
+ }
+ return new_width;
+}
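+
+// Fixed-point check for the loops above (added commentary, not upstream text):
+// each tap writes (sum * scale + half) >> 24, with scale = (1 << 24) / kernelSize
+// and half = 1 << 23, which is just round(sum / kernelSize) in 8.24 fixed point.
+// For example, with kernelSize = 3 and window values {255, 255, 0}: sum = 510,
+// scale = 5592405, and (510 * 5592405 + 8388608) >> 24 == 170 == round(510 / 3).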
+
+/**
+ * This variant of the box blur handles blurring of non-integer radii. It
+ * keeps two running sums: an outer sum for the rounded-up kernel radius, and
+ * an inner sum for the rounded-down kernel radius. For each pixel, it linearly
+ * interpolates between them. In float this would be:
+ * outer_weight * outer_sum / kernelSize +
+ * (1.0 - outer_weight) * innerSum / (kernelSize - 2)
+ *
+ * This is what the inner loop looks like before unrolling, and with the two
+ * cases broken out separately (width < diameter, width >= diameter):
+ *
+ * if (width < diameter) {
+ * for (int x = 0; x < width; x++) {
+ * inner_sum = outer_sum;
+ * outer_sum += *right++;
+ * *dptr = (outer_sum * outer_scale + inner_sum * inner_scale + half) >> 24;
+ * dptr += dst_x_stride;
+ * }
+ * for (int x = width; x < diameter; ++x) {
+ * *dptr = (outer_sum * outer_scale + inner_sum * inner_scale + half) >> 24;
+ * dptr += dst_x_stride;
+ * }
+ * for (int x = 0; x < width; x++) {
+ * inner_sum = outer_sum - *left++;
+ * *dptr = (outer_sum * outer_scale + inner_sum * inner_scale + half) >> 24;
+ * dptr += dst_x_stride;
+ * outer_sum = inner_sum;
+ * }
+ * } else {
+ * for (int x = 0; x < diameter; x++) {
+ * inner_sum = outer_sum;
+ * outer_sum += *right++;
+ * *dptr = (outer_sum * outer_scale + inner_sum * inner_scale + half) >> 24;
+ * dptr += dst_x_stride;
+ * }
+ * for (int x = diameter; x < width; ++x) {
+ * inner_sum = outer_sum - *left;
+ * outer_sum += *right++;
+ * *dptr = (outer_sum * outer_scale + inner_sum * inner_scale + half) >> 24;
+ * dptr += dst_x_stride;
+ * outer_sum -= *left++;
+ * }
+ * for (int x = 0; x < diameter; x++) {
+ * inner_sum = outer_sum - *left++;
+ * *dptr = (outer_sum * outer_scale + inner_sum * inner_scale + half) >> 24;
+ * dptr += dst_x_stride;
+ * outer_sum = inner_sum;
+ * }
+ * }
+ * }
+ * return new_width;
+ */
+
+static int boxBlurInterp(const uint8_t* src, int src_y_stride, uint8_t* dst,
+ int radius, int width, int height,
+ bool transpose, uint8_t outer_weight)
+{
+ int diameter = radius * 2;
+ int kernelSize = diameter + 1;
+ int border = SkMin32(width, diameter);
+ int inner_weight = 255 - outer_weight;
+ outer_weight += outer_weight >> 7;
+ inner_weight += inner_weight >> 7;
+ uint32_t outer_scale = (outer_weight << 16) / kernelSize;
+ uint32_t inner_scale = (inner_weight << 16) / (kernelSize - 2);
+ uint32_t half = 1 << 23;
+ int new_width = width + diameter;
+ int dst_x_stride = transpose ? height : 1;
+ int dst_y_stride = transpose ? 1 : new_width;
+ for (int y = 0; y < height; ++y) {
+ uint32_t outer_sum = 0, inner_sum = 0;
+ uint8_t* dptr = dst + y * dst_y_stride;
+ const uint8_t* right = src + y * src_y_stride;
+ const uint8_t* left = right;
+ int x = 0;
+
+#define LEFT_BORDER_ITER \
+ inner_sum = outer_sum; \
+ outer_sum += *right++; \
+ *dptr = (outer_sum * outer_scale + inner_sum * inner_scale + half) >> 24; \
+ dptr += dst_x_stride;
+
+#ifdef UNROLL_SEPARABLE_LOOPS
+ for (;x < border - 16; x += 16) {
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ LEFT_BORDER_ITER
+ }
+#endif
+
+ for (;x < border; ++x) {
+ LEFT_BORDER_ITER
+ }
+#undef LEFT_BORDER_ITER
+ for (int x = width; x < diameter; ++x) {
+ *dptr = (outer_sum * outer_scale + inner_sum * inner_scale + half) >> 24;
+ dptr += dst_x_stride;
+ }
+ x = diameter;
+
+#define CENTER_ITER \
+ inner_sum = outer_sum - *left; \
+ outer_sum += *right++; \
+ *dptr = (outer_sum * outer_scale + inner_sum * inner_scale + half) >> 24; \
+ dptr += dst_x_stride; \
+ outer_sum -= *left++;
+
+#ifdef UNROLL_SEPARABLE_LOOPS
+ for (; x < width - 16; x += 16) {
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ CENTER_ITER
+ }
+#endif
+ for (; x < width; ++x) {
+ CENTER_ITER
+ }
+#undef CENTER_ITER
+
+ #define RIGHT_BORDER_ITER \
+ inner_sum = outer_sum - *left++; \
+ *dptr = (outer_sum * outer_scale + inner_sum * inner_scale + half) >> 24; \
+ dptr += dst_x_stride; \
+ outer_sum = inner_sum;
+
+ x = 0;
+#ifdef UNROLL_SEPARABLE_LOOPS
+ for (; x < border - 16; x += 16) {
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ RIGHT_BORDER_ITER
+ }
+#endif
+ for (; x < border; ++x) {
+ RIGHT_BORDER_ITER
+ }
+#undef RIGHT_BORDER_ITER
+ SkASSERT(outer_sum == 0 && inner_sum == 0);
+ }
+ return new_width;
+}
+
+static void get_adjusted_radii(SkScalar passRadius, int *loRadius, int *hiRadius)
+{
+ *loRadius = *hiRadius = SkScalarCeilToInt(passRadius);
+ if (SkIntToScalar(*hiRadius) - passRadius > 0.5f) {
+ *loRadius = *hiRadius - 1;
+ }
+}
+
+#include "SkColorPriv.h"
+
+static void merge_src_with_blur(uint8_t dst[], int dstRB,
+ const uint8_t src[], int srcRB,
+ const uint8_t blur[], int blurRB,
+ int sw, int sh) {
+ dstRB -= sw;
+ srcRB -= sw;
+ blurRB -= sw;
+ while (--sh >= 0) {
+ for (int x = sw - 1; x >= 0; --x) {
+ *dst = SkToU8(SkAlphaMul(*blur, SkAlpha255To256(*src)));
+ dst += 1;
+ src += 1;
+ blur += 1;
+ }
+ dst += dstRB;
+ src += srcRB;
+ blur += blurRB;
+ }
+}
+
+static void clamp_with_orig(uint8_t dst[], int dstRowBytes,
+ const uint8_t src[], int srcRowBytes,
+ int sw, int sh,
+ SkBlurStyle style) {
+ int x;
+ while (--sh >= 0) {
+ switch (style) {
+ case kSolid_SkBlurStyle:
+ for (x = sw - 1; x >= 0; --x) {
+ int s = *src;
+ int d = *dst;
+ *dst = SkToU8(s + d - SkMulDiv255Round(s, d));
+ dst += 1;
+ src += 1;
+ }
+ break;
+ case kOuter_SkBlurStyle:
+ for (x = sw - 1; x >= 0; --x) {
+ if (*src) {
+ *dst = SkToU8(SkAlphaMul(*dst, SkAlpha255To256(255 - *src)));
+ }
+ dst += 1;
+ src += 1;
+ }
+ break;
+ default:
+ SkDEBUGFAIL("Unexpected blur style here");
+ break;
+ }
+ dst += dstRowBytes - sw;
+ src += srcRowBytes - sw;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// we use a local function to wrap the class static method to work around
+// a bug in gcc98
+void SkMask_FreeImage(uint8_t* image);
+void SkMask_FreeImage(uint8_t* image) {
+ SkMask::FreeImage(image);
+}
+
+bool SkBlurMask::BoxBlur(SkMask* dst, const SkMask& src,
+ SkScalar sigma, SkBlurStyle style, SkBlurQuality quality,
+ SkIPoint* margin, bool force_quality) {
+
+ if (src.fFormat != SkMask::kA8_Format) {
+ return false;
+ }
+
+ // Force high quality off for small radii (performance)
+ if (!force_quality && sigma <= SkIntToScalar(2)) {
+ quality = kLow_SkBlurQuality;
+ }
+
+ SkScalar passRadius;
+ if (kHigh_SkBlurQuality == quality) {
+ // For the high quality path the 3 pass box blur kernel width is
+ // 6*rad+1 while the full Gaussian width is 6*sigma.
+ passRadius = sigma - (1/6.0f);
+ } else {
+ // For the low quality path we only attempt to cover 3*sigma of the
+ // Gaussian blur area (1.5*sigma on each side). The single pass box
+ // blur's kernel size is 2*rad+1.
+ passRadius = 1.5f*sigma - 0.5f;
+ }
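+
+ // Worked numbers (added commentary, not upstream text): for sigma = 4 the
+ // high quality path uses passRadius = 4 - 1/6 ~= 3.83 with three passes,
+ // while the low quality path uses passRadius = 1.5*4 - 0.5 = 5.5 with a
+ // single pass; after the ceil() below this pads the mask by
+ // 3 * ceil(3.83) = 12 pixels vs. 1 * ceil(5.5) = 6 pixels per side.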
+
+ // highQuality: use three box blur passes as a cheap way
+ // to approximate a Gaussian blur
+ int passCount = (kHigh_SkBlurQuality == quality) ? 3 : 1;
+
+ int rx = SkScalarCeilToInt(passRadius);
+ int outerWeight = 255 - SkScalarRoundToInt((SkIntToScalar(rx) - passRadius) * 255);
+
+ SkASSERT(rx >= 0);
+ SkASSERT((unsigned)outerWeight <= 255);
+ if (rx <= 0) {
+ return false;
+ }
+
+ int ry = rx; // only do square blur for now
+
+ int padx = passCount * rx;
+ int pady = passCount * ry;
+
+ if (margin) {
+ margin->set(padx, pady);
+ }
+ dst->fBounds.set(src.fBounds.fLeft - padx, src.fBounds.fTop - pady,
+ src.fBounds.fRight + padx, src.fBounds.fBottom + pady);
+
+ dst->fRowBytes = dst->fBounds.width();
+ dst->fFormat = SkMask::kA8_Format;
+ dst->fImage = nullptr;
+
+ if (src.fImage) {
+ size_t dstSize = dst->computeImageSize();
+ if (0 == dstSize) {
+ return false; // too big to allocate, abort
+ }
+
+ int sw = src.fBounds.width();
+ int sh = src.fBounds.height();
+ const uint8_t* sp = src.fImage;
+ uint8_t* dp = SkMask::AllocImage(dstSize);
+ SkAutoTCallVProc<uint8_t, SkMask_FreeImage> autoCall(dp);
+
+ // build the blurry destination
+ SkAutoTMalloc<uint8_t> tmpBuffer(dstSize);
+ uint8_t* tp = tmpBuffer.get();
+ int w = sw, h = sh;
+
+ if (outerWeight == 255) {
+ int loRadius, hiRadius;
+ get_adjusted_radii(passRadius, &loRadius, &hiRadius);
+ if (kHigh_SkBlurQuality == quality) {
+ // Do three X blurs, with a transpose on the final one.
+ w = boxBlur(sp, src.fRowBytes, tp, loRadius, hiRadius, w, h, false);
+ w = boxBlur(tp, w, dp, hiRadius, loRadius, w, h, false);
+ w = boxBlur(dp, w, tp, hiRadius, hiRadius, w, h, true);
+ // Do three Y blurs, with a transpose on the final one.
+ h = boxBlur(tp, h, dp, loRadius, hiRadius, h, w, false);
+ h = boxBlur(dp, h, tp, hiRadius, loRadius, h, w, false);
+ h = boxBlur(tp, h, dp, hiRadius, hiRadius, h, w, true);
+ } else {
+ w = boxBlur(sp, src.fRowBytes, tp, rx, rx, w, h, true);
+ h = boxBlur(tp, h, dp, ry, ry, h, w, true);
+ }
+ } else {
+ if (kHigh_SkBlurQuality == quality) {
+ // Do three X blurs, with a transpose on the final one.
+ w = boxBlurInterp(sp, src.fRowBytes, tp, rx, w, h, false, outerWeight);
+ w = boxBlurInterp(tp, w, dp, rx, w, h, false, outerWeight);
+ w = boxBlurInterp(dp, w, tp, rx, w, h, true, outerWeight);
+ // Do three Y blurs, with a transpose on the final one.
+ h = boxBlurInterp(tp, h, dp, ry, h, w, false, outerWeight);
+ h = boxBlurInterp(dp, h, tp, ry, h, w, false, outerWeight);
+ h = boxBlurInterp(tp, h, dp, ry, h, w, true, outerWeight);
+ } else {
+ w = boxBlurInterp(sp, src.fRowBytes, tp, rx, w, h, true, outerWeight);
+ h = boxBlurInterp(tp, h, dp, ry, h, w, true, outerWeight);
+ }
+ }
+
+ dst->fImage = dp;
+ // if need be, alloc the "real" dst (same size as src) and copy/merge
+ // the blur into it (applying the src)
+ if (style == kInner_SkBlurStyle) {
+ // now we allocate the "real" dst, mirror the size of src
+ size_t srcSize = src.computeImageSize();
+ if (0 == srcSize) {
+ return false; // too big to allocate, abort
+ }
+ dst->fImage = SkMask::AllocImage(srcSize);
+ merge_src_with_blur(dst->fImage, src.fRowBytes,
+ sp, src.fRowBytes,
+ dp + passCount * (rx + ry * dst->fRowBytes),
+ dst->fRowBytes, sw, sh);
+ SkMask::FreeImage(dp);
+ } else if (style != kNormal_SkBlurStyle) {
+ clamp_with_orig(dp + passCount * (rx + ry * dst->fRowBytes),
+ dst->fRowBytes, sp, src.fRowBytes, sw, sh, style);
+ }
+ (void)autoCall.release();
+ }
+
+ if (style == kInner_SkBlurStyle) {
+ dst->fBounds = src.fBounds; // restore trimmed bounds
+ dst->fRowBytes = src.fRowBytes;
+ }
+
+ return true;
+}
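+
+// To make the pass-radius math above concrete, take sigma == 4 (the numbers
+// follow directly from the code above):
+//   high quality: passRadius = 4 - 1/6 ~= 3.83, rx = 4, passCount = 3,
+//                 so padx = pady = 12 and *margin = (12, 12);
+//   low quality:  passRadius = 1.5*4 - 0.5 = 5.5, rx = 6, passCount = 1,
+//                 so padx = pady = 6 and *margin = (6, 6).
+// In both cases dst->fBounds is src.fBounds outset by (padx, pady), and
+// outerWeight measures how fractional the pass radius is (255 means it is
+// essentially an exact integer, so the cheaper non-interpolating loops run).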
+
+/* Convolving a box with itself three times results in a piecewise
+ quadratic function:
+
+ 0 x <= -1.5
+ 9/8 + 3/2 x + 1/2 x^2 -1.5 < x <= -.5
+ 3/4 - x^2 -.5 < x <= .5
+ 9/8 - 3/2 x + 1/2 x^2 0.5 < x <= 1.5
+ 0 1.5 < x
+
+ Mathematica:
+
+ g[x_] := Piecewise [ {
+ {9/8 + 3/2 x + 1/2 x^2 , -1.5 < x <= -.5},
+ {3/4 - x^2 , -.5 < x <= .5},
+ {9/8 - 3/2 x + 1/2 x^2 , 0.5 < x <= 1.5}
+ }, 0]
+
+ To get the profile curve of the blurred step function at the rectangle
+ edge, we evaluate the indefinite integral, which is piecewise cubic:
+
+ 0 x <= -1.5
+ 9/16 + 9/8 x + 3/4 x^2 + 1/6 x^3 -1.5 < x <= -0.5
+ 1/2 + 3/4 x - 1/3 x^3 -.5 < x <= .5
+ 7/16 + 9/8 x - 3/4 x^2 + 1/6 x^3 .5 < x <= 1.5
+ 1 1.5 < x
+
+ in Mathematica code:
+
+ gi[x_] := Piecewise[ {
+ { 0 , x <= -1.5 },
+ { 9/16 + 9/8 x + 3/4 x^2 + 1/6 x^3, -1.5 < x <= -0.5 },
+ { 1/2 + 3/4 x - 1/3 x^3 , -.5 < x <= .5},
+ { 7/16 + 9/8 x - 3/4 x^2 + 1/6 x^3, .5 < x <= 1.5}
+ },1]
+*/
+
+static float gaussianIntegral(float x) {
+ if (x > 1.5f) {
+ return 0.0f;
+ }
+ if (x < -1.5f) {
+ return 1.0f;
+ }
+
+ float x2 = x*x;
+ float x3 = x2*x;
+
+ if ( x > 0.5f ) {
+ return 0.5625f - (x3 / 6.0f - 3.0f * x2 * 0.25f + 1.125f * x);
+ }
+ if ( x > -0.5f ) {
+ return 0.5f - (0.75f * x - x3 / 3.0f);
+ }
+ return 0.4375f + (-x3 / 6.0f - 3.0f * x2 * 0.25f - 1.125f * x);
+}
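+
+// A few sanity checks on the integral above (exact values of the piecewise
+// cubic): gaussianIntegral(-1.5f) == 1.0f, gaussianIntegral(0.0f) == 0.5f and
+// gaussianIntegral(1.5f) == 0.0f, with gaussianIntegral(x) +
+// gaussianIntegral(-x) == 1 everywhere. Note that this returns 1 - gi(x) from
+// the Mathematica snippet above, i.e. the coverage of the half-plane rather
+// than the raw step-function integral.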
+
+/* ComputeBlurProfile allocates and fills in an array of 8-bit
+ coverage values between 0 and 255 for the profile signature of
+ a blurred half-plane with the given blur radius. Since we're
+ going to be doing screened multiplications (i.e., 1 - (1-x)(1-y))
+ all the time, we actually fill in the profile pre-inverted
+ (already done 255-x).
+
+ It's the responsibility of the caller to delete the
+ memory returned in profile_out.
+*/
+
+uint8_t* SkBlurMask::ComputeBlurProfile(SkScalar sigma) {
+ int size = SkScalarCeilToInt(6*sigma);
+
+ int center = size >> 1;
+ uint8_t* profile = new uint8_t[size];
+
+ float invr = 1.f/(2*sigma);
+
+ profile[0] = 255;
+ for (int x = 1 ; x < size ; ++x) {
+ float scaled_x = (center - x - .5f) * invr;
+ float gi = gaussianIntegral(scaled_x);
+ profile[x] = 255 - (uint8_t) (255.f * gi);
+ }
+
+ return profile;
+}
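+
+// For example, with sigma == 2 the profile has size = ceil(12) = 12 entries,
+// center == 6 and invr == 0.25; profile[0] is forced to 255 (fully inside the
+// half-plane) and the values fall off monotonically to roughly 0 at
+// profile[11] (far outside). Each entry is 255 minus the scaled integral, per
+// the pre-inversion described above.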
+
+// TODO MAYBE: Maintain a profile cache to avoid recomputing this for
+// commonly used radii. Consider baking some of the most common blur radii
+// directly in as static data?
+
+// Implementation adapted from Michael Herf's approach:
+// http://stereopsis.com/shadowrect/
+
+uint8_t SkBlurMask::ProfileLookup(const uint8_t *profile, int loc, int blurred_width, int sharp_width) {
+ int dx = SkAbs32(((loc << 1) + 1) - blurred_width) - sharp_width; // how far are we from the original edge?
+ int ox = dx >> 1;
+ if (ox < 0) {
+ ox = 0;
+ }
+
+ return profile[ox];
+}
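+
+// For example, with blurred_width == 20 and sharp_width == 10 (five pixels of
+// blur on each side): at loc == 10, the middle of the blurred span, dx is
+// negative, ox clamps to 0 and the lookup returns profile[0] == 255 (full
+// coverage); at loc == 0, the outer edge, dx == 9 and ox == 4, so the lookup
+// returns a value partway down the profile's falloff.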
+
+void SkBlurMask::ComputeBlurredScanline(uint8_t *pixels, const uint8_t *profile,
+ unsigned int width, SkScalar sigma) {
+
+ unsigned int profile_size = SkScalarCeilToInt(6*sigma);
+ SkAutoTMalloc<uint8_t> horizontalScanline(width);
+
+ unsigned int sw = width - profile_size;
+ // nearest odd number less than the profile size represents the center
+ // of the (2x scaled) profile
+ int center = ( profile_size & ~1 ) - 1;
+
+ int w = sw - center;
+
+ for (unsigned int x = 0 ; x < width ; ++x) {
+ if (profile_size <= sw) {
+ pixels[x] = ProfileLookup(profile, x, width, w);
+ } else {
+ float span = float(sw)/(2*sigma);
+ float giX = 1.5f - (x+.5f)/(2*sigma);
+ pixels[x] = (uint8_t) (255 * (gaussianIntegral(giX) - gaussianIntegral(giX + span)));
+ }
+ }
+}
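+
+// The branch above distinguishes two cases. With sigma == 2 the profile spans
+// ceil(12) == 12 pixels; for a destination scanline of width 32 the sharp
+// extent sw == 20 >= 12, so each pixel is a simple ProfileLookup. For a
+// narrower scanline, say width 16, sw == 4 < 12: the two edge profiles
+// overlap, so the code instead evaluates the difference of two Gaussian
+// integrals per pixel rather than reading the precomputed profile.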
+
+bool SkBlurMask::BlurRect(SkScalar sigma, SkMask *dst,
+ const SkRect &src, SkBlurStyle style,
+ SkIPoint *margin, SkMask::CreateMode createMode) {
+ int profile_size = SkScalarCeilToInt(6*sigma);
+
+ int pad = profile_size/2;
+ if (margin) {
+ margin->set( pad, pad );
+ }
+
+ dst->fBounds.set(SkScalarRoundToInt(src.fLeft - pad),
+ SkScalarRoundToInt(src.fTop - pad),
+ SkScalarRoundToInt(src.fRight + pad),
+ SkScalarRoundToInt(src.fBottom + pad));
+
+ dst->fRowBytes = dst->fBounds.width();
+ dst->fFormat = SkMask::kA8_Format;
+ dst->fImage = nullptr;
+
+ int sw = SkScalarFloorToInt(src.width());
+ int sh = SkScalarFloorToInt(src.height());
+
+ if (createMode == SkMask::kJustComputeBounds_CreateMode) {
+ if (style == kInner_SkBlurStyle) {
+ dst->fBounds.set(SkScalarRoundToInt(src.fLeft),
+ SkScalarRoundToInt(src.fTop),
+ SkScalarRoundToInt(src.fRight),
+ SkScalarRoundToInt(src.fBottom)); // restore trimmed bounds
+ dst->fRowBytes = sw;
+ }
+ return true;
+ }
+
+ SkAutoTDeleteArray<uint8_t> profile(ComputeBlurProfile(sigma));
+
+ size_t dstSize = dst->computeImageSize();
+ if (0 == dstSize) {
+ return false; // too big to allocate, abort
+ }
+
+ uint8_t* dp = SkMask::AllocImage(dstSize);
+
+ dst->fImage = dp;
+
+ int dstHeight = dst->fBounds.height();
+ int dstWidth = dst->fBounds.width();
+
+ uint8_t *outptr = dp;
+
+ SkAutoTMalloc<uint8_t> horizontalScanline(dstWidth);
+ SkAutoTMalloc<uint8_t> verticalScanline(dstHeight);
+
+ ComputeBlurredScanline(horizontalScanline, profile.get(), dstWidth, sigma);
+ ComputeBlurredScanline(verticalScanline, profile.get(), dstHeight, sigma);
+
+ for (int y = 0 ; y < dstHeight ; ++y) {
+ for (int x = 0 ; x < dstWidth ; x++) {
+ unsigned int maskval = SkMulDiv255Round(horizontalScanline[x], verticalScanline[y]);
+ *(outptr++) = maskval;
+ }
+ }
+
+ if (style == kInner_SkBlurStyle) {
+ // now we allocate the "real" dst, mirror the size of src
+ size_t srcSize = (size_t)(src.width() * src.height());
+ if (0 == srcSize) {
+ return false; // too big to allocate, abort
+ }
+ dst->fImage = SkMask::AllocImage(srcSize);
+ for (int y = 0 ; y < sh ; y++) {
+ uint8_t *blur_scanline = dp + (y+pad)*dstWidth + pad;
+ uint8_t *inner_scanline = dst->fImage + y*sw;
+ memcpy(inner_scanline, blur_scanline, sw);
+ }
+ SkMask::FreeImage(dp);
+
+ dst->fBounds.set(SkScalarRoundToInt(src.fLeft),
+ SkScalarRoundToInt(src.fTop),
+ SkScalarRoundToInt(src.fRight),
+ SkScalarRoundToInt(src.fBottom)); // restore trimmed bounds
+ dst->fRowBytes = sw;
+
+ } else if (style == kOuter_SkBlurStyle) {
+ for (int y = pad ; y < dstHeight-pad ; y++) {
+ uint8_t *dst_scanline = dp + y*dstWidth + pad;
+ memset(dst_scanline, 0, sw);
+ }
+ } else if (style == kSolid_SkBlurStyle) {
+ for (int y = pad ; y < dstHeight-pad ; y++) {
+ uint8_t *dst_scanline = dp + y*dstWidth + pad;
+ memset(dst_scanline, 0xff, sw);
+ }
+ }
+ // The normal style needs no post-processing here; the inner, outer and
+ // solid styles were already handled by the branches above.
+
+ return true;
+}
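+
+// Worked example of the bounds math above: sigma == 2 gives profile_size ==
+// 12 and pad == 6, so a 20x10 rect at the origin produces dst->fBounds of
+// (-6, -6, 26, 16) -- a 32x22 mask with fRowBytes == 32 -- and each mask
+// pixel is SkMulDiv255Round() of the horizontal and vertical edge profiles,
+// i.e. the separable outer product of the two blurred step functions.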
+
+bool SkBlurMask::BlurRRect(SkScalar sigma, SkMask *dst,
+ const SkRRect &src, SkBlurStyle style,
+ SkIPoint *margin, SkMask::CreateMode createMode) {
+ // Temporary for now -- always fail, should cause caller to fall back
+ // to old path. Plumbing just to land API and parallelize effort.
+
+ return false;
+}
+
+// The "simple" blur is a direct implementation of separable convolution with a discrete
+// gaussian kernel. It's "ground truth" in a sense; too slow to be used, but very
+// useful for correctness comparisons.
+
+bool SkBlurMask::BlurGroundTruth(SkScalar sigma, SkMask* dst, const SkMask& src,
+ SkBlurStyle style, SkIPoint* margin) {
+
+ if (src.fFormat != SkMask::kA8_Format) {
+ return false;
+ }
+
+ float variance = sigma * sigma;
+
+ int windowSize = SkScalarCeilToInt(sigma*6);
+ // round window size up to nearest odd number
+ windowSize |= 1;
+
+ SkAutoTMalloc<float> gaussWindow(windowSize);
+
+ int halfWindow = windowSize >> 1;
+
+ gaussWindow[halfWindow] = 1;
+
+ float windowSum = 1;
+ for (int x = 1 ; x <= halfWindow ; ++x) {
+ float gaussian = expf(-x*x / (2*variance));
+ gaussWindow[halfWindow + x] = gaussWindow[halfWindow-x] = gaussian;
+ windowSum += 2*gaussian;
+ }
+
+ // leave the filter un-normalized for now; we will divide by the
+ // normalization sum later.
+
+ int pad = halfWindow;
+ if (margin) {
+ margin->set( pad, pad );
+ }
+
+ dst->fBounds = src.fBounds;
+ dst->fBounds.outset(pad, pad);
+
+ dst->fRowBytes = dst->fBounds.width();
+ dst->fFormat = SkMask::kA8_Format;
+ dst->fImage = nullptr;
+
+ if (src.fImage) {
+
+ size_t dstSize = dst->computeImageSize();
+ if (0 == dstSize) {
+ return false; // too big to allocate, abort
+ }
+
+ int srcWidth = src.fBounds.width();
+ int srcHeight = src.fBounds.height();
+ int dstWidth = dst->fBounds.width();
+
+ const uint8_t* srcPixels = src.fImage;
+ uint8_t* dstPixels = SkMask::AllocImage(dstSize);
+ SkAutoTCallVProc<uint8_t, SkMask_FreeImage> autoCall(dstPixels);
+
+ // do the actual blur. First, make a padded copy of the source.
+ // use double pad so we never have to check if we're outside anything
+
+ int padWidth = srcWidth + 4*pad;
+ int padHeight = srcHeight;
+ int padSize = padWidth * padHeight;
+
+ SkAutoTMalloc<uint8_t> padPixels(padSize);
+ memset(padPixels, 0, padSize);
+
+ for (int y = 0 ; y < srcHeight; ++y) {
+ uint8_t* padptr = padPixels + y * padWidth + 2*pad;
+ const uint8_t* srcptr = srcPixels + y * srcWidth;
+ memcpy(padptr, srcptr, srcWidth);
+ }
+
+ // blur in X, transposing the result into a temporary floating point buffer.
+ // also double-pad the intermediate result so that the second blur doesn't
+ // have to do extra conditionals.
+
+ int tmpWidth = padHeight + 4*pad;
+ int tmpHeight = padWidth - 2*pad;
+ int tmpSize = tmpWidth * tmpHeight;
+
+ SkAutoTMalloc<float> tmpImage(tmpSize);
+ memset(tmpImage, 0, tmpSize*sizeof(tmpImage[0]));
+
+ for (int y = 0 ; y < padHeight ; ++y) {
+ uint8_t *srcScanline = padPixels + y*padWidth;
+ for (int x = pad ; x < padWidth - pad ; ++x) {
+ float *outPixel = tmpImage + (x-pad)*tmpWidth + y + 2*pad; // transposed output
+ uint8_t *windowCenter = srcScanline + x;
+ for (int i = -pad ; i <= pad ; ++i) {
+ *outPixel += gaussWindow[pad+i]*windowCenter[i];
+ }
+ *outPixel /= windowSum;
+ }
+ }
+
+ // blur in Y; now filling in the actual desired destination. We have to do
+ // the transpose again; these transposes guarantee that we read memory in
+ // linear order.
+
+ for (int y = 0 ; y < tmpHeight ; ++y) {
+ float *srcScanline = tmpImage + y*tmpWidth;
+ for (int x = pad ; x < tmpWidth - pad ; ++x) {
+ float *windowCenter = srcScanline + x;
+ float finalValue = 0;
+ for (int i = -pad ; i <= pad ; ++i) {
+ finalValue += gaussWindow[pad+i]*windowCenter[i];
+ }
+ finalValue /= windowSum;
+ uint8_t *outPixel = dstPixels + (x-pad)*dstWidth + y; // transposed output
+ int integerPixel = int(finalValue + 0.5f);
+ *outPixel = SkClampMax( SkClampPos(integerPixel), 255 );
+ }
+ }
+
+ dst->fImage = dstPixels;
+ // if need be, alloc the "real" dst (same size as src) and copy/merge
+ // the blur into it (applying the src)
+ if (style == kInner_SkBlurStyle) {
+ // now we allocate the "real" dst, mirror the size of src
+ size_t srcSize = src.computeImageSize();
+ if (0 == srcSize) {
+ return false; // too big to allocate, abort
+ }
+ dst->fImage = SkMask::AllocImage(srcSize);
+ merge_src_with_blur(dst->fImage, src.fRowBytes,
+ srcPixels, src.fRowBytes,
+ dstPixels + pad*dst->fRowBytes + pad,
+ dst->fRowBytes, srcWidth, srcHeight);
+ SkMask::FreeImage(dstPixels);
+ } else if (style != kNormal_SkBlurStyle) {
+ clamp_with_orig(dstPixels + pad*dst->fRowBytes + pad,
+ dst->fRowBytes, srcPixels, src.fRowBytes, srcWidth, srcHeight, style);
+ }
+ (void)autoCall.release();
+ }
+
+ if (style == kInner_SkBlurStyle) {
+ dst->fBounds = src.fBounds; // restore trimmed bounds
+ dst->fRowBytes = src.fRowBytes;
+ }
+
+ return true;
+}
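+
+// For reference, with sigma == 1 the window above is 7 taps wide (ceil(6)
+// rounded up to odd), halfWindow == 3, and the unnormalized weights are
+// exp(-x*x/2) ~= { 0.011, 0.135, 0.607, 1, 0.607, 0.135, 0.011 } with
+// windowSum ~= 2.51; pad == 3, so the destination bounds grow by 3 pixels on
+// every side before the two transposed convolution passes run.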
diff --git a/gfx/skia/skia/src/effects/SkBlurMask.h b/gfx/skia/skia/src/effects/SkBlurMask.h
new file mode 100644
index 000000000..25f890e26
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkBlurMask.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlurMask_DEFINED
+#define SkBlurMask_DEFINED
+
+#include "SkBlurTypes.h"
+#include "SkShader.h"
+#include "SkMask.h"
+#include "SkRRect.h"
+
+class SkBlurMask {
+public:
+ static bool SK_WARN_UNUSED_RESULT BlurRect(SkScalar sigma, SkMask *dst, const SkRect &src,
+ SkBlurStyle, SkIPoint *margin = nullptr,
+ SkMask::CreateMode createMode =
+ SkMask::kComputeBoundsAndRenderImage_CreateMode);
+ static bool SK_WARN_UNUSED_RESULT BlurRRect(SkScalar sigma, SkMask *dst, const SkRRect &src,
+ SkBlurStyle, SkIPoint *margin = nullptr,
+ SkMask::CreateMode createMode =
+ SkMask::kComputeBoundsAndRenderImage_CreateMode);
+
+ // forceQuality will prevent BoxBlur from falling back to the low quality approach when sigma
+ // is very small -- this can be used to predict the margin bump ahead of time without completely
+ // replicating the internal logic. This permits not only simpler caching of blurred results,
+ // but also being able to predict precisely at what pixels the blurred profile of e.g. a
+ // rectangle will lie.
+
+ static bool SK_WARN_UNUSED_RESULT BoxBlur(SkMask* dst, const SkMask& src,
+ SkScalar sigma, SkBlurStyle style, SkBlurQuality,
+ SkIPoint* margin = nullptr,
+ bool forceQuality = false);
+
+ // the "ground truth" blur does a gaussian convolution; it's slow
+ // but useful for comparison purposes.
+ static bool SK_WARN_UNUSED_RESULT BlurGroundTruth(SkScalar sigma, SkMask* dst,
+ const SkMask& src,
+ SkBlurStyle, SkIPoint* margin = nullptr);
+
+ // If radius > 0, return the corresponding sigma, else return 0
+ static SkScalar ConvertRadiusToSigma(SkScalar radius);
+ // If sigma > 0.5, return the corresponding radius, else return 0
+ static SkScalar ConvertSigmaToRadius(SkScalar sigma);
+
+ /* Helper functions for analytic rectangle blurs */
+
+ /** Look up the intensity of the (one-dimensional) blurred half-plane.
+ @param profile The precomputed 1D blur profile; memory is allocated by
+ ComputeBlurProfile below and freed by the caller.
+ @param loc the location to look up; the lookup clamps invalid inputs, but
+ meaningful data are only available between 0 and blurredWidth.
+ @param blurredWidth The width of the final, blurred rectangle.
+ @param sharpWidth The width of the original, unblurred rectangle.
+ */
+ static uint8_t ProfileLookup(const uint8_t* profile, int loc, int blurredWidth, int sharpWidth);
+
+ /** Allocate memory for and populate the profile of a 1D blurred half-plane. The caller
+ must free the memory. The amount of memory allocated will be exactly ceil(6*sigma) bytes.
+ @param sigma The standard deviation of the gaussian blur kernel
+ */
+
+ static uint8_t* ComputeBlurProfile(SkScalar sigma);
+
+ /** Compute an entire scanline of a blurred step function. This is a 1D helper that
+ will produce both the horizontal and vertical profiles of the blurry rectangle.
+ @param pixels Location to store the resulting pixel data; allocated and managed by caller
+ @param profile Precomputed blur profile computed by ComputeBlurProfile above.
+ @param width Size of the pixels array.
+ @param sigma Standard deviation of the gaussian blur kernel used to compute the profile;
+ this implicitly gives the size of the profile array.
+ */
+
+ static void ComputeBlurredScanline(uint8_t* pixels, const uint8_t* profile,
+ unsigned int width, SkScalar sigma);
+
+
+
+};
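+
+// A minimal usage sketch of the analytic rect path (illustrative only; error
+// handling elided). A 20x10 rect is blurred with a 4-pixel radius; the caller
+// owns the resulting A8 image:
+//
+//   SkMask mask;
+//   SkIPoint margin;
+//   SkScalar sigma = SkBlurMask::ConvertRadiusToSigma(SkIntToScalar(4));
+//   if (SkBlurMask::BlurRect(sigma, &mask, SkRect::MakeWH(20, 10),
+//                            kNormal_SkBlurStyle, &margin)) {
+//       // mask.fImage holds coverage over the padded mask.fBounds.
+//       SkMask::FreeImage(mask.fImage);
+//   }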
+
+#endif
diff --git a/gfx/skia/skia/src/effects/SkBlurMaskFilter.cpp b/gfx/skia/skia/src/effects/SkBlurMaskFilter.cpp
new file mode 100644
index 000000000..c32e111f2
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkBlurMaskFilter.cpp
@@ -0,0 +1,1572 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBlurMaskFilter.h"
+#include "SkBlurMask.h"
+#include "SkGpuBlurUtils.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+#include "SkMaskFilter.h"
+#include "SkRRect.h"
+#include "SkStringUtils.h"
+#include "SkStrokeRec.h"
+
+#if SK_SUPPORT_GPU
+#include "GrCircleBlurFragmentProcessor.h"
+#include "GrContext.h"
+#include "GrDrawContext.h"
+#include "GrTexture.h"
+#include "GrFragmentProcessor.h"
+#include "GrInvariantOutput.h"
+#include "GrStyle.h"
+#include "effects/GrSimpleTextureEffect.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLSampler.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#endif
+
+SkScalar SkBlurMaskFilter::ConvertRadiusToSigma(SkScalar radius) {
+ return SkBlurMask::ConvertRadiusToSigma(radius);
+}
+
+class SkBlurMaskFilterImpl : public SkMaskFilter {
+public:
+ SkBlurMaskFilterImpl(SkScalar sigma, SkBlurStyle, const SkRect& occluder, uint32_t flags);
+
+ // overrides from SkMaskFilter
+ SkMask::Format getFormat() const override;
+ bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix&,
+ SkIPoint* margin) const override;
+
+#if SK_SUPPORT_GPU
+ bool canFilterMaskGPU(const SkRRect& devRRect,
+ const SkIRect& clipBounds,
+ const SkMatrix& ctm,
+ SkRect* maskRect) const override;
+ bool directFilterMaskGPU(GrTextureProvider* texProvider,
+ GrDrawContext* drawContext,
+ GrPaint* grp,
+ const GrClip&,
+ const SkMatrix& viewMatrix,
+ const SkStrokeRec& strokeRec,
+ const SkPath& path) const override;
+ bool directFilterRRectMaskGPU(GrContext*,
+ GrDrawContext* drawContext,
+ GrPaint* grp,
+ const GrClip&,
+ const SkMatrix& viewMatrix,
+ const SkStrokeRec& strokeRec,
+ const SkRRect& rrect,
+ const SkRRect& devRRect) const override;
+ bool filterMaskGPU(GrTexture* src,
+ const SkMatrix& ctm,
+ const SkIRect& maskRect,
+ GrTexture** result) const override;
+#endif
+
+ void computeFastBounds(const SkRect&, SkRect*) const override;
+ bool asABlur(BlurRec*) const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkBlurMaskFilterImpl)
+
+protected:
+ FilterReturn filterRectsToNine(const SkRect[], int count, const SkMatrix&,
+ const SkIRect& clipBounds,
+ NinePatch*) const override;
+
+ FilterReturn filterRRectToNine(const SkRRect&, const SkMatrix&,
+ const SkIRect& clipBounds,
+ NinePatch*) const override;
+
+ bool filterRectMask(SkMask* dstM, const SkRect& r, const SkMatrix& matrix,
+ SkIPoint* margin, SkMask::CreateMode createMode) const;
+ bool filterRRectMask(SkMask* dstM, const SkRRect& r, const SkMatrix& matrix,
+ SkIPoint* margin, SkMask::CreateMode createMode) const;
+
+ bool ignoreXform() const {
+ return SkToBool(fBlurFlags & SkBlurMaskFilter::kIgnoreTransform_BlurFlag);
+ }
+
+private:
+ // To avoid unseemly allocation requests (especially on memory-constrained
+ // platforms such as handsets) we limit the radius to something manageable
+ // (as opposed to a request like 10,000).
+ static const SkScalar kMAX_BLUR_SIGMA;
+
+ SkScalar fSigma;
+ SkBlurStyle fBlurStyle;
+ SkRect fOccluder;
+ uint32_t fBlurFlags;
+
+ SkBlurQuality getQuality() const {
+ return (fBlurFlags & SkBlurMaskFilter::kHighQuality_BlurFlag) ?
+ kHigh_SkBlurQuality : kLow_SkBlurQuality;
+ }
+
+ SkBlurMaskFilterImpl(SkReadBuffer&);
+ void flatten(SkWriteBuffer&) const override;
+
+ SkScalar computeXformedSigma(const SkMatrix& ctm) const {
+ SkScalar xformedSigma = this->ignoreXform() ? fSigma : ctm.mapRadius(fSigma);
+ return SkMinScalar(xformedSigma, kMAX_BLUR_SIGMA);
+ }
+
+ friend class SkBlurMaskFilter;
+
+ typedef SkMaskFilter INHERITED;
+};
+
+const SkScalar SkBlurMaskFilterImpl::kMAX_BLUR_SIGMA = SkIntToScalar(128);
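+
+// For example, with fSigma == 3 and a CTM that scales uniformly by 2,
+// computeXformedSigma() returns ctm.mapRadius(3) == 6; with
+// kIgnoreTransform_BlurFlag set it stays 3. Either way the result is clamped
+// to kMAX_BLUR_SIGMA (128) so a pathological transform cannot trigger an
+// enormous blur allocation.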
+
+sk_sp<SkMaskFilter> SkBlurMaskFilter::Make(SkBlurStyle style, SkScalar sigma,
+ const SkRect& occluder, uint32_t flags) {
+ if (!SkScalarIsFinite(sigma) || sigma <= 0) {
+ return nullptr;
+ }
+ if ((unsigned)style > (unsigned)kLastEnum_SkBlurStyle) {
+ return nullptr;
+ }
+ SkASSERT(flags <= SkBlurMaskFilter::kAll_BlurFlag);
+ flags &= SkBlurMaskFilter::kAll_BlurFlag;
+
+ return sk_sp<SkMaskFilter>(new SkBlurMaskFilterImpl(sigma, style, occluder, flags));
+}
+
+// linearly interpolate between y1 & y3 to match x2's position between x1 & x3
+static SkScalar interp(SkScalar x1, SkScalar x2, SkScalar x3, SkScalar y1, SkScalar y3) {
+ SkASSERT(x1 <= x2 && x2 <= x3);
+ SkASSERT(y1 <= y3);
+
+ SkScalar t = (x2 - x1) / (x3 - x1);
+ return y1 + t * (y3 - y1);
+}
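+
+// e.g. interp(0, 5, 10, 100, 200) == 150: t == 0.5, halfway between y1 and
+// y3. insert_into_arrays() below uses this to keep the texture coordinates in
+// array2 consistent with the positions spliced into array1.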
+
+// Insert 'lower' and 'higher' into 'array1' and insert a new value at each matching insertion
+// point in 'array2' that linearly interpolates between the existing values.
+// Return a bit mask which contains a copy of 'inputMask' for all the cells between the two
+// insertion points.
+static uint32_t insert_into_arrays(SkScalar* array1, SkScalar* array2,
+ SkScalar lower, SkScalar higher,
+ int* num, uint32_t inputMask, int maskSize) {
+ SkASSERT(lower < higher);
+ SkASSERT(lower >= array1[0] && higher <= array1[*num-1]);
+
+ int32_t skipMask = 0x0;
+ int i;
+ for (i = 0; i < *num; ++i) {
+ if (lower >= array1[i] && lower < array1[i+1]) {
+ if (!SkScalarNearlyEqual(lower, array1[i])) {
+ memmove(&array1[i+2], &array1[i+1], (*num-i-1)*sizeof(SkScalar));
+ array1[i+1] = lower;
+ memmove(&array2[i+2], &array2[i+1], (*num-i-1)*sizeof(SkScalar));
+ array2[i+1] = interp(array1[i], lower, array1[i+2], array2[i], array2[i+2]);
+ i++;
+ (*num)++;
+ }
+ break;
+ }
+ }
+ for ( ; i < *num; ++i) {
+ skipMask |= inputMask << (i*maskSize);
+ if (higher > array1[i] && higher <= array1[i+1]) {
+ if (!SkScalarNearlyEqual(higher, array1[i+1])) {
+ memmove(&array1[i+2], &array1[i+1], (*num-i-1)*sizeof(SkScalar));
+ array1[i+1] = higher;
+ memmove(&array2[i+2], &array2[i+1], (*num-i-1)*sizeof(SkScalar));
+ array2[i+1] = interp(array1[i], higher, array1[i+2], array2[i], array2[i+2]);
+ (*num)++;
+ }
+ break;
+ }
+ }
+
+ return skipMask;
+}
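+
+// A worked example of the X pass as it is used below: with
+// rectXs == { 0, 10, 20, 30 }, an occluder spanning x in [12, 18],
+// inputMask == 0x1 and maskSize == 1, the call rewrites rectXs to
+// { 0, 10, 12, 18, 20, 30 } (interpolating matching texXs entries), bumps
+// *num from 4 to 6, and returns 0x4 -- bit 2 set -- marking the cell that
+// covers [12, 18) as fully occluded and therefore skippable.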
+
+bool SkBlurMaskFilter::ComputeBlurredRRectParams(const SkRRect& srcRRect, const SkRRect& devRRect,
+ const SkRect& occluder,
+ SkScalar sigma, SkScalar xformedSigma,
+ SkRRect* rrectToDraw,
+ SkISize* widthHeight,
+ SkScalar rectXs[kMaxDivisions],
+ SkScalar rectYs[kMaxDivisions],
+ SkScalar texXs[kMaxDivisions],
+ SkScalar texYs[kMaxDivisions],
+ int* numXs, int* numYs, uint32_t* skipMask) {
+ unsigned int devBlurRadius = 3*SkScalarCeilToInt(xformedSigma-1/6.0f);
+ SkScalar srcBlurRadius = 3.0f * sigma;
+
+ const SkRect& devOrig = devRRect.getBounds();
+ const SkVector& devRadiiUL = devRRect.radii(SkRRect::kUpperLeft_Corner);
+ const SkVector& devRadiiUR = devRRect.radii(SkRRect::kUpperRight_Corner);
+ const SkVector& devRadiiLR = devRRect.radii(SkRRect::kLowerRight_Corner);
+ const SkVector& devRadiiLL = devRRect.radii(SkRRect::kLowerLeft_Corner);
+
+ const int devLeft = SkScalarCeilToInt(SkTMax<SkScalar>(devRadiiUL.fX, devRadiiLL.fX));
+ const int devTop = SkScalarCeilToInt(SkTMax<SkScalar>(devRadiiUL.fY, devRadiiUR.fY));
+ const int devRight = SkScalarCeilToInt(SkTMax<SkScalar>(devRadiiUR.fX, devRadiiLR.fX));
+ const int devBot = SkScalarCeilToInt(SkTMax<SkScalar>(devRadiiLL.fY, devRadiiLR.fY));
+
+ // This is a conservative check for nine-patchability
+ if (devOrig.fLeft + devLeft + devBlurRadius >= devOrig.fRight - devRight - devBlurRadius ||
+ devOrig.fTop + devTop + devBlurRadius >= devOrig.fBottom - devBot - devBlurRadius) {
+ return false;
+ }
+
+ const SkVector& srcRadiiUL = srcRRect.radii(SkRRect::kUpperLeft_Corner);
+ const SkVector& srcRadiiUR = srcRRect.radii(SkRRect::kUpperRight_Corner);
+ const SkVector& srcRadiiLR = srcRRect.radii(SkRRect::kLowerRight_Corner);
+ const SkVector& srcRadiiLL = srcRRect.radii(SkRRect::kLowerLeft_Corner);
+
+ const SkScalar srcLeft = SkTMax<SkScalar>(srcRadiiUL.fX, srcRadiiLL.fX);
+ const SkScalar srcTop = SkTMax<SkScalar>(srcRadiiUL.fY, srcRadiiUR.fY);
+ const SkScalar srcRight = SkTMax<SkScalar>(srcRadiiUR.fX, srcRadiiLR.fX);
+ const SkScalar srcBot = SkTMax<SkScalar>(srcRadiiLL.fY, srcRadiiLR.fY);
+
+ int newRRWidth = 2*devBlurRadius + devLeft + devRight + 1;
+ int newRRHeight = 2*devBlurRadius + devTop + devBot + 1;
+ widthHeight->fWidth = newRRWidth + 2 * devBlurRadius;
+ widthHeight->fHeight = newRRHeight + 2 * devBlurRadius;
+
+ const SkRect srcProxyRect = srcRRect.getBounds().makeOutset(srcBlurRadius, srcBlurRadius);
+
+ rectXs[0] = srcProxyRect.fLeft;
+ rectXs[1] = srcProxyRect.fLeft + 2*srcBlurRadius + srcLeft;
+ rectXs[2] = srcProxyRect.fRight - 2*srcBlurRadius - srcRight;
+ rectXs[3] = srcProxyRect.fRight;
+
+ rectYs[0] = srcProxyRect.fTop;
+ rectYs[1] = srcProxyRect.fTop + 2*srcBlurRadius + srcTop;
+ rectYs[2] = srcProxyRect.fBottom - 2*srcBlurRadius - srcBot;
+ rectYs[3] = srcProxyRect.fBottom;
+
+ texXs[0] = 0.0f;
+ texXs[1] = 2.0f*devBlurRadius + devLeft;
+ texXs[2] = 2.0f*devBlurRadius + devLeft + 1;
+ texXs[3] = SkIntToScalar(widthHeight->fWidth);
+
+ texYs[0] = 0.0f;
+ texYs[1] = 2.0f*devBlurRadius + devTop;
+ texYs[2] = 2.0f*devBlurRadius + devTop + 1;
+ texYs[3] = SkIntToScalar(widthHeight->fHeight);
+
+ SkRect temp = occluder;
+
+ *numXs = 4;
+ *numYs = 4;
+ *skipMask = 0;
+ if (!temp.isEmpty() && (srcProxyRect.contains(temp) || temp.intersect(srcProxyRect))) {
+ *skipMask = insert_into_arrays(rectXs, texXs, temp.fLeft, temp.fRight, numXs, 0x1, 1);
+ *skipMask = insert_into_arrays(rectYs, texYs, temp.fTop, temp.fBottom,
+ numYs, *skipMask, *numXs-1);
+ }
+
+ const SkRect newRect = SkRect::MakeXYWH(SkIntToScalar(devBlurRadius),
+ SkIntToScalar(devBlurRadius),
+ SkIntToScalar(newRRWidth),
+ SkIntToScalar(newRRHeight));
+ SkVector newRadii[4];
+ newRadii[0] = { SkScalarCeilToScalar(devRadiiUL.fX), SkScalarCeilToScalar(devRadiiUL.fY) };
+ newRadii[1] = { SkScalarCeilToScalar(devRadiiUR.fX), SkScalarCeilToScalar(devRadiiUR.fY) };
+ newRadii[2] = { SkScalarCeilToScalar(devRadiiLR.fX), SkScalarCeilToScalar(devRadiiLR.fY) };
+ newRadii[3] = { SkScalarCeilToScalar(devRadiiLL.fX), SkScalarCeilToScalar(devRadiiLL.fY) };
+
+ rrectToDraw->setRectRadii(newRect, newRadii);
+ return true;
+}
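+
+// Rough numbers for the computation above, assuming sigma == xformedSigma == 4
+// and 10px corner radii on every corner: devBlurRadius == 3*ceil(4 - 1/6) == 12
+// and srcBlurRadius == 12, so the conservative check rejects any device rect
+// 44px or narrower (10 + 10 + 2*12) in either dimension; when it passes,
+// newRRWidth == newRRHeight == 2*12 + 10 + 10 + 1 == 45 and the nine-patch
+// texture ends up 69x69 (45 plus another 2*devBlurRadius of padding).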
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkBlurMaskFilterImpl::SkBlurMaskFilterImpl(SkScalar sigma, SkBlurStyle style,
+ const SkRect& occluder, uint32_t flags)
+ : fSigma(sigma)
+ , fBlurStyle(style)
+ , fOccluder(occluder)
+ , fBlurFlags(flags) {
+ SkASSERT(fSigma > 0);
+ SkASSERT((unsigned)style <= kLastEnum_SkBlurStyle);
+ SkASSERT(flags <= SkBlurMaskFilter::kAll_BlurFlag);
+}
+
+SkMask::Format SkBlurMaskFilterImpl::getFormat() const {
+ return SkMask::kA8_Format;
+}
+
+bool SkBlurMaskFilterImpl::asABlur(BlurRec* rec) const {
+ if (this->ignoreXform()) {
+ return false;
+ }
+
+ if (rec) {
+ rec->fSigma = fSigma;
+ rec->fStyle = fBlurStyle;
+ rec->fQuality = this->getQuality();
+ }
+ return true;
+}
+
+bool SkBlurMaskFilterImpl::filterMask(SkMask* dst, const SkMask& src,
+ const SkMatrix& matrix,
+ SkIPoint* margin) const {
+ SkScalar sigma = this->computeXformedSigma(matrix);
+ return SkBlurMask::BoxBlur(dst, src, sigma, fBlurStyle, this->getQuality(), margin);
+}
+
+bool SkBlurMaskFilterImpl::filterRectMask(SkMask* dst, const SkRect& r,
+ const SkMatrix& matrix,
+ SkIPoint* margin, SkMask::CreateMode createMode) const {
+ SkScalar sigma = computeXformedSigma(matrix);
+
+ return SkBlurMask::BlurRect(sigma, dst, r, fBlurStyle, margin, createMode);
+}
+
+bool SkBlurMaskFilterImpl::filterRRectMask(SkMask* dst, const SkRRect& r,
+ const SkMatrix& matrix,
+ SkIPoint* margin, SkMask::CreateMode createMode) const {
+ SkScalar sigma = computeXformedSigma(matrix);
+
+ return SkBlurMask::BlurRRect(sigma, dst, r, fBlurStyle, margin, createMode);
+}
+
+#include "SkCanvas.h"
+
+static bool prepare_to_draw_into_mask(const SkRect& bounds, SkMask* mask) {
+ SkASSERT(mask != nullptr);
+
+ mask->fBounds = bounds.roundOut();
+ mask->fRowBytes = SkAlign4(mask->fBounds.width());
+ mask->fFormat = SkMask::kA8_Format;
+ const size_t size = mask->computeImageSize();
+ mask->fImage = SkMask::AllocImage(size);
+ if (nullptr == mask->fImage) {
+ return false;
+ }
+
+ // FIXME: use sk_calloc in AllocImage?
+ sk_bzero(mask->fImage, size);
+ return true;
+}
+
+static bool draw_rrect_into_mask(const SkRRect rrect, SkMask* mask) {
+ if (!prepare_to_draw_into_mask(rrect.rect(), mask)) {
+ return false;
+ }
+
+ // FIXME: This code duplicates code in draw_rects_into_mask, below. Is there a
+ // clean way to share more code?
+ SkBitmap bitmap;
+ bitmap.installMaskPixels(*mask);
+
+ SkCanvas canvas(bitmap);
+ canvas.translate(-SkIntToScalar(mask->fBounds.left()),
+ -SkIntToScalar(mask->fBounds.top()));
+
+ SkPaint paint;
+ paint.setAntiAlias(true);
+ canvas.drawRRect(rrect, paint);
+ return true;
+}
+
+static bool draw_rects_into_mask(const SkRect rects[], int count, SkMask* mask) {
+ if (!prepare_to_draw_into_mask(rects[0], mask)) {
+ return false;
+ }
+
+ SkBitmap bitmap;
+ bitmap.installPixels(SkImageInfo::Make(mask->fBounds.width(),
+ mask->fBounds.height(),
+ kAlpha_8_SkColorType,
+ kPremul_SkAlphaType),
+ mask->fImage, mask->fRowBytes);
+
+ SkCanvas canvas(bitmap);
+ canvas.translate(-SkIntToScalar(mask->fBounds.left()),
+ -SkIntToScalar(mask->fBounds.top()));
+
+ SkPaint paint;
+ paint.setAntiAlias(true);
+
+ if (1 == count) {
+ canvas.drawRect(rects[0], paint);
+ } else {
+ // todo: do I need a fast way to do this?
+ SkPath path;
+ path.addRect(rects[0]);
+ path.addRect(rects[1]);
+ path.setFillType(SkPath::kEvenOdd_FillType);
+ canvas.drawPath(path, paint);
+ }
+ return true;
+}
+
+static bool rect_exceeds(const SkRect& r, SkScalar v) {
+ return r.fLeft < -v || r.fTop < -v || r.fRight > v || r.fBottom > v ||
+ r.width() > v || r.height() > v;
+}
+
+#include "SkMaskCache.h"
+
+static SkCachedData* copy_mask_to_cacheddata(SkMask* mask) {
+ const size_t size = mask->computeTotalImageSize();
+ SkCachedData* data = SkResourceCache::NewCachedData(size);
+ if (data) {
+ memcpy(data->writable_data(), mask->fImage, size);
+ SkMask::FreeImage(mask->fImage);
+ mask->fImage = (uint8_t*)data->data();
+ }
+ return data;
+}
+
+static SkCachedData* find_cached_rrect(SkMask* mask, SkScalar sigma, SkBlurStyle style,
+ SkBlurQuality quality, const SkRRect& rrect) {
+ return SkMaskCache::FindAndRef(sigma, style, quality, rrect, mask);
+}
+
+static SkCachedData* add_cached_rrect(SkMask* mask, SkScalar sigma, SkBlurStyle style,
+ SkBlurQuality quality, const SkRRect& rrect) {
+ SkCachedData* cache = copy_mask_to_cacheddata(mask);
+ if (cache) {
+ SkMaskCache::Add(sigma, style, quality, rrect, *mask, cache);
+ }
+ return cache;
+}
+
+static SkCachedData* find_cached_rects(SkMask* mask, SkScalar sigma, SkBlurStyle style,
+ SkBlurQuality quality, const SkRect rects[], int count) {
+ return SkMaskCache::FindAndRef(sigma, style, quality, rects, count, mask);
+}
+
+static SkCachedData* add_cached_rects(SkMask* mask, SkScalar sigma, SkBlurStyle style,
+ SkBlurQuality quality, const SkRect rects[], int count) {
+ SkCachedData* cache = copy_mask_to_cacheddata(mask);
+ if (cache) {
+ SkMaskCache::Add(sigma, style, quality, rects, count, *mask, cache);
+ }
+ return cache;
+}
+
+#ifdef SK_IGNORE_FAST_RRECT_BLUR
+ // Use the faster analytic blur approach for ninepatch round rects
+ static const bool c_analyticBlurRRect{false};
+#else
+ static const bool c_analyticBlurRRect{true};
+#endif
+
+SkMaskFilter::FilterReturn
+SkBlurMaskFilterImpl::filterRRectToNine(const SkRRect& rrect, const SkMatrix& matrix,
+ const SkIRect& clipBounds,
+ NinePatch* patch) const {
+ SkASSERT(patch != nullptr);
+ switch (rrect.getType()) {
+ case SkRRect::kEmpty_Type:
+ // Nothing to draw.
+ return kFalse_FilterReturn;
+
+ case SkRRect::kRect_Type:
+ // We should have caught this earlier.
+ SkASSERT(false);
+ // Fall through.
+ case SkRRect::kOval_Type:
+ // The nine patch special case does not handle ovals, and we
+ // already have code for rectangles.
+ return kUnimplemented_FilterReturn;
+
+ // These three can take advantage of this fast path.
+ case SkRRect::kSimple_Type:
+ case SkRRect::kNinePatch_Type:
+ case SkRRect::kComplex_Type:
+ break;
+ }
+
+ // TODO: report correct metrics for inner style, where we do not grow the
+ // total bounds but do need an inset equal to the size of our blur radius.
+ if (kInner_SkBlurStyle == fBlurStyle) {
+ return kUnimplemented_FilterReturn;
+ }
+
+ // TODO: take clipBounds into account to limit our coordinates up front
+ // for now, just skip too-large src rects (to take the old code path).
+ if (rect_exceeds(rrect.rect(), SkIntToScalar(32767))) {
+ return kUnimplemented_FilterReturn;
+ }
+
+ SkIPoint margin;
+ SkMask srcM, dstM;
+ srcM.fBounds = rrect.rect().roundOut();
+ srcM.fFormat = SkMask::kA8_Format;
+ srcM.fRowBytes = 0;
+
+ bool filterResult = false;
+ if (c_analyticBlurRRect) {
+ // special case for fast round rect blur
+ // don't actually do the blur the first time, just compute the correct size
+ filterResult = this->filterRRectMask(&dstM, rrect, matrix, &margin,
+ SkMask::kJustComputeBounds_CreateMode);
+ }
+
+ if (!filterResult) {
+ filterResult = this->filterMask(&dstM, srcM, matrix, &margin);
+ }
+
+ if (!filterResult) {
+ return kFalse_FilterReturn;
+ }
+
+ // Now figure out the appropriate width and height of the smaller round rectangle
+ // to stretch. It will take into account the larger radius per side as well as double
+ // the margin, to account for inner and outer blur.
+ const SkVector& UL = rrect.radii(SkRRect::kUpperLeft_Corner);
+ const SkVector& UR = rrect.radii(SkRRect::kUpperRight_Corner);
+ const SkVector& LR = rrect.radii(SkRRect::kLowerRight_Corner);
+ const SkVector& LL = rrect.radii(SkRRect::kLowerLeft_Corner);
+
+ const SkScalar leftUnstretched = SkTMax(UL.fX, LL.fX) + SkIntToScalar(2 * margin.fX);
+ const SkScalar rightUnstretched = SkTMax(UR.fX, LR.fX) + SkIntToScalar(2 * margin.fX);
+
+ // Extra space in the middle to ensure an unchanging piece for stretching. Use 3 to cover
+ // any fractional space on either side plus 1 for the part to stretch.
+ const SkScalar stretchSize = SkIntToScalar(3);
+
+ const SkScalar totalSmallWidth = leftUnstretched + rightUnstretched + stretchSize;
+ if (totalSmallWidth >= rrect.rect().width()) {
+ // There is no valid piece to stretch.
+ return kUnimplemented_FilterReturn;
+ }
+
+ const SkScalar topUnstretched = SkTMax(UL.fY, UR.fY) + SkIntToScalar(2 * margin.fY);
+ const SkScalar bottomUnstretched = SkTMax(LL.fY, LR.fY) + SkIntToScalar(2 * margin.fY);
+
+ const SkScalar totalSmallHeight = topUnstretched + bottomUnstretched + stretchSize;
+ if (totalSmallHeight >= rrect.rect().height()) {
+ // There is no valid piece to stretch.
+ return kUnimplemented_FilterReturn;
+ }
+
+ SkRect smallR = SkRect::MakeWH(totalSmallWidth, totalSmallHeight);
+
+ SkRRect smallRR;
+ SkVector radii[4];
+ radii[SkRRect::kUpperLeft_Corner] = UL;
+ radii[SkRRect::kUpperRight_Corner] = UR;
+ radii[SkRRect::kLowerRight_Corner] = LR;
+ radii[SkRRect::kLowerLeft_Corner] = LL;
+ smallRR.setRectRadii(smallR, radii);
+
+ const SkScalar sigma = this->computeXformedSigma(matrix);
+ SkCachedData* cache = find_cached_rrect(&patch->fMask, sigma, fBlurStyle,
+ this->getQuality(), smallRR);
+ if (!cache) {
+ bool analyticBlurWorked = false;
+ if (c_analyticBlurRRect) {
+ analyticBlurWorked =
+ this->filterRRectMask(&patch->fMask, smallRR, matrix, &margin,
+ SkMask::kComputeBoundsAndRenderImage_CreateMode);
+ }
+
+ if (!analyticBlurWorked) {
+ if (!draw_rrect_into_mask(smallRR, &srcM)) {
+ return kFalse_FilterReturn;
+ }
+
+ SkAutoMaskFreeImage amf(srcM.fImage);
+
+ if (!this->filterMask(&patch->fMask, srcM, matrix, &margin)) {
+ return kFalse_FilterReturn;
+ }
+ }
+ cache = add_cached_rrect(&patch->fMask, sigma, fBlurStyle, this->getQuality(), smallRR);
+ }
+
+ patch->fMask.fBounds.offsetTo(0, 0);
+ patch->fOuterRect = dstM.fBounds;
+ patch->fCenter.fX = SkScalarCeilToInt(leftUnstretched) + 1;
+ patch->fCenter.fY = SkScalarCeilToInt(topUnstretched) + 1;
+ SkASSERT(nullptr == patch->fCache);
+ patch->fCache = cache; // transfer ownership to patch
+ return kTrue_FilterReturn;
+}
+
+// Use the faster analytic blur approach for ninepatch rects
+static const bool c_analyticBlurNinepatch{true};
+
+SkMaskFilter::FilterReturn
+SkBlurMaskFilterImpl::filterRectsToNine(const SkRect rects[], int count,
+ const SkMatrix& matrix,
+ const SkIRect& clipBounds,
+ NinePatch* patch) const {
+ if (count < 1 || count > 2) {
+ return kUnimplemented_FilterReturn;
+ }
+
+ // TODO: report correct metrics for inner style, where we do not grow the
+ // total bounds but do need an inset equal to the size of our blur radius.
+ if (kInner_SkBlurStyle == fBlurStyle || kOuter_SkBlurStyle == fBlurStyle) {
+ return kUnimplemented_FilterReturn;
+ }
+
+ // TODO: take clipBounds into account to limit our coordinates up front
+ // for now, just skip too-large src rects (to take the old code path).
+ if (rect_exceeds(rects[0], SkIntToScalar(32767))) {
+ return kUnimplemented_FilterReturn;
+ }
+
+ SkIPoint margin;
+ SkMask srcM, dstM;
+ srcM.fBounds = rects[0].roundOut();
+ srcM.fFormat = SkMask::kA8_Format;
+ srcM.fRowBytes = 0;
+
+ bool filterResult = false;
+ if (count == 1 && c_analyticBlurNinepatch) {
+ // special case for fast rect blur
+ // don't actually do the blur the first time, just compute the correct size
+ filterResult = this->filterRectMask(&dstM, rects[0], matrix, &margin,
+ SkMask::kJustComputeBounds_CreateMode);
+ } else {
+ filterResult = this->filterMask(&dstM, srcM, matrix, &margin);
+ }
+
+ if (!filterResult) {
+ return kFalse_FilterReturn;
+ }
+
+ /*
+ * smallR is the smallest version of 'rect' that will still guarantee that
+ * we get the same blur results on all edges, plus 1 center row/col that is
+ * representative of the extendible/stretchable edges of the ninepatch.
+ * Since our actual edge may be fractional we inset 1 more to be sure we
+ * don't miss any interior blur.
+ * x is an added pixel of blur, and { and } are the (fractional) edge
+ * pixels from the original rect.
+ *
+ * x x { x x .... x x } x x
+ *
+ * Thus, in this case, we inset by a total of 5 (on each side) beginning
+ * with our outer-rect (dstM.fBounds)
+ */
+ SkRect smallR[2];
+ SkIPoint center;
+
+ // +2 is from +1 for each edge (to account for possible fractional edges)
+ int smallW = dstM.fBounds.width() - srcM.fBounds.width() + 2;
+ int smallH = dstM.fBounds.height() - srcM.fBounds.height() + 2;
+ SkIRect innerIR;
+
+ if (1 == count) {
+ innerIR = srcM.fBounds;
+ center.set(smallW, smallH);
+ } else {
+ SkASSERT(2 == count);
+ rects[1].roundIn(&innerIR);
+ center.set(smallW + (innerIR.left() - srcM.fBounds.left()),
+ smallH + (innerIR.top() - srcM.fBounds.top()));
+ }
+
+ // +1 so we get a clean, stretchable, center row/col
+ smallW += 1;
+ smallH += 1;
+
+ // we want the inset amounts to be integral, so we don't change any
+ // fractional phase on the fRight or fBottom of our smallR.
+ const SkScalar dx = SkIntToScalar(innerIR.width() - smallW);
+ const SkScalar dy = SkIntToScalar(innerIR.height() - smallH);
+ if (dx < 0 || dy < 0) {
+ // we're too small, relative to our blur, to break into nine-patch,
+ // so we ask to have our normal filterMask() be called.
+ return kUnimplemented_FilterReturn;
+ }
+
+ smallR[0].set(rects[0].left(), rects[0].top(), rects[0].right() - dx, rects[0].bottom() - dy);
+ if (smallR[0].width() < 2 || smallR[0].height() < 2) {
+ return kUnimplemented_FilterReturn;
+ }
+ if (2 == count) {
+ smallR[1].set(rects[1].left(), rects[1].top(),
+ rects[1].right() - dx, rects[1].bottom() - dy);
+ SkASSERT(!smallR[1].isEmpty());
+ }
+
+ const SkScalar sigma = this->computeXformedSigma(matrix);
+ SkCachedData* cache = find_cached_rects(&patch->fMask, sigma, fBlurStyle,
+ this->getQuality(), smallR, count);
+ if (!cache) {
+ if (count > 1 || !c_analyticBlurNinepatch) {
+ if (!draw_rects_into_mask(smallR, count, &srcM)) {
+ return kFalse_FilterReturn;
+ }
+
+ SkAutoMaskFreeImage amf(srcM.fImage);
+
+ if (!this->filterMask(&patch->fMask, srcM, matrix, &margin)) {
+ return kFalse_FilterReturn;
+ }
+ } else {
+ if (!this->filterRectMask(&patch->fMask, smallR[0], matrix, &margin,
+ SkMask::kComputeBoundsAndRenderImage_CreateMode)) {
+ return kFalse_FilterReturn;
+ }
+ }
+ cache = add_cached_rects(&patch->fMask, sigma, fBlurStyle, this->getQuality(), smallR, count);
+ }
+ patch->fMask.fBounds.offsetTo(0, 0);
+ patch->fOuterRect = dstM.fBounds;
+ patch->fCenter = center;
+ SkASSERT(nullptr == patch->fCache);
+ patch->fCache = cache; // transfer ownership to patch
+ return kTrue_FilterReturn;
+}
+
+void SkBlurMaskFilterImpl::computeFastBounds(const SkRect& src,
+ SkRect* dst) const {
+ SkScalar pad = 3.0f * fSigma;
+
+ dst->set(src.fLeft - pad, src.fTop - pad,
+ src.fRight + pad, src.fBottom + pad);
+}
+
+sk_sp<SkFlattenable> SkBlurMaskFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ const SkScalar sigma = buffer.readScalar();
+ const unsigned style = buffer.readUInt();
+ const unsigned flags = buffer.readUInt();
+
+ SkRect occluder;
+ if (buffer.isVersionLT(SkReadBuffer::kBlurMaskFilterWritesOccluder)) {
+ occluder.setEmpty();
+ } else {
+ buffer.readRect(&occluder);
+ }
+
+ if (style <= kLastEnum_SkBlurStyle) {
+ return SkBlurMaskFilter::Make((SkBlurStyle)style, sigma, occluder, flags);
+ }
+ return nullptr;
+}
+
+void SkBlurMaskFilterImpl::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeScalar(fSigma);
+ buffer.writeUInt(fBlurStyle);
+ buffer.writeUInt(fBlurFlags);
+ buffer.writeRect(fOccluder);
+}
+
+
+#if SK_SUPPORT_GPU
+
+class GrGLRectBlurEffect;
+
+class GrRectBlurEffect : public GrFragmentProcessor {
+public:
+ ~GrRectBlurEffect() override { }
+
+ const char* name() const override { return "RectBlur"; }
+
+ static sk_sp<GrFragmentProcessor> Make(GrTextureProvider *textureProvider,
+ const SkRect& rect, float sigma) {
+ int doubleProfileSize = SkScalarCeilToInt(12*sigma);
+
+ if (doubleProfileSize >= rect.width() || doubleProfileSize >= rect.height()) {
+ // if the blur sigma is too large so the gaussian overlaps the whole
+ // rect in either direction, fall back to CPU path for now.
+ return nullptr;
+ }
+
+ SkAutoTUnref<GrTexture> blurProfile(CreateBlurProfileTexture(textureProvider, sigma));
+ if (!blurProfile) {
+ return nullptr;
+ }
+ // in OpenGL ES, mediump floats have a minimum range of 2^14. If we have coordinates bigger
+ // than that, the shader math will end up with infinities and result in the blur effect not
+ // working correctly. To avoid this, we switch into highp when the coordinates are too big.
+ // As 2^14 is the minimum range but the actual range can be bigger, we might end up
+ // switching to highp sooner than strictly necessary, but most devices that have a bigger
+ // range for mediump also have mediump being exactly the same as highp (e.g. all non-OpenGL
+ // ES devices), and thus incur no additional penalty for the switch.
+ static const SkScalar kMAX_BLUR_COORD = SkIntToScalar(16000);
+ GrSLPrecision precision;
+ if (SkScalarAbs(rect.top()) > kMAX_BLUR_COORD ||
+ SkScalarAbs(rect.left()) > kMAX_BLUR_COORD ||
+ SkScalarAbs(rect.bottom()) > kMAX_BLUR_COORD ||
+ SkScalarAbs(rect.right()) > kMAX_BLUR_COORD ||
+ SkScalarAbs(rect.width()) > kMAX_BLUR_COORD ||
+ SkScalarAbs(rect.height()) > kMAX_BLUR_COORD) {
+ precision = kHigh_GrSLPrecision;
+ } else {
+ precision = kDefault_GrSLPrecision;
+ }
+ return sk_sp<GrFragmentProcessor>(
+ new GrRectBlurEffect(rect, sigma, blurProfile, precision));
+ }
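+
+ // For a sense of scale: with sigma == 10, doubleProfileSize == 120, so any
+ // rect whose width or height is at most 120 device pixels returns nullptr
+ // here and the caller falls back to the CPU path, while a rect whose
+ // coordinates or extent exceed 16000 trips the highp switch above.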
+
+ const SkRect& getRect() const { return fRect; }
+ float getSigma() const { return fSigma; }
+ GrSLPrecision precision() const { return fPrecision; }
+
+private:
+ GrRectBlurEffect(const SkRect& rect, float sigma, GrTexture *blurProfile,
+ GrSLPrecision fPrecision);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ static GrTexture* CreateBlurProfileTexture(GrTextureProvider*, float sigma);
+
+ SkRect fRect;
+ float fSigma;
+ GrTextureAccess fBlurProfileAccess;
+ GrSLPrecision fPrecision;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+class GrGLRectBlurEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+ static void GenKey(const GrProcessor&, const GrGLSLCaps&, GrProcessorKeyBuilder* b);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+private:
+ typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+
+ UniformHandle fProxyRectUniform;
+ UniformHandle fProfileSizeUniform;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void OutputRectBlurProfileLookup(GrGLSLFPFragmentBuilder* fragBuilder,
+ GrGLSLFragmentProcessor::SamplerHandle sampler,
+ const char *output,
+ const char *profileSize, const char *loc,
+ const char *blurred_width,
+ const char *sharp_width) {
+ fragBuilder->codeAppendf("float %s;", output);
+ fragBuilder->codeAppendf("{");
+ fragBuilder->codeAppendf("float coord = ((abs(%s - 0.5 * %s) - 0.5 * %s)) / %s;",
+ loc, blurred_width, sharp_width, profileSize);
+ fragBuilder->codeAppendf("%s = ", output);
+ fragBuilder->appendTextureLookup(sampler, "vec2(coord,0.5)");
+ fragBuilder->codeAppend(".a;");
+ fragBuilder->codeAppendf("}");
+}
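+
+// For the "horiz_lookup" call site in emitCode() below, the generated
+// fragment code looks roughly like this (uniform and sampler names are
+// illustrative; the exact texture expression depends on the GLSL version
+// appendTextureLookup targets):
+//
+//   float horiz_lookup;
+//   {
+//   float coord = ((abs(translatedPos.x - 0.5 * width) - 0.5 * wh.x)) / uProfileSize;
+//   horiz_lookup = texture2D(uSampler, vec2(coord, 0.5)).a;
+//   }
+//
+// i.e. the fragment's distance past the sharp half-extent, normalized by the
+// profile size, indexes the 1D blur profile texture.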
+
+
+void GrGLRectBlurEffect::GenKey(const GrProcessor& proc, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrRectBlurEffect& rbe = proc.cast<GrRectBlurEffect>();
+
+ b->add32(rbe.precision());
+}
+
+
+void GrGLRectBlurEffect::emitCode(EmitArgs& args) {
+ const GrRectBlurEffect& rbe = args.fFp.cast<GrRectBlurEffect>();
+
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ const char *rectName;
+ const char *profileSizeName;
+
+ SkString precisionString;
+ if (args.fGLSLCaps->usesPrecisionModifiers()) {
+ precisionString.printf("%s ", GrGLSLPrecisionString(rbe.precision()));
+ }
+ fProxyRectUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType,
+ rbe.precision(),
+ "proxyRect",
+ &rectName);
+ fProfileSizeUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType,
+ kDefault_GrSLPrecision,
+ "profileSize",
+ &profileSizeName);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const char *fragmentPos = fragBuilder->fragmentPosition();
+
+ if (args.fInputColor) {
+ fragBuilder->codeAppendf("vec4 src=%s;", args.fInputColor);
+ } else {
+ fragBuilder->codeAppendf("vec4 src=vec4(1);");
+ }
+
+ fragBuilder->codeAppendf("%s vec2 translatedPos = %s.xy - %s.xy;", precisionString.c_str(),
+ fragmentPos, rectName);
+ fragBuilder->codeAppendf("%s float width = %s.z - %s.x;", precisionString.c_str(), rectName,
+ rectName);
+ fragBuilder->codeAppendf("%s float height = %s.w - %s.y;", precisionString.c_str(), rectName,
+ rectName);
+
+ fragBuilder->codeAppendf("%s vec2 smallDims = vec2(width - %s, height - %s);",
+ precisionString.c_str(), profileSizeName, profileSizeName);
+ fragBuilder->codeAppendf("%s float center = 2.0 * floor(%s/2.0 + .25) - 1.0;",
+ precisionString.c_str(), profileSizeName);
+ fragBuilder->codeAppendf("%s vec2 wh = smallDims - vec2(center,center);",
+ precisionString.c_str());
+
+ OutputRectBlurProfileLookup(fragBuilder, args.fTexSamplers[0], "horiz_lookup", profileSizeName,
+ "translatedPos.x", "width", "wh.x");
+ OutputRectBlurProfileLookup(fragBuilder, args.fTexSamplers[0], "vert_lookup", profileSizeName,
+ "translatedPos.y", "height", "wh.y");
+
+ fragBuilder->codeAppendf("float final = horiz_lookup * vert_lookup;");
+ fragBuilder->codeAppendf("%s = src * final;", args.fOutputColor);
+}
+
+void GrGLRectBlurEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& proc) {
+ const GrRectBlurEffect& rbe = proc.cast<GrRectBlurEffect>();
+ SkRect rect = rbe.getRect();
+
+ pdman.set4f(fProxyRectUniform, rect.fLeft, rect.fTop, rect.fRight, rect.fBottom);
+ pdman.set1f(fProfileSizeUniform, SkScalarCeilToScalar(6*rbe.getSigma()));
+}
+
+GrTexture* GrRectBlurEffect::CreateBlurProfileTexture(GrTextureProvider* textureProvider,
+ float sigma) {
+ GrSurfaceDesc texDesc;
+
+ unsigned int profileSize = SkScalarCeilToInt(6*sigma);
+
+ texDesc.fWidth = profileSize;
+ texDesc.fHeight = 1;
+ texDesc.fConfig = kAlpha_8_GrPixelConfig;
+ texDesc.fIsMipMapped = false;
+
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey key;
+ GrUniqueKey::Builder builder(&key, kDomain, 1);
+ builder[0] = profileSize;
+ builder.finish();
+
+ GrTexture *blurProfile = textureProvider->findAndRefTextureByUniqueKey(key);
+
+ if (!blurProfile) {
+ SkAutoTDeleteArray<uint8_t> profile(SkBlurMask::ComputeBlurProfile(sigma));
+
+ blurProfile = textureProvider->createTexture(texDesc, SkBudgeted::kYes, profile.get(), 0);
+ if (blurProfile) {
+ textureProvider->assignUniqueKeyToTexture(key, blurProfile);
+ }
+ }
+
+ return blurProfile;
+}
+
+GrRectBlurEffect::GrRectBlurEffect(const SkRect& rect, float sigma, GrTexture *blurProfile,
+ GrSLPrecision precision)
+ : fRect(rect)
+ , fSigma(sigma)
+ , fBlurProfileAccess(blurProfile)
+ , fPrecision(precision) {
+ this->initClassID<GrRectBlurEffect>();
+ this->addTextureAccess(&fBlurProfileAccess);
+ this->setWillReadFragmentPosition();
+}
+
+void GrRectBlurEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLRectBlurEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrRectBlurEffect::onCreateGLSLInstance() const {
+ return new GrGLRectBlurEffect;
+}
+
+bool GrRectBlurEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrRectBlurEffect& s = sBase.cast<GrRectBlurEffect>();
+ return this->getSigma() == s.getSigma() && this->getRect() == s.getRect();
+}
+
+void GrRectBlurEffect::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ inout->mulByUnknownSingleComponent();
+}
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrRectBlurEffect);
+
+sk_sp<GrFragmentProcessor> GrRectBlurEffect::TestCreate(GrProcessorTestData* d) {
+ float sigma = d->fRandom->nextRangeF(3,8);
+ float width = d->fRandom->nextRangeF(200,300);
+ float height = d->fRandom->nextRangeF(200,300);
+ return GrRectBlurEffect::Make(d->fContext->textureProvider(), SkRect::MakeWH(width, height),
+ sigma);
+}
+
+
+bool SkBlurMaskFilterImpl::directFilterMaskGPU(GrTextureProvider* texProvider,
+ GrDrawContext* drawContext,
+ GrPaint* grp,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkStrokeRec& strokeRec,
+ const SkPath& path) const {
+ SkASSERT(drawContext);
+
+ if (fBlurStyle != kNormal_SkBlurStyle) {
+ return false;
+ }
+
+ // TODO: we could handle blurred stroked circles
+ if (!strokeRec.isFillStyle()) {
+ return false;
+ }
+
+ SkScalar xformedSigma = this->computeXformedSigma(viewMatrix);
+
+ sk_sp<GrFragmentProcessor> fp;
+
+ SkRect rect;
+ if (path.isRect(&rect)) {
+ SkScalar pad = 3.0f * xformedSigma;
+ rect.outset(pad, pad);
+
+ fp = GrRectBlurEffect::Make(texProvider, rect, xformedSigma);
+ } else if (path.isOval(&rect) && SkScalarNearlyEqual(rect.width(), rect.height())) {
+ fp = GrCircleBlurFragmentProcessor::Make(texProvider, rect, xformedSigma);
+
+ // expand the rect for the coverage geometry
+ int pad = SkScalarCeilToInt(6*xformedSigma)/2;
+ rect.outset(SkIntToScalar(pad), SkIntToScalar(pad));
+ } else {
+ return false;
+ }
+
+ if (!fp) {
+ return false;
+ }
+
+ grp->addCoverageFragmentProcessor(std::move(fp));
+
+ SkMatrix inverse;
+ if (!viewMatrix.invert(&inverse)) {
+ return false;
+ }
+
+ drawContext->fillRectWithLocalMatrix(clip, *grp, SkMatrix::I(), rect, inverse);
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GrRRectBlurEffect : public GrFragmentProcessor {
+public:
+
+ static sk_sp<GrFragmentProcessor> Make(GrContext*,
+ float sigma, float xformedSigma,
+ const SkRRect& srcRRect, const SkRRect& devRRect);
+
+ virtual ~GrRRectBlurEffect() {}
+ const char* name() const override { return "GrRRectBlur"; }
+
+ const SkRRect& getRRect() const { return fRRect; }
+ float getSigma() const { return fSigma; }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ GrRRectBlurEffect(float sigma, const SkRRect&, GrTexture* profileTexture);
+
+ virtual void onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const override;
+
+ bool onIsEqual(const GrFragmentProcessor& other) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ SkRRect fRRect;
+ float fSigma;
+ GrTextureAccess fNinePatchAccess;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+static sk_sp<GrTexture> find_or_create_rrect_blur_mask(GrContext* context,
+ const SkRRect& rrectToDraw,
+ const SkISize& size,
+ float xformedSigma,
+ bool doAA) {
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey key;
+ GrUniqueKey::Builder builder(&key, kDomain, 9);
+ builder[0] = SkScalarCeilToInt(xformedSigma-1/6.0f);
+
+ int index = 1;
+ for (auto c : { SkRRect::kUpperLeft_Corner, SkRRect::kUpperRight_Corner,
+ SkRRect::kLowerRight_Corner, SkRRect::kLowerLeft_Corner }) {
+ SkASSERT(SkScalarIsInt(rrectToDraw.radii(c).fX) && SkScalarIsInt(rrectToDraw.radii(c).fY));
+ builder[index++] = SkScalarCeilToInt(rrectToDraw.radii(c).fX);
+ builder[index++] = SkScalarCeilToInt(rrectToDraw.radii(c).fY);
+ }
+ builder.finish();
+
+ sk_sp<GrTexture> mask(context->textureProvider()->findAndRefTextureByUniqueKey(key));
+ if (!mask) {
+ // TODO: this could be approx but the texture coords will need to be updated
+ sk_sp<GrDrawContext> dc(context->makeDrawContextWithFallback(SkBackingFit::kExact,
+ size.fWidth, size.fHeight,
+ kAlpha_8_GrPixelConfig,
+ nullptr));
+ if (!dc) {
+ return nullptr;
+ }
+
+ GrPaint grPaint;
+ grPaint.setAntiAlias(doAA);
+
+ dc->clear(nullptr, 0x0, true);
+ dc->drawRRect(GrNoClip(), grPaint, SkMatrix::I(), rrectToDraw, GrStyle::SimpleFill());
+
+ sk_sp<GrTexture> srcTexture(dc->asTexture());
+ sk_sp<GrDrawContext> dc2(SkGpuBlurUtils::GaussianBlur(context,
+ srcTexture.get(),
+ nullptr,
+ SkIRect::MakeWH(size.fWidth,
+ size.fHeight),
+ nullptr,
+ xformedSigma, xformedSigma,
+ SkBackingFit::kExact));
+ if (!dc2) {
+ return nullptr;
+ }
+
+ mask = dc2->asTexture();
+ SkASSERT(mask);
+ context->textureProvider()->assignUniqueKeyToTexture(key, mask.get());
+ }
+
+ return mask;
+}
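+
+// The unique key above packs nine 32-bit values: the quantized blur sigma in
+// builder[0], followed by the x/y radii of the four corners, which
+// ComputeBlurredRRectParams has already snapped to integer values.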
+
+sk_sp<GrFragmentProcessor> GrRRectBlurEffect::Make(GrContext* context,
+ float sigma, float xformedSigma,
+ const SkRRect& srcRRect, const SkRRect& devRRect) {
+ SkASSERT(!devRRect.isCircle() && !devRRect.isRect()); // Should've been caught up-stream
+
+ // TODO: loosen this up
+ if (!devRRect.isSimpleCircular()) {
+ return nullptr;
+ }
+
+ // Make sure we can successfully ninepatch this rrect -- the blur sigma has to be
+ // sufficiently small relative to both the size of the corner radius and the
+ // width (and height) of the rrect.
+ SkRRect rrectToDraw;
+ SkISize size;
+ SkScalar ignored[SkBlurMaskFilter::kMaxDivisions];
+ int ignoredSize;
+ uint32_t ignored32;
+
+ bool ninePatchable = SkBlurMaskFilter::ComputeBlurredRRectParams(srcRRect, devRRect,
+ SkRect::MakeEmpty(),
+ sigma, xformedSigma,
+ &rrectToDraw, &size,
+ ignored, ignored,
+ ignored, ignored,
+ &ignoredSize, &ignoredSize,
+ &ignored32);
+ if (!ninePatchable) {
+ return nullptr;
+ }
+
+ sk_sp<GrTexture> mask(find_or_create_rrect_blur_mask(context, rrectToDraw, size,
+ xformedSigma, true));
+ if (!mask) {
+ return nullptr;
+ }
+
+ return sk_sp<GrFragmentProcessor>(new GrRRectBlurEffect(xformedSigma,
+ devRRect,
+ mask.get()));
+}
+
+void GrRRectBlurEffect::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ inout->mulByUnknownSingleComponent();
+}
+
+GrRRectBlurEffect::GrRRectBlurEffect(float sigma, const SkRRect& rrect, GrTexture *ninePatchTexture)
+ : fRRect(rrect),
+ fSigma(sigma),
+ fNinePatchAccess(ninePatchTexture) {
+ this->initClassID<GrRRectBlurEffect>();
+ this->addTextureAccess(&fNinePatchAccess);
+ this->setWillReadFragmentPosition();
+}
+
+bool GrRRectBlurEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrRRectBlurEffect& rrbe = other.cast<GrRRectBlurEffect>();
+ return fRRect.getSimpleRadii().fX == rrbe.fRRect.getSimpleRadii().fX &&
+ fSigma == rrbe.fSigma &&
+ fRRect.rect() == rrbe.fRRect.rect();
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrRRectBlurEffect);
+
+sk_sp<GrFragmentProcessor> GrRRectBlurEffect::TestCreate(GrProcessorTestData* d) {
+ SkScalar w = d->fRandom->nextRangeScalar(100.f, 1000.f);
+ SkScalar h = d->fRandom->nextRangeScalar(100.f, 1000.f);
+ SkScalar r = d->fRandom->nextRangeF(1.f, 9.f);
+ SkScalar sigma = d->fRandom->nextRangeF(1.f,10.f);
+ SkRRect rrect;
+ rrect.setRectXY(SkRect::MakeWH(w, h), r, r);
+ return GrRRectBlurEffect::Make(d->fContext, sigma, sigma, rrect, rrect);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GrGLRRectBlurEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fProxyRectUniform;
+ GrGLSLProgramDataManager::UniformHandle fCornerRadiusUniform;
+ GrGLSLProgramDataManager::UniformHandle fBlurRadiusUniform;
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GrGLRRectBlurEffect::emitCode(EmitArgs& args) {
+ const char *rectName;
+ const char *cornerRadiusName;
+ const char *blurRadiusName;
+
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+    // The proxy rect's left, top, right, and bottom edges correspond to
+ // components x, y, z, and w, respectively.
+
+ fProxyRectUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType,
+ kDefault_GrSLPrecision,
+ "proxyRect",
+ &rectName);
+ fCornerRadiusUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType,
+ kDefault_GrSLPrecision,
+ "cornerRadius",
+ &cornerRadiusName);
+ fBlurRadiusUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType,
+ kDefault_GrSLPrecision,
+ "blurRadius",
+ &blurRadiusName);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const char* fragmentPos = fragBuilder->fragmentPosition();
+
+ // warp the fragment position to the appropriate part of the 9patch blur texture
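+    // Fragments within 'threshold' of the left/top edge sample the mask directly, the
+    // stretched middle of the rect collapses onto the mask's single center row/column, and
+    // fragments past the middle are shifted back so they sample the mask's right/bottom band.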
+
+ fragBuilder->codeAppendf("vec2 rectCenter = (%s.xy + %s.zw)/2.0;", rectName, rectName);
+ fragBuilder->codeAppendf("vec2 translatedFragPos = %s.xy - %s.xy;", fragmentPos, rectName);
+ fragBuilder->codeAppendf("float threshold = %s + 2.0*%s;", cornerRadiusName, blurRadiusName);
+ fragBuilder->codeAppendf("vec2 middle = %s.zw - %s.xy - 2.0*threshold;", rectName, rectName);
+
+ fragBuilder->codeAppendf(
+ "if (translatedFragPos.x >= threshold && translatedFragPos.x < (middle.x+threshold)) {");
+ fragBuilder->codeAppendf("translatedFragPos.x = threshold;\n");
+ fragBuilder->codeAppendf("} else if (translatedFragPos.x >= (middle.x + threshold)) {");
+ fragBuilder->codeAppendf("translatedFragPos.x -= middle.x - 1.0;");
+ fragBuilder->codeAppendf("}");
+
+ fragBuilder->codeAppendf(
+ "if (translatedFragPos.y > threshold && translatedFragPos.y < (middle.y+threshold)) {");
+ fragBuilder->codeAppendf("translatedFragPos.y = threshold;");
+ fragBuilder->codeAppendf("} else if (translatedFragPos.y >= (middle.y + threshold)) {");
+ fragBuilder->codeAppendf("translatedFragPos.y -= middle.y - 1.0;");
+ fragBuilder->codeAppendf("}");
+
+ fragBuilder->codeAppendf("vec2 proxyDims = vec2(2.0*threshold+1.0);");
+ fragBuilder->codeAppendf("vec2 texCoord = translatedFragPos / proxyDims;");
+
+ fragBuilder->codeAppendf("%s = ", args.fOutputColor);
+ fragBuilder->appendTextureLookupAndModulate(args.fInputColor, args.fTexSamplers[0], "texCoord");
+ fragBuilder->codeAppend(";");
+}
+
+void GrGLRRectBlurEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& proc) {
+ const GrRRectBlurEffect& brre = proc.cast<GrRRectBlurEffect>();
+ const SkRRect& rrect = brre.getRRect();
+
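+    // Use ~3*sigma as the blur's reach (the Gaussian is negligible beyond that), with sigma
+    // quantized the same way as in the mask cache key.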
+ float blurRadius = 3.f*SkScalarCeilToScalar(brre.getSigma()-1/6.0f);
+ pdman.set1f(fBlurRadiusUniform, blurRadius);
+
+ SkRect rect = rrect.getBounds();
+ rect.outset(blurRadius, blurRadius);
+ pdman.set4f(fProxyRectUniform, rect.fLeft, rect.fTop, rect.fRight, rect.fBottom);
+
+ SkScalar radius = 0;
+ SkASSERT(rrect.isSimpleCircular() || rrect.isRect());
+ radius = rrect.getSimpleRadii().fX;
+ pdman.set1f(fCornerRadiusUniform, radius);
+}
+
+void GrRRectBlurEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLRRectBlurEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrRRectBlurEffect::onCreateGLSLInstance() const {
+ return new GrGLRRectBlurEffect;
+}
+
+bool SkBlurMaskFilterImpl::directFilterRRectMaskGPU(GrContext* context,
+ GrDrawContext* drawContext,
+ GrPaint* grp,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkStrokeRec& strokeRec,
+ const SkRRect& srcRRect,
+ const SkRRect& devRRect) const {
+ SkASSERT(drawContext);
+
+ if (fBlurStyle != kNormal_SkBlurStyle) {
+ return false;
+ }
+
+ if (!strokeRec.isFillStyle()) {
+ return false;
+ }
+
+ SkScalar xformedSigma = this->computeXformedSigma(viewMatrix);
+
+ if (devRRect.isRect() || devRRect.isCircle()) {
+ if (this->ignoreXform()) {
+ return false;
+ }
+
+ sk_sp<GrFragmentProcessor> fp;
+ if (devRRect.isRect()) {
+ SkScalar pad = 3.0f * xformedSigma;
+ const SkRect dstCoverageRect = devRRect.rect().makeOutset(pad, pad);
+
+ fp = GrRectBlurEffect::Make(context->textureProvider(), dstCoverageRect, xformedSigma);
+ } else {
+ fp = GrCircleBlurFragmentProcessor::Make(context->textureProvider(),
+ devRRect.rect(),
+ xformedSigma);
+ }
+
+ if (!fp) {
+ return false;
+ }
+
+ GrPaint newPaint(*grp);
+ newPaint.addCoverageFragmentProcessor(std::move(fp));
+ newPaint.setAntiAlias(false);
+
+ SkRect srcProxyRect = srcRRect.rect();
+ srcProxyRect.outset(3.0f*fSigma, 3.0f*fSigma);
+
+ drawContext->drawRect(clip, newPaint, viewMatrix, srcProxyRect);
+ return true;
+ }
+
+ sk_sp<GrFragmentProcessor> fp(GrRRectBlurEffect::Make(context, fSigma, xformedSigma,
+ srcRRect, devRRect));
+ if (!fp) {
+ return false;
+ }
+
+ GrPaint newPaint(*grp);
+ newPaint.addCoverageFragmentProcessor(std::move(fp));
+ newPaint.setAntiAlias(false);
+
+ if (!this->ignoreXform()) {
+ SkRect srcProxyRect = srcRRect.rect();
+ srcProxyRect.outset(3.0f*fSigma, 3.0f*fSigma);
+
+ SkPoint points[8];
+ uint16_t indices[24];
+ int numPoints, numIndices;
+
+ SkRect temp = fOccluder;
+
+ if (!temp.isEmpty() && (srcProxyRect.contains(temp) || temp.intersect(srcProxyRect))) {
+ srcProxyRect.toQuad(points);
+ temp.toQuad(&points[4]);
+ numPoints = 8;
+
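+            // Build a ring of four quads (24 indices) between the outer proxy rect
+            // (points 0-3) and the inner occluder rect (points 4-7), skipping the fully
+            // occluded center.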
+ static const uint16_t ringI[24] = { 0, 1, 5, 5, 4, 0,
+ 1, 2, 6, 6, 5, 1,
+ 2, 3, 7, 7, 6, 2,
+ 3, 0, 4, 4, 7, 3 };
+ memcpy(indices, ringI, sizeof(ringI));
+ numIndices = 24;
+ } else {
+ // full rect case
+ srcProxyRect.toQuad(points);
+ numPoints = 4;
+
+ static const uint16_t fullI[6] = { 0, 1, 2, 0, 2, 3 };
+ memcpy(indices, fullI, sizeof(fullI));
+ numIndices = 6;
+ }
+
+ drawContext->drawVertices(clip, newPaint, viewMatrix, kTriangles_GrPrimitiveType,
+ numPoints, points, nullptr, nullptr, indices, numIndices);
+
+ } else {
+ SkMatrix inverse;
+ if (!viewMatrix.invert(&inverse)) {
+ return false;
+ }
+
+        float extra = 3.f*SkScalarCeilToScalar(xformedSigma-1/6.0f);
+ SkRect proxyRect = devRRect.rect();
+ proxyRect.outset(extra, extra);
+
+
+ drawContext->fillRectWithLocalMatrix(clip, newPaint, SkMatrix::I(), proxyRect, inverse);
+ }
+
+ return true;
+}
+
+bool SkBlurMaskFilterImpl::canFilterMaskGPU(const SkRRect& devRRect,
+ const SkIRect& clipBounds,
+ const SkMatrix& ctm,
+ SkRect* maskRect) const {
+ SkScalar xformedSigma = this->computeXformedSigma(ctm);
+ if (xformedSigma <= 0) {
+ return false;
+ }
+
+ // We always do circles and simple circular rrects on the GPU
+ if (!devRRect.isCircle() && !devRRect.isSimpleCircular()) {
+ static const SkScalar kMIN_GPU_BLUR_SIZE = SkIntToScalar(64);
+ static const SkScalar kMIN_GPU_BLUR_SIGMA = SkIntToScalar(32);
+
+ if (devRRect.width() <= kMIN_GPU_BLUR_SIZE &&
+ devRRect.height() <= kMIN_GPU_BLUR_SIZE &&
+ xformedSigma <= kMIN_GPU_BLUR_SIGMA) {
+ // We prefer to blur small rects with small radii on the CPU.
+ return false;
+ }
+ }
+
+ if (nullptr == maskRect) {
+ // don't need to compute maskRect
+ return true;
+ }
+
+ float sigma3 = 3 * SkScalarToFloat(xformedSigma);
+
+ SkRect clipRect = SkRect::Make(clipBounds);
+ SkRect srcRect(devRRect.rect());
+
+ // Outset srcRect and clipRect by 3 * sigma, to compute affected blur area.
+ srcRect.outset(sigma3, sigma3);
+ clipRect.outset(sigma3, sigma3);
+ if (!srcRect.intersect(clipRect)) {
+ srcRect.setEmpty();
+ }
+ *maskRect = srcRect;
+ return true;
+}
+
+bool SkBlurMaskFilterImpl::filterMaskGPU(GrTexture* src,
+ const SkMatrix& ctm,
+ const SkIRect& maskRect,
+ GrTexture** result) const {
+ // 'maskRect' isn't snapped to the UL corner but the mask in 'src' is.
+ const SkIRect clipRect = SkIRect::MakeWH(maskRect.width(), maskRect.height());
+
+ GrContext* context = src->getContext();
+
+ SkScalar xformedSigma = this->computeXformedSigma(ctm);
+ SkASSERT(xformedSigma > 0);
+
+ // If we're doing a normal blur, we can clobber the pathTexture in the
+ // gaussianBlur. Otherwise, we need to save it for later compositing.
+ bool isNormalBlur = (kNormal_SkBlurStyle == fBlurStyle);
+ sk_sp<GrDrawContext> drawContext(SkGpuBlurUtils::GaussianBlur(context, src, nullptr,
+ clipRect, nullptr,
+ xformedSigma, xformedSigma));
+ if (!drawContext) {
+ return false;
+ }
+
+ if (!isNormalBlur) {
+ GrPaint paint;
+ SkMatrix matrix;
+ matrix.setIDiv(src->width(), src->height());
+ // Blend pathTexture over blurTexture.
+ paint.addCoverageFragmentProcessor(GrSimpleTextureEffect::Make(src, nullptr, matrix));
+ if (kInner_SkBlurStyle == fBlurStyle) {
+ // inner: dst = dst * src
+ paint.setCoverageSetOpXPFactory(SkRegion::kIntersect_Op);
+ } else if (kSolid_SkBlurStyle == fBlurStyle) {
+ // solid: dst = src + dst - src * dst
+ // = src + (1 - src) * dst
+ paint.setCoverageSetOpXPFactory(SkRegion::kUnion_Op);
+ } else if (kOuter_SkBlurStyle == fBlurStyle) {
+ // outer: dst = dst * (1 - src)
+ // = 0 * src + (1 - src) * dst
+ paint.setCoverageSetOpXPFactory(SkRegion::kDifference_Op);
+ } else {
+ paint.setCoverageSetOpXPFactory(SkRegion::kReplace_Op);
+ }
+
+ drawContext->drawRect(GrNoClip(), paint, SkMatrix::I(), SkRect::Make(clipRect));
+ }
+
+ *result = drawContext->asTexture().release();
+ return true;
+}
+
+#endif // SK_SUPPORT_GPU
+
+
+#ifndef SK_IGNORE_TO_STRING
+void SkBlurMaskFilterImpl::toString(SkString* str) const {
+ str->append("SkBlurMaskFilterImpl: (");
+
+ str->append("sigma: ");
+ str->appendScalar(fSigma);
+ str->append(" ");
+
+ static const char* gStyleName[kLastEnum_SkBlurStyle + 1] = {
+ "normal", "solid", "outer", "inner"
+ };
+
+ str->appendf("style: %s ", gStyleName[fBlurStyle]);
+ str->append("flags: (");
+ if (fBlurFlags) {
+ bool needSeparator = false;
+ SkAddFlagToString(str, this->ignoreXform(), "IgnoreXform", &needSeparator);
+ SkAddFlagToString(str,
+ SkToBool(fBlurFlags & SkBlurMaskFilter::kHighQuality_BlurFlag),
+ "HighQuality", &needSeparator);
+ } else {
+ str->append("None");
+ }
+ str->append("))");
+}
+#endif
+
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkBlurMaskFilter)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkBlurMaskFilterImpl)
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END
diff --git a/gfx/skia/skia/src/effects/SkColorCubeFilter.cpp b/gfx/skia/skia/src/effects/SkColorCubeFilter.cpp
new file mode 100644
index 000000000..3eb70d32d
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkColorCubeFilter.cpp
@@ -0,0 +1,329 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkColorCubeFilter.h"
+#include "SkColorPriv.h"
+#include "SkOnce.h"
+#include "SkOpts.h"
+#include "SkReadBuffer.h"
+#include "SkUnPreMultiply.h"
+#include "SkWriteBuffer.h"
+#if SK_SUPPORT_GPU
+#include "GrContext.h"
+#include "GrCoordTransform.h"
+#include "GrInvariantOutput.h"
+#include "GrTexturePriv.h"
+#include "SkGr.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+namespace {
+
+int32_t SkNextColorCubeUniqueID() {
+ static int32_t gColorCubeUniqueID;
+ // do a loop in case our global wraps around, as we never want to return a 0
+ int32_t genID;
+ do {
+ genID = sk_atomic_inc(&gColorCubeUniqueID) + 1;
+ } while (0 == genID);
+ return genID;
+}
+
+} // end namespace
+
+static const int MIN_CUBE_SIZE = 4;
+static const int MAX_CUBE_SIZE = 64;
+
+static bool is_valid_3D_lut(SkData* cubeData, int cubeDimension) {
+ size_t minMemorySize = sizeof(uint8_t) * 4 * cubeDimension * cubeDimension * cubeDimension;
+ return (cubeDimension >= MIN_CUBE_SIZE) && (cubeDimension <= MAX_CUBE_SIZE) &&
+ (nullptr != cubeData) && (cubeData->size() >= minMemorySize);
+}
+
+sk_sp<SkColorFilter> SkColorCubeFilter::Make(sk_sp<SkData> cubeData, int cubeDimension) {
+ if (!is_valid_3D_lut(cubeData.get(), cubeDimension)) {
+ return nullptr;
+ }
+
+ return sk_sp<SkColorFilter>(new SkColorCubeFilter(std::move(cubeData), cubeDimension));
+}
+
+SkColorCubeFilter::SkColorCubeFilter(sk_sp<SkData> cubeData, int cubeDimension)
+ : fCubeData(std::move(cubeData))
+ , fUniqueID(SkNextColorCubeUniqueID())
+ , fCache(cubeDimension)
+{}
+
+uint32_t SkColorCubeFilter::getFlags() const {
+ return this->INHERITED::getFlags() | kAlphaUnchanged_Flag;
+}
+
+SkColorCubeFilter::ColorCubeProcesingCache::ColorCubeProcesingCache(int cubeDimension)
+ : fCubeDimension(cubeDimension) {
+ fColorToIndex[0] = fColorToIndex[1] = nullptr;
+ fColorToFactors[0] = fColorToFactors[1] = nullptr;
+ fColorToScalar = nullptr;
+}
+
+void SkColorCubeFilter::ColorCubeProcesingCache::getProcessingLuts(
+ const int* (*colorToIndex)[2], const SkScalar* (*colorToFactors)[2],
+ const SkScalar** colorToScalar) {
+ fLutsInitOnce(SkColorCubeFilter::ColorCubeProcesingCache::initProcessingLuts, this);
+
+ SkASSERT((fColorToIndex[0] != nullptr) &&
+ (fColorToIndex[1] != nullptr) &&
+ (fColorToFactors[0] != nullptr) &&
+ (fColorToFactors[1] != nullptr) &&
+ (fColorToScalar != nullptr));
+ (*colorToIndex)[0] = fColorToIndex[0];
+ (*colorToIndex)[1] = fColorToIndex[1];
+ (*colorToFactors)[0] = fColorToFactors[0];
+ (*colorToFactors)[1] = fColorToFactors[1];
+ (*colorToScalar) = fColorToScalar;
+}
+
+void SkColorCubeFilter::ColorCubeProcesingCache::initProcessingLuts(
+ SkColorCubeFilter::ColorCubeProcesingCache* cache) {
+ static const SkScalar inv8bit = SkScalarInvert(SkIntToScalar(255));
+
+ // We need 256 int * 2 for fColorToIndex, so a total of 512 int.
+ // We need 256 SkScalar * 2 for fColorToFactors and 256 SkScalar
+ // for fColorToScalar, so a total of 768 SkScalar.
+ cache->fLutStorage.reset(512 * sizeof(int) + 768 * sizeof(SkScalar));
+ uint8_t* storage = cache->fLutStorage.get();
+ cache->fColorToIndex[0] = (int*)storage;
+ cache->fColorToIndex[1] = cache->fColorToIndex[0] + 256;
+ cache->fColorToFactors[0] = (SkScalar*)(storage + (512 * sizeof(int)));
+ cache->fColorToFactors[1] = cache->fColorToFactors[0] + 256;
+ cache->fColorToScalar = cache->fColorToFactors[1] + 256;
+
+ SkScalar size = SkIntToScalar(cache->fCubeDimension);
+ SkScalar scale = (size - SK_Scalar1) * inv8bit;
+
+ for (int i = 0; i < 256; ++i) {
+ SkScalar index = scale * i;
+ cache->fColorToIndex[0][i] = SkScalarFloorToInt(index);
+ cache->fColorToIndex[1][i] = cache->fColorToIndex[0][i] + 1;
+ cache->fColorToScalar[i] = inv8bit * i;
+ if (cache->fColorToIndex[1][i] < cache->fCubeDimension) {
+ cache->fColorToFactors[1][i] = index - SkIntToScalar(cache->fColorToIndex[0][i]);
+ cache->fColorToFactors[0][i] = SK_Scalar1 - cache->fColorToFactors[1][i];
+ } else {
+ cache->fColorToIndex[1][i] = cache->fColorToIndex[0][i];
+ cache->fColorToFactors[0][i] = SK_Scalar1;
+ cache->fColorToFactors[1][i] = 0;
+ }
+ }
+}
+
+void SkColorCubeFilter::filterSpan(const SkPMColor src[], int count, SkPMColor dst[]) const {
+ const int* colorToIndex[2];
+ const SkScalar* colorToFactors[2];
+ const SkScalar* colorToScalar;
+ fCache.getProcessingLuts(&colorToIndex, &colorToFactors, &colorToScalar);
+
+ SkOpts::color_cube_filter_span(src, count, dst, colorToIndex,
+ colorToFactors, fCache.cubeDimension(),
+ (const SkColor*)fCubeData->data());
+}
+
+sk_sp<SkFlattenable> SkColorCubeFilter::CreateProc(SkReadBuffer& buffer) {
+ int cubeDimension = buffer.readInt();
+ auto cubeData(buffer.readByteArrayAsData());
+ if (!buffer.validate(is_valid_3D_lut(cubeData.get(), cubeDimension))) {
+ return nullptr;
+ }
+ return Make(std::move(cubeData), cubeDimension);
+}
+
+void SkColorCubeFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeInt(fCache.cubeDimension());
+ buffer.writeDataAsByteArray(fCubeData.get());
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkColorCubeFilter::toString(SkString* str) const {
+ str->append("SkColorCubeFilter ");
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+#if SK_SUPPORT_GPU
+
+class GrColorCubeEffect : public GrFragmentProcessor {
+public:
+ static sk_sp<GrFragmentProcessor> Make(GrTexture* colorCube) {
+ return (nullptr != colorCube) ? sk_sp<GrFragmentProcessor>(new GrColorCubeEffect(colorCube))
+ : nullptr;
+ }
+
+ virtual ~GrColorCubeEffect();
+
+ const char* name() const override { return "ColorCube"; }
+
+ int colorCubeSize() const { return fColorCubeAccess.getTexture()->width(); }
+
+
+ void onComputeInvariantOutput(GrInvariantOutput*) const override;
+
+ class GLSLProcessor : public GrGLSLFragmentProcessor {
+ public:
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrGLSLCaps&, GrProcessorKeyBuilder*);
+
+ protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+ private:
+ GrGLSLProgramDataManager::UniformHandle fColorCubeSizeUni;
+ GrGLSLProgramDataManager::UniformHandle fColorCubeInvSizeUni;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+ };
+
+private:
+ virtual void onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const override;
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override { return true; }
+
+ GrColorCubeEffect(GrTexture* colorCube);
+
+ GrTextureAccess fColorCubeAccess;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrColorCubeEffect::GrColorCubeEffect(GrTexture* colorCube)
+ : fColorCubeAccess(colorCube, GrTextureParams::kBilerp_FilterMode) {
+ this->initClassID<GrColorCubeEffect>();
+ this->addTextureAccess(&fColorCubeAccess);
+}
+
+GrColorCubeEffect::~GrColorCubeEffect() {
+}
+
+void GrColorCubeEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GLSLProcessor::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrColorCubeEffect::onCreateGLSLInstance() const {
+ return new GLSLProcessor;
+}
+
+void GrColorCubeEffect::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ inout->setToUnknown(GrInvariantOutput::kWill_ReadInput);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrColorCubeEffect::GLSLProcessor::emitCode(EmitArgs& args) {
+ if (nullptr == args.fInputColor) {
+ args.fInputColor = "vec4(1)";
+ }
+
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ fColorCubeSizeUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType, kDefault_GrSLPrecision,
+ "Size");
+ const char* colorCubeSizeUni = uniformHandler->getUniformCStr(fColorCubeSizeUni);
+ fColorCubeInvSizeUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType, kDefault_GrSLPrecision,
+ "InvSize");
+ const char* colorCubeInvSizeUni = uniformHandler->getUniformCStr(fColorCubeInvSizeUni);
+
+ const char* nonZeroAlpha = "nonZeroAlpha";
+ const char* unPMColor = "unPMColor";
+ const char* cubeIdx = "cubeIdx";
+ const char* cCoords1 = "cCoords1";
+ const char* cCoords2 = "cCoords2";
+
+ // Note: if implemented using texture3D in OpenGL ES older than OpenGL ES 3.0,
+ // the shader might need "#extension GL_OES_texture_3D : enable".
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ // Unpremultiply color
+ fragBuilder->codeAppendf("\tfloat %s = max(%s.a, 0.00001);\n", nonZeroAlpha, args.fInputColor);
+ fragBuilder->codeAppendf("\tvec4 %s = vec4(%s.rgb / %s, %s);\n",
+ unPMColor, args.fInputColor, nonZeroAlpha, nonZeroAlpha);
+
+ // Fit input color into the cube.
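+    // r and g are scaled by (size - 1)/size and offset by half a texel so lookups land on
+    // texel centers; b * (size - 1) picks the (fractional) slice index whose two neighboring
+    // slices are blended below.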
+ fragBuilder->codeAppendf(
+ "vec3 %s = vec3(%s.rg * vec2((%s - 1.0) * %s) + vec2(0.5 * %s), %s.b * (%s - 1.0));\n",
+ cubeIdx, unPMColor, colorCubeSizeUni, colorCubeInvSizeUni, colorCubeInvSizeUni,
+ unPMColor, colorCubeSizeUni);
+
+    // Compute the y coord for texture fetches.
+ fragBuilder->codeAppendf("vec2 %s = vec2(%s.r, (floor(%s.b) + %s.g) * %s);\n",
+ cCoords1, cubeIdx, cubeIdx, cubeIdx, colorCubeInvSizeUni);
+ fragBuilder->codeAppendf("vec2 %s = vec2(%s.r, (ceil(%s.b) + %s.g) * %s);\n",
+ cCoords2, cubeIdx, cubeIdx, cubeIdx, colorCubeInvSizeUni);
+
+ // Apply the cube.
+ fragBuilder->codeAppendf("%s = vec4(mix(", args.fOutputColor);
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], cCoords1);
+ fragBuilder->codeAppend(".bgr, ");
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], cCoords2);
+
+ // Premultiply color by alpha. Note that the input alpha is not modified by this shader.
+ fragBuilder->codeAppendf(".bgr, fract(%s.b)) * vec3(%s), %s.a);\n",
+ cubeIdx, nonZeroAlpha, args.fInputColor);
+}
+
+void GrColorCubeEffect::GLSLProcessor::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& proc) {
+ const GrColorCubeEffect& colorCube = proc.cast<GrColorCubeEffect>();
+ SkScalar size = SkIntToScalar(colorCube.colorCubeSize());
+ pdman.set1f(fColorCubeSizeUni, SkScalarToFloat(size));
+ pdman.set1f(fColorCubeInvSizeUni, SkScalarToFloat(SkScalarInvert(size)));
+}
+
+void GrColorCubeEffect::GLSLProcessor::GenKey(const GrProcessor& proc,
+ const GrGLSLCaps&, GrProcessorKeyBuilder* b) {
+}
+
+sk_sp<GrFragmentProcessor> SkColorCubeFilter::asFragmentProcessor(GrContext* context) const {
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey key;
+ GrUniqueKey::Builder builder(&key, kDomain, 2);
+ builder[0] = fUniqueID;
+ builder[1] = fCache.cubeDimension();
+ builder.finish();
+
+ GrSurfaceDesc desc;
+ desc.fWidth = fCache.cubeDimension();
+ desc.fHeight = fCache.cubeDimension() * fCache.cubeDimension();
+ desc.fConfig = kRGBA_8888_GrPixelConfig;
+ desc.fIsMipMapped = false;
+
+ SkAutoTUnref<GrTexture> textureCube(
+ context->textureProvider()->findAndRefTextureByUniqueKey(key));
+ if (!textureCube) {
+ textureCube.reset(context->textureProvider()->createTexture(
+ desc, SkBudgeted::kYes, fCubeData->data(), 0));
+ if (textureCube) {
+ context->textureProvider()->assignUniqueKeyToTexture(key, textureCube);
+ } else {
+ return nullptr;
+ }
+ }
+
+ return sk_sp<GrFragmentProcessor>(GrColorCubeEffect::Make(textureCube));
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/SkColorFilterImageFilter.cpp b/gfx/skia/skia/src/effects/SkColorFilterImageFilter.cpp
new file mode 100644
index 000000000..507a80580
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkColorFilterImageFilter.cpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkColorFilterImageFilter.h"
+
+#include "SkCanvas.h"
+#include "SkColorFilter.h"
+#include "SkReadBuffer.h"
+#include "SkSpecialImage.h"
+#include "SkSpecialSurface.h"
+#include "SkWriteBuffer.h"
+
+sk_sp<SkImageFilter> SkColorFilterImageFilter::Make(sk_sp<SkColorFilter> cf,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect) {
+ if (!cf) {
+ return nullptr;
+ }
+
+ SkColorFilter* inputCF;
+ if (input && input->isColorFilterNode(&inputCF)) {
+ // This is an optimization, as it collapses the hierarchy by just combining the two
+ // colorfilters into a single one, which the new imagefilter will wrap.
+        sk_sp<SkColorFilter> newCF(SkColorFilter::MakeComposeFilter(cf, // can't move; 'cf' is reused below if composing fails
+ sk_sp<SkColorFilter>(inputCF)));
+ if (newCF) {
+ return sk_sp<SkImageFilter>(new SkColorFilterImageFilter(std::move(newCF),
+ sk_ref_sp(input->getInput(0)),
+ cropRect));
+ }
+ }
+
+ return sk_sp<SkImageFilter>(new SkColorFilterImageFilter(std::move(cf),
+ std::move(input),
+ cropRect));
+}
+
+SkColorFilterImageFilter::SkColorFilterImageFilter(sk_sp<SkColorFilter> cf,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fColorFilter(std::move(cf)) {
+}
+
+sk_sp<SkFlattenable> SkColorFilterImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ sk_sp<SkColorFilter> cf(buffer.readColorFilter());
+ return Make(std::move(cf), common.getInput(0), &common.cropRect());
+}
+
+void SkColorFilterImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeFlattenable(fColorFilter.get());
+}
+
+sk_sp<SkSpecialImage> SkColorFilterImageFilter::onFilterImage(SkSpecialImage* source,
+ const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, source, ctx, &inputOffset));
+
+ SkIRect inputBounds;
+ if (fColorFilter->affectsTransparentBlack()) {
+ // If the color filter affects transparent black, the bounds are the entire clip.
+ inputBounds = ctx.clipBounds();
+ } else if (!input) {
+ return nullptr;
+ } else {
+ inputBounds = SkIRect::MakeXYWH(inputOffset.x(), inputOffset.y(),
+ input->width(), input->height());
+ }
+
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, inputBounds, &bounds)) {
+ return nullptr;
+ }
+
+ sk_sp<SkSpecialSurface> surf(source->makeSurface(ctx.outputProperties(), bounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ SkPaint paint;
+
+ paint.setBlendMode(SkBlendMode::kSrc);
+ paint.setColorFilter(fColorFilter);
+
+ // TODO: it may not be necessary to clear or drawPaint inside the input bounds
+ // (see skbug.com/5075)
+ if (fColorFilter->affectsTransparentBlack()) {
+ // The subsequent input->draw() call may not fill the entire canvas. For filters which
+ // affect transparent black, ensure that the filter is applied everywhere.
+ paint.setColor(SK_ColorTRANSPARENT);
+ canvas->drawPaint(paint);
+ paint.setColor(SK_ColorBLACK);
+ } else {
+ canvas->clear(0x0);
+ }
+
+ if (input) {
+ input->draw(canvas,
+ SkIntToScalar(inputOffset.fX - bounds.fLeft),
+ SkIntToScalar(inputOffset.fY - bounds.fTop),
+ &paint);
+ }
+
+ offset->fX = bounds.fLeft;
+ offset->fY = bounds.fTop;
+ return surf->makeImageSnapshot();
+}
+
+bool SkColorFilterImageFilter::onIsColorFilterNode(SkColorFilter** filter) const {
+ SkASSERT(1 == this->countInputs());
+ if (!this->cropRectIsSet()) {
+ if (filter) {
+ *filter = SkRef(fColorFilter.get());
+ }
+ return true;
+ }
+ return false;
+}
+
+bool SkColorFilterImageFilter::affectsTransparentBlack() const {
+ return fColorFilter->affectsTransparentBlack();
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkColorFilterImageFilter::toString(SkString* str) const {
+ str->appendf("SkColorFilterImageFilter: (");
+
+ str->appendf("input: (");
+
+ if (this->getInput(0)) {
+ this->getInput(0)->toString(str);
+ }
+
+ str->appendf(") color filter: ");
+ fColorFilter->toString(str);
+
+ str->append(")");
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/SkColorMatrix.cpp b/gfx/skia/skia/src/effects/SkColorMatrix.cpp
new file mode 100644
index 000000000..45cd50200
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkColorMatrix.cpp
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkColorMatrix.h"
+
+// To detect if we need to apply clamping after applying a matrix, we check if
+// any output component might go outside of [0, 255] for any combination of
+// input components in [0..255].
+// Each output component is an affine transformation of the input component, so
+// the minimum and maximum values are for any combination of minimum or maximum
+// values of input components (i.e. 0 or 255).
+// E.g. if R' = x*R + y*G + z*B + w*A + t
+// Then the maximum value will be for R=255 if x>0 or R=0 if x<0, and the
+// minimum value will be for R=0 if x>0 or R=255 if x<0.
+// Same goes for all components.
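+// For example, the identity row {1, 0, 0, 0, 0} gives maxValue = 1 and minValue = 0, so no
+// clamping is needed, while {1.5, 0, 0, 0, 0} gives maxValue = 1.5 and does need clamping.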
+static bool component_needs_clamping(const SkScalar row[5]) {
+ SkScalar maxValue = row[4] / 255;
+ SkScalar minValue = row[4] / 255;
+ for (int i = 0; i < 4; ++i) {
+ if (row[i] > 0)
+ maxValue += row[i];
+ else
+ minValue += row[i];
+ }
+ return (maxValue > 1) || (minValue < 0);
+}
+
+bool SkColorMatrix::NeedsClamping(const SkScalar matrix[20]) {
+ return component_needs_clamping(matrix)
+ || component_needs_clamping(matrix+5)
+ || component_needs_clamping(matrix+10)
+ || component_needs_clamping(matrix+15);
+}
+
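+// A color matrix is 20 SkScalars laid out as a 4x5 row-major matrix: each row is
+// {R, G, B, A, translate} for one output channel. SetConcat composes two such affine
+// transforms (result = outer applied after inner), as if each had an implicit
+// [0 0 0 0 1] fifth row.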
+void SkColorMatrix::SetConcat(SkScalar result[20],
+ const SkScalar outer[20], const SkScalar inner[20]) {
+ SkScalar tmp[20];
+ SkScalar* target;
+
+ if (outer == result || inner == result) {
+ target = tmp; // will memcpy answer when we're done into result
+ } else {
+ target = result;
+ }
+
+ int index = 0;
+ for (int j = 0; j < 20; j += 5) {
+ for (int i = 0; i < 4; i++) {
+ target[index++] = outer[j + 0] * inner[i + 0] +
+ outer[j + 1] * inner[i + 5] +
+ outer[j + 2] * inner[i + 10] +
+ outer[j + 3] * inner[i + 15];
+ }
+ target[index++] = outer[j + 0] * inner[4] +
+ outer[j + 1] * inner[9] +
+ outer[j + 2] * inner[14] +
+ outer[j + 3] * inner[19] +
+ outer[j + 4];
+ }
+
+ if (target != result) {
+ memcpy(result, target, 20 * sizeof(SkScalar));
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkColorMatrix::setIdentity() {
+ memset(fMat, 0, sizeof(fMat));
+ fMat[kR_Scale] = fMat[kG_Scale] = fMat[kB_Scale] = fMat[kA_Scale] = 1;
+}
+
+void SkColorMatrix::setScale(SkScalar rScale, SkScalar gScale, SkScalar bScale,
+ SkScalar aScale) {
+ memset(fMat, 0, sizeof(fMat));
+ fMat[kR_Scale] = rScale;
+ fMat[kG_Scale] = gScale;
+ fMat[kB_Scale] = bScale;
+ fMat[kA_Scale] = aScale;
+}
+
+void SkColorMatrix::postTranslate(SkScalar dr, SkScalar dg, SkScalar db,
+ SkScalar da) {
+ fMat[kR_Trans] += dr;
+ fMat[kG_Trans] += dg;
+ fMat[kB_Trans] += db;
+ fMat[kA_Trans] += da;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkColorMatrix::setRotate(Axis axis, SkScalar degrees) {
+ SkScalar S, C;
+
+ S = SkScalarSinCos(SkDegreesToRadians(degrees), &C);
+
+ this->setSinCos(axis, S, C);
+}
+
+void SkColorMatrix::setSinCos(Axis axis, SkScalar sine, SkScalar cosine) {
+ SkASSERT((unsigned)axis < 3);
+
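+    // Each row of gRotateIndex lists the four fMat slots that receive cosine, sine, -sine,
+    // cosine for rotation about the R, G, and B axes respectively (rotating about R mixes
+    // the G and B channels, and so on).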
+ static const uint8_t gRotateIndex[] = {
+ 6, 7, 11, 12,
+ 0, 10, 2, 12,
+ 0, 1, 5, 6,
+ };
+ const uint8_t* index = gRotateIndex + axis * 4;
+
+ this->setIdentity();
+ fMat[index[0]] = cosine;
+ fMat[index[1]] = sine;
+ fMat[index[2]] = -sine;
+ fMat[index[3]] = cosine;
+}
+
+void SkColorMatrix::preRotate(Axis axis, SkScalar degrees) {
+ SkColorMatrix tmp;
+ tmp.setRotate(axis, degrees);
+ this->preConcat(tmp);
+}
+
+void SkColorMatrix::postRotate(Axis axis, SkScalar degrees) {
+ SkColorMatrix tmp;
+ tmp.setRotate(axis, degrees);
+ this->postConcat(tmp);
+}
+
+void SkColorMatrix::setConcat(const SkColorMatrix& matA, const SkColorMatrix& matB) {
+ SetConcat(fMat, matA.fMat, matB.fMat);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void setrow(SkScalar row[], SkScalar r, SkScalar g, SkScalar b) {
+ row[0] = r;
+ row[1] = g;
+ row[2] = b;
+}
+
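+// These are approximately the Rec. 709 luma weights; setSaturation() blends each channel
+// between that grayscale projection (sat == 0) and the identity matrix (sat == 1).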
+static const SkScalar kHueR = 0.213f;
+static const SkScalar kHueG = 0.715f;
+static const SkScalar kHueB = 0.072f;
+
+void SkColorMatrix::setSaturation(SkScalar sat) {
+ memset(fMat, 0, sizeof(fMat));
+
+ const SkScalar R = kHueR * (1 - sat);
+ const SkScalar G = kHueG * (1 - sat);
+ const SkScalar B = kHueB * (1 - sat);
+
+ setrow(fMat + 0, R + sat, G, B);
+ setrow(fMat + 5, R, G + sat, B);
+ setrow(fMat + 10, R, G, B + sat);
+ fMat[kA_Scale] = 1;
+}
+
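+// Full-range BT.601 RGB <-> YUV coefficients (the JPEG/JFIF YCbCr variant), used by
+// setRGB2YUV() and setYUV2RGB() below.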
+static const SkScalar kR2Y = 0.299f;
+static const SkScalar kG2Y = 0.587f;
+static const SkScalar kB2Y = 0.114f;
+
+static const SkScalar kR2U = -0.16874f;
+static const SkScalar kG2U = -0.33126f;
+static const SkScalar kB2U = 0.5f;
+
+static const SkScalar kR2V = 0.5f;
+static const SkScalar kG2V = -0.41869f;
+static const SkScalar kB2V = -0.08131f;
+
+void SkColorMatrix::setRGB2YUV() {
+ memset(fMat, 0, sizeof(fMat));
+
+ setrow(fMat + 0, kR2Y, kG2Y, kB2Y);
+ setrow(fMat + 5, kR2U, kG2U, kB2U);
+ setrow(fMat + 10, kR2V, kG2V, kB2V);
+ fMat[kA_Scale] = 1;
+}
+
+static const SkScalar kV2R = 1.402f;
+static const SkScalar kU2G = -0.34414f;
+static const SkScalar kV2G = -0.71414f;
+static const SkScalar kU2B = 1.772f;
+
+void SkColorMatrix::setYUV2RGB() {
+ memset(fMat, 0, sizeof(fMat));
+
+ setrow(fMat + 0, 1, 0, kV2R);
+ setrow(fMat + 5, 1, kU2G, kV2G);
+ setrow(fMat + 10, 1, kU2B, 0);
+ fMat[kA_Scale] = 1;
+}
diff --git a/gfx/skia/skia/src/effects/SkColorMatrixFilter.cpp b/gfx/skia/skia/src/effects/SkColorMatrixFilter.cpp
new file mode 100644
index 000000000..32cb3d916
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkColorMatrixFilter.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkColorMatrixFilter.h"
+
+static SkScalar byte_to_scale(U8CPU byte) {
+ if (0xFF == byte) {
+ // want to get this exact
+ return 1;
+ } else {
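+        // 0.00392156862745f == 1/255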
+ return byte * 0.00392156862745f;
+ }
+}
+
+sk_sp<SkColorFilter> SkColorMatrixFilter::MakeLightingFilter(SkColor mul, SkColor add) {
+ const SkColor opaqueAlphaMask = SK_ColorBLACK;
+ // omit the alpha and compare only the RGB values
+ if (0 == (add & ~opaqueAlphaMask)) {
+ return SkColorFilter::MakeModeFilter(mul | opaqueAlphaMask,
+ SkXfermode::Mode::kModulate_Mode);
+ }
+
+ SkColorMatrix matrix;
+ matrix.setScale(byte_to_scale(SkColorGetR(mul)),
+ byte_to_scale(SkColorGetG(mul)),
+ byte_to_scale(SkColorGetB(mul)),
+ 1);
+ matrix.postTranslate(SkIntToScalar(SkColorGetR(add)),
+ SkIntToScalar(SkColorGetG(add)),
+ SkIntToScalar(SkColorGetB(add)),
+ 0);
+ return SkColorFilter::MakeMatrixFilterRowMajor255(matrix.fMat);
+}
diff --git a/gfx/skia/skia/src/effects/SkComposeImageFilter.cpp b/gfx/skia/skia/src/effects/SkComposeImageFilter.cpp
new file mode 100644
index 000000000..a5b9190a4
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkComposeImageFilter.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkComposeImageFilter.h"
+
+#include "SkReadBuffer.h"
+#include "SkSpecialImage.h"
+#include "SkWriteBuffer.h"
+
+sk_sp<SkImageFilter> SkComposeImageFilter::Make(sk_sp<SkImageFilter> outer,
+ sk_sp<SkImageFilter> inner) {
+ if (!outer) {
+ return inner;
+ }
+ if (!inner) {
+ return outer;
+ }
+ sk_sp<SkImageFilter> inputs[2] = { std::move(outer), std::move(inner) };
+ return sk_sp<SkImageFilter>(new SkComposeImageFilter(inputs));
+}
+
+SkRect SkComposeImageFilter::computeFastBounds(const SkRect& src) const {
+ SkImageFilter* outer = this->getInput(0);
+ SkImageFilter* inner = this->getInput(1);
+
+ return outer->computeFastBounds(inner->computeFastBounds(src));
+}
+
+sk_sp<SkSpecialImage> SkComposeImageFilter::onFilterImage(SkSpecialImage* source,
+ const Context& ctx,
+ SkIPoint* offset) const {
+ // The bounds passed to the inner filter must be filtered by the outer
+ // filter, so that the inner filter produces the pixels that the outer
+ // filter requires as input. This matters if the outer filter moves pixels.
+ SkIRect innerClipBounds;
+ innerClipBounds = this->getInput(0)->filterBounds(ctx.clipBounds(), ctx.ctm());
+ Context innerContext(ctx.ctm(), innerClipBounds, ctx.cache(), ctx.outputProperties());
+ SkIPoint innerOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> inner(this->filterInput(1, source, innerContext, &innerOffset));
+ if (!inner) {
+ return nullptr;
+ }
+
+ SkMatrix outerMatrix(ctx.ctm());
+ outerMatrix.postTranslate(SkIntToScalar(-innerOffset.x()), SkIntToScalar(-innerOffset.y()));
+ SkIRect clipBounds = ctx.clipBounds();
+ clipBounds.offset(-innerOffset.x(), -innerOffset.y());
+ Context outerContext(outerMatrix, clipBounds, ctx.cache(), ctx.outputProperties());
+
+ SkIPoint outerOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> outer(this->filterInput(0, inner.get(), outerContext, &outerOffset));
+ if (!outer) {
+ return nullptr;
+ }
+
+ *offset = innerOffset + outerOffset;
+ return outer;
+}
+
+SkIRect SkComposeImageFilter::onFilterBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection direction) const {
+ SkImageFilter* outer = this->getInput(0);
+ SkImageFilter* inner = this->getInput(1);
+
+ return outer->filterBounds(inner->filterBounds(src, ctm, direction), ctm, direction);
+}
+
+sk_sp<SkFlattenable> SkComposeImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 2);
+ return SkComposeImageFilter::Make(common.getInput(0), common.getInput(1));
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkComposeImageFilter::toString(SkString* str) const {
+ SkImageFilter* outer = getInput(0);
+ SkImageFilter* inner = getInput(1);
+
+ str->appendf("SkComposeImageFilter: (");
+
+ str->appendf("outer: ");
+ outer->toString(str);
+
+ str->appendf("inner: ");
+ inner->toString(str);
+
+ str->appendf(")");
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/SkCornerPathEffect.cpp b/gfx/skia/skia/src/effects/SkCornerPathEffect.cpp
new file mode 100644
index 000000000..909ef3147
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkCornerPathEffect.cpp
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkCornerPathEffect.h"
+#include "SkPath.h"
+#include "SkPoint.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+
+SkCornerPathEffect::SkCornerPathEffect(SkScalar radius) : fRadius(radius) {}
+SkCornerPathEffect::~SkCornerPathEffect() {}
+
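+// Returns, in 'step', the vector from 'a' toward 'b' with length 'radius'. If the segment is
+// shorter than 2*radius the two rounded corners meet at its midpoint, so the half-vector is
+// returned along with false to signal that no straight segment should be drawn between them.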
+static bool ComputeStep(const SkPoint& a, const SkPoint& b, SkScalar radius,
+ SkPoint* step) {
+ SkScalar dist = SkPoint::Distance(a, b);
+
+ *step = b - a;
+ if (dist <= radius * 2) {
+ *step *= SK_ScalarHalf;
+ return false;
+ } else {
+ *step *= radius / dist;
+ return true;
+ }
+}
+
+bool SkCornerPathEffect::filterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec*, const SkRect*) const {
+ if (0 == fRadius) {
+ return false;
+ }
+
+ SkPath::Iter iter(src, false);
+ SkPath::Verb verb, prevVerb = (SkPath::Verb)-1;
+ SkPoint pts[4];
+
+ bool closed;
+ SkPoint moveTo, lastCorner;
+ SkVector firstStep, step;
+ bool prevIsValid = true;
+
+ // to avoid warnings
+ step.set(0, 0);
+ moveTo.set(0, 0);
+ firstStep.set(0, 0);
+ lastCorner.set(0, 0);
+
+ for (;;) {
+ switch (verb = iter.next(pts, false)) {
+ case SkPath::kMove_Verb:
+ // close out the previous (open) contour
+ if (SkPath::kLine_Verb == prevVerb) {
+ dst->lineTo(lastCorner);
+ }
+ closed = iter.isClosedContour();
+ if (closed) {
+ moveTo = pts[0];
+ prevIsValid = false;
+ } else {
+ dst->moveTo(pts[0]);
+ prevIsValid = true;
+ }
+ break;
+ case SkPath::kLine_Verb: {
+ bool drawSegment = ComputeStep(pts[0], pts[1], fRadius, &step);
+ // prev corner
+ if (!prevIsValid) {
+ dst->moveTo(moveTo + step);
+ prevIsValid = true;
+ } else {
+ dst->quadTo(pts[0].fX, pts[0].fY, pts[0].fX + step.fX,
+ pts[0].fY + step.fY);
+ }
+ if (drawSegment) {
+ dst->lineTo(pts[1].fX - step.fX, pts[1].fY - step.fY);
+ }
+ lastCorner = pts[1];
+ prevIsValid = true;
+ break;
+ }
+ case SkPath::kQuad_Verb:
+ // TBD - just replicate the curve for now
+ if (!prevIsValid) {
+ dst->moveTo(pts[0]);
+ prevIsValid = true;
+ }
+ dst->quadTo(pts[1], pts[2]);
+ lastCorner = pts[2];
+ firstStep.set(0, 0);
+ break;
+ case SkPath::kConic_Verb:
+ // TBD - just replicate the curve for now
+ if (!prevIsValid) {
+ dst->moveTo(pts[0]);
+ prevIsValid = true;
+ }
+ dst->conicTo(pts[1], pts[2], iter.conicWeight());
+ lastCorner = pts[2];
+ firstStep.set(0, 0);
+ break;
+ case SkPath::kCubic_Verb:
+ if (!prevIsValid) {
+ dst->moveTo(pts[0]);
+ prevIsValid = true;
+ }
+ // TBD - just replicate the curve for now
+ dst->cubicTo(pts[1], pts[2], pts[3]);
+ lastCorner = pts[3];
+ firstStep.set(0, 0);
+ break;
+ case SkPath::kClose_Verb:
+ if (firstStep.fX || firstStep.fY) {
+ dst->quadTo(lastCorner.fX, lastCorner.fY,
+ lastCorner.fX + firstStep.fX,
+ lastCorner.fY + firstStep.fY);
+ }
+ dst->close();
+ prevIsValid = false;
+ break;
+ case SkPath::kDone_Verb:
+ if (prevIsValid) {
+ dst->lineTo(lastCorner);
+ }
+ goto DONE;
+ }
+
+ if (SkPath::kMove_Verb == prevVerb) {
+ firstStep = step;
+ }
+ prevVerb = verb;
+ }
+DONE:
+ return true;
+}
+
+sk_sp<SkFlattenable> SkCornerPathEffect::CreateProc(SkReadBuffer& buffer) {
+ return SkCornerPathEffect::Make(buffer.readScalar());
+}
+
+void SkCornerPathEffect::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeScalar(fRadius);
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkCornerPathEffect::toString(SkString* str) const {
+ str->appendf("SkCornerPathEffect: (");
+ str->appendf("radius: %.2f", fRadius);
+ str->appendf(")");
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/SkDashPathEffect.cpp b/gfx/skia/skia/src/effects/SkDashPathEffect.cpp
new file mode 100644
index 000000000..1dbf29336
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkDashPathEffect.cpp
@@ -0,0 +1,401 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkDashPathEffect.h"
+
+#include "SkDashPathPriv.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+#include "SkStrokeRec.h"
+
+SkDashPathEffect::SkDashPathEffect(const SkScalar intervals[], int count, SkScalar phase)
+ : fPhase(0)
+ , fInitialDashLength(-1)
+ , fInitialDashIndex(0)
+ , fIntervalLength(0) {
+ SkASSERT(intervals);
+ SkASSERT(count > 1 && SkIsAlign2(count));
+
+ fIntervals = (SkScalar*)sk_malloc_throw(sizeof(SkScalar) * count);
+ fCount = count;
+ for (int i = 0; i < count; i++) {
+ fIntervals[i] = intervals[i];
+ }
+
+ // set the internal data members
+ SkDashPath::CalcDashParameters(phase, fIntervals, fCount,
+ &fInitialDashLength, &fInitialDashIndex, &fIntervalLength, &fPhase);
+}
+
+SkDashPathEffect::~SkDashPathEffect() {
+ sk_free(fIntervals);
+}
+
+bool SkDashPathEffect::filterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec* rec, const SkRect* cullRect) const {
+ return SkDashPath::InternalFilter(dst, src, rec, cullRect, fIntervals, fCount,
+ fInitialDashLength, fInitialDashIndex, fIntervalLength);
+}
+
+static void outset_for_stroke(SkRect* rect, const SkStrokeRec& rec) {
+ SkScalar radius = SkScalarHalf(rec.getWidth());
+ if (0 == radius) {
+ radius = SK_Scalar1; // hairlines
+ }
+ if (SkPaint::kMiter_Join == rec.getJoin()) {
+ radius = SkScalarMul(radius, rec.getMiter());
+ }
+ rect->outset(radius, radius);
+}
+
+// Attempt to trim the line to minimally cover the cull rect (currently
+// only works for horizontal and vertical lines).
+// Return true if processing should continue; false otherwise.
+static bool cull_line(SkPoint* pts, const SkStrokeRec& rec,
+ const SkMatrix& ctm, const SkRect* cullRect,
+ const SkScalar intervalLength) {
+ if (nullptr == cullRect) {
+ SkASSERT(false); // Shouldn't ever occur in practice
+ return false;
+ }
+
+ SkScalar dx = pts[1].x() - pts[0].x();
+ SkScalar dy = pts[1].y() - pts[0].y();
+
+ if ((dx && dy) || (!dx && !dy)) {
+ return false;
+ }
+
+ SkRect bounds = *cullRect;
+ outset_for_stroke(&bounds, rec);
+
+ // cullRect is in device space while pts are in the local coordinate system
+ // defined by the ctm. We want our answer in the local coordinate system.
+
+ SkASSERT(ctm.rectStaysRect());
+ SkMatrix inv;
+ if (!ctm.invert(&inv)) {
+ return false;
+ }
+
+ inv.mapRect(&bounds);
+
+ if (dx) {
+ SkASSERT(dx && !dy);
+ SkScalar minX = pts[0].fX;
+ SkScalar maxX = pts[1].fX;
+
+ if (dx < 0) {
+ SkTSwap(minX, maxX);
+ }
+
+ SkASSERT(minX < maxX);
+ if (maxX <= bounds.fLeft || minX >= bounds.fRight) {
+ return false;
+ }
+
+ // Now we actually perform the chop, removing the excess to the left and
+ // right of the bounds (keeping our new line "in phase" with the dash,
+        // hence the (mod intervalLength)).
+
+ if (minX < bounds.fLeft) {
+ minX = bounds.fLeft - SkScalarMod(bounds.fLeft - minX, intervalLength);
+ }
+ if (maxX > bounds.fRight) {
+ maxX = bounds.fRight + SkScalarMod(maxX - bounds.fRight, intervalLength);
+ }
+
+ SkASSERT(maxX > minX);
+ if (dx < 0) {
+ SkTSwap(minX, maxX);
+ }
+ pts[0].fX = minX;
+ pts[1].fX = maxX;
+ } else {
+ SkASSERT(dy && !dx);
+ SkScalar minY = pts[0].fY;
+ SkScalar maxY = pts[1].fY;
+
+ if (dy < 0) {
+ SkTSwap(minY, maxY);
+ }
+
+ SkASSERT(minY < maxY);
+ if (maxY <= bounds.fTop || minY >= bounds.fBottom) {
+ return false;
+ }
+
+ // Now we actually perform the chop, removing the excess to the top and
+ // bottom of the bounds (keeping our new line "in phase" with the dash,
+        // hence the (mod intervalLength)).
+
+ if (minY < bounds.fTop) {
+ minY = bounds.fTop - SkScalarMod(bounds.fTop - minY, intervalLength);
+ }
+ if (maxY > bounds.fBottom) {
+ maxY = bounds.fBottom + SkScalarMod(maxY - bounds.fBottom, intervalLength);
+ }
+
+ SkASSERT(maxY > minY);
+ if (dy < 0) {
+ SkTSwap(minY, maxY);
+ }
+ pts[0].fY = minY;
+ pts[1].fY = maxY;
+ }
+
+ return true;
+}
+
+// Currently asPoints is more restrictive than it needs to be. In the future
+// we need to:
+// allow kRound_Cap capping (could allow rotations in the matrix with this)
+// allow paths to be returned
+bool SkDashPathEffect::asPoints(PointData* results,
+ const SkPath& src,
+ const SkStrokeRec& rec,
+ const SkMatrix& matrix,
+ const SkRect* cullRect) const {
+ // width < 0 -> fill && width == 0 -> hairline so requiring width > 0 rules both out
+ if (0 >= rec.getWidth()) {
+ return false;
+ }
+
+ // TODO: this next test could be eased up. We could allow any number of
+ // intervals as long as all the ons match and all the offs match.
+ // Additionally, they do not necessarily need to be integers.
+ // We cannot allow arbitrary intervals since we want the returned points
+ // to be uniformly sized.
+ if (fCount != 2 ||
+ !SkScalarNearlyEqual(fIntervals[0], fIntervals[1]) ||
+ !SkScalarIsInt(fIntervals[0]) ||
+ !SkScalarIsInt(fIntervals[1])) {
+ return false;
+ }
+
+ SkPoint pts[2];
+
+ if (!src.isLine(pts)) {
+ return false;
+ }
+
+ // TODO: this test could be eased up to allow circles
+ if (SkPaint::kButt_Cap != rec.getCap()) {
+ return false;
+ }
+
+ // TODO: this test could be eased up for circles. Rotations could be allowed.
+ if (!matrix.rectStaysRect()) {
+ return false;
+ }
+
+ // See if the line can be limited to something plausible.
+ if (!cull_line(pts, rec, matrix, cullRect, fIntervalLength)) {
+ return false;
+ }
+
+ SkScalar length = SkPoint::Distance(pts[1], pts[0]);
+
+ SkVector tangent = pts[1] - pts[0];
+ if (tangent.isZero()) {
+ return false;
+ }
+
+ tangent.scale(SkScalarInvert(length));
+
+ // TODO: make this test for horizontal & vertical lines more robust
+ bool isXAxis = true;
+ if (SkScalarNearlyEqual(SK_Scalar1, tangent.fX) ||
+ SkScalarNearlyEqual(-SK_Scalar1, tangent.fX)) {
+ results->fSize.set(SkScalarHalf(fIntervals[0]), SkScalarHalf(rec.getWidth()));
+ } else if (SkScalarNearlyEqual(SK_Scalar1, tangent.fY) ||
+ SkScalarNearlyEqual(-SK_Scalar1, tangent.fY)) {
+ results->fSize.set(SkScalarHalf(rec.getWidth()), SkScalarHalf(fIntervals[0]));
+ isXAxis = false;
+ } else if (SkPaint::kRound_Cap != rec.getCap()) {
+ // Angled lines don't have axis-aligned boxes.
+ return false;
+ }
+
+ if (results) {
+ results->fFlags = 0;
+ SkScalar clampedInitialDashLength = SkMinScalar(length, fInitialDashLength);
+
+ if (SkPaint::kRound_Cap == rec.getCap()) {
+ results->fFlags |= PointData::kCircles_PointFlag;
+ }
+
+ results->fNumPoints = 0;
+ SkScalar len2 = length;
+ if (clampedInitialDashLength > 0 || 0 == fInitialDashIndex) {
+ SkASSERT(len2 >= clampedInitialDashLength);
+ if (0 == fInitialDashIndex) {
+ if (clampedInitialDashLength > 0) {
+ if (clampedInitialDashLength >= fIntervals[0]) {
+ ++results->fNumPoints; // partial first dash
+ }
+ len2 -= clampedInitialDashLength;
+ }
+ len2 -= fIntervals[1]; // also skip first space
+ if (len2 < 0) {
+ len2 = 0;
+ }
+ } else {
+ len2 -= clampedInitialDashLength; // skip initial partial empty
+ }
+ }
+ // Too many midpoints can cause results->fNumPoints to overflow or
+ // otherwise cause the results->fPoints allocation below to OOM.
+ // Cap it to a sane value.
+ SkScalar numIntervals = len2 / fIntervalLength;
+ if (!SkScalarIsFinite(numIntervals) || numIntervals > SkDashPath::kMaxDashCount) {
+ return false;
+ }
+ int numMidPoints = SkScalarFloorToInt(numIntervals);
+ results->fNumPoints += numMidPoints;
+ len2 -= numMidPoints * fIntervalLength;
+ bool partialLast = false;
+ if (len2 > 0) {
+ if (len2 < fIntervals[0]) {
+ partialLast = true;
+ } else {
+ ++numMidPoints;
+ ++results->fNumPoints;
+ }
+ }
+
+ results->fPoints = new SkPoint[results->fNumPoints];
+
+ SkScalar distance = 0;
+ int curPt = 0;
+
+ if (clampedInitialDashLength > 0 || 0 == fInitialDashIndex) {
+ SkASSERT(clampedInitialDashLength <= length);
+
+ if (0 == fInitialDashIndex) {
+ if (clampedInitialDashLength > 0) {
+ // partial first block
+ SkASSERT(SkPaint::kRound_Cap != rec.getCap()); // can't handle partial circles
+ SkScalar x = pts[0].fX + SkScalarMul(tangent.fX, SkScalarHalf(clampedInitialDashLength));
+ SkScalar y = pts[0].fY + SkScalarMul(tangent.fY, SkScalarHalf(clampedInitialDashLength));
+ SkScalar halfWidth, halfHeight;
+ if (isXAxis) {
+ halfWidth = SkScalarHalf(clampedInitialDashLength);
+ halfHeight = SkScalarHalf(rec.getWidth());
+ } else {
+ halfWidth = SkScalarHalf(rec.getWidth());
+ halfHeight = SkScalarHalf(clampedInitialDashLength);
+ }
+ if (clampedInitialDashLength < fIntervals[0]) {
+ // This one will not be like the others
+ results->fFirst.addRect(x - halfWidth, y - halfHeight,
+ x + halfWidth, y + halfHeight);
+ } else {
+ SkASSERT(curPt < results->fNumPoints);
+ results->fPoints[curPt].set(x, y);
+ ++curPt;
+ }
+
+ distance += clampedInitialDashLength;
+ }
+
+ distance += fIntervals[1]; // skip over the next blank block too
+ } else {
+ distance += clampedInitialDashLength;
+ }
+ }
+
+ if (0 != numMidPoints) {
+ distance += SkScalarHalf(fIntervals[0]);
+
+ for (int i = 0; i < numMidPoints; ++i) {
+ SkScalar x = pts[0].fX + SkScalarMul(tangent.fX, distance);
+ SkScalar y = pts[0].fY + SkScalarMul(tangent.fY, distance);
+
+ SkASSERT(curPt < results->fNumPoints);
+ results->fPoints[curPt].set(x, y);
+ ++curPt;
+
+ distance += fIntervalLength;
+ }
+
+ distance -= SkScalarHalf(fIntervals[0]);
+ }
+
+ if (partialLast) {
+ // partial final block
+ SkASSERT(SkPaint::kRound_Cap != rec.getCap()); // can't handle partial circles
+ SkScalar temp = length - distance;
+ SkASSERT(temp < fIntervals[0]);
+ SkScalar x = pts[0].fX + SkScalarMul(tangent.fX, distance + SkScalarHalf(temp));
+ SkScalar y = pts[0].fY + SkScalarMul(tangent.fY, distance + SkScalarHalf(temp));
+ SkScalar halfWidth, halfHeight;
+ if (isXAxis) {
+ halfWidth = SkScalarHalf(temp);
+ halfHeight = SkScalarHalf(rec.getWidth());
+ } else {
+ halfWidth = SkScalarHalf(rec.getWidth());
+ halfHeight = SkScalarHalf(temp);
+ }
+ results->fLast.addRect(x - halfWidth, y - halfHeight,
+ x + halfWidth, y + halfHeight);
+ }
+
+ SkASSERT(curPt == results->fNumPoints);
+ }
+
+ return true;
+}
+
+SkPathEffect::DashType SkDashPathEffect::asADash(DashInfo* info) const {
+ if (info) {
+ if (info->fCount >= fCount && info->fIntervals) {
+ memcpy(info->fIntervals, fIntervals, fCount * sizeof(SkScalar));
+ }
+ info->fCount = fCount;
+ info->fPhase = fPhase;
+ }
+ return kDash_DashType;
+}
+
+void SkDashPathEffect::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeScalar(fPhase);
+ buffer.writeScalarArray(fIntervals, fCount);
+}
+
+sk_sp<SkFlattenable> SkDashPathEffect::CreateProc(SkReadBuffer& buffer) {
+ const SkScalar phase = buffer.readScalar();
+ uint32_t count = buffer.getArrayCount();
+ SkAutoSTArray<32, SkScalar> intervals(count);
+ if (buffer.readScalarArray(intervals.get(), count)) {
+ return Make(intervals.get(), SkToInt(count), phase);
+ }
+ return nullptr;
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkDashPathEffect::toString(SkString* str) const {
+ str->appendf("SkDashPathEffect: (");
+ str->appendf("count: %d phase %.2f intervals: (", fCount, fPhase);
+ for (int i = 0; i < fCount; ++i) {
+ str->appendf("%.2f", fIntervals[i]);
+ if (i < fCount-1) {
+ str->appendf(", ");
+ }
+ }
+ str->appendf("))");
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkPathEffect> SkDashPathEffect::Make(const SkScalar intervals[], int count, SkScalar phase) {
+ if (!SkDashPath::ValidDashPath(phase, intervals, count)) {
+ return nullptr;
+ }
+ return sk_sp<SkPathEffect>(new SkDashPathEffect(intervals, count, phase));
+}
diff --git a/gfx/skia/skia/src/effects/SkDiscretePathEffect.cpp b/gfx/skia/skia/src/effects/SkDiscretePathEffect.cpp
new file mode 100644
index 000000000..4525c1e32
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkDiscretePathEffect.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDiscretePathEffect.h"
+#include "SkFixed.h"
+#include "SkPathMeasure.h"
+#include "SkReadBuffer.h"
+#include "SkStrokeRec.h"
+#include "SkWriteBuffer.h"
+
+sk_sp<SkPathEffect> SkDiscretePathEffect::Make(SkScalar segLength, SkScalar deviation,
+ uint32_t seedAssist) {
+ if (!SkScalarIsFinite(segLength) || !SkScalarIsFinite(deviation)) {
+ return nullptr;
+ }
+ if (segLength <= SK_ScalarNearlyZero) {
+ return nullptr;
+ }
+ return sk_sp<SkPathEffect>(new SkDiscretePathEffect(segLength, deviation, seedAssist));
+}
+
+static void Perterb(SkPoint* p, const SkVector& tangent, SkScalar scale) {
+ SkVector normal = tangent;
+ normal.rotateCCW();
+ normal.setLength(scale);
+ *p += normal;
+}
+
+SkDiscretePathEffect::SkDiscretePathEffect(SkScalar segLength,
+ SkScalar deviation,
+ uint32_t seedAssist)
+ : fSegLength(segLength), fPerterb(deviation), fSeedAssist(seedAssist)
+{
+}
+
+/** \class LCGRandom
+
+    Utility class that implements pseudo-random 32-bit numbers using a fast
+    linear congruential equation. Unlike rand(), this class holds its own seed (initially
+ set to 0), so that multiple instances can be used with no side-effects.
+
+ Copied from the original implementation of SkRandom. Only contains the
+ methods used by SkDiscretePathEffect::filterPath, with methods that were
+ not called directly moved to private.
+*/
+
+class LCGRandom {
+public:
+ LCGRandom(uint32_t seed) : fSeed(seed) {}
+
+ /** Return the next pseudo random number expressed as a SkScalar
+ in the range [-SK_Scalar1..SK_Scalar1).
+ */
+ SkScalar nextSScalar1() { return SkFixedToScalar(this->nextSFixed1()); }
+
+private:
+ /** Return the next pseudo random number as an unsigned 32bit value.
+ */
+ uint32_t nextU() { uint32_t r = fSeed * kMul + kAdd; fSeed = r; return r; }
+
+ /** Return the next pseudo random number as a signed 32bit value.
+ */
+ int32_t nextS() { return (int32_t)this->nextU(); }
+
+ /** Return the next pseudo random number expressed as a signed SkFixed
+ in the range [-SK_Fixed1..SK_Fixed1).
+ */
+ SkFixed nextSFixed1() { return this->nextS() >> 15; }
+
+ // See "Numerical Recipes in C", 1992 page 284 for these constants
+ enum {
+ kMul = 1664525,
+ kAdd = 1013904223
+ };
+ uint32_t fSeed;
+};
+
+bool SkDiscretePathEffect::filterPath(SkPath* dst, const SkPath& src,
+ SkStrokeRec* rec, const SkRect*) const {
+ bool doFill = rec->isFillStyle();
+
+ SkPathMeasure meas(src, doFill);
+
+ /* Caller may supply their own seed assist, which by default is 0 */
+ uint32_t seed = fSeedAssist ^ SkScalarRoundToInt(meas.getLength());
+
+ LCGRandom rand(seed ^ ((seed << 16) | (seed >> 16)));
+ SkScalar scale = fPerterb;
+ SkPoint p;
+ SkVector v;
+
+ do {
+ SkScalar length = meas.getLength();
+
+ if (fSegLength * (2 + doFill) > length) {
+ meas.getSegment(0, length, dst, true); // too short for us to mangle
+ } else {
+ int n = SkScalarRoundToInt(length / fSegLength);
+ SkScalar delta = length / n;
+ SkScalar distance = 0;
+
+ if (meas.isClosed()) {
+ n -= 1;
+ distance += delta/2;
+ }
+
+ if (meas.getPosTan(distance, &p, &v)) {
+ Perterb(&p, v, SkScalarMul(rand.nextSScalar1(), scale));
+ dst->moveTo(p);
+ }
+ while (--n >= 0) {
+ distance += delta;
+ if (meas.getPosTan(distance, &p, &v)) {
+ Perterb(&p, v, SkScalarMul(rand.nextSScalar1(), scale));
+ dst->lineTo(p);
+ }
+ }
+ if (meas.isClosed()) {
+ dst->close();
+ }
+ }
+ } while (meas.nextContour());
+ return true;
+}
+
+sk_sp<SkFlattenable> SkDiscretePathEffect::CreateProc(SkReadBuffer& buffer) {
+ SkScalar segLength = buffer.readScalar();
+ SkScalar perterb = buffer.readScalar();
+ uint32_t seed = buffer.readUInt();
+ return Make(segLength, perterb, seed);
+}
+
+void SkDiscretePathEffect::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeScalar(fSegLength);
+ buffer.writeScalar(fPerterb);
+ buffer.writeUInt(fSeedAssist);
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkDiscretePathEffect::toString(SkString* str) const {
+ str->appendf("SkDiscretePathEffect: (");
+ str->appendf("segLength: %.2f deviation: %.2f seed %d", fSegLength, fPerterb, fSeedAssist);
+ str->append(")");
+}
+#endif
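A standalone sketch (not part of the patch) of the LCGRandom recurrence that SkDiscretePathEffect::filterPath uses above: the same "Numerical Recipes" constants, with the signed result shifted into 16.16 fixed point by nextSFixed1() and then scaled to the real interval [-1, 1) the way nextSScalar1() does. Plain C++ stands in for the Sk* types here.

#include <cstdint>
#include <cstdio>

int main() {
    uint32_t seed = 0;
    for (int i = 0; i < 4; ++i) {
        seed = seed * 1664525u + 1013904223u;    // nextU(): the linear congruential step
        int32_t signedVal = (int32_t)seed;       // nextS()
        int32_t fixed1616 = signedVal >> 15;     // nextSFixed1(): range [-65536, 65536)
        double  scalar    = fixed1616 / 65536.0; // nextSScalar1(): range [-1, 1)
        std::printf("%+.6f\n", scalar);
    }
    return 0;
}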
diff --git a/gfx/skia/skia/src/effects/SkDisplacementMapEffect.cpp b/gfx/skia/skia/src/effects/SkDisplacementMapEffect.cpp
new file mode 100644
index 000000000..88f502666
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkDisplacementMapEffect.cpp
@@ -0,0 +1,641 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkDisplacementMapEffect.h"
+
+#include "SkBitmap.h"
+#include "SkReadBuffer.h"
+#include "SkSpecialImage.h"
+#include "SkWriteBuffer.h"
+#include "SkUnPreMultiply.h"
+#include "SkColorPriv.h"
+#if SK_SUPPORT_GPU
+#include "GrContext.h"
+#include "GrDrawContext.h"
+#include "GrCoordTransform.h"
+#include "GrInvariantOutput.h"
+#include "SkGr.h"
+#include "SkGrPriv.h"
+#include "effects/GrTextureDomain.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#endif
+
+namespace {
+
+#define kChannelSelectorKeyBits 3 // Max value is 4, so 3 bits are required at most
+
+template<SkDisplacementMapEffect::ChannelSelectorType type>
+uint32_t getValue(SkColor, const SkUnPreMultiply::Scale*) {
+ SkDEBUGFAIL("Unknown channel selector");
+ return 0;
+}
+
+template<> uint32_t getValue<SkDisplacementMapEffect::kR_ChannelSelectorType>(
+ SkColor l, const SkUnPreMultiply::Scale* table) {
+ return SkUnPreMultiply::ApplyScale(table[SkGetPackedA32(l)], SkGetPackedR32(l));
+}
+
+template<> uint32_t getValue<SkDisplacementMapEffect::kG_ChannelSelectorType>(
+ SkColor l, const SkUnPreMultiply::Scale* table) {
+ return SkUnPreMultiply::ApplyScale(table[SkGetPackedA32(l)], SkGetPackedG32(l));
+}
+
+template<> uint32_t getValue<SkDisplacementMapEffect::kB_ChannelSelectorType>(
+ SkColor l, const SkUnPreMultiply::Scale* table) {
+ return SkUnPreMultiply::ApplyScale(table[SkGetPackedA32(l)], SkGetPackedB32(l));
+}
+
+template<> uint32_t getValue<SkDisplacementMapEffect::kA_ChannelSelectorType>(
+ SkColor l, const SkUnPreMultiply::Scale*) {
+ return SkGetPackedA32(l);
+}
+
+template<SkDisplacementMapEffect::ChannelSelectorType typeX,
+ SkDisplacementMapEffect::ChannelSelectorType typeY>
+void computeDisplacement(const SkVector& scale, SkBitmap* dst,
+ const SkBitmap& displ, const SkIPoint& offset,
+ const SkBitmap& src,
+ const SkIRect& bounds) {
+ static const SkScalar Inv8bit = SkScalarInvert(255);
+ const int srcW = src.width();
+ const int srcH = src.height();
+ const SkVector scaleForColor = SkVector::Make(SkScalarMul(scale.fX, Inv8bit),
+ SkScalarMul(scale.fY, Inv8bit));
+ const SkVector scaleAdj = SkVector::Make(SK_ScalarHalf - SkScalarMul(scale.fX, SK_ScalarHalf),
+ SK_ScalarHalf - SkScalarMul(scale.fY, SK_ScalarHalf));
+ const SkUnPreMultiply::Scale* table = SkUnPreMultiply::GetScaleTable();
+ SkPMColor* dstPtr = dst->getAddr32(0, 0);
+ for (int y = bounds.top(); y < bounds.bottom(); ++y) {
+ const SkPMColor* displPtr = displ.getAddr32(bounds.left() + offset.fX, y + offset.fY);
+ for (int x = bounds.left(); x < bounds.right(); ++x, ++displPtr) {
+ const SkScalar displX = SkScalarMul(scaleForColor.fX,
+ SkIntToScalar(getValue<typeX>(*displPtr, table))) + scaleAdj.fX;
+ const SkScalar displY = SkScalarMul(scaleForColor.fY,
+ SkIntToScalar(getValue<typeY>(*displPtr, table))) + scaleAdj.fY;
+ // Truncate the displacement values
+ const int srcX = x + SkScalarTruncToInt(displX);
+ const int srcY = y + SkScalarTruncToInt(displY);
+ *dstPtr++ = ((srcX < 0) || (srcX >= srcW) || (srcY < 0) || (srcY >= srcH)) ?
+ 0 : *(src.getAddr32(srcX, srcY));
+ }
+ }
+}
+
+template<SkDisplacementMapEffect::ChannelSelectorType typeX>
+void computeDisplacement(SkDisplacementMapEffect::ChannelSelectorType yChannelSelector,
+ const SkVector& scale, SkBitmap* dst,
+ const SkBitmap& displ, const SkIPoint& offset,
+ const SkBitmap& src,
+ const SkIRect& bounds) {
+ switch (yChannelSelector) {
+ case SkDisplacementMapEffect::kR_ChannelSelectorType:
+ computeDisplacement<typeX, SkDisplacementMapEffect::kR_ChannelSelectorType>(
+ scale, dst, displ, offset, src, bounds);
+ break;
+ case SkDisplacementMapEffect::kG_ChannelSelectorType:
+ computeDisplacement<typeX, SkDisplacementMapEffect::kG_ChannelSelectorType>(
+ scale, dst, displ, offset, src, bounds);
+ break;
+ case SkDisplacementMapEffect::kB_ChannelSelectorType:
+ computeDisplacement<typeX, SkDisplacementMapEffect::kB_ChannelSelectorType>(
+ scale, dst, displ, offset, src, bounds);
+ break;
+ case SkDisplacementMapEffect::kA_ChannelSelectorType:
+ computeDisplacement<typeX, SkDisplacementMapEffect::kA_ChannelSelectorType>(
+ scale, dst, displ, offset, src, bounds);
+ break;
+ case SkDisplacementMapEffect::kUnknown_ChannelSelectorType:
+ default:
+ SkDEBUGFAIL("Unknown Y channel selector");
+ }
+}
+
+void computeDisplacement(SkDisplacementMapEffect::ChannelSelectorType xChannelSelector,
+ SkDisplacementMapEffect::ChannelSelectorType yChannelSelector,
+ const SkVector& scale, SkBitmap* dst,
+ const SkBitmap& displ, const SkIPoint& offset,
+ const SkBitmap& src,
+ const SkIRect& bounds) {
+ switch (xChannelSelector) {
+ case SkDisplacementMapEffect::kR_ChannelSelectorType:
+ computeDisplacement<SkDisplacementMapEffect::kR_ChannelSelectorType>(
+ yChannelSelector, scale, dst, displ, offset, src, bounds);
+ break;
+ case SkDisplacementMapEffect::kG_ChannelSelectorType:
+ computeDisplacement<SkDisplacementMapEffect::kG_ChannelSelectorType>(
+ yChannelSelector, scale, dst, displ, offset, src, bounds);
+ break;
+ case SkDisplacementMapEffect::kB_ChannelSelectorType:
+ computeDisplacement<SkDisplacementMapEffect::kB_ChannelSelectorType>(
+ yChannelSelector, scale, dst, displ, offset, src, bounds);
+ break;
+ case SkDisplacementMapEffect::kA_ChannelSelectorType:
+ computeDisplacement<SkDisplacementMapEffect::kA_ChannelSelectorType>(
+ yChannelSelector, scale, dst, displ, offset, src, bounds);
+ break;
+ case SkDisplacementMapEffect::kUnknown_ChannelSelectorType:
+ default:
+ SkDEBUGFAIL("Unknown X channel selector");
+ }
+}
+
+bool channel_selector_type_is_valid(SkDisplacementMapEffect::ChannelSelectorType cst) {
+ switch (cst) {
+ case SkDisplacementMapEffect::kUnknown_ChannelSelectorType:
+ case SkDisplacementMapEffect::kR_ChannelSelectorType:
+ case SkDisplacementMapEffect::kG_ChannelSelectorType:
+ case SkDisplacementMapEffect::kB_ChannelSelectorType:
+ case SkDisplacementMapEffect::kA_ChannelSelectorType:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+} // end namespace
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImageFilter> SkDisplacementMapEffect::Make(ChannelSelectorType xChannelSelector,
+ ChannelSelectorType yChannelSelector,
+ SkScalar scale,
+ sk_sp<SkImageFilter> displacement,
+ sk_sp<SkImageFilter> color,
+ const CropRect* cropRect) {
+ if (!channel_selector_type_is_valid(xChannelSelector) ||
+ !channel_selector_type_is_valid(yChannelSelector)) {
+ return nullptr;
+ }
+
+ sk_sp<SkImageFilter> inputs[2] = { std::move(displacement), std::move(color) };
+ return sk_sp<SkImageFilter>(new SkDisplacementMapEffect(xChannelSelector,
+ yChannelSelector,
+ scale, inputs, cropRect));
+}
+
+SkDisplacementMapEffect::SkDisplacementMapEffect(ChannelSelectorType xChannelSelector,
+ ChannelSelectorType yChannelSelector,
+ SkScalar scale,
+ sk_sp<SkImageFilter> inputs[2],
+ const CropRect* cropRect)
+ : INHERITED(inputs, 2, cropRect)
+ , fXChannelSelector(xChannelSelector)
+ , fYChannelSelector(yChannelSelector)
+ , fScale(scale) {
+}
+
+SkDisplacementMapEffect::~SkDisplacementMapEffect() {
+}
+
+sk_sp<SkFlattenable> SkDisplacementMapEffect::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 2);
+ ChannelSelectorType xsel = (ChannelSelectorType)buffer.readInt();
+ ChannelSelectorType ysel = (ChannelSelectorType)buffer.readInt();
+ SkScalar scale = buffer.readScalar();
+ return Make(xsel, ysel, scale,
+ common.getInput(0), common.getInput(1),
+ &common.cropRect());
+}
+
+void SkDisplacementMapEffect::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeInt((int) fXChannelSelector);
+ buffer.writeInt((int) fYChannelSelector);
+ buffer.writeScalar(fScale);
+}
+
+#if SK_SUPPORT_GPU
+class GrDisplacementMapEffect : public GrFragmentProcessor {
+public:
+ static sk_sp<GrFragmentProcessor> Make(
+ SkDisplacementMapEffect::ChannelSelectorType xChannelSelector,
+ SkDisplacementMapEffect::ChannelSelectorType yChannelSelector, SkVector scale,
+ GrTexture* displacement, const SkMatrix& offsetMatrix, GrTexture* color,
+ const SkISize& colorDimensions) {
+ return sk_sp<GrFragmentProcessor>(
+ new GrDisplacementMapEffect(xChannelSelector, yChannelSelector, scale, displacement,
+ offsetMatrix, color, colorDimensions));
+ }
+
+ virtual ~GrDisplacementMapEffect();
+
+ SkDisplacementMapEffect::ChannelSelectorType xChannelSelector() const {
+ return fXChannelSelector;
+ }
+ SkDisplacementMapEffect::ChannelSelectorType yChannelSelector() const {
+ return fYChannelSelector;
+ }
+ const SkVector& scale() const { return fScale; }
+
+ const char* name() const override { return "DisplacementMap"; }
+ const GrTextureDomain& domain() const { return fDomain; }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ GrDisplacementMapEffect(SkDisplacementMapEffect::ChannelSelectorType xChannelSelector,
+ SkDisplacementMapEffect::ChannelSelectorType yChannelSelector,
+ const SkVector& scale,
+ GrTexture* displacement, const SkMatrix& offsetMatrix,
+ GrTexture* color,
+ const SkISize& colorDimensions);
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ GrCoordTransform fDisplacementTransform;
+ GrTextureAccess fDisplacementAccess;
+ GrCoordTransform fColorTransform;
+ GrTextureDomain fDomain;
+ GrTextureAccess fColorAccess;
+ SkDisplacementMapEffect::ChannelSelectorType fXChannelSelector;
+ SkDisplacementMapEffect::ChannelSelectorType fYChannelSelector;
+ SkVector fScale;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
+
+sk_sp<SkSpecialImage> SkDisplacementMapEffect::onFilterImage(SkSpecialImage* source,
+ const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint colorOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> color(this->filterInput(1, source, ctx, &colorOffset));
+ if (!color) {
+ return nullptr;
+ }
+
+ SkIPoint displOffset = SkIPoint::Make(0, 0);
+ // Creation of the displacement map should happen in a non-colorspace aware context. This
+ // texture is a purely mathematical construct, so we want to just operate on the stored
+ // values. Consider:
+ // User supplies an sRGB displacement map. If we're rendering to a wider gamut, then we could
+ // end up filtering the displacement map into that gamut, which has the effect of reducing
+ // the amount of displacement that it represents (as encoded values move away from the
+ // primaries).
+ // With a more complex DAG attached to this input, it's not clear that working in ANY specific
+ // color space makes sense, so we ignore color spaces (and gamma) entirely. This may not be
+ // ideal, but it's at least consistent and predictable.
+ Context displContext(ctx.ctm(), ctx.clipBounds(), ctx.cache(), OutputProperties(nullptr));
+ sk_sp<SkSpecialImage> displ(this->filterInput(0, source, displContext, &displOffset));
+ if (!displ) {
+ return nullptr;
+ }
+
+ const SkIRect srcBounds = SkIRect::MakeXYWH(colorOffset.x(), colorOffset.y(),
+ color->width(), color->height());
+
+ // Both paths do bounds checking on color pixel access, so we don't need to
+ // pad the color bitmap to bounds here.
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, srcBounds, &bounds)) {
+ return nullptr;
+ }
+
+ SkIRect displBounds;
+ displ = this->applyCropRect(ctx, displ.get(), &displOffset, &displBounds);
+ if (!displ) {
+ return nullptr;
+ }
+
+ if (!bounds.intersect(displBounds)) {
+ return nullptr;
+ }
+
+ const SkIRect colorBounds = bounds.makeOffset(-colorOffset.x(), -colorOffset.y());
+
+ SkVector scale = SkVector::Make(fScale, fScale);
+ ctx.ctm().mapVectors(&scale, 1);
+
+#if SK_SUPPORT_GPU
+ if (source->isTextureBacked()) {
+ GrContext* context = source->getContext();
+
+ sk_sp<GrTexture> colorTexture(color->asTextureRef(context));
+ sk_sp<GrTexture> displTexture(displ->asTextureRef(context));
+ if (!colorTexture || !displTexture) {
+ return nullptr;
+ }
+
+ GrPaint paint;
+ SkMatrix offsetMatrix = GrCoordTransform::MakeDivByTextureWHMatrix(displTexture.get());
+ offsetMatrix.preTranslate(SkIntToScalar(colorOffset.fX - displOffset.fX),
+ SkIntToScalar(colorOffset.fY - displOffset.fY));
+
+ paint.addColorFragmentProcessor(
+ GrDisplacementMapEffect::Make(fXChannelSelector,
+ fYChannelSelector,
+ scale,
+ displTexture.get(),
+ offsetMatrix,
+ colorTexture.get(),
+ SkISize::Make(color->width(), color->height())));
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ SkMatrix matrix;
+ matrix.setTranslate(-SkIntToScalar(colorBounds.x()), -SkIntToScalar(colorBounds.y()));
+
+ SkColorSpace* colorSpace = ctx.outputProperties().colorSpace();
+ sk_sp<GrDrawContext> drawContext(
+ context->makeDrawContext(SkBackingFit::kApprox, bounds.width(), bounds.height(),
+ GrRenderableConfigForColorSpace(colorSpace),
+ sk_ref_sp(colorSpace)));
+ if (!drawContext) {
+ return nullptr;
+ }
+ paint.setGammaCorrect(drawContext->isGammaCorrect());
+
+ drawContext->drawRect(GrNoClip(), paint, matrix, SkRect::Make(colorBounds));
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ return SkSpecialImage::MakeFromGpu(SkIRect::MakeWH(bounds.width(), bounds.height()),
+ kNeedNewImageUniqueID_SpecialImage,
+ drawContext->asTexture(),
+ sk_ref_sp(drawContext->getColorSpace()));
+ }
+#endif
+
+ SkBitmap colorBM, displBM;
+
+ if (!color->getROPixels(&colorBM) || !displ->getROPixels(&displBM)) {
+ return nullptr;
+ }
+
+ if ((colorBM.colorType() != kN32_SkColorType) ||
+ (displBM.colorType() != kN32_SkColorType)) {
+ return nullptr;
+ }
+
+ SkAutoLockPixels colorLock(colorBM), displLock(displBM);
+ if (!colorBM.getPixels() || !displBM.getPixels()) {
+ return nullptr;
+ }
+
+ SkImageInfo info = SkImageInfo::MakeN32(bounds.width(), bounds.height(),
+ colorBM.alphaType());
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ SkAutoLockPixels dstLock(dst);
+
+ computeDisplacement(fXChannelSelector, fYChannelSelector, scale, &dst,
+ displBM, colorOffset - displOffset, colorBM, colorBounds);
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(bounds.width(), bounds.height()),
+ dst);
+}
+
+SkRect SkDisplacementMapEffect::computeFastBounds(const SkRect& src) const {
+ SkRect bounds = this->getColorInput() ? this->getColorInput()->computeFastBounds(src) : src;
+ bounds.outset(SkScalarAbs(fScale) * SK_ScalarHalf, SkScalarAbs(fScale) * SK_ScalarHalf);
+ return bounds;
+}
+
+SkIRect SkDisplacementMapEffect::onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection) const {
+ SkVector scale = SkVector::Make(fScale, fScale);
+ ctm.mapVectors(&scale, 1);
+ return src.makeOutset(SkScalarCeilToInt(SkScalarAbs(scale.fX) * SK_ScalarHalf),
+ SkScalarCeilToInt(SkScalarAbs(scale.fY) * SK_ScalarHalf));
+}
+
+SkIRect SkDisplacementMapEffect::onFilterBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection direction) const {
+ // Recurse only into color input.
+ if (this->getColorInput()) {
+ return this->getColorInput()->filterBounds(src, ctm, direction);
+ }
+ return src;
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkDisplacementMapEffect::toString(SkString* str) const {
+ str->appendf("SkDisplacementMapEffect: (");
+ str->appendf("scale: %f ", fScale);
+ str->appendf("displacement: (");
+ if (this->getDisplacementInput()) {
+ this->getDisplacementInput()->toString(str);
+ }
+ str->appendf(") color: (");
+ if (this->getColorInput()) {
+ this->getColorInput()->toString(str);
+ }
+ str->appendf("))");
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+class GrGLDisplacementMapEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrGLSLCaps&, GrProcessorKeyBuilder*);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fScaleUni;
+ GrTextureDomain::GLDomain fGLDomain;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrGLSLFragmentProcessor* GrDisplacementMapEffect::onCreateGLSLInstance() const {
+ return new GrGLDisplacementMapEffect;
+}
+
+void GrDisplacementMapEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLDisplacementMapEffect::GenKey(*this, caps, b);
+}
+
+GrDisplacementMapEffect::GrDisplacementMapEffect(
+ SkDisplacementMapEffect::ChannelSelectorType xChannelSelector,
+ SkDisplacementMapEffect::ChannelSelectorType yChannelSelector,
+ const SkVector& scale,
+ GrTexture* displacement,
+ const SkMatrix& offsetMatrix,
+ GrTexture* color,
+ const SkISize& colorDimensions)
+ : fDisplacementTransform(offsetMatrix, displacement, GrTextureParams::kNone_FilterMode)
+ , fDisplacementAccess(displacement)
+ , fColorTransform(color, GrTextureParams::kNone_FilterMode)
+ , fDomain(GrTextureDomain::MakeTexelDomain(color, SkIRect::MakeSize(colorDimensions)),
+ GrTextureDomain::kDecal_Mode)
+ , fColorAccess(color)
+ , fXChannelSelector(xChannelSelector)
+ , fYChannelSelector(yChannelSelector)
+ , fScale(scale) {
+ this->initClassID<GrDisplacementMapEffect>();
+ this->addCoordTransform(&fDisplacementTransform);
+ this->addTextureAccess(&fDisplacementAccess);
+ this->addCoordTransform(&fColorTransform);
+ this->addTextureAccess(&fColorAccess);
+}
+
+GrDisplacementMapEffect::~GrDisplacementMapEffect() {
+}
+
+bool GrDisplacementMapEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrDisplacementMapEffect& s = sBase.cast<GrDisplacementMapEffect>();
+ return fXChannelSelector == s.fXChannelSelector &&
+ fYChannelSelector == s.fYChannelSelector &&
+ fScale == s.fScale;
+}
+
+void GrDisplacementMapEffect::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ // Any displacement offset bringing a pixel out of bounds will output a color of (0,0,0,0),
+ // so the only way we'd get a constant alpha is if the input color image has a constant alpha
+ // and no displacement offset pushes any texture coordinates out of bounds OR if the constant
+ // alpha is 0. Since this isn't trivial to compute at this point, let's assume the output is
+ // not of constant color when a displacement effect is applied.
+ inout->setToUnknown(GrInvariantOutput::kWillNot_ReadInput);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrDisplacementMapEffect);
+
+sk_sp<GrFragmentProcessor> GrDisplacementMapEffect::TestCreate(GrProcessorTestData* d) {
+ int texIdxDispl = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx :
+ GrProcessorUnitTest::kAlphaTextureIdx;
+ int texIdxColor = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx :
+ GrProcessorUnitTest::kAlphaTextureIdx;
+ static const int kMaxComponent = 4;
+ SkDisplacementMapEffect::ChannelSelectorType xChannelSelector =
+ static_cast<SkDisplacementMapEffect::ChannelSelectorType>(
+ d->fRandom->nextRangeU(1, kMaxComponent));
+ SkDisplacementMapEffect::ChannelSelectorType yChannelSelector =
+ static_cast<SkDisplacementMapEffect::ChannelSelectorType>(
+ d->fRandom->nextRangeU(1, kMaxComponent));
+ SkVector scale = SkVector::Make(d->fRandom->nextRangeScalar(0, 100.0f),
+ d->fRandom->nextRangeScalar(0, 100.0f));
+ SkISize colorDimensions;
+ colorDimensions.fWidth = d->fRandom->nextRangeU(0, d->fTextures[texIdxColor]->width());
+ colorDimensions.fHeight = d->fRandom->nextRangeU(0, d->fTextures[texIdxColor]->height());
+ return GrDisplacementMapEffect::Make(xChannelSelector, yChannelSelector, scale,
+ d->fTextures[texIdxDispl], SkMatrix::I(),
+ d->fTextures[texIdxColor], colorDimensions);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGLDisplacementMapEffect::emitCode(EmitArgs& args) {
+ const GrDisplacementMapEffect& displacementMap = args.fFp.cast<GrDisplacementMapEffect>();
+ const GrTextureDomain& domain = displacementMap.domain();
+
+ fScaleUni = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType, kDefault_GrSLPrecision, "Scale");
+ const char* scaleUni = args.fUniformHandler->getUniformCStr(fScaleUni);
+ const char* dColor = "dColor";
+ const char* cCoords = "cCoords";
+ const char* nearZero = "1e-6"; // Since 6.10352e-5 is the smallest half float, use
+ // a number smaller than that to approximate 0, but
+ // leave room for 32-bit float GPU rounding errors.
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ fragBuilder->codeAppendf("\t\tvec4 %s = ", dColor);
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], args.fTransformedCoords[0].c_str(),
+ args.fTransformedCoords[0].getType());
+ fragBuilder->codeAppend(";\n");
+
+ // Unpremultiply the displacement
+ fragBuilder->codeAppendf(
+ "\t\t%s.rgb = (%s.a < %s) ? vec3(0.0) : clamp(%s.rgb / %s.a, 0.0, 1.0);",
+ dColor, dColor, nearZero, dColor, dColor);
+ SkString coords2D = fragBuilder->ensureCoords2D(args.fTransformedCoords[1]);
+ fragBuilder->codeAppendf("\t\tvec2 %s = %s + %s*(%s.",
+ cCoords, coords2D.c_str(), scaleUni, dColor);
+
+ switch (displacementMap.xChannelSelector()) {
+ case SkDisplacementMapEffect::kR_ChannelSelectorType:
+ fragBuilder->codeAppend("r");
+ break;
+ case SkDisplacementMapEffect::kG_ChannelSelectorType:
+ fragBuilder->codeAppend("g");
+ break;
+ case SkDisplacementMapEffect::kB_ChannelSelectorType:
+ fragBuilder->codeAppend("b");
+ break;
+ case SkDisplacementMapEffect::kA_ChannelSelectorType:
+ fragBuilder->codeAppend("a");
+ break;
+ case SkDisplacementMapEffect::kUnknown_ChannelSelectorType:
+ default:
+ SkDEBUGFAIL("Unknown X channel selector");
+ }
+
+ switch (displacementMap.yChannelSelector()) {
+ case SkDisplacementMapEffect::kR_ChannelSelectorType:
+ fragBuilder->codeAppend("r");
+ break;
+ case SkDisplacementMapEffect::kG_ChannelSelectorType:
+ fragBuilder->codeAppend("g");
+ break;
+ case SkDisplacementMapEffect::kB_ChannelSelectorType:
+ fragBuilder->codeAppend("b");
+ break;
+ case SkDisplacementMapEffect::kA_ChannelSelectorType:
+ fragBuilder->codeAppend("a");
+ break;
+ case SkDisplacementMapEffect::kUnknown_ChannelSelectorType:
+ default:
+ SkDEBUGFAIL("Unknown Y channel selector");
+ }
+ fragBuilder->codeAppend("-vec2(0.5));\t\t");
+
+ fGLDomain.sampleTexture(fragBuilder,
+ args.fUniformHandler,
+ args.fGLSLCaps,
+ domain,
+ args.fOutputColor,
+ SkString(cCoords),
+ args.fTexSamplers[1]);
+ fragBuilder->codeAppend(";\n");
+}
+
+void GrGLDisplacementMapEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& proc) {
+ const GrDisplacementMapEffect& displacementMap = proc.cast<GrDisplacementMapEffect>();
+ GrTexture* colorTex = displacementMap.texture(1);
+ SkScalar scaleX = displacementMap.scale().fX / colorTex->width();
+ SkScalar scaleY = displacementMap.scale().fY / colorTex->height();
+ pdman.set2f(fScaleUni, SkScalarToFloat(scaleX),
+ colorTex->origin() == kTopLeft_GrSurfaceOrigin ?
+ SkScalarToFloat(scaleY) : SkScalarToFloat(-scaleY));
+ fGLDomain.setData(pdman, displacementMap.domain(), colorTex->origin());
+}
+
+void GrGLDisplacementMapEffect::GenKey(const GrProcessor& proc,
+ const GrGLSLCaps&, GrProcessorKeyBuilder* b) {
+ const GrDisplacementMapEffect& displacementMap = proc.cast<GrDisplacementMapEffect>();
+
+ uint32_t xKey = displacementMap.xChannelSelector();
+ uint32_t yKey = displacementMap.yChannelSelector() << kChannelSelectorKeyBits;
+
+ b->add32(xKey | yKey);
+}
+#endif
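A standalone sketch (not part of the patch) of the per-pixel arithmetic in computeDisplacement() above: a displacement-channel value c in [0, 255] is scaled by scale/255, biased by 0.5 - scale/2 (so a value near the midpoint leaves the pixel roughly in place), truncated, and added to the destination coordinate; out-of-range source coordinates yield transparent black. The function name is hypothetical and plain floats stand in for SkScalar.

#include <cmath>
#include <cstdint>

// Source x coordinate sampled for destination x, given channel value c and scale.
static int displaced_coord(int x, uint8_t c, float scale) {
    const float scaleForColor = scale / 255.0f;        // scale.fX * Inv8bit
    const float scaleAdj      = 0.5f - scale * 0.5f;   // SK_ScalarHalf - scale * SK_ScalarHalf
    const float displ         = scaleForColor * c + scaleAdj;
    return x + (int)std::trunc(displ);                 // SkScalarTruncToInt(displX)
}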
diff --git a/gfx/skia/skia/src/effects/SkDropShadowImageFilter.cpp b/gfx/skia/skia/src/effects/SkDropShadowImageFilter.cpp
new file mode 100644
index 000000000..cc43db7f0
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkDropShadowImageFilter.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkDropShadowImageFilter.h"
+
+#include "SkBlurImageFilter.h"
+#include "SkCanvas.h"
+#include "SkReadBuffer.h"
+#include "SkSpecialImage.h"
+#include "SkSpecialSurface.h"
+#include "SkWriteBuffer.h"
+
+sk_sp<SkImageFilter> SkDropShadowImageFilter::Make(SkScalar dx, SkScalar dy,
+ SkScalar sigmaX, SkScalar sigmaY,
+ SkColor color, ShadowMode shadowMode,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect) {
+ return sk_sp<SkImageFilter>(new SkDropShadowImageFilter(dx, dy, sigmaX, sigmaY,
+ color, shadowMode,
+ std::move(input),
+ cropRect));
+}
+
+SkDropShadowImageFilter::SkDropShadowImageFilter(SkScalar dx, SkScalar dy,
+ SkScalar sigmaX, SkScalar sigmaY, SkColor color,
+ ShadowMode shadowMode, sk_sp<SkImageFilter> input,
+ const CropRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fDx(dx)
+ , fDy(dy)
+ , fSigmaX(sigmaX)
+ , fSigmaY(sigmaY)
+ , fColor(color)
+ , fShadowMode(shadowMode) {
+}
+
+sk_sp<SkFlattenable> SkDropShadowImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkScalar dx = buffer.readScalar();
+ SkScalar dy = buffer.readScalar();
+ SkScalar sigmaX = buffer.readScalar();
+ SkScalar sigmaY = buffer.readScalar();
+ SkColor color = buffer.readColor();
+ ShadowMode shadowMode = buffer.isVersionLT(SkReadBuffer::kDropShadowMode_Version) ?
+ kDrawShadowAndForeground_ShadowMode :
+ static_cast<ShadowMode>(buffer.readInt());
+ return Make(dx, dy, sigmaX, sigmaY, color, shadowMode, common.getInput(0), &common.cropRect());
+}
+
+void SkDropShadowImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeScalar(fDx);
+ buffer.writeScalar(fDy);
+ buffer.writeScalar(fSigmaX);
+ buffer.writeScalar(fSigmaY);
+ buffer.writeColor(fColor);
+ buffer.writeInt(static_cast<int>(fShadowMode));
+}
+
+sk_sp<SkSpecialImage> SkDropShadowImageFilter::onFilterImage(SkSpecialImage* source,
+ const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, source, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ const SkIRect inputBounds = SkIRect::MakeXYWH(inputOffset.x(), inputOffset.y(),
+ input->width(), input->height());
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, inputBounds, &bounds)) {
+ return nullptr;
+ }
+
+ sk_sp<SkSpecialSurface> surf(source->makeSurface(ctx.outputProperties(), bounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ canvas->clear(0x0);
+
+ SkVector sigma = SkVector::Make(fSigmaX, fSigmaY);
+ ctx.ctm().mapVectors(&sigma, 1);
+ sigma.fX = SkMaxScalar(0, sigma.fX);
+ sigma.fY = SkMaxScalar(0, sigma.fY);
+
+ SkPaint paint;
+ paint.setImageFilter(SkBlurImageFilter::Make(sigma.fX, sigma.fY, nullptr));
+ paint.setColorFilter(SkColorFilter::MakeModeFilter(fColor, SkXfermode::kSrcIn_Mode));
+
+ SkVector offsetVec = SkVector::Make(fDx, fDy);
+ ctx.ctm().mapVectors(&offsetVec, 1);
+
+ canvas->translate(SkIntToScalar(inputOffset.fX - bounds.fLeft),
+ SkIntToScalar(inputOffset.fY - bounds.fTop));
+ input->draw(canvas, offsetVec.fX, offsetVec.fY, &paint);
+
+ if (fShadowMode == kDrawShadowAndForeground_ShadowMode) {
+ input->draw(canvas, 0, 0, nullptr);
+ }
+ offset->fX = bounds.fLeft;
+ offset->fY = bounds.fTop;
+ return surf->makeImageSnapshot();
+}
+
+SkRect SkDropShadowImageFilter::computeFastBounds(const SkRect& src) const {
+ SkRect bounds = this->getInput(0) ? this->getInput(0)->computeFastBounds(src) : src;
+ SkRect shadowBounds = bounds;
+ shadowBounds.offset(fDx, fDy);
+ shadowBounds.outset(SkScalarMul(fSigmaX, SkIntToScalar(3)),
+ SkScalarMul(fSigmaY, SkIntToScalar(3)));
+ if (fShadowMode == kDrawShadowAndForeground_ShadowMode) {
+ bounds.join(shadowBounds);
+ } else {
+ bounds = shadowBounds;
+ }
+ return bounds;
+}
+
+SkIRect SkDropShadowImageFilter::onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection direction) const {
+ SkVector offsetVec = SkVector::Make(fDx, fDy);
+ if (kReverse_MapDirection == direction) {
+ offsetVec.negate();
+ }
+ ctm.mapVectors(&offsetVec, 1);
+ SkIRect dst = src.makeOffset(SkScalarCeilToInt(offsetVec.x()),
+ SkScalarCeilToInt(offsetVec.y()));
+ SkVector sigma = SkVector::Make(fSigmaX, fSigmaY);
+ ctm.mapVectors(&sigma, 1);
+ dst.outset(
+ SkScalarCeilToInt(SkScalarAbs(SkScalarMul(sigma.x(), SkIntToScalar(3)))),
+ SkScalarCeilToInt(SkScalarAbs(SkScalarMul(sigma.y(), SkIntToScalar(3)))));
+ if (fShadowMode == kDrawShadowAndForeground_ShadowMode) {
+ dst.join(src);
+ }
+ return dst;
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkDropShadowImageFilter::toString(SkString* str) const {
+ str->appendf("SkDropShadowImageFilter: (");
+
+ str->appendf("dX: %f ", fDx);
+ str->appendf("dY: %f ", fDy);
+ str->appendf("sigmaX: %f ", fSigmaX);
+ str->appendf("sigmaY: %f ", fSigmaY);
+
+ str->append("Color: ");
+ str->appendHex(fColor);
+
+ static const char* gModeStrings[] = {
+ "kDrawShadowAndForeground", "kDrawShadowOnly"
+ };
+
+ static_assert(kShadowModeCount == SK_ARRAY_COUNT(gModeStrings), "enum_mismatch");
+
+ str->appendf(" mode: %s", gModeStrings[fShadowMode]);
+
+ str->append(")");
+}
+#endif
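A standalone sketch (not part of the patch) of the bounds arithmetic in SkDropShadowImageFilter::computeFastBounds() above: the shadow covers the source offset by (dx, dy) and padded by three sigma of the blur on each axis, and in kDrawShadowAndForeground mode the original bounds are unioned back in. Plain structs stand in for SkRect; the names are hypothetical.

struct Rect { float left, top, right, bottom; };

static Rect drop_shadow_bounds(const Rect& src, float dx, float dy,
                               float sigmaX, float sigmaY, bool drawForeground) {
    Rect shadow = { src.left  + dx - 3 * sigmaX, src.top    + dy - 3 * sigmaY,
                    src.right + dx + 3 * sigmaX, src.bottom + dy + 3 * sigmaY };
    if (!drawForeground) {
        return shadow;                                  // kDrawShadowOnly_ShadowMode
    }
    // bounds.join(shadowBounds): union of the foreground and shadow rectangles.
    return { shadow.left   < src.left   ? shadow.left   : src.left,
             shadow.top    < src.top    ? shadow.top    : src.top,
             shadow.right  > src.right  ? shadow.right  : src.right,
             shadow.bottom > src.bottom ? shadow.bottom : src.bottom };
}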
diff --git a/gfx/skia/skia/src/effects/SkEmbossMask.cpp b/gfx/skia/skia/src/effects/SkEmbossMask.cpp
new file mode 100644
index 000000000..7079d65aa
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkEmbossMask.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkEmbossMask.h"
+#include "SkFixed.h"
+#include "SkMath.h"
+
+static inline int nonzero_to_one(int x) {
+#if 0
+ return x != 0;
+#else
+ return ((unsigned)(x | -x)) >> 31;
+#endif
+}
+
+static inline int neq_to_one(int x, int max) {
+#if 0
+ return x != max;
+#else
+ SkASSERT(x >= 0 && x <= max);
+ return ((unsigned)(x - max)) >> 31;
+#endif
+}
+
+static inline int neq_to_mask(int x, int max) {
+#if 0
+ return -(x != max);
+#else
+ SkASSERT(x >= 0 && x <= max);
+ return (x - max) >> 31;
+#endif
+}
+
+static inline unsigned div255(unsigned x) {
+ SkASSERT(x <= (255*255));
+ return x * ((1 << 24) / 255) >> 24;
+}
+
+#define kDelta 32 // small enough to show off angle differences
+
+#include "SkEmbossMask_Table.h"
+
+#if defined(SK_BUILD_FOR_WIN32) && defined(SK_DEBUG)
+
+#include <stdio.h>
+
+void SkEmbossMask_BuildTable() {
+ // build it 0..127 x 0..127, so we use 2^15 - 1 in the numerator for our "fixed" table
+
+ FILE* file = ::fopen("SkEmbossMask_Table.h", "w");
+ SkASSERT(file);
+ ::fprintf(file, "#include \"SkTypes.h\"\n\n");
+ ::fprintf(file, "static const U16 gInvSqrtTable[128 * 128] = {\n");
+ for (int dx = 0; dx <= 255/2; dx++) {
+ for (int dy = 0; dy <= 255/2; dy++) {
+ if ((dy & 15) == 0)
+ ::fprintf(file, "\t");
+
+ uint16_t value = SkToU16((1 << 15) / SkSqrt32(dx * dx + dy * dy + kDelta*kDelta/4));
+
+ ::fprintf(file, "0x%04X", value);
+ if (dx * 128 + dy < 128*128-1) {
+ ::fprintf(file, ", ");
+ }
+ if ((dy & 15) == 15) {
+ ::fprintf(file, "\n");
+ }
+ }
+ }
+ ::fprintf(file, "};\n#define kDeltaUsedToBuildTable\t%d\n", kDelta);
+ ::fclose(file);
+}
+
+#endif
+
+void SkEmbossMask::Emboss(SkMask* mask, const SkEmbossMaskFilter::Light& light) {
+ SkASSERT(kDelta == kDeltaUsedToBuildTable);
+
+ SkASSERT(mask->fFormat == SkMask::k3D_Format);
+
+ int specular = light.fSpecular;
+ int ambient = light.fAmbient;
+ SkFixed lx = SkScalarToFixed(light.fDirection[0]);
+ SkFixed ly = SkScalarToFixed(light.fDirection[1]);
+ SkFixed lz = SkScalarToFixed(light.fDirection[2]);
+ SkFixed lz_dot_nz = lz * kDelta;
+ int lz_dot8 = lz >> 8;
+
+ size_t planeSize = mask->computeImageSize();
+ uint8_t* alpha = mask->fImage;
+ uint8_t* multiply = (uint8_t*)alpha + planeSize;
+ uint8_t* additive = multiply + planeSize;
+
+ int rowBytes = mask->fRowBytes;
+ int maxy = mask->fBounds.height() - 1;
+ int maxx = mask->fBounds.width() - 1;
+
+ int prev_row = 0;
+ for (int y = 0; y <= maxy; y++) {
+ int next_row = neq_to_mask(y, maxy) & rowBytes;
+
+ for (int x = 0; x <= maxx; x++) {
+ if (alpha[x]) {
+ int nx = alpha[x + neq_to_one(x, maxx)] - alpha[x - nonzero_to_one(x)];
+ int ny = alpha[x + next_row] - alpha[x - prev_row];
+
+ SkFixed numer = lx * nx + ly * ny + lz_dot_nz;
+ int mul = ambient;
+ int add = 0;
+
+ if (numer > 0) { // preflight when numer/denom will be <= 0
+#if 0
+ int denom = SkSqrt32(nx * nx + ny * ny + kDelta*kDelta);
+ SkFixed dot = numer / denom;
+ dot >>= 8; // now dot is 2^8 instead of 2^16
+#else
+ // can use full numer, but then we need to call SkFixedMul, since
+ // numer is 24 bits, and our table is 12 bits
+
+ // SkFixed dot = SkFixedMul(numer, gTable[]) >> 8
+ SkFixed dot = (unsigned)(numer >> 4) * gInvSqrtTable[(SkAbs32(nx) >> 1 << 7) | (SkAbs32(ny) >> 1)] >> 20;
+#endif
+ mul = SkFastMin32(mul + dot, 255);
+
+ // now for the reflection
+
+ // R = 2 (Light * Normal) Normal - Light
+ // hilite = R * Eye(0, 0, 1)
+
+ int hilite = (2 * dot - lz_dot8) * lz_dot8 >> 8;
+ if (hilite > 0) {
+ // pin hilite to 255, since our fast math is also a little sloppy
+ hilite = SkClampMax(hilite, 255);
+
+ // specular is 4.4
+ // would really like to compute the fractional part of this
+ // and then possibly cache a 256 table for a given specular
+ // value in the light, and just pass that in to this function.
+ add = hilite;
+ for (int i = specular >> 4; i > 0; --i) {
+ add = div255(add * hilite);
+ }
+ }
+ }
+ multiply[x] = SkToU8(mul);
+ additive[x] = SkToU8(add);
+
+ // multiply[x] = 0xFF;
+ // additive[x] = 0;
+ // ((uint8_t*)alpha)[x] = alpha[x] * multiply[x] >> 8;
+ }
+ }
+ alpha += rowBytes;
+ multiply += rowBytes;
+ additive += rowBytes;
+ prev_row = rowBytes;
+ }
+}
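A floating-point sketch (not part of the patch) of the lighting model the fixed-point loop in SkEmbossMask::Emboss implements: the alpha gradient (nx, ny) together with kDelta acts as the surface normal, the diffuse term is L·N/|N| added to the ambient level, and the highlight uses the code's (2·dot - lz)·lz stand-in for R·Eye with R = 2(L·N)N - L and Eye = (0, 0, 1), raised to a power by the repeated div255 multiplies. Values live in [0, 1] here instead of the 8.8/4.4 fixed point used above; the struct and function names are hypothetical.

#include <algorithm>
#include <cmath>

struct EmbossSample { float multiply; float additive; };

static EmbossSample emboss_pixel(float nx, float ny, float nz,     // alpha gradient, nz = kDelta
                                 float lx, float ly, float lz,     // unit light direction
                                 float ambient, int specularPow) { // specularPow ~ fSpecular >> 4
    const float len = std::sqrt(nx * nx + ny * ny + nz * nz);
    const float dot = (lx * nx + ly * ny + lz * nz) / len;         // diffuse: L . N/|N|
    float mul = ambient;
    float add = 0.0f;
    if (dot > 0) {
        mul = std::min(mul + dot, 1.0f);
        float hilite = (2 * dot - lz) * lz;                        // the code's R.Eye approximation
        if (hilite > 0) {
            hilite = std::min(hilite, 1.0f);                       // "pin hilite", fast math is sloppy
            add = hilite;
            for (int i = 0; i < specularPow; ++i) {                // the div255 power loop
                add *= hilite;
            }
        }
    }
    return { mul, add };
}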
diff --git a/gfx/skia/skia/src/effects/SkEmbossMask.h b/gfx/skia/skia/src/effects/SkEmbossMask.h
new file mode 100644
index 000000000..80fcb20c3
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkEmbossMask.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkEmbossMask_DEFINED
+#define SkEmbossMask_DEFINED
+
+#include "SkEmbossMaskFilter.h"
+
+class SkEmbossMask {
+public:
+ static void Emboss(SkMask* mask, const SkEmbossMaskFilter::Light&);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/effects/SkEmbossMaskFilter.cpp b/gfx/skia/skia/src/effects/SkEmbossMaskFilter.cpp
new file mode 100644
index 000000000..5f3952d1a
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkEmbossMaskFilter.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkEmbossMaskFilter.h"
+#include "SkBlurMaskFilter.h"
+#include "SkBlurMask.h"
+#include "SkEmbossMask.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+#include "SkString.h"
+
+sk_sp<SkMaskFilter> SkEmbossMaskFilter::Make(SkScalar blurSigma, const Light& light) {
+ return sk_sp<SkMaskFilter>(new SkEmbossMaskFilter(blurSigma, light));
+}
+
+#ifdef SK_SUPPORT_LEGACY_MASKFILTER_PTR
+SkMaskFilter* SkBlurMaskFilter::CreateEmboss(const SkScalar direction[3],
+ SkScalar ambient, SkScalar specular,
+ SkScalar blurRadius) {
+ return SkBlurMaskFilter::CreateEmboss(SkBlurMask::ConvertRadiusToSigma(blurRadius),
+ direction, ambient, specular);
+}
+#endif
+
+sk_sp<SkMaskFilter> SkBlurMaskFilter::MakeEmboss(SkScalar blurSigma, const SkScalar direction[3],
+ SkScalar ambient, SkScalar specular) {
+ if (direction == nullptr) {
+ return nullptr;
+ }
+
+ SkEmbossMaskFilter::Light light;
+
+ memcpy(light.fDirection, direction, sizeof(light.fDirection));
+ // ambient should be 0...1 as a scalar
+ light.fAmbient = SkUnitScalarClampToByte(ambient);
+ // specular should be 0..15.99 as a scalar
+ static const SkScalar kSpecularMultiplier = SkIntToScalar(255) / 16;
+ light.fSpecular = static_cast<U8CPU>(SkScalarPin(specular, 0, 16) * kSpecularMultiplier + 0.5);
+
+ return SkEmbossMaskFilter::Make(blurSigma, light);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void normalize(SkScalar v[3]) {
+ SkScalar mag = SkScalarSquare(v[0]) + SkScalarSquare(v[1]) + SkScalarSquare(v[2]);
+ mag = SkScalarSqrt(mag);
+
+ for (int i = 0; i < 3; i++) {
+ v[i] /= mag;
+ }
+}
+
+SkEmbossMaskFilter::SkEmbossMaskFilter(SkScalar blurSigma, const Light& light)
+ : fLight(light), fBlurSigma(blurSigma) {
+ normalize(fLight.fDirection);
+}
+
+SkMask::Format SkEmbossMaskFilter::getFormat() const {
+ return SkMask::k3D_Format;
+}
+
+bool SkEmbossMaskFilter::filterMask(SkMask* dst, const SkMask& src,
+ const SkMatrix& matrix, SkIPoint* margin) const {
+ SkScalar sigma = matrix.mapRadius(fBlurSigma);
+
+ if (!SkBlurMask::BoxBlur(dst, src, sigma, kInner_SkBlurStyle, kLow_SkBlurQuality)) {
+ return false;
+ }
+
+ dst->fFormat = SkMask::k3D_Format;
+ if (margin) {
+ margin->set(SkScalarCeilToInt(3*sigma), SkScalarCeilToInt(3*sigma));
+ }
+
+ if (src.fImage == nullptr) {
+ return true;
+ }
+
+ // create a larger buffer for the other two channels (should force fBlur to do this for us)
+
+ {
+ uint8_t* alphaPlane = dst->fImage;
+ size_t planeSize = dst->computeImageSize();
+ if (0 == planeSize) {
+ return false; // too big to allocate, abort
+ }
+ dst->fImage = SkMask::AllocImage(planeSize * 3);
+ memcpy(dst->fImage, alphaPlane, planeSize);
+ SkMask::FreeImage(alphaPlane);
+ }
+
+ // run the light direction through the matrix...
+ Light light = fLight;
+ matrix.mapVectors((SkVector*)(void*)light.fDirection,
+ (SkVector*)(void*)fLight.fDirection, 1);
+
+ // now restore the length of the XY component
+ // cast to SkVector so we can call setLength (this double cast silences alias warnings)
+ SkVector* vec = (SkVector*)(void*)light.fDirection;
+ vec->setLength(light.fDirection[0],
+ light.fDirection[1],
+ SkPoint::Length(fLight.fDirection[0], fLight.fDirection[1]));
+
+ SkEmbossMask::Emboss(dst, light);
+
+ // restore original alpha
+ memcpy(dst->fImage, src.fImage, src.computeImageSize());
+
+ return true;
+}
+
+sk_sp<SkFlattenable> SkEmbossMaskFilter::CreateProc(SkReadBuffer& buffer) {
+ Light light;
+ if (buffer.readByteArray(&light, sizeof(Light))) {
+ light.fPad = 0; // for the font-cache lookup to be clean
+ const SkScalar sigma = buffer.readScalar();
+ return Make(sigma, light);
+ }
+ return nullptr;
+}
+
+void SkEmbossMaskFilter::flatten(SkWriteBuffer& buffer) const {
+ Light tmpLight = fLight;
+ tmpLight.fPad = 0; // for the font-cache lookup to be clean
+ buffer.writeByteArray(&tmpLight, sizeof(tmpLight));
+ buffer.writeScalar(fBlurSigma);
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkEmbossMaskFilter::toString(SkString* str) const {
+ str->append("SkEmbossMaskFilter: (");
+
+ str->append("direction: (");
+ str->appendScalar(fLight.fDirection[0]);
+ str->append(", ");
+ str->appendScalar(fLight.fDirection[1]);
+ str->append(", ");
+ str->appendScalar(fLight.fDirection[2]);
+ str->append(") ");
+
+ str->appendf("ambient: %d specular: %d ",
+ fLight.fAmbient, fLight.fSpecular);
+
+ str->append("blurSigma: ");
+ str->appendScalar(fBlurSigma);
+ str->append(")");
+}
+#endif
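A standalone sketch (not part of the patch) of how SkBlurMaskFilter::MakeEmboss above packs its scalar inputs into the Light struct: ambient is a unit scalar stored as a byte, and specular is clamped to [0, 16] and scaled by 255/16, giving the (approximately) 4.4 fixed-point value that SkEmbossMask::Emboss later treats as "specular >> 4" in its power loop. The names below are hypothetical and plain C++ stands in for the Sk* helpers.

#include <algorithm>
#include <cstdint>

struct PackedLight { uint8_t ambient; uint8_t specular; };

static PackedLight pack_light(float ambient, float specular) {
    const float a = std::min(std::max(ambient, 0.0f), 1.0f);      // mirrors SkUnitScalarClampToByte
    const float s = std::min(std::max(specular, 0.0f), 16.0f);    // mirrors SkScalarPin(specular, 0, 16)
    PackedLight out;
    out.ambient  = (uint8_t)(a * 255.0f + 0.5f);
    out.specular = (uint8_t)(s * (255.0f / 16.0f) + 0.5f);        // kSpecularMultiplier, ~4.4 fixed point
    return out;
}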
diff --git a/gfx/skia/skia/src/effects/SkEmbossMask_Table.h b/gfx/skia/skia/src/effects/SkEmbossMask_Table.h
new file mode 100644
index 000000000..b7073f302
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkEmbossMask_Table.h
@@ -0,0 +1,1037 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkTypes.h"
+
+static const uint16_t gInvSqrtTable[128 * 128] = {
+ 0x0800, 0x0800, 0x0800, 0x0800, 0x0800, 0x0800, 0x0787, 0x0787, 0x0787, 0x071C, 0x071C, 0x06BC, 0x0666, 0x0666, 0x0618, 0x0618,
+ 0x05D1, 0x0590, 0x0555, 0x0555, 0x051E, 0x04EC, 0x04BD, 0x0492, 0x0492, 0x0469, 0x0444, 0x0421, 0x0400, 0x03E0, 0x03C3, 0x03C3,
+ 0x03A8, 0x038E, 0x0375, 0x035E, 0x0348, 0x0333, 0x031F, 0x030C, 0x02FA, 0x02E8, 0x02E8, 0x02D8, 0x02C8, 0x02B9, 0x02AA, 0x029C,
+ 0x028F, 0x0282, 0x0276, 0x026A, 0x025E, 0x0253, 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8,
+ 0x01F8, 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199,
+ 0x0194, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155,
+ 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124,
+ 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100,
+ 0x0800, 0x0800, 0x0800, 0x0800, 0x0800, 0x0800, 0x0787, 0x0787, 0x0787, 0x071C, 0x071C, 0x06BC, 0x0666, 0x0666, 0x0618, 0x0618,
+ 0x05D1, 0x0590, 0x0555, 0x0555, 0x051E, 0x04EC, 0x04BD, 0x0492, 0x0492, 0x0469, 0x0444, 0x0421, 0x0400, 0x03E0, 0x03C3, 0x03C3,
+ 0x03A8, 0x038E, 0x0375, 0x035E, 0x0348, 0x0333, 0x031F, 0x030C, 0x02FA, 0x02E8, 0x02E8, 0x02D8, 0x02C8, 0x02B9, 0x02AA, 0x029C,
+ 0x028F, 0x0282, 0x0276, 0x026A, 0x025E, 0x0253, 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8,
+ 0x01F8, 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199,
+ 0x0194, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155,
+ 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124,
+ 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100,
+ 0x0800, 0x0800, 0x0800, 0x0800, 0x0800, 0x0800, 0x0787, 0x0787, 0x071C, 0x071C, 0x071C, 0x06BC, 0x0666, 0x0666, 0x0618, 0x05D1,
+ 0x05D1, 0x0590, 0x0555, 0x0555, 0x051E, 0x04EC, 0x04BD, 0x0492, 0x0492, 0x0469, 0x0444, 0x0421, 0x0400, 0x03E0, 0x03C3, 0x03C3,
+ 0x03A8, 0x038E, 0x0375, 0x035E, 0x0348, 0x0333, 0x031F, 0x030C, 0x02FA, 0x02E8, 0x02E8, 0x02D8, 0x02C8, 0x02B9, 0x02AA, 0x029C,
+ 0x028F, 0x0282, 0x0276, 0x026A, 0x025E, 0x0253, 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8,
+ 0x01F0, 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199,
+ 0x0194, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155,
+ 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124,
+ 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100,
+ 0x0800, 0x0800, 0x0800, 0x0800, 0x0800, 0x0787, 0x0787, 0x0787, 0x071C, 0x071C, 0x06BC, 0x06BC, 0x0666, 0x0666, 0x0618, 0x05D1,
+ 0x05D1, 0x0590, 0x0555, 0x051E, 0x051E, 0x04EC, 0x04BD, 0x0492, 0x0469, 0x0469, 0x0444, 0x0421, 0x0400, 0x03E0, 0x03C3, 0x03A8,
+ 0x03A8, 0x038E, 0x0375, 0x035E, 0x0348, 0x0333, 0x031F, 0x030C, 0x02FA, 0x02E8, 0x02D8, 0x02D8, 0x02C8, 0x02B9, 0x02AA, 0x029C,
+ 0x028F, 0x0282, 0x0276, 0x026A, 0x025E, 0x0253, 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8,
+ 0x01F0, 0x01E9, 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199,
+ 0x0194, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155,
+ 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124,
+ 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100,
+ 0x0800, 0x0800, 0x0800, 0x0800, 0x0800, 0x0787, 0x0787, 0x0787, 0x071C, 0x071C, 0x06BC, 0x06BC, 0x0666, 0x0618, 0x0618, 0x05D1,
+ 0x05D1, 0x0590, 0x0555, 0x051E, 0x051E, 0x04EC, 0x04BD, 0x0492, 0x0469, 0x0469, 0x0444, 0x0421, 0x0400, 0x03E0, 0x03C3, 0x03A8,
+ 0x038E, 0x038E, 0x0375, 0x035E, 0x0348, 0x0333, 0x031F, 0x030C, 0x02FA, 0x02E8, 0x02D8, 0x02C8, 0x02C8, 0x02B9, 0x02AA, 0x029C,
+ 0x028F, 0x0282, 0x0276, 0x026A, 0x025E, 0x0253, 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8,
+ 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199,
+ 0x0194, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155,
+ 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124,
+ 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100,
+ 0x0800, 0x0800, 0x0800, 0x0787, 0x0787, 0x0787, 0x0787, 0x071C, 0x071C, 0x06BC, 0x06BC, 0x0666, 0x0666, 0x0618, 0x0618, 0x05D1,
+ 0x0590, 0x0590, 0x0555, 0x051E, 0x04EC, 0x04EC, 0x04BD, 0x0492, 0x0469, 0x0444, 0x0444, 0x0421, 0x0400, 0x03E0, 0x03C3, 0x03A8,
+ 0x038E, 0x0375, 0x0375, 0x035E, 0x0348, 0x0333, 0x031F, 0x030C, 0x02FA, 0x02E8, 0x02D8, 0x02C8, 0x02B9, 0x02AA, 0x02AA, 0x029C,
+ 0x028F, 0x0282, 0x0276, 0x026A, 0x025E, 0x0253, 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8,
+ 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199,
+ 0x0194, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155,
+ 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124,
+ 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100,
+ 0x0787, 0x0787, 0x0787, 0x0787, 0x0787, 0x0787, 0x071C, 0x071C, 0x071C, 0x06BC, 0x06BC, 0x0666, 0x0666, 0x0618, 0x05D1, 0x05D1,
+ 0x0590, 0x0555, 0x0555, 0x051E, 0x04EC, 0x04BD, 0x04BD, 0x0492, 0x0469, 0x0444, 0x0421, 0x0421, 0x0400, 0x03E0, 0x03C3, 0x03A8,
+ 0x038E, 0x0375, 0x035E, 0x035E, 0x0348, 0x0333, 0x031F, 0x030C, 0x02FA, 0x02E8, 0x02D8, 0x02C8, 0x02B9, 0x02AA, 0x029C, 0x028F,
+ 0x028F, 0x0282, 0x0276, 0x026A, 0x025E, 0x0253, 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8,
+ 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199,
+ 0x0194, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155,
+ 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124,
+ 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100,
+ 0x0787, 0x0787, 0x0787, 0x0787, 0x0787, 0x071C, 0x071C, 0x071C, 0x06BC, 0x06BC, 0x0666, 0x0666, 0x0618, 0x0618, 0x05D1, 0x0590,
+ 0x0590, 0x0555, 0x051E, 0x051E, 0x04EC, 0x04BD, 0x0492, 0x0492, 0x0469, 0x0444, 0x0421, 0x0400, 0x03E0, 0x03E0, 0x03C3, 0x03A8,
+ 0x038E, 0x0375, 0x035E, 0x0348, 0x0333, 0x0333, 0x031F, 0x030C, 0x02FA, 0x02E8, 0x02D8, 0x02C8, 0x02B9, 0x02AA, 0x029C, 0x028F,
+ 0x0282, 0x0276, 0x0276, 0x026A, 0x025E, 0x0253, 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8,
+ 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A9, 0x01A4, 0x019E, 0x0199,
+ 0x0194, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155,
+ 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124,
+ 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100,
+ 0x0787, 0x0787, 0x071C, 0x071C, 0x071C, 0x071C, 0x071C, 0x06BC, 0x06BC, 0x0666, 0x0666, 0x0618, 0x0618, 0x05D1, 0x05D1, 0x0590,
+ 0x0555, 0x0555, 0x051E, 0x04EC, 0x04EC, 0x04BD, 0x0492, 0x0469, 0x0469, 0x0444, 0x0421, 0x0400, 0x03E0, 0x03C3, 0x03C3, 0x03A8,
+ 0x038E, 0x0375, 0x035E, 0x0348, 0x0333, 0x031F, 0x030C, 0x030C, 0x02FA, 0x02E8, 0x02D8, 0x02C8, 0x02B9, 0x02AA, 0x029C, 0x028F,
+ 0x0282, 0x0276, 0x026A, 0x025E, 0x025E, 0x0253, 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8,
+ 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194,
+ 0x0194, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155,
+ 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124,
+ 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100,
+ 0x071C, 0x071C, 0x071C, 0x071C, 0x071C, 0x06BC, 0x06BC, 0x06BC, 0x0666, 0x0666, 0x0666, 0x0618, 0x0618, 0x05D1, 0x0590, 0x0590,
+ 0x0555, 0x051E, 0x051E, 0x04EC, 0x04BD, 0x04BD, 0x0492, 0x0469, 0x0444, 0x0421, 0x0421, 0x0400, 0x03E0, 0x03C3, 0x03A8, 0x038E,
+ 0x038E, 0x0375, 0x035E, 0x0348, 0x0333, 0x031F, 0x030C, 0x02FA, 0x02E8, 0x02E8, 0x02D8, 0x02C8, 0x02B9, 0x02AA, 0x029C, 0x028F,
+ 0x0282, 0x0276, 0x026A, 0x025E, 0x0253, 0x0249, 0x023E, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8,
+ 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194,
+ 0x018F, 0x018A, 0x0186, 0x0181, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155,
+ 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124,
+ 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100,
+ 0x071C, 0x071C, 0x071C, 0x06BC, 0x06BC, 0x06BC, 0x06BC, 0x0666, 0x0666, 0x0666, 0x0618, 0x0618, 0x05D1, 0x05D1, 0x0590, 0x0555,
+ 0x0555, 0x051E, 0x04EC, 0x04EC, 0x04BD, 0x0492, 0x0492, 0x0469, 0x0444, 0x0421, 0x0400, 0x0400, 0x03E0, 0x03C3, 0x03A8, 0x038E,
+ 0x0375, 0x035E, 0x035E, 0x0348, 0x0333, 0x031F, 0x030C, 0x02FA, 0x02E8, 0x02D8, 0x02C8, 0x02C8, 0x02B9, 0x02AA, 0x029C, 0x028F,
+ 0x0282, 0x0276, 0x026A, 0x025E, 0x0253, 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8,
+ 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194,
+ 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155,
+ 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124,
+ 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100,
+ 0x06BC, 0x06BC, 0x06BC, 0x06BC, 0x06BC, 0x0666, 0x0666, 0x0666, 0x0618, 0x0618, 0x0618, 0x05D1, 0x05D1, 0x0590, 0x0590, 0x0555,
+ 0x051E, 0x051E, 0x04EC, 0x04BD, 0x04BD, 0x0492, 0x0469, 0x0444, 0x0444, 0x0421, 0x0400, 0x03E0, 0x03C3, 0x03C3, 0x03A8, 0x038E,
+ 0x0375, 0x035E, 0x0348, 0x0333, 0x0333, 0x031F, 0x030C, 0x02FA, 0x02E8, 0x02D8, 0x02C8, 0x02B9, 0x02AA, 0x029C, 0x029C, 0x028F,
+ 0x0282, 0x0276, 0x026A, 0x025E, 0x0253, 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x0200, 0x01F8,
+ 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194,
+ 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0158, 0x0155,
+ 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124,
+ 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100,
+ 0x0666, 0x0666, 0x0666, 0x0666, 0x0666, 0x0666, 0x0666, 0x0618, 0x0618, 0x0618, 0x05D1, 0x05D1, 0x0590, 0x0590, 0x0555, 0x051E,
+ 0x051E, 0x04EC, 0x04EC, 0x04BD, 0x0492, 0x0469, 0x0469, 0x0444, 0x0421, 0x0400, 0x0400, 0x03E0, 0x03C3, 0x03A8, 0x038E, 0x038E,
+ 0x0375, 0x035E, 0x0348, 0x0333, 0x031F, 0x030C, 0x030C, 0x02FA, 0x02E8, 0x02D8, 0x02C8, 0x02B9, 0x02AA, 0x029C, 0x028F, 0x0282,
+ 0x0276, 0x0276, 0x026A, 0x025E, 0x0253, 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F0,
+ 0x01E9, 0x01E1, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194,
+ 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151,
+ 0x014E, 0x014A, 0x0147, 0x0144, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124,
+ 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100,
+ 0x0666, 0x0666, 0x0666, 0x0666, 0x0618, 0x0618, 0x0618, 0x0618, 0x05D1, 0x05D1, 0x05D1, 0x0590, 0x0590, 0x0555, 0x0555, 0x051E,
+ 0x04EC, 0x04EC, 0x04BD, 0x0492, 0x0492, 0x0469, 0x0444, 0x0444, 0x0421, 0x0400, 0x03E0, 0x03E0, 0x03C3, 0x03A8, 0x038E, 0x0375,
+ 0x035E, 0x035E, 0x0348, 0x0333, 0x031F, 0x030C, 0x02FA, 0x02E8, 0x02D8, 0x02D8, 0x02C8, 0x02B9, 0x02AA, 0x029C, 0x028F, 0x0282,
+ 0x0276, 0x026A, 0x025E, 0x0253, 0x0253, 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F0,
+ 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194,
+ 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151,
+ 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124,
+ 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100,
+ 0x0618, 0x0618, 0x0618, 0x0618, 0x0618, 0x0618, 0x05D1, 0x05D1, 0x05D1, 0x0590, 0x0590, 0x0590, 0x0555, 0x0555, 0x051E, 0x04EC,
+ 0x04EC, 0x04BD, 0x04BD, 0x0492, 0x0469, 0x0469, 0x0444, 0x0421, 0x0400, 0x0400, 0x03E0, 0x03C3, 0x03A8, 0x03A8, 0x038E, 0x0375,
+ 0x035E, 0x0348, 0x0333, 0x0333, 0x031F, 0x030C, 0x02FA, 0x02E8, 0x02D8, 0x02C8, 0x02B9, 0x02B9, 0x02AA, 0x029C, 0x028F, 0x0282,
+ 0x0276, 0x026A, 0x025E, 0x0253, 0x0249, 0x023E, 0x0234, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F0,
+ 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194,
+ 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151,
+ 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121,
+ 0x011F, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100,
+ 0x0618, 0x0618, 0x05D1, 0x05D1, 0x05D1, 0x05D1, 0x05D1, 0x0590, 0x0590, 0x0590, 0x0555, 0x0555, 0x051E, 0x051E, 0x04EC, 0x04EC,
+ 0x04BD, 0x04BD, 0x0492, 0x0469, 0x0469, 0x0444, 0x0421, 0x0421, 0x0400, 0x03E0, 0x03C3, 0x03C3, 0x03A8, 0x038E, 0x0375, 0x0375,
+ 0x035E, 0x0348, 0x0333, 0x031F, 0x030C, 0x02FA, 0x02FA, 0x02E8, 0x02D8, 0x02C8, 0x02B9, 0x02AA, 0x029C, 0x028F, 0x028F, 0x0282,
+ 0x0276, 0x026A, 0x025E, 0x0253, 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F0,
+ 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194, 0x0194,
+ 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151,
+ 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121,
+ 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100,
+ 0x05D1, 0x05D1, 0x05D1, 0x05D1, 0x05D1, 0x0590, 0x0590, 0x0590, 0x0555, 0x0555, 0x0555, 0x051E, 0x051E, 0x04EC, 0x04EC, 0x04BD,
+ 0x04BD, 0x0492, 0x0492, 0x0469, 0x0444, 0x0444, 0x0421, 0x0400, 0x0400, 0x03E0, 0x03C3, 0x03A8, 0x038E, 0x038E, 0x0375, 0x035E,
+ 0x0348, 0x0333, 0x0333, 0x031F, 0x030C, 0x02FA, 0x02E8, 0x02D8, 0x02D8, 0x02C8, 0x02B9, 0x02AA, 0x029C, 0x028F, 0x0282, 0x0276,
+ 0x026A, 0x026A, 0x025E, 0x0253, 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F0, 0x01F0,
+ 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194, 0x018F,
+ 0x018A, 0x0186, 0x0181, 0x017D, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151,
+ 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121,
+ 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE,
+ 0x0590, 0x0590, 0x0590, 0x0590, 0x0590, 0x0590, 0x0555, 0x0555, 0x0555, 0x051E, 0x051E, 0x051E, 0x04EC, 0x04EC, 0x04BD, 0x04BD,
+ 0x0492, 0x0492, 0x0469, 0x0444, 0x0444, 0x0421, 0x0400, 0x0400, 0x03E0, 0x03C3, 0x03C3, 0x03A8, 0x038E, 0x0375, 0x035E, 0x035E,
+ 0x0348, 0x0333, 0x031F, 0x030C, 0x030C, 0x02FA, 0x02E8, 0x02D8, 0x02C8, 0x02B9, 0x02AA, 0x02AA, 0x029C, 0x028F, 0x0282, 0x0276,
+ 0x026A, 0x025E, 0x0253, 0x0249, 0x023E, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F0, 0x01E9,
+ 0x01E1, 0x01DA, 0x01D4, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194, 0x018F,
+ 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151,
+ 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121,
+ 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE,
+ 0x0555, 0x0555, 0x0555, 0x0555, 0x0555, 0x0555, 0x0555, 0x051E, 0x051E, 0x051E, 0x04EC, 0x04EC, 0x04EC, 0x04BD, 0x04BD, 0x0492,
+ 0x0492, 0x0469, 0x0444, 0x0444, 0x0421, 0x0421, 0x0400, 0x03E0, 0x03C3, 0x03C3, 0x03A8, 0x038E, 0x038E, 0x0375, 0x035E, 0x0348,
+ 0x0333, 0x0333, 0x031F, 0x030C, 0x02FA, 0x02E8, 0x02E8, 0x02D8, 0x02C8, 0x02B9, 0x02AA, 0x029C, 0x028F, 0x0282, 0x0282, 0x0276,
+ 0x026A, 0x025E, 0x0253, 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F0, 0x01E9,
+ 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194, 0x018F,
+ 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E,
+ 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121,
+ 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE,
+ 0x0555, 0x0555, 0x0555, 0x051E, 0x051E, 0x051E, 0x051E, 0x051E, 0x04EC, 0x04EC, 0x04EC, 0x04BD, 0x04BD, 0x0492, 0x0492, 0x0469,
+ 0x0469, 0x0444, 0x0444, 0x0421, 0x0421, 0x0400, 0x03E0, 0x03E0, 0x03C3, 0x03A8, 0x03A8, 0x038E, 0x0375, 0x035E, 0x035E, 0x0348,
+ 0x0333, 0x031F, 0x030C, 0x030C, 0x02FA, 0x02E8, 0x02D8, 0x02C8, 0x02B9, 0x02B9, 0x02AA, 0x029C, 0x028F, 0x0282, 0x0276, 0x026A,
+ 0x025E, 0x025E, 0x0253, 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x0200, 0x01F8, 0x01F0, 0x01E9,
+ 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x019E, 0x0199, 0x0194, 0x018F,
+ 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E,
+ 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121,
+ 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE,
+ 0x051E, 0x051E, 0x051E, 0x051E, 0x051E, 0x04EC, 0x04EC, 0x04EC, 0x04EC, 0x04BD, 0x04BD, 0x04BD, 0x0492, 0x0492, 0x0469, 0x0469,
+ 0x0444, 0x0444, 0x0421, 0x0421, 0x0400, 0x03E0, 0x03E0, 0x03C3, 0x03A8, 0x03A8, 0x038E, 0x0375, 0x0375, 0x035E, 0x0348, 0x0333,
+ 0x0333, 0x031F, 0x030C, 0x02FA, 0x02E8, 0x02D8, 0x02D8, 0x02C8, 0x02B9, 0x02AA, 0x029C, 0x028F, 0x028F, 0x0282, 0x0276, 0x026A,
+ 0x025E, 0x0253, 0x0249, 0x023E, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F0, 0x01E9, 0x01E1,
+ 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194, 0x018F, 0x018A,
+ 0x0186, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E,
+ 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0129, 0x0127, 0x0124, 0x0121,
+ 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE,
+ 0x04EC, 0x04EC, 0x04EC, 0x04EC, 0x04EC, 0x04EC, 0x04BD, 0x04BD, 0x04BD, 0x04BD, 0x0492, 0x0492, 0x0469, 0x0469, 0x0469, 0x0444,
+ 0x0444, 0x0421, 0x0421, 0x0400, 0x03E0, 0x03E0, 0x03C3, 0x03A8, 0x03A8, 0x038E, 0x0375, 0x0375, 0x035E, 0x0348, 0x0348, 0x0333,
+ 0x031F, 0x030C, 0x02FA, 0x02FA, 0x02E8, 0x02D8, 0x02C8, 0x02B9, 0x02B9, 0x02AA, 0x029C, 0x028F, 0x0282, 0x0276, 0x026A, 0x026A,
+ 0x025E, 0x0253, 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F0, 0x01E9, 0x01E1,
+ 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194, 0x018F, 0x018A,
+ 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E,
+ 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F,
+ 0x011C, 0x011A, 0x0118, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE,
+ 0x04BD, 0x04BD, 0x04BD, 0x04BD, 0x04BD, 0x04BD, 0x04BD, 0x0492, 0x0492, 0x0492, 0x0492, 0x0469, 0x0469, 0x0444, 0x0444, 0x0421,
+ 0x0421, 0x0400, 0x0400, 0x03E0, 0x03E0, 0x03C3, 0x03C3, 0x03A8, 0x038E, 0x038E, 0x0375, 0x035E, 0x0348, 0x0348, 0x0333, 0x031F,
+ 0x030C, 0x030C, 0x02FA, 0x02E8, 0x02D8, 0x02D8, 0x02C8, 0x02B9, 0x02AA, 0x029C, 0x028F, 0x028F, 0x0282, 0x0276, 0x026A, 0x025E,
+ 0x0253, 0x0249, 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x0200, 0x01F8, 0x01F0, 0x01E9, 0x01E1,
+ 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194, 0x018F, 0x018A,
+ 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E,
+ 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F,
+ 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE,
+ 0x0492, 0x0492, 0x0492, 0x0492, 0x0492, 0x0492, 0x0492, 0x0492, 0x0469, 0x0469, 0x0469, 0x0444, 0x0444, 0x0444, 0x0421, 0x0421,
+ 0x0400, 0x0400, 0x03E0, 0x03E0, 0x03C3, 0x03A8, 0x03A8, 0x038E, 0x038E, 0x0375, 0x035E, 0x035E, 0x0348, 0x0333, 0x031F, 0x031F,
+ 0x030C, 0x02FA, 0x02E8, 0x02E8, 0x02D8, 0x02C8, 0x02B9, 0x02AA, 0x02AA, 0x029C, 0x028F, 0x0282, 0x0276, 0x026A, 0x026A, 0x025E,
+ 0x0253, 0x0249, 0x023E, 0x0234, 0x022B, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F0, 0x01E9, 0x01E1, 0x01E1,
+ 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194, 0x0194, 0x018F, 0x018A,
+ 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E, 0x014A,
+ 0x0147, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F,
+ 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC,
+ 0x0492, 0x0492, 0x0492, 0x0469, 0x0469, 0x0469, 0x0469, 0x0469, 0x0469, 0x0444, 0x0444, 0x0444, 0x0421, 0x0421, 0x0400, 0x0400,
+ 0x0400, 0x03E0, 0x03C3, 0x03C3, 0x03A8, 0x03A8, 0x038E, 0x038E, 0x0375, 0x035E, 0x035E, 0x0348, 0x0333, 0x0333, 0x031F, 0x030C,
+ 0x02FA, 0x02FA, 0x02E8, 0x02D8, 0x02C8, 0x02C8, 0x02B9, 0x02AA, 0x029C, 0x028F, 0x028F, 0x0282, 0x0276, 0x026A, 0x025E, 0x0253,
+ 0x0249, 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F0, 0x01E9, 0x01E1, 0x01DA,
+ 0x01D4, 0x01CD, 0x01C7, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194, 0x018F, 0x018A, 0x0186,
+ 0x0181, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E, 0x014A,
+ 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F,
+ 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC,
+ 0x0469, 0x0469, 0x0469, 0x0469, 0x0469, 0x0444, 0x0444, 0x0444, 0x0444, 0x0421, 0x0421, 0x0421, 0x0400, 0x0400, 0x0400, 0x03E0,
+ 0x03E0, 0x03C3, 0x03C3, 0x03A8, 0x03A8, 0x038E, 0x038E, 0x0375, 0x035E, 0x035E, 0x0348, 0x0333, 0x0333, 0x031F, 0x030C, 0x030C,
+ 0x02FA, 0x02E8, 0x02D8, 0x02D8, 0x02C8, 0x02B9, 0x02AA, 0x029C, 0x029C, 0x028F, 0x0282, 0x0276, 0x026A, 0x026A, 0x025E, 0x0253,
+ 0x0249, 0x023E, 0x0234, 0x022B, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F0, 0x01F0, 0x01E9, 0x01E1, 0x01DA,
+ 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194, 0x018F, 0x018A, 0x0186,
+ 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E, 0x014A,
+ 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0124, 0x0121, 0x011F,
+ 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC,
+ 0x0444, 0x0444, 0x0444, 0x0444, 0x0444, 0x0444, 0x0421, 0x0421, 0x0421, 0x0421, 0x0400, 0x0400, 0x0400, 0x03E0, 0x03E0, 0x03C3,
+ 0x03C3, 0x03C3, 0x03A8, 0x03A8, 0x038E, 0x0375, 0x0375, 0x035E, 0x035E, 0x0348, 0x0333, 0x0333, 0x031F, 0x030C, 0x030C, 0x02FA,
+ 0x02E8, 0x02E8, 0x02D8, 0x02C8, 0x02B9, 0x02B9, 0x02AA, 0x029C, 0x028F, 0x0282, 0x0282, 0x0276, 0x026A, 0x025E, 0x0253, 0x0249,
+ 0x0249, 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01D4,
+ 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0199, 0x0194, 0x018F, 0x018A, 0x0186,
+ 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0158, 0x0155, 0x0151, 0x014E, 0x014A,
+ 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C,
+ 0x011A, 0x0118, 0x0115, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC,
+ 0x0421, 0x0421, 0x0421, 0x0421, 0x0421, 0x0421, 0x0421, 0x0400, 0x0400, 0x0400, 0x0400, 0x03E0, 0x03E0, 0x03E0, 0x03C3, 0x03C3,
+ 0x03A8, 0x03A8, 0x038E, 0x038E, 0x0375, 0x0375, 0x035E, 0x035E, 0x0348, 0x0333, 0x0333, 0x031F, 0x030C, 0x030C, 0x02FA, 0x02E8,
+ 0x02E8, 0x02D8, 0x02C8, 0x02B9, 0x02B9, 0x02AA, 0x029C, 0x028F, 0x028F, 0x0282, 0x0276, 0x026A, 0x025E, 0x025E, 0x0253, 0x0249,
+ 0x023E, 0x0234, 0x022B, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F8, 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01D4,
+ 0x01CD, 0x01C7, 0x01C0, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194, 0x018F, 0x018A, 0x0186, 0x0181,
+ 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E, 0x014A, 0x0147,
+ 0x0144, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C,
+ 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC,
+ 0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x03E0, 0x03E0, 0x03E0, 0x03E0, 0x03C3, 0x03C3, 0x03C3, 0x03A8, 0x03A8,
+ 0x038E, 0x038E, 0x038E, 0x0375, 0x0375, 0x035E, 0x0348, 0x0348, 0x0333, 0x0333, 0x031F, 0x030C, 0x030C, 0x02FA, 0x02E8, 0x02E8,
+ 0x02D8, 0x02C8, 0x02C8, 0x02B9, 0x02AA, 0x029C, 0x029C, 0x028F, 0x0282, 0x0276, 0x0276, 0x026A, 0x025E, 0x0253, 0x0249, 0x023E,
+ 0x023E, 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01DA, 0x01D4,
+ 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194, 0x018F, 0x018A, 0x0186, 0x0181,
+ 0x017D, 0x0178, 0x0174, 0x0170, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E, 0x014A, 0x0147,
+ 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C,
+ 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA,
+ 0x03E0, 0x03E0, 0x03E0, 0x03E0, 0x03E0, 0x03E0, 0x03E0, 0x03E0, 0x03C3, 0x03C3, 0x03C3, 0x03C3, 0x03A8, 0x03A8, 0x03A8, 0x038E,
+ 0x038E, 0x0375, 0x0375, 0x035E, 0x035E, 0x0348, 0x0348, 0x0333, 0x0333, 0x031F, 0x030C, 0x030C, 0x02FA, 0x02E8, 0x02E8, 0x02D8,
+ 0x02C8, 0x02C8, 0x02B9, 0x02AA, 0x02AA, 0x029C, 0x028F, 0x0282, 0x0282, 0x0276, 0x026A, 0x025E, 0x0253, 0x0253, 0x0249, 0x023E,
+ 0x0234, 0x022B, 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F8, 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD,
+ 0x01C7, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194, 0x0194, 0x018F, 0x018A, 0x0186, 0x0181,
+ 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E, 0x014A, 0x0147,
+ 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0124, 0x0121, 0x011F, 0x011C,
+ 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA,
+ 0x03C3, 0x03C3, 0x03C3, 0x03C3, 0x03C3, 0x03C3, 0x03C3, 0x03C3, 0x03C3, 0x03A8, 0x03A8, 0x03A8, 0x038E, 0x038E, 0x038E, 0x0375,
+ 0x0375, 0x035E, 0x035E, 0x035E, 0x0348, 0x0348, 0x0333, 0x031F, 0x031F, 0x030C, 0x030C, 0x02FA, 0x02E8, 0x02E8, 0x02D8, 0x02C8,
+ 0x02C8, 0x02B9, 0x02AA, 0x02AA, 0x029C, 0x028F, 0x028F, 0x0282, 0x0276, 0x026A, 0x025E, 0x025E, 0x0253, 0x0249, 0x023E, 0x0234,
+ 0x0234, 0x022B, 0x0222, 0x0219, 0x0210, 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F0, 0x01E9, 0x01E1, 0x01E1, 0x01DA, 0x01D4, 0x01CD,
+ 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D,
+ 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E, 0x014A, 0x014A, 0x0147,
+ 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A,
+ 0x0118, 0x0115, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA,
+ 0x03C3, 0x03C3, 0x03C3, 0x03A8, 0x03A8, 0x03A8, 0x03A8, 0x03A8, 0x03A8, 0x038E, 0x038E, 0x038E, 0x038E, 0x0375, 0x0375, 0x0375,
+ 0x035E, 0x035E, 0x0348, 0x0348, 0x0333, 0x0333, 0x031F, 0x031F, 0x030C, 0x030C, 0x02FA, 0x02E8, 0x02E8, 0x02D8, 0x02C8, 0x02C8,
+ 0x02B9, 0x02AA, 0x02AA, 0x029C, 0x028F, 0x028F, 0x0282, 0x0276, 0x026A, 0x026A, 0x025E, 0x0253, 0x0249, 0x0249, 0x023E, 0x0234,
+ 0x022B, 0x0222, 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F8, 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7,
+ 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0199, 0x0194, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D,
+ 0x0178, 0x0174, 0x0170, 0x016C, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E, 0x014A, 0x0147, 0x0144,
+ 0x0141, 0x013E, 0x013B, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A,
+ 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA,
+ 0x03A8, 0x03A8, 0x03A8, 0x03A8, 0x038E, 0x038E, 0x038E, 0x038E, 0x038E, 0x038E, 0x0375, 0x0375, 0x0375, 0x035E, 0x035E, 0x035E,
+ 0x0348, 0x0348, 0x0333, 0x0333, 0x0333, 0x031F, 0x030C, 0x030C, 0x02FA, 0x02FA, 0x02E8, 0x02E8, 0x02D8, 0x02C8, 0x02C8, 0x02B9,
+ 0x02AA, 0x02AA, 0x029C, 0x028F, 0x028F, 0x0282, 0x0276, 0x0276, 0x026A, 0x025E, 0x0253, 0x0253, 0x0249, 0x023E, 0x0234, 0x022B,
+ 0x022B, 0x0222, 0x0219, 0x0210, 0x0208, 0x0208, 0x0200, 0x01F8, 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01DA, 0x01D4, 0x01CD, 0x01C7,
+ 0x01C0, 0x01BA, 0x01B4, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194, 0x018F, 0x018A, 0x0186, 0x0186, 0x0181, 0x017D,
+ 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E, 0x014A, 0x0147, 0x0144,
+ 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A,
+ 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FC, 0x00FA,
+ 0x038E, 0x038E, 0x038E, 0x038E, 0x038E, 0x0375, 0x0375, 0x0375, 0x0375, 0x0375, 0x035E, 0x035E, 0x035E, 0x035E, 0x0348, 0x0348,
+ 0x0333, 0x0333, 0x0333, 0x031F, 0x031F, 0x030C, 0x030C, 0x02FA, 0x02FA, 0x02E8, 0x02E8, 0x02D8, 0x02C8, 0x02C8, 0x02B9, 0x02AA,
+ 0x02AA, 0x029C, 0x028F, 0x028F, 0x0282, 0x0276, 0x0276, 0x026A, 0x025E, 0x0253, 0x0253, 0x0249, 0x023E, 0x0234, 0x0234, 0x022B,
+ 0x0222, 0x0219, 0x0210, 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F0, 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C7,
+ 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x019E, 0x0199, 0x0194, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178,
+ 0x0174, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E, 0x014A, 0x014A, 0x0147, 0x0144,
+ 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C, 0x011C, 0x011A,
+ 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8,
+ 0x0375, 0x0375, 0x0375, 0x0375, 0x0375, 0x0375, 0x035E, 0x035E, 0x035E, 0x035E, 0x035E, 0x0348, 0x0348, 0x0348, 0x0333, 0x0333,
+ 0x0333, 0x031F, 0x031F, 0x030C, 0x030C, 0x02FA, 0x02FA, 0x02E8, 0x02E8, 0x02D8, 0x02D8, 0x02C8, 0x02C8, 0x02B9, 0x02AA, 0x02AA,
+ 0x029C, 0x028F, 0x028F, 0x0282, 0x0276, 0x0276, 0x026A, 0x025E, 0x025E, 0x0253, 0x0249, 0x023E, 0x023E, 0x0234, 0x022B, 0x0222,
+ 0x0222, 0x0219, 0x0210, 0x0208, 0x0200, 0x0200, 0x01F8, 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0,
+ 0x01BA, 0x01B4, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194, 0x018F, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178,
+ 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141,
+ 0x013E, 0x013B, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A, 0x0118,
+ 0x0115, 0x0113, 0x0111, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8,
+ 0x035E, 0x035E, 0x035E, 0x035E, 0x035E, 0x035E, 0x035E, 0x0348, 0x0348, 0x0348, 0x0348, 0x0333, 0x0333, 0x0333, 0x0333, 0x031F,
+ 0x031F, 0x030C, 0x030C, 0x030C, 0x02FA, 0x02FA, 0x02E8, 0x02E8, 0x02D8, 0x02D8, 0x02C8, 0x02B9, 0x02B9, 0x02AA, 0x02AA, 0x029C,
+ 0x028F, 0x028F, 0x0282, 0x0276, 0x0276, 0x026A, 0x025E, 0x025E, 0x0253, 0x0249, 0x0249, 0x023E, 0x0234, 0x022B, 0x022B, 0x0222,
+ 0x0219, 0x0210, 0x0208, 0x0208, 0x0200, 0x01F8, 0x01F0, 0x01E9, 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C7, 0x01C0,
+ 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x019E, 0x0199, 0x0194, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x017D, 0x0178,
+ 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0155, 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141,
+ 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A, 0x0118,
+ 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8,
+ 0x0348, 0x0348, 0x0348, 0x0348, 0x0348, 0x0348, 0x0348, 0x0333, 0x0333, 0x0333, 0x0333, 0x0333, 0x031F, 0x031F, 0x031F, 0x030C,
+ 0x030C, 0x030C, 0x02FA, 0x02FA, 0x02E8, 0x02E8, 0x02D8, 0x02D8, 0x02C8, 0x02C8, 0x02B9, 0x02B9, 0x02AA, 0x02AA, 0x029C, 0x028F,
+ 0x028F, 0x0282, 0x0276, 0x0276, 0x026A, 0x025E, 0x025E, 0x0253, 0x0249, 0x0249, 0x023E, 0x0234, 0x022B, 0x022B, 0x0222, 0x0219,
+ 0x0210, 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F0, 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA,
+ 0x01B4, 0x01AF, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194, 0x018F, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174,
+ 0x0170, 0x016C, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0144, 0x0141,
+ 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011F, 0x011C, 0x011A, 0x0118,
+ 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F8,
+ 0x0333, 0x0333, 0x0333, 0x0333, 0x0333, 0x0333, 0x0333, 0x0333, 0x031F, 0x031F, 0x031F, 0x031F, 0x030C, 0x030C, 0x030C, 0x02FA,
+ 0x02FA, 0x02FA, 0x02E8, 0x02E8, 0x02D8, 0x02D8, 0x02D8, 0x02C8, 0x02C8, 0x02B9, 0x02B9, 0x02AA, 0x029C, 0x029C, 0x028F, 0x028F,
+ 0x0282, 0x0276, 0x0276, 0x026A, 0x025E, 0x025E, 0x0253, 0x0249, 0x0249, 0x023E, 0x0234, 0x0234, 0x022B, 0x0222, 0x0219, 0x0219,
+ 0x0210, 0x0208, 0x0200, 0x01F8, 0x01F8, 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01C0, 0x01BA,
+ 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x019E, 0x0199, 0x0194, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x017D, 0x0178, 0x0174,
+ 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E,
+ 0x013B, 0x0138, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115,
+ 0x0113, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6,
+ 0x031F, 0x031F, 0x031F, 0x031F, 0x031F, 0x031F, 0x031F, 0x031F, 0x030C, 0x030C, 0x030C, 0x030C, 0x030C, 0x02FA, 0x02FA, 0x02FA,
+ 0x02E8, 0x02E8, 0x02E8, 0x02D8, 0x02D8, 0x02C8, 0x02C8, 0x02B9, 0x02B9, 0x02AA, 0x02AA, 0x029C, 0x029C, 0x028F, 0x028F, 0x0282,
+ 0x0276, 0x0276, 0x026A, 0x025E, 0x025E, 0x0253, 0x0249, 0x0249, 0x023E, 0x0234, 0x0234, 0x022B, 0x0222, 0x0219, 0x0219, 0x0210,
+ 0x0208, 0x0200, 0x0200, 0x01F8, 0x01F0, 0x01E9, 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01B4,
+ 0x01AF, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194, 0x018F, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170,
+ 0x016C, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E,
+ 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115,
+ 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6,
+ 0x030C, 0x030C, 0x030C, 0x030C, 0x030C, 0x030C, 0x030C, 0x030C, 0x030C, 0x02FA, 0x02FA, 0x02FA, 0x02FA, 0x02E8, 0x02E8, 0x02E8,
+ 0x02D8, 0x02D8, 0x02D8, 0x02C8, 0x02C8, 0x02B9, 0x02B9, 0x02AA, 0x02AA, 0x029C, 0x029C, 0x028F, 0x028F, 0x0282, 0x0282, 0x0276,
+ 0x0276, 0x026A, 0x025E, 0x025E, 0x0253, 0x0249, 0x0249, 0x023E, 0x0234, 0x0234, 0x022B, 0x0222, 0x0222, 0x0219, 0x0210, 0x0208,
+ 0x0208, 0x0200, 0x01F8, 0x01F0, 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01BA, 0x01B4,
+ 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0199, 0x0194, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x017D, 0x0178, 0x0174, 0x0170,
+ 0x016C, 0x0168, 0x0164, 0x0160, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013E,
+ 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C, 0x011C, 0x011A, 0x0118, 0x0115,
+ 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00F8, 0x00F6,
+ 0x02FA, 0x02FA, 0x02FA, 0x02FA, 0x02FA, 0x02FA, 0x02FA, 0x02FA, 0x02FA, 0x02E8, 0x02E8, 0x02E8, 0x02E8, 0x02D8, 0x02D8, 0x02D8,
+ 0x02D8, 0x02C8, 0x02C8, 0x02B9, 0x02B9, 0x02B9, 0x02AA, 0x02AA, 0x029C, 0x029C, 0x028F, 0x028F, 0x0282, 0x0282, 0x0276, 0x026A,
+ 0x026A, 0x025E, 0x025E, 0x0253, 0x0249, 0x0249, 0x023E, 0x0234, 0x0234, 0x022B, 0x0222, 0x0222, 0x0219, 0x0210, 0x0208, 0x0208,
+ 0x0200, 0x01F8, 0x01F0, 0x01F0, 0x01E9, 0x01E1, 0x01DA, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01C0, 0x01BA, 0x01B4, 0x01AF,
+ 0x01A9, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194, 0x018F, 0x018A, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x0170,
+ 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B,
+ 0x0138, 0x0135, 0x0132, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113,
+ 0x0111, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4,
+ 0x02E8, 0x02E8, 0x02E8, 0x02E8, 0x02E8, 0x02E8, 0x02E8, 0x02E8, 0x02E8, 0x02E8, 0x02D8, 0x02D8, 0x02D8, 0x02D8, 0x02C8, 0x02C8,
+ 0x02C8, 0x02B9, 0x02B9, 0x02B9, 0x02AA, 0x02AA, 0x029C, 0x029C, 0x028F, 0x028F, 0x0282, 0x0282, 0x0276, 0x0276, 0x026A, 0x026A,
+ 0x025E, 0x0253, 0x0253, 0x0249, 0x0249, 0x023E, 0x0234, 0x0234, 0x022B, 0x0222, 0x0222, 0x0219, 0x0210, 0x0210, 0x0208, 0x0200,
+ 0x01F8, 0x01F8, 0x01F0, 0x01E9, 0x01E1, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01AF,
+ 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0194, 0x0194, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C,
+ 0x0168, 0x0164, 0x0160, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0144, 0x0141, 0x013E, 0x013B,
+ 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113,
+ 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4,
+ 0x02E8, 0x02E8, 0x02E8, 0x02D8, 0x02D8, 0x02D8, 0x02D8, 0x02D8, 0x02D8, 0x02D8, 0x02C8, 0x02C8, 0x02C8, 0x02C8, 0x02B9, 0x02B9,
+ 0x02B9, 0x02AA, 0x02AA, 0x02AA, 0x029C, 0x029C, 0x028F, 0x028F, 0x028F, 0x0282, 0x0282, 0x0276, 0x0276, 0x026A, 0x025E, 0x025E,
+ 0x0253, 0x0253, 0x0249, 0x0249, 0x023E, 0x0234, 0x0234, 0x022B, 0x0222, 0x0222, 0x0219, 0x0210, 0x0210, 0x0208, 0x0200, 0x01F8,
+ 0x01F8, 0x01F0, 0x01E9, 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01CD, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01BA, 0x01B4, 0x01AF, 0x01A9,
+ 0x01A4, 0x019E, 0x019E, 0x0199, 0x0194, 0x018F, 0x018A, 0x0186, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x016C,
+ 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138,
+ 0x0135, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A, 0x011A, 0x0118, 0x0115, 0x0113,
+ 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4,
+ 0x02D8, 0x02D8, 0x02D8, 0x02D8, 0x02C8, 0x02C8, 0x02C8, 0x02C8, 0x02C8, 0x02C8, 0x02C8, 0x02B9, 0x02B9, 0x02B9, 0x02B9, 0x02AA,
+ 0x02AA, 0x02AA, 0x029C, 0x029C, 0x028F, 0x028F, 0x028F, 0x0282, 0x0282, 0x0276, 0x0276, 0x026A, 0x026A, 0x025E, 0x025E, 0x0253,
+ 0x0253, 0x0249, 0x023E, 0x023E, 0x0234, 0x0234, 0x022B, 0x0222, 0x0222, 0x0219, 0x0210, 0x0210, 0x0208, 0x0200, 0x0200, 0x01F8,
+ 0x01F0, 0x01E9, 0x01E9, 0x01E1, 0x01DA, 0x01D4, 0x01D4, 0x01CD, 0x01C7, 0x01C0, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A9,
+ 0x01A4, 0x019E, 0x0199, 0x0194, 0x018F, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168,
+ 0x0164, 0x0160, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138,
+ 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111,
+ 0x010E, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F2,
+ 0x02C8, 0x02C8, 0x02C8, 0x02C8, 0x02C8, 0x02B9, 0x02B9, 0x02B9, 0x02B9, 0x02B9, 0x02B9, 0x02AA, 0x02AA, 0x02AA, 0x02AA, 0x029C,
+ 0x029C, 0x029C, 0x028F, 0x028F, 0x028F, 0x0282, 0x0282, 0x0276, 0x0276, 0x026A, 0x026A, 0x025E, 0x025E, 0x0253, 0x0253, 0x0249,
+ 0x0249, 0x023E, 0x023E, 0x0234, 0x022B, 0x022B, 0x0222, 0x0222, 0x0219, 0x0210, 0x0210, 0x0208, 0x0200, 0x0200, 0x01F8, 0x01F0,
+ 0x01E9, 0x01E9, 0x01E1, 0x01DA, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01AF, 0x01A9, 0x01A4,
+ 0x019E, 0x0199, 0x0199, 0x0194, 0x018F, 0x018A, 0x0186, 0x0181, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x016C, 0x0168,
+ 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0138,
+ 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111,
+ 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F2,
+ 0x02B9, 0x02B9, 0x02B9, 0x02B9, 0x02B9, 0x02AA, 0x02AA, 0x02AA, 0x02AA, 0x02AA, 0x02AA, 0x029C, 0x029C, 0x029C, 0x029C, 0x028F,
+ 0x028F, 0x028F, 0x0282, 0x0282, 0x0282, 0x0276, 0x0276, 0x026A, 0x026A, 0x026A, 0x025E, 0x025E, 0x0253, 0x0253, 0x0249, 0x0249,
+ 0x023E, 0x0234, 0x0234, 0x022B, 0x022B, 0x0222, 0x0219, 0x0219, 0x0210, 0x0210, 0x0208, 0x0200, 0x0200, 0x01F8, 0x01F0, 0x01E9,
+ 0x01E9, 0x01E1, 0x01DA, 0x01DA, 0x01D4, 0x01CD, 0x01C7, 0x01C7, 0x01C0, 0x01BA, 0x01B4, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x019E,
+ 0x019E, 0x0199, 0x0194, 0x018F, 0x018A, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164,
+ 0x0160, 0x015C, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135,
+ 0x0132, 0x012F, 0x012C, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0113, 0x0111,
+ 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F2,
+ 0x02AA, 0x02AA, 0x02AA, 0x02AA, 0x02AA, 0x02AA, 0x029C, 0x029C, 0x029C, 0x029C, 0x029C, 0x029C, 0x028F, 0x028F, 0x028F, 0x028F,
+ 0x0282, 0x0282, 0x0282, 0x0276, 0x0276, 0x026A, 0x026A, 0x026A, 0x025E, 0x025E, 0x0253, 0x0253, 0x0249, 0x0249, 0x023E, 0x023E,
+ 0x0234, 0x0234, 0x022B, 0x022B, 0x0222, 0x0219, 0x0219, 0x0210, 0x0208, 0x0208, 0x0200, 0x0200, 0x01F8, 0x01F0, 0x01F0, 0x01E9,
+ 0x01E1, 0x01DA, 0x01DA, 0x01D4, 0x01CD, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x01A4, 0x019E,
+ 0x0199, 0x0194, 0x018F, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0168, 0x0164,
+ 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x013B, 0x0138, 0x0135,
+ 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E,
+ 0x010C, 0x010A, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0,
+ 0x029C, 0x029C, 0x029C, 0x029C, 0x029C, 0x029C, 0x028F, 0x028F, 0x028F, 0x028F, 0x028F, 0x028F, 0x0282, 0x0282, 0x0282, 0x0282,
+ 0x0276, 0x0276, 0x0276, 0x026A, 0x026A, 0x026A, 0x025E, 0x025E, 0x0253, 0x0253, 0x0249, 0x0249, 0x023E, 0x023E, 0x0234, 0x0234,
+ 0x022B, 0x022B, 0x0222, 0x0222, 0x0219, 0x0219, 0x0210, 0x0208, 0x0208, 0x0200, 0x01F8, 0x01F8, 0x01F0, 0x01E9, 0x01E9, 0x01E1,
+ 0x01DA, 0x01DA, 0x01D4, 0x01CD, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01BA, 0x01B4, 0x01AF, 0x01A9, 0x01A9, 0x01A4, 0x019E, 0x0199,
+ 0x0194, 0x0194, 0x018F, 0x018A, 0x0186, 0x0181, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160,
+ 0x015C, 0x0158, 0x0158, 0x0155, 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132,
+ 0x012F, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E,
+ 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0,
+ 0x028F, 0x028F, 0x028F, 0x028F, 0x028F, 0x028F, 0x028F, 0x0282, 0x0282, 0x0282, 0x0282, 0x0282, 0x0276, 0x0276, 0x0276, 0x0276,
+ 0x026A, 0x026A, 0x026A, 0x025E, 0x025E, 0x025E, 0x0253, 0x0253, 0x0249, 0x0249, 0x0249, 0x023E, 0x023E, 0x0234, 0x0234, 0x022B,
+ 0x022B, 0x0222, 0x0222, 0x0219, 0x0210, 0x0210, 0x0208, 0x0208, 0x0200, 0x01F8, 0x01F8, 0x01F0, 0x01E9, 0x01E9, 0x01E1, 0x01DA,
+ 0x01DA, 0x01D4, 0x01CD, 0x01CD, 0x01C7, 0x01C0, 0x01BA, 0x01BA, 0x01B4, 0x01AF, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x0199, 0x0199,
+ 0x0194, 0x018F, 0x018A, 0x018A, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0164, 0x0160,
+ 0x015C, 0x0158, 0x0155, 0x0151, 0x014E, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x013B, 0x0138, 0x0135, 0x0132,
+ 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010E,
+ 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F2, 0x00F0,
+ 0x0282, 0x0282, 0x0282, 0x0282, 0x0282, 0x0282, 0x0282, 0x0276, 0x0276, 0x0276, 0x0276, 0x0276, 0x0276, 0x026A, 0x026A, 0x026A,
+ 0x026A, 0x025E, 0x025E, 0x025E, 0x0253, 0x0253, 0x0249, 0x0249, 0x0249, 0x023E, 0x023E, 0x0234, 0x0234, 0x022B, 0x022B, 0x0222,
+ 0x0222, 0x0219, 0x0219, 0x0210, 0x0210, 0x0208, 0x0200, 0x0200, 0x01F8, 0x01F8, 0x01F0, 0x01E9, 0x01E9, 0x01E1, 0x01DA, 0x01DA,
+ 0x01D4, 0x01CD, 0x01CD, 0x01C7, 0x01C0, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01AF, 0x01A9, 0x01A4, 0x019E, 0x019E, 0x0199, 0x0194,
+ 0x018F, 0x018F, 0x018A, 0x0186, 0x0181, 0x017D, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0168, 0x0164, 0x0160, 0x015C,
+ 0x0158, 0x0155, 0x0155, 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F,
+ 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C,
+ 0x010A, 0x0108, 0x0106, 0x0104, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00EF,
+ 0x0276, 0x0276, 0x0276, 0x0276, 0x0276, 0x0276, 0x0276, 0x0276, 0x026A, 0x026A, 0x026A, 0x026A, 0x026A, 0x025E, 0x025E, 0x025E,
+ 0x025E, 0x0253, 0x0253, 0x0253, 0x0249, 0x0249, 0x0249, 0x023E, 0x023E, 0x0234, 0x0234, 0x022B, 0x022B, 0x022B, 0x0222, 0x0222,
+ 0x0219, 0x0210, 0x0210, 0x0208, 0x0208, 0x0200, 0x0200, 0x01F8, 0x01F0, 0x01F0, 0x01E9, 0x01E9, 0x01E1, 0x01DA, 0x01DA, 0x01D4,
+ 0x01CD, 0x01CD, 0x01C7, 0x01C0, 0x01C0, 0x01BA, 0x01B4, 0x01AF, 0x01AF, 0x01A9, 0x01A4, 0x01A4, 0x019E, 0x0199, 0x0194, 0x018F,
+ 0x018F, 0x018A, 0x0186, 0x0181, 0x0181, 0x017D, 0x0178, 0x0174, 0x0170, 0x0170, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x015C,
+ 0x0158, 0x0155, 0x0151, 0x014E, 0x014A, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0138, 0x0135, 0x0132, 0x012F,
+ 0x012C, 0x0129, 0x0127, 0x0124, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x0111, 0x010E, 0x010C,
+ 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00EF,
+ 0x026A, 0x026A, 0x026A, 0x026A, 0x026A, 0x026A, 0x026A, 0x026A, 0x025E, 0x025E, 0x025E, 0x025E, 0x025E, 0x0253, 0x0253, 0x0253,
+ 0x0253, 0x0249, 0x0249, 0x0249, 0x023E, 0x023E, 0x023E, 0x0234, 0x0234, 0x022B, 0x022B, 0x022B, 0x0222, 0x0222, 0x0219, 0x0219,
+ 0x0210, 0x0210, 0x0208, 0x0208, 0x0200, 0x01F8, 0x01F8, 0x01F0, 0x01F0, 0x01E9, 0x01E9, 0x01E1, 0x01DA, 0x01DA, 0x01D4, 0x01CD,
+ 0x01CD, 0x01C7, 0x01C0, 0x01C0, 0x01BA, 0x01B4, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x01A4, 0x019E, 0x0199, 0x0194, 0x0194, 0x018F,
+ 0x018A, 0x0186, 0x0186, 0x0181, 0x017D, 0x0178, 0x0174, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0164, 0x0160, 0x015C, 0x0158,
+ 0x0155, 0x0151, 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C,
+ 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A,
+ 0x0108, 0x0106, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F2, 0x00F0, 0x00EF,
+ 0x025E, 0x025E, 0x025E, 0x025E, 0x025E, 0x025E, 0x025E, 0x025E, 0x025E, 0x0253, 0x0253, 0x0253, 0x0253, 0x0253, 0x0249, 0x0249,
+ 0x0249, 0x023E, 0x023E, 0x023E, 0x023E, 0x0234, 0x0234, 0x022B, 0x022B, 0x022B, 0x0222, 0x0222, 0x0219, 0x0219, 0x0210, 0x0210,
+ 0x0208, 0x0208, 0x0200, 0x0200, 0x01F8, 0x01F8, 0x01F0, 0x01F0, 0x01E9, 0x01E1, 0x01E1, 0x01DA, 0x01DA, 0x01D4, 0x01CD, 0x01CD,
+ 0x01C7, 0x01C0, 0x01C0, 0x01BA, 0x01B4, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x01A4, 0x019E, 0x0199, 0x0199, 0x0194, 0x018F, 0x018A,
+ 0x0186, 0x0186, 0x0181, 0x017D, 0x0178, 0x0178, 0x0174, 0x0170, 0x016C, 0x0168, 0x0168, 0x0164, 0x0160, 0x015C, 0x0158, 0x0158,
+ 0x0155, 0x0151, 0x014E, 0x014A, 0x0147, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0135, 0x0132, 0x012F, 0x012C,
+ 0x0129, 0x0127, 0x0124, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x0111, 0x010E, 0x010C, 0x010A,
+ 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00EF, 0x00ED,
+ 0x0253, 0x0253, 0x0253, 0x0253, 0x0253, 0x0253, 0x0253, 0x0253, 0x0253, 0x0249, 0x0249, 0x0249, 0x0249, 0x0249, 0x023E, 0x023E,
+ 0x023E, 0x023E, 0x0234, 0x0234, 0x0234, 0x022B, 0x022B, 0x022B, 0x0222, 0x0222, 0x0219, 0x0219, 0x0210, 0x0210, 0x0210, 0x0208,
+ 0x0208, 0x0200, 0x0200, 0x01F8, 0x01F0, 0x01F0, 0x01E9, 0x01E9, 0x01E1, 0x01E1, 0x01DA, 0x01D4, 0x01D4, 0x01CD, 0x01CD, 0x01C7,
+ 0x01C0, 0x01C0, 0x01BA, 0x01B4, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x01A4, 0x019E, 0x0199, 0x0199, 0x0194, 0x018F, 0x018A, 0x018A,
+ 0x0186, 0x0181, 0x017D, 0x017D, 0x0178, 0x0174, 0x0170, 0x016C, 0x016C, 0x0168, 0x0164, 0x0160, 0x015C, 0x015C, 0x0158, 0x0155,
+ 0x0151, 0x014E, 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x012C,
+ 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108,
+ 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00EF, 0x00ED,
+ 0x0249, 0x0249, 0x0249, 0x0249, 0x0249, 0x0249, 0x0249, 0x0249, 0x0249, 0x023E, 0x023E, 0x023E, 0x023E, 0x023E, 0x0234, 0x0234,
+ 0x0234, 0x0234, 0x022B, 0x022B, 0x022B, 0x0222, 0x0222, 0x0222, 0x0219, 0x0219, 0x0210, 0x0210, 0x0210, 0x0208, 0x0208, 0x0200,
+ 0x0200, 0x01F8, 0x01F8, 0x01F0, 0x01F0, 0x01E9, 0x01E9, 0x01E1, 0x01DA, 0x01DA, 0x01D4, 0x01D4, 0x01CD, 0x01C7, 0x01C7, 0x01C0,
+ 0x01BA, 0x01BA, 0x01B4, 0x01B4, 0x01AF, 0x01A9, 0x01A4, 0x01A4, 0x019E, 0x0199, 0x0199, 0x0194, 0x018F, 0x018A, 0x018A, 0x0186,
+ 0x0181, 0x017D, 0x017D, 0x0178, 0x0174, 0x0170, 0x0170, 0x016C, 0x0168, 0x0164, 0x0164, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151,
+ 0x0151, 0x014E, 0x014A, 0x0147, 0x0144, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x0132, 0x012F, 0x012C, 0x0129,
+ 0x0127, 0x0124, 0x0121, 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108,
+ 0x0106, 0x0104, 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00EF, 0x00ED, 0x00ED,
+ 0x023E, 0x023E, 0x023E, 0x023E, 0x023E, 0x023E, 0x023E, 0x023E, 0x023E, 0x023E, 0x0234, 0x0234, 0x0234, 0x0234, 0x0234, 0x022B,
+ 0x022B, 0x022B, 0x0222, 0x0222, 0x0222, 0x0219, 0x0219, 0x0219, 0x0210, 0x0210, 0x0210, 0x0208, 0x0208, 0x0200, 0x0200, 0x01F8,
+ 0x01F8, 0x01F0, 0x01F0, 0x01E9, 0x01E9, 0x01E1, 0x01E1, 0x01DA, 0x01DA, 0x01D4, 0x01CD, 0x01CD, 0x01C7, 0x01C7, 0x01C0, 0x01BA,
+ 0x01BA, 0x01B4, 0x01AF, 0x01AF, 0x01A9, 0x01A4, 0x01A4, 0x019E, 0x0199, 0x0199, 0x0194, 0x018F, 0x018F, 0x018A, 0x0186, 0x0181,
+ 0x0181, 0x017D, 0x0178, 0x0174, 0x0174, 0x0170, 0x016C, 0x0168, 0x0164, 0x0164, 0x0160, 0x015C, 0x0158, 0x0158, 0x0155, 0x0151,
+ 0x014E, 0x014A, 0x0147, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0129,
+ 0x0127, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0108,
+ 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00EF, 0x00ED, 0x00EB,
+ 0x0234, 0x0234, 0x0234, 0x0234, 0x0234, 0x0234, 0x0234, 0x0234, 0x0234, 0x0234, 0x022B, 0x022B, 0x022B, 0x022B, 0x022B, 0x0222,
+ 0x0222, 0x0222, 0x0222, 0x0219, 0x0219, 0x0219, 0x0210, 0x0210, 0x0210, 0x0208, 0x0208, 0x0200, 0x0200, 0x01F8, 0x01F8, 0x01F8,
+ 0x01F0, 0x01F0, 0x01E9, 0x01E9, 0x01E1, 0x01DA, 0x01DA, 0x01D4, 0x01D4, 0x01CD, 0x01CD, 0x01C7, 0x01C7, 0x01C0, 0x01BA, 0x01BA,
+ 0x01B4, 0x01AF, 0x01AF, 0x01A9, 0x01A4, 0x01A4, 0x019E, 0x0199, 0x0199, 0x0194, 0x018F, 0x018F, 0x018A, 0x0186, 0x0181, 0x0181,
+ 0x017D, 0x0178, 0x0174, 0x0174, 0x0170, 0x016C, 0x0168, 0x0168, 0x0164, 0x0160, 0x015C, 0x015C, 0x0158, 0x0155, 0x0151, 0x014E,
+ 0x014E, 0x014A, 0x0147, 0x0144, 0x0141, 0x013E, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012F, 0x012C, 0x0129, 0x0127,
+ 0x0124, 0x0121, 0x011F, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106,
+ 0x0104, 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00ED, 0x00EB,
+ 0x022B, 0x022B, 0x022B, 0x022B, 0x022B, 0x022B, 0x022B, 0x022B, 0x022B, 0x022B, 0x0222, 0x0222, 0x0222, 0x0222, 0x0222, 0x0219,
+ 0x0219, 0x0219, 0x0219, 0x0210, 0x0210, 0x0210, 0x0208, 0x0208, 0x0208, 0x0200, 0x0200, 0x01F8, 0x01F8, 0x01F8, 0x01F0, 0x01F0,
+ 0x01E9, 0x01E9, 0x01E1, 0x01E1, 0x01DA, 0x01DA, 0x01D4, 0x01D4, 0x01CD, 0x01C7, 0x01C7, 0x01C0, 0x01C0, 0x01BA, 0x01BA, 0x01B4,
+ 0x01AF, 0x01AF, 0x01A9, 0x01A4, 0x01A4, 0x019E, 0x0199, 0x0199, 0x0194, 0x018F, 0x018F, 0x018A, 0x0186, 0x0181, 0x0181, 0x017D,
+ 0x0178, 0x0178, 0x0174, 0x0170, 0x016C, 0x016C, 0x0168, 0x0164, 0x0160, 0x0160, 0x015C, 0x0158, 0x0155, 0x0151, 0x0151, 0x014E,
+ 0x014A, 0x0147, 0x0144, 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0127,
+ 0x0124, 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0108, 0x0106,
+ 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EA,
+ 0x0222, 0x0222, 0x0222, 0x0222, 0x0222, 0x0222, 0x0222, 0x0222, 0x0222, 0x0222, 0x0222, 0x0219, 0x0219, 0x0219, 0x0219, 0x0210,
+ 0x0210, 0x0210, 0x0210, 0x0208, 0x0208, 0x0208, 0x0200, 0x0200, 0x0200, 0x01F8, 0x01F8, 0x01F8, 0x01F0, 0x01F0, 0x01E9, 0x01E9,
+ 0x01E1, 0x01E1, 0x01DA, 0x01DA, 0x01D4, 0x01D4, 0x01CD, 0x01CD, 0x01C7, 0x01C7, 0x01C0, 0x01C0, 0x01BA, 0x01B4, 0x01B4, 0x01AF,
+ 0x01AF, 0x01A9, 0x01A4, 0x01A4, 0x019E, 0x0199, 0x0199, 0x0194, 0x018F, 0x018F, 0x018A, 0x0186, 0x0186, 0x0181, 0x017D, 0x0178,
+ 0x0178, 0x0174, 0x0170, 0x016C, 0x016C, 0x0168, 0x0164, 0x0160, 0x0160, 0x015C, 0x0158, 0x0155, 0x0155, 0x0151, 0x014E, 0x014A,
+ 0x0147, 0x0147, 0x0144, 0x0141, 0x013E, 0x013B, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x012C, 0x0129, 0x0127, 0x0124,
+ 0x0121, 0x011F, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104,
+ 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EA,
+ 0x0219, 0x0219, 0x0219, 0x0219, 0x0219, 0x0219, 0x0219, 0x0219, 0x0219, 0x0219, 0x0219, 0x0210, 0x0210, 0x0210, 0x0210, 0x0210,
+ 0x0208, 0x0208, 0x0208, 0x0200, 0x0200, 0x0200, 0x0200, 0x01F8, 0x01F8, 0x01F0, 0x01F0, 0x01F0, 0x01E9, 0x01E9, 0x01E1, 0x01E1,
+ 0x01DA, 0x01DA, 0x01DA, 0x01D4, 0x01D4, 0x01CD, 0x01CD, 0x01C7, 0x01C0, 0x01C0, 0x01BA, 0x01BA, 0x01B4, 0x01B4, 0x01AF, 0x01A9,
+ 0x01A9, 0x01A4, 0x01A4, 0x019E, 0x0199, 0x0199, 0x0194, 0x018F, 0x018F, 0x018A, 0x0186, 0x0186, 0x0181, 0x017D, 0x0178, 0x0178,
+ 0x0174, 0x0170, 0x0170, 0x016C, 0x0168, 0x0164, 0x0164, 0x0160, 0x015C, 0x0158, 0x0158, 0x0155, 0x0151, 0x014E, 0x014A, 0x014A,
+ 0x0147, 0x0144, 0x0141, 0x013E, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0124,
+ 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0108, 0x0106, 0x0104,
+ 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EA, 0x00EA,
+ 0x0210, 0x0210, 0x0210, 0x0210, 0x0210, 0x0210, 0x0210, 0x0210, 0x0210, 0x0210, 0x0210, 0x0208, 0x0208, 0x0208, 0x0208, 0x0208,
+ 0x0200, 0x0200, 0x0200, 0x0200, 0x01F8, 0x01F8, 0x01F8, 0x01F0, 0x01F0, 0x01F0, 0x01E9, 0x01E9, 0x01E1, 0x01E1, 0x01E1, 0x01DA,
+ 0x01DA, 0x01D4, 0x01D4, 0x01CD, 0x01CD, 0x01C7, 0x01C7, 0x01C0, 0x01C0, 0x01BA, 0x01BA, 0x01B4, 0x01AF, 0x01AF, 0x01A9, 0x01A9,
+ 0x01A4, 0x019E, 0x019E, 0x0199, 0x0199, 0x0194, 0x018F, 0x018F, 0x018A, 0x0186, 0x0186, 0x0181, 0x017D, 0x0178, 0x0178, 0x0174,
+ 0x0170, 0x0170, 0x016C, 0x0168, 0x0164, 0x0164, 0x0160, 0x015C, 0x0158, 0x0158, 0x0155, 0x0151, 0x014E, 0x014E, 0x014A, 0x0147,
+ 0x0144, 0x0141, 0x0141, 0x013E, 0x013B, 0x0138, 0x0135, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0129, 0x0127, 0x0124, 0x0121,
+ 0x011F, 0x011C, 0x011A, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102,
+ 0x0100, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EA, 0x00E8,
+ 0x0208, 0x0208, 0x0208, 0x0208, 0x0208, 0x0208, 0x0208, 0x0208, 0x0208, 0x0208, 0x0208, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200,
+ 0x01F8, 0x01F8, 0x01F8, 0x01F8, 0x01F0, 0x01F0, 0x01F0, 0x01E9, 0x01E9, 0x01E9, 0x01E1, 0x01E1, 0x01DA, 0x01DA, 0x01DA, 0x01D4,
+ 0x01D4, 0x01CD, 0x01CD, 0x01C7, 0x01C7, 0x01C0, 0x01C0, 0x01BA, 0x01BA, 0x01B4, 0x01B4, 0x01AF, 0x01AF, 0x01A9, 0x01A4, 0x01A4,
+ 0x019E, 0x019E, 0x0199, 0x0194, 0x0194, 0x018F, 0x018A, 0x018A, 0x0186, 0x0181, 0x0181, 0x017D, 0x0178, 0x0178, 0x0174, 0x0170,
+ 0x0170, 0x016C, 0x0168, 0x0164, 0x0164, 0x0160, 0x015C, 0x015C, 0x0158, 0x0155, 0x0151, 0x0151, 0x014E, 0x014A, 0x0147, 0x0144,
+ 0x0144, 0x0141, 0x013E, 0x013B, 0x0138, 0x0138, 0x0135, 0x0132, 0x012F, 0x012C, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F,
+ 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0106, 0x0104, 0x0102,
+ 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EA, 0x00EA, 0x00E8,
+ 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x01F8, 0x01F8, 0x01F8, 0x01F8,
+ 0x01F0, 0x01F0, 0x01F0, 0x01F0, 0x01E9, 0x01E9, 0x01E9, 0x01E1, 0x01E1, 0x01E1, 0x01DA, 0x01DA, 0x01DA, 0x01D4, 0x01D4, 0x01CD,
+ 0x01CD, 0x01C7, 0x01C7, 0x01C7, 0x01C0, 0x01C0, 0x01BA, 0x01BA, 0x01B4, 0x01AF, 0x01AF, 0x01A9, 0x01A9, 0x01A4, 0x01A4, 0x019E,
+ 0x0199, 0x0199, 0x0194, 0x0194, 0x018F, 0x018A, 0x018A, 0x0186, 0x0181, 0x0181, 0x017D, 0x0178, 0x0178, 0x0174, 0x0170, 0x0170,
+ 0x016C, 0x0168, 0x0168, 0x0164, 0x0160, 0x015C, 0x015C, 0x0158, 0x0155, 0x0151, 0x0151, 0x014E, 0x014A, 0x0147, 0x0147, 0x0144,
+ 0x0141, 0x013E, 0x013B, 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0124, 0x0121, 0x011F,
+ 0x011C, 0x011A, 0x0118, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100,
+ 0x00FE, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EA, 0x00E8, 0x00E6,
+ 0x01F8, 0x01F8, 0x01F8, 0x01F8, 0x01F8, 0x01F8, 0x01F8, 0x01F8, 0x01F8, 0x01F8, 0x01F8, 0x01F8, 0x01F0, 0x01F0, 0x01F0, 0x01F0,
+ 0x01F0, 0x01E9, 0x01E9, 0x01E9, 0x01E1, 0x01E1, 0x01E1, 0x01E1, 0x01DA, 0x01DA, 0x01D4, 0x01D4, 0x01D4, 0x01CD, 0x01CD, 0x01C7,
+ 0x01C7, 0x01C7, 0x01C0, 0x01C0, 0x01BA, 0x01BA, 0x01B4, 0x01B4, 0x01AF, 0x01AF, 0x01A9, 0x01A9, 0x01A4, 0x019E, 0x019E, 0x0199,
+ 0x0199, 0x0194, 0x018F, 0x018F, 0x018A, 0x018A, 0x0186, 0x0181, 0x0181, 0x017D, 0x0178, 0x0178, 0x0174, 0x0170, 0x0170, 0x016C,
+ 0x0168, 0x0168, 0x0164, 0x0160, 0x015C, 0x015C, 0x0158, 0x0155, 0x0151, 0x0151, 0x014E, 0x014A, 0x0147, 0x0147, 0x0144, 0x0141,
+ 0x013E, 0x013E, 0x013B, 0x0138, 0x0135, 0x0132, 0x0132, 0x012F, 0x012C, 0x0129, 0x0127, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C,
+ 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0104, 0x0102, 0x0100,
+ 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E6,
+ 0x01F8, 0x01F8, 0x01F0, 0x01F0, 0x01F0, 0x01F0, 0x01F0, 0x01F0, 0x01F0, 0x01F0, 0x01F0, 0x01F0, 0x01E9, 0x01E9, 0x01E9, 0x01E9,
+ 0x01E9, 0x01E1, 0x01E1, 0x01E1, 0x01E1, 0x01DA, 0x01DA, 0x01DA, 0x01D4, 0x01D4, 0x01D4, 0x01CD, 0x01CD, 0x01C7, 0x01C7, 0x01C7,
+ 0x01C0, 0x01C0, 0x01BA, 0x01BA, 0x01B4, 0x01B4, 0x01AF, 0x01AF, 0x01A9, 0x01A9, 0x01A4, 0x01A4, 0x019E, 0x019E, 0x0199, 0x0194,
+ 0x0194, 0x018F, 0x018F, 0x018A, 0x0186, 0x0186, 0x0181, 0x0181, 0x017D, 0x0178, 0x0178, 0x0174, 0x0170, 0x0170, 0x016C, 0x0168,
+ 0x0168, 0x0164, 0x0160, 0x015C, 0x015C, 0x0158, 0x0155, 0x0155, 0x0151, 0x014E, 0x014A, 0x014A, 0x0147, 0x0144, 0x0141, 0x0141,
+ 0x013E, 0x013B, 0x0138, 0x0135, 0x0135, 0x0132, 0x012F, 0x012C, 0x012C, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011F, 0x011C,
+ 0x011A, 0x0118, 0x0115, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE,
+ 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EA, 0x00E8, 0x00E6, 0x00E5,
+ 0x01F0, 0x01F0, 0x01F0, 0x01E9, 0x01E9, 0x01E9, 0x01E9, 0x01E9, 0x01E9, 0x01E9, 0x01E9, 0x01E9, 0x01E1, 0x01E1, 0x01E1, 0x01E1,
+ 0x01E1, 0x01DA, 0x01DA, 0x01DA, 0x01DA, 0x01D4, 0x01D4, 0x01D4, 0x01CD, 0x01CD, 0x01CD, 0x01C7, 0x01C7, 0x01C7, 0x01C0, 0x01C0,
+ 0x01BA, 0x01BA, 0x01B4, 0x01B4, 0x01AF, 0x01AF, 0x01AF, 0x01A9, 0x01A9, 0x01A4, 0x019E, 0x019E, 0x0199, 0x0199, 0x0194, 0x0194,
+ 0x018F, 0x018F, 0x018A, 0x0186, 0x0186, 0x0181, 0x017D, 0x017D, 0x0178, 0x0178, 0x0174, 0x0170, 0x0170, 0x016C, 0x0168, 0x0168,
+ 0x0164, 0x0160, 0x015C, 0x015C, 0x0158, 0x0155, 0x0155, 0x0151, 0x014E, 0x014A, 0x014A, 0x0147, 0x0144, 0x0141, 0x0141, 0x013E,
+ 0x013B, 0x0138, 0x0138, 0x0135, 0x0132, 0x012F, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A,
+ 0x0118, 0x0118, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0102, 0x0100, 0x00FE,
+ 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E6, 0x00E5,
+ 0x01E9, 0x01E9, 0x01E9, 0x01E9, 0x01E1, 0x01E1, 0x01E1, 0x01E1, 0x01E1, 0x01E1, 0x01E1, 0x01E1, 0x01E1, 0x01DA, 0x01DA, 0x01DA,
+ 0x01DA, 0x01D4, 0x01D4, 0x01D4, 0x01D4, 0x01CD, 0x01CD, 0x01CD, 0x01C7, 0x01C7, 0x01C7, 0x01C0, 0x01C0, 0x01C0, 0x01BA, 0x01BA,
+ 0x01B4, 0x01B4, 0x01B4, 0x01AF, 0x01AF, 0x01A9, 0x01A9, 0x01A4, 0x01A4, 0x019E, 0x019E, 0x0199, 0x0199, 0x0194, 0x018F, 0x018F,
+ 0x018A, 0x018A, 0x0186, 0x0186, 0x0181, 0x017D, 0x017D, 0x0178, 0x0174, 0x0174, 0x0170, 0x0170, 0x016C, 0x0168, 0x0168, 0x0164,
+ 0x0160, 0x015C, 0x015C, 0x0158, 0x0155, 0x0155, 0x0151, 0x014E, 0x014E, 0x014A, 0x0147, 0x0144, 0x0144, 0x0141, 0x013E, 0x013B,
+ 0x013B, 0x0138, 0x0135, 0x0132, 0x012F, 0x012F, 0x012C, 0x0129, 0x0127, 0x0127, 0x0124, 0x0121, 0x011F, 0x011C, 0x011C, 0x011A,
+ 0x0118, 0x0115, 0x0113, 0x0111, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC,
+ 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00EB, 0x00EA, 0x00E8, 0x00E6, 0x00E5, 0x00E3,
+ 0x01E1, 0x01E1, 0x01E1, 0x01E1, 0x01DA, 0x01DA, 0x01DA, 0x01DA, 0x01DA, 0x01DA, 0x01DA, 0x01DA, 0x01DA, 0x01D4, 0x01D4, 0x01D4,
+ 0x01D4, 0x01D4, 0x01CD, 0x01CD, 0x01CD, 0x01C7, 0x01C7, 0x01C7, 0x01C7, 0x01C0, 0x01C0, 0x01C0, 0x01BA, 0x01BA, 0x01B4, 0x01B4,
+ 0x01B4, 0x01AF, 0x01AF, 0x01A9, 0x01A9, 0x01A4, 0x01A4, 0x019E, 0x019E, 0x0199, 0x0199, 0x0194, 0x0194, 0x018F, 0x018F, 0x018A,
+ 0x018A, 0x0186, 0x0181, 0x0181, 0x017D, 0x017D, 0x0178, 0x0174, 0x0174, 0x0170, 0x016C, 0x016C, 0x0168, 0x0164, 0x0164, 0x0160,
+ 0x015C, 0x015C, 0x0158, 0x0155, 0x0155, 0x0151, 0x014E, 0x014E, 0x014A, 0x0147, 0x0144, 0x0144, 0x0141, 0x013E, 0x013B, 0x013B,
+ 0x0138, 0x0135, 0x0132, 0x0132, 0x012F, 0x012C, 0x0129, 0x0129, 0x0127, 0x0124, 0x0121, 0x011F, 0x011F, 0x011C, 0x011A, 0x0118,
+ 0x0115, 0x0115, 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FC,
+ 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E6, 0x00E5, 0x00E3,
+ 0x01DA, 0x01DA, 0x01DA, 0x01DA, 0x01DA, 0x01D4, 0x01D4, 0x01D4, 0x01D4, 0x01D4, 0x01D4, 0x01D4, 0x01D4, 0x01CD, 0x01CD, 0x01CD,
+ 0x01CD, 0x01CD, 0x01C7, 0x01C7, 0x01C7, 0x01C7, 0x01C0, 0x01C0, 0x01C0, 0x01BA, 0x01BA, 0x01BA, 0x01B4, 0x01B4, 0x01AF, 0x01AF,
+ 0x01AF, 0x01A9, 0x01A9, 0x01A4, 0x01A4, 0x019E, 0x019E, 0x0199, 0x0199, 0x0194, 0x0194, 0x018F, 0x018F, 0x018A, 0x018A, 0x0186,
+ 0x0186, 0x0181, 0x0181, 0x017D, 0x0178, 0x0178, 0x0174, 0x0174, 0x0170, 0x016C, 0x016C, 0x0168, 0x0164, 0x0164, 0x0160, 0x015C,
+ 0x015C, 0x0158, 0x0155, 0x0155, 0x0151, 0x014E, 0x014E, 0x014A, 0x0147, 0x0144, 0x0144, 0x0141, 0x013E, 0x013E, 0x013B, 0x0138,
+ 0x0135, 0x0135, 0x0132, 0x012F, 0x012C, 0x0129, 0x0129, 0x0127, 0x0124, 0x0121, 0x0121, 0x011F, 0x011C, 0x011A, 0x0118, 0x0118,
+ 0x0115, 0x0113, 0x0111, 0x010E, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA,
+ 0x00F8, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00EB, 0x00EA, 0x00E8, 0x00E6, 0x00E5, 0x00E3, 0x00E3,
+ 0x01D4, 0x01D4, 0x01D4, 0x01D4, 0x01D4, 0x01CD, 0x01CD, 0x01CD, 0x01CD, 0x01CD, 0x01CD, 0x01CD, 0x01CD, 0x01C7, 0x01C7, 0x01C7,
+ 0x01C7, 0x01C7, 0x01C0, 0x01C0, 0x01C0, 0x01C0, 0x01BA, 0x01BA, 0x01BA, 0x01B4, 0x01B4, 0x01B4, 0x01AF, 0x01AF, 0x01AF, 0x01A9,
+ 0x01A9, 0x01A4, 0x01A4, 0x019E, 0x019E, 0x019E, 0x0199, 0x0199, 0x0194, 0x0194, 0x018F, 0x018F, 0x018A, 0x018A, 0x0186, 0x0181,
+ 0x0181, 0x017D, 0x017D, 0x0178, 0x0178, 0x0174, 0x0170, 0x0170, 0x016C, 0x016C, 0x0168, 0x0164, 0x0164, 0x0160, 0x015C, 0x015C,
+ 0x0158, 0x0155, 0x0155, 0x0151, 0x014E, 0x014E, 0x014A, 0x0147, 0x0144, 0x0144, 0x0141, 0x013E, 0x013E, 0x013B, 0x0138, 0x0135,
+ 0x0135, 0x0132, 0x012F, 0x012C, 0x012C, 0x0129, 0x0127, 0x0124, 0x0124, 0x0121, 0x011F, 0x011C, 0x011A, 0x011A, 0x0118, 0x0115,
+ 0x0113, 0x0111, 0x0111, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FA,
+ 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F2, 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E5, 0x00E3, 0x00E1,
+ 0x01CD, 0x01CD, 0x01CD, 0x01CD, 0x01CD, 0x01CD, 0x01C7, 0x01C7, 0x01C7, 0x01C7, 0x01C7, 0x01C7, 0x01C7, 0x01C7, 0x01C0, 0x01C0,
+ 0x01C0, 0x01C0, 0x01BA, 0x01BA, 0x01BA, 0x01BA, 0x01B4, 0x01B4, 0x01B4, 0x01AF, 0x01AF, 0x01AF, 0x01A9, 0x01A9, 0x01A9, 0x01A4,
+ 0x01A4, 0x019E, 0x019E, 0x019E, 0x0199, 0x0199, 0x0194, 0x0194, 0x018F, 0x018F, 0x018A, 0x018A, 0x0186, 0x0186, 0x0181, 0x0181,
+ 0x017D, 0x017D, 0x0178, 0x0174, 0x0174, 0x0170, 0x0170, 0x016C, 0x0168, 0x0168, 0x0164, 0x0164, 0x0160, 0x015C, 0x015C, 0x0158,
+ 0x0155, 0x0155, 0x0151, 0x014E, 0x014E, 0x014A, 0x0147, 0x0147, 0x0144, 0x0141, 0x013E, 0x013E, 0x013B, 0x0138, 0x0135, 0x0135,
+ 0x0132, 0x012F, 0x012F, 0x012C, 0x0129, 0x0127, 0x0124, 0x0124, 0x0121, 0x011F, 0x011C, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113,
+ 0x0113, 0x0111, 0x010E, 0x010C, 0x010A, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8,
+ 0x00F6, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EA, 0x00E8, 0x00E6, 0x00E5, 0x00E3, 0x00E1, 0x00E1,
+ 0x01C7, 0x01C7, 0x01C7, 0x01C7, 0x01C7, 0x01C7, 0x01C0, 0x01C0, 0x01C0, 0x01C0, 0x01C0, 0x01C0, 0x01C0, 0x01C0, 0x01BA, 0x01BA,
+ 0x01BA, 0x01BA, 0x01BA, 0x01B4, 0x01B4, 0x01B4, 0x01AF, 0x01AF, 0x01AF, 0x01AF, 0x01A9, 0x01A9, 0x01A9, 0x01A4, 0x01A4, 0x019E,
+ 0x019E, 0x019E, 0x0199, 0x0199, 0x0194, 0x0194, 0x018F, 0x018F, 0x018A, 0x018A, 0x0186, 0x0186, 0x0181, 0x0181, 0x017D, 0x017D,
+ 0x0178, 0x0178, 0x0174, 0x0174, 0x0170, 0x016C, 0x016C, 0x0168, 0x0168, 0x0164, 0x0160, 0x0160, 0x015C, 0x015C, 0x0158, 0x0155,
+ 0x0155, 0x0151, 0x014E, 0x014E, 0x014A, 0x0147, 0x0147, 0x0144, 0x0141, 0x013E, 0x013E, 0x013B, 0x0138, 0x0138, 0x0135, 0x0132,
+ 0x012F, 0x012F, 0x012C, 0x0129, 0x0127, 0x0127, 0x0124, 0x0121, 0x011F, 0x011F, 0x011C, 0x011A, 0x0118, 0x0115, 0x0115, 0x0113,
+ 0x0111, 0x010E, 0x010C, 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00FA, 0x00F8,
+ 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EA, 0x00E8, 0x00E6, 0x00E6, 0x00E5, 0x00E3, 0x00E1, 0x00E0,
+ 0x01C0, 0x01C0, 0x01C0, 0x01C0, 0x01C0, 0x01C0, 0x01BA, 0x01BA, 0x01BA, 0x01BA, 0x01BA, 0x01BA, 0x01BA, 0x01BA, 0x01B4, 0x01B4,
+ 0x01B4, 0x01B4, 0x01B4, 0x01AF, 0x01AF, 0x01AF, 0x01AF, 0x01A9, 0x01A9, 0x01A9, 0x01A4, 0x01A4, 0x01A4, 0x019E, 0x019E, 0x0199,
+ 0x0199, 0x0199, 0x0194, 0x0194, 0x018F, 0x018F, 0x018F, 0x018A, 0x018A, 0x0186, 0x0186, 0x0181, 0x0181, 0x017D, 0x017D, 0x0178,
+ 0x0174, 0x0174, 0x0170, 0x0170, 0x016C, 0x016C, 0x0168, 0x0164, 0x0164, 0x0160, 0x0160, 0x015C, 0x0158, 0x0158, 0x0155, 0x0151,
+ 0x0151, 0x014E, 0x014E, 0x014A, 0x0147, 0x0144, 0x0144, 0x0141, 0x013E, 0x013E, 0x013B, 0x0138, 0x0138, 0x0135, 0x0132, 0x012F,
+ 0x012F, 0x012C, 0x0129, 0x0127, 0x0127, 0x0124, 0x0121, 0x011F, 0x011F, 0x011C, 0x011A, 0x0118, 0x0118, 0x0115, 0x0113, 0x0111,
+ 0x010E, 0x010E, 0x010C, 0x010A, 0x0108, 0x0106, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6,
+ 0x00F4, 0x00F4, 0x00F2, 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E6, 0x00E5, 0x00E3, 0x00E1, 0x00E0, 0x00E0,
+ 0x01BA, 0x01BA, 0x01BA, 0x01BA, 0x01BA, 0x01BA, 0x01BA, 0x01B4, 0x01B4, 0x01B4, 0x01B4, 0x01B4, 0x01B4, 0x01B4, 0x01AF, 0x01AF,
+ 0x01AF, 0x01AF, 0x01AF, 0x01A9, 0x01A9, 0x01A9, 0x01A9, 0x01A4, 0x01A4, 0x01A4, 0x019E, 0x019E, 0x019E, 0x0199, 0x0199, 0x0199,
+ 0x0194, 0x0194, 0x018F, 0x018F, 0x018F, 0x018A, 0x018A, 0x0186, 0x0186, 0x0181, 0x0181, 0x017D, 0x017D, 0x0178, 0x0178, 0x0174,
+ 0x0174, 0x0170, 0x0170, 0x016C, 0x0168, 0x0168, 0x0164, 0x0164, 0x0160, 0x0160, 0x015C, 0x0158, 0x0158, 0x0155, 0x0151, 0x0151,
+ 0x014E, 0x014A, 0x014A, 0x0147, 0x0144, 0x0144, 0x0141, 0x013E, 0x013E, 0x013B, 0x0138, 0x0138, 0x0135, 0x0132, 0x012F, 0x012F,
+ 0x012C, 0x0129, 0x0129, 0x0127, 0x0124, 0x0121, 0x0121, 0x011F, 0x011C, 0x011A, 0x011A, 0x0118, 0x0115, 0x0113, 0x0111, 0x0111,
+ 0x010E, 0x010C, 0x010A, 0x0108, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F8, 0x00F6,
+ 0x00F4, 0x00F2, 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00EB, 0x00EA, 0x00E8, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E1, 0x00E0, 0x00DE,
+ 0x01B4, 0x01B4, 0x01B4, 0x01B4, 0x01B4, 0x01B4, 0x01B4, 0x01AF, 0x01AF, 0x01AF, 0x01AF, 0x01AF, 0x01AF, 0x01AF, 0x01AF, 0x01A9,
+ 0x01A9, 0x01A9, 0x01A9, 0x01A4, 0x01A4, 0x01A4, 0x01A4, 0x019E, 0x019E, 0x019E, 0x0199, 0x0199, 0x0199, 0x0194, 0x0194, 0x0194,
+ 0x018F, 0x018F, 0x018F, 0x018A, 0x018A, 0x0186, 0x0186, 0x0181, 0x0181, 0x017D, 0x017D, 0x0178, 0x0178, 0x0174, 0x0174, 0x0170,
+ 0x0170, 0x016C, 0x016C, 0x0168, 0x0168, 0x0164, 0x0164, 0x0160, 0x015C, 0x015C, 0x0158, 0x0158, 0x0155, 0x0151, 0x0151, 0x014E,
+ 0x014A, 0x014A, 0x0147, 0x0144, 0x0144, 0x0141, 0x013E, 0x013E, 0x013B, 0x0138, 0x0138, 0x0135, 0x0132, 0x0132, 0x012F, 0x012C,
+ 0x0129, 0x0129, 0x0127, 0x0124, 0x0121, 0x0121, 0x011F, 0x011C, 0x011A, 0x011A, 0x0118, 0x0115, 0x0113, 0x0113, 0x0111, 0x010E,
+ 0x010C, 0x010A, 0x010A, 0x0108, 0x0106, 0x0104, 0x0102, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F6, 0x00F4,
+ 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E5, 0x00E3, 0x00E1, 0x00E0, 0x00E0, 0x00DE,
+ 0x01AF, 0x01AF, 0x01AF, 0x01AF, 0x01AF, 0x01AF, 0x01AF, 0x01A9, 0x01A9, 0x01A9, 0x01A9, 0x01A9, 0x01A9, 0x01A9, 0x01A9, 0x01A4,
+ 0x01A4, 0x01A4, 0x01A4, 0x019E, 0x019E, 0x019E, 0x019E, 0x0199, 0x0199, 0x0199, 0x0199, 0x0194, 0x0194, 0x0194, 0x018F, 0x018F,
+ 0x018A, 0x018A, 0x018A, 0x0186, 0x0186, 0x0181, 0x0181, 0x017D, 0x017D, 0x017D, 0x0178, 0x0178, 0x0174, 0x0174, 0x0170, 0x0170,
+ 0x016C, 0x0168, 0x0168, 0x0164, 0x0164, 0x0160, 0x0160, 0x015C, 0x015C, 0x0158, 0x0155, 0x0155, 0x0151, 0x0151, 0x014E, 0x014A,
+ 0x014A, 0x0147, 0x0144, 0x0144, 0x0141, 0x013E, 0x013E, 0x013B, 0x0138, 0x0138, 0x0135, 0x0132, 0x0132, 0x012F, 0x012C, 0x0129,
+ 0x0129, 0x0127, 0x0124, 0x0121, 0x0121, 0x011F, 0x011C, 0x011C, 0x011A, 0x0118, 0x0115, 0x0113, 0x0113, 0x0111, 0x010E, 0x010C,
+ 0x010C, 0x010A, 0x0108, 0x0106, 0x0104, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F4,
+ 0x00F2, 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00E8, 0x00E6, 0x00E5, 0x00E3, 0x00E3, 0x00E1, 0x00E0, 0x00DE, 0x00DD,
+ 0x01A9, 0x01A9, 0x01A9, 0x01A9, 0x01A9, 0x01A9, 0x01A9, 0x01A9, 0x01A4, 0x01A4, 0x01A4, 0x01A4, 0x01A4, 0x01A4, 0x01A4, 0x019E,
+ 0x019E, 0x019E, 0x019E, 0x019E, 0x0199, 0x0199, 0x0199, 0x0194, 0x0194, 0x0194, 0x0194, 0x018F, 0x018F, 0x018F, 0x018A, 0x018A,
+ 0x0186, 0x0186, 0x0186, 0x0181, 0x0181, 0x017D, 0x017D, 0x017D, 0x0178, 0x0178, 0x0174, 0x0174, 0x0170, 0x0170, 0x016C, 0x016C,
+ 0x0168, 0x0168, 0x0164, 0x0164, 0x0160, 0x015C, 0x015C, 0x0158, 0x0158, 0x0155, 0x0155, 0x0151, 0x014E, 0x014E, 0x014A, 0x0147,
+ 0x0147, 0x0144, 0x0144, 0x0141, 0x013E, 0x013E, 0x013B, 0x0138, 0x0138, 0x0135, 0x0132, 0x0132, 0x012F, 0x012C, 0x0129, 0x0129,
+ 0x0127, 0x0124, 0x0124, 0x0121, 0x011F, 0x011C, 0x011C, 0x011A, 0x0118, 0x0115, 0x0115, 0x0113, 0x0111, 0x010E, 0x010E, 0x010C,
+ 0x010A, 0x0108, 0x0106, 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F2,
+ 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00EB, 0x00EA, 0x00E8, 0x00E6, 0x00E6, 0x00E5, 0x00E3, 0x00E1, 0x00E0, 0x00DE, 0x00DD, 0x00DD,
+ 0x01A4, 0x01A4, 0x01A4, 0x01A4, 0x01A4, 0x01A4, 0x01A4, 0x01A4, 0x019E, 0x019E, 0x019E, 0x019E, 0x019E, 0x019E, 0x019E, 0x0199,
+ 0x0199, 0x0199, 0x0199, 0x0199, 0x0194, 0x0194, 0x0194, 0x0194, 0x018F, 0x018F, 0x018F, 0x018A, 0x018A, 0x018A, 0x0186, 0x0186,
+ 0x0186, 0x0181, 0x0181, 0x017D, 0x017D, 0x017D, 0x0178, 0x0178, 0x0174, 0x0174, 0x0170, 0x0170, 0x016C, 0x016C, 0x0168, 0x0168,
+ 0x0164, 0x0164, 0x0160, 0x0160, 0x015C, 0x015C, 0x0158, 0x0158, 0x0155, 0x0151, 0x0151, 0x014E, 0x014E, 0x014A, 0x0147, 0x0147,
+ 0x0144, 0x0141, 0x0141, 0x013E, 0x013E, 0x013B, 0x0138, 0x0138, 0x0135, 0x0132, 0x0132, 0x012F, 0x012C, 0x0129, 0x0129, 0x0127,
+ 0x0124, 0x0124, 0x0121, 0x011F, 0x011C, 0x011C, 0x011A, 0x0118, 0x0115, 0x0115, 0x0113, 0x0111, 0x010E, 0x010E, 0x010C, 0x010A,
+ 0x0108, 0x0108, 0x0106, 0x0104, 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0,
+ 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E6, 0x00E5, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00DE, 0x00DD, 0x00DB,
+ 0x019E, 0x019E, 0x019E, 0x019E, 0x019E, 0x019E, 0x019E, 0x019E, 0x0199, 0x0199, 0x0199, 0x0199, 0x0199, 0x0199, 0x0199, 0x0194,
+ 0x0194, 0x0194, 0x0194, 0x0194, 0x018F, 0x018F, 0x018F, 0x018F, 0x018A, 0x018A, 0x018A, 0x0186, 0x0186, 0x0186, 0x0181, 0x0181,
+ 0x0181, 0x017D, 0x017D, 0x017D, 0x0178, 0x0178, 0x0174, 0x0174, 0x0170, 0x0170, 0x016C, 0x016C, 0x016C, 0x0168, 0x0168, 0x0164,
+ 0x0164, 0x0160, 0x015C, 0x015C, 0x0158, 0x0158, 0x0155, 0x0155, 0x0151, 0x0151, 0x014E, 0x014A, 0x014A, 0x0147, 0x0147, 0x0144,
+ 0x0141, 0x0141, 0x013E, 0x013B, 0x013B, 0x0138, 0x0135, 0x0135, 0x0132, 0x012F, 0x012F, 0x012C, 0x0129, 0x0129, 0x0127, 0x0124,
+ 0x0124, 0x0121, 0x011F, 0x011C, 0x011C, 0x011A, 0x0118, 0x0118, 0x0115, 0x0113, 0x0111, 0x0111, 0x010E, 0x010C, 0x010A, 0x010A,
+ 0x0108, 0x0106, 0x0104, 0x0102, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F0,
+ 0x00EF, 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00E8, 0x00E6, 0x00E5, 0x00E3, 0x00E3, 0x00E1, 0x00E0, 0x00DE, 0x00DD, 0x00DB, 0x00DB,
+ 0x0199, 0x0199, 0x0199, 0x0199, 0x0199, 0x0199, 0x0199, 0x0199, 0x0194, 0x0194, 0x0194, 0x0194, 0x0194, 0x0194, 0x0194, 0x0194,
+ 0x018F, 0x018F, 0x018F, 0x018F, 0x018A, 0x018A, 0x018A, 0x018A, 0x0186, 0x0186, 0x0186, 0x0181, 0x0181, 0x0181, 0x017D, 0x017D,
+ 0x017D, 0x0178, 0x0178, 0x0178, 0x0174, 0x0174, 0x0170, 0x0170, 0x0170, 0x016C, 0x016C, 0x0168, 0x0168, 0x0164, 0x0164, 0x0160,
+ 0x0160, 0x015C, 0x015C, 0x0158, 0x0158, 0x0155, 0x0151, 0x0151, 0x014E, 0x014E, 0x014A, 0x014A, 0x0147, 0x0144, 0x0144, 0x0141,
+ 0x0141, 0x013E, 0x013B, 0x013B, 0x0138, 0x0135, 0x0135, 0x0132, 0x012F, 0x012F, 0x012C, 0x0129, 0x0129, 0x0127, 0x0124, 0x0124,
+ 0x0121, 0x011F, 0x011F, 0x011C, 0x011A, 0x0118, 0x0118, 0x0115, 0x0113, 0x0111, 0x0111, 0x010E, 0x010C, 0x010A, 0x010A, 0x0108,
+ 0x0106, 0x0104, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00EF,
+ 0x00EF, 0x00ED, 0x00EB, 0x00EA, 0x00E8, 0x00E6, 0x00E6, 0x00E5, 0x00E3, 0x00E1, 0x00E0, 0x00DE, 0x00DE, 0x00DD, 0x00DB, 0x00DA,
+ 0x0194, 0x0194, 0x0194, 0x0194, 0x0194, 0x0194, 0x0194, 0x0194, 0x0194, 0x018F, 0x018F, 0x018F, 0x018F, 0x018F, 0x018F, 0x018F,
+ 0x018A, 0x018A, 0x018A, 0x018A, 0x0186, 0x0186, 0x0186, 0x0186, 0x0181, 0x0181, 0x0181, 0x0181, 0x017D, 0x017D, 0x017D, 0x0178,
+ 0x0178, 0x0174, 0x0174, 0x0174, 0x0170, 0x0170, 0x016C, 0x016C, 0x016C, 0x0168, 0x0168, 0x0164, 0x0164, 0x0160, 0x0160, 0x015C,
+ 0x015C, 0x0158, 0x0158, 0x0155, 0x0155, 0x0151, 0x0151, 0x014E, 0x014E, 0x014A, 0x0147, 0x0147, 0x0144, 0x0144, 0x0141, 0x013E,
+ 0x013E, 0x013B, 0x013B, 0x0138, 0x0135, 0x0135, 0x0132, 0x012F, 0x012F, 0x012C, 0x0129, 0x0129, 0x0127, 0x0124, 0x0124, 0x0121,
+ 0x011F, 0x011F, 0x011C, 0x011A, 0x0118, 0x0118, 0x0115, 0x0113, 0x0111, 0x0111, 0x010E, 0x010C, 0x010C, 0x010A, 0x0108, 0x0106,
+ 0x0106, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF,
+ 0x00ED, 0x00EB, 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E5, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00DE, 0x00DD, 0x00DB, 0x00DA, 0x00DA,
+ 0x018F, 0x018F, 0x018F, 0x018F, 0x018F, 0x018F, 0x018F, 0x018F, 0x018F, 0x018A, 0x018A, 0x018A, 0x018A, 0x018A, 0x018A, 0x018A,
+ 0x0186, 0x0186, 0x0186, 0x0186, 0x0186, 0x0181, 0x0181, 0x0181, 0x0181, 0x017D, 0x017D, 0x017D, 0x0178, 0x0178, 0x0178, 0x0174,
+ 0x0174, 0x0174, 0x0170, 0x0170, 0x016C, 0x016C, 0x016C, 0x0168, 0x0168, 0x0164, 0x0164, 0x0160, 0x0160, 0x015C, 0x015C, 0x0158,
+ 0x0158, 0x0155, 0x0155, 0x0151, 0x0151, 0x014E, 0x014E, 0x014A, 0x014A, 0x0147, 0x0147, 0x0144, 0x0141, 0x0141, 0x013E, 0x013E,
+ 0x013B, 0x0138, 0x0138, 0x0135, 0x0135, 0x0132, 0x012F, 0x012F, 0x012C, 0x0129, 0x0129, 0x0127, 0x0124, 0x0124, 0x0121, 0x011F,
+ 0x011F, 0x011C, 0x011A, 0x0118, 0x0118, 0x0115, 0x0113, 0x0113, 0x0111, 0x010E, 0x010C, 0x010C, 0x010A, 0x0108, 0x0106, 0x0106,
+ 0x0104, 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F2, 0x00F0, 0x00EF, 0x00ED,
+ 0x00EB, 0x00EB, 0x00EA, 0x00E8, 0x00E6, 0x00E5, 0x00E3, 0x00E3, 0x00E1, 0x00E0, 0x00DE, 0x00DD, 0x00DD, 0x00DB, 0x00DA, 0x00D9,
+ 0x018A, 0x018A, 0x018A, 0x018A, 0x018A, 0x018A, 0x018A, 0x018A, 0x018A, 0x0186, 0x0186, 0x0186, 0x0186, 0x0186, 0x0186, 0x0186,
+ 0x0181, 0x0181, 0x0181, 0x0181, 0x0181, 0x017D, 0x017D, 0x017D, 0x017D, 0x0178, 0x0178, 0x0178, 0x0174, 0x0174, 0x0174, 0x0170,
+ 0x0170, 0x0170, 0x016C, 0x016C, 0x016C, 0x0168, 0x0168, 0x0164, 0x0164, 0x0160, 0x0160, 0x0160, 0x015C, 0x015C, 0x0158, 0x0158,
+ 0x0155, 0x0155, 0x0151, 0x0151, 0x014E, 0x014E, 0x014A, 0x0147, 0x0147, 0x0144, 0x0144, 0x0141, 0x0141, 0x013E, 0x013B, 0x013B,
+ 0x0138, 0x0138, 0x0135, 0x0132, 0x0132, 0x012F, 0x012F, 0x012C, 0x0129, 0x0129, 0x0127, 0x0124, 0x0124, 0x0121, 0x011F, 0x011F,
+ 0x011C, 0x011A, 0x0118, 0x0118, 0x0115, 0x0113, 0x0113, 0x0111, 0x010E, 0x010C, 0x010C, 0x010A, 0x0108, 0x0106, 0x0106, 0x0104,
+ 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F0, 0x00EF, 0x00ED, 0x00ED,
+ 0x00EB, 0x00EA, 0x00E8, 0x00E6, 0x00E6, 0x00E5, 0x00E3, 0x00E1, 0x00E0, 0x00DE, 0x00DE, 0x00DD, 0x00DB, 0x00DA, 0x00D9, 0x00D7,
+ 0x0186, 0x0186, 0x0186, 0x0186, 0x0186, 0x0186, 0x0186, 0x0186, 0x0186, 0x0181, 0x0181, 0x0181, 0x0181, 0x0181, 0x0181, 0x0181,
+ 0x017D, 0x017D, 0x017D, 0x017D, 0x017D, 0x0178, 0x0178, 0x0178, 0x0178, 0x0174, 0x0174, 0x0174, 0x0170, 0x0170, 0x0170, 0x016C,
+ 0x016C, 0x016C, 0x0168, 0x0168, 0x0168, 0x0164, 0x0164, 0x0160, 0x0160, 0x0160, 0x015C, 0x015C, 0x0158, 0x0158, 0x0155, 0x0155,
+ 0x0151, 0x0151, 0x014E, 0x014E, 0x014A, 0x014A, 0x0147, 0x0147, 0x0144, 0x0144, 0x0141, 0x013E, 0x013E, 0x013B, 0x013B, 0x0138,
+ 0x0135, 0x0135, 0x0132, 0x0132, 0x012F, 0x012C, 0x012C, 0x0129, 0x0127, 0x0127, 0x0124, 0x0121, 0x0121, 0x011F, 0x011C, 0x011C,
+ 0x011A, 0x0118, 0x0118, 0x0115, 0x0113, 0x0113, 0x0111, 0x010E, 0x010C, 0x010C, 0x010A, 0x0108, 0x0108, 0x0106, 0x0104, 0x0102,
+ 0x0102, 0x0100, 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00EB,
+ 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E5, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00DE, 0x00DD, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D7,
+ 0x0181, 0x0181, 0x0181, 0x0181, 0x0181, 0x0181, 0x0181, 0x0181, 0x0181, 0x0181, 0x017D, 0x017D, 0x017D, 0x017D, 0x017D, 0x017D,
+ 0x017D, 0x0178, 0x0178, 0x0178, 0x0178, 0x0174, 0x0174, 0x0174, 0x0174, 0x0170, 0x0170, 0x0170, 0x0170, 0x016C, 0x016C, 0x016C,
+ 0x0168, 0x0168, 0x0164, 0x0164, 0x0164, 0x0160, 0x0160, 0x0160, 0x015C, 0x015C, 0x0158, 0x0158, 0x0155, 0x0155, 0x0151, 0x0151,
+ 0x014E, 0x014E, 0x014A, 0x014A, 0x0147, 0x0147, 0x0144, 0x0144, 0x0141, 0x0141, 0x013E, 0x013E, 0x013B, 0x0138, 0x0138, 0x0135,
+ 0x0135, 0x0132, 0x012F, 0x012F, 0x012C, 0x012C, 0x0129, 0x0127, 0x0127, 0x0124, 0x0121, 0x0121, 0x011F, 0x011C, 0x011C, 0x011A,
+ 0x0118, 0x0118, 0x0115, 0x0113, 0x0113, 0x0111, 0x010E, 0x010E, 0x010C, 0x010A, 0x0108, 0x0108, 0x0106, 0x0104, 0x0102, 0x0102,
+ 0x0100, 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EA,
+ 0x00EA, 0x00E8, 0x00E6, 0x00E5, 0x00E3, 0x00E3, 0x00E1, 0x00E0, 0x00DE, 0x00DD, 0x00DD, 0x00DB, 0x00DA, 0x00D9, 0x00D7, 0x00D6,
+ 0x017D, 0x017D, 0x017D, 0x017D, 0x017D, 0x017D, 0x017D, 0x017D, 0x017D, 0x017D, 0x0178, 0x0178, 0x0178, 0x0178, 0x0178, 0x0178,
+ 0x0178, 0x0174, 0x0174, 0x0174, 0x0174, 0x0170, 0x0170, 0x0170, 0x0170, 0x016C, 0x016C, 0x016C, 0x016C, 0x0168, 0x0168, 0x0168,
+ 0x0164, 0x0164, 0x0164, 0x0160, 0x0160, 0x015C, 0x015C, 0x015C, 0x0158, 0x0158, 0x0155, 0x0155, 0x0151, 0x0151, 0x0151, 0x014E,
+ 0x014E, 0x014A, 0x014A, 0x0147, 0x0147, 0x0144, 0x0144, 0x0141, 0x013E, 0x013E, 0x013B, 0x013B, 0x0138, 0x0138, 0x0135, 0x0132,
+ 0x0132, 0x012F, 0x012F, 0x012C, 0x0129, 0x0129, 0x0127, 0x0127, 0x0124, 0x0121, 0x0121, 0x011F, 0x011C, 0x011C, 0x011A, 0x0118,
+ 0x0118, 0x0115, 0x0113, 0x0113, 0x0111, 0x010E, 0x010E, 0x010C, 0x010A, 0x0108, 0x0108, 0x0106, 0x0104, 0x0102, 0x0102, 0x0100,
+ 0x00FE, 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EB, 0x00EA,
+ 0x00E8, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E1, 0x00E0, 0x00DE, 0x00DE, 0x00DD, 0x00DB, 0x00DA, 0x00D9, 0x00D7, 0x00D7, 0x00D6,
+ 0x0178, 0x0178, 0x0178, 0x0178, 0x0178, 0x0178, 0x0178, 0x0178, 0x0178, 0x0178, 0x0174, 0x0174, 0x0174, 0x0174, 0x0174, 0x0174,
+ 0x0174, 0x0170, 0x0170, 0x0170, 0x0170, 0x0170, 0x016C, 0x016C, 0x016C, 0x016C, 0x0168, 0x0168, 0x0168, 0x0164, 0x0164, 0x0164,
+ 0x0160, 0x0160, 0x0160, 0x015C, 0x015C, 0x015C, 0x0158, 0x0158, 0x0155, 0x0155, 0x0151, 0x0151, 0x0151, 0x014E, 0x014E, 0x014A,
+ 0x014A, 0x0147, 0x0147, 0x0144, 0x0144, 0x0141, 0x0141, 0x013E, 0x013E, 0x013B, 0x013B, 0x0138, 0x0135, 0x0135, 0x0132, 0x0132,
+ 0x012F, 0x012F, 0x012C, 0x0129, 0x0129, 0x0127, 0x0124, 0x0124, 0x0121, 0x0121, 0x011F, 0x011C, 0x011C, 0x011A, 0x0118, 0x0118,
+ 0x0115, 0x0113, 0x0113, 0x0111, 0x010E, 0x010E, 0x010C, 0x010A, 0x0108, 0x0108, 0x0106, 0x0104, 0x0104, 0x0102, 0x0100, 0x00FE,
+ 0x00FE, 0x00FC, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F2, 0x00F0, 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EA, 0x00E8,
+ 0x00E6, 0x00E6, 0x00E5, 0x00E3, 0x00E1, 0x00E0, 0x00E0, 0x00DE, 0x00DD, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D7, 0x00D6, 0x00D4,
+ 0x0174, 0x0174, 0x0174, 0x0174, 0x0174, 0x0174, 0x0174, 0x0174, 0x0174, 0x0174, 0x0170, 0x0170, 0x0170, 0x0170, 0x0170, 0x0170,
+ 0x0170, 0x016C, 0x016C, 0x016C, 0x016C, 0x016C, 0x0168, 0x0168, 0x0168, 0x0168, 0x0164, 0x0164, 0x0164, 0x0160, 0x0160, 0x0160,
+ 0x015C, 0x015C, 0x015C, 0x0158, 0x0158, 0x0158, 0x0155, 0x0155, 0x0151, 0x0151, 0x0151, 0x014E, 0x014E, 0x014A, 0x014A, 0x0147,
+ 0x0147, 0x0144, 0x0144, 0x0141, 0x0141, 0x013E, 0x013E, 0x013B, 0x013B, 0x0138, 0x0138, 0x0135, 0x0135, 0x0132, 0x012F, 0x012F,
+ 0x012C, 0x012C, 0x0129, 0x0129, 0x0127, 0x0124, 0x0124, 0x0121, 0x011F, 0x011F, 0x011C, 0x011C, 0x011A, 0x0118, 0x0118, 0x0115,
+ 0x0113, 0x0113, 0x0111, 0x010E, 0x010E, 0x010C, 0x010A, 0x0108, 0x0108, 0x0106, 0x0104, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FE,
+ 0x00FC, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F4, 0x00F2, 0x00F2, 0x00F0, 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EA, 0x00E8, 0x00E8,
+ 0x00E6, 0x00E5, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00DE, 0x00DD, 0x00DB, 0x00DB, 0x00DA, 0x00D9, 0x00D7, 0x00D6, 0x00D6, 0x00D4,
+ 0x0170, 0x0170, 0x0170, 0x0170, 0x0170, 0x0170, 0x0170, 0x0170, 0x0170, 0x0170, 0x016C, 0x016C, 0x016C, 0x016C, 0x016C, 0x016C,
+ 0x016C, 0x0168, 0x0168, 0x0168, 0x0168, 0x0168, 0x0164, 0x0164, 0x0164, 0x0164, 0x0160, 0x0160, 0x0160, 0x015C, 0x015C, 0x015C,
+ 0x015C, 0x0158, 0x0158, 0x0155, 0x0155, 0x0155, 0x0151, 0x0151, 0x0151, 0x014E, 0x014E, 0x014A, 0x014A, 0x0147, 0x0147, 0x0144,
+ 0x0144, 0x0144, 0x0141, 0x0141, 0x013E, 0x013E, 0x013B, 0x0138, 0x0138, 0x0135, 0x0135, 0x0132, 0x0132, 0x012F, 0x012F, 0x012C,
+ 0x012C, 0x0129, 0x0127, 0x0127, 0x0124, 0x0124, 0x0121, 0x011F, 0x011F, 0x011C, 0x011A, 0x011A, 0x0118, 0x0115, 0x0115, 0x0113,
+ 0x0111, 0x0111, 0x010E, 0x010C, 0x010C, 0x010A, 0x0108, 0x0108, 0x0106, 0x0104, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FE, 0x00FC,
+ 0x00FA, 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00EB, 0x00EA, 0x00E8, 0x00E8, 0x00E6,
+ 0x00E5, 0x00E3, 0x00E3, 0x00E1, 0x00E0, 0x00DE, 0x00DD, 0x00DD, 0x00DB, 0x00DA, 0x00D9, 0x00D7, 0x00D7, 0x00D6, 0x00D4, 0x00D3,
+ 0x016C, 0x016C, 0x016C, 0x016C, 0x016C, 0x016C, 0x016C, 0x016C, 0x016C, 0x016C, 0x016C, 0x0168, 0x0168, 0x0168, 0x0168, 0x0168,
+ 0x0168, 0x0164, 0x0164, 0x0164, 0x0164, 0x0164, 0x0160, 0x0160, 0x0160, 0x0160, 0x015C, 0x015C, 0x015C, 0x015C, 0x0158, 0x0158,
+ 0x0158, 0x0155, 0x0155, 0x0155, 0x0151, 0x0151, 0x014E, 0x014E, 0x014E, 0x014A, 0x014A, 0x0147, 0x0147, 0x0144, 0x0144, 0x0144,
+ 0x0141, 0x0141, 0x013E, 0x013E, 0x013B, 0x013B, 0x0138, 0x0138, 0x0135, 0x0135, 0x0132, 0x0132, 0x012F, 0x012C, 0x012C, 0x0129,
+ 0x0129, 0x0127, 0x0127, 0x0124, 0x0121, 0x0121, 0x011F, 0x011F, 0x011C, 0x011A, 0x011A, 0x0118, 0x0115, 0x0115, 0x0113, 0x0111,
+ 0x0111, 0x010E, 0x010C, 0x010C, 0x010A, 0x0108, 0x0108, 0x0106, 0x0104, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FA,
+ 0x00FA, 0x00F8, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E6, 0x00E5,
+ 0x00E5, 0x00E3, 0x00E1, 0x00E0, 0x00DE, 0x00DE, 0x00DD, 0x00DB, 0x00DA, 0x00D9, 0x00D9, 0x00D7, 0x00D6, 0x00D4, 0x00D3, 0x00D3,
+ 0x0168, 0x0168, 0x0168, 0x0168, 0x0168, 0x0168, 0x0168, 0x0168, 0x0168, 0x0168, 0x0168, 0x0164, 0x0164, 0x0164, 0x0164, 0x0164,
+ 0x0164, 0x0164, 0x0160, 0x0160, 0x0160, 0x0160, 0x015C, 0x015C, 0x015C, 0x015C, 0x0158, 0x0158, 0x0158, 0x0158, 0x0155, 0x0155,
+ 0x0155, 0x0151, 0x0151, 0x0151, 0x014E, 0x014E, 0x014E, 0x014A, 0x014A, 0x0147, 0x0147, 0x0144, 0x0144, 0x0144, 0x0141, 0x0141,
+ 0x013E, 0x013E, 0x013B, 0x013B, 0x0138, 0x0138, 0x0135, 0x0135, 0x0132, 0x0132, 0x012F, 0x012F, 0x012C, 0x012C, 0x0129, 0x0127,
+ 0x0127, 0x0124, 0x0124, 0x0121, 0x0121, 0x011F, 0x011C, 0x011C, 0x011A, 0x011A, 0x0118, 0x0115, 0x0115, 0x0113, 0x0111, 0x0111,
+ 0x010E, 0x010C, 0x010C, 0x010A, 0x0108, 0x0108, 0x0106, 0x0104, 0x0104, 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00FA,
+ 0x00F8, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00E8, 0x00E6, 0x00E5, 0x00E5,
+ 0x00E3, 0x00E1, 0x00E0, 0x00E0, 0x00DE, 0x00DD, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D7, 0x00D6, 0x00D4, 0x00D4, 0x00D3, 0x00D2,
+ 0x0164, 0x0164, 0x0164, 0x0164, 0x0164, 0x0164, 0x0164, 0x0164, 0x0164, 0x0164, 0x0164, 0x0160, 0x0160, 0x0160, 0x0160, 0x0160,
+ 0x0160, 0x0160, 0x015C, 0x015C, 0x015C, 0x015C, 0x015C, 0x0158, 0x0158, 0x0158, 0x0158, 0x0155, 0x0155, 0x0155, 0x0151, 0x0151,
+ 0x0151, 0x014E, 0x014E, 0x014E, 0x014A, 0x014A, 0x014A, 0x0147, 0x0147, 0x0144, 0x0144, 0x0144, 0x0141, 0x0141, 0x013E, 0x013E,
+ 0x013B, 0x013B, 0x0138, 0x0138, 0x0135, 0x0135, 0x0132, 0x0132, 0x012F, 0x012F, 0x012C, 0x012C, 0x0129, 0x0129, 0x0127, 0x0127,
+ 0x0124, 0x0124, 0x0121, 0x011F, 0x011F, 0x011C, 0x011C, 0x011A, 0x0118, 0x0118, 0x0115, 0x0113, 0x0113, 0x0111, 0x0111, 0x010E,
+ 0x010C, 0x010C, 0x010A, 0x0108, 0x0108, 0x0106, 0x0104, 0x0104, 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FC, 0x00FA, 0x00FA, 0x00F8,
+ 0x00F6, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00E8, 0x00E6, 0x00E6, 0x00E5, 0x00E3,
+ 0x00E1, 0x00E1, 0x00E0, 0x00DE, 0x00DD, 0x00DB, 0x00DB, 0x00DA, 0x00D9, 0x00D7, 0x00D6, 0x00D6, 0x00D4, 0x00D3, 0x00D2, 0x00D0,
+ 0x0160, 0x0160, 0x0160, 0x0160, 0x0160, 0x0160, 0x0160, 0x0160, 0x0160, 0x0160, 0x0160, 0x015C, 0x015C, 0x015C, 0x015C, 0x015C,
+ 0x015C, 0x015C, 0x0158, 0x0158, 0x0158, 0x0158, 0x0158, 0x0155, 0x0155, 0x0155, 0x0155, 0x0151, 0x0151, 0x0151, 0x014E, 0x014E,
+ 0x014E, 0x014A, 0x014A, 0x014A, 0x0147, 0x0147, 0x0147, 0x0144, 0x0144, 0x0144, 0x0141, 0x0141, 0x013E, 0x013E, 0x013B, 0x013B,
+ 0x013B, 0x0138, 0x0138, 0x0135, 0x0135, 0x0132, 0x0132, 0x012F, 0x012F, 0x012C, 0x012C, 0x0129, 0x0129, 0x0127, 0x0124, 0x0124,
+ 0x0121, 0x0121, 0x011F, 0x011F, 0x011C, 0x011A, 0x011A, 0x0118, 0x0118, 0x0115, 0x0113, 0x0113, 0x0111, 0x010E, 0x010E, 0x010C,
+ 0x010C, 0x010A, 0x0108, 0x0108, 0x0106, 0x0104, 0x0104, 0x0102, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F6,
+ 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00E8, 0x00E6, 0x00E6, 0x00E5, 0x00E3, 0x00E1,
+ 0x00E1, 0x00E0, 0x00DE, 0x00DD, 0x00DD, 0x00DB, 0x00DA, 0x00D9, 0x00D7, 0x00D7, 0x00D6, 0x00D4, 0x00D3, 0x00D2, 0x00D2, 0x00D0,
+ 0x015C, 0x015C, 0x015C, 0x015C, 0x015C, 0x015C, 0x015C, 0x015C, 0x015C, 0x015C, 0x015C, 0x0158, 0x0158, 0x0158, 0x0158, 0x0158,
+ 0x0158, 0x0158, 0x0155, 0x0155, 0x0155, 0x0155, 0x0155, 0x0151, 0x0151, 0x0151, 0x0151, 0x014E, 0x014E, 0x014E, 0x014A, 0x014A,
+ 0x014A, 0x014A, 0x0147, 0x0147, 0x0144, 0x0144, 0x0144, 0x0141, 0x0141, 0x0141, 0x013E, 0x013E, 0x013B, 0x013B, 0x013B, 0x0138,
+ 0x0138, 0x0135, 0x0135, 0x0132, 0x0132, 0x012F, 0x012F, 0x012C, 0x012C, 0x0129, 0x0129, 0x0127, 0x0127, 0x0124, 0x0124, 0x0121,
+ 0x011F, 0x011F, 0x011C, 0x011C, 0x011A, 0x011A, 0x0118, 0x0115, 0x0115, 0x0113, 0x0113, 0x0111, 0x010E, 0x010E, 0x010C, 0x010A,
+ 0x010A, 0x0108, 0x0106, 0x0106, 0x0104, 0x0102, 0x0102, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F6, 0x00F6,
+ 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E5, 0x00E3, 0x00E3, 0x00E1,
+ 0x00E0, 0x00DE, 0x00DE, 0x00DD, 0x00DB, 0x00DA, 0x00D9, 0x00D9, 0x00D7, 0x00D6, 0x00D4, 0x00D3, 0x00D3, 0x00D2, 0x00D0, 0x00CF,
+ 0x0158, 0x0158, 0x0158, 0x0158, 0x0158, 0x0158, 0x0158, 0x0158, 0x0158, 0x0158, 0x0158, 0x0158, 0x0155, 0x0155, 0x0155, 0x0155,
+ 0x0155, 0x0155, 0x0151, 0x0151, 0x0151, 0x0151, 0x0151, 0x014E, 0x014E, 0x014E, 0x014E, 0x014A, 0x014A, 0x014A, 0x014A, 0x0147,
+ 0x0147, 0x0147, 0x0144, 0x0144, 0x0144, 0x0141, 0x0141, 0x013E, 0x013E, 0x013E, 0x013B, 0x013B, 0x0138, 0x0138, 0x0138, 0x0135,
+ 0x0135, 0x0132, 0x0132, 0x012F, 0x012F, 0x012C, 0x012C, 0x0129, 0x0129, 0x0127, 0x0127, 0x0124, 0x0124, 0x0121, 0x0121, 0x011F,
+ 0x011F, 0x011C, 0x011C, 0x011A, 0x0118, 0x0118, 0x0115, 0x0115, 0x0113, 0x0111, 0x0111, 0x010E, 0x010E, 0x010C, 0x010A, 0x010A,
+ 0x0108, 0x0106, 0x0106, 0x0104, 0x0102, 0x0102, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F6, 0x00F6, 0x00F4,
+ 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E5, 0x00E3, 0x00E3, 0x00E1, 0x00E0,
+ 0x00DE, 0x00DE, 0x00DD, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D7, 0x00D6, 0x00D4, 0x00D4, 0x00D3, 0x00D2, 0x00D0, 0x00CF, 0x00CF,
+ 0x0155, 0x0155, 0x0155, 0x0155, 0x0155, 0x0155, 0x0155, 0x0155, 0x0155, 0x0155, 0x0155, 0x0155, 0x0151, 0x0151, 0x0151, 0x0151,
+ 0x0151, 0x0151, 0x014E, 0x014E, 0x014E, 0x014E, 0x014E, 0x014A, 0x014A, 0x014A, 0x014A, 0x0147, 0x0147, 0x0147, 0x0147, 0x0144,
+ 0x0144, 0x0144, 0x0141, 0x0141, 0x0141, 0x013E, 0x013E, 0x013E, 0x013B, 0x013B, 0x0138, 0x0138, 0x0138, 0x0135, 0x0135, 0x0132,
+ 0x0132, 0x012F, 0x012F, 0x012C, 0x012C, 0x012C, 0x0129, 0x0129, 0x0127, 0x0127, 0x0124, 0x0124, 0x0121, 0x011F, 0x011F, 0x011C,
+ 0x011C, 0x011A, 0x011A, 0x0118, 0x0118, 0x0115, 0x0113, 0x0113, 0x0111, 0x0111, 0x010E, 0x010C, 0x010C, 0x010A, 0x010A, 0x0108,
+ 0x0106, 0x0106, 0x0104, 0x0102, 0x0102, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F2,
+ 0x00F0, 0x00F0, 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E1, 0x00E0, 0x00E0,
+ 0x00DE, 0x00DD, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D7, 0x00D6, 0x00D6, 0x00D4, 0x00D3, 0x00D2, 0x00D0, 0x00D0, 0x00CF, 0x00CE,
+ 0x0151, 0x0151, 0x0151, 0x0151, 0x0151, 0x0151, 0x0151, 0x0151, 0x0151, 0x0151, 0x0151, 0x0151, 0x014E, 0x014E, 0x014E, 0x014E,
+ 0x014E, 0x014E, 0x014E, 0x014A, 0x014A, 0x014A, 0x014A, 0x0147, 0x0147, 0x0147, 0x0147, 0x0144, 0x0144, 0x0144, 0x0144, 0x0141,
+ 0x0141, 0x0141, 0x013E, 0x013E, 0x013E, 0x013B, 0x013B, 0x013B, 0x0138, 0x0138, 0x0135, 0x0135, 0x0135, 0x0132, 0x0132, 0x012F,
+ 0x012F, 0x012F, 0x012C, 0x012C, 0x0129, 0x0129, 0x0127, 0x0127, 0x0124, 0x0124, 0x0121, 0x0121, 0x011F, 0x011F, 0x011C, 0x011C,
+ 0x011A, 0x0118, 0x0118, 0x0115, 0x0115, 0x0113, 0x0113, 0x0111, 0x010E, 0x010E, 0x010C, 0x010C, 0x010A, 0x0108, 0x0108, 0x0106,
+ 0x0106, 0x0104, 0x0102, 0x0102, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F2, 0x00F0,
+ 0x00F0, 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E1, 0x00E0, 0x00E0, 0x00DE,
+ 0x00DD, 0x00DB, 0x00DB, 0x00DA, 0x00D9, 0x00D7, 0x00D7, 0x00D6, 0x00D4, 0x00D3, 0x00D2, 0x00D2, 0x00D0, 0x00CF, 0x00CE, 0x00CC,
+ 0x014E, 0x014E, 0x014E, 0x014E, 0x014E, 0x014E, 0x014E, 0x014E, 0x014E, 0x014E, 0x014E, 0x014E, 0x014A, 0x014A, 0x014A, 0x014A,
+ 0x014A, 0x014A, 0x014A, 0x0147, 0x0147, 0x0147, 0x0147, 0x0147, 0x0144, 0x0144, 0x0144, 0x0144, 0x0141, 0x0141, 0x0141, 0x013E,
+ 0x013E, 0x013E, 0x013B, 0x013B, 0x013B, 0x0138, 0x0138, 0x0138, 0x0135, 0x0135, 0x0135, 0x0132, 0x0132, 0x012F, 0x012F, 0x012F,
+ 0x012C, 0x012C, 0x0129, 0x0129, 0x0127, 0x0127, 0x0124, 0x0124, 0x0121, 0x0121, 0x011F, 0x011F, 0x011C, 0x011C, 0x011A, 0x011A,
+ 0x0118, 0x0118, 0x0115, 0x0115, 0x0113, 0x0111, 0x0111, 0x010E, 0x010E, 0x010C, 0x010A, 0x010A, 0x0108, 0x0108, 0x0106, 0x0104,
+ 0x0104, 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F2, 0x00F0, 0x00F0,
+ 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E1, 0x00E0, 0x00E0, 0x00DE, 0x00DD,
+ 0x00DB, 0x00DB, 0x00DA, 0x00D9, 0x00D7, 0x00D7, 0x00D6, 0x00D4, 0x00D3, 0x00D3, 0x00D2, 0x00D0, 0x00CF, 0x00CE, 0x00CE, 0x00CC,
+ 0x014A, 0x014A, 0x014A, 0x014A, 0x014A, 0x014A, 0x014A, 0x014A, 0x014A, 0x014A, 0x014A, 0x014A, 0x0147, 0x0147, 0x0147, 0x0147,
+ 0x0147, 0x0147, 0x0147, 0x0144, 0x0144, 0x0144, 0x0144, 0x0144, 0x0141, 0x0141, 0x0141, 0x0141, 0x013E, 0x013E, 0x013E, 0x013B,
+ 0x013B, 0x013B, 0x013B, 0x0138, 0x0138, 0x0138, 0x0135, 0x0135, 0x0132, 0x0132, 0x0132, 0x012F, 0x012F, 0x012C, 0x012C, 0x012C,
+ 0x0129, 0x0129, 0x0127, 0x0127, 0x0124, 0x0124, 0x0121, 0x0121, 0x011F, 0x011F, 0x011F, 0x011C, 0x011A, 0x011A, 0x0118, 0x0118,
+ 0x0115, 0x0115, 0x0113, 0x0113, 0x0111, 0x0111, 0x010E, 0x010C, 0x010C, 0x010A, 0x010A, 0x0108, 0x0106, 0x0106, 0x0104, 0x0104,
+ 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF,
+ 0x00ED, 0x00ED, 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00DE, 0x00DD, 0x00DD,
+ 0x00DB, 0x00DA, 0x00D9, 0x00D9, 0x00D7, 0x00D6, 0x00D4, 0x00D4, 0x00D3, 0x00D2, 0x00D0, 0x00CF, 0x00CF, 0x00CE, 0x00CC, 0x00CB,
+ 0x0147, 0x0147, 0x0147, 0x0147, 0x0147, 0x0147, 0x0147, 0x0147, 0x0147, 0x0147, 0x0147, 0x0147, 0x0144, 0x0144, 0x0144, 0x0144,
+ 0x0144, 0x0144, 0x0144, 0x0141, 0x0141, 0x0141, 0x0141, 0x0141, 0x013E, 0x013E, 0x013E, 0x013E, 0x013B, 0x013B, 0x013B, 0x013B,
+ 0x0138, 0x0138, 0x0138, 0x0135, 0x0135, 0x0135, 0x0132, 0x0132, 0x0132, 0x012F, 0x012F, 0x012C, 0x012C, 0x012C, 0x0129, 0x0129,
+ 0x0127, 0x0127, 0x0124, 0x0124, 0x0124, 0x0121, 0x0121, 0x011F, 0x011F, 0x011C, 0x011C, 0x011A, 0x011A, 0x0118, 0x0118, 0x0115,
+ 0x0115, 0x0113, 0x0111, 0x0111, 0x010E, 0x010E, 0x010C, 0x010C, 0x010A, 0x0108, 0x0108, 0x0106, 0x0106, 0x0104, 0x0102, 0x0102,
+ 0x0100, 0x0100, 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00ED,
+ 0x00ED, 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00DE, 0x00DD, 0x00DD, 0x00DB,
+ 0x00DA, 0x00D9, 0x00D9, 0x00D7, 0x00D6, 0x00D4, 0x00D4, 0x00D3, 0x00D2, 0x00D0, 0x00D0, 0x00CF, 0x00CE, 0x00CC, 0x00CB, 0x00CB,
+ 0x0144, 0x0144, 0x0144, 0x0144, 0x0144, 0x0144, 0x0144, 0x0144, 0x0144, 0x0144, 0x0144, 0x0144, 0x0144, 0x0141, 0x0141, 0x0141,
+ 0x0141, 0x0141, 0x0141, 0x013E, 0x013E, 0x013E, 0x013E, 0x013E, 0x013B, 0x013B, 0x013B, 0x013B, 0x0138, 0x0138, 0x0138, 0x0138,
+ 0x0135, 0x0135, 0x0135, 0x0132, 0x0132, 0x0132, 0x012F, 0x012F, 0x012F, 0x012C, 0x012C, 0x0129, 0x0129, 0x0129, 0x0127, 0x0127,
+ 0x0124, 0x0124, 0x0124, 0x0121, 0x0121, 0x011F, 0x011F, 0x011C, 0x011C, 0x011A, 0x011A, 0x0118, 0x0118, 0x0115, 0x0115, 0x0113,
+ 0x0113, 0x0111, 0x0111, 0x010E, 0x010E, 0x010C, 0x010A, 0x010A, 0x0108, 0x0108, 0x0106, 0x0104, 0x0104, 0x0102, 0x0102, 0x0100,
+ 0x00FE, 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00ED, 0x00ED,
+ 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00DE, 0x00DD, 0x00DD, 0x00DB, 0x00DA,
+ 0x00DA, 0x00D9, 0x00D7, 0x00D6, 0x00D6, 0x00D4, 0x00D3, 0x00D2, 0x00D0, 0x00D0, 0x00CF, 0x00CE, 0x00CC, 0x00CC, 0x00CB, 0x00CA,
+ 0x0141, 0x0141, 0x0141, 0x0141, 0x0141, 0x0141, 0x0141, 0x0141, 0x0141, 0x0141, 0x0141, 0x0141, 0x0141, 0x013E, 0x013E, 0x013E,
+ 0x013E, 0x013E, 0x013E, 0x013B, 0x013B, 0x013B, 0x013B, 0x013B, 0x0138, 0x0138, 0x0138, 0x0138, 0x0135, 0x0135, 0x0135, 0x0135,
+ 0x0132, 0x0132, 0x0132, 0x012F, 0x012F, 0x012F, 0x012C, 0x012C, 0x012C, 0x0129, 0x0129, 0x0129, 0x0127, 0x0127, 0x0124, 0x0124,
+ 0x0124, 0x0121, 0x0121, 0x011F, 0x011F, 0x011C, 0x011C, 0x011A, 0x011A, 0x0118, 0x0118, 0x0115, 0x0115, 0x0113, 0x0113, 0x0111,
+ 0x0111, 0x010E, 0x010E, 0x010C, 0x010C, 0x010A, 0x010A, 0x0108, 0x0106, 0x0106, 0x0104, 0x0104, 0x0102, 0x0100, 0x0100, 0x00FE,
+ 0x00FE, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00ED, 0x00ED, 0x00EB,
+ 0x00EA, 0x00EA, 0x00E8, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00DE, 0x00DD, 0x00DD, 0x00DB, 0x00DA, 0x00DA,
+ 0x00D9, 0x00D7, 0x00D6, 0x00D6, 0x00D4, 0x00D3, 0x00D2, 0x00D2, 0x00D0, 0x00CF, 0x00CE, 0x00CE, 0x00CC, 0x00CB, 0x00CA, 0x00C9,
+ 0x013E, 0x013E, 0x013E, 0x013E, 0x013E, 0x013E, 0x013E, 0x013E, 0x013E, 0x013E, 0x013E, 0x013E, 0x013E, 0x013B, 0x013B, 0x013B,
+ 0x013B, 0x013B, 0x013B, 0x013B, 0x0138, 0x0138, 0x0138, 0x0138, 0x0135, 0x0135, 0x0135, 0x0135, 0x0135, 0x0132, 0x0132, 0x0132,
+ 0x012F, 0x012F, 0x012F, 0x012C, 0x012C, 0x012C, 0x0129, 0x0129, 0x0129, 0x0127, 0x0127, 0x0127, 0x0124, 0x0124, 0x0121, 0x0121,
+ 0x0121, 0x011F, 0x011F, 0x011C, 0x011C, 0x011A, 0x011A, 0x011A, 0x0118, 0x0118, 0x0115, 0x0115, 0x0113, 0x0113, 0x0111, 0x0111,
+ 0x010E, 0x010C, 0x010C, 0x010A, 0x010A, 0x0108, 0x0108, 0x0106, 0x0106, 0x0104, 0x0102, 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FC,
+ 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F2, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EA,
+ 0x00EA, 0x00E8, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00DE, 0x00DE, 0x00DD, 0x00DB, 0x00DA, 0x00DA, 0x00D9,
+ 0x00D7, 0x00D6, 0x00D6, 0x00D4, 0x00D3, 0x00D2, 0x00D2, 0x00D0, 0x00CF, 0x00CE, 0x00CE, 0x00CC, 0x00CB, 0x00CA, 0x00CA, 0x00C9,
+ 0x013B, 0x013B, 0x013B, 0x013B, 0x013B, 0x013B, 0x013B, 0x013B, 0x013B, 0x013B, 0x013B, 0x013B, 0x013B, 0x0138, 0x0138, 0x0138,
+ 0x0138, 0x0138, 0x0138, 0x0138, 0x0135, 0x0135, 0x0135, 0x0135, 0x0135, 0x0132, 0x0132, 0x0132, 0x0132, 0x012F, 0x012F, 0x012F,
+ 0x012C, 0x012C, 0x012C, 0x012C, 0x0129, 0x0129, 0x0129, 0x0127, 0x0127, 0x0124, 0x0124, 0x0124, 0x0121, 0x0121, 0x0121, 0x011F,
+ 0x011F, 0x011C, 0x011C, 0x011A, 0x011A, 0x011A, 0x0118, 0x0118, 0x0115, 0x0115, 0x0113, 0x0113, 0x0111, 0x0111, 0x010E, 0x010E,
+ 0x010C, 0x010C, 0x010A, 0x010A, 0x0108, 0x0106, 0x0106, 0x0104, 0x0104, 0x0102, 0x0102, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FC,
+ 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F2, 0x00F2, 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00E8,
+ 0x00E8, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00DE, 0x00DE, 0x00DD, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D7,
+ 0x00D6, 0x00D6, 0x00D4, 0x00D3, 0x00D3, 0x00D2, 0x00D0, 0x00CF, 0x00CF, 0x00CE, 0x00CC, 0x00CB, 0x00CB, 0x00CA, 0x00C9, 0x00C7,
+ 0x0138, 0x0138, 0x0138, 0x0138, 0x0138, 0x0138, 0x0138, 0x0138, 0x0138, 0x0138, 0x0138, 0x0138, 0x0138, 0x0135, 0x0135, 0x0135,
+ 0x0135, 0x0135, 0x0135, 0x0135, 0x0132, 0x0132, 0x0132, 0x0132, 0x0132, 0x012F, 0x012F, 0x012F, 0x012F, 0x012C, 0x012C, 0x012C,
+ 0x012C, 0x0129, 0x0129, 0x0129, 0x0127, 0x0127, 0x0127, 0x0124, 0x0124, 0x0124, 0x0121, 0x0121, 0x011F, 0x011F, 0x011F, 0x011C,
+ 0x011C, 0x011A, 0x011A, 0x011A, 0x0118, 0x0118, 0x0115, 0x0115, 0x0113, 0x0113, 0x0111, 0x0111, 0x010E, 0x010E, 0x010C, 0x010C,
+ 0x010A, 0x010A, 0x0108, 0x0108, 0x0106, 0x0106, 0x0104, 0x0104, 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FC, 0x00FA,
+ 0x00F8, 0x00F8, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F2, 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00E8, 0x00E8,
+ 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00DE, 0x00DE, 0x00DD, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D7, 0x00D7,
+ 0x00D6, 0x00D4, 0x00D3, 0x00D3, 0x00D2, 0x00D0, 0x00CF, 0x00CF, 0x00CE, 0x00CC, 0x00CB, 0x00CB, 0x00CA, 0x00C9, 0x00C7, 0x00C7,
+ 0x0135, 0x0135, 0x0135, 0x0135, 0x0135, 0x0135, 0x0135, 0x0135, 0x0135, 0x0135, 0x0135, 0x0135, 0x0135, 0x0132, 0x0132, 0x0132,
+ 0x0132, 0x0132, 0x0132, 0x0132, 0x012F, 0x012F, 0x012F, 0x012F, 0x012F, 0x012C, 0x012C, 0x012C, 0x012C, 0x0129, 0x0129, 0x0129,
+ 0x0129, 0x0127, 0x0127, 0x0127, 0x0124, 0x0124, 0x0124, 0x0121, 0x0121, 0x0121, 0x011F, 0x011F, 0x011F, 0x011C, 0x011C, 0x011A,
+ 0x011A, 0x011A, 0x0118, 0x0118, 0x0115, 0x0115, 0x0113, 0x0113, 0x0111, 0x0111, 0x010E, 0x010E, 0x010E, 0x010C, 0x010C, 0x010A,
+ 0x0108, 0x0108, 0x0106, 0x0106, 0x0104, 0x0104, 0x0102, 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00FA, 0x00F8,
+ 0x00F8, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00E8, 0x00E8, 0x00E6,
+ 0x00E5, 0x00E5, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00DE, 0x00DE, 0x00DD, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D7, 0x00D7, 0x00D6,
+ 0x00D4, 0x00D3, 0x00D3, 0x00D2, 0x00D0, 0x00CF, 0x00CF, 0x00CE, 0x00CC, 0x00CB, 0x00CB, 0x00CA, 0x00C9, 0x00C7, 0x00C7, 0x00C6,
+ 0x0132, 0x0132, 0x0132, 0x0132, 0x0132, 0x0132, 0x0132, 0x0132, 0x0132, 0x0132, 0x0132, 0x0132, 0x0132, 0x0132, 0x012F, 0x012F,
+ 0x012F, 0x012F, 0x012F, 0x012F, 0x012C, 0x012C, 0x012C, 0x012C, 0x012C, 0x0129, 0x0129, 0x0129, 0x0129, 0x0127, 0x0127, 0x0127,
+ 0x0127, 0x0124, 0x0124, 0x0124, 0x0121, 0x0121, 0x0121, 0x011F, 0x011F, 0x011F, 0x011C, 0x011C, 0x011C, 0x011A, 0x011A, 0x0118,
+ 0x0118, 0x0118, 0x0115, 0x0115, 0x0113, 0x0113, 0x0111, 0x0111, 0x0111, 0x010E, 0x010E, 0x010C, 0x010C, 0x010A, 0x010A, 0x0108,
+ 0x0108, 0x0106, 0x0106, 0x0104, 0x0104, 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F6,
+ 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E5,
+ 0x00E5, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00DE, 0x00DE, 0x00DD, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D7, 0x00D7, 0x00D6, 0x00D4,
+ 0x00D3, 0x00D3, 0x00D2, 0x00D0, 0x00D0, 0x00CF, 0x00CE, 0x00CC, 0x00CC, 0x00CB, 0x00CA, 0x00C9, 0x00C9, 0x00C7, 0x00C6, 0x00C5,
+ 0x012F, 0x012F, 0x012F, 0x012F, 0x012F, 0x012F, 0x012F, 0x012F, 0x012F, 0x012F, 0x012F, 0x012F, 0x012F, 0x012F, 0x012C, 0x012C,
+ 0x012C, 0x012C, 0x012C, 0x012C, 0x0129, 0x0129, 0x0129, 0x0129, 0x0129, 0x0127, 0x0127, 0x0127, 0x0127, 0x0124, 0x0124, 0x0124,
+ 0x0124, 0x0121, 0x0121, 0x0121, 0x011F, 0x011F, 0x011F, 0x011C, 0x011C, 0x011C, 0x011A, 0x011A, 0x011A, 0x0118, 0x0118, 0x0118,
+ 0x0115, 0x0115, 0x0113, 0x0113, 0x0111, 0x0111, 0x0111, 0x010E, 0x010E, 0x010C, 0x010C, 0x010A, 0x010A, 0x0108, 0x0108, 0x0106,
+ 0x0106, 0x0104, 0x0104, 0x0102, 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F6,
+ 0x00F4, 0x00F2, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E6, 0x00E6, 0x00E5, 0x00E5,
+ 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00DE, 0x00DD, 0x00DD, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D7, 0x00D7, 0x00D6, 0x00D4, 0x00D3,
+ 0x00D3, 0x00D2, 0x00D0, 0x00D0, 0x00CF, 0x00CE, 0x00CC, 0x00CC, 0x00CB, 0x00CA, 0x00C9, 0x00C9, 0x00C7, 0x00C6, 0x00C5, 0x00C5,
+ 0x012C, 0x012C, 0x012C, 0x012C, 0x012C, 0x012C, 0x012C, 0x012C, 0x012C, 0x012C, 0x012C, 0x012C, 0x012C, 0x012C, 0x0129, 0x0129,
+ 0x0129, 0x0129, 0x0129, 0x0129, 0x0129, 0x0127, 0x0127, 0x0127, 0x0127, 0x0124, 0x0124, 0x0124, 0x0124, 0x0124, 0x0121, 0x0121,
+ 0x0121, 0x011F, 0x011F, 0x011F, 0x011F, 0x011C, 0x011C, 0x011C, 0x011A, 0x011A, 0x011A, 0x0118, 0x0118, 0x0115, 0x0115, 0x0115,
+ 0x0113, 0x0113, 0x0111, 0x0111, 0x0111, 0x010E, 0x010E, 0x010C, 0x010C, 0x010A, 0x010A, 0x0108, 0x0108, 0x0106, 0x0106, 0x0104,
+ 0x0104, 0x0102, 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F4,
+ 0x00F2, 0x00F2, 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E6, 0x00E6, 0x00E5, 0x00E3, 0x00E3,
+ 0x00E1, 0x00E0, 0x00E0, 0x00DE, 0x00DD, 0x00DD, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D7, 0x00D7, 0x00D6, 0x00D4, 0x00D4, 0x00D3,
+ 0x00D2, 0x00D0, 0x00D0, 0x00CF, 0x00CE, 0x00CC, 0x00CC, 0x00CB, 0x00CA, 0x00CA, 0x00C9, 0x00C7, 0x00C6, 0x00C6, 0x00C5, 0x00C4,
+ 0x0129, 0x0129, 0x0129, 0x0129, 0x0129, 0x0129, 0x0129, 0x0129, 0x0129, 0x0129, 0x0129, 0x0129, 0x0129, 0x0129, 0x0127, 0x0127,
+ 0x0127, 0x0127, 0x0127, 0x0127, 0x0127, 0x0124, 0x0124, 0x0124, 0x0124, 0x0124, 0x0121, 0x0121, 0x0121, 0x0121, 0x011F, 0x011F,
+ 0x011F, 0x011C, 0x011C, 0x011C, 0x011C, 0x011A, 0x011A, 0x011A, 0x0118, 0x0118, 0x0118, 0x0115, 0x0115, 0x0113, 0x0113, 0x0113,
+ 0x0111, 0x0111, 0x0111, 0x010E, 0x010E, 0x010C, 0x010C, 0x010A, 0x010A, 0x0108, 0x0108, 0x0108, 0x0106, 0x0106, 0x0104, 0x0104,
+ 0x0102, 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F4, 0x00F2,
+ 0x00F0, 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E6, 0x00E5, 0x00E3, 0x00E3, 0x00E1,
+ 0x00E0, 0x00E0, 0x00DE, 0x00DD, 0x00DD, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D7, 0x00D7, 0x00D6, 0x00D4, 0x00D4, 0x00D3, 0x00D2,
+ 0x00D0, 0x00D0, 0x00CF, 0x00CE, 0x00CE, 0x00CC, 0x00CB, 0x00CA, 0x00CA, 0x00C9, 0x00C7, 0x00C6, 0x00C6, 0x00C5, 0x00C4, 0x00C3,
+ 0x0127, 0x0127, 0x0127, 0x0127, 0x0127, 0x0127, 0x0127, 0x0127, 0x0127, 0x0127, 0x0127, 0x0127, 0x0127, 0x0127, 0x0124, 0x0124,
+ 0x0124, 0x0124, 0x0124, 0x0124, 0x0124, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x011F, 0x011F, 0x011F, 0x011F, 0x011C, 0x011C,
+ 0x011C, 0x011C, 0x011A, 0x011A, 0x011A, 0x0118, 0x0118, 0x0118, 0x0115, 0x0115, 0x0115, 0x0113, 0x0113, 0x0113, 0x0111, 0x0111,
+ 0x010E, 0x010E, 0x010E, 0x010C, 0x010C, 0x010A, 0x010A, 0x0108, 0x0108, 0x0108, 0x0106, 0x0106, 0x0104, 0x0104, 0x0102, 0x0102,
+ 0x0100, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F2, 0x00F0,
+ 0x00F0, 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E3, 0x00E1, 0x00E0,
+ 0x00E0, 0x00DE, 0x00DD, 0x00DD, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D7, 0x00D7, 0x00D6, 0x00D4, 0x00D4, 0x00D3, 0x00D2, 0x00D0,
+ 0x00D0, 0x00CF, 0x00CE, 0x00CE, 0x00CC, 0x00CB, 0x00CA, 0x00CA, 0x00C9, 0x00C7, 0x00C6, 0x00C6, 0x00C5, 0x00C4, 0x00C3, 0x00C3,
+ 0x0124, 0x0124, 0x0124, 0x0124, 0x0124, 0x0124, 0x0124, 0x0124, 0x0124, 0x0124, 0x0124, 0x0124, 0x0124, 0x0124, 0x0121, 0x0121,
+ 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x011F, 0x011F, 0x011F, 0x011F, 0x011F, 0x011C, 0x011C, 0x011C, 0x011C, 0x011A, 0x011A,
+ 0x011A, 0x011A, 0x0118, 0x0118, 0x0118, 0x0115, 0x0115, 0x0115, 0x0113, 0x0113, 0x0113, 0x0111, 0x0111, 0x0111, 0x010E, 0x010E,
+ 0x010E, 0x010C, 0x010C, 0x010A, 0x010A, 0x0108, 0x0108, 0x0108, 0x0106, 0x0106, 0x0104, 0x0104, 0x0102, 0x0102, 0x0100, 0x0100,
+ 0x00FE, 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF,
+ 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00E0,
+ 0x00DE, 0x00DD, 0x00DD, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D7, 0x00D7, 0x00D6, 0x00D4, 0x00D3, 0x00D3, 0x00D2, 0x00D0, 0x00D0,
+ 0x00CF, 0x00CE, 0x00CE, 0x00CC, 0x00CB, 0x00CA, 0x00CA, 0x00C9, 0x00C7, 0x00C7, 0x00C6, 0x00C5, 0x00C4, 0x00C4, 0x00C3, 0x00C1,
+ 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x011F, 0x011F,
+ 0x011F, 0x011F, 0x011F, 0x011F, 0x011F, 0x011C, 0x011C, 0x011C, 0x011C, 0x011C, 0x011A, 0x011A, 0x011A, 0x011A, 0x0118, 0x0118,
+ 0x0118, 0x0118, 0x0115, 0x0115, 0x0115, 0x0113, 0x0113, 0x0113, 0x0111, 0x0111, 0x0111, 0x010E, 0x010E, 0x010E, 0x010C, 0x010C,
+ 0x010C, 0x010A, 0x010A, 0x0108, 0x0108, 0x0108, 0x0106, 0x0106, 0x0104, 0x0104, 0x0102, 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FE,
+ 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00EF,
+ 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E6, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00DE, 0x00DE,
+ 0x00DD, 0x00DB, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D7, 0x00D6, 0x00D6, 0x00D4, 0x00D3, 0x00D3, 0x00D2, 0x00D0, 0x00D0, 0x00CF,
+ 0x00CE, 0x00CE, 0x00CC, 0x00CB, 0x00CA, 0x00CA, 0x00C9, 0x00C7, 0x00C7, 0x00C6, 0x00C5, 0x00C4, 0x00C4, 0x00C3, 0x00C1, 0x00C0,
+ 0x011F, 0x011F, 0x011F, 0x011F, 0x011F, 0x011F, 0x011F, 0x011F, 0x011F, 0x011F, 0x011F, 0x011F, 0x011F, 0x011F, 0x011F, 0x011C,
+ 0x011C, 0x011C, 0x011C, 0x011C, 0x011C, 0x011A, 0x011A, 0x011A, 0x011A, 0x011A, 0x0118, 0x0118, 0x0118, 0x0118, 0x0115, 0x0115,
+ 0x0115, 0x0115, 0x0113, 0x0113, 0x0113, 0x0113, 0x0111, 0x0111, 0x0111, 0x010E, 0x010E, 0x010E, 0x010C, 0x010C, 0x010A, 0x010A,
+ 0x010A, 0x0108, 0x0108, 0x0106, 0x0106, 0x0106, 0x0104, 0x0104, 0x0102, 0x0102, 0x0100, 0x0100, 0x0100, 0x00FE, 0x00FE, 0x00FC,
+ 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00ED,
+ 0x00EB, 0x00EB, 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E6, 0x00E5, 0x00E3, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00DE, 0x00DE, 0x00DD,
+ 0x00DB, 0x00DB, 0x00DA, 0x00D9, 0x00D9, 0x00D7, 0x00D6, 0x00D6, 0x00D4, 0x00D3, 0x00D3, 0x00D2, 0x00D0, 0x00D0, 0x00CF, 0x00CE,
+ 0x00CE, 0x00CC, 0x00CB, 0x00CA, 0x00CA, 0x00C9, 0x00C7, 0x00C7, 0x00C6, 0x00C5, 0x00C4, 0x00C4, 0x00C3, 0x00C1, 0x00C0, 0x00C0,
+ 0x011C, 0x011C, 0x011C, 0x011C, 0x011C, 0x011C, 0x011C, 0x011C, 0x011C, 0x011C, 0x011C, 0x011C, 0x011C, 0x011C, 0x011C, 0x011A,
+ 0x011A, 0x011A, 0x011A, 0x011A, 0x011A, 0x0118, 0x0118, 0x0118, 0x0118, 0x0118, 0x0115, 0x0115, 0x0115, 0x0115, 0x0115, 0x0113,
+ 0x0113, 0x0113, 0x0111, 0x0111, 0x0111, 0x0111, 0x010E, 0x010E, 0x010E, 0x010C, 0x010C, 0x010C, 0x010A, 0x010A, 0x010A, 0x0108,
+ 0x0108, 0x0106, 0x0106, 0x0106, 0x0104, 0x0104, 0x0102, 0x0102, 0x0100, 0x0100, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FC, 0x00FA,
+ 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00EB, 0x00EB,
+ 0x00EA, 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E3, 0x00E1, 0x00E0, 0x00E0, 0x00DE, 0x00DE, 0x00DD, 0x00DB,
+ 0x00DB, 0x00DA, 0x00D9, 0x00D9, 0x00D7, 0x00D6, 0x00D6, 0x00D4, 0x00D3, 0x00D3, 0x00D2, 0x00D0, 0x00D0, 0x00CF, 0x00CE, 0x00CE,
+ 0x00CC, 0x00CB, 0x00CA, 0x00CA, 0x00C9, 0x00C7, 0x00C7, 0x00C6, 0x00C5, 0x00C4, 0x00C4, 0x00C3, 0x00C1, 0x00C1, 0x00C0, 0x00BF,
+ 0x011A, 0x011A, 0x011A, 0x011A, 0x011A, 0x011A, 0x011A, 0x011A, 0x011A, 0x011A, 0x011A, 0x011A, 0x011A, 0x011A, 0x011A, 0x0118,
+ 0x0118, 0x0118, 0x0118, 0x0118, 0x0118, 0x0118, 0x0115, 0x0115, 0x0115, 0x0115, 0x0115, 0x0113, 0x0113, 0x0113, 0x0113, 0x0111,
+ 0x0111, 0x0111, 0x0111, 0x010E, 0x010E, 0x010E, 0x010C, 0x010C, 0x010C, 0x010A, 0x010A, 0x010A, 0x0108, 0x0108, 0x0108, 0x0106,
+ 0x0106, 0x0104, 0x0104, 0x0104, 0x0102, 0x0102, 0x0100, 0x0100, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00FA, 0x00F8,
+ 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EB, 0x00EA,
+ 0x00E8, 0x00E8, 0x00E6, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00E0, 0x00DE, 0x00DD, 0x00DD, 0x00DB, 0x00DA,
+ 0x00DA, 0x00D9, 0x00D9, 0x00D7, 0x00D6, 0x00D6, 0x00D4, 0x00D3, 0x00D3, 0x00D2, 0x00D0, 0x00D0, 0x00CF, 0x00CE, 0x00CE, 0x00CC,
+ 0x00CB, 0x00CA, 0x00CA, 0x00C9, 0x00C7, 0x00C7, 0x00C6, 0x00C5, 0x00C5, 0x00C4, 0x00C3, 0x00C1, 0x00C1, 0x00C0, 0x00BF, 0x00BE,
+ 0x0118, 0x0118, 0x0118, 0x0118, 0x0118, 0x0118, 0x0118, 0x0118, 0x0118, 0x0118, 0x0118, 0x0118, 0x0118, 0x0118, 0x0118, 0x0115,
+ 0x0115, 0x0115, 0x0115, 0x0115, 0x0115, 0x0115, 0x0113, 0x0113, 0x0113, 0x0113, 0x0113, 0x0111, 0x0111, 0x0111, 0x0111, 0x010E,
+ 0x010E, 0x010E, 0x010E, 0x010C, 0x010C, 0x010C, 0x010A, 0x010A, 0x010A, 0x0108, 0x0108, 0x0108, 0x0106, 0x0106, 0x0106, 0x0104,
+ 0x0104, 0x0104, 0x0102, 0x0102, 0x0100, 0x0100, 0x0100, 0x00FE, 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F8, 0x00F8,
+ 0x00F6, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00EA, 0x00E8,
+ 0x00E8, 0x00E6, 0x00E6, 0x00E5, 0x00E3, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00DE, 0x00DE, 0x00DD, 0x00DD, 0x00DB, 0x00DA, 0x00DA,
+ 0x00D9, 0x00D7, 0x00D7, 0x00D6, 0x00D6, 0x00D4, 0x00D3, 0x00D3, 0x00D2, 0x00D0, 0x00D0, 0x00CF, 0x00CE, 0x00CE, 0x00CC, 0x00CB,
+ 0x00CA, 0x00CA, 0x00C9, 0x00C7, 0x00C7, 0x00C6, 0x00C5, 0x00C5, 0x00C4, 0x00C3, 0x00C1, 0x00C1, 0x00C0, 0x00BF, 0x00BE, 0x00BE,
+ 0x0115, 0x0115, 0x0115, 0x0115, 0x0115, 0x0115, 0x0115, 0x0115, 0x0115, 0x0115, 0x0115, 0x0115, 0x0115, 0x0115, 0x0115, 0x0113,
+ 0x0113, 0x0113, 0x0113, 0x0113, 0x0113, 0x0113, 0x0111, 0x0111, 0x0111, 0x0111, 0x0111, 0x010E, 0x010E, 0x010E, 0x010E, 0x010C,
+ 0x010C, 0x010C, 0x010C, 0x010A, 0x010A, 0x010A, 0x0108, 0x0108, 0x0108, 0x0106, 0x0106, 0x0106, 0x0104, 0x0104, 0x0104, 0x0102,
+ 0x0102, 0x0102, 0x0100, 0x0100, 0x00FE, 0x00FE, 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F8, 0x00F8, 0x00F6, 0x00F6,
+ 0x00F4, 0x00F4, 0x00F2, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E6,
+ 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E3, 0x00E1, 0x00E0, 0x00E0, 0x00DE, 0x00DE, 0x00DD, 0x00DB, 0x00DB, 0x00DA, 0x00DA, 0x00D9,
+ 0x00D7, 0x00D7, 0x00D6, 0x00D4, 0x00D4, 0x00D3, 0x00D2, 0x00D2, 0x00D0, 0x00CF, 0x00CF, 0x00CE, 0x00CC, 0x00CC, 0x00CB, 0x00CA,
+ 0x00CA, 0x00C9, 0x00C7, 0x00C7, 0x00C6, 0x00C5, 0x00C5, 0x00C4, 0x00C3, 0x00C1, 0x00C1, 0x00C0, 0x00BF, 0x00BF, 0x00BE, 0x00BD,
+ 0x0113, 0x0113, 0x0113, 0x0113, 0x0113, 0x0113, 0x0113, 0x0113, 0x0113, 0x0113, 0x0113, 0x0113, 0x0113, 0x0113, 0x0113, 0x0111,
+ 0x0111, 0x0111, 0x0111, 0x0111, 0x0111, 0x0111, 0x010E, 0x010E, 0x010E, 0x010E, 0x010E, 0x010C, 0x010C, 0x010C, 0x010C, 0x010A,
+ 0x010A, 0x010A, 0x010A, 0x0108, 0x0108, 0x0108, 0x0108, 0x0106, 0x0106, 0x0106, 0x0104, 0x0104, 0x0104, 0x0102, 0x0102, 0x0100,
+ 0x0100, 0x0100, 0x00FE, 0x00FE, 0x00FE, 0x00FC, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F8, 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F4,
+ 0x00F2, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E6,
+ 0x00E5, 0x00E3, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00E0, 0x00DE, 0x00DD, 0x00DD, 0x00DB, 0x00DB, 0x00DA, 0x00D9, 0x00D9, 0x00D7,
+ 0x00D7, 0x00D6, 0x00D4, 0x00D4, 0x00D3, 0x00D2, 0x00D2, 0x00D0, 0x00CF, 0x00CF, 0x00CE, 0x00CC, 0x00CC, 0x00CB, 0x00CA, 0x00CA,
+ 0x00C9, 0x00C7, 0x00C7, 0x00C6, 0x00C5, 0x00C5, 0x00C4, 0x00C3, 0x00C1, 0x00C1, 0x00C0, 0x00BF, 0x00BF, 0x00BE, 0x00BD, 0x00BC,
+ 0x0111, 0x0111, 0x0111, 0x0111, 0x0111, 0x0111, 0x0111, 0x0111, 0x0111, 0x0111, 0x0111, 0x0111, 0x0111, 0x0111, 0x0111, 0x010E,
+ 0x010E, 0x010E, 0x010E, 0x010E, 0x010E, 0x010E, 0x010C, 0x010C, 0x010C, 0x010C, 0x010C, 0x010A, 0x010A, 0x010A, 0x010A, 0x0108,
+ 0x0108, 0x0108, 0x0108, 0x0106, 0x0106, 0x0106, 0x0106, 0x0104, 0x0104, 0x0104, 0x0102, 0x0102, 0x0102, 0x0100, 0x0100, 0x0100,
+ 0x00FE, 0x00FE, 0x00FC, 0x00FC, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F8, 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F2,
+ 0x00F0, 0x00F0, 0x00EF, 0x00EF, 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E6, 0x00E5, 0x00E5,
+ 0x00E3, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00DE, 0x00DE, 0x00DD, 0x00DD, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D9, 0x00D7, 0x00D6,
+ 0x00D6, 0x00D4, 0x00D4, 0x00D3, 0x00D2, 0x00D2, 0x00D0, 0x00CF, 0x00CF, 0x00CE, 0x00CC, 0x00CC, 0x00CB, 0x00CA, 0x00CA, 0x00C9,
+ 0x00C7, 0x00C7, 0x00C6, 0x00C5, 0x00C5, 0x00C4, 0x00C3, 0x00C1, 0x00C1, 0x00C0, 0x00BF, 0x00BF, 0x00BE, 0x00BD, 0x00BC, 0x00BC,
+ 0x010E, 0x010E, 0x010E, 0x010E, 0x010E, 0x010E, 0x010E, 0x010E, 0x010E, 0x010E, 0x010E, 0x010E, 0x010E, 0x010E, 0x010E, 0x010E,
+ 0x010C, 0x010C, 0x010C, 0x010C, 0x010C, 0x010C, 0x010A, 0x010A, 0x010A, 0x010A, 0x010A, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108,
+ 0x0106, 0x0106, 0x0106, 0x0104, 0x0104, 0x0104, 0x0104, 0x0102, 0x0102, 0x0102, 0x0100, 0x0100, 0x0100, 0x00FE, 0x00FE, 0x00FE,
+ 0x00FC, 0x00FC, 0x00FC, 0x00FA, 0x00FA, 0x00F8, 0x00F8, 0x00F8, 0x00F6, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F2, 0x00F0, 0x00F0,
+ 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E6, 0x00E5, 0x00E3, 0x00E3,
+ 0x00E1, 0x00E1, 0x00E0, 0x00E0, 0x00DE, 0x00DE, 0x00DD, 0x00DB, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D7, 0x00D7, 0x00D6, 0x00D6,
+ 0x00D4, 0x00D3, 0x00D3, 0x00D2, 0x00D0, 0x00D0, 0x00CF, 0x00CF, 0x00CE, 0x00CC, 0x00CC, 0x00CB, 0x00CA, 0x00CA, 0x00C9, 0x00C7,
+ 0x00C7, 0x00C6, 0x00C5, 0x00C5, 0x00C4, 0x00C3, 0x00C1, 0x00C1, 0x00C0, 0x00BF, 0x00BF, 0x00BE, 0x00BD, 0x00BC, 0x00BC, 0x00BB,
+ 0x010C, 0x010C, 0x010C, 0x010C, 0x010C, 0x010C, 0x010C, 0x010C, 0x010C, 0x010C, 0x010C, 0x010C, 0x010C, 0x010C, 0x010C, 0x010C,
+ 0x010A, 0x010A, 0x010A, 0x010A, 0x010A, 0x010A, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0106, 0x0106, 0x0106, 0x0106, 0x0106,
+ 0x0104, 0x0104, 0x0104, 0x0104, 0x0102, 0x0102, 0x0102, 0x0100, 0x0100, 0x0100, 0x00FE, 0x00FE, 0x00FE, 0x00FC, 0x00FC, 0x00FC,
+ 0x00FA, 0x00FA, 0x00FA, 0x00F8, 0x00F8, 0x00F6, 0x00F6, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F2, 0x00F0, 0x00F0, 0x00F0, 0x00EF,
+ 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E3, 0x00E1,
+ 0x00E1, 0x00E0, 0x00DE, 0x00DE, 0x00DD, 0x00DD, 0x00DB, 0x00DB, 0x00DA, 0x00D9, 0x00D9, 0x00D7, 0x00D7, 0x00D6, 0x00D4, 0x00D4,
+ 0x00D3, 0x00D3, 0x00D2, 0x00D0, 0x00D0, 0x00CF, 0x00CE, 0x00CE, 0x00CC, 0x00CB, 0x00CB, 0x00CA, 0x00CA, 0x00C9, 0x00C7, 0x00C7,
+ 0x00C6, 0x00C5, 0x00C4, 0x00C4, 0x00C3, 0x00C1, 0x00C1, 0x00C0, 0x00BF, 0x00BF, 0x00BE, 0x00BD, 0x00BD, 0x00BC, 0x00BB, 0x00BA,
+ 0x010A, 0x010A, 0x010A, 0x010A, 0x010A, 0x010A, 0x010A, 0x010A, 0x010A, 0x010A, 0x010A, 0x010A, 0x010A, 0x010A, 0x010A, 0x010A,
+ 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0106, 0x0106, 0x0106, 0x0106, 0x0106, 0x0104, 0x0104, 0x0104, 0x0104,
+ 0x0102, 0x0102, 0x0102, 0x0102, 0x0100, 0x0100, 0x0100, 0x00FE, 0x00FE, 0x00FE, 0x00FC, 0x00FC, 0x00FC, 0x00FA, 0x00FA, 0x00FA,
+ 0x00F8, 0x00F8, 0x00F8, 0x00F6, 0x00F6, 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F2, 0x00F0, 0x00F0, 0x00F0, 0x00EF, 0x00EF, 0x00ED,
+ 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E3, 0x00E1, 0x00E1, 0x00E0,
+ 0x00E0, 0x00DE, 0x00DE, 0x00DD, 0x00DD, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D9, 0x00D7, 0x00D6, 0x00D6, 0x00D4, 0x00D4, 0x00D3,
+ 0x00D2, 0x00D2, 0x00D0, 0x00D0, 0x00CF, 0x00CE, 0x00CE, 0x00CC, 0x00CB, 0x00CB, 0x00CA, 0x00C9, 0x00C9, 0x00C7, 0x00C6, 0x00C6,
+ 0x00C5, 0x00C4, 0x00C4, 0x00C3, 0x00C1, 0x00C1, 0x00C0, 0x00BF, 0x00BF, 0x00BE, 0x00BD, 0x00BD, 0x00BC, 0x00BB, 0x00BA, 0x00BA,
+ 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108,
+ 0x0106, 0x0106, 0x0106, 0x0106, 0x0106, 0x0106, 0x0106, 0x0104, 0x0104, 0x0104, 0x0104, 0x0104, 0x0102, 0x0102, 0x0102, 0x0102,
+ 0x0100, 0x0100, 0x0100, 0x0100, 0x00FE, 0x00FE, 0x00FE, 0x00FC, 0x00FC, 0x00FC, 0x00FC, 0x00FA, 0x00FA, 0x00FA, 0x00F8, 0x00F8,
+ 0x00F6, 0x00F6, 0x00F6, 0x00F4, 0x00F4, 0x00F4, 0x00F2, 0x00F2, 0x00F0, 0x00F0, 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00ED, 0x00EB,
+ 0x00EB, 0x00EA, 0x00EA, 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00DE,
+ 0x00DE, 0x00DD, 0x00DD, 0x00DB, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D7, 0x00D7, 0x00D6, 0x00D6, 0x00D4, 0x00D3, 0x00D3, 0x00D2,
+ 0x00D2, 0x00D0, 0x00CF, 0x00CF, 0x00CE, 0x00CE, 0x00CC, 0x00CB, 0x00CB, 0x00CA, 0x00C9, 0x00C9, 0x00C7, 0x00C6, 0x00C6, 0x00C5,
+ 0x00C4, 0x00C4, 0x00C3, 0x00C1, 0x00C1, 0x00C0, 0x00BF, 0x00BF, 0x00BE, 0x00BD, 0x00BD, 0x00BC, 0x00BB, 0x00BA, 0x00BA, 0x00B9,
+ 0x0106, 0x0106, 0x0106, 0x0106, 0x0106, 0x0106, 0x0106, 0x0106, 0x0106, 0x0106, 0x0106, 0x0106, 0x0106, 0x0106, 0x0106, 0x0106,
+ 0x0104, 0x0104, 0x0104, 0x0104, 0x0104, 0x0104, 0x0104, 0x0102, 0x0102, 0x0102, 0x0102, 0x0102, 0x0100, 0x0100, 0x0100, 0x0100,
+ 0x00FE, 0x00FE, 0x00FE, 0x00FE, 0x00FC, 0x00FC, 0x00FC, 0x00FC, 0x00FA, 0x00FA, 0x00FA, 0x00F8, 0x00F8, 0x00F8, 0x00F6, 0x00F6,
+ 0x00F6, 0x00F4, 0x00F4, 0x00F2, 0x00F2, 0x00F2, 0x00F0, 0x00F0, 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EB, 0x00EA,
+ 0x00EA, 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00E0, 0x00DE, 0x00DE,
+ 0x00DD, 0x00DD, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D9, 0x00D7, 0x00D7, 0x00D6, 0x00D4, 0x00D4, 0x00D3, 0x00D3, 0x00D2, 0x00D0,
+ 0x00D0, 0x00CF, 0x00CF, 0x00CE, 0x00CC, 0x00CC, 0x00CB, 0x00CB, 0x00CA, 0x00C9, 0x00C9, 0x00C7, 0x00C6, 0x00C6, 0x00C5, 0x00C4,
+ 0x00C4, 0x00C3, 0x00C1, 0x00C1, 0x00C0, 0x00BF, 0x00BF, 0x00BE, 0x00BD, 0x00BD, 0x00BC, 0x00BB, 0x00BA, 0x00BA, 0x00B9, 0x00B8,
+ 0x0104, 0x0104, 0x0104, 0x0104, 0x0104, 0x0104, 0x0104, 0x0104, 0x0104, 0x0104, 0x0104, 0x0104, 0x0104, 0x0104, 0x0104, 0x0104,
+ 0x0102, 0x0102, 0x0102, 0x0102, 0x0102, 0x0102, 0x0102, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x00FE, 0x00FE, 0x00FE, 0x00FE,
+ 0x00FC, 0x00FC, 0x00FC, 0x00FC, 0x00FA, 0x00FA, 0x00FA, 0x00FA, 0x00F8, 0x00F8, 0x00F8, 0x00F6, 0x00F6, 0x00F6, 0x00F4, 0x00F4,
+ 0x00F4, 0x00F2, 0x00F2, 0x00F2, 0x00F0, 0x00F0, 0x00EF, 0x00EF, 0x00EF, 0x00ED, 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00EA, 0x00EA,
+ 0x00E8, 0x00E8, 0x00E6, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00E0, 0x00DE, 0x00DE, 0x00DD, 0x00DD,
+ 0x00DB, 0x00DB, 0x00DA, 0x00DA, 0x00D9, 0x00D7, 0x00D7, 0x00D6, 0x00D6, 0x00D4, 0x00D4, 0x00D3, 0x00D2, 0x00D2, 0x00D0, 0x00D0,
+ 0x00CF, 0x00CE, 0x00CE, 0x00CC, 0x00CC, 0x00CB, 0x00CA, 0x00CA, 0x00C9, 0x00C7, 0x00C7, 0x00C6, 0x00C6, 0x00C5, 0x00C4, 0x00C4,
+ 0x00C3, 0x00C1, 0x00C1, 0x00C0, 0x00BF, 0x00BF, 0x00BE, 0x00BD, 0x00BC, 0x00BC, 0x00BB, 0x00BA, 0x00BA, 0x00B9, 0x00B8, 0x00B8,
+ 0x0102, 0x0102, 0x0102, 0x0102, 0x0102, 0x0102, 0x0102, 0x0102, 0x0102, 0x0102, 0x0102, 0x0102, 0x0102, 0x0102, 0x0102, 0x0102,
+ 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x00FE, 0x00FE, 0x00FE, 0x00FE, 0x00FE, 0x00FC, 0x00FC, 0x00FC, 0x00FC,
+ 0x00FC, 0x00FA, 0x00FA, 0x00FA, 0x00F8, 0x00F8, 0x00F8, 0x00F8, 0x00F6, 0x00F6, 0x00F6, 0x00F4, 0x00F4, 0x00F4, 0x00F2, 0x00F2,
+ 0x00F2, 0x00F0, 0x00F0, 0x00F0, 0x00EF, 0x00EF, 0x00ED, 0x00ED, 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00EA, 0x00EA, 0x00E8, 0x00E8,
+ 0x00E6, 0x00E6, 0x00E5, 0x00E5, 0x00E3, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00E0, 0x00E0, 0x00DE, 0x00DD, 0x00DD, 0x00DB, 0x00DB,
+ 0x00DA, 0x00DA, 0x00D9, 0x00D9, 0x00D7, 0x00D7, 0x00D6, 0x00D6, 0x00D4, 0x00D3, 0x00D3, 0x00D2, 0x00D2, 0x00D0, 0x00CF, 0x00CF,
+ 0x00CE, 0x00CE, 0x00CC, 0x00CB, 0x00CB, 0x00CA, 0x00CA, 0x00C9, 0x00C7, 0x00C7, 0x00C6, 0x00C5, 0x00C5, 0x00C4, 0x00C3, 0x00C3,
+ 0x00C1, 0x00C0, 0x00C0, 0x00BF, 0x00BE, 0x00BE, 0x00BD, 0x00BC, 0x00BC, 0x00BB, 0x00BA, 0x00BA, 0x00B9, 0x00B8, 0x00B8, 0x00B7,
+ 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100,
+ 0x00FE, 0x00FE, 0x00FE, 0x00FE, 0x00FE, 0x00FE, 0x00FE, 0x00FC, 0x00FC, 0x00FC, 0x00FC, 0x00FC, 0x00FA, 0x00FA, 0x00FA, 0x00FA,
+ 0x00FA, 0x00F8, 0x00F8, 0x00F8, 0x00F8, 0x00F6, 0x00F6, 0x00F6, 0x00F4, 0x00F4, 0x00F4, 0x00F2, 0x00F2, 0x00F2, 0x00F0, 0x00F0,
+ 0x00F0, 0x00EF, 0x00EF, 0x00EF, 0x00ED, 0x00ED, 0x00ED, 0x00EB, 0x00EB, 0x00EA, 0x00EA, 0x00EA, 0x00E8, 0x00E8, 0x00E6, 0x00E6,
+ 0x00E5, 0x00E5, 0x00E3, 0x00E3, 0x00E3, 0x00E1, 0x00E1, 0x00E0, 0x00E0, 0x00DE, 0x00DE, 0x00DD, 0x00DD, 0x00DB, 0x00DB, 0x00DA,
+ 0x00DA, 0x00D9, 0x00D7, 0x00D7, 0x00D6, 0x00D6, 0x00D4, 0x00D4, 0x00D3, 0x00D3, 0x00D2, 0x00D0, 0x00D0, 0x00CF, 0x00CF, 0x00CE,
+ 0x00CC, 0x00CC, 0x00CB, 0x00CB, 0x00CA, 0x00C9, 0x00C9, 0x00C7, 0x00C7, 0x00C6, 0x00C5, 0x00C5, 0x00C4, 0x00C3, 0x00C3, 0x00C1,
+ 0x00C0, 0x00C0, 0x00BF, 0x00BE, 0x00BE, 0x00BD, 0x00BC, 0x00BC, 0x00BB, 0x00BA, 0x00BA, 0x00B9, 0x00B8, 0x00B8, 0x00B7, 0x00B6
+};
+#define kDeltaUsedToBuildTable 32
diff --git a/gfx/skia/skia/src/effects/SkGammaColorFilter.cpp b/gfx/skia/skia/src/effects/SkGammaColorFilter.cpp
new file mode 100644
index 000000000..eba8e320d
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkGammaColorFilter.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkGammaColorFilter.h"
+
+#include "SkReadBuffer.h"
+#include "SkString.h"
+
+#if SK_SUPPORT_GPU
+#include "effects/GrGammaEffect.h"
+#endif
+
+void SkGammaColorFilter::filterSpan(const SkPMColor src[], int count,
+ SkPMColor dst[]) const {
+ // Gamma-correcting bytes to bytes is pretty questionable.
+ SkASSERT(0);
+ for (int i = 0; i < count; ++i) {
+ SkPMColor c = src[i];
+
+ // TODO: implement cpu gamma correction?
+ dst[i] = c;
+ }
+}
+
+sk_sp<SkColorFilter> SkGammaColorFilter::Make(SkScalar gamma) {
+ return sk_sp<SkColorFilter>(new SkGammaColorFilter(gamma));
+}
+
+SkGammaColorFilter::SkGammaColorFilter(SkScalar gamma) : fGamma(gamma) {}
+
+sk_sp<SkFlattenable> SkGammaColorFilter::CreateProc(SkReadBuffer& buffer) {
+ SkScalar gamma = buffer.readScalar();
+
+ return Make(gamma);
+}
+
+void SkGammaColorFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeScalar(fGamma);
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkGammaColorFilter::toString(SkString* str) const {
+ str->appendf("SkGammaColorFilter (%.2f)", fGamma);
+}
+#endif
+
+#if SK_SUPPORT_GPU
+sk_sp<GrFragmentProcessor> SkGammaColorFilter::asFragmentProcessor(GrContext*) const {
+ return GrGammaEffect::Make(fGamma);
+}
+#endif
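
Note on the filter above: the CPU path in filterSpan() is deliberately a pass-through (it asserts in debug builds and copies pixels unchanged), so the gamma correction only takes effect on the GPU path via GrGammaEffect. A minimal usage sketch, assuming the SkGammaColorFilter.h header added elsewhere in this patch; the helper name applyGamma and the 2.2 exponent are illustrative, not part of the patch:

    #include "SkGammaColorFilter.h"
    #include "SkPaint.h"

    // Attach the gamma color filter to a paint. On a CPU raster target this is
    // effectively a no-op (see filterSpan above); on a GPU target it maps to
    // GrGammaEffect.
    void applyGamma(SkPaint* paint) {
        paint->setColorFilter(SkGammaColorFilter::Make(2.2f));  // illustrative exponent
    }
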
diff --git a/gfx/skia/skia/src/effects/SkGaussianEdgeShader.cpp b/gfx/skia/skia/src/effects/SkGaussianEdgeShader.cpp
new file mode 100644
index 000000000..ddc5d96fc
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkGaussianEdgeShader.cpp
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkGaussianEdgeShader.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+
+ /** \class SkGaussianEdgeShaderImpl
+ This subclass of shader applies a Gaussian blur to the shadow edge.
+
+ If largerBlur is false:
+ The radius of the Gaussian blur is specified by the g value of the color, in 6.2 fixed point.
+ For spot shadows, we increase the stroke width to set the shadow against the shape. This pad
+ is specified by b, also in 6.2 fixed point. The r value represents the max final alpha.
+ The incoming alpha should be 1.
+
+ If largerBlur is true:
+ The radius of the Gaussian blur is specified by the r & g values of the color in 14.2 fixed point.
+ For spot shadows, we increase the stroke width to set the shadow against the shape. This pad
+ is specified by b, also in 6.2 fixed point. The a value represents the max final alpha.
+
+ LargerBlur will be removed once Android is migrated to the updated shader.
+ */
+class SkGaussianEdgeShaderImpl : public SkShader {
+public:
+ SkGaussianEdgeShaderImpl() {}
+
+ bool isOpaque() const override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(const AsFPArgs&) const override;
+#endif
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkGaussianEdgeShaderImpl)
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ friend class SkGaussianEdgeShader;
+
+ typedef SkShader INHERITED;
+};
+
+////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+#include "GrCoordTransform.h"
+#include "GrFragmentProcessor.h"
+#include "GrInvariantOutput.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "SkGr.h"
+#include "SkGrPriv.h"
+
+class GaussianEdgeFP : public GrFragmentProcessor {
+public:
+ GaussianEdgeFP() {
+ this->initClassID<GaussianEdgeFP>();
+
+ // enable output of distance information for shape
+ fUsesDistanceVectorField = true;
+ }
+
+ class GLSLGaussianEdgeFP : public GrGLSLFragmentProcessor {
+ public:
+ GLSLGaussianEdgeFP() {}
+
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ if (!args.fGpImplementsDistanceVector) {
+ fragBuilder->codeAppendf("// GP does not implement fsDistanceVector - "
+ " returning grey in GLSLGaussianEdgeFP\n");
+ fragBuilder->codeAppendf("vec4 color = %s;", args.fInputColor);
+ fragBuilder->codeAppendf("%s = vec4(0.0, 0.0, 0.0, color.r);", args.fOutputColor);
+ } else {
+ fragBuilder->codeAppendf("vec4 color = %s;", args.fInputColor);
+ fragBuilder->codeAppend("float radius = color.r*256.0*64.0 + color.g*64.0;");
+ fragBuilder->codeAppend("float pad = color.b*64.0;");
+
+ fragBuilder->codeAppendf("float factor = 1.0 - clamp((%s.z - pad)/radius, 0.0, 1.0);",
+ fragBuilder->distanceVectorName());
+ fragBuilder->codeAppend("factor = exp(-factor * factor * 4.0) - 0.018;");
+ fragBuilder->codeAppendf("%s = factor*vec4(0.0, 0.0, 0.0, color.a);",
+ args.fOutputColor);
+ }
+ }
+
+ static void GenKey(const GrProcessor& proc, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ // only one shader generated currently
+ b->add32(0x0);
+ }
+
+ protected:
+ void onSetData(const GrGLSLProgramDataManager& pdman, const GrProcessor& proc) override {}
+
+ bool fLargerBlur;
+ };
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLGaussianEdgeFP::GenKey(*this, caps, b);
+ }
+
+ const char* name() const override { return "GaussianEdgeFP"; }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ inout->mulByUnknownFourComponents();
+ }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override {
+ return new GLSLGaussianEdgeFP();
+ }
+
+ bool onIsEqual(const GrFragmentProcessor& proc) const override { return true; }
+};
+
+////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> SkGaussianEdgeShaderImpl::asFragmentProcessor(const AsFPArgs&) const {
+ return sk_make_sp<GaussianEdgeFP>();
+}
+
+#endif
+
+////////////////////////////////////////////////////////////////////////////
+
+bool SkGaussianEdgeShaderImpl::isOpaque() const {
+ return false;
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+#ifndef SK_IGNORE_TO_STRING
+void SkGaussianEdgeShaderImpl::toString(SkString* str) const {
+ str->appendf("GaussianEdgeShader: ()");
+}
+#endif
+
+sk_sp<SkFlattenable> SkGaussianEdgeShaderImpl::CreateProc(SkReadBuffer& buf) {
+ return sk_make_sp<SkGaussianEdgeShaderImpl>();
+}
+
+void SkGaussianEdgeShaderImpl::flatten(SkWriteBuffer& buf) const {
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkShader> SkGaussianEdgeShader::Make() {
+ return sk_make_sp<SkGaussianEdgeShaderImpl>();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkGaussianEdgeShader)
+SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkGaussianEdgeShaderImpl)
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END
+
+///////////////////////////////////////////////////////////////////////////////
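
For reference, the fragment shader emitted above reads its blur parameters straight out of the paint color: r and g together carry the blur radius (14.2 fixed point once the normalized channels are scaled back up), b carries the pad (6.2 fixed point), and a carries the final alpha. Below is a CPU-side mirror of that decode, written only to make the encoding concrete; the GaussianEdgeParams struct and decodeGaussianEdgeColor name are illustrative, not part of the patch:

    #include "SkColor.h"

    struct GaussianEdgeParams {
        float radius;  // blur radius, in pixels
        float pad;     // extra stroke pad used for spot shadows
    };

    // Mirrors the decode in GLSLGaussianEdgeFP::emitCode above, where the color
    // channels arrive normalized to [0, 1].
    GaussianEdgeParams decodeGaussianEdgeColor(SkColor c) {
        float r = SkColorGetR(c) / 255.0f;
        float g = SkColorGetG(c) / 255.0f;
        float b = SkColorGetB(c) / 255.0f;
        GaussianEdgeParams p;
        p.radius = r * 256.0f * 64.0f + g * 64.0f;  // "color.r*256.0*64.0 + color.g*64.0"
        p.pad    = b * 64.0f;                       // "color.b*64.0"
        return p;
    }
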
diff --git a/gfx/skia/skia/src/effects/SkImageSource.cpp b/gfx/skia/skia/src/effects/SkImageSource.cpp
new file mode 100644
index 000000000..f96a4a167
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkImageSource.cpp
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkImageSource.h"
+
+#include "SkCanvas.h"
+#include "SkImage.h"
+#include "SkReadBuffer.h"
+#include "SkSpecialImage.h"
+#include "SkSpecialSurface.h"
+#include "SkWriteBuffer.h"
+#include "SkString.h"
+
+sk_sp<SkImageFilter> SkImageSource::Make(sk_sp<SkImage> image) {
+ if (!image) {
+ return nullptr;
+ }
+
+ return sk_sp<SkImageFilter>(new SkImageSource(std::move(image)));
+}
+
+sk_sp<SkImageFilter> SkImageSource::Make(sk_sp<SkImage> image,
+ const SkRect& srcRect,
+ const SkRect& dstRect,
+ SkFilterQuality filterQuality) {
+ if (!image) {
+ return nullptr;
+ }
+
+ return sk_sp<SkImageFilter>(new SkImageSource(std::move(image),
+ srcRect, dstRect,
+ filterQuality));
+}
+
+SkImageSource::SkImageSource(sk_sp<SkImage> image)
+ : INHERITED(nullptr, 0, nullptr)
+ , fImage(std::move(image))
+ , fSrcRect(SkRect::MakeIWH(fImage->width(), fImage->height()))
+ , fDstRect(fSrcRect)
+ , fFilterQuality(kHigh_SkFilterQuality) {
+}
+
+SkImageSource::SkImageSource(sk_sp<SkImage> image,
+ const SkRect& srcRect,
+ const SkRect& dstRect,
+ SkFilterQuality filterQuality)
+ : INHERITED(nullptr, 0, nullptr)
+ , fImage(std::move(image))
+ , fSrcRect(srcRect)
+ , fDstRect(dstRect)
+ , fFilterQuality(filterQuality) {
+}
+
+sk_sp<SkFlattenable> SkImageSource::CreateProc(SkReadBuffer& buffer) {
+ SkFilterQuality filterQuality = (SkFilterQuality)buffer.readInt();
+
+ SkRect src, dst;
+ buffer.readRect(&src);
+ buffer.readRect(&dst);
+
+ sk_sp<SkImage> image(buffer.readImage());
+ if (!image) {
+ return nullptr;
+ }
+
+ return SkImageSource::Make(std::move(image), src, dst, filterQuality);
+}
+
+void SkImageSource::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeInt(fFilterQuality);
+ buffer.writeRect(fSrcRect);
+ buffer.writeRect(fDstRect);
+ buffer.writeImage(fImage.get());
+}
+
+sk_sp<SkSpecialImage> SkImageSource::onFilterImage(SkSpecialImage* source, const Context& ctx,
+ SkIPoint* offset) const {
+ SkRect dstRect;
+ ctx.ctm().mapRect(&dstRect, fDstRect);
+
+ SkRect bounds = SkRect::MakeIWH(fImage->width(), fImage->height());
+ if (fSrcRect == bounds && dstRect == bounds) {
+ // No regions cropped out or resized; return entire image.
+ offset->fX = offset->fY = 0;
+ return SkSpecialImage::MakeFromImage(SkIRect::MakeWH(fImage->width(), fImage->height()),
+ fImage,
+ &source->props());
+ }
+
+ const SkIRect dstIRect = dstRect.roundOut();
+
+ sk_sp<SkSpecialSurface> surf(source->makeSurface(ctx.outputProperties(), dstIRect.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ // TODO: it seems like this clear shouldn't be necessary (see skbug.com/5075)
+ canvas->clear(0x0);
+
+ SkPaint paint;
+
+ // Subtract off the integer component of the translation (will be applied in offset, below).
+ dstRect.offset(-SkIntToScalar(dstIRect.fLeft), -SkIntToScalar(dstIRect.fTop));
+ paint.setBlendMode(SkBlendMode::kSrc);
+ // FIXME: this probably shouldn't be necessary, but drawImageRect asserts
+ // None filtering when it's translate-only
+ paint.setFilterQuality(
+ fSrcRect.width() == dstRect.width() && fSrcRect.height() == dstRect.height() ?
+ kNone_SkFilterQuality : fFilterQuality);
+ canvas->drawImageRect(fImage.get(), fSrcRect, dstRect, &paint,
+ SkCanvas::kStrict_SrcRectConstraint);
+
+ offset->fX = dstIRect.fLeft;
+ offset->fY = dstIRect.fTop;
+ return surf->makeImageSnapshot();
+}
+
+SkRect SkImageSource::computeFastBounds(const SkRect& src) const {
+ return fDstRect;
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkImageSource::toString(SkString* str) const {
+ str->appendf("SkImageSource: (");
+ str->appendf("src: (%f,%f,%f,%f) dst: (%f,%f,%f,%f) ",
+ fSrcRect.fLeft, fSrcRect.fTop, fSrcRect.fRight, fSrcRect.fBottom,
+ fDstRect.fLeft, fDstRect.fTop, fDstRect.fRight, fDstRect.fBottom);
+ str->appendf("image: (%d,%d)",
+ fImage->width(), fImage->height());
+ str->append(")");
+}
+#endif
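
SkImageSource is the leaf of an image-filter graph: it injects an existing SkImage, optionally resampling srcRect into dstRect at the requested filter quality (note the fast path in onFilterImage when both rects cover the whole image). A minimal construction sketch using the Make overload above; the makeScaledSource name, the rectangles, and the quality are illustrative, and the image is assumed to be non-null (Make returns nullptr otherwise):

    #include "SkFilterQuality.h"
    #include "SkImage.h"
    #include "SkImageFilter.h"
    #include "SkImageSource.h"
    #include "SkRect.h"
    #include <utility>

    // Scale a 100x100 region of the image into a 50x50 destination when the
    // filter graph is evaluated.
    sk_sp<SkImageFilter> makeScaledSource(sk_sp<SkImage> image) {
        const SkRect src = SkRect::MakeWH(100, 100);
        const SkRect dst = SkRect::MakeWH(50, 50);
        return SkImageSource::Make(std::move(image), src, dst, kMedium_SkFilterQuality);
    }
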
diff --git a/gfx/skia/skia/src/effects/SkLayerDrawLooper.cpp b/gfx/skia/skia/src/effects/SkLayerDrawLooper.cpp
new file mode 100644
index 000000000..784228fa8
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkLayerDrawLooper.cpp
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkCanvas.h"
+#include "SkColor.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+#include "SkLayerDrawLooper.h"
+#include "SkString.h"
+#include "SkStringUtils.h"
+#include "SkUnPreMultiply.h"
+
+SkLayerDrawLooper::LayerInfo::LayerInfo() {
+ fPaintBits = 0; // ignore our paint fields
+ fColorMode = SkXfermode::kDst_Mode; // ignore our color
+ fOffset.set(0, 0);
+ fPostTranslate = false;
+}
+
+SkLayerDrawLooper::SkLayerDrawLooper()
+ : fRecs(nullptr),
+ fCount(0) {
+}
+
+SkLayerDrawLooper::~SkLayerDrawLooper() {
+ Rec* rec = fRecs;
+ while (rec) {
+ Rec* next = rec->fNext;
+ delete rec;
+ rec = next;
+ }
+}
+
+SkLayerDrawLooper::Context* SkLayerDrawLooper::createContext(SkCanvas* canvas, void* storage) const {
+ canvas->save();
+ return new (storage) LayerDrawLooperContext(this);
+}
+
+static SkColor xferColor(SkColor src, SkColor dst, SkXfermode::Mode mode) {
+ switch (mode) {
+ case SkXfermode::kSrc_Mode:
+ return src;
+ case SkXfermode::kDst_Mode:
+ return dst;
+ default: {
+ SkPMColor pmS = SkPreMultiplyColor(src);
+ SkPMColor pmD = SkPreMultiplyColor(dst);
+ SkPMColor result = SkXfermode::GetProc(mode)(pmS, pmD);
+ return SkUnPreMultiply::PMColorToColor(result);
+ }
+ }
+}
+
+// Even with kEntirePaint_Bits, we always ensure that the master paint's
+// text-encoding is respected, since that controls how we interpret the
+// text/length parameters of a draw[Pos]Text call.
+void SkLayerDrawLooper::LayerDrawLooperContext::ApplyInfo(
+ SkPaint* dst, const SkPaint& src, const LayerInfo& info) {
+
+ dst->setColor(xferColor(src.getColor(), dst->getColor(), info.fColorMode));
+
+ BitFlags bits = info.fPaintBits;
+ SkPaint::TextEncoding encoding = dst->getTextEncoding();
+
+ if (0 == bits) {
+ return;
+ }
+ if (kEntirePaint_Bits == bits) {
+ // we've already computed these, so preserve them across the assignment below
+ uint32_t f = dst->getFlags();
+ SkColor c = dst->getColor();
+ *dst = src;
+ dst->setFlags(f);
+ dst->setColor(c);
+ dst->setTextEncoding(encoding);
+ return;
+ }
+
+ if (bits & kStyle_Bit) {
+ dst->setStyle(src.getStyle());
+ dst->setStrokeWidth(src.getStrokeWidth());
+ dst->setStrokeMiter(src.getStrokeMiter());
+ dst->setStrokeCap(src.getStrokeCap());
+ dst->setStrokeJoin(src.getStrokeJoin());
+ }
+
+ if (bits & kTextSkewX_Bit) {
+ dst->setTextSkewX(src.getTextSkewX());
+ }
+
+ if (bits & kPathEffect_Bit) {
+ dst->setPathEffect(sk_ref_sp(src.getPathEffect()));
+ }
+ if (bits & kMaskFilter_Bit) {
+ dst->setMaskFilter(sk_ref_sp(src.getMaskFilter()));
+ }
+ if (bits & kShader_Bit) {
+ dst->setShader(sk_ref_sp(src.getShader()));
+ }
+ if (bits & kColorFilter_Bit) {
+ dst->setColorFilter(sk_ref_sp(src.getColorFilter()));
+ }
+ if (bits & kXfermode_Bit) {
+ dst->setBlendMode(src.getBlendMode());
+ }
+
+ // we don't override these
+#if 0
+ dst->setTypeface(src.getTypeface());
+ dst->setTextSize(src.getTextSize());
+ dst->setTextScaleX(src.getTextScaleX());
+ dst->setRasterizer(src.getRasterizer());
+ dst->setLooper(src.getLooper());
+ dst->setTextEncoding(src.getTextEncoding());
+ dst->setHinting(src.getHinting());
+#endif
+}
+
+// Should we add this to canvas?
+static void postTranslate(SkCanvas* canvas, SkScalar dx, SkScalar dy) {
+ SkMatrix m = canvas->getTotalMatrix();
+ m.postTranslate(dx, dy);
+ canvas->setMatrix(m);
+}
+
+SkLayerDrawLooper::LayerDrawLooperContext::LayerDrawLooperContext(
+ const SkLayerDrawLooper* looper) : fCurrRec(looper->fRecs) {}
+
+bool SkLayerDrawLooper::LayerDrawLooperContext::next(SkCanvas* canvas,
+ SkPaint* paint) {
+ canvas->restore();
+ if (nullptr == fCurrRec) {
+ return false;
+ }
+
+ ApplyInfo(paint, fCurrRec->fPaint, fCurrRec->fInfo);
+
+ canvas->save();
+ if (fCurrRec->fInfo.fPostTranslate) {
+ postTranslate(canvas, fCurrRec->fInfo.fOffset.fX,
+ fCurrRec->fInfo.fOffset.fY);
+ } else {
+ canvas->translate(fCurrRec->fInfo.fOffset.fX,
+ fCurrRec->fInfo.fOffset.fY);
+ }
+ fCurrRec = fCurrRec->fNext;
+
+ return true;
+}
+
+bool SkLayerDrawLooper::asABlurShadow(BlurShadowRec* bsRec) const {
+ if (fCount != 2) {
+ return false;
+ }
+ const Rec* rec = fRecs;
+
+ // bottom layer needs to be just blur(maskfilter)
+ if ((rec->fInfo.fPaintBits & ~kMaskFilter_Bit)) {
+ return false;
+ }
+ if (SkXfermode::kSrc_Mode != rec->fInfo.fColorMode) {
+ return false;
+ }
+ const SkMaskFilter* mf = rec->fPaint.getMaskFilter();
+ if (nullptr == mf) {
+ return false;
+ }
+ SkMaskFilter::BlurRec maskBlur;
+ if (!mf->asABlur(&maskBlur)) {
+ return false;
+ }
+
+ rec = rec->fNext;
+ // top layer needs to be "plain"
+ if (rec->fInfo.fPaintBits) {
+ return false;
+ }
+ if (SkXfermode::kDst_Mode != rec->fInfo.fColorMode) {
+ return false;
+ }
+ if (!rec->fInfo.fOffset.equals(0, 0)) {
+ return false;
+ }
+
+ if (bsRec) {
+ bsRec->fSigma = maskBlur.fSigma;
+ bsRec->fOffset = fRecs->fInfo.fOffset;
+ bsRec->fColor = fRecs->fPaint.getColor();
+ bsRec->fStyle = maskBlur.fStyle;
+ bsRec->fQuality = maskBlur.fQuality;
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkLayerDrawLooper::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeInt(fCount);
+
+ Rec* rec = fRecs;
+ for (int i = 0; i < fCount; i++) {
+ // Legacy "flagsmask" field -- now ignored, remove when we bump version
+ buffer.writeInt(0);
+
+ buffer.writeInt(rec->fInfo.fPaintBits);
+ buffer.writeInt(rec->fInfo.fColorMode);
+ buffer.writePoint(rec->fInfo.fOffset);
+ buffer.writeBool(rec->fInfo.fPostTranslate);
+ buffer.writePaint(rec->fPaint);
+ rec = rec->fNext;
+ }
+}
+
+sk_sp<SkFlattenable> SkLayerDrawLooper::CreateProc(SkReadBuffer& buffer) {
+ int count = buffer.readInt();
+
+ Builder builder;
+ for (int i = 0; i < count; i++) {
+ LayerInfo info;
+ // Legacy "flagsmask" field -- now ignored, remove when we bump version
+ (void)buffer.readInt();
+
+ info.fPaintBits = buffer.readInt();
+ info.fColorMode = (SkXfermode::Mode)buffer.readInt();
+ buffer.readPoint(&info.fOffset);
+ info.fPostTranslate = buffer.readBool();
+ buffer.readPaint(builder.addLayerOnTop(info));
+ }
+ return builder.detach();
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkLayerDrawLooper::toString(SkString* str) const {
+ str->appendf("SkLayerDrawLooper (%d): ", fCount);
+
+ Rec* rec = fRecs;
+ for (int i = 0; i < fCount; i++) {
+ str->appendf("%d: paintBits: (", i);
+ if (0 == rec->fInfo.fPaintBits) {
+ str->append("None");
+ } else if (kEntirePaint_Bits == rec->fInfo.fPaintBits) {
+ str->append("EntirePaint");
+ } else {
+ bool needSeparator = false;
+ SkAddFlagToString(str, SkToBool(kStyle_Bit & rec->fInfo.fPaintBits), "Style",
+ &needSeparator);
+ SkAddFlagToString(str, SkToBool(kTextSkewX_Bit & rec->fInfo.fPaintBits), "TextSkewX",
+ &needSeparator);
+ SkAddFlagToString(str, SkToBool(kPathEffect_Bit & rec->fInfo.fPaintBits), "PathEffect",
+ &needSeparator);
+ SkAddFlagToString(str, SkToBool(kMaskFilter_Bit & rec->fInfo.fPaintBits), "MaskFilter",
+ &needSeparator);
+ SkAddFlagToString(str, SkToBool(kShader_Bit & rec->fInfo.fPaintBits), "Shader",
+ &needSeparator);
+ SkAddFlagToString(str, SkToBool(kColorFilter_Bit & rec->fInfo.fPaintBits), "ColorFilter",
+ &needSeparator);
+ SkAddFlagToString(str, SkToBool(kXfermode_Bit & rec->fInfo.fPaintBits), "Xfermode",
+ &needSeparator);
+ }
+ str->append(") ");
+
+ static const char* gModeStrings[SkXfermode::kLastMode+1] = {
+ "kClear", "kSrc", "kDst", "kSrcOver", "kDstOver", "kSrcIn", "kDstIn",
+ "kSrcOut", "kDstOut", "kSrcATop", "kDstATop", "kXor", "kPlus",
+ "kMultiply", "kScreen", "kOverlay", "kDarken", "kLighten", "kColorDodge",
+ "kColorBurn", "kHardLight", "kSoftLight", "kDifference", "kExclusion"
+ };
+
+ str->appendf("mode: %s ", gModeStrings[rec->fInfo.fColorMode]);
+
+ str->append("offset: (");
+ str->appendScalar(rec->fInfo.fOffset.fX);
+ str->append(", ");
+ str->appendScalar(rec->fInfo.fOffset.fY);
+ str->append(") ");
+
+ str->append("postTranslate: ");
+ if (rec->fInfo.fPostTranslate) {
+ str->append("true ");
+ } else {
+ str->append("false ");
+ }
+
+ rec->fPaint.toString(str);
+ rec = rec->fNext;
+ }
+}
+#endif
+
+SkLayerDrawLooper::Builder::Builder()
+ : fRecs(nullptr),
+ fTopRec(nullptr),
+ fCount(0) {
+}
+
+SkLayerDrawLooper::Builder::~Builder() {
+ Rec* rec = fRecs;
+ while (rec) {
+ Rec* next = rec->fNext;
+ delete rec;
+ rec = next;
+ }
+}
+
+SkPaint* SkLayerDrawLooper::Builder::addLayer(const LayerInfo& info) {
+ fCount += 1;
+
+ Rec* rec = new Rec;
+ rec->fNext = fRecs;
+ rec->fInfo = info;
+ fRecs = rec;
+ if (nullptr == fTopRec) {
+ fTopRec = rec;
+ }
+
+ return &rec->fPaint;
+}
+
+void SkLayerDrawLooper::Builder::addLayer(SkScalar dx, SkScalar dy) {
+ LayerInfo info;
+
+ info.fOffset.set(dx, dy);
+ (void)this->addLayer(info);
+}
+
+SkPaint* SkLayerDrawLooper::Builder::addLayerOnTop(const LayerInfo& info) {
+ fCount += 1;
+
+ Rec* rec = new Rec;
+ rec->fNext = nullptr;
+ rec->fInfo = info;
+ if (nullptr == fRecs) {
+ fRecs = rec;
+ } else {
+ SkASSERT(fTopRec);
+ fTopRec->fNext = rec;
+ }
+ fTopRec = rec;
+
+ return &rec->fPaint;
+}
+
+sk_sp<SkDrawLooper> SkLayerDrawLooper::Builder::detach() {
+ SkLayerDrawLooper* looper = new SkLayerDrawLooper;
+ looper->fCount = fCount;
+ looper->fRecs = fRecs;
+
+ fCount = 0;
+ fRecs = nullptr;
+ fTopRec = nullptr;
+
+ return sk_sp<SkDrawLooper>(looper);
+}
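
The Builder above prepends with addLayer() and appends with addLayerOnTop(); the looper context then walks the list head-to-tail, so the head layer is drawn first and ends up underneath. asABlurShadow() only recognizes the specific two-layer shape sketched below: a bottom layer contributing nothing but a blur mask filter in kSrc color mode, and an unmodified top layer with no offset. The makeShadowLooper name and the sigma, offset, and color values are illustrative, not part of the patch:

    #include "SkBlurMaskFilter.h"
    #include "SkColor.h"
    #include "SkLayerDrawLooper.h"
    #include "SkPaint.h"
    #include "SkXfermode.h"

    // Drop-shadow-style looper in the exact shape asABlurShadow() looks for.
    sk_sp<SkDrawLooper> makeShadowLooper() {
        SkLayerDrawLooper::Builder builder;

        // Bottom layer: drawn first, offset and blurred; only its mask filter is used.
        SkLayerDrawLooper::LayerInfo shadowInfo;
        shadowInfo.fPaintBits = SkLayerDrawLooper::kMaskFilter_Bit;
        shadowInfo.fColorMode = SkXfermode::kSrc_Mode;
        shadowInfo.fOffset.set(4, 4);
        SkPaint* shadowPaint = builder.addLayerOnTop(shadowInfo);
        shadowPaint->setColor(SkColorSetARGB(0x80, 0, 0, 0));
        shadowPaint->setMaskFilter(SkBlurMaskFilter::Make(kNormal_SkBlurStyle, 3.0f));

        // Top layer: the defaults (no paint bits, kDst color mode, no offset) draw the
        // original paint unchanged on top of the shadow.
        SkLayerDrawLooper::LayerInfo plainInfo;
        builder.addLayerOnTop(plainInfo);

        return builder.detach();
    }
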
diff --git a/gfx/skia/skia/src/effects/SkLayerRasterizer.cpp b/gfx/skia/skia/src/effects/SkLayerRasterizer.cpp
new file mode 100644
index 000000000..71d7fb63c
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkLayerRasterizer.cpp
@@ -0,0 +1,227 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkLayerRasterizer.h"
+#include "SkDraw.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+#include "SkMask.h"
+#include "SkMaskFilter.h"
+#include "SkPaint.h"
+#include "SkPath.h"
+#include "SkPathEffect.h"
+#include "../core/SkRasterClip.h"
+#include "../core/SkStrokeRec.h"
+#include "SkXfermode.h"
+#include <new>
+
+struct SkLayerRasterizer_Rec {
+ SkPaint fPaint;
+ SkVector fOffset;
+};
+
+SkLayerRasterizer::SkLayerRasterizer() : fLayers(new SkDeque(sizeof(SkLayerRasterizer_Rec))) {}
+
+SkLayerRasterizer::SkLayerRasterizer(SkDeque* layers) : fLayers(layers)
+{
+}
+
+// Helper function to call destructors on SkPaints held by layers and delete layers.
+static void clean_up_layers(SkDeque* layers) {
+ SkDeque::F2BIter iter(*layers);
+ SkLayerRasterizer_Rec* rec;
+
+ while ((rec = (SkLayerRasterizer_Rec*)iter.next()) != nullptr)
+ rec->fPaint.~SkPaint();
+
+ delete layers;
+}
+
+SkLayerRasterizer::~SkLayerRasterizer() {
+ SkASSERT(fLayers);
+ clean_up_layers(const_cast<SkDeque*>(fLayers));
+}
+
+static bool compute_bounds(const SkDeque& layers, const SkPath& path,
+ const SkMatrix& matrix,
+ const SkIRect* clipBounds, SkIRect* bounds) {
+ SkDeque::F2BIter iter(layers);
+ SkLayerRasterizer_Rec* rec;
+
+ bounds->set(SK_MaxS32, SK_MaxS32, SK_MinS32, SK_MinS32);
+
+ while ((rec = (SkLayerRasterizer_Rec*)iter.next()) != nullptr) {
+ const SkPaint& paint = rec->fPaint;
+ SkPath fillPath, devPath;
+ const SkPath* p = &path;
+
+ if (paint.getPathEffect() || paint.getStyle() != SkPaint::kFill_Style) {
+ paint.getFillPath(path, &fillPath);
+ p = &fillPath;
+ }
+ if (p->isEmpty()) {
+ continue;
+ }
+
+ // apply the matrix and offset
+ {
+ SkMatrix m = matrix;
+ m.preTranslate(rec->fOffset.fX, rec->fOffset.fY);
+ p->transform(m, &devPath);
+ }
+
+ SkMask mask;
+ if (!SkDraw::DrawToMask(devPath, clipBounds, paint.getMaskFilter(),
+ &matrix, &mask,
+ SkMask::kJustComputeBounds_CreateMode,
+ SkStrokeRec::kFill_InitStyle)) {
+ return false;
+ }
+
+ bounds->join(mask.fBounds);
+ }
+ return true;
+}
+
+bool SkLayerRasterizer::onRasterize(const SkPath& path, const SkMatrix& matrix,
+ const SkIRect* clipBounds,
+ SkMask* mask, SkMask::CreateMode mode) const {
+ SkASSERT(fLayers);
+ if (fLayers->empty()) {
+ return false;
+ }
+
+ if (SkMask::kJustRenderImage_CreateMode != mode) {
+ if (!compute_bounds(*fLayers, path, matrix, clipBounds, &mask->fBounds))
+ return false;
+ }
+
+ if (SkMask::kComputeBoundsAndRenderImage_CreateMode == mode) {
+ mask->fFormat = SkMask::kA8_Format;
+ mask->fRowBytes = mask->fBounds.width();
+ size_t size = mask->computeImageSize();
+ if (0 == size) {
+ return false; // too big to allocate, abort
+ }
+ mask->fImage = SkMask::AllocImage(size);
+ memset(mask->fImage, 0, size);
+ }
+
+ if (SkMask::kJustComputeBounds_CreateMode != mode) {
+ SkDraw draw;
+ if (!draw.fDst.reset(*mask)) {
+ return false;
+ }
+
+ SkRasterClip rectClip;
+ SkMatrix translatedMatrix; // this translates us to our local pixels
+ SkMatrix drawMatrix; // this translates the path by each layer's offset
+
+ rectClip.setRect(SkIRect::MakeWH(mask->fBounds.width(), mask->fBounds.height()));
+
+ translatedMatrix = matrix;
+ translatedMatrix.postTranslate(-SkIntToScalar(mask->fBounds.fLeft),
+ -SkIntToScalar(mask->fBounds.fTop));
+
+ draw.fMatrix = &drawMatrix;
+ draw.fRC = &rectClip;
+ // we set the matrixproc in the loop, as the matrix changes each time (potentially)
+
+ SkDeque::F2BIter iter(*fLayers);
+ SkLayerRasterizer_Rec* rec;
+
+ while ((rec = (SkLayerRasterizer_Rec*)iter.next()) != nullptr) {
+ drawMatrix = translatedMatrix;
+ drawMatrix.preTranslate(rec->fOffset.fX, rec->fOffset.fY);
+ draw.drawPath(path, rec->fPaint);
+ }
+ }
+ return true;
+}
+
+sk_sp<SkFlattenable> SkLayerRasterizer::CreateProc(SkReadBuffer& buffer) {
+ return sk_sp<SkFlattenable>(new SkLayerRasterizer(ReadLayers(buffer)));
+}
+
+SkDeque* SkLayerRasterizer::ReadLayers(SkReadBuffer& buffer) {
+ int count = buffer.readInt();
+
+ SkDeque* layers = new SkDeque(sizeof(SkLayerRasterizer_Rec));
+ for (int i = 0; i < count; i++) {
+ SkLayerRasterizer_Rec* rec = (SkLayerRasterizer_Rec*)layers->push_back();
+
+ new (&rec->fPaint) SkPaint;
+ buffer.readPaint(&rec->fPaint);
+ buffer.readPoint(&rec->fOffset);
+ }
+ return layers;
+}
+
+void SkLayerRasterizer::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+
+ SkASSERT(fLayers);
+ buffer.writeInt(fLayers->count());
+
+ SkDeque::F2BIter iter(*fLayers);
+ const SkLayerRasterizer_Rec* rec;
+
+ while ((rec = (const SkLayerRasterizer_Rec*)iter.next()) != nullptr) {
+ buffer.writePaint(rec->fPaint);
+ buffer.writePoint(rec->fOffset);
+ }
+}
+
+SkLayerRasterizer::Builder::Builder() : fLayers(new SkDeque(sizeof(SkLayerRasterizer_Rec))) {}
+
+SkLayerRasterizer::Builder::~Builder()
+{
+ if (fLayers != nullptr) {
+ clean_up_layers(fLayers);
+ }
+}
+
+void SkLayerRasterizer::Builder::addLayer(const SkPaint& paint, SkScalar dx,
+ SkScalar dy) {
+ SkASSERT(fLayers);
+ SkLayerRasterizer_Rec* rec = (SkLayerRasterizer_Rec*)fLayers->push_back();
+
+ new (&rec->fPaint) SkPaint(paint);
+ rec->fOffset.set(dx, dy);
+}
+
+sk_sp<SkLayerRasterizer> SkLayerRasterizer::Builder::detach() {
+ SkLayerRasterizer* rasterizer;
+ if (0 == fLayers->count()) {
+ rasterizer = nullptr;
+ delete fLayers;
+ } else {
+ rasterizer = new SkLayerRasterizer(fLayers);
+ }
+ fLayers = nullptr;
+ return sk_sp<SkLayerRasterizer>(rasterizer);
+}
+
+sk_sp<SkLayerRasterizer> SkLayerRasterizer::Builder::snapshot() const {
+ if (0 == fLayers->count()) {
+ return nullptr;
+ }
+ SkDeque* layers = new SkDeque(sizeof(SkLayerRasterizer_Rec), fLayers->count());
+ SkDeque::F2BIter iter(*fLayers);
+ const SkLayerRasterizer_Rec* recOrig;
+ SkDEBUGCODE(int count = 0;)
+ while ((recOrig = static_cast<SkLayerRasterizer_Rec*>(iter.next())) != nullptr) {
+ SkDEBUGCODE(count++);
+ SkLayerRasterizer_Rec* recCopy = static_cast<SkLayerRasterizer_Rec*>(layers->push_back());
+ new (&recCopy->fPaint) SkPaint(recOrig->fPaint);
+ recCopy->fOffset = recOrig->fOffset;
+ }
+ SkASSERT(fLayers->count() == count);
+ SkASSERT(layers->count() == count);
+ return sk_sp<SkLayerRasterizer>(new SkLayerRasterizer(layers));
+}
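
SkLayerRasterizer renders every (paint, offset) layer of the same path into a single A8 mask, so a stroked layer underneath a filled layer yields an outlined mask in one rasterization. A minimal Builder sketch using only the calls shown above; the makeOutlinedMask name and the stroke width are illustrative, not part of the patch:

    #include "SkLayerRasterizer.h"
    #include "SkPaint.h"

    // Two layers rendered into one mask: a widened stroke underneath, then the fill.
    sk_sp<SkLayerRasterizer> makeOutlinedMask() {
        SkLayerRasterizer::Builder builder;

        SkPaint stroke;
        stroke.setStyle(SkPaint::kStroke_Style);
        stroke.setStrokeWidth(4);
        builder.addLayer(stroke, 0, 0);

        SkPaint fill;                  // default paint style is kFill_Style
        builder.addLayer(fill, 0, 0);

        return builder.detach();       // Builder gives up ownership of its layers
    }
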
diff --git a/gfx/skia/skia/src/effects/SkLightingImageFilter.cpp b/gfx/skia/skia/src/effects/SkLightingImageFilter.cpp
new file mode 100644
index 000000000..057ef24e9
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkLightingImageFilter.cpp
@@ -0,0 +1,2183 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkLightingImageFilter.h"
+#include "SkBitmap.h"
+#include "SkColorPriv.h"
+#include "SkPoint3.h"
+#include "SkReadBuffer.h"
+#include "SkSpecialImage.h"
+#include "SkTypes.h"
+#include "SkWriteBuffer.h"
+
+#if SK_SUPPORT_GPU
+#include "GrContext.h"
+#include "GrDrawContext.h"
+#include "GrFixedClip.h"
+#include "GrFragmentProcessor.h"
+#include "GrInvariantOutput.h"
+#include "GrPaint.h"
+#include "SkGr.h"
+#include "SkGrPriv.h"
+#include "effects/GrSingleTextureEffect.h"
+#include "effects/GrTextureDomain.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+
+class GrGLDiffuseLightingEffect;
+class GrGLSpecularLightingEffect;
+
+// For brevity
+typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+#endif
+
+namespace {
+
+const SkScalar gOneThird = SkIntToScalar(1) / 3;
+const SkScalar gTwoThirds = SkIntToScalar(2) / 3;
+const SkScalar gOneHalf = 0.5f;
+const SkScalar gOneQuarter = 0.25f;
+
+#if SK_SUPPORT_GPU
+void setUniformPoint3(const GrGLSLProgramDataManager& pdman, UniformHandle uni,
+ const SkPoint3& point) {
+ GR_STATIC_ASSERT(sizeof(SkPoint3) == 3 * sizeof(float));
+ pdman.set3fv(uni, 1, &point.fX);
+}
+
+void setUniformNormal3(const GrGLSLProgramDataManager& pdman, UniformHandle uni,
+ const SkPoint3& point) {
+ setUniformPoint3(pdman, uni, point);
+}
+#endif
+
+// Shift matrix components to the left, as we advance pixels to the right.
+inline void shiftMatrixLeft(int m[9]) {
+ m[0] = m[1];
+ m[3] = m[4];
+ m[6] = m[7];
+ m[1] = m[2];
+ m[4] = m[5];
+ m[7] = m[8];
+}
+
+static inline void fast_normalize(SkPoint3* vector) {
+ // add a tiny bit so we don't have to worry about divide-by-zero
+ SkScalar magSq = vector->dot(*vector) + SK_ScalarNearlyZero;
+ SkScalar scale = sk_float_rsqrt(magSq);
+ vector->fX *= scale;
+ vector->fY *= scale;
+ vector->fZ *= scale;
+}
+
+class DiffuseLightingType {
+public:
+ DiffuseLightingType(SkScalar kd)
+ : fKD(kd) {}
+ SkPMColor light(const SkPoint3& normal, const SkPoint3& surfaceTolight,
+ const SkPoint3& lightColor) const {
+ SkScalar colorScale = SkScalarMul(fKD, normal.dot(surfaceTolight));
+ colorScale = SkScalarClampMax(colorScale, SK_Scalar1);
+ SkPoint3 color = lightColor.makeScale(colorScale);
+ return SkPackARGB32(255,
+ SkClampMax(SkScalarRoundToInt(color.fX), 255),
+ SkClampMax(SkScalarRoundToInt(color.fY), 255),
+ SkClampMax(SkScalarRoundToInt(color.fZ), 255));
+ }
+private:
+ SkScalar fKD;
+};
+
+static SkScalar max_component(const SkPoint3& p) {
+ return p.x() > p.y() ? (p.x() > p.z() ? p.x() : p.z()) : (p.y() > p.z() ? p.y() : p.z());
+}
+
+class SpecularLightingType {
+public:
+ SpecularLightingType(SkScalar ks, SkScalar shininess)
+ : fKS(ks), fShininess(shininess) {}
+ SkPMColor light(const SkPoint3& normal, const SkPoint3& surfaceTolight,
+ const SkPoint3& lightColor) const {
+ SkPoint3 halfDir(surfaceTolight);
+ halfDir.fZ += SK_Scalar1; // eye position is always (0, 0, 1)
+ fast_normalize(&halfDir);
+ SkScalar colorScale = SkScalarMul(fKS,
+ SkScalarPow(normal.dot(halfDir), fShininess));
+ colorScale = SkScalarClampMax(colorScale, SK_Scalar1);
+ SkPoint3 color = lightColor.makeScale(colorScale);
+ return SkPackARGB32(SkClampMax(SkScalarRoundToInt(max_component(color)), 255),
+ SkClampMax(SkScalarRoundToInt(color.fX), 255),
+ SkClampMax(SkScalarRoundToInt(color.fY), 255),
+ SkClampMax(SkScalarRoundToInt(color.fZ), 255));
+ }
+private:
+ SkScalar fKS;
+ SkScalar fShininess;
+};
+
+inline SkScalar sobel(int a, int b, int c, int d, int e, int f, SkScalar scale) {
+ return SkScalarMul(SkIntToScalar(-a + b - 2 * c + 2 * d -e + f), scale);
+}
+
+inline SkPoint3 pointToNormal(SkScalar x, SkScalar y, SkScalar surfaceScale) {
+ SkPoint3 vector = SkPoint3::Make(SkScalarMul(-x, surfaceScale),
+ SkScalarMul(-y, surfaceScale),
+ SK_Scalar1);
+ fast_normalize(&vector);
+ return vector;
+}
+
+inline SkPoint3 topLeftNormal(int m[9], SkScalar surfaceScale) {
+ return pointToNormal(sobel(0, 0, m[4], m[5], m[7], m[8], gTwoThirds),
+ sobel(0, 0, m[4], m[7], m[5], m[8], gTwoThirds),
+ surfaceScale);
+}
+
+inline SkPoint3 topNormal(int m[9], SkScalar surfaceScale) {
+ return pointToNormal(sobel( 0, 0, m[3], m[5], m[6], m[8], gOneThird),
+ sobel(m[3], m[6], m[4], m[7], m[5], m[8], gOneHalf),
+ surfaceScale);
+}
+
+inline SkPoint3 topRightNormal(int m[9], SkScalar surfaceScale) {
+ return pointToNormal(sobel( 0, 0, m[3], m[4], m[6], m[7], gTwoThirds),
+ sobel(m[3], m[6], m[4], m[7], 0, 0, gTwoThirds),
+ surfaceScale);
+}
+
+inline SkPoint3 leftNormal(int m[9], SkScalar surfaceScale) {
+ return pointToNormal(sobel(m[1], m[2], m[4], m[5], m[7], m[8], gOneHalf),
+ sobel( 0, 0, m[1], m[7], m[2], m[8], gOneThird),
+ surfaceScale);
+}
+
+
+inline SkPoint3 interiorNormal(int m[9], SkScalar surfaceScale) {
+ return pointToNormal(sobel(m[0], m[2], m[3], m[5], m[6], m[8], gOneQuarter),
+ sobel(m[0], m[6], m[1], m[7], m[2], m[8], gOneQuarter),
+ surfaceScale);
+}
+
+inline SkPoint3 rightNormal(int m[9], SkScalar surfaceScale) {
+ return pointToNormal(sobel(m[0], m[1], m[3], m[4], m[6], m[7], gOneHalf),
+ sobel(m[0], m[6], m[1], m[7], 0, 0, gOneThird),
+ surfaceScale);
+}
+
+inline SkPoint3 bottomLeftNormal(int m[9], SkScalar surfaceScale) {
+ return pointToNormal(sobel(m[1], m[2], m[4], m[5], 0, 0, gTwoThirds),
+ sobel( 0, 0, m[1], m[4], m[2], m[5], gTwoThirds),
+ surfaceScale);
+}
+
+inline SkPoint3 bottomNormal(int m[9], SkScalar surfaceScale) {
+ return pointToNormal(sobel(m[0], m[2], m[3], m[5], 0, 0, gOneThird),
+ sobel(m[0], m[3], m[1], m[4], m[2], m[5], gOneHalf),
+ surfaceScale);
+}
+
+inline SkPoint3 bottomRightNormal(int m[9], SkScalar surfaceScale) {
+ return pointToNormal(sobel(m[0], m[1], m[3], m[4], 0, 0, gTwoThirds),
+ sobel(m[0], m[3], m[1], m[4], 0, 0, gTwoThirds),
+ surfaceScale);
+}
+
+
+class UncheckedPixelFetcher {
+public:
+ static inline uint32_t Fetch(const SkBitmap& src, int x, int y, const SkIRect& bounds) {
+ return SkGetPackedA32(*src.getAddr32(x, y));
+ }
+};
+
+// The DecalPixelFetcher is used when the destination crop rect exceeds the input bitmap bounds.
+class DecalPixelFetcher {
+public:
+ static inline uint32_t Fetch(const SkBitmap& src, int x, int y, const SkIRect& bounds) {
+ if (x < bounds.fLeft || x >= bounds.fRight || y < bounds.fTop || y >= bounds.fBottom) {
+ return 0;
+ } else {
+ return SkGetPackedA32(*src.getAddr32(x, y));
+ }
+ }
+};
+
+template <class LightingType, class LightType, class PixelFetcher>
+void lightBitmap(const LightingType& lightingType,
+ const SkImageFilterLight* light,
+ const SkBitmap& src,
+ SkBitmap* dst,
+ SkScalar surfaceScale,
+ const SkIRect& bounds) {
+ SkASSERT(dst->width() == bounds.width() && dst->height() == bounds.height());
+ const LightType* l = static_cast<const LightType*>(light);
+ int left = bounds.left(), right = bounds.right();
+ int bottom = bounds.bottom();
+ int y = bounds.top();
+ SkIRect srcBounds = src.bounds();
+ SkPMColor* dptr = dst->getAddr32(0, 0);
+ {
+ int x = left;
+ int m[9];
+ m[4] = PixelFetcher::Fetch(src, x, y, srcBounds);
+ m[5] = PixelFetcher::Fetch(src, x + 1, y, srcBounds);
+ m[7] = PixelFetcher::Fetch(src, x, y + 1, srcBounds);
+ m[8] = PixelFetcher::Fetch(src, x + 1, y + 1, srcBounds);
+ SkPoint3 surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(topLeftNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ for (++x; x < right - 1; ++x)
+ {
+ shiftMatrixLeft(m);
+ m[5] = PixelFetcher::Fetch(src, x + 1, y, srcBounds);
+ m[8] = PixelFetcher::Fetch(src, x + 1, y + 1, srcBounds);
+ surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(topNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ }
+ shiftMatrixLeft(m);
+ surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(topRightNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ }
+
+ for (++y; y < bottom - 1; ++y) {
+ int x = left;
+ int m[9];
+ m[1] = PixelFetcher::Fetch(src, x, y - 1, srcBounds);
+ m[2] = PixelFetcher::Fetch(src, x + 1, y - 1, srcBounds);
+ m[4] = PixelFetcher::Fetch(src, x, y, srcBounds);
+ m[5] = PixelFetcher::Fetch(src, x + 1, y, srcBounds);
+ m[7] = PixelFetcher::Fetch(src, x, y + 1, srcBounds);
+ m[8] = PixelFetcher::Fetch(src, x + 1, y + 1, srcBounds);
+ SkPoint3 surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(leftNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ for (++x; x < right - 1; ++x) {
+ shiftMatrixLeft(m);
+ m[2] = PixelFetcher::Fetch(src, x + 1, y - 1, srcBounds);
+ m[5] = PixelFetcher::Fetch(src, x + 1, y, srcBounds);
+ m[8] = PixelFetcher::Fetch(src, x + 1, y + 1, srcBounds);
+ surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(interiorNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ }
+ shiftMatrixLeft(m);
+ surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(rightNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ }
+
+ {
+ int x = left;
+ int m[9];
+ m[1] = PixelFetcher::Fetch(src, x, bottom - 2, srcBounds);
+ m[2] = PixelFetcher::Fetch(src, x + 1, bottom - 2, srcBounds);
+ m[4] = PixelFetcher::Fetch(src, x, bottom - 1, srcBounds);
+ m[5] = PixelFetcher::Fetch(src, x + 1, bottom - 1, srcBounds);
+ SkPoint3 surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(bottomLeftNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ for (++x; x < right - 1; ++x)
+ {
+ shiftMatrixLeft(m);
+ m[2] = PixelFetcher::Fetch(src, x + 1, bottom - 2, srcBounds);
+ m[5] = PixelFetcher::Fetch(src, x + 1, bottom - 1, srcBounds);
+ surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(bottomNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ }
+ shiftMatrixLeft(m);
+ surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(bottomRightNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ }
+}
+
+template <class LightingType, class LightType>
+void lightBitmap(const LightingType& lightingType,
+ const SkImageFilterLight* light,
+ const SkBitmap& src,
+ SkBitmap* dst,
+ SkScalar surfaceScale,
+ const SkIRect& bounds) {
+ if (src.bounds().contains(bounds)) {
+ lightBitmap<LightingType, LightType, UncheckedPixelFetcher>(
+ lightingType, light, src, dst, surfaceScale, bounds);
+ } else {
+ lightBitmap<LightingType, LightType, DecalPixelFetcher>(
+ lightingType, light, src, dst, surfaceScale, bounds);
+ }
+}
+
+SkPoint3 readPoint3(SkReadBuffer& buffer) {
+ SkPoint3 point;
+ point.fX = buffer.readScalar();
+ point.fY = buffer.readScalar();
+ point.fZ = buffer.readScalar();
+ buffer.validate(SkScalarIsFinite(point.fX) &&
+ SkScalarIsFinite(point.fY) &&
+ SkScalarIsFinite(point.fZ));
+ return point;
+};
+
+void writePoint3(const SkPoint3& point, SkWriteBuffer& buffer) {
+ buffer.writeScalar(point.fX);
+ buffer.writeScalar(point.fY);
+ buffer.writeScalar(point.fZ);
+};
+
+enum BoundaryMode {
+ kTopLeft_BoundaryMode,
+ kTop_BoundaryMode,
+ kTopRight_BoundaryMode,
+ kLeft_BoundaryMode,
+ kInterior_BoundaryMode,
+ kRight_BoundaryMode,
+ kBottomLeft_BoundaryMode,
+ kBottom_BoundaryMode,
+ kBottomRight_BoundaryMode,
+
+ kBoundaryModeCount,
+};
+
+class SkLightingImageFilterInternal : public SkLightingImageFilter {
+protected:
+ SkLightingImageFilterInternal(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect)
+ : INHERITED(std::move(light), surfaceScale, std::move(input), cropRect) {
+ }
+
+#if SK_SUPPORT_GPU
+ sk_sp<SkSpecialImage> filterImageGPU(SkSpecialImage* source,
+ SkSpecialImage* input,
+ const SkIRect& bounds,
+ const SkMatrix& matrix,
+ const OutputProperties& outputProperties) const;
+ virtual sk_sp<GrFragmentProcessor> makeFragmentProcessor(GrTexture*,
+ const SkMatrix&,
+ const SkIRect* srcBounds,
+ BoundaryMode boundaryMode) const = 0;
+#endif
+private:
+#if SK_SUPPORT_GPU
+ void drawRect(GrDrawContext* drawContext,
+ GrTexture* src,
+ const SkMatrix& matrix,
+ const GrClip& clip,
+ const SkRect& dstRect,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds,
+ const SkIRect& bounds) const;
+#endif
+ typedef SkLightingImageFilter INHERITED;
+};
+
+#if SK_SUPPORT_GPU
+void SkLightingImageFilterInternal::drawRect(GrDrawContext* drawContext,
+ GrTexture* src,
+ const SkMatrix& matrix,
+ const GrClip& clip,
+ const SkRect& dstRect,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds,
+ const SkIRect& bounds) const {
+ SkRect srcRect = dstRect.makeOffset(SkIntToScalar(bounds.x()), SkIntToScalar(bounds.y()));
+ GrPaint paint;
+ paint.setGammaCorrect(drawContext->isGammaCorrect());
+ sk_sp<GrFragmentProcessor> fp(this->makeFragmentProcessor(src, matrix, srcBounds,
+ boundaryMode));
+ paint.addColorFragmentProcessor(std::move(fp));
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ drawContext->fillRectToRect(clip, paint, SkMatrix::I(), dstRect, srcRect);
+}
+
+sk_sp<SkSpecialImage> SkLightingImageFilterInternal::filterImageGPU(
+ SkSpecialImage* source,
+ SkSpecialImage* input,
+ const SkIRect& offsetBounds,
+ const SkMatrix& matrix,
+ const OutputProperties& outputProperties) const {
+ SkASSERT(source->isTextureBacked());
+
+ GrContext* context = source->getContext();
+
+ sk_sp<GrTexture> inputTexture(input->asTextureRef(context));
+ SkASSERT(inputTexture);
+
+ sk_sp<GrDrawContext> drawContext(
+ context->makeDrawContext(SkBackingFit::kApprox, offsetBounds.width(), offsetBounds.height(),
+ GrRenderableConfigForColorSpace(outputProperties.colorSpace()),
+ sk_ref_sp(outputProperties.colorSpace())));
+ if (!drawContext) {
+ return nullptr;
+ }
+
+ SkIRect dstIRect = SkIRect::MakeWH(offsetBounds.width(), offsetBounds.height());
+ SkRect dstRect = SkRect::Make(dstIRect);
+
+ // setup new clip
+ GrFixedClip clip(dstIRect);
+
+ const SkIRect inputBounds = SkIRect::MakeWH(input->width(), input->height());
+ SkRect topLeft = SkRect::MakeXYWH(0, 0, 1, 1);
+ SkRect top = SkRect::MakeXYWH(1, 0, dstRect.width() - 2, 1);
+ SkRect topRight = SkRect::MakeXYWH(dstRect.width() - 1, 0, 1, 1);
+ SkRect left = SkRect::MakeXYWH(0, 1, 1, dstRect.height() - 2);
+ SkRect interior = dstRect.makeInset(1, 1);
+ SkRect right = SkRect::MakeXYWH(dstRect.width() - 1, 1, 1, dstRect.height() - 2);
+ SkRect bottomLeft = SkRect::MakeXYWH(0, dstRect.height() - 1, 1, 1);
+ SkRect bottom = SkRect::MakeXYWH(1, dstRect.height() - 1, dstRect.width() - 2, 1);
+ SkRect bottomRight = SkRect::MakeXYWH(dstRect.width() - 1, dstRect.height() - 1, 1, 1);
+
+ const SkIRect* pSrcBounds = inputBounds.contains(offsetBounds) ? nullptr : &inputBounds;
+ this->drawRect(drawContext.get(), inputTexture.get(), matrix, clip, topLeft,
+ kTopLeft_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(drawContext.get(), inputTexture.get(), matrix, clip, top, kTop_BoundaryMode,
+ pSrcBounds, offsetBounds);
+ this->drawRect(drawContext.get(), inputTexture.get(), matrix, clip, topRight,
+ kTopRight_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(drawContext.get(), inputTexture.get(), matrix, clip, left, kLeft_BoundaryMode,
+ pSrcBounds, offsetBounds);
+ this->drawRect(drawContext.get(), inputTexture.get(), matrix, clip, interior,
+ kInterior_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(drawContext.get(), inputTexture.get(), matrix, clip, right, kRight_BoundaryMode,
+ pSrcBounds, offsetBounds);
+ this->drawRect(drawContext.get(), inputTexture.get(), matrix, clip, bottomLeft,
+ kBottomLeft_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(drawContext.get(), inputTexture.get(), matrix, clip, bottom,
+ kBottom_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(drawContext.get(), inputTexture.get(), matrix, clip, bottomRight,
+ kBottomRight_BoundaryMode, pSrcBounds, offsetBounds);
+
+ return SkSpecialImage::MakeFromGpu(SkIRect::MakeWH(offsetBounds.width(), offsetBounds.height()),
+ kNeedNewImageUniqueID_SpecialImage,
+ drawContext->asTexture(),
+ sk_ref_sp(drawContext->getColorSpace()));
+}
+#endif
+
+class SkDiffuseLightingImageFilter : public SkLightingImageFilterInternal {
+public:
+ static sk_sp<SkImageFilter> Make(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ SkScalar kd,
+ sk_sp<SkImageFilter>,
+ const CropRect*);
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkDiffuseLightingImageFilter)
+ SkScalar kd() const { return fKD; }
+
+protected:
+ SkDiffuseLightingImageFilter(sk_sp<SkImageFilterLight> light, SkScalar surfaceScale,
+ SkScalar kd,
+ sk_sp<SkImageFilter> input, const CropRect* cropRect);
+ void flatten(SkWriteBuffer& buffer) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* source, const Context&,
+ SkIPoint* offset) const override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> makeFragmentProcessor(GrTexture*, const SkMatrix&,
+ const SkIRect* bounds,
+ BoundaryMode) const override;
+#endif
+
+private:
+ friend class SkLightingImageFilter;
+ SkScalar fKD;
+
+ typedef SkLightingImageFilterInternal INHERITED;
+};
+
+class SkSpecularLightingImageFilter : public SkLightingImageFilterInternal {
+public:
+ static sk_sp<SkImageFilter> Make(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ SkScalar ks, SkScalar shininess,
+ sk_sp<SkImageFilter>, const CropRect*);
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkSpecularLightingImageFilter)
+
+ SkScalar ks() const { return fKS; }
+ SkScalar shininess() const { return fShininess; }
+
+protected:
+ SkSpecularLightingImageFilter(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess,
+ sk_sp<SkImageFilter> input, const CropRect*);
+ void flatten(SkWriteBuffer& buffer) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* source, const Context&,
+ SkIPoint* offset) const override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> makeFragmentProcessor(GrTexture*, const SkMatrix&,
+ const SkIRect* bounds,
+ BoundaryMode) const override;
+#endif
+
+private:
+ SkScalar fKS;
+ SkScalar fShininess;
+ friend class SkLightingImageFilter;
+ typedef SkLightingImageFilterInternal INHERITED;
+};
+
+#if SK_SUPPORT_GPU
+
+class GrLightingEffect : public GrSingleTextureEffect {
+public:
+ GrLightingEffect(GrTexture* texture, const SkImageFilterLight* light, SkScalar surfaceScale,
+ const SkMatrix& matrix, BoundaryMode boundaryMode, const SkIRect* srcBounds);
+ ~GrLightingEffect() override;
+
+ const SkImageFilterLight* light() const { return fLight; }
+ SkScalar surfaceScale() const { return fSurfaceScale; }
+ const SkMatrix& filterMatrix() const { return fFilterMatrix; }
+ BoundaryMode boundaryMode() const { return fBoundaryMode; }
+ const GrTextureDomain& domain() const { return fDomain; }
+
+protected:
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ // lighting shaders are complicated. We just throw up our hands.
+ inout->mulByUnknownFourComponents();
+ }
+
+private:
+ const SkImageFilterLight* fLight;
+ SkScalar fSurfaceScale;
+ SkMatrix fFilterMatrix;
+ BoundaryMode fBoundaryMode;
+ GrTextureDomain fDomain;
+
+ typedef GrSingleTextureEffect INHERITED;
+};
+
+class GrDiffuseLightingEffect : public GrLightingEffect {
+public:
+ static sk_sp<GrFragmentProcessor> Make(GrTexture* texture,
+ const SkImageFilterLight* light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ SkScalar kd,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds) {
+ return sk_sp<GrFragmentProcessor>(
+ new GrDiffuseLightingEffect(texture, light, surfaceScale, matrix, kd, boundaryMode,
+ srcBounds));
+ }
+
+ const char* name() const override { return "DiffuseLighting"; }
+
+ SkScalar kd() const { return fKD; }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ GrDiffuseLightingEffect(GrTexture* texture,
+ const SkImageFilterLight* light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ SkScalar kd,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds);
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+ typedef GrLightingEffect INHERITED;
+ SkScalar fKD;
+};
+
+class GrSpecularLightingEffect : public GrLightingEffect {
+public:
+ static sk_sp<GrFragmentProcessor> Make(GrTexture* texture,
+ const SkImageFilterLight* light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ SkScalar ks,
+ SkScalar shininess,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds) {
+ return sk_sp<GrFragmentProcessor>(
+ new GrSpecularLightingEffect(texture, light, surfaceScale, matrix, ks, shininess,
+ boundaryMode, srcBounds));
+ }
+
+ const char* name() const override { return "SpecularLighting"; }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ SkScalar ks() const { return fKS; }
+ SkScalar shininess() const { return fShininess; }
+
+private:
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ GrSpecularLightingEffect(GrTexture* texture,
+ const SkImageFilterLight* light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ SkScalar ks,
+ SkScalar shininess,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds);
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+ typedef GrLightingEffect INHERITED;
+ SkScalar fKS;
+ SkScalar fShininess;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLLight {
+public:
+ virtual ~GrGLLight() {}
+
+ /**
+ * This is called by GrGLLightingEffect::emitCode() before either of the two virtual functions
+ * below. It adds a vec3f uniform visible in the FS that represents the constant light color.
+ */
+ void emitLightColorUniform(GrGLSLUniformHandler*);
+
+ /**
+ * These two functions are called from GrGLLightingEffect's emitCode() function.
+ * emitSurfaceToLight places an expression in param out that is the vector from the surface to
+ * the light. The expression will be used in the FS. emitLightColor writes an expression into
+ * the FS that is the color of the light. Either function may add functions and/or uniforms to
+ * the FS. The default of emitLightColor appends the name of the constant light color uniform
+ * and so this function only needs to be overridden if the light color varies spatially.
+ */
+ virtual void emitSurfaceToLight(GrGLSLUniformHandler*,
+ GrGLSLFPFragmentBuilder*,
+ const char* z) = 0;
+ virtual void emitLightColor(GrGLSLUniformHandler*,
+ GrGLSLFPFragmentBuilder*,
+ const char *surfaceToLight);
+
+ // This is called from GrGLLightingEffect's setData(). Subclasses of GrGLLight must call
+ // INHERITED::setData().
+ virtual void setData(const GrGLSLProgramDataManager&, const SkImageFilterLight* light) const;
+
+protected:
+ /**
+ * Gets the constant light color uniform. Subclasses can use this in their emitLightColor
+ * function.
+ */
+ UniformHandle lightColorUni() const { return fColorUni; }
+
+private:
+ UniformHandle fColorUni;
+
+ typedef SkRefCnt INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLDistantLight : public GrGLLight {
+public:
+ virtual ~GrGLDistantLight() {}
+ void setData(const GrGLSLProgramDataManager&, const SkImageFilterLight* light) const override;
+ void emitSurfaceToLight(GrGLSLUniformHandler*, GrGLSLFPFragmentBuilder*, const char* z) override;
+
+private:
+ typedef GrGLLight INHERITED;
+ UniformHandle fDirectionUni;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLPointLight : public GrGLLight {
+public:
+ virtual ~GrGLPointLight() {}
+ void setData(const GrGLSLProgramDataManager&, const SkImageFilterLight* light) const override;
+ void emitSurfaceToLight(GrGLSLUniformHandler*, GrGLSLFPFragmentBuilder*, const char* z) override;
+
+private:
+ typedef GrGLLight INHERITED;
+ UniformHandle fLocationUni;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLSpotLight : public GrGLLight {
+public:
+ virtual ~GrGLSpotLight() {}
+ void setData(const GrGLSLProgramDataManager&, const SkImageFilterLight* light) const override;
+ void emitSurfaceToLight(GrGLSLUniformHandler*, GrGLSLFPFragmentBuilder*, const char* z) override;
+ void emitLightColor(GrGLSLUniformHandler*,
+ GrGLSLFPFragmentBuilder*,
+ const char *surfaceToLight) override;
+
+private:
+ typedef GrGLLight INHERITED;
+
+ SkString fLightColorFunc;
+ UniformHandle fLocationUni;
+ UniformHandle fExponentUni;
+ UniformHandle fCosOuterConeAngleUni;
+ UniformHandle fCosInnerConeAngleUni;
+ UniformHandle fConeScaleUni;
+ UniformHandle fSUni;
+};
+#else
+
+class GrGLLight;
+
+#endif
+
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkImageFilterLight : public SkRefCnt {
+public:
+
+
+ enum LightType {
+ kDistant_LightType,
+ kPoint_LightType,
+ kSpot_LightType,
+ };
+ virtual LightType type() const = 0;
+ const SkPoint3& color() const { return fColor; }
+ virtual GrGLLight* createGLLight() const = 0;
+ virtual bool isEqual(const SkImageFilterLight& other) const {
+ return fColor == other.fColor;
+ }
+ // Returns true if the generated GrGLLight will require access to the fragment position.
+ virtual bool requiresFragmentPosition() const = 0;
+ virtual SkImageFilterLight* transform(const SkMatrix& matrix) const = 0;
+
+ // Defined below SkImageFilterLight's subclasses.
+ void flattenLight(SkWriteBuffer& buffer) const;
+ static SkImageFilterLight* UnflattenLight(SkReadBuffer& buffer);
+
+protected:
+ SkImageFilterLight(SkColor color) {
+ fColor = SkPoint3::Make(SkIntToScalar(SkColorGetR(color)),
+ SkIntToScalar(SkColorGetG(color)),
+ SkIntToScalar(SkColorGetB(color)));
+ }
+ SkImageFilterLight(const SkPoint3& color)
+ : fColor(color) {}
+ SkImageFilterLight(SkReadBuffer& buffer) {
+ fColor = readPoint3(buffer);
+ }
+
+ virtual void onFlattenLight(SkWriteBuffer& buffer) const = 0;
+
+
+private:
+ typedef SkRefCnt INHERITED;
+ SkPoint3 fColor;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkDistantLight : public SkImageFilterLight {
+public:
+ SkDistantLight(const SkPoint3& direction, SkColor color)
+ : INHERITED(color), fDirection(direction) {
+ }
+
+ SkPoint3 surfaceToLight(int x, int y, int z, SkScalar surfaceScale) const {
+ return fDirection;
+ }
+ const SkPoint3& lightColor(const SkPoint3&) const { return this->color(); }
+ LightType type() const override { return kDistant_LightType; }
+ const SkPoint3& direction() const { return fDirection; }
+ GrGLLight* createGLLight() const override {
+#if SK_SUPPORT_GPU
+ return new GrGLDistantLight;
+#else
+ SkDEBUGFAIL("Should not call in GPU-less build");
+ return nullptr;
+#endif
+ }
+ bool requiresFragmentPosition() const override { return false; }
+
+ bool isEqual(const SkImageFilterLight& other) const override {
+ if (other.type() != kDistant_LightType) {
+ return false;
+ }
+
+ const SkDistantLight& o = static_cast<const SkDistantLight&>(other);
+ return INHERITED::isEqual(other) &&
+ fDirection == o.fDirection;
+ }
+
+ SkDistantLight(SkReadBuffer& buffer) : INHERITED(buffer) {
+ fDirection = readPoint3(buffer);
+ }
+
+protected:
+ SkDistantLight(const SkPoint3& direction, const SkPoint3& color)
+ : INHERITED(color), fDirection(direction) {
+ }
+ SkImageFilterLight* transform(const SkMatrix& matrix) const override {
+ return new SkDistantLight(direction(), color());
+ }
+ void onFlattenLight(SkWriteBuffer& buffer) const override {
+ writePoint3(fDirection, buffer);
+ }
+
+private:
+ SkPoint3 fDirection;
+
+ typedef SkImageFilterLight INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkPointLight : public SkImageFilterLight {
+public:
+ SkPointLight(const SkPoint3& location, SkColor color)
+ : INHERITED(color), fLocation(location) {}
+
+ SkPoint3 surfaceToLight(int x, int y, int z, SkScalar surfaceScale) const {
+ SkPoint3 direction = SkPoint3::Make(fLocation.fX - SkIntToScalar(x),
+ fLocation.fY - SkIntToScalar(y),
+ fLocation.fZ - SkScalarMul(SkIntToScalar(z),
+ surfaceScale));
+ fast_normalize(&direction);
+ return direction;
+ }
+ const SkPoint3& lightColor(const SkPoint3&) const { return this->color(); }
+ LightType type() const override { return kPoint_LightType; }
+ const SkPoint3& location() const { return fLocation; }
+ GrGLLight* createGLLight() const override {
+#if SK_SUPPORT_GPU
+ return new GrGLPointLight;
+#else
+ SkDEBUGFAIL("Should not call in GPU-less build");
+ return nullptr;
+#endif
+ }
+ bool requiresFragmentPosition() const override { return true; }
+ bool isEqual(const SkImageFilterLight& other) const override {
+ if (other.type() != kPoint_LightType) {
+ return false;
+ }
+ const SkPointLight& o = static_cast<const SkPointLight&>(other);
+ return INHERITED::isEqual(other) &&
+ fLocation == o.fLocation;
+ }
+ SkImageFilterLight* transform(const SkMatrix& matrix) const override {
+ SkPoint location2 = SkPoint::Make(fLocation.fX, fLocation.fY);
+ matrix.mapPoints(&location2, 1);
+ // Use X scale and Y scale on Z and average the result
+ SkPoint locationZ = SkPoint::Make(fLocation.fZ, fLocation.fZ);
+ matrix.mapVectors(&locationZ, 1);
+ SkPoint3 location = SkPoint3::Make(location2.fX,
+ location2.fY,
+ SkScalarAve(locationZ.fX, locationZ.fY));
+ return new SkPointLight(location, color());
+ }
+
+ SkPointLight(SkReadBuffer& buffer) : INHERITED(buffer) {
+ fLocation = readPoint3(buffer);
+ }
+
+protected:
+ SkPointLight(const SkPoint3& location, const SkPoint3& color)
+ : INHERITED(color), fLocation(location) {}
+ void onFlattenLight(SkWriteBuffer& buffer) const override {
+ writePoint3(fLocation, buffer);
+ }
+
+private:
+ SkPoint3 fLocation;
+
+ typedef SkImageFilterLight INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkSpotLight : public SkImageFilterLight {
+public:
+ SkSpotLight(const SkPoint3& location,
+ const SkPoint3& target,
+ SkScalar specularExponent,
+ SkScalar cutoffAngle,
+ SkColor color)
+ : INHERITED(color),
+ fLocation(location),
+ fTarget(target),
+ fSpecularExponent(SkScalarPin(specularExponent, kSpecularExponentMin, kSpecularExponentMax))
+ {
+ fS = target - location;
+ fast_normalize(&fS);
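+ // The outer cone comes from the cutoff angle; the inner cone sits antiAliasThreshold closer
+ // in cosine space so the spot edge can be smoothed over that band, and fConeScale is the
+ // reciprocal of the band width.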
+ fCosOuterConeAngle = SkScalarCos(SkDegreesToRadians(cutoffAngle));
+ const SkScalar antiAliasThreshold = 0.016f;
+ fCosInnerConeAngle = fCosOuterConeAngle + antiAliasThreshold;
+ fConeScale = SkScalarInvert(antiAliasThreshold);
+ }
+
+ SkImageFilterLight* transform(const SkMatrix& matrix) const override {
+ SkPoint location2 = SkPoint::Make(fLocation.fX, fLocation.fY);
+ matrix.mapPoints(&location2, 1);
+ // Use X scale and Y scale on Z and average the result
+ SkPoint locationZ = SkPoint::Make(fLocation.fZ, fLocation.fZ);
+ matrix.mapVectors(&locationZ, 1);
+ SkPoint3 location = SkPoint3::Make(location2.fX, location2.fY,
+ SkScalarAve(locationZ.fX, locationZ.fY));
+ SkPoint target2 = SkPoint::Make(fTarget.fX, fTarget.fY);
+ matrix.mapPoints(&target2, 1);
+ SkPoint targetZ = SkPoint::Make(fTarget.fZ, fTarget.fZ);
+ matrix.mapVectors(&targetZ, 1);
+ SkPoint3 target = SkPoint3::Make(target2.fX, target2.fY,
+ SkScalarAve(targetZ.fX, targetZ.fY));
+ SkPoint3 s = target - location;
+ fast_normalize(&s);
+ return new SkSpotLight(location,
+ target,
+ fSpecularExponent,
+ fCosOuterConeAngle,
+ fCosInnerConeAngle,
+ fConeScale,
+ s,
+ color());
+ }
+
+ SkPoint3 surfaceToLight(int x, int y, int z, SkScalar surfaceScale) const {
+ SkPoint3 direction = SkPoint3::Make(fLocation.fX - SkIntToScalar(x),
+ fLocation.fY - SkIntToScalar(y),
+ fLocation.fZ - SkScalarMul(SkIntToScalar(z),
+ surfaceScale));
+ fast_normalize(&direction);
+ return direction;
+ }
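+ // Spot falloff: zero outside the outer cone, pow(cosAngle, specularExponent) inside it, with
+ // a linear ramp over the band between the outer and inner cone angles.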
+ SkPoint3 lightColor(const SkPoint3& surfaceToLight) const {
+ SkScalar cosAngle = -surfaceToLight.dot(fS);
+ SkScalar scale = 0;
+ if (cosAngle >= fCosOuterConeAngle) {
+ scale = SkScalarPow(cosAngle, fSpecularExponent);
+ if (cosAngle < fCosInnerConeAngle) {
+ scale = SkScalarMul(scale, cosAngle - fCosOuterConeAngle);
+ scale *= fConeScale;
+ }
+ }
+ return this->color().makeScale(scale);
+ }
+ GrGLLight* createGLLight() const override {
+#if SK_SUPPORT_GPU
+ return new GrGLSpotLight;
+#else
+ SkDEBUGFAIL("Should not call in GPU-less build");
+ return nullptr;
+#endif
+ }
+ bool requiresFragmentPosition() const override { return true; }
+ LightType type() const override { return kSpot_LightType; }
+ const SkPoint3& location() const { return fLocation; }
+ const SkPoint3& target() const { return fTarget; }
+ SkScalar specularExponent() const { return fSpecularExponent; }
+ SkScalar cosInnerConeAngle() const { return fCosInnerConeAngle; }
+ SkScalar cosOuterConeAngle() const { return fCosOuterConeAngle; }
+ SkScalar coneScale() const { return fConeScale; }
+ const SkPoint3& s() const { return fS; }
+
+ SkSpotLight(SkReadBuffer& buffer) : INHERITED(buffer) {
+ fLocation = readPoint3(buffer);
+ fTarget = readPoint3(buffer);
+ fSpecularExponent = buffer.readScalar();
+ fCosOuterConeAngle = buffer.readScalar();
+ fCosInnerConeAngle = buffer.readScalar();
+ fConeScale = buffer.readScalar();
+ fS = readPoint3(buffer);
+ buffer.validate(SkScalarIsFinite(fSpecularExponent) &&
+ SkScalarIsFinite(fCosOuterConeAngle) &&
+ SkScalarIsFinite(fCosInnerConeAngle) &&
+ SkScalarIsFinite(fConeScale));
+ }
+protected:
+ SkSpotLight(const SkPoint3& location,
+ const SkPoint3& target,
+ SkScalar specularExponent,
+ SkScalar cosOuterConeAngle,
+ SkScalar cosInnerConeAngle,
+ SkScalar coneScale,
+ const SkPoint3& s,
+ const SkPoint3& color)
+ : INHERITED(color),
+ fLocation(location),
+ fTarget(target),
+ fSpecularExponent(specularExponent),
+ fCosOuterConeAngle(cosOuterConeAngle),
+ fCosInnerConeAngle(cosInnerConeAngle),
+ fConeScale(coneScale),
+ fS(s)
+ {
+ }
+ void onFlattenLight(SkWriteBuffer& buffer) const override {
+ writePoint3(fLocation, buffer);
+ writePoint3(fTarget, buffer);
+ buffer.writeScalar(fSpecularExponent);
+ buffer.writeScalar(fCosOuterConeAngle);
+ buffer.writeScalar(fCosInnerConeAngle);
+ buffer.writeScalar(fConeScale);
+ writePoint3(fS, buffer);
+ }
+
+ bool isEqual(const SkImageFilterLight& other) const override {
+ if (other.type() != kSpot_LightType) {
+ return false;
+ }
+
+ const SkSpotLight& o = static_cast<const SkSpotLight&>(other);
+ return INHERITED::isEqual(other) &&
+ fLocation == o.fLocation &&
+ fTarget == o.fTarget &&
+ fSpecularExponent == o.fSpecularExponent &&
+ fCosOuterConeAngle == o.fCosOuterConeAngle;
+ }
+
+private:
+ static const SkScalar kSpecularExponentMin;
+ static const SkScalar kSpecularExponentMax;
+
+ SkPoint3 fLocation;
+ SkPoint3 fTarget;
+ SkScalar fSpecularExponent;
+ SkScalar fCosOuterConeAngle;
+ SkScalar fCosInnerConeAngle;
+ SkScalar fConeScale;
+ SkPoint3 fS;
+
+ typedef SkImageFilterLight INHERITED;
+};
+
+// According to the spec, the specular exponent should be in the range [1, 128]:
+// http://www.w3.org/TR/SVG/filters.html#feSpecularLightingSpecularExponentAttribute
+const SkScalar SkSpotLight::kSpecularExponentMin = 1.0f;
+const SkScalar SkSpotLight::kSpecularExponentMax = 128.0f;
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkImageFilterLight::flattenLight(SkWriteBuffer& buffer) const {
+ // Write type first, then baseclass, then subclass.
+ buffer.writeInt(this->type());
+ writePoint3(fColor, buffer);
+ this->onFlattenLight(buffer);
+}
+
+/*static*/ SkImageFilterLight* SkImageFilterLight::UnflattenLight(SkReadBuffer& buffer) {
+ // Read type first.
+ const SkImageFilterLight::LightType type = (SkImageFilterLight::LightType)buffer.readInt();
+ switch (type) {
+ // Each of these constructors must first call SkImageFilterLight's constructor, so we read
+ // the base class first and then the subclass, in the same order as flattenLight().
+ case SkImageFilterLight::kDistant_LightType:
+ return new SkDistantLight(buffer);
+ case SkImageFilterLight::kPoint_LightType:
+ return new SkPointLight(buffer);
+ case SkImageFilterLight::kSpot_LightType:
+ return new SkSpotLight(buffer);
+ default:
+ SkDEBUGFAIL("Unknown LightType.");
+ buffer.validate(false);
+ return nullptr;
+ }
+}
+///////////////////////////////////////////////////////////////////////////////
+
+SkLightingImageFilter::SkLightingImageFilter(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ sk_sp<SkImageFilter> input, const CropRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fLight(std::move(light))
+ , fSurfaceScale(surfaceScale / 255) {
+}
+
+SkLightingImageFilter::~SkLightingImageFilter() {}
+
+sk_sp<SkImageFilter> SkLightingImageFilter::MakeDistantLitDiffuse(const SkPoint3& direction,
+ SkColor lightColor,
+ SkScalar surfaceScale,
+ SkScalar kd,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect) {
+ sk_sp<SkImageFilterLight> light(new SkDistantLight(direction, lightColor));
+ return SkDiffuseLightingImageFilter::Make(std::move(light), surfaceScale, kd,
+ std::move(input), cropRect);
+}
+
+sk_sp<SkImageFilter> SkLightingImageFilter::MakePointLitDiffuse(const SkPoint3& location,
+ SkColor lightColor,
+ SkScalar surfaceScale,
+ SkScalar kd,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect) {
+ sk_sp<SkImageFilterLight> light(new SkPointLight(location, lightColor));
+ return SkDiffuseLightingImageFilter::Make(std::move(light), surfaceScale, kd,
+ std::move(input), cropRect);
+}
+
+sk_sp<SkImageFilter> SkLightingImageFilter::MakeSpotLitDiffuse(const SkPoint3& location,
+ const SkPoint3& target,
+ SkScalar specularExponent,
+ SkScalar cutoffAngle,
+ SkColor lightColor,
+ SkScalar surfaceScale,
+ SkScalar kd,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect) {
+ sk_sp<SkImageFilterLight> light(
+ new SkSpotLight(location, target, specularExponent, cutoffAngle, lightColor));
+ return SkDiffuseLightingImageFilter::Make(std::move(light), surfaceScale, kd,
+ std::move(input), cropRect);
+}
+
+sk_sp<SkImageFilter> SkLightingImageFilter::MakeDistantLitSpecular(const SkPoint3& direction,
+ SkColor lightColor,
+ SkScalar surfaceScale,
+ SkScalar ks,
+ SkScalar shine,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect) {
+ sk_sp<SkImageFilterLight> light(new SkDistantLight(direction, lightColor));
+ return SkSpecularLightingImageFilter::Make(std::move(light), surfaceScale, ks, shine,
+ std::move(input), cropRect);
+}
+
+sk_sp<SkImageFilter> SkLightingImageFilter::MakePointLitSpecular(const SkPoint3& location,
+ SkColor lightColor,
+ SkScalar surfaceScale,
+ SkScalar ks,
+ SkScalar shine,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect) {
+ sk_sp<SkImageFilterLight> light(new SkPointLight(location, lightColor));
+ return SkSpecularLightingImageFilter::Make(std::move(light), surfaceScale, ks, shine,
+ std::move(input), cropRect);
+}
+
+sk_sp<SkImageFilter> SkLightingImageFilter::MakeSpotLitSpecular(const SkPoint3& location,
+ const SkPoint3& target,
+ SkScalar specularExponent,
+ SkScalar cutoffAngle,
+ SkColor lightColor,
+ SkScalar surfaceScale,
+ SkScalar ks,
+ SkScalar shine,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect) {
+ sk_sp<SkImageFilterLight> light(
+ new SkSpotLight(location, target, specularExponent, cutoffAngle, lightColor));
+ return SkSpecularLightingImageFilter::Make(std::move(light), surfaceScale, ks, shine,
+ std::move(input), cropRect);
+}
+
+void SkLightingImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ fLight->flattenLight(buffer);
+ buffer.writeScalar(fSurfaceScale * 255);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImageFilter> SkDiffuseLightingImageFilter::Make(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ SkScalar kd,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect) {
+ if (!light) {
+ return nullptr;
+ }
+ if (!SkScalarIsFinite(surfaceScale) || !SkScalarIsFinite(kd)) {
+ return nullptr;
+ }
+ // According to the spec, kd can be any non-negative number:
+ // http://www.w3.org/TR/SVG/filters.html#feDiffuseLightingElement
+ if (kd < 0) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkDiffuseLightingImageFilter(std::move(light), surfaceScale,
+ kd, std::move(input), cropRect));
+}
+
+SkDiffuseLightingImageFilter::SkDiffuseLightingImageFilter(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ SkScalar kd,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect)
+ : INHERITED(std::move(light), surfaceScale, std::move(input), cropRect)
+ , fKD(kd) {
+}
+
+sk_sp<SkFlattenable> SkDiffuseLightingImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ sk_sp<SkImageFilterLight> light(SkImageFilterLight::UnflattenLight(buffer));
+ SkScalar surfaceScale = buffer.readScalar();
+ SkScalar kd = buffer.readScalar();
+ return Make(std::move(light), surfaceScale, kd, common.getInput(0), &common.cropRect());
+}
+
+void SkDiffuseLightingImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeScalar(fKD);
+}
+
+sk_sp<SkSpecialImage> SkDiffuseLightingImageFilter::onFilterImage(SkSpecialImage* source,
+ const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, source, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ const SkIRect inputBounds = SkIRect::MakeXYWH(inputOffset.x(), inputOffset.y(),
+ input->width(), input->height());
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, inputBounds, &bounds)) {
+ return nullptr;
+ }
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ bounds.offset(-inputOffset);
+
+#if SK_SUPPORT_GPU
+ if (source->isTextureBacked()) {
+ SkMatrix matrix(ctx.ctm());
+ matrix.postTranslate(SkIntToScalar(-offset->fX), SkIntToScalar(-offset->fY));
+
+ return this->filterImageGPU(source, input.get(), bounds, matrix, ctx.outputProperties());
+ }
+#endif
+
+ if (bounds.width() < 2 || bounds.height() < 2) {
+ return nullptr;
+ }
+
+ SkBitmap inputBM;
+
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if (inputBM.colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+
+ SkAutoLockPixels alp(inputBM);
+ if (!inputBM.getPixels()) {
+ return nullptr;
+ }
+
+ const SkImageInfo info = SkImageInfo::MakeN32Premul(bounds.width(), bounds.height());
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ SkAutoLockPixels dstLock(dst);
+
+ SkMatrix matrix(ctx.ctm());
+ matrix.postTranslate(SkIntToScalar(-inputOffset.x()), SkIntToScalar(-inputOffset.y()));
+
+ sk_sp<SkImageFilterLight> transformedLight(light()->transform(matrix));
+
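+ // Raster path: dispatch on the transformed light's type so the templated lightBitmap()
+ // routine is instantiated for the concrete light class.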
+ DiffuseLightingType lightingType(fKD);
+ switch (transformedLight->type()) {
+ case SkImageFilterLight::kDistant_LightType:
+ lightBitmap<DiffuseLightingType, SkDistantLight>(lightingType,
+ transformedLight.get(),
+ inputBM,
+ &dst,
+ surfaceScale(),
+ bounds);
+ break;
+ case SkImageFilterLight::kPoint_LightType:
+ lightBitmap<DiffuseLightingType, SkPointLight>(lightingType,
+ transformedLight.get(),
+ inputBM,
+ &dst,
+ surfaceScale(),
+ bounds);
+ break;
+ case SkImageFilterLight::kSpot_LightType:
+ lightBitmap<DiffuseLightingType, SkSpotLight>(lightingType,
+ transformedLight.get(),
+ inputBM,
+ &dst,
+ surfaceScale(),
+ bounds);
+ break;
+ }
+
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(bounds.width(), bounds.height()),
+ dst);
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkDiffuseLightingImageFilter::toString(SkString* str) const {
+ str->appendf("SkDiffuseLightingImageFilter: (");
+ str->appendf("kD: %f\n", fKD);
+ str->append(")");
+}
+#endif
+
+#if SK_SUPPORT_GPU
+sk_sp<GrFragmentProcessor> SkDiffuseLightingImageFilter::makeFragmentProcessor(
+ GrTexture* texture,
+ const SkMatrix& matrix,
+ const SkIRect* srcBounds,
+ BoundaryMode boundaryMode) const {
+ SkScalar scale = SkScalarMul(this->surfaceScale(), SkIntToScalar(255));
+ return GrDiffuseLightingEffect::Make(texture, this->light(), scale, matrix, this->kd(),
+ boundaryMode, srcBounds);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImageFilter> SkSpecularLightingImageFilter::Make(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ SkScalar ks,
+ SkScalar shininess,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect) {
+ if (!light) {
+ return nullptr;
+ }
+ if (!SkScalarIsFinite(surfaceScale) || !SkScalarIsFinite(ks) || !SkScalarIsFinite(shininess)) {
+ return nullptr;
+ }
+ // According to the spec, ks can be any non-negative number:
+ // http://www.w3.org/TR/SVG/filters.html#feSpecularLightingElement
+ if (ks < 0) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkSpecularLightingImageFilter(std::move(light), surfaceScale,
+ ks, shininess,
+ std::move(input), cropRect));
+}
+
+SkSpecularLightingImageFilter::SkSpecularLightingImageFilter(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ SkScalar ks,
+ SkScalar shininess,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect)
+ : INHERITED(std::move(light), surfaceScale, std::move(input), cropRect)
+ , fKS(ks)
+ , fShininess(shininess) {
+}
+
+sk_sp<SkFlattenable> SkSpecularLightingImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ sk_sp<SkImageFilterLight> light(SkImageFilterLight::UnflattenLight(buffer));
+ SkScalar surfaceScale = buffer.readScalar();
+ SkScalar ks = buffer.readScalar();
+ SkScalar shine = buffer.readScalar();
+ return Make(std::move(light), surfaceScale, ks, shine, common.getInput(0),
+ &common.cropRect());
+}
+
+void SkSpecularLightingImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeScalar(fKS);
+ buffer.writeScalar(fShininess);
+}
+
+sk_sp<SkSpecialImage> SkSpecularLightingImageFilter::onFilterImage(SkSpecialImage* source,
+ const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, source, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ const SkIRect inputBounds = SkIRect::MakeXYWH(inputOffset.x(), inputOffset.y(),
+ input->width(), input->height());
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, inputBounds, &bounds)) {
+ return nullptr;
+ }
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ bounds.offset(-inputOffset);
+
+#if SK_SUPPORT_GPU
+ if (source->isTextureBacked()) {
+ SkMatrix matrix(ctx.ctm());
+ matrix.postTranslate(SkIntToScalar(-offset->fX), SkIntToScalar(-offset->fY));
+
+ return this->filterImageGPU(source, input.get(), bounds, matrix, ctx.outputProperties());
+ }
+#endif
+
+ if (bounds.width() < 2 || bounds.height() < 2) {
+ return nullptr;
+ }
+
+ SkBitmap inputBM;
+
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if (inputBM.colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+
+ SkAutoLockPixels alp(inputBM);
+ if (!inputBM.getPixels()) {
+ return nullptr;
+ }
+
+ const SkImageInfo info = SkImageInfo::MakeN32Premul(bounds.width(), bounds.height());
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ SkAutoLockPixels dstLock(dst);
+
+ SpecularLightingType lightingType(fKS, fShininess);
+
+ SkMatrix matrix(ctx.ctm());
+ matrix.postTranslate(SkIntToScalar(-inputOffset.x()), SkIntToScalar(-inputOffset.y()));
+
+ sk_sp<SkImageFilterLight> transformedLight(light()->transform(matrix));
+
+ switch (transformedLight->type()) {
+ case SkImageFilterLight::kDistant_LightType:
+ lightBitmap<SpecularLightingType, SkDistantLight>(lightingType,
+ transformedLight.get(),
+ inputBM,
+ &dst,
+ surfaceScale(),
+ bounds);
+ break;
+ case SkImageFilterLight::kPoint_LightType:
+ lightBitmap<SpecularLightingType, SkPointLight>(lightingType,
+ transformedLight.get(),
+ inputBM,
+ &dst,
+ surfaceScale(),
+ bounds);
+ break;
+ case SkImageFilterLight::kSpot_LightType:
+ lightBitmap<SpecularLightingType, SkSpotLight>(lightingType,
+ transformedLight.get(),
+ inputBM,
+ &dst,
+ surfaceScale(),
+ bounds);
+ break;
+ }
+
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(bounds.width(), bounds.height()), dst);
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkSpecularLightingImageFilter::toString(SkString* str) const {
+ str->appendf("SkSpecularLightingImageFilter: (");
+ str->appendf("kS: %f shininess: %f", fKS, fShininess);
+ str->append(")");
+}
+#endif
+
+#if SK_SUPPORT_GPU
+sk_sp<GrFragmentProcessor> SkSpecularLightingImageFilter::makeFragmentProcessor(
+ GrTexture* texture,
+ const SkMatrix& matrix,
+ const SkIRect* srcBounds,
+ BoundaryMode boundaryMode) const {
+ SkScalar scale = SkScalarMul(this->surfaceScale(), SkIntToScalar(255));
+ return GrSpecularLightingEffect::Make(texture, this->light(), scale, matrix, this->ks(),
+ this->shininess(), boundaryMode, srcBounds);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+namespace {
+SkPoint3 random_point3(SkRandom* random) {
+ return SkPoint3::Make(SkScalarToFloat(random->nextSScalar1()),
+ SkScalarToFloat(random->nextSScalar1()),
+ SkScalarToFloat(random->nextSScalar1()));
+}
+
+SkImageFilterLight* create_random_light(SkRandom* random) {
+ int type = random->nextULessThan(3);
+ switch (type) {
+ case 0: {
+ return new SkDistantLight(random_point3(random), random->nextU());
+ }
+ case 1: {
+ return new SkPointLight(random_point3(random), random->nextU());
+ }
+ case 2: {
+ return new SkSpotLight(random_point3(random), random_point3(random),
+ random->nextUScalar1(), random->nextUScalar1(), random->nextU());
+ }
+ default:
+ SkFAIL("Unexpected value.");
+ return nullptr;
+ }
+}
+
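+// Emits the GLSL body of the normal() function for the given boundary mode. The 3x3 alpha
+// neighborhood m[0..8] is run through two Sobel-style filters (one per axis) whose taps and
+// normalization depend on which image edge(s) the pixel touches, and pointToNormal() converts
+// the result into a surface normal.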
+SkString emitNormalFunc(BoundaryMode mode,
+ const char* pointToNormalName,
+ const char* sobelFuncName) {
+ SkString result;
+ switch (mode) {
+ case kTopLeft_BoundaryMode:
+ result.printf("\treturn %s(%s(0.0, 0.0, m[4], m[5], m[7], m[8], %g),\n"
+ "\t %s(0.0, 0.0, m[4], m[7], m[5], m[8], %g),\n"
+ "\t surfaceScale);\n",
+ pointToNormalName, sobelFuncName, gTwoThirds,
+ sobelFuncName, gTwoThirds);
+ break;
+ case kTop_BoundaryMode:
+ result.printf("\treturn %s(%s(0.0, 0.0, m[3], m[5], m[6], m[8], %g),\n"
+ "\t %s(0.0, 0.0, m[4], m[7], m[5], m[8], %g),\n"
+ "\t surfaceScale);\n",
+ pointToNormalName, sobelFuncName, gOneThird,
+ sobelFuncName, gOneHalf);
+ break;
+ case kTopRight_BoundaryMode:
+ result.printf("\treturn %s(%s( 0.0, 0.0, m[3], m[4], m[6], m[7], %g),\n"
+ "\t %s(m[3], m[6], m[4], m[7], 0.0, 0.0, %g),\n"
+ "\t surfaceScale);\n",
+ pointToNormalName, sobelFuncName, gTwoThirds,
+ sobelFuncName, gTwoThirds);
+ break;
+ case kLeft_BoundaryMode:
+ result.printf("\treturn %s(%s(m[1], m[2], m[4], m[5], m[7], m[8], %g),\n"
+ "\t %s( 0.0, 0.0, m[1], m[7], m[2], m[8], %g),\n"
+ "\t surfaceScale);\n",
+ pointToNormalName, sobelFuncName, gOneHalf,
+ sobelFuncName, gOneThird);
+ break;
+ case kInterior_BoundaryMode:
+ result.printf("\treturn %s(%s(m[0], m[2], m[3], m[5], m[6], m[8], %g),\n"
+ "\t %s(m[0], m[6], m[1], m[7], m[2], m[8], %g),\n"
+ "\t surfaceScale);\n",
+ pointToNormalName, sobelFuncName, gOneQuarter,
+ sobelFuncName, gOneQuarter);
+ break;
+ case kRight_BoundaryMode:
+ result.printf("\treturn %s(%s(m[0], m[1], m[3], m[4], m[6], m[7], %g),\n"
+ "\t %s(m[0], m[6], m[1], m[7], 0.0, 0.0, %g),\n"
+ "\t surfaceScale);\n",
+ pointToNormalName, sobelFuncName, gOneHalf,
+ sobelFuncName, gOneThird);
+ break;
+ case kBottomLeft_BoundaryMode:
+ result.printf("\treturn %s(%s(m[1], m[2], m[4], m[5], 0.0, 0.0, %g),\n"
+ "\t %s( 0.0, 0.0, m[1], m[4], m[2], m[5], %g),\n"
+ "\t surfaceScale);\n",
+ pointToNormalName, sobelFuncName, gTwoThirds,
+ sobelFuncName, gTwoThirds);
+ break;
+ case kBottom_BoundaryMode:
+ result.printf("\treturn %s(%s(m[0], m[2], m[3], m[5], 0.0, 0.0, %g),\n"
+ "\t %s(m[0], m[3], m[1], m[4], m[2], m[5], %g),\n"
+ "\t surfaceScale);\n",
+ pointToNormalName, sobelFuncName, gOneThird,
+ sobelFuncName, gOneHalf);
+ break;
+ case kBottomRight_BoundaryMode:
+ result.printf("\treturn %s(%s(m[0], m[1], m[3], m[4], 0.0, 0.0, %g),\n"
+ "\t %s(m[0], m[3], m[1], m[4], 0.0, 0.0, %g),\n"
+ "\t surfaceScale);\n",
+ pointToNormalName, sobelFuncName, gTwoThirds,
+ sobelFuncName, gTwoThirds);
+ break;
+ default:
+ SkASSERT(false);
+ break;
+ }
+ return result;
+}
+
+}
+
+class GrGLLightingEffect : public GrGLSLFragmentProcessor {
+public:
+ GrGLLightingEffect() : fLight(nullptr) { }
+ virtual ~GrGLLightingEffect() { delete fLight; }
+
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrGLSLCaps&, GrProcessorKeyBuilder* b);
+
+protected:
+ /**
+ * Subclasses of GrGLLightingEffect must call INHERITED::onSetData().
+ */
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+ virtual void emitLightFunc(GrGLSLUniformHandler*,
+ GrGLSLFPFragmentBuilder*,
+ SkString* funcName) = 0;
+
+private:
+ typedef GrGLSLFragmentProcessor INHERITED;
+
+ UniformHandle fImageIncrementUni;
+ UniformHandle fSurfaceScaleUni;
+ GrTextureDomain::GLDomain fDomain;
+ GrGLLight* fLight;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLDiffuseLightingEffect : public GrGLLightingEffect {
+public:
+ void emitLightFunc(GrGLSLUniformHandler*, GrGLSLFPFragmentBuilder*, SkString* funcName) override;
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+private:
+ typedef GrGLLightingEffect INHERITED;
+
+ UniformHandle fKDUni;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLSpecularLightingEffect : public GrGLLightingEffect {
+public:
+ void emitLightFunc(GrGLSLUniformHandler*, GrGLSLFPFragmentBuilder*, SkString* funcName) override;
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+private:
+ typedef GrGLLightingEffect INHERITED;
+
+ UniformHandle fKSUni;
+ UniformHandle fShininessUni;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
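+// Builds the texture domain used when sampling the 3x3 neighborhood: a texel domain over
+// srcBounds in the requested mode when bounds are supplied, otherwise a domain that is ignored.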
+GrTextureDomain create_domain(GrTexture* texture, const SkIRect* srcBounds,
+ GrTextureDomain::Mode mode) {
+ if (srcBounds) {
+ SkRect texelDomain = GrTextureDomain::MakeTexelDomainForMode(texture, *srcBounds, mode);
+ return GrTextureDomain(texelDomain, mode);
+ } else {
+ return GrTextureDomain(SkRect::MakeEmpty(), GrTextureDomain::kIgnore_Mode);
+ }
+}
+
+};
+
+GrLightingEffect::GrLightingEffect(GrTexture* texture,
+ const SkImageFilterLight* light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds)
+ : INHERITED(texture, nullptr, GrCoordTransform::MakeDivByTextureWHMatrix(texture))
+ , fLight(light)
+ , fSurfaceScale(surfaceScale)
+ , fFilterMatrix(matrix)
+ , fBoundaryMode(boundaryMode)
+ , fDomain(create_domain(texture, srcBounds, GrTextureDomain::kDecal_Mode)) {
+ fLight->ref();
+ if (light->requiresFragmentPosition()) {
+ this->setWillReadFragmentPosition();
+ }
+}
+
+GrLightingEffect::~GrLightingEffect() {
+ fLight->unref();
+}
+
+bool GrLightingEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrLightingEffect& s = sBase.cast<GrLightingEffect>();
+ return fLight->isEqual(*s.fLight) &&
+ fSurfaceScale == s.fSurfaceScale &&
+ fBoundaryMode == s.fBoundaryMode;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrDiffuseLightingEffect::GrDiffuseLightingEffect(GrTexture* texture,
+ const SkImageFilterLight* light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ SkScalar kd,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds)
+ : INHERITED(texture, light, surfaceScale, matrix, boundaryMode, srcBounds), fKD(kd) {
+ this->initClassID<GrDiffuseLightingEffect>();
+}
+
+bool GrDiffuseLightingEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrDiffuseLightingEffect& s = sBase.cast<GrDiffuseLightingEffect>();
+ return INHERITED::onIsEqual(sBase) && this->kd() == s.kd();
+}
+
+void GrDiffuseLightingEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLDiffuseLightingEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrDiffuseLightingEffect::onCreateGLSLInstance() const {
+ return new GrGLDiffuseLightingEffect;
+}
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrDiffuseLightingEffect);
+
+sk_sp<GrFragmentProcessor> GrDiffuseLightingEffect::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx :
+ GrProcessorUnitTest::kAlphaTextureIdx;
+ GrTexture* tex = d->fTextures[texIdx];
+ SkScalar surfaceScale = d->fRandom->nextSScalar1();
+ SkScalar kd = d->fRandom->nextUScalar1();
+ SkAutoTUnref<SkImageFilterLight> light(create_random_light(d->fRandom));
+ SkMatrix matrix;
+ for (int i = 0; i < 9; i++) {
+ matrix[i] = d->fRandom->nextUScalar1();
+ }
+ SkIRect srcBounds = SkIRect::MakeXYWH(d->fRandom->nextRangeU(0, tex->width()),
+ d->fRandom->nextRangeU(0, tex->height()),
+ d->fRandom->nextRangeU(0, tex->width()),
+ d->fRandom->nextRangeU(0, tex->height()));
+ BoundaryMode mode = static_cast<BoundaryMode>(d->fRandom->nextU() % kBoundaryModeCount);
+ return GrDiffuseLightingEffect::Make(tex, light, surfaceScale, matrix, kd, mode, &srcBounds);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGLLightingEffect::emitCode(EmitArgs& args) {
+ const GrLightingEffect& le = args.fFp.cast<GrLightingEffect>();
+ if (!fLight) {
+ fLight = le.light()->createGLLight();
+ }
+
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ fImageIncrementUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType, kDefault_GrSLPrecision,
+ "ImageIncrement");
+ fSurfaceScaleUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType, kDefault_GrSLPrecision,
+ "SurfaceScale");
+ fLight->emitLightColorUniform(uniformHandler);
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString lightFunc;
+ this->emitLightFunc(uniformHandler, fragBuilder, &lightFunc);
+ static const GrGLSLShaderVar gSobelArgs[] = {
+ GrGLSLShaderVar("a", kFloat_GrSLType),
+ GrGLSLShaderVar("b", kFloat_GrSLType),
+ GrGLSLShaderVar("c", kFloat_GrSLType),
+ GrGLSLShaderVar("d", kFloat_GrSLType),
+ GrGLSLShaderVar("e", kFloat_GrSLType),
+ GrGLSLShaderVar("f", kFloat_GrSLType),
+ GrGLSLShaderVar("scale", kFloat_GrSLType),
+ };
+ SkString sobelFuncName;
+ SkString coords2D = fragBuilder->ensureCoords2D(args.fTransformedCoords[0]);
+
+ fragBuilder->emitFunction(kFloat_GrSLType,
+ "sobel",
+ SK_ARRAY_COUNT(gSobelArgs),
+ gSobelArgs,
+ "\treturn (-a + b - 2.0 * c + 2.0 * d -e + f) * scale;\n",
+ &sobelFuncName);
+ static const GrGLSLShaderVar gPointToNormalArgs[] = {
+ GrGLSLShaderVar("x", kFloat_GrSLType),
+ GrGLSLShaderVar("y", kFloat_GrSLType),
+ GrGLSLShaderVar("scale", kFloat_GrSLType),
+ };
+ SkString pointToNormalName;
+ fragBuilder->emitFunction(kVec3f_GrSLType,
+ "pointToNormal",
+ SK_ARRAY_COUNT(gPointToNormalArgs),
+ gPointToNormalArgs,
+ "\treturn normalize(vec3(-x * scale, -y * scale, 1));\n",
+ &pointToNormalName);
+
+ static const GrGLSLShaderVar gInteriorNormalArgs[] = {
+ GrGLSLShaderVar("m", kFloat_GrSLType, 9),
+ GrGLSLShaderVar("surfaceScale", kFloat_GrSLType),
+ };
+ SkString normalBody = emitNormalFunc(le.boundaryMode(),
+ pointToNormalName.c_str(),
+ sobelFuncName.c_str());
+ SkString normalName;
+ fragBuilder->emitFunction(kVec3f_GrSLType,
+ "normal",
+ SK_ARRAY_COUNT(gInteriorNormalArgs),
+ gInteriorNormalArgs,
+ normalBody.c_str(),
+ &normalName);
+
+ fragBuilder->codeAppendf("\t\tvec2 coord = %s;\n", coords2D.c_str());
+ fragBuilder->codeAppend("\t\tfloat m[9];\n");
+
+ const char* imgInc = uniformHandler->getUniformCStr(fImageIncrementUni);
+ const char* surfScale = uniformHandler->getUniformCStr(fSurfaceScaleUni);
+
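+ // Sample the 3x3 alpha neighborhood around the current texel into m[0..8], routing each read
+ // through the texture domain so out-of-bounds samples follow the effect's domain mode.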
+ int index = 0;
+ for (int dy = 1; dy >= -1; dy--) {
+ for (int dx = -1; dx <= 1; dx++) {
+ SkString texCoords;
+ texCoords.appendf("coord + vec2(%d, %d) * %s", dx, dy, imgInc);
+ SkString temp;
+ temp.appendf("temp%d", index);
+ fragBuilder->codeAppendf("vec4 %s;", temp.c_str());
+ fDomain.sampleTexture(fragBuilder,
+ args.fUniformHandler,
+ args.fGLSLCaps,
+ le.domain(),
+ temp.c_str(),
+ texCoords,
+ args.fTexSamplers[0]);
+ fragBuilder->codeAppendf("m[%d] = %s.a;", index, temp.c_str());
+ index++;
+ }
+ }
+ fragBuilder->codeAppend("\t\tvec3 surfaceToLight = ");
+ SkString arg;
+ arg.appendf("%s * m[4]", surfScale);
+ fLight->emitSurfaceToLight(uniformHandler, fragBuilder, arg.c_str());
+ fragBuilder->codeAppend(";\n");
+ fragBuilder->codeAppendf("\t\t%s = %s(%s(m, %s), surfaceToLight, ",
+ args.fOutputColor, lightFunc.c_str(), normalName.c_str(), surfScale);
+ fLight->emitLightColor(uniformHandler, fragBuilder, "surfaceToLight");
+ fragBuilder->codeAppend(");\n");
+ SkString modulate;
+ GrGLSLMulVarBy4f(&modulate, args.fOutputColor, args.fInputColor);
+ fragBuilder->codeAppend(modulate.c_str());
+}
+
+void GrGLLightingEffect::GenKey(const GrProcessor& proc,
+ const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) {
+ const GrLightingEffect& lighting = proc.cast<GrLightingEffect>();
+ b->add32(lighting.boundaryMode() << 2 | lighting.light()->type());
+ b->add32(GrTextureDomain::GLDomain::DomainKey(lighting.domain()));
+}
+
+void GrGLLightingEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& proc) {
+ const GrLightingEffect& lighting = proc.cast<GrLightingEffect>();
+ if (!fLight) {
+ fLight = lighting.light()->createGLLight();
+ }
+
+ GrTexture* texture = lighting.texture(0);
+ float ySign = texture->origin() == kTopLeft_GrSurfaceOrigin ? -1.0f : 1.0f;
+ pdman.set2f(fImageIncrementUni, 1.0f / texture->width(), ySign / texture->height());
+ pdman.set1f(fSurfaceScaleUni, lighting.surfaceScale());
+ SkAutoTUnref<SkImageFilterLight> transformedLight(
+ lighting.light()->transform(lighting.filterMatrix()));
+ fDomain.setData(pdman, lighting.domain(), texture->origin());
+ fLight->setData(pdman, transformedLight);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGLDiffuseLightingEffect::emitLightFunc(GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ SkString* funcName) {
+ const char* kd;
+ fKDUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType, kDefault_GrSLPrecision,
+ "KD", &kd);
+
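+ // Diffuse lighting term: colorScale = kd * dot(N, L), clamped to [0, 1], scales the light
+ // color; alpha is emitted as 1, matching SVG feDiffuseLighting.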
+ static const GrGLSLShaderVar gLightArgs[] = {
+ GrGLSLShaderVar("normal", kVec3f_GrSLType),
+ GrGLSLShaderVar("surfaceToLight", kVec3f_GrSLType),
+ GrGLSLShaderVar("lightColor", kVec3f_GrSLType)
+ };
+ SkString lightBody;
+ lightBody.appendf("\tfloat colorScale = %s * dot(normal, surfaceToLight);\n", kd);
+ lightBody.appendf("\treturn vec4(lightColor * clamp(colorScale, 0.0, 1.0), 1.0);\n");
+ fragBuilder->emitFunction(kVec4f_GrSLType,
+ "light",
+ SK_ARRAY_COUNT(gLightArgs),
+ gLightArgs,
+ lightBody.c_str(),
+ funcName);
+}
+
+void GrGLDiffuseLightingEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& proc) {
+ INHERITED::onSetData(pdman, proc);
+ const GrDiffuseLightingEffect& diffuse = proc.cast<GrDiffuseLightingEffect>();
+ pdman.set1f(fKDUni, diffuse.kd());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrSpecularLightingEffect::GrSpecularLightingEffect(GrTexture* texture,
+ const SkImageFilterLight* light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ SkScalar ks,
+ SkScalar shininess,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds)
+ : INHERITED(texture, light, surfaceScale, matrix, boundaryMode, srcBounds)
+ , fKS(ks)
+ , fShininess(shininess) {
+ this->initClassID<GrSpecularLightingEffect>();
+}
+
+bool GrSpecularLightingEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrSpecularLightingEffect& s = sBase.cast<GrSpecularLightingEffect>();
+ return INHERITED::onIsEqual(sBase) &&
+ this->ks() == s.ks() &&
+ this->shininess() == s.shininess();
+}
+
+void GrSpecularLightingEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLSpecularLightingEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrSpecularLightingEffect::onCreateGLSLInstance() const {
+ return new GrGLSpecularLightingEffect;
+}
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrSpecularLightingEffect);
+
+sk_sp<GrFragmentProcessor> GrSpecularLightingEffect::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx :
+ GrProcessorUnitTest::kAlphaTextureIdx;
+ GrTexture* tex = d->fTextures[texIdx];
+ SkScalar surfaceScale = d->fRandom->nextSScalar1();
+ SkScalar ks = d->fRandom->nextUScalar1();
+ SkScalar shininess = d->fRandom->nextUScalar1();
+ SkAutoTUnref<SkImageFilterLight> light(create_random_light(d->fRandom));
+ SkMatrix matrix;
+ for (int i = 0; i < 9; i++) {
+ matrix[i] = d->fRandom->nextUScalar1();
+ }
+ BoundaryMode mode = static_cast<BoundaryMode>(d->fRandom->nextU() % kBoundaryModeCount);
+ SkIRect srcBounds = SkIRect::MakeXYWH(d->fRandom->nextRangeU(0, tex->width()),
+ d->fRandom->nextRangeU(0, tex->height()),
+ d->fRandom->nextRangeU(0, tex->width()),
+ d->fRandom->nextRangeU(0, tex->height()));
+ return GrSpecularLightingEffect::Make(d->fTextures[GrProcessorUnitTest::kAlphaTextureIdx],
+ light, surfaceScale, matrix, ks, shininess, mode,
+ &srcBounds);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGLSpecularLightingEffect::emitLightFunc(GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ SkString* funcName) {
+ const char* ks;
+ const char* shininess;
+
+ fKSUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType, kDefault_GrSLPrecision, "KS", &ks);
+ fShininessUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType,
+ kDefault_GrSLPrecision,
+ "Shininess",
+ &shininess);
+
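+ // Blinn-Phong specular term: H = normalize(L + (0, 0, 1)) and
+ // colorScale = ks * pow(dot(N, H), shininess), clamped to [0, 1]; the alpha channel is the max
+ // of the RGB channels so the result remains a valid premultiplied color.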
+ static const GrGLSLShaderVar gLightArgs[] = {
+ GrGLSLShaderVar("normal", kVec3f_GrSLType),
+ GrGLSLShaderVar("surfaceToLight", kVec3f_GrSLType),
+ GrGLSLShaderVar("lightColor", kVec3f_GrSLType)
+ };
+ SkString lightBody;
+ lightBody.appendf("\tvec3 halfDir = vec3(normalize(surfaceToLight + vec3(0, 0, 1)));\n");
+ lightBody.appendf("\tfloat colorScale = %s * pow(dot(normal, halfDir), %s);\n", ks, shininess);
+ lightBody.appendf("\tvec3 color = lightColor * clamp(colorScale, 0.0, 1.0);\n");
+ lightBody.appendf("\treturn vec4(color, max(max(color.r, color.g), color.b));\n");
+ fragBuilder->emitFunction(kVec4f_GrSLType,
+ "light",
+ SK_ARRAY_COUNT(gLightArgs),
+ gLightArgs,
+ lightBody.c_str(),
+ funcName);
+}
+
+void GrGLSpecularLightingEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& effect) {
+ INHERITED::onSetData(pdman, effect);
+ const GrSpecularLightingEffect& spec = effect.cast<GrSpecularLightingEffect>();
+ pdman.set1f(fKSUni, spec.ks());
+ pdman.set1f(fShininessUni, spec.shininess());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+void GrGLLight::emitLightColorUniform(GrGLSLUniformHandler* uniformHandler) {
+ fColorUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec3f_GrSLType, kDefault_GrSLPrecision,
+ "LightColor");
+}
+
+void GrGLLight::emitLightColor(GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ const char *surfaceToLight) {
+ fragBuilder->codeAppend(uniformHandler->getUniformCStr(this->lightColorUni()));
+}
+
+void GrGLLight::setData(const GrGLSLProgramDataManager& pdman,
+ const SkImageFilterLight* light) const {
+ setUniformPoint3(pdman, fColorUni,
+ light->color().makeScale(SkScalarInvert(SkIntToScalar(255))));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGLDistantLight::setData(const GrGLSLProgramDataManager& pdman,
+ const SkImageFilterLight* light) const {
+ INHERITED::setData(pdman, light);
+ SkASSERT(light->type() == SkImageFilterLight::kDistant_LightType);
+ const SkDistantLight* distantLight = static_cast<const SkDistantLight*>(light);
+ setUniformNormal3(pdman, fDirectionUni, distantLight->direction());
+}
+
+void GrGLDistantLight::emitSurfaceToLight(GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ const char* z) {
+ const char* dir;
+ fDirectionUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec3f_GrSLType, kDefault_GrSLPrecision,
+ "LightDirection", &dir);
+ fragBuilder->codeAppend(dir);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGLPointLight::setData(const GrGLSLProgramDataManager& pdman,
+ const SkImageFilterLight* light) const {
+ INHERITED::setData(pdman, light);
+ SkASSERT(light->type() == SkImageFilterLight::kPoint_LightType);
+ const SkPointLight* pointLight = static_cast<const SkPointLight*>(light);
+ setUniformPoint3(pdman, fLocationUni, pointLight->location());
+}
+
+void GrGLPointLight::emitSurfaceToLight(GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ const char* z) {
+ const char* loc;
+ fLocationUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec3f_GrSLType, kDefault_GrSLPrecision,
+ "LightLocation", &loc);
+ fragBuilder->codeAppendf("normalize(%s - vec3(%s.xy, %s))",
+ loc, fragBuilder->fragmentPosition(), z);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGLSpotLight::setData(const GrGLSLProgramDataManager& pdman,
+ const SkImageFilterLight* light) const {
+ INHERITED::setData(pdman, light);
+ SkASSERT(light->type() == SkImageFilterLight::kSpot_LightType);
+ const SkSpotLight* spotLight = static_cast<const SkSpotLight *>(light);
+ setUniformPoint3(pdman, fLocationUni, spotLight->location());
+ pdman.set1f(fExponentUni, spotLight->specularExponent());
+ pdman.set1f(fCosInnerConeAngleUni, spotLight->cosInnerConeAngle());
+ pdman.set1f(fCosOuterConeAngleUni, spotLight->cosOuterConeAngle());
+ pdman.set1f(fConeScaleUni, spotLight->coneScale());
+ setUniformNormal3(pdman, fSUni, spotLight->s());
+}
+
+void GrGLSpotLight::emitSurfaceToLight(GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ const char* z) {
+ const char* location;
+ fLocationUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec3f_GrSLType, kDefault_GrSLPrecision,
+ "LightLocation", &location);
+
+ fragBuilder->codeAppendf("normalize(%s - vec3(%s.xy, %s))",
+ location, fragBuilder->fragmentPosition(), z);
+}
+
+void GrGLSpotLight::emitLightColor(GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ const char *surfaceToLight) {
+
+ const char* color = uniformHandler->getUniformCStr(this->lightColorUni()); // created by parent class.
+
+ const char* exponent;
+ const char* cosInner;
+ const char* cosOuter;
+ const char* coneScale;
+ const char* s;
+ fExponentUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType, kDefault_GrSLPrecision,
+ "Exponent", &exponent);
+ fCosInnerConeAngleUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType, kDefault_GrSLPrecision,
+ "CosInnerConeAngle", &cosInner);
+ fCosOuterConeAngleUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType, kDefault_GrSLPrecision,
+ "CosOuterConeAngle", &cosOuter);
+ fConeScaleUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType, kDefault_GrSLPrecision,
+ "ConeScale", &coneScale);
+ fSUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec3f_GrSLType, kDefault_GrSLPrecision, "S", &s);
+
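+ // GLSL counterpart of SkSpotLight::lightColor(): black outside the outer cone,
+ // color * pow(cosAngle, exponent) inside it, with the linear cone-edge ramp applied between
+ // the outer and inner cone angles.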
+ static const GrGLSLShaderVar gLightColorArgs[] = {
+ GrGLSLShaderVar("surfaceToLight", kVec3f_GrSLType)
+ };
+ SkString lightColorBody;
+ lightColorBody.appendf("\tfloat cosAngle = -dot(surfaceToLight, %s);\n", s);
+ lightColorBody.appendf("\tif (cosAngle < %s) {\n", cosOuter);
+ lightColorBody.appendf("\t\treturn vec3(0);\n");
+ lightColorBody.appendf("\t}\n");
+ lightColorBody.appendf("\tfloat scale = pow(cosAngle, %s);\n", exponent);
+ lightColorBody.appendf("\tif (cosAngle < %s) {\n", cosInner);
+ lightColorBody.appendf("\t\treturn %s * scale * (cosAngle - %s) * %s;\n",
+ color, cosOuter, coneScale);
+ lightColorBody.appendf("\t}\n");
+ lightColorBody.appendf("\treturn %s;\n", color);
+ fragBuilder->emitFunction(kVec3f_GrSLType,
+ "lightColor",
+ SK_ARRAY_COUNT(gLightColorArgs),
+ gLightColorArgs,
+ lightColorBody.c_str(),
+ &fLightColorFunc);
+
+ fragBuilder->codeAppendf("%s(%s)", fLightColorFunc.c_str(), surfaceToLight);
+}
+
+#endif
+
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkLightingImageFilter)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkDiffuseLightingImageFilter)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkSpecularLightingImageFilter)
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END
diff --git a/gfx/skia/skia/src/effects/SkLumaColorFilter.cpp b/gfx/skia/skia/src/effects/SkLumaColorFilter.cpp
new file mode 100644
index 000000000..ec94eca76
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkLumaColorFilter.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkLumaColorFilter.h"
+
+#include "SkColorPriv.h"
+#include "SkString.h"
+
+#if SK_SUPPORT_GPU
+#include "GrContext.h"
+#include "GrInvariantOutput.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#endif
+
+void SkLumaColorFilter::filterSpan(const SkPMColor src[], int count,
+ SkPMColor dst[]) const {
+ for (int i = 0; i < count; ++i) {
+ SkPMColor c = src[i];
+
+ /*
+ * While LuminanceToAlpha is defined to operate on un-premultiplied
+ * inputs, due to the final alpha scaling it can be computed based on
+ * premultiplied components:
+ *
+ * LumA = (k1 * r / a + k2 * g / a + k3 * b / a) * a
+ * LumA = (k1 * r + k2 * g + k3 * b)
+ */
+ unsigned luma = SkComputeLuminance(SkGetPackedR32(c),
+ SkGetPackedG32(c),
+ SkGetPackedB32(c));
+ dst[i] = SkPackARGB32(luma, 0, 0, 0);
+ }
+}
+
+sk_sp<SkColorFilter> SkLumaColorFilter::Make() {
+ return sk_sp<SkColorFilter>(new SkLumaColorFilter);
+}
+
+SkLumaColorFilter::SkLumaColorFilter() : INHERITED() {}
+
+sk_sp<SkFlattenable> SkLumaColorFilter::CreateProc(SkReadBuffer&) {
+ return Make();
+}
+
+void SkLumaColorFilter::flatten(SkWriteBuffer&) const {}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkLumaColorFilter::toString(SkString* str) const {
+ str->append("SkLumaColorFilter ");
+}
+#endif
+
+#if SK_SUPPORT_GPU
+class LumaColorFilterEffect : public GrFragmentProcessor {
+public:
+ static sk_sp<GrFragmentProcessor> Make() {
+ return sk_sp<GrFragmentProcessor>(new LumaColorFilterEffect);
+ }
+
+ const char* name() const override { return "Luminance-to-Alpha"; }
+
+ class GLSLProcessor : public GrGLSLFragmentProcessor {
+ public:
+ static void GenKey(const GrProcessor&, const GrGLSLCaps&, GrProcessorKeyBuilder*) {}
+
+ void emitCode(EmitArgs& args) override {
+ if (nullptr == args.fInputColor) {
+ args.fInputColor = "vec4(1)";
+ }
+
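+ // Compute BT.709 luma from the input color and emit it as the alpha channel of an otherwise
+ // black result, mirroring the raster filterSpan() path above.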
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ fragBuilder->codeAppendf("\tfloat luma = dot(vec3(%f, %f, %f), %s.rgb);\n",
+ SK_ITU_BT709_LUM_COEFF_R,
+ SK_ITU_BT709_LUM_COEFF_G,
+ SK_ITU_BT709_LUM_COEFF_B,
+ args.fInputColor);
+ fragBuilder->codeAppendf("\t%s = vec4(0, 0, 0, luma);\n",
+ args.fOutputColor);
+
+ }
+
+ private:
+ typedef GrGLSLFragmentProcessor INHERITED;
+ };
+
+private:
+ LumaColorFilterEffect() {
+ this->initClassID<LumaColorFilterEffect>();
+ }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override {
+ return new GLSLProcessor;
+ }
+
+ virtual void onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const override {
+ GLSLProcessor::GenKey(*this, caps, b);
+ }
+
+ bool onIsEqual(const GrFragmentProcessor&) const override { return true; }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ // The RGB channels of the output are always black; the alpha value packed here is arbitrary
+ // since only the RGB component flags are set.
+ inout->setToOther(kRGB_GrColorComponentFlags, GrColorPackRGBA(0, 0, 0, 0),
+ GrInvariantOutput::kWill_ReadInput);
+ }
+};
+
+sk_sp<GrFragmentProcessor> SkLumaColorFilter::asFragmentProcessor(GrContext*) const {
+ return LumaColorFilterEffect::Make();
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/SkMagnifierImageFilter.cpp b/gfx/skia/skia/src/effects/SkMagnifierImageFilter.cpp
new file mode 100644
index 000000000..c546730e8
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkMagnifierImageFilter.cpp
@@ -0,0 +1,423 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMagnifierImageFilter.h"
+
+#include "SkBitmap.h"
+#include "SkColorPriv.h"
+#include "SkReadBuffer.h"
+#include "SkSpecialImage.h"
+#include "SkWriteBuffer.h"
+#include "SkValidationUtils.h"
+
+////////////////////////////////////////////////////////////////////////////////
+#if SK_SUPPORT_GPU
+#include "GrContext.h"
+#include "GrDrawContext.h"
+#include "GrInvariantOutput.h"
+#include "effects/GrSingleTextureEffect.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+
+class GrMagnifierEffect : public GrSingleTextureEffect {
+
+public:
+ static sk_sp<GrFragmentProcessor> Make(GrTexture* texture,
+ const SkRect& bounds,
+ float xOffset,
+ float yOffset,
+ float xInvZoom,
+ float yInvZoom,
+ float xInvInset,
+ float yInvInset) {
+ return sk_sp<GrFragmentProcessor>(new GrMagnifierEffect(texture, bounds,
+ xOffset, yOffset,
+ xInvZoom, yInvZoom,
+ xInvInset, yInvInset));
+ }
+
+ ~GrMagnifierEffect() override {}
+
+ const char* name() const override { return "Magnifier"; }
+
+ const SkRect& bounds() const { return fBounds; } // Bounds of source image.
+ // Offset to apply to zoomed pixels (srcRect position / texture size).
+ float xOffset() const { return fXOffset; }
+ float yOffset() const { return fYOffset; }
+
+ // Scale to apply to zoomed pixels (srcRect size / bounds size).
+ float xInvZoom() const { return fXInvZoom; }
+ float yInvZoom() const { return fYInvZoom; }
+
+ // 1/radius over which to transition from unzoomed to zoomed pixels (bounds size / inset).
+ float xInvInset() const { return fXInvInset; }
+ float yInvInset() const { return fYInvInset; }
+
+private:
+ GrMagnifierEffect(GrTexture* texture,
+ const SkRect& bounds,
+ float xOffset,
+ float yOffset,
+ float xInvZoom,
+ float yInvZoom,
+ float xInvInset,
+ float yInvInset)
+ : INHERITED(texture, nullptr, GrCoordTransform::MakeDivByTextureWHMatrix(texture))
+ , fBounds(bounds)
+ , fXOffset(xOffset)
+ , fYOffset(yOffset)
+ , fXInvZoom(xInvZoom)
+ , fYInvZoom(yInvZoom)
+ , fXInvInset(xInvInset)
+ , fYInvInset(yInvInset) {
+ this->initClassID<GrMagnifierEffect>();
+ }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ SkRect fBounds;
+ float fXOffset;
+ float fYOffset;
+ float fXInvZoom;
+ float fYInvZoom;
+ float fXInvInset;
+ float fYInvInset;
+
+ typedef GrSingleTextureEffect INHERITED;
+};
+
+// For brevity
+typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+
+class GrGLMagnifierEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+private:
+ UniformHandle fOffsetVar;
+ UniformHandle fInvZoomVar;
+ UniformHandle fInvInsetVar;
+ UniformHandle fBoundsVar;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GrGLMagnifierEffect::emitCode(EmitArgs& args) {
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ fOffsetVar = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType, kDefault_GrSLPrecision,
+ "Offset");
+ fInvZoomVar = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType, kDefault_GrSLPrecision,
+ "InvZoom");
+ fInvInsetVar = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType, kDefault_GrSLPrecision,
+ "InvInset");
+ fBoundsVar = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType, kDefault_GrSLPrecision,
+ "Bounds");
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString coords2D = fragBuilder->ensureCoords2D(args.fTransformedCoords[0]);
+ fragBuilder->codeAppendf("\t\tvec2 coord = %s;\n", coords2D.c_str());
+ fragBuilder->codeAppendf("\t\tvec2 zoom_coord = %s + %s * %s;\n",
+ uniformHandler->getUniformCStr(fOffsetVar),
+ coords2D.c_str(),
+ uniformHandler->getUniformCStr(fInvZoomVar));
+ const char* bounds = uniformHandler->getUniformCStr(fBoundsVar);
+ fragBuilder->codeAppendf("\t\tvec2 delta = (coord - %s.xy) * %s.zw;\n", bounds, bounds);
+ fragBuilder->codeAppendf("\t\tdelta = min(delta, vec2(1.0, 1.0) - delta);\n");
+ fragBuilder->codeAppendf("\t\tdelta = delta * %s;\n",
+ uniformHandler->getUniformCStr(fInvInsetVar));
+
+ fragBuilder->codeAppend("\t\tfloat weight = 0.0;\n");
+ fragBuilder->codeAppend("\t\tif (delta.s < 2.0 && delta.t < 2.0) {\n");
+ fragBuilder->codeAppend("\t\t\tdelta = vec2(2.0, 2.0) - delta;\n");
+ fragBuilder->codeAppend("\t\t\tfloat dist = length(delta);\n");
+ fragBuilder->codeAppend("\t\t\tdist = max(2.0 - dist, 0.0);\n");
+ fragBuilder->codeAppend("\t\t\tweight = min(dist * dist, 1.0);\n");
+ fragBuilder->codeAppend("\t\t} else {\n");
+ fragBuilder->codeAppend("\t\t\tvec2 delta_squared = delta * delta;\n");
+ fragBuilder->codeAppend("\t\t\tweight = min(min(delta_squared.x, delta_squared.y), 1.0);\n");
+ fragBuilder->codeAppend("\t\t}\n");
+
+ fragBuilder->codeAppend("\t\tvec2 mix_coord = mix(coord, zoom_coord, weight);\n");
+ fragBuilder->codeAppend("\t\tvec4 output_color = ");
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], "mix_coord");
+ fragBuilder->codeAppend(";\n");
+
+ fragBuilder->codeAppendf("\t\t%s = output_color;", args.fOutputColor);
+ SkString modulate;
+ GrGLSLMulVarBy4f(&modulate, args.fOutputColor, args.fInputColor);
+ fragBuilder->codeAppend(modulate.c_str());
+}
+
+void GrGLMagnifierEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& effect) {
+ const GrMagnifierEffect& zoom = effect.cast<GrMagnifierEffect>();
+ pdman.set2f(fOffsetVar, zoom.xOffset(), zoom.yOffset());
+ pdman.set2f(fInvZoomVar, zoom.xInvZoom(), zoom.yInvZoom());
+ pdman.set2f(fInvInsetVar, zoom.xInvInset(), zoom.yInvInset());
+ pdman.set4f(fBoundsVar, zoom.bounds().x(), zoom.bounds().y(),
+ zoom.bounds().width(), zoom.bounds().height());
+}
+
+/////////////////////////////////////////////////////////////////////
+
+void GrMagnifierEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLMagnifierEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrMagnifierEffect::onCreateGLSLInstance() const {
+ return new GrGLMagnifierEffect;
+}
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrMagnifierEffect);
+
+sk_sp<GrFragmentProcessor> GrMagnifierEffect::TestCreate(GrProcessorTestData* d) {
+ GrTexture* texture = d->fTextures[0];
+ const int kMaxWidth = 200;
+ const int kMaxHeight = 200;
+ const int kMaxInset = 20;
+ uint32_t width = d->fRandom->nextULessThan(kMaxWidth);
+ uint32_t height = d->fRandom->nextULessThan(kMaxHeight);
+ uint32_t x = d->fRandom->nextULessThan(kMaxWidth - width);
+ uint32_t y = d->fRandom->nextULessThan(kMaxHeight - height);
+ uint32_t inset = d->fRandom->nextULessThan(kMaxInset);
+
+ sk_sp<GrFragmentProcessor> effect(GrMagnifierEffect::Make(
+ texture,
+ SkRect::MakeWH(SkIntToScalar(kMaxWidth), SkIntToScalar(kMaxHeight)),
+ (float) width / texture->width(),
+ (float) height / texture->height(),
+ texture->width() / (float) x,
+ texture->height() / (float) y,
+ (float) inset / texture->width(),
+ (float) inset / texture->height()));
+ SkASSERT(effect);
+ return effect;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool GrMagnifierEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrMagnifierEffect& s = sBase.cast<GrMagnifierEffect>();
+ return (this->fBounds == s.fBounds &&
+ this->fXOffset == s.fXOffset &&
+ this->fYOffset == s.fYOffset &&
+ this->fXInvZoom == s.fXInvZoom &&
+ this->fYInvZoom == s.fYInvZoom &&
+ this->fXInvInset == s.fXInvInset &&
+ this->fYInvInset == s.fYInvInset);
+}
+
+void GrMagnifierEffect::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ this->updateInvariantOutputForModulation(inout);
+}
+
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImageFilter> SkMagnifierImageFilter::Make(const SkRect& srcRect, SkScalar inset,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect) {
+
+ if (!SkScalarIsFinite(inset) || !SkIsValidRect(srcRect)) {
+ return nullptr;
+ }
+ // Negative numbers in src rect are not supported
+ if (srcRect.fLeft < 0 || srcRect.fTop < 0) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkMagnifierImageFilter(srcRect, inset,
+ std::move(input),
+ cropRect));
+}
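+
+// A minimal usage sketch (the canvas, paint, and inset value below are
+// illustrative, not taken from any particular caller): magnify the top-left
+// 100x100 region of whatever is drawn through the filter, with a 10-pixel
+// transition border.
+//
+//   sk_sp<SkImageFilter> magnifier =
+//       SkMagnifierImageFilter::Make(SkRect::MakeWH(100, 100), 10.0f,
+//                                    nullptr, nullptr);
+//   SkPaint paint;
+//   paint.setImageFilter(std::move(magnifier));
+//   canvas->saveLayer(nullptr, &paint);
+//   // ... draw content ...
+//   canvas->restore();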
+
+
+SkMagnifierImageFilter::SkMagnifierImageFilter(const SkRect& srcRect,
+ SkScalar inset,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fSrcRect(srcRect)
+ , fInset(inset) {
+ SkASSERT(srcRect.x() >= 0 && srcRect.y() >= 0 && inset >= 0);
+}
+
+sk_sp<SkFlattenable> SkMagnifierImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkRect src;
+ buffer.readRect(&src);
+ return Make(src, buffer.readScalar(), common.getInput(0), &common.cropRect());
+}
+
+void SkMagnifierImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeRect(fSrcRect);
+ buffer.writeScalar(fInset);
+}
+
+sk_sp<SkSpecialImage> SkMagnifierImageFilter::onFilterImage(SkSpecialImage* source,
+ const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, source, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ const SkIRect inputBounds = SkIRect::MakeXYWH(inputOffset.x(), inputOffset.y(),
+ input->width(), input->height());
+
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, inputBounds, &bounds)) {
+ return nullptr;
+ }
+
+ SkScalar invInset = fInset > 0 ? SkScalarInvert(fInset) : SK_Scalar1;
+
+ SkScalar invXZoom = fSrcRect.width() / bounds.width();
+ SkScalar invYZoom = fSrcRect.height() / bounds.height();
+
+
+#if SK_SUPPORT_GPU
+ if (source->isTextureBacked()) {
+ GrContext* context = source->getContext();
+
+ sk_sp<GrTexture> inputTexture(input->asTextureRef(context));
+ SkASSERT(inputTexture);
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ bounds.offset(-inputOffset);
+
+ SkScalar yOffset = inputTexture->origin() == kTopLeft_GrSurfaceOrigin
+ ? fSrcRect.y()
+ : inputTexture->height() -
+ fSrcRect.height() * inputTexture->height() / bounds.height() - fSrcRect.y();
+ int boundsY = inputTexture->origin() == kTopLeft_GrSurfaceOrigin
+ ? bounds.y()
+ : inputTexture->height() - bounds.height();
+ SkRect effectBounds = SkRect::MakeXYWH(
+ SkIntToScalar(bounds.x()) / inputTexture->width(),
+ SkIntToScalar(boundsY) / inputTexture->height(),
+ SkIntToScalar(inputTexture->width()) / bounds.width(),
+ SkIntToScalar(inputTexture->height()) / bounds.height());
+ // SRGBTODO: Handle sRGB here
+ sk_sp<GrFragmentProcessor> fp(GrMagnifierEffect::Make(
+ inputTexture.get(),
+ effectBounds,
+ fSrcRect.x() / inputTexture->width(),
+ yOffset / inputTexture->height(),
+ invXZoom,
+ invYZoom,
+ bounds.width() * invInset,
+ bounds.height() * invInset));
+ if (!fp) {
+ return nullptr;
+ }
+
+ return DrawWithFP(context, std::move(fp), bounds, ctx.outputProperties());
+ }
+#endif
+
+ SkBitmap inputBM;
+
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if ((inputBM.colorType() != kN32_SkColorType) ||
+ (fSrcRect.width() >= inputBM.width()) || (fSrcRect.height() >= inputBM.height())) {
+ return nullptr;
+ }
+
+ SkAutoLockPixels alp(inputBM);
+ SkASSERT(inputBM.getPixels());
+ if (!inputBM.getPixels() || inputBM.width() <= 0 || inputBM.height() <= 0) {
+ return nullptr;
+ }
+
+ const SkImageInfo info = SkImageInfo::MakeN32Premul(bounds.width(), bounds.height());
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ SkAutoLockPixels dstLock(dst);
+
+ SkColor* dptr = dst.getAddr32(0, 0);
+ int dstWidth = dst.width(), dstHeight = dst.height();
+ for (int y = 0; y < dstHeight; ++y) {
+ for (int x = 0; x < dstWidth; ++x) {
+ SkScalar x_dist = SkMin32(x, dstWidth - x - 1) * invInset;
+ SkScalar y_dist = SkMin32(y, dstHeight - y - 1) * invInset;
+ SkScalar weight = 0;
+
+ static const SkScalar kScalar2 = SkScalar(2);
+
+ // To create a smooth curve at the corners, we need to work on
+ // a square twice the size of the inset.
+ if (x_dist < kScalar2 && y_dist < kScalar2) {
+ x_dist = kScalar2 - x_dist;
+ y_dist = kScalar2 - y_dist;
+
+ SkScalar dist = SkScalarSqrt(SkScalarSquare(x_dist) +
+ SkScalarSquare(y_dist));
+ dist = SkMaxScalar(kScalar2 - dist, 0);
+ weight = SkMinScalar(SkScalarSquare(dist), SK_Scalar1);
+ } else {
+ SkScalar sqDist = SkMinScalar(SkScalarSquare(x_dist),
+ SkScalarSquare(y_dist));
+ weight = SkMinScalar(sqDist, SK_Scalar1);
+ }
+
+ SkScalar x_interp = SkScalarMul(weight, (fSrcRect.x() + x * invXZoom)) +
+ (SK_Scalar1 - weight) * x;
+ SkScalar y_interp = SkScalarMul(weight, (fSrcRect.y() + y * invYZoom)) +
+ (SK_Scalar1 - weight) * y;
+
+ int x_val = SkTPin(bounds.x() + SkScalarFloorToInt(x_interp), 0, inputBM.width() - 1);
+ int y_val = SkTPin(bounds.y() + SkScalarFloorToInt(y_interp), 0, inputBM.height() - 1);
+
+ *dptr = *inputBM.getAddr32(x_val, y_val);
+ dptr++;
+ }
+ }
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(bounds.width(), bounds.height()),
+ dst);
+}
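+
+// Worked example of the weight falloff above (the numbers are illustrative):
+// with inset = 10 (invInset = 0.1), a pixel 30 px from every edge has
+// x_dist = y_dist = 3.0, so weight = 1 and the fully zoomed coordinate is
+// used. A pixel 5 px from the left edge but far from the other edges has
+// x_dist = 0.5, giving weight = min(0.5 * 0.5, 1) = 0.25, a 25% / 75% blend
+// of zoomed and unzoomed coordinates. In a corner with x_dist = y_dist = 0.5,
+// both distances are below 2, dist = length(vec2(1.5, 1.5)) is about 2.12,
+// so weight clamps to 0 and the source pixel passes through unchanged.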
+
+#ifndef SK_IGNORE_TO_STRING
+void SkMagnifierImageFilter::toString(SkString* str) const {
+ str->appendf("SkMagnifierImageFilter: (");
+ str->appendf("src: (%f,%f,%f,%f) ",
+ fSrcRect.fLeft, fSrcRect.fTop, fSrcRect.fRight, fSrcRect.fBottom);
+ str->appendf("inset: %f", fInset);
+ str->append(")");
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/SkMatrixConvolutionImageFilter.cpp b/gfx/skia/skia/src/effects/SkMatrixConvolutionImageFilter.cpp
new file mode 100644
index 000000000..5477d7ab7
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkMatrixConvolutionImageFilter.cpp
@@ -0,0 +1,422 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMatrixConvolutionImageFilter.h"
+#include "SkBitmap.h"
+#include "SkColorPriv.h"
+#include "SkReadBuffer.h"
+#include "SkSpecialImage.h"
+#include "SkSpecialSurface.h"
+#include "SkWriteBuffer.h"
+#include "SkRect.h"
+#include "SkUnPreMultiply.h"
+
+#if SK_SUPPORT_GPU
+#include "GrContext.h"
+#include "GrDrawContext.h"
+#include "effects/GrMatrixConvolutionEffect.h"
+#endif
+
+// We need to be able to read at most SK_MaxS32 bytes, so divide that
+// by the size of a scalar to know how many scalars we can read.
+static const int32_t gMaxKernelSize = SK_MaxS32 / sizeof(SkScalar);
+
+SkMatrixConvolutionImageFilter::SkMatrixConvolutionImageFilter(const SkISize& kernelSize,
+ const SkScalar* kernel,
+ SkScalar gain,
+ SkScalar bias,
+ const SkIPoint& kernelOffset,
+ TileMode tileMode,
+ bool convolveAlpha,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fKernelSize(kernelSize)
+ , fGain(gain)
+ , fBias(bias)
+ , fKernelOffset(kernelOffset)
+ , fTileMode(tileMode)
+ , fConvolveAlpha(convolveAlpha) {
+ size_t size = (size_t) sk_64_mul(fKernelSize.width(), fKernelSize.height());
+ fKernel = new SkScalar[size];
+ memcpy(fKernel, kernel, size * sizeof(SkScalar));
+ SkASSERT(kernelSize.fWidth >= 1 && kernelSize.fHeight >= 1);
+ SkASSERT(kernelOffset.fX >= 0 && kernelOffset.fX < kernelSize.fWidth);
+ SkASSERT(kernelOffset.fY >= 0 && kernelOffset.fY < kernelSize.fHeight);
+}
+
+sk_sp<SkImageFilter> SkMatrixConvolutionImageFilter::Make(const SkISize& kernelSize,
+ const SkScalar* kernel,
+ SkScalar gain,
+ SkScalar bias,
+ const SkIPoint& kernelOffset,
+ TileMode tileMode,
+ bool convolveAlpha,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect) {
+ if (kernelSize.width() < 1 || kernelSize.height() < 1) {
+ return nullptr;
+ }
+ if (gMaxKernelSize / kernelSize.fWidth < kernelSize.fHeight) {
+ return nullptr;
+ }
+ if (!kernel) {
+ return nullptr;
+ }
+ if ((kernelOffset.fX < 0) || (kernelOffset.fX >= kernelSize.fWidth) ||
+ (kernelOffset.fY < 0) || (kernelOffset.fY >= kernelSize.fHeight)) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkMatrixConvolutionImageFilter(kernelSize, kernel, gain,
+ bias, kernelOffset,
+ tileMode, convolveAlpha,
+ std::move(input), cropRect));
+}
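+
+// A minimal usage sketch (the kernel values and names are illustrative): a
+// 3x3 sharpen kernel with unit gain, zero bias, and the center tap as the
+// kernel offset.
+//
+//   const SkScalar kernel[9] = { 0, -1,  0,
+//                               -1,  5, -1,
+//                                0, -1,  0 };
+//   sk_sp<SkImageFilter> sharpen = SkMatrixConvolutionImageFilter::Make(
+//       SkISize::Make(3, 3), kernel, SK_Scalar1, 0, SkIPoint::Make(1, 1),
+//       SkMatrixConvolutionImageFilter::kClamp_TileMode,
+//       true /* convolveAlpha */, nullptr, nullptr);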
+
+sk_sp<SkFlattenable> SkMatrixConvolutionImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkISize kernelSize;
+ kernelSize.fWidth = buffer.readInt();
+ kernelSize.fHeight = buffer.readInt();
+ const int count = buffer.getArrayCount();
+
+ const int64_t kernelArea = sk_64_mul(kernelSize.width(), kernelSize.height());
+ if (!buffer.validate(kernelArea == count)) {
+ return nullptr;
+ }
+ SkAutoSTArray<16, SkScalar> kernel(count);
+ if (!buffer.readScalarArray(kernel.get(), count)) {
+ return nullptr;
+ }
+ SkScalar gain = buffer.readScalar();
+ SkScalar bias = buffer.readScalar();
+ SkIPoint kernelOffset;
+ kernelOffset.fX = buffer.readInt();
+ kernelOffset.fY = buffer.readInt();
+ TileMode tileMode = (TileMode)buffer.readInt();
+ bool convolveAlpha = buffer.readBool();
+ return Make(kernelSize, kernel.get(), gain, bias, kernelOffset, tileMode,
+ convolveAlpha, common.getInput(0), &common.cropRect());
+}
+
+void SkMatrixConvolutionImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeInt(fKernelSize.fWidth);
+ buffer.writeInt(fKernelSize.fHeight);
+ buffer.writeScalarArray(fKernel, fKernelSize.fWidth * fKernelSize.fHeight);
+ buffer.writeScalar(fGain);
+ buffer.writeScalar(fBias);
+ buffer.writeInt(fKernelOffset.fX);
+ buffer.writeInt(fKernelOffset.fY);
+ buffer.writeInt((int) fTileMode);
+ buffer.writeBool(fConvolveAlpha);
+}
+
+SkMatrixConvolutionImageFilter::~SkMatrixConvolutionImageFilter() {
+ delete[] fKernel;
+}
+
+class UncheckedPixelFetcher {
+public:
+ static inline SkPMColor fetch(const SkBitmap& src, int x, int y, const SkIRect& bounds) {
+ return *src.getAddr32(x, y);
+ }
+};
+
+class ClampPixelFetcher {
+public:
+ static inline SkPMColor fetch(const SkBitmap& src, int x, int y, const SkIRect& bounds) {
+ x = SkTPin(x, bounds.fLeft, bounds.fRight - 1);
+ y = SkTPin(y, bounds.fTop, bounds.fBottom - 1);
+ return *src.getAddr32(x, y);
+ }
+};
+
+class RepeatPixelFetcher {
+public:
+ static inline SkPMColor fetch(const SkBitmap& src, int x, int y, const SkIRect& bounds) {
+ x = (x - bounds.left()) % bounds.width() + bounds.left();
+ y = (y - bounds.top()) % bounds.height() + bounds.top();
+ if (x < bounds.left()) {
+ x += bounds.width();
+ }
+ if (y < bounds.top()) {
+ y += bounds.height();
+ }
+ return *src.getAddr32(x, y);
+ }
+};
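+
+// Worked example of the wrapping above (the values are illustrative): for
+// bounds spanning x in [10, 20), a sample at x = 23 maps to
+// (23 - 10) % 10 + 10 = 13, while x = 7 first yields (7 - 10) % 10 + 10 = 7
+// (the C++ remainder is negative), which is still left of the bounds, so the
+// if (x < bounds.left()) adjustment adds the width and gives 17.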
+
+class ClampToBlackPixelFetcher {
+public:
+ static inline SkPMColor fetch(const SkBitmap& src, int x, int y, const SkIRect& bounds) {
+ if (x < bounds.fLeft || x >= bounds.fRight || y < bounds.fTop || y >= bounds.fBottom) {
+ return 0;
+ } else {
+ return *src.getAddr32(x, y);
+ }
+ }
+};
+
+template<class PixelFetcher, bool convolveAlpha>
+void SkMatrixConvolutionImageFilter::filterPixels(const SkBitmap& src,
+ SkBitmap* result,
+ const SkIRect& r,
+ const SkIRect& bounds) const {
+ SkIRect rect(r);
+ if (!rect.intersect(bounds)) {
+ return;
+ }
+ for (int y = rect.fTop; y < rect.fBottom; ++y) {
+ SkPMColor* dptr = result->getAddr32(rect.fLeft - bounds.fLeft, y - bounds.fTop);
+ for (int x = rect.fLeft; x < rect.fRight; ++x) {
+ SkScalar sumA = 0, sumR = 0, sumG = 0, sumB = 0;
+ for (int cy = 0; cy < fKernelSize.fHeight; cy++) {
+ for (int cx = 0; cx < fKernelSize.fWidth; cx++) {
+ SkPMColor s = PixelFetcher::fetch(src,
+ x + cx - fKernelOffset.fX,
+ y + cy - fKernelOffset.fY,
+ bounds);
+ SkScalar k = fKernel[cy * fKernelSize.fWidth + cx];
+ if (convolveAlpha) {
+ sumA += SkScalarMul(SkIntToScalar(SkGetPackedA32(s)), k);
+ }
+ sumR += SkScalarMul(SkIntToScalar(SkGetPackedR32(s)), k);
+ sumG += SkScalarMul(SkIntToScalar(SkGetPackedG32(s)), k);
+ sumB += SkScalarMul(SkIntToScalar(SkGetPackedB32(s)), k);
+ }
+ }
+ int a = convolveAlpha
+ ? SkClampMax(SkScalarFloorToInt(SkScalarMul(sumA, fGain) + fBias), 255)
+ : 255;
+ int r = SkClampMax(SkScalarFloorToInt(SkScalarMul(sumR, fGain) + fBias), a);
+ int g = SkClampMax(SkScalarFloorToInt(SkScalarMul(sumG, fGain) + fBias), a);
+ int b = SkClampMax(SkScalarFloorToInt(SkScalarMul(sumB, fGain) + fBias), a);
+ if (!convolveAlpha) {
+ a = SkGetPackedA32(PixelFetcher::fetch(src, x, y, bounds));
+ *dptr++ = SkPreMultiplyARGB(a, r, g, b);
+ } else {
+ *dptr++ = SkPackARGB32(a, r, g, b);
+ }
+ }
+ }
+}
+
+template<class PixelFetcher>
+void SkMatrixConvolutionImageFilter::filterPixels(const SkBitmap& src,
+ SkBitmap* result,
+ const SkIRect& rect,
+ const SkIRect& bounds) const {
+ if (fConvolveAlpha) {
+ filterPixels<PixelFetcher, true>(src, result, rect, bounds);
+ } else {
+ filterPixels<PixelFetcher, false>(src, result, rect, bounds);
+ }
+}
+
+void SkMatrixConvolutionImageFilter::filterInteriorPixels(const SkBitmap& src,
+ SkBitmap* result,
+ const SkIRect& rect,
+ const SkIRect& bounds) const {
+ filterPixels<UncheckedPixelFetcher>(src, result, rect, bounds);
+}
+
+void SkMatrixConvolutionImageFilter::filterBorderPixels(const SkBitmap& src,
+ SkBitmap* result,
+ const SkIRect& rect,
+ const SkIRect& bounds) const {
+ switch (fTileMode) {
+ case kClamp_TileMode:
+ filterPixels<ClampPixelFetcher>(src, result, rect, bounds);
+ break;
+ case kRepeat_TileMode:
+ filterPixels<RepeatPixelFetcher>(src, result, rect, bounds);
+ break;
+ case kClampToBlack_TileMode:
+ filterPixels<ClampToBlackPixelFetcher>(src, result, rect, bounds);
+ break;
+ }
+}
+
+// FIXME: This should be refactored to SkImageFilterUtils for
+// use by other filters. For now, we assume the input is always
+// premultiplied and unpremultiply it here.
+static SkBitmap unpremultiply_bitmap(const SkBitmap& src)
+{
+ SkAutoLockPixels alp(src);
+ if (!src.getPixels()) {
+ return SkBitmap();
+ }
+
+ const SkImageInfo info = SkImageInfo::MakeN32(src.width(), src.height(), src.alphaType());
+ SkBitmap result;
+ if (!result.tryAllocPixels(info)) {
+ return SkBitmap();
+ }
+ SkAutoLockPixels resultLock(result);
+ for (int y = 0; y < src.height(); ++y) {
+ const uint32_t* srcRow = src.getAddr32(0, y);
+ uint32_t* dstRow = result.getAddr32(0, y);
+ for (int x = 0; x < src.width(); ++x) {
+ dstRow[x] = SkUnPreMultiply::PMColorToColor(srcRow[x]);
+ }
+ }
+ return result;
+}
+
+#if SK_SUPPORT_GPU
+
+static GrTextureDomain::Mode convert_tilemodes(SkMatrixConvolutionImageFilter::TileMode tileMode) {
+ switch (tileMode) {
+ case SkMatrixConvolutionImageFilter::kClamp_TileMode:
+ return GrTextureDomain::kClamp_Mode;
+ case SkMatrixConvolutionImageFilter::kRepeat_TileMode:
+ return GrTextureDomain::kRepeat_Mode;
+ case SkMatrixConvolutionImageFilter::kClampToBlack_TileMode:
+ return GrTextureDomain::kDecal_Mode;
+ default:
+ SkASSERT(false);
+ }
+ return GrTextureDomain::kIgnore_Mode;
+}
+
+#endif
+
+sk_sp<SkSpecialImage> SkMatrixConvolutionImageFilter::onFilterImage(SkSpecialImage* source,
+ const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, source, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ SkIRect bounds;
+ input = this->applyCropRect(this->mapContext(ctx), input.get(), &inputOffset, &bounds);
+ if (!input) {
+ return nullptr;
+ }
+
+#if SK_SUPPORT_GPU
+ // Note: if the kernel is too big, the GPU path falls back to SW
+ if (source->isTextureBacked() &&
+ fKernelSize.width() * fKernelSize.height() <= MAX_KERNEL_SIZE) {
+ GrContext* context = source->getContext();
+
+ sk_sp<GrTexture> inputTexture(input->asTextureRef(context));
+ SkASSERT(inputTexture);
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ bounds.offset(-inputOffset);
+
+ // SRGBTODO: handle sRGB here
+ sk_sp<GrFragmentProcessor> fp(GrMatrixConvolutionEffect::Make(inputTexture.get(),
+ bounds,
+ fKernelSize,
+ fKernel,
+ fGain,
+ fBias,
+ fKernelOffset,
+ convert_tilemodes(fTileMode),
+ fConvolveAlpha));
+ if (!fp) {
+ return nullptr;
+ }
+
+ return DrawWithFP(context, std::move(fp), bounds, ctx.outputProperties());
+ }
+#endif
+
+ SkBitmap inputBM;
+
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if (inputBM.colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+
+ if (!fConvolveAlpha && !inputBM.isOpaque()) {
+ inputBM = unpremultiply_bitmap(inputBM);
+ }
+
+ SkAutoLockPixels alp(inputBM);
+ if (!inputBM.getPixels()) {
+ return nullptr;
+ }
+
+ const SkImageInfo info = SkImageInfo::MakeN32(bounds.width(), bounds.height(),
+ inputBM.alphaType());
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ SkAutoLockPixels dstLock(dst);
+
+ offset->fX = bounds.fLeft;
+ offset->fY = bounds.fTop;
+ bounds.offset(-inputOffset);
+ SkIRect interior = SkIRect::MakeXYWH(bounds.left() + fKernelOffset.fX,
+ bounds.top() + fKernelOffset.fY,
+ bounds.width() - fKernelSize.fWidth + 1,
+ bounds.height() - fKernelSize.fHeight + 1);
+ SkIRect top = SkIRect::MakeLTRB(bounds.left(), bounds.top(), bounds.right(), interior.top());
+ SkIRect bottom = SkIRect::MakeLTRB(bounds.left(), interior.bottom(),
+ bounds.right(), bounds.bottom());
+ SkIRect left = SkIRect::MakeLTRB(bounds.left(), interior.top(),
+ interior.left(), interior.bottom());
+ SkIRect right = SkIRect::MakeLTRB(interior.right(), interior.top(),
+ bounds.right(), interior.bottom());
+ this->filterBorderPixels(inputBM, &dst, top, bounds);
+ this->filterBorderPixels(inputBM, &dst, left, bounds);
+ this->filterInteriorPixels(inputBM, &dst, interior, bounds);
+ this->filterBorderPixels(inputBM, &dst, right, bounds);
+ this->filterBorderPixels(inputBM, &dst, bottom, bounds);
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(bounds.width(), bounds.height()),
+ dst);
+}
+
+SkIRect SkMatrixConvolutionImageFilter::onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection direction) const {
+ SkIRect dst = src;
+ int w = fKernelSize.width() - 1, h = fKernelSize.height() - 1;
+ dst.fRight += w;
+ dst.fBottom += h;
+ if (kReverse_MapDirection == direction) {
+ dst.offset(-fKernelOffset);
+ } else {
+ dst.offset(fKernelOffset - SkIPoint::Make(w, h));
+ }
+ return dst;
+}
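+
+// Worked example (the sizes are illustrative): for a 3x3 kernel with
+// kernelOffset = (1, 1), w = h = 2, so the rect first grows by 2 on the right
+// and bottom. In the forward direction it is then shifted by
+// (1, 1) - (2, 2) = (-1, -1), so the output bounds extend the source by one
+// pixel on every side; in the reverse direction the shift is also (-1, -1),
+// describing which source pixels a destination rect needs to read.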
+
+bool SkMatrixConvolutionImageFilter::affectsTransparentBlack() const {
+ // Because the kernel is applied in device-space, we have no idea what
+ // pixels it will affect in object-space.
+ return true;
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkMatrixConvolutionImageFilter::toString(SkString* str) const {
+ str->appendf("SkMatrixConvolutionImageFilter: (");
+ str->appendf("size: (%d,%d) kernel: (", fKernelSize.width(), fKernelSize.height());
+ for (int y = 0; y < fKernelSize.height(); y++) {
+ for (int x = 0; x < fKernelSize.width(); x++) {
+ str->appendf("%f ", fKernel[y * fKernelSize.width() + x]);
+ }
+ }
+ str->appendf(")");
+ str->appendf("gain: %f bias: %f ", fGain, fBias);
+ str->appendf("offset: (%d, %d) ", fKernelOffset.fX, fKernelOffset.fY);
+ str->appendf("convolveAlpha: %s", fConvolveAlpha ? "true" : "false");
+ str->append(")");
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/SkMergeImageFilter.cpp b/gfx/skia/skia/src/effects/SkMergeImageFilter.cpp
new file mode 100755
index 000000000..9830669f0
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkMergeImageFilter.cpp
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMergeImageFilter.h"
+
+#include "SkCanvas.h"
+#include "SkReadBuffer.h"
+#include "SkSpecialImage.h"
+#include "SkSpecialSurface.h"
+#include "SkWriteBuffer.h"
+#include "SkValidationUtils.h"
+
+sk_sp<SkImageFilter> SkMergeImageFilter::Make(sk_sp<SkImageFilter> first,
+ sk_sp<SkImageFilter> second,
+ SkXfermode::Mode mode,
+ const CropRect* cropRect) {
+ sk_sp<SkImageFilter> inputs[2] = { first, second };
+ SkXfermode::Mode modes[2] = { mode, mode };
+ return sk_sp<SkImageFilter>(new SkMergeImageFilter(inputs, 2, modes, cropRect));
+}
+
+sk_sp<SkImageFilter> SkMergeImageFilter::Make(sk_sp<SkImageFilter> filters[],
+ int count,
+ const SkXfermode::Mode modes[],
+ const CropRect* cropRect) {
+ return sk_sp<SkImageFilter>(new SkMergeImageFilter(filters, count, modes, cropRect));
+}
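+
+// A minimal usage sketch (the input filters are illustrative): composite the
+// results of two upstream filters with source-over blending.
+//
+//   sk_sp<SkImageFilter> merged =
+//       SkMergeImageFilter::Make(std::move(filterA), std::move(filterB),
+//                                SkXfermode::kSrcOver_Mode, nullptr);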
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMergeImageFilter::initAllocModes() {
+ int inputCount = this->countInputs();
+ if (inputCount) {
+ size_t size = sizeof(uint8_t) * inputCount;
+ if (size <= sizeof(fStorage)) {
+ fModes = SkTCast<uint8_t*>(fStorage);
+ } else {
+ fModes = SkTCast<uint8_t*>(sk_malloc_throw(size));
+ }
+ } else {
+ fModes = nullptr;
+ }
+}
+
+void SkMergeImageFilter::initModes(const SkXfermode::Mode modes[]) {
+ if (modes) {
+ this->initAllocModes();
+ int inputCount = this->countInputs();
+ for (int i = 0; i < inputCount; ++i) {
+ fModes[i] = SkToU8(modes[i]);
+ }
+ } else {
+ fModes = nullptr;
+ }
+}
+
+SkMergeImageFilter::SkMergeImageFilter(sk_sp<SkImageFilter> filters[], int count,
+ const SkXfermode::Mode modes[],
+ const CropRect* cropRect)
+ : INHERITED(filters, count, cropRect) {
+ SkASSERT(count >= 0);
+ this->initModes(modes);
+}
+
+SkMergeImageFilter::~SkMergeImageFilter() {
+
+ if (fModes != SkTCast<uint8_t*>(fStorage)) {
+ sk_free(fModes);
+ }
+}
+
+sk_sp<SkSpecialImage> SkMergeImageFilter::onFilterImage(SkSpecialImage* source, const Context& ctx,
+ SkIPoint* offset) const {
+ int inputCount = this->countInputs();
+ if (inputCount < 1) {
+ return nullptr;
+ }
+
+ SkIRect bounds;
+ bounds.setEmpty();
+
+ SkAutoTDeleteArray<sk_sp<SkSpecialImage>> inputs(new sk_sp<SkSpecialImage>[inputCount]);
+ SkAutoTDeleteArray<SkIPoint> offsets(new SkIPoint[inputCount]);
+
+ // Filter all of the inputs.
+ for (int i = 0; i < inputCount; ++i) {
+ offsets[i].setZero();
+ inputs[i] = this->filterInput(i, source, ctx, &offsets[i]);
+ if (!inputs[i]) {
+ continue;
+ }
+ const SkIRect inputBounds = SkIRect::MakeXYWH(offsets[i].fX, offsets[i].fY,
+ inputs[i]->width(), inputs[i]->height());
+ bounds.join(inputBounds);
+ }
+ if (bounds.isEmpty()) {
+ return nullptr;
+ }
+
+ // Apply the crop rect to the union of the inputs' bounds.
+ // Note that the crop rect can only reduce the bounds, since this
+ // filter does not affect transparent black.
+ bool embiggen = false;
+ this->getCropRect().applyTo(bounds, ctx.ctm(), embiggen, &bounds);
+ if (!bounds.intersect(ctx.clipBounds())) {
+ return nullptr;
+ }
+
+ const int x0 = bounds.left();
+ const int y0 = bounds.top();
+
+ sk_sp<SkSpecialSurface> surf(source->makeSurface(ctx.outputProperties(), bounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ canvas->clear(0x0);
+
+ // Composite all of the filter inputs.
+ for (int i = 0; i < inputCount; ++i) {
+ if (!inputs[i]) {
+ continue;
+ }
+
+ SkPaint paint;
+ if (fModes) {
+ paint.setBlendMode((SkBlendMode)fModes[i]);
+ }
+
+ inputs[i]->draw(canvas,
+ SkIntToScalar(offsets[i].x() - x0), SkIntToScalar(offsets[i].y() - y0),
+ &paint);
+ }
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ return surf->makeImageSnapshot();
+}
+
+sk_sp<SkFlattenable> SkMergeImageFilter::CreateProc(SkReadBuffer& buffer) {
+ Common common;
+ if (!common.unflatten(buffer, -1)) {
+ return nullptr;
+ }
+
+ const int count = common.inputCount();
+ bool hasModes = buffer.readBool();
+ if (hasModes) {
+ SkAutoSTArray<4, SkXfermode::Mode> modes(count);
+ SkAutoSTArray<4, uint8_t> modes8(count);
+ if (!buffer.readByteArray(modes8.get(), count)) {
+ return nullptr;
+ }
+ for (int i = 0; i < count; ++i) {
+ modes[i] = (SkXfermode::Mode)modes8[i];
+ buffer.validate(SkIsValidMode(modes[i]));
+ }
+ if (!buffer.isValid()) {
+ return nullptr;
+ }
+ return Make(common.inputs(), count, modes.get(), &common.cropRect());
+ }
+ return Make(common.inputs(), count, nullptr, &common.cropRect());
+}
+
+void SkMergeImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeBool(fModes != nullptr);
+ if (fModes) {
+ buffer.writeByteArray(fModes, this->countInputs() * sizeof(fModes[0]));
+ }
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkMergeImageFilter::toString(SkString* str) const {
+ str->appendf("SkMergeImageFilter: (");
+
+ for (int i = 0; i < this->countInputs(); ++i) {
+ SkImageFilter* filter = this->getInput(i);
+ str->appendf("%d: (", i);
+ filter->toString(str);
+ str->appendf(")");
+ }
+
+ str->append(")");
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/SkMorphologyImageFilter.cpp b/gfx/skia/skia/src/effects/SkMorphologyImageFilter.cpp
new file mode 100644
index 000000000..2bd792860
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkMorphologyImageFilter.cpp
@@ -0,0 +1,642 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMorphologyImageFilter.h"
+
+#include "SkBitmap.h"
+#include "SkColorPriv.h"
+#include "SkOpts.h"
+#include "SkReadBuffer.h"
+#include "SkRect.h"
+#include "SkSpecialImage.h"
+#include "SkWriteBuffer.h"
+
+#if SK_SUPPORT_GPU
+#include "GrContext.h"
+#include "GrDrawContext.h"
+#include "GrFixedClip.h"
+#include "GrInvariantOutput.h"
+#include "GrTexture.h"
+#include "SkGr.h"
+#include "SkGrPriv.h"
+#include "effects/Gr1DKernelEffect.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#endif
+
+sk_sp<SkImageFilter> SkDilateImageFilter::Make(int radiusX, int radiusY,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect) {
+ if (radiusX < 0 || radiusY < 0) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkDilateImageFilter(radiusX, radiusY,
+ std::move(input),
+ cropRect));
+}
+
+
+sk_sp<SkImageFilter> SkErodeImageFilter::Make(int radiusX, int radiusY,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect) {
+ if (radiusX < 0 || radiusY < 0) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkErodeImageFilter(radiusX, radiusY,
+ std::move(input),
+ cropRect));
+}
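+
+// A minimal usage sketch (the radii are illustrative): a 2x2 dilate thickens
+// drawn shapes by two pixels on each side, while the matching erode thins
+// them by the same amount.
+//
+//   sk_sp<SkImageFilter> thicken = SkDilateImageFilter::Make(2, 2, nullptr, nullptr);
+//   sk_sp<SkImageFilter> thin    = SkErodeImageFilter::Make(2, 2, nullptr, nullptr);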
+
+SkMorphologyImageFilter::SkMorphologyImageFilter(int radiusX,
+ int radiusY,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fRadius(SkISize::Make(radiusX, radiusY)) {
+}
+
+void SkMorphologyImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeInt(fRadius.fWidth);
+ buffer.writeInt(fRadius.fHeight);
+}
+
+static void call_proc_X(SkMorphologyImageFilter::Proc procX,
+ const SkBitmap& src, SkBitmap* dst,
+ int radiusX, const SkIRect& bounds) {
+ procX(src.getAddr32(bounds.left(), bounds.top()), dst->getAddr32(0, 0),
+ radiusX, bounds.width(), bounds.height(),
+ src.rowBytesAsPixels(), dst->rowBytesAsPixels());
+}
+
+static void call_proc_Y(SkMorphologyImageFilter::Proc procY,
+ const SkPMColor* src, int srcRowBytesAsPixels, SkBitmap* dst,
+ int radiusY, const SkIRect& bounds) {
+ procY(src, dst->getAddr32(0, 0),
+ radiusY, bounds.height(), bounds.width(),
+ srcRowBytesAsPixels, dst->rowBytesAsPixels());
+}
+
+SkRect SkMorphologyImageFilter::computeFastBounds(const SkRect& src) const {
+ SkRect bounds = this->getInput(0) ? this->getInput(0)->computeFastBounds(src) : src;
+ bounds.outset(SkIntToScalar(fRadius.width()), SkIntToScalar(fRadius.height()));
+ return bounds;
+}
+
+SkIRect SkMorphologyImageFilter::onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection) const {
+ SkVector radius = SkVector::Make(SkIntToScalar(this->radius().width()),
+ SkIntToScalar(this->radius().height()));
+ ctm.mapVectors(&radius, 1);
+ return src.makeOutset(SkScalarCeilToInt(radius.x()), SkScalarCeilToInt(radius.y()));
+}
+
+sk_sp<SkFlattenable> SkErodeImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ const int width = buffer.readInt();
+ const int height = buffer.readInt();
+ return Make(width, height, common.getInput(0), &common.cropRect());
+}
+
+sk_sp<SkFlattenable> SkDilateImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ const int width = buffer.readInt();
+ const int height = buffer.readInt();
+ return Make(width, height, common.getInput(0), &common.cropRect());
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkErodeImageFilter::toString(SkString* str) const {
+ str->appendf("SkErodeImageFilter: (");
+ str->appendf("radius: (%d,%d)", this->radius().fWidth, this->radius().fHeight);
+ str->append(")");
+}
+#endif
+
+#ifndef SK_IGNORE_TO_STRING
+void SkDilateImageFilter::toString(SkString* str) const {
+ str->appendf("SkDilateImageFilter: (");
+ str->appendf("radius: (%d,%d)", this->radius().fWidth, this->radius().fHeight);
+ str->append(")");
+}
+#endif
+
+#if SK_SUPPORT_GPU
+
+///////////////////////////////////////////////////////////////////////////////
+/**
+ * Morphology effects. Depending on the type of morphology, either the
+ * component-wise min (kErode_MorphologyType) or max (kDilate_MorphologyType)
+ * of all pixels in the kernel is selected as the new color. The new color is
+ * modulated by the input color.
+ */
+class GrMorphologyEffect : public Gr1DKernelEffect {
+
+public:
+
+ enum MorphologyType {
+ kErode_MorphologyType,
+ kDilate_MorphologyType,
+ };
+
+ static sk_sp<GrFragmentProcessor> Make(GrTexture* tex, Direction dir, int radius,
+ MorphologyType type) {
+ return sk_sp<GrFragmentProcessor>(new GrMorphologyEffect(tex, dir, radius, type));
+ }
+
+ static sk_sp<GrFragmentProcessor> Make(GrTexture* tex, Direction dir, int radius,
+ MorphologyType type, float bounds[2]) {
+ return sk_sp<GrFragmentProcessor>(new GrMorphologyEffect(tex, dir, radius, type, bounds));
+ }
+
+ virtual ~GrMorphologyEffect();
+
+ MorphologyType type() const { return fType; }
+ bool useRange() const { return fUseRange; }
+ const float* range() const { return fRange; }
+
+ const char* name() const override { return "Morphology"; }
+
+protected:
+
+ MorphologyType fType;
+ bool fUseRange;
+ float fRange[2];
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ GrMorphologyEffect(GrTexture*, Direction, int radius, MorphologyType);
+ GrMorphologyEffect(GrTexture*, Direction, int radius, MorphologyType,
+ float bounds[2]);
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef Gr1DKernelEffect INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLMorphologyEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrGLSLCaps&, GrProcessorKeyBuilder*);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fPixelSizeUni;
+ GrGLSLProgramDataManager::UniformHandle fRangeUni;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GrGLMorphologyEffect::emitCode(EmitArgs& args) {
+ const GrMorphologyEffect& me = args.fFp.cast<GrMorphologyEffect>();
+
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ fPixelSizeUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType, kDefault_GrSLPrecision,
+ "PixelSize");
+ const char* pixelSizeInc = uniformHandler->getUniformCStr(fPixelSizeUni);
+ fRangeUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType, kDefault_GrSLPrecision,
+ "Range");
+ const char* range = uniformHandler->getUniformCStr(fRangeUni);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString coords2D = fragBuilder->ensureCoords2D(args.fTransformedCoords[0]);
+ const char* func;
+ switch (me.type()) {
+ case GrMorphologyEffect::kErode_MorphologyType:
+ fragBuilder->codeAppendf("\t\t%s = vec4(1, 1, 1, 1);\n", args.fOutputColor);
+ func = "min";
+ break;
+ case GrMorphologyEffect::kDilate_MorphologyType:
+ fragBuilder->codeAppendf("\t\t%s = vec4(0, 0, 0, 0);\n", args.fOutputColor);
+ func = "max";
+ break;
+ default:
+ SkFAIL("Unexpected type");
+ func = ""; // suppress warning
+ break;
+ }
+
+ const char* dir;
+ switch (me.direction()) {
+ case Gr1DKernelEffect::kX_Direction:
+ dir = "x";
+ break;
+ case Gr1DKernelEffect::kY_Direction:
+ dir = "y";
+ break;
+ default:
+ SkFAIL("Unknown filter direction.");
+ dir = ""; // suppress warning
+ }
+
+ int width = GrMorphologyEffect::WidthFromRadius(me.radius());
+
+ // vec2 coord = coord2D;
+ fragBuilder->codeAppendf("\t\tvec2 coord = %s;\n", coords2D.c_str());
+ // coord.x -= radius * pixelSize;
+ fragBuilder->codeAppendf("\t\tcoord.%s -= %d.0 * %s; \n", dir, me.radius(), pixelSizeInc);
+ if (me.useRange()) {
+ // highBound = min(highBound, coord.x + (width-1) * pixelSize);
+ fragBuilder->codeAppendf("\t\tfloat highBound = min(%s.y, coord.%s + %f * %s);",
+ range, dir, float(width - 1), pixelSizeInc);
+ // coord.x = max(lowBound, coord.x);
+ fragBuilder->codeAppendf("\t\tcoord.%s = max(%s.x, coord.%s);", dir, range, dir);
+ }
+ fragBuilder->codeAppendf("\t\tfor (int i = 0; i < %d; i++) {\n", width);
+ fragBuilder->codeAppendf("\t\t\t%s = %s(%s, ", args.fOutputColor, func, args.fOutputColor);
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], "coord");
+ fragBuilder->codeAppend(");\n");
+ // coord.x += pixelSize;
+ fragBuilder->codeAppendf("\t\t\tcoord.%s += %s;\n", dir, pixelSizeInc);
+ if (me.useRange()) {
+ // coord.x = min(highBound, coord.x);
+ fragBuilder->codeAppendf("\t\t\tcoord.%s = min(highBound, coord.%s);", dir, dir);
+ }
+ fragBuilder->codeAppend("\t\t}\n");
+ SkString modulate;
+ GrGLSLMulVarBy4f(&modulate, args.fOutputColor, args.fInputColor);
+ fragBuilder->codeAppend(modulate.c_str());
+}
+
+void GrGLMorphologyEffect::GenKey(const GrProcessor& proc,
+ const GrGLSLCaps&, GrProcessorKeyBuilder* b) {
+ const GrMorphologyEffect& m = proc.cast<GrMorphologyEffect>();
+ uint32_t key = static_cast<uint32_t>(m.radius());
+ key |= (m.type() << 8);
+ key |= (m.direction() << 9);
+ if (m.useRange()) {
+ key |= 1 << 10;
+ }
+ b->add32(key);
+}
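+
+// Worked example of the key packing above (illustrative, and assuming
+// kDilate_MorphologyType == 1 and kY_Direction == 1): a dilate in Y with
+// radius 4 and an active range packs to
+// key = 4 | (1 << 8) | (1 << 9) | (1 << 10) = 0x704.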
+
+void GrGLMorphologyEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& proc) {
+ const GrMorphologyEffect& m = proc.cast<GrMorphologyEffect>();
+ GrTexture& texture = *m.texture(0);
+
+ float pixelSize = 0.0f;
+ switch (m.direction()) {
+ case Gr1DKernelEffect::kX_Direction:
+ pixelSize = 1.0f / texture.width();
+ break;
+ case Gr1DKernelEffect::kY_Direction:
+ pixelSize = 1.0f / texture.height();
+ break;
+ default:
+ SkFAIL("Unknown filter direction.");
+ }
+ pdman.set1f(fPixelSizeUni, pixelSize);
+
+ if (m.useRange()) {
+ const float* range = m.range();
+ if (m.direction() && texture.origin() == kBottomLeft_GrSurfaceOrigin) {
+ pdman.set2f(fRangeUni, 1.0f - range[1], 1.0f - range[0]);
+ } else {
+ pdman.set2f(fRangeUni, range[0], range[1]);
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrMorphologyEffect::GrMorphologyEffect(GrTexture* texture,
+ Direction direction,
+ int radius,
+ MorphologyType type)
+ : INHERITED(texture, direction, radius)
+ , fType(type)
+ , fUseRange(false) {
+ this->initClassID<GrMorphologyEffect>();
+}
+
+GrMorphologyEffect::GrMorphologyEffect(GrTexture* texture,
+ Direction direction,
+ int radius,
+ MorphologyType type,
+ float range[2])
+ : INHERITED(texture, direction, radius)
+ , fType(type)
+ , fUseRange(true) {
+ this->initClassID<GrMorphologyEffect>();
+ fRange[0] = range[0];
+ fRange[1] = range[1];
+}
+
+GrMorphologyEffect::~GrMorphologyEffect() {
+}
+
+void GrMorphologyEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLMorphologyEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrMorphologyEffect::onCreateGLSLInstance() const {
+ return new GrGLMorphologyEffect;
+}
+bool GrMorphologyEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrMorphologyEffect& s = sBase.cast<GrMorphologyEffect>();
+ return (this->radius() == s.radius() &&
+ this->direction() == s.direction() &&
+ this->useRange() == s.useRange() &&
+ this->type() == s.type());
+}
+
+void GrMorphologyEffect::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ // This is valid because the color components of the result of the kernel all come
+ // exactly from existing values in the source texture.
+ this->updateInvariantOutputForModulation(inout);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrMorphologyEffect);
+
+sk_sp<GrFragmentProcessor> GrMorphologyEffect::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx :
+ GrProcessorUnitTest::kAlphaTextureIdx;
+ Direction dir = d->fRandom->nextBool() ? kX_Direction : kY_Direction;
+ static const int kMaxRadius = 10;
+ int radius = d->fRandom->nextRangeU(1, kMaxRadius);
+ MorphologyType type = d->fRandom->nextBool() ? GrMorphologyEffect::kErode_MorphologyType :
+ GrMorphologyEffect::kDilate_MorphologyType;
+
+ return GrMorphologyEffect::Make(d->fTextures[texIdx], dir, radius, type);
+}
+
+
+static void apply_morphology_rect(GrDrawContext* drawContext,
+ const GrClip& clip,
+ GrTexture* texture,
+ const SkIRect& srcRect,
+ const SkIRect& dstRect,
+ int radius,
+ GrMorphologyEffect::MorphologyType morphType,
+ float bounds[2],
+ Gr1DKernelEffect::Direction direction) {
+ GrPaint paint;
+ paint.setGammaCorrect(drawContext->isGammaCorrect());
+ paint.addColorFragmentProcessor(GrMorphologyEffect::Make(texture,
+ direction,
+ radius,
+ morphType,
+ bounds));
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ drawContext->fillRectToRect(clip, paint, SkMatrix::I(), SkRect::Make(dstRect),
+ SkRect::Make(srcRect));
+}
+
+static void apply_morphology_rect_no_bounds(GrDrawContext* drawContext,
+ const GrClip& clip,
+ GrTexture* texture,
+ const SkIRect& srcRect,
+ const SkIRect& dstRect,
+ int radius,
+ GrMorphologyEffect::MorphologyType morphType,
+ Gr1DKernelEffect::Direction direction) {
+ GrPaint paint;
+ paint.setGammaCorrect(drawContext->isGammaCorrect());
+ paint.addColorFragmentProcessor(GrMorphologyEffect::Make(texture, direction, radius,
+ morphType));
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ drawContext->fillRectToRect(clip, paint, SkMatrix::I(), SkRect::Make(dstRect),
+ SkRect::Make(srcRect));
+}
+
+static void apply_morphology_pass(GrDrawContext* drawContext,
+ const GrClip& clip,
+ GrTexture* texture,
+ const SkIRect& srcRect,
+ const SkIRect& dstRect,
+ int radius,
+ GrMorphologyEffect::MorphologyType morphType,
+ Gr1DKernelEffect::Direction direction) {
+ float bounds[2] = { 0.0f, 1.0f };
+ SkIRect lowerSrcRect = srcRect, lowerDstRect = dstRect;
+ SkIRect middleSrcRect = srcRect, middleDstRect = dstRect;
+ SkIRect upperSrcRect = srcRect, upperDstRect = dstRect;
+ if (direction == Gr1DKernelEffect::kX_Direction) {
+ bounds[0] = (SkIntToScalar(srcRect.left()) + 0.5f) / texture->width();
+ bounds[1] = (SkIntToScalar(srcRect.right()) - 0.5f) / texture->width();
+ lowerSrcRect.fRight = srcRect.left() + radius;
+ lowerDstRect.fRight = dstRect.left() + radius;
+ upperSrcRect.fLeft = srcRect.right() - radius;
+ upperDstRect.fLeft = dstRect.right() - radius;
+ middleSrcRect.inset(radius, 0);
+ middleDstRect.inset(radius, 0);
+ } else {
+ bounds[0] = (SkIntToScalar(srcRect.top()) + 0.5f) / texture->height();
+ bounds[1] = (SkIntToScalar(srcRect.bottom()) - 0.5f) / texture->height();
+ lowerSrcRect.fBottom = srcRect.top() + radius;
+ lowerDstRect.fBottom = dstRect.top() + radius;
+ upperSrcRect.fTop = srcRect.bottom() - radius;
+ upperDstRect.fTop = dstRect.bottom() - radius;
+ middleSrcRect.inset(0, radius);
+ middleDstRect.inset(0, radius);
+ }
+ if (middleSrcRect.fLeft - middleSrcRect.fRight >= 0) {
+ // radius covers srcRect; use bounds over entire draw
+ apply_morphology_rect(drawContext, clip, texture, srcRect, dstRect, radius,
+ morphType, bounds, direction);
+ } else {
+ // Draw upper and lower margins with bounds; middle without.
+ apply_morphology_rect(drawContext, clip, texture, lowerSrcRect, lowerDstRect, radius,
+ morphType, bounds, direction);
+ apply_morphology_rect(drawContext, clip, texture, upperSrcRect, upperDstRect, radius,
+ morphType, bounds, direction);
+ apply_morphology_rect_no_bounds(drawContext, clip, texture, middleSrcRect, middleDstRect,
+ radius, morphType, direction);
+ }
+}
+
+static sk_sp<SkSpecialImage> apply_morphology(
+ GrContext* context,
+ SkSpecialImage* input,
+ const SkIRect& rect,
+ GrMorphologyEffect::MorphologyType morphType,
+ SkISize radius,
+ const SkImageFilter::OutputProperties& outputProperties) {
+ sk_sp<GrTexture> srcTexture(input->asTextureRef(context));
+ SkASSERT(srcTexture);
+ sk_sp<SkColorSpace> colorSpace = sk_ref_sp(outputProperties.colorSpace());
+ GrPixelConfig config = GrRenderableConfigForColorSpace(colorSpace.get());
+
+ // setup new clip
+ const GrFixedClip clip(SkIRect::MakeWH(srcTexture->width(), srcTexture->height()));
+
+ const SkIRect dstRect = SkIRect::MakeWH(rect.width(), rect.height());
+ SkIRect srcRect = rect;
+
+ SkASSERT(radius.width() > 0 || radius.height() > 0);
+
+ if (radius.fWidth > 0) {
+ sk_sp<GrDrawContext> dstDrawContext(context->makeDrawContext(SkBackingFit::kApprox,
+ rect.width(), rect.height(),
+ config, colorSpace));
+ if (!dstDrawContext) {
+ return nullptr;
+ }
+
+ apply_morphology_pass(dstDrawContext.get(), clip, srcTexture.get(),
+ srcRect, dstRect, radius.fWidth, morphType,
+ Gr1DKernelEffect::kX_Direction);
+ SkIRect clearRect = SkIRect::MakeXYWH(dstRect.fLeft, dstRect.fBottom,
+ dstRect.width(), radius.fHeight);
+ GrColor clearColor = GrMorphologyEffect::kErode_MorphologyType == morphType
+ ? SK_ColorWHITE
+ : SK_ColorTRANSPARENT;
+ dstDrawContext->clear(&clearRect, clearColor, false);
+
+ srcTexture = dstDrawContext->asTexture();
+ srcRect = dstRect;
+ }
+ if (radius.fHeight > 0) {
+ sk_sp<GrDrawContext> dstDrawContext(context->makeDrawContext(SkBackingFit::kApprox,
+ rect.width(), rect.height(),
+ config, colorSpace));
+ if (!dstDrawContext) {
+ return nullptr;
+ }
+
+ apply_morphology_pass(dstDrawContext.get(), clip, srcTexture.get(),
+ srcRect, dstRect, radius.fHeight, morphType,
+ Gr1DKernelEffect::kY_Direction);
+
+ srcTexture = dstDrawContext->asTexture();
+ }
+
+ return SkSpecialImage::MakeFromGpu(SkIRect::MakeWH(rect.width(), rect.height()),
+ kNeedNewImageUniqueID_SpecialImage,
+ std::move(srcTexture), std::move(colorSpace),
+ &input->props());
+}
+#endif
+
+sk_sp<SkSpecialImage> SkMorphologyImageFilter::onFilterImage(SkSpecialImage* source,
+ const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, source, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ SkIRect bounds;
+ input = this->applyCropRect(this->mapContext(ctx), input.get(), &inputOffset, &bounds);
+ if (!input) {
+ return nullptr;
+ }
+
+ SkVector radius = SkVector::Make(SkIntToScalar(this->radius().width()),
+ SkIntToScalar(this->radius().height()));
+ ctx.ctm().mapVectors(&radius, 1);
+ int width = SkScalarFloorToInt(radius.fX);
+ int height = SkScalarFloorToInt(radius.fY);
+
+ if (width < 0 || height < 0) {
+ return nullptr;
+ }
+
+ SkIRect srcBounds = bounds;
+ srcBounds.offset(-inputOffset);
+
+ if (0 == width && 0 == height) {
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ return input->makeSubset(srcBounds);
+ }
+
+#if SK_SUPPORT_GPU
+ if (source->isTextureBacked()) {
+ GrContext* context = source->getContext();
+
+ auto type = (kDilate_Op == this->op()) ? GrMorphologyEffect::kDilate_MorphologyType
+ : GrMorphologyEffect::kErode_MorphologyType;
+ sk_sp<SkSpecialImage> result(apply_morphology(context, input.get(), srcBounds, type,
+ SkISize::Make(width, height),
+ ctx.outputProperties()));
+ if (result) {
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ }
+ return result;
+ }
+#endif
+
+ SkBitmap inputBM;
+
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if (inputBM.colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+
+ SkImageInfo info = SkImageInfo::Make(bounds.width(), bounds.height(),
+ inputBM.colorType(), inputBM.alphaType());
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ SkAutoLockPixels inputLock(inputBM), dstLock(dst);
+
+ SkMorphologyImageFilter::Proc procX, procY;
+
+ if (kDilate_Op == this->op()) {
+ procX = SkOpts::dilate_x;
+ procY = SkOpts::dilate_y;
+ } else {
+ procX = SkOpts::erode_x;
+ procY = SkOpts::erode_y;
+ }
+
+ if (width > 0 && height > 0) {
+ SkBitmap tmp;
+ if (!tmp.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ SkAutoLockPixels tmpLock(tmp);
+
+ call_proc_X(procX, inputBM, &tmp, width, srcBounds);
+ SkIRect tmpBounds = SkIRect::MakeWH(srcBounds.width(), srcBounds.height());
+ call_proc_Y(procY,
+ tmp.getAddr32(tmpBounds.left(), tmpBounds.top()), tmp.rowBytesAsPixels(),
+ &dst, height, tmpBounds);
+ } else if (width > 0) {
+ call_proc_X(procX, inputBM, &dst, width, srcBounds);
+ } else if (height > 0) {
+ call_proc_Y(procY,
+ inputBM.getAddr32(srcBounds.left(), srcBounds.top()),
+ inputBM.rowBytesAsPixels(),
+ &dst, height, srcBounds);
+ }
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(bounds.width(), bounds.height()),
+ dst, &source->props());
+}
diff --git a/gfx/skia/skia/src/effects/SkOffsetImageFilter.cpp b/gfx/skia/skia/src/effects/SkOffsetImageFilter.cpp
new file mode 100644
index 000000000..2e8b0d916
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkOffsetImageFilter.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkOffsetImageFilter.h"
+
+#include "SkCanvas.h"
+#include "SkMatrix.h"
+#include "SkPaint.h"
+#include "SkReadBuffer.h"
+#include "SkSpecialImage.h"
+#include "SkSpecialSurface.h"
+#include "SkWriteBuffer.h"
+
+sk_sp<SkImageFilter> SkOffsetImageFilter::Make(SkScalar dx, SkScalar dy,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect) {
+ if (!SkScalarIsFinite(dx) || !SkScalarIsFinite(dy)) {
+ return nullptr;
+ }
+
+ return sk_sp<SkImageFilter>(new SkOffsetImageFilter(dx, dy, std::move(input), cropRect));
+}
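+
+// A minimal usage sketch (the offsets and input are illustrative): shift the
+// filtered result 20 pixels right and 10 pixels down.
+//
+//   sk_sp<SkImageFilter> shifted =
+//       SkOffsetImageFilter::Make(SkIntToScalar(20), SkIntToScalar(10),
+//                                 nullptr, nullptr);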
+
+sk_sp<SkSpecialImage> SkOffsetImageFilter::onFilterImage(SkSpecialImage* source,
+ const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint srcOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, source, ctx, &srcOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ SkVector vec;
+ ctx.ctm().mapVectors(&vec, &fOffset, 1);
+
+ if (!this->cropRectIsSet()) {
+ offset->fX = srcOffset.fX + SkScalarRoundToInt(vec.fX);
+ offset->fY = srcOffset.fY + SkScalarRoundToInt(vec.fY);
+ return input;
+ } else {
+ SkIRect bounds;
+ SkIRect srcBounds = SkIRect::MakeWH(input->width(), input->height());
+ srcBounds.offset(srcOffset);
+ if (!this->applyCropRect(ctx, srcBounds, &bounds)) {
+ return nullptr;
+ }
+
+ sk_sp<SkSpecialSurface> surf(source->makeSurface(ctx.outputProperties(), bounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ // TODO: it seems like this clear shouldn't be necessary (see skbug.com/5075)
+ canvas->clear(0x0);
+
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+ canvas->translate(SkIntToScalar(srcOffset.fX - bounds.fLeft),
+ SkIntToScalar(srcOffset.fY - bounds.fTop));
+
+ input->draw(canvas, vec.x(), vec.y(), &paint);
+
+ offset->fX = bounds.fLeft;
+ offset->fY = bounds.fTop;
+ return surf->makeImageSnapshot();
+ }
+}
+
+SkRect SkOffsetImageFilter::computeFastBounds(const SkRect& src) const {
+ SkRect bounds = this->getInput(0) ? this->getInput(0)->computeFastBounds(src) : src;
+ bounds.offset(fOffset.fX, fOffset.fY);
+ return bounds;
+}
+
+SkIRect SkOffsetImageFilter::onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection direction) const {
+ SkVector vec;
+ ctm.mapVectors(&vec, &fOffset, 1);
+ if (kReverse_MapDirection == direction) {
+ vec.negate();
+ }
+
+ return src.makeOffset(SkScalarCeilToInt(vec.fX), SkScalarCeilToInt(vec.fY));
+}
+
+sk_sp<SkFlattenable> SkOffsetImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkPoint offset;
+ buffer.readPoint(&offset);
+ return Make(offset.x(), offset.y(), common.getInput(0), &common.cropRect());
+}
+
+void SkOffsetImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writePoint(fOffset);
+}
+
+SkOffsetImageFilter::SkOffsetImageFilter(SkScalar dx, SkScalar dy,
+ sk_sp<SkImageFilter> input,
+ const CropRect* cropRect)
+ : INHERITED(&input, 1, cropRect) {
+ fOffset.set(dx, dy);
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkOffsetImageFilter::toString(SkString* str) const {
+ str->appendf("SkOffsetImageFilter: (");
+ str->appendf("offset: (%f, %f) ", fOffset.fX, fOffset.fY);
+ str->append("input: (");
+ if (this->getInput(0)) {
+ this->getInput(0)->toString(str);
+ }
+ str->append("))");
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/SkPackBits.cpp b/gfx/skia/skia/src/effects/SkPackBits.cpp
new file mode 100644
index 000000000..286d9d140
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkPackBits.cpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkPackBits.h"
+
+size_t SkPackBits::ComputeMaxSize8(size_t srcSize) {
+ // worst case is the number of 8bit values + 1 byte per (up to) 128 entries.
+ return ((srcSize + 127) >> 7) + srcSize;
+}
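+
+// Worked example (the size is illustrative): for srcSize = 300, the formula
+// reserves ((300 + 127) >> 7) = 3 header bytes plus the 300 source bytes,
+// i.e. at most 303 bytes of packed output.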
+
+static uint8_t* flush_same8(uint8_t dst[], uint8_t value, size_t count) {
+ while (count > 0) {
+ size_t n = count > 128 ? 128 : count;
+ *dst++ = (uint8_t)(n - 1);
+ *dst++ = (uint8_t)value;
+ count -= n;
+ }
+ return dst;
+}
+
+static uint8_t* flush_diff8(uint8_t* SK_RESTRICT dst,
+ const uint8_t* SK_RESTRICT src, size_t count) {
+ while (count > 0) {
+ size_t n = count > 128 ? 128 : count;
+ *dst++ = (uint8_t)(n + 127);
+ memcpy(dst, src, n);
+ src += n;
+ dst += n;
+ count -= n;
+ }
+ return dst;
+}
+
+size_t SkPackBits::Pack8(const uint8_t* SK_RESTRICT src, size_t srcSize,
+ uint8_t* SK_RESTRICT dst, size_t dstSize) {
+ if (dstSize < ComputeMaxSize8(srcSize)) {
+ return 0;
+ }
+
+ uint8_t* const origDst = dst;
+ const uint8_t* stop = src + srcSize;
+
+ for (intptr_t count = stop - src; count > 0; count = stop - src) {
+ if (1 == count) {
+ *dst++ = 0;
+ *dst++ = *src;
+ break;
+ }
+
+ unsigned value = *src;
+ const uint8_t* s = src + 1;
+
+ if (*s == value) { // accumulate same values...
+ do {
+ s++;
+ if (s == stop) {
+ break;
+ }
+ } while (*s == value);
+ dst = flush_same8(dst, value, SkToInt(s - src));
+ } else { // accumulate diff values...
+ do {
+ if (++s == stop) {
+ goto FLUSH_DIFF;
+ }
+ // only stop if we hit 3 in a row,
+ // otherwise we get bigger than ComputeMaxSize8
+ } while (*s != s[-1] || s[-1] != s[-2]);
+ s -= 2; // back up so we don't grab the "same" values that follow
+ FLUSH_DIFF:
+ dst = flush_diff8(dst, src, SkToInt(s - src));
+ }
+ src = s;
+ }
+ return dst - origDst;
+}
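+
+// Worked example of the encoding above (the bytes are illustrative): the
+// input { 7, 7, 7, 7, 1, 2, 3 } packs to { 3, 7, 130, 1, 2, 3 } -- the "same"
+// run of four 7s becomes the pair (count - 1, value) = (3, 7), and the
+// literal run of three distinct bytes becomes
+// (count + 127, bytes...) = (130, 1, 2, 3).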
+
+int SkPackBits::Unpack8(const uint8_t* SK_RESTRICT src, size_t srcSize,
+ uint8_t* SK_RESTRICT dst, size_t dstSize) {
+ uint8_t* const origDst = dst;
+ uint8_t* const endDst = dst + dstSize;
+ const uint8_t* stop = src + srcSize;
+
+ while (src < stop) {
+ unsigned n = *src++;
+ if (n <= 127) { // repeat count (n + 1)
+ n += 1;
+ if (dst > (endDst - n)) {
+ return 0;
+ }
+ memset(dst, *src++, n);
+ } else { // diff (literal) count (n - 127)
+ n -= 127;
+ if (dst > (endDst - n)) {
+ return 0;
+ }
+ memcpy(dst, src, n);
+ src += n;
+ }
+ dst += n;
+ }
+ SkASSERT(src <= stop);
+ return SkToInt(dst - origDst);
+}
diff --git a/gfx/skia/skia/src/effects/SkPackBits.h b/gfx/skia/skia/src/effects/SkPackBits.h
new file mode 100644
index 000000000..2dc7677af
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkPackBits.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPackBits_DEFINED
+#define SkPackBits_DEFINED
+
+#include "SkTypes.h"
+
+class SkPackBits {
+public:
+ /** Given the number of 8bit values that will be passed to Pack8,
+ returns the worst-case size needed for the dst[] buffer.
+ */
+ static size_t ComputeMaxSize8(size_t srcSize);
+
+ /** Write the src array into a packed format. The packing process may end
+ up writing more bytes than it read, so dst[] must be large enough.
+ @param src Input array of 8bit values
+ @param srcSize Number of entries in src[]
+ @param dst Buffer (allocated by caller) to write the packed data
+ into
+ @param dstSize Number of bytes in the output buffer.
+ @return the number of bytes written to dst[]
+ */
+ static size_t Pack8(const uint8_t src[], size_t srcSize, uint8_t dst[],
+ size_t dstSize);
+
+ /** Unpack the data in src[], and expand it into dst[]. The src[] data was
+ written by a previous call to Pack8.
+ @param src Input data to unpack, previously created by Pack8.
+ @param srcSize Number of bytes of src to unpack
+ @param dst Buffer (allocated by caller) to expand the src[] into.
+ @param dstSize Number of bytes in the output buffer.
+ @return the number of bytes written into dst.
+ */
+ static int Unpack8(const uint8_t src[], size_t srcSize, uint8_t dst[],
+ size_t dstSize);
+};
+
+#endif
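
A quick round-trip sketch of the Pack8/Unpack8 pair added above (for reference only, not part of the patch). Per the implementation, a run of equal bytes is stored as (count - 1, value) and a run of literals as (count + 127, bytes...), so for example {5,5,5,5,1,2,3} packs to {3,5, 130,1,2,3}.

    #include <cstring>
    #include <vector>
    #include "SkPackBits.h"

    // Sketch: pack a buffer and verify it unpacks to the original bytes.
    static bool roundTrip8(const uint8_t* src, size_t len) {
        std::vector<uint8_t> packed(SkPackBits::ComputeMaxSize8(len));
        size_t packedLen = SkPackBits::Pack8(src, len, packed.data(), packed.size());
        std::vector<uint8_t> unpacked(len);
        int unpackedLen = SkPackBits::Unpack8(packed.data(), packedLen,
                                              unpacked.data(), unpacked.size());
        return unpackedLen == (int)len && 0 == memcmp(src, unpacked.data(), len);
    }
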
diff --git a/gfx/skia/skia/src/effects/SkPaintFlagsDrawFilter.cpp b/gfx/skia/skia/src/effects/SkPaintFlagsDrawFilter.cpp
new file mode 100644
index 000000000..dc1c0074b
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkPaintFlagsDrawFilter.cpp
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPaintFlagsDrawFilter.h"
+#include "SkPaint.h"
+
+SkPaintFlagsDrawFilter::SkPaintFlagsDrawFilter(uint32_t clearFlags,
+ uint32_t setFlags) {
+ fClearFlags = SkToU16(clearFlags & SkPaint::kAllFlags);
+ fSetFlags = SkToU16(setFlags & SkPaint::kAllFlags);
+}
+
+bool SkPaintFlagsDrawFilter::filter(SkPaint* paint, Type) {
+ paint->setFlags((paint->getFlags() & ~fClearFlags) | fSetFlags);
+ return true;
+}
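
A short sketch of the flag filter above in action (for reference, not part of the patch; SkDrawFilter::kPaint_Type and the SkPaint flag constants are assumed from this same Skia snapshot).

    #include "SkPaint.h"
    #include "SkPaintFlagsDrawFilter.h"

    // Sketch: force antialiasing on and dithering off for every filtered paint.
    static void demoFlagFilter() {
        SkPaintFlagsDrawFilter filter(/*clearFlags=*/SkPaint::kDither_Flag,
                                      /*setFlags=*/SkPaint::kAntiAlias_Flag);
        SkPaint paint;
        paint.setDither(true);
        filter.filter(&paint, SkDrawFilter::kPaint_Type);
        // paint.isAntiAlias() is now true and paint.isDither() is false.
    }
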
diff --git a/gfx/skia/skia/src/effects/SkPaintImageFilter.cpp b/gfx/skia/skia/src/effects/SkPaintImageFilter.cpp
new file mode 100644
index 000000000..0a0e4e92e
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkPaintImageFilter.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPaintImageFilter.h"
+#include "SkCanvas.h"
+#include "SkReadBuffer.h"
+#include "SkSpecialImage.h"
+#include "SkSpecialSurface.h"
+#include "SkWriteBuffer.h"
+
+sk_sp<SkImageFilter> SkPaintImageFilter::Make(const SkPaint& paint,
+ const CropRect* cropRect) {
+ return sk_sp<SkImageFilter>(new SkPaintImageFilter(paint, cropRect));
+}
+
+SkPaintImageFilter::SkPaintImageFilter(const SkPaint& paint, const CropRect* cropRect)
+ : INHERITED(nullptr, 0, cropRect)
+ , fPaint(paint) {
+}
+
+sk_sp<SkFlattenable> SkPaintImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 0);
+ SkPaint paint;
+ buffer.readPaint(&paint);
+ return SkPaintImageFilter::Make(paint, &common.cropRect());
+}
+
+void SkPaintImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writePaint(fPaint);
+}
+
+sk_sp<SkSpecialImage> SkPaintImageFilter::onFilterImage(SkSpecialImage* source,
+ const Context& ctx,
+ SkIPoint* offset) const {
+ SkIRect bounds;
+ const SkIRect srcBounds = SkIRect::MakeWH(source->width(), source->height());
+ if (!this->applyCropRect(ctx, srcBounds, &bounds)) {
+ return nullptr;
+ }
+
+ sk_sp<SkSpecialSurface> surf(source->makeSurface(ctx.outputProperties(), bounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ canvas->clear(0x0);
+
+ SkMatrix matrix(ctx.ctm());
+ matrix.postTranslate(SkIntToScalar(-bounds.left()), SkIntToScalar(-bounds.top()));
+ SkRect rect = SkRect::MakeIWH(bounds.width(), bounds.height());
+ SkMatrix inverse;
+ if (matrix.invert(&inverse)) {
+ inverse.mapRect(&rect);
+ }
+ canvas->setMatrix(matrix);
+ canvas->drawRect(rect, fPaint);
+
+ offset->fX = bounds.fLeft;
+ offset->fY = bounds.fTop;
+ return surf->makeImageSnapshot();
+}
+
+bool SkPaintImageFilter::affectsTransparentBlack() const {
+ return true;
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkPaintImageFilter::toString(SkString* str) const {
+ str->appendf("SkPaintImageFilter: (");
+ fPaint.toString(str);
+ str->append(")");
+}
+#endif
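
A minimal usage sketch of the paint filter above (for reference, not part of the patch; the two-argument CropRect constructor with default edge flags is assumed from this Skia snapshot).

    #include "SkPaint.h"
    #include "SkPaintImageFilter.h"

    // Sketch: a filter that replaces its input with a solid red fill,
    // restricted to a 100x100 crop rect.
    static sk_sp<SkImageFilter> makeRedFill() {
        SkPaint fill;
        fill.setColor(SK_ColorRED);
        const SkImageFilter::CropRect crop(SkRect::MakeWH(100, 100));
        return SkPaintImageFilter::Make(fill, &crop);
    }
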
diff --git a/gfx/skia/skia/src/effects/SkPerlinNoiseShader.cpp b/gfx/skia/skia/src/effects/SkPerlinNoiseShader.cpp
new file mode 100644
index 000000000..caff4958a
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkPerlinNoiseShader.cpp
@@ -0,0 +1,983 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPerlinNoiseShader.h"
+#include "SkColorFilter.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+#include "SkShader.h"
+#include "SkUnPreMultiply.h"
+#include "SkString.h"
+
+#if SK_SUPPORT_GPU
+#include "GrContext.h"
+#include "GrCoordTransform.h"
+#include "GrInvariantOutput.h"
+#include "SkGr.h"
+#include "effects/GrConstColorProcessor.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#endif
+
+static const int kBlockSize = 256;
+static const int kBlockMask = kBlockSize - 1;
+static const int kPerlinNoise = 4096;
+static const int kRandMaximum = SK_MaxS32; // 2**31 - 1
+
+namespace {
+
+// noiseValue is the color component's value (or color)
+// limitValue is the maximum perlin noise array index value allowed
+// newValue is the current noise dimension (either width or height)
+inline int checkNoise(int noiseValue, int limitValue, int newValue) {
+ // If the noise value would bring us out of bounds of the current noise array while we are
+ // stitching noise tiles together, wrap the noise around the current dimension of the noise to
+ // stay within the array bounds in a continuous fashion (so that tiling lines are not visible)
+ if (noiseValue >= limitValue) {
+ noiseValue -= newValue;
+ }
+ return noiseValue;
+}
+
+inline SkScalar smoothCurve(SkScalar t) {
+ static const SkScalar SK_Scalar3 = 3.0f;
+
+ // returns t * t * (3 - 2 * t)
+ return SkScalarMul(SkScalarSquare(t), SK_Scalar3 - 2 * t);
+}
+
+} // end namespace
+
+struct SkPerlinNoiseShader::StitchData {
+ StitchData()
+ : fWidth(0)
+ , fWrapX(0)
+ , fHeight(0)
+ , fWrapY(0)
+ {}
+
+ bool operator==(const StitchData& other) const {
+ return fWidth == other.fWidth &&
+ fWrapX == other.fWrapX &&
+ fHeight == other.fHeight &&
+ fWrapY == other.fWrapY;
+ }
+
+ int fWidth; // How much to subtract to wrap for stitching.
+ int fWrapX; // Minimum value to wrap.
+ int fHeight;
+ int fWrapY;
+};
+
+struct SkPerlinNoiseShader::PaintingData {
+ PaintingData(const SkISize& tileSize, SkScalar seed,
+ SkScalar baseFrequencyX, SkScalar baseFrequencyY,
+ const SkMatrix& matrix)
+ {
+ SkVector vec[2] = {
+ { SkScalarInvert(baseFrequencyX), SkScalarInvert(baseFrequencyY) },
+ { SkIntToScalar(tileSize.fWidth), SkIntToScalar(tileSize.fHeight) },
+ };
+ matrix.mapVectors(vec, 2);
+
+ fBaseFrequency.set(SkScalarInvert(vec[0].fX), SkScalarInvert(vec[0].fY));
+ fTileSize.set(SkScalarRoundToInt(vec[1].fX), SkScalarRoundToInt(vec[1].fY));
+ this->init(seed);
+ if (!fTileSize.isEmpty()) {
+ this->stitch();
+ }
+
+#if SK_SUPPORT_GPU
+ fPermutationsBitmap.setInfo(SkImageInfo::MakeA8(kBlockSize, 1));
+ fPermutationsBitmap.setPixels(fLatticeSelector);
+
+ fNoiseBitmap.setInfo(SkImageInfo::MakeN32Premul(kBlockSize, 4));
+ fNoiseBitmap.setPixels(fNoise[0][0]);
+#endif
+ }
+
+ int fSeed;
+ uint8_t fLatticeSelector[kBlockSize];
+ uint16_t fNoise[4][kBlockSize][2];
+ SkPoint fGradient[4][kBlockSize];
+ SkISize fTileSize;
+ SkVector fBaseFrequency;
+ StitchData fStitchDataInit;
+
+private:
+
+#if SK_SUPPORT_GPU
+ SkBitmap fPermutationsBitmap;
+ SkBitmap fNoiseBitmap;
+#endif
+
+ inline int random() {
+ static const int gRandAmplitude = 16807; // 7**5; primitive root of m
+ static const int gRandQ = 127773; // m / a
+ static const int gRandR = 2836; // m % a
+
+ int result = gRandAmplitude * (fSeed % gRandQ) - gRandR * (fSeed / gRandQ);
+ if (result <= 0)
+ result += kRandMaximum;
+ fSeed = result;
+ return result;
+ }
+
+ // Only called once. Could be part of the constructor.
+ void init(SkScalar seed)
+ {
+ static const SkScalar gInvBlockSizef = SkScalarInvert(SkIntToScalar(kBlockSize));
+
+ // According to the SVG spec, we must truncate (not round) the seed value.
+ fSeed = SkScalarTruncToInt(seed);
+ // The seed value is clamped to the range [1, kRandMaximum - 1].
+ if (fSeed <= 0) {
+ fSeed = -(fSeed % (kRandMaximum - 1)) + 1;
+ }
+ if (fSeed > kRandMaximum - 1) {
+ fSeed = kRandMaximum - 1;
+ }
+ for (int channel = 0; channel < 4; ++channel) {
+ for (int i = 0; i < kBlockSize; ++i) {
+ fLatticeSelector[i] = i;
+ fNoise[channel][i][0] = (random() % (2 * kBlockSize));
+ fNoise[channel][i][1] = (random() % (2 * kBlockSize));
+ }
+ }
+ for (int i = kBlockSize - 1; i > 0; --i) {
+ int k = fLatticeSelector[i];
+ int j = random() % kBlockSize;
+ SkASSERT(j >= 0);
+ SkASSERT(j < kBlockSize);
+ fLatticeSelector[i] = fLatticeSelector[j];
+ fLatticeSelector[j] = k;
+ }
+
+ // Perform the permutations now
+ {
+ // Copy noise data
+ uint16_t noise[4][kBlockSize][2];
+ for (int i = 0; i < kBlockSize; ++i) {
+ for (int channel = 0; channel < 4; ++channel) {
+ for (int j = 0; j < 2; ++j) {
+ noise[channel][i][j] = fNoise[channel][i][j];
+ }
+ }
+ }
+ // Do permutations on noise data
+ for (int i = 0; i < kBlockSize; ++i) {
+ for (int channel = 0; channel < 4; ++channel) {
+ for (int j = 0; j < 2; ++j) {
+ fNoise[channel][i][j] = noise[channel][fLatticeSelector[i]][j];
+ }
+ }
+ }
+ }
+
+ // Half of the largest possible value for 16 bit unsigned int
+ static const SkScalar gHalfMax16bits = 32767.5f;
+
+ // Compute gradients from permutated noise data
+ for (int channel = 0; channel < 4; ++channel) {
+ for (int i = 0; i < kBlockSize; ++i) {
+ fGradient[channel][i] = SkPoint::Make(
+ SkScalarMul(SkIntToScalar(fNoise[channel][i][0] - kBlockSize),
+ gInvBlockSizef),
+ SkScalarMul(SkIntToScalar(fNoise[channel][i][1] - kBlockSize),
+ gInvBlockSizef));
+ fGradient[channel][i].normalize();
+ // Put the normalized gradient back into the noise data
+ fNoise[channel][i][0] = SkScalarRoundToInt(SkScalarMul(
+ fGradient[channel][i].fX + SK_Scalar1, gHalfMax16bits));
+ fNoise[channel][i][1] = SkScalarRoundToInt(SkScalarMul(
+ fGradient[channel][i].fY + SK_Scalar1, gHalfMax16bits));
+ }
+ }
+ }
+
+ // Only called once. Could be part of the constructor.
+ void stitch() {
+ SkScalar tileWidth = SkIntToScalar(fTileSize.width());
+ SkScalar tileHeight = SkIntToScalar(fTileSize.height());
+ SkASSERT(tileWidth > 0 && tileHeight > 0);
+ // When stitching tiled turbulence, the frequencies must be adjusted
+ // so that the tile borders will be continuous.
+ if (fBaseFrequency.fX) {
+ SkScalar lowFrequency =
+ SkScalarFloorToScalar(tileWidth * fBaseFrequency.fX) / tileWidth;
+ SkScalar highFrequency =
+ SkScalarCeilToScalar(tileWidth * fBaseFrequency.fX) / tileWidth;
+ // BaseFrequency should be non-negative according to the standard.
+ if (fBaseFrequency.fX / lowFrequency < highFrequency / fBaseFrequency.fX) {
+ fBaseFrequency.fX = lowFrequency;
+ } else {
+ fBaseFrequency.fX = highFrequency;
+ }
+ }
+ if (fBaseFrequency.fY) {
+ SkScalar lowFrequency =
+ SkScalarFloorToScalar(tileHeight * fBaseFrequency.fY) / tileHeight;
+ SkScalar highFrequency =
+ SkScalarCeilToScalar(tileHeight * fBaseFrequency.fY) / tileHeight;
+ if (fBaseFrequency.fY / lowFrequency < highFrequency / fBaseFrequency.fY) {
+ fBaseFrequency.fY = lowFrequency;
+ } else {
+ fBaseFrequency.fY = highFrequency;
+ }
+ }
+ // Set up TurbulenceInitial stitch values.
+ fStitchDataInit.fWidth =
+ SkScalarRoundToInt(tileWidth * fBaseFrequency.fX);
+ fStitchDataInit.fWrapX = kPerlinNoise + fStitchDataInit.fWidth;
+ fStitchDataInit.fHeight =
+ SkScalarRoundToInt(tileHeight * fBaseFrequency.fY);
+ fStitchDataInit.fWrapY = kPerlinNoise + fStitchDataInit.fHeight;
+ }
+
+public:
+
+#if SK_SUPPORT_GPU
+ const SkBitmap& getPermutationsBitmap() const { return fPermutationsBitmap; }
+
+ const SkBitmap& getNoiseBitmap() const { return fNoiseBitmap; }
+#endif
+};
+
+sk_sp<SkShader> SkPerlinNoiseShader::MakeFractalNoise(SkScalar baseFrequencyX,
+ SkScalar baseFrequencyY,
+ int numOctaves, SkScalar seed,
+ const SkISize* tileSize) {
+ return sk_sp<SkShader>(new SkPerlinNoiseShader(kFractalNoise_Type, baseFrequencyX,
+ baseFrequencyY, numOctaves,
+ seed, tileSize));
+}
+
+sk_sp<SkShader> SkPerlinNoiseShader::MakeTurbulence(SkScalar baseFrequencyX,
+ SkScalar baseFrequencyY,
+ int numOctaves, SkScalar seed,
+ const SkISize* tileSize) {
+ return sk_sp<SkShader>(new SkPerlinNoiseShader(kTurbulence_Type, baseFrequencyX, baseFrequencyY,
+ numOctaves, seed, tileSize));
+}
+
+SkPerlinNoiseShader::SkPerlinNoiseShader(SkPerlinNoiseShader::Type type,
+ SkScalar baseFrequencyX,
+ SkScalar baseFrequencyY,
+ int numOctaves,
+ SkScalar seed,
+ const SkISize* tileSize)
+ : fType(type)
+ , fBaseFrequencyX(baseFrequencyX)
+ , fBaseFrequencyY(baseFrequencyY)
+ , fNumOctaves(SkTPin<int>(numOctaves, 0, 255)) // [0,255] octaves allowed
+ , fSeed(seed)
+ , fTileSize(nullptr == tileSize ? SkISize::Make(0, 0) : *tileSize)
+ , fStitchTiles(!fTileSize.isEmpty())
+{
+ SkASSERT(fNumOctaves >= 0 && fNumOctaves < 256);
+}
+
+SkPerlinNoiseShader::~SkPerlinNoiseShader() {
+}
+
+sk_sp<SkFlattenable> SkPerlinNoiseShader::CreateProc(SkReadBuffer& buffer) {
+ Type type = (Type)buffer.readInt();
+ SkScalar freqX = buffer.readScalar();
+ SkScalar freqY = buffer.readScalar();
+ int octaves = buffer.readInt();
+ SkScalar seed = buffer.readScalar();
+ SkISize tileSize;
+ tileSize.fWidth = buffer.readInt();
+ tileSize.fHeight = buffer.readInt();
+
+ switch (type) {
+ case kFractalNoise_Type:
+ return SkPerlinNoiseShader::MakeFractalNoise(freqX, freqY, octaves, seed,
+ &tileSize);
+ case kTurbulence_Type:
+ return SkPerlinNoiseShader::MakeTurbulence(freqX, freqY, octaves, seed,
+ &tileSize);
+ default:
+ return nullptr;
+ }
+}
+
+void SkPerlinNoiseShader::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeInt((int) fType);
+ buffer.writeScalar(fBaseFrequencyX);
+ buffer.writeScalar(fBaseFrequencyY);
+ buffer.writeInt(fNumOctaves);
+ buffer.writeScalar(fSeed);
+ buffer.writeInt(fTileSize.fWidth);
+ buffer.writeInt(fTileSize.fHeight);
+}
+
+SkScalar SkPerlinNoiseShader::PerlinNoiseShaderContext::noise2D(
+ int channel, const StitchData& stitchData, const SkPoint& noiseVector) const {
+ struct Noise {
+ int noisePositionIntegerValue;
+ int nextNoisePositionIntegerValue;
+ SkScalar noisePositionFractionValue;
+ Noise(SkScalar component)
+ {
+ SkScalar position = component + kPerlinNoise;
+ noisePositionIntegerValue = SkScalarFloorToInt(position);
+ noisePositionFractionValue = position - SkIntToScalar(noisePositionIntegerValue);
+ nextNoisePositionIntegerValue = noisePositionIntegerValue + 1;
+ }
+ };
+ Noise noiseX(noiseVector.x());
+ Noise noiseY(noiseVector.y());
+ SkScalar u, v;
+ const SkPerlinNoiseShader& perlinNoiseShader = static_cast<const SkPerlinNoiseShader&>(fShader);
+ // If stitching, adjust lattice points accordingly.
+ if (perlinNoiseShader.fStitchTiles) {
+ noiseX.noisePositionIntegerValue =
+ checkNoise(noiseX.noisePositionIntegerValue, stitchData.fWrapX, stitchData.fWidth);
+ noiseY.noisePositionIntegerValue =
+ checkNoise(noiseY.noisePositionIntegerValue, stitchData.fWrapY, stitchData.fHeight);
+ noiseX.nextNoisePositionIntegerValue =
+ checkNoise(noiseX.nextNoisePositionIntegerValue, stitchData.fWrapX, stitchData.fWidth);
+ noiseY.nextNoisePositionIntegerValue =
+ checkNoise(noiseY.nextNoisePositionIntegerValue, stitchData.fWrapY, stitchData.fHeight);
+ }
+ noiseX.noisePositionIntegerValue &= kBlockMask;
+ noiseY.noisePositionIntegerValue &= kBlockMask;
+ noiseX.nextNoisePositionIntegerValue &= kBlockMask;
+ noiseY.nextNoisePositionIntegerValue &= kBlockMask;
+ int i =
+ fPaintingData->fLatticeSelector[noiseX.noisePositionIntegerValue];
+ int j =
+ fPaintingData->fLatticeSelector[noiseX.nextNoisePositionIntegerValue];
+ int b00 = (i + noiseY.noisePositionIntegerValue) & kBlockMask;
+ int b10 = (j + noiseY.noisePositionIntegerValue) & kBlockMask;
+ int b01 = (i + noiseY.nextNoisePositionIntegerValue) & kBlockMask;
+ int b11 = (j + noiseY.nextNoisePositionIntegerValue) & kBlockMask;
+ SkScalar sx = smoothCurve(noiseX.noisePositionFractionValue);
+ SkScalar sy = smoothCurve(noiseY.noisePositionFractionValue);
+ // This is taken 1:1 from SVG spec: http://www.w3.org/TR/SVG11/filters.html#feTurbulenceElement
+ SkPoint fractionValue = SkPoint::Make(noiseX.noisePositionFractionValue,
+ noiseY.noisePositionFractionValue); // Offset (0,0)
+ u = fPaintingData->fGradient[channel][b00].dot(fractionValue);
+ fractionValue.fX -= SK_Scalar1; // Offset (-1,0)
+ v = fPaintingData->fGradient[channel][b10].dot(fractionValue);
+ SkScalar a = SkScalarInterp(u, v, sx);
+ fractionValue.fY -= SK_Scalar1; // Offset (-1,-1)
+ v = fPaintingData->fGradient[channel][b11].dot(fractionValue);
+ fractionValue.fX = noiseX.noisePositionFractionValue; // Offset (0,-1)
+ u = fPaintingData->fGradient[channel][b01].dot(fractionValue);
+ SkScalar b = SkScalarInterp(u, v, sx);
+ return SkScalarInterp(a, b, sy);
+}
+
+SkScalar SkPerlinNoiseShader::PerlinNoiseShaderContext::calculateTurbulenceValueForPoint(
+ int channel, StitchData& stitchData, const SkPoint& point) const {
+ const SkPerlinNoiseShader& perlinNoiseShader = static_cast<const SkPerlinNoiseShader&>(fShader);
+ if (perlinNoiseShader.fStitchTiles) {
+ // Set up TurbulenceInitial stitch values.
+ stitchData = fPaintingData->fStitchDataInit;
+ }
+ SkScalar turbulenceFunctionResult = 0;
+ SkPoint noiseVector(SkPoint::Make(SkScalarMul(point.x(), fPaintingData->fBaseFrequency.fX),
+ SkScalarMul(point.y(), fPaintingData->fBaseFrequency.fY)));
+ SkScalar ratio = SK_Scalar1;
+ for (int octave = 0; octave < perlinNoiseShader.fNumOctaves; ++octave) {
+ SkScalar noise = noise2D(channel, stitchData, noiseVector);
+ SkScalar numer = (perlinNoiseShader.fType == kFractalNoise_Type) ?
+ noise : SkScalarAbs(noise);
+ turbulenceFunctionResult += numer / ratio;
+ noiseVector.fX *= 2;
+ noiseVector.fY *= 2;
+ ratio *= 2;
+ if (perlinNoiseShader.fStitchTiles) {
+ // Update stitch values
+ stitchData.fWidth *= 2;
+ stitchData.fWrapX = stitchData.fWidth + kPerlinNoise;
+ stitchData.fHeight *= 2;
+ stitchData.fWrapY = stitchData.fHeight + kPerlinNoise;
+ }
+ }
+
+ // For fractalNoise the result is remapped to ((turbulenceFunctionResult) + 1) / 2;
+ // for turbulence it is used as (turbulenceFunctionResult) unchanged.
+ if (perlinNoiseShader.fType == kFractalNoise_Type) {
+ turbulenceFunctionResult =
+ SkScalarMul(turbulenceFunctionResult, SK_ScalarHalf) + SK_ScalarHalf;
+ }
+
+ if (channel == 3) { // Scale alpha by paint value
+ turbulenceFunctionResult *= SkIntToScalar(getPaintAlpha()) / 255;
+ }
+
+ // Clamp result
+ return SkScalarPin(turbulenceFunctionResult, 0, SK_Scalar1);
+}
+
+SkPMColor SkPerlinNoiseShader::PerlinNoiseShaderContext::shade(
+ const SkPoint& point, StitchData& stitchData) const {
+ SkPoint newPoint;
+ fMatrix.mapPoints(&newPoint, &point, 1);
+ newPoint.fX = SkScalarRoundToScalar(newPoint.fX);
+ newPoint.fY = SkScalarRoundToScalar(newPoint.fY);
+
+ U8CPU rgba[4];
+ for (int channel = 3; channel >= 0; --channel) {
+ rgba[channel] = SkScalarFloorToInt(255 *
+ calculateTurbulenceValueForPoint(channel, stitchData, newPoint));
+ }
+ return SkPreMultiplyARGB(rgba[3], rgba[0], rgba[1], rgba[2]);
+}
+
+SkShader::Context* SkPerlinNoiseShader::onCreateContext(const ContextRec& rec,
+ void* storage) const {
+ return new (storage) PerlinNoiseShaderContext(*this, rec);
+}
+
+size_t SkPerlinNoiseShader::onContextSize(const ContextRec&) const {
+ return sizeof(PerlinNoiseShaderContext);
+}
+
+SkPerlinNoiseShader::PerlinNoiseShaderContext::PerlinNoiseShaderContext(
+ const SkPerlinNoiseShader& shader, const ContextRec& rec)
+ : INHERITED(shader, rec)
+{
+ SkMatrix newMatrix = *rec.fMatrix;
+ newMatrix.preConcat(shader.getLocalMatrix());
+ if (rec.fLocalMatrix) {
+ newMatrix.preConcat(*rec.fLocalMatrix);
+ }
+ // This (1,1) translation is due to WebKit's 1 based coordinates for the noise
+ // (as opposed to 0 based, usually). The same adjustment is in the setData() function.
+ fMatrix.setTranslate(-newMatrix.getTranslateX() + SK_Scalar1, -newMatrix.getTranslateY() + SK_Scalar1);
+ fPaintingData = new PaintingData(shader.fTileSize, shader.fSeed, shader.fBaseFrequencyX,
+ shader.fBaseFrequencyY, newMatrix);
+}
+
+SkPerlinNoiseShader::PerlinNoiseShaderContext::~PerlinNoiseShaderContext() { delete fPaintingData; }
+
+void SkPerlinNoiseShader::PerlinNoiseShaderContext::shadeSpan(
+ int x, int y, SkPMColor result[], int count) {
+ SkPoint point = SkPoint::Make(SkIntToScalar(x), SkIntToScalar(y));
+ StitchData stitchData;
+ for (int i = 0; i < count; ++i) {
+ result[i] = shade(point, stitchData);
+ point.fX += SK_Scalar1;
+ }
+}
+
+/////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+class GrGLPerlinNoise : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrGLSLCaps&, GrProcessorKeyBuilder*);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fStitchDataUni;
+ GrGLSLProgramDataManager::UniformHandle fBaseFrequencyUni;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+/////////////////////////////////////////////////////////////////////
+
+class GrPerlinNoiseEffect : public GrFragmentProcessor {
+public:
+ static sk_sp<GrFragmentProcessor> Make(SkPerlinNoiseShader::Type type,
+ int numOctaves, bool stitchTiles,
+ SkPerlinNoiseShader::PaintingData* paintingData,
+ GrTexture* permutationsTexture, GrTexture* noiseTexture,
+ const SkMatrix& matrix) {
+ return sk_sp<GrFragmentProcessor>(
+ new GrPerlinNoiseEffect(type, numOctaves, stitchTiles, paintingData,
+ permutationsTexture, noiseTexture, matrix));
+ }
+
+ virtual ~GrPerlinNoiseEffect() { delete fPaintingData; }
+
+ const char* name() const override { return "PerlinNoise"; }
+
+ const SkPerlinNoiseShader::StitchData& stitchData() const { return fPaintingData->fStitchDataInit; }
+
+ SkPerlinNoiseShader::Type type() const { return fType; }
+ bool stitchTiles() const { return fStitchTiles; }
+ const SkVector& baseFrequency() const { return fPaintingData->fBaseFrequency; }
+ int numOctaves() const { return fNumOctaves; }
+ const SkMatrix& matrix() const { return fCoordTransform.getMatrix(); }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override {
+ return new GrGLPerlinNoise;
+ }
+
+ virtual void onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const override {
+ GrGLPerlinNoise::GenKey(*this, caps, b);
+ }
+
+ bool onIsEqual(const GrFragmentProcessor& sBase) const override {
+ const GrPerlinNoiseEffect& s = sBase.cast<GrPerlinNoiseEffect>();
+ return fType == s.fType &&
+ fPaintingData->fBaseFrequency == s.fPaintingData->fBaseFrequency &&
+ fNumOctaves == s.fNumOctaves &&
+ fStitchTiles == s.fStitchTiles &&
+ fPaintingData->fStitchDataInit == s.fPaintingData->fStitchDataInit;
+ }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ inout->setToUnknown(GrInvariantOutput::kWillNot_ReadInput);
+ }
+
+ GrPerlinNoiseEffect(SkPerlinNoiseShader::Type type,
+ int numOctaves, bool stitchTiles,
+ SkPerlinNoiseShader::PaintingData* paintingData,
+ GrTexture* permutationsTexture, GrTexture* noiseTexture,
+ const SkMatrix& matrix)
+ : fType(type)
+ , fNumOctaves(numOctaves)
+ , fStitchTiles(stitchTiles)
+ , fPermutationsAccess(permutationsTexture)
+ , fNoiseAccess(noiseTexture)
+ , fPaintingData(paintingData) {
+ this->initClassID<GrPerlinNoiseEffect>();
+ this->addTextureAccess(&fPermutationsAccess);
+ this->addTextureAccess(&fNoiseAccess);
+ fCoordTransform.reset(matrix);
+ this->addCoordTransform(&fCoordTransform);
+ }
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ SkPerlinNoiseShader::Type fType;
+ GrCoordTransform fCoordTransform;
+ int fNumOctaves;
+ bool fStitchTiles;
+ GrTextureAccess fPermutationsAccess;
+ GrTextureAccess fNoiseAccess;
+ SkPerlinNoiseShader::PaintingData *fPaintingData;
+
+private:
+ typedef GrFragmentProcessor INHERITED;
+};
+
+/////////////////////////////////////////////////////////////////////
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrPerlinNoiseEffect);
+
+sk_sp<GrFragmentProcessor> GrPerlinNoiseEffect::TestCreate(GrProcessorTestData* d) {
+ int numOctaves = d->fRandom->nextRangeU(2, 10);
+ bool stitchTiles = d->fRandom->nextBool();
+ SkScalar seed = SkIntToScalar(d->fRandom->nextU());
+ SkISize tileSize = SkISize::Make(d->fRandom->nextRangeU(4, 4096),
+ d->fRandom->nextRangeU(4, 4096));
+ SkScalar baseFrequencyX = d->fRandom->nextRangeScalar(0.01f,
+ 0.99f);
+ SkScalar baseFrequencyY = d->fRandom->nextRangeScalar(0.01f,
+ 0.99f);
+
+ sk_sp<SkShader> shader(d->fRandom->nextBool() ?
+ SkPerlinNoiseShader::MakeFractalNoise(baseFrequencyX, baseFrequencyY, numOctaves, seed,
+ stitchTiles ? &tileSize : nullptr) :
+ SkPerlinNoiseShader::MakeTurbulence(baseFrequencyX, baseFrequencyY, numOctaves, seed,
+ stitchTiles ? &tileSize : nullptr));
+
+ SkMatrix viewMatrix = GrTest::TestMatrix(d->fRandom);
+ auto colorSpace = GrTest::TestColorSpace(d->fRandom);
+ return shader->asFragmentProcessor(SkShader::AsFPArgs(d->fContext, &viewMatrix, nullptr,
+ kNone_SkFilterQuality, colorSpace.get(),
+ SkSourceGammaTreatment::kRespect));
+}
+
+void GrGLPerlinNoise::emitCode(EmitArgs& args) {
+ const GrPerlinNoiseEffect& pne = args.fFp.cast<GrPerlinNoiseEffect>();
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ SkString vCoords = fragBuilder->ensureCoords2D(args.fTransformedCoords[0]);
+
+ fBaseFrequencyUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType, kDefault_GrSLPrecision,
+ "baseFrequency");
+ const char* baseFrequencyUni = uniformHandler->getUniformCStr(fBaseFrequencyUni);
+
+ const char* stitchDataUni = nullptr;
+ if (pne.stitchTiles()) {
+ fStitchDataUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType, kDefault_GrSLPrecision,
+ "stitchData");
+ stitchDataUni = uniformHandler->getUniformCStr(fStitchDataUni);
+ }
+
+ // There are 4 lines, so the center of each line is 1/8, 3/8, 5/8 and 7/8
+ const char* chanCoordR = "0.125";
+ const char* chanCoordG = "0.375";
+ const char* chanCoordB = "0.625";
+ const char* chanCoordA = "0.875";
+ const char* chanCoord = "chanCoord";
+ const char* stitchData = "stitchData";
+ const char* ratio = "ratio";
+ const char* noiseVec = "noiseVec";
+ const char* noiseSmooth = "noiseSmooth";
+ const char* floorVal = "floorVal";
+ const char* fractVal = "fractVal";
+ const char* uv = "uv";
+ const char* ab = "ab";
+ const char* latticeIdx = "latticeIdx";
+ const char* bcoords = "bcoords";
+ const char* lattice = "lattice";
+ const char* inc8bit = "0.00390625"; // 1.0 / 256.0
+ // This is the math to convert the two 16bit integer packed into rgba 8 bit input into a
+ // [-1,1] vector and perform a dot product between that vector and the provided vector.
+ const char* dotLattice = "dot(((%s.ga + %s.rb * vec2(%s)) * vec2(2.0) - vec2(1.0)), %s);";
+
+ // Add noise function
+ static const GrGLSLShaderVar gPerlinNoiseArgs[] = {
+ GrGLSLShaderVar(chanCoord, kFloat_GrSLType),
+ GrGLSLShaderVar(noiseVec, kVec2f_GrSLType)
+ };
+
+ static const GrGLSLShaderVar gPerlinNoiseStitchArgs[] = {
+ GrGLSLShaderVar(chanCoord, kFloat_GrSLType),
+ GrGLSLShaderVar(noiseVec, kVec2f_GrSLType),
+ GrGLSLShaderVar(stitchData, kVec2f_GrSLType)
+ };
+
+ SkString noiseCode;
+
+ noiseCode.appendf("\tvec4 %s;\n", floorVal);
+ noiseCode.appendf("\t%s.xy = floor(%s);\n", floorVal, noiseVec);
+ noiseCode.appendf("\t%s.zw = %s.xy + vec2(1.0);\n", floorVal, floorVal);
+ noiseCode.appendf("\tvec2 %s = fract(%s);\n", fractVal, noiseVec);
+
+ // smooth curve : t * t * (3 - 2 * t)
+ noiseCode.appendf("\n\tvec2 %s = %s * %s * (vec2(3.0) - vec2(2.0) * %s);",
+ noiseSmooth, fractVal, fractVal, fractVal);
+
+ // Adjust frequencies if we're stitching tiles
+ if (pne.stitchTiles()) {
+ noiseCode.appendf("\n\tif(%s.x >= %s.x) { %s.x -= %s.x; }",
+ floorVal, stitchData, floorVal, stitchData);
+ noiseCode.appendf("\n\tif(%s.y >= %s.y) { %s.y -= %s.y; }",
+ floorVal, stitchData, floorVal, stitchData);
+ noiseCode.appendf("\n\tif(%s.z >= %s.x) { %s.z -= %s.x; }",
+ floorVal, stitchData, floorVal, stitchData);
+ noiseCode.appendf("\n\tif(%s.w >= %s.y) { %s.w -= %s.y; }",
+ floorVal, stitchData, floorVal, stitchData);
+ }
+
+ // Get texture coordinates and normalize
+ noiseCode.appendf("\n\t%s = fract(floor(mod(%s, 256.0)) / vec4(256.0));\n",
+ floorVal, floorVal);
+
+ // Get permutation for x
+ {
+ SkString xCoords("");
+ xCoords.appendf("vec2(%s.x, 0.5)", floorVal);
+
+ noiseCode.appendf("\n\tvec2 %s;\n\t%s.x = ", latticeIdx, latticeIdx);
+ fragBuilder->appendTextureLookup(&noiseCode, args.fTexSamplers[0], xCoords.c_str(),
+ kVec2f_GrSLType);
+ noiseCode.append(".r;");
+ }
+
+ // Get permutation for x + 1
+ {
+ SkString xCoords("");
+ xCoords.appendf("vec2(%s.z, 0.5)", floorVal);
+
+ noiseCode.appendf("\n\t%s.y = ", latticeIdx);
+ fragBuilder->appendTextureLookup(&noiseCode, args.fTexSamplers[0], xCoords.c_str(),
+ kVec2f_GrSLType);
+ noiseCode.append(".r;");
+ }
+
+#if defined(SK_BUILD_FOR_ANDROID)
+ // Android rounding for Tegra devices, for example the Xoom (Tegra 2) and Nexus 7 (Tegra 3).
+ // The issue is that colors aren't accurate enough on Tegra devices. For example, if an 8 bit
+ // value of 124 (or 0.486275 here) is entered, we can get a texture value of 123.513725
+ // (or 0.484368 here). The following rounding operation prevents these precision issues from
+ // affecting the result of the noise by making sure that we only have multiples of 1/255.
+ // (Note that 1/255 is about 0.003921569, which is the value used here).
+ noiseCode.appendf("\n\t%s = floor(%s * vec2(255.0) + vec2(0.5)) * vec2(0.003921569);",
+ latticeIdx, latticeIdx);
+#endif
+
+ // Get (x,y) coordinates with the permutated x
+ noiseCode.appendf("\n\tvec4 %s = fract(%s.xyxy + %s.yyww);", bcoords, latticeIdx, floorVal);
+
+ noiseCode.appendf("\n\n\tvec2 %s;", uv);
+ // Compute u, at offset (0,0)
+ {
+ SkString latticeCoords("");
+ latticeCoords.appendf("vec2(%s.x, %s)", bcoords, chanCoord);
+ noiseCode.appendf("\n\tvec4 %s = ", lattice);
+ fragBuilder->appendTextureLookup(&noiseCode, args.fTexSamplers[1], latticeCoords.c_str(),
+ kVec2f_GrSLType);
+ noiseCode.appendf(".bgra;\n\t%s.x = ", uv);
+ noiseCode.appendf(dotLattice, lattice, lattice, inc8bit, fractVal);
+ }
+
+ noiseCode.appendf("\n\t%s.x -= 1.0;", fractVal);
+ // Compute v, at offset (-1,0)
+ {
+ SkString latticeCoords("");
+ latticeCoords.appendf("vec2(%s.y, %s)", bcoords, chanCoord);
+ noiseCode.append("\n\tlattice = ");
+ fragBuilder->appendTextureLookup(&noiseCode, args.fTexSamplers[1], latticeCoords.c_str(),
+ kVec2f_GrSLType);
+ noiseCode.appendf(".bgra;\n\t%s.y = ", uv);
+ noiseCode.appendf(dotLattice, lattice, lattice, inc8bit, fractVal);
+ }
+
+ // Compute 'a' as a linear interpolation of 'u' and 'v'
+ noiseCode.appendf("\n\tvec2 %s;", ab);
+ noiseCode.appendf("\n\t%s.x = mix(%s.x, %s.y, %s.x);", ab, uv, uv, noiseSmooth);
+
+ noiseCode.appendf("\n\t%s.y -= 1.0;", fractVal);
+ // Compute v, at offset (-1,-1)
+ {
+ SkString latticeCoords("");
+ latticeCoords.appendf("vec2(%s.w, %s)", bcoords, chanCoord);
+ noiseCode.append("\n\tlattice = ");
+ fragBuilder->appendTextureLookup(&noiseCode, args.fTexSamplers[1], latticeCoords.c_str(),
+ kVec2f_GrSLType);
+ noiseCode.appendf(".bgra;\n\t%s.y = ", uv);
+ noiseCode.appendf(dotLattice, lattice, lattice, inc8bit, fractVal);
+ }
+
+ noiseCode.appendf("\n\t%s.x += 1.0;", fractVal);
+ // Compute u, at offset (0,-1)
+ {
+ SkString latticeCoords("");
+ latticeCoords.appendf("vec2(%s.z, %s)", bcoords, chanCoord);
+ noiseCode.append("\n\tlattice = ");
+ fragBuilder->appendTextureLookup(&noiseCode, args.fTexSamplers[1], latticeCoords.c_str(),
+ kVec2f_GrSLType);
+ noiseCode.appendf(".bgra;\n\t%s.x = ", uv);
+ noiseCode.appendf(dotLattice, lattice, lattice, inc8bit, fractVal);
+ }
+
+ // Compute 'b' as a linear interpolation of 'u' and 'v'
+ noiseCode.appendf("\n\t%s.y = mix(%s.x, %s.y, %s.x);", ab, uv, uv, noiseSmooth);
+ // Compute the noise as a linear interpolation of 'a' and 'b'
+ noiseCode.appendf("\n\treturn mix(%s.x, %s.y, %s.y);\n", ab, ab, noiseSmooth);
+
+ SkString noiseFuncName;
+ if (pne.stitchTiles()) {
+ fragBuilder->emitFunction(kFloat_GrSLType,
+ "perlinnoise", SK_ARRAY_COUNT(gPerlinNoiseStitchArgs),
+ gPerlinNoiseStitchArgs, noiseCode.c_str(), &noiseFuncName);
+ } else {
+ fragBuilder->emitFunction(kFloat_GrSLType,
+ "perlinnoise", SK_ARRAY_COUNT(gPerlinNoiseArgs),
+ gPerlinNoiseArgs, noiseCode.c_str(), &noiseFuncName);
+ }
+
+ // There are rounding errors if the floor operation is not performed here
+ fragBuilder->codeAppendf("\n\t\tvec2 %s = floor(%s.xy) * %s;",
+ noiseVec, vCoords.c_str(), baseFrequencyUni);
+
+ // Clear the color accumulator
+ fragBuilder->codeAppendf("\n\t\t%s = vec4(0.0);", args.fOutputColor);
+
+ if (pne.stitchTiles()) {
+ // Set up TurbulenceInitial stitch values.
+ fragBuilder->codeAppendf("vec2 %s = %s;", stitchData, stitchDataUni);
+ }
+
+ fragBuilder->codeAppendf("float %s = 1.0;", ratio);
+
+ // Loop over all octaves
+ fragBuilder->codeAppendf("for (int octave = 0; octave < %d; ++octave) {", pne.numOctaves());
+
+ fragBuilder->codeAppendf("%s += ", args.fOutputColor);
+ if (pne.type() != SkPerlinNoiseShader::kFractalNoise_Type) {
+ fragBuilder->codeAppend("abs(");
+ }
+ if (pne.stitchTiles()) {
+ fragBuilder->codeAppendf(
+ "vec4(\n\t\t\t\t%s(%s, %s, %s),\n\t\t\t\t%s(%s, %s, %s),"
+ "\n\t\t\t\t%s(%s, %s, %s),\n\t\t\t\t%s(%s, %s, %s))",
+ noiseFuncName.c_str(), chanCoordR, noiseVec, stitchData,
+ noiseFuncName.c_str(), chanCoordG, noiseVec, stitchData,
+ noiseFuncName.c_str(), chanCoordB, noiseVec, stitchData,
+ noiseFuncName.c_str(), chanCoordA, noiseVec, stitchData);
+ } else {
+ fragBuilder->codeAppendf(
+ "vec4(\n\t\t\t\t%s(%s, %s),\n\t\t\t\t%s(%s, %s),"
+ "\n\t\t\t\t%s(%s, %s),\n\t\t\t\t%s(%s, %s))",
+ noiseFuncName.c_str(), chanCoordR, noiseVec,
+ noiseFuncName.c_str(), chanCoordG, noiseVec,
+ noiseFuncName.c_str(), chanCoordB, noiseVec,
+ noiseFuncName.c_str(), chanCoordA, noiseVec);
+ }
+ if (pne.type() != SkPerlinNoiseShader::kFractalNoise_Type) {
+ fragBuilder->codeAppendf(")"); // end of "abs("
+ }
+ fragBuilder->codeAppendf(" * %s;", ratio);
+
+ fragBuilder->codeAppendf("\n\t\t\t%s *= vec2(2.0);", noiseVec);
+ fragBuilder->codeAppendf("\n\t\t\t%s *= 0.5;", ratio);
+
+ if (pne.stitchTiles()) {
+ fragBuilder->codeAppendf("\n\t\t\t%s *= vec2(2.0);", stitchData);
+ }
+ fragBuilder->codeAppend("\n\t\t}"); // end of the for loop on octaves
+
+ if (pne.type() == SkPerlinNoiseShader::kFractalNoise_Type) {
+ // For fractalNoise the result is remapped to ((turbulenceFunctionResult) + 1) / 2;
+ // for turbulence it is used as (turbulenceFunctionResult) unchanged.
+ fragBuilder->codeAppendf("\n\t\t%s = %s * vec4(0.5) + vec4(0.5);",
+ args.fOutputColor, args.fOutputColor);
+ }
+
+ // Clamp values
+ fragBuilder->codeAppendf("\n\t\t%s = clamp(%s, 0.0, 1.0);", args.fOutputColor, args.fOutputColor);
+
+ // Pre-multiply the result
+ fragBuilder->codeAppendf("\n\t\t%s = vec4(%s.rgb * %s.aaa, %s.a);\n",
+ args.fOutputColor, args.fOutputColor,
+ args.fOutputColor, args.fOutputColor);
+}
+
+void GrGLPerlinNoise::GenKey(const GrProcessor& processor, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrPerlinNoiseEffect& turbulence = processor.cast<GrPerlinNoiseEffect>();
+
+ uint32_t key = turbulence.numOctaves();
+
+ key = key << 3; // Make room for next 3 bits
+
+ switch (turbulence.type()) {
+ case SkPerlinNoiseShader::kFractalNoise_Type:
+ key |= 0x1;
+ break;
+ case SkPerlinNoiseShader::kTurbulence_Type:
+ key |= 0x2;
+ break;
+ default:
+ // leave key at 0
+ break;
+ }
+
+ if (turbulence.stitchTiles()) {
+ key |= 0x4; // Flip the 3rd bit if tile stitching is on
+ }
+
+ b->add32(key);
+}
+
+void GrGLPerlinNoise::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& processor) {
+ INHERITED::onSetData(pdman, processor);
+
+ const GrPerlinNoiseEffect& turbulence = processor.cast<GrPerlinNoiseEffect>();
+
+ const SkVector& baseFrequency = turbulence.baseFrequency();
+ pdman.set2f(fBaseFrequencyUni, baseFrequency.fX, baseFrequency.fY);
+
+ if (turbulence.stitchTiles()) {
+ const SkPerlinNoiseShader::StitchData& stitchData = turbulence.stitchData();
+ pdman.set2f(fStitchDataUni, SkIntToScalar(stitchData.fWidth),
+ SkIntToScalar(stitchData.fHeight));
+ }
+}
+
+/////////////////////////////////////////////////////////////////////
+sk_sp<GrFragmentProcessor> SkPerlinNoiseShader::asFragmentProcessor(const AsFPArgs& args) const {
+ SkASSERT(args.fContext);
+
+ SkMatrix localMatrix = this->getLocalMatrix();
+ if (args.fLocalMatrix) {
+ localMatrix.preConcat(*args.fLocalMatrix);
+ }
+
+ SkMatrix matrix = *args.fViewMatrix;
+ matrix.preConcat(localMatrix);
+
+ if (0 == fNumOctaves) {
+ if (kFractalNoise_Type == fType) {
+ // Extract the incoming alpha and emit rgba = (a/4, a/4, a/4, a/2)
+ sk_sp<GrFragmentProcessor> inner(
+ GrConstColorProcessor::Make(0x80404040,
+ GrConstColorProcessor::kModulateRGBA_InputMode));
+ return GrFragmentProcessor::MulOutputByInputAlpha(std::move(inner));
+ }
+ // Emit zero.
+ return GrConstColorProcessor::Make(0x0, GrConstColorProcessor::kIgnore_InputMode);
+ }
+
+ // Either we don't stitch tiles, or we have a valid tile size
+ SkASSERT(!fStitchTiles || !fTileSize.isEmpty());
+
+ SkPerlinNoiseShader::PaintingData* paintingData =
+ new PaintingData(fTileSize, fSeed, fBaseFrequencyX, fBaseFrequencyY, matrix);
+ SkAutoTUnref<GrTexture> permutationsTexture(
+ GrRefCachedBitmapTexture(args.fContext, paintingData->getPermutationsBitmap(),
+ GrTextureParams::ClampNoFilter(), args.fGammaTreatment));
+ SkAutoTUnref<GrTexture> noiseTexture(
+ GrRefCachedBitmapTexture(args.fContext, paintingData->getNoiseBitmap(),
+ GrTextureParams::ClampNoFilter(), args.fGammaTreatment));
+
+ SkMatrix m = *args.fViewMatrix;
+ m.setTranslateX(-localMatrix.getTranslateX() + SK_Scalar1);
+ m.setTranslateY(-localMatrix.getTranslateY() + SK_Scalar1);
+ if ((permutationsTexture) && (noiseTexture)) {
+ sk_sp<GrFragmentProcessor> inner(
+ GrPerlinNoiseEffect::Make(fType,
+ fNumOctaves,
+ fStitchTiles,
+ paintingData,
+ permutationsTexture, noiseTexture,
+ m));
+ return GrFragmentProcessor::MulOutputByInputAlpha(std::move(inner));
+ }
+ delete paintingData;
+ return nullptr;
+}
+
+#endif
+
+#ifndef SK_IGNORE_TO_STRING
+void SkPerlinNoiseShader::toString(SkString* str) const {
+ str->append("SkPerlinNoiseShader: (");
+
+ str->append("type: ");
+ switch (fType) {
+ case kFractalNoise_Type:
+ str->append("\"fractal noise\"");
+ break;
+ case kTurbulence_Type:
+ str->append("\"turbulence\"");
+ break;
+ default:
+ str->append("\"unknown\"");
+ break;
+ }
+ str->append(" base frequency: (");
+ str->appendScalar(fBaseFrequencyX);
+ str->append(", ");
+ str->appendScalar(fBaseFrequencyY);
+ str->append(") number of octaves: ");
+ str->appendS32(fNumOctaves);
+ str->append(" seed: ");
+ str->appendScalar(fSeed);
+ str->append(" stitch tiles: ");
+ str->append(fStitchTiles ? "true " : "false ");
+
+ this->INHERITED::toString(str);
+
+ str->append(")");
+}
+#endif
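
To close out this file, a minimal sketch of how the shader is typically created and drawn (for reference, not part of the patch; the frequencies, octave count, and tile size are illustrative values).

    #include "SkCanvas.h"
    #include "SkPaint.h"
    #include "SkPerlinNoiseShader.h"

    // Sketch: fill a 256x256 rect with stitched turbulence noise.
    static void drawTurbulence(SkCanvas* canvas) {
        const SkISize tile = SkISize::Make(256, 256);
        SkPaint paint;
        paint.setShader(SkPerlinNoiseShader::MakeTurbulence(0.05f, 0.05f,
                                                            /*numOctaves=*/4,
                                                            /*seed=*/0.0f, &tile));
        canvas->drawRect(SkRect::MakeWH(256, 256), paint);
    }
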
diff --git a/gfx/skia/skia/src/effects/SkPictureImageFilter.cpp b/gfx/skia/skia/src/effects/SkPictureImageFilter.cpp
new file mode 100644
index 000000000..6539104a3
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkPictureImageFilter.cpp
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPictureImageFilter.h"
+
+#include "SkCanvas.h"
+#include "SkReadBuffer.h"
+#include "SkSpecialImage.h"
+#include "SkSpecialSurface.h"
+#include "SkWriteBuffer.h"
+#include "SkValidationUtils.h"
+
+sk_sp<SkImageFilter> SkPictureImageFilter::Make(sk_sp<SkPicture> picture) {
+ return sk_sp<SkImageFilter>(new SkPictureImageFilter(std::move(picture)));
+}
+
+sk_sp<SkImageFilter> SkPictureImageFilter::Make(sk_sp<SkPicture> picture,
+ const SkRect& cropRect) {
+ return sk_sp<SkImageFilter>(new SkPictureImageFilter(std::move(picture),
+ cropRect,
+ kDeviceSpace_PictureResolution,
+ kLow_SkFilterQuality));
+}
+
+sk_sp<SkImageFilter> SkPictureImageFilter::MakeForLocalSpace(sk_sp<SkPicture> picture,
+ const SkRect& cropRect,
+ SkFilterQuality filterQuality) {
+ return sk_sp<SkImageFilter>(new SkPictureImageFilter(std::move(picture),
+ cropRect,
+ kLocalSpace_PictureResolution,
+ filterQuality));
+}
+
+SkPictureImageFilter::SkPictureImageFilter(sk_sp<SkPicture> picture)
+ : INHERITED(nullptr, 0, nullptr)
+ , fPicture(std::move(picture))
+ , fCropRect(fPicture ? fPicture->cullRect() : SkRect::MakeEmpty())
+ , fPictureResolution(kDeviceSpace_PictureResolution)
+ , fFilterQuality(kLow_SkFilterQuality) {
+}
+
+SkPictureImageFilter::SkPictureImageFilter(sk_sp<SkPicture> picture, const SkRect& cropRect,
+ PictureResolution pictureResolution,
+ SkFilterQuality filterQuality)
+ : INHERITED(nullptr, 0, nullptr)
+ , fPicture(std::move(picture))
+ , fCropRect(cropRect)
+ , fPictureResolution(pictureResolution)
+ , fFilterQuality(filterQuality) {
+}
+
+sk_sp<SkFlattenable> SkPictureImageFilter::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkPicture> picture;
+ SkRect cropRect;
+
+ if (buffer.isCrossProcess() && SkPicture::PictureIOSecurityPrecautionsEnabled()) {
+ buffer.validate(!buffer.readBool());
+ } else {
+ if (buffer.readBool()) {
+ picture = SkPicture::MakeFromBuffer(buffer);
+ }
+ }
+ buffer.readRect(&cropRect);
+ PictureResolution pictureResolution;
+ if (buffer.isVersionLT(SkReadBuffer::kPictureImageFilterResolution_Version)) {
+ pictureResolution = kDeviceSpace_PictureResolution;
+ } else {
+ pictureResolution = (PictureResolution)buffer.readInt();
+ }
+
+ if (kLocalSpace_PictureResolution == pictureResolution) {
+ // filterQuality is only serialized when pictureResolution is kLocalSpace_PictureResolution
+ SkFilterQuality filterQuality;
+ if (buffer.isVersionLT(SkReadBuffer::kPictureImageFilterLevel_Version)) {
+ filterQuality = kLow_SkFilterQuality;
+ } else {
+ filterQuality = (SkFilterQuality)buffer.readInt();
+ }
+ return MakeForLocalSpace(picture, cropRect, filterQuality);
+ }
+ return Make(picture, cropRect);
+}
+
+void SkPictureImageFilter::flatten(SkWriteBuffer& buffer) const {
+ if (buffer.isCrossProcess() && SkPicture::PictureIOSecurityPrecautionsEnabled()) {
+ buffer.writeBool(false);
+ } else {
+ bool hasPicture = (fPicture != nullptr);
+ buffer.writeBool(hasPicture);
+ if (hasPicture) {
+ fPicture->flatten(buffer);
+ }
+ }
+ buffer.writeRect(fCropRect);
+ buffer.writeInt(fPictureResolution);
+ if (kLocalSpace_PictureResolution == fPictureResolution) {
+ buffer.writeInt(fFilterQuality);
+ }
+}
+
+sk_sp<SkSpecialImage> SkPictureImageFilter::onFilterImage(SkSpecialImage* source,
+ const Context& ctx,
+ SkIPoint* offset) const {
+ if (!fPicture) {
+ return nullptr;
+ }
+
+ SkRect floatBounds;
+ ctx.ctm().mapRect(&floatBounds, fCropRect);
+ SkIRect bounds = floatBounds.roundOut();
+ if (!bounds.intersect(ctx.clipBounds())) {
+ return nullptr;
+ }
+
+ SkASSERT(!bounds.isEmpty());
+
+ sk_sp<SkSpecialSurface> surf(source->makeSurface(ctx.outputProperties(), bounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ canvas->clear(0x0);
+
+ if (kDeviceSpace_PictureResolution == fPictureResolution ||
+ 0 == (ctx.ctm().getType() & ~SkMatrix::kTranslate_Mask)) {
+ this->drawPictureAtDeviceResolution(canvas, bounds, ctx);
+ } else {
+ this->drawPictureAtLocalResolution(source, canvas, bounds, ctx);
+ }
+
+ offset->fX = bounds.fLeft;
+ offset->fY = bounds.fTop;
+ return surf->makeImageSnapshot();
+}
+
+void SkPictureImageFilter::drawPictureAtDeviceResolution(SkCanvas* canvas,
+ const SkIRect& deviceBounds,
+ const Context& ctx) const {
+ canvas->translate(-SkIntToScalar(deviceBounds.fLeft), -SkIntToScalar(deviceBounds.fTop));
+ canvas->concat(ctx.ctm());
+ canvas->drawPicture(fPicture);
+}
+
+void SkPictureImageFilter::drawPictureAtLocalResolution(SkSpecialImage* source,
+ SkCanvas* canvas,
+ const SkIRect& deviceBounds,
+ const Context& ctx) const {
+ SkMatrix inverseCtm;
+ if (!ctx.ctm().invert(&inverseCtm)) {
+ return;
+ }
+
+ SkRect localBounds = SkRect::Make(ctx.clipBounds());
+ inverseCtm.mapRect(&localBounds);
+ if (!localBounds.intersect(fCropRect)) {
+ return;
+ }
+ SkIRect localIBounds = localBounds.roundOut();
+
+ sk_sp<SkSpecialImage> localImg;
+ {
+ sk_sp<SkSpecialSurface> localSurface(source->makeSurface(ctx.outputProperties(),
+ localIBounds.size()));
+ if (!localSurface) {
+ return;
+ }
+
+ SkCanvas* localCanvas = localSurface->getCanvas();
+ SkASSERT(localCanvas);
+
+ localCanvas->clear(0x0);
+
+ localCanvas->translate(-SkIntToScalar(localIBounds.fLeft),
+ -SkIntToScalar(localIBounds.fTop));
+ localCanvas->drawPicture(fPicture);
+
+ localImg = localSurface->makeImageSnapshot();
+ SkASSERT(localImg);
+ }
+
+ {
+ canvas->translate(-SkIntToScalar(deviceBounds.fLeft), -SkIntToScalar(deviceBounds.fTop));
+ canvas->concat(ctx.ctm());
+ SkPaint paint;
+ paint.setFilterQuality(fFilterQuality);
+
+ localImg->draw(canvas,
+ SkIntToScalar(localIBounds.fLeft),
+ SkIntToScalar(localIBounds.fTop),
+ &paint);
+ }
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkPictureImageFilter::toString(SkString* str) const {
+ str->appendf("SkPictureImageFilter: (");
+ str->appendf("crop: (%f,%f,%f,%f) ",
+ fCropRect.fLeft, fCropRect.fTop, fCropRect.fRight, fCropRect.fBottom);
+ if (fPicture) {
+ str->appendf("picture: (%f,%f,%f,%f)",
+ fPicture->cullRect().fLeft, fPicture->cullRect().fTop,
+ fPicture->cullRect().fRight, fPicture->cullRect().fBottom);
+ }
+ str->append(")");
+}
+#endif
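
Lastly, a small sketch of wrapping a recorded picture in the filter above (for reference, not part of the patch; SkPictureRecorder is assumed from this same Skia snapshot).

    #include <utility>
    #include "SkCanvas.h"
    #include "SkPictureImageFilter.h"
    #include "SkPictureRecorder.h"

    // Sketch: record a tiny picture and expose it as an image filter
    // that replays it within a 50x50 crop.
    static sk_sp<SkImageFilter> makePictureFilter() {
        SkPictureRecorder recorder;
        SkCanvas* rec = recorder.beginRecording(SkRect::MakeWH(100, 100));
        rec->drawColor(SK_ColorBLUE);
        sk_sp<SkPicture> pic = recorder.finishRecordingAsPicture();
        return SkPictureImageFilter::Make(std::move(pic), SkRect::MakeWH(50, 50));
    }
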
diff --git a/gfx/skia/skia/src/effects/SkRRectsGaussianEdgeShader.cpp b/gfx/skia/skia/src/effects/SkRRectsGaussianEdgeShader.cpp
new file mode 100644
index 000000000..e7703f964
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkRRectsGaussianEdgeShader.cpp
@@ -0,0 +1,433 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkRRectsGaussianEdgeShader.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+
+ /** \class SkRRectsGaussianEdgeShaderImpl
+ * This shader applies a gaussian edge to the intersection of two round rects.
+ * The round rects must have the same radii at each corner and the x&y radii
+ * must also be equal.
+ */
+class SkRRectsGaussianEdgeShaderImpl : public SkShader {
+public:
+ SkRRectsGaussianEdgeShaderImpl(const SkRRect& first, const SkRRect& second, SkScalar radius)
+ : fFirst(first)
+ , fSecond(second)
+ , fRadius(radius) {
+ }
+
+ bool isOpaque() const override { return false; }
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(const AsFPArgs&) const override;
+#endif
+
+ class GaussianEdgeShaderContext : public SkShader::Context {
+ public:
+ GaussianEdgeShaderContext(const SkRRectsGaussianEdgeShaderImpl&, const ContextRec&);
+
+ ~GaussianEdgeShaderContext() override { }
+
+ void shadeSpan(int x, int y, SkPMColor[], int count) override;
+
+ uint32_t getFlags() const override { return 0; }
+
+ private:
+ SkColor fPaintColor;
+
+ typedef SkShader::Context INHERITED;
+ };
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkRRectsGaussianEdgeShaderImpl)
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ size_t onContextSize(const ContextRec&) const override;
+ Context* onCreateContext(const ContextRec&, void*) const override;
+
+private:
+ SkRRect fFirst;
+ SkRRect fSecond;
+ SkScalar fRadius;
+
+ friend class SkRRectsGaussianEdgeShader; // for serialization registration system
+
+ typedef SkShader INHERITED;
+};
+
+////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+#include "GrCoordTransform.h"
+#include "GrFragmentProcessor.h"
+#include "GrInvariantOutput.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "SkGr.h"
+#include "SkGrPriv.h"
+
+class RRectsGaussianEdgeFP : public GrFragmentProcessor {
+public:
+ enum Mode {
+ kCircle_Mode,
+ kRect_Mode,
+ kSimpleCircular_Mode,
+ };
+
+ RRectsGaussianEdgeFP(const SkRRect& first, const SkRRect& second, SkScalar radius)
+ : fFirst(first)
+ , fSecond(second)
+ , fRadius(radius) {
+ this->initClassID<RRectsGaussianEdgeFP>();
+ this->setWillReadFragmentPosition();
+
+ fFirstMode = ComputeMode(fFirst);
+ fSecondMode = ComputeMode(fSecond);
+ }
+
+ class GLSLRRectsGaussianEdgeFP : public GrGLSLFragmentProcessor {
+ public:
+ GLSLRRectsGaussianEdgeFP() { }
+
+ // This method emits code so that, for each shape, the distance from the edge is returned
+ // in 'outputName' clamped to 0..1 with positive distance being towards the center of the
+ // shape. The distance will have been normalized by the radius.
+ void emitModeCode(Mode mode,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ const char* posName,
+ const char* sizesName,
+ const char* radiiName,
+ const char* radName,
+ const char* outputName,
+ const char indices[2]) { // how to access the params for the 2 rrects
+
+ // positive distance is towards the center of the circle
+ fragBuilder->codeAppendf("vec2 delta = %s.xy - %s.%s;",
+ fragBuilder->fragmentPosition(), posName, indices);
+
+ switch (mode) {
+ case kCircle_Mode:
+ // When a shadow circle gets large we can have some precision issues if
+ // we do "length(delta)/radius". The scaleDist temporary cuts the
+ // delta vector down a bit before invoking length.
+ fragBuilder->codeAppendf("float scaledDist = length(delta/%s);", radName);
+ fragBuilder->codeAppendf("%s = clamp((%s.%c/%s - scaledDist), 0.0, 1.0);",
+ outputName, sizesName, indices[0], radName);
+ break;
+ case kRect_Mode:
+ fragBuilder->codeAppendf(
+ "vec2 rectDist = vec2(1.0 - clamp((%s.%c - abs(delta.x))/%s, 0.0, 1.0),"
+ "1.0 - clamp((%s.%c - abs(delta.y))/%s, 0.0, 1.0));",
+ sizesName, indices[0], radName,
+ sizesName, indices[1], radName);
+ fragBuilder->codeAppendf("%s = clamp(1.0 - length(rectDist), 0.0, 1.0);",
+ outputName);
+ break;
+ case kSimpleCircular_Mode:
+ // For the circular round rect we first compute the distance
+ // to the rect. Then we compute a multiplier that is 1 if the
+ // point is in one of the circular corners. We then compute the
+ // distance from the corner and then use the multiplier to mask
+ // between the two distances.
+ fragBuilder->codeAppendf("float xDist = clamp((%s.%c - abs(delta.x))/%s,"
+ "0.0, 1.0);",
+ sizesName, indices[0], radName);
+ fragBuilder->codeAppendf("float yDist = clamp((%s.%c - abs(delta.y))/%s,"
+ "0.0, 1.0);",
+ sizesName, indices[1], radName);
+ fragBuilder->codeAppend("float rectDist = min(xDist, yDist);");
+
+ fragBuilder->codeAppendf("vec2 cornerCenter = %s.%s - %s.%s;",
+ sizesName, indices, radiiName, indices);
+ fragBuilder->codeAppend("delta = vec2(abs(delta.x) - cornerCenter.x,"
+ "abs(delta.y) - cornerCenter.y);");
+ fragBuilder->codeAppendf("xDist = %s.%c - abs(delta.x);", radiiName, indices[0]);
+ fragBuilder->codeAppendf("yDist = %s.%c - abs(delta.y);", radiiName, indices[1]);
+ fragBuilder->codeAppend("float cornerDist = min(xDist, yDist);");
+ fragBuilder->codeAppend("float multiplier = step(0.0, cornerDist);");
+
+ fragBuilder->codeAppendf("delta += %s.%s;", radiiName, indices);
+
+ fragBuilder->codeAppendf("cornerDist = clamp((2.0 * %s.%c - length(delta))/%s,"
+ "0.0, 1.0);",
+ radiiName, indices[0], radName);
+
+ fragBuilder->codeAppendf("%s = (multiplier * cornerDist) +"
+ "((1.0-multiplier) * rectDist);",
+ outputName);
+ break;
+ }
+ }
+
+ void emitCode(EmitArgs& args) override {
+ const RRectsGaussianEdgeFP& fp = args.fFp.cast<RRectsGaussianEdgeFP>();
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ const char* positionsUniName = nullptr;
+ fPositionsUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType, kDefault_GrSLPrecision,
+ "Positions", &positionsUniName);
+ const char* sizesUniName = nullptr;
+ fSizesUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType, kDefault_GrSLPrecision,
+ "Sizes", &sizesUniName);
+ const char* radiiUniName = nullptr;
+ if (fp.fFirstMode == kSimpleCircular_Mode || fp.fSecondMode == kSimpleCircular_Mode) {
+ fRadiiUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType, kDefault_GrSLPrecision,
+ "Radii", &radiiUniName);
+ }
+ const char* radUniName = nullptr;
+ fRadiusUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType, kDefault_GrSLPrecision,
+ "Radius", &radUniName);
+
+ fragBuilder->codeAppend("float firstDist;");
+ fragBuilder->codeAppend("{");
+ this->emitModeCode(fp.firstMode(), fragBuilder,
+ positionsUniName, sizesUniName, radiiUniName,
+ radUniName, "firstDist", "xy");
+ fragBuilder->codeAppend("}");
+
+ fragBuilder->codeAppend("float secondDist;");
+ fragBuilder->codeAppend("{");
+ this->emitModeCode(fp.secondMode(), fragBuilder,
+ positionsUniName, sizesUniName, radiiUniName,
+ radUniName, "secondDist", "zw");
+ fragBuilder->codeAppend("}");
+
+ fragBuilder->codeAppend("vec2 distVec = vec2(1.0 - firstDist, 1.0 - secondDist);");
+
+ // Finally use the distance to apply the Gaussian edge
+ fragBuilder->codeAppend("float factor = clamp(length(distVec), 0.0, 1.0);");
+ fragBuilder->codeAppend("factor = exp(-factor * factor * 4.0) - 0.018;");
+ fragBuilder->codeAppendf("%s = factor*%s;",
+ args.fOutputColor, args.fInputColor);
+ }
+
+ static void GenKey(const GrProcessor& proc, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const RRectsGaussianEdgeFP& fp = proc.cast<RRectsGaussianEdgeFP>();
+
+ b->add32(fp.firstMode() | (fp.secondMode() << 4));
+ }
+
+ protected:
+ void onSetData(const GrGLSLProgramDataManager& pdman, const GrProcessor& proc) override {
+ const RRectsGaussianEdgeFP& edgeFP = proc.cast<RRectsGaussianEdgeFP>();
+
+ const SkRRect& first = edgeFP.first();
+ const SkRRect& second = edgeFP.second();
+
+ pdman.set4f(fPositionsUni,
+ first.getBounds().centerX(),
+ first.getBounds().centerY(),
+ second.getBounds().centerX(),
+ second.getBounds().centerY());
+
+ pdman.set4f(fSizesUni,
+ 0.5f * first.rect().width(),
+ 0.5f * first.rect().height(),
+ 0.5f * second.rect().width(),
+ 0.5f * second.rect().height());
+
+ if (edgeFP.firstMode() == kSimpleCircular_Mode ||
+ edgeFP.secondMode() == kSimpleCircular_Mode) {
+ // This is a bit of overkill since fX should equal fY for both round rects but it
+ // makes the shader code simpler.
+ pdman.set4f(fRadiiUni,
+ 0.5f * first.getSimpleRadii().fX,
+ 0.5f * first.getSimpleRadii().fY,
+ 0.5f * second.getSimpleRadii().fX,
+ 0.5f * second.getSimpleRadii().fY);
+ }
+
+ pdman.set1f(fRadiusUni, edgeFP.radius());
+ }
+
+ private:
+ // The centers of the two round rects (x1, y1, x2, y2)
+ GrGLSLProgramDataManager::UniformHandle fPositionsUni;
+
+ // The half widths and half heights of the two round rects (w1/2, h1/2, w2/2, h2/2)
+ // For circles we still upload both width & height to simplify things
+ GrGLSLProgramDataManager::UniformHandle fSizesUni;
+
+ // The half corner radii of the two round rects (rx1/2, ry1/2, rx2/2, ry2/2)
+ // We upload both the x&y radii (although they are currently always the same) to make
+ // the indexing in the shader code simpler. In some future world we could also support
+ // non-circular corner round rects & ellipses.
+ GrGLSLProgramDataManager::UniformHandle fRadiiUni;
+
+ // The radius parameters (radius)
+ GrGLSLProgramDataManager::UniformHandle fRadiusUni;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+ };
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLRRectsGaussianEdgeFP::GenKey(*this, caps, b);
+ }
+
+ const char* name() const override { return "RRectsGaussianEdgeFP"; }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ inout->setToUnknown(GrInvariantOutput::kWill_ReadInput);
+ }
+
+ const SkRRect& first() const { return fFirst; }
+ Mode firstMode() const { return fFirstMode; }
+ const SkRRect& second() const { return fSecond; }
+ Mode secondMode() const { return fSecondMode; }
+ SkScalar radius() const { return fRadius; }
+
+private:
+ static Mode ComputeMode(const SkRRect& rr) {
+ if (rr.isCircle()) {
+ return kCircle_Mode;
+ } else if (rr.isRect()) {
+ return kRect_Mode;
+ } else {
+ SkASSERT(rr.isSimpleCircular());
+ return kSimpleCircular_Mode;
+ }
+ }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override {
+ return new GLSLRRectsGaussianEdgeFP;
+ }
+
+ bool onIsEqual(const GrFragmentProcessor& proc) const override {
+ const RRectsGaussianEdgeFP& edgeFP = proc.cast<RRectsGaussianEdgeFP>();
+ return fFirst == edgeFP.fFirst &&
+ fSecond == edgeFP.fSecond &&
+ fRadius == edgeFP.fRadius;
+ }
+
+ SkRRect fFirst;
+ Mode fFirstMode;
+ SkRRect fSecond;
+ Mode fSecondMode;
+ SkScalar fRadius;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> SkRRectsGaussianEdgeShaderImpl::asFragmentProcessor(
+ const AsFPArgs& args) const {
+ return sk_make_sp<RRectsGaussianEdgeFP>(fFirst, fSecond, fRadius);
+}
+
+#endif
+
+////////////////////////////////////////////////////////////////////////////
+
+SkRRectsGaussianEdgeShaderImpl::GaussianEdgeShaderContext::GaussianEdgeShaderContext(
+ const SkRRectsGaussianEdgeShaderImpl& shader,
+ const ContextRec& rec)
+ : INHERITED(shader, rec) {
+
+ fPaintColor = rec.fPaint->getColor();
+}
+
+void SkRRectsGaussianEdgeShaderImpl::GaussianEdgeShaderContext::shadeSpan(int x, int y,
+ SkPMColor result[],
+ int count) {
+ // TODO: implement
+ for (int i = 0; i < count; ++i) {
+ result[i] = fPaintColor;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+#ifndef SK_IGNORE_TO_STRING
+void SkRRectsGaussianEdgeShaderImpl::toString(SkString* str) const {
+ str->appendf("RRectsGaussianEdgeShader: ()");
+}
+#endif
+
+sk_sp<SkFlattenable> SkRRectsGaussianEdgeShaderImpl::CreateProc(SkReadBuffer& buf) {
+ // Discarding SkShader flattenable params
+ bool hasLocalMatrix = buf.readBool();
+ SkAssertResult(!hasLocalMatrix);
+
+ SkRect rect1, rect2;
+
+ buf.readRect(&rect1);
+ SkScalar xRad1 = buf.readScalar();
+ SkScalar yRad1 = buf.readScalar();
+
+ buf.readRect(&rect2);
+ SkScalar xRad2 = buf.readScalar();
+ SkScalar yRad2 = buf.readScalar();
+
+ SkScalar radius = buf.readScalar();
+
+ return sk_make_sp<SkRRectsGaussianEdgeShaderImpl>(SkRRect::MakeRectXY(rect1, xRad1, yRad1),
+ SkRRect::MakeRectXY(rect2, xRad2, yRad2),
+ radius);
+}
+
+void SkRRectsGaussianEdgeShaderImpl::flatten(SkWriteBuffer& buf) const {
+ INHERITED::flatten(buf);
+
+ SkASSERT(fFirst.isRect() || fFirst.isCircle() || fFirst.isSimpleCircular());
+ buf.writeRect(fFirst.rect());
+ const SkVector& radii1 = fFirst.getSimpleRadii();
+ buf.writeScalar(radii1.fX);
+ buf.writeScalar(radii1.fY);
+
+ SkASSERT(fSecond.isRect() || fSecond.isCircle() || fSecond.isSimpleCircular());
+ buf.writeRect(fSecond.rect());
+ const SkVector& radii2 = fSecond.getSimpleRadii();
+ buf.writeScalar(radii2.fX);
+ buf.writeScalar(radii2.fY);
+
+ buf.writeScalar(fRadius);
+}
+
+size_t SkRRectsGaussianEdgeShaderImpl::onContextSize(const ContextRec& rec) const {
+ return sizeof(GaussianEdgeShaderContext);
+}
+
+SkShader::Context* SkRRectsGaussianEdgeShaderImpl::onCreateContext(const ContextRec& rec,
+ void* storage) const {
+ return new (storage) GaussianEdgeShaderContext(*this, rec);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkShader> SkRRectsGaussianEdgeShader::Make(const SkRRect& first,
+ const SkRRect& second,
+ SkScalar radius, SkScalar unused) {
+ if ((!first.isRect() && !first.isCircle() && !first.isSimpleCircular()) ||
+ (!second.isRect() && !second.isCircle() && !second.isSimpleCircular())) {
+        // We only handle shapes whose x & y radii are equal and identical
+        // for all four corners.
+ return nullptr;
+ }
+
+ return sk_make_sp<SkRRectsGaussianEdgeShaderImpl>(first, second, radius);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkRRectsGaussianEdgeShader)
+SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkRRectsGaussianEdgeShaderImpl)
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END
+
+///////////////////////////////////////////////////////////////////////////////
diff --git a/gfx/skia/skia/src/effects/SkTableColorFilter.cpp b/gfx/skia/skia/src/effects/SkTableColorFilter.cpp
new file mode 100644
index 000000000..ebf646b99
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkTableColorFilter.cpp
@@ -0,0 +1,604 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "SkTableColorFilter.h"
+
+#include "SkBitmap.h"
+#include "SkColorPriv.h"
+#include "SkReadBuffer.h"
+#include "SkString.h"
+#include "SkUnPreMultiply.h"
+#include "SkWriteBuffer.h"
+
+class SkTable_ColorFilter : public SkColorFilter {
+public:
+ SkTable_ColorFilter(const uint8_t tableA[], const uint8_t tableR[],
+ const uint8_t tableG[], const uint8_t tableB[]) {
+ fBitmap = nullptr;
+ fFlags = 0;
+
+ uint8_t* dst = fStorage;
+ if (tableA) {
+ memcpy(dst, tableA, 256);
+ dst += 256;
+ fFlags |= kA_Flag;
+ }
+ if (tableR) {
+ memcpy(dst, tableR, 256);
+ dst += 256;
+ fFlags |= kR_Flag;
+ }
+ if (tableG) {
+ memcpy(dst, tableG, 256);
+ dst += 256;
+ fFlags |= kG_Flag;
+ }
+ if (tableB) {
+ memcpy(dst, tableB, 256);
+ fFlags |= kB_Flag;
+ }
+ }
+
+ virtual ~SkTable_ColorFilter() { delete fBitmap; }
+
+ bool asComponentTable(SkBitmap* table) const override;
+ sk_sp<SkColorFilter> makeComposed(sk_sp<SkColorFilter> inner) const override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(GrContext*) const override;
+#endif
+
+ void filterSpan(const SkPMColor src[], int count, SkPMColor dst[]) const override;
+
+ SK_TO_STRING_OVERRIDE()
+
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkTable_ColorFilter)
+
+ enum {
+ kA_Flag = 1 << 0,
+ kR_Flag = 1 << 1,
+ kG_Flag = 1 << 2,
+ kB_Flag = 1 << 3,
+ };
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ mutable const SkBitmap* fBitmap; // lazily allocated
+
+ uint8_t fStorage[256 * 4];
+ unsigned fFlags;
+
+ friend class SkTableColorFilter;
+
+ typedef SkColorFilter INHERITED;
+};
+
+static const uint8_t gIdentityTable[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F,
+ 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
+ 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF,
+ 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7,
+ 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
+ 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
+ 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
+ 0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
+ 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF
+};
+
+void SkTable_ColorFilter::filterSpan(const SkPMColor src[], int count, SkPMColor dst[]) const {
+ const uint8_t* table = fStorage;
+ const uint8_t* tableA = gIdentityTable;
+ const uint8_t* tableR = gIdentityTable;
+ const uint8_t* tableG = gIdentityTable;
+ const uint8_t* tableB = gIdentityTable;
+ if (fFlags & kA_Flag) {
+ tableA = table; table += 256;
+ }
+ if (fFlags & kR_Flag) {
+ tableR = table; table += 256;
+ }
+ if (fFlags & kG_Flag) {
+ tableG = table; table += 256;
+ }
+ if (fFlags & kB_Flag) {
+ tableB = table;
+ }
+
+ const SkUnPreMultiply::Scale* scaleTable = SkUnPreMultiply::GetScaleTable();
+ for (int i = 0; i < count; ++i) {
+ SkPMColor c = src[i];
+ unsigned a, r, g, b;
+ if (0 == c) {
+ a = r = g = b = 0;
+ } else {
+ a = SkGetPackedA32(c);
+ r = SkGetPackedR32(c);
+ g = SkGetPackedG32(c);
+ b = SkGetPackedB32(c);
+
+ if (a < 255) {
+ SkUnPreMultiply::Scale scale = scaleTable[a];
+ r = SkUnPreMultiply::ApplyScale(scale, r);
+ g = SkUnPreMultiply::ApplyScale(scale, g);
+ b = SkUnPreMultiply::ApplyScale(scale, b);
+ }
+ }
+ dst[i] = SkPremultiplyARGBInline(tableA[a], tableR[r],
+ tableG[g], tableB[b]);
+ }
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkTable_ColorFilter::toString(SkString* str) const {
+ const uint8_t* table = fStorage;
+ const uint8_t* tableA = gIdentityTable;
+ const uint8_t* tableR = gIdentityTable;
+ const uint8_t* tableG = gIdentityTable;
+ const uint8_t* tableB = gIdentityTable;
+ if (fFlags & kA_Flag) {
+ tableA = table; table += 256;
+ }
+ if (fFlags & kR_Flag) {
+ tableR = table; table += 256;
+ }
+ if (fFlags & kG_Flag) {
+ tableG = table; table += 256;
+ }
+ if (fFlags & kB_Flag) {
+ tableB = table;
+ }
+
+ str->append("SkTable_ColorFilter (");
+
+ for (int i = 0; i < 256; ++i) {
+ str->appendf("%d: %d,%d,%d,%d\n",
+ i, tableR[i], tableG[i], tableB[i], tableA[i]);
+ }
+
+ str->append(")");
+}
+#endif
+
+static const uint8_t gCountNibBits[] = {
+ 0, 1, 1, 2,
+ 1, 2, 2, 3,
+ 1, 2, 2, 3,
+ 2, 3, 3, 4
+};
+
+#include "SkPackBits.h"
+
+void SkTable_ColorFilter::flatten(SkWriteBuffer& buffer) const {
+ uint8_t storage[5*256];
+ int count = gCountNibBits[fFlags & 0xF];
+ size_t size = SkPackBits::Pack8(fStorage, count * 256, storage,
+ sizeof(storage));
+
+ buffer.write32(fFlags);
+ buffer.writeByteArray(storage, size);
+}
+
+sk_sp<SkFlattenable> SkTable_ColorFilter::CreateProc(SkReadBuffer& buffer) {
+ const int flags = buffer.read32();
+ const size_t count = gCountNibBits[flags & 0xF];
+ SkASSERT(count <= 4);
+
+ uint8_t packedStorage[5*256];
+ size_t packedSize = buffer.getArrayCount();
+ if (!buffer.validate(packedSize <= sizeof(packedStorage))) {
+ return nullptr;
+ }
+ if (!buffer.readByteArray(packedStorage, packedSize)) {
+ return nullptr;
+ }
+
+ uint8_t unpackedStorage[4*256];
+ size_t unpackedSize = SkPackBits::Unpack8(packedStorage, packedSize,
+ unpackedStorage, sizeof(unpackedStorage));
+ // now check that we got the size we expected
+ if (!buffer.validate(unpackedSize == count*256)) {
+ return nullptr;
+ }
+
+ const uint8_t* a = nullptr;
+ const uint8_t* r = nullptr;
+ const uint8_t* g = nullptr;
+ const uint8_t* b = nullptr;
+ const uint8_t* ptr = unpackedStorage;
+
+ if (flags & kA_Flag) {
+ a = ptr;
+ ptr += 256;
+ }
+ if (flags & kR_Flag) {
+ r = ptr;
+ ptr += 256;
+ }
+ if (flags & kG_Flag) {
+ g = ptr;
+ ptr += 256;
+ }
+ if (flags & kB_Flag) {
+ b = ptr;
+ ptr += 256;
+ }
+ return SkTableColorFilter::MakeARGB(a, r, g, b);
+}
+
+bool SkTable_ColorFilter::asComponentTable(SkBitmap* table) const {
+ if (table) {
+ if (nullptr == fBitmap) {
+ SkBitmap* bmp = new SkBitmap;
+ bmp->allocPixels(SkImageInfo::MakeA8(256, 4));
+ uint8_t* bitmapPixels = bmp->getAddr8(0, 0);
+ int offset = 0;
+ static const unsigned kFlags[] = { kA_Flag, kR_Flag, kG_Flag, kB_Flag };
+
+ for (int x = 0; x < 4; ++x) {
+ if (!(fFlags & kFlags[x])) {
+ memcpy(bitmapPixels, gIdentityTable, sizeof(gIdentityTable));
+ } else {
+ memcpy(bitmapPixels, fStorage + offset, 256);
+ offset += 256;
+ }
+ bitmapPixels += 256;
+ }
+ fBitmap = bmp;
+ }
+ *table = *fBitmap;
+ }
+ return true;
+}
+
+// Combines the two lookup tables so that making a lookup using res[] has
+// the same effect as making a lookup through inner[] then outer[].
+static void combine_tables(uint8_t res[256], const uint8_t outer[256], const uint8_t inner[256]) {
+ for (int i = 0; i < 256; i++) {
+ res[i] = outer[inner[i]];
+ }
+}
+
+sk_sp<SkColorFilter> SkTable_ColorFilter::makeComposed(sk_sp<SkColorFilter> innerFilter) const {
+ SkBitmap innerBM;
+ if (!innerFilter->asComponentTable(&innerBM)) {
+ return nullptr;
+ }
+
+ innerBM.lockPixels();
+ if (nullptr == innerBM.getPixels()) {
+ return nullptr;
+ }
+
+ const uint8_t* table = fStorage;
+ const uint8_t* tableA = gIdentityTable;
+ const uint8_t* tableR = gIdentityTable;
+ const uint8_t* tableG = gIdentityTable;
+ const uint8_t* tableB = gIdentityTable;
+ if (fFlags & kA_Flag) {
+ tableA = table; table += 256;
+ }
+ if (fFlags & kR_Flag) {
+ tableR = table; table += 256;
+ }
+ if (fFlags & kG_Flag) {
+ tableG = table; table += 256;
+ }
+ if (fFlags & kB_Flag) {
+ tableB = table;
+ }
+
+ uint8_t concatA[256];
+ uint8_t concatR[256];
+ uint8_t concatG[256];
+ uint8_t concatB[256];
+
+ combine_tables(concatA, tableA, innerBM.getAddr8(0, 0));
+ combine_tables(concatR, tableR, innerBM.getAddr8(0, 1));
+ combine_tables(concatG, tableG, innerBM.getAddr8(0, 2));
+ combine_tables(concatB, tableB, innerBM.getAddr8(0, 3));
+
+ return SkTableColorFilter::MakeARGB(concatA, concatR, concatG, concatB);
+}
+
+#if SK_SUPPORT_GPU
+
+#include "GrContext.h"
+#include "GrFragmentProcessor.h"
+#include "GrInvariantOutput.h"
+#include "GrTextureStripAtlas.h"
+#include "SkGr.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+
+class ColorTableEffect : public GrFragmentProcessor {
+public:
+ static sk_sp<GrFragmentProcessor> Make(GrContext* context, SkBitmap bitmap, unsigned flags);
+
+ virtual ~ColorTableEffect();
+
+ const char* name() const override { return "ColorTable"; }
+
+ const GrTextureStripAtlas* atlas() const { return fAtlas; }
+ int atlasRow() const { return fRow; }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ ColorTableEffect(GrTexture* texture, GrTextureStripAtlas* atlas, int row, unsigned flags);
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ GrTextureAccess fTextureAccess;
+
+ // currently not used in shader code, just to assist onComputeInvariantOutput().
+ unsigned fFlags;
+
+ GrTextureStripAtlas* fAtlas;
+ int fRow;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+class GLColorTableEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+ static void GenKey(const GrProcessor&, const GrGLSLCaps&, GrProcessorKeyBuilder*) {}
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+private:
+ UniformHandle fRGBAYValuesUni;
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GLColorTableEffect::onSetData(const GrGLSLProgramDataManager& pdm, const GrProcessor& proc) {
+ // The textures are organized in a strip where the rows are ordered a, r, g, b.
+ float rgbaYValues[4];
+ const ColorTableEffect& cte = proc.cast<ColorTableEffect>();
+ if (cte.atlas()) {
+ SkScalar yDelta = cte.atlas()->getNormalizedTexelHeight();
+ rgbaYValues[3] = cte.atlas()->getYOffset(cte.atlasRow()) + SK_ScalarHalf * yDelta;
+ rgbaYValues[0] = rgbaYValues[3] + yDelta;
+ rgbaYValues[1] = rgbaYValues[0] + yDelta;
+ rgbaYValues[2] = rgbaYValues[1] + yDelta;
+ } else {
+ rgbaYValues[3] = 0.125;
+ rgbaYValues[0] = 0.375;
+ rgbaYValues[1] = 0.625;
+ rgbaYValues[2] = 0.875;
+ }
+ pdm.set4fv(fRGBAYValuesUni, 1, rgbaYValues);
+}
+
+void GLColorTableEffect::emitCode(EmitArgs& args) {
+ const char* yoffsets;
+ fRGBAYValuesUni = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType, kDefault_GrSLPrecision,
+ "yoffsets", &yoffsets);
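+    // The 255/256 scale plus 1/512 offset remaps [0, 1] onto the texel centers of the
+    // 256-wide table texture, i.e. [0.5/256, 255.5/256].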
+ static const float kColorScaleFactor = 255.0f / 256.0f;
+ static const float kColorOffsetFactor = 1.0f / 512.0f;
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ if (nullptr == args.fInputColor) {
+ // the input color is solid white (all ones).
+ static const float kMaxValue = kColorScaleFactor + kColorOffsetFactor;
+ fragBuilder->codeAppendf("\t\tvec4 coord = vec4(%f, %f, %f, %f);\n",
+ kMaxValue, kMaxValue, kMaxValue, kMaxValue);
+
+ } else {
+ fragBuilder->codeAppendf("\t\tfloat nonZeroAlpha = max(%s.a, .0001);\n", args.fInputColor);
+ fragBuilder->codeAppendf("\t\tvec4 coord = vec4(%s.rgb / nonZeroAlpha, nonZeroAlpha);\n",
+ args.fInputColor);
+ fragBuilder->codeAppendf("\t\tcoord = coord * %f + vec4(%f, %f, %f, %f);\n",
+ kColorScaleFactor,
+ kColorOffsetFactor, kColorOffsetFactor,
+ kColorOffsetFactor, kColorOffsetFactor);
+ }
+
+ SkString coord;
+
+ fragBuilder->codeAppendf("\t\t%s.a = ", args.fOutputColor);
+ coord.printf("vec2(coord.a, %s.a)", yoffsets);
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], coord.c_str());
+ fragBuilder->codeAppend(".a;\n");
+
+ fragBuilder->codeAppendf("\t\t%s.r = ", args.fOutputColor);
+ coord.printf("vec2(coord.r, %s.r)", yoffsets);
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], coord.c_str());
+ fragBuilder->codeAppend(".a;\n");
+
+ fragBuilder->codeAppendf("\t\t%s.g = ", args.fOutputColor);
+ coord.printf("vec2(coord.g, %s.g)", yoffsets);
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], coord.c_str());
+ fragBuilder->codeAppend(".a;\n");
+
+ fragBuilder->codeAppendf("\t\t%s.b = ", args.fOutputColor);
+ coord.printf("vec2(coord.b, %s.b)", yoffsets);
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], coord.c_str());
+ fragBuilder->codeAppend(".a;\n");
+
+ fragBuilder->codeAppendf("\t\t%s.rgb *= %s.a;\n", args.fOutputColor, args.fOutputColor);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+sk_sp<GrFragmentProcessor> ColorTableEffect::Make(GrContext* context, SkBitmap bitmap,
+ unsigned flags) {
+
+ GrTextureStripAtlas::Desc desc;
+ desc.fWidth = bitmap.width();
+ desc.fHeight = 128;
+ desc.fRowHeight = bitmap.height();
+ desc.fContext = context;
+ desc.fConfig = SkImageInfo2GrPixelConfig(bitmap.info(), *context->caps());
+ GrTextureStripAtlas* atlas = GrTextureStripAtlas::GetAtlas(desc);
+ int row = atlas->lockRow(bitmap);
+ SkAutoTUnref<GrTexture> texture;
+ if (-1 == row) {
+ atlas = nullptr;
+ texture.reset(GrRefCachedBitmapTexture(context, bitmap, GrTextureParams::ClampNoFilter(),
+ SkSourceGammaTreatment::kRespect));
+ } else {
+ texture.reset(SkRef(atlas->getTexture()));
+ }
+
+ return sk_sp<GrFragmentProcessor>(new ColorTableEffect(texture, atlas, row, flags));
+}
+
+ColorTableEffect::ColorTableEffect(GrTexture* texture, GrTextureStripAtlas* atlas, int row,
+ unsigned flags)
+ : fTextureAccess(texture)
+ , fFlags(flags)
+ , fAtlas(atlas)
+ , fRow(row) {
+ this->initClassID<ColorTableEffect>();
+ this->addTextureAccess(&fTextureAccess);
+}
+
+ColorTableEffect::~ColorTableEffect() {
+ if (fAtlas) {
+ fAtlas->unlockRow(fRow);
+ }
+}
+
+void ColorTableEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GLColorTableEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* ColorTableEffect::onCreateGLSLInstance() const {
+ return new GLColorTableEffect;
+}
+
+bool ColorTableEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ // For non-atlased instances, the texture (compared by base class) is sufficient to
+ // differentiate different tables. For atlased instances we ensure they are using the
+ // same row.
+ const ColorTableEffect& that = other.cast<ColorTableEffect>();
+ SkASSERT(SkToBool(fAtlas) == SkToBool(that.fAtlas));
+ // Ok to always do this comparison since both would be -1 if non-atlased.
+ return fRow == that.fRow;
+}
+
+void ColorTableEffect::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ // If we kept the table in the effect then we could actually run known inputs through the
+ // table.
+ GrColorComponentFlags invalidateFlags = kNone_GrColorComponentFlags;
+ if (fFlags & SkTable_ColorFilter::kR_Flag) {
+ invalidateFlags |= kR_GrColorComponentFlag;
+ }
+ if (fFlags & SkTable_ColorFilter::kG_Flag) {
+ invalidateFlags |= kG_GrColorComponentFlag;
+ }
+ if (fFlags & SkTable_ColorFilter::kB_Flag) {
+ invalidateFlags |= kB_GrColorComponentFlag;
+ }
+ if (fFlags & SkTable_ColorFilter::kA_Flag) {
+ invalidateFlags |= kA_GrColorComponentFlag;
+ }
+ inout->invalidateComponents(invalidateFlags, GrInvariantOutput::kWill_ReadInput);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(ColorTableEffect);
+
+sk_sp<GrFragmentProcessor> ColorTableEffect::TestCreate(GrProcessorTestData* d) {
+ int flags = 0;
+ uint8_t luts[256][4];
+ do {
+ for (int i = 0; i < 4; ++i) {
+ flags |= d->fRandom->nextBool() ? (1 << i): 0;
+ }
+ } while (!flags);
+ for (int i = 0; i < 4; ++i) {
+ if (flags & (1 << i)) {
+ for (int j = 0; j < 256; ++j) {
+ luts[j][i] = SkToU8(d->fRandom->nextBits(8));
+ }
+ }
+ }
+ auto filter(SkTableColorFilter::MakeARGB(
+ (flags & (1 << 0)) ? luts[0] : nullptr,
+ (flags & (1 << 1)) ? luts[1] : nullptr,
+ (flags & (1 << 2)) ? luts[2] : nullptr,
+ (flags & (1 << 3)) ? luts[3] : nullptr
+ ));
+
+ sk_sp<GrFragmentProcessor> fp = filter->asFragmentProcessor(d->fContext);
+ SkASSERT(fp);
+ return fp;
+}
+
+sk_sp<GrFragmentProcessor> SkTable_ColorFilter::asFragmentProcessor(GrContext* context) const {
+ SkBitmap bitmap;
+ this->asComponentTable(&bitmap);
+
+ return ColorTableEffect::Make(context, bitmap, fFlags);
+}
+
+#endif // SK_SUPPORT_GPU
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_CPU_BENDIAN
+#else
+ #define SK_A32_INDEX (3 - (SK_A32_SHIFT >> 3))
+ #define SK_R32_INDEX (3 - (SK_R32_SHIFT >> 3))
+ #define SK_G32_INDEX (3 - (SK_G32_SHIFT >> 3))
+ #define SK_B32_INDEX (3 - (SK_B32_SHIFT >> 3))
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkColorFilter> SkTableColorFilter::Make(const uint8_t table[256]) {
+ return sk_make_sp<SkTable_ColorFilter>(table, table, table, table);
+}
+
+sk_sp<SkColorFilter> SkTableColorFilter::MakeARGB(const uint8_t tableA[256],
+ const uint8_t tableR[256],
+ const uint8_t tableG[256],
+ const uint8_t tableB[256]) {
+ return sk_make_sp<SkTable_ColorFilter>(tableA, tableR, tableG, tableB);
+}
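+
+// Illustrative usage sketch (assumes an SkPaint named 'paint'; the inverting table is just
+// an example):
+//
+//     uint8_t invert[256];
+//     for (int i = 0; i < 256; ++i) { invert[i] = 255 - i; }
+//     paint.setColorFilter(SkTableColorFilter::Make(invert));
+//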
+
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkTableColorFilter)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkTable_ColorFilter)
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END
diff --git a/gfx/skia/skia/src/effects/SkTableMaskFilter.cpp b/gfx/skia/skia/src/effects/SkTableMaskFilter.cpp
new file mode 100644
index 000000000..a3b4038a2
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkTableMaskFilter.cpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkFixed.h"
+#include "SkReadBuffer.h"
+#include "SkString.h"
+#include "SkTableMaskFilter.h"
+#include "SkWriteBuffer.h"
+
+SkTableMaskFilter::SkTableMaskFilter() {
+ for (int i = 0; i < 256; i++) {
+ fTable[i] = i;
+ }
+}
+
+SkTableMaskFilter::SkTableMaskFilter(const uint8_t table[256]) {
+ memcpy(fTable, table, sizeof(fTable));
+}
+
+SkTableMaskFilter::~SkTableMaskFilter() {}
+
+bool SkTableMaskFilter::filterMask(SkMask* dst, const SkMask& src,
+ const SkMatrix&, SkIPoint* margin) const {
+ if (src.fFormat != SkMask::kA8_Format) {
+ return false;
+ }
+
+ dst->fBounds = src.fBounds;
+ dst->fRowBytes = SkAlign4(dst->fBounds.width());
+ dst->fFormat = SkMask::kA8_Format;
+ dst->fImage = nullptr;
+
+ if (src.fImage) {
+ dst->fImage = SkMask::AllocImage(dst->computeImageSize());
+
+ const uint8_t* srcP = src.fImage;
+ uint8_t* dstP = dst->fImage;
+ const uint8_t* table = fTable;
+ int dstWidth = dst->fBounds.width();
+ int extraZeros = dst->fRowBytes - dstWidth;
+
+ for (int y = dst->fBounds.height() - 1; y >= 0; --y) {
+ for (int x = dstWidth - 1; x >= 0; --x) {
+ dstP[x] = table[srcP[x]];
+ }
+ srcP += src.fRowBytes;
+            // We can't simply advance dstP by rowBytes: if there is any padding
+            // between the width and the rowBytes, we need to zero it so that
+            // blitters can safely read past the width if that is faster for them.
+ dstP += dstWidth;
+ for (int i = extraZeros - 1; i >= 0; --i) {
+ *dstP++ = 0;
+ }
+ }
+ }
+
+ if (margin) {
+ margin->set(0, 0);
+ }
+ return true;
+}
+
+SkMask::Format SkTableMaskFilter::getFormat() const {
+ return SkMask::kA8_Format;
+}
+
+void SkTableMaskFilter::flatten(SkWriteBuffer& wb) const {
+ wb.writeByteArray(fTable, 256);
+}
+
+sk_sp<SkFlattenable> SkTableMaskFilter::CreateProc(SkReadBuffer& buffer) {
+ uint8_t table[256];
+ if (!buffer.readByteArray(table, 256)) {
+ return nullptr;
+ }
+ return sk_sp<SkFlattenable>(Create(table));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
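+// Fills 'table' with a gamma curve: table[i] = round(255 * (i/255)^gamma),
+// pinned to [0, 255].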
+void SkTableMaskFilter::MakeGammaTable(uint8_t table[256], SkScalar gamma) {
+ const float dx = 1 / 255.0f;
+ const float g = SkScalarToFloat(gamma);
+
+ float x = 0;
+ for (int i = 0; i < 256; i++) {
+ // float ee = powf(x, g) * 255;
+ table[i] = SkTPin(sk_float_round2int(powf(x, g) * 255), 0, 255);
+ x += dx;
+ }
+}
+
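+// Builds a table that maps values strictly between 'min' and 'max' linearly onto (0, 255),
+// clamping everything at or below 'min' to 0 and everything at or above 'max' to 255.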
+void SkTableMaskFilter::MakeClipTable(uint8_t table[256], uint8_t min,
+ uint8_t max) {
+ if (0 == max) {
+ max = 1;
+ }
+ if (min >= max) {
+ min = max - 1;
+ }
+ SkASSERT(min < max);
+
+ SkFixed scale = (1 << 16) * 255 / (max - min);
+ memset(table, 0, min + 1);
+ for (int i = min + 1; i < max; i++) {
+ int value = SkFixedRoundToInt(scale * (i - min));
+ SkASSERT(value <= 255);
+ table[i] = value;
+ }
+ memset(table + max, 255, 256 - max);
+
+#if 0
+ int j;
+ for (j = 0; j < 256; j++) {
+ if (table[j]) {
+ break;
+ }
+ }
+ SkDebugf("%d %d start [%d]", min, max, j);
+ for (; j < 256; j++) {
+ SkDebugf(" %d", table[j]);
+ }
+ SkDebugf("\n\n");
+#endif
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkTableMaskFilter::toString(SkString* str) const {
+ str->append("SkTableMaskFilter: (");
+
+ str->append("table: ");
+ for (int i = 0; i < 255; ++i) {
+ str->appendf("%d, ", fTable[i]);
+ }
+ str->appendf("%d", fTable[255]);
+
+ str->append(")");
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/SkTileImageFilter.cpp b/gfx/skia/skia/src/effects/SkTileImageFilter.cpp
new file mode 100644
index 000000000..a140db216
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkTileImageFilter.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTileImageFilter.h"
+
+#include "SkCanvas.h"
+#include "SkImage.h"
+#include "SkMatrix.h"
+#include "SkOffsetImageFilter.h"
+#include "SkPaint.h"
+#include "SkReadBuffer.h"
+#include "SkShader.h"
+#include "SkSpecialImage.h"
+#include "SkSpecialSurface.h"
+#include "SkSurface.h"
+#include "SkValidationUtils.h"
+#include "SkWriteBuffer.h"
+
+sk_sp<SkImageFilter> SkTileImageFilter::Make(const SkRect& srcRect, const SkRect& dstRect,
+ sk_sp<SkImageFilter> input) {
+ if (!SkIsValidRect(srcRect) || !SkIsValidRect(dstRect)) {
+ return nullptr;
+ }
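+    // When the rects are the same size there is nothing to tile: the result degenerates to
+    // the input translated by (dst - src) and cropped to the rects' intersection.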
+ if (srcRect.width() == dstRect.width() && srcRect.height() == dstRect.height()) {
+ SkRect ir = dstRect;
+ if (!ir.intersect(srcRect)) {
+ return input;
+ }
+ CropRect cropRect(ir);
+ return SkOffsetImageFilter::Make(dstRect.x() - srcRect.x(),
+ dstRect.y() - srcRect.y(),
+ std::move(input),
+ &cropRect);
+ }
+ return sk_sp<SkImageFilter>(new SkTileImageFilter(srcRect, dstRect, std::move(input)));
+}
+
+sk_sp<SkSpecialImage> SkTileImageFilter::onFilterImage(SkSpecialImage* source,
+ const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, source, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ SkRect dstRect;
+ ctx.ctm().mapRect(&dstRect, fDstRect);
+ if (!dstRect.intersect(SkRect::Make(ctx.clipBounds()))) {
+ return nullptr;
+ }
+
+ const SkIRect dstIRect = dstRect.roundOut();
+ if (!fSrcRect.width() || !fSrcRect.height() || !dstIRect.width() || !dstIRect.height()) {
+ return nullptr;
+ }
+
+ SkRect srcRect;
+ ctx.ctm().mapRect(&srcRect, fSrcRect);
+ SkIRect srcIRect;
+ srcRect.roundOut(&srcIRect);
+ srcIRect.offset(-inputOffset);
+ const SkIRect inputBounds = SkIRect::MakeWH(input->width(), input->height());
+
+ if (!SkIRect::Intersects(srcIRect, inputBounds)) {
+ return nullptr;
+ }
+
+    // We create an SkImage here because it needs to be a tight fit for the tiling
+ sk_sp<SkImage> subset;
+ if (inputBounds.contains(srcIRect)) {
+ subset = input->makeTightSubset(srcIRect);
+ if (!subset) {
+ return nullptr;
+ }
+ } else {
+ sk_sp<SkSurface> surf(input->makeTightSurface(ctx.outputProperties(), srcIRect.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+
+ input->draw(canvas,
+ SkIntToScalar(inputOffset.x()), SkIntToScalar(inputOffset.y()),
+ &paint);
+
+ subset = surf->makeImageSnapshot();
+ }
+ SkASSERT(subset->width() == srcIRect.width());
+ SkASSERT(subset->height() == srcIRect.height());
+
+ sk_sp<SkSpecialSurface> surf(source->makeSurface(ctx.outputProperties(), dstIRect.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+ paint.setShader(subset->makeShader(SkShader::kRepeat_TileMode, SkShader::kRepeat_TileMode));
+ canvas->translate(-dstRect.fLeft, -dstRect.fTop);
+ canvas->drawRect(dstRect, paint);
+ offset->fX = dstIRect.fLeft;
+ offset->fY = dstIRect.fTop;
+ return surf->makeImageSnapshot();
+}
+
+SkIRect SkTileImageFilter::onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection direction) const {
+ SkRect rect = kReverse_MapDirection == direction ? fSrcRect : fDstRect;
+ ctm.mapRect(&rect);
+ return rect.roundOut();
+}
+
+SkIRect SkTileImageFilter::onFilterBounds(const SkIRect& src, const SkMatrix&, MapDirection) const {
+ // Don't recurse into inputs.
+ return src;
+}
+
+SkRect SkTileImageFilter::computeFastBounds(const SkRect& src) const {
+ return fDstRect;
+}
+
+sk_sp<SkFlattenable> SkTileImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkRect src, dst;
+ buffer.readRect(&src);
+ buffer.readRect(&dst);
+ return Make(src, dst, common.getInput(0));
+}
+
+void SkTileImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeRect(fSrcRect);
+ buffer.writeRect(fDstRect);
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkTileImageFilter::toString(SkString* str) const {
+ str->appendf("SkTileImageFilter: (");
+ str->appendf("src: %.2f %.2f %.2f %.2f",
+ fSrcRect.fLeft, fSrcRect.fTop, fSrcRect.fRight, fSrcRect.fBottom);
+ str->appendf(" dst: %.2f %.2f %.2f %.2f",
+ fDstRect.fLeft, fDstRect.fTop, fDstRect.fRight, fDstRect.fBottom);
+ if (this->getInput(0)) {
+ str->appendf("input: (");
+ this->getInput(0)->toString(str);
+ str->appendf(")");
+ }
+ str->append(")");
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/SkXfermodeImageFilter.cpp b/gfx/skia/skia/src/effects/SkXfermodeImageFilter.cpp
new file mode 100644
index 000000000..b0735168b
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkXfermodeImageFilter.cpp
@@ -0,0 +1,544 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkXfermodeImageFilter.h"
+#include "SkArithmeticModePriv.h"
+
+#include "SkCanvas.h"
+#include "SkColorPriv.h"
+#include "SkReadBuffer.h"
+#include "SkSpecialImage.h"
+#include "SkSpecialSurface.h"
+#include "SkWriteBuffer.h"
+#include "SkXfermode.h"
+#if SK_SUPPORT_GPU
+#include "GrContext.h"
+#include "GrDrawContext.h"
+#include "effects/GrConstColorProcessor.h"
+#include "effects/GrTextureDomain.h"
+#include "effects/GrSimpleTextureEffect.h"
+#include "SkArithmeticMode_gpu.h"
+#include "SkGr.h"
+#include "SkGrPriv.h"
+#endif
+
+class SkXfermodeImageFilter_Base : public SkImageFilter {
+public:
+ SkXfermodeImageFilter_Base(SkBlendMode mode, sk_sp<SkImageFilter> inputs[2],
+ const CropRect* cropRect);
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkXfermodeImageFilter_Base)
+
+protected:
+ sk_sp<SkSpecialImage> onFilterImage(SkSpecialImage* source, const Context&,
+ SkIPoint* offset) const override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<SkSpecialImage> filterImageGPU(SkSpecialImage* source,
+ sk_sp<SkSpecialImage> background,
+ const SkIPoint& backgroundOffset,
+ sk_sp<SkSpecialImage> foreground,
+ const SkIPoint& foregroundOffset,
+ const SkIRect& bounds,
+ const OutputProperties& outputProperties) const;
+#endif
+
+ void flatten(SkWriteBuffer&) const override;
+
+ virtual void drawForeground(SkCanvas* canvas, SkSpecialImage*, const SkIRect&) const;
+#if SK_SUPPORT_GPU
+ virtual sk_sp<GrFragmentProcessor> makeFGFrag(sk_sp<GrFragmentProcessor> bgFP) const;
+#endif
+
+private:
+ SkBlendMode fMode;
+
+ friend class SkXfermodeImageFilter;
+
+ typedef SkImageFilter INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImageFilter> SkXfermodeImageFilter::Make(SkBlendMode mode,
+ sk_sp<SkImageFilter> background,
+ sk_sp<SkImageFilter> foreground,
+ const SkImageFilter::CropRect* cropRect) {
+ sk_sp<SkImageFilter> inputs[2] = { std::move(background), std::move(foreground) };
+ return sk_sp<SkImageFilter>(new SkXfermodeImageFilter_Base(mode, inputs, cropRect));
+}
+
+#ifdef SK_SUPPORT_LEGACY_XFERMODE_OBJECT
+sk_sp<SkImageFilter> SkXfermodeImageFilter::Make(sk_sp<SkXfermode> mode,
+ sk_sp<SkImageFilter> background,
+ sk_sp<SkImageFilter> foreground,
+ const SkImageFilter::CropRect* cropRect) {
+ return Make(mode ? mode->blend() : SkBlendMode::kSrcOver,
+ std::move(background), std::move(foreground), cropRect);
+}
+#endif
+
+SkXfermodeImageFilter_Base::SkXfermodeImageFilter_Base(SkBlendMode mode,
+ sk_sp<SkImageFilter> inputs[2],
+ const CropRect* cropRect)
+ : INHERITED(inputs, 2, cropRect)
+ , fMode(mode)
+{}
+
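+// Reads a blend mode from the buffer. For pictures serialized before SkBlendMode, an
+// arithmetic xfermode is reported through 'arith' and -1 is returned; otherwise the
+// (validated) mode is returned as an int.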
+static int unflatten_blendmode(SkReadBuffer& buffer, SkArithmeticParams* arith) {
+ if (buffer.isVersionLT(SkReadBuffer::kXfermodeToBlendMode_Version)) {
+ sk_sp<SkXfermode> xfer = buffer.readXfermode();
+ if (xfer) {
+ if (xfer->isArithmetic(arith)) {
+ return -1;
+ }
+ return (int)xfer->blend();
+ } else {
+ return (int)SkBlendMode::kSrcOver;
+ }
+ } else {
+ uint32_t mode = buffer.read32();
+ (void)buffer.validate(mode <= (unsigned)SkBlendMode::kLastMode);
+ return mode;
+ }
+}
+
+sk_sp<SkFlattenable> SkXfermodeImageFilter_Base::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 2);
+ SkArithmeticParams arith;
+ int mode = unflatten_blendmode(buffer, &arith);
+ if (mode >= 0) {
+ return SkXfermodeImageFilter::Make((SkBlendMode)mode, common.getInput(0),
+ common.getInput(1), &common.cropRect());
+ } else {
+ return SkXfermodeImageFilter::MakeArithmetic(arith.fK[0], arith.fK[1], arith.fK[2],
+ arith.fK[3], arith.fEnforcePMColor,
+ common.getInput(0),
+ common.getInput(1), &common.cropRect());
+ }
+}
+
+void SkXfermodeImageFilter_Base::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.write32((unsigned)fMode);
+}
+
+sk_sp<SkSpecialImage> SkXfermodeImageFilter_Base::onFilterImage(SkSpecialImage* source,
+ const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint backgroundOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> background(this->filterInput(0, source, ctx, &backgroundOffset));
+
+ SkIPoint foregroundOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> foreground(this->filterInput(1, source, ctx, &foregroundOffset));
+
+ SkIRect foregroundBounds = SkIRect::EmptyIRect();
+ if (foreground) {
+ foregroundBounds = SkIRect::MakeXYWH(foregroundOffset.x(), foregroundOffset.y(),
+ foreground->width(), foreground->height());
+ }
+
+ SkIRect srcBounds = SkIRect::EmptyIRect();
+ if (background) {
+ srcBounds = SkIRect::MakeXYWH(backgroundOffset.x(), backgroundOffset.y(),
+ background->width(), background->height());
+ }
+
+ srcBounds.join(foregroundBounds);
+ if (srcBounds.isEmpty()) {
+ return nullptr;
+ }
+
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, srcBounds, &bounds)) {
+ return nullptr;
+ }
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+
+#if SK_SUPPORT_GPU
+ if (source->isTextureBacked()) {
+ return this->filterImageGPU(source,
+ background, backgroundOffset,
+ foreground, foregroundOffset,
+ bounds, ctx.outputProperties());
+ }
+#endif
+
+ sk_sp<SkSpecialSurface> surf(source->makeSurface(ctx.outputProperties(), bounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+    canvas->clear(0x0); // the background image may not cover the full bounds, so clear first
+ canvas->translate(SkIntToScalar(-bounds.left()), SkIntToScalar(-bounds.top()));
+
+ if (background) {
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+ background->draw(canvas,
+ SkIntToScalar(backgroundOffset.fX), SkIntToScalar(backgroundOffset.fY),
+ &paint);
+ }
+
+ this->drawForeground(canvas, foreground.get(), foregroundBounds);
+
+ return surf->makeImageSnapshot();
+}
+
+void SkXfermodeImageFilter_Base::drawForeground(SkCanvas* canvas, SkSpecialImage* img,
+ const SkIRect& fgBounds) const {
+ SkPaint paint;
+ paint.setBlendMode(fMode);
+ if (img) {
+ img->draw(canvas, SkIntToScalar(fgBounds.fLeft), SkIntToScalar(fgBounds.fTop), &paint);
+ }
+
+ SkAutoCanvasRestore acr(canvas, true);
+ canvas->clipRect(SkRect::Make(fgBounds), SkCanvas::kDifference_Op);
+ paint.setColor(0);
+ canvas->drawPaint(paint);
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkXfermodeImageFilter_Base::toString(SkString* str) const {
+ str->appendf("SkXfermodeImageFilter: (");
+ str->appendf("blendmode: (%d)", (int)fMode);
+ if (this->getInput(0)) {
+ str->appendf("foreground: (");
+ this->getInput(0)->toString(str);
+ str->appendf(")");
+ }
+ if (this->getInput(1)) {
+ str->appendf("background: (");
+ this->getInput(1)->toString(str);
+ str->appendf(")");
+ }
+ str->append(")");
+}
+#endif
+
+#if SK_SUPPORT_GPU
+
+#include "SkXfermode_proccoeff.h"
+
+sk_sp<SkSpecialImage> SkXfermodeImageFilter_Base::filterImageGPU(
+ SkSpecialImage* source,
+ sk_sp<SkSpecialImage> background,
+ const SkIPoint& backgroundOffset,
+ sk_sp<SkSpecialImage> foreground,
+ const SkIPoint& foregroundOffset,
+ const SkIRect& bounds,
+ const OutputProperties& outputProperties) const {
+ SkASSERT(source->isTextureBacked());
+
+ GrContext* context = source->getContext();
+
+ sk_sp<GrTexture> backgroundTex, foregroundTex;
+
+ if (background) {
+ backgroundTex = background->asTextureRef(context);
+ }
+
+ if (foreground) {
+ foregroundTex = foreground->asTextureRef(context);
+ }
+
+ GrPaint paint;
+ sk_sp<GrFragmentProcessor> bgFP;
+
+ if (backgroundTex) {
+ SkMatrix backgroundMatrix;
+ backgroundMatrix.setIDiv(backgroundTex->width(), backgroundTex->height());
+ backgroundMatrix.preTranslate(-SkIntToScalar(backgroundOffset.fX),
+ -SkIntToScalar(backgroundOffset.fY));
+ bgFP = GrTextureDomainEffect::Make(
+ backgroundTex.get(), nullptr, backgroundMatrix,
+ GrTextureDomain::MakeTexelDomain(backgroundTex.get(),
+ background->subset()),
+ GrTextureDomain::kDecal_Mode,
+ GrTextureParams::kNone_FilterMode);
+ } else {
+ bgFP = GrConstColorProcessor::Make(GrColor_TRANSPARENT_BLACK,
+ GrConstColorProcessor::kIgnore_InputMode);
+ }
+
+ if (foregroundTex) {
+ SkMatrix foregroundMatrix;
+ foregroundMatrix.setIDiv(foregroundTex->width(), foregroundTex->height());
+ foregroundMatrix.preTranslate(-SkIntToScalar(foregroundOffset.fX),
+ -SkIntToScalar(foregroundOffset.fY));
+
+ sk_sp<GrFragmentProcessor> foregroundFP;
+
+ foregroundFP = GrTextureDomainEffect::Make(
+ foregroundTex.get(), nullptr, foregroundMatrix,
+ GrTextureDomain::MakeTexelDomain(foregroundTex.get(),
+ foreground->subset()),
+ GrTextureDomain::kDecal_Mode,
+ GrTextureParams::kNone_FilterMode);
+
+ paint.addColorFragmentProcessor(std::move(foregroundFP));
+
+ sk_sp<GrFragmentProcessor> xferFP = this->makeFGFrag(bgFP);
+
+        // A null 'xferFP' here means kSrc_Mode was used, in which case we can just proceed
+ if (xferFP) {
+ paint.addColorFragmentProcessor(std::move(xferFP));
+ }
+ } else {
+ paint.addColorFragmentProcessor(std::move(bgFP));
+ }
+
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ sk_sp<GrDrawContext> drawContext(
+ context->makeDrawContext(SkBackingFit::kApprox, bounds.width(), bounds.height(),
+ GrRenderableConfigForColorSpace(outputProperties.colorSpace()),
+ sk_ref_sp(outputProperties.colorSpace())));
+ if (!drawContext) {
+ return nullptr;
+ }
+ paint.setGammaCorrect(drawContext->isGammaCorrect());
+
+ SkMatrix matrix;
+ matrix.setTranslate(SkIntToScalar(-bounds.left()), SkIntToScalar(-bounds.top()));
+ drawContext->drawRect(GrNoClip(), paint, matrix, SkRect::Make(bounds));
+
+ return SkSpecialImage::MakeFromGpu(SkIRect::MakeWH(bounds.width(), bounds.height()),
+ kNeedNewImageUniqueID_SpecialImage,
+ drawContext->asTexture(),
+ sk_ref_sp(drawContext->getColorSpace()));
+}
+
+sk_sp<GrFragmentProcessor>
+SkXfermodeImageFilter_Base::makeFGFrag(sk_sp<GrFragmentProcessor> bgFP) const {
+ // A null fMode is interpreted to mean kSrcOver_Mode (to match raster).
+ SkXfermode* xfer = SkXfermode::Peek(fMode);
+ sk_sp<SkXfermode> srcover;
+ if (!xfer) {
+        // We would prefer to use SkXfermode::Create here, but it deliberately
+        // refuses to return a kSrcOver_Mode SkXfermode, so we have to construct
+        // one the hard way.
+ struct ProcCoeff rec;
+ rec.fProc = SkXfermode::GetProc(SkXfermode::kSrcOver_Mode);
+ SkXfermode::ModeAsCoeff(SkXfermode::kSrcOver_Mode, &rec.fSC, &rec.fDC);
+
+ srcover.reset(new SkProcCoeffXfermode(rec, SkXfermode::kSrcOver_Mode));
+ xfer = srcover.get();
+
+ }
+ return xfer->makeFragmentProcessorForImageFilter(std::move(bgFP));
+}
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+class SkArithmeticImageFilter : public SkXfermodeImageFilter_Base {
+public:
+ SkArithmeticImageFilter(float k1, float k2, float k3, float k4, bool enforcePMColor,
+ sk_sp<SkImageFilter> inputs[2], const CropRect* cropRect)
+        // We need to pass a blend mode to the inherited constructor, but it is ignored here.
+ : SkXfermodeImageFilter_Base(SkBlendMode::kSrcOver, inputs, cropRect)
+ , fK{ k1, k2, k3, k4 }
+ , fEnforcePMColor(enforcePMColor)
+ {}
+
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkArithmeticImageFilter)
+
+protected:
+ void flatten(SkWriteBuffer& buffer) const override {
+ this->INHERITED::flatten(buffer);
+ for (int i = 0; i < 4; ++i) {
+ buffer.writeScalar(fK[i]);
+ }
+ buffer.writeBool(fEnforcePMColor);
+ }
+ void drawForeground(SkCanvas* canvas, SkSpecialImage*, const SkIRect&) const override;
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> makeFGFrag(sk_sp<GrFragmentProcessor> bgFP) const override {
+ return GrArithmeticFP::Make(fK[0], fK[1], fK[2], fK[3], fEnforcePMColor, std::move(bgFP));
+ }
+#endif
+
+private:
+ const float fK[4];
+ const bool fEnforcePMColor;
+
+ friend class SkXfermodeImageFilter;
+
+ typedef SkXfermodeImageFilter_Base INHERITED;
+};
+
+sk_sp<SkFlattenable> SkArithmeticImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 2);
+
+    // Skip the (srcover) mode that our parent class wrote.
+ SkDEBUGCODE(int mode =) unflatten_blendmode(buffer, nullptr);
+ if (!buffer.isValid()) {
+ return nullptr;
+ }
+ SkASSERT(SkBlendMode::kSrcOver == (SkBlendMode)mode);
+
+ float k[4];
+ for (int i = 0; i < 4; ++i) {
+ k[i] = buffer.readScalar();
+ }
+ const bool enforcePMColor = buffer.readBool();
+ return SkXfermodeImageFilter::MakeArithmetic(k[0], k[1], k[2], k[3], enforcePMColor,
+ common.getInput(0), common.getInput(1),
+ &common.cropRect());
+}
+
+#include "SkNx.h"
+
+static Sk4f pin(float min, const Sk4f& val, float max) {
+ return Sk4f::Max(min, Sk4f::Min(val, max));
+}
+
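+// Evaluates r = k1*s*d + k2*s + k3*d + k4 per component in 8-bit space: k1 is pre-scaled
+// by 1/255 and k4 by 255 (+0.5 for rounding), and the result is pinned to [0, 255].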
+template<bool EnforcePMColor> void arith_span(const float k[], SkPMColor dst[],
+ const SkPMColor src[], int count) {
+ const Sk4f k1 = k[0] * (1/255.0f),
+ k2 = k[1],
+ k3 = k[2],
+ k4 = k[3] * 255.0f + 0.5f;
+
+ for (int i = 0; i < count; i++) {
+ Sk4f s = SkNx_cast<float>(Sk4b::Load(src+i)),
+ d = SkNx_cast<float>(Sk4b::Load(dst+i)),
+ r = pin(0, k1*s*d + k2*s + k3*d + k4, 255);
+ if (EnforcePMColor) {
+ Sk4f a = SkNx_shuffle<3,3,3,3>(r);
+ r = Sk4f::Min(a, r);
+ }
+ SkNx_cast<uint8_t>(r).store(dst+i);
+ }
+}
+
+// apply mode to src==transparent (0)
+template<bool EnforcePMColor> void arith_transparent(const float k[], SkPMColor dst[], int count) {
+ const Sk4f k3 = k[2],
+ k4 = k[3] * 255.0f + 0.5f;
+
+ for (int i = 0; i < count; i++) {
+ Sk4f d = SkNx_cast<float>(Sk4b::Load(dst+i)),
+ r = pin(0, k3*d + k4, 255);
+ if (EnforcePMColor) {
+ Sk4f a = SkNx_shuffle<3,3,3,3>(r);
+ r = Sk4f::Min(a, r);
+ }
+ SkNx_cast<uint8_t>(r).store(dst+i);
+ }
+}
+
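+// Trims 'dst' and 'src' to their overlapping area, treating 'src' as positioned at
+// (srcDx, srcDy) within 'dst'. Returns false if the two do not overlap.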
+static bool intersect(SkPixmap* dst, SkPixmap* src, int srcDx, int srcDy) {
+ SkIRect dstR = SkIRect::MakeWH(dst->width(), dst->height());
+ SkIRect srcR = SkIRect::MakeXYWH(srcDx, srcDy, src->width(), src->height());
+ SkIRect sect;
+ if (!sect.intersect(dstR, srcR)) {
+ return false;
+ }
+ *dst = SkPixmap(dst->info().makeWH(sect.width(), sect.height()),
+ dst->addr(sect.fLeft, sect.fTop),
+ dst->rowBytes());
+ *src = SkPixmap(src->info().makeWH(sect.width(), sect.height()),
+ src->addr(SkTMax(0, -srcDx), SkTMax(0, -srcDy)),
+ src->rowBytes());
+ return true;
+}
+
+void SkArithmeticImageFilter::drawForeground(SkCanvas* canvas, SkSpecialImage* img,
+ const SkIRect& fgBounds) const {
+ SkPixmap dst;
+ if (!canvas->peekPixels(&dst)) {
+ return;
+ }
+
+ const SkMatrix& ctm = canvas->getTotalMatrix();
+ SkASSERT(ctm.getType() <= SkMatrix::kTranslate_Mask);
+ const int dx = SkScalarRoundToInt(ctm.getTranslateX());
+ const int dy = SkScalarRoundToInt(ctm.getTranslateY());
+
+ if (img) {
+ SkBitmap srcBM;
+ SkPixmap src;
+ if (!img->getROPixels(&srcBM)) {
+ return;
+ }
+ srcBM.lockPixels();
+ if (!srcBM.peekPixels(&src)) {
+ return;
+ }
+
+ auto proc = fEnforcePMColor ? arith_span<true> : arith_span<false>;
+ SkPixmap tmpDst = dst;
+ if (intersect(&tmpDst, &src, fgBounds.fLeft + dx, fgBounds.fTop + dy)) {
+ for (int y = 0; y < tmpDst.height(); ++y) {
+ proc(fK, tmpDst.writable_addr32(0, y), src.addr32(0, y), tmpDst.width());
+ }
+ }
+ }
+
+ // Now apply the mode with transparent-color to the outside of the fg image
+ SkRegion outside(SkIRect::MakeWH(dst.width(), dst.height()));
+ outside.op(fgBounds.makeOffset(dx, dy), SkRegion::kDifference_Op);
+ auto proc = fEnforcePMColor ? arith_transparent<true> : arith_transparent<false>;
+ for (SkRegion::Iterator iter(outside); !iter.done(); iter.next()) {
+ const SkIRect r = iter.rect();
+ for (int y = r.fTop; y < r.fBottom; ++y) {
+ proc(fK, dst.writable_addr32(r.fLeft, y), r.width());
+ }
+ }
+}
+
+sk_sp<SkImageFilter> SkXfermodeImageFilter::MakeArithmetic(float k1, float k2, float k3, float k4,
+ bool enforcePMColor,
+ sk_sp<SkImageFilter> background,
+ sk_sp<SkImageFilter> foreground,
+ const SkImageFilter::CropRect* crop) {
+ if (!SkScalarIsFinite(k1) || !SkScalarIsFinite(k2) ||
+ !SkScalarIsFinite(k3) || !SkScalarIsFinite(k4)) {
+ return nullptr;
+ }
+
+ // are we nearly some other "std" mode?
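+    // (k1,k2,k3,k4) == (0,1,0,0) is plain src, (0,0,1,0) is dst and (0,0,0,0) is clear.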
+ int mode = -1; // illegal mode
+ if (SkScalarNearlyZero(k1) && SkScalarNearlyEqual(k2, SK_Scalar1) &&
+ SkScalarNearlyZero(k3) && SkScalarNearlyZero(k4)) {
+ mode = (int)SkBlendMode::kSrc;
+ } else if (SkScalarNearlyZero(k1) && SkScalarNearlyZero(k2) &&
+ SkScalarNearlyEqual(k3, SK_Scalar1) && SkScalarNearlyZero(k4)) {
+ mode = (int)SkBlendMode::kDst;
+ } else if (SkScalarNearlyZero(k1) && SkScalarNearlyZero(k2) &&
+ SkScalarNearlyZero(k3) && SkScalarNearlyZero(k4)) {
+ mode = (int)SkBlendMode::kClear;
+ }
+ if (mode >= 0) {
+ return SkXfermodeImageFilter::Make((SkBlendMode)mode,
+ std::move(background), std::move(foreground), crop);
+ }
+
+ sk_sp<SkImageFilter> inputs[2] = { std::move(background), std::move(foreground) };
+ return sk_sp<SkImageFilter>(new SkArithmeticImageFilter(k1, k2, k3, k4, enforcePMColor,
+ inputs, crop));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkXfermodeImageFilter)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkXfermodeImageFilter_Base)
+ // manually register the legacy serialized name "SkXfermodeImageFilter"
+ SkFlattenable::Register("SkXfermodeImageFilter", SkXfermodeImageFilter_Base::CreateProc,
+ SkFlattenable::kSkImageFilter_Type);
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkArithmeticImageFilter)
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END
diff --git a/gfx/skia/skia/src/effects/gradients/Sk4fGradientBase.cpp b/gfx/skia/skia/src/effects/gradients/Sk4fGradientBase.cpp
new file mode 100644
index 000000000..fa9364a60
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/Sk4fGradientBase.cpp
@@ -0,0 +1,444 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "Sk4fGradientBase.h"
+
+#include <functional>
+
+namespace {
+
+Sk4f pack_color(SkColor c, bool premul, const Sk4f& component_scale) {
+ const SkColor4f c4f = SkColor4f::FromColor(c);
+ const Sk4f pm4f = premul
+ ? c4f.premul().to4f()
+ : Sk4f{c4f.fR, c4f.fG, c4f.fB, c4f.fA};
+
+ return pm4f * component_scale;
+}
+
+template<SkShader::TileMode>
+SkScalar tileProc(SkScalar t);
+
+template<>
+SkScalar tileProc<SkShader::kClamp_TileMode>(SkScalar t) {
+ // synthetic clamp-mode edge intervals allow for a free-floating t:
+ // [-inf..0)[0..1)[1..+inf)
+ return t;
+}
+
+template<>
+SkScalar tileProc<SkShader::kRepeat_TileMode>(SkScalar t) {
+ // t % 1 (intervals range: [0..1))
+ return t - SkScalarFloorToScalar(t);
+}
+
+template<>
+SkScalar tileProc<SkShader::kMirror_TileMode>(SkScalar t) {
+    // t % 2 (synthetic mirror intervals expand the range to [0..2))
+ return t - SkScalarFloorToScalar(t / 2) * 2;
+}
+
+class IntervalIterator {
+public:
+ IntervalIterator(const SkColor* colors, const SkScalar* pos, int count, bool reverse)
+ : fColors(colors)
+ , fPos(pos)
+ , fCount(count)
+ , fFirstPos(reverse ? SK_Scalar1 : 0)
+ , fBegin(reverse ? count - 1 : 0)
+ , fAdvance(reverse ? -1 : 1) {
+ SkASSERT(colors);
+ SkASSERT(count > 0);
+ }
+
+ void iterate(std::function<void(SkColor, SkColor, SkScalar, SkScalar)> func) const {
+ if (!fPos) {
+ this->iterateImplicitPos(func);
+ return;
+ }
+
+ const int end = fBegin + fAdvance * (fCount - 1);
+ const SkScalar lastPos = 1 - fFirstPos;
+ int prev = fBegin;
+ SkScalar prevPos = fFirstPos;
+
+ do {
+ const int curr = prev + fAdvance;
+ SkASSERT(curr >= 0 && curr < fCount);
+
+ // TODO: this sanitization should be done in SkGradientShaderBase
+ const SkScalar currPos = (fAdvance > 0)
+ ? SkTPin(fPos[curr], prevPos, lastPos)
+ : SkTPin(fPos[curr], lastPos, prevPos);
+
+ if (currPos != prevPos) {
+ SkASSERT((currPos - prevPos > 0) == (fAdvance > 0));
+ func(fColors[prev], fColors[curr], prevPos, currPos);
+ }
+
+ prev = curr;
+ prevPos = currPos;
+ } while (prev != end);
+ }
+
+private:
+ void iterateImplicitPos(std::function<void(SkColor, SkColor, SkScalar, SkScalar)> func) const {
+ // When clients don't provide explicit color stop positions (fPos == nullptr),
+ // the color stops are distributed evenly across the unit interval
+ // (implicit positioning).
+ const SkScalar dt = fAdvance * SK_Scalar1 / (fCount - 1);
+ const int end = fBegin + fAdvance * (fCount - 2);
+ int prev = fBegin;
+ SkScalar prevPos = fFirstPos;
+
+ while (prev != end) {
+ const int curr = prev + fAdvance;
+ SkASSERT(curr >= 0 && curr < fCount);
+
+ const SkScalar currPos = prevPos + dt;
+ func(fColors[prev], fColors[curr], prevPos, currPos);
+ prev = curr;
+ prevPos = currPos;
+ }
+
+ // emit the last interval with a pinned end position, to avoid precision issues
+ func(fColors[prev], fColors[prev + fAdvance], prevPos, 1 - fFirstPos);
+ }
+
+ const SkColor* fColors;
+ const SkScalar* fPos;
+ const int fCount;
+ const SkScalar fFirstPos;
+ const int fBegin;
+ const int fAdvance;
+};
+
+} // anonymous namespace
+
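+// fC0 holds the interval's start color and fDc its per-unit color delta, so the color at
+// position t within the interval is fC0 + (t - fP0) * fDc.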
+SkGradientShaderBase::GradientShaderBase4fContext::
+Interval::Interval(const Sk4f& c0, SkScalar p0,
+ const Sk4f& c1, SkScalar p1)
+ : fP0(p0)
+ , fP1(p1)
+ , fZeroRamp((c0 == c1).allTrue()) {
+
+ SkASSERT(p0 != p1);
+ // Either p0 or p1 can be (-)inf for synthetic clamp edge intervals.
+ SkASSERT(SkScalarIsFinite(p0) || SkScalarIsFinite(p1));
+
+ const auto dp = p1 - p0;
+
+ // Clamp edge intervals are always zero-ramp.
+ SkASSERT(SkScalarIsFinite(dp) || fZeroRamp);
+ const Sk4f dc = SkScalarIsFinite(dp) ? (c1 - c0) / dp : 0;
+
+ c0.store(&fC0.fVec);
+ dc.store(&fDc.fVec);
+}
+
+SkGradientShaderBase::
+GradientShaderBase4fContext::GradientShaderBase4fContext(const SkGradientShaderBase& shader,
+ const ContextRec& rec)
+ : INHERITED(shader, rec)
+ , fFlags(this->INHERITED::getFlags())
+#ifdef SK_SUPPORT_LEGACY_GRADIENT_DITHERING
+ , fDither(true)
+#else
+ , fDither(rec.fPaint->isDither())
+#endif
+{
+ const SkMatrix& inverse = this->getTotalInverse();
+ fDstToPos.setConcat(shader.fPtsToUnit, inverse);
+ fDstToPosProc = fDstToPos.getMapXYProc();
+ fDstToPosClass = static_cast<uint8_t>(INHERITED::ComputeMatrixClass(fDstToPos));
+
+ if (shader.fColorsAreOpaque && this->getPaintAlpha() == SK_AlphaOPAQUE) {
+ fFlags |= kOpaqueAlpha_Flag;
+ }
+
+ fColorsArePremul =
+ (shader.fGradFlags & SkGradientShader::kInterpolateColorsInPremul_Flag)
+ || shader.fColorsAreOpaque;
+}
+
+bool SkGradientShaderBase::
+GradientShaderBase4fContext::isValid() const {
+ return fDstToPos.isFinite();
+}
+
+void SkGradientShaderBase::
+GradientShaderBase4fContext::buildIntervals(const SkGradientShaderBase& shader,
+ const ContextRec& rec, bool reverse) {
+ // The main job here is to build a specialized interval list: a different
+ // representation of the color stops data, optimized for efficient scan line
+ // access during shading.
+ //
+    //   [{P0,C0} , {P1,C1}) [{P1,C2} , {P2,C3}) ... [{Pn,C2n} , {Pn+1,C2n+1})
+ //
+ // The list may be inverted when requested (such that e.g. points are sorted
+ // in increasing x order when dx < 0).
+ //
+ // Note: the current representation duplicates pos data; we could refactor to
+ // avoid this if interval storage size becomes a concern.
+ //
+ // Aside from reordering, we also perform two more pre-processing steps at
+ // this stage:
+ //
+ // 1) scale the color components depending on paint alpha and the requested
+ // interpolation space (note: the interval color storage is SkPM4f, but
+ // that doesn't necessarily mean the colors are premultiplied; that
+ // property is tracked in fColorsArePremul)
+ //
+ // 2) inject synthetic intervals to support tiling.
+ //
+ // * for kRepeat, no extra intervals are needed - the iterator just
+ // wraps around at the end:
+ //
+ // ->[P0,P1)->..[Pn-1,Pn)->
+ //
+ // * for kClamp, we add two "infinite" intervals before/after:
+ //
+ // [-/+inf , P0)->[P0 , P1)->..[Pn-1 , Pn)->[Pn , +/-inf)
+ //
+ // (the iterator should never run off the end in this mode)
+ //
+ // * for kMirror, we extend the range to [0..2] and add a flipped
+ // interval series - then the iterator operates just as in the
+ // kRepeat case:
+ //
+ // ->[P0,P1)->..[Pn-1,Pn)->[2 - Pn,2 - Pn-1)->..[2 - P1,2 - P0)->
+ //
+ // TODO: investigate collapsing intervals << 1px.
+
+ SkASSERT(shader.fColorCount > 0);
+ SkASSERT(shader.fOrigColors);
+
+ const float paintAlpha = rec.fPaint->getAlpha() * (1.0f / 255);
+ const Sk4f componentScale = fColorsArePremul
+ ? Sk4f(paintAlpha)
+ : Sk4f(1.0f, 1.0f, 1.0f, paintAlpha);
+ const int first_index = reverse ? shader.fColorCount - 1 : 0;
+ const int last_index = shader.fColorCount - 1 - first_index;
+ const SkScalar first_pos = reverse ? SK_Scalar1 : 0;
+ const SkScalar last_pos = SK_Scalar1 - first_pos;
+
+ if (shader.fTileMode == SkShader::kClamp_TileMode) {
+ // synthetic edge interval: -/+inf .. P0
+ const Sk4f clamp_color = pack_color(shader.fOrigColors[first_index],
+ fColorsArePremul, componentScale);
+ const SkScalar clamp_pos = reverse ? SK_ScalarInfinity : SK_ScalarNegativeInfinity;
+ fIntervals.emplace_back(clamp_color, clamp_pos,
+ clamp_color, first_pos);
+ } else if (shader.fTileMode == SkShader::kMirror_TileMode && reverse) {
+ // synthetic mirror intervals injected before main intervals: (2 .. 1]
+ addMirrorIntervals(shader, componentScale, false);
+ }
+
+ const IntervalIterator iter(shader.fOrigColors,
+ shader.fOrigPos,
+ shader.fColorCount,
+ reverse);
+ iter.iterate([this, &componentScale] (SkColor c0, SkColor c1, SkScalar p0, SkScalar p1) {
+ SkASSERT(fIntervals.empty() || fIntervals.back().fP1 == p0);
+
+ fIntervals.emplace_back(pack_color(c0, fColorsArePremul, componentScale),
+ p0,
+ pack_color(c1, fColorsArePremul, componentScale),
+ p1);
+ });
+
+ if (shader.fTileMode == SkShader::kClamp_TileMode) {
+ // synthetic edge interval: Pn .. +/-inf
+ const Sk4f clamp_color = pack_color(shader.fOrigColors[last_index],
+ fColorsArePremul, componentScale);
+ const SkScalar clamp_pos = reverse ? SK_ScalarNegativeInfinity : SK_ScalarInfinity;
+ fIntervals.emplace_back(clamp_color, last_pos,
+ clamp_color, clamp_pos);
+ } else if (shader.fTileMode == SkShader::kMirror_TileMode && !reverse) {
+ // synthetic mirror intervals injected after main intervals: [1 .. 2)
+ addMirrorIntervals(shader, componentScale, true);
+ }
+}
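+
+// For illustration, a hypothetical three-stop kClamp gradient (colors A, B, C at
+// positions 0, 0.5, 1 -- illustrative values, not taken from any caller) built
+// with reverse == false yields the interval sequence:
+//
+//   [-inf, 0  )  A -> A   (synthetic clamp edge, zero ramp)
+//   [   0, 0.5)  A -> B
+//   [ 0.5, 1  )  B -> C
+//   [   1, +inf) C -> C   (synthetic clamp edge, zero ramp)
+//
+// In kMirror mode there are no edge intervals; instead a mirrored copy of the
+// stops is appended over [1, 2), so the sampler can simply wrap around.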
+
+void SkGradientShaderBase::
+GradientShaderBase4fContext::addMirrorIntervals(const SkGradientShaderBase& shader,
+ const Sk4f& componentScale, bool reverse) {
+ const IntervalIterator iter(shader.fOrigColors,
+ shader.fOrigPos,
+ shader.fColorCount,
+ reverse);
+ iter.iterate([this, &componentScale] (SkColor c0, SkColor c1, SkScalar p0, SkScalar p1) {
+ SkASSERT(fIntervals.empty() || fIntervals.back().fP1 == 2 - p0);
+
+ fIntervals.emplace_back(pack_color(c0, fColorsArePremul, componentScale),
+ 2 - p0,
+ pack_color(c1, fColorsArePremul, componentScale),
+ 2 - p1);
+ });
+}
+
+void SkGradientShaderBase::
+GradientShaderBase4fContext::shadeSpan(int x, int y, SkPMColor dst[], int count) {
+ if (fColorsArePremul) {
+ this->shadePremulSpan<DstType::L32, ApplyPremul::False>(x, y, dst, count);
+ } else {
+ this->shadePremulSpan<DstType::L32, ApplyPremul::True>(x, y, dst, count);
+ }
+}
+
+void SkGradientShaderBase::
+GradientShaderBase4fContext::shadeSpan4f(int x, int y, SkPM4f dst[], int count) {
+ if (fColorsArePremul) {
+ this->shadePremulSpan<DstType::F32, ApplyPremul::False>(x, y, dst, count);
+ } else {
+ this->shadePremulSpan<DstType::F32, ApplyPremul::True>(x, y, dst, count);
+ }
+}
+
+template<DstType dstType, ApplyPremul premul>
+void SkGradientShaderBase::
+GradientShaderBase4fContext::shadePremulSpan(int x, int y,
+ typename DstTraits<dstType, premul>::Type dst[],
+ int count) const {
+ const SkGradientShaderBase& shader =
+ static_cast<const SkGradientShaderBase&>(fShader);
+
+ switch (shader.fTileMode) {
+ case kClamp_TileMode:
+ this->shadeSpanInternal<dstType,
+ premul,
+ kClamp_TileMode>(x, y, dst, count);
+ break;
+ case kRepeat_TileMode:
+ this->shadeSpanInternal<dstType,
+ premul,
+ kRepeat_TileMode>(x, y, dst, count);
+ break;
+ case kMirror_TileMode:
+ this->shadeSpanInternal<dstType,
+ premul,
+ kMirror_TileMode>(x, y, dst, count);
+ break;
+ }
+}
+
+template<DstType dstType, ApplyPremul premul, SkShader::TileMode tileMode>
+void SkGradientShaderBase::
+GradientShaderBase4fContext::shadeSpanInternal(int x, int y,
+ typename DstTraits<dstType, premul>::Type dst[],
+ int count) const {
+ static const int kBufSize = 128;
+ SkScalar ts[kBufSize];
+ TSampler<dstType, tileMode> sampler(*this);
+
+ SkASSERT(count > 0);
+ do {
+ const int n = SkTMin(kBufSize, count);
+ this->mapTs(x, y, ts, n);
+ for (int i = 0; i < n; ++i) {
+ const Sk4f c = sampler.sample(ts[i]);
+ DstTraits<dstType, premul>::store(c, dst++);
+ }
+ x += n;
+ count -= n;
+ } while (count > 0);
+}
+
+template<DstType dstType, SkShader::TileMode tileMode>
+class SkGradientShaderBase::GradientShaderBase4fContext::TSampler {
+public:
+ TSampler(const GradientShaderBase4fContext& ctx)
+ : fFirstInterval(ctx.fIntervals.begin())
+ , fLastInterval(ctx.fIntervals.end() - 1)
+ , fInterval(nullptr) {
+ SkASSERT(fLastInterval >= fFirstInterval);
+ }
+
+ Sk4f sample(SkScalar t) {
+ const SkScalar tiled_t = tileProc<tileMode>(t);
+
+ if (!fInterval) {
+ // Very first sample => locate the initial interval.
+ // TODO: maybe do this in ctor to remove a branch?
+ fInterval = this->findFirstInterval(tiled_t);
+ this->loadIntervalData(fInterval);
+ } else if (tiled_t < fInterval->fP0 || tiled_t >= fInterval->fP1) {
+ fInterval = this->findNextInterval(t, tiled_t);
+ this->loadIntervalData(fInterval);
+ }
+
+ fPrevT = t;
+ return lerp(tiled_t);
+ }
+
+private:
+ Sk4f lerp(SkScalar t) {
+ SkASSERT(t >= fInterval->fP0 && t < fInterval->fP1);
+ return fCc + fDc * (t - fInterval->fP0);
+ }
+
+ const Interval* findFirstInterval(SkScalar t) const {
+ // Binary search.
+ const Interval* i0 = fFirstInterval;
+ const Interval* i1 = fLastInterval;
+
+ while (i0 != i1) {
+ SkASSERT(i0 < i1);
+ SkASSERT(t >= i0->fP0 && t < i1->fP1);
+
+ const Interval* i = i0 + ((i1 - i0) >> 1);
+
+ if (t >= i->fP1) {
+ i0 = i + 1;
+ } else {
+ i1 = i;
+ }
+ }
+
+ SkASSERT(t >= i0->fP0 && t <= i0->fP1);
+ return i0;
+ }
+
+ const Interval* findNextInterval(SkScalar t, SkScalar tiled_t) const {
+ SkASSERT(tiled_t < fInterval->fP0 || tiled_t >= fInterval->fP1);
+ SkASSERT(tiled_t >= fFirstInterval->fP0 && tiled_t < fLastInterval->fP1);
+
+ const Interval* i = fInterval;
+
+ // Use the t vs. prev_t signal to figure which direction we should search for
+ // the next interval, then perform a linear search.
+ if (t >= fPrevT) {
+ do {
+ i += 1;
+ if (i > fLastInterval) {
+ i = fFirstInterval;
+ }
+ } while (tiled_t < i->fP0 || tiled_t >= i->fP1);
+ } else {
+ do {
+ i -= 1;
+ if (i < fFirstInterval) {
+ i = fLastInterval;
+ }
+ } while (tiled_t < i->fP0 || tiled_t >= i->fP1);
+ }
+
+ return i;
+ }
+
+ void loadIntervalData(const Interval* i) {
+ fCc = DstTraits<dstType>::load(i->fC0);
+ fDc = DstTraits<dstType>::load(i->fDc);
+ }
+
+ const Interval* fFirstInterval;
+ const Interval* fLastInterval;
+ const Interval* fInterval;
+ SkScalar fPrevT;
+ Sk4f fCc;
+ Sk4f fDc;
+};
diff --git a/gfx/skia/skia/src/effects/gradients/Sk4fGradientBase.h b/gfx/skia/skia/src/effects/gradients/Sk4fGradientBase.h
new file mode 100644
index 000000000..fd6d6563a
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/Sk4fGradientBase.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk4fGradientBase_DEFINED
+#define Sk4fGradientBase_DEFINED
+
+#include "Sk4fGradientPriv.h"
+#include "SkColor.h"
+#include "SkGradientShaderPriv.h"
+#include "SkMatrix.h"
+#include "SkNx.h"
+#include "SkPM4f.h"
+#include "SkShader.h"
+#include "SkTArray.h"
+
+class SkGradientShaderBase::
+GradientShaderBase4fContext : public SkShader::Context {
+public:
+ GradientShaderBase4fContext(const SkGradientShaderBase&,
+ const ContextRec&);
+
+ uint32_t getFlags() const override { return fFlags; }
+
+ void shadeSpan(int x, int y, SkPMColor dst[], int count) override;
+ void shadeSpan4f(int x, int y, SkPM4f dst[], int count) override;
+
+ bool isValid() const;
+
+protected:
+ struct Interval {
+ Interval(const Sk4f& c0, SkScalar p0,
+ const Sk4f& c1, SkScalar p1);
+
+ bool isZeroRamp() const { return fZeroRamp; }
+
+ SkPM4f fC0, fDc;
+ SkScalar fP0, fP1;
+ bool fZeroRamp;
+ };
+
+ virtual void mapTs(int x, int y, SkScalar ts[], int count) const = 0;
+
+ void buildIntervals(const SkGradientShaderBase&, const ContextRec&, bool reverse);
+
+ SkSTArray<8, Interval, true> fIntervals;
+ SkMatrix fDstToPos;
+ SkMatrix::MapXYProc fDstToPosProc;
+ uint8_t fDstToPosClass;
+ uint8_t fFlags;
+ bool fDither;
+ bool fColorsArePremul;
+
+private:
+ using INHERITED = SkShader::Context;
+
+ void addMirrorIntervals(const SkGradientShaderBase&,
+ const Sk4f& componentScale, bool reverse);
+
+ template<DstType, SkShader::TileMode tileMode>
+ class TSampler;
+
+ template <DstType dstType, ApplyPremul premul>
+ void shadePremulSpan(int x, int y, typename DstTraits<dstType, premul>::Type[],
+ int count) const;
+
+ template <DstType dstType, ApplyPremul premul, SkShader::TileMode tileMode>
+ void shadeSpanInternal(int x, int y, typename DstTraits<dstType, premul>::Type[],
+ int count) const;
+};
+
+#endif // Sk4fGradientBase_DEFINED
diff --git a/gfx/skia/skia/src/effects/gradients/Sk4fGradientPriv.h b/gfx/skia/skia/src/effects/gradients/Sk4fGradientPriv.h
new file mode 100644
index 000000000..65fa821e8
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/Sk4fGradientPriv.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk4fGradientPriv_DEFINED
+#define Sk4fGradientPriv_DEFINED
+
+#include "SkColor.h"
+#include "SkHalf.h"
+#include "SkImageInfo.h"
+#include "SkNx.h"
+#include "SkPM4f.h"
+#include "SkPM4fPriv.h"
+#include "SkUtils.h"
+
+// Templates shared by various 4f gradient flavors.
+
+namespace {
+
+enum class ApplyPremul { True, False };
+
+enum class DstType {
+ L32, // Linear 32bit. Used for both shader/blitter paths.
+ S32, // SRGB 32bit. Used for the blitter path only.
+ F16, // Linear half-float. Used for blitters only.
+ F32, // Linear float. Used for shaders only.
+};
+
+template <ApplyPremul premul>
+inline SkPMColor trunc_from_4f_255(const Sk4f& c) {
+ SkPMColor pmc;
+ SkNx_cast<uint8_t>(c).store(&pmc);
+ if (premul == ApplyPremul::True) {
+ pmc = SkPreMultiplyARGB(SkGetPackedA32(pmc), SkGetPackedR32(pmc),
+ SkGetPackedG32(pmc), SkGetPackedB32(pmc));
+ }
+ return pmc;
+}
+
+template <ApplyPremul>
+struct PremulTraits;
+
+template <>
+struct PremulTraits<ApplyPremul::False> {
+ static Sk4f apply(const Sk4f& c) { return c; }
+};
+
+template <>
+struct PremulTraits<ApplyPremul::True> {
+ static Sk4f apply(const Sk4f& c) {
+ const float alpha = c[SkPM4f::A];
+ // FIXME: portable swizzle?
+ return c * Sk4f(alpha, alpha, alpha, 1);
+ }
+};
+
+// Struct encapsulating various dest-dependent ops:
+//
+// - load() Load a SkPM4f value into Sk4f. Normally called once per interval
+// advance. Also applies a scale and swizzle suitable for DstType.
+//
+// - store() Store one Sk4f to dest. Optionally handles premul, color space
+// conversion, etc.
+//
+// - store(count) Store the Sk4f value repeatedly to dest, count times.
+//
+// - store4x() Store 4 Sk4f values to dest (opportunistic optimization).
+//
+template <DstType, ApplyPremul premul = ApplyPremul::False>
+struct DstTraits;
+
+template <ApplyPremul premul>
+struct DstTraits<DstType::L32, premul> {
+ using Type = SkPMColor;
+
+ // For L32, we prescale the values by 255 to save a per-pixel multiplication.
+ static Sk4f load(const SkPM4f& c) {
+ return c.to4f_pmorder() * Sk4f(255);
+ }
+
+ static void store(const Sk4f& c, Type* dst) {
+ *dst = trunc_from_4f_255<premul>(c);
+ }
+
+ static void store(const Sk4f& c, Type* dst, int n) {
+ sk_memset32(dst, trunc_from_4f_255<premul>(c), n);
+ }
+
+ static void store4x(const Sk4f& c0, const Sk4f& c1,
+ const Sk4f& c2, const Sk4f& c3,
+ Type* dst) {
+ if (premul == ApplyPremul::False) {
+ Sk4f_ToBytes((uint8_t*)dst, c0, c1, c2, c3);
+ } else {
+ store(c0, dst + 0);
+ store(c1, dst + 1);
+ store(c2, dst + 2);
+ store(c3, dst + 3);
+ }
+ }
+};
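+
+// Worked example of the L32 prescale above, with a hypothetical SkPM4f of
+// (0.5, 0.25, 0, 1): load() returns (127.5, 63.75, 0, 255) in packed component
+// order, and trunc_from_4f_255() narrows each lane straight to a byte (and
+// premultiplies when requested), so no per-pixel multiply by 255 is needed.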
+
+template <ApplyPremul premul>
+struct DstTraits<DstType::S32, premul> {
+ using PM = PremulTraits<premul>;
+ using Type = SkPMColor;
+
+ static Sk4f load(const SkPM4f& c) {
+ return c.to4f_pmorder();
+ }
+
+ static void store(const Sk4f& c, Type* dst) {
+ // FIXME: this assumes opaque colors. Handle unpremultiplication.
+ *dst = Sk4f_toS32(PM::apply(c));
+ }
+
+ static void store(const Sk4f& c, Type* dst, int n) {
+ sk_memset32(dst, Sk4f_toS32(PM::apply(c)), n);
+ }
+
+ static void store4x(const Sk4f& c0, const Sk4f& c1,
+ const Sk4f& c2, const Sk4f& c3,
+ Type* dst) {
+ store(c0, dst + 0);
+ store(c1, dst + 1);
+ store(c2, dst + 2);
+ store(c3, dst + 3);
+ }
+};
+
+template <ApplyPremul premul>
+struct DstTraits<DstType::F16, premul> {
+ using PM = PremulTraits<premul>;
+ using Type = uint64_t;
+
+ static Sk4f load(const SkPM4f& c) {
+ return c.to4f();
+ }
+
+ static void store(const Sk4f& c, Type* dst) {
+ SkFloatToHalf_finite_ftz(PM::apply(c)).store(dst);
+ }
+
+ static void store(const Sk4f& c, Type* dst, int n) {
+ uint64_t color;
+ SkFloatToHalf_finite_ftz(PM::apply(c)).store(&color);
+ sk_memset64(dst, color, n);
+ }
+
+ static void store4x(const Sk4f& c0, const Sk4f& c1,
+ const Sk4f& c2, const Sk4f& c3,
+ Type* dst) {
+ store(c0, dst + 0);
+ store(c1, dst + 1);
+ store(c2, dst + 2);
+ store(c3, dst + 3);
+ }
+};
+
+template <ApplyPremul premul>
+struct DstTraits<DstType::F32, premul> {
+ using PM = PremulTraits<premul>;
+ using Type = SkPM4f;
+
+ static Sk4f load(const SkPM4f& c) {
+ return c.to4f();
+ }
+
+ static void store(const Sk4f& c, Type* dst) {
+ PM::apply(c).store(dst->fVec);
+ }
+
+ static void store(const Sk4f& c, Type* dst, int n) {
+ const Sk4f pmc = PM::apply(c);
+ for (int i = 0; i < n; ++i) {
+ pmc.store(dst[i].fVec);
+ }
+ }
+
+ static void store4x(const Sk4f& c0, const Sk4f& c1,
+ const Sk4f& c2, const Sk4f& c3,
+ Type* dst) {
+ store(c0, dst + 0);
+ store(c1, dst + 1);
+ store(c2, dst + 2);
+ store(c3, dst + 3);
+ }
+};
+
+} // anonymous namespace
+
+#endif // Sk4fGradientPriv_DEFINED
diff --git a/gfx/skia/skia/src/effects/gradients/Sk4fLinearGradient.cpp b/gfx/skia/skia/src/effects/gradients/Sk4fLinearGradient.cpp
new file mode 100644
index 000000000..f9618dd1b
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/Sk4fLinearGradient.cpp
@@ -0,0 +1,488 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "Sk4fLinearGradient.h"
+#include "Sk4x4f.h"
+#include "SkXfermode.h"
+
+namespace {
+
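+// Writes an n-pixel linear ramp starting at color c with per-pixel increment dc:
+// unrolled four wide with interleaved accumulators c0..c3, then a two-pixel and
+// a one-pixel tail.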
+template<DstType dstType, ApplyPremul premul>
+void ramp(const Sk4f& c, const Sk4f& dc, typename DstTraits<dstType, premul>::Type dst[], int n) {
+ SkASSERT(n > 0);
+
+ const Sk4f dc2 = dc + dc;
+ const Sk4f dc4 = dc2 + dc2;
+
+ Sk4f c0 = c ;
+ Sk4f c1 = c + dc;
+ Sk4f c2 = c0 + dc2;
+ Sk4f c3 = c1 + dc2;
+
+ while (n >= 4) {
+ DstTraits<dstType, premul>::store4x(c0, c1, c2, c3, dst);
+ dst += 4;
+
+ c0 = c0 + dc4;
+ c1 = c1 + dc4;
+ c2 = c2 + dc4;
+ c3 = c3 + dc4;
+ n -= 4;
+ }
+ if (n & 2) {
+ DstTraits<dstType, premul>::store(c0, dst++);
+ DstTraits<dstType, premul>::store(c1, dst++);
+ c0 = c0 + dc2;
+ }
+ if (n & 1) {
+ DstTraits<dstType, premul>::store(c0, dst);
+ }
+}
+
+// Planar version of ramp (S32 no-premul only).
+template<>
+void ramp<DstType::S32, ApplyPremul::False>(const Sk4f& c, const Sk4f& dc, SkPMColor dst[], int n) {
+ SkASSERT(n > 0);
+
+ const Sk4f dc4 = dc * 4;
+ const Sk4x4f dc4x = { Sk4f(dc4[0]), Sk4f(dc4[1]), Sk4f(dc4[2]), Sk4f(dc4[3]) };
+ Sk4x4f c4x = Sk4x4f::Transpose(c, c + dc, c + dc * 2, c + dc * 3);
+
+ while (n >= 4) {
+ ( sk_linear_to_srgb(c4x.r) << 0
+ | sk_linear_to_srgb(c4x.g) << 8
+ | sk_linear_to_srgb(c4x.b) << 16
+ | Sk4f_round(255.0f*c4x.a) << 24).store(dst);
+
+ c4x.r += dc4x.r;
+ c4x.g += dc4x.g;
+ c4x.b += dc4x.b;
+ c4x.a += dc4x.a;
+
+ dst += 4;
+ n -= 4;
+ }
+
+ if (n & 2) {
+ DstTraits<DstType::S32, ApplyPremul::False>
+ ::store(Sk4f(c4x.r[0], c4x.g[0], c4x.b[0], c4x.a[0]), dst++);
+ DstTraits<DstType::S32, ApplyPremul::False>
+ ::store(Sk4f(c4x.r[1], c4x.g[1], c4x.b[1], c4x.a[1]), dst++);
+ }
+
+ if (n & 1) {
+ DstTraits<DstType::S32, ApplyPremul::False>
+ ::store(Sk4f(c4x.r[n & 2], c4x.g[n & 2], c4x.b[n & 2], c4x.a[n & 2]), dst);
+ }
+}
+
+template<SkShader::TileMode>
+SkScalar pinFx(SkScalar);
+
+template<>
+SkScalar pinFx<SkShader::kClamp_TileMode>(SkScalar fx) {
+ return fx;
+}
+
+template<>
+SkScalar pinFx<SkShader::kRepeat_TileMode>(SkScalar fx) {
+ const SkScalar f = SkScalarFraction(fx);
+ return f < 0 ? f + 1 : f;
+}
+
+template<>
+SkScalar pinFx<SkShader::kMirror_TileMode>(SkScalar fx) {
+ const SkScalar f = SkScalarMod(fx, 2.0f);
+ return f < 0 ? f + 2 : f;
+}
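+
+// Illustrative values: pinFx<kRepeat>(2.25) -> 0.25 and pinFx<kRepeat>(-0.25) -> 0.75,
+// while pinFx<kMirror>(2.25) -> 0.25 and pinFx<kMirror>(-0.25) -> 1.75; kClamp leaves
+// fx untouched because the synthetic edge intervals already cover (-inf, +inf).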
+
+// true when x is in [k1,k2), or (k2,k1] when the interval is reversed.
+// TODO(fmalita): hoist the reversed interval check out of this helper.
+bool in_range(SkScalar x, SkScalar k1, SkScalar k2) {
+ SkASSERT(k1 != k2);
+ return (k1 < k2)
+ ? (x >= k1 && x < k2)
+ : (x > k2 && x <= k1);
+}
+
+} // anonymous namespace
+
+SkLinearGradient::
+LinearGradient4fContext::LinearGradient4fContext(const SkLinearGradient& shader,
+ const ContextRec& rec)
+ : INHERITED(shader, rec) {
+
+ // Our fast path expects interval points to be monotonically increasing in x.
+ const bool reverseIntervals = this->isFast() && signbit(fDstToPos.getScaleX());
+ this->buildIntervals(shader, rec, reverseIntervals);
+
+ SkASSERT(fIntervals.count() > 0);
+ fCachedInterval = fIntervals.begin();
+}
+
+const SkGradientShaderBase::GradientShaderBase4fContext::Interval*
+SkLinearGradient::LinearGradient4fContext::findInterval(SkScalar fx) const {
+ SkASSERT(in_range(fx, fIntervals.front().fP0, fIntervals.back().fP1));
+
+ if (1) {
+ // Linear search, using the last scanline interval as a starting point.
+ SkASSERT(fCachedInterval >= fIntervals.begin());
+ SkASSERT(fCachedInterval < fIntervals.end());
+ const int search_dir = fDstToPos.getScaleX() >= 0 ? 1 : -1;
+ while (!in_range(fx, fCachedInterval->fP0, fCachedInterval->fP1)) {
+ fCachedInterval += search_dir;
+ if (fCachedInterval >= fIntervals.end()) {
+ fCachedInterval = fIntervals.begin();
+ } else if (fCachedInterval < fIntervals.begin()) {
+ fCachedInterval = fIntervals.end() - 1;
+ }
+ }
+ return fCachedInterval;
+ } else {
+ // Binary search. Seems less effective than linear + caching.
+ const Interval* i0 = fIntervals.begin();
+ const Interval* i1 = fIntervals.end() - 1;
+
+ while (i0 != i1) {
+ SkASSERT(i0 < i1);
+ SkASSERT(in_range(fx, i0->fP0, i1->fP1));
+
+ const Interval* i = i0 + ((i1 - i0) >> 1);
+
+ if (in_range(fx, i0->fP0, i->fP1)) {
+ i1 = i;
+ } else {
+ SkASSERT(in_range(fx, i->fP1, i1->fP1));
+ i0 = i + 1;
+ }
+ }
+
+ SkASSERT(in_range(fx, i0->fP0, i0->fP1));
+ return i0;
+ }
+}
+
+void SkLinearGradient::
+LinearGradient4fContext::shadeSpan(int x, int y, SkPMColor dst[], int count) {
+ if (!this->isFast()) {
+ this->INHERITED::shadeSpan(x, y, dst, count);
+ return;
+ }
+
+ // TODO: plumb dithering
+ SkASSERT(count > 0);
+ if (fColorsArePremul) {
+ this->shadePremulSpan<DstType::L32,
+ ApplyPremul::False>(x, y, dst, count);
+ } else {
+ this->shadePremulSpan<DstType::L32,
+ ApplyPremul::True>(x, y, dst, count);
+ }
+}
+
+void SkLinearGradient::
+LinearGradient4fContext::shadeSpan4f(int x, int y, SkPM4f dst[], int count) {
+ if (!this->isFast()) {
+ this->INHERITED::shadeSpan4f(x, y, dst, count);
+ return;
+ }
+
+ // TONOTDO: plumb dithering
+ SkASSERT(count > 0);
+ if (fColorsArePremul) {
+ this->shadePremulSpan<DstType::F32,
+ ApplyPremul::False>(x, y, dst, count);
+ } else {
+ this->shadePremulSpan<DstType::F32,
+ ApplyPremul::True>(x, y, dst, count);
+ }
+}
+
+template<DstType dstType, ApplyPremul premul>
+void SkLinearGradient::
+LinearGradient4fContext::shadePremulSpan(int x, int y,
+ typename DstTraits<dstType, premul>::Type dst[],
+ int count) const {
+ const SkLinearGradient& shader =
+ static_cast<const SkLinearGradient&>(fShader);
+ switch (shader.fTileMode) {
+ case kClamp_TileMode:
+ this->shadeSpanInternal<dstType,
+ premul,
+ kClamp_TileMode>(x, y, dst, count);
+ break;
+ case kRepeat_TileMode:
+ this->shadeSpanInternal<dstType,
+ premul,
+ kRepeat_TileMode>(x, y, dst, count);
+ break;
+ case kMirror_TileMode:
+ this->shadeSpanInternal<dstType,
+ premul,
+ kMirror_TileMode>(x, y, dst, count);
+ break;
+ }
+}
+
+template<DstType dstType, ApplyPremul premul, SkShader::TileMode tileMode>
+void SkLinearGradient::
+LinearGradient4fContext::shadeSpanInternal(int x, int y,
+ typename DstTraits<dstType, premul>::Type dst[],
+ int count) const {
+ SkPoint pt;
+ fDstToPosProc(fDstToPos,
+ x + SK_ScalarHalf,
+ y + SK_ScalarHalf,
+ &pt);
+ const SkScalar fx = pinFx<tileMode>(pt.x());
+ const SkScalar dx = fDstToPos.getScaleX();
+ LinearIntervalProcessor<dstType, tileMode> proc(fIntervals.begin(),
+ fIntervals.end() - 1,
+ this->findInterval(fx),
+ fx,
+ dx,
+ SkScalarNearlyZero(dx * count));
+ while (count > 0) {
+ // What we really want here is SkTPin(advance, 1, count)
+        // but that's a significant perf hit for gradients with many stops; investigate.
+ const int n = SkScalarTruncToInt(
+ SkTMin<SkScalar>(proc.currentAdvance() + 1, SkIntToScalar(count)));
+
+ // The current interval advance can be +inf (e.g. when reaching
+ // the clamp mode end intervals) - when that happens, we expect to
+ // a) consume all remaining count in one swoop
+ // b) return a zero color gradient
+ SkASSERT(SkScalarIsFinite(proc.currentAdvance())
+ || (n == count && proc.currentRampIsZero()));
+
+ if (proc.currentRampIsZero()) {
+ DstTraits<dstType, premul>::store(proc.currentColor(),
+ dst, n);
+ } else {
+ ramp<dstType, premul>(proc.currentColor(),
+ proc.currentColorGrad(),
+ dst, n);
+ }
+
+ proc.advance(SkIntToScalar(n));
+ count -= n;
+ dst += n;
+ }
+}
+
+template<DstType dstType, SkShader::TileMode tileMode>
+class SkLinearGradient::
+LinearGradient4fContext::LinearIntervalProcessor {
+public:
+ LinearIntervalProcessor(const Interval* firstInterval,
+ const Interval* lastInterval,
+ const Interval* i,
+ SkScalar fx,
+ SkScalar dx,
+ bool is_vertical)
+ : fAdvX((i->fP1 - fx) / dx)
+ , fFirstInterval(firstInterval)
+ , fLastInterval(lastInterval)
+ , fInterval(i)
+ , fDx(dx)
+ , fIsVertical(is_vertical)
+ {
+ SkASSERT(fAdvX >= 0);
+ SkASSERT(firstInterval <= lastInterval);
+ SkASSERT(in_range(fx, i->fP0, i->fP1));
+ this->compute_interval_props(fx - i->fP0);
+ }
+
+ SkScalar currentAdvance() const {
+ SkASSERT(fAdvX >= 0);
+ SkASSERT(fAdvX <= (fInterval->fP1 - fInterval->fP0) / fDx);
+ return fAdvX;
+ }
+
+ bool currentRampIsZero() const { return fZeroRamp; }
+ const Sk4f& currentColor() const { return fCc; }
+ const Sk4f& currentColorGrad() const { return fDcDx; }
+
+ void advance(SkScalar advX) {
+ SkASSERT(advX > 0);
+ SkASSERT(fAdvX >= 0);
+
+ if (advX >= fAdvX) {
+ advX = this->advance_interval(advX);
+ }
+ SkASSERT(advX < fAdvX);
+
+ fCc = fCc + fDcDx * Sk4f(advX);
+ fAdvX -= advX;
+ }
+
+private:
+ void compute_interval_props(SkScalar t) {
+ fZeroRamp = fIsVertical || fInterval->isZeroRamp();
+ fCc = DstTraits<dstType>::load(fInterval->fC0);
+
+ if (fInterval->isZeroRamp()) {
+ fDcDx = 0;
+ } else {
+ const Sk4f dC = DstTraits<dstType>::load(fInterval->fDc);
+ fCc = fCc + dC * Sk4f(t);
+ fDcDx = dC * fDx;
+ }
+ }
+
+ const Interval* next_interval(const Interval* i) const {
+ SkASSERT(i >= fFirstInterval);
+ SkASSERT(i <= fLastInterval);
+ i++;
+
+ if (tileMode == kClamp_TileMode) {
+ SkASSERT(i <= fLastInterval);
+ return i;
+ }
+
+ return (i <= fLastInterval) ? i : fFirstInterval;
+ }
+
+ SkScalar advance_interval(SkScalar advX) {
+ SkASSERT(advX >= fAdvX);
+
+ do {
+ advX -= fAdvX;
+ fInterval = this->next_interval(fInterval);
+ fAdvX = (fInterval->fP1 - fInterval->fP0) / fDx;
+ SkASSERT(fAdvX > 0);
+ } while (advX >= fAdvX);
+
+ compute_interval_props(0);
+
+ SkASSERT(advX >= 0);
+ return advX;
+ }
+
+ // Current interval properties.
+ Sk4f fDcDx; // dst color gradient (dc/dx)
+ Sk4f fCc; // current color, interpolated in dst
+ SkScalar fAdvX; // remaining interval advance in dst
+ bool fZeroRamp; // current interval color grad is 0
+
+ const Interval* fFirstInterval;
+ const Interval* fLastInterval;
+ const Interval* fInterval; // current interval
+ const SkScalar fDx; // 'dx' for consistency with other impls; actually dt/dx
+ const bool fIsVertical;
+};
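+
+// Illustrative numbers for the processor above: entering the interval [0.25, 0.75)
+// at fx = 0.35 with dx = 0.01 gives fAdvX = (0.75 - 0.35) / 0.01 = 40, i.e. forty
+// more pixel steps until the position reaches the interval's end.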
+
+void SkLinearGradient::
+LinearGradient4fContext::mapTs(int x, int y, SkScalar ts[], int count) const {
+ SkASSERT(count > 0);
+ SkASSERT(fDstToPosClass != kLinear_MatrixClass);
+
+ SkScalar sx = x + SK_ScalarHalf;
+ const SkScalar sy = y + SK_ScalarHalf;
+ SkPoint pt;
+
+ if (fDstToPosClass != kPerspective_MatrixClass) {
+ // kLinear_MatrixClass, kFixedStepInX_MatrixClass => fixed dt per scanline
+ const SkScalar dtdx = fDstToPos.fixedStepInX(sy).x();
+ fDstToPosProc(fDstToPos, sx, sy, &pt);
+
+ const Sk4f dtdx4 = Sk4f(4 * dtdx);
+ Sk4f t4 = Sk4f(pt.x() + 0 * dtdx,
+ pt.x() + 1 * dtdx,
+ pt.x() + 2 * dtdx,
+ pt.x() + 3 * dtdx);
+
+ while (count >= 4) {
+ t4.store(ts);
+ t4 = t4 + dtdx4;
+ ts += 4;
+ count -= 4;
+ }
+
+ if (count & 2) {
+ *ts++ = t4[0];
+ *ts++ = t4[1];
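+            // Rotate lane 2 into lane 0 so the odd-count store below picks up the next t.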
+ t4 = SkNx_shuffle<2, 0, 1, 3>(t4);
+ }
+
+ if (count & 1) {
+ *ts++ = t4[0];
+ }
+ } else {
+ for (int i = 0; i < count; ++i) {
+ fDstToPosProc(fDstToPos, sx, sy, &pt);
+ ts[i] = pt.x();
+ sx += SK_Scalar1;
+ }
+ }
+}
+
+bool SkLinearGradient::LinearGradient4fContext::onChooseBlitProcs(const SkImageInfo& info,
+ BlitState* state) {
+ SkXfermode::Mode mode;
+ if (!SkXfermode::AsMode(state->fXfer, &mode)) {
+ return false;
+ }
+
+ if (mode != SkXfermode::kSrc_Mode &&
+ !(mode == SkXfermode::kSrcOver_Mode && (fFlags & kOpaqueAlpha_Flag))) {
+ return false;
+ }
+
+ switch (info.colorType()) {
+ case kN32_SkColorType:
+ state->fBlitBW = D32_BlitBW;
+ return true;
+ case kRGBA_F16_SkColorType:
+ state->fBlitBW = D64_BlitBW;
+ return true;
+ default:
+ return false;
+ }
+}
+
+void SkLinearGradient::
+LinearGradient4fContext::D32_BlitBW(BlitState* state, int x, int y, const SkPixmap& dst,
+ int count) {
+ // FIXME: ignoring coverage for now
+ const LinearGradient4fContext* ctx =
+ static_cast<const LinearGradient4fContext*>(state->fCtx);
+
+ if (!dst.info().gammaCloseToSRGB()) {
+ if (ctx->fColorsArePremul) {
+ ctx->shadePremulSpan<DstType::L32, ApplyPremul::False>(
+ x, y, dst.writable_addr32(x, y), count);
+ } else {
+ ctx->shadePremulSpan<DstType::L32, ApplyPremul::True>(
+ x, y, dst.writable_addr32(x, y), count);
+ }
+ } else {
+ if (ctx->fColorsArePremul) {
+ ctx->shadePremulSpan<DstType::S32, ApplyPremul::False>(
+ x, y, dst.writable_addr32(x, y), count);
+ } else {
+ ctx->shadePremulSpan<DstType::S32, ApplyPremul::True>(
+ x, y, dst.writable_addr32(x, y), count);
+ }
+ }
+}
+
+void SkLinearGradient::
+LinearGradient4fContext::D64_BlitBW(BlitState* state, int x, int y, const SkPixmap& dst,
+ int count) {
+ // FIXME: ignoring coverage for now
+ const LinearGradient4fContext* ctx =
+ static_cast<const LinearGradient4fContext*>(state->fCtx);
+
+ if (ctx->fColorsArePremul) {
+ ctx->shadePremulSpan<DstType::F16, ApplyPremul::False>(
+ x, y, dst.writable_addr64(x, y), count);
+ } else {
+ ctx->shadePremulSpan<DstType::F16, ApplyPremul::True>(
+ x, y, dst.writable_addr64(x, y), count);
+ }
+}
diff --git a/gfx/skia/skia/src/effects/gradients/Sk4fLinearGradient.h b/gfx/skia/skia/src/effects/gradients/Sk4fLinearGradient.h
new file mode 100644
index 000000000..dc7a17958
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/Sk4fLinearGradient.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk4fLinearGradient_DEFINED
+#define Sk4fLinearGradient_DEFINED
+
+#include "Sk4fGradientBase.h"
+#include "SkLinearGradient.h"
+
+class SkLinearGradient::
+LinearGradient4fContext final : public GradientShaderBase4fContext {
+public:
+ LinearGradient4fContext(const SkLinearGradient&, const ContextRec&);
+
+ void shadeSpan(int x, int y, SkPMColor dst[], int count) override;
+ void shadeSpan4f(int x, int y, SkPM4f dst[], int count) override;
+
+protected:
+ void mapTs(int x, int y, SkScalar ts[], int count) const override;
+
+ bool onChooseBlitProcs(const SkImageInfo&, BlitState*) override;
+
+private:
+ using INHERITED = GradientShaderBase4fContext;
+
+ template<DstType, TileMode>
+ class LinearIntervalProcessor;
+
+ template <DstType dstType, ApplyPremul premul>
+ void shadePremulSpan(int x, int y, typename DstTraits<dstType, premul>::Type[],
+ int count) const;
+
+ template <DstType dstType, ApplyPremul premul, SkShader::TileMode tileMode>
+ void shadeSpanInternal(int x, int y, typename DstTraits<dstType, premul>::Type[],
+ int count) const;
+
+ const Interval* findInterval(SkScalar fx) const;
+
+ bool isFast() const { return fDstToPosClass == kLinear_MatrixClass; }
+
+ static void D32_BlitBW(BlitState*, int x, int y, const SkPixmap& dst, int count);
+ static void D64_BlitBW(BlitState*, int x, int y, const SkPixmap& dst, int count);
+
+ mutable const Interval* fCachedInterval;
+};
+
+#endif // Sk4fLinearGradient_DEFINED
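For context, here is a minimal sketch of how a client reaches this shader through the public API (illustrative values; whether the legacy cache-based context or this 4f context is actually instantiated depends on the color inputs and on the kForce4fContext private flag noted in SkGradientShader.cpp below):

    SkPoint pts[] = {{0, 0}, {256, 0}};
    SkColor colors[] = {SK_ColorRED, SK_ColorGREEN, SK_ColorBLUE};
    SkScalar pos[] = {0, 0.5f, 1};
    SkPaint paint;
    paint.setShader(SkGradientShader::MakeLinear(pts, colors, pos, 3,
                                                 SkShader::kClamp_TileMode));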
diff --git a/gfx/skia/skia/src/effects/gradients/SkClampRange.cpp b/gfx/skia/skia/src/effects/gradients/SkClampRange.cpp
new file mode 100644
index 000000000..5fd1c0369
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/SkClampRange.cpp
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkClampRange.h"
+#include "SkMathPriv.h"
+
+static int SkCLZ64(uint64_t value) {
+ int count = 0;
+ if (value >> 32) {
+ value >>= 32;
+ } else {
+ count += 32;
+ }
+ return count + SkCLZ(SkToU32(value));
+}
+
+static bool sk_64_smul_check(int64_t a, int64_t b, int64_t* result) {
+ // Do it the slow way until we have some assembly.
+ int64_t ua = SkTAbs(a);
+ int64_t ub = SkTAbs(b);
+ int zeros = SkCLZ64(ua) + SkCLZ64(ub);
+ // this is a conservative check: it may return false when in fact it would not have overflowed.
+    // Hacker's Delight uses 34 as its conservative check, but that is for 32x32 multiplies.
+ // Since we are looking at 64x64 muls, we add 32 to the check.
+ if (zeros < (32 + 34)) {
+ return false;
+ }
+ *result = a * b;
+ return true;
+}
+
+/*
+ *  returns [0..count] for the number of steps (<= count) for which x0 < edge,
+ * given each step is followed by x0 += dx
+ */
+static int chop(int64_t x0, SkGradFixed edge, int64_t x1, int64_t dx, int count) {
+ SkASSERT(dx > 0);
+ SkASSERT(count >= 0);
+
+ if (x0 >= edge) {
+ return 0;
+ }
+ if (x1 <= edge) {
+ return count;
+ }
+ int64_t n = (edge - x0 + dx - 1) / dx;
+ SkASSERT(n >= 0);
+ SkASSERT(n <= count);
+ return (int)n;
+}
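+
+// Hypothetical evaluation: with x0 = -5, edge = 0, dx = 2, count = 10 (and x1 past
+// the edge), neither early return fires and n = (0 - (-5) + 2 - 1) / 2 = 3, i.e.
+// the values -5, -3 and -1 lie below the edge before x crosses it.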
+
+void SkClampRange::initFor1(SkGradFixed fx) {
+ fCount0 = fCount1 = fCount2 = 0;
+ if (fx <= 0) {
+ fCount0 = 1;
+ } else if (fx < kFracMax_SkGradFixed) {
+ fCount1 = 1;
+ fFx1 = fx;
+ } else {
+ fCount2 = 1;
+ }
+}
+
+void SkClampRange::init(SkGradFixed fx0, SkGradFixed dx0, int count, int v0, int v1) {
+ SkASSERT(count > 0);
+
+ fV0 = v0;
+ fV1 = v1;
+
+ // special case 1 == count, as it is slightly common for skia
+ // and avoids us ever calling divide or 64bit multiply
+ if (1 == count) {
+ this->initFor1(fx0);
+ return;
+ }
+
+ int64_t fx = fx0;
+ int64_t dx = dx0;
+
+ // start with ex equal to the last computed value
+ int64_t count_times_dx;
+ if (!sk_64_smul_check(count - 1, dx, &count_times_dx)) {
+ // we can't represent the computed end in 32.32, so just draw something (first color)
+ fCount1 = fCount2 = 0;
+ fCount0 = count;
+ return;
+ }
+
+ int64_t ex = fx + (count - 1) * dx;
+
+ if ((uint64_t)(fx | ex) <= kFracMax_SkGradFixed) {
+ fCount0 = fCount2 = 0;
+ fCount1 = count;
+ fFx1 = fx0;
+ return;
+ }
+ if (fx <= 0 && ex <= 0) {
+ fCount1 = fCount2 = 0;
+ fCount0 = count;
+ return;
+ }
+ if (fx >= kFracMax_SkGradFixed && ex >= kFracMax_SkGradFixed) {
+ fCount0 = fCount1 = 0;
+ fCount2 = count;
+ return;
+ }
+
+ // now make ex be 1 past the last computed value
+ ex += dx;
+
+ bool doSwap = dx < 0;
+
+ if (doSwap) {
+ ex -= dx;
+ fx -= dx;
+ SkTSwap(fx, ex);
+ dx = -dx;
+ }
+
+
+ fCount0 = chop(fx, 0, ex, dx, count);
+ SkASSERT(fCount0 >= 0);
+ SkASSERT(fCount0 <= count);
+ count -= fCount0;
+ fx += fCount0 * dx;
+ SkASSERT(fx >= 0);
+ SkASSERT(fCount0 == 0 || (fx - dx) < 0);
+ fCount1 = chop(fx, kFracMax_SkGradFixed, ex, dx, count);
+ SkASSERT(fCount1 >= 0);
+ SkASSERT(fCount1 <= count);
+ count -= fCount1;
+ fCount2 = count;
+
+#ifdef SK_DEBUG
+ fx += fCount1 * dx;
+ SkASSERT(fx <= ex);
+ if (fCount2 > 0) {
+ SkASSERT(fx >= kFracMax_SkGradFixed);
+ if (fCount1 > 0) {
+ SkASSERT(fx - dx < kFracMax_SkGradFixed);
+ }
+ }
+#endif
+
+ if (doSwap) {
+ SkTSwap(fCount0, fCount2);
+ SkTSwap(fV0, fV1);
+ dx = -dx;
+ }
+
+ if (fCount1 > 0) {
+ fFx1 = fx0 + fCount0 * dx;
+ }
+}
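+
+// A worked split with hypothetical 32.32 values: fx0 = -3 << 32, dx0 = 1 << 32 and
+// count = 10 cover gradient positions -3, -2, ..., 6. chop() against 0 gives
+// fCount0 = 3, chop() against kFracMax_SkGradFixed gives fCount1 = 1 (only the
+// position-0 sample lies in [0, 1)), the remaining fCount2 = 6 samples clamp to
+// fV1, and fFx1 ends up at 0.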
diff --git a/gfx/skia/skia/src/effects/gradients/SkClampRange.h b/gfx/skia/skia/src/effects/gradients/SkClampRange.h
new file mode 100644
index 000000000..d3d2d08c8
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/SkClampRange.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkClampRange_DEFINED
+#define SkClampRange_DEFINED
+
+#include "SkFixed.h"
+#include "SkScalar.h"
+
+#define SkGradFixed SkFixed3232
+#define SkScalarToGradFixed(x) SkScalarToFixed3232(x)
+#define SkFixedToGradFixed(x) SkFixedToFixed3232(x)
+#define SkGradFixedToFixed(x) (SkFixed)((x) >> 16)
+#define kFracMax_SkGradFixed 0xFFFFFFFFLL
+
+/**
+ *  Iterates fixed-point fx by dx, clamping as it goes to [0..kFracMax_SkGradFixed]; this class
+ * computes the (up to) 3 spans there are:
+ *
+ * range0: use constant value V0
+ * range1: iterate as usual fx += dx
+ * range2: use constant value V1
+ */
+struct SkClampRange {
+ int fCount0; // count for fV0
+ int fCount1; // count for interpolating (fV0...fV1)
+ int fCount2; // count for fV1
+ SkGradFixed fFx1; // initial fx value for the fCount1 range.
+ // only valid if fCount1 > 0
+ int fV0, fV1;
+
+ void init(SkGradFixed fx, SkGradFixed dx, int count, int v0, int v1);
+
+ void validate(int count) const {
+#ifdef SK_DEBUG
+ SkASSERT(fCount0 >= 0);
+ SkASSERT(fCount1 >= 0);
+ SkASSERT(fCount2 >= 0);
+ SkASSERT(fCount0 + fCount1 + fCount2 == count);
+#endif
+ }
+
+private:
+ void initFor1(SkGradFixed fx);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/effects/gradients/SkGradientBitmapCache.cpp b/gfx/skia/skia/src/effects/gradients/SkGradientBitmapCache.cpp
new file mode 100644
index 000000000..20b87e02c
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/SkGradientBitmapCache.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkGradientBitmapCache.h"
+
+struct SkGradientBitmapCache::Entry {
+ Entry* fPrev;
+ Entry* fNext;
+
+ void* fBuffer;
+ size_t fSize;
+ SkBitmap fBitmap;
+
+ Entry(const void* buffer, size_t size, const SkBitmap& bm)
+ : fPrev(nullptr),
+ fNext(nullptr),
+ fBitmap(bm) {
+ fBuffer = sk_malloc_throw(size);
+ fSize = size;
+ memcpy(fBuffer, buffer, size);
+ }
+
+ ~Entry() { sk_free(fBuffer); }
+
+ bool equals(const void* buffer, size_t size) const {
+ return (fSize == size) && !memcmp(fBuffer, buffer, size);
+ }
+};
+
+SkGradientBitmapCache::SkGradientBitmapCache(int max) : fMaxEntries(max) {
+ fEntryCount = 0;
+ fHead = fTail = nullptr;
+
+ this->validate();
+}
+
+SkGradientBitmapCache::~SkGradientBitmapCache() {
+ this->validate();
+
+ Entry* entry = fHead;
+ while (entry) {
+ Entry* next = entry->fNext;
+ delete entry;
+ entry = next;
+ }
+}
+
+SkGradientBitmapCache::Entry* SkGradientBitmapCache::release(Entry* entry) const {
+ if (entry->fPrev) {
+ SkASSERT(fHead != entry);
+ entry->fPrev->fNext = entry->fNext;
+ } else {
+ SkASSERT(fHead == entry);
+ fHead = entry->fNext;
+ }
+ if (entry->fNext) {
+ SkASSERT(fTail != entry);
+ entry->fNext->fPrev = entry->fPrev;
+ } else {
+ SkASSERT(fTail == entry);
+ fTail = entry->fPrev;
+ }
+ return entry;
+}
+
+void SkGradientBitmapCache::attachToHead(Entry* entry) const {
+ entry->fPrev = nullptr;
+ entry->fNext = fHead;
+ if (fHead) {
+ fHead->fPrev = entry;
+ } else {
+ fTail = entry;
+ }
+ fHead = entry;
+}
+
+bool SkGradientBitmapCache::find(const void* buffer, size_t size, SkBitmap* bm) const {
+ AutoValidate av(this);
+
+ Entry* entry = fHead;
+ while (entry) {
+ if (entry->equals(buffer, size)) {
+ if (bm) {
+ *bm = entry->fBitmap;
+ }
+ // move to the head of our list, so we purge it last
+ this->release(entry);
+ this->attachToHead(entry);
+ return true;
+ }
+ entry = entry->fNext;
+ }
+ return false;
+}
+
+void SkGradientBitmapCache::add(const void* buffer, size_t len, const SkBitmap& bm) {
+ AutoValidate av(this);
+
+ if (fEntryCount == fMaxEntries) {
+ SkASSERT(fTail);
+ delete this->release(fTail);
+ fEntryCount -= 1;
+ }
+
+ Entry* entry = new Entry(buffer, len, bm);
+ this->attachToHead(entry);
+ fEntryCount += 1;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+
+void SkGradientBitmapCache::validate() const {
+ SkASSERT(fEntryCount >= 0 && fEntryCount <= fMaxEntries);
+
+ if (fEntryCount > 0) {
+ SkASSERT(nullptr == fHead->fPrev);
+ SkASSERT(nullptr == fTail->fNext);
+
+ if (fEntryCount == 1) {
+ SkASSERT(fHead == fTail);
+ } else {
+ SkASSERT(fHead != fTail);
+ }
+
+ Entry* entry = fHead;
+ int count = 0;
+ while (entry) {
+ count += 1;
+ entry = entry->fNext;
+ }
+ SkASSERT(count == fEntryCount);
+
+ entry = fTail;
+ while (entry) {
+ count -= 1;
+ entry = entry->fPrev;
+ }
+ SkASSERT(0 == count);
+ } else {
+ SkASSERT(nullptr == fHead);
+ SkASSERT(nullptr == fTail);
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/effects/gradients/SkGradientBitmapCache.h b/gfx/skia/skia/src/effects/gradients/SkGradientBitmapCache.h
new file mode 100644
index 000000000..0dcd32272
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/SkGradientBitmapCache.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkGradientBitmapCache_DEFINED
+#define SkGradientBitmapCache_DEFINED
+
+#include "SkBitmap.h"
+
+class SkGradientBitmapCache : SkNoncopyable {
+public:
+ SkGradientBitmapCache(int maxEntries);
+ ~SkGradientBitmapCache();
+
+ bool find(const void* buffer, size_t len, SkBitmap*) const;
+ void add(const void* buffer, size_t len, const SkBitmap&);
+
+private:
+ int fEntryCount;
+ const int fMaxEntries;
+
+ struct Entry;
+ mutable Entry* fHead;
+ mutable Entry* fTail;
+
+ inline Entry* release(Entry*) const;
+ inline void attachToHead(Entry*) const;
+
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+ class AutoValidate : SkNoncopyable {
+ public:
+ AutoValidate(const SkGradientBitmapCache* bc) : fBC(bc) { bc->validate(); }
+ ~AutoValidate() { fBC->validate(); }
+ private:
+ const SkGradientBitmapCache* fBC;
+ };
+};
+
+#endif
diff --git a/gfx/skia/skia/src/effects/gradients/SkGradientShader.cpp b/gfx/skia/skia/src/effects/gradients/SkGradientShader.cpp
new file mode 100644
index 000000000..17302d9d4
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/SkGradientShader.cpp
@@ -0,0 +1,1750 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "Sk4fLinearGradient.h"
+#include "SkGradientShaderPriv.h"
+#include "SkHalf.h"
+#include "SkLinearGradient.h"
+#include "SkRadialGradient.h"
+#include "SkTwoPointConicalGradient.h"
+#include "SkSweepGradient.h"
+
+enum GradientSerializationFlags {
+ // Bits 29:31 used for various boolean flags
+ kHasPosition_GSF = 0x80000000,
+ kHasLocalMatrix_GSF = 0x40000000,
+ kHasColorSpace_GSF = 0x20000000,
+
+ // Bits 12:28 unused
+
+ // Bits 8:11 for fTileMode
+ kTileModeShift_GSF = 8,
+ kTileModeMask_GSF = 0xF,
+
+ // Bits 0:7 for fGradFlags (note that kForce4fContext_PrivateFlag is 0x80)
+ kGradFlagsShift_GSF = 0,
+ kGradFlagsMask_GSF = 0xFF,
+};
+
+void SkGradientShaderBase::Descriptor::flatten(SkWriteBuffer& buffer) const {
+ uint32_t flags = 0;
+ if (fPos) {
+ flags |= kHasPosition_GSF;
+ }
+ if (fLocalMatrix) {
+ flags |= kHasLocalMatrix_GSF;
+ }
+ sk_sp<SkData> colorSpaceData = fColorSpace ? fColorSpace->serialize() : nullptr;
+ if (colorSpaceData) {
+ flags |= kHasColorSpace_GSF;
+ }
+ SkASSERT(static_cast<uint32_t>(fTileMode) <= kTileModeMask_GSF);
+ flags |= (fTileMode << kTileModeShift_GSF);
+ SkASSERT(fGradFlags <= kGradFlagsMask_GSF);
+ flags |= (fGradFlags << kGradFlagsShift_GSF);
+
+ buffer.writeUInt(flags);
+
+ buffer.writeColor4fArray(fColors, fCount);
+ if (colorSpaceData) {
+ buffer.writeDataAsByteArray(colorSpaceData.get());
+ }
+ if (fPos) {
+ buffer.writeScalarArray(fPos, fCount);
+ }
+ if (fLocalMatrix) {
+ buffer.writeMatrix(*fLocalMatrix);
+ }
+}
+
+bool SkGradientShaderBase::DescriptorScope::unflatten(SkReadBuffer& buffer) {
+ if (buffer.isVersionLT(SkReadBuffer::kGradientShaderFloatColor_Version)) {
+ fCount = buffer.getArrayCount();
+ if (fCount > kStorageCount) {
+ size_t allocSize = (sizeof(SkColor4f) + sizeof(SkScalar)) * fCount;
+ fDynamicStorage.reset(allocSize);
+ fColors = (SkColor4f*)fDynamicStorage.get();
+ fPos = (SkScalar*)(fColors + fCount);
+ } else {
+ fColors = fColorStorage;
+ fPos = fPosStorage;
+ }
+
+ // Old gradients serialized SkColor. Read that to a temporary location, then convert.
+ SkSTArray<2, SkColor, true> colors;
+ colors.resize_back(fCount);
+ if (!buffer.readColorArray(colors.begin(), fCount)) {
+ return false;
+ }
+ for (int i = 0; i < fCount; ++i) {
+ mutableColors()[i] = SkColor4f::FromColor(colors[i]);
+ }
+
+ if (buffer.readBool()) {
+ if (!buffer.readScalarArray(const_cast<SkScalar*>(fPos), fCount)) {
+ return false;
+ }
+ } else {
+ fPos = nullptr;
+ }
+
+ fColorSpace = nullptr;
+ fTileMode = (SkShader::TileMode)buffer.read32();
+ fGradFlags = buffer.read32();
+
+ if (buffer.readBool()) {
+ fLocalMatrix = &fLocalMatrixStorage;
+ buffer.readMatrix(&fLocalMatrixStorage);
+ } else {
+ fLocalMatrix = nullptr;
+ }
+ } else {
+ // New gradient format. Includes floating point color, color space, densely packed flags
+ uint32_t flags = buffer.readUInt();
+
+ fTileMode = (SkShader::TileMode)((flags >> kTileModeShift_GSF) & kTileModeMask_GSF);
+ fGradFlags = (flags >> kGradFlagsShift_GSF) & kGradFlagsMask_GSF;
+
+ fCount = buffer.getArrayCount();
+ if (fCount > kStorageCount) {
+ size_t allocSize = (sizeof(SkColor4f) + sizeof(SkScalar)) * fCount;
+ fDynamicStorage.reset(allocSize);
+ fColors = (SkColor4f*)fDynamicStorage.get();
+ fPos = (SkScalar*)(fColors + fCount);
+ } else {
+ fColors = fColorStorage;
+ fPos = fPosStorage;
+ }
+ if (!buffer.readColor4fArray(mutableColors(), fCount)) {
+ return false;
+ }
+ if (SkToBool(flags & kHasColorSpace_GSF)) {
+ sk_sp<SkData> data = buffer.readByteArrayAsData();
+ fColorSpace = SkColorSpace::Deserialize(data->data(), data->size());
+ } else {
+ fColorSpace = nullptr;
+ }
+ if (SkToBool(flags & kHasPosition_GSF)) {
+ if (!buffer.readScalarArray(mutablePos(), fCount)) {
+ return false;
+ }
+ } else {
+ fPos = nullptr;
+ }
+ if (SkToBool(flags & kHasLocalMatrix_GSF)) {
+ fLocalMatrix = &fLocalMatrixStorage;
+ buffer.readMatrix(&fLocalMatrixStorage);
+ } else {
+ fLocalMatrix = nullptr;
+ }
+ }
+ return buffer.isValid();
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////
+
+SkGradientShaderBase::SkGradientShaderBase(const Descriptor& desc, const SkMatrix& ptsToUnit)
+ : INHERITED(desc.fLocalMatrix)
+ , fPtsToUnit(ptsToUnit)
+{
+ fPtsToUnit.getType(); // Precache so reads are threadsafe.
+ SkASSERT(desc.fCount > 1);
+
+ fGradFlags = static_cast<uint8_t>(desc.fGradFlags);
+
+ SkASSERT((unsigned)desc.fTileMode < SkShader::kTileModeCount);
+ SkASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gTileProcs));
+ fTileMode = desc.fTileMode;
+ fTileProc = gTileProcs[desc.fTileMode];
+
+ /* Note: we let the caller skip the first and/or last position.
+ i.e. pos[0] = 0.3, pos[1] = 0.7
+ In these cases, we insert dummy entries to ensure that the final data
+ will be bracketed by [0, 1].
+ i.e. our_pos[0] = 0, our_pos[1] = 0.3, our_pos[2] = 0.7, our_pos[3] = 1
+
+       Thus colorCount (the caller's value) and fColorCount (our value) may
+ differ by up to 2. In the above example:
+ colorCount = 2
+ fColorCount = 4
+ */
+ fColorCount = desc.fCount;
+ // check if we need to add in dummy start and/or end position/colors
+ bool dummyFirst = false;
+ bool dummyLast = false;
+ if (desc.fPos) {
+ dummyFirst = desc.fPos[0] != 0;
+ dummyLast = desc.fPos[desc.fCount - 1] != SK_Scalar1;
+ fColorCount += dummyFirst + dummyLast;
+ }
+
+ if (fColorCount > kColorStorageCount) {
+ size_t size = sizeof(SkColor) + sizeof(SkColor4f) + sizeof(Rec);
+ if (desc.fPos) {
+ size += sizeof(SkScalar);
+ }
+ fOrigColors = reinterpret_cast<SkColor*>(sk_malloc_throw(size * fColorCount));
+ }
+ else {
+ fOrigColors = fStorage;
+ }
+
+ fOrigColors4f = (SkColor4f*)(fOrigColors + fColorCount);
+
+ // Now copy over the colors, adding the dummies as needed
+ SkColor4f* origColors = fOrigColors4f;
+ if (dummyFirst) {
+ *origColors++ = desc.fColors[0];
+ }
+ memcpy(origColors, desc.fColors, desc.fCount * sizeof(SkColor4f));
+ if (dummyLast) {
+ origColors += desc.fCount;
+ *origColors = desc.fColors[desc.fCount - 1];
+ }
+
+ // Convert our SkColor4f colors to SkColor as well. Note that this is incorrect if the
+ // source colors are not in sRGB gamut. We would need to do a gamut transformation, but
+ // SkColorSpaceXform can't do that (yet). GrColorSpaceXform can, but we may not have GPU
+ // support compiled in here. For the common case (sRGB colors), this does the right thing.
+ for (int i = 0; i < fColorCount; ++i) {
+ fOrigColors[i] = fOrigColors4f[i].toSkColor();
+ }
+
+ if (!desc.fColorSpace) {
+ // This happens if we were constructed from SkColors, so our colors are really sRGB
+ fColorSpace = SkColorSpace::NewNamed(SkColorSpace::kSRGBLinear_Named);
+ } else {
+ // The color space refers to the float colors, so it must be linear gamma
+ SkASSERT(desc.fColorSpace->gammaIsLinear());
+ fColorSpace = desc.fColorSpace;
+ }
+
+ if (desc.fPos && fColorCount) {
+ fOrigPos = (SkScalar*)(fOrigColors4f + fColorCount);
+ fRecs = (Rec*)(fOrigPos + fColorCount);
+ } else {
+ fOrigPos = nullptr;
+ fRecs = (Rec*)(fOrigColors4f + fColorCount);
+ }
+
+ if (fColorCount > 2) {
+ Rec* recs = fRecs;
+ recs->fPos = 0;
+ // recs->fScale = 0; // unused;
+ recs += 1;
+ if (desc.fPos) {
+ SkScalar* origPosPtr = fOrigPos;
+ *origPosPtr++ = 0;
+
+ /* We need to convert the user's array of relative positions into
+ fixed-point positions and scale factors. We need these results
+ to be strictly monotonic (no two values equal or out of order).
+ Hence this complex loop that just jams a zero for the scale
+               value if it sees a segment out of order, and it ensures that
+ we start at 0 and end at 1.0
+ */
+ SkScalar prev = 0;
+ int startIndex = dummyFirst ? 0 : 1;
+ int count = desc.fCount + dummyLast;
+ for (int i = startIndex; i < count; i++) {
+ // force the last value to be 1.0
+ SkScalar curr;
+ if (i == desc.fCount) { // we're really at the dummyLast
+ curr = 1;
+ } else {
+ curr = SkScalarPin(desc.fPos[i], 0, 1);
+ }
+ *origPosPtr++ = curr;
+
+ recs->fPos = SkScalarToFixed(curr);
+ SkFixed diff = SkScalarToFixed(curr - prev);
+ if (diff > 0) {
+ recs->fScale = (1 << 24) / diff;
+ } else {
+ recs->fScale = 0; // ignore this segment
+ }
+ // get ready for the next value
+ prev = curr;
+ recs += 1;
+ }
+ } else { // assume even distribution
+ fOrigPos = nullptr;
+
+ SkFixed dp = SK_Fixed1 / (desc.fCount - 1);
+ SkFixed p = dp;
+ SkFixed scale = (desc.fCount - 1) << 8; // (1 << 24) / dp
+ for (int i = 1; i < desc.fCount - 1; i++) {
+ recs->fPos = p;
+ recs->fScale = scale;
+ recs += 1;
+ p += dp;
+ }
+ recs->fPos = SK_Fixed1;
+ recs->fScale = scale;
+ }
+ } else if (desc.fPos) {
+ SkASSERT(2 == fColorCount);
+ fOrigPos[0] = SkScalarPin(desc.fPos[0], 0, 1);
+ fOrigPos[1] = SkScalarPin(desc.fPos[1], fOrigPos[0], 1);
+ if (0 == fOrigPos[0] && 1 == fOrigPos[1]) {
+ fOrigPos = nullptr;
+ }
+ }
+ this->initCommon();
+}
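+
+// Worked example of the Rec encoding (hypothetical spacing): for stops 0.25 apart,
+// diff = SkScalarToFixed(0.25) = 0x4000 and fScale = (1 << 24) / 0x4000 = 0x400,
+// so a position within the segment can later be normalized with a multiply rather
+// than a divide. The evenly-spaced branch computes the same scale in closed form,
+// as the inline (1 << 24) / dp comment notes.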
+
+SkGradientShaderBase::~SkGradientShaderBase() {
+ if (fOrigColors != fStorage) {
+ sk_free(fOrigColors);
+ }
+}
+
+void SkGradientShaderBase::initCommon() {
+ unsigned colorAlpha = 0xFF;
+ for (int i = 0; i < fColorCount; i++) {
+ colorAlpha &= SkColorGetA(fOrigColors[i]);
+ }
+ fColorsAreOpaque = colorAlpha == 0xFF;
+}
+
+void SkGradientShaderBase::flatten(SkWriteBuffer& buffer) const {
+ Descriptor desc;
+ desc.fColors = fOrigColors4f;
+ desc.fColorSpace = fColorSpace;
+ desc.fPos = fOrigPos;
+ desc.fCount = fColorCount;
+ desc.fTileMode = fTileMode;
+ desc.fGradFlags = fGradFlags;
+
+ const SkMatrix& m = this->getLocalMatrix();
+ desc.fLocalMatrix = m.isIdentity() ? nullptr : &m;
+ desc.flatten(buffer);
+}
+
+void SkGradientShaderBase::FlipGradientColors(SkColor* colorDst, Rec* recDst,
+ SkColor* colorSrc, Rec* recSrc,
+ int count) {
+ SkAutoSTArray<8, SkColor> colorsTemp(count);
+ for (int i = 0; i < count; ++i) {
+ int offset = count - i - 1;
+ colorsTemp[i] = colorSrc[offset];
+ }
+ if (count > 2) {
+ SkAutoSTArray<8, Rec> recsTemp(count);
+ for (int i = 0; i < count; ++i) {
+ int offset = count - i - 1;
+ recsTemp[i].fPos = SK_Fixed1 - recSrc[offset].fPos;
+ recsTemp[i].fScale = recSrc[offset].fScale;
+ }
+ memcpy(recDst, recsTemp.get(), count * sizeof(Rec));
+ }
+ memcpy(colorDst, colorsTemp.get(), count * sizeof(SkColor));
+}
+
+bool SkGradientShaderBase::isOpaque() const {
+ return fColorsAreOpaque;
+}
+
+static unsigned rounded_divide(unsigned numer, unsigned denom) {
+ return (numer + (denom >> 1)) / denom;
+}
+
+bool SkGradientShaderBase::onAsLuminanceColor(SkColor* lum) const {
+ // we just compute an average color.
+ // possibly we could weight this based on the proportional width for each color
+ // assuming they are not evenly distributed in the fPos array.
+ int r = 0;
+ int g = 0;
+ int b = 0;
+ const int n = fColorCount;
+ for (int i = 0; i < n; ++i) {
+ SkColor c = fOrigColors[i];
+ r += SkColorGetR(c);
+ g += SkColorGetG(c);
+ b += SkColorGetB(c);
+ }
+ *lum = SkColorSetRGB(rounded_divide(r, n), rounded_divide(g, n), rounded_divide(b, n));
+ return true;
+}
+
+SkGradientShaderBase::GradientShaderBaseContext::GradientShaderBaseContext(
+ const SkGradientShaderBase& shader, const ContextRec& rec)
+ : INHERITED(shader, rec)
+#ifdef SK_SUPPORT_LEGACY_GRADIENT_DITHERING
+ , fDither(true)
+#else
+ , fDither(rec.fPaint->isDither())
+#endif
+ , fCache(shader.refCache(getPaintAlpha(), fDither))
+{
+ const SkMatrix& inverse = this->getTotalInverse();
+
+ fDstToIndex.setConcat(shader.fPtsToUnit, inverse);
+
+ fDstToIndexProc = fDstToIndex.getMapXYProc();
+ fDstToIndexClass = (uint8_t)SkShader::Context::ComputeMatrixClass(fDstToIndex);
+
+ // now convert our colors in to PMColors
+ unsigned paintAlpha = this->getPaintAlpha();
+
+ fFlags = this->INHERITED::getFlags();
+ if (shader.fColorsAreOpaque && paintAlpha == 0xFF) {
+ fFlags |= kOpaqueAlpha_Flag;
+ }
+}
+
+bool SkGradientShaderBase::GradientShaderBaseContext::isValid() const {
+ return fDstToIndex.isFinite();
+}
+
+SkGradientShaderBase::GradientShaderCache::GradientShaderCache(
+ U8CPU alpha, bool dither, const SkGradientShaderBase& shader)
+ : fCacheAlpha(alpha)
+ , fCacheDither(dither)
+ , fShader(shader)
+{
+ // Only initialize the cache in getCache32.
+ fCache32 = nullptr;
+ fCache32PixelRef = nullptr;
+}
+
+SkGradientShaderBase::GradientShaderCache::~GradientShaderCache() {
+ SkSafeUnref(fCache32PixelRef);
+}
+
+/*
+ * r,g,b used to be SkFixed, but on gcc (4.2.1 mac and 4.6.3 goobuntu) in
+ * release builds, we saw a compiler error where the 0xFF parameter in
+ * SkPackARGB32() was being totally ignored whenever it was called with
+ * a non-zero add (e.g. 0x8000).
+ *
+ * We found two work-arounds:
+ * 1. change r,g,b to unsigned (or just one of them)
+ * 2. change SkPackARGB32 to + its (a << SK_A32_SHIFT) value instead
+ * of using |
+ *
+ * We chose #1 just because it was more localized.
+ * See http://code.google.com/p/skia/issues/detail?id=1113
+ *
+ * The type SkUFixed encapsulates this need for unsigned (but logically Fixed) values.
+ */
+typedef uint32_t SkUFixed;
+
+void SkGradientShaderBase::GradientShaderCache::Build32bitCache(
+ SkPMColor cache[], SkColor c0, SkColor c1,
+ int count, U8CPU paintAlpha, uint32_t gradFlags, bool dither) {
+ SkASSERT(count > 1);
+
+ // need to apply paintAlpha to our two endpoints
+ uint32_t a0 = SkMulDiv255Round(SkColorGetA(c0), paintAlpha);
+ uint32_t a1 = SkMulDiv255Round(SkColorGetA(c1), paintAlpha);
+
+
+ const bool interpInPremul = SkToBool(gradFlags &
+ SkGradientShader::kInterpolateColorsInPremul_Flag);
+
+ uint32_t r0 = SkColorGetR(c0);
+ uint32_t g0 = SkColorGetG(c0);
+ uint32_t b0 = SkColorGetB(c0);
+
+ uint32_t r1 = SkColorGetR(c1);
+ uint32_t g1 = SkColorGetG(c1);
+ uint32_t b1 = SkColorGetB(c1);
+
+ if (interpInPremul) {
+ r0 = SkMulDiv255Round(r0, a0);
+ g0 = SkMulDiv255Round(g0, a0);
+ b0 = SkMulDiv255Round(b0, a0);
+
+ r1 = SkMulDiv255Round(r1, a1);
+ g1 = SkMulDiv255Round(g1, a1);
+ b1 = SkMulDiv255Round(b1, a1);
+ }
+
+ SkFixed da = SkIntToFixed(a1 - a0) / (count - 1);
+ SkFixed dr = SkIntToFixed(r1 - r0) / (count - 1);
+ SkFixed dg = SkIntToFixed(g1 - g0) / (count - 1);
+ SkFixed db = SkIntToFixed(b1 - b0) / (count - 1);
+
+ /* We pre-add 1/8 to avoid having to add this to our [0] value each time
+ in the loop. Without this, the bias for each would be
+ 0x2000 0xA000 0xE000 0x6000
+ With this trick, we can add 0 for the first (no-op) and just adjust the
+ others.
+ */
+ const SkUFixed bias0 = dither ? 0x2000 : 0x8000;
+ const SkUFixed bias1 = dither ? 0x8000 : 0;
+ const SkUFixed bias2 = dither ? 0xC000 : 0;
+ const SkUFixed bias3 = dither ? 0x4000 : 0;
+
+ SkUFixed a = SkIntToFixed(a0) + bias0;
+ SkUFixed r = SkIntToFixed(r0) + bias0;
+ SkUFixed g = SkIntToFixed(g0) + bias0;
+ SkUFixed b = SkIntToFixed(b0) + bias0;
+
+ /*
+ * Our dither-cell (spatially) is
+ * 0 2
+ * 3 1
+ * Where
+ * [0] -> [-1/8 ... 1/8 ) values near 0
+ * [1] -> [ 1/8 ... 3/8 ) values near 1/4
+ * [2] -> [ 3/8 ... 5/8 ) values near 1/2
+ * [3] -> [ 5/8 ... 7/8 ) values near 3/4
+ */
+
+ if (0xFF == a0 && 0 == da) {
+ do {
+ cache[kCache32Count*0] = SkPackARGB32(0xFF, (r + 0 ) >> 16,
+ (g + 0 ) >> 16,
+ (b + 0 ) >> 16);
+ cache[kCache32Count*1] = SkPackARGB32(0xFF, (r + bias1) >> 16,
+ (g + bias1) >> 16,
+ (b + bias1) >> 16);
+ cache[kCache32Count*2] = SkPackARGB32(0xFF, (r + bias2) >> 16,
+ (g + bias2) >> 16,
+ (b + bias2) >> 16);
+ cache[kCache32Count*3] = SkPackARGB32(0xFF, (r + bias3) >> 16,
+ (g + bias3) >> 16,
+ (b + bias3) >> 16);
+ cache += 1;
+ r += dr;
+ g += dg;
+ b += db;
+ } while (--count != 0);
+ } else if (interpInPremul) {
+ do {
+ cache[kCache32Count*0] = SkPackARGB32((a + 0 ) >> 16,
+ (r + 0 ) >> 16,
+ (g + 0 ) >> 16,
+ (b + 0 ) >> 16);
+ cache[kCache32Count*1] = SkPackARGB32((a + bias1) >> 16,
+ (r + bias1) >> 16,
+ (g + bias1) >> 16,
+ (b + bias1) >> 16);
+ cache[kCache32Count*2] = SkPackARGB32((a + bias2) >> 16,
+ (r + bias2) >> 16,
+ (g + bias2) >> 16,
+ (b + bias2) >> 16);
+ cache[kCache32Count*3] = SkPackARGB32((a + bias3) >> 16,
+ (r + bias3) >> 16,
+ (g + bias3) >> 16,
+ (b + bias3) >> 16);
+ cache += 1;
+ a += da;
+ r += dr;
+ g += dg;
+ b += db;
+ } while (--count != 0);
+    } else {    // interpolate in unpremul space
+ do {
+ cache[kCache32Count*0] = SkPremultiplyARGBInline((a + 0 ) >> 16,
+ (r + 0 ) >> 16,
+ (g + 0 ) >> 16,
+ (b + 0 ) >> 16);
+ cache[kCache32Count*1] = SkPremultiplyARGBInline((a + bias1) >> 16,
+ (r + bias1) >> 16,
+ (g + bias1) >> 16,
+ (b + bias1) >> 16);
+ cache[kCache32Count*2] = SkPremultiplyARGBInline((a + bias2) >> 16,
+ (r + bias2) >> 16,
+ (g + bias2) >> 16,
+ (b + bias2) >> 16);
+ cache[kCache32Count*3] = SkPremultiplyARGBInline((a + bias3) >> 16,
+ (r + bias3) >> 16,
+ (g + bias3) >> 16,
+ (b + bias3) >> 16);
+ cache += 1;
+ a += da;
+ r += dr;
+ g += dg;
+ b += db;
+ } while (--count != 0);
+ }
+}
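+
+// Dither bias arithmetic, in 16.16 fixed point: bias0 = 0x2000 is the 1/8 centering
+// term folded into a/r/g/b once, and the per-row adjustments {0, 0x8000, 0xC000,
+// 0x4000} then give effective offsets {0x2000, 0xA000, 0xE000, 0x6000}, i.e.
+// 1/8, 5/8, 7/8 and 3/8 of one level, matching the ordered-dither cell described
+// in the comment above.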
+
+static inline int SkFixedToFFFF(SkFixed x) {
+ SkASSERT((unsigned)x <= SK_Fixed1);
+ return x - (x >> 16);
+}
+
+const SkPMColor* SkGradientShaderBase::GradientShaderCache::getCache32() {
+ fCache32InitOnce(SkGradientShaderBase::GradientShaderCache::initCache32, this);
+ SkASSERT(fCache32);
+ return fCache32;
+}
+
+void SkGradientShaderBase::GradientShaderCache::initCache32(GradientShaderCache* cache) {
+ const int kNumberOfDitherRows = 4;
+ const SkImageInfo info = SkImageInfo::MakeN32Premul(kCache32Count, kNumberOfDitherRows);
+
+ SkASSERT(nullptr == cache->fCache32PixelRef);
+ cache->fCache32PixelRef = SkMallocPixelRef::NewAllocate(info, 0, nullptr);
+ cache->fCache32 = (SkPMColor*)cache->fCache32PixelRef->getAddr();
+ if (cache->fShader.fColorCount == 2) {
+ Build32bitCache(cache->fCache32, cache->fShader.fOrigColors[0],
+ cache->fShader.fOrigColors[1], kCache32Count, cache->fCacheAlpha,
+ cache->fShader.fGradFlags, cache->fCacheDither);
+ } else {
+ Rec* rec = cache->fShader.fRecs;
+ int prevIndex = 0;
+ for (int i = 1; i < cache->fShader.fColorCount; i++) {
+ int nextIndex = SkFixedToFFFF(rec[i].fPos) >> kCache32Shift;
+ SkASSERT(nextIndex < kCache32Count);
+
+ if (nextIndex > prevIndex)
+ Build32bitCache(cache->fCache32 + prevIndex, cache->fShader.fOrigColors[i-1],
+ cache->fShader.fOrigColors[i], nextIndex - prevIndex + 1,
+ cache->fCacheAlpha, cache->fShader.fGradFlags, cache->fCacheDither);
+ prevIndex = nextIndex;
+ }
+ }
+}
+
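+// Fills a pre-allocated kCache32Count x 1 bitmap (8888 or F16) with the gradient ramp.
+// Colors are interpolated in premul or unpremul space depending on the gradient flags,
+// and every pixel is written out premultiplied.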
+void SkGradientShaderBase::initLinearBitmap(SkBitmap* bitmap) const {
+ const bool interpInPremul = SkToBool(fGradFlags &
+ SkGradientShader::kInterpolateColorsInPremul_Flag);
+ bitmap->lockPixels();
+ SkHalf* pixelsF16 = reinterpret_cast<SkHalf*>(bitmap->getPixels());
+ uint32_t* pixelsS32 = reinterpret_cast<uint32_t*>(bitmap->getPixels());
+
+ typedef std::function<void(const Sk4f&, int)> pixelWriteFn_t;
+
+ pixelWriteFn_t writeF16Pixel = [&](const Sk4f& x, int index) {
+ Sk4h c = SkFloatToHalf_finite_ftz(x);
+ pixelsF16[4*index+0] = c[0];
+ pixelsF16[4*index+1] = c[1];
+ pixelsF16[4*index+2] = c[2];
+ pixelsF16[4*index+3] = c[3];
+ };
+ pixelWriteFn_t writeS32Pixel = [&](const Sk4f& c, int index) {
+ pixelsS32[index] = Sk4f_toS32(c);
+ };
+
+ pixelWriteFn_t writeSizedPixel =
+ (kRGBA_F16_SkColorType == bitmap->colorType()) ? writeF16Pixel : writeS32Pixel;
+ pixelWriteFn_t writeUnpremulPixel = [&](const Sk4f& c, int index) {
+ writeSizedPixel(c * Sk4f(c[3], c[3], c[3], 1.0f), index);
+ };
+
+ pixelWriteFn_t writePixel = interpInPremul ? writeSizedPixel : writeUnpremulPixel;
+
+ int prevIndex = 0;
+ for (int i = 1; i < fColorCount; i++) {
+ int nextIndex = (fColorCount == 2) ? (kCache32Count - 1)
+ : SkFixedToFFFF(fRecs[i].fPos) >> kCache32Shift;
+ SkASSERT(nextIndex < kCache32Count);
+
+ if (nextIndex > prevIndex) {
+ Sk4f c0 = Sk4f::Load(fOrigColors4f[i - 1].vec());
+ Sk4f c1 = Sk4f::Load(fOrigColors4f[i].vec());
+ if (interpInPremul) {
+ c0 = c0 * Sk4f(c0[3], c0[3], c0[3], 1.0f);
+ c1 = c1 * Sk4f(c1[3], c1[3], c1[3], 1.0f);
+ }
+
+ Sk4f step = Sk4f(1.0f / static_cast<float>(nextIndex - prevIndex));
+ Sk4f delta = (c1 - c0) * step;
+
+ for (int curIndex = prevIndex; curIndex <= nextIndex; ++curIndex) {
+ writePixel(c0, curIndex);
+ c0 += delta;
+ }
+ }
+ prevIndex = nextIndex;
+ }
+ SkASSERT(prevIndex == kCache32Count - 1);
+ bitmap->unlockPixels();
+}
+
+/*
+ *  The gradient holds a cache for the most recent alpha and dither values. Successive
+ *  callers with the same alpha and dither settings will share the same cache.
+ */
+SkGradientShaderBase::GradientShaderCache* SkGradientShaderBase::refCache(U8CPU alpha,
+ bool dither) const {
+ SkAutoMutexAcquire ama(fCacheMutex);
+ if (!fCache || fCache->getAlpha() != alpha || fCache->getDither() != dither) {
+ fCache.reset(new GradientShaderCache(alpha, dither, *this));
+ }
+ // Increment the ref counter inside the mutex to ensure the returned pointer is still valid.
+ // Otherwise, the pointer may have been overwritten on a different thread before the object's
+ // ref count was incremented.
+ fCache.get()->ref();
+ return fCache;
+}
+
+SK_DECLARE_STATIC_MUTEX(gGradientCacheMutex);
+/*
+ * Because our caller might rebuild the same (logically the same) gradient
+ * over and over, we'd like to return exactly the same "bitmap" if possible,
+ * allowing the client to utilize a cache of our bitmap (e.g. with a GPU).
+ * To do that, we maintain a private cache of built-bitmaps, based on our
+ * colors and positions. Note: we don't try to flatten the fMapper, so if one
+ * is present, we skip the cache for now.
+ */
+void SkGradientShaderBase::getGradientTableBitmap(SkBitmap* bitmap,
+ GradientBitmapType bitmapType) const {
+ // our caller assumes no external alpha, so we ensure that our cache is built with 0xFF
+ SkAutoTUnref<GradientShaderCache> cache(this->refCache(0xFF, true));
+
+ // build our key: [numColors + colors[] + {positions[]} + flags + colorType ]
+ int count = 1 + fColorCount + 1 + 1;
+ if (fColorCount > 2) {
+ count += fColorCount - 1; // fRecs[].fPos
+ }
+
+ SkAutoSTMalloc<16, int32_t> storage(count);
+ int32_t* buffer = storage.get();
+
+ *buffer++ = fColorCount;
+ memcpy(buffer, fOrigColors, fColorCount * sizeof(SkColor));
+ buffer += fColorCount;
+ if (fColorCount > 2) {
+ for (int i = 1; i < fColorCount; i++) {
+ *buffer++ = fRecs[i].fPos;
+ }
+ }
+ *buffer++ = fGradFlags;
+ *buffer++ = static_cast<int32_t>(bitmapType);
+ SkASSERT(buffer - storage.get() == count);
+
+ ///////////////////////////////////
+
+ static SkGradientBitmapCache* gCache;
+    // each cache costs 1K or 2K of RAM, since each bitmap is 1x256 at either 32bpp or 64bpp
+ static const int MAX_NUM_CACHED_GRADIENT_BITMAPS = 32;
+ SkAutoMutexAcquire ama(gGradientCacheMutex);
+
+ if (nullptr == gCache) {
+ gCache = new SkGradientBitmapCache(MAX_NUM_CACHED_GRADIENT_BITMAPS);
+ }
+ size_t size = count * sizeof(int32_t);
+
+ if (!gCache->find(storage.get(), size, bitmap)) {
+ if (GradientBitmapType::kLegacy == bitmapType) {
+ // force our cache32pixelref to be built
+ (void)cache->getCache32();
+ bitmap->setInfo(SkImageInfo::MakeN32Premul(kCache32Count, 1));
+ bitmap->setPixelRef(cache->getCache32PixelRef());
+ } else {
+ // For these cases we use the bitmap cache, but not the GradientShaderCache. So just
+ // allocate and populate the bitmap's data directly.
+
+ SkImageInfo info;
+ switch (bitmapType) {
+ case GradientBitmapType::kSRGB:
+ info = SkImageInfo::Make(kCache32Count, 1, kRGBA_8888_SkColorType,
+ kPremul_SkAlphaType,
+ SkColorSpace::NewNamed(SkColorSpace::kSRGB_Named));
+ break;
+ case GradientBitmapType::kHalfFloat:
+ info = SkImageInfo::Make(
+ kCache32Count, 1, kRGBA_F16_SkColorType, kPremul_SkAlphaType,
+ SkColorSpace::NewNamed(SkColorSpace::kSRGBLinear_Named));
+ break;
+ default:
+ SkFAIL("Unexpected bitmap type");
+ return;
+ }
+ bitmap->allocPixels(info);
+ this->initLinearBitmap(bitmap);
+ }
+ gCache->add(storage.get(), size, *bitmap);
+ }
+}
+
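+// Copies this gradient's colors, stop positions, tile mode and flags into the
+// caller-supplied GradientInfo; when flipGrad is set, the colors and positions are
+// reversed first (e.g. for a two-point conical gradient built with its circles swapped).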
+void SkGradientShaderBase::commonAsAGradient(GradientInfo* info, bool flipGrad) const {
+ if (info) {
+ if (info->fColorCount >= fColorCount) {
+ SkColor* colorLoc;
+ Rec* recLoc;
+ if (flipGrad && (info->fColors || info->fColorOffsets)) {
+ SkAutoSTArray<8, SkColor> colorStorage(fColorCount);
+ SkAutoSTArray<8, Rec> recStorage(fColorCount);
+ colorLoc = colorStorage.get();
+ recLoc = recStorage.get();
+ FlipGradientColors(colorLoc, recLoc, fOrigColors, fRecs, fColorCount);
+ } else {
+ colorLoc = fOrigColors;
+ recLoc = fRecs;
+ }
+ if (info->fColors) {
+ memcpy(info->fColors, colorLoc, fColorCount * sizeof(SkColor));
+ }
+ if (info->fColorOffsets) {
+ if (fColorCount == 2) {
+ info->fColorOffsets[0] = 0;
+ info->fColorOffsets[1] = SK_Scalar1;
+ } else if (fColorCount > 2) {
+ for (int i = 0; i < fColorCount; ++i) {
+ info->fColorOffsets[i] = SkFixedToScalar(recLoc[i].fPos);
+ }
+ }
+ }
+ }
+ info->fColorCount = fColorCount;
+ info->fTileMode = fTileMode;
+ info->fGradientFlags = fGradFlags;
+ }
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkGradientShaderBase::toString(SkString* str) const {
+
+ str->appendf("%d colors: ", fColorCount);
+
+ for (int i = 0; i < fColorCount; ++i) {
+ str->appendHex(fOrigColors[i], 8);
+ if (i < fColorCount-1) {
+ str->append(", ");
+ }
+ }
+
+ if (fColorCount > 2) {
+ str->append(" points: (");
+ for (int i = 0; i < fColorCount; ++i) {
+ str->appendScalar(SkFixedToScalar(fRecs[i].fPos));
+ if (i < fColorCount-1) {
+ str->append(", ");
+ }
+ }
+ str->append(")");
+ }
+
+ static const char* gTileModeName[SkShader::kTileModeCount] = {
+ "clamp", "repeat", "mirror"
+ };
+
+ str->append(" ");
+ str->append(gTileModeName[fTileMode]);
+
+ this->INHERITED::toString(str);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+// Return true if these parameters are valid/legal/safe to construct a gradient
+//
+static bool valid_grad(const SkColor4f colors[], const SkScalar pos[], int count,
+ unsigned tileMode) {
+ return nullptr != colors && count >= 1 && tileMode < (unsigned)SkShader::kTileModeCount;
+}
+
+static void desc_init(SkGradientShaderBase::Descriptor* desc,
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int colorCount,
+ SkShader::TileMode mode, uint32_t flags, const SkMatrix* localMatrix) {
+ SkASSERT(colorCount > 1);
+
+ desc->fColors = colors;
+ desc->fColorSpace = std::move(colorSpace);
+ desc->fPos = pos;
+ desc->fCount = colorCount;
+ desc->fTileMode = mode;
+ desc->fGradFlags = flags;
+ desc->fLocalMatrix = localMatrix;
+}
+
+// assumes colors is SkColor4f* and pos is SkScalar*
+#define EXPAND_1_COLOR(count) \
+ SkColor4f tmp[2]; \
+ do { \
+ if (1 == count) { \
+ tmp[0] = tmp[1] = colors[0]; \
+ colors = tmp; \
+ pos = nullptr; \
+ count = 2; \
+ } \
+ } while (0)
+
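+// Collapses a 3-stop gradient with positions (0, 0, 1) or (0, 1, 1) down to 2 stops
+// when the duplicated edge stop is redundant: under repeat/mirror tiling, or when the
+// two coincident stops share the same color.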
+struct ColorStopOptimizer {
+ ColorStopOptimizer(const SkColor4f* colors, const SkScalar* pos,
+ int count, SkShader::TileMode mode)
+ : fColors(colors)
+ , fPos(pos)
+ , fCount(count) {
+
+ if (!pos || count != 3) {
+ return;
+ }
+
+ if (SkScalarNearlyEqual(pos[0], 0.0f) &&
+ SkScalarNearlyEqual(pos[1], 0.0f) &&
+ SkScalarNearlyEqual(pos[2], 1.0f)) {
+
+ if (SkShader::kRepeat_TileMode == mode ||
+ SkShader::kMirror_TileMode == mode ||
+ colors[0] == colors[1]) {
+
+ // Ignore the leftmost color/pos.
+ fColors += 1;
+ fPos += 1;
+ fCount = 2;
+ }
+ } else if (SkScalarNearlyEqual(pos[0], 0.0f) &&
+ SkScalarNearlyEqual(pos[1], 1.0f) &&
+ SkScalarNearlyEqual(pos[2], 1.0f)) {
+
+ if (SkShader::kRepeat_TileMode == mode ||
+ SkShader::kMirror_TileMode == mode ||
+ colors[1] == colors[2]) {
+
+ // Ignore the rightmost color/pos.
+ fCount = 2;
+ }
+ }
+ }
+
+ const SkColor4f* fColors;
+ const SkScalar* fPos;
+ int fCount;
+};
+
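+// Small adapter that converts a legacy 8-bit SkColor array to SkColor4f, letting the
+// SkColor factory overloads below forward to the SkColor4f-based implementations.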
+struct ColorConverter {
+ ColorConverter(const SkColor* colors, int count) {
+ for (int i = 0; i < count; ++i) {
+ fColors4f.push_back(SkColor4f::FromColor(colors[i]));
+ }
+ }
+
+ SkSTArray<2, SkColor4f, true> fColors4f;
+};
+
+sk_sp<SkShader> SkGradientShader::MakeLinear(const SkPoint pts[2],
+ const SkColor colors[],
+ const SkScalar pos[], int colorCount,
+ SkShader::TileMode mode,
+ uint32_t flags,
+ const SkMatrix* localMatrix) {
+ ColorConverter converter(colors, colorCount);
+ return MakeLinear(pts, converter.fColors4f.begin(), nullptr, pos, colorCount, mode, flags,
+ localMatrix);
+}
+
+sk_sp<SkShader> SkGradientShader::MakeLinear(const SkPoint pts[2],
+ const SkColor4f colors[],
+ sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int colorCount,
+ SkShader::TileMode mode,
+ uint32_t flags,
+ const SkMatrix* localMatrix) {
+ if (!pts || !SkScalarIsFinite((pts[1] - pts[0]).length())) {
+ return nullptr;
+ }
+ if (!valid_grad(colors, pos, colorCount, mode)) {
+ return nullptr;
+ }
+ if (1 == colorCount) {
+ return SkShader::MakeColorShader(colors[0], std::move(colorSpace));
+ }
+
+ ColorStopOptimizer opt(colors, pos, colorCount, mode);
+
+ SkGradientShaderBase::Descriptor desc;
+ desc_init(&desc, opt.fColors, std::move(colorSpace), opt.fPos, opt.fCount, mode, flags,
+ localMatrix);
+ return sk_make_sp<SkLinearGradient>(pts, desc);
+}
+
+sk_sp<SkShader> SkGradientShader::MakeRadial(const SkPoint& center, SkScalar radius,
+ const SkColor colors[],
+ const SkScalar pos[], int colorCount,
+ SkShader::TileMode mode,
+ uint32_t flags,
+ const SkMatrix* localMatrix) {
+ ColorConverter converter(colors, colorCount);
+ return MakeRadial(center, radius, converter.fColors4f.begin(), nullptr, pos, colorCount, mode,
+ flags, localMatrix);
+}
+
+sk_sp<SkShader> SkGradientShader::MakeRadial(const SkPoint& center, SkScalar radius,
+ const SkColor4f colors[],
+ sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int colorCount,
+ SkShader::TileMode mode,
+ uint32_t flags,
+ const SkMatrix* localMatrix) {
+ if (radius <= 0) {
+ return nullptr;
+ }
+ if (!valid_grad(colors, pos, colorCount, mode)) {
+ return nullptr;
+ }
+ if (1 == colorCount) {
+ return SkShader::MakeColorShader(colors[0], std::move(colorSpace));
+ }
+
+ ColorStopOptimizer opt(colors, pos, colorCount, mode);
+
+ SkGradientShaderBase::Descriptor desc;
+ desc_init(&desc, opt.fColors, std::move(colorSpace), opt.fPos, opt.fCount, mode, flags,
+ localMatrix);
+ return sk_make_sp<SkRadialGradient>(center, radius, desc);
+}
+
+sk_sp<SkShader> SkGradientShader::MakeTwoPointConical(const SkPoint& start,
+ SkScalar startRadius,
+ const SkPoint& end,
+ SkScalar endRadius,
+ const SkColor colors[],
+ const SkScalar pos[],
+ int colorCount,
+ SkShader::TileMode mode,
+ uint32_t flags,
+ const SkMatrix* localMatrix) {
+ ColorConverter converter(colors, colorCount);
+ return MakeTwoPointConical(start, startRadius, end, endRadius, converter.fColors4f.begin(),
+ nullptr, pos, colorCount, mode, flags, localMatrix);
+}
+
+sk_sp<SkShader> SkGradientShader::MakeTwoPointConical(const SkPoint& start,
+ SkScalar startRadius,
+ const SkPoint& end,
+ SkScalar endRadius,
+ const SkColor4f colors[],
+ sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[],
+ int colorCount,
+ SkShader::TileMode mode,
+ uint32_t flags,
+ const SkMatrix* localMatrix) {
+ if (startRadius < 0 || endRadius < 0) {
+ return nullptr;
+ }
+ if (!valid_grad(colors, pos, colorCount, mode)) {
+ return nullptr;
+ }
+ if (startRadius == endRadius) {
+ if (start == end || startRadius == 0) {
+ return SkShader::MakeEmptyShader();
+ }
+ }
+ EXPAND_1_COLOR(colorCount);
+
+ ColorStopOptimizer opt(colors, pos, colorCount, mode);
+
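+    // When the start radius is the larger one, swap the two circles and reverse the
+    // colors/positions so the constructed gradient always has its second radius >= its
+    // first radius.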
+ bool flipGradient = startRadius > endRadius;
+
+ SkGradientShaderBase::Descriptor desc;
+
+ if (!flipGradient) {
+ desc_init(&desc, opt.fColors, std::move(colorSpace), opt.fPos, opt.fCount, mode, flags,
+ localMatrix);
+ return sk_make_sp<SkTwoPointConicalGradient>(start, startRadius, end, endRadius,
+ flipGradient, desc);
+ } else {
+ SkAutoSTArray<8, SkColor4f> colorsNew(opt.fCount);
+ SkAutoSTArray<8, SkScalar> posNew(opt.fCount);
+ for (int i = 0; i < opt.fCount; ++i) {
+ colorsNew[i] = opt.fColors[opt.fCount - i - 1];
+ }
+
+ if (pos) {
+ for (int i = 0; i < opt.fCount; ++i) {
+ posNew[i] = 1 - opt.fPos[opt.fCount - i - 1];
+ }
+ desc_init(&desc, colorsNew.get(), std::move(colorSpace), posNew.get(), opt.fCount, mode,
+ flags, localMatrix);
+ } else {
+ desc_init(&desc, colorsNew.get(), std::move(colorSpace), nullptr, opt.fCount, mode,
+ flags, localMatrix);
+ }
+
+ return sk_make_sp<SkTwoPointConicalGradient>(end, endRadius, start, startRadius,
+ flipGradient, desc);
+ }
+}
+
+sk_sp<SkShader> SkGradientShader::MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor colors[],
+ const SkScalar pos[],
+ int colorCount,
+ uint32_t flags,
+ const SkMatrix* localMatrix) {
+ ColorConverter converter(colors, colorCount);
+ return MakeSweep(cx, cy, converter.fColors4f.begin(), nullptr, pos, colorCount, flags,
+ localMatrix);
+}
+
+sk_sp<SkShader> SkGradientShader::MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor4f colors[],
+ sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[],
+ int colorCount,
+ uint32_t flags,
+ const SkMatrix* localMatrix) {
+ if (!valid_grad(colors, pos, colorCount, SkShader::kClamp_TileMode)) {
+ return nullptr;
+ }
+ if (1 == colorCount) {
+ return SkShader::MakeColorShader(colors[0], std::move(colorSpace));
+ }
+
+ auto mode = SkShader::kClamp_TileMode;
+
+ ColorStopOptimizer opt(colors, pos, colorCount, mode);
+
+ SkGradientShaderBase::Descriptor desc;
+ desc_init(&desc, opt.fColors, std::move(colorSpace), opt.fPos, opt.fCount, mode, flags,
+ localMatrix);
+ return sk_make_sp<SkSweepGradient>(cx, cy, desc);
+}
+
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkGradientShader)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkLinearGradient)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkRadialGradient)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkSweepGradient)
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkTwoPointConicalGradient)
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+#include "GrContext.h"
+#include "GrInvariantOutput.h"
+#include "GrTextureStripAtlas.h"
+#include "gl/GrGLContext.h"
+#include "glsl/GrGLSLColorSpaceXformHelper.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "SkGr.h"
+
+static inline bool close_to_one_half(const SkFixed& val) {
+ return SkScalarNearlyEqual(SkFixedToScalar(val), SK_ScalarHalf);
+}
+
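+// Number of colors uploaded as uniforms for each analytic ColorType; the texture path
+// returns 0 because it samples a gradient table instead of using color uniforms.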
+static inline int color_type_to_color_count(GrGradientEffect::ColorType colorType) {
+ switch (colorType) {
+#if GR_GL_USE_ACCURATE_HARD_STOP_GRADIENTS
+ case GrGradientEffect::kHardStopCentered_ColorType:
+ return 4;
+ case GrGradientEffect::kHardStopLeftEdged_ColorType:
+ case GrGradientEffect::kHardStopRightEdged_ColorType:
+ return 3;
+#endif
+ case GrGradientEffect::kTwo_ColorType:
+ return 2;
+ case GrGradientEffect::kThree_ColorType:
+ return 3;
+ case GrGradientEffect::kTexture_ColorType:
+ return 0;
+ }
+
+ SkDEBUGFAIL("Unhandled ColorType in color_type_to_color_count()");
+ return -1;
+}
+
+GrGradientEffect::ColorType GrGradientEffect::determineColorType(
+ const SkGradientShaderBase& shader) {
+#if GR_GL_USE_ACCURATE_HARD_STOP_GRADIENTS
+ if (shader.fOrigPos) {
+ if (4 == shader.fColorCount) {
+ if (SkScalarNearlyEqual(shader.fOrigPos[0], 0.0f) &&
+ SkScalarNearlyEqual(shader.fOrigPos[1], 0.5f) &&
+ SkScalarNearlyEqual(shader.fOrigPos[2], 0.5f) &&
+ SkScalarNearlyEqual(shader.fOrigPos[3], 1.0f)) {
+
+ return kHardStopCentered_ColorType;
+ }
+ } else if (3 == shader.fColorCount) {
+ if (SkScalarNearlyEqual(shader.fOrigPos[0], 0.0f) &&
+ SkScalarNearlyEqual(shader.fOrigPos[1], 0.0f) &&
+ SkScalarNearlyEqual(shader.fOrigPos[2], 1.0f)) {
+
+ return kHardStopLeftEdged_ColorType;
+ } else if (SkScalarNearlyEqual(shader.fOrigPos[0], 0.0f) &&
+ SkScalarNearlyEqual(shader.fOrigPos[1], 1.0f) &&
+ SkScalarNearlyEqual(shader.fOrigPos[2], 1.0f)) {
+
+ return kHardStopRightEdged_ColorType;
+ }
+ }
+ }
+#endif
+
+ if (SkShader::kClamp_TileMode == shader.getTileMode()) {
+ if (2 == shader.fColorCount) {
+ return kTwo_ColorType;
+ } else if (3 == shader.fColorCount &&
+ close_to_one_half(shader.getRecs()[1].fPos)) {
+ return kThree_ColorType;
+ }
+ }
+
+ return kTexture_ColorType;
+}
+
+void GrGradientEffect::GLSLProcessor::emitUniforms(GrGLSLUniformHandler* uniformHandler,
+ const GrGradientEffect& ge) {
+ if (int colorCount = color_type_to_color_count(ge.getColorType())) {
+ fColorsUni = uniformHandler->addUniformArray(kFragment_GrShaderFlag,
+ kVec4f_GrSLType,
+ kDefault_GrSLPrecision,
+ "Colors",
+ colorCount);
+ } else {
+ fFSYUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType, kDefault_GrSLPrecision,
+ "GradientYCoordFS");
+ }
+}
+
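+// The following helpers upload the gradient colors as vec4 uniforms (the first pair
+// handles SkColor4f with an optional color-space xform, the second pair the legacy
+// 8-bit SkColor path). The "before interp" variants premultiply by alpha on the CPU;
+// the "after interp" variants upload unpremultiplied colors and let the shader
+// premultiply after interpolation.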
+static inline void set_after_interp_color_uni_array(
+ const GrGLSLProgramDataManager& pdman,
+ const GrGLSLProgramDataManager::UniformHandle uni,
+ const SkTDArray<SkColor4f>& colors,
+ const GrColorSpaceXform* colorSpaceXform) {
+ int count = colors.count();
+ if (colorSpaceXform) {
+ constexpr int kSmallCount = 10;
+ SkAutoSTArray<4 * kSmallCount, float> vals(4 * count);
+
+ for (int i = 0; i < count; i++) {
+ colorSpaceXform->srcToDst().mapScalars(colors[i].vec(), &vals[4 * i]);
+ }
+
+ pdman.set4fv(uni, count, vals.get());
+ } else {
+ pdman.set4fv(uni, count, (float*)&colors[0]);
+ }
+}
+
+static inline void set_before_interp_color_uni_array(
+ const GrGLSLProgramDataManager& pdman,
+ const GrGLSLProgramDataManager::UniformHandle uni,
+ const SkTDArray<SkColor4f>& colors,
+ const GrColorSpaceXform* colorSpaceXform) {
+ int count = colors.count();
+ constexpr int kSmallCount = 10;
+ SkAutoSTArray<4 * kSmallCount, float> vals(4 * count);
+
+ for (int i = 0; i < count; i++) {
+ float a = colors[i].fA;
+ vals[4 * i + 0] = colors[i].fR * a;
+ vals[4 * i + 1] = colors[i].fG * a;
+ vals[4 * i + 2] = colors[i].fB * a;
+ vals[4 * i + 3] = a;
+ }
+
+ if (colorSpaceXform) {
+ for (int i = 0; i < count; i++) {
+ colorSpaceXform->srcToDst().mapScalars(&vals[4 * i]);
+ }
+ }
+
+ pdman.set4fv(uni, count, vals.get());
+}
+
+static inline void set_after_interp_color_uni_array(const GrGLSLProgramDataManager& pdman,
+ const GrGLSLProgramDataManager::UniformHandle uni,
+ const SkTDArray<SkColor>& colors) {
+ int count = colors.count();
+ constexpr int kSmallCount = 10;
+
+ SkAutoSTArray<4*kSmallCount, float> vals(4*count);
+
+ for (int i = 0; i < colors.count(); i++) {
+ // RGBA
+ vals[4*i + 0] = SkColorGetR(colors[i]) / 255.f;
+ vals[4*i + 1] = SkColorGetG(colors[i]) / 255.f;
+ vals[4*i + 2] = SkColorGetB(colors[i]) / 255.f;
+ vals[4*i + 3] = SkColorGetA(colors[i]) / 255.f;
+ }
+
+ pdman.set4fv(uni, colors.count(), vals.get());
+}
+
+static inline void set_before_interp_color_uni_array(const GrGLSLProgramDataManager& pdman,
+ const GrGLSLProgramDataManager::UniformHandle uni,
+ const SkTDArray<SkColor>& colors) {
+ int count = colors.count();
+ constexpr int kSmallCount = 10;
+
+ SkAutoSTArray<4*kSmallCount, float> vals(4*count);
+
+ for (int i = 0; i < count; i++) {
+ float a = SkColorGetA(colors[i]) / 255.f;
+ float aDiv255 = a / 255.f;
+
+ // RGBA
+ vals[4*i + 0] = SkColorGetR(colors[i]) * aDiv255;
+ vals[4*i + 1] = SkColorGetG(colors[i]) * aDiv255;
+ vals[4*i + 2] = SkColorGetB(colors[i]) * aDiv255;
+ vals[4*i + 3] = a;
+ }
+
+ pdman.set4fv(uni, count, vals.get());
+}
+
+void GrGradientEffect::GLSLProcessor::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& processor) {
+ const GrGradientEffect& e = processor.cast<GrGradientEffect>();
+
+ switch (e.getColorType()) {
+#if GR_GL_USE_ACCURATE_HARD_STOP_GRADIENTS
+ case GrGradientEffect::kHardStopCentered_ColorType:
+ case GrGradientEffect::kHardStopLeftEdged_ColorType:
+ case GrGradientEffect::kHardStopRightEdged_ColorType:
+#endif
+ case GrGradientEffect::kTwo_ColorType:
+ case GrGradientEffect::kThree_ColorType: {
+ if (e.fColors4f.count() > 0) {
+ // Gamma-correct / color-space aware
+ if (GrGradientEffect::kBeforeInterp_PremulType == e.getPremulType()) {
+ set_before_interp_color_uni_array(pdman, fColorsUni, e.fColors4f,
+ e.fColorSpaceXform.get());
+ } else {
+ set_after_interp_color_uni_array(pdman, fColorsUni, e.fColors4f,
+ e.fColorSpaceXform.get());
+ }
+ } else {
+ // Legacy mode. Would be nice if we had converted the 8-bit colors to float earlier
+ if (GrGradientEffect::kBeforeInterp_PremulType == e.getPremulType()) {
+ set_before_interp_color_uni_array(pdman, fColorsUni, e.fColors);
+ } else {
+ set_after_interp_color_uni_array(pdman, fColorsUni, e.fColors);
+ }
+ }
+
+ break;
+ }
+
+ case GrGradientEffect::kTexture_ColorType: {
+ SkScalar yCoord = e.getYCoord();
+ if (yCoord != fCachedYCoord) {
+ pdman.set1f(fFSYUni, yCoord);
+ fCachedYCoord = yCoord;
+ }
+ if (SkToBool(e.fColorSpaceXform)) {
+ pdman.setSkMatrix44(fColorSpaceXformUni, e.fColorSpaceXform->srcToDst());
+ }
+ break;
+ }
+ }
+}
+
+uint32_t GrGradientEffect::GLSLProcessor::GenBaseGradientKey(const GrProcessor& processor) {
+ const GrGradientEffect& e = processor.cast<GrGradientEffect>();
+
+ uint32_t key = 0;
+
+ if (GrGradientEffect::kBeforeInterp_PremulType == e.getPremulType()) {
+ key |= kPremulBeforeInterpKey;
+ }
+
+ if (GrGradientEffect::kTwo_ColorType == e.getColorType()) {
+ key |= kTwoColorKey;
+ } else if (GrGradientEffect::kThree_ColorType == e.getColorType()) {
+ key |= kThreeColorKey;
+ }
+#if GR_GL_USE_ACCURATE_HARD_STOP_GRADIENTS
+ else if (GrGradientEffect::kHardStopCentered_ColorType == e.getColorType()) {
+ key |= kHardStopCenteredKey;
+ } else if (GrGradientEffect::kHardStopLeftEdged_ColorType == e.getColorType()) {
+ key |= kHardStopZeroZeroOneKey;
+ } else if (GrGradientEffect::kHardStopRightEdged_ColorType == e.getColorType()) {
+ key |= kHardStopZeroOneOneKey;
+ }
+
+ if (SkShader::TileMode::kClamp_TileMode == e.fTileMode) {
+ key |= kClampTileMode;
+ } else if (SkShader::TileMode::kRepeat_TileMode == e.fTileMode) {
+ key |= kRepeatTileMode;
+ } else {
+ key |= kMirrorTileMode;
+ }
+#endif
+
+ key |= GrColorSpaceXform::XformKey(e.fColorSpaceXform.get()) << kReservedBits;
+
+ return key;
+}
+
+void GrGradientEffect::GLSLProcessor::emitColor(GrGLSLFPFragmentBuilder* fragBuilder,
+ GrGLSLUniformHandler* uniformHandler,
+ const GrGLSLCaps* glslCaps,
+ const GrGradientEffect& ge,
+ const char* gradientTValue,
+ const char* outputColor,
+ const char* inputColor,
+ const TextureSamplers& texSamplers) {
+ switch (ge.getColorType()) {
+#if GR_GL_USE_ACCURATE_HARD_STOP_GRADIENTS
+ case kHardStopCentered_ColorType: {
+ const char* t = gradientTValue;
+ const char* colors = uniformHandler->getUniformCStr(fColorsUni);
+
+ fragBuilder->codeAppendf("float clamp_t = clamp(%s, 0.0, 1.0);", t);
+
+ // Account for tile mode
+ if (SkShader::kRepeat_TileMode == ge.fTileMode) {
+ fragBuilder->codeAppendf("clamp_t = fract(%s);", t);
+ } else if (SkShader::kMirror_TileMode == ge.fTileMode) {
+ fragBuilder->codeAppendf("if (%s < 0.0 || %s > 1.0) {", t, t);
+ fragBuilder->codeAppendf(" if (mod(floor(%s), 2.0) == 0.0) {", t);
+ fragBuilder->codeAppendf(" clamp_t = fract(%s);", t);
+ fragBuilder->codeAppendf(" } else {");
+ fragBuilder->codeAppendf(" clamp_t = 1.0 - fract(%s);", t);
+ fragBuilder->codeAppendf(" }");
+ fragBuilder->codeAppendf("}");
+ }
+
+ // Calculate color
+ fragBuilder->codeAppendf("float relative_t = fract(2.0 * clamp_t);");
+ if (SkShader::kClamp_TileMode == ge.fTileMode) {
+ fragBuilder->codeAppendf("relative_t += step(1.0, %s);", t);
+ }
+
+ fragBuilder->codeAppendf("vec4 start = %s[0];", colors);
+ fragBuilder->codeAppendf("vec4 end = %s[1];", colors);
+ fragBuilder->codeAppendf("if (clamp_t >= 0.5) {");
+ fragBuilder->codeAppendf(" start = %s[2];", colors);
+ fragBuilder->codeAppendf(" end = %s[3];", colors);
+ fragBuilder->codeAppendf("}");
+ fragBuilder->codeAppendf("vec4 colorTemp = mix(start, end, relative_t);");
+
+ if (GrGradientEffect::kAfterInterp_PremulType == ge.getPremulType()) {
+ fragBuilder->codeAppend("colorTemp.rgb *= colorTemp.a;");
+ }
+ fragBuilder->codeAppendf("%s = %s;", outputColor,
+ (GrGLSLExpr4(inputColor) * GrGLSLExpr4("colorTemp")).c_str());
+
+ break;
+ }
+
+ case kHardStopLeftEdged_ColorType: {
+ const char* t = gradientTValue;
+ const char* colors = uniformHandler->getUniformCStr(fColorsUni);
+
+ fragBuilder->codeAppendf("float clamp_t = clamp(%s, 0.0, 1.0);", t);
+
+ // Account for tile mode
+ if (SkShader::kRepeat_TileMode == ge.fTileMode) {
+ fragBuilder->codeAppendf("clamp_t = fract(%s);", t);
+ } else if (SkShader::kMirror_TileMode == ge.fTileMode) {
+ fragBuilder->codeAppendf("if (%s < 0.0 || %s > 1.0) {", t, t);
+ fragBuilder->codeAppendf(" if (mod(floor(%s), 2.0) == 0.0) {", t);
+ fragBuilder->codeAppendf(" clamp_t = fract(%s);", t);
+ fragBuilder->codeAppendf(" } else {");
+ fragBuilder->codeAppendf(" clamp_t = 1.0 - fract(%s);", t);
+ fragBuilder->codeAppendf(" }");
+ fragBuilder->codeAppendf("}");
+ }
+
+ fragBuilder->codeAppendf("vec4 colorTemp = mix(%s[1], %s[2], clamp_t);", colors,
+ colors);
+ if (SkShader::kClamp_TileMode == ge.fTileMode) {
+ fragBuilder->codeAppendf("if (%s < 0.0) {", t);
+ fragBuilder->codeAppendf(" colorTemp = %s[0];", colors);
+ fragBuilder->codeAppendf("}");
+ }
+
+ if (GrGradientEffect::kAfterInterp_PremulType == ge.getPremulType()) {
+ fragBuilder->codeAppend("colorTemp.rgb *= colorTemp.a;");
+ }
+ fragBuilder->codeAppendf("%s = %s;", outputColor,
+ (GrGLSLExpr4(inputColor) * GrGLSLExpr4("colorTemp")).c_str());
+
+ break;
+ }
+
+ case kHardStopRightEdged_ColorType: {
+ const char* t = gradientTValue;
+ const char* colors = uniformHandler->getUniformCStr(fColorsUni);
+
+ fragBuilder->codeAppendf("float clamp_t = clamp(%s, 0.0, 1.0);", t);
+
+ // Account for tile mode
+ if (SkShader::kRepeat_TileMode == ge.fTileMode) {
+ fragBuilder->codeAppendf("clamp_t = fract(%s);", t);
+ } else if (SkShader::kMirror_TileMode == ge.fTileMode) {
+ fragBuilder->codeAppendf("if (%s < 0.0 || %s > 1.0) {", t, t);
+ fragBuilder->codeAppendf(" if (mod(floor(%s), 2.0) == 0.0) {", t);
+ fragBuilder->codeAppendf(" clamp_t = fract(%s);", t);
+ fragBuilder->codeAppendf(" } else {");
+ fragBuilder->codeAppendf(" clamp_t = 1.0 - fract(%s);", t);
+ fragBuilder->codeAppendf(" }");
+ fragBuilder->codeAppendf("}");
+ }
+
+ fragBuilder->codeAppendf("vec4 colorTemp = mix(%s[0], %s[1], clamp_t);", colors,
+ colors);
+ if (SkShader::kClamp_TileMode == ge.fTileMode) {
+ fragBuilder->codeAppendf("if (%s > 1.0) {", t);
+ fragBuilder->codeAppendf(" colorTemp = %s[2];", colors);
+ fragBuilder->codeAppendf("}");
+ }
+
+ if (GrGradientEffect::kAfterInterp_PremulType == ge.getPremulType()) {
+ fragBuilder->codeAppend("colorTemp.rgb *= colorTemp.a;");
+ }
+ fragBuilder->codeAppendf("%s = %s;", outputColor,
+ (GrGLSLExpr4(inputColor) * GrGLSLExpr4("colorTemp")).c_str());
+
+ break;
+ }
+#endif
+
+ case kTwo_ColorType: {
+ const char* t = gradientTValue;
+ const char* colors = uniformHandler->getUniformCStr(fColorsUni);
+
+ fragBuilder->codeAppendf("vec4 colorTemp = mix(%s[0], %s[1], clamp(%s, 0.0, 1.0));",
+ colors, colors, t);
+
+            // We could skip this step if both colors are known to be opaque. Two
+            // considerations:
+            // The gradient SkShader reporting opaque is more restrictive than necessary in the
+            // two-point case. Make sure the key reflects this optimization (and note that it can
+            // use the same shader as the kBeforeInterp case). This same optimization applies to
+            // the 3-color case below.
+ if (GrGradientEffect::kAfterInterp_PremulType == ge.getPremulType()) {
+ fragBuilder->codeAppend("colorTemp.rgb *= colorTemp.a;");
+ }
+
+ fragBuilder->codeAppendf("%s = %s;", outputColor,
+ (GrGLSLExpr4(inputColor) * GrGLSLExpr4("colorTemp")).c_str());
+
+ break;
+ }
+
+ case kThree_ColorType: {
+ const char* t = gradientTValue;
+ const char* colors = uniformHandler->getUniformCStr(fColorsUni);
+
+ fragBuilder->codeAppendf("float oneMinus2t = 1.0 - (2.0 * %s);", t);
+ fragBuilder->codeAppendf("vec4 colorTemp = clamp(oneMinus2t, 0.0, 1.0) * %s[0];",
+ colors);
+ if (!glslCaps->canUseMinAndAbsTogether()) {
+ // The Tegra3 compiler will sometimes never return if we have
+ // min(abs(oneMinus2t), 1.0), or do the abs first in a separate expression.
+ fragBuilder->codeAppendf("float minAbs = abs(oneMinus2t);");
+ fragBuilder->codeAppendf("minAbs = minAbs > 1.0 ? 1.0 : minAbs;");
+ fragBuilder->codeAppendf("colorTemp += (1.0 - minAbs) * %s[1];", colors);
+ } else {
+ fragBuilder->codeAppendf("colorTemp += (1.0 - min(abs(oneMinus2t), 1.0)) * %s[1];",
+ colors);
+ }
+ fragBuilder->codeAppendf("colorTemp += clamp(-oneMinus2t, 0.0, 1.0) * %s[2];", colors);
+
+ if (GrGradientEffect::kAfterInterp_PremulType == ge.getPremulType()) {
+ fragBuilder->codeAppend("colorTemp.rgb *= colorTemp.a;");
+ }
+
+ fragBuilder->codeAppendf("%s = %s;", outputColor,
+ (GrGLSLExpr4(inputColor) * GrGLSLExpr4("colorTemp")).c_str());
+
+ break;
+ }
+
+ case kTexture_ColorType: {
+ GrGLSLColorSpaceXformHelper colorSpaceHelper(uniformHandler, ge.fColorSpaceXform.get(),
+ &fColorSpaceXformUni);
+
+ const char* fsyuni = uniformHandler->getUniformCStr(fFSYUni);
+
+ fragBuilder->codeAppendf("vec2 coord = vec2(%s, %s);", gradientTValue, fsyuni);
+ fragBuilder->codeAppendf("%s = ", outputColor);
+ fragBuilder->appendTextureLookupAndModulate(inputColor, texSamplers[0], "coord",
+ kVec2f_GrSLType, &colorSpaceHelper);
+ fragBuilder->codeAppend(";");
+
+ break;
+ }
+ }
+}
+
+/////////////////////////////////////////////////////////////////////
+
+GrGradientEffect::GrGradientEffect(const CreateArgs& args) {
+ const SkGradientShaderBase& shader(*args.fShader);
+
+ fIsOpaque = shader.isOpaque();
+
+ fColorType = this->determineColorType(shader);
+ fColorSpaceXform = std::move(args.fColorSpaceXform);
+
+ if (kTexture_ColorType != fColorType) {
+ SkASSERT(shader.fOrigColors && shader.fOrigColors4f);
+ if (args.fGammaCorrect) {
+ fColors4f = SkTDArray<SkColor4f>(shader.fOrigColors4f, shader.fColorCount);
+ } else {
+ fColors = SkTDArray<SkColor>(shader.fOrigColors, shader.fColorCount);
+ }
+
+#if GR_GL_USE_ACCURATE_HARD_STOP_GRADIENTS
+ if (shader.fOrigPos) {
+ fPositions = SkTDArray<SkScalar>(shader.fOrigPos, shader.fColorCount);
+ }
+#endif
+ }
+
+#if GR_GL_USE_ACCURATE_HARD_STOP_GRADIENTS
+ fTileMode = args.fTileMode;
+#endif
+
+ switch (fColorType) {
+ // The two and three color specializations do not currently support tiling.
+ case kTwo_ColorType:
+ case kThree_ColorType:
+#if GR_GL_USE_ACCURATE_HARD_STOP_GRADIENTS
+ case kHardStopLeftEdged_ColorType:
+ case kHardStopRightEdged_ColorType:
+ case kHardStopCentered_ColorType:
+#endif
+ fRow = -1;
+
+ if (SkGradientShader::kInterpolateColorsInPremul_Flag & shader.getGradFlags()) {
+ fPremulType = kBeforeInterp_PremulType;
+ } else {
+ fPremulType = kAfterInterp_PremulType;
+ }
+
+ fCoordTransform.reset(*args.fMatrix);
+
+ break;
+ case kTexture_ColorType:
+            // It doesn't matter how this is set; just be consistent, because it is part of
+            // the effect key.
+ fPremulType = kBeforeInterp_PremulType;
+
+ SkGradientShaderBase::GradientBitmapType bitmapType =
+ SkGradientShaderBase::GradientBitmapType::kLegacy;
+ if (args.fGammaCorrect) {
+ // Try to use F16 if we can
+ if (args.fContext->caps()->isConfigTexturable(kRGBA_half_GrPixelConfig)) {
+ bitmapType = SkGradientShaderBase::GradientBitmapType::kHalfFloat;
+ } else if (args.fContext->caps()->isConfigTexturable(kSRGBA_8888_GrPixelConfig)) {
+ bitmapType = SkGradientShaderBase::GradientBitmapType::kSRGB;
+ } else {
+ // This can happen, but only if someone explicitly creates an unsupported
+ // (eg sRGB) surface. Just fall back to legacy behavior.
+ }
+ }
+
+ SkBitmap bitmap;
+ shader.getGradientTableBitmap(&bitmap, bitmapType);
+
+ GrTextureStripAtlas::Desc desc;
+ desc.fWidth = bitmap.width();
+ desc.fHeight = 32;
+ desc.fRowHeight = bitmap.height();
+ desc.fContext = args.fContext;
+ desc.fConfig = SkImageInfo2GrPixelConfig(bitmap.info(), *args.fContext->caps());
+ fAtlas = GrTextureStripAtlas::GetAtlas(desc);
+ SkASSERT(fAtlas);
+
+ // We always filter the gradient table. Each table is one row of a texture, always
+ // y-clamp.
+ GrTextureParams params;
+ params.setFilterMode(GrTextureParams::kBilerp_FilterMode);
+ params.setTileModeX(args.fTileMode);
+
+ fRow = fAtlas->lockRow(bitmap);
+ if (-1 != fRow) {
+ fYCoord = fAtlas->getYOffset(fRow)+SK_ScalarHalf*fAtlas->getNormalizedTexelHeight();
+ fCoordTransform.reset(*args.fMatrix, fAtlas->getTexture(), params.filterMode());
+ fTextureAccess.reset(fAtlas->getTexture(), params);
+ } else {
+ SkAutoTUnref<GrTexture> texture(
+ GrRefCachedBitmapTexture(args.fContext, bitmap, params,
+ SkSourceGammaTreatment::kRespect));
+ if (!texture) {
+ return;
+ }
+ fCoordTransform.reset(*args.fMatrix, texture, params.filterMode());
+ fTextureAccess.reset(texture, params);
+ fYCoord = SK_ScalarHalf;
+ }
+
+ this->addTextureAccess(&fTextureAccess);
+
+ break;
+ }
+
+ this->addCoordTransform(&fCoordTransform);
+}
+
+GrGradientEffect::~GrGradientEffect() {
+ if (this->useAtlas()) {
+ fAtlas->unlockRow(fRow);
+ }
+}
+
+bool GrGradientEffect::onIsEqual(const GrFragmentProcessor& processor) const {
+ const GrGradientEffect& ge = processor.cast<GrGradientEffect>();
+
+ if (this->fColorType == ge.getColorType()) {
+ if (kTexture_ColorType == fColorType) {
+ if (fYCoord != ge.getYCoord()) {
+ return false;
+ }
+ } else {
+ if (this->getPremulType() != ge.getPremulType() ||
+ this->fColors.count() != ge.fColors.count() ||
+ this->fColors4f.count() != ge.fColors4f.count()) {
+ return false;
+ }
+
+ for (int i = 0; i < this->fColors.count(); i++) {
+ if (*this->getColors(i) != *ge.getColors(i)) {
+ return false;
+ }
+ }
+ for (int i = 0; i < this->fColors4f.count(); i++) {
+ if (*this->getColors4f(i) != *ge.getColors4f(i)) {
+ return false;
+ }
+ }
+ }
+
+
+ SkASSERT(this->useAtlas() == ge.useAtlas());
+ return GrColorSpaceXform::Equals(this->fColorSpaceXform.get(), ge.fColorSpaceXform.get());
+ }
+
+ return false;
+}
+
+void GrGradientEffect::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ if (fIsOpaque) {
+ inout->mulByUnknownOpaqueFourComponents();
+ } else {
+ inout->mulByUnknownFourComponents();
+ }
+}
+
+int GrGradientEffect::RandomGradientParams(SkRandom* random,
+ SkColor colors[],
+ SkScalar** stops,
+ SkShader::TileMode* tm) {
+ int outColors = random->nextRangeU(1, kMaxRandomGradientColors);
+
+    // if one color, omit stops; otherwise randomly decide whether or not to use them
+ if (outColors == 1 || (outColors >= 2 && random->nextBool())) {
+ *stops = nullptr;
+ }
+
+ SkScalar stop = 0.f;
+ for (int i = 0; i < outColors; ++i) {
+ colors[i] = random->nextU();
+ if (*stops) {
+ (*stops)[i] = stop;
+ stop = i < outColors - 1 ? stop + random->nextUScalar1() * (1.f - stop) : 1.f;
+ }
+ }
+ *tm = static_cast<SkShader::TileMode>(random->nextULessThan(SkShader::kTileModeCount));
+
+ return outColors;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/effects/gradients/SkGradientShaderPriv.h b/gfx/skia/skia/src/effects/gradients/SkGradientShaderPriv.h
new file mode 100644
index 000000000..61a44184f
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/SkGradientShaderPriv.h
@@ -0,0 +1,517 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGradientShaderPriv_DEFINED
+#define SkGradientShaderPriv_DEFINED
+
+#include "SkGradientBitmapCache.h"
+#include "SkGradientShader.h"
+#include "SkClampRange.h"
+#include "SkColorPriv.h"
+#include "SkColorSpace.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+#include "SkMallocPixelRef.h"
+#include "SkUtils.h"
+#include "SkShader.h"
+#include "SkOnce.h"
+
+#if SK_SUPPORT_GPU
+ #define GR_GL_USE_ACCURATE_HARD_STOP_GRADIENTS 1
+#endif
+
+static inline void sk_memset32_dither(uint32_t dst[], uint32_t v0, uint32_t v1,
+ int count) {
+ if (count > 0) {
+ if (v0 == v1) {
+ sk_memset32(dst, v0, count);
+ } else {
+ int pairs = count >> 1;
+ for (int i = 0; i < pairs; i++) {
+ *dst++ = v0;
+ *dst++ = v1;
+ }
+ if (count & 1) {
+ *dst = v0;
+ }
+ }
+ }
+}
+
+// Clamp
+
+static inline SkFixed clamp_tileproc(SkFixed x) {
+ return SkClampMax(x, 0xFFFF);
+}
+
+// Repeat
+
+static inline SkFixed repeat_tileproc(SkFixed x) {
+ return x & 0xFFFF;
+}
+
+// Mirror
+
+static inline SkFixed mirror_tileproc(SkFixed x) {
+ int s = SkLeftShift(x, 15) >> 31;
+ return (x ^ s) & 0xFFFF;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef SkFixed (*TileProc)(SkFixed);
+
+///////////////////////////////////////////////////////////////////////////////
+
+static const TileProc gTileProcs[] = {
+ clamp_tileproc,
+ repeat_tileproc,
+ mirror_tileproc
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkGradientShaderBase : public SkShader {
+public:
+ struct Descriptor {
+ Descriptor() {
+ sk_bzero(this, sizeof(*this));
+ fTileMode = SkShader::kClamp_TileMode;
+ }
+
+ const SkMatrix* fLocalMatrix;
+ const SkColor4f* fColors;
+ sk_sp<SkColorSpace> fColorSpace;
+ const SkScalar* fPos;
+ int fCount;
+ SkShader::TileMode fTileMode;
+ uint32_t fGradFlags;
+
+ void flatten(SkWriteBuffer&) const;
+ };
+
+ class DescriptorScope : public Descriptor {
+ public:
+ DescriptorScope() {}
+
+ bool unflatten(SkReadBuffer&);
+
+ // fColors and fPos always point into local memory, so they can be safely mutated
+ //
+ SkColor4f* mutableColors() { return const_cast<SkColor4f*>(fColors); }
+ SkScalar* mutablePos() { return const_cast<SkScalar*>(fPos); }
+
+ private:
+ enum {
+ kStorageCount = 16
+ };
+ SkColor4f fColorStorage[kStorageCount];
+ SkScalar fPosStorage[kStorageCount];
+ SkMatrix fLocalMatrixStorage;
+ SkAutoMalloc fDynamicStorage;
+ };
+
+ SkGradientShaderBase(const Descriptor& desc, const SkMatrix& ptsToUnit);
+ virtual ~SkGradientShaderBase();
+
+ // The cache is initialized on-demand when getCache32 is called.
+ class GradientShaderCache : public SkRefCnt {
+ public:
+ GradientShaderCache(U8CPU alpha, bool dither, const SkGradientShaderBase& shader);
+ ~GradientShaderCache();
+
+ const SkPMColor* getCache32();
+
+ SkMallocPixelRef* getCache32PixelRef() const { return fCache32PixelRef; }
+
+ unsigned getAlpha() const { return fCacheAlpha; }
+ bool getDither() const { return fCacheDither; }
+
+ private:
+ // Working pointer. If it's nullptr, we need to recompute the cache values.
+ SkPMColor* fCache32;
+
+ SkMallocPixelRef* fCache32PixelRef;
+ const unsigned fCacheAlpha; // The alpha value we used when we computed the cache.
+ // Larger than 8bits so we can store uninitialized
+ // value.
+ const bool fCacheDither; // The dither flag used when we computed the cache.
+
+ const SkGradientShaderBase& fShader;
+
+ // Make sure we only initialize the cache once.
+ SkOnce fCache32InitOnce;
+
+ static void initCache32(GradientShaderCache* cache);
+
+ static void Build32bitCache(SkPMColor[], SkColor c0, SkColor c1, int count,
+ U8CPU alpha, uint32_t gradFlags, bool dither);
+ };
+
+ class GradientShaderBaseContext : public SkShader::Context {
+ public:
+ GradientShaderBaseContext(const SkGradientShaderBase& shader, const ContextRec&);
+
+ uint32_t getFlags() const override { return fFlags; }
+
+ bool isValid() const;
+
+ protected:
+ SkMatrix fDstToIndex;
+ SkMatrix::MapXYProc fDstToIndexProc;
+ uint8_t fDstToIndexClass;
+ uint8_t fFlags;
+ bool fDither;
+
+ SkAutoTUnref<GradientShaderCache> fCache;
+
+ private:
+ typedef SkShader::Context INHERITED;
+ };
+
+ bool isOpaque() const override;
+
+ enum class GradientBitmapType : uint8_t {
+ kLegacy,
+ kSRGB,
+ kHalfFloat,
+ };
+
+ void getGradientTableBitmap(SkBitmap*, GradientBitmapType bitmapType) const;
+
+ enum {
+ /// Seems like enough for visual accuracy. TODO: if pos[] deserves
+ /// it, use a larger cache.
+ kCache32Bits = 8,
+ kCache32Count = (1 << kCache32Bits),
+ kCache32Shift = 16 - kCache32Bits,
+ kSqrt32Shift = 8 - kCache32Bits,
+
+ /// This value is used to *read* the dither cache; it may be 0
+ /// if dithering is disabled.
+ kDitherStride32 = kCache32Count,
+ };
+
+ uint32_t getGradFlags() const { return fGradFlags; }
+
+protected:
+ class GradientShaderBase4fContext;
+
+ SkGradientShaderBase(SkReadBuffer& );
+ void flatten(SkWriteBuffer&) const override;
+ SK_TO_STRING_OVERRIDE()
+
+ const SkMatrix fPtsToUnit;
+ TileMode fTileMode;
+ TileProc fTileProc;
+ uint8_t fGradFlags;
+
+ struct Rec {
+ SkFixed fPos; // 0...1
+ uint32_t fScale; // (1 << 24) / range
+ };
+ Rec* fRecs;
+
+ void commonAsAGradient(GradientInfo*, bool flipGrad = false) const;
+
+ bool onAsLuminanceColor(SkColor*) const override;
+
+
+ void initLinearBitmap(SkBitmap* bitmap) const;
+
+ /*
+ * Takes in pointers to gradient color and Rec info as colorSrc and recSrc respectively.
+     * Count is the number of colors in the gradient.
+     * It then flips all the color and rec information and returns it in the respective Dst
+     * pointers. It is assumed that space has already been allocated for the Dst pointers.
+     * The rec src and dst are only assumed to be valid if count > 2.
+ */
+ static void FlipGradientColors(SkColor* colorDst, Rec* recDst,
+ SkColor* colorSrc, Rec* recSrc,
+ int count);
+
+ template <typename T, typename... Args>
+ static Context* CheckedCreateContext(void* storage, Args&&... args) {
+ auto* ctx = new (storage) T(std::forward<Args>(args)...);
+ if (!ctx->isValid()) {
+ ctx->~T();
+ return nullptr;
+ }
+ return ctx;
+ }
+
+private:
+ enum {
+ kColorStorageCount = 4, // more than this many colors, and we'll use sk_malloc for the space
+
+ kStorageSize = kColorStorageCount *
+ (sizeof(SkColor) + sizeof(SkScalar) + sizeof(Rec) + sizeof(SkColor4f))
+ };
+ SkColor fStorage[(kStorageSize + 3) >> 2];
+public:
+ SkColor* fOrigColors; // original colors, before modulation by paint in context.
+ SkColor4f* fOrigColors4f; // original colors, as linear floats
+ SkScalar* fOrigPos; // original positions
+ int fColorCount;
+ sk_sp<SkColorSpace> fColorSpace; // color space of gradient stops
+
+ bool colorsAreOpaque() const { return fColorsAreOpaque; }
+
+ TileMode getTileMode() const { return fTileMode; }
+ Rec* getRecs() const { return fRecs; }
+
+private:
+ bool fColorsAreOpaque;
+
+ GradientShaderCache* refCache(U8CPU alpha, bool dither) const;
+ mutable SkMutex fCacheMutex;
+ mutable SkAutoTUnref<GradientShaderCache> fCache;
+
+ void initCommon();
+
+ typedef SkShader INHERITED;
+};
+
+static inline int init_dither_toggle(int x, int y) {
+ x &= 1;
+ y = (y & 1) << 1;
+ return (x | y) * SkGradientShaderBase::kDitherStride32;
+}
+
+static inline int next_dither_toggle(int toggle) {
+ return toggle ^ SkGradientShaderBase::kDitherStride32;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+#include "GrColorSpaceXform.h"
+#include "GrCoordTransform.h"
+#include "GrFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+
+class GrInvariantOutput;
+
+/*
+ * The interpretation of the texture matrix depends on the sample mode. The
+ * texture matrix is applied both when the texture coordinates are explicit
+ * and when vertex positions are used as texture coordinates. In the latter
+ * case the texture matrix is applied to the pre-view-matrix position
+ * values.
+ *
+ * Normal SampleMode
+ *  The post-matrix texture coordinates are in normalized space with (0,0) at
+ * the top-left and (1,1) at the bottom right.
+ * RadialGradient
+ * The matrix specifies the radial gradient parameters.
+ * (0,0) in the post-matrix space is center of the radial gradient.
+ * Radial2Gradient
+ * Matrix transforms to space where first circle is centered at the
+ * origin. The second circle will be centered (x, 0) where x may be
+ * 0 and is provided by setRadial2Params. The post-matrix space is
+ *      normalized such that 1 equals the second radius minus the first radius.
+ * SweepGradient
+ * The angle from the origin of texture coordinates in post-matrix space
+ * determines the gradient value.
+ */
+
+class GrTextureStripAtlas;
+
+// Base class for Gr gradient effects
+class GrGradientEffect : public GrFragmentProcessor {
+public:
+ struct CreateArgs {
+ CreateArgs(GrContext* context,
+ const SkGradientShaderBase* shader,
+ const SkMatrix* matrix,
+ SkShader::TileMode tileMode,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ bool gammaCorrect)
+ : fContext(context)
+ , fShader(shader)
+ , fMatrix(matrix)
+ , fTileMode(tileMode)
+ , fColorSpaceXform(std::move(colorSpaceXform))
+ , fGammaCorrect(gammaCorrect) {}
+
+ GrContext* fContext;
+ const SkGradientShaderBase* fShader;
+ const SkMatrix* fMatrix;
+ SkShader::TileMode fTileMode;
+ sk_sp<GrColorSpaceXform> fColorSpaceXform;
+ bool fGammaCorrect;
+ };
+
+ class GLSLProcessor;
+
+ GrGradientEffect(const CreateArgs&);
+
+ virtual ~GrGradientEffect();
+
+ bool useAtlas() const { return SkToBool(-1 != fRow); }
+ SkScalar getYCoord() const { return fYCoord; }
+
+ enum ColorType {
+ kTwo_ColorType,
+ kThree_ColorType, // Symmetric three color
+ kTexture_ColorType,
+
+#if GR_GL_USE_ACCURATE_HARD_STOP_GRADIENTS
+ kHardStopCentered_ColorType, // 0, 0.5, 0.5, 1
+ kHardStopLeftEdged_ColorType, // 0, 0, 1
+ kHardStopRightEdged_ColorType, // 0, 1, 1
+#endif
+ };
+
+ ColorType getColorType() const { return fColorType; }
+
+ // Determines the type of gradient, one of:
+ // - Two-color
+ // - Symmetric three-color
+ // - Texture
+ // - Centered hard stop
+ // - Left-edged hard stop
+ // - Right-edged hard stop
+ ColorType determineColorType(const SkGradientShaderBase& shader);
+
+ enum PremulType {
+ kBeforeInterp_PremulType,
+ kAfterInterp_PremulType,
+ };
+
+ PremulType getPremulType() const { return fPremulType; }
+
+ const SkColor* getColors(int pos) const {
+ SkASSERT(fColorType != kTexture_ColorType);
+ SkASSERT(pos < fColors.count());
+ return &fColors[pos];
+ }
+
+ const SkColor4f* getColors4f(int pos) const {
+ SkASSERT(fColorType != kTexture_ColorType);
+ SkASSERT(pos < fColors4f.count());
+ return &fColors4f[pos];
+ }
+
+protected:
+ /** Populates a pair of arrays with colors and stop info to construct a random gradient.
+ The function decides whether stop values should be used or not. The return value indicates
+ the number of colors, which will be capped by kMaxRandomGradientColors. colors should be
+ sized to be at least kMaxRandomGradientColors. stops is a pointer to an array of at least
+ size kMaxRandomGradientColors. It may be updated to nullptr, indicating that nullptr should
+ be passed to the gradient factory rather than the array.
+ */
+ static const int kMaxRandomGradientColors = 4;
+ static int RandomGradientParams(SkRandom* r,
+ SkColor colors[kMaxRandomGradientColors],
+ SkScalar** stops,
+ SkShader::TileMode* tm);
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ const GrCoordTransform& getCoordTransform() const { return fCoordTransform; }
+
+private:
+ // If we're in legacy mode, then fColors will be populated. If we're gamma-correct, then
+ // fColors4f and fColorSpaceXform will be populated.
+ SkTDArray<SkColor> fColors;
+
+ SkTDArray<SkColor4f> fColors4f;
+ sk_sp<GrColorSpaceXform> fColorSpaceXform;
+
+ SkTDArray<SkScalar> fPositions;
+ SkShader::TileMode fTileMode;
+
+ GrCoordTransform fCoordTransform;
+ GrTextureAccess fTextureAccess;
+ SkScalar fYCoord;
+ GrTextureStripAtlas* fAtlas;
+ int fRow;
+ bool fIsOpaque;
+ ColorType fColorType;
+ PremulType fPremulType; // This is already baked into the table for texture gradients, and
+ // only changes behavior for gradients that don't use a texture.
+ typedef GrFragmentProcessor INHERITED;
+
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Base class for GL gradient effects
+class GrGradientEffect::GLSLProcessor : public GrGLSLFragmentProcessor {
+public:
+ GLSLProcessor() {
+ fCachedYCoord = SK_ScalarMax;
+ }
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+protected:
+ /**
+ * Subclasses must call this. It will return a key for the part of the shader code controlled
+ * by the base class. The subclasses must stick it in their key and then pass it to the below
+ * emit* functions from their emitCode function.
+ */
+ static uint32_t GenBaseGradientKey(const GrProcessor&);
+
+    // Emits the uniforms the gradient needs: the color array for analytic color types,
+    // or the y-coord for texture sampling. Subclasses should call this from emitCode().
+ void emitUniforms(GrGLSLUniformHandler*, const GrGradientEffect&);
+
+ // Emit code that gets a fragment's color from an expression for t; has branches for
+ // several control flows inside -- 2-color gradients, 3-color symmetric gradients, 4+
+ // color gradients that use the traditional texture lookup, as well as several varieties
+ // of hard stop gradients
+ void emitColor(GrGLSLFPFragmentBuilder* fragBuilder,
+ GrGLSLUniformHandler* uniformHandler,
+ const GrGLSLCaps* caps,
+ const GrGradientEffect&,
+ const char* gradientTValue,
+ const char* outputColor,
+ const char* inputColor,
+ const TextureSamplers&);
+
+private:
+ enum {
+ // First bit for premul before/after interp
+ kPremulBeforeInterpKey = 1,
+
+ // Next three bits for 2/3 color type or different special
+ // hard stop cases (neither means using texture atlas)
+ kTwoColorKey = 2,
+ kThreeColorKey = 4,
+#if GR_GL_USE_ACCURATE_HARD_STOP_GRADIENTS
+ kHardStopCenteredKey = 6,
+ kHardStopZeroZeroOneKey = 8,
+ kHardStopZeroOneOneKey = 10,
+
+ // Next two bits for tile mode
+ kClampTileMode = 16,
+ kRepeatTileMode = 32,
+ kMirrorTileMode = 48,
+
+ // Lower six bits for premul, 2/3 color type, and tile mode
+ kReservedBits = 6,
+#endif
+ };
+
+ SkScalar fCachedYCoord;
+ GrGLSLProgramDataManager::UniformHandle fColorsUni;
+ GrGLSLProgramDataManager::UniformHandle fFSYUni;
+ GrGLSLProgramDataManager::UniformHandle fColorSpaceXformUni;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/effects/gradients/SkLinearGradient.cpp b/gfx/skia/skia/src/effects/gradients/SkLinearGradient.cpp
new file mode 100644
index 000000000..df2765b46
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/SkLinearGradient.cpp
@@ -0,0 +1,769 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "Sk4fLinearGradient.h"
+#include "SkLinearGradient.h"
+#include "SkRefCnt.h"
+
+// define to test the 4f gradient path
+// #define FORCE_4F_CONTEXT
+
+static const float kInv255Float = 1.0f / 255;
+
+static inline int repeat_8bits(int x) {
+ return x & 0xFF;
+}
+
+static inline int mirror_8bits(int x) {
+ if (x & 256) {
+ x = ~x;
+ }
+ return x & 255;
+}
+
+static SkMatrix pts_to_unit_matrix(const SkPoint pts[2]) {
+ SkVector vec = pts[1] - pts[0];
+ SkScalar mag = vec.length();
+ SkScalar inv = mag ? SkScalarInvert(mag) : 0;
+
+ vec.scale(inv);
+ SkMatrix matrix;
+ matrix.setSinCos(-vec.fY, vec.fX, pts[0].fX, pts[0].fY);
+ matrix.postTranslate(-pts[0].fX, -pts[0].fY);
+ matrix.postScale(inv, inv);
+ return matrix;
+}
+
+static bool use_4f_context(const SkShader::ContextRec& rec, uint32_t flags) {
+#ifdef FORCE_4F_CONTEXT
+ return true;
+#else
+ return rec.fPreferredDstType == SkShader::ContextRec::kPM4f_DstType
+ || SkToBool(flags & SkLinearGradient::kForce4fContext_PrivateFlag);
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkLinearGradient::SkLinearGradient(const SkPoint pts[2], const Descriptor& desc)
+ : SkGradientShaderBase(desc, pts_to_unit_matrix(pts))
+ , fStart(pts[0])
+ , fEnd(pts[1]) {
+}
+
+sk_sp<SkFlattenable> SkLinearGradient::CreateProc(SkReadBuffer& buffer) {
+ DescriptorScope desc;
+ if (!desc.unflatten(buffer)) {
+ return nullptr;
+ }
+ SkPoint pts[2];
+ pts[0] = buffer.readPoint();
+ pts[1] = buffer.readPoint();
+ return SkGradientShader::MakeLinear(pts, desc.fColors, std::move(desc.fColorSpace), desc.fPos,
+ desc.fCount, desc.fTileMode, desc.fGradFlags,
+ desc.fLocalMatrix);
+}
+
+void SkLinearGradient::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writePoint(fStart);
+ buffer.writePoint(fEnd);
+}
+
+size_t SkLinearGradient::onContextSize(const ContextRec& rec) const {
+ return use_4f_context(rec, fGradFlags)
+ ? sizeof(LinearGradient4fContext)
+ : sizeof(LinearGradientContext);
+}
+
+SkShader::Context* SkLinearGradient::onCreateContext(const ContextRec& rec, void* storage) const {
+ return use_4f_context(rec, fGradFlags)
+ ? CheckedCreateContext<LinearGradient4fContext>(storage, *this, rec)
+ : CheckedCreateContext< LinearGradientContext>(storage, *this, rec);
+}
+
+// This swizzles SkColor into the same component order as SkPMColor, but does not actually
+// "pre" multiply the color components.
+//
+// This allows us to map directly to Sk4f, and eventually scale down to bytes to output a
+// SkPMColor from the floats, without having to swizzle each time.
+//
+static uint32_t SkSwizzle_Color_to_PMColor(SkColor c) {
+ return SkPackARGB32NoCheck(SkColorGetA(c), SkColorGetR(c), SkColorGetG(c), SkColorGetB(c));
+}
+
+SkLinearGradient::LinearGradientContext::LinearGradientContext(
+ const SkLinearGradient& shader, const ContextRec& ctx)
+ : INHERITED(shader, ctx)
+{
+ // setup for Sk4f
+ const int count = shader.fColorCount;
+ SkASSERT(count > 1);
+
+ fRecs.setCount(count);
+ Rec* rec = fRecs.begin();
+ if (shader.fOrigPos) {
+ rec[0].fPos = 0;
+ SkDEBUGCODE(rec[0].fPosScale = SK_FloatNaN;) // should never get used
+ for (int i = 1; i < count; ++i) {
+ rec[i].fPos = SkTPin(shader.fOrigPos[i], rec[i - 1].fPos, 1.0f);
+ float diff = rec[i].fPos - rec[i - 1].fPos;
+ if (diff > 0) {
+ rec[i].fPosScale = 1.0f / diff;
+ } else {
+ rec[i].fPosScale = 0;
+ }
+ }
+ } else {
+ // no pos specified, so we compute evenly spaced values
+ const float scale = float(count - 1);
+ const float invScale = 1.0f / scale;
+ for (int i = 0; i < count; ++i) {
+ rec[i].fPos = i * invScale;
+ rec[i].fPosScale = scale;
+ }
+ }
+ rec[count - 1].fPos = 1; // overwrite the last value just to be sure we end at 1.0
+
+ fApplyAlphaAfterInterp = true;
+ if ((shader.getGradFlags() & SkGradientShader::kInterpolateColorsInPremul_Flag) ||
+ shader.colorsAreOpaque())
+ {
+ fApplyAlphaAfterInterp = false;
+ }
+
+ if (fApplyAlphaAfterInterp) {
+ // Our fColor values are in PMColor order, but are still unpremultiplied, allowing us to
+ // interpolate in unpremultiplied space first, and then scale by alpha right before we
+ // convert to SkPMColor bytes.
+ const float paintAlpha = ctx.fPaint->getAlpha() * kInv255Float;
+ const Sk4f scale(1, 1, 1, paintAlpha);
+ for (int i = 0; i < count; ++i) {
+ uint32_t c = SkSwizzle_Color_to_PMColor(shader.fOrigColors[i]);
+ rec[i].fColor = SkNx_cast<float>(Sk4b::Load(&c)) * scale;
+ if (i > 0) {
+ SkASSERT(rec[i - 1].fPos <= rec[i].fPos);
+ }
+ }
+ } else {
+ // Our fColor values are premultiplied, so converting to SkPMColor is just a matter
+ // of converting the floats down to bytes.
+ unsigned alphaScale = ctx.fPaint->getAlpha() + (ctx.fPaint->getAlpha() >> 7);
+ for (int i = 0; i < count; ++i) {
+ SkPMColor pmc = SkPreMultiplyColor(shader.fOrigColors[i]);
+ pmc = SkAlphaMulQ(pmc, alphaScale);
+ rec[i].fColor = SkNx_cast<float>(Sk4b::Load(&pmc));
+ if (i > 0) {
+ SkASSERT(rec[i - 1].fPos <= rec[i].fPos);
+ }
+ }
+ }
+}
+
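+// Emits one dithered 32-bit cache entry for the current fx and then steps fx
+// by dx. The caller guarantees fx stays within range, so no tile proc is
+// applied (hence "no check").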
+#define NO_CHECK_ITER \
+ do { \
+ unsigned fi = SkGradFixedToFixed(fx) >> SkGradientShaderBase::kCache32Shift; \
+ SkASSERT(fi <= 0xFF); \
+ fx += dx; \
+ *dstC++ = cache[toggle + fi]; \
+ toggle = next_dither_toggle(toggle); \
+ } while (0)
+
+namespace {
+
+typedef void (*LinearShadeProc)(TileProc proc, SkGradFixed dx, SkGradFixed fx,
+ SkPMColor* dstC, const SkPMColor* cache,
+ int toggle, int count);
+
+// Linear interpolation (lerp) is unnecessary if there are no sharp
+// discontinuities in the gradient - which must be true if there are
+// only 2 colors - but it's cheap.
+void shadeSpan_linear_vertical_lerp(TileProc proc, SkGradFixed dx, SkGradFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
+ // We're a vertical gradient, so no change in a span.
+ // If colors change sharply across the gradient, dithering is
+ // insufficient (it subsamples the color space) and we need to lerp.
+ unsigned fullIndex = proc(SkGradFixedToFixed(fx) - (SK_FixedHalf >> SkGradientShaderBase::kCache32Bits));
+ unsigned fi = fullIndex >> SkGradientShaderBase::kCache32Shift;
+ unsigned remainder = fullIndex & ((1 << SkGradientShaderBase::kCache32Shift) - 1);
+
+ int index0 = fi + toggle;
+ int index1 = index0;
+ if (fi < SkGradientShaderBase::kCache32Count - 1) {
+ index1 += 1;
+ }
+ SkPMColor lerp = SkFastFourByteInterp(cache[index1], cache[index0], remainder);
+ index0 ^= SkGradientShaderBase::kDitherStride32;
+ index1 ^= SkGradientShaderBase::kDitherStride32;
+ SkPMColor dlerp = SkFastFourByteInterp(cache[index1], cache[index0], remainder);
+ sk_memset32_dither(dstC, lerp, dlerp, count);
+}
+
+void shadeSpan_linear_clamp(TileProc proc, SkGradFixed dx, SkGradFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
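+ // SkClampRange splits the span into three runs: fCount0 leading pixels
+ // clamped to fV0, fCount1 interior pixels that walk the gradient starting at
+ // fFx1, and fCount2 trailing pixels clamped to fV1.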
+ SkClampRange range;
+ range.init(fx, dx, count, 0, SkGradientShaderBase::kCache32Count - 1);
+ range.validate(count);
+
+ if ((count = range.fCount0) > 0) {
+ sk_memset32_dither(dstC,
+ cache[toggle + range.fV0],
+ cache[next_dither_toggle(toggle) + range.fV0],
+ count);
+ dstC += count;
+ }
+ if ((count = range.fCount1) > 0) {
+ int unroll = count >> 3;
+ fx = range.fFx1;
+ for (int i = 0; i < unroll; i++) {
+ NO_CHECK_ITER; NO_CHECK_ITER;
+ NO_CHECK_ITER; NO_CHECK_ITER;
+ NO_CHECK_ITER; NO_CHECK_ITER;
+ NO_CHECK_ITER; NO_CHECK_ITER;
+ }
+ if ((count &= 7) > 0) {
+ do {
+ NO_CHECK_ITER;
+ } while (--count != 0);
+ }
+ }
+ if ((count = range.fCount2) > 0) {
+ sk_memset32_dither(dstC,
+ cache[toggle + range.fV1],
+ cache[next_dither_toggle(toggle) + range.fV1],
+ count);
+ }
+}
+
+void shadeSpan_linear_mirror(TileProc proc, SkGradFixed dx, SkGradFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
+ do {
+ unsigned fi = mirror_8bits(SkGradFixedToFixed(fx) >> 8);
+ SkASSERT(fi <= 0xFF);
+ fx += dx;
+ *dstC++ = cache[toggle + fi];
+ toggle = next_dither_toggle(toggle);
+ } while (--count != 0);
+}
+
+void shadeSpan_linear_repeat(TileProc proc, SkGradFixed dx, SkGradFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
+ do {
+ unsigned fi = repeat_8bits(SkGradFixedToFixed(fx) >> 8);
+ SkASSERT(fi <= 0xFF);
+ fx += dx;
+ *dstC++ = cache[toggle + fi];
+ toggle = next_dither_toggle(toggle);
+ } while (--count != 0);
+}
+
+}
+
+void SkLinearGradient::LinearGradientContext::shadeSpan(int x, int y, SkPMColor* SK_RESTRICT dstC,
+ int count) {
+ SkASSERT(count > 0);
+ const SkLinearGradient& linearGradient = static_cast<const SkLinearGradient&>(fShader);
+
+// Only use the Sk4f impl when known to be fast.
+#if defined(SKNX_IS_FAST)
+ if (SkShader::kClamp_TileMode == linearGradient.fTileMode &&
+ kLinear_MatrixClass == fDstToIndexClass)
+ {
+ this->shade4_clamp(x, y, dstC, count);
+ return;
+ }
+#endif
+
+ SkPoint srcPt;
+ SkMatrix::MapXYProc dstProc = fDstToIndexProc;
+ TileProc proc = linearGradient.fTileProc;
+ const SkPMColor* SK_RESTRICT cache = fCache->getCache32();
+ int toggle = init_dither_toggle(x, y);
+
+ if (fDstToIndexClass != kPerspective_MatrixClass) {
+ dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
+ SkGradFixed dx, fx = SkScalarToGradFixed(srcPt.fX);
+
+ if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
+ const auto step = fDstToIndex.fixedStepInX(SkIntToScalar(y));
+ // todo: do we need a real/high-precision value for dx here?
+ dx = SkScalarToGradFixed(step.fX);
+ } else {
+ SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
+ dx = SkScalarToGradFixed(fDstToIndex.getScaleX());
+ }
+
+ LinearShadeProc shadeProc = shadeSpan_linear_repeat;
+ if (0 == dx) {
+ shadeProc = shadeSpan_linear_vertical_lerp;
+ } else if (SkShader::kClamp_TileMode == linearGradient.fTileMode) {
+ shadeProc = shadeSpan_linear_clamp;
+ } else if (SkShader::kMirror_TileMode == linearGradient.fTileMode) {
+ shadeProc = shadeSpan_linear_mirror;
+ } else {
+ SkASSERT(SkShader::kRepeat_TileMode == linearGradient.fTileMode);
+ }
+ (*shadeProc)(proc, dx, fx, dstC, cache, toggle, count);
+ } else {
+ SkScalar dstX = SkIntToScalar(x);
+ SkScalar dstY = SkIntToScalar(y);
+ do {
+ dstProc(fDstToIndex, dstX, dstY, &srcPt);
+ unsigned fi = proc(SkScalarToFixed(srcPt.fX));
+ SkASSERT(fi <= 0xFFFF);
+ *dstC++ = cache[toggle + (fi >> kCache32Shift)];
+ toggle = next_dither_toggle(toggle);
+ dstX += SK_Scalar1;
+ } while (--count != 0);
+ }
+}
+
+SkShader::GradientType SkLinearGradient::asAGradient(GradientInfo* info) const {
+ if (info) {
+ commonAsAGradient(info);
+ info->fPoint[0] = fStart;
+ info->fPoint[1] = fEnd;
+ }
+ return kLinear_GradientType;
+}
+
+#if SK_SUPPORT_GPU
+
+#include "GrColorSpaceXform.h"
+#include "glsl/GrGLSLCaps.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "SkGr.h"
+
+/////////////////////////////////////////////////////////////////////
+
+class GrLinearGradient : public GrGradientEffect {
+public:
+ class GLSLLinearProcessor;
+
+ static sk_sp<GrFragmentProcessor> Make(const CreateArgs& args) {
+ return sk_sp<GrFragmentProcessor>(new GrLinearGradient(args));
+ }
+
+ virtual ~GrLinearGradient() { }
+
+ const char* name() const override { return "Linear Gradient"; }
+
+private:
+ GrLinearGradient(const CreateArgs& args)
+ : INHERITED(args) {
+ this->initClassID<GrLinearGradient>();
+ }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ virtual void onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const override;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef GrGradientEffect INHERITED;
+};
+
+/////////////////////////////////////////////////////////////////////
+
+class GrLinearGradient::GLSLLinearProcessor : public GrGradientEffect::GLSLProcessor {
+public:
+ GLSLLinearProcessor(const GrProcessor&) {}
+
+ virtual ~GLSLLinearProcessor() { }
+
+ virtual void emitCode(EmitArgs&) override;
+
+ static void GenKey(const GrProcessor& processor, const GrGLSLCaps&, GrProcessorKeyBuilder* b) {
+ b->add32(GenBaseGradientKey(processor));
+ }
+
+private:
+ typedef GrGradientEffect::GLSLProcessor INHERITED;
+};
+
+/////////////////////////////////////////////////////////////////////
+
+GrGLSLFragmentProcessor* GrLinearGradient::onCreateGLSLInstance() const {
+ return new GrLinearGradient::GLSLLinearProcessor(*this);
+}
+
+void GrLinearGradient::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrLinearGradient::GLSLLinearProcessor::GenKey(*this, caps, b);
+}
+
+/////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrLinearGradient);
+
+sk_sp<GrFragmentProcessor> GrLinearGradient::TestCreate(GrProcessorTestData* d) {
+ SkPoint points[] = {{d->fRandom->nextUScalar1(), d->fRandom->nextUScalar1()},
+ {d->fRandom->nextUScalar1(), d->fRandom->nextUScalar1()}};
+
+ SkColor colors[kMaxRandomGradientColors];
+ SkScalar stopsArray[kMaxRandomGradientColors];
+ SkScalar* stops = stopsArray;
+ SkShader::TileMode tm;
+ int colorCount = RandomGradientParams(d->fRandom, colors, &stops, &tm);
+ auto shader = SkGradientShader::MakeLinear(points, colors, stops, colorCount, tm);
+ SkMatrix viewMatrix = GrTest::TestMatrix(d->fRandom);
+ auto dstColorSpace = GrTest::TestColorSpace(d->fRandom);
+ sk_sp<GrFragmentProcessor> fp = shader->asFragmentProcessor(SkShader::AsFPArgs(
+ d->fContext, &viewMatrix, NULL, kNone_SkFilterQuality, dstColorSpace.get(),
+ SkSourceGammaTreatment::kRespect));
+ GrAlwaysAssert(fp);
+ return fp;
+}
+
+/////////////////////////////////////////////////////////////////////
+
+void GrLinearGradient::GLSLLinearProcessor::emitCode(EmitArgs& args) {
+ const GrLinearGradient& ge = args.fFp.cast<GrLinearGradient>();
+ this->emitUniforms(args.fUniformHandler, ge);
+ SkString t = args.fFragBuilder->ensureCoords2D(args.fTransformedCoords[0]);
+ t.append(".x");
+ this->emitColor(args.fFragBuilder,
+ args.fUniformHandler,
+ args.fGLSLCaps,
+ ge,
+ t.c_str(),
+ args.fOutputColor,
+ args.fInputColor,
+ args.fTexSamplers);
+}
+
+/////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> SkLinearGradient::asFragmentProcessor(const AsFPArgs& args) const {
+ SkASSERT(args.fContext);
+
+ SkMatrix matrix;
+ if (!this->getLocalMatrix().invert(&matrix)) {
+ return nullptr;
+ }
+ if (args.fLocalMatrix) {
+ SkMatrix inv;
+ if (!args.fLocalMatrix->invert(&inv)) {
+ return nullptr;
+ }
+ matrix.postConcat(inv);
+ }
+ matrix.postConcat(fPtsToUnit);
+
+ sk_sp<GrColorSpaceXform> colorSpaceXform = GrColorSpaceXform::Make(fColorSpace.get(),
+ args.fDstColorSpace);
+ sk_sp<GrFragmentProcessor> inner(GrLinearGradient::Make(
+ GrGradientEffect::CreateArgs(args.fContext, this, &matrix, fTileMode,
+ std::move(colorSpaceXform), SkToBool(args.fDstColorSpace))));
+ return GrFragmentProcessor::MulOutputByInputAlpha(std::move(inner));
+}
+
+
+#endif
+
+#ifndef SK_IGNORE_TO_STRING
+void SkLinearGradient::toString(SkString* str) const {
+ str->append("SkLinearGradient (");
+
+ str->appendf("start: (%f, %f)", fStart.fX, fStart.fY);
+ str->appendf(" end: (%f, %f) ", fEnd.fX, fEnd.fY);
+
+ this->INHERITED::toString(str);
+
+ str->append(")");
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "SkNx.h"
+
+static const SkLinearGradient::LinearGradientContext::Rec*
+find_forward(const SkLinearGradient::LinearGradientContext::Rec rec[], float tiledX) {
+ SkASSERT(tiledX >= 0 && tiledX <= 1);
+
+ SkASSERT(rec[0].fPos >= 0 && rec[0].fPos <= 1);
+ SkASSERT(rec[1].fPos >= 0 && rec[1].fPos <= 1);
+ SkASSERT(rec[0].fPos <= rec[1].fPos);
+ rec += 1;
+ while (rec->fPos < tiledX || rec->fPosScale == 0) {
+ SkASSERT(rec[0].fPos >= 0 && rec[0].fPos <= 1);
+ SkASSERT(rec[1].fPos >= 0 && rec[1].fPos <= 1);
+ SkASSERT(rec[0].fPos <= rec[1].fPos);
+ rec += 1;
+ }
+ return rec - 1;
+}
+
+static const SkLinearGradient::LinearGradientContext::Rec*
+find_backward(const SkLinearGradient::LinearGradientContext::Rec rec[], float tiledX) {
+ SkASSERT(tiledX >= 0 && tiledX <= 1);
+
+ SkASSERT(rec[0].fPos >= 0 && rec[0].fPos <= 1);
+ SkASSERT(rec[1].fPos >= 0 && rec[1].fPos <= 1);
+ SkASSERT(rec[0].fPos <= rec[1].fPos);
+ while (tiledX < rec->fPos || rec[1].fPosScale == 0) {
+ rec -= 1;
+ SkASSERT(rec[0].fPos >= 0 && rec[0].fPos <= 1);
+ SkASSERT(rec[1].fPos >= 0 && rec[1].fPos <= 1);
+ SkASSERT(rec[0].fPos <= rec[1].fPos);
+ }
+ return rec;
+}
+
+template <bool apply_alpha> SkPMColor trunc_from_255(const Sk4f& x) {
+ SkPMColor c;
+ SkNx_cast<uint8_t>(x).store(&c);
+ if (apply_alpha) {
+ c = SkPreMultiplyARGB(SkGetPackedA32(c), SkGetPackedR32(c),
+ SkGetPackedG32(c), SkGetPackedB32(c));
+ }
+ return c;
+}
+
+template <bool apply_alpha> void fill(SkPMColor dst[], int count,
+ const Sk4f& c4, const Sk4f& c4other) {
+ sk_memset32_dither(dst, trunc_from_255<apply_alpha>(c4),
+ trunc_from_255<apply_alpha>(c4other), count);
+}
+
+template <bool apply_alpha> void fill(SkPMColor dst[], int count, const Sk4f& c4) {
+ // Assumes that c4 does not need to be dithered.
+ sk_memset32(dst, trunc_from_255<apply_alpha>(c4), count);
+}
+
+/*
+ * TODOs
+ *
+ * - tilemodes
+ * - interp before or after premul
+ * - perspective
+ * - optimizations
+ * - use fixed (32bit or 16bit) instead of floats?
+ */
+
+static Sk4f lerp_color(float fx, const SkLinearGradient::LinearGradientContext::Rec* rec) {
+ SkASSERT(fx >= rec[0].fPos);
+ SkASSERT(fx <= rec[1].fPos);
+
+ const float p0 = rec[0].fPos;
+ const Sk4f c0 = rec[0].fColor;
+ const Sk4f c1 = rec[1].fColor;
+ const Sk4f diffc = c1 - c0;
+ const float scale = rec[1].fPosScale;
+ const float t = (fx - p0) * scale;
+ return c0 + Sk4f(t) * diffc;
+}
+
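+// Writes n pixels of a linear color ramp that starts at c and advances by dc
+// per pixel. Four running accumulators, offset by 0..3 steps and alternating
+// dither biases, let the main loop emit 4 pixels per iteration.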
+template <bool apply_alpha> void ramp(SkPMColor dstC[], int n, const Sk4f& c, const Sk4f& dc,
+ const Sk4f& dither0, const Sk4f& dither1) {
+ Sk4f dc2 = dc + dc;
+ Sk4f dc4 = dc2 + dc2;
+ Sk4f cd0 = c + dither0;
+ Sk4f cd1 = c + dc + dither1;
+ Sk4f cd2 = cd0 + dc2;
+ Sk4f cd3 = cd1 + dc2;
+ while (n >= 4) {
+ if (!apply_alpha) {
+ Sk4f_ToBytes((uint8_t*)dstC, cd0, cd1, cd2, cd3);
+ dstC += 4;
+ } else {
+ *dstC++ = trunc_from_255<apply_alpha>(cd0);
+ *dstC++ = trunc_from_255<apply_alpha>(cd1);
+ *dstC++ = trunc_from_255<apply_alpha>(cd2);
+ *dstC++ = trunc_from_255<apply_alpha>(cd3);
+ }
+ cd0 = cd0 + dc4;
+ cd1 = cd1 + dc4;
+ cd2 = cd2 + dc4;
+ cd3 = cd3 + dc4;
+ n -= 4;
+ }
+ if (n & 2) {
+ *dstC++ = trunc_from_255<apply_alpha>(cd0);
+ *dstC++ = trunc_from_255<apply_alpha>(cd1);
+ cd0 = cd0 + dc2;
+ }
+ if (n & 1) {
+ *dstC++ = trunc_from_255<apply_alpha>(cd0);
+ }
+}
+
+template <bool apply_alpha, bool dx_is_pos>
+void SkLinearGradient::LinearGradientContext::shade4_dx_clamp(SkPMColor dstC[], int count,
+ float fx, float dx, float invDx,
+ const float dither[2]) {
+ Sk4f dither0(dither[0]);
+ Sk4f dither1(dither[1]);
+ const Rec* rec = fRecs.begin();
+
+ const Sk4f dx4 = Sk4f(dx);
+ SkDEBUGCODE(SkPMColor* endDstC = dstC + count;)
+
+ if (dx_is_pos) {
+ if (fx < 0) {
+ // count is guaranteed to be positive, but the first arg may overflow int32 after
+ // increment => casting to uint32 ensures correct clamping.
+ int n = SkTMin<uint32_t>(static_cast<uint32_t>(SkFloatToIntFloor(-fx * invDx)) + 1,
+ count);
+ SkASSERT(n > 0);
+ fill<apply_alpha>(dstC, n, rec[0].fColor);
+ count -= n;
+ dstC += n;
+ fx += n * dx;
+ SkASSERT(0 == count || fx >= 0);
+ if (n & 1) {
+ SkTSwap(dither0, dither1);
+ }
+ }
+ } else { // dx < 0
+ if (fx > 1) {
+ // count is guaranteed to be positive, but the first arg may overflow int32 after
+ // increment => casting to uint32 ensures correct clamping.
+ int n = SkTMin<uint32_t>(static_cast<uint32_t>(SkFloatToIntFloor((1 - fx) * invDx)) + 1,
+ count);
+ SkASSERT(n > 0);
+ fill<apply_alpha>(dstC, n, rec[fRecs.count() - 1].fColor);
+ count -= n;
+ dstC += n;
+ fx += n * dx;
+ SkASSERT(0 == count || fx <= 1);
+ if (n & 1) {
+ SkTSwap(dither0, dither1);
+ }
+ }
+ }
+ SkASSERT(count >= 0);
+
+ const Rec* r;
+ if (dx_is_pos) {
+ r = fRecs.begin(); // start at the beginning
+ } else {
+ r = fRecs.begin() + fRecs.count() - 2; // start at the end
+ }
+
+ while (count > 0) {
+ if (dx_is_pos) {
+ if (fx >= 1) {
+ fill<apply_alpha>(dstC, count, rec[fRecs.count() - 1].fColor);
+ return;
+ }
+ } else { // dx < 0
+ if (fx <= 0) {
+ fill<apply_alpha>(dstC, count, rec[0].fColor);
+ return;
+ }
+ }
+
+ if (dx_is_pos) {
+ r = find_forward(r, fx);
+ } else {
+ r = find_backward(r, fx);
+ }
+ SkASSERT(r >= fRecs.begin() && r < fRecs.begin() + fRecs.count() - 1);
+
+ const float p0 = r[0].fPos;
+ const Sk4f c0 = r[0].fColor;
+ const float p1 = r[1].fPos;
+ const Sk4f diffc = Sk4f(r[1].fColor) - c0;
+ const float scale = r[1].fPosScale;
+ const float t = (fx - p0) * scale;
+ const Sk4f c = c0 + Sk4f(t) * diffc;
+ const Sk4f dc = diffc * dx4 * Sk4f(scale);
+
+ int n;
+ if (dx_is_pos) {
+ n = SkTMin((int)((p1 - fx) * invDx) + 1, count);
+ } else {
+ n = SkTMin((int)((p0 - fx) * invDx) + 1, count);
+ }
+
+ fx += n * dx;
+ // fx should now be outside the p0..p1 interval. However, due to float precision loss,
+ // it's possible that fx is slightly too small/large, so we clamp it.
+ if (dx_is_pos) {
+ fx = SkTMax(fx, p1);
+ } else {
+ fx = SkTMin(fx, p0);
+ }
+
+ ramp<apply_alpha>(dstC, n, c, dc, dither0, dither1);
+ dstC += n;
+ SkASSERT(dstC <= endDstC);
+
+ if (n & 1) {
+ SkTSwap(dither0, dither1);
+ }
+
+ count -= n;
+ SkASSERT(count >= 0);
+ }
+}
+
+void SkLinearGradient::LinearGradientContext::shade4_clamp(int x, int y, SkPMColor dstC[],
+ int count) {
+ SkASSERT(count > 0);
+ SkASSERT(kLinear_MatrixClass == fDstToIndexClass);
+
+ SkPoint srcPt;
+ fDstToIndexProc(fDstToIndex, x + SK_ScalarHalf, y + SK_ScalarHalf, &srcPt);
+ float fx = srcPt.x();
+ const float dx = fDstToIndex.getScaleX();
+
+ // Default our dither bias values to 1/2 (plain rounding), which is effectively no dithering
+ float dither0 = 0.5f;
+ float dither1 = 0.5f;
+ if (fDither) {
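+ // 2x2 ordered-dither matrix; the bias pair is chosen by the pixel's (x, y)
+ // parity and alternated along the span.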
+ const float ditherCell[] = {
+ 1/8.0f, 5/8.0f,
+ 7/8.0f, 3/8.0f,
+ };
+ const int rowIndex = (y & 1) << 1;
+ dither0 = ditherCell[rowIndex];
+ dither1 = ditherCell[rowIndex + 1];
+ if (x & 1) {
+ SkTSwap(dither0, dither1);
+ }
+ }
+ const float dither[2] = { dither0, dither1 };
+ const float invDx = 1 / dx;
+
+ if (SkScalarNearlyZero(dx * count)) { // gradient is vertical
+ const float pinFx = SkTPin(fx, 0.0f, 1.0f);
+ Sk4f c = lerp_color(pinFx, find_forward(fRecs.begin(), pinFx));
+ if (fApplyAlphaAfterInterp) {
+ fill<true>(dstC, count, c + dither0, c + dither1);
+ } else {
+ fill<false>(dstC, count, c + dither0, c + dither1);
+ }
+ return;
+ }
+
+ if (dx > 0) {
+ if (fApplyAlphaAfterInterp) {
+ this->shade4_dx_clamp<true, true>(dstC, count, fx, dx, invDx, dither);
+ } else {
+ this->shade4_dx_clamp<false, true>(dstC, count, fx, dx, invDx, dither);
+ }
+ } else {
+ if (fApplyAlphaAfterInterp) {
+ this->shade4_dx_clamp<true, false>(dstC, count, fx, dx, invDx, dither);
+ } else {
+ this->shade4_dx_clamp<false, false>(dstC, count, fx, dx, invDx, dither);
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/effects/gradients/SkLinearGradient.h b/gfx/skia/skia/src/effects/gradients/SkLinearGradient.h
new file mode 100644
index 000000000..7a85b88cb
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/SkLinearGradient.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLinearGradient_DEFINED
+#define SkLinearGradient_DEFINED
+
+#include "SkGradientShaderPriv.h"
+#include "SkNx.h"
+
+struct Sk4fStorage {
+ float fArray[4];
+
+ operator Sk4f() const {
+ return Sk4f::Load(fArray);
+ }
+
+ Sk4fStorage& operator=(const Sk4f& src) {
+ src.store(fArray);
+ return *this;
+ }
+};
+
+class SkLinearGradient : public SkGradientShaderBase {
+public:
+ enum {
+ // Temp flag for testing the 4f impl.
+ kForce4fContext_PrivateFlag = 1 << 7,
+ };
+
+ SkLinearGradient(const SkPoint pts[2], const Descriptor&);
+
+ class LinearGradientContext : public SkGradientShaderBase::GradientShaderBaseContext {
+ public:
+ LinearGradientContext(const SkLinearGradient&, const ContextRec&);
+
+ void shadeSpan(int x, int y, SkPMColor dstC[], int count) override;
+
+ struct Rec {
+ Sk4fStorage fColor;
+ float fPos;
+ float fPosScale;
+ };
+ private:
+ SkTDArray<Rec> fRecs;
+ bool fApplyAlphaAfterInterp;
+
+ void shade4_clamp(int x, int y, SkPMColor dstC[], int count);
+ template <bool, bool> void shade4_dx_clamp(SkPMColor dstC[], int count, float fx, float dx,
+ float invDx, const float dither[2]);
+
+ typedef SkGradientShaderBase::GradientShaderBaseContext INHERITED;
+ };
+
+ GradientType asAGradient(GradientInfo* info) const override;
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(const AsFPArgs&) const override;
+#endif
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkLinearGradient)
+
+protected:
+ SkLinearGradient(SkReadBuffer& buffer);
+ void flatten(SkWriteBuffer& buffer) const override;
+ size_t onContextSize(const ContextRec&) const override;
+ Context* onCreateContext(const ContextRec&, void* storage) const override;
+
+private:
+ class LinearGradient4fContext;
+
+ friend class SkGradientShader;
+ typedef SkGradientShaderBase INHERITED;
+ const SkPoint fStart;
+ const SkPoint fEnd;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/effects/gradients/SkRadialGradient.cpp b/gfx/skia/skia/src/effects/gradients/SkRadialGradient.cpp
new file mode 100644
index 000000000..18ef37686
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/SkRadialGradient.cpp
@@ -0,0 +1,386 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkRadialGradient.h"
+#include "SkNx.h"
+
+namespace {
+
+// GCC doesn't like using static functions as template arguments. So force these to be non-static.
+inline SkFixed mirror_tileproc_nonstatic(SkFixed x) {
+ return mirror_tileproc(x);
+}
+
+inline SkFixed repeat_tileproc_nonstatic(SkFixed x) {
+ return repeat_tileproc(x);
+}
+
+SkMatrix rad_to_unit_matrix(const SkPoint& center, SkScalar radius) {
+ SkScalar inv = SkScalarInvert(radius);
+
+ SkMatrix matrix;
+ matrix.setTranslate(-center.fX, -center.fY);
+ matrix.postScale(inv, inv);
+ return matrix;
+}
+
+
+} // namespace
+
+/////////////////////////////////////////////////////////////////////
+
+SkRadialGradient::SkRadialGradient(const SkPoint& center, SkScalar radius, const Descriptor& desc)
+ : SkGradientShaderBase(desc, rad_to_unit_matrix(center, radius))
+ , fCenter(center)
+ , fRadius(radius) {
+}
+
+size_t SkRadialGradient::onContextSize(const ContextRec&) const {
+ return sizeof(RadialGradientContext);
+}
+
+SkShader::Context* SkRadialGradient::onCreateContext(const ContextRec& rec, void* storage) const {
+ return CheckedCreateContext<RadialGradientContext>(storage, *this, rec);
+}
+
+SkRadialGradient::RadialGradientContext::RadialGradientContext(
+ const SkRadialGradient& shader, const ContextRec& rec)
+ : INHERITED(shader, rec) {}
+
+SkShader::GradientType SkRadialGradient::asAGradient(GradientInfo* info) const {
+ if (info) {
+ commonAsAGradient(info);
+ info->fPoint[0] = fCenter;
+ info->fRadius[0] = fRadius;
+ }
+ return kRadial_GradientType;
+}
+
+sk_sp<SkFlattenable> SkRadialGradient::CreateProc(SkReadBuffer& buffer) {
+ DescriptorScope desc;
+ if (!desc.unflatten(buffer)) {
+ return nullptr;
+ }
+ const SkPoint center = buffer.readPoint();
+ const SkScalar radius = buffer.readScalar();
+ return SkGradientShader::MakeRadial(center, radius, desc.fColors, std::move(desc.fColorSpace),
+ desc.fPos, desc.fCount, desc.fTileMode, desc.fGradFlags,
+ desc.fLocalMatrix);
+}
+
+void SkRadialGradient::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writePoint(fCenter);
+ buffer.writeScalar(fRadius);
+}
+
+namespace {
+
+inline bool radial_completely_pinned(SkScalar fx, SkScalar dx, SkScalar fy, SkScalar dy) {
+ // fast, overly-conservative test: checks unit square instead of unit circle
+ bool xClamped = (fx >= 1 && dx >= 0) || (fx <= -1 && dx <= 0);
+ bool yClamped = (fy >= 1 && dy >= 0) || (fy <= -1 && dy <= 0);
+ return xClamped || yClamped;
+}
+
+typedef void (* RadialShadeProc)(SkScalar sfx, SkScalar sdx,
+ SkScalar sfy, SkScalar sdy,
+ SkPMColor* dstC, const SkPMColor* cache,
+ int count, int toggle);
+
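+// Approximate sqrt via the reciprocal square root: sqrt(R) == R * (1/sqrt(R)),
+// letting rsqrt() use a fast hardware estimate where one is available.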
+static inline Sk4f fast_sqrt(const Sk4f& R) {
+ return R * R.rsqrt();
+}
+
+static inline Sk4f sum_squares(const Sk4f& a, const Sk4f& b) {
+ return a * a + b * b;
+}
+
+void shadeSpan_radial_clamp2(SkScalar sfx, SkScalar sdx, SkScalar sfy, SkScalar sdy,
+ SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
+ int count, int toggle) {
+ if (radial_completely_pinned(sfx, sdx, sfy, sdy)) {
+ unsigned fi = SkGradientShaderBase::kCache32Count - 1;
+ sk_memset32_dither(dstC,
+ cache[toggle + fi],
+ cache[next_dither_toggle(toggle) + fi],
+ count);
+ } else {
+ const Sk4f min(SK_ScalarNearlyZero);
+ const Sk4f max(255);
+ const float scale = 255;
+ sfx *= scale;
+ sfy *= scale;
+ sdx *= scale;
+ sdy *= scale;
+ const Sk4f fx4(sfx, sfx + sdx, sfx + 2*sdx, sfx + 3*sdx);
+ const Sk4f fy4(sfy, sfy + sdy, sfy + 2*sdy, sfy + 3*sdy);
+ const Sk4f dx4(sdx * 4);
+ const Sk4f dy4(sdy * 4);
+
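+ // The squared distance R = fx^2 + fy^2 is quadratic along the span, so it is
+ // stepped with forward differences: dR is the first difference per 4-pixel
+ // step and ddR the constant second difference.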
+ Sk4f tmpxy = fx4 * dx4 + fy4 * dy4;
+ Sk4f tmpdxdy = sum_squares(dx4, dy4);
+ Sk4f R = Sk4f::Max(sum_squares(fx4, fy4), min);
+ Sk4f dR = tmpxy + tmpxy + tmpdxdy;
+ const Sk4f ddR = tmpdxdy + tmpdxdy;
+
+ for (int i = 0; i < (count >> 2); ++i) {
+ Sk4f dist = Sk4f::Min(fast_sqrt(R), max);
+ R = Sk4f::Max(R + dR, min);
+ dR = dR + ddR;
+
+ uint8_t fi[4];
+ SkNx_cast<uint8_t>(dist).store(fi);
+
+ for (int i = 0; i < 4; i++) {
+ *dstC++ = cache[toggle + fi[i]];
+ toggle = next_dither_toggle(toggle);
+ }
+ }
+ count &= 3;
+ if (count) {
+ Sk4f dist = Sk4f::Min(fast_sqrt(R), max);
+
+ uint8_t fi[4];
+ SkNx_cast<uint8_t>(dist).store(fi);
+ for (int i = 0; i < count; i++) {
+ *dstC++ = cache[toggle + fi[i]];
+ toggle = next_dither_toggle(toggle);
+ }
+ }
+ }
+}
+
+// Unrolling this loop doesn't seem to help (when using floats); we're stalling
+// waiting for the results of the sqrt (?), and don't have enough extra
+// registers to keep many in flight.
+template <SkFixed (*TileProc)(SkFixed)>
+void shadeSpan_radial(SkScalar fx, SkScalar dx, SkScalar fy, SkScalar dy,
+ SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
+ int count, int toggle) {
+ do {
+ const SkFixed dist = SkFloatToFixed(sk_float_sqrt(fx*fx + fy*fy));
+ const unsigned fi = TileProc(dist);
+ SkASSERT(fi <= 0xFFFF);
+ *dstC++ = cache[toggle + (fi >> SkGradientShaderBase::kCache32Shift)];
+ toggle = next_dither_toggle(toggle);
+ fx += dx;
+ fy += dy;
+ } while (--count != 0);
+}
+
+void shadeSpan_radial_mirror(SkScalar fx, SkScalar dx, SkScalar fy, SkScalar dy,
+ SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
+ int count, int toggle) {
+ shadeSpan_radial<mirror_tileproc_nonstatic>(fx, dx, fy, dy, dstC, cache, count, toggle);
+}
+
+void shadeSpan_radial_repeat(SkScalar fx, SkScalar dx, SkScalar fy, SkScalar dy,
+ SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
+ int count, int toggle) {
+ shadeSpan_radial<repeat_tileproc_nonstatic>(fx, dx, fy, dy, dstC, cache, count, toggle);
+}
+
+} // namespace
+
+void SkRadialGradient::RadialGradientContext::shadeSpan(int x, int y,
+ SkPMColor* SK_RESTRICT dstC, int count) {
+ SkASSERT(count > 0);
+
+ const SkRadialGradient& radialGradient = static_cast<const SkRadialGradient&>(fShader);
+
+ SkPoint srcPt;
+ SkMatrix::MapXYProc dstProc = fDstToIndexProc;
+ TileProc proc = radialGradient.fTileProc;
+ const SkPMColor* SK_RESTRICT cache = fCache->getCache32();
+ int toggle = init_dither_toggle(x, y);
+
+ if (fDstToIndexClass != kPerspective_MatrixClass) {
+ dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
+ SkScalar sdx = fDstToIndex.getScaleX();
+ SkScalar sdy = fDstToIndex.getSkewY();
+
+ if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
+ const auto step = fDstToIndex.fixedStepInX(SkIntToScalar(y));
+ sdx = step.fX;
+ sdy = step.fY;
+ } else {
+ SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
+ }
+
+ RadialShadeProc shadeProc = shadeSpan_radial_repeat;
+ if (SkShader::kClamp_TileMode == radialGradient.fTileMode) {
+ shadeProc = shadeSpan_radial_clamp2;
+ } else if (SkShader::kMirror_TileMode == radialGradient.fTileMode) {
+ shadeProc = shadeSpan_radial_mirror;
+ } else {
+ SkASSERT(SkShader::kRepeat_TileMode == radialGradient.fTileMode);
+ }
+ (*shadeProc)(srcPt.fX, sdx, srcPt.fY, sdy, dstC, cache, count, toggle);
+ } else { // perspective case
+ SkScalar dstX = SkIntToScalar(x);
+ SkScalar dstY = SkIntToScalar(y);
+ do {
+ dstProc(fDstToIndex, dstX, dstY, &srcPt);
+ unsigned fi = proc(SkScalarToFixed(srcPt.length()));
+ SkASSERT(fi <= 0xFFFF);
+ *dstC++ = cache[fi >> SkGradientShaderBase::kCache32Shift];
+ dstX += SK_Scalar1;
+ } while (--count != 0);
+ }
+}
+
+/////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+#include "SkGr.h"
+#include "glsl/GrGLSLCaps.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+
+class GrRadialGradient : public GrGradientEffect {
+public:
+ class GLSLRadialProcessor;
+
+ static sk_sp<GrFragmentProcessor> Make(const CreateArgs& args) {
+ return sk_sp<GrFragmentProcessor>(new GrRadialGradient(args));
+ }
+
+ virtual ~GrRadialGradient() { }
+
+ const char* name() const override { return "Radial Gradient"; }
+
+private:
+ GrRadialGradient(const CreateArgs& args)
+ : INHERITED(args) {
+ this->initClassID<GrRadialGradient>();
+ }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ virtual void onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const override;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef GrGradientEffect INHERITED;
+};
+
+/////////////////////////////////////////////////////////////////////
+
+class GrRadialGradient::GLSLRadialProcessor : public GrGradientEffect::GLSLProcessor {
+public:
+ GLSLRadialProcessor(const GrProcessor&) {}
+ virtual ~GLSLRadialProcessor() { }
+
+ virtual void emitCode(EmitArgs&) override;
+
+ static void GenKey(const GrProcessor& processor, const GrGLSLCaps&, GrProcessorKeyBuilder* b) {
+ b->add32(GenBaseGradientKey(processor));
+ }
+
+private:
+ typedef GrGradientEffect::GLSLProcessor INHERITED;
+
+};
+
+/////////////////////////////////////////////////////////////////////
+
+GrGLSLFragmentProcessor* GrRadialGradient::onCreateGLSLInstance() const {
+ return new GrRadialGradient::GLSLRadialProcessor(*this);
+}
+
+void GrRadialGradient::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrRadialGradient::GLSLRadialProcessor::GenKey(*this, caps, b);
+}
+
+/////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrRadialGradient);
+
+sk_sp<GrFragmentProcessor> GrRadialGradient::TestCreate(GrProcessorTestData* d) {
+ SkPoint center = {d->fRandom->nextUScalar1(), d->fRandom->nextUScalar1()};
+ SkScalar radius = d->fRandom->nextUScalar1();
+
+ SkColor colors[kMaxRandomGradientColors];
+ SkScalar stopsArray[kMaxRandomGradientColors];
+ SkScalar* stops = stopsArray;
+ SkShader::TileMode tm;
+ int colorCount = RandomGradientParams(d->fRandom, colors, &stops, &tm);
+ auto shader = SkGradientShader::MakeRadial(center, radius, colors, stops, colorCount, tm);
+ SkMatrix viewMatrix = GrTest::TestMatrix(d->fRandom);
+ auto dstColorSpace = GrTest::TestColorSpace(d->fRandom);
+ sk_sp<GrFragmentProcessor> fp = shader->asFragmentProcessor(SkShader::AsFPArgs(
+ d->fContext, &viewMatrix, NULL, kNone_SkFilterQuality, dstColorSpace.get(),
+ SkSourceGammaTreatment::kRespect));
+ GrAlwaysAssert(fp);
+ return fp;
+}
+
+/////////////////////////////////////////////////////////////////////
+
+void GrRadialGradient::GLSLRadialProcessor::emitCode(EmitArgs& args) {
+ const GrRadialGradient& ge = args.fFp.cast<GrRadialGradient>();
+ this->emitUniforms(args.fUniformHandler, ge);
+ SkString t("length(");
+ t.append(args.fFragBuilder->ensureCoords2D(args.fTransformedCoords[0]));
+ t.append(")");
+ this->emitColor(args.fFragBuilder,
+ args.fUniformHandler,
+ args.fGLSLCaps,
+ ge, t.c_str(),
+ args.fOutputColor,
+ args.fInputColor,
+ args.fTexSamplers);
+}
+
+/////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> SkRadialGradient::asFragmentProcessor(const AsFPArgs& args) const {
+ SkASSERT(args.fContext);
+
+ SkMatrix matrix;
+ if (!this->getLocalMatrix().invert(&matrix)) {
+ return nullptr;
+ }
+ if (args.fLocalMatrix) {
+ SkMatrix inv;
+ if (!args.fLocalMatrix->invert(&inv)) {
+ return nullptr;
+ }
+ matrix.postConcat(inv);
+ }
+ matrix.postConcat(fPtsToUnit);
+ sk_sp<GrColorSpaceXform> colorSpaceXform = GrColorSpaceXform::Make(fColorSpace.get(),
+ args.fDstColorSpace);
+ sk_sp<GrFragmentProcessor> inner(GrRadialGradient::Make(
+ GrGradientEffect::CreateArgs(args.fContext, this, &matrix, fTileMode,
+ std::move(colorSpaceXform), SkToBool(args.fDstColorSpace))));
+ return GrFragmentProcessor::MulOutputByInputAlpha(std::move(inner));
+}
+
+#endif
+
+#ifndef SK_IGNORE_TO_STRING
+void SkRadialGradient::toString(SkString* str) const {
+ str->append("SkRadialGradient: (");
+
+ str->append("center: (");
+ str->appendScalar(fCenter.fX);
+ str->append(", ");
+ str->appendScalar(fCenter.fY);
+ str->append(") radius: ");
+ str->appendScalar(fRadius);
+ str->append(" ");
+
+ this->INHERITED::toString(str);
+
+ str->append(")");
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/gradients/SkRadialGradient.h b/gfx/skia/skia/src/effects/gradients/SkRadialGradient.h
new file mode 100644
index 000000000..0b239039e
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/SkRadialGradient.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRadialGradient_DEFINED
+#define SkRadialGradient_DEFINED
+
+#include "SkGradientShaderPriv.h"
+
+class SkRadialGradient : public SkGradientShaderBase {
+public:
+ SkRadialGradient(const SkPoint& center, SkScalar radius, const Descriptor&);
+
+ class RadialGradientContext : public SkGradientShaderBase::GradientShaderBaseContext {
+ public:
+ RadialGradientContext(const SkRadialGradient&, const ContextRec&);
+
+ void shadeSpan(int x, int y, SkPMColor dstC[], int count) override;
+
+ private:
+ typedef SkGradientShaderBase::GradientShaderBaseContext INHERITED;
+ };
+
+ GradientType asAGradient(GradientInfo* info) const override;
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(const AsFPArgs&) const override;
+#endif
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkRadialGradient)
+
+protected:
+ SkRadialGradient(SkReadBuffer& buffer);
+ void flatten(SkWriteBuffer& buffer) const override;
+ size_t onContextSize(const ContextRec&) const override;
+ Context* onCreateContext(const ContextRec&, void* storage) const override;
+
+private:
+ const SkPoint fCenter;
+ const SkScalar fRadius;
+
+ friend class SkGradientShader;
+ typedef SkGradientShaderBase INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/effects/gradients/SkSweepGradient.cpp b/gfx/skia/skia/src/effects/gradients/SkSweepGradient.cpp
new file mode 100644
index 000000000..d1fe269b9
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/SkSweepGradient.cpp
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkSweepGradient.h"
+
+static SkMatrix translate(SkScalar dx, SkScalar dy) {
+ SkMatrix matrix;
+ matrix.setTranslate(dx, dy);
+ return matrix;
+}
+
+SkSweepGradient::SkSweepGradient(SkScalar cx, SkScalar cy, const Descriptor& desc)
+ : SkGradientShaderBase(desc, translate(-cx, -cy))
+ , fCenter(SkPoint::Make(cx, cy))
+{
+ // overwrite the tilemode to a canonical value (since sweep ignores it)
+ fTileMode = SkShader::kClamp_TileMode;
+}
+
+SkShader::GradientType SkSweepGradient::asAGradient(GradientInfo* info) const {
+ if (info) {
+ commonAsAGradient(info);
+ info->fPoint[0] = fCenter;
+ }
+ return kSweep_GradientType;
+}
+
+sk_sp<SkFlattenable> SkSweepGradient::CreateProc(SkReadBuffer& buffer) {
+ DescriptorScope desc;
+ if (!desc.unflatten(buffer)) {
+ return nullptr;
+ }
+ const SkPoint center = buffer.readPoint();
+ return SkGradientShader::MakeSweep(center.x(), center.y(), desc.fColors,
+ std::move(desc.fColorSpace), desc.fPos, desc.fCount,
+ desc.fGradFlags, desc.fLocalMatrix);
+}
+
+void SkSweepGradient::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writePoint(fCenter);
+}
+
+size_t SkSweepGradient::onContextSize(const ContextRec&) const {
+ return sizeof(SweepGradientContext);
+}
+
+SkShader::Context* SkSweepGradient::onCreateContext(const ContextRec& rec, void* storage) const {
+ return CheckedCreateContext<SweepGradientContext>(storage, *this, rec);
+}
+
+SkSweepGradient::SweepGradientContext::SweepGradientContext(
+ const SkSweepGradient& shader, const ContextRec& rec)
+ : INHERITED(shader, rec) {}
+
+// returns angle in a circle [0..2PI) -> [0..255]
+static unsigned SkATan2_255(float y, float x) {
+ // static const float g255Over2PI = 255 / (2 * SK_ScalarPI);
+ static const float g255Over2PI = 40.584510488433314f;
+
+ float result = sk_float_atan2(y, x);
+ if (!SkScalarIsFinite(result)) {
+ return 0;
+ }
+ if (result < 0) {
+ result += 2 * SK_ScalarPI;
+ }
+ SkASSERT(result >= 0);
+ // since our value is always >= 0, we can cast to int, which is faster than
+ // calling floorf()
+ int ir = (int)(result * g255Over2PI);
+ SkASSERT(ir >= 0 && ir <= 255);
+ return ir;
+}
+
+void SkSweepGradient::SweepGradientContext::shadeSpan(int x, int y, SkPMColor* SK_RESTRICT dstC,
+ int count) {
+ SkMatrix::MapXYProc proc = fDstToIndexProc;
+ const SkMatrix& matrix = fDstToIndex;
+ const SkPMColor* SK_RESTRICT cache = fCache->getCache32();
+ int toggle = init_dither_toggle(x, y);
+ SkPoint srcPt;
+
+ if (fDstToIndexClass != kPerspective_MatrixClass) {
+ proc(matrix, SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
+ SkScalar dx, fx = srcPt.fX;
+ SkScalar dy, fy = srcPt.fY;
+
+ if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
+ const auto step = matrix.fixedStepInX(SkIntToScalar(y) + SK_ScalarHalf);
+ dx = step.fX;
+ dy = step.fY;
+ } else {
+ SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
+ dx = matrix.getScaleX();
+ dy = matrix.getSkewY();
+ }
+
+ for (; count > 0; --count) {
+ *dstC++ = cache[toggle + SkATan2_255(fy, fx)];
+ fx += dx;
+ fy += dy;
+ toggle = next_dither_toggle(toggle);
+ }
+ } else { // perspective case
+ for (int stop = x + count; x < stop; x++) {
+ proc(matrix, SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
+ *dstC++ = cache[toggle + SkATan2_255(srcPt.fY, srcPt.fX)];
+ toggle = next_dither_toggle(toggle);
+ }
+ }
+}
+
+/////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+#include "SkGr.h"
+#include "gl/GrGLContext.h"
+#include "glsl/GrGLSLCaps.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+
+class GrSweepGradient : public GrGradientEffect {
+public:
+ class GLSLSweepProcessor;
+
+ static sk_sp<GrFragmentProcessor> Make(const CreateArgs& args) {
+ return sk_sp<GrFragmentProcessor>(new GrSweepGradient(args));
+ }
+ virtual ~GrSweepGradient() { }
+
+ const char* name() const override { return "Sweep Gradient"; }
+
+private:
+ GrSweepGradient(const CreateArgs& args)
+ : INHERITED(args) {
+ this->initClassID<GrSweepGradient>();
+ }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ virtual void onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const override;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef GrGradientEffect INHERITED;
+};
+
+/////////////////////////////////////////////////////////////////////
+
+class GrSweepGradient::GLSLSweepProcessor : public GrGradientEffect::GLSLProcessor {
+public:
+ GLSLSweepProcessor(const GrProcessor&) {}
+ virtual ~GLSLSweepProcessor() { }
+
+ virtual void emitCode(EmitArgs&) override;
+
+ static void GenKey(const GrProcessor& processor, const GrGLSLCaps&, GrProcessorKeyBuilder* b) {
+ b->add32(GenBaseGradientKey(processor));
+ }
+
+private:
+ typedef GrGradientEffect::GLSLProcessor INHERITED;
+
+};
+
+/////////////////////////////////////////////////////////////////////
+
+GrGLSLFragmentProcessor* GrSweepGradient::onCreateGLSLInstance() const {
+ return new GrSweepGradient::GLSLSweepProcessor(*this);
+}
+
+void GrSweepGradient::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrSweepGradient::GLSLSweepProcessor::GenKey(*this, caps, b);
+}
+
+
+/////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrSweepGradient);
+
+sk_sp<GrFragmentProcessor> GrSweepGradient::TestCreate(GrProcessorTestData* d) {
+ SkPoint center = {d->fRandom->nextUScalar1(), d->fRandom->nextUScalar1()};
+
+ SkColor colors[kMaxRandomGradientColors];
+ SkScalar stopsArray[kMaxRandomGradientColors];
+ SkScalar* stops = stopsArray;
+ SkShader::TileMode tmIgnored;
+ int colorCount = RandomGradientParams(d->fRandom, colors, &stops, &tmIgnored);
+ sk_sp<SkShader> shader(SkGradientShader::MakeSweep(center.fX, center.fY, colors, stops,
+ colorCount));
+ SkMatrix viewMatrix = GrTest::TestMatrix(d->fRandom);
+ auto dstColorSpace = GrTest::TestColorSpace(d->fRandom);
+ sk_sp<GrFragmentProcessor> fp = shader->asFragmentProcessor(SkShader::AsFPArgs(
+ d->fContext, &viewMatrix, NULL, kNone_SkFilterQuality, dstColorSpace.get(),
+ SkSourceGammaTreatment::kRespect));
+ GrAlwaysAssert(fp);
+ return fp;
+}
+
+/////////////////////////////////////////////////////////////////////
+
+void GrSweepGradient::GLSLSweepProcessor::emitCode(EmitArgs& args) {
+ const GrSweepGradient& ge = args.fFp.cast<GrSweepGradient>();
+ this->emitUniforms(args.fUniformHandler, ge);
+ SkString coords2D = args.fFragBuilder->ensureCoords2D(args.fTransformedCoords[0]);
+ SkString t;
+ // 0.1591549430918 is 1/(2*pi), used since atan returns values [-pi, pi]
+ // On Intel GPUs there is an issue where the second argument to atan, "- %s.x", is read as an int,
+ // so we must use -1.0 * %s.x to work correctly
+ if (args.fGLSLCaps->mustForceNegatedAtanParamToFloat()){
+ t.printf("(atan(- %s.y, -1.0 * %s.x) * 0.1591549430918 + 0.5)",
+ coords2D.c_str(), coords2D.c_str());
+ } else {
+ t.printf("(atan(- %s.y, - %s.x) * 0.1591549430918 + 0.5)",
+ coords2D.c_str(), coords2D.c_str());
+ }
+ this->emitColor(args.fFragBuilder,
+ args.fUniformHandler,
+ args.fGLSLCaps,
+ ge, t.c_str(),
+ args.fOutputColor,
+ args.fInputColor,
+ args.fTexSamplers);
+}
+
+/////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> SkSweepGradient::asFragmentProcessor(const AsFPArgs& args) const {
+
+ SkMatrix matrix;
+ if (!this->getLocalMatrix().invert(&matrix)) {
+ return nullptr;
+ }
+ if (args.fLocalMatrix) {
+ SkMatrix inv;
+ if (!args.fLocalMatrix->invert(&inv)) {
+ return nullptr;
+ }
+ matrix.postConcat(inv);
+ }
+ matrix.postConcat(fPtsToUnit);
+
+ sk_sp<GrColorSpaceXform> colorSpaceXform = GrColorSpaceXform::Make(fColorSpace.get(),
+ args.fDstColorSpace);
+ sk_sp<GrFragmentProcessor> inner(GrSweepGradient::Make(
+ GrGradientEffect::CreateArgs(args.fContext, this, &matrix, SkShader::kClamp_TileMode,
+ std::move(colorSpaceXform), SkToBool(args.fDstColorSpace))));
+ return GrFragmentProcessor::MulOutputByInputAlpha(std::move(inner));
+}
+
+#endif
+
+#ifndef SK_IGNORE_TO_STRING
+void SkSweepGradient::toString(SkString* str) const {
+ str->append("SkSweepGradient: (");
+
+ str->append("center: (");
+ str->appendScalar(fCenter.fX);
+ str->append(", ");
+ str->appendScalar(fCenter.fY);
+ str->append(") ");
+
+ this->INHERITED::toString(str);
+
+ str->append(")");
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/gradients/SkSweepGradient.h b/gfx/skia/skia/src/effects/gradients/SkSweepGradient.h
new file mode 100644
index 000000000..f132118b3
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/SkSweepGradient.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSweepGradient_DEFINED
+#define SkSweepGradient_DEFINED
+
+#include "SkGradientShaderPriv.h"
+
+class SkSweepGradient : public SkGradientShaderBase {
+public:
+ SkSweepGradient(SkScalar cx, SkScalar cy, const Descriptor&);
+
+ class SweepGradientContext : public SkGradientShaderBase::GradientShaderBaseContext {
+ public:
+ SweepGradientContext(const SkSweepGradient& shader, const ContextRec&);
+
+ void shadeSpan(int x, int y, SkPMColor dstC[], int count) override;
+
+ private:
+ typedef SkGradientShaderBase::GradientShaderBaseContext INHERITED;
+ };
+
+ GradientType asAGradient(GradientInfo* info) const override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(const AsFPArgs&) const override;
+#endif
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkSweepGradient)
+
+protected:
+ void flatten(SkWriteBuffer& buffer) const override;
+ size_t onContextSize(const ContextRec&) const override;
+ Context* onCreateContext(const ContextRec&, void* storage) const override;
+
+private:
+ const SkPoint fCenter;
+
+ friend class SkGradientShader;
+ typedef SkGradientShaderBase INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp b/gfx/skia/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp
new file mode 100644
index 000000000..599fd4c9f
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp
@@ -0,0 +1,398 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTwoPointConicalGradient.h"
+
+struct TwoPtRadialContext {
+ const TwoPtRadial& fRec;
+ float fRelX, fRelY;
+ const float fIncX, fIncY;
+ float fB;
+ const float fDB;
+
+ TwoPtRadialContext(const TwoPtRadial& rec, SkScalar fx, SkScalar fy,
+ SkScalar dfx, SkScalar dfy);
+ SkFixed nextT();
+};
+
+static int valid_divide(float numer, float denom, float* ratio) {
+ SkASSERT(ratio);
+ if (0 == denom) {
+ return 0;
+ }
+ *ratio = numer / denom;
+ return 1;
+}
+
+// Return the number of distinct real roots, and write them into roots[] in
+// ascending order (or descending order if descendingOrder is true)
+static int find_quad_roots(float A, float B, float C, float roots[2], bool descendingOrder = false) {
+ SkASSERT(roots);
+
+ if (A == 0) {
+ return valid_divide(-C, B, roots);
+ }
+
+ float R = B*B - 4*A*C;
+ if (R < 0) {
+ return 0;
+ }
+ R = sk_float_sqrt(R);
+
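+ // Numerically stable form of the quadratic formula: fold sign(B) into the
+ // radical so Q = -(B + sign(B)*sqrt(B^2 - 4AC)) / 2 avoids cancellation, then
+ // recover the two roots as Q/A and C/Q.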
+#if 1
+ float Q = B;
+ if (Q < 0) {
+ Q -= R;
+ } else {
+ Q += R;
+ }
+#else
+ // on 10.6 this was much slower than the above branch :(
+ float Q = B + copysignf(R, B);
+#endif
+ Q *= -0.5f;
+ if (0 == Q) {
+ roots[0] = 0;
+ return 1;
+ }
+
+ float r0 = Q / A;
+ float r1 = C / Q;
+ roots[0] = r0 < r1 ? r0 : r1;
+ roots[1] = r0 > r1 ? r0 : r1;
+ if (descendingOrder) {
+ SkTSwap(roots[0], roots[1]);
+ }
+ return 2;
+}
+
+static float lerp(float x, float dx, float t) {
+ return x + t * dx;
+}
+
+static float sqr(float x) { return x * x; }
+
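+// The two-point conical gradient solves |p - c(t)| = r(t) for t, where
+// c(t) = c0 + t*(c1 - c0) and r(t) = r0 + t*(r1 - r0). With rel = p - c0 this
+// expands to A*t^2 + B*t + C = 0, where
+//   A = |dc|^2 - dr^2         (fA, constant per gradient)
+//   B = -2*(rel . dc + r0*dr) (fB, stepped incrementally per pixel)
+//   C = |rel|^2 - r0^2        (computed per pixel in nextT())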
+void TwoPtRadial::init(const SkPoint& center0, SkScalar rad0,
+ const SkPoint& center1, SkScalar rad1,
+ bool flipped) {
+ fCenterX = SkScalarToFloat(center0.fX);
+ fCenterY = SkScalarToFloat(center0.fY);
+ fDCenterX = SkScalarToFloat(center1.fX) - fCenterX;
+ fDCenterY = SkScalarToFloat(center1.fY) - fCenterY;
+ fRadius = SkScalarToFloat(rad0);
+ fDRadius = SkScalarToFloat(rad1) - fRadius;
+
+ fA = sqr(fDCenterX) + sqr(fDCenterY) - sqr(fDRadius);
+ fRadius2 = sqr(fRadius);
+ fRDR = fRadius * fDRadius;
+
+ fFlipped = flipped;
+}
+
+TwoPtRadialContext::TwoPtRadialContext(const TwoPtRadial& rec, SkScalar fx, SkScalar fy,
+ SkScalar dfx, SkScalar dfy)
+ : fRec(rec)
+ , fRelX(SkScalarToFloat(fx) - rec.fCenterX)
+ , fRelY(SkScalarToFloat(fy) - rec.fCenterY)
+ , fIncX(SkScalarToFloat(dfx))
+ , fIncY(SkScalarToFloat(dfy))
+ , fB(-2 * (rec.fDCenterX * fRelX + rec.fDCenterY * fRelY + rec.fRDR))
+ , fDB(-2 * (rec.fDCenterX * fIncX + rec.fDCenterY * fIncY)) {}
+
+SkFixed TwoPtRadialContext::nextT() {
+ float roots[2];
+
+ float C = sqr(fRelX) + sqr(fRelY) - fRec.fRadius2;
+ int countRoots = find_quad_roots(fRec.fA, fB, C, roots, fRec.fFlipped);
+
+ fRelX += fIncX;
+ fRelY += fIncY;
+ fB += fDB;
+
+ if (0 == countRoots) {
+ return TwoPtRadial::kDontDrawT;
+ }
+
+ // Prefer the bigger t value if both give a radius(t) > 0
+ // find_quad_roots returns the values sorted, so we start with the last
+ float t = roots[countRoots - 1];
+ float r = lerp(fRec.fRadius, fRec.fDRadius, t);
+ if (r < 0) {
+ t = roots[0]; // might be the same as roots[countRoots-1]
+ r = lerp(fRec.fRadius, fRec.fDRadius, t);
+ if (r < 0) {
+ return TwoPtRadial::kDontDrawT;
+ }
+ }
+ return SkFloatToFixed(t);
+}
+
+typedef void (*TwoPointConicalProc)(TwoPtRadialContext* rec, SkPMColor* dstC,
+ const SkPMColor* cache, int toggle, int count);
+
+static void twopoint_clamp(TwoPtRadialContext* rec, SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache, int toggle,
+ int count) {
+ for (; count > 0; --count) {
+ SkFixed t = rec->nextT();
+ if (TwoPtRadial::DontDrawT(t)) {
+ *dstC++ = 0;
+ } else {
+ SkFixed index = SkClampMax(t, 0xFFFF);
+ SkASSERT(index <= 0xFFFF);
+ *dstC++ = cache[toggle +
+ (index >> SkGradientShaderBase::kCache32Shift)];
+ }
+ toggle = next_dither_toggle(toggle);
+ }
+}
+
+static void twopoint_repeat(TwoPtRadialContext* rec, SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache, int toggle,
+ int count) {
+ for (; count > 0; --count) {
+ SkFixed t = rec->nextT();
+ if (TwoPtRadial::DontDrawT(t)) {
+ *dstC++ = 0;
+ } else {
+ SkFixed index = repeat_tileproc(t);
+ SkASSERT(index <= 0xFFFF);
+ *dstC++ = cache[toggle +
+ (index >> SkGradientShaderBase::kCache32Shift)];
+ }
+ toggle = next_dither_toggle(toggle);
+ }
+}
+
+static void twopoint_mirror(TwoPtRadialContext* rec, SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache, int toggle,
+ int count) {
+ for (; count > 0; --count) {
+ SkFixed t = rec->nextT();
+ if (TwoPtRadial::DontDrawT(t)) {
+ *dstC++ = 0;
+ } else {
+ SkFixed index = mirror_tileproc(t);
+ SkASSERT(index <= 0xFFFF);
+ *dstC++ = cache[toggle +
+ (index >> SkGradientShaderBase::kCache32Shift)];
+ }
+ toggle = next_dither_toggle(toggle);
+ }
+}
+
+/////////////////////////////////////////////////////////////////////
+
+SkTwoPointConicalGradient::SkTwoPointConicalGradient(
+ const SkPoint& start, SkScalar startRadius,
+ const SkPoint& end, SkScalar endRadius,
+ bool flippedGrad, const Descriptor& desc)
+ : SkGradientShaderBase(desc, SkMatrix::I())
+ , fCenter1(start)
+ , fCenter2(end)
+ , fRadius1(startRadius)
+ , fRadius2(endRadius)
+ , fFlippedGrad(flippedGrad)
+{
+ // this is degenerate, and should be caught by our caller
+ SkASSERT(fCenter1 != fCenter2 || fRadius1 != fRadius2);
+ fRec.init(fCenter1, fRadius1, fCenter2, fRadius2, fFlippedGrad);
+}
+
+bool SkTwoPointConicalGradient::isOpaque() const {
+ // Because areas outside the cone are left untouched, we cannot treat the
+ // shader as opaque even if the gradient itself is opaque.
+ // TODO(junov): Compute whether the cone fills the plane crbug.com/222380
+ return false;
+}
+
+size_t SkTwoPointConicalGradient::onContextSize(const ContextRec&) const {
+ return sizeof(TwoPointConicalGradientContext);
+}
+
+SkShader::Context* SkTwoPointConicalGradient::onCreateContext(const ContextRec& rec,
+ void* storage) const {
+ return CheckedCreateContext<TwoPointConicalGradientContext>(storage, *this, rec);
+}
+
+SkTwoPointConicalGradient::TwoPointConicalGradientContext::TwoPointConicalGradientContext(
+ const SkTwoPointConicalGradient& shader, const ContextRec& rec)
+ : INHERITED(shader, rec)
+{
+ // in general, we might discard based on computed-radius, so clear
+ // this flag (todo: sometimes we can detect that we never discard...)
+ fFlags &= ~kOpaqueAlpha_Flag;
+}
+
+void SkTwoPointConicalGradient::TwoPointConicalGradientContext::shadeSpan(
+ int x, int y, SkPMColor* dstCParam, int count) {
+ const SkTwoPointConicalGradient& twoPointConicalGradient =
+ static_cast<const SkTwoPointConicalGradient&>(fShader);
+
+ int toggle = init_dither_toggle(x, y);
+
+ SkASSERT(count > 0);
+
+ SkPMColor* SK_RESTRICT dstC = dstCParam;
+
+ SkMatrix::MapXYProc dstProc = fDstToIndexProc;
+
+ const SkPMColor* SK_RESTRICT cache = fCache->getCache32();
+
+ TwoPointConicalProc shadeProc = twopoint_repeat;
+ if (SkShader::kClamp_TileMode == twoPointConicalGradient.fTileMode) {
+ shadeProc = twopoint_clamp;
+ } else if (SkShader::kMirror_TileMode == twoPointConicalGradient.fTileMode) {
+ shadeProc = twopoint_mirror;
+ } else {
+ SkASSERT(SkShader::kRepeat_TileMode == twoPointConicalGradient.fTileMode);
+ }
+
+ if (fDstToIndexClass != kPerspective_MatrixClass) {
+ SkPoint srcPt;
+ dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
+ SkScalar dx, fx = srcPt.fX;
+ SkScalar dy, fy = srcPt.fY;
+
+ if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
+ const auto step = fDstToIndex.fixedStepInX(SkIntToScalar(y));
+ dx = step.fX;
+ dy = step.fY;
+ } else {
+ SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
+ dx = fDstToIndex.getScaleX();
+ dy = fDstToIndex.getSkewY();
+ }
+
+ TwoPtRadialContext rec(twoPointConicalGradient.fRec, fx, fy, dx, dy);
+ (*shadeProc)(&rec, dstC, cache, toggle, count);
+ } else { // perspective case
+ SkScalar dstX = SkIntToScalar(x) + SK_ScalarHalf;
+ SkScalar dstY = SkIntToScalar(y) + SK_ScalarHalf;
+ for (; count > 0; --count) {
+ SkPoint srcPt;
+ dstProc(fDstToIndex, dstX, dstY, &srcPt);
+ TwoPtRadialContext rec(twoPointConicalGradient.fRec, srcPt.fX, srcPt.fY, 0, 0);
+ (*shadeProc)(&rec, dstC, cache, toggle, 1);
+
+ dstX += SK_Scalar1;
+ toggle = next_dither_toggle(toggle);
+ dstC += 1;
+ }
+ }
+}
+
+// Reports the gradient in its original, un-flipped order
+SkShader::GradientType SkTwoPointConicalGradient::asAGradient(
+ GradientInfo* info) const {
+ if (info) {
+ commonAsAGradient(info, fFlippedGrad);
+ info->fPoint[0] = fCenter1;
+ info->fPoint[1] = fCenter2;
+ info->fRadius[0] = fRadius1;
+ info->fRadius[1] = fRadius2;
+ if (fFlippedGrad) {
+ SkTSwap(info->fPoint[0], info->fPoint[1]);
+ SkTSwap(info->fRadius[0], info->fRadius[1]);
+ }
+ }
+ return kConical_GradientType;
+}
+
+sk_sp<SkFlattenable> SkTwoPointConicalGradient::CreateProc(SkReadBuffer& buffer) {
+ DescriptorScope desc;
+ if (!desc.unflatten(buffer)) {
+ return nullptr;
+ }
+ SkPoint c1 = buffer.readPoint();
+ SkPoint c2 = buffer.readPoint();
+ SkScalar r1 = buffer.readScalar();
+ SkScalar r2 = buffer.readScalar();
+
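+ // If serialized in flipped form, swap the centers/radii back and mirror the
+ // color/position arrays (pos[i] -> 1 - pos[last - i]) so the rebuilt shader
+ // matches the original, un-flipped gradient.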
+ if (buffer.readBool()) { // flipped
+ SkTSwap(c1, c2);
+ SkTSwap(r1, r2);
+
+ SkColor4f* colors = desc.mutableColors();
+ SkScalar* pos = desc.mutablePos();
+ const int last = desc.fCount - 1;
+ const int half = desc.fCount >> 1;
+ for (int i = 0; i < half; ++i) {
+ SkTSwap(colors[i], colors[last - i]);
+ if (pos) {
+ SkScalar tmp = pos[i];
+ pos[i] = SK_Scalar1 - pos[last - i];
+ pos[last - i] = SK_Scalar1 - tmp;
+ }
+ }
+ if (pos) {
+ if (desc.fCount & 1) {
+ pos[half] = SK_Scalar1 - pos[half];
+ }
+ }
+ }
+
+ return SkGradientShader::MakeTwoPointConical(c1, r1, c2, r2, desc.fColors,
+ std::move(desc.fColorSpace), desc.fPos,
+ desc.fCount, desc.fTileMode, desc.fGradFlags,
+ desc.fLocalMatrix);
+}
+
+void SkTwoPointConicalGradient::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writePoint(fCenter1);
+ buffer.writePoint(fCenter2);
+ buffer.writeScalar(fRadius1);
+ buffer.writeScalar(fRadius2);
+ buffer.writeBool(fFlippedGrad);
+}
+
+#if SK_SUPPORT_GPU
+
+#include "SkGr.h"
+#include "SkTwoPointConicalGradient_gpu.h"
+
+sk_sp<GrFragmentProcessor> SkTwoPointConicalGradient::asFragmentProcessor(
+ const AsFPArgs& args) const {
+ SkASSERT(args.fContext);
+ SkASSERT(fPtsToUnit.isIdentity());
+ sk_sp<GrColorSpaceXform> colorSpaceXform = GrColorSpaceXform::Make(fColorSpace.get(),
+ args.fDstColorSpace);
+ sk_sp<GrFragmentProcessor> inner(Gr2PtConicalGradientEffect::Make(
+ GrGradientEffect::CreateArgs(args.fContext, this, args.fLocalMatrix, fTileMode,
+ std::move(colorSpaceXform), SkToBool(args.fDstColorSpace))));
+ return GrFragmentProcessor::MulOutputByInputAlpha(std::move(inner));
+}
+
+#endif
+
+#ifndef SK_IGNORE_TO_STRING
+void SkTwoPointConicalGradient::toString(SkString* str) const {
+ str->append("SkTwoPointConicalGradient: (");
+
+ str->append("center1: (");
+ str->appendScalar(fCenter1.fX);
+ str->append(", ");
+ str->appendScalar(fCenter1.fY);
+ str->append(") radius1: ");
+ str->appendScalar(fRadius1);
+ str->append(" ");
+
+ str->append("center2: (");
+ str->appendScalar(fCenter2.fX);
+ str->append(", ");
+ str->appendScalar(fCenter2.fY);
+ str->append(") radius2: ");
+ str->appendScalar(fRadius2);
+ str->append(" ");
+
+ this->INHERITED::toString(str);
+
+ str->append(")");
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/gradients/SkTwoPointConicalGradient.h b/gfx/skia/skia/src/effects/gradients/SkTwoPointConicalGradient.h
new file mode 100644
index 000000000..d16e4bc3c
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/SkTwoPointConicalGradient.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTwoPointConicalGradient_DEFINED
+#define SkTwoPointConicalGradient_DEFINED
+
+#include "SkGradientShaderPriv.h"
+
+// TODO(dominikg): Worth making it truly immutable (i.e. set values in constructor)?
+// Should only be initialized once via init(). Immutable afterwards.
+struct TwoPtRadial {
+ enum {
+ // This value is outside the range SK_FixedMin to SK_FixedMax.
+ kDontDrawT = 0x80000000
+ };
+
+ float fCenterX, fCenterY;
+ float fDCenterX, fDCenterY;
+ float fRadius;
+ float fDRadius;
+ float fA;
+ float fRadius2;
+ float fRDR;
+ bool fFlipped;
+
+ void init(const SkPoint& center0, SkScalar rad0,
+ const SkPoint& center1, SkScalar rad1,
+ bool flipped);
+
+ static bool DontDrawT(SkFixed t) {
+ return kDontDrawT == (uint32_t)t;
+ }
+};
+
+
+class SkTwoPointConicalGradient : public SkGradientShaderBase {
+ TwoPtRadial fRec;
+public:
+ SkTwoPointConicalGradient(const SkPoint& start, SkScalar startRadius,
+ const SkPoint& end, SkScalar endRadius,
+ bool flippedGrad, const Descriptor&);
+
+ class TwoPointConicalGradientContext : public SkGradientShaderBase::GradientShaderBaseContext {
+ public:
+ TwoPointConicalGradientContext(const SkTwoPointConicalGradient&, const ContextRec&);
+ ~TwoPointConicalGradientContext() {}
+
+ void shadeSpan(int x, int y, SkPMColor dstC[], int count) override;
+
+ private:
+ typedef SkGradientShaderBase::GradientShaderBaseContext INHERITED;
+ };
+
+ SkShader::GradientType asAGradient(GradientInfo* info) const override;
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(const AsFPArgs&) const override;
+#endif
+ bool isOpaque() const override;
+
+ SkScalar getCenterX1() const { return SkPoint::Distance(fCenter1, fCenter2); }
+ SkScalar getStartRadius() const { return fRadius1; }
+ SkScalar getDiffRadius() const { return fRadius2 - fRadius1; }
+ const SkPoint& getStartCenter() const { return fCenter1; }
+ const SkPoint& getEndCenter() const { return fCenter2; }
+ SkScalar getEndRadius() const { return fRadius2; }
+ bool isFlippedGrad() const { return fFlippedGrad; }
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkTwoPointConicalGradient)
+
+protected:
+ SkTwoPointConicalGradient(SkReadBuffer& buffer);
+ void flatten(SkWriteBuffer& buffer) const override;
+ size_t onContextSize(const ContextRec&) const override;
+ Context* onCreateContext(const ContextRec&, void* storage) const override;
+
+private:
+ SkPoint fCenter1;
+ SkPoint fCenter2;
+ SkScalar fRadius1;
+ SkScalar fRadius2;
+ bool fFlippedGrad;
+
+ friend class SkGradientShader;
+ typedef SkGradientShaderBase INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/effects/gradients/SkTwoPointConicalGradient_gpu.cpp b/gfx/skia/skia/src/effects/gradients/SkTwoPointConicalGradient_gpu.cpp
new file mode 100644
index 000000000..a8df3b50c
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/SkTwoPointConicalGradient_gpu.cpp
@@ -0,0 +1,1343 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkTwoPointConicalGradient.h"
+
+#if SK_SUPPORT_GPU
+#include "GrCoordTransform.h"
+#include "GrInvariantOutput.h"
+#include "GrPaint.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "SkTwoPointConicalGradient_gpu.h"
+
+// For brevity
+typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+
+static const SkScalar kErrorTol = 0.00001f;
+static const SkScalar kEdgeErrorTol = 5.f * kErrorTol;
+
+/**
+ * We have three general cases for 2pt conical gradients. First we always assume that
+ * the start radius <= end radius. Our first case (kInside_) is when the start circle
+ * is completely enclosed by the end circle. The second case (kOutside_) covers both the
+ * start circle lying completely outside the end circle and the two circles overlapping.
+ * The final case (kEdge_) is when the start circle is inside the end one, but the two are
+ * just barely touching at one point along their edges.
+ */
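+// In every case the shader solves for the parameter t at which the interpolated circle
+//     center(t) = (1 - t) * center0 + t * center1,  radius(t) = (1 - t) * radius0 + t * radius1
+// passes through the pixel, i.e. |p - center(t)| = radius(t). Squaring both sides yields a
+// quadratic in t; the effects below specialize its solution to each of the cases above (in the
+// kEdge_ case the quadratic term vanishes and the equation becomes linear).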
+enum ConicalType {
+ kInside_ConicalType,
+ kOutside_ConicalType,
+ kEdge_ConicalType,
+};
+
+//////////////////////////////////////////////////////////////////////////////
+
+static void set_matrix_edge_conical(const SkTwoPointConicalGradient& shader,
+ SkMatrix* invLMatrix) {
+    // The inverse of the current local matrix is passed in; we then
+    // translate to center1 and rotate so that center2 lies on the x-axis.
+ const SkPoint& center1 = shader.getStartCenter();
+ const SkPoint& center2 = shader.getEndCenter();
+
+ invLMatrix->postTranslate(-center1.fX, -center1.fY);
+
+ SkPoint diff = center2 - center1;
+ SkScalar diffLen = diff.length();
+ if (0 != diffLen) {
+ SkScalar invDiffLen = SkScalarInvert(diffLen);
+ SkMatrix rot;
+ rot.setSinCos(-SkScalarMul(invDiffLen, diff.fY),
+ SkScalarMul(invDiffLen, diff.fX));
+ invLMatrix->postConcat(rot);
+ }
+}
+
+class Edge2PtConicalEffect : public GrGradientEffect {
+public:
+ class GLSLEdge2PtConicalProcessor;
+
+ static sk_sp<GrFragmentProcessor> Make(const CreateArgs& args) {
+ return sk_sp<GrFragmentProcessor>(new Edge2PtConicalEffect(args));
+ }
+
+ virtual ~Edge2PtConicalEffect() {}
+
+ const char* name() const override {
+ return "Two-Point Conical Gradient Edge Touching";
+ }
+
+ // The radial gradient parameters can collapse to a linear (instead of quadratic) equation.
+ SkScalar center() const { return fCenterX1; }
+ SkScalar diffRadius() const { return fDiffRadius; }
+ SkScalar radius() const { return fRadius0; }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor& sBase) const override {
+ const Edge2PtConicalEffect& s = sBase.cast<Edge2PtConicalEffect>();
+ return (INHERITED::onIsEqual(sBase) &&
+ this->fCenterX1 == s.fCenterX1 &&
+ this->fRadius0 == s.fRadius0 &&
+ this->fDiffRadius == s.fDiffRadius);
+ }
+
+ Edge2PtConicalEffect(const CreateArgs& args)
+ : INHERITED(args) {
+ const SkTwoPointConicalGradient& shader =
+ *static_cast<const SkTwoPointConicalGradient*>(args.fShader);
+ fCenterX1 = shader.getCenterX1();
+ fRadius0 = shader.getStartRadius();
+ fDiffRadius = shader.getDiffRadius();
+ this->initClassID<Edge2PtConicalEffect>();
+        // We should only be calling this shader in the degenerate case with touching circles.
+        // When deciding whether we are in the edge case, we scaled by the end radius when the
+        // start radius was close to zero; otherwise we scaled by the start radius. In addition,
+        // our test for the edge case in set_matrix_circle_conical has a higher tolerance, so we
+        // need the sqrt value below.
+ SkASSERT(SkScalarAbs(SkScalarAbs(fDiffRadius) - fCenterX1) <
+ (fRadius0 < kErrorTol ? shader.getEndRadius() * kEdgeErrorTol :
+ fRadius0 * sqrt(kEdgeErrorTol)));
+
+ // We pass the linear part of the quadratic as a varying.
+ // float b = -2.0 * (fCenterX1 * x + fRadius0 * fDiffRadius * z)
+ fBTransform = this->getCoordTransform();
+ SkMatrix& bMatrix = *fBTransform.accessMatrix();
+ SkScalar r0dr = SkScalarMul(fRadius0, fDiffRadius);
+ bMatrix[SkMatrix::kMScaleX] = -2 * (SkScalarMul(fCenterX1, bMatrix[SkMatrix::kMScaleX]) +
+ SkScalarMul(r0dr, bMatrix[SkMatrix::kMPersp0]));
+ bMatrix[SkMatrix::kMSkewX] = -2 * (SkScalarMul(fCenterX1, bMatrix[SkMatrix::kMSkewX]) +
+ SkScalarMul(r0dr, bMatrix[SkMatrix::kMPersp1]));
+ bMatrix[SkMatrix::kMTransX] = -2 * (SkScalarMul(fCenterX1, bMatrix[SkMatrix::kMTransX]) +
+ SkScalarMul(r0dr, bMatrix[SkMatrix::kMPersp2]));
+ this->addCoordTransform(&fBTransform);
+ }
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ // @{
+ // Cache of values - these can change arbitrarily, EXCEPT
+ // we shouldn't change between degenerate and non-degenerate?!
+
+ GrCoordTransform fBTransform;
+ SkScalar fCenterX1;
+ SkScalar fRadius0;
+ SkScalar fDiffRadius;
+
+ // @}
+
+ typedef GrGradientEffect INHERITED;
+};
+
+class Edge2PtConicalEffect::GLSLEdge2PtConicalProcessor : public GrGradientEffect::GLSLProcessor {
+public:
+ GLSLEdge2PtConicalProcessor(const GrProcessor&);
+ virtual ~GLSLEdge2PtConicalProcessor() { }
+
+ virtual void emitCode(EmitArgs&) override;
+
+ static void GenKey(const GrProcessor&, const GrGLSLCaps& caps, GrProcessorKeyBuilder* b);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+ UniformHandle fParamUni;
+
+ const char* fVSVaryingName;
+ const char* fFSVaryingName;
+
+ // @{
+ /// Values last uploaded as uniforms
+
+ SkScalar fCachedRadius;
+ SkScalar fCachedDiffRadius;
+
+ // @}
+
+private:
+ typedef GrGradientEffect::GLSLProcessor INHERITED;
+
+};
+
+void Edge2PtConicalEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ Edge2PtConicalEffect::GLSLEdge2PtConicalProcessor::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* Edge2PtConicalEffect::onCreateGLSLInstance() const {
+ return new Edge2PtConicalEffect::GLSLEdge2PtConicalProcessor(*this);
+}
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(Edge2PtConicalEffect);
+
+/*
+ * All two-point conical gradient test create functions may occasionally create edge-case shaders
+ */
+sk_sp<GrFragmentProcessor> Edge2PtConicalEffect::TestCreate(GrProcessorTestData* d) {
+ SkPoint center1 = {d->fRandom->nextUScalar1(), d->fRandom->nextUScalar1()};
+ SkScalar radius1 = d->fRandom->nextUScalar1();
+ SkPoint center2;
+ SkScalar radius2;
+ do {
+ center2.set(d->fRandom->nextUScalar1(), d->fRandom->nextUScalar1());
+ // If the circles are identical the factory will give us an empty shader.
+ // This will happen if we pick identical centers
+ } while (center1 == center2);
+
+ // Below makes sure that circle one is contained within circle two
+ // and both circles are touching on an edge
+ SkPoint diff = center2 - center1;
+ SkScalar diffLen = diff.length();
+ radius2 = radius1 + diffLen;
+
+ SkColor colors[kMaxRandomGradientColors];
+ SkScalar stopsArray[kMaxRandomGradientColors];
+ SkScalar* stops = stopsArray;
+ SkShader::TileMode tm;
+ int colorCount = RandomGradientParams(d->fRandom, colors, &stops, &tm);
+ auto shader = SkGradientShader::MakeTwoPointConical(center1, radius1, center2, radius2,
+ colors, stops, colorCount, tm);
+ SkMatrix viewMatrix = GrTest::TestMatrix(d->fRandom);
+ auto dstColorSpace = GrTest::TestColorSpace(d->fRandom);
+ sk_sp<GrFragmentProcessor> fp = shader->asFragmentProcessor(SkShader::AsFPArgs(
+ d->fContext, &viewMatrix, NULL, kNone_SkFilterQuality, dstColorSpace.get(),
+ SkSourceGammaTreatment::kRespect));
+ GrAlwaysAssert(fp);
+ return fp;
+}
+
+Edge2PtConicalEffect::GLSLEdge2PtConicalProcessor::GLSLEdge2PtConicalProcessor(const GrProcessor&)
+ : fVSVaryingName(nullptr)
+ , fFSVaryingName(nullptr)
+ , fCachedRadius(-SK_ScalarMax)
+ , fCachedDiffRadius(-SK_ScalarMax) {}
+
+void Edge2PtConicalEffect::GLSLEdge2PtConicalProcessor::emitCode(EmitArgs& args) {
+ const Edge2PtConicalEffect& ge = args.fFp.cast<Edge2PtConicalEffect>();
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ this->emitUniforms(uniformHandler, ge);
+ fParamUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec3f_GrSLType, kDefault_GrSLPrecision,
+ "Conical2FSParams");
+
+ SkString cName("c");
+ SkString tName("t");
+ SkString p0; // start radius
+ SkString p1; // start radius squared
+ SkString p2; // difference in radii (r1 - r0)
+
+
+ p0.appendf("%s.x", uniformHandler->getUniformVariable(fParamUni).getName().c_str());
+ p1.appendf("%s.y", uniformHandler->getUniformVariable(fParamUni).getName().c_str());
+ p2.appendf("%s.z", uniformHandler->getUniformVariable(fParamUni).getName().c_str());
+
+ // We interpolate the linear component in coords[1].
+ SkASSERT(args.fTransformedCoords[0].getType() == args.fTransformedCoords[1].getType());
+ const char* coords2D;
+ SkString bVar;
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ if (kVec3f_GrSLType == args.fTransformedCoords[0].getType()) {
+ fragBuilder->codeAppendf("\tvec3 interpolants = vec3(%s.xy / %s.z, %s.x / %s.z);\n",
+ args.fTransformedCoords[0].c_str(),
+ args.fTransformedCoords[0].c_str(),
+ args.fTransformedCoords[1].c_str(),
+ args.fTransformedCoords[1].c_str());
+ coords2D = "interpolants.xy";
+ bVar = "interpolants.z";
+ } else {
+ coords2D = args.fTransformedCoords[0].c_str();
+ bVar.printf("%s.x", args.fTransformedCoords[1].c_str());
+ }
+
+ // output will default to transparent black (we simply won't write anything
+ // else to it if invalid, instead of discarding or returning prematurely)
+ fragBuilder->codeAppendf("\t%s = vec4(0.0,0.0,0.0,0.0);\n", args.fOutputColor);
+
+ // c = (x^2)+(y^2) - params[1]
+ fragBuilder->codeAppendf("\tfloat %s = dot(%s, %s) - %s;\n",
+ cName.c_str(), coords2D, coords2D, p1.c_str());
+
+ // linear case: t = -c/b
+ fragBuilder->codeAppendf("\tfloat %s = -(%s / %s);\n", tName.c_str(),
+ cName.c_str(), bVar.c_str());
+
+ // if r(t) > 0, then t will be the x coordinate
+ fragBuilder->codeAppendf("\tif (%s * %s + %s > 0.0) {\n", tName.c_str(),
+ p2.c_str(), p0.c_str());
+ fragBuilder->codeAppend("\t");
+ this->emitColor(fragBuilder,
+ uniformHandler,
+ args.fGLSLCaps,
+ ge,
+ tName.c_str(),
+ args.fOutputColor,
+ args.fInputColor,
+ args.fTexSamplers);
+ fragBuilder->codeAppend("\t}\n");
+}
+
+void Edge2PtConicalEffect::GLSLEdge2PtConicalProcessor::onSetData(
+ const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& processor) {
+ INHERITED::onSetData(pdman, processor);
+ const Edge2PtConicalEffect& data = processor.cast<Edge2PtConicalEffect>();
+ SkScalar radius0 = data.radius();
+ SkScalar diffRadius = data.diffRadius();
+
+ if (fCachedRadius != radius0 ||
+ fCachedDiffRadius != diffRadius) {
+
+ pdman.set3f(fParamUni, SkScalarToFloat(radius0),
+ SkScalarToFloat(SkScalarMul(radius0, radius0)), SkScalarToFloat(diffRadius));
+ fCachedRadius = radius0;
+ fCachedDiffRadius = diffRadius;
+ }
+}
+
+void Edge2PtConicalEffect::GLSLEdge2PtConicalProcessor::GenKey(const GrProcessor& processor,
+ const GrGLSLCaps&, GrProcessorKeyBuilder* b) {
+ b->add32(GenBaseGradientKey(processor));
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// Focal Conical Gradients
+//////////////////////////////////////////////////////////////////////////////
+
+static ConicalType set_matrix_focal_conical(const SkTwoPointConicalGradient& shader,
+ SkMatrix* invLMatrix, SkScalar* focalX) {
+    // The inverse of the current local matrix is passed in; we then
+    // translate, scale, and rotate such that the end circle becomes the unit circle
+    // centered on the x-axis and the focal point ends up at the origin.
+ ConicalType conicalType;
+ const SkPoint& focal = shader.getStartCenter();
+ const SkPoint& centerEnd = shader.getEndCenter();
+ SkScalar radius = shader.getEndRadius();
+ SkScalar invRadius = 1.f / radius;
+
+ SkMatrix matrix;
+
+ matrix.setTranslate(-centerEnd.fX, -centerEnd.fY);
+ matrix.postScale(invRadius, invRadius);
+
+ SkPoint focalTrans;
+ matrix.mapPoints(&focalTrans, &focal, 1);
+ *focalX = focalTrans.length();
+
+ if (0.f != *focalX) {
+ SkScalar invFocalX = SkScalarInvert(*focalX);
+ SkMatrix rot;
+ rot.setSinCos(-SkScalarMul(invFocalX, focalTrans.fY),
+ SkScalarMul(invFocalX, focalTrans.fX));
+ matrix.postConcat(rot);
+ }
+
+ matrix.postTranslate(-(*focalX), 0.f);
+
+ // If the focal point is touching the edge of the circle it will
+ // cause a degenerate case that must be handled separately
+    // kEdgeErrorTol = 5 * kErrorTol was picked after manually testing the
+    // stability trade-off versus the linear approximation used in the Edge shader.
+ if (SkScalarAbs(1.f - (*focalX)) < kEdgeErrorTol) {
+ return kEdge_ConicalType;
+ }
+
+ // Scale factor 1 / (1 - focalX * focalX)
+ SkScalar oneMinusF2 = 1.f - SkScalarMul(*focalX, *focalX);
+ SkScalar s = SkScalarInvert(oneMinusF2);
+
+
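+    // |focalX| < 1 means the focal point lies inside the (unit) end circle, so oneMinusF2 > 0
+    // and s >= 0 selects the kInside_ case; otherwise the focal point is outside the end
+    // circle and we take the kOutside_ path.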
+ if (s >= 0.f) {
+ conicalType = kInside_ConicalType;
+ matrix.postScale(s, s * SkScalarSqrt(oneMinusF2));
+ } else {
+ conicalType = kOutside_ConicalType;
+ matrix.postScale(s, s);
+ }
+
+ invLMatrix->postConcat(matrix);
+
+ return conicalType;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class FocalOutside2PtConicalEffect : public GrGradientEffect {
+public:
+ class GLSLFocalOutside2PtConicalProcessor;
+
+ static sk_sp<GrFragmentProcessor> Make(const CreateArgs& args, SkScalar focalX) {
+ return sk_sp<GrFragmentProcessor>(
+ new FocalOutside2PtConicalEffect(args, focalX));
+ }
+
+ virtual ~FocalOutside2PtConicalEffect() { }
+
+ const char* name() const override {
+ return "Two-Point Conical Gradient Focal Outside";
+ }
+
+ bool isFlipped() const { return fIsFlipped; }
+ SkScalar focal() const { return fFocalX; }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor& sBase) const override {
+ const FocalOutside2PtConicalEffect& s = sBase.cast<FocalOutside2PtConicalEffect>();
+ return (INHERITED::onIsEqual(sBase) &&
+ this->fFocalX == s.fFocalX &&
+ this->fIsFlipped == s.fIsFlipped);
+ }
+
+ FocalOutside2PtConicalEffect(const CreateArgs& args, SkScalar focalX)
+ : INHERITED(args)
+ , fFocalX(focalX)
+ , fIsFlipped(static_cast<const SkTwoPointConicalGradient*>(args.fShader)->isFlippedGrad()) {
+ this->initClassID<FocalOutside2PtConicalEffect>();
+ }
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ SkScalar fFocalX;
+ bool fIsFlipped;
+
+ typedef GrGradientEffect INHERITED;
+};
+
+class FocalOutside2PtConicalEffect::GLSLFocalOutside2PtConicalProcessor
+ : public GrGradientEffect::GLSLProcessor {
+public:
+ GLSLFocalOutside2PtConicalProcessor(const GrProcessor&);
+ virtual ~GLSLFocalOutside2PtConicalProcessor() { }
+
+ virtual void emitCode(EmitArgs&) override;
+
+ static void GenKey(const GrProcessor&, const GrGLSLCaps& caps, GrProcessorKeyBuilder* b);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+ UniformHandle fParamUni;
+
+ const char* fVSVaryingName;
+ const char* fFSVaryingName;
+
+ bool fIsFlipped;
+
+ // @{
+ /// Values last uploaded as uniforms
+
+ SkScalar fCachedFocal;
+
+ // @}
+
+private:
+ typedef GrGradientEffect::GLSLProcessor INHERITED;
+
+};
+
+void FocalOutside2PtConicalEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ FocalOutside2PtConicalEffect::GLSLFocalOutside2PtConicalProcessor::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* FocalOutside2PtConicalEffect::onCreateGLSLInstance() const {
+ return new FocalOutside2PtConicalEffect::GLSLFocalOutside2PtConicalProcessor(*this);
+}
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(FocalOutside2PtConicalEffect);
+
+/*
+ * All two-point conical gradient test create functions may occasionally create edge-case shaders
+ */
+sk_sp<GrFragmentProcessor> FocalOutside2PtConicalEffect::TestCreate(GrProcessorTestData* d) {
+ SkPoint center1 = {d->fRandom->nextUScalar1(), d->fRandom->nextUScalar1()};
+ SkScalar radius1 = 0.f;
+ SkPoint center2;
+ SkScalar radius2;
+ do {
+ center2.set(d->fRandom->nextUScalar1(), d->fRandom->nextUScalar1());
+ // Need to make sure the centers are not the same or else focal point will be inside
+ } while (center1 == center2);
+ SkPoint diff = center2 - center1;
+ SkScalar diffLen = diff.length();
+ // Below makes sure that the focal point is not contained within circle two
+ radius2 = d->fRandom->nextRangeF(0.f, diffLen);
+
+ SkColor colors[kMaxRandomGradientColors];
+ SkScalar stopsArray[kMaxRandomGradientColors];
+ SkScalar* stops = stopsArray;
+ SkShader::TileMode tm;
+ int colorCount = RandomGradientParams(d->fRandom, colors, &stops, &tm);
+ auto shader = SkGradientShader::MakeTwoPointConical(center1, radius1, center2, radius2,
+ colors, stops, colorCount, tm);
+ SkMatrix viewMatrix = GrTest::TestMatrix(d->fRandom);
+ auto dstColorSpace = GrTest::TestColorSpace(d->fRandom);
+ sk_sp<GrFragmentProcessor> fp = shader->asFragmentProcessor(SkShader::AsFPArgs(
+ d->fContext, &viewMatrix, NULL, kNone_SkFilterQuality, dstColorSpace.get(),
+ SkSourceGammaTreatment::kRespect));
+ GrAlwaysAssert(fp);
+ return fp;
+}
+
+FocalOutside2PtConicalEffect::GLSLFocalOutside2PtConicalProcessor
+ ::GLSLFocalOutside2PtConicalProcessor(const GrProcessor& processor)
+ : fVSVaryingName(nullptr)
+ , fFSVaryingName(nullptr)
+ , fCachedFocal(SK_ScalarMax) {
+ const FocalOutside2PtConicalEffect& data = processor.cast<FocalOutside2PtConicalEffect>();
+ fIsFlipped = data.isFlipped();
+}
+
+void FocalOutside2PtConicalEffect::GLSLFocalOutside2PtConicalProcessor::emitCode(EmitArgs& args) {
+ const FocalOutside2PtConicalEffect& ge = args.fFp.cast<FocalOutside2PtConicalEffect>();
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ this->emitUniforms(uniformHandler, ge);
+ fParamUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType, kDefault_GrSLPrecision,
+ "Conical2FSParams");
+ SkString tName("t");
+ SkString p0; // focalX
+ SkString p1; // 1 - focalX * focalX
+
+ p0.appendf("%s.x", uniformHandler->getUniformVariable(fParamUni).getName().c_str());
+ p1.appendf("%s.y", uniformHandler->getUniformVariable(fParamUni).getName().c_str());
+
+ // if we have a vec3 from being in perspective, convert it to a vec2 first
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString coords2DString = fragBuilder->ensureCoords2D(args.fTransformedCoords[0]);
+ const char* coords2D = coords2DString.c_str();
+
+ // t = p.x * focal.x +/- sqrt(p.x^2 + (1 - focal.x^2) * p.y^2)
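+    // (This comes from solving |p - t * (centerEnd - focal)| = t for t -- start radius 0,
+    // end radius 1 -- in the focal-centered coordinates set up by set_matrix_focal_conical,
+    // with the 1 / (1 - focalX^2) scale already folded into the coord transform.)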
+
+ // output will default to transparent black (we simply won't write anything
+ // else to it if invalid, instead of discarding or returning prematurely)
+ fragBuilder->codeAppendf("\t%s = vec4(0.0,0.0,0.0,0.0);\n", args.fOutputColor);
+
+ fragBuilder->codeAppendf("\tfloat xs = %s.x * %s.x;\n", coords2D, coords2D);
+ fragBuilder->codeAppendf("\tfloat ys = %s.y * %s.y;\n", coords2D, coords2D);
+ fragBuilder->codeAppendf("\tfloat d = xs + %s * ys;\n", p1.c_str());
+
+ // Must check to see if we flipped the circle order (to make sure start radius < end radius)
+ // If so we must also flip sign on sqrt
+ if (!fIsFlipped) {
+ fragBuilder->codeAppendf("\tfloat %s = %s.x * %s + sqrt(d);\n", tName.c_str(),
+ coords2D, p0.c_str());
+ } else {
+ fragBuilder->codeAppendf("\tfloat %s = %s.x * %s - sqrt(d);\n", tName.c_str(),
+ coords2D, p0.c_str());
+ }
+
+ fragBuilder->codeAppendf("\tif (%s >= 0.0 && d >= 0.0) {\n", tName.c_str());
+ fragBuilder->codeAppend("\t\t");
+ this->emitColor(fragBuilder,
+ uniformHandler,
+ args.fGLSLCaps,
+ ge,
+ tName.c_str(),
+ args.fOutputColor,
+ args.fInputColor,
+ args.fTexSamplers);
+ fragBuilder->codeAppend("\t}\n");
+}
+
+void FocalOutside2PtConicalEffect::GLSLFocalOutside2PtConicalProcessor::onSetData(
+ const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& processor) {
+ INHERITED::onSetData(pdman, processor);
+ const FocalOutside2PtConicalEffect& data = processor.cast<FocalOutside2PtConicalEffect>();
+ SkASSERT(data.isFlipped() == fIsFlipped);
+ SkScalar focal = data.focal();
+
+ if (fCachedFocal != focal) {
+ SkScalar oneMinus2F = 1.f - SkScalarMul(focal, focal);
+
+ pdman.set2f(fParamUni, SkScalarToFloat(focal), SkScalarToFloat(oneMinus2F));
+ fCachedFocal = focal;
+ }
+}
+
+void FocalOutside2PtConicalEffect::GLSLFocalOutside2PtConicalProcessor::GenKey(
+ const GrProcessor& processor,
+ const GrGLSLCaps&, GrProcessorKeyBuilder* b) {
+ uint32_t* key = b->add32n(2);
+ key[0] = GenBaseGradientKey(processor);
+ key[1] = processor.cast<FocalOutside2PtConicalEffect>().isFlipped();
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class FocalInside2PtConicalEffect : public GrGradientEffect {
+public:
+ class GLSLFocalInside2PtConicalProcessor;
+
+ static sk_sp<GrFragmentProcessor> Make(const CreateArgs& args, SkScalar focalX) {
+ return sk_sp<GrFragmentProcessor>(
+ new FocalInside2PtConicalEffect(args, focalX));
+ }
+
+ virtual ~FocalInside2PtConicalEffect() {}
+
+ const char* name() const override {
+ return "Two-Point Conical Gradient Focal Inside";
+ }
+
+ SkScalar focal() const { return fFocalX; }
+
+ typedef FocalInside2PtConicalEffect::GLSLFocalInside2PtConicalProcessor GLSLProcessor;
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor& sBase) const override {
+ const FocalInside2PtConicalEffect& s = sBase.cast<FocalInside2PtConicalEffect>();
+ return (INHERITED::onIsEqual(sBase) &&
+ this->fFocalX == s.fFocalX);
+ }
+
+ FocalInside2PtConicalEffect(const CreateArgs& args, SkScalar focalX)
+ : INHERITED(args), fFocalX(focalX) {
+ this->initClassID<FocalInside2PtConicalEffect>();
+ }
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ SkScalar fFocalX;
+
+ typedef GrGradientEffect INHERITED;
+};
+
+class FocalInside2PtConicalEffect::GLSLFocalInside2PtConicalProcessor
+ : public GrGradientEffect::GLSLProcessor {
+public:
+ GLSLFocalInside2PtConicalProcessor(const GrProcessor&);
+ virtual ~GLSLFocalInside2PtConicalProcessor() {}
+
+ virtual void emitCode(EmitArgs&) override;
+
+ static void GenKey(const GrProcessor&, const GrGLSLCaps& caps, GrProcessorKeyBuilder* b);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+ UniformHandle fFocalUni;
+
+ const char* fVSVaryingName;
+ const char* fFSVaryingName;
+
+ // @{
+ /// Values last uploaded as uniforms
+
+ SkScalar fCachedFocal;
+
+ // @}
+
+private:
+ typedef GrGradientEffect::GLSLProcessor INHERITED;
+
+};
+
+void FocalInside2PtConicalEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ FocalInside2PtConicalEffect::GLSLFocalInside2PtConicalProcessor::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* FocalInside2PtConicalEffect::onCreateGLSLInstance() const {
+ return new FocalInside2PtConicalEffect::GLSLFocalInside2PtConicalProcessor(*this);
+}
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(FocalInside2PtConicalEffect);
+
+/*
+ * All two-point conical gradient test create functions may occasionally create edge-case shaders
+ */
+sk_sp<GrFragmentProcessor> FocalInside2PtConicalEffect::TestCreate(GrProcessorTestData* d) {
+ SkPoint center1 = {d->fRandom->nextUScalar1(), d->fRandom->nextUScalar1()};
+ SkScalar radius1 = 0.f;
+ SkPoint center2;
+ SkScalar radius2;
+ do {
+ center2.set(d->fRandom->nextUScalar1(), d->fRandom->nextUScalar1());
+        // Below makes sure radius2 is large enough that the focal point
+        // is inside the end circle
+ SkScalar increase = d->fRandom->nextUScalar1();
+ SkPoint diff = center2 - center1;
+ SkScalar diffLen = diff.length();
+ radius2 = diffLen + increase;
+ // If the circles are identical the factory will give us an empty shader.
+ } while (radius1 == radius2 && center1 == center2);
+
+ SkColor colors[kMaxRandomGradientColors];
+ SkScalar stopsArray[kMaxRandomGradientColors];
+ SkScalar* stops = stopsArray;
+ SkShader::TileMode tm;
+ int colorCount = RandomGradientParams(d->fRandom, colors, &stops, &tm);
+ auto shader = SkGradientShader::MakeTwoPointConical(center1, radius1, center2, radius2,
+ colors, stops, colorCount, tm);
+ SkMatrix viewMatrix = GrTest::TestMatrix(d->fRandom);
+ auto dstColorSpace = GrTest::TestColorSpace(d->fRandom);
+ sk_sp<GrFragmentProcessor> fp = shader->asFragmentProcessor(SkShader::AsFPArgs(
+ d->fContext, &viewMatrix, NULL, kNone_SkFilterQuality, dstColorSpace.get(),
+ SkSourceGammaTreatment::kRespect));
+ GrAlwaysAssert(fp);
+ return fp;
+}
+
+FocalInside2PtConicalEffect::GLSLFocalInside2PtConicalProcessor
+ ::GLSLFocalInside2PtConicalProcessor(const GrProcessor&)
+ : fVSVaryingName(nullptr)
+ , fFSVaryingName(nullptr)
+ , fCachedFocal(SK_ScalarMax) {}
+
+void FocalInside2PtConicalEffect::GLSLFocalInside2PtConicalProcessor::emitCode(EmitArgs& args) {
+ const FocalInside2PtConicalEffect& ge = args.fFp.cast<FocalInside2PtConicalEffect>();
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ this->emitUniforms(uniformHandler, ge);
+ fFocalUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType, kDefault_GrSLPrecision,
+ "Conical2FSParams");
+ SkString tName("t");
+
+ // this is the distance along x-axis from the end center to focal point in
+ // transformed coordinates
+ GrGLSLShaderVar focal = uniformHandler->getUniformVariable(fFocalUni);
+
+ // if we have a vec3 from being in perspective, convert it to a vec2 first
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString coords2DString = fragBuilder->ensureCoords2D(args.fTransformedCoords[0]);
+ const char* coords2D = coords2DString.c_str();
+
+ // t = p.x * focalX + length(p)
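+    // (The quadratic solution collapses to this closed form because the start radius is 0 and
+    // set_matrix_focal_conical scales y by sqrt(1 - focalX^2) in the kInside_ case, which folds
+    // the square root term into a plain length(p).)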
+ fragBuilder->codeAppendf("\tfloat %s = %s.x * %s + length(%s);\n", tName.c_str(),
+ coords2D, focal.c_str(), coords2D);
+
+ this->emitColor(fragBuilder,
+ uniformHandler,
+ args.fGLSLCaps,
+ ge,
+ tName.c_str(),
+ args.fOutputColor,
+ args.fInputColor,
+ args.fTexSamplers);
+}
+
+void FocalInside2PtConicalEffect::GLSLFocalInside2PtConicalProcessor::onSetData(
+ const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& processor) {
+ INHERITED::onSetData(pdman, processor);
+ const FocalInside2PtConicalEffect& data = processor.cast<FocalInside2PtConicalEffect>();
+ SkScalar focal = data.focal();
+
+ if (fCachedFocal != focal) {
+ pdman.set1f(fFocalUni, SkScalarToFloat(focal));
+ fCachedFocal = focal;
+ }
+}
+
+void FocalInside2PtConicalEffect::GLSLFocalInside2PtConicalProcessor::GenKey(
+ const GrProcessor& processor,
+ const GrGLSLCaps&, GrProcessorKeyBuilder* b) {
+ b->add32(GenBaseGradientKey(processor));
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// Circle Conical Gradients
+//////////////////////////////////////////////////////////////////////////////
+
+struct CircleConicalInfo {
+ SkPoint fCenterEnd;
+ SkScalar fA;
+ SkScalar fB;
+ SkScalar fC;
+};
+
+// Returns focal distance along x-axis in transformed coords
+static ConicalType set_matrix_circle_conical(const SkTwoPointConicalGradient& shader,
+ SkMatrix* invLMatrix, CircleConicalInfo* info) {
+    // The inverse of the current local matrix is passed in; we then
+    // translate and scale such that the start circle is centered at the origin with radius 1.
+ const SkPoint& centerStart = shader.getStartCenter();
+ const SkPoint& centerEnd = shader.getEndCenter();
+ SkScalar radiusStart = shader.getStartRadius();
+ SkScalar radiusEnd = shader.getEndRadius();
+
+ SkMatrix matrix;
+
+ matrix.setTranslate(-centerStart.fX, -centerStart.fY);
+
+ SkScalar invStartRad = 1.f / radiusStart;
+ matrix.postScale(invStartRad, invStartRad);
+
+ radiusEnd /= radiusStart;
+
+ SkPoint centerEndTrans;
+ matrix.mapPoints(&centerEndTrans, &centerEnd, 1);
+
+ SkScalar A = centerEndTrans.fX * centerEndTrans.fX + centerEndTrans.fY * centerEndTrans.fY
+ - radiusEnd * radiusEnd + 2 * radiusEnd - 1;
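+    // Note that A = dot(e, e) - (r - 1)^2, where e is the transformed end center and r the
+    // scaled end radius; since the radii were sorted so that r >= 1, A < 0 exactly when
+    // |e| < r - 1, i.e. when the unit start circle lies strictly inside the end circle.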
+
+ // Check to see if start circle is inside end circle with edges touching.
+ // If touching we return that it is of kEdge_ConicalType, and leave the matrix setting
+ // to the edge shader. kEdgeErrorTol = 5 * kErrorTol was picked after manual testing
+ // so that C = 1 / A is stable, and the linear approximation used in the Edge shader is
+ // still accurate.
+ if (SkScalarAbs(A) < kEdgeErrorTol) {
+ return kEdge_ConicalType;
+ }
+
+ SkScalar C = 1.f / A;
+ SkScalar B = (radiusEnd - 1.f) * C;
+
+ matrix.postScale(C, C);
+
+ invLMatrix->postConcat(matrix);
+
+ info->fCenterEnd = centerEndTrans;
+ info->fA = A;
+ info->fB = B;
+ info->fC = C;
+
+    // if A ends up being negative, the start circle is contained completely inside the end circle
+ if (A < 0.f) {
+ return kInside_ConicalType;
+ }
+ return kOutside_ConicalType;
+}
+
+class CircleInside2PtConicalEffect : public GrGradientEffect {
+public:
+ class GLSLCircleInside2PtConicalProcessor;
+
+ static sk_sp<GrFragmentProcessor> Make(const CreateArgs& args, const CircleConicalInfo& info) {
+ return sk_sp<GrFragmentProcessor>(
+ new CircleInside2PtConicalEffect(args, info));
+ }
+
+ virtual ~CircleInside2PtConicalEffect() {}
+
+ const char* name() const override { return "Two-Point Conical Gradient Inside"; }
+
+ SkScalar centerX() const { return fInfo.fCenterEnd.fX; }
+ SkScalar centerY() const { return fInfo.fCenterEnd.fY; }
+ SkScalar A() const { return fInfo.fA; }
+ SkScalar B() const { return fInfo.fB; }
+ SkScalar C() const { return fInfo.fC; }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ virtual void onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const override;
+
+ bool onIsEqual(const GrFragmentProcessor& sBase) const override {
+ const CircleInside2PtConicalEffect& s = sBase.cast<CircleInside2PtConicalEffect>();
+ return (INHERITED::onIsEqual(sBase) &&
+ this->fInfo.fCenterEnd == s.fInfo.fCenterEnd &&
+ this->fInfo.fA == s.fInfo.fA &&
+ this->fInfo.fB == s.fInfo.fB &&
+ this->fInfo.fC == s.fInfo.fC);
+ }
+
+ CircleInside2PtConicalEffect(const CreateArgs& args, const CircleConicalInfo& info)
+ : INHERITED(args), fInfo(info) {
+ this->initClassID<CircleInside2PtConicalEffect>();
+ }
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ const CircleConicalInfo fInfo;
+
+ typedef GrGradientEffect INHERITED;
+};
+
+class CircleInside2PtConicalEffect::GLSLCircleInside2PtConicalProcessor
+ : public GrGradientEffect::GLSLProcessor {
+public:
+ GLSLCircleInside2PtConicalProcessor(const GrProcessor&);
+ virtual ~GLSLCircleInside2PtConicalProcessor() {}
+
+ virtual void emitCode(EmitArgs&) override;
+
+ static void GenKey(const GrProcessor&, const GrGLSLCaps& caps, GrProcessorKeyBuilder* b);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+ UniformHandle fCenterUni;
+ UniformHandle fParamUni;
+
+ const char* fVSVaryingName;
+ const char* fFSVaryingName;
+
+ // @{
+ /// Values last uploaded as uniforms
+
+ SkScalar fCachedCenterX;
+ SkScalar fCachedCenterY;
+ SkScalar fCachedA;
+ SkScalar fCachedB;
+ SkScalar fCachedC;
+
+ // @}
+
+private:
+ typedef GrGradientEffect::GLSLProcessor INHERITED;
+
+};
+
+void CircleInside2PtConicalEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ CircleInside2PtConicalEffect::GLSLCircleInside2PtConicalProcessor::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* CircleInside2PtConicalEffect::onCreateGLSLInstance() const {
+ return new CircleInside2PtConicalEffect::GLSLCircleInside2PtConicalProcessor(*this);
+}
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(CircleInside2PtConicalEffect);
+
+/*
+ * All two-point conical gradient test create functions may occasionally create edge-case shaders
+ */
+sk_sp<GrFragmentProcessor> CircleInside2PtConicalEffect::TestCreate(GrProcessorTestData* d) {
+ SkPoint center1 = {d->fRandom->nextUScalar1(), d->fRandom->nextUScalar1()};
+ SkScalar radius1 = d->fRandom->nextUScalar1() + 0.0001f; // make sure radius1 != 0
+ SkPoint center2;
+ SkScalar radius2;
+ do {
+ center2.set(d->fRandom->nextUScalar1(), d->fRandom->nextUScalar1());
+ // Below makes sure that circle one is contained within circle two
+ SkScalar increase = d->fRandom->nextUScalar1();
+ SkPoint diff = center2 - center1;
+ SkScalar diffLen = diff.length();
+ radius2 = radius1 + diffLen + increase;
+ // If the circles are identical the factory will give us an empty shader.
+ } while (radius1 == radius2 && center1 == center2);
+
+ SkColor colors[kMaxRandomGradientColors];
+ SkScalar stopsArray[kMaxRandomGradientColors];
+ SkScalar* stops = stopsArray;
+ SkShader::TileMode tm;
+ int colorCount = RandomGradientParams(d->fRandom, colors, &stops, &tm);
+ auto shader = SkGradientShader::MakeTwoPointConical(center1, radius1, center2, radius2,
+ colors, stops, colorCount, tm);
+ SkMatrix viewMatrix = GrTest::TestMatrix(d->fRandom);
+ auto dstColorSpace = GrTest::TestColorSpace(d->fRandom);
+ sk_sp<GrFragmentProcessor> fp = shader->asFragmentProcessor(SkShader::AsFPArgs(
+ d->fContext, &viewMatrix, NULL, kNone_SkFilterQuality, dstColorSpace.get(),
+ SkSourceGammaTreatment::kRespect));
+ GrAlwaysAssert(fp);
+ return fp;
+}
+
+CircleInside2PtConicalEffect::GLSLCircleInside2PtConicalProcessor
+ ::GLSLCircleInside2PtConicalProcessor(const GrProcessor& processor)
+ : fVSVaryingName(nullptr)
+ , fFSVaryingName(nullptr)
+ , fCachedCenterX(SK_ScalarMax)
+ , fCachedCenterY(SK_ScalarMax)
+ , fCachedA(SK_ScalarMax)
+ , fCachedB(SK_ScalarMax)
+ , fCachedC(SK_ScalarMax) {}
+
+void CircleInside2PtConicalEffect::GLSLCircleInside2PtConicalProcessor::emitCode(EmitArgs& args) {
+ const CircleInside2PtConicalEffect& ge = args.fFp.cast<CircleInside2PtConicalEffect>();
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ this->emitUniforms(uniformHandler, ge);
+ fCenterUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType, kDefault_GrSLPrecision,
+ "Conical2FSCenter");
+ fParamUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec3f_GrSLType, kDefault_GrSLPrecision,
+ "Conical2FSParams");
+ SkString tName("t");
+
+ GrGLSLShaderVar center = uniformHandler->getUniformVariable(fCenterUni);
+ // params.x = A
+ // params.y = B
+ // params.z = C
+ GrGLSLShaderVar params = uniformHandler->getUniformVariable(fParamUni);
+
+ // if we have a vec3 from being in perspective, convert it to a vec2 first
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString coords2DString = fragBuilder->ensureCoords2D(args.fTransformedCoords[0]);
+ const char* coords2D = coords2DString.c_str();
+
+ // p = coords2D
+ // e = center end
+ // r = radius end
+ // A = dot(e, e) - r^2 + 2 * r - 1
+ // B = (r -1) / A
+ // C = 1 / A
+ // d = dot(e, p) + B
+ // t = d +/- sqrt(d^2 - A * dot(p, p) + C)
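+    // (These follow from solving |q - t * e| = 1 + t * (r - 1) for t, where q = A * p is the
+    // point before the final 1 / A scale applied in set_matrix_circle_conical; rewriting the
+    // solution in terms of p gives the d / B / C form above.)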
+ fragBuilder->codeAppendf("\tfloat pDotp = dot(%s, %s);\n", coords2D, coords2D);
+ fragBuilder->codeAppendf("\tfloat d = dot(%s, %s) + %s.y;\n", coords2D, center.c_str(),
+ params.c_str());
+ fragBuilder->codeAppendf("\tfloat %s = d + sqrt(d * d - %s.x * pDotp + %s.z);\n",
+ tName.c_str(), params.c_str(), params.c_str());
+
+ this->emitColor(fragBuilder,
+ uniformHandler,
+ args.fGLSLCaps,
+ ge,
+ tName.c_str(),
+ args.fOutputColor,
+ args.fInputColor,
+ args.fTexSamplers);
+}
+
+void CircleInside2PtConicalEffect::GLSLCircleInside2PtConicalProcessor::onSetData(
+ const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& processor) {
+ INHERITED::onSetData(pdman, processor);
+ const CircleInside2PtConicalEffect& data = processor.cast<CircleInside2PtConicalEffect>();
+ SkScalar centerX = data.centerX();
+ SkScalar centerY = data.centerY();
+ SkScalar A = data.A();
+ SkScalar B = data.B();
+ SkScalar C = data.C();
+
+ if (fCachedCenterX != centerX || fCachedCenterY != centerY ||
+ fCachedA != A || fCachedB != B || fCachedC != C) {
+
+ pdman.set2f(fCenterUni, SkScalarToFloat(centerX), SkScalarToFloat(centerY));
+ pdman.set3f(fParamUni, SkScalarToFloat(A), SkScalarToFloat(B), SkScalarToFloat(C));
+
+ fCachedCenterX = centerX;
+ fCachedCenterY = centerY;
+ fCachedA = A;
+ fCachedB = B;
+ fCachedC = C;
+ }
+}
+
+void CircleInside2PtConicalEffect::GLSLCircleInside2PtConicalProcessor::GenKey(
+ const GrProcessor& processor,
+ const GrGLSLCaps&, GrProcessorKeyBuilder* b) {
+ b->add32(GenBaseGradientKey(processor));
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class CircleOutside2PtConicalEffect : public GrGradientEffect {
+public:
+ class GLSLCircleOutside2PtConicalProcessor;
+
+ static sk_sp<GrFragmentProcessor> Make(const CreateArgs& args, const CircleConicalInfo& info) {
+ return sk_sp<GrFragmentProcessor>(
+ new CircleOutside2PtConicalEffect(args, info));
+ }
+
+ virtual ~CircleOutside2PtConicalEffect() {}
+
+ const char* name() const override { return "Two-Point Conical Gradient Outside"; }
+
+ SkScalar centerX() const { return fInfo.fCenterEnd.fX; }
+ SkScalar centerY() const { return fInfo.fCenterEnd.fY; }
+ SkScalar A() const { return fInfo.fA; }
+ SkScalar B() const { return fInfo.fB; }
+ SkScalar C() const { return fInfo.fC; }
+ SkScalar tLimit() const { return fTLimit; }
+ bool isFlipped() const { return fIsFlipped; }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor& sBase) const override {
+ const CircleOutside2PtConicalEffect& s = sBase.cast<CircleOutside2PtConicalEffect>();
+ return (INHERITED::onIsEqual(sBase) &&
+ this->fInfo.fCenterEnd == s.fInfo.fCenterEnd &&
+ this->fInfo.fA == s.fInfo.fA &&
+ this->fInfo.fB == s.fInfo.fB &&
+ this->fInfo.fC == s.fInfo.fC &&
+ this->fTLimit == s.fTLimit &&
+ this->fIsFlipped == s.fIsFlipped);
+ }
+
+ CircleOutside2PtConicalEffect(const CreateArgs& args, const CircleConicalInfo& info)
+ : INHERITED(args), fInfo(info) {
+ this->initClassID<CircleOutside2PtConicalEffect>();
+ const SkTwoPointConicalGradient& shader =
+ *static_cast<const SkTwoPointConicalGradient*>(args.fShader);
+ if (shader.getStartRadius() != shader.getEndRadius()) {
+ fTLimit = shader.getStartRadius() / (shader.getStartRadius() - shader.getEndRadius());
+ } else {
+ fTLimit = SK_ScalarMin;
+ }
+
+ fIsFlipped = shader.isFlippedGrad();
+ }
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ const CircleConicalInfo fInfo;
+ SkScalar fTLimit;
+ bool fIsFlipped;
+
+ typedef GrGradientEffect INHERITED;
+};
+
+class CircleOutside2PtConicalEffect::GLSLCircleOutside2PtConicalProcessor
+ : public GrGradientEffect::GLSLProcessor {
+public:
+ GLSLCircleOutside2PtConicalProcessor(const GrProcessor&);
+ virtual ~GLSLCircleOutside2PtConicalProcessor() {}
+
+ virtual void emitCode(EmitArgs&) override;
+
+ static void GenKey(const GrProcessor&, const GrGLSLCaps& caps, GrProcessorKeyBuilder* b);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+ UniformHandle fCenterUni;
+ UniformHandle fParamUni;
+
+ const char* fVSVaryingName;
+ const char* fFSVaryingName;
+
+ bool fIsFlipped;
+
+ // @{
+ /// Values last uploaded as uniforms
+
+ SkScalar fCachedCenterX;
+ SkScalar fCachedCenterY;
+ SkScalar fCachedA;
+ SkScalar fCachedB;
+ SkScalar fCachedC;
+ SkScalar fCachedTLimit;
+
+ // @}
+
+private:
+ typedef GrGradientEffect::GLSLProcessor INHERITED;
+
+};
+
+void CircleOutside2PtConicalEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ CircleOutside2PtConicalEffect::GLSLCircleOutside2PtConicalProcessor::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* CircleOutside2PtConicalEffect::onCreateGLSLInstance() const {
+ return new CircleOutside2PtConicalEffect::GLSLCircleOutside2PtConicalProcessor(*this);
+}
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(CircleOutside2PtConicalEffect);
+
+/*
+ * All two-point conical gradient test create functions may occasionally create edge-case shaders
+ */
+sk_sp<GrFragmentProcessor> CircleOutside2PtConicalEffect::TestCreate(GrProcessorTestData* d) {
+ SkPoint center1 = {d->fRandom->nextUScalar1(), d->fRandom->nextUScalar1()};
+ SkScalar radius1 = d->fRandom->nextUScalar1() + 0.0001f; // make sure radius1 != 0
+ SkPoint center2;
+ SkScalar radius2;
+ SkScalar diffLen;
+ do {
+ center2.set(d->fRandom->nextUScalar1(), d->fRandom->nextUScalar1());
+        // If the circles share a center then we can't be in the outside case
+ } while (center1 == center2);
+ SkPoint diff = center2 - center1;
+ diffLen = diff.length();
+    // Below makes sure that circle one is not contained within circle two,
+    // and that radius2 >= radius1 to match the sorting done on the CPU side
+ radius2 = radius1 + d->fRandom->nextRangeF(0.f, diffLen);
+
+ SkColor colors[kMaxRandomGradientColors];
+ SkScalar stopsArray[kMaxRandomGradientColors];
+ SkScalar* stops = stopsArray;
+ SkShader::TileMode tm;
+ int colorCount = RandomGradientParams(d->fRandom, colors, &stops, &tm);
+ auto shader = SkGradientShader::MakeTwoPointConical(center1, radius1, center2, radius2,
+ colors, stops, colorCount, tm);
+ SkMatrix viewMatrix = GrTest::TestMatrix(d->fRandom);
+ auto dstColorSpace = GrTest::TestColorSpace(d->fRandom);
+ sk_sp<GrFragmentProcessor> fp = shader->asFragmentProcessor(SkShader::AsFPArgs(
+ d->fContext, &viewMatrix, NULL, kNone_SkFilterQuality, dstColorSpace.get(),
+ SkSourceGammaTreatment::kRespect));
+ GrAlwaysAssert(fp);
+ return fp;
+}
+
+CircleOutside2PtConicalEffect::GLSLCircleOutside2PtConicalProcessor
+ ::GLSLCircleOutside2PtConicalProcessor(const GrProcessor& processor)
+ : fVSVaryingName(nullptr)
+ , fFSVaryingName(nullptr)
+ , fCachedCenterX(SK_ScalarMax)
+ , fCachedCenterY(SK_ScalarMax)
+ , fCachedA(SK_ScalarMax)
+ , fCachedB(SK_ScalarMax)
+ , fCachedC(SK_ScalarMax)
+ , fCachedTLimit(SK_ScalarMax) {
+ const CircleOutside2PtConicalEffect& data = processor.cast<CircleOutside2PtConicalEffect>();
+ fIsFlipped = data.isFlipped();
+}
+
+void CircleOutside2PtConicalEffect::GLSLCircleOutside2PtConicalProcessor::emitCode(EmitArgs& args) {
+ const CircleOutside2PtConicalEffect& ge = args.fFp.cast<CircleOutside2PtConicalEffect>();
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ this->emitUniforms(uniformHandler, ge);
+ fCenterUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType, kDefault_GrSLPrecision,
+ "Conical2FSCenter");
+ fParamUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType, kDefault_GrSLPrecision,
+ "Conical2FSParams");
+ SkString tName("t");
+
+ GrGLSLShaderVar center = uniformHandler->getUniformVariable(fCenterUni);
+ // params.x = A
+ // params.y = B
+ // params.z = C
+ GrGLSLShaderVar params = uniformHandler->getUniformVariable(fParamUni);
+
+ // if we have a vec3 from being in perspective, convert it to a vec2 first
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString coords2DString = fragBuilder->ensureCoords2D(args.fTransformedCoords[0]);
+ const char* coords2D = coords2DString.c_str();
+
+ // output will default to transparent black (we simply won't write anything
+ // else to it if invalid, instead of discarding or returning prematurely)
+ fragBuilder->codeAppendf("\t%s = vec4(0.0,0.0,0.0,0.0);\n", args.fOutputColor);
+
+ // p = coords2D
+ // e = center end
+ // r = radius end
+ // A = dot(e, e) - r^2 + 2 * r - 1
+ // B = (r -1) / A
+ // C = 1 / A
+ // d = dot(e, p) + B
+ // t = d +/- sqrt(d^2 - A * dot(p, p) + C)
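+    // params.w holds tLimit = r0 / (r0 - r1), the t at which the interpolated radius
+    // r0 + t * (r1 - r0) reaches zero; requiring t >= tLimit below rejects results that would
+    // correspond to a negative radius.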
+
+ fragBuilder->codeAppendf("\tfloat pDotp = dot(%s, %s);\n", coords2D, coords2D);
+ fragBuilder->codeAppendf("\tfloat d = dot(%s, %s) + %s.y;\n", coords2D, center.c_str(),
+ params.c_str());
+ fragBuilder->codeAppendf("\tfloat deter = d * d - %s.x * pDotp + %s.z;\n", params.c_str(),
+ params.c_str());
+
+ // Must check to see if we flipped the circle order (to make sure start radius < end radius)
+ // If so we must also flip sign on sqrt
+ if (!fIsFlipped) {
+ fragBuilder->codeAppendf("\tfloat %s = d + sqrt(deter);\n", tName.c_str());
+ } else {
+ fragBuilder->codeAppendf("\tfloat %s = d - sqrt(deter);\n", tName.c_str());
+ }
+
+ fragBuilder->codeAppendf("\tif (%s >= %s.w && deter >= 0.0) {\n",
+ tName.c_str(), params.c_str());
+ fragBuilder->codeAppend("\t\t");
+ this->emitColor(fragBuilder,
+ uniformHandler,
+ args.fGLSLCaps,
+ ge,
+ tName.c_str(),
+ args.fOutputColor,
+ args.fInputColor,
+ args.fTexSamplers);
+ fragBuilder->codeAppend("\t}\n");
+}
+
+void CircleOutside2PtConicalEffect::GLSLCircleOutside2PtConicalProcessor::onSetData(
+ const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& processor) {
+ INHERITED::onSetData(pdman, processor);
+ const CircleOutside2PtConicalEffect& data = processor.cast<CircleOutside2PtConicalEffect>();
+ SkASSERT(data.isFlipped() == fIsFlipped);
+ SkScalar centerX = data.centerX();
+ SkScalar centerY = data.centerY();
+ SkScalar A = data.A();
+ SkScalar B = data.B();
+ SkScalar C = data.C();
+ SkScalar tLimit = data.tLimit();
+
+ if (fCachedCenterX != centerX || fCachedCenterY != centerY ||
+ fCachedA != A || fCachedB != B || fCachedC != C || fCachedTLimit != tLimit) {
+
+ pdman.set2f(fCenterUni, SkScalarToFloat(centerX), SkScalarToFloat(centerY));
+ pdman.set4f(fParamUni, SkScalarToFloat(A), SkScalarToFloat(B), SkScalarToFloat(C),
+ SkScalarToFloat(tLimit));
+
+ fCachedCenterX = centerX;
+ fCachedCenterY = centerY;
+ fCachedA = A;
+ fCachedB = B;
+ fCachedC = C;
+ fCachedTLimit = tLimit;
+ }
+}
+
+void CircleOutside2PtConicalEffect::GLSLCircleOutside2PtConicalProcessor::GenKey(
+ const GrProcessor& processor,
+ const GrGLSLCaps&, GrProcessorKeyBuilder* b) {
+ uint32_t* key = b->add32n(2);
+ key[0] = GenBaseGradientKey(processor);
+ key[1] = processor.cast<CircleOutside2PtConicalEffect>().isFlipped();
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> Gr2PtConicalGradientEffect::Make(
+ const GrGradientEffect::CreateArgs& args) {
+ const SkTwoPointConicalGradient& shader =
+ *static_cast<const SkTwoPointConicalGradient*>(args.fShader);
+
+ SkMatrix matrix;
+ if (!shader.getLocalMatrix().invert(&matrix)) {
+ return nullptr;
+ }
+ if (args.fMatrix) {
+ SkMatrix inv;
+ if (!args.fMatrix->invert(&inv)) {
+ return nullptr;
+ }
+ matrix.postConcat(inv);
+ }
+
+ GrGradientEffect::CreateArgs newArgs(args.fContext, args.fShader, &matrix, args.fTileMode,
+ std::move(args.fColorSpaceXform), args.fGammaCorrect);
+
+ if (shader.getStartRadius() < kErrorTol) {
+ SkScalar focalX;
+ ConicalType type = set_matrix_focal_conical(shader, &matrix, &focalX);
+ if (type == kInside_ConicalType) {
+ return FocalInside2PtConicalEffect::Make(newArgs, focalX);
+ } else if(type == kEdge_ConicalType) {
+ set_matrix_edge_conical(shader, &matrix);
+ return Edge2PtConicalEffect::Make(newArgs);
+ } else {
+ return FocalOutside2PtConicalEffect::Make(newArgs, focalX);
+ }
+ }
+
+ CircleConicalInfo info;
+ ConicalType type = set_matrix_circle_conical(shader, &matrix, &info);
+
+ if (type == kInside_ConicalType) {
+ return CircleInside2PtConicalEffect::Make(newArgs, info);
+ } else if (type == kEdge_ConicalType) {
+ set_matrix_edge_conical(shader, &matrix);
+ return Edge2PtConicalEffect::Make(newArgs);
+ } else {
+ return CircleOutside2PtConicalEffect::Make(newArgs, info);
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/effects/gradients/SkTwoPointConicalGradient_gpu.h b/gfx/skia/skia/src/effects/gradients/SkTwoPointConicalGradient_gpu.h
new file mode 100644
index 000000000..46edb1f7d
--- /dev/null
+++ b/gfx/skia/skia/src/effects/gradients/SkTwoPointConicalGradient_gpu.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTwoPointConicalGradient_gpu_DEFINED
+#define SkTwoPointConicalGradient_gpu_DEFINED
+
+#include "SkGradientShaderPriv.h"
+
+class GrProcessor;
+class SkTwoPointConicalGradient;
+
+namespace Gr2PtConicalGradientEffect {
+ /**
+ * Creates an effect that produces a two point conical gradient based on the
+ * shader passed in.
+ */
+ sk_sp<GrFragmentProcessor> Make(const GrGradientEffect::CreateArgs& args);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/fonts/SkFontMgr_indirect.cpp b/gfx/skia/skia/src/fonts/SkFontMgr_indirect.cpp
new file mode 100644
index 000000000..070c0aa40
--- /dev/null
+++ b/gfx/skia/skia/src/fonts/SkFontMgr_indirect.cpp
@@ -0,0 +1,201 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkDataTable.h"
+#include "SkFontMgr.h"
+#include "SkFontMgr_indirect.h"
+#include "SkFontStyle.h"
+#include "SkMutex.h"
+#include "SkOnce.h"
+#include "SkRefCnt.h"
+#include "SkRemotableFontMgr.h"
+#include "SkStream.h"
+#include "SkString.h"
+#include "SkTArray.h"
+#include "SkTypeface.h"
+#include "SkTypes.h"
+#include "SkTemplates.h"
+
+class SkData;
+
+class SkStyleSet_Indirect : public SkFontStyleSet {
+public:
+ /** Takes ownership of the SkRemotableFontIdentitySet. */
+ SkStyleSet_Indirect(const SkFontMgr_Indirect* owner, int familyIndex,
+ SkRemotableFontIdentitySet* data)
+ : fOwner(SkRef(owner)), fFamilyIndex(familyIndex), fData(data)
+ { }
+
+ int count() override { return fData->count(); }
+
+ void getStyle(int index, SkFontStyle* fs, SkString* style) override {
+ if (fs) {
+ *fs = fData->at(index).fFontStyle;
+ }
+ if (style) {
+ // TODO: is this useful? Current locale?
+ style->reset();
+ }
+ }
+
+ SkTypeface* createTypeface(int index) override {
+ return fOwner->createTypefaceFromFontId(fData->at(index));
+ }
+
+ SkTypeface* matchStyle(const SkFontStyle& pattern) override {
+ if (fFamilyIndex >= 0) {
+ SkFontIdentity id = fOwner->fProxy->matchIndexStyle(fFamilyIndex, pattern);
+ return fOwner->createTypefaceFromFontId(id);
+ }
+
+ return this->matchStyleCSS3(pattern);
+ }
+private:
+ SkAutoTUnref<const SkFontMgr_Indirect> fOwner;
+ int fFamilyIndex;
+ SkAutoTUnref<SkRemotableFontIdentitySet> fData;
+};
+
+void SkFontMgr_Indirect::set_up_family_names(const SkFontMgr_Indirect* self) {
+ self->fFamilyNames = self->fProxy->getFamilyNames();
+}
+
+int SkFontMgr_Indirect::onCountFamilies() const {
+ fFamilyNamesInitOnce(SkFontMgr_Indirect::set_up_family_names, this);
+ return fFamilyNames->count();
+}
+
+void SkFontMgr_Indirect::onGetFamilyName(int index, SkString* familyName) const {
+ fFamilyNamesInitOnce(SkFontMgr_Indirect::set_up_family_names, this);
+ if (index >= fFamilyNames->count()) {
+ familyName->reset();
+ return;
+ }
+ familyName->set(fFamilyNames->atStr(index));
+}
+
+SkFontStyleSet* SkFontMgr_Indirect::onCreateStyleSet(int index) const {
+ SkRemotableFontIdentitySet* set = fProxy->getIndex(index);
+ if (nullptr == set) {
+ return nullptr;
+ }
+ return new SkStyleSet_Indirect(this, index, set);
+}
+
+SkFontStyleSet* SkFontMgr_Indirect::onMatchFamily(const char familyName[]) const {
+ return new SkStyleSet_Indirect(this, -1, fProxy->matchName(familyName));
+}
+
+SkTypeface* SkFontMgr_Indirect::createTypefaceFromFontId(const SkFontIdentity& id) const {
+ if (id.fDataId == SkFontIdentity::kInvalidDataId) {
+ return nullptr;
+ }
+
+ SkAutoMutexAcquire ama(fDataCacheMutex);
+
+ SkAutoTUnref<SkTypeface> dataTypeface;
+ int dataTypefaceIndex = 0;
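+    // Walk the cache: an exact (dataId, ttcIndex) match that is still alive is returned
+    // directly; otherwise remember the first live entry sharing the same dataId so its
+    // stream can be reused, and prune expired entries along the way.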
+ for (int i = 0; i < fDataCache.count(); ++i) {
+ const DataEntry& entry = fDataCache[i];
+ if (entry.fDataId == id.fDataId) {
+ if (entry.fTtcIndex == id.fTtcIndex &&
+ !entry.fTypeface->weak_expired() && entry.fTypeface->try_ref())
+ {
+ return entry.fTypeface;
+ }
+ if (dataTypeface.get() == nullptr &&
+ !entry.fTypeface->weak_expired() && entry.fTypeface->try_ref())
+ {
+ dataTypeface.reset(entry.fTypeface);
+ dataTypefaceIndex = entry.fTtcIndex;
+ }
+ }
+
+ if (entry.fTypeface->weak_expired()) {
+ fDataCache.removeShuffle(i);
+ --i;
+ }
+ }
+
+ // No exact match, but did find a data match.
+ if (dataTypeface.get() != nullptr) {
+ SkAutoTDelete<SkStreamAsset> stream(dataTypeface->openStream(nullptr));
+ if (stream.get() != nullptr) {
+ return fImpl->createFromStream(stream.release(), dataTypefaceIndex);
+ }
+ }
+
+ // No data match, request data and add entry.
+ SkAutoTDelete<SkStreamAsset> stream(fProxy->getData(id.fDataId));
+ if (stream.get() == nullptr) {
+ return nullptr;
+ }
+
+ SkAutoTUnref<SkTypeface> typeface(fImpl->createFromStream(stream.release(), id.fTtcIndex));
+ if (typeface.get() == nullptr) {
+ return nullptr;
+ }
+
+ DataEntry& newEntry = fDataCache.push_back();
+ typeface->weak_ref();
+ newEntry.fDataId = id.fDataId;
+ newEntry.fTtcIndex = id.fTtcIndex;
+ newEntry.fTypeface = typeface.get(); // weak reference passed to new entry.
+
+ return typeface.release();
+}
+
+SkTypeface* SkFontMgr_Indirect::onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontStyle) const {
+ SkFontIdentity id = fProxy->matchNameStyle(familyName, fontStyle);
+ return this->createTypefaceFromFontId(id);
+}
+
+SkTypeface* SkFontMgr_Indirect::onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle& style,
+ const char* bcp47[],
+ int bcp47Count,
+ SkUnichar character) const {
+ SkFontIdentity id = fProxy->matchNameStyleCharacter(familyName, style, bcp47,
+ bcp47Count, character);
+ return this->createTypefaceFromFontId(id);
+}
+
+SkTypeface* SkFontMgr_Indirect::onMatchFaceStyle(const SkTypeface* familyMember,
+ const SkFontStyle& fontStyle) const {
+ SkString familyName;
+ familyMember->getFamilyName(&familyName);
+ return this->matchFamilyStyle(familyName.c_str(), fontStyle);
+}
+
+SkTypeface* SkFontMgr_Indirect::onCreateFromStream(SkStreamAsset* stream, int ttcIndex) const {
+ return fImpl->createFromStream(stream, ttcIndex);
+}
+
+SkTypeface* SkFontMgr_Indirect::onCreateFromFile(const char path[], int ttcIndex) const {
+ return fImpl->createFromFile(path, ttcIndex);
+}
+
+SkTypeface* SkFontMgr_Indirect::onCreateFromData(SkData* data, int ttcIndex) const {
+ return fImpl->createFromData(data, ttcIndex);
+}
+
+SkTypeface* SkFontMgr_Indirect::onLegacyCreateTypeface(const char familyName[],
+ SkFontStyle style) const {
+ SkAutoTUnref<SkTypeface> face(this->matchFamilyStyle(familyName, style));
+
+ if (nullptr == face.get()) {
+ face.reset(this->matchFamilyStyle(nullptr, style));
+ }
+
+ if (nullptr == face.get()) {
+ SkFontIdentity fontId = this->fProxy->matchIndexStyle(0, style);
+ face.reset(this->createTypefaceFromFontId(fontId));
+ }
+
+ return face.release();
+}
diff --git a/gfx/skia/skia/src/fonts/SkGScalerContext.cpp b/gfx/skia/skia/src/fonts/SkGScalerContext.cpp
new file mode 100644
index 000000000..5a439b7eb
--- /dev/null
+++ b/gfx/skia/skia/src/fonts/SkGScalerContext.cpp
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkDescriptor.h"
+#include "SkGScalerContext.h"
+#include "SkGlyph.h"
+#include "SkPath.h"
+#include "SkCanvas.h"
+
+#define STD_SIZE 1
+
+class SkGScalerContext : public SkScalerContext {
+public:
+ SkGScalerContext(SkGTypeface* face, const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : SkScalerContext(face, effects, desc)
+ , fFace(face)
+ {
+
+ size_t descSize = SkDescriptor::ComputeOverhead(1) + sizeof(SkScalerContext::Rec);
+ SkAutoDescriptor ad(descSize);
+ SkDescriptor* newDesc = ad.getDesc();
+
+ newDesc->init();
+ void* entry = newDesc->addEntry(kRec_SkDescriptorTag,
+ sizeof(SkScalerContext::Rec), &fRec);
+ {
+ SkScalerContext::Rec* rec = (SkScalerContext::Rec*)entry;
+ rec->fTextSize = STD_SIZE;
+ rec->fPreScaleX = SK_Scalar1;
+ rec->fPreSkewX = 0;
+ rec->fPost2x2[0][0] = rec->fPost2x2[1][1] = SK_Scalar1;
+ rec->fPost2x2[1][0] = rec->fPost2x2[0][1] = 0;
+ }
+ SkASSERT(descSize == newDesc->getLength());
+ newDesc->computeChecksum();
+
+ fProxy = face->proxy()->createScalerContext(effects, newDesc);
+
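+    // The proxy context renders at a fixed STD_SIZE; fMatrix maps those unit-size outlines
+    // back up to the size and transform requested in fRec.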
+ fRec.getSingleMatrix(&fMatrix);
+ fMatrix.preScale(SK_Scalar1 / STD_SIZE, SK_Scalar1 / STD_SIZE);
+ }
+ virtual ~SkGScalerContext() { delete fProxy; }
+
+protected:
+ unsigned generateGlyphCount() override;
+ uint16_t generateCharToGlyph(SkUnichar) override;
+ void generateAdvance(SkGlyph*) override;
+ void generateMetrics(SkGlyph*) override;
+ void generateImage(const SkGlyph&) override;
+ void generatePath(const SkGlyph&, SkPath*) override;
+ void generateFontMetrics(SkPaint::FontMetrics*) override;
+
+private:
+ SkGTypeface* fFace;
+ SkScalerContext* fProxy;
+ SkMatrix fMatrix;
+};
+
+unsigned SkGScalerContext::generateGlyphCount() {
+ return fProxy->getGlyphCount();
+}
+
+uint16_t SkGScalerContext::generateCharToGlyph(SkUnichar uni) {
+ return fProxy->charToGlyphID(uni);
+}
+
+void SkGScalerContext::generateAdvance(SkGlyph* glyph) {
+ fProxy->getAdvance(glyph);
+
+ SkVector advance;
+ fMatrix.mapXY(SkFloatToScalar(glyph->fAdvanceX),
+ SkFloatToScalar(glyph->fAdvanceY), &advance);
+ glyph->fAdvanceX = SkScalarToFloat(advance.fX);
+ glyph->fAdvanceY = SkScalarToFloat(advance.fY);
+}
+
+void SkGScalerContext::generateMetrics(SkGlyph* glyph) {
+ fProxy->getMetrics(glyph);
+
+ SkVector advance;
+ fMatrix.mapXY(SkFloatToScalar(glyph->fAdvanceX),
+ SkFloatToScalar(glyph->fAdvanceY), &advance);
+ glyph->fAdvanceX = SkScalarToFloat(advance.fX);
+ glyph->fAdvanceY = SkScalarToFloat(advance.fY);
+
+ SkPath path;
+ fProxy->getPath(*glyph, &path);
+ path.transform(fMatrix);
+
+ SkRect storage;
+ const SkPaint& paint = fFace->paint();
+ const SkRect& newBounds = paint.doComputeFastBounds(path.getBounds(),
+ &storage,
+ SkPaint::kFill_Style);
+ SkIRect ibounds;
+ newBounds.roundOut(&ibounds);
+ glyph->fLeft = ibounds.fLeft;
+ glyph->fTop = ibounds.fTop;
+ glyph->fWidth = ibounds.width();
+ glyph->fHeight = ibounds.height();
+ glyph->fMaskFormat = SkMask::kARGB32_Format;
+}
+
+void SkGScalerContext::generateImage(const SkGlyph& glyph) {
+ if (SkMask::kARGB32_Format == glyph.fMaskFormat) {
+ SkPath path;
+ fProxy->getPath(glyph, &path);
+
+ SkBitmap bm;
+ bm.installPixels(SkImageInfo::MakeN32Premul(glyph.fWidth, glyph.fHeight),
+ glyph.fImage, glyph.rowBytes());
+ bm.eraseColor(0);
+
+ SkCanvas canvas(bm);
+ canvas.translate(-SkIntToScalar(glyph.fLeft),
+ -SkIntToScalar(glyph.fTop));
+ canvas.concat(fMatrix);
+ canvas.drawPath(path, fFace->paint());
+ } else {
+ fProxy->getImage(glyph);
+ }
+}
+
+void SkGScalerContext::generatePath(const SkGlyph& glyph, SkPath* path) {
+ fProxy->getPath(glyph, path);
+ path->transform(fMatrix);
+}
+
+void SkGScalerContext::generateFontMetrics(SkPaint::FontMetrics* metrics) {
+ fProxy->getFontMetrics(metrics);
+ if (metrics) {
+ SkScalar scale = fMatrix.getScaleY();
+ metrics->fTop = SkScalarMul(metrics->fTop, scale);
+ metrics->fAscent = SkScalarMul(metrics->fAscent, scale);
+ metrics->fDescent = SkScalarMul(metrics->fDescent, scale);
+ metrics->fBottom = SkScalarMul(metrics->fBottom, scale);
+ metrics->fLeading = SkScalarMul(metrics->fLeading, scale);
+ metrics->fAvgCharWidth = SkScalarMul(metrics->fAvgCharWidth, scale);
+ metrics->fXMin = SkScalarMul(metrics->fXMin, scale);
+ metrics->fXMax = SkScalarMul(metrics->fXMax, scale);
+ metrics->fXHeight = SkScalarMul(metrics->fXHeight, scale);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkTypefaceCache.h"
+
+SkGTypeface::SkGTypeface(sk_sp<SkTypeface> proxy, const SkPaint& paint)
+ : SkTypeface(proxy->fontStyle(), false)
+ , fProxy(std::move(proxy))
+ , fPaint(paint)
+{}
+
+SkScalerContext* SkGTypeface::onCreateScalerContext(const SkScalerContextEffects& effects,
+ const SkDescriptor* desc) const {
+ return new SkGScalerContext(const_cast<SkGTypeface*>(this), effects, desc);
+}
+
+void SkGTypeface::onFilterRec(SkScalerContextRec* rec) const {
+ fProxy->filterRec(rec);
+ rec->setHinting(SkPaint::kNo_Hinting);
+ rec->fMaskFormat = SkMask::kARGB32_Format;
+}
+
+SkAdvancedTypefaceMetrics* SkGTypeface::onGetAdvancedTypefaceMetrics(
+ PerGlyphInfo info,
+ const uint32_t* glyphIDs,
+ uint32_t glyphIDsCount) const {
+ return fProxy->getAdvancedTypefaceMetrics(info, glyphIDs, glyphIDsCount);
+}
+
+SkStreamAsset* SkGTypeface::onOpenStream(int* ttcIndex) const {
+ return fProxy->openStream(ttcIndex);
+}
+
+void SkGTypeface::onGetFontDescriptor(SkFontDescriptor* desc,
+ bool* isLocal) const {
+ fProxy->getFontDescriptor(desc, isLocal);
+}
+
+int SkGTypeface::onCharsToGlyphs(const void* chars, Encoding encoding,
+ uint16_t glyphs[], int glyphCount) const {
+ return fProxy->charsToGlyphs(chars, encoding, glyphs, glyphCount);
+}
+
+int SkGTypeface::onCountGlyphs() const {
+ return fProxy->countGlyphs();
+}
+
+int SkGTypeface::onGetUPEM() const {
+ return fProxy->getUnitsPerEm();
+}
+
+void SkGTypeface::onGetFamilyName(SkString* familyName) const {
+ fProxy->getFamilyName(familyName);
+}
+
+SkTypeface::LocalizedStrings* SkGTypeface::onCreateFamilyNameIterator() const {
+ return fProxy->createFamilyNameIterator();
+}
+
+int SkGTypeface::onGetTableTags(SkFontTableTag tags[]) const {
+ return fProxy->getTableTags(tags);
+}
+
+size_t SkGTypeface::onGetTableData(SkFontTableTag tag, size_t offset,
+ size_t length, void* data) const {
+ return fProxy->getTableData(tag, offset, length, data);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if 0
+// under construction -- defining a font purely in terms of Skia primitives,
+// a la an SVG font.
+class SkGFont : public SkRefCnt {
+public:
+ virtual ~SkGFont();
+
+ int unicharToGlyph(SkUnichar) const;
+
+ int countGlyphs() const { return fCount; }
+
+ float getAdvance(int index) const {
+ SkASSERT((unsigned)index < (unsigned)fCount);
+ return fGlyphs[index].fAdvance;
+ }
+
+ const SkPath& getPath(int index) const {
+ SkASSERT((unsigned)index < (unsigned)fCount);
+ return fGlyphs[index].fPath;
+ }
+
+private:
+ struct Glyph {
+ SkUnichar fUni;
+ float fAdvance;
+ SkPath fPath;
+ };
+ int fCount;
+ Glyph* fGlyphs;
+
+ friend class SkGFontBuilder;
+ SkGFont(int count, Glyph* array);
+};
+
+class SkGFontBuilder {
+public:
+
+};
+#endif
diff --git a/gfx/skia/skia/src/fonts/SkGScalerContext.h b/gfx/skia/skia/src/fonts/SkGScalerContext.h
new file mode 100644
index 000000000..3eb25a81d
--- /dev/null
+++ b/gfx/skia/skia/src/fonts/SkGScalerContext.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGScalerContext_DEFINED
+#define SkGScalerContext_DEFINED
+
+#include "SkScalerContext.h"
+#include "SkTypeface.h"
+
+class SkGTypeface : public SkTypeface {
+public:
+ SkGTypeface(sk_sp<SkTypeface> proxy, const SkPaint&);
+
+ SkTypeface* proxy() const { return fProxy.get(); }
+ const SkPaint& paint() const { return fPaint; }
+
+protected:
+ SkScalerContext* onCreateScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor*) const override;
+ void onFilterRec(SkScalerContextRec*) const override;
+ SkAdvancedTypefaceMetrics* onGetAdvancedTypefaceMetrics(
+ PerGlyphInfo,
+ const uint32_t* glyphIDs,
+ uint32_t glyphIDsCount) const override;
+ SkStreamAsset* onOpenStream(int* ttcIndex) const override;
+ void onGetFontDescriptor(SkFontDescriptor*, bool* isLocal) const override;
+
+ int onCharsToGlyphs(const void* chars, Encoding encoding,
+ uint16_t glyphs[], int glyphCount) const override;
+ int onCountGlyphs() const override;
+ int onGetUPEM() const override;
+
+ void onGetFamilyName(SkString* familyName) const override;
+ SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override;
+
+ int onGetTableTags(SkFontTableTag tags[]) const override;
+ size_t onGetTableData(SkFontTableTag, size_t offset,
+ size_t length, void* data) const override;
+
+private:
+ sk_sp<SkTypeface> fProxy;
+ SkPaint fPaint;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/fonts/SkRandomScalerContext.cpp b/gfx/skia/skia/src/fonts/SkRandomScalerContext.cpp
new file mode 100644
index 000000000..c9cb87c03
--- /dev/null
+++ b/gfx/skia/skia/src/fonts/SkRandomScalerContext.cpp
@@ -0,0 +1,255 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkRandomScalerContext.h"
+#include "SkGlyph.h"
+#include "SkPath.h"
+#include "SkCanvas.h"
+#include "SkRasterizer.h"
+
+class SkRandomScalerContext : public SkScalerContext {
+public:
+ SkRandomScalerContext(SkRandomTypeface*, const SkScalerContextEffects&,
+ const SkDescriptor*, bool fFakeIt);
+ virtual ~SkRandomScalerContext();
+
+protected:
+ unsigned generateGlyphCount() override;
+ uint16_t generateCharToGlyph(SkUnichar) override;
+ void generateAdvance(SkGlyph*) override;
+ void generateMetrics(SkGlyph*) override;
+ void generateImage(const SkGlyph&) override;
+ void generatePath(const SkGlyph&, SkPath*) override;
+ void generateFontMetrics(SkPaint::FontMetrics*) override;
+
+private:
+ SkRandomTypeface* fFace;
+ SkScalerContext* fProxy;
+ bool fFakeIt;
+};
+
+#define STD_SIZE 1
+
+#include "SkDescriptor.h"
+
+SkRandomScalerContext::SkRandomScalerContext(SkRandomTypeface* face,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc,
+ bool fakeIt)
+ : SkScalerContext(face, effects, desc)
+ , fFace(face)
+ , fFakeIt(fakeIt) {
+ fProxy = face->proxy()->createScalerContext(effects, desc);
+}
+
+SkRandomScalerContext::~SkRandomScalerContext() { delete fProxy; }
+
+unsigned SkRandomScalerContext::generateGlyphCount() {
+ return fProxy->getGlyphCount();
+}
+
+uint16_t SkRandomScalerContext::generateCharToGlyph(SkUnichar uni) {
+ return fProxy->charToGlyphID(uni);
+}
+
+void SkRandomScalerContext::generateAdvance(SkGlyph* glyph) {
+ fProxy->getAdvance(glyph);
+}
+
+void SkRandomScalerContext::generateMetrics(SkGlyph* glyph) {
+ // Here we will change the mask format of the glyph
+ // NOTE this is being overridden by the base class
+ SkMask::Format format = SkMask::kARGB32_Format; // init to handle defective compilers
+ switch (glyph->getGlyphID() % 4) {
+ case 0:
+ format = SkMask::kLCD16_Format;
+ break;
+ case 1:
+ format = SkMask::kA8_Format;
+ break;
+ case 2:
+ format = SkMask::kARGB32_Format;
+ break;
+ case 3:
+ format = SkMask::kBW_Format;
+ break;
+ }
+
+ fProxy->getMetrics(glyph);
+
+ glyph->fMaskFormat = format;
+ if (fFakeIt) {
+ return;
+ }
+ if (SkMask::kARGB32_Format == format) {
+ SkPath path;
+ fProxy->getPath(*glyph, &path);
+
+ SkRect storage;
+ const SkPaint& paint = fFace->paint();
+ const SkRect& newBounds = paint.doComputeFastBounds(path.getBounds(),
+ &storage,
+ SkPaint::kFill_Style);
+ SkIRect ibounds;
+ newBounds.roundOut(&ibounds);
+ glyph->fLeft = ibounds.fLeft;
+ glyph->fTop = ibounds.fTop;
+ glyph->fWidth = ibounds.width();
+ glyph->fHeight = ibounds.height();
+ } else {
+ SkPath devPath, fillPath;
+ SkMatrix fillToDevMatrix;
+
+ this->internalGetPath(*glyph, &fillPath, &devPath, &fillToDevMatrix);
+
+ // just use devPath
+ const SkIRect ir = devPath.getBounds().roundOut();
+
+ if (ir.isEmpty() || !ir.is16Bit()) {
+ glyph->fLeft = 0;
+ glyph->fTop = 0;
+ glyph->fWidth = 0;
+ glyph->fHeight = 0;
+ return;
+ }
+ glyph->fLeft = ir.fLeft;
+ glyph->fTop = ir.fTop;
+ glyph->fWidth = SkToU16(ir.width());
+ glyph->fHeight = SkToU16(ir.height());
+
+ if (glyph->fWidth > 0) {
+ switch (glyph->fMaskFormat) {
+ case SkMask::kLCD16_Format:
+ glyph->fWidth += 2;
+ glyph->fLeft -= 1;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+}
+
+void SkRandomScalerContext::generateImage(const SkGlyph& glyph) {
+ SkMask::Format format = (SkMask::Format)glyph.fMaskFormat;
+ switch (glyph.getGlyphID() % 4) {
+ case 0:
+ format = SkMask::kLCD16_Format;
+ break;
+ case 1:
+ format = SkMask::kA8_Format;
+ break;
+ case 2:
+ format = SkMask::kARGB32_Format;
+ break;
+ case 3:
+ format = SkMask::kBW_Format;
+ break;
+ }
+ const_cast<SkGlyph&>(glyph).fMaskFormat = format;
+
+ // if the format is ARGB, we just draw the glyph from path ourselves. Otherwise, we force
+ // our proxy context to generate the image from paths.
+ if (!fFakeIt) {
+ if (SkMask::kARGB32_Format == glyph.fMaskFormat) {
+ SkPath path;
+ fProxy->getPath(glyph, &path);
+
+ SkBitmap bm;
+ bm.installPixels(SkImageInfo::MakeN32Premul(glyph.fWidth, glyph.fHeight),
+ glyph.fImage, glyph.rowBytes());
+ bm.eraseColor(0);
+
+ SkCanvas canvas(bm);
+ canvas.translate(-SkIntToScalar(glyph.fLeft),
+ -SkIntToScalar(glyph.fTop));
+ canvas.drawPath(path, fFace->paint());
+ } else {
+ fProxy->forceGenerateImageFromPath();
+ fProxy->getImage(glyph);
+ fProxy->forceOffGenerateImageFromPath();
+ }
+ } else {
+ sk_bzero(glyph.fImage, glyph.computeImageSize());
+ }
+}
+
+void SkRandomScalerContext::generatePath(const SkGlyph& glyph, SkPath* path) {
+ fProxy->getPath(glyph, path);
+}
+
+void SkRandomScalerContext::generateFontMetrics(SkPaint::FontMetrics* metrics) {
+ fProxy->getFontMetrics(metrics);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkTypefaceCache.h"
+
+SkRandomTypeface::SkRandomTypeface(sk_sp<SkTypeface> proxy, const SkPaint& paint, bool fakeIt)
+ : SkTypeface(proxy->fontStyle(), false)
+ , fProxy(std::move(proxy))
+ , fPaint(paint)
+ , fFakeIt(fakeIt) {}
+
+SkScalerContext* SkRandomTypeface::onCreateScalerContext(const SkScalerContextEffects& effects,
+ const SkDescriptor* desc) const {
+ return new SkRandomScalerContext(const_cast<SkRandomTypeface*>(this), effects, desc, fFakeIt);
+}
+
+void SkRandomTypeface::onFilterRec(SkScalerContextRec* rec) const {
+ fProxy->filterRec(rec);
+ rec->setHinting(SkPaint::kNo_Hinting);
+ rec->fMaskFormat = SkMask::kARGB32_Format;
+}
+
+SkAdvancedTypefaceMetrics* SkRandomTypeface::onGetAdvancedTypefaceMetrics(
+ PerGlyphInfo info,
+ const uint32_t* glyphIDs,
+ uint32_t glyphIDsCount) const {
+ return fProxy->getAdvancedTypefaceMetrics(info, glyphIDs, glyphIDsCount);
+}
+
+SkStreamAsset* SkRandomTypeface::onOpenStream(int* ttcIndex) const {
+ return fProxy->openStream(ttcIndex);
+}
+
+void SkRandomTypeface::onGetFontDescriptor(SkFontDescriptor* desc,
+ bool* isLocal) const {
+ fProxy->getFontDescriptor(desc, isLocal);
+}
+
+int SkRandomTypeface::onCharsToGlyphs(const void* chars, Encoding encoding,
+ uint16_t glyphs[], int glyphCount) const {
+ return fProxy->charsToGlyphs(chars, encoding, glyphs, glyphCount);
+}
+
+int SkRandomTypeface::onCountGlyphs() const {
+ return fProxy->countGlyphs();
+}
+
+int SkRandomTypeface::onGetUPEM() const {
+ return fProxy->getUnitsPerEm();
+}
+
+void SkRandomTypeface::onGetFamilyName(SkString* familyName) const {
+ fProxy->getFamilyName(familyName);
+}
+
+SkTypeface::LocalizedStrings* SkRandomTypeface::onCreateFamilyNameIterator() const {
+ return fProxy->createFamilyNameIterator();
+}
+
+int SkRandomTypeface::onGetTableTags(SkFontTableTag tags[]) const {
+ return fProxy->getTableTags(tags);
+}
+
+size_t SkRandomTypeface::onGetTableData(SkFontTableTag tag, size_t offset,
+ size_t length, void* data) const {
+ return fProxy->getTableData(tag, offset, length, data);
+}
+
diff --git a/gfx/skia/skia/src/fonts/SkRandomScalerContext.h b/gfx/skia/skia/src/fonts/SkRandomScalerContext.h
new file mode 100644
index 000000000..076689d93
--- /dev/null
+++ b/gfx/skia/skia/src/fonts/SkRandomScalerContext.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRandomScalerContext_DEFINED
+#define SkRandomScalerContext_DEFINED
+
+#include "SkScalerContext.h"
+#include "SkTypeface.h"
+
+/*
+ * This scaler context is for debug-only purposes. It will 'randomly' but deterministically return
+ * LCD / A8 / BW / ARGB masks based on the glyph ID.
+ */
+
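+// (The mask format is chosen as glyph ID % 4: 0 -> kLCD16, 1 -> kA8, 2 -> kARGB32,
+// 3 -> kBW; see the switch statements in SkRandomScalerContext.cpp.)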
+class SkRandomTypeface : public SkTypeface {
+public:
+ SkRandomTypeface(sk_sp<SkTypeface> proxy, const SkPaint&, bool fakeit);
+
+ SkTypeface* proxy() const { return fProxy.get(); }
+ const SkPaint& paint() const { return fPaint; }
+
+protected:
+ SkScalerContext* onCreateScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor*) const override;
+ void onFilterRec(SkScalerContextRec*) const override;
+ SkAdvancedTypefaceMetrics* onGetAdvancedTypefaceMetrics(
+ PerGlyphInfo,
+ const uint32_t* glyphIDs,
+ uint32_t glyphIDsCount) const override;
+ SkStreamAsset* onOpenStream(int* ttcIndex) const override;
+ void onGetFontDescriptor(SkFontDescriptor*, bool* isLocal) const override;
+
+ int onCharsToGlyphs(const void* chars, Encoding encoding,
+ uint16_t glyphs[], int glyphCount) const override;
+ int onCountGlyphs() const override;
+ int onGetUPEM() const override;
+
+ void onGetFamilyName(SkString* familyName) const override;
+ SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override;
+
+ int onGetTableTags(SkFontTableTag tags[]) const override;
+ size_t onGetTableData(SkFontTableTag, size_t offset,
+ size_t length, void* data) const override;
+
+private:
+ sk_sp<SkTypeface> fProxy;
+ SkPaint fPaint;
+ bool fFakeIt;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/fonts/SkRemotableFontMgr.cpp b/gfx/skia/skia/src/fonts/SkRemotableFontMgr.cpp
new file mode 100644
index 000000000..aca8a0b19
--- /dev/null
+++ b/gfx/skia/skia/src/fonts/SkRemotableFontMgr.cpp
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkOnce.h"
+#include "SkRemotableFontMgr.h"
+
+SkRemotableFontIdentitySet::SkRemotableFontIdentitySet(int count, SkFontIdentity** data)
+ : fCount(count), fData(count)
+{
+ SkASSERT(data);
+ *data = fData;
+}
+
+SkRemotableFontIdentitySet* SkRemotableFontIdentitySet::NewEmpty() {
+ static SkOnce once;
+ static SkRemotableFontIdentitySet* empty;
+ once([]{ empty = new SkRemotableFontIdentitySet; });
+ return SkRef(empty);
+}
diff --git a/gfx/skia/skia/src/fonts/SkTestScalerContext.cpp b/gfx/skia/skia/src/fonts/SkTestScalerContext.cpp
new file mode 100644
index 000000000..f7678a2ca
--- /dev/null
+++ b/gfx/skia/skia/src/fonts/SkTestScalerContext.cpp
@@ -0,0 +1,301 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAdvancedTypefaceMetrics.h"
+#include "SkBitmap.h"
+#include "SkCanvas.h"
+#include "SkDescriptor.h"
+#include "SkFontDescriptor.h"
+#include "SkGlyph.h"
+#include "SkMask.h"
+// #include "SkOTUtils.h"
+#include "SkScalerContext.h"
+#include "SkTestScalerContext.h"
+#include "SkTypefaceCache.h"
+
+SkTestFont::SkTestFont(const SkTestFontData& fontData)
+ : INHERITED()
+ , fCharCodes(fontData.fCharCodes)
+ , fCharCodesCount(fontData.fCharCodes ? fontData.fCharCodesCount : 0)
+ , fWidths(fontData.fWidths)
+ , fMetrics(fontData.fMetrics)
+ , fName(fontData.fName)
+ , fPaths(nullptr)
+{
+ init(fontData.fPoints, fontData.fVerbs);
+#ifdef SK_DEBUG
+ sk_bzero(fDebugBits, sizeof(fDebugBits));
+ sk_bzero(fDebugOverage, sizeof(fDebugOverage));
+#endif
+}
+
+SkTestFont::~SkTestFont() {
+ for (unsigned index = 0; index < fCharCodesCount; ++index) {
+ delete fPaths[index];
+ }
+ delete[] fPaths;
+}
+
+#ifdef SK_DEBUG
+
+#include "SkMutex.h"
+SK_DECLARE_STATIC_MUTEX(gUsedCharsMutex);
+
+#endif
+
+int SkTestFont::codeToIndex(SkUnichar charCode) const {
+#ifdef SK_DEBUG // detect missing test font data
+ {
+ SkAutoMutexAcquire ac(gUsedCharsMutex);
+ if (charCode >= ' ' && charCode <= '~') {
+ int bitOffset = charCode - ' ';
+ fDebugBits[bitOffset >> 3] |= 1 << (bitOffset & 7);
+ } else {
+ int index = 0;
+ while (fDebugOverage[index] != 0 && fDebugOverage[index] != charCode
+ && index < (int) sizeof(fDebugOverage)) {
+ ++index;
+ }
+ SkASSERT(index < (int) sizeof(fDebugOverage));
+ if (fDebugOverage[index] == 0) {
+ fDebugOverage[index] = charCode;
+ }
+ }
+ }
+#endif
+ for (unsigned index = 0; index < fCharCodesCount; ++index) {
+ if (fCharCodes[index] == (unsigned) charCode) {
+ return (int) index;
+ }
+ }
+
+ SkDEBUGF(("missing '%c' (%d) from %s (weight %d, width %d, slant %d)\n",
+ (char) charCode, charCode, fDebugName,
+ fDebugStyle.weight(), fDebugStyle.width(), fDebugStyle.slant()));
+ return 0;
+}
+
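+// Decodes the packed outline data: 'verbs' is a stream of SkPath::Verb values, one run per
+// glyph terminated by kDone_Verb, and 'pts' supplies 2/2/4/6 scalars per move/line/quad/cubic.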
+void SkTestFont::init(const SkScalar* pts, const unsigned char* verbs) {
+ fPaths = new SkPath* [fCharCodesCount];
+ for (unsigned index = 0; index < fCharCodesCount; ++index) {
+ SkPath* path = new SkPath;
+ SkPath::Verb verb;
+ while ((verb = (SkPath::Verb) *verbs++) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ path->moveTo(pts[0], pts[1]);
+ pts += 2;
+ break;
+ case SkPath::kLine_Verb:
+ path->lineTo(pts[0], pts[1]);
+ pts += 2;
+ break;
+ case SkPath::kQuad_Verb:
+ path->quadTo(pts[0], pts[1], pts[2], pts[3]);
+ pts += 4;
+ break;
+ case SkPath::kCubic_Verb:
+ path->cubicTo(pts[0], pts[1], pts[2], pts[3], pts[4], pts[5]);
+ pts += 6;
+ break;
+ case SkPath::kClose_Verb:
+ path->close();
+ break;
+ default:
+ SkDEBUGFAIL("bad verb");
+ return;
+ }
+ }
+ // This should make SkPath::getBounds() queries threadsafe.
+ path->updateBoundsCache();
+ fPaths[index] = path;
+ }
+}
+
+SkTestTypeface::SkTestTypeface(SkTestFont* testFont, const SkFontStyle& style)
+ : SkTypeface(style, false)
+ , fTestFont(testFont) {
+}
+
+void SkTestTypeface::getAdvance(SkGlyph* glyph) {
+ // TODO(benjaminwagner): Update users to use floats.
+ glyph->fAdvanceX = SkFixedToFloat(fTestFont->fWidths[glyph->getGlyphID()]);
+ glyph->fAdvanceY = 0;
+}
+
+void SkTestTypeface::getFontMetrics(SkPaint::FontMetrics* metrics) {
+ *metrics = fTestFont->fMetrics;
+}
+
+void SkTestTypeface::getMetrics(SkGlyph* glyph) {
+ // TODO(benjaminwagner): Update users to use floats.
+ glyph->fAdvanceX = SkFixedToFloat(fTestFont->fWidths[glyph->getGlyphID()]);
+ glyph->fAdvanceY = 0;
+}
+
+void SkTestTypeface::getPath(const SkGlyph& glyph, SkPath* path) {
+ *path = *fTestFont->fPaths[glyph.getGlyphID()];
+}
+
+void SkTestTypeface::onFilterRec(SkScalerContextRec* rec) const {
+ rec->setHinting(SkPaint::kNo_Hinting);
+}
+
+SkAdvancedTypefaceMetrics* SkTestTypeface::onGetAdvancedTypefaceMetrics(
+ PerGlyphInfo ,
+ const uint32_t* glyphIDs,
+ uint32_t glyphIDsCount) const {
+    // pdf only
+ SkAdvancedTypefaceMetrics* info = new SkAdvancedTypefaceMetrics;
+ info->fFontName.set(fTestFont->fName);
+ int glyphCount = this->onCountGlyphs();
+ info->fLastGlyphID = SkToU16(glyphCount - 1);
+
+ SkTDArray<SkUnichar>& toUnicode = info->fGlyphToUnicode;
+ toUnicode.setCount(glyphCount);
+ SkASSERT(glyphCount == SkToInt(fTestFont->fCharCodesCount));
+ for (int gid = 0; gid < glyphCount; ++gid) {
+ toUnicode[gid] = SkToS32(fTestFont->fCharCodes[gid]);
+ }
+ return info;
+}
+
+void SkTestTypeface::onGetFontDescriptor(SkFontDescriptor* desc, bool* isLocal) const {
+ desc->setFamilyName(fTestFont->fName);
+ desc->setStyle(this->fontStyle());
+ *isLocal = false;
+}
+
+int SkTestTypeface::onCharsToGlyphs(const void* chars, Encoding encoding,
+ uint16_t glyphs[], int glyphCount) const {
+ SkASSERT(encoding == kUTF16_Encoding);
+ for (int index = 0; index < glyphCount; ++index) {
+ SkUnichar ch = ((SkUnichar*) chars)[index];
+ glyphs[index] = fTestFont->codeToIndex(ch);
+ }
+ return glyphCount;
+}
+
+void SkTestTypeface::onGetFamilyName(SkString* familyName) const {
+ *familyName = fTestFont->fName;
+}
+
+SkTypeface::LocalizedStrings* SkTestTypeface::onCreateFamilyNameIterator() const {
+ SkString familyName(fTestFont->fName);
+    SkString language("und"); // undetermined
+    SkASSERT(0); // incomplete
+ return nullptr;
+// return new SkOTUtils::LocalizedStrings_SingleName(familyName, language);
+}
+
+class SkTestScalerContext : public SkScalerContext {
+public:
+ SkTestScalerContext(SkTestTypeface* face, const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : SkScalerContext(face, effects, desc)
+ , fFace(face)
+ {
+ fRec.getSingleMatrix(&fMatrix);
+ this->forceGenerateImageFromPath();
+ }
+
+ virtual ~SkTestScalerContext() {
+ }
+
+protected:
+ unsigned generateGlyphCount() override {
+ return fFace->onCountGlyphs();
+ }
+
+ uint16_t generateCharToGlyph(SkUnichar uni) override {
+ uint16_t glyph;
+ (void) fFace->onCharsToGlyphs((const void *) &uni, SkTypeface::kUTF16_Encoding, &glyph, 1);
+ return glyph;
+ }
+
+ void generateAdvance(SkGlyph* glyph) override {
+ fFace->getAdvance(glyph);
+
+ const SkVector advance = fMatrix.mapXY(SkFloatToScalar(glyph->fAdvanceX),
+ SkFloatToScalar(glyph->fAdvanceY));
+ glyph->fAdvanceX = SkScalarToFloat(advance.fX);
+ glyph->fAdvanceY = SkScalarToFloat(advance.fY);
+ }
+
+ void generateMetrics(SkGlyph* glyph) override {
+ fFace->getMetrics(glyph);
+
+ const SkVector advance = fMatrix.mapXY(SkFloatToScalar(glyph->fAdvanceX),
+ SkFloatToScalar(glyph->fAdvanceY));
+ glyph->fAdvanceX = SkScalarToFloat(advance.fX);
+ glyph->fAdvanceY = SkScalarToFloat(advance.fY);
+
+ SkPath path;
+ fFace->getPath(*glyph, &path);
+ path.transform(fMatrix);
+
+ SkRect storage;
+ const SkPaint paint;
+ const SkRect& newBounds = paint.doComputeFastBounds(path.getBounds(),
+ &storage,
+ SkPaint::kFill_Style);
+ SkIRect ibounds;
+ newBounds.roundOut(&ibounds);
+ glyph->fLeft = ibounds.fLeft;
+ glyph->fTop = ibounds.fTop;
+ glyph->fWidth = ibounds.width();
+ glyph->fHeight = ibounds.height();
+ }
+
+ void generateImage(const SkGlyph& glyph) override {
+ SkPath path;
+ fFace->getPath(glyph, &path);
+
+ SkBitmap bm;
+ bm.installPixels(SkImageInfo::MakeN32Premul(glyph.fWidth, glyph.fHeight),
+ glyph.fImage, glyph.rowBytes());
+ bm.eraseColor(0);
+
+ SkCanvas canvas(bm);
+ canvas.translate(-SkIntToScalar(glyph.fLeft),
+ -SkIntToScalar(glyph.fTop));
+ canvas.concat(fMatrix);
+ SkPaint paint;
+ paint.setAntiAlias(true);
+ canvas.drawPath(path, paint);
+ }
+
+ void generatePath(const SkGlyph& glyph, SkPath* path) override {
+ fFace->getPath(glyph, path);
+ path->transform(fMatrix);
+ }
+
+ void generateFontMetrics(SkPaint::FontMetrics* metrics) override {
+ fFace->getFontMetrics(metrics);
+ if (metrics) {
+ SkScalar scale = fMatrix.getScaleY();
+ metrics->fTop = SkScalarMul(metrics->fTop, scale);
+ metrics->fAscent = SkScalarMul(metrics->fAscent, scale);
+ metrics->fDescent = SkScalarMul(metrics->fDescent, scale);
+ metrics->fBottom = SkScalarMul(metrics->fBottom, scale);
+ metrics->fLeading = SkScalarMul(metrics->fLeading, scale);
+ metrics->fAvgCharWidth = SkScalarMul(metrics->fAvgCharWidth, scale);
+ metrics->fXMin = SkScalarMul(metrics->fXMin, scale);
+ metrics->fXMax = SkScalarMul(metrics->fXMax, scale);
+ metrics->fXHeight = SkScalarMul(metrics->fXHeight, scale);
+ }
+ }
+
+private:
+ SkTestTypeface* fFace;
+ SkMatrix fMatrix;
+};
+
+SkScalerContext* SkTestTypeface::onCreateScalerContext(const SkScalerContextEffects& effects,
+ const SkDescriptor* desc) const {
+ return new SkTestScalerContext(const_cast<SkTestTypeface*>(this), effects, desc);
+}
diff --git a/gfx/skia/skia/src/fonts/SkTestScalerContext.h b/gfx/skia/skia/src/fonts/SkTestScalerContext.h
new file mode 100644
index 000000000..a5fa1de90
--- /dev/null
+++ b/gfx/skia/skia/src/fonts/SkTestScalerContext.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTestScalerContext_DEFINED
+#define SkTestScalerContext_DEFINED
+
+#include "SkFixed.h"
+#include "SkPaint.h"
+#include "SkPath.h"
+#include "SkRefCnt.h"
+#include "SkTDArray.h"
+#include "SkTypeface.h"
+
+class SkTestFont;
+
+struct SkTestFontData {
+ const SkScalar* fPoints;
+ const unsigned char* fVerbs;
+ const unsigned* fCharCodes;
+ const size_t fCharCodesCount;
+ const SkFixed* fWidths;
+ const SkPaint::FontMetrics& fMetrics;
+ const char* fName;
+ SkTypeface::Style fStyle;
+ SkTestFont* fFontCache;
+};
+
+class SkTestFont : public SkRefCnt {
+public:
+
+
+ SkTestFont(const SkTestFontData& );
+ virtual ~SkTestFont();
+ int codeToIndex(SkUnichar charCode) const;
+ void init(const SkScalar* pts, const unsigned char* verbs);
+#ifdef SK_DEBUG // detect missing test font data
+ mutable unsigned char fDebugBits[16];
+ mutable SkUnichar fDebugOverage[8];
+ const char* fDebugName;
+ SkFontStyle fDebugStyle;
+ const char* debugFontName() const { return fName; }
+#endif
+private:
+ const unsigned* fCharCodes;
+ const size_t fCharCodesCount;
+ const SkFixed* fWidths;
+ const SkPaint::FontMetrics& fMetrics;
+ const char* fName;
+ SkPath** fPaths;
+ friend class SkTestTypeface;
+ typedef SkRefCnt INHERITED;
+};
+
+
+class SkTestTypeface : public SkTypeface {
+public:
+ SkTestTypeface(SkTestFont*, const SkFontStyle& style);
+ virtual ~SkTestTypeface() {
+ SkSafeUnref(fTestFont);
+ }
+ void getAdvance(SkGlyph* glyph);
+ void getFontMetrics(SkPaint::FontMetrics* metrics);
+ void getMetrics(SkGlyph* glyph);
+ void getPath(const SkGlyph& glyph, SkPath* path);
+protected:
+ SkScalerContext* onCreateScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor* desc) const override;
+ void onFilterRec(SkScalerContextRec* rec) const override;
+ SkAdvancedTypefaceMetrics* onGetAdvancedTypefaceMetrics(
+ PerGlyphInfo,
+ const uint32_t* glyphIDs,
+ uint32_t glyphIDsCount) const override;
+
+ SkStreamAsset* onOpenStream(int* ttcIndex) const override {
+ return nullptr;
+ }
+
+ void onGetFontDescriptor(SkFontDescriptor* desc, bool* isLocal) const override;
+
+ int onCharsToGlyphs(const void* chars, Encoding encoding,
+ uint16_t glyphs[], int glyphCount) const override;
+
+ int onCountGlyphs() const override {
+ return (int) fTestFont->fCharCodesCount;
+ }
+
+ int onGetUPEM() const override {
+ SkASSERT(0); // don't expect to get here
+ return 1;
+ }
+
+ void onGetFamilyName(SkString* familyName) const override;
+ SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override;
+
+ int onGetTableTags(SkFontTableTag tags[]) const override {
+ return 0;
+ }
+
+ size_t onGetTableData(SkFontTableTag tag, size_t offset,
+ size_t length, void* data) const override {
+ return 0;
+ }
+private:
+ SkTestFont* fTestFont;
+ friend class SkTestScalerContext;
+};
+
+SkTypeface* CreateTestTypeface(const char* name, SkTypeface::Style style);
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrAllocator.h b/gfx/skia/skia/src/gpu/GrAllocator.h
new file mode 100644
index 000000000..5b9bd5bab
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrAllocator.h
@@ -0,0 +1,398 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAllocator_DEFINED
+#define GrAllocator_DEFINED
+
+#include "GrConfig.h"
+#include "GrTypes.h"
+#include "SkTArray.h"
+#include "SkTypes.h"
+
+class GrAllocator : SkNoncopyable {
+public:
+ ~GrAllocator() { this->reset(); }
+
+ /**
+ * Create an allocator
+ *
+ * @param itemSize the size of each item to allocate
+ * @param itemsPerBlock the number of items to allocate at once
+ * @param initialBlock optional memory to use for the first block.
+ * Must be at least itemSize*itemsPerBlock sized.
+ * Caller is responsible for freeing this memory.
+ */
+ GrAllocator(size_t itemSize, int itemsPerBlock, void* initialBlock)
+ : fItemSize(itemSize)
+ , fItemsPerBlock(itemsPerBlock)
+ , fOwnFirstBlock(nullptr == initialBlock)
+ , fCount(0)
+ , fInsertionIndexInBlock(0) {
+ SkASSERT(itemsPerBlock > 0);
+ fBlockSize = fItemSize * fItemsPerBlock;
+ if (fOwnFirstBlock) {
+            // This forces us to allocate a new block on push_back().
+ fInsertionIndexInBlock = fItemsPerBlock;
+ } else {
+ fBlocks.push_back() = initialBlock;
+ fInsertionIndexInBlock = 0;
+ }
+ }
+
+ /**
+ * Adds an item and returns pointer to it.
+ *
+ * @return pointer to the added item.
+ */
+ void* push_back() {
+ // we always have at least one block
+ if (fItemsPerBlock == fInsertionIndexInBlock) {
+ fBlocks.push_back() = sk_malloc_throw(fBlockSize);
+ fInsertionIndexInBlock = 0;
+ }
+ void* ret = (char*)fBlocks.back() + fItemSize * fInsertionIndexInBlock;
+ ++fCount;
+ ++fInsertionIndexInBlock;
+ return ret;
+ }
+
+ /**
+ * Remove the last item, only call if count() != 0
+ */
+ void pop_back() {
+ SkASSERT(fCount);
+ SkASSERT(fInsertionIndexInBlock > 0);
+ --fInsertionIndexInBlock;
+ --fCount;
+ if (0 == fInsertionIndexInBlock) {
+ // Never delete the first block
+ if (fBlocks.count() > 1) {
+ sk_free(fBlocks.back());
+ fBlocks.pop_back();
+ fInsertionIndexInBlock = fItemsPerBlock;
+ }
+ }
+ }
+
+ /**
+ * Removes all added items.
+ */
+ void reset() {
+ int firstBlockToFree = fOwnFirstBlock ? 0 : 1;
+ for (int i = firstBlockToFree; i < fBlocks.count(); ++i) {
+ sk_free(fBlocks[i]);
+ }
+ if (fOwnFirstBlock) {
+ fBlocks.reset();
+            // This forces us to allocate a new block on push_back().
+ fInsertionIndexInBlock = fItemsPerBlock;
+ } else {
+ fBlocks.pop_back_n(fBlocks.count() - 1);
+ fInsertionIndexInBlock = 0;
+ }
+ fCount = 0;
+ }
+
+ /**
+ * Returns the item count.
+ */
+ int count() const {
+ return fCount;
+ }
+
+ /**
+ * Is the count 0?
+ */
+ bool empty() const { return 0 == fCount; }
+
+ /**
+ * Access last item, only call if count() != 0
+ */
+ void* back() {
+ SkASSERT(fCount);
+ SkASSERT(fInsertionIndexInBlock > 0);
+ return (char*)(fBlocks.back()) + (fInsertionIndexInBlock - 1) * fItemSize;
+ }
+
+ /**
+ * Access last item, only call if count() != 0
+ */
+ const void* back() const {
+ SkASSERT(fCount);
+ SkASSERT(fInsertionIndexInBlock > 0);
+ return (const char*)(fBlocks.back()) + (fInsertionIndexInBlock - 1) * fItemSize;
+ }
+
+ /**
+ * Iterates through the allocator. This is faster than using operator[] when walking linearly
+ * through the allocator.
+ */
+ class Iter {
+ public:
+ /**
+ * Initializes the iterator. next() must be called before get().
+ */
+ Iter(const GrAllocator* allocator)
+ : fAllocator(allocator)
+ , fBlockIndex(-1)
+ , fIndexInBlock(allocator->fItemsPerBlock - 1)
+ , fItemIndex(-1) {}
+
+ /**
+ * Advances the iterator. Iteration is finished when next() returns false.
+ */
+ bool next() {
+ ++fIndexInBlock;
+ ++fItemIndex;
+ if (fIndexInBlock == fAllocator->fItemsPerBlock) {
+ ++fBlockIndex;
+ fIndexInBlock = 0;
+ }
+ return fItemIndex < fAllocator->fCount;
+ }
+
+ /**
+ * Gets the current iterator value. Call next() at least once before calling. Don't call
+ * after next() returns false.
+ */
+ void* get() const {
+ SkASSERT(fItemIndex >= 0 && fItemIndex < fAllocator->fCount);
+ return (char*) fAllocator->fBlocks[fBlockIndex] + fIndexInBlock * fAllocator->fItemSize;
+ }
+
+ private:
+ const GrAllocator* fAllocator;
+ int fBlockIndex;
+ int fIndexInBlock;
+ int fItemIndex;
+ };
+
+ /**
+ * Access item by index.
+ */
+ void* operator[] (int i) {
+ SkASSERT(i >= 0 && i < fCount);
+ return (char*)fBlocks[i / fItemsPerBlock] +
+ fItemSize * (i % fItemsPerBlock);
+ }
+
+ /**
+ * Access item by index.
+ */
+ const void* operator[] (int i) const {
+ SkASSERT(i >= 0 && i < fCount);
+ return (const char*)fBlocks[i / fItemsPerBlock] +
+ fItemSize * (i % fItemsPerBlock);
+ }
+
+protected:
+ /**
+ * Set first block of memory to write into. Must be called before any other methods.
+ * This requires that you have passed nullptr in the constructor.
+ *
+ * @param initialBlock optional memory to use for the first block.
+ * Must be at least itemSize*itemsPerBlock sized.
+ * Caller is responsible for freeing this memory.
+ */
+ void setInitialBlock(void* initialBlock) {
+ SkASSERT(0 == fCount);
+ SkASSERT(0 == fBlocks.count());
+ SkASSERT(fItemsPerBlock == fInsertionIndexInBlock);
+ fOwnFirstBlock = false;
+ fBlocks.push_back() = initialBlock;
+ fInsertionIndexInBlock = 0;
+ }
+
+ // For access to above function.
+ template <typename T> friend class GrTAllocator;
+
+private:
+ static const int NUM_INIT_BLOCK_PTRS = 8;
+
+ SkSTArray<NUM_INIT_BLOCK_PTRS, void*, true> fBlocks;
+ size_t fBlockSize;
+ size_t fItemSize;
+ int fItemsPerBlock;
+ bool fOwnFirstBlock;
+ int fCount;
+ int fInsertionIndexInBlock;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+template <typename T> class GrTAllocator;
+template <typename T> void* operator new(size_t, GrTAllocator<T>*);
+
+template <typename T> class GrTAllocator : SkNoncopyable {
+public:
+ virtual ~GrTAllocator() { this->reset(); }
+
+ /**
+ * Create an allocator
+ *
+ * @param itemsPerBlock the number of items to allocate at once
+ */
+ explicit GrTAllocator(int itemsPerBlock)
+ : fAllocator(sizeof(T), itemsPerBlock, nullptr) {}
+
+ /**
+ * Adds an item and returns it.
+ *
+ * @return the added item.
+ */
+ T& push_back() {
+ void* item = fAllocator.push_back();
+ SkASSERT(item);
+ new (item) T;
+ return *(T*)item;
+ }
+
+ T& push_back(const T& t) {
+ void* item = fAllocator.push_back();
+ SkASSERT(item);
+ new (item) T(t);
+ return *(T*)item;
+ }
+
+ /**
+ * Remove the last item, only call if count() != 0
+ */
+ void pop_back() {
+ this->back().~T();
+ fAllocator.pop_back();
+ }
+
+ /**
+ * Removes all added items.
+ */
+ void reset() {
+ int c = fAllocator.count();
+ for (int i = 0; i < c; ++i) {
+ ((T*)fAllocator[i])->~T();
+ }
+ fAllocator.reset();
+ }
+
+ /**
+ * Returns the item count.
+ */
+ int count() const {
+ return fAllocator.count();
+ }
+
+ /**
+ * Is the count 0?
+ */
+ bool empty() const { return fAllocator.empty(); }
+
+ /**
+ * Access last item, only call if count() != 0
+ */
+ T& back() {
+ return *(T*)fAllocator.back();
+ }
+
+ /**
+ * Access last item, only call if count() != 0
+ */
+ const T& back() const {
+ return *(const T*)fAllocator.back();
+ }
+
+ /**
+ * Iterates through the allocator. This is faster than using operator[] when walking linearly
+ * through the allocator.
+ */
+ class Iter {
+ public:
+ /**
+ * Initializes the iterator. next() must be called before get() or ops * and ->.
+ */
+ Iter(const GrTAllocator* allocator) : fImpl(&allocator->fAllocator) {}
+
+ /**
+ * Advances the iterator. Iteration is finished when next() returns false.
+ */
+ bool next() { return fImpl.next(); }
+
+ /**
+ * Gets the current iterator value. Call next() at least once before calling. Don't call
+ * after next() returns false.
+ */
+ T* get() const { return (T*) fImpl.get(); }
+
+ /**
+ * Convenience operators. Same rules for calling apply as get().
+ */
+ T& operator*() const { return *this->get(); }
+ T* operator->() const { return this->get(); }
+
+ private:
+ GrAllocator::Iter fImpl;
+ };
+
+ /**
+ * Access item by index.
+ */
+ T& operator[] (int i) {
+ return *(T*)(fAllocator[i]);
+ }
+
+ /**
+ * Access item by index.
+ */
+ const T& operator[] (int i) const {
+ return *(const T*)(fAllocator[i]);
+ }
+
+protected:
+ /*
+ * Set first block of memory to write into. Must be called before any other methods.
+ *
+ * @param initialBlock optional memory to use for the first block.
+ * Must be at least size(T)*itemsPerBlock sized.
+ * Caller is responsible for freeing this memory.
+ */
+ void setInitialBlock(void* initialBlock) {
+ fAllocator.setInitialBlock(initialBlock);
+ }
+
+private:
+ friend void* operator new<T>(size_t, GrTAllocator*);
+
+ GrAllocator fAllocator;
+ typedef SkNoncopyable INHERITED;
+};
+
+template <int N, typename T> class GrSTAllocator : public GrTAllocator<T> {
+private:
+ typedef GrTAllocator<T> INHERITED;
+
+public:
+ GrSTAllocator() : INHERITED(N) {
+ this->setInitialBlock(fStorage.get());
+ }
+
+private:
+ SkAlignedSTStorage<N, T> fStorage;
+};
+
+template <typename T> void* operator new(size_t size, GrTAllocator<T>* allocator) {
+ return allocator->fAllocator.push_back();
+}
+
+// Skia doesn't use C++ exceptions but it may be compiled with them enabled. Having an op delete
+// to match the op new silences warnings about missing op delete when a constructor throws an
+// exception.
+template <typename T> void operator delete(void*, GrTAllocator<T>*) {
+ SK_ABORT("Invalid Operation");
+}
+
+#define GrNEW_APPEND_TO_ALLOCATOR(allocator_ptr, type_name, args) \
+ new (allocator_ptr) type_name args
+
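+// Example usage (illustrative sketch; SkRect is used here only as a sample payload type):
+//
+//   GrTAllocator<SkRect> rects(16);                  // 16 items per allocation block
+//   rects.push_back(SkRect::MakeWH(1, 1));           // copy-construct in place
+//   GrNEW_APPEND_TO_ALLOCATOR(&rects, SkRect, (SkRect::MakeWH(2, 2)));
+//   for (GrTAllocator<SkRect>::Iter iter(&rects); iter.next();) {
+//       iter->sort();
+//   }
+//   rects.reset();                                   // runs ~SkRect() on every item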
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrAppliedClip.h b/gfx/skia/skia/src/gpu/GrAppliedClip.h
new file mode 100644
index 000000000..3e98c6cb0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrAppliedClip.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAppliedClip_DEFINED
+#define GrAppliedClip_DEFINED
+
+#include "GrScissorState.h"
+#include "GrWindowRectsState.h"
+
+class GrFragmentProcessor;
+
+/**
+ * Produced by GrClip. It provides a set of modifications to the drawing state that are used to
+ * create the final GrPipeline for a GrBatch.
+ */
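+// Rough usage sketch (illustrative): the clip code fills one of these in per draw, e.g.
+//   GrAppliedClip appliedClip(batchBounds);
+//   if (!appliedClip.addScissor(scissorRect)) { /* draw is fully clipped away */ }
+//   appliedClip.addCoverageFP(std::move(clipCoverageFP));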
+class GrAppliedClip : public SkNoncopyable {
+public:
+ GrAppliedClip(const SkRect& drawBounds)
+ : fHasStencilClip(false)
+ , fClippedDrawBounds(drawBounds) {
+ }
+
+ const GrScissorState& scissorState() const { return fScissorState; }
+ const GrWindowRectsState& windowRectsState() const { return fWindowRectsState; }
+ GrFragmentProcessor* clipCoverageFragmentProcessor() const { return fClipCoverageFP.get(); }
+ bool hasStencilClip() const { return fHasStencilClip; }
+
+ /**
+ * Intersects the applied clip with the provided rect. Returns false if the draw became empty.
+ */
+ bool addScissor(const SkIRect& irect) {
+ return fScissorState.intersect(irect) && fClippedDrawBounds.intersect(SkRect::Make(irect));
+ }
+
+ void addWindowRectangles(const GrWindowRectsState& windowState) {
+ SkASSERT(!fWindowRectsState.enabled());
+ fWindowRectsState = windowState;
+ }
+
+ void addWindowRectangles(const GrWindowRectangles& windows, const SkIPoint& origin,
+ GrWindowRectsState::Mode mode) {
+ SkASSERT(!fWindowRectsState.enabled());
+ fWindowRectsState.set(windows, origin, mode);
+ }
+
+ void addCoverageFP(sk_sp<GrFragmentProcessor> fp) {
+ SkASSERT(!fClipCoverageFP);
+ fClipCoverageFP = fp;
+ }
+
+ void addStencilClip() {
+ SkASSERT(!fHasStencilClip);
+ fHasStencilClip = true;
+ }
+
+ /**
+ * Returns the device bounds of the draw after clip has been applied. TODO: Ideally this would
+ * consider the combined effect of all clipping techniques in play (scissor, stencil, fp, etc.).
+ */
+ const SkRect& clippedDrawBounds() const { return fClippedDrawBounds; }
+
+private:
+ GrScissorState fScissorState;
+ GrWindowRectsState fWindowRectsState;
+ sk_sp<GrFragmentProcessor> fClipCoverageFP;
+ bool fHasStencilClip;
+ SkRect fClippedDrawBounds;
+ typedef SkNoncopyable INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrAuditTrail.cpp b/gfx/skia/skia/src/gpu/GrAuditTrail.cpp
new file mode 100644
index 000000000..82dc7f713
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrAuditTrail.cpp
@@ -0,0 +1,298 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrAuditTrail.h"
+#include "batches/GrBatch.h"
+
+const int GrAuditTrail::kGrAuditTrailInvalidID = -1;
+
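+// Records one batch: it gets its own BatchNode in fBatchList, is indexed by uniqueID so a
+// later combine can find it (see batchingResultCombined), and, when a client ID is active,
+// is also bucketed into fClientIDLookup for getBoundsByClientID()/toJson(clientID).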
+void GrAuditTrail::addBatch(const GrBatch* batch) {
+ SkASSERT(fEnabled);
+ Batch* auditBatch = new Batch;
+ fBatchPool.emplace_back(auditBatch);
+ auditBatch->fName = batch->name();
+ auditBatch->fBounds = batch->bounds();
+ auditBatch->fClientID = kGrAuditTrailInvalidID;
+ auditBatch->fBatchListID = kGrAuditTrailInvalidID;
+ auditBatch->fChildID = kGrAuditTrailInvalidID;
+
+ // consume the current stack trace if any
+ auditBatch->fStackTrace = fCurrentStackTrace;
+ fCurrentStackTrace.reset();
+
+ if (fClientID != kGrAuditTrailInvalidID) {
+ auditBatch->fClientID = fClientID;
+ Batches** batchesLookup = fClientIDLookup.find(fClientID);
+ Batches* batches = nullptr;
+ if (!batchesLookup) {
+ batches = new Batches;
+ fClientIDLookup.set(fClientID, batches);
+ } else {
+ batches = *batchesLookup;
+ }
+
+ batches->push_back(auditBatch);
+ }
+
+    // Our algorithm doesn't bother to reorder inside a BatchNode,
+    // so the ChildID will start at 0.
+ auditBatch->fBatchListID = fBatchList.count();
+ auditBatch->fChildID = 0;
+
+ // We use the batch pointer as a key to find the batchnode we are 'glomming' batches onto
+ fIDLookup.set(batch->uniqueID(), auditBatch->fBatchListID);
+ BatchNode* batchNode = new BatchNode;
+ batchNode->fBounds = batch->bounds();
+ batchNode->fRenderTargetUniqueID = batch->renderTargetUniqueID();
+ batchNode->fChildren.push_back(auditBatch);
+ fBatchList.emplace_back(batchNode);
+}
+
+void GrAuditTrail::batchingResultCombined(const GrBatch* consumer, const GrBatch* consumed) {
+ // Look up the batch we are going to glom onto
+ int* indexPtr = fIDLookup.find(consumer->uniqueID());
+ SkASSERT(indexPtr);
+ int index = *indexPtr;
+ SkASSERT(index < fBatchList.count() && fBatchList[index]);
+ BatchNode& consumerBatch = *fBatchList[index];
+
+ // Look up the batch which will be glommed
+ int* consumedPtr = fIDLookup.find(consumed->uniqueID());
+ SkASSERT(consumedPtr);
+ int consumedIndex = *consumedPtr;
+ SkASSERT(consumedIndex < fBatchList.count() && fBatchList[consumedIndex]);
+ BatchNode& consumedBatch = *fBatchList[consumedIndex];
+
+ // steal all of consumed's batches
+ for (int i = 0; i < consumedBatch.fChildren.count(); i++) {
+ Batch* childBatch = consumedBatch.fChildren[i];
+
+ // set the ids for the child batch
+ childBatch->fBatchListID = index;
+ childBatch->fChildID = consumerBatch.fChildren.count();
+ consumerBatch.fChildren.push_back(childBatch);
+ }
+
+ // Update the bounds for the combineWith node
+ consumerBatch.fBounds = consumer->bounds();
+
+ // remove the old node from our batchlist and clear the combinee's lookup
+ // NOTE: because we can't change the shape of the batchlist, we use a sentinel
+ fBatchList[consumedIndex].reset(nullptr);
+ fIDLookup.remove(consumed->uniqueID());
+}
+
+void GrAuditTrail::copyOutFromBatchList(BatchInfo* outBatchInfo, int batchListID) {
+ SkASSERT(batchListID < fBatchList.count());
+ const BatchNode* bn = fBatchList[batchListID];
+ SkASSERT(bn);
+ outBatchInfo->fBounds = bn->fBounds;
+ outBatchInfo->fRenderTargetUniqueID = bn->fRenderTargetUniqueID;
+ for (int j = 0; j < bn->fChildren.count(); j++) {
+ BatchInfo::Batch& outBatch = outBatchInfo->fBatches.push_back();
+ const Batch* currentBatch = bn->fChildren[j];
+ outBatch.fBounds = currentBatch->fBounds;
+ outBatch.fClientID = currentBatch->fClientID;
+ }
+}
+
+void GrAuditTrail::getBoundsByClientID(SkTArray<BatchInfo>* outInfo, int clientID) {
+ Batches** batchesLookup = fClientIDLookup.find(clientID);
+ if (batchesLookup) {
+ // We track which batchlistID we're currently looking at. If it changes, then we
+ // need to push back a new batch info struct. We happen to know that batches are
+ // in sequential order in the batchlist, otherwise we'd have to do more bookkeeping
+ int currentBatchListID = kGrAuditTrailInvalidID;
+ for (int i = 0; i < (*batchesLookup)->count(); i++) {
+ const Batch* batch = (**batchesLookup)[i];
+
+ // Because we will copy out all of the batches associated with a given
+            // batch list id every time the id changes, we only have to update our struct
+ // when the id changes.
+ if (kGrAuditTrailInvalidID == currentBatchListID ||
+ batch->fBatchListID != currentBatchListID) {
+ BatchInfo& outBatchInfo = outInfo->push_back();
+
+ // copy out all of the batches so the client can display them even if
+ // they have a different clientID
+ this->copyOutFromBatchList(&outBatchInfo, batch->fBatchListID);
+ }
+ }
+ }
+}
+
+void GrAuditTrail::getBoundsByBatchListID(BatchInfo* outInfo, int batchListID) {
+ this->copyOutFromBatchList(outInfo, batchListID);
+}
+
+void GrAuditTrail::fullReset() {
+ SkASSERT(fEnabled);
+ fBatchList.reset();
+ fIDLookup.reset();
+ // free all client batches
+ fClientIDLookup.foreach([](const int&, Batches** batches) { delete *batches; });
+ fClientIDLookup.reset();
+ fBatchPool.reset(); // must be last, frees all of the memory
+}
+
+template <typename T>
+void GrAuditTrail::JsonifyTArray(SkString* json, const char* name, const T& array,
+ bool addComma) {
+ if (array.count()) {
+ if (addComma) {
+ json->appendf(",");
+ }
+ json->appendf("\"%s\": [", name);
+ const char* separator = "";
+ for (int i = 0; i < array.count(); i++) {
+ // Handle sentinel nullptrs
+ if (array[i]) {
+ json->appendf("%s", separator);
+ json->append(array[i]->toJson());
+ separator = ",";
+ }
+ }
+ json->append("]");
+ }
+}
+
+// This will pretty-print a very small subset of JSON.
+// The parsing rules are straightforward, aside from the fact that we do not want an extra newline
+// before ',' and after '}', so we have a comma exception rule.
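+// For example (illustrative), the input [{"a":1},{"b":2}] pretty-prints roughly as
+//   [
+//       {
+//           "a":1
+//       },
+//       {
+//           "b":2
+//       }
+//   ]
+// where the comma exception keeps "}," together instead of newlining before the ','.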
+class PrettyPrintJson {
+public:
+ SkString prettify(const SkString& json) {
+ fPrettyJson.reset();
+ fTabCount = 0;
+ fFreshLine = false;
+ fCommaException = false;
+ for (size_t i = 0; i < json.size(); i++) {
+ if ('[' == json[i] || '{' == json[i]) {
+ this->newline();
+ this->appendChar(json[i]);
+ fTabCount++;
+ this->newline();
+ } else if (']' == json[i] || '}' == json[i]) {
+ fTabCount--;
+ this->newline();
+ this->appendChar(json[i]);
+ fCommaException = true;
+ } else if (',' == json[i]) {
+ this->appendChar(json[i]);
+ this->newline();
+ } else {
+ this->appendChar(json[i]);
+ }
+ }
+ return fPrettyJson;
+ }
+private:
+ void appendChar(char appendee) {
+ if (fCommaException && ',' != appendee) {
+ this->newline();
+ }
+ this->tab();
+ fPrettyJson += appendee;
+ fFreshLine = false;
+ fCommaException = false;
+ }
+
+ void tab() {
+ if (fFreshLine) {
+ for (int i = 0; i < fTabCount; i++) {
+ fPrettyJson += '\t';
+ }
+ }
+ }
+
+ void newline() {
+ if (!fFreshLine) {
+ fFreshLine = true;
+ fPrettyJson += '\n';
+ }
+ }
+
+ SkString fPrettyJson;
+ int fTabCount;
+ bool fFreshLine;
+ bool fCommaException;
+};
+
+static SkString pretty_print_json(SkString json) {
+ class PrettyPrintJson prettyPrintJson;
+ return prettyPrintJson.prettify(json);
+}
+
+SkString GrAuditTrail::toJson(bool prettyPrint) const {
+ SkString json;
+ json.append("{");
+ JsonifyTArray(&json, "Batches", fBatchList, false);
+ json.append("}");
+
+ if (prettyPrint) {
+ return pretty_print_json(json);
+ } else {
+ return json;
+ }
+}
+
+SkString GrAuditTrail::toJson(int clientID, bool prettyPrint) const {
+ SkString json;
+ json.append("{");
+ Batches** batches = fClientIDLookup.find(clientID);
+ if (batches) {
+ JsonifyTArray(&json, "Batches", **batches, false);
+ }
+ json.appendf("}");
+
+ if (prettyPrint) {
+ return pretty_print_json(json);
+ } else {
+ return json;
+ }
+}
+
+static void skrect_to_json(SkString* json, const char* name, const SkRect& rect) {
+ json->appendf("\"%s\": {", name);
+ json->appendf("\"Left\": %f,", rect.fLeft);
+ json->appendf("\"Right\": %f,", rect.fRight);
+ json->appendf("\"Top\": %f,", rect.fTop);
+ json->appendf("\"Bottom\": %f", rect.fBottom);
+ json->append("}");
+}
+
+SkString GrAuditTrail::Batch::toJson() const {
+ SkString json;
+ json.append("{");
+ json.appendf("\"Name\": \"%s\",", fName.c_str());
+ json.appendf("\"ClientID\": \"%d\",", fClientID);
+ json.appendf("\"BatchListID\": \"%d\",", fBatchListID);
+ json.appendf("\"ChildID\": \"%d\",", fChildID);
+ skrect_to_json(&json, "Bounds", fBounds);
+ if (fStackTrace.count()) {
+ json.append(",\"Stack\": [");
+ for (int i = 0; i < fStackTrace.count(); i++) {
+ json.appendf("\"%s\"", fStackTrace[i].c_str());
+ if (i < fStackTrace.count() - 1) {
+ json.append(",");
+ }
+ }
+ json.append("]");
+ }
+ json.append("}");
+ return json;
+}
+
+SkString GrAuditTrail::BatchNode::toJson() const {
+ SkString json;
+ json.append("{");
+ json.appendf("\"RenderTarget\": \"%u\",", fRenderTargetUniqueID);
+ skrect_to_json(&json, "Bounds", fBounds);
+ JsonifyTArray(&json, "Batches", fChildren, true);
+ json.append("}");
+ return json;
+}
diff --git a/gfx/skia/skia/src/gpu/GrAutoLocaleSetter.h b/gfx/skia/skia/src/gpu/GrAutoLocaleSetter.h
new file mode 100644
index 000000000..564abd917
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrAutoLocaleSetter.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAutoLocaleSetter_DEFINED
+#define GrAutoLocaleSetter_DEFINED
+
+#include "GrTypes.h"
+
+#if defined(SK_BUILD_FOR_WIN)
+#include "SkString.h"
+#endif
+
+#if !defined(SK_BUILD_FOR_ANDROID)
+#include <locale.h>
+#endif
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+#include <xlocale.h>
+#endif
+
+#if defined(SK_BUILD_FOR_ANDROID) || defined(__UCLIBC__) || defined(_NEWLIB_VERSION)
+#define HAVE_LOCALE_T 0
+#else
+#define HAVE_LOCALE_T 1
+#endif
+
+/**
+ * Helper class for ensuring that we don't use the wrong locale when building shaders. Android
+ * doesn't support locale in the NDK, so this is a no-op there.
+ */
+class GrAutoLocaleSetter : public SkNoncopyable {
+public:
+ GrAutoLocaleSetter (const char* name) {
+#if defined(SK_BUILD_FOR_WIN)
+ fOldPerThreadLocale = _configthreadlocale(_ENABLE_PER_THREAD_LOCALE);
+ char* oldLocale = setlocale(LC_ALL, name);
+ if (oldLocale) {
+ fOldLocale = oldLocale;
+ fShouldRestoreLocale = true;
+ } else {
+ fShouldRestoreLocale = false;
+ }
+#elif HAVE_LOCALE_T
+ fLocale = newlocale(LC_ALL, name, 0);
+ if (fLocale) {
+ fOldLocale = uselocale(fLocale);
+ } else {
+ fOldLocale = static_cast<locale_t>(0);
+ }
+#else
+ (void) name; // suppress unused param warning.
+#endif
+ }
+
+ ~GrAutoLocaleSetter () {
+#if defined(SK_BUILD_FOR_WIN)
+ if (fShouldRestoreLocale) {
+ setlocale(LC_ALL, fOldLocale.c_str());
+ }
+ _configthreadlocale(fOldPerThreadLocale);
+#elif HAVE_LOCALE_T
+ if (fLocale) {
+ uselocale(fOldLocale);
+ freelocale(fLocale);
+ }
+#endif
+ }
+
+private:
+#if defined(SK_BUILD_FOR_WIN)
+ int fOldPerThreadLocale;
+ bool fShouldRestoreLocale;
+ SkString fOldLocale;
+#elif HAVE_LOCALE_T
+ locale_t fOldLocale;
+ locale_t fLocale;
+#endif
+};
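+// Typical use (illustrative): construct on the stack around shader text generation so
+// numeric output uses '.' as the decimal separator regardless of the user's locale:
+//   GrAutoLocaleSetter als("C");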
+
+#undef HAVE_LOCALE_T
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrBatchAtlas.cpp b/gfx/skia/skia/src/gpu/GrBatchAtlas.cpp
new file mode 100644
index 000000000..e0828a4e6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBatchAtlas.cpp
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrBatchAtlas.h"
+#include "GrBatchFlushState.h"
+#include "GrRectanizer.h"
+#include "GrTracing.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrBatchAtlas::BatchPlot::BatchPlot(int index, uint64_t genID, int offX, int offY, int width,
+ int height, GrPixelConfig config)
+ : fLastUpload(GrBatchDrawToken::AlreadyFlushedToken())
+ , fLastUse(GrBatchDrawToken::AlreadyFlushedToken())
+ , fIndex(index)
+ , fGenID(genID)
+ , fID(CreateId(fIndex, fGenID))
+ , fData(nullptr)
+ , fWidth(width)
+ , fHeight(height)
+ , fX(offX)
+ , fY(offY)
+ , fRects(nullptr)
+ , fOffset(SkIPoint16::Make(fX * fWidth, fY * fHeight))
+ , fConfig(config)
+ , fBytesPerPixel(GrBytesPerPixel(config))
+#ifdef SK_DEBUG
+ , fDirty(false)
+#endif
+{
+ fDirtyRect.setEmpty();
+}
+
+GrBatchAtlas::BatchPlot::~BatchPlot() {
+ sk_free(fData);
+ delete fRects;
+}
+
+bool GrBatchAtlas::BatchPlot::addSubImage(int width, int height, const void* image,
+ SkIPoint16* loc) {
+ SkASSERT(width <= fWidth && height <= fHeight);
+
+ if (!fRects) {
+ fRects = GrRectanizer::Factory(fWidth, fHeight);
+ }
+
+ if (!fRects->addRect(width, height, loc)) {
+ return false;
+ }
+
+ if (!fData) {
+ fData = reinterpret_cast<unsigned char*>(sk_calloc_throw(fBytesPerPixel * fWidth *
+ fHeight));
+ }
+ size_t rowBytes = width * fBytesPerPixel;
+ const unsigned char* imagePtr = (const unsigned char*)image;
+ // point ourselves at the right starting spot
+ unsigned char* dataPtr = fData;
+ dataPtr += fBytesPerPixel * fWidth * loc->fY;
+ dataPtr += fBytesPerPixel * loc->fX;
+ // copy into the data buffer
+ for (int i = 0; i < height; ++i) {
+ memcpy(dataPtr, imagePtr, rowBytes);
+ dataPtr += fBytesPerPixel * fWidth;
+ imagePtr += rowBytes;
+ }
+
+ fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height);
+
+ loc->fX += fOffset.fX;
+ loc->fY += fOffset.fY;
+ SkDEBUGCODE(fDirty = true;)
+
+ return true;
+}
+
+void GrBatchAtlas::BatchPlot::uploadToTexture(GrDrawBatch::WritePixelsFn& writePixels,
+ GrTexture* texture) {
+ // We should only be issuing uploads if we are in fact dirty
+ SkASSERT(fDirty && fData && texture);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrBatchPlot::uploadToTexture");
+ size_t rowBytes = fBytesPerPixel * fWidth;
+ const unsigned char* dataPtr = fData;
+ dataPtr += rowBytes * fDirtyRect.fTop;
+ dataPtr += fBytesPerPixel * fDirtyRect.fLeft;
+ writePixels(texture, fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
+ fDirtyRect.width(), fDirtyRect.height(), fConfig, dataPtr, rowBytes);
+ fDirtyRect.setEmpty();
+ SkDEBUGCODE(fDirty = false;)
+}
+
+void GrBatchAtlas::BatchPlot::resetRects() {
+ if (fRects) {
+ fRects->reset();
+ }
+
+ fGenID++;
+ fID = CreateId(fIndex, fGenID);
+
+ // zero out the plot
+ if (fData) {
+ sk_bzero(fData, fBytesPerPixel * fWidth * fHeight);
+ }
+
+ fDirtyRect.setEmpty();
+ SkDEBUGCODE(fDirty = false;)
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrBatchAtlas::GrBatchAtlas(GrTexture* texture, int numPlotsX, int numPlotsY)
+ : fTexture(texture)
+ , fAtlasGeneration(kInvalidAtlasGeneration + 1) {
+
+ fPlotWidth = texture->width() / numPlotsX;
+ fPlotHeight = texture->height() / numPlotsY;
+ SkASSERT(numPlotsX * numPlotsY <= BulkUseTokenUpdater::kMaxPlots);
+ SkASSERT(fPlotWidth * numPlotsX == texture->width());
+ SkASSERT(fPlotHeight * numPlotsY == texture->height());
+
+ SkDEBUGCODE(fNumPlots = numPlotsX * numPlotsY;)
+
+ // We currently do not support compressed atlases...
+ SkASSERT(!GrPixelConfigIsCompressed(texture->desc().fConfig));
+
+ // set up allocated plots
+ fPlotArray = new SkAutoTUnref<BatchPlot>[numPlotsX * numPlotsY];
+
+ SkAutoTUnref<BatchPlot>* currPlot = fPlotArray;
+ for (int y = numPlotsY - 1, r = 0; y >= 0; --y, ++r) {
+ for (int x = numPlotsX - 1, c = 0; x >= 0; --x, ++c) {
+ uint32_t index = r * numPlotsX + c;
+ currPlot->reset(new BatchPlot(index, 1, x, y, fPlotWidth, fPlotHeight,
+ texture->desc().fConfig));
+
+ // build LRU list
+ fPlotList.addToHead(currPlot->get());
+ ++currPlot;
+ }
+ }
+}
+
+GrBatchAtlas::~GrBatchAtlas() {
+ SkSafeUnref(fTexture);
+ delete[] fPlotArray;
+}
+
+void GrBatchAtlas::processEviction(AtlasID id) {
+ for (int i = 0; i < fEvictionCallbacks.count(); i++) {
+ (*fEvictionCallbacks[i].fFunc)(id, fEvictionCallbacks[i].fData);
+ }
+}
+
+inline void GrBatchAtlas::updatePlot(GrDrawBatch::Target* target, AtlasID* id, BatchPlot* plot) {
+ this->makeMRU(plot);
+
+    // If our most recent upload has already occurred then we have to insert a new
+    // upload. Otherwise, we already have a scheduled upload that hasn't yet occurred.
+    // This new update will piggyback on that previously scheduled update.
+ if (target->hasDrawBeenFlushed(plot->lastUploadToken())) {
+        // With C++14 we could move the sk_sp into the lambda to only ref once.
+ sk_sp<BatchPlot> plotsp(SkRef(plot));
+ GrTexture* texture = fTexture;
+ GrBatchDrawToken lastUploadToken = target->addAsapUpload(
+ [plotsp, texture] (GrDrawBatch::WritePixelsFn& writePixels) {
+ plotsp->uploadToTexture(writePixels, texture);
+ }
+ );
+ plot->setLastUploadToken(lastUploadToken);
+ }
+ *id = plot->id();
+}
+
+bool GrBatchAtlas::addToAtlas(AtlasID* id, GrDrawBatch::Target* target,
+ int width, int height, const void* image, SkIPoint16* loc) {
+ // We should already have a texture, TODO clean this up
+ SkASSERT(fTexture);
+ if (width > fPlotWidth || height > fPlotHeight) {
+ return false;
+ }
+
+ // now look through all allocated plots for one we can share, in Most Recently Refed order
+ GrBatchPlotList::Iter plotIter;
+ plotIter.init(fPlotList, GrBatchPlotList::Iter::kHead_IterStart);
+ BatchPlot* plot;
+ while ((plot = plotIter.get())) {
+ SkASSERT(GrBytesPerPixel(fTexture->desc().fConfig) == plot->bpp());
+ if (plot->addSubImage(width, height, image, loc)) {
+ this->updatePlot(target, id, plot);
+ return true;
+ }
+ plotIter.next();
+ }
+
+ // If the above fails, then see if the least recently refed plot has already been flushed to the
+ // gpu
+ plot = fPlotList.tail();
+ SkASSERT(plot);
+ if (target->hasDrawBeenFlushed(plot->lastUseToken())) {
+ this->processEviction(plot->id());
+ plot->resetRects();
+ SkASSERT(GrBytesPerPixel(fTexture->desc().fConfig) == plot->bpp());
+ SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, loc);
+ SkASSERT(verify);
+ this->updatePlot(target, id, plot);
+ fAtlasGeneration++;
+ return true;
+ }
+
+ // If this plot has been used in a draw that is currently being prepared by a batch, then we
+ // have to fail. This gives the batch a chance to enqueue the draw, and call back into this
+ // function. When that draw is enqueued, the draw token advances, and the subsequent call will
+ // continue past this branch and prepare an inline upload that will occur after the enqueued
+ // draw which references the plot's pre-upload content.
+ if (plot->lastUseToken() == target->nextDrawToken()) {
+ return false;
+ }
+
+ this->processEviction(plot->id());
+ fPlotList.remove(plot);
+ SkAutoTUnref<BatchPlot>& newPlot = fPlotArray[plot->index()];
+ newPlot.reset(plot->clone());
+
+ fPlotList.addToHead(newPlot.get());
+ SkASSERT(GrBytesPerPixel(fTexture->desc().fConfig) == newPlot->bpp());
+ SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, loc);
+ SkASSERT(verify);
+
+    // Note that this plot will be uploaded inline with the draws, whereas the
+    // one it displaced was most likely uploaded ASAP.
+    // With C++14 we could move the sk_sp into the lambda to only ref once.
+ sk_sp<BatchPlot> plotsp(SkRef(newPlot.get()));
+ GrTexture* texture = fTexture;
+ GrBatchDrawToken lastUploadToken = target->addInlineUpload(
+ [plotsp, texture] (GrDrawBatch::WritePixelsFn& writePixels) {
+ plotsp->uploadToTexture(writePixels, texture);
+ }
+ );
+ newPlot->setLastUploadToken(lastUploadToken);
+
+ *id = newPlot->id();
+
+ fAtlasGeneration++;
+ return true;
+}
diff --git a/gfx/skia/skia/src/gpu/GrBatchAtlas.h b/gfx/skia/skia/src/gpu/GrBatchAtlas.h
new file mode 100644
index 000000000..827106fdf
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBatchAtlas.h
@@ -0,0 +1,262 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBatchAtlas_DEFINED
+#define GrBatchAtlas_DEFINED
+
+#include "GrTexture.h"
+#include "SkPoint.h"
+#include "SkTDArray.h"
+#include "SkTInternalLList.h"
+
+#include "batches/GrDrawBatch.h"
+
+class GrRectanizer;
+
+struct GrBatchAtlasConfig {
+ int numPlotsX() const { return fWidth / fPlotWidth; }
+    int numPlotsY() const { return fHeight / fPlotHeight; }
+ int fWidth;
+ int fHeight;
+ int fLog2Width;
+ int fLog2Height;
+ int fPlotWidth;
+ int fPlotHeight;
+};
+
+class GrBatchAtlas {
+public:
+ // An AtlasID is an opaque handle which callers can use to determine if the atlas contains
+ // a specific piece of data
+ typedef uint64_t AtlasID;
+ static const uint32_t kInvalidAtlasID = 0;
+ static const uint64_t kInvalidAtlasGeneration = 0;
+
+ // A function pointer for use as a callback during eviction. Whenever GrBatchAtlas evicts a
+ // specific AtlasID, it will call all of the registered listeners so they can optionally process
+ // the eviction
+ typedef void (*EvictionFunc)(GrBatchAtlas::AtlasID, void*);
+
+ GrBatchAtlas(GrTexture*, int numPlotsX, int numPlotsY);
+ ~GrBatchAtlas();
+
+    // Adds a width x height subimage to the atlas. Upon success it returns an AtlasID and the
+    // subimage's absolute location in the backing texture via the out parameters; false is
+    // returned if the subimage cannot fit in the atlas.
+    // If provided, the image data will be written to the CPU-side backing bitmap.
+    // NOTE: If the client intends to refer to the atlas, they should immediately call
+    // 'setLastUseToken' with the current token from the batch target, otherwise the next call
+    // to addToAtlas might cause an eviction.
+ bool addToAtlas(AtlasID*, GrDrawBatch::Target*, int width, int height, const void* image,
+ SkIPoint16* loc);
+
+ GrTexture* getTexture() const { return fTexture; }
+
+ uint64_t atlasGeneration() const { return fAtlasGeneration; }
+
+ inline bool hasID(AtlasID id) {
+ uint32_t index = GetIndexFromID(id);
+ SkASSERT(index < fNumPlots);
+ return fPlotArray[index]->genID() == GetGenerationFromID(id);
+ }
+
+ // To ensure the atlas does not evict a given entry, the client must set the last use token
+ inline void setLastUseToken(AtlasID id, GrBatchDrawToken batchToken) {
+ SkASSERT(this->hasID(id));
+ uint32_t index = GetIndexFromID(id);
+ SkASSERT(index < fNumPlots);
+ this->makeMRU(fPlotArray[index]);
+ fPlotArray[index]->setLastUseToken(batchToken);
+ }
+
+ inline void registerEvictionCallback(EvictionFunc func, void* userData) {
+ EvictionData* data = fEvictionCallbacks.append();
+ data->fFunc = func;
+ data->fData = userData;
+ }
+
+    /*
+     * A class which can be handed back to GrBatchAtlas for bulk updates of last-use tokens. The
+     * current maximum number of plots GrBatchAtlas can handle is 32; if in the future this is
+     * insufficient then we can move to a 64-bit int.
+     */
+ class BulkUseTokenUpdater {
+ public:
+ BulkUseTokenUpdater() : fPlotAlreadyUpdated(0) {}
+ BulkUseTokenUpdater(const BulkUseTokenUpdater& that)
+ : fPlotsToUpdate(that.fPlotsToUpdate)
+ , fPlotAlreadyUpdated(that.fPlotAlreadyUpdated) {
+ }
+
+ void add(AtlasID id) {
+ int index = GrBatchAtlas::GetIndexFromID(id);
+ if (!this->find(index)) {
+ this->set(index);
+ }
+ }
+
+ void reset() {
+ fPlotsToUpdate.reset();
+ fPlotAlreadyUpdated = 0;
+ }
+
+ private:
+ bool find(int index) const {
+ SkASSERT(index < kMaxPlots);
+ return (fPlotAlreadyUpdated >> index) & 1;
+ }
+
+ void set(int index) {
+ SkASSERT(!this->find(index));
+ fPlotAlreadyUpdated = fPlotAlreadyUpdated | (1 << index);
+ fPlotsToUpdate.push_back(index);
+ }
+
+ static const int kMinItems = 4;
+ static const int kMaxPlots = 32;
+ SkSTArray<kMinItems, int, true> fPlotsToUpdate;
+ uint32_t fPlotAlreadyUpdated;
+
+ friend class GrBatchAtlas;
+ };
+
+ void setLastUseTokenBulk(const BulkUseTokenUpdater& updater, GrBatchDrawToken batchToken) {
+ int count = updater.fPlotsToUpdate.count();
+ for (int i = 0; i < count; i++) {
+ BatchPlot* plot = fPlotArray[updater.fPlotsToUpdate[i]];
+ this->makeMRU(plot);
+ plot->setLastUseToken(batchToken);
+ }
+ }
+
+ static const int kGlyphMaxDim = 256;
+ static bool GlyphTooLargeForAtlas(int width, int height) {
+ return width > kGlyphMaxDim || height > kGlyphMaxDim;
+ }
+
+private:
+ // The backing GrTexture for a GrBatchAtlas is broken into a spatial grid of BatchPlots.
+ // The BatchPlots keep track of subimage placement via their GrRectanizer. A BatchPlot
+ // manages the lifetime of its data using two tokens, a last use token and a last upload token.
+ // Once a BatchPlot is "full" (i.e. there is no room for the new subimage according to the
+ // GrRectanizer), it can no longer be used unless the last use of the GrPlot has already been
+ // flushed through to the gpu.
+ class BatchPlot : public SkRefCnt {
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(BatchPlot);
+
+ public:
+ // index() is a unique id for the plot relative to the owning GrAtlas. genID() is a
+ // monotonically incremented number which is bumped every time this plot is
+ // evicted from the cache (i.e., there is continuity in genID() across atlas spills).
+ uint32_t index() const { return fIndex; }
+ uint64_t genID() const { return fGenID; }
+ GrBatchAtlas::AtlasID id() const {
+ SkASSERT(GrBatchAtlas::kInvalidAtlasID != fID);
+ return fID;
+ }
+ SkDEBUGCODE(size_t bpp() const { return fBytesPerPixel; })
+
+ bool addSubImage(int width, int height, const void* image, SkIPoint16* loc);
+
+        // To manage the lifetime of a plot, we use two tokens. We use the last upload token to
+        // know when we can 'piggyback' uploads, i.e. if the last upload hasn't been flushed to
+        // the gpu, we don't need to issue a new upload even if we update the cpu backing store.
+        // We use lastUse to determine when we can evict a plot from the cache, i.e. if the last
+        // use has already flushed through the gpu then we can reuse the plot.
+ GrBatchDrawToken lastUploadToken() const { return fLastUpload; }
+ GrBatchDrawToken lastUseToken() const { return fLastUse; }
+ void setLastUploadToken(GrBatchDrawToken batchToken) { fLastUpload = batchToken; }
+ void setLastUseToken(GrBatchDrawToken batchToken) { fLastUse = batchToken; }
+
+ void uploadToTexture(GrDrawBatch::WritePixelsFn&, GrTexture* texture);
+ void resetRects();
+
+ private:
+ BatchPlot(int index, uint64_t genID, int offX, int offY, int width, int height,
+ GrPixelConfig config);
+
+ ~BatchPlot() override;
+
+ // Create a clone of this plot. The cloned plot will take the place of the
+ // current plot in the atlas.
+ BatchPlot* clone() const {
+ return new BatchPlot(fIndex, fGenID+1, fX, fY, fWidth, fHeight, fConfig);
+ }
+
+ static GrBatchAtlas::AtlasID CreateId(uint32_t index, uint64_t generation) {
+ SkASSERT(index < (1 << 16));
+ SkASSERT(generation < ((uint64_t)1 << 48));
+ return generation << 16 | index;
+ }
+
+ GrBatchDrawToken fLastUpload;
+ GrBatchDrawToken fLastUse;
+
+ const uint32_t fIndex;
+ uint64_t fGenID;
+ GrBatchAtlas::AtlasID fID;
+ unsigned char* fData;
+ const int fWidth;
+ const int fHeight;
+ const int fX;
+ const int fY;
+ GrRectanizer* fRects;
+ const SkIPoint16 fOffset; // the offset of the plot in the backing texture
+ const GrPixelConfig fConfig;
+ const size_t fBytesPerPixel;
+ SkIRect fDirtyRect;
+ SkDEBUGCODE(bool fDirty;)
+
+ friend class GrBatchAtlas;
+
+ typedef SkRefCnt INHERITED;
+ };
+
+ typedef SkTInternalLList<BatchPlot> GrBatchPlotList;
+
+ static uint32_t GetIndexFromID(AtlasID id) {
+ return id & 0xffff;
+ }
+
+ // top 48 bits are reserved for the generation ID
+ static uint64_t GetGenerationFromID(AtlasID id) {
+ return (id >> 16) & 0xffffffffffff;
+ }
+
+ inline void updatePlot(GrDrawBatch::Target*, AtlasID*, BatchPlot*);
+
+ inline void makeMRU(BatchPlot* plot) {
+ if (fPlotList.head() == plot) {
+ return;
+ }
+
+ fPlotList.remove(plot);
+ fPlotList.addToHead(plot);
+ }
+
+ inline void processEviction(AtlasID);
+
+ GrTexture* fTexture;
+ int fPlotWidth;
+ int fPlotHeight;
+ SkDEBUGCODE(uint32_t fNumPlots;)
+
+ uint64_t fAtlasGeneration;
+
+ struct EvictionData {
+ EvictionFunc fFunc;
+ void* fData;
+ };
+
+ SkTDArray<EvictionData> fEvictionCallbacks;
+ // allocated array of GrBatchPlots
+ SkAutoTUnref<BatchPlot>* fPlotArray;
+ // LRU list of GrPlots (MRU at head - LRU at tail)
+ GrBatchPlotList fPlotList;
+};
+
+#endif
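A minimal sketch of the add-then-pin pattern that the addToAtlas() note above describes; 'atlas', 'target', 'width', 'height', and 'pixels' are assumed to be supplied by the calling batch:

    GrBatchAtlas::AtlasID id;
    SkIPoint16 loc;
    if (!atlas->addToAtlas(&id, target, width, height, pixels, &loc)) {
        // The least recently used plot is still referenced by an unflushed draw;
        // the batch should record its current draw and retry afterwards.
        return;
    }
    // Pin the new entry so it cannot be evicted before the draw we are about to record.
    atlas->setLastUseToken(id, target->nextDrawToken());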
diff --git a/gfx/skia/skia/src/gpu/GrBatchFlushState.cpp b/gfx/skia/skia/src/gpu/GrBatchFlushState.cpp
new file mode 100644
index 000000000..4e51ae4dd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBatchFlushState.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrBatchFlushState.h"
+
+#include "GrBatchAtlas.h"
+#include "GrPipeline.h"
+
+GrBatchFlushState::GrBatchFlushState(GrGpu* gpu, GrResourceProvider* resourceProvider)
+ : fGpu(gpu)
+ , fResourceProvider(resourceProvider)
+ , fCommandBuffer(nullptr)
+ , fVertexPool(gpu)
+ , fIndexPool(gpu)
+ , fLastIssuedToken(GrBatchDrawToken::AlreadyFlushedToken())
+ , fLastFlushedToken(0) {}
+
+void* GrBatchFlushState::makeVertexSpace(size_t vertexSize, int vertexCount,
+ const GrBuffer** buffer, int* startVertex) {
+ return fVertexPool.makeSpace(vertexSize, vertexCount, buffer, startVertex);
+}
+
+uint16_t* GrBatchFlushState::makeIndexSpace(int indexCount,
+ const GrBuffer** buffer, int* startIndex) {
+ return reinterpret_cast<uint16_t*>(fIndexPool.makeSpace(indexCount, buffer, startIndex));
+}
diff --git a/gfx/skia/skia/src/gpu/GrBatchFlushState.h b/gfx/skia/skia/src/gpu/GrBatchFlushState.h
new file mode 100644
index 000000000..d2d9a4b48
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBatchFlushState.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBatchBuffer_DEFINED
+#define GrBatchBuffer_DEFINED
+
+#include "GrBufferAllocPool.h"
+#include "batches/GrVertexBatch.h"
+
+class GrGpuCommandBuffer;
+class GrResourceProvider;
+
+/** Tracks the state across all the GrBatches in a GrDrawTarget flush. */
+class GrBatchFlushState {
+public:
+ GrBatchFlushState(GrGpu*, GrResourceProvider*);
+
+ ~GrBatchFlushState() { this->reset(); }
+
+    /** Inserts an upload to be executed after all batches in the flush have prepared their draws
+        but before the draws are submitted to the backend 3D API. */
+ void addASAPUpload(GrDrawBatch::DeferredUploadFn&& upload) {
+ fAsapUploads.emplace_back(std::move(upload));
+ }
+
+ const GrCaps& caps() const { return *fGpu->caps(); }
+ GrResourceProvider* resourceProvider() const { return fResourceProvider; }
+
+ /** Has the token been flushed to the backend 3D API. */
+ bool hasDrawBeenFlushed(GrBatchDrawToken token) const {
+ return token.fSequenceNumber <= fLastFlushedToken.fSequenceNumber;
+ }
+
+ /** Issue a token to an operation that is being enqueued. */
+ GrBatchDrawToken issueDrawToken() {
+ return GrBatchDrawToken(++fLastIssuedToken.fSequenceNumber);
+ }
+
+ /** Call every time a draw that was issued a token is flushed */
+ void flushToken() { ++fLastFlushedToken.fSequenceNumber; }
+
+ /** Gets the next draw token that will be issued. */
+ GrBatchDrawToken nextDrawToken() const {
+ return GrBatchDrawToken(fLastIssuedToken.fSequenceNumber + 1);
+ }
+
+    /** The next token to be flushed all the way to the backend 3D API. */
+ GrBatchDrawToken nextTokenToFlush() const {
+ return GrBatchDrawToken(fLastFlushedToken.fSequenceNumber + 1);
+ }
+
+ void* makeVertexSpace(size_t vertexSize, int vertexCount,
+ const GrBuffer** buffer, int* startVertex);
+ uint16_t* makeIndexSpace(int indexCount, const GrBuffer** buffer, int* startIndex);
+
+ /** This is called after each batch has a chance to prepare its draws and before the draws
+ are issued. */
+ void preIssueDraws() {
+ fVertexPool.unmap();
+ fIndexPool.unmap();
+ int uploadCount = fAsapUploads.count();
+
+ for (int i = 0; i < uploadCount; i++) {
+ this->doUpload(fAsapUploads[i]);
+ }
+ fAsapUploads.reset();
+ }
+
+ void doUpload(GrDrawBatch::DeferredUploadFn& upload) {
+ GrDrawBatch::WritePixelsFn wp = [this] (GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config, const void* buffer,
+ size_t rowBytes) -> bool {
+ return this->fGpu->writePixels(surface, left, top, width, height, config, buffer,
+ rowBytes);
+ };
+ upload(wp);
+ }
+
+ void putBackIndices(size_t indices) { fIndexPool.putBack(indices * sizeof(uint16_t)); }
+
+ void putBackVertexSpace(size_t sizeInBytes) { fVertexPool.putBack(sizeInBytes); }
+
+ GrGpuCommandBuffer* commandBuffer() { return fCommandBuffer; }
+ void setCommandBuffer(GrGpuCommandBuffer* buffer) { fCommandBuffer = buffer; }
+
+ GrGpu* gpu() { return fGpu; }
+
+ void reset() {
+ fVertexPool.reset();
+ fIndexPool.reset();
+ }
+
+private:
+
+ GrGpu* fGpu;
+
+ GrResourceProvider* fResourceProvider;
+
+ GrGpuCommandBuffer* fCommandBuffer;
+
+ GrVertexBufferAllocPool fVertexPool;
+ GrIndexBufferAllocPool fIndexPool;
+
+ SkSTArray<4, GrDrawBatch::DeferredUploadFn> fAsapUploads;
+
+ GrBatchDrawToken fLastIssuedToken;
+
+ GrBatchDrawToken fLastFlushedToken;
+};
+
+/**
+ * A word about uploads and tokens: Batches should usually schedule their uploads to occur at the
+ * beginning of a frame whenever possible. These are called ASAP uploads. Of course, this requires
+ * that there are no unflushed draws that rely on the old texture contents; otherwise the ASAP
+ * upload would happen prior to those draws and cause them to read the new (wrong) texture data.
+ * In that case the batch should schedule an inline upload instead.
+ *
+ * Batches, in conjunction with helpers such as GrBatchAtlas, can use the token system to know
+ * what the most recent draw was that referenced a resource (or portion of a resource). Each draw
+ * is assigned a token. A resource (or portion) can be tagged with the most recent draw's
+ * token. The target provides a facility for testing whether the draw corresponding to the token
+ * has been flushed. If it has not been flushed then the batch must perform an inline upload
+ * instead. When scheduling an inline upload the batch provides the token of the draw that the
+ * upload must occur before. The upload will then occur before the draw that requires the new
+ * data but after the draws associated with tokens that require the old data.
+ *
+ * TODO: Currently the token/upload interface is spread over GrDrawBatch, GrVertexBatch,
+ * GrDrawBatch::Target, and GrVertexBatch::Target. However, the interface at the GrDrawBatch
+ * level is not complete and isn't useful. We should push it down to GrVertexBatch until it
+ * is required at the GrDrawBatch level.
+ */
+
+/**
+ * GrDrawBatch instances use this object to allocate space for their geometry and to issue the draws
+ * that render their batch.
+ */
+class GrDrawBatch::Target {
+public:
+ Target(GrBatchFlushState* state, GrDrawBatch* batch) : fState(state), fBatch(batch) {}
+
+ /** Returns the token of the draw that this upload will occur before. */
+ GrBatchDrawToken addInlineUpload(DeferredUploadFn&& upload) {
+ fBatch->fInlineUploads.emplace_back(std::move(upload), fState->nextDrawToken());
+ return fBatch->fInlineUploads.back().fUploadBeforeToken;
+ }
+
+ /** Returns the token of the draw that this upload will occur before. Since ASAP uploads
+ are done first during a flush, this will be the first token since the most recent
+ flush. */
+ GrBatchDrawToken addAsapUpload(DeferredUploadFn&& upload) {
+ fState->addASAPUpload(std::move(upload));
+ return fState->nextTokenToFlush();
+ }
+
+ bool hasDrawBeenFlushed(GrBatchDrawToken token) const {
+ return fState->hasDrawBeenFlushed(token);
+ }
+
+ /** Gets the next draw token that will be issued by this target. This can be used by a batch
+ to record that the next draw it issues will use a resource (e.g. texture) while preparing
+ that draw. */
+ GrBatchDrawToken nextDrawToken() const { return fState->nextDrawToken(); }
+
+ const GrCaps& caps() const { return fState->caps(); }
+
+ GrResourceProvider* resourceProvider() const { return fState->resourceProvider(); }
+
+protected:
+ GrDrawBatch* batch() { return fBatch; }
+ GrBatchFlushState* state() { return fState; }
+
+private:
+ GrBatchFlushState* fState;
+ GrDrawBatch* fBatch;
+};
+
+/** Extension of GrDrawBatch::Target for use by GrVertexBatch. Adds the ability to create vertex
+ draws. */
+class GrVertexBatch::Target : public GrDrawBatch::Target {
+public:
+ Target(GrBatchFlushState* state, GrVertexBatch* batch) : INHERITED(state, batch) {}
+
+ void draw(const GrGeometryProcessor* gp, const GrMesh& mesh);
+
+ void* makeVertexSpace(size_t vertexSize, int vertexCount,
+ const GrBuffer** buffer, int* startVertex) {
+ return this->state()->makeVertexSpace(vertexSize, vertexCount, buffer, startVertex);
+ }
+
+ uint16_t* makeIndexSpace(int indexCount, const GrBuffer** buffer, int* startIndex) {
+ return this->state()->makeIndexSpace(indexCount, buffer, startIndex);
+ }
+
+ /** Helpers for batches which over-allocate and then return data to the pool. */
+ void putBackIndices(int indices) { this->state()->putBackIndices(indices); }
+ void putBackVertices(int vertices, size_t vertexStride) {
+ this->state()->putBackVertexSpace(vertices * vertexStride);
+ }
+
+private:
+ GrVertexBatch* vertexBatch() { return static_cast<GrVertexBatch*>(this->batch()); }
+ typedef GrDrawBatch::Target INHERITED;
+};
+
+#endif
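As a rough sketch of the rule spelled out in the comment above (names such as 'lastTokenReadingOldData' and 'uploadFn' are placeholders assumed to be provided by the batch):

    if (target->hasDrawBeenFlushed(lastTokenReadingOldData)) {
        // No pending draw still needs the old contents; upload at flush start.
        target->addAsapUpload(std::move(uploadFn));
    } else {
        // A not-yet-flushed draw reads the old contents; schedule the upload
        // inline so it lands after that draw and before the next one.
        target->addInlineUpload(std::move(uploadFn));
    }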
diff --git a/gfx/skia/skia/src/gpu/GrBatchTest.cpp b/gfx/skia/skia/src/gpu/GrBatchTest.cpp
new file mode 100644
index 000000000..fe320a268
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBatchTest.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrBatchTest.h"
+#include "SkRandom.h"
+#include "SkTypes.h"
+
+#ifdef GR_TEST_UTILS
+
+DRAW_BATCH_TEST_EXTERN(AAConvexPathBatch);
+DRAW_BATCH_TEST_EXTERN(AADistanceFieldPathBatch);
+DRAW_BATCH_TEST_EXTERN(AAFillRectBatch);
+DRAW_BATCH_TEST_EXTERN(AAFillRectBatchLocalMatrix);
+DRAW_BATCH_TEST_EXTERN(AAHairlineBatch);
+DRAW_BATCH_TEST_EXTERN(AAStrokeRectBatch);
+DRAW_BATCH_TEST_EXTERN(AnalyticRectBatch);
+DRAW_BATCH_TEST_EXTERN(DashBatch);
+DRAW_BATCH_TEST_EXTERN(DefaultPathBatch);
+DRAW_BATCH_TEST_EXTERN(CircleBatch);
+DRAW_BATCH_TEST_EXTERN(DIEllipseBatch);
+DRAW_BATCH_TEST_EXTERN(EllipseBatch);
+DRAW_BATCH_TEST_EXTERN(GrDrawAtlasBatch);
+DRAW_BATCH_TEST_EXTERN(NonAAStrokeRectBatch);
+DRAW_BATCH_TEST_EXTERN(RRectBatch);
+DRAW_BATCH_TEST_EXTERN(TesselatingPathBatch);
+DRAW_BATCH_TEST_EXTERN(TextBlobBatch);
+DRAW_BATCH_TEST_EXTERN(VerticesBatch);
+
+static BatchTestFunc gTestBatches[] = {
+ DRAW_BATCH_TEST_ENTRY(AAConvexPathBatch),
+ DRAW_BATCH_TEST_ENTRY(AADistanceFieldPathBatch),
+ DRAW_BATCH_TEST_ENTRY(AAFillRectBatch),
+ DRAW_BATCH_TEST_ENTRY(AAFillRectBatchLocalMatrix),
+ DRAW_BATCH_TEST_ENTRY(AAHairlineBatch),
+ DRAW_BATCH_TEST_ENTRY(AAStrokeRectBatch),
+ DRAW_BATCH_TEST_ENTRY(AnalyticRectBatch),
+ DRAW_BATCH_TEST_ENTRY(DashBatch),
+ DRAW_BATCH_TEST_ENTRY(DefaultPathBatch),
+ DRAW_BATCH_TEST_ENTRY(CircleBatch),
+ DRAW_BATCH_TEST_ENTRY(DIEllipseBatch),
+ DRAW_BATCH_TEST_ENTRY(EllipseBatch),
+ DRAW_BATCH_TEST_ENTRY(GrDrawAtlasBatch),
+ DRAW_BATCH_TEST_ENTRY(NonAAStrokeRectBatch),
+ DRAW_BATCH_TEST_ENTRY(RRectBatch),
+ DRAW_BATCH_TEST_ENTRY(TesselatingPathBatch),
+ DRAW_BATCH_TEST_ENTRY(TextBlobBatch),
+ DRAW_BATCH_TEST_ENTRY(VerticesBatch)
+};
+
+GrDrawBatch* GrRandomDrawBatch(SkRandom* random, GrContext* context) {
+ uint32_t index = random->nextULessThan(static_cast<uint32_t>(SK_ARRAY_COUNT(gTestBatches)));
+ BatchTestFunc func = gTestBatches[index];
+ return (*func)(random, context);
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrBatchTest.h b/gfx/skia/skia/src/gpu/GrBatchTest.h
new file mode 100644
index 000000000..32e8e2893
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBatchTest.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBatchTest_DEFINED
+#define GrBatchTest_DEFINED
+
+#include "GrTestUtils.h"
+
+#ifdef GR_TEST_UTILS
+
+class GrDrawBatch;
+class GrContext;
+class SkRandom;
+
+/*
+ * This file defines some macros for testing batches, and also declares functions / objects which
+ * are generally useful for GrBatch testing
+ */
+
+// Batches should define test functions using DRAW_BATCH_TEST_DEFINE. The other macros defined
+// below are used exclusively by the test harness.
+typedef GrDrawBatch* (*BatchTestFunc)(SkRandom* random, GrContext* context);
+#define DRAW_BATCH_TEST_DEFINE(Batch) \
+ GrDrawBatch* Batch##__Test(SkRandom* random, GrContext* context)
+#define DRAW_BATCH_TEST_EXTERN(Batch) \
+ extern GrDrawBatch* Batch##__Test(SkRandom*, GrContext* context);
+#define DRAW_BATCH_TEST_ENTRY(Batch) \
+ Batch##__Test
+#define DRAW_BATCH_TEST_FRIEND(Batch) \
+ friend GrDrawBatch* Batch##__Test(SkRandom* random, GrContext* context);
+
+GrDrawBatch* GrRandomDrawBatch(SkRandom*, GrContext*);
+
+#endif
+#endif
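A minimal sketch of how a batch would plug into this harness, assuming a hypothetical MyRectBatch whose Create(GrColor, const SkRect&) factory returns a GrDrawBatch*; the matching DRAW_BATCH_TEST_EXTERN/ENTRY lines would then be added to GrBatchTest.cpp:

    #ifdef GR_TEST_UTILS
    DRAW_BATCH_TEST_DEFINE(MyRectBatch) {
        GrColor color = GrColorPackRGBA(random->nextULessThan(256),
                                        random->nextULessThan(256),
                                        random->nextULessThan(256), 0xFF);
        SkRect rect = SkRect::MakeWH(random->nextRangeScalar(1.f, 100.f),
                                     random->nextRangeScalar(1.f, 100.f));
        return MyRectBatch::Create(color, rect);   // hypothetical factory
    }
    #endif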
diff --git a/gfx/skia/skia/src/gpu/GrBlend.cpp b/gfx/skia/skia/src/gpu/GrBlend.cpp
new file mode 100644
index 000000000..72a89d838
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBlend.cpp
@@ -0,0 +1,124 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrBlend.h"
+
+/**
+ * MaskedColor is used to evaluate the color and valid color component flags through the
+ * blending equation. Could possibly extend this to be used more broadly.
+ */
+class MaskedColor {
+public:
+ MaskedColor(GrColor color, GrColorComponentFlags flags)
+ : fColor(color)
+ , fFlags(flags) {}
+
+ MaskedColor() {}
+
+ void set(GrColor color, GrColorComponentFlags flags) {
+ fColor = color;
+ fFlags = flags;
+ }
+
+ static MaskedColor Invert(const MaskedColor& in) {
+ return MaskedColor(GrInvertColor(in.fColor), in.fFlags);
+ }
+
+ static MaskedColor ExtractAlpha(const MaskedColor& in) {
+ GrColorComponentFlags flags = (in.fFlags & kA_GrColorComponentFlag) ?
+ kRGBA_GrColorComponentFlags : kNone_GrColorComponentFlags;
+ return MaskedColor(GrColorPackA4(GrColorUnpackA(in.fColor)), flags);
+ }
+
+ static MaskedColor ExtractInverseAlpha(const MaskedColor& in) {
+ GrColorComponentFlags flags = (in.fFlags & kA_GrColorComponentFlag) ?
+ kRGBA_GrColorComponentFlags : kNone_GrColorComponentFlags;
+ return MaskedColor(GrColorPackA4(0xFF - GrColorUnpackA(in.fColor)), flags);
+ }
+
+ static MaskedColor Mul(const MaskedColor& a, const MaskedColor& b) {
+ GrColorComponentFlags outFlags = (a.fFlags & b.fFlags) | a.componentsWithValue(0) |
+ b.componentsWithValue(0);
+ return MaskedColor(GrColorMul(a.fColor, b.fColor), outFlags);
+ }
+
+ static MaskedColor SatAdd(const MaskedColor& a, const MaskedColor& b) {
+ GrColorComponentFlags outFlags = (a.fFlags & b.fFlags) | a.componentsWithValue(0xFF) |
+ b.componentsWithValue(0xFF);
+ return MaskedColor(GrColorSatAdd(a.fColor, b.fColor), outFlags);
+ }
+
+ GrColor color() const { return fColor; }
+
+ GrColorComponentFlags validFlags () const { return fFlags; }
+
+private:
+ GrColorComponentFlags componentsWithValue(unsigned value) const {
+ GrColorComponentFlags flags = kNone_GrColorComponentFlags;
+ if ((kR_GrColorComponentFlag & fFlags) && value == GrColorUnpackR(fColor)) {
+ flags |= kR_GrColorComponentFlag;
+ }
+ if ((kG_GrColorComponentFlag & fFlags) && value == GrColorUnpackG(fColor)) {
+ flags |= kG_GrColorComponentFlag;
+ }
+ if ((kB_GrColorComponentFlag & fFlags) && value == GrColorUnpackB(fColor)) {
+ flags |= kB_GrColorComponentFlag;
+ }
+ if ((kA_GrColorComponentFlag & fFlags) && value == GrColorUnpackA(fColor)) {
+ flags |= kA_GrColorComponentFlag;
+ }
+ return flags;
+ }
+
+ GrColor fColor;
+ GrColorComponentFlags fFlags;
+};
+
+static MaskedColor get_term(GrBlendCoeff coeff, const MaskedColor& src, const MaskedColor& dst,
+ const MaskedColor& value) {
+ switch (coeff) {
+ case kZero_GrBlendCoeff:
+ return MaskedColor(0, kRGBA_GrColorComponentFlags);
+ case kOne_GrBlendCoeff:
+ return value;
+ case kDC_GrBlendCoeff:
+ return MaskedColor::Mul(dst, value);
+ case kIDC_GrBlendCoeff:
+ return MaskedColor::Mul(MaskedColor::Invert(dst), value);
+ case kDA_GrBlendCoeff:
+ return MaskedColor::Mul(MaskedColor::ExtractAlpha(dst), value);
+ case kIDA_GrBlendCoeff:
+ return MaskedColor::Mul(MaskedColor::ExtractInverseAlpha(dst), value);
+ case kSC_GrBlendCoeff:
+ return MaskedColor::Mul(src, value);
+ case kISC_GrBlendCoeff:
+ return MaskedColor::Mul(MaskedColor::Invert(src), value);
+ case kSA_GrBlendCoeff:
+ return MaskedColor::Mul(MaskedColor::ExtractAlpha(src), value);
+ case kISA_GrBlendCoeff:
+ return MaskedColor::Mul(MaskedColor::ExtractInverseAlpha(src), value);
+ default:
+ SkFAIL("Illegal coefficient");
+ return MaskedColor();
+ }
+}
+
+void GrGetCoeffBlendKnownComponents(GrBlendCoeff srcCoeff, GrBlendCoeff dstCoeff,
+ GrColor srcColor, GrColorComponentFlags srcColorFlags,
+ GrColor dstColor, GrColorComponentFlags dstColorFlags,
+ GrColor* outColor,
+ GrColorComponentFlags* outFlags) {
+ MaskedColor src(srcColor, srcColorFlags);
+ MaskedColor dst(dstColor, dstColorFlags);
+
+ MaskedColor srcTerm = get_term(srcCoeff, src, dst, src);
+ MaskedColor dstTerm = get_term(dstCoeff, src, dst, dst);
+
+ MaskedColor output = MaskedColor::SatAdd(srcTerm, dstTerm);
+ *outColor = output.color();
+ *outFlags = output.validFlags();
+}
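A small sketch of what this analysis yields for src-over with a fully known, opaque source color and an unknown destination (the particular color is arbitrary):

    GrColor outColor;
    GrColorComponentFlags outFlags;
    GrGetCoeffBlendKnownComponents(kOne_GrBlendCoeff, kISA_GrBlendCoeff,
                                   GrColorPackRGBA(0xFF, 0x00, 0x00, 0xFF),
                                   kRGBA_GrColorComponentFlags,        // src fully known, opaque
                                   0, kNone_GrColorComponentFlags,     // dst unknown
                                   &outColor, &outFlags);
    // Because the source is opaque, the ISA term is zero, so every component of
    // the result is known: outFlags == kRGBA_GrColorComponentFlags.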
diff --git a/gfx/skia/skia/src/gpu/GrBlurUtils.cpp b/gfx/skia/skia/src/gpu/GrBlurUtils.cpp
new file mode 100644
index 000000000..5f575e3c8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBlurUtils.cpp
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrBlurUtils.h"
+#include "GrDrawContext.h"
+#include "GrCaps.h"
+#include "GrContext.h"
+#include "GrFixedClip.h"
+#include "effects/GrSimpleTextureEffect.h"
+#include "GrStyle.h"
+#include "GrTexture.h"
+#include "GrTextureProvider.h"
+#include "SkDraw.h"
+#include "SkGrPriv.h"
+#include "SkMaskFilter.h"
+#include "SkPaint.h"
+#include "SkTLazy.h"
+
+static bool clip_bounds_quick_reject(const SkIRect& clipBounds, const SkIRect& rect) {
+ return clipBounds.isEmpty() || rect.isEmpty() || !SkIRect::Intersects(clipBounds, rect);
+}
+
+// Draw a mask using the supplied paint. Since the coverage/geometry
+// is already burnt into the mask this boils down to a rect draw.
+// Return true if the mask was successfully drawn.
+static bool draw_mask(GrDrawContext* drawContext,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkIRect& maskRect,
+ GrPaint* grp,
+ GrTexture* mask) {
+ SkMatrix matrix;
+ matrix.setTranslate(-SkIntToScalar(maskRect.fLeft), -SkIntToScalar(maskRect.fTop));
+ matrix.postIDiv(mask->width(), mask->height());
+ matrix.preConcat(viewMatrix);
+ grp->addCoverageFragmentProcessor(GrSimpleTextureEffect::Make(mask, nullptr, matrix));
+
+ SkMatrix inverse;
+ if (!viewMatrix.invert(&inverse)) {
+ return false;
+ }
+ drawContext->fillRectWithLocalMatrix(clip, *grp, SkMatrix::I(), SkRect::Make(maskRect),
+ inverse);
+ return true;
+}
+
+static bool sw_draw_with_mask_filter(GrDrawContext* drawContext,
+ GrTextureProvider* textureProvider,
+ const GrClip& clipData,
+ const SkMatrix& viewMatrix,
+ const SkPath& devPath,
+ const SkMaskFilter* filter,
+ const SkIRect& clipBounds,
+ GrPaint* grp,
+ SkStrokeRec::InitStyle fillOrHairline) {
+ SkMask srcM, dstM;
+ if (!SkDraw::DrawToMask(devPath, &clipBounds, filter, &viewMatrix, &srcM,
+ SkMask::kComputeBoundsAndRenderImage_CreateMode, fillOrHairline)) {
+ return false;
+ }
+ SkAutoMaskFreeImage autoSrc(srcM.fImage);
+
+ if (!filter->filterMask(&dstM, srcM, viewMatrix, nullptr)) {
+ return false;
+ }
+ // this will free-up dstM when we're done (allocated in filterMask())
+ SkAutoMaskFreeImage autoDst(dstM.fImage);
+
+ if (clip_bounds_quick_reject(clipBounds, dstM.fBounds)) {
+ return false;
+ }
+
+ // we now have a device-aligned 8bit mask in dstM, ready to be drawn using
+ // the current clip (and identity matrix) and GrPaint settings
+ GrSurfaceDesc desc;
+ desc.fWidth = dstM.fBounds.width();
+ desc.fHeight = dstM.fBounds.height();
+ desc.fConfig = kAlpha_8_GrPixelConfig;
+
+ SkAutoTUnref<GrTexture> texture(textureProvider->createApproxTexture(desc));
+ if (!texture) {
+ return false;
+ }
+ texture->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
+ dstM.fImage, dstM.fRowBytes);
+
+ return draw_mask(drawContext, clipData, viewMatrix, dstM.fBounds, grp, texture);
+}
+
+// Create a mask of 'devPath' and place the result in 'mask'.
+static sk_sp<GrTexture> create_mask_GPU(GrContext* context,
+ const SkIRect& maskRect,
+ const SkPath& devPath,
+ SkStrokeRec::InitStyle fillOrHairline,
+ bool doAA,
+ int sampleCnt) {
+ if (!doAA) {
+ // Don't need MSAA if mask isn't AA
+ sampleCnt = 0;
+ }
+
+ sk_sp<GrDrawContext> drawContext(context->makeDrawContextWithFallback(SkBackingFit::kApprox,
+ maskRect.width(),
+ maskRect.height(),
+ kAlpha_8_GrPixelConfig,
+ nullptr,
+ sampleCnt));
+ if (!drawContext) {
+ return nullptr;
+ }
+
+ drawContext->clear(nullptr, 0x0, true);
+
+ GrPaint tempPaint;
+ tempPaint.setAntiAlias(doAA);
+ tempPaint.setCoverageSetOpXPFactory(SkRegion::kReplace_Op);
+
+ // setup new clip
+ const SkIRect clipRect = SkIRect::MakeWH(maskRect.width(), maskRect.height());
+ GrFixedClip clip(clipRect);
+
+ // Draw the mask into maskTexture with the path's integerized top-left at
+ // the origin using tempPaint.
+ SkMatrix translate;
+ translate.setTranslate(-SkIntToScalar(maskRect.fLeft), -SkIntToScalar(maskRect.fTop));
+ drawContext->drawPath(clip, tempPaint, translate, devPath, GrStyle(fillOrHairline));
+    return drawContext->asTexture();
+}
+
+static void draw_path_with_mask_filter(GrContext* context,
+ GrDrawContext* drawContext,
+ const GrClip& clip,
+ GrPaint* paint,
+ const SkMatrix& viewMatrix,
+ const SkMaskFilter* maskFilter,
+ const GrStyle& style,
+ const SkPath* path,
+ bool pathIsMutable) {
+ SkASSERT(maskFilter);
+
+ SkIRect clipBounds;
+ clip.getConservativeBounds(drawContext->width(), drawContext->height(), &clipBounds);
+ SkTLazy<SkPath> tmpPath;
+ SkStrokeRec::InitStyle fillOrHairline;
+
+ // We just fully apply the style here.
+ if (style.applies()) {
+ SkScalar scale = GrStyle::MatrixToScaleFactor(viewMatrix);
+ if (0 == scale || !style.applyToPath(tmpPath.init(), &fillOrHairline, *path, scale)) {
+ return;
+ }
+ pathIsMutable = true;
+ path = tmpPath.get();
+ } else if (style.isSimpleHairline()) {
+ fillOrHairline = SkStrokeRec::kHairline_InitStyle;
+ } else {
+ SkASSERT(style.isSimpleFill());
+ fillOrHairline = SkStrokeRec::kFill_InitStyle;
+ }
+
+ // transform the path into device space
+ if (!viewMatrix.isIdentity()) {
+ SkPath* result;
+ if (pathIsMutable) {
+ result = const_cast<SkPath*>(path);
+ } else {
+ if (!tmpPath.isValid()) {
+ tmpPath.init();
+ }
+ result = tmpPath.get();
+ }
+ path->transform(viewMatrix, result);
+ path = result;
+ result->setIsVolatile(true);
+ pathIsMutable = true;
+ }
+
+ SkRect maskRect;
+ if (maskFilter->canFilterMaskGPU(SkRRect::MakeRect(path->getBounds()),
+ clipBounds,
+ viewMatrix,
+ &maskRect)) {
+ // This mask will ultimately be drawn as a non-AA rect (see draw_mask).
+ // Non-AA rects have a bad habit of snapping arbitrarily. Integerize here
+ // so the mask draws in a reproducible manner.
+ SkIRect finalIRect;
+ maskRect.roundOut(&finalIRect);
+ if (clip_bounds_quick_reject(clipBounds, finalIRect)) {
+ // clipped out
+ return;
+ }
+
+ if (maskFilter->directFilterMaskGPU(context->textureProvider(),
+ drawContext,
+ paint,
+ clip,
+ viewMatrix,
+ SkStrokeRec(fillOrHairline),
+ *path)) {
+ // the mask filter was able to draw itself directly, so there's nothing
+ // left to do.
+ return;
+ }
+
+ sk_sp<GrTexture> mask(create_mask_GPU(context,
+ finalIRect,
+ *path,
+ fillOrHairline,
+ paint->isAntiAlias(),
+ drawContext->numColorSamples()));
+ if (mask) {
+ GrTexture* filtered;
+
+ if (maskFilter->filterMaskGPU(mask.get(), viewMatrix, finalIRect, &filtered)) {
+ // filterMaskGPU gives us ownership of a ref to the result
+ SkAutoTUnref<GrTexture> atu(filtered);
+ if (draw_mask(drawContext, clip, viewMatrix, finalIRect, paint, filtered)) {
+ // This path is completely drawn
+ return;
+ }
+ }
+ }
+ }
+
+ sw_draw_with_mask_filter(drawContext, context->textureProvider(),
+ clip, viewMatrix, *path,
+ maskFilter, clipBounds, paint, fillOrHairline);
+}
+
+void GrBlurUtils::drawPathWithMaskFilter(GrContext* context,
+ GrDrawContext* drawContext,
+ const GrClip& clip,
+ const SkPath& path,
+ GrPaint* paint,
+ const SkMatrix& viewMatrix,
+ const SkMaskFilter* mf,
+ const GrStyle& style,
+ bool pathIsMutable) {
+ draw_path_with_mask_filter(context, drawContext, clip, paint, viewMatrix, mf,
+ style, &path, pathIsMutable);
+}
+
+void GrBlurUtils::drawPathWithMaskFilter(GrContext* context,
+ GrDrawContext* drawContext,
+ const GrClip& clip,
+ const SkPath& origPath,
+ const SkPaint& paint,
+ const SkMatrix& origViewMatrix,
+ const SkMatrix* prePathMatrix,
+ const SkIRect& clipBounds,
+ bool pathIsMutable) {
+ SkASSERT(!pathIsMutable || origPath.isVolatile());
+
+ GrStyle style(paint);
+ // If we have a prematrix, apply it to the path, optimizing for the case
+ // where the original path can in fact be modified in place (even though
+ // its parameter type is const).
+
+ const SkPath* path = &origPath;
+ SkTLazy<SkPath> tmpPath;
+
+ SkMatrix viewMatrix = origViewMatrix;
+
+ if (prePathMatrix) {
+ // Styling, blurs, and shading are supposed to be applied *after* the prePathMatrix.
+ if (!paint.getMaskFilter() && !paint.getShader() && !style.applies()) {
+ viewMatrix.preConcat(*prePathMatrix);
+ } else {
+ SkPath* result = pathIsMutable ? const_cast<SkPath*>(path) : tmpPath.init();
+ pathIsMutable = true;
+ path->transform(*prePathMatrix, result);
+ path = result;
+ result->setIsVolatile(true);
+ }
+ }
+ // at this point we're done with prePathMatrix
+ SkDEBUGCODE(prePathMatrix = (const SkMatrix*)0x50FF8001;)
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(context, drawContext, paint, viewMatrix, &grPaint)) {
+ return;
+ }
+
+ if (paint.getMaskFilter()) {
+ draw_path_with_mask_filter(context, drawContext, clip, &grPaint, viewMatrix,
+ paint.getMaskFilter(), style,
+ path, pathIsMutable);
+ } else {
+ drawContext->drawPath(clip, grPaint, viewMatrix, *path, style);
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/GrBlurUtils.h b/gfx/skia/skia/src/gpu/GrBlurUtils.h
new file mode 100644
index 000000000..aef1bdba0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBlurUtils.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBlurUtils_DEFINED
+#define GrBlurUtils_DEFINED
+
+class GrClip;
+class GrContext;
+class GrDrawContext;
+class GrPaint;
+class GrRenderTarget;
+class GrStyle;
+struct SkIRect;
+class SkMaskFilter;
+class SkMatrix;
+class SkPaint;
+class SkPath;
+class SkPathEffect;
+
+
+/**
+ * Blur utilities.
+ */
+namespace GrBlurUtils {
+ /**
+ * Draw a path handling the mask filter if present.
+ */
+ void drawPathWithMaskFilter(GrContext* context,
+ GrDrawContext* drawContext,
+ const GrClip& clip,
+ const SkPath& origSrcPath,
+ const SkPaint& paint,
+ const SkMatrix& origViewMatrix,
+ const SkMatrix* prePathMatrix,
+ const SkIRect& clipBounds,
+ bool pathIsMutable);
+
+ /**
+ * Draw a path handling the mask filter. The mask filter is not optional. The path effect is
+ * optional. The GrPaint will be modified after return.
+ */
+ void drawPathWithMaskFilter(GrContext*,
+ GrDrawContext*,
+ const GrClip&,
+ const SkPath& path,
+ GrPaint*,
+ const SkMatrix& viewMatrix,
+ const SkMaskFilter*,
+ const GrStyle&,
+ bool pathIsMutable);
+
+};
+
+#endif
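A minimal sketch of the SkPaint overload as a device backend might call it; fContext, fDrawContext, clip, and clipBounds are assumed members or locals of the caller:

    GrBlurUtils::drawPathWithMaskFilter(fContext, fDrawContext.get(), clip,
                                        path, paint, viewMatrix,
                                        nullptr,       // no prePathMatrix
                                        clipBounds,
                                        false);        // path is not mutable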
diff --git a/gfx/skia/skia/src/gpu/GrBuffer.cpp b/gfx/skia/skia/src/gpu/GrBuffer.cpp
new file mode 100644
index 000000000..b565345e9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBuffer.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrBuffer.h"
+#include "GrGpu.h"
+#include "GrCaps.h"
+
+GrBuffer* GrBuffer::CreateCPUBacked(GrGpu* gpu, size_t sizeInBytes, GrBufferType intendedType,
+ const void* data) {
+ SkASSERT(GrBufferTypeIsVertexOrIndex(intendedType));
+ void* cpuData;
+ if (gpu->caps()->mustClearUploadedBufferData()) {
+ cpuData = sk_calloc_throw(sizeInBytes);
+ } else {
+ cpuData = sk_malloc_flags(sizeInBytes, SK_MALLOC_THROW);
+ }
+ if (data) {
+ memcpy(cpuData, data, sizeInBytes);
+ }
+ return new GrBuffer(gpu, sizeInBytes, intendedType, cpuData);
+}
+
+GrBuffer::GrBuffer(GrGpu* gpu, size_t sizeInBytes, GrBufferType type, void* cpuData)
+ : INHERITED(gpu),
+ fMapPtr(nullptr),
+ fSizeInBytes(sizeInBytes),
+ fAccessPattern(kDynamic_GrAccessPattern),
+ fCPUData(cpuData),
+ fIntendedType(type) {
+ this->registerWithCache(SkBudgeted::kNo);
+}
+
+GrBuffer::GrBuffer(GrGpu* gpu, size_t sizeInBytes, GrBufferType type, GrAccessPattern pattern)
+ : INHERITED(gpu),
+ fMapPtr(nullptr),
+ fSizeInBytes(sizeInBytes),
+ fAccessPattern(pattern),
+ fCPUData(nullptr),
+ fIntendedType(type) {
+ // Subclass registers with cache.
+}
+
+void GrBuffer::ComputeScratchKeyForDynamicVBO(size_t size, GrBufferType intendedType,
+ GrScratchKey* key) {
+ static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
+ GrScratchKey::Builder builder(key, kType, 1 + (sizeof(size_t) + 3) / 4);
+    // TODO: There's not always a reason to cache a buffer by type. In some (all?) APIs it's just
+ // a chunk of memory we can use/reuse for any type of data. We really only need to
+ // differentiate between the "read" types (e.g. kGpuToCpu_BufferType) and "draw" types.
+ builder[0] = intendedType;
+ builder[1] = (uint32_t)size;
+ if (sizeof(size_t) > 4) {
+ builder[2] = (uint32_t)((uint64_t)size >> 32);
+ }
+}
+
+bool GrBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
+ SkASSERT(this->isCPUBacked());
+ memcpy(fCPUData, src, srcSizeInBytes);
+ return true;
+}
+
+void GrBuffer::computeScratchKey(GrScratchKey* key) const {
+ if (!this->isCPUBacked() && SkIsPow2(fSizeInBytes) &&
+ kDynamic_GrAccessPattern == fAccessPattern) {
+ ComputeScratchKeyForDynamicVBO(fSizeInBytes, fIntendedType, key);
+ }
+}
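For illustration, a sketch of creating a CPU-backed staging buffer for index data; 'gpu', 'indices', and 'indexCount' are assumed to come from the caller:

    sk_sp<GrBuffer> staging(GrBuffer::CreateCPUBacked(gpu,
                                                      indexCount * sizeof(uint16_t),
                                                      kIndex_GrBufferType,
                                                      indices));
    // The data is copied into CPU memory owned by the buffer; no GPU resource is
    // allocated until the buffer is actually needed by a draw.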
diff --git a/gfx/skia/skia/src/gpu/GrBufferAllocPool.cpp b/gfx/skia/skia/src/gpu/GrBufferAllocPool.cpp
new file mode 100644
index 000000000..e3f30b0c1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBufferAllocPool.cpp
@@ -0,0 +1,371 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrBufferAllocPool.h"
+#include "GrBuffer.h"
+#include "GrCaps.h"
+#include "GrContext.h"
+#include "GrGpu.h"
+#include "GrResourceProvider.h"
+#include "GrTypes.h"
+
+#include "SkTraceEvent.h"
+
+#ifdef SK_DEBUG
+ #define VALIDATE validate
+#else
+ static void VALIDATE(bool = false) {}
+#endif
+
+static const size_t MIN_VERTEX_BUFFER_SIZE = 1 << 15;
+static const size_t MIN_INDEX_BUFFER_SIZE = 1 << 12;
+
+// page size
+#define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 15)
+
+#define UNMAP_BUFFER(block) \
+do { \
+ TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), \
+ "GrBufferAllocPool Unmapping Buffer", \
+ TRACE_EVENT_SCOPE_THREAD, \
+ "percent_unwritten", \
+ (float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \
+ (block).fBuffer->unmap(); \
+} while (false)
+
+GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
+ GrBufferType bufferType,
+ size_t blockSize)
+ : fBlocks(8) {
+
+ fGpu = SkRef(gpu);
+ fCpuData = nullptr;
+ fBufferType = bufferType;
+ fBufferPtr = nullptr;
+ fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);
+
+ fBytesInUse = 0;
+
+ fBufferMapThreshold = gpu->caps()->bufferMapThreshold();
+}
+
+void GrBufferAllocPool::deleteBlocks() {
+ if (fBlocks.count()) {
+ GrBuffer* buffer = fBlocks.back().fBuffer;
+ if (buffer->isMapped()) {
+ UNMAP_BUFFER(fBlocks.back());
+ }
+ }
+ while (!fBlocks.empty()) {
+ this->destroyBlock();
+ }
+ SkASSERT(!fBufferPtr);
+}
+
+GrBufferAllocPool::~GrBufferAllocPool() {
+ VALIDATE();
+ this->deleteBlocks();
+ sk_free(fCpuData);
+ fGpu->unref();
+}
+
+void GrBufferAllocPool::reset() {
+ VALIDATE();
+ fBytesInUse = 0;
+ this->deleteBlocks();
+ this->resetCpuData(0); // delete all the cpu-side memory
+ VALIDATE();
+}
+
+void GrBufferAllocPool::unmap() {
+ VALIDATE();
+
+ if (fBufferPtr) {
+ BufferBlock& block = fBlocks.back();
+ if (block.fBuffer->isMapped()) {
+ UNMAP_BUFFER(block);
+ } else {
+ size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree;
+ this->flushCpuData(fBlocks.back(), flushSize);
+ }
+ fBufferPtr = nullptr;
+ }
+ VALIDATE();
+}
+
+#ifdef SK_DEBUG
+void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
+ bool wasDestroyed = false;
+ if (fBufferPtr) {
+ SkASSERT(!fBlocks.empty());
+ if (fBlocks.back().fBuffer->isMapped()) {
+ GrBuffer* buf = fBlocks.back().fBuffer;
+ SkASSERT(buf->mapPtr() == fBufferPtr);
+ } else {
+ SkASSERT(fCpuData == fBufferPtr);
+ }
+ } else {
+ SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isMapped());
+ }
+ size_t bytesInUse = 0;
+ for (int i = 0; i < fBlocks.count() - 1; ++i) {
+ SkASSERT(!fBlocks[i].fBuffer->isMapped());
+ }
+ for (int i = 0; !wasDestroyed && i < fBlocks.count(); ++i) {
+ if (fBlocks[i].fBuffer->wasDestroyed()) {
+ wasDestroyed = true;
+ } else {
+ size_t bytes = fBlocks[i].fBuffer->gpuMemorySize() - fBlocks[i].fBytesFree;
+ bytesInUse += bytes;
+ SkASSERT(bytes || unusedBlockAllowed);
+ }
+ }
+
+ if (!wasDestroyed) {
+ SkASSERT(bytesInUse == fBytesInUse);
+ if (unusedBlockAllowed) {
+ SkASSERT((fBytesInUse && !fBlocks.empty()) ||
+ (!fBytesInUse && (fBlocks.count() < 2)));
+ } else {
+ SkASSERT((0 == fBytesInUse) == fBlocks.empty());
+ }
+ }
+}
+#endif
+
+void* GrBufferAllocPool::makeSpace(size_t size,
+ size_t alignment,
+ const GrBuffer** buffer,
+ size_t* offset) {
+ VALIDATE();
+
+ SkASSERT(buffer);
+ SkASSERT(offset);
+
+ if (fBufferPtr) {
+ BufferBlock& back = fBlocks.back();
+ size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
+ size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
+ if ((size + pad) <= back.fBytesFree) {
+ memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
+ usedBytes += pad;
+ *offset = usedBytes;
+ *buffer = back.fBuffer;
+ back.fBytesFree -= size + pad;
+ fBytesInUse += size + pad;
+ VALIDATE();
+ return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
+ }
+ }
+
+    // We could honor the space request using a partial update of the current
+ // VB (if there is room). But we don't currently use draw calls to GL that
+ // allow the driver to know that previously issued draws won't read from
+ // the part of the buffer we update. Also, the GL buffer implementation
+ // may be cheating on the actual buffer size by shrinking the buffer on
+ // updateData() if the amount of data passed is less than the full buffer
+ // size.
+
+ if (!this->createBlock(size)) {
+ return nullptr;
+ }
+ SkASSERT(fBufferPtr);
+
+ *offset = 0;
+ BufferBlock& back = fBlocks.back();
+ *buffer = back.fBuffer;
+ back.fBytesFree -= size;
+ fBytesInUse += size;
+ VALIDATE();
+ return fBufferPtr;
+}
+
+void GrBufferAllocPool::putBack(size_t bytes) {
+ VALIDATE();
+
+ while (bytes) {
+ // caller shouldn't try to put back more than they've taken
+ SkASSERT(!fBlocks.empty());
+ BufferBlock& block = fBlocks.back();
+ size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree;
+ if (bytes >= bytesUsed) {
+ bytes -= bytesUsed;
+ fBytesInUse -= bytesUsed;
+            // if we mapped a VB to satisfy the makeSpace and we're releasing
+            // beyond it, then unmap it.
+ if (block.fBuffer->isMapped()) {
+ UNMAP_BUFFER(block);
+ }
+ this->destroyBlock();
+ } else {
+ block.fBytesFree += bytes;
+ fBytesInUse -= bytes;
+ bytes = 0;
+ break;
+ }
+ }
+
+ VALIDATE();
+}
+
+bool GrBufferAllocPool::createBlock(size_t requestSize) {
+
+ size_t size = SkTMax(requestSize, fMinBlockSize);
+ SkASSERT(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);
+
+ VALIDATE();
+
+ BufferBlock& block = fBlocks.push_back();
+
+ block.fBuffer = this->getBuffer(size);
+ if (!block.fBuffer) {
+ fBlocks.pop_back();
+ return false;
+ }
+
+ block.fBytesFree = block.fBuffer->gpuMemorySize();
+ if (fBufferPtr) {
+ SkASSERT(fBlocks.count() > 1);
+ BufferBlock& prev = fBlocks.fromBack(1);
+ if (prev.fBuffer->isMapped()) {
+ UNMAP_BUFFER(prev);
+ } else {
+ this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree);
+ }
+ fBufferPtr = nullptr;
+ }
+
+ SkASSERT(!fBufferPtr);
+
+ // If the buffer is CPU-backed we map it because it is free to do so and saves a copy.
+ // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
+ // threshold.
+ bool attemptMap = block.fBuffer->isCPUBacked();
+ if (!attemptMap && GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
+ attemptMap = size > fBufferMapThreshold;
+ }
+
+ if (attemptMap) {
+ fBufferPtr = block.fBuffer->map();
+ }
+
+ if (!fBufferPtr) {
+ fBufferPtr = this->resetCpuData(block.fBytesFree);
+ }
+
+ VALIDATE(true);
+
+ return true;
+}
+
+void GrBufferAllocPool::destroyBlock() {
+ SkASSERT(!fBlocks.empty());
+
+ BufferBlock& block = fBlocks.back();
+
+ SkASSERT(!block.fBuffer->isMapped());
+ block.fBuffer->unref();
+ fBlocks.pop_back();
+ fBufferPtr = nullptr;
+}
+
+void* GrBufferAllocPool::resetCpuData(size_t newSize) {
+ sk_free(fCpuData);
+ if (newSize) {
+ if (fGpu->caps()->mustClearUploadedBufferData()) {
+ fCpuData = sk_calloc_throw(newSize);
+ } else {
+ fCpuData = sk_malloc_throw(newSize);
+ }
+ } else {
+ fCpuData = nullptr;
+ }
+ return fCpuData;
+}
+
+
+void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
+ GrBuffer* buffer = block.fBuffer;
+ SkASSERT(buffer);
+ SkASSERT(!buffer->isMapped());
+ SkASSERT(fCpuData == fBufferPtr);
+ SkASSERT(flushSize <= buffer->gpuMemorySize());
+ VALIDATE(true);
+
+ if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
+ flushSize > fBufferMapThreshold) {
+ void* data = buffer->map();
+ if (data) {
+ memcpy(data, fBufferPtr, flushSize);
+ UNMAP_BUFFER(block);
+ return;
+ }
+ }
+ buffer->updateData(fBufferPtr, flushSize);
+ VALIDATE(true);
+}
+
+GrBuffer* GrBufferAllocPool::getBuffer(size_t size) {
+
+ GrResourceProvider* rp = fGpu->getContext()->resourceProvider();
+
+ // Shouldn't have to use this flag (https://bug.skia.org/4156)
+ static const uint32_t kFlags = GrResourceProvider::kNoPendingIO_Flag;
+ return rp->createBuffer(size, fBufferType, kDynamic_GrAccessPattern, kFlags);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu)
+ : GrBufferAllocPool(gpu, kVertex_GrBufferType, MIN_VERTEX_BUFFER_SIZE) {
+}
+
+void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
+ int vertexCount,
+ const GrBuffer** buffer,
+ int* startVertex) {
+
+ SkASSERT(vertexCount >= 0);
+ SkASSERT(buffer);
+ SkASSERT(startVertex);
+
+ size_t offset = 0; // assign to suppress warning
+ void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
+ vertexSize,
+ buffer,
+ &offset);
+
+ SkASSERT(0 == offset % vertexSize);
+ *startVertex = static_cast<int>(offset / vertexSize);
+ return ptr;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
+ : GrBufferAllocPool(gpu, kIndex_GrBufferType, MIN_INDEX_BUFFER_SIZE) {
+}
+
+void* GrIndexBufferAllocPool::makeSpace(int indexCount,
+ const GrBuffer** buffer,
+ int* startIndex) {
+
+ SkASSERT(indexCount >= 0);
+ SkASSERT(buffer);
+ SkASSERT(startIndex);
+
+ size_t offset = 0; // assign to suppress warning
+ void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
+ sizeof(uint16_t),
+ buffer,
+ &offset);
+
+ SkASSERT(0 == offset % sizeof(uint16_t));
+ *startIndex = static_cast<int>(offset / sizeof(uint16_t));
+ return ptr;
+}
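A minimal sketch of the vertex-pool flow described above; 'pool' is a GrVertexBufferAllocPool and writeQuad() a hypothetical helper that fills four positions:

    const GrBuffer* vertexBuffer = nullptr;
    int firstVertex = 0;
    SkPoint* positions = static_cast<SkPoint*>(
            pool.makeSpace(sizeof(SkPoint), 4, &vertexBuffer, &firstVertex));
    if (positions) {
        writeQuad(positions);   // hypothetical: write the four SkPoints
        pool.unmap();           // must be called before drawing from the pool
    }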
diff --git a/gfx/skia/skia/src/gpu/GrBufferAllocPool.h b/gfx/skia/skia/src/gpu/GrBufferAllocPool.h
new file mode 100644
index 000000000..071b00b06
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrBufferAllocPool.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBufferAllocPool_DEFINED
+#define GrBufferAllocPool_DEFINED
+
+#include "SkTArray.h"
+#include "SkTDArray.h"
+#include "SkTypes.h"
+#include "GrTypesPriv.h"
+
+class GrBuffer;
+class GrGpu;
+
+/**
+ * A pool of geometry buffers tied to a GrGpu.
+ *
+ * The pool allows a client to make space for geometry and then put back excess
+ * space if it over-allocated. When a client is ready to draw from the pool
+ * it calls unmap on the pool to ensure buffers are ready for drawing. The pool
+ * can be reset after drawing is completed to recycle space.
+ *
+ * At creation time a minimum per-buffer size can be specified. Additionally,
+ * a number of buffers to preallocate can be specified. These will
+ * be allocated at the min size and kept around until the pool is destroyed.
+ */
+class GrBufferAllocPool : SkNoncopyable {
+public:
+ /**
+ * Ensures all buffers are unmapped and have all data written to them.
+ * Call before drawing using buffers from the pool.
+ */
+ void unmap();
+
+ /**
+ * Invalidates all the data in the pool, unrefs non-preallocated buffers.
+ */
+ void reset();
+
+ /**
+ * Frees data from makeSpace calls in LIFO order.
+ */
+ void putBack(size_t bytes);
+
+protected:
+ /**
+ * Constructor
+ *
+ * @param gpu The GrGpu used to create the buffers.
+ * @param bufferType The type of buffers to create.
+ * @param bufferSize The minimum size of created buffers.
+ * This value will be clamped to some
+ * reasonable minimum.
+ */
+ GrBufferAllocPool(GrGpu* gpu,
+ GrBufferType bufferType,
+ size_t bufferSize = 0);
+
+ virtual ~GrBufferAllocPool();
+
+ /**
+ * Returns a block of memory to hold data. A buffer designated to hold the
+ * data is given to the caller. The buffer may or may not be locked. The
+ * returned ptr remains valid until any of the following:
+ * *makeSpace is called again.
+ * *unmap is called.
+ * *reset is called.
+ * *this object is destroyed.
+ *
+ * Once unmap on the pool is called the data is guaranteed to be in the
+ * buffer at the offset indicated by offset. Until that time it may be
+ * in temporary storage and/or the buffer may be locked.
+ *
+ * @param size the amount of data to make space for
+ * @param alignment alignment constraint from start of buffer
+ * @param buffer returns the buffer that will hold the data.
+ * @param offset returns the offset into buffer of the data.
+ * @return pointer to where the client should write the data.
+ */
+ void* makeSpace(size_t size,
+ size_t alignment,
+ const GrBuffer** buffer,
+ size_t* offset);
+
+ GrBuffer* getBuffer(size_t size);
+
+private:
+ struct BufferBlock {
+ size_t fBytesFree;
+ GrBuffer* fBuffer;
+ };
+
+ bool createBlock(size_t requestSize);
+ void destroyBlock();
+ void deleteBlocks();
+ void flushCpuData(const BufferBlock& block, size_t flushSize);
+ void* resetCpuData(size_t newSize);
+#ifdef SK_DEBUG
+ void validate(bool unusedBlockAllowed = false) const;
+#endif
+ size_t fBytesInUse;
+
+ GrGpu* fGpu;
+ size_t fMinBlockSize;
+ GrBufferType fBufferType;
+
+ SkTArray<BufferBlock> fBlocks;
+ void* fCpuData;
+ void* fBufferPtr;
+ size_t fBufferMapThreshold;
+};
+
+/**
+ * A GrBufferAllocPool of vertex buffers
+ */
+class GrVertexBufferAllocPool : public GrBufferAllocPool {
+public:
+ /**
+ * Constructor
+ *
+ * @param gpu The GrGpu used to create the vertex buffers.
+ */
+ GrVertexBufferAllocPool(GrGpu* gpu);
+
+ /**
+ * Returns a block of memory to hold vertices. A buffer designated to hold
+ * the vertices is given to the caller. The buffer may or may not be locked.
+ * The returned ptr remains valid until any of the following:
+ * *makeSpace is called again.
+ * *unmap is called.
+ * *reset is called.
+ * *this object is destroyed.
+ *
+ * Once unmap on the pool is called the vertices are guaranteed to be in
+ * the buffer at the offset indicated by startVertex. Until that time they
+ * may be in temporary storage and/or the buffer may be locked.
+ *
+ * @param vertexSize specifies size of a vertex to allocate space for
+ * @param vertexCount number of vertices to allocate space for
+ * @param buffer returns the vertex buffer that will hold the
+ * vertices.
+ * @param startVertex returns the offset into buffer of the first vertex.
+ * In units of the size of a vertex (vertexSize).
+ * @return pointer to first vertex.
+ */
+ void* makeSpace(size_t vertexSize,
+ int vertexCount,
+ const GrBuffer** buffer,
+ int* startVertex);
+
+private:
+ typedef GrBufferAllocPool INHERITED;
+};
+
+/**
+ * A GrBufferAllocPool of index buffers
+ */
+class GrIndexBufferAllocPool : public GrBufferAllocPool {
+public:
+ /**
+ * Constructor
+ *
+ * @param gpu The GrGpu used to create the index buffers.
+ */
+ GrIndexBufferAllocPool(GrGpu* gpu);
+
+ /**
+ * Returns a block of memory to hold indices. A buffer designated to hold
+ * the indices is given to the caller. The buffer may or may not be locked.
+ * The returned ptr remains valid until any of the following:
+ * *makeSpace is called again.
+ * *unmap is called.
+ * *reset is called.
+ * *this object is destroyed.
+ *
+ * Once unmap on the pool is called the indices are guaranteed to be in the
+ * buffer at the offset indicated by startIndex. Until that time they may be
+ * in temporary storage and/or the buffer may be locked.
+ *
+ * @param indexCount number of indices to allocate space for
+ * @param buffer returns the index buffer that will hold the indices.
+ * @param startIndex returns the offset into buffer of the first index.
+ * @return pointer to first index.
+ */
+ void* makeSpace(int indexCount,
+ const GrBuffer** buffer,
+ int* startIndex);
+
+private:
+ typedef GrBufferAllocPool INHERITED;
+};
+
+#endif
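
The key invariant of the base makeSpace contract, relied on by both subclasses above, is that the returned offset is a multiple of the requested alignment, so start indices are exact integer divisions. A small standalone illustration of that arithmetic (plain C++, hypothetical values, not Skia code):

#include <cassert>
#include <cstddef>

// Mirrors the SkASSERT(0 == offset % vertexSize) check in the subclasses:
// an aligned byte offset converts to an element index without remainder.
static int start_element(size_t byteOffset, size_t elementSize) {
    assert(elementSize != 0 && byteOffset % elementSize == 0);
    return static_cast<int>(byteOffset / elementSize);
}

int main() {
    return start_element(96, 12) == 8 ? 0 : 1;  // 96-byte offset of 12-byte vertices -> vertex 8
}
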
diff --git a/gfx/skia/skia/src/gpu/GrCaps.cpp b/gfx/skia/skia/src/gpu/GrCaps.cpp
new file mode 100644
index 000000000..0f77b5a47
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrCaps.cpp
@@ -0,0 +1,300 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrCaps.h"
+#include "GrContextOptions.h"
+#include "GrWindowRectangles.h"
+
+GrShaderCaps::GrShaderCaps() {
+ fShaderDerivativeSupport = false;
+ fGeometryShaderSupport = false;
+ fPathRenderingSupport = false;
+ fDstReadInShaderSupport = false;
+ fDualSourceBlendingSupport = false;
+ fIntegerSupport = false;
+ fTexelBufferSupport = false;
+ fShaderPrecisionVaries = false;
+}
+
+static const char* shader_type_to_string(GrShaderType type) {
+ switch (type) {
+ case kVertex_GrShaderType:
+ return "vertex";
+ case kGeometry_GrShaderType:
+ return "geometry";
+ case kFragment_GrShaderType:
+ return "fragment";
+ }
+ return "";
+}
+
+static const char* precision_to_string(GrSLPrecision p) {
+ switch (p) {
+ case kLow_GrSLPrecision:
+ return "low";
+ case kMedium_GrSLPrecision:
+ return "medium";
+ case kHigh_GrSLPrecision:
+ return "high";
+ }
+ return "";
+}
+
+SkString GrShaderCaps::dump() const {
+ SkString r;
+ static const char* gNY[] = { "NO", "YES" };
+ r.appendf("Shader Derivative Support : %s\n", gNY[fShaderDerivativeSupport]);
+ r.appendf("Geometry Shader Support : %s\n", gNY[fGeometryShaderSupport]);
+ r.appendf("Path Rendering Support : %s\n", gNY[fPathRenderingSupport]);
+ r.appendf("Dst Read In Shader Support : %s\n", gNY[fDstReadInShaderSupport]);
+ r.appendf("Dual Source Blending Support : %s\n", gNY[fDualSourceBlendingSupport]);
+ r.appendf("Integer Support : %s\n", gNY[fIntegerSupport]);
+ r.appendf("Texel Buffer Support : %s\n", gNY[fTexelBufferSupport]);
+
+ r.appendf("Shader Float Precisions (varies: %s):\n", gNY[fShaderPrecisionVaries]);
+
+ for (int s = 0; s < kGrShaderTypeCount; ++s) {
+ GrShaderType shaderType = static_cast<GrShaderType>(s);
+ r.appendf("\t%s:\n", shader_type_to_string(shaderType));
+ for (int p = 0; p < kGrSLPrecisionCount; ++p) {
+ if (fFloatPrecisions[s][p].supported()) {
+ GrSLPrecision precision = static_cast<GrSLPrecision>(p);
+ r.appendf("\t\t%s: log_low: %d log_high: %d bits: %d\n",
+ precision_to_string(precision),
+ fFloatPrecisions[s][p].fLogRangeLow,
+ fFloatPrecisions[s][p].fLogRangeHigh,
+ fFloatPrecisions[s][p].fBits);
+ }
+ }
+ }
+
+ return r;
+}
+
+void GrShaderCaps::applyOptionsOverrides(const GrContextOptions& options) {
+ fDualSourceBlendingSupport = fDualSourceBlendingSupport && !options.fSuppressDualSourceBlending;
+ this->onApplyOptionsOverrides(options);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrCaps::GrCaps(const GrContextOptions& options) {
+ fMipMapSupport = false;
+ fNPOTTextureTileSupport = false;
+ fSRGBSupport = false;
+ fSRGBWriteControl = false;
+ fTwoSidedStencilSupport = false;
+ fStencilWrapOpsSupport = false;
+ fDiscardRenderTargetSupport = false;
+ fReuseScratchTextures = true;
+ fReuseScratchBuffers = true;
+ fGpuTracingSupport = false;
+ fCompressedTexSubImageSupport = false;
+ fOversizedStencilSupport = false;
+ fTextureBarrierSupport = false;
+ fSampleLocationsSupport = false;
+ fMultisampleDisableSupport = false;
+ fUsesMixedSamples = false;
+ fPreferClientSideDynamicBuffers = false;
+ fFullClearIsFree = false;
+ fMustClearUploadedBufferData = false;
+ fSampleShadingSupport = false;
+ fFenceSyncSupport = false;
+
+ fUseDrawInsteadOfClear = false;
+
+ fInstancedSupport = InstancedSupport::kNone;
+
+ fBlendEquationSupport = kBasic_BlendEquationSupport;
+ fAdvBlendEqBlacklist = 0;
+
+ fMapBufferFlags = kNone_MapFlags;
+
+ fMaxVertexAttributes = 0;
+ fMaxRenderTargetSize = 1;
+ fMaxTextureSize = 1;
+ fMaxColorSampleCount = 0;
+ fMaxStencilSampleCount = 0;
+ fMaxRasterSamples = 0;
+ fMaxWindowRectangles = 0;
+
+ fSuppressPrints = options.fSuppressPrints;
+ fImmediateFlush = options.fImmediateMode;
+ fBufferMapThreshold = options.fBufferMapThreshold;
+ fUseDrawInsteadOfPartialRenderTargetWrite = options.fUseDrawInsteadOfPartialRenderTargetWrite;
+ fUseDrawInsteadOfAllRenderTargetWrites = false;
+ fAvoidInstancedDrawsToFPTargets = false;
+
+ fPreferVRAMUseOverFlushes = true;
+}
+
+void GrCaps::applyOptionsOverrides(const GrContextOptions& options) {
+ this->onApplyOptionsOverrides(options);
+ fMaxTextureSize = SkTMin(fMaxTextureSize, options.fMaxTextureSizeOverride);
+ // If the max tile override is zero, it means we should use the max texture size.
+ if (!options.fMaxTileSizeOverride || options.fMaxTileSizeOverride > fMaxTextureSize) {
+ fMaxTileSize = fMaxTextureSize;
+ } else {
+ fMaxTileSize = options.fMaxTileSizeOverride;
+ }
+ if (fMaxWindowRectangles > GrWindowRectangles::kMaxWindows) {
+ SkDebugf("WARNING: capping window rectangles at %i. HW advertises support for %i.\n",
+ GrWindowRectangles::kMaxWindows, fMaxWindowRectangles);
+ fMaxWindowRectangles = GrWindowRectangles::kMaxWindows;
+ }
+}
+
+static SkString map_flags_to_string(uint32_t flags) {
+ SkString str;
+ if (GrCaps::kNone_MapFlags == flags) {
+ str = "none";
+ } else {
+ SkASSERT(GrCaps::kCanMap_MapFlag & flags);
+ SkDEBUGCODE(flags &= ~GrCaps::kCanMap_MapFlag);
+ str = "can_map";
+
+ if (GrCaps::kSubset_MapFlag & flags) {
+ str.append(" partial");
+ } else {
+ str.append(" full");
+ }
+ SkDEBUGCODE(flags &= ~GrCaps::kSubset_MapFlag);
+ }
+ SkASSERT(0 == flags); // Make sure we handled all the flags.
+ return str;
+}
+
+SkString GrCaps::dump() const {
+ SkString r;
+ static const char* gNY[] = {"NO", "YES"};
+ r.appendf("MIP Map Support : %s\n", gNY[fMipMapSupport]);
+ r.appendf("NPOT Texture Tile Support : %s\n", gNY[fNPOTTextureTileSupport]);
+ r.appendf("sRGB Support : %s\n", gNY[fSRGBSupport]);
+ r.appendf("sRGB Write Control : %s\n", gNY[fSRGBWriteControl]);
+ r.appendf("Two Sided Stencil Support : %s\n", gNY[fTwoSidedStencilSupport]);
+ r.appendf("Stencil Wrap Ops Support : %s\n", gNY[fStencilWrapOpsSupport]);
+ r.appendf("Discard Render Target Support : %s\n", gNY[fDiscardRenderTargetSupport]);
+ r.appendf("Reuse Scratch Textures : %s\n", gNY[fReuseScratchTextures]);
+ r.appendf("Reuse Scratch Buffers : %s\n", gNY[fReuseScratchBuffers]);
+ r.appendf("Gpu Tracing Support : %s\n", gNY[fGpuTracingSupport]);
+ r.appendf("Compressed Update Support : %s\n", gNY[fCompressedTexSubImageSupport]);
+ r.appendf("Oversized Stencil Support : %s\n", gNY[fOversizedStencilSupport]);
+ r.appendf("Texture Barrier Support : %s\n", gNY[fTextureBarrierSupport]);
+ r.appendf("Sample Locations Support : %s\n", gNY[fSampleLocationsSupport]);
+ r.appendf("Multisample disable support : %s\n", gNY[fMultisampleDisableSupport]);
+ r.appendf("Uses Mixed Samples : %s\n", gNY[fUsesMixedSamples]);
+ r.appendf("Prefer client-side dynamic buffers : %s\n", gNY[fPreferClientSideDynamicBuffers]);
+ r.appendf("Full screen clear is free : %s\n", gNY[fFullClearIsFree]);
+ r.appendf("Must clear buffer memory : %s\n", gNY[fMustClearUploadedBufferData]);
+ r.appendf("Sample shading support : %s\n", gNY[fSampleShadingSupport]);
+ r.appendf("Fence sync support : %s\n", gNY[fFenceSyncSupport]);
+
+ r.appendf("Draw Instead of Clear [workaround] : %s\n", gNY[fUseDrawInsteadOfClear]);
+ r.appendf("Draw Instead of TexSubImage [workaround] : %s\n",
+ gNY[fUseDrawInsteadOfPartialRenderTargetWrite]);
+ r.appendf("Prefer VRAM Use over flushes [workaround] : %s\n", gNY[fPreferVRAMUseOverFlushes]);
+
+ if (this->advancedBlendEquationSupport()) {
+ r.appendf("Advanced Blend Equation Blacklist : 0x%x\n", fAdvBlendEqBlacklist);
+ }
+
+ r.appendf("Max Vertex Attributes : %d\n", fMaxVertexAttributes);
+ r.appendf("Max Texture Size : %d\n", fMaxTextureSize);
+ r.appendf("Max Render Target Size : %d\n", fMaxRenderTargetSize);
+ r.appendf("Max Color Sample Count : %d\n", fMaxColorSampleCount);
+ r.appendf("Max Stencil Sample Count : %d\n", fMaxStencilSampleCount);
+ r.appendf("Max Raster Samples : %d\n", fMaxRasterSamples);
+ r.appendf("Max Window Rectangles : %d\n", fMaxWindowRectangles);
+
+ static const char* kInstancedSupportNames[] = {
+ "None",
+ "Basic",
+ "Multisampled",
+ "Mixed Sampled",
+ };
+ GR_STATIC_ASSERT(0 == (int)InstancedSupport::kNone);
+ GR_STATIC_ASSERT(1 == (int)InstancedSupport::kBasic);
+ GR_STATIC_ASSERT(2 == (int)InstancedSupport::kMultisampled);
+ GR_STATIC_ASSERT(3 == (int)InstancedSupport::kMixedSampled);
+ GR_STATIC_ASSERT(4 == SK_ARRAY_COUNT(kInstancedSupportNames));
+
+ r.appendf("Instanced Support : %s\n",
+ kInstancedSupportNames[(int)fInstancedSupport]);
+
+ static const char* kBlendEquationSupportNames[] = {
+ "Basic",
+ "Advanced",
+ "Advanced Coherent",
+ };
+ GR_STATIC_ASSERT(0 == kBasic_BlendEquationSupport);
+ GR_STATIC_ASSERT(1 == kAdvanced_BlendEquationSupport);
+ GR_STATIC_ASSERT(2 == kAdvancedCoherent_BlendEquationSupport);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kBlendEquationSupportNames) == kLast_BlendEquationSupport + 1);
+
+ r.appendf("Blend Equation Support : %s\n",
+ kBlendEquationSupportNames[fBlendEquationSupport]);
+ r.appendf("Map Buffer Support : %s\n",
+ map_flags_to_string(fMapBufferFlags).c_str());
+
+ static const char* kConfigNames[] = {
+ "Unknown", // kUnknown_GrPixelConfig
+ "Alpha8", // kAlpha_8_GrPixelConfig,
+ "Index8", // kIndex_8_GrPixelConfig,
+ "RGB565", // kRGB_565_GrPixelConfig,
+ "RGBA444", // kRGBA_4444_GrPixelConfig,
+ "RGBA8888", // kRGBA_8888_GrPixelConfig,
+ "BGRA8888", // kBGRA_8888_GrPixelConfig,
+ "SRGBA8888",// kSRGBA_8888_GrPixelConfig,
+ "SBGRA8888",// kSBGRA_8888_GrPixelConfig,
+ "ETC1", // kETC1_GrPixelConfig,
+ "LATC", // kLATC_GrPixelConfig,
+ "R11EAC", // kR11_EAC_GrPixelConfig,
+ "ASTC12x12",// kASTC_12x12_GrPixelConfig,
+ "RGBAFloat",// kRGBA_float_GrPixelConfig
+ "AlphaHalf",// kAlpha_half_GrPixelConfig
+ "RGBAHalf", // kRGBA_half_GrPixelConfig
+ };
+ GR_STATIC_ASSERT(0 == kUnknown_GrPixelConfig);
+ GR_STATIC_ASSERT(1 == kAlpha_8_GrPixelConfig);
+ GR_STATIC_ASSERT(2 == kIndex_8_GrPixelConfig);
+ GR_STATIC_ASSERT(3 == kRGB_565_GrPixelConfig);
+ GR_STATIC_ASSERT(4 == kRGBA_4444_GrPixelConfig);
+ GR_STATIC_ASSERT(5 == kRGBA_8888_GrPixelConfig);
+ GR_STATIC_ASSERT(6 == kBGRA_8888_GrPixelConfig);
+ GR_STATIC_ASSERT(7 == kSRGBA_8888_GrPixelConfig);
+ GR_STATIC_ASSERT(8 == kSBGRA_8888_GrPixelConfig);
+ GR_STATIC_ASSERT(9 == kETC1_GrPixelConfig);
+ GR_STATIC_ASSERT(10 == kLATC_GrPixelConfig);
+ GR_STATIC_ASSERT(11 == kR11_EAC_GrPixelConfig);
+ GR_STATIC_ASSERT(12 == kASTC_12x12_GrPixelConfig);
+ GR_STATIC_ASSERT(13 == kRGBA_float_GrPixelConfig);
+ GR_STATIC_ASSERT(14 == kAlpha_half_GrPixelConfig);
+ GR_STATIC_ASSERT(15 == kRGBA_half_GrPixelConfig);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kConfigNames) == kGrPixelConfigCnt);
+
+ SkASSERT(!this->isConfigRenderable(kUnknown_GrPixelConfig, false));
+ SkASSERT(!this->isConfigRenderable(kUnknown_GrPixelConfig, true));
+
+ for (size_t i = 1; i < SK_ARRAY_COUNT(kConfigNames); ++i) {
+ GrPixelConfig config = static_cast<GrPixelConfig>(i);
+ r.appendf("%s is renderable: %s, with MSAA: %s\n",
+ kConfigNames[i],
+ gNY[this->isConfigRenderable(config, false)],
+ gNY[this->isConfigRenderable(config, true)]);
+ }
+
+ SkASSERT(!this->isConfigTexturable(kUnknown_GrPixelConfig));
+
+ for (size_t i = 1; i < SK_ARRAY_COUNT(kConfigNames); ++i) {
+ GrPixelConfig config = static_cast<GrPixelConfig>(i);
+ r.appendf("%s is uploadable to a texture: %s\n",
+ kConfigNames[i],
+ gNY[this->isConfigTexturable(config)]);
+ }
+
+ return r;
+}
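
The two dump() methods above are intended for logging and bug reports. A short sketch of how a caller holding a GrContext might use them (the context pointer and the public caps()/shaderCaps() accessors are assumptions; the dump() calls are the ones defined in this file):

#include "GrContext.h"
#include "GrCaps.h"

// Print the reported GPU and shader capabilities of an existing context.
static void log_caps(GrContext* context) {
    const GrCaps* caps = context->caps();
    SkDebugf("%s\n", caps->dump().c_str());                // GrCaps::dump() above
    SkDebugf("%s\n", caps->shaderCaps()->dump().c_str());  // GrShaderCaps::dump() above
}
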
diff --git a/gfx/skia/skia/src/gpu/GrClipStackClip.cpp b/gfx/skia/skia/src/gpu/GrClipStackClip.cpp
new file mode 100644
index 000000000..b25bcf29d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrClipStackClip.cpp
@@ -0,0 +1,487 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrClipStackClip.h"
+
+#include "GrAppliedClip.h"
+#include "GrContextPriv.h"
+#include "GrDrawingManager.h"
+#include "GrDrawContextPriv.h"
+#include "GrFixedClip.h"
+#include "GrGpuResourcePriv.h"
+#include "GrRenderTargetPriv.h"
+#include "GrStencilAttachment.h"
+#include "GrSWMaskHelper.h"
+#include "effects/GrConvexPolyEffect.h"
+#include "effects/GrRRectEffect.h"
+#include "effects/GrTextureDomain.h"
+
+typedef SkClipStack::Element Element;
+typedef GrReducedClip::InitialState InitialState;
+typedef GrReducedClip::ElementList ElementList;
+
+static const int kMaxAnalyticElements = 4;
+
+bool GrClipStackClip::quickContains(const SkRect& rect) const {
+ if (!fStack || fStack->isWideOpen()) {
+ return true;
+ }
+ return fStack->quickContains(rect.makeOffset(SkIntToScalar(fOrigin.x()),
+ SkIntToScalar(fOrigin.y())));
+}
+
+bool GrClipStackClip::quickContains(const SkRRect& rrect) const {
+ if (!fStack || fStack->isWideOpen()) {
+ return true;
+ }
+ return fStack->quickContains(rrect.makeOffset(SkIntToScalar(fOrigin.fX),
+ SkIntToScalar(fOrigin.fY)));
+}
+
+bool GrClipStackClip::isRRect(const SkRect& origRTBounds, SkRRect* rr, bool* aa) const {
+ if (!fStack) {
+ return false;
+ }
+ const SkRect* rtBounds = &origRTBounds;
+ SkRect tempRTBounds;
+ bool origin = fOrigin.fX || fOrigin.fY;
+ if (origin) {
+ tempRTBounds = origRTBounds;
+ tempRTBounds.offset(SkIntToScalar(fOrigin.fX), SkIntToScalar(fOrigin.fY));
+ rtBounds = &tempRTBounds;
+ }
+ if (fStack->isRRect(*rtBounds, rr, aa)) {
+ if (origin) {
+ rr->offset(-SkIntToScalar(fOrigin.fX), -SkIntToScalar(fOrigin.fY));
+ }
+ return true;
+ }
+ return false;
+}
+
+void GrClipStackClip::getConservativeBounds(int width, int height, SkIRect* devResult,
+ bool* isIntersectionOfRects) const {
+ if (!fStack) {
+ devResult->setXYWH(0, 0, width, height);
+ if (isIntersectionOfRects) {
+ *isIntersectionOfRects = true;
+ }
+ return;
+ }
+ SkRect devBounds;
+ fStack->getConservativeBounds(-fOrigin.x(), -fOrigin.y(), width, height, &devBounds,
+ isIntersectionOfRects);
+ devBounds.roundOut(devResult);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// set up the draw state to enable the aa clipping mask.
+static sk_sp<GrFragmentProcessor> create_fp_for_mask(GrTexture* result,
+ const SkIRect &devBound) {
+ SkIRect domainTexels = SkIRect::MakeWH(devBound.width(), devBound.height());
+ return GrDeviceSpaceTextureDecalFragmentProcessor::Make(result, domainTexels,
+ {devBound.fLeft, devBound.fTop});
+}
+
+// Does the path in 'element' require SW rendering? If so, return true (and,
+// optionally, set 'prOut' to nullptr). If not, return false (and, optionally, set
+// 'prOut' to the non-SW path renderer that will do the job).
+bool GrClipStackClip::PathNeedsSWRenderer(GrContext* context,
+ bool hasUserStencilSettings,
+ const GrDrawContext* drawContext,
+ const SkMatrix& viewMatrix,
+ const Element* element,
+ GrPathRenderer** prOut,
+ bool needsStencil) {
+ if (Element::kRect_Type == element->getType()) {
+ // rects can always be drawn directly w/o using the software path
+ // TODO: skip rrects once we're drawing them directly.
+ if (prOut) {
+ *prOut = nullptr;
+ }
+ return false;
+ } else {
+ // We shouldn't get here with an empty clip element.
+ SkASSERT(Element::kEmpty_Type != element->getType());
+
+ // the gpu alpha mask will draw the inverse paths as non-inverse to a temp buffer
+ SkPath path;
+ element->asPath(&path);
+ if (path.isInverseFillType()) {
+ path.toggleInverseFillType();
+ }
+
+ GrPathRendererChain::DrawType type;
+
+ if (needsStencil) {
+ type = element->isAA()
+ ? GrPathRendererChain::kStencilAndColorAntiAlias_DrawType
+ : GrPathRendererChain::kStencilAndColor_DrawType;
+ } else {
+ type = element->isAA()
+ ? GrPathRendererChain::kColorAntiAlias_DrawType
+ : GrPathRendererChain::kColor_DrawType;
+ }
+
+ GrShape shape(path, GrStyle::SimpleFill());
+ GrPathRenderer::CanDrawPathArgs canDrawArgs;
+ canDrawArgs.fShaderCaps = context->caps()->shaderCaps();
+ canDrawArgs.fViewMatrix = &viewMatrix;
+ canDrawArgs.fShape = &shape;
+ canDrawArgs.fAntiAlias = element->isAA();
+ canDrawArgs.fHasUserStencilSettings = hasUserStencilSettings;
+ canDrawArgs.fIsStencilBufferMSAA = drawContext->isStencilBufferMultisampled();
+
+ // the 'false' parameter disallows use of the SW path renderer
+ GrPathRenderer* pr =
+ context->contextPriv().drawingManager()->getPathRenderer(canDrawArgs, false, type);
+ if (prOut) {
+ *prOut = pr;
+ }
+ return SkToBool(!pr);
+ }
+}
+
+/*
+ * This method traverses the clip stack to see if the GrSoftwarePathRenderer
+ * will be used on any element. If so, it returns true to indicate that the
+ * entire clip should be rendered in SW and then uploaded en masse to the gpu.
+ */
+bool GrClipStackClip::UseSWOnlyPath(GrContext* context,
+ bool hasUserStencilSettings,
+ const GrDrawContext* drawContext,
+ const GrReducedClip& reducedClip) {
+ // TODO: generalize this function so that when
+ // a clip gets complex enough it can just be done in SW regardless
+ // of whether it would invoke the GrSoftwarePathRenderer.
+
+ // Set the matrix so that rendered clip elements are transformed to mask space from clip
+ // space.
+ SkMatrix translate;
+ translate.setTranslate(SkIntToScalar(-reducedClip.left()), SkIntToScalar(-reducedClip.top()));
+
+ for (ElementList::Iter iter(reducedClip.elements()); iter.get(); iter.next()) {
+ const Element* element = iter.get();
+
+ SkCanvas::ClipOp op = element->getOp();
+ bool invert = element->isInverseFilled();
+ bool needsStencil = invert ||
+ SkCanvas::kIntersect_Op == op || SkCanvas::kReverseDifference_Op == op;
+
+ if (PathNeedsSWRenderer(context, hasUserStencilSettings,
+ drawContext, translate, element, nullptr, needsStencil)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool get_analytic_clip_processor(const ElementList& elements,
+ bool abortIfAA,
+ const SkVector& clipToRTOffset,
+ const SkRect& drawBounds,
+ sk_sp<GrFragmentProcessor>* resultFP) {
+ SkRect boundsInClipSpace;
+ boundsInClipSpace = drawBounds.makeOffset(-clipToRTOffset.fX, -clipToRTOffset.fY);
+ SkASSERT(elements.count() <= kMaxAnalyticElements);
+ SkSTArray<kMaxAnalyticElements, sk_sp<GrFragmentProcessor>> fps;
+ ElementList::Iter iter(elements);
+ while (iter.get()) {
+ SkCanvas::ClipOp op = iter.get()->getOp();
+ bool invert;
+ bool skip = false;
+ switch (op) {
+ case SkRegion::kReplace_Op:
+ SkASSERT(iter.get() == elements.head());
+ // Fallthrough, handled same as intersect.
+ case SkRegion::kIntersect_Op:
+ invert = false;
+ if (iter.get()->contains(boundsInClipSpace)) {
+ skip = true;
+ }
+ break;
+ case SkRegion::kDifference_Op:
+ invert = true;
+ // We don't currently have a cheap test for whether a rect is fully outside an
+ // element's primitive, so don't attempt to set skip.
+ break;
+ default:
+ return false;
+ }
+ if (!skip) {
+ GrPrimitiveEdgeType edgeType;
+ if (iter.get()->isAA()) {
+ if (abortIfAA) {
+ return false;
+ }
+ edgeType =
+ invert ? kInverseFillAA_GrProcessorEdgeType : kFillAA_GrProcessorEdgeType;
+ } else {
+ edgeType =
+ invert ? kInverseFillBW_GrProcessorEdgeType : kFillBW_GrProcessorEdgeType;
+ }
+
+ switch (iter.get()->getType()) {
+ case SkClipStack::Element::kPath_Type:
+ fps.emplace_back(GrConvexPolyEffect::Make(edgeType, iter.get()->getPath(),
+ &clipToRTOffset));
+ break;
+ case SkClipStack::Element::kRRect_Type: {
+ SkRRect rrect = iter.get()->getRRect();
+ rrect.offset(clipToRTOffset.fX, clipToRTOffset.fY);
+ fps.emplace_back(GrRRectEffect::Make(edgeType, rrect));
+ break;
+ }
+ case SkClipStack::Element::kRect_Type: {
+ SkRect rect = iter.get()->getRect();
+ rect.offset(clipToRTOffset.fX, clipToRTOffset.fY);
+ fps.emplace_back(GrConvexPolyEffect::Make(edgeType, rect));
+ break;
+ }
+ default:
+ break;
+ }
+ if (!fps.back()) {
+ return false;
+ }
+ }
+ iter.next();
+ }
+
+ *resultFP = nullptr;
+ if (fps.count()) {
+ *resultFP = GrFragmentProcessor::RunInSeries(fps.begin(), fps.count());
+ }
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// sort out what kind of clip mask needs to be created: alpha, stencil,
+// scissor, or entirely software
+bool GrClipStackClip::apply(GrContext* context, GrDrawContext* drawContext, bool useHWAA,
+ bool hasUserStencilSettings, GrAppliedClip* out) const {
+ if (!fStack || fStack->isWideOpen()) {
+ return true;
+ }
+
+ SkRect devBounds = SkRect::MakeIWH(drawContext->width(), drawContext->height());
+ if (!devBounds.intersect(out->clippedDrawBounds()) ||
+ GrClip::GetPixelIBounds(devBounds).isEmpty()) {
+ return false;
+ }
+
+ GrRenderTarget* rt = drawContext->accessRenderTarget();
+
+ const SkScalar clipX = SkIntToScalar(fOrigin.x()),
+ clipY = SkIntToScalar(fOrigin.y());
+
+ SkRect clipSpaceDevBounds = devBounds.makeOffset(clipX, clipY);
+ const GrReducedClip reducedClip(*fStack, clipSpaceDevBounds,
+ rt->renderTargetPriv().maxWindowRectangles());
+
+ if (reducedClip.hasIBounds() &&
+ !GrClip::IsInsideClip(reducedClip.ibounds(), clipSpaceDevBounds)) {
+ SkIRect scissorSpaceIBounds(reducedClip.ibounds());
+ scissorSpaceIBounds.offset(-fOrigin);
+ out->addScissor(scissorSpaceIBounds);
+ }
+
+ if (!reducedClip.windowRectangles().empty()) {
+ out->addWindowRectangles(reducedClip.windowRectangles(), fOrigin,
+ GrWindowRectsState::Mode::kExclusive);
+ }
+
+ if (reducedClip.elements().isEmpty()) {
+ return InitialState::kAllIn == reducedClip.initialState();
+ }
+
+ SkASSERT(reducedClip.hasIBounds());
+
+ // An element count of 4 was chosen because of the common pattern in Blink of:
+ // isect RR
+ // diff RR
+ // isect convex_poly
+ // isect convex_poly
+ // when drawing rounded div borders. This could probably be tuned based on a
+ // configuration's relative costs of switching RTs to generate a mask vs
+ // longer shaders.
+ if (reducedClip.elements().count() <= kMaxAnalyticElements) {
+ // When there are multiple samples we want to do per-sample clipping, not compute a
+ // fractional pixel coverage.
+ bool disallowAnalyticAA = drawContext->isStencilBufferMultisampled();
+ if (disallowAnalyticAA && !drawContext->numColorSamples()) {
+ // With a single color sample, any coverage info is lost from color once it hits the
+ // color buffer anyway, so we may as well use coverage AA if nothing else in the pipe
+ // is multisampled.
+ disallowAnalyticAA = useHWAA || hasUserStencilSettings;
+ }
+ sk_sp<GrFragmentProcessor> clipFP;
+ if (reducedClip.requiresAA() &&
+ get_analytic_clip_processor(reducedClip.elements(), disallowAnalyticAA,
+ {-clipX, -clipY}, devBounds, &clipFP)) {
+ out->addCoverageFP(std::move(clipFP));
+ return true;
+ }
+ }
+
+ // If the stencil buffer is multisampled we can use it to do everything.
+ if (!drawContext->isStencilBufferMultisampled() && reducedClip.requiresAA()) {
+ sk_sp<GrTexture> result;
+ if (UseSWOnlyPath(context, hasUserStencilSettings, drawContext, reducedClip)) {
+ // The clip geometry is complex enough that it will be more efficient to create it
+ // entirely in software
+ result = CreateSoftwareClipMask(context->textureProvider(), reducedClip);
+ } else {
+ result = CreateAlphaClipMask(context, reducedClip);
+ // If createAlphaClipMask fails it means UseSWOnlyPath has a bug
+ SkASSERT(result);
+ }
+
+ if (result) {
+ // The mask's top left coord should be pinned to the rounded-out top left corner of
+ // clipSpace bounds. We determine the mask's position WRT to the render target here.
+ SkIRect rtSpaceMaskBounds = reducedClip.ibounds();
+ rtSpaceMaskBounds.offset(-fOrigin);
+ out->addCoverageFP(create_fp_for_mask(result.get(), rtSpaceMaskBounds));
+ return true;
+ }
+ // if alpha clip mask creation fails fall through to the non-AA code paths
+ }
+
+ // use the stencil clip if we can't represent the clip as a rectangle.
+ // TODO: these need to be swapped over to using a StencilAttachmentProxy
+ GrStencilAttachment* stencilAttachment =
+ context->resourceProvider()->attachStencilAttachment(rt);
+ if (nullptr == stencilAttachment) {
+ SkDebugf("WARNING: failed to attach stencil buffer for clip mask. Clip will be ignored.\n");
+ return true;
+ }
+
+ // This relies on the property that a reduced sub-rect of the last clip will contain all the
+ // relevant window rectangles that were in the last clip. This subtle requirement will go away
+ // after clipping is overhauled.
+ if (stencilAttachment->mustRenderClip(reducedClip.elementsGenID(), reducedClip.ibounds(),
+ fOrigin)) {
+ reducedClip.drawStencilClipMask(context, drawContext, fOrigin);
+ stencilAttachment->setLastClip(reducedClip.elementsGenID(), reducedClip.ibounds(),
+ fOrigin);
+ }
+ out->addStencilClip();
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Create an 8-bit clip mask in alpha
+
+static void GetClipMaskKey(int32_t clipGenID, const SkIRect& bounds, GrUniqueKey* key) {
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey::Builder builder(key, kDomain, 3);
+ builder[0] = clipGenID;
+ builder[1] = SkToU16(bounds.fLeft) | (SkToU16(bounds.fRight) << 16);
+ builder[2] = SkToU16(bounds.fTop) | (SkToU16(bounds.fBottom) << 16);
+}
+
+sk_sp<GrTexture> GrClipStackClip::CreateAlphaClipMask(GrContext* context,
+ const GrReducedClip& reducedClip) {
+ GrResourceProvider* resourceProvider = context->resourceProvider();
+ GrUniqueKey key;
+ GetClipMaskKey(reducedClip.elementsGenID(), reducedClip.ibounds(), &key);
+ if (GrTexture* texture = resourceProvider->findAndRefTextureByUniqueKey(key)) {
+ return sk_sp<GrTexture>(texture);
+ }
+
+ sk_sp<GrDrawContext> dc(context->makeDrawContextWithFallback(SkBackingFit::kApprox,
+ reducedClip.width(),
+ reducedClip.height(),
+ kAlpha_8_GrPixelConfig,
+ nullptr));
+ if (!dc) {
+ return nullptr;
+ }
+
+ if (!reducedClip.drawAlphaClipMask(dc.get())) {
+ return nullptr;
+ }
+
+ sk_sp<GrTexture> texture(dc->asTexture());
+ SkASSERT(texture);
+ texture->resourcePriv().setUniqueKey(key);
+ return texture;
+}
+
+sk_sp<GrTexture> GrClipStackClip::CreateSoftwareClipMask(GrTextureProvider* texProvider,
+ const GrReducedClip& reducedClip) {
+ GrUniqueKey key;
+ GetClipMaskKey(reducedClip.elementsGenID(), reducedClip.ibounds(), &key);
+ if (GrTexture* texture = texProvider->findAndRefTextureByUniqueKey(key)) {
+ return sk_sp<GrTexture>(texture);
+ }
+
+ // The mask texture may be larger than necessary. We round out the clip space bounds and pin
+ // the top left corner of the resulting rect to the top left of the texture.
+ SkIRect maskSpaceIBounds = SkIRect::MakeWH(reducedClip.width(), reducedClip.height());
+
+ GrSWMaskHelper helper(texProvider);
+
+ // Set the matrix so that rendered clip elements are transformed to mask space from clip
+ // space.
+ SkMatrix translate;
+ translate.setTranslate(SkIntToScalar(-reducedClip.left()), SkIntToScalar(-reducedClip.top()));
+
+ helper.init(maskSpaceIBounds, &translate);
+ helper.clear(InitialState::kAllIn == reducedClip.initialState() ? 0xFF : 0x00);
+
+ for (ElementList::Iter iter(reducedClip.elements()); iter.get(); iter.next()) {
+ const Element* element = iter.get();
+ SkCanvas::ClipOp op = element->getOp();
+
+ if (SkCanvas::kIntersect_Op == op || SkCanvas::kReverseDifference_Op == op) {
+ // Intersect and reverse difference require modifying pixels outside of the geometry
+ // that is being "drawn". In both cases we erase all the pixels outside of the geometry
+ // but leave the pixels inside the geometry alone. For reverse difference we invert all
+ // the pixels before clearing the ones outside the geometry.
+ if (SkCanvas::kReverseDifference_Op == op) {
+ SkRect temp = SkRect::Make(reducedClip.ibounds());
+ // invert the entire scene
+ helper.drawRect(temp, SkRegion::kXOR_Op, false, 0xFF);
+ }
+ SkPath clipPath;
+ element->asPath(&clipPath);
+ clipPath.toggleInverseFillType();
+ GrShape shape(clipPath, GrStyle::SimpleFill());
+ helper.drawShape(shape, SkRegion::kReplace_Op, element->isAA(), 0x00);
+ continue;
+ }
+
+ // The other ops (union, xor, diff) only affect pixels inside
+ // the geometry so they can just be drawn normally
+ if (Element::kRect_Type == element->getType()) {
+ helper.drawRect(element->getRect(), (SkRegion::Op)op, element->isAA(), 0xFF);
+ } else {
+ SkPath path;
+ element->asPath(&path);
+ GrShape shape(path, GrStyle::SimpleFill());
+ helper.drawShape(shape, (SkRegion::Op)op, element->isAA(), 0xFF);
+ }
+ }
+
+ // Allocate clip mask texture
+ GrSurfaceDesc desc;
+ desc.fWidth = reducedClip.width();
+ desc.fHeight = reducedClip.height();
+ desc.fConfig = kAlpha_8_GrPixelConfig;
+
+ sk_sp<GrTexture> result(texProvider->createApproxTexture(desc));
+ if (!result) {
+ return nullptr;
+ }
+ result->resourcePriv().setUniqueKey(key);
+
+ helper.toTexture(result.get());
+
+ return result;
+}
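
GetClipMaskKey above lets the alpha and software mask caches share entries by packing the clip generation ID plus the clip's device bounds (which fit in 16 bits per edge) into three 32-bit key words. The same bounds packing in isolation, with hypothetical values and no Skia types:

#include <cstdint>

// Same layout as GetClipMaskKey: left/right share one word, top/bottom another.
static void pack_clip_bounds(int l, int t, int r, int b, uint32_t key[2]) {
    key[0] = static_cast<uint16_t>(l) | (static_cast<uint32_t>(static_cast<uint16_t>(r)) << 16);
    key[1] = static_cast<uint16_t>(t) | (static_cast<uint32_t>(static_cast<uint16_t>(b)) << 16);
}
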
diff --git a/gfx/skia/skia/src/gpu/GrClipStackClip.h b/gfx/skia/skia/src/gpu/GrClipStackClip.h
new file mode 100644
index 000000000..075d1d857
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrClipStackClip.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef GrClipStackClip_DEFINED
+#define GrClipStackClip_DEFINED
+
+#include "GrClip.h"
+#include "GrReducedClip.h"
+#include "SkClipStack.h"
+
+class GrPathRenderer;
+class GrTexture;
+class GrTextureProvider;
+class GrUniqueKey;
+
+/**
+ * GrClipStackClip can apply a generic SkClipStack to the draw state. It may need to generate an
+ * 8-bit alpha clip mask and/or modify the stencil buffer during apply().
+ */
+class GrClipStackClip final : public GrClip {
+public:
+ GrClipStackClip(const SkClipStack* stack = nullptr, const SkIPoint* origin = nullptr) {
+ this->reset(stack, origin);
+ }
+
+ void reset(const SkClipStack* stack = nullptr, const SkIPoint* origin = nullptr) {
+ fOrigin = origin ? *origin : SkIPoint::Make(0, 0);
+ fStack.reset(SkSafeRef(stack));
+ }
+
+ bool quickContains(const SkRect&) const final;
+ bool quickContains(const SkRRect&) const final;
+ void getConservativeBounds(int width, int height, SkIRect* devResult,
+ bool* isIntersectionOfRects) const final;
+ bool apply(GrContext*, GrDrawContext*, bool useHWAA, bool hasUserStencilSettings,
+ GrAppliedClip* out) const final;
+
+ bool isRRect(const SkRect& rtBounds, SkRRect* rr, bool* aa) const override;
+
+private:
+ static bool PathNeedsSWRenderer(GrContext* context,
+ bool hasUserStencilSettings,
+ const GrDrawContext*,
+ const SkMatrix& viewMatrix,
+ const SkClipStack::Element* element,
+ GrPathRenderer** prOut,
+ bool needsStencil);
+
+ // Creates an alpha mask of the clip. The mask is a rasterization of the reduced clip's
+ // elements through the rect specified by its ibounds().
+ static sk_sp<GrTexture> CreateAlphaClipMask(GrContext*, const GrReducedClip&);
+
+ // Similar to createAlphaClipMask but it rasterizes in SW and uploads to the result texture.
+ static sk_sp<GrTexture> CreateSoftwareClipMask(GrTextureProvider*, const GrReducedClip&);
+
+ static bool UseSWOnlyPath(GrContext*,
+ bool hasUserStencilSettings,
+ const GrDrawContext*,
+ const GrReducedClip&);
+
+ static GrTexture* CreateCachedMask(int width, int height, const GrUniqueKey& key,
+ bool renderTarget);
+
+ SkIPoint fOrigin;
+ SkAutoTUnref<const SkClipStack> fStack;
+};
+
+#endif // GrClipStackClip_DEFINED
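
A caller-side sketch of the public surface declared above: wrap an existing SkClipStack (plus a device-space origin) and ask the conservative containment question before deciding whether any clip mask or stencil work is needed. The stack, origin, and draw bounds are assumed to come from the caller:

#include "GrClipStackClip.h"

// Returns true if the clip definitely contains drawBounds, so the draw can
// skip clip mask / stencil generation entirely.
static bool draw_fully_inside_clip(const SkClipStack& stack,
                                   const SkIPoint& origin,
                                   const SkRect& drawBounds) {
    GrClipStackClip clip(&stack, &origin);
    return clip.quickContains(drawBounds);
}
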
diff --git a/gfx/skia/skia/src/gpu/GrColorSpaceXform.cpp b/gfx/skia/skia/src/gpu/GrColorSpaceXform.cpp
new file mode 100644
index 000000000..d2270fafd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrColorSpaceXform.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrColorSpaceXform.h"
+#include "SkColorSpace.h"
+#include "SkColorSpace_Base.h"
+#include "SkMatrix44.h"
+
+static inline bool sk_float_almost_equals(float x, float y, float tol) {
+ return sk_float_abs(x - y) <= tol;
+}
+
+static inline bool matrix_is_almost_identity(const SkMatrix44& m,
+ SkMScalar tol = SK_MScalar1 / (1 << 12)) {
+ return
+ sk_float_almost_equals(m.getFloat(0, 0), 1.0f, tol) &&
+ sk_float_almost_equals(m.getFloat(0, 1), 0.0f, tol) &&
+ sk_float_almost_equals(m.getFloat(0, 2), 0.0f, tol) &&
+ sk_float_almost_equals(m.getFloat(0, 3), 0.0f, tol) &&
+ sk_float_almost_equals(m.getFloat(1, 0), 0.0f, tol) &&
+ sk_float_almost_equals(m.getFloat(1, 1), 1.0f, tol) &&
+ sk_float_almost_equals(m.getFloat(1, 2), 0.0f, tol) &&
+ sk_float_almost_equals(m.getFloat(1, 3), 0.0f, tol) &&
+ sk_float_almost_equals(m.getFloat(2, 0), 0.0f, tol) &&
+ sk_float_almost_equals(m.getFloat(2, 1), 0.0f, tol) &&
+ sk_float_almost_equals(m.getFloat(2, 2), 1.0f, tol) &&
+ sk_float_almost_equals(m.getFloat(2, 3), 0.0f, tol) &&
+ sk_float_almost_equals(m.getFloat(3, 0), 0.0f, tol) &&
+ sk_float_almost_equals(m.getFloat(3, 1), 0.0f, tol) &&
+ sk_float_almost_equals(m.getFloat(3, 2), 0.0f, tol) &&
+ sk_float_almost_equals(m.getFloat(3, 3), 1.0f, tol);
+}
+
+GrColorSpaceXform::GrColorSpaceXform(const SkMatrix44& srcToDst)
+ : fSrcToDst(srcToDst) {}
+
+sk_sp<GrColorSpaceXform> GrColorSpaceXform::Make(SkColorSpace* src, SkColorSpace* dst) {
+ if (!src || !dst) {
+ // Invalid
+ return nullptr;
+ }
+
+ if (src == dst) {
+ // Quick equality check - no conversion needed in this case
+ return nullptr;
+ }
+
+ SkMatrix44 srcToDst(SkMatrix44::kUninitialized_Constructor);
+ srcToDst.setConcat(as_CSB(dst)->fromXYZD50(), as_CSB(src)->toXYZD50());
+
+ if (matrix_is_almost_identity(srcToDst)) {
+ return nullptr;
+ }
+
+ return sk_make_sp<GrColorSpaceXform>(srcToDst);
+}
+
+bool GrColorSpaceXform::Equals(const GrColorSpaceXform* a, const GrColorSpaceXform* b) {
+ if (a == b) {
+ return true;
+ }
+
+ if (!a || !b) {
+ return false;
+ }
+
+ return a->fSrcToDst == b->fSrcToDst;
+}
+
+GrColor4f GrColorSpaceXform::apply(const GrColor4f& srcColor) {
+ GrColor4f result;
+ fSrcToDst.mapScalars(srcColor.fRGBA, result.fRGBA);
+ return result;
+}
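
A sketch of the intended use of the factory and apply() above: build the gamut transform once for a source/destination color-space pair and run individual colors through it. The two SkColorSpace pointers are assumed to be supplied by the caller (for example, from a decoded image and the destination surface):

#include "GrColorSpaceXform.h"
#include "SkColorSpace.h"

// Convert one color from the srcCS gamut to the dstCS gamut.
// Make() returning null means no conversion is needed (or the inputs were invalid).
static GrColor4f to_dst_gamut(const GrColor4f& color, SkColorSpace* srcCS, SkColorSpace* dstCS) {
    sk_sp<GrColorSpaceXform> xform = GrColorSpaceXform::Make(srcCS, dstCS);
    return xform ? xform->apply(color) : color;
}
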
diff --git a/gfx/skia/skia/src/gpu/GrContext.cpp b/gfx/skia/skia/src/gpu/GrContext.cpp
new file mode 100644
index 000000000..176d5da59
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrContext.cpp
@@ -0,0 +1,843 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrContext.h"
+#include "GrContextPriv.h"
+#include "GrContextOptions.h"
+#include "GrDrawingManager.h"
+#include "GrDrawContext.h"
+#include "GrResourceCache.h"
+#include "GrResourceProvider.h"
+#include "GrSoftwarePathRenderer.h"
+#include "GrSurfacePriv.h"
+
+#include "SkConfig8888.h"
+#include "SkGrPriv.h"
+
+#include "batches/GrCopySurfaceBatch.h"
+#include "effects/GrConfigConversionEffect.h"
+#include "effects/GrGammaEffect.h"
+#include "text/GrTextBlobCache.h"
+
+#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)
+#define ASSERT_SINGLE_OWNER \
+ SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(&fSingleOwner);)
+#define ASSERT_SINGLE_OWNER_PRIV \
+ SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(&fContext->fSingleOwner);)
+#define RETURN_IF_ABANDONED if (fDrawingManager->wasAbandoned()) { return; }
+#define RETURN_FALSE_IF_ABANDONED if (fDrawingManager->wasAbandoned()) { return false; }
+#define RETURN_NULL_IF_ABANDONED if (fDrawingManager->wasAbandoned()) { return nullptr; }
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext) {
+ GrContextOptions defaultOptions;
+ return Create(backend, backendContext, defaultOptions);
+}
+
+GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext,
+ const GrContextOptions& options) {
+ GrContext* context = new GrContext;
+
+ if (context->init(backend, backendContext, options)) {
+ return context;
+ } else {
+ context->unref();
+ return nullptr;
+ }
+}
+
+static int32_t gNextID = 1;
+static int32_t next_id() {
+ int32_t id;
+ do {
+ id = sk_atomic_inc(&gNextID);
+ } while (id == SK_InvalidGenID);
+ return id;
+}
+
+GrContext::GrContext() : fUniqueID(next_id()) {
+ fGpu = nullptr;
+ fCaps = nullptr;
+ fResourceCache = nullptr;
+ fResourceProvider = nullptr;
+ fBatchFontCache = nullptr;
+}
+
+bool GrContext::init(GrBackend backend, GrBackendContext backendContext,
+ const GrContextOptions& options) {
+ ASSERT_SINGLE_OWNER
+ SkASSERT(!fGpu);
+
+ fGpu = GrGpu::Create(backend, backendContext, options, this);
+ if (!fGpu) {
+ return false;
+ }
+ this->initCommon(options);
+ return true;
+}
+
+void GrContext::initCommon(const GrContextOptions& options) {
+ ASSERT_SINGLE_OWNER
+
+ fCaps = SkRef(fGpu->caps());
+ fResourceCache = new GrResourceCache(fCaps);
+ fResourceProvider = new GrResourceProvider(fGpu, fResourceCache, &fSingleOwner);
+
+ fDidTestPMConversions = false;
+
+ GrDrawTarget::Options dtOptions;
+ dtOptions.fClipBatchToBounds = options.fClipBatchToBounds;
+ dtOptions.fDrawBatchBounds = options.fDrawBatchBounds;
+ dtOptions.fMaxBatchLookback = options.fMaxBatchLookback;
+ dtOptions.fMaxBatchLookahead = options.fMaxBatchLookahead;
+ GrPathRendererChain::Options prcOptions;
+ prcOptions.fDisableDistanceFieldRenderer = options.fDisableDistanceFieldPaths;
+ prcOptions.fAllowPathMaskCaching = options.fAllowPathMaskCaching;
+ prcOptions.fDisableAllPathRenderers = options.fForceSWPathMasks;
+ fDrawingManager.reset(new GrDrawingManager(this, dtOptions, prcOptions, options.fImmediateMode,
+ &fSingleOwner));
+
+ // GrBatchFontCache will eventually replace GrFontCache
+ fBatchFontCache = new GrBatchFontCache(this);
+
+ fTextBlobCache.reset(new GrTextBlobCache(TextBlobCacheOverBudgetCB, this));
+}
+
+GrContext::~GrContext() {
+ ASSERT_SINGLE_OWNER
+
+ if (!fGpu) {
+ SkASSERT(!fCaps);
+ return;
+ }
+
+ this->flush();
+
+ fDrawingManager->cleanup();
+
+ for (int i = 0; i < fCleanUpData.count(); ++i) {
+ (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
+ }
+
+ delete fResourceProvider;
+ delete fResourceCache;
+ delete fBatchFontCache;
+
+ fGpu->unref();
+ fCaps->unref();
+}
+
+GrContextThreadSafeProxy* GrContext::threadSafeProxy() {
+ if (!fThreadSafeProxy) {
+ fThreadSafeProxy.reset(new GrContextThreadSafeProxy(fCaps, this->uniqueID()));
+ }
+ return SkRef(fThreadSafeProxy.get());
+}
+
+void GrContext::abandonContext() {
+ ASSERT_SINGLE_OWNER
+
+ fResourceProvider->abandon();
+
+ // Need to abandon the drawing manager first so all the render targets
+ // will be released/forgotten before they too are abandoned.
+ fDrawingManager->abandon();
+
+ // Abandon the cache first so resource destructors
+ // don't try to free the resources in the backend API.
+ fResourceCache->abandonAll();
+
+ fGpu->disconnect(GrGpu::DisconnectType::kAbandon);
+
+ fBatchFontCache->freeAll();
+ fTextBlobCache->freeAll();
+}
+
+void GrContext::releaseResourcesAndAbandonContext() {
+ ASSERT_SINGLE_OWNER
+
+ fResourceProvider->abandon();
+
+ // Need to abandon the drawing manager first so all the render targets
+ // will be released/forgotten before they too are abandoned.
+ fDrawingManager->abandon();
+
+ // Release all resources in the backend 3D API.
+ fResourceCache->releaseAll();
+
+ fGpu->disconnect(GrGpu::DisconnectType::kCleanup);
+
+ fBatchFontCache->freeAll();
+ fTextBlobCache->freeAll();
+}
+
+void GrContext::resetContext(uint32_t state) {
+ ASSERT_SINGLE_OWNER
+ fGpu->markContextDirty(state);
+}
+
+void GrContext::freeGpuResources() {
+ ASSERT_SINGLE_OWNER
+
+ this->flush();
+
+ fBatchFontCache->freeAll();
+
+ fDrawingManager->freeGpuResources();
+
+ fResourceCache->purgeAllUnlocked();
+}
+
+void GrContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
+ ASSERT_SINGLE_OWNER
+
+ if (resourceCount) {
+ *resourceCount = fResourceCache->getBudgetedResourceCount();
+ }
+ if (resourceBytes) {
+ *resourceBytes = fResourceCache->getBudgetedResourceBytes();
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrContext::TextBlobCacheOverBudgetCB(void* data) {
+ SkASSERT(data);
+ // TextBlobs are drawn at the SkGpuDevice level, therefore they cannot rely on GrDrawContext
+ // to perform a necessary flush. The solution is to move drawText calls to below the GrContext
+ // level, but this is not trivial because they call drawPath on SkGpuDevice.
+ GrContext* context = reinterpret_cast<GrContext*>(data);
+ context->flush();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrContext::flush() {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ fDrawingManager->flush();
+}
+
+bool sw_convert_to_premul(GrPixelConfig srcConfig, int width, int height, size_t inRowBytes,
+ const void* inPixels, size_t outRowBytes, void* outPixels) {
+ SkSrcPixelInfo srcPI;
+ if (!GrPixelConfigToColorType(srcConfig, &srcPI.fColorType)) {
+ return false;
+ }
+ srcPI.fAlphaType = kUnpremul_SkAlphaType;
+ srcPI.fPixels = inPixels;
+ srcPI.fRowBytes = inRowBytes;
+
+ SkDstPixelInfo dstPI;
+ dstPI.fColorType = srcPI.fColorType;
+ dstPI.fAlphaType = kPremul_SkAlphaType;
+ dstPI.fPixels = outPixels;
+ dstPI.fRowBytes = outRowBytes;
+
+ return srcPI.convertPixelsTo(&dstPI, width, height);
+}
+
+bool GrContext::writeSurfacePixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig srcConfig, const void* buffer, size_t rowBytes,
+ uint32_t pixelOpsFlags) {
+ ASSERT_SINGLE_OWNER
+ RETURN_FALSE_IF_ABANDONED
+ ASSERT_OWNED_RESOURCE(surface);
+ SkASSERT(surface);
+ GR_AUDIT_TRAIL_AUTO_FRAME(&fAuditTrail, "GrContext::writeSurfacePixels");
+
+ this->testPMConversionsIfNecessary(pixelOpsFlags);
+
+ // Trim the params here so that if we wind up making a temporary surface it can be as small as
+ // necessary and because GrGpu::getWritePixelsInfo requires it.
+ if (!GrSurfacePriv::AdjustWritePixelParams(surface->width(), surface->height(),
+ GrBytesPerPixel(srcConfig), &left, &top, &width,
+ &height, &buffer, &rowBytes)) {
+ return false;
+ }
+
+ bool applyPremulToSrc = false;
+ if (kUnpremul_PixelOpsFlag & pixelOpsFlags) {
+ if (!GrPixelConfigIs8888(srcConfig)) {
+ return false;
+ }
+ applyPremulToSrc = true;
+ }
+
+ GrGpu::DrawPreference drawPreference = GrGpu::kNoDraw_DrawPreference;
+ // Don't prefer to draw for the conversion (and thereby access a texture from the cache) when
+ // we've already determined that there isn't a roundtrip preserving conversion processor pair.
+ if (applyPremulToSrc && !this->didFailPMUPMConversionTest()) {
+ drawPreference = GrGpu::kCallerPrefersDraw_DrawPreference;
+ }
+
+ GrGpu::WritePixelTempDrawInfo tempDrawInfo;
+ if (!fGpu->getWritePixelsInfo(surface, width, height, srcConfig, &drawPreference,
+ &tempDrawInfo)) {
+ return false;
+ }
+
+ if (!(kDontFlush_PixelOpsFlag & pixelOpsFlags) && surface->surfacePriv().hasPendingIO()) {
+ this->flush();
+ }
+
+ SkAutoTUnref<GrTexture> tempTexture;
+ if (GrGpu::kNoDraw_DrawPreference != drawPreference) {
+ tempTexture.reset(
+ this->textureProvider()->createApproxTexture(tempDrawInfo.fTempSurfaceDesc));
+ if (!tempTexture && GrGpu::kRequireDraw_DrawPreference == drawPreference) {
+ return false;
+ }
+ }
+
+ // temp buffer for doing sw premul conversion, if needed.
+ SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);
+ if (tempTexture) {
+ sk_sp<GrFragmentProcessor> fp;
+ SkMatrix textureMatrix;
+ textureMatrix.setIDiv(tempTexture->width(), tempTexture->height());
+ if (applyPremulToSrc) {
+ fp = this->createUPMToPMEffect(tempTexture, tempDrawInfo.fSwizzle, textureMatrix);
+ // If premultiplying was the only reason for the draw, fall back to a straight write.
+ if (!fp) {
+ if (GrGpu::kCallerPrefersDraw_DrawPreference == drawPreference) {
+ tempTexture.reset(nullptr);
+ }
+ } else {
+ applyPremulToSrc = false;
+ }
+ }
+ if (tempTexture) {
+ if (!fp) {
+ fp = GrConfigConversionEffect::Make(tempTexture, tempDrawInfo.fSwizzle,
+ GrConfigConversionEffect::kNone_PMConversion,
+ textureMatrix);
+ if (!fp) {
+ return false;
+ }
+ }
+ GrRenderTarget* renderTarget = surface->asRenderTarget();
+ SkASSERT(renderTarget);
+ if (tempTexture->surfacePriv().hasPendingIO()) {
+ this->flush();
+ }
+ if (applyPremulToSrc) {
+ size_t tmpRowBytes = 4 * width;
+ tmpPixels.reset(width * height);
+ if (!sw_convert_to_premul(srcConfig, width, height, rowBytes, buffer, tmpRowBytes,
+ tmpPixels.get())) {
+ return false;
+ }
+ rowBytes = tmpRowBytes;
+ buffer = tmpPixels.get();
+ applyPremulToSrc = false;
+ }
+ if (!fGpu->writePixels(tempTexture, 0, 0, width, height,
+ tempDrawInfo.fWriteConfig, buffer,
+ rowBytes)) {
+ return false;
+ }
+ SkMatrix matrix;
+ matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
+ // TODO: Need to decide the semantics of this function for color spaces. Do we support
+ // conversion from a passed-in color space? For now, specifying nullptr means that this
+ // path will do no conversion, so it will match the behavior of the non-draw path.
+ sk_sp<GrDrawContext> drawContext(this->contextPriv().makeWrappedDrawContext(
+ sk_ref_sp(renderTarget),
+ nullptr));
+ if (!drawContext) {
+ return false;
+ }
+ GrPaint paint;
+ paint.addColorFragmentProcessor(std::move(fp));
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ paint.setAllowSRGBInputs(true);
+ SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
+ drawContext->drawRect(GrNoClip(), paint, matrix, rect, nullptr);
+
+ if (kFlushWrites_PixelOp & pixelOpsFlags) {
+ this->flushSurfaceWrites(surface);
+ }
+ }
+ }
+ if (!tempTexture) {
+ if (applyPremulToSrc) {
+ size_t tmpRowBytes = 4 * width;
+ tmpPixels.reset(width * height);
+ if (!sw_convert_to_premul(srcConfig, width, height, rowBytes, buffer, tmpRowBytes,
+ tmpPixels.get())) {
+ return false;
+ }
+ rowBytes = tmpRowBytes;
+ buffer = tmpPixels.get();
+ applyPremulToSrc = false;
+ }
+ return fGpu->writePixels(surface, left, top, width, height, srcConfig, buffer, rowBytes);
+ }
+ return true;
+}
+
+bool GrContext::readSurfacePixels(GrSurface* src,
+ int left, int top, int width, int height,
+ GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
+ uint32_t flags) {
+ ASSERT_SINGLE_OWNER
+ RETURN_FALSE_IF_ABANDONED
+ ASSERT_OWNED_RESOURCE(src);
+ SkASSERT(src);
+ GR_AUDIT_TRAIL_AUTO_FRAME(&fAuditTrail, "GrContext::readSurfacePixels");
+
+ this->testPMConversionsIfNecessary(flags);
+ SkAutoMutexAcquire ama(fReadPixelsMutex);
+
+ // Adjust the params so that if we wind up using an intermediate surface we've already done
+ // all the trimming and the temporary can be the min size required.
+ if (!GrSurfacePriv::AdjustReadPixelParams(src->width(), src->height(),
+ GrBytesPerPixel(dstConfig), &left,
+ &top, &width, &height, &buffer, &rowBytes)) {
+ return false;
+ }
+
+ if (!(kDontFlush_PixelOpsFlag & flags) && src->surfacePriv().hasPendingWrite()) {
+ this->flush();
+ }
+
+ bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);
+ if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
+ // The unpremul flag is only allowed for 8888 configs.
+ return false;
+ }
+
+ GrGpu::DrawPreference drawPreference = GrGpu::kNoDraw_DrawPreference;
+ // Don't prefer to draw for the conversion (and thereby access a texture from the cache) when
+ // we've already determined that there isn't a roundtrip preserving conversion processor pair.
+ if (unpremul && !this->didFailPMUPMConversionTest()) {
+ drawPreference = GrGpu::kCallerPrefersDraw_DrawPreference;
+ }
+
+ GrGpu::ReadPixelTempDrawInfo tempDrawInfo;
+ if (!fGpu->getReadPixelsInfo(src, width, height, rowBytes, dstConfig, &drawPreference,
+ &tempDrawInfo)) {
+ return false;
+ }
+
+ SkAutoTUnref<GrSurface> surfaceToRead(SkRef(src));
+ bool didTempDraw = false;
+ if (GrGpu::kNoDraw_DrawPreference != drawPreference) {
+ if (SkBackingFit::kExact == tempDrawInfo.fTempSurfaceFit) {
+ // We only respect this when the entire src is being read. Otherwise we can trigger too
+ // many odd ball texture sizes and trash the cache.
+ if (width != src->width() || height != src->height()) {
+ tempDrawInfo.fTempSurfaceFit = SkBackingFit::kApprox;
+ }
+ }
+ // TODO: Need to decide the semantics of this function for color spaces. Do we support
+ // conversion to a passed-in color space? For now, specifying nullptr means that this
+ // path will do no conversion, so it will match the behavior of the non-draw path.
+ sk_sp<GrDrawContext> tempDC = this->makeDrawContext(tempDrawInfo.fTempSurfaceFit,
+ tempDrawInfo.fTempSurfaceDesc.fWidth,
+ tempDrawInfo.fTempSurfaceDesc.fHeight,
+ tempDrawInfo.fTempSurfaceDesc.fConfig,
+ nullptr,
+ tempDrawInfo.fTempSurfaceDesc.fSampleCnt,
+ tempDrawInfo.fTempSurfaceDesc.fOrigin);
+ if (tempDC) {
+ SkMatrix textureMatrix;
+ textureMatrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
+ textureMatrix.postIDiv(src->width(), src->height());
+ sk_sp<GrFragmentProcessor> fp;
+ if (unpremul) {
+ fp = this->createPMToUPMEffect(src->asTexture(), tempDrawInfo.fSwizzle,
+ textureMatrix);
+ if (fp) {
+ unpremul = false; // we no longer need to do this on CPU after the read back.
+ } else if (GrGpu::kCallerPrefersDraw_DrawPreference == drawPreference) {
+ // We only wanted to do the draw in order to perform the unpremul so don't
+ // bother.
+ tempDC.reset(nullptr);
+ }
+ }
+ if (!fp && tempDC) {
+ fp = GrConfigConversionEffect::Make(src->asTexture(), tempDrawInfo.fSwizzle,
+ GrConfigConversionEffect::kNone_PMConversion,
+ textureMatrix);
+ }
+ if (fp) {
+ GrPaint paint;
+ paint.addColorFragmentProcessor(std::move(fp));
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ paint.setAllowSRGBInputs(true);
+ SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
+ tempDC->drawRect(GrNoClip(), paint, SkMatrix::I(), rect, nullptr);
+ surfaceToRead.reset(tempDC->asTexture().release());
+ left = 0;
+ top = 0;
+ didTempDraw = true;
+ }
+ }
+ }
+
+ if (GrGpu::kRequireDraw_DrawPreference == drawPreference && !didTempDraw) {
+ return false;
+ }
+ GrPixelConfig configToRead = dstConfig;
+ if (didTempDraw) {
+ this->flushSurfaceWrites(surfaceToRead);
+ configToRead = tempDrawInfo.fReadConfig;
+ }
+ if (!fGpu->readPixels(surfaceToRead, left, top, width, height, configToRead, buffer,
+ rowBytes)) {
+ return false;
+ }
+
+ // Perform unpremul conversion if we weren't able to perform it as a draw.
+ if (unpremul) {
+ SkDstPixelInfo dstPI;
+ if (!GrPixelConfigToColorType(dstConfig, &dstPI.fColorType)) {
+ return false;
+ }
+ dstPI.fAlphaType = kUnpremul_SkAlphaType;
+ dstPI.fPixels = buffer;
+ dstPI.fRowBytes = rowBytes;
+
+ SkSrcPixelInfo srcPI;
+ srcPI.fColorType = dstPI.fColorType;
+ srcPI.fAlphaType = kPremul_SkAlphaType;
+ srcPI.fPixels = buffer;
+ srcPI.fRowBytes = rowBytes;
+
+ return srcPI.convertPixelsTo(&dstPI, width, height);
+ }
+ return true;
+}
+
+void GrContext::prepareSurfaceForExternalIO(GrSurface* surface) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkASSERT(surface);
+ ASSERT_OWNED_RESOURCE(surface);
+ fDrawingManager->prepareSurfaceForExternalIO(surface);
+}
+
+bool GrContext::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ ASSERT_SINGLE_OWNER
+ RETURN_FALSE_IF_ABANDONED
+ GR_AUDIT_TRAIL_AUTO_FRAME(&fAuditTrail, "GrContext::copySurface");
+
+ if (!src || !dst) {
+ return false;
+ }
+ ASSERT_OWNED_RESOURCE(src);
+ ASSERT_OWNED_RESOURCE(dst);
+
+ if (!dst->asRenderTarget()) {
+ SkIRect clippedSrcRect;
+ SkIPoint clippedDstPoint;
+ if (!GrCopySurfaceBatch::ClipSrcRectAndDstPoint(dst, src, srcRect, dstPoint,
+ &clippedSrcRect, &clippedDstPoint)) {
+ return false;
+ }
+ // If we don't have an RT for the dst then we won't have a GrDrawContext to insert
+ // the copy surface into. In the future we plan to have a more limited Context type
+ // (GrCopyContext?) that has the subset of GrDrawContext operations that should be
+ // allowed on textures that aren't render targets.
+ // For now we just flush any writes to the src and issue an immediate copy to the dst.
+ src->flushWrites();
+ return fGpu->copySurface(dst, src, clippedSrcRect, clippedDstPoint);
+ }
+ sk_sp<GrDrawContext> drawContext(this->contextPriv().makeWrappedDrawContext(
+ sk_ref_sp(dst->asRenderTarget()),
+ nullptr));
+ if (!drawContext) {
+ return false;
+ }
+
+ if (!drawContext->copySurface(src, srcRect, dstPoint)) {
+ return false;
+ }
+ return true;
+}
+
+void GrContext::flushSurfaceWrites(GrSurface* surface) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ if (surface->surfacePriv().hasPendingWrite()) {
+ this->flush();
+ }
+}
+
+void GrContext::flushSurfaceIO(GrSurface* surface) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ if (surface->surfacePriv().hasPendingIO()) {
+ this->flush();
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+int GrContext::getRecommendedSampleCount(GrPixelConfig config,
+ SkScalar dpi) const {
+ ASSERT_SINGLE_OWNER
+
+ if (!this->caps()->isConfigRenderable(config, true)) {
+ return 0;
+ }
+ int chosenSampleCount = 0;
+ if (fGpu->caps()->shaderCaps()->pathRenderingSupport()) {
+ if (dpi >= 250.0f) {
+ chosenSampleCount = 4;
+ } else {
+ chosenSampleCount = 16;
+ }
+ }
+ return chosenSampleCount <= fGpu->caps()->maxSampleCount() ? chosenSampleCount : 0;
+}
+
+sk_sp<GrDrawContext> GrContextPriv::makeWrappedDrawContext(sk_sp<GrRenderTarget> rt,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps) {
+ ASSERT_SINGLE_OWNER_PRIV
+ return this->drawingManager()->makeDrawContext(std::move(rt),
+ std::move(colorSpace),
+ surfaceProps);
+}
+
+sk_sp<GrDrawContext> GrContextPriv::makeBackendTextureDrawContext(const GrBackendTextureDesc& desc,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* props,
+ GrWrapOwnership ownership) {
+ ASSERT_SINGLE_OWNER_PRIV
+ SkASSERT(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
+
+ sk_sp<GrSurface> surface(fContext->textureProvider()->wrapBackendTexture(desc, ownership));
+ if (!surface) {
+ return nullptr;
+ }
+
+ return this->drawingManager()->makeDrawContext(sk_ref_sp(surface->asRenderTarget()),
+ std::move(colorSpace), props);
+}
+
+sk_sp<GrDrawContext> GrContextPriv::makeBackendRenderTargetDrawContext(
+ const GrBackendRenderTargetDesc& desc,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps) {
+ ASSERT_SINGLE_OWNER_PRIV
+
+ sk_sp<GrRenderTarget> rt(fContext->textureProvider()->wrapBackendRenderTarget(desc));
+ if (!rt) {
+ return nullptr;
+ }
+
+ return this->drawingManager()->makeDrawContext(std::move(rt),
+ std::move(colorSpace),
+ surfaceProps);
+}
+
+sk_sp<GrDrawContext> GrContextPriv::makeBackendTextureAsRenderTargetDrawContext(
+ const GrBackendTextureDesc& desc,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps) {
+ ASSERT_SINGLE_OWNER_PRIV
+ SkASSERT(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
+
+ sk_sp<GrSurface> surface(fContext->resourceProvider()->wrapBackendTextureAsRenderTarget(desc));
+ if (!surface) {
+ return nullptr;
+ }
+
+ return this->drawingManager()->makeDrawContext(sk_ref_sp(surface->asRenderTarget()),
+ std::move(colorSpace),
+ surfaceProps);
+}
+
+static inline GrPixelConfig GrPixelConfigFallback(GrPixelConfig config) {
+ static const GrPixelConfig kFallback[] = {
+ kUnknown_GrPixelConfig, // kUnknown_GrPixelConfig
+ kRGBA_8888_GrPixelConfig, // kAlpha_8_GrPixelConfig
+ kUnknown_GrPixelConfig, // kIndex_8_GrPixelConfig
+ kRGBA_8888_GrPixelConfig, // kRGB_565_GrPixelConfig
+ kRGBA_8888_GrPixelConfig, // kRGBA_4444_GrPixelConfig
+ kUnknown_GrPixelConfig, // kRGBA_8888_GrPixelConfig
+ kRGBA_8888_GrPixelConfig, // kBGRA_8888_GrPixelConfig
+ kUnknown_GrPixelConfig, // kSRGBA_8888_GrPixelConfig
+ kSRGBA_8888_GrPixelConfig, // kSBGRA_8888_GrPixelConfig
+ kUnknown_GrPixelConfig, // kETC1_GrPixelConfig
+ kUnknown_GrPixelConfig, // kLATC_GrPixelConfig
+ kUnknown_GrPixelConfig, // kR11_EAC_GrPixelConfig
+ kUnknown_GrPixelConfig, // kASTC_12x12_GrPixelConfig
+ kUnknown_GrPixelConfig, // kRGBA_float_GrPixelConfig
+ kRGBA_half_GrPixelConfig, // kAlpha_half_GrPixelConfig
+ kUnknown_GrPixelConfig, // kRGBA_half_GrPixelConfig
+ };
+ return kFallback[config];
+
+ GR_STATIC_ASSERT(0 == kUnknown_GrPixelConfig);
+ GR_STATIC_ASSERT(1 == kAlpha_8_GrPixelConfig);
+ GR_STATIC_ASSERT(2 == kIndex_8_GrPixelConfig);
+ GR_STATIC_ASSERT(3 == kRGB_565_GrPixelConfig);
+ GR_STATIC_ASSERT(4 == kRGBA_4444_GrPixelConfig);
+ GR_STATIC_ASSERT(5 == kRGBA_8888_GrPixelConfig);
+ GR_STATIC_ASSERT(6 == kBGRA_8888_GrPixelConfig);
+ GR_STATIC_ASSERT(7 == kSRGBA_8888_GrPixelConfig);
+ GR_STATIC_ASSERT(8 == kSBGRA_8888_GrPixelConfig);
+ GR_STATIC_ASSERT(9 == kETC1_GrPixelConfig);
+ GR_STATIC_ASSERT(10 == kLATC_GrPixelConfig);
+ GR_STATIC_ASSERT(11 == kR11_EAC_GrPixelConfig);
+ GR_STATIC_ASSERT(12 == kASTC_12x12_GrPixelConfig);
+ GR_STATIC_ASSERT(13 == kRGBA_float_GrPixelConfig);
+ GR_STATIC_ASSERT(14 == kAlpha_half_GrPixelConfig);
+ GR_STATIC_ASSERT(15 == kRGBA_half_GrPixelConfig);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kFallback) == kGrPixelConfigCnt);
+}
+
+sk_sp<GrDrawContext> GrContext::makeDrawContextWithFallback(SkBackingFit fit,
+ int width, int height,
+ GrPixelConfig config,
+ sk_sp<SkColorSpace> colorSpace,
+ int sampleCnt,
+ GrSurfaceOrigin origin,
+ const SkSurfaceProps* surfaceProps,
+ SkBudgeted budgeted) {
+ if (!this->caps()->isConfigRenderable(config, sampleCnt > 0)) {
+ config = GrPixelConfigFallback(config);
+ }
+
+ return this->makeDrawContext(fit, width, height, config, std::move(colorSpace),
+ sampleCnt, origin, surfaceProps, budgeted);
+}
+
+sk_sp<GrDrawContext> GrContext::makeDrawContext(SkBackingFit fit,
+ int width, int height,
+ GrPixelConfig config,
+ sk_sp<SkColorSpace> colorSpace,
+ int sampleCnt,
+ GrSurfaceOrigin origin,
+ const SkSurfaceProps* surfaceProps,
+ SkBudgeted budgeted) {
+ if (!this->caps()->isConfigRenderable(config, sampleCnt > 0)) {
+ return nullptr;
+ }
+
+ GrSurfaceDesc desc;
+ desc.fFlags = kRenderTarget_GrSurfaceFlag;
+ desc.fOrigin = origin;
+ desc.fWidth = width;
+ desc.fHeight = height;
+ desc.fConfig = config;
+ desc.fSampleCnt = sampleCnt;
+
+ sk_sp<GrTexture> tex;
+ if (SkBackingFit::kExact == fit) {
+ tex.reset(this->textureProvider()->createTexture(desc, budgeted));
+ } else {
+ tex.reset(this->textureProvider()->createApproxTexture(desc));
+ }
+ if (!tex) {
+ return nullptr;
+ }
+
+ sk_sp<GrDrawContext> drawContext(this->contextPriv().makeWrappedDrawContext(
+ sk_ref_sp(tex->asRenderTarget()),
+ std::move(colorSpace), surfaceProps));
+ if (!drawContext) {
+ return nullptr;
+ }
+
+ return drawContext;
+}
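
A brief usage sketch (not part of the patch) tying the functions above together: pick a recommended MSAA sample count for a config and request a draw context, falling back to a renderable config if needed. The names ctx, w, h, and dpi are assumed to come from the caller:

    // Sketch only; relies on getRecommendedSampleCount() and makeDrawContextWithFallback() above.
    sk_sp<GrDrawContext> make_offscreen(GrContext* ctx, int w, int h, SkScalar dpi) {
        GrPixelConfig config = kRGBA_8888_GrPixelConfig;
        int sampleCnt = ctx->getRecommendedSampleCount(config, dpi);  // 0 means no MSAA
        return ctx->makeDrawContextWithFallback(SkBackingFit::kExact, w, h, config,
                                                nullptr /* color space */, sampleCnt,
                                                kBottomLeft_GrSurfaceOrigin,
                                                nullptr /* surface props */, SkBudgeted::kYes);
    }
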
+
+bool GrContext::abandoned() const {
+ ASSERT_SINGLE_OWNER
+ return fDrawingManager->wasAbandoned();
+}
+
+namespace {
+void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
+ GrConfigConversionEffect::PMConversion pmToUPM;
+ GrConfigConversionEffect::PMConversion upmToPM;
+ GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
+ *pmToUPMValue = pmToUPM;
+ *upmToPMValue = upmToPM;
+}
+}
+
+void GrContext::testPMConversionsIfNecessary(uint32_t flags) {
+ ASSERT_SINGLE_OWNER
+ if (SkToBool(kUnpremul_PixelOpsFlag & flags)) {
+ SkAutoMutexAcquire ama(fTestPMConversionsMutex);
+ if (!fDidTestPMConversions) {
+ test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
+ fDidTestPMConversions = true;
+ }
+ }
+}
+
+sk_sp<GrFragmentProcessor> GrContext::createPMToUPMEffect(GrTexture* texture,
+ const GrSwizzle& swizzle,
+ const SkMatrix& matrix) const {
+ ASSERT_SINGLE_OWNER
+ // We should have already called this->testPMConversionsIfNecessary().
+ SkASSERT(fDidTestPMConversions);
+ GrConfigConversionEffect::PMConversion pmToUPM =
+ static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
+ if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
+ return GrConfigConversionEffect::Make(texture, swizzle, pmToUPM, matrix);
+ } else {
+ return nullptr;
+ }
+}
+
+sk_sp<GrFragmentProcessor> GrContext::createUPMToPMEffect(GrTexture* texture,
+ const GrSwizzle& swizzle,
+ const SkMatrix& matrix) const {
+ ASSERT_SINGLE_OWNER
+ // We should have already called this->testPMConversionsIfNecessary().
+ SkASSERT(fDidTestPMConversions);
+ GrConfigConversionEffect::PMConversion upmToPM =
+ static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
+ if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
+ return GrConfigConversionEffect::Make(texture, swizzle, upmToPM, matrix);
+ } else {
+ return nullptr;
+ }
+}
+
+bool GrContext::didFailPMUPMConversionTest() const {
+ ASSERT_SINGLE_OWNER
+ // We should have already called this->testPMConversionsIfNecessary().
+ SkASSERT(fDidTestPMConversions);
+ // The PM<->UPM tests fail or succeed together so we only need to check one.
+ return GrConfigConversionEffect::kNone_PMConversion == fPMToUPMConversion;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+void GrContext::getResourceCacheLimits(int* maxTextures, size_t* maxTextureBytes) const {
+ ASSERT_SINGLE_OWNER
+ if (maxTextures) {
+ *maxTextures = fResourceCache->getMaxResourceCount();
+ }
+ if (maxTextureBytes) {
+ *maxTextureBytes = fResourceCache->getMaxResourceBytes();
+ }
+}
+
+void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) {
+ ASSERT_SINGLE_OWNER
+ fResourceCache->setLimits(maxTextures, maxTextureBytes);
+}
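
A small usage sketch (not part of the patch) for the cache-limit accessors above; the doubling policy is purely illustrative:

    // Double the byte budget while keeping the current resource-count limit.
    static void grow_cache_budget(GrContext* ctx) {
        int maxResources;
        size_t maxBytes;
        ctx->getResourceCacheLimits(&maxResources, &maxBytes);
        ctx->setResourceCacheLimits(maxResources, maxBytes * 2);
    }
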
+
+//////////////////////////////////////////////////////////////////////////////
+
+void GrContext::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
+ ASSERT_SINGLE_OWNER
+ fResourceCache->dumpMemoryStatistics(traceMemoryDump);
+}
diff --git a/gfx/skia/skia/src/gpu/GrContextPriv.h b/gfx/skia/skia/src/gpu/GrContextPriv.h
new file mode 100644
index 000000000..29eb151b6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrContextPriv.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrContextPriv_DEFINED
+#define GrContextPriv_DEFINED
+
+#include "GrContext.h"
+
+/** Class that adds methods to GrContext that are only intended for use internal to Skia.
+ This class is purely a privileged window into GrContext. It should never have additional
+ data members or virtual methods. */
+class GrContextPriv {
+public:
+ GrDrawingManager* drawingManager() { return fContext->fDrawingManager; }
+
+ // Create a drawContext that wraps an existing renderTarget
+ sk_sp<GrDrawContext> makeWrappedDrawContext(sk_sp<GrRenderTarget> rt,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* = nullptr);
+
+ sk_sp<GrDrawContext> makeBackendTextureDrawContext(const GrBackendTextureDesc& desc,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* = nullptr,
+ GrWrapOwnership = kBorrow_GrWrapOwnership);
+
+ sk_sp<GrDrawContext> makeBackendRenderTargetDrawContext(const GrBackendRenderTargetDesc& desc,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* = nullptr);
+
+ sk_sp<GrDrawContext> makeBackendTextureAsRenderTargetDrawContext(
+ const GrBackendTextureDesc& desc,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* = nullptr);
+
+private:
+ explicit GrContextPriv(GrContext* context) : fContext(context) {}
+ GrContextPriv(const GrContextPriv&) {} // unimpl
+ GrContextPriv& operator=(const GrContextPriv&); // unimpl
+
+ // No taking addresses of this type.
+ const GrContextPriv* operator&() const;
+ GrContextPriv* operator&();
+
+ GrContext* fContext;
+
+ friend class GrContext; // to construct/copy this type.
+};
+
+inline GrContextPriv GrContext::contextPriv() { return GrContextPriv(this); }
+
+inline const GrContextPriv GrContext::contextPriv() const {
+ return GrContextPriv(const_cast<GrContext*>(this));
+}
+
+#endif
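
A short usage sketch (not part of the patch) of the privileged window declared above: GrContextPriv is obtained by value from contextPriv() and used immediately. The GrBackendRenderTargetDesc is assumed to be filled in by the embedder:

    // Wrap an externally created backend render target in a GrDrawContext.
    sk_sp<GrDrawContext> wrap_backend_rt(GrContext* ctx, const GrBackendRenderTargetDesc& desc) {
        // Surface props default to nullptr per the declaration above.
        return ctx->contextPriv().makeBackendRenderTargetDrawContext(desc,
                                                                     nullptr /* color space */);
    }
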
diff --git a/gfx/skia/skia/src/gpu/GrCoordTransform.cpp b/gfx/skia/skia/src/gpu/GrCoordTransform.cpp
new file mode 100644
index 000000000..2da49c43f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrCoordTransform.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrCoordTransform.h"
+#include "GrCaps.h"
+#include "GrContext.h"
+#include "GrGpu.h"
+
+void GrCoordTransform::reset(const SkMatrix& m, const GrTexture* texture,
+ GrTextureParams::FilterMode filter) {
+ SkASSERT(texture);
+ SkASSERT(!fInProcessor);
+
+ fMatrix = m;
+ fReverseY = kBottomLeft_GrSurfaceOrigin == texture->origin();
+
+ // Always start at kDefault. Then if precisions differ we see if the precision needs to be
+ // increased. Our rule is that we want at least 4 subpixel values in the representation for
+    // coords between 0 and 1 when bi- or tri-lerping and 1 value when nearest filtering. Note that
+ // this still might not be enough when drawing with repeat or mirror-repeat modes but that case
+ // can be arbitrarily bad.
+ int subPixelThresh = filter > GrTextureParams::kNone_FilterMode ? 4 : 1;
+ fPrecision = kDefault_GrSLPrecision;
+ if (texture->getContext()) {
+ const GrShaderCaps* caps = texture->getContext()->caps()->shaderCaps();
+ if (caps->floatPrecisionVaries()) {
+ int maxD = SkTMax(texture->width(), texture->height());
+ const GrShaderCaps::PrecisionInfo* info;
+ info = &caps->getFloatShaderPrecisionInfo(kFragment_GrShaderType, fPrecision);
+ do {
+ SkASSERT(info->supported());
+                // Make sure there are at least 2 bits of subpixel precision in the range of
+ // texture coords from 0.5 to 1.0.
+ if ((2 << info->fBits) / maxD > subPixelThresh) {
+ break;
+ }
+ if (kHigh_GrSLPrecision == fPrecision) {
+ break;
+ }
+ GrSLPrecision nextP = static_cast<GrSLPrecision>(fPrecision + 1);
+ info = &caps->getFloatShaderPrecisionInfo(kFragment_GrShaderType, nextP);
+ if (!info->supported()) {
+ break;
+ }
+ fPrecision = nextP;
+ } while (true);
+ }
+ }
+}
+
+void GrCoordTransform::reset(const SkMatrix& m, GrSLPrecision precision) {
+ SkASSERT(!fInProcessor);
+ fMatrix = m;
+ fReverseY = false;
+ fPrecision = precision;
+}
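
The loop above raises the shader precision until enough subpixel steps per texel are representable. The test it applies can be restated in isolation as below (names are hypothetical, not part of the patch):

    // Number of representable steps per texel for coords in [0.5, 1.0) is roughly
    // 2^(fBits + 1) / maxD; filtering wants at least 4 of them, nearest-neighbor wants 1.
    static bool precision_sufficient(int fBits, int maxD, bool filtering) {
        int subPixelThresh = filtering ? 4 : 1;
        return (2 << fBits) / maxD > subPixelThresh;
    }
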
diff --git a/gfx/skia/skia/src/gpu/GrDefaultGeoProcFactory.cpp b/gfx/skia/skia/src/gpu/GrDefaultGeoProcFactory.cpp
new file mode 100644
index 000000000..7d89f2727
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDefaultGeoProcFactory.cpp
@@ -0,0 +1,317 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrDefaultGeoProcFactory.h"
+
+#include "GrInvariantOutput.h"
+#include "SkRefCnt.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLGeometryProcessor.h"
+#include "glsl/GrGLSLVertexShaderBuilder.h"
+#include "glsl/GrGLSLVarying.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "glsl/GrGLSLUtil.h"
+
+/*
+ * The default Geometry Processor simply takes position and multiplies it by the uniform view
+ * matrix. It also leaves coverage untouched. Behind the scenes, we may add per vertex color or
+ * local coords.
+ */
+
+enum GPFlag {
+ kColor_GPFlag = 0x1,
+ kLocalCoord_GPFlag = 0x2,
+    kCoverage_GPFlag = 0x4,
+};
+
+class DefaultGeoProc : public GrGeometryProcessor {
+public:
+ static sk_sp<GrGeometryProcessor> Make(uint32_t gpTypeFlags,
+ GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& localMatrix,
+ bool localCoordsWillBeRead,
+ bool coverageWillBeIgnored,
+ uint8_t coverage) {
+ return sk_sp<GrGeometryProcessor>(new DefaultGeoProc(
+ gpTypeFlags, color, viewMatrix, localMatrix, coverage,
+ localCoordsWillBeRead, coverageWillBeIgnored));
+ }
+
+ const char* name() const override { return "DefaultGeometryProcessor"; }
+
+ const Attribute* inPosition() const { return fInPosition; }
+ const Attribute* inColor() const { return fInColor; }
+ const Attribute* inLocalCoords() const { return fInLocalCoords; }
+ const Attribute* inCoverage() const { return fInCoverage; }
+ GrColor color() const { return fColor; }
+ bool colorIgnored() const { return GrColor_ILLEGAL == fColor; }
+ bool hasVertexColor() const { return SkToBool(fInColor); }
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+ bool localCoordsWillBeRead() const { return fLocalCoordsWillBeRead; }
+ uint8_t coverage() const { return fCoverage; }
+ bool coverageWillBeIgnored() const { return fCoverageWillBeIgnored; }
+ bool hasVertexCoverage() const { return SkToBool(fInCoverage); }
+
+ class GLSLProcessor : public GrGLSLGeometryProcessor {
+ public:
+ GLSLProcessor()
+ : fViewMatrix(SkMatrix::InvalidMatrix()), fColor(GrColor_ILLEGAL), fCoverage(0xff) {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const DefaultGeoProc& gp = args.fGP.cast<DefaultGeoProc>();
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLPPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(gp);
+
+ // Setup pass through color
+ if (!gp.colorIgnored()) {
+ if (gp.hasVertexColor()) {
+ varyingHandler->addPassThroughAttribute(gp.inColor(), args.fOutputColor);
+ } else {
+ this->setupUniformColor(fragBuilder, uniformHandler, args.fOutputColor,
+ &fColorUniform);
+ }
+ }
+
+ // Setup position
+ this->setupPosition(vertBuilder,
+ uniformHandler,
+ gpArgs,
+ gp.inPosition()->fName,
+ gp.viewMatrix(),
+ &fViewMatrixUniform);
+
+ if (gp.hasExplicitLocalCoords()) {
+ // emit transforms with explicit local coords
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ gpArgs->fPositionVar,
+ gp.inLocalCoords()->fName,
+ gp.localMatrix(),
+ args.fFPCoordTransformHandler);
+ } else {
+ // emit transforms with position
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ gpArgs->fPositionVar,
+ gp.inPosition()->fName,
+ gp.localMatrix(),
+ args.fFPCoordTransformHandler);
+ }
+
+ // Setup coverage as pass through
+ if (!gp.coverageWillBeIgnored()) {
+ if (gp.hasVertexCoverage()) {
+ fragBuilder->codeAppendf("float alpha = 1.0;");
+ varyingHandler->addPassThroughAttribute(gp.inCoverage(), "alpha");
+ fragBuilder->codeAppendf("%s = vec4(alpha);", args.fOutputCoverage);
+ } else if (gp.coverage() == 0xff) {
+ fragBuilder->codeAppendf("%s = vec4(1);", args.fOutputCoverage);
+ } else {
+ const char* fragCoverage;
+ fCoverageUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType,
+ kDefault_GrSLPrecision,
+ "Coverage",
+ &fragCoverage);
+ fragBuilder->codeAppendf("%s = vec4(%s);", args.fOutputCoverage, fragCoverage);
+ }
+ }
+ }
+
+ static inline void GenKey(const GrGeometryProcessor& gp,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const DefaultGeoProc& def = gp.cast<DefaultGeoProc>();
+ uint32_t key = def.fFlags;
+ key |= def.colorIgnored() << 8;
+ key |= def.coverageWillBeIgnored() << 9;
+ key |= def.hasVertexColor() << 10;
+ key |= def.hasVertexCoverage() << 11;
+ key |= def.coverage() == 0xff ? 0x1 << 12 : 0;
+ key |= def.localCoordsWillBeRead() && def.localMatrix().hasPerspective() ? 0x1 << 24 :
+ 0x0;
+ key |= ComputePosKey(def.viewMatrix()) << 25;
+ b->add32(key);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman,
+ const GrPrimitiveProcessor& gp,
+ FPCoordTransformIter&& transformIter) override {
+ const DefaultGeoProc& dgp = gp.cast<DefaultGeoProc>();
+
+ if (!dgp.viewMatrix().isIdentity() && !fViewMatrix.cheapEqualTo(dgp.viewMatrix())) {
+ fViewMatrix = dgp.viewMatrix();
+ float viewMatrix[3 * 3];
+ GrGLSLGetMatrix<3>(viewMatrix, fViewMatrix);
+ pdman.setMatrix3f(fViewMatrixUniform, viewMatrix);
+ }
+
+ if (dgp.color() != fColor && !dgp.hasVertexColor()) {
+ float c[4];
+ GrColorToRGBAFloat(dgp.color(), c);
+ pdman.set4fv(fColorUniform, 1, c);
+ fColor = dgp.color();
+ }
+
+ if (!dgp.coverageWillBeIgnored() &&
+ dgp.coverage() != fCoverage && !dgp.hasVertexCoverage()) {
+ pdman.set1f(fCoverageUniform, GrNormalizeByteToFloat(dgp.coverage()));
+ fCoverage = dgp.coverage();
+ }
+ this->setTransformDataHelper(dgp.fLocalMatrix, pdman, &transformIter);
+ }
+
+ private:
+ SkMatrix fViewMatrix;
+ GrColor fColor;
+ uint8_t fCoverage;
+ UniformHandle fViewMatrixUniform;
+ UniformHandle fColorUniform;
+ UniformHandle fCoverageUniform;
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+ };
+
+ void getGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLProcessor::GenKey(*this, caps, b);
+ }
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override {
+ return new GLSLProcessor();
+ }
+
+private:
+ DefaultGeoProc(uint32_t gpTypeFlags,
+ GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& localMatrix,
+ uint8_t coverage,
+ bool localCoordsWillBeRead,
+ bool coverageWillBeIgnored)
+ : fInPosition(nullptr)
+ , fInColor(nullptr)
+ , fInLocalCoords(nullptr)
+ , fInCoverage(nullptr)
+ , fColor(color)
+ , fViewMatrix(viewMatrix)
+ , fLocalMatrix(localMatrix)
+ , fCoverage(coverage)
+ , fFlags(gpTypeFlags)
+ , fLocalCoordsWillBeRead(localCoordsWillBeRead)
+ , fCoverageWillBeIgnored(coverageWillBeIgnored) {
+ this->initClassID<DefaultGeoProc>();
+ bool hasColor = SkToBool(gpTypeFlags & kColor_GPFlag);
+ bool hasExplicitLocalCoords = SkToBool(gpTypeFlags & kLocalCoord_GPFlag);
+ bool hasCoverage = SkToBool(gpTypeFlags & kCoverage_GPFlag);
+ fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ if (hasColor) {
+ fInColor = &this->addVertexAttrib("inColor", kVec4ub_GrVertexAttribType);
+ }
+ if (hasExplicitLocalCoords) {
+ fInLocalCoords = &this->addVertexAttrib("inLocalCoord", kVec2f_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ this->setHasExplicitLocalCoords();
+ }
+ if (hasCoverage) {
+ fInCoverage = &this->addVertexAttrib("inCoverage", kFloat_GrVertexAttribType);
+ }
+ }
+
+ const Attribute* fInPosition;
+ const Attribute* fInColor;
+ const Attribute* fInLocalCoords;
+ const Attribute* fInCoverage;
+ GrColor fColor;
+ SkMatrix fViewMatrix;
+ SkMatrix fLocalMatrix;
+ uint8_t fCoverage;
+ uint32_t fFlags;
+ bool fLocalCoordsWillBeRead;
+ bool fCoverageWillBeIgnored;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(DefaultGeoProc);
+
+sk_sp<GrGeometryProcessor> DefaultGeoProc::TestCreate(GrProcessorTestData* d) {
+ uint32_t flags = 0;
+ if (d->fRandom->nextBool()) {
+ flags |= kColor_GPFlag;
+ }
+ if (d->fRandom->nextBool()) {
+ flags |= kCoverage_GPFlag;
+ }
+ if (d->fRandom->nextBool()) {
+ flags |= kLocalCoord_GPFlag;
+ }
+
+ return DefaultGeoProc::Make(flags,
+ GrRandomColor(d->fRandom),
+ GrTest::TestMatrix(d->fRandom),
+ GrTest::TestMatrix(d->fRandom),
+ d->fRandom->nextBool(),
+ d->fRandom->nextBool(),
+ GrRandomCoverage(d->fRandom));
+}
+
+sk_sp<GrGeometryProcessor> GrDefaultGeoProcFactory::Make(const Color& color,
+ const Coverage& coverage,
+ const LocalCoords& localCoords,
+ const SkMatrix& viewMatrix) {
+ uint32_t flags = 0;
+ flags |= color.fType == Color::kAttribute_Type ? kColor_GPFlag : 0;
+ flags |= coverage.fType == Coverage::kAttribute_Type ? kCoverage_GPFlag : 0;
+ flags |= localCoords.fType == LocalCoords::kHasExplicit_Type ? kLocalCoord_GPFlag : 0;
+
+ uint8_t inCoverage = coverage.fCoverage;
+ bool coverageWillBeIgnored = coverage.fType == Coverage::kNone_Type;
+ bool localCoordsWillBeRead = localCoords.fType != LocalCoords::kUnused_Type;
+
+ GrColor inColor = color.fColor;
+ return DefaultGeoProc::Make(flags,
+ inColor,
+ viewMatrix,
+ localCoords.fMatrix ? *localCoords.fMatrix : SkMatrix::I(),
+ localCoordsWillBeRead,
+ coverageWillBeIgnored,
+ inCoverage);
+}
+
+sk_sp<GrGeometryProcessor> GrDefaultGeoProcFactory::MakeForDeviceSpace(
+ const Color& color,
+ const Coverage& coverage,
+ const LocalCoords& localCoords,
+ const SkMatrix& viewMatrix) {
+ SkMatrix invert = SkMatrix::I();
+ if (LocalCoords::kUnused_Type != localCoords.fType) {
+ SkASSERT(LocalCoords::kUsePosition_Type == localCoords.fType);
+ if (!viewMatrix.isIdentity() && !viewMatrix.invert(&invert)) {
+ SkDebugf("Could not invert\n");
+ return nullptr;
+ }
+
+ if (localCoords.hasLocalMatrix()) {
+ invert.preConcat(*localCoords.fMatrix);
+ }
+ }
+
+ LocalCoords inverted(LocalCoords::kUsePosition_Type, &invert);
+ return Make(color, coverage, inverted, SkMatrix::I());
+}
diff --git a/gfx/skia/skia/src/gpu/GrDefaultGeoProcFactory.h b/gfx/skia/skia/src/gpu/GrDefaultGeoProcFactory.h
new file mode 100644
index 000000000..022930b65
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDefaultGeoProcFactory.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDefaultGeoProcFactory_DEFINED
+#define GrDefaultGeoProcFactory_DEFINED
+
+#include "GrGeometryProcessor.h"
+
+/*
+ * A factory for creating default Geometry Processors which simply multiply position by the uniform
+ * view matrix and wire through color, coverage, and UV coords if requested. Right now this is only
+ * used in the creation of optimized draw states because adding default GPs to the drawstate can
+ * interfere with batching due to updating the drawstate.
+ */
+namespace GrDefaultGeoProcFactory {
+ // Structs for adding vertex attributes
+ struct PositionAttr {
+ SkPoint fPosition;
+ };
+
+ struct PositionCoverageAttr {
+ SkPoint fPosition;
+ float fCoverage;
+ };
+
+ struct PositionColorAttr {
+ SkPoint fPosition;
+ SkColor fColor;
+ };
+
+ struct PositionColorCoverageAttr {
+ SkPoint fPosition;
+ SkColor fColor;
+ float fCoverage;
+ };
+
+ struct PositionLocalCoordAttr {
+ SkPoint fPosition;
+ SkPoint fLocalCoord;
+ };
+
+ struct PositionLocalCoordCoverageAttr {
+ SkPoint fPosition;
+ SkPoint fLocalCoord;
+ float fCoverage;
+ };
+
+ struct PositionColorLocalCoordAttr {
+ SkPoint fPosition;
+ GrColor fColor;
+ SkPoint fLocalCoord;
+ };
+
+ struct PositionColorLocalCoordCoverage {
+ SkPoint fPosition;
+ GrColor fColor;
+ SkPoint fLocalCoord;
+ float fCoverage;
+ };
+
+ struct Color {
+ enum Type {
+ kNone_Type,
+ kUniform_Type,
+ kAttribute_Type,
+ };
+ Color(GrColor color) : fType(kUniform_Type), fColor(color) {}
+ Color(Type type) : fType(type), fColor(GrColor_ILLEGAL) {
+ SkASSERT(type != kUniform_Type);
+
+ // TODO This is temporary
+ if (kAttribute_Type == type) {
+ fColor = GrColor_WHITE;
+ }
+ }
+
+ Type fType;
+ GrColor fColor;
+ };
+
+ struct Coverage {
+ enum Type {
+ kNone_Type,
+ kSolid_Type,
+ kUniform_Type,
+ kAttribute_Type,
+ };
+ Coverage(uint8_t coverage) : fType(kUniform_Type), fCoverage(coverage) {}
+ Coverage(Type type) : fType(type), fCoverage(0xff) {
+ SkASSERT(type != kUniform_Type);
+ }
+
+ Type fType;
+ uint8_t fCoverage;
+ };
+
+ struct LocalCoords {
+ enum Type {
+ kUnused_Type,
+ kUsePosition_Type,
+ kHasExplicit_Type,
+ kHasTransformed_Type,
+ };
+ LocalCoords(Type type) : fType(type), fMatrix(nullptr) {}
+ LocalCoords(Type type, const SkMatrix* matrix) : fType(type), fMatrix(matrix) {
+ SkASSERT(kUnused_Type != type);
+ }
+ bool hasLocalMatrix() const { return nullptr != fMatrix; }
+
+ Type fType;
+ const SkMatrix* fMatrix;
+ };
+
+ sk_sp<GrGeometryProcessor> Make(const Color&,
+ const Coverage&,
+ const LocalCoords&,
+ const SkMatrix& viewMatrix);
+
+ /*
+ * Use this factory to create a GrGeometryProcessor that expects a device space vertex position
+ * attribute. The view matrix must still be provided to compute correctly transformed
+ * coordinates for GrFragmentProcessors. It may fail if the view matrix is not invertible.
+ */
+ sk_sp<GrGeometryProcessor> MakeForDeviceSpace(const Color&,
+ const Coverage&,
+ const LocalCoords&,
+ const SkMatrix& viewMatrix);
+
+ inline size_t DefaultVertexStride() { return sizeof(PositionAttr); }
+};
+
+#endif
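
A brief usage sketch (not part of the patch) of the factory declared above: build a geometry processor with per-vertex color, solid coverage, and local coords derived from position; the particular choices are illustrative:

    sk_sp<GrGeometryProcessor> make_default_gp(const SkMatrix& viewMatrix) {
        using namespace GrDefaultGeoProcFactory;
        Color color(Color::kAttribute_Type);                      // per-vertex color
        Coverage coverage(Coverage::kSolid_Type);                 // full coverage, no attribute
        LocalCoords localCoords(LocalCoords::kUsePosition_Type);  // derive UVs from position
        return Make(color, coverage, localCoords, viewMatrix);
    }
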
diff --git a/gfx/skia/skia/src/gpu/GrDrawContext.cpp b/gfx/skia/skia/src/gpu/GrDrawContext.cpp
new file mode 100644
index 000000000..42d679590
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDrawContext.cpp
@@ -0,0 +1,1435 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrBatchTest.h"
+#include "GrColor.h"
+#include "GrDrawContext.h"
+#include "GrDrawContextPriv.h"
+#include "GrDrawingManager.h"
+#include "GrFixedClip.h"
+#include "GrGpuResourcePriv.h"
+#include "GrOvalRenderer.h"
+#include "GrPathRenderer.h"
+#include "GrPipelineBuilder.h"
+#include "GrRenderTarget.h"
+#include "GrRenderTargetPriv.h"
+#include "GrResourceProvider.h"
+#include "SkSurfacePriv.h"
+
+#include "batches/GrBatch.h"
+#include "batches/GrClearBatch.h"
+#include "batches/GrDrawAtlasBatch.h"
+#include "batches/GrDrawVerticesBatch.h"
+#include "batches/GrRectBatchFactory.h"
+#include "batches/GrNinePatch.h" // TODO Factory
+#include "batches/GrRegionBatch.h"
+
+#include "effects/GrRRectEffect.h"
+
+#include "instanced/InstancedRendering.h"
+
+#include "text/GrAtlasTextContext.h"
+#include "text/GrStencilAndCoverTextContext.h"
+
+#include "../private/GrAuditTrail.h"
+
+#include "SkGr.h"
+#include "SkLatticeIter.h"
+#include "SkMatrixPriv.h"
+
+#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == fDrawingManager->getContext())
+#define ASSERT_SINGLE_OWNER \
+ SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fSingleOwner);)
+#define ASSERT_SINGLE_OWNER_PRIV \
+ SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fDrawContext->fSingleOwner);)
+#define RETURN_IF_ABANDONED if (fDrawingManager->wasAbandoned()) { return; }
+#define RETURN_IF_ABANDONED_PRIV if (fDrawContext->fDrawingManager->wasAbandoned()) { return; }
+#define RETURN_FALSE_IF_ABANDONED if (fDrawingManager->wasAbandoned()) { return false; }
+#define RETURN_FALSE_IF_ABANDONED_PRIV if (fDrawContext->fDrawingManager->wasAbandoned()) { return false; }
+#define RETURN_NULL_IF_ABANDONED if (fDrawingManager->wasAbandoned()) { return nullptr; }
+
+using gr_instanced::InstancedRendering;
+
+class AutoCheckFlush {
+public:
+ AutoCheckFlush(GrDrawingManager* drawingManager) : fDrawingManager(drawingManager) {
+ SkASSERT(fDrawingManager);
+ }
+ ~AutoCheckFlush() { fDrawingManager->flushIfNecessary(); }
+
+private:
+ GrDrawingManager* fDrawingManager;
+};
+
+bool GrDrawContext::wasAbandoned() const {
+ return fDrawingManager->wasAbandoned();
+}
+
+// In MDB mode, reffing the result of the 'getLastDrawTarget' call allows in-progress
+// drawTargets to be picked up and added to by drawContexts lower in the call
+// stack. When this occurs with a closed drawTarget, a new one will be allocated
+// when the drawContext attempts to use it (via getDrawTarget).
+GrDrawContext::GrDrawContext(GrContext* context,
+ GrDrawingManager* drawingMgr,
+ sk_sp<GrRenderTarget> rt,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps,
+ GrAuditTrail* auditTrail,
+ GrSingleOwner* singleOwner)
+ : fDrawingManager(drawingMgr)
+ , fRenderTarget(std::move(rt))
+ , fDrawTarget(SkSafeRef(fRenderTarget->getLastDrawTarget()))
+ , fContext(context)
+ , fInstancedPipelineInfo(fRenderTarget.get())
+ , fColorSpace(std::move(colorSpace))
+ , fColorXformFromSRGB(nullptr)
+ , fSurfaceProps(SkSurfacePropsCopyOrDefault(surfaceProps))
+ , fAuditTrail(auditTrail)
+#ifdef SK_DEBUG
+ , fSingleOwner(singleOwner)
+#endif
+{
+ if (fColorSpace) {
+ // sRGB sources are very common (SkColor, etc...), so we cache that gamut transformation
+ auto srgbColorSpace = SkColorSpace::NewNamed(SkColorSpace::kSRGB_Named);
+ fColorXformFromSRGB = GrColorSpaceXform::Make(srgbColorSpace.get(), fColorSpace.get());
+ }
+ SkDEBUGCODE(this->validate();)
+}
+
+#ifdef SK_DEBUG
+void GrDrawContext::validate() const {
+ SkASSERT(fRenderTarget);
+ ASSERT_OWNED_RESOURCE(fRenderTarget);
+
+ if (fDrawTarget && !fDrawTarget->isClosed()) {
+ SkASSERT(fRenderTarget->getLastDrawTarget() == fDrawTarget);
+ }
+}
+#endif
+
+GrDrawContext::~GrDrawContext() {
+ ASSERT_SINGLE_OWNER
+ SkSafeUnref(fDrawTarget);
+}
+
+GrDrawTarget* GrDrawContext::getDrawTarget() {
+ ASSERT_SINGLE_OWNER
+ SkDEBUGCODE(this->validate();)
+
+ if (!fDrawTarget || fDrawTarget->isClosed()) {
+ fDrawTarget = fDrawingManager->newDrawTarget(fRenderTarget.get());
+ }
+
+ return fDrawTarget;
+}
+
+bool GrDrawContext::copySurface(GrSurface* src, const SkIRect& srcRect, const SkIPoint& dstPoint) {
+ ASSERT_SINGLE_OWNER
+ RETURN_FALSE_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrDrawContext::copySurface");
+
+ return this->getDrawTarget()->copySurface(fRenderTarget.get(), src, srcRect, dstPoint);
+}
+
+void GrDrawContext::drawText(const GrClip& clip, const GrPaint& grPaint,
+ const SkPaint& skPaint,
+ const SkMatrix& viewMatrix,
+ const char text[], size_t byteLength,
+ SkScalar x, SkScalar y, const SkIRect& clipBounds) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrDrawContext::drawText");
+
+ GrAtlasTextContext* atlasTextContext = fDrawingManager->getAtlasTextContext();
+ atlasTextContext->drawText(fContext, this, clip, grPaint, skPaint, viewMatrix, fSurfaceProps,
+ text, byteLength, x, y, clipBounds);
+}
+
+void GrDrawContext::drawPosText(const GrClip& clip, const GrPaint& grPaint,
+ const SkPaint& skPaint,
+ const SkMatrix& viewMatrix,
+ const char text[], size_t byteLength,
+ const SkScalar pos[], int scalarsPerPosition,
+ const SkPoint& offset, const SkIRect& clipBounds) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrDrawContext::drawPosText");
+
+ GrAtlasTextContext* atlasTextContext = fDrawingManager->getAtlasTextContext();
+ atlasTextContext->drawPosText(fContext, this, clip, grPaint, skPaint, viewMatrix,
+ fSurfaceProps, text, byteLength, pos, scalarsPerPosition,
+ offset, clipBounds);
+}
+
+void GrDrawContext::drawTextBlob(const GrClip& clip, const SkPaint& skPaint,
+ const SkMatrix& viewMatrix, const SkTextBlob* blob,
+ SkScalar x, SkScalar y,
+ SkDrawFilter* filter, const SkIRect& clipBounds) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrDrawContext::drawTextBlob");
+
+ GrAtlasTextContext* atlasTextContext = fDrawingManager->getAtlasTextContext();
+ atlasTextContext->drawTextBlob(fContext, this, clip, skPaint, viewMatrix, fSurfaceProps, blob,
+ x, y, filter, clipBounds);
+}
+
+void GrDrawContext::discard() {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrDrawContext::discard");
+
+ AutoCheckFlush acf(fDrawingManager);
+ this->getDrawTarget()->discard(fRenderTarget.get());
+}
+
+void GrDrawContext::clear(const SkIRect* rect,
+ const GrColor color,
+ bool canIgnoreRect) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrDrawContext::clear");
+
+ AutoCheckFlush acf(fDrawingManager);
+ this->internalClear(rect ? GrFixedClip(*rect) : GrFixedClip::Disabled(), color, canIgnoreRect);
+}
+
+void GrDrawContextPriv::clear(const GrFixedClip& clip,
+ const GrColor color,
+ bool canIgnoreClip) {
+ ASSERT_SINGLE_OWNER_PRIV
+ RETURN_IF_ABANDONED_PRIV
+ SkDEBUGCODE(fDrawContext->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fDrawContext->fAuditTrail, "GrDrawContextPriv::clear");
+
+ AutoCheckFlush acf(fDrawContext->fDrawingManager);
+ fDrawContext->internalClear(clip, color, canIgnoreClip);
+}
+
+void GrDrawContext::internalClear(const GrFixedClip& clip,
+ const GrColor color,
+ bool canIgnoreClip) {
+ bool isFull = false;
+ if (!clip.hasWindowRectangles()) {
+ isFull = !clip.scissorEnabled() ||
+ (canIgnoreClip && fContext->caps()->fullClearIsFree()) ||
+ clip.scissorRect().contains(SkIRect::MakeWH(this->width(), this->height()));
+ }
+
+ if (fContext->caps()->useDrawInsteadOfClear()) {
+ // This works around a driver bug with clear by drawing a rect instead.
+ // The driver will ignore a clear if it is the only thing rendered to a
+ // target before the target is read.
+ SkRect clearRect = SkRect::MakeIWH(this->width(), this->height());
+ if (isFull) {
+ this->discard();
+ } else if (!clearRect.intersect(SkRect::Make(clip.scissorRect()))) {
+ return;
+ }
+
+ GrPaint paint;
+ paint.setColor4f(GrColor4f::FromGrColor(color));
+ paint.setXPFactory(GrPorterDuffXPFactory::Make(SkXfermode::Mode::kSrc_Mode));
+
+ this->drawRect(clip, paint, SkMatrix::I(), clearRect);
+ } else if (isFull) {
+ this->getDrawTarget()->fullClear(this->accessRenderTarget(), color);
+ } else {
+ sk_sp<GrBatch> batch(GrClearBatch::Make(clip, color, this->accessRenderTarget()));
+ if (!batch) {
+ return;
+ }
+ this->getDrawTarget()->addBatch(std::move(batch));
+ }
+}
+
+void GrDrawContext::drawPaint(const GrClip& clip,
+ const GrPaint& origPaint,
+ const SkMatrix& viewMatrix) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrDrawContext::drawPaint");
+
+ // set rect to be big enough to fill the space, but not super-huge, so we
+ // don't overflow fixed-point implementations
+
+ SkRect r = fRenderTarget->getBoundsRect();
+ SkTCopyOnFirstWrite<GrPaint> paint(origPaint);
+
+ SkRRect rrect;
+ bool aaRRect;
+ // Check if we can replace a clipRRect()/drawPaint() with a drawRRect(). We only do the
+ // transformation for non-rect rrects. Rects caused a performance regression on an Android
+ // test that needs investigation. We also skip cases where there are fragment processors
+ // because they may depend on having correct local coords and this path draws in device space
+ // without a local matrix.
+ if (!paint->numTotalFragmentProcessors() &&
+ clip.isRRect(r, &rrect, &aaRRect) && !rrect.isRect()) {
+ paint.writable()->setAntiAlias(aaRRect);
+ this->drawRRect(GrNoClip(), *paint, SkMatrix::I(), rrect, GrStyle::SimpleFill());
+ return;
+ }
+
+ // by definition this fills the entire clip, no need for AA
+ if (paint->isAntiAlias()) {
+ paint.writable()->setAntiAlias(false);
+ }
+
+ bool isPerspective = viewMatrix.hasPerspective();
+
+ // We attempt to map r by the inverse matrix and draw that. mapRect will
+ // map the four corners and bound them with a new rect. This will not
+ // produce a correct result for some perspective matrices.
+ if (!isPerspective) {
+ if (!SkMatrixPriv::InverseMapRect(viewMatrix, &r, r)) {
+ SkDebugf("Could not invert matrix\n");
+ return;
+ }
+ this->drawRect(clip, *paint, viewMatrix, r);
+ } else {
+ SkMatrix localMatrix;
+ if (!viewMatrix.invert(&localMatrix)) {
+ SkDebugf("Could not invert matrix\n");
+ return;
+ }
+
+ AutoCheckFlush acf(fDrawingManager);
+
+ this->drawNonAAFilledRect(clip, *paint, SkMatrix::I(), r, nullptr, &localMatrix, nullptr,
+ false /* useHWAA */);
+ }
+}
+
+static inline bool rect_contains_inclusive(const SkRect& rect, const SkPoint& point) {
+ return point.fX >= rect.fLeft && point.fX <= rect.fRight &&
+ point.fY >= rect.fTop && point.fY <= rect.fBottom;
+}
+
+static bool view_matrix_ok_for_aa_fill_rect(const SkMatrix& viewMatrix) {
+ return viewMatrix.preservesRightAngles();
+}
+
+static bool should_apply_coverage_aa(const GrPaint& paint, GrRenderTarget* rt,
+ bool* useHWAA = nullptr) {
+ if (!paint.isAntiAlias()) {
+ if (useHWAA) {
+ *useHWAA = false;
+ }
+ return false;
+ } else {
+ if (useHWAA) {
+ *useHWAA = rt->isUnifiedMultisampled();
+ }
+ return !rt->isUnifiedMultisampled();
+ }
+}
+
+// Attempts to crop a rect and optional local rect to the clip boundaries.
+// Returns false if the draw can be skipped entirely.
+static bool crop_filled_rect(int width, int height, const GrClip& clip,
+ const SkMatrix& viewMatrix, SkRect* rect,
+ SkRect* localRect = nullptr) {
+ if (!viewMatrix.rectStaysRect()) {
+ return true;
+ }
+
+ SkIRect clipDevBounds;
+ SkRect clipBounds;
+
+ clip.getConservativeBounds(width, height, &clipDevBounds);
+ if (!SkMatrixPriv::InverseMapRect(viewMatrix, &clipBounds, SkRect::Make(clipDevBounds))) {
+ return false;
+ }
+
+ if (localRect) {
+ if (!rect->intersects(clipBounds)) {
+ return false;
+ }
+ const SkScalar dx = localRect->width() / rect->width();
+ const SkScalar dy = localRect->height() / rect->height();
+ if (clipBounds.fLeft > rect->fLeft) {
+ localRect->fLeft += (clipBounds.fLeft - rect->fLeft) * dx;
+ rect->fLeft = clipBounds.fLeft;
+ }
+ if (clipBounds.fTop > rect->fTop) {
+ localRect->fTop += (clipBounds.fTop - rect->fTop) * dy;
+ rect->fTop = clipBounds.fTop;
+ }
+ if (clipBounds.fRight < rect->fRight) {
+ localRect->fRight -= (rect->fRight - clipBounds.fRight) * dx;
+ rect->fRight = clipBounds.fRight;
+ }
+ if (clipBounds.fBottom < rect->fBottom) {
+ localRect->fBottom -= (rect->fBottom - clipBounds.fBottom) * dy;
+ rect->fBottom = clipBounds.fBottom;
+ }
+ return true;
+ }
+
+ return rect->intersect(clipBounds);
+}
+
+bool GrDrawContext::drawFilledRect(const GrClip& clip,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const GrUserStencilSettings* ss) {
+ SkRect croppedRect = rect;
+ if (!crop_filled_rect(this->width(), this->height(), clip, viewMatrix, &croppedRect)) {
+ return true;
+ }
+
+ SkAutoTUnref<GrDrawBatch> batch;
+ bool useHWAA;
+
+ if (GrCaps::InstancedSupport::kNone != fContext->caps()->instancedSupport()) {
+ InstancedRendering* ir = this->getDrawTarget()->instancedRendering();
+ batch.reset(ir->recordRect(croppedRect, viewMatrix, paint.getColor(),
+ paint.isAntiAlias(), fInstancedPipelineInfo,
+ &useHWAA));
+ if (batch) {
+ GrPipelineBuilder pipelineBuilder(paint, useHWAA);
+ if (ss) {
+ pipelineBuilder.setUserStencil(ss);
+ }
+ this->getDrawTarget()->drawBatch(pipelineBuilder, this, clip, batch);
+ return true;
+ }
+ }
+
+ if (should_apply_coverage_aa(paint, fRenderTarget.get(), &useHWAA)) {
+ // The fill path can handle rotation but not skew.
+ if (view_matrix_ok_for_aa_fill_rect(viewMatrix)) {
+ SkRect devBoundRect;
+ viewMatrix.mapRect(&devBoundRect, croppedRect);
+
+ batch.reset(GrRectBatchFactory::CreateAAFill(paint, viewMatrix, rect, croppedRect,
+ devBoundRect));
+ if (batch) {
+ GrPipelineBuilder pipelineBuilder(paint, useHWAA);
+ if (ss) {
+ pipelineBuilder.setUserStencil(ss);
+ }
+ this->getDrawTarget()->drawBatch(pipelineBuilder, this, clip, batch);
+ return true;
+ }
+ }
+ } else {
+ this->drawNonAAFilledRect(clip, paint, viewMatrix, croppedRect, nullptr, nullptr, ss,
+ useHWAA);
+ return true;
+ }
+
+ return false;
+}
+
+void GrDrawContext::drawRect(const GrClip& clip,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const GrStyle* style) {
+ if (!style) {
+ style = &GrStyle::SimpleFill();
+ }
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrDrawContext::drawRect");
+
+ // Path effects should've been devolved to a path in SkGpuDevice
+ SkASSERT(!style->pathEffect());
+
+ AutoCheckFlush acf(fDrawingManager);
+
+ const SkStrokeRec& stroke = style->strokeRec();
+ if (stroke.getStyle() == SkStrokeRec::kFill_Style) {
+
+ if (!fContext->caps()->useDrawInsteadOfClear()) {
+ // Check if this is a full RT draw and can be replaced with a clear. We don't bother
+ // checking cases where the RT is fully inside a stroke.
+ SkRect rtRect = fRenderTarget->getBoundsRect();
+ // Does the clip contain the entire RT?
+ if (clip.quickContains(rtRect)) {
+ SkMatrix invM;
+ if (!viewMatrix.invert(&invM)) {
+ return;
+ }
+ // Does the rect bound the RT?
+ SkPoint srcSpaceRTQuad[4];
+ invM.mapRectToQuad(srcSpaceRTQuad, rtRect);
+ if (rect_contains_inclusive(rect, srcSpaceRTQuad[0]) &&
+ rect_contains_inclusive(rect, srcSpaceRTQuad[1]) &&
+ rect_contains_inclusive(rect, srcSpaceRTQuad[2]) &&
+ rect_contains_inclusive(rect, srcSpaceRTQuad[3])) {
+ // Will it blend?
+ GrColor clearColor;
+ if (paint.isConstantBlendedColor(&clearColor)) {
+ this->clear(nullptr, clearColor, true);
+ return;
+ }
+ }
+ }
+ }
+
+ if (this->drawFilledRect(clip, paint, viewMatrix, rect, nullptr)) {
+ return;
+ }
+ } else if (stroke.getStyle() == SkStrokeRec::kStroke_Style ||
+ stroke.getStyle() == SkStrokeRec::kHairline_Style) {
+ if ((!rect.width() || !rect.height()) &&
+ SkStrokeRec::kHairline_Style != stroke.getStyle()) {
+ SkScalar r = stroke.getWidth() / 2;
+ // TODO: Move these stroke->fill fallbacks to GrShape?
+ switch (stroke.getJoin()) {
+ case SkPaint::kMiter_Join:
+ this->drawRect(clip, paint, viewMatrix,
+ {rect.fLeft - r, rect.fTop - r,
+ rect.fRight + r, rect.fBottom + r},
+ &GrStyle::SimpleFill());
+ return;
+ case SkPaint::kRound_Join:
+ // Raster draws nothing when both dimensions are empty.
+ if (rect.width() || rect.height()){
+ SkRRect rrect = SkRRect::MakeRectXY(rect.makeOutset(r, r), r, r);
+ this->drawRRect(clip, paint, viewMatrix, rrect, GrStyle::SimpleFill());
+ return;
+ }
+ case SkPaint::kBevel_Join:
+ if (!rect.width()) {
+ this->drawRect(clip, paint, viewMatrix,
+ {rect.fLeft - r, rect.fTop, rect.fRight + r, rect.fBottom},
+ &GrStyle::SimpleFill());
+ } else {
+ this->drawRect(clip, paint, viewMatrix,
+ {rect.fLeft, rect.fTop - r, rect.fRight, rect.fBottom + r},
+ &GrStyle::SimpleFill());
+ }
+ return;
+ }
+ }
+
+ bool useHWAA;
+ bool snapToPixelCenters = false;
+ SkAutoTUnref<GrDrawBatch> batch;
+
+ GrColor color = paint.getColor();
+ if (should_apply_coverage_aa(paint, fRenderTarget.get(), &useHWAA)) {
+ // The stroke path needs the rect to remain axis aligned (no rotation or skew).
+ if (viewMatrix.rectStaysRect()) {
+ batch.reset(GrRectBatchFactory::CreateAAStroke(color, viewMatrix, rect, stroke));
+ }
+ } else {
+ // Depending on sub-pixel coordinates and the particular GPU, we may lose a corner of
+ // hairline rects. We jam all the vertices to pixel centers to avoid this, but not
+ // when MSAA is enabled because it can cause ugly artifacts.
+ snapToPixelCenters = stroke.getStyle() == SkStrokeRec::kHairline_Style &&
+ !fRenderTarget->isUnifiedMultisampled();
+ batch.reset(GrRectBatchFactory::CreateNonAAStroke(color, viewMatrix, rect,
+ stroke, snapToPixelCenters));
+ }
+
+ if (batch) {
+ GrPipelineBuilder pipelineBuilder(paint, useHWAA);
+
+ if (snapToPixelCenters) {
+ pipelineBuilder.setState(GrPipelineBuilder::kSnapVerticesToPixelCenters_Flag,
+ snapToPixelCenters);
+ }
+
+ this->getDrawTarget()->drawBatch(pipelineBuilder, this, clip, batch);
+ return;
+ }
+ }
+
+ SkPath path;
+ path.setIsVolatile(true);
+ path.addRect(rect);
+ this->internalDrawPath(clip, paint, viewMatrix, path, *style);
+}
+
+void GrDrawContextPriv::clearStencilClip(const GrFixedClip& clip, bool insideStencilMask) {
+ ASSERT_SINGLE_OWNER_PRIV
+ RETURN_IF_ABANDONED_PRIV
+ SkDEBUGCODE(fDrawContext->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fDrawContext->fAuditTrail, "GrDrawContextPriv::clearStencilClip");
+
+ AutoCheckFlush acf(fDrawContext->fDrawingManager);
+ fDrawContext->getDrawTarget()->clearStencilClip(clip, insideStencilMask,
+ fDrawContext->accessRenderTarget());
+}
+
+void GrDrawContextPriv::stencilPath(const GrClip& clip,
+ bool useHWAA,
+ const SkMatrix& viewMatrix,
+ const GrPath* path) {
+ fDrawContext->getDrawTarget()->stencilPath(fDrawContext, clip, useHWAA, viewMatrix, path);
+}
+
+void GrDrawContextPriv::stencilRect(const GrClip& clip,
+ const GrUserStencilSettings* ss,
+ bool useHWAA,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect) {
+ ASSERT_SINGLE_OWNER_PRIV
+ RETURN_IF_ABANDONED_PRIV
+ SkDEBUGCODE(fDrawContext->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fDrawContext->fAuditTrail, "GrDrawContext::stencilRect");
+
+ AutoCheckFlush acf(fDrawContext->fDrawingManager);
+
+ GrPaint paint;
+ paint.setAntiAlias(useHWAA);
+ paint.setXPFactory(GrDisableColorXPFactory::Make());
+
+ fDrawContext->drawNonAAFilledRect(clip, paint, viewMatrix, rect, nullptr, nullptr, ss, useHWAA);
+}
+
+bool GrDrawContextPriv::drawAndStencilRect(const GrClip& clip,
+ const GrUserStencilSettings* ss,
+ SkRegion::Op op,
+ bool invert,
+ bool doAA,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect) {
+ ASSERT_SINGLE_OWNER_PRIV
+ RETURN_FALSE_IF_ABANDONED_PRIV
+ SkDEBUGCODE(fDrawContext->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fDrawContext->fAuditTrail, "GrDrawContext::drawAndStencilRect");
+
+ AutoCheckFlush acf(fDrawContext->fDrawingManager);
+
+ GrPaint paint;
+ paint.setAntiAlias(doAA);
+ paint.setCoverageSetOpXPFactory(op, invert);
+
+ if (fDrawContext->drawFilledRect(clip, paint, viewMatrix, rect, ss)) {
+ return true;
+ }
+
+ SkPath path;
+ path.setIsVolatile(true);
+ path.addRect(rect);
+ return this->drawAndStencilPath(clip, ss, op, invert, doAA, viewMatrix, path);
+}
+
+void GrDrawContext::fillRectToRect(const GrClip& clip,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& rectToDraw,
+ const SkRect& localRect) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrDrawContext::fillRectToRect");
+
+ SkRect croppedRect = rectToDraw;
+ SkRect croppedLocalRect = localRect;
+ if (!crop_filled_rect(this->width(), this->height(), clip, viewMatrix,
+ &croppedRect, &croppedLocalRect)) {
+ return;
+ }
+
+ AutoCheckFlush acf(fDrawingManager);
+ bool useHWAA;
+
+ if (GrCaps::InstancedSupport::kNone != fContext->caps()->instancedSupport()) {
+ InstancedRendering* ir = this->getDrawTarget()->instancedRendering();
+ SkAutoTUnref<GrDrawBatch> batch(ir->recordRect(croppedRect, viewMatrix, paint.getColor(),
+ croppedLocalRect, paint.isAntiAlias(),
+ fInstancedPipelineInfo, &useHWAA));
+ if (batch) {
+ GrPipelineBuilder pipelineBuilder(paint, useHWAA);
+ this->getDrawTarget()->drawBatch(pipelineBuilder, this, clip, batch);
+ return;
+ }
+ }
+
+ if (!should_apply_coverage_aa(paint, fRenderTarget.get(), &useHWAA)) {
+ this->drawNonAAFilledRect(clip, paint, viewMatrix, croppedRect, &croppedLocalRect,
+ nullptr, nullptr, useHWAA);
+ return;
+ }
+
+ if (view_matrix_ok_for_aa_fill_rect(viewMatrix)) {
+ SkAutoTUnref<GrDrawBatch> batch(GrAAFillRectBatch::CreateWithLocalRect(paint.getColor(),
+ viewMatrix,
+ croppedRect,
+ croppedLocalRect));
+ GrPipelineBuilder pipelineBuilder(paint, useHWAA);
+ this->drawBatch(pipelineBuilder, clip, batch);
+ return;
+ }
+
+ SkMatrix viewAndUnLocalMatrix;
+ if (!viewAndUnLocalMatrix.setRectToRect(localRect, rectToDraw, SkMatrix::kFill_ScaleToFit)) {
+ SkDebugf("fillRectToRect called with empty local matrix.\n");
+ return;
+ }
+ viewAndUnLocalMatrix.postConcat(viewMatrix);
+
+ SkPath path;
+ path.setIsVolatile(true);
+ path.addRect(localRect);
+ this->internalDrawPath(clip, paint, viewAndUnLocalMatrix, path, GrStyle());
+}
+
+void GrDrawContext::fillRectWithLocalMatrix(const GrClip& clip,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& rectToDraw,
+ const SkMatrix& localMatrix) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrDrawContext::fillRectWithLocalMatrix");
+
+ SkRect croppedRect = rectToDraw;
+ if (!crop_filled_rect(this->width(), this->height(), clip, viewMatrix, &croppedRect)) {
+ return;
+ }
+
+ AutoCheckFlush acf(fDrawingManager);
+ bool useHWAA;
+
+ if (GrCaps::InstancedSupport::kNone != fContext->caps()->instancedSupport()) {
+ InstancedRendering* ir = this->getDrawTarget()->instancedRendering();
+ SkAutoTUnref<GrDrawBatch> batch(ir->recordRect(croppedRect, viewMatrix, paint.getColor(),
+ localMatrix, paint.isAntiAlias(),
+ fInstancedPipelineInfo, &useHWAA));
+ if (batch) {
+ GrPipelineBuilder pipelineBuilder(paint, useHWAA);
+ this->getDrawTarget()->drawBatch(pipelineBuilder, this, clip, batch);
+ return;
+ }
+ }
+
+ if (!should_apply_coverage_aa(paint, fRenderTarget.get(), &useHWAA)) {
+ this->drawNonAAFilledRect(clip, paint, viewMatrix, croppedRect, nullptr,
+ &localMatrix, nullptr, useHWAA);
+ return;
+ }
+
+ if (view_matrix_ok_for_aa_fill_rect(viewMatrix)) {
+ SkAutoTUnref<GrDrawBatch> batch(GrAAFillRectBatch::Create(paint.getColor(), viewMatrix,
+ localMatrix, croppedRect));
+ GrPipelineBuilder pipelineBuilder(paint, useHWAA);
+ this->getDrawTarget()->drawBatch(pipelineBuilder, this, clip, batch);
+ return;
+ }
+
+ SkMatrix viewAndUnLocalMatrix;
+ if (!localMatrix.invert(&viewAndUnLocalMatrix)) {
+ SkDebugf("fillRectWithLocalMatrix called with degenerate local matrix.\n");
+ return;
+ }
+ viewAndUnLocalMatrix.postConcat(viewMatrix);
+
+ SkPath path;
+ path.setIsVolatile(true);
+ path.addRect(rectToDraw);
+ path.transform(localMatrix);
+ this->internalDrawPath(clip, paint, viewAndUnLocalMatrix, path, GrStyle());
+}
+
+void GrDrawContext::drawVertices(const GrClip& clip,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ GrPrimitiveType primitiveType,
+ int vertexCount,
+ const SkPoint positions[],
+ const SkPoint texCoords[],
+ const GrColor colors[],
+ const uint16_t indices[],
+ int indexCount) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrDrawContext::drawVertices");
+
+ AutoCheckFlush acf(fDrawingManager);
+
+ // TODO clients should give us bounds
+ SkRect bounds;
+ if (!bounds.setBoundsCheck(positions, vertexCount)) {
+ SkDebugf("drawVertices call empty bounds\n");
+ return;
+ }
+
+ viewMatrix.mapRect(&bounds);
+
+ SkAutoTUnref<GrDrawBatch> batch(new GrDrawVerticesBatch(paint.getColor(),
+ primitiveType, viewMatrix, positions,
+ vertexCount, indices, indexCount,
+ colors, texCoords, bounds));
+
+ GrPipelineBuilder pipelineBuilder(paint, this->mustUseHWAA(paint));
+ this->getDrawTarget()->drawBatch(pipelineBuilder, this, clip, batch);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrDrawContext::drawAtlas(const GrClip& clip,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ int spriteCount,
+ const SkRSXform xform[],
+ const SkRect texRect[],
+ const SkColor colors[]) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrDrawContext::drawAtlas");
+
+ AutoCheckFlush acf(fDrawingManager);
+
+ SkAutoTUnref<GrDrawBatch> batch(new GrDrawAtlasBatch(paint.getColor(), viewMatrix, spriteCount,
+ xform, texRect, colors));
+
+ GrPipelineBuilder pipelineBuilder(paint, this->mustUseHWAA(paint));
+ this->getDrawTarget()->drawBatch(pipelineBuilder, this, clip, batch);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrDrawContext::drawRRect(const GrClip& origClip,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkRRect& rrect,
+ const GrStyle& style) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrDrawContext::drawRRect");
+ if (rrect.isEmpty()) {
+ return;
+ }
+
+ GrNoClip noclip;
+ const GrClip* clip = &origClip;
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ // The Android framework frequently clips rrects to themselves where the clip is non-aa and the
+ // draw is aa. Since our lower level clip code works from batch bounds, which are SkRects, it
+ // doesn't detect that the clip can be ignored (modulo antialiasing). The following test
+ // attempts to mitigate the stencil clip cost but will only help when the entire clip stack
+    // can be ignored. We'd prefer to fix this in the framework by removing the clip calls.
+ SkRRect devRRect;
+ if (rrect.transform(viewMatrix, &devRRect) && clip->quickContains(devRRect)) {
+ clip = &noclip;
+ }
+#endif
+ SkASSERT(!style.pathEffect()); // this should've been devolved to a path in SkGpuDevice
+
+ AutoCheckFlush acf(fDrawingManager);
+ const SkStrokeRec stroke = style.strokeRec();
+ bool useHWAA;
+
+ if (GrCaps::InstancedSupport::kNone != fContext->caps()->instancedSupport() &&
+ stroke.isFillStyle()) {
+ InstancedRendering* ir = this->getDrawTarget()->instancedRendering();
+ SkAutoTUnref<GrDrawBatch> batch(ir->recordRRect(rrect, viewMatrix, paint.getColor(),
+ paint.isAntiAlias(), fInstancedPipelineInfo,
+ &useHWAA));
+ if (batch) {
+ GrPipelineBuilder pipelineBuilder(paint, useHWAA);
+ this->getDrawTarget()->drawBatch(pipelineBuilder, this, *clip, batch);
+ return;
+ }
+ }
+
+ if (should_apply_coverage_aa(paint, fRenderTarget.get(), &useHWAA)) {
+ GrShaderCaps* shaderCaps = fContext->caps()->shaderCaps();
+ SkAutoTUnref<GrDrawBatch> batch(GrOvalRenderer::CreateRRectBatch(paint.getColor(),
+ viewMatrix,
+ rrect,
+ stroke,
+ shaderCaps));
+ if (batch) {
+ GrPipelineBuilder pipelineBuilder(paint, useHWAA);
+ this->getDrawTarget()->drawBatch(pipelineBuilder, this, *clip, batch);
+ return;
+ }
+ }
+
+ SkPath path;
+ path.setIsVolatile(true);
+ path.addRRect(rrect);
+ this->internalDrawPath(*clip, paint, viewMatrix, path, style);
+}
+
+bool GrDrawContext::drawFilledDRRect(const GrClip& clip,
+ const GrPaint& paintIn,
+ const SkMatrix& viewMatrix,
+ const SkRRect& origOuter,
+ const SkRRect& origInner) {
+ SkASSERT(!origInner.isEmpty());
+ SkASSERT(!origOuter.isEmpty());
+
+ if (GrCaps::InstancedSupport::kNone != fContext->caps()->instancedSupport()) {
+ bool useHWAA;
+ InstancedRendering* ir = this->getDrawTarget()->instancedRendering();
+ SkAutoTUnref<GrDrawBatch> batch(ir->recordDRRect(origOuter, origInner, viewMatrix,
+ paintIn.getColor(), paintIn.isAntiAlias(),
+ fInstancedPipelineInfo, &useHWAA));
+ if (batch) {
+ GrPipelineBuilder pipelineBuilder(paintIn, useHWAA);
+ this->getDrawTarget()->drawBatch(pipelineBuilder, this, clip, batch);
+ return true;
+ }
+ }
+
+ bool applyAA = paintIn.isAntiAlias() && !fRenderTarget->isUnifiedMultisampled();
+
+ GrPrimitiveEdgeType innerEdgeType = applyAA ? kInverseFillAA_GrProcessorEdgeType :
+ kInverseFillBW_GrProcessorEdgeType;
+ GrPrimitiveEdgeType outerEdgeType = applyAA ? kFillAA_GrProcessorEdgeType :
+ kFillBW_GrProcessorEdgeType;
+
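+    // Work in device space: transform both rrects by the view matrix so the edge effects are in
+    // device coords, and keep the inverse view matrix so the covering rect can be drawn with an
+    // identity view matrix while still generating correct local coords.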
+ SkTCopyOnFirstWrite<SkRRect> inner(origInner), outer(origOuter);
+ SkMatrix inverseVM;
+ if (!viewMatrix.isIdentity()) {
+ if (!origInner.transform(viewMatrix, inner.writable())) {
+ return false;
+ }
+ if (!origOuter.transform(viewMatrix, outer.writable())) {
+ return false;
+ }
+ if (!viewMatrix.invert(&inverseVM)) {
+ return false;
+ }
+ } else {
+ inverseVM.reset();
+ }
+
+ GrPaint grPaint(paintIn);
+ grPaint.setAntiAlias(false);
+
+    // TODO: these need to be geometry processors
+ sk_sp<GrFragmentProcessor> innerEffect(GrRRectEffect::Make(innerEdgeType, *inner));
+ if (!innerEffect) {
+ return false;
+ }
+
+ sk_sp<GrFragmentProcessor> outerEffect(GrRRectEffect::Make(outerEdgeType, *outer));
+ if (!outerEffect) {
+ return false;
+ }
+
+ grPaint.addCoverageFragmentProcessor(std::move(innerEffect));
+ grPaint.addCoverageFragmentProcessor(std::move(outerEffect));
+
+ SkRect bounds = outer->getBounds();
+ if (applyAA) {
+ bounds.outset(SK_ScalarHalf, SK_ScalarHalf);
+ }
+
+ this->fillRectWithLocalMatrix(clip, grPaint, SkMatrix::I(), bounds, inverseVM);
+ return true;
+}
+
+void GrDrawContext::drawDRRect(const GrClip& clip,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkRRect& outer,
+ const SkRRect& inner) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrDrawContext::drawDRRect");
+
+ SkASSERT(!outer.isEmpty());
+ SkASSERT(!inner.isEmpty());
+
+ AutoCheckFlush acf(fDrawingManager);
+
+ if (this->drawFilledDRRect(clip, paint, viewMatrix, outer, inner)) {
+ return;
+ }
+
+ SkPath path;
+ path.setIsVolatile(true);
+ path.addRRect(inner);
+ path.addRRect(outer);
+ path.setFillType(SkPath::kEvenOdd_FillType);
+
+ this->internalDrawPath(clip, paint, viewMatrix, path, GrStyle::SimpleFill());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline bool is_int(float x) {
+ return x == (float) sk_float_round2int(x);
+}
+
+void GrDrawContext::drawRegion(const GrClip& clip,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkRegion& region,
+ const GrStyle& style) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrDrawContext::drawRegion");
+
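+    // A region is a set of pixel-aligned rects, so it is drawn directly only when the view
+    // matrix is translate-only, the style is a simple fill, and AA is either disabled or would be
+    // a no-op (pixel-aligned translation); otherwise fall back to the region's boundary path.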
+ bool isNonTranslate = SkToBool(viewMatrix.getType() & ~(SkMatrix::kTranslate_Mask));
+ bool complexStyle = !style.isSimpleFill();
+ bool antiAlias = paint.isAntiAlias() && (!is_int(viewMatrix.getTranslateX()) ||
+ !is_int(viewMatrix.getTranslateY()));
+ if (isNonTranslate || complexStyle || antiAlias) {
+ SkPath path;
+ region.getBoundaryPath(&path);
+ return this->drawPath(clip, paint, viewMatrix, path, style);
+ }
+
+ SkAutoTUnref<GrDrawBatch> batch(GrRegionBatch::Create(paint.getColor(), viewMatrix, region));
+ GrPipelineBuilder pipelineBuilder(paint, false);
+ this->getDrawTarget()->drawBatch(pipelineBuilder, this, clip, batch);
+}
+
+void GrDrawContext::drawOval(const GrClip& clip,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& oval,
+ const GrStyle& style) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrDrawContext::drawOval");
+
+ if (oval.isEmpty()) {
+ return;
+ }
+
+ SkASSERT(!style.pathEffect()); // this should've been devolved to a path in SkGpuDevice
+
+ AutoCheckFlush acf(fDrawingManager);
+ const SkStrokeRec& stroke = style.strokeRec();
+ bool useHWAA;
+
+ if (GrCaps::InstancedSupport::kNone != fContext->caps()->instancedSupport() &&
+ stroke.isFillStyle()) {
+ InstancedRendering* ir = this->getDrawTarget()->instancedRendering();
+ SkAutoTUnref<GrDrawBatch> batch(ir->recordOval(oval, viewMatrix, paint.getColor(),
+ paint.isAntiAlias(), fInstancedPipelineInfo,
+ &useHWAA));
+ if (batch) {
+ GrPipelineBuilder pipelineBuilder(paint, useHWAA);
+ this->getDrawTarget()->drawBatch(pipelineBuilder, this, clip, batch);
+ return;
+ }
+ }
+
+ if (should_apply_coverage_aa(paint, fRenderTarget.get(), &useHWAA)) {
+ GrShaderCaps* shaderCaps = fContext->caps()->shaderCaps();
+ SkAutoTUnref<GrDrawBatch> batch(GrOvalRenderer::CreateOvalBatch(paint.getColor(),
+ viewMatrix,
+ oval,
+ stroke,
+ shaderCaps));
+ if (batch) {
+ GrPipelineBuilder pipelineBuilder(paint, useHWAA);
+ this->getDrawTarget()->drawBatch(pipelineBuilder, this, clip, batch);
+ return;
+ }
+ }
+
+ SkPath path;
+ path.setIsVolatile(true);
+ path.addOval(oval);
+ this->internalDrawPath(clip, paint, viewMatrix, path, style);
+}
+
+void GrDrawContext::drawArc(const GrClip& clip,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& oval,
+ SkScalar startAngle,
+ SkScalar sweepAngle,
+ bool useCenter,
+ const GrStyle& style) {
+ bool useHWAA;
+ if (should_apply_coverage_aa(paint, fRenderTarget.get(), &useHWAA)) {
+ GrShaderCaps* shaderCaps = fContext->caps()->shaderCaps();
+ SkAutoTUnref<GrDrawBatch> batch(GrOvalRenderer::CreateArcBatch(paint.getColor(),
+ viewMatrix,
+ oval,
+ startAngle,
+ sweepAngle,
+ useCenter,
+ style,
+ shaderCaps));
+ if (batch) {
+ GrPipelineBuilder pipelineBuilder(paint, useHWAA);
+ this->getDrawTarget()->drawBatch(pipelineBuilder, this, clip, batch);
+ return;
+ }
+ }
+ SkPath path;
+ SkPathPriv::CreateDrawArcPath(&path, oval, startAngle, sweepAngle, useCenter,
+ style.isSimpleFill());
+ this->internalDrawPath(clip, paint, viewMatrix, path, style);
+ return;
+}
+
+void GrDrawContext::drawImageLattice(const GrClip& clip,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ int imageWidth,
+ int imageHeight,
+ std::unique_ptr<SkLatticeIter> iter,
+ const SkRect& dst) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrDrawContext::drawImageLattice");
+
+ AutoCheckFlush acf(fDrawingManager);
+
+ SkAutoTUnref<GrDrawBatch> batch(GrNinePatch::CreateNonAA(paint.getColor(), viewMatrix,
+ imageWidth, imageHeight,
+ std::move(iter), dst));
+
+ GrPipelineBuilder pipelineBuilder(paint, this->mustUseHWAA(paint));
+ this->getDrawTarget()->drawBatch(pipelineBuilder, this, clip, batch);
+}
+
+void GrDrawContext::prepareForExternalIO() {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrDrawContext::prepareForExternalIO");
+
+ ASSERT_OWNED_RESOURCE(fRenderTarget);
+
+ fDrawingManager->prepareSurfaceForExternalIO(fRenderTarget.get());
+}
+
+void GrDrawContext::drawNonAAFilledRect(const GrClip& clip,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkRect* localRect,
+ const SkMatrix* localMatrix,
+ const GrUserStencilSettings* ss,
+ bool useHWAA) {
+ SkASSERT(!useHWAA || this->isStencilBufferMultisampled());
+ SkAutoTUnref<GrDrawBatch> batch(
+ GrRectBatchFactory::CreateNonAAFill(paint.getColor(), viewMatrix, rect, localRect,
+ localMatrix));
+ GrPipelineBuilder pipelineBuilder(paint, useHWAA);
+ if (ss) {
+ pipelineBuilder.setUserStencil(ss);
+ }
+ this->getDrawTarget()->drawBatch(pipelineBuilder, this, clip, batch);
+}
+
+bool GrDrawContext::readPixels(const SkImageInfo& dstInfo, void* dstBuffer, size_t dstRowBytes,
+ int x, int y) {
+ // TODO: teach fRenderTarget to take ImageInfo directly to specify the src pixels
+ GrPixelConfig config = SkImageInfo2GrPixelConfig(dstInfo, *fContext->caps());
+ if (kUnknown_GrPixelConfig == config) {
+ return false;
+ }
+
+ uint32_t flags = 0;
+ if (kUnpremul_SkAlphaType == dstInfo.alphaType()) {
+ flags = GrContext::kUnpremul_PixelOpsFlag;
+ }
+
+ return fRenderTarget->readPixels(x, y, dstInfo.width(), dstInfo.height(),
+ config, dstBuffer, dstRowBytes, flags);
+}
+
+bool GrDrawContext::writePixels(const SkImageInfo& srcInfo, const void* srcBuffer,
+ size_t srcRowBytes, int x, int y) {
+ // TODO: teach fRenderTarget to take ImageInfo directly to specify the src pixels
+ GrPixelConfig config = SkImageInfo2GrPixelConfig(srcInfo, *fContext->caps());
+ if (kUnknown_GrPixelConfig == config) {
+ return false;
+ }
+ uint32_t flags = 0;
+ if (kUnpremul_SkAlphaType == srcInfo.alphaType()) {
+ flags = GrContext::kUnpremul_PixelOpsFlag;
+ }
+
+ return fRenderTarget->writePixels(x, y, srcInfo.width(), srcInfo.height(),
+ config, srcBuffer, srcRowBytes, flags);
+}
+
+// Can 'path' be drawn as a pair of filled nested rectangles?
+static bool fills_as_nested_rects(const SkMatrix& viewMatrix, const SkPath& path, SkRect rects[2]) {
+
+ if (path.isInverseFillType()) {
+ return false;
+ }
+
+ // TODO: this restriction could be lifted if we were willing to apply
+ // the matrix to all the points individually rather than just to the rect
+ if (!viewMatrix.rectStaysRect()) {
+ return false;
+ }
+
+ SkPath::Direction dirs[2];
+ if (!path.isNestedFillRects(rects, dirs)) {
+ return false;
+ }
+
+ if (SkPath::kWinding_FillType == path.getFillType() && dirs[0] == dirs[1]) {
+ // The two rects need to be wound opposite to each other
+ return false;
+ }
+
+ // Right now, nested rects where the margin is not the same width
+ // all around do not render correctly
+ const SkScalar* outer = rects[0].asScalars();
+ const SkScalar* inner = rects[1].asScalars();
+
+ bool allEq = true;
+
+ SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
+ bool allGoE1 = margin >= SK_Scalar1;
+
+ for (int i = 1; i < 4; ++i) {
+ SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
+ if (temp < SK_Scalar1) {
+ allGoE1 = false;
+ }
+ if (!SkScalarNearlyEqual(margin, temp)) {
+ allEq = false;
+ }
+ }
+
+ return allEq || allGoE1;
+}
+
+void GrDrawContext::drawPath(const GrClip& clip,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkPath& path,
+ const GrStyle& style) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrDrawContext::drawPath");
+
+ if (path.isEmpty()) {
+ if (path.isInverseFillType()) {
+ this->drawPaint(clip, paint, viewMatrix);
+ }
+ return;
+ }
+
+ AutoCheckFlush acf(fDrawingManager);
+
+ bool useHWAA;
+ if (should_apply_coverage_aa(paint, fRenderTarget.get(), &useHWAA) && !style.pathEffect()) {
+ if (style.isSimpleFill() && !path.isConvex()) {
+ // Concave AA paths are expensive - try to avoid them for special cases
+ SkRect rects[2];
+
+ if (fills_as_nested_rects(viewMatrix, path, rects)) {
+ SkAutoTUnref<GrDrawBatch> batch(GrRectBatchFactory::CreateAAFillNestedRects(
+ paint.getColor(), viewMatrix, rects));
+ if (batch) {
+ GrPipelineBuilder pipelineBuilder(paint, useHWAA);
+ this->getDrawTarget()->drawBatch(pipelineBuilder, this, clip, batch);
+ }
+ return;
+ }
+ }
+ SkRect ovalRect;
+ bool isOval = path.isOval(&ovalRect);
+
+ if (isOval && !path.isInverseFillType()) {
+ GrShaderCaps* shaderCaps = fContext->caps()->shaderCaps();
+ SkAutoTUnref<GrDrawBatch> batch(GrOvalRenderer::CreateOvalBatch(paint.getColor(),
+ viewMatrix,
+ ovalRect,
+ style.strokeRec(),
+ shaderCaps));
+ if (batch) {
+ GrPipelineBuilder pipelineBuilder(paint, useHWAA);
+ this->getDrawTarget()->drawBatch(pipelineBuilder, this, clip, batch);
+ return;
+ }
+ }
+ }
+
+ // Note that internalDrawPath may sw-rasterize the path into a scratch texture.
+ // Scratch textures can be recycled after they are returned to the texture
+ // cache. This presents a potential hazard for buffered drawing. However,
+ // the writePixels that uploads to the scratch will perform a flush so we're
+ // OK.
+ this->internalDrawPath(clip, paint, viewMatrix, path, style);
+}
+
+bool GrDrawContextPriv::drawAndStencilPath(const GrClip& clip,
+ const GrUserStencilSettings* ss,
+ SkRegion::Op op,
+ bool invert,
+ bool doAA,
+ const SkMatrix& viewMatrix,
+ const SkPath& path) {
+ ASSERT_SINGLE_OWNER_PRIV
+ RETURN_FALSE_IF_ABANDONED_PRIV
+ SkDEBUGCODE(fDrawContext->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fDrawContext->fAuditTrail, "GrDrawContext::drawPath");
+
+ if (path.isEmpty() && path.isInverseFillType()) {
+ this->drawAndStencilRect(clip, ss, op, invert, false, SkMatrix::I(),
+ SkRect::MakeIWH(fDrawContext->width(),
+ fDrawContext->height()));
+ return true;
+ }
+
+ AutoCheckFlush acf(fDrawContext->fDrawingManager);
+
+    // An assumption here is that the path renderer will use some form of tweaking of
+    // the src color (either the input alpha or in the frag shader) to implement
+    // AA. If we have some future driver-mojo path AA that can do the right
+    // thing WRT the blend then we'll need some query on the PR.
+ bool useCoverageAA = doAA && !fDrawContext->isUnifiedMultisampled();
+ bool hasUserStencilSettings = !ss->isUnused();
+ bool isStencilBufferMSAA = fDrawContext->isStencilBufferMultisampled();
+
+ const GrPathRendererChain::DrawType type =
+ useCoverageAA ? GrPathRendererChain::kColorAntiAlias_DrawType
+ : GrPathRendererChain::kColor_DrawType;
+
+ GrShape shape(path, GrStyle::SimpleFill());
+ GrPathRenderer::CanDrawPathArgs canDrawArgs;
+ canDrawArgs.fShaderCaps = fDrawContext->fDrawingManager->getContext()->caps()->shaderCaps();
+ canDrawArgs.fViewMatrix = &viewMatrix;
+ canDrawArgs.fShape = &shape;
+ canDrawArgs.fAntiAlias = useCoverageAA;
+ canDrawArgs.fHasUserStencilSettings = hasUserStencilSettings;
+ canDrawArgs.fIsStencilBufferMSAA = isStencilBufferMSAA;
+
+ // Don't allow the SW renderer
+ GrPathRenderer* pr = fDrawContext->fDrawingManager->getPathRenderer(canDrawArgs, false, type);
+ if (!pr) {
+ return false;
+ }
+
+ GrPaint paint;
+ paint.setCoverageSetOpXPFactory(op, invert);
+
+ GrPathRenderer::DrawPathArgs args;
+ args.fResourceProvider = fDrawContext->fDrawingManager->getContext()->resourceProvider();
+ args.fPaint = &paint;
+ args.fUserStencilSettings = ss;
+ args.fDrawContext = fDrawContext;
+ args.fClip = &clip;
+ args.fViewMatrix = &viewMatrix;
+ args.fShape = &shape;
+ args.fAntiAlias = useCoverageAA;
+ args.fGammaCorrect = fDrawContext->isGammaCorrect();
+ pr->drawPath(args);
+ return true;
+}
+
+SkBudgeted GrDrawContextPriv::isBudgeted() const {
+ ASSERT_SINGLE_OWNER_PRIV
+
+ if (fDrawContext->wasAbandoned()) {
+ return SkBudgeted::kNo;
+ }
+
+ SkDEBUGCODE(fDrawContext->validate();)
+
+ return fDrawContext->fRenderTarget->resourcePriv().isBudgeted();
+}
+
+void GrDrawContext::internalDrawPath(const GrClip& clip,
+ const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkPath& path,
+ const GrStyle& style) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkASSERT(!path.isEmpty());
+
+ bool useCoverageAA = should_apply_coverage_aa(paint, fRenderTarget.get());
+ constexpr bool kHasUserStencilSettings = false;
+ bool isStencilBufferMSAA = this->isStencilBufferMultisampled();
+
+ const GrPathRendererChain::DrawType type =
+ useCoverageAA ? GrPathRendererChain::kColorAntiAlias_DrawType
+ : GrPathRendererChain::kColor_DrawType;
+
+ GrShape shape(path, style);
+ if (shape.isEmpty()) {
+ return;
+ }
+ GrPathRenderer::CanDrawPathArgs canDrawArgs;
+ canDrawArgs.fShaderCaps = fDrawingManager->getContext()->caps()->shaderCaps();
+ canDrawArgs.fViewMatrix = &viewMatrix;
+ canDrawArgs.fShape = &shape;
+ canDrawArgs.fAntiAlias = useCoverageAA;
+ canDrawArgs.fHasUserStencilSettings = kHasUserStencilSettings;
+ canDrawArgs.fIsStencilBufferMSAA = isStencilBufferMSAA;
+
+    // First, try without applying any of the style to the geometry (and barring the SW renderer).
+ GrPathRenderer* pr = fDrawingManager->getPathRenderer(canDrawArgs, false, type);
+ SkScalar styleScale = GrStyle::MatrixToScaleFactor(viewMatrix);
+
+ if (!pr && shape.style().pathEffect()) {
+ // It didn't work above, so try again with the path effect applied.
+ shape = shape.applyStyle(GrStyle::Apply::kPathEffectOnly, styleScale);
+ if (shape.isEmpty()) {
+ return;
+ }
+ pr = fDrawingManager->getPathRenderer(canDrawArgs, false, type);
+ }
+ if (!pr) {
+ if (shape.style().applies()) {
+ shape = shape.applyStyle(GrStyle::Apply::kPathEffectAndStrokeRec, styleScale);
+ if (shape.isEmpty()) {
+ return;
+ }
+ }
+ // This time, allow SW renderer
+ pr = fDrawingManager->getPathRenderer(canDrawArgs, true, type);
+ }
+
+ if (!pr) {
+#ifdef SK_DEBUG
+ SkDebugf("Unable to find path renderer compatible with path.\n");
+#endif
+ return;
+ }
+
+ GrPathRenderer::DrawPathArgs args;
+ args.fResourceProvider = fDrawingManager->getContext()->resourceProvider();
+ args.fPaint = &paint;
+ args.fUserStencilSettings = &GrUserStencilSettings::kUnused;
+ args.fDrawContext = this;
+ args.fClip = &clip;
+ args.fViewMatrix = &viewMatrix;
+ args.fShape = canDrawArgs.fShape;
+ args.fAntiAlias = useCoverageAA;
+ args.fGammaCorrect = this->isGammaCorrect();
+ pr->drawPath(args);
+}
+
+void GrDrawContext::drawBatch(const GrPipelineBuilder& pipelineBuilder, const GrClip& clip,
+ GrDrawBatch* batch) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrDrawContext::drawBatch");
+
+ this->getDrawTarget()->drawBatch(pipelineBuilder, this, clip, batch);
+}
diff --git a/gfx/skia/skia/src/gpu/GrDrawContextPriv.h b/gfx/skia/skia/src/gpu/GrDrawContextPriv.h
new file mode 100644
index 000000000..63eae12a0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDrawContextPriv.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDrawContextPriv_DEFINED
+#define GrDrawContextPriv_DEFINED
+
+#include "GrDrawContext.h"
+#include "GrDrawTarget.h"
+#include "GrPathRendering.h"
+
+class GrFixedClip;
+class GrPath;
+struct GrUserStencilSettings;
+
+/** Class that adds methods to GrDrawContext that are only intended for use internal to Skia.
+ This class is purely a privileged window into GrDrawContext. It should never have additional
+ data members or virtual methods. */
+class GrDrawContextPriv {
+public:
+ gr_instanced::InstancedRendering* accessInstancedRendering() const {
+ return fDrawContext->getDrawTarget()->instancedRendering();
+ }
+
+ void clear(const GrFixedClip&, const GrColor, bool canIgnoreClip);
+
+ void clearStencilClip(const GrFixedClip&, bool insideStencilMask);
+
+ void stencilRect(const GrClip& clip,
+ const GrUserStencilSettings* ss,
+ bool useHWAA,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect);
+
+ void stencilPath(const GrClip&,
+ bool useHWAA,
+ const SkMatrix& viewMatrix,
+ const GrPath*);
+
+ bool drawAndStencilRect(const GrClip&,
+ const GrUserStencilSettings*,
+ SkRegion::Op op,
+ bool invert,
+ bool doAA,
+ const SkMatrix& viewMatrix,
+ const SkRect&);
+
+ bool drawAndStencilPath(const GrClip&,
+ const GrUserStencilSettings*,
+ SkRegion::Op op,
+ bool invert,
+ bool doAA,
+ const SkMatrix& viewMatrix,
+ const SkPath&);
+
+ SkBudgeted isBudgeted() const;
+
+ void testingOnly_drawBatch(const GrPaint&,
+ GrDrawBatch* batch,
+ const GrUserStencilSettings* = nullptr,
+ bool snapToCenters = false);
+
+private:
+ explicit GrDrawContextPriv(GrDrawContext* drawContext) : fDrawContext(drawContext) {}
+ GrDrawContextPriv(const GrRenderTargetPriv&) {} // unimpl
+ GrDrawContextPriv& operator=(const GrRenderTargetPriv&); // unimpl
+
+ // No taking addresses of this type.
+ const GrDrawContextPriv* operator&() const;
+ GrDrawContextPriv* operator&();
+
+ GrDrawContext* fDrawContext;
+
+ friend class GrDrawContext; // to construct/copy this type.
+};
+
+inline GrDrawContextPriv GrDrawContext::drawContextPriv() { return GrDrawContextPriv(this); }
+
+inline const GrDrawContextPriv GrDrawContext::drawContextPriv () const {
+ return GrDrawContextPriv(const_cast<GrDrawContext*>(this));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrDrawTarget.cpp b/gfx/skia/skia/src/gpu/GrDrawTarget.cpp
new file mode 100644
index 000000000..0117d0e1a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDrawTarget.cpp
@@ -0,0 +1,626 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrDrawTarget.h"
+
+#include "GrAppliedClip.h"
+#include "GrAuditTrail.h"
+#include "GrCaps.h"
+#include "GrDrawContext.h"
+#include "GrGpu.h"
+#include "GrGpuCommandBuffer.h"
+#include "GrPath.h"
+#include "GrPipeline.h"
+#include "GrMemoryPool.h"
+#include "GrPipelineBuilder.h"
+#include "GrRenderTarget.h"
+#include "GrResourceProvider.h"
+#include "GrRenderTargetPriv.h"
+#include "GrStencilAttachment.h"
+#include "GrSurfacePriv.h"
+#include "GrTexture.h"
+#include "gl/GrGLRenderTarget.h"
+
+#include "SkStrokeRec.h"
+
+#include "batches/GrClearBatch.h"
+#include "batches/GrClearStencilClipBatch.h"
+#include "batches/GrCopySurfaceBatch.h"
+#include "batches/GrDiscardBatch.h"
+#include "batches/GrDrawBatch.h"
+#include "batches/GrDrawPathBatch.h"
+#include "batches/GrRectBatchFactory.h"
+#include "batches/GrStencilPathBatch.h"
+
+#include "instanced/InstancedRendering.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Experimentally we have found that most batching occurs within the first 10 comparisons.
+static const int kDefaultMaxBatchLookback = 10;
+static const int kDefaultMaxBatchLookahead = 10;
+
+GrDrawTarget::GrDrawTarget(GrRenderTarget* rt, GrGpu* gpu, GrResourceProvider* resourceProvider,
+ GrAuditTrail* auditTrail, const Options& options)
+ : fLastFullClearBatch(nullptr)
+ , fGpu(SkRef(gpu))
+ , fResourceProvider(resourceProvider)
+ , fAuditTrail(auditTrail)
+ , fFlags(0)
+ , fRenderTarget(rt) {
+ // TODO: Stop extracting the context (currently needed by GrClip)
+ fContext = fGpu->getContext();
+
+ fClipBatchToBounds = options.fClipBatchToBounds;
+ fDrawBatchBounds = options.fDrawBatchBounds;
+ fMaxBatchLookback = (options.fMaxBatchLookback < 0) ? kDefaultMaxBatchLookback :
+ options.fMaxBatchLookback;
+ fMaxBatchLookahead = (options.fMaxBatchLookahead < 0) ? kDefaultMaxBatchLookahead :
+ options.fMaxBatchLookahead;
+
+ if (GrCaps::InstancedSupport::kNone != this->caps()->instancedSupport()) {
+ fInstancedRendering.reset(fGpu->createInstancedRendering());
+ }
+
+ rt->setLastDrawTarget(this);
+
+#ifdef SK_DEBUG
+ static int debugID = 0;
+ fDebugID = debugID++;
+#endif
+}
+
+GrDrawTarget::~GrDrawTarget() {
+ if (fRenderTarget && this == fRenderTarget->getLastDrawTarget()) {
+ fRenderTarget->setLastDrawTarget(nullptr);
+ }
+
+ fGpu->unref();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Add a GrDrawTarget-based dependency
+void GrDrawTarget::addDependency(GrDrawTarget* dependedOn) {
+ SkASSERT(!dependedOn->dependsOn(this)); // loops are bad
+
+ if (this->dependsOn(dependedOn)) {
+ return; // don't add duplicate dependencies
+ }
+
+ *fDependencies.push() = dependedOn;
+}
+
+// Convert from a GrSurface-based dependency to a GrDrawTarget one
+void GrDrawTarget::addDependency(GrSurface* dependedOn) {
+ if (dependedOn->asRenderTarget() && dependedOn->asRenderTarget()->getLastDrawTarget()) {
+ // If it is still receiving dependencies, this DT shouldn't be closed
+ SkASSERT(!this->isClosed());
+
+ GrDrawTarget* dt = dependedOn->asRenderTarget()->getLastDrawTarget();
+ if (dt == this) {
+ // self-read - presumably for dst reads
+ } else {
+ this->addDependency(dt);
+
+            // We couldn't close it in the self-read case (dt == this), so it is only closed here.
+ dt->makeClosed();
+ }
+ }
+}
+
+#ifdef SK_DEBUG
+void GrDrawTarget::dump() const {
+ SkDebugf("--------------------------------------------------------------\n");
+ SkDebugf("node: %d -> RT: %d\n", fDebugID, fRenderTarget ? fRenderTarget->uniqueID() : -1);
+ SkDebugf("relies On (%d): ", fDependencies.count());
+ for (int i = 0; i < fDependencies.count(); ++i) {
+ SkDebugf("%d, ", fDependencies[i]->fDebugID);
+ }
+ SkDebugf("\n");
+ SkDebugf("batches (%d):\n", fRecordedBatches.count());
+ for (int i = 0; i < fRecordedBatches.count(); ++i) {
+ SkDebugf("*******************************\n");
+ if (!fRecordedBatches[i].fBatch) {
+ SkDebugf("%d: <combined forward>\n", i);
+ } else {
+ SkDebugf("%d: %s\n", i, fRecordedBatches[i].fBatch->name());
+ SkString str = fRecordedBatches[i].fBatch->dumpInfo();
+ SkDebugf("%s\n", str.c_str());
+ const SkRect& clippedBounds = fRecordedBatches[i].fClippedBounds;
+ SkDebugf("ClippedBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
+ clippedBounds.fLeft, clippedBounds.fTop, clippedBounds.fRight,
+ clippedBounds.fBottom);
+ }
+ }
+}
+#endif
+
+bool GrDrawTarget::setupDstReadIfNecessary(const GrPipelineBuilder& pipelineBuilder,
+ GrRenderTarget* rt,
+ const GrClip& clip,
+ const GrPipelineOptimizations& optimizations,
+ GrXferProcessor::DstTexture* dstTexture,
+ const SkRect& batchBounds) {
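+    // Conservatively outset the batch bounds (e.g. to cover any AA bloat) before computing the
+    // region of the destination that needs to be copied.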
+ SkRect bounds = batchBounds;
+ bounds.outset(0.5f, 0.5f);
+
+ if (!pipelineBuilder.willXPNeedDstTexture(*this->caps(), optimizations)) {
+ return true;
+ }
+
+ if (this->caps()->textureBarrierSupport()) {
+ if (GrTexture* rtTex = rt->asTexture()) {
+ // The render target is a texture, so we can read from it directly in the shader. The XP
+            // will be responsible for detecting this situation and requesting a texture barrier.
+ dstTexture->setTexture(rtTex);
+ dstTexture->setOffset(0, 0);
+ return true;
+ }
+ }
+
+ SkIRect copyRect;
+ clip.getConservativeBounds(rt->width(), rt->height(), &copyRect);
+
+ SkIRect drawIBounds;
+ bounds.roundOut(&drawIBounds);
+ if (!copyRect.intersect(drawIBounds)) {
+#ifdef SK_DEBUG
+ GrCapsDebugf(this->caps(), "Missed an early reject. "
+ "Bailing on draw from setupDstReadIfNecessary.\n");
+#endif
+ return false;
+ }
+
+ // MSAA consideration: When there is support for reading MSAA samples in the shader we could
+ // have per-sample dst values by making the copy multisampled.
+ GrSurfaceDesc desc;
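+    // If the backend doesn't provide a specialized descriptor for dst copies, fall back to a
+    // plain render target with the same config as the destination.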
+ if (!fGpu->initDescForDstCopy(rt, &desc)) {
+ desc.fOrigin = kDefault_GrSurfaceOrigin;
+ desc.fFlags = kRenderTarget_GrSurfaceFlag;
+ desc.fConfig = rt->config();
+ }
+
+ desc.fWidth = copyRect.width();
+ desc.fHeight = copyRect.height();
+
+ static const uint32_t kFlags = 0;
+ SkAutoTUnref<GrTexture> copy(fResourceProvider->createApproxTexture(desc, kFlags));
+
+ if (!copy) {
+ SkDebugf("Failed to create temporary copy of destination texture.\n");
+ return false;
+ }
+ SkIPoint dstPoint = {0, 0};
+ this->copySurface(copy, rt, copyRect, dstPoint);
+ dstTexture->setTexture(copy);
+ dstTexture->setOffset(copyRect.fLeft, copyRect.fTop);
+ return true;
+}
+
+void GrDrawTarget::prepareBatches(GrBatchFlushState* flushState) {
+    // Usually the drawTargets are already closed at this point, but sometimes Ganesh
+ // needs to flush mid-draw. In that case, the SkGpuDevice's drawTargets won't be closed
+ // but need to be flushed anyway. Closing such drawTargets here will mean new
+ // drawTargets will be created to replace them if the SkGpuDevice(s) write to them again.
+ this->makeClosed();
+
+ // Loop over the batches that haven't yet generated their geometry
+ for (int i = 0; i < fRecordedBatches.count(); ++i) {
+ if (fRecordedBatches[i].fBatch) {
+ fRecordedBatches[i].fBatch->prepare(flushState);
+ }
+ }
+
+ if (fInstancedRendering) {
+ fInstancedRendering->beginFlush(flushState->resourceProvider());
+ }
+}
+
+bool GrDrawTarget::drawBatches(GrBatchFlushState* flushState) {
+ if (0 == fRecordedBatches.count()) {
+ return false;
+ }
+ // Draw all the generated geometry.
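+    // A new command buffer is opened whenever the target render target changes; each buffer is
+    // submitted with the union of its batches' clipped bounds.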
+ SkRandom random;
+ GrRenderTarget* currentRT = nullptr;
+ SkAutoTDelete<GrGpuCommandBuffer> commandBuffer;
+ SkRect bounds = SkRect::MakeEmpty();
+ for (int i = 0; i < fRecordedBatches.count(); ++i) {
+ if (!fRecordedBatches[i].fBatch) {
+ continue;
+ }
+ if (fRecordedBatches[i].fBatch->renderTarget() != currentRT) {
+ if (commandBuffer) {
+ commandBuffer->end();
+ if (bounds.intersect(0, 0,
+ SkIntToScalar(currentRT->width()),
+ SkIntToScalar(currentRT->height()))) {
+ SkIRect iBounds;
+ bounds.roundOut(&iBounds);
+ commandBuffer->submit(iBounds);
+ }
+ commandBuffer.reset();
+ }
+ bounds.setEmpty();
+ currentRT = fRecordedBatches[i].fBatch->renderTarget();
+ if (currentRT) {
+ static const GrGpuCommandBuffer::LoadAndStoreInfo kBasicLoadStoreInfo
+ { GrGpuCommandBuffer::LoadOp::kLoad,GrGpuCommandBuffer::StoreOp::kStore,
+ GrColor_ILLEGAL };
+ commandBuffer.reset(fGpu->createCommandBuffer(currentRT,
+ kBasicLoadStoreInfo, // Color
+ kBasicLoadStoreInfo)); // Stencil
+ }
+ flushState->setCommandBuffer(commandBuffer);
+ }
+ if (commandBuffer) {
+ bounds.join(fRecordedBatches[i].fClippedBounds);
+ }
+ if (fDrawBatchBounds) {
+ const SkRect& bounds = fRecordedBatches[i].fClippedBounds;
+ SkIRect ibounds;
+ bounds.roundOut(&ibounds);
+            // In a multi-draw buffer all the batches use the same render target and we won't
+            // need to get the batch's bounds.
+ if (GrRenderTarget* rt = fRecordedBatches[i].fBatch->renderTarget()) {
+ fGpu->drawDebugWireRect(rt, ibounds, 0xFF000000 | random.nextU());
+ }
+ }
+ fRecordedBatches[i].fBatch->draw(flushState);
+ }
+ if (commandBuffer) {
+ commandBuffer->end();
+ if (bounds.intersect(0, 0,
+ SkIntToScalar(currentRT->width()),
+ SkIntToScalar(currentRT->height()))) {
+ SkIRect iBounds;
+ bounds.roundOut(&iBounds);
+ commandBuffer->submit(iBounds);
+ }
+ flushState->setCommandBuffer(nullptr);
+ }
+
+ fGpu->finishDrawTarget();
+ return true;
+}
+
+void GrDrawTarget::reset() {
+ fLastFullClearBatch = nullptr;
+ fRecordedBatches.reset();
+ if (fInstancedRendering) {
+ fInstancedRendering->endFlush();
+ }
+}
+
+static void batch_bounds(SkRect* bounds, const GrBatch* batch) {
+ *bounds = batch->bounds();
+ if (batch->hasZeroArea()) {
+ if (batch->hasAABloat()) {
+ bounds->outset(0.5f, 0.5f);
+ } else {
+ // We don't know which way the particular GPU will snap lines or points at integer
+ // coords. So we ensure that the bounds is large enough for either snap.
+ SkRect before = *bounds;
+ bounds->roundOut(bounds);
+ if (bounds->fLeft == before.fLeft) {
+ bounds->fLeft -= 1;
+ }
+ if (bounds->fTop == before.fTop) {
+ bounds->fTop -= 1;
+ }
+ if (bounds->fRight == before.fRight) {
+ bounds->fRight += 1;
+ }
+ if (bounds->fBottom == before.fBottom) {
+ bounds->fBottom += 1;
+ }
+ }
+ }
+}
+
+void GrDrawTarget::drawBatch(const GrPipelineBuilder& pipelineBuilder,
+ GrDrawContext* drawContext,
+ const GrClip& clip,
+ GrDrawBatch* batch) {
+ // Setup clip
+ SkRect bounds;
+ batch_bounds(&bounds, batch);
+ GrAppliedClip appliedClip(bounds);
+ if (!clip.apply(fContext, drawContext, pipelineBuilder.isHWAntialias(),
+ pipelineBuilder.hasUserStencilSettings(), &appliedClip)) {
+ return;
+ }
+
+ // TODO: this is the only remaining usage of the AutoRestoreFragmentProcessorState - remove it
+ GrPipelineBuilder::AutoRestoreFragmentProcessorState arfps;
+ if (appliedClip.clipCoverageFragmentProcessor()) {
+ arfps.set(&pipelineBuilder);
+ arfps.addCoverageFragmentProcessor(sk_ref_sp(appliedClip.clipCoverageFragmentProcessor()));
+ }
+
+ if (pipelineBuilder.hasUserStencilSettings() || appliedClip.hasStencilClip()) {
+ if (!fResourceProvider->attachStencilAttachment(drawContext->accessRenderTarget())) {
+ SkDebugf("ERROR creating stencil attachment. Draw skipped.\n");
+ return;
+ }
+ }
+
+ GrPipeline::CreateArgs args;
+ args.fPipelineBuilder = &pipelineBuilder;
+ args.fDrawContext = drawContext;
+ args.fCaps = this->caps();
+ batch->getPipelineOptimizations(&args.fOpts);
+ if (args.fOpts.fOverrides.fUsePLSDstRead || fClipBatchToBounds) {
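+        // Clamp the batch's device-space bounds to the viewport and apply them as an extra
+        // scissor.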
+ GrGLIRect viewport;
+ viewport.fLeft = 0;
+ viewport.fBottom = 0;
+ viewport.fWidth = drawContext->width();
+ viewport.fHeight = drawContext->height();
+ SkIRect ibounds;
+ ibounds.fLeft = SkTPin(SkScalarFloorToInt(batch->bounds().fLeft), viewport.fLeft,
+ viewport.fWidth);
+ ibounds.fTop = SkTPin(SkScalarFloorToInt(batch->bounds().fTop), viewport.fBottom,
+ viewport.fHeight);
+ ibounds.fRight = SkTPin(SkScalarCeilToInt(batch->bounds().fRight), viewport.fLeft,
+ viewport.fWidth);
+ ibounds.fBottom = SkTPin(SkScalarCeilToInt(batch->bounds().fBottom), viewport.fBottom,
+ viewport.fHeight);
+ if (!appliedClip.addScissor(ibounds)) {
+ return;
+ }
+ }
+ args.fOpts.fColorPOI.completeCalculations(
+ sk_sp_address_as_pointer_address(pipelineBuilder.fColorFragmentProcessors.begin()),
+ pipelineBuilder.numColorFragmentProcessors());
+ args.fOpts.fCoveragePOI.completeCalculations(
+ sk_sp_address_as_pointer_address(pipelineBuilder.fCoverageFragmentProcessors.begin()),
+ pipelineBuilder.numCoverageFragmentProcessors());
+ args.fScissor = &appliedClip.scissorState();
+ args.fWindowRectsState = &appliedClip.windowRectsState();
+ args.fHasStencilClip = appliedClip.hasStencilClip();
+ if (!this->setupDstReadIfNecessary(pipelineBuilder, drawContext->accessRenderTarget(),
+ clip, args.fOpts,
+ &args.fDstTexture, batch->bounds())) {
+ return;
+ }
+
+ if (!batch->installPipeline(args)) {
+ return;
+ }
+
+#ifdef ENABLE_MDB
+ SkASSERT(fRenderTarget);
+ batch->pipeline()->addDependenciesTo(fRenderTarget);
+#endif
+ this->recordBatch(batch, appliedClip.clippedDrawBounds());
+}
+
+void GrDrawTarget::stencilPath(GrDrawContext* drawContext,
+ const GrClip& clip,
+ bool useHWAA,
+ const SkMatrix& viewMatrix,
+ const GrPath* path) {
+ // TODO: extract portions of checkDraw that are relevant to path stenciling.
+ SkASSERT(path);
+ SkASSERT(this->caps()->shaderCaps()->pathRenderingSupport());
+
+ // FIXME: Use path bounds instead of this WAR once
+ // https://bugs.chromium.org/p/skia/issues/detail?id=5640 is resolved.
+ SkRect bounds = SkRect::MakeIWH(drawContext->width(), drawContext->height());
+
+ // Setup clip
+ GrAppliedClip appliedClip(bounds);
+ if (!clip.apply(fContext, drawContext, useHWAA, true, &appliedClip)) {
+ return;
+ }
+ // TODO: respect fClipBatchToBounds if we ever start computing bounds here.
+
+ // Coverage AA does not make sense when rendering to the stencil buffer. The caller should never
+ // attempt this in a situation that would require coverage AA.
+ SkASSERT(!appliedClip.clipCoverageFragmentProcessor());
+
+ GrStencilAttachment* stencilAttachment = fResourceProvider->attachStencilAttachment(
+ drawContext->accessRenderTarget());
+ if (!stencilAttachment) {
+ SkDebugf("ERROR creating stencil attachment. Draw skipped.\n");
+ return;
+ }
+
+ GrBatch* batch = GrStencilPathBatch::Create(viewMatrix,
+ useHWAA,
+ path->getFillType(),
+ appliedClip.hasStencilClip(),
+ stencilAttachment->bits(),
+ appliedClip.scissorState(),
+ drawContext->accessRenderTarget(),
+ path);
+ this->recordBatch(batch, appliedClip.clippedDrawBounds());
+ batch->unref();
+}
+
+void GrDrawTarget::addBatch(sk_sp<GrBatch> batch) {
+ this->recordBatch(batch.get(), batch->bounds());
+}
+
+void GrDrawTarget::fullClear(GrRenderTarget* renderTarget, GrColor color) {
+ // Currently this just inserts or updates the last clear batch. However, once in MDB this can
+ // remove all the previously recorded batches and change the load op to clear with supplied
+ // color.
+ if (fLastFullClearBatch &&
+ fLastFullClearBatch->renderTargetUniqueID() == renderTarget->uniqueID()) {
+ // As currently implemented, fLastFullClearBatch should be the last batch because we would
+ // have cleared it when another batch was recorded.
+ SkASSERT(fRecordedBatches.back().fBatch.get() == fLastFullClearBatch);
+ fLastFullClearBatch->setColor(color);
+ return;
+ }
+ sk_sp<GrClearBatch> batch(GrClearBatch::Make(GrFixedClip::Disabled(), color, renderTarget));
+ if (batch.get() == this->recordBatch(batch.get(), batch->bounds())) {
+ fLastFullClearBatch = batch.get();
+ }
+}
+
+void GrDrawTarget::discard(GrRenderTarget* renderTarget) {
+ // Currently this just inserts a discard batch. However, once in MDB this can remove all the
+ // previously recorded batches and change the load op to discard.
+ if (this->caps()->discardRenderTargetSupport()) {
+ GrBatch* batch = new GrDiscardBatch(renderTarget);
+ this->recordBatch(batch, batch->bounds());
+ batch->unref();
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool GrDrawTarget::copySurface(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ GrBatch* batch = GrCopySurfaceBatch::Create(dst, src, srcRect, dstPoint);
+ if (!batch) {
+ return false;
+ }
+#ifdef ENABLE_MDB
+ this->addDependency(src);
+#endif
+
+ this->recordBatch(batch, batch->bounds());
+ batch->unref();
+ return true;
+}
+
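+// Two batches may be reordered relative to each other only if their clipped bounds do not
+// overlap, since overlapping draws must stay in painter's order.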
+static inline bool can_reorder(const SkRect& a, const SkRect& b) {
+ return a.fRight <= b.fLeft || a.fBottom <= b.fTop ||
+ b.fRight <= a.fLeft || b.fBottom <= a.fTop;
+}
+
+static void join(SkRect* out, const SkRect& a, const SkRect& b) {
+ SkASSERT(a.fLeft <= a.fRight && a.fTop <= a.fBottom);
+ SkASSERT(b.fLeft <= b.fRight && b.fTop <= b.fBottom);
+ out->fLeft = SkTMin(a.fLeft, b.fLeft);
+ out->fTop = SkTMin(a.fTop, b.fTop);
+ out->fRight = SkTMax(a.fRight, b.fRight);
+ out->fBottom = SkTMax(a.fBottom, b.fBottom);
+}
+
+GrBatch* GrDrawTarget::recordBatch(GrBatch* batch, const SkRect& clippedBounds) {
+ // A closed drawTarget should never receive new/more batches
+ SkASSERT(!this->isClosed());
+
+    // Check if there is a batch we can combine with by linearly searching back until we either
+ // 1) check every draw
+ // 2) intersect with something
+ // 3) find a 'blocker'
+ GR_AUDIT_TRAIL_ADDBATCH(fAuditTrail, batch);
+ GrBATCH_INFO("Re-Recording (%s, B%u)\n"
+ "\tBounds LRTB (%f, %f, %f, %f)\n",
+ batch->name(),
+ batch->uniqueID(),
+ batch->bounds().fLeft, batch->bounds().fRight,
+ batch->bounds().fTop, batch->bounds().fBottom);
+ GrBATCH_INFO(SkTabString(batch->dumpInfo(), 1).c_str());
+ GrBATCH_INFO("\tClipped Bounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
+ clippedBounds.fLeft, clippedBounds.fTop, clippedBounds.fRight,
+ clippedBounds.fBottom);
+ GrBATCH_INFO("\tOutcome:\n");
+ int maxCandidates = SkTMin(fMaxBatchLookback, fRecordedBatches.count());
+ if (maxCandidates) {
+ int i = 0;
+ while (true) {
+ GrBatch* candidate = fRecordedBatches.fromBack(i).fBatch.get();
+ // We cannot continue to search backwards if the render target changes
+ if (candidate->renderTargetUniqueID() != batch->renderTargetUniqueID()) {
+ GrBATCH_INFO("\t\tBreaking because of (%s, B%u) Rendertarget\n",
+ candidate->name(), candidate->uniqueID());
+ break;
+ }
+ if (candidate->combineIfPossible(batch, *this->caps())) {
+ GrBATCH_INFO("\t\tCombining with (%s, B%u)\n", candidate->name(),
+ candidate->uniqueID());
+ GR_AUDIT_TRAIL_BATCHING_RESULT_COMBINED(fAuditTrail, candidate, batch);
+ join(&fRecordedBatches.fromBack(i).fClippedBounds,
+ fRecordedBatches.fromBack(i).fClippedBounds, clippedBounds);
+ return candidate;
+ }
+ // Stop going backwards if we would cause a painter's order violation.
+ const SkRect& candidateBounds = fRecordedBatches.fromBack(i).fClippedBounds;
+ if (!can_reorder(candidateBounds, clippedBounds)) {
+ GrBATCH_INFO("\t\tIntersects with (%s, B%u)\n", candidate->name(),
+ candidate->uniqueID());
+ break;
+ }
+ ++i;
+ if (i == maxCandidates) {
+ GrBATCH_INFO("\t\tReached max lookback or beginning of batch array %d\n", i);
+ break;
+ }
+ }
+ } else {
+ GrBATCH_INFO("\t\tFirstBatch\n");
+ }
+ GR_AUDIT_TRAIL_BATCHING_RESULT_NEW(fAuditTrail, batch);
+ fRecordedBatches.emplace_back(RecordedBatch{sk_ref_sp(batch), clippedBounds});
+ fLastFullClearBatch = nullptr;
+ return batch;
+}
+
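+// Walk forward from each recorded batch and try to combine it with a later batch on the same
+// render target; on success the combined batch takes the later slot so draw order is preserved.
+// The search stops at a render target change, at an overlap that would violate painter's order,
+// or at the lookahead limit.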
+void GrDrawTarget::forwardCombine() {
+ if (fMaxBatchLookahead <= 0) {
+ return;
+ }
+ for (int i = 0; i < fRecordedBatches.count() - 2; ++i) {
+ GrBatch* batch = fRecordedBatches[i].fBatch.get();
+ const SkRect& batchBounds = fRecordedBatches[i].fClippedBounds;
+ int maxCandidateIdx = SkTMin(i + fMaxBatchLookahead, fRecordedBatches.count() - 1);
+ int j = i + 1;
+ while (true) {
+ GrBatch* candidate = fRecordedBatches[j].fBatch.get();
+ // We cannot continue to search if the render target changes
+ if (candidate->renderTargetUniqueID() != batch->renderTargetUniqueID()) {
+ GrBATCH_INFO("\t\tBreaking because of (%s, B%u) Rendertarget\n",
+ candidate->name(), candidate->uniqueID());
+ break;
+ }
+            if (j == i + 1) {
+ // We assume batch would have combined with candidate when the candidate was added
+ // via backwards combining in recordBatch.
+ SkASSERT(!batch->combineIfPossible(candidate, *this->caps()));
+ } else if (batch->combineIfPossible(candidate, *this->caps())) {
+ GrBATCH_INFO("\t\tCombining with (%s, B%u)\n", candidate->name(),
+ candidate->uniqueID());
+ GR_AUDIT_TRAIL_BATCHING_RESULT_COMBINED(fAuditTrail, batch, candidate);
+ fRecordedBatches[j].fBatch = std::move(fRecordedBatches[i].fBatch);
+ join(&fRecordedBatches[j].fClippedBounds, fRecordedBatches[j].fClippedBounds,
+ batchBounds);
+ break;
+ }
+            // Stop traversing if we would cause a painter's order violation.
+ const SkRect& candidateBounds = fRecordedBatches[j].fClippedBounds;
+ if (!can_reorder(candidateBounds, batchBounds)) {
+ GrBATCH_INFO("\t\tIntersects with (%s, B%u)\n", candidate->name(),
+ candidate->uniqueID());
+ break;
+ }
+ ++j;
+ if (j > maxCandidateIdx) {
+ GrBATCH_INFO("\t\tReached max lookahead or end of batch array %d\n", i);
+ break;
+ }
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrDrawTarget::clearStencilClip(const GrFixedClip& clip,
+ bool insideStencilMask,
+ GrRenderTarget* rt) {
+ GrBatch* batch = new GrClearStencilClipBatch(clip, insideStencilMask, rt);
+ this->recordBatch(batch, batch->bounds());
+ batch->unref();
+}
diff --git a/gfx/skia/skia/src/gpu/GrDrawTarget.h b/gfx/skia/skia/src/gpu/GrDrawTarget.h
new file mode 100644
index 000000000..35de23915
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDrawTarget.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDrawTarget_DEFINED
+#define GrDrawTarget_DEFINED
+
+#include "GrClip.h"
+#include "GrContext.h"
+#include "GrPathProcessor.h"
+#include "GrPrimitiveProcessor.h"
+#include "GrPathRendering.h"
+#include "GrXferProcessor.h"
+
+#include "batches/GrDrawBatch.h"
+
+#include "SkClipStack.h"
+#include "SkMatrix.h"
+#include "SkPath.h"
+#include "SkStringUtils.h"
+#include "SkStrokeRec.h"
+#include "SkTArray.h"
+#include "SkTLazy.h"
+#include "SkTypes.h"
+#include "SkXfermode.h"
+
+//#define ENABLE_MDB 1
+
+class GrAuditTrail;
+class GrBatch;
+class GrClearBatch;
+class GrClip;
+class GrCaps;
+class GrPath;
+class GrDrawPathBatchBase;
+class GrPipelineBuilder;
+
+class GrDrawTarget final : public SkRefCnt {
+public:
+ /** Options for GrDrawTarget behavior. */
+ struct Options {
+ Options ()
+ : fClipBatchToBounds(false)
+ , fDrawBatchBounds(false)
+ , fMaxBatchLookback(-1)
+ , fMaxBatchLookahead(-1) {}
+ bool fClipBatchToBounds;
+ bool fDrawBatchBounds;
+ int fMaxBatchLookback;
+ int fMaxBatchLookahead;
+ };
+
+ GrDrawTarget(GrRenderTarget*, GrGpu*, GrResourceProvider*, GrAuditTrail*, const Options&);
+
+ ~GrDrawTarget() override;
+
+ void makeClosed() {
+ fLastFullClearBatch = nullptr;
+        // We only close drawTargets when MDB is enabled. When MDB is disabled there is only
+ // ever one drawTarget and all calls will be funnelled into it.
+#ifdef ENABLE_MDB
+ this->setFlag(kClosed_Flag);
+#endif
+ this->forwardCombine();
+ }
+
+ bool isClosed() const { return this->isSetFlag(kClosed_Flag); }
+
+ // TODO: this entry point is only needed in the non-MDB world. Remove when
+ // we make the switch to MDB
+ void clearRT() { fRenderTarget = nullptr; }
+
+ /*
+ * Notify this drawTarget that it relies on the contents of 'dependedOn'
+ */
+ void addDependency(GrSurface* dependedOn);
+
+ /*
+ * Does this drawTarget depend on 'dependedOn'?
+ */
+ bool dependsOn(GrDrawTarget* dependedOn) const {
+ return fDependencies.find(dependedOn) >= 0;
+ }
+
+ /*
+ * Dump out the drawTarget dependency DAG
+ */
+ SkDEBUGCODE(void dump() const;)
+
+ /**
+ * Empties the draw buffer of any queued up draws.
+ */
+ void reset();
+
+ /**
+ * Together these two functions flush all queued up draws to GrCommandBuffer. The return value
+ * of drawBatches() indicates whether any commands were actually issued to the GPU.
+ */
+ void prepareBatches(GrBatchFlushState* flushState);
+ bool drawBatches(GrBatchFlushState* flushState);
+
+ /**
+ * Gets the capabilities of the draw target.
+ */
+ const GrCaps* caps() const { return fGpu->caps(); }
+
+ void drawBatch(const GrPipelineBuilder&, GrDrawContext*, const GrClip&, GrDrawBatch*);
+
+ void addBatch(sk_sp<GrBatch>);
+
+ /**
+ * Draws the path into user stencil bits. Upon return, all user stencil values
+ * inside the path will be nonzero. The path's fill must be either even/odd or
+     * winding (not inverse or hairline). It will respect the HW antialias boolean (if
+     * possible in the 3D API). Note that we will never have an inverse fill with a
+     * stencil path.
+ */
+ void stencilPath(GrDrawContext*,
+ const GrClip&,
+ bool useHWAA,
+ const SkMatrix& viewMatrix,
+ const GrPath*);
+
+ /** Clears the entire render target */
+ void fullClear(GrRenderTarget*, GrColor color);
+
+    /** Discards the contents of the render target. */
+ void discard(GrRenderTarget*);
+
+ /**
+ * Copies a pixel rectangle from one surface to another. This call may finalize
+ * reserved vertex/index data (as though a draw call was made). The src pixels
+ * copied are specified by srcRect. They are copied to a rect of the same
+ * size in dst with top left at dstPoint. If the src rect is clipped by the
+ * src bounds then pixel values in the dst rect corresponding to area clipped
+ * by the src rect are not overwritten. This method is not guaranteed to succeed
+ * depending on the type of surface, configs, etc, and the backend-specific
+ * limitations.
+ */
+ bool copySurface(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ gr_instanced::InstancedRendering* instancedRendering() const {
+ SkASSERT(fInstancedRendering);
+ return fInstancedRendering;
+ }
+
+private:
+ friend class GrDrawingManager; // for resetFlag & TopoSortTraits
+ friend class GrDrawContextPriv; // for clearStencilClip
+
+ enum Flags {
+ kClosed_Flag = 0x01, //!< This drawTarget can't accept any more batches
+
+ kWasOutput_Flag = 0x02, //!< Flag for topological sorting
+ kTempMark_Flag = 0x04, //!< Flag for topological sorting
+ };
+
+ void setFlag(uint32_t flag) {
+ fFlags |= flag;
+ }
+
+ void resetFlag(uint32_t flag) {
+ fFlags &= ~flag;
+ }
+
+ bool isSetFlag(uint32_t flag) const {
+ return SkToBool(fFlags & flag);
+ }
+
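+    // Hooks used by SkTTopoSort (see GrDrawingManager::internalFlush) to topologically sort the
+    // drawTargets by their dependencies before flushing.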
+ struct TopoSortTraits {
+ static void Output(GrDrawTarget* dt, int /* index */) {
+ dt->setFlag(GrDrawTarget::kWasOutput_Flag);
+ }
+ static bool WasOutput(const GrDrawTarget* dt) {
+ return dt->isSetFlag(GrDrawTarget::kWasOutput_Flag);
+ }
+ static void SetTempMark(GrDrawTarget* dt) {
+ dt->setFlag(GrDrawTarget::kTempMark_Flag);
+ }
+ static void ResetTempMark(GrDrawTarget* dt) {
+ dt->resetFlag(GrDrawTarget::kTempMark_Flag);
+ }
+ static bool IsTempMarked(const GrDrawTarget* dt) {
+ return dt->isSetFlag(GrDrawTarget::kTempMark_Flag);
+ }
+ static int NumDependencies(const GrDrawTarget* dt) {
+ return dt->fDependencies.count();
+ }
+ static GrDrawTarget* Dependency(GrDrawTarget* dt, int index) {
+ return dt->fDependencies[index];
+ }
+ };
+
+ // Returns the batch that the input batch was combined with or the input batch if it wasn't
+ // combined.
+ GrBatch* recordBatch(GrBatch*, const SkRect& clippedBounds);
+ void forwardCombine();
+
+ // Makes a copy of the dst if it is necessary for the draw. Returns false if a copy is required
+ // but couldn't be made. Otherwise, returns true. This method needs to be protected because it
+    // needs to be accessed by GLPrograms to set up a correct drawstate.
+ bool setupDstReadIfNecessary(const GrPipelineBuilder&,
+ GrRenderTarget*,
+ const GrClip&,
+ const GrPipelineOptimizations& optimizations,
+ GrXferProcessor::DstTexture*,
+ const SkRect& batchBounds);
+
+ void addDependency(GrDrawTarget* dependedOn);
+
+ // Used only by drawContextPriv.
+ void clearStencilClip(const GrFixedClip&, bool insideStencilMask, GrRenderTarget*);
+
+ struct RecordedBatch {
+ sk_sp<GrBatch> fBatch;
+ SkRect fClippedBounds;
+ };
+ SkSTArray<256, RecordedBatch, true> fRecordedBatches;
+ GrClearBatch* fLastFullClearBatch;
+ // The context is only in service of the GrClip, remove once it doesn't need this.
+ GrContext* fContext;
+ GrGpu* fGpu;
+ GrResourceProvider* fResourceProvider;
+ GrAuditTrail* fAuditTrail;
+
+ SkDEBUGCODE(int fDebugID;)
+ uint32_t fFlags;
+
+ // 'this' drawTarget relies on the output of the drawTargets in 'fDependencies'
+ SkTDArray<GrDrawTarget*> fDependencies;
+ GrRenderTarget* fRenderTarget;
+
+ bool fClipBatchToBounds;
+ bool fDrawBatchBounds;
+ int fMaxBatchLookback;
+ int fMaxBatchLookahead;
+
+ SkAutoTDelete<gr_instanced::InstancedRendering> fInstancedRendering;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrDrawingManager.cpp b/gfx/skia/skia/src/gpu/GrDrawingManager.cpp
new file mode 100644
index 000000000..5e9b68d88
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDrawingManager.cpp
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrDrawingManager.h"
+
+#include "GrContext.h"
+#include "GrDrawContext.h"
+#include "GrDrawTarget.h"
+#include "GrPathRenderingDrawContext.h"
+#include "GrResourceProvider.h"
+#include "GrSoftwarePathRenderer.h"
+#include "GrSurfacePriv.h"
+#include "SkSurface_Gpu.h"
+#include "SkTTopoSort.h"
+
+#include "instanced/InstancedRendering.h"
+
+#include "text/GrAtlasTextContext.h"
+#include "text/GrStencilAndCoverTextContext.h"
+
+using gr_instanced::InstancedRendering;
+
+void GrDrawingManager::cleanup() {
+ for (int i = 0; i < fDrawTargets.count(); ++i) {
+ fDrawTargets[i]->makeClosed(); // no drawTarget should receive a new command after this
+ fDrawTargets[i]->clearRT();
+
+ // We shouldn't need to do this, but it turns out some clients still hold onto drawtargets
+ // after a cleanup
+ fDrawTargets[i]->reset();
+ fDrawTargets[i]->unref();
+ }
+
+ fDrawTargets.reset();
+
+ delete fPathRendererChain;
+ fPathRendererChain = nullptr;
+ SkSafeSetNull(fSoftwarePathRenderer);
+}
+
+GrDrawingManager::~GrDrawingManager() {
+ this->cleanup();
+}
+
+void GrDrawingManager::abandon() {
+ fAbandoned = true;
+ for (int i = 0; i < fDrawTargets.count(); ++i) {
+ if (GrCaps::InstancedSupport::kNone != fContext->caps()->instancedSupport()) {
+ InstancedRendering* ir = fDrawTargets[i]->instancedRendering();
+ ir->resetGpuResources(InstancedRendering::ResetType::kAbandon);
+ }
+ }
+ this->cleanup();
+}
+
+void GrDrawingManager::freeGpuResources() {
+ // a path renderer may be holding onto resources
+ delete fPathRendererChain;
+ fPathRendererChain = nullptr;
+ SkSafeSetNull(fSoftwarePathRenderer);
+ for (int i = 0; i < fDrawTargets.count(); ++i) {
+ if (GrCaps::InstancedSupport::kNone != fContext->caps()->instancedSupport()) {
+ InstancedRendering* ir = fDrawTargets[i]->instancedRendering();
+ ir->resetGpuResources(InstancedRendering::ResetType::kDestroy);
+ }
+ }
+}
+
+void GrDrawingManager::reset() {
+ for (int i = 0; i < fDrawTargets.count(); ++i) {
+ fDrawTargets[i]->reset();
+ }
+ fFlushState.reset();
+}
+
+void GrDrawingManager::internalFlush(GrResourceCache::FlushType type) {
+ if (fFlushing || this->wasAbandoned()) {
+ return;
+ }
+ fFlushing = true;
+ bool flushed = false;
+ SkDEBUGCODE(bool result =)
+ SkTTopoSort<GrDrawTarget, GrDrawTarget::TopoSortTraits>(&fDrawTargets);
+ SkASSERT(result);
+
+ for (int i = 0; i < fDrawTargets.count(); ++i) {
+ fDrawTargets[i]->prepareBatches(&fFlushState);
+ }
+
+ // Enable this to print out verbose batching information
+#if 0
+ for (int i = 0; i < fDrawTargets.count(); ++i) {
+ SkDEBUGCODE(fDrawTargets[i]->dump();)
+ }
+#endif
+
+ // Upload all data to the GPU
+ fFlushState.preIssueDraws();
+
+ for (int i = 0; i < fDrawTargets.count(); ++i) {
+ if (fDrawTargets[i]->drawBatches(&fFlushState)) {
+ flushed = true;
+ }
+ }
+
+ SkASSERT(fFlushState.nextDrawToken() == fFlushState.nextTokenToFlush());
+
+ for (int i = 0; i < fDrawTargets.count(); ++i) {
+ fDrawTargets[i]->reset();
+#ifdef ENABLE_MDB
+ fDrawTargets[i]->unref();
+#endif
+ }
+
+#ifndef ENABLE_MDB
+ // When MDB is disabled we keep reusing the same drawTarget
+ if (fDrawTargets.count()) {
+ SkASSERT(fDrawTargets.count() == 1);
+ // Clear out this flag so the topological sort's SkTTopoSort_CheckAllUnmarked check
+ // won't bark
+ fDrawTargets[0]->resetFlag(GrDrawTarget::kWasOutput_Flag);
+ }
+#else
+ fDrawTargets.reset();
+#endif
+
+ fFlushState.reset();
+ // We always have to notify the cache when it requested a flush so it can reset its state.
+ if (flushed || type == GrResourceCache::FlushType::kCacheRequested) {
+ fContext->getResourceCache()->notifyFlushOccurred(type);
+ }
+ fFlushing = false;
+}
+
+void GrDrawingManager::prepareSurfaceForExternalIO(GrSurface* surface) {
+ if (this->wasAbandoned()) {
+ return;
+ }
+ SkASSERT(surface);
+ SkASSERT(surface->getContext() == fContext);
+
+ if (surface->surfacePriv().hasPendingIO()) {
+ this->flush();
+ }
+
+ GrRenderTarget* rt = surface->asRenderTarget();
+ if (fContext->getGpu() && rt) {
+ fContext->getGpu()->resolveRenderTarget(rt);
+ }
+}
+
+GrDrawTarget* GrDrawingManager::newDrawTarget(GrRenderTarget* rt) {
+ SkASSERT(fContext);
+
+#ifndef ENABLE_MDB
+ // When MDB is disabled we always just return the single drawTarget
+ if (fDrawTargets.count()) {
+ SkASSERT(fDrawTargets.count() == 1);
+ // In the non-MDB-world the same drawTarget gets reused for multiple render targets.
+ // Update this pointer so all the asserts are happy
+ rt->setLastDrawTarget(fDrawTargets[0]);
+ // DrawingManager gets the creation ref - this ref is for the caller
+ return SkRef(fDrawTargets[0]);
+ }
+#endif
+
+ GrDrawTarget* dt = new GrDrawTarget(rt, fContext->getGpu(), fContext->resourceProvider(),
+ fContext->getAuditTrail(), fOptionsForDrawTargets);
+
+ *fDrawTargets.append() = dt;
+
+ // DrawingManager gets the creation ref - this ref is for the caller
+ return SkRef(dt);
+}
+
+GrAtlasTextContext* GrDrawingManager::getAtlasTextContext() {
+ if (!fAtlasTextContext) {
+ fAtlasTextContext.reset(GrAtlasTextContext::Create());
+ }
+
+ return fAtlasTextContext.get();
+}
+
+/*
+ * This method finds a path renderer that can draw the specified path on
+ * the provided target.
+ * Due to its expense, the software path renderer has been split out so it can
+ * be individually allowed/disallowed via the "allowSW" boolean.
+ */
+GrPathRenderer* GrDrawingManager::getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
+ bool allowSW,
+ GrPathRendererChain::DrawType drawType,
+ GrPathRenderer::StencilSupport* stencilSupport) {
+
+ if (!fPathRendererChain) {
+ fPathRendererChain = new GrPathRendererChain(fContext, fOptionsForPathRendererChain);
+ }
+
+ GrPathRenderer* pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
+ if (!pr && allowSW) {
+ if (!fSoftwarePathRenderer) {
+ fSoftwarePathRenderer =
+ new GrSoftwarePathRenderer(fContext->textureProvider(),
+ fOptionsForPathRendererChain.fAllowPathMaskCaching);
+ }
+ pr = fSoftwarePathRenderer;
+ }
+
+ return pr;
+}
+
+sk_sp<GrDrawContext> GrDrawingManager::makeDrawContext(sk_sp<GrRenderTarget> rt,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps) {
+ if (this->wasAbandoned()) {
+ return nullptr;
+ }
+
+ // SkSurface catches bad color space usage at creation. This check handles anything that slips
+ // by, including internal usage. We allow a null color space here, for read/write pixels and
+ // other special code paths. If a color space is provided, though, enforce all other rules.
+ if (colorSpace && !SkSurface_Gpu::Valid(fContext, rt->config(), colorSpace.get())) {
+ SkDEBUGFAIL("Invalid config and colorspace combination");
+ return nullptr;
+ }
+
+ bool useDIF = false;
+ if (surfaceProps) {
+ useDIF = surfaceProps->isUseDeviceIndependentFonts();
+ }
+
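+    // When device-independent fonts are requested and the backend supports both path rendering
+    // and a multisampled stencil buffer, return the path rendering variant of GrDrawContext so
+    // text can be drawn with stencil-and-cover.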
+ if (useDIF && fContext->caps()->shaderCaps()->pathRenderingSupport() &&
+ rt->isStencilBufferMultisampled()) {
+ GrStencilAttachment* sb = fContext->resourceProvider()->attachStencilAttachment(rt.get());
+ if (sb) {
+ return sk_sp<GrDrawContext>(new GrPathRenderingDrawContext(
+ fContext, this, std::move(rt),
+ std::move(colorSpace), surfaceProps,
+ fContext->getAuditTrail(), fSingleOwner));
+ }
+ }
+
+ return sk_sp<GrDrawContext>(new GrDrawContext(fContext, this, std::move(rt),
+ std::move(colorSpace), surfaceProps,
+ fContext->getAuditTrail(),
+ fSingleOwner));
+}
diff --git a/gfx/skia/skia/src/gpu/GrDrawingManager.h b/gfx/skia/skia/src/gpu/GrDrawingManager.h
new file mode 100644
index 000000000..9fced3816
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrDrawingManager.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDrawingManager_DEFINED
+#define GrDrawingManager_DEFINED
+
+#include "text/GrAtlasTextContext.h"
+#include "GrDrawTarget.h"
+#include "GrBatchFlushState.h"
+#include "GrPathRendererChain.h"
+#include "GrPathRenderer.h"
+#include "GrResourceCache.h"
+#include "SkTDArray.h"
+
+class GrContext;
+class GrDrawContext;
+class GrSingleOwner;
+class GrSoftwarePathRenderer;
+
+// The GrDrawingManager allocates a new GrDrawContext for each GrRenderTarget
+// but all of them still land in the same GrDrawTarget!
+//
+// In the future this class will allocate a new GrDrawContext for
+// each GrRenderTarget/GrDrawTarget and manage the DAG.
+class GrDrawingManager {
+public:
+ ~GrDrawingManager();
+
+ bool wasAbandoned() const { return fAbandoned; }
+ void freeGpuResources();
+
+ sk_sp<GrDrawContext> makeDrawContext(sk_sp<GrRenderTarget> rt,
+ sk_sp<SkColorSpace>,
+ const SkSurfaceProps*);
+
+ // The caller automatically gets a ref on the returned drawTarget. It must
+ // be balanced by an unref call.
+ GrDrawTarget* newDrawTarget(GrRenderTarget* rt);
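+ // Illustrative balance of that ref (editor's note, not part of this patch; the caller
+ // code is hypothetical):
+ //
+ //     GrDrawTarget* dt = drawingManager->newDrawTarget(rt);
+ //     // ... record draws into dt ...
+ //     dt->unref();  // balances the ref handed out by newDrawTarget()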
+
+ GrContext* getContext() { return fContext; }
+
+ GrAtlasTextContext* getAtlasTextContext();
+
+ GrPathRenderer* getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
+ bool allowSW,
+ GrPathRendererChain::DrawType drawType,
+ GrPathRenderer::StencilSupport* stencilSupport = NULL);
+
+ void flushIfNecessary() {
+ if (fContext->getResourceCache()->requestsFlush()) {
+ this->internalFlush(GrResourceCache::kCacheRequested);
+ } else if (fIsImmediateMode) {
+ this->internalFlush(GrResourceCache::kImmediateMode);
+ }
+ }
+
+ static bool ProgramUnitTest(GrContext* context, int maxStages);
+
+ void prepareSurfaceForExternalIO(GrSurface*);
+
+private:
+ GrDrawingManager(GrContext* context, const GrDrawTarget::Options& optionsForDrawTargets,
+ const GrPathRendererChain::Options& optionsForPathRendererChain,
+ bool isImmediateMode, GrSingleOwner* singleOwner)
+ : fContext(context)
+ , fOptionsForDrawTargets(optionsForDrawTargets)
+ , fOptionsForPathRendererChain(optionsForPathRendererChain)
+ , fSingleOwner(singleOwner)
+ , fAbandoned(false)
+ , fAtlasTextContext(nullptr)
+ , fPathRendererChain(nullptr)
+ , fSoftwarePathRenderer(nullptr)
+ , fFlushState(context->getGpu(), context->resourceProvider())
+ , fFlushing(false)
+ , fIsImmediateMode(isImmediateMode) {
+ }
+
+ void abandon();
+ void cleanup();
+ void reset();
+ void flush() { this->internalFlush(GrResourceCache::FlushType::kExternal); }
+ void internalFlush(GrResourceCache::FlushType);
+
+ friend class GrContext; // for access to: ctor, abandon, reset & flush
+
+ static const int kNumPixelGeometries = 5; // The different pixel geometries
+ static const int kNumDFTOptions = 2; // DFT or no DFT
+
+ GrContext* fContext;
+ GrDrawTarget::Options fOptionsForDrawTargets;
+ GrPathRendererChain::Options fOptionsForPathRendererChain;
+
+ // In debug builds we guard against improper thread handling
+ GrSingleOwner* fSingleOwner;
+
+ bool fAbandoned;
+ SkTDArray<GrDrawTarget*> fDrawTargets;
+
+ SkAutoTDelete<GrAtlasTextContext> fAtlasTextContext;
+
+ GrPathRendererChain* fPathRendererChain;
+ GrSoftwarePathRenderer* fSoftwarePathRenderer;
+
+ GrBatchFlushState fFlushState;
+ bool fFlushing;
+
+ bool fIsImmediateMode;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrFixedClip.cpp b/gfx/skia/skia/src/gpu/GrFixedClip.cpp
new file mode 100644
index 000000000..7385028ba
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrFixedClip.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrFixedClip.h"
+
+#include "GrAppliedClip.h"
+#include "GrDrawContext.h"
+
+bool GrFixedClip::quickContains(const SkRect& rect) const {
+ if (fWindowRectsState.enabled()) {
+ return false;
+ }
+ return !fScissorState.enabled() || GrClip::IsInsideClip(fScissorState.rect(), rect);
+}
+
+void GrFixedClip::getConservativeBounds(int w, int h, SkIRect* devResult, bool* iior) const {
+ devResult->setXYWH(0, 0, w, h);
+ if (fScissorState.enabled()) {
+ if (!devResult->intersect(fScissorState.rect())) {
+ devResult->setEmpty();
+ }
+ }
+ if (iior) {
+ *iior = true;
+ }
+}
+
+bool GrFixedClip::isRRect(const SkRect& rtBounds, SkRRect* rr, bool* aa) const {
+ if (fWindowRectsState.enabled()) {
+ return false;
+ }
+ if (fScissorState.enabled()) {
+ SkRect rect = SkRect::Make(fScissorState.rect());
+ if (!rect.intersects(rtBounds)) {
+ return false;
+ }
+ rr->setRect(rect);
+ *aa = false;
+ return true;
+ }
+ return false;
+}
+
+bool GrFixedClip::apply(GrContext*, GrDrawContext* dc, bool, bool, GrAppliedClip* out) const {
+ if (fScissorState.enabled()) {
+ SkIRect tightScissor = SkIRect::MakeWH(dc->width(), dc->height());
+ if (!tightScissor.intersect(fScissorState.rect())) {
+ return false;
+ }
+ if (IsOutsideClip(tightScissor, out->clippedDrawBounds())) {
+ return false;
+ }
+ if (!IsInsideClip(fScissorState.rect(), out->clippedDrawBounds())) {
+ out->addScissor(tightScissor);
+ }
+ }
+
+ if (fWindowRectsState.enabled()) {
+ out->addWindowRectangles(fWindowRectsState);
+ }
+
+ return true;
+}
+
+const GrFixedClip& GrFixedClip::Disabled() {
+ static const GrFixedClip disabled = GrFixedClip();
+ return disabled;
+}
diff --git a/gfx/skia/skia/src/gpu/GrFixedClip.h b/gfx/skia/skia/src/gpu/GrFixedClip.h
new file mode 100644
index 000000000..6fb7d23ed
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrFixedClip.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrFixedClip_DEFINED
+#define GrFixedClip_DEFINED
+
+#include "GrClip.h"
+#include "GrScissorState.h"
+#include "GrWindowRectsState.h"
+
+/**
+ * GrFixedClip is a clip that gets implemented by fixed-function hardware.
+ */
+class GrFixedClip final : public GrClip {
+public:
+ GrFixedClip() = default;
+ explicit GrFixedClip(const SkIRect& scissorRect) : fScissorState(scissorRect) {}
+
+ const GrScissorState& scissorState() const { return fScissorState; }
+ bool scissorEnabled() const { return fScissorState.enabled(); }
+ const SkIRect& scissorRect() const { SkASSERT(scissorEnabled()); return fScissorState.rect(); }
+
+ void disableScissor() { fScissorState.setDisabled(); }
+
+ bool SK_WARN_UNUSED_RESULT intersect(const SkIRect& irect) {
+ return fScissorState.intersect(irect);
+ }
+
+ const GrWindowRectsState& windowRectsState() const { return fWindowRectsState; }
+ bool hasWindowRectangles() const { return fWindowRectsState.enabled(); }
+
+ void disableWindowRectangles() { fWindowRectsState.setDisabled(); }
+
+ void setWindowRectangles(const GrWindowRectangles& windows, const SkIPoint& origin,
+ GrWindowRectsState::Mode mode) {
+ fWindowRectsState.set(windows, origin, mode);
+ }
+
+ bool quickContains(const SkRect&) const override;
+ void getConservativeBounds(int w, int h, SkIRect* devResult, bool* iior) const override;
+ bool isRRect(const SkRect& rtBounds, SkRRect* rr, bool* aa) const override;
+ bool apply(GrContext*, GrDrawContext*, bool, bool, GrAppliedClip* out) const override;
+
+ static const GrFixedClip& Disabled();
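+
+ // Illustrative usage sketch (editor's note, not part of this patch; the rect values are
+ // hypothetical):
+ //
+ //     GrFixedClip clip(SkIRect::MakeWH(256, 256));        // scissor to a 256x256 rect
+ //     if (clip.quickContains(SkRect::MakeWH(100, 100))) {
+ //         // the clip cannot cut anything out of this draw, so it can be applied trivially
+ //     }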
+
+private:
+ GrScissorState fScissorState;
+ GrWindowRectsState fWindowRectsState;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrFragmentProcessor.cpp b/gfx/skia/skia/src/gpu/GrFragmentProcessor.cpp
new file mode 100644
index 000000000..123792d25
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrFragmentProcessor.cpp
@@ -0,0 +1,406 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrFragmentProcessor.h"
+#include "GrCoordTransform.h"
+#include "GrInvariantOutput.h"
+#include "GrPipeline.h"
+#include "GrProcOptInfo.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "effects/GrConstColorProcessor.h"
+#include "effects/GrXfermodeFragmentProcessor.h"
+
+GrFragmentProcessor::~GrFragmentProcessor() {
+ // If we got here then our ref count must have reached zero, so we will have converted refs
+ // to pending executions for all children.
+ for (int i = 0; i < fChildProcessors.count(); ++i) {
+ fChildProcessors[i]->completedExecution();
+ }
+}
+
+bool GrFragmentProcessor::isEqual(const GrFragmentProcessor& that) const {
+ if (this->classID() != that.classID() ||
+ !this->hasSameSamplers(that)) {
+ return false;
+ }
+ if (!this->hasSameTransforms(that)) {
+ return false;
+ }
+ if (!this->onIsEqual(that)) {
+ return false;
+ }
+ if (this->numChildProcessors() != that.numChildProcessors()) {
+ return false;
+ }
+ for (int i = 0; i < this->numChildProcessors(); ++i) {
+ if (!this->childProcessor(i).isEqual(that.childProcessor(i))) {
+ return false;
+ }
+ }
+ return true;
+}
+
+GrGLSLFragmentProcessor* GrFragmentProcessor::createGLSLInstance() const {
+ GrGLSLFragmentProcessor* glFragProc = this->onCreateGLSLInstance();
+ glFragProc->fChildProcessors.push_back_n(fChildProcessors.count());
+ for (int i = 0; i < fChildProcessors.count(); ++i) {
+ glFragProc->fChildProcessors[i] = fChildProcessors[i]->createGLSLInstance();
+ }
+ return glFragProc;
+}
+
+void GrFragmentProcessor::addTextureAccess(const GrTextureAccess* textureAccess) {
+ INHERITED::addTextureAccess(textureAccess);
+}
+
+void GrFragmentProcessor::addBufferAccess(const GrBufferAccess* bufferAccess) {
+ INHERITED::addBufferAccess(bufferAccess);
+}
+
+void GrFragmentProcessor::addCoordTransform(const GrCoordTransform* transform) {
+ fCoordTransforms.push_back(transform);
+ fUsesLocalCoords = true;
+ SkDEBUGCODE(transform->setInProcessor();)
+}
+
+int GrFragmentProcessor::registerChildProcessor(sk_sp<GrFragmentProcessor> child) {
+ this->combineRequiredFeatures(*child);
+
+ if (child->usesLocalCoords()) {
+ fUsesLocalCoords = true;
+ }
+ if (child->usesDistanceVectorField()) {
+ fUsesDistanceVectorField = true;
+ }
+
+ int index = fChildProcessors.count();
+ fChildProcessors.push_back(child.release());
+
+ return index;
+}
+
+void GrFragmentProcessor::notifyRefCntIsZero() const {
+ // See comment above GrProgramElement for a detailed explanation of why we do this.
+ for (int i = 0; i < fChildProcessors.count(); ++i) {
+ fChildProcessors[i]->addPendingExecution();
+ fChildProcessors[i]->unref();
+ }
+}
+
+bool GrFragmentProcessor::hasSameTransforms(const GrFragmentProcessor& that) const {
+ if (this->numCoordTransforms() != that.numCoordTransforms()) {
+ return false;
+ }
+ int count = this->numCoordTransforms();
+ for (int i = 0; i < count; ++i) {
+ if (this->coordTransform(i) != that.coordTransform(i)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+sk_sp<GrFragmentProcessor> GrFragmentProcessor::MulOutputByInputAlpha(
+ sk_sp<GrFragmentProcessor> fp) {
+ if (!fp) {
+ return nullptr;
+ }
+ return GrXfermodeFragmentProcessor::MakeFromDstProcessor(std::move(fp),
+ SkXfermode::kDstIn_Mode);
+}
+
+sk_sp<GrFragmentProcessor> GrFragmentProcessor::PremulInput(sk_sp<GrFragmentProcessor> fp) {
+
+ class PremulInputFragmentProcessor : public GrFragmentProcessor {
+ public:
+ PremulInputFragmentProcessor() {
+ this->initClassID<PremulInputFragmentProcessor>();
+ }
+
+ const char* name() const override { return "PremultiplyInput"; }
+
+ private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override {
+ class GLFP : public GrGLSLFragmentProcessor {
+ public:
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ fragBuilder->codeAppendf("%s = %s;", args.fOutputColor, args.fInputColor);
+ fragBuilder->codeAppendf("%s.rgb *= %s.a;",
+ args.fOutputColor, args.fInputColor);
+ }
+ };
+ return new GLFP;
+ }
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override {}
+
+ bool onIsEqual(const GrFragmentProcessor&) const override { return true; }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ inout->premulFourChannelColor();
+ }
+ };
+ if (!fp) {
+ return nullptr;
+ }
+ sk_sp<GrFragmentProcessor> fpPipeline[] = { sk_make_sp<PremulInputFragmentProcessor>(), fp};
+ return GrFragmentProcessor::RunInSeries(fpPipeline, 2);
+}
+
+sk_sp<GrFragmentProcessor> GrFragmentProcessor::MulOutputByInputUnpremulColor(
+ sk_sp<GrFragmentProcessor> fp) {
+
+ class PremulFragmentProcessor : public GrFragmentProcessor {
+ public:
+ PremulFragmentProcessor(sk_sp<GrFragmentProcessor> processor) {
+ this->initClassID<PremulFragmentProcessor>();
+ this->registerChildProcessor(processor);
+ }
+
+ const char* name() const override { return "Premultiply"; }
+
+ private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override {
+ class GLFP : public GrGLSLFragmentProcessor {
+ public:
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ this->emitChild(0, nullptr, args);
+ fragBuilder->codeAppendf("%s.rgb *= %s.rgb;", args.fOutputColor,
+ args.fInputColor);
+ fragBuilder->codeAppendf("%s *= %s.a;", args.fOutputColor, args.fInputColor);
+ }
+ };
+ return new GLFP;
+ }
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override {}
+
+ bool onIsEqual(const GrFragmentProcessor&) const override { return true; }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ // TODO: Add a helper to GrInvariantOutput that handles multiplying by color with flags?
+ if (!(inout->validFlags() & kA_GrColorComponentFlag)) {
+ inout->setToUnknown(GrInvariantOutput::kWill_ReadInput);
+ return;
+ }
+
+ GrInvariantOutput childOutput(GrColor_WHITE, kRGBA_GrColorComponentFlags, false);
+ this->childProcessor(0).computeInvariantOutput(&childOutput);
+
+ if (0 == GrColorUnpackA(inout->color()) || 0 == GrColorUnpackA(childOutput.color())) {
+ inout->mulByKnownFourComponents(0x0);
+ return;
+ }
+ GrColorComponentFlags commonFlags = childOutput.validFlags() & inout->validFlags();
+ GrColor c0 = GrPremulColor(inout->color());
+ GrColor c1 = childOutput.color();
+ GrColor color = 0x0;
+ if (commonFlags & kR_GrColorComponentFlag) {
+ color |= SkMulDiv255Round(GrColorUnpackR(c0), GrColorUnpackR(c1)) <<
+ GrColor_SHIFT_R;
+ }
+ if (commonFlags & kG_GrColorComponentFlag) {
+ color |= SkMulDiv255Round(GrColorUnpackG(c0), GrColorUnpackG(c1)) <<
+ GrColor_SHIFT_G;
+ }
+ if (commonFlags & kB_GrColorComponentFlag) {
+ color |= SkMulDiv255Round(GrColorUnpackB(c0), GrColorUnpackB(c1)) <<
+ GrColor_SHIFT_B;
+ }
+ inout->setToOther(commonFlags, color, GrInvariantOutput::kWill_ReadInput);
+ }
+ };
+ if (!fp) {
+ return nullptr;
+ }
+ return sk_sp<GrFragmentProcessor>(new PremulFragmentProcessor(std::move(fp)));
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> GrFragmentProcessor::OverrideInput(sk_sp<GrFragmentProcessor> fp,
+ GrColor4f color) {
+ class ReplaceInputFragmentProcessor : public GrFragmentProcessor {
+ public:
+ ReplaceInputFragmentProcessor(sk_sp<GrFragmentProcessor> child, GrColor4f color)
+ : fColor(color) {
+ this->initClassID<ReplaceInputFragmentProcessor>();
+ this->registerChildProcessor(std::move(child));
+ }
+
+ const char* name() const override { return "Replace Color"; }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override {
+ class GLFP : public GrGLSLFragmentProcessor {
+ public:
+ GLFP() : fHaveSetColor(false) {}
+ void emitCode(EmitArgs& args) override {
+ const char* colorName;
+ fColorUni = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType,
+ kDefault_GrSLPrecision,
+ "Color", &colorName);
+ this->emitChild(0, colorName, args);
+ }
+
+ private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& fp) override {
+ GrColor4f color = fp.cast<ReplaceInputFragmentProcessor>().fColor;
+ if (!fHaveSetColor || color != fPreviousColor) {
+ pdman.set4fv(fColorUni, 1, color.fRGBA);
+ fPreviousColor = color;
+ fHaveSetColor = true;
+ }
+ }
+
+ GrGLSLProgramDataManager::UniformHandle fColorUni;
+ bool fHaveSetColor;
+ GrColor4f fPreviousColor;
+ };
+
+ return new GLFP;
+ }
+
+ private:
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override
+ {}
+
+ bool onIsEqual(const GrFragmentProcessor& that) const override {
+ return fColor == that.cast<ReplaceInputFragmentProcessor>().fColor;
+ }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ inout->setToOther(kRGBA_GrColorComponentFlags, fColor.toGrColor(),
+ GrInvariantOutput::kWillNot_ReadInput);
+ this->childProcessor(0).computeInvariantOutput(inout);
+ }
+
+ GrColor4f fColor;
+ };
+
+ GrInvariantOutput childOut(0x0, kNone_GrColorComponentFlags, false);
+ fp->computeInvariantOutput(&childOut);
+ if (childOut.willUseInputColor()) {
+ return sk_sp<GrFragmentProcessor>(new ReplaceInputFragmentProcessor(std::move(fp), color));
+ } else {
+ return fp;
+ }
+}
+
+sk_sp<GrFragmentProcessor> GrFragmentProcessor::RunInSeries(sk_sp<GrFragmentProcessor>* series,
+ int cnt) {
+ class SeriesFragmentProcessor : public GrFragmentProcessor {
+ public:
+ SeriesFragmentProcessor(sk_sp<GrFragmentProcessor>* children, int cnt){
+ SkASSERT(cnt > 1);
+ this->initClassID<SeriesFragmentProcessor>();
+ for (int i = 0; i < cnt; ++i) {
+ this->registerChildProcessor(std::move(children[i]));
+ }
+ }
+
+ const char* name() const override { return "Series"; }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override {
+ class GLFP : public GrGLSLFragmentProcessor {
+ public:
+ void emitCode(EmitArgs& args) override {
+ // The first child's input color might be null.
+ SkString temp("out0");
+ this->emitChild(0, args.fInputColor, &temp, args);
+ SkString input = temp;
+ for (int i = 1; i < this->numChildProcessors() - 1; ++i) {
+ temp.printf("out%d", i);
+ this->emitChild(i, input.c_str(), &temp, args);
+ input = temp;
+ }
+ // The last child writes to our output variable.
+ this->emitChild(this->numChildProcessors() - 1, input.c_str(), args);
+ }
+ };
+ return new GLFP;
+ }
+
+ private:
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override {}
+
+ bool onIsEqual(const GrFragmentProcessor&) const override { return true; }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ GrProcOptInfo info;
+ info.calcWithInitialValues(fChildProcessors.begin(), fChildProcessors.count(),
+ inout->color(), inout->validFlags(), false, false);
+ for (int i = 0; i < this->numChildProcessors(); ++i) {
+ this->childProcessor(i).computeInvariantOutput(inout);
+ }
+ }
+ };
+
+ if (!cnt) {
+ return nullptr;
+ }
+
+ // Run through the series, do the invariant output processing, and look for eliminations.
+ GrProcOptInfo info;
+ info.calcWithInitialValues(sk_sp_address_as_pointer_address(series), cnt,
+ 0x0, kNone_GrColorComponentFlags, false, false);
+ if (kRGBA_GrColorComponentFlags == info.validFlags()) {
+ return GrConstColorProcessor::Make(info.color(), GrConstColorProcessor::kIgnore_InputMode);
+ }
+
+ SkTArray<sk_sp<GrFragmentProcessor>> replacementSeries;
+
+ int firstIdx = info.firstEffectiveProcessorIndex();
+ cnt -= firstIdx;
+ if (firstIdx > 0 && info.inputColorIsUsed()) {
+ sk_sp<GrFragmentProcessor> colorFP(GrConstColorProcessor::Make(
+ info.inputColorToFirstEffectiveProccesor(), GrConstColorProcessor::kIgnore_InputMode));
+ cnt += 1;
+ replacementSeries.reserve(cnt);
+ replacementSeries.emplace_back(std::move(colorFP));
+ for (int i = 0; i < cnt - 1; ++i) {
+ replacementSeries.emplace_back(std::move(series[firstIdx + i]));
+ }
+ series = replacementSeries.begin();
+ } else {
+ series += firstIdx;
+ }
+
+ if (1 == cnt) {
+ return series[0];
+ }
+ return sk_sp<GrFragmentProcessor>(new SeriesFragmentProcessor(series, cnt));
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GrFragmentProcessor::Iter::Iter(const GrPipeline& pipeline) {
+ for (int i = pipeline.numFragmentProcessors() - 1; i >= 0; --i) {
+ fFPStack.push_back(&pipeline.getFragmentProcessor(i));
+ }
+}
+
+const GrFragmentProcessor* GrFragmentProcessor::Iter::next() {
+ if (fFPStack.empty()) {
+ return nullptr;
+ }
+ const GrFragmentProcessor* back = fFPStack.back();
+ fFPStack.pop_back();
+ for (int i = back->numChildProcessors() - 1; i >= 0; --i) {
+ fFPStack.push_back(&back->childProcessor(i));
+ }
+ return back;
+}
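+
+// Illustrative sketch of walking a pipeline's fragment-processor tree with the Iter above
+// (editor's note, not part of this patch; the pipeline variable is hypothetical):
+//
+//     GrFragmentProcessor::Iter iter(pipeline);
+//     while (const GrFragmentProcessor* fp = iter.next()) {
+//         SkDebugf("%s\n", fp->name());   // visits each processor and its children, pre-order
+//     }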
+
diff --git a/gfx/skia/skia/src/gpu/GrGeometryProcessor.h b/gfx/skia/skia/src/gpu/GrGeometryProcessor.h
new file mode 100644
index 000000000..bc6f6ebbe
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGeometryProcessor.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGeometryProcessor_DEFINED
+#define GrGeometryProcessor_DEFINED
+
+#include "GrPrimitiveProcessor.h"
+
+/**
+ * A GrGeometryProcessor is a flexible method for rendering a primitive. The GrGeometryProcessor
+ * has complete control over vertex attributes and uniforms (aside from the render target), but it
+ * must obey the same contract as any GrPrimitiveProcessor, specifically it must emit a color and
+ * coverage into the fragment shader. Where this color and coverage come from is completely the
+ * responsibility of the GrGeometryProcessor.
+ */
+class GrGeometryProcessor : public GrPrimitiveProcessor {
+public:
+ GrGeometryProcessor()
+ : fWillUseGeoShader(false)
+ , fLocalCoordsType(kUnused_LocalCoordsType)
+ , fSampleShading(0.0) {}
+
+ bool willUseGeoShader() const override { return fWillUseGeoShader; }
+
+ bool hasExplicitLocalCoords() const override {
+ return kHasExplicit_LocalCoordsType == fLocalCoordsType;
+ }
+
+ /**
+ * Returns the minimum fraction of samples for which the fragment shader will be run. For
+ * instance, if sampleShading is 0.5 in MSAA16 mode, the fragment shader will run a minimum of
+ * 8 times per pixel. The default value is zero.
+ */
+ float getSampleShading() const override {
+ return fSampleShading;
+ }
+
+protected:
+ /**
+ * Subclasses call this from their constructor to register vertex attributes. Attributes
+ * will be padded to the nearest 4 bytes for performance reasons.
+ * TODO After deferred geometry, we should do all of this inline in GenerateGeometry alongside
+ * the struct used to actually populate the attributes. This is all extremely fragile; vertex
+ * attributes have to be added in the order they will appear in the struct which maps memory.
+ * The processor key should reflect the vertex attributes, or the lack thereof, in the
+ * GrGeometryProcessor.
+ */
+ const Attribute& addVertexAttrib(const char* name, GrVertexAttribType type,
+ GrSLPrecision precision = kDefault_GrSLPrecision) {
+ fAttribs.emplace_back(name, type, precision);
+ fVertexStride += fAttribs.back().fOffset;
+ return fAttribs.back();
+ }
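+
+ // Illustrative sketch of a subclass registering attributes in its constructor (editor's
+ // note, not part of this patch; "MyGP" and its members are hypothetical):
+ //
+ //     MyGP() {
+ //         this->initClassID<MyGP>();
+ //         fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType);
+ //         fInColor    = &this->addVertexAttrib("inColor", kVec4ub_GrVertexAttribType);
+ //     }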
+
+ void setWillUseGeoShader() { fWillUseGeoShader = true; }
+
+ /**
+ * If a GrFragmentProcessor in the GrPipeline needs local coords, we will provide them in one of
+ * three ways:
+ * 1) LocalCoordTransform * Position - in Shader
+ * 2) LocalCoordTransform * ExplicitLocalCoords- in Shader
+ * 3) A transformation on the CPU uploaded via vertex attribute
+ * TODO make this the GrBatch's responsibility
+ */
+ enum LocalCoordsType {
+ kUnused_LocalCoordsType,
+ kHasExplicit_LocalCoordsType,
+ kHasTransformed_LocalCoordsType
+ };
+
+ void setHasExplicitLocalCoords() {
+ SkASSERT(kUnused_LocalCoordsType == fLocalCoordsType);
+ fLocalCoordsType = kHasExplicit_LocalCoordsType;
+ }
+ void setHasTransformedLocalCoords() {
+ SkASSERT(kUnused_LocalCoordsType == fLocalCoordsType);
+ fLocalCoordsType = kHasTransformed_LocalCoordsType;
+ }
+
+ void setSampleShading(float sampleShading) {
+ fSampleShading = sampleShading;
+ }
+
+private:
+ bool fWillUseGeoShader;
+ LocalCoordsType fLocalCoordsType;
+ float fSampleShading;
+
+ typedef GrPrimitiveProcessor INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrGlyph.h b/gfx/skia/skia/src/gpu/GrGlyph.h
new file mode 100644
index 000000000..5e611ce16
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGlyph.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGlyph_DEFINED
+#define GrGlyph_DEFINED
+
+#include "GrBatchAtlas.h"
+#include "GrRect.h"
+#include "GrTypes.h"
+
+#include "SkChecksum.h"
+#include "SkFixed.h"
+#include "SkPath.h"
+
+class GrPlot;
+
+/* Need this to be quad-state:
+ - complete w/ image
+ - just metrics
+ - failed to get image, but has metrics
+ - failed to get metrics
+ */
+struct GrGlyph {
+ enum MaskStyle {
+ kCoverage_MaskStyle,
+ kDistance_MaskStyle
+ };
+
+ typedef uint32_t PackedID;
+
+ GrBatchAtlas::AtlasID fID;
+ SkPath* fPath;
+ PackedID fPackedID;
+ GrMaskFormat fMaskFormat;
+ GrIRect16 fBounds;
+ SkIPoint16 fAtlasLocation;
+ bool fTooLargeForAtlas;
+
+ void init(GrGlyph::PackedID packed, const SkIRect& bounds, GrMaskFormat format) {
+ fID = GrBatchAtlas::kInvalidAtlasID;
+ fPath = nullptr;
+ fPackedID = packed;
+ fBounds.set(bounds);
+ fMaskFormat = format;
+ fAtlasLocation.set(0, 0);
+ fTooLargeForAtlas = GrBatchAtlas::GlyphTooLargeForAtlas(bounds.width(), bounds.height());
+ }
+
+ void reset() {
+ if (fPath) {
+ delete fPath;
+ fPath = nullptr;
+ }
+ }
+
+ int width() const { return fBounds.width(); }
+ int height() const { return fBounds.height(); }
+ bool isEmpty() const { return fBounds.isEmpty(); }
+ uint16_t glyphID() const { return UnpackID(fPackedID); }
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ static inline unsigned ExtractSubPixelBitsFromFixed(SkFixed pos) {
+ // two most significant fraction bits from fixed-point
+ return (pos >> 14) & 3;
+ }
+
+ static inline PackedID Pack(uint16_t glyphID, SkFixed x, SkFixed y, MaskStyle ms) {
+ x = ExtractSubPixelBitsFromFixed(x);
+ y = ExtractSubPixelBitsFromFixed(y);
+ int dfFlag = (ms == kDistance_MaskStyle) ? 0x1 : 0x0;
+ return (dfFlag << 20) | (x << 18) | (y << 16) | glyphID;
+ }
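+
+ // Worked example (editor's note, not part of this patch): for glyphID 0x1234 drawn as a
+ // distance field at subpixel offset x = 0.25 (SkFixed 0x4000, so the top two fraction bits
+ // are 01) and y = 0, Pack() yields (1 << 20) | (1 << 18) | (0 << 16) | 0x1234 = 0x141234.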
+
+ static inline SkFixed UnpackFixedX(PackedID packed) {
+ return ((packed >> 18) & 3) << 14;
+ }
+
+ static inline SkFixed UnpackFixedY(PackedID packed) {
+ return ((packed >> 16) & 3) << 14;
+ }
+
+ static inline MaskStyle UnpackMaskStyle(PackedID packed) {
+ return ((packed >> 20) & 1) ? kDistance_MaskStyle : kCoverage_MaskStyle;
+ }
+
+ static inline uint16_t UnpackID(PackedID packed) {
+ return (uint16_t)packed;
+ }
+
+ static inline const GrGlyph::PackedID& GetKey(const GrGlyph& glyph) {
+ return glyph.fPackedID;
+ }
+
+ static inline uint32_t Hash(GrGlyph::PackedID key) {
+ return SkChecksum::Mix(key);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrGpu.cpp b/gfx/skia/skia/src/gpu/GrGpu.cpp
new file mode 100644
index 000000000..0de9fedb8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGpu.cpp
@@ -0,0 +1,505 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrGpu.h"
+
+#include "GrBuffer.h"
+#include "GrCaps.h"
+#include "GrContext.h"
+#include "GrGpuResourcePriv.h"
+#include "GrMesh.h"
+#include "GrPathRendering.h"
+#include "GrPipeline.h"
+#include "GrResourceCache.h"
+#include "GrResourceProvider.h"
+#include "GrRenderTargetPriv.h"
+#include "GrStencilAttachment.h"
+#include "GrSurfacePriv.h"
+#include "GrTexturePriv.h"
+#include "SkMathPriv.h"
+
+GrMesh& GrMesh::operator =(const GrMesh& di) {
+ fPrimitiveType = di.fPrimitiveType;
+ fStartVertex = di.fStartVertex;
+ fStartIndex = di.fStartIndex;
+ fVertexCount = di.fVertexCount;
+ fIndexCount = di.fIndexCount;
+
+ fInstanceCount = di.fInstanceCount;
+ fVerticesPerInstance = di.fVerticesPerInstance;
+ fIndicesPerInstance = di.fIndicesPerInstance;
+ fMaxInstancesPerDraw = di.fMaxInstancesPerDraw;
+
+ fVertexBuffer.reset(di.vertexBuffer());
+ fIndexBuffer.reset(di.indexBuffer());
+
+ return *this;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrGpu::GrGpu(GrContext* context)
+ : fResetTimestamp(kExpiredTimestamp+1)
+ , fResetBits(kAll_GrBackendState)
+ , fContext(context) {
+ fMultisampleSpecs.emplace_back(0, 0, nullptr); // Index 0 is an invalid unique id.
+}
+
+GrGpu::~GrGpu() {}
+
+void GrGpu::disconnect(DisconnectType) {}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool GrGpu::makeCopyForTextureParams(int width, int height, const GrTextureParams& textureParams,
+ GrTextureProducer::CopyParams* copyParams) const {
+ const GrCaps& caps = *this->caps();
+ if (textureParams.isTiled() && !caps.npotTextureTileSupport() &&
+ (!SkIsPow2(width) || !SkIsPow2(height))) {
+ copyParams->fWidth = GrNextPow2(width);
+ copyParams->fHeight = GrNextPow2(height);
+ switch (textureParams.filterMode()) {
+ case GrTextureParams::kNone_FilterMode:
+ copyParams->fFilter = GrTextureParams::kNone_FilterMode;
+ break;
+ case GrTextureParams::kBilerp_FilterMode:
+ case GrTextureParams::kMipMap_FilterMode:
+ // We are only ever scaling up so no reason to ever indicate kMipMap.
+ copyParams->fFilter = GrTextureParams::kBilerp_FilterMode;
+ break;
+ }
+ return true;
+ }
+ return false;
+}
+
+static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin, bool renderTarget) {
+ // By default, GrRenderTargets use GL's normal orientation so that they
+ // can be drawn to by the outside world without the client having
+ // to render upside down.
+ if (kDefault_GrSurfaceOrigin == origin) {
+ return renderTarget ? kBottomLeft_GrSurfaceOrigin : kTopLeft_GrSurfaceOrigin;
+ } else {
+ return origin;
+ }
+}
+
+/**
+ * Prior to creating a texture, make sure the type of texture being created is
+ * supported by calling check_texture_creation_params.
+ *
+ * @param caps The capabilities of the GL device.
+ * @param desc The descriptor of the texture to create.
+ * @param isRT Indicates if the texture can be a render target.
+ */
+static bool check_texture_creation_params(const GrCaps& caps, const GrSurfaceDesc& desc,
+ bool* isRT, const SkTArray<GrMipLevel>& texels) {
+ if (!caps.isConfigTexturable(desc.fConfig)) {
+ return false;
+ }
+
+ *isRT = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
+ if (*isRT && !caps.isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
+ return false;
+ }
+
+ // We currently do not support multisampled textures
+ if (!*isRT && desc.fSampleCnt > 0) {
+ return false;
+ }
+
+ if (*isRT) {
+ int maxRTSize = caps.maxRenderTargetSize();
+ if (desc.fWidth > maxRTSize || desc.fHeight > maxRTSize) {
+ return false;
+ }
+ } else {
+ int maxSize = caps.maxTextureSize();
+ if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
+ return false;
+ }
+ }
+
+ for (int i = 0; i < texels.count(); ++i) {
+ if (!texels[i].fPixels) {
+ return false;
+ }
+ }
+ return true;
+}
+
+GrTexture* GrGpu::createTexture(const GrSurfaceDesc& origDesc, SkBudgeted budgeted,
+ const SkTArray<GrMipLevel>& texels) {
+ GrSurfaceDesc desc = origDesc;
+
+ const GrCaps* caps = this->caps();
+ bool isRT = false;
+ bool textureCreationParamsValid = check_texture_creation_params(*caps, desc, &isRT, texels);
+ if (!textureCreationParamsValid) {
+ return nullptr;
+ }
+
+ desc.fSampleCnt = SkTMin(desc.fSampleCnt, caps->maxSampleCount());
+ // Attempt to catch un- or wrongly initialized sample counts.
+ SkASSERT(desc.fSampleCnt >= 0 && desc.fSampleCnt <= 64);
+
+ desc.fOrigin = resolve_origin(desc.fOrigin, isRT);
+
+ GrTexture* tex = nullptr;
+
+ if (GrPixelConfigIsCompressed(desc.fConfig)) {
+ // We shouldn't be rendering into this
+ SkASSERT(!isRT);
+ SkASSERT(0 == desc.fSampleCnt);
+
+ if (!caps->npotTextureTileSupport() &&
+ (!SkIsPow2(desc.fWidth) || !SkIsPow2(desc.fHeight))) {
+ return nullptr;
+ }
+
+ this->handleDirtyContext();
+ tex = this->onCreateCompressedTexture(desc, budgeted, texels);
+ } else {
+ this->handleDirtyContext();
+ tex = this->onCreateTexture(desc, budgeted, texels);
+ }
+ if (tex) {
+ if (!caps->reuseScratchTextures() && !isRT) {
+ tex->resourcePriv().removeScratchKey();
+ }
+ fStats.incTextureCreates();
+ if (!texels.empty()) {
+ if (texels[0].fPixels) {
+ fStats.incTextureUploads();
+ }
+ }
+ // This is a current workaround to get discards into newly created textures. Once we are in
+ // the MDB world, we should remove this code and rely on the draw target having specified load
+ // operations.
+ if (isRT && texels.empty()) {
+ GrRenderTarget* rt = tex->asRenderTarget();
+ SkASSERT(rt);
+ rt->discard();
+ }
+ }
+ return tex;
+}
+
+GrTexture* GrGpu::wrapBackendTexture(const GrBackendTextureDesc& desc, GrWrapOwnership ownership) {
+ this->handleDirtyContext();
+ if (!this->caps()->isConfigTexturable(desc.fConfig)) {
+ return nullptr;
+ }
+ if ((desc.fFlags & kRenderTarget_GrBackendTextureFlag) &&
+ !this->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
+ return nullptr;
+ }
+ int maxSize = this->caps()->maxTextureSize();
+ if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
+ return nullptr;
+ }
+ GrTexture* tex = this->onWrapBackendTexture(desc, ownership);
+ if (nullptr == tex) {
+ return nullptr;
+ }
+ // TODO: defer this and attach dynamically
+ GrRenderTarget* tgt = tex->asRenderTarget();
+ if (tgt && !fContext->resourceProvider()->attachStencilAttachment(tgt)) {
+ tex->unref();
+ return nullptr;
+ } else {
+ return tex;
+ }
+}
+
+GrRenderTarget* GrGpu::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc,
+ GrWrapOwnership ownership) {
+ if (!this->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
+ return nullptr;
+ }
+ this->handleDirtyContext();
+ return this->onWrapBackendRenderTarget(desc, ownership);
+}
+
+GrRenderTarget* GrGpu::wrapBackendTextureAsRenderTarget(const GrBackendTextureDesc& desc) {
+ this->handleDirtyContext();
+ if (!(desc.fFlags & kRenderTarget_GrBackendTextureFlag)) {
+ return nullptr;
+ }
+ if (!this->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
+ return nullptr;
+ }
+ int maxSize = this->caps()->maxTextureSize();
+ if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
+ return nullptr;
+ }
+ return this->onWrapBackendTextureAsRenderTarget(desc);
+}
+
+GrBuffer* GrGpu::createBuffer(size_t size, GrBufferType intendedType,
+ GrAccessPattern accessPattern, const void* data) {
+ this->handleDirtyContext();
+ GrBuffer* buffer = this->onCreateBuffer(size, intendedType, accessPattern, data);
+ if (!this->caps()->reuseScratchBuffers()) {
+ buffer->resourcePriv().removeScratchKey();
+ }
+ return buffer;
+}
+
+gr_instanced::InstancedRendering* GrGpu::createInstancedRendering() {
+ SkASSERT(GrCaps::InstancedSupport::kNone != this->caps()->instancedSupport());
+ return this->onCreateInstancedRendering();
+}
+
+bool GrGpu::copySurface(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ SkASSERT(dst && src);
+ this->handleDirtyContext();
+ return this->onCopySurface(dst, src, srcRect, dstPoint);
+}
+
+bool GrGpu::getReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
+ GrPixelConfig readConfig, DrawPreference* drawPreference,
+ ReadPixelTempDrawInfo* tempDrawInfo) {
+ SkASSERT(drawPreference);
+ SkASSERT(tempDrawInfo);
+ SkASSERT(kGpuPrefersDraw_DrawPreference != *drawPreference);
+
+ // We currently do not support reading into a compressed buffer
+ if (GrPixelConfigIsCompressed(readConfig)) {
+ return false;
+ }
+
+ // We currently do not support reading into the packed formats 565 or 4444 as they are not
+ // required to have read back support on all devices and backends.
+ if (kRGB_565_GrPixelConfig == readConfig || kRGBA_4444_GrPixelConfig == readConfig) {
+ return false;
+ }
+
+ if (!this->onGetReadPixelsInfo(srcSurface, width, height, rowBytes, readConfig, drawPreference,
+ tempDrawInfo)) {
+ return false;
+ }
+
+ // Check to see if we're going to request that the caller draw when drawing is not possible.
+ if (!srcSurface->asTexture() ||
+ !this->caps()->isConfigRenderable(tempDrawInfo->fTempSurfaceDesc.fConfig, false)) {
+ // If we don't have a fallback to a straight read then fail.
+ if (kRequireDraw_DrawPreference == *drawPreference) {
+ return false;
+ }
+ *drawPreference = kNoDraw_DrawPreference;
+ }
+
+ return true;
+}
+bool GrGpu::getWritePixelsInfo(GrSurface* dstSurface, int width, int height,
+ GrPixelConfig srcConfig, DrawPreference* drawPreference,
+ WritePixelTempDrawInfo* tempDrawInfo) {
+ SkASSERT(drawPreference);
+ SkASSERT(tempDrawInfo);
+ SkASSERT(kGpuPrefersDraw_DrawPreference != *drawPreference);
+
+ if (GrPixelConfigIsCompressed(dstSurface->desc().fConfig) &&
+ dstSurface->desc().fConfig != srcConfig) {
+ return false;
+ }
+
+ if (SkToBool(dstSurface->asRenderTarget())) {
+ if (this->caps()->useDrawInsteadOfAllRenderTargetWrites()) {
+ ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
+ } else if (this->caps()->useDrawInsteadOfPartialRenderTargetWrite() &&
+ (width < dstSurface->width() || height < dstSurface->height())) {
+ ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
+ }
+ }
+
+ if (!this->onGetWritePixelsInfo(dstSurface, width, height, srcConfig, drawPreference,
+ tempDrawInfo)) {
+ return false;
+ }
+
+ // Check to see if we're going to request that the caller draw when drawing is not possible.
+ if (!dstSurface->asRenderTarget() ||
+ !this->caps()->isConfigTexturable(tempDrawInfo->fTempSurfaceDesc.fConfig)) {
+ // If we don't have a fallback to a straight upload then fail.
+ if (kRequireDraw_DrawPreference == *drawPreference ||
+ !this->caps()->isConfigTexturable(srcConfig)) {
+ return false;
+ }
+ *drawPreference = kNoDraw_DrawPreference;
+ }
+ return true;
+}
+
+bool GrGpu::readPixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config, void* buffer,
+ size_t rowBytes) {
+ this->handleDirtyContext();
+
+ // We cannot read pixels into a compressed buffer
+ if (GrPixelConfigIsCompressed(config)) {
+ return false;
+ }
+
+ size_t bpp = GrBytesPerPixel(config);
+ if (!GrSurfacePriv::AdjustReadPixelParams(surface->width(), surface->height(), bpp,
+ &left, &top, &width, &height,
+ &buffer,
+ &rowBytes)) {
+ return false;
+ }
+
+ return this->onReadPixels(surface,
+ left, top, width, height,
+ config, buffer,
+ rowBytes);
+}
+
+bool GrGpu::writePixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config, const SkTArray<GrMipLevel>& texels) {
+ if (!surface) {
+ return false;
+ }
+ for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
+ if (!texels[currentMipLevel].fPixels ) {
+ return false;
+ }
+ }
+
+ this->handleDirtyContext();
+ if (this->onWritePixels(surface, left, top, width, height, config, texels)) {
+ SkIRect rect = SkIRect::MakeXYWH(left, top, width, height);
+ this->didWriteToSurface(surface, &rect, texels.count());
+ fStats.incTextureUploads();
+ return true;
+ }
+ return false;
+}
+
+bool GrGpu::writePixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config, const void* buffer,
+ size_t rowBytes) {
+ GrMipLevel mipLevel;
+ mipLevel.fPixels = buffer;
+ mipLevel.fRowBytes = rowBytes;
+ SkSTArray<1, GrMipLevel> texels;
+ texels.push_back(mipLevel);
+
+ return this->writePixels(surface, left, top, width, height, config, texels);
+}
+
+bool GrGpu::transferPixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config, GrBuffer* transferBuffer,
+ size_t offset, size_t rowBytes, GrFence* fence) {
+ SkASSERT(transferBuffer);
+ SkASSERT(fence);
+
+ this->handleDirtyContext();
+ if (this->onTransferPixels(surface, left, top, width, height, config,
+ transferBuffer, offset, rowBytes)) {
+ SkIRect rect = SkIRect::MakeXYWH(left, top, width, height);
+ this->didWriteToSurface(surface, &rect);
+ fStats.incTransfersToTexture();
+
+ if (*fence) {
+ this->deleteFence(*fence);
+ }
+ *fence = this->insertFence();
+
+ return true;
+ }
+ return false;
+}
+
+void GrGpu::resolveRenderTarget(GrRenderTarget* target) {
+ SkASSERT(target);
+ this->handleDirtyContext();
+ this->onResolveRenderTarget(target);
+}
+
+void GrGpu::didWriteToSurface(GrSurface* surface, const SkIRect* bounds, uint32_t mipLevels) const {
+ SkASSERT(surface);
+ // Mark any MIP chain and resolve buffer as dirty unless an empty bounds rect was explicitly given.
+ if (nullptr == bounds || !bounds->isEmpty()) {
+ if (GrRenderTarget* target = surface->asRenderTarget()) {
+ target->flagAsNeedingResolve(bounds);
+ }
+ GrTexture* texture = surface->asTexture();
+ if (texture && 1 == mipLevels) {
+ texture->texturePriv().dirtyMipMaps(true);
+ }
+ }
+}
+
+const GrGpu::MultisampleSpecs& GrGpu::getMultisampleSpecs(GrRenderTarget* rt,
+ const GrStencilSettings& stencil) {
+ SkASSERT(rt->desc().fSampleCnt > 1);
+
+#ifndef SK_DEBUG
+ // In debug mode we query the multisample info every time to verify the caching is correct.
+ if (uint8_t id = rt->renderTargetPriv().accessMultisampleSpecsID()) {
+ SkASSERT(id > 0 && id < fMultisampleSpecs.count());
+ return fMultisampleSpecs[id];
+ }
+#endif
+
+ int effectiveSampleCnt;
+ SkSTArray<16, SkPoint, true> pattern;
+ this->onGetMultisampleSpecs(rt, stencil, &effectiveSampleCnt, &pattern);
+ SkASSERT(effectiveSampleCnt >= rt->desc().fSampleCnt);
+
+ uint8_t id;
+ if (this->caps()->sampleLocationsSupport()) {
+ SkASSERT(pattern.count() == effectiveSampleCnt);
+ const auto& insertResult = fMultisampleSpecsIdMap.insert(
+ MultisampleSpecsIdMap::value_type(pattern, SkTMin(fMultisampleSpecs.count(), 255)));
+ id = insertResult.first->second;
+ if (insertResult.second) {
+ // This means the insert did not find the pattern in the map already, and therefore an
+ // actual insertion took place. (We don't expect to see many unique sample patterns.)
+ const SkPoint* sampleLocations = insertResult.first->first.begin();
+ SkASSERT(id == fMultisampleSpecs.count());
+ fMultisampleSpecs.emplace_back(id, effectiveSampleCnt, sampleLocations);
+ }
+ } else {
+ id = effectiveSampleCnt;
+ for (int i = fMultisampleSpecs.count(); i <= id; ++i) {
+ fMultisampleSpecs.emplace_back(i, i, nullptr);
+ }
+ }
+ SkASSERT(id > 0);
+ SkASSERT(!rt->renderTargetPriv().accessMultisampleSpecsID() ||
+ rt->renderTargetPriv().accessMultisampleSpecsID() == id);
+
+ rt->renderTargetPriv().accessMultisampleSpecsID() = id;
+ return fMultisampleSpecs[id];
+}
+
+bool GrGpu::SamplePatternComparator::operator()(const SamplePattern& a,
+ const SamplePattern& b) const {
+ if (a.count() != b.count()) {
+ return a.count() < b.count();
+ }
+ for (int i = 0; i < a.count(); ++i) {
+ // This doesn't have geometric meaning. We just need to define an ordering for std::map.
+ if (a[i].x() != b[i].x()) {
+ return a[i].x() < b[i].x();
+ }
+ if (a[i].y() != b[i].y()) {
+ return a[i].y() < b[i].y();
+ }
+ }
+ return false; // Equal.
+}
diff --git a/gfx/skia/skia/src/gpu/GrGpu.h b/gfx/skia/skia/src/gpu/GrGpu.h
new file mode 100644
index 000000000..b8703dc68
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGpu.h
@@ -0,0 +1,611 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGpu_DEFINED
+#define GrGpu_DEFINED
+
+#include "GrGpuCommandBuffer.h"
+#include "GrProgramDesc.h"
+#include "GrSwizzle.h"
+#include "GrAllocator.h"
+#include "GrTextureParamsAdjuster.h"
+#include "GrTypes.h"
+#include "GrXferProcessor.h"
+#include "SkPath.h"
+#include "SkTArray.h"
+#include <map>
+
+class GrBatchTracker;
+class GrBuffer;
+class GrContext;
+struct GrContextOptions;
+class GrGLContext;
+class GrMesh;
+class GrNonInstancedVertices;
+class GrPath;
+class GrPathRange;
+class GrPathRenderer;
+class GrPathRendererChain;
+class GrPathRendering;
+class GrPipeline;
+class GrPrimitiveProcessor;
+class GrRenderTarget;
+class GrStencilAttachment;
+class GrStencilSettings;
+class GrSurface;
+class GrTexture;
+
+namespace gr_instanced { class InstancedRendering; }
+
+class GrGpu : public SkRefCnt {
+public:
+ /**
+ * Create an instance of GrGpu that matches the specified backend. If the requested backend is
+ * not supported (at compile-time or run-time) this returns nullptr. The context will not be
+ * fully constructed and should not be used by GrGpu until after this function returns.
+ */
+ static GrGpu* Create(GrBackend, GrBackendContext, const GrContextOptions&, GrContext* context);
+
+ ////////////////////////////////////////////////////////////////////////////
+
+ GrGpu(GrContext* context);
+ ~GrGpu() override;
+
+ GrContext* getContext() { return fContext; }
+ const GrContext* getContext() const { return fContext; }
+
+ /**
+ * Gets the capabilities of the draw target.
+ */
+ const GrCaps* caps() const { return fCaps.get(); }
+
+ GrPathRendering* pathRendering() { return fPathRendering.get(); }
+
+ enum class DisconnectType {
+ // No cleanup should be attempted, immediately cease making backend API calls
+ kAbandon,
+ // Free allocated resources (not known by GrResourceCache) before returning and
+ // ensure no backend 3D API calls will be made after disconnect() returns.
+ kCleanup,
+ };
+
+ // Called by GrContext when the underlying backend context is already or will be destroyed
+ // before GrContext.
+ virtual void disconnect(DisconnectType);
+
+ /**
+ * The GrGpu object normally assumes that no outsider is setting state
+ * within the underlying 3D API's context/device/whatever. This call informs
+ * the GrGpu that the state was modified and it shouldn't make assumptions
+ * about the state.
+ */
+ void markContextDirty(uint32_t state = kAll_GrBackendState) { fResetBits |= state; }
+
+ /**
+ * Creates a texture object. If kRenderTarget_GrSurfaceFlag the texture can
+ * be used as a render target by calling GrTexture::asRenderTarget(). Not all
+ * pixel configs can be used as render targets. Support for configs as textures
+ * or render targets can be checked using GrCaps.
+ *
+ * @param desc describes the texture to be created.
+ * @param budgeted does this texture count against the resource cache budget?
+ * @param texels array of mipmap levels containing texel data to load.
+ * Each level begins with full-size palette data for paletted textures.
+ * For compressed formats the level contains the compressed pixel data.
+ * Otherwise, it contains width*height texels. If there is only one
+ * element and it contains nullptr fPixels, texture data is
+ * uninitialized.
+ * @return The texture object if successful, otherwise nullptr.
+ */
+ GrTexture* createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
+ const SkTArray<GrMipLevel>& texels);
+
+ /**
+ * Simplified createTexture() interface for when there is no initial texel data to upload.
+ */
+ GrTexture* createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted) {
+ return this->createTexture(desc, budgeted, SkTArray<GrMipLevel>());
+ }
+
+ /** Simplified createTexture() interface for when there is only a base level */
+ GrTexture* createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted, const void* level0Data,
+ size_t rowBytes) {
+ SkASSERT(level0Data);
+ GrMipLevel level = { level0Data, rowBytes };
+ SkSTArray<1, GrMipLevel> array;
+ array.push_back() = level;
+ return this->createTexture(desc, budgeted, array);
+ }
+
+ /**
+ * Implements GrTextureProvider::wrapBackendTexture
+ */
+ GrTexture* wrapBackendTexture(const GrBackendTextureDesc&, GrWrapOwnership);
+
+ /**
+ * Implements GrTextureProvider::wrapBackendRenderTarget
+ */
+ GrRenderTarget* wrapBackendRenderTarget(const GrBackendRenderTargetDesc&, GrWrapOwnership);
+
+ /**
+ * Implements GrTextureProvider::wrapBackendTextureAsRenderTarget
+ */
+ GrRenderTarget* wrapBackendTextureAsRenderTarget(const GrBackendTextureDesc&);
+
+ /**
+ * Creates a buffer in GPU memory. For a client-side buffer use GrBuffer::CreateCPUBacked.
+ *
+ * @param size size of buffer to create.
+ * @param intendedType hint to the graphics subsystem about what the buffer will be used for.
+ * @param accessPattern hint to the graphics subsystem about how the data will be accessed.
+ * @param data optional data with which to initialize the buffer.
+ *
+ * @return the buffer if successful, otherwise nullptr.
+ */
+ GrBuffer* createBuffer(size_t size, GrBufferType intendedType, GrAccessPattern accessPattern,
+ const void* data = nullptr);
+
+ /**
+ * Creates an instanced rendering object if it is supported on this platform.
+ */
+ gr_instanced::InstancedRendering* createInstancedRendering();
+
+ /**
+ * Resolves MSAA.
+ */
+ void resolveRenderTarget(GrRenderTarget* target);
+
+ /** Info struct returned by getReadPixelsInfo about performing intermediate draws before
+ reading pixels for performance or correctness. */
+ struct ReadPixelTempDrawInfo {
+ /** If the GrGpu is requesting that the caller do a draw to an intermediate surface then
+ this is descriptor for the temp surface. The draw should always be a rect with
+ dst 0,0,w,h. */
+ GrSurfaceDesc fTempSurfaceDesc;
+ /** Indicates whether there is a performance advantage to using an exact match texture
+ (in terms of width and height) for the intermediate texture instead of approximate. */
+ SkBackingFit fTempSurfaceFit;
+ /** Swizzle to apply during the draw. This is used to compensate for either feature or
+ performance limitations in the underlying 3D API. */
+ GrSwizzle fSwizzle;
+ /** The config that should be used to read from the temp surface after the draw. This may be
+ different than the original read config in order to compensate for swizzling. The
+ read data will effectively be in the original read config. */
+ GrPixelConfig fReadConfig;
+ };
+
+ /** Describes why an intermediate draw must/should be performed before readPixels. */
+ enum DrawPreference {
+ /** On input means that the caller would proceed without draw if the GrGpu doesn't request
+ one.
+ On output means that the GrGpu is not requesting a draw. */
+ kNoDraw_DrawPreference,
+ /** Means that the client would prefer a draw for performance of the readback but
+ can satisfy a straight readPixels call on the inputs without an intermediate draw.
+ getReadPixelsInfo will never set the draw preference to this value but may leave
+ it set. */
+ kCallerPrefersDraw_DrawPreference,
+ /** On output means that GrGpu would prefer a draw for performance of the readback but
+ can satisfy a straight readPixels call on the inputs without an intermediate draw. The
+ caller of getReadPixelsInfo should never specify this on input. */
+ kGpuPrefersDraw_DrawPreference,
+ /** On input means that the caller requires a draw to do a transformation and there is no
+ CPU fallback.
+ On output means that GrGpu can only satisfy the readPixels request if the intermediate
+ draw is performed.
+ */
+ kRequireDraw_DrawPreference
+ };
+
+ /**
+ * Used to negotiate whether and how an intermediate draw should or must be performed before
+ * a readPixels call. If this returns false then GrGpu could not deduce an intermediate draw
+ * that would allow a successful readPixels call. The passed width, height, and rowBytes,
+ * must be non-zero and already reflect clipping to the src bounds.
+ */
+ bool getReadPixelsInfo(GrSurface* srcSurface, int readWidth, int readHeight, size_t rowBytes,
+ GrPixelConfig readConfig, DrawPreference*, ReadPixelTempDrawInfo*);
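+
+ // Illustrative negotiation sketch (editor's note, not part of this patch; the caller code
+ // is hypothetical):
+ //
+ //     GrGpu::DrawPreference pref = GrGpu::kNoDraw_DrawPreference;
+ //     GrGpu::ReadPixelTempDrawInfo tempInfo;
+ //     if (!gpu->getReadPixelsInfo(src, w, h, rowBytes, config, &pref, &tempInfo)) {
+ //         return false;   // no viable way to read these pixels
+ //     }
+ //     if (GrGpu::kNoDraw_DrawPreference != pref) {
+ //         // draw src into a temp surface described by tempInfo.fTempSurfaceDesc,
+ //         // then read back using tempInfo.fReadConfig
+ //     }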
+
+ /** Info struct returned by getWritePixelsInfo about performing an intermediate draw in order
+ to write pixels to a GrSurface for either performance or correctness reasons. */
+ struct WritePixelTempDrawInfo {
+ /** If the GrGpu is requesting that the caller upload to an intermediate surface and draw
+ that to the dst then this is the descriptor for the intermediate surface. The caller
+ should upload the pixels such that the upper left pixel of the upload rect is at 0,0 in
+ the intermediate surface.*/
+ GrSurfaceDesc fTempSurfaceDesc;
+ /** Swizzle to apply during the draw. This is used to compensate for either feature or
+ performance limitations in the underlying 3D API. */
+ GrSwizzle fSwizzle;
+ /** The config that should be specified when uploading the *original* data to the temp
+ surface before the draw. This may be different than the original src data config in
+ order to compensate for swizzling that will occur when drawing. */
+ GrPixelConfig fWriteConfig;
+ };
+
+ /**
+ * Used to negotiate whether and how an intermediate surface should be used to write pixels to
+ * a GrSurface. If this returns false then GrGpu could not deduce an intermediate draw
+ * that would allow a successful transfer of the src pixels to the dst. The passed width,
+ * height, and rowBytes, must be non-zero and already reflect clipping to the dst bounds.
+ */
+ bool getWritePixelsInfo(GrSurface* dstSurface, int width, int height,
+ GrPixelConfig srcConfig, DrawPreference*, WritePixelTempDrawInfo*);
+
+ /**
+ * Reads a rectangle of pixels from a render target.
+ *
+ * @param surface The surface to read from
+ * @param left left edge of the rectangle to read (inclusive)
+ * @param top top edge of the rectangle to read (inclusive)
+ * @param width width of rectangle to read in pixels.
+ * @param height height of rectangle to read in pixels.
+ * @param config the pixel config of the destination buffer
+ * @param buffer memory to read the rectangle into.
+ * @param rowBytes the number of bytes between consecutive rows. Zero
+ * means rows are tightly packed.
+ * @param invertY buffer should be populated bottom-to-top as opposed
+ * to top-to-bottom (skia's usual order)
+ *
+ * @return true if the read succeeded, false if not. The read can fail
+ * because of a unsupported pixel config or because no render
+ * target is currently set.
+ */
+ bool readPixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config, void* buffer, size_t rowBytes);
+
+ /**
+ * Updates the pixels in a rectangle of a surface.
+ *
+ * @param surface The surface to write to.
+ * @param left left edge of the rectangle to write (inclusive)
+ * @param top top edge of the rectangle to write (inclusive)
+ * @param width width of rectangle to write in pixels.
+ * @param height height of rectangle to write in pixels.
+ * @param config the pixel config of the source buffer
+ * @param texels array of mipmap levels containing texture data
+ */
+ bool writePixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config,
+ const SkTArray<GrMipLevel>& texels);
+
+ /**
+ * This function is a shim which creates a SkTArray<GrMipLevel> of size 1.
+ * It then calls writePixels with that SkTArray.
+ *
+ * @param buffer memory to read pixels from.
+ * @param rowBytes number of bytes between consecutive rows. Zero
+ * means rows are tightly packed.
+ */
+ bool writePixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config, const void* buffer,
+ size_t rowBytes);
+
+ /**
+ * Updates the pixels in a rectangle of a surface using a buffer
+ *
+ * @param surface The surface to write to.
+ * @param left left edge of the rectangle to write (inclusive)
+ * @param top top edge of the rectangle to write (inclusive)
+ * @param width width of rectangle to write in pixels.
+ * @param height height of rectangle to write in pixels.
+ * @param config the pixel config of the source buffer
+ * @param transferBuffer GrBuffer to read pixels from (type must be "kCpuToGpu")
+ * @param offset offset from the start of the buffer
+ * @param rowBytes number of bytes between consecutive rows. Zero
+ * means rows are tightly packed.
+ */
+ bool transferPixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config, GrBuffer* transferBuffer,
+ size_t offset, size_t rowBytes, GrFence* fence);
+
+ /**
+ * This can be called before allocating a texture to be a dst for copySurface. This is only
+ * used for doing dst copies needed in blends, thus the src is always a GrRenderTarget. It will
+ * populate the origin, config, and flags fields of the desc such that copySurface can
+ * efficiently succeed.
+ */
+ virtual bool initDescForDstCopy(const GrRenderTarget* src, GrSurfaceDesc* desc) const = 0;
+
+    // After the client interacts directly with the 3D context state the GrGpu
+    // must resync its internal state and assumptions about 3D context state.
+    // Each time this occurs the GrGpu bumps a timestamp.
+    // At 10 resets / frame and 60fps a 64bit timestamp will overflow in about
+    // a billion years.
+ typedef uint64_t ResetTimestamp;
+
+ // This timestamp is always older than the current timestamp
+ static const ResetTimestamp kExpiredTimestamp = 0;
+ // Returns a timestamp based on the number of times the context was reset.
+ // This timestamp can be used to lazily detect when cached 3D context state
+ // is dirty.
+ ResetTimestamp getResetTimestamp() const { return fResetTimestamp; }
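+    // A cache of 3D API state can use this to lazily revalidate itself, e.g. (illustrative
+    // sketch only; fLastKnownTimestamp is a hypothetical member of such a cache):
+    //
+    //   if (fLastKnownTimestamp != gpu->getResetTimestamp()) {
+    //       // ... re-send the cached state to the 3D API ...
+    //       fLastKnownTimestamp = gpu->getResetTimestamp();
+    //   }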
+
+ // Called to perform a surface to surface copy. Fallbacks to issuing a draw from the src to dst
+    // take place at the GrDrawTarget level and this function implements faster copy paths. The rect
+ // and point are pre-clipped. The src rect and implied dst rect are guaranteed to be within the
+ // src/dst bounds and non-empty.
+ bool copySurface(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ struct MultisampleSpecs {
+ MultisampleSpecs(uint8_t uniqueID, int effectiveSampleCnt, const SkPoint* locations)
+ : fUniqueID(uniqueID),
+ fEffectiveSampleCnt(effectiveSampleCnt),
+ fSampleLocations(locations) {}
+
+ // Nonzero ID that uniquely identifies these multisample specs.
+ uint8_t fUniqueID;
+ // The actual number of samples the GPU will run. NOTE: this value can be greater than the
+        // render target's sample count.
+ int fEffectiveSampleCnt;
+ // If sample locations are supported, points to the subpixel locations at which the GPU will
+ // sample. Pixel center is at (.5, .5), and (0, 0) indicates the top left corner.
+ const SkPoint* fSampleLocations;
+ };
+
+ // Finds a render target's multisample specs. The stencil settings are only needed to flush the
+ // draw state prior to querying multisample information; they should not have any effect on the
+ // multisample information itself.
+ const MultisampleSpecs& getMultisampleSpecs(GrRenderTarget*, const GrStencilSettings&);
+
+    // Creates a GrGpuCommandBuffer to which the GrDrawTarget can send draw commands, instead of
+    // sending them directly to the Gpu object.
+ virtual GrGpuCommandBuffer* createCommandBuffer(
+ GrRenderTarget* target,
+ const GrGpuCommandBuffer::LoadAndStoreInfo& colorInfo,
+ const GrGpuCommandBuffer::LoadAndStoreInfo& stencilInfo) = 0;
+
+    // Called by the draw target when flushing.
+ // Provides a hook for post-flush actions (e.g. PLS reset and Vulkan command buffer submits).
+ virtual void finishDrawTarget() {}
+
+ virtual GrFence SK_WARN_UNUSED_RESULT insertFence() const = 0;
+ virtual bool waitFence(GrFence, uint64_t timeout = 1000) const = 0;
+ virtual void deleteFence(GrFence) const = 0;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Debugging and Stats
+
+ class Stats {
+ public:
+#if GR_GPU_STATS
+ Stats() { this->reset(); }
+
+ void reset() {
+ fRenderTargetBinds = 0;
+ fShaderCompilations = 0;
+ fTextureCreates = 0;
+ fTextureUploads = 0;
+ fTransfersToTexture = 0;
+ fStencilAttachmentCreates = 0;
+ fNumDraws = 0;
+ fNumFailedDraws = 0;
+ }
+
+ int renderTargetBinds() const { return fRenderTargetBinds; }
+ void incRenderTargetBinds() { fRenderTargetBinds++; }
+ int shaderCompilations() const { return fShaderCompilations; }
+ void incShaderCompilations() { fShaderCompilations++; }
+ int textureCreates() const { return fTextureCreates; }
+ void incTextureCreates() { fTextureCreates++; }
+ int textureUploads() const { return fTextureUploads; }
+ void incTextureUploads() { fTextureUploads++; }
+ int transfersToTexture() const { return fTransfersToTexture; }
+ void incTransfersToTexture() { fTransfersToTexture++; }
+ void incStencilAttachmentCreates() { fStencilAttachmentCreates++; }
+ void incNumDraws() { fNumDraws++; }
+ void incNumFailedDraws() { ++fNumFailedDraws; }
+ void dump(SkString*);
+ void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values);
+ int numDraws() const { return fNumDraws; }
+ int numFailedDraws() const { return fNumFailedDraws; }
+ private:
+ int fRenderTargetBinds;
+ int fShaderCompilations;
+ int fTextureCreates;
+ int fTextureUploads;
+ int fTransfersToTexture;
+ int fStencilAttachmentCreates;
+ int fNumDraws;
+ int fNumFailedDraws;
+#else
+ void dump(SkString*) {}
+ void dumpKeyValuePairs(SkTArray<SkString>*, SkTArray<double>*) {}
+ void incRenderTargetBinds() {}
+ void incShaderCompilations() {}
+ void incTextureCreates() {}
+ void incTextureUploads() {}
+ void incTransfersToTexture() {}
+ void incStencilAttachmentCreates() {}
+ void incNumDraws() {}
+ void incNumFailedDraws() {}
+#endif
+ };
+
+ Stats* stats() { return &fStats; }
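+    // Backends and helpers bump these counters through this accessor, e.g. (as
+    // GrGpuCommandBuffer::draw does when a draw is rejected):
+    //
+    //   gpu->stats()->incNumFailedDraws();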
+
+ /** Creates a texture directly in the backend API without wrapping it in a GrTexture. This is
+ only to be used for testing (particularly for testing the methods that import an externally
+        created texture into Skia). Must be matched with a call to deleteTestingOnlyBackendTexture(). */
+ virtual GrBackendObject createTestingOnlyBackendTexture(void* pixels, int w, int h,
+ GrPixelConfig config,
+ bool isRenderTarget = false) = 0;
+ /** Check a handle represents an actual texture in the backend API that has not been freed. */
+ virtual bool isTestingOnlyBackendTexture(GrBackendObject) const = 0;
+ /** If ownership of the backend texture has been transferred pass true for abandonTexture. This
+ will do any necessary cleanup of the handle without freeing the texture in the backend
+ API. */
+ virtual void deleteTestingOnlyBackendTexture(GrBackendObject,
+ bool abandonTexture = false) = 0;
+
+ // width and height may be larger than rt (if underlying API allows it).
+ // Returns nullptr if compatible sb could not be created, otherwise the caller owns the ref on
+ // the GrStencilAttachment.
+ virtual GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget*,
+ int width,
+ int height) = 0;
+ // clears target's entire stencil buffer to 0
+ virtual void clearStencil(GrRenderTarget* target) = 0;
+
+ // draws an outline rectangle for debugging/visualization purposes.
+ virtual void drawDebugWireRect(GrRenderTarget*, const SkIRect&, GrColor) = 0;
+
+ // Determines whether a texture will need to be rescaled in order to be used with the
+ // GrTextureParams. This variation is called when the caller will create a new texture using the
+ // texture provider from a non-texture src (cpu-backed image, ...).
+ bool makeCopyForTextureParams(int width, int height, const GrTextureParams&,
+ GrTextureProducer::CopyParams*) const;
+
+ // Like the above but this variation should be called when the caller is not creating the
+ // original texture but rather was handed the original texture. It adds additional checks
+ // relevant to original textures that were created external to Skia via
+ // GrTextureProvider::wrap methods.
+ bool makeCopyForTextureParams(GrTexture* texture, const GrTextureParams& params,
+ GrTextureProducer::CopyParams* copyParams) const {
+ if (this->makeCopyForTextureParams(texture->width(), texture->height(), params,
+ copyParams)) {
+ return true;
+ }
+ return this->onMakeCopyForTextureParams(texture, params, copyParams);
+ }
+
+ // This is only to be used in GL-specific tests.
+ virtual const GrGLContext* glContextForTesting() const { return nullptr; }
+
+ // This is only to be used by testing code
+ virtual void resetShaderCacheForTesting() const {}
+
+ void handleDirtyContext() {
+ if (fResetBits) {
+ this->resetContext();
+ }
+ }
+
+protected:
+ static void ElevateDrawPreference(GrGpu::DrawPreference* preference,
+ GrGpu::DrawPreference elevation) {
+ GR_STATIC_ASSERT(GrGpu::kCallerPrefersDraw_DrawPreference > GrGpu::kNoDraw_DrawPreference);
+ GR_STATIC_ASSERT(GrGpu::kGpuPrefersDraw_DrawPreference >
+ GrGpu::kCallerPrefersDraw_DrawPreference);
+ GR_STATIC_ASSERT(GrGpu::kRequireDraw_DrawPreference >
+ GrGpu::kGpuPrefersDraw_DrawPreference);
+ *preference = SkTMax(*preference, elevation);
+ }
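+    // Backends typically call this from their onGetReadPixelsInfo / onGetWritePixelsInfo
+    // overrides so that the preference is only ever raised, never lowered, e.g. (sketch;
+    // drawPreference names the DrawPreference* parameter of those overrides):
+    //
+    //   ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);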
+
+ // Handles cases where a surface will be updated without a call to flushRenderTarget
+ void didWriteToSurface(GrSurface* surface, const SkIRect* bounds, uint32_t mipLevels = 1) const;
+
+ Stats fStats;
+ SkAutoTDelete<GrPathRendering> fPathRendering;
+ // Subclass must initialize this in its constructor.
+ SkAutoTUnref<const GrCaps> fCaps;
+
+ typedef SkTArray<SkPoint, true> SamplePattern;
+
+private:
+ // called when the 3D context state is unknown. Subclass should emit any
+ // assumed 3D context state and dirty any state cache.
+ virtual void onResetContext(uint32_t resetBits) = 0;
+
+ // Called before certain draws in order to guarantee coherent results from dst reads.
+ virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;
+
+ // overridden by backend-specific derived class to create objects.
+ // Texture size and sample size will have already been validated in base class before
+ // onCreateTexture/CompressedTexture are called.
+ virtual GrTexture* onCreateTexture(const GrSurfaceDesc& desc,
+ SkBudgeted budgeted,
+ const SkTArray<GrMipLevel>& texels) = 0;
+ virtual GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc,
+ SkBudgeted budgeted,
+ const SkTArray<GrMipLevel>& texels) = 0;
+
+ virtual GrTexture* onWrapBackendTexture(const GrBackendTextureDesc&, GrWrapOwnership) = 0;
+ virtual GrRenderTarget* onWrapBackendRenderTarget(const GrBackendRenderTargetDesc&,
+ GrWrapOwnership) = 0;
+ virtual GrRenderTarget* onWrapBackendTextureAsRenderTarget(const GrBackendTextureDesc&) = 0;
+ virtual GrBuffer* onCreateBuffer(size_t size, GrBufferType intendedType, GrAccessPattern,
+ const void* data) = 0;
+
+ virtual gr_instanced::InstancedRendering* onCreateInstancedRendering() = 0;
+
+ virtual bool onMakeCopyForTextureParams(GrTexture* texture, const GrTextureParams&,
+ GrTextureProducer::CopyParams*) const { return false; }
+
+ virtual bool onGetReadPixelsInfo(GrSurface* srcSurface, int readWidth, int readHeight,
+ size_t rowBytes, GrPixelConfig readConfig, DrawPreference*,
+ ReadPixelTempDrawInfo*) = 0;
+ virtual bool onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
+ GrPixelConfig srcConfig, DrawPreference*,
+ WritePixelTempDrawInfo*) = 0;
+
+ // overridden by backend-specific derived class to perform the surface read
+ virtual bool onReadPixels(GrSurface*,
+ int left, int top,
+ int width, int height,
+ GrPixelConfig,
+ void* buffer,
+ size_t rowBytes) = 0;
+
+ // overridden by backend-specific derived class to perform the surface write
+ virtual bool onWritePixels(GrSurface*,
+ int left, int top, int width, int height,
+ GrPixelConfig config,
+ const SkTArray<GrMipLevel>& texels) = 0;
+
+ // overridden by backend-specific derived class to perform the surface write
+ virtual bool onTransferPixels(GrSurface*,
+ int left, int top, int width, int height,
+ GrPixelConfig config, GrBuffer* transferBuffer,
+ size_t offset, size_t rowBytes) = 0;
+
+ // overridden by backend-specific derived class to perform the resolve
+ virtual void onResolveRenderTarget(GrRenderTarget* target) = 0;
+
+ // overridden by backend specific derived class to perform the copy surface
+ virtual bool onCopySurface(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) = 0;
+
+ // overridden by backend specific derived class to perform the multisample queries
+ virtual void onGetMultisampleSpecs(GrRenderTarget*, const GrStencilSettings&,
+ int* effectiveSampleCnt, SamplePattern*) = 0;
+
+ void resetContext() {
+ this->onResetContext(fResetBits);
+ fResetBits = 0;
+ ++fResetTimestamp;
+ }
+
+ struct SamplePatternComparator {
+ bool operator()(const SamplePattern&, const SamplePattern&) const;
+ };
+
+ typedef std::map<SamplePattern, uint8_t, SamplePatternComparator> MultisampleSpecsIdMap;
+
+ ResetTimestamp fResetTimestamp;
+ uint32_t fResetBits;
+ MultisampleSpecsIdMap fMultisampleSpecsIdMap;
+ SkSTArray<1, MultisampleSpecs, true> fMultisampleSpecs;
+ // The context owns us, not vice-versa, so this ptr is not ref'ed by Gpu.
+ GrContext* fContext;
+
+ friend class GrPathRendering;
+ friend class gr_instanced::InstancedRendering;
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrGpuCommandBuffer.cpp b/gfx/skia/skia/src/gpu/GrGpuCommandBuffer.cpp
new file mode 100644
index 000000000..022c16696
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGpuCommandBuffer.cpp
@@ -0,0 +1,49 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrGpuCommandBuffer.h"
+
+#include "GrCaps.h"
+#include "GrFixedClip.h"
+#include "GrGpu.h"
+#include "GrPrimitiveProcessor.h"
+#include "GrRenderTarget.h"
+#include "SkRect.h"
+
+void GrGpuCommandBuffer::submit(const SkIRect& bounds) {
+ this->gpu()->handleDirtyContext();
+ this->onSubmit(bounds);
+}
+
+void GrGpuCommandBuffer::clear(const GrFixedClip& clip, GrColor color, GrRenderTarget* rt) {
+ SkASSERT(rt);
+ SkASSERT(!clip.scissorEnabled() ||
+ (SkIRect::MakeWH(rt->width(), rt->height()).contains(clip.scissorRect()) &&
+ SkIRect::MakeWH(rt->width(), rt->height()) != clip.scissorRect()));
+ this->onClear(rt, clip, color);
+}
+
+void GrGpuCommandBuffer::clearStencilClip(const GrFixedClip& clip,
+ bool insideStencilMask,
+ GrRenderTarget* rt) {
+ SkASSERT(rt);
+ this->onClearStencilClip(rt, clip, insideStencilMask);
+}
+
+
+bool GrGpuCommandBuffer::draw(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ const GrMesh* mesh,
+ int meshCount) {
+ if (primProc.numAttribs() > this->gpu()->caps()->maxVertexAttributes()) {
+ this->gpu()->stats()->incNumFailedDraws();
+ return false;
+ }
+ this->onDraw(pipeline, primProc, mesh, meshCount);
+ return true;
+}
+
diff --git a/gfx/skia/skia/src/gpu/GrGpuCommandBuffer.h b/gfx/skia/skia/src/gpu/GrGpuCommandBuffer.h
new file mode 100644
index 000000000..2336dc5aa
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGpuCommandBuffer.h
@@ -0,0 +1,98 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrGpuCommandBuffer_DEFINED
+#define GrGpuCommandBuffer_DEFINED
+
+#include "GrColor.h"
+
+class GrFixedClip;
+class GrGpu;
+class GrMesh;
+class GrPipeline;
+class GrPrimitiveProcessor;
+class GrRenderTarget;
+struct SkIRect;
+
+/**
+ * The GrGpuCommandBuffer is a series of commands (draws, clears, and discards), which all target
+ * the same render target. It is possible that these commands execute immediately (GL), or get
+ * buffered up for later execution (Vulkan). GrBatches will execute their draw commands into a
+ * GrGpuCommandBuffer.
+ */
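+// A rough sketch of the intended flow (hypothetical caller code; the variable names are
+// illustrative only):
+//
+//   GrGpuCommandBuffer* cb = gpu->createCommandBuffer(rt, colorInfo, stencilInfo);
+//   cb->clear(clip, GrColor_WHITE, rt);              // ignores draw state, respects the clip
+//   cb->draw(pipeline, primProc, meshes, meshCount); // returns false if resource limits are hit
+//   cb->end();                                       // recording is finished
+//   cb->submit(drawBounds);                          // hand the recorded commands to the GrGpu
+//   delete cb;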
+class GrGpuCommandBuffer {
+public:
+ enum class LoadOp {
+ kLoad,
+ kClear,
+ kDiscard,
+ };
+
+ enum class StoreOp {
+ kStore,
+ kDiscard,
+ };
+
+ struct LoadAndStoreInfo {
+ LoadOp fLoadOp;
+ StoreOp fStoreOp;
+ GrColor fClearColor;
+ };
+
+ GrGpuCommandBuffer() {}
+ virtual ~GrGpuCommandBuffer() {}
+
+ // Signals the end of recording to the command buffer and that it can now be submitted.
+ virtual void end() = 0;
+
+ // Sends the command buffer off to the GPU object to execute the commands built up in the
+ // buffer. The gpu object is allowed to defer execution of the commands until it is flushed.
+ // The bounds should represent the bounds of all the draws put into the command buffer.
+ void submit(const SkIRect& bounds);
+
+ // We pass in an array of meshCount GrMesh to the draw. The backend should loop over each
+ // GrMesh object and emit a draw for it. Each draw will use the same GrPipeline and
+ // GrPrimitiveProcessor. This may fail if the draw would exceed any resource limits (e.g.
+ // number of vertex attributes is too large).
+ bool draw(const GrPipeline&,
+ const GrPrimitiveProcessor&,
+ const GrMesh*,
+ int meshCount);
+
+ /**
+ * Clear the passed in render target. Ignores the draw state and clip.
+ */
+ void clear(const GrFixedClip&, GrColor, GrRenderTarget*);
+
+ void clearStencilClip(const GrFixedClip&, bool insideStencilMask, GrRenderTarget*);
+ /**
+     * Discards the contents of the render target. nullptr indicates that the current render
+     * target should be discarded.
+ **/
+ // TODO: This should be removed in the future to favor using the load and store ops for discard
+ virtual void discard(GrRenderTarget* = nullptr) = 0;
+
+private:
+ virtual GrGpu* gpu() = 0;
+ virtual void onSubmit(const SkIRect& bounds) = 0;
+
+ // overridden by backend-specific derived class to perform the draw call.
+ virtual void onDraw(const GrPipeline&,
+ const GrPrimitiveProcessor&,
+ const GrMesh*,
+ int meshCount) = 0;
+
+ // overridden by backend-specific derived class to perform the clear.
+ virtual void onClear(GrRenderTarget*, const GrFixedClip&, GrColor) = 0;
+
+ virtual void onClearStencilClip(GrRenderTarget*,
+ const GrFixedClip&,
+ bool insideStencilMask) = 0;
+
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrGpuFactory.cpp b/gfx/skia/skia/src/gpu/GrGpuFactory.cpp
new file mode 100644
index 000000000..c6134b65a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGpuFactory.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrGpuFactory.h"
+
+#include "GrGpu.h"
+#include "gl/GrGLConfig.h"
+#include "gl/GrGLGpu.h"
+#ifdef SK_VULKAN
+#include "vk/GrVkGpu.h"
+#endif
+
+static CreateGpuProc gGpuFactories[kBackendCount] = { GrGLGpu::Create, nullptr };
+
+#ifdef SK_VULKAN
+GrGpuFactoryRegistrar gVkGpuFactoryProc(kVulkan_GrBackend, GrVkGpu::Create);
+#endif
+
+GrGpuFactoryRegistrar::GrGpuFactoryRegistrar(int i, CreateGpuProc proc) {
+ gGpuFactories[i] = proc;
+}
+
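+// Typical creation of a GrGpu through this factory (sketch; the backend context comes from the
+// embedder and is interpreted per backend):
+//
+//   GrGpu* gpu = GrGpu::Create(kOpenGL_GrBackend, backendContext, options, context);
+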
+GrGpu* GrGpu::Create(GrBackend backend,
+ GrBackendContext backendContext,
+ const GrContextOptions& options,
+ GrContext* context) {
+ SkASSERT((int)backend < kBackendCount);
+ if (!gGpuFactories[backend]) {
+ return nullptr;
+ }
+ return (gGpuFactories[backend])(backendContext, options, context);
+}
diff --git a/gfx/skia/skia/src/gpu/GrGpuFactory.h b/gfx/skia/skia/src/gpu/GrGpuFactory.h
new file mode 100644
index 000000000..aecc2c170
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGpuFactory.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGpuFactory_DEFINED
+#define GrGpuFactory_DEFINED
+
+#include "GrTypes.h"
+
+class GrGpu;
+class GrContext;
+struct GrContextOptions;
+
+typedef GrGpu* (*CreateGpuProc)(GrBackendContext, const GrContextOptions& options, GrContext*);
+
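+// Backends make themselves available by defining a file-scope registrar object that installs
+// their creation proc, mirroring the Vulkan registration in GrGpuFactory.cpp:
+//
+//   GrGpuFactoryRegistrar gVkGpuFactoryProc(kVulkan_GrBackend, GrVkGpu::Create);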
+class GrGpuFactoryRegistrar {
+public:
+ GrGpuFactoryRegistrar(int i, CreateGpuProc proc);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrGpuResource.cpp b/gfx/skia/skia/src/gpu/GrGpuResource.cpp
new file mode 100644
index 000000000..c1578b50a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGpuResource.cpp
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGpuResource.h"
+#include "GrContext.h"
+#include "GrResourceCache.h"
+#include "GrGpu.h"
+#include "GrGpuResourcePriv.h"
+#include "SkTraceMemoryDump.h"
+
+static inline GrResourceCache* get_resource_cache(GrGpu* gpu) {
+ SkASSERT(gpu);
+ SkASSERT(gpu->getContext());
+ SkASSERT(gpu->getContext()->getResourceCache());
+ return gpu->getContext()->getResourceCache();
+}
+
+GrGpuResource::GrGpuResource(GrGpu* gpu)
+ : fExternalFlushCntWhenBecamePurgeable(0)
+ , fGpu(gpu)
+ , fGpuMemorySize(kInvalidGpuMemorySize)
+ , fBudgeted(SkBudgeted::kNo)
+ , fRefsWrappedObjects(false)
+ , fUniqueID(CreateUniqueID()) {
+ SkDEBUGCODE(fCacheArrayIndex = -1);
+}
+
+void GrGpuResource::registerWithCache(SkBudgeted budgeted) {
+ SkASSERT(fBudgeted == SkBudgeted::kNo);
+ fBudgeted = budgeted;
+ this->computeScratchKey(&fScratchKey);
+ get_resource_cache(fGpu)->resourceAccess().insertResource(this);
+}
+
+void GrGpuResource::registerWithCacheWrapped() {
+ SkASSERT(fBudgeted == SkBudgeted::kNo);
+ // Currently resources referencing wrapped objects are not budgeted.
+ fRefsWrappedObjects = true;
+ get_resource_cache(fGpu)->resourceAccess().insertResource(this);
+}
+
+GrGpuResource::~GrGpuResource() {
+ // The cache should have released or destroyed this resource.
+ SkASSERT(this->wasDestroyed());
+}
+
+void GrGpuResource::release() {
+ SkASSERT(fGpu);
+ this->onRelease();
+ get_resource_cache(fGpu)->resourceAccess().removeResource(this);
+ fGpu = nullptr;
+ fGpuMemorySize = 0;
+}
+
+void GrGpuResource::abandon() {
+ if (this->wasDestroyed()) {
+ return;
+ }
+ SkASSERT(fGpu);
+ this->onAbandon();
+ get_resource_cache(fGpu)->resourceAccess().removeResource(this);
+ fGpu = nullptr;
+ fGpuMemorySize = 0;
+}
+
+void GrGpuResource::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
+ // Dump resource as "skia/gpu_resources/resource_#".
+ SkString dumpName("skia/gpu_resources/resource_");
+ dumpName.appendS32(this->uniqueID());
+
+ traceMemoryDump->dumpNumericValue(dumpName.c_str(), "size", "bytes", this->gpuMemorySize());
+
+ if (this->isPurgeable()) {
+ traceMemoryDump->dumpNumericValue(dumpName.c_str(), "purgeable_size", "bytes",
+ this->gpuMemorySize());
+ }
+
+ // Call setMemoryBacking to allow sub-classes with implementation specific backings (such as GL
+ // objects) to provide additional information.
+ this->setMemoryBacking(traceMemoryDump, dumpName);
+}
+
+const GrContext* GrGpuResource::getContext() const {
+ if (fGpu) {
+ return fGpu->getContext();
+ } else {
+ return nullptr;
+ }
+}
+
+GrContext* GrGpuResource::getContext() {
+ if (fGpu) {
+ return fGpu->getContext();
+ } else {
+ return nullptr;
+ }
+}
+
+void GrGpuResource::didChangeGpuMemorySize() const {
+ if (this->wasDestroyed()) {
+ return;
+ }
+
+ size_t oldSize = fGpuMemorySize;
+ SkASSERT(kInvalidGpuMemorySize != oldSize);
+ fGpuMemorySize = kInvalidGpuMemorySize;
+ get_resource_cache(fGpu)->resourceAccess().didChangeGpuMemorySize(this, oldSize);
+}
+
+void GrGpuResource::removeUniqueKey() {
+ if (this->wasDestroyed()) {
+ return;
+ }
+ SkASSERT(fUniqueKey.isValid());
+ get_resource_cache(fGpu)->resourceAccess().removeUniqueKey(this);
+}
+
+void GrGpuResource::setUniqueKey(const GrUniqueKey& key) {
+ SkASSERT(this->internalHasRef());
+ SkASSERT(key.isValid());
+
+ // Wrapped and uncached resources can never have a unique key.
+ if (SkBudgeted::kNo == this->resourcePriv().isBudgeted()) {
+ return;
+ }
+
+ if (this->wasDestroyed()) {
+ return;
+ }
+
+ get_resource_cache(fGpu)->resourceAccess().changeUniqueKey(this, key);
+}
+
+void GrGpuResource::notifyAllCntsAreZero(CntType lastCntTypeToReachZero) const {
+ if (this->wasDestroyed()) {
+ // We've already been removed from the cache. Goodbye cruel world!
+ delete this;
+ return;
+ }
+
+ // We should have already handled this fully in notifyRefCntIsZero().
+ SkASSERT(kRef_CntType != lastCntTypeToReachZero);
+
+ GrGpuResource* mutableThis = const_cast<GrGpuResource*>(this);
+ static const uint32_t kFlag =
+ GrResourceCache::ResourceAccess::kAllCntsReachedZero_RefNotificationFlag;
+ get_resource_cache(fGpu)->resourceAccess().notifyCntReachedZero(mutableThis, kFlag);
+}
+
+bool GrGpuResource::notifyRefCountIsZero() const {
+ if (this->wasDestroyed()) {
+ // handle this in notifyAllCntsAreZero().
+ return true;
+ }
+
+ GrGpuResource* mutableThis = const_cast<GrGpuResource*>(this);
+ uint32_t flags =
+ GrResourceCache::ResourceAccess::kRefCntReachedZero_RefNotificationFlag;
+ if (!this->internalHasPendingIO()) {
+ flags |= GrResourceCache::ResourceAccess::kAllCntsReachedZero_RefNotificationFlag;
+ }
+ get_resource_cache(fGpu)->resourceAccess().notifyCntReachedZero(mutableThis, flags);
+
+ // There is no need to call our notifyAllCntsAreZero function at this point since we already
+ // told the cache about the state of cnts.
+ return false;
+}
+
+void GrGpuResource::removeScratchKey() {
+ if (!this->wasDestroyed() && fScratchKey.isValid()) {
+ get_resource_cache(fGpu)->resourceAccess().willRemoveScratchKey(this);
+ fScratchKey.reset();
+ }
+}
+
+void GrGpuResource::makeBudgeted() {
+ if (!this->wasDestroyed() && SkBudgeted::kNo == fBudgeted) {
+ // Currently resources referencing wrapped objects are not budgeted.
+ SkASSERT(!fRefsWrappedObjects);
+ fBudgeted = SkBudgeted::kYes;
+ get_resource_cache(fGpu)->resourceAccess().didChangeBudgetStatus(this);
+ }
+}
+
+void GrGpuResource::makeUnbudgeted() {
+ if (!this->wasDestroyed() && SkBudgeted::kYes == fBudgeted &&
+ !fUniqueKey.isValid()) {
+ fBudgeted = SkBudgeted::kNo;
+ get_resource_cache(fGpu)->resourceAccess().didChangeBudgetStatus(this);
+ }
+}
+
+uint32_t GrGpuResource::CreateUniqueID() {
+ static int32_t gUniqueID = SK_InvalidUniqueID;
+ uint32_t id;
+ do {
+ id = static_cast<uint32_t>(sk_atomic_inc(&gUniqueID) + 1);
+ } while (id == SK_InvalidUniqueID);
+ return id;
+}
diff --git a/gfx/skia/skia/src/gpu/GrGpuResourceCacheAccess.h b/gfx/skia/skia/src/gpu/GrGpuResourceCacheAccess.h
new file mode 100644
index 000000000..e91f899cf
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGpuResourceCacheAccess.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGpuResourceCacheAccess_DEFINED
+#define GrGpuResourceCacheAccess_DEFINED
+
+#include "GrGpuResource.h"
+#include "GrGpuResourcePriv.h"
+
+namespace skiatest {
+ class Reporter;
+}
+
+/**
+ * This class allows GrResourceCache increased privileged access to GrGpuResource objects.
+ */
+class GrGpuResource::CacheAccess {
+private:
+ /**
+ * Is the resource currently cached as scratch? This means it is cached, has a valid scratch
+ * key, and does not have a unique key.
+ */
+ bool isScratch() const {
+ return !fResource->getUniqueKey().isValid() && fResource->fScratchKey.isValid() &&
+ SkBudgeted::kYes == fResource->resourcePriv().isBudgeted();
+ }
+
+ /**
+ * Called by the cache to delete the resource under normal circumstances.
+ */
+ void release() {
+ fResource->release();
+ if (fResource->isPurgeable()) {
+ delete fResource;
+ }
+ }
+
+ /**
+ * Called by the cache to delete the resource when the backend 3D context is no longer valid.
+ */
+ void abandon() {
+ fResource->abandon();
+ if (fResource->isPurgeable()) {
+ delete fResource;
+ }
+ }
+
+ /** Called by the cache to assign a new unique key. */
+ void setUniqueKey(const GrUniqueKey& key) { fResource->fUniqueKey = key; }
+
+ /** Called by the cache to make the unique key invalid. */
+ void removeUniqueKey() { fResource->fUniqueKey.reset(); }
+
+ uint32_t timestamp() const { return fResource->fTimestamp; }
+ void setTimestamp(uint32_t ts) { fResource->fTimestamp = ts; }
+
+ /** Called by the cache to record when this became purgeable. */
+ void setFlushCntWhenResourceBecamePurgeable(uint32_t cnt) {
+ SkASSERT(fResource->isPurgeable());
+ fResource->fExternalFlushCntWhenBecamePurgeable = cnt;
+ }
+ /**
+     * Called by the cache to determine whether this resource has been purgeable for more than
+ * a threshold number of external flushes.
+ */
+ uint32_t flushCntWhenResourceBecamePurgeable() {
+ SkASSERT(fResource->isPurgeable());
+ return fResource->fExternalFlushCntWhenBecamePurgeable;
+ }
+
+ int* accessCacheIndex() const { return &fResource->fCacheArrayIndex; }
+
+ CacheAccess(GrGpuResource* resource) : fResource(resource) {}
+ CacheAccess(const CacheAccess& that) : fResource(that.fResource) {}
+ CacheAccess& operator=(const CacheAccess&); // unimpl
+
+ // No taking addresses of this type.
+ const CacheAccess* operator&() const;
+ CacheAccess* operator&();
+
+ GrGpuResource* fResource;
+
+ friend class GrGpuResource; // to construct/copy this type.
+ friend class GrResourceCache; // to use this type
+ friend void test_unbudgeted_to_scratch(skiatest::Reporter* reporter); // for unit testing
+};
+
+inline GrGpuResource::CacheAccess GrGpuResource::cacheAccess() { return CacheAccess(this); }
+
+inline const GrGpuResource::CacheAccess GrGpuResource::cacheAccess() const {
+ return CacheAccess(const_cast<GrGpuResource*>(this));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrGpuResourcePriv.h b/gfx/skia/skia/src/gpu/GrGpuResourcePriv.h
new file mode 100644
index 000000000..82bf072db
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGpuResourcePriv.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGpuResourcePriv_DEFINED
+#define GrGpuResourcePriv_DEFINED
+
+#include "GrGpuResource.h"
+
+/**
+ * This class allows code internal to Skia privileged access to manage the cache keys and budget
+ * status of a GrGpuResource object.
+ */
+class GrGpuResource::ResourcePriv {
+public:
+ /**
+ * Sets a unique key for the resource. If the resource was previously cached as scratch it will
+ * be converted to a uniquely-keyed resource. If the key is invalid then this is equivalent to
+ * removeUniqueKey(). If another resource is using the key then its unique key is removed and
+ * this resource takes over the key.
+ */
+ void setUniqueKey(const GrUniqueKey& key) { fResource->setUniqueKey(key); }
+
+ /** Removes the unique key from a resource. If the resource has a scratch key, it may be
+ preserved for recycling as scratch. */
+ void removeUniqueKey() { fResource->removeUniqueKey(); }
+
+ /**
+ * If the resource is uncached make it cached. Has no effect on resources that are wrapped or
+ * already cached.
+ */
+ void makeBudgeted() { fResource->makeBudgeted(); }
+
+ /**
+ * If the resource is cached make it uncached. Has no effect on resources that are wrapped or
+ * already uncached. Furthermore, resources with unique keys cannot be made unbudgeted.
+ */
+ void makeUnbudgeted() { fResource->makeUnbudgeted(); }
+
+ /**
+ * Does the resource count against the resource budget?
+ */
+ SkBudgeted isBudgeted() const {
+ bool ret = SkBudgeted::kYes == fResource->fBudgeted;
+ SkASSERT(ret || !fResource->getUniqueKey().isValid());
+ return SkBudgeted(ret);
+ }
+
+ /**
+ * Is the resource object wrapping an externally allocated GPU resource?
+ */
+ bool refsWrappedObjects() const { return fResource->fRefsWrappedObjects; }
+
+ /**
+ * If this resource can be used as a scratch resource this returns a valid scratch key.
+ * Otherwise it returns a key for which isNullScratch is true. The resource may currently be
+ * used as a uniquely keyed resource rather than scratch. Check isScratch().
+ */
+ const GrScratchKey& getScratchKey() const { return fResource->fScratchKey; }
+
+ /**
+ * If the resource has a scratch key, the key will be removed. Since scratch keys are installed
+ * at resource creation time, this means the resource will never again be used as scratch.
+ */
+ void removeScratchKey() const { fResource->removeScratchKey(); }
+
+protected:
+ ResourcePriv(GrGpuResource* resource) : fResource(resource) { }
+ ResourcePriv(const ResourcePriv& that) : fResource(that.fResource) {}
+    ResourcePriv& operator=(const ResourcePriv&); // unimpl
+
+ // No taking addresses of this type.
+ const ResourcePriv* operator&() const;
+ ResourcePriv* operator&();
+
+ GrGpuResource* fResource;
+
+ friend class GrGpuResource; // to construct/copy this type.
+};
+
+inline GrGpuResource::ResourcePriv GrGpuResource::resourcePriv() { return ResourcePriv(this); }
+
+inline const GrGpuResource::ResourcePriv GrGpuResource::resourcePriv() const {
+ return ResourcePriv(const_cast<GrGpuResource*>(this));
+}
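+
+// Call sites reach this restricted interface through GrGpuResource::resourcePriv(); for example,
+// GrBitmapTextureMaker::refOriginalTexture installs a unique key with
+//
+//   tex->resourcePriv().setUniqueKey(fOriginalKey);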
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrGpuResourceRef.cpp b/gfx/skia/skia/src/gpu/GrGpuResourceRef.cpp
new file mode 100644
index 000000000..405679d4f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrGpuResourceRef.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGpuResourceRef.h"
+
+GrGpuResourceRef::GrGpuResourceRef() {
+ fResource = nullptr;
+ fOwnRef = false;
+ fPendingIO = false;
+}
+
+GrGpuResourceRef::GrGpuResourceRef(GrGpuResource* resource, GrIOType ioType) {
+ fResource = nullptr;
+ fOwnRef = false;
+ fPendingIO = false;
+ this->setResource(resource, ioType);
+}
+
+GrGpuResourceRef::~GrGpuResourceRef() {
+ if (fOwnRef) {
+ SkASSERT(fResource);
+ fResource->unref();
+ }
+ if (fPendingIO) {
+ switch (fIOType) {
+ case kRead_GrIOType:
+ fResource->completedRead();
+ break;
+ case kWrite_GrIOType:
+ fResource->completedWrite();
+ break;
+ case kRW_GrIOType:
+ fResource->completedRead();
+ fResource->completedWrite();
+ break;
+ }
+ }
+}
+
+void GrGpuResourceRef::reset() {
+ SkASSERT(!fPendingIO);
+ SkASSERT(SkToBool(fResource) == fOwnRef);
+ if (fOwnRef) {
+ fResource->unref();
+ fOwnRef = false;
+ fResource = nullptr;
+ }
+}
+
+void GrGpuResourceRef::setResource(GrGpuResource* resource, GrIOType ioType) {
+ SkASSERT(!fPendingIO);
+ SkASSERT(SkToBool(fResource) == fOwnRef);
+ SkSafeUnref(fResource);
+ if (nullptr == resource) {
+ fResource = nullptr;
+ fOwnRef = false;
+ } else {
+ fResource = resource;
+ fOwnRef = true;
+ fIOType = ioType;
+ }
+}
+
+void GrGpuResourceRef::markPendingIO() const {
+ // This should only be called when the owning GrProgramElement gets its first
+ // pendingExecution ref.
+ SkASSERT(!fPendingIO);
+ SkASSERT(fResource);
+ fPendingIO = true;
+ switch (fIOType) {
+ case kRead_GrIOType:
+ fResource->addPendingRead();
+ break;
+ case kWrite_GrIOType:
+ fResource->addPendingWrite();
+ break;
+ case kRW_GrIOType:
+ fResource->addPendingRead();
+ fResource->addPendingWrite();
+ break;
+ }
+}
+
+void GrGpuResourceRef::pendingIOComplete() const {
+    // This should only be called when the owner's pending executions have occurred but it is still
+ // reffed.
+ SkASSERT(fOwnRef);
+ SkASSERT(fPendingIO);
+ switch (fIOType) {
+ case kRead_GrIOType:
+ fResource->completedRead();
+ break;
+ case kWrite_GrIOType:
+ fResource->completedWrite();
+ break;
+ case kRW_GrIOType:
+ fResource->completedRead();
+ fResource->completedWrite();
+ break;
+
+ }
+ fPendingIO = false;
+}
+
+void GrGpuResourceRef::removeRef() const {
+    // This should only be called once, when the owner's last ref goes away and
+ // there is a pending execution.
+ SkASSERT(fOwnRef);
+ SkASSERT(fPendingIO);
+ SkASSERT(fResource);
+ fResource->unref();
+ fOwnRef = false;
+}
diff --git a/gfx/skia/skia/src/gpu/GrImageIDTextureAdjuster.cpp b/gfx/skia/skia/src/gpu/GrImageIDTextureAdjuster.cpp
new file mode 100644
index 000000000..5a96b6d3e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrImageIDTextureAdjuster.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrImageIDTextureAdjuster.h"
+
+#include "GrContext.h"
+#include "GrGpuResourcePriv.h"
+#include "SkBitmap.h"
+#include "SkGrPriv.h"
+#include "SkImage_Base.h"
+#include "SkImageCacherator.h"
+#include "SkPixelRef.h"
+
+static bool bmp_is_alpha_only(const SkBitmap& bm) { return kAlpha_8_SkColorType == bm.colorType(); }
+
+GrBitmapTextureMaker::GrBitmapTextureMaker(GrContext* context, const SkBitmap& bitmap)
+ : INHERITED(context, bitmap.width(), bitmap.height(), bmp_is_alpha_only(bitmap))
+ , fBitmap(bitmap)
+{
+ if (!bitmap.isVolatile()) {
+ SkIPoint origin = bitmap.pixelRefOrigin();
+ SkIRect subset = SkIRect::MakeXYWH(origin.fX, origin.fY, bitmap.width(),
+ bitmap.height());
+ GrMakeKeyFromImageID(&fOriginalKey, bitmap.pixelRef()->getGenerationID(), subset);
+ }
+}
+
+GrTexture* GrBitmapTextureMaker::refOriginalTexture(bool willBeMipped,
+ SkSourceGammaTreatment gammaTreatment) {
+ GrTexture* tex = nullptr;
+
+ if (fOriginalKey.isValid()) {
+ tex = this->context()->textureProvider()->findAndRefTextureByUniqueKey(fOriginalKey);
+ if (tex) {
+ return tex;
+ }
+ }
+ if (willBeMipped) {
+ tex = GrGenerateMipMapsAndUploadToTexture(this->context(), fBitmap, gammaTreatment);
+ }
+ if (!tex) {
+ tex = GrUploadBitmapToTexture(this->context(), fBitmap);
+ }
+ if (tex && fOriginalKey.isValid()) {
+ tex->resourcePriv().setUniqueKey(fOriginalKey);
+ GrInstallBitmapUniqueKeyInvalidator(fOriginalKey, fBitmap.pixelRef());
+ }
+ return tex;
+}
+
+void GrBitmapTextureMaker::makeCopyKey(const CopyParams& copyParams, GrUniqueKey* copyKey) {
+ if (fOriginalKey.isValid()) {
+ MakeCopyKeyFromOrigKey(fOriginalKey, copyParams, copyKey);
+ }
+}
+
+void GrBitmapTextureMaker::didCacheCopy(const GrUniqueKey& copyKey) {
+ GrInstallBitmapUniqueKeyInvalidator(copyKey, fBitmap.pixelRef());
+}
+
+SkAlphaType GrBitmapTextureMaker::alphaType() const {
+ return fBitmap.alphaType();
+}
+
+SkColorSpace* GrBitmapTextureMaker::getColorSpace() {
+ return fBitmap.colorSpace();
+}
+
+//////////////////////////////////////////////////////////////////////////////
+static bool cacher_is_alpha_only(const SkImageCacherator& cacher) {
+ return kAlpha_8_SkColorType == cacher.info().colorType();
+}
+GrImageTextureMaker::GrImageTextureMaker(GrContext* context, SkImageCacherator* cacher,
+ const SkImage* client, SkImage::CachingHint chint)
+ : INHERITED(context, cacher->info().width(), cacher->info().height(),
+ cacher_is_alpha_only(*cacher))
+ , fCacher(cacher)
+ , fClient(client)
+ , fCachingHint(chint) {
+ if (client) {
+ GrMakeKeyFromImageID(&fOriginalKey, client->uniqueID(),
+ SkIRect::MakeWH(this->width(), this->height()));
+ }
+}
+
+GrTexture* GrImageTextureMaker::refOriginalTexture(bool willBeMipped,
+ SkSourceGammaTreatment gammaTreatment) {
+ return fCacher->lockTexture(this->context(), fOriginalKey, fClient, fCachingHint, willBeMipped,
+ gammaTreatment);
+}
+
+void GrImageTextureMaker::makeCopyKey(const CopyParams& stretch, GrUniqueKey* paramsCopyKey) {
+ if (fOriginalKey.isValid() && SkImage::kAllow_CachingHint == fCachingHint) {
+ MakeCopyKeyFromOrigKey(fOriginalKey, stretch, paramsCopyKey);
+ }
+}
+
+void GrImageTextureMaker::didCacheCopy(const GrUniqueKey& copyKey) {
+ if (fClient) {
+ as_IB(fClient)->notifyAddedToCache();
+ }
+}
+
+SkAlphaType GrImageTextureMaker::alphaType() const {
+ return fCacher->info().alphaType();
+}
+
+SkColorSpace* GrImageTextureMaker::getColorSpace() {
+ return fCacher->info().colorSpace();
+}
diff --git a/gfx/skia/skia/src/gpu/GrImageIDTextureAdjuster.h b/gfx/skia/skia/src/gpu/GrImageIDTextureAdjuster.h
new file mode 100644
index 000000000..36ac0ad3e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrImageIDTextureAdjuster.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrImageIDTextureAdjuster_DEFINED
+#define GrImageIDTextureAdjuster_DEFINED
+
+#include "GrTextureParamsAdjuster.h"
+#include "SkImage.h"
+
+class SkBitmap;
+class SkImage_Base;
+class SkImageCacherator;
+
+/** This class manages the conversion of SW-backed bitmaps to GrTextures. If the input bitmap is
+ non-volatile the texture is cached using a key created from the pixels' image id and the
+ subset of the pixelref specified by the bitmap. */
+class GrBitmapTextureMaker : public GrTextureMaker {
+public:
+ GrBitmapTextureMaker(GrContext* context, const SkBitmap& bitmap);
+
+protected:
+ GrTexture* refOriginalTexture(bool willBeMipped, SkSourceGammaTreatment) override;
+
+ void makeCopyKey(const CopyParams& copyParams, GrUniqueKey* copyKey) override;
+
+ void didCacheCopy(const GrUniqueKey& copyKey) override;
+
+ SkAlphaType alphaType() const override;
+ SkColorSpace* getColorSpace() override;
+
+private:
+ const SkBitmap fBitmap;
+ GrUniqueKey fOriginalKey;
+
+ typedef GrTextureMaker INHERITED;
+};
+
+/** This class manages the conversion of generator-backed images to GrTextures. If the caching hint
+ is kAllow the image's ID is used for the cache key. */
+class GrImageTextureMaker : public GrTextureMaker {
+public:
+ GrImageTextureMaker(GrContext* context, SkImageCacherator* cacher, const SkImage* client,
+ SkImage::CachingHint chint);
+
+protected:
+ // TODO: consider overriding this, for the case where the underlying generator might be
+ // able to efficiently produce a "stretched" texture natively (e.g. picture-backed)
+ // GrTexture* generateTextureForParams(const CopyParams&) override;
+
+ GrTexture* refOriginalTexture(bool willBeMipped, SkSourceGammaTreatment) override;
+ void makeCopyKey(const CopyParams& stretch, GrUniqueKey* paramsCopyKey) override;
+ void didCacheCopy(const GrUniqueKey& copyKey) override;
+
+ SkAlphaType alphaType() const override;
+ SkColorSpace* getColorSpace() override;
+
+private:
+ SkImageCacherator* fCacher;
+ const SkImage* fClient;
+ GrUniqueKey fOriginalKey;
+ SkImage::CachingHint fCachingHint;
+
+ typedef GrTextureMaker INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrInvariantOutput.cpp b/gfx/skia/skia/src/gpu/GrInvariantOutput.cpp
new file mode 100644
index 000000000..ee64d333a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrInvariantOutput.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrInvariantOutput.h"
+
+#ifdef SK_DEBUG
+
+void GrInvariantOutput::validate() const {
+ if (fIsSingleComponent) {
+ SkASSERT(0 == fValidFlags || kRGBA_GrColorComponentFlags == fValidFlags);
+ if (kRGBA_GrColorComponentFlags == fValidFlags) {
+ SkASSERT(this->colorComponentsAllEqual());
+ }
+ }
+
+ // If we claim that we are not using the input color we must not be modulating the input.
+ SkASSERT(fNonMulStageFound || fWillUseInputColor);
+}
+
+bool GrInvariantOutput::colorComponentsAllEqual() const {
+ unsigned colorA = GrColorUnpackA(fColor);
+    return (GrColorUnpackR(fColor) == colorA &&
+ GrColorUnpackG(fColor) == colorA &&
+ GrColorUnpackB(fColor) == colorA);
+}
+
+#endif // end DEBUG
diff --git a/gfx/skia/skia/src/gpu/GrMemoryPool.cpp b/gfx/skia/skia/src/gpu/GrMemoryPool.cpp
new file mode 100644
index 000000000..6bc0f5460
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrMemoryPool.cpp
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrMemoryPool.h"
+
+#ifdef SK_DEBUG
+ #define VALIDATE this->validate()
+#else
+ #define VALIDATE
+#endif
+
+GrMemoryPool::GrMemoryPool(size_t preallocSize, size_t minAllocSize) {
+ SkDEBUGCODE(fAllocationCnt = 0);
+ SkDEBUGCODE(fAllocBlockCnt = 0);
+
+ minAllocSize = SkTMax<size_t>(minAllocSize, 1 << 10);
+ fMinAllocSize = GrSizeAlignUp(minAllocSize + kPerAllocPad, kAlignment);
+ fPreallocSize = GrSizeAlignUp(preallocSize + kPerAllocPad, kAlignment);
+ fPreallocSize = SkTMax(fPreallocSize, fMinAllocSize);
+ fSize = 0;
+
+ fHead = CreateBlock(fPreallocSize);
+ fTail = fHead;
+ fHead->fNext = nullptr;
+ fHead->fPrev = nullptr;
+ VALIDATE;
+}
+
+GrMemoryPool::~GrMemoryPool() {
+ VALIDATE;
+ SkASSERT(0 == fAllocationCnt);
+ SkASSERT(fHead == fTail);
+ SkASSERT(0 == fHead->fLiveCount);
+ DeleteBlock(fHead);
+}
+
+void* GrMemoryPool::allocate(size_t size) {
+ VALIDATE;
+ size += kPerAllocPad;
+ size = GrSizeAlignUp(size, kAlignment);
+ if (fTail->fFreeSize < size) {
+ size_t blockSize = size;
+ blockSize = SkTMax<size_t>(blockSize, fMinAllocSize);
+ BlockHeader* block = CreateBlock(blockSize);
+
+ block->fPrev = fTail;
+ block->fNext = nullptr;
+ SkASSERT(nullptr == fTail->fNext);
+ fTail->fNext = block;
+ fTail = block;
+ fSize += block->fSize;
+ SkDEBUGCODE(++fAllocBlockCnt);
+ }
+ SkASSERT(kAssignedMarker == fTail->fBlockSentinal);
+ SkASSERT(fTail->fFreeSize >= size);
+ intptr_t ptr = fTail->fCurrPtr;
+ // We stash a pointer to the block header, just before the allocated space,
+ // so that we can decrement the live count on delete in constant time.
+ AllocHeader* allocData = reinterpret_cast<AllocHeader*>(ptr);
+ SkDEBUGCODE(allocData->fSentinal = kAssignedMarker);
+ allocData->fHeader = fTail;
+ ptr += kPerAllocPad;
+ fTail->fPrevPtr = fTail->fCurrPtr;
+ fTail->fCurrPtr += size;
+ fTail->fFreeSize -= size;
+ fTail->fLiveCount += 1;
+
+ SkDEBUGCODE(++fAllocationCnt);
+ VALIDATE;
+ return reinterpret_cast<void*>(ptr);
+}
+
+void GrMemoryPool::release(void* p) {
+ VALIDATE;
+ intptr_t ptr = reinterpret_cast<intptr_t>(p) - kPerAllocPad;
+ AllocHeader* allocData = reinterpret_cast<AllocHeader*>(ptr);
+ SkASSERT(kAssignedMarker == allocData->fSentinal);
+ SkDEBUGCODE(allocData->fSentinal = kFreedMarker);
+ BlockHeader* block = allocData->fHeader;
+ SkASSERT(kAssignedMarker == block->fBlockSentinal);
+ if (1 == block->fLiveCount) {
+ // the head block is special, it is reset rather than deleted
+ if (fHead == block) {
+ fHead->fCurrPtr = reinterpret_cast<intptr_t>(fHead) + kHeaderSize;
+ fHead->fLiveCount = 0;
+ fHead->fFreeSize = fPreallocSize;
+ } else {
+ BlockHeader* prev = block->fPrev;
+ BlockHeader* next = block->fNext;
+ SkASSERT(prev);
+ prev->fNext = next;
+ if (next) {
+ next->fPrev = prev;
+ } else {
+ SkASSERT(fTail == block);
+ fTail = prev;
+ }
+ fSize -= block->fSize;
+ DeleteBlock(block);
+ SkDEBUGCODE(fAllocBlockCnt--);
+ }
+ } else {
+ --block->fLiveCount;
+ // Trivial reclaim: if we're releasing the most recent allocation, reuse it
+ if (block->fPrevPtr == ptr) {
+ block->fFreeSize += (block->fCurrPtr - block->fPrevPtr);
+ block->fCurrPtr = block->fPrevPtr;
+ }
+ }
+ SkDEBUGCODE(--fAllocationCnt);
+ VALIDATE;
+}
+
+GrMemoryPool::BlockHeader* GrMemoryPool::CreateBlock(size_t size) {
+ size_t paddedSize = size + kHeaderSize;
+ BlockHeader* block =
+ reinterpret_cast<BlockHeader*>(sk_malloc_throw(paddedSize));
+ // we assume malloc gives us aligned memory
+ SkASSERT(!(reinterpret_cast<intptr_t>(block) % kAlignment));
+ SkDEBUGCODE(block->fBlockSentinal = kAssignedMarker);
+ block->fLiveCount = 0;
+ block->fFreeSize = size;
+ block->fCurrPtr = reinterpret_cast<intptr_t>(block) + kHeaderSize;
+ block->fPrevPtr = 0; // gcc warns on assigning nullptr to an intptr_t.
+ block->fSize = paddedSize;
+ return block;
+}
+
+void GrMemoryPool::DeleteBlock(BlockHeader* block) {
+ SkASSERT(kAssignedMarker == block->fBlockSentinal);
+ SkDEBUGCODE(block->fBlockSentinal = kFreedMarker); // FWIW
+ sk_free(block);
+}
+
+void GrMemoryPool::validate() {
+#ifdef SK_DEBUG
+ BlockHeader* block = fHead;
+ BlockHeader* prev = nullptr;
+ SkASSERT(block);
+ int allocCount = 0;
+ do {
+ SkASSERT(kAssignedMarker == block->fBlockSentinal);
+ allocCount += block->fLiveCount;
+ SkASSERT(prev == block->fPrev);
+ if (prev) {
+ SkASSERT(prev->fNext == block);
+ }
+
+ intptr_t b = reinterpret_cast<intptr_t>(block);
+ size_t ptrOffset = block->fCurrPtr - b;
+ size_t totalSize = ptrOffset + block->fFreeSize;
+ size_t userSize = totalSize - kHeaderSize;
+ intptr_t userStart = b + kHeaderSize;
+
+ SkASSERT(!(b % kAlignment));
+ SkASSERT(!(totalSize % kAlignment));
+ SkASSERT(!(userSize % kAlignment));
+ SkASSERT(!(block->fCurrPtr % kAlignment));
+ if (fHead != block) {
+ SkASSERT(block->fLiveCount);
+ SkASSERT(userSize >= fMinAllocSize);
+ } else {
+ SkASSERT(userSize == fPreallocSize);
+ }
+ if (!block->fLiveCount) {
+ SkASSERT(ptrOffset == kHeaderSize);
+ SkASSERT(userStart == block->fCurrPtr);
+ } else {
+ AllocHeader* allocData = reinterpret_cast<AllocHeader*>(userStart);
+ SkASSERT(allocData->fSentinal == kAssignedMarker ||
+ allocData->fSentinal == kFreedMarker);
+ SkASSERT(block == allocData->fHeader);
+ }
+
+ prev = block;
+ } while ((block = block->fNext));
+ SkASSERT(allocCount == fAllocationCnt);
+ SkASSERT(prev == fTail);
+ SkASSERT(fAllocBlockCnt != 0 || fSize == 0);
+#endif
+}
diff --git a/gfx/skia/skia/src/gpu/GrMemoryPool.h b/gfx/skia/skia/src/gpu/GrMemoryPool.h
new file mode 100644
index 000000000..43826d354
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrMemoryPool.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMemoryPool_DEFINED
+#define GrMemoryPool_DEFINED
+
+#include "GrTypes.h"
+
+/**
+ * Allocates memory in blocks and parcels out space in the blocks for allocation
+ * requests. It is optimized for allocate / release speed over memory
+ * efficiency. The interface is designed to be used to implement operator new
+ * and delete overrides. All allocations are expected to be released before the
+ * pool's destructor is called. Allocations will be 8-byte aligned.
+ */
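+// A minimal sketch of the intended use, routing a type's allocations through a pool by
+// overriding operator new/delete (PooledThing and gPool are hypothetical, not part of Skia):
+//
+//   class PooledThing {
+//   public:
+//       void* operator new(size_t size) { return gPool->allocate(size); }
+//       void  operator delete(void* p)  { gPool->release(p); }
+//   };
+//
+// where gPool points at a long-lived GrMemoryPool, e.g. GrMemoryPool(16 * 1024, 16 * 1024).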
+class GrMemoryPool {
+public:
+ /**
+ * Prealloc size is the amount of space to make available at pool creation
+ * time and keep around until pool destruction. The min alloc size is the
+ * smallest allowed size of additional allocations.
+ */
+ GrMemoryPool(size_t preallocSize, size_t minAllocSize);
+
+ ~GrMemoryPool();
+
+ /**
+ * Allocates memory. The memory must be freed with release().
+ */
+ void* allocate(size_t size);
+
+ /**
+ * p must have been returned by allocate()
+ */
+ void release(void* p);
+
+ /**
+ * Returns true if there are no unreleased allocations.
+ */
+ bool isEmpty() const { return fTail == fHead && !fHead->fLiveCount; }
+
+ /**
+ * Returns the total allocated size of the GrMemoryPool minus any preallocated amount
+ */
+ size_t size() const { return fSize; }
+
+private:
+ struct BlockHeader;
+
+ static BlockHeader* CreateBlock(size_t size);
+
+ static void DeleteBlock(BlockHeader* block);
+
+ void validate();
+
+ struct BlockHeader {
+#ifdef SK_DEBUG
+ uint32_t fBlockSentinal; ///< known value to check for bad back pointers to blocks
+#endif
+ BlockHeader* fNext; ///< doubly-linked list of blocks.
+ BlockHeader* fPrev;
+ int fLiveCount; ///< number of outstanding allocations in the
+ ///< block.
+        intptr_t fCurrPtr; ///< ptr to the start of the block's free space.
+        intptr_t fPrevPtr; ///< ptr to the last allocation made.
+ size_t fFreeSize; ///< amount of free space left in the block.
+ size_t fSize; ///< total allocated size of the block
+ };
+
+ static const uint32_t kAssignedMarker = 0xCDCDCDCD;
+ static const uint32_t kFreedMarker = 0xEFEFEFEF;
+
+ struct AllocHeader {
+#ifdef SK_DEBUG
+ uint32_t fSentinal; ///< known value to check for memory stomping (e.g., (CD)*)
+#endif
+ BlockHeader* fHeader; ///< pointer back to the block header in which an alloc resides
+ };
+
+ enum {
+ // We assume this alignment is good enough for everybody.
+ kAlignment = 8,
+ kHeaderSize = GR_CT_ALIGN_UP(sizeof(BlockHeader), kAlignment),
+ kPerAllocPad = GR_CT_ALIGN_UP(sizeof(AllocHeader), kAlignment),
+ };
+ size_t fSize;
+ size_t fPreallocSize;
+ size_t fMinAllocSize;
+ BlockHeader* fHead;
+ BlockHeader* fTail;
+#ifdef SK_DEBUG
+ int fAllocationCnt;
+ int fAllocBlockCnt;
+#endif
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrMesh.h b/gfx/skia/skia/src/gpu/GrMesh.h
new file mode 100644
index 000000000..964e0b4a8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrMesh.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMesh_DEFINED
+#define GrMesh_DEFINED
+
+#include "GrBuffer.h"
+#include "GrGpuResourceRef.h"
+
+class GrNonInstancedMesh {
+public:
+ GrPrimitiveType primitiveType() const { return fPrimitiveType; }
+ int startVertex() const { return fStartVertex; }
+ int startIndex() const { return fStartIndex; }
+ int vertexCount() const { return fVertexCount; }
+ int indexCount() const { return fIndexCount; }
+ bool isIndexed() const { return fIndexCount > 0; }
+
+ const GrBuffer* vertexBuffer() const { return fVertexBuffer.get(); }
+ const GrBuffer* indexBuffer() const { return fIndexBuffer.get(); }
+
+protected:
+ GrPrimitiveType fPrimitiveType;
+ int fStartVertex;
+ int fStartIndex;
+ int fVertexCount;
+ int fIndexCount;
+ GrPendingIOResource<const GrBuffer, kRead_GrIOType> fVertexBuffer;
+ GrPendingIOResource<const GrBuffer, kRead_GrIOType> fIndexBuffer;
+ friend class GrMesh;
+};
+
+/**
+ * Used to communicate index and vertex buffers, counts, and offsets for a draw from GrBatch to
+ * GrGpu. It also holds the primitive type for the draw. TODO: Consider moving ownership of this
+ * and draw-issuing responsibility to GrPrimitiveProcessor. The rest of the vertex info lives there
+ * already (stride, attribute mappings).
+ */
+class GrMesh : public GrNonInstancedMesh {
+public:
+ GrMesh() {}
+ GrMesh(const GrMesh& di) { (*this) = di; }
+ GrMesh& operator =(const GrMesh& di);
+
+ void init(GrPrimitiveType primType, const GrBuffer* vertexBuffer, int startVertex,
+ int vertexCount) {
+ SkASSERT(vertexBuffer);
+ SkASSERT(vertexCount);
+ SkASSERT(startVertex >= 0);
+ fPrimitiveType = primType;
+ fVertexBuffer.reset(vertexBuffer);
+ fIndexBuffer.reset(nullptr);
+ fStartVertex = startVertex;
+ fStartIndex = 0;
+ fVertexCount = vertexCount;
+ fIndexCount = 0;
+ fInstanceCount = 0;
+ fVerticesPerInstance = 0;
+ fIndicesPerInstance = 0;
+ fMaxInstancesPerDraw = 0;
+ }
+
+ void initIndexed(GrPrimitiveType primType,
+ const GrBuffer* vertexBuffer,
+ const GrBuffer* indexBuffer,
+ int startVertex,
+ int startIndex,
+ int vertexCount,
+ int indexCount) {
+ SkASSERT(indexBuffer);
+ SkASSERT(vertexBuffer);
+ SkASSERT(indexCount);
+ SkASSERT(vertexCount);
+ SkASSERT(startIndex >= 0);
+ SkASSERT(startVertex >= 0);
+ fPrimitiveType = primType;
+ fVertexBuffer.reset(vertexBuffer);
+ fIndexBuffer.reset(indexBuffer);
+ fStartVertex = startVertex;
+ fStartIndex = startIndex;
+ fVertexCount = vertexCount;
+ fIndexCount = indexCount;
+ fInstanceCount = 0;
+ fVerticesPerInstance = 0;
+ fIndicesPerInstance = 0;
+ fMaxInstancesPerDraw = 0;
+ }
+
+
+    /** Variation of the above that may be used when the total number of instances may exceed
+        the number of instances supported by the index buffer. To be used with the Iterator
+        below to draw in max-sized batches. */
+ void initInstanced(GrPrimitiveType primType,
+ const GrBuffer* vertexBuffer,
+ const GrBuffer* indexBuffer,
+ int startVertex,
+ int verticesPerInstance,
+ int indicesPerInstance,
+ int instanceCount,
+ int maxInstancesPerDraw) {
+ SkASSERT(vertexBuffer);
+ SkASSERT(indexBuffer);
+ SkASSERT(instanceCount);
+ SkASSERT(verticesPerInstance);
+ SkASSERT(indicesPerInstance);
+ SkASSERT(startVertex >= 0);
+ fPrimitiveType = primType;
+ fVertexBuffer.reset(vertexBuffer);
+ fIndexBuffer.reset(indexBuffer);
+ fStartVertex = startVertex;
+ fStartIndex = 0;
+ fVerticesPerInstance = verticesPerInstance;
+ fIndicesPerInstance = indicesPerInstance;
+ fInstanceCount = instanceCount;
+ fVertexCount = instanceCount * fVerticesPerInstance;
+ fIndexCount = instanceCount * fIndicesPerInstance;
+ fMaxInstancesPerDraw = maxInstancesPerDraw;
+ }
+
+
+    /** These return 0 if initInstanced was not used to initialize the GrMesh. */
+ int verticesPerInstance() const { return fVerticesPerInstance; }
+ int indicesPerInstance() const { return fIndicesPerInstance; }
+ int instanceCount() const { return fInstanceCount; }
+
+ bool isInstanced() const { return fInstanceCount > 0; }
+
+ class Iterator {
+ public:
+ const GrNonInstancedMesh* init(const GrMesh& mesh) {
+ fMesh = &mesh;
+ if (mesh.fInstanceCount <= mesh.fMaxInstancesPerDraw) {
+ fInstancesRemaining = 0;
+ // Note, this also covers the non-instanced case!
+ return &mesh;
+ }
+ SkASSERT(mesh.isInstanced());
+ fInstanceBatch.fIndexBuffer.reset(mesh.fIndexBuffer.get());
+ fInstanceBatch.fVertexBuffer.reset(mesh.fVertexBuffer.get());
+ fInstanceBatch.fIndexCount = mesh.fMaxInstancesPerDraw *
+ mesh.fIndicesPerInstance;
+ fInstanceBatch.fVertexCount = mesh.fMaxInstancesPerDraw *
+ mesh.fVerticesPerInstance;
+ fInstanceBatch.fPrimitiveType = mesh.fPrimitiveType;
+ fInstanceBatch.fStartIndex = mesh.fStartIndex;
+ fInstanceBatch.fStartVertex = mesh.fStartVertex;
+ fInstancesRemaining = mesh.fInstanceCount - mesh.fMaxInstancesPerDraw;
+ return &fInstanceBatch;
+ }
+
+ const GrNonInstancedMesh* next() {
+ if (!fInstancesRemaining) {
+ return nullptr;
+ }
+ fInstanceBatch.fStartVertex += fInstanceBatch.fVertexCount;
+ int instances = SkTMin(fInstancesRemaining, fMesh->fMaxInstancesPerDraw);
+ fInstanceBatch.fIndexCount = instances * fMesh->fIndicesPerInstance;
+ fInstanceBatch.fVertexCount = instances * fMesh->fVerticesPerInstance;
+ fInstancesRemaining -= instances;
+ return &fInstanceBatch;
+ }
+ private:
+ GrNonInstancedMesh fInstanceBatch;
+ const GrMesh* fMesh;
+ int fInstancesRemaining;
+ };
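+
+    // Typical backend-side use of the Iterator (sketch): walk the mesh in max-sized instanced
+    // batches; the first batch comes from init() and subsequent ones from next() until it
+    // returns nullptr.
+    //
+    //   GrMesh::Iterator iter;
+    //   for (const GrNonInstancedMesh* batch = iter.init(mesh); batch; batch = iter.next()) {
+    //       // issue one draw for *batch
+    //   }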
+
+private:
+ int fInstanceCount;
+ int fVerticesPerInstance;
+ int fIndicesPerInstance;
+ int fMaxInstancesPerDraw;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrNonAtomicRef.h b/gfx/skia/skia/src/gpu/GrNonAtomicRef.h
new file mode 100644
index 000000000..c23637f46
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrNonAtomicRef.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrNonAtomicRef_DEFINED
+#define GrNonAtomicRef_DEFINED
+
+#include "SkRefCnt.h"
+#include "SkTArray.h"
+
+/**
+ * A simple non-atomic ref used in the GrBackend when we don't want to pay for the overhead of a
+ * thread-safe ref-counted object.
+ */
+template<typename TSubclass> class GrNonAtomicRef : public SkNoncopyable {
+public:
+ GrNonAtomicRef() : fRefCnt(1) {}
+
+#ifdef SK_DEBUG
+ ~GrNonAtomicRef() {
+ // fRefCnt can be one when a subclass is created statically
+ SkASSERT((0 == fRefCnt || 1 == fRefCnt));
+ // Set to an invalid value.
+ fRefCnt = -10;
+ }
+#endif
+
+ bool unique() const { return 1 == fRefCnt; }
+
+ void ref() const {
+ // Once the ref cnt reaches zero it should never be ref'ed again.
+ SkASSERT(fRefCnt > 0);
+ ++fRefCnt;
+ }
+
+ void unref() const {
+ SkASSERT(fRefCnt > 0);
+ --fRefCnt;
+ if (0 == fRefCnt) {
+ GrTDeleteNonAtomicRef(static_cast<const TSubclass*>(this));
+ return;
+ }
+ }
+
+private:
+ mutable int32_t fRefCnt;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+template<typename T> inline void GrTDeleteNonAtomicRef(const T* ref) {
+ delete ref;
+}
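+
+// Sketch of typical usage (illustrative; GrFoo is a hypothetical subclass used only for the
+// example). A class opts in via CRTP, and ownership then works like SkRefCnt minus the atomics:
+//
+// class GrFoo : public GrNonAtomicRef<GrFoo> { /* ... */ };
+//
+// GrFoo* foo = new GrFoo; // created with a ref count of 1
+// foo->ref(); // ref count 2
+// foo->unref(); // ref count 1
+// foo->unref(); // reaches 0; GrTDeleteNonAtomicRef() deletes it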
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrOvalRenderer.cpp b/gfx/skia/skia/src/gpu/GrOvalRenderer.cpp
new file mode 100644
index 000000000..68b3f11d1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrOvalRenderer.cpp
@@ -0,0 +1,2171 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrOvalRenderer.h"
+
+#include "GrBatchFlushState.h"
+#include "GrBatchTest.h"
+#include "GrGeometryProcessor.h"
+#include "GrInvariantOutput.h"
+#include "GrProcessor.h"
+#include "GrResourceProvider.h"
+#include "GrStyle.h"
+#include "SkRRect.h"
+#include "SkStrokeRec.h"
+#include "batches/GrVertexBatch.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLGeometryProcessor.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLVarying.h"
+#include "glsl/GrGLSLVertexShaderBuilder.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "glsl/GrGLSLUtil.h"
+
+// TODO(joshualitt) - Break this file up during GrBatch post implementation cleanup
+
+namespace {
+
+struct EllipseVertex {
+ SkPoint fPos;
+ GrColor fColor;
+ SkPoint fOffset;
+ SkPoint fOuterRadii;
+ SkPoint fInnerRadii;
+};
+
+struct DIEllipseVertex {
+ SkPoint fPos;
+ GrColor fColor;
+ SkPoint fOuterOffset;
+ SkPoint fInnerOffset;
+};
+
+inline bool circle_stays_circle(const SkMatrix& m) {
+ return m.isSimilarity();
+}
+
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * The output of this effect is a modulation of the input color and coverage for a circle. It
+ * operates in a space normalized by the circle radius (outer radius in the case of a stroke)
+ * with origin at the circle center. Three vertex attributes are used:
+ * vec2f : position in device space of the bounding geometry vertices
+ * vec4ub: color
+ * vec4f : (p.xy, outerRad, innerRad)
+ * p is the position in the normalized space.
+ * outerRad is the outerRadius in device space.
+ * innerRad is the innerRadius in normalized space (ignored if not stroking).
+ * If fUsesDistanceVectorField is set in fragment processors in the same program, then an
+ * additional value is made available in the fragment shader via
+ * args.fFragBuilder->distanceVectorName():
+ * vec4f : (v.xy, outerDistance, innerDistance)
+ * v is a normalized vector pointing to the outer edge
+ * outerDistance is the distance to the outer edge, < 0 if we are outside of the shape
+ * if stroking, innerDistance is the distance to the inner edge, < 0 if outside
+ * Additional clip planes are supported for rendering circular arcs. The additional planes are
+ * either intersected or unioned together. Up to three planes are supported (an initial plane,
+ * a plane intersected with the initial plane, and a plane unioned with the first two). Only two
+ * are useful for any given arc, but having all three in one instance allows batching different
+ * types of arcs.
+ */
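+
+/*
+ * Worked example of the coverage math (illustrative, restating the shader emitted below): for a
+ * fragment whose normalized offset p has length d, edgeAlpha = clamp(outerRad * (1 - d), 0, 1),
+ * so coverage ramps from 1 to 0 over roughly one device pixel at the outer edge; when stroking
+ * it is additionally multiplied by clamp(outerRad * (d - innerRad), 0, 1) to fade out at the
+ * inner edge.
+ */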
+
+class CircleGeometryProcessor : public GrGeometryProcessor {
+public:
+ CircleGeometryProcessor(bool stroke, bool clipPlane, bool isectPlane, bool unionPlane,
+ const SkMatrix& localMatrix)
+ : fLocalMatrix(localMatrix) {
+ this->initClassID<CircleGeometryProcessor>();
+ fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ fInColor = &this->addVertexAttrib("inColor", kVec4ub_GrVertexAttribType);
+ fInCircleEdge = &this->addVertexAttrib("inCircleEdge", kVec4f_GrVertexAttribType);
+ if (clipPlane) {
+ fInClipPlane = &this->addVertexAttrib("inClipPlane", kVec3f_GrVertexAttribType);
+ } else {
+ fInClipPlane = nullptr;
+ }
+ if (isectPlane) {
+ fInIsectPlane = &this->addVertexAttrib("inIsectPlane", kVec3f_GrVertexAttribType);
+ } else {
+ fInIsectPlane = nullptr;
+ }
+ if (unionPlane) {
+ fInUnionPlane = &this->addVertexAttrib("inUnionPlane", kVec3f_GrVertexAttribType);
+ } else {
+ fInUnionPlane = nullptr;
+ }
+ fStroke = stroke;
+ }
+
+ bool implementsDistanceVector() const override { return !fInClipPlane; }
+
+ virtual ~CircleGeometryProcessor() {}
+
+ const char* name() const override { return "CircleEdge"; }
+
+ void getGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLProcessor::GenKey(*this, caps, b);
+ }
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override {
+ return new GLSLProcessor();
+ }
+
+private:
+ class GLSLProcessor : public GrGLSLGeometryProcessor {
+ public:
+ GLSLProcessor() {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const CircleGeometryProcessor& cgp = args.fGP.cast<CircleGeometryProcessor>();
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ GrGLSLPPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ // emit attributes
+ varyingHandler->emitAttributes(cgp);
+ fragBuilder->codeAppend("vec4 circleEdge;");
+ varyingHandler->addPassThroughAttribute(cgp.fInCircleEdge, "circleEdge");
+ if (cgp.fInClipPlane) {
+ fragBuilder->codeAppend("vec3 clipPlane;");
+ varyingHandler->addPassThroughAttribute(cgp.fInClipPlane, "clipPlane");
+ }
+ if (cgp.fInIsectPlane) {
+ SkASSERT(cgp.fInClipPlane);
+ fragBuilder->codeAppend("vec3 isectPlane;");
+ varyingHandler->addPassThroughAttribute(cgp.fInIsectPlane, "isectPlane");
+ }
+ if (cgp.fInUnionPlane) {
+ SkASSERT(cgp.fInClipPlane);
+ fragBuilder->codeAppend("vec3 unionPlane;");
+ varyingHandler->addPassThroughAttribute(cgp.fInUnionPlane, "unionPlane");
+ }
+
+ // setup pass through color
+ varyingHandler->addPassThroughAttribute(cgp.fInColor, args.fOutputColor);
+
+ // Setup position
+ this->setupPosition(vertBuilder, gpArgs, cgp.fInPosition->fName);
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ gpArgs->fPositionVar,
+ cgp.fInPosition->fName,
+ cgp.fLocalMatrix,
+ args.fFPCoordTransformHandler);
+
+ fragBuilder->codeAppend("float d = length(circleEdge.xy);");
+ fragBuilder->codeAppend("float distanceToOuterEdge = circleEdge.z * (1.0 - d);");
+ fragBuilder->codeAppend("float edgeAlpha = clamp(distanceToOuterEdge, 0.0, 1.0);");
+ if (cgp.fStroke) {
+ fragBuilder->codeAppend("float distanceToInnerEdge = circleEdge.z * (d - circleEdge.w);");
+ fragBuilder->codeAppend("float innerAlpha = clamp(distanceToInnerEdge, 0.0, 1.0);");
+ fragBuilder->codeAppend("edgeAlpha *= innerAlpha;");
+ }
+
+ if (args.fDistanceVectorName) {
+ const char* innerEdgeDistance = cgp.fStroke ? "distanceToInnerEdge" : "0.0";
+ fragBuilder->codeAppend ("if (d == 0.0) {"); // if on the center of the circle
+ fragBuilder->codeAppendf(" %s = vec4(1.0, 0.0, distanceToOuterEdge, "
+ "%s);", // no normalize
+ args.fDistanceVectorName, innerEdgeDistance);
+ fragBuilder->codeAppend ("} else {");
+ fragBuilder->codeAppendf(" %s = vec4(normalize(circleEdge.xy), distanceToOuterEdge, %s);",
+ args.fDistanceVectorName, innerEdgeDistance);
+ fragBuilder->codeAppend ("}");
+ }
+ if (cgp.fInClipPlane) {
+ fragBuilder->codeAppend("float clip = clamp(circleEdge.z * dot(circleEdge.xy, clipPlane.xy) + clipPlane.z, 0.0, 1.0);");
+ if (cgp.fInIsectPlane) {
+ fragBuilder->codeAppend("clip *= clamp(circleEdge.z * dot(circleEdge.xy, isectPlane.xy) + isectPlane.z, 0.0, 1.0);");
+ }
+ if (cgp.fInUnionPlane) {
+ fragBuilder->codeAppend("clip += (1.0 - clip)*clamp(circleEdge.z * dot(circleEdge.xy, unionPlane.xy) + unionPlane.z, 0.0, 1.0);");
+ }
+ fragBuilder->codeAppend("edgeAlpha *= clip;");
+ }
+ fragBuilder->codeAppendf("%s = vec4(edgeAlpha);", args.fOutputCoverage);
+ }
+
+ static void GenKey(const GrGeometryProcessor& gp,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const CircleGeometryProcessor& cgp = gp.cast<CircleGeometryProcessor>();
+ uint16_t key;
+ key = cgp.fStroke ? 0x01 : 0x0;
+ key |= cgp.fLocalMatrix.hasPerspective() ? 0x02 : 0x0;
+ key |= cgp.fInClipPlane ? 0x04 : 0x0;
+ key |= cgp.fInIsectPlane ? 0x08 : 0x0;
+ key |= cgp.fInUnionPlane ? 0x10 : 0x0;
+ b->add32(key);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& primProc,
+ FPCoordTransformIter&& transformIter) override {
+ this->setTransformDataHelper(primProc.cast<CircleGeometryProcessor>().fLocalMatrix,
+ pdman, &transformIter);
+ }
+
+ private:
+ typedef GrGLSLGeometryProcessor INHERITED;
+ };
+
+ SkMatrix fLocalMatrix;
+ const Attribute* fInPosition;
+ const Attribute* fInColor;
+ const Attribute* fInCircleEdge;
+ const Attribute* fInClipPlane;
+ const Attribute* fInIsectPlane;
+ const Attribute* fInUnionPlane;
+ bool fStroke;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(CircleGeometryProcessor);
+
+sk_sp<GrGeometryProcessor> CircleGeometryProcessor::TestCreate(GrProcessorTestData* d) {
+ return sk_sp<GrGeometryProcessor>(
+ new CircleGeometryProcessor(d->fRandom->nextBool(), d->fRandom->nextBool(),
+ d->fRandom->nextBool(), d->fRandom->nextBool(),
+ GrTest::TestMatrix(d->fRandom)));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * The output of this effect is a modulation of the input color and coverage for an axis-aligned
+ * ellipse, specified as a 2D offset from center, and the reciprocals of the outer and inner radii,
+ * in both x and y directions.
+ *
+ * We are using an implicit function of x^2/a^2 + y^2/b^2 - 1 = 0.
+ */
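+
+/*
+ * Sketch of the coverage derivation (illustrative, restating the shader emitted below): with
+ * F(x, y) = (x/a)^2 + (y/b)^2 - 1, the shader approximates the signed distance to the edge as
+ * F / |grad F|, where grad F = (2x/a^2, 2y/b^2), and uses
+ * coverage = clamp(0.5 - F / |grad F|, 0, 1) to get an approximately one-pixel AA ramp centered
+ * on the ellipse edge.
+ */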
+
+class EllipseGeometryProcessor : public GrGeometryProcessor {
+public:
+ EllipseGeometryProcessor(bool stroke, const SkMatrix& localMatrix)
+ : fLocalMatrix(localMatrix) {
+ this->initClassID<EllipseGeometryProcessor>();
+ fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType);
+ fInColor = &this->addVertexAttrib("inColor", kVec4ub_GrVertexAttribType);
+ fInEllipseOffset = &this->addVertexAttrib("inEllipseOffset", kVec2f_GrVertexAttribType);
+ fInEllipseRadii = &this->addVertexAttrib("inEllipseRadii", kVec4f_GrVertexAttribType);
+ fStroke = stroke;
+ }
+
+ virtual ~EllipseGeometryProcessor() {}
+
+ const char* name() const override { return "EllipseEdge"; }
+
+ void getGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLProcessor::GenKey(*this, caps, b);
+ }
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override {
+ return new GLSLProcessor();
+ }
+
+private:
+ class GLSLProcessor : public GrGLSLGeometryProcessor {
+ public:
+ GLSLProcessor() {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const EllipseGeometryProcessor& egp = args.fGP.cast<EllipseGeometryProcessor>();
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(egp);
+
+ GrGLSLVertToFrag ellipseOffsets(kVec2f_GrSLType);
+ varyingHandler->addVarying("EllipseOffsets", &ellipseOffsets);
+ vertBuilder->codeAppendf("%s = %s;", ellipseOffsets.vsOut(),
+ egp.fInEllipseOffset->fName);
+
+ GrGLSLVertToFrag ellipseRadii(kVec4f_GrSLType);
+ varyingHandler->addVarying("EllipseRadii", &ellipseRadii);
+ vertBuilder->codeAppendf("%s = %s;", ellipseRadii.vsOut(),
+ egp.fInEllipseRadii->fName);
+
+ GrGLSLPPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ // setup pass through color
+ varyingHandler->addPassThroughAttribute(egp.fInColor, args.fOutputColor);
+
+ // Setup position
+ this->setupPosition(vertBuilder, gpArgs, egp.fInPosition->fName);
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ gpArgs->fPositionVar,
+ egp.fInPosition->fName,
+ egp.fLocalMatrix,
+ args.fFPCoordTransformHandler);
+
+ // for outer curve
+ fragBuilder->codeAppendf("vec2 scaledOffset = %s*%s.xy;", ellipseOffsets.fsIn(),
+ ellipseRadii.fsIn());
+ fragBuilder->codeAppend("float test = dot(scaledOffset, scaledOffset) - 1.0;");
+ fragBuilder->codeAppendf("vec2 grad = 2.0*scaledOffset*%s.xy;", ellipseRadii.fsIn());
+ fragBuilder->codeAppend("float grad_dot = dot(grad, grad);");
+
+ // avoid calling inversesqrt on zero.
+ fragBuilder->codeAppend("grad_dot = max(grad_dot, 1.0e-4);");
+ fragBuilder->codeAppend("float invlen = inversesqrt(grad_dot);");
+ fragBuilder->codeAppend("float edgeAlpha = clamp(0.5-test*invlen, 0.0, 1.0);");
+
+ // for inner curve
+ if (egp.fStroke) {
+ fragBuilder->codeAppendf("scaledOffset = %s*%s.zw;",
+ ellipseOffsets.fsIn(), ellipseRadii.fsIn());
+ fragBuilder->codeAppend("test = dot(scaledOffset, scaledOffset) - 1.0;");
+ fragBuilder->codeAppendf("grad = 2.0*scaledOffset*%s.zw;",
+ ellipseRadii.fsIn());
+ fragBuilder->codeAppend("invlen = inversesqrt(dot(grad, grad));");
+ fragBuilder->codeAppend("edgeAlpha *= clamp(0.5+test*invlen, 0.0, 1.0);");
+ }
+
+ fragBuilder->codeAppendf("%s = vec4(edgeAlpha);", args.fOutputCoverage);
+ }
+
+ static void GenKey(const GrGeometryProcessor& gp,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const EllipseGeometryProcessor& egp = gp.cast<EllipseGeometryProcessor>();
+ uint16_t key = egp.fStroke ? 0x1 : 0x0;
+ key |= egp.fLocalMatrix.hasPerspective() ? 0x2 : 0x0;
+ b->add32(key);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& primProc,
+ FPCoordTransformIter&& transformIter) override {
+ const EllipseGeometryProcessor& egp = primProc.cast<EllipseGeometryProcessor>();
+ this->setTransformDataHelper(egp.fLocalMatrix, pdman, &transformIter);
+ }
+
+ private:
+ typedef GrGLSLGeometryProcessor INHERITED;
+ };
+
+ const Attribute* fInPosition;
+ const Attribute* fInColor;
+ const Attribute* fInEllipseOffset;
+ const Attribute* fInEllipseRadii;
+ SkMatrix fLocalMatrix;
+ bool fStroke;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(EllipseGeometryProcessor);
+
+sk_sp<GrGeometryProcessor> EllipseGeometryProcessor::TestCreate(GrProcessorTestData* d) {
+ return sk_sp<GrGeometryProcessor>(
+ new EllipseGeometryProcessor(d->fRandom->nextBool(), GrTest::TestMatrix(d->fRandom)));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * The output of this effect is a modulation of the input color and coverage for an ellipse,
+ * specified as a 2D offset from center for both the outer and inner paths (if stroked). The
+ * implict equation used is for a unit circle (x^2 + y^2 - 1 = 0) and the edge corrected by
+ * using differentials.
+ *
+ * The result is device-independent and can be used with any affine matrix.
+ */
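+
+/*
+ * Sketch of the math (illustrative, restating the shader emitted below): the offsets map the
+ * ellipse onto the unit circle, so the shader evaluates F(u, v) = u^2 + v^2 - 1 and estimates
+ * the screen-space distance to the edge as F / |grad F|, with grad F assembled from dFdx/dFdy
+ * via the chain rule; this is what keeps the result correct under any affine view matrix.
+ */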
+
+enum class DIEllipseStyle { kStroke = 0, kHairline, kFill };
+
+class DIEllipseGeometryProcessor : public GrGeometryProcessor {
+public:
+ DIEllipseGeometryProcessor(const SkMatrix& viewMatrix, DIEllipseStyle style)
+ : fViewMatrix(viewMatrix) {
+ this->initClassID<DIEllipseGeometryProcessor>();
+ fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ fInColor = &this->addVertexAttrib("inColor", kVec4ub_GrVertexAttribType);
+ fInEllipseOffsets0 = &this->addVertexAttrib("inEllipseOffsets0", kVec2f_GrVertexAttribType);
+ fInEllipseOffsets1 = &this->addVertexAttrib("inEllipseOffsets1", kVec2f_GrVertexAttribType);
+ fStyle = style;
+ }
+
+
+ virtual ~DIEllipseGeometryProcessor() {}
+
+ const char* name() const override { return "DIEllipseEdge"; }
+
+ void getGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLProcessor::GenKey(*this, caps, b);
+ }
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override {
+ return new GLSLProcessor();
+ }
+
+private:
+ class GLSLProcessor : public GrGLSLGeometryProcessor {
+ public:
+ GLSLProcessor()
+ : fViewMatrix(SkMatrix::InvalidMatrix()) {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const DIEllipseGeometryProcessor& diegp = args.fGP.cast<DIEllipseGeometryProcessor>();
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(diegp);
+
+ GrGLSLVertToFrag offsets0(kVec2f_GrSLType);
+ varyingHandler->addVarying("EllipseOffsets0", &offsets0);
+ vertBuilder->codeAppendf("%s = %s;", offsets0.vsOut(),
+ diegp.fInEllipseOffsets0->fName);
+
+ GrGLSLVertToFrag offsets1(kVec2f_GrSLType);
+ varyingHandler->addVarying("EllipseOffsets1", &offsets1);
+ vertBuilder->codeAppendf("%s = %s;", offsets1.vsOut(),
+ diegp.fInEllipseOffsets1->fName);
+
+ GrGLSLPPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ varyingHandler->addPassThroughAttribute(diegp.fInColor, args.fOutputColor);
+
+ // Setup position
+ this->setupPosition(vertBuilder,
+ uniformHandler,
+ gpArgs,
+ diegp.fInPosition->fName,
+ diegp.fViewMatrix,
+ &fViewMatrixUniform);
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ gpArgs->fPositionVar,
+ diegp.fInPosition->fName,
+ args.fFPCoordTransformHandler);
+
+ SkAssertResult(fragBuilder->enableFeature(
+ GrGLSLFragmentShaderBuilder::kStandardDerivatives_GLSLFeature));
+ // for outer curve
+ fragBuilder->codeAppendf("vec2 scaledOffset = %s.xy;", offsets0.fsIn());
+ fragBuilder->codeAppend("float test = dot(scaledOffset, scaledOffset) - 1.0;");
+ fragBuilder->codeAppendf("vec2 duvdx = dFdx(%s);", offsets0.fsIn());
+ fragBuilder->codeAppendf("vec2 duvdy = dFdy(%s);", offsets0.fsIn());
+ fragBuilder->codeAppendf("vec2 grad = vec2(2.0*%s.x*duvdx.x + 2.0*%s.y*duvdx.y,"
+ " 2.0*%s.x*duvdy.x + 2.0*%s.y*duvdy.y);",
+ offsets0.fsIn(), offsets0.fsIn(), offsets0.fsIn(),
+ offsets0.fsIn());
+
+ fragBuilder->codeAppend("float grad_dot = dot(grad, grad);");
+ // avoid calling inversesqrt on zero.
+ fragBuilder->codeAppend("grad_dot = max(grad_dot, 1.0e-4);");
+ fragBuilder->codeAppend("float invlen = inversesqrt(grad_dot);");
+ if (DIEllipseStyle::kHairline == diegp.fStyle) {
+ // can probably do this with one step
+ fragBuilder->codeAppend("float edgeAlpha = clamp(1.0-test*invlen, 0.0, 1.0);");
+ fragBuilder->codeAppend("edgeAlpha *= clamp(1.0+test*invlen, 0.0, 1.0);");
+ } else {
+ fragBuilder->codeAppend("float edgeAlpha = clamp(0.5-test*invlen, 0.0, 1.0);");
+ }
+
+ // for inner curve
+ if (DIEllipseStyle::kStroke == diegp.fStyle) {
+ fragBuilder->codeAppendf("scaledOffset = %s.xy;", offsets1.fsIn());
+ fragBuilder->codeAppend("test = dot(scaledOffset, scaledOffset) - 1.0;");
+ fragBuilder->codeAppendf("duvdx = dFdx(%s);", offsets1.fsIn());
+ fragBuilder->codeAppendf("duvdy = dFdy(%s);", offsets1.fsIn());
+ fragBuilder->codeAppendf("grad = vec2(2.0*%s.x*duvdx.x + 2.0*%s.y*duvdx.y,"
+ " 2.0*%s.x*duvdy.x + 2.0*%s.y*duvdy.y);",
+ offsets1.fsIn(), offsets1.fsIn(), offsets1.fsIn(),
+ offsets1.fsIn());
+ fragBuilder->codeAppend("invlen = inversesqrt(dot(grad, grad));");
+ fragBuilder->codeAppend("edgeAlpha *= clamp(0.5+test*invlen, 0.0, 1.0);");
+ }
+
+ fragBuilder->codeAppendf("%s = vec4(edgeAlpha);", args.fOutputCoverage);
+ }
+
+ static void GenKey(const GrGeometryProcessor& gp,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const DIEllipseGeometryProcessor& diegp = gp.cast<DIEllipseGeometryProcessor>();
+ uint16_t key = static_cast<uint16_t>(diegp.fStyle);
+ key |= ComputePosKey(diegp.fViewMatrix) << 10;
+ b->add32(key);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& gp,
+ FPCoordTransformIter&& transformIter) override {
+ const DIEllipseGeometryProcessor& diegp = gp.cast<DIEllipseGeometryProcessor>();
+
+ if (!diegp.fViewMatrix.isIdentity() && !fViewMatrix.cheapEqualTo(diegp.fViewMatrix)) {
+ fViewMatrix = diegp.fViewMatrix;
+ float viewMatrix[3 * 3];
+ GrGLSLGetMatrix<3>(viewMatrix, fViewMatrix);
+ pdman.setMatrix3f(fViewMatrixUniform, viewMatrix);
+ }
+ this->setTransformDataHelper(SkMatrix::I(), pdman, &transformIter);
+ }
+
+ private:
+ SkMatrix fViewMatrix;
+ UniformHandle fViewMatrixUniform;
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+ };
+
+ const Attribute* fInPosition;
+ const Attribute* fInColor;
+ const Attribute* fInEllipseOffsets0;
+ const Attribute* fInEllipseOffsets1;
+ SkMatrix fViewMatrix;
+ DIEllipseStyle fStyle;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(DIEllipseGeometryProcessor);
+
+sk_sp<GrGeometryProcessor> DIEllipseGeometryProcessor::TestCreate(GrProcessorTestData* d) {
+ return sk_sp<GrGeometryProcessor>(
+ new DIEllipseGeometryProcessor(GrTest::TestMatrix(d->fRandom),
+ (DIEllipseStyle)(d->fRandom->nextRangeU(0,2))));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class CircleBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ /** Optional extra params to render a partial arc rather than a full circle. */
+ struct ArcParams {
+ SkScalar fStartAngleRadians;
+ SkScalar fSweepAngleRadians;
+ bool fUseCenter;
+ };
+ static GrDrawBatch* Create(GrColor color, const SkMatrix& viewMatrix, SkPoint center,
+ SkScalar radius, const GrStyle& style,
+ const ArcParams* arcParams = nullptr) {
+ SkASSERT(circle_stays_circle(viewMatrix));
+ const SkStrokeRec& stroke = style.strokeRec();
+ if (style.hasPathEffect()) {
+ return nullptr;
+ }
+ SkStrokeRec::Style recStyle = stroke.getStyle();
+ if (arcParams) {
+ // Arc support depends on the style.
+ switch (recStyle) {
+ case SkStrokeRec::kStrokeAndFill_Style:
+ // This produces a strange result that this batch doesn't implement.
+ return nullptr;
+ case SkStrokeRec::kFill_Style:
+ // This supports all fills.
+ break;
+ case SkStrokeRec::kStroke_Style: // fall through
+ case SkStrokeRec::kHairline_Style:
+ // Strokes that don't use the center point are supported with butt cap.
+ if (arcParams->fUseCenter || stroke.getCap() != SkPaint::kButt_Cap) {
+ return nullptr;
+ }
+ break;
+ }
+ }
+
+ viewMatrix.mapPoints(&center, 1);
+ radius = viewMatrix.mapRadius(radius);
+ SkScalar strokeWidth = viewMatrix.mapRadius(stroke.getWidth());
+
+ bool isStrokeOnly = SkStrokeRec::kStroke_Style == recStyle ||
+ SkStrokeRec::kHairline_Style == recStyle;
+ bool hasStroke = isStrokeOnly || SkStrokeRec::kStrokeAndFill_Style == recStyle;
+
+ SkScalar innerRadius = 0.0f;
+ SkScalar outerRadius = radius;
+ SkScalar halfWidth = 0;
+ if (hasStroke) {
+ if (SkScalarNearlyZero(strokeWidth)) {
+ halfWidth = SK_ScalarHalf;
+ } else {
+ halfWidth = SkScalarHalf(strokeWidth);
+ }
+
+ outerRadius += halfWidth;
+ if (isStrokeOnly) {
+ innerRadius = radius - halfWidth;
+ }
+ }
+
+ // The radii are outset for two reasons. First, it allows the shader to perform a simpler
+ // computation because the computed alpha is zero, rather than 50%, at the radius.
+ // Second, the outer radius is used to compute the verts of the bounding box that is
+ // rendered and the outset ensures the box will cover all pixels partially covered by the circle.
+ outerRadius += SK_ScalarHalf;
+ innerRadius -= SK_ScalarHalf;
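+ // For example (illustrative numbers): a circle with device radius 10 and a stroke-only
+ // style of width 4 ends up with outerRadius = 10 + 2 + 0.5 = 12.5 and
+ // innerRadius = 10 - 2 - 0.5 = 7.5.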
+ CircleBatch* batch = new CircleBatch();
+ batch->fViewMatrixIfUsingLocalCoords = viewMatrix;
+
+ // This makes every point fully inside the intersection plane.
+ static constexpr SkScalar kUnusedIsectPlane[] = {0.f, 0.f, 1.f};
+ // This makes every point fully outside the union plane.
+ static constexpr SkScalar kUnusedUnionPlane[] = {0.f, 0.f, 0.f};
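+ // (Illustrative check against the shader's clip term,
+ // clamp(outerRad * dot(p, plane.xy) + plane.z, 0, 1): the unused isect plane always
+ // evaluates to 1 and the unused union plane always to 0, so neither changes the coverage
+ // of batches that don't use them.)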
+ SkRect devBounds = SkRect::MakeLTRB(center.fX - outerRadius, center.fY - outerRadius,
+ center.fX + outerRadius, center.fY + outerRadius);
+
+ if (arcParams) {
+ // The shader operates in a space where the circle is translated to be centered at the
+ // origin. Here we compute points on the unit circle at the starting and ending angles.
+ SkPoint startPoint, stopPoint;
+ startPoint.fY = SkScalarSinCos(arcParams->fStartAngleRadians, &startPoint.fX);
+ SkScalar endAngle = arcParams->fStartAngleRadians + arcParams->fSweepAngleRadians;
+ stopPoint.fY = SkScalarSinCos(endAngle, &stopPoint.fX);
+ // Like a fill without useCenter, butt-cap stroke can be implemented by clipping against
+ // radial lines. However, in both cases we have to be careful about the half-circle
+ // case. In that case the two radial lines are equal and so that edge gets clipped
+ // twice. Since the shared edge goes through the center we fall back on the useCenter
+ // case.
+ bool useCenter = (arcParams->fUseCenter || isStrokeOnly) &&
+ !SkScalarNearlyEqual(SkScalarAbs(arcParams->fSweepAngleRadians),
+ SK_ScalarPI);
+ if (useCenter) {
+ SkVector norm0 = {startPoint.fY, -startPoint.fX};
+ SkVector norm1 = {stopPoint.fY, -stopPoint.fX};
+ if (arcParams->fSweepAngleRadians > 0) {
+ norm0.negate();
+ } else {
+ norm1.negate();
+ }
+ batch->fClipPlane = true;
+ if (SkScalarAbs(arcParams->fSweepAngleRadians) > SK_ScalarPI) {
+ batch->fGeoData.emplace_back(Geometry {
+ color,
+ innerRadius,
+ outerRadius,
+ {norm0.fX, norm0.fY, 0.5f},
+ {kUnusedIsectPlane[0], kUnusedIsectPlane[1], kUnusedIsectPlane[2]},
+ {norm1.fX, norm1.fY, 0.5f},
+ devBounds
+ });
+ batch->fClipPlaneIsect = false;
+ batch->fClipPlaneUnion = true;
+ } else {
+ batch->fGeoData.emplace_back(Geometry {
+ color,
+ innerRadius,
+ outerRadius,
+ {norm0.fX, norm0.fY, 0.5f},
+ {norm1.fX, norm1.fY, 0.5f},
+ {kUnusedUnionPlane[0], kUnusedUnionPlane[1], kUnusedUnionPlane[2]},
+ devBounds
+ });
+ batch->fClipPlaneIsect = true;
+ batch->fClipPlaneUnion = false;
+ }
+ } else {
+ // We clip to a secant of the original circle.
+ startPoint.scale(radius);
+ stopPoint.scale(radius);
+ SkVector norm = {startPoint.fY - stopPoint.fY, stopPoint.fX - startPoint.fX};
+ norm.normalize();
+ if (arcParams->fSweepAngleRadians > 0) {
+ norm.negate();
+ }
+ SkScalar d = -norm.dot(startPoint) + 0.5f;
+
+ batch->fGeoData.emplace_back(Geometry {
+ color,
+ innerRadius,
+ outerRadius,
+ {norm.fX, norm.fY, d},
+ {kUnusedIsectPlane[0], kUnusedIsectPlane[1], kUnusedIsectPlane[2]},
+ {kUnusedUnionPlane[0], kUnusedUnionPlane[1], kUnusedUnionPlane[2]},
+ devBounds
+ });
+ batch->fClipPlane = true;
+ batch->fClipPlaneIsect = false;
+ batch->fClipPlaneUnion = false;
+ }
+ } else {
+ batch->fGeoData.emplace_back(Geometry {
+ color,
+ innerRadius,
+ outerRadius,
+ {kUnusedIsectPlane[0], kUnusedIsectPlane[1], kUnusedIsectPlane[2]},
+ {kUnusedIsectPlane[0], kUnusedIsectPlane[1], kUnusedIsectPlane[2]},
+ {kUnusedUnionPlane[0], kUnusedUnionPlane[1], kUnusedUnionPlane[2]},
+ devBounds
+ });
+ batch->fClipPlane = false;
+ batch->fClipPlaneIsect = false;
+ batch->fClipPlaneUnion = false;
+ }
+ // Use the original radius and stroke radius for the bounds so that it does not include the
+ // AA bloat.
+ radius += halfWidth;
+ batch->setBounds({center.fX - radius, center.fY - radius,
+ center.fX + radius, center.fY + radius},
+ HasAABloat::kYes, IsZeroArea::kNo);
+ batch->fStroked = isStrokeOnly && innerRadius > 0;
+ return batch;
+ }
+
+ const char* name() const override { return "CircleBatch"; }
+
+ SkString dumpInfo() const override {
+ SkString string;
+ for (int i = 0; i < fGeoData.count(); ++i) {
+ string.appendf("Color: 0x%08x Rect [L: %.2f, T: %.2f, R: %.2f, B: %.2f],"
+ "InnerRad: %.2f, OuterRad: %.2f\n",
+ fGeoData[i].fColor,
+ fGeoData[i].fDevBounds.fLeft, fGeoData[i].fDevBounds.fTop,
+ fGeoData[i].fDevBounds.fRight, fGeoData[i].fDevBounds.fBottom,
+ fGeoData[i].fInnerRadius,
+ fGeoData[i].fOuterRadius);
+ }
+ string.append(INHERITED::dumpInfo());
+ return string;
+ }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one geometry bundle
+ color->setKnownFourComponents(fGeoData[0].fColor);
+ coverage->setUnknownSingleComponent();
+ }
+
+private:
+ CircleBatch() : INHERITED(ClassID()) {}
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ // Handle any overrides that affect our GP.
+ overrides.getOverrideColorIfSet(&fGeoData[0].fColor);
+ if (!overrides.readsLocalCoords()) {
+ fViewMatrixIfUsingLocalCoords.reset();
+ }
+ }
+
+ void onPrepareDraws(Target* target) const override {
+ SkMatrix localMatrix;
+ if (!fViewMatrixIfUsingLocalCoords.invert(&localMatrix)) {
+ return;
+ }
+
+ // Setup geometry processor
+ SkAutoTUnref<GrGeometryProcessor> gp(new CircleGeometryProcessor(fStroked, fClipPlane,
+ fClipPlaneIsect,
+ fClipPlaneUnion,
+ localMatrix));
+
+ struct CircleVertex {
+ SkPoint fPos;
+ GrColor fColor;
+ SkPoint fOffset;
+ SkScalar fOuterRadius;
+ SkScalar fInnerRadius;
+ // These planes may or may not be present in the vertex buffer.
+ SkScalar fHalfPlanes[3][3];
+ };
+
+ int instanceCount = fGeoData.count();
+ size_t vertexStride = gp->getVertexStride();
+ SkASSERT(vertexStride == sizeof(CircleVertex) - (fClipPlane ? 0 : 3 * sizeof(SkScalar))
+ - (fClipPlaneIsect? 0 : 3 * sizeof(SkScalar))
+ - (fClipPlaneUnion? 0 : 3 * sizeof(SkScalar)));
+ QuadHelper helper;
+ char* vertices = reinterpret_cast<char*>(helper.init(target, vertexStride, instanceCount));
+ if (!vertices) {
+ return;
+ }
+
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& geom = fGeoData[i];
+
+ GrColor color = geom.fColor;
+ SkScalar innerRadius = geom.fInnerRadius;
+ SkScalar outerRadius = geom.fOuterRadius;
+
+ const SkRect& bounds = geom.fDevBounds;
+ CircleVertex* v0 = reinterpret_cast<CircleVertex*>(vertices + (4 * i + 0)*vertexStride);
+ CircleVertex* v1 = reinterpret_cast<CircleVertex*>(vertices + (4 * i + 1)*vertexStride);
+ CircleVertex* v2 = reinterpret_cast<CircleVertex*>(vertices + (4 * i + 2)*vertexStride);
+ CircleVertex* v3 = reinterpret_cast<CircleVertex*>(vertices + (4 * i + 3)*vertexStride);
+
+ // The inner radius in the vertex data must be specified in normalized space.
+ innerRadius = innerRadius / outerRadius;
+ v0->fPos = SkPoint::Make(bounds.fLeft, bounds.fTop);
+ v0->fColor = color;
+ v0->fOffset = SkPoint::Make(-1, -1);
+ v0->fOuterRadius = outerRadius;
+ v0->fInnerRadius = innerRadius;
+
+ v1->fPos = SkPoint::Make(bounds.fLeft, bounds.fBottom);
+ v1->fColor = color;
+ v1->fOffset = SkPoint::Make(-1, 1);
+ v1->fOuterRadius = outerRadius;
+ v1->fInnerRadius = innerRadius;
+
+ v2->fPos = SkPoint::Make(bounds.fRight, bounds.fBottom);
+ v2->fColor = color;
+ v2->fOffset = SkPoint::Make(1, 1);
+ v2->fOuterRadius = outerRadius;
+ v2->fInnerRadius = innerRadius;
+
+ v3->fPos = SkPoint::Make(bounds.fRight, bounds.fTop);
+ v3->fColor = color;
+ v3->fOffset = SkPoint::Make(1, -1);
+ v3->fOuterRadius = outerRadius;
+ v3->fInnerRadius = innerRadius;
+
+ if (fClipPlane) {
+ memcpy(v0->fHalfPlanes[0], geom.fClipPlane, 3 * sizeof(SkScalar));
+ memcpy(v1->fHalfPlanes[0], geom.fClipPlane, 3 * sizeof(SkScalar));
+ memcpy(v2->fHalfPlanes[0], geom.fClipPlane, 3 * sizeof(SkScalar));
+ memcpy(v3->fHalfPlanes[0], geom.fClipPlane, 3 * sizeof(SkScalar));
+ }
+ int unionIdx = 1;
+ if (fClipPlaneIsect) {
+ memcpy(v0->fHalfPlanes[1], geom.fIsectPlane, 3 * sizeof(SkScalar));
+ memcpy(v1->fHalfPlanes[1], geom.fIsectPlane, 3 * sizeof(SkScalar));
+ memcpy(v2->fHalfPlanes[1], geom.fIsectPlane, 3 * sizeof(SkScalar));
+ memcpy(v3->fHalfPlanes[1], geom.fIsectPlane, 3 * sizeof(SkScalar));
+ unionIdx = 2;
+ }
+ if (fClipPlaneUnion) {
+ memcpy(v0->fHalfPlanes[unionIdx], geom.fUnionPlane, 3 * sizeof(SkScalar));
+ memcpy(v1->fHalfPlanes[unionIdx], geom.fUnionPlane, 3 * sizeof(SkScalar));
+ memcpy(v2->fHalfPlanes[unionIdx], geom.fUnionPlane, 3 * sizeof(SkScalar));
+ memcpy(v3->fHalfPlanes[unionIdx], geom.fUnionPlane, 3 * sizeof(SkScalar));
+ }
+ }
+ helper.recordDraw(target, gp);
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ CircleBatch* that = t->cast<CircleBatch>();
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ if (this->fStroked != that->fStroked) {
+ return false;
+ }
+
+ // Because we've set up the batches that don't use the planes with no-op values,
+ // we can simply accumulate the planes used by later batches.
+ fClipPlane |= that->fClipPlane;
+ fClipPlaneIsect |= that->fClipPlaneIsect;
+ fClipPlaneUnion |= that->fClipPlaneUnion;
+
+ if (!fViewMatrixIfUsingLocalCoords.cheapEqualTo(that->fViewMatrixIfUsingLocalCoords)) {
+ return false;
+ }
+
+ fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
+ this->joinBounds(*that);
+ return true;
+ }
+
+ struct Geometry {
+ GrColor fColor;
+ SkScalar fInnerRadius;
+ SkScalar fOuterRadius;
+ SkScalar fClipPlane[3];
+ SkScalar fIsectPlane[3];
+ SkScalar fUnionPlane[3];
+ SkRect fDevBounds;
+ };
+
+ bool fStroked;
+ bool fClipPlane;
+ bool fClipPlaneIsect;
+ bool fClipPlaneUnion;
+ SkMatrix fViewMatrixIfUsingLocalCoords;
+ SkSTArray<1, Geometry, true> fGeoData;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class EllipseBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+ static GrDrawBatch* Create(GrColor color, const SkMatrix& viewMatrix, const SkRect& ellipse,
+ const SkStrokeRec& stroke) {
+ SkASSERT(viewMatrix.rectStaysRect());
+
+ // do any matrix crunching before we reset the draw state for device coords
+ SkPoint center = SkPoint::Make(ellipse.centerX(), ellipse.centerY());
+ viewMatrix.mapPoints(&center, 1);
+ SkScalar ellipseXRadius = SkScalarHalf(ellipse.width());
+ SkScalar ellipseYRadius = SkScalarHalf(ellipse.height());
+ SkScalar xRadius = SkScalarAbs(viewMatrix[SkMatrix::kMScaleX]*ellipseXRadius +
+ viewMatrix[SkMatrix::kMSkewY]*ellipseYRadius);
+ SkScalar yRadius = SkScalarAbs(viewMatrix[SkMatrix::kMSkewX]*ellipseXRadius +
+ viewMatrix[SkMatrix::kMScaleY]*ellipseYRadius);
+
+ // do (potentially) anisotropic mapping of stroke
+ SkVector scaledStroke;
+ SkScalar strokeWidth = stroke.getWidth();
+ scaledStroke.fX = SkScalarAbs(strokeWidth*(viewMatrix[SkMatrix::kMScaleX] +
+ viewMatrix[SkMatrix::kMSkewY]));
+ scaledStroke.fY = SkScalarAbs(strokeWidth*(viewMatrix[SkMatrix::kMSkewX] +
+ viewMatrix[SkMatrix::kMScaleY]));
+
+ SkStrokeRec::Style style = stroke.getStyle();
+ bool isStrokeOnly = SkStrokeRec::kStroke_Style == style ||
+ SkStrokeRec::kHairline_Style == style;
+ bool hasStroke = isStrokeOnly || SkStrokeRec::kStrokeAndFill_Style == style;
+
+ SkScalar innerXRadius = 0;
+ SkScalar innerYRadius = 0;
+ if (hasStroke) {
+ if (SkScalarNearlyZero(scaledStroke.length())) {
+ scaledStroke.set(SK_ScalarHalf, SK_ScalarHalf);
+ } else {
+ scaledStroke.scale(SK_ScalarHalf);
+ }
+
+ // we only handle thick strokes for near-circular ellipses
+ if (scaledStroke.length() > SK_ScalarHalf &&
+ (SK_ScalarHalf*xRadius > yRadius || SK_ScalarHalf*yRadius > xRadius)) {
+ return nullptr;
+ }
+
+ // we don't handle it if curvature of the stroke is less than curvature of the ellipse
+ if (scaledStroke.fX*(yRadius*yRadius) < (scaledStroke.fY*scaledStroke.fY)*xRadius ||
+ scaledStroke.fY*(xRadius*xRadius) < (scaledStroke.fX*scaledStroke.fX)*yRadius) {
+ return nullptr;
+ }
+
+ // this is valid only for scale & translation matrices (which should be the case at the moment)
+ if (isStrokeOnly) {
+ innerXRadius = xRadius - scaledStroke.fX;
+ innerYRadius = yRadius - scaledStroke.fY;
+ }
+
+ xRadius += scaledStroke.fX;
+ yRadius += scaledStroke.fY;
+ }
+
+ EllipseBatch* batch = new EllipseBatch();
+ batch->fGeoData.emplace_back(Geometry {
+ color,
+ xRadius,
+ yRadius,
+ innerXRadius,
+ innerYRadius,
+ SkRect::MakeLTRB(center.fX - xRadius, center.fY - yRadius,
+ center.fX + xRadius, center.fY + yRadius)
+ });
+
+ batch->setBounds(batch->fGeoData.back().fDevBounds, HasAABloat::kYes, IsZeroArea::kNo);
+
+ // Outset bounds to include half-pixel width antialiasing.
+ batch->fGeoData[0].fDevBounds.outset(SK_ScalarHalf, SK_ScalarHalf);
+
+ batch->fStroked = isStrokeOnly && innerXRadius > 0 && innerYRadius > 0;
+ batch->fViewMatrixIfUsingLocalCoords = viewMatrix;
+ return batch;
+ }
+
+ const char* name() const override { return "EllipseBatch"; }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one geometry bundle
+ color->setKnownFourComponents(fGeoData[0].fColor);
+ coverage->setUnknownSingleComponent();
+ }
+
+private:
+ EllipseBatch() : INHERITED(ClassID()) {}
+
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ // Handle any overrides that affect our GP.
+ if (!overrides.readsCoverage()) {
+ fGeoData[0].fColor = GrColor_ILLEGAL;
+ }
+ if (!overrides.readsLocalCoords()) {
+ fViewMatrixIfUsingLocalCoords.reset();
+ }
+ }
+
+ void onPrepareDraws(Target* target) const override {
+ SkMatrix localMatrix;
+ if (!fViewMatrixIfUsingLocalCoords.invert(&localMatrix)) {
+ return;
+ }
+
+ // Setup geometry processor
+ SkAutoTUnref<GrGeometryProcessor> gp(new EllipseGeometryProcessor(fStroked, localMatrix));
+
+ int instanceCount = fGeoData.count();
+ QuadHelper helper;
+ size_t vertexStride = gp->getVertexStride();
+ SkASSERT(vertexStride == sizeof(EllipseVertex));
+ EllipseVertex* verts = reinterpret_cast<EllipseVertex*>(
+ helper.init(target, vertexStride, instanceCount));
+ if (!verts) {
+ return;
+ }
+
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& geom = fGeoData[i];
+
+ GrColor color = geom.fColor;
+ SkScalar xRadius = geom.fXRadius;
+ SkScalar yRadius = geom.fYRadius;
+
+ // Compute the reciprocals of the radii here to save time in the shader
+ SkScalar xRadRecip = SkScalarInvert(xRadius);
+ SkScalar yRadRecip = SkScalarInvert(yRadius);
+ SkScalar xInnerRadRecip = SkScalarInvert(geom.fInnerXRadius);
+ SkScalar yInnerRadRecip = SkScalarInvert(geom.fInnerYRadius);
+
+ const SkRect& bounds = geom.fDevBounds;
+
+ // fOffsets are expanded from xyRadii to include the half-pixel antialiasing width.
+ SkScalar xMaxOffset = xRadius + SK_ScalarHalf;
+ SkScalar yMaxOffset = yRadius + SK_ScalarHalf;
+
+ // The inner radius in the vertex data must be specified in normalized space.
+ verts[0].fPos = SkPoint::Make(bounds.fLeft, bounds.fTop);
+ verts[0].fColor = color;
+ verts[0].fOffset = SkPoint::Make(-xMaxOffset, -yMaxOffset);
+ verts[0].fOuterRadii = SkPoint::Make(xRadRecip, yRadRecip);
+ verts[0].fInnerRadii = SkPoint::Make(xInnerRadRecip, yInnerRadRecip);
+
+ verts[1].fPos = SkPoint::Make(bounds.fLeft, bounds.fBottom);
+ verts[1].fColor = color;
+ verts[1].fOffset = SkPoint::Make(-xMaxOffset, yMaxOffset);
+ verts[1].fOuterRadii = SkPoint::Make(xRadRecip, yRadRecip);
+ verts[1].fInnerRadii = SkPoint::Make(xInnerRadRecip, yInnerRadRecip);
+
+ verts[2].fPos = SkPoint::Make(bounds.fRight, bounds.fBottom);
+ verts[2].fColor = color;
+ verts[2].fOffset = SkPoint::Make(xMaxOffset, yMaxOffset);
+ verts[2].fOuterRadii = SkPoint::Make(xRadRecip, yRadRecip);
+ verts[2].fInnerRadii = SkPoint::Make(xInnerRadRecip, yInnerRadRecip);
+
+ verts[3].fPos = SkPoint::Make(bounds.fRight, bounds.fTop);
+ verts[3].fColor = color;
+ verts[3].fOffset = SkPoint::Make(xMaxOffset, -yMaxOffset);
+ verts[3].fOuterRadii = SkPoint::Make(xRadRecip, yRadRecip);
+ verts[3].fInnerRadii = SkPoint::Make(xInnerRadRecip, yInnerRadRecip);
+
+ verts += kVerticesPerQuad;
+ }
+ helper.recordDraw(target, gp);
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ EllipseBatch* that = t->cast<EllipseBatch>();
+
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ if (fStroked != that->fStroked) {
+ return false;
+ }
+
+ if (!fViewMatrixIfUsingLocalCoords.cheapEqualTo(that->fViewMatrixIfUsingLocalCoords)) {
+ return false;
+ }
+
+ fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
+ this->joinBounds(*that);
+ return true;
+ }
+
+ struct Geometry {
+ GrColor fColor;
+ SkScalar fXRadius;
+ SkScalar fYRadius;
+ SkScalar fInnerXRadius;
+ SkScalar fInnerYRadius;
+ SkRect fDevBounds;
+ };
+
+ bool fStroked;
+ SkMatrix fViewMatrixIfUsingLocalCoords;
+ SkSTArray<1, Geometry, true> fGeoData;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+class DIEllipseBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ static GrDrawBatch* Create(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& ellipse,
+ const SkStrokeRec& stroke) {
+ SkPoint center = SkPoint::Make(ellipse.centerX(), ellipse.centerY());
+ SkScalar xRadius = SkScalarHalf(ellipse.width());
+ SkScalar yRadius = SkScalarHalf(ellipse.height());
+
+ SkStrokeRec::Style style = stroke.getStyle();
+ DIEllipseStyle dieStyle = (SkStrokeRec::kStroke_Style == style) ?
+ DIEllipseStyle::kStroke :
+ (SkStrokeRec::kHairline_Style == style) ?
+ DIEllipseStyle::kHairline : DIEllipseStyle::kFill;
+
+ SkScalar innerXRadius = 0;
+ SkScalar innerYRadius = 0;
+ if (SkStrokeRec::kFill_Style != style && SkStrokeRec::kHairline_Style != style) {
+ SkScalar strokeWidth = stroke.getWidth();
+
+ if (SkScalarNearlyZero(strokeWidth)) {
+ strokeWidth = SK_ScalarHalf;
+ } else {
+ strokeWidth *= SK_ScalarHalf;
+ }
+
+ // we only handle thick strokes for near-circular ellipses
+ if (strokeWidth > SK_ScalarHalf &&
+ (SK_ScalarHalf*xRadius > yRadius || SK_ScalarHalf*yRadius > xRadius)) {
+ return nullptr;
+ }
+
+ // we don't handle it if curvature of the stroke is less than curvature of the ellipse
+ if (strokeWidth*(yRadius*yRadius) < (strokeWidth*strokeWidth)*xRadius ||
+ strokeWidth*(xRadius*xRadius) < (strokeWidth*strokeWidth)*yRadius) {
+ return nullptr;
+ }
+
+ // set inner radius (if needed)
+ if (SkStrokeRec::kStroke_Style == style) {
+ innerXRadius = xRadius - strokeWidth;
+ innerYRadius = yRadius - strokeWidth;
+ }
+
+ xRadius += strokeWidth;
+ yRadius += strokeWidth;
+ }
+ if (DIEllipseStyle::kStroke == dieStyle) {
+ dieStyle = (innerXRadius > 0 && innerYRadius > 0) ? DIEllipseStyle::kStroke :
+ DIEllipseStyle::kFill;
+ }
+
+ // This expands the outer rect so that after CTM we end up with a half-pixel border
+ SkScalar a = viewMatrix[SkMatrix::kMScaleX];
+ SkScalar b = viewMatrix[SkMatrix::kMSkewX];
+ SkScalar c = viewMatrix[SkMatrix::kMSkewY];
+ SkScalar d = viewMatrix[SkMatrix::kMScaleY];
+ SkScalar geoDx = SK_ScalarHalf / SkScalarSqrt(a*a + c*c);
+ SkScalar geoDy = SK_ScalarHalf / SkScalarSqrt(b*b + d*d);
+
+ DIEllipseBatch* batch = new DIEllipseBatch();
+ batch->fGeoData.emplace_back(Geometry {
+ viewMatrix,
+ color,
+ xRadius,
+ yRadius,
+ innerXRadius,
+ innerYRadius,
+ geoDx,
+ geoDy,
+ dieStyle,
+ SkRect::MakeLTRB(center.fX - xRadius - geoDx, center.fY - yRadius - geoDy,
+ center.fX + xRadius + geoDx, center.fY + yRadius + geoDy)
+ });
+ batch->setTransformedBounds(batch->fGeoData[0].fBounds, viewMatrix, HasAABloat::kYes,
+ IsZeroArea::kNo);
+ return batch;
+ }
+
+ const char* name() const override { return "DIEllipseBatch"; }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one geometry bundle
+ color->setKnownFourComponents(fGeoData[0].fColor);
+ coverage->setUnknownSingleComponent();
+ }
+
+private:
+
+ DIEllipseBatch() : INHERITED(ClassID()) {}
+
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ // Handle any overrides that affect our GP.
+ overrides.getOverrideColorIfSet(&fGeoData[0].fColor);
+ fUsesLocalCoords = overrides.readsLocalCoords();
+ }
+
+ void onPrepareDraws(Target* target) const override {
+ // Setup geometry processor
+ SkAutoTUnref<GrGeometryProcessor> gp(new DIEllipseGeometryProcessor(this->viewMatrix(),
+ this->style()));
+
+ int instanceCount = fGeoData.count();
+ size_t vertexStride = gp->getVertexStride();
+ SkASSERT(vertexStride == sizeof(DIEllipseVertex));
+ QuadHelper helper;
+ DIEllipseVertex* verts = reinterpret_cast<DIEllipseVertex*>(
+ helper.init(target, vertexStride, instanceCount));
+ if (!verts) {
+ return;
+ }
+
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& geom = fGeoData[i];
+
+ GrColor color = geom.fColor;
+ SkScalar xRadius = geom.fXRadius;
+ SkScalar yRadius = geom.fYRadius;
+
+ const SkRect& bounds = geom.fBounds;
+
+ // This adjusts the "radius" to include the half-pixel border
+ SkScalar offsetDx = geom.fGeoDx / xRadius;
+ SkScalar offsetDy = geom.fGeoDy / yRadius;
+
+ SkScalar innerRatioX = xRadius / geom.fInnerXRadius;
+ SkScalar innerRatioY = yRadius / geom.fInnerYRadius;
+
+ verts[0].fPos = SkPoint::Make(bounds.fLeft, bounds.fTop);
+ verts[0].fColor = color;
+ verts[0].fOuterOffset = SkPoint::Make(-1.0f - offsetDx, -1.0f - offsetDy);
+ verts[0].fInnerOffset = SkPoint::Make(-innerRatioX - offsetDx, -innerRatioY - offsetDy);
+
+ verts[1].fPos = SkPoint::Make(bounds.fLeft, bounds.fBottom);
+ verts[1].fColor = color;
+ verts[1].fOuterOffset = SkPoint::Make(-1.0f - offsetDx, 1.0f + offsetDy);
+ verts[1].fInnerOffset = SkPoint::Make(-innerRatioX - offsetDx, innerRatioY + offsetDy);
+
+ verts[2].fPos = SkPoint::Make(bounds.fRight, bounds.fBottom);
+ verts[2].fColor = color;
+ verts[2].fOuterOffset = SkPoint::Make(1.0f + offsetDx, 1.0f + offsetDy);
+ verts[2].fInnerOffset = SkPoint::Make(innerRatioX + offsetDx, innerRatioY + offsetDy);
+
+ verts[3].fPos = SkPoint::Make(bounds.fRight, bounds.fTop);
+ verts[3].fColor = color;
+ verts[3].fOuterOffset = SkPoint::Make(1.0f + offsetDx, -1.0f - offsetDy);
+ verts[3].fInnerOffset = SkPoint::Make(innerRatioX + offsetDx, -innerRatioY - offsetDy);
+
+ verts += kVerticesPerQuad;
+ }
+ helper.recordDraw(target, gp);
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ DIEllipseBatch* that = t->cast<DIEllipseBatch>();
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ if (this->style() != that->style()) {
+ return false;
+ }
+
+ // TODO rewrite to allow positioning on CPU
+ if (!this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return false;
+ }
+
+ fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
+ this->joinBounds(*that);
+ return true;
+ }
+
+ const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
+ DIEllipseStyle style() const { return fGeoData[0].fStyle; }
+
+ struct Geometry {
+ SkMatrix fViewMatrix;
+ GrColor fColor;
+ SkScalar fXRadius;
+ SkScalar fYRadius;
+ SkScalar fInnerXRadius;
+ SkScalar fInnerYRadius;
+ SkScalar fGeoDx;
+ SkScalar fGeoDy;
+ DIEllipseStyle fStyle;
+ SkRect fBounds;
+ };
+
+ bool fUsesLocalCoords;
+ SkSTArray<1, Geometry, true> fGeoData;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+// We have three possible cases for geometry for a roundrect.
+//
+// In the case of a normal fill or a stroke, we draw the roundrect as a 9-patch:
+//      ____________
+//     |_|________|_|
+//     | |        | |
+//     | |        | |
+//     | |        | |
+//     |_|________|_|
+//     |_|________|_|
+//
+// For strokes, we don't draw the center quad.
+//
+// For circular roundrects, in the case where the stroke width is greater than twice
+// the corner radius (overstroke), we add additional geometry to mark out the rectangle
+// in the center. The shared vertices are duplicated so we can set a different outer radius
+// for the fill calculation.
+//      ____________
+//     |_|________|_|
+//     | |\ ____ /| |
+//     | | |    | | |
+//     | | |____| | |
+//     |_|/______\|_|
+//     |_|________|_|
+//
+// We don't draw the center quad from the fill rect in this case.
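+//
+// Vertex numbering used by the index lists below (see onPrepareDraws()): the 16 standard verts
+// are emitted as a 4x4 grid in row-major order (0-3 across the top edge, 12-15 across the
+// bottom), and verts 16-23 form the extra overstroke ring that is only emitted for
+// overstroked rrects.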
+
+static const uint16_t gOverstrokeRRectIndices[] = {
+ // overstroke quads
+ // we place this at the beginning so that we can skip these indices when rendering normally
+ 16, 17, 19, 16, 19, 18,
+ 19, 17, 23, 19, 23, 21,
+ 21, 23, 22, 21, 22, 20,
+ 22, 16, 18, 22, 18, 20,
+
+ // corners
+ 0, 1, 5, 0, 5, 4,
+ 2, 3, 7, 2, 7, 6,
+ 8, 9, 13, 8, 13, 12,
+ 10, 11, 15, 10, 15, 14,
+
+ // edges
+ 1, 2, 6, 1, 6, 5,
+ 4, 5, 9, 4, 9, 8,
+ 6, 7, 11, 6, 11, 10,
+ 9, 10, 14, 9, 14, 13,
+
+ // center
+ // we place this at the end so that we can ignore these indices when not rendering as filled
+ 5, 6, 10, 5, 10, 9,
+};
+// fill and standard stroke indices skip the overstroke "ring"
+static const uint16_t* gStandardRRectIndices = gOverstrokeRRectIndices + 6*4;
+
+// overstroke count is arraysize minus the center indices
+static const int kIndicesPerOverstrokeRRect = SK_ARRAY_COUNT(gOverstrokeRRectIndices) - 6;
+// fill count skips overstroke indices and includes center
+static const int kIndicesPerFillRRect = kIndicesPerOverstrokeRRect - 6*4 + 6;
+// stroke count is fill count minus center indices
+static const int kIndicesPerStrokeRRect = kIndicesPerFillRRect - 6;
+static const int kVertsPerStandardRRect = 16;
+static const int kVertsPerOverstrokeRRect = 24;
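+// For reference, the arithmetic above works out to: SK_ARRAY_COUNT(gOverstrokeRRectIndices) =
+// 4*6 + 4*6 + 4*6 + 6 = 78, kIndicesPerOverstrokeRRect = 78 - 6 = 72 (no center quad),
+// kIndicesPerFillRRect = 72 - 24 + 6 = 54 (drop the overstroke ring, add the center quad), and
+// kIndicesPerStrokeRRect = 54 - 6 = 48 (no center quad).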
+
+enum RRectType {
+ kFill_RRectType,
+ kStroke_RRectType,
+ kOverstroke_RRectType,
+};
+
+static int rrect_type_to_vert_count(RRectType type) {
+ static const int kTypeToVertCount[] = {
+ kVertsPerStandardRRect,
+ kVertsPerStandardRRect,
+ kVertsPerOverstrokeRRect,
+ };
+
+ return kTypeToVertCount[type];
+}
+
+static int rrect_type_to_index_count(RRectType type) {
+ static const int kTypeToIndexCount[] = {
+ kIndicesPerFillRRect,
+ kIndicesPerStrokeRRect,
+ kIndicesPerOverstrokeRRect,
+ };
+
+ return kTypeToIndexCount[type];
+}
+
+static const uint16_t* rrect_type_to_indices(RRectType type) {
+ static const uint16_t* kTypeToIndices[] = {
+ gStandardRRectIndices,
+ gStandardRRectIndices,
+ gOverstrokeRRectIndices,
+ };
+
+ return kTypeToIndices[type];
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+class RRectCircleRendererBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ // A devStrokeWidth <= 0 indicates a fill only. If devStrokeWidth > 0 then strokeOnly indicates
+ // whether the rrect is only stroked or stroked and filled.
+ RRectCircleRendererBatch(GrColor color, const SkMatrix& viewMatrix, const SkRect& devRect,
+ float devRadius, float devStrokeWidth, bool strokeOnly)
+ : INHERITED(ClassID())
+ , fViewMatrixIfUsingLocalCoords(viewMatrix) {
+ SkRect bounds = devRect;
+ SkASSERT(!(devStrokeWidth <= 0 && strokeOnly));
+ SkScalar innerRadius = 0.0f;
+ SkScalar outerRadius = devRadius;
+ SkScalar halfWidth = 0;
+ RRectType type = kFill_RRectType;
+ if (devStrokeWidth > 0) {
+ if (SkScalarNearlyZero(devStrokeWidth)) {
+ halfWidth = SK_ScalarHalf;
+ } else {
+ halfWidth = SkScalarHalf(devStrokeWidth);
+ }
+
+ if (strokeOnly) {
+ // Outset stroke by 1/4 pixel
+ devStrokeWidth += 0.25f;
+ // If stroke is greater than width or height, this is still a fill
+ // Otherwise we compute stroke params
+ if (devStrokeWidth <= devRect.width() &&
+ devStrokeWidth <= devRect.height()) {
+ innerRadius = devRadius - halfWidth;
+ type = (innerRadius >= 0) ? kStroke_RRectType : kOverstroke_RRectType;
+ }
+ }
+ outerRadius += halfWidth;
+ bounds.outset(halfWidth, halfWidth);
+ }
+
+ // The radii are outset for two reasons. First, it allows the shader to perform a simpler
+ // computation because the computed alpha is zero, rather than 50%, at the radius.
+ // Second, the outer radius is used to compute the verts of the bounding box that is
+ // rendered and the outset ensures the box will cover all pixels partially covered by the
+ // rrect corners.
+ outerRadius += SK_ScalarHalf;
+ innerRadius -= SK_ScalarHalf;
+
+ this->setBounds(bounds, HasAABloat::kYes, IsZeroArea::kNo);
+
+ // Expand the rect for aa to generate correct vertices.
+ bounds.outset(SK_ScalarHalf, SK_ScalarHalf);
+
+ fGeoData.emplace_back(Geometry{ color, innerRadius, outerRadius, bounds, type });
+ fVertCount = rrect_type_to_vert_count(type);
+ fIndexCount = rrect_type_to_index_count(type);
+ fAllFill = (kFill_RRectType == type);
+ }
+
+ const char* name() const override { return "RRectCircleBatch"; }
+
+ SkString dumpInfo() const override {
+ SkString string;
+ for (int i = 0; i < fGeoData.count(); ++i) {
+ string.appendf("Color: 0x%08x Rect [L: %.2f, T: %.2f, R: %.2f, B: %.2f],"
+ "InnerRad: %.2f, OuterRad: %.2f\n",
+ fGeoData[i].fColor,
+ fGeoData[i].fDevBounds.fLeft, fGeoData[i].fDevBounds.fTop,
+ fGeoData[i].fDevBounds.fRight, fGeoData[i].fDevBounds.fBottom,
+ fGeoData[i].fInnerRadius,
+ fGeoData[i].fOuterRadius);
+ }
+ string.append(INHERITED::dumpInfo());
+ return string;
+ }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one geometry bundle
+ color->setKnownFourComponents(fGeoData[0].fColor);
+ coverage->setUnknownSingleComponent();
+ }
+
+private:
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ // Handle any overrides that affect our GP.
+ overrides.getOverrideColorIfSet(&fGeoData[0].fColor);
+ if (!overrides.readsLocalCoords()) {
+ fViewMatrixIfUsingLocalCoords.reset();
+ }
+ }
+
+ void onPrepareDraws(Target* target) const override {
+ // Invert the view matrix as a local matrix (if any other processors require coords).
+ SkMatrix localMatrix;
+ if (!fViewMatrixIfUsingLocalCoords.invert(&localMatrix)) {
+ return;
+ }
+
+ // Setup geometry processor
+ SkAutoTUnref<GrGeometryProcessor> gp(new CircleGeometryProcessor(!fAllFill,
+ false, false,
+ false, localMatrix));
+ struct CircleVertex {
+ SkPoint fPos;
+ GrColor fColor;
+ SkPoint fOffset;
+ SkScalar fOuterRadius;
+ SkScalar fInnerRadius;
+ // No half plane, we don't use it here.
+ };
+
+ int instanceCount = fGeoData.count();
+ size_t vertexStride = gp->getVertexStride();
+ SkASSERT(sizeof(CircleVertex) == vertexStride);
+
+ const GrBuffer* vertexBuffer;
+ int firstVertex;
+
+ CircleVertex* verts = (CircleVertex*) target->makeVertexSpace(vertexStride, fVertCount,
+ &vertexBuffer, &firstVertex);
+ if (!verts) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ const GrBuffer* indexBuffer = nullptr;
+ int firstIndex = 0;
+ uint16_t* indices = target->makeIndexSpace(fIndexCount, &indexBuffer, &firstIndex);
+ if (!indices) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+
+ int currStartVertex = 0;
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& args = fGeoData[i];
+
+ GrColor color = args.fColor;
+ SkScalar outerRadius = args.fOuterRadius;
+
+ const SkRect& bounds = args.fDevBounds;
+
+ SkScalar yCoords[4] = {
+ bounds.fTop,
+ bounds.fTop + outerRadius,
+ bounds.fBottom - outerRadius,
+ bounds.fBottom
+ };
+
+ SkScalar yOuterRadii[4] = {-1, 0, 0, 1 };
+ // The inner radius in the vertex data must be specified in normalized space.
+ // For fills, specifying -1/outerRadius guarantees an alpha of 1.0 at the inner radius.
+ SkScalar innerRadius = args.fType != kFill_RRectType
+ ? args.fInnerRadius / args.fOuterRadius
+ : -1.0f / args.fOuterRadius;
+ for (int i = 0; i < 4; ++i) {
+ verts->fPos = SkPoint::Make(bounds.fLeft, yCoords[i]);
+ verts->fColor = color;
+ verts->fOffset = SkPoint::Make(-1, yOuterRadii[i]);
+ verts->fOuterRadius = outerRadius;
+ verts->fInnerRadius = innerRadius;
+ verts++;
+
+ verts->fPos = SkPoint::Make(bounds.fLeft + outerRadius, yCoords[i]);
+ verts->fColor = color;
+ verts->fOffset = SkPoint::Make(0, yOuterRadii[i]);
+ verts->fOuterRadius = outerRadius;
+ verts->fInnerRadius = innerRadius;
+ verts++;
+
+ verts->fPos = SkPoint::Make(bounds.fRight - outerRadius, yCoords[i]);
+ verts->fColor = color;
+ verts->fOffset = SkPoint::Make(0, yOuterRadii[i]);
+ verts->fOuterRadius = outerRadius;
+ verts->fInnerRadius = innerRadius;
+ verts++;
+
+ verts->fPos = SkPoint::Make(bounds.fRight, yCoords[i]);
+ verts->fColor = color;
+ verts->fOffset = SkPoint::Make(1, yOuterRadii[i]);
+ verts->fOuterRadius = outerRadius;
+ verts->fInnerRadius = innerRadius;
+ verts++;
+ }
+ // Add the additional vertices for overstroked rrects.
+ // Effectively this is an additional stroked rrect, with its
+ // outer radius = outerRadius - innerRadius, and inner radius = 0.
+ // This will give us correct AA in the center and the correct
+ // distance to the outer edge.
+ //
+ // Also, the outer offset is a constant vector pointing to the right, which
+ // guarantees that the distance value along the outer rectangle is constant.
+ if (kOverstroke_RRectType == args.fType) {
+ SkScalar overstrokeOuterRadius = outerRadius - args.fInnerRadius;
+ // this is the normalized distance from the outer rectangle of this
+ // geometry to the outer edge
+ SkScalar maxOffset = -args.fInnerRadius / overstrokeOuterRadius;
+
+ verts->fPos = SkPoint::Make(bounds.fLeft + outerRadius, yCoords[1]);
+ verts->fColor = color;
+ verts->fOffset = SkPoint::Make(maxOffset, 0);
+ verts->fOuterRadius = overstrokeOuterRadius;
+ verts->fInnerRadius = 0;
+ verts++;
+
+ verts->fPos = SkPoint::Make(bounds.fRight - outerRadius, yCoords[1]);
+ verts->fColor = color;
+ verts->fOffset = SkPoint::Make(maxOffset, 0);
+ verts->fOuterRadius = overstrokeOuterRadius;
+ verts->fInnerRadius = 0;
+ verts++;
+
+ verts->fPos = SkPoint::Make(bounds.fLeft + overstrokeOuterRadius,
+ bounds.fTop + overstrokeOuterRadius);
+ verts->fColor = color;
+ verts->fOffset = SkPoint::Make(0, 0);
+ verts->fOuterRadius = overstrokeOuterRadius;
+ verts->fInnerRadius = 0;
+ verts++;
+
+ verts->fPos = SkPoint::Make(bounds.fRight - overstrokeOuterRadius,
+ bounds.fTop + overstrokeOuterRadius);
+ verts->fColor = color;
+ verts->fOffset = SkPoint::Make(0, 0);
+ verts->fOuterRadius = overstrokeOuterRadius;
+ verts->fInnerRadius = 0;
+ verts++;
+
+ verts->fPos = SkPoint::Make(bounds.fLeft + overstrokeOuterRadius,
+ bounds.fBottom - overstrokeOuterRadius);
+ verts->fColor = color;
+ verts->fOffset = SkPoint::Make(0, 0);
+ verts->fOuterRadius = overstrokeOuterRadius;
+ verts->fInnerRadius = 0;
+ verts++;
+
+ verts->fPos = SkPoint::Make(bounds.fRight - overstrokeOuterRadius,
+ bounds.fBottom - overstrokeOuterRadius);
+ verts->fColor = color;
+ verts->fOffset = SkPoint::Make(0, 0);
+ verts->fOuterRadius = overstrokeOuterRadius;
+ verts->fInnerRadius = 0;
+ verts++;
+
+ verts->fPos = SkPoint::Make(bounds.fLeft + outerRadius, yCoords[2]);
+ verts->fColor = color;
+ verts->fOffset = SkPoint::Make(maxOffset, 0);
+ verts->fOuterRadius = overstrokeOuterRadius;
+ verts->fInnerRadius = 0;
+ verts++;
+
+ verts->fPos = SkPoint::Make(bounds.fRight - outerRadius, yCoords[2]);
+ verts->fColor = color;
+ verts->fOffset = SkPoint::Make(maxOffset, 0);
+ verts->fOuterRadius = overstrokeOuterRadius;
+ verts->fInnerRadius = 0;
+ verts++;
+ }
+
+ const uint16_t* primIndices = rrect_type_to_indices(args.fType);
+ const int primIndexCount = rrect_type_to_index_count(args.fType);
+ for (int i = 0; i < primIndexCount; ++i) {
+ *indices++ = primIndices[i] + currStartVertex;
+ }
+
+ currStartVertex += rrect_type_to_vert_count(args.fType);
+ }
+
+ GrMesh mesh;
+ mesh.initIndexed(kTriangles_GrPrimitiveType, vertexBuffer, indexBuffer, firstVertex,
+ firstIndex, fVertCount, fIndexCount);
+ target->draw(gp.get(), mesh);
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ RRectCircleRendererBatch* that = t->cast<RRectCircleRendererBatch>();
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ if (!fViewMatrixIfUsingLocalCoords.cheapEqualTo(that->fViewMatrixIfUsingLocalCoords)) {
+ return false;
+ }
+
+ fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
+ this->joinBounds(*that);
+ fVertCount += that->fVertCount;
+ fIndexCount += that->fIndexCount;
+ fAllFill = fAllFill && that->fAllFill;
+ return true;
+ }
+
+ struct Geometry {
+ GrColor fColor;
+ SkScalar fInnerRadius;
+ SkScalar fOuterRadius;
+ SkRect fDevBounds;
+ RRectType fType;
+ };
+
+ SkSTArray<1, Geometry, true> fGeoData;
+ SkMatrix fViewMatrixIfUsingLocalCoords;
+ int fVertCount;
+ int fIndexCount;
+ bool fAllFill;
+
+ typedef GrVertexBatch INHERITED;
+};
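+
+// Illustrative sketch only (hypothetical helper, shown for exposition and not used by
+// the batch above): the 4x4 nine-patch position grid that
+// RRectCircleRendererBatch::onPrepareDraws() walks for each instance, where the x/y
+// coordinates are the rect edges inset by the outer radius.
+static void sketch_nine_patch_positions(const SkRect& bounds, SkScalar outerRadius,
+                                        SkPoint positions[16]) {
+    const SkScalar xs[4] = { bounds.fLeft, bounds.fLeft + outerRadius,
+                             bounds.fRight - outerRadius, bounds.fRight };
+    const SkScalar ys[4] = { bounds.fTop, bounds.fTop + outerRadius,
+                             bounds.fBottom - outerRadius, bounds.fBottom };
+    for (int y = 0; y < 4; ++y) {
+        for (int x = 0; x < 4; ++x) {
+            // Row-major: four vertices per y coordinate, matching the loop above.
+            positions[4 * y + x] = SkPoint::Make(xs[x], ys[y]);
+        }
+    }
+}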
+
+static const int kNumRRectsInIndexBuffer = 256;
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gStrokeRRectOnlyIndexBufferKey);
+GR_DECLARE_STATIC_UNIQUE_KEY(gRRectOnlyIndexBufferKey);
+static const GrBuffer* ref_rrect_index_buffer(RRectType type,
+ GrResourceProvider* resourceProvider) {
+ GR_DEFINE_STATIC_UNIQUE_KEY(gStrokeRRectOnlyIndexBufferKey);
+ GR_DEFINE_STATIC_UNIQUE_KEY(gRRectOnlyIndexBufferKey);
+ switch (type) {
+ case kFill_RRectType:
+ return resourceProvider->findOrCreateInstancedIndexBuffer(
+ gStandardRRectIndices, kIndicesPerFillRRect, kNumRRectsInIndexBuffer,
+ kVertsPerStandardRRect, gRRectOnlyIndexBufferKey);
+ case kStroke_RRectType:
+ return resourceProvider->findOrCreateInstancedIndexBuffer(
+ gStandardRRectIndices, kIndicesPerStrokeRRect, kNumRRectsInIndexBuffer,
+ kVertsPerStandardRRect, gStrokeRRectOnlyIndexBufferKey);
+ default:
+ SkASSERT(false);
+ return nullptr;
+ };
+}
+
+class RRectEllipseRendererBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+    // If the devStrokeWidths values are <= 0 then this is a fill only. Otherwise, strokeOnly
+    // indicates whether the rrect is only stroked or stroked and filled.
+ static GrDrawBatch* Create(GrColor color, const SkMatrix& viewMatrix, const SkRect& devRect,
+ float devXRadius, float devYRadius, SkVector devStrokeWidths,
+ bool strokeOnly) {
+ SkASSERT(devXRadius > 0.5);
+ SkASSERT(devYRadius > 0.5);
+ SkASSERT((devStrokeWidths.fX > 0) == (devStrokeWidths.fY > 0));
+ SkASSERT(!(strokeOnly && devStrokeWidths.fX <= 0));
+ SkScalar innerXRadius = 0.0f;
+ SkScalar innerYRadius = 0.0f;
+ SkRect bounds = devRect;
+ bool stroked = false;
+ if (devStrokeWidths.fX > 0) {
+ if (SkScalarNearlyZero(devStrokeWidths.length())) {
+ devStrokeWidths.set(SK_ScalarHalf, SK_ScalarHalf);
+ } else {
+ devStrokeWidths.scale(SK_ScalarHalf);
+ }
+
+ // we only handle thick strokes for near-circular ellipses
+ if (devStrokeWidths.length() > SK_ScalarHalf &&
+ (SK_ScalarHalf*devXRadius > devYRadius || SK_ScalarHalf*devYRadius > devXRadius)) {
+ return nullptr;
+ }
+
+ // we don't handle it if curvature of the stroke is less than curvature of the ellipse
+ if (devStrokeWidths.fX*(devYRadius*devYRadius) <
+ (devStrokeWidths.fY*devStrokeWidths.fY)*devXRadius) {
+ return nullptr;
+ }
+ if (devStrokeWidths.fY*(devXRadius*devXRadius) <
+ (devStrokeWidths.fX*devStrokeWidths.fX)*devYRadius) {
+ return nullptr;
+ }
+
+            // This is valid only for scale & translation matrices (which should be the case here)
+ if (strokeOnly) {
+ innerXRadius = devXRadius - devStrokeWidths.fX;
+ innerYRadius = devYRadius - devStrokeWidths.fY;
+ stroked = (innerXRadius >= 0 && innerYRadius >= 0);
+ }
+
+ devXRadius += devStrokeWidths.fX;
+ devYRadius += devStrokeWidths.fY;
+ bounds.outset(devStrokeWidths.fX, devStrokeWidths.fY);
+ }
+
+ RRectEllipseRendererBatch* batch = new RRectEllipseRendererBatch();
+ batch->fStroked = stroked;
+ batch->fViewMatrixIfUsingLocalCoords = viewMatrix;
+ batch->setBounds(bounds, HasAABloat::kYes, IsZeroArea::kNo);
+ // Expand the rect for aa in order to generate the correct vertices.
+ bounds.outset(SK_ScalarHalf, SK_ScalarHalf);
+ batch->fGeoData.emplace_back(
+ Geometry {color, devXRadius, devYRadius, innerXRadius, innerYRadius, bounds});
+ return batch;
+ }
+
+ const char* name() const override { return "RRectEllipseRendererBatch"; }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one geometry bundle
+ color->setKnownFourComponents(fGeoData[0].fColor);
+ coverage->setUnknownSingleComponent();
+ }
+
+private:
+ RRectEllipseRendererBatch() : INHERITED(ClassID()) {}
+
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ // Handle overrides that affect our GP.
+ overrides.getOverrideColorIfSet(&fGeoData[0].fColor);
+ if (!overrides.readsLocalCoords()) {
+ fViewMatrixIfUsingLocalCoords.reset();
+ }
+ }
+
+ void onPrepareDraws(Target* target) const override {
+ SkMatrix localMatrix;
+ if (!fViewMatrixIfUsingLocalCoords.invert(&localMatrix)) {
+ return;
+ }
+
+ // Setup geometry processor
+ SkAutoTUnref<GrGeometryProcessor> gp(new EllipseGeometryProcessor(fStroked, localMatrix));
+
+ int instanceCount = fGeoData.count();
+ size_t vertexStride = gp->getVertexStride();
+ SkASSERT(vertexStride == sizeof(EllipseVertex));
+
+ // drop out the middle quad if we're stroked
+ int indicesPerInstance = fStroked ? kIndicesPerStrokeRRect : kIndicesPerFillRRect;
+ SkAutoTUnref<const GrBuffer> indexBuffer(
+ ref_rrect_index_buffer(fStroked ? kStroke_RRectType : kFill_RRectType,
+ target->resourceProvider()));
+
+ InstancedHelper helper;
+ EllipseVertex* verts = reinterpret_cast<EllipseVertex*>(
+ helper.init(target, kTriangles_GrPrimitiveType, vertexStride, indexBuffer,
+ kVertsPerStandardRRect, indicesPerInstance, instanceCount));
+ if (!verts || !indexBuffer) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& args = fGeoData[i];
+
+ GrColor color = args.fColor;
+
+ // Compute the reciprocals of the radii here to save time in the shader
+ SkScalar xRadRecip = SkScalarInvert(args.fXRadius);
+ SkScalar yRadRecip = SkScalarInvert(args.fYRadius);
+ SkScalar xInnerRadRecip = SkScalarInvert(args.fInnerXRadius);
+ SkScalar yInnerRadRecip = SkScalarInvert(args.fInnerYRadius);
+
+ // Extend the radii out half a pixel to antialias.
+ SkScalar xOuterRadius = args.fXRadius + SK_ScalarHalf;
+ SkScalar yOuterRadius = args.fYRadius + SK_ScalarHalf;
+
+ const SkRect& bounds = args.fDevBounds;
+
+ SkScalar yCoords[4] = {
+ bounds.fTop,
+ bounds.fTop + yOuterRadius,
+ bounds.fBottom - yOuterRadius,
+ bounds.fBottom
+ };
+ SkScalar yOuterOffsets[4] = {
+ yOuterRadius,
+ SK_ScalarNearlyZero, // we're using inversesqrt() in shader, so can't be exactly 0
+ SK_ScalarNearlyZero,
+ yOuterRadius
+ };
+
+ for (int i = 0; i < 4; ++i) {
+ verts->fPos = SkPoint::Make(bounds.fLeft, yCoords[i]);
+ verts->fColor = color;
+ verts->fOffset = SkPoint::Make(xOuterRadius, yOuterOffsets[i]);
+ verts->fOuterRadii = SkPoint::Make(xRadRecip, yRadRecip);
+ verts->fInnerRadii = SkPoint::Make(xInnerRadRecip, yInnerRadRecip);
+ verts++;
+
+ verts->fPos = SkPoint::Make(bounds.fLeft + xOuterRadius, yCoords[i]);
+ verts->fColor = color;
+ verts->fOffset = SkPoint::Make(SK_ScalarNearlyZero, yOuterOffsets[i]);
+ verts->fOuterRadii = SkPoint::Make(xRadRecip, yRadRecip);
+ verts->fInnerRadii = SkPoint::Make(xInnerRadRecip, yInnerRadRecip);
+ verts++;
+
+ verts->fPos = SkPoint::Make(bounds.fRight - xOuterRadius, yCoords[i]);
+ verts->fColor = color;
+ verts->fOffset = SkPoint::Make(SK_ScalarNearlyZero, yOuterOffsets[i]);
+ verts->fOuterRadii = SkPoint::Make(xRadRecip, yRadRecip);
+ verts->fInnerRadii = SkPoint::Make(xInnerRadRecip, yInnerRadRecip);
+ verts++;
+
+ verts->fPos = SkPoint::Make(bounds.fRight, yCoords[i]);
+ verts->fColor = color;
+ verts->fOffset = SkPoint::Make(xOuterRadius, yOuterOffsets[i]);
+ verts->fOuterRadii = SkPoint::Make(xRadRecip, yRadRecip);
+ verts->fInnerRadii = SkPoint::Make(xInnerRadRecip, yInnerRadRecip);
+ verts++;
+ }
+ }
+ helper.recordDraw(target, gp);
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ RRectEllipseRendererBatch* that = t->cast<RRectEllipseRendererBatch>();
+
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ if (fStroked != that->fStroked) {
+ return false;
+ }
+
+ if (!fViewMatrixIfUsingLocalCoords.cheapEqualTo(that->fViewMatrixIfUsingLocalCoords)) {
+ return false;
+ }
+
+ fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
+ this->joinBounds(*that);
+ return true;
+ }
+
+ struct Geometry {
+ GrColor fColor;
+ SkScalar fXRadius;
+ SkScalar fYRadius;
+ SkScalar fInnerXRadius;
+ SkScalar fInnerYRadius;
+ SkRect fDevBounds;
+ };
+
+ bool fStroked;
+ SkMatrix fViewMatrixIfUsingLocalCoords;
+ SkSTArray<1, Geometry, true> fGeoData;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+static GrDrawBatch* create_rrect_batch(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRRect& rrect,
+ const SkStrokeRec& stroke) {
+ SkASSERT(viewMatrix.rectStaysRect());
+ SkASSERT(rrect.isSimple());
+ SkASSERT(!rrect.isOval());
+
+    // RRect batches only handle simple, but not too simple, rrects
+ // do any matrix crunching before we reset the draw state for device coords
+ const SkRect& rrectBounds = rrect.getBounds();
+ SkRect bounds;
+ viewMatrix.mapRect(&bounds, rrectBounds);
+
+ SkVector radii = rrect.getSimpleRadii();
+ SkScalar xRadius = SkScalarAbs(viewMatrix[SkMatrix::kMScaleX]*radii.fX +
+ viewMatrix[SkMatrix::kMSkewY]*radii.fY);
+ SkScalar yRadius = SkScalarAbs(viewMatrix[SkMatrix::kMSkewX]*radii.fX +
+ viewMatrix[SkMatrix::kMScaleY]*radii.fY);
+
+ SkStrokeRec::Style style = stroke.getStyle();
+
+ // Do (potentially) anisotropic mapping of stroke. Use -1s to indicate fill-only draws.
+ SkVector scaledStroke = {-1, -1};
+ SkScalar strokeWidth = stroke.getWidth();
+
+ bool isStrokeOnly = SkStrokeRec::kStroke_Style == style ||
+ SkStrokeRec::kHairline_Style == style;
+ bool hasStroke = isStrokeOnly || SkStrokeRec::kStrokeAndFill_Style == style;
+
+ bool isCircular = (xRadius == yRadius);
+ if (hasStroke) {
+ if (SkStrokeRec::kHairline_Style == style) {
+ scaledStroke.set(1, 1);
+ } else {
+ scaledStroke.fX = SkScalarAbs(strokeWidth*(viewMatrix[SkMatrix::kMScaleX] +
+ viewMatrix[SkMatrix::kMSkewY]));
+ scaledStroke.fY = SkScalarAbs(strokeWidth*(viewMatrix[SkMatrix::kMSkewX] +
+ viewMatrix[SkMatrix::kMScaleY]));
+ }
+
+ isCircular = isCircular && scaledStroke.fX == scaledStroke.fY;
+        // For non-circular rrects, we don't currently handle the case where half the
+        // stroke width is greater than the corresponding radius.
+ if (!isCircular &&
+ (SK_ScalarHalf*scaledStroke.fX > xRadius || SK_ScalarHalf*scaledStroke.fY > yRadius)) {
+ return nullptr;
+ }
+ }
+
+ // The way the effect interpolates the offset-to-ellipse/circle-center attribute only works on
+ // the interior of the rrect if the radii are >= 0.5. Otherwise, the inner rect of the nine-
+ // patch will have fractional coverage. This only matters when the interior is actually filled.
+ // We could consider falling back to rect rendering here, since a tiny radius is
+ // indistinguishable from a square corner.
+ if (!isStrokeOnly && (SK_ScalarHalf > xRadius || SK_ScalarHalf > yRadius)) {
+ return nullptr;
+ }
+
+ // if the corners are circles, use the circle renderer
+ if (isCircular) {
+ return new RRectCircleRendererBatch(color, viewMatrix, bounds, xRadius, scaledStroke.fX,
+ isStrokeOnly);
+ // otherwise we use the ellipse renderer
+ } else {
+ return RRectEllipseRendererBatch::Create(color, viewMatrix, bounds, xRadius, yRadius,
+ scaledStroke, isStrokeOnly);
+
+ }
+}
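+
+// Worked example of the mapping above (illustrative numbers only): for a view matrix
+// of scale(2, 3) and simple radii (4, 5),
+//   xRadius = |2*4 + 0*5| = 8,    yRadius = |0*4 + 3*5| = 15,
+// and a stroke width of 2 maps to scaledStroke = (|2*(2+0)|, |2*(0+3)|) = (4, 6).
+// Since xRadius != yRadius, isCircular is false and the ellipse renderer is chosen.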
+
+GrDrawBatch* GrOvalRenderer::CreateRRectBatch(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRRect& rrect,
+ const SkStrokeRec& stroke,
+ const GrShaderCaps* shaderCaps) {
+ if (rrect.isOval()) {
+ return CreateOvalBatch(color, viewMatrix, rrect.getBounds(), stroke, shaderCaps);
+ }
+
+ if (!viewMatrix.rectStaysRect() || !rrect.isSimple()) {
+ return nullptr;
+ }
+
+ return create_rrect_batch(color, viewMatrix, rrect, stroke);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrDrawBatch* GrOvalRenderer::CreateOvalBatch(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& oval,
+ const SkStrokeRec& stroke,
+ const GrShaderCaps* shaderCaps) {
+ // we can draw circles
+ SkScalar width = oval.width();
+ if (SkScalarNearlyEqual(width, oval.height()) && circle_stays_circle(viewMatrix)) {
+ SkPoint center = {oval.centerX(), oval.centerY()};
+ return CircleBatch::Create(color, viewMatrix, center, width / 2.f,
+ GrStyle(stroke, nullptr));
+ }
+
+ // if we have shader derivative support, render as device-independent
+ if (shaderCaps->shaderDerivativeSupport()) {
+ return DIEllipseBatch::Create(color, viewMatrix, oval, stroke);
+ }
+
+ // otherwise axis-aligned ellipses only
+ if (viewMatrix.rectStaysRect()) {
+ return EllipseBatch::Create(color, viewMatrix, oval, stroke);
+ }
+
+ return nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrDrawBatch* GrOvalRenderer::CreateArcBatch(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& oval,
+ SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter,
+ const GrStyle& style,
+ const GrShaderCaps* shaderCaps) {
+ SkASSERT(!oval.isEmpty());
+ SkASSERT(sweepAngle);
+ SkScalar width = oval.width();
+ if (SkScalarAbs(sweepAngle) >= 360.f) {
+ return nullptr;
+ }
+ if (!SkScalarNearlyEqual(width, oval.height()) || !circle_stays_circle(viewMatrix)) {
+ return nullptr;
+ }
+ SkPoint center = {oval.centerX(), oval.centerY()};
+ CircleBatch::ArcParams arcParams = {
+ SkDegreesToRadians(startAngle),
+ SkDegreesToRadians(sweepAngle),
+ useCenter
+ };
+ return CircleBatch::Create(color, viewMatrix, center, width/2.f, style, &arcParams);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_TEST_UTILS
+
+DRAW_BATCH_TEST_DEFINE(CircleBatch) {
+ do {
+ SkScalar rotate = random->nextSScalar1() * 360.f;
+ SkScalar translateX = random->nextSScalar1() * 1000.f;
+ SkScalar translateY = random->nextSScalar1() * 1000.f;
+ SkScalar scale = random->nextSScalar1() * 100.f;
+ SkMatrix viewMatrix;
+ viewMatrix.setRotate(rotate);
+ viewMatrix.postTranslate(translateX, translateY);
+ viewMatrix.postScale(scale, scale);
+ GrColor color = GrRandomColor(random);
+ SkRect circle = GrTest::TestSquare(random);
+ SkPoint center = {circle.centerX(), circle.centerY()};
+ SkScalar radius = circle.width() / 2.f;
+ SkStrokeRec stroke = GrTest::TestStrokeRec(random);
+ CircleBatch::ArcParams arcParamsTmp;
+ const CircleBatch::ArcParams* arcParams = nullptr;
+ if (random->nextBool()) {
+ arcParamsTmp.fStartAngleRadians = random->nextSScalar1() * SK_ScalarPI * 2;
+ arcParamsTmp.fSweepAngleRadians = random->nextSScalar1() * SK_ScalarPI * 2 - .01f;
+ arcParamsTmp.fUseCenter = random->nextBool();
+ arcParams = &arcParamsTmp;
+ }
+ GrDrawBatch* batch = CircleBatch::Create(color, viewMatrix, center, radius,
+ GrStyle(stroke, nullptr), arcParams);
+ if (batch) {
+ return batch;
+ }
+ } while (true);
+}
+
+DRAW_BATCH_TEST_DEFINE(EllipseBatch) {
+ SkMatrix viewMatrix = GrTest::TestMatrixRectStaysRect(random);
+ GrColor color = GrRandomColor(random);
+ SkRect ellipse = GrTest::TestSquare(random);
+ return EllipseBatch::Create(color, viewMatrix, ellipse, GrTest::TestStrokeRec(random));
+}
+
+DRAW_BATCH_TEST_DEFINE(DIEllipseBatch) {
+ SkMatrix viewMatrix = GrTest::TestMatrix(random);
+ GrColor color = GrRandomColor(random);
+ SkRect ellipse = GrTest::TestSquare(random);
+ return DIEllipseBatch::Create(color, viewMatrix, ellipse, GrTest::TestStrokeRec(random));
+}
+
+DRAW_BATCH_TEST_DEFINE(RRectBatch) {
+ SkMatrix viewMatrix = GrTest::TestMatrixRectStaysRect(random);
+ GrColor color = GrRandomColor(random);
+ const SkRRect& rrect = GrTest::TestRRectSimple(random);
+ return create_rrect_batch(color, viewMatrix, rrect, GrTest::TestStrokeRec(random));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrOvalRenderer.h b/gfx/skia/skia/src/gpu/GrOvalRenderer.h
new file mode 100644
index 000000000..c4ea4968d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrOvalRenderer.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrOvalRenderer_DEFINED
+#define GrOvalRenderer_DEFINED
+
+#include "GrColor.h"
+
+class GrDrawBatch;
+class GrShaderCaps;
+class GrStyle;
+class SkMatrix;
+struct SkRect;
+class SkRRect;
+class SkStrokeRec;
+
+/*
+ * This class wraps helper functions that draw ovals and roundrects (filled & stroked)
+ */
+class GrOvalRenderer {
+public:
+ static GrDrawBatch* CreateOvalBatch(GrColor,
+ const SkMatrix& viewMatrix,
+ const SkRect& oval,
+ const SkStrokeRec& stroke,
+ const GrShaderCaps* shaderCaps);
+ static GrDrawBatch* CreateRRectBatch(GrColor,
+ const SkMatrix& viewMatrix,
+ const SkRRect& rrect,
+ const SkStrokeRec& stroke,
+ const GrShaderCaps* shaderCaps);
+
+ static GrDrawBatch* CreateArcBatch(GrColor,
+ const SkMatrix& viewMatrix,
+ const SkRect& oval,
+ SkScalar startAngle,
+ SkScalar sweepAngle,
+ bool useCenter,
+ const GrStyle&,
+ const GrShaderCaps* shaderCaps);
+};
+
+#endif // GrOvalRenderer_DEFINED
diff --git a/gfx/skia/skia/src/gpu/GrPLSGeometryProcessor.h b/gfx/skia/skia/src/gpu/GrPLSGeometryProcessor.h
new file mode 100644
index 000000000..0640af63f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPLSGeometryProcessor.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPLSGeometryProcessor_DEFINED
+#define GrPLSGeometryProcessor_DEFINED
+
+#include "GrGeometryProcessor.h"
+
+/**
+ * A minor extension to GrGeometryProcessor that adds bounds tracking for pixel local storage
+ * purposes.
+ */
+class GrPLSGeometryProcessor : public GrGeometryProcessor {
+public:
+ GrPixelLocalStorageState getPixelLocalStorageState() const override {
+ return GrPixelLocalStorageState::kDraw_GrPixelLocalStorageState;
+ }
+
+ const SkRect& getBounds() const {
+ return fBounds;
+ }
+
+ void setBounds(SkRect& bounds) {
+ fBounds = bounds;
+ }
+
+private:
+ SkRect fBounds;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrPaint.cpp b/gfx/skia/skia/src/gpu/GrPaint.cpp
new file mode 100644
index 000000000..d33881c09
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPaint.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrPaint.h"
+
+#include "GrProcOptInfo.h"
+#include "effects/GrCoverageSetOpXP.h"
+#include "effects/GrPorterDuffXferProcessor.h"
+#include "effects/GrSimpleTextureEffect.h"
+
+GrPaint::GrPaint()
+ : fAntiAlias(false)
+ , fDisableOutputConversionToSRGB(false)
+ , fAllowSRGBInputs(false)
+ , fUsesDistanceVectorField(false)
+ , fColor(GrColor4f::FromGrColor(GrColor_WHITE)) {}
+
+void GrPaint::setCoverageSetOpXPFactory(SkRegion::Op regionOp, bool invertCoverage) {
+ fXPFactory = GrCoverageSetOpXPFactory::Make(regionOp, invertCoverage);
+}
+
+void GrPaint::addColorTextureProcessor(GrTexture* texture,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkMatrix& matrix) {
+ this->addColorFragmentProcessor(GrSimpleTextureEffect::Make(texture,
+ std::move(colorSpaceXform),
+ matrix));
+}
+
+void GrPaint::addCoverageTextureProcessor(GrTexture* texture, const SkMatrix& matrix) {
+ this->addCoverageFragmentProcessor(GrSimpleTextureEffect::Make(texture, nullptr, matrix));
+}
+
+void GrPaint::addColorTextureProcessor(GrTexture* texture,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkMatrix& matrix,
+ const GrTextureParams& params) {
+ this->addColorFragmentProcessor(GrSimpleTextureEffect::Make(texture,
+ std::move(colorSpaceXform),
+ matrix, params));
+}
+
+void GrPaint::addCoverageTextureProcessor(GrTexture* texture,
+ const SkMatrix& matrix,
+ const GrTextureParams& params) {
+ this->addCoverageFragmentProcessor(GrSimpleTextureEffect::Make(texture, nullptr, matrix,
+ params));
+}
+
+bool GrPaint::internalIsConstantBlendedColor(GrColor paintColor, GrColor* color) const {
+ GrProcOptInfo colorProcInfo;
+ colorProcInfo.calcWithInitialValues(
+ sk_sp_address_as_pointer_address(fColorFragmentProcessors.begin()),
+ this->numColorFragmentProcessors(), paintColor, kRGBA_GrColorComponentFlags, false);
+
+ GrXPFactory::InvariantBlendedColor blendedColor;
+ if (fXPFactory) {
+ fXPFactory->getInvariantBlendedColor(colorProcInfo, &blendedColor);
+ } else {
+ GrPorterDuffXPFactory::SrcOverInvariantBlendedColor(colorProcInfo.color(),
+ colorProcInfo.validFlags(),
+ colorProcInfo.isOpaque(),
+ &blendedColor);
+ }
+
+ if (kRGBA_GrColorComponentFlags == blendedColor.fKnownColorFlags) {
+ *color = blendedColor.fKnownColor;
+ return true;
+ }
+ return false;
+}
diff --git a/gfx/skia/skia/src/gpu/GrPath.cpp b/gfx/skia/skia/src/gpu/GrPath.cpp
new file mode 100644
index 000000000..836cc5ed5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPath.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrPath.h"
+#include "GrShape.h"
+
+static inline void write_style_key(uint32_t* key, const GrStyle& style) {
+ // Pass 1 for the scale since the GPU will apply the style not GrStyle::applyToPath().
+ GrStyle::WriteKey(key, style, GrStyle::Apply::kPathEffectAndStrokeRec, SK_Scalar1);
+}
+
+
+void GrPath::ComputeKey(const GrShape& shape, GrUniqueKey* key, bool* outIsVolatile) {
+ int geoCnt = shape.unstyledKeySize();
+ int styleCnt = GrStyle::KeySize(shape.style(), GrStyle::Apply::kPathEffectAndStrokeRec);
+ // This should only fail for an arbitrary path effect, and we should not have gotten
+ // here with anything other than a dash path effect.
+ SkASSERT(styleCnt >= 0);
+ if (geoCnt < 0) {
+ *outIsVolatile = true;
+ return;
+ }
+ static const GrUniqueKey::Domain kGeneralPathDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey::Builder builder(key, kGeneralPathDomain, geoCnt + styleCnt);
+ shape.writeUnstyledKey(&builder[0]);
+ if (styleCnt) {
+ write_style_key(&builder[geoCnt], shape.style());
+ }
+ *outIsVolatile = false;
+}
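+
+// Resulting key layout (sketch of the code above): builder[0 .. geoCnt-1] holds the
+// shape's unstyled key words written by writeUnstyledKey(), and, when styleCnt > 0,
+// builder[geoCnt .. geoCnt+styleCnt-1] holds the style key written by write_style_key().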
+
+#ifdef SK_DEBUG
+bool GrPath::isEqualTo(const SkPath& path, const GrStyle& style) const {
+ // Since this is only called in debug we don't care about performance.
+ int cnt0 = GrStyle::KeySize(fStyle, GrStyle::Apply::kPathEffectAndStrokeRec);
+ int cnt1 = GrStyle::KeySize(style, GrStyle::Apply::kPathEffectAndStrokeRec);
+ if (cnt0 < 0 || cnt1 < 0 || cnt0 != cnt1) {
+ return false;
+ }
+ if (cnt0) {
+ SkAutoTArray<uint32_t> key0(cnt0);
+ SkAutoTArray<uint32_t> key1(cnt0);
+ write_style_key(key0.get(), fStyle);
+ write_style_key(key1.get(), style);
+        if (0 != memcmp(key0.get(), key1.get(), cnt0 * sizeof(uint32_t))) {
+ return false;
+ }
+ }
+ return fSkPath == path;
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrPath.h b/gfx/skia/skia/src/gpu/GrPath.h
new file mode 100644
index 000000000..19538370d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPath.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPath_DEFINED
+#define GrPath_DEFINED
+
+#include "GrGpuResource.h"
+#include "GrPathRendering.h"
+#include "GrStyle.h"
+#include "SkPath.h"
+#include "SkRect.h"
+
+class GrShape;
+
+class GrPath : public GrGpuResource {
+public:
+ /**
+ * Initialize to a path with a fixed stroke. Stroke must not be hairline.
+ */
+ GrPath(GrGpu* gpu, const SkPath& skPath, const GrStyle& style)
+ : INHERITED(gpu)
+ , fBounds(SkRect::MakeEmpty())
+ , fFillType(GrPathRendering::kWinding_FillType)
+#ifdef SK_DEBUG
+ , fSkPath(skPath)
+ , fStyle(style)
+#endif
+ {
+ }
+
+ static void ComputeKey(const GrShape&, GrUniqueKey* key, bool* outIsVolatile);
+
+ const SkRect& getBounds() const { return fBounds; }
+
+ GrPathRendering::FillType getFillType() const { return fFillType; }
+#ifdef SK_DEBUG
+ bool isEqualTo(const SkPath& path, const GrStyle& style) const;
+#endif
+
+protected:
+ // Subclass should init these.
+ SkRect fBounds;
+ GrPathRendering::FillType fFillType;
+#ifdef SK_DEBUG
+ SkPath fSkPath;
+ GrStyle fStyle;
+#endif
+
+private:
+ typedef GrGpuResource INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrPathProcessor.cpp b/gfx/skia/skia/src/gpu/GrPathProcessor.cpp
new file mode 100644
index 000000000..c90481b8d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathProcessor.cpp
@@ -0,0 +1,141 @@
+/*
+* Copyright 2013 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrPathProcessor.h"
+
+#include "gl/GrGLGpu.h"
+#include "glsl/GrGLSLCaps.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "glsl/GrGLSLVarying.h"
+
+class GrGLPathProcessor : public GrGLSLPrimitiveProcessor {
+public:
+ GrGLPathProcessor() : fColor(GrColor_ILLEGAL) {}
+
+ static void GenKey(const GrPathProcessor& pathProc,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ b->add32(SkToInt(pathProc.overrides().readsColor()) |
+ (SkToInt(pathProc.overrides().readsCoverage()) << 1) |
+ (SkToInt(pathProc.viewMatrix().hasPerspective()) << 2));
+ }
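+    // Example of the key packing above (illustrative values): readsColor = 1,
+    // readsCoverage = 0, hasPerspective = 1 packs to 0b101 (decimal 5).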
+
+ void emitCode(EmitArgs& args) override {
+ GrGLSLPPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const GrPathProcessor& pathProc = args.fGP.cast<GrPathProcessor>();
+
+ if (!pathProc.viewMatrix().hasPerspective()) {
+ args.fVaryingHandler->setNoPerspective();
+ }
+
+ // emit transforms
+ this->emitTransforms(args.fVaryingHandler, args.fFPCoordTransformHandler);
+
+ // Setup uniform color
+ if (pathProc.overrides().readsColor()) {
+ const char* stagedLocalVarName;
+ fColorUniform = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType,
+ kDefault_GrSLPrecision,
+ "Color",
+ &stagedLocalVarName);
+ fragBuilder->codeAppendf("%s = %s;", args.fOutputColor, stagedLocalVarName);
+ }
+
+ // setup constant solid coverage
+ if (pathProc.overrides().readsCoverage()) {
+ fragBuilder->codeAppendf("%s = vec4(1);", args.fOutputCoverage);
+ }
+ }
+
+ void emitTransforms(GrGLSLVaryingHandler* varyingHandler,
+ FPCoordTransformHandler* transformHandler) {
+ int i = 0;
+ while (const GrCoordTransform* coordTransform = transformHandler->nextCoordTransform()) {
+ GrSLType varyingType =
+ coordTransform->getMatrix().hasPerspective() ? kVec3f_GrSLType
+ : kVec2f_GrSLType;
+
+ SkString strVaryingName;
+ strVaryingName.printf("TransformedCoord_%d", i);
+ GrGLSLVertToFrag v(varyingType);
+ GrGLVaryingHandler* glVaryingHandler = (GrGLVaryingHandler*) varyingHandler;
+ fInstalledTransforms.push_back().fHandle =
+ glVaryingHandler->addPathProcessingVarying(strVaryingName.c_str(),
+ &v).toIndex();
+ fInstalledTransforms.back().fType = varyingType;
+
+ transformHandler->specifyCoordsForCurrCoordTransform(SkString(v.fsIn()), varyingType);
+ ++i;
+ }
+ }
+
+ void setData(const GrGLSLProgramDataManager& pd,
+ const GrPrimitiveProcessor& primProc,
+ FPCoordTransformIter&& transformIter) override {
+ const GrPathProcessor& pathProc = primProc.cast<GrPathProcessor>();
+ if (pathProc.overrides().readsColor() && pathProc.color() != fColor) {
+ float c[4];
+ GrColorToRGBAFloat(pathProc.color(), c);
+ pd.set4fv(fColorUniform, 1, c);
+ fColor = pathProc.color();
+ }
+
+ int t = 0;
+ while (const GrCoordTransform* coordTransform = transformIter.next()) {
+ SkASSERT(fInstalledTransforms[t].fHandle.isValid());
+ const SkMatrix& m = GetTransformMatrix(pathProc.localMatrix(), *coordTransform);
+ if (fInstalledTransforms[t].fCurrentValue.cheapEqualTo(m)) {
+ continue;
+ }
+ fInstalledTransforms[t].fCurrentValue = m;
+
+ SkASSERT(fInstalledTransforms[t].fType == kVec2f_GrSLType ||
+ fInstalledTransforms[t].fType == kVec3f_GrSLType);
+ unsigned components = fInstalledTransforms[t].fType == kVec2f_GrSLType ? 2 : 3;
+ pd.setPathFragmentInputTransform(fInstalledTransforms[t].fHandle, components, m);
+ ++t;
+ }
+ }
+
+private:
+ typedef GrGLSLProgramDataManager::VaryingHandle VaryingHandle;
+ struct TransformVarying {
+ VaryingHandle fHandle;
+ SkMatrix fCurrentValue = SkMatrix::InvalidMatrix();
+ GrSLType fType = kVoid_GrSLType;
+ };
+
+ SkTArray<TransformVarying, true> fInstalledTransforms;
+
+ UniformHandle fColorUniform;
+ GrColor fColor;
+
+ typedef GrGLSLPrimitiveProcessor INHERITED;
+};
+
+GrPathProcessor::GrPathProcessor(GrColor color,
+ const GrXPOverridesForBatch& overrides,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& localMatrix)
+ : fColor(color)
+ , fViewMatrix(viewMatrix)
+ , fLocalMatrix(localMatrix)
+ , fOverrides(overrides) {
+ this->initClassID<GrPathProcessor>();
+}
+
+void GrPathProcessor::getGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLPathProcessor::GenKey(*this, caps, b);
+}
+
+GrGLSLPrimitiveProcessor* GrPathProcessor::createGLSLInstance(const GrGLSLCaps& caps) const {
+ SkASSERT(caps.pathRenderingSupport());
+ return new GrGLPathProcessor();
+}
diff --git a/gfx/skia/skia/src/gpu/GrPathProcessor.h b/gfx/skia/skia/src/gpu/GrPathProcessor.h
new file mode 100644
index 000000000..8c9e0d6be
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathProcessor.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPathProcessor_DEFINED
+#define GrPathProcessor_DEFINED
+
+#include "GrPrimitiveProcessor.h"
+
+/*
+ * The path equivalent of the GP. For now this just manages color. In the long term we plan on
+ * extending this class to handle all nvpr uniform / varying / program work.
+ */
+class GrPathProcessor : public GrPrimitiveProcessor {
+public:
+ static GrPathProcessor* Create(GrColor color,
+ const GrXPOverridesForBatch& overrides,
+ const SkMatrix& viewMatrix = SkMatrix::I(),
+ const SkMatrix& localMatrix = SkMatrix::I()) {
+ return new GrPathProcessor(color, overrides, viewMatrix, localMatrix);
+ }
+
+ const char* name() const override { return "PathProcessor"; }
+
+ GrColor color() const { return fColor; }
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+
+ bool willUseGeoShader() const override { return false; }
+
+ virtual void getGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const override;
+
+ virtual GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps& caps) const override;
+
+ const GrXPOverridesForBatch& overrides() const { return fOverrides; }
+
+ virtual bool isPathRendering() const override { return true; }
+
+private:
+ GrPathProcessor(GrColor color, const GrXPOverridesForBatch& overrides,
+ const SkMatrix& viewMatrix, const SkMatrix& localMatrix);
+
+ bool hasExplicitLocalCoords() const override { return false; }
+
+ GrColor fColor;
+ const SkMatrix fViewMatrix;
+ const SkMatrix fLocalMatrix;
+ GrXPOverridesForBatch fOverrides;
+
+ typedef GrPrimitiveProcessor INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrPathRange.cpp b/gfx/skia/skia/src/gpu/GrPathRange.cpp
new file mode 100644
index 000000000..6cf9de281
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathRange.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrPathRange.h"
+#include "SkPath.h"
+
+GrPathRange::GrPathRange(GrGpu* gpu,
+ PathGenerator* pathGenerator)
+ : INHERITED(gpu),
+ fPathGenerator(SkRef(pathGenerator)),
+ fNumPaths(fPathGenerator->getNumPaths()) {
+ const int numGroups = (fNumPaths + kPathsPerGroup - 1) / kPathsPerGroup;
+ fGeneratedPaths.reset((numGroups + 7) / 8); // 1 bit per path group.
+ memset(&fGeneratedPaths.front(), 0, fGeneratedPaths.count());
+}
+
+GrPathRange::GrPathRange(GrGpu* gpu,
+ int numPaths)
+ : INHERITED(gpu),
+ fNumPaths(numPaths) {
+}
+
+void GrPathRange::loadPathsIfNeeded(const void* indices, PathIndexType indexType, int count) const {
+ switch (indexType) {
+ case kU8_PathIndexType:
+ return this->loadPathsIfNeeded(reinterpret_cast<const uint8_t*>(indices), count);
+ case kU16_PathIndexType:
+ return this->loadPathsIfNeeded(reinterpret_cast<const uint16_t*>(indices), count);
+ case kU32_PathIndexType:
+ return this->loadPathsIfNeeded(reinterpret_cast<const uint32_t*>(indices), count);
+ default:
+ SkFAIL("Unknown path index type");
+ }
+}
+
+#ifdef SK_DEBUG
+
+void GrPathRange::assertPathsLoaded(const void* indices, PathIndexType indexType, int count) const {
+ switch (indexType) {
+ case kU8_PathIndexType:
+ return this->assertPathsLoaded(reinterpret_cast<const uint8_t*>(indices), count);
+ case kU16_PathIndexType:
+ return this->assertPathsLoaded(reinterpret_cast<const uint16_t*>(indices), count);
+ case kU32_PathIndexType:
+ return this->assertPathsLoaded(reinterpret_cast<const uint32_t*>(indices), count);
+ default:
+ SkFAIL("Unknown path index type");
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrPathRange.h b/gfx/skia/skia/src/gpu/GrPathRange.h
new file mode 100644
index 000000000..7bca17f26
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathRange.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPathRange_DEFINED
+#define GrPathRange_DEFINED
+
+#include "GrGpuResource.h"
+#include "SkPath.h"
+#include "SkRefCnt.h"
+#include "SkTArray.h"
+
+class SkDescriptor;
+
+/**
+ * Represents a contiguous range of GPU path objects.
+ * This object is immutable with the exception that individual paths may be
+ * initialized lazily.
+ */
+
+class GrPathRange : public GrGpuResource {
+public:
+
+
+ enum PathIndexType {
+ kU8_PathIndexType, //!< uint8_t
+ kU16_PathIndexType, //!< uint16_t
+ kU32_PathIndexType, //!< uint32_t
+
+ kLast_PathIndexType = kU32_PathIndexType
+ };
+
+ static inline int PathIndexSizeInBytes(PathIndexType type) {
+ GR_STATIC_ASSERT(0 == kU8_PathIndexType);
+ GR_STATIC_ASSERT(1 == kU16_PathIndexType);
+ GR_STATIC_ASSERT(2 == kU32_PathIndexType);
+ GR_STATIC_ASSERT(kU32_PathIndexType == kLast_PathIndexType);
+
+ return 1 << type;
+ }
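+    // Example of the mapping above: kU8_PathIndexType -> 1 byte, kU16_PathIndexType -> 2 bytes,
+    // kU32_PathIndexType -> 4 bytes (i.e., 1 << type).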
+
+ /**
+ * Class that generates the paths for a specific range.
+ */
+ class PathGenerator : public SkRefCnt {
+ public:
+ virtual int getNumPaths() = 0;
+ virtual void generatePath(int index, SkPath* out) = 0;
+#ifdef SK_DEBUG
+ virtual bool isEqualTo(const SkDescriptor&) const { return false; }
+#endif
+ virtual ~PathGenerator() {}
+ };
+
+ /**
+ * Initialize a lazy-loaded path range. This class will generate an SkPath and call
+ * onInitPath() for each path within the range before it is drawn for the first time.
+ */
+ GrPathRange(GrGpu*, PathGenerator*);
+
+ /**
+ * Initialize an eager-loaded path range. The subclass is responsible for ensuring all
+ * the paths are initialized up front.
+ */
+ GrPathRange(GrGpu*, int numPaths);
+
+ int getNumPaths() const { return fNumPaths; }
+ const PathGenerator* getPathGenerator() const { return fPathGenerator.get(); }
+
+ void loadPathsIfNeeded(const void* indices, PathIndexType, int count) const;
+
+ template<typename IndexType> void loadPathsIfNeeded(const IndexType* indices, int count) const {
+ if (!fPathGenerator) {
+ return;
+ }
+
+ bool didLoadPaths = false;
+
+ for (int i = 0; i < count; ++i) {
+ SkASSERT(indices[i] < static_cast<uint32_t>(fNumPaths));
+
+ const int groupIndex = indices[i] / kPathsPerGroup;
+ const int groupByte = groupIndex / 8;
+ const uint8_t groupBit = 1 << (groupIndex % 8);
+
+ const bool hasPath = SkToBool(fGeneratedPaths[groupByte] & groupBit);
+ if (!hasPath) {
+ // We track which paths are loaded in groups of kPathsPerGroup. To
+ // mark a path as loaded we need to load the entire group.
+ const int groupFirstPath = groupIndex * kPathsPerGroup;
+ const int groupLastPath = SkTMin(groupFirstPath + kPathsPerGroup, fNumPaths) - 1;
+
+ SkPath path;
+ for (int pathIdx = groupFirstPath; pathIdx <= groupLastPath; ++pathIdx) {
+ fPathGenerator->generatePath(pathIdx, &path);
+ this->onInitPath(pathIdx, path);
+ }
+
+ fGeneratedPaths[groupByte] |= groupBit;
+ didLoadPaths = true;
+ }
+ }
+
+ if (didLoadPaths) {
+ this->didChangeGpuMemorySize();
+ }
+ }
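+    // Worked example of the grouping above (illustrative index): index 40 gives
+    // groupIndex = 40 / 16 = 2, groupByte = 0, groupBit = 1 << 2; loading it generates
+    // the whole group of paths 32..47 (clamped to fNumPaths) and then sets that bit in
+    // fGeneratedPaths.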
+
+#ifdef SK_DEBUG
+ void assertPathsLoaded(const void* indices, PathIndexType, int count) const;
+
+ template<typename IndexType> void assertPathsLoaded(const IndexType* indices, int count) const {
+ if (!fPathGenerator) {
+ return;
+ }
+
+ for (int i = 0; i < count; ++i) {
+ SkASSERT(indices[i] < static_cast<uint32_t>(fNumPaths));
+
+ const int groupIndex = indices[i] / kPathsPerGroup;
+ const int groupByte = groupIndex / 8;
+ const uint8_t groupBit = 1 << (groupIndex % 8);
+
+ SkASSERT(fGeneratedPaths[groupByte] & groupBit);
+ }
+ }
+
+ virtual bool isEqualTo(const SkDescriptor& desc) const {
+ return nullptr != fPathGenerator.get() && fPathGenerator->isEqualTo(desc);
+ }
+#endif
+protected:
+ // Initialize a path in the range before drawing. This is only called when
+ // fPathGenerator is non-null. The child class need not call didChangeGpuMemorySize(),
+ // GrPathRange will take care of that after the call is complete.
+ virtual void onInitPath(int index, const SkPath&) const = 0;
+
+private:
+ enum {
+ kPathsPerGroup = 16 // Paths get tracked in groups of 16 for lazy loading.
+ };
+
+ mutable SkAutoTUnref<PathGenerator> fPathGenerator;
+ mutable SkTArray<uint8_t, true /*MEM_COPY*/> fGeneratedPaths;
+ const int fNumPaths;
+
+ typedef GrGpuResource INHERITED;
+};
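+
+// Illustrative sketch only (hypothetical subclass, shown for exposition): a
+// PathGenerator that lazily serves paths from a caller-owned array.
+class GrArrayPathGenerator : public GrPathRange::PathGenerator {
+public:
+    GrArrayPathGenerator(const SkPath* paths, int count) : fPaths(paths), fCount(count) {}
+    int getNumPaths() override { return fCount; }
+    void generatePath(int index, SkPath* out) override { *out = fPaths[index]; }
+private:
+    const SkPath* fPaths;  // not owned; assumed to outlive the generator
+    int           fCount;
+};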
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrPathRenderer.cpp b/gfx/skia/skia/src/gpu/GrPathRenderer.cpp
new file mode 100644
index 000000000..1f53e8405
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathRenderer.cpp
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrPathRenderer.h"
+
+GrPathRenderer::GrPathRenderer() {
+}
+
+void GrPathRenderer::GetPathDevBounds(const SkPath& path,
+ int devW, int devH,
+ const SkMatrix& matrix,
+ SkRect* bounds) {
+ if (path.isInverseFillType()) {
+ *bounds = SkRect::MakeWH(SkIntToScalar(devW), SkIntToScalar(devH));
+ return;
+ }
+ *bounds = path.getBounds();
+ matrix.mapRect(bounds);
+}
diff --git a/gfx/skia/skia/src/gpu/GrPathRenderer.h b/gfx/skia/skia/src/gpu/GrPathRenderer.h
new file mode 100644
index 000000000..37cc3f986
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathRenderer.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPathRenderer_DEFINED
+#define GrPathRenderer_DEFINED
+
+#include "GrCaps.h"
+#include "GrDrawContext.h"
+#include "GrPaint.h"
+#include "GrResourceProvider.h"
+#include "GrShape.h"
+
+#include "SkDrawProcs.h"
+#include "SkTArray.h"
+
+class SkPath;
+class GrFixedClip;
+struct GrPoint;
+
+/**
+ * Base class for drawing paths into a GrDrawTarget.
+ *
+ * Derived classes can use stages GrPaint::kTotalStages through GrPipelineBuilder::kNumStages-1.
+ * The stages before GrPaint::kTotalStages are reserved for setting up the draw (i.e., textures and
+ * filter masks).
+ */
+class SK_API GrPathRenderer : public SkRefCnt {
+public:
+ GrPathRenderer();
+
+ /**
+ * A caller may wish to use a path renderer to draw a path into the stencil buffer. However,
+ * the path renderer itself may require use of the stencil buffer. Also a path renderer may
+ * use a GrProcessor coverage stage that sets coverage to zero to eliminate pixels that are
+ * covered by bounding geometry but outside the path. These exterior pixels would still be
+ * rendered into the stencil.
+ *
+ * A GrPathRenderer can provide three levels of support for stenciling paths:
+ * 1) kNoRestriction: This is the most general. The caller sets up the GrPipelineBuilder on the target
+ * and calls drawPath(). The path is rendered exactly as the draw state
+ * indicates including support for simultaneous color and stenciling with
+ * arbitrary stenciling rules. Pixels partially covered by AA paths are
+ * affected by the stencil settings.
+ * 2) kStencilOnly: The path renderer cannot apply arbitrary stencil rules nor shade and stencil
+ * simultaneously. The path renderer does support the stencilPath() function
+ * which performs no color writes and writes a non-zero stencil value to pixels
+ * covered by the path.
+ * 3) kNoSupport: This path renderer cannot be used to stencil the path.
+ */
+ enum StencilSupport {
+ kNoSupport_StencilSupport,
+ kStencilOnly_StencilSupport,
+ kNoRestriction_StencilSupport,
+ };
+
+ /**
+     * Returns the stencil support for the given shape. The shape's fill must not be an inverse
+     * type, and the shape will always be filled, not stroked.
+ *
+ * @param shape the shape that will be drawn. Must be simple fill styled and non-inverse
+ * filled.
+ */
+ StencilSupport getStencilSupport(const GrShape& shape) const {
+ SkDEBUGCODE(SkPath path;)
+ SkDEBUGCODE(shape.asPath(&path);)
+ SkASSERT(shape.style().isSimpleFill());
+ SkASSERT(!path.isInverseFillType());
+ return this->onGetStencilSupport(shape);
+ }
+
+ /** Args to canDrawPath()
+ *
+     * fShaderCaps             The shader caps
+     * fViewMatrix             The viewMatrix
+     * fShape                  The shape to draw
+     * fAntiAlias              True if anti-aliasing is required.
+     * fHasUserStencilSettings True if the draw has user stencil settings (StencilAndCover only)
+     * fIsStencilBufferMSAA    True if the stencil buffer is multisampled (StencilAndCover only)
+ */
+ struct CanDrawPathArgs {
+ const GrShaderCaps* fShaderCaps;
+ const SkMatrix* fViewMatrix;
+ const GrShape* fShape;
+ bool fAntiAlias;
+
+ // These next two are only used by GrStencilAndCoverPathRenderer
+ bool fHasUserStencilSettings;
+ bool fIsStencilBufferMSAA;
+
+#ifdef SK_DEBUG
+ void validate() const {
+ SkASSERT(fShaderCaps);
+ SkASSERT(fViewMatrix);
+ SkASSERT(fShape);
+ }
+#endif
+ };
+
+ /**
+ * Returns true if this path renderer is able to render the path. Returning false allows the
+     * caller to fall back to another path renderer. This function is called when searching for a
+     * path renderer capable of rendering a path.
+ *
+ * @return true if the path can be drawn by this object, false otherwise.
+ */
+ bool canDrawPath(const CanDrawPathArgs& args) const {
+ SkDEBUGCODE(args.validate();)
+ return this->onCanDrawPath(args);
+ }
+
+ /**
+ * Args to drawPath()
+ *
+     * fResourceProvider     The resource provider for creating gpu resources to render the path
+     * fPaint                The paint to render with
+     * fUserStencilSettings  The user-supplied stencil settings
+     * fDrawContext          The draw context that the path will be rendered to
+     * fClip                 The clip
+     * fViewMatrix           The viewMatrix
+     * fShape                The shape to draw
+     * fAntiAlias            true if anti-aliasing is required.
+     * fGammaCorrect         true if gamma-correct rendering is to be used.
+ */
+ struct DrawPathArgs {
+ GrResourceProvider* fResourceProvider;
+ const GrPaint* fPaint;
+        const GrUserStencilSettings* fUserStencilSettings;
+
+ GrDrawContext* fDrawContext;
+ const GrClip* fClip;
+ const SkMatrix* fViewMatrix;
+ const GrShape* fShape;
+ bool fAntiAlias;
+ bool fGammaCorrect;
+#ifdef SK_DEBUG
+ void validate() const {
+ SkASSERT(fResourceProvider);
+ SkASSERT(fPaint);
+ SkASSERT(fUserStencilSettings);
+ SkASSERT(fDrawContext);
+ SkASSERT(fClip);
+ SkASSERT(fViewMatrix);
+ SkASSERT(fShape);
+ }
+#endif
+ };
+
+ /**
+ * Draws the path into the draw target. If getStencilSupport() would return kNoRestriction then
+ * the subclass must respect the stencil settings of the GrPipelineBuilder.
+ */
+ bool drawPath(const DrawPathArgs& args) {
+ SkDEBUGCODE(args.validate();)
+#ifdef SK_DEBUG
+ CanDrawPathArgs canArgs;
+ canArgs.fShaderCaps = args.fResourceProvider->caps()->shaderCaps();
+ canArgs.fViewMatrix = args.fViewMatrix;
+ canArgs.fShape = args.fShape;
+ canArgs.fAntiAlias = args.fAntiAlias;
+
+ canArgs.fHasUserStencilSettings = !args.fUserStencilSettings->isUnused();
+ canArgs.fIsStencilBufferMSAA = args.fDrawContext->isStencilBufferMultisampled();
+ SkASSERT(this->canDrawPath(canArgs));
+ if (!args.fUserStencilSettings->isUnused()) {
+ SkPath path;
+ args.fShape->asPath(&path);
+ SkASSERT(args.fShape->style().isSimpleFill());
+ SkASSERT(kNoRestriction_StencilSupport == this->getStencilSupport(*args.fShape));
+ }
+#endif
+ return this->onDrawPath(args);
+ }
+
+ /* Args to stencilPath().
+ *
+     * fResourceProvider The resource provider for creating gpu resources to render the path
+     * fDrawContext      The target of the draws
+     * fClip             The clip
+     * fViewMatrix       Matrix applied to the shape.
+     * fShape            The shape to draw.
+     * fIsAA             Is the shape to be drawn AA (only set when MSAA is available)
+ */
+ struct StencilPathArgs {
+ GrResourceProvider* fResourceProvider;
+ GrDrawContext* fDrawContext;
+ const GrClip* fClip;
+ const SkMatrix* fViewMatrix;
+ bool fIsAA;
+ const GrShape* fShape;
+
+#ifdef SK_DEBUG
+ void validate() const {
+ SkASSERT(fResourceProvider);
+ SkASSERT(fDrawContext);
+ SkASSERT(fViewMatrix);
+ SkASSERT(fShape);
+ SkASSERT(fShape->style().isSimpleFill());
+ SkPath path;
+ fShape->asPath(&path);
+ SkASSERT(!path.isInverseFillType());
+ }
+#endif
+ };
+
+ /**
+ * Draws the path to the stencil buffer. Assume the writable stencil bits are already
+ * initialized to zero. The pixels inside the path will have non-zero stencil values afterwards.
+ */
+ void stencilPath(const StencilPathArgs& args) {
+ SkDEBUGCODE(args.validate();)
+ SkASSERT(kNoSupport_StencilSupport != this->getStencilSupport(*args.fShape));
+ this->onStencilPath(args);
+ }
+
+ // Helper for determining if we can treat a thin stroke as a hairline w/ coverage.
+    // If we can, we draw much faster (the raster device performs this same test).
+ static bool IsStrokeHairlineOrEquivalent(const GrStyle& style, const SkMatrix& matrix,
+ SkScalar* outCoverage) {
+ if (style.pathEffect()) {
+ return false;
+ }
+ const SkStrokeRec& stroke = style.strokeRec();
+ if (stroke.isHairlineStyle()) {
+ if (outCoverage) {
+ *outCoverage = SK_Scalar1;
+ }
+ return true;
+ }
+ return stroke.getStyle() == SkStrokeRec::kStroke_Style &&
+ SkDrawTreatAAStrokeAsHairline(stroke.getWidth(), matrix, outCoverage);
+ }
+
+protected:
+ // Helper for getting the device bounds of a path. Inverse filled paths will have bounds set
+ // by devSize. Non-inverse path bounds will not necessarily be clipped to devSize.
+ static void GetPathDevBounds(const SkPath& path,
+ int devW,
+ int devH,
+ const SkMatrix& matrix,
+ SkRect* bounds);
+
+private:
+ /**
+ * Subclass overrides if it has any limitations of stenciling support.
+ */
+ virtual StencilSupport onGetStencilSupport(const GrShape&) const {
+ return kNoRestriction_StencilSupport;
+ }
+
+ /**
+ * Subclass implementation of drawPath()
+ */
+ virtual bool onDrawPath(const DrawPathArgs& args) = 0;
+
+ /**
+ * Subclass implementation of canDrawPath()
+ */
+ virtual bool onCanDrawPath(const CanDrawPathArgs& args) const = 0;
+
+ /**
+ * Subclass implementation of stencilPath(). Subclass must override iff it ever returns
+ * kStencilOnly in onGetStencilSupport().
+ */
+ virtual void onStencilPath(const StencilPathArgs& args) {
+ static constexpr GrUserStencilSettings kIncrementStencil(
+ GrUserStencilSettings::StaticInit<
+ 0xffff,
+ GrUserStencilTest::kAlways,
+ 0xffff,
+ GrUserStencilOp::kReplace,
+ GrUserStencilOp::kReplace,
+ 0xffff>()
+ );
+
+ GrPaint paint;
+
+ DrawPathArgs drawArgs;
+ drawArgs.fResourceProvider = args.fResourceProvider;
+ drawArgs.fPaint = &paint;
+ drawArgs.fUserStencilSettings = &kIncrementStencil;
+ drawArgs.fDrawContext = args.fDrawContext;
+ drawArgs.fViewMatrix = args.fViewMatrix;
+ drawArgs.fShape = args.fShape;
+ drawArgs.fAntiAlias = false; // In this case the MSAA handles the AA so we want to draw BW
+ drawArgs.fGammaCorrect = false;
+ this->drawPath(drawArgs);
+ }
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrPathRendererChain.cpp b/gfx/skia/skia/src/gpu/GrPathRendererChain.cpp
new file mode 100644
index 000000000..95105ba07
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathRendererChain.cpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrPathRendererChain.h"
+
+#include "GrCaps.h"
+#include "gl/GrGLCaps.h"
+#include "glsl/GrGLSLCaps.h"
+#include "GrContext.h"
+#include "GrGpu.h"
+
+#include "batches/GrAAConvexPathRenderer.h"
+#include "batches/GrAADistanceFieldPathRenderer.h"
+#include "batches/GrAAHairLinePathRenderer.h"
+#include "batches/GrAALinearizingConvexPathRenderer.h"
+#include "batches/GrDashLinePathRenderer.h"
+#include "batches/GrDefaultPathRenderer.h"
+#include "batches/GrMSAAPathRenderer.h"
+#include "batches/GrPLSPathRenderer.h"
+#include "batches/GrStencilAndCoverPathRenderer.h"
+#include "batches/GrTessellatingPathRenderer.h"
+
+GrPathRendererChain::GrPathRendererChain(GrContext* context, const Options& options) {
+ if (!options.fDisableAllPathRenderers) {
+ const GrCaps& caps = *context->caps();
+ this->addPathRenderer(new GrDashLinePathRenderer)->unref();
+
+ if (GrPathRenderer* pr = GrStencilAndCoverPathRenderer::Create(context->resourceProvider(),
+ caps)) {
+ this->addPathRenderer(pr)->unref();
+ }
+ #ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ if (caps.sampleShadingSupport()) {
+ this->addPathRenderer(new GrMSAAPathRenderer)->unref();
+ }
+ #endif
+ this->addPathRenderer(new GrAAHairLinePathRenderer)->unref();
+ this->addPathRenderer(new GrAAConvexPathRenderer)->unref();
+ this->addPathRenderer(new GrAALinearizingConvexPathRenderer)->unref();
+ if (caps.shaderCaps()->plsPathRenderingSupport()) {
+ this->addPathRenderer(new GrPLSPathRenderer)->unref();
+ }
+ if (!options.fDisableDistanceFieldRenderer) {
+ this->addPathRenderer(new GrAADistanceFieldPathRenderer)->unref();
+ }
+ this->addPathRenderer(new GrTessellatingPathRenderer)->unref();
+ this->addPathRenderer(new GrDefaultPathRenderer(caps.twoSidedStencilSupport(),
+ caps.stencilWrapOpsSupport()))->unref();
+ }
+}
+
+GrPathRendererChain::~GrPathRendererChain() {
+ for (int i = 0; i < fChain.count(); ++i) {
+ fChain[i]->unref();
+ }
+}
+
+GrPathRenderer* GrPathRendererChain::addPathRenderer(GrPathRenderer* pr) {
+ fChain.push_back() = pr;
+ pr->ref();
+ return pr;
+}
+
+GrPathRenderer* GrPathRendererChain::getPathRenderer(
+ const GrPathRenderer::CanDrawPathArgs& args,
+ DrawType drawType,
+ GrPathRenderer::StencilSupport* stencilSupport) {
+ GR_STATIC_ASSERT(GrPathRenderer::kNoSupport_StencilSupport <
+ GrPathRenderer::kStencilOnly_StencilSupport);
+ GR_STATIC_ASSERT(GrPathRenderer::kStencilOnly_StencilSupport <
+ GrPathRenderer::kNoRestriction_StencilSupport);
+ GrPathRenderer::StencilSupport minStencilSupport;
+ if (kStencilOnly_DrawType == drawType) {
+ minStencilSupport = GrPathRenderer::kStencilOnly_StencilSupport;
+ } else if (kStencilAndColor_DrawType == drawType ||
+ kStencilAndColorAntiAlias_DrawType == drawType) {
+ minStencilSupport = GrPathRenderer::kNoRestriction_StencilSupport;
+ } else {
+ minStencilSupport = GrPathRenderer::kNoSupport_StencilSupport;
+ }
+ if (minStencilSupport != GrPathRenderer::kNoSupport_StencilSupport) {
+ // We don't support (and shouldn't need) stenciling of non-fill paths.
+ if (!args.fShape->style().isSimpleFill()) {
+ return nullptr;
+ }
+ }
+
+ for (int i = 0; i < fChain.count(); ++i) {
+ if (fChain[i]->canDrawPath(args)) {
+ if (GrPathRenderer::kNoSupport_StencilSupport != minStencilSupport) {
+ GrPathRenderer::StencilSupport support = fChain[i]->getStencilSupport(*args.fShape);
+ if (support < minStencilSupport) {
+ continue;
+ } else if (stencilSupport) {
+ *stencilSupport = support;
+ }
+ }
+ return fChain[i];
+ }
+ }
+ return nullptr;
+}
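+
+// Summary of the drawType -> minimum stencil support mapping applied above:
+//   kStencilOnly_DrawType                                          -> kStencilOnly_StencilSupport
+//   kStencilAndColor_DrawType, kStencilAndColorAntiAlias_DrawType  -> kNoRestriction_StencilSupport
+//   kColor_DrawType, kColorAntiAlias_DrawType                      -> kNoSupport_StencilSupport (none required)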
diff --git a/gfx/skia/skia/src/gpu/GrPathRendererChain.h b/gfx/skia/skia/src/gpu/GrPathRendererChain.h
new file mode 100644
index 000000000..8788374d7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathRendererChain.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPathRendererChain_DEFINED
+#define GrPathRendererChain_DEFINED
+
+#include "GrPathRenderer.h"
+
+#include "SkTypes.h"
+#include "SkTArray.h"
+
+class GrContext;
+
+/**
+ * Keeps track of an ordered list of path renderers. When a path needs to be
+ * drawn this list is scanned to find the most preferred renderer. Path renderers
+ * are added to the list in the GrPathRendererChain constructor.
+ */
+class GrPathRendererChain : public SkNoncopyable {
+public:
+ struct Options {
+ bool fDisableDistanceFieldRenderer = false;
+ bool fAllowPathMaskCaching = false;
+ bool fDisableAllPathRenderers = false;
+ };
+ GrPathRendererChain(GrContext* context, const Options&);
+
+ ~GrPathRendererChain();
+
+ /** Documents how the caller plans to use a GrPathRenderer to draw a path. It affects the PR
+ returned by getPathRenderer */
+ enum DrawType {
+ kColor_DrawType, // draw to the color buffer, no AA
+ kColorAntiAlias_DrawType, // draw to color buffer, with partial coverage AA
+ kStencilOnly_DrawType, // draw just to the stencil buffer
+ kStencilAndColor_DrawType, // draw the stencil and color buffer, no AA
+ kStencilAndColorAntiAlias_DrawType // draw the stencil and color buffer, with partial
+ // coverage AA.
+ };
+
+ /** Returns a GrPathRenderer compatible with the request if one is available. If the caller
+ is drawing the path to the stencil buffer then stencilSupport can be used to determine
+ whether the path can be rendered with arbitrary stencil rules or not. See comments on
+ StencilSupport in GrPathRenderer.h. */
+ GrPathRenderer* getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
+ DrawType drawType,
+ GrPathRenderer::StencilSupport* stencilSupport);
+
+private:
+ // takes a ref and unrefs in destructor
+ GrPathRenderer* addPathRenderer(GrPathRenderer* pr);
+
+ enum {
+ kPreAllocCount = 8,
+ };
+ SkSTArray<kPreAllocCount, GrPathRenderer*, true> fChain;
+};
+
+#endif
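
A rough sketch of how a caller might query the chain declared here, assuming the surrounding Skia GPU headers in this patch; only the CanDrawPathArgs field referenced in this patch is shown, and drawShapeWith() is a hypothetical helper:

    GrPathRenderer::CanDrawPathArgs canDrawArgs;
    canDrawArgs.fShape = &shape;    // GrShape describing the geometry and its style
    // ... the remaining CanDrawPathArgs fields (caps, view matrix, AA request) are omitted ...

    GrPathRenderer::StencilSupport stencilSupport;
    GrPathRenderer* pr = chain->getPathRenderer(
            canDrawArgs, GrPathRendererChain::kStencilAndColor_DrawType, &stencilSupport);
    if (!pr) {
        // Nothing in the chain can handle this shape with the required stencil rules;
        // the caller falls back to another strategy (e.g. software rasterization).
        return;
    }
    drawShapeWith(pr, shape, stencilSupport);   // hypothetical helper
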
diff --git a/gfx/skia/skia/src/gpu/GrPathRendering.cpp b/gfx/skia/skia/src/gpu/GrPathRendering.cpp
new file mode 100644
index 000000000..fe912e23a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathRendering.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrPathRendering.h"
+#include "SkDescriptor.h"
+#include "SkGlyph.h"
+#include "SkMatrix.h"
+#include "SkTypeface.h"
+#include "GrPathRange.h"
+
+const GrUserStencilSettings& GrPathRendering::GetStencilPassSettings(FillType fill) {
+ switch (fill) {
+ default:
+ SkFAIL("Unexpected path fill.");
+ case GrPathRendering::kWinding_FillType: {
+ constexpr static GrUserStencilSettings kWindingStencilPass(
+ GrUserStencilSettings::StaticInit<
+ 0xffff,
+ GrUserStencilTest::kAlwaysIfInClip,
+ 0xffff,
+ GrUserStencilOp::kIncWrap,
+ GrUserStencilOp::kIncWrap,
+ 0xffff>()
+ );
+ return kWindingStencilPass;
+ }
+ case GrPathRendering::kEvenOdd_FillType: {
+ constexpr static GrUserStencilSettings kEvenOddStencilPass(
+ GrUserStencilSettings::StaticInit<
+ 0xffff,
+ GrUserStencilTest::kAlwaysIfInClip,
+ 0xffff,
+ GrUserStencilOp::kInvert,
+ GrUserStencilOp::kInvert,
+ 0xffff>()
+ );
+ return kEvenOddStencilPass;
+ }
+ }
+}
+
+class GlyphGenerator : public GrPathRange::PathGenerator {
+public:
+ GlyphGenerator(const SkTypeface& typeface, const SkScalerContextEffects& effects,
+ const SkDescriptor& desc)
+ : fScalerContext(typeface.createScalerContext(effects, &desc))
+#ifdef SK_DEBUG
+ , fDesc(desc.copy())
+#endif
+ {}
+
+ virtual ~GlyphGenerator() {
+#ifdef SK_DEBUG
+ SkDescriptor::Free(fDesc);
+#endif
+ }
+
+ int getNumPaths() override {
+ return fScalerContext->getGlyphCount();
+ }
+
+ void generatePath(int glyphID, SkPath* out) override {
+ SkGlyph skGlyph;
+ skGlyph.initWithGlyphID(glyphID);
+ fScalerContext->getMetrics(&skGlyph);
+
+ fScalerContext->getPath(skGlyph, out);
+ }
+#ifdef SK_DEBUG
+ bool isEqualTo(const SkDescriptor& desc) const override { return *fDesc == desc; }
+#endif
+private:
+ const SkAutoTDelete<SkScalerContext> fScalerContext;
+#ifdef SK_DEBUG
+ SkDescriptor* const fDesc;
+#endif
+};
+
+GrPathRange* GrPathRendering::createGlyphs(const SkTypeface* typeface,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc,
+ const GrStyle& style) {
+ if (nullptr == typeface) {
+ typeface = SkTypeface::GetDefaultTypeface();
+ SkASSERT(nullptr != typeface);
+ }
+
+ if (desc) {
+ SkAutoTUnref<GlyphGenerator> generator(new GlyphGenerator(*typeface, effects, *desc));
+ return this->createPathRange(generator, style);
+ }
+
+ SkScalerContextRec rec;
+ memset(&rec, 0, sizeof(rec));
+ rec.fFontID = typeface->uniqueID();
+ rec.fTextSize = SkPaint::kCanonicalTextSizeForPaths;
+ rec.fPreScaleX = rec.fPost2x2[0][0] = rec.fPost2x2[1][1] = SK_Scalar1;
+ // Don't bake stroke information into the glyphs, we'll let the GPU do the stroking.
+
+ SkAutoDescriptor ad(sizeof(rec) + SkDescriptor::ComputeOverhead(1));
+ SkDescriptor* genericDesc = ad.getDesc();
+
+ genericDesc->init();
+ genericDesc->addEntry(kRec_SkDescriptorTag, sizeof(rec), &rec);
+ genericDesc->computeChecksum();
+
+ // No effects, so we make a dummy struct
+ SkScalerContextEffects noEffects;
+
+ SkAutoTUnref<GlyphGenerator> generator(new GlyphGenerator(*typeface, noEffects, *genericDesc));
+ return this->createPathRange(generator, style);
+}
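
The stencil pass settings at the top of this file correspond to the winding and even-odd fill rules declared in GrPathRendering.h. The following self-contained sketch (a plain CPU-side model, not the GPU stencil path) shows how the two rules can disagree for a point inside two nested contours traced in the same direction:

    #include <cstdio>
    #include <vector>

    struct Pt { double x, y; };
    using Contour = std::vector<Pt>;

    // Shoot a ray from p toward +x and accumulate crossings against every contour edge.
    // winding != 0     -> inside under kWinding_FillType
    // crossings is odd -> inside under kEvenOdd_FillType
    static void classify(const std::vector<Contour>& contours, Pt p, int* winding, int* crossings) {
        *winding = 0;
        *crossings = 0;
        for (const Contour& c : contours) {
            for (size_t i = 0; i < c.size(); ++i) {
                Pt a = c[i], b = c[(i + 1) % c.size()];
                bool up   = a.y <= p.y && b.y > p.y;   // edge crosses the ray going up
                bool down = b.y <= p.y && a.y > p.y;   // edge crosses the ray going down
                if (!up && !down) continue;
                double t = (p.y - a.y) / (b.y - a.y);
                if (a.x + t * (b.x - a.x) > p.x) {
                    *crossings += 1;
                    *winding   += up ? 1 : -1;
                }
            }
        }
    }

    int main() {
        // Two nested squares traced in the same direction.
        std::vector<Contour> contours = {
            { {0, 0}, {10, 0}, {10, 10}, {0, 10} },
            { {2, 2}, { 8, 2}, { 8,  8}, {2,  8} },
        };
        int winding, crossings;
        classify(contours, { 5, 5 }, &winding, &crossings);
        std::printf("winding=%d (non-zero: inside), crossings=%d (even: outside)\n",
                    winding, crossings);
    }
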
diff --git a/gfx/skia/skia/src/gpu/GrPathRendering.h b/gfx/skia/skia/src/gpu/GrPathRendering.h
new file mode 100644
index 000000000..f2c02d9e8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathRendering.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPathRendering_DEFINED
+#define GrPathRendering_DEFINED
+
+#include "SkPath.h"
+#include "GrGpu.h"
+#include "GrPathRange.h"
+#include "GrPipeline.h"
+
+class SkDescriptor;
+class SkTypeface;
+class GrPath;
+class GrStencilSettings;
+class GrStyle;
+
+/**
+ * Abstract class wrapping HW path rendering API.
+ *
+ * The subclasses of this class use the available HW path rendering API to render paths (as
+ * opposed to path rendering implemented in Skia on top of a "3d" HW API).
+ * The subclasses hold the global state needed to render paths, including a shadow of the global
+ * HW API state, similar to GrGpu.
+ *
+ * It is expected that the lifetimes of GrGpuXX and GrXXPathRendering are the same. The calling
+ * context (e.g. the concrete instance of the GrGpu subclass) should be provided to the instance
+ * during construction.
+ */
+class GrPathRendering {
+public:
+ virtual ~GrPathRendering() { }
+
+ typedef GrPathRange::PathIndexType PathIndexType;
+
+ enum PathTransformType {
+ kNone_PathTransformType, //!< []
+ kTranslateX_PathTransformType, //!< [kMTransX]
+ kTranslateY_PathTransformType, //!< [kMTransY]
+ kTranslate_PathTransformType, //!< [kMTransX, kMTransY]
+ kAffine_PathTransformType, //!< [kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY]
+
+ kLast_PathTransformType = kAffine_PathTransformType
+ };
+
+ static inline int PathTransformSize(PathTransformType type) {
+ switch (type) {
+ case kNone_PathTransformType:
+ return 0;
+ case kTranslateX_PathTransformType:
+ case kTranslateY_PathTransformType:
+ return 1;
+ case kTranslate_PathTransformType:
+ return 2;
+ case kAffine_PathTransformType:
+ return 6;
+
+ default:
+ SkFAIL("Unknown path transform type");
+ return 0;
+ }
+ }
+
+ // No native support for inverse at this time
+ enum FillType {
+ /** Specifies that "inside" is computed by a non-zero sum of signed
+ edge crossings
+ */
+ kWinding_FillType,
+ /** Specifies that "inside" is computed by an odd number of edge
+ crossings
+ */
+ kEvenOdd_FillType,
+ };
+
+ static const GrUserStencilSettings& GetStencilPassSettings(FillType);
+
+ /**
+ * Creates a new gpu path, based on the specified path and stroke and returns it.
+ * The caller owns a ref on the returned path which must be balanced by a call to unref.
+ *
+ * @param SkPath the geometry.
+ * @param GrStyle the style applied to the path. Styles with non-dash path effects are not
+ * allowed.
+ * @return a new GPU path object.
+ */
+ virtual GrPath* createPath(const SkPath&, const GrStyle&) = 0;
+
+ /**
+ * Creates a range of gpu paths with a common style. The caller owns a ref on the
+ * returned path range which must be balanced by a call to unref.
+ *
+ * @param PathGenerator class that generates SkPath objects for each path in the range.
+ * @param GrStyle the common style applied to each path in the range. Styles with non-dash
+ * path effects are not allowed.
+ * @return a new path range.
+ */
+ virtual GrPathRange* createPathRange(GrPathRange::PathGenerator*, const GrStyle&) = 0;
+
+ /**
+ * Creates a range of glyph paths, indexed by glyph id. The glyphs will have an
+ * inverted y-direction in order to match the raw font path data. The caller owns
+ * a ref on the returned path range which must be balanced by a call to unref.
+ *
+ * @param SkTypeface Typeface that defines the glyphs.
+ * If null, the default typeface will be used.
+ *
+ * @param SkDescriptor Additional font configuration that specifies the font's size,
+ * stroke, and other flags. This will generally come from an
+ * SkGlyphCache.
+ *
+ * It is recommended to leave this value null when possible, in
+ * which case the glyphs will be loaded directly from the font's
+ * raw path data and sized at SkPaint::kCanonicalTextSizeForPaths.
+ * This will result in less memory usage and more efficient paths.
+ *
+ * If non-null, the glyph paths will match the font descriptor,
+ * including with the stroke information baked directly into
+ * the outlines.
+ *
+ * @param GrStyle Common style that the GPU will apply to every path. Note that
+ * if the glyph outlines contain baked-in styles from the font
+ * descriptor, the GPU style will be applied on top of those
+ * outlines.
+ *
+ * @return a new path range populated with glyphs.
+ */
+ GrPathRange* createGlyphs(const SkTypeface*, const SkScalerContextEffects&,
+ const SkDescriptor*, const GrStyle&);
+
+ /** None of these params are optional, pointers used just to avoid making copies. */
+ struct StencilPathArgs {
+ StencilPathArgs(bool useHWAA,
+ GrRenderTarget* renderTarget,
+ const SkMatrix* viewMatrix,
+ const GrScissorState* scissor,
+ const GrStencilSettings* stencil)
+ : fUseHWAA(useHWAA)
+ , fRenderTarget(renderTarget)
+ , fViewMatrix(viewMatrix)
+ , fScissor(scissor)
+ , fStencil(stencil) {
+ }
+ bool fUseHWAA;
+ GrRenderTarget* fRenderTarget;
+ const SkMatrix* fViewMatrix;
+ const GrScissorState* fScissor;
+ const GrStencilSettings* fStencil;
+ };
+
+ void stencilPath(const StencilPathArgs& args, const GrPath* path) {
+ fGpu->handleDirtyContext();
+ this->onStencilPath(args, path);
+ }
+
+ void drawPath(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ const GrStencilSettings& stencilPassSettings, // Cover pass settings in pipeline.
+ const GrPath* path) {
+ fGpu->handleDirtyContext();
+ if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*fGpu->caps())) {
+ fGpu->xferBarrier(pipeline.getRenderTarget(), barrierType);
+ }
+ this->onDrawPath(pipeline, primProc, stencilPassSettings, path);
+ }
+
+ void drawPaths(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ const GrStencilSettings& stencilPassSettings, // Cover pass settings in pipeline.
+ const GrPathRange* pathRange,
+ const void* indices,
+ PathIndexType indexType,
+ const float transformValues[],
+ PathTransformType transformType,
+ int count) {
+ fGpu->handleDirtyContext();
+ if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*fGpu->caps())) {
+ fGpu->xferBarrier(pipeline.getRenderTarget(), barrierType);
+ }
+#ifdef SK_DEBUG
+ pathRange->assertPathsLoaded(indices, indexType, count);
+#endif
+ this->onDrawPaths(pipeline, primProc, stencilPassSettings, pathRange, indices, indexType,
+ transformValues, transformType, count);
+ }
+
+protected:
+ GrPathRendering(GrGpu* gpu)
+ : fGpu(gpu) {
+ }
+ virtual void onStencilPath(const StencilPathArgs&, const GrPath*) = 0;
+ virtual void onDrawPath(const GrPipeline&,
+ const GrPrimitiveProcessor&,
+ const GrStencilSettings&,
+ const GrPath*) = 0;
+ virtual void onDrawPaths(const GrPipeline&,
+ const GrPrimitiveProcessor&,
+ const GrStencilSettings&,
+ const GrPathRange*,
+ const void* indices,
+ PathIndexType,
+ const float transformValues[],
+ PathTransformType,
+ int count) = 0;
+
+ GrGpu* fGpu;
+private:
+ GrPathRendering& operator=(const GrPathRendering&);
+};
+
+#endif
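
A self-contained illustration of the packed per-path transform layout documented by PathTransformType above, using plain C++ stand-ins rather than the real Skia types:

    #include <cstdio>

    enum PathTransformType {    // mirrors GrPathRendering::PathTransformType
        kNone, kTranslateX, kTranslateY, kTranslate, kAffine
    };

    struct Pt { float x, y; };

    // Applies one packed transform (laid out as documented above) to a point.
    static Pt apply(PathTransformType type, const float* v, Pt p) {
        switch (type) {
            case kNone:       return p;
            case kTranslateX: return { p.x + v[0], p.y };
            case kTranslateY: return { p.x, p.y + v[0] };
            case kTranslate:  return { p.x + v[0], p.y + v[1] };
            case kAffine:     // [scaleX, skewX, transX, skewY, scaleY, transY]
                return { v[0] * p.x + v[1] * p.y + v[2],
                         v[3] * p.x + v[4] * p.y + v[5] };
        }
        return p;
    }

    int main() {
        const float affine[6] = { 2, 0, 10, 0, 2, 20 };     // scale by 2, translate by (10, 20)
        Pt a = apply(kAffine, affine, { 3, 4 });
        std::printf("affine     -> (%g, %g)\n", a.x, a.y);  // (16, 28)

        const float tx[1] = { 5 };
        Pt b = apply(kTranslateX, tx, { 3, 4 });
        std::printf("translateX -> (%g, %g)\n", b.x, b.y);  // (8, 4)
    }
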
diff --git a/gfx/skia/skia/src/gpu/GrPathRenderingDrawContext.cpp b/gfx/skia/skia/src/gpu/GrPathRenderingDrawContext.cpp
new file mode 100644
index 000000000..1380f7dc7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathRenderingDrawContext.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrPathRenderingDrawContext.h"
+
+#include "GrDrawingManager.h"
+
+#include "text/GrStencilAndCoverTextContext.h"
+
+#define ASSERT_SINGLE_OWNER \
+ SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(this->singleOwner());)
+#define RETURN_IF_ABANDONED if (this->drawingManager()->wasAbandoned()) { return; }
+
+void GrPathRenderingDrawContext::drawText(const GrClip& clip, const GrPaint& grPaint,
+ const SkPaint& skPaint,
+ const SkMatrix& viewMatrix, const char text[],
+ size_t byteLength, SkScalar x, SkScalar y,
+ const SkIRect& clipBounds) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(this->auditTrail(), "GrPathRenderingDrawContext::drawText");
+
+ if (!fStencilAndCoverTextContext) {
+ GrAtlasTextContext* fallbackContext = this->drawingManager()->getAtlasTextContext();
+ fStencilAndCoverTextContext.reset(GrStencilAndCoverTextContext::Create(fallbackContext));
+ }
+
+ fStencilAndCoverTextContext->drawText(this->drawingManager()->getContext(), this, clip, grPaint,
+ skPaint, viewMatrix, this->surfaceProps(),
+ text, byteLength, x, y, clipBounds);
+}
+
+void GrPathRenderingDrawContext::drawPosText(const GrClip& clip, const GrPaint& grPaint,
+ const SkPaint& skPaint,
+ const SkMatrix& viewMatrix, const char text[],
+ size_t byteLength, const SkScalar pos[],
+ int scalarsPerPosition, const SkPoint& offset,
+ const SkIRect& clipBounds) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(this->auditTrail(), "GrPathRenderingDrawContext::drawPosText");
+
+ if (!fStencilAndCoverTextContext) {
+ GrAtlasTextContext* fallbackContext = this->drawingManager()->getAtlasTextContext();
+ fStencilAndCoverTextContext.reset(GrStencilAndCoverTextContext::Create(fallbackContext));
+ }
+
+ fStencilAndCoverTextContext->drawPosText(this->drawingManager()->getContext(), this, clip,
+ grPaint, skPaint, viewMatrix, this->surfaceProps(),
+ text, byteLength, pos, scalarsPerPosition, offset,
+ clipBounds);
+}
+
+void GrPathRenderingDrawContext::drawTextBlob(const GrClip& clip, const SkPaint& skPaint,
+ const SkMatrix& viewMatrix, const SkTextBlob* blob,
+ SkScalar x, SkScalar y,
+ SkDrawFilter* filter, const SkIRect& clipBounds) {
+ ASSERT_SINGLE_OWNER
+ RETURN_IF_ABANDONED
+ SkDEBUGCODE(this->validate();)
+ GR_AUDIT_TRAIL_AUTO_FRAME(this->auditTrail(), "GrPathRenderingDrawContext::drawTextBlob");
+
+ if (!fStencilAndCoverTextContext) {
+ GrAtlasTextContext* fallbackContext = this->drawingManager()->getAtlasTextContext();
+ fStencilAndCoverTextContext.reset(GrStencilAndCoverTextContext::Create(fallbackContext));
+ }
+
+ fStencilAndCoverTextContext->drawTextBlob(this->drawingManager()->getContext(), this, clip,
+ skPaint, viewMatrix, this->surfaceProps(), blob, x,
+ y, filter, clipBounds);
+}
diff --git a/gfx/skia/skia/src/gpu/GrPathRenderingDrawContext.h b/gfx/skia/skia/src/gpu/GrPathRenderingDrawContext.h
new file mode 100644
index 000000000..5c1a968b1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathRenderingDrawContext.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPathRenderingDrawContext_DEFINED
+#define GrPathRenderingDrawContext_DEFINED
+
+#include "GrDrawContext.h"
+
+class GrStencilAndCoverTextContext;
+
+class GrPathRenderingDrawContext : public GrDrawContext {
+public:
+ void drawText(const GrClip&, const GrPaint&, const SkPaint&,
+ const SkMatrix& viewMatrix, const char text[], size_t byteLength,
+ SkScalar x, SkScalar y, const SkIRect& clipBounds) override;
+ void drawPosText(const GrClip&, const GrPaint&, const SkPaint&,
+ const SkMatrix& viewMatrix, const char text[], size_t byteLength,
+ const SkScalar pos[], int scalarsPerPosition,
+ const SkPoint& offset, const SkIRect& clipBounds) override;
+ void drawTextBlob(const GrClip&, const SkPaint&,
+ const SkMatrix& viewMatrix, const SkTextBlob*,
+ SkScalar x, SkScalar y,
+ SkDrawFilter*, const SkIRect& clipBounds) override;
+protected:
+ GrPathRenderingDrawContext(GrContext* ctx, GrDrawingManager* mgr, sk_sp<GrRenderTarget> rt,
+ sk_sp<SkColorSpace> colorSpace, const SkSurfaceProps* surfaceProps,
+ GrAuditTrail* at, GrSingleOwner* so)
+ : INHERITED(ctx, mgr, std::move(rt), std::move(colorSpace), surfaceProps, at, so) {}
+
+private:
+ SkAutoTDelete<GrStencilAndCoverTextContext> fStencilAndCoverTextContext;
+
+ friend class GrDrawingManager; // for ctor
+
+ typedef GrDrawContext INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrPathUtils.cpp b/gfx/skia/skia/src/gpu/GrPathUtils.cpp
new file mode 100644
index 000000000..bff949011
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathUtils.cpp
@@ -0,0 +1,826 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrPathUtils.h"
+
+#include "GrTypes.h"
+#include "SkGeometry.h"
+#include "SkMathPriv.h"
+
+SkScalar GrPathUtils::scaleToleranceToSrc(SkScalar devTol,
+ const SkMatrix& viewM,
+ const SkRect& pathBounds) {
+    // In order to tessellate the path we get a bound on how much the matrix can
+ // scale when mapping to screen coordinates.
+ SkScalar stretch = viewM.getMaxScale();
+ SkScalar srcTol = devTol;
+
+ if (stretch < 0) {
+        // Take the worst-case mapRadius among the four corners.
+ // (less than perfect)
+ for (int i = 0; i < 4; ++i) {
+ SkMatrix mat;
+ mat.setTranslate((i % 2) ? pathBounds.fLeft : pathBounds.fRight,
+ (i < 2) ? pathBounds.fTop : pathBounds.fBottom);
+ mat.postConcat(viewM);
+ stretch = SkMaxScalar(stretch, mat.mapRadius(SK_Scalar1));
+ }
+ }
+ return srcTol / stretch;
+}
+
+static const int MAX_POINTS_PER_CURVE = 1 << 10;
+static const SkScalar gMinCurveTol = 0.0001f;
+
+uint32_t GrPathUtils::quadraticPointCount(const SkPoint points[],
+ SkScalar tol) {
+ if (tol < gMinCurveTol) {
+ tol = gMinCurveTol;
+ }
+ SkASSERT(tol > 0);
+
+ SkScalar d = points[1].distanceToLineSegmentBetween(points[0], points[2]);
+ if (!SkScalarIsFinite(d)) {
+ return MAX_POINTS_PER_CURVE;
+ } else if (d <= tol) {
+ return 1;
+ } else {
+        // Each time we subdivide, d should be cut by a factor of 4. So we need to
+        // subdivide x = log4(d/tol) times. x subdivisions create 2^(x)
+        // points.
+        // 2^(log4(x)) = sqrt(x);
+ SkScalar divSqrt = SkScalarSqrt(d / tol);
+ if (((SkScalar)SK_MaxS32) <= divSqrt) {
+ return MAX_POINTS_PER_CURVE;
+ } else {
+ int temp = SkScalarCeilToInt(divSqrt);
+ int pow2 = GrNextPow2(temp);
+ // Because of NaNs & INFs we can wind up with a degenerate temp
+ // such that pow2 comes out negative. Also, our point generator
+ // will always output at least one pt.
+ if (pow2 < 1) {
+ pow2 = 1;
+ }
+ return SkTMin(pow2, MAX_POINTS_PER_CURVE);
+ }
+ }
+}
+
+uint32_t GrPathUtils::generateQuadraticPoints(const SkPoint& p0,
+ const SkPoint& p1,
+ const SkPoint& p2,
+ SkScalar tolSqd,
+ SkPoint** points,
+ uint32_t pointsLeft) {
+ if (pointsLeft < 2 ||
+ (p1.distanceToLineSegmentBetweenSqd(p0, p2)) < tolSqd) {
+ (*points)[0] = p2;
+ *points += 1;
+ return 1;
+ }
+
+ SkPoint q[] = {
+ { SkScalarAve(p0.fX, p1.fX), SkScalarAve(p0.fY, p1.fY) },
+ { SkScalarAve(p1.fX, p2.fX), SkScalarAve(p1.fY, p2.fY) },
+ };
+ SkPoint r = { SkScalarAve(q[0].fX, q[1].fX), SkScalarAve(q[0].fY, q[1].fY) };
+
+ pointsLeft >>= 1;
+ uint32_t a = generateQuadraticPoints(p0, q[0], r, tolSqd, points, pointsLeft);
+ uint32_t b = generateQuadraticPoints(r, q[1], p2, tolSqd, points, pointsLeft);
+ return a + b;
+}
+
+uint32_t GrPathUtils::cubicPointCount(const SkPoint points[],
+ SkScalar tol) {
+ if (tol < gMinCurveTol) {
+ tol = gMinCurveTol;
+ }
+ SkASSERT(tol > 0);
+
+ SkScalar d = SkTMax(
+ points[1].distanceToLineSegmentBetweenSqd(points[0], points[3]),
+ points[2].distanceToLineSegmentBetweenSqd(points[0], points[3]));
+ d = SkScalarSqrt(d);
+ if (!SkScalarIsFinite(d)) {
+ return MAX_POINTS_PER_CURVE;
+ } else if (d <= tol) {
+ return 1;
+ } else {
+ SkScalar divSqrt = SkScalarSqrt(d / tol);
+ if (((SkScalar)SK_MaxS32) <= divSqrt) {
+ return MAX_POINTS_PER_CURVE;
+ } else {
+            int temp = SkScalarCeilToInt(divSqrt);
+ int pow2 = GrNextPow2(temp);
+ // Because of NaNs & INFs we can wind up with a degenerate temp
+ // such that pow2 comes out negative. Also, our point generator
+ // will always output at least one pt.
+ if (pow2 < 1) {
+ pow2 = 1;
+ }
+ return SkTMin(pow2, MAX_POINTS_PER_CURVE);
+ }
+ }
+}
+
+uint32_t GrPathUtils::generateCubicPoints(const SkPoint& p0,
+ const SkPoint& p1,
+ const SkPoint& p2,
+ const SkPoint& p3,
+ SkScalar tolSqd,
+ SkPoint** points,
+ uint32_t pointsLeft) {
+ if (pointsLeft < 2 ||
+ (p1.distanceToLineSegmentBetweenSqd(p0, p3) < tolSqd &&
+ p2.distanceToLineSegmentBetweenSqd(p0, p3) < tolSqd)) {
+ (*points)[0] = p3;
+ *points += 1;
+ return 1;
+ }
+ SkPoint q[] = {
+ { SkScalarAve(p0.fX, p1.fX), SkScalarAve(p0.fY, p1.fY) },
+ { SkScalarAve(p1.fX, p2.fX), SkScalarAve(p1.fY, p2.fY) },
+ { SkScalarAve(p2.fX, p3.fX), SkScalarAve(p2.fY, p3.fY) }
+ };
+ SkPoint r[] = {
+ { SkScalarAve(q[0].fX, q[1].fX), SkScalarAve(q[0].fY, q[1].fY) },
+ { SkScalarAve(q[1].fX, q[2].fX), SkScalarAve(q[1].fY, q[2].fY) }
+ };
+ SkPoint s = { SkScalarAve(r[0].fX, r[1].fX), SkScalarAve(r[0].fY, r[1].fY) };
+ pointsLeft >>= 1;
+ uint32_t a = generateCubicPoints(p0, q[0], r[0], s, tolSqd, points, pointsLeft);
+ uint32_t b = generateCubicPoints(s, r[1], q[2], p3, tolSqd, points, pointsLeft);
+ return a + b;
+}
+
+int GrPathUtils::worstCasePointCount(const SkPath& path, int* subpaths,
+ SkScalar tol) {
+ if (tol < gMinCurveTol) {
+ tol = gMinCurveTol;
+ }
+ SkASSERT(tol > 0);
+
+ int pointCount = 0;
+ *subpaths = 1;
+
+ bool first = true;
+
+ SkPath::Iter iter(path, false);
+ SkPath::Verb verb;
+
+ SkPoint pts[4];
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+
+ switch (verb) {
+ case SkPath::kLine_Verb:
+ pointCount += 1;
+ break;
+ case SkPath::kConic_Verb: {
+ SkScalar weight = iter.conicWeight();
+ SkAutoConicToQuads converter;
+ const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.25f);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ pointCount += quadraticPointCount(quadPts + 2*i, tol);
+ }
+ }
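+            // Note: no break here. Falling through also counts the conic's control points
+            // as if they were a quadratic, which slightly over-counts but keeps this
+            // worst-case estimate conservative.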
+ case SkPath::kQuad_Verb:
+ pointCount += quadraticPointCount(pts, tol);
+ break;
+ case SkPath::kCubic_Verb:
+ pointCount += cubicPointCount(pts, tol);
+ break;
+ case SkPath::kMove_Verb:
+ pointCount += 1;
+ if (!first) {
+ ++(*subpaths);
+ }
+ break;
+ default:
+ break;
+ }
+ first = false;
+ }
+ return pointCount;
+}
+
+void GrPathUtils::QuadUVMatrix::set(const SkPoint qPts[3]) {
+ SkMatrix m;
+ // We want M such that M * xy_pt = uv_pt
+ // We know M * control_pts = [0 1/2 1]
+ // [0 0 1]
+ // [1 1 1]
+ // And control_pts = [x0 x1 x2]
+ // [y0 y1 y2]
+ // [1 1 1 ]
+ // We invert the control pt matrix and post concat to both sides to get M.
+ // Using the known form of the control point matrix and the result, we can
+ // optimize and improve precision.
+
+ double x0 = qPts[0].fX;
+ double y0 = qPts[0].fY;
+ double x1 = qPts[1].fX;
+ double y1 = qPts[1].fY;
+ double x2 = qPts[2].fX;
+ double y2 = qPts[2].fY;
+ double det = x0*y1 - y0*x1 + x2*y0 - y2*x0 + x1*y2 - y1*x2;
+
+ if (!sk_float_isfinite(det)
+ || SkScalarNearlyZero((float)det, SK_ScalarNearlyZero * SK_ScalarNearlyZero)) {
+ // The quad is degenerate. Hopefully this is rare. Find the pts that are
+ // farthest apart to compute a line (unless it is really a pt).
+ SkScalar maxD = qPts[0].distanceToSqd(qPts[1]);
+ int maxEdge = 0;
+ SkScalar d = qPts[1].distanceToSqd(qPts[2]);
+ if (d > maxD) {
+ maxD = d;
+ maxEdge = 1;
+ }
+ d = qPts[2].distanceToSqd(qPts[0]);
+ if (d > maxD) {
+ maxD = d;
+ maxEdge = 2;
+ }
+ // We could have a tolerance here, not sure if it would improve anything
+ if (maxD > 0) {
+ // Set the matrix to give (u = 0, v = distance_to_line)
+ SkVector lineVec = qPts[(maxEdge + 1)%3] - qPts[maxEdge];
+ // when looking from the point 0 down the line we want positive
+ // distances to be to the left. This matches the non-degenerate
+ // case.
+ lineVec.setOrthog(lineVec, SkPoint::kLeft_Side);
+ // first row
+ fM[0] = 0;
+ fM[1] = 0;
+ fM[2] = 0;
+ // second row
+ fM[3] = lineVec.fX;
+ fM[4] = lineVec.fY;
+ fM[5] = -lineVec.dot(qPts[maxEdge]);
+ } else {
+ // It's a point. It should cover zero area. Just set the matrix such
+ // that (u, v) will always be far away from the quad.
+ fM[0] = 0; fM[1] = 0; fM[2] = 100.f;
+ fM[3] = 0; fM[4] = 0; fM[5] = 100.f;
+ }
+ } else {
+ double scale = 1.0/det;
+
+ // compute adjugate matrix
+ double a2, a3, a4, a5, a6, a7, a8;
+ a2 = x1*y2-x2*y1;
+
+ a3 = y2-y0;
+ a4 = x0-x2;
+ a5 = x2*y0-x0*y2;
+
+ a6 = y0-y1;
+ a7 = x1-x0;
+ a8 = x0*y1-x1*y0;
+
+ // this performs the uv_pts*adjugate(control_pts) multiply,
+ // then does the scale by 1/det afterwards to improve precision
+ m[SkMatrix::kMScaleX] = (float)((0.5*a3 + a6)*scale);
+ m[SkMatrix::kMSkewX] = (float)((0.5*a4 + a7)*scale);
+ m[SkMatrix::kMTransX] = (float)((0.5*a5 + a8)*scale);
+
+ m[SkMatrix::kMSkewY] = (float)(a6*scale);
+ m[SkMatrix::kMScaleY] = (float)(a7*scale);
+ m[SkMatrix::kMTransY] = (float)(a8*scale);
+
+ // kMPersp0 & kMPersp1 should algebraically be zero
+ m[SkMatrix::kMPersp0] = 0.0f;
+ m[SkMatrix::kMPersp1] = 0.0f;
+ m[SkMatrix::kMPersp2] = (float)((a2 + a5 + a8)*scale);
+
+ // It may not be normalized to have 1.0 in the bottom right
+ float m33 = m.get(SkMatrix::kMPersp2);
+ if (1.f != m33) {
+ m33 = 1.f / m33;
+ fM[0] = m33 * m.get(SkMatrix::kMScaleX);
+ fM[1] = m33 * m.get(SkMatrix::kMSkewX);
+ fM[2] = m33 * m.get(SkMatrix::kMTransX);
+ fM[3] = m33 * m.get(SkMatrix::kMSkewY);
+ fM[4] = m33 * m.get(SkMatrix::kMScaleY);
+ fM[5] = m33 * m.get(SkMatrix::kMTransY);
+ } else {
+ fM[0] = m.get(SkMatrix::kMScaleX);
+ fM[1] = m.get(SkMatrix::kMSkewX);
+ fM[2] = m.get(SkMatrix::kMTransX);
+ fM[3] = m.get(SkMatrix::kMSkewY);
+ fM[4] = m.get(SkMatrix::kMScaleY);
+ fM[5] = m.get(SkMatrix::kMTransY);
+ }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// k = (y2 - y0, x0 - x2, (x2 - x0)*y0 - (y2 - y0)*x0 )
+// l = (2*w * (y1 - y0), 2*w * (x0 - x1), 2*w * (x1*y0 - x0*y1))
+// m = (2*w * (y2 - y1), 2*w * (x1 - x2), 2*w * (x2*y1 - x1*y2))
+void GrPathUtils::getConicKLM(const SkPoint p[3], const SkScalar weight, SkScalar klm[9]) {
+ const SkScalar w2 = 2.f * weight;
+ klm[0] = p[2].fY - p[0].fY;
+ klm[1] = p[0].fX - p[2].fX;
+ klm[2] = (p[2].fX - p[0].fX) * p[0].fY - (p[2].fY - p[0].fY) * p[0].fX;
+
+ klm[3] = w2 * (p[1].fY - p[0].fY);
+ klm[4] = w2 * (p[0].fX - p[1].fX);
+ klm[5] = w2 * (p[1].fX * p[0].fY - p[0].fX * p[1].fY);
+
+ klm[6] = w2 * (p[2].fY - p[1].fY);
+ klm[7] = w2 * (p[1].fX - p[2].fX);
+ klm[8] = w2 * (p[2].fX * p[1].fY - p[1].fX * p[2].fY);
+
+ // scale the max absolute value of coeffs to 10
+ SkScalar scale = 0.f;
+ for (int i = 0; i < 9; ++i) {
+ scale = SkMaxScalar(scale, SkScalarAbs(klm[i]));
+ }
+ SkASSERT(scale > 0.f);
+ scale = 10.f / scale;
+ for (int i = 0; i < 9; ++i) {
+ klm[i] *= scale;
+ }
+}
+
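
A quick self-contained check of the functionals computed by getConicKLM() above (without its final rescaling step), using a quarter-circle conic; for that curve k^2 - l*m reduces, up to scale, to x^2 + y^2 - 1, so it vanishes at any on-curve point:

    #include <cmath>
    #include <cstdio>

    // Evaluate one of the three linear functionals at a point.
    static double evalLine(const double line[3], double x, double y) {
        return line[0] * x + line[1] * y + line[2];
    }

    int main() {
        // Quarter circle as a conic: p0 = (1,0), p1 = (1,1), p2 = (0,1), w = sqrt(2)/2.
        const double p[3][2] = { {1, 0}, {1, 1}, {0, 1} };
        const double w  = std::sqrt(2.0) / 2.0;
        const double w2 = 2.0 * w;

        // Same formulas as getConicKLM().
        double k[3] = { p[2][1] - p[0][1],
                        p[0][0] - p[2][0],
                        (p[2][0] - p[0][0]) * p[0][1] - (p[2][1] - p[0][1]) * p[0][0] };
        double l[3] = { w2 * (p[1][1] - p[0][1]),
                        w2 * (p[0][0] - p[1][0]),
                        w2 * (p[1][0] * p[0][1] - p[0][0] * p[1][1]) };
        double m[3] = { w2 * (p[2][1] - p[1][1]),
                        w2 * (p[1][0] - p[2][0]),
                        w2 * (p[2][0] * p[1][1] - p[1][0] * p[2][1]) };

        // Point on the conic at t = 1/2 (rational Bezier evaluation): lies on the unit circle.
        double denom = 0.25 + 0.5 * w + 0.25;
        double x = (0.25 * p[0][0] + 0.5 * w * p[1][0] + 0.25 * p[2][0]) / denom;
        double y = (0.25 * p[0][1] + 0.5 * w * p[1][1] + 0.25 * p[2][1]) / denom;

        double f = evalLine(k, x, y) * evalLine(k, x, y) - evalLine(l, x, y) * evalLine(m, x, y);
        std::printf("k^2 - l*m at the on-curve point = %g (expect ~0)\n", f);
    }
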
+////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+// a is the first control point of the cubic.
+// ab is the vector from a to the second control point.
+// dc is the vector from the fourth to the third control point.
+// d is the fourth control point.
+// p is the candidate quadratic control point.
+// this assumes that the cubic doesn't inflect and is simple
+bool is_point_within_cubic_tangents(const SkPoint& a,
+ const SkVector& ab,
+ const SkVector& dc,
+ const SkPoint& d,
+ SkPathPriv::FirstDirection dir,
+ const SkPoint p) {
+ SkVector ap = p - a;
+ SkScalar apXab = ap.cross(ab);
+ if (SkPathPriv::kCW_FirstDirection == dir) {
+ if (apXab > 0) {
+ return false;
+ }
+ } else {
+ SkASSERT(SkPathPriv::kCCW_FirstDirection == dir);
+ if (apXab < 0) {
+ return false;
+ }
+ }
+
+ SkVector dp = p - d;
+ SkScalar dpXdc = dp.cross(dc);
+ if (SkPathPriv::kCW_FirstDirection == dir) {
+ if (dpXdc < 0) {
+ return false;
+ }
+ } else {
+ SkASSERT(SkPathPriv::kCCW_FirstDirection == dir);
+ if (dpXdc > 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void convert_noninflect_cubic_to_quads(const SkPoint p[4],
+ SkScalar toleranceSqd,
+ bool constrainWithinTangents,
+ SkPathPriv::FirstDirection dir,
+ SkTArray<SkPoint, true>* quads,
+ int sublevel = 0) {
+
+ // Notation: Point a is always p[0]. Point b is p[1] unless p[1] == p[0], in which case it is
+ // p[2]. Point d is always p[3]. Point c is p[2] unless p[2] == p[3], in which case it is p[1].
+
+ SkVector ab = p[1] - p[0];
+ SkVector dc = p[2] - p[3];
+
+ if (ab.lengthSqd() < SK_ScalarNearlyZero) {
+ if (dc.lengthSqd() < SK_ScalarNearlyZero) {
+ SkPoint* degQuad = quads->push_back_n(3);
+ degQuad[0] = p[0];
+ degQuad[1] = p[0];
+ degQuad[2] = p[3];
+ return;
+ }
+ ab = p[2] - p[0];
+ }
+ if (dc.lengthSqd() < SK_ScalarNearlyZero) {
+ dc = p[1] - p[3];
+ }
+
+ // When the ab and cd tangents are degenerate or nearly parallel with vector from d to a the
+ // constraint that the quad point falls between the tangents becomes hard to enforce and we are
+ // likely to hit the max subdivision count. However, in this case the cubic is approaching a
+ // line and the accuracy of the quad point isn't so important. We check if the two middle cubic
+ // control points are very close to the baseline vector. If so then we just pick quadratic
+ // points on the control polygon.
+
+ if (constrainWithinTangents) {
+ SkVector da = p[0] - p[3];
+ bool doQuads = dc.lengthSqd() < SK_ScalarNearlyZero ||
+ ab.lengthSqd() < SK_ScalarNearlyZero;
+ if (!doQuads) {
+ SkScalar invDALengthSqd = da.lengthSqd();
+ if (invDALengthSqd > SK_ScalarNearlyZero) {
+ invDALengthSqd = SkScalarInvert(invDALengthSqd);
+ // cross(ab, da)^2/length(da)^2 == sqd distance from b to line from d to a.
+ // same goes for point c using vector cd.
+ SkScalar detABSqd = ab.cross(da);
+ detABSqd = SkScalarSquare(detABSqd);
+ SkScalar detDCSqd = dc.cross(da);
+ detDCSqd = SkScalarSquare(detDCSqd);
+ if (SkScalarMul(detABSqd, invDALengthSqd) < toleranceSqd &&
+ SkScalarMul(detDCSqd, invDALengthSqd) < toleranceSqd) {
+ doQuads = true;
+ }
+ }
+ }
+ if (doQuads) {
+ SkPoint b = p[0] + ab;
+ SkPoint c = p[3] + dc;
+ SkPoint mid = b + c;
+ mid.scale(SK_ScalarHalf);
+ // Insert two quadratics to cover the case when ab points away from d and/or dc
+ // points away from a.
+ if (SkVector::DotProduct(da, dc) < 0 || SkVector::DotProduct(ab,da) > 0) {
+ SkPoint* qpts = quads->push_back_n(6);
+ qpts[0] = p[0];
+ qpts[1] = b;
+ qpts[2] = mid;
+ qpts[3] = mid;
+ qpts[4] = c;
+ qpts[5] = p[3];
+ } else {
+ SkPoint* qpts = quads->push_back_n(3);
+ qpts[0] = p[0];
+ qpts[1] = mid;
+ qpts[2] = p[3];
+ }
+ return;
+ }
+ }
+
+ static const SkScalar kLengthScale = 3 * SK_Scalar1 / 2;
+ static const int kMaxSubdivs = 10;
+
+ ab.scale(kLengthScale);
+ dc.scale(kLengthScale);
+
+    // c0 and c1 are extrapolations along vectors ab and dc.
+ SkVector c0 = p[0];
+ c0 += ab;
+ SkVector c1 = p[3];
+ c1 += dc;
+
+ SkScalar dSqd = sublevel > kMaxSubdivs ? 0 : c0.distanceToSqd(c1);
+ if (dSqd < toleranceSqd) {
+ SkPoint cAvg = c0;
+ cAvg += c1;
+ cAvg.scale(SK_ScalarHalf);
+
+ bool subdivide = false;
+
+ if (constrainWithinTangents &&
+ !is_point_within_cubic_tangents(p[0], ab, dc, p[3], dir, cAvg)) {
+ // choose a new cAvg that is the intersection of the two tangent lines.
+ ab.setOrthog(ab);
+ SkScalar z0 = -ab.dot(p[0]);
+ dc.setOrthog(dc);
+ SkScalar z1 = -dc.dot(p[3]);
+ cAvg.fX = SkScalarMul(ab.fY, z1) - SkScalarMul(z0, dc.fY);
+ cAvg.fY = SkScalarMul(z0, dc.fX) - SkScalarMul(ab.fX, z1);
+ SkScalar z = SkScalarMul(ab.fX, dc.fY) - SkScalarMul(ab.fY, dc.fX);
+ z = SkScalarInvert(z);
+ cAvg.fX *= z;
+ cAvg.fY *= z;
+ if (sublevel <= kMaxSubdivs) {
+ SkScalar d0Sqd = c0.distanceToSqd(cAvg);
+ SkScalar d1Sqd = c1.distanceToSqd(cAvg);
+ // We need to subdivide if d0 + d1 > tolerance but we have the sqd values. We know
+ // the distances and tolerance can't be negative.
+ // (d0 + d1)^2 > toleranceSqd
+ // d0Sqd + 2*d0*d1 + d1Sqd > toleranceSqd
+ SkScalar d0d1 = SkScalarSqrt(SkScalarMul(d0Sqd, d1Sqd));
+ subdivide = 2 * d0d1 + d0Sqd + d1Sqd > toleranceSqd;
+ }
+ }
+ if (!subdivide) {
+ SkPoint* pts = quads->push_back_n(3);
+ pts[0] = p[0];
+ pts[1] = cAvg;
+ pts[2] = p[3];
+ return;
+ }
+ }
+ SkPoint choppedPts[7];
+ SkChopCubicAtHalf(p, choppedPts);
+ convert_noninflect_cubic_to_quads(choppedPts + 0,
+ toleranceSqd,
+ constrainWithinTangents,
+ dir,
+ quads,
+ sublevel + 1);
+ convert_noninflect_cubic_to_quads(choppedPts + 3,
+ toleranceSqd,
+ constrainWithinTangents,
+ dir,
+ quads,
+ sublevel + 1);
+}
+}  // namespace
+
+void GrPathUtils::convertCubicToQuads(const SkPoint p[4],
+ SkScalar tolScale,
+ SkTArray<SkPoint, true>* quads) {
+ SkPoint chopped[10];
+ int count = SkChopCubicAtInflections(p, chopped);
+
+ const SkScalar tolSqd = SkScalarSquare(tolScale);
+
+ for (int i = 0; i < count; ++i) {
+ SkPoint* cubic = chopped + 3*i;
+ // The direction param is ignored if the third param is false.
+ convert_noninflect_cubic_to_quads(cubic, tolSqd, false,
+ SkPathPriv::kCCW_FirstDirection, quads);
+ }
+}
+
+void GrPathUtils::convertCubicToQuadsConstrainToTangents(const SkPoint p[4],
+ SkScalar tolScale,
+ SkPathPriv::FirstDirection dir,
+ SkTArray<SkPoint, true>* quads) {
+ SkPoint chopped[10];
+ int count = SkChopCubicAtInflections(p, chopped);
+
+ const SkScalar tolSqd = SkScalarSquare(tolScale);
+
+ for (int i = 0; i < count; ++i) {
+ SkPoint* cubic = chopped + 3*i;
+ convert_noninflect_cubic_to_quads(cubic, tolSqd, true, dir, quads);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Solves linear system to extract klm
+// P.K = k (similarly for l, m)
+// Where P is matrix of control points
+// K is coefficients for the line K
+// k is vector of values of K evaluated at the control points
+// Solving for K, thus K = P^(-1) . k
+static void calc_cubic_klm(const SkPoint p[4], const SkScalar controlK[4],
+ const SkScalar controlL[4], const SkScalar controlM[4],
+ SkScalar k[3], SkScalar l[3], SkScalar m[3]) {
+ SkMatrix matrix;
+ matrix.setAll(p[0].fX, p[0].fY, 1.f,
+ p[1].fX, p[1].fY, 1.f,
+ p[2].fX, p[2].fY, 1.f);
+ SkMatrix inverse;
+ if (matrix.invert(&inverse)) {
+ inverse.mapHomogeneousPoints(k, controlK, 1);
+ inverse.mapHomogeneousPoints(l, controlL, 1);
+ inverse.mapHomogeneousPoints(m, controlM, 1);
+ }
+
+}
+
+static void set_serp_klm(const SkScalar d[3], SkScalar k[4], SkScalar l[4], SkScalar m[4]) {
+ SkScalar tempSqrt = SkScalarSqrt(9.f * d[1] * d[1] - 12.f * d[0] * d[2]);
+ SkScalar ls = 3.f * d[1] - tempSqrt;
+ SkScalar lt = 6.f * d[0];
+ SkScalar ms = 3.f * d[1] + tempSqrt;
+ SkScalar mt = 6.f * d[0];
+
+ k[0] = ls * ms;
+ k[1] = (3.f * ls * ms - ls * mt - lt * ms) / 3.f;
+ k[2] = (lt * (mt - 2.f * ms) + ls * (3.f * ms - 2.f * mt)) / 3.f;
+ k[3] = (lt - ls) * (mt - ms);
+
+ l[0] = ls * ls * ls;
+ const SkScalar lt_ls = lt - ls;
+ l[1] = ls * ls * lt_ls * -1.f;
+ l[2] = lt_ls * lt_ls * ls;
+ l[3] = -1.f * lt_ls * lt_ls * lt_ls;
+
+ m[0] = ms * ms * ms;
+ const SkScalar mt_ms = mt - ms;
+ m[1] = ms * ms * mt_ms * -1.f;
+ m[2] = mt_ms * mt_ms * ms;
+ m[3] = -1.f * mt_ms * mt_ms * mt_ms;
+
+    // We want negative distance values to be on the inside of the curve. When d0 > 0 the
+    // orientation must be flipped, which is done by negating the k and l values.
+ if ( d[0] > 0) {
+ for (int i = 0; i < 4; ++i) {
+ k[i] = -k[i];
+ l[i] = -l[i];
+ }
+ }
+}
+
+static void set_loop_klm(const SkScalar d[3], SkScalar k[4], SkScalar l[4], SkScalar m[4]) {
+ SkScalar tempSqrt = SkScalarSqrt(4.f * d[0] * d[2] - 3.f * d[1] * d[1]);
+ SkScalar ls = d[1] - tempSqrt;
+ SkScalar lt = 2.f * d[0];
+ SkScalar ms = d[1] + tempSqrt;
+ SkScalar mt = 2.f * d[0];
+
+ k[0] = ls * ms;
+ k[1] = (3.f * ls*ms - ls * mt - lt * ms) / 3.f;
+ k[2] = (lt * (mt - 2.f * ms) + ls * (3.f * ms - 2.f * mt)) / 3.f;
+ k[3] = (lt - ls) * (mt - ms);
+
+ l[0] = ls * ls * ms;
+ l[1] = (ls * (ls * (mt - 3.f * ms) + 2.f * lt * ms))/-3.f;
+ l[2] = ((lt - ls) * (ls * (2.f * mt - 3.f * ms) + lt * ms))/3.f;
+ l[3] = -1.f * (lt - ls) * (lt - ls) * (mt - ms);
+
+ m[0] = ls * ms * ms;
+ m[1] = (ms * (ls * (2.f * mt - 3.f * ms) + lt * ms))/-3.f;
+ m[2] = ((mt - ms) * (ls * (mt - 3.f * ms) + 2.f * lt * ms))/3.f;
+ m[3] = -1.f * (lt - ls) * (mt - ms) * (mt - ms);
+
+
+ // If (d0 < 0 && sign(k1) > 0) || (d0 > 0 && sign(k1) < 0),
+ // we need to flip the orientation of our curve.
+ // This is done by negating the k and l values
+ if ( (d[0] < 0 && k[1] > 0) || (d[0] > 0 && k[1] < 0)) {
+ for (int i = 0; i < 4; ++i) {
+ k[i] = -k[i];
+ l[i] = -l[i];
+ }
+ }
+}
+
+static void set_cusp_klm(const SkScalar d[3], SkScalar k[4], SkScalar l[4], SkScalar m[4]) {
+ const SkScalar ls = d[2];
+ const SkScalar lt = 3.f * d[1];
+
+ k[0] = ls;
+ k[1] = ls - lt / 3.f;
+ k[2] = ls - 2.f * lt / 3.f;
+ k[3] = ls - lt;
+
+ l[0] = ls * ls * ls;
+ const SkScalar ls_lt = ls - lt;
+ l[1] = ls * ls * ls_lt;
+ l[2] = ls_lt * ls_lt * ls;
+ l[3] = ls_lt * ls_lt * ls_lt;
+
+ m[0] = 1.f;
+ m[1] = 1.f;
+ m[2] = 1.f;
+ m[3] = 1.f;
+}
+
+// For the case when a cubic is actually a quadratic
+// M =
+// 0 0 0
+// 1/3 0 1/3
+// 2/3 1/3 2/3
+// 1 1 1
+static void set_quadratic_klm(const SkScalar d[3], SkScalar k[4], SkScalar l[4], SkScalar m[4]) {
+ k[0] = 0.f;
+ k[1] = 1.f/3.f;
+ k[2] = 2.f/3.f;
+ k[3] = 1.f;
+
+ l[0] = 0.f;
+ l[1] = 0.f;
+ l[2] = 1.f/3.f;
+ l[3] = 1.f;
+
+ m[0] = 0.f;
+ m[1] = 1.f/3.f;
+ m[2] = 2.f/3.f;
+ m[3] = 1.f;
+
+    // When d2 > 0 the orientation of the curve is flipped, which is done by negating the
+    // k and l values.
+ if ( d[2] > 0) {
+ for (int i = 0; i < 4; ++i) {
+ k[i] = -k[i];
+ l[i] = -l[i];
+ }
+ }
+}
+
+int GrPathUtils::chopCubicAtLoopIntersection(const SkPoint src[4], SkPoint dst[10], SkScalar klm[9],
+ SkScalar klm_rev[3]) {
+ // Variable to store the two parametric values at the loop double point
+ SkScalar smallS = 0.f;
+ SkScalar largeS = 0.f;
+
+ SkScalar d[3];
+ SkCubicType cType = SkClassifyCubic(src, d);
+
+ int chop_count = 0;
+ if (kLoop_SkCubicType == cType) {
+ SkScalar tempSqrt = SkScalarSqrt(4.f * d[0] * d[2] - 3.f * d[1] * d[1]);
+ SkScalar ls = d[1] - tempSqrt;
+ SkScalar lt = 2.f * d[0];
+ SkScalar ms = d[1] + tempSqrt;
+ SkScalar mt = 2.f * d[0];
+ ls = ls / lt;
+ ms = ms / mt;
+ // need to have t values sorted since this is what is expected by SkChopCubicAt
+ if (ls <= ms) {
+ smallS = ls;
+ largeS = ms;
+ } else {
+ smallS = ms;
+ largeS = ls;
+ }
+
+ SkScalar chop_ts[2];
+ if (smallS > 0.f && smallS < 1.f) {
+ chop_ts[chop_count++] = smallS;
+ }
+ if (largeS > 0.f && largeS < 1.f) {
+ chop_ts[chop_count++] = largeS;
+ }
+        if (dst) {
+ SkChopCubicAt(src, dst, chop_ts, chop_count);
+ }
+ } else {
+ if (dst) {
+ memcpy(dst, src, sizeof(SkPoint) * 4);
+ }
+ }
+
+ if (klm && klm_rev) {
+        // Set klm_rev to match the subsection of the cubic that needs to have its orientation
+        // flipped. This will always be the section that is the "loop".
+ if (2 == chop_count) {
+ klm_rev[0] = 1.f;
+ klm_rev[1] = -1.f;
+ klm_rev[2] = 1.f;
+ } else if (1 == chop_count) {
+ if (smallS < 0.f) {
+ klm_rev[0] = -1.f;
+ klm_rev[1] = 1.f;
+ } else {
+ klm_rev[0] = 1.f;
+ klm_rev[1] = -1.f;
+ }
+ } else {
+ if (smallS < 0.f && largeS > 1.f) {
+ klm_rev[0] = -1.f;
+ } else {
+ klm_rev[0] = 1.f;
+ }
+ }
+ SkScalar controlK[4];
+ SkScalar controlL[4];
+ SkScalar controlM[4];
+
+ if (kSerpentine_SkCubicType == cType || (kCusp_SkCubicType == cType && 0.f != d[0])) {
+ set_serp_klm(d, controlK, controlL, controlM);
+ } else if (kLoop_SkCubicType == cType) {
+ set_loop_klm(d, controlK, controlL, controlM);
+ } else if (kCusp_SkCubicType == cType) {
+ SkASSERT(0.f == d[0]);
+ set_cusp_klm(d, controlK, controlL, controlM);
+ } else if (kQuadratic_SkCubicType == cType) {
+ set_quadratic_klm(d, controlK, controlL, controlM);
+ }
+
+ calc_cubic_klm(src, controlK, controlL, controlM, klm, &klm[3], &klm[6]);
+ }
+ return chop_count + 1;
+}
+
+void GrPathUtils::getCubicKLM(const SkPoint p[4], SkScalar klm[9]) {
+ SkScalar d[3];
+ SkCubicType cType = SkClassifyCubic(p, d);
+
+ SkScalar controlK[4];
+ SkScalar controlL[4];
+ SkScalar controlM[4];
+
+ if (kSerpentine_SkCubicType == cType || (kCusp_SkCubicType == cType && 0.f != d[0])) {
+ set_serp_klm(d, controlK, controlL, controlM);
+ } else if (kLoop_SkCubicType == cType) {
+ set_loop_klm(d, controlK, controlL, controlM);
+ } else if (kCusp_SkCubicType == cType) {
+ SkASSERT(0.f == d[0]);
+ set_cusp_klm(d, controlK, controlL, controlM);
+ } else if (kQuadratic_SkCubicType == cType) {
+ set_quadratic_klm(d, controlK, controlL, controlM);
+ }
+
+ calc_cubic_klm(p, controlK, controlL, controlM, klm, &klm[3], &klm[6]);
+}
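
The recursive flattening in generateQuadraticPoints() above is repeated de Casteljau subdivision at t = 1/2 until the control point is within tolerance of the chord. A self-contained sketch of the same idea, omitting the pointsLeft cap used above:

    #include <cmath>
    #include <cstdio>
    #include <vector>

    struct Pt { double x, y; };
    static Pt mid(Pt a, Pt b) { return { (a.x + b.x) / 2, (a.y + b.y) / 2 }; }

    // Squared distance from p to the segment ab (same role as distanceToLineSegmentBetweenSqd).
    static double distToSegSqd(Pt p, Pt a, Pt b) {
        double vx = b.x - a.x, vy = b.y - a.y;
        double wx = p.x - a.x, wy = p.y - a.y;
        double len2 = vx * vx + vy * vy;
        double t = len2 > 0 ? std::fmax(0.0, std::fmin(1.0, (wx * vx + wy * vy) / len2)) : 0.0;
        double dx = wx - t * vx, dy = wy - t * vy;
        return dx * dx + dy * dy;
    }

    // Emit the end point once the quad is flat enough, otherwise split at t = 1/2 and recurse.
    static void flattenQuad(Pt p0, Pt p1, Pt p2, double tolSqd, std::vector<Pt>* out) {
        if (distToSegSqd(p1, p0, p2) < tolSqd) {
            out->push_back(p2);
            return;
        }
        Pt q0 = mid(p0, p1), q1 = mid(p1, p2), r = mid(q0, q1);
        flattenQuad(p0, q0, r, tolSqd, out);
        flattenQuad(r, q1, p2, tolSqd, out);
    }

    int main() {
        std::vector<Pt> pts = { {0, 0} };
        flattenQuad({0, 0}, {50, 100}, {100, 0}, 0.25 * 0.25, &pts);
        std::printf("flattened into %zu points\n", pts.size());
    }
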
diff --git a/gfx/skia/skia/src/gpu/GrPathUtils.h b/gfx/skia/skia/src/gpu/GrPathUtils.h
new file mode 100644
index 000000000..fcc32c89c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPathUtils.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPathUtils_DEFINED
+#define GrPathUtils_DEFINED
+
+#include "SkRect.h"
+#include "SkPathPriv.h"
+#include "SkTArray.h"
+
+class SkMatrix;
+
+/**
+ * Utilities for evaluating paths.
+ */
+namespace GrPathUtils {
+ SkScalar scaleToleranceToSrc(SkScalar devTol,
+ const SkMatrix& viewM,
+ const SkRect& pathBounds);
+
+ /// Since we divide by tol if we're computing exact worst-case bounds,
+ /// very small tolerances will be increased to gMinCurveTol.
+ int worstCasePointCount(const SkPath&,
+ int* subpaths,
+ SkScalar tol);
+
+ /// Since we divide by tol if we're computing exact worst-case bounds,
+ /// very small tolerances will be increased to gMinCurveTol.
+ uint32_t quadraticPointCount(const SkPoint points[], SkScalar tol);
+
+ uint32_t generateQuadraticPoints(const SkPoint& p0,
+ const SkPoint& p1,
+ const SkPoint& p2,
+ SkScalar tolSqd,
+ SkPoint** points,
+ uint32_t pointsLeft);
+
+ /// Since we divide by tol if we're computing exact worst-case bounds,
+ /// very small tolerances will be increased to gMinCurveTol.
+ uint32_t cubicPointCount(const SkPoint points[], SkScalar tol);
+
+ uint32_t generateCubicPoints(const SkPoint& p0,
+ const SkPoint& p1,
+ const SkPoint& p2,
+ const SkPoint& p3,
+ SkScalar tolSqd,
+ SkPoint** points,
+ uint32_t pointsLeft);
+
+ // A 2x3 matrix that goes from the 2d space coordinates to UV space where
+ // u^2-v = 0 specifies the quad. The matrix is determined by the control
+ // points of the quadratic.
+ class QuadUVMatrix {
+ public:
+ QuadUVMatrix() {}
+ // Initialize the matrix from the control pts
+ QuadUVMatrix(const SkPoint controlPts[3]) { this->set(controlPts); }
+ void set(const SkPoint controlPts[3]);
+
+ /**
+ * Applies the matrix to vertex positions to compute UV coords. This
+         * has been templated so that the compiler can easily unroll the loop
+ * and reorder to avoid stalling for loads. The assumption is that a
+ * path renderer will have a small fixed number of vertices that it
+ * uploads for each quad.
+ *
+ * N is the number of vertices.
+ * STRIDE is the size of each vertex.
+ * UV_OFFSET is the offset of the UV values within each vertex.
+ * vertices is a pointer to the first vertex.
+ */
+ template <int N, size_t STRIDE, size_t UV_OFFSET>
+ void apply(const void* vertices) const {
+ intptr_t xyPtr = reinterpret_cast<intptr_t>(vertices);
+ intptr_t uvPtr = reinterpret_cast<intptr_t>(vertices) + UV_OFFSET;
+ float sx = fM[0];
+ float kx = fM[1];
+ float tx = fM[2];
+ float ky = fM[3];
+ float sy = fM[4];
+ float ty = fM[5];
+ for (int i = 0; i < N; ++i) {
+ const SkPoint* xy = reinterpret_cast<const SkPoint*>(xyPtr);
+ SkPoint* uv = reinterpret_cast<SkPoint*>(uvPtr);
+ uv->fX = sx * xy->fX + kx * xy->fY + tx;
+ uv->fY = ky * xy->fX + sy * xy->fY + ty;
+ xyPtr += STRIDE;
+ uvPtr += STRIDE;
+ }
+ }
+ private:
+ float fM[6];
+ };
+
+ // Input is 3 control points and a weight for a bezier conic. Calculates the
+ // three linear functionals (K,L,M) that represent the implicit equation of the
+ // conic, K^2 - LM.
+ //
+ // Output:
+ // K = (klm[0], klm[1], klm[2])
+ // L = (klm[3], klm[4], klm[5])
+ // M = (klm[6], klm[7], klm[8])
+ void getConicKLM(const SkPoint p[3], const SkScalar weight, SkScalar klm[9]);
+
+ // Converts a cubic into a sequence of quads. If working in device space
+    // use tolScale = 1; otherwise set it based on the stretchiness of the matrix. The
+    // result is appended to quads as consecutive sets of 3 quadratic control points.
+ void convertCubicToQuads(const SkPoint p[4],
+ SkScalar tolScale,
+ SkTArray<SkPoint, true>* quads);
+
+ // When we approximate a cubic {a,b,c,d} with a quadratic we may have to
+ // ensure that the new control point lies between the lines ab and cd. The
+ // convex path renderer requires this. It starts with a path where all the
+ // control points taken together form a convex polygon. It relies on this
+    // property, and the cubic-to-quadratic approximation step must not alter it.
+ // This variation enforces this constraint. The cubic must be simple and dir
+ // must specify the orientation of the contour containing the cubic.
+ void convertCubicToQuadsConstrainToTangents(const SkPoint p[4],
+ SkScalar tolScale,
+ SkPathPriv::FirstDirection dir,
+ SkTArray<SkPoint, true>* quads);
+
+ // Chops the cubic bezier passed in by src, at the double point (intersection point)
+ // if the curve is a cubic loop. If it is a loop, there will be two parametric values for
+ // the double point: ls and ms. We chop the cubic at these values if they are between 0 and 1.
+ // Return value:
+ // Value of 3: ls and ms are both between (0,1), and dst will contain the three cubics,
+ // dst[0..3], dst[3..6], and dst[6..9] if dst is not nullptr
+ // Value of 2: Only one of ls and ms are between (0,1), and dst will contain the two cubics,
+ // dst[0..3] and dst[3..6] if dst is not nullptr
+    // Value of 1: Neither ls nor ms is between (0,1), and dst will contain the one original cubic,
+ // dst[0..3] if dst is not nullptr
+ //
+ // Optional KLM Calculation:
+ // The function can also return the KLM linear functionals for the chopped cubic implicit form
+ // of K^3 - LM.
+    // It will calculate a single set of KLM values that can be shared by all sub cubics; the
+    // exception is the subsection that is "the loop", for which the K and L values need to be
+    // negated.
+ // Output:
+ // klm: Holds the values for the linear functionals as:
+ // K = (klm[0], klm[1], klm[2])
+ // L = (klm[3], klm[4], klm[5])
+ // M = (klm[6], klm[7], klm[8])
+ // klm_rev: These values are flags for the corresponding sub cubic saying whether or not
+ // the K and L values need to be flipped. A value of -1.f means flip K and L and
+ // a value of 1.f means do nothing.
+ // *****DO NOT FLIP M, JUST K AND L*****
+ //
+ // Notice that the klm lines are calculated in the same space as the input control points.
+ // If you transform the points the lines will also need to be transformed. This can be done
+ // by mapping the lines with the inverse-transpose of the matrix used to map the points.
+ int chopCubicAtLoopIntersection(const SkPoint src[4], SkPoint dst[10] = nullptr,
+ SkScalar klm[9] = nullptr, SkScalar klm_rev[3] = nullptr);
+
+ // Input is p which holds the 4 control points of a non-rational cubic Bezier curve.
+ // Output is the coefficients of the three linear functionals K, L, & M which
+ // represent the implicit form of the cubic as f(x,y,w) = K^3 - LM. The w term
+ // will always be 1. The output is stored in the array klm, where the values are:
+ // K = (klm[0], klm[1], klm[2])
+ // L = (klm[3], klm[4], klm[5])
+ // M = (klm[6], klm[7], klm[8])
+ //
+ // Notice that the klm lines are calculated in the same space as the input control points.
+ // If you transform the points the lines will also need to be transformed. This can be done
+ // by mapping the lines with the inverse-transpose of the matrix used to map the points.
+ void getCubicKLM(const SkPoint p[4], SkScalar klm[9]);
+
+ // When tessellating curved paths into linear segments, this defines the maximum distance
+    // in screen space which a segment may deviate from the mathematically correct value.
+ // Above this value, the segment will be subdivided.
+ // This value was chosen to approximate the supersampling accuracy of the raster path (16
+ // samples, or one quarter pixel).
+ static const SkScalar kDefaultTolerance = SkDoubleToScalar(0.25);
+};
+#endif
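
A sketch of how the QuadUVMatrix::apply() template above might be used with an interleaved vertex layout; QuadVertex and quadControlPts are hypothetical names for this example, and offsetof comes from <cstddef>:

    struct QuadVertex {
        SkPoint fPosition;
        SkPoint fUV;
    };

    QuadVertex verts[4];
    // ... fill verts[i].fPosition with the device-space corners covering the quad ...

    GrPathUtils::QuadUVMatrix uvMatrix(quadControlPts);   // the quad's 3 control points
    uvMatrix.apply<4, sizeof(QuadVertex), offsetof(QuadVertex, fUV)>(verts);
    // Each vertex now carries (u, v) with u^2 - v == 0 exactly on the curve, so the
    // fragment shader can use the sign of u^2 - v to decide which side of the curve it is on.
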
diff --git a/gfx/skia/skia/src/gpu/GrPendingProgramElement.h b/gfx/skia/skia/src/gpu/GrPendingProgramElement.h
new file mode 100644
index 000000000..ab1f43789
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPendingProgramElement.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPendingProgramElement_DEFINED
+#define GrPendingProgramElement_DEFINED
+
+#include "SkRefCnt.h"
+#include "GrTypes.h"
+
+/**
+ * Helper for owning a pending execution on a GrProgramElement. Using this rather than ref allows
+ * resources that are owned by the program element to be correctly tracked as having pending reads
+ * and writes rather than refs.
+ */
+template <typename T> class GrPendingProgramElement : SkNoncopyable {
+public:
+ GrPendingProgramElement() : fObj(nullptr) { }
+
+ // Adds a pending execution on obj.
+ explicit GrPendingProgramElement(T* obj) : fObj(obj) {
+ if (obj) {
+ obj->addPendingExecution();
+ }
+ }
+
+ void reset(T* obj) {
+ if (obj) {
+ obj->addPendingExecution();
+ }
+ if (fObj) {
+ fObj->completedExecution();
+ }
+ fObj = obj;
+ }
+
+ T* get() const { return fObj; }
+ operator T*() { return fObj; }
+
+ T *operator->() const { return fObj; }
+
+ ~GrPendingProgramElement() {
+ if (fObj) {
+ fObj->completedExecution();
+ }
+ }
+
+private:
+ T* fObj;
+
+ typedef SkNoncopyable INHERITED;
+};
+#endif
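
A self-contained sketch of the lifecycle this helper manages, using a toy element type in place of a real GrProgramElement (which additionally tracks refs and pending IO on its GPU resources):

    struct ToyElement {
        int fPending = 0;
        void addPendingExecution() { ++fPending; }
        void completedExecution()  { --fPending; }
    };

    ToyElement xp;
    {
        GrPendingProgramElement<ToyElement> pending(&xp);   // xp.fPending == 1
        // ... record the draw that will later execute xp ...
    }   // pending goes out of scope -> completedExecution(), xp.fPending == 0 again
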
diff --git a/gfx/skia/skia/src/gpu/GrPipeline.cpp b/gfx/skia/skia/src/gpu/GrPipeline.cpp
new file mode 100644
index 000000000..d9ebcf85f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPipeline.cpp
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrPipeline.h"
+
+#include "GrCaps.h"
+#include "GrDrawContext.h"
+#include "GrDrawTarget.h"
+#include "GrGpu.h"
+#include "GrPipelineBuilder.h"
+#include "GrProcOptInfo.h"
+#include "GrRenderTargetPriv.h"
+#include "GrXferProcessor.h"
+
+#include "batches/GrBatch.h"
+
+GrPipeline* GrPipeline::CreateAt(void* memory, const CreateArgs& args,
+ GrXPOverridesForBatch* overrides) {
+ const GrPipelineBuilder& builder = *args.fPipelineBuilder;
+
+ GrPipeline* pipeline = new (memory) GrPipeline;
+ GrRenderTarget* rt = args.fDrawContext->accessRenderTarget();
+ pipeline->fRenderTarget.reset(rt);
+ SkASSERT(pipeline->fRenderTarget);
+ pipeline->fScissorState = *args.fScissor;
+ pipeline->fWindowRectsState = *args.fWindowRectsState;
+ if (builder.hasUserStencilSettings() || args.fHasStencilClip) {
+ const GrRenderTargetPriv& rtPriv = rt->renderTargetPriv();
+ pipeline->fStencilSettings.reset(*builder.getUserStencil(), args.fHasStencilClip,
+ rtPriv.numStencilBits());
+ SkASSERT(!pipeline->fStencilSettings.usesWrapOp() || args.fCaps->stencilWrapOpsSupport());
+ }
+ pipeline->fDrawFace = builder.getDrawFace();
+
+ pipeline->fFlags = 0;
+ if (builder.isHWAntialias()) {
+ pipeline->fFlags |= kHWAA_Flag;
+ }
+ if (builder.snapVerticesToPixelCenters()) {
+ pipeline->fFlags |= kSnapVertices_Flag;
+ }
+ if (builder.getDisableOutputConversionToSRGB()) {
+ pipeline->fFlags |= kDisableOutputConversionToSRGB_Flag;
+ }
+ if (builder.getAllowSRGBInputs()) {
+ pipeline->fFlags |= kAllowSRGBInputs_Flag;
+ }
+ if (builder.getUsesDistanceVectorField()) {
+ pipeline->fFlags |= kUsesDistanceVectorField_Flag;
+ }
+ if (args.fHasStencilClip) {
+ pipeline->fFlags |= kHasStencilClip_Flag;
+ }
+
+ // Create XferProcessor from DS's XPFactory
+ bool hasMixedSamples = args.fDrawContext->hasMixedSamples() &&
+ (builder.isHWAntialias() || !pipeline->fStencilSettings.isDisabled());
+ const GrXPFactory* xpFactory = builder.getXPFactory();
+ SkAutoTUnref<GrXferProcessor> xferProcessor;
+ if (xpFactory) {
+ xferProcessor.reset(xpFactory->createXferProcessor(args.fOpts,
+ hasMixedSamples,
+ &args.fDstTexture,
+ *args.fCaps));
+ if (!xferProcessor) {
+ pipeline->~GrPipeline();
+ return nullptr;
+ }
+ } else {
+ // This may return nullptr in the common case of src-over implemented using hw blending.
+ xferProcessor.reset(GrPorterDuffXPFactory::CreateSrcOverXferProcessor(
+ *args.fCaps,
+ args.fOpts,
+ hasMixedSamples,
+ &args.fDstTexture));
+ }
+ GrColor overrideColor = GrColor_ILLEGAL;
+ if (args.fOpts.fColorPOI.firstEffectiveProcessorIndex() != 0) {
+ overrideColor = args.fOpts.fColorPOI.inputColorToFirstEffectiveProccesor();
+ }
+
+ GrXferProcessor::OptFlags optFlags = GrXferProcessor::kNone_OptFlags;
+
+ const GrXferProcessor* xpForOpts = xferProcessor ? xferProcessor.get() :
+ &GrPorterDuffXPFactory::SimpleSrcOverXP();
+ optFlags = xpForOpts->getOptimizations(args.fOpts,
+ pipeline->fStencilSettings.doesWrite(),
+ &overrideColor,
+ *args.fCaps);
+
+    // When path rendering, the stencil settings are not always set on the GrPipelineBuilder,
+ // so we must check the draw type. In cases where we will skip drawing we simply return a
+ // null GrPipeline.
+ if (GrXferProcessor::kSkipDraw_OptFlag & optFlags) {
+ pipeline->~GrPipeline();
+ return nullptr;
+ }
+
+ // No need to have an override color if it isn't even going to be used.
+ if (SkToBool(GrXferProcessor::kIgnoreColor_OptFlag & optFlags)) {
+ overrideColor = GrColor_ILLEGAL;
+ }
+
+ pipeline->fXferProcessor.reset(xferProcessor);
+
+ int firstColorProcessorIdx = args.fOpts.fColorPOI.firstEffectiveProcessorIndex();
+
+ // TODO: Once we can handle single or four channel input into coverage GrFragmentProcessors
+ // then we can use GrPipelineBuilder's coverageProcInfo (like color above) to set this initial
+ // information.
+ int firstCoverageProcessorIdx = 0;
+
+ pipeline->adjustProgramFromOptimizations(builder, optFlags, args.fOpts.fColorPOI,
+ args.fOpts.fCoveragePOI, &firstColorProcessorIdx,
+ &firstCoverageProcessorIdx);
+
+ bool usesLocalCoords = false;
+
+ // Copy GrFragmentProcessors from GrPipelineBuilder to Pipeline
+ pipeline->fNumColorProcessors = builder.numColorFragmentProcessors() - firstColorProcessorIdx;
+ int numTotalProcessors = pipeline->fNumColorProcessors +
+ builder.numCoverageFragmentProcessors() - firstCoverageProcessorIdx;
+ pipeline->fFragmentProcessors.reset(numTotalProcessors);
+ int currFPIdx = 0;
+ for (int i = firstColorProcessorIdx; i < builder.numColorFragmentProcessors();
+ ++i, ++currFPIdx) {
+ const GrFragmentProcessor* fp = builder.getColorFragmentProcessor(i);
+ pipeline->fFragmentProcessors[currFPIdx].reset(fp);
+ usesLocalCoords = usesLocalCoords || fp->usesLocalCoords();
+ }
+
+ for (int i = firstCoverageProcessorIdx; i < builder.numCoverageFragmentProcessors();
+ ++i, ++currFPIdx) {
+ const GrFragmentProcessor* fp = builder.getCoverageFragmentProcessor(i);
+ pipeline->fFragmentProcessors[currFPIdx].reset(fp);
+ usesLocalCoords = usesLocalCoords || fp->usesLocalCoords();
+ }
+
+ // Setup info we need to pass to GrPrimitiveProcessors that are used with this GrPipeline.
+ overrides->fFlags = 0;
+ if (!SkToBool(optFlags & GrXferProcessor::kIgnoreColor_OptFlag)) {
+ overrides->fFlags |= GrXPOverridesForBatch::kReadsColor_Flag;
+ }
+ if (GrColor_ILLEGAL != overrideColor) {
+ overrides->fFlags |= GrXPOverridesForBatch::kUseOverrideColor_Flag;
+ overrides->fOverrideColor = overrideColor;
+ }
+ if (!SkToBool(optFlags & GrXferProcessor::kIgnoreCoverage_OptFlag)) {
+ overrides->fFlags |= GrXPOverridesForBatch::kReadsCoverage_Flag;
+ }
+ if (usesLocalCoords) {
+ overrides->fFlags |= GrXPOverridesForBatch::kReadsLocalCoords_Flag;
+ }
+ if (SkToBool(optFlags & GrXferProcessor::kCanTweakAlphaForCoverage_OptFlag)) {
+ overrides->fFlags |= GrXPOverridesForBatch::kCanTweakAlphaForCoverage_Flag;
+ }
+
+ GrXPFactory::InvariantBlendedColor blendedColor;
+ if (xpFactory) {
+ xpFactory->getInvariantBlendedColor(args.fOpts.fColorPOI, &blendedColor);
+ } else {
+ GrPorterDuffXPFactory::SrcOverInvariantBlendedColor(args.fOpts.fColorPOI.color(),
+ args.fOpts.fColorPOI.validFlags(),
+ args.fOpts.fColorPOI.isOpaque(),
+ &blendedColor);
+ }
+ if (blendedColor.fWillBlendWithDst) {
+ overrides->fFlags |= GrXPOverridesForBatch::kWillColorBlendWithDst_Flag;
+ }
+
+ return pipeline;
+}
+
+static void add_dependencies_for_processor(const GrFragmentProcessor* proc, GrRenderTarget* rt) {
+ GrFragmentProcessor::TextureAccessIter iter(proc);
+ while (const GrTextureAccess* access = iter.next()) {
+ SkASSERT(rt->getLastDrawTarget());
+ rt->getLastDrawTarget()->addDependency(access->getTexture());
+ }
+}
+
+void GrPipeline::addDependenciesTo(GrRenderTarget* rt) const {
+ for (int i = 0; i < fFragmentProcessors.count(); ++i) {
+ add_dependencies_for_processor(fFragmentProcessors[i].get(), rt);
+ }
+
+ const GrXferProcessor& xfer = this->getXferProcessor();
+
+ for (int i = 0; i < xfer.numTextures(); ++i) {
+ GrTexture* texture = xfer.textureAccess(i).getTexture();
+ SkASSERT(rt->getLastDrawTarget());
+ rt->getLastDrawTarget()->addDependency(texture);
+ }
+}
+
+void GrPipeline::adjustProgramFromOptimizations(const GrPipelineBuilder& pipelineBuilder,
+ GrXferProcessor::OptFlags flags,
+ const GrProcOptInfo& colorPOI,
+ const GrProcOptInfo& coveragePOI,
+ int* firstColorProcessorIdx,
+ int* firstCoverageProcessorIdx) {
+ fIgnoresCoverage = SkToBool(flags & GrXferProcessor::kIgnoreCoverage_OptFlag);
+
+ if ((flags & GrXferProcessor::kIgnoreColor_OptFlag) ||
+ (flags & GrXferProcessor::kOverrideColor_OptFlag)) {
+ *firstColorProcessorIdx = pipelineBuilder.numColorFragmentProcessors();
+ }
+
+ if (flags & GrXferProcessor::kIgnoreCoverage_OptFlag) {
+ *firstCoverageProcessorIdx = pipelineBuilder.numCoverageFragmentProcessors();
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool GrPipeline::AreEqual(const GrPipeline& a, const GrPipeline& b) {
+ SkASSERT(&a != &b);
+
+ if (a.getRenderTarget() != b.getRenderTarget() ||
+ a.fFragmentProcessors.count() != b.fFragmentProcessors.count() ||
+ a.fNumColorProcessors != b.fNumColorProcessors ||
+ a.fScissorState != b.fScissorState ||
+ !a.fWindowRectsState.cheapEqualTo(b.fWindowRectsState) ||
+ a.fFlags != b.fFlags ||
+ a.fStencilSettings != b.fStencilSettings ||
+ a.fDrawFace != b.fDrawFace ||
+ a.fIgnoresCoverage != b.fIgnoresCoverage) {
+ return false;
+ }
+
+ // Most of the time both are nullptr
+ if (a.fXferProcessor.get() || b.fXferProcessor.get()) {
+ if (!a.getXferProcessor().isEqual(b.getXferProcessor())) {
+ return false;
+ }
+ }
+
+ for (int i = 0; i < a.numFragmentProcessors(); i++) {
+ if (!a.getFragmentProcessor(i).isEqual(b.getFragmentProcessor(i))) {
+ return false;
+ }
+ }
+ return true;
+}
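CreateAt above constructs the pipeline into caller-provided storage and, on the skip-draw path, runs the destructor by hand before returning null. A minimal standalone sketch of that placement-construction pattern follows; the Pipeline/create_at names are illustrative, not Skia API.

#include <new>       // placement new

struct Pipeline {
    int fFlags = 0;
    // ... state that would be copied from a builder ...
};

// Build into pre-allocated storage; if the draw would be skipped, destroy the
// partially built object and return nullptr so the caller knows nothing lives there.
Pipeline* create_at(void* memory, bool skipDraw) {
    Pipeline* p = new (memory) Pipeline();   // placement new: no heap allocation
    if (skipDraw) {
        p->~Pipeline();                      // manual destructor call
        return nullptr;
    }
    return p;
}

int main() {
    alignas(Pipeline) unsigned char storage[sizeof(Pipeline)];
    if (Pipeline* p = create_at(storage, /*skipDraw=*/false)) {
        p->~Pipeline();                      // caller owns the lifetime of the storage
    }
    return 0;
}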
diff --git a/gfx/skia/skia/src/gpu/GrPipeline.h b/gfx/skia/skia/src/gpu/GrPipeline.h
new file mode 100644
index 000000000..6366e8050
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPipeline.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPipeline_DEFINED
+#define GrPipeline_DEFINED
+
+#include "GrColor.h"
+#include "GrFragmentProcessor.h"
+#include "GrGpu.h"
+#include "GrNonAtomicRef.h"
+#include "GrPendingProgramElement.h"
+#include "GrPrimitiveProcessor.h"
+#include "GrProcOptInfo.h"
+#include "GrProgramDesc.h"
+#include "GrScissorState.h"
+#include "GrStencilSettings.h"
+#include "GrWindowRectsState.h"
+#include "SkMatrix.h"
+#include "SkRefCnt.h"
+
+#include "effects/GrCoverageSetOpXP.h"
+#include "effects/GrDisableColorXP.h"
+#include "effects/GrPorterDuffXferProcessor.h"
+#include "effects/GrSimpleTextureEffect.h"
+
+class GrBatch;
+class GrDrawContext;
+class GrDeviceCoordTexture;
+class GrPipelineBuilder;
+
+struct GrBatchToXPOverrides {
+ GrBatchToXPOverrides()
+ : fUsePLSDstRead(false) {}
+
+ bool fUsePLSDstRead;
+};
+
+struct GrPipelineOptimizations {
+ GrProcOptInfo fColorPOI;
+ GrProcOptInfo fCoveragePOI;
+ GrBatchToXPOverrides fOverrides;
+};
+
+/**
+ * Class that holds an optimized version of a GrPipelineBuilder. It is meant to be an immutable
+ * class, and contains all data needed to set the state for a gpu draw.
+ */
+class GrPipeline : public GrNonAtomicRef<GrPipeline> {
+public:
+ ///////////////////////////////////////////////////////////////////////////
+ /// @name Creation
+
+ struct CreateArgs {
+ const GrPipelineBuilder* fPipelineBuilder;
+ GrDrawContext* fDrawContext;
+ const GrCaps* fCaps;
+ GrPipelineOptimizations fOpts;
+ const GrScissorState* fScissor;
+ const GrWindowRectsState* fWindowRectsState;
+ bool fHasStencilClip;
+ GrXferProcessor::DstTexture fDstTexture;
+ };
+
+ /** Creates a pipeline into a pre-allocated buffer */
+ static GrPipeline* CreateAt(void* memory, const CreateArgs&, GrXPOverridesForBatch*);
+
+ /// @}
+
+ ///////////////////////////////////////////////////////////////////////////
+ /// @name Comparisons
+
+ /**
+ * Returns true if these pipelines are equivalent. Coord transforms may be applied either on
+     * the GPU or the CPU. When they are applied on the CPU, the matrices need not agree in order
+     * to combine draws; note, however, that this comparison takes no such parameter and simply
+     * compares the stored pipeline state.
+ */
+ static bool AreEqual(const GrPipeline& a, const GrPipeline& b);
+
+ /**
+ * Allows a GrBatch subclass to determine whether two GrBatches can combine. This is a stricter
+ * test than isEqual because it also considers blend barriers when the two batches' bounds
+ * overlap
+ */
+ static bool CanCombine(const GrPipeline& a, const SkRect& aBounds,
+ const GrPipeline& b, const SkRect& bBounds,
+ const GrCaps& caps) {
+ if (!AreEqual(a, b)) {
+ return false;
+ }
+ if (a.xferBarrierType(caps)) {
+ return aBounds.fRight <= bBounds.fLeft ||
+ aBounds.fBottom <= bBounds.fTop ||
+ bBounds.fRight <= aBounds.fLeft ||
+ bBounds.fBottom <= aBounds.fTop;
+ }
+ return true;
+ }
+
+ /// @}
+
+ ///////////////////////////////////////////////////////////////////////////
+ /// @name GrFragmentProcessors
+
+ // Make the renderTarget's drawTarget (if it exists) be dependent on any
+ // drawTargets in this pipeline
+ void addDependenciesTo(GrRenderTarget* rt) const;
+
+ int numColorFragmentProcessors() const { return fNumColorProcessors; }
+ int numCoverageFragmentProcessors() const {
+ return fFragmentProcessors.count() - fNumColorProcessors;
+ }
+ int numFragmentProcessors() const { return fFragmentProcessors.count(); }
+
+ const GrXferProcessor& getXferProcessor() const {
+ if (fXferProcessor.get()) {
+ return *fXferProcessor.get();
+ } else {
+ // A null xp member means the common src-over case. GrXferProcessor's ref'ing
+ // mechanism is not thread safe so we do not hold a ref on this global.
+ return GrPorterDuffXPFactory::SimpleSrcOverXP();
+ }
+ }
+
+ const GrFragmentProcessor& getColorFragmentProcessor(int idx) const {
+ SkASSERT(idx < this->numColorFragmentProcessors());
+ return *fFragmentProcessors[idx].get();
+ }
+
+ const GrFragmentProcessor& getCoverageFragmentProcessor(int idx) const {
+ SkASSERT(idx < this->numCoverageFragmentProcessors());
+ return *fFragmentProcessors[fNumColorProcessors + idx].get();
+ }
+
+ const GrFragmentProcessor& getFragmentProcessor(int idx) const {
+ return *fFragmentProcessors[idx].get();
+ }
+
+ /// @}
+
+ /**
+ * Retrieves the currently set render-target.
+ *
+ * @return The currently set render target.
+ */
+ GrRenderTarget* getRenderTarget() const { return fRenderTarget.get(); }
+
+ const GrStencilSettings& getStencil() const { return fStencilSettings; }
+
+ const GrScissorState& getScissorState() const { return fScissorState; }
+
+ const GrWindowRectsState& getWindowRectsState() const { return fWindowRectsState; }
+
+ bool isHWAntialiasState() const { return SkToBool(fFlags & kHWAA_Flag); }
+ bool snapVerticesToPixelCenters() const { return SkToBool(fFlags & kSnapVertices_Flag); }
+ bool getDisableOutputConversionToSRGB() const {
+ return SkToBool(fFlags & kDisableOutputConversionToSRGB_Flag);
+ }
+ bool getAllowSRGBInputs() const {
+ return SkToBool(fFlags & kAllowSRGBInputs_Flag);
+ }
+ bool usesDistanceVectorField() const {
+ return SkToBool(fFlags & kUsesDistanceVectorField_Flag);
+ }
+ bool hasStencilClip() const {
+ return SkToBool(fFlags & kHasStencilClip_Flag);
+ }
+
+ GrXferBarrierType xferBarrierType(const GrCaps& caps) const {
+ return this->getXferProcessor().xferBarrierType(fRenderTarget.get(), caps);
+ }
+
+ /**
+ * Gets whether the target is drawing clockwise, counterclockwise,
+ * or both faces.
+ * @return the current draw face(s).
+ */
+ GrDrawFace getDrawFace() const { return fDrawFace; }
+
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ bool ignoresCoverage() const { return fIgnoresCoverage; }
+
+private:
+ GrPipeline() { /** Initialized in factory function*/ }
+
+ /**
+ * Alter the program desc and inputs (attribs and processors) based on the blend optimization.
+ */
+ void adjustProgramFromOptimizations(const GrPipelineBuilder& ds,
+ GrXferProcessor::OptFlags,
+ const GrProcOptInfo& colorPOI,
+ const GrProcOptInfo& coveragePOI,
+ int* firstColorProcessorIdx,
+ int* firstCoverageProcessorIdx);
+
+ /**
+ * Calculates the primary and secondary output types of the shader. For certain output types
+ * the function may adjust the blend coefficients. After this function is called the src and dst
+ * blend coeffs will represent those used by backend API.
+ */
+ void setOutputStateInfo(const GrPipelineBuilder& ds, GrXferProcessor::OptFlags,
+ const GrCaps&);
+
+ enum Flags {
+ kHWAA_Flag = 0x1,
+ kSnapVertices_Flag = 0x2,
+ kDisableOutputConversionToSRGB_Flag = 0x4,
+ kAllowSRGBInputs_Flag = 0x8,
+ kUsesDistanceVectorField_Flag = 0x10,
+ kHasStencilClip_Flag = 0x20,
+ };
+
+ typedef GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> RenderTarget;
+ typedef GrPendingProgramElement<const GrFragmentProcessor> PendingFragmentProcessor;
+ typedef SkAutoSTArray<8, PendingFragmentProcessor> FragmentProcessorArray;
+ typedef GrPendingProgramElement<const GrXferProcessor> ProgramXferProcessor;
+ RenderTarget fRenderTarget;
+ GrScissorState fScissorState;
+ GrWindowRectsState fWindowRectsState;
+ GrStencilSettings fStencilSettings;
+ GrDrawFace fDrawFace;
+ uint32_t fFlags;
+ ProgramXferProcessor fXferProcessor;
+ FragmentProcessorArray fFragmentProcessors;
+ bool fIgnoresCoverage;
+
+ // This value is also the index in fFragmentProcessors where coverage processors begin.
+ int fNumColorProcessors;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
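CanCombine above adds one wrinkle beyond AreEqual: when the xfer processor requires a barrier, two otherwise identical pipelines may only merge if their draw bounds are disjoint. A standalone sketch of that bounds test (a simplified Rect stands in for SkRect; this is illustrative, not Skia source):

struct Rect { float fLeft, fTop, fRight, fBottom; };

// Mirrors the test inside GrPipeline::CanCombine: with a blend barrier in play,
// draws may only combine when neither rectangle overlaps the other.
bool bounds_are_disjoint(const Rect& a, const Rect& b) {
    return a.fRight <= b.fLeft || a.fBottom <= b.fTop ||
           b.fRight <= a.fLeft || b.fBottom <= a.fTop;
}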
diff --git a/gfx/skia/skia/src/gpu/GrPipelineBuilder.cpp b/gfx/skia/skia/src/gpu/GrPipelineBuilder.cpp
new file mode 100644
index 000000000..864d6f1bc
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPipelineBuilder.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrPipelineBuilder.h"
+
+#include "GrBlend.h"
+#include "GrPaint.h"
+#include "GrPipeline.h"
+#include "GrProcOptInfo.h"
+#include "GrXferProcessor.h"
+#include "batches/GrBatch.h"
+#include "effects/GrPorterDuffXferProcessor.h"
+
+GrPipelineBuilder::GrPipelineBuilder()
+ : fFlags(0x0)
+ , fUserStencilSettings(&GrUserStencilSettings::kUnused)
+ , fDrawFace(GrDrawFace::kBoth) {
+ SkDEBUGCODE(fBlockEffectRemovalCnt = 0;)
+}
+
+GrPipelineBuilder::GrPipelineBuilder(const GrPaint& paint, bool useHWAA)
+ : GrPipelineBuilder() {
+ SkDEBUGCODE(fBlockEffectRemovalCnt = 0;)
+
+ for (int i = 0; i < paint.numColorFragmentProcessors(); ++i) {
+ fColorFragmentProcessors.emplace_back(SkRef(paint.getColorFragmentProcessor(i)));
+ }
+
+ for (int i = 0; i < paint.numCoverageFragmentProcessors(); ++i) {
+ fCoverageFragmentProcessors.emplace_back(SkRef(paint.getCoverageFragmentProcessor(i)));
+ }
+
+ fXPFactory.reset(SkSafeRef(paint.getXPFactory()));
+
+ this->setState(GrPipelineBuilder::kHWAntialias_Flag, useHWAA);
+ this->setState(GrPipelineBuilder::kDisableOutputConversionToSRGB_Flag,
+ paint.getDisableOutputConversionToSRGB());
+ this->setState(GrPipelineBuilder::kAllowSRGBInputs_Flag,
+ paint.getAllowSRGBInputs());
+ this->setState(GrPipelineBuilder::kUsesDistanceVectorField_Flag,
+ paint.usesDistanceVectorField());
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+bool GrPipelineBuilder::willXPNeedDstTexture(const GrCaps& caps,
+ const GrPipelineOptimizations& optimizations) const {
+ if (this->getXPFactory()) {
+ return this->getXPFactory()->willNeedDstTexture(caps, optimizations);
+ }
+ return GrPorterDuffXPFactory::SrcOverWillNeedDstTexture(caps, optimizations);
+}
+
+void GrPipelineBuilder::AutoRestoreFragmentProcessorState::set(
+ const GrPipelineBuilder* pipelineBuilder) {
+ if (fPipelineBuilder) {
+ int m = fPipelineBuilder->numColorFragmentProcessors() - fColorEffectCnt;
+ SkASSERT(m >= 0);
+ fPipelineBuilder->fColorFragmentProcessors.pop_back_n(m);
+
+ int n = fPipelineBuilder->numCoverageFragmentProcessors() - fCoverageEffectCnt;
+ SkASSERT(n >= 0);
+ fPipelineBuilder->fCoverageFragmentProcessors.pop_back_n(n);
+
+ SkDEBUGCODE(--fPipelineBuilder->fBlockEffectRemovalCnt;)
+ }
+ fPipelineBuilder = const_cast<GrPipelineBuilder*>(pipelineBuilder);
+ if (nullptr != pipelineBuilder) {
+ fColorEffectCnt = pipelineBuilder->numColorFragmentProcessors();
+ fCoverageEffectCnt = pipelineBuilder->numCoverageFragmentProcessors();
+ SkDEBUGCODE(++pipelineBuilder->fBlockEffectRemovalCnt;)
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrPipelineBuilder::~GrPipelineBuilder() {
+ SkASSERT(0 == fBlockEffectRemovalCnt);
+}
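The set() logic above pops whatever fragment processors were appended to the builder after the auto-restore object was bound to it. A rough usage sketch, assuming a builder and an extra coverage processor are already in hand (the function and local names are illustrative):

// Scoped, temporary modification of a notionally const GrPipelineBuilder.
void draw_with_extra_coverage(const GrPipelineBuilder& builder,
                              sk_sp<GrFragmentProcessor> extraCoverageFP) {
    GrPipelineBuilder::AutoRestoreFragmentProcessorState arfps(builder);
    arfps.addCoverageFragmentProcessor(std::move(extraCoverageFP));
    // ... issue the draw that needs the extra coverage stage ...
}   // arfps' destructor pops the processor added above, restoring the builder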
diff --git a/gfx/skia/skia/src/gpu/GrPipelineBuilder.h b/gfx/skia/skia/src/gpu/GrPipelineBuilder.h
new file mode 100644
index 000000000..0c33eb344
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPipelineBuilder.h
@@ -0,0 +1,317 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPipelineBuilder_DEFINED
+#define GrPipelineBuilder_DEFINED
+
+#include "GrBlend.h"
+#include "GrCaps.h"
+#include "GrGpuResourceRef.h"
+#include "GrProcOptInfo.h"
+#include "GrRenderTarget.h"
+#include "GrUserStencilSettings.h"
+#include "GrXferProcessor.h"
+#include "SkMatrix.h"
+#include "SkRefCnt.h"
+#include "effects/GrCoverageSetOpXP.h"
+#include "effects/GrDisableColorXP.h"
+#include "effects/GrPorterDuffXferProcessor.h"
+#include "effects/GrSimpleTextureEffect.h"
+
+class GrDrawBatch;
+class GrCaps;
+class GrPaint;
+class GrTexture;
+
+class GrPipelineBuilder : public SkNoncopyable {
+public:
+ GrPipelineBuilder();
+
+ /**
+ * Initializes the GrPipelineBuilder based on a GrPaint and MSAA availability. Note
+ * that GrPipelineBuilder encompasses more than GrPaint. Aspects of GrPipelineBuilder that have
+     * no GrPaint equivalents are set to default values, with two exceptions: vertex attribute
+     * state, which is left unmodified by this function, and clipping, which will be enabled.
+ */
+ GrPipelineBuilder(const GrPaint&, bool useHWAA = false);
+
+ virtual ~GrPipelineBuilder();
+
+ ///////////////////////////////////////////////////////////////////////////
+ /// @name Fragment Processors
+ ///
+ /// GrFragmentProcessors are used to compute per-pixel color and per-pixel fractional coverage.
+ /// There are two chains of FPs, one for color and one for coverage. The first FP in each
+ /// chain gets the initial color/coverage from the GrPrimitiveProcessor. It computes an output
+ /// color/coverage which is fed to the next FP in the chain. The last color and coverage FPs
+ /// feed their output to the GrXferProcessor which controls blending.
+ ////
+
+ int numColorFragmentProcessors() const { return fColorFragmentProcessors.count(); }
+ int numCoverageFragmentProcessors() const { return fCoverageFragmentProcessors.count(); }
+ int numFragmentProcessors() const { return this->numColorFragmentProcessors() +
+ this->numCoverageFragmentProcessors(); }
+
+ const GrFragmentProcessor* getColorFragmentProcessor(int idx) const {
+ return fColorFragmentProcessors[idx].get();
+ }
+ const GrFragmentProcessor* getCoverageFragmentProcessor(int idx) const {
+ return fCoverageFragmentProcessors[idx].get();
+ }
+
+ void addColorFragmentProcessor(sk_sp<GrFragmentProcessor> processor) {
+ SkASSERT(processor);
+ fColorFragmentProcessors.push_back(std::move(processor));
+ }
+
+ void addCoverageFragmentProcessor(sk_sp<GrFragmentProcessor> processor) {
+ SkASSERT(processor);
+ fCoverageFragmentProcessors.push_back(std::move(processor));
+ }
+
+ /**
+ * Creates a GrSimpleTextureEffect that uses local coords as texture coordinates.
+ */
+ void addColorTextureProcessor(GrTexture* texture, const SkMatrix& matrix) {
+ this->addColorFragmentProcessor(GrSimpleTextureEffect::Make(texture, nullptr, matrix));
+ }
+
+ void addCoverageTextureProcessor(GrTexture* texture, const SkMatrix& matrix) {
+ this->addCoverageFragmentProcessor(GrSimpleTextureEffect::Make(texture, nullptr, matrix));
+ }
+
+ void addColorTextureProcessor(GrTexture* texture,
+ const SkMatrix& matrix,
+ const GrTextureParams& params) {
+ this->addColorFragmentProcessor(GrSimpleTextureEffect::Make(texture, nullptr, matrix,
+ params));
+ }
+
+ void addCoverageTextureProcessor(GrTexture* texture,
+ const SkMatrix& matrix,
+ const GrTextureParams& params) {
+ this->addCoverageFragmentProcessor(GrSimpleTextureEffect::Make(texture, nullptr, matrix,
+ params));
+ }
+
+ /**
+ * When this object is destroyed it will remove any color/coverage FPs from the pipeline builder
+ * that were added after its constructor.
+ * This class can transiently modify its "const" GrPipelineBuilder object but will restore it
+ * when done - so it is notionally "const" correct.
+ */
+ class AutoRestoreFragmentProcessorState : public ::SkNoncopyable {
+ public:
+ AutoRestoreFragmentProcessorState()
+ : fPipelineBuilder(nullptr)
+ , fColorEffectCnt(0)
+ , fCoverageEffectCnt(0) {}
+
+ AutoRestoreFragmentProcessorState(const GrPipelineBuilder& ds)
+ : fPipelineBuilder(nullptr)
+ , fColorEffectCnt(0)
+ , fCoverageEffectCnt(0) {
+ this->set(&ds);
+ }
+
+ ~AutoRestoreFragmentProcessorState() { this->set(nullptr); }
+
+ void set(const GrPipelineBuilder* ds);
+
+ bool isSet() const { return SkToBool(fPipelineBuilder); }
+
+ void addCoverageFragmentProcessor(sk_sp<GrFragmentProcessor> processor) {
+ SkASSERT(this->isSet());
+ return fPipelineBuilder->addCoverageFragmentProcessor(std::move(processor));
+ }
+
+ private:
+ // notionally const (as marginalia)
+ GrPipelineBuilder* fPipelineBuilder;
+ int fColorEffectCnt;
+ int fCoverageEffectCnt;
+ };
+
+ /// @}
+
+ ///////////////////////////////////////////////////////////////////////////
+ /// @name Blending
+ ////
+
+ /**
+ * Installs a GrXPFactory. This object controls how src color, fractional pixel coverage,
+ * and the dst color are blended.
+ */
+ void setXPFactory(sk_sp<GrXPFactory> xpFactory) {
+ fXPFactory = std::move(xpFactory);
+ }
+
+ /**
+ * Sets a GrXPFactory that disables color writes to the destination. This is useful when
+ * rendering to the stencil buffer.
+ */
+ void setDisableColorXPFactory() {
+ fXPFactory = GrDisableColorXPFactory::Make();
+ }
+
+ const GrXPFactory* getXPFactory() const {
+ return fXPFactory.get();
+ }
+
+ /**
+ * Checks whether the xp will need destination in a texture to correctly blend.
+ */
+ bool willXPNeedDstTexture(const GrCaps& caps,
+ const GrPipelineOptimizations& optimizations) const;
+
+ /// @}
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ /// @name Stencil
+ ////
+
+ bool hasUserStencilSettings() const { return !fUserStencilSettings->isUnused(); }
+ const GrUserStencilSettings* getUserStencil() const { return fUserStencilSettings; }
+
+ /**
+ * Sets the user stencil settings for the next draw.
+ * This class only stores pointers to stencil settings objects.
+ * The caller guarantees the pointer will remain valid until it
+ * changes or goes out of scope.
+ * @param settings the stencil settings to use.
+ */
+ void setUserStencil(const GrUserStencilSettings* settings) { fUserStencilSettings = settings; }
+ void disableUserStencil() { fUserStencilSettings = &GrUserStencilSettings::kUnused; }
+
+ /// @}
+
+ ///////////////////////////////////////////////////////////////////////////
+ /// @name State Flags
+ ////
+
+ /**
+ * Flags that affect rendering. Controlled using enable/disableState(). All
+ * default to disabled.
+ */
+ enum Flags {
+ /**
+ * Perform HW anti-aliasing. This means either HW FSAA, if supported by the render target,
+ * or smooth-line rendering if a line primitive is drawn and line smoothing is supported by
+ * the 3D API.
+ */
+ kHWAntialias_Flag = 0x01,
+
+ /**
+ * Modifies the vertex shader so that vertices will be positioned at pixel centers.
+ */
+ kSnapVerticesToPixelCenters_Flag = 0x02,
+
+ /**
+ * Suppress linear -> sRGB conversion when rendering to sRGB render targets.
+ */
+ kDisableOutputConversionToSRGB_Flag = 0x04,
+
+ /**
+ * Allow sRGB -> linear conversion when reading from sRGB inputs.
+ */
+ kAllowSRGBInputs_Flag = 0x08,
+
+ /**
+ * Signals that one or more FPs need access to the distance vector field to the nearest
+ * edge
+ */
+ kUsesDistanceVectorField_Flag = 0x10,
+
+ kLast_Flag = kUsesDistanceVectorField_Flag,
+ };
+
+ bool isHWAntialias() const { return SkToBool(fFlags & kHWAntialias_Flag); }
+ bool snapVerticesToPixelCenters() const {
+ return SkToBool(fFlags & kSnapVerticesToPixelCenters_Flag); }
+ bool getDisableOutputConversionToSRGB() const {
+ return SkToBool(fFlags & kDisableOutputConversionToSRGB_Flag); }
+ bool getAllowSRGBInputs() const {
+ return SkToBool(fFlags & kAllowSRGBInputs_Flag); }
+ bool getUsesDistanceVectorField() const {
+ return SkToBool(fFlags & kUsesDistanceVectorField_Flag); }
+
+ /**
+ * Enable render state settings.
+ *
+ * @param flags bitfield of Flags specifying the states to enable
+ */
+ void enableState(uint32_t flags) { fFlags |= flags; }
+
+ /**
+ * Disable render state settings.
+ *
+ * @param flags bitfield of Flags specifying the states to disable
+ */
+ void disableState(uint32_t flags) { fFlags &= ~(flags); }
+
+ /**
+ * Enable or disable flags based on a boolean.
+ *
+ * @param flags bitfield of Flags to enable or disable
+ * @param enable if true enable stateBits, otherwise disable
+ */
+ void setState(uint32_t flags, bool enable) {
+ if (enable) {
+ this->enableState(flags);
+ } else {
+ this->disableState(flags);
+ }
+ }
+
+ /// @}
+
+ ///////////////////////////////////////////////////////////////////////////
+ /// @name Face Culling
+ ////
+
+ /**
+ * Gets whether the target is drawing clockwise, counterclockwise,
+ * or both faces.
+ * @return the current draw face(s).
+ */
+ GrDrawFace getDrawFace() const { return fDrawFace; }
+
+ /**
+ * Controls whether clockwise, counterclockwise, or both faces are drawn.
+ * @param face the face(s) to draw.
+ */
+ void setDrawFace(GrDrawFace face) {
+ SkASSERT(GrDrawFace::kInvalid != face);
+ fDrawFace = face;
+ }
+
+ /// @}
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ bool usePLSDstRead(const GrDrawBatch* batch) const;
+
+private:
+ // Some of the auto restore objects assume that no effects are removed during their lifetime.
+ // This is used to assert that this condition holds.
+ SkDEBUGCODE(mutable int fBlockEffectRemovalCnt;)
+
+ typedef SkSTArray<4, sk_sp<GrFragmentProcessor>> FragmentProcessorArray;
+
+ uint32_t fFlags;
+ const GrUserStencilSettings* fUserStencilSettings;
+ GrDrawFace fDrawFace;
+ mutable sk_sp<GrXPFactory> fXPFactory;
+ FragmentProcessorArray fColorFragmentProcessors;
+ FragmentProcessorArray fCoverageFragmentProcessors;
+
+ friend class GrPipeline;
+ friend class GrDrawTarget;
+};
+
+#endif
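Putting the pieces above together, a stencil-only pass would typically disable color writes and install user stencil settings on the builder. A hedged sketch (the stencil-settings object is assumed to be caller-owned and to outlive the builder, as the setUserStencil contract requires; this is illustrative, not code from the patch):

// Sketch only: 'stencilOnly' is an assumed, caller-owned settings object.
void configure_stencil_only(GrPipelineBuilder* builder,
                            const GrUserStencilSettings* stencilOnly) {
    builder->setDisableColorXPFactory();   // write to the stencil buffer, not color
    builder->setUserStencil(stencilOnly);  // caller keeps the settings alive
    builder->enableState(GrPipelineBuilder::kHWAntialias_Flag);
}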
diff --git a/gfx/skia/skia/src/gpu/GrPrimitiveProcessor.cpp b/gfx/skia/skia/src/gpu/GrPrimitiveProcessor.cpp
new file mode 100644
index 000000000..2aaaa0474
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPrimitiveProcessor.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrPrimitiveProcessor.h"
+
+#include "GrCoordTransform.h"
+
+/**
+ * The key for an individual coord transform is made up of a matrix type, a precision, and a bit
+ * that indicates the source of the input coords.
+ */
+enum {
+ kMatrixTypeKeyBits = 1,
+ kMatrixTypeKeyMask = (1 << kMatrixTypeKeyBits) - 1,
+
+ kPrecisionBits = 2,
+ kPrecisionShift = kMatrixTypeKeyBits,
+
+ kPositionCoords_Flag = (1 << (kPrecisionShift + kPrecisionBits)),
+
+ kTransformKeyBits = kMatrixTypeKeyBits + kPrecisionBits + 2,
+};
+
+GR_STATIC_ASSERT(kHigh_GrSLPrecision < (1 << kPrecisionBits));
+
+/**
+ * We specialize the vertex code for each of these matrix types.
+ */
+enum MatrixType {
+ kNoPersp_MatrixType = 0,
+ kGeneral_MatrixType = 1,
+};
+
+uint32_t
+GrPrimitiveProcessor::getTransformKey(const SkTArray<const GrCoordTransform*, true>& coords,
+ int numCoords) const {
+ uint32_t totalKey = 0;
+ for (int t = 0; t < numCoords; ++t) {
+ uint32_t key = 0;
+ const GrCoordTransform* coordTransform = coords[t];
+ if (coordTransform->getMatrix().hasPerspective()) {
+ key |= kGeneral_MatrixType;
+ } else {
+ key |= kNoPersp_MatrixType;
+ }
+
+ if (!this->hasExplicitLocalCoords()) {
+ key |= kPositionCoords_Flag;
+ }
+
+ GR_STATIC_ASSERT(kGrSLPrecisionCount <= (1 << kPrecisionBits));
+ key |= (coordTransform->precision() << kPrecisionShift);
+
+ key <<= kTransformKeyBits * t;
+
+ SkASSERT(0 == (totalKey & key)); // keys for each transform ought not to overlap
+ totalKey |= key;
+ }
+ return totalKey;
+}
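With the constants at the top of this file, each transform contributes a 5-bit field: one matrix-type bit, two precision bits starting at bit 1, and a position-coords flag at bit 3, shifted left by kTransformKeyBits * t for transform t. A worked example (using 1 for medium precision is an assumption about GrSLPrecision's ordering):

// transform 0: perspective matrix        -> matrix-type bit              = 0x1
//              medium precision (== 1)   -> 1 << kPrecisionShift         = 0x2
//              explicit local coords     -> kPositionCoords_Flag not set
//              key                       = 0x3
// transform 1: same settings, shifted by kTransformKeyBits * 1 = 5 bits
//              key                       = 0x3 << 5                      = 0x60
// totalKey = 0x3 | 0x60 = 0x63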
diff --git a/gfx/skia/skia/src/gpu/GrPrimitiveProcessor.h b/gfx/skia/skia/src/gpu/GrPrimitiveProcessor.h
new file mode 100644
index 000000000..00b4df01e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrPrimitiveProcessor.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPrimitiveProcessor_DEFINED
+#define GrPrimitiveProcessor_DEFINED
+
+#include "GrColor.h"
+#include "GrProcessor.h"
+#include "GrShaderVar.h"
+
+/*
+ * The GrPrimitiveProcessor represents some kind of geometric primitive. This includes the shape
+ * of the primitive and the inherent color of the primitive. The GrPrimitiveProcessor is
+ * responsible for providing a color and coverage input into the Ganesh rendering pipeline. Through
+ * optimization, Ganesh may decide a different color, no color, and / or no coverage are required
+ * from the GrPrimitiveProcessor, so the GrPrimitiveProcessor must be able to support this
+ * functionality. We also use the GrPrimitiveProcessor to make batching decisions.
+ *
+ * There are two feedback loops between the GrFragmentProcessors, the GrXferProcessor, and the
+ * GrPrimitiveProcessor. These loops run on the CPU and compute any invariant components which
+ * might be useful for correctness / optimization decisions. The GrPrimitiveProcessor seeds these
+ * loops, one with initial color and one with initial coverage, in its
+ * onComputeInvariantColor / Coverage calls. These seed values are processed by the subsequent
+ * stages of the rendering pipeline and the output is then fed back into the GrPrimitiveProcessor in
+ * the initBatchTracker call, where the GrPrimitiveProcessor can then initialize the GrBatchTracker
+ * struct with the appropriate values.
+ *
+ * We are evolving this system to move towards generating geometric meshes and their associated
+ * vertex data after we have batched and reordered draws. This system, known as 'deferred geometry'
+ * will allow the GrPrimitiveProcessor much greater control over how data is transmitted to shaders.
+ *
+ * In a deferred geometry world, the GrPrimitiveProcessor can always 'batch'. To do this, each
+ * primitive type is associated with one GrPrimitiveProcessor, who has complete control of how
+ * it draws. Each primitive draw will bundle all required data to perform the draw, and these
+ * bundles of data will be owned by an instance of the associated GrPrimitiveProcessor. Bundles
+ * can be updated alongside the GrBatchTracker struct itself, ultimately allowing the
+ * GrPrimitiveProcessor complete control of how it gets data into the fragment shader as long as
+ * it emits the appropriate color, or none at all, as directed.
+ */
+
+class GrGLSLCaps;
+class GrGLSLPrimitiveProcessor;
+
+struct GrInitInvariantOutput;
+
+// Describes the state of pixel local storage with respect to the current draw.
+enum GrPixelLocalStorageState {
+ // The draw is actively updating PLS.
+ kDraw_GrPixelLocalStorageState,
+ // The draw is a "finish" operation which is reading from PLS and writing color.
+ kFinish_GrPixelLocalStorageState,
+ // The draw does not use PLS.
+ kDisabled_GrPixelLocalStorageState
+};
+
+/*
+ * This class allows the GrPipeline to communicate information about the pipeline to a
+ * GrBatch which should be forwarded to the GrPrimitiveProcessor(s) created by the batch.
+ * These are not properly part of the pipeline because they assume the specific inputs
+ * that the batch provided when it created the pipeline. Identical pipelines may be
+ * created by different batches with different input assumptions and therefore different
+ * computed optimizations. It is the batch-specific optimizations that allow the pipelines
+ * to be equal.
+ */
+class GrXPOverridesForBatch {
+public:
+ /** Does the pipeline require the GrPrimitiveProcessor's color? */
+ bool readsColor() const { return SkToBool(kReadsColor_Flag & fFlags); }
+
+ /** Does the pipeline require the GrPrimitiveProcessor's coverage? */
+    bool readsCoverage() const { return SkToBool(kReadsCoverage_Flag & fFlags); }
+
+ /** Does the pipeline require access to (implicit or explicit) local coordinates? */
+ bool readsLocalCoords() const {
+ return SkToBool(kReadsLocalCoords_Flag & fFlags);
+ }
+
+ /** Does the pipeline allow the GrPrimitiveProcessor to combine color and coverage into one
+        color output? */
+ bool canTweakAlphaForCoverage() const {
+ return SkToBool(kCanTweakAlphaForCoverage_Flag & fFlags);
+ }
+
+ /** Does the pipeline require the GrPrimitiveProcessor to specify a specific color (and if
+ so get the color)? */
+ bool getOverrideColorIfSet(GrColor* overrideColor) const {
+ if (SkToBool(kUseOverrideColor_Flag & fFlags)) {
+ SkASSERT(SkToBool(kReadsColor_Flag & fFlags));
+ if (overrideColor) {
+ *overrideColor = fOverrideColor;
+ }
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Returns true if the pipeline's color output will be affected by the existing render target
+ * destination pixel values (meaning we need to be careful with overlapping draws). Note that we
+ * can conflate coverage and color, so the destination color may still bleed into pixels that
+ * have partial coverage, even if this function returns false.
+ *
+     * The above comment seems incorrect for the use case. This function is used to turn two
+ * overlapping draws into a single draw (really to stencil multiple paths and do a single
+ * cover). It seems that what really matters is whether the dst is read for color OR for
+ * coverage.
+ */
+ bool willColorBlendWithDst() const { return SkToBool(kWillColorBlendWithDst_Flag & fFlags); }
+
+private:
+ enum {
+ // If this is not set the primitive processor need not produce a color output
+ kReadsColor_Flag = 0x1,
+
+ // If this is not set the primitive processor need not produce a coverage output
+ kReadsCoverage_Flag = 0x2,
+
+ // If this is not set the primitive processor need not produce local coordinates
+ kReadsLocalCoords_Flag = 0x4,
+
+ // If this flag is set then the primitive processor may produce color*coverage as
+ // its color output (and not output a separate coverage).
+ kCanTweakAlphaForCoverage_Flag = 0x8,
+
+ // If this flag is set the GrPrimitiveProcessor must produce fOverrideColor as its
+ // output color. If not set fOverrideColor is to be ignored.
+ kUseOverrideColor_Flag = 0x10,
+
+ kWillColorBlendWithDst_Flag = 0x20,
+ };
+
+ uint32_t fFlags;
+ GrColor fOverrideColor;
+
+ friend class GrPipeline; // To initialize this
+};
+
+/*
+ * GrPrimitiveProcessor defines an interface which all subclasses must implement. All
+ * GrPrimitiveProcessors must provide seed color and coverage for the Ganesh color / coverage
+ * pipelines, and they must provide some notion of equality.
+ */
+class GrPrimitiveProcessor : public GrProcessor {
+public:
+ // Only the GrGeometryProcessor subclass actually has a geo shader or vertex attributes, but
+ // we put these calls on the base class to prevent having to cast
+ virtual bool willUseGeoShader() const = 0;
+
+ struct Attribute {
+ Attribute()
+ : fName(nullptr)
+ , fType(kFloat_GrVertexAttribType)
+ , fOffset(0) {}
+ Attribute(const char* name, GrVertexAttribType type, GrSLPrecision precision)
+ : fName(name)
+ , fType(type)
+ , fOffset(SkAlign4(GrVertexAttribTypeSize(type)))
+ , fPrecision(precision) {}
+ const char* fName;
+ GrVertexAttribType fType;
+ size_t fOffset;
+ GrSLPrecision fPrecision;
+ };
+
+ int numAttribs() const { return fAttribs.count(); }
+ const Attribute& getAttrib(int index) const { return fAttribs[index]; }
+
+    // Returns the vertex stride of the GP. A common use case is to request geometry from a
+    // draw target based on the stride, and to populate this memory using an implicit array of
+    // structs. In this case, it is best to assert that the vertex stride == sizeof(VertexStruct).
+ size_t getVertexStride() const { return fVertexStride; }
+
+ /**
+ * Computes a transformKey from an array of coord transforms. Will only look at the first
+ * <numCoords> transforms in the array.
+ *
+ * TODO: A better name for this function would be "compute" instead of "get".
+ */
+ uint32_t getTransformKey(const SkTArray<const GrCoordTransform*, true>& coords,
+ int numCoords) const;
+
+ /**
+ * Sets a unique key on the GrProcessorKeyBuilder that is directly associated with this geometry
+ * processor's GL backend implementation.
+ *
+ * TODO: A better name for this function would be "compute" instead of "get".
+ */
+ virtual void getGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const = 0;
+
+
+ /** Returns a new instance of the appropriate *GL* implementation class
+ for the given GrProcessor; caller is responsible for deleting
+ the object. */
+ virtual GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps& caps) const = 0;
+
+ virtual bool isPathRendering() const { return false; }
+
+ virtual GrPixelLocalStorageState getPixelLocalStorageState() const {
+ return kDisabled_GrPixelLocalStorageState;
+ }
+
+ /**
+ * If non-null, overrides the dest color returned by GrGLSLFragmentShaderBuilder::dstColor().
+ */
+ virtual const char* getDestColorOverride() const { return nullptr; }
+
+ virtual float getSampleShading() const {
+ return 0.0;
+ }
+
+ /* Sub-class should override and return true if this primitive processor implements the distance
+ * vector field, a field of vectors to the nearest point in the edge of the shape. */
+ virtual bool implementsDistanceVector() const { return false; }
+
+protected:
+ GrPrimitiveProcessor() : fVertexStride(0) {}
+
+ enum { kPreallocAttribCnt = 8 };
+ SkSTArray<kPreallocAttribCnt, Attribute> fAttribs;
+ size_t fVertexStride;
+
+private:
+ void notifyRefCntIsZero() const final {}
+ virtual bool hasExplicitLocalCoords() const = 0;
+
+ typedef GrProcessor INHERITED;
+};
+
+#endif
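A short sketch of how a batch might consume GrXPOverridesForBatch when setting up its primitive processor; the color/needsLocalCoords outputs and the function itself are illustrative, not part of this header:

// Apply the pipeline's batch-time overrides to batch-owned state.
void apply_overrides(const GrXPOverridesForBatch& overrides,
                     GrColor* color, bool* needsLocalCoords) {
    GrColor overrideColor;
    if (overrides.getOverrideColorIfSet(&overrideColor)) {
        *color = overrideColor;              // the pipeline dictates the color
    }
    *needsLocalCoords = overrides.readsLocalCoords();
}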
diff --git a/gfx/skia/skia/src/gpu/GrProcOptInfo.cpp b/gfx/skia/skia/src/gpu/GrProcOptInfo.cpp
new file mode 100644
index 000000000..183a42fb3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProcOptInfo.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrProcOptInfo.h"
+
+#include "GrGeometryProcessor.h"
+
+#include "batches/GrDrawBatch.h"
+
+void GrProcOptInfo::calcWithInitialValues(const GrFragmentProcessor * const processors[],
+ int cnt,
+ GrColor startColor,
+ GrColorComponentFlags flags,
+ bool areCoverageStages,
+ bool isLCD) {
+ GrInitInvariantOutput out;
+ out.fIsSingleComponent = areCoverageStages;
+ out.fColor = startColor;
+ out.fValidFlags = flags;
+ out.fIsLCDCoverage = isLCD;
+ fInOut.reset(out);
+ this->internalCalc(processors, cnt);
+}
+
+void GrProcOptInfo::initUsingInvariantOutput(GrInitInvariantOutput invOutput) {
+ fInOut.reset(invOutput);
+}
+
+void GrProcOptInfo::completeCalculations(const GrFragmentProcessor * const processors[], int cnt) {
+ this->internalCalc(processors, cnt);
+}
+
+void GrProcOptInfo::internalCalc(const GrFragmentProcessor* const processors[], int cnt) {
+ fFirstEffectiveProcessorIndex = 0;
+ fInputColorIsUsed = true;
+ fInputColor = fInOut.color();
+
+ for (int i = 0; i < cnt; ++i) {
+ const GrFragmentProcessor* processor = processors[i];
+ fInOut.resetWillUseInputColor();
+ processor->computeInvariantOutput(&fInOut);
+ SkDEBUGCODE(fInOut.validate());
+ if (!fInOut.willUseInputColor()) {
+ fFirstEffectiveProcessorIndex = i;
+ fInputColorIsUsed = false;
+ }
+ if (kRGBA_GrColorComponentFlags == fInOut.validFlags()) {
+ fFirstEffectiveProcessorIndex = i + 1;
+ fInputColor = fInOut.color();
+ fInputColorIsUsed = true;
+ // Since we are clearing all previous color stages we are in a state where we have found
+ // zero stages that don't multiply the inputColor.
+ fInOut.resetNonMulStageFound();
+ }
+ }
+}
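A short trace of internalCalc under an assumed three-stage chain, to show how fFirstEffectiveProcessorIndex advances (the chain itself is hypothetical):

// Assumed chain: [0] modulates its input, [1] ignores its input,
//                [2] outputs a fully known (kRGBA-valid) color.
//   i = 0: input color used                 -> firstEffective stays 0
//   i = 1: willUseInputColor() == false     -> firstEffective = 1, inputColorIsUsed = false
//   i = 2: validFlags() == kRGBA_...        -> firstEffective = 3 (i + 1), inputColorIsUsed = true
// Result: all three stages can be dropped and the known constant color used in their place.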
diff --git a/gfx/skia/skia/src/gpu/GrProcOptInfo.h b/gfx/skia/skia/src/gpu/GrProcOptInfo.h
new file mode 100644
index 000000000..87e7cd9c6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProcOptInfo.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrProcOptInfo_DEFINED
+#define GrProcOptInfo_DEFINED
+
+#include "GrColor.h"
+#include "GrInvariantOutput.h"
+
+class GrDrawBatch;
+class GrFragmentProcessor;
+class GrPrimitiveProcessor;
+
+/**
+ * GrProcOptInfo gathers invariant data from a set of processor stages. It is used to recognize
+ * optimizations related to eliminating stages and vertex attributes that aren't necessary for a
+ * draw.
+ */
+class GrProcOptInfo {
+public:
+ GrProcOptInfo()
+ : fInOut(0, static_cast<GrColorComponentFlags>(0), false)
+ , fFirstEffectiveProcessorIndex(0)
+ , fInputColorIsUsed(true)
+ , fInputColor(0) {}
+
+ void calcWithInitialValues(const GrFragmentProcessor* const *, int cnt, GrColor startColor,
+ GrColorComponentFlags, bool areCoverageStages, bool isLCD = false);
+ void initUsingInvariantOutput(GrInitInvariantOutput invOutput);
+ void completeCalculations(const GrFragmentProcessor * const processors[], int cnt);
+
+ bool isSolidWhite() const { return fInOut.isSolidWhite(); }
+ bool isOpaque() const { return fInOut.isOpaque(); }
+ bool isSingleComponent() const { return fInOut.isSingleComponent(); }
+ bool allStagesMultiplyInput() const { return fInOut.allStagesMulInput(); }
+
+    // TODO: Once texture pixel config queries are updated, we will no longer need this function.
+ // For now this function will correctly tell us if we are using LCD text or not and should only
+ // be called when looking at the coverage output.
+ bool isFourChannelOutput() const { return !fInOut.isSingleComponent() &&
+ fInOut.isLCDCoverage(); }
+
+ GrColor color() const { return fInOut.color(); }
+
+ GrColorComponentFlags validFlags() const {
+ return fInOut.validFlags();
+ }
+
+ /**
+ * Returns the index of the first effective color processor. If an intermediate processor
+ * doesn't read its input or has a known output, then we can ignore all earlier processors
+     * since they will not affect the final output. Thus the first effective processor's index is
+     * the index of the first processor that will have an effect on the final output.
+ *
+ * If processors before the firstEffectiveProcessorIndex() are removed, corresponding values
+ * from inputColorIsUsed(), inputColorToEffectiveProcessor(), removeVertexAttribs(), and
+ * readsDst() must be used when setting up the draw to ensure correct drawing.
+ */
+ int firstEffectiveProcessorIndex() const { return fFirstEffectiveProcessorIndex; }
+
+ /**
+ * True if the first effective processor reads its input, false otherwise.
+ */
+ bool inputColorIsUsed() const { return fInputColorIsUsed; }
+
+ /**
+ * If input color is used and per-vertex colors are not used, this is the input color to the
+ * first effective processor.
+ */
+ GrColor inputColorToFirstEffectiveProccesor() const { return fInputColor; }
+
+private:
+ void internalCalc(const GrFragmentProcessor* const[], int cnt);
+
+ GrInvariantOutput fInOut;
+ int fFirstEffectiveProcessorIndex;
+ bool fInputColorIsUsed;
+ GrColor fInputColor;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrProcessor.cpp b/gfx/skia/skia/src/gpu/GrProcessor.cpp
new file mode 100644
index 000000000..5ca993596
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProcessor.cpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrProcessor.h"
+#include "GrContext.h"
+#include "GrGeometryProcessor.h"
+#include "GrInvariantOutput.h"
+#include "GrMemoryPool.h"
+#include "GrXferProcessor.h"
+#include "SkSpinlock.h"
+
+#if SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
+
+class GrFragmentProcessor;
+class GrGeometryProcessor;
+
+/*
+ * Originally these were both in the processor unit test header, but then it seemed to cause linker
+ * problems on android.
+ */
+template<>
+SkTArray<GrProcessorTestFactory<GrFragmentProcessor>*, true>*
+GrProcessorTestFactory<GrFragmentProcessor>::GetFactories() {
+ static SkTArray<GrProcessorTestFactory<GrFragmentProcessor>*, true> gFactories;
+ return &gFactories;
+}
+
+template<>
+SkTArray<GrProcessorTestFactory<GrXPFactory>*, true>*
+GrProcessorTestFactory<GrXPFactory>::GetFactories() {
+ static SkTArray<GrProcessorTestFactory<GrXPFactory>*, true> gFactories;
+ return &gFactories;
+}
+
+template<>
+SkTArray<GrProcessorTestFactory<GrGeometryProcessor>*, true>*
+GrProcessorTestFactory<GrGeometryProcessor>::GetFactories() {
+ static SkTArray<GrProcessorTestFactory<GrGeometryProcessor>*, true> gFactories;
+ return &gFactories;
+}
+
+/*
+ * To ensure we always have successful static initialization, before creating from the factories
+ * we verify the count is as expected. If a new factory is added, then these numbers must be
+ * manually adjusted.
+ */
+static const int kFPFactoryCount = 41;
+static const int kGPFactoryCount = 14;
+static const int kXPFactoryCount = 6;
+
+template<>
+void GrProcessorTestFactory<GrFragmentProcessor>::VerifyFactoryCount() {
+ if (kFPFactoryCount != GetFactories()->count()) {
+ SkFAIL("Wrong number of fragment processor factories!");
+ }
+}
+
+template<>
+void GrProcessorTestFactory<GrGeometryProcessor>::VerifyFactoryCount() {
+ if (kGPFactoryCount != GetFactories()->count()) {
+ SkFAIL("Wrong number of geometry processor factories!");
+ }
+}
+
+template<>
+void GrProcessorTestFactory<GrXPFactory>::VerifyFactoryCount() {
+ if (kXPFactoryCount != GetFactories()->count()) {
+ SkFAIL("Wrong number of xp factory factories!");
+ }
+}
+
+#endif
+
+
+// We use a global pool protected by a mutex (spinlock). Chrome may use the same GrContext on
+// different threads. The GrContext is not used concurrently on different threads and there is a
+// memory barrier between accesses of a context on different threads. Also, there may be multiple
+// GrContexts and those contexts may be in use concurrently on different threads.
+namespace {
+static SkSpinlock gProcessorSpinlock;
+class MemoryPoolAccessor {
+public:
+
+// We know in the Android framework there is only one GrContext.
+#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
+ MemoryPoolAccessor() {}
+ ~MemoryPoolAccessor() {}
+#else
+ MemoryPoolAccessor() { gProcessorSpinlock.acquire(); }
+ ~MemoryPoolAccessor() { gProcessorSpinlock.release(); }
+#endif
+
+ GrMemoryPool* pool() const {
+ static GrMemoryPool gPool(4096, 4096);
+ return &gPool;
+ }
+};
+}
+
+int32_t GrProcessor::gCurrProcessorClassID = GrProcessor::kIllegalProcessorClassID;
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrProcessor::~GrProcessor() {}
+
+void GrProcessor::addTextureAccess(const GrTextureAccess* access) {
+ fTextureAccesses.push_back(access);
+ this->addGpuResource(access->getProgramTexture());
+}
+
+void GrProcessor::addBufferAccess(const GrBufferAccess* access) {
+ fBufferAccesses.push_back(access);
+ this->addGpuResource(access->getProgramBuffer());
+}
+
+void* GrProcessor::operator new(size_t size) {
+ return MemoryPoolAccessor().pool()->allocate(size);
+}
+
+void GrProcessor::operator delete(void* target) {
+ return MemoryPoolAccessor().pool()->release(target);
+}
+
+bool GrProcessor::hasSameSamplers(const GrProcessor& that) const {
+ if (this->numTextures() != that.numTextures() || this->numBuffers() != that.numBuffers()) {
+ return false;
+ }
+ for (int i = 0; i < this->numTextures(); ++i) {
+ if (this->textureAccess(i) != that.textureAccess(i)) {
+ return false;
+ }
+ }
+ for (int i = 0; i < this->numBuffers(); ++i) {
+ if (this->bufferAccess(i) != that.bufferAccess(i)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Initial static variable from GrXPFactory
+int32_t GrXPFactory::gCurrXPFClassID =
+ GrXPFactory::kIllegalXPFClassID;
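GrProcessor routes its class-level operator new/delete through a global GrMemoryPool guarded by a spinlock (the lock is skipped for the Android framework, where a single GrContext is assumed). A standalone sketch of the same pooled-allocation pattern, using std::mutex and malloc/free as stand-ins for Skia's spinlock and pool:

#include <cstddef>
#include <cstdlib>
#include <mutex>

// Sketch: a trivially simple "pool" standing in for GrMemoryPool.
class Pool {
public:
    void* allocate(size_t size) { return std::malloc(size); }
    void  release(void* ptr)    { std::free(ptr); }
};

static std::mutex gPoolMutex;

static Pool* pool() {
    static Pool gPool;
    return &gPool;
}

class PooledObject {
public:
    // Class-specific operator new/delete, like GrProcessor's, so every instance
    // comes from (and returns to) the shared pool.
    void* operator new(size_t size) {
        std::lock_guard<std::mutex> lock(gPoolMutex);
        return pool()->allocate(size);
    }
    void operator delete(void* target) {
        std::lock_guard<std::mutex> lock(gPoolMutex);
        pool()->release(target);
    }
    int fValue = 0;
};

int main() {
    PooledObject* obj = new PooledObject();  // allocated from the pool
    delete obj;                              // returned to the pool
    return 0;
}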
diff --git a/gfx/skia/skia/src/gpu/GrProcessorUnitTest.cpp b/gfx/skia/skia/src/gpu/GrProcessorUnitTest.cpp
new file mode 100644
index 000000000..3f43389fe
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProcessorUnitTest.cpp
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrProcessorUnitTest.h"
+#include "GrFragmentProcessor.h"
+
+sk_sp<GrFragmentProcessor> GrProcessorUnitTest::MakeChildFP(GrProcessorTestData* data) {
+#if SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
+ sk_sp<GrFragmentProcessor> fp;
+ do {
+ fp = GrProcessorTestFactory<GrFragmentProcessor>::Make(data);
+ SkASSERT(fp);
+ } while (fp->numChildProcessors() != 0);
+ return fp;
+#else
+ SkFAIL("Should not be called if !SK_ALLOW_STATIC_GLOBAL_INITIALIZERS");
+ return nullptr;
+#endif
+}
diff --git a/gfx/skia/skia/src/gpu/GrProgramDesc.cpp b/gfx/skia/skia/src/gpu/GrProgramDesc.cpp
new file mode 100644
index 000000000..cfcdbb59c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProgramDesc.cpp
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "GrProgramDesc.h"
+
+#include "GrProcessor.h"
+#include "GrPipeline.h"
+#include "GrRenderTargetPriv.h"
+#include "SkChecksum.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLCaps.h"
+
+static uint16_t sampler_key(GrSLType samplerType, GrPixelConfig config, GrShaderFlags visibility,
+ const GrGLSLCaps& caps) {
+ enum {
+ kFirstSamplerType = kTexture2DSampler_GrSLType,
+ kLastSamplerType = kTextureBufferSampler_GrSLType,
+ kSamplerTypeKeyBits = 4
+ };
+ GR_STATIC_ASSERT(kLastSamplerType - kFirstSamplerType < (1 << kSamplerTypeKeyBits));
+
+ SkASSERT((int)samplerType >= kFirstSamplerType && (int)samplerType <= kLastSamplerType);
+ int samplerTypeKey = samplerType - kFirstSamplerType;
+
+ return SkToU16(caps.configTextureSwizzle(config).asKey() |
+ (samplerTypeKey << 8) |
+ (caps.samplerPrecision(config, visibility) << (8 + kSamplerTypeKeyBits)));
+}
+
+static void add_sampler_keys(GrProcessorKeyBuilder* b, const GrProcessor& proc,
+ const GrGLSLCaps& caps) {
+ int numTextures = proc.numTextures();
+ int numSamplers = numTextures + proc.numBuffers();
+ // Need two bytes per key (swizzle, sampler type, and precision).
+ int word32Count = (numSamplers + 1) / 2;
+ if (0 == word32Count) {
+ return;
+ }
+ uint16_t* k16 = SkTCast<uint16_t*>(b->add32n(word32Count));
+ int i = 0;
+ for (; i < numTextures; ++i) {
+ const GrTextureAccess& access = proc.textureAccess(i);
+ const GrTexture* tex = access.getTexture();
+ k16[i] = sampler_key(tex->samplerType(), tex->config(), access.getVisibility(), caps);
+ }
+ for (; i < numSamplers; ++i) {
+ const GrBufferAccess& access = proc.bufferAccess(i - numTextures);
+ k16[i] = sampler_key(kTextureBufferSampler_GrSLType, access.texelConfig(),
+ access.visibility(), caps);
+ }
+ // zero the last 16 bits if the number of samplers is odd.
+ if (numSamplers & 0x1) {
+ k16[numSamplers] = 0;
+ }
+}
+
+/**
+ * A function which emits a meta key into the key builder. This is required because shader code may
+ * be dependent on properties of the effect that the effect itself doesn't use
+ * in its key (e.g. the pixel format of textures used). So we create a meta-key for
+ * every effect using this function. It is also responsible for inserting the effect's class ID
+ * which must be different for every GrProcessor subclass. It can fail if an effect uses too many
+ * transforms, etc., for the space allotted in the meta-key. NOTE: both FPs and GPs share this
+ * function because it is hairy, though FPs do not have attribs, and GPs do not have transforms.
+ */
+static bool gen_meta_key(const GrProcessor& proc,
+ const GrGLSLCaps& glslCaps,
+ uint32_t transformKey,
+ GrProcessorKeyBuilder* b) {
+ size_t processorKeySize = b->size();
+ uint32_t classID = proc.classID();
+
+ // Currently we allow 16 bits for the class id and the overall processor key size.
+ static const uint32_t kMetaKeyInvalidMask = ~((uint32_t)SK_MaxU16);
+ if ((processorKeySize | classID) & kMetaKeyInvalidMask) {
+ return false;
+ }
+
+ add_sampler_keys(b, proc, glslCaps);
+
+ uint32_t* key = b->add32n(2);
+ key[0] = (classID << 16) | SkToU32(processorKeySize);
+ key[1] = transformKey;
+ return true;
+}
+
+static bool gen_frag_proc_and_meta_keys(const GrPrimitiveProcessor& primProc,
+ const GrFragmentProcessor& fp,
+ const GrGLSLCaps& glslCaps,
+ GrProcessorKeyBuilder* b) {
+ for (int i = 0; i < fp.numChildProcessors(); ++i) {
+ if (!gen_frag_proc_and_meta_keys(primProc, fp.childProcessor(i), glslCaps, b)) {
+ return false;
+ }
+ }
+
+ fp.getGLSLProcessorKey(glslCaps, b);
+
+ return gen_meta_key(fp, glslCaps, primProc.getTransformKey(fp.coordTransforms(),
+ fp.numCoordTransforms()), b);
+}
+
+bool GrProgramDesc::Build(GrProgramDesc* desc,
+ const GrPrimitiveProcessor& primProc,
+ bool hasPointSize,
+ const GrPipeline& pipeline,
+ const GrGLSLCaps& glslCaps) {
+ // The descriptor is used as a cache key. Thus when a field of the
+ // descriptor will not affect program generation (because of the attribute
+ // bindings in use or other descriptor field settings) it should be set
+ // to a canonical value to avoid duplicate programs with different keys.
+
+ GR_STATIC_ASSERT(0 == kProcessorKeysOffset % sizeof(uint32_t));
+ // Make room for everything up to the effect keys.
+ desc->key().reset();
+ desc->key().push_back_n(kProcessorKeysOffset);
+
+ GrProcessorKeyBuilder b(&desc->key());
+
+ primProc.getGLSLProcessorKey(glslCaps, &b);
+ if (!gen_meta_key(primProc, glslCaps, 0, &b)) {
+ desc->key().reset();
+ return false;
+ }
+ GrProcessor::RequiredFeatures requiredFeatures = primProc.requiredFeatures();
+
+ for (int i = 0; i < pipeline.numFragmentProcessors(); ++i) {
+ const GrFragmentProcessor& fp = pipeline.getFragmentProcessor(i);
+ if (!gen_frag_proc_and_meta_keys(primProc, fp, glslCaps, &b)) {
+ desc->key().reset();
+ return false;
+ }
+ requiredFeatures |= fp.requiredFeatures();
+ }
+
+ const GrXferProcessor& xp = pipeline.getXferProcessor();
+ xp.getGLSLProcessorKey(glslCaps, &b);
+ if (!gen_meta_key(xp, glslCaps, 0, &b)) {
+ desc->key().reset();
+ return false;
+ }
+ requiredFeatures |= xp.requiredFeatures();
+
+ // --------DO NOT MOVE HEADER ABOVE THIS LINE--------------------------------------------------
+ // Because header is a pointer into the dynamic array, we can't push any new data into the key
+ // below here.
+ KeyHeader* header = desc->atOffset<KeyHeader, kHeaderOffset>();
+
+ // make sure any padding in the header is zeroed.
+ memset(header, 0, kHeaderSize);
+
+ GrRenderTarget* rt = pipeline.getRenderTarget();
+
+ if (requiredFeatures & (GrProcessor::kFragmentPosition_RequiredFeature |
+ GrProcessor::kSampleLocations_RequiredFeature)) {
+ header->fSurfaceOriginKey = GrGLSLFragmentShaderBuilder::KeyForSurfaceOrigin(rt->origin());
+ } else {
+ header->fSurfaceOriginKey = 0;
+ }
+
+ if (requiredFeatures & GrProcessor::kSampleLocations_RequiredFeature) {
+ SkASSERT(pipeline.isHWAntialiasState());
+ header->fSamplePatternKey =
+ rt->renderTargetPriv().getMultisampleSpecs(pipeline.getStencil()).fUniqueID;
+ } else {
+ header->fSamplePatternKey = 0;
+ }
+
+ header->fOutputSwizzle = glslCaps.configOutputSwizzle(rt->config()).asKey();
+
+ header->fIgnoresCoverage = pipeline.ignoresCoverage() ? 1 : 0;
+
+ header->fSnapVerticesToPixelCenters = pipeline.snapVerticesToPixelCenters();
+ header->fColorFragmentProcessorCnt = pipeline.numColorFragmentProcessors();
+ header->fCoverageFragmentProcessorCnt = pipeline.numCoverageFragmentProcessors();
+ // Fail if the client requested more processors than the key can fit.
+ if (header->fColorFragmentProcessorCnt != pipeline.numColorFragmentProcessors() ||
+ header->fCoverageFragmentProcessorCnt != pipeline.numCoverageFragmentProcessors()) {
+ return false;
+ }
+ header->fHasPointSize = hasPointSize ? 1 : 0;
+ return true;
+}
diff --git a/gfx/skia/skia/src/gpu/GrProgramDesc.h b/gfx/skia/skia/src/gpu/GrProgramDesc.h
new file mode 100644
index 000000000..f304ec53e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProgramDesc.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrProgramDesc_DEFINED
+#define GrProgramDesc_DEFINED
+
+#include "GrColor.h"
+#include "GrTypesPriv.h"
+#include "SkOpts.h"
+#include "SkTArray.h"
+
+class GrGLSLCaps;
+class GrPipeline;
+class GrPrimitiveProcessor;
+
+/** This class describes a program to generate. It also serves as a program cache key */
+class GrProgramDesc {
+public:
+ // Creates an uninitialized key that must be populated by GrGpu::buildProgramDesc()
+ GrProgramDesc() {}
+
+ /**
+ * Builds a program descriptor. Before the descriptor can be used, the client must call finalize
+ * on the returned GrProgramDesc.
+ *
+     * @param GrPrimitiveProcessor The primitive processor for the draw.
+ * @param hasPointSize Controls whether the shader will output a point size.
+ * @param GrPipeline The optimized drawstate. The descriptor will represent a program
+ * which this optstate can use to draw with. The optstate contains
+ * general draw information, as well as the specific color, geometry,
+ * and coverage stages which will be used to generate the GL Program for
+ * this optstate.
+ * @param GrGLSLCaps Capabilities of the GLSL backend.
+ * @param GrProgramDesc The built and finalized descriptor
+ **/
+ static bool Build(GrProgramDesc*,
+ const GrPrimitiveProcessor&,
+ bool hasPointSize,
+ const GrPipeline&,
+ const GrGLSLCaps&);
+
+ // Returns this as a uint32_t array to be used as a key in the program cache.
+ const uint32_t* asKey() const {
+ return reinterpret_cast<const uint32_t*>(fKey.begin());
+ }
+
+ // Gets the number of bytes in asKey(). It will be a 4-byte aligned value. When comparing two
+ // keys the size of either key can be used with memcmp() since the lengths themselves begin the
+ // keys and thus the memcmp will exit early if the keys are of different lengths.
+ uint32_t keyLength() const { return *this->atOffset<uint32_t, kLengthOffset>(); }
+
+    // Gets a checksum of the key. Can be used as a hash value for a fast lookup in a cache.
+ uint32_t getChecksum() const { return *this->atOffset<uint32_t, kChecksumOffset>(); }
+
+ GrProgramDesc& operator= (const GrProgramDesc& other) {
+ uint32_t keyLength = other.keyLength();
+ fKey.reset(SkToInt(keyLength));
+ memcpy(fKey.begin(), other.fKey.begin(), keyLength);
+ return *this;
+ }
+
+ bool operator== (const GrProgramDesc& that) const {
+ SkASSERT(SkIsAlign4(this->keyLength()));
+ int l = this->keyLength() >> 2;
+ const uint32_t* aKey = this->asKey();
+ const uint32_t* bKey = that.asKey();
+ for (int i = 0; i < l; ++i) {
+ if (aKey[i] != bKey[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool operator!= (const GrProgramDesc& other) const {
+ return !(*this == other);
+ }
+
+ static bool Less(const GrProgramDesc& a, const GrProgramDesc& b) {
+ SkASSERT(SkIsAlign4(a.keyLength()));
+ int l = a.keyLength() >> 2;
+ const uint32_t* aKey = a.asKey();
+ const uint32_t* bKey = b.asKey();
+ for (int i = 0; i < l; ++i) {
+ if (aKey[i] != bKey[i]) {
+ return aKey[i] < bKey[i] ? true : false;
+ }
+ }
+ return false;
+ }
+
+ struct KeyHeader {
+ // Set to uniquely identify the sample pattern, or 0 if the shader doesn't use sample
+ // locations.
+ uint8_t fSamplePatternKey;
+        // Set to uniquely identify any swizzling of the shader's output color(s).
+ uint8_t fOutputSwizzle;
+ uint8_t fColorFragmentProcessorCnt : 4;
+ uint8_t fCoverageFragmentProcessorCnt : 4;
+ // Set to uniquely identify the rt's origin, or 0 if the shader does not require this info.
+ uint8_t fSurfaceOriginKey : 2;
+ uint8_t fIgnoresCoverage : 1;
+ uint8_t fSnapVerticesToPixelCenters : 1;
+ uint8_t fHasPointSize : 1;
+ uint8_t fPad : 3;
+ };
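+ // Two full bytes, one byte of packed 4-bit counts, and one byte of packed flag bits: the
+ // header occupies exactly four bytes, as the assert below checks.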
+ GR_STATIC_ASSERT(sizeof(KeyHeader) == 4);
+
+ // This should really only be used internally; base classes should return their own headers.
+ const KeyHeader& header() const { return *this->atOffset<KeyHeader, kHeaderOffset>(); }
+
+ void finalize() {
+ int keyLength = fKey.count();
+ SkASSERT(0 == (keyLength % 4));
+ *(this->atOffset<uint32_t, GrProgramDesc::kLengthOffset>()) = SkToU32(keyLength);
+
+ uint32_t* checksum = this->atOffset<uint32_t, GrProgramDesc::kChecksumOffset>();
+ *checksum = 0; // We'll hash through these bytes, so make sure they're initialized.
+ *checksum = SkOpts::hash(fKey.begin(), keyLength);
+ }
+
+protected:
+ template<typename T, size_t OFFSET> T* atOffset() {
+ return reinterpret_cast<T*>(reinterpret_cast<intptr_t>(fKey.begin()) + OFFSET);
+ }
+
+ template<typename T, size_t OFFSET> const T* atOffset() const {
+ return reinterpret_cast<const T*>(reinterpret_cast<intptr_t>(fKey.begin()) + OFFSET);
+ }
+
+ // The key, stored in fKey, is composed of four parts:
+ // 1. uint32_t for total key length.
+ // 2. uint32_t for a checksum.
+ // 3. Header struct defined above.
+ // 4. A Backend specific payload which includes the per-processor keys.
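+ // With 4-byte lengths/checksums and the 4-byte header asserted above, these work out to
+ // offsets 0, 4, 8, and 12 respectively.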
+ enum KeyOffsets {
+ // Part 1.
+ kLengthOffset = 0,
+ // Part 2.
+ kChecksumOffset = kLengthOffset + sizeof(uint32_t),
+ // Part 3.
+ kHeaderOffset = kChecksumOffset + sizeof(uint32_t),
+ kHeaderSize = SkAlign4(sizeof(KeyHeader)),
+ // Part 4.
+ // This is the offset into the backend-specific part of the key, which includes
+ // per-processor keys.
+ kProcessorKeysOffset = kHeaderOffset + kHeaderSize,
+ };
+
+ enum {
+ kMaxPreallocProcessors = 8,
+ kIntsPerProcessor = 4, // This is an overestimate of the average effect key size.
+ kPreAllocSize = kHeaderOffset + kHeaderSize +
+ kMaxPreallocProcessors * sizeof(uint32_t) * kIntsPerProcessor,
+ };
+
+ SkSTArray<kPreAllocSize, uint8_t, true>& key() { return fKey; }
+ const SkSTArray<kPreAllocSize, uint8_t, true>& key() const { return fKey; }
+
+private:
+ SkSTArray<kPreAllocSize, uint8_t, true> fKey;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrProgramElement.cpp b/gfx/skia/skia/src/gpu/GrProgramElement.cpp
new file mode 100644
index 000000000..f1f3f4134
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrProgramElement.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrProgramElement.h"
+#include "GrGpuResourceRef.h"
+#include "SkAtomics.h"
+
+uint32_t GrProgramElement::CreateUniqueID() {
+ static int32_t gUniqueID = SK_InvalidUniqueID;
+ uint32_t id;
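+ // Retry if the incremented value ever collides with SK_InvalidUniqueID (e.g. on wrap-around).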
+ do {
+ id = static_cast<uint32_t>(sk_atomic_inc(&gUniqueID) + 1);
+ } while (id == SK_InvalidUniqueID);
+ return id;
+}
+
+void GrProgramElement::addPendingIOs() const {
+ for (int i = 0; i < fGpuResources.count(); ++i) {
+ fGpuResources[i]->markPendingIO();
+ }
+}
+
+void GrProgramElement::removeRefs() const {
+ for (int i = 0; i < fGpuResources.count(); ++i) {
+ fGpuResources[i]->removeRef();
+ }
+}
+
+void GrProgramElement::pendingIOComplete() const {
+ for (int i = 0; i < fGpuResources.count(); ++i) {
+ fGpuResources[i]->pendingIOComplete();
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/GrQuad.h b/gfx/skia/skia/src/gpu/GrQuad.h
new file mode 100644
index 000000000..3a202c61c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrQuad.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrQuad_DEFINED
+#define GrQuad_DEFINED
+
+#include "SkPoint.h"
+#include "SkMatrix.h"
+#include "SkMatrixPriv.h"
+
+/**
+ * GrQuad is a collection of 4 points which can be used to represent an arbitrary quadrilateral
+ */
+class GrQuad {
+public:
+ GrQuad() {}
+
+ GrQuad(const GrQuad& that) {
+ *this = that;
+ }
+
+ explicit GrQuad(const SkRect& rect) {
+ this->set(rect);
+ }
+
+ void set(const SkRect& rect) {
+ fPoints->setRectFan(rect.fLeft, rect.fTop, rect.fRight, rect.fBottom);
+ }
+
+ void map(const SkMatrix& matrix) {
+ matrix.mapPoints(fPoints, kNumPoints);
+ }
+
+ void setFromMappedRect(const SkRect& rect, const SkMatrix& matrix) {
+ SkMatrixPriv::SetMappedRectFan(matrix, rect, fPoints);
+ }
+
+ const GrQuad& operator=(const GrQuad& that) {
+ memcpy(fPoints, that.fPoints, sizeof(SkPoint) * kNumPoints);
+ return *this;
+ }
+
+ SkPoint* points() {
+ return fPoints;
+ }
+
+ const SkPoint* points() const {
+ return fPoints;
+ }
+
+ const SkPoint& point(int i) const {
+ SkASSERT(i < kNumPoints);
+ return fPoints[i];
+ }
+
+private:
+ static const int kNumPoints = 4;
+ SkPoint fPoints[kNumPoints];
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrRect.h b/gfx/skia/skia/src/gpu/GrRect.h
new file mode 100644
index 000000000..14130f831
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRect.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRect_DEFINED
+#define GrRect_DEFINED
+
+#include "SkTypes.h"
+#include "SkRect.h"
+
+struct GrIRect16 {
+ int16_t fLeft, fTop, fRight, fBottom;
+
+ static GrIRect16 SK_WARN_UNUSED_RESULT MakeEmpty() {
+ GrIRect16 r;
+ r.setEmpty();
+ return r;
+ }
+
+ static GrIRect16 SK_WARN_UNUSED_RESULT MakeWH(int16_t w, int16_t h) {
+ GrIRect16 r;
+ r.set(0, 0, w, h);
+ return r;
+ }
+
+ static GrIRect16 SK_WARN_UNUSED_RESULT MakeXYWH(int16_t x, int16_t y, int16_t w, int16_t h) {
+ GrIRect16 r;
+ r.set(x, y, x + w, y + h);
+ return r;
+ }
+
+ int width() const { return fRight - fLeft; }
+ int height() const { return fBottom - fTop; }
+ int area() const { return this->width() * this->height(); }
+ bool isEmpty() const { return fLeft >= fRight || fTop >= fBottom; }
+
+ void setEmpty() { memset(this, 0, sizeof(*this)); }
+
+ void set(int16_t left, int16_t top, int16_t right, int16_t bottom) {
+ fLeft = left;
+ fTop = top;
+ fRight = right;
+ fBottom = bottom;
+ }
+
+ void set(const SkIRect& r) {
+ fLeft = SkToS16(r.fLeft);
+ fTop = SkToS16(r.fTop);
+ fRight = SkToS16(r.fRight);
+ fBottom = SkToS16(r.fBottom);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrRectanizer.h b/gfx/skia/skia/src/gpu/GrRectanizer.h
new file mode 100644
index 000000000..dc697c41d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRectanizer.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRectanizer_DEFINED
+#define GrRectanizer_DEFINED
+
+#include "GrTypes.h"
+
+struct SkIPoint16;
+
+class GrRectanizer {
+public:
+ GrRectanizer(int width, int height) : fWidth(width), fHeight(height) {
+ SkASSERT(width >= 0);
+ SkASSERT(height >= 0);
+ }
+
+ virtual ~GrRectanizer() {}
+
+ virtual void reset() = 0;
+
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+
+ // Attempt to add a rect. Return true on success; false on failure. If
+ // successful the position in the atlas is returned in 'loc'.
+ virtual bool addRect(int width, int height, SkIPoint16* loc) = 0;
+ virtual float percentFull() const = 0;
+
+ /**
+ * Our factory, which returns the subclass du jour
+ */
+ static GrRectanizer* Factory(int width, int height);
+
+private:
+ int fWidth;
+ int fHeight;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrRectanizer_pow2.cpp b/gfx/skia/skia/src/gpu/GrRectanizer_pow2.cpp
new file mode 100644
index 000000000..bb41d7149
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRectanizer_pow2.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrRectanizer_pow2.h"
+
+bool GrRectanizerPow2::addRect(int width, int height, SkIPoint16* loc) {
+ if ((unsigned)width > (unsigned)this->width() ||
+ (unsigned)height > (unsigned)this->height()) {
+ return false;
+ }
+
+ int32_t area = width * height; // computed here since height will be modified
+
+ height = GrNextPow2(height);
+ if (height < kMIN_HEIGHT_POW2) {
+ height = kMIN_HEIGHT_POW2;
+ }
+
+ Row* row = &fRows[HeightToRowIndex(height)];
+ SkASSERT(row->fRowHeight == 0 || row->fRowHeight == height);
+
+ if (0 == row->fRowHeight) {
+ if (!this->canAddStrip(height)) {
+ return false;
+ }
+ this->initRow(row, height);
+ } else {
+ if (!row->canAddWidth(width, this->width())) {
+ if (!this->canAddStrip(height)) {
+ return false;
+ }
+ // that row is now "full", so retarget our Row record for
+ // another one
+ this->initRow(row, height);
+ }
+ }
+
+ SkASSERT(row->fRowHeight == height);
+ SkASSERT(row->canAddWidth(width, this->width()));
+ *loc = row->fLoc;
+ row->fLoc.fX += width;
+
+ SkASSERT(row->fLoc.fX <= this->width());
+ SkASSERT(row->fLoc.fY <= this->height());
+ SkASSERT(fNextStripY <= this->height());
+ fAreaSoFar += area;
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// factory is now in GrRectanizer_skyline.cpp
+//GrRectanizer* GrRectanizer::Factory(int width, int height) {
+// return new GrRectanizerPow2 (width, height);
+//}
diff --git a/gfx/skia/skia/src/gpu/GrRectanizer_pow2.h b/gfx/skia/skia/src/gpu/GrRectanizer_pow2.h
new file mode 100644
index 000000000..296e0520b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRectanizer_pow2.h
@@ -0,0 +1,80 @@
+/*
+* Copyright 2014 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrRectanizer_pow2_DEFINED
+#define GrRectanizer_pow2_DEFINED
+
+#include "GrRectanizer.h"
+#include "SkMathPriv.h"
+#include "SkPoint.h"
+
+// This Rectanizer quantizes the incoming rects to powers of 2. Each power
+// of two can have, at most, one active row/shelf. Once a row/shelf for
+// a particular power of two gets full, its fRows entry is recycled to point
+// to a new row.
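+// (For example, a rect of height 35 lands on the 64-high shelf, leaving the extra 29 rows
+// of that strip unused.)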
+// The skyline algorithm almost always provides a better packing.
+class GrRectanizerPow2 : public GrRectanizer {
+public:
+ GrRectanizerPow2(int w, int h) : INHERITED(w, h) {
+ this->reset();
+ }
+
+ virtual ~GrRectanizerPow2() { }
+
+ void reset() override {
+ fNextStripY = 0;
+ fAreaSoFar = 0;
+ sk_bzero(fRows, sizeof(fRows));
+ }
+
+ bool addRect(int w, int h, SkIPoint16* loc) override;
+
+ float percentFull() const override {
+ return fAreaSoFar / ((float)this->width() * this->height());
+ }
+
+private:
+ static const int kMIN_HEIGHT_POW2 = 2;
+ static const int kMaxExponent = 16;
+
+ struct Row {
+ SkIPoint16 fLoc;
+ // fRowHeight is actually known by this struct's position in fRows
+ // but it is used to signal if there exists an open row of this height
+ int fRowHeight;
+
+ bool canAddWidth(int width, int containerWidth) const {
+ return fLoc.fX + width <= containerWidth;
+ }
+ };
+
+ Row fRows[kMaxExponent]; // 0-th entry will be unused
+
+ int fNextStripY;
+ int32_t fAreaSoFar;
+
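+ // Effectively ceil(log2(height)); for the power-of-two heights produced by addRect() this
+ // is simply log2(height), so each shelf height gets its own fRows slot.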
+ static int HeightToRowIndex(int height) {
+ SkASSERT(height >= kMIN_HEIGHT_POW2);
+ int index = 32 - SkCLZ(height - 1);
+ SkASSERT(index < kMaxExponent);
+ return index;
+ }
+
+ bool canAddStrip(int height) const {
+ return fNextStripY + height <= this->height();
+ }
+
+ void initRow(Row* row, int rowHeight) {
+ row->fLoc.set(0, fNextStripY);
+ row->fRowHeight = rowHeight;
+ fNextStripY += rowHeight;
+ }
+
+ typedef GrRectanizer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrRectanizer_skyline.cpp b/gfx/skia/skia/src/gpu/GrRectanizer_skyline.cpp
new file mode 100644
index 000000000..bbb9859d8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRectanizer_skyline.cpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrRectanizer_skyline.h"
+#include "SkPoint.h"
+
+bool GrRectanizerSkyline::addRect(int width, int height, SkIPoint16* loc) {
+ if ((unsigned)width > (unsigned)this->width() ||
+ (unsigned)height > (unsigned)this->height()) {
+ return false;
+ }
+
+ // find position for new rectangle
+ int bestWidth = this->width() + 1;
+ int bestX;
+ int bestY = this->height() + 1;
+ int bestIndex = -1;
+ for (int i = 0; i < fSkyline.count(); ++i) {
+ int y;
+ if (this->rectangleFits(i, width, height, &y)) {
+ // minimize y position first, then width of skyline
+ if (y < bestY || (y == bestY && fSkyline[i].fWidth < bestWidth)) {
+ bestIndex = i;
+ bestWidth = fSkyline[i].fWidth;
+ bestX = fSkyline[i].fX;
+ bestY = y;
+ }
+ }
+ }
+
+ // add rectangle to skyline
+ if (-1 != bestIndex) {
+ this->addSkylineLevel(bestIndex, bestX, bestY, width, height);
+ loc->fX = bestX;
+ loc->fY = bestY;
+
+ fAreaSoFar += width*height;
+ return true;
+ }
+
+ loc->fX = 0;
+ loc->fY = 0;
+ return false;
+}
+
+bool GrRectanizerSkyline::rectangleFits(int skylineIndex, int width, int height, int* ypos) const {
+ int x = fSkyline[skylineIndex].fX;
+ if (x + width > this->width()) {
+ return false;
+ }
+
+ int widthLeft = width;
+ int i = skylineIndex;
+ int y = fSkyline[skylineIndex].fY;
+ while (widthLeft > 0) {
+ y = SkMax32(y, fSkyline[i].fY);
+ if (y + height > this->height()) {
+ return false;
+ }
+ widthLeft -= fSkyline[i].fWidth;
+ ++i;
+ SkASSERT(i < fSkyline.count() || widthLeft <= 0);
+ }
+
+ *ypos = y;
+ return true;
+}
+
+void GrRectanizerSkyline::addSkylineLevel(int skylineIndex, int x, int y, int width, int height) {
+ SkylineSegment newSegment;
+ newSegment.fX = x;
+ newSegment.fY = y + height;
+ newSegment.fWidth = width;
+ fSkyline.insert(skylineIndex, 1, &newSegment);
+
+ SkASSERT(newSegment.fX + newSegment.fWidth <= this->width());
+ SkASSERT(newSegment.fY <= this->height());
+
+ // delete width of the new skyline segment from following ones
+ for (int i = skylineIndex+1; i < fSkyline.count(); ++i) {
+ // The new segment subsumes all or part of fSkyline[i]
+ SkASSERT(fSkyline[i-1].fX <= fSkyline[i].fX);
+
+ if (fSkyline[i].fX < fSkyline[i-1].fX + fSkyline[i-1].fWidth) {
+ int shrink = fSkyline[i-1].fX + fSkyline[i-1].fWidth - fSkyline[i].fX;
+
+ fSkyline[i].fX += shrink;
+ fSkyline[i].fWidth -= shrink;
+
+ if (fSkyline[i].fWidth <= 0) {
+ // fully consumed
+ fSkyline.remove(i);
+ --i;
+ } else {
+ // only partially consumed
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+
+ // merge adjacent skyline segments that are now at the same height
+ for (int i = 0; i < fSkyline.count()-1; ++i) {
+ if (fSkyline[i].fY == fSkyline[i+1].fY) {
+ fSkyline[i].fWidth += fSkyline[i+1].fWidth;
+ fSkyline.remove(i+1);
+ --i;
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrRectanizer* GrRectanizer::Factory(int width, int height) {
+ return new GrRectanizerSkyline(width, height);
+}
diff --git a/gfx/skia/skia/src/gpu/GrRectanizer_skyline.h b/gfx/skia/skia/src/gpu/GrRectanizer_skyline.h
new file mode 100644
index 000000000..a1c571ef4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRectanizer_skyline.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRectanizer_skyline_DEFINED
+#define GrRectanizer_skyline_DEFINED
+
+#include "GrRectanizer.h"
+#include "SkTDArray.h"
+
+// Pack rectangles and track the current silhouette
+// Based, in part, on Jukka Jylanki's work at http://clb.demon.fi
+class GrRectanizerSkyline : public GrRectanizer {
+public:
+ GrRectanizerSkyline(int w, int h) : INHERITED(w, h) {
+ this->reset();
+ }
+
+ ~GrRectanizerSkyline() override { }
+
+ void reset() override {
+ fAreaSoFar = 0;
+ fSkyline.reset();
+ SkylineSegment* seg = fSkyline.append(1);
+ seg->fX = 0;
+ seg->fY = 0;
+ seg->fWidth = this->width();
+ }
+
+ bool addRect(int w, int h, SkIPoint16* loc) override;
+
+ float percentFull() const override {
+ return fAreaSoFar / ((float)this->width() * this->height());
+ }
+
+private:
+ struct SkylineSegment {
+ int fX;
+ int fY;
+ int fWidth;
+ };
+
+ SkTDArray<SkylineSegment> fSkyline;
+
+ int32_t fAreaSoFar;
+
+ // Can a width x height rectangle fit in the free space represented by
+ // the skyline segments >= 'skylineIndex'? If so, return true and fill in
+ // 'y' with the y-location at which it fits (the x location is pulled from
+ // 'skylineIndex's segment).
+ bool rectangleFits(int skylineIndex, int width, int height, int* y) const;
+ // Update the skyline structure to include a width x height rect located
+ // at x,y.
+ void addSkylineLevel(int skylineIndex, int x, int y, int width, int height);
+
+ typedef GrRectanizer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrReducedClip.cpp b/gfx/skia/skia/src/gpu/GrReducedClip.cpp
new file mode 100644
index 000000000..4a912d3b5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrReducedClip.cpp
@@ -0,0 +1,849 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrReducedClip.h"
+
+#include "GrAppliedClip.h"
+#include "GrClip.h"
+#include "GrColor.h"
+#include "GrContextPriv.h"
+#include "GrDrawContext.h"
+#include "GrDrawContextPriv.h"
+#include "GrDrawingManager.h"
+#include "GrFixedClip.h"
+#include "GrPathRenderer.h"
+#include "GrStyle.h"
+#include "GrUserStencilSettings.h"
+
+typedef SkClipStack::Element Element;
+
+/**
+ * There are plenty of optimizations that could be added here. Maybe flips could be folded into
+ * earlier operations. Or would inserting flips and reversing earlier ops ever be a win? Perhaps
+ * for the case where the bounds are kInsideOut_BoundsType. We could restrict earlier operations
+ * based on later intersect operations, and perhaps remove intersect-rects. We could optionally
+ * take a rect in case the caller knows a bound on what is to be drawn through this clip.
+ */
+GrReducedClip::GrReducedClip(const SkClipStack& stack, const SkRect& queryBounds,
+ int maxWindowRectangles) {
+ SkASSERT(!queryBounds.isEmpty());
+ fHasIBounds = false;
+
+ if (stack.isWideOpen()) {
+ fInitialState = InitialState::kAllIn;
+ return;
+ }
+
+ SkClipStack::BoundsType stackBoundsType;
+ SkRect stackBounds;
+ bool iior;
+ stack.getBounds(&stackBounds, &stackBoundsType, &iior);
+
+ if (stackBounds.isEmpty() || GrClip::IsOutsideClip(stackBounds, queryBounds)) {
+ bool insideOut = SkClipStack::kInsideOut_BoundsType == stackBoundsType;
+ fInitialState = insideOut ? InitialState::kAllIn : InitialState::kAllOut;
+ return;
+ }
+
+ if (iior) {
+ // "Is intersection of rects" means the clip is a single rect indicated by the stack bounds.
+ // This should only be true if aa/non-aa status matches among all elements.
+ SkASSERT(SkClipStack::kNormal_BoundsType == stackBoundsType);
+ SkClipStack::Iter iter(stack, SkClipStack::Iter::kTop_IterStart);
+ if (!iter.prev()->isAA() || GrClip::IsPixelAligned(stackBounds)) {
+ // The clip is a non-aa rect. This is the one spot where we can actually implement the
+ // clip (using fIBounds) rather than just telling the caller what it should be.
+ stackBounds.round(&fIBounds);
+ fHasIBounds = true;
+ fInitialState = fIBounds.isEmpty() ? InitialState::kAllOut : InitialState::kAllIn;
+ return;
+ }
+ if (GrClip::IsInsideClip(stackBounds, queryBounds)) {
+ fInitialState = InitialState::kAllIn;
+ return;
+ }
+
+ SkRect tightBounds;
+ SkAssertResult(tightBounds.intersect(stackBounds, queryBounds));
+ fIBounds = GrClip::GetPixelIBounds(tightBounds);
+ SkASSERT(!fIBounds.isEmpty()); // Empty should have been blocked by IsOutsideClip above.
+ fHasIBounds = true;
+
+ // Implement the clip with an AA rect element.
+ fElements.addToHead(stackBounds, SkCanvas::kReplace_Op, true/*doAA*/);
+ fElementsGenID = stack.getTopmostGenID();
+ fRequiresAA = true;
+
+ fInitialState = InitialState::kAllOut;
+ return;
+ }
+
+ SkRect tighterQuery = queryBounds;
+ if (SkClipStack::kNormal_BoundsType == stackBoundsType) {
+ // Tighten the query by introducing a new clip at the stack's pixel boundaries. (This new
+ // clip will be enforced by the scissor through fIBounds.)
+ SkAssertResult(tighterQuery.intersect(GrClip::GetPixelBounds(stackBounds)));
+ }
+
+ fIBounds = GrClip::GetPixelIBounds(tighterQuery);
+ SkASSERT(!fIBounds.isEmpty()); // Empty should have been blocked by IsOutsideClip above.
+ fHasIBounds = true;
+
+ // Now that we have determined the bounds to use and filtered out the trivial cases, call the
+ // helper that actually walks the stack.
+ this->walkStack(stack, tighterQuery, maxWindowRectangles);
+
+ if (fWindowRects.count() < maxWindowRectangles) {
+ this->addInteriorWindowRectangles(maxWindowRectangles);
+ }
+}
+
+void GrReducedClip::walkStack(const SkClipStack& stack, const SkRect& queryBounds,
+ int maxWindowRectangles) {
+ // walk backwards until we get to:
+ // a) the beginning
+ // b) an operation that is known to make the bounds all inside/outside
+ // c) a replace operation
+
+ enum class InitialTriState {
+ kUnknown = -1,
+ kAllIn = (int)GrReducedClip::InitialState::kAllIn,
+ kAllOut = (int)GrReducedClip::InitialState::kAllOut
+ } initialTriState = InitialTriState::kUnknown;
+
+ // During our backwards walk, track whether we've seen ops that either grow or shrink the clip.
+ // TODO: track these per saved clip so that we can consider them on the forward pass.
+ bool embiggens = false;
+ bool emsmallens = false;
+
+ // We use a slightly relaxed set of query bounds for element containment tests. This is to
+ // account for floating point rounding error that may have occurred during coord transforms.
+ SkRect relaxedQueryBounds = queryBounds.makeInset(GrClip::kBoundsTolerance,
+ GrClip::kBoundsTolerance);
+
+ SkClipStack::Iter iter(stack, SkClipStack::Iter::kTop_IterStart);
+ int numAAElements = 0;
+ while (InitialTriState::kUnknown == initialTriState) {
+ const Element* element = iter.prev();
+ if (nullptr == element) {
+ initialTriState = InitialTriState::kAllIn;
+ break;
+ }
+ if (SkClipStack::kEmptyGenID == element->getGenID()) {
+ initialTriState = InitialTriState::kAllOut;
+ break;
+ }
+ if (SkClipStack::kWideOpenGenID == element->getGenID()) {
+ initialTriState = InitialTriState::kAllIn;
+ break;
+ }
+
+ bool skippable = false;
+ bool isFlip = false; // does this op just flip the in/out state of every point in the bounds
+
+ switch (element->getOp()) {
+ case SkCanvas::kDifference_Op:
+ // check if the shape subtracted either contains the entire bounds (and makes
+ // the clip empty) or is outside the bounds and therefore can be skipped.
+ if (element->isInverseFilled()) {
+ if (element->contains(relaxedQueryBounds)) {
+ skippable = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ initialTriState = InitialTriState::kAllOut;
+ skippable = true;
+ }
+ } else {
+ if (element->contains(relaxedQueryBounds)) {
+ initialTriState = InitialTriState::kAllOut;
+ skippable = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ skippable = true;
+ } else if (fWindowRects.count() < maxWindowRectangles && !embiggens &&
+ !element->isAA() && Element::kRect_Type == element->getType()) {
+ this->addWindowRectangle(element->getRect(), false);
+ skippable = true;
+ }
+ }
+ if (!skippable) {
+ emsmallens = true;
+ }
+ break;
+ case SkCanvas::kIntersect_Op:
+ // check if the shape intersected contains the entire bounds and therefore can
+ // be skipped or it is outside the entire bounds and therefore makes the clip
+ // empty.
+ if (element->isInverseFilled()) {
+ if (element->contains(relaxedQueryBounds)) {
+ initialTriState = InitialTriState::kAllOut;
+ skippable = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ skippable = true;
+ }
+ } else {
+ if (element->contains(relaxedQueryBounds)) {
+ skippable = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ initialTriState = InitialTriState::kAllOut;
+ skippable = true;
+ } else if (!embiggens && !element->isAA() &&
+ Element::kRect_Type == element->getType()) {
+ // fIBounds and queryBounds have already accounted for this element via
+ // clip stack bounds; here we just apply the non-aa rounding effect.
+ SkIRect nonaaRect;
+ element->getRect().round(&nonaaRect);
+ if (!this->intersectIBounds(nonaaRect)) {
+ return;
+ }
+ skippable = true;
+ }
+ }
+ if (!skippable) {
+ emsmallens = true;
+ }
+ break;
+ case SkCanvas::kUnion_Op:
+ // If the union-ed shape contains the entire bounds then after this element
+ // the bounds is entirely inside the clip. If the union-ed shape is outside the
+ // bounds then this op can be skipped.
+ if (element->isInverseFilled()) {
+ if (element->contains(relaxedQueryBounds)) {
+ skippable = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ initialTriState = InitialTriState::kAllIn;
+ skippable = true;
+ }
+ } else {
+ if (element->contains(relaxedQueryBounds)) {
+ initialTriState = InitialTriState::kAllIn;
+ skippable = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ skippable = true;
+ }
+ }
+ if (!skippable) {
+ embiggens = true;
+ }
+ break;
+ case SkCanvas::kXOR_Op:
+ // If the bounds is entirely inside the shape being xor-ed then the effect is
+ // to flip the inside/outside state of every point in the bounds. We may be
+ // able to take advantage of this in the forward pass. If the xor-ed shape
+ // doesn't intersect the bounds then it can be skipped.
+ if (element->isInverseFilled()) {
+ if (element->contains(relaxedQueryBounds)) {
+ skippable = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ isFlip = true;
+ }
+ } else {
+ if (element->contains(relaxedQueryBounds)) {
+ isFlip = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ skippable = true;
+ }
+ }
+ if (!skippable) {
+ emsmallens = embiggens = true;
+ }
+ break;
+ case SkCanvas::kReverseDifference_Op:
+ // When the bounds is entirely within the rev-diff shape then this behaves like xor
+ // and reverses every point inside the bounds. If the shape is completely outside
+ // the bounds then we know after this element is applied that the bounds will be
+ // all outside the current clip.
+ if (element->isInverseFilled()) {
+ if (element->contains(relaxedQueryBounds)) {
+ initialTriState = InitialTriState::kAllOut;
+ skippable = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ isFlip = true;
+ }
+ } else {
+ if (element->contains(relaxedQueryBounds)) {
+ isFlip = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ initialTriState = InitialTriState::kAllOut;
+ skippable = true;
+ }
+ }
+ if (!skippable) {
+ emsmallens = embiggens = true;
+ }
+ break;
+
+ case SkCanvas::kReplace_Op:
+ // Replace will always terminate our walk. We will either begin the forward walk
+ // at the replace op or detect here that the shape is either completely inside
+ // or completely outside the bounds. In this latter case it can be skipped by
+ // setting the correct value for initialTriState.
+ if (element->isInverseFilled()) {
+ if (element->contains(relaxedQueryBounds)) {
+ initialTriState = InitialTriState::kAllOut;
+ skippable = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ initialTriState = InitialTriState::kAllIn;
+ skippable = true;
+ }
+ } else {
+ if (element->contains(relaxedQueryBounds)) {
+ initialTriState = InitialTriState::kAllIn;
+ skippable = true;
+ } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
+ initialTriState = InitialTriState::kAllOut;
+ skippable = true;
+ } else if (!embiggens && !element->isAA() &&
+ Element::kRect_Type == element->getType()) {
+ // fIBounds and queryBounds have already accounted for this element via
+ // clip stack bounds; here we just apply the non-aa rounding effect.
+ SkIRect nonaaRect;
+ element->getRect().round(&nonaaRect);
+ if (!this->intersectIBounds(nonaaRect)) {
+ return;
+ }
+ initialTriState = InitialTriState::kAllIn;
+ skippable = true;
+ }
+ }
+ if (!skippable) {
+ initialTriState = InitialTriState::kAllOut;
+ embiggens = emsmallens = true;
+ }
+ break;
+ default:
+ SkDEBUGFAIL("Unexpected op.");
+ break;
+ }
+ if (!skippable) {
+ if (0 == fElements.count()) {
+ // This will be the last element. Record the stricter genID.
+ fElementsGenID = element->getGenID();
+ }
+
+ // if it is a flip, change it to a bounds-filling rect
+ if (isFlip) {
+ SkASSERT(SkCanvas::kXOR_Op == element->getOp() ||
+ SkCanvas::kReverseDifference_Op == element->getOp());
+ fElements.addToHead(SkRect::Make(fIBounds), SkCanvas::kReverseDifference_Op, false);
+ } else {
+ Element* newElement = fElements.addToHead(*element);
+ if (newElement->isAA()) {
+ ++numAAElements;
+ }
+ // Intersecting an inverse shape is the same as differencing the non-inverse shape.
+ // Replacing with an inverse shape is the same as setting initialState=kAllIn and
+ // differencing the non-inverse shape.
+ bool isReplace = SkCanvas::kReplace_Op == newElement->getOp();
+ if (newElement->isInverseFilled() &&
+ (SkCanvas::kIntersect_Op == newElement->getOp() || isReplace)) {
+ newElement->invertShapeFillType();
+ newElement->setOp(SkCanvas::kDifference_Op);
+ if (isReplace) {
+ SkASSERT(InitialTriState::kAllOut == initialTriState);
+ initialTriState = InitialTriState::kAllIn;
+ }
+ }
+ }
+ }
+ }
+
+ if ((InitialTriState::kAllOut == initialTriState && !embiggens) ||
+ (InitialTriState::kAllIn == initialTriState && !emsmallens)) {
+ fElements.reset();
+ numAAElements = 0;
+ } else {
+ Element* element = fElements.headIter().get();
+ while (element) {
+ bool skippable = false;
+ switch (element->getOp()) {
+ case SkCanvas::kDifference_Op:
+ // subtracting from the empty set yields the empty set.
+ skippable = InitialTriState::kAllOut == initialTriState;
+ break;
+ case SkCanvas::kIntersect_Op:
+ // intersecting with the empty set yields the empty set
+ if (InitialTriState::kAllOut == initialTriState) {
+ skippable = true;
+ } else {
+ // We can clear to zero and then simply draw the clip element.
+ initialTriState = InitialTriState::kAllOut;
+ element->setOp(SkCanvas::kReplace_Op);
+ }
+ break;
+ case SkCanvas::kUnion_Op:
+ if (InitialTriState::kAllIn == initialTriState) {
+ // unioning the infinite plane with anything is a no-op.
+ skippable = true;
+ } else {
+ // unioning the empty set with a shape is the shape.
+ element->setOp(SkCanvas::kReplace_Op);
+ }
+ break;
+ case SkCanvas::kXOR_Op:
+ if (InitialTriState::kAllOut == initialTriState) {
+ // xor could be changed to diff in the kAllIn case, not sure it's a win.
+ element->setOp(SkCanvas::kReplace_Op);
+ }
+ break;
+ case SkCanvas::kReverseDifference_Op:
+ if (InitialTriState::kAllIn == initialTriState) {
+ // subtracting the whole plane will yield the empty set.
+ skippable = true;
+ initialTriState = InitialTriState::kAllOut;
+ } else {
+ // this picks up flips inserted in the backwards pass.
+ skippable = element->isInverseFilled() ?
+ GrClip::IsOutsideClip(element->getBounds(), queryBounds) :
+ element->contains(relaxedQueryBounds);
+ if (skippable) {
+ initialTriState = InitialTriState::kAllIn;
+ } else {
+ element->setOp(SkCanvas::kReplace_Op);
+ }
+ }
+ break;
+ case SkCanvas::kReplace_Op:
+ skippable = false; // we would have skipped it in the backwards walk if we
+ // could've.
+ break;
+ default:
+ SkDEBUGFAIL("Unexpected op.");
+ break;
+ }
+ if (!skippable) {
+ break;
+ } else {
+ if (element->isAA()) {
+ --numAAElements;
+ }
+ fElements.popHead();
+ element = fElements.headIter().get();
+ }
+ }
+ }
+ fRequiresAA = numAAElements > 0;
+
+ SkASSERT(InitialTriState::kUnknown != initialTriState);
+ fInitialState = static_cast<GrReducedClip::InitialState>(initialTriState);
+}
+
+static bool element_is_pure_subtract(SkCanvas::ClipOp op) {
+ SkASSERT(op >= 0);
+ return op <= SkCanvas::kIntersect_Op;
+
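+ // The asserts below document the ClipOp ordering that the comparison above relies on.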
+ GR_STATIC_ASSERT(0 == SkCanvas::kDifference_Op);
+ GR_STATIC_ASSERT(1 == SkCanvas::kIntersect_Op);
+}
+
+void GrReducedClip::addInteriorWindowRectangles(int maxWindowRectangles) {
+ SkASSERT(fWindowRects.count() < maxWindowRectangles);
+ // Walk backwards through the element list and add window rectangles to the interiors of
+ // "difference" elements. Quit if we encounter an element that may grow the clip.
+ ElementList::Iter iter(fElements, ElementList::Iter::kTail_IterStart);
+ for (; iter.get() && element_is_pure_subtract(iter.get()->getOp()); iter.prev()) {
+ const Element* element = iter.get();
+ if (SkCanvas::kDifference_Op != element->getOp()) {
+ continue;
+ }
+
+ if (Element::kRect_Type == element->getType()) {
+ SkASSERT(element->isAA());
+ this->addWindowRectangle(element->getRect(), true);
+ if (fWindowRects.count() >= maxWindowRectangles) {
+ return;
+ }
+ continue;
+ }
+
+ if (Element::kRRect_Type == element->getType()) {
+ // For round rects we add two overlapping windows in the shape of a plus.
+ const SkRRect& clipRRect = element->getRRect();
+ SkVector insetTL = clipRRect.radii(SkRRect::kUpperLeft_Corner);
+ SkVector insetBR = clipRRect.radii(SkRRect::kLowerRight_Corner);
+ if (SkRRect::kComplex_Type == clipRRect.getType()) {
+ const SkVector& insetTR = clipRRect.radii(SkRRect::kUpperRight_Corner);
+ const SkVector& insetBL = clipRRect.radii(SkRRect::kLowerLeft_Corner);
+ insetTL.fX = SkTMax(insetTL.x(), insetBL.x());
+ insetTL.fY = SkTMax(insetTL.y(), insetTR.y());
+ insetBR.fX = SkTMax(insetBR.x(), insetTR.x());
+ insetBR.fY = SkTMax(insetBR.y(), insetBL.y());
+ }
+ const SkRect& bounds = clipRRect.getBounds();
+ if (insetTL.x() + insetBR.x() >= bounds.width() ||
+ insetTL.y() + insetBR.y() >= bounds.height()) {
+ continue; // The interior "plus" is empty.
+ }
+
+ SkRect horzRect = SkRect::MakeLTRB(bounds.left(), bounds.top() + insetTL.y(),
+ bounds.right(), bounds.bottom() - insetBR.y());
+ this->addWindowRectangle(horzRect, element->isAA());
+ if (fWindowRects.count() >= maxWindowRectangles) {
+ return;
+ }
+
+ SkRect vertRect = SkRect::MakeLTRB(bounds.left() + insetTL.x(), bounds.top(),
+ bounds.right() - insetBR.x(), bounds.bottom());
+ this->addWindowRectangle(vertRect, element->isAA());
+ if (fWindowRects.count() >= maxWindowRectangles) {
+ return;
+ }
+ continue;
+ }
+ }
+}
+
+inline void GrReducedClip::addWindowRectangle(const SkRect& elementInteriorRect, bool elementIsAA) {
+ SkIRect window;
+ if (!elementIsAA) {
+ elementInteriorRect.round(&window);
+ } else {
+ elementInteriorRect.roundIn(&window);
+ }
+ if (!window.isEmpty()) { // Skip very thin windows that round to zero or negative dimensions.
+ fWindowRects.addWindow(window);
+ }
+}
+
+inline bool GrReducedClip::intersectIBounds(const SkIRect& irect) {
+ SkASSERT(fHasIBounds);
+ if (!fIBounds.intersect(irect)) {
+ fHasIBounds = false;
+ fWindowRects.reset();
+ fElements.reset();
+ fRequiresAA = false;
+ fInitialState = InitialState::kAllOut;
+ return false;
+ }
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Create an 8-bit clip mask in alpha
+
+static bool stencil_element(GrDrawContext* dc,
+ const GrFixedClip& clip,
+ const GrUserStencilSettings* ss,
+ const SkMatrix& viewMatrix,
+ const SkClipStack::Element* element) {
+
+ // TODO: Draw rrects directly here.
+ switch (element->getType()) {
+ case Element::kEmpty_Type:
+ SkDEBUGFAIL("Should never get here with an empty element.");
+ break;
+ case Element::kRect_Type:
+ return dc->drawContextPriv().drawAndStencilRect(clip, ss,
+ (SkRegion::Op)element->getOp(),
+ element->isInverseFilled(),
+ element->isAA(),
+ viewMatrix, element->getRect());
+ break;
+ default: {
+ SkPath path;
+ element->asPath(&path);
+ if (path.isInverseFillType()) {
+ path.toggleInverseFillType();
+ }
+
+ return dc->drawContextPriv().drawAndStencilPath(clip, ss,
+ (SkRegion::Op)element->getOp(),
+ element->isInverseFilled(),
+ element->isAA(), viewMatrix, path);
+ break;
+ }
+ }
+
+ return false;
+}
+
+static void draw_element(GrDrawContext* dc,
+ const GrClip& clip, // TODO: can this just always be WideOpen?
+ const GrPaint &paint,
+ const SkMatrix& viewMatrix,
+ const SkClipStack::Element* element) {
+
+ // TODO: Draw rrects directly here.
+ switch (element->getType()) {
+ case Element::kEmpty_Type:
+ SkDEBUGFAIL("Should never get here with an empty element.");
+ break;
+ case Element::kRect_Type:
+ dc->drawRect(clip, paint, viewMatrix, element->getRect());
+ break;
+ default: {
+ SkPath path;
+ element->asPath(&path);
+ if (path.isInverseFillType()) {
+ path.toggleInverseFillType();
+ }
+
+ dc->drawPath(clip, paint, viewMatrix, path, GrStyle::SimpleFill());
+ break;
+ }
+ }
+}
+
+bool GrReducedClip::drawAlphaClipMask(GrDrawContext* dc) const {
+ // The texture may be larger than necessary, this rect represents the part of the texture
+ // we populate with a rasterization of the clip.
+ GrFixedClip clip(SkIRect::MakeWH(fIBounds.width(), fIBounds.height()));
+
+ if (!fWindowRects.empty()) {
+ clip.setWindowRectangles(fWindowRects, {fIBounds.left(), fIBounds.top()},
+ GrWindowRectsState::Mode::kExclusive);
+ }
+
+ // The scratch texture that we are drawing into can be substantially larger than the mask. Only
+ // clear the part that we care about.
+ GrColor initialCoverage = InitialState::kAllIn == this->initialState() ? -1 : 0;
+ dc->drawContextPriv().clear(clip, initialCoverage, true);
+
+ // Set the matrix so that rendered clip elements are transformed to mask space from clip space.
+ SkMatrix translate;
+ translate.setTranslate(SkIntToScalar(-fIBounds.left()), SkIntToScalar(-fIBounds.top()));
+
+ // walk through each clip element and perform its set op
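+ // Elements whose op can be expressed as a coverage set-op are drawn directly into the mask;
+ // inverse-filled, intersect, and reverse-difference elements take a two-pass stencil route
+ // (mark the element in the stencil, then resolve the op over the exterior pixels).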
+ for (ElementList::Iter iter(fElements); iter.get(); iter.next()) {
+ const Element* element = iter.get();
+ SkRegion::Op op = (SkRegion::Op)element->getOp();
+ bool invert = element->isInverseFilled();
+ if (invert || SkRegion::kIntersect_Op == op || SkRegion::kReverseDifference_Op == op) {
+ // draw directly into the result with the stencil set to make the pixels affected
+ // by the clip shape be non-zero.
+ static constexpr GrUserStencilSettings kStencilInElement(
+ GrUserStencilSettings::StaticInit<
+ 0xffff,
+ GrUserStencilTest::kAlways,
+ 0xffff,
+ GrUserStencilOp::kReplace,
+ GrUserStencilOp::kReplace,
+ 0xffff>()
+ );
+ if (!stencil_element(dc, clip, &kStencilInElement, translate, element)) {
+ return false;
+ }
+
+ // Draw to the exterior pixels (those with a zero stencil value).
+ static constexpr GrUserStencilSettings kDrawOutsideElement(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kEqual,
+ 0xffff,
+ GrUserStencilOp::kZero,
+ GrUserStencilOp::kZero,
+ 0xffff>()
+ );
+ if (!dc->drawContextPriv().drawAndStencilRect(clip, &kDrawOutsideElement,
+ op, !invert, false,
+ translate,
+ SkRect::Make(fIBounds))) {
+ return false;
+ }
+ } else {
+ // all the remaining ops can just be drawn directly into the accumulation buffer
+ GrPaint paint;
+ paint.setAntiAlias(element->isAA());
+ paint.setCoverageSetOpXPFactory(op, false);
+
+ draw_element(dc, clip, paint, translate, element);
+ }
+ }
+
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Create a 1-bit clip mask in the stencil buffer.
+
+class StencilClip final : public GrClip {
+public:
+ StencilClip(const SkIRect& scissorRect) : fFixedClip(scissorRect) {}
+ const GrFixedClip& fixedClip() const { return fFixedClip; }
+
+ void setWindowRectangles(const GrWindowRectangles& windows, const SkIPoint& origin,
+ GrWindowRectsState::Mode mode) {
+ fFixedClip.setWindowRectangles(windows, origin, mode);
+ }
+
+private:
+ bool quickContains(const SkRect&) const override {
+ return false;
+ }
+ void getConservativeBounds(int width, int height, SkIRect* bounds, bool* iior) const override {
+ fFixedClip.getConservativeBounds(width, height, bounds, iior);
+ }
+ bool isRRect(const SkRect& rtBounds, SkRRect* rr, bool* aa) const override {
+ return false;
+ }
+ bool apply(GrContext* context, GrDrawContext* drawContext, bool useHWAA,
+ bool hasUserStencilSettings, GrAppliedClip* out) const override {
+ if (!fFixedClip.apply(context, drawContext, useHWAA, hasUserStencilSettings, out)) {
+ return false;
+ }
+ out->addStencilClip();
+ return true;
+ }
+
+ GrFixedClip fFixedClip;
+
+ typedef GrClip INHERITED;
+};
+
+bool GrReducedClip::drawStencilClipMask(GrContext* context,
+ GrDrawContext* drawContext,
+ const SkIPoint& clipOrigin) const {
+ // We set the current clip to the bounds so that our recursive draws are scissored to them.
+ StencilClip stencilClip(fIBounds.makeOffset(-clipOrigin.x(), -clipOrigin.y()));
+
+ if (!fWindowRects.empty()) {
+ stencilClip.setWindowRectangles(fWindowRects, clipOrigin,
+ GrWindowRectsState::Mode::kExclusive);
+ }
+
+ bool initialState = InitialState::kAllIn == this->initialState();
+ drawContext->drawContextPriv().clearStencilClip(stencilClip.fixedClip(), initialState);
+
+ // Set the matrix so that rendered clip elements are transformed from clip to stencil space.
+ SkMatrix viewMatrix;
+ viewMatrix.setTranslate(SkIntToScalar(-clipOrigin.x()), SkIntToScalar(-clipOrigin.y()));
+
+ // walk through each clip element and perform its set op
+ // with the existing clip.
+ for (ElementList::Iter iter(fElements); iter.get(); iter.next()) {
+ const Element* element = iter.get();
+ bool useHWAA = element->isAA() && drawContext->isStencilBufferMultisampled();
+
+ bool fillInverted = false;
+
+ // This will be used to determine whether the clip shape can be rendered into the
+ // stencil with arbitrary stencil settings.
+ GrPathRenderer::StencilSupport stencilSupport;
+
+ SkRegion::Op op = (SkRegion::Op)element->getOp();
+
+ GrPathRenderer* pr = nullptr;
+ SkPath clipPath;
+ if (Element::kRect_Type == element->getType()) {
+ stencilSupport = GrPathRenderer::kNoRestriction_StencilSupport;
+ fillInverted = false;
+ } else {
+ element->asPath(&clipPath);
+ fillInverted = clipPath.isInverseFillType();
+ if (fillInverted) {
+ clipPath.toggleInverseFillType();
+ }
+
+ GrShape shape(clipPath, GrStyle::SimpleFill());
+ GrPathRenderer::CanDrawPathArgs canDrawArgs;
+ canDrawArgs.fShaderCaps = context->caps()->shaderCaps();
+ canDrawArgs.fViewMatrix = &viewMatrix;
+ canDrawArgs.fShape = &shape;
+ canDrawArgs.fAntiAlias = false;
+ canDrawArgs.fHasUserStencilSettings = false;
+ canDrawArgs.fIsStencilBufferMSAA = drawContext->isStencilBufferMultisampled();
+
+ GrDrawingManager* dm = context->contextPriv().drawingManager();
+ pr = dm->getPathRenderer(canDrawArgs, false,
+ GrPathRendererChain::kStencilOnly_DrawType,
+ &stencilSupport);
+ if (!pr) {
+ return false;
+ }
+ }
+
+ bool canRenderDirectToStencil =
+ GrPathRenderer::kNoRestriction_StencilSupport == stencilSupport;
+ bool drawDirectToClip; // Given the renderer, the element, the
+ // fill rule, and the set operation, should
+ // we render the element directly to the
+ // stencil bit used for clipping?
+ GrUserStencilSettings const* const* stencilPasses =
+ GrStencilSettings::GetClipPasses(op, canRenderDirectToStencil, fillInverted,
+ &drawDirectToClip);
+
+ // draw the element to the client stencil bits if necessary
+ if (!drawDirectToClip) {
+ static constexpr GrUserStencilSettings kDrawToStencil(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kAlways,
+ 0xffff,
+ GrUserStencilOp::kIncMaybeClamp,
+ GrUserStencilOp::kIncMaybeClamp,
+ 0xffff>()
+ );
+ if (Element::kRect_Type == element->getType()) {
+ drawContext->drawContextPriv().stencilRect(stencilClip.fixedClip(),
+ &kDrawToStencil, useHWAA,
+ viewMatrix, element->getRect());
+ } else {
+ if (!clipPath.isEmpty()) {
+ GrShape shape(clipPath, GrStyle::SimpleFill());
+ if (canRenderDirectToStencil) {
+ GrPaint paint;
+ paint.setXPFactory(GrDisableColorXPFactory::Make());
+ paint.setAntiAlias(element->isAA());
+
+ GrPathRenderer::DrawPathArgs args;
+ args.fResourceProvider = context->resourceProvider();
+ args.fPaint = &paint;
+ args.fUserStencilSettings = &kDrawToStencil;
+ args.fDrawContext = drawContext;
+ args.fClip = &stencilClip.fixedClip();
+ args.fViewMatrix = &viewMatrix;
+ args.fShape = &shape;
+ args.fAntiAlias = false;
+ args.fGammaCorrect = false;
+ pr->drawPath(args);
+ } else {
+ GrPathRenderer::StencilPathArgs args;
+ args.fResourceProvider = context->resourceProvider();
+ args.fDrawContext = drawContext;
+ args.fClip = &stencilClip.fixedClip();
+ args.fViewMatrix = &viewMatrix;
+ args.fIsAA = element->isAA();
+ args.fShape = &shape;
+ pr->stencilPath(args);
+ }
+ }
+ }
+ }
+
+ // now we modify the clip bit by rendering either the clip
+ // element directly or a bounding rect of the entire clip.
+ for (GrUserStencilSettings const* const* pass = stencilPasses; *pass; ++pass) {
+ if (drawDirectToClip) {
+ if (Element::kRect_Type == element->getType()) {
+ drawContext->drawContextPriv().stencilRect(stencilClip, *pass, useHWAA,
+ viewMatrix, element->getRect());
+ } else {
+ GrShape shape(clipPath, GrStyle::SimpleFill());
+ GrPaint paint;
+ paint.setXPFactory(GrDisableColorXPFactory::Make());
+ paint.setAntiAlias(element->isAA());
+ GrPathRenderer::DrawPathArgs args;
+ args.fResourceProvider = context->resourceProvider();
+ args.fPaint = &paint;
+ args.fUserStencilSettings = *pass;
+ args.fDrawContext = drawContext;
+ args.fClip = &stencilClip;
+ args.fViewMatrix = &viewMatrix;
+ args.fShape = &shape;
+ args.fAntiAlias = false;
+ args.fGammaCorrect = false;
+ pr->drawPath(args);
+ }
+ } else {
+ // The view matrix is setup to do clip space -> stencil space translation, so
+ // draw rect in clip space.
+ drawContext->drawContextPriv().stencilRect(stencilClip, *pass,
+ false, viewMatrix,
+ SkRect::Make(fIBounds));
+ }
+ }
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/gpu/GrReducedClip.h b/gfx/skia/skia/src/gpu/GrReducedClip.h
new file mode 100644
index 000000000..b8413e6df
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrReducedClip.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrReducedClip_DEFINED
+#define GrReducedClip_DEFINED
+
+#include "GrWindowRectangles.h"
+#include "SkClipStack.h"
+#include "SkTLList.h"
+
+class GrContext;
+class GrDrawContext;
+
+/**
+ * This class takes a clip stack and produces a reduced set of elements that are equivalent to
+ * applying that full stack within a specified query rectangle.
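+ *
+ * A minimal (hypothetical) use of the result looks like:
+ *
+ *   GrReducedClip reduced(clipStack, queryBounds, maxWindowRects);
+ *   if (reduced.elements().isEmpty()) {
+ *       // The clip is fully described by initialState() (plus ibounds(), when defined).
+ *   }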
+ */
+class SK_API GrReducedClip {
+public:
+ GrReducedClip(const SkClipStack&, const SkRect& queryBounds, int maxWindowRectangles = 0);
+
+ /**
+ * If hasIBounds() is true, this is the bounding box within which the clip elements are valid.
+ * The caller must not modify any pixels outside this box. Undefined if hasIBounds() is false.
+ */
+ const SkIRect& ibounds() const { SkASSERT(fHasIBounds); return fIBounds; }
+ int left() const { return this->ibounds().left(); }
+ int top() const { return this->ibounds().top(); }
+ int width() const { return this->ibounds().width(); }
+ int height() const { return this->ibounds().height(); }
+
+ /**
+ * Indicates whether ibounds() are defined. They will always be defined if the elements() are
+ * nonempty.
+ */
+ bool hasIBounds() const { return fHasIBounds; }
+
+ /**
+ * If nonempty, this is a set of "exclusive" windows within which the clip elements are NOT
+ * valid. The caller must not modify any pixels inside these windows.
+ */
+ const GrWindowRectangles& windowRectangles() const { return fWindowRects; }
+
+ typedef SkTLList<SkClipStack::Element, 16> ElementList;
+
+ /**
+ * Populated with a minimal list of elements required to fully implement the clip.
+ */
+ const ElementList& elements() const { return fElements; }
+
+ /**
+ * If elements() are nonempty, uniquely identifies the list of elements within ibounds().
+ * Otherwise undefined.
+ */
+ int32_t elementsGenID() const { SkASSERT(!fElements.isEmpty()); return fElementsGenID; }
+
+ /**
+ * Indicates whether antialiasing is required to process any of the clip elements.
+ */
+ bool requiresAA() const { return fRequiresAA; }
+
+ enum class InitialState : bool {
+ kAllIn,
+ kAllOut
+ };
+
+ InitialState initialState() const { return fInitialState; }
+
+ bool drawAlphaClipMask(GrDrawContext*) const;
+ bool drawStencilClipMask(GrContext*, GrDrawContext*, const SkIPoint& clipOrigin) const;
+
+private:
+ void walkStack(const SkClipStack&, const SkRect& queryBounds, int maxWindowRectangles);
+ void addInteriorWindowRectangles(int maxWindowRectangles);
+ void addWindowRectangle(const SkRect& elementInteriorRect, bool elementIsAA);
+ bool intersectIBounds(const SkIRect&);
+
+ SkIRect fIBounds;
+ bool fHasIBounds;
+ GrWindowRectangles fWindowRects;
+ ElementList fElements;
+ int32_t fElementsGenID;
+ bool fRequiresAA;
+ InitialState fInitialState;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrRenderTarget.cpp b/gfx/skia/skia/src/gpu/GrRenderTarget.cpp
new file mode 100644
index 000000000..2053a166e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRenderTarget.cpp
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrRenderTarget.h"
+
+#include "GrContext.h"
+#include "GrContextPriv.h"
+#include "GrDrawContext.h"
+#include "GrDrawTarget.h"
+#include "GrGpu.h"
+#include "GrRenderTargetPriv.h"
+#include "GrStencilAttachment.h"
+
+GrRenderTarget::GrRenderTarget(GrGpu* gpu, const GrSurfaceDesc& desc, Flags flags,
+ GrStencilAttachment* stencil)
+ : INHERITED(gpu, desc)
+ , fStencilAttachment(stencil)
+ , fMultisampleSpecsID(0)
+ , fFlags(flags)
+ , fLastDrawTarget(nullptr) {
+ SkASSERT(!(fFlags & Flags::kMixedSampled) || fDesc.fSampleCnt > 0);
+ SkASSERT(!(fFlags & Flags::kWindowRectsSupport) || gpu->caps()->maxWindowRectangles() > 0);
+ fResolveRect.setLargestInverted();
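+ // A largest-inverted rect serves as the "nothing queued for resolve" sentinel; it is empty,
+ // so a later join() in flagAsNeedingResolve() simply adopts the joined rect.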
+}
+
+GrRenderTarget::~GrRenderTarget() {
+ if (fLastDrawTarget) {
+ fLastDrawTarget->clearRT();
+ }
+ SkSafeUnref(fLastDrawTarget);
+}
+
+void GrRenderTarget::discard() {
+ // go through context so that all necessary flushing occurs
+ GrContext* context = this->getContext();
+ if (!context) {
+ return;
+ }
+
+ sk_sp<GrDrawContext> drawContext(context->contextPriv().makeWrappedDrawContext(sk_ref_sp(this),
+ nullptr));
+ if (!drawContext) {
+ return;
+ }
+
+ drawContext->discard();
+}
+
+void GrRenderTarget::flagAsNeedingResolve(const SkIRect* rect) {
+ if (kCanResolve_ResolveType == getResolveType()) {
+ if (rect) {
+ fResolveRect.join(*rect);
+ if (!fResolveRect.intersect(0, 0, this->width(), this->height())) {
+ fResolveRect.setEmpty();
+ }
+ } else {
+ fResolveRect.setLTRB(0, 0, this->width(), this->height());
+ }
+ }
+}
+
+void GrRenderTarget::overrideResolveRect(const SkIRect rect) {
+ fResolveRect = rect;
+ if (fResolveRect.isEmpty()) {
+ fResolveRect.setLargestInverted();
+ } else {
+ if (!fResolveRect.intersect(0, 0, this->width(), this->height())) {
+ fResolveRect.setLargestInverted();
+ }
+ }
+}
+
+void GrRenderTarget::onRelease() {
+ SkSafeSetNull(fStencilAttachment);
+
+ INHERITED::onRelease();
+}
+
+void GrRenderTarget::onAbandon() {
+ SkSafeSetNull(fStencilAttachment);
+
+ // The contents of this renderTarget are gone/invalid. It isn't useful to point back
+ // to the creating drawTarget.
+ this->setLastDrawTarget(nullptr);
+
+ INHERITED::onAbandon();
+}
+
+void GrRenderTarget::setLastDrawTarget(GrDrawTarget* dt) {
+ if (fLastDrawTarget) {
+ // The non-MDB world never closes so we can't check this condition
+#ifdef ENABLE_MDB
+ SkASSERT(fLastDrawTarget->isClosed());
+#endif
+ fLastDrawTarget->clearRT();
+ }
+
+ SkRefCnt_SafeAssign(fLastDrawTarget, dt);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool GrRenderTargetPriv::attachStencilAttachment(GrStencilAttachment* stencil) {
+ if (!stencil && !fRenderTarget->fStencilAttachment) {
+ // No need to do any work since we currently don't have a stencil attachment and
+ // we're not actually adding one.
+ return true;
+ }
+ fRenderTarget->fStencilAttachment = stencil;
+ if (!fRenderTarget->completeStencilAttachment()) {
+ SkSafeSetNull(fRenderTarget->fStencilAttachment);
+ return false;
+ }
+ return true;
+}
+
+int GrRenderTargetPriv::numStencilBits() const {
+ return fRenderTarget->fStencilAttachment ? fRenderTarget->fStencilAttachment->bits() : 0;
+}
+
+const GrGpu::MultisampleSpecs&
+GrRenderTargetPriv::getMultisampleSpecs(const GrStencilSettings& stencil) const {
+ return fRenderTarget->getGpu()->getMultisampleSpecs(fRenderTarget, stencil);
+}
+
+int GrRenderTargetPriv::maxWindowRectangles() const {
+ return (this->flags() & Flags::kWindowRectsSupport) ?
+ fRenderTarget->getGpu()->caps()->maxWindowRectangles() : 0;
+}
diff --git a/gfx/skia/skia/src/gpu/GrRenderTargetPriv.h b/gfx/skia/skia/src/gpu/GrRenderTargetPriv.h
new file mode 100644
index 000000000..698288e1c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRenderTargetPriv.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRenderTargetPriv_DEFINED
+#define GrRenderTargetPriv_DEFINED
+
+#include "GrRenderTarget.h"
+#include "GrGpu.h"
+
+class GrStencilSettings;
+
+/** Class that adds methods to GrRenderTarget that are only intended for use internal to Skia.
+ This class is purely a privileged window into GrRenderTarget. It should never have additional
+ data members or virtual methods. */
+class GrRenderTargetPriv {
+public:
+ /**
+ * GrStencilAttachment is not part of the public API.
+ */
+ GrStencilAttachment* getStencilAttachment() const { return fRenderTarget->fStencilAttachment; }
+
+ /**
+ * Attaches the GrStencilAttachment to the render target. If stencil is nullptr then the
+ * currently attached GrStencilAttachment will be removed if one was previously attached. This
+ * function returns false if there was a failure in attaching the GrStencilAttachment.
+ */
+ bool attachStencilAttachment(GrStencilAttachment* stencil);
+
+ int numStencilBits() const;
+
+ const GrGpu::MultisampleSpecs& getMultisampleSpecs(const GrStencilSettings& stencil) const;
+ uint8_t& accessMultisampleSpecsID() { return fRenderTarget->fMultisampleSpecsID; }
+
+ typedef GrRenderTarget::Flags Flags;
+
+ Flags flags() const { return fRenderTarget->fFlags; }
+ int maxWindowRectangles() const;
+
+private:
+ explicit GrRenderTargetPriv(GrRenderTarget* renderTarget) : fRenderTarget(renderTarget) {}
+ GrRenderTargetPriv(const GrRenderTargetPriv&) {} // unimpl
+ GrRenderTargetPriv& operator=(const GrRenderTargetPriv&); // unimpl
+
+ // No taking addresses of this type.
+ const GrRenderTargetPriv* operator&() const;
+ GrRenderTargetPriv* operator&();
+
+ GrRenderTarget* fRenderTarget;
+
+ friend class GrRenderTarget; // to construct/copy this type.
+};
+
+inline GrRenderTargetPriv GrRenderTarget::renderTargetPriv() { return GrRenderTargetPriv(this); }
+
+inline const GrRenderTargetPriv GrRenderTarget::renderTargetPriv() const {
+ return GrRenderTargetPriv(const_cast<GrRenderTarget*>(this));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrRenderTargetProxy.cpp b/gfx/skia/skia/src/gpu/GrRenderTargetProxy.cpp
new file mode 100644
index 000000000..fa6bd2657
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrRenderTargetProxy.cpp
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrRenderTargetProxy.h"
+
+#include "GrCaps.h"
+#include "GrDrawTarget.h"
+#include "GrGpuResourcePriv.h"
+
+// Deferred version
+// TODO: we can probably munge the 'desc' in both the wrapped and deferred
+// cases to make the sampleConfig/numSamples stuff more rational.
+GrRenderTargetProxy::GrRenderTargetProxy(const GrCaps& caps, const GrSurfaceDesc& desc,
+ SkBackingFit fit, SkBudgeted budgeted)
+ : INHERITED(desc, fit, budgeted)
+ , fTarget(nullptr)
+ , fFlags(GrRenderTargetPriv::Flags::kNone)
+ , fLastDrawTarget(nullptr) {
+ // Since we know the newly created render target will be internal, we are able to precompute
+ // what the flags will ultimately end up being.
+ if (caps.usesMixedSamples() && fDesc.fSampleCnt > 0) {
+ fFlags |= GrRenderTargetPriv::Flags::kMixedSampled;
+ }
+ if (caps.maxWindowRectangles() > 0) {
+ fFlags |= GrRenderTargetPriv::Flags::kWindowRectsSupport;
+ }
+}
+
+// Wrapped version
+GrRenderTargetProxy::GrRenderTargetProxy(const GrCaps& caps, sk_sp<GrRenderTarget> rt)
+ : INHERITED(rt->desc(), SkBackingFit::kExact,
+ rt->resourcePriv().isBudgeted(), rt->uniqueID())
+ , fTarget(std::move(rt))
+ , fFlags(fTarget->renderTargetPriv().flags())
+ , fLastDrawTarget(nullptr) {
+}
+
+GrRenderTargetProxy::~GrRenderTargetProxy() {
+ if (fLastDrawTarget) {
+ fLastDrawTarget->clearRT();
+ }
+ SkSafeUnref(fLastDrawTarget);
+}
+
+GrRenderTarget* GrRenderTargetProxy::instantiate(GrTextureProvider* texProvider) {
+ if (fTarget) {
+ return fTarget.get();
+ }
+
+ // TODO: it would be nice to not have to copy the desc here
+ GrSurfaceDesc desc = fDesc;
+ desc.fFlags |= GrSurfaceFlags::kRenderTarget_GrSurfaceFlag;
+
+ sk_sp<GrTexture> tex;
+ if (SkBackingFit::kApprox == fFit) {
+ tex.reset(texProvider->createApproxTexture(desc));
+ } else {
+ tex.reset(texProvider->createTexture(desc, fBudgeted));
+ }
+ if (!tex || !tex->asRenderTarget()) {
+ return nullptr;
+ }
+
+ fTarget = sk_ref_sp(tex->asRenderTarget());
+
+ // Check that our a priori computation matched the ultimate reality
+ SkASSERT(fFlags == fTarget->renderTargetPriv().flags());
+
+ return fTarget.get();
+}
+
+void GrRenderTargetProxy::setLastDrawTarget(GrDrawTarget* dt) {
+ if (fLastDrawTarget) {
+ // The non-MDB world never closes so we can't check this condition
+#ifdef ENABLE_MDB
+ SkASSERT(fLastDrawTarget->isClosed());
+#endif
+ fLastDrawTarget->clearRT();
+ }
+
+ SkRefCnt_SafeAssign(fLastDrawTarget, dt);
+}
+
+sk_sp<GrRenderTargetProxy> GrRenderTargetProxy::Make(const GrCaps& caps,
+ const GrSurfaceDesc& desc,
+ SkBackingFit fit,
+ SkBudgeted budgeted) {
+ return sk_sp<GrRenderTargetProxy>(new GrRenderTargetProxy(caps, desc, fit, budgeted));
+}
+
+sk_sp<GrRenderTargetProxy> GrRenderTargetProxy::Make(const GrCaps& caps, sk_sp<GrRenderTarget> rt) {
+ return sk_sp<GrRenderTargetProxy>(new GrRenderTargetProxy(caps, rt));
+}
+
diff --git a/gfx/skia/skia/src/gpu/GrResourceCache.cpp b/gfx/skia/skia/src/gpu/GrResourceCache.cpp
new file mode 100644
index 000000000..9462a7384
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrResourceCache.cpp
@@ -0,0 +1,763 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrResourceCache.h"
+
+#include "GrCaps.h"
+#include "GrGpuResourceCacheAccess.h"
+#include "GrTracing.h"
+#include "SkGr.h"
+#include "SkMessageBus.h"
+#include "SkOpts.h"
+#include "SkTSort.h"
+
+DECLARE_SKMESSAGEBUS_MESSAGE(GrUniqueKeyInvalidatedMessage);
+
+//////////////////////////////////////////////////////////////////////////////
+
+GrScratchKey::ResourceType GrScratchKey::GenerateResourceType() {
+ static int32_t gType = INHERITED::kInvalidDomain + 1;
+
+ int32_t type = sk_atomic_inc(&gType);
+ if (type > SK_MaxU16) {
+ SkFAIL("Too many Resource Types");
+ }
+
+ return static_cast<ResourceType>(type);
+}
+
+GrUniqueKey::Domain GrUniqueKey::GenerateDomain() {
+ static int32_t gDomain = INHERITED::kInvalidDomain + 1;
+
+ int32_t domain = sk_atomic_inc(&gDomain);
+ if (domain > SK_MaxU16) {
+ SkFAIL("Too many GrUniqueKey Domains");
+ }
+
+ return static_cast<Domain>(domain);
+}
+
+uint32_t GrResourceKeyHash(const uint32_t* data, size_t size) {
+ return SkOpts::hash(data, size);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GrResourceCache::AutoValidate : ::SkNoncopyable {
+public:
+ AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
+ ~AutoValidate() { fCache->validate(); }
+private:
+ GrResourceCache* fCache;
+};
+
+//////////////////////////////////////////////////////////////////////////////
+
+GrResourceCache::GrResourceCache(const GrCaps* caps)
+ : fTimestamp(0)
+ , fMaxCount(kDefaultMaxCount)
+ , fMaxBytes(kDefaultMaxSize)
+ , fMaxUnusedFlushes(kDefaultMaxUnusedFlushes)
+#if GR_CACHE_STATS
+ , fHighWaterCount(0)
+ , fHighWaterBytes(0)
+ , fBudgetedHighWaterCount(0)
+ , fBudgetedHighWaterBytes(0)
+#endif
+ , fBytes(0)
+ , fBudgetedCount(0)
+ , fBudgetedBytes(0)
+ , fRequestFlush(false)
+ , fExternalFlushCnt(0)
+ , fPreferVRAMUseOverFlushes(caps->preferVRAMUseOverFlushes()) {
+ SkDEBUGCODE(fCount = 0;)
+ SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr;)
+}
+
+GrResourceCache::~GrResourceCache() {
+ this->releaseAll();
+}
+
+void GrResourceCache::setLimits(int count, size_t bytes, int maxUnusedFlushes) {
+ fMaxCount = count;
+ fMaxBytes = bytes;
+ fMaxUnusedFlushes = maxUnusedFlushes;
+ this->purgeAsNeeded();
+}
+
+void GrResourceCache::insertResource(GrGpuResource* resource) {
+ SkASSERT(resource);
+ SkASSERT(!this->isInCache(resource));
+ SkASSERT(!resource->wasDestroyed());
+ SkASSERT(!resource->isPurgeable());
+
+ // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
+ // up iterating over all the resources that already have timestamps.
+ resource->cacheAccess().setTimestamp(this->getNextTimestamp());
+
+ this->addToNonpurgeableArray(resource);
+
+ size_t size = resource->gpuMemorySize();
+ SkDEBUGCODE(++fCount;)
+ fBytes += size;
+#if GR_CACHE_STATS
+ fHighWaterCount = SkTMax(this->getResourceCount(), fHighWaterCount);
+ fHighWaterBytes = SkTMax(fBytes, fHighWaterBytes);
+#endif
+ if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
+ ++fBudgetedCount;
+ fBudgetedBytes += size;
+ TRACE_COUNTER2(TRACE_DISABLED_BY_DEFAULT("skia.gpu.cache"), "skia budget", "used",
+ fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
+#if GR_CACHE_STATS
+ fBudgetedHighWaterCount = SkTMax(fBudgetedCount, fBudgetedHighWaterCount);
+ fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
+#endif
+ }
+ if (resource->resourcePriv().getScratchKey().isValid() &&
+ !resource->getUniqueKey().isValid()) {
+ SkASSERT(!resource->resourcePriv().refsWrappedObjects());
+ fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
+ }
+
+ this->purgeAsNeeded();
+}
+
+void GrResourceCache::removeResource(GrGpuResource* resource) {
+ this->validate();
+ SkASSERT(this->isInCache(resource));
+
+ if (resource->isPurgeable()) {
+ fPurgeableQueue.remove(resource);
+ } else {
+ this->removeFromNonpurgeableArray(resource);
+ }
+
+ size_t size = resource->gpuMemorySize();
+ SkDEBUGCODE(--fCount;)
+ fBytes -= size;
+ if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
+ --fBudgetedCount;
+ fBudgetedBytes -= size;
+ TRACE_COUNTER2(TRACE_DISABLED_BY_DEFAULT("skia.gpu.cache"), "skia budget", "used",
+ fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
+ }
+
+ if (resource->resourcePriv().getScratchKey().isValid() &&
+ !resource->getUniqueKey().isValid()) {
+ fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
+ }
+ if (resource->getUniqueKey().isValid()) {
+ fUniqueHash.remove(resource->getUniqueKey());
+ }
+ this->validate();
+}
+
+void GrResourceCache::abandonAll() {
+ AutoValidate av(this);
+
+ while (fNonpurgeableResources.count()) {
+ GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
+ SkASSERT(!back->wasDestroyed());
+ back->cacheAccess().abandon();
+ }
+
+ while (fPurgeableQueue.count()) {
+ GrGpuResource* top = fPurgeableQueue.peek();
+ SkASSERT(!top->wasDestroyed());
+ top->cacheAccess().abandon();
+ }
+
+ SkASSERT(!fScratchMap.count());
+ SkASSERT(!fUniqueHash.count());
+ SkASSERT(!fCount);
+ SkASSERT(!this->getResourceCount());
+ SkASSERT(!fBytes);
+ SkASSERT(!fBudgetedCount);
+ SkASSERT(!fBudgetedBytes);
+}
+
+void GrResourceCache::releaseAll() {
+ AutoValidate av(this);
+
+ while(fNonpurgeableResources.count()) {
+ GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
+ SkASSERT(!back->wasDestroyed());
+ back->cacheAccess().release();
+ }
+
+ while (fPurgeableQueue.count()) {
+ GrGpuResource* top = fPurgeableQueue.peek();
+ SkASSERT(!top->wasDestroyed());
+ top->cacheAccess().release();
+ }
+
+ SkASSERT(!fScratchMap.count());
+ SkASSERT(!fUniqueHash.count());
+ SkASSERT(!fCount);
+ SkASSERT(!this->getResourceCount());
+ SkASSERT(!fBytes);
+ SkASSERT(!fBudgetedCount);
+ SkASSERT(!fBudgetedBytes);
+}
+
+class GrResourceCache::AvailableForScratchUse {
+public:
+ AvailableForScratchUse(bool rejectPendingIO) : fRejectPendingIO(rejectPendingIO) { }
+
+ bool operator()(const GrGpuResource* resource) const {
+ SkASSERT(!resource->getUniqueKey().isValid() &&
+ resource->resourcePriv().getScratchKey().isValid());
+ if (resource->internalHasRef() || !resource->cacheAccess().isScratch()) {
+ return false;
+ }
+ return !fRejectPendingIO || !resource->internalHasPendingIO();
+ }
+
+private:
+ bool fRejectPendingIO;
+};
+
+GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey,
+ size_t resourceSize,
+ uint32_t flags) {
+ SkASSERT(scratchKey.isValid());
+
+ GrGpuResource* resource;
+ if (flags & (kPreferNoPendingIO_ScratchFlag | kRequireNoPendingIO_ScratchFlag)) {
+ resource = fScratchMap.find(scratchKey, AvailableForScratchUse(true));
+ if (resource) {
+ this->refAndMakeResourceMRU(resource);
+ this->validate();
+ return resource;
+ } else if (flags & kRequireNoPendingIO_ScratchFlag) {
+ return nullptr;
+ }
+ // We would prefer to consume more available VRAM rather than flushing
+ // immediately, but on ANGLE this can lead to starving of the GPU.
+ if (fPreferVRAMUseOverFlushes && this->wouldFit(resourceSize)) {
+ // kPrefer is specified, we didn't find a resource without pending io,
+ // but there is still space in our budget for the resource so force
+ // the caller to allocate a new resource.
+ return nullptr;
+ }
+ }
+ resource = fScratchMap.find(scratchKey, AvailableForScratchUse(false));
+ if (resource) {
+ this->refAndMakeResourceMRU(resource);
+ this->validate();
+ }
+ return resource;
+}
+
+void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
+ SkASSERT(resource->resourcePriv().getScratchKey().isValid());
+ if (!resource->getUniqueKey().isValid()) {
+ fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
+ }
+}
+
+void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
+ // Someone has a ref to this resource in order to have removed the key. When the ref count
+ // reaches zero we will get a ref cnt notification and figure out what to do with it.
+ if (resource->getUniqueKey().isValid()) {
+ SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
+ fUniqueHash.remove(resource->getUniqueKey());
+ }
+ resource->cacheAccess().removeUniqueKey();
+
+ if (resource->resourcePriv().getScratchKey().isValid()) {
+ fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
+ }
+
+ this->validate();
+}
+
+void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
+ SkASSERT(resource);
+ SkASSERT(this->isInCache(resource));
+
+ // If another resource has the new key, remove its key then install the key on this resource.
+ if (newKey.isValid()) {
+ // Remove the entry for this resource if it already has a unique key.
+ if (resource->getUniqueKey().isValid()) {
+ SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
+ fUniqueHash.remove(resource->getUniqueKey());
+ SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
+ } else {
+ // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
+ // from the ScratchMap
+ if (resource->resourcePriv().getScratchKey().isValid()) {
+ fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
+ }
+ }
+
+ if (GrGpuResource* old = fUniqueHash.find(newKey)) {
+ // If the old resource using the key is purgeable and is unreachable, then remove it.
+ if (!old->resourcePriv().getScratchKey().isValid() && old->isPurgeable()) {
+ // release may call validate() which will assert that resource is in fUniqueHash
+ // if it has a valid key. So in debug reset the key here before we assign it.
+ SkDEBUGCODE(resource->cacheAccess().removeUniqueKey();)
+ old->cacheAccess().release();
+ } else {
+ this->removeUniqueKey(old);
+ }
+ }
+ SkASSERT(nullptr == fUniqueHash.find(newKey));
+ resource->cacheAccess().setUniqueKey(newKey);
+ fUniqueHash.add(resource);
+ } else {
+ this->removeUniqueKey(resource);
+ }
+
+ this->validate();
+}
+
+void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
+ SkASSERT(resource);
+ SkASSERT(this->isInCache(resource));
+
+ if (resource->isPurgeable()) {
+ // It's about to become unpurgeable.
+ fPurgeableQueue.remove(resource);
+ this->addToNonpurgeableArray(resource);
+ }
+ resource->ref();
+
+ resource->cacheAccess().setTimestamp(this->getNextTimestamp());
+ this->validate();
+}
+
+void GrResourceCache::notifyCntReachedZero(GrGpuResource* resource, uint32_t flags) {
+ SkASSERT(resource);
+ SkASSERT(!resource->wasDestroyed());
+ SkASSERT(flags);
+ SkASSERT(this->isInCache(resource));
+ // This resource should always be in the nonpurgeable array when this function is called. It
+ // will be moved to the queue if it is newly purgeable.
+ SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);
+
+ if (SkToBool(ResourceAccess::kRefCntReachedZero_RefNotificationFlag & flags)) {
+#ifdef SK_DEBUG
+ // When the timestamp overflows validate() is called. validate() checks that resources in
+ // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
+ // the purgeable queue happens just below in this function. So we mark it as an exception.
+ if (resource->isPurgeable()) {
+ fNewlyPurgeableResourceForValidation = resource;
+ }
+#endif
+ resource->cacheAccess().setTimestamp(this->getNextTimestamp());
+ SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);
+ }
+
+ if (!SkToBool(ResourceAccess::kAllCntsReachedZero_RefNotificationFlag & flags)) {
+ SkASSERT(!resource->isPurgeable());
+ return;
+ }
+
+ SkASSERT(resource->isPurgeable());
+ this->removeFromNonpurgeableArray(resource);
+ fPurgeableQueue.insert(resource);
+ resource->cacheAccess().setFlushCntWhenResourceBecamePurgeable(fExternalFlushCnt);
+
+ if (SkBudgeted::kNo == resource->resourcePriv().isBudgeted()) {
+ // Check whether this resource could still be used as a scratch resource.
+ if (!resource->resourcePriv().refsWrappedObjects() &&
+ resource->resourcePriv().getScratchKey().isValid()) {
+ // We won't purge an existing resource to make room for this one.
+ if (fBudgetedCount < fMaxCount &&
+ fBudgetedBytes + resource->gpuMemorySize() <= fMaxBytes) {
+ resource->resourcePriv().makeBudgeted();
+ return;
+ }
+ }
+ } else {
+ // Purge the resource immediately if we're over budget
+ // Also purge if the resource has neither a valid scratch key nor a unique key.
+ bool noKey = !resource->resourcePriv().getScratchKey().isValid() &&
+ !resource->getUniqueKey().isValid();
+ if (!this->overBudget() && !noKey) {
+ return;
+ }
+ }
+
+ SkDEBUGCODE(int beforeCount = this->getResourceCount();)
+ resource->cacheAccess().release();
+ // We should at least free this resource, perhaps dependent resources as well.
+ SkASSERT(this->getResourceCount() < beforeCount);
+ this->validate();
+}
+
+void GrResourceCache::didChangeGpuMemorySize(const GrGpuResource* resource, size_t oldSize) {
+ // SkASSERT(!fPurging); GrPathRange increases size during flush. :(
+ SkASSERT(resource);
+ SkASSERT(this->isInCache(resource));
+
+ ptrdiff_t delta = resource->gpuMemorySize() - oldSize;
+
+ fBytes += delta;
+#if GR_CACHE_STATS
+ fHighWaterBytes = SkTMax(fBytes, fHighWaterBytes);
+#endif
+ if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
+ fBudgetedBytes += delta;
+ TRACE_COUNTER2(TRACE_DISABLED_BY_DEFAULT("skia.gpu.cache"), "skia budget", "used",
+ fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
+#if GR_CACHE_STATS
+ fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
+#endif
+ }
+
+ this->purgeAsNeeded();
+ this->validate();
+}
+
+void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
+ SkASSERT(resource);
+ SkASSERT(this->isInCache(resource));
+
+ size_t size = resource->gpuMemorySize();
+
+ if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
+ ++fBudgetedCount;
+ fBudgetedBytes += size;
+#if GR_CACHE_STATS
+ fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
+ fBudgetedHighWaterCount = SkTMax(fBudgetedCount, fBudgetedHighWaterCount);
+#endif
+ this->purgeAsNeeded();
+ } else {
+ --fBudgetedCount;
+ fBudgetedBytes -= size;
+ }
+ TRACE_COUNTER2(TRACE_DISABLED_BY_DEFAULT("skia.gpu.cache"), "skia budget", "used",
+ fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
+
+ this->validate();
+}
+
+void GrResourceCache::purgeAsNeeded() {
+ SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs;
+ fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
+ if (invalidKeyMsgs.count()) {
+ this->processInvalidUniqueKeys(invalidKeyMsgs);
+ }
+
+ if (fMaxUnusedFlushes > 0) {
+ // We want to know how many complete flushes have occurred without the resource being used.
+ // If the resource was tagged when fExternalFlushCnt was N then this means it became
+ // purgeable during activity that became the N+1th flush. So when the flush count is N+2
+ // it has sat in the purgeable queue for one entire flush.
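+        // (Illustrative numbers: with fMaxUnusedFlushes == 2 and fExternalFlushCnt == 10,
+        // oldestAllowedFlushCnt == 7, so resources tagged with a flush count of 7 or lower have
+        // gone at least two complete flushes unused and are released below.)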
+ uint32_t oldestAllowedFlushCnt = fExternalFlushCnt - fMaxUnusedFlushes - 1;
+ // check for underflow
+ if (oldestAllowedFlushCnt < fExternalFlushCnt) {
+ while (fPurgeableQueue.count()) {
+ uint32_t flushWhenResourceBecamePurgeable =
+ fPurgeableQueue.peek()->cacheAccess().flushCntWhenResourceBecamePurgeable();
+ if (oldestAllowedFlushCnt < flushWhenResourceBecamePurgeable) {
+ // Resources were given both LRU timestamps and tagged with a flush cnt when
+ // they first became purgeable. The LRU timestamp won't change again until the
+ // resource is made non-purgeable again. So, at this point all the remaining
+ // resources in the timestamp-sorted queue will have a flush count >= to this
+ // one.
+ break;
+ }
+ GrGpuResource* resource = fPurgeableQueue.peek();
+ SkASSERT(resource->isPurgeable());
+ resource->cacheAccess().release();
+ }
+ }
+ }
+
+ bool stillOverbudget = this->overBudget();
+ while (stillOverbudget && fPurgeableQueue.count()) {
+ GrGpuResource* resource = fPurgeableQueue.peek();
+ SkASSERT(resource->isPurgeable());
+ resource->cacheAccess().release();
+ stillOverbudget = this->overBudget();
+ }
+
+ this->validate();
+
+ if (stillOverbudget) {
+ // Set this so that GrDrawingManager will issue a flush to free up resources with pending
+ // IO that we were unable to purge in this pass.
+ fRequestFlush = true;
+ }
+}
+
+void GrResourceCache::purgeAllUnlocked() {
+ // We could disable maintaining the heap property here, but it would add a lot of complexity.
+ // Moreover, this is rarely called.
+ while (fPurgeableQueue.count()) {
+ GrGpuResource* resource = fPurgeableQueue.peek();
+ SkASSERT(resource->isPurgeable());
+ resource->cacheAccess().release();
+ }
+
+ this->validate();
+}
+
+void GrResourceCache::processInvalidUniqueKeys(
+ const SkTArray<GrUniqueKeyInvalidatedMessage>& msgs) {
+ for (int i = 0; i < msgs.count(); ++i) {
+ GrGpuResource* resource = this->findAndRefUniqueResource(msgs[i].key());
+ if (resource) {
+ resource->resourcePriv().removeUniqueKey();
+ resource->unref(); // If this resource is now purgeable, the cache will be notified.
+ }
+ }
+}
+
+void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
+ int index = fNonpurgeableResources.count();
+ *fNonpurgeableResources.append() = resource;
+ *resource->cacheAccess().accessCacheIndex() = index;
+}
+
+void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
+ int* index = resource->cacheAccess().accessCacheIndex();
+    // Fill the hole we will create in the array with the tail object, adjust its index, and
+    // then pop the array.
+ GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
+ SkASSERT(fNonpurgeableResources[*index] == resource);
+ fNonpurgeableResources[*index] = tail;
+ *tail->cacheAccess().accessCacheIndex() = *index;
+ fNonpurgeableResources.pop();
+ SkDEBUGCODE(*index = -1);
+}
+
+uint32_t GrResourceCache::getNextTimestamp() {
+ // If we wrap then all the existing resources will appear older than any resources that get
+ // a timestamp after the wrap.
+ if (0 == fTimestamp) {
+ int count = this->getResourceCount();
+ if (count) {
+ // Reset all the timestamps. We sort the resources by timestamp and then assign
+ // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
+ // rare.
+ SkTDArray<GrGpuResource*> sortedPurgeableResources;
+ sortedPurgeableResources.setReserve(fPurgeableQueue.count());
+
+ while (fPurgeableQueue.count()) {
+ *sortedPurgeableResources.append() = fPurgeableQueue.peek();
+ fPurgeableQueue.pop();
+ }
+
+ SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end() - 1,
+ CompareTimestamp);
+
+ // Pick resources out of the purgeable and non-purgeable arrays based on lowest
+ // timestamp and assign new timestamps.
+ int currP = 0;
+ int currNP = 0;
+ while (currP < sortedPurgeableResources.count() &&
+ currNP < fNonpurgeableResources.count()) {
+ uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
+ uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
+ SkASSERT(tsP != tsNP);
+ if (tsP < tsNP) {
+ sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
+ } else {
+ // Correct the index in the nonpurgeable array stored on the resource post-sort.
+ *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
+ fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
+ }
+ }
+
+ // The above loop ended when we hit the end of one array. Finish the other one.
+ while (currP < sortedPurgeableResources.count()) {
+ sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
+ }
+ while (currNP < fNonpurgeableResources.count()) {
+ *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
+ fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
+ }
+
+ // Rebuild the queue.
+ for (int i = 0; i < sortedPurgeableResources.count(); ++i) {
+ fPurgeableQueue.insert(sortedPurgeableResources[i]);
+ }
+
+ this->validate();
+ SkASSERT(count == this->getResourceCount());
+
+ // count should be the next timestamp we return.
+ SkASSERT(fTimestamp == SkToU32(count));
+ }
+ }
+ return fTimestamp++;
+}
+
+void GrResourceCache::notifyFlushOccurred(FlushType type) {
+ switch (type) {
+ case FlushType::kImmediateMode:
+ break;
+ case FlushType::kCacheRequested:
+ SkASSERT(fRequestFlush);
+ fRequestFlush = false;
+ break;
+ case FlushType::kExternal:
+ ++fExternalFlushCnt;
+ if (0 == fExternalFlushCnt) {
+ // When this wraps just reset all the purgeable resources' last used flush state.
+ for (int i = 0; i < fPurgeableQueue.count(); ++i) {
+ fPurgeableQueue.at(i)->cacheAccess().setFlushCntWhenResourceBecamePurgeable(0);
+ }
+ }
+ break;
+ }
+ this->purgeAsNeeded();
+}
+
+void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
+ for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
+ fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
+ }
+ for (int i = 0; i < fPurgeableQueue.count(); ++i) {
+ fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
+ }
+}
+
+#ifdef SK_DEBUG
+void GrResourceCache::validate() const {
+ // Reduce the frequency of validations for large resource counts.
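+    // (Illustrative: with roughly 1000 resources, SkNextPow2(fCount + 1) is 1024, mask is 31,
+    // and the full validation below runs on only about one call in 32.)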
+ static SkRandom gRandom;
+ int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
+ if (~mask && (gRandom.nextU() & mask)) {
+ return;
+ }
+
+ struct Stats {
+ size_t fBytes;
+ int fBudgetedCount;
+ size_t fBudgetedBytes;
+ int fLocked;
+ int fScratch;
+ int fCouldBeScratch;
+ int fContent;
+ const ScratchMap* fScratchMap;
+ const UniqueHash* fUniqueHash;
+
+ Stats(const GrResourceCache* cache) {
+ memset(this, 0, sizeof(*this));
+ fScratchMap = &cache->fScratchMap;
+ fUniqueHash = &cache->fUniqueHash;
+ }
+
+ void update(GrGpuResource* resource) {
+ fBytes += resource->gpuMemorySize();
+
+ if (!resource->isPurgeable()) {
+ ++fLocked;
+ }
+
+ const GrScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
+ const GrUniqueKey& uniqueKey = resource->getUniqueKey();
+
+ if (resource->cacheAccess().isScratch()) {
+ SkASSERT(!uniqueKey.isValid());
+ ++fScratch;
+ SkASSERT(fScratchMap->countForKey(scratchKey));
+ SkASSERT(!resource->resourcePriv().refsWrappedObjects());
+ } else if (scratchKey.isValid()) {
+ SkASSERT(SkBudgeted::kNo == resource->resourcePriv().isBudgeted() ||
+ uniqueKey.isValid());
+ if (!uniqueKey.isValid()) {
+ ++fCouldBeScratch;
+ SkASSERT(fScratchMap->countForKey(scratchKey));
+ }
+ SkASSERT(!resource->resourcePriv().refsWrappedObjects());
+ }
+ if (uniqueKey.isValid()) {
+ ++fContent;
+ SkASSERT(fUniqueHash->find(uniqueKey) == resource);
+ SkASSERT(!resource->resourcePriv().refsWrappedObjects());
+ SkASSERT(SkBudgeted::kYes == resource->resourcePriv().isBudgeted());
+
+ if (scratchKey.isValid()) {
+ SkASSERT(!fScratchMap->has(resource, scratchKey));
+ }
+ }
+
+ if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
+ ++fBudgetedCount;
+ fBudgetedBytes += resource->gpuMemorySize();
+ }
+ }
+ };
+
+ {
+ ScratchMap::ConstIter iter(&fScratchMap);
+
+ int count = 0;
+ for ( ; !iter.done(); ++iter) {
+ const GrGpuResource* resource = *iter;
+ SkASSERT(resource->resourcePriv().getScratchKey().isValid());
+ SkASSERT(!resource->getUniqueKey().isValid());
+ count++;
+ }
+ SkASSERT(count == fScratchMap.count()); // ensure the iterator is working correctly
+ }
+
+ Stats stats(this);
+
+ for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
+ SkASSERT(!fNonpurgeableResources[i]->isPurgeable() ||
+ fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
+ SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
+ SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
+ stats.update(fNonpurgeableResources[i]);
+ }
+ for (int i = 0; i < fPurgeableQueue.count(); ++i) {
+ SkASSERT(fPurgeableQueue.at(i)->isPurgeable());
+ SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
+ SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
+ stats.update(fPurgeableQueue.at(i));
+ }
+
+ SkASSERT(fCount == this->getResourceCount());
+ SkASSERT(fBudgetedCount <= fCount);
+ SkASSERT(fBudgetedBytes <= fBytes);
+ SkASSERT(stats.fBytes == fBytes);
+ SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
+ SkASSERT(stats.fBudgetedCount == fBudgetedCount);
+#if GR_CACHE_STATS
+ SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
+ SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
+ SkASSERT(fBytes <= fHighWaterBytes);
+ SkASSERT(fCount <= fHighWaterCount);
+ SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
+ SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
+#endif
+ SkASSERT(stats.fContent == fUniqueHash.count());
+ SkASSERT(stats.fScratch + stats.fCouldBeScratch == fScratchMap.count());
+
+ // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
+ // calls. This will be fixed when subresource registration is explicit.
+ // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
+ // SkASSERT(!overBudget || locked == count || fPurging);
+}
+
+bool GrResourceCache::isInCache(const GrGpuResource* resource) const {
+ int index = *resource->cacheAccess().accessCacheIndex();
+ if (index < 0) {
+ return false;
+ }
+ if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
+ return true;
+ }
+ if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
+ return true;
+ }
+ SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
+ return false;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrResourceCache.h b/gfx/skia/skia/src/gpu/GrResourceCache.h
new file mode 100644
index 000000000..ae9a4e7ee
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrResourceCache.h
@@ -0,0 +1,412 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrResourceCache_DEFINED
+#define GrResourceCache_DEFINED
+
+#include "GrGpuResource.h"
+#include "GrGpuResourceCacheAccess.h"
+#include "GrGpuResourcePriv.h"
+#include "GrResourceCache.h"
+#include "GrResourceKey.h"
+#include "SkMessageBus.h"
+#include "SkRefCnt.h"
+#include "SkTArray.h"
+#include "SkTDPQueue.h"
+#include "SkTInternalLList.h"
+#include "SkTMultiMap.h"
+
+class GrCaps;
+class SkString;
+class SkTraceMemoryDump;
+
+/**
+ * Manages the lifetime of all GrGpuResource instances.
+ *
+ * Resources may optionally have two types of keys:
+ * 1) A scratch key. This is for resources whose allocations are cached but not their contents.
+ * Multiple resources can share the same scratch key. This is so a caller can have two
+ * resource instances with the same properties (e.g. multipass rendering that ping-pongs
+ * between two temporary surfaces). The scratch key is set at resource creation time and
+ * should never change. Resources need not have a scratch key.
+ * 2) A unique key. This key's meaning is specific to the domain that created the key. Only one
+ * resource may have a given unique key. The unique key can be set, cleared, or changed
+ * anytime after resource creation.
+ *
+ * A unique key always takes precedence over a scratch key when a resource has both types of keys.
+ * If a resource has neither key type then it will be deleted as soon as the last reference to it
+ * is dropped.
+ */
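+// Illustrative usage sketch (added commentary, not part of the upstream header; the key and
+// size variables below are hypothetical):
+//
+//     GrResourceCache cache(caps);
+//     // Reuse an existing allocation that matches the scratch key, if one is available.
+//     GrGpuResource* res = cache.findAndRefScratchResource(
+//             scratchKey, byteSize, GrResourceCache::kPreferNoPendingIO_ScratchFlag);
+//     // A resource found this way can later be given a unique key through its resourcePriv()
+//     // interface, after which it is looked up with findAndRefUniqueResource() instead.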
+class GrResourceCache {
+public:
+ GrResourceCache(const GrCaps* caps);
+ ~GrResourceCache();
+
+ // Default maximum number of budgeted resources in the cache.
+ static const int kDefaultMaxCount = 2 * (1 << 12);
+ // Default maximum number of bytes of gpu memory of budgeted resources in the cache.
+ static const size_t kDefaultMaxSize = 96 * (1 << 20);
+    // Default number of external flushes a budgeted resource can go unused in the cache before it
+ // is purged. Using a value <= 0 disables this feature.
+ static const int kDefaultMaxUnusedFlushes =
+ 1 * /* flushes per frame */
+ 60 * /* fps */
+ 30; /* seconds */
+
+ /** Used to access functionality needed by GrGpuResource for lifetime management. */
+ class ResourceAccess;
+ ResourceAccess resourceAccess();
+
+ /**
+ * Sets the cache limits in terms of number of resources, max gpu memory byte size, and number
+ * of external GrContext flushes that a resource can be unused before it is evicted. The latter
+ * value is a suggestion and there is no promise that a resource will be purged immediately
+ * after it hasn't been used in maxUnusedFlushes flushes.
+ */
+ void setLimits(int count, size_t bytes, int maxUnusedFlushes = kDefaultMaxUnusedFlushes);
+
+ /**
+ * Returns the number of resources.
+ */
+ int getResourceCount() const {
+ return fPurgeableQueue.count() + fNonpurgeableResources.count();
+ }
+
+ /**
+ * Returns the number of resources that count against the budget.
+ */
+ int getBudgetedResourceCount() const { return fBudgetedCount; }
+
+ /**
+ * Returns the number of bytes consumed by resources.
+ */
+ size_t getResourceBytes() const { return fBytes; }
+
+ /**
+ * Returns the number of bytes consumed by budgeted resources.
+ */
+ size_t getBudgetedResourceBytes() const { return fBudgetedBytes; }
+
+ /**
+     * Returns the resource count budget for the cache.
+ */
+ int getMaxResourceCount() const { return fMaxCount; }
+
+ /**
+     * Returns the byte budget for cached resources.
+ */
+ size_t getMaxResourceBytes() const { return fMaxBytes; }
+
+ /**
+ * Abandons the backend API resources owned by all GrGpuResource objects and removes them from
+ * the cache.
+ */
+ void abandonAll();
+
+ /**
+ * Releases the backend API resources owned by all GrGpuResource objects and removes them from
+ * the cache.
+ */
+ void releaseAll();
+
+ enum {
+ /** Preferentially returns scratch resources with no pending IO. */
+ kPreferNoPendingIO_ScratchFlag = 0x1,
+ /** Will not return any resources that match but have pending IO. */
+ kRequireNoPendingIO_ScratchFlag = 0x2,
+ };
+
+ /**
+ * Find a resource that matches a scratch key.
+ */
+ GrGpuResource* findAndRefScratchResource(const GrScratchKey& scratchKey,
+ size_t resourceSize,
+ uint32_t flags);
+
+#ifdef SK_DEBUG
+ // This is not particularly fast and only used for validation, so debug only.
+ int countScratchEntriesForKey(const GrScratchKey& scratchKey) const {
+ return fScratchMap.countForKey(scratchKey);
+ }
+#endif
+
+ /**
+ * Find a resource that matches a unique key.
+ */
+ GrGpuResource* findAndRefUniqueResource(const GrUniqueKey& key) {
+ GrGpuResource* resource = fUniqueHash.find(key);
+ if (resource) {
+ this->refAndMakeResourceMRU(resource);
+ }
+ return resource;
+ }
+
+ /**
+ * Query whether a unique key exists in the cache.
+ */
+ bool hasUniqueKey(const GrUniqueKey& key) const {
+ return SkToBool(fUniqueHash.find(key));
+ }
+
+    /** Purges resources until the cache is under budget and processes resources with
+        invalidated unique keys. */
+ void purgeAsNeeded();
+
+ /** Purges all resources that don't have external owners. */
+ void purgeAllUnlocked();
+
+ /** Returns true if the cache would like a flush to occur in order to make more resources
+ purgeable. */
+ bool requestsFlush() const { return fRequestFlush; }
+
+ enum FlushType {
+ kExternal,
+ kImmediateMode,
+ kCacheRequested,
+ };
+ void notifyFlushOccurred(FlushType);
+
+#if GR_CACHE_STATS
+ struct Stats {
+ int fTotal;
+ int fNumPurgeable;
+ int fNumNonPurgeable;
+
+ int fScratch;
+ int fWrapped;
+ size_t fUnbudgetedSize;
+
+ Stats() { this->reset(); }
+
+ void reset() {
+ fTotal = 0;
+ fNumPurgeable = 0;
+ fNumNonPurgeable = 0;
+ fScratch = 0;
+ fWrapped = 0;
+ fUnbudgetedSize = 0;
+ }
+
+ void update(GrGpuResource* resource) {
+ if (resource->cacheAccess().isScratch()) {
+ ++fScratch;
+ }
+ if (resource->resourcePriv().refsWrappedObjects()) {
+ ++fWrapped;
+ }
+ if (SkBudgeted::kNo == resource->resourcePriv().isBudgeted()) {
+ fUnbudgetedSize += resource->gpuMemorySize();
+ }
+ }
+ };
+
+ void getStats(Stats*) const;
+
+ void dumpStats(SkString*) const;
+
+ void dumpStatsKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* value) const;
+#endif
+
+ // This function is for unit testing and is only defined in test tools.
+ void changeTimestamp(uint32_t newTimestamp);
+
+ // Enumerates all cached resources and dumps their details to traceMemoryDump.
+ void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;
+
+private:
+ ///////////////////////////////////////////////////////////////////////////
+ /// @name Methods accessible via ResourceAccess
+ ////
+ void insertResource(GrGpuResource*);
+ void removeResource(GrGpuResource*);
+ void notifyCntReachedZero(GrGpuResource*, uint32_t flags);
+ void didChangeGpuMemorySize(const GrGpuResource*, size_t oldSize);
+ void changeUniqueKey(GrGpuResource*, const GrUniqueKey&);
+ void removeUniqueKey(GrGpuResource*);
+ void willRemoveScratchKey(const GrGpuResource*);
+ void didChangeBudgetStatus(GrGpuResource*);
+ void refAndMakeResourceMRU(GrGpuResource*);
+ /// @}
+
+ void processInvalidUniqueKeys(const SkTArray<GrUniqueKeyInvalidatedMessage>&);
+ void addToNonpurgeableArray(GrGpuResource*);
+ void removeFromNonpurgeableArray(GrGpuResource*);
+ bool overBudget() const { return fBudgetedBytes > fMaxBytes || fBudgetedCount > fMaxCount; }
+
+ bool wouldFit(size_t bytes) {
+ return fBudgetedBytes+bytes <= fMaxBytes && fBudgetedCount+1 <= fMaxCount;
+ }
+
+ uint32_t getNextTimestamp();
+
+#ifdef SK_DEBUG
+ bool isInCache(const GrGpuResource* r) const;
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+ class AutoValidate;
+
+ class AvailableForScratchUse;
+
+ struct ScratchMapTraits {
+ static const GrScratchKey& GetKey(const GrGpuResource& r) {
+ return r.resourcePriv().getScratchKey();
+ }
+
+ static uint32_t Hash(const GrScratchKey& key) { return key.hash(); }
+ };
+ typedef SkTMultiMap<GrGpuResource, GrScratchKey, ScratchMapTraits> ScratchMap;
+
+ struct UniqueHashTraits {
+ static const GrUniqueKey& GetKey(const GrGpuResource& r) { return r.getUniqueKey(); }
+
+ static uint32_t Hash(const GrUniqueKey& key) { return key.hash(); }
+ };
+ typedef SkTDynamicHash<GrGpuResource, GrUniqueKey, UniqueHashTraits> UniqueHash;
+
+ static bool CompareTimestamp(GrGpuResource* const& a, GrGpuResource* const& b) {
+ return a->cacheAccess().timestamp() < b->cacheAccess().timestamp();
+ }
+
+ static int* AccessResourceIndex(GrGpuResource* const& res) {
+ return res->cacheAccess().accessCacheIndex();
+ }
+
+ typedef SkMessageBus<GrUniqueKeyInvalidatedMessage>::Inbox InvalidUniqueKeyInbox;
+ typedef SkTDPQueue<GrGpuResource*, CompareTimestamp, AccessResourceIndex> PurgeableQueue;
+ typedef SkTDArray<GrGpuResource*> ResourceArray;
+
+    // Whenever a resource is added to the cache or returned from a cache lookup, fTimestamp is
+    // assigned as the resource's timestamp and then incremented. fPurgeableQueue orders the
+ // purgeable resources by this value, and thus is used to purge resources in LRU order.
+ uint32_t fTimestamp;
+ PurgeableQueue fPurgeableQueue;
+ ResourceArray fNonpurgeableResources;
+
+ // This map holds all resources that can be used as scratch resources.
+ ScratchMap fScratchMap;
+ // This holds all resources that have unique keys.
+ UniqueHash fUniqueHash;
+
+ // our budget, used in purgeAsNeeded()
+ int fMaxCount;
+ size_t fMaxBytes;
+ int fMaxUnusedFlushes;
+
+#if GR_CACHE_STATS
+ int fHighWaterCount;
+ size_t fHighWaterBytes;
+ int fBudgetedHighWaterCount;
+ size_t fBudgetedHighWaterBytes;
+#endif
+
+ // our current stats for all resources
+ SkDEBUGCODE(int fCount;)
+ size_t fBytes;
+
+ // our current stats for resources that count against the budget
+ int fBudgetedCount;
+ size_t fBudgetedBytes;
+
+ bool fRequestFlush;
+ uint32_t fExternalFlushCnt;
+
+ InvalidUniqueKeyInbox fInvalidUniqueKeyInbox;
+
+ // This resource is allowed to be in the nonpurgeable array for the sake of validate() because
+ // we're in the midst of converting it to purgeable status.
+ SkDEBUGCODE(GrGpuResource* fNewlyPurgeableResourceForValidation;)
+
+ bool fPreferVRAMUseOverFlushes;
+};
+
+class GrResourceCache::ResourceAccess {
+private:
+ ResourceAccess(GrResourceCache* cache) : fCache(cache) { }
+ ResourceAccess(const ResourceAccess& that) : fCache(that.fCache) { }
+ ResourceAccess& operator=(const ResourceAccess&); // unimpl
+
+ /**
+ * Insert a resource into the cache.
+ */
+ void insertResource(GrGpuResource* resource) { fCache->insertResource(resource); }
+
+ /**
+ * Removes a resource from the cache.
+ */
+ void removeResource(GrGpuResource* resource) { fCache->removeResource(resource); }
+
+ /**
+ * Notifications that should be sent to the cache when the ref/io cnt status of resources
+ * changes.
+ */
+ enum RefNotificationFlags {
+ /** All types of refs on the resource have reached zero. */
+ kAllCntsReachedZero_RefNotificationFlag = 0x1,
+ /** The normal (not pending IO type) ref cnt has reached zero. */
+ kRefCntReachedZero_RefNotificationFlag = 0x2,
+ };
+ /**
+ * Called by GrGpuResources when they detect that their ref/io cnts have reached zero. When the
+ * normal ref cnt reaches zero the flags that are set should be:
+ * a) kRefCntReachedZero if a pending IO cnt is still non-zero.
+ * b) (kRefCntReachedZero | kAllCntsReachedZero) when all pending IO cnts are also zero.
+     * kAllCntsReachedZero is set by itself if a pending IO cnt is decremented to zero and all
+     * the other cnts are already zero.
+ */
+ void notifyCntReachedZero(GrGpuResource* resource, uint32_t flags) {
+ fCache->notifyCntReachedZero(resource, flags);
+ }
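+    // Illustrative sequence (added commentary): if the last ref is dropped while a read is still
+    // pending, only kRefCntReachedZero_RefNotificationFlag is passed; when that pending IO later
+    // completes, kAllCntsReachedZero_RefNotificationFlag is passed by itself and the cache may
+    // then move the resource onto its purgeable queue.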
+
+ /**
+ * Called by GrGpuResources when their sizes change.
+ */
+ void didChangeGpuMemorySize(const GrGpuResource* resource, size_t oldSize) {
+ fCache->didChangeGpuMemorySize(resource, oldSize);
+ }
+
+ /**
+ * Called by GrGpuResources to change their unique keys.
+ */
+ void changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
+ fCache->changeUniqueKey(resource, newKey);
+ }
+
+ /**
+ * Called by a GrGpuResource to remove its unique key.
+ */
+ void removeUniqueKey(GrGpuResource* resource) { fCache->removeUniqueKey(resource); }
+
+ /**
+ * Called by a GrGpuResource when it removes its scratch key.
+ */
+ void willRemoveScratchKey(const GrGpuResource* resource) {
+ fCache->willRemoveScratchKey(resource);
+ }
+
+ /**
+ * Called by GrGpuResources when they change from budgeted to unbudgeted or vice versa.
+ */
+ void didChangeBudgetStatus(GrGpuResource* resource) { fCache->didChangeBudgetStatus(resource); }
+
+ // No taking addresses of this type.
+ const ResourceAccess* operator&() const;
+ ResourceAccess* operator&();
+
+ GrResourceCache* fCache;
+
+ friend class GrGpuResource; // To access all the proxy inline methods.
+ friend class GrResourceCache; // To create this type.
+};
+
+inline GrResourceCache::ResourceAccess GrResourceCache::resourceAccess() {
+ return ResourceAccess(this);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrResourceHandle.h b/gfx/skia/skia/src/gpu/GrResourceHandle.h
new file mode 100644
index 000000000..383bbea6c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrResourceHandle.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+*/
+
+#ifndef GrResourceHandle_DEFINED
+#define GrResourceHandle_DEFINED
+
+#include "SkTypes.h"
+
+// Opaque handle to a resource. Users should always use the macro below to create a specific
+// template instantiation of GrResourceHandle.
+template <typename kind> class GrResourceHandle {
+public:
+ GrResourceHandle(int value) : fValue(value) {
+ SkASSERT(this->isValid());
+ }
+
+ GrResourceHandle() : fValue(kInvalid_ResourceHandle) {}
+
+ bool operator==(const GrResourceHandle& other) const { return other.fValue == fValue; }
+ bool isValid() const { return kInvalid_ResourceHandle != fValue; }
+ int toIndex() const { SkASSERT(this->isValid()); return fValue; }
+
+private:
+ static const int kInvalid_ResourceHandle = -1;
+ int fValue;
+};
+
+// Creates a type "name", which is a specific template instantiation of GrResourceHandle.
+#define GR_DEFINE_RESOURCE_HANDLE_CLASS(name) \
+ struct name##Kind {}; \
+ using name = GrResourceHandle<name##Kind>;
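+
+// Example (illustrative; 'GrFooHandle' is a made-up name):
+//     GR_DEFINE_RESOURCE_HANDLE_CLASS(GrFooHandle);
+// declares an empty tag struct 'GrFooHandleKind' and makes 'GrFooHandle' an alias for
+// GrResourceHandle<GrFooHandleKind>, so handles of different kinds are distinct C++ types.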
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrResourceProvider.cpp b/gfx/skia/skia/src/gpu/GrResourceProvider.cpp
new file mode 100644
index 000000000..7518378ad
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrResourceProvider.cpp
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrResourceProvider.h"
+
+#include "GrBuffer.h"
+#include "GrCaps.h"
+#include "GrGpu.h"
+#include "GrPathRendering.h"
+#include "GrRenderTarget.h"
+#include "GrRenderTargetPriv.h"
+#include "GrResourceCache.h"
+#include "GrResourceKey.h"
+#include "GrStencilAttachment.h"
+#include "SkMathPriv.h"
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gQuadIndexBufferKey);
+
+GrResourceProvider::GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* owner)
+ : INHERITED(gpu, cache, owner) {
+ GR_DEFINE_STATIC_UNIQUE_KEY(gQuadIndexBufferKey);
+ fQuadIndexBufferKey = gQuadIndexBufferKey;
+}
+
+const GrBuffer* GrResourceProvider::createInstancedIndexBuffer(const uint16_t* pattern,
+ int patternSize,
+ int reps,
+ int vertCount,
+ const GrUniqueKey& key) {
+ size_t bufferSize = patternSize * reps * sizeof(uint16_t);
+
+    // This is typically used in GrBatches, so we assume kNoPendingIO.
+ GrBuffer* buffer = this->createBuffer(bufferSize, kIndex_GrBufferType, kStatic_GrAccessPattern,
+ kNoPendingIO_Flag);
+ if (!buffer) {
+ return nullptr;
+ }
+ uint16_t* data = (uint16_t*) buffer->map();
+ bool useTempData = (nullptr == data);
+ if (useTempData) {
+ data = new uint16_t[reps * patternSize];
+ }
+ for (int i = 0; i < reps; ++i) {
+ int baseIdx = i * patternSize;
+ uint16_t baseVert = (uint16_t)(i * vertCount);
+ for (int j = 0; j < patternSize; ++j) {
+ data[baseIdx+j] = baseVert + pattern[j];
+ }
+ }
+    if (useTempData) {
+        if (!buffer->updateData(data, bufferSize)) {
+            delete[] data;   // avoid leaking the temporary pattern copy on failure
+            buffer->unref();
+            return nullptr;
+        }
+        delete[] data;
+ } else {
+ buffer->unmap();
+ }
+ this->assignUniqueKeyToResource(key, buffer);
+ return buffer;
+}
+
+const GrBuffer* GrResourceProvider::createQuadIndexBuffer() {
+ static const int kMaxQuads = 1 << 12; // max possible: (1 << 14) - 1;
+ GR_STATIC_ASSERT(4 * kMaxQuads <= 65535);
+ static const uint16_t kPattern[] = { 0, 1, 2, 0, 2, 3 };
+
+ return this->createInstancedIndexBuffer(kPattern, 6, kMaxQuads, 4, fQuadIndexBufferKey);
+}
+
+GrPath* GrResourceProvider::createPath(const SkPath& path, const GrStyle& style) {
+ SkASSERT(this->gpu()->pathRendering());
+ return this->gpu()->pathRendering()->createPath(path, style);
+}
+
+GrPathRange* GrResourceProvider::createPathRange(GrPathRange::PathGenerator* gen,
+ const GrStyle& style) {
+ SkASSERT(this->gpu()->pathRendering());
+ return this->gpu()->pathRendering()->createPathRange(gen, style);
+}
+
+GrPathRange* GrResourceProvider::createGlyphs(const SkTypeface* tf,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc,
+ const GrStyle& style) {
+
+ SkASSERT(this->gpu()->pathRendering());
+ return this->gpu()->pathRendering()->createGlyphs(tf, effects, desc, style);
+}
+
+GrBuffer* GrResourceProvider::createBuffer(size_t size, GrBufferType intendedType,
+ GrAccessPattern accessPattern, uint32_t flags,
+ const void* data) {
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+ if (kDynamic_GrAccessPattern != accessPattern) {
+ return this->gpu()->createBuffer(size, intendedType, accessPattern, data);
+ }
+ if (!(flags & kRequireGpuMemory_Flag) &&
+ this->gpu()->caps()->preferClientSideDynamicBuffers() &&
+ GrBufferTypeIsVertexOrIndex(intendedType) &&
+ kDynamic_GrAccessPattern == accessPattern) {
+ return GrBuffer::CreateCPUBacked(this->gpu(), size, intendedType, data);
+ }
+
+ // bin by pow2 with a reasonable min
+ static const size_t MIN_SIZE = 1 << 12;
+ size_t allocSize = size > (1u << 31)
+ ? size_t(SkTMin(uint64_t(SIZE_MAX), uint64_t(GrNextPow2(uint32_t(uint64_t(size) >> 32))) << 32))
+ : size_t(GrNextPow2(uint32_t(size)));
+ allocSize = SkTMax(allocSize, MIN_SIZE);
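+    // (Illustrative: a 5000-byte request is binned to an 8192-byte scratch allocation, while any
+    // request of 4096 bytes or fewer shares the 4096-byte minimum bin.)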
+
+ GrScratchKey key;
+ GrBuffer::ComputeScratchKeyForDynamicVBO(allocSize, intendedType, &key);
+ uint32_t scratchFlags = 0;
+ if (flags & kNoPendingIO_Flag) {
+ scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
+ } else {
+ scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
+ }
+ GrBuffer* buffer = static_cast<GrBuffer*>(
+ this->cache()->findAndRefScratchResource(key, allocSize, scratchFlags));
+ if (!buffer) {
+ buffer = this->gpu()->createBuffer(allocSize, intendedType, kDynamic_GrAccessPattern);
+ if (!buffer) {
+ return nullptr;
+ }
+ }
+ if (data) {
+ buffer->updateData(data, size);
+ }
+ SkASSERT(!buffer->isCPUBacked()); // We should only cache real VBOs.
+ return buffer;
+}
+
+GrBatchAtlas* GrResourceProvider::createAtlas(GrPixelConfig config,
+ int width, int height,
+ int numPlotsX, int numPlotsY,
+ GrBatchAtlas::EvictionFunc func, void* data) {
+ GrSurfaceDesc desc;
+ desc.fFlags = kNone_GrSurfaceFlags;
+ desc.fWidth = width;
+ desc.fHeight = height;
+ desc.fConfig = config;
+
+ // We don't want to flush the context so we claim we're in the middle of flushing so as to
+    // guarantee we do not receive a texture with pending IO.
+ // TODO: Determine how to avoid having to do this. (https://bug.skia.org/4156)
+ static const uint32_t kFlags = GrResourceProvider::kNoPendingIO_Flag;
+ GrTexture* texture = this->createApproxTexture(desc, kFlags);
+ if (!texture) {
+ return nullptr;
+ }
+ GrBatchAtlas* atlas = new GrBatchAtlas(texture, numPlotsX, numPlotsY);
+ atlas->registerEvictionCallback(func, data);
+ return atlas;
+}
+
+GrStencilAttachment* GrResourceProvider::attachStencilAttachment(GrRenderTarget* rt) {
+ SkASSERT(rt);
+ if (rt->renderTargetPriv().getStencilAttachment()) {
+ return rt->renderTargetPriv().getStencilAttachment();
+ }
+
+ if (!rt->wasDestroyed() && rt->canAttemptStencilAttachment()) {
+ GrUniqueKey sbKey;
+
+ int width = rt->width();
+ int height = rt->height();
+#if 0
+ if (this->caps()->oversizedStencilSupport()) {
+ width = SkNextPow2(width);
+ height = SkNextPow2(height);
+ }
+#endif
+ bool newStencil = false;
+ GrStencilAttachment::ComputeSharedStencilAttachmentKey(width, height,
+ rt->numStencilSamples(), &sbKey);
+ GrStencilAttachment* stencil = static_cast<GrStencilAttachment*>(
+ this->findAndRefResourceByUniqueKey(sbKey));
+ if (!stencil) {
+ // Need to try and create a new stencil
+ stencil = this->gpu()->createStencilAttachmentForRenderTarget(rt, width, height);
+ if (stencil) {
+ stencil->resourcePriv().setUniqueKey(sbKey);
+ newStencil = true;
+ }
+ }
+ if (rt->renderTargetPriv().attachStencilAttachment(stencil)) {
+ if (newStencil) {
+ // Right now we're clearing the stencil attachment here after it is
+ // attached to a RT for the first time. When we start matching
+ // stencil buffers with smaller color targets this will no longer
+ // be correct because it won't be guaranteed to clear the entire
+ // sb.
+ // We used to clear down in the GL subclass using a special purpose
+ // FBO. But iOS doesn't allow a stencil-only FBO. It reports unsupported
+ // FBO status.
+ this->gpu()->clearStencil(rt);
+ }
+ }
+ }
+ return rt->renderTargetPriv().getStencilAttachment();
+}
+
+GrRenderTarget* GrResourceProvider::wrapBackendTextureAsRenderTarget(
+ const GrBackendTextureDesc& desc) {
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+ return this->gpu()->wrapBackendTextureAsRenderTarget(desc);
+}
diff --git a/gfx/skia/skia/src/gpu/GrResourceProvider.h b/gfx/skia/skia/src/gpu/GrResourceProvider.h
new file mode 100644
index 000000000..c0922c0a0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrResourceProvider.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrResourceProvider_DEFINED
+#define GrResourceProvider_DEFINED
+
+#include "GrBatchAtlas.h"
+#include "GrBuffer.h"
+#include "GrTextureProvider.h"
+#include "GrPathRange.h"
+
+class GrBatchAtlas;
+class GrPath;
+class GrRenderTarget;
+class GrSingleOwner;
+class GrStencilAttachment;
+class GrStyle;
+class SkDescriptor;
+class SkPath;
+class SkTypeface;
+
+/**
+ * An extension of the texture provider for arbitrary resource types. This class is intended for
+ * use within the Gr code base, not by clients or extensions (e.g. third party GrProcessor
+ * derivatives).
+ *
+ * This currently inherits from GrTextureProvider non-publicly to force callers of the provider
+ * to make a flags (pendingIO) decision and not use the GrTP methods that don't take flags. This
+ * can be relaxed once https://bug.skia.org/4156 is fixed.
+ */
+class GrResourceProvider : protected GrTextureProvider {
+public:
+ GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* owner);
+
+ template <typename T> T* findAndRefTByUniqueKey(const GrUniqueKey& key) {
+ return static_cast<T*>(this->findAndRefResourceByUniqueKey(key));
+ }
+
+ /**
+ * Either finds and refs, or creates an index buffer for instanced drawing with a specific
+ * pattern if the index buffer is not found. If the return is non-null, the caller owns
+ * a ref on the returned GrBuffer.
+ *
+ * @param pattern the pattern of indices to repeat
+ * @param patternSize size in bytes of the pattern
+ * @param reps number of times to repeat the pattern
+ * @param vertCount number of vertices the pattern references
+ * @param key Key to be assigned to the index buffer.
+ *
+ * @return The index buffer if successful, otherwise nullptr.
+ */
+ const GrBuffer* findOrCreateInstancedIndexBuffer(const uint16_t* pattern,
+ int patternSize,
+ int reps,
+ int vertCount,
+ const GrUniqueKey& key) {
+ if (GrBuffer* buffer = this->findAndRefTByUniqueKey<GrBuffer>(key)) {
+ return buffer;
+ }
+ return this->createInstancedIndexBuffer(pattern, patternSize, reps, vertCount, key);
+ }
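+    // Illustrative call (mirrors createQuadIndexBuffer() in GrResourceProvider.cpp): repeating
+    // the pattern { 0, 1, 2, 0, 2, 3 } with patternSize 6 and vertCount 4 produces the indices
+    // 0,1,2,0,2,3, 4,5,6,4,6,7, and so on, one quad per repetition.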
+
+ /**
+ * Returns an index buffer that can be used to render quads.
+ * Six indices per quad: 0, 1, 2, 0, 2, 3, etc.
+ * The max number of quads is the buffer's index capacity divided by 6.
+     * Draw with kTriangles_GrPrimitiveType.
+     * @return the quad index buffer
+ */
+ const GrBuffer* refQuadIndexBuffer() {
+ if (GrBuffer* buffer =
+ this->findAndRefTByUniqueKey<GrBuffer>(fQuadIndexBufferKey)) {
+ return buffer;
+ }
+ return this->createQuadIndexBuffer();
+ }
+
+ /**
+ * Factories for GrPath and GrPathRange objects. It's an error to call these if path rendering
+ * is not supported.
+ */
+ GrPath* createPath(const SkPath&, const GrStyle&);
+ GrPathRange* createPathRange(GrPathRange::PathGenerator*, const GrStyle&);
+ GrPathRange* createGlyphs(const SkTypeface*, const SkScalerContextEffects&,
+ const SkDescriptor*, const GrStyle&);
+
+ using GrTextureProvider::assignUniqueKeyToResource;
+ using GrTextureProvider::findAndRefResourceByUniqueKey;
+ using GrTextureProvider::findAndRefTextureByUniqueKey;
+ using GrTextureProvider::abandon;
+
+ enum Flags {
+ /** If the caller intends to do direct reads/writes to/from the CPU then this flag must be
+ * set when accessing resources during a GrDrawTarget flush. This includes the execution of
+ * GrBatch objects. The reason is that these memory operations are done immediately and
+ * will occur out of order WRT the operations being flushed.
+ * Make this automatic: https://bug.skia.org/4156
+ */
+ kNoPendingIO_Flag = 0x1,
+
+ /** Normally the caps may indicate a preference for client-side buffers. Set this flag when
+ * creating a buffer to guarantee it resides in GPU memory.
+ */
+ kRequireGpuMemory_Flag = 0x2,
+ };
+
+ /**
+ * Returns a buffer.
+ *
+ * @param size minimum size of buffer to return.
+ * @param intendedType hint to the graphics subsystem about what the buffer will be used for.
+ * @param GrAccessPattern hint to the graphics subsystem about how the data will be accessed.
+ * @param flags see Flags enum.
+ * @param data optional data with which to initialize the buffer.
+ *
+ * @return the buffer if successful, otherwise nullptr.
+ */
+ GrBuffer* createBuffer(size_t size, GrBufferType intendedType, GrAccessPattern, uint32_t flags,
+ const void* data = nullptr);
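+    // Illustrative call (the 'verts' array is hypothetical; kVertex_GrBufferType is assumed to
+    // be the vertex member of GrBufferType in this revision):
+    //     GrBuffer* vb = provider->createBuffer(sizeof(verts), kVertex_GrBufferType,
+    //                                           kDynamic_GrAccessPattern, kNoPendingIO_Flag, verts);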
+
+ GrTexture* createApproxTexture(const GrSurfaceDesc& desc, uint32_t flags) {
+ SkASSERT(0 == flags || kNoPendingIO_Flag == flags);
+ return this->internalCreateApproxTexture(desc, flags);
+ }
+
+ /** Returns a GrBatchAtlas. This function can be called anywhere, but the returned atlas should
+ * only be used inside of GrBatch::generateGeometry
+ * @param GrPixelConfig The pixel config which this atlas will store
+ * @param width width in pixels of the atlas
+ * @param height height in pixels of the atlas
+ * @param numPlotsX The number of plots the atlas should be broken up into in the X
+ * direction
+ * @param numPlotsY The number of plots the atlas should be broken up into in the Y
+ * direction
+ * @param func An eviction function which will be called whenever the atlas has to
+ * evict data
+     * @param data          User supplied data which will be passed into func whenever an
+ * eviction occurs
+ *
+ * @return An initialized GrBatchAtlas, or nullptr if creation fails
+ */
+ GrBatchAtlas* createAtlas(GrPixelConfig, int width, int height, int numPlotsX, int numPlotsY,
+ GrBatchAtlas::EvictionFunc func, void* data);
+
+ /**
+ * If passed in render target already has a stencil buffer, return it. Otherwise attempt to
+ * attach one.
+ */
+ GrStencilAttachment* attachStencilAttachment(GrRenderTarget* rt);
+
+ const GrCaps* caps() { return this->gpu()->caps(); }
+
+ /**
+ * Wraps an existing texture with a GrRenderTarget object. This is useful when the provided
+ * texture has a format that cannot be textured from by Skia, but we want to raster to it.
+ *
+ * The texture is wrapped as borrowed: the texture object will not be freed when the
+ * render target is destroyed.
+ *
+ * @return GrRenderTarget object or NULL on failure.
+ */
+ GrRenderTarget* wrapBackendTextureAsRenderTarget(const GrBackendTextureDesc& desc);
+
+private:
+ const GrBuffer* createInstancedIndexBuffer(const uint16_t* pattern,
+ int patternSize,
+ int reps,
+ int vertCount,
+ const GrUniqueKey& key);
+
+ const GrBuffer* createQuadIndexBuffer();
+
+ GrUniqueKey fQuadIndexBufferKey;
+
+ typedef GrTextureProvider INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrSWMaskHelper.cpp b/gfx/skia/skia/src/gpu/GrSWMaskHelper.cpp
new file mode 100644
index 000000000..7bc26af14
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSWMaskHelper.cpp
@@ -0,0 +1,201 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrSWMaskHelper.h"
+
+#include "GrCaps.h"
+#include "GrContext.h"
+#include "batches/GrDrawBatch.h"
+#include "GrDrawContext.h"
+#include "GrPipelineBuilder.h"
+#include "GrShape.h"
+
+#include "SkDistanceFieldGen.h"
+
+#include "batches/GrRectBatchFactory.h"
+
+/*
+ * Convert a boolean operation into a transfer mode code
+ */
+static SkBlendMode op_to_mode(SkRegion::Op op) {
+
+ static const SkBlendMode modeMap[] = {
+ SkBlendMode::kDstOut, // kDifference_Op
+ SkBlendMode::kModulate, // kIntersect_Op
+ SkBlendMode::kSrcOver, // kUnion_Op
+ SkBlendMode::kXor, // kXOR_Op
+ SkBlendMode::kClear, // kReverseDifference_Op
+ SkBlendMode::kSrc, // kReplace_Op
+ };
+
+ return modeMap[op];
+}
+
+/**
+ * Draw a single rect element of the clip stack into the accumulation bitmap
+ */
+void GrSWMaskHelper::drawRect(const SkRect& rect, SkRegion::Op op,
+ bool antiAlias, uint8_t alpha) {
+ SkPaint paint;
+
+ paint.setBlendMode(op_to_mode(op));
+ paint.setAntiAlias(antiAlias);
+ paint.setColor(SkColorSetARGB(alpha, alpha, alpha, alpha));
+
+ fDraw.drawRect(rect, paint);
+}
+
+/**
+ * Draw a single path element of the clip stack into the accumulation bitmap
+ */
+void GrSWMaskHelper::drawShape(const GrShape& shape, SkRegion::Op op, bool antiAlias,
+ uint8_t alpha) {
+ SkPaint paint;
+ paint.setPathEffect(sk_ref_sp(shape.style().pathEffect()));
+ shape.style().strokeRec().applyToPaint(&paint);
+ paint.setAntiAlias(antiAlias);
+
+ SkPath path;
+ shape.asPath(&path);
+ if (SkRegion::kReplace_Op == op && 0xFF == alpha) {
+ SkASSERT(0xFF == paint.getAlpha());
+ fDraw.drawPathCoverage(path, paint);
+ } else {
+ paint.setBlendMode(op_to_mode(op));
+ paint.setColor(SkColorSetARGB(alpha, alpha, alpha, alpha));
+ fDraw.drawPath(path, paint);
+ }
+}
+
+bool GrSWMaskHelper::init(const SkIRect& resultBounds, const SkMatrix* matrix) {
+ if (matrix) {
+ fMatrix = *matrix;
+ } else {
+ fMatrix.setIdentity();
+ }
+
+ // Now translate so the bounds' UL corner is at the origin
+ fMatrix.postTranslate(-SkIntToScalar(resultBounds.fLeft), -SkIntToScalar(resultBounds.fTop));
+ SkIRect bounds = SkIRect::MakeWH(resultBounds.width(), resultBounds.height());
+
+ const SkImageInfo bmImageInfo = SkImageInfo::MakeA8(bounds.width(), bounds.height());
+ if (!fPixels.tryAlloc(bmImageInfo)) {
+ return false;
+ }
+ fPixels.erase(0);
+
+ sk_bzero(&fDraw, sizeof(fDraw));
+ fDraw.fDst = fPixels;
+ fRasterClip.setRect(bounds);
+ fDraw.fRC = &fRasterClip;
+ fDraw.fMatrix = &fMatrix;
+ return true;
+}
+
+/**
+ * Get a texture (from the texture cache) of the correct size & format.
+ */
+GrTexture* GrSWMaskHelper::createTexture(TextureType textureType) {
+ GrSurfaceDesc desc;
+ desc.fWidth = fPixels.width();
+ desc.fHeight = fPixels.height();
+ desc.fConfig = kAlpha_8_GrPixelConfig;
+
+ if (TextureType::kApproximateFit == textureType) {
+ return fTexProvider->createApproxTexture(desc);
+ } else {
+ return fTexProvider->createTexture(desc, SkBudgeted::kYes);
+ }
+}
+
+/**
+ * Move the result of the software mask generation back to the gpu
+ */
+void GrSWMaskHelper::toTexture(GrTexture *texture) {
+ // Since we're uploading raster A8 data to it, 'texture' shouldn't have a render target.
+ SkASSERT(!texture->asRenderTarget());
+
+ texture->writePixels(0, 0, fPixels.width(), fPixels.height(), texture->config(),
+ fPixels.addr(), fPixels.rowBytes());
+
+}
+
+/**
+ * Convert mask generation results to a signed distance field
+ */
+void GrSWMaskHelper::toSDF(unsigned char* sdf) {
+ SkGenerateDistanceFieldFromA8Image(sdf, (const unsigned char*)fPixels.addr(),
+ fPixels.width(), fPixels.height(), fPixels.rowBytes());
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/**
+ * Software rasterizes shape to A8 mask and uploads the result to a scratch texture. Returns the
+ * resulting texture on success; nullptr on failure.
+ */
+GrTexture* GrSWMaskHelper::DrawShapeMaskToTexture(GrTextureProvider* texProvider,
+ const GrShape& shape,
+ const SkIRect& resultBounds,
+ bool antiAlias,
+ TextureType textureType,
+ const SkMatrix* matrix) {
+ GrSWMaskHelper helper(texProvider);
+
+ if (!helper.init(resultBounds, matrix)) {
+ return nullptr;
+ }
+
+ helper.drawShape(shape, SkRegion::kReplace_Op, antiAlias, 0xFF);
+
+ GrTexture* texture(helper.createTexture(textureType));
+ if (!texture) {
+ return nullptr;
+ }
+
+ helper.toTexture(texture);
+
+ return texture;
+}
+
+void GrSWMaskHelper::DrawToTargetWithShapeMask(GrTexture* texture,
+ GrDrawContext* drawContext,
+ const GrPaint& paint,
+ const GrUserStencilSettings& userStencilSettings,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkIPoint& textureOriginInDeviceSpace,
+ const SkIRect& deviceSpaceRectToDraw) {
+ SkMatrix invert;
+ if (!viewMatrix.invert(&invert)) {
+ return;
+ }
+
+ SkRect dstRect = SkRect::Make(deviceSpaceRectToDraw);
+
+ // We use device coords to compute the texture coordinates. We take the device coords and apply
+ // a translation so that the top-left of the device bounds maps to 0,0, and then a scaling
+ // matrix to normalized coords.
+ SkMatrix maskMatrix;
+ maskMatrix.setIDiv(texture->width(), texture->height());
+ maskMatrix.preTranslate(SkIntToScalar(-textureOriginInDeviceSpace.fX),
+ SkIntToScalar(-textureOriginInDeviceSpace.fY));
+ maskMatrix.preConcat(viewMatrix);
+ GrPipelineBuilder pipelineBuilder(paint, drawContext->mustUseHWAA(paint));
+ pipelineBuilder.setUserStencil(&userStencilSettings);
+
+ pipelineBuilder.addCoverageFragmentProcessor(
+ GrSimpleTextureEffect::Make(texture,
+ nullptr,
+ maskMatrix,
+ GrTextureParams::kNone_FilterMode));
+
+ SkAutoTUnref<GrDrawBatch> batch(GrRectBatchFactory::CreateNonAAFill(paint.getColor(),
+ SkMatrix::I(),
+ dstRect, nullptr, &invert));
+ drawContext->drawBatch(pipelineBuilder, clip, batch);
+}
diff --git a/gfx/skia/skia/src/gpu/GrSWMaskHelper.h b/gfx/skia/skia/src/gpu/GrSWMaskHelper.h
new file mode 100644
index 000000000..46520a91b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSWMaskHelper.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSWMaskHelper_DEFINED
+#define GrSWMaskHelper_DEFINED
+
+#include "GrColor.h"
+#include "GrTextureProvider.h"
+#include "SkAutoPixmapStorage.h"
+#include "SkBitmap.h"
+#include "SkDraw.h"
+#include "SkMatrix.h"
+#include "SkRasterClip.h"
+#include "SkRegion.h"
+#include "SkTypes.h"
+
+class GrClip;
+class GrPaint;
+class GrShape;
+class GrTextureProvider;
+class GrStyle;
+class GrTexture;
+struct GrUserStencilSettings;
+
+/**
+ * The GrSWMaskHelper helps generate clip masks using the software rendering
+ * path. It is intended to be used as:
+ *
+ * GrSWMaskHelper helper(context);
+ * helper.init(...);
+ *
+ * draw one or more paths/rects specifying the required boolean ops
+ *
+ * toTexture(); // to get it from the internal bitmap to the GPU
+ *
+ * The result of this process will be the final mask (on the GPU) in the
+ * upper left hand corner of the texture.
+ */
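+/*
+ * Illustrative sketch only (the texProvider, shape, maskBounds and viewMatrix names are
+ * assumptions, not part of this header): the static DrawShapeMaskToTexture() below performs
+ * the init/draw/upload sequence described above in a single call.
+ *
+ *   GrTexture* mask = GrSWMaskHelper::DrawShapeMaskToTexture(
+ *           texProvider, shape, maskBounds, true,   // antiAlias
+ *           GrSWMaskHelper::TextureType::kApproximateFit, &viewMatrix);
+ */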
+class GrSWMaskHelper : SkNoncopyable {
+public:
+ GrSWMaskHelper(GrTextureProvider* texProvider) : fTexProvider(texProvider) { }
+
+ // set up the internal state in preparation for draws. Since many masks
+ // may be accumulated in the helper during creation, "resultBounds"
+ // allows the caller to specify the region of interest - to limit the
+ // amount of work.
+ bool init(const SkIRect& resultBounds, const SkMatrix* matrix);
+
+ // Draw a single rect into the accumulation bitmap using the specified op
+ void drawRect(const SkRect& rect, SkRegion::Op op, bool antiAlias, uint8_t alpha);
+
+ // Draw a single path into the accumulation bitmap using the specified op
+ void drawShape(const GrShape&, SkRegion::Op op, bool antiAlias, uint8_t alpha);
+
+ // Move the mask generation results from the internal bitmap to the gpu.
+ void toTexture(GrTexture* texture);
+
+ // Convert mask generation results to a signed distance field
+ void toSDF(unsigned char* sdf);
+
+ // Reset the internal bitmap
+ void clear(uint8_t alpha) {
+ fPixels.erase(SkColorSetARGB(alpha, 0xFF, 0xFF, 0xFF));
+ }
+
+
+ enum class TextureType {
+ kExactFit,
+ kApproximateFit
+ };
+
+ // Canonical usage utility that draws a single path and uploads it
+ // to the GPU. The result is returned.
+ static GrTexture* DrawShapeMaskToTexture(GrTextureProvider*,
+ const GrShape&,
+ const SkIRect& resultBounds,
+ bool antiAlias,
+ TextureType,
+ const SkMatrix* matrix);
+
+ // This utility draws a path mask generated by DrawShapeMaskToTexture using a provided paint.
+ // The rectangle is drawn in device space. The 'viewMatrix' will be used to ensure the correct
+ // local coords are provided to any fragment processors in the paint.
+ static void DrawToTargetWithShapeMask(GrTexture* texture,
+ GrDrawContext*,
+ const GrPaint& paint,
+ const GrUserStencilSettings& userStencilSettings,
+ const GrClip&,
+ const SkMatrix& viewMatrix,
+ const SkIPoint& textureOriginInDeviceSpace,
+ const SkIRect& deviceSpaceRectToDraw);
+
+private:
+ // Helper function to get a scratch texture suitable for capturing the
+ // result (i.e., right size & format)
+ GrTexture* createTexture(TextureType);
+
+ GrTextureProvider* fTexProvider;
+ SkMatrix fMatrix;
+ SkAutoPixmapStorage fPixels;
+ SkDraw fDraw;
+ SkRasterClip fRasterClip;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+#endif // GrSWMaskHelper_DEFINED
diff --git a/gfx/skia/skia/src/gpu/GrScissorState.h b/gfx/skia/skia/src/gpu/GrScissorState.h
new file mode 100644
index 000000000..59ea08807
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrScissorState.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrScissorState_DEFINED
+#define GrScissorState_DEFINED
+
+#include "SkRect.h"
+
+class GrScissorState {
+public:
+ GrScissorState() : fEnabled(false) {}
+ GrScissorState(const SkIRect& rect) : fEnabled(true), fRect(rect) {}
+ void setDisabled() { fEnabled = false; }
+ void set(const SkIRect& rect) { fRect = rect; fEnabled = true; }
+ bool SK_WARN_UNUSED_RESULT intersect(const SkIRect& rect) {
+ if (!fEnabled) {
+ this->set(rect);
+ return true;
+ }
+ return fRect.intersect(rect);
+ }
+ bool operator==(const GrScissorState& other) const {
+ return fEnabled == other.fEnabled &&
+ (false == fEnabled || fRect == other.fRect);
+ }
+ bool operator!=(const GrScissorState& other) const { return !(*this == other); }
+
+ bool enabled() const { return fEnabled; }
+ const SkIRect& rect() const { return fRect; }
+
+private:
+ bool fEnabled;
+ SkIRect fRect;
+};
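+
+/*
+ * Illustrative sketch only (the clipRect variable is assumed): a default-constructed scissor
+ * is disabled, so the first intersect() simply adopts the rect; later calls narrow it and
+ * report whether anything remains visible.
+ *
+ *   GrScissorState scissor;
+ *   SkAssertResult(scissor.intersect(SkIRect::MakeWH(256, 256)));
+ *   if (!scissor.intersect(clipRect)) {
+ *       // fully clipped out; skip the draw
+ *   }
+ */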
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrShape.cpp b/gfx/skia/skia/src/gpu/GrShape.cpp
new file mode 100644
index 000000000..7e0a3a444
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrShape.cpp
@@ -0,0 +1,540 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrShape.h"
+
+GrShape& GrShape::operator=(const GrShape& that) {
+ fStyle = that.fStyle;
+ this->changeType(that.fType, Type::kPath == that.fType ? &that.path() : nullptr);
+ switch (fType) {
+ case Type::kEmpty:
+ break;
+ case Type::kRRect:
+ fRRectData = that.fRRectData;
+ break;
+ case Type::kLine:
+ fLineData = that.fLineData;
+ break;
+ case Type::kPath:
+ fPathData.fGenID = that.fPathData.fGenID;
+ break;
+ }
+ fInheritedKey.reset(that.fInheritedKey.count());
+ sk_careful_memcpy(fInheritedKey.get(), that.fInheritedKey.get(),
+ sizeof(uint32_t) * fInheritedKey.count());
+ return *this;
+}
+
+SkRect GrShape::bounds() const {
+ // Bounds where left == right or top == bottom can indicate a line or point shape. We return
+ // inverted bounds for a truly empty shape.
+ static constexpr SkRect kInverted = SkRect::MakeLTRB(1, 1, -1, -1);
+ switch (fType) {
+ case Type::kEmpty:
+ return kInverted;
+ case Type::kLine: {
+ SkRect bounds;
+ if (fLineData.fPts[0].fX < fLineData.fPts[1].fX) {
+ bounds.fLeft = fLineData.fPts[0].fX;
+ bounds.fRight = fLineData.fPts[1].fX;
+ } else {
+ bounds.fLeft = fLineData.fPts[1].fX;
+ bounds.fRight = fLineData.fPts[0].fX;
+ }
+ if (fLineData.fPts[0].fY < fLineData.fPts[1].fY) {
+ bounds.fTop = fLineData.fPts[0].fY;
+ bounds.fBottom = fLineData.fPts[1].fY;
+ } else {
+ bounds.fTop = fLineData.fPts[1].fY;
+ bounds.fBottom = fLineData.fPts[0].fY;
+ }
+ return bounds;
+ }
+ case Type::kRRect:
+ return fRRectData.fRRect.getBounds();
+ case Type::kPath:
+ return this->path().getBounds();
+ }
+ SkFAIL("Unknown shape type");
+ return kInverted;
+}
+
+SkRect GrShape::styledBounds() const {
+ if (Type::kEmpty == fType && !fStyle.hasNonDashPathEffect()) {
+ return SkRect::MakeEmpty();
+ }
+ SkRect bounds;
+ fStyle.adjustBounds(&bounds, this->bounds());
+ return bounds;
+}
+
+// If the path is small enough to be keyed from its data this returns key length, otherwise -1.
+static int path_key_from_data_size(const SkPath& path) {
+ const int verbCnt = path.countVerbs();
+ if (verbCnt > GrShape::kMaxKeyFromDataVerbCnt) {
+ return -1;
+ }
+ const int pointCnt = path.countPoints();
+ const int conicWeightCnt = SkPathPriv::ConicWeightCnt(path);
+
+ GR_STATIC_ASSERT(sizeof(SkPoint) == 2 * sizeof(uint32_t));
+ GR_STATIC_ASSERT(sizeof(SkScalar) == sizeof(uint32_t));
+ // 2 is for the verb cnt and a fill type. Each verb is a byte but we'll pad the verb data out to
+ // a uint32_t length.
+ return 2 + (SkAlign4(verbCnt) >> 2) + 2 * pointCnt + conicWeightCnt;
+}
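+
+// Worked example (illustrative): a path with 4 verbs, 4 points and no conic weights keys as
+// 2 + (SkAlign4(4) >> 2) + 2 * 4 + 0 = 11 uint32_ts.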
+
+// Writes the path data key into the passed pointer.
+static void write_path_key_from_data(const SkPath& path, uint32_t* origKey) {
+ uint32_t* key = origKey;
+ // The check below should take care of negative values cast to positive.
+ const int verbCnt = path.countVerbs();
+ const int pointCnt = path.countPoints();
+ const int conicWeightCnt = SkPathPriv::ConicWeightCnt(path);
+ SkASSERT(verbCnt <= GrShape::kMaxKeyFromDataVerbCnt);
+ SkASSERT(pointCnt && verbCnt);
+ *key++ = path.getFillType();
+ *key++ = verbCnt;
+ memcpy(key, SkPathPriv::VerbData(path), verbCnt * sizeof(uint8_t));
+ int verbKeySize = SkAlign4(verbCnt);
+ // pad out to uint32_t alignment using value that will stand out when debugging.
+ uint8_t* pad = reinterpret_cast<uint8_t*>(key)+ verbCnt;
+ memset(pad, 0xDE, verbKeySize - verbCnt);
+ key += verbKeySize >> 2;
+
+ memcpy(key, SkPathPriv::PointData(path), sizeof(SkPoint) * pointCnt);
+ GR_STATIC_ASSERT(sizeof(SkPoint) == 2 * sizeof(uint32_t));
+ key += 2 * pointCnt;
+ sk_careful_memcpy(key, SkPathPriv::ConicWeightData(path), sizeof(SkScalar) * conicWeightCnt);
+ GR_STATIC_ASSERT(sizeof(SkScalar) == sizeof(uint32_t));
+ SkDEBUGCODE(key += conicWeightCnt);
+ SkASSERT(key - origKey == path_key_from_data_size(path));
+}
+
+int GrShape::unstyledKeySize() const {
+ if (fInheritedKey.count()) {
+ return fInheritedKey.count();
+ }
+ switch (fType) {
+ case Type::kEmpty:
+ return 1;
+ case Type::kRRect:
+ SkASSERT(!fInheritedKey.count());
+ SkASSERT(0 == SkRRect::kSizeInMemory % sizeof(uint32_t));
+ // + 1 for the direction, start index, and inverseness.
+ return SkRRect::kSizeInMemory / sizeof(uint32_t) + 1;
+ case Type::kLine:
+ GR_STATIC_ASSERT(2 * sizeof(uint32_t) == sizeof(SkPoint));
+ // 4 for the end points and 1 for the inverseness
+ return 5;
+ case Type::kPath: {
+ if (0 == fPathData.fGenID) {
+ return -1;
+ }
+ int dataKeySize = path_key_from_data_size(fPathData.fPath);
+ if (dataKeySize >= 0) {
+ return dataKeySize;
+ }
+ // The key is the path ID and fill type.
+ return 2;
+ }
+ }
+ SkFAIL("Should never get here.");
+ return 0;
+}
+
+void GrShape::writeUnstyledKey(uint32_t* key) const {
+ SkASSERT(this->unstyledKeySize());
+ SkDEBUGCODE(uint32_t* origKey = key;)
+ if (fInheritedKey.count()) {
+ memcpy(key, fInheritedKey.get(), sizeof(uint32_t) * fInheritedKey.count());
+ SkDEBUGCODE(key += fInheritedKey.count();)
+ } else {
+ switch (fType) {
+ case Type::kEmpty:
+ *key++ = 1;
+ break;
+ case Type::kRRect:
+ fRRectData.fRRect.writeToMemory(key);
+ key += SkRRect::kSizeInMemory / sizeof(uint32_t);
+ *key = (fRRectData.fDir == SkPath::kCCW_Direction) ? (1 << 31) : 0;
+ *key |= fRRectData.fInverted ? (1 << 30) : 0;
+ *key++ |= fRRectData.fStart;
+ SkASSERT(fRRectData.fStart < 8);
+ break;
+ case Type::kLine:
+ memcpy(key, fLineData.fPts, 2 * sizeof(SkPoint));
+ key += 4;
+ *key++ = fLineData.fInverted ? 1 : 0;
+ break;
+ case Type::kPath: {
+ SkASSERT(fPathData.fGenID);
+ int dataKeySize = path_key_from_data_size(fPathData.fPath);
+ if (dataKeySize >= 0) {
+ write_path_key_from_data(fPathData.fPath, key);
+ return;
+ }
+ *key++ = fPathData.fGenID;
+ // We could canonicalize the fill rule for paths that don't differentiate between
+ // even/odd or winding fill (e.g. convex).
+ *key++ = this->path().getFillType();
+ break;
+ }
+ }
+ }
+ SkASSERT(key - origKey == this->unstyledKeySize());
+}
+
+void GrShape::setInheritedKey(const GrShape &parent, GrStyle::Apply apply, SkScalar scale) {
+ SkASSERT(!fInheritedKey.count());
+ // If the output shape turns out to be simple, then we will just use its geometric key
+ if (Type::kPath == fType) {
+ // We want ApplyFullStyle(ApplyPathEffect(shape)) to have the same key as
+ // ApplyFullStyle(shape).
+ // The full key is structured as (geo,path_effect,stroke).
+ // If we do ApplyPathEffect we get geo,path_effect as the inherited key. If we then
+ // do ApplyFullStyle we'll memcpy geo,path_effect into the new inherited key
+ // and then append the style key (which should now be stroke only) at the end.
+ int parentCnt = parent.fInheritedKey.count();
+ bool useParentGeoKey = !parentCnt;
+ if (useParentGeoKey) {
+ parentCnt = parent.unstyledKeySize();
+ if (parentCnt < 0) {
+ // The parent's geometry has no key so we will have no key.
+ fPathData.fGenID = 0;
+ return;
+ }
+ }
+ uint32_t styleKeyFlags = 0;
+ if (parent.knownToBeClosed()) {
+ styleKeyFlags |= GrStyle::kClosed_KeyFlag;
+ }
+ if (parent.asLine(nullptr, nullptr)) {
+ styleKeyFlags |= GrStyle::kNoJoins_KeyFlag;
+ }
+ int styleCnt = GrStyle::KeySize(parent.fStyle, apply, styleKeyFlags);
+ if (styleCnt < 0) {
+ // The style doesn't allow a key, set the path gen ID to 0 so that we fail when
+ // we try to get a key for the shape.
+ fPathData.fGenID = 0;
+ return;
+ }
+ fInheritedKey.reset(parentCnt + styleCnt);
+ if (useParentGeoKey) {
+ // This will be the geo key.
+ parent.writeUnstyledKey(fInheritedKey.get());
+ } else {
+ // This should be (geo,path_effect).
+ memcpy(fInheritedKey.get(), parent.fInheritedKey.get(),
+ parentCnt * sizeof(uint32_t));
+ }
+ // Now turn (geo,path_effect) or (geo) into (geo,path_effect,stroke)
+ GrStyle::WriteKey(fInheritedKey.get() + parentCnt, parent.fStyle, apply, scale,
+ styleKeyFlags);
+ }
+}
+
+GrShape::GrShape(const GrShape& that) : fStyle(that.fStyle) {
+ const SkPath* thatPath = Type::kPath == that.fType ? &that.fPathData.fPath : nullptr;
+ this->initType(that.fType, thatPath);
+ switch (fType) {
+ case Type::kEmpty:
+ break;
+ case Type::kRRect:
+ fRRectData = that.fRRectData;
+ break;
+ case Type::kLine:
+ fLineData = that.fLineData;
+ break;
+ case Type::kPath:
+ fPathData.fGenID = that.fPathData.fGenID;
+ break;
+ }
+ fInheritedKey.reset(that.fInheritedKey.count());
+ sk_careful_memcpy(fInheritedKey.get(), that.fInheritedKey.get(),
+ sizeof(uint32_t) * fInheritedKey.count());
+}
+
+GrShape::GrShape(const GrShape& parent, GrStyle::Apply apply, SkScalar scale) {
+ // TODO: Add some quantization of scale for better cache performance here or leave that up
+ // to caller?
+ // TODO: For certain shapes and stroke params we could ignore the scale. (e.g. miter or bevel
+ // stroke of a rect).
+ if (!parent.style().applies() ||
+ (GrStyle::Apply::kPathEffectOnly == apply && !parent.style().pathEffect())) {
+ this->initType(Type::kEmpty);
+ *this = parent;
+ return;
+ }
+
+ SkPathEffect* pe = parent.fStyle.pathEffect();
+ SkTLazy<SkPath> tmpPath;
+ const GrShape* parentForKey = &parent;
+ SkTLazy<GrShape> tmpParent;
+ this->initType(Type::kPath);
+ fPathData.fGenID = 0;
+ if (pe) {
+ const SkPath* srcForPathEffect;
+ if (parent.fType == Type::kPath) {
+ srcForPathEffect = &parent.path();
+ } else {
+ srcForPathEffect = tmpPath.init();
+ parent.asPath(tmpPath.get());
+ }
+ // Should we consider bounds? Would have to include in key, but it'd be nice to know
+ // if the bounds actually modified anything before including in key.
+ SkStrokeRec strokeRec = parent.fStyle.strokeRec();
+ if (!parent.fStyle.applyPathEffectToPath(&this->path(), &strokeRec, *srcForPathEffect,
+ scale)) {
+ tmpParent.init(*srcForPathEffect, GrStyle(strokeRec, nullptr));
+ *this = tmpParent.get()->applyStyle(apply, scale);
+ return;
+ }
+ // A path effect has access to change the res scale but we aren't expecting it to and it
+ // would mess up our key computation.
+ SkASSERT(scale == strokeRec.getResScale());
+ if (GrStyle::Apply::kPathEffectAndStrokeRec == apply && strokeRec.needToApply()) {
+ // The intermediate shape may not be a general path. If we were just applying
+ // the path effect then attemptToSimplifyPath() would catch it. This means that
+ // when we subsequently applied the remaining strokeRec we would have a non-path
+ // parent shape that would be used to determine the stroked path's key.
+ // We detect that case here and change parentForKey to a temporary that represents
+ // the simpler shape so that applying both path effect and the strokerec all at
+ // once produces the same key.
+ tmpParent.init(this->path(), GrStyle(strokeRec, nullptr));
+ tmpParent.get()->setInheritedKey(parent, GrStyle::Apply::kPathEffectOnly, scale);
+ if (!tmpPath.isValid()) {
+ tmpPath.init();
+ }
+ tmpParent.get()->asPath(tmpPath.get());
+ SkStrokeRec::InitStyle fillOrHairline;
+ // The parent shape may have simplified away the strokeRec, check for that here.
+ if (tmpParent.get()->style().applies()) {
+ SkAssertResult(tmpParent.get()->style().applyToPath(&this->path(), &fillOrHairline,
+ *tmpPath.get(), scale));
+ } else if (tmpParent.get()->style().isSimpleFill()) {
+ fillOrHairline = SkStrokeRec::kFill_InitStyle;
+ } else {
+ SkASSERT(tmpParent.get()->style().isSimpleHairline());
+ fillOrHairline = SkStrokeRec::kHairline_InitStyle;
+ }
+ fStyle.resetToInitStyle(fillOrHairline);
+ parentForKey = tmpParent.get();
+ } else {
+ fStyle = GrStyle(strokeRec, nullptr);
+ }
+ } else {
+ const SkPath* srcForParentStyle;
+ if (parent.fType == Type::kPath) {
+ srcForParentStyle = &parent.path();
+ } else {
+ srcForParentStyle = tmpPath.init();
+ parent.asPath(tmpPath.get());
+ }
+ SkStrokeRec::InitStyle fillOrHairline;
+ SkASSERT(parent.fStyle.applies());
+ SkASSERT(!parent.fStyle.pathEffect());
+ SkAssertResult(parent.fStyle.applyToPath(&this->path(), &fillOrHairline, *srcForParentStyle,
+ scale));
+ fStyle.resetToInitStyle(fillOrHairline);
+ }
+ this->attemptToSimplifyPath();
+ this->setInheritedKey(*parentForKey, apply, scale);
+}
+
+void GrShape::attemptToSimplifyPath() {
+ SkRect rect;
+ SkRRect rrect;
+ SkPath::Direction rrectDir;
+ unsigned rrectStart;
+ bool inverted = this->path().isInverseFillType();
+ SkPoint pts[2];
+ if (this->path().isEmpty()) {
+ this->changeType(Type::kEmpty);
+ } else if (this->path().isLine(pts)) {
+ this->changeType(Type::kLine);
+ fLineData.fPts[0] = pts[0];
+ fLineData.fPts[1] = pts[1];
+ fLineData.fInverted = inverted;
+ } else if (this->path().isRRect(&rrect, &rrectDir, &rrectStart)) {
+ this->changeType(Type::kRRect);
+ fRRectData.fRRect = rrect;
+ fRRectData.fDir = rrectDir;
+ fRRectData.fStart = rrectStart;
+ fRRectData.fInverted = inverted;
+ // Currently SkPath does not report empty, rect, or oval subtypes as rrects.
+ SkASSERT(!fRRectData.fRRect.isEmpty());
+ SkASSERT(fRRectData.fRRect.getType() != SkRRect::kRect_Type);
+ SkASSERT(fRRectData.fRRect.getType() != SkRRect::kOval_Type);
+ } else if (this->path().isOval(&rect, &rrectDir, &rrectStart)) {
+ this->changeType(Type::kRRect);
+ fRRectData.fRRect.setOval(rect);
+ fRRectData.fDir = rrectDir;
+ fRRectData.fInverted = inverted;
+ // convert from oval indexing to rrect indexing.
+ fRRectData.fStart = 2 * rrectStart;
+ } else if (SkPathPriv::IsSimpleClosedRect(this->path(), &rect, &rrectDir, &rrectStart)) {
+ this->changeType(Type::kRRect);
+ // When there is a path effect we restrict rect detection to the narrower API that
+ // gives us the starting position. Otherwise, we will retry with the more aggressive
+ // isRect().
+ fRRectData.fRRect.setRect(rect);
+ fRRectData.fInverted = inverted;
+ fRRectData.fDir = rrectDir;
+ // convert from rect indexing to rrect indexing.
+ fRRectData.fStart = 2 * rrectStart;
+ } else if (!this->style().hasPathEffect()) {
+ bool closed;
+ if (this->path().isRect(&rect, &closed, nullptr)) {
+ if (closed || this->style().isSimpleFill()) {
+ this->changeType(Type::kRRect);
+ fRRectData.fRRect.setRect(rect);
+ // Since there is no path effect the dir and start index are immaterial.
+ fRRectData.fDir = kDefaultRRectDir;
+ fRRectData.fStart = kDefaultRRectStart;
+ // There isn't dashing so we will have to preserve inverseness.
+ fRRectData.fInverted = inverted;
+ }
+ }
+ }
+ if (Type::kPath != fType) {
+ fInheritedKey.reset(0);
+ if (Type::kRRect == fType) {
+ this->attemptToSimplifyRRect();
+ } else if (Type::kLine == fType) {
+ this->attemptToSimplifyLine();
+ }
+ } else {
+ if (fInheritedKey.count() || this->path().isVolatile()) {
+ fPathData.fGenID = 0;
+ } else {
+ fPathData.fGenID = this->path().getGenerationID();
+ }
+ if (!this->style().hasNonDashPathEffect()) {
+ if (this->style().strokeRec().getStyle() == SkStrokeRec::kStroke_Style ||
+ this->style().strokeRec().getStyle() == SkStrokeRec::kHairline_Style) {
+ // Stroke styles don't differentiate between winding and even/odd.
+ // Moreover, dashing ignores inverseness (skbug.com/5421)
+ bool inverse = !this->style().isDashed() && this->path().isInverseFillType();
+ if (inverse) {
+ this->path().setFillType(kDefaultPathInverseFillType);
+ } else {
+ this->path().setFillType(kDefaultPathFillType);
+ }
+ } else if (this->path().isConvex()) {
+ // There is no distinction between even/odd and non-zero winding count for convex
+ // paths.
+ if (this->path().isInverseFillType()) {
+ this->path().setFillType(kDefaultPathInverseFillType);
+ } else {
+ this->path().setFillType(kDefaultPathFillType);
+ }
+ }
+ }
+ }
+}
+
+void GrShape::attemptToSimplifyRRect() {
+ SkASSERT(Type::kRRect == fType);
+ SkASSERT(!fInheritedKey.count());
+ if (fRRectData.fRRect.isEmpty()) {
+ fType = Type::kEmpty;
+ return;
+ }
+ if (!this->style().hasPathEffect()) {
+ fRRectData.fDir = kDefaultRRectDir;
+ fRRectData.fStart = kDefaultRRectStart;
+ } else if (fStyle.isDashed()) {
+ // Dashing ignores the inverseness (currently). skbug.com/5421
+ fRRectData.fInverted = false;
+ }
+ // Turn a stroke-and-filled miter rect into a filled rect. TODO: more rrect stroke shortcuts.
+ if (!fStyle.hasPathEffect() &&
+ fStyle.strokeRec().getStyle() == SkStrokeRec::kStrokeAndFill_Style &&
+ fStyle.strokeRec().getJoin() == SkPaint::kMiter_Join &&
+ fStyle.strokeRec().getMiter() >= SK_ScalarSqrt2 &&
+ fRRectData.fRRect.isRect()) {
+ SkScalar r = fStyle.strokeRec().getWidth() / 2;
+ fRRectData.fRRect = SkRRect::MakeRect(fRRectData.fRRect.rect().makeOutset(r, r));
+ fStyle = GrStyle::SimpleFill();
+ }
+}
+
+void GrShape::attemptToSimplifyLine() {
+ SkASSERT(Type::kLine == fType);
+ SkASSERT(!fInheritedKey.count());
+ if (fStyle.isDashed()) {
+ // Dashing ignores inverseness.
+ fLineData.fInverted = false;
+ return;
+ } else if (fStyle.hasPathEffect()) {
+ return;
+ }
+ if (fStyle.strokeRec().getStyle() == SkStrokeRec::kStrokeAndFill_Style) {
+ // Make stroke + fill be stroke since the fill is empty.
+ SkStrokeRec rec = fStyle.strokeRec();
+ rec.setStrokeStyle(fStyle.strokeRec().getWidth(), false);
+ fStyle = GrStyle(rec, nullptr);
+ }
+ if (fStyle.isSimpleFill() && !fLineData.fInverted) {
+ this->changeType(Type::kEmpty);
+ return;
+ }
+ SkPoint* pts = fLineData.fPts;
+ if (fStyle.strokeRec().getStyle() == SkStrokeRec::kStroke_Style) {
+ // If it is horizontal or vertical we will turn it into a filled rrect.
+ SkRect rect;
+ rect.fLeft = SkTMin(pts[0].fX, pts[1].fX);
+ rect.fRight = SkTMax(pts[0].fX, pts[1].fX);
+ rect.fTop = SkTMin(pts[0].fY, pts[1].fY);
+ rect.fBottom = SkTMax(pts[0].fY, pts[1].fY);
+ bool eqX = rect.fLeft == rect.fRight;
+ bool eqY = rect.fTop == rect.fBottom;
+ if (eqX || eqY) {
+ SkScalar r = fStyle.strokeRec().getWidth() / 2;
+ bool inverted = fLineData.fInverted;
+ this->changeType(Type::kRRect);
+ switch (fStyle.strokeRec().getCap()) {
+ case SkPaint::kButt_Cap:
+ if (eqX && eqY) {
+ this->changeType(Type::kEmpty);
+ return;
+ }
+ if (eqX) {
+ rect.outset(r, 0);
+ } else {
+ rect.outset(0, r);
+ }
+ fRRectData.fRRect = SkRRect::MakeRect(rect);
+ break;
+ case SkPaint::kSquare_Cap:
+ rect.outset(r, r);
+ fRRectData.fRRect = SkRRect::MakeRect(rect);
+ break;
+ case SkPaint::kRound_Cap:
+ rect.outset(r, r);
+ fRRectData.fRRect = SkRRect::MakeRectXY(rect, r, r);
+ break;
+ }
+ fRRectData.fInverted = inverted;
+ fRRectData.fDir = kDefaultRRectDir;
+ fRRectData.fStart = kDefaultRRectStart;
+ if (fRRectData.fRRect.isEmpty()) {
+ // This can happen when r is very small relative to the rect edges.
+ this->changeType(Type::kEmpty);
+ return;
+ }
+ fStyle = GrStyle::SimpleFill();
+ return;
+ }
+ }
+ // Only path effects could care about the order of the points. Otherwise canonicalize
+ // the point order.
+ if (pts[1].fY < pts[0].fY || (pts[1].fY == pts[0].fY && pts[1].fX < pts[0].fX)) {
+ SkTSwap(pts[0], pts[1]);
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/GrShape.h b/gfx/skia/skia/src/gpu/GrShape.h
new file mode 100644
index 000000000..074278cd8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrShape.h
@@ -0,0 +1,463 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrShape_DEFINED
+#define GrShape_DEFINED
+
+#include "GrStyle.h"
+#include "SkPath.h"
+#include "SkPathPriv.h"
+#include "SkRRect.h"
+#include "SkTemplates.h"
+#include "SkTLazy.h"
+
+/**
+ * Represents a geometric shape (rrect or path) and the GrStyle that it should be rendered with.
+ * It is possible to apply the style to the GrShape to produce a new GrShape where the geometry
+ * reflects the styling information (e.g. is stroked). It is also possible to apply just the
+ * path effect from the style. In this case the resulting shape will include any remaining
+ * stroking information that is to be applied after the path effect.
+ *
+ * Shapes can produce keys that represent only the geometry information, not the style. Note that
+ * when styling information is applied to produce a new shape then the style has been converted
+ * to geometric information and is included in the new shape's key. When the same style is applied
+ * to two shapes that reflect the same underlying geometry the computed keys of the stylized shapes
+ * will be the same.
+ *
+ * Currently this can only be constructed from a path, rect, or rrect, though it can become a path
+ * by applying style to the geometry. The idea is to expand this to cover most or all of the geometries
+ * that have SkCanvas::draw APIs.
+ */
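+/*
+ * Illustrative sketch only (the path, paint and scale variables are assumed): build a styled
+ * shape, flatten the style into geometry, and key the result for caching.
+ *
+ *   GrShape shape(path, paint);
+ *   GrShape stroked = shape.applyStyle(GrStyle::Apply::kPathEffectAndStrokeRec, scale);
+ *   if (stroked.hasUnstyledKey()) {
+ *       SkAutoSTArray<16, uint32_t> key(stroked.unstyledKeySize());
+ *       stroked.writeUnstyledKey(key.get());
+ *   }
+ */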
+class GrShape {
+public:
+ // Keys for paths may be extracted from the path data for small paths. Clients aren't supposed
+ // to have to worry about this. This value is exposed for unit tests.
+ static constexpr int kMaxKeyFromDataVerbCnt = 10;
+
+ GrShape() { this->initType(Type::kEmpty); }
+
+ explicit GrShape(const SkPath& path) : GrShape(path, GrStyle::SimpleFill()) {}
+
+ explicit GrShape(const SkRRect& rrect) : GrShape(rrect, GrStyle::SimpleFill()) {}
+
+ explicit GrShape(const SkRect& rect) : GrShape(rect, GrStyle::SimpleFill()) {}
+
+ GrShape(const SkPath& path, const GrStyle& style) : fStyle(style) {
+ this->initType(Type::kPath, &path);
+ this->attemptToSimplifyPath();
+ }
+
+ GrShape(const SkRRect& rrect, const GrStyle& style)
+ : fStyle(style) {
+ this->initType(Type::kRRect);
+ fRRectData.fRRect = rrect;
+ fRRectData.fInverted = false;
+ fRRectData.fStart = DefaultRRectDirAndStartIndex(rrect, style.hasPathEffect(),
+ &fRRectData.fDir);
+ this->attemptToSimplifyRRect();
+ }
+
+ GrShape(const SkRRect& rrect, SkPath::Direction dir, unsigned start, bool inverted,
+ const GrStyle& style)
+ : fStyle(style) {
+ this->initType(Type::kRRect);
+ fRRectData.fRRect = rrect;
+ fRRectData.fInverted = inverted;
+ if (style.pathEffect()) {
+ fRRectData.fDir = dir;
+ fRRectData.fStart = start;
+ if (fRRectData.fRRect.getType() == SkRRect::kRect_Type) {
+ fRRectData.fStart = (fRRectData.fStart + 1) & 0b110;
+ } else if (fRRectData.fRRect.getType() == SkRRect::kOval_Type) {
+ fRRectData.fStart &= 0b110;
+ }
+ } else {
+ fRRectData.fStart = DefaultRRectDirAndStartIndex(rrect, false, &fRRectData.fDir);
+ }
+ this->attemptToSimplifyRRect();
+ }
+
+ GrShape(const SkRect& rect, const GrStyle& style)
+ : fStyle(style) {
+ this->initType(Type::kRRect);
+ fRRectData.fRRect = SkRRect::MakeRect(rect);
+ fRRectData.fInverted = false;
+ fRRectData.fStart = DefaultRectDirAndStartIndex(rect, style.hasPathEffect(),
+ &fRRectData.fDir);
+ this->attemptToSimplifyRRect();
+ }
+
+ GrShape(const SkPath& path, const SkPaint& paint) : fStyle(paint) {
+ this->initType(Type::kPath, &path);
+ this->attemptToSimplifyPath();
+ }
+
+ GrShape(const SkRRect& rrect, const SkPaint& paint)
+ : fStyle(paint) {
+ this->initType(Type::kRRect);
+ fRRectData.fRRect = rrect;
+ fRRectData.fInverted = false;
+ fRRectData.fStart = DefaultRRectDirAndStartIndex(rrect, fStyle.hasPathEffect(),
+ &fRRectData.fDir);
+ this->attemptToSimplifyRRect();
+ }
+
+ GrShape(const SkRect& rect, const SkPaint& paint)
+ : fStyle(paint) {
+ this->initType(Type::kRRect);
+ fRRectData.fRRect = SkRRect::MakeRect(rect);
+ fRRectData.fInverted = false;
+ fRRectData.fStart = DefaultRectDirAndStartIndex(rect, fStyle.hasPathEffect(),
+ &fRRectData.fDir);
+ this->attemptToSimplifyRRect();
+ }
+
+ GrShape(const GrShape&);
+ GrShape& operator=(const GrShape& that);
+
+ ~GrShape() { this->changeType(Type::kEmpty); }
+
+ const GrStyle& style() const { return fStyle; }
+
+ /**
+ * Returns a shape that has either applied the path effect or path effect and stroking
+ * information from this shape's style to its geometry. Scale is used when approximating the
+ * output geometry and is typically computed from the view matrix.
+ */
+ GrShape applyStyle(GrStyle::Apply apply, SkScalar scale) const {
+ return GrShape(*this, apply, scale);
+ }
+
+ /** Returns the unstyled geometry as a rrect if possible. */
+ bool asRRect(SkRRect* rrect, SkPath::Direction* dir, unsigned* start, bool* inverted) const {
+ if (Type::kRRect != fType) {
+ return false;
+ }
+ if (rrect) {
+ *rrect = fRRectData.fRRect;
+ }
+ if (dir) {
+ *dir = fRRectData.fDir;
+ }
+ if (start) {
+ *start = fRRectData.fStart;
+ }
+ if (inverted) {
+ *inverted = fRRectData.fInverted;
+ }
+ return true;
+ }
+
+ /**
+ * If the unstyled shape is a straight line segment, returns true and sets pts to the endpoints.
+ * An inverse filled line path is still considered a line.
+ */
+ bool asLine(SkPoint pts[2], bool* inverted) const {
+ if (fType != Type::kLine) {
+ return false;
+ }
+ if (pts) {
+ pts[0] = fLineData.fPts[0];
+ pts[1] = fLineData.fPts[1];
+ }
+ if (inverted) {
+ *inverted = fLineData.fInverted;
+ }
+ return true;
+ }
+
+ /** Returns the unstyled geometry as a path. */
+ void asPath(SkPath* out) const {
+ switch (fType) {
+ case Type::kEmpty:
+ out->reset();
+ break;
+ case Type::kRRect:
+ out->reset();
+ out->addRRect(fRRectData.fRRect, fRRectData.fDir, fRRectData.fStart);
+ // Below matches the fill type that attemptToSimplifyPath uses.
+ if (fRRectData.fInverted) {
+ out->setFillType(kDefaultPathInverseFillType);
+ } else {
+ out->setFillType(kDefaultPathFillType);
+ }
+ break;
+ case Type::kLine:
+ out->reset();
+ out->moveTo(fLineData.fPts[0]);
+ out->lineTo(fLineData.fPts[1]);
+ if (fLineData.fInverted) {
+ out->setFillType(kDefaultPathInverseFillType);
+ } else {
+ out->setFillType(kDefaultPathFillType);
+ }
+ break;
+ case Type::kPath:
+ *out = this->path();
+ break;
+ }
+ }
+
+ /**
+ * Returns whether the geometry is empty. Note that applying the style could produce a
+ * non-empty shape.
+ */
+ bool isEmpty() const { return Type::kEmpty == fType; }
+
+ /**
+ * Gets the bounds of the geometry without reflecting the shape's styling. This ignores
+ * the inverse fill nature of the geometry.
+ */
+ SkRect bounds() const;
+
+ /**
+ * Gets the bounds of the geometry reflecting the shape's styling (ignoring inverse fill
+ * status).
+ */
+ SkRect styledBounds() const;
+
+ /**
+ * Is this shape known to be convex, before styling is applied. An unclosed but otherwise
+ * convex path is considered to be closed if the styling reflects a fill and not otherwise.
+ * This is because filling closes all contours in the path.
+ */
+ bool knownToBeConvex() const {
+ switch (fType) {
+ case Type::kEmpty:
+ return true;
+ case Type::kRRect:
+ return true;
+ case Type::kLine:
+ return true;
+ case Type::kPath:
+ // SkPath.isConvex() really means "is this path convex were it to be closed" and
+ // thus doesn't give the correct answer for stroked paths, hence we also check
+ // whether the path is either filled or closed. Convex paths may only have one
+ // contour, hence isLastContourClosed() is a sufficient test for a convex path.
+ return (this->style().isSimpleFill() || this->path().isLastContourClosed()) &&
+ this->path().isConvex();
+ }
+ return false;
+ }
+
+ /** Is the pre-styled geometry inverse filled? */
+ bool inverseFilled() const {
+ bool ret = false;
+ switch (fType) {
+ case Type::kEmpty:
+ ret = false;
+ break;
+ case Type::kRRect:
+ ret = fRRectData.fInverted;
+ break;
+ case Type::kLine:
+ ret = fLineData.fInverted;
+ break;
+ case Type::kPath:
+ ret = this->path().isInverseFillType();
+ break;
+ }
+ // Dashing ignores inverseness. We should have caught this earlier. skbug.com/5421
+ SkASSERT(!(ret && this->style().isDashed()));
+ return ret;
+ }
+
+ /**
+ * Might applying the styling to the geometry produce an inverse fill. The "may" part comes in
+ * because an arbitrary path effect could produce an inverse filled path. In other cases this
+ * can be thought of as "inverseFilledAfterStyling()".
+ */
+ bool mayBeInverseFilledAfterStyling() const {
+ // An arbitrary path effect can produce an arbitrary output path, which may be inverse
+ // filled.
+ if (this->style().hasNonDashPathEffect()) {
+ return true;
+ }
+ return this->inverseFilled();
+ }
+
+ /**
+ * Is it known that the unstyled geometry has no unclosed contours. This means that it will
+ * not have any caps if stroked (modulo the effect of any path effect).
+ */
+ bool knownToBeClosed() const {
+ switch (fType) {
+ case Type::kEmpty:
+ return true;
+ case Type::kRRect:
+ return true;
+ case Type::kLine:
+ return false;
+ case Type::kPath:
+ // SkPath doesn't keep track of the closed status of each contour.
+ return SkPathPriv::IsClosedSingleContour(this->path());
+ }
+ return false;
+ }
+
+ uint32_t segmentMask() const {
+ switch (fType) {
+ case Type::kEmpty:
+ return 0;
+ case Type::kRRect:
+ if (fRRectData.fRRect.getType() == SkRRect::kOval_Type) {
+ return SkPath::kConic_SegmentMask;
+ } else if (fRRectData.fRRect.getType() == SkRRect::kRect_Type) {
+ return SkPath::kLine_SegmentMask;
+ }
+ return SkPath::kLine_SegmentMask | SkPath::kConic_SegmentMask;
+ case Type::kLine:
+ return SkPath::kLine_SegmentMask;
+ case Type::kPath:
+ return this->path().getSegmentMasks();
+ }
+ return 0;
+ }
+
+ /**
+ * Gets the size of the key for the shape represented by this GrShape (ignoring its styling).
+ * A negative value is returned if the shape has no key (shouldn't be cached).
+ */
+ int unstyledKeySize() const;
+
+ bool hasUnstyledKey() const { return this->unstyledKeySize() >= 0; }
+
+ /**
+ * Writes unstyledKeySize() uint32_t values into the provided pointer. Assumes that there is enough
+ * space allocated for the key and that unstyledKeySize() does not return a negative value
+ * for this shape.
+ */
+ void writeUnstyledKey(uint32_t* key) const;
+
+private:
+ enum class Type {
+ kEmpty,
+ kRRect,
+ kLine,
+ kPath,
+ };
+
+ void initType(Type type, const SkPath* path = nullptr) {
+ fType = Type::kEmpty;
+ this->changeType(type, path);
+ }
+
+ void changeType(Type type, const SkPath* path = nullptr) {
+ bool wasPath = Type::kPath == fType;
+ fType = type;
+ bool isPath = Type::kPath == type;
+ SkASSERT(!path || isPath);
+ if (wasPath && !isPath) {
+ fPathData.fPath.~SkPath();
+ } else if (!wasPath && isPath) {
+ if (path) {
+ new (&fPathData.fPath) SkPath(*path);
+ } else {
+ new (&fPathData.fPath) SkPath();
+ }
+ } else if (isPath && path) {
+ fPathData.fPath = *path;
+ }
+ // Whether or not we use the path's gen ID is decided in attemptToSimplifyPath.
+ fPathData.fGenID = 0;
+ }
+
+ SkPath& path() {
+ SkASSERT(Type::kPath == fType);
+ return fPathData.fPath;
+ }
+
+ const SkPath& path() const {
+ SkASSERT(Type::kPath == fType);
+ return fPathData.fPath;
+ }
+
+ /** Constructor used by the applyStyle() function */
+ GrShape(const GrShape& parentShape, GrStyle::Apply, SkScalar scale);
+
+ /**
+ * Determines the key we should inherit from the input shape's geometry and style when
+ * we are applying the style to create a new shape.
+ */
+ void setInheritedKey(const GrShape& parentShape, GrStyle::Apply, SkScalar scale);
+
+ void attemptToSimplifyPath();
+ void attemptToSimplifyRRect();
+ void attemptToSimplifyLine();
+
+ // Defaults to use when there is no distinction between even/odd and winding fills.
+ static constexpr SkPath::FillType kDefaultPathFillType = SkPath::kEvenOdd_FillType;
+ static constexpr SkPath::FillType kDefaultPathInverseFillType =
+ SkPath::kInverseEvenOdd_FillType;
+
+ static constexpr SkPath::Direction kDefaultRRectDir = SkPath::kCW_Direction;
+ static constexpr unsigned kDefaultRRectStart = 0;
+
+ static unsigned DefaultRectDirAndStartIndex(const SkRect& rect, bool hasPathEffect,
+ SkPath::Direction* dir) {
+ *dir = kDefaultRRectDir;
+ // This comes from SkPath's interface. The default for adding a SkRect is counter clockwise
+ // beginning at index 0 (which happens to correspond to rrect index 0 or 7).
+ if (!hasPathEffect) {
+ // It doesn't matter what start we use, just be consistent to avoid redundant keys.
+ return kDefaultRRectStart;
+ }
+ // In SkPath a rect starts at index 0 by default. This is the top left corner. However,
+ // we store rects as rrects. RRects don't preserve the invertedness, but rather sort the
+ // rect edges. Thus, we may need to modify the rrect's start index to account for the sort.
+ bool swapX = rect.fLeft > rect.fRight;
+ bool swapY = rect.fTop > rect.fBottom;
+ if (swapX && swapY) {
+ // 0 becomes start index 2; multiply by 2 to convert from rect to rrect indices.
+ return 2 * 2;
+ } else if (swapX) {
+ *dir = SkPath::kCCW_Direction;
+ // 0 becomes start index 1; multiply by 2 to convert from rect to rrect indices.
+ return 2 * 1;
+ } else if (swapY) {
+ *dir = SkPath::kCCW_Direction;
+ // 0 becomes start index 3; multiply by 2 to convert from rect to rrect indices.
+ return 2 * 3;
+ }
+ return 0;
+ }
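+
+    // Worked example (illustrative): when both swapX and swapY are true, rect corner 0 lands at
+    // sorted-rect index 2, so the rrect start is 2 * 2 = 4 and the direction stays kCW_Direction.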
+
+ static unsigned DefaultRRectDirAndStartIndex(const SkRRect& rrect, bool hasPathEffect,
+ SkPath::Direction* dir) {
+ // This comes from SkPath's interface. The default for adding a SkRRect to a path is
+ // clockwise beginning at starting index 6.
+ static constexpr unsigned kPathRRectStartIdx = 6;
+ *dir = kDefaultRRectDir;
+ if (!hasPathEffect) {
+ // It doesn't matter what start we use, just be consistent to avoid redundant keys.
+ return kDefaultRRectStart;
+ }
+ return kPathRRectStartIdx;
+ }
+
+ Type fType;
+ union {
+ struct {
+ SkRRect fRRect;
+ SkPath::Direction fDir;
+ unsigned fStart;
+ bool fInverted;
+ } fRRectData;
+ struct {
+ SkPath fPath;
+ // Gen ID of the original path (fPath may be modified)
+ int32_t fGenID;
+ } fPathData;
+ struct {
+ SkPoint fPts[2];
+ bool fInverted;
+ } fLineData;
+ };
+ GrStyle fStyle;
+ SkAutoSTArray<8, uint32_t> fInheritedKey;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrSoftwarePathRenderer.cpp b/gfx/skia/skia/src/gpu/GrSoftwarePathRenderer.cpp
new file mode 100644
index 000000000..c22ce6600
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSoftwarePathRenderer.cpp
@@ -0,0 +1,228 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrSoftwarePathRenderer.h"
+#include "GrAuditTrail.h"
+#include "GrClip.h"
+#include "GrPipelineBuilder.h"
+#include "GrGpuResourcePriv.h"
+#include "GrSWMaskHelper.h"
+#include "GrTextureProvider.h"
+#include "batches/GrRectBatchFactory.h"
+
+////////////////////////////////////////////////////////////////////////////////
+bool GrSoftwarePathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ // Pass on any style that applies. The caller will apply the style if a suitable renderer is
+ // not found and try again with the new GrShape.
+ return !args.fShape->style().applies() && SkToBool(fTexProvider);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+static bool get_unclipped_shape_dev_bounds(const GrShape& shape, const SkMatrix& matrix,
+ SkIRect* devBounds) {
+ SkRect shapeBounds = shape.styledBounds();
+ if (shapeBounds.isEmpty()) {
+ return false;
+ }
+ SkRect shapeDevBounds;
+ matrix.mapRect(&shapeDevBounds, shapeBounds);
+ shapeDevBounds.roundOut(devBounds);
+ return true;
+}
+
+// Gets the shape bounds, the clip bounds, and the intersection (if any). Returns false if there
+// is no intersection.
+static bool get_shape_and_clip_bounds(int width, int height,
+ const GrClip& clip,
+ const GrShape& shape,
+ const SkMatrix& matrix,
+ SkIRect* unclippedDevShapeBounds,
+ SkIRect* clippedDevShapeBounds,
+ SkIRect* devClipBounds) {
+ // compute bounds as intersection of rt size, clip, and path
+ clip.getConservativeBounds(width, height, devClipBounds);
+
+ if (!get_unclipped_shape_dev_bounds(shape, matrix, unclippedDevShapeBounds)) {
+ *unclippedDevShapeBounds = SkIRect::EmptyIRect();
+ *clippedDevShapeBounds = SkIRect::EmptyIRect();
+ return false;
+ }
+ if (!clippedDevShapeBounds->intersect(*devClipBounds, *unclippedDevShapeBounds)) {
+ *clippedDevShapeBounds = SkIRect::EmptyIRect();
+ return false;
+ }
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrSoftwarePathRenderer::DrawNonAARect(GrDrawContext* drawContext,
+ const GrPaint& paint,
+ const GrUserStencilSettings& userStencilSettings,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkMatrix& localMatrix) {
+ SkAutoTUnref<GrDrawBatch> batch(GrRectBatchFactory::CreateNonAAFill(paint.getColor(),
+ viewMatrix, rect,
+ nullptr, &localMatrix));
+
+ GrPipelineBuilder pipelineBuilder(paint, drawContext->mustUseHWAA(paint));
+ pipelineBuilder.setUserStencil(&userStencilSettings);
+
+ drawContext->drawBatch(pipelineBuilder, clip, batch);
+}
+
+void GrSoftwarePathRenderer::DrawAroundInvPath(GrDrawContext* drawContext,
+ const GrPaint& paint,
+ const GrUserStencilSettings& userStencilSettings,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkIRect& devClipBounds,
+ const SkIRect& devPathBounds) {
+ SkMatrix invert;
+ if (!viewMatrix.invert(&invert)) {
+ return;
+ }
+
+ SkRect rect;
+ if (devClipBounds.fTop < devPathBounds.fTop) {
+ rect.iset(devClipBounds.fLeft, devClipBounds.fTop,
+ devClipBounds.fRight, devPathBounds.fTop);
+ DrawNonAARect(drawContext, paint, userStencilSettings, clip,
+ SkMatrix::I(), rect, invert);
+ }
+ if (devClipBounds.fLeft < devPathBounds.fLeft) {
+ rect.iset(devClipBounds.fLeft, devPathBounds.fTop,
+ devPathBounds.fLeft, devPathBounds.fBottom);
+ DrawNonAARect(drawContext, paint, userStencilSettings, clip,
+ SkMatrix::I(), rect, invert);
+ }
+ if (devClipBounds.fRight > devPathBounds.fRight) {
+ rect.iset(devPathBounds.fRight, devPathBounds.fTop,
+ devClipBounds.fRight, devPathBounds.fBottom);
+ DrawNonAARect(drawContext, paint, userStencilSettings, clip,
+ SkMatrix::I(), rect, invert);
+ }
+ if (devClipBounds.fBottom > devPathBounds.fBottom) {
+ rect.iset(devClipBounds.fLeft, devPathBounds.fBottom,
+ devClipBounds.fRight, devClipBounds.fBottom);
+ DrawNonAARect(drawContext, paint, userStencilSettings, clip,
+ SkMatrix::I(), rect, invert);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// return true on success; false on failure
+bool GrSoftwarePathRenderer::onDrawPath(const DrawPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fDrawContext->auditTrail(),
+ "GrSoftwarePathRenderer::onDrawPath");
+ if (!fTexProvider) {
+ return false;
+ }
+
+ // We really need to know if the shape will be inverse filled or not
+ bool inverseFilled = false;
+ SkTLazy<GrShape> tmpShape;
+ SkASSERT(!args.fShape->style().applies());
+ inverseFilled = args.fShape->inverseFilled();
+
+ SkIRect unclippedDevShapeBounds, clippedDevShapeBounds, devClipBounds;
+ // To prevent overloading the cache with entries during animations we limit the cache of masks
+ // to cases where the matrix preserves axis alignment.
+ bool useCache = fAllowCaching && !inverseFilled && args.fViewMatrix->preservesAxisAlignment() &&
+ args.fShape->hasUnstyledKey() && args.fAntiAlias;
+
+ if (!get_shape_and_clip_bounds(args.fDrawContext->width(), args.fDrawContext->height(),
+ *args.fClip, *args.fShape,
+ *args.fViewMatrix, &unclippedDevShapeBounds,
+ &clippedDevShapeBounds,
+ &devClipBounds)) {
+ if (inverseFilled) {
+ DrawAroundInvPath(args.fDrawContext, *args.fPaint, *args.fUserStencilSettings,
+ *args.fClip,
+ *args.fViewMatrix, devClipBounds, unclippedDevShapeBounds);
+
+ }
+ return true;
+ }
+
+ const SkIRect* boundsForMask = &clippedDevShapeBounds;
+ if (useCache) {
+ // Use the cache only if >50% of the path is visible.
+ int unclippedWidth = unclippedDevShapeBounds.width();
+ int unclippedHeight = unclippedDevShapeBounds.height();
+ int unclippedArea = unclippedWidth * unclippedHeight;
+ int clippedArea = clippedDevShapeBounds.width() * clippedDevShapeBounds.height();
+ int maxTextureSize = args.fDrawContext->caps()->maxTextureSize();
+ if (unclippedArea > 2 * clippedArea || unclippedWidth > maxTextureSize ||
+ unclippedHeight > maxTextureSize) {
+ useCache = false;
+ } else {
+ boundsForMask = &unclippedDevShapeBounds;
+ }
+ }
+
+ GrUniqueKey maskKey;
+ struct KeyData {
+ SkScalar fFractionalTranslateX;
+ SkScalar fFractionalTranslateY;
+ };
+
+ if (useCache) {
+ // We require the upper left 2x2 of the matrix to match exactly for a cache hit.
+ SkScalar sx = args.fViewMatrix->get(SkMatrix::kMScaleX);
+ SkScalar sy = args.fViewMatrix->get(SkMatrix::kMScaleY);
+ SkScalar kx = args.fViewMatrix->get(SkMatrix::kMSkewX);
+ SkScalar ky = args.fViewMatrix->get(SkMatrix::kMSkewY);
+ SkScalar tx = args.fViewMatrix->get(SkMatrix::kMTransX);
+ SkScalar ty = args.fViewMatrix->get(SkMatrix::kMTransY);
+ // Allow 8 bits each in x and y of subpixel positioning.
+ SkFixed fracX = SkScalarToFixed(SkScalarFraction(tx)) & 0x0000FF00;
+ SkFixed fracY = SkScalarToFixed(SkScalarFraction(ty)) & 0x0000FF00;
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey::Builder builder(&maskKey, kDomain, 5 + args.fShape->unstyledKeySize());
+ builder[0] = SkFloat2Bits(sx);
+ builder[1] = SkFloat2Bits(sy);
+ builder[2] = SkFloat2Bits(kx);
+ builder[3] = SkFloat2Bits(ky);
+ builder[4] = fracX | (fracY >> 8);
+ args.fShape->writeUnstyledKey(&builder[5]);
+ }
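+
+    // Illustrative note: fracX occupies bits 8-15 of builder[4] and fracY (shifted right by 8)
+    // bits 0-7, so two draws whose subpixel offsets agree to within 1/256 of a pixel (and whose
+    // other key fields match) share a cached mask.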
+
+ sk_sp<GrTexture> texture;
+ if (useCache) {
+ texture.reset(args.fResourceProvider->findAndRefTextureByUniqueKey(maskKey));
+ }
+ if (!texture) {
+ GrSWMaskHelper::TextureType type = useCache ? GrSWMaskHelper::TextureType::kExactFit
+ : GrSWMaskHelper::TextureType::kApproximateFit;
+ texture.reset(GrSWMaskHelper::DrawShapeMaskToTexture(fTexProvider, *args.fShape,
+ *boundsForMask, args.fAntiAlias,
+ type, args.fViewMatrix));
+ if (!texture) {
+ return false;
+ }
+ if (useCache) {
+ texture->resourcePriv().setUniqueKey(maskKey);
+ }
+ }
+
+ GrSWMaskHelper::DrawToTargetWithShapeMask(texture.get(), args.fDrawContext, *args.fPaint,
+ *args.fUserStencilSettings,
+ *args.fClip, *args.fViewMatrix,
+ SkIPoint {boundsForMask->fLeft, boundsForMask->fTop},
+ *boundsForMask);
+
+ if (inverseFilled) {
+ DrawAroundInvPath(args.fDrawContext, *args.fPaint, *args.fUserStencilSettings,
+ *args.fClip,
+ *args.fViewMatrix, devClipBounds, unclippedDevShapeBounds);
+ }
+
+ return true;
+}
diff --git a/gfx/skia/skia/src/gpu/GrSoftwarePathRenderer.h b/gfx/skia/skia/src/gpu/GrSoftwarePathRenderer.h
new file mode 100644
index 000000000..72d967323
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSoftwarePathRenderer.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSoftwarePathRenderer_DEFINED
+#define GrSoftwarePathRenderer_DEFINED
+
+#include "GrPathRenderer.h"
+
+class GrTextureProvider;
+
+/**
+ * This class uses the software side to render a path to an SkBitmap and
+ * then uploads the result to the gpu
+ */
+class GrSoftwarePathRenderer : public GrPathRenderer {
+public:
+ GrSoftwarePathRenderer(GrTextureProvider* texProvider, bool allowCaching)
+ : fTexProvider(texProvider)
+ , fAllowCaching(allowCaching) {}
+private:
+ static void DrawNonAARect(GrDrawContext* drawContext,
+ const GrPaint& paint,
+ const GrUserStencilSettings& userStencilSettings,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkMatrix& localMatrix);
+ static void DrawAroundInvPath(GrDrawContext* drawContext,
+ const GrPaint& paint,
+ const GrUserStencilSettings& userStencilSettings,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkIRect& devClipBounds,
+ const SkIRect& devPathBounds);
+
+ StencilSupport onGetStencilSupport(const GrShape&) const override {
+ return GrPathRenderer::kNoSupport_StencilSupport;
+ }
+
+ bool onCanDrawPath(const CanDrawPathArgs&) const override;
+
+ bool onDrawPath(const DrawPathArgs&) override;
+
+private:
+ GrTextureProvider* fTexProvider;
+ bool fAllowCaching;
+
+ typedef GrPathRenderer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrStencilAttachment.cpp b/gfx/skia/skia/src/gpu/GrStencilAttachment.cpp
new file mode 100644
index 000000000..bcdeec5ad
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrStencilAttachment.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrStencilAttachment.h"
+#include "GrResourceKey.h"
+
+void GrStencilAttachment::ComputeSharedStencilAttachmentKey(int width, int height, int sampleCnt,
+ GrUniqueKey* key) {
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey::Builder builder(key, kDomain, 3);
+ builder[0] = width;
+ builder[1] = height;
+ builder[2] = sampleCnt;
+}
diff --git a/gfx/skia/skia/src/gpu/GrStencilAttachment.h b/gfx/skia/skia/src/gpu/GrStencilAttachment.h
new file mode 100644
index 000000000..0ed3c8b27
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrStencilAttachment.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrStencilAttachment_DEFINED
+#define GrStencilAttachment_DEFINED
+
+#include "GrGpuResource.h"
+#include "SkClipStack.h"
+
+class GrRenderTarget;
+class GrResourceKey;
+
+class GrStencilAttachment : public GrGpuResource {
+public:
+
+
+ virtual ~GrStencilAttachment() {
+ // TODO: allow SB to be purged and detach itself from rts
+ }
+
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+ int bits() const { return fBits; }
+ int numSamples() const { return fSampleCnt; }
+
+ // called to note the last clip drawn to this buffer.
+ void setLastClip(int32_t clipStackGenID,
+ const SkIRect& clipSpaceRect,
+ const SkIPoint clipOrigin) {
+ fLastClipStackGenID = clipStackGenID;
+ fLastClipStackRect = clipSpaceRect;
+ fLastClipOrigin = clipOrigin;
+ }
+
+ // called to determine if we have to render the clip into SB.
+ bool mustRenderClip(int32_t clipStackGenID,
+ const SkIRect& clipSpaceRect,
+ const SkIPoint& clipOrigin) const {
+ return fLastClipStackGenID != clipStackGenID ||
+ fLastClipOrigin != clipOrigin ||
+ !fLastClipStackRect.contains(clipSpaceRect);
+ }
+
+ // We create a unique stencil buffer at each width, height and sampleCnt and share it for
+ // all render targets that require a stencil with those params.
+ static void ComputeSharedStencilAttachmentKey(int width, int height, int sampleCnt,
+ GrUniqueKey* key);
+
+protected:
+ GrStencilAttachment(GrGpu* gpu, int width, int height, int bits, int sampleCnt)
+ : GrGpuResource(gpu)
+ , fWidth(width)
+ , fHeight(height)
+ , fBits(bits)
+ , fSampleCnt(sampleCnt)
+ , fLastClipStackGenID(SkClipStack::kInvalidGenID) {
+ fLastClipStackRect.setEmpty();
+ }
+
+private:
+
+ int fWidth;
+ int fHeight;
+ int fBits;
+ int fSampleCnt;
+
+ int32_t fLastClipStackGenID;
+ SkIRect fLastClipStackRect;
+ SkIPoint fLastClipOrigin;
+
+ typedef GrGpuResource INHERITED;
+};
+
+#endif
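
A small sketch of the caching pattern the setLastClip()/mustRenderClip() pair enables: the clip is only re-rendered into the stencil buffer when its generation ID, origin, or bounds actually changed. The renderClipIntoStencil() call is a hypothetical placeholder for the real clip-mask drawing code and is not part of this header.

    static void sketch_update_clip(GrStencilAttachment* sb,
                                   int32_t genID,
                                   const SkIRect& clipSpaceRect,
                                   const SkIPoint& origin) {
        if (sb->mustRenderClip(genID, clipSpaceRect, origin)) {
            // renderClipIntoStencil(sb, ...);   // hypothetical helper, not upstream API
            sb->setLastClip(genID, clipSpaceRect, origin);
        }
        // Otherwise the stencil buffer already holds this clip and can be reused as-is.
    }
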
diff --git a/gfx/skia/skia/src/gpu/GrStencilSettings.cpp b/gfx/skia/skia/src/gpu/GrStencilSettings.cpp
new file mode 100644
index 000000000..d3216db0c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrStencilSettings.cpp
@@ -0,0 +1,489 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrStencilSettings.h"
+
+#include "GrProcessor.h"
+
+constexpr const GrUserStencilSettings gUnused(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kAlwaysIfInClip,
+ 0xffff,
+ GrUserStencilOp::kKeep,
+ GrUserStencilOp::kKeep,
+ 0x0000>()
+);
+
+GR_STATIC_ASSERT(kAll_StencilFlags == (gUnused.fFrontFlags[0] & gUnused.fBackFlags[0]));
+
+const GrUserStencilSettings& GrUserStencilSettings::kUnused = gUnused;
+
+void GrStencilSettings::reset(const GrUserStencilSettings& user, bool hasStencilClip,
+ int numStencilBits) {
+ uint16_t frontFlags = user.fFrontFlags[hasStencilClip];
+ if (frontFlags & kSingleSided_StencilFlag) {
+ fFlags = frontFlags;
+ if (!this->isDisabled()) {
+ fFront.reset(user.fFront, hasStencilClip, numStencilBits);
+ }
+ return;
+ }
+
+ uint16_t backFlags = user.fBackFlags[hasStencilClip];
+ fFlags = frontFlags & backFlags;
+ if (this->isDisabled()) {
+ return;
+ }
+ if (!(frontFlags & kDisabled_StencilFlag)) {
+ fFront.reset(user.fFront, hasStencilClip, numStencilBits);
+ } else {
+ fFront.setDisabled();
+ }
+ if (!(backFlags & kDisabled_StencilFlag)) {
+ fBack.reset(user.fBack, hasStencilClip, numStencilBits);
+ } else {
+ fBack.setDisabled();
+ }
+}
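
As a small check of the flag folding in reset() above: constructing hardware settings from the kUnused placeholder with no stencil clip should come out fully disabled and single sided. This assumes kAll_StencilFlags sets every flag bit, which is what the static assert earlier in this file relies on.

    static void sketch_disabled_stencil() {
        GrStencilSettings hw(GrUserStencilSettings::kUnused,
                             /*hasStencilClip=*/false,
                             /*numStencilBits=*/8);
        SkASSERT(hw.isDisabled());     // keep/keep ops with no writes collapse to "disabled"
        SkASSERT(!hw.isTwoSided());    // and the single-sided flag is preserved
    }
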
+
+void GrStencilSettings::reset(const GrStencilSettings& that) {
+ fFlags = that.fFlags;
+ if ((kInvalid_PrivateFlag | kDisabled_StencilFlag) & fFlags) {
+ return;
+ }
+ if (!this->isTwoSided()) {
+ memcpy(&fFront, &that.fFront, sizeof(Face));
+ } else {
+ memcpy(&fFront, &that.fFront, 2 * sizeof(Face));
+ GR_STATIC_ASSERT(sizeof(Face) ==
+ offsetof(GrStencilSettings, fBack) - offsetof(GrStencilSettings, fFront));
+ }
+}
+
+bool GrStencilSettings::operator==(const GrStencilSettings& that) const {
+ if ((kInvalid_PrivateFlag | kDisabled_StencilFlag) & (fFlags | that.fFlags)) {
+ // At least one is invalid and/or disabled.
+ if (kInvalid_PrivateFlag & (fFlags | that.fFlags)) {
+ return false; // We never allow invalid stencils to be equal.
+ }
+ // They're only equal if both are disabled.
+ return kDisabled_StencilFlag & (fFlags & that.fFlags);
+ }
+ if (kSingleSided_StencilFlag & (fFlags & that.fFlags)) {
+ return 0 == memcmp(&fFront, &that.fFront, sizeof(Face)); // Both are single sided.
+ } else {
+ return 0 == memcmp(&fFront, &that.fFront, 2 * sizeof(Face));
+ GR_STATIC_ASSERT(sizeof(Face) ==
+ offsetof(GrStencilSettings, fBack) - offsetof(GrStencilSettings, fFront));
+ }
+ // memcmp relies on GrStencilSettings::Face being tightly packed.
+ GR_STATIC_ASSERT(0 == offsetof(Face, fRef));
+ GR_STATIC_ASSERT(2 == sizeof(Face::fRef));
+ GR_STATIC_ASSERT(2 == offsetof(Face, fTest));
+ GR_STATIC_ASSERT(2 == sizeof(Face::fTest));
+ GR_STATIC_ASSERT(4 == offsetof(Face, fTestMask));
+ GR_STATIC_ASSERT(2 == sizeof(Face::fTestMask));
+ GR_STATIC_ASSERT(6 == offsetof(Face, fPassOp));
+ GR_STATIC_ASSERT(1 == sizeof(Face::fPassOp));
+ GR_STATIC_ASSERT(7 == offsetof(Face, fFailOp));
+ GR_STATIC_ASSERT(1 == sizeof(Face::fFailOp));
+ GR_STATIC_ASSERT(8 == offsetof(Face, fWriteMask));
+ GR_STATIC_ASSERT(2 == sizeof(Face::fWriteMask));
+ GR_STATIC_ASSERT(10 == sizeof(Face));
+}
+
+static constexpr GrStencilTest gUserStencilTestToRaw[kGrUserStencilTestCount] = {
+ // Tests that respect the clip.
+ GrStencilTest::kAlways, // kAlwaysIfInClip (This is only for when there is not a stencil clip).
+ GrStencilTest::kEqual, // kEqualIfInClip.
+ GrStencilTest::kLess, // kLessIfInClip.
+ GrStencilTest::kLEqual, // kLEqualIfInClip.
+
+ // Tests that ignore the clip.
+ GrStencilTest::kAlways,
+ GrStencilTest::kNever,
+ GrStencilTest::kGreater,
+ GrStencilTest::kGEqual,
+ GrStencilTest::kLess,
+ GrStencilTest::kLEqual,
+ GrStencilTest::kEqual,
+ GrStencilTest::kNotEqual
+};
+
+GR_STATIC_ASSERT(0 == (int)GrUserStencilTest::kAlwaysIfInClip);
+GR_STATIC_ASSERT(1 == (int)GrUserStencilTest::kEqualIfInClip);
+GR_STATIC_ASSERT(2 == (int)GrUserStencilTest::kLessIfInClip);
+GR_STATIC_ASSERT(3 == (int)GrUserStencilTest::kLEqualIfInClip);
+GR_STATIC_ASSERT(4 == (int)GrUserStencilTest::kAlways);
+GR_STATIC_ASSERT(5 == (int)GrUserStencilTest::kNever);
+GR_STATIC_ASSERT(6 == (int)GrUserStencilTest::kGreater);
+GR_STATIC_ASSERT(7 == (int)GrUserStencilTest::kGEqual);
+GR_STATIC_ASSERT(8 == (int)GrUserStencilTest::kLess);
+GR_STATIC_ASSERT(9 == (int)GrUserStencilTest::kLEqual);
+GR_STATIC_ASSERT(10 == (int)GrUserStencilTest::kEqual);
+GR_STATIC_ASSERT(11 == (int)GrUserStencilTest::kNotEqual);
+
+static constexpr GrStencilOp gUserStencilOpToRaw[kGrUserStencilOpCount] = {
+ GrStencilOp::kKeep,
+
+ // Ops that only modify user bits.
+ GrStencilOp::kZero,
+ GrStencilOp::kReplace,
+ GrStencilOp::kInvert,
+ GrStencilOp::kIncWrap,
+ GrStencilOp::kDecWrap,
+ GrStencilOp::kIncClamp, // kIncMaybeClamp.
+ GrStencilOp::kDecClamp, // kDecMaybeClamp.
+
+ // Ops that only modify the clip bit.
+ GrStencilOp::kZero, // kZeroClipBit.
+ GrStencilOp::kReplace, // kSetClipBit.
+ GrStencilOp::kInvert, // kInvertClipBit.
+
+ // Ops that modify clip and user bits.
+ GrStencilOp::kReplace, // kSetClipAndReplaceUserBits.
+ GrStencilOp::kZero // kZeroClipAndUserBits.
+};
+
+GR_STATIC_ASSERT(0 == (int)GrUserStencilOp::kKeep);
+GR_STATIC_ASSERT(1 == (int)GrUserStencilOp::kZero);
+GR_STATIC_ASSERT(2 == (int)GrUserStencilOp::kReplace);
+GR_STATIC_ASSERT(3 == (int)GrUserStencilOp::kInvert);
+GR_STATIC_ASSERT(4 == (int)GrUserStencilOp::kIncWrap);
+GR_STATIC_ASSERT(5 == (int)GrUserStencilOp::kDecWrap);
+GR_STATIC_ASSERT(6 == (int)GrUserStencilOp::kIncMaybeClamp);
+GR_STATIC_ASSERT(7 == (int)GrUserStencilOp::kDecMaybeClamp);
+GR_STATIC_ASSERT(8 == (int)GrUserStencilOp::kZeroClipBit);
+GR_STATIC_ASSERT(9 == (int)GrUserStencilOp::kSetClipBit);
+GR_STATIC_ASSERT(10 == (int)GrUserStencilOp::kInvertClipBit);
+GR_STATIC_ASSERT(11 == (int)GrUserStencilOp::kSetClipAndReplaceUserBits);
+GR_STATIC_ASSERT(12 == (int)GrUserStencilOp::kZeroClipAndUserBits);
+
+void GrStencilSettings::Face::reset(const GrUserStencilSettings::Face& user, bool hasStencilClip,
+ int numStencilBits) {
+ SkASSERT(user.fTest < (GrUserStencilTest)kGrUserStencilTestCount);
+ SkASSERT(user.fPassOp < (GrUserStencilOp)kGrUserStencilOpCount);
+ SkASSERT(user.fFailOp < (GrUserStencilOp)kGrUserStencilOpCount);
+ SkASSERT(numStencilBits > 0 && numStencilBits <= 16);
+ int clipBit = 1 << (numStencilBits - 1);
+ int userMask = clipBit - 1;
+
+ GrUserStencilOp maxOp = SkTMax(user.fPassOp, user.fFailOp);
+ SkDEBUGCODE(GrUserStencilOp otherOp = SkTMin(user.fPassOp, user.fFailOp);)
+ if (maxOp <= kLastUserOnlyStencilOp) {
+ // Ops that only modify user bits.
+ fWriteMask = user.fWriteMask & userMask;
+ SkASSERT(otherOp <= kLastUserOnlyStencilOp);
+ } else if (maxOp <= kLastClipOnlyStencilOp) {
+ // Ops that only modify the clip bit.
+ fWriteMask = clipBit;
+ SkASSERT(GrUserStencilOp::kKeep == otherOp ||
+ (otherOp > kLastUserOnlyStencilOp && otherOp <= kLastClipOnlyStencilOp));
+ } else {
+ // Ops that modify both clip and user bits.
+ fWriteMask = clipBit | (user.fWriteMask & userMask);
+ SkASSERT(GrUserStencilOp::kKeep == otherOp || otherOp > kLastClipOnlyStencilOp);
+ }
+
+ fFailOp = gUserStencilOpToRaw[(int)user.fFailOp];
+ fPassOp = gUserStencilOpToRaw[(int)user.fPassOp];
+
+ if (!hasStencilClip || user.fTest > kLastClippedStencilTest) {
+ // Ignore the clip.
+ fTestMask = user.fTestMask & userMask;
+ fTest = gUserStencilTestToRaw[(int)user.fTest];
+ } else if (GrUserStencilTest::kAlwaysIfInClip != user.fTest) {
+ // Respect the clip.
+ fTestMask = clipBit | (user.fTestMask & userMask);
+ fTest = gUserStencilTestToRaw[(int)user.fTest];
+ } else {
+ // Test only for clip.
+ fTestMask = clipBit;
+ fTest = GrStencilTest::kEqual;
+ }
+
+ fRef = (clipBit | user.fRef) & (fTestMask | fWriteMask);
+}
+
+void GrStencilSettings::Face::setDisabled() {
+ memset(this, 0, sizeof(*this));
+ GR_STATIC_ASSERT(0 == (int)GrStencilTest::kAlways);
+ GR_STATIC_ASSERT(0 == (int)GrStencilOp::kKeep);
+}
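
A worked example of the bit partitioning used by Face::reset(): the most significant stencil bit is reserved for the clip and the remaining bits hold user values.

    // For an 8-bit stencil buffer:
    constexpr int kBits     = 8;
    constexpr int kClipBit  = 1 << (kBits - 1);   // 0x80: MSB reserved for the clip
    constexpr int kUserMask = kClipBit - 1;       // 0x7F: remaining bits for user values
    // A user-only op therefore writes through (userWriteMask & 0x7F), while an op that
    // also touches the clip writes through (0x80 | (userWriteMask & 0x7F)).
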
+
+////////////////////////////////////////////////////////////////////////////////
+// Stencil Rules for Merging user stencil space into clip
+//
+
+///////
+// Replace
+static constexpr GrUserStencilSettings gUserToClipReplace(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kNotEqual,
+ 0xffff,
+ GrUserStencilOp::kSetClipAndReplaceUserBits,
+ GrUserStencilOp::kZeroClipAndUserBits,
+ 0xffff>()
+);
+
+static constexpr GrUserStencilSettings gInvUserToClipReplace(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kEqual,
+ 0xffff,
+ GrUserStencilOp::kSetClipAndReplaceUserBits,
+ GrUserStencilOp::kZeroClipAndUserBits,
+ 0xffff>()
+);
+
+///////
+// Intersect
+static constexpr GrUserStencilSettings gUserToClipIsect(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kLessIfInClip, // "0 < userBits" is equivalent to "0 != userBits".
+ 0xffff,
+ GrUserStencilOp::kSetClipAndReplaceUserBits,
+ GrUserStencilOp::kZeroClipAndUserBits,
+ 0xffff>()
+);
+
+///////
+// Difference
+static constexpr GrUserStencilSettings gUserToClipDiff(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kEqualIfInClip,
+ 0xffff,
+ GrUserStencilOp::kSetClipAndReplaceUserBits,
+ GrUserStencilOp::kZeroClipAndUserBits,
+ 0xffff>()
+);
+
+///////
+// Union
+static constexpr GrUserStencilSettings gUserToClipUnion(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kNotEqual,
+ 0xffff,
+ GrUserStencilOp::kSetClipAndReplaceUserBits,
+ GrUserStencilOp::kKeep,
+ 0xffff>()
+);
+
+static constexpr GrUserStencilSettings gInvUserToClipUnionPass0( // Does not zero user bits.
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kEqual,
+ 0xffff,
+ GrUserStencilOp::kSetClipBit,
+ GrUserStencilOp::kKeep,
+ 0x0000>()
+);
+
+///////
+// Xor
+static constexpr GrUserStencilSettings gUserToClipXorPass0( // Does not zero user bits.
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kNotEqual,
+ 0xffff,
+ GrUserStencilOp::kInvertClipBit,
+ GrUserStencilOp::kKeep,
+ 0x0000>()
+);
+
+static constexpr GrUserStencilSettings gInvUserToClipXorPass0( // Does not zero user bits.
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kEqual,
+ 0xffff,
+ GrUserStencilOp::kInvertClipBit,
+ GrUserStencilOp::kKeep,
+ 0x0000>()
+);
+
+///////
+// Reverse Diff
+static constexpr GrUserStencilSettings gUserToClipRDiffPass0( // Does not zero user bits.
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kNotEqual,
+ 0xffff,
+ GrUserStencilOp::kInvertClipBit,
+ GrUserStencilOp::kZeroClipBit,
+ 0x0000>()
+);
+
+static constexpr GrUserStencilSettings gInvUserToClipRDiffPass0( // Does not zero user bits.
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kEqual,
+ 0xffff,
+ GrUserStencilOp::kInvertClipBit,
+ GrUserStencilOp::kZeroClipBit,
+ 0x0000>()
+);
+
+///////
+// Second pass to clear user bits (only needed sometimes)
+static constexpr GrUserStencilSettings gZeroUserBits(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kNotEqual,
+ 0xffff,
+ GrUserStencilOp::kZero,
+ GrUserStencilOp::kKeep,
+ 0xffff>()
+);
+
+static constexpr const GrUserStencilSettings* gUserToClipTable[2][1 + SkRegion::kLastOp][3] = {
+ { /* Normal fill. */
+ {&gUserToClipDiff, nullptr, nullptr}, // kDifference_Op.
+ {&gUserToClipIsect, nullptr, nullptr}, // kIntersect_Op.
+ {&gUserToClipUnion, nullptr, nullptr}, // kUnion_Op.
+ {&gUserToClipXorPass0, &gZeroUserBits, nullptr}, // kXOR_Op.
+ {&gUserToClipRDiffPass0, &gZeroUserBits, nullptr}, // kReverseDifference_Op.
+ {&gUserToClipReplace, nullptr, nullptr} // kReplace_Op.
+
+ }, /* Inverse fill. */ {
+ {&gUserToClipIsect, nullptr, nullptr}, // ~diff (aka isect).
+ {&gUserToClipDiff, nullptr, nullptr}, // ~isect (aka diff).
+ {&gInvUserToClipUnionPass0, &gZeroUserBits, nullptr}, // ~union.
+ {&gInvUserToClipXorPass0, &gZeroUserBits, nullptr}, // ~xor.
+ {&gInvUserToClipRDiffPass0, &gZeroUserBits, nullptr}, // ~reverse diff.
+ {&gInvUserToClipReplace, nullptr, nullptr} // ~replace.
+ }
+};
+
+GR_STATIC_ASSERT(0 == SkRegion::kDifference_Op);
+GR_STATIC_ASSERT(1 == SkRegion::kIntersect_Op);
+GR_STATIC_ASSERT(2 == SkRegion::kUnion_Op);
+GR_STATIC_ASSERT(3 == SkRegion::kXOR_Op);
+GR_STATIC_ASSERT(4 == SkRegion::kReverseDifference_Op);
+GR_STATIC_ASSERT(5 == SkRegion::kReplace_Op);
+
+///////
+// Direct to Stencil
+
+// We can render a clip element directly without first writing to the client
+// portion of the clip when the fill is not inverse and the set operation will
+// only modify the in/out status of samples covered by the clip element.
+
+// this one only works if used right after stencil clip was cleared.
+// Our clip mask creation code doesn't allow midstream replace ops.
+static constexpr GrUserStencilSettings gReplaceClip(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kAlways,
+ 0xffff,
+ GrUserStencilOp::kSetClipBit,
+ GrUserStencilOp::kSetClipBit,
+ 0x0000>()
+);
+
+static constexpr GrUserStencilSettings gUnionClip(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kAlwaysIfInClip,
+ 0xffff,
+ GrUserStencilOp::kKeep,
+ GrUserStencilOp::kSetClipBit,
+ 0x0000>()
+);
+
+static constexpr GrUserStencilSettings gXorClip(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kAlways,
+ 0xffff,
+ GrUserStencilOp::kInvertClipBit,
+ GrUserStencilOp::kInvertClipBit,
+ 0x0000>()
+);
+
+static constexpr GrUserStencilSettings gDiffClip(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kAlwaysIfInClip,
+ 0xffff,
+ GrUserStencilOp::kZeroClipBit,
+ GrUserStencilOp::kKeep,
+ 0x0000>()
+);
+
+static constexpr const GrUserStencilSettings* gDirectDrawTable[1 + SkRegion::kLastOp][2] = {
+ {&gDiffClip, nullptr}, // kDifference_Op.
+ {nullptr, nullptr}, // kIntersect_Op.
+ {&gUnionClip, nullptr}, // kUnion_Op.
+ {&gXorClip, nullptr}, // kXOR_Op.
+ {nullptr, nullptr}, // kReverseDifference_Op.
+ {&gReplaceClip, nullptr} // kReplace_Op.
+};
+
+GR_STATIC_ASSERT(0 == SkRegion::kDifference_Op);
+GR_STATIC_ASSERT(1 == SkRegion::kIntersect_Op);
+GR_STATIC_ASSERT(2 == SkRegion::kUnion_Op);
+GR_STATIC_ASSERT(3 == SkRegion::kXOR_Op);
+GR_STATIC_ASSERT(4 == SkRegion::kReverseDifference_Op);
+GR_STATIC_ASSERT(5 == SkRegion::kReplace_Op);
+
+GrUserStencilSettings const* const* GrStencilSettings::GetClipPasses(SkRegion::Op op,
+ bool canBeDirect,
+ bool invertedFill,
+ bool* drawDirectToClip) {
+ SkASSERT((unsigned)op <= SkRegion::kLastOp);
+ if (canBeDirect && !invertedFill) { // TODO: inverse fill + intersect op can be direct.
+ GrUserStencilSettings const* const* directPass = gDirectDrawTable[op];
+ if (directPass[0]) {
+ *drawDirectToClip = true;
+ return directPass;
+ }
+ }
+ *drawDirectToClip = false;
+ return gUserToClipTable[invertedFill][op];
+}
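
A sketch of consuming GetClipPasses(): the returned array is null-terminated, so the caller issues one stenciled draw per non-null entry. drawClipPass() is a hypothetical stand-in for the real clip-mask drawing code.

    static void sketch_apply_clip_element(SkRegion::Op op, bool canBeDirect, bool invertedFill) {
        bool drawDirectToClip;
        GrUserStencilSettings const* const* passes =
                GrStencilSettings::GetClipPasses(op, canBeDirect, invertedFill,
                                                 &drawDirectToClip);
        for (int i = 0; passes[i]; ++i) {
            // drawClipPass(*passes[i], drawDirectToClip);   // hypothetical helper
        }
    }
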
+
+void GrStencilSettings::genKey(GrProcessorKeyBuilder* b) const {
+ b->add32(fFlags);
+ if (this->isDisabled()) {
+ return;
+ }
+ if (!this->isTwoSided()) {
+ constexpr int kCount16 = sizeof(Face) / sizeof(uint16_t);
+ GR_STATIC_ASSERT(0 == sizeof(Face) % sizeof(uint16_t));
+ uint16_t* key = reinterpret_cast<uint16_t*>(b->add32n((kCount16 + 1) / 2));
+ memcpy(key, &fFront, sizeof(Face));
+ key[kCount16] = 0;
+ GR_STATIC_ASSERT(1 == kCount16 % 2);
+ } else {
+ constexpr int kCount32 = (2 * sizeof(Face)) / sizeof(uint32_t);
+ GR_STATIC_ASSERT(0 == (2 * sizeof(Face)) % sizeof(uint32_t));
+ uint32_t* key = b->add32n(kCount32);
+ memcpy(key, &fFront, 2 * sizeof(Face));
+ GR_STATIC_ASSERT(sizeof(Face) ==
+ offsetof(GrStencilSettings, fBack) - offsetof(GrStencilSettings, fFront));
+ }
+ // We rely on GrStencilSettings::Face being tightly packed for the key to be reliable.
+ GR_STATIC_ASSERT(0 == offsetof(Face, fRef));
+ GR_STATIC_ASSERT(2 == sizeof(Face::fRef));
+ GR_STATIC_ASSERT(2 == offsetof(Face, fTest));
+ GR_STATIC_ASSERT(2 == sizeof(Face::fTest));
+ GR_STATIC_ASSERT(4 == offsetof(Face, fTestMask));
+ GR_STATIC_ASSERT(2 == sizeof(Face::fTestMask));
+ GR_STATIC_ASSERT(6 == offsetof(Face, fPassOp));
+ GR_STATIC_ASSERT(1 == sizeof(Face::fPassOp));
+ GR_STATIC_ASSERT(7 == offsetof(Face, fFailOp));
+ GR_STATIC_ASSERT(1 == sizeof(Face::fFailOp));
+ GR_STATIC_ASSERT(8 == offsetof(Face, fWriteMask));
+ GR_STATIC_ASSERT(2 == sizeof(Face::fWriteMask));
+ GR_STATIC_ASSERT(10 == sizeof(Face));
+}
diff --git a/gfx/skia/skia/src/gpu/GrStencilSettings.h b/gfx/skia/skia/src/gpu/GrStencilSettings.h
new file mode 100644
index 000000000..15a8cac12
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrStencilSettings.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrStencilSettings_DEFINED
+#define GrStencilSettings_DEFINED
+
+#include "GrUserStencilSettings.h"
+#include "SkRegion.h"
+
+class GrProcessorKeyBuilder;
+
+enum class GrStencilTest : uint16_t {
+ kAlways,
+ kNever,
+ kGreater,
+ kGEqual,
+ kLess,
+ kLEqual,
+ kEqual,
+ kNotEqual
+};
+static constexpr int kGrStencilTestCount = 1 + (int)GrStencilTest::kNotEqual;
+
+enum class GrStencilOp : uint8_t {
+ kKeep,
+ kZero,
+ kReplace, // Replace stencil value with fRef (only the bits enabled in fWriteMask).
+ kInvert,
+ kIncWrap,
+ kDecWrap,
+ // NOTE: clamping occurs before the write mask. So if the MSB is zero and masked out, stencil
+ // values will still wrap when using clamping ops.
+ kIncClamp,
+ kDecClamp
+};
+static constexpr int kGrStencilOpCount = 1 + (int)GrStencilOp::kDecClamp;
+
+/**
+ * This class defines concrete stencil settings that map directly to the underlying hardware. It
+ * is deduced from user stencil settings, stencil clip status, and the number of bits in the
+ * target stencil buffer.
+ */
+class GrStencilSettings {
+public:
+ GrStencilSettings() { this->setDisabled(); }
+ GrStencilSettings(const GrUserStencilSettings& user, bool hasStencilClip, int numStencilBits) {
+ this->reset(user, hasStencilClip, numStencilBits);
+ }
+ GrStencilSettings(const GrStencilSettings& that) { this->reset(that); }
+ GrStencilSettings& operator=(const GrStencilSettings& that) { this->reset(that); return *this; }
+
+ void invalidate() { fFlags |= kInvalid_PrivateFlag; }
+ void setDisabled() { fFlags = kAll_StencilFlags; }
+ void reset(const GrUserStencilSettings&, bool hasStencilClip, int numStencilBits);
+ void reset(const GrStencilSettings&);
+
+ bool isValid() const { return !(fFlags & kInvalid_PrivateFlag); }
+ bool isDisabled() const { SkASSERT(this->isValid()); return fFlags & kDisabled_StencilFlag; }
+ bool doesWrite() const { SkASSERT(this->isValid());
+ return !(fFlags & kNoModifyStencil_StencilFlag); }
+ bool isTwoSided() const { SkASSERT(this->isValid());
+ return !(fFlags & kSingleSided_StencilFlag); }
+ bool usesWrapOp() const { SkASSERT(this->isValid());
+ return !(fFlags & kNoWrapOps_StencilFlag); }
+
+ void genKey(GrProcessorKeyBuilder* b) const;
+
+ bool operator!=(const GrStencilSettings& that) const { return !(*this == that); }
+ bool operator==(const GrStencilSettings&) const;
+
+ struct Face : public GrTStencilFaceSettings<GrStencilTest, GrStencilOp> {
+ void reset(const GrUserStencilSettings::Face&, bool useStencilClip, int numStencilBits);
+ void setDisabled();
+ };
+
+ const Face& front() const { SkASSERT(!this->isDisabled()); return fFront; }
+ const Face& back() const { SkASSERT(this->isTwoSided()); return fBack; }
+
+ /**
+ * Given a thing to draw into the stencil clip, a fill type, and a set op
+ * this function determines:
+ * 1. Whether the thing can be drawn directly to the stencil clip or
+ * needs to be drawn to the client portion of the stencil first.
+ * 2. How many passes are needed.
+ * 3. What those passes are.
+ *
+ * @param op the set op to combine this element with the existing clip
+ * @param canBeDirect can the caller draw this element directly (without using stencil)?
+ * @param invertedFill is this path inverted
+ * @param drawDirectToClip out: true if caller should draw the element directly, false if it
+ * should draw it into the user stencil bits first.
+ *
+ * @return a null-terminated array of settings for stencil passes.
+ *
+ * If drawDirectToClip is false, the caller must first draw the element into the user
+ * stencil bits, and then cover the clip area with multiple passes using the returned
+ * stencil settings.
+ *
+ * If drawDirectToClip is true, the returned array will only have one pass and the
+ * caller should use those stencil settings while drawing the element directly.
+ */
+ static GrUserStencilSettings const* const* GetClipPasses(SkRegion::Op op,
+ bool canBeDirect,
+ bool invertedFill,
+ bool* drawDirectToClip);
+
+private:
+ // Internal flag for backends to optionally mark their tracked stencil state as invalid.
+ enum { kInvalid_PrivateFlag = (kLast_StencilFlag << 1) };
+
+ uint32_t fFlags;
+ Face fFront;
+ Face fBack;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrStyle.cpp b/gfx/skia/skia/src/gpu/GrStyle.cpp
new file mode 100644
index 000000000..153cade91
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrStyle.cpp
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrStyle.h"
+#include "SkDashPathPriv.h"
+
+int GrStyle::KeySize(const GrStyle &style, Apply apply, uint32_t flags) {
+ GR_STATIC_ASSERT(sizeof(uint32_t) == sizeof(SkScalar));
+ int size = 0;
+ if (style.isDashed()) {
+ // One scalar for scale, one for dash phase, and one for each dash value.
+ size += 2 + style.dashIntervalCnt();
+ } else if (style.pathEffect()) {
+ // No key for a generic path effect.
+ return -1;
+ }
+
+ if (Apply::kPathEffectOnly == apply) {
+ return size;
+ }
+
+ if (style.strokeRec().needToApply()) {
+ // One for res scale, one for style/cap/join, one for miter limit, and one for width.
+ size += 4;
+ }
+ return size;
+}
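
A worked example of the sizing logic above, stated as a comment for reference:

    // A dashed stroke with two dash intervals, keyed with Apply::kPathEffectAndStrokeRec:
    //   2 (scale + phase) + 2 (intervals) + 4 (scale, style/cap/join, miter, width) = 8 slots.
    // A simple fill has a zero-sized key, and any non-dash path effect returns -1 (not keyable).
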
+
+void GrStyle::WriteKey(uint32_t *key, const GrStyle &style, Apply apply, SkScalar scale,
+ uint32_t flags) {
+ SkASSERT(key);
+ SkASSERT(KeySize(style, apply) >= 0);
+ GR_STATIC_ASSERT(sizeof(uint32_t) == sizeof(SkScalar));
+
+ int i = 0;
+ // The scale can influence both the path effect and stroking. We want to preserve the
+ // property that the following two are equal:
+ // 1. WriteKey with apply == kPathEffectAndStrokeRec
+ // 2. WriteKey with apply == kPathEffectOnly followed by WriteKey of a GrStyle made
+ // from SkStrokeRec output by the path effect (and no additional path effect).
+ // Since the scale can affect both parts of 2 we write it into the key twice.
+ if (style.isDashed()) {
+ GR_STATIC_ASSERT(sizeof(style.dashPhase()) == sizeof(uint32_t));
+ SkScalar phase = style.dashPhase();
+ memcpy(&key[i++], &scale, sizeof(SkScalar));
+ memcpy(&key[i++], &phase, sizeof(SkScalar));
+
+ int32_t count = style.dashIntervalCnt();
+ // Dash count should always be even.
+ SkASSERT(0 == (count & 0x1));
+ const SkScalar *intervals = style.dashIntervals();
+ int intervalByteCnt = count * sizeof(SkScalar);
+ memcpy(&key[i], intervals, intervalByteCnt);
+ i += count;
+ } else {
+ SkASSERT(!style.pathEffect());
+ }
+
+ if (Apply::kPathEffectAndStrokeRec == apply && style.strokeRec().needToApply()) {
+ memcpy(&key[i++], &scale, sizeof(SkScalar));
+ enum {
+ kStyleBits = 2,
+ kJoinBits = 2,
+ kCapBits = 32 - kStyleBits - kJoinBits,
+
+ kJoinShift = kStyleBits,
+ kCapShift = kJoinShift + kJoinBits,
+ };
+ GR_STATIC_ASSERT(SkStrokeRec::kStyleCount <= (1 << kStyleBits));
+ GR_STATIC_ASSERT(SkPaint::kJoinCount <= (1 << kJoinBits));
+ GR_STATIC_ASSERT(SkPaint::kCapCount <= (1 << kCapBits));
+ // The cap type only matters for unclosed shapes. However, a path effect could unclose
+ // the shape before it is stroked.
+ SkPaint::Cap cap = SkPaint::kDefault_Cap;
+ if (!(flags & kClosed_KeyFlag) || style.pathEffect()) {
+ cap = style.strokeRec().getCap();
+ }
+ SkScalar miter = -1.f;
+ SkPaint::Join join = SkPaint::kDefault_Join;
+
+ // Dashing will not insert joins but other path effects may.
+ if (!(flags & kNoJoins_KeyFlag) || style.hasNonDashPathEffect()) {
+ join = style.strokeRec().getJoin();
+ // Miter limit only affects miter joins
+ if (SkPaint::kMiter_Join == join) {
+ miter = style.strokeRec().getMiter();
+ }
+ }
+
+ key[i++] = style.strokeRec().getStyle() |
+ join << kJoinShift |
+ cap << kCapShift;
+
+ memcpy(&key[i++], &miter, sizeof(miter));
+
+ SkScalar width = style.strokeRec().getWidth();
+ memcpy(&key[i++], &width, sizeof(width));
+ }
+ SkASSERT(KeySize(style, apply) == i);
+}
+
+void GrStyle::initPathEffect(SkPathEffect* pe) {
+ SkASSERT(!fPathEffect);
+ SkASSERT(SkPathEffect::kNone_DashType == fDashInfo.fType);
+ SkASSERT(0 == fDashInfo.fIntervals.count());
+ if (!pe) {
+ return;
+ }
+ SkPathEffect::DashInfo info;
+ if (SkPathEffect::kDash_DashType == pe->asADash(&info)) {
+ SkStrokeRec::Style recStyle = fStrokeRec.getStyle();
+ if (recStyle != SkStrokeRec::kFill_Style && recStyle != SkStrokeRec::kStrokeAndFill_Style) {
+ fDashInfo.fType = SkPathEffect::kDash_DashType;
+ fDashInfo.fIntervals.reset(info.fCount);
+ fDashInfo.fPhase = info.fPhase;
+ info.fIntervals = fDashInfo.fIntervals.get();
+ pe->asADash(&info);
+ fPathEffect.reset(SkSafeRef(pe));
+ }
+ } else {
+ fPathEffect.reset(SkSafeRef(pe));
+ }
+}
+
+bool GrStyle::applyPathEffect(SkPath* dst, SkStrokeRec* strokeRec, const SkPath& src) const {
+ if (!fPathEffect) {
+ return false;
+ }
+ if (SkPathEffect::kDash_DashType == fDashInfo.fType) {
+ // We apply the dash ourselves here rather than using the path effect. This is so that
+ // we can control whether the dasher applies the strokeRec for special cases. Our keying
+ // depends on the strokeRec being applied separately.
+ SkScalar phase = fDashInfo.fPhase;
+ const SkScalar* intervals = fDashInfo.fIntervals.get();
+ int intervalCnt = fDashInfo.fIntervals.count();
+ SkScalar initialLength;
+ int initialIndex;
+ SkScalar intervalLength;
+ SkDashPath::CalcDashParameters(phase, intervals, intervalCnt, &initialLength,
+ &initialIndex, &intervalLength);
+ if (!SkDashPath::InternalFilter(dst, src, strokeRec,
+ nullptr, intervals, intervalCnt,
+ initialLength, initialIndex, intervalLength,
+ SkDashPath::StrokeRecApplication::kDisallow)) {
+ return false;
+ }
+ } else if (!fPathEffect->filterPath(dst, src, strokeRec, nullptr)) {
+ return false;
+ }
+ dst->setIsVolatile(true);
+ return true;
+}
+
+bool GrStyle::applyPathEffectToPath(SkPath *dst, SkStrokeRec *remainingStroke,
+ const SkPath &src, SkScalar resScale) const {
+ SkASSERT(dst);
+ SkStrokeRec strokeRec = fStrokeRec;
+ strokeRec.setResScale(resScale);
+ if (!this->applyPathEffect(dst, &strokeRec, src)) {
+ return false;
+ }
+ *remainingStroke = strokeRec;
+ return true;
+}
+
+bool GrStyle::applyToPath(SkPath* dst, SkStrokeRec::InitStyle* style, const SkPath& src,
+ SkScalar resScale) const {
+ SkASSERT(style);
+ SkASSERT(dst);
+ SkStrokeRec strokeRec = fStrokeRec;
+ strokeRec.setResScale(resScale);
+ const SkPath* pathForStrokeRec = &src;
+ if (this->applyPathEffect(dst, &strokeRec, src)) {
+ pathForStrokeRec = dst;
+ } else if (fPathEffect) {
+ return false;
+ }
+ if (strokeRec.needToApply()) {
+ if (!strokeRec.applyToPath(dst, *pathForStrokeRec)) {
+ return false;
+ }
+ dst->setIsVolatile(true);
+ *style = SkStrokeRec::kFill_InitStyle;
+ } else if (!fPathEffect) {
+ // Nothing to do for path effect or stroke, fail.
+ return false;
+ } else {
+ SkASSERT(SkStrokeRec::kFill_Style == strokeRec.getStyle() ||
+ SkStrokeRec::kHairline_Style == strokeRec.getStyle());
+ *style = strokeRec.getStyle() == SkStrokeRec::kFill_Style
+ ? SkStrokeRec::kFill_InitStyle
+ : SkStrokeRec::kHairline_InitStyle;
+ }
+ return true;
+}
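
A sketch of the intended call pattern for applyToPath(): the caller supplies the source geometry and a resolution scale (typically derived from the view matrix) and receives a path with the styling baked in, to be drawn as a fill or hairline. The function name is illustrative only.

    static bool sketch_bake_style(const GrStyle& style, const SkPath& src,
                                  const SkMatrix& viewMatrix, SkPath* dst) {
        SkStrokeRec::InitStyle fillOrHairline;
        SkScalar scale = GrStyle::MatrixToScaleFactor(viewMatrix);
        if (!style.applyToPath(dst, &fillOrHairline, src, scale)) {
            // Fails when the style would not alter the geometry, or the path effect fails.
            return false;
        }
        // dst is now volatile and should be drawn per fillOrHairline (fill or hairline).
        return true;
    }
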
diff --git a/gfx/skia/skia/src/gpu/GrStyle.h b/gfx/skia/skia/src/gpu/GrStyle.h
new file mode 100644
index 000000000..9091166fe
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrStyle.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrStyle_DEFINED
+#define GrStyle_DEFINED
+
+#include "GrTypes.h"
+#include "SkPathEffect.h"
+#include "SkStrokeRec.h"
+#include "SkTemplates.h"
+
+/**
+ * Represents the various ways that a GrShape can be styled. It has fill/stroking information
+ * as well as an optional path effect. If the path effect represents dashing, the dashing
+ * information is extracted from the path effect and stored explicitly.
+ *
+ * This will replace GrStrokeInfo as GrShape is deployed.
+ */
+class GrStyle {
+public:
+ /**
+ * A style object that represents a fill with no path effect.
+ * TODO: constexpr with C++14
+ */
+ static const GrStyle& SimpleFill() {
+ static const GrStyle kFill(SkStrokeRec::kFill_InitStyle);
+ return kFill;
+ }
+
+ /**
+ * A style object that represents a hairline stroke with no path effect.
+ * TODO: constexpr with C++14
+ */
+ static const GrStyle& SimpleHairline() {
+ static const GrStyle kHairline(SkStrokeRec::kHairline_InitStyle);
+ return kHairline;
+ }
+
+ enum class Apply {
+ kPathEffectOnly,
+ kPathEffectAndStrokeRec
+ };
+
+ /**
+ * Optional flags for computing keys that may remove unnecessary variation in the key due to
+ * style settings that don't affect particular classes of geometry.
+ */
+ enum KeyFlags {
+ // The shape being styled has no open contours.
+ kClosed_KeyFlag = 0x1,
+ // The shape being styled doesn't have any joins and so isn't affected by join type.
+ kNoJoins_KeyFlag = 0x2
+ };
+
+ /**
+ * Computes the key length for a GrStyle. The return will be negative if it cannot be turned
+ * into a key. This occurs when there is a path effect that is not a dash. The key can
+ * either reflect just the path effect (if one) or the path effect and the strokerec. Note
+ * that a simple fill has a zero sized key.
+ */
+ static int KeySize(const GrStyle&, Apply, uint32_t flags = 0);
+
+ /**
+ * Writes a unique key for the style into the provided buffer. This function assumes the buffer
+ * has room for at least KeySize() values. It assumes that KeySize() returns a non-negative
+ * value for the combination of GrStyle, Apply and flags params. This is written so that the key
+ * for just dash application followed by the key for the remaining SkStrokeRec is the same as
+ * the key for applying dashing and SkStrokeRec all at once.
+ */
+ static void WriteKey(uint32_t*, const GrStyle&, Apply, SkScalar scale, uint32_t flags = 0);
+
+ GrStyle() : GrStyle(SkStrokeRec::kFill_InitStyle) {}
+
+ explicit GrStyle(SkStrokeRec::InitStyle initStyle) : fStrokeRec(initStyle) {}
+
+ GrStyle(const SkStrokeRec& strokeRec, SkPathEffect* pe) : fStrokeRec(strokeRec) {
+ this->initPathEffect(pe);
+ }
+
+ GrStyle(const GrStyle& that) : fStrokeRec(SkStrokeRec::kFill_InitStyle) { *this = that; }
+
+ explicit GrStyle(const SkPaint& paint) : fStrokeRec(paint) {
+ this->initPathEffect(paint.getPathEffect());
+ }
+
+ explicit GrStyle(const SkPaint& paint, SkPaint::Style overrideStyle)
+ : fStrokeRec(paint, overrideStyle) {
+ this->initPathEffect(paint.getPathEffect());
+ }
+
+ GrStyle& operator=(const GrStyle& that) {
+ fPathEffect = that.fPathEffect;
+ fDashInfo = that.fDashInfo;
+ fStrokeRec = that.fStrokeRec;
+ return *this;
+ }
+
+ void resetToInitStyle(SkStrokeRec::InitStyle fillOrHairline) {
+ fDashInfo.reset();
+ fPathEffect.reset(nullptr);
+ if (SkStrokeRec::kFill_InitStyle == fillOrHairline) {
+ fStrokeRec.setFillStyle();
+ } else {
+ fStrokeRec.setHairlineStyle();
+ }
+ }
+
+ /** Is this style a fill with no path effect? */
+ bool isSimpleFill() const { return fStrokeRec.isFillStyle() && !fPathEffect; }
+
+ /** Is this style a hairline with no path effect? */
+ bool isSimpleHairline() const { return fStrokeRec.isHairlineStyle() && !fPathEffect; }
+
+ SkPathEffect* pathEffect() const { return fPathEffect.get(); }
+
+ bool hasPathEffect() const { return SkToBool(fPathEffect.get()); }
+
+ bool hasNonDashPathEffect() const { return fPathEffect.get() && !this->isDashed(); }
+
+ bool isDashed() const { return SkPathEffect::kDash_DashType == fDashInfo.fType; }
+ SkScalar dashPhase() const {
+ SkASSERT(this->isDashed());
+ return fDashInfo.fPhase;
+ }
+ int dashIntervalCnt() const {
+ SkASSERT(this->isDashed());
+ return fDashInfo.fIntervals.count();
+ }
+ const SkScalar* dashIntervals() const {
+ SkASSERT(this->isDashed());
+ return fDashInfo.fIntervals.get();
+ }
+
+ const SkStrokeRec& strokeRec() const { return fStrokeRec; }
+
+ /** Hairline or fill styles without path effects make no alterations to the geometry. */
+ bool applies() const {
+ return this->pathEffect() || (!fStrokeRec.isFillStyle() && !fStrokeRec.isHairlineStyle());
+ }
+
+ static SkScalar MatrixToScaleFactor(const SkMatrix& matrix) {
+ // getMaxScale will return -1 if the matrix has perspective. In that case we can use a scale
+ // factor of 1. This isn't necessarily a good choice and in the future we might consider
+ // taking a bounds here for the perspective case.
+ return SkScalarAbs(matrix.getMaxScale());
+ }
+ /**
+ * Applies just the path effect and returns remaining stroke information. This will fail if
+ * there is no path effect. dst may or may not have been overwritten on failure. Scale controls
+ * geometric approximations made by the path effect. It is typically computed from the view
+ * matrix.
+ */
+ bool SK_WARN_UNUSED_RESULT applyPathEffectToPath(SkPath* dst, SkStrokeRec* remainingStroke,
+ const SkPath& src, SkScalar scale) const;
+
+ /**
+ * If this succeeds then the result path should be filled or hairlined as indicated by the
+ * returned SkStrokeRec::InitStyle value. Will fail if there is no path effect and the
+ * strokerec doesn't change the geometry. When this fails the outputs may or may not have
+ * been overwritten. Scale controls geometric approximations made by the path effect and
+ * stroker. It is typically computed from the view matrix.
+ */
+ bool SK_WARN_UNUSED_RESULT applyToPath(SkPath* dst, SkStrokeRec::InitStyle* fillOrHairline,
+ const SkPath& src, SkScalar scale) const;
+
+ /** Given bounds of a path compute the bounds of path with the style applied. */
+ void adjustBounds(SkRect* dst, const SkRect& src) const {
+ if (this->pathEffect()) {
+ this->pathEffect()->computeFastBounds(dst, src);
+ // This may not be the correct SkStrokeRec to use. skbug.com/5299
+ // It happens to work for dashing.
+ SkScalar radius = fStrokeRec.getInflationRadius();
+ dst->outset(radius, radius);
+ } else {
+ SkScalar radius = fStrokeRec.getInflationRadius();
+ *dst = src.makeOutset(radius, radius);
+ }
+ }
+
+private:
+ void initPathEffect(SkPathEffect* pe);
+
+ struct DashInfo {
+ DashInfo() : fType(SkPathEffect::kNone_DashType) {}
+ DashInfo& operator=(const DashInfo& that) {
+ fType = that.fType;
+ fPhase = that.fPhase;
+ fIntervals.reset(that.fIntervals.count());
+ sk_careful_memcpy(fIntervals.get(), that.fIntervals.get(),
+ sizeof(SkScalar) * that.fIntervals.count());
+ return *this;
+ }
+ void reset() {
+ fType = SkPathEffect::kNone_DashType;
+ fIntervals.reset(0);
+ }
+ SkPathEffect::DashType fType;
+ SkScalar fPhase;
+ SkAutoSTArray<4, SkScalar> fIntervals;
+ };
+
+ bool applyPathEffect(SkPath* dst, SkStrokeRec* strokeRec, const SkPath& src) const;
+
+ SkStrokeRec fStrokeRec;
+ sk_sp<SkPathEffect> fPathEffect;
+ DashInfo fDashInfo;
+};
+
+#endif
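
A sketch of keying a style under the contract documented above: KeySize() reports the number of uint32_t slots (or -1 when the style cannot be keyed) and WriteKey() fills exactly that many. SkAutoSTArray comes from SkTemplates.h, which this header already includes; the function name is illustrative only.

    static bool sketch_style_key(const GrStyle& style, SkScalar scale) {
        int n = GrStyle::KeySize(style, GrStyle::Apply::kPathEffectAndStrokeRec);
        if (n < 0) {
            return false;                       // generic (non-dash) path effect: not keyable
        }
        SkAutoSTArray<8, uint32_t> key(n);      // n may be 0 for a simple fill
        GrStyle::WriteKey(key.get(), style, GrStyle::Apply::kPathEffectAndStrokeRec, scale);
        return true;
    }
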
diff --git a/gfx/skia/skia/src/gpu/GrSurface.cpp b/gfx/skia/skia/src/gpu/GrSurface.cpp
new file mode 100644
index 000000000..3c9368e67
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSurface.cpp
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrSurface.h"
+#include "GrContext.h"
+#include "GrSurfacePriv.h"
+
+#include "SkBitmap.h"
+#include "SkGrPriv.h"
+#include "SkImageEncoder.h"
+#include <stdio.h>
+
+size_t GrSurface::WorstCaseSize(const GrSurfaceDesc& desc) {
+ size_t size;
+
+ bool isRenderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
+ if (isRenderTarget) {
+ // We own one color value for each MSAA sample.
+ int colorValuesPerPixel = SkTMax(1, desc.fSampleCnt);
+ if (desc.fSampleCnt) {
+ // Worst case, we own the resolve buffer, so that is one more sample per pixel.
+ colorValuesPerPixel += 1;
+ }
+ SkASSERT(kUnknown_GrPixelConfig != desc.fConfig);
+ SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));
+ size_t colorBytes = GrBytesPerPixel(desc.fConfig);
+ SkASSERT(colorBytes > 0);
+
+ size = (size_t) colorValuesPerPixel * desc.fWidth * desc.fHeight * colorBytes;
+ } else {
+ if (GrPixelConfigIsCompressed(desc.fConfig)) {
+ size = GrCompressedFormatDataSize(desc.fConfig, desc.fWidth, desc.fHeight);
+ } else {
+ size = (size_t) desc.fWidth * desc.fHeight * GrBytesPerPixel(desc.fConfig);
+ }
+
+ size += size/3; // in case we have to mipmap
+ }
+
+ return size;
+}
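
A worked example of the budget computed above, stated as a comment for reference:

    // A 256x256 RGBA8888 texture (4 bytes per pixel) that is not a render target:
    //   256 * 256 * 4 = 262144 bytes, plus 262144 / 3 = 87381 bytes of mipmap headroom,
    //   roughly 341 KiB in total.
    // The same surface as a 4-sample render target counts (4 + 1) color values per pixel
    // (one extra for the resolve buffer): 256 * 256 * 5 * 4 = 1310720 bytes.
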
+
+template<typename T> static bool adjust_params(int surfaceWidth,
+ int surfaceHeight,
+ size_t bpp,
+ int* left, int* top, int* width, int* height,
+ T** data,
+ size_t* rowBytes) {
+ if (!*rowBytes) {
+ *rowBytes = *width * bpp;
+ }
+
+ SkIRect subRect = SkIRect::MakeXYWH(*left, *top, *width, *height);
+ SkIRect bounds = SkIRect::MakeWH(surfaceWidth, surfaceHeight);
+
+ if (!subRect.intersect(bounds)) {
+ return false;
+ }
+ *data = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(*data) +
+ (subRect.fTop - *top) * *rowBytes + (subRect.fLeft - *left) * bpp);
+
+ *left = subRect.fLeft;
+ *top = subRect.fTop;
+ *width = subRect.width();
+ *height = subRect.height();
+ return true;
+}
+
+bool GrSurfacePriv::AdjustReadPixelParams(int surfaceWidth,
+ int surfaceHeight,
+ size_t bpp,
+ int* left, int* top, int* width, int* height,
+ void** data,
+ size_t* rowBytes) {
+ return adjust_params<void>(surfaceWidth, surfaceHeight, bpp, left, top, width, height, data,
+ rowBytes);
+}
+
+bool GrSurfacePriv::AdjustWritePixelParams(int surfaceWidth,
+ int surfaceHeight,
+ size_t bpp,
+ int* left, int* top, int* width, int* height,
+ const void** data,
+ size_t* rowBytes) {
+ return adjust_params<const void>(surfaceWidth, surfaceHeight, bpp, left, top, width, height,
+ data, rowBytes);
+}
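
A sketch of what the adjustment above does for a read that hangs off the surface edge: a 64x64 request at (-16, -16) against a 32x32 surface is clipped to the overlapping 32x32 region, and the data pointer is advanced past the rows and columns that fall outside the surface. The function name is illustrative only.

    static bool sketch_clipped_read(void* dst) {
        int left = -16, top = -16, width = 64, height = 64;
        size_t rowBytes = 0;    // 0 means "tight": set to width * bpp (64 * 4 = 256) on output
        bool ok = GrSurfacePriv::AdjustReadPixelParams(32, 32, /*bpp=*/4,
                                                       &left, &top, &width, &height,
                                                       &dst, &rowBytes);
        // On success: left == 0, top == 0, width == 32, height == 32, rowBytes == 256.
        return ok;
    }
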
+
+
+//////////////////////////////////////////////////////////////////////////////
+
+bool GrSurface::writePixels(int left, int top, int width, int height,
+ GrPixelConfig config, const void* buffer, size_t rowBytes,
+ uint32_t pixelOpsFlags) {
+ // go through context so that all necessary flushing occurs
+ GrContext* context = this->getContext();
+ if (nullptr == context) {
+ return false;
+ }
+ return context->writeSurfacePixels(this, left, top, width, height, config, buffer,
+ rowBytes, pixelOpsFlags);
+}
+
+bool GrSurface::readPixels(int left, int top, int width, int height,
+ GrPixelConfig config, void* buffer, size_t rowBytes,
+ uint32_t pixelOpsFlags) {
+ // go through context so that all necessary flushing occurs
+ GrContext* context = this->getContext();
+ if (nullptr == context) {
+ return false;
+ }
+ return context->readSurfacePixels(this, left, top, width, height, config, buffer,
+ rowBytes, pixelOpsFlags);
+}
+
+// TODO: This should probably be a non-member helper function. It might only be needed in
+// debug or developer builds.
+bool GrSurface::savePixels(const char* filename) {
+ SkBitmap bm;
+ if (!bm.tryAllocPixels(SkImageInfo::MakeN32Premul(this->width(), this->height()))) {
+ return false;
+ }
+
+ bool result = this->readPixels(0, 0, this->width(), this->height(), kSkia8888_GrPixelConfig,
+ bm.getPixels());
+ if (!result) {
+ SkDebugf("------ failed to read pixels for %s\n", filename);
+ return false;
+ }
+
+ // remove any previous version of this file
+ remove(filename);
+
+ if (!SkImageEncoder::EncodeFile(filename, bm, SkImageEncoder::kPNG_Type, 100)) {
+ SkDebugf("------ failed to encode %s\n", filename);
+ remove(filename); // remove any partial file
+ return false;
+ }
+
+ return true;
+}
+
+void GrSurface::flushWrites() {
+ if (!this->wasDestroyed()) {
+ this->getContext()->flushSurfaceWrites(this);
+ }
+}
+
+bool GrSurface::hasPendingRead() const {
+ const GrTexture* thisTex = this->asTexture();
+ if (thisTex && thisTex->internalHasPendingRead()) {
+ return true;
+ }
+ const GrRenderTarget* thisRT = this->asRenderTarget();
+ if (thisRT && thisRT->internalHasPendingRead()) {
+ return true;
+ }
+ return false;
+}
+
+bool GrSurface::hasPendingWrite() const {
+ const GrTexture* thisTex = this->asTexture();
+ if (thisTex && thisTex->internalHasPendingWrite()) {
+ return true;
+ }
+ const GrRenderTarget* thisRT = this->asRenderTarget();
+ if (thisRT && thisRT->internalHasPendingWrite()) {
+ return true;
+ }
+ return false;
+}
+
+bool GrSurface::hasPendingIO() const {
+ const GrTexture* thisTex = this->asTexture();
+ if (thisTex && thisTex->internalHasPendingIO()) {
+ return true;
+ }
+ const GrRenderTarget* thisRT = this->asRenderTarget();
+ if (thisRT && thisRT->internalHasPendingIO()) {
+ return true;
+ }
+ return false;
+}
+
+void GrSurface::onRelease() {
+ this->invokeReleaseProc();
+ this->INHERITED::onRelease();
+}
+
+void GrSurface::onAbandon() {
+ this->invokeReleaseProc();
+ this->INHERITED::onAbandon();
+}
diff --git a/gfx/skia/skia/src/gpu/GrSurfacePriv.h b/gfx/skia/skia/src/gpu/GrSurfacePriv.h
new file mode 100644
index 000000000..f2ba7baca
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSurfacePriv.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSurfacePriv_DEFINED
+#define GrSurfacePriv_DEFINED
+
+#include "GrSurface.h"
+
+/** Class that adds methods to GrSurface that are only intended for use internal to Skia.
+ This class is purely a privileged window into GrSurface. It should never have additional data
+ members or virtual methods.
+ Non-static methods that are not trivial inlines should be spring-boarded (e.g. declared and
+ implemented privately in GrSurface with an inline public method here). */
+class GrSurfacePriv {
+public:
+ /** Helpers used in read/write pixels implementations. The parameters are adjusted so that the
+ read/write respects the bounds of a surface. If the input *rowBytes is 0 it will be
+ the tight row bytes (based on width and bpp) on output. */
+ static bool AdjustReadPixelParams(int surfaceWidth,
+ int surfaceHeight,
+ size_t bpp,
+ int* left, int* top, int* width, int* height,
+ void** data,
+ size_t* rowBytes);
+ static bool AdjustWritePixelParams(int surfaceWidth,
+ int surfaceHeight,
+ size_t bpp,
+ int* left, int* top, int* width, int* height,
+ const void** data,
+ size_t* rowBytes);
+
+ /**
+ * Write the contents of the surface to a PNG. Returns true if successful.
+ * @param filename Full path to desired file
+ */
+ bool savePixels(const char* filename) { return fSurface->savePixels(filename); }
+
+ bool hasPendingRead() const { return fSurface->hasPendingRead(); }
+ bool hasPendingWrite() const { return fSurface->hasPendingWrite(); }
+ bool hasPendingIO() const { return fSurface->hasPendingIO(); }
+
+private:
+ explicit GrSurfacePriv(GrSurface* surface) : fSurface(surface) {}
+ GrSurfacePriv(const GrSurfacePriv&); // unimpl
+ GrSurfacePriv& operator=(const GrSurfacePriv&); // unimpl
+
+ // No taking addresses of this type.
+ const GrSurfacePriv* operator&() const;
+ GrSurfacePriv* operator&();
+
+ GrSurface* fSurface;
+
+ friend class GrSurface; // to construct/copy this type.
+};
+
+inline GrSurfacePriv GrSurface::surfacePriv() { return GrSurfacePriv(this); }
+
+inline const GrSurfacePriv GrSurface::surfacePriv() const {
+ return GrSurfacePriv(const_cast<GrSurface*>(this));
+}
+
+#endif
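
A one-line sketch of the intended access pattern: internal code reaches the privileged methods through the lightweight surfacePriv() window rather than through GrSurface's public interface.

    static bool sketch_has_pending_io(GrSurface* surface) {
        return surface->surfacePriv().hasPendingIO();
    }
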
diff --git a/gfx/skia/skia/src/gpu/GrSurfaceProxy.cpp b/gfx/skia/skia/src/gpu/GrSurfaceProxy.cpp
new file mode 100644
index 000000000..f5c401f67
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSurfaceProxy.cpp
@@ -0,0 +1,9 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrSurfaceProxy.h"
+
diff --git a/gfx/skia/skia/src/gpu/GrSwizzle.h b/gfx/skia/skia/src/gpu/GrSwizzle.h
new file mode 100644
index 000000000..c2288b2fe
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrSwizzle.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSwizzle_DEFINED
+#define GrSwizzle_DEFINED
+
+#include "GrColor.h"
+#include "SkRandom.h"
+
+/** Represents an rgba swizzle. It can be converted either into a string or an eight-bit int.
+ Currently there is no way to specify an arbitrary swizzle, just some static swizzles and an
+ assignment operator. That could be relaxed. */
+class GrSwizzle {
+public:
+ GrSwizzle() { *this = RGBA(); }
+
+ GrSwizzle(const GrSwizzle& that) { *this = that; }
+
+ GrSwizzle& operator=(const GrSwizzle& that) {
+ memcpy(this, &that, sizeof(GrSwizzle));
+ return *this;
+ }
+
+ /** Recreates a GrSwizzle from the output of asKey() */
+ void setFromKey(uint8_t key) {
+ fKey = key;
+ for (int i = 0; i < 4; ++i) {
+ fSwiz[i] = IdxToChar(key & 3);
+ key >>= 2;
+ }
+ SkASSERT(fSwiz[4] == 0);
+ }
+
+ bool operator==(const GrSwizzle& that) const { return this->asUInt() == that.asUInt(); }
+
+ bool operator!=(const GrSwizzle& that) const { return !(*this == that); }
+
+ /** Compact representation of the swizzle suitable for a key. */
+ uint8_t asKey() const { return fKey; }
+
+ /** 4 char null terminated string consisting only of chars 'r', 'g', 'b', 'a'. */
+ const char* c_str() const { return fSwiz; }
+
+ /** Applies this swizzle to the input color and returns the swizzled color. */
+ GrColor applyTo(GrColor color) const {
+ int idx;
+ uint32_t key = fKey;
+ // Index of the input color that should be mapped to output r.
+ idx = (key & 3);
+ uint32_t outR = (color >> idx * 8) & 0xFF;
+ key >>= 2;
+ idx = (key & 3);
+ uint32_t outG = (color >> idx * 8) & 0xFF;
+ key >>= 2;
+ idx = (key & 3);
+ uint32_t outB = (color >> idx * 8) & 0xFF;
+ key >>= 2;
+ idx = (key & 3);
+ uint32_t outA = (color >> idx * 8) & 0xFF;
+ return GrColorPackRGBA(outR, outG, outB, outA);
+ }
+
+ static const GrSwizzle& RGBA() {
+ static GrSwizzle gRGBA("rgba");
+ return gRGBA;
+ }
+
+ static const GrSwizzle& AAAA() {
+ static GrSwizzle gAAAA("aaaa");
+ return gAAAA;
+ }
+
+ static const GrSwizzle& RRRR() {
+ static GrSwizzle gRRRR("rrrr");
+ return gRRRR;
+ }
+
+ static const GrSwizzle& BGRA() {
+ static GrSwizzle gBGRA("bgra");
+ return gBGRA;
+ }
+
+ static const GrSwizzle& CreateRandom(SkRandom* random) {
+ switch (random->nextU() % 4) {
+ case 0:
+ return RGBA();
+ case 1:
+ return BGRA();
+ case 2:
+ return RRRR();
+ case 3:
+ return AAAA();
+ default:
+ SkFAIL("Mod is broken?!?");
+ return RGBA();
+ }
+ }
+
+private:
+ char fSwiz[5];
+ uint8_t fKey;
+
+ static int CharToIdx(char c) {
+ switch (c) {
+ case 'r':
+ return (GrColor_SHIFT_R / 8);
+ case 'g':
+ return (GrColor_SHIFT_G / 8);
+ case 'b':
+ return (GrColor_SHIFT_B / 8);
+ case 'a':
+ return (GrColor_SHIFT_A / 8);
+ default:
+ SkFAIL("Invalid swizzle char");
+ return 0;
+ }
+ }
+
+ static /* constexpr */ char IToC(int idx) {
+ return (8*idx) == GrColor_SHIFT_R ? 'r' :
+ (8*idx) == GrColor_SHIFT_G ? 'g' :
+ (8*idx) == GrColor_SHIFT_B ? 'b' : 'a';
+ }
+
+ static char IdxToChar(int c) {
+ // Hopefully this array gets computed at compile time.
+ static const char gStr[4] = { IToC(0), IToC(1), IToC(2), IToC(3) };
+ return gStr[c];
+ }
+
+ explicit GrSwizzle(const char* str) {
+ SkASSERT(strlen(str) == 4);
+ fSwiz[0] = str[0];
+ fSwiz[1] = str[1];
+ fSwiz[2] = str[2];
+ fSwiz[3] = str[3];
+ fSwiz[4] = 0;
+ fKey = SkToU8(CharToIdx(fSwiz[0]) | (CharToIdx(fSwiz[1]) << 2) |
+ (CharToIdx(fSwiz[2]) << 4) | (CharToIdx(fSwiz[3]) << 6));
+ }
+
+ uint32_t* asUIntPtr() { return SkTCast<uint32_t*>(fSwiz); }
+ uint32_t asUInt() const { return *SkTCast<const uint32_t*>(fSwiz); }
+
+ GR_STATIC_ASSERT(sizeof(char[4]) == sizeof(uint32_t));
+};
+
+#endif
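
A sketch of GrSwizzle in use: the BGRA swizzle exchanges the red and blue channels of a packed GrColor, and its one-byte key round-trips through setFromKey(). All calls below are declared in this header and in GrColor.h.

    static void sketch_swizzle() {
        GrColor c = GrColorPackRGBA(0x10, 0x20, 0x30, 0x40);
        GrColor swapped = GrSwizzle::BGRA().applyTo(c);     // r and b exchanged, g and a kept

        GrSwizzle copy;
        copy.setFromKey(GrSwizzle::BGRA().asKey());         // rebuild the swizzle from its key
        SkASSERT(copy == GrSwizzle::BGRA());                // c_str() of copy is "bgra"
        (void)swapped;
    }
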
diff --git a/gfx/skia/skia/src/gpu/GrTRecorder.h b/gfx/skia/skia/src/gpu/GrTRecorder.h
new file mode 100644
index 000000000..c42d9e975
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTRecorder.h
@@ -0,0 +1,390 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTRecorder_DEFINED
+#define GrTRecorder_DEFINED
+
+#include "SkTypes.h"
+
+template<typename TBase, typename TAlign> class GrTRecorder;
+template<typename TItem> struct GrTRecorderAllocWrapper;
+
+/**
+ * Records a list of items with a common base type, optional associated data, and
+ * permanent memory addresses.
+ *
+ * This class preallocates its own chunks of memory for hosting objects, so new items can
+ * be created without excessive calls to malloc().
+ *
+ * To create a new item and append it to the back of the list, use the following macros:
+ *
+ * GrNEW_APPEND_TO_RECORDER(recorder, SubclassName, (args))
+ * GrNEW_APPEND_WITH_DATA_TO_RECORDER(recorder, SubclassName, (args), sizeOfData)
+ *
+ * Upon reset or delete, the items are destructed in the same order they were received,
+ * not reverse (stack) order.
+ *
+ * @param TBase Common base type of items in the list. If TBase is not a class with a
+ * virtual destructor, the client is responsible for invoking any necessary
+ * destructors.
+ *
+ * For now, any subclass used in the list must have the same start address
+ * as TBase (or in other words, the types must be convertible via
+ * reinterpret_cast<>). Classes with multiple inheritance (or any subclass
+ * on an obscure compiler) may not be compatible. This is runtime asserted
+ * in debug builds.
+ *
+ * @param TAlign A type whose size is the desired memory alignment for object allocations.
+ * This should be the largest known alignment requirement for all objects
+ * that may be stored in the list.
+ */
+template<typename TBase, typename TAlign> class GrTRecorder : SkNoncopyable {
+public:
+ class Iter;
+ class ReverseIter;
+
+ /**
+ * Create a recorder.
+ *
+ * @param initialSizeInBytes The amount of memory reserved by the recorder initially,
+ and after calls to reset().
+ */
+ GrTRecorder(int initialSizeInBytes)
+ : fHeadBlock(MemBlock::Alloc(LengthOf(initialSizeInBytes), nullptr)),
+ fTailBlock(fHeadBlock),
+ fLastItem(nullptr) {}
+
+ ~GrTRecorder() {
+ this->reset();
+ MemBlock::Free(fHeadBlock);
+ }
+
+ bool empty() { return !fLastItem; }
+
+ TBase& back() {
+ SkASSERT(!this->empty());
+ return *reinterpret_cast<TBase*>(fLastItem);
+ }
+
+ /**
+ * Removes and destroys the last item added to the recorder. It may not be called when the
+ * recorder is empty.
+ */
+ void pop_back();
+
+ /**
+ * Destruct all items in the list and reset to empty.
+ */
+ void reset();
+
+ /**
+ * Retrieve the extra data associated with an item that was allocated using
+ * GrNEW_APPEND_WITH_DATA_TO_RECORDER().
+ *
+ * @param item The item whose data to retrieve. The pointer must be of the same type
+ * that was allocated initially; it can't be a pointer to a base class.
+ *
+ * @return The item's associated data.
+ */
+ template<typename TItem> static const void* GetDataForItem(const TItem* item) {
+ const TAlign* ptr = reinterpret_cast<const TAlign*>(item);
+ return &ptr[length_of<TItem>::kValue];
+ }
+ template<typename TItem> static void* GetDataForItem(TItem* item) {
+ TAlign* ptr = reinterpret_cast<TAlign*>(item);
+ return &ptr[length_of<TItem>::kValue];
+ }
+
+private:
+ template<typename TItem> struct length_of {
+ enum { kValue = (sizeof(TItem) + sizeof(TAlign) - 1) / sizeof(TAlign) };
+ };
+ static int LengthOf(int bytes) { return (bytes + sizeof(TAlign) - 1) / sizeof(TAlign); }
+
+ struct Header {
+ int fTotalLength; // The length of an entry including header, item, and data in TAligns.
+ int fPrevLength; // Same but for the previous entry. Used for iterating backwards.
+ };
+ template<typename TItem> void* alloc_back(int dataLength);
+
+ struct MemBlock : SkNoncopyable {
+ /** Allocates a new block and appends it to prev if not nullptr. The length param is in units
+ of TAlign. */
+ static MemBlock* Alloc(int length, MemBlock* prev) {
+ MemBlock* block = reinterpret_cast<MemBlock*>(
+ sk_malloc_throw(sizeof(TAlign) * (length_of<MemBlock>::kValue + length)));
+ block->fLength = length;
+ block->fBack = 0;
+ block->fNext = nullptr;
+ block->fPrev = prev;
+ if (prev) {
+ SkASSERT(nullptr == prev->fNext);
+ prev->fNext = block;
+ }
+ return block;
+ }
+
+ // Frees from this block forward. Also adjusts prev block's next ptr.
+ static void Free(MemBlock* block) {
+ if (block && block->fPrev) {
+ SkASSERT(block->fPrev->fNext == block);
+ block->fPrev->fNext = nullptr;
+ }
+ while (block) {
+ MemBlock* next = block->fNext;
+ sk_free(block);
+ block = next;
+ }
+ }
+
+ TAlign& operator [](int i) {
+ return reinterpret_cast<TAlign*>(this)[length_of<MemBlock>::kValue + i];
+ }
+
+ int fLength; // Length in units of TAlign of the block.
+ int fBack; // Offset, in TAligns, to unused portion of the memory block.
+ MemBlock* fNext;
+ MemBlock* fPrev;
+ };
+ MemBlock* const fHeadBlock;
+ MemBlock* fTailBlock;
+
+ void* fLastItem; // really a ptr to TBase
+
+ template<typename TItem> friend struct GrTRecorderAllocWrapper;
+
+ template <typename UBase, typename UAlign, typename UItem>
+ friend void* operator new(size_t, GrTRecorder<UBase, UAlign>&,
+ const GrTRecorderAllocWrapper<UItem>&);
+
+ friend class Iter;
+ friend class ReverseIter;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+template<typename TBase, typename TAlign>
+void GrTRecorder<TBase, TAlign>::pop_back() {
+ SkASSERT(fLastItem);
+ Header* header = reinterpret_cast<Header*>(
+ reinterpret_cast<TAlign*>(fLastItem) - length_of<Header>::kValue);
+ fTailBlock->fBack -= header->fTotalLength;
+ reinterpret_cast<TBase*>(fLastItem)->~TBase();
+
+ int lastItemLength = header->fPrevLength;
+
+ if (!header->fPrevLength) {
+ // We popped the first entry in the recorder.
+ SkASSERT(0 == fTailBlock->fBack);
+ fLastItem = nullptr;
+ return;
+ }
+ while (!fTailBlock->fBack) {
+ // We popped the last entry in a block that isn't the head block. Move back a block but
+ // don't free it since we'll probably grow into it shortly.
+ fTailBlock = fTailBlock->fPrev;
+ SkASSERT(fTailBlock);
+ }
+ fLastItem = &(*fTailBlock)[fTailBlock->fBack - lastItemLength + length_of<Header>::kValue];
+}
+
+template<typename TBase, typename TAlign>
+template<typename TItem>
+void* GrTRecorder<TBase, TAlign>::alloc_back(int dataLength) {
+ // Find the header of the previous entry and get its length. We need to store that in the new
+ // header for backwards iteration (pop_back()).
+ int prevLength = 0;
+ if (fLastItem) {
+ Header* lastHeader = reinterpret_cast<Header*>(
+ reinterpret_cast<TAlign*>(fLastItem) - length_of<Header>::kValue);
+ prevLength = lastHeader->fTotalLength;
+ }
+
+ const int totalLength = length_of<Header>::kValue + length_of<TItem>::kValue + dataLength;
+
+ // Check if there is room in the current block and if not walk to next (allocating if
+ // necessary). Note that pop_back() and reset() can leave the recorder in a state where it
+ // has preallocated blocks hanging off the tail that are currently unused.
+ while (fTailBlock->fBack + totalLength > fTailBlock->fLength) {
+ if (!fTailBlock->fNext) {
+ fTailBlock = MemBlock::Alloc(SkTMax(2 * fTailBlock->fLength, totalLength), fTailBlock);
+ } else {
+ fTailBlock = fTailBlock->fNext;
+ }
+ SkASSERT(0 == fTailBlock->fBack);
+ }
+
+ Header* header = reinterpret_cast<Header*>(&(*fTailBlock)[fTailBlock->fBack]);
+ void* rawPtr = &(*fTailBlock)[fTailBlock->fBack + length_of<Header>::kValue];
+
+ header->fTotalLength = totalLength;
+ header->fPrevLength = prevLength;
+ fLastItem = rawPtr;
+ fTailBlock->fBack += totalLength;
+
+ // FIXME: We currently require that the base and subclass share the same start address.
+ // This is not required by the C++ spec, and is likely to not be true in the case of
+ // multiple inheritance or a base class that doesn't have virtual methods (when the
+ // subclass does). It would be ideal to find a more robust solution that comes at no
+ // extra cost to performance or code generality.
+ SkDEBUGCODE(void* baseAddr = fLastItem;
+ void* subclassAddr = rawPtr);
+ SkASSERT(baseAddr == subclassAddr);
+
+ return rawPtr;
+}
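+
+// Layout of a single entry, in TAlign units: [ Header | TItem | optional extra data ].
+// The Header sits at the offset fBack held before the allocation; the item starts at
+// fBack + length_of<Header>::kValue, which is the address stored in fLastItem and returned
+// to the caller.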
+
+/**
+ * Iterates through a recorder from front to back. The initial state of the iterator is
+ * to not have the front item loaded yet; next() must be called first. Usage model:
+ *
+ * GrTRecorder<TBase, TAlign>::Iter iter(recorder);
+ * while (iter.next()) {
+ * iter->doSomething();
+ * }
+ */
+template<typename TBase, typename TAlign>
+class GrTRecorder<TBase, TAlign>::Iter {
+public:
+ Iter(GrTRecorder& recorder) : fBlock(recorder.fHeadBlock), fPosition(0), fItem(nullptr) {}
+
+ bool next() {
+ while (fPosition >= fBlock->fBack) {
+ SkASSERT(fPosition == fBlock->fBack);
+ if (!fBlock->fNext) {
+ return false;
+ }
+ fBlock = fBlock->fNext;
+ fPosition = 0;
+ }
+
+ Header* header = reinterpret_cast<Header*>(&(*fBlock)[fPosition]);
+ fItem = reinterpret_cast<TBase*>(&(*fBlock)[fPosition + length_of<Header>::kValue]);
+ fPosition += header->fTotalLength;
+ return true;
+ }
+
+ TBase* get() const {
+ SkASSERT(fItem);
+ return fItem;
+ }
+
+ TBase* operator->() const { return this->get(); }
+
+private:
+ MemBlock* fBlock;
+ int fPosition;
+ TBase* fItem;
+};
+
+/**
+ * Iterates through a recorder in reverse, from back to front. This version mirrors "Iter",
+ * so the initial state is to have recorder.back() loaded already. (Note that this will
+ * assert if the recorder is empty.) Usage model:
+ *
+ * GrTRecorder<TBase, TAlign>::ReverseIter reverseIter(recorder);
+ * do {
+ * reverseIter->doSomething();
+ * } while (reverseIter.previous());
+ */
+template<typename TBase, typename TAlign>
+class GrTRecorder<TBase, TAlign>::ReverseIter {
+public:
+ ReverseIter(GrTRecorder& recorder)
+ : fBlock(recorder.fTailBlock),
+ fItem(&recorder.back()) {
+ Header* lastHeader = reinterpret_cast<Header*>(
+ reinterpret_cast<TAlign*>(fItem) - length_of<Header>::kValue);
+ fPosition = fBlock->fBack - lastHeader->fTotalLength;
+ }
+
+ bool previous() {
+ Header* header = reinterpret_cast<Header*>(&(*fBlock)[fPosition]);
+
+ while (0 == fPosition) {
+ if (!fBlock->fPrev) {
+ // We've reached the front of the recorder.
+ return false;
+ }
+ fBlock = fBlock->fPrev;
+ fPosition = fBlock->fBack;
+ }
+
+ fPosition -= header->fPrevLength;
+ SkASSERT(fPosition >= 0);
+
+ fItem = reinterpret_cast<TBase*>(&(*fBlock)[fPosition + length_of<Header>::kValue]);
+ return true;
+ }
+
+ TBase* get() const { return fItem; }
+ TBase* operator->() const { return this->get(); }
+
+private:
+ MemBlock* fBlock;
+ int fPosition;
+ TBase* fItem;
+};
+
+template<typename TBase, typename TAlign>
+void GrTRecorder<TBase, TAlign>::reset() {
+ Iter iter(*this);
+ while (iter.next()) {
+ iter->~TBase();
+ }
+
+ // Assume the next time this recorder fills up it will use approximately the same
+ // amount of space as last time. Leave enough space for up to ~50% growth; free
+ // everything else.
+ if (fTailBlock->fBack <= fTailBlock->fLength / 2) {
+ MemBlock::Free(fTailBlock->fNext);
+ } else if (fTailBlock->fNext) {
+ MemBlock::Free(fTailBlock->fNext->fNext);
+ fTailBlock->fNext->fNext = nullptr;
+ }
+
+ for (MemBlock* block = fHeadBlock; block; block = block->fNext) {
+ block->fBack = 0;
+ }
+
+ fTailBlock = fHeadBlock;
+ fLastItem = nullptr;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+template<typename TItem> struct GrTRecorderAllocWrapper {
+ GrTRecorderAllocWrapper() : fDataLength(0) {}
+
+ template <typename TBase, typename TAlign>
+ GrTRecorderAllocWrapper(const GrTRecorder<TBase, TAlign>&, int sizeOfData)
+ : fDataLength(GrTRecorder<TBase, TAlign>::LengthOf(sizeOfData)) {}
+
+ const int fDataLength;
+};
+
+template <typename TBase, typename TAlign, typename TItem>
+void* operator new(size_t size, GrTRecorder<TBase, TAlign>& recorder,
+ const GrTRecorderAllocWrapper<TItem>& wrapper) {
+ SkASSERT(size == sizeof(TItem));
+ return recorder.template alloc_back<TItem>(wrapper.fDataLength);
+}
+
+template <typename TBase, typename TAlign, typename TItem>
+void operator delete(void*, GrTRecorder<TBase, TAlign>&, const GrTRecorderAllocWrapper<TItem>&) {
+ // We only provide an operator delete to work around compiler warnings that can come
+ // up for an unmatched operator new when compiling with exceptions.
+ SK_ABORT("Invalid Operation");
+}
+
+#define GrNEW_APPEND_TO_RECORDER(recorder, type_name, args) \
+ (new (recorder, GrTRecorderAllocWrapper<type_name>()) type_name args)
+
+#define GrNEW_APPEND_WITH_DATA_TO_RECORDER(recorder, type_name, args, size_of_data) \
+ (new (recorder, GrTRecorderAllocWrapper<type_name>(recorder, size_of_data)) type_name args)
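+
+// Minimal usage sketch (the type and variable names below are hypothetical, not defined in
+// this header): given a GrTRecorder<Cmd, uint32_t>& fCmds, where DrawCmd derives from Cmd,
+// an entry could be appended with
+//     GrNEW_APPEND_TO_RECORDER(fCmds, DrawCmd, (rect, paint))
+// and later visited with GrTRecorder<Cmd, uint32_t>::Iter.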
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/GrTessellator.cpp b/gfx/skia/skia/src/gpu/GrTessellator.cpp
new file mode 100644
index 000000000..b06cd6362
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTessellator.cpp
@@ -0,0 +1,1813 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrTessellator.h"
+
+#include "GrDefaultGeoProcFactory.h"
+#include "GrPathUtils.h"
+
+#include "SkChunkAlloc.h"
+#include "SkGeometry.h"
+#include "SkPath.h"
+
+#include <stdio.h>
+
+/*
+ * There are six stages to the basic algorithm:
+ *
+ * 1) Linearize the path contours into piecewise linear segments (path_to_contours()).
+ * 2) Build a mesh of edges connecting the vertices (build_edges()).
+ * 3) Sort the vertices in Y (and secondarily in X) (merge_sort()).
+ * 4) Simplify the mesh by inserting new vertices at intersecting edges (simplify()).
+ * 5) Tessellate the simplified mesh into monotone polygons (tessellate()).
+ * 6) Triangulate the monotone polygons directly into a vertex buffer (polys_to_triangles()).
+ *
+ * For screenspace antialiasing, the algorithm is modified as follows:
+ *
+ * Run steps 1-5 above to produce polygons.
+ * 5b) Apply fill rules to extract boundary contours from the polygons (extract_boundaries()).
+ * 5c) Simplify boundaries to remove "pointy" vertices which cause inversions (simplify_boundary()).
+ * 5d) Displace edges by half a pixel inward and outward along their normals. Intersect to find
+ * new vertices, and set zero alpha on the exterior and one alpha on the interior. Build a new
+ * antialiased mesh from those vertices (boundary_to_aa_mesh()).
+ * Run steps 3-6 above on the new mesh, and produce antialiased triangles.
+ *
+ * The vertex sorting in step (3) is a merge sort, since it plays well with the linked list
+ * of vertices (and the necessity of inserting new vertices on intersection).
+ *
+ * Stages (4) and (5) use an active edge list, which is a list of all edges for which the
+ * sweep line has crossed the top vertex, but not the bottom vertex. It's sorted
+ * left-to-right based on the point where both edges are active (when both top vertices
+ * have been seen, so the "lower" top vertex of the two). If the top vertices are equal
+ * (shared), it's sorted based on the last point where both edges are active, so the
+ * "upper" bottom vertex.
+ *
+ * The most complex step is the simplification (4). It's based on the Bentley-Ottmann
+ * line-sweep algorithm, but due to floating point inaccuracy, the intersection points are
+ * not exact and may violate the mesh topology or active edge list ordering. We
+ * accommodate this by adjusting the topology of the mesh and the active edge list to match
+ * the intersection points. This occurs in three ways:
+ *
+ * A) Intersections may cause a shortened edge to no longer be ordered with respect to its
+ * neighbouring edges at the top or bottom vertex. This is handled by merging the
+ * edges (merge_collinear_edges()).
+ * B) Intersections may cause an edge to violate the left-to-right ordering of the
+ * active edge list. This is handled by splitting the neighbour edge on the
+ * intersected vertex (cleanup_active_edges()).
+ * C) Shortening an edge may cause an active edge to become inactive or an inactive edge
+ * to become active. This is handled by removing or inserting the edge in the active
+ * edge list (fix_active_state()).
+ *
+ * The tessellation steps (5) and (6) are based on "Triangulating Simple Polygons and
+ * Equivalent Problems" (Fournier and Montuno); also a line-sweep algorithm. Note that it
+ * currently uses a linked list for the active edge list, rather than a 2-3 tree as the
+ * paper describes. The 2-3 tree gives O(lg N) lookups, but insertion and removal also
+ * become O(lg N). In all the test cases, it was found that the cost of frequent O(lg N)
+ * insertions and removals was greater than the cost of infrequent O(N) lookups with the
+ * linked list implementation. With the latter, all removals are O(1), and most insertions
+ * are O(1), since we know the adjacent edge in the active edge list based on the topology.
+ * Only type 2 vertices (see paper) require the O(N) lookups, and these are much less
+ * frequent. There may be other data structures worth investigating, however.
+ *
+ * Note that the orientation of the line sweep algorithms is determined by the aspect ratio of the
+ * path bounds. When the path is taller than it is wide, we sort vertices based on increasing Y
+ * coordinate, and secondarily by increasing X coordinate. When the path is wider than it is tall,
+ * we sort by increasing X coordinate, but secondarily by *decreasing* Y coordinate. This is so
+ * that the "left" and "right" orientation in the code remains correct (edges to the left are
+ * increasing in Y; edges to the right are decreasing in Y). That is, the sort effectively
+ * rotates the plane 90 degrees counterclockwise, rather than transposing it.
+ */
+
+#define LOGGING_ENABLED 0
+
+#if LOGGING_ENABLED
+#define LOG printf
+#else
+#define LOG(...)
+#endif
+
+#define ALLOC_NEW(Type, args, alloc) new (alloc.allocThrow(sizeof(Type))) Type args
+
+namespace {
+
+struct Vertex;
+struct Edge;
+struct Poly;
+
+template <class T, T* T::*Prev, T* T::*Next>
+void list_insert(T* t, T* prev, T* next, T** head, T** tail) {
+ t->*Prev = prev;
+ t->*Next = next;
+ if (prev) {
+ prev->*Next = t;
+ } else if (head) {
+ *head = t;
+ }
+ if (next) {
+ next->*Prev = t;
+ } else if (tail) {
+ *tail = t;
+ }
+}
+
+template <class T, T* T::*Prev, T* T::*Next>
+void list_remove(T* t, T** head, T** tail) {
+ if (t->*Prev) {
+ t->*Prev->*Next = t->*Next;
+ } else if (head) {
+ *head = t->*Next;
+ }
+ if (t->*Next) {
+ t->*Next->*Prev = t->*Prev;
+ } else if (tail) {
+ *tail = t->*Prev;
+ }
+ t->*Prev = t->*Next = nullptr;
+}
+
+/**
+ * Vertices are used in three ways: first, the path contours are converted into a
+ * circularly-linked list of Vertices for each contour. After edge construction, the same Vertices
+ * are re-ordered by the merge sort according to the sweep_lt comparator (usually, increasing
+ * in Y) using the same fPrev/fNext pointers that were used for the contours, to avoid
+ * reallocation. Finally, MonotonePolys are built containing a circularly-linked list of
+ * Vertices. (Currently, those Vertices are newly-allocated for the MonotonePolys, since
+ * an individual Vertex from the path mesh may belong to multiple
+ * MonotonePolys, so the original Vertices cannot be re-used.)
+ */
+
+struct Vertex {
+ Vertex(const SkPoint& point, uint8_t alpha)
+ : fPoint(point), fPrev(nullptr), fNext(nullptr)
+ , fFirstEdgeAbove(nullptr), fLastEdgeAbove(nullptr)
+ , fFirstEdgeBelow(nullptr), fLastEdgeBelow(nullptr)
+ , fProcessed(false)
+ , fAlpha(alpha)
+#if LOGGING_ENABLED
+ , fID (-1.0f)
+#endif
+ {}
+ SkPoint fPoint; // Vertex position
+ Vertex* fPrev; // Linked list of contours, then Y-sorted vertices.
+ Vertex* fNext; // "
+ Edge* fFirstEdgeAbove; // Linked list of edges above this vertex.
+ Edge* fLastEdgeAbove; // "
+ Edge* fFirstEdgeBelow; // Linked list of edges below this vertex.
+ Edge* fLastEdgeBelow; // "
+ bool fProcessed; // Has this vertex been seen in simplify()?
+ uint8_t fAlpha;
+#if LOGGING_ENABLED
+ float fID; // Identifier used for logging.
+#endif
+};
+
+/***************************************************************************************/
+
+struct AAParams {
+ bool fTweakAlpha;
+ GrColor fColor;
+};
+
+typedef bool (*CompareFunc)(const SkPoint& a, const SkPoint& b);
+
+struct Comparator {
+ CompareFunc sweep_lt;
+ CompareFunc sweep_gt;
+};
+
+bool sweep_lt_horiz(const SkPoint& a, const SkPoint& b) {
+ return a.fX == b.fX ? a.fY > b.fY : a.fX < b.fX;
+}
+
+bool sweep_lt_vert(const SkPoint& a, const SkPoint& b) {
+ return a.fY == b.fY ? a.fX < b.fX : a.fY < b.fY;
+}
+
+bool sweep_gt_horiz(const SkPoint& a, const SkPoint& b) {
+ return a.fX == b.fX ? a.fY < b.fY : a.fX > b.fX;
+}
+
+bool sweep_gt_vert(const SkPoint& a, const SkPoint& b) {
+ return a.fY == b.fY ? a.fX > b.fX : a.fY > b.fY;
+}
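+
+// Tie-breaking example: with points a = (1, 5) and b = (1, 2), sweep_lt_horiz(a, b) is true
+// because equal X falls back to *decreasing* Y (5 > 2), while sweep_lt_vert(a, b) is false
+// because it sorts primarily by increasing Y.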
+
+inline void* emit_vertex(Vertex* v, const AAParams* aaParams, void* data) {
+ if (!aaParams) {
+ SkPoint* d = static_cast<SkPoint*>(data);
+ *d++ = v->fPoint;
+ return d;
+ }
+ if (aaParams->fTweakAlpha) {
+ auto d = static_cast<GrDefaultGeoProcFactory::PositionColorAttr*>(data);
+ d->fPosition = v->fPoint;
+ d->fColor = SkAlphaMulQ(aaParams->fColor, SkAlpha255To256(v->fAlpha));
+ d++;
+ return d;
+ }
+ auto d = static_cast<GrDefaultGeoProcFactory::PositionColorCoverageAttr*>(data);
+ d->fPosition = v->fPoint;
+ d->fColor = aaParams->fColor;
+ d->fCoverage = GrNormalizeByteToFloat(v->fAlpha);
+ d++;
+ return d;
+}
+
+void* emit_triangle(Vertex* v0, Vertex* v1, Vertex* v2, const AAParams* aaParams, void* data) {
+#if TESSELLATOR_WIREFRAME
+ data = emit_vertex(v0, aaParams, data);
+ data = emit_vertex(v1, aaParams, data);
+ data = emit_vertex(v1, aaParams, data);
+ data = emit_vertex(v2, aaParams, data);
+ data = emit_vertex(v2, aaParams, data);
+ data = emit_vertex(v0, aaParams, data);
+#else
+ data = emit_vertex(v0, aaParams, data);
+ data = emit_vertex(v1, aaParams, data);
+ data = emit_vertex(v2, aaParams, data);
+#endif
+ return data;
+}
+
+struct VertexList {
+ VertexList() : fHead(nullptr), fTail(nullptr) {}
+ Vertex* fHead;
+ Vertex* fTail;
+ void insert(Vertex* v, Vertex* prev, Vertex* next) {
+ list_insert<Vertex, &Vertex::fPrev, &Vertex::fNext>(v, prev, next, &fHead, &fTail);
+ }
+ void append(Vertex* v) {
+ insert(v, fTail, nullptr);
+ }
+ void prepend(Vertex* v) {
+ insert(v, nullptr, fHead);
+ }
+ void close() {
+ if (fHead && fTail) {
+ fTail->fNext = fHead;
+ fHead->fPrev = fTail;
+ }
+ }
+};
+
+// Round to nearest quarter-pixel. This is used for screenspace tessellation.
+
+inline void round(SkPoint* p) {
+ p->fX = SkScalarRoundToScalar(p->fX * SkFloatToScalar(4.0f)) * SkFloatToScalar(0.25f);
+ p->fY = SkScalarRoundToScalar(p->fY * SkFloatToScalar(4.0f)) * SkFloatToScalar(0.25f);
+}
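+
+// For example, round() maps (3.1f, 4.6f) to (3.0f, 4.5f): 3.1 * 4 = 12.4 rounds to 12 and
+// 4.6 * 4 = 18.4 rounds to 18; multiplying by 0.25 gives 3.0 and 4.5.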
+
+/**
+ * An Edge joins a top Vertex to a bottom Vertex. Edge ordering for the list of "edges above" and
+ * "edge below" a vertex as well as for the active edge list is handled by isLeftOf()/isRightOf().
+ * Note that an Edge will give occasionally dist() != 0 for its own endpoints (because floating
+ * point). For speed, that case is only tested by the callers which require it (e.g.,
+ * cleanup_active_edges()). Edges also handle checking for intersection with other edges.
+ * Currently, this converts the edges to the parametric form, in order to avoid doing a division
+ * until an intersection has been confirmed. This is slightly slower in the "found" case, but
+ * a lot faster in the "not found" case.
+ *
+ * The coefficients of the line equation are stored in double precision to avoid catastrophic
+ * cancellation in the isLeftOf() and isRightOf() checks. Using doubles ensures that the result is
+ * correct in float, since it's a polynomial of degree 2. The intersect() function, being
+ * degree 5, is still subject to catastrophic cancellation. We deal with that by assuming its
+ * output may be incorrect, and adjusting the mesh topology to match (see comment at the top of
+ * this file).
+ */
+
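+// Worked example of the implicit form above: for fTop = (1, 2) and fBottom = (3, 6),
+// fDX = 2, fDY = 4 and fC = 2*3 - 1*6 = 0, so dist(fTop) = 4*1 - 2*2 + 0 = 0 and
+// dist(fBottom) = 4*3 - 2*6 + 0 = 0. In exact arithmetic both endpoints always satisfy the
+// equation; only floating point rounding makes dist() != 0 in practice.
+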
+struct Edge {
+ Edge(Vertex* top, Vertex* bottom, int winding)
+ : fWinding(winding)
+ , fTop(top)
+ , fBottom(bottom)
+ , fLeft(nullptr)
+ , fRight(nullptr)
+ , fPrevEdgeAbove(nullptr)
+ , fNextEdgeAbove(nullptr)
+ , fPrevEdgeBelow(nullptr)
+ , fNextEdgeBelow(nullptr)
+ , fLeftPoly(nullptr)
+ , fRightPoly(nullptr)
+ , fLeftPolyPrev(nullptr)
+ , fLeftPolyNext(nullptr)
+ , fRightPolyPrev(nullptr)
+ , fRightPolyNext(nullptr)
+ , fUsedInLeftPoly(false)
+ , fUsedInRightPoly(false) {
+ recompute();
+ }
+ int fWinding; // 1 == edge goes downward; -1 = edge goes upward.
+ Vertex* fTop; // The top vertex in vertex-sort-order (sweep_lt).
+ Vertex* fBottom; // The bottom vertex in vertex-sort-order.
+ Edge* fLeft; // The linked list of edges in the active edge list.
+ Edge* fRight; // "
+ Edge* fPrevEdgeAbove; // The linked list of edges in the bottom Vertex's "edges above".
+ Edge* fNextEdgeAbove; // "
+ Edge* fPrevEdgeBelow; // The linked list of edges in the top Vertex's "edges below".
+ Edge* fNextEdgeBelow; // "
+ Poly* fLeftPoly; // The Poly to the left of this edge, if any.
+ Poly* fRightPoly; // The Poly to the right of this edge, if any.
+ Edge* fLeftPolyPrev;
+ Edge* fLeftPolyNext;
+ Edge* fRightPolyPrev;
+ Edge* fRightPolyNext;
+ bool fUsedInLeftPoly;
+ bool fUsedInRightPoly;
+ double fDX; // The line equation for this edge, in implicit form.
+ double fDY; // fDY * x + fDX * y + fC = 0, for point (x, y) on the line.
+ double fC;
+ double dist(const SkPoint& p) const {
+ return fDY * p.fX - fDX * p.fY + fC;
+ }
+ bool isRightOf(Vertex* v) const {
+ return dist(v->fPoint) < 0.0;
+ }
+ bool isLeftOf(Vertex* v) const {
+ return dist(v->fPoint) > 0.0;
+ }
+ void recompute() {
+ fDX = static_cast<double>(fBottom->fPoint.fX) - fTop->fPoint.fX;
+ fDY = static_cast<double>(fBottom->fPoint.fY) - fTop->fPoint.fY;
+ fC = static_cast<double>(fTop->fPoint.fY) * fBottom->fPoint.fX -
+ static_cast<double>(fTop->fPoint.fX) * fBottom->fPoint.fY;
+ }
+ bool intersect(const Edge& other, SkPoint* p) {
+ LOG("intersecting %g -> %g with %g -> %g\n",
+ fTop->fID, fBottom->fID,
+ other.fTop->fID, other.fBottom->fID);
+ if (fTop == other.fTop || fBottom == other.fBottom) {
+ return false;
+ }
+ double denom = fDX * other.fDY - fDY * other.fDX;
+ if (denom == 0.0) {
+ return false;
+ }
+ double dx = static_cast<double>(fTop->fPoint.fX) - other.fTop->fPoint.fX;
+ double dy = static_cast<double>(fTop->fPoint.fY) - other.fTop->fPoint.fY;
+ double sNumer = dy * other.fDX - dx * other.fDY;
+ double tNumer = dy * fDX - dx * fDY;
+ // If (sNumer / denom) or (tNumer / denom) is not in [0..1], exit early.
+ // This saves us doing the divide below unless absolutely necessary.
+ if (denom > 0.0 ? (sNumer < 0.0 || sNumer > denom || tNumer < 0.0 || tNumer > denom)
+ : (sNumer > 0.0 || sNumer < denom || tNumer > 0.0 || tNumer < denom)) {
+ return false;
+ }
+ double s = sNumer / denom;
+ SkASSERT(s >= 0.0 && s <= 1.0);
+ p->fX = SkDoubleToScalar(fTop->fPoint.fX + s * fDX);
+ p->fY = SkDoubleToScalar(fTop->fPoint.fY + s * fDY);
+ return true;
+ }
+};
+
+struct EdgeList {
+ EdgeList() : fHead(nullptr), fTail(nullptr), fNext(nullptr), fCount(0) {}
+ Edge* fHead;
+ Edge* fTail;
+ EdgeList* fNext;
+ int fCount;
+ void insert(Edge* edge, Edge* prev, Edge* next) {
+ list_insert<Edge, &Edge::fLeft, &Edge::fRight>(edge, prev, next, &fHead, &fTail);
+ fCount++;
+ }
+ void append(Edge* e) {
+ insert(e, fTail, nullptr);
+ }
+ void remove(Edge* edge) {
+ list_remove<Edge, &Edge::fLeft, &Edge::fRight>(edge, &fHead, &fTail);
+ fCount--;
+ }
+ void close() {
+ if (fHead && fTail) {
+ fTail->fRight = fHead;
+ fHead->fLeft = fTail;
+ }
+ }
+ bool contains(Edge* edge) const {
+ return edge->fLeft || edge->fRight || fHead == edge;
+ }
+};
+
+/***************************************************************************************/
+
+struct Poly {
+ Poly(Vertex* v, int winding)
+ : fFirstVertex(v)
+ , fWinding(winding)
+ , fHead(nullptr)
+ , fTail(nullptr)
+ , fNext(nullptr)
+ , fPartner(nullptr)
+ , fCount(0)
+ {
+#if LOGGING_ENABLED
+ static int gID = 0;
+ fID = gID++;
+ LOG("*** created Poly %d\n", fID);
+#endif
+ }
+ typedef enum { kLeft_Side, kRight_Side } Side;
+ struct MonotonePoly {
+ MonotonePoly(Edge* edge, Side side)
+ : fSide(side)
+ , fFirstEdge(nullptr)
+ , fLastEdge(nullptr)
+ , fPrev(nullptr)
+ , fNext(nullptr) {
+ this->addEdge(edge);
+ }
+ Side fSide;
+ Edge* fFirstEdge;
+ Edge* fLastEdge;
+ MonotonePoly* fPrev;
+ MonotonePoly* fNext;
+ void addEdge(Edge* edge) {
+ if (fSide == kRight_Side) {
+ SkASSERT(!edge->fUsedInRightPoly);
+ list_insert<Edge, &Edge::fRightPolyPrev, &Edge::fRightPolyNext>(
+ edge, fLastEdge, nullptr, &fFirstEdge, &fLastEdge);
+ edge->fUsedInRightPoly = true;
+ } else {
+ SkASSERT(!edge->fUsedInLeftPoly);
+ list_insert<Edge, &Edge::fLeftPolyPrev, &Edge::fLeftPolyNext>(
+ edge, fLastEdge, nullptr, &fFirstEdge, &fLastEdge);
+ edge->fUsedInLeftPoly = true;
+ }
+ }
+
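+ // emit() triangulates the monotone chain by walking the vertex list and, wherever the
+ // corner at a vertex is convex (non-negative cross product below), emitting a triangle
+ // and unlinking that vertex, then stepping back one vertex so newly exposed convex
+ // corners are handled.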
+ void* emit(const AAParams* aaParams, void* data) {
+ Edge* e = fFirstEdge;
+ e->fTop->fPrev = e->fTop->fNext = nullptr;
+ VertexList vertices;
+ vertices.append(e->fTop);
+ while (e != nullptr) {
+ e->fBottom->fPrev = e->fBottom->fNext = nullptr;
+ if (kRight_Side == fSide) {
+ vertices.append(e->fBottom);
+ e = e->fRightPolyNext;
+ } else {
+ vertices.prepend(e->fBottom);
+ e = e->fLeftPolyNext;
+ }
+ }
+ Vertex* first = vertices.fHead;
+ Vertex* v = first->fNext;
+ while (v != vertices.fTail) {
+ SkASSERT(v && v->fPrev && v->fNext);
+ Vertex* prev = v->fPrev;
+ Vertex* curr = v;
+ Vertex* next = v->fNext;
+ double ax = static_cast<double>(curr->fPoint.fX) - prev->fPoint.fX;
+ double ay = static_cast<double>(curr->fPoint.fY) - prev->fPoint.fY;
+ double bx = static_cast<double>(next->fPoint.fX) - curr->fPoint.fX;
+ double by = static_cast<double>(next->fPoint.fY) - curr->fPoint.fY;
+ if (ax * by - ay * bx >= 0.0) {
+ data = emit_triangle(prev, curr, next, aaParams, data);
+ v->fPrev->fNext = v->fNext;
+ v->fNext->fPrev = v->fPrev;
+ if (v->fPrev == first) {
+ v = v->fNext;
+ } else {
+ v = v->fPrev;
+ }
+ } else {
+ v = v->fNext;
+ }
+ }
+ return data;
+ }
+ };
+ Poly* addEdge(Edge* e, Side side, SkChunkAlloc& alloc) {
+ LOG("addEdge (%g -> %g) to poly %d, %s side\n",
+ e->fTop->fID, e->fBottom->fID, fID, side == kLeft_Side ? "left" : "right");
+ Poly* partner = fPartner;
+ Poly* poly = this;
+ if (side == kRight_Side) {
+ if (e->fUsedInRightPoly) {
+ return this;
+ }
+ } else {
+ if (e->fUsedInLeftPoly) {
+ return this;
+ }
+ }
+ if (partner) {
+ fPartner = partner->fPartner = nullptr;
+ }
+ if (!fTail) {
+ fHead = fTail = ALLOC_NEW(MonotonePoly, (e, side), alloc);
+ fCount += 2;
+ } else if (e->fBottom == fTail->fLastEdge->fBottom) {
+ return poly;
+ } else if (side == fTail->fSide) {
+ fTail->addEdge(e);
+ fCount++;
+ } else {
+ e = ALLOC_NEW(Edge, (fTail->fLastEdge->fBottom, e->fBottom, 1), alloc);
+ fTail->addEdge(e);
+ fCount++;
+ if (partner) {
+ partner->addEdge(e, side, alloc);
+ poly = partner;
+ } else {
+ MonotonePoly* m = ALLOC_NEW(MonotonePoly, (e, side), alloc);
+ m->fPrev = fTail;
+ fTail->fNext = m;
+ fTail = m;
+ }
+ }
+ return poly;
+ }
+ void* emit(const AAParams* aaParams, void *data) {
+ if (fCount < 3) {
+ return data;
+ }
+ LOG("emit() %d, size %d\n", fID, fCount);
+ for (MonotonePoly* m = fHead; m != nullptr; m = m->fNext) {
+ data = m->emit(aaParams, data);
+ }
+ return data;
+ }
+ Vertex* lastVertex() const { return fTail ? fTail->fLastEdge->fBottom : fFirstVertex; }
+ Vertex* fFirstVertex;
+ int fWinding;
+ MonotonePoly* fHead;
+ MonotonePoly* fTail;
+ Poly* fNext;
+ Poly* fPartner;
+ int fCount;
+#if LOGGING_ENABLED
+ int fID;
+#endif
+};
+
+/***************************************************************************************/
+
+bool coincident(const SkPoint& a, const SkPoint& b) {
+ return a == b;
+}
+
+Poly* new_poly(Poly** head, Vertex* v, int winding, SkChunkAlloc& alloc) {
+ Poly* poly = ALLOC_NEW(Poly, (v, winding), alloc);
+ poly->fNext = *head;
+ *head = poly;
+ return poly;
+}
+
+EdgeList* new_contour(EdgeList** head, SkChunkAlloc& alloc) {
+ EdgeList* contour = ALLOC_NEW(EdgeList, (), alloc);
+ contour->fNext = *head;
+ *head = contour;
+ return contour;
+}
+
+Vertex* append_point_to_contour(const SkPoint& p, Vertex* prev, Vertex** head,
+ SkChunkAlloc& alloc) {
+ Vertex* v = ALLOC_NEW(Vertex, (p, 255), alloc);
+#if LOGGING_ENABLED
+ static float gID = 0.0f;
+ v->fID = gID++;
+#endif
+ if (prev) {
+ prev->fNext = v;
+ v->fPrev = prev;
+ } else {
+ *head = v;
+ }
+ return v;
+}
+
+Vertex* generate_quadratic_points(const SkPoint& p0,
+ const SkPoint& p1,
+ const SkPoint& p2,
+ SkScalar tolSqd,
+ Vertex* prev,
+ Vertex** head,
+ int pointsLeft,
+ SkChunkAlloc& alloc) {
+ SkScalar d = p1.distanceToLineSegmentBetweenSqd(p0, p2);
+ if (pointsLeft < 2 || d < tolSqd || !SkScalarIsFinite(d)) {
+ return append_point_to_contour(p2, prev, head, alloc);
+ }
+
+ const SkPoint q[] = {
+ { SkScalarAve(p0.fX, p1.fX), SkScalarAve(p0.fY, p1.fY) },
+ { SkScalarAve(p1.fX, p2.fX), SkScalarAve(p1.fY, p2.fY) },
+ };
+ const SkPoint r = { SkScalarAve(q[0].fX, q[1].fX), SkScalarAve(q[0].fY, q[1].fY) };
+
+ pointsLeft >>= 1;
+ prev = generate_quadratic_points(p0, q[0], r, tolSqd, prev, head, pointsLeft, alloc);
+ prev = generate_quadratic_points(r, q[1], p2, tolSqd, prev, head, pointsLeft, alloc);
+ return prev;
+}
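+
+// Note: the q/r construction above is de Casteljau subdivision of the quadratic at t = 0.5.
+// Since pointsLeft is halved on each recursion, a call with pointsLeft = N appends at most
+// N points (line segments) to the contour.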
+
+Vertex* generate_cubic_points(const SkPoint& p0,
+ const SkPoint& p1,
+ const SkPoint& p2,
+ const SkPoint& p3,
+ SkScalar tolSqd,
+ Vertex* prev,
+ Vertex** head,
+ int pointsLeft,
+ SkChunkAlloc& alloc) {
+ SkScalar d1 = p1.distanceToLineSegmentBetweenSqd(p0, p3);
+ SkScalar d2 = p2.distanceToLineSegmentBetweenSqd(p0, p3);
+ if (pointsLeft < 2 || (d1 < tolSqd && d2 < tolSqd) ||
+ !SkScalarIsFinite(d1) || !SkScalarIsFinite(d2)) {
+ return append_point_to_contour(p3, prev, head, alloc);
+ }
+ const SkPoint q[] = {
+ { SkScalarAve(p0.fX, p1.fX), SkScalarAve(p0.fY, p1.fY) },
+ { SkScalarAve(p1.fX, p2.fX), SkScalarAve(p1.fY, p2.fY) },
+ { SkScalarAve(p2.fX, p3.fX), SkScalarAve(p2.fY, p3.fY) }
+ };
+ const SkPoint r[] = {
+ { SkScalarAve(q[0].fX, q[1].fX), SkScalarAve(q[0].fY, q[1].fY) },
+ { SkScalarAve(q[1].fX, q[2].fX), SkScalarAve(q[1].fY, q[2].fY) }
+ };
+ const SkPoint s = { SkScalarAve(r[0].fX, r[1].fX), SkScalarAve(r[0].fY, r[1].fY) };
+ pointsLeft >>= 1;
+ prev = generate_cubic_points(p0, q[0], r[0], s, tolSqd, prev, head, pointsLeft, alloc);
+ prev = generate_cubic_points(s, r[1], q[2], p3, tolSqd, prev, head, pointsLeft, alloc);
+ return prev;
+}
+
+// Stage 1: convert the input path to a set of linear contours (linked list of Vertices).
+
+void path_to_contours(const SkPath& path, SkScalar tolerance, const SkRect& clipBounds,
+ Vertex** contours, SkChunkAlloc& alloc, bool *isLinear) {
+ SkScalar toleranceSqd = tolerance * tolerance;
+
+ SkPoint pts[4];
+ bool done = false;
+ *isLinear = true;
+ SkPath::Iter iter(path, false);
+ Vertex* prev = nullptr;
+ Vertex* head = nullptr;
+ if (path.isInverseFillType()) {
+ SkPoint quad[4];
+ clipBounds.toQuad(quad);
+ for (int i = 0; i < 4; i++) {
+ prev = append_point_to_contour(quad[i], prev, &head, alloc);
+ }
+ head->fPrev = prev;
+ prev->fNext = head;
+ *contours++ = head;
+ head = prev = nullptr;
+ }
+ SkAutoConicToQuads converter;
+ while (!done) {
+ SkPath::Verb verb = iter.next(pts);
+ switch (verb) {
+ case SkPath::kConic_Verb: {
+ SkScalar weight = iter.conicWeight();
+ const SkPoint* quadPts = converter.computeQuads(pts, weight, toleranceSqd);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ int pointsLeft = GrPathUtils::quadraticPointCount(quadPts, tolerance);
+ prev = generate_quadratic_points(quadPts[0], quadPts[1], quadPts[2],
+ toleranceSqd, prev, &head, pointsLeft, alloc);
+ quadPts += 2;
+ }
+ *isLinear = false;
+ break;
+ }
+ case SkPath::kMove_Verb:
+ if (head) {
+ head->fPrev = prev;
+ prev->fNext = head;
+ *contours++ = head;
+ }
+ head = prev = nullptr;
+ prev = append_point_to_contour(pts[0], prev, &head, alloc);
+ break;
+ case SkPath::kLine_Verb: {
+ prev = append_point_to_contour(pts[1], prev, &head, alloc);
+ break;
+ }
+ case SkPath::kQuad_Verb: {
+ int pointsLeft = GrPathUtils::quadraticPointCount(pts, tolerance);
+ prev = generate_quadratic_points(pts[0], pts[1], pts[2], toleranceSqd, prev,
+ &head, pointsLeft, alloc);
+ *isLinear = false;
+ break;
+ }
+ case SkPath::kCubic_Verb: {
+ int pointsLeft = GrPathUtils::cubicPointCount(pts, tolerance);
+ prev = generate_cubic_points(pts[0], pts[1], pts[2], pts[3],
+ toleranceSqd, prev, &head, pointsLeft, alloc);
+ *isLinear = false;
+ break;
+ }
+ case SkPath::kClose_Verb:
+ if (head) {
+ head->fPrev = prev;
+ prev->fNext = head;
+ *contours++ = head;
+ }
+ head = prev = nullptr;
+ break;
+ case SkPath::kDone_Verb:
+ if (head) {
+ head->fPrev = prev;
+ prev->fNext = head;
+ *contours++ = head;
+ }
+ done = true;
+ break;
+ }
+ }
+}
+
+inline bool apply_fill_type(SkPath::FillType fillType, Poly* poly) {
+ if (!poly) {
+ return false;
+ }
+ int winding = poly->fWinding;
+ switch (fillType) {
+ case SkPath::kWinding_FillType:
+ return winding != 0;
+ case SkPath::kEvenOdd_FillType:
+ return (winding & 1) != 0;
+ case SkPath::kInverseWinding_FillType:
+ return winding == -1;
+ case SkPath::kInverseEvenOdd_FillType:
+ return (winding & 1) == 1;
+ default:
+ SkASSERT(false);
+ return false;
+ }
+}
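+
+// For example, a poly with winding 2 (covered twice in the same direction) is filled under
+// kWinding_FillType (2 != 0) but not under kEvenOdd_FillType ((2 & 1) == 0).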
+
+Edge* new_edge(Vertex* prev, Vertex* next, SkChunkAlloc& alloc, Comparator& c,
+ int winding_scale = 1) {
+ int winding = c.sweep_lt(prev->fPoint, next->fPoint) ? winding_scale : -winding_scale;
+ Vertex* top = winding < 0 ? next : prev;
+ Vertex* bottom = winding < 0 ? prev : next;
+ return ALLOC_NEW(Edge, (top, bottom, winding), alloc);
+}
+
+void remove_edge(Edge* edge, EdgeList* edges) {
+ LOG("removing edge %g -> %g\n", edge->fTop->fID, edge->fBottom->fID);
+ SkASSERT(edges->contains(edge));
+ edges->remove(edge);
+}
+
+void insert_edge(Edge* edge, Edge* prev, EdgeList* edges) {
+ LOG("inserting edge %g -> %g\n", edge->fTop->fID, edge->fBottom->fID);
+ SkASSERT(!edges->contains(edge));
+ Edge* next = prev ? prev->fRight : edges->fHead;
+ edges->insert(edge, prev, next);
+}
+
+void find_enclosing_edges(Vertex* v, EdgeList* edges, Edge** left, Edge** right) {
+ if (v->fFirstEdgeAbove) {
+ *left = v->fFirstEdgeAbove->fLeft;
+ *right = v->fLastEdgeAbove->fRight;
+ return;
+ }
+ Edge* next = nullptr;
+ Edge* prev;
+ for (prev = edges->fTail; prev != nullptr; prev = prev->fLeft) {
+ if (prev->isLeftOf(v)) {
+ break;
+ }
+ next = prev;
+ }
+ *left = prev;
+ *right = next;
+}
+
+void find_enclosing_edges(Edge* edge, EdgeList* edges, Comparator& c, Edge** left, Edge** right) {
+ Edge* prev = nullptr;
+ Edge* next;
+ for (next = edges->fHead; next != nullptr; next = next->fRight) {
+ if ((c.sweep_gt(edge->fTop->fPoint, next->fTop->fPoint) && next->isRightOf(edge->fTop)) ||
+ (c.sweep_gt(next->fTop->fPoint, edge->fTop->fPoint) && edge->isLeftOf(next->fTop)) ||
+ (c.sweep_lt(edge->fBottom->fPoint, next->fBottom->fPoint) &&
+ next->isRightOf(edge->fBottom)) ||
+ (c.sweep_lt(next->fBottom->fPoint, edge->fBottom->fPoint) &&
+ edge->isLeftOf(next->fBottom))) {
+ break;
+ }
+ prev = next;
+ }
+ *left = prev;
+ *right = next;
+}
+
+void fix_active_state(Edge* edge, EdgeList* activeEdges, Comparator& c) {
+ if (activeEdges && activeEdges->contains(edge)) {
+ if (edge->fBottom->fProcessed || !edge->fTop->fProcessed) {
+ remove_edge(edge, activeEdges);
+ }
+ } else if (edge->fTop->fProcessed && !edge->fBottom->fProcessed) {
+ Edge* left;
+ Edge* right;
+ find_enclosing_edges(edge, activeEdges, c, &left, &right);
+ insert_edge(edge, left, activeEdges);
+ }
+}
+
+void insert_edge_above(Edge* edge, Vertex* v, Comparator& c) {
+ if (edge->fTop->fPoint == edge->fBottom->fPoint ||
+ c.sweep_gt(edge->fTop->fPoint, edge->fBottom->fPoint)) {
+ return;
+ }
+ LOG("insert edge (%g -> %g) above vertex %g\n", edge->fTop->fID, edge->fBottom->fID, v->fID);
+ Edge* prev = nullptr;
+ Edge* next;
+ for (next = v->fFirstEdgeAbove; next; next = next->fNextEdgeAbove) {
+ if (next->isRightOf(edge->fTop)) {
+ break;
+ }
+ prev = next;
+ }
+ list_insert<Edge, &Edge::fPrevEdgeAbove, &Edge::fNextEdgeAbove>(
+ edge, prev, next, &v->fFirstEdgeAbove, &v->fLastEdgeAbove);
+}
+
+void insert_edge_below(Edge* edge, Vertex* v, Comparator& c) {
+ if (edge->fTop->fPoint == edge->fBottom->fPoint ||
+ c.sweep_gt(edge->fTop->fPoint, edge->fBottom->fPoint)) {
+ return;
+ }
+ LOG("insert edge (%g -> %g) below vertex %g\n", edge->fTop->fID, edge->fBottom->fID, v->fID);
+ Edge* prev = nullptr;
+ Edge* next;
+ for (next = v->fFirstEdgeBelow; next; next = next->fNextEdgeBelow) {
+ if (next->isRightOf(edge->fBottom)) {
+ break;
+ }
+ prev = next;
+ }
+ list_insert<Edge, &Edge::fPrevEdgeBelow, &Edge::fNextEdgeBelow>(
+ edge, prev, next, &v->fFirstEdgeBelow, &v->fLastEdgeBelow);
+}
+
+void remove_edge_above(Edge* edge) {
+ LOG("removing edge (%g -> %g) above vertex %g\n", edge->fTop->fID, edge->fBottom->fID,
+ edge->fBottom->fID);
+ list_remove<Edge, &Edge::fPrevEdgeAbove, &Edge::fNextEdgeAbove>(
+ edge, &edge->fBottom->fFirstEdgeAbove, &edge->fBottom->fLastEdgeAbove);
+}
+
+void remove_edge_below(Edge* edge) {
+ LOG("removing edge (%g -> %g) below vertex %g\n", edge->fTop->fID, edge->fBottom->fID,
+ edge->fTop->fID);
+ list_remove<Edge, &Edge::fPrevEdgeBelow, &Edge::fNextEdgeBelow>(
+ edge, &edge->fTop->fFirstEdgeBelow, &edge->fTop->fLastEdgeBelow);
+}
+
+void erase_edge_if_zero_winding(Edge* edge, EdgeList* edges) {
+ if (edge->fWinding != 0) {
+ return;
+ }
+ LOG("erasing edge (%g -> %g)\n", edge->fTop->fID, edge->fBottom->fID);
+ remove_edge_above(edge);
+ remove_edge_below(edge);
+ if (edges && edges->contains(edge)) {
+ remove_edge(edge, edges);
+ }
+}
+
+void merge_collinear_edges(Edge* edge, EdgeList* activeEdges, Comparator& c);
+
+void set_top(Edge* edge, Vertex* v, EdgeList* activeEdges, Comparator& c) {
+ remove_edge_below(edge);
+ edge->fTop = v;
+ edge->recompute();
+ insert_edge_below(edge, v, c);
+ fix_active_state(edge, activeEdges, c);
+ merge_collinear_edges(edge, activeEdges, c);
+}
+
+void set_bottom(Edge* edge, Vertex* v, EdgeList* activeEdges, Comparator& c) {
+ remove_edge_above(edge);
+ edge->fBottom = v;
+ edge->recompute();
+ insert_edge_above(edge, v, c);
+ fix_active_state(edge, activeEdges, c);
+ merge_collinear_edges(edge, activeEdges, c);
+}
+
+void merge_edges_above(Edge* edge, Edge* other, EdgeList* activeEdges, Comparator& c) {
+ if (coincident(edge->fTop->fPoint, other->fTop->fPoint)) {
+ LOG("merging coincident above edges (%g, %g) -> (%g, %g)\n",
+ edge->fTop->fPoint.fX, edge->fTop->fPoint.fY,
+ edge->fBottom->fPoint.fX, edge->fBottom->fPoint.fY);
+ other->fWinding += edge->fWinding;
+ erase_edge_if_zero_winding(other, activeEdges);
+ edge->fWinding = 0;
+ erase_edge_if_zero_winding(edge, activeEdges);
+ } else if (c.sweep_lt(edge->fTop->fPoint, other->fTop->fPoint)) {
+ other->fWinding += edge->fWinding;
+ erase_edge_if_zero_winding(other, activeEdges);
+ set_bottom(edge, other->fTop, activeEdges, c);
+ } else {
+ edge->fWinding += other->fWinding;
+ erase_edge_if_zero_winding(edge, activeEdges);
+ set_bottom(other, edge->fTop, activeEdges, c);
+ }
+}
+
+void merge_edges_below(Edge* edge, Edge* other, EdgeList* activeEdges, Comparator& c) {
+ if (coincident(edge->fBottom->fPoint, other->fBottom->fPoint)) {
+ LOG("merging coincident below edges (%g, %g) -> (%g, %g)\n",
+ edge->fTop->fPoint.fX, edge->fTop->fPoint.fY,
+ edge->fBottom->fPoint.fX, edge->fBottom->fPoint.fY);
+ other->fWinding += edge->fWinding;
+ erase_edge_if_zero_winding(other, activeEdges);
+ edge->fWinding = 0;
+ erase_edge_if_zero_winding(edge, activeEdges);
+ } else if (c.sweep_lt(edge->fBottom->fPoint, other->fBottom->fPoint)) {
+ edge->fWinding += other->fWinding;
+ erase_edge_if_zero_winding(edge, activeEdges);
+ set_top(other, edge->fBottom, activeEdges, c);
+ } else {
+ other->fWinding += edge->fWinding;
+ erase_edge_if_zero_winding(other, activeEdges);
+ set_top(edge, other->fBottom, activeEdges, c);
+ }
+}
+
+void merge_collinear_edges(Edge* edge, EdgeList* activeEdges, Comparator& c) {
+ if (edge->fPrevEdgeAbove && (edge->fTop == edge->fPrevEdgeAbove->fTop ||
+ !edge->fPrevEdgeAbove->isLeftOf(edge->fTop))) {
+ merge_edges_above(edge, edge->fPrevEdgeAbove, activeEdges, c);
+ } else if (edge->fNextEdgeAbove && (edge->fTop == edge->fNextEdgeAbove->fTop ||
+ !edge->isLeftOf(edge->fNextEdgeAbove->fTop))) {
+ merge_edges_above(edge, edge->fNextEdgeAbove, activeEdges, c);
+ }
+ if (edge->fPrevEdgeBelow && (edge->fBottom == edge->fPrevEdgeBelow->fBottom ||
+ !edge->fPrevEdgeBelow->isLeftOf(edge->fBottom))) {
+ merge_edges_below(edge, edge->fPrevEdgeBelow, activeEdges, c);
+ } else if (edge->fNextEdgeBelow && (edge->fBottom == edge->fNextEdgeBelow->fBottom ||
+ !edge->isLeftOf(edge->fNextEdgeBelow->fBottom))) {
+ merge_edges_below(edge, edge->fNextEdgeBelow, activeEdges, c);
+ }
+}
+
+void split_edge(Edge* edge, Vertex* v, EdgeList* activeEdges, Comparator& c, SkChunkAlloc& alloc);
+
+void cleanup_active_edges(Edge* edge, EdgeList* activeEdges, Comparator& c, SkChunkAlloc& alloc) {
+ Vertex* top = edge->fTop;
+ Vertex* bottom = edge->fBottom;
+ if (edge->fLeft) {
+ Vertex* leftTop = edge->fLeft->fTop;
+ Vertex* leftBottom = edge->fLeft->fBottom;
+ if (c.sweep_gt(top->fPoint, leftTop->fPoint) && !edge->fLeft->isLeftOf(top)) {
+ split_edge(edge->fLeft, edge->fTop, activeEdges, c, alloc);
+ } else if (c.sweep_gt(leftTop->fPoint, top->fPoint) && !edge->isRightOf(leftTop)) {
+ split_edge(edge, leftTop, activeEdges, c, alloc);
+ } else if (c.sweep_lt(bottom->fPoint, leftBottom->fPoint) &&
+ !edge->fLeft->isLeftOf(bottom)) {
+ split_edge(edge->fLeft, bottom, activeEdges, c, alloc);
+ } else if (c.sweep_lt(leftBottom->fPoint, bottom->fPoint) && !edge->isRightOf(leftBottom)) {
+ split_edge(edge, leftBottom, activeEdges, c, alloc);
+ }
+ }
+ if (edge->fRight) {
+ Vertex* rightTop = edge->fRight->fTop;
+ Vertex* rightBottom = edge->fRight->fBottom;
+ if (c.sweep_gt(top->fPoint, rightTop->fPoint) && !edge->fRight->isRightOf(top)) {
+ split_edge(edge->fRight, top, activeEdges, c, alloc);
+ } else if (c.sweep_gt(rightTop->fPoint, top->fPoint) && !edge->isLeftOf(rightTop)) {
+ split_edge(edge, rightTop, activeEdges, c, alloc);
+ } else if (c.sweep_lt(bottom->fPoint, rightBottom->fPoint) &&
+ !edge->fRight->isRightOf(bottom)) {
+ split_edge(edge->fRight, bottom, activeEdges, c, alloc);
+ } else if (c.sweep_lt(rightBottom->fPoint, bottom->fPoint) &&
+ !edge->isLeftOf(rightBottom)) {
+ split_edge(edge, rightBottom, activeEdges, c, alloc);
+ }
+ }
+}
+
+void split_edge(Edge* edge, Vertex* v, EdgeList* activeEdges, Comparator& c, SkChunkAlloc& alloc) {
+ LOG("splitting edge (%g -> %g) at vertex %g (%g, %g)\n",
+ edge->fTop->fID, edge->fBottom->fID,
+ v->fID, v->fPoint.fX, v->fPoint.fY);
+ if (c.sweep_lt(v->fPoint, edge->fTop->fPoint)) {
+ set_top(edge, v, activeEdges, c);
+ } else if (c.sweep_gt(v->fPoint, edge->fBottom->fPoint)) {
+ set_bottom(edge, v, activeEdges, c);
+ } else {
+ Edge* newEdge = ALLOC_NEW(Edge, (v, edge->fBottom, edge->fWinding), alloc);
+ insert_edge_below(newEdge, v, c);
+ insert_edge_above(newEdge, edge->fBottom, c);
+ set_bottom(edge, v, activeEdges, c);
+ cleanup_active_edges(edge, activeEdges, c, alloc);
+ fix_active_state(newEdge, activeEdges, c);
+ merge_collinear_edges(newEdge, activeEdges, c);
+ }
+}
+
+Edge* connect(Vertex* prev, Vertex* next, SkChunkAlloc& alloc, Comparator c,
+ int winding_scale = 1) {
+ Edge* edge = new_edge(prev, next, alloc, c, winding_scale);
+ if (edge->fWinding > 0) {
+ insert_edge_below(edge, prev, c);
+ insert_edge_above(edge, next, c);
+ } else {
+ insert_edge_below(edge, next, c);
+ insert_edge_above(edge, prev, c);
+ }
+ merge_collinear_edges(edge, nullptr, c);
+ return edge;
+}
+
+void merge_vertices(Vertex* src, Vertex* dst, Vertex** head, Comparator& c, SkChunkAlloc& alloc) {
+ LOG("found coincident verts at %g, %g; merging %g into %g\n", src->fPoint.fX, src->fPoint.fY,
+ src->fID, dst->fID);
+ dst->fAlpha = SkTMax(src->fAlpha, dst->fAlpha);
+ for (Edge* edge = src->fFirstEdgeAbove; edge;) {
+ Edge* next = edge->fNextEdgeAbove;
+ set_bottom(edge, dst, nullptr, c);
+ edge = next;
+ }
+ for (Edge* edge = src->fFirstEdgeBelow; edge;) {
+ Edge* next = edge->fNextEdgeBelow;
+ set_top(edge, dst, nullptr, c);
+ edge = next;
+ }
+ list_remove<Vertex, &Vertex::fPrev, &Vertex::fNext>(src, head, nullptr);
+}
+
+uint8_t max_edge_alpha(Edge* a, Edge* b) {
+ return SkTMax(SkTMax(a->fTop->fAlpha, a->fBottom->fAlpha),
+ SkTMax(b->fTop->fAlpha, b->fBottom->fAlpha));
+}
+
+Vertex* check_for_intersection(Edge* edge, Edge* other, EdgeList* activeEdges, Comparator& c,
+ SkChunkAlloc& alloc) {
+ SkPoint p;
+ if (!edge || !other) {
+ return nullptr;
+ }
+ if (edge->intersect(*other, &p)) {
+ Vertex* v;
+ LOG("found intersection, pt is %g, %g\n", p.fX, p.fY);
+ if (p == edge->fTop->fPoint || c.sweep_lt(p, edge->fTop->fPoint)) {
+ split_edge(other, edge->fTop, activeEdges, c, alloc);
+ v = edge->fTop;
+ } else if (p == edge->fBottom->fPoint || c.sweep_gt(p, edge->fBottom->fPoint)) {
+ split_edge(other, edge->fBottom, activeEdges, c, alloc);
+ v = edge->fBottom;
+ } else if (p == other->fTop->fPoint || c.sweep_lt(p, other->fTop->fPoint)) {
+ split_edge(edge, other->fTop, activeEdges, c, alloc);
+ v = other->fTop;
+ } else if (p == other->fBottom->fPoint || c.sweep_gt(p, other->fBottom->fPoint)) {
+ split_edge(edge, other->fBottom, activeEdges, c, alloc);
+ v = other->fBottom;
+ } else {
+ Vertex* nextV = edge->fTop;
+ while (c.sweep_lt(p, nextV->fPoint)) {
+ nextV = nextV->fPrev;
+ }
+ while (c.sweep_lt(nextV->fPoint, p)) {
+ nextV = nextV->fNext;
+ }
+ Vertex* prevV = nextV->fPrev;
+ if (coincident(prevV->fPoint, p)) {
+ v = prevV;
+ } else if (coincident(nextV->fPoint, p)) {
+ v = nextV;
+ } else {
+ uint8_t alpha = max_edge_alpha(edge, other);
+ v = ALLOC_NEW(Vertex, (p, alpha), alloc);
+ LOG("inserting between %g (%g, %g) and %g (%g, %g)\n",
+ prevV->fID, prevV->fPoint.fX, prevV->fPoint.fY,
+ nextV->fID, nextV->fPoint.fX, nextV->fPoint.fY);
+#if LOGGING_ENABLED
+ v->fID = (nextV->fID + prevV->fID) * 0.5f;
+#endif
+ v->fPrev = prevV;
+ v->fNext = nextV;
+ prevV->fNext = v;
+ nextV->fPrev = v;
+ }
+ split_edge(edge, v, activeEdges, c, alloc);
+ split_edge(other, v, activeEdges, c, alloc);
+ }
+ return v;
+ }
+ return nullptr;
+}
+
+void sanitize_contours(Vertex** contours, int contourCnt, bool approximate) {
+ for (int i = 0; i < contourCnt; ++i) {
+ SkASSERT(contours[i]);
+ for (Vertex* v = contours[i];;) {
+ if (approximate) {
+ round(&v->fPoint);
+ }
+ if (coincident(v->fPrev->fPoint, v->fPoint)) {
+ LOG("vertex %g,%g coincident; removing\n", v->fPoint.fX, v->fPoint.fY);
+ if (v->fPrev == v) {
+ contours[i] = nullptr;
+ break;
+ }
+ v->fPrev->fNext = v->fNext;
+ v->fNext->fPrev = v->fPrev;
+ if (contours[i] == v) {
+ contours[i] = v->fNext;
+ }
+ v = v->fPrev;
+ } else {
+ v = v->fNext;
+ if (v == contours[i]) break;
+ }
+ }
+ }
+}
+
+void merge_coincident_vertices(Vertex** vertices, Comparator& c, SkChunkAlloc& alloc) {
+ for (Vertex* v = (*vertices)->fNext; v != nullptr; v = v->fNext) {
+ if (c.sweep_lt(v->fPoint, v->fPrev->fPoint)) {
+ v->fPoint = v->fPrev->fPoint;
+ }
+ if (coincident(v->fPrev->fPoint, v->fPoint)) {
+ merge_vertices(v->fPrev, v, vertices, c, alloc);
+ }
+ }
+}
+
+// Stage 2: convert the contours to a mesh of edges connecting the vertices.
+
+Vertex* build_edges(Vertex** contours, int contourCnt, Comparator& c, SkChunkAlloc& alloc) {
+ Vertex* vertices = nullptr;
+ Vertex* prev = nullptr;
+ for (int i = 0; i < contourCnt; ++i) {
+ for (Vertex* v = contours[i]; v != nullptr;) {
+ Vertex* vNext = v->fNext;
+ connect(v->fPrev, v, alloc, c);
+ if (prev) {
+ prev->fNext = v;
+ v->fPrev = prev;
+ } else {
+ vertices = v;
+ }
+ prev = v;
+ v = vNext;
+ if (v == contours[i]) break;
+ }
+ }
+ if (prev) {
+ prev->fNext = vertices->fPrev = nullptr;
+ }
+ return vertices;
+}
+
+// Stage 3: sort the vertices by increasing sweep direction.
+
+Vertex* sorted_merge(Vertex* a, Vertex* b, Comparator& c);
+
+void front_back_split(Vertex* v, Vertex** pFront, Vertex** pBack) {
+ Vertex* fast;
+ Vertex* slow;
+ if (!v || !v->fNext) {
+ *pFront = v;
+ *pBack = nullptr;
+ } else {
+ slow = v;
+ fast = v->fNext;
+
+ while (fast != nullptr) {
+ fast = fast->fNext;
+ if (fast != nullptr) {
+ slow = slow->fNext;
+ fast = fast->fNext;
+ }
+ }
+
+ *pFront = v;
+ *pBack = slow->fNext;
+ slow->fNext->fPrev = nullptr;
+ slow->fNext = nullptr;
+ }
+}
+
+void merge_sort(Vertex** head, Comparator& c) {
+ if (!*head || !(*head)->fNext) {
+ return;
+ }
+
+ Vertex* a;
+ Vertex* b;
+ front_back_split(*head, &a, &b);
+
+ merge_sort(&a, c);
+ merge_sort(&b, c);
+
+ *head = sorted_merge(a, b, c);
+}
+
+Vertex* sorted_merge(Vertex* a, Vertex* b, Comparator& c) {
+ VertexList vertices;
+
+ while (a && b) {
+ if (c.sweep_lt(a->fPoint, b->fPoint)) {
+ Vertex* next = a->fNext;
+ vertices.append(a);
+ a = next;
+ } else {
+ Vertex* next = b->fNext;
+ vertices.append(b);
+ b = next;
+ }
+ }
+ if (a) {
+ vertices.insert(a, vertices.fTail, a->fNext);
+ }
+ if (b) {
+ vertices.insert(b, vertices.fTail, b->fNext);
+ }
+ return vertices.fHead;
+}
+
+// Stage 4: Simplify the mesh by inserting new vertices at intersecting edges.
+
+void simplify(Vertex* vertices, Comparator& c, SkChunkAlloc& alloc) {
+ LOG("simplifying complex polygons\n");
+ EdgeList activeEdges;
+ for (Vertex* v = vertices; v != nullptr; v = v->fNext) {
+ if (!v->fFirstEdgeAbove && !v->fFirstEdgeBelow) {
+ continue;
+ }
+#if LOGGING_ENABLED
+ LOG("\nvertex %g: (%g,%g), alpha %d\n", v->fID, v->fPoint.fX, v->fPoint.fY, v->fAlpha);
+#endif
+ Edge* leftEnclosingEdge = nullptr;
+ Edge* rightEnclosingEdge = nullptr;
+ bool restartChecks;
+ do {
+ restartChecks = false;
+ find_enclosing_edges(v, &activeEdges, &leftEnclosingEdge, &rightEnclosingEdge);
+ if (v->fFirstEdgeBelow) {
+ for (Edge* edge = v->fFirstEdgeBelow; edge != nullptr; edge = edge->fNextEdgeBelow) {
+ if (check_for_intersection(edge, leftEnclosingEdge, &activeEdges, c, alloc)) {
+ restartChecks = true;
+ break;
+ }
+ if (check_for_intersection(edge, rightEnclosingEdge, &activeEdges, c, alloc)) {
+ restartChecks = true;
+ break;
+ }
+ }
+ } else {
+ if (Vertex* pv = check_for_intersection(leftEnclosingEdge, rightEnclosingEdge,
+ &activeEdges, c, alloc)) {
+ if (c.sweep_lt(pv->fPoint, v->fPoint)) {
+ v = pv;
+ }
+ restartChecks = true;
+ }
+
+ }
+ } while (restartChecks);
+ if (v->fAlpha == 0) {
+ if ((leftEnclosingEdge && leftEnclosingEdge->fWinding < 0) &&
+ (rightEnclosingEdge && rightEnclosingEdge->fWinding > 0)) {
+ v->fAlpha = max_edge_alpha(leftEnclosingEdge, rightEnclosingEdge);
+ }
+ }
+ for (Edge* e = v->fFirstEdgeAbove; e; e = e->fNextEdgeAbove) {
+ remove_edge(e, &activeEdges);
+ }
+ Edge* leftEdge = leftEnclosingEdge;
+ for (Edge* e = v->fFirstEdgeBelow; e; e = e->fNextEdgeBelow) {
+ insert_edge(e, leftEdge, &activeEdges);
+ leftEdge = e;
+ }
+ v->fProcessed = true;
+ }
+}
+
+// Stage 5: Tessellate the simplified mesh into monotone polygons.
+
+Poly* tessellate(Vertex* vertices, SkChunkAlloc& alloc) {
+ LOG("tessellating simple polygons\n");
+ EdgeList activeEdges;
+ Poly* polys = nullptr;
+ for (Vertex* v = vertices; v != nullptr; v = v->fNext) {
+ if (!v->fFirstEdgeAbove && !v->fFirstEdgeBelow) {
+ continue;
+ }
+#if LOGGING_ENABLED
+ LOG("\nvertex %g: (%g,%g), alpha %d\n", v->fID, v->fPoint.fX, v->fPoint.fY, v->fAlpha);
+#endif
+ Edge* leftEnclosingEdge = nullptr;
+ Edge* rightEnclosingEdge = nullptr;
+ find_enclosing_edges(v, &activeEdges, &leftEnclosingEdge, &rightEnclosingEdge);
+ Poly* leftPoly = nullptr;
+ Poly* rightPoly = nullptr;
+ if (v->fFirstEdgeAbove) {
+ leftPoly = v->fFirstEdgeAbove->fLeftPoly;
+ rightPoly = v->fLastEdgeAbove->fRightPoly;
+ } else {
+ leftPoly = leftEnclosingEdge ? leftEnclosingEdge->fRightPoly : nullptr;
+ rightPoly = rightEnclosingEdge ? rightEnclosingEdge->fLeftPoly : nullptr;
+ }
+#if LOGGING_ENABLED
+ LOG("edges above:\n");
+ for (Edge* e = v->fFirstEdgeAbove; e; e = e->fNextEdgeAbove) {
+ LOG("%g -> %g, lpoly %d, rpoly %d\n", e->fTop->fID, e->fBottom->fID,
+ e->fLeftPoly ? e->fLeftPoly->fID : -1, e->fRightPoly ? e->fRightPoly->fID : -1);
+ }
+ LOG("edges below:\n");
+ for (Edge* e = v->fFirstEdgeBelow; e; e = e->fNextEdgeBelow) {
+ LOG("%g -> %g, lpoly %d, rpoly %d\n", e->fTop->fID, e->fBottom->fID,
+ e->fLeftPoly ? e->fLeftPoly->fID : -1, e->fRightPoly ? e->fRightPoly->fID : -1);
+ }
+#endif
+ if (v->fFirstEdgeAbove) {
+ if (leftPoly) {
+ leftPoly = leftPoly->addEdge(v->fFirstEdgeAbove, Poly::kRight_Side, alloc);
+ }
+ if (rightPoly) {
+ rightPoly = rightPoly->addEdge(v->fLastEdgeAbove, Poly::kLeft_Side, alloc);
+ }
+ for (Edge* e = v->fFirstEdgeAbove; e != v->fLastEdgeAbove; e = e->fNextEdgeAbove) {
+ Edge* leftEdge = e;
+ Edge* rightEdge = e->fNextEdgeAbove;
+ SkASSERT(rightEdge->isRightOf(leftEdge->fTop));
+ remove_edge(leftEdge, &activeEdges);
+ if (leftEdge->fRightPoly) {
+ leftEdge->fRightPoly->addEdge(e, Poly::kLeft_Side, alloc);
+ }
+ if (rightEdge->fLeftPoly) {
+ rightEdge->fLeftPoly->addEdge(e, Poly::kRight_Side, alloc);
+ }
+ }
+ remove_edge(v->fLastEdgeAbove, &activeEdges);
+ if (!v->fFirstEdgeBelow) {
+ if (leftPoly && rightPoly && leftPoly != rightPoly) {
+ SkASSERT(leftPoly->fPartner == nullptr && rightPoly->fPartner == nullptr);
+ rightPoly->fPartner = leftPoly;
+ leftPoly->fPartner = rightPoly;
+ }
+ }
+ }
+ if (v->fFirstEdgeBelow) {
+ if (!v->fFirstEdgeAbove) {
+ if (leftPoly && rightPoly) {
+ if (leftPoly == rightPoly) {
+ if (leftPoly->fTail && leftPoly->fTail->fSide == Poly::kLeft_Side) {
+ leftPoly = new_poly(&polys, leftPoly->lastVertex(),
+ leftPoly->fWinding, alloc);
+ leftEnclosingEdge->fRightPoly = leftPoly;
+ } else {
+ rightPoly = new_poly(&polys, rightPoly->lastVertex(),
+ rightPoly->fWinding, alloc);
+ rightEnclosingEdge->fLeftPoly = rightPoly;
+ }
+ }
+ Edge* join = ALLOC_NEW(Edge, (leftPoly->lastVertex(), v, 1), alloc);
+ leftPoly = leftPoly->addEdge(join, Poly::kRight_Side, alloc);
+ rightPoly = rightPoly->addEdge(join, Poly::kLeft_Side, alloc);
+ }
+ }
+ Edge* leftEdge = v->fFirstEdgeBelow;
+ leftEdge->fLeftPoly = leftPoly;
+ insert_edge(leftEdge, leftEnclosingEdge, &activeEdges);
+ for (Edge* rightEdge = leftEdge->fNextEdgeBelow; rightEdge;
+ rightEdge = rightEdge->fNextEdgeBelow) {
+ insert_edge(rightEdge, leftEdge, &activeEdges);
+ int winding = leftEdge->fLeftPoly ? leftEdge->fLeftPoly->fWinding : 0;
+ winding += leftEdge->fWinding;
+ if (winding != 0) {
+ Poly* poly = new_poly(&polys, v, winding, alloc);
+ leftEdge->fRightPoly = rightEdge->fLeftPoly = poly;
+ }
+ leftEdge = rightEdge;
+ }
+ v->fLastEdgeBelow->fRightPoly = rightPoly;
+ }
+#if LOGGING_ENABLED
+ LOG("\nactive edges:\n");
+ for (Edge* e = activeEdges.fHead; e != nullptr; e = e->fRight) {
+ LOG("%g -> %g, lpoly %d, rpoly %d\n", e->fTop->fID, e->fBottom->fID,
+ e->fLeftPoly ? e->fLeftPoly->fID : -1, e->fRightPoly ? e->fRightPoly->fID : -1);
+ }
+#endif
+ }
+ return polys;
+}
+
+bool is_boundary_edge(Edge* edge, SkPath::FillType fillType) {
+ return apply_fill_type(fillType, edge->fLeftPoly) !=
+ apply_fill_type(fillType, edge->fRightPoly);
+}
+
+bool is_boundary_start(Edge* edge, SkPath::FillType fillType) {
+ return !apply_fill_type(fillType, edge->fLeftPoly) &&
+ apply_fill_type(fillType, edge->fRightPoly);
+}
+
+Vertex* remove_non_boundary_edges(Vertex* vertices, SkPath::FillType fillType,
+ SkChunkAlloc& alloc) {
+ for (Vertex* v = vertices; v != nullptr; v = v->fNext) {
+ for (Edge* e = v->fFirstEdgeBelow; e != nullptr;) {
+ Edge* next = e->fNextEdgeBelow;
+ if (!is_boundary_edge(e, fillType)) {
+ remove_edge_above(e);
+ remove_edge_below(e);
+ }
+ e = next;
+ }
+ }
+ return vertices;
+}
+
+// This is different from Edge::intersect, in that it intersects lines, not line segments.
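+// It solves the 2x2 linear system a.fDY*x - a.fDX*y + a.fC = 0 and
+// b.fDY*x - b.fDX*y + b.fC = 0 by Cramer's rule, where denom is the determinant.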
+bool intersect(const Edge& a, const Edge& b, SkPoint* point) {
+ double denom = a.fDX * b.fDY - a.fDY * b.fDX;
+ if (denom == 0.0) {
+ return false;
+ }
+ double scale = 1.0f / denom;
+ point->fX = SkDoubleToScalar((b.fDX * a.fC - a.fDX * b.fC) * scale);
+ point->fY = SkDoubleToScalar((b.fDY * a.fC - a.fDY * b.fC) * scale);
+ round(point);
+ return true;
+}
+
+void get_edge_normal(const Edge* e, SkVector* normal) {
+ normal->setNormalize(SkDoubleToScalar(e->fDX) * e->fWinding,
+ SkDoubleToScalar(e->fDY) * e->fWinding);
+}
+
+// Stage 5c: detect and remove "pointy" vertices whose edge normals point in opposite directions
+// and whose adjacent vertices are less than a quarter pixel from an edge. These are guaranteed to
+// invert on stroking.
+
+void simplify_boundary(EdgeList* boundary, Comparator& c, SkChunkAlloc& alloc) {
+ Edge* prevEdge = boundary->fTail;
+ SkVector prevNormal;
+ get_edge_normal(prevEdge, &prevNormal);
+ for (Edge* e = boundary->fHead; e != nullptr;) {
+ Vertex* prev = prevEdge->fWinding == 1 ? prevEdge->fTop : prevEdge->fBottom;
+ Vertex* next = e->fWinding == 1 ? e->fBottom : e->fTop;
+ double dist = e->dist(prev->fPoint);
+ SkVector normal;
+ get_edge_normal(e, &normal);
+ float denom = 0.25f * static_cast<float>(e->fDX * e->fDX + e->fDY * e->fDY);
+ if (prevNormal.dot(normal) < 0.0 && (dist * dist) <= denom) {
+ Edge* join = new_edge(prev, next, alloc, c);
+ insert_edge(join, e, boundary);
+ remove_edge(prevEdge, boundary);
+ remove_edge(e, boundary);
+ if (join->fLeft && join->fRight) {
+ prevEdge = join->fLeft;
+ e = join;
+ } else {
+ prevEdge = boundary->fTail;
+ e = boundary->fHead; // join->fLeft ? join->fLeft : join;
+ }
+ get_edge_normal(prevEdge, &prevNormal);
+ } else {
+ prevEdge = e;
+ prevNormal = normal;
+ e = e->fRight;
+ }
+ }
+}
+
+// Stage 5d: Displace edges by half a pixel inward and outward along their normals. Intersect to
+// find new vertices, and set zero alpha on the exterior and one alpha on the interior. Build a
+// new antialiased mesh from those vertices.
+
+void boundary_to_aa_mesh(EdgeList* boundary, VertexList* mesh, Comparator& c, SkChunkAlloc& alloc) {
+ EdgeList outerContour;
+ Edge* prevEdge = boundary->fTail;
+ float radius = 0.5f;
+ double offset = radius * sqrt(prevEdge->fDX * prevEdge->fDX + prevEdge->fDY * prevEdge->fDY)
+ * prevEdge->fWinding;
+ Edge prevInner(prevEdge->fTop, prevEdge->fBottom, prevEdge->fWinding);
+ prevInner.fC -= offset;
+ Edge prevOuter(prevEdge->fTop, prevEdge->fBottom, prevEdge->fWinding);
+ prevOuter.fC += offset;
+ VertexList innerVertices;
+ VertexList outerVertices;
+ SkScalar innerCount = SK_Scalar1, outerCount = SK_Scalar1;
+ for (Edge* e = boundary->fHead; e != nullptr; e = e->fRight) {
+ double offset = radius * sqrt(e->fDX * e->fDX + e->fDY * e->fDY) * e->fWinding;
+ Edge inner(e->fTop, e->fBottom, e->fWinding);
+ inner.fC -= offset;
+ Edge outer(e->fTop, e->fBottom, e->fWinding);
+ outer.fC += offset;
+ SkPoint innerPoint, outerPoint;
+ if (intersect(prevInner, inner, &innerPoint) &&
+ intersect(prevOuter, outer, &outerPoint)) {
+ Vertex* innerVertex = ALLOC_NEW(Vertex, (innerPoint, 255), alloc);
+ Vertex* outerVertex = ALLOC_NEW(Vertex, (outerPoint, 0), alloc);
+ if (innerVertices.fTail && outerVertices.fTail) {
+ Edge innerEdge(innerVertices.fTail, innerVertex, 1);
+ Edge outerEdge(outerVertices.fTail, outerVertex, 1);
+ SkVector innerNormal;
+ get_edge_normal(&innerEdge, &innerNormal);
+ SkVector outerNormal;
+ get_edge_normal(&outerEdge, &outerNormal);
+ SkVector normal;
+ get_edge_normal(prevEdge, &normal);
+ if (normal.dot(innerNormal) < 0) {
+ innerPoint += innerVertices.fTail->fPoint * innerCount;
+ innerCount++;
+ innerPoint *= SkScalarInvert(innerCount);
+ innerVertices.fTail->fPoint = innerVertex->fPoint = innerPoint;
+ } else {
+ innerCount = SK_Scalar1;
+ }
+ if (normal.dot(outerNormal) < 0) {
+ outerPoint += outerVertices.fTail->fPoint * outerCount;
+ outerCount++;
+ outerPoint *= SkScalarInvert(outerCount);
+ outerVertices.fTail->fPoint = outerVertex->fPoint = outerPoint;
+ } else {
+ outerCount = SK_Scalar1;
+ }
+ }
+ innerVertices.append(innerVertex);
+ outerVertices.append(outerVertex);
+ prevEdge = e;
+ }
+ prevInner = inner;
+ prevOuter = outer;
+ }
+ innerVertices.close();
+ outerVertices.close();
+
+ Vertex* innerVertex = innerVertices.fHead;
+ Vertex* outerVertex = outerVertices.fHead;
+ // Alternate clockwise and counterclockwise polys, so the tessellator
+ // doesn't cancel out the interior edges.
+ if (!innerVertex || !outerVertex) {
+ return;
+ }
+ do {
+ connect(outerVertex->fNext, outerVertex, alloc, c);
+ connect(innerVertex->fNext, innerVertex, alloc, c, 2);
+ connect(innerVertex, outerVertex->fNext, alloc, c, 2);
+ connect(outerVertex, innerVertex, alloc, c, 2);
+ Vertex* innerNext = innerVertex->fNext;
+ Vertex* outerNext = outerVertex->fNext;
+ mesh->append(innerVertex);
+ mesh->append(outerVertex);
+ innerVertex = innerNext;
+ outerVertex = outerNext;
+ } while (innerVertex != innerVertices.fHead && outerVertex != outerVertices.fHead);
+}
+
+void extract_boundary(EdgeList* boundary, Edge* e, SkPath::FillType fillType, SkChunkAlloc& alloc) {
+ bool down = is_boundary_start(e, fillType);
+ while (e) {
+ e->fWinding = down ? 1 : -1;
+ Edge* next;
+ boundary->append(e);
+ if (down) {
+ // Find outgoing edge, in clockwise order.
+ if ((next = e->fNextEdgeAbove)) {
+ down = false;
+ } else if ((next = e->fBottom->fLastEdgeBelow)) {
+ down = true;
+ } else if ((next = e->fPrevEdgeAbove)) {
+ down = false;
+ }
+ } else {
+ // Find outgoing edge, in counter-clockwise order.
+ if ((next = e->fPrevEdgeBelow)) {
+ down = true;
+ } else if ((next = e->fTop->fFirstEdgeAbove)) {
+ down = false;
+ } else if ((next = e->fNextEdgeBelow)) {
+ down = true;
+ }
+ }
+ remove_edge_above(e);
+ remove_edge_below(e);
+ e = next;
+ }
+}
+
+// Stage 5b: Extract boundary edges.
+
+EdgeList* extract_boundaries(Vertex* vertices, SkPath::FillType fillType, SkChunkAlloc& alloc) {
+ LOG("extracting boundaries\n");
+ vertices = remove_non_boundary_edges(vertices, fillType, alloc);
+ EdgeList* boundaries = nullptr;
+ for (Vertex* v = vertices; v != nullptr; v = v->fNext) {
+ while (v->fFirstEdgeBelow) {
+ EdgeList* boundary = new_contour(&boundaries, alloc);
+ extract_boundary(boundary, v->fFirstEdgeBelow, fillType, alloc);
+ }
+ }
+ return boundaries;
+}
+
+// This is a driver function which calls stages 2-5 in turn.
+
+Vertex* contours_to_mesh(Vertex** contours, int contourCnt, bool antialias,
+ Comparator& c, SkChunkAlloc& alloc) {
+#if LOGGING_ENABLED
+ for (int i = 0; i < contourCnt; ++i) {
+ Vertex* v = contours[i];
+ SkASSERT(v);
+ LOG("path.moveTo(%20.20g, %20.20g);\n", v->fPoint.fX, v->fPoint.fY);
+ for (v = v->fNext; v != contours[i]; v = v->fNext) {
+ LOG("path.lineTo(%20.20g, %20.20g);\n", v->fPoint.fX, v->fPoint.fY);
+ }
+ }
+#endif
+ sanitize_contours(contours, contourCnt, antialias);
+ return build_edges(contours, contourCnt, c, alloc);
+}
+
+Poly* mesh_to_polys(Vertex** vertices, SkPath::FillType fillType, Comparator& c,
+ SkChunkAlloc& alloc) {
+ if (!vertices || !*vertices) {
+ return nullptr;
+ }
+
+ // Sort vertices in Y (secondarily in X).
+ merge_sort(vertices, c);
+ merge_coincident_vertices(vertices, c, alloc);
+#if LOGGING_ENABLED
+ for (Vertex* v = *vertices; v != nullptr; v = v->fNext) {
+ static float gID = 0.0f;
+ v->fID = gID++;
+ }
+#endif
+ simplify(*vertices, c, alloc);
+ return tessellate(*vertices, alloc);
+}
+
+Poly* contours_to_polys(Vertex** contours, int contourCnt, SkPath::FillType fillType,
+ const SkRect& pathBounds, bool antialias,
+ SkChunkAlloc& alloc) {
+ Comparator c;
+ if (pathBounds.width() > pathBounds.height()) {
+ c.sweep_lt = sweep_lt_horiz;
+ c.sweep_gt = sweep_gt_horiz;
+ } else {
+ c.sweep_lt = sweep_lt_vert;
+ c.sweep_gt = sweep_gt_vert;
+ }
+ Vertex* mesh = contours_to_mesh(contours, contourCnt, antialias, c, alloc);
+ Poly* polys = mesh_to_polys(&mesh, fillType, c, alloc);
+ if (antialias) {
+ EdgeList* boundaries = extract_boundaries(mesh, fillType, alloc);
+ VertexList aaMesh;
+ for (EdgeList* boundary = boundaries; boundary != nullptr; boundary = boundary->fNext) {
+ simplify_boundary(boundary, c, alloc);
+ if (boundary->fCount > 2) {
+ boundary_to_aa_mesh(boundary, &aaMesh, c, alloc);
+ }
+ }
+ return mesh_to_polys(&aaMesh.fHead, SkPath::kWinding_FillType, c, alloc);
+ }
+ return polys;
+}
+
+// Stage 6: Triangulate the monotone polygons into a vertex buffer.
+void* polys_to_triangles(Poly* polys, SkPath::FillType fillType, const AAParams* aaParams,
+ void* data) {
+ for (Poly* poly = polys; poly; poly = poly->fNext) {
+ if (apply_fill_type(fillType, poly)) {
+ data = poly->emit(aaParams, data);
+ }
+ }
+ return data;
+}
+
+Poly* path_to_polys(const SkPath& path, SkScalar tolerance, const SkRect& clipBounds,
+ int contourCnt, SkChunkAlloc& alloc, bool antialias, bool* isLinear) {
+ SkPath::FillType fillType = path.getFillType();
+ if (SkPath::IsInverseFillType(fillType)) {
+ contourCnt++;
+ }
+ SkAutoTDeleteArray<Vertex*> contours(new Vertex* [contourCnt]);
+
+ path_to_contours(path, tolerance, clipBounds, contours.get(), alloc, isLinear);
+ return contours_to_polys(contours.get(), contourCnt, path.getFillType(), path.getBounds(),
+ antialias, alloc);
+}
+
+void get_contour_count_and_size_estimate(const SkPath& path, SkScalar tolerance, int* contourCnt,
+ int* sizeEstimate) {
+ int maxPts = GrPathUtils::worstCasePointCount(path, contourCnt, tolerance);
+ if (maxPts <= 0) {
+ *contourCnt = 0;
+ return;
+ }
+ if (maxPts > ((int)SK_MaxU16 + 1)) {
+ SkDebugf("Path not rendered, too many verts (%d)\n", maxPts);
+ *contourCnt = 0;
+ return;
+ }
+ // For the initial size of the chunk allocator, estimate based on the point count:
+ // one vertex per point for the initial passes, plus two for the vertices in the
+ // resulting Polys, since the same point may end up in two Polys. Assume minimal
+ // connectivity of one Edge per Vertex (will grow for intersections).
+ *sizeEstimate = maxPts * (3 * sizeof(Vertex) + sizeof(Edge));
+}
+
+int count_points(Poly* polys, SkPath::FillType fillType) {
+ int count = 0;
+ for (Poly* poly = polys; poly; poly = poly->fNext) {
+ if (apply_fill_type(fillType, poly) && poly->fCount >= 3) {
+ count += (poly->fCount - 2) * (TESSELLATOR_WIREFRAME ? 6 : 3);
+ }
+ }
+ return count;
+}
+
+} // namespace
+
+namespace GrTessellator {
+
+// Public entry points: drive the full pipeline above and emit the resulting triangles.
+
+int PathToTriangles(const SkPath& path, SkScalar tolerance, const SkRect& clipBounds,
+ VertexAllocator* vertexAllocator, bool antialias, const GrColor& color,
+ bool canTweakAlphaForCoverage, bool* isLinear) {
+ int contourCnt;
+ int sizeEstimate;
+ get_contour_count_and_size_estimate(path, tolerance, &contourCnt, &sizeEstimate);
+ if (contourCnt <= 0) {
+ *isLinear = true;
+ return 0;
+ }
+ SkChunkAlloc alloc(sizeEstimate);
+ Poly* polys = path_to_polys(path, tolerance, clipBounds, contourCnt, alloc, antialias,
+ isLinear);
+ SkPath::FillType fillType = path.getFillType();
+ int count = count_points(polys, fillType);
+ if (0 == count) {
+ return 0;
+ }
+
+ void* verts = vertexAllocator->lock(count);
+ if (!verts) {
+ SkDebugf("Could not allocate vertices\n");
+ return 0;
+ }
+
+ LOG("emitting %d verts\n", count);
+ AAParams aaParams;
+ aaParams.fTweakAlpha = canTweakAlphaForCoverage;
+ aaParams.fColor = color;
+
+ void* end = polys_to_triangles(polys, fillType, antialias ? &aaParams : nullptr, verts);
+ int actualCount = static_cast<int>((static_cast<uint8_t*>(end) - static_cast<uint8_t*>(verts))
+ / vertexAllocator->stride());
+ SkASSERT(actualCount <= count);
+ vertexAllocator->unlock(actualCount);
+ return actualCount;
+}
+
+int PathToVertices(const SkPath& path, SkScalar tolerance, const SkRect& clipBounds,
+ GrTessellator::WindingVertex** verts) {
+ int contourCnt;
+ int sizeEstimate;
+ get_contour_count_and_size_estimate(path, tolerance, &contourCnt, &sizeEstimate);
+ if (contourCnt <= 0) {
+ return 0;
+ }
+ SkChunkAlloc alloc(sizeEstimate);
+ bool isLinear;
+ Poly* polys = path_to_polys(path, tolerance, clipBounds, contourCnt, alloc, false, &isLinear);
+ SkPath::FillType fillType = path.getFillType();
+ int count = count_points(polys, fillType);
+ if (0 == count) {
+ *verts = nullptr;
+ return 0;
+ }
+
+ *verts = new GrTessellator::WindingVertex[count];
+ GrTessellator::WindingVertex* vertsEnd = *verts;
+ SkPoint* points = new SkPoint[count];
+ SkPoint* pointsEnd = points;
+ for (Poly* poly = polys; poly; poly = poly->fNext) {
+ if (apply_fill_type(fillType, poly)) {
+ SkPoint* start = pointsEnd;
+ pointsEnd = static_cast<SkPoint*>(poly->emit(nullptr, pointsEnd));
+ while (start != pointsEnd) {
+ vertsEnd->fPos = *start;
+ vertsEnd->fWinding = poly->fWinding;
+ ++start;
+ ++vertsEnd;
+ }
+ }
+ }
+ int actualCount = static_cast<int>(vertsEnd - *verts);
+ SkASSERT(actualCount <= count);
+ SkASSERT(pointsEnd - points == actualCount);
+ delete[] points;
+ return actualCount;
+}
+
+} // namespace
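
As a quick check on the vertex accounting above, the relationship between count_points() and the emitted triangles can be spelled out with a small worked example (the numbers below are illustrative only):

    // A monotone Poly with fCount vertices is triangulated into (fCount - 2)
    // triangles, so count_points() reserves 3 * (fCount - 2) vertex-buffer slots
    // for it (6 per triangle when TESSELLATOR_WIREFRAME is enabled).
    //
    //   fCount = 3  ->  1 triangle   ->  3 points
    //   fCount = 5  ->  3 triangles  ->  9 points
    //   fCount = 8  ->  6 triangles  -> 18 points
    //
    // PathToTriangles() then asserts that the actual emitted count never exceeds
    // this reservation before unlocking the vertex allocator.
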
diff --git a/gfx/skia/skia/src/gpu/GrTessellator.h b/gfx/skia/skia/src/gpu/GrTessellator.h
new file mode 100644
index 000000000..dd92015cc
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTessellator.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTessellator_DEFINED
+#define GrTessellator_DEFINED
+
+#include "GrColor.h"
+#include "SkPoint.h"
+
+class SkPath;
+struct SkRect;
+
+/**
+ * Provides utility functions for converting paths to a collection of triangles.
+ */
+
+#define TESSELLATOR_WIREFRAME 0
+
+namespace GrTessellator {
+
+class VertexAllocator {
+public:
+ VertexAllocator(size_t stride) : fStride(stride) {}
+ virtual ~VertexAllocator() {}
+ virtual void* lock(int vertexCount) = 0;
+ virtual void unlock(int actualCount) = 0;
+ size_t stride() const { return fStride; }
+private:
+ size_t fStride;
+};
+
+struct WindingVertex {
+ SkPoint fPos;
+ int fWinding;
+};
+
+// Triangulates a path to an array of vertices. Each triangle is represented as a set of three
+// WindingVertex entries, each of which contains the position and winding count (which is the same
+// for all three vertices of a triangle). The 'verts' out parameter is set to point to the resultant
+// vertex array. CALLER IS RESPONSIBLE for deleting this buffer to avoid a memory leak!
+int PathToVertices(const SkPath& path, SkScalar tolerance, const SkRect& clipBounds,
+ WindingVertex** verts);
+
+int PathToTriangles(const SkPath& path, SkScalar tolerance, const SkRect& clipBounds,
+ VertexAllocator*, bool antialias, const GrColor& color,
+ bool canTweakAlphaForCoverage, bool *isLinear);
+}
+
+#endif
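
A minimal usage sketch of the PathToVertices() entry point declared above; the path, tolerance, and clip bounds are arbitrary example values, and the loop body is left as a placeholder:

    SkPath path;
    path.moveTo(0.0f, 0.0f);
    path.lineTo(100.0f, 0.0f);
    path.lineTo(100.0f, 100.0f);
    path.close();

    const SkRect clipBounds = SkRect::MakeWH(200.0f, 200.0f);
    GrTessellator::WindingVertex* verts = nullptr;
    int count = GrTessellator::PathToVertices(path, 0.25f /* tolerance */, clipBounds, &verts);
    for (int i = 0; i + 2 < count; i += 3) {
        // verts[i], verts[i + 1] and verts[i + 2] form one triangle; all three
        // carry the same fWinding.
    }
    delete[] verts; // caller-owned, per the comment above
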
diff --git a/gfx/skia/skia/src/gpu/GrTestUtils.cpp b/gfx/skia/skia/src/gpu/GrTestUtils.cpp
new file mode 100644
index 000000000..2aae8df37
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTestUtils.cpp
@@ -0,0 +1,326 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrTestUtils.h"
+#include "GrStyle.h"
+#include "SkColorSpace.h"
+#include "SkDashPathPriv.h"
+#include "SkMatrix.h"
+#include "SkPath.h"
+#include "SkRRect.h"
+
+#ifdef GR_TEST_UTILS
+
+static const SkMatrix& test_matrix(SkRandom* random,
+ bool includeNonPerspective,
+ bool includePerspective) {
+ static SkMatrix gMatrices[5];
+ static const int kPerspectiveCount = 1;
+ static bool gOnce;
+ if (!gOnce) {
+ gOnce = true;
+ gMatrices[0].reset();
+ gMatrices[1].setTranslate(SkIntToScalar(-100), SkIntToScalar(100));
+ gMatrices[2].setRotate(SkIntToScalar(17));
+ gMatrices[3].setRotate(SkIntToScalar(185));
+ gMatrices[3].postTranslate(SkIntToScalar(66), SkIntToScalar(-33));
+ gMatrices[3].postScale(SkIntToScalar(2), SK_ScalarHalf);
+
+ // Perspective matrices
+ gMatrices[4].setRotate(SkIntToScalar(215));
+ gMatrices[4].set(SkMatrix::kMPersp0, 0.00013f);
+ gMatrices[4].set(SkMatrix::kMPersp1, -0.000039f);
+ }
+
+ uint32_t count = static_cast<uint32_t>(SK_ARRAY_COUNT(gMatrices));
+ if (includeNonPerspective && includePerspective) {
+ return gMatrices[random->nextULessThan(count)];
+ } else if (!includeNonPerspective) {
+ return gMatrices[count - 1 - random->nextULessThan(kPerspectiveCount)];
+ } else {
+ SkASSERT(includeNonPerspective && !includePerspective);
+ return gMatrices[random->nextULessThan(count - kPerspectiveCount)];
+ }
+}
+
+namespace GrTest {
+const SkMatrix& TestMatrix(SkRandom* random) { return test_matrix(random, true, true); }
+
+const SkMatrix& TestMatrixPreservesRightAngles(SkRandom* random) {
+ static SkMatrix gMatrices[5];
+ static bool gOnce;
+ if (!gOnce) {
+ gOnce = true;
+ // identity
+ gMatrices[0].reset();
+ // translation
+ gMatrices[1].setTranslate(SkIntToScalar(-100), SkIntToScalar(100));
+ // scale
+ gMatrices[2].setScale(SkIntToScalar(17), SkIntToScalar(17));
+ // scale + translation
+ gMatrices[3].setScale(SkIntToScalar(-17), SkIntToScalar(-17));
+ gMatrices[3].postTranslate(SkIntToScalar(66), SkIntToScalar(-33));
+ // orthogonal basis vectors
+ gMatrices[4].reset();
+ gMatrices[4].setScale(SkIntToScalar(-1), SkIntToScalar(-1));
+ gMatrices[4].setRotate(47);
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gMatrices); i++) {
+ SkASSERT(gMatrices[i].preservesRightAngles());
+ }
+ }
+ return gMatrices[random->nextULessThan(static_cast<uint32_t>(SK_ARRAY_COUNT(gMatrices)))];
+}
+
+const SkMatrix& TestMatrixRectStaysRect(SkRandom* random) {
+ static SkMatrix gMatrices[6];
+ static bool gOnce;
+ if (!gOnce) {
+ gOnce = true;
+ // identity
+ gMatrices[0].reset();
+ // translation
+ gMatrices[1].setTranslate(SkIntToScalar(-100), SkIntToScalar(100));
+ // scale
+ gMatrices[2].setScale(SkIntToScalar(17), SkIntToScalar(17));
+ // scale + translation
+ gMatrices[3].setScale(SkIntToScalar(-17), SkIntToScalar(-17));
+ gMatrices[3].postTranslate(SkIntToScalar(66), SkIntToScalar(-33));
+ // reflection
+ gMatrices[4].setScale(SkIntToScalar(-1), SkIntToScalar(-1));
+ // 90 degree rotation
+ gMatrices[5].setRotate(90);
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gMatrices); i++) {
+ SkASSERT(gMatrices[i].rectStaysRect());
+ }
+ }
+ return gMatrices[random->nextULessThan(static_cast<uint32_t>(SK_ARRAY_COUNT(gMatrices)))];
+}
+
+const SkMatrix& TestMatrixInvertible(SkRandom* random) { return test_matrix(random, true, false); }
+const SkMatrix& TestMatrixPerspective(SkRandom* random) { return test_matrix(random, false, true); }
+
+const SkRect& TestRect(SkRandom* random) {
+ static SkRect gRects[7];
+ static bool gOnce;
+ if (!gOnce) {
+ gOnce = true;
+ gRects[0] = SkRect::MakeWH(1.f, 1.f);
+ gRects[1] = SkRect::MakeWH(1.0f, 256.0f);
+ gRects[2] = SkRect::MakeWH(256.0f, 1.0f);
+ gRects[3] = SkRect::MakeLargest();
+ gRects[4] = SkRect::MakeLTRB(-65535.0f, -65535.0f, 65535.0f, 65535.0f);
+ gRects[5] = SkRect::MakeLTRB(-10.0f, -10.0f, 10.0f, 10.0f);
+ }
+ return gRects[random->nextULessThan(static_cast<uint32_t>(SK_ARRAY_COUNT(gRects)))];
+}
+
+// Just some simple rects for code which expects very sanitized input
+const SkRect& TestSquare(SkRandom* random) {
+ static SkRect gRects[2];
+ static bool gOnce;
+ if (!gOnce) {
+ gOnce = true;
+ gRects[0] = SkRect::MakeWH(128.f, 128.f);
+ gRects[1] = SkRect::MakeWH(256.0f, 256.0f);
+ }
+ return gRects[random->nextULessThan(static_cast<uint32_t>(SK_ARRAY_COUNT(gRects)))];
+}
+
+const SkRRect& TestRRectSimple(SkRandom* random) {
+ static SkRRect gRRect[2];
+ static bool gOnce;
+ if (!gOnce) {
+ gOnce = true;
+ SkRect rectangle = SkRect::MakeWH(10.f, 20.f);
+ // true round rect with circular corners
+ gRRect[0].setRectXY(rectangle, 1.f, 1.f);
+ // true round rect with elliptical corners
+ gRRect[1].setRectXY(rectangle, 2.0f, 1.0f);
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gRRect); i++) {
+ SkASSERT(gRRect[i].isSimple());
+ }
+ }
+ return gRRect[random->nextULessThan(static_cast<uint32_t>(SK_ARRAY_COUNT(gRRect)))];
+}
+
+const SkPath& TestPath(SkRandom* random) {
+ static SkPath gPath[7];
+ static bool gOnce;
+ if (!gOnce) {
+ gOnce = true;
+ // line
+ gPath[0].moveTo(0.f, 0.f);
+ gPath[0].lineTo(10.f, 10.f);
+ // quad
+ gPath[1].moveTo(0.f, 0.f);
+ gPath[1].quadTo(10.f, 10.f, 20.f, 20.f);
+ // conic
+ gPath[2].moveTo(0.f, 0.f);
+ gPath[2].conicTo(10.f, 10.f, 20.f, 20.f, 1.f);
+ // cubic
+ gPath[3].moveTo(0.f, 0.f);
+ gPath[3].cubicTo(10.f, 10.f, 20.f, 20.f, 30.f, 30.f);
+ // all three
+ gPath[4].moveTo(0.f, 0.f);
+ gPath[4].lineTo(10.f, 10.f);
+ gPath[4].quadTo(10.f, 10.f, 20.f, 20.f);
+ gPath[4].conicTo(10.f, 10.f, 20.f, 20.f, 1.f);
+ gPath[4].cubicTo(10.f, 10.f, 20.f, 20.f, 30.f, 30.f);
+ // convex
+ gPath[5].moveTo(0.0f, 0.0f);
+ gPath[5].lineTo(10.0f, 0.0f);
+ gPath[5].lineTo(10.0f, 10.0f);
+ gPath[5].lineTo(0.0f, 10.0f);
+ gPath[5].close();
+ // concave
+ gPath[6].moveTo(0.0f, 0.0f);
+ gPath[6].lineTo(5.0f, 5.0f);
+ gPath[6].lineTo(10.0f, 0.0f);
+ gPath[6].lineTo(10.0f, 10.0f);
+ gPath[6].lineTo(0.0f, 10.0f);
+ gPath[6].close();
+ }
+
+ return gPath[random->nextULessThan(static_cast<uint32_t>(SK_ARRAY_COUNT(gPath)))];
+}
+
+const SkPath& TestPathConvex(SkRandom* random) {
+ static SkPath gPath[3];
+ static bool gOnce;
+ if (!gOnce) {
+ gOnce = true;
+ // narrow rect
+ gPath[0].moveTo(-1.5f, -50.0f);
+ gPath[0].lineTo(-1.5f, -50.0f);
+ gPath[0].lineTo( 1.5f, -50.0f);
+ gPath[0].lineTo( 1.5f, 50.0f);
+ gPath[0].lineTo(-1.5f, 50.0f);
+ // degenerate
+ gPath[1].moveTo(-0.025f, -0.025f);
+ gPath[1].lineTo(-0.025f, -0.025f);
+ gPath[1].lineTo( 0.025f, -0.025f);
+ gPath[1].lineTo( 0.025f, 0.025f);
+ gPath[1].lineTo(-0.025f, 0.025f);
+ // clipped triangle
+ gPath[2].moveTo(-10.0f, -50.0f);
+ gPath[2].lineTo(-10.0f, -50.0f);
+ gPath[2].lineTo( 10.0f, -50.0f);
+ gPath[2].lineTo( 50.0f, 31.0f);
+ gPath[2].lineTo( 40.0f, 50.0f);
+ gPath[2].lineTo(-40.0f, 50.0f);
+ gPath[2].lineTo(-50.0f, 31.0f);
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gPath); i++) {
+ SkASSERT(SkPath::kConvex_Convexity == gPath[i].getConvexity());
+ }
+ }
+
+ return gPath[random->nextULessThan(static_cast<uint32_t>(SK_ARRAY_COUNT(gPath)))];
+}
+
+static void randomize_stroke_rec(SkStrokeRec* rec, SkRandom* random) {
+ bool strokeAndFill = random->nextBool();
+ SkScalar strokeWidth = random->nextBool() ? 0.f : 1.f;
+ rec->setStrokeStyle(strokeWidth, strokeAndFill);
+
+ SkPaint::Cap cap = SkPaint::Cap(random->nextULessThan(SkPaint::kCapCount));
+ SkPaint::Join join = SkPaint::Join(random->nextULessThan(SkPaint::kJoinCount));
+ SkScalar miterLimit = random->nextRangeScalar(1.f, 5.f);
+ rec->setStrokeParams(cap, join, miterLimit);
+}
+
+SkStrokeRec TestStrokeRec(SkRandom* random) {
+ SkStrokeRec::InitStyle style =
+ SkStrokeRec::InitStyle(random->nextULessThan(SkStrokeRec::kFill_InitStyle + 1));
+ SkStrokeRec rec(style);
+ randomize_stroke_rec(&rec, random);
+ return rec;
+}
+
+void TestStyle(SkRandom* random, GrStyle* style) {
+ SkStrokeRec::InitStyle initStyle =
+ SkStrokeRec::InitStyle(random->nextULessThan(SkStrokeRec::kFill_InitStyle + 1));
+ SkStrokeRec stroke(initStyle);
+ randomize_stroke_rec(&stroke, random);
+ sk_sp<SkPathEffect> pe;
+ if (random->nextBool()) {
+ int cnt = random->nextRangeU(1, 50) * 2;
+ SkAutoTDeleteArray<SkScalar> intervals(new SkScalar[cnt]);
+ SkScalar sum = 0;
+ for (int i = 0; i < cnt; i++) {
+ intervals[i] = random->nextRangeScalar(SkDoubleToScalar(0.01),
+ SkDoubleToScalar(10.0));
+ sum += intervals[i];
+ }
+ SkScalar phase = random->nextRangeScalar(0, sum);
+ pe = TestDashPathEffect::Make(intervals.get(), cnt, phase);
+ }
+ *style = GrStyle(stroke, pe.get());
+}
+
+TestDashPathEffect::TestDashPathEffect(const SkScalar* intervals, int count, SkScalar phase) {
+ fCount = count;
+ fIntervals.reset(count);
+ memcpy(fIntervals.get(), intervals, count * sizeof(SkScalar));
+ SkDashPath::CalcDashParameters(phase, intervals, count, &fInitialDashLength,
+ &fInitialDashIndex, &fIntervalLength, &fPhase);
+}
+
+ bool TestDashPathEffect::filterPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect) const {
+ return SkDashPath::InternalFilter(dst, src, rec, cullRect, fIntervals.get(), fCount,
+ fInitialDashLength, fInitialDashIndex, fIntervalLength);
+}
+
+SkPathEffect::DashType TestDashPathEffect::asADash(DashInfo* info) const {
+ if (info) {
+ if (info->fCount >= fCount && info->fIntervals) {
+ memcpy(info->fIntervals, fIntervals.get(), fCount * sizeof(SkScalar));
+ }
+ info->fCount = fCount;
+ info->fPhase = fPhase;
+ }
+ return kDash_DashType;
+}
+
+sk_sp<SkColorSpace> TestColorSpace(SkRandom* random) {
+ static sk_sp<SkColorSpace> gColorSpaces[3];
+ static bool gOnce;
+ if (!gOnce) {
+ gOnce = true;
+ // No color space (legacy mode)
+ gColorSpaces[0] = nullptr;
+ // sRGB or Adobe
+ gColorSpaces[1] = SkColorSpace::NewNamed(SkColorSpace::kSRGB_Named);
+ gColorSpaces[2] = SkColorSpace::NewNamed(SkColorSpace::kAdobeRGB_Named);
+ }
+ return gColorSpaces[random->nextULessThan(static_cast<uint32_t>(SK_ARRAY_COUNT(gColorSpaces)))];
+}
+
+sk_sp<GrColorSpaceXform> TestColorXform(SkRandom* random) {
+ static sk_sp<GrColorSpaceXform> gXforms[3];
+ static bool gOnce;
+ if (!gOnce) {
+ gOnce = true;
+ sk_sp<SkColorSpace> srgb = SkColorSpace::NewNamed(SkColorSpace::kSRGB_Named);
+ sk_sp<SkColorSpace> adobe = SkColorSpace::NewNamed(SkColorSpace::kAdobeRGB_Named);
+ // No gamut change
+ gXforms[0] = nullptr;
+ // To larger gamut
+ gXforms[1] = GrColorSpaceXform::Make(srgb.get(), adobe.get());
+ // To smaller gamut
+ gXforms[2] = GrColorSpaceXform::Make(adobe.get(), srgb.get());
+ }
+ return gXforms[random->nextULessThan(static_cast<uint32_t>(SK_ARRAY_COUNT(gXforms)))];
+}
+
+} // namespace GrTest
+
+#endif
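
A small sketch of how a randomized GPU unit test (built with GR_TEST_UTILS) might consume the helpers above; the surrounding test harness is assumed and not shown:

    SkRandom random;
    const SkMatrix& viewMatrix = GrTest::TestMatrix(&random);
    const SkPath& path = GrTest::TestPath(&random);
    SkStrokeRec stroke = GrTest::TestStrokeRec(&random);
    sk_sp<GrColorSpaceXform> xform = GrTest::TestColorXform(&random);
    // ... build and run a draw op from viewMatrix, path, stroke and xform ...
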
diff --git a/gfx/skia/skia/src/gpu/GrTexture.cpp b/gfx/skia/skia/src/gpu/GrTexture.cpp
new file mode 100644
index 000000000..bb1a6bb08
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTexture.cpp
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrContext.h"
+#include "GrCaps.h"
+#include "GrGpu.h"
+#include "GrResourceKey.h"
+#include "GrRenderTarget.h"
+#include "GrRenderTargetPriv.h"
+#include "GrTexture.h"
+#include "GrTexturePriv.h"
+#include "GrTypes.h"
+#include "SkMath.h"
+#include "SkMipMap.h"
+#include "SkTypes.h"
+
+void GrTexture::dirtyMipMaps(bool mipMapsDirty) {
+ if (mipMapsDirty) {
+ if (kValid_MipMapsStatus == fMipMapsStatus) {
+ fMipMapsStatus = kAllocated_MipMapsStatus;
+ }
+ } else {
+ const bool sizeChanged = kNotAllocated_MipMapsStatus == fMipMapsStatus;
+ fMipMapsStatus = kValid_MipMapsStatus;
+ if (sizeChanged) {
+ // This must not be called until after changing fMipMapsStatus.
+ this->didChangeGpuMemorySize();
+ // TODO(http://skbug.com/4548) - The desc and scratch key should be
+ // updated to reflect the newly-allocated mipmaps.
+ }
+ }
+}
+
+size_t GrTexture::onGpuMemorySize() const {
+ size_t textureSize;
+
+ if (GrPixelConfigIsCompressed(fDesc.fConfig)) {
+ textureSize = GrCompressedFormatDataSize(fDesc.fConfig, fDesc.fWidth, fDesc.fHeight);
+ } else {
+ textureSize = (size_t) fDesc.fWidth * fDesc.fHeight * GrBytesPerPixel(fDesc.fConfig);
+ }
+
+ if (this->texturePriv().hasMipMaps()) {
+ // We don't have to worry about the mipmaps being a different size than
+ // we'd expect because we never change fDesc.fWidth/fHeight.
+ textureSize += textureSize/3;
+ }
+
+ SkASSERT(!SkToBool(fDesc.fFlags & kRenderTarget_GrSurfaceFlag));
+ SkASSERT(textureSize <= WorstCaseSize(fDesc));
+
+ return textureSize;
+}
+
+void GrTexture::validateDesc() const {
+ if (this->asRenderTarget()) {
+ // This texture has a render target
+ SkASSERT(0 != (fDesc.fFlags & kRenderTarget_GrSurfaceFlag));
+ SkASSERT(fDesc.fSampleCnt == this->asRenderTarget()->numColorSamples());
+ } else {
+ SkASSERT(0 == (fDesc.fFlags & kRenderTarget_GrSurfaceFlag));
+ SkASSERT(0 == fDesc.fSampleCnt);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+// FIXME: This should be refactored with the code in gl/GrGLGpu.cpp.
+GrSurfaceOrigin resolve_origin(const GrSurfaceDesc& desc) {
+ // By default, GrRenderTargets are GL's normal orientation so that they
+ // can be drawn to by the outside world without the client having
+ // to render upside down.
+ bool renderTarget = 0 != (desc.fFlags & kRenderTarget_GrSurfaceFlag);
+ if (kDefault_GrSurfaceOrigin == desc.fOrigin) {
+ return renderTarget ? kBottomLeft_GrSurfaceOrigin : kTopLeft_GrSurfaceOrigin;
+ } else {
+ return desc.fOrigin;
+ }
+}
+}
+
+//////////////////////////////////////////////////////////////////////////////
+GrTexture::GrTexture(GrGpu* gpu, const GrSurfaceDesc& desc, GrSLType samplerType,
+ bool wasMipMapDataProvided)
+ : INHERITED(gpu, desc)
+ , fSamplerType(samplerType)
+ // Gamma treatment is explicitly set after creation via GrTexturePriv
+ , fGammaTreatment(SkSourceGammaTreatment::kIgnore) {
+ if (wasMipMapDataProvided) {
+ fMipMapsStatus = kValid_MipMapsStatus;
+ fMaxMipMapLevel = SkMipMap::ComputeLevelCount(fDesc.fWidth, fDesc.fHeight);
+ } else {
+ fMipMapsStatus = kNotAllocated_MipMapsStatus;
+ fMaxMipMapLevel = 0;
+ }
+}
+
+void GrTexture::computeScratchKey(GrScratchKey* key) const {
+ if (!GrPixelConfigIsCompressed(fDesc.fConfig)) {
+ GrTexturePriv::ComputeScratchKey(fDesc, key);
+ }
+}
+
+void GrTexturePriv::ComputeScratchKey(const GrSurfaceDesc& desc, GrScratchKey* key) {
+ static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
+
+ GrSurfaceOrigin origin = resolve_origin(desc);
+ uint32_t flags = desc.fFlags & ~kCheckAllocation_GrSurfaceFlag;
+
+ // make sure desc.fConfig fits in 5 bits
+ SkASSERT(sk_float_log2(kLast_GrPixelConfig) <= 5);
+ SkASSERT(static_cast<int>(desc.fConfig) < (1 << 5));
+ SkASSERT(desc.fSampleCnt < (1 << 8));
+ SkASSERT(flags < (1 << 10));
+ SkASSERT(static_cast<int>(origin) < (1 << 8));
+
+ GrScratchKey::Builder builder(key, kType, 3);
+ builder[0] = desc.fWidth;
+ builder[1] = desc.fHeight;
+ builder[2] = desc.fConfig | (desc.fIsMipMapped << 5) | (desc.fSampleCnt << 6) | (flags << 14)
+ | (origin << 24);
+}
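
For reference, the bit layout packed into builder[2] by ComputeScratchKey() above works out as follows; the field widths match the SkASSERTs that precede it:

    //   bits  0-4   desc.fConfig       (kLast_GrPixelConfig fits in 5 bits)
    //   bit   5     desc.fIsMipMapped
    //   bits  6-13  desc.fSampleCnt    (< 1 << 8)
    //   bits 14-23  flags              (< 1 << 10)
    //   bits 24-31  origin             (< 1 << 8)
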
diff --git a/gfx/skia/skia/src/gpu/GrTextureAccess.cpp b/gfx/skia/skia/src/gpu/GrTextureAccess.cpp
new file mode 100644
index 000000000..675bc2077
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureAccess.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrTextureAccess.h"
+#include "GrColor.h"
+#include "GrTexture.h"
+
+GrTextureAccess::GrTextureAccess() {}
+
+GrTextureAccess::GrTextureAccess(GrTexture* texture, const GrTextureParams& params) {
+ this->reset(texture, params);
+}
+
+GrTextureAccess::GrTextureAccess(GrTexture* texture,
+ GrTextureParams::FilterMode filterMode,
+ SkShader::TileMode tileXAndY,
+ GrShaderFlags visibility) {
+ this->reset(texture, filterMode, tileXAndY, visibility);
+}
+
+void GrTextureAccess::reset(GrTexture* texture,
+ const GrTextureParams& params,
+ GrShaderFlags visibility) {
+ SkASSERT(texture);
+ fTexture.set(SkRef(texture), kRead_GrIOType);
+ fParams = params;
+ fVisibility = visibility;
+}
+
+void GrTextureAccess::reset(GrTexture* texture,
+ GrTextureParams::FilterMode filterMode,
+ SkShader::TileMode tileXAndY,
+ GrShaderFlags visibility) {
+ SkASSERT(texture);
+ fTexture.set(SkRef(texture), kRead_GrIOType);
+ fParams.reset(tileXAndY, filterMode);
+ fVisibility = visibility;
+}
diff --git a/gfx/skia/skia/src/gpu/GrTextureParamsAdjuster.cpp b/gfx/skia/skia/src/gpu/GrTextureParamsAdjuster.cpp
new file mode 100644
index 000000000..f51cc54ab
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureParamsAdjuster.cpp
@@ -0,0 +1,522 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrTextureParamsAdjuster.h"
+
+#include "GrCaps.h"
+#include "GrColorSpaceXform.h"
+#include "GrContext.h"
+#include "GrDrawContext.h"
+#include "GrGpu.h"
+#include "GrGpuResourcePriv.h"
+#include "GrResourceKey.h"
+#include "GrTexture.h"
+#include "GrTextureParams.h"
+#include "GrTextureProvider.h"
+#include "SkCanvas.h"
+#include "SkGr.h"
+#include "SkGrPriv.h"
+#include "effects/GrBicubicEffect.h"
+#include "effects/GrSimpleTextureEffect.h"
+#include "effects/GrTextureDomain.h"
+
+typedef GrTextureProducer::CopyParams CopyParams;
+
+//////////////////////////////////////////////////////////////////////////////
+
+static GrTexture* copy_on_gpu(GrTexture* inputTexture, const SkIRect* subset,
+ const CopyParams& copyParams) {
+ SkASSERT(!subset || !subset->isEmpty());
+ GrContext* context = inputTexture->getContext();
+ SkASSERT(context);
+
+ GrPixelConfig config = GrMakePixelConfigUncompressed(inputTexture->config());
+
+ sk_sp<GrDrawContext> copyDC = context->makeDrawContextWithFallback(SkBackingFit::kExact,
+ copyParams.fWidth,
+ copyParams.fHeight,
+ config, nullptr);
+ if (!copyDC) {
+ return nullptr;
+ }
+
+ GrPaint paint;
+ paint.setGammaCorrect(true);
+
+ SkScalar sx SK_INIT_TO_AVOID_WARNING;
+ SkScalar sy SK_INIT_TO_AVOID_WARNING;
+ if (subset) {
+ sx = 1.f / inputTexture->width();
+ sy = 1.f / inputTexture->height();
+ }
+
+ if (copyParams.fFilter != GrTextureParams::kNone_FilterMode && subset &&
+ (subset->width() != copyParams.fWidth || subset->height() != copyParams.fHeight)) {
+ SkRect domain;
+ domain.fLeft = (subset->fLeft + 0.5f) * sx;
+ domain.fTop = (subset->fTop + 0.5f)* sy;
+ domain.fRight = (subset->fRight - 0.5f) * sx;
+ domain.fBottom = (subset->fBottom - 0.5f) * sy;
+ // This would cause us to read values from outside the subset. Surely, the caller knows
+ // better!
+ SkASSERT(copyParams.fFilter != GrTextureParams::kMipMap_FilterMode);
+ paint.addColorFragmentProcessor(
+ GrTextureDomainEffect::Make(inputTexture, nullptr, SkMatrix::I(), domain,
+ GrTextureDomain::kClamp_Mode,
+ copyParams.fFilter));
+ } else {
+ GrTextureParams params(SkShader::kClamp_TileMode, copyParams.fFilter);
+ paint.addColorTextureProcessor(inputTexture, nullptr, SkMatrix::I(), params);
+ }
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ SkRect localRect;
+ if (subset) {
+ localRect = SkRect::Make(*subset);
+ localRect.fLeft *= sx;
+ localRect.fTop *= sy;
+ localRect.fRight *= sx;
+ localRect.fBottom *= sy;
+ } else {
+ localRect = SkRect::MakeWH(1.f, 1.f);
+ }
+
+ SkRect dstRect = SkRect::MakeIWH(copyParams.fWidth, copyParams.fHeight);
+ copyDC->fillRectToRect(GrNoClip(), paint, SkMatrix::I(), dstRect, localRect);
+ return copyDC->asTexture().release();
+}
+
+GrTextureAdjuster::GrTextureAdjuster(GrTexture* original, SkAlphaType alphaType,
+ const SkIRect& contentArea, uint32_t uniqueID,
+ SkColorSpace* cs)
+ : INHERITED(contentArea.width(), contentArea.height(),
+ GrPixelConfigIsAlphaOnly(original->config()))
+ , fOriginal(original)
+ , fAlphaType(alphaType)
+ , fColorSpace(cs)
+ , fUniqueID(uniqueID)
+{
+ SkASSERT(SkIRect::MakeWH(original->width(), original->height()).contains(contentArea));
+ if (contentArea.fLeft > 0 || contentArea.fTop > 0 ||
+ contentArea.fRight < original->width() || contentArea.fBottom < original->height()) {
+ fContentArea.set(contentArea);
+ }
+}
+
+void GrTextureAdjuster::makeCopyKey(const CopyParams& params, GrUniqueKey* copyKey) {
+ GrUniqueKey baseKey;
+ GrMakeKeyFromImageID(&baseKey, fUniqueID, SkIRect::MakeWH(this->width(), this->height()));
+ MakeCopyKeyFromOrigKey(baseKey, params, copyKey);
+}
+
+void GrTextureAdjuster::didCacheCopy(const GrUniqueKey& copyKey) {
+ // We don't currently have a mechanism for notifications on Images!
+}
+
+SkColorSpace* GrTextureAdjuster::getColorSpace() {
+ return fColorSpace;
+}
+
+GrTexture* GrTextureAdjuster::refCopy(const CopyParams& copyParams) {
+ GrTexture* texture = this->originalTexture();
+ GrContext* context = texture->getContext();
+ const SkIRect* contentArea = this->contentAreaOrNull();
+ GrUniqueKey key;
+ this->makeCopyKey(copyParams, &key);
+ if (key.isValid()) {
+ GrTexture* cachedCopy = context->textureProvider()->findAndRefTextureByUniqueKey(key);
+ if (cachedCopy) {
+ return cachedCopy;
+ }
+ }
+ GrTexture* copy = copy_on_gpu(texture, contentArea, copyParams);
+ if (copy) {
+ if (key.isValid()) {
+ copy->resourcePriv().setUniqueKey(key);
+ this->didCacheCopy(key);
+ }
+ }
+ return copy;
+}
+
+GrTexture* GrTextureAdjuster::refTextureSafeForParams(const GrTextureParams& params,
+ SkSourceGammaTreatment gammaTreatment,
+ SkIPoint* outOffset) {
+ GrTexture* texture = this->originalTexture();
+ GrContext* context = texture->getContext();
+ CopyParams copyParams;
+ const SkIRect* contentArea = this->contentAreaOrNull();
+
+ if (!context) {
+ // The texture was abandoned.
+ return nullptr;
+ }
+
+ if (contentArea && GrTextureParams::kMipMap_FilterMode == params.filterMode()) {
+ // If we generate a MIP chain for the texture it will read pixel values from outside the content
+ // area.
+ copyParams.fWidth = contentArea->width();
+ copyParams.fHeight = contentArea->height();
+ copyParams.fFilter = GrTextureParams::kBilerp_FilterMode;
+ } else if (!context->getGpu()->makeCopyForTextureParams(texture, params, &copyParams)) {
+ if (outOffset) {
+ if (contentArea) {
+ outOffset->set(contentArea->fLeft, contentArea->fTop);
+ } else {
+ outOffset->set(0, 0);
+ }
+ }
+ return SkRef(texture);
+ }
+
+ GrTexture* copy = this->refCopy(copyParams);
+ if (copy && outOffset) {
+ outOffset->set(0, 0);
+ }
+ return copy;
+}
+
+enum DomainMode {
+ kNoDomain_DomainMode,
+ kDomain_DomainMode,
+ kTightCopy_DomainMode
+};
+
+/** Determines whether a texture domain is necessary and if so what domain to use. There are two
+ * rectangles to consider:
+ * - The first is the content area specified by the texture adjuster. We can *never* allow
+ * filtering to cause bleed of pixels outside this rectangle.
+ * - The second rectangle is the constraint rectangle, which is known to be contained by the
+ * content area. The filterConstraint specifies whether we are allowed to bleed across this
+ * rect.
+ *
+ * We want to avoid using a domain if possible. We consider the above rectangles, the filter type,
+ * and whether the coords generated by the draw would all fall within the constraint rect. If the
+ * latter is true we only need to consider whether the filter would extend beyond the rects.
+ */
+static DomainMode determine_domain_mode(
+ const SkRect& constraintRect,
+ GrTextureAdjuster::FilterConstraint filterConstraint,
+ bool coordsLimitedToConstraintRect,
+ int texW, int texH,
+ const SkIRect* textureContentArea,
+ const GrTextureParams::FilterMode* filterModeOrNullForBicubic,
+ SkRect* domainRect) {
+
+ SkASSERT(SkRect::MakeIWH(texW, texH).contains(constraintRect));
+ // We only expect a content area rect if there is some non-content area.
+ SkASSERT(!textureContentArea ||
+ (!textureContentArea->contains(SkIRect::MakeWH(texW, texH)) &&
+ SkRect::Make(*textureContentArea).contains(constraintRect)));
+
+ SkRect textureBounds = SkRect::MakeIWH(texW, texH);
+ // If the src rectangle contains the whole texture then no need for a domain.
+ if (constraintRect.contains(textureBounds)) {
+ return kNoDomain_DomainMode;
+ }
+
+ bool restrictFilterToRect = (filterConstraint == GrTextureProducer::kYes_FilterConstraint);
+
+ // If we can filter outside the constraint rect, and there is no non-content area of the
+ // texture, and we aren't going to generate sample coords outside the constraint rect then we
+ // don't need a domain.
+ if (!restrictFilterToRect && !textureContentArea && coordsLimitedToConstraintRect) {
+ return kNoDomain_DomainMode;
+ }
+
+ // Get the domain inset based on sampling mode (or bail if mipped)
+ SkScalar filterHalfWidth = 0.f;
+ if (filterModeOrNullForBicubic) {
+ switch (*filterModeOrNullForBicubic) {
+ case GrTextureParams::kNone_FilterMode:
+ if (coordsLimitedToConstraintRect) {
+ return kNoDomain_DomainMode;
+ } else {
+ filterHalfWidth = 0.f;
+ }
+ break;
+ case GrTextureParams::kBilerp_FilterMode:
+ filterHalfWidth = .5f;
+ break;
+ case GrTextureParams::kMipMap_FilterMode:
+ if (restrictFilterToRect || textureContentArea) {
+ // No domain can save us here.
+ return kTightCopy_DomainMode;
+ }
+ return kNoDomain_DomainMode;
+ }
+ } else {
+ // bicubic does nearest filtering internally.
+ filterHalfWidth = 1.5f;
+ }
+
+ // Both bilerp and bicubic use bilinear filtering and so need to be clamped to the center
+ // of the edge texel. Pinning to the texel center has no impact on nearest mode and MIP-maps.
+
+ static const SkScalar kDomainInset = 0.5f;
+ // Figure out the limits of pixels we're allowed to sample from.
+ // Unless we know the amount of outset and the texture matrix we have to conservatively enforce
+ // the domain.
+ if (restrictFilterToRect) {
+ domainRect->fLeft = constraintRect.fLeft + kDomainInset;
+ domainRect->fTop = constraintRect.fTop + kDomainInset;
+ domainRect->fRight = constraintRect.fRight - kDomainInset;
+ domainRect->fBottom = constraintRect.fBottom - kDomainInset;
+ } else if (textureContentArea) {
+ // If we got here then: there is a textureContentArea, the coords are limited to the
+ // constraint rect, and we're allowed to filter across the constraint rect boundary. So
+ // we check whether the filter would reach across the edge of the content area.
+ // We will only set the sides that are required.
+
+ domainRect->setLargest();
+ if (coordsLimitedToConstraintRect) {
+ // We may be able to use the fact that the texture coords are limited to the constraint
+ // rect in order to avoid having to add a domain.
+ bool needContentAreaConstraint = false;
+ if (textureContentArea->fLeft > 0 &&
+ textureContentArea->fLeft + filterHalfWidth > constraintRect.fLeft) {
+ domainRect->fLeft = textureContentArea->fLeft + kDomainInset;
+ needContentAreaConstraint = true;
+ }
+ if (textureContentArea->fTop > 0 &&
+ textureContentArea->fTop + filterHalfWidth > constraintRect.fTop) {
+ domainRect->fTop = textureContentArea->fTop + kDomainInset;
+ needContentAreaConstraint = true;
+ }
+ if (textureContentArea->fRight < texW &&
+ textureContentArea->fRight - filterHalfWidth < constraintRect.fRight) {
+ domainRect->fRight = textureContentArea->fRight - kDomainInset;
+ needContentAreaConstraint = true;
+ }
+ if (textureContentArea->fBottom < texH &&
+ textureContentArea->fBottom - filterHalfWidth < constraintRect.fBottom) {
+ domainRect->fBottom = textureContentArea->fBottom - kDomainInset;
+ needContentAreaConstraint = true;
+ }
+ if (!needContentAreaConstraint) {
+ return kNoDomain_DomainMode;
+ }
+ } else {
+ // Our sample coords for the texture are allowed to be outside the constraintRect so we
+ // don't consider it when computing the domain.
+ if (textureContentArea->fLeft != 0) {
+ domainRect->fLeft = textureContentArea->fLeft + kDomainInset;
+ }
+ if (textureContentArea->fTop != 0) {
+ domainRect->fTop = textureContentArea->fTop + kDomainInset;
+ }
+ if (textureContentArea->fRight != texW) {
+ domainRect->fRight = textureContentArea->fRight - kDomainInset;
+ }
+ if (textureContentArea->fBottom != texH) {
+ domainRect->fBottom = textureContentArea->fBottom - kDomainInset;
+ }
+ }
+ } else {
+ return kNoDomain_DomainMode;
+ }
+
+ if (domainRect->fLeft > domainRect->fRight) {
+ domainRect->fLeft = domainRect->fRight = SkScalarAve(domainRect->fLeft, domainRect->fRight);
+ }
+ if (domainRect->fTop > domainRect->fBottom) {
+ domainRect->fTop = domainRect->fBottom = SkScalarAve(domainRect->fTop, domainRect->fBottom);
+ }
+ domainRect->fLeft /= texW;
+ domainRect->fTop /= texH;
+ domainRect->fRight /= texW;
+ domainRect->fBottom /= texH;
+ return kDomain_DomainMode;
+}
+
+static sk_sp<GrFragmentProcessor> create_fp_for_domain_and_filter(
+ GrTexture* texture,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkMatrix& textureMatrix,
+ DomainMode domainMode,
+ const SkRect& domain,
+ const GrTextureParams::FilterMode* filterOrNullForBicubic) {
+ SkASSERT(kTightCopy_DomainMode != domainMode);
+ if (filterOrNullForBicubic) {
+ if (kDomain_DomainMode == domainMode) {
+ return GrTextureDomainEffect::Make(texture, std::move(colorSpaceXform), textureMatrix,
+ domain, GrTextureDomain::kClamp_Mode,
+ *filterOrNullForBicubic);
+ } else {
+ GrTextureParams params(SkShader::kClamp_TileMode, *filterOrNullForBicubic);
+ return GrSimpleTextureEffect::Make(texture, std::move(colorSpaceXform), textureMatrix,
+ params);
+ }
+ } else {
+ if (kDomain_DomainMode == domainMode) {
+ return GrBicubicEffect::Make(texture, std::move(colorSpaceXform), textureMatrix,
+ domain);
+ } else {
+ static const SkShader::TileMode kClampClamp[] =
+ { SkShader::kClamp_TileMode, SkShader::kClamp_TileMode };
+ return GrBicubicEffect::Make(texture, std::move(colorSpaceXform), textureMatrix,
+ kClampClamp);
+ }
+ }
+}
+
+sk_sp<GrFragmentProcessor> GrTextureAdjuster::createFragmentProcessor(
+ const SkMatrix& origTextureMatrix,
+ const SkRect& origConstraintRect,
+ FilterConstraint filterConstraint,
+ bool coordsLimitedToConstraintRect,
+ const GrTextureParams::FilterMode* filterOrNullForBicubic,
+ SkColorSpace* dstColorSpace,
+ SkSourceGammaTreatment gammaTreatment) {
+
+ SkMatrix textureMatrix = origTextureMatrix;
+ const SkIRect* contentArea = this->contentAreaOrNull();
+ // Convert the constraintRect to be relative to the texture rather than the content area so
+ // that both rects are in the same coordinate system.
+ SkTCopyOnFirstWrite<SkRect> constraintRect(origConstraintRect);
+ if (contentArea) {
+ SkScalar l = SkIntToScalar(contentArea->fLeft);
+ SkScalar t = SkIntToScalar(contentArea->fTop);
+ constraintRect.writable()->offset(l, t);
+ textureMatrix.postTranslate(l, t);
+ }
+
+ SkRect domain;
+ GrTextureParams params;
+ if (filterOrNullForBicubic) {
+ params.setFilterMode(*filterOrNullForBicubic);
+ }
+ SkAutoTUnref<GrTexture> texture(this->refTextureSafeForParams(params, gammaTreatment, nullptr));
+ if (!texture) {
+ return nullptr;
+ }
+ // If we made a copy then we only copied the contentArea, in which case the new texture is all
+ // content.
+ if (texture != this->originalTexture()) {
+ contentArea = nullptr;
+ }
+
+ DomainMode domainMode =
+ determine_domain_mode(*constraintRect, filterConstraint, coordsLimitedToConstraintRect,
+ texture->width(), texture->height(),
+ contentArea, filterOrNullForBicubic,
+ &domain);
+ if (kTightCopy_DomainMode == domainMode) {
+ // TODO: Copy the texture and adjust the texture matrix (both parts need to consider
+ // non-int constraint rect)
+ // For now: treat as bilerp and ignore what goes on above level 0.
+
+ // We only expect MIP maps to require a tight copy.
+ SkASSERT(filterOrNullForBicubic &&
+ GrTextureParams::kMipMap_FilterMode == *filterOrNullForBicubic);
+ static const GrTextureParams::FilterMode kBilerp = GrTextureParams::kBilerp_FilterMode;
+ domainMode =
+ determine_domain_mode(*constraintRect, filterConstraint, coordsLimitedToConstraintRect,
+ texture->width(), texture->height(),
+ contentArea, &kBilerp, &domain);
+ SkASSERT(kTightCopy_DomainMode != domainMode);
+ }
+ SkASSERT(kNoDomain_DomainMode == domainMode ||
+ (domain.fLeft <= domain.fRight && domain.fTop <= domain.fBottom));
+ textureMatrix.postIDiv(texture->width(), texture->height());
+ sk_sp<GrColorSpaceXform> colorSpaceXform = GrColorSpaceXform::Make(this->getColorSpace(),
+ dstColorSpace);
+ return create_fp_for_domain_and_filter(texture, std::move(colorSpaceXform), textureMatrix,
+ domainMode, domain, filterOrNullForBicubic);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GrTexture* GrTextureMaker::refTextureForParams(const GrTextureParams& params,
+ SkSourceGammaTreatment gammaTreatment) {
+ CopyParams copyParams;
+ bool willBeMipped = params.filterMode() == GrTextureParams::kMipMap_FilterMode;
+
+ if (!fContext->caps()->mipMapSupport()) {
+ willBeMipped = false;
+ }
+
+ if (!fContext->getGpu()->makeCopyForTextureParams(this->width(), this->height(), params,
+ &copyParams)) {
+ return this->refOriginalTexture(willBeMipped, gammaTreatment);
+ }
+ GrUniqueKey copyKey;
+ this->makeCopyKey(copyParams, &copyKey);
+ if (copyKey.isValid()) {
+ GrTexture* result = fContext->textureProvider()->findAndRefTextureByUniqueKey(copyKey);
+ if (result) {
+ return result;
+ }
+ }
+
+ GrTexture* result = this->generateTextureForParams(copyParams, willBeMipped, gammaTreatment);
+ if (!result) {
+ return nullptr;
+ }
+
+ if (copyKey.isValid()) {
+ fContext->textureProvider()->assignUniqueKeyToTexture(copyKey, result);
+ this->didCacheCopy(copyKey);
+ }
+ return result;
+}
+
+sk_sp<GrFragmentProcessor> GrTextureMaker::createFragmentProcessor(
+ const SkMatrix& textureMatrix,
+ const SkRect& constraintRect,
+ FilterConstraint filterConstraint,
+ bool coordsLimitedToConstraintRect,
+ const GrTextureParams::FilterMode* filterOrNullForBicubic,
+ SkColorSpace* dstColorSpace,
+ SkSourceGammaTreatment gammaTreatment) {
+
+ const GrTextureParams::FilterMode* fmForDetermineDomain = filterOrNullForBicubic;
+ if (filterOrNullForBicubic && GrTextureParams::kMipMap_FilterMode == *filterOrNullForBicubic &&
+ kYes_FilterConstraint == filterConstraint) {
+ // TODO: Here we should force a copy restricted to the constraintRect since MIP maps will
+ // read outside the constraint rect. However, as in the adjuster case, we aren't currently
+ // doing that.
+ // Instead we compute the domain as though we were bilerping, which is only correct if we
+ // only sample level 0.
+ static const GrTextureParams::FilterMode kBilerp = GrTextureParams::kBilerp_FilterMode;
+ fmForDetermineDomain = &kBilerp;
+ }
+
+ GrTextureParams params;
+ if (filterOrNullForBicubic) {
+ params.reset(SkShader::kClamp_TileMode, *filterOrNullForBicubic);
+ } else {
+ // Bicubic doesn't use filtering for its texture accesses.
+ params.reset(SkShader::kClamp_TileMode, GrTextureParams::kNone_FilterMode);
+ }
+ SkAutoTUnref<GrTexture> texture(this->refTextureForParams(params, gammaTreatment));
+ if (!texture) {
+ return nullptr;
+ }
+ SkRect domain;
+ DomainMode domainMode =
+ determine_domain_mode(constraintRect, filterConstraint, coordsLimitedToConstraintRect,
+ texture->width(), texture->height(), nullptr, fmForDetermineDomain,
+ &domain);
+ SkASSERT(kTightCopy_DomainMode != domainMode);
+ SkMatrix normalizedTextureMatrix = textureMatrix;
+ normalizedTextureMatrix.postIDiv(texture->width(), texture->height());
+ sk_sp<GrColorSpaceXform> colorSpaceXform = GrColorSpaceXform::Make(this->getColorSpace(),
+ dstColorSpace);
+ return create_fp_for_domain_and_filter(texture, std::move(colorSpaceXform),
+ normalizedTextureMatrix, domainMode, domain,
+ filterOrNullForBicubic);
+}
+
+GrTexture* GrTextureMaker::generateTextureForParams(const CopyParams& copyParams, bool willBeMipped,
+ SkSourceGammaTreatment gammaTreatment) {
+ SkAutoTUnref<GrTexture> original(this->refOriginalTexture(willBeMipped, gammaTreatment));
+ if (!original) {
+ return nullptr;
+ }
+ return copy_on_gpu(original, nullptr, copyParams);
+}
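
To make the domain computation above concrete, a hedged worked example (all numbers are hypothetical):

    // determine_domain_mode(constraintRect = {8, 8, 40, 40},
    //                       filterConstraint = kYes_FilterConstraint,
    //                       coordsLimitedToConstraintRect = true,
    //                       texW = 64, texH = 64,
    //                       textureContentArea = nullptr,
    //                       filter = kBilerp_FilterMode, &domain)
    //   -> constraintRect does not cover the whole texture, so a domain is needed;
    //   -> the rect is inset by kDomainInset = 0.5 on each side: {8.5, 8.5, 39.5, 39.5};
    //   -> normalized by texW/texH: {0.1328, 0.1328, 0.6172, 0.6172};
    //   -> returns kDomain_DomainMode.
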
diff --git a/gfx/skia/skia/src/gpu/GrTextureParamsAdjuster.h b/gfx/skia/skia/src/gpu/GrTextureParamsAdjuster.h
new file mode 100644
index 000000000..3de4db744
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureParamsAdjuster.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextureMaker_DEFINED
+#define GrTextureMaker_DEFINED
+
+#include "GrTextureParams.h"
+#include "GrResourceKey.h"
+#include "GrTexture.h"
+#include "SkTLazy.h"
+
+class GrContext;
+class GrTextureParams;
+class GrUniqueKey;
+class SkBitmap;
+
+/**
+ * Different GPUs and API extensions have different requirements with respect to what texture
+ * sampling parameters may be used with textures of various types. This class facilitates making
+ * a texture compatible with a given GrTextureParams. There are two immediate subclasses defined
+ * below. One is a base class for sources that are inherently texture-backed (e.g. a texture-backed
+ * SkImage). It supports subsetting the original texture. The other is for use cases where the
+ * source can generate a texture that represents some content (e.g. cpu pixels, SkPicture, ...).
+ */
+class GrTextureProducer : public SkNoncopyable {
+public:
+ struct CopyParams {
+ GrTextureParams::FilterMode fFilter;
+ int fWidth;
+ int fHeight;
+ };
+
+ enum FilterConstraint {
+ kYes_FilterConstraint,
+ kNo_FilterConstraint,
+ };
+
+ /**
+ * Helper for creating a fragment processor to sample the texture with a given filtering mode.
+ * It attempts to avoid making texture copies or using domains whenever possible.
+ *
+ * @param textureMatrix Matrix used to access the texture. It is applied to
+ * the local coords. The post-transformed coords should
+ * be in texel units (rather than normalized) with
+ * respect to this Producer's bounds (width()/height()).
+ * @param constraintRect A rect that represents the area of the texture to be
+ * sampled. It must be contained in the Producer's bounds
+ * as defined by width()/height().
+ * @param filterConstraint Indicates whether filtering is limited to
+ * constraintRect.
+ * @param coordsLimitedToConstraintRect Is it known that textureMatrix*localCoords is bound
+ * by the portion of the texture indicated by
+ * constraintRect (without consideration of filter
+ * width, just the raw coords).
+ * @param filterOrNullForBicubic If non-null indicates the filter mode. If null means
+ * use bicubic filtering.
+ **/
+ virtual sk_sp<GrFragmentProcessor> createFragmentProcessor(
+ const SkMatrix& textureMatrix,
+ const SkRect& constraintRect,
+ FilterConstraint filterConstraint,
+ bool coordsLimitedToConstraintRect,
+ const GrTextureParams::FilterMode* filterOrNullForBicubic,
+ SkColorSpace* dstColorSpace,
+ SkSourceGammaTreatment) = 0;
+
+ virtual ~GrTextureProducer() {}
+
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+ bool isAlphaOnly() const { return fIsAlphaOnly; }
+ virtual SkAlphaType alphaType() const = 0;
+ virtual SkColorSpace* getColorSpace() = 0;
+
+protected:
+ GrTextureProducer(int width, int height, bool isAlphaOnly)
+ : fWidth(width)
+ , fHeight(height)
+ , fIsAlphaOnly(isAlphaOnly) {}
+
+ /** Helper for creating a key for a copy from an original key. */
+ static void MakeCopyKeyFromOrigKey(const GrUniqueKey& origKey,
+ const CopyParams& copyParams,
+ GrUniqueKey* copyKey) {
+ SkASSERT(!copyKey->isValid());
+ if (origKey.isValid()) {
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey::Builder builder(copyKey, origKey, kDomain, 3);
+ builder[0] = copyParams.fFilter;
+ builder[1] = copyParams.fWidth;
+ builder[2] = copyParams.fHeight;
+ }
+ }
+
+ /**
+ * If we need to make a copy in order to be compatible with GrTextureParams, the producer is asked
+ * to return a key that identifies its original content + the CopyParams parameter. If the producer
+ * does not want to cache the stretched version (e.g. the producer is volatile), this should
+ * simply return without initializing the copyKey.
+ */
+ virtual void makeCopyKey(const CopyParams&, GrUniqueKey* copyKey) = 0;
+
+ /**
+ * If a stretched version of the texture is generated, it may be cached (assuming that
+ * makeCopyKey() initialized the copyKey). In that case, the maker is notified in case it
+ * wants to note that for when the maker is destroyed.
+ */
+ virtual void didCacheCopy(const GrUniqueKey& copyKey) = 0;
+
+private:
+ const int fWidth;
+ const int fHeight;
+ const bool fIsAlphaOnly;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+/**
+ * Base class for sources that start out as textures. Optionally allows for a content area subrect.
+ * The intent is not to use content area for subrect rendering. Rather, the pixels outside the
+ * content area have undefined values and shouldn't be read *regardless* of filtering mode or
+ * the SkCanvas::SrcRectConstraint used for subrect draws.
+ */
+class GrTextureAdjuster : public GrTextureProducer {
+public:
+ /** Makes the subset of the texture safe to use with the given texture parameters.
+ outOffset will be the top-left corner of the subset if a copy is not made. Otherwise,
+ the copy will be tight to the contents and outOffset will be (0, 0). If the copy's size
+ does not match subset's dimensions then the contents are scaled to fit the copy.*/
+ GrTexture* refTextureSafeForParams(const GrTextureParams&, SkSourceGammaTreatment,
+ SkIPoint* outOffset);
+
+ sk_sp<GrFragmentProcessor> createFragmentProcessor(
+ const SkMatrix& textureMatrix,
+ const SkRect& constraintRect,
+ FilterConstraint,
+ bool coordsLimitedToConstraintRect,
+ const GrTextureParams::FilterMode* filterOrNullForBicubic,
+ SkColorSpace* dstColorSpace,
+ SkSourceGammaTreatment) override;
+
+ // We do not ref the texture nor the colorspace, so the caller must keep them in scope while
+ // this Adjuster is alive.
+ GrTextureAdjuster(GrTexture*, SkAlphaType, const SkIRect& area, uint32_t uniqueID,
+ SkColorSpace*);
+
+protected:
+ SkAlphaType alphaType() const override { return fAlphaType; }
+ SkColorSpace* getColorSpace() override;
+ void makeCopyKey(const CopyParams& params, GrUniqueKey* copyKey) override;
+ void didCacheCopy(const GrUniqueKey& copyKey) override;
+
+ GrTexture* originalTexture() const { return fOriginal; }
+
+ /** Returns the content area or null for the whole original texture */
+ const SkIRect* contentAreaOrNull() { return fContentArea.getMaybeNull(); }
+
+private:
+ SkTLazy<SkIRect> fContentArea;
+ GrTexture* fOriginal;
+ SkAlphaType fAlphaType;
+ SkColorSpace* fColorSpace;
+ uint32_t fUniqueID;
+
+ GrTexture* refCopy(const CopyParams &copyParams);
+
+ typedef GrTextureProducer INHERITED;
+};
+
+/**
+ * Base class for sources that start out as something other than a texture (encoded image,
+ * picture, ...).
+ */
+class GrTextureMaker : public GrTextureProducer {
+public:
+ /** Returns a texture that is safe for use with the params. If the size of the returned texture
+ does not match width()/height() then the contents of the original must be scaled to fit
+ the texture. */
+ GrTexture* refTextureForParams(const GrTextureParams&, SkSourceGammaTreatment);
+
+ sk_sp<GrFragmentProcessor> createFragmentProcessor(
+ const SkMatrix& textureMatrix,
+ const SkRect& constraintRect,
+ FilterConstraint filterConstraint,
+ bool coordsLimitedToConstraintRect,
+ const GrTextureParams::FilterMode* filterOrNullForBicubic,
+ SkColorSpace* dstColorSpace,
+ SkSourceGammaTreatment) override;
+
+protected:
+ GrTextureMaker(GrContext* context, int width, int height, bool isAlphaOnly)
+ : INHERITED(width, height, isAlphaOnly)
+ , fContext(context) {}
+
+ /**
+ * Return the maker's "original" texture. It is the responsibility of the maker to handle any
+ * caching of the original if desired.
+ */
+ virtual GrTexture* refOriginalTexture(bool willBeMipped, SkSourceGammaTreatment) = 0;
+
+ /**
+ * Return a new (uncached) texture that is the stretch of the maker's original.
+ *
+ * The base-class handles general logic for this, and only needs access to the following
+ * method:
+ * - refOriginalTexture()
+ *
+ * Subclasses may override this if they can handle creating the texture more directly than
+ * by copying.
+ */
+ virtual GrTexture* generateTextureForParams(const CopyParams&, bool willBeMipped,
+ SkSourceGammaTreatment);
+
+ GrContext* context() const { return fContext; }
+
+private:
+ GrContext* fContext;
+
+ typedef GrTextureProducer INHERITED;
+};
+
+#endif
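
A minimal sketch of driving GrTextureAdjuster from a caller, assuming 'texture' is a live GrTexture*, 'uniqueID' is the source image's unique ID, and 'dstColorSpace' is supplied by the caller (possibly null); everything else uses declarations from this header:

    SkIRect contentArea = SkIRect::MakeWH(texture->width(), texture->height());
    GrTextureAdjuster adjuster(texture, kPremul_SkAlphaType, contentArea, uniqueID,
                               nullptr /* no source color space */);

    GrTextureParams::FilterMode filter = GrTextureParams::kBilerp_FilterMode;
    sk_sp<GrFragmentProcessor> fp = adjuster.createFragmentProcessor(
            SkMatrix::I(),                          // texel-space texture matrix
            SkRect::Make(contentArea),              // constraint rect
            GrTextureProducer::kNo_FilterConstraint,
            true,                                   // coords limited to constraint rect
            &filter,                                // non-null => bilerp; null => bicubic
            dstColorSpace,
            SkSourceGammaTreatment::kIgnore);
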
diff --git a/gfx/skia/skia/src/gpu/GrTexturePriv.h b/gfx/skia/skia/src/gpu/GrTexturePriv.h
new file mode 100644
index 000000000..c4e6538d1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTexturePriv.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTexturePriv_DEFINED
+#define GrTexturePriv_DEFINED
+
+#include "GrTexture.h"
+
+/** Class that adds methods to GrTexture that are only intended for use internal to Skia.
+ This class is purely a privileged window into GrTexture. It should never have additional data
+ members or virtual methods.
+ Non-static methods that are not trivial inlines should be spring-boarded (e.g. declared and
+ implemented privately in GrTexture with an inline public method here). */
+class GrTexturePriv {
+public:
+ void setFlag(GrSurfaceFlags flags) {
+ fTexture->fDesc.fFlags = fTexture->fDesc.fFlags | flags;
+ }
+
+ void resetFlag(GrSurfaceFlags flags) {
+ fTexture->fDesc.fFlags = fTexture->fDesc.fFlags & ~flags;
+ }
+
+ bool isSetFlag(GrSurfaceFlags flags) const {
+ return 0 != (fTexture->fDesc.fFlags & flags);
+ }
+
+ void dirtyMipMaps(bool mipMapsDirty) {
+ fTexture->dirtyMipMaps(mipMapsDirty);
+ }
+
+ bool mipMapsAreDirty() const {
+ return GrTexture::kValid_MipMapsStatus != fTexture->fMipMapsStatus;
+ }
+
+ bool hasMipMaps() const {
+ return GrTexture::kNotAllocated_MipMapsStatus != fTexture->fMipMapsStatus;
+ }
+
+ void setMaxMipMapLevel(int maxMipMapLevel) const {
+ fTexture->fMaxMipMapLevel = maxMipMapLevel;
+ }
+
+ int maxMipMapLevel() const {
+ return fTexture->fMaxMipMapLevel;
+ }
+
+ void setGammaTreatment(SkSourceGammaTreatment gammaTreatment) const {
+ fTexture->fGammaTreatment = gammaTreatment;
+ }
+ SkSourceGammaTreatment gammaTreatment() const { return fTexture->fGammaTreatment; }
+
+ static void ComputeScratchKey(const GrSurfaceDesc&, GrScratchKey*);
+
+private:
+ GrTexturePriv(GrTexture* texture) : fTexture(texture) { }
+ GrTexturePriv(const GrTexturePriv& that) : fTexture(that.fTexture) { }
+ GrTexturePriv& operator=(const GrTexturePriv&); // unimpl
+
+ // No taking addresses of this type.
+ const GrTexturePriv* operator&() const;
+ GrTexturePriv* operator&();
+
+ GrTexture* fTexture;
+
+ friend class GrTexture; // to construct/copy this type.
+};
+
+inline GrTexturePriv GrTexture::texturePriv() { return GrTexturePriv(this); }
+
+inline const GrTexturePriv GrTexture::texturePriv () const {
+ return GrTexturePriv(const_cast<GrTexture*>(this));
+}
+
+#endif
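
A minimal usage sketch of the privileged-window pattern above; tex is a hypothetical non-null GrTexture*, and only methods declared in this header are used:

    // Query and update mip state through the privileged view (sketch only).
    void touchMips(GrTexture* tex) {
        if (!tex->texturePriv().hasMipMaps()) {
            return;                                  // no mip levels were ever allocated
        }
        if (tex->texturePriv().mipMapsAreDirty()) {
            tex->texturePriv().dirtyMipMaps(false);  // presumably marks the mip chain clean again
        }
    }
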
diff --git a/gfx/skia/skia/src/gpu/GrTextureProvider.cpp b/gfx/skia/skia/src/gpu/GrTextureProvider.cpp
new file mode 100644
index 000000000..68a554048
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureProvider.cpp
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrTextureProvider.h"
+
+#include "GrCaps.h"
+#include "GrTexturePriv.h"
+#include "GrResourceCache.h"
+#include "GrGpu.h"
+#include "../private/GrSingleOwner.h"
+#include "SkMathPriv.h"
+#include "SkTArray.h"
+
+#define ASSERT_SINGLE_OWNER \
+ SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fSingleOwner);)
+
+enum ScratchTextureFlags {
+ kExact_ScratchTextureFlag = 0x1,
+ kNoPendingIO_ScratchTextureFlag = 0x2,
+ kNoCreate_ScratchTextureFlag = 0x4,
+};
+
+GrTextureProvider::GrTextureProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* singleOwner)
+ : fCache(cache)
+ , fGpu(gpu)
+#ifdef SK_DEBUG
+ , fSingleOwner(singleOwner)
+#endif
+ {
+}
+
+GrTexture* GrTextureProvider::createMipMappedTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
+ const GrMipLevel* texels, int mipLevelCount) {
+ ASSERT_SINGLE_OWNER
+
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+ if (mipLevelCount && !texels) {
+ return nullptr;
+ }
+ for (int i = 0; i < mipLevelCount; ++i) {
+ if (!texels[i].fPixels) {
+ return nullptr;
+ }
+ }
+
+ if ((desc.fFlags & kRenderTarget_GrSurfaceFlag) &&
+ !fGpu->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
+ return nullptr;
+ }
+ if (!GrPixelConfigIsCompressed(desc.fConfig)) {
+ if (mipLevelCount < 2) {
+ static const uint32_t kFlags = kExact_ScratchTextureFlag |
+ kNoCreate_ScratchTextureFlag;
+ if (GrTexture* texture = this->refScratchTexture(desc, kFlags)) {
+ if (!mipLevelCount ||
+ texture->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
+ texels[0].fPixels, texels[0].fRowBytes)) {
+ if (SkBudgeted::kNo == budgeted) {
+ texture->resourcePriv().makeUnbudgeted();
+ }
+ return texture;
+ }
+ texture->unref();
+ }
+ }
+ }
+
+ SkTArray<GrMipLevel> texelsShallowCopy(mipLevelCount);
+ for (int i = 0; i < mipLevelCount; ++i) {
+ texelsShallowCopy.push_back(texels[i]);
+ }
+ return fGpu->createTexture(desc, budgeted, texelsShallowCopy);
+}
+
+GrTexture* GrTextureProvider::createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
+ const void* srcData, size_t rowBytes) {
+ GrMipLevel tempTexels;
+ GrMipLevel* texels = nullptr;
+ int levelCount = 0;
+ if (srcData) {
+ tempTexels.fPixels = srcData;
+ tempTexels.fRowBytes = rowBytes;
+ texels = &tempTexels;
+ levelCount = 1;
+ }
+ return this->createMipMappedTexture(desc, budgeted, texels, levelCount);
+}
+
+GrTexture* GrTextureProvider::createApproxTexture(const GrSurfaceDesc& desc) {
+ ASSERT_SINGLE_OWNER
+ return this->internalCreateApproxTexture(desc, 0);
+}
+
+GrTexture* GrTextureProvider::internalCreateApproxTexture(const GrSurfaceDesc& desc,
+ uint32_t scratchFlags) {
+ ASSERT_SINGLE_OWNER
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+ // Currently we don't recycle compressed textures as scratch.
+ if (GrPixelConfigIsCompressed(desc.fConfig)) {
+ return nullptr;
+ } else {
+ return this->refScratchTexture(desc, scratchFlags);
+ }
+}
+
+GrTexture* GrTextureProvider::refScratchTexture(const GrSurfaceDesc& inDesc,
+ uint32_t flags) {
+ ASSERT_SINGLE_OWNER
+ SkASSERT(!this->isAbandoned());
+ SkASSERT(!GrPixelConfigIsCompressed(inDesc.fConfig));
+
+ SkTCopyOnFirstWrite<GrSurfaceDesc> desc(inDesc);
+
+ if (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
+ if (!(kExact_ScratchTextureFlag & flags)) {
+ // bin by pow2 with a reasonable min
+ const int kMinSize = 16;
+ GrSurfaceDesc* wdesc = desc.writable();
+ wdesc->fWidth = SkTMax(kMinSize, GrNextPow2(desc->fWidth));
+ wdesc->fHeight = SkTMax(kMinSize, GrNextPow2(desc->fHeight));
+ }
+
+ GrScratchKey key;
+ GrTexturePriv::ComputeScratchKey(*desc, &key);
+ uint32_t scratchFlags = 0;
+ if (kNoPendingIO_ScratchTextureFlag & flags) {
+ scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
+ } else if (!(desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
+ // If it is not a render target then it will most likely be populated by
+ // writePixels() which will trigger a flush if the texture has pending IO.
+ scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
+ }
+ GrGpuResource* resource = fCache->findAndRefScratchResource(key,
+ GrSurface::WorstCaseSize(*desc),
+ scratchFlags);
+ if (resource) {
+ GrSurface* surface = static_cast<GrSurface*>(resource);
+ GrRenderTarget* rt = surface->asRenderTarget();
+ if (rt && fGpu->caps()->discardRenderTargetSupport()) {
+ rt->discard();
+ }
+ return surface->asTexture();
+ }
+ }
+
+ if (!(kNoCreate_ScratchTextureFlag & flags)) {
+ return fGpu->createTexture(*desc, SkBudgeted::kYes);
+ }
+
+ return nullptr;
+}
+
+GrTexture* GrTextureProvider::wrapBackendTexture(const GrBackendTextureDesc& desc,
+ GrWrapOwnership ownership) {
+ ASSERT_SINGLE_OWNER
+ if (this->isAbandoned()) {
+ return nullptr;
+ }
+ return fGpu->wrapBackendTexture(desc, ownership);
+}
+
+GrRenderTarget* GrTextureProvider::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
+ ASSERT_SINGLE_OWNER
+ return this->isAbandoned() ? nullptr : fGpu->wrapBackendRenderTarget(desc,
+ kBorrow_GrWrapOwnership);
+}
+
+void GrTextureProvider::assignUniqueKeyToResource(const GrUniqueKey& key, GrGpuResource* resource) {
+ ASSERT_SINGLE_OWNER
+ if (this->isAbandoned() || !resource) {
+ return;
+ }
+ resource->resourcePriv().setUniqueKey(key);
+}
+
+bool GrTextureProvider::existsResourceWithUniqueKey(const GrUniqueKey& key) const {
+ ASSERT_SINGLE_OWNER
+ return this->isAbandoned() ? false : fCache->hasUniqueKey(key);
+}
+
+GrGpuResource* GrTextureProvider::findAndRefResourceByUniqueKey(const GrUniqueKey& key) {
+ ASSERT_SINGLE_OWNER
+ return this->isAbandoned() ? nullptr : fCache->findAndRefUniqueResource(key);
+}
+
+GrTexture* GrTextureProvider::findAndRefTextureByUniqueKey(const GrUniqueKey& key) {
+ ASSERT_SINGLE_OWNER
+ GrGpuResource* resource = this->findAndRefResourceByUniqueKey(key);
+ if (resource) {
+ GrTexture* texture = static_cast<GrSurface*>(resource)->asTexture();
+ SkASSERT(texture);
+ return texture;
+ }
+ return nullptr;
+}
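
A sketch of how a caller might create a single-level texture through this provider; provider and pixels are hypothetical, and the descriptor values are arbitrary:

    GrSurfaceDesc desc;
    desc.fFlags = kNone_GrSurfaceFlags;
    desc.fWidth = 256;
    desc.fHeight = 256;
    desc.fConfig = kRGBA_8888_GrPixelConfig;
    desc.fSampleCnt = 0;
    // createTexture() wraps createMipMappedTexture() with one level built from 'pixels'.
    GrTexture* tex = provider->createTexture(desc, SkBudgeted::kYes, pixels, 256 * 4);
    if (tex) {
        // ... use the texture ...
        tex->unref();  // the provider returns an owning reference
    }
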
diff --git a/gfx/skia/skia/src/gpu/GrTextureProxy.cpp b/gfx/skia/skia/src/gpu/GrTextureProxy.cpp
new file mode 100644
index 000000000..205dfdd31
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureProxy.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrTextureProxy.h"
+
+#include "GrTextureProvider.h"
+#include "GrGpuResourcePriv.h"
+
+GrTextureProxy::GrTextureProxy(const GrSurfaceDesc& srcDesc, SkBackingFit fit, SkBudgeted budgeted,
+ const void* /*srcData*/, size_t /*rowBytes*/)
+ : INHERITED(srcDesc, fit, budgeted) {
+ // TODO: Handle 'srcData' here
+}
+
+GrTextureProxy::GrTextureProxy(sk_sp<GrTexture> tex)
+ : INHERITED(tex->desc(), SkBackingFit::kExact,
+ tex->resourcePriv().isBudgeted(), tex->uniqueID())
+ , fTexture(std::move(tex)) {
+}
+
+GrTexture* GrTextureProxy::instantiate(GrTextureProvider* texProvider) {
+ if (fTexture) {
+ return fTexture.get();
+ }
+
+ if (SkBackingFit::kApprox == fFit) {
+ fTexture.reset(texProvider->createApproxTexture(fDesc));
+ } else {
+ fTexture.reset(texProvider->createTexture(fDesc, fBudgeted));
+ }
+
+ return fTexture.get();
+}
+
+sk_sp<GrTextureProxy> GrTextureProxy::Make(const GrSurfaceDesc& desc,
+ SkBackingFit fit,
+ SkBudgeted budgeted,
+ const void* srcData,
+ size_t rowBytes) {
+ // TODO: handle 'srcData' (we could use the wrapped version if there is data)
+ SkASSERT(!srcData && !rowBytes);
+ return sk_sp<GrTextureProxy>(new GrTextureProxy(desc, fit, budgeted, srcData, rowBytes));
+}
+
+sk_sp<GrTextureProxy> GrTextureProxy::Make(sk_sp<GrTexture> tex) {
+ return sk_sp<GrTextureProxy>(new GrTextureProxy(std::move(tex)));
+}
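
A sketch of the deferred-allocation pattern this class enables; desc and texProvider are hypothetical:

    sk_sp<GrTextureProxy> proxy = GrTextureProxy::Make(desc, SkBackingFit::kApprox,
                                                       SkBudgeted::kYes, nullptr, 0);
    // ... record work against the proxy without a backing allocation ...
    GrTexture* backing = proxy->instantiate(texProvider);  // allocates on first call
    if (!backing) {
        // allocation failed (for example, an abandoned context)
    }
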
diff --git a/gfx/skia/skia/src/gpu/GrTextureToYUVPlanes.cpp b/gfx/skia/skia/src/gpu/GrTextureToYUVPlanes.cpp
new file mode 100644
index 000000000..93a62d264
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureToYUVPlanes.cpp
@@ -0,0 +1,233 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrTextureToYUVPlanes.h"
+#include "effects/GrSimpleTextureEffect.h"
+#include "effects/GrYUVEffect.h"
+#include "GrClip.h"
+#include "GrContext.h"
+#include "GrDrawContext.h"
+#include "GrPaint.h"
+#include "GrTextureProvider.h"
+
+namespace {
+ using MakeFPProc = sk_sp<GrFragmentProcessor> (*)(sk_sp<GrFragmentProcessor>,
+ SkYUVColorSpace colorSpace);
+};
+
+static bool convert_texture(GrTexture* src, GrDrawContext* dst, int dstW, int dstH,
+ SkYUVColorSpace colorSpace, MakeFPProc proc) {
+
+ SkScalar xScale = SkIntToScalar(src->width()) / dstW / src->width();
+ SkScalar yScale = SkIntToScalar(src->height()) / dstH / src->height();
+ GrTextureParams::FilterMode filter;
+ if (dstW == src->width() && dstH == src->height()) {
+ filter = GrTextureParams::kNone_FilterMode;
+ } else {
+ filter = GrTextureParams::kBilerp_FilterMode;
+ }
+
+ sk_sp<GrFragmentProcessor> fp(
+ GrSimpleTextureEffect::Make(src, nullptr, SkMatrix::MakeScale(xScale, yScale), filter));
+ if (!fp) {
+ return false;
+ }
+ fp = proc(std::move(fp), colorSpace);
+ if (!fp) {
+ return false;
+ }
+ GrPaint paint;
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ paint.addColorFragmentProcessor(std::move(fp));
+ dst->drawRect(GrNoClip(), paint, SkMatrix::I(), SkRect::MakeIWH(dstW, dstH));
+ return true;
+}
+
+bool GrTextureToYUVPlanes(GrTexture* texture, const SkISize sizes[3], void* const planes[3],
+ const size_t rowBytes[3], SkYUVColorSpace colorSpace) {
+ if (GrContext* context = texture->getContext()) {
+ // Depending on the relative sizes of the y, u, and v planes we may do 1 to 3 draws/
+ // readbacks.
+ sk_sp<GrDrawContext> yuvDrawContext;
+ sk_sp<GrDrawContext> yDrawContext;
+ sk_sp<GrDrawContext> uvDrawContext;
+ sk_sp<GrDrawContext> uDrawContext;
+ sk_sp<GrDrawContext> vDrawContext;
+
+ // We issue draw(s) to convert from RGBA to Y, U, and V. All three planes may have
+ // different sizes, but we optimize for two common cases: all planes are the same size
+ // (one draw to YUV), and U and V are the same size but Y differs (two draws, one for Y,
+ // one for UV).
+ if (sizes[0] == sizes[1] && sizes[1] == sizes[2]) {
+ yuvDrawContext = context->makeDrawContextWithFallback(SkBackingFit::kApprox,
+ sizes[0].fWidth,
+ sizes[0].fHeight,
+ kRGBA_8888_GrPixelConfig,
+ nullptr);
+ if (!yuvDrawContext) {
+ return false;
+ }
+ } else {
+ yDrawContext = context->makeDrawContextWithFallback(SkBackingFit::kApprox,
+ sizes[0].fWidth,
+ sizes[0].fHeight,
+ kAlpha_8_GrPixelConfig,
+ nullptr);
+ if (!yDrawContext) {
+ return false;
+ }
+ if (sizes[1] == sizes[2]) {
+ // TODO: Add support for GL_RG when available.
+ uvDrawContext = context->makeDrawContextWithFallback(SkBackingFit::kApprox,
+ sizes[1].fWidth,
+ sizes[1].fHeight,
+ kRGBA_8888_GrPixelConfig,
+ nullptr);
+ if (!uvDrawContext) {
+ return false;
+ }
+ } else {
+ uDrawContext = context->makeDrawContextWithFallback(SkBackingFit::kApprox,
+ sizes[1].fWidth,
+ sizes[1].fHeight,
+ kAlpha_8_GrPixelConfig,
+ nullptr);
+ vDrawContext = context->makeDrawContextWithFallback(SkBackingFit::kApprox,
+ sizes[2].fWidth,
+ sizes[2].fHeight,
+ kAlpha_8_GrPixelConfig,
+ nullptr);
+ if (!uDrawContext || !vDrawContext) {
+ return false;
+ }
+ }
+ }
+
+ // Do all the draws before any readback.
+ if (yuvDrawContext) {
+ if (!convert_texture(texture, yuvDrawContext.get(),
+ sizes[0].fWidth, sizes[0].fHeight,
+ colorSpace, GrYUVEffect::MakeRGBToYUV)) {
+ return false;
+ }
+ } else {
+ SkASSERT(yDrawContext);
+ if (!convert_texture(texture, yDrawContext.get(),
+ sizes[0].fWidth, sizes[0].fHeight,
+ colorSpace, GrYUVEffect::MakeRGBToY)) {
+ return false;
+ }
+ if (uvDrawContext) {
+ if (!convert_texture(texture, uvDrawContext.get(),
+ sizes[1].fWidth, sizes[1].fHeight,
+ colorSpace, GrYUVEffect::MakeRGBToUV)) {
+ return false;
+ }
+ } else {
+ SkASSERT(uDrawContext && vDrawContext);
+ if (!convert_texture(texture, uDrawContext.get(),
+ sizes[1].fWidth, sizes[1].fHeight,
+ colorSpace, GrYUVEffect::MakeRGBToU)) {
+ return false;
+ }
+ if (!convert_texture(texture, vDrawContext.get(),
+ sizes[2].fWidth, sizes[2].fHeight,
+ colorSpace, GrYUVEffect::MakeRGBToV)) {
+ return false;
+ }
+ }
+ }
+
+ if (yuvDrawContext) {
+ SkASSERT(sizes[0] == sizes[1] && sizes[1] == sizes[2]);
+ sk_sp<GrTexture> yuvTex(yuvDrawContext->asTexture());
+ SkASSERT(yuvTex);
+ SkISize yuvSize = sizes[0];
+ // We have no kRGB_888 pixel format, so read back RGBA and then copy three channels.
+ SkAutoSTMalloc<128 * 128, uint32_t> tempYUV(yuvSize.fWidth * yuvSize.fHeight);
+ if (!yuvTex->readPixels(0, 0, yuvSize.fWidth, yuvSize.fHeight,
+ kRGBA_8888_GrPixelConfig, tempYUV.get(), 0)) {
+ return false;
+ }
+ size_t yRowBytes = rowBytes[0] ? rowBytes[0] : yuvSize.fWidth;
+ size_t uRowBytes = rowBytes[1] ? rowBytes[1] : yuvSize.fWidth;
+ size_t vRowBytes = rowBytes[2] ? rowBytes[2] : yuvSize.fWidth;
+ if (yRowBytes < (size_t)yuvSize.fWidth || uRowBytes < (size_t)yuvSize.fWidth ||
+ vRowBytes < (size_t)yuvSize.fWidth) {
+ return false;
+ }
+ for (int j = 0; j < yuvSize.fHeight; ++j) {
+ for (int i = 0; i < yuvSize.fWidth; ++i) {
+ // These writes could surely be made more efficient.
+ uint32_t y = GrColorUnpackR(tempYUV.get()[j * yuvSize.fWidth + i]);
+ uint32_t u = GrColorUnpackG(tempYUV.get()[j * yuvSize.fWidth + i]);
+ uint32_t v = GrColorUnpackB(tempYUV.get()[j * yuvSize.fWidth + i]);
+ uint8_t* yLoc = ((uint8_t*)planes[0]) + j * yRowBytes + i;
+ uint8_t* uLoc = ((uint8_t*)planes[1]) + j * uRowBytes + i;
+ uint8_t* vLoc = ((uint8_t*)planes[2]) + j * vRowBytes + i;
+ *yLoc = y;
+ *uLoc = u;
+ *vLoc = v;
+ }
+ }
+ return true;
+ } else {
+ SkASSERT(yDrawContext);
+ sk_sp<GrTexture> yTex(yDrawContext->asTexture());
+ SkASSERT(yTex);
+ if (!yTex->readPixels(0, 0, sizes[0].fWidth, sizes[0].fHeight,
+ kAlpha_8_GrPixelConfig, planes[0], rowBytes[0])) {
+ return false;
+ }
+ if (uvDrawContext) {
+ SkASSERT(sizes[1].fWidth == sizes[2].fWidth);
+ sk_sp<GrTexture> uvTex(uvDrawContext->asTexture());
+ SkASSERT(uvTex);
+ SkISize uvSize = sizes[1];
+ // We have no kRG_88 pixel format, so read back RGBA and then copy two channels.
+ SkAutoSTMalloc<128 * 128, uint32_t> tempUV(uvSize.fWidth * uvSize.fHeight);
+ if (!uvTex->readPixels(0, 0, uvSize.fWidth, uvSize.fHeight,
+ kRGBA_8888_GrPixelConfig, tempUV.get(), 0)) {
+ return false;
+ }
+
+ size_t uRowBytes = rowBytes[1] ? rowBytes[1] : uvSize.fWidth;
+ size_t vRowBytes = rowBytes[2] ? rowBytes[2] : uvSize.fWidth;
+ if (uRowBytes < (size_t)uvSize.fWidth || vRowBytes < (size_t)uvSize.fWidth) {
+ return false;
+ }
+ for (int j = 0; j < uvSize.fHeight; ++j) {
+ for (int i = 0; i < uvSize.fWidth; ++i) {
+ // These writes could surely be made more efficient.
+ uint32_t u = GrColorUnpackR(tempUV.get()[j * uvSize.fWidth + i]);
+ uint32_t v = GrColorUnpackG(tempUV.get()[j * uvSize.fWidth + i]);
+ uint8_t* uLoc = ((uint8_t*)planes[1]) + j * uRowBytes + i;
+ uint8_t* vLoc = ((uint8_t*)planes[2]) + j * vRowBytes + i;
+ *uLoc = u;
+ *vLoc = v;
+ }
+ }
+ return true;
+ } else {
+ SkASSERT(uDrawContext && vDrawContext);
+ sk_sp<GrTexture> tex(uDrawContext->asTexture());
+ SkASSERT(tex);
+ if (!tex->readPixels(0, 0, sizes[1].fWidth, sizes[1].fHeight,
+ kAlpha_8_GrPixelConfig, planes[1], rowBytes[1])) {
+ return false;
+ }
+ tex = vDrawContext->asTexture();
+ SkASSERT(tex);
+ if (!tex->readPixels(0, 0, sizes[2].fWidth, sizes[2].fHeight,
+ kAlpha_8_GrPixelConfig, planes[2], rowBytes[2])) {
+ return false;
+ }
+ return true;
+ }
+ }
+ }
+ return false;
+}
diff --git a/gfx/skia/skia/src/gpu/GrTextureToYUVPlanes.h b/gfx/skia/skia/src/gpu/GrTextureToYUVPlanes.h
new file mode 100644
index 000000000..67b6fce2e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTextureToYUVPlanes.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextureToYUVPlanes_DEFINED
+#define GrTextureToYUVPlanes_DEFINED
+
+#include "SkImageInfo.h"
+#include "SkSize.h"
+
+class GrTexture;
+
+bool GrTextureToYUVPlanes(GrTexture* texture, const SkISize[3], void* const planes[3],
+ const size_t rowBytes[3], SkYUVColorSpace);
+
+#endif
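
A sketch of a call site for the declaration above, assuming a hypothetical 640x480 RGBA source texture rgbaTex with 2x2-subsampled U/V planes; the buffer setup, std::vector usage, and color space are illustrative only:

    const SkISize sizes[3] = {{640, 480}, {320, 240}, {320, 240}};
    std::vector<uint8_t> y(640 * 480), u(320 * 240), v(320 * 240);
    void* const planes[3] = {y.data(), u.data(), v.data()};
    const size_t rowBytes[3] = {640, 320, 320};  // tightly packed
    bool ok = GrTextureToYUVPlanes(rgbaTex, sizes, planes, rowBytes,
                                   kRec601_SkYUVColorSpace);
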
diff --git a/gfx/skia/skia/src/gpu/GrTraceMarker.cpp b/gfx/skia/skia/src/gpu/GrTraceMarker.cpp
new file mode 100644
index 000000000..c4d2ad4ce
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTraceMarker.cpp
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrTraceMarker.h"
+#include "GrTracing.h"
+#include "SkString.h"
+#include "SkTSort.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrTraceMarkerSet::GrTraceMarkerSet(const GrTraceMarkerSet& other) {
+ this->addSet(other);
+}
+
+void GrTraceMarkerSet::add(const GrGpuTraceMarker& marker) {
+ this->fMarkerArray.push(marker);
+}
+
+void GrTraceMarkerSet::addSet(const GrTraceMarkerSet& markerSet) {
+ for (Iter iter = markerSet.begin(); iter != markerSet.end(); ++iter) {
+ this->add(*iter);
+ }
+}
+
+void GrTraceMarkerSet::remove(const GrGpuTraceMarker& marker) {
+ SkASSERT(-1 != fMarkerArray.find(marker));
+ int index = this->fMarkerArray.find(marker);
+ this->fMarkerArray.remove(index);
+}
+
+int GrTraceMarkerSet::count() const {
+ return this->fMarkerArray.count();
+}
+
+SkString GrTraceMarkerSet::toStringLast() const {
+ const int numMarkers = this->fMarkerArray.count();
+ SkString marker_string;
+ if (numMarkers > 0) {
+ GrGpuTraceMarker& lastMarker = this->fMarkerArray[numMarkers - 1];
+ marker_string.append(lastMarker.fMarker);
+ if (lastMarker.fID != -1) {
+ marker_string.append("(");
+ marker_string.appendS32(lastMarker.fID);
+ marker_string.append(")");
+ }
+ }
+ return marker_string;
+}
+
+SkString GrTraceMarkerSet::toString() const {
+ SkTQSort<GrGpuTraceMarker>(this->fMarkerArray.begin(), this->fMarkerArray.end() - 1);
+ SkString marker_string;
+ const char* prevMarkerName = "";
+ int prevMarkerID = -1;
+ int counter = 0;
+ const int numMarkers = this->fMarkerArray.count();
+
+ // check used for GrGLGpu device after we've already collapsed all markers
+ if (1 == numMarkers && -1 == this->fMarkerArray[0].fID) {
+ marker_string.append(this->fMarkerArray[0].fMarker);
+ return marker_string;
+ }
+
+ for (int i = 0; i < numMarkers; ++i ) {
+ GrGpuTraceMarker& currMarker = this->fMarkerArray[i];
+ const char* currCmd = currMarker.fMarker;
+ if (currCmd != prevMarkerName) {
+ if (prevMarkerID != -1) {
+ marker_string.append(") ");
+ }
+ marker_string.append(currCmd);
+ if (currMarker.fID != -1) {
+ marker_string.append("(");
+ marker_string.appendS32(currMarker.fID);
+ }
+ prevMarkerName = currCmd;
+ } else if (currMarker.fID != prevMarkerID) {
+ marker_string.append(", ");
+ marker_string.appendS32(currMarker.fID);
+ }
+ prevMarkerID = currMarker.fID;
+ ++counter;
+ }
+ if (counter > 0 && prevMarkerID != -1) {
+ marker_string.append(")");
+ }
+ return marker_string;
+}
+
+GrTraceMarkerSet::Iter GrTraceMarkerSet::begin() const {
+ return Iter(this, 0);
+}
+
+GrTraceMarkerSet::Iter GrTraceMarkerSet::end() const {
+ return Iter(this, this->fMarkerArray.count());
+}
diff --git a/gfx/skia/skia/src/gpu/GrTraceMarker.h b/gfx/skia/skia/src/gpu/GrTraceMarker.h
new file mode 100644
index 000000000..037112168
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTraceMarker.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTDArray.h"
+
+#ifndef GrTraceMarkerSet_DEFINED
+#define GrTraceMarkerSet_DEFINED
+
+class GrGpuTraceMarker {
+public:
+ GrGpuTraceMarker(const char* marker, int idCounter) : fMarker(marker), fID(idCounter) {}
+
+ bool operator<(const GrGpuTraceMarker& rhs) const {
+ return this->fMarker < rhs.fMarker || (this->fMarker == rhs.fMarker && this->fID < rhs.fID);
+ }
+
+ bool operator==(const GrGpuTraceMarker& rhs) const {
+ return (this->fID == rhs.fID && this->fMarker == rhs.fMarker);
+ }
+
+ const char* fMarker;
+ int fID;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkString;
+
+class GrTraceMarkerSet {
+public:
+ GrTraceMarkerSet() {}
+
+ GrTraceMarkerSet(const GrTraceMarkerSet& other);
+
+ // Adds marker to the set.
+ void add(const GrGpuTraceMarker& marker);
+ // Adds all markers from one set into this set.
+ void addSet(const GrTraceMarkerSet& markerSet);
+
+ void remove(const GrGpuTraceMarker& marker);
+
+ int count() const;
+
+ /**
+ * First sorts fMarkerArray and returns a string of the format
+ * MarkerName1(#,#,...)%MarkerName2(#,#,...):... where MarkerName is the
+ * marker string used in the TraceMarker and the (#,#,..) is a list of instance
+ * ids for the given marker string.
+ */
+ SkString toString() const;
+
+ SkString toStringLast() const;
+
+ class Iter;
+
+ Iter begin() const;
+
+ Iter end() const;
+
+private:
+ mutable SkTDArray<GrGpuTraceMarker> fMarkerArray;
+};
+
+class GrTraceMarkerSet::Iter {
+public:
+ Iter() {}
+ Iter& operator=(const Iter& i) {
+ fCurrentIndex = i.fCurrentIndex;
+ fMarkers = i.fMarkers;
+ return *this;
+ }
+ bool operator==(const Iter& i) const {
+ return fCurrentIndex == i.fCurrentIndex && fMarkers == i.fMarkers;
+ }
+ bool operator!=(const Iter& i) const { return !(*this == i); }
+ const GrGpuTraceMarker& operator*() const { return fMarkers->fMarkerArray[fCurrentIndex]; }
+ Iter& operator++() {
+ SkASSERT(*this != fMarkers->end());
+ ++fCurrentIndex;
+ return *this;
+ }
+
+private:
+ friend class GrTraceMarkerSet;
+ Iter(const GrTraceMarkerSet* markers, int index)
+ : fMarkers(markers), fCurrentIndex(index) {
+ SkASSERT(markers);
+ }
+
+ const GrTraceMarkerSet* fMarkers;
+ int fCurrentIndex;
+};
+
+#endif
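
A sketch of the string form produced by toString(); the marker name is just an example:

    static const char* kName = "GrDrawContext::drawRect";  // example marker string
    GrTraceMarkerSet markers;
    markers.add(GrGpuTraceMarker(kName, 0));
    markers.add(GrGpuTraceMarker(kName, 1));
    SkString s = markers.toString();  // yields "GrDrawContext::drawRect(0, 1)"
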
diff --git a/gfx/skia/skia/src/gpu/GrTracing.h b/gfx/skia/skia/src/gpu/GrTracing.h
new file mode 100644
index 000000000..273aa6598
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrTracing.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTracing_DEFINED
+#define GrTracing_DEFINED
+
+#include "GrGpu.h"
+#include "GrTraceMarker.h"
+#include "SkTLazy.h"
+#include "SkTraceEvent.h"
+
+/**
+ * Marker generation class used for adding and removing markers around code blocks
+ */
+class GrGpuTraceMarkerGenerator : public ::SkNoncopyable {
+public:
+ GrGpuTraceMarkerGenerator() {}
+
+ ~GrGpuTraceMarkerGenerator() {
+ if (fTraceMarker.isValid()) {
+ // TODO remove trace marker
+ }
+ }
+
+ void initialize(const char* marker_str, int* marker_counter) {
+ // GrGpuTraceMarker* traceMarker = fTraceMarker.init(marker_str, *marker_counter);
+ // TODO add trace marker
+ }
+
+private:
+ SkTLazy<GrGpuTraceMarker> fTraceMarker;
+};
+
+class GrGpuTraceMarkerGeneratorContext : public ::SkNoncopyable {
+public:
+ GrGpuTraceMarkerGeneratorContext(GrContext* context) {}
+
+ ~GrGpuTraceMarkerGeneratorContext() {
+ if (fTraceMarker.isValid()) {
+ // TODO remove trace marker
+ }
+ }
+
+ void initialize(const char* marker_str, int* marker_counter) {
+ // GrGpuTraceMarker* traceMarker = fTraceMarker.init(marker_str, *marker_counter);
+ // TODO add trace marker
+ }
+
+private:
+ SkTLazy<GrGpuTraceMarker> fTraceMarker;
+};
+
+/**
+ * GR_CREATE_TRACE_MARKER will place begin and end trace markers for both
+ * cpu and gpu (if gpu tracing enabled) for the current scope.
+ * name is of type const char* and target is of type GrDrawTarget*
+ */
+#define GR_CREATE_TRACE_MARKER(name, target) \
+ /* Chromium tracing */ \
+ static int SK_MACRO_APPEND_LINE(name_counter) = 0; \
+ bool SK_MACRO_APPEND_LINE(gpuTracingEnabled); \
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), \
+ &SK_MACRO_APPEND_LINE(gpuTracingEnabled)); \
+ if (SK_MACRO_APPEND_LINE(gpuTracingEnabled)) { \
+ INTERNAL_GR_CREATE_TRACE_MARKER_SCOPED(name, SK_MACRO_APPEND_LINE(name_counter), target) \
+ } \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), name, \
+ "id", SK_MACRO_APPEND_LINE(name_counter));
+
+#define INTERNAL_GR_CREATE_TRACE_MARKER_SCOPED(name, name_counter, target) \
+ static const char* SK_MACRO_APPEND_LINE(static_name) = name; \
+ INTERNAL_GR_CREATE_TRACE_MARKER(SK_MACRO_APPEND_LINE(static_name), \
+ name_counter, \
+ target) \
+ sk_atomic_inc(&name_counter);
+
+#define INTERNAL_GR_CREATE_TRACE_MARKER(name, name_counter, target, ...) \
+ GR_CREATE_GPU_TRACE_MARKER(name, name_counter, target) \
+
+#define GR_CREATE_GPU_TRACE_MARKER(name, name_counter, target) \
+ GrGpuTraceMarkerGenerator SK_MACRO_APPEND_LINE(TMG)(target); \
+ SK_MACRO_APPEND_LINE(TMG).initialize(name, &name_counter); \
+
+/**
+ * Context level GrTracing macros, classname and op are const char*, context is GrContext
+ * TODO can we just have one set of macros? Probably.
+ */
+#define GR_CREATE_TRACE_MARKER_CONTEXT(classname, op, context) \
+ /* Chromium tracing */ \
+ static int SK_MACRO_APPEND_LINE(name_counter) = 0; \
+ bool SK_MACRO_APPEND_LINE(gpuTracingEnabled); \
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), \
+ &SK_MACRO_APPEND_LINE(gpuTracingEnabled)); \
+ if (SK_MACRO_APPEND_LINE(gpuTracingEnabled)) { \
+ INTERNAL_GR_CREATE_TRACE_MARKER_SCOPED_C(classname "::" op, \
+ SK_MACRO_APPEND_LINE(name_counter), context) \
+ } \
+ GR_AUDIT_TRAIL_AUTO_FRAME(context->getAuditTrail(), classname "::" op); \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), classname "::" op, \
+ "id", SK_MACRO_APPEND_LINE(name_counter));
+
+#define INTERNAL_GR_CREATE_TRACE_MARKER_SCOPED_C(name, name_counter, context) \
+ static const char* SK_MACRO_APPEND_LINE(static_name) = name; \
+ INTERNAL_GR_CREATE_TRACE_MARKER_C(SK_MACRO_APPEND_LINE(static_name), \
+ name_counter, \
+ context) \
+ sk_atomic_inc(&name_counter);
+
+#define INTERNAL_GR_CREATE_TRACE_MARKER_C(name, name_counter, context, ...) \
+ GR_CREATE_GPU_TRACE_MARKER_C(name, name_counter, context) \
+
+#define GR_CREATE_GPU_TRACE_MARKER_C(name, name_counter, context) \
+ GrGpuTraceMarkerGeneratorContext SK_MACRO_APPEND_LINE(TMG)(context); \
+ SK_MACRO_APPEND_LINE(TMG).initialize(name, &name_counter); \
+
+#endif
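
A sketch of the context-level macro in use; drawSomething and ctx are hypothetical, and both classname and op must be string literals because the macro concatenates them:

    void drawSomething(GrContext* ctx) {
        GR_CREATE_TRACE_MARKER_CONTEXT("MyRenderer", "drawSomething", ctx);
        // ... issue GPU work; the CPU trace event (and the GPU marker, if the
        // "skia.gpu" category is enabled) covers the rest of this scope ...
    }
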
diff --git a/gfx/skia/skia/src/gpu/GrUserStencilSettings.h b/gfx/skia/skia/src/gpu/GrUserStencilSettings.h
new file mode 100644
index 000000000..32fb1396f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrUserStencilSettings.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrUserStencilSettings_DEFINED
+#define GrUserStencilSettings_DEFINED
+
+#include "GrTypes.h"
+
+/**
+ * Gr uses the stencil buffer to implement complex clipping inside the
+ * GrDrawTarget class. The GrDrawTarget makes a subset of the stencil buffer
+ * bits available for other uses by external code (user bits). Client code can
+ * modify these bits. GrDrawTarget will ignore ref, mask, and writemask bits
+ * provided by clients that fall outside the user range.
+ *
+ * When code outside the GrDrawTarget class uses the stencil buffer the contract
+ * is as follows:
+ *
+ * > Normal stencil funcs allow the client to pass / fail regardless of the
+ * reserved clip bits.
+ * > Additional functions allow a test against the clip along with a limited
+ * set of tests against the user bits.
+ * > Client can assume all user bits are zero initially.
+ * > Client must ensure that after all its passes are finished it has only
+ * written to the color buffer in the region inside the clip. Furthermore, it
+ * must zero all user bits that were modified (both inside and outside the
+ * clip).
+ */
+
+enum GrStencilFlags {
+ kDisabled_StencilFlag = 0x1,
+ kNoModifyStencil_StencilFlag = 0x2,
+ kNoWrapOps_StencilFlag = 0x4,
+ kSingleSided_StencilFlag = 0x8,
+
+ kLast_StencilFlag = kSingleSided_StencilFlag,
+ kAll_StencilFlags = kLast_StencilFlag | (kLast_StencilFlag - 1)
+};
+
+template<typename TTest, typename TOp> struct GrTStencilFaceSettings {
+ uint16_t fRef; // Reference value for stencil test and ops.
+ TTest fTest; // Stencil test function, where fRef is on the left side.
+ uint16_t fTestMask; // Bitwise "and" to perform on fRef and stencil values before testing.
+ // (e.g. (fRef & fTestMask) < (stencil & fTestMask))
+ TOp fPassOp; // Op to perform when the test passes.
+ TOp fFailOp; // Op to perform when the test fails.
+ uint16_t fWriteMask; // Indicates which bits in the stencil buffer should be updated.
+ // (e.g. stencil = (newValue & fWriteMask) | (stencil & ~fWriteMask))
+};
+
+enum class GrUserStencilTest : uint16_t {
+ // Tests that respect the clip bit. If a stencil clip is not in effect, the "IfInClip" is
+ // ignored and these only act on user bits.
+ kAlwaysIfInClip,
+ kEqualIfInClip,
+ kLessIfInClip,
+ kLEqualIfInClip,
+
+ // Tests that ignore the clip bit. The client is responsible for ensuring no color write occurs
+ // outside the clip if it is in use.
+ kAlways,
+ kNever,
+ kGreater,
+ kGEqual,
+ kLess,
+ kLEqual,
+ kEqual,
+ kNotEqual
+};
+constexpr static GrUserStencilTest kLastClippedStencilTest = GrUserStencilTest::kLEqualIfInClip;
+constexpr static int kGrUserStencilTestCount = 1 + (int)GrUserStencilTest::kNotEqual;
+
+enum class GrUserStencilOp : uint8_t {
+ kKeep,
+
+ // Ops that only modify user bits. These must not be paired with ops that modify the clip bit.
+ kZero,
+ kReplace, // Replace stencil value with fRef (only the bits enabled in fWriteMask).
+ kInvert,
+ kIncWrap,
+ kDecWrap,
+ // These two should only be used if wrap ops are not supported, or if the math is guaranteed
+ // to not overflow. The user bits may or may not clamp, depending on the state of non-user bits.
+ kIncMaybeClamp,
+ kDecMaybeClamp,
+
+ // Ops that only modify the clip bit. These must not be paired with ops that modify user bits.
+ kZeroClipBit,
+ kSetClipBit,
+ kInvertClipBit,
+
+ // Ops that modify both clip and user bits. These can only be paired with kKeep or each other.
+ kSetClipAndReplaceUserBits,
+ kZeroClipAndUserBits
+};
+constexpr static GrUserStencilOp kLastUserOnlyStencilOp = GrUserStencilOp::kDecMaybeClamp;
+constexpr static GrUserStencilOp kLastClipOnlyStencilOp = GrUserStencilOp::kInvertClipBit;
+constexpr static int kGrUserStencilOpCount = 1 + (int)GrUserStencilOp::kZeroClipAndUserBits;
+
+/**
+ * This struct is a compile-time constant representation of user stencil settings. It describes in
+ * abstract terms how a draw will use the stencil buffer. It gets ODR-used at runtime to define a
+ * draw's stencil settings, and is later translated into concrete settings when the pipeline is
+ * finalized.
+ */
+struct GrUserStencilSettings {
+ typedef GrTStencilFaceSettings<GrUserStencilTest, GrUserStencilOp> Face;
+
+ template<GrUserStencilTest, GrUserStencilOp PassOp, GrUserStencilOp FailOp> struct Attrs;
+
+ // Unfortunately, this is the only way to pass template arguments to a constructor.
+ template<uint16_t Ref, GrUserStencilTest Test, uint16_t TestMask,
+ GrUserStencilOp PassOp, GrUserStencilOp FailOp, uint16_t WriteMask> struct Init {};
+
+ template<uint16_t FtRef, uint16_t BkRef,
+ GrUserStencilTest FtTest, GrUserStencilTest BkTest,
+ uint16_t FtTestMask, uint16_t BkTestMask,
+ GrUserStencilOp FtPassOp, GrUserStencilOp BkPassOp,
+ GrUserStencilOp FtFailOp, GrUserStencilOp BkFailOp,
+ uint16_t FtWriteMask, uint16_t BkWriteMask> struct InitSeparate {};
+
+ template<uint16_t Ref, GrUserStencilTest Test, uint16_t TestMask,
+ GrUserStencilOp PassOp, GrUserStencilOp FailOp, uint16_t WriteMask>
+ constexpr static Init<Ref, Test, TestMask, PassOp, FailOp, WriteMask> StaticInit() {
+ return Init<Ref, Test, TestMask, PassOp, FailOp, WriteMask>();
+ }
+
+ template<uint16_t FtRef, uint16_t BkRef,
+ GrUserStencilTest FtTest, GrUserStencilTest BkTest,
+ uint16_t FtTestMask, uint16_t BkTestMask,
+ GrUserStencilOp FtPassOp, GrUserStencilOp BkPassOp,
+ GrUserStencilOp FtFailOp, GrUserStencilOp BkFailOp,
+ uint16_t FtWriteMask, uint16_t BkWriteMask>
+ constexpr static InitSeparate<FtRef, BkRef, FtTest, BkTest, FtTestMask, BkTestMask,
+ FtPassOp, BkPassOp, FtFailOp, BkFailOp, FtWriteMask,
+ BkWriteMask> StaticInitSeparate() {
+ return InitSeparate<FtRef, BkRef, FtTest, BkTest, FtTestMask, BkTestMask,
+ FtPassOp, BkPassOp, FtFailOp, BkFailOp, FtWriteMask, BkWriteMask>();
+ }
+
+ // We construct with template arguments in order to enforce that the struct be compile-time
+ // constant and to make use of static asserts.
+ template<uint16_t Ref, GrUserStencilTest Test, uint16_t TestMask,
+ GrUserStencilOp PassOp, GrUserStencilOp FailOp, uint16_t WriteMask,
+ typename Attrs = Attrs<Test, PassOp, FailOp> >
+ constexpr explicit GrUserStencilSettings(
+ const Init<Ref, Test, TestMask, PassOp, FailOp, WriteMask>&)
+ : fFrontFlags{(uint16_t)(Attrs::Flags(false) | kSingleSided_StencilFlag),
+ (uint16_t)(Attrs::Flags(true) | kSingleSided_StencilFlag)}
+ , fFront{Ref, Test, Attrs::EffectiveTestMask(TestMask), PassOp, FailOp,
+ Attrs::EffectiveWriteMask(WriteMask)}
+ , fBackFlags{(uint16_t)(Attrs::Flags(false) | kSingleSided_StencilFlag),
+ (uint16_t)(Attrs::Flags(true) | kSingleSided_StencilFlag)}
+ , fBack{Ref, Test, Attrs::EffectiveTestMask(TestMask), PassOp, FailOp,
+ Attrs::EffectiveWriteMask(WriteMask)} {
+ }
+
+ template<uint16_t FtRef, uint16_t BkRef,
+ GrUserStencilTest FtTest, GrUserStencilTest BkTest,
+ uint16_t FtTestMask, uint16_t BkTestMask,
+ GrUserStencilOp FtPassOp, GrUserStencilOp BkPassOp,
+ GrUserStencilOp FtFailOp, GrUserStencilOp BkFailOp,
+ uint16_t FtWriteMask, uint16_t BkWriteMask,
+ typename FtAttrs = Attrs<FtTest, FtPassOp, FtFailOp>,
+ typename BkAttrs = Attrs<BkTest, BkPassOp, BkFailOp> >
+ constexpr explicit GrUserStencilSettings(
+ const InitSeparate<FtRef, BkRef, FtTest, BkTest, FtTestMask, BkTestMask,
+ FtPassOp, BkPassOp, FtFailOp, BkFailOp, FtWriteMask, BkWriteMask>&)
+ : fFrontFlags{FtAttrs::Flags(false), FtAttrs::Flags(true)}
+ , fFront{FtRef, FtTest, FtAttrs::EffectiveTestMask(FtTestMask), FtPassOp, FtFailOp,
+ FtAttrs::EffectiveWriteMask(FtWriteMask)}
+ , fBackFlags{BkAttrs::Flags(false), BkAttrs::Flags(true)}
+ , fBack{BkRef, BkTest, BkAttrs::EffectiveTestMask(BkTestMask), BkPassOp, BkFailOp,
+ BkAttrs::EffectiveWriteMask(BkWriteMask)} {}
+
+ // This struct can only be constructed with static initializers.
+ GrUserStencilSettings() = delete;
+ GrUserStencilSettings(const GrUserStencilSettings&) = delete;
+
+ const uint16_t fFrontFlags[2]; // frontFlagsForDraw = fFrontFlags[hasStencilClip].
+ const Face fFront;
+ const uint16_t fBackFlags[2]; // backFlagsForDraw = fBackFlags[hasStencilClip].
+ const Face fBack;
+
+ static const GrUserStencilSettings& kUnused;
+
+ bool isUnused() const { return this == &kUnused; }
+};
+
+template<GrUserStencilTest Test, GrUserStencilOp PassOp, GrUserStencilOp FailOp>
+struct GrUserStencilSettings::Attrs {
+ // Ensure an op that only modifies user bits isn't paired with one that modifies clip bits.
+ GR_STATIC_ASSERT(GrUserStencilOp::kKeep == PassOp || GrUserStencilOp::kKeep == FailOp ||
+ (PassOp <= kLastUserOnlyStencilOp) == (FailOp <= kLastUserOnlyStencilOp));
+ // Ensure an op that only modifies clip bits isn't paired with one that modifies clip and user.
+ GR_STATIC_ASSERT(GrUserStencilOp::kKeep == PassOp || GrUserStencilOp::kKeep == FailOp ||
+ (PassOp <= kLastClipOnlyStencilOp) == (FailOp <= kLastClipOnlyStencilOp));
+
+ constexpr static bool TestAlwaysPasses(bool hasStencilClip) {
+ return (!hasStencilClip && GrUserStencilTest::kAlwaysIfInClip == Test) ||
+ GrUserStencilTest::kAlways == Test;
+ }
+ constexpr static bool DoesNotModifyStencil(bool hasStencilClip) {
+ return (GrUserStencilTest::kNever == Test || GrUserStencilOp::kKeep == PassOp) &&
+ (TestAlwaysPasses(hasStencilClip) || GrUserStencilOp::kKeep == FailOp);
+ }
+ constexpr static bool IsDisabled(bool hasStencilClip) {
+ return TestAlwaysPasses(hasStencilClip) && DoesNotModifyStencil(hasStencilClip);
+ }
+ constexpr static bool UsesWrapOps() {
+ return GrUserStencilOp::kIncWrap == PassOp || GrUserStencilOp::kDecWrap == PassOp ||
+ GrUserStencilOp::kIncWrap == FailOp || GrUserStencilOp::kDecWrap == FailOp;
+ }
+ constexpr static bool TestIgnoresRef() {
+ return (GrUserStencilTest::kAlwaysIfInClip == Test || GrUserStencilTest::kAlways == Test ||
+ GrUserStencilTest::kNever == Test);
+ }
+ constexpr static uint16_t Flags(bool hasStencilClip) {
+ return (IsDisabled(hasStencilClip) ? kDisabled_StencilFlag : 0) |
+ (DoesNotModifyStencil(hasStencilClip) ? kNoModifyStencil_StencilFlag : 0) |
+ (UsesWrapOps() ? 0 : kNoWrapOps_StencilFlag);
+ }
+ constexpr static uint16_t EffectiveTestMask(uint16_t testMask) {
+ return TestIgnoresRef() ? 0 : testMask;
+ }
+ constexpr static uint16_t EffectiveWriteMask(uint16_t writeMask) {
+ // We don't modify the mask differently when hasStencilClip=false because either the entire
+ // face gets disabled in that case (e.g. Test=kAlwaysIfInClip, PassOp=kKeep), or else the
+ // effective mask stays the same either way.
+ return DoesNotModifyStencil(true) ? 0 : writeMask;
+ }
+};
+
+#endif
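
A sketch of the intended compile-time construction pattern via StaticInit(); the particular ref and mask values here are illustrative:

    // Passes only where the stencil clip bit is set; never writes to the stencil buffer.
    constexpr static GrUserStencilSettings gTestClipBit(
        GrUserStencilSettings::StaticInit<
            0x0000,                              // ref
            GrUserStencilTest::kAlwaysIfInClip,  // test
            0xffff,                              // test mask
            GrUserStencilOp::kKeep,              // pass op
            GrUserStencilOp::kKeep,              // fail op
            0xffff>()                            // write mask
    );
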
diff --git a/gfx/skia/skia/src/gpu/GrWindowRectangles.h b/gfx/skia/skia/src/gpu/GrWindowRectangles.h
new file mode 100644
index 000000000..076c40d7a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrWindowRectangles.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrWindowRectangles_DEFINED
+#define GrWindowRectangles_DEFINED
+
+#include "GrNonAtomicRef.h"
+#include "SkRect.h"
+
+class GrWindowRectangles {
+public:
+ constexpr static int kMaxWindows = 8;
+
+ GrWindowRectangles() : fCount(0) {}
+ GrWindowRectangles(const GrWindowRectangles& that) : fCount(0) { *this = that; }
+ ~GrWindowRectangles() { SkSafeUnref(this->rec()); }
+
+ bool empty() const { return !fCount; }
+ int count() const { return fCount; }
+ const SkIRect* data() const;
+
+ void reset();
+ GrWindowRectangles& operator=(const GrWindowRectangles&);
+
+ SkIRect& addWindow(const SkIRect& window) { return this->addWindow() = window; }
+ SkIRect& addWindow();
+
+ bool operator!=(const GrWindowRectangles& that) const { return !(*this == that); }
+ bool operator==(const GrWindowRectangles&) const;
+
+private:
+ constexpr static int kNumLocalWindows = 1;
+ struct Rec;
+
+ const Rec* rec() const { return fCount <= kNumLocalWindows ? nullptr : fRec; }
+
+ int fCount;
+ union {
+ SkIRect fLocalWindows[kNumLocalWindows]; // If fCount <= kNumLocalWindows.
+ Rec* fRec; // If fCount > kNumLocalWindows.
+ };
+};
+
+struct GrWindowRectangles::Rec : public GrNonAtomicRef<Rec> {
+ Rec(const SkIRect* windows, int numWindows) {
+ SkASSERT(numWindows < kMaxWindows);
+ memcpy(fData, windows, sizeof(SkIRect) * numWindows);
+ }
+
+ SkIRect fData[kMaxWindows];
+};
+
+inline const SkIRect* GrWindowRectangles::data() const {
+ return fCount <= kNumLocalWindows ? fLocalWindows : fRec->fData;
+}
+
+inline void GrWindowRectangles::reset() {
+ SkSafeUnref(this->rec());
+ fCount = 0;
+}
+
+inline GrWindowRectangles& GrWindowRectangles::operator=(const GrWindowRectangles& that) {
+ SkSafeUnref(this->rec());
+ fCount = that.fCount;
+ if (fCount <= kNumLocalWindows) {
+ memcpy(fLocalWindows, that.fLocalWindows, fCount * sizeof(SkIRect));
+ } else {
+ fRec = SkRef(that.fRec);
+ }
+ return *this;
+}
+
+inline SkIRect& GrWindowRectangles::addWindow() {
+ SkASSERT(fCount < kMaxWindows);
+ if (fCount < kNumLocalWindows) {
+ return fLocalWindows[fCount++];
+ }
+ if (fCount == kNumLocalWindows) {
+ fRec = new Rec(fLocalWindows, kNumLocalWindows);
+ } else if (!fRec->unique()) { // Simple copy-on-write.
+ fRec->unref();
+ fRec = new Rec(fRec->fData, fCount);
+ }
+ return fRec->fData[fCount++];
+}
+
+inline bool GrWindowRectangles::operator==(const GrWindowRectangles& that) const {
+ if (fCount != that.fCount) {
+ return false;
+ }
+ if (fCount > kNumLocalWindows && fRec == that.fRec) {
+ return true;
+ }
+ return !fCount || !memcmp(this->data(), that.data(), sizeof(SkIRect) * fCount);
+}
+
+#endif
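
A sketch of the small-buffer/copy-on-write behavior implemented above; the rectangles are arbitrary:

    GrWindowRectangles windows;
    windows.addWindow(SkIRect::MakeLTRB(0, 0, 16, 16));    // stored in fLocalWindows
    windows.addWindow(SkIRect::MakeLTRB(32, 32, 64, 64));  // spills into a ref-counted Rec
    GrWindowRectangles copy = windows;                      // shares the Rec, no deep copy
    SkASSERT(copy == windows);
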
diff --git a/gfx/skia/skia/src/gpu/GrWindowRectsState.h b/gfx/skia/skia/src/gpu/GrWindowRectsState.h
new file mode 100644
index 000000000..9d3b61b9c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrWindowRectsState.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrWindowRectsState_DEFINED
+#define GrWindowRectsState_DEFINED
+
+#include "GrWindowRectangles.h"
+
+class GrWindowRectsState {
+public:
+ enum class Mode : bool {
+ kExclusive,
+ kInclusive
+ };
+
+ GrWindowRectsState() : fMode(Mode::kExclusive) {}
+ GrWindowRectsState(const GrWindowRectangles& windows, const SkIPoint& origin, Mode mode)
+ : fMode(mode)
+ , fOrigin(origin)
+ , fWindows(windows) {
+ }
+
+ bool enabled() const { return Mode::kInclusive == fMode || !fWindows.empty(); }
+ Mode mode() const { return fMode; }
+ const SkIPoint& origin() const { return fOrigin; }
+ const GrWindowRectangles& windows() const { return fWindows; }
+ int numWindows() const { return fWindows.count(); }
+
+ void setDisabled() {
+ fMode = Mode::kExclusive;
+ fWindows.reset();
+ }
+
+ void set(const GrWindowRectangles& windows, const SkIPoint& origin, Mode mode) {
+ fMode = mode;
+ fOrigin = origin;
+ fWindows = windows;
+ }
+
+ bool cheapEqualTo(const GrWindowRectsState& that) const {
+ if (fMode != that.fMode) {
+ return false;
+ }
+ if (!fWindows.empty() && fOrigin != that.fOrigin) {
+ return false;
+ }
+ return fWindows == that.fWindows;
+ }
+
+private:
+ Mode fMode;
+ SkIPoint fOrigin;
+ GrWindowRectangles fWindows;
+};
+
+#endif
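
A sketch pairing this state object with a GrWindowRectangles instance; windows here is hypothetical, for example built as in the previous sketch:

    GrWindowRectsState state(windows, SkIPoint::Make(0, 0),
                             GrWindowRectsState::Mode::kExclusive);
    if (state.enabled()) {
        // a backend would exclude state.windows(), offset by state.origin(), from rendering
    }
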
diff --git a/gfx/skia/skia/src/gpu/GrXferProcessor.cpp b/gfx/skia/skia/src/gpu/GrXferProcessor.cpp
new file mode 100644
index 000000000..76e0ba0fc
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrXferProcessor.cpp
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrXferProcessor.h"
+#include "GrPipeline.h"
+#include "GrProcOptInfo.h"
+#include "gl/GrGLCaps.h"
+
+GrXferProcessor::GrXferProcessor()
+ : fWillReadDstColor(false)
+ , fDstReadUsesMixedSamples(false)
+ , fDstTextureOffset() {
+}
+
+GrXferProcessor::GrXferProcessor(const DstTexture* dstTexture,
+ bool willReadDstColor,
+ bool hasMixedSamples)
+ : fWillReadDstColor(willReadDstColor)
+ , fDstReadUsesMixedSamples(willReadDstColor && hasMixedSamples)
+ , fDstTextureOffset() {
+ if (dstTexture && dstTexture->texture()) {
+ SkASSERT(willReadDstColor);
+ fDstTexture.reset(dstTexture->texture());
+ fDstTextureOffset = dstTexture->offset();
+ this->addTextureAccess(&fDstTexture);
+ this->setWillReadFragmentPosition();
+ }
+}
+
+GrXferProcessor::OptFlags GrXferProcessor::getOptimizations(
+ const GrPipelineOptimizations& optimizations,
+ bool doesStencilWrite,
+ GrColor* overrideColor,
+ const GrCaps& caps) const {
+ GrXferProcessor::OptFlags flags = this->onGetOptimizations(optimizations,
+ doesStencilWrite,
+ overrideColor,
+ caps);
+
+ if (this->willReadDstColor()) {
+ // When performing a dst read we handle coverage in the base class.
+ SkASSERT(!(flags & GrXferProcessor::kIgnoreCoverage_OptFlag));
+ if (optimizations.fCoveragePOI.isSolidWhite()) {
+ flags |= GrXferProcessor::kIgnoreCoverage_OptFlag;
+ }
+ }
+ return flags;
+}
+
+bool GrXferProcessor::hasSecondaryOutput() const {
+ if (!this->willReadDstColor()) {
+ return this->onHasSecondaryOutput();
+ }
+ return this->dstReadUsesMixedSamples();
+}
+
+void GrXferProcessor::getBlendInfo(BlendInfo* blendInfo) const {
+ blendInfo->reset();
+ if (!this->willReadDstColor()) {
+ this->onGetBlendInfo(blendInfo);
+ } else if (this->dstReadUsesMixedSamples()) {
+ blendInfo->fDstBlend = kIS2A_GrBlendCoeff;
+ }
+}
+
+void GrXferProcessor::getGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const {
+ uint32_t key = this->willReadDstColor() ? 0x1 : 0x0;
+ if (key) {
+ if (const GrTexture* dstTexture = this->getDstTexture()) {
+ key |= 0x2;
+ if (kTopLeft_GrSurfaceOrigin == dstTexture->origin()) {
+ key |= 0x4;
+ }
+ }
+ if (this->dstReadUsesMixedSamples()) {
+ key |= 0x8;
+ }
+ }
+ b->add32(key);
+ this->onGetGLSLProcessorKey(caps, b);
+}
+
+GrXferBarrierType GrXferProcessor::xferBarrierType(const GrRenderTarget* rt,
+ const GrCaps& caps) const {
+ SkASSERT(rt);
+ if (static_cast<const GrSurface*>(rt) == this->getDstTexture()) {
+ // Texture barriers are required when a shader reads and renders to the same texture.
+ SkASSERT(caps.textureBarrierSupport());
+ return kTexture_GrXferBarrierType;
+ }
+ return this->onXferBarrier(rt, caps);
+}
+
+#ifdef SK_DEBUG
+static const char* equation_string(GrBlendEquation eq) {
+ switch (eq) {
+ case kAdd_GrBlendEquation:
+ return "add";
+ case kSubtract_GrBlendEquation:
+ return "subtract";
+ case kReverseSubtract_GrBlendEquation:
+ return "reverse_subtract";
+ case kScreen_GrBlendEquation:
+ return "screen";
+ case kOverlay_GrBlendEquation:
+ return "overlay";
+ case kDarken_GrBlendEquation:
+ return "darken";
+ case kLighten_GrBlendEquation:
+ return "lighten";
+ case kColorDodge_GrBlendEquation:
+ return "color_dodge";
+ case kColorBurn_GrBlendEquation:
+ return "color_burn";
+ case kHardLight_GrBlendEquation:
+ return "hard_light";
+ case kSoftLight_GrBlendEquation:
+ return "soft_light";
+ case kDifference_GrBlendEquation:
+ return "difference";
+ case kExclusion_GrBlendEquation:
+ return "exclusion";
+ case kMultiply_GrBlendEquation:
+ return "multiply";
+ case kHSLHue_GrBlendEquation:
+ return "hsl_hue";
+ case kHSLSaturation_GrBlendEquation:
+ return "hsl_saturation";
+ case kHSLColor_GrBlendEquation:
+ return "hsl_color";
+ case kHSLLuminosity_GrBlendEquation:
+ return "hsl_luminosity";
+ };
+ return "";
+}
+
+static const char* coeff_string(GrBlendCoeff coeff) {
+ switch (coeff) {
+ case kZero_GrBlendCoeff:
+ return "zero";
+ case kOne_GrBlendCoeff:
+ return "one";
+ case kSC_GrBlendCoeff:
+ return "src_color";
+ case kISC_GrBlendCoeff:
+ return "inv_src_color";
+ case kDC_GrBlendCoeff:
+ return "dst_color";
+ case kIDC_GrBlendCoeff:
+ return "inv_dst_color";
+ case kSA_GrBlendCoeff:
+ return "src_alpha";
+ case kISA_GrBlendCoeff:
+ return "inv_src_alpha";
+ case kDA_GrBlendCoeff:
+ return "dst_alpha";
+ case kIDA_GrBlendCoeff:
+ return "inv_dst_alpha";
+ case kConstC_GrBlendCoeff:
+ return "const_color";
+ case kIConstC_GrBlendCoeff:
+ return "inv_const_color";
+ case kConstA_GrBlendCoeff:
+ return "const_alpha";
+ case kIConstA_GrBlendCoeff:
+ return "inv_const_alpha";
+ case kS2C_GrBlendCoeff:
+ return "src2_color";
+ case kIS2C_GrBlendCoeff:
+ return "inv_src2_color";
+ case kS2A_GrBlendCoeff:
+ return "src2_alpha";
+ case kIS2A_GrBlendCoeff:
+ return "inv_src2_alpha";
+ }
+ return "";
+}
+
+SkString GrXferProcessor::BlendInfo::dump() const {
+ SkString out;
+ out.printf("write_color(%d) equation(%s) src_coeff(%s) dst_coeff:(%s) const(0x%08x)",
+ fWriteColor, equation_string(fEquation), coeff_string(fSrcBlend),
+ coeff_string(fDstBlend), fBlendConstant);
+ return out;
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrXferProcessor* GrXPFactory::createXferProcessor(const GrPipelineOptimizations& optimizations,
+ bool hasMixedSamples,
+ const DstTexture* dstTexture,
+ const GrCaps& caps) const {
+#ifdef SK_DEBUG
+ if (this->willReadDstColor(caps, optimizations)) {
+ if (!caps.shaderCaps()->dstReadInShaderSupport()) {
+ SkASSERT(dstTexture && dstTexture->texture());
+ } else {
+ SkASSERT(!dstTexture || !dstTexture->texture());
+ }
+ } else {
+ SkASSERT(!dstTexture || !dstTexture->texture());
+ }
+ SkASSERT(!hasMixedSamples || caps.shaderCaps()->dualSourceBlendingSupport());
+#endif
+ return this->onCreateXferProcessor(caps, optimizations, hasMixedSamples, dstTexture);
+}
+
+bool GrXPFactory::willNeedDstTexture(const GrCaps& caps,
+ const GrPipelineOptimizations& optimizations) const {
+ return (this->willReadDstColor(caps, optimizations) &&
+ !caps.shaderCaps()->dstReadInShaderSupport());
+}
+
+bool GrXPFactory::willReadDstColor(const GrCaps& caps,
+ const GrPipelineOptimizations& optimizations) const {
+ return optimizations.fOverrides.fUsePLSDstRead || this->onWillReadDstColor(caps, optimizations);
+}
diff --git a/gfx/skia/skia/src/gpu/GrYUVProvider.cpp b/gfx/skia/skia/src/gpu/GrYUVProvider.cpp
new file mode 100644
index 000000000..db58e0afe
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrYUVProvider.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrContext.h"
+#include "GrDrawContext.h"
+#include "GrYUVProvider.h"
+#include "effects/GrGammaEffect.h"
+#include "effects/GrYUVEffect.h"
+
+#include "SkCachedData.h"
+#include "SkRefCnt.h"
+#include "SkResourceCache.h"
+#include "SkYUVPlanesCache.h"
+
+namespace {
+/**
+ * Helper class to manage the resources used for storing the YUV planar data. Depending on the
+ * useCache option, we may find (and lock) the data in our ResourceCache, or we may have allocated
+ * it in scratch storage.
+ */
+class YUVScoper {
+public:
+ bool init(GrYUVProvider*, SkYUVPlanesCache::Info*, void* planes[3], bool useCache);
+
+private:
+ // we only use one or the other of these
+ SkAutoTUnref<SkCachedData> fCachedData;
+ SkAutoMalloc fStorage;
+};
+}
+
+bool YUVScoper::init(GrYUVProvider* provider, SkYUVPlanesCache::Info* yuvInfo, void* planes[3],
+ bool useCache) {
+ if (useCache) {
+ fCachedData.reset(SkYUVPlanesCache::FindAndRef(provider->onGetID(), yuvInfo));
+ }
+
+ if (fCachedData.get()) {
+ planes[0] = (void*)fCachedData->data();
+ planes[1] = (uint8_t*)planes[0] + (yuvInfo->fSizeInfo.fWidthBytes[SkYUVSizeInfo::kY] *
+ yuvInfo->fSizeInfo.fSizes[SkYUVSizeInfo::kY].fHeight);
+ planes[2] = (uint8_t*)planes[1] + (yuvInfo->fSizeInfo.fWidthBytes[SkYUVSizeInfo::kU] *
+ yuvInfo->fSizeInfo.fSizes[SkYUVSizeInfo::kU].fHeight);
+ } else {
+ // Fetch yuv plane sizes for memory allocation.
+ if (!provider->onQueryYUV8(&yuvInfo->fSizeInfo, &yuvInfo->fColorSpace)) {
+ return false;
+ }
+
+ // Allocate the memory for YUV
+ size_t totalSize(0);
+ for (int i = 0; i < 3; i++) {
+ totalSize += yuvInfo->fSizeInfo.fWidthBytes[i] * yuvInfo->fSizeInfo.fSizes[i].fHeight;
+ }
+ if (useCache) {
+ fCachedData.reset(SkResourceCache::NewCachedData(totalSize));
+ planes[0] = fCachedData->writable_data();
+ } else {
+ fStorage.reset(totalSize);
+ planes[0] = fStorage.get();
+ }
+ planes[1] = (uint8_t*)planes[0] + (yuvInfo->fSizeInfo.fWidthBytes[SkYUVSizeInfo::kY] *
+ yuvInfo->fSizeInfo.fSizes[SkYUVSizeInfo::kY].fHeight);
+ planes[2] = (uint8_t*)planes[1] + (yuvInfo->fSizeInfo.fWidthBytes[SkYUVSizeInfo::kU] *
+ yuvInfo->fSizeInfo.fSizes[SkYUVSizeInfo::kU].fHeight);
+
+ // Get the YUV planes.
+ if (!provider->onGetYUV8Planes(yuvInfo->fSizeInfo, planes)) {
+ return false;
+ }
+
+ if (useCache) {
+ // Decoding is done, cache the resulting YUV planes
+ SkYUVPlanesCache::Add(provider->onGetID(), fCachedData, yuvInfo);
+ }
+ }
+ return true;
+}
+
+sk_sp<GrTexture> GrYUVProvider::refAsTexture(GrContext* ctx,
+ const GrSurfaceDesc& desc,
+ bool useCache) {
+ SkYUVPlanesCache::Info yuvInfo;
+ void* planes[3];
+ YUVScoper scoper;
+ if (!scoper.init(this, &yuvInfo, planes, useCache)) {
+ return nullptr;
+ }
+
+ GrSurfaceDesc yuvDesc;
+ yuvDesc.fConfig = kAlpha_8_GrPixelConfig;
+ SkAutoTUnref<GrTexture> yuvTextures[3];
+ for (int i = 0; i < 3; i++) {
+ yuvDesc.fWidth = yuvInfo.fSizeInfo.fSizes[i].fWidth;
+ yuvDesc.fHeight = yuvInfo.fSizeInfo.fSizes[i].fHeight;
+ // TODO: why do we need this check?
+ bool needsExactTexture =
+ (yuvDesc.fWidth != yuvInfo.fSizeInfo.fSizes[SkYUVSizeInfo::kY].fWidth) ||
+ (yuvDesc.fHeight != yuvInfo.fSizeInfo.fSizes[SkYUVSizeInfo::kY].fHeight);
+ if (needsExactTexture) {
+ yuvTextures[i].reset(ctx->textureProvider()->createTexture(yuvDesc, SkBudgeted::kYes));
+ } else {
+ yuvTextures[i].reset(ctx->textureProvider()->createApproxTexture(yuvDesc));
+ }
+ if (!yuvTextures[i] ||
+ !yuvTextures[i]->writePixels(0, 0, yuvDesc.fWidth, yuvDesc.fHeight, yuvDesc.fConfig,
+ planes[i], yuvInfo.fSizeInfo.fWidthBytes[i])) {
+ return nullptr;
+ }
+ }
+
+ // We never want to perform color-space conversion during the decode
+ sk_sp<GrDrawContext> drawContext(ctx->makeDrawContext(SkBackingFit::kExact,
+ desc.fWidth, desc.fHeight,
+ desc.fConfig, nullptr,
+ desc.fSampleCnt));
+ if (!drawContext) {
+ return nullptr;
+ }
+
+ GrPaint paint;
+ sk_sp<GrFragmentProcessor> yuvToRgbProcessor(
+ GrYUVEffect::MakeYUVToRGB(yuvTextures[0], yuvTextures[1], yuvTextures[2],
+ yuvInfo.fSizeInfo.fSizes, yuvInfo.fColorSpace, false));
+ paint.addColorFragmentProcessor(std::move(yuvToRgbProcessor));
+
+ // If we're decoding an sRGB image, the result of our linear math on the YUV planes is already
+ // in sRGB. (The encoding is just math on bytes, with no concept of color spaces.) So, we need
+ // to output the results of that math directly to the buffer that we will then consider sRGB.
+ // If we have sRGB write control, we can just tell the HW not to do the Linear -> sRGB step.
+ // Otherwise, we do our shader math to go from YUV -> sRGB, manually convert sRGB -> Linear,
+ // then let the HW convert Linear -> sRGB.
+ if (GrPixelConfigIsSRGB(desc.fConfig)) {
+ if (ctx->caps()->srgbWriteControl()) {
+ paint.setDisableOutputConversionToSRGB(true);
+ } else {
+ paint.addColorFragmentProcessor(GrGammaEffect::Make(2.2f));
+ }
+ }
+
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+ const SkRect r = SkRect::MakeIWH(yuvInfo.fSizeInfo.fSizes[SkYUVSizeInfo::kY].fWidth,
+ yuvInfo.fSizeInfo.fSizes[SkYUVSizeInfo::kY].fHeight);
+
+ drawContext->drawRect(GrNoClip(), paint, SkMatrix::I(), r);
+
+ return drawContext->asTexture();
+}
diff --git a/gfx/skia/skia/src/gpu/GrYUVProvider.h b/gfx/skia/skia/src/gpu/GrYUVProvider.h
new file mode 100644
index 000000000..c32af15df
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/GrYUVProvider.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrYUVProvider_DEFINED
+#define GrYUVProvider_DEFINED
+
+#include "GrTypes.h"
+#include "SkImageInfo.h"
+#include "SkYUVSizeInfo.h"
+
+class GrContext;
+class GrTexture;
+
+/**
+ * There are at least 2 different ways to extract/retrieve YUV planar data...
+ * - SkPixelRef
+ * - SkImageGenerator
+ *
+ * To share common functionality around using the planar data, we use this abstract base-class
+ * to represent accessing that data.
+ */
+class GrYUVProvider {
+public:
+ virtual ~GrYUVProvider() {}
+
+ /**
+ * On success, this returns a texture that has converted the YUV data from the provider
+ * into a form that is supported by the GPU (typically transformed into RGB). If useCache
+ * is true, then the texture will automatically have a key added, so it can be retrieved
+ * from the cache (assuming it is requested by a provider w/ the same genID).
+ *
+ * On failure (e.g. the provider had no data), this returns NULL.
+ */
+ sk_sp<GrTexture> refAsTexture(GrContext*, const GrSurfaceDesc&, bool useCache);
+
+ virtual uint32_t onGetID() = 0;
+
+ // These are not meant to be called by a client, only by the implementation
+
+ /**
+ * If decoding to YUV is supported, this returns true. Otherwise, this
+ * returns false and does not modify any of the parameters.
+ *
+ * @param sizeInfo Output parameter indicating the sizes and required
+ * allocation widths of the Y, U, and V planes.
+ * @param colorSpace Output parameter.
+ */
+ virtual bool onQueryYUV8(SkYUVSizeInfo* sizeInfo, SkYUVColorSpace* colorSpace) const = 0;
+
+ /**
+ * Returns true on success and false on failure.
+ * This always attempts to perform a full decode. If the client only
+ * wants size, it should call onQueryYUV8().
+ *
+ * @param sizeInfo Must exactly match the values returned by onQueryYUV8(), except
+ * that the fWidthBytes values may be larger than the
+ * recommendation (but not smaller).
+ * @param planes Memory for each of the Y, U, and V planes.
+ */
+ virtual bool onGetYUV8Planes(const SkYUVSizeInfo& sizeInfo, void* planes[3]) = 0;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/SkGpuDevice.cpp b/gfx/skia/skia/src/gpu/SkGpuDevice.cpp
new file mode 100644
index 000000000..248a240f0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/SkGpuDevice.cpp
@@ -0,0 +1,1823 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkGpuDevice.h"
+
+#include "GrBlurUtils.h"
+#include "GrContext.h"
+#include "GrDrawContextPriv.h"
+#include "GrGpu.h"
+#include "GrImageIDTextureAdjuster.h"
+#include "GrStyle.h"
+#include "GrTracing.h"
+
+#include "SkCanvasPriv.h"
+#include "SkDraw.h"
+#include "SkErrorInternals.h"
+#include "SkGlyphCache.h"
+#include "SkGr.h"
+#include "SkGrPriv.h"
+#include "SkImage_Base.h"
+#include "SkImageCacherator.h"
+#include "SkImageFilter.h"
+#include "SkImageFilterCache.h"
+#include "SkLatticeIter.h"
+#include "SkMaskFilter.h"
+#include "SkPathEffect.h"
+#include "SkPicture.h"
+#include "SkPictureData.h"
+#include "SkRasterClip.h"
+#include "SkRRect.h"
+#include "SkRecord.h"
+#include "SkSpecialImage.h"
+#include "SkStroke.h"
+#include "SkSurface.h"
+#include "SkSurface_Gpu.h"
+#include "SkTLazy.h"
+#include "SkUtils.h"
+#include "SkVertState.h"
+#include "SkXfermode.h"
+#include "batches/GrRectBatchFactory.h"
+#include "effects/GrBicubicEffect.h"
+#include "effects/GrDashingEffect.h"
+#include "effects/GrSimpleTextureEffect.h"
+#include "effects/GrTextureDomain.h"
+#include "text/GrTextUtils.h"
+
+#if SK_SUPPORT_GPU
+
+#define ASSERT_SINGLE_OWNER \
+ SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fContext->debugSingleOwner());)
+
+#if 0
+ extern bool (*gShouldDrawProc)();
+ #define CHECK_SHOULD_DRAW(draw) \
+ do { \
+ if (gShouldDrawProc && !gShouldDrawProc()) return; \
+ this->prepareDraw(draw); \
+ } while (0)
+#else
+ #define CHECK_SHOULD_DRAW(draw) this->prepareDraw(draw)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** Checks that the alpha type is legal and gets constructor flags. Returns false if device creation
+ should fail. */
+bool SkGpuDevice::CheckAlphaTypeAndGetFlags(
+ const SkImageInfo* info, SkGpuDevice::InitContents init, unsigned* flags) {
+ *flags = 0;
+ if (info) {
+ switch (info->alphaType()) {
+ case kPremul_SkAlphaType:
+ break;
+ case kOpaque_SkAlphaType:
+ *flags |= SkGpuDevice::kIsOpaque_Flag;
+ break;
+ default: // If it is unpremul or unknown don't try to render
+ return false;
+ }
+ }
+ if (kClear_InitContents == init) {
+ *flags |= kNeedClear_Flag;
+ }
+ return true;
+}
+
+sk_sp<SkGpuDevice> SkGpuDevice::Make(sk_sp<GrDrawContext> drawContext,
+ int width, int height,
+ InitContents init) {
+ if (!drawContext || drawContext->wasAbandoned()) {
+ return nullptr;
+ }
+ unsigned flags;
+ if (!CheckAlphaTypeAndGetFlags(nullptr, init, &flags)) {
+ return nullptr;
+ }
+ return sk_sp<SkGpuDevice>(new SkGpuDevice(std::move(drawContext), width, height, flags));
+}
+
+sk_sp<SkGpuDevice> SkGpuDevice::Make(GrContext* context, SkBudgeted budgeted,
+ const SkImageInfo& info, int sampleCount,
+ GrSurfaceOrigin origin,
+ const SkSurfaceProps* props, InitContents init) {
+ unsigned flags;
+ if (!CheckAlphaTypeAndGetFlags(&info, init, &flags)) {
+ return nullptr;
+ }
+
+ sk_sp<GrDrawContext> drawContext(MakeDrawContext(context, budgeted, info,
+ sampleCount, origin, props));
+ if (!drawContext) {
+ return nullptr;
+ }
+
+ return sk_sp<SkGpuDevice>(new SkGpuDevice(std::move(drawContext),
+ info.width(), info.height(), flags));
+}
+
+static SkImageInfo make_info(GrDrawContext* context, int w, int h, bool opaque) {
+ SkColorType colorType;
+ if (!GrPixelConfigToColorType(context->config(), &colorType)) {
+ colorType = kUnknown_SkColorType;
+ }
+ return SkImageInfo::Make(w, h, colorType,
+ opaque ? kOpaque_SkAlphaType : kPremul_SkAlphaType,
+ sk_ref_sp(context->getColorSpace()));
+}
+
+SkGpuDevice::SkGpuDevice(sk_sp<GrDrawContext> drawContext, int width, int height, unsigned flags)
+ : INHERITED(make_info(drawContext.get(), width, height, SkToBool(flags & kIsOpaque_Flag)),
+ drawContext->surfaceProps())
+ , fContext(SkRef(drawContext->accessRenderTarget()->getContext()))
+ , fDrawContext(std::move(drawContext))
+{
+ fSize.set(width, height);
+ fOpaque = SkToBool(flags & kIsOpaque_Flag);
+
+ if (flags & kNeedClear_Flag) {
+ this->clearAll();
+ }
+}
+
+sk_sp<GrDrawContext> SkGpuDevice::MakeDrawContext(GrContext* context,
+ SkBudgeted budgeted,
+ const SkImageInfo& origInfo,
+ int sampleCount,
+ GrSurfaceOrigin origin,
+ const SkSurfaceProps* surfaceProps) {
+ if (kUnknown_SkColorType == origInfo.colorType() ||
+ origInfo.width() < 0 || origInfo.height() < 0) {
+ return nullptr;
+ }
+
+ if (!context) {
+ return nullptr;
+ }
+
+ SkColorType ct = origInfo.colorType();
+ SkAlphaType at = origInfo.alphaType();
+ SkColorSpace* cs = origInfo.colorSpace();
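+ // 565 and gray-8 carry no alpha channel, so they are always treated as opaque; any
+ // other non-opaque request is rendered as premultiplied.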
+ if (kRGB_565_SkColorType == ct || kGray_8_SkColorType == ct) {
+ at = kOpaque_SkAlphaType; // force this setting
+ }
+ if (kOpaque_SkAlphaType != at) {
+ at = kPremul_SkAlphaType; // force this setting
+ }
+
+ GrPixelConfig config = SkImageInfo2GrPixelConfig(ct, at, cs, *context->caps());
+
+ return context->makeDrawContext(SkBackingFit::kExact, // Why exact?
+ origInfo.width(), origInfo.height(),
+ config, sk_ref_sp(cs), sampleCount,
+ origin, surfaceProps, budgeted);
+}
+
+sk_sp<SkSpecialImage> SkGpuDevice::filterTexture(const SkDraw& draw,
+ SkSpecialImage* srcImg,
+ int left, int top,
+ SkIPoint* offset,
+ const SkImageFilter* filter) {
+ SkASSERT(srcImg->isTextureBacked());
+ SkASSERT(filter);
+
+ SkMatrix matrix = *draw.fMatrix;
+ matrix.postTranslate(SkIntToScalar(-left), SkIntToScalar(-top));
+ const SkIRect clipBounds = draw.fRC->getBounds().makeOffset(-left, -top);
+ SkAutoTUnref<SkImageFilterCache> cache(this->getImageFilterCache());
+ SkImageFilter::OutputProperties outputProperties(fDrawContext->getColorSpace());
+ SkImageFilter::Context ctx(matrix, clipBounds, cache.get(), outputProperties);
+
+ return filter->filterImage(srcImg, ctx, offset);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkGpuDevice::onReadPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int x, int y) {
+ ASSERT_SINGLE_OWNER
+
+ return fDrawContext->readPixels(dstInfo, dstPixels, dstRowBytes, x, y);
+}
+
+bool SkGpuDevice::onWritePixels(const SkImageInfo& srcInfo, const void* srcPixels,
+ size_t srcRowBytes, int x, int y) {
+ ASSERT_SINGLE_OWNER
+
+ return fDrawContext->writePixels(srcInfo, srcPixels, srcRowBytes, x, y);
+}
+
+bool SkGpuDevice::onAccessPixels(SkPixmap* pmap) {
+ ASSERT_SINGLE_OWNER
+ return false;
+}
+
+// call this every draw call, to ensure that the context reflects our state,
+// and not the state from some other canvas/device
+void SkGpuDevice::prepareDraw(const SkDraw& draw) {
+ ASSERT_SINGLE_OWNER
+
+ fClip.reset(draw.fClipStack, &this->getOrigin());
+}
+
+GrDrawContext* SkGpuDevice::accessDrawContext() {
+ ASSERT_SINGLE_OWNER
+ return fDrawContext.get();
+}
+
+void SkGpuDevice::clearAll() {
+ ASSERT_SINGLE_OWNER
+ GrColor color = 0;
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "clearAll", fContext);
+ SkIRect rect = SkIRect::MakeWH(this->width(), this->height());
+ fDrawContext->clear(&rect, color, true);
+}
+
+void SkGpuDevice::replaceDrawContext(bool shouldRetainContent) {
+ ASSERT_SINGLE_OWNER
+
+ SkBudgeted budgeted = fDrawContext->drawContextPriv().isBudgeted();
+
+ sk_sp<GrDrawContext> newDC(MakeDrawContext(this->context(),
+ budgeted,
+ this->imageInfo(),
+ fDrawContext->numColorSamples(),
+ fDrawContext->origin(),
+ &this->surfaceProps()));
+ if (!newDC) {
+ return;
+ }
+
+ if (shouldRetainContent) {
+ if (fDrawContext->wasAbandoned()) {
+ return;
+ }
+ newDC->copySurface(fDrawContext->asTexture().get(),
+ SkIRect::MakeWH(this->width(), this->height()),
+ SkIPoint::Make(0, 0));
+ }
+
+ fDrawContext = newDC;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGpuDevice::drawPaint(const SkDraw& draw, const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ CHECK_SHOULD_DRAW(draw);
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawPaint", fContext);
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fDrawContext.get(), paint, *draw.fMatrix, &grPaint)) {
+ return;
+ }
+
+ fDrawContext->drawPaint(fClip, grPaint, *draw.fMatrix);
+}
+
+// must be in SkCanvas::PointMode order
+static const GrPrimitiveType gPointMode2PrimitiveType[] = {
+ kPoints_GrPrimitiveType,
+ kLines_GrPrimitiveType,
+ kLineStrip_GrPrimitiveType
+};
+
+// suppress antialiasing on axis-aligned integer-coordinate lines
+static bool needs_antialiasing(SkCanvas::PointMode mode, size_t count, const SkPoint pts[]) {
+ if (mode == SkCanvas::PointMode::kPoints_PointMode) {
+ return false;
+ }
+ if (count == 2) {
+ // We do not antialias as long as the primary axis of the line is integer-aligned, even if
+ // the other coordinates are not. This does mean the two end pixels of the line will be
+ // sharp even when they shouldn't be, but turning antialiasing on (as things stand
+ // currently) means that the line will turn into a two-pixel-wide blur. While obviously a
+ // more complete fix is possible down the road, for the time being we accept the error on
+ // the two end pixels as being the lesser of two evils.
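+ // For example (illustrative): a vertical line from (10, 3.25) to (10, 17.75) stays
+ // non-AA because its primary (x) axis is integer-aligned, while the same line at
+ // x == 10.5 would still be antialiased.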
+ if (pts[0].fX == pts[1].fX) {
+ return ((int) pts[0].fX) != pts[0].fX;
+ }
+ if (pts[0].fY == pts[1].fY) {
+ return ((int) pts[0].fY) != pts[0].fY;
+ }
+ }
+ return true;
+}
+
+void SkGpuDevice::drawPoints(const SkDraw& draw, SkCanvas::PointMode mode,
+ size_t count, const SkPoint pts[], const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawPoints", fContext);
+ CHECK_SHOULD_DRAW(draw);
+
+ SkScalar width = paint.getStrokeWidth();
+ if (width < 0) {
+ return;
+ }
+
+ if (paint.getPathEffect() && 2 == count && SkCanvas::kLines_PointMode == mode) {
+ GrStyle style(paint, SkPaint::kStroke_Style);
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fDrawContext.get(), paint, *draw.fMatrix,
+ &grPaint)) {
+ return;
+ }
+ SkPath path;
+ path.setIsVolatile(true);
+ path.moveTo(pts[0]);
+ path.lineTo(pts[1]);
+ fDrawContext->drawPath(fClip, grPaint, *draw.fMatrix, path, style);
+ return;
+ }
+
+ SkScalar scales[2];
+ bool isHairline = (0 == width) || (1 == width && draw.fMatrix->getMinMaxScales(scales) &&
+ SkScalarNearlyEqual(scales[0], 1.f) &&
+ SkScalarNearlyEqual(scales[1], 1.f));
+ // we only handle non-antialiased hairlines and paints without path effects or mask filters,
+ // else we let the SkDraw call our drawPath()
+ if (!isHairline || paint.getPathEffect() || paint.getMaskFilter() ||
+ (paint.isAntiAlias() && needs_antialiasing(mode, count, pts))) {
+ draw.drawPoints(mode, count, pts, paint, true);
+ return;
+ }
+
+ GrPrimitiveType primitiveType = gPointMode2PrimitiveType[mode];
+
+ const SkMatrix* viewMatrix = draw.fMatrix;
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ // This offsetting in device space matches the expectations of the Android framework for non-AA
+ // points and lines.
+ SkMatrix tempMatrix;
+ if (GrIsPrimTypeLines(primitiveType) || kPoints_GrPrimitiveType == primitiveType) {
+ tempMatrix = *viewMatrix;
+ static const SkScalar kOffset = 0.063f; // Just greater than 1/16.
+ tempMatrix.postTranslate(kOffset, kOffset);
+ viewMatrix = &tempMatrix;
+ }
+#endif
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fDrawContext.get(), paint, *viewMatrix, &grPaint)) {
+ return;
+ }
+
+ fDrawContext->drawVertices(fClip,
+ grPaint,
+ *viewMatrix,
+ primitiveType,
+ SkToS32(count),
+ (SkPoint*)pts,
+ nullptr,
+ nullptr,
+ nullptr,
+ 0);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGpuDevice::drawRect(const SkDraw& draw, const SkRect& rect, const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawRect", fContext);
+ CHECK_SHOULD_DRAW(draw);
+
+ // There are a couple of reasons (a mask filter or a path effect) why we might need
+ // to fall back to the path-drawing code below.
+ if (paint.getMaskFilter() || paint.getPathEffect()) {
+ SkPath path;
+ path.setIsVolatile(true);
+ path.addRect(rect);
+ GrBlurUtils::drawPathWithMaskFilter(fContext, fDrawContext.get(),
+ fClip, path, paint,
+ *draw.fMatrix, nullptr,
+ draw.fRC->getBounds(), true);
+ return;
+ }
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fDrawContext.get(), paint, *draw.fMatrix, &grPaint)) {
+ return;
+ }
+
+ GrStyle style(paint);
+ fDrawContext->drawRect(fClip, grPaint, *draw.fMatrix, rect, &style);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGpuDevice::drawRRect(const SkDraw& draw, const SkRRect& rrect,
+ const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawRRect", fContext);
+ CHECK_SHOULD_DRAW(draw);
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fDrawContext.get(), paint, *draw.fMatrix, &grPaint)) {
+ return;
+ }
+
+ GrStyle style(paint);
+ if (paint.getMaskFilter()) {
+ // try to hit the fast path for drawing filtered round rects
+
+ SkRRect devRRect;
+ if (rrect.transform(*draw.fMatrix, &devRRect)) {
+ if (devRRect.allCornersCircular()) {
+ SkRect maskRect;
+ if (paint.getMaskFilter()->canFilterMaskGPU(devRRect,
+ draw.fRC->getBounds(),
+ *draw.fMatrix,
+ &maskRect)) {
+ SkIRect finalIRect;
+ maskRect.roundOut(&finalIRect);
+ if (draw.fRC->quickReject(finalIRect)) {
+ // clipped out
+ return;
+ }
+ if (paint.getMaskFilter()->directFilterRRectMaskGPU(fContext,
+ fDrawContext.get(),
+ &grPaint,
+ fClip,
+ *draw.fMatrix,
+ style.strokeRec(),
+ rrect,
+ devRRect)) {
+ return;
+ }
+ }
+
+ }
+ }
+ }
+
+ if (paint.getMaskFilter() || style.pathEffect()) {
+ // The only mask filter the native rrect drawing code could have handled was
+ // taken care of above.
+ // A path effect will presumably transform this rrect into something else.
+ SkPath path;
+ path.setIsVolatile(true);
+ path.addRRect(rrect);
+ GrBlurUtils::drawPathWithMaskFilter(fContext, fDrawContext.get(),
+ fClip, path, paint,
+ *draw.fMatrix, nullptr,
+ draw.fRC->getBounds(), true);
+ return;
+ }
+
+ SkASSERT(!style.pathEffect());
+
+ fDrawContext->drawRRect(fClip, grPaint, *draw.fMatrix, rrect, style);
+}
+
+
+void SkGpuDevice::drawDRRect(const SkDraw& draw, const SkRRect& outer,
+ const SkRRect& inner, const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawDRRect", fContext);
+ CHECK_SHOULD_DRAW(draw);
+
+ if (outer.isEmpty()) {
+ return;
+ }
+
+ if (inner.isEmpty()) {
+ return this->drawRRect(draw, outer, paint);
+ }
+
+ SkStrokeRec stroke(paint);
+
+ if (stroke.isFillStyle() && !paint.getMaskFilter() && !paint.getPathEffect()) {
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fDrawContext.get(), paint, *draw.fMatrix,
+ &grPaint)) {
+ return;
+ }
+
+ fDrawContext->drawDRRect(fClip, grPaint, *draw.fMatrix, outer, inner);
+ return;
+ }
+
+ SkPath path;
+ path.setIsVolatile(true);
+ path.addRRect(outer);
+ path.addRRect(inner);
+ path.setFillType(SkPath::kEvenOdd_FillType);
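+ // With even-odd fill, the region covered exactly once (between the outer and inner
+ // round rects) is filled, producing the rounded ring.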
+
+ GrBlurUtils::drawPathWithMaskFilter(fContext, fDrawContext.get(),
+ fClip, path, paint,
+ *draw.fMatrix, nullptr,
+ draw.fRC->getBounds(), true);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////
+
+void SkGpuDevice::drawRegion(const SkDraw& draw, const SkRegion& region, const SkPaint& paint) {
+ if (paint.getMaskFilter()) {
+ SkPath path;
+ region.getBoundaryPath(&path);
+ return this->drawPath(draw, path, paint, nullptr, false);
+ }
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fDrawContext.get(), paint, *draw.fMatrix, &grPaint)) {
+ return;
+ }
+
+ fDrawContext->drawRegion(fClip, grPaint, *draw.fMatrix, region, GrStyle(paint));
+}
+
+void SkGpuDevice::drawOval(const SkDraw& draw, const SkRect& oval, const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawOval", fContext);
+ CHECK_SHOULD_DRAW(draw);
+
+ // Presumably the path effect warps this to something other than an oval
+ if (paint.getPathEffect()) {
+ SkPath path;
+ path.setIsVolatile(true);
+ path.addOval(oval);
+ this->drawPath(draw, path, paint, nullptr, true);
+ return;
+ }
+
+ if (paint.getMaskFilter()) {
+ // The RRect path can handle special case blurring
+ SkRRect rr = SkRRect::MakeOval(oval);
+ return this->drawRRect(draw, rr, paint);
+ }
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fDrawContext.get(), paint, *draw.fMatrix, &grPaint)) {
+ return;
+ }
+
+ fDrawContext->drawOval(fClip, grPaint, *draw.fMatrix, oval, GrStyle(paint));
+}
+
+void SkGpuDevice::drawArc(const SkDraw& draw, const SkRect& oval, SkScalar startAngle,
+ SkScalar sweepAngle, bool useCenter, const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawArc", fContext);
+ CHECK_SHOULD_DRAW(draw);
+
+ if (paint.getMaskFilter()) {
+ this->INHERITED::drawArc(draw, oval, startAngle, sweepAngle, useCenter, paint);
+ return;
+ }
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fDrawContext.get(), paint, *draw.fMatrix, &grPaint)) {
+ return;
+ }
+
+ fDrawContext->drawArc(fClip, grPaint, *draw.fMatrix, oval, startAngle, sweepAngle, useCenter,
+ GrStyle(paint));
+}
+
+#include "SkMaskFilter.h"
+
+///////////////////////////////////////////////////////////////////////////////
+void SkGpuDevice::drawStrokedLine(const SkPoint points[2],
+ const SkDraw& draw,
+ const SkPaint& origPaint) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawStrokedLine", fContext);
+ CHECK_SHOULD_DRAW(draw);
+
+ // Adding support for round capping would require a GrDrawContext::fillRRectWithLocalMatrix
+ // entry point
+ SkASSERT(SkPaint::kRound_Cap != origPaint.getStrokeCap());
+ SkASSERT(SkPaint::kStroke_Style == origPaint.getStyle());
+ SkASSERT(!origPaint.getPathEffect());
+ SkASSERT(!origPaint.getMaskFilter());
+
+ const SkScalar halfWidth = 0.5f * origPaint.getStrokeWidth();
+ SkASSERT(halfWidth > 0);
+
+ SkVector v = points[1] - points[0];
+
+ SkScalar length = SkPoint::Normalize(&v);
+ if (!length) {
+ v.fX = 1.0f;
+ v.fY = 0.0f;
+ }
+
+ SkPaint newPaint(origPaint);
+ newPaint.setStyle(SkPaint::kFill_Style);
+
+ SkScalar xtraLength = 0.0f;
+ if (SkPaint::kButt_Cap != origPaint.getStrokeCap()) {
+ xtraLength = halfWidth;
+ }
+
+ SkPoint mid = points[0] + points[1];
+ mid.scale(0.5f);
+
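+ // Model the stroke as an axis-aligned rect centered at 'mid', whose height is the
+ // line length (plus any cap extension) and whose width is the stroke width, then
+ // rotate it into the line's direction via 'm' below.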
+ SkRect rect = SkRect::MakeLTRB(mid.fX-halfWidth, mid.fY - 0.5f*length - xtraLength,
+ mid.fX+halfWidth, mid.fY + 0.5f*length + xtraLength);
+ SkMatrix m;
+ m.setSinCos(v.fX, -v.fY, mid.fX, mid.fY);
+
+ SkMatrix local = m;
+
+ m.postConcat(*draw.fMatrix);
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fDrawContext.get(), newPaint, m, &grPaint)) {
+ return;
+ }
+
+ fDrawContext->fillRectWithLocalMatrix(fClip, grPaint, m, rect, local);
+}
+
+void SkGpuDevice::drawPath(const SkDraw& draw, const SkPath& origSrcPath,
+ const SkPaint& paint, const SkMatrix* prePathMatrix,
+ bool pathIsMutable) {
+ ASSERT_SINGLE_OWNER
+ if (!origSrcPath.isInverseFillType() && !paint.getPathEffect() && !prePathMatrix) {
+ SkPoint points[2];
+ if (SkPaint::kStroke_Style == paint.getStyle() && paint.getStrokeWidth() > 0 &&
+ !paint.getMaskFilter() && SkPaint::kRound_Cap != paint.getStrokeCap() &&
+ draw.fMatrix->preservesRightAngles() && origSrcPath.isLine(points)) {
+ // Path-based stroking looks better for thin rects
+ SkScalar strokeWidth = draw.fMatrix->getMaxScale() * paint.getStrokeWidth();
+ if (strokeWidth >= 1.0f) {
+ // Round capping support is currently disabled because it would require
+ // an RRect batch that takes a localMatrix.
+ this->drawStrokedLine(points, draw, paint);
+ return;
+ }
+ }
+ bool isClosed;
+ SkRect rect;
+ if (origSrcPath.isRect(&rect, &isClosed) && isClosed) {
+ this->drawRect(draw, rect, paint);
+ return;
+ }
+ if (origSrcPath.isOval(&rect)) {
+ this->drawOval(draw, rect, paint);
+ return;
+ }
+ SkRRect rrect;
+ if (origSrcPath.isRRect(&rrect)) {
+ this->drawRRect(draw, rrect, paint);
+ return;
+ }
+ }
+
+ CHECK_SHOULD_DRAW(draw);
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawPath", fContext);
+
+ GrBlurUtils::drawPathWithMaskFilter(fContext, fDrawContext.get(),
+ fClip, origSrcPath, paint,
+ *draw.fMatrix, prePathMatrix,
+ draw.fRC->getBounds(), pathIsMutable);
+}
+
+static const int kBmpSmallTileSize = 1 << 10;
+
+static inline int get_tile_count(const SkIRect& srcRect, int tileSize) {
+ int tilesX = (srcRect.fRight / tileSize) - (srcRect.fLeft / tileSize) + 1;
+ int tilesY = (srcRect.fBottom / tileSize) - (srcRect.fTop / tileSize) + 1;
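+ // e.g. (illustrative) a srcRect with fLeft == 0 and fRight == 2047 at tileSize 1024
+ // touches tiles 0 and 1 in x, so tilesX == 2.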
+ return tilesX * tilesY;
+}
+
+static int determine_tile_size(const SkIRect& src, int maxTileSize) {
+ if (maxTileSize <= kBmpSmallTileSize) {
+ return maxTileSize;
+ }
+
+ size_t maxTileTotalTileSize = get_tile_count(src, maxTileSize);
+ size_t smallTotalTileSize = get_tile_count(src, kBmpSmallTileSize);
+
+ maxTileTotalTileSize *= maxTileSize * maxTileSize;
+ smallTotalTileSize *= kBmpSmallTileSize * kBmpSmallTileSize;
+
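+ // Illustrative example (numbers assumed): for a 4100x1030 src at the origin with
+ // maxTileSize 4096, the big tiles cover 2 * 4096^2 ~= 33.5M pixels while the 1024
+ // tiles cover 10 * 1024^2 ~= 10.5M, so the small tile size wins below.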
+ if (maxTileTotalTileSize > 2 * smallTotalTileSize) {
+ return kBmpSmallTileSize;
+ } else {
+ return maxTileSize;
+ }
+}
+
+ // Given a bitmap, an optional src rect, and a context with a clip and matrix, determine
+ // which pixels from the bitmap are necessary.
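+// In short: take the conservative clip bounds in device space, map them back through the
+// inverse of (viewMatrix * srcToDstRect) into source space, then intersect with the
+// optional srcRect and the image bounds.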
+static void determine_clipped_src_rect(int width, int height,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& srcToDstRect,
+ const SkISize& imageSize,
+ const SkRect* srcRectPtr,
+ SkIRect* clippedSrcIRect) {
+ clip.getConservativeBounds(width, height, clippedSrcIRect, nullptr);
+ SkMatrix inv = SkMatrix::Concat(viewMatrix, srcToDstRect);
+ if (!inv.invert(&inv)) {
+ clippedSrcIRect->setEmpty();
+ return;
+ }
+ SkRect clippedSrcRect = SkRect::Make(*clippedSrcIRect);
+ inv.mapRect(&clippedSrcRect);
+ if (srcRectPtr) {
+ if (!clippedSrcRect.intersect(*srcRectPtr)) {
+ clippedSrcIRect->setEmpty();
+ return;
+ }
+ }
+ clippedSrcRect.roundOut(clippedSrcIRect);
+ SkIRect bmpBounds = SkIRect::MakeSize(imageSize);
+ if (!clippedSrcIRect->intersect(bmpBounds)) {
+ clippedSrcIRect->setEmpty();
+ }
+}
+
+bool SkGpuDevice::shouldTileImageID(uint32_t imageID, const SkIRect& imageRect,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& srcToDstRect,
+ const GrTextureParams& params,
+ const SkRect* srcRectPtr,
+ int maxTileSize,
+ int* tileSize,
+ SkIRect* clippedSubset) const {
+ ASSERT_SINGLE_OWNER
+ // If it's larger than the max tile size, then we have no choice but to tile.
+ if (imageRect.width() > maxTileSize || imageRect.height() > maxTileSize) {
+ determine_clipped_src_rect(fDrawContext->width(), fDrawContext->height(), fClip, viewMatrix,
+ srcToDstRect, imageRect.size(), srcRectPtr, clippedSubset);
+ *tileSize = determine_tile_size(*clippedSubset, maxTileSize);
+ return true;
+ }
+
+ // If the image would only produce 4 tiles of the smaller size, don't bother tiling it.
+ const size_t area = imageRect.width() * imageRect.height();
+ if (area < 4 * kBmpSmallTileSize * kBmpSmallTileSize) {
+ return false;
+ }
+
+ // At this point we know we could do the draw by uploading the entire bitmap
+ // as a texture. However, if the texture would be large compared to the
+ // cache size and we don't require most of it for this draw then tile to
+ // reduce the amount of upload and cache spill.
+
+ // The assumption here is that the SW bitmap's size is a good proxy for its size
+ // as a texture.
+ size_t bmpSize = area * sizeof(SkPMColor); // assume 32bit pixels
+ size_t cacheSize;
+ fContext->getResourceCacheLimits(nullptr, &cacheSize);
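+ // Illustrative numbers (assumed, not from the source): an 8000x8000 bitmap is
+ // ~256 MB at 4 bytes per pixel, which would dwarf a typical cache budget, so we
+ // would fall through to the tiling estimate below.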
+ if (bmpSize < cacheSize / 2) {
+ return false;
+ }
+
+ // Figure out how much of the src we will need based on the src rect and clipping. Reject if
+ // tiling memory savings would be < 50%.
+ determine_clipped_src_rect(fDrawContext->width(), fDrawContext->height(), fClip, viewMatrix,
+ srcToDstRect, imageRect.size(), srcRectPtr, clippedSubset);
+ *tileSize = kBmpSmallTileSize; // We already know the whole bitmap fits in one max-sized tile.
+ size_t usedTileBytes = get_tile_count(*clippedSubset, kBmpSmallTileSize) *
+ kBmpSmallTileSize * kBmpSmallTileSize;
+
+ return usedTileBytes < 2 * bmpSize;
+}
+
+bool SkGpuDevice::shouldTileImage(const SkImage* image, const SkRect* srcRectPtr,
+ SkCanvas::SrcRectConstraint constraint, SkFilterQuality quality,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& srcToDstRect) const {
+ ASSERT_SINGLE_OWNER
+ // If the image is explicitly texture-backed, then just use the texture.
+ if (as_IB(image)->peekTexture()) {
+ return false;
+ }
+
+ GrTextureParams params;
+ bool doBicubic;
+ GrTextureParams::FilterMode textureFilterMode =
+ GrSkFilterQualityToGrFilterMode(quality, viewMatrix, srcToDstRect, &doBicubic);
+
+ int tileFilterPad;
+ if (doBicubic) {
+ tileFilterPad = GrBicubicEffect::kFilterTexelPad;
+ } else if (GrTextureParams::kNone_FilterMode == textureFilterMode) {
+ tileFilterPad = 0;
+ } else {
+ tileFilterPad = 1;
+ }
+ params.setFilterMode(textureFilterMode);
+
+ int maxTileSize = fContext->caps()->maxTileSize() - 2 * tileFilterPad;
+
+ // These are outputs, which we safely ignore; we just want the predicate's answer.
+ int outTileSize;
+ SkIRect outClippedSrcRect;
+
+ return this->shouldTileImageID(image->unique(), image->bounds(), viewMatrix, srcToDstRect,
+ params, srcRectPtr, maxTileSize, &outTileSize,
+ &outClippedSrcRect);
+}
+
+void SkGpuDevice::drawBitmap(const SkDraw& origDraw,
+ const SkBitmap& bitmap,
+ const SkMatrix& m,
+ const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ CHECK_SHOULD_DRAW(origDraw);
+ SkMatrix viewMatrix;
+ viewMatrix.setConcat(*origDraw.fMatrix, m);
+
+ int maxTileSize = fContext->caps()->maxTileSize();
+
+ // The tile code path doesn't currently support AA, so if the paint asked for AA and we
+ // could draw untiled, we bypass the tiling check purely as an optimization.
+ bool drawAA = !fDrawContext->isUnifiedMultisampled() &&
+ paint.isAntiAlias() &&
+ bitmap.width() <= maxTileSize &&
+ bitmap.height() <= maxTileSize;
+
+ bool skipTileCheck = drawAA || paint.getMaskFilter();
+
+ if (!skipTileCheck) {
+ SkRect srcRect = SkRect::MakeIWH(bitmap.width(), bitmap.height());
+ int tileSize;
+ SkIRect clippedSrcRect;
+
+ GrTextureParams params;
+ bool doBicubic;
+ GrTextureParams::FilterMode textureFilterMode =
+ GrSkFilterQualityToGrFilterMode(paint.getFilterQuality(), viewMatrix, SkMatrix::I(),
+ &doBicubic);
+
+ int tileFilterPad;
+
+ if (doBicubic) {
+ tileFilterPad = GrBicubicEffect::kFilterTexelPad;
+ } else if (GrTextureParams::kNone_FilterMode == textureFilterMode) {
+ tileFilterPad = 0;
+ } else {
+ tileFilterPad = 1;
+ }
+ params.setFilterMode(textureFilterMode);
+
+ int maxTileSizeForFilter = fContext->caps()->maxTileSize() - 2 * tileFilterPad;
+ if (this->shouldTileImageID(bitmap.getGenerationID(), bitmap.getSubset(), viewMatrix,
+ SkMatrix::I(), params, &srcRect, maxTileSizeForFilter,
+ &tileSize, &clippedSrcRect)) {
+ this->drawTiledBitmap(bitmap, viewMatrix, SkMatrix::I(), srcRect, clippedSrcRect,
+ params, paint, SkCanvas::kStrict_SrcRectConstraint, tileSize,
+ doBicubic);
+ return;
+ }
+ }
+ GrBitmapTextureMaker maker(fContext, bitmap);
+ this->drawTextureProducer(&maker, nullptr, nullptr, SkCanvas::kStrict_SrcRectConstraint,
+ viewMatrix, fClip, paint);
+}
+
+// This method outsets 'iRect' by 'outset' all around and then clamps its extents to
+// 'clamp'. 'offset' is adjusted to remain positioned over the top-left corner
+// of 'iRect' for all possible outsets/clamps.
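+// Illustrative example (numbers assumed): outsetting {L=10,T=10,R=20,B=20} by 1 against
+// a clamp of {0,0,15,25} yields {9,9,15,21}, and 'offset' is shifted by (-1,-1) since
+// neither the left nor the top edge was clamped.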
+static inline void clamped_outset_with_offset(SkIRect* iRect,
+ int outset,
+ SkPoint* offset,
+ const SkIRect& clamp) {
+ iRect->outset(outset, outset);
+
+ int leftClampDelta = clamp.fLeft - iRect->fLeft;
+ if (leftClampDelta > 0) {
+ offset->fX -= outset - leftClampDelta;
+ iRect->fLeft = clamp.fLeft;
+ } else {
+ offset->fX -= outset;
+ }
+
+ int topClampDelta = clamp.fTop - iRect->fTop;
+ if (topClampDelta > 0) {
+ offset->fY -= outset - topClampDelta;
+ iRect->fTop = clamp.fTop;
+ } else {
+ offset->fY -= outset;
+ }
+
+ if (iRect->fRight > clamp.fRight) {
+ iRect->fRight = clamp.fRight;
+ }
+ if (iRect->fBottom > clamp.fBottom) {
+ iRect->fBottom = clamp.fBottom;
+ }
+}
+
+// Break 'bitmap' into several tiles to draw it since it has already
+// been determined to be too large to fit in VRAM
+void SkGpuDevice::drawTiledBitmap(const SkBitmap& bitmap,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& dstMatrix,
+ const SkRect& srcRect,
+ const SkIRect& clippedSrcIRect,
+ const GrTextureParams& params,
+ const SkPaint& origPaint,
+ SkCanvas::SrcRectConstraint constraint,
+ int tileSize,
+ bool bicubic) {
+ ASSERT_SINGLE_OWNER
+
+ // This is the funnel for all paths that draw tiled bitmaps/images. Log histogram entries.
+ SK_HISTOGRAM_BOOLEAN("DrawTiled", true);
+ LogDrawScaleFactor(viewMatrix, origPaint.getFilterQuality());
+
+ // The following pixel lock is technically redundant, but it is desirable
+ // to lock outside of the tile loop to prevent redecoding the whole image
+ // at each tile in cases where 'bitmap' holds an SkDiscardablePixelRef that
+ // is larger than the limit of the discardable memory pool.
+ SkAutoLockPixels alp(bitmap);
+
+ const SkPaint* paint = &origPaint;
+ SkPaint tempPaint;
+ if (origPaint.isAntiAlias() && !fDrawContext->isUnifiedMultisampled()) {
+ // Drop antialiasing to avoid seams at tile boundaries.
+ tempPaint = origPaint;
+ tempPaint.setAntiAlias(false);
+ paint = &tempPaint;
+ }
+ SkRect clippedSrcRect = SkRect::Make(clippedSrcIRect);
+
+ int nx = bitmap.width() / tileSize;
+ int ny = bitmap.height() / tileSize;
+ for (int x = 0; x <= nx; x++) {
+ for (int y = 0; y <= ny; y++) {
+ SkRect tileR;
+ tileR.set(SkIntToScalar(x * tileSize),
+ SkIntToScalar(y * tileSize),
+ SkIntToScalar((x + 1) * tileSize),
+ SkIntToScalar((y + 1) * tileSize));
+
+ if (!SkRect::Intersects(tileR, clippedSrcRect)) {
+ continue;
+ }
+
+ if (!tileR.intersect(srcRect)) {
+ continue;
+ }
+
+ SkIRect iTileR;
+ tileR.roundOut(&iTileR);
+ SkVector offset = SkPoint::Make(SkIntToScalar(iTileR.fLeft),
+ SkIntToScalar(iTileR.fTop));
+ SkRect rectToDraw = SkRect::MakeXYWH(offset.fX, offset.fY,
+ tileR.width(), tileR.height());
+ dstMatrix.mapRect(&rectToDraw);
+ if (GrTextureParams::kNone_FilterMode != params.filterMode() || bicubic) {
+ SkIRect iClampRect;
+
+ if (SkCanvas::kFast_SrcRectConstraint == constraint) {
+ // In bleed mode we want to always expand the tile on all edges
+ // but stay within the bitmap bounds
+ iClampRect = SkIRect::MakeWH(bitmap.width(), bitmap.height());
+ } else {
+ // In texture-domain/clamp mode we only want to expand the
+ // tile on edges interior to "srcRect" (i.e., we want to
+ // not bleed across the original clamped edges)
+ srcRect.roundOut(&iClampRect);
+ }
+ int outset = bicubic ? GrBicubicEffect::kFilterTexelPad : 1;
+ clamped_outset_with_offset(&iTileR, outset, &offset, iClampRect);
+ }
+
+ SkBitmap tmpB;
+ if (bitmap.extractSubset(&tmpB, iTileR)) {
+ // now offset it to make it "local" to our tmp bitmap
+ tileR.offset(-offset.fX, -offset.fY);
+ GrTextureParams paramsTemp = params;
+ // This determination was de-optimized; we now conservatively assume a texture domain is needed.
+ bool needsTextureDomain = true;
+ this->drawBitmapTile(tmpB,
+ viewMatrix,
+ rectToDraw,
+ tileR,
+ paramsTemp,
+ *paint,
+ constraint,
+ bicubic,
+ needsTextureDomain);
+ }
+ }
+ }
+}
+
+void SkGpuDevice::drawBitmapTile(const SkBitmap& bitmap,
+ const SkMatrix& viewMatrix,
+ const SkRect& dstRect,
+ const SkRect& srcRect,
+ const GrTextureParams& params,
+ const SkPaint& paint,
+ SkCanvas::SrcRectConstraint constraint,
+ bool bicubic,
+ bool needsTextureDomain) {
+ // We should have already handled bitmaps larger than the max texture size.
+ SkASSERT(bitmap.width() <= fContext->caps()->maxTextureSize() &&
+ bitmap.height() <= fContext->caps()->maxTextureSize());
+ // We should be respecting the max tile size by the time we get here.
+ SkASSERT(bitmap.width() <= fContext->caps()->maxTileSize() &&
+ bitmap.height() <= fContext->caps()->maxTileSize());
+
+ sk_sp<GrTexture> texture = GrMakeCachedBitmapTexture(fContext, bitmap, params,
+ fDrawContext->sourceGammaTreatment());
+ if (nullptr == texture) {
+ return;
+ }
+ sk_sp<GrColorSpaceXform> colorSpaceXform =
+ GrColorSpaceXform::Make(bitmap.colorSpace(), fDrawContext->getColorSpace());
+
+ SkScalar iw = 1.f / texture->width();
+ SkScalar ih = 1.f / texture->height();
+
+ SkMatrix texMatrix;
+ // Compute a matrix that maps the rect we will draw to the src rect.
+ texMatrix.setRectToRect(dstRect, srcRect, SkMatrix::kFill_ScaleToFit);
+ texMatrix.postScale(iw, ih);
+
+ // Construct a GrPaint by setting the bitmap texture as the first effect and then configuring
+ // the rest from the SkPaint.
+ sk_sp<GrFragmentProcessor> fp;
+
+ if (needsTextureDomain && (SkCanvas::kStrict_SrcRectConstraint == constraint)) {
+ // Use a constrained texture domain to avoid color bleeding
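+ // The domain is inset by half a texel so bilinear fetches never reach texels outside
+ // srcRect; e.g. (illustrative) with a 256-wide texture and srcRect.fLeft of 64,
+ // domain.fLeft becomes 64.5 / 256, which pins samples at or inside the center of
+ // texel 64.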
+ SkRect domain;
+ if (srcRect.width() > SK_Scalar1) {
+ domain.fLeft = (srcRect.fLeft + 0.5f) * iw;
+ domain.fRight = (srcRect.fRight - 0.5f) * iw;
+ } else {
+ domain.fLeft = domain.fRight = srcRect.centerX() * iw;
+ }
+ if (srcRect.height() > SK_Scalar1) {
+ domain.fTop = (srcRect.fTop + 0.5f) * ih;
+ domain.fBottom = (srcRect.fBottom - 0.5f) * ih;
+ } else {
+ domain.fTop = domain.fBottom = srcRect.centerY() * ih;
+ }
+ if (bicubic) {
+ fp = GrBicubicEffect::Make(texture.get(), std::move(colorSpaceXform), texMatrix,
+ domain);
+ } else {
+ fp = GrTextureDomainEffect::Make(texture.get(), std::move(colorSpaceXform), texMatrix,
+ domain, GrTextureDomain::kClamp_Mode,
+ params.filterMode());
+ }
+ } else if (bicubic) {
+ SkASSERT(GrTextureParams::kNone_FilterMode == params.filterMode());
+ SkShader::TileMode tileModes[2] = { params.getTileModeX(), params.getTileModeY() };
+ fp = GrBicubicEffect::Make(texture.get(), std::move(colorSpaceXform), texMatrix, tileModes);
+ } else {
+ fp = GrSimpleTextureEffect::Make(texture.get(), std::move(colorSpaceXform), texMatrix, params);
+ }
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaintWithTexture(this->context(), fDrawContext.get(), paint, viewMatrix,
+ std::move(fp), kAlpha_8_SkColorType == bitmap.colorType(),
+ &grPaint)) {
+ return;
+ }
+
+ fDrawContext->drawRect(fClip, grPaint, viewMatrix, dstRect);
+}
+
+void SkGpuDevice::drawSprite(const SkDraw& draw, const SkBitmap& bitmap,
+ int left, int top, const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ CHECK_SHOULD_DRAW(draw);
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawSprite", fContext);
+
+ if (fContext->abandoned()) {
+ return;
+ }
+
+ sk_sp<GrTexture> texture;
+ {
+ SkAutoLockPixels alp(bitmap, true);
+ if (!bitmap.readyToDraw()) {
+ return;
+ }
+
+ // drawSprite() neither filters nor tiles.
+ texture.reset(GrRefCachedBitmapTexture(fContext, bitmap,
+ GrTextureParams::ClampNoFilter(),
+ SkSourceGammaTreatment::kRespect));
+ if (!texture) {
+ return;
+ }
+ }
+
+ SkIRect srcRect = SkIRect::MakeXYWH(bitmap.pixelRefOrigin().fX,
+ bitmap.pixelRefOrigin().fY,
+ bitmap.width(),
+ bitmap.height());
+
+ sk_sp<SkSpecialImage> srcImg(SkSpecialImage::MakeFromGpu(srcRect,
+ bitmap.getGenerationID(),
+ std::move(texture),
+ sk_ref_sp(bitmap.colorSpace()),
+ &this->surfaceProps()));
+
+ this->drawSpecial(draw, srcImg.get(), left, top, paint);
+}
+
+
+void SkGpuDevice::drawSpecial(const SkDraw& draw,
+ SkSpecialImage* special1,
+ int left, int top,
+ const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ CHECK_SHOULD_DRAW(draw);
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawSpecial", fContext);
+
+ SkIPoint offset = { 0, 0 };
+
+ sk_sp<SkSpecialImage> result;
+ if (paint.getImageFilter()) {
+ result = this->filterTexture(draw, special1, left, top,
+ &offset,
+ paint.getImageFilter());
+ if (!result) {
+ return;
+ }
+ } else {
+ result = sk_ref_sp(special1);
+ }
+
+ SkASSERT(result->isTextureBacked());
+ sk_sp<GrTexture> texture = result->asTextureRef(fContext);
+
+ SkPaint tmpUnfiltered(paint);
+ tmpUnfiltered.setImageFilter(nullptr);
+
+ sk_sp<GrColorSpaceXform> colorSpaceXform =
+ GrColorSpaceXform::Make(result->getColorSpace(), fDrawContext->getColorSpace());
+ GrPaint grPaint;
+ sk_sp<GrFragmentProcessor> fp(GrSimpleTextureEffect::Make(texture.get(),
+ std::move(colorSpaceXform),
+ SkMatrix::I()));
+ if (GrPixelConfigIsAlphaOnly(texture->config())) {
+ fp = GrFragmentProcessor::MulOutputByInputUnpremulColor(std::move(fp));
+ } else {
+ fp = GrFragmentProcessor::MulOutputByInputAlpha(std::move(fp));
+ }
+ if (!SkPaintToGrPaintReplaceShader(this->context(), fDrawContext.get(), tmpUnfiltered,
+ std::move(fp), &grPaint)) {
+ return;
+ }
+
+ const SkIRect& subset = result->subset();
+
+ fDrawContext->fillRectToRect(fClip,
+ grPaint,
+ SkMatrix::I(),
+ SkRect::Make(SkIRect::MakeXYWH(left + offset.fX, top + offset.fY,
+ subset.width(), subset.height())),
+ SkRect::MakeXYWH(SkIntToScalar(subset.fLeft) / texture->width(),
+ SkIntToScalar(subset.fTop) / texture->height(),
+ SkIntToScalar(subset.width()) / texture->width(),
+ SkIntToScalar(subset.height()) / texture->height()));
+}
+
+void SkGpuDevice::drawBitmapRect(const SkDraw& draw, const SkBitmap& bitmap,
+ const SkRect* src, const SkRect& origDst,
+ const SkPaint& paint, SkCanvas::SrcRectConstraint constraint) {
+ ASSERT_SINGLE_OWNER
+ CHECK_SHOULD_DRAW(draw);
+
+ // The src rect is inferred to be the bmp bounds if not provided. Otherwise, the src rect must
+ // be clipped to the bmp bounds. To determine tiling parameters we need the filter mode which
+ // in turn requires knowing the src-to-dst mapping. If the src was clipped to the bmp bounds
+ // then we use the src-to-dst mapping to compute a new clipped dst rect.
+ const SkRect* dst = &origDst;
+ const SkRect bmpBounds = SkRect::MakeIWH(bitmap.width(), bitmap.height());
+ // Compute matrix from the two rectangles
+ if (!src) {
+ src = &bmpBounds;
+ }
+
+ SkMatrix srcToDstMatrix;
+ if (!srcToDstMatrix.setRectToRect(*src, *dst, SkMatrix::kFill_ScaleToFit)) {
+ return;
+ }
+ SkRect tmpSrc, tmpDst;
+ if (src != &bmpBounds) {
+ if (!bmpBounds.contains(*src)) {
+ tmpSrc = *src;
+ if (!tmpSrc.intersect(bmpBounds)) {
+ return; // nothing to draw
+ }
+ src = &tmpSrc;
+ srcToDstMatrix.mapRect(&tmpDst, *src);
+ dst = &tmpDst;
+ }
+ }
+
+ int maxTileSize = fContext->caps()->maxTileSize();
+
+ // The tile code path doesn't currently support AA, so if the paint asked for AA and we
+ // could draw untiled, we bypass the tiling check purely as an optimization.
+ bool drawAA = !fDrawContext->isUnifiedMultisampled() &&
+ paint.isAntiAlias() &&
+ bitmap.width() <= maxTileSize &&
+ bitmap.height() <= maxTileSize;
+
+ bool skipTileCheck = drawAA || paint.getMaskFilter();
+
+ if (!skipTileCheck) {
+ int tileSize;
+ SkIRect clippedSrcRect;
+
+ GrTextureParams params;
+ bool doBicubic;
+ GrTextureParams::FilterMode textureFilterMode =
+ GrSkFilterQualityToGrFilterMode(paint.getFilterQuality(), *draw.fMatrix, srcToDstMatrix,
+ &doBicubic);
+
+ int tileFilterPad;
+
+ if (doBicubic) {
+ tileFilterPad = GrBicubicEffect::kFilterTexelPad;
+ } else if (GrTextureParams::kNone_FilterMode == textureFilterMode) {
+ tileFilterPad = 0;
+ } else {
+ tileFilterPad = 1;
+ }
+ params.setFilterMode(textureFilterMode);
+
+ int maxTileSizeForFilter = fContext->caps()->maxTileSize() - 2 * tileFilterPad;
+ if (this->shouldTileImageID(bitmap.getGenerationID(), bitmap.getSubset(), *draw.fMatrix,
+ srcToDstMatrix, params, src, maxTileSizeForFilter, &tileSize,
+ &clippedSrcRect)) {
+ this->drawTiledBitmap(bitmap, *draw.fMatrix, srcToDstMatrix, *src, clippedSrcRect,
+ params, paint, constraint, tileSize, doBicubic);
+ return;
+ }
+ }
+ GrBitmapTextureMaker maker(fContext, bitmap);
+ this->drawTextureProducer(&maker, src, dst, constraint, *draw.fMatrix, fClip, paint);
+}
+
+sk_sp<SkSpecialImage> SkGpuDevice::makeSpecial(const SkBitmap& bitmap) {
+ SkAutoLockPixels alp(bitmap, true);
+ if (!bitmap.readyToDraw()) {
+ return nullptr;
+ }
+
+ sk_sp<GrTexture> texture = GrMakeCachedBitmapTexture(fContext, bitmap,
+ GrTextureParams::ClampNoFilter(),
+ SkSourceGammaTreatment::kRespect);
+ if (!texture) {
+ return nullptr;
+ }
+
+ return SkSpecialImage::MakeFromGpu(bitmap.bounds(),
+ bitmap.getGenerationID(),
+ texture,
+ sk_ref_sp(bitmap.colorSpace()),
+ &this->surfaceProps());
+}
+
+sk_sp<SkSpecialImage> SkGpuDevice::makeSpecial(const SkImage* image) {
+ SkPixmap pm;
+ if (image->isTextureBacked()) {
+ GrTexture* texture = as_IB(image)->peekTexture();
+
+ return SkSpecialImage::MakeFromGpu(SkIRect::MakeWH(image->width(), image->height()),
+ image->uniqueID(),
+ sk_ref_sp(texture),
+ sk_ref_sp(as_IB(image)->onImageInfo().colorSpace()),
+ &this->surfaceProps());
+ } else if (image->peekPixels(&pm)) {
+ SkBitmap bm;
+
+ bm.installPixels(pm);
+ return this->makeSpecial(bm);
+ } else {
+ return nullptr;
+ }
+}
+
+sk_sp<SkSpecialImage> SkGpuDevice::snapSpecial() {
+ sk_sp<GrTexture> texture(this->accessDrawContext()->asTexture());
+ if (!texture) {
+ // When the device doesn't have a texture, we create a temporary texture.
+ // TODO: we should actually only copy the portion of the source needed to apply the image
+ // filter
+ texture.reset(fContext->textureProvider()->createTexture(this->accessDrawContext()->desc(),
+ SkBudgeted::kYes));
+ if (!texture) {
+ return nullptr;
+ }
+
+ if (!fContext->copySurface(texture.get(), this->accessDrawContext()->accessRenderTarget())){
+ return nullptr;
+ }
+ }
+
+ const SkImageInfo ii = this->imageInfo();
+ const SkIRect srcRect = SkIRect::MakeWH(ii.width(), ii.height());
+
+ return SkSpecialImage::MakeFromGpu(srcRect,
+ kNeedNewImageUniqueID_SpecialImage,
+ std::move(texture),
+ sk_ref_sp(ii.colorSpace()),
+ &this->surfaceProps());
+}
+
+void SkGpuDevice::drawDevice(const SkDraw& draw, SkBaseDevice* device,
+ int left, int top, const SkPaint& paint) {
+ SkASSERT(!paint.getImageFilter());
+
+ ASSERT_SINGLE_OWNER
+ // Any clear of the source device must occur before CHECK_SHOULD_DRAW.
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawDevice", fContext);
+
+ // drawDevice is defined to be in device coords.
+ CHECK_SHOULD_DRAW(draw);
+
+ SkGpuDevice* dev = static_cast<SkGpuDevice*>(device);
+ sk_sp<SkSpecialImage> srcImg(dev->snapSpecial());
+ if (!srcImg) {
+ return;
+ }
+
+ this->drawSpecial(draw, srcImg.get(), left, top, paint);
+}
+
+void SkGpuDevice::drawImage(const SkDraw& draw, const SkImage* image, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ SkMatrix viewMatrix = *draw.fMatrix;
+ viewMatrix.preTranslate(x, y);
+ uint32_t pinnedUniqueID;
+ if (sk_sp<GrTexture> tex = as_IB(image)->refPinnedTexture(&pinnedUniqueID)) {
+ CHECK_SHOULD_DRAW(draw);
+ GrTextureAdjuster adjuster(tex.get(), image->alphaType(), image->bounds(), pinnedUniqueID,
+ as_IB(image)->onImageInfo().colorSpace());
+ this->drawTextureProducer(&adjuster, nullptr, nullptr, SkCanvas::kFast_SrcRectConstraint,
+ viewMatrix, fClip, paint);
+ return;
+ } else {
+ SkBitmap bm;
+ if (this->shouldTileImage(image, nullptr, SkCanvas::kFast_SrcRectConstraint,
+ paint.getFilterQuality(), *draw.fMatrix, SkMatrix::I())) {
+ // only support tiling as bitmap at the moment, so force raster-version
+ if (!as_IB(image)->getROPixels(&bm)) {
+ return;
+ }
+ this->drawBitmap(draw, bm, SkMatrix::MakeTrans(x, y), paint);
+ } else if (SkImageCacherator* cacher = as_IB(image)->peekCacherator()) {
+ CHECK_SHOULD_DRAW(draw);
+ GrImageTextureMaker maker(fContext, cacher, image, SkImage::kAllow_CachingHint);
+ this->drawTextureProducer(&maker, nullptr, nullptr, SkCanvas::kFast_SrcRectConstraint,
+ viewMatrix, fClip, paint);
+ } else if (as_IB(image)->getROPixels(&bm)) {
+ this->drawBitmap(draw, bm, SkMatrix::MakeTrans(x, y), paint);
+ }
+ }
+}
+
+void SkGpuDevice::drawImageRect(const SkDraw& draw, const SkImage* image, const SkRect* src,
+ const SkRect& dst, const SkPaint& paint,
+ SkCanvas::SrcRectConstraint constraint) {
+ ASSERT_SINGLE_OWNER
+ uint32_t pinnedUniqueID;
+ if (sk_sp<GrTexture> tex = as_IB(image)->refPinnedTexture(&pinnedUniqueID)) {
+ CHECK_SHOULD_DRAW(draw);
+ GrTextureAdjuster adjuster(tex.get(), image->alphaType(), image->bounds(), pinnedUniqueID,
+ as_IB(image)->onImageInfo().colorSpace());
+ this->drawTextureProducer(&adjuster, src, &dst, constraint, *draw.fMatrix, fClip, paint);
+ return;
+ }
+ SkBitmap bm;
+ SkMatrix srcToDstRect;
+ srcToDstRect.setRectToRect((src ? *src : SkRect::MakeIWH(image->width(), image->height())),
+ dst, SkMatrix::kFill_ScaleToFit);
+ if (this->shouldTileImage(image, src, constraint, paint.getFilterQuality(), *draw.fMatrix,
+ srcToDstRect)) {
+ // only support tiling as bitmap at the moment, so force raster-version
+ if (!as_IB(image)->getROPixels(&bm)) {
+ return;
+ }
+ this->drawBitmapRect(draw, bm, src, dst, paint, constraint);
+ } else if (SkImageCacherator* cacher = as_IB(image)->peekCacherator()) {
+ CHECK_SHOULD_DRAW(draw);
+ GrImageTextureMaker maker(fContext, cacher, image, SkImage::kAllow_CachingHint);
+ this->drawTextureProducer(&maker, src, &dst, constraint, *draw.fMatrix, fClip, paint);
+ } else if (as_IB(image)->getROPixels(&bm)) {
+ this->drawBitmapRect(draw, bm, src, dst, paint, constraint);
+ }
+}
+
+void SkGpuDevice::drawProducerNine(const SkDraw& draw, GrTextureProducer* producer,
+ const SkIRect& center, const SkRect& dst, const SkPaint& paint) {
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawProducerNine", fContext);
+
+ CHECK_SHOULD_DRAW(draw);
+
+ bool useFallback = paint.getMaskFilter() || paint.isAntiAlias() ||
+ fDrawContext->isUnifiedMultisampled();
+ bool doBicubic;
+ GrTextureParams::FilterMode textureFilterMode =
+ GrSkFilterQualityToGrFilterMode(paint.getFilterQuality(), *draw.fMatrix, SkMatrix::I(),
+ &doBicubic);
+ if (useFallback || doBicubic || GrTextureParams::kNone_FilterMode != textureFilterMode) {
+ SkLatticeIter iter(producer->width(), producer->height(), center, dst);
+
+ SkRect srcR, dstR;
+ while (iter.next(&srcR, &dstR)) {
+ this->drawTextureProducer(producer, &srcR, &dstR, SkCanvas::kStrict_SrcRectConstraint,
+ *draw.fMatrix, fClip, paint);
+ }
+ return;
+ }
+
+ static const GrTextureParams::FilterMode kMode = GrTextureParams::kNone_FilterMode;
+ sk_sp<GrFragmentProcessor> fp(
+ producer->createFragmentProcessor(SkMatrix::I(),
+ SkRect::MakeIWH(producer->width(), producer->height()),
+ GrTextureProducer::kNo_FilterConstraint, true,
+ &kMode, fDrawContext->getColorSpace(),
+ fDrawContext->sourceGammaTreatment()));
+ GrPaint grPaint;
+ if (!SkPaintToGrPaintWithTexture(this->context(), fDrawContext.get(), paint, *draw.fMatrix,
+ std::move(fp), producer->isAlphaOnly(), &grPaint)) {
+ return;
+ }
+
+ std::unique_ptr<SkLatticeIter> iter(
+ new SkLatticeIter(producer->width(), producer->height(), center, dst));
+ fDrawContext->drawImageLattice(fClip, grPaint, *draw.fMatrix, producer->width(),
+ producer->height(), std::move(iter), dst);
+}
+
+void SkGpuDevice::drawImageNine(const SkDraw& draw, const SkImage* image,
+ const SkIRect& center, const SkRect& dst, const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ uint32_t pinnedUniqueID;
+ if (sk_sp<GrTexture> tex = as_IB(image)->refPinnedTexture(&pinnedUniqueID)) {
+ CHECK_SHOULD_DRAW(draw);
+ GrTextureAdjuster adjuster(tex.get(), image->alphaType(), image->bounds(), pinnedUniqueID,
+ as_IB(image)->onImageInfo().colorSpace());
+ this->drawProducerNine(draw, &adjuster, center, dst, paint);
+ } else {
+ SkBitmap bm;
+ if (SkImageCacherator* cacher = as_IB(image)->peekCacherator()) {
+ GrImageTextureMaker maker(fContext, cacher, image, SkImage::kAllow_CachingHint);
+ this->drawProducerNine(draw, &maker, center, dst, paint);
+ } else if (as_IB(image)->getROPixels(&bm)) {
+ this->drawBitmapNine(draw, bm, center, dst, paint);
+ }
+ }
+}
+
+void SkGpuDevice::drawBitmapNine(const SkDraw& draw, const SkBitmap& bitmap, const SkIRect& center,
+ const SkRect& dst, const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ GrBitmapTextureMaker maker(fContext, bitmap);
+ this->drawProducerNine(draw, &maker, center, dst, paint);
+}
+
+void SkGpuDevice::drawProducerLattice(const SkDraw& draw, GrTextureProducer* producer,
+ const SkCanvas::Lattice& lattice, const SkRect& dst,
+ const SkPaint& paint) {
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawProducerLattice", fContext);
+
+ CHECK_SHOULD_DRAW(draw);
+
+ static const GrTextureParams::FilterMode kMode = GrTextureParams::kNone_FilterMode;
+ sk_sp<GrFragmentProcessor> fp(
+ producer->createFragmentProcessor(SkMatrix::I(),
+ SkRect::MakeIWH(producer->width(), producer->height()),
+ GrTextureProducer::kNo_FilterConstraint, true,
+ &kMode, fDrawContext->getColorSpace(),
+ fDrawContext->sourceGammaTreatment()));
+ GrPaint grPaint;
+ if (!SkPaintToGrPaintWithTexture(this->context(), fDrawContext.get(), paint, *draw.fMatrix,
+ std::move(fp), producer->isAlphaOnly(), &grPaint)) {
+ return;
+ }
+
+ std::unique_ptr<SkLatticeIter> iter(
+ new SkLatticeIter(lattice, dst));
+ fDrawContext->drawImageLattice(fClip, grPaint, *draw.fMatrix, producer->width(),
+ producer->height(), std::move(iter), dst);
+}
+
+void SkGpuDevice::drawImageLattice(const SkDraw& draw, const SkImage* image,
+ const SkCanvas::Lattice& lattice, const SkRect& dst,
+ const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ uint32_t pinnedUniqueID;
+ if (sk_sp<GrTexture> tex = as_IB(image)->refPinnedTexture(&pinnedUniqueID)) {
+ CHECK_SHOULD_DRAW(draw);
+ GrTextureAdjuster adjuster(tex.get(), image->alphaType(), image->bounds(), pinnedUniqueID,
+ as_IB(image)->onImageInfo().colorSpace());
+ this->drawProducerLattice(draw, &adjuster, lattice, dst, paint);
+ } else {
+ SkBitmap bm;
+ if (SkImageCacherator* cacher = as_IB(image)->peekCacherator()) {
+ GrImageTextureMaker maker(fContext, cacher, image, SkImage::kAllow_CachingHint);
+ this->drawProducerLattice(draw, &maker, lattice, dst, paint);
+ } else if (as_IB(image)->getROPixels(&bm)) {
+ this->drawBitmapLattice(draw, bm, lattice, dst, paint);
+ }
+ }
+}
+
+void SkGpuDevice::drawBitmapLattice(const SkDraw& draw, const SkBitmap& bitmap,
+ const SkCanvas::Lattice& lattice, const SkRect& dst,
+ const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ GrBitmapTextureMaker maker(fContext, bitmap);
+ this->drawProducerLattice(draw, &maker, lattice, dst, paint);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// must be in SkCanvas::VertexMode order
+static const GrPrimitiveType gVertexMode2PrimitiveType[] = {
+ kTriangles_GrPrimitiveType,
+ kTriangleStrip_GrPrimitiveType,
+ kTriangleFan_GrPrimitiveType,
+};
+
+void SkGpuDevice::drawVertices(const SkDraw& draw, SkCanvas::VertexMode vmode,
+ int vertexCount, const SkPoint vertices[],
+ const SkPoint texs[], const SkColor colors[],
+ SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ CHECK_SHOULD_DRAW(draw);
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawVertices", fContext);
+
+ // If there are no per-vertex colors and no shader to apply via texs, draw the mesh as
+ // a hairline wireframe in the paint's color.
+ if ((nullptr == texs || nullptr == paint.getShader()) && nullptr == colors) {
+
+ texs = nullptr;
+
+ SkPaint copy(paint);
+ copy.setStyle(SkPaint::kStroke_Style);
+ copy.setStrokeWidth(0);
+
+ GrPaint grPaint;
+ // we ignore the shader if texs is null.
+ if (!SkPaintToGrPaintNoShader(this->context(), fDrawContext.get(), copy, &grPaint)) {
+ return;
+ }
+
+ int triangleCount = 0;
+ int n = (nullptr == indices) ? vertexCount : indexCount;
+ switch (vmode) {
+ case SkCanvas::kTriangles_VertexMode:
+ triangleCount = n / 3;
+ break;
+ case SkCanvas::kTriangleStrip_VertexMode:
+ case SkCanvas::kTriangleFan_VertexMode:
+ triangleCount = n - 2;
+ break;
+ }
+
+ VertState state(vertexCount, indices, indexCount);
+ VertState::Proc vertProc = state.chooseProc(vmode);
+
+ // Number of indices needed to draw each triangle's edges with kLines (6 per triangle).
+ indexCount = triangleCount * 6;
+
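+ // e.g. a triangle (a, b, c) becomes the segments (a,b), (b,c), (c,a), i.e. the six
+ // indices a,b,b,c,c,a written by the loop below.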
+ SkAutoTDeleteArray<uint16_t> lineIndices(new uint16_t[indexCount]);
+ int i = 0;
+ while (vertProc(&state)) {
+ lineIndices[i] = state.f0;
+ lineIndices[i + 1] = state.f1;
+ lineIndices[i + 2] = state.f1;
+ lineIndices[i + 3] = state.f2;
+ lineIndices[i + 4] = state.f2;
+ lineIndices[i + 5] = state.f0;
+ i += 6;
+ }
+ fDrawContext->drawVertices(fClip,
+ grPaint,
+ *draw.fMatrix,
+ kLines_GrPrimitiveType,
+ vertexCount,
+ vertices,
+ texs,
+ colors,
+ lineIndices.get(),
+ indexCount);
+ return;
+ }
+
+ GrPrimitiveType primType = gVertexMode2PrimitiveType[vmode];
+
+ SkAutoSTMalloc<128, GrColor> convertedColors(0);
+ if (colors) {
+ // need to convert byte order and from non-PM to PM. TODO: Keep unpremul until after
+ // interpolation.
+ convertedColors.reset(vertexCount);
+ for (int i = 0; i < vertexCount; ++i) {
+ convertedColors[i] = SkColorToPremulGrColor(colors[i]);
+ }
+ colors = convertedColors.get();
+ }
+ GrPaint grPaint;
+ if (texs && paint.getShader()) {
+ if (colors) {
+ // When there are texs and colors the shader and colors are combined using xmode. A null
+ // xmode is defined to mean modulate.
+ SkXfermode::Mode colorMode;
+ if (xmode) {
+ if (!xmode->asMode(&colorMode)) {
+ return;
+ }
+ } else {
+ colorMode = SkXfermode::kModulate_Mode;
+ }
+ if (!SkPaintToGrPaintWithXfermode(this->context(), fDrawContext.get(), paint,
+ *draw.fMatrix, colorMode, false, &grPaint)) {
+ return;
+ }
+ } else {
+ // We have a shader, but no colors to blend it against.
+ if (!SkPaintToGrPaint(this->context(), fDrawContext.get(), paint, *draw.fMatrix,
+ &grPaint)) {
+ return;
+ }
+ }
+ } else {
+ if (colors) {
+ // We have colors, but either have no shader or no texture coords (which implies that
+ // we should ignore the shader).
+ if (!SkPaintToGrPaintWithPrimitiveColor(this->context(), fDrawContext.get(), paint,
+ &grPaint)) {
+ return;
+ }
+ } else {
+ // No colors and no shaders. Just draw with the paint color.
+ if (!SkPaintToGrPaintNoShader(this->context(), fDrawContext.get(), paint, &grPaint)) {
+ return;
+ }
+ }
+ }
+
+ fDrawContext->drawVertices(fClip,
+ grPaint,
+ *draw.fMatrix,
+ primType,
+ vertexCount,
+ vertices,
+ texs,
+ colors,
+ indices,
+ indexCount);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGpuDevice::drawAtlas(const SkDraw& draw, const SkImage* atlas, const SkRSXform xform[],
+ const SkRect texRect[], const SkColor colors[], int count,
+ SkXfermode::Mode mode, const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ if (paint.isAntiAlias()) {
+ this->INHERITED::drawAtlas(draw, atlas, xform, texRect, colors, count, mode, paint);
+ return;
+ }
+
+ CHECK_SHOULD_DRAW(draw);
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawText", fContext);
+
+ SkPaint p(paint);
+ p.setShader(atlas->makeShader(SkShader::kClamp_TileMode, SkShader::kClamp_TileMode));
+
+ GrPaint grPaint;
+ if (colors) {
+ if (!SkPaintToGrPaintWithXfermode(this->context(), fDrawContext.get(), p, *draw.fMatrix,
+ mode, true, &grPaint)) {
+ return;
+ }
+ } else {
+ if (!SkPaintToGrPaint(this->context(), fDrawContext.get(), p, *draw.fMatrix, &grPaint)) {
+ return;
+ }
+ }
+
+ SkDEBUGCODE(this->validate();)
+ fDrawContext->drawAtlas(fClip, grPaint, *draw.fMatrix, count, xform, texRect, colors);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGpuDevice::drawText(const SkDraw& draw, const void* text,
+ size_t byteLength, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ CHECK_SHOULD_DRAW(draw);
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawText", fContext);
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fDrawContext.get(), paint, *draw.fMatrix, &grPaint)) {
+ return;
+ }
+
+ SkDEBUGCODE(this->validate();)
+
+ fDrawContext->drawText(fClip, grPaint, paint, *draw.fMatrix,
+ (const char *)text, byteLength, x, y, draw.fRC->getBounds());
+}
+
+void SkGpuDevice::drawPosText(const SkDraw& draw, const void* text, size_t byteLength,
+ const SkScalar pos[], int scalarsPerPos,
+ const SkPoint& offset, const SkPaint& paint) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawPosText", fContext);
+ CHECK_SHOULD_DRAW(draw);
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(this->context(), fDrawContext.get(), paint, *draw.fMatrix, &grPaint)) {
+ return;
+ }
+
+ SkDEBUGCODE(this->validate();)
+
+ fDrawContext->drawPosText(fClip, grPaint, paint, *draw.fMatrix,
+ (const char *)text, byteLength, pos, scalarsPerPos, offset,
+ draw.fRC->getBounds());
+}
+
+void SkGpuDevice::drawTextBlob(const SkDraw& draw, const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint, SkDrawFilter* drawFilter) {
+ ASSERT_SINGLE_OWNER
+ GR_CREATE_TRACE_MARKER_CONTEXT("SkGpuDevice", "drawTextBlob", fContext);
+ CHECK_SHOULD_DRAW(draw);
+
+ SkDEBUGCODE(this->validate();)
+
+ fDrawContext->drawTextBlob(fClip, paint, *draw.fMatrix,
+ blob, x, y, drawFilter, draw.fRC->getBounds());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkGpuDevice::onShouldDisableLCD(const SkPaint& paint) const {
+ return GrTextUtils::ShouldDisableLCD(paint);
+}
+
+void SkGpuDevice::flush() {
+ ASSERT_SINGLE_OWNER
+
+ fDrawContext->prepareForExternalIO();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkBaseDevice* SkGpuDevice::onCreateDevice(const CreateInfo& cinfo, const SkPaint*) {
+ ASSERT_SINGLE_OWNER
+
+ SkSurfaceProps props(this->surfaceProps().flags(), cinfo.fPixelGeometry);
+
+ // layers are never drawn in repeat modes, so we can request an approx
+ // match and ignore any padding.
+ SkBackingFit fit = kNever_TileUsage == cinfo.fTileUsage ? SkBackingFit::kApprox
+ : SkBackingFit::kExact;
+
+ sk_sp<GrDrawContext> dc(fContext->makeDrawContext(fit,
+ cinfo.fInfo.width(), cinfo.fInfo.height(),
+ fDrawContext->config(),
+ sk_ref_sp(fDrawContext->getColorSpace()),
+ fDrawContext->desc().fSampleCnt,
+ kDefault_GrSurfaceOrigin,
+ &props));
+ if (!dc) {
+ SkErrorInternals::SetError( kInternalError_SkError,
+ "---- failed to create gpu device texture [%d %d]\n",
+ cinfo.fInfo.width(), cinfo.fInfo.height());
+ return nullptr;
+ }
+
+ // Skia's convention is to only clear a device if it is non-opaque.
+ InitContents init = cinfo.fInfo.isOpaque() ? kUninit_InitContents : kClear_InitContents;
+
+ return SkGpuDevice::Make(std::move(dc),
+ cinfo.fInfo.width(), cinfo.fInfo.height(),
+ init).release();
+}
+
+sk_sp<SkSurface> SkGpuDevice::makeSurface(const SkImageInfo& info, const SkSurfaceProps& props) {
+ ASSERT_SINGLE_OWNER
+ // TODO: Change the signature of newSurface to take a budgeted parameter.
+ static const SkBudgeted kBudgeted = SkBudgeted::kNo;
+ return SkSurface::MakeRenderTarget(fContext, kBudgeted, info, fDrawContext->desc().fSampleCnt,
+ fDrawContext->origin(), &props);
+}
+
+SkImageFilterCache* SkGpuDevice::getImageFilterCache() {
+ ASSERT_SINGLE_OWNER
+ // We always return a transient cache, so it is freed after each
+ // filter traversal.
+ return SkImageFilterCache::Create(SkImageFilterCache::kDefaultTransientSize);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/SkGpuDevice.h b/gfx/skia/skia/src/gpu/SkGpuDevice.h
new file mode 100644
index 000000000..a49d16073
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/SkGpuDevice.h
@@ -0,0 +1,254 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGpuDevice_DEFINED
+#define SkGpuDevice_DEFINED
+
+#include "SkGr.h"
+#include "SkGrPriv.h"
+#include "SkBitmap.h"
+#include "SkDevice.h"
+#include "SkPicture.h"
+#include "SkRegion.h"
+#include "SkSurface.h"
+#include "GrClipStackClip.h"
+#include "GrDrawContext.h"
+#include "GrContext.h"
+#include "GrSurfacePriv.h"
+#include "GrTypes.h"
+
+class GrAccelData;
+class GrTextureProducer;
+struct GrCachedLayer;
+
+class SkSpecialImage;
+
+/**
+ * Subclass of SkBaseDevice, which directs all drawing to the GrGpu owned by the
+ * canvas.
+ */
+class SK_API SkGpuDevice : public SkBaseDevice {
+public:
+ enum InitContents {
+ kClear_InitContents,
+ kUninit_InitContents
+ };
+
+ /**
+ * Creates an SkGpuDevice from a GrDrawContext whose backing width/height is
+ * different than its actual width/height (e.g., approx-match scratch texture).
+ */
+ static sk_sp<SkGpuDevice> Make(sk_sp<GrDrawContext> drawContext,
+ int width, int height,
+ InitContents);
+
+ /**
+ * New device that will create an offscreen renderTarget based on the ImageInfo and
+ * sampleCount. The Budgeted param controls whether the device's backing store counts against
+ * the resource cache budget. On failure, returns nullptr.
+ */
+ static sk_sp<SkGpuDevice> Make(GrContext*, SkBudgeted, const SkImageInfo&,
+ int sampleCount, GrSurfaceOrigin,
+ const SkSurfaceProps*, InitContents);
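+    // Illustrative sketch (not from upstream Skia) of using the second factory; 'context'
+    // is assumed to be a valid GrContext owned by the caller:
+    //
+    //     SkImageInfo info = SkImageInfo::MakeN32Premul(256, 256);
+    //     sk_sp<SkGpuDevice> device = SkGpuDevice::Make(context, SkBudgeted::kYes, info,
+    //                                                   0, kDefault_GrSurfaceOrigin, nullptr,
+    //                                                   SkGpuDevice::kClear_InitContents);
+    //     if (!device) {
+    //         // creation failed (e.g. unsupported config); fall back to raster
+    //     }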
+
+ ~SkGpuDevice() override {}
+
+ GrContext* context() const override { return fContext; }
+
+ // set all pixels to 0
+ void clearAll();
+
+ void replaceDrawContext(bool shouldRetainContent);
+
+ GrDrawContext* accessDrawContext() override;
+
+ void drawPaint(const SkDraw&, const SkPaint& paint) override;
+ void drawPoints(const SkDraw&, SkCanvas::PointMode mode, size_t count, const SkPoint[],
+ const SkPaint& paint) override;
+ void drawRect(const SkDraw&, const SkRect& r, const SkPaint& paint) override;
+ void drawRRect(const SkDraw&, const SkRRect& r, const SkPaint& paint) override;
+ void drawDRRect(const SkDraw& draw, const SkRRect& outer, const SkRRect& inner,
+ const SkPaint& paint) override;
+ void drawRegion(const SkDraw&, const SkRegion& r, const SkPaint& paint) override;
+ void drawOval(const SkDraw&, const SkRect& oval, const SkPaint& paint) override;
+ void drawArc(const SkDraw&, const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) override;
+ void drawPath(const SkDraw&, const SkPath& path, const SkPaint& paint,
+ const SkMatrix* prePathMatrix, bool pathIsMutable) override;
+ void drawBitmap(const SkDraw&, const SkBitmap& bitmap, const SkMatrix&,
+ const SkPaint&) override;
+ void drawBitmapRect(const SkDraw&, const SkBitmap&, const SkRect* srcOrNull, const SkRect& dst,
+ const SkPaint& paint, SkCanvas::SrcRectConstraint) override;
+ void drawSprite(const SkDraw&, const SkBitmap& bitmap, int x, int y,
+ const SkPaint& paint) override;
+ void drawText(const SkDraw&, const void* text, size_t len, SkScalar x, SkScalar y,
+ const SkPaint&) override;
+ void drawPosText(const SkDraw&, const void* text, size_t len, const SkScalar pos[],
+ int scalarsPerPos, const SkPoint& offset, const SkPaint&) override;
+ void drawTextBlob(const SkDraw&, const SkTextBlob*, SkScalar x, SkScalar y,
+ const SkPaint& paint, SkDrawFilter* drawFilter) override;
+ void drawVertices(const SkDraw&, SkCanvas::VertexMode, int vertexCount, const SkPoint verts[],
+ const SkPoint texs[], const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount, const SkPaint&) override;
+ void drawAtlas(const SkDraw&, const SkImage* atlas, const SkRSXform[], const SkRect[],
+ const SkColor[], int count, SkXfermode::Mode, const SkPaint&) override;
+ void drawDevice(const SkDraw&, SkBaseDevice*, int x, int y, const SkPaint&) override;
+
+ void drawImage(const SkDraw&, const SkImage*, SkScalar x, SkScalar y, const SkPaint&) override;
+ void drawImageRect(const SkDraw&, const SkImage*, const SkRect* src, const SkRect& dst,
+ const SkPaint&, SkCanvas::SrcRectConstraint) override;
+
+ void drawImageNine(const SkDraw& draw, const SkImage* image, const SkIRect& center,
+ const SkRect& dst, const SkPaint& paint) override;
+ void drawBitmapNine(const SkDraw& draw, const SkBitmap& bitmap, const SkIRect& center,
+ const SkRect& dst, const SkPaint& paint) override;
+
+ void drawImageLattice(const SkDraw&, const SkImage*, const SkCanvas::Lattice&,
+ const SkRect& dst, const SkPaint&) override;
+ void drawBitmapLattice(const SkDraw&, const SkBitmap&, const SkCanvas::Lattice&,
+ const SkRect& dst, const SkPaint&) override;
+
+ void drawSpecial(const SkDraw&, SkSpecialImage*,
+ int left, int top, const SkPaint& paint) override;
+ sk_sp<SkSpecialImage> makeSpecial(const SkBitmap&) override;
+ sk_sp<SkSpecialImage> makeSpecial(const SkImage*) override;
+ sk_sp<SkSpecialImage> snapSpecial() override;
+
+ void flush() override;
+
+ bool onAccessPixels(SkPixmap*) override;
+
+ // for debugging purposes only
+ void drawTexture(GrTexture*, const SkRect& dst, const SkPaint&);
+
+protected:
+ bool onReadPixels(const SkImageInfo&, void*, size_t, int, int) override;
+ bool onWritePixels(const SkImageInfo&, const void*, size_t, int, int) override;
+ bool onShouldDisableLCD(const SkPaint&) const final;
+
+private:
+ // We want these unreffed in DrawContext, GrContext order.
+ SkAutoTUnref<GrContext> fContext;
+ sk_sp<GrDrawContext> fDrawContext;
+
+ SkIPoint fClipOrigin;
+ GrClipStackClip fClip;
+ SkISize fSize;
+ bool fOpaque;
+
+ enum Flags {
+ kNeedClear_Flag = 1 << 0, //!< Surface requires an initial clear
+ kIsOpaque_Flag = 1 << 1, //!< Hint from client that rendering to this device will be
+ // opaque even if the config supports alpha.
+ };
+ static bool CheckAlphaTypeAndGetFlags(const SkImageInfo* info, InitContents init,
+ unsigned* flags);
+
+ SkGpuDevice(sk_sp<GrDrawContext>, int width, int height, unsigned flags);
+
+ SkBaseDevice* onCreateDevice(const CreateInfo&, const SkPaint*) override;
+
+ sk_sp<SkSurface> makeSurface(const SkImageInfo&, const SkSurfaceProps&) override;
+
+ SkImageFilterCache* getImageFilterCache() override;
+
+ bool forceConservativeRasterClip() const override { return true; }
+
+ // sets the render target and clip on context
+ void prepareDraw(const SkDraw&);
+
+ /**
+ * Helper functions called by drawBitmapCommon. By the time these are called the SkDraw's
+     * matrix, clip, and the device's render target have already been set on GrContext.
+ */
+
+ // The tileSize and clippedSrcRect will be valid only if true is returned.
+ bool shouldTileImageID(uint32_t imageID, const SkIRect& imageRect,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& srcToDstRectMatrix,
+ const GrTextureParams& params,
+ const SkRect* srcRectPtr,
+ int maxTileSize,
+ int* tileSize,
+ SkIRect* clippedSubset) const;
+ // Just returns the predicate, not the out-tileSize or out-clippedSubset, as they are not
+ // needed at the moment.
+ bool shouldTileImage(const SkImage* image, const SkRect* srcRectPtr,
+ SkCanvas::SrcRectConstraint constraint, SkFilterQuality quality,
+ const SkMatrix& viewMatrix, const SkMatrix& srcToDstRect) const;
+
+ sk_sp<SkSpecialImage> filterTexture(const SkDraw&,
+ SkSpecialImage*,
+ int left, int top,
+ SkIPoint* offset,
+ const SkImageFilter* filter);
+
+ // Splits bitmap into tiles of tileSize and draws them using separate textures for each tile.
+ void drawTiledBitmap(const SkBitmap& bitmap,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& srcToDstMatrix,
+ const SkRect& srcRect,
+ const SkIRect& clippedSrcRect,
+ const GrTextureParams& params,
+ const SkPaint& paint,
+ SkCanvas::SrcRectConstraint,
+ int tileSize,
+ bool bicubic);
+
+ // Used by drawTiledBitmap to draw each tile.
+ void drawBitmapTile(const SkBitmap&,
+ const SkMatrix& viewMatrix,
+ const SkRect& dstRect,
+ const SkRect& srcRect,
+ const GrTextureParams& params,
+ const SkPaint& paint,
+ SkCanvas::SrcRectConstraint,
+ bool bicubic,
+ bool needsTextureDomain);
+
+ void drawTextureProducer(GrTextureProducer*,
+ const SkRect* srcRect,
+ const SkRect* dstRect,
+ SkCanvas::SrcRectConstraint,
+ const SkMatrix& viewMatrix,
+ const GrClip&,
+ const SkPaint&);
+
+ void drawTextureProducerImpl(GrTextureProducer*,
+ const SkRect& clippedSrcRect,
+ const SkRect& clippedDstRect,
+ SkCanvas::SrcRectConstraint,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& srcToDstMatrix,
+ const GrClip&,
+ const SkPaint&);
+
+ bool drawFilledDRRect(const SkMatrix& viewMatrix, const SkRRect& outer,
+ const SkRRect& inner, const SkPaint& paint);
+
+ void drawProducerNine(const SkDraw&, GrTextureProducer*, const SkIRect& center,
+ const SkRect& dst, const SkPaint&);
+
+ void drawProducerLattice(const SkDraw&, GrTextureProducer*, const SkCanvas::Lattice& lattice,
+ const SkRect& dst, const SkPaint&);
+
+ bool drawDashLine(const SkPoint pts[2], const SkPaint& paint);
+ void drawStrokedLine(const SkPoint pts[2], const SkDraw&, const SkPaint&);
+
+ static sk_sp<GrDrawContext> MakeDrawContext(GrContext*,
+ SkBudgeted,
+ const SkImageInfo&,
+ int sampleCount,
+ GrSurfaceOrigin,
+ const SkSurfaceProps*);
+
+ friend class GrAtlasTextContext;
+ friend class SkSurface_Gpu; // for access to surfaceProps
+ typedef SkBaseDevice INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/SkGpuDevice_drawTexture.cpp b/gfx/skia/skia/src/gpu/SkGpuDevice_drawTexture.cpp
new file mode 100644
index 000000000..46a3699bb
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/SkGpuDevice_drawTexture.cpp
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkGpuDevice.h"
+
+#include "GrBlurUtils.h"
+#include "GrCaps.h"
+#include "GrDrawContext.h"
+#include "GrStyle.h"
+#include "GrTextureParamsAdjuster.h"
+#include "SkDraw.h"
+#include "SkGrPriv.h"
+#include "SkMaskFilter.h"
+#include "effects/GrBicubicEffect.h"
+#include "effects/GrSimpleTextureEffect.h"
+#include "effects/GrTextureDomain.h"
+
+static inline bool use_shader(bool textureIsAlphaOnly, const SkPaint& paint) {
+ return textureIsAlphaOnly && paint.getShader();
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// Helper functions for dropping src rect constraint in bilerp mode.
+
+static const SkScalar kColorBleedTolerance = 0.001f;
+
+static bool has_aligned_samples(const SkRect& srcRect, const SkRect& transformedRect) {
+ // detect pixel disalignment
+ if (SkScalarAbs(SkScalarRoundToScalar(transformedRect.left()) - transformedRect.left()) < kColorBleedTolerance &&
+ SkScalarAbs(SkScalarRoundToScalar(transformedRect.top()) - transformedRect.top()) < kColorBleedTolerance &&
+ SkScalarAbs(transformedRect.width() - srcRect.width()) < kColorBleedTolerance &&
+ SkScalarAbs(transformedRect.height() - srcRect.height()) < kColorBleedTolerance) {
+ return true;
+ }
+ return false;
+}
+
+static bool may_color_bleed(const SkRect& srcRect,
+ const SkRect& transformedRect,
+ const SkMatrix& m,
+ bool isMSAA) {
+ // Only gets called if has_aligned_samples returned false.
+ // So we can assume that sampling is axis aligned but not texel aligned.
+ SkASSERT(!has_aligned_samples(srcRect, transformedRect));
+ SkRect innerSrcRect(srcRect), innerTransformedRect, outerTransformedRect(transformedRect);
+ if (isMSAA) {
+ innerSrcRect.inset(SK_Scalar1, SK_Scalar1);
+ } else {
+ innerSrcRect.inset(SK_ScalarHalf, SK_ScalarHalf);
+ }
+ m.mapRect(&innerTransformedRect, innerSrcRect);
+
+ // The gap between outerTransformedRect and innerTransformedRect
+ // represents the projection of the source border area, which is
+ // problematic for color bleeding. We must check whether any
+ // destination pixels sample the border area.
+ outerTransformedRect.inset(kColorBleedTolerance, kColorBleedTolerance);
+ innerTransformedRect.outset(kColorBleedTolerance, kColorBleedTolerance);
+ SkIRect outer, inner;
+ outerTransformedRect.round(&outer);
+ innerTransformedRect.round(&inner);
+ // If the inner and outer rects round to the same result, it means the
+ // border does not overlap any pixel centers. Yay!
+ return inner != outer;
+}
+
+static bool can_ignore_bilerp_constraint(const GrTextureProducer& producer,
+ const SkRect& srcRect,
+ const SkMatrix& srcRectToDeviceSpace,
+ bool isMSAA) {
+ if (srcRectToDeviceSpace.rectStaysRect()) {
+ // sampling is axis-aligned
+ SkRect transformedRect;
+ srcRectToDeviceSpace.mapRect(&transformedRect, srcRect);
+
+ if (has_aligned_samples(srcRect, transformedRect) ||
+ !may_color_bleed(srcRect, transformedRect, srcRectToDeviceSpace, isMSAA)) {
+ return true;
+ }
+ }
+ return false;
+}
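+
+// Worked example of the heuristic above (illustrative values only): if the combined
+// src-to-device matrix is an integer translation, the transformed rect lands exactly on
+// pixel boundaries, has_aligned_samples() returns true, and the strict constraint can be
+// dropped because bilerp cannot reach texels outside the src rect.
+//
+//     SkRect src = SkRect::MakeXYWH(8, 8, 16, 16);
+//     SkMatrix m = SkMatrix::MakeTrans(20, 10);   // rectStaysRect(), integer translate
+//     SkRect dst;
+//     m.mapRect(&dst, src);                       // dst == (28, 18, 44, 34)
+//     // has_aligned_samples(src, dst) -> true, so the constraint can be ignored.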
+
+//////////////////////////////////////////////////////////////////////////////
+
+void SkGpuDevice::drawTextureProducer(GrTextureProducer* producer,
+ const SkRect* srcRect,
+ const SkRect* dstRect,
+ SkCanvas::SrcRectConstraint constraint,
+ const SkMatrix& viewMatrix,
+ const GrClip& clip,
+ const SkPaint& paint) {
+ // This is the funnel for all non-tiled bitmap/image draw calls. Log a histogram entry.
+ SK_HISTOGRAM_BOOLEAN("DrawTiled", false);
+
+ // Figure out the actual dst and src rect by clipping the src rect to the bounds of the
+ // adjuster. If the src rect is clipped then the dst rect must be recomputed. Also determine
+ // the matrix that maps the src rect to the dst rect.
+ SkRect clippedSrcRect;
+ SkRect clippedDstRect;
+ const SkRect srcBounds = SkRect::MakeIWH(producer->width(), producer->height());
+ SkMatrix srcToDstMatrix;
+ if (srcRect) {
+ if (!dstRect) {
+ dstRect = &srcBounds;
+ }
+ if (!srcBounds.contains(*srcRect)) {
+ clippedSrcRect = *srcRect;
+ if (!clippedSrcRect.intersect(srcBounds)) {
+ return;
+ }
+ if (!srcToDstMatrix.setRectToRect(*srcRect, *dstRect, SkMatrix::kFill_ScaleToFit)) {
+ return;
+ }
+ srcToDstMatrix.mapRect(&clippedDstRect, clippedSrcRect);
+ } else {
+ clippedSrcRect = *srcRect;
+ clippedDstRect = *dstRect;
+ if (!srcToDstMatrix.setRectToRect(*srcRect, *dstRect, SkMatrix::kFill_ScaleToFit)) {
+ return;
+ }
+ }
+ } else {
+ clippedSrcRect = srcBounds;
+ if (dstRect) {
+ clippedDstRect = *dstRect;
+ if (!srcToDstMatrix.setRectToRect(srcBounds, *dstRect, SkMatrix::kFill_ScaleToFit)) {
+ return;
+ }
+ } else {
+ clippedDstRect = srcBounds;
+ srcToDstMatrix.reset();
+ }
+ }
+
+ // Now that we have both the view and srcToDst matrices, log our scale factor.
+ LogDrawScaleFactor(SkMatrix::Concat(viewMatrix, srcToDstMatrix), paint.getFilterQuality());
+
+ this->drawTextureProducerImpl(producer, clippedSrcRect, clippedDstRect, constraint, viewMatrix,
+ srcToDstMatrix, clip, paint);
+}
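+
+// Worked example of the clipping above (illustrative values): for a 64x64 producer with
+// srcRect = (0, 0, 128, 64) and dstRect = (0, 0, 256, 128), srcToDstMatrix scales by 2 in
+// each axis, clippedSrcRect becomes (0, 0, 64, 64), and mapping it through srcToDstMatrix
+// gives clippedDstRect = (0, 0, 128, 128), i.e. only the part of dst backed by source texels.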
+
+void SkGpuDevice::drawTextureProducerImpl(GrTextureProducer* producer,
+ const SkRect& clippedSrcRect,
+ const SkRect& clippedDstRect,
+ SkCanvas::SrcRectConstraint constraint,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& srcToDstMatrix,
+ const GrClip& clip,
+ const SkPaint& paint) {
+ // Specifying the texture coords as local coordinates is an attempt to enable more batching
+    // by not baking anything about the srcRect, dstRect, or viewMatrix into the texture FP. In
+ // the future this should be an opaque optimization enabled by the combination of batch/GP and
+ // FP.
+ const SkMaskFilter* mf = paint.getMaskFilter();
+ // The shader expects proper local coords, so we can't replace local coords with texture coords
+ // if the shader will be used. If we have a mask filter we will change the underlying geometry
+ // that is rendered.
+ bool canUseTextureCoordsAsLocalCoords = !use_shader(producer->isAlphaOnly(), paint) && !mf;
+
+ bool doBicubic;
+ GrTextureParams::FilterMode fm =
+ GrSkFilterQualityToGrFilterMode(paint.getFilterQuality(), viewMatrix, srcToDstMatrix,
+ &doBicubic);
+ const GrTextureParams::FilterMode* filterMode = doBicubic ? nullptr : &fm;
+
+ GrTextureAdjuster::FilterConstraint constraintMode;
+ if (SkCanvas::kFast_SrcRectConstraint == constraint) {
+ constraintMode = GrTextureAdjuster::kNo_FilterConstraint;
+ } else {
+ constraintMode = GrTextureAdjuster::kYes_FilterConstraint;
+ }
+
+ // If we have to outset for AA then we will generate texture coords outside the src rect. The
+ // same happens for any mask filter that extends the bounds rendered in the dst.
+ // This is conservative as a mask filter does not have to expand the bounds rendered.
+ bool coordsAllInsideSrcRect = !paint.isAntiAlias() && !mf;
+
+ // Check for optimization to drop the src rect constraint when on bilerp.
+ if (filterMode && GrTextureParams::kBilerp_FilterMode == *filterMode &&
+ GrTextureAdjuster::kYes_FilterConstraint == constraintMode && coordsAllInsideSrcRect) {
+ SkMatrix combinedMatrix;
+ combinedMatrix.setConcat(viewMatrix, srcToDstMatrix);
+ if (can_ignore_bilerp_constraint(*producer, clippedSrcRect, combinedMatrix,
+ fDrawContext->isUnifiedMultisampled())) {
+ constraintMode = GrTextureAdjuster::kNo_FilterConstraint;
+ }
+ }
+
+ const SkMatrix* textureMatrix;
+ SkMatrix tempMatrix;
+ if (canUseTextureCoordsAsLocalCoords) {
+ textureMatrix = &SkMatrix::I();
+ } else {
+ if (!srcToDstMatrix.invert(&tempMatrix)) {
+ return;
+ }
+ textureMatrix = &tempMatrix;
+ }
+ sk_sp<GrFragmentProcessor> fp(producer->createFragmentProcessor(
+ *textureMatrix, clippedSrcRect, constraintMode, coordsAllInsideSrcRect, filterMode,
+ fDrawContext->getColorSpace(), fDrawContext->sourceGammaTreatment()));
+ if (!fp) {
+ return;
+ }
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaintWithTexture(fContext, fDrawContext.get(), paint, viewMatrix, fp,
+ producer->isAlphaOnly(), &grPaint)) {
+ return;
+ }
+
+ if (canUseTextureCoordsAsLocalCoords) {
+ fDrawContext->fillRectToRect(clip, grPaint, viewMatrix, clippedDstRect, clippedSrcRect);
+ return;
+ }
+
+ if (!mf) {
+ fDrawContext->drawRect(clip, grPaint, viewMatrix, clippedDstRect);
+ return;
+ }
+
+ // First see if we can do the draw + mask filter direct to the dst.
+ if (viewMatrix.isScaleTranslate()) {
+ SkRect devClippedDstRect;
+ viewMatrix.mapRectScaleTranslate(&devClippedDstRect, clippedDstRect);
+
+ SkStrokeRec rec(SkStrokeRec::kFill_InitStyle);
+ if (mf->directFilterRRectMaskGPU(fContext,
+ fDrawContext.get(),
+ &grPaint,
+ clip,
+ viewMatrix,
+ rec,
+ SkRRect::MakeRect(clippedDstRect),
+ SkRRect::MakeRect(devClippedDstRect))) {
+ return;
+ }
+ }
+
+ SkPath rectPath;
+ rectPath.addRect(clippedDstRect);
+ rectPath.setIsVolatile(true);
+ GrBlurUtils::drawPathWithMaskFilter(this->context(), fDrawContext.get(), fClip,
+ rectPath, &grPaint, viewMatrix, mf, GrStyle::SimpleFill(),
+ true);
+}
diff --git a/gfx/skia/skia/src/gpu/SkGr.cpp b/gfx/skia/skia/src/gpu/SkGr.cpp
new file mode 100644
index 000000000..ee4e40a64
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/SkGr.cpp
@@ -0,0 +1,813 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkGr.h"
+#include "SkGrPriv.h"
+
+#include "GrCaps.h"
+#include "GrContext.h"
+#include "GrDrawContext.h"
+#include "GrGpuResourcePriv.h"
+#include "GrImageIDTextureAdjuster.h"
+#include "GrTextureParamsAdjuster.h"
+#include "GrTexturePriv.h"
+#include "GrTypes.h"
+#include "GrXferProcessor.h"
+#include "GrYUVProvider.h"
+
+#include "SkBlendModePriv.h"
+#include "SkColorFilter.h"
+#include "SkConfig8888.h"
+#include "SkCanvas.h"
+#include "SkData.h"
+#include "SkErrorInternals.h"
+#include "SkMessageBus.h"
+#include "SkMipMap.h"
+#include "SkPixelRef.h"
+#include "SkPM4fPriv.h"
+#include "SkResourceCache.h"
+#include "SkTemplates.h"
+#include "SkYUVPlanesCache.h"
+#include "effects/GrBicubicEffect.h"
+#include "effects/GrConstColorProcessor.h"
+#include "effects/GrDitherEffect.h"
+#include "effects/GrPorterDuffXferProcessor.h"
+#include "effects/GrXfermodeFragmentProcessor.h"
+#include "effects/GrYUVEffect.h"
+
+#ifndef SK_IGNORE_ETC1_SUPPORT
+# include "ktx.h"
+# include "etc1.h"
+#endif
+
+GrSurfaceDesc GrImageInfoToSurfaceDesc(const SkImageInfo& info, const GrCaps& caps) {
+ GrSurfaceDesc desc;
+ desc.fFlags = kNone_GrSurfaceFlags;
+ desc.fWidth = info.width();
+ desc.fHeight = info.height();
+ desc.fConfig = SkImageInfo2GrPixelConfig(info, caps);
+ desc.fSampleCnt = 0;
+ return desc;
+}
+
+void GrMakeKeyFromImageID(GrUniqueKey* key, uint32_t imageID, const SkIRect& imageBounds) {
+ SkASSERT(key);
+ SkASSERT(imageID);
+ SkASSERT(!imageBounds.isEmpty());
+ static const GrUniqueKey::Domain kImageIDDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey::Builder builder(key, kImageIDDomain, 5);
+ builder[0] = imageID;
+ builder[1] = imageBounds.fLeft;
+ builder[2] = imageBounds.fTop;
+ builder[3] = imageBounds.fRight;
+ builder[4] = imageBounds.fBottom;
+}
+
+GrPixelConfig GrIsCompressedTextureDataSupported(GrContext* ctx, SkData* data,
+ int expectedW, int expectedH,
+ const void** outStartOfDataToUpload) {
+ *outStartOfDataToUpload = nullptr;
+#ifndef SK_IGNORE_ETC1_SUPPORT
+ if (!ctx->caps()->isConfigTexturable(kETC1_GrPixelConfig)) {
+ return kUnknown_GrPixelConfig;
+ }
+
+ const uint8_t* bytes = data->bytes();
+ if (data->size() > ETC_PKM_HEADER_SIZE && etc1_pkm_is_valid(bytes)) {
+ // Does the data match the dimensions of the bitmap? If not,
+ // then we don't know how to scale the image to match it...
+ if (etc1_pkm_get_width(bytes) != (unsigned)expectedW ||
+ etc1_pkm_get_height(bytes) != (unsigned)expectedH)
+ {
+ return kUnknown_GrPixelConfig;
+ }
+
+ *outStartOfDataToUpload = bytes + ETC_PKM_HEADER_SIZE;
+ return kETC1_GrPixelConfig;
+ } else if (SkKTXFile::is_ktx(bytes, data->size())) {
+ SkKTXFile ktx(data);
+
+ // Is it actually an ETC1 texture?
+ if (!ktx.isCompressedFormat(SkTextureCompressor::kETC1_Format)) {
+ return kUnknown_GrPixelConfig;
+ }
+
+ // Does the data match the dimensions of the bitmap? If not,
+ // then we don't know how to scale the image to match it...
+ if (ktx.width() != expectedW || ktx.height() != expectedH) {
+ return kUnknown_GrPixelConfig;
+ }
+
+ *outStartOfDataToUpload = ktx.pixelData();
+ return kETC1_GrPixelConfig;
+ }
+#endif
+ return kUnknown_GrPixelConfig;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Fill out buffer with the compressed format Ganesh expects from a colortable
+ * based bitmap. [palette (colortable) + indices].
+ *
+ * At the moment Ganesh only supports the 8-bit version. If Ganesh allowed other
+ * formats, we could detect that colortable.count is <= 16 and then repack the
+ * indices as nibbles to save RAM, but it would take more time (i.e. be a lot
+ * slower than memcpy), so we skip that for now.
+ *
+ * Ganesh wants a full 256-entry palette, even though Skia's ctable is only as big
+ * as colortable.count says it is.
+ */
+static void build_index8_data(void* buffer, const SkPixmap& pixmap) {
+ SkASSERT(kIndex_8_SkColorType == pixmap.colorType());
+
+ const SkColorTable* ctable = pixmap.ctable();
+ char* dst = (char*)buffer;
+
+ const int count = ctable->count();
+
+ SkDstPixelInfo dstPI;
+ dstPI.fColorType = kRGBA_8888_SkColorType;
+ dstPI.fAlphaType = kPremul_SkAlphaType;
+ dstPI.fPixels = buffer;
+ dstPI.fRowBytes = count * sizeof(SkPMColor);
+
+ SkSrcPixelInfo srcPI;
+ srcPI.fColorType = kN32_SkColorType;
+ srcPI.fAlphaType = kPremul_SkAlphaType;
+ srcPI.fPixels = ctable->readColors();
+ srcPI.fRowBytes = count * sizeof(SkPMColor);
+
+ srcPI.convertPixelsTo(&dstPI, count, 1);
+
+    // always skip past a full 256 entries, even if we copied fewer
+ dst += 256 * sizeof(GrColor);
+
+ if ((unsigned)pixmap.width() == pixmap.rowBytes()) {
+ memcpy(dst, pixmap.addr(), pixmap.getSafeSize());
+ } else {
+ // need to trim off the extra bytes per row
+ size_t width = pixmap.width();
+ size_t rowBytes = pixmap.rowBytes();
+ const uint8_t* src = pixmap.addr8();
+ for (int y = 0; y < pixmap.height(); y++) {
+ memcpy(dst, src, width);
+ src += rowBytes;
+ dst += width;
+ }
+ }
+}
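+
+// The buffer filled above therefore has the fixed layout
+//
+//     [256 * sizeof(GrColor) bytes of RGBA palette][width * height bytes of 8-bit indices]
+//
+// which is why GrUploadPixmapToTexture sizes it with GrCompressedFormatDataSize() and uploads
+// it with rowBytes == width: the index rows are packed tightly when copied in.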
+
+/**
+ * Once we have made SkImages handle all lazy/deferred/generated content, the YUV apis will
+ * be gone from SkPixelRef, and we can remove this subclass entirely.
+ */
+class PixelRef_GrYUVProvider : public GrYUVProvider {
+ SkPixelRef* fPR;
+
+public:
+ PixelRef_GrYUVProvider(SkPixelRef* pr) : fPR(pr) {}
+
+ uint32_t onGetID() override { return fPR->getGenerationID(); }
+ bool onQueryYUV8(SkYUVSizeInfo* sizeInfo, SkYUVColorSpace* colorSpace) const override {
+ return fPR->queryYUV8(sizeInfo, colorSpace);
+ }
+ bool onGetYUV8Planes(const SkYUVSizeInfo& sizeInfo, void* planes[3]) override {
+ return fPR->getYUV8Planes(sizeInfo, planes);
+ }
+};
+
+static sk_sp<GrTexture> create_texture_from_yuv(GrContext* ctx, const SkBitmap& bm,
+ const GrSurfaceDesc& desc) {
+ // Subsets are not supported, the whole pixelRef is loaded when using YUV decoding
+ SkPixelRef* pixelRef = bm.pixelRef();
+ if ((nullptr == pixelRef) ||
+ (pixelRef->info().width() != bm.info().width()) ||
+ (pixelRef->info().height() != bm.info().height())) {
+ return nullptr;
+ }
+
+ PixelRef_GrYUVProvider provider(pixelRef);
+
+ return provider.refAsTexture(ctx, desc, !bm.isVolatile());
+}
+
+static GrTexture* load_etc1_texture(GrContext* ctx, const SkBitmap &bm, GrSurfaceDesc desc) {
+ sk_sp<SkData> data(bm.pixelRef()->refEncodedData());
+ if (!data) {
+ return nullptr;
+ }
+
+ const void* startOfTexData;
+ desc.fConfig = GrIsCompressedTextureDataSupported(ctx, data.get(), bm.width(), bm.height(),
+ &startOfTexData);
+ if (kUnknown_GrPixelConfig == desc.fConfig) {
+ return nullptr;
+ }
+
+ return ctx->textureProvider()->createTexture(desc, SkBudgeted::kYes, startOfTexData, 0);
+}
+
+GrTexture* GrUploadBitmapToTexture(GrContext* ctx, const SkBitmap& bitmap) {
+ GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(bitmap.info(), *ctx->caps());
+ if (GrTexture *texture = load_etc1_texture(ctx, bitmap, desc)) {
+ return texture;
+ }
+
+ sk_sp<GrTexture> texture(create_texture_from_yuv(ctx, bitmap, desc));
+ if (texture) {
+ return texture.release();
+ }
+
+ SkAutoLockPixels alp(bitmap);
+ if (!bitmap.readyToDraw()) {
+ return nullptr;
+ }
+ SkPixmap pixmap;
+ if (!bitmap.peekPixels(&pixmap)) {
+ return nullptr;
+ }
+ return GrUploadPixmapToTexture(ctx, pixmap, SkBudgeted::kYes);
+}
+
+GrTexture* GrUploadPixmapToTexture(GrContext* ctx, const SkPixmap& pixmap, SkBudgeted budgeted) {
+ const SkPixmap* pmap = &pixmap;
+ SkPixmap tmpPixmap;
+ SkBitmap tmpBitmap;
+
+ const GrCaps* caps = ctx->caps();
+ GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(pixmap.info(), *caps);
+
+ if (caps->srgbSupport() &&
+ pixmap.info().colorSpace() && pixmap.info().colorSpace()->gammaCloseToSRGB() &&
+ !(GrPixelConfigIsSRGB(desc.fConfig) ||
+ kRGBA_half_GrPixelConfig == desc.fConfig ||
+ kRGBA_float_GrPixelConfig == desc.fConfig)) {
+ // We were supplied an sRGB-like color space, but we don't have a suitable pixel config.
+ // Convert to 8888 sRGB so we can handle the data correctly. The raster backend doesn't
+ // handle sRGB Index8 -> sRGB 8888 correctly (yet), so lie about both the source and
+ // destination (claim they're linear):
+ SkImageInfo linSrcInfo = SkImageInfo::Make(pixmap.width(), pixmap.height(),
+ pixmap.colorType(), pixmap.alphaType());
+ SkPixmap linSrcPixmap(linSrcInfo, pixmap.addr(), pixmap.rowBytes(), pixmap.ctable());
+
+ SkImageInfo dstInfo = SkImageInfo::Make(pixmap.width(), pixmap.height(),
+ kN32_SkColorType, kPremul_SkAlphaType,
+ sk_ref_sp(pixmap.info().colorSpace()));
+
+ tmpBitmap.allocPixels(dstInfo);
+
+ SkImageInfo linDstInfo = SkImageInfo::MakeN32Premul(pixmap.width(), pixmap.height());
+ if (!linSrcPixmap.readPixels(linDstInfo, tmpBitmap.getPixels(), tmpBitmap.rowBytes())) {
+ return nullptr;
+ }
+ if (!tmpBitmap.peekPixels(&tmpPixmap)) {
+ return nullptr;
+ }
+ pmap = &tmpPixmap;
+ // must rebuild desc, since we've forced the info to be N32
+ desc = GrImageInfoToSurfaceDesc(pmap->info(), *caps);
+ } else if (kGray_8_SkColorType == pixmap.colorType()) {
+ // We don't have Gray8 support as a pixel config, so expand to 8888
+
+ // We should have converted sRGB Gray8 above (if we have sRGB support):
+ SkASSERT(!caps->srgbSupport() || !pixmap.info().colorSpace() ||
+ !pixmap.info().colorSpace()->gammaCloseToSRGB());
+
+ SkImageInfo info = SkImageInfo::MakeN32(pixmap.width(), pixmap.height(),
+ kOpaque_SkAlphaType);
+ tmpBitmap.allocPixels(info);
+ if (!pixmap.readPixels(info, tmpBitmap.getPixels(), tmpBitmap.rowBytes())) {
+ return nullptr;
+ }
+ if (!tmpBitmap.peekPixels(&tmpPixmap)) {
+ return nullptr;
+ }
+ pmap = &tmpPixmap;
+ // must rebuild desc, since we've forced the info to be N32
+ desc = GrImageInfoToSurfaceDesc(pmap->info(), *caps);
+ } else if (kIndex_8_SkColorType == pixmap.colorType()) {
+ if (caps->isConfigTexturable(kIndex_8_GrPixelConfig)) {
+ size_t imageSize = GrCompressedFormatDataSize(kIndex_8_GrPixelConfig,
+ pixmap.width(), pixmap.height());
+ SkAutoMalloc storage(imageSize);
+ build_index8_data(storage.get(), pixmap);
+
+ // our compressed data will be trimmed, so pass width() for its
+ // "rowBytes", since they are the same now.
+ return ctx->textureProvider()->createTexture(desc, budgeted, storage.get(),
+ pixmap.width());
+ } else {
+ SkImageInfo info = SkImageInfo::MakeN32Premul(pixmap.width(), pixmap.height());
+ tmpBitmap.allocPixels(info);
+ if (!pixmap.readPixels(info, tmpBitmap.getPixels(), tmpBitmap.rowBytes())) {
+ return nullptr;
+ }
+ if (!tmpBitmap.peekPixels(&tmpPixmap)) {
+ return nullptr;
+ }
+ pmap = &tmpPixmap;
+ // must rebuild desc, since we've forced the info to be N32
+ desc = GrImageInfoToSurfaceDesc(pmap->info(), *caps);
+ }
+ }
+
+ return ctx->textureProvider()->createTexture(desc, budgeted, pmap->addr(),
+ pmap->rowBytes());
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrInstallBitmapUniqueKeyInvalidator(const GrUniqueKey& key, SkPixelRef* pixelRef) {
+ class Invalidator : public SkPixelRef::GenIDChangeListener {
+ public:
+ explicit Invalidator(const GrUniqueKey& key) : fMsg(key) {}
+ private:
+ GrUniqueKeyInvalidatedMessage fMsg;
+
+ void onChange() override { SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(fMsg); }
+ };
+
+ pixelRef->addGenIDChangeListener(new Invalidator(key));
+}
+
+GrTexture* GrGenerateMipMapsAndUploadToTexture(GrContext* ctx, const SkBitmap& bitmap,
+ SkSourceGammaTreatment gammaTreatment)
+{
+ GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(bitmap.info(), *ctx->caps());
+ if (kIndex_8_SkColorType != bitmap.colorType() && !bitmap.readyToDraw()) {
+ GrTexture* texture = load_etc1_texture(ctx, bitmap, desc);
+ if (texture) {
+ return texture;
+ }
+ }
+
+ sk_sp<GrTexture> texture(create_texture_from_yuv(ctx, bitmap, desc));
+ if (texture) {
+ return texture.release();
+ }
+
+ // We don't support Gray8 directly in the GL backend, so fail-over to GrUploadBitmapToTexture.
+ // That will transform the Gray8 to 8888, then use the driver/GPU to build mipmaps. If we build
+ // the mips on the CPU here, they'll all be Gray8, which isn't useful. (They get treated as A8).
+ // TODO: A better option might be to transform the initial bitmap here to 8888, then run the
+ // CPU mip-mapper on that data before uploading. This is much less code for a rare case though:
+ if (kGray_8_SkColorType == bitmap.colorType()) {
+ return nullptr;
+ }
+
+ SkASSERT(sizeof(int) <= sizeof(uint32_t));
+ if (bitmap.width() < 0 || bitmap.height() < 0) {
+ return nullptr;
+ }
+
+ SkAutoPixmapUnlock srcUnlocker;
+ if (!bitmap.requestLock(&srcUnlocker)) {
+ return nullptr;
+ }
+ const SkPixmap& pixmap = srcUnlocker.pixmap();
+ // Try to catch where we might have returned nullptr for src crbug.com/492818
+ if (nullptr == pixmap.addr()) {
+ sk_throw();
+ }
+
+ SkAutoTDelete<SkMipMap> mipmaps(SkMipMap::Build(pixmap, gammaTreatment, nullptr));
+ if (!mipmaps) {
+ return nullptr;
+ }
+
+ const int mipLevelCount = mipmaps->countLevels() + 1;
+ if (mipLevelCount < 1) {
+ return nullptr;
+ }
+
+ const bool isMipMapped = mipLevelCount > 1;
+ desc.fIsMipMapped = isMipMapped;
+
+ SkAutoTDeleteArray<GrMipLevel> texels(new GrMipLevel[mipLevelCount]);
+
+ texels[0].fPixels = pixmap.addr();
+ texels[0].fRowBytes = pixmap.rowBytes();
+
+ for (int i = 1; i < mipLevelCount; ++i) {
+ SkMipMap::Level generatedMipLevel;
+ mipmaps->getLevel(i - 1, &generatedMipLevel);
+ texels[i].fPixels = generatedMipLevel.fPixmap.addr();
+ texels[i].fRowBytes = generatedMipLevel.fPixmap.rowBytes();
+ }
+
+ {
+ GrTexture* texture = ctx->textureProvider()->createMipMappedTexture(desc,
+ SkBudgeted::kYes,
+ texels.get(),
+ mipLevelCount);
+ if (texture) {
+ texture->texturePriv().setGammaTreatment(gammaTreatment);
+ }
+ return texture;
+ }
+}
+
+GrTexture* GrUploadMipMapToTexture(GrContext* ctx, const SkImageInfo& info,
+ const GrMipLevel* texels, int mipLevelCount) {
+ const GrCaps* caps = ctx->caps();
+ return ctx->textureProvider()->createMipMappedTexture(GrImageInfoToSurfaceDesc(info, *caps),
+ SkBudgeted::kYes, texels,
+ mipLevelCount);
+}
+
+GrTexture* GrRefCachedBitmapTexture(GrContext* ctx, const SkBitmap& bitmap,
+ const GrTextureParams& params,
+ SkSourceGammaTreatment gammaTreatment) {
+ return GrBitmapTextureMaker(ctx, bitmap).refTextureForParams(params, gammaTreatment);
+}
+
+sk_sp<GrTexture> GrMakeCachedBitmapTexture(GrContext* ctx, const SkBitmap& bitmap,
+ const GrTextureParams& params,
+ SkSourceGammaTreatment gammaTreatment) {
+ GrTexture* tex = GrBitmapTextureMaker(ctx, bitmap).refTextureForParams(params, gammaTreatment);
+ return sk_sp<GrTexture>(tex);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrColor4f SkColorToPremulGrColor4f(SkColor c, bool gammaCorrect, GrColorSpaceXform* gamutXform) {
+ // We want to premultiply after linearizing, so this is easy:
+ return SkColorToUnpremulGrColor4f(c, gammaCorrect, gamutXform).premul();
+}
+
+GrColor4f SkColorToUnpremulGrColor4f(SkColor c, bool gammaCorrect, GrColorSpaceXform* gamutXform) {
+ // You can't be color-space aware in legacy mode
+ SkASSERT(gammaCorrect || !gamutXform);
+
+ GrColor4f color;
+ if (gammaCorrect) {
+ // SkColor4f::FromColor does sRGB -> Linear
+ color = GrColor4f::FromSkColor4f(SkColor4f::FromColor(c));
+ } else {
+ // GrColor4f::FromGrColor just multiplies by 1/255
+ color = GrColor4f::FromGrColor(SkColorToUnpremulGrColor(c));
+ }
+
+ if (gamutXform) {
+ color = gamutXform->apply(color);
+ }
+
+ return color;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// alphatype is ignored for now, but if GrPixelConfig is expanded to encompass
+// alpha info, that will be considered.
+GrPixelConfig SkImageInfo2GrPixelConfig(SkColorType ct, SkAlphaType, const SkColorSpace* cs,
+ const GrCaps& caps) {
+ // We intentionally ignore profile type for non-8888 formats. Anything we can't support
+ // in hardware will be expanded to sRGB 8888 in GrUploadPixmapToTexture.
+ switch (ct) {
+ case kUnknown_SkColorType:
+ return kUnknown_GrPixelConfig;
+ case kAlpha_8_SkColorType:
+ return kAlpha_8_GrPixelConfig;
+ case kRGB_565_SkColorType:
+ return kRGB_565_GrPixelConfig;
+ case kARGB_4444_SkColorType:
+ return kRGBA_4444_GrPixelConfig;
+ case kRGBA_8888_SkColorType:
+ return (caps.srgbSupport() && cs && cs->gammaCloseToSRGB())
+ ? kSRGBA_8888_GrPixelConfig : kRGBA_8888_GrPixelConfig;
+ case kBGRA_8888_SkColorType:
+ return (caps.srgbSupport() && cs && cs->gammaCloseToSRGB())
+ ? kSBGRA_8888_GrPixelConfig : kBGRA_8888_GrPixelConfig;
+ case kIndex_8_SkColorType:
+ return kIndex_8_GrPixelConfig;
+ case kGray_8_SkColorType:
+ return kAlpha_8_GrPixelConfig; // TODO: gray8 support on gpu
+ case kRGBA_F16_SkColorType:
+ return kRGBA_half_GrPixelConfig;
+ }
+ SkASSERT(0); // shouldn't get here
+ return kUnknown_GrPixelConfig;
+}
+
+bool GrPixelConfigToColorType(GrPixelConfig config, SkColorType* ctOut) {
+ SkColorType ct;
+ switch (config) {
+ case kAlpha_8_GrPixelConfig:
+ ct = kAlpha_8_SkColorType;
+ break;
+ case kIndex_8_GrPixelConfig:
+ ct = kIndex_8_SkColorType;
+ break;
+ case kRGB_565_GrPixelConfig:
+ ct = kRGB_565_SkColorType;
+ break;
+ case kRGBA_4444_GrPixelConfig:
+ ct = kARGB_4444_SkColorType;
+ break;
+ case kRGBA_8888_GrPixelConfig:
+ ct = kRGBA_8888_SkColorType;
+ break;
+ case kBGRA_8888_GrPixelConfig:
+ ct = kBGRA_8888_SkColorType;
+ break;
+ case kSRGBA_8888_GrPixelConfig:
+ ct = kRGBA_8888_SkColorType;
+ break;
+ case kSBGRA_8888_GrPixelConfig:
+ ct = kBGRA_8888_SkColorType;
+ break;
+ case kRGBA_half_GrPixelConfig:
+ ct = kRGBA_F16_SkColorType;
+ break;
+ default:
+ return false;
+ }
+ if (ctOut) {
+ *ctOut = ct;
+ }
+ return true;
+}
+
+GrPixelConfig GrRenderableConfigForColorSpace(const SkColorSpace* colorSpace) {
+ if (!colorSpace) {
+ return kRGBA_8888_GrPixelConfig;
+ } else if (colorSpace->gammaIsLinear()) {
+ return kRGBA_half_GrPixelConfig;
+ } else if (colorSpace->gammaCloseToSRGB()) {
+ return kSRGBA_8888_GrPixelConfig;
+ } else {
+ SkDEBUGFAIL("No renderable config exists for color space with strange gamma");
+ return kUnknown_GrPixelConfig;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+static inline bool blend_requires_shader(const SkXfermode::Mode mode, bool primitiveIsSrc) {
+ if (primitiveIsSrc) {
+ return SkXfermode::kSrc_Mode != mode;
+ } else {
+ return SkXfermode::kDst_Mode != mode;
+ }
+}
+
+static inline bool skpaint_to_grpaint_impl(GrContext* context,
+ GrDrawContext* dc,
+ const SkPaint& skPaint,
+ const SkMatrix& viewM,
+ sk_sp<GrFragmentProcessor>* shaderProcessor,
+ SkXfermode::Mode* primColorMode,
+ bool primitiveIsSrc,
+ GrPaint* grPaint) {
+ grPaint->setAntiAlias(skPaint.isAntiAlias());
+ grPaint->setAllowSRGBInputs(dc->isGammaCorrect());
+
+ // Convert SkPaint color to 4f format, including optional linearizing and gamut conversion.
+ GrColor4f origColor = SkColorToUnpremulGrColor4f(skPaint.getColor(), dc->isGammaCorrect(),
+ dc->getColorXformFromSRGB());
+
+ // Setup the initial color considering the shader, the SkPaint color, and the presence or not
+ // of per-vertex colors.
+ sk_sp<GrFragmentProcessor> shaderFP;
+ if (!primColorMode || blend_requires_shader(*primColorMode, primitiveIsSrc)) {
+ if (shaderProcessor) {
+ shaderFP = *shaderProcessor;
+ } else if (const SkShader* shader = skPaint.getShader()) {
+ shaderFP = shader->asFragmentProcessor(SkShader::AsFPArgs(context, &viewM, nullptr,
+ skPaint.getFilterQuality(),
+ dc->getColorSpace(),
+ dc->sourceGammaTreatment()));
+ if (!shaderFP) {
+ return false;
+ }
+ }
+ }
+
+    // Set this in the cases below if the output of the shader/paint-color/paint-alpha/primXfermode is
+ // a known constant value. In that case we can simply apply a color filter during this
+ // conversion without converting the color filter to a GrFragmentProcessor.
+ bool applyColorFilterToPaintColor = false;
+ if (shaderFP) {
+ if (primColorMode) {
+ // There is a blend between the primitive color and the shader color. The shader sees
+ // the opaque paint color. The shader's output is blended using the provided mode by
+ // the primitive color. The blended color is then modulated by the paint's alpha.
+
+ // The geometry processor will insert the primitive color to start the color chain, so
+ // the GrPaint color will be ignored.
+
+ GrColor4f shaderInput = origColor.opaque();
+ shaderFP = GrFragmentProcessor::OverrideInput(shaderFP, shaderInput);
+ if (primitiveIsSrc) {
+ shaderFP = GrXfermodeFragmentProcessor::MakeFromDstProcessor(std::move(shaderFP),
+ *primColorMode);
+ } else {
+ shaderFP = GrXfermodeFragmentProcessor::MakeFromSrcProcessor(std::move(shaderFP),
+ *primColorMode);
+ }
+ // The above may return null if compose results in a pass through of the prim color.
+ if (shaderFP) {
+ grPaint->addColorFragmentProcessor(shaderFP);
+ }
+
+ // We can ignore origColor here - alpha is unchanged by gamma
+ GrColor paintAlpha = SkColorAlphaToGrColor(skPaint.getColor());
+ if (GrColor_WHITE != paintAlpha) {
+ grPaint->addColorFragmentProcessor(GrConstColorProcessor::Make(
+ paintAlpha, GrConstColorProcessor::kModulateRGBA_InputMode));
+ }
+ } else {
+ // The shader's FP sees the paint unpremul color
+ grPaint->setColor4f(origColor);
+ grPaint->addColorFragmentProcessor(std::move(shaderFP));
+ }
+ } else {
+ if (primColorMode) {
+ // There is a blend between the primitive color and the paint color. The blend considers
+ // the opaque paint color. The paint's alpha is applied to the post-blended color.
+ // SRGBTODO: Preserve 4f on this code path
+ sk_sp<GrFragmentProcessor> processor(
+ GrConstColorProcessor::Make(origColor.opaque().toGrColor(),
+ GrConstColorProcessor::kIgnore_InputMode));
+ if (primitiveIsSrc) {
+ processor = GrXfermodeFragmentProcessor::MakeFromDstProcessor(std::move(processor),
+ *primColorMode);
+ } else {
+ processor = GrXfermodeFragmentProcessor::MakeFromSrcProcessor(std::move(processor),
+ *primColorMode);
+ }
+ if (processor) {
+ grPaint->addColorFragmentProcessor(std::move(processor));
+ }
+
+ grPaint->setColor4f(origColor.opaque());
+
+ // We can ignore origColor here - alpha is unchanged by gamma
+ GrColor paintAlpha = SkColorAlphaToGrColor(skPaint.getColor());
+ if (GrColor_WHITE != paintAlpha) {
+ grPaint->addColorFragmentProcessor(GrConstColorProcessor::Make(
+ paintAlpha, GrConstColorProcessor::kModulateRGBA_InputMode));
+ }
+ } else {
+ // No shader, no primitive color.
+ grPaint->setColor4f(origColor.premul());
+ applyColorFilterToPaintColor = true;
+ }
+ }
+
+ SkColorFilter* colorFilter = skPaint.getColorFilter();
+ if (colorFilter) {
+ if (applyColorFilterToPaintColor) {
+ grPaint->setColor4f(GrColor4f::FromSkColor4f(
+ colorFilter->filterColor4f(origColor.toSkColor4f())).premul());
+ } else {
+ sk_sp<GrFragmentProcessor> cfFP(colorFilter->asFragmentProcessor(context));
+ if (cfFP) {
+ grPaint->addColorFragmentProcessor(std::move(cfFP));
+ } else {
+ return false;
+ }
+ }
+ }
+
+ // When the xfermode is null on the SkPaint (meaning kSrcOver) we need the XPFactory field on
+ // the GrPaint to also be null (also kSrcOver).
+ SkASSERT(!grPaint->getXPFactory());
+ if (!skPaint.isSrcOver()) {
+ grPaint->setXPFactory(SkBlendMode_AsXPFactory(skPaint.getBlendMode()));
+ }
+
+#ifndef SK_IGNORE_GPU_DITHER
+ if (skPaint.isDither() && grPaint->numColorFragmentProcessors() > 0 && !dc->isGammaCorrect()) {
+ grPaint->addColorFragmentProcessor(GrDitherEffect::Make());
+ }
+#endif
+ return true;
+}
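+
+// Summary of the color chain assembled above:
+//   shader FP + primColorMode: the opaque paint color feeds the shader, the shader output is
+//       blended with the primitive color via primColorMode, then modulated by the paint alpha.
+//   shader FP, no primColorMode: the GrPaint color is the (unpremul) paint color and the
+//       shader FP is appended.
+//   no shader FP, primColorMode: a const-color FP holding the opaque paint color is blended
+//       with the primitive color, then modulated by the paint alpha.
+//   no shader FP, no primColorMode: the GrPaint color is simply the premultiplied paint color,
+//       and a color filter (if any) is folded directly into it.
+// A color filter FP, the xfermode's XPFactory (when not kSrcOver) and, in non-gamma-correct
+// mode, dithering are then layered on top.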
+
+bool SkPaintToGrPaint(GrContext* context, GrDrawContext* dc, const SkPaint& skPaint,
+ const SkMatrix& viewM, GrPaint* grPaint) {
+ return skpaint_to_grpaint_impl(context, dc, skPaint, viewM, nullptr, nullptr, false, grPaint);
+}
+
+/** Replaces the SkShader (if any) on skPaint with the passed in GrFragmentProcessor. */
+bool SkPaintToGrPaintReplaceShader(GrContext* context,
+ GrDrawContext* dc,
+ const SkPaint& skPaint,
+ sk_sp<GrFragmentProcessor> shaderFP,
+ GrPaint* grPaint) {
+ if (!shaderFP) {
+ return false;
+ }
+ return skpaint_to_grpaint_impl(context, dc, skPaint, SkMatrix::I(), &shaderFP, nullptr, false,
+ grPaint);
+}
+
+/** Ignores the SkShader (if any) on skPaint. */
+bool SkPaintToGrPaintNoShader(GrContext* context,
+ GrDrawContext* dc,
+ const SkPaint& skPaint,
+ GrPaint* grPaint) {
+    // Use a ptr to a nullptr to indicate that the SkShader is ignored and not replaced.
+ static sk_sp<GrFragmentProcessor> kNullShaderFP(nullptr);
+ static sk_sp<GrFragmentProcessor>* kIgnoreShader = &kNullShaderFP;
+ return skpaint_to_grpaint_impl(context, dc, skPaint, SkMatrix::I(), kIgnoreShader, nullptr,
+ false, grPaint);
+}
+
+/** Blends the SkPaint's shader (or color if no shader) with a per-primitive color which must
+be set up as a vertex attribute, using the specified SkXfermode::Mode. */
+bool SkPaintToGrPaintWithXfermode(GrContext* context,
+ GrDrawContext* dc,
+ const SkPaint& skPaint,
+ const SkMatrix& viewM,
+ SkXfermode::Mode primColorMode,
+ bool primitiveIsSrc,
+ GrPaint* grPaint) {
+ return skpaint_to_grpaint_impl(context, dc, skPaint, viewM, nullptr, &primColorMode,
+ primitiveIsSrc, grPaint);
+}
+
+bool SkPaintToGrPaintWithTexture(GrContext* context,
+ GrDrawContext* dc,
+ const SkPaint& paint,
+ const SkMatrix& viewM,
+ sk_sp<GrFragmentProcessor> fp,
+ bool textureIsAlphaOnly,
+ GrPaint* grPaint) {
+ sk_sp<GrFragmentProcessor> shaderFP;
+ if (textureIsAlphaOnly) {
+ if (const SkShader* shader = paint.getShader()) {
+ shaderFP = shader->asFragmentProcessor(SkShader::AsFPArgs(context,
+ &viewM,
+ nullptr,
+ paint.getFilterQuality(),
+ dc->getColorSpace(),
+ dc->sourceGammaTreatment()));
+ if (!shaderFP) {
+ return false;
+ }
+ sk_sp<GrFragmentProcessor> fpSeries[] = { std::move(shaderFP), std::move(fp) };
+ shaderFP = GrFragmentProcessor::RunInSeries(fpSeries, 2);
+ } else {
+ shaderFP = GrFragmentProcessor::MulOutputByInputUnpremulColor(fp);
+ }
+ } else {
+ shaderFP = GrFragmentProcessor::MulOutputByInputAlpha(fp);
+ }
+
+ return SkPaintToGrPaintReplaceShader(context, dc, paint, std::move(shaderFP), grPaint);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+GrTextureParams::FilterMode GrSkFilterQualityToGrFilterMode(SkFilterQuality paintFilterQuality,
+ const SkMatrix& viewM,
+ const SkMatrix& localM,
+ bool* doBicubic) {
+ *doBicubic = false;
+ GrTextureParams::FilterMode textureFilterMode;
+ switch (paintFilterQuality) {
+ case kNone_SkFilterQuality:
+ textureFilterMode = GrTextureParams::kNone_FilterMode;
+ break;
+ case kLow_SkFilterQuality:
+ textureFilterMode = GrTextureParams::kBilerp_FilterMode;
+ break;
+ case kMedium_SkFilterQuality: {
+ SkMatrix matrix;
+ matrix.setConcat(viewM, localM);
+ if (matrix.getMinScale() < SK_Scalar1) {
+ textureFilterMode = GrTextureParams::kMipMap_FilterMode;
+ } else {
+ // Don't trigger MIP level generation unnecessarily.
+ textureFilterMode = GrTextureParams::kBilerp_FilterMode;
+ }
+ break;
+ }
+ case kHigh_SkFilterQuality: {
+ SkMatrix matrix;
+ matrix.setConcat(viewM, localM);
+ *doBicubic = GrBicubicEffect::ShouldUseBicubic(matrix, &textureFilterMode);
+ break;
+ }
+ default:
+ SkErrorInternals::SetError( kInvalidPaint_SkError,
+ "Sorry, I don't understand the filtering "
+ "mode you asked for. Falling back to "
+ "MIPMaps.");
+ textureFilterMode = GrTextureParams::kMipMap_FilterMode;
+ break;
+
+ }
+ return textureFilterMode;
+}
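+
+// The mapping above, in short:
+//   kNone_SkFilterQuality   -> kNone_FilterMode (nearest)
+//   kLow_SkFilterQuality    -> kBilerp_FilterMode
+//   kMedium_SkFilterQuality -> kMipMap_FilterMode when viewM * localM minifies, otherwise
+//                              kBilerp_FilterMode (avoids triggering MIP generation)
+//   kHigh_SkFilterQuality   -> GrBicubicEffect::ShouldUseBicubic() picks between bicubic
+//                              (*doBicubic = true) and one of the modes above.
+// Unknown quality values fall back to kMipMap_FilterMode after reporting an error.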
diff --git a/gfx/skia/skia/src/gpu/SkGrPriv.h b/gfx/skia/skia/src/gpu/SkGrPriv.h
new file mode 100644
index 000000000..b658389ba
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/SkGrPriv.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGrPriv_DEFINED
+#define SkGrPriv_DEFINED
+
+#include "GrTypes.h"
+#include "GrBlend.h"
+#include "SkImageInfo.h"
+#include "SkMatrix.h"
+#include "SkXfermode.h"
+
+class GrCaps;
+class GrContext;
+class GrDrawContext;
+class GrFragmentProcessor;
+class GrPaint;
+class GrTexture;
+class GrTextureParams;
+class GrUniqueKey;
+class SkData;
+class SkPaint;
+class SkPixelRef;
+struct SkIRect;
+
+/**
+ * Our key includes the offset, width, and height so that bitmaps created by extractSubset()
+ * are unique.
+ *
+ * The imageID is in the shared namespace (see SkNextID::ImageID())
+ * - SkBitmap/SkPixelRef
+ * - SkImage
+ * - SkImageGenerator
+ *
+ * Note: width/height must fit in 16bits for this impl.
+ */
+void GrMakeKeyFromImageID(GrUniqueKey* key, uint32_t imageID, const SkIRect& imageBounds);
+
+/** Call this after installing a GrUniqueKey on a texture. It will cause the texture's key to be
+ removed should the bitmap's contents change or the bitmap be destroyed. */
+void GrInstallBitmapUniqueKeyInvalidator(const GrUniqueKey& key, SkPixelRef* pixelRef);
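+
+/** Illustrative pairing of the two functions above (not from upstream Skia); assumes 'texture'
+    is a GrTexture the caller has just created and keyed for 'bitmap':
+
+        GrUniqueKey key;
+        GrMakeKeyFromImageID(&key, bitmap.getGenerationID(), bitmap.getSubset());
+        // ... create 'texture' and install 'key' on it ...
+        GrInstallBitmapUniqueKeyInvalidator(key, bitmap.pixelRef());
+*/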
+
+/** Converts an SkPaint to a GrPaint for a given GrContext. The matrix is required in order
+ to convert the SkShader (if any) on the SkPaint. The primitive itself has no color. */
+bool SkPaintToGrPaint(GrContext*,
+ GrDrawContext*,
+ const SkPaint& skPaint,
+ const SkMatrix& viewM,
+ GrPaint* grPaint);
+
+/** Same as above but ignores the SkShader (if any) on skPaint. */
+bool SkPaintToGrPaintNoShader(GrContext* context,
+ GrDrawContext* dc,
+ const SkPaint& skPaint,
+ GrPaint* grPaint);
+
+/** Replaces the SkShader (if any) on skPaint with the passed in GrFragmentProcessor. The processor
+ should expect an unpremul input color and produce a premultiplied output color. There is
+ no primitive color. */
+bool SkPaintToGrPaintReplaceShader(GrContext*,
+ GrDrawContext*,
+ const SkPaint& skPaint,
+ sk_sp<GrFragmentProcessor> shaderFP,
+ GrPaint* grPaint);
+
+/** Blends the SkPaint's shader (or color if no shader) with the color that is specified via a
+  GrBatch's GrPrimitiveProcessor. Currently there is a bool param to indicate whether the
+  primitive color is the dst or src color of the blend, in order to work around differences
+  between drawVertices and drawAtlas. */
+bool SkPaintToGrPaintWithXfermode(GrContext* context,
+ GrDrawContext* dc,
+ const SkPaint& skPaint,
+ const SkMatrix& viewM,
+ SkXfermode::Mode primColorMode,
+ bool primitiveIsSrc,
+ GrPaint* grPaint);
+
+/** This is used when there is a primitive color, but the shader should be ignored. Currently,
+ the expectation is that the primitive color will be premultiplied, though it really should be
+ unpremultiplied so that interpolation is done in unpremul space. The paint's alpha will be
+ applied to the primitive color after interpolation. */
+inline bool SkPaintToGrPaintWithPrimitiveColor(GrContext* context, GrDrawContext* dc,
+ const SkPaint& skPaint, GrPaint* grPaint) {
+ return SkPaintToGrPaintWithXfermode(context, dc, skPaint, SkMatrix::I(), SkXfermode::kDst_Mode,
+ false, grPaint);
+}
+
+/** This is used when there may or may not be a shader, and the caller wants to plugin a texture
+ lookup. If there is a shader, then its output will only be used if the texture is alpha8. */
+bool SkPaintToGrPaintWithTexture(GrContext* context,
+ GrDrawContext* dc,
+ const SkPaint& paint,
+ const SkMatrix& viewM,
+ sk_sp<GrFragmentProcessor> fp,
+ bool textureIsAlphaOnly,
+ GrPaint* grPaint);
+
+//////////////////////////////////////////////////////////////////////////////
+
+GrSurfaceDesc GrImageInfoToSurfaceDesc(const SkImageInfo&, const GrCaps&);
+
+bool GrPixelConfigToColorType(GrPixelConfig, SkColorType*);
+
+/** When image filter code needs to construct a draw context to do intermediate rendering, we need
+ a renderable pixel config. The source (SkSpecialImage) may not be in a renderable format, but
+ we want to preserve the color space of that source. This picks an appropriate format to use. */
+GrPixelConfig GrRenderableConfigForColorSpace(const SkColorSpace*);
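+// (Concretely: a null color space yields kRGBA_8888_GrPixelConfig, a linear-gamma space
+//  kRGBA_half_GrPixelConfig, and an sRGB-like space kSRGBA_8888_GrPixelConfig; see SkGr.cpp.)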
+
+/**
+ * If the compressed data in the SkData is supported (as a texture format), this returns
+ * the pixel-config that should be used, and sets outStartOfDataToUpload to the ptr into
+ * the data where the actual raw data starts (skipping any header bytes).
+ *
+ * If the compressed data is not supported, this returns kUnknown_GrPixelConfig, and
+ * ignores outStartOfDataToUpload.
+ */
+GrPixelConfig GrIsCompressedTextureDataSupported(GrContext* ctx, SkData* data,
+ int expectedW, int expectedH,
+ const void** outStartOfDataToUpload);
+
+
+/**
+ * Creates a new texture for the bitmap. Does not concern itself with cache keys or texture params.
+ * The bitmap must have CPU-accessible pixels. Attempts to take advantage of faster paths for
+ * compressed textures and yuv planes.
+ */
+GrTexture* GrUploadBitmapToTexture(GrContext*, const SkBitmap&);
+
+GrTexture* GrGenerateMipMapsAndUploadToTexture(GrContext*, const SkBitmap&, SkSourceGammaTreatment);
+
+/**
+ * Creates a new texture for the pixmap.
+ */
+GrTexture* GrUploadPixmapToTexture(GrContext*, const SkPixmap&, SkBudgeted budgeted);
+
+/**
+ * Creates a new texture populated with the mipmap levels.
+ */
+GrTexture* GrUploadMipMapToTexture(GrContext*, const SkImageInfo&, const GrMipLevel* texels,
+ int mipLevelCount);
+
+//////////////////////////////////////////////////////////////////////////////
+
+GR_STATIC_ASSERT((int)kZero_GrBlendCoeff == (int)SkXfermode::kZero_Coeff);
+GR_STATIC_ASSERT((int)kOne_GrBlendCoeff == (int)SkXfermode::kOne_Coeff);
+GR_STATIC_ASSERT((int)kSC_GrBlendCoeff == (int)SkXfermode::kSC_Coeff);
+GR_STATIC_ASSERT((int)kISC_GrBlendCoeff == (int)SkXfermode::kISC_Coeff);
+GR_STATIC_ASSERT((int)kDC_GrBlendCoeff == (int)SkXfermode::kDC_Coeff);
+GR_STATIC_ASSERT((int)kIDC_GrBlendCoeff == (int)SkXfermode::kIDC_Coeff);
+GR_STATIC_ASSERT((int)kSA_GrBlendCoeff == (int)SkXfermode::kSA_Coeff);
+GR_STATIC_ASSERT((int)kISA_GrBlendCoeff == (int)SkXfermode::kISA_Coeff);
+GR_STATIC_ASSERT((int)kDA_GrBlendCoeff == (int)SkXfermode::kDA_Coeff);
+GR_STATIC_ASSERT((int)kIDA_GrBlendCoeff == (int)SkXfermode::kIDA_Coeff);
+GR_STATIC_ASSERT(SkXfermode::kCoeffCount == 10);
+
+#define SkXfermodeCoeffToGrBlendCoeff(X) ((GrBlendCoeff)(X))
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrAAConvexPathRenderer.cpp b/gfx/skia/skia/src/gpu/batches/GrAAConvexPathRenderer.cpp
new file mode 100644
index 000000000..c71f46de6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrAAConvexPathRenderer.cpp
@@ -0,0 +1,1019 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrAAConvexPathRenderer.h"
+
+#include "GrAAConvexTessellator.h"
+#include "GrBatchFlushState.h"
+#include "GrBatchTest.h"
+#include "GrCaps.h"
+#include "GrContext.h"
+#include "GrDefaultGeoProcFactory.h"
+#include "GrGeometryProcessor.h"
+#include "GrInvariantOutput.h"
+#include "GrPathUtils.h"
+#include "GrProcessor.h"
+#include "GrPipelineBuilder.h"
+#include "SkGeometry.h"
+#include "SkPathPriv.h"
+#include "SkString.h"
+#include "SkTraceEvent.h"
+#include "batches/GrVertexBatch.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLGeometryProcessor.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "glsl/GrGLSLVarying.h"
+#include "glsl/GrGLSLVertexShaderBuilder.h"
+
+GrAAConvexPathRenderer::GrAAConvexPathRenderer() {
+}
+
+struct Segment {
+ enum {
+ // These enum values are assumed in member functions below.
+ kLine = 0,
+ kQuad = 1,
+ } fType;
+
+ // line uses one pt, quad uses 2 pts
+ SkPoint fPts[2];
+ // normal to edge ending at each pt
+ SkVector fNorms[2];
+    // Whether the corner where the previous segment meets this segment is
+    // sharp. If so, fMid is a normalized bisector facing outward.
+ SkVector fMid;
+
+ int countPoints() {
+ GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
+ return fType + 1;
+ }
+ const SkPoint& endPt() const {
+ GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
+ return fPts[fType];
+ }
+ const SkPoint& endNorm() const {
+ GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
+ return fNorms[fType];
+ }
+};
+
+typedef SkTArray<Segment, true> SegmentArray;
+
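+// Computes the centroid of the polygon formed by the segment endpoints via the
+// shoelace formula: with A = (1/2) * sum(cross(p[i], p[i+1])), the centroid is
+// C = 1/(6A) * sum((p[i] + p[i+1]) * cross(p[i], p[i+1])). The accumulated 'area'
+// below is 2A, hence the final division by 3 * (2A) = 6A.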
+static void center_of_mass(const SegmentArray& segments, SkPoint* c) {
+ SkScalar area = 0;
+ SkPoint center = {0, 0};
+ int count = segments.count();
+ SkPoint p0 = {0, 0};
+ if (count > 2) {
+ // We translate the polygon so that the first point is at the origin.
+ // This avoids some precision issues with small area polygons far away
+ // from the origin.
+ p0 = segments[0].endPt();
+ SkPoint pi;
+ SkPoint pj;
+ // the first and last iteration of the below loop would compute
+ // zeros since the starting / ending point is (0,0). So instead we start
+ // at i=1 and make the last iteration i=count-2.
+ pj = segments[1].endPt() - p0;
+ for (int i = 1; i < count - 1; ++i) {
+ pi = pj;
+ pj = segments[i + 1].endPt() - p0;
+
+ SkScalar t = SkPoint::CrossProduct(pi, pj);
+ area += t;
+ center.fX += (pi.fX + pj.fX) * t;
+ center.fY += (pi.fY + pj.fY) * t;
+ }
+ }
+
+ // If the poly has no area then we instead return the average of
+ // its points.
+ if (SkScalarNearlyZero(area)) {
+ SkPoint avg;
+ avg.set(0, 0);
+ for (int i = 0; i < count; ++i) {
+ const SkPoint& pt = segments[i].endPt();
+ avg.fX += pt.fX;
+ avg.fY += pt.fY;
+ }
+ SkScalar denom = SK_Scalar1 / count;
+ avg.scale(denom);
+ *c = avg;
+ } else {
+ area *= 3;
+ area = SkScalarInvert(area);
+ center.scale(area);
+ // undo the translate of p0 to the origin.
+ *c = center + p0;
+ }
+ SkASSERT(!SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY));
+}
+
+static void compute_vectors(SegmentArray* segments,
+ SkPoint* fanPt,
+ SkPathPriv::FirstDirection dir,
+ int* vCount,
+ int* iCount) {
+ center_of_mass(*segments, fanPt);
+ int count = segments->count();
+
+ // Make the normals point towards the outside
+ SkPoint::Side normSide;
+ if (dir == SkPathPriv::kCCW_FirstDirection) {
+ normSide = SkPoint::kRight_Side;
+ } else {
+ normSide = SkPoint::kLeft_Side;
+ }
+
+ *vCount = 0;
+ *iCount = 0;
+ // compute normals at all points
+ for (int a = 0; a < count; ++a) {
+ Segment& sega = (*segments)[a];
+ int b = (a + 1) % count;
+ Segment& segb = (*segments)[b];
+
+ const SkPoint* prevPt = &sega.endPt();
+ int n = segb.countPoints();
+ for (int p = 0; p < n; ++p) {
+ segb.fNorms[p] = segb.fPts[p] - *prevPt;
+ segb.fNorms[p].normalize();
+ segb.fNorms[p].setOrthog(segb.fNorms[p], normSide);
+ prevPt = &segb.fPts[p];
+ }
+ if (Segment::kLine == segb.fType) {
+ *vCount += 5;
+ *iCount += 9;
+ } else {
+ *vCount += 6;
+ *iCount += 12;
+ }
+ }
+
+ // compute mid-vectors where segments meet. TODO: Detect shallow corners
+ // and leave out the wedges and close gaps by stitching segments together.
+ for (int a = 0; a < count; ++a) {
+ const Segment& sega = (*segments)[a];
+ int b = (a + 1) % count;
+ Segment& segb = (*segments)[b];
+ segb.fMid = segb.fNorms[0] + sega.endNorm();
+ segb.fMid.normalize();
+ // corner wedges
+ *vCount += 4;
+ *iCount += 6;
+ }
+}
+
+struct DegenerateTestData {
+ DegenerateTestData() { fStage = kInitial; }
+ bool isDegenerate() const { return kNonDegenerate != fStage; }
+ enum {
+ kInitial,
+ kPoint,
+ kLine,
+ kNonDegenerate
+ } fStage;
+ SkPoint fFirstPoint;
+ SkVector fLineNormal;
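+    // A point p lies on the tracked line when fLineNormal.dot(p) + fLineC == 0.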
+ SkScalar fLineC;
+};
+
+static const SkScalar kClose = (SK_Scalar1 / 16);
+static const SkScalar kCloseSqd = SkScalarMul(kClose, kClose);
+
+static void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
+ switch (data->fStage) {
+ case DegenerateTestData::kInitial:
+ data->fFirstPoint = pt;
+ data->fStage = DegenerateTestData::kPoint;
+ break;
+ case DegenerateTestData::kPoint:
+ if (pt.distanceToSqd(data->fFirstPoint) > kCloseSqd) {
+ data->fLineNormal = pt - data->fFirstPoint;
+ data->fLineNormal.normalize();
+ data->fLineNormal.setOrthog(data->fLineNormal);
+ data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
+ data->fStage = DegenerateTestData::kLine;
+ }
+ break;
+ case DegenerateTestData::kLine:
+ if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
+ data->fStage = DegenerateTestData::kNonDegenerate;
+ }
+ case DegenerateTestData::kNonDegenerate:
+ break;
+ default:
+ SkFAIL("Unexpected degenerate test stage.");
+ }
+}
+
+static inline bool get_direction(const SkPath& path, const SkMatrix& m,
+ SkPathPriv::FirstDirection* dir) {
+ if (!SkPathPriv::CheapComputeFirstDirection(path, dir)) {
+ return false;
+ }
+ // check whether m reverses the orientation
+ SkASSERT(!m.hasPerspective());
+ SkScalar det2x2 = SkScalarMul(m.get(SkMatrix::kMScaleX), m.get(SkMatrix::kMScaleY)) -
+ SkScalarMul(m.get(SkMatrix::kMSkewX), m.get(SkMatrix::kMSkewY));
+ if (det2x2 < 0) {
+ *dir = SkPathPriv::OppositeFirstDirection(*dir);
+ }
+ return true;
+}
+
+static inline void add_line_to_segment(const SkPoint& pt,
+ SegmentArray* segments) {
+ segments->push_back();
+ segments->back().fType = Segment::kLine;
+ segments->back().fPts[0] = pt;
+}
+
+static inline void add_quad_segment(const SkPoint pts[3],
+ SegmentArray* segments) {
+ if (pts[0].distanceToSqd(pts[1]) < kCloseSqd || pts[1].distanceToSqd(pts[2]) < kCloseSqd) {
+ if (pts[0] != pts[2]) {
+ add_line_to_segment(pts[2], segments);
+ }
+ } else {
+ segments->push_back();
+ segments->back().fType = Segment::kQuad;
+ segments->back().fPts[0] = pts[1];
+ segments->back().fPts[1] = pts[2];
+ }
+}
+
+static inline void add_cubic_segments(const SkPoint pts[4],
+ SkPathPriv::FirstDirection dir,
+ SegmentArray* segments) {
+ SkSTArray<15, SkPoint, true> quads;
+ GrPathUtils::convertCubicToQuadsConstrainToTangents(pts, SK_Scalar1, dir, &quads);
+ int count = quads.count();
+ for (int q = 0; q < count; q += 3) {
+ add_quad_segment(&quads[q], segments);
+ }
+}
+
+static bool get_segments(const SkPath& path,
+ const SkMatrix& m,
+ SegmentArray* segments,
+ SkPoint* fanPt,
+ int* vCount,
+ int* iCount) {
+ SkPath::Iter iter(path, true);
+ // This renderer over-emphasizes very thin path regions. We use the distance
+ // to the path from the sample to compute coverage. Every pixel intersected
+ // by the path will be hit and the maximum distance is sqrt(2)/2. We don't
+ // notice that the sample may be close to a very thin area of the path and
+ // thus should be very light. This is particularly egregious for degenerate
+ // line paths. We detect paths that are very close to a line (zero area) and
+ // draw nothing.
+ DegenerateTestData degenerateData;
+ SkPathPriv::FirstDirection dir;
+ // get_direction can fail for some degenerate paths.
+ if (!get_direction(path, m, &dir)) {
+ return false;
+ }
+
+ for (;;) {
+ SkPoint pts[4];
+ SkPath::Verb verb = iter.next(pts);
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ m.mapPoints(pts, 1);
+ update_degenerate_test(&degenerateData, pts[0]);
+ break;
+ case SkPath::kLine_Verb: {
+ m.mapPoints(&pts[1], 1);
+ update_degenerate_test(&degenerateData, pts[1]);
+ add_line_to_segment(pts[1], segments);
+ break;
+ }
+ case SkPath::kQuad_Verb:
+ m.mapPoints(pts, 3);
+ update_degenerate_test(&degenerateData, pts[1]);
+ update_degenerate_test(&degenerateData, pts[2]);
+ add_quad_segment(pts, segments);
+ break;
+ case SkPath::kConic_Verb: {
+ m.mapPoints(pts, 3);
+ SkScalar weight = iter.conicWeight();
+ SkAutoConicToQuads converter;
+ const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.5f);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ update_degenerate_test(&degenerateData, quadPts[2*i + 1]);
+ update_degenerate_test(&degenerateData, quadPts[2*i + 2]);
+ add_quad_segment(quadPts + 2*i, segments);
+ }
+ break;
+ }
+ case SkPath::kCubic_Verb: {
+ m.mapPoints(pts, 4);
+ update_degenerate_test(&degenerateData, pts[1]);
+ update_degenerate_test(&degenerateData, pts[2]);
+ update_degenerate_test(&degenerateData, pts[3]);
+ add_cubic_segments(pts, dir, segments);
+ break;
+            }
+ case SkPath::kDone_Verb:
+ if (degenerateData.isDegenerate()) {
+ return false;
+ } else {
+ compute_vectors(segments, fanPt, dir, vCount, iCount);
+ return true;
+ }
+ default:
+ break;
+ }
+ }
+}
+
+struct QuadVertex {
+ SkPoint fPos;
+ SkPoint fUV;
+ SkScalar fD0;
+ SkScalar fD1;
+};
+
+struct Draw {
+ Draw() : fVertexCnt(0), fIndexCnt(0) {}
+ int fVertexCnt;
+ int fIndexCnt;
+};
+
+typedef SkTArray<Draw, true> DrawArray;
+
+static void create_vertices(const SegmentArray& segments,
+ const SkPoint& fanPt,
+ DrawArray* draws,
+ QuadVertex* verts,
+ uint16_t* idxs) {
+ Draw* draw = &draws->push_back();
+ // alias just to make vert/index assignments easier to read.
+ int* v = &draw->fVertexCnt;
+ int* i = &draw->fIndexCnt;
+
+ int count = segments.count();
+ for (int a = 0; a < count; ++a) {
+ const Segment& sega = segments[a];
+ int b = (a + 1) % count;
+ const Segment& segb = segments[b];
+
+ // Check whether adding the verts for this segment to the current draw would cause index
+ // values to overflow.
+ int vCount = 4;
+ if (Segment::kLine == segb.fType) {
+ vCount += 5;
+ } else {
+ vCount += 6;
+ }
+ if (draw->fVertexCnt + vCount > (1 << 16)) {
+ verts += *v;
+ idxs += *i;
+ draw = &draws->push_back();
+ v = &draw->fVertexCnt;
+ i = &draw->fIndexCnt;
+ }
+
+ // FIXME: These tris are inset in the 1 unit arc around the corner
+ verts[*v + 0].fPos = sega.endPt();
+ verts[*v + 1].fPos = verts[*v + 0].fPos + sega.endNorm();
+ verts[*v + 2].fPos = verts[*v + 0].fPos + segb.fMid;
+ verts[*v + 3].fPos = verts[*v + 0].fPos + segb.fNorms[0];
+ verts[*v + 0].fUV.set(0,0);
+ verts[*v + 1].fUV.set(0,-SK_Scalar1);
+ verts[*v + 2].fUV.set(0,-SK_Scalar1);
+ verts[*v + 3].fUV.set(0,-SK_Scalar1);
+ verts[*v + 0].fD0 = verts[*v + 0].fD1 = -SK_Scalar1;
+ verts[*v + 1].fD0 = verts[*v + 1].fD1 = -SK_Scalar1;
+ verts[*v + 2].fD0 = verts[*v + 2].fD1 = -SK_Scalar1;
+ verts[*v + 3].fD0 = verts[*v + 3].fD1 = -SK_Scalar1;
+
+ idxs[*i + 0] = *v + 0;
+ idxs[*i + 1] = *v + 2;
+ idxs[*i + 2] = *v + 1;
+ idxs[*i + 3] = *v + 0;
+ idxs[*i + 4] = *v + 3;
+ idxs[*i + 5] = *v + 2;
+
+ *v += 4;
+ *i += 6;
+
+ if (Segment::kLine == segb.fType) {
+ verts[*v + 0].fPos = fanPt;
+ verts[*v + 1].fPos = sega.endPt();
+ verts[*v + 2].fPos = segb.fPts[0];
+
+ verts[*v + 3].fPos = verts[*v + 1].fPos + segb.fNorms[0];
+ verts[*v + 4].fPos = verts[*v + 2].fPos + segb.fNorms[0];
+
+ // we draw the line edge as a degenerate quad (u is 0, v is the
+ // signed distance to the edge)
+ SkScalar dist = fanPt.distanceToLineBetween(verts[*v + 1].fPos,
+ verts[*v + 2].fPos);
+ verts[*v + 0].fUV.set(0, dist);
+ verts[*v + 1].fUV.set(0, 0);
+ verts[*v + 2].fUV.set(0, 0);
+ verts[*v + 3].fUV.set(0, -SK_Scalar1);
+ verts[*v + 4].fUV.set(0, -SK_Scalar1);
+
+ verts[*v + 0].fD0 = verts[*v + 0].fD1 = -SK_Scalar1;
+ verts[*v + 1].fD0 = verts[*v + 1].fD1 = -SK_Scalar1;
+ verts[*v + 2].fD0 = verts[*v + 2].fD1 = -SK_Scalar1;
+ verts[*v + 3].fD0 = verts[*v + 3].fD1 = -SK_Scalar1;
+ verts[*v + 4].fD0 = verts[*v + 4].fD1 = -SK_Scalar1;
+
+ idxs[*i + 0] = *v + 3;
+ idxs[*i + 1] = *v + 1;
+ idxs[*i + 2] = *v + 2;
+
+ idxs[*i + 3] = *v + 4;
+ idxs[*i + 4] = *v + 3;
+ idxs[*i + 5] = *v + 2;
+
+ *i += 6;
+
+ // Draw the interior fan if it exists.
+ // TODO: Detect and combine colinear segments. This will ensure we catch every case
+ // with no interior, and that the resulting shared edge uses the same endpoints.
+ if (count >= 3) {
+ idxs[*i + 0] = *v + 0;
+ idxs[*i + 1] = *v + 2;
+ idxs[*i + 2] = *v + 1;
+
+ *i += 3;
+ }
+
+ *v += 5;
+ } else {
+ SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};
+
+ SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
+ midVec.normalize();
+
+ verts[*v + 0].fPos = fanPt;
+ verts[*v + 1].fPos = qpts[0];
+ verts[*v + 2].fPos = qpts[2];
+ verts[*v + 3].fPos = qpts[0] + segb.fNorms[0];
+ verts[*v + 4].fPos = qpts[2] + segb.fNorms[1];
+ verts[*v + 5].fPos = qpts[1] + midVec;
+
+ SkScalar c = segb.fNorms[0].dot(qpts[0]);
+ verts[*v + 0].fD0 = -segb.fNorms[0].dot(fanPt) + c;
+ verts[*v + 1].fD0 = 0.f;
+ verts[*v + 2].fD0 = -segb.fNorms[0].dot(qpts[2]) + c;
+ verts[*v + 3].fD0 = -SK_ScalarMax/100;
+ verts[*v + 4].fD0 = -SK_ScalarMax/100;
+ verts[*v + 5].fD0 = -SK_ScalarMax/100;
+
+ c = segb.fNorms[1].dot(qpts[2]);
+ verts[*v + 0].fD1 = -segb.fNorms[1].dot(fanPt) + c;
+ verts[*v + 1].fD1 = -segb.fNorms[1].dot(qpts[0]) + c;
+ verts[*v + 2].fD1 = 0.f;
+ verts[*v + 3].fD1 = -SK_ScalarMax/100;
+ verts[*v + 4].fD1 = -SK_ScalarMax/100;
+ verts[*v + 5].fD1 = -SK_ScalarMax/100;
+
+ GrPathUtils::QuadUVMatrix toUV(qpts);
+ toUV.apply<6, sizeof(QuadVertex), sizeof(SkPoint)>(verts + *v);
+
+ idxs[*i + 0] = *v + 3;
+ idxs[*i + 1] = *v + 1;
+ idxs[*i + 2] = *v + 2;
+ idxs[*i + 3] = *v + 4;
+ idxs[*i + 4] = *v + 3;
+ idxs[*i + 5] = *v + 2;
+
+ idxs[*i + 6] = *v + 5;
+ idxs[*i + 7] = *v + 3;
+ idxs[*i + 8] = *v + 4;
+
+ *i += 9;
+
+ // Draw the interior fan if it exists.
+ // TODO: Detect and combine colinear segments. This will ensure we catch every case
+ // with no interior, and that the resulting shared edge uses the same endpoints.
+ if (count >= 3) {
+ idxs[*i + 0] = *v + 0;
+ idxs[*i + 1] = *v + 2;
+ idxs[*i + 2] = *v + 1;
+
+ *i += 3;
+ }
+
+ *v += 6;
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/*
+ * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
+ * two components of the vertex attribute. Coverage is based on signed
+ * distance with negative being inside, positive outside. The edge is specified in
+ * window space (y-down). If either the third or fourth component of the interpolated
+ * vertex coord is > 0 then the pixel is considered outside the edge. This is used to
+ * attempt to trim to a portion of the infinite quad.
+ * Requires shader derivative instruction support.
+ */
+
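+// The fragment shader below evaluates the implicit form f(u,v) = u*u - v and
+// approximates the signed distance to the curve as f / |grad f|. A minimal
+// CPU-side sketch of the same math (illustrative only; u, v and their
+// screen-space derivatives duvdx, duvdy stand in for the interpolated varyings):
+//
+//     SkScalar quad_edge_alpha(SkScalar u, SkScalar v,
+//                              const SkVector& duvdx, const SkVector& duvdy) {
+//         SkScalar f = u * u - v;
+//         SkVector gradF = SkVector::Make(2 * u * duvdx.fX - duvdx.fY,
+//                                         2 * u * duvdy.fX - duvdy.fY);
+//         SkScalar a = 0.5f - f / gradF.length();
+//         return a < 0 ? 0 : (a > 1 ? 1 : a);
+//     }
+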
+class QuadEdgeEffect : public GrGeometryProcessor {
+public:
+
+ static sk_sp<GrGeometryProcessor> Make(GrColor color, const SkMatrix& localMatrix,
+ bool usesLocalCoords) {
+ return sk_sp<GrGeometryProcessor>(new QuadEdgeEffect(color, localMatrix, usesLocalCoords));
+ }
+
+ virtual ~QuadEdgeEffect() {}
+
+ const char* name() const override { return "QuadEdge"; }
+
+ const Attribute* inPosition() const { return fInPosition; }
+ const Attribute* inQuadEdge() const { return fInQuadEdge; }
+ GrColor color() const { return fColor; }
+ bool colorIgnored() const { return GrColor_ILLEGAL == fColor; }
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+
+ class GLSLProcessor : public GrGLSLGeometryProcessor {
+ public:
+ GLSLProcessor()
+ : fColor(GrColor_ILLEGAL) {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const QuadEdgeEffect& qe = args.fGP.cast<QuadEdgeEffect>();
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(qe);
+
+ GrGLSLVertToFrag v(kVec4f_GrSLType);
+ varyingHandler->addVarying("QuadEdge", &v);
+ vertBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.inQuadEdge()->fName);
+
+ GrGLSLPPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ // Setup pass through color
+ if (!qe.colorIgnored()) {
+ this->setupUniformColor(fragBuilder, uniformHandler, args.fOutputColor,
+ &fColorUniform);
+ }
+
+ // Setup position
+ this->setupPosition(vertBuilder, gpArgs, qe.inPosition()->fName);
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ gpArgs->fPositionVar,
+ qe.inPosition()->fName,
+ qe.localMatrix(),
+ args.fFPCoordTransformHandler);
+
+ SkAssertResult(fragBuilder->enableFeature(
+ GrGLSLFragmentShaderBuilder::kStandardDerivatives_GLSLFeature));
+ fragBuilder->codeAppendf("float edgeAlpha;");
+
+ // keep the derivative instructions outside the conditional
+ fragBuilder->codeAppendf("vec2 duvdx = dFdx(%s.xy);", v.fsIn());
+ fragBuilder->codeAppendf("vec2 duvdy = dFdy(%s.xy);", v.fsIn());
+ fragBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
+            // Today we know z and w are in device space; we could use derivatives instead.
+ fragBuilder->codeAppendf("edgeAlpha = min(min(%s.z, %s.w) + 0.5, 1.0);", v.fsIn(),
+ v.fsIn());
+            fragBuilder->codeAppendf("} else {");
+ fragBuilder->codeAppendf("vec2 gF = vec2(2.0*%s.x*duvdx.x - duvdx.y,"
+ " 2.0*%s.x*duvdy.x - duvdy.y);",
+ v.fsIn(), v.fsIn());
+ fragBuilder->codeAppendf("edgeAlpha = (%s.x*%s.x - %s.y);", v.fsIn(), v.fsIn(),
+ v.fsIn());
+ fragBuilder->codeAppendf("edgeAlpha = "
+ "clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);}");
+
+ fragBuilder->codeAppendf("%s = vec4(edgeAlpha);", args.fOutputCoverage);
+ }
+
+ static inline void GenKey(const GrGeometryProcessor& gp,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const QuadEdgeEffect& qee = gp.cast<QuadEdgeEffect>();
+ uint32_t key = 0;
+ key |= qee.usesLocalCoords() && qee.localMatrix().hasPerspective() ? 0x1 : 0x0;
+ key |= qee.colorIgnored() ? 0x2 : 0x0;
+ b->add32(key);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman,
+ const GrPrimitiveProcessor& gp,
+ FPCoordTransformIter&& transformIter) override {
+ const QuadEdgeEffect& qe = gp.cast<QuadEdgeEffect>();
+ if (qe.color() != fColor) {
+ float c[4];
+ GrColorToRGBAFloat(qe.color(), c);
+ pdman.set4fv(fColorUniform, 1, c);
+ fColor = qe.color();
+ }
+ this->setTransformDataHelper(qe.fLocalMatrix, pdman, &transformIter);
+ }
+
+ private:
+ GrColor fColor;
+ UniformHandle fColorUniform;
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+ };
+
+ void getGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLProcessor::GenKey(*this, caps, b);
+ }
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override {
+ return new GLSLProcessor();
+ }
+
+private:
+ QuadEdgeEffect(GrColor color, const SkMatrix& localMatrix, bool usesLocalCoords)
+ : fColor(color)
+ , fLocalMatrix(localMatrix)
+ , fUsesLocalCoords(usesLocalCoords) {
+ this->initClassID<QuadEdgeEffect>();
+ fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType);
+ fInQuadEdge = &this->addVertexAttrib("inQuadEdge", kVec4f_GrVertexAttribType);
+ }
+
+ const Attribute* fInPosition;
+ const Attribute* fInQuadEdge;
+ GrColor fColor;
+ SkMatrix fLocalMatrix;
+ bool fUsesLocalCoords;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect);
+
+sk_sp<GrGeometryProcessor> QuadEdgeEffect::TestCreate(GrProcessorTestData* d) {
+ // Doesn't work without derivative instructions.
+ return d->fCaps->shaderCaps()->shaderDerivativeSupport() ?
+ QuadEdgeEffect::Make(GrRandomColor(d->fRandom),
+ GrTest::TestMatrix(d->fRandom),
+ d->fRandom->nextBool()) : nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool GrAAConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ return (args.fShaderCaps->shaderDerivativeSupport() && args.fAntiAlias &&
+ args.fShape->style().isSimpleFill() && !args.fShape->inverseFilled() &&
+ args.fShape->knownToBeConvex());
+}
+
+// extract the result vertices and indices from the GrAAConvexTessellator
+static void extract_verts(const GrAAConvexTessellator& tess,
+ void* vertices,
+ size_t vertexStride,
+ GrColor color,
+ uint16_t* idxs,
+ bool tweakAlphaForCoverage) {
+ intptr_t verts = reinterpret_cast<intptr_t>(vertices);
+
+ for (int i = 0; i < tess.numPts(); ++i) {
+ *((SkPoint*)((intptr_t)verts + i * vertexStride)) = tess.point(i);
+ }
+
+ // Make 'verts' point to the colors
+ verts += sizeof(SkPoint);
+ for (int i = 0; i < tess.numPts(); ++i) {
+ if (tweakAlphaForCoverage) {
+ SkASSERT(SkScalarRoundToInt(255.0f * tess.coverage(i)) <= 255);
+ unsigned scale = SkScalarRoundToInt(255.0f * tess.coverage(i));
+ GrColor scaledColor = (0xff == scale) ? color : SkAlphaMulQ(color, scale);
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = scaledColor;
+ } else {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
+ *reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) =
+ tess.coverage(i);
+ }
+ }
+
+ for (int i = 0; i < tess.numIndices(); ++i) {
+ idxs[i] = tess.index(i);
+ }
+}
+
+static sk_sp<GrGeometryProcessor> create_fill_gp(bool tweakAlphaForCoverage,
+ const SkMatrix& viewMatrix,
+ bool usesLocalCoords,
+ bool coverageIgnored) {
+ using namespace GrDefaultGeoProcFactory;
+
+ Color color(Color::kAttribute_Type);
+ Coverage::Type coverageType;
+ // TODO remove coverage if coverage is ignored
+ /*if (coverageIgnored) {
+ coverageType = Coverage::kNone_Type;
+ } else*/ if (tweakAlphaForCoverage) {
+ coverageType = Coverage::kSolid_Type;
+ } else {
+ coverageType = Coverage::kAttribute_Type;
+ }
+ Coverage coverage(coverageType);
+ LocalCoords localCoords(usesLocalCoords ? LocalCoords::kUsePosition_Type :
+ LocalCoords::kUnused_Type);
+ return MakeForDeviceSpace(color, coverage, localCoords, viewMatrix);
+}
+
+class AAConvexPathBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+ AAConvexPathBatch(GrColor color, const SkMatrix& viewMatrix, const SkPath& path)
+ : INHERITED(ClassID()) {
+ fGeoData.emplace_back(Geometry{color, viewMatrix, path});
+ this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kYes,
+ IsZeroArea::kNo);
+ }
+
+ const char* name() const override { return "AAConvexBatch"; }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one geometry bundle
+ color->setKnownFourComponents(fGeoData[0].fColor);
+ coverage->setUnknownSingleComponent();
+ }
+
+private:
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ // Handle any color overrides
+ if (!overrides.readsColor()) {
+ fGeoData[0].fColor = GrColor_ILLEGAL;
+ }
+ overrides.getOverrideColorIfSet(&fGeoData[0].fColor);
+
+ // setup batch properties
+ fBatch.fColorIgnored = !overrides.readsColor();
+ fBatch.fColor = fGeoData[0].fColor;
+ fBatch.fUsesLocalCoords = overrides.readsLocalCoords();
+ fBatch.fCoverageIgnored = !overrides.readsCoverage();
+ fBatch.fLinesOnly = SkPath::kLine_SegmentMask == fGeoData[0].fPath.getSegmentMasks();
+ fBatch.fCanTweakAlphaForCoverage = overrides.canTweakAlphaForCoverage();
+ }
+
+ void prepareLinesOnlyDraws(Target* target) const {
+ bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
+
+ // Setup GrGeometryProcessor
+ sk_sp<GrGeometryProcessor> gp(create_fill_gp(canTweakAlphaForCoverage,
+ this->viewMatrix(),
+ this->usesLocalCoords(),
+ this->coverageIgnored()));
+ if (!gp) {
+ SkDebugf("Could not create GrGeometryProcessor\n");
+ return;
+ }
+
+ size_t vertexStride = gp->getVertexStride();
+
+ SkASSERT(canTweakAlphaForCoverage ?
+ vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorAttr) :
+ vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorCoverageAttr));
+
+ GrAAConvexTessellator tess;
+
+ int instanceCount = fGeoData.count();
+
+ for (int i = 0; i < instanceCount; i++) {
+ tess.rewind();
+
+ const Geometry& args = fGeoData[i];
+
+ if (!tess.tessellate(args.fViewMatrix, args.fPath)) {
+ continue;
+ }
+
+ const GrBuffer* vertexBuffer;
+ int firstVertex;
+
+ void* verts = target->makeVertexSpace(vertexStride, tess.numPts(), &vertexBuffer,
+ &firstVertex);
+ if (!verts) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ const GrBuffer* indexBuffer;
+ int firstIndex;
+
+ uint16_t* idxs = target->makeIndexSpace(tess.numIndices(), &indexBuffer, &firstIndex);
+ if (!idxs) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+
+ extract_verts(tess, verts, vertexStride, args.fColor, idxs, canTweakAlphaForCoverage);
+
+ GrMesh mesh;
+ mesh.initIndexed(kTriangles_GrPrimitiveType,
+ vertexBuffer, indexBuffer,
+ firstVertex, firstIndex,
+ tess.numPts(), tess.numIndices());
+ target->draw(gp.get(), mesh);
+ }
+ }
+
+ void onPrepareDraws(Target* target) const override {
+#ifndef SK_IGNORE_LINEONLY_AA_CONVEX_PATH_OPTS
+ if (this->linesOnly()) {
+ this->prepareLinesOnlyDraws(target);
+ return;
+ }
+#endif
+
+ int instanceCount = fGeoData.count();
+
+ SkMatrix invert;
+ if (this->usesLocalCoords() && !this->viewMatrix().invert(&invert)) {
+ SkDebugf("Could not invert viewmatrix\n");
+ return;
+ }
+
+ // Setup GrGeometryProcessor
+ sk_sp<GrGeometryProcessor> quadProcessor(
+ QuadEdgeEffect::Make(this->color(), invert, this->usesLocalCoords()));
+
+ // TODO generate all segments for all paths and use one vertex buffer
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& args = fGeoData[i];
+
+            // We rely on the fact that SkPath::transform subdivides the path based on
+            // perspective. Otherwise, we apply the view matrix when copying to the
+            // segment representation.
+ const SkMatrix* viewMatrix = &args.fViewMatrix;
+
+ // We avoid initializing the path unless we have to
+ const SkPath* pathPtr = &args.fPath;
+ SkTLazy<SkPath> tmpPath;
+ if (viewMatrix->hasPerspective()) {
+ SkPath* tmpPathPtr = tmpPath.init(*pathPtr);
+ tmpPathPtr->setIsVolatile(true);
+ tmpPathPtr->transform(*viewMatrix);
+ viewMatrix = &SkMatrix::I();
+ pathPtr = tmpPathPtr;
+ }
+
+ int vertexCount;
+ int indexCount;
+ enum {
+ kPreallocSegmentCnt = 512 / sizeof(Segment),
+ kPreallocDrawCnt = 4,
+ };
+ SkSTArray<kPreallocSegmentCnt, Segment, true> segments;
+ SkPoint fanPt;
+
+ if (!get_segments(*pathPtr, *viewMatrix, &segments, &fanPt, &vertexCount,
+ &indexCount)) {
+ continue;
+ }
+
+ const GrBuffer* vertexBuffer;
+ int firstVertex;
+
+ size_t vertexStride = quadProcessor->getVertexStride();
+ QuadVertex* verts = reinterpret_cast<QuadVertex*>(target->makeVertexSpace(
+ vertexStride, vertexCount, &vertexBuffer, &firstVertex));
+
+ if (!verts) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ const GrBuffer* indexBuffer;
+ int firstIndex;
+
+ uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
+ if (!idxs) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+
+ SkSTArray<kPreallocDrawCnt, Draw, true> draws;
+ create_vertices(segments, fanPt, &draws, verts, idxs);
+
+ GrMesh mesh;
+
+ for (int j = 0; j < draws.count(); ++j) {
+ const Draw& draw = draws[j];
+ mesh.initIndexed(kTriangles_GrPrimitiveType, vertexBuffer, indexBuffer,
+ firstVertex, firstIndex, draw.fVertexCnt, draw.fIndexCnt);
+ target->draw(quadProcessor.get(), mesh);
+ firstVertex += draw.fVertexCnt;
+ firstIndex += draw.fIndexCnt;
+ }
+ }
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ AAConvexPathBatch* that = t->cast<AAConvexPathBatch>();
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ if (this->color() != that->color()) {
+ return false;
+ }
+
+ SkASSERT(this->usesLocalCoords() == that->usesLocalCoords());
+ if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return false;
+ }
+
+ if (this->linesOnly() != that->linesOnly()) {
+ return false;
+ }
+
+        // If one batch can tweak alpha for coverage and the other cannot, we just fall back
+        // to not tweaking.
+ if (this->canTweakAlphaForCoverage() != that->canTweakAlphaForCoverage()) {
+ fBatch.fCanTweakAlphaForCoverage = false;
+ }
+
+ fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
+ this->joinBounds(*that);
+ return true;
+ }
+
+ GrColor color() const { return fBatch.fColor; }
+ bool linesOnly() const { return fBatch.fLinesOnly; }
+ bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
+ bool canTweakAlphaForCoverage() const { return fBatch.fCanTweakAlphaForCoverage; }
+ const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
+ bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
+
+ struct BatchTracker {
+ GrColor fColor;
+ bool fUsesLocalCoords;
+ bool fColorIgnored;
+ bool fCoverageIgnored;
+ bool fLinesOnly;
+ bool fCanTweakAlphaForCoverage;
+ };
+
+ struct Geometry {
+ GrColor fColor;
+ SkMatrix fViewMatrix;
+ SkPath fPath;
+ };
+
+ BatchTracker fBatch;
+ SkSTArray<1, Geometry, true> fGeoData;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+bool GrAAConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fDrawContext->auditTrail(),
+ "GrAAConvexPathRenderer::onDrawPath");
+ SkASSERT(!args.fDrawContext->isUnifiedMultisampled());
+ SkASSERT(!args.fShape->isEmpty());
+
+ SkPath path;
+ args.fShape->asPath(&path);
+
+ SkAutoTUnref<GrDrawBatch> batch(new AAConvexPathBatch(args.fPaint->getColor(),
+ *args.fViewMatrix, path));
+
+ GrPipelineBuilder pipelineBuilder(*args.fPaint);
+ pipelineBuilder.setUserStencil(args.fUserStencilSettings);
+
+ args.fDrawContext->drawBatch(pipelineBuilder, *args.fClip, batch);
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_TEST_UTILS
+
+DRAW_BATCH_TEST_DEFINE(AAConvexPathBatch) {
+ GrColor color = GrRandomColor(random);
+ SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
+ SkPath path = GrTest::TestPathConvex(random);
+
+ return new AAConvexPathBatch(color, viewMatrix, path);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrAAConvexPathRenderer.h b/gfx/skia/skia/src/gpu/batches/GrAAConvexPathRenderer.h
new file mode 100644
index 000000000..420ca6013
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrAAConvexPathRenderer.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAAConvexPathRenderer_DEFINED
+#define GrAAConvexPathRenderer_DEFINED
+
+#include "GrPathRenderer.h"
+
+class GrAAConvexPathRenderer : public GrPathRenderer {
+public:
+ GrAAConvexPathRenderer();
+
+private:
+ bool onCanDrawPath(const CanDrawPathArgs&) const override;
+
+ bool onDrawPath(const DrawPathArgs&) override;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrAAConvexTessellator.cpp b/gfx/skia/skia/src/gpu/batches/GrAAConvexTessellator.cpp
new file mode 100644
index 000000000..b9c44ff9b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrAAConvexTessellator.cpp
@@ -0,0 +1,1103 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrAAConvexTessellator.h"
+#include "SkCanvas.h"
+#include "SkPath.h"
+#include "SkPoint.h"
+#include "SkString.h"
+#include "GrPathUtils.h"
+
+// Next steps:
+// add an interactive sample app slide
+// add debug check that all points are suitably far apart
+// test more degenerate cases
+
+// The tolerance, in device space, for fusing vertices and eliminating colinear lines.
+static const SkScalar kClose = (SK_Scalar1 / 16);
+static const SkScalar kCloseSqd = SkScalarMul(kClose, kClose);
+
+// tessellation tolerance values, in device space pixels
+static const SkScalar kQuadTolerance = 0.2f;
+static const SkScalar kCubicTolerance = 0.2f;
+static const SkScalar kConicTolerance = 0.5f;
+
+// dot product below which we use a round cap between curve segments
+static const SkScalar kRoundCapThreshold = 0.8f;
+
+// dot product above which we consider two adjacent curves to be part of the "same" curve
+static const SkScalar kCurveConnectionThreshold = 0.8f;
+
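+// Returns the parameter t of the intersection of the ray p0 + t*n0 with the line
+// through p1 with direction n1 (found by crossing both sides of
+// p0 + t*n0 = p1 + s*n1 with n1). Fails when the two directions are nearly parallel.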
+static bool intersect(const SkPoint& p0, const SkPoint& n0,
+ const SkPoint& p1, const SkPoint& n1,
+ SkScalar* t) {
+ const SkPoint v = p1 - p0;
+ SkScalar perpDot = n0.fX * n1.fY - n0.fY * n1.fX;
+ if (SkScalarNearlyZero(perpDot)) {
+ return false;
+ }
+ *t = (v.fX * n1.fY - v.fY * n1.fX) / perpDot;
+ SkASSERT(SkScalarIsFinite(*t));
+ return true;
+}
+
+// This is a special case version of intersect where we have the vector
+// perpendicular to the second line rather than the vector parallel to it.
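+// Solving (p0 + t*n0 - p1) . perp == 0 for t gives
+// t = (p1 - p0) . perp / (n0 . perp), which is the value returned below.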
+static SkScalar perp_intersect(const SkPoint& p0, const SkPoint& n0,
+ const SkPoint& p1, const SkPoint& perp) {
+ const SkPoint v = p1 - p0;
+ SkScalar perpDot = n0.dot(perp);
+ return v.dot(perp) / perpDot;
+}
+
+static bool duplicate_pt(const SkPoint& p0, const SkPoint& p1) {
+ SkScalar distSq = p0.distanceToSqd(p1);
+ return distSq < kCloseSqd;
+}
+
+static SkScalar abs_dist_from_line(const SkPoint& p0, const SkVector& v, const SkPoint& test) {
+ SkPoint testV = test - p0;
+ SkScalar dist = testV.fX * v.fY - testV.fY * v.fX;
+ return SkScalarAbs(dist);
+}
+
+int GrAAConvexTessellator::addPt(const SkPoint& pt,
+ SkScalar depth,
+ SkScalar coverage,
+ bool movable,
+ CurveState curve) {
+ this->validate();
+
+ int index = fPts.count();
+ *fPts.push() = pt;
+ *fCoverages.push() = coverage;
+ *fMovable.push() = movable;
+ *fCurveState.push() = curve;
+
+ this->validate();
+ return index;
+}
+
+void GrAAConvexTessellator::popLastPt() {
+ this->validate();
+
+ fPts.pop();
+ fCoverages.pop();
+ fMovable.pop();
+ fCurveState.pop();
+
+ this->validate();
+}
+
+void GrAAConvexTessellator::popFirstPtShuffle() {
+ this->validate();
+
+ fPts.removeShuffle(0);
+ fCoverages.removeShuffle(0);
+ fMovable.removeShuffle(0);
+ fCurveState.removeShuffle(0);
+
+ this->validate();
+}
+
+void GrAAConvexTessellator::updatePt(int index,
+ const SkPoint& pt,
+ SkScalar depth,
+ SkScalar coverage) {
+ this->validate();
+ SkASSERT(fMovable[index]);
+
+ fPts[index] = pt;
+ fCoverages[index] = coverage;
+}
+
+void GrAAConvexTessellator::addTri(int i0, int i1, int i2) {
+ if (i0 == i1 || i1 == i2 || i2 == i0) {
+ return;
+ }
+
+ *fIndices.push() = i0;
+ *fIndices.push() = i1;
+ *fIndices.push() = i2;
+}
+
+void GrAAConvexTessellator::rewind() {
+ fPts.rewind();
+ fCoverages.rewind();
+ fMovable.rewind();
+ fIndices.rewind();
+ fNorms.rewind();
+ fCurveState.rewind();
+ fInitialRing.rewind();
+ fCandidateVerts.rewind();
+#if GR_AA_CONVEX_TESSELLATOR_VIZ
+ fRings.rewind(); // TODO: leak in this case!
+#else
+ fRings[0].rewind();
+ fRings[1].rewind();
+#endif
+}
+
+void GrAAConvexTessellator::computeBisectors() {
+ fBisectors.setCount(fNorms.count());
+
+ int prev = fBisectors.count() - 1;
+ for (int cur = 0; cur < fBisectors.count(); prev = cur, ++cur) {
+ fBisectors[cur] = fNorms[cur] + fNorms[prev];
+ if (!fBisectors[cur].normalize()) {
+ SkASSERT(SkPoint::kLeft_Side == fSide || SkPoint::kRight_Side == fSide);
+ fBisectors[cur].setOrthog(fNorms[cur], (SkPoint::Side)-fSide);
+ SkVector other;
+ other.setOrthog(fNorms[prev], fSide);
+ fBisectors[cur] += other;
+ SkAssertResult(fBisectors[cur].normalize());
+ } else {
+ fBisectors[cur].negate(); // make the bisector face in
+ }
+ if (fCurveState[prev] == kIndeterminate_CurveState) {
+ if (fCurveState[cur] == kSharp_CurveState) {
+ fCurveState[prev] = kSharp_CurveState;
+ } else {
+ if (SkScalarAbs(fNorms[cur].dot(fNorms[prev])) > kCurveConnectionThreshold) {
+ fCurveState[prev] = kCurve_CurveState;
+ fCurveState[cur] = kCurve_CurveState;
+ } else {
+ fCurveState[prev] = kSharp_CurveState;
+ fCurveState[cur] = kSharp_CurveState;
+ }
+ }
+ }
+
+ SkASSERT(SkScalarNearlyEqual(1.0f, fBisectors[cur].length()));
+ }
+}
+
+// Create as many rings as we need to (up to a predefined limit) to reach the specified target
+// depth. If we are in fill mode, the final ring will automatically be fanned.
+bool GrAAConvexTessellator::createInsetRings(Ring& previousRing, SkScalar initialDepth,
+ SkScalar initialCoverage, SkScalar targetDepth,
+ SkScalar targetCoverage, Ring** finalRing) {
+ static const int kMaxNumRings = 8;
+
+ if (previousRing.numPts() < 3) {
+ return false;
+ }
+ Ring* currentRing = &previousRing;
+ int i;
+ for (i = 0; i < kMaxNumRings; ++i) {
+ Ring* nextRing = this->getNextRing(currentRing);
+ SkASSERT(nextRing != currentRing);
+
+ bool done = this->createInsetRing(*currentRing, nextRing, initialDepth, initialCoverage,
+ targetDepth, targetCoverage, i == 0);
+ currentRing = nextRing;
+ if (done) {
+ break;
+ }
+ currentRing->init(*this);
+ }
+
+ if (kMaxNumRings == i) {
+ // Bail if we've exceeded the amount of time we want to throw at this.
+ this->terminate(*currentRing);
+ return false;
+ }
+ bool done = currentRing->numPts() >= 3;
+ if (done) {
+ currentRing->init(*this);
+ }
+ *finalRing = currentRing;
+ return done;
+}
+
+// The general idea here is to, conceptually, start with the original polygon and slide
+// the vertices along the bisectors until the first intersection. At that
+// point two of the edges collapse and the process repeats on the new polygon.
+// The polygon state is captured in the Ring class while the GrAAConvexTessellator
+// controls the iteration. The CandidateVerts holds the formative points for the
+// next ring.
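+// For a filled path the flow is: extractFromPath() builds the original ring,
+// createOuterRing() adds the antialiasing fringe outside it, and createInsetRings()
+// walks inward, fanning the interior once full coverage is reached.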
+bool GrAAConvexTessellator::tessellate(const SkMatrix& m, const SkPath& path) {
+ if (!this->extractFromPath(m, path)) {
+ return false;
+ }
+
+ SkScalar coverage = 1.0f;
+ SkScalar scaleFactor = 0.0f;
+
+ if (SkStrokeRec::kStrokeAndFill_Style == fStyle) {
+ SkASSERT(m.isSimilarity());
+ scaleFactor = m.getMaxScale(); // x and y scale are the same
+ SkScalar effectiveStrokeWidth = scaleFactor * fStrokeWidth;
+ Ring outerStrokeAndAARing;
+ this->createOuterRing(fInitialRing,
+ effectiveStrokeWidth / 2 + kAntialiasingRadius, 0.0,
+ &outerStrokeAndAARing);
+
+ // discard all the triangles added between the originating ring and the new outer ring
+ fIndices.rewind();
+
+ outerStrokeAndAARing.init(*this);
+
+ outerStrokeAndAARing.makeOriginalRing();
+
+ // Add the outer stroke ring's normals to the originating ring's normals
+ // so it can also act as an originating ring
+ fNorms.setCount(fNorms.count() + outerStrokeAndAARing.numPts());
+ for (int i = 0; i < outerStrokeAndAARing.numPts(); ++i) {
+ SkASSERT(outerStrokeAndAARing.index(i) < fNorms.count());
+ fNorms[outerStrokeAndAARing.index(i)] = outerStrokeAndAARing.norm(i);
+ }
+
+ // the bisectors are only needed for the computation of the outer ring
+ fBisectors.rewind();
+
+ Ring* insetAARing;
+ this->createInsetRings(outerStrokeAndAARing,
+ 0.0f, 0.0f, 2*kAntialiasingRadius, 1.0f,
+ &insetAARing);
+
+ SkDEBUGCODE(this->validate();)
+ return true;
+ }
+
+ if (SkStrokeRec::kStroke_Style == fStyle) {
+ SkASSERT(fStrokeWidth >= 0.0f);
+ SkASSERT(m.isSimilarity());
+ scaleFactor = m.getMaxScale(); // x and y scale are the same
+ SkScalar effectiveStrokeWidth = scaleFactor * fStrokeWidth;
+ Ring outerStrokeRing;
+ this->createOuterRing(fInitialRing, effectiveStrokeWidth / 2 - kAntialiasingRadius,
+ coverage, &outerStrokeRing);
+ outerStrokeRing.init(*this);
+ Ring outerAARing;
+ this->createOuterRing(outerStrokeRing, kAntialiasingRadius * 2, 0.0f, &outerAARing);
+ } else {
+ Ring outerAARing;
+ this->createOuterRing(fInitialRing, kAntialiasingRadius, 0.0f, &outerAARing);
+ }
+
+ // the bisectors are only needed for the computation of the outer ring
+ fBisectors.rewind();
+ if (SkStrokeRec::kStroke_Style == fStyle && fInitialRing.numPts() > 2) {
+ SkASSERT(fStrokeWidth >= 0.0f);
+ SkScalar effectiveStrokeWidth = scaleFactor * fStrokeWidth;
+ Ring* insetStrokeRing;
+ SkScalar strokeDepth = effectiveStrokeWidth / 2 - kAntialiasingRadius;
+ if (this->createInsetRings(fInitialRing, 0.0f, coverage, strokeDepth, coverage,
+ &insetStrokeRing)) {
+ Ring* insetAARing;
+ this->createInsetRings(*insetStrokeRing, strokeDepth, coverage, strokeDepth +
+ kAntialiasingRadius * 2, 0.0f, &insetAARing);
+ }
+ } else {
+ Ring* insetAARing;
+ this->createInsetRings(fInitialRing, 0.0f, 0.5f, kAntialiasingRadius, 1.0f, &insetAARing);
+ }
+
+ SkDEBUGCODE(this->validate();)
+ return true;
+}
+
+SkScalar GrAAConvexTessellator::computeDepthFromEdge(int edgeIdx, const SkPoint& p) const {
+ SkASSERT(edgeIdx < fNorms.count());
+
+ SkPoint v = p - fPts[edgeIdx];
+ SkScalar depth = -fNorms[edgeIdx].dot(v);
+ return depth;
+}
+
+// Find a point that is 'desiredDepth' away from the 'edgeIdx'-th edge and lies
+// along the 'bisector' from the 'startIdx'-th point.
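+// Moving one unit along 'bisector' changes the signed distance to the edge by
+// bisector.dot(norm), so stepping -desiredDepth / bisector.dot(norm) from the
+// point where the bisector crosses the edge lands exactly 'desiredDepth' inside.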
+bool GrAAConvexTessellator::computePtAlongBisector(int startIdx,
+ const SkVector& bisector,
+ int edgeIdx,
+ SkScalar desiredDepth,
+ SkPoint* result) const {
+ const SkPoint& norm = fNorms[edgeIdx];
+
+ // First find the point where the edge and the bisector intersect
+ SkPoint newP;
+
+ SkScalar t = perp_intersect(fPts[startIdx], bisector, fPts[edgeIdx], norm);
+ if (SkScalarNearlyEqual(t, 0.0f)) {
+ // the start point was one of the original ring points
+ SkASSERT(startIdx < fPts.count());
+ newP = fPts[startIdx];
+ } else if (t < 0.0f) {
+ newP = bisector;
+ newP.scale(t);
+ newP += fPts[startIdx];
+ } else {
+ return false;
+ }
+
+ // Then offset along the bisector from that point the correct distance
+ SkScalar dot = bisector.dot(norm);
+ t = -desiredDepth / dot;
+ *result = bisector;
+ result->scale(t);
+ *result += newP;
+
+ return true;
+}
+
+bool GrAAConvexTessellator::extractFromPath(const SkMatrix& m, const SkPath& path) {
+ SkASSERT(SkPath::kConvex_Convexity == path.getConvexity());
+
+ // Outer ring: 3*numPts
+ // Middle ring: numPts
+ // Presumptive inner ring: numPts
+ this->reservePts(5*path.countPoints());
+ // Outer ring: 12*numPts
+ // Middle ring: 0
+ // Presumptive inner ring: 6*numPts + 6
+ fIndices.setReserve(18*path.countPoints() + 6);
+
+ fNorms.setReserve(path.countPoints());
+
+ // TODO: is there a faster way to extract the points from the path? Perhaps
+ // get all the points via a new entry point, transform them all in bulk
+ // and then walk them to find duplicates?
+ SkPath::Iter iter(path, true);
+ SkPoint pts[4];
+ SkPath::Verb verb;
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kLine_Verb:
+ this->lineTo(m, pts[1], kSharp_CurveState);
+ break;
+ case SkPath::kQuad_Verb:
+ this->quadTo(m, pts);
+ break;
+ case SkPath::kCubic_Verb:
+ this->cubicTo(m, pts);
+ break;
+ case SkPath::kConic_Verb:
+ this->conicTo(m, pts, iter.conicWeight());
+ break;
+ case SkPath::kMove_Verb:
+ case SkPath::kClose_Verb:
+ case SkPath::kDone_Verb:
+ break;
+ }
+ }
+
+ if (this->numPts() < 2) {
+ return false;
+ }
+
+ // check if last point is a duplicate of the first point. If so, remove it.
+ if (duplicate_pt(fPts[this->numPts()-1], fPts[0])) {
+ this->popLastPt();
+ fNorms.pop();
+ }
+
+ SkASSERT(fPts.count() == fNorms.count()+1);
+ if (this->numPts() >= 3) {
+ if (abs_dist_from_line(fPts.top(), fNorms.top(), fPts[0]) < kClose) {
+ // The last point is on the line from the second to last to the first point.
+ this->popLastPt();
+ fNorms.pop();
+ }
+
+ *fNorms.push() = fPts[0] - fPts.top();
+ SkDEBUGCODE(SkScalar len =) SkPoint::Normalize(&fNorms.top());
+ SkASSERT(len > 0.0f);
+ SkASSERT(fPts.count() == fNorms.count());
+ }
+
+ if (this->numPts() >= 3 && abs_dist_from_line(fPts[0], fNorms.top(), fPts[1]) < kClose) {
+ // The first point is on the line from the last to the second.
+ this->popFirstPtShuffle();
+ fNorms.removeShuffle(0);
+ fNorms[0] = fPts[1] - fPts[0];
+ SkDEBUGCODE(SkScalar len =) SkPoint::Normalize(&fNorms[0]);
+ SkASSERT(len > 0.0f);
+ SkASSERT(SkScalarNearlyEqual(1.0f, fNorms[0].length()));
+ }
+
+ if (this->numPts() >= 3) {
+ // Check the cross product of the final trio
+ SkScalar cross = SkPoint::CrossProduct(fNorms[0], fNorms.top());
+ if (cross > 0.0f) {
+ fSide = SkPoint::kRight_Side;
+ } else {
+ fSide = SkPoint::kLeft_Side;
+ }
+
+ // Make all the normals face outwards rather than along the edge
+ for (int cur = 0; cur < fNorms.count(); ++cur) {
+ fNorms[cur].setOrthog(fNorms[cur], fSide);
+ SkASSERT(SkScalarNearlyEqual(1.0f, fNorms[cur].length()));
+ }
+
+ this->computeBisectors();
+ } else if (this->numPts() == 2) {
+ // We've got two points, so we're degenerate.
+ if (fStyle == SkStrokeRec::kFill_Style) {
+ // it's a fill, so we don't need to worry about degenerate paths
+ return false;
+ }
+ // For stroking, we still need to process the degenerate path, so fix it up
+ fSide = SkPoint::kLeft_Side;
+
+ // Make all the normals face outwards rather than along the edge
+ for (int cur = 0; cur < fNorms.count(); ++cur) {
+ fNorms[cur].setOrthog(fNorms[cur], fSide);
+ SkASSERT(SkScalarNearlyEqual(1.0f, fNorms[cur].length()));
+ }
+
+ fNorms.push(SkPoint::Make(-fNorms[0].fX, -fNorms[0].fY));
+ // we won't actually use the bisectors, so just push zeroes
+ fBisectors.push(SkPoint::Make(0.0, 0.0));
+ fBisectors.push(SkPoint::Make(0.0, 0.0));
+ } else {
+ return false;
+ }
+
+ fCandidateVerts.setReserve(this->numPts());
+ fInitialRing.setReserve(this->numPts());
+ for (int i = 0; i < this->numPts(); ++i) {
+ fInitialRing.addIdx(i, i);
+ }
+ fInitialRing.init(fNorms, fBisectors);
+
+ this->validate();
+ return true;
+}
+
+GrAAConvexTessellator::Ring* GrAAConvexTessellator::getNextRing(Ring* lastRing) {
+#if GR_AA_CONVEX_TESSELLATOR_VIZ
+ Ring* ring = *fRings.push() = new Ring;
+ ring->setReserve(fInitialRing.numPts());
+ ring->rewind();
+ return ring;
+#else
+ // Flip flop back and forth between fRings[0] & fRings[1]
+ int nextRing = (lastRing == &fRings[0]) ? 1 : 0;
+ fRings[nextRing].setReserve(fInitialRing.numPts());
+ fRings[nextRing].rewind();
+ return &fRings[nextRing];
+#endif
+}
+
+void GrAAConvexTessellator::fanRing(const Ring& ring) {
+ // fan out from point 0
+ int startIdx = ring.index(0);
+ for (int cur = ring.numPts() - 2; cur >= 0; --cur) {
+ this->addTri(startIdx, ring.index(cur), ring.index(cur + 1));
+ }
+}
+
+void GrAAConvexTessellator::createOuterRing(const Ring& previousRing, SkScalar outset,
+ SkScalar coverage, Ring* nextRing) {
+ const int numPts = previousRing.numPts();
+ if (numPts == 0) {
+ return;
+ }
+
+ int prev = numPts - 1;
+ int lastPerpIdx = -1, firstPerpIdx = -1;
+
+ const SkScalar outsetSq = SkScalarMul(outset, outset);
+ SkScalar miterLimitSq = SkScalarMul(outset, fMiterLimit);
+ miterLimitSq = SkScalarMul(miterLimitSq, miterLimitSq);
+ for (int cur = 0; cur < numPts; ++cur) {
+ int originalIdx = previousRing.index(cur);
+ // For each vertex of the original polygon we add at least two points to the
+ // outset polygon - one extending perpendicular to each impinging edge. Connecting these
+ // two points yields a bevel join. We need one additional point for a mitered join, and
+ // a round join requires one or more points depending upon curvature.
+
+ // The perpendicular point for the last edge
+ SkPoint normal1 = previousRing.norm(prev);
+ SkPoint perp1 = normal1;
+ perp1.scale(outset);
+ perp1 += this->point(originalIdx);
+
+ // The perpendicular point for the next edge.
+ SkPoint normal2 = previousRing.norm(cur);
+ SkPoint perp2 = normal2;
+ perp2.scale(outset);
+ perp2 += fPts[originalIdx];
+
+ CurveState curve = fCurveState[originalIdx];
+
+ // We know it isn't a duplicate of the prior point (since it and this
+ // one are just perpendicular offsets from the non-merged polygon points)
+ int perp1Idx = this->addPt(perp1, -outset, coverage, false, curve);
+ nextRing->addIdx(perp1Idx, originalIdx);
+
+ int perp2Idx;
+ // For very shallow angles all the corner points could fuse.
+ if (duplicate_pt(perp2, this->point(perp1Idx))) {
+ perp2Idx = perp1Idx;
+ } else {
+ perp2Idx = this->addPt(perp2, -outset, coverage, false, curve);
+ }
+
+ if (perp2Idx != perp1Idx) {
+ if (curve == kCurve_CurveState) {
+ // bevel or round depending upon curvature
+ SkScalar dotProd = normal1.dot(normal2);
+ if (dotProd < kRoundCapThreshold) {
+ // Currently we "round" by creating a single extra point, which produces
+ // good results for common cases. For thick strokes with high curvature, we will
+ // need to add more points; for the time being we simply fall back to software
+ // rendering for thick strokes.
+ SkPoint miter = previousRing.bisector(cur);
+ miter.setLength(-outset);
+ miter += fPts[originalIdx];
+
+ // For very shallow angles all the corner points could fuse
+ if (!duplicate_pt(miter, this->point(perp1Idx))) {
+ int miterIdx;
+ miterIdx = this->addPt(miter, -outset, coverage, false, kSharp_CurveState);
+ nextRing->addIdx(miterIdx, originalIdx);
+ // The two triangles for the corner
+ this->addTri(originalIdx, perp1Idx, miterIdx);
+ this->addTri(originalIdx, miterIdx, perp2Idx);
+ }
+ } else {
+ this->addTri(originalIdx, perp1Idx, perp2Idx);
+ }
+ } else {
+ switch (fJoin) {
+ case SkPaint::Join::kMiter_Join: {
+ // The bisector outset point
+ SkPoint miter = previousRing.bisector(cur);
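+                        // With unit outward normals n1 and n2, (1 + n1.n2)/2 equals
+                        // sin^2 of half the angle between the two edges, so
+                        // outsetSq / sinHalfAngleSq is the squared miter length
+                        // outset / sin(theta/2).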
+ SkScalar dotProd = normal1.dot(normal2);
+ SkScalar sinHalfAngleSq = SkScalarHalf(SK_Scalar1 + dotProd);
+ SkScalar lengthSq = outsetSq / sinHalfAngleSq;
+ if (lengthSq > miterLimitSq) {
+ // just bevel it
+ this->addTri(originalIdx, perp1Idx, perp2Idx);
+ break;
+ }
+ miter.setLength(-SkScalarSqrt(lengthSq));
+ miter += fPts[originalIdx];
+
+ // For very shallow angles all the corner points could fuse
+ if (!duplicate_pt(miter, this->point(perp1Idx))) {
+ int miterIdx;
+ miterIdx = this->addPt(miter, -outset, coverage, false,
+ kSharp_CurveState);
+ nextRing->addIdx(miterIdx, originalIdx);
+ // The two triangles for the corner
+ this->addTri(originalIdx, perp1Idx, miterIdx);
+ this->addTri(originalIdx, miterIdx, perp2Idx);
+ }
+ break;
+ }
+ case SkPaint::Join::kBevel_Join:
+ this->addTri(originalIdx, perp1Idx, perp2Idx);
+ break;
+ default:
+ // kRound_Join is unsupported for now. GrAALinearizingConvexPathRenderer is
+ // only willing to draw mitered or beveled, so we should never get here.
+ SkASSERT(false);
+ }
+ }
+
+ nextRing->addIdx(perp2Idx, originalIdx);
+ }
+
+ if (0 == cur) {
+ // Store the index of the first perpendicular point to finish up
+ firstPerpIdx = perp1Idx;
+ SkASSERT(-1 == lastPerpIdx);
+ } else {
+ // The triangles for the previous edge
+ int prevIdx = previousRing.index(prev);
+ this->addTri(prevIdx, perp1Idx, originalIdx);
+ this->addTri(prevIdx, lastPerpIdx, perp1Idx);
+ }
+
+ // Track the last perpendicular outset point so we can construct the
+ // trailing edge triangles.
+ lastPerpIdx = perp2Idx;
+ prev = cur;
+ }
+
+ // pick up the final edge rect
+ int lastIdx = previousRing.index(numPts - 1);
+ this->addTri(lastIdx, firstPerpIdx, previousRing.index(0));
+ this->addTri(lastIdx, lastPerpIdx, firstPerpIdx);
+
+ this->validate();
+}
+
+// Something went wrong in the creation of the next ring. If we're filling the shape, just go ahead
+// and fan it.
+void GrAAConvexTessellator::terminate(const Ring& ring) {
+ if (fStyle != SkStrokeRec::kStroke_Style) {
+ this->fanRing(ring);
+ }
+}
+
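+// Linearly interpolates coverage between (initialDepth, initialCoverage) and
+// (targetDepth, targetCoverage); e.g. with initialDepth 0, initialCoverage 0,
+// targetDepth 2*kAntialiasingRadius and targetCoverage 1, a point halfway in
+// receives coverage 0.5.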
+static SkScalar compute_coverage(SkScalar depth, SkScalar initialDepth, SkScalar initialCoverage,
+ SkScalar targetDepth, SkScalar targetCoverage) {
+ if (SkScalarNearlyEqual(initialDepth, targetDepth)) {
+ return targetCoverage;
+ }
+ SkScalar result = (depth - initialDepth) / (targetDepth - initialDepth) *
+ (targetCoverage - initialCoverage) + initialCoverage;
+ return SkScalarClampMax(result, 1.0f);
+}
+
+// return true when processing is complete
+bool GrAAConvexTessellator::createInsetRing(const Ring& lastRing, Ring* nextRing,
+ SkScalar initialDepth, SkScalar initialCoverage,
+ SkScalar targetDepth, SkScalar targetCoverage,
+ bool forceNew) {
+ bool done = false;
+
+ fCandidateVerts.rewind();
+
+ // Loop through all the points in the ring and find the intersection with the smallest depth
+ SkScalar minDist = SK_ScalarMax, minT = 0.0f;
+ int minEdgeIdx = -1;
+
+ for (int cur = 0; cur < lastRing.numPts(); ++cur) {
+ int next = (cur + 1) % lastRing.numPts();
+
+ SkScalar t;
+ bool result = intersect(this->point(lastRing.index(cur)), lastRing.bisector(cur),
+ this->point(lastRing.index(next)), lastRing.bisector(next),
+ &t);
+ if (!result) {
+ continue;
+ }
+ SkScalar dist = -t * lastRing.norm(cur).dot(lastRing.bisector(cur));
+
+ if (minDist > dist) {
+ minDist = dist;
+ minT = t;
+ minEdgeIdx = cur;
+ }
+ }
+
+ if (minEdgeIdx == -1) {
+ return false;
+ }
+ SkPoint newPt = lastRing.bisector(minEdgeIdx);
+ newPt.scale(minT);
+ newPt += this->point(lastRing.index(minEdgeIdx));
+
+ SkScalar depth = this->computeDepthFromEdge(lastRing.origEdgeID(minEdgeIdx), newPt);
+ if (depth >= targetDepth) {
+ // None of the bisectors intersect before reaching the desired depth.
+ // Just step them all to the desired depth
+ depth = targetDepth;
+ done = true;
+ }
+
+ // 'dst' stores where each point in the last ring maps to/transforms into
+ // in the next ring.
+ SkTDArray<int> dst;
+ dst.setCount(lastRing.numPts());
+
+ // Create the first point (who compares with no one)
+ if (!this->computePtAlongBisector(lastRing.index(0),
+ lastRing.bisector(0),
+ lastRing.origEdgeID(0),
+ depth, &newPt)) {
+ this->terminate(lastRing);
+ return true;
+ }
+ dst[0] = fCandidateVerts.addNewPt(newPt,
+ lastRing.index(0), lastRing.origEdgeID(0),
+ !this->movable(lastRing.index(0)));
+
+ // Handle the middle points (who only compare with the prior point)
+ for (int cur = 1; cur < lastRing.numPts()-1; ++cur) {
+ if (!this->computePtAlongBisector(lastRing.index(cur),
+ lastRing.bisector(cur),
+ lastRing.origEdgeID(cur),
+ depth, &newPt)) {
+ this->terminate(lastRing);
+ return true;
+ }
+ if (!duplicate_pt(newPt, fCandidateVerts.lastPoint())) {
+ dst[cur] = fCandidateVerts.addNewPt(newPt,
+ lastRing.index(cur), lastRing.origEdgeID(cur),
+ !this->movable(lastRing.index(cur)));
+ } else {
+ dst[cur] = fCandidateVerts.fuseWithPrior(lastRing.origEdgeID(cur));
+ }
+ }
+
+ // Check on the last point (handling the wrap around)
+ int cur = lastRing.numPts()-1;
+ if (!this->computePtAlongBisector(lastRing.index(cur),
+ lastRing.bisector(cur),
+ lastRing.origEdgeID(cur),
+ depth, &newPt)) {
+ this->terminate(lastRing);
+ return true;
+ }
+ bool dupPrev = duplicate_pt(newPt, fCandidateVerts.lastPoint());
+ bool dupNext = duplicate_pt(newPt, fCandidateVerts.firstPoint());
+
+ if (!dupPrev && !dupNext) {
+ dst[cur] = fCandidateVerts.addNewPt(newPt,
+ lastRing.index(cur), lastRing.origEdgeID(cur),
+ !this->movable(lastRing.index(cur)));
+ } else if (dupPrev && !dupNext) {
+ dst[cur] = fCandidateVerts.fuseWithPrior(lastRing.origEdgeID(cur));
+ } else if (!dupPrev && dupNext) {
+ dst[cur] = fCandidateVerts.fuseWithNext();
+ } else {
+ bool dupPrevVsNext = duplicate_pt(fCandidateVerts.firstPoint(), fCandidateVerts.lastPoint());
+
+ if (!dupPrevVsNext) {
+ dst[cur] = fCandidateVerts.fuseWithPrior(lastRing.origEdgeID(cur));
+ } else {
+ const int fused = fCandidateVerts.fuseWithBoth();
+ dst[cur] = fused;
+ const int targetIdx = dst[cur - 1];
+ for (int i = cur - 1; i >= 0 && dst[i] == targetIdx; i--) {
+ dst[i] = fused;
+ }
+ }
+ }
+
+ // Fold the new ring's points into the global pool
+ for (int i = 0; i < fCandidateVerts.numPts(); ++i) {
+ int newIdx;
+ if (fCandidateVerts.needsToBeNew(i) || forceNew) {
+ // if the originating index is still valid then this point wasn't
+ // fused (and is thus movable)
+ SkScalar coverage = compute_coverage(depth, initialDepth, initialCoverage,
+ targetDepth, targetCoverage);
+ newIdx = this->addPt(fCandidateVerts.point(i), depth, coverage,
+ fCandidateVerts.originatingIdx(i) != -1, kSharp_CurveState);
+ } else {
+ SkASSERT(fCandidateVerts.originatingIdx(i) != -1);
+ this->updatePt(fCandidateVerts.originatingIdx(i), fCandidateVerts.point(i), depth,
+ targetCoverage);
+ newIdx = fCandidateVerts.originatingIdx(i);
+ }
+
+ nextRing->addIdx(newIdx, fCandidateVerts.origEdge(i));
+ }
+
+ // 'dst' currently has indices into the ring. Remap these to be indices
+ // into the global pool since the triangulation operates in that space.
+ for (int i = 0; i < dst.count(); ++i) {
+ dst[i] = nextRing->index(dst[i]);
+ }
+
+ for (int i = 0; i < lastRing.numPts(); ++i) {
+ int next = (i + 1) % lastRing.numPts();
+
+ this->addTri(lastRing.index(i), lastRing.index(next), dst[next]);
+ this->addTri(lastRing.index(i), dst[next], dst[i]);
+ }
+
+ if (done && fStyle != SkStrokeRec::kStroke_Style) {
+ // fill or stroke-and-fill
+ this->fanRing(*nextRing);
+ }
+
+ if (nextRing->numPts() < 3) {
+ done = true;
+ }
+ return done;
+}
+
+void GrAAConvexTessellator::validate() const {
+ SkASSERT(fPts.count() == fMovable.count());
+ SkASSERT(fPts.count() == fCoverages.count());
+ SkASSERT(fPts.count() == fCurveState.count());
+ SkASSERT(0 == (fIndices.count() % 3));
+ SkASSERT(!fBisectors.count() || fBisectors.count() == fNorms.count());
+}
+
+//////////////////////////////////////////////////////////////////////////////
+void GrAAConvexTessellator::Ring::init(const GrAAConvexTessellator& tess) {
+ this->computeNormals(tess);
+ this->computeBisectors(tess);
+}
+
+void GrAAConvexTessellator::Ring::init(const SkTDArray<SkVector>& norms,
+ const SkTDArray<SkVector>& bisectors) {
+ for (int i = 0; i < fPts.count(); ++i) {
+ fPts[i].fNorm = norms[i];
+ fPts[i].fBisector = bisectors[i];
+ }
+}
+
+// Compute the outward facing normal at each vertex.
+void GrAAConvexTessellator::Ring::computeNormals(const GrAAConvexTessellator& tess) {
+ for (int cur = 0; cur < fPts.count(); ++cur) {
+ int next = (cur + 1) % fPts.count();
+
+ fPts[cur].fNorm = tess.point(fPts[next].fIndex) - tess.point(fPts[cur].fIndex);
+ SkPoint::Normalize(&fPts[cur].fNorm);
+ fPts[cur].fNorm.setOrthog(fPts[cur].fNorm, tess.side());
+ }
+}
+
+void GrAAConvexTessellator::Ring::computeBisectors(const GrAAConvexTessellator& tess) {
+ int prev = fPts.count() - 1;
+ for (int cur = 0; cur < fPts.count(); prev = cur, ++cur) {
+ fPts[cur].fBisector = fPts[cur].fNorm + fPts[prev].fNorm;
+ if (!fPts[cur].fBisector.normalize()) {
+ SkASSERT(SkPoint::kLeft_Side == tess.side() || SkPoint::kRight_Side == tess.side());
+ fPts[cur].fBisector.setOrthog(fPts[cur].fNorm, (SkPoint::Side)-tess.side());
+ SkVector other;
+ other.setOrthog(fPts[prev].fNorm, tess.side());
+ fPts[cur].fBisector += other;
+ SkAssertResult(fPts[cur].fBisector.normalize());
+ } else {
+ fPts[cur].fBisector.negate(); // make the bisector face in
+ }
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+#ifdef SK_DEBUG
+// Is this ring convex?
+bool GrAAConvexTessellator::Ring::isConvex(const GrAAConvexTessellator& tess) const {
+ if (fPts.count() < 3) {
+ return true;
+ }
+
+ SkPoint prev = tess.point(fPts[0].fIndex) - tess.point(fPts.top().fIndex);
+ SkPoint cur = tess.point(fPts[1].fIndex) - tess.point(fPts[0].fIndex);
+ SkScalar minDot = prev.fX * cur.fY - prev.fY * cur.fX;
+ SkScalar maxDot = minDot;
+
+ prev = cur;
+ for (int i = 1; i < fPts.count(); ++i) {
+ int next = (i + 1) % fPts.count();
+
+ cur = tess.point(fPts[next].fIndex) - tess.point(fPts[i].fIndex);
+ SkScalar dot = prev.fX * cur.fY - prev.fY * cur.fX;
+
+ minDot = SkMinScalar(minDot, dot);
+ maxDot = SkMaxScalar(maxDot, dot);
+
+ prev = cur;
+ }
+
+ if (SkScalarNearlyEqual(maxDot, 0.0f, 0.005f)) {
+ maxDot = 0;
+ }
+ if (SkScalarNearlyEqual(minDot, 0.0f, 0.005f)) {
+ minDot = 0;
+ }
+ return (maxDot >= 0.0f) == (minDot >= 0.0f);
+}
+
+#endif
+
+void GrAAConvexTessellator::lineTo(const SkPoint& p, CurveState curve) {
+ if (this->numPts() > 0 && duplicate_pt(p, this->lastPoint())) {
+ return;
+ }
+
+ SkASSERT(fPts.count() <= 1 || fPts.count() == fNorms.count()+1);
+ if (this->numPts() >= 2 && abs_dist_from_line(fPts.top(), fNorms.top(), p) < kClose) {
+ // The old last point is on the line from the second to last to the new point
+ this->popLastPt();
+ fNorms.pop();
+ // double-check that the new last point is not a duplicate of the new point. In an ideal
+ // world this wouldn't be necessary (since it's only possible for non-convex paths), but
+ // floating point precision issues mean it can actually happen on paths that were
+ // determined to be convex.
+ if (duplicate_pt(p, this->lastPoint())) {
+ return;
+ }
+ }
+ SkScalar initialRingCoverage = (SkStrokeRec::kFill_Style == fStyle) ? 0.5f : 1.0f;
+ this->addPt(p, 0.0f, initialRingCoverage, false, curve);
+ if (this->numPts() > 1) {
+ *fNorms.push() = fPts.top() - fPts[fPts.count()-2];
+ SkDEBUGCODE(SkScalar len =) SkPoint::Normalize(&fNorms.top());
+ SkASSERT(len > 0.0f);
+ SkASSERT(SkScalarNearlyEqual(1.0f, fNorms.top().length()));
+ }
+}
+
+void GrAAConvexTessellator::lineTo(const SkMatrix& m, SkPoint p, CurveState curve) {
+ m.mapPoints(&p, 1);
+ this->lineTo(p, curve);
+}
+
+void GrAAConvexTessellator::quadTo(const SkPoint pts[3]) {
+ int maxCount = GrPathUtils::quadraticPointCount(pts, kQuadTolerance);
+ fPointBuffer.setReserve(maxCount);
+ SkPoint* target = fPointBuffer.begin();
+ int count = GrPathUtils::generateQuadraticPoints(pts[0], pts[1], pts[2],
+ kQuadTolerance, &target, maxCount);
+ fPointBuffer.setCount(count);
+ for (int i = 0; i < count - 1; i++) {
+ this->lineTo(fPointBuffer[i], kCurve_CurveState);
+ }
+ this->lineTo(fPointBuffer[count - 1], kIndeterminate_CurveState);
+}
+
+void GrAAConvexTessellator::quadTo(const SkMatrix& m, SkPoint pts[3]) {
+ m.mapPoints(pts, 3);
+ this->quadTo(pts);
+}
+
+void GrAAConvexTessellator::cubicTo(const SkMatrix& m, SkPoint pts[4]) {
+ m.mapPoints(pts, 4);
+ int maxCount = GrPathUtils::cubicPointCount(pts, kCubicTolerance);
+ fPointBuffer.setReserve(maxCount);
+ SkPoint* target = fPointBuffer.begin();
+ int count = GrPathUtils::generateCubicPoints(pts[0], pts[1], pts[2], pts[3],
+ kCubicTolerance, &target, maxCount);
+ fPointBuffer.setCount(count);
+ for (int i = 0; i < count - 1; i++) {
+ this->lineTo(fPointBuffer[i], kCurve_CurveState);
+ }
+ this->lineTo(fPointBuffer[count - 1], kIndeterminate_CurveState);
+}
+
+// include down here to avoid compilation errors caused by "-" overload in SkGeometry.h
+#include "SkGeometry.h"
+
+void GrAAConvexTessellator::conicTo(const SkMatrix& m, SkPoint pts[3], SkScalar w) {
+ m.mapPoints(pts, 3);
+ SkAutoConicToQuads quadder;
+ const SkPoint* quads = quadder.computeQuads(pts, w, kConicTolerance);
+ SkPoint lastPoint = *(quads++);
+ int count = quadder.countQuads();
+ for (int i = 0; i < count; ++i) {
+ SkPoint quadPts[3];
+ quadPts[0] = lastPoint;
+ quadPts[1] = quads[0];
+ quadPts[2] = i == count - 1 ? pts[2] : quads[1];
+ this->quadTo(quadPts);
+ lastPoint = quadPts[2];
+ quads += 2;
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+#if GR_AA_CONVEX_TESSELLATOR_VIZ
+static const SkScalar kPointRadius = 0.02f;
+static const SkScalar kArrowStrokeWidth = 0.0f;
+static const SkScalar kArrowLength = 0.2f;
+static const SkScalar kEdgeTextSize = 0.1f;
+static const SkScalar kPointTextSize = 0.02f;
+
+static void draw_point(SkCanvas* canvas, const SkPoint& p, SkScalar paramValue, bool stroke) {
+ SkPaint paint;
+ SkASSERT(paramValue <= 1.0f);
+ int gs = int(255*paramValue);
+ paint.setARGB(255, gs, gs, gs);
+
+ canvas->drawCircle(p.fX, p.fY, kPointRadius, paint);
+
+ if (stroke) {
+ SkPaint stroke;
+ stroke.setColor(SK_ColorYELLOW);
+ stroke.setStyle(SkPaint::kStroke_Style);
+ stroke.setStrokeWidth(kPointRadius/3.0f);
+ canvas->drawCircle(p.fX, p.fY, kPointRadius, stroke);
+ }
+}
+
+static void draw_line(SkCanvas* canvas, const SkPoint& p0, const SkPoint& p1, SkColor color) {
+ SkPaint p;
+ p.setColor(color);
+
+ canvas->drawLine(p0.fX, p0.fY, p1.fX, p1.fY, p);
+}
+
+static void draw_arrow(SkCanvas*canvas, const SkPoint& p, const SkPoint &n,
+ SkScalar len, SkColor color) {
+ SkPaint paint;
+ paint.setColor(color);
+ paint.setStrokeWidth(kArrowStrokeWidth);
+ paint.setStyle(SkPaint::kStroke_Style);
+
+ canvas->drawLine(p.fX, p.fY,
+ p.fX + len * n.fX, p.fY + len * n.fY,
+ paint);
+}
+
+void GrAAConvexTessellator::Ring::draw(SkCanvas* canvas, const GrAAConvexTessellator& tess) const {
+ SkPaint paint;
+ paint.setTextSize(kEdgeTextSize);
+
+ for (int cur = 0; cur < fPts.count(); ++cur) {
+ int next = (cur + 1) % fPts.count();
+
+ draw_line(canvas,
+ tess.point(fPts[cur].fIndex),
+ tess.point(fPts[next].fIndex),
+ SK_ColorGREEN);
+
+ SkPoint mid = tess.point(fPts[cur].fIndex) + tess.point(fPts[next].fIndex);
+ mid.scale(0.5f);
+
+ if (fPts.count()) {
+ draw_arrow(canvas, mid, fPts[cur].fNorm, kArrowLength, SK_ColorRED);
+ mid.fX += (kArrowLength/2) * fPts[cur].fNorm.fX;
+ mid.fY += (kArrowLength/2) * fPts[cur].fNorm.fY;
+ }
+
+ SkString num;
+ num.printf("%d", this->origEdgeID(cur));
+ canvas->drawText(num.c_str(), num.size(), mid.fX, mid.fY, paint);
+
+ if (fPts.count()) {
+ draw_arrow(canvas, tess.point(fPts[cur].fIndex), fPts[cur].fBisector,
+ kArrowLength, SK_ColorBLUE);
+ }
+ }
+}
+
+void GrAAConvexTessellator::draw(SkCanvas* canvas) const {
+ for (int i = 0; i < fIndices.count(); i += 3) {
+        SkASSERT(fIndices[i] < this->numPts());
+        SkASSERT(fIndices[i+1] < this->numPts());
+        SkASSERT(fIndices[i+2] < this->numPts());
+
+ draw_line(canvas,
+ this->point(this->fIndices[i]), this->point(this->fIndices[i+1]),
+ SK_ColorBLACK);
+ draw_line(canvas,
+ this->point(this->fIndices[i+1]), this->point(this->fIndices[i+2]),
+ SK_ColorBLACK);
+ draw_line(canvas,
+ this->point(this->fIndices[i+2]), this->point(this->fIndices[i]),
+ SK_ColorBLACK);
+ }
+
+ fInitialRing.draw(canvas, *this);
+ for (int i = 0; i < fRings.count(); ++i) {
+ fRings[i]->draw(canvas, *this);
+ }
+
+ for (int i = 0; i < this->numPts(); ++i) {
+ draw_point(canvas,
+ this->point(i), 0.5f + (this->depth(i)/(2 * kAntialiasingRadius)),
+ !this->movable(i));
+
+ SkPaint paint;
+ paint.setTextSize(kPointTextSize);
+ paint.setTextAlign(SkPaint::kCenter_Align);
+ if (this->depth(i) <= -kAntialiasingRadius) {
+ paint.setColor(SK_ColorWHITE);
+ }
+
+ SkString num;
+ num.printf("%d", i);
+ canvas->drawText(num.c_str(), num.size(),
+ this->point(i).fX, this->point(i).fY+(kPointRadius/2.0f),
+ paint);
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrAAConvexTessellator.h b/gfx/skia/skia/src/gpu/batches/GrAAConvexTessellator.h
new file mode 100644
index 000000000..2fdde1032
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrAAConvexTessellator.h
@@ -0,0 +1,291 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAAConvexTessellator_DEFINED
+#define GrAAConvexTessellator_DEFINED
+
+#include "SkColor.h"
+#include "SkPaint.h"
+#include "SkPoint.h"
+#include "SkScalar.h"
+#include "SkStrokeRec.h"
+#include "SkTDArray.h"
+
+class SkCanvas;
+class SkMatrix;
+class SkPath;
+
+//#define GR_AA_CONVEX_TESSELLATOR_VIZ 1
+
+// device-space distance by which we inset / outset points in order to create the soft antialiased edge
+static const SkScalar kAntialiasingRadius = 0.5f;
+
+class GrAAConvexTessellator;
+
+// The AAConvexTessellator holds the global pool of points and the triangulation
+// that connects them. It also drives the tessellation process.
+// The outward facing normals of the original polygon are stored (in 'fNorms') to service
+// computeDepthFromEdge requests.
+class GrAAConvexTessellator {
+public:
+ GrAAConvexTessellator(SkStrokeRec::Style style = SkStrokeRec::kFill_Style,
+ SkScalar strokeWidth = -1.0f,
+ SkPaint::Join join = SkPaint::Join::kBevel_Join,
+ SkScalar miterLimit = 0.0f)
+ : fSide(SkPoint::kOn_Side)
+ , fStrokeWidth(strokeWidth)
+ , fStyle(style)
+ , fJoin(join)
+ , fMiterLimit(miterLimit) {
+ }
+
+ SkPoint::Side side() const { return fSide; }
+
+ bool tessellate(const SkMatrix& m, const SkPath& path);
+
+    // The following accessors should only be called after tessellate() to extract the result
+ int numPts() const { return fPts.count(); }
+ int numIndices() const { return fIndices.count(); }
+
+ const SkPoint& lastPoint() const { return fPts.top(); }
+ const SkPoint& point(int index) const { return fPts[index]; }
+ int index(int index) const { return fIndices[index]; }
+ SkScalar coverage(int index) const { return fCoverages[index]; }
+
+#if GR_AA_CONVEX_TESSELLATOR_VIZ
+ void draw(SkCanvas* canvas) const;
+#endif
+
+ // The tessellator can be reused for multiple paths by rewinding in between
+ void rewind();
+
+private:
+ // CandidateVerts holds the vertices for the next ring while they are
+ // being generated. Its main function is to de-dup the points.
+ class CandidateVerts {
+ public:
+ void setReserve(int numPts) { fPts.setReserve(numPts); }
+ void rewind() { fPts.rewind(); }
+
+ int numPts() const { return fPts.count(); }
+
+ const SkPoint& lastPoint() const { return fPts.top().fPt; }
+ const SkPoint& firstPoint() const { return fPts[0].fPt; }
+ const SkPoint& point(int index) const { return fPts[index].fPt; }
+
+ int originatingIdx(int index) const { return fPts[index].fOriginatingIdx; }
+ int origEdge(int index) const { return fPts[index].fOrigEdgeId; }
+ bool needsToBeNew(int index) const { return fPts[index].fNeedsToBeNew; }
+
+ int addNewPt(const SkPoint& newPt, int originatingIdx, int origEdge, bool needsToBeNew) {
+ struct PointData* pt = fPts.push();
+ pt->fPt = newPt;
+ pt->fOrigEdgeId = origEdge;
+ pt->fOriginatingIdx = originatingIdx;
+ pt->fNeedsToBeNew = needsToBeNew;
+ return fPts.count() - 1;
+ }
+
+ int fuseWithPrior(int origEdgeId) {
+ fPts.top().fOrigEdgeId = origEdgeId;
+ fPts.top().fOriginatingIdx = -1;
+ fPts.top().fNeedsToBeNew = true;
+ return fPts.count() - 1;
+ }
+
+ int fuseWithNext() {
+ fPts[0].fOriginatingIdx = -1;
+ fPts[0].fNeedsToBeNew = true;
+ return 0;
+ }
+
+ int fuseWithBoth() {
+ if (fPts.count() > 1) {
+ fPts.pop();
+ }
+
+ fPts[0].fOriginatingIdx = -1;
+ fPts[0].fNeedsToBeNew = true;
+ return 0;
+ }
+
+ private:
+ struct PointData {
+ SkPoint fPt;
+ int fOriginatingIdx;
+ int fOrigEdgeId;
+ bool fNeedsToBeNew;
+ };
+
+ SkTDArray<struct PointData> fPts;
+ };
+
+ // The Ring holds a set of indices into the global pool that together define
+ // a single polygon inset.
+ class Ring {
+ public:
+ void setReserve(int numPts) { fPts.setReserve(numPts); }
+ void rewind() { fPts.rewind(); }
+
+ int numPts() const { return fPts.count(); }
+
+ void addIdx(int index, int origEdgeId) {
+ struct PointData* pt = fPts.push();
+ pt->fIndex = index;
+ pt->fOrigEdgeId = origEdgeId;
+ }
+
+ // Upgrade this ring so that it can behave like an originating ring
+ void makeOriginalRing() {
+ for (int i = 0; i < fPts.count(); ++i) {
+ fPts[i].fOrigEdgeId = fPts[i].fIndex;
+ }
+ }
+
+ // init should be called after all the indices have been added (via addIdx)
+ void init(const GrAAConvexTessellator& tess);
+ void init(const SkTDArray<SkVector>& norms, const SkTDArray<SkVector>& bisectors);
+
+ const SkPoint& norm(int index) const { return fPts[index].fNorm; }
+ const SkPoint& bisector(int index) const { return fPts[index].fBisector; }
+ int index(int index) const { return fPts[index].fIndex; }
+ int origEdgeID(int index) const { return fPts[index].fOrigEdgeId; }
+ void setOrigEdgeId(int index, int id) { fPts[index].fOrigEdgeId = id; }
+
+ #if GR_AA_CONVEX_TESSELLATOR_VIZ
+ void draw(SkCanvas* canvas, const GrAAConvexTessellator& tess) const;
+ #endif
+
+ private:
+ void computeNormals(const GrAAConvexTessellator& result);
+ void computeBisectors(const GrAAConvexTessellator& tess);
+
+ SkDEBUGCODE(bool isConvex(const GrAAConvexTessellator& tess) const;)
+
+ struct PointData {
+ SkPoint fNorm;
+ SkPoint fBisector;
+ int fIndex;
+ int fOrigEdgeId;
+ };
+
+ SkTDArray<PointData> fPts;
+ };
+
+ // Represents whether a given point is within a curve. A point is inside a curve only if it is
+ // an interior point within a quad, cubic, or conic, or if it is the endpoint of a quad, cubic,
+ // or conic with another curve meeting it at (more or less) the same angle.
+ enum CurveState {
+ // point is a sharp vertex
+ kSharp_CurveState,
+ // endpoint of a curve with the other side's curvature not yet determined
+ kIndeterminate_CurveState,
+ // point is in the interior of a curve
+ kCurve_CurveState
+ };
+
+ bool movable(int index) const { return fMovable[index]; }
+
+ // Movable points are those that can be slid along their bisector.
+ // Basically, a point is immovable if it is part of the original
+ // polygon or it results from the fusing of two bisectors.
+ int addPt(const SkPoint& pt, SkScalar depth, SkScalar coverage, bool movable, CurveState curve);
+ void popLastPt();
+ void popFirstPtShuffle();
+
+ void updatePt(int index, const SkPoint& pt, SkScalar depth, SkScalar coverage);
+
+ void addTri(int i0, int i1, int i2);
+
+ void reservePts(int count) {
+ fPts.setReserve(count);
+ fCoverages.setReserve(count);
+ fMovable.setReserve(count);
+ }
+
+ SkScalar computeDepthFromEdge(int edgeIdx, const SkPoint& p) const;
+
+ bool computePtAlongBisector(int startIdx, const SkPoint& bisector,
+ int edgeIdx, SkScalar desiredDepth,
+ SkPoint* result) const;
+
+ void lineTo(const SkPoint& p, CurveState curve);
+
+ void lineTo(const SkMatrix& m, SkPoint p, CurveState curve);
+
+ void quadTo(const SkPoint pts[3]);
+
+ void quadTo(const SkMatrix& m, SkPoint pts[3]);
+
+ void cubicTo(const SkMatrix& m, SkPoint pts[4]);
+
+ void conicTo(const SkMatrix& m, SkPoint pts[3], SkScalar w);
+
+ void terminate(const Ring& lastRing);
+
+ // return false on failure/degenerate path
+ bool extractFromPath(const SkMatrix& m, const SkPath& path);
+ void computeBisectors();
+
+ void fanRing(const Ring& ring);
+
+ Ring* getNextRing(Ring* lastRing);
+
+ void createOuterRing(const Ring& previousRing, SkScalar outset, SkScalar coverage,
+ Ring* nextRing);
+
+ bool createInsetRings(Ring& previousRing, SkScalar initialDepth, SkScalar initialCoverage,
+ SkScalar targetDepth, SkScalar targetCoverage, Ring** finalRing);
+
+ bool createInsetRing(const Ring& lastRing, Ring* nextRing,
+ SkScalar initialDepth, SkScalar initialCoverage, SkScalar targetDepth,
+ SkScalar targetCoverage, bool forceNew);
+
+ void validate() const;
+
+ // fPts, fCoverages, fMovable & fCurveState should always have the same # of elements
+ SkTDArray<SkPoint> fPts;
+ SkTDArray<SkScalar> fCoverages;
+ // movable points are those that can be slid further along their bisector
+ SkTDArray<bool> fMovable;
+ // Tracks whether a given point is interior to a curve. Such points are
+ // assumed to have shallow curvature.
+ SkTDArray<CurveState> fCurveState;
+
+ // The outward facing normals for the original polygon
+ SkTDArray<SkVector> fNorms;
+ // The inward facing bisector at each point in the original polygon. Only
+ // needed for exterior ring creation and then handed off to the initial ring.
+ SkTDArray<SkVector> fBisectors;
+
+ SkPoint::Side fSide; // winding of the original polygon
+
+ // The triangulation of the points
+ SkTDArray<int> fIndices;
+
+ Ring fInitialRing;
+#if GR_AA_CONVEX_TESSELLATOR_VIZ
+ // When visualizing save all the rings
+ SkTDArray<Ring*> fRings;
+#else
+ Ring fRings[2];
+#endif
+ CandidateVerts fCandidateVerts;
+
+ // the stroke width is only used for stroke or stroke-and-fill styles
+ SkScalar fStrokeWidth;
+ SkStrokeRec::Style fStyle;
+
+ SkPaint::Join fJoin;
+
+ SkScalar fMiterLimit;
+
+ SkTDArray<SkPoint> fPointBuffer;
+};
+
+
+#endif
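For context, a minimal sketch of how a caller might drive the tessellator API declared above and read back the result. It assumes a `viewMatrix` and a convex `path` are already in hand; the names and the loop body are illustrative only, not part of the patch itself.

    GrAAConvexTessellator tess;                      // defaults to fill style
    if (tess.tessellate(viewMatrix, path)) {         // viewMatrix/path assumed available
        for (int i = 0; i < tess.numIndices(); i += 3) {
            // every triple of indices is one triangle of the antialiased mesh
            int i0 = tess.index(i);
            int i1 = tess.index(i + 1);
            int i2 = tess.index(i + 2);
            // positions and per-vertex coverage for the triangle's corners
            const SkPoint& p0 = tess.point(i0);
            SkScalar c0 = tess.coverage(i0);
            // ... likewise for i1/i2, then emit the vertices to a buffer ...
        }
    }
    tess.rewind();                                   // reuse the tessellator for the next path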
diff --git a/gfx/skia/skia/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp b/gfx/skia/skia/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
new file mode 100644
index 000000000..36a9ff01e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
@@ -0,0 +1,621 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrAADistanceFieldPathRenderer.h"
+
+#include "GrBatchFlushState.h"
+#include "GrBatchTest.h"
+#include "GrBuffer.h"
+#include "GrContext.h"
+#include "GrPipelineBuilder.h"
+#include "GrResourceProvider.h"
+#include "GrSurfacePriv.h"
+#include "GrSWMaskHelper.h"
+#include "GrTexturePriv.h"
+#include "batches/GrVertexBatch.h"
+#include "effects/GrDistanceFieldGeoProc.h"
+
+#include "SkDistanceFieldGen.h"
+
+#define ATLAS_TEXTURE_WIDTH 2048
+#define ATLAS_TEXTURE_HEIGHT 2048
+#define PLOT_WIDTH 512
+#define PLOT_HEIGHT 256
+
+#define NUM_PLOTS_X (ATLAS_TEXTURE_WIDTH / PLOT_WIDTH)
+#define NUM_PLOTS_Y (ATLAS_TEXTURE_HEIGHT / PLOT_HEIGHT)
+
+#ifdef DF_PATH_TRACKING
+static int g_NumCachedShapes = 0;
+static int g_NumFreedShapes = 0;
+#endif
+
+// mip levels
+static const int kSmallMIP = 32;
+static const int kMediumMIP = 73;
+static const int kLargeMIP = 162;
+
+// Callback to clear out internal path cache when eviction occurs
+void GrAADistanceFieldPathRenderer::HandleEviction(GrBatchAtlas::AtlasID id, void* pr) {
+ GrAADistanceFieldPathRenderer* dfpr = (GrAADistanceFieldPathRenderer*)pr;
+ // remove any paths that use this plot
+ ShapeDataList::Iter iter;
+ iter.init(dfpr->fShapeList, ShapeDataList::Iter::kHead_IterStart);
+ ShapeData* shapeData;
+ while ((shapeData = iter.get())) {
+ iter.next();
+ if (id == shapeData->fID) {
+ dfpr->fShapeCache.remove(shapeData->fKey);
+ dfpr->fShapeList.remove(shapeData);
+ delete shapeData;
+#ifdef DF_PATH_TRACKING
+            ++g_NumFreedShapes;
+#endif
+ }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+GrAADistanceFieldPathRenderer::GrAADistanceFieldPathRenderer() : fAtlas(nullptr) {}
+
+GrAADistanceFieldPathRenderer::~GrAADistanceFieldPathRenderer() {
+ ShapeDataList::Iter iter;
+ iter.init(fShapeList, ShapeDataList::Iter::kHead_IterStart);
+ ShapeData* shapeData;
+ while ((shapeData = iter.get())) {
+ iter.next();
+ delete shapeData;
+ }
+ delete fAtlas;
+
+#ifdef DF_PATH_TRACKING
+ SkDebugf("Cached shapes: %d, freed shapes: %d\n", g_NumCachedShapes, g_NumFreedShapes);
+#endif
+}
+
+////////////////////////////////////////////////////////////////////////////////
+bool GrAADistanceFieldPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ if (!args.fShaderCaps->shaderDerivativeSupport()) {
+ return false;
+ }
+ // If the shape has no key then we won't get any reuse.
+ if (!args.fShape->hasUnstyledKey()) {
+ return false;
+ }
+ // This only supports filled paths, however, the caller may apply the style to make a filled
+ // path and try again.
+ if (!args.fShape->style().isSimpleFill()) {
+ return false;
+ }
+ // This does non-inverse antialiased fills.
+ if (!args.fAntiAlias) {
+ return false;
+ }
+ // TODO: Support inverse fill
+ if (args.fShape->inverseFilled()) {
+ return false;
+ }
+ // currently don't support perspective
+ if (args.fViewMatrix->hasPerspective()) {
+ return false;
+ }
+
+    // Only support paths with bounds within kMediumMIP by kMediumMIP,
+    // scaled to have bounds within 2.0f*kLargeMIP by 2.0f*kLargeMIP.
+    // The goal is to accelerate rendering of lots of small paths that may be scaled.
+ SkScalar maxScale = args.fViewMatrix->getMaxScale();
+ SkRect bounds = args.fShape->styledBounds();
+ SkScalar maxDim = SkMaxScalar(bounds.width(), bounds.height());
+
+ return maxDim <= kMediumMIP && maxDim * maxScale <= 2.0f*kLargeMIP;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// padding around path bounds to allow for antialiased pixels
+static const SkScalar kAntiAliasPad = 1.0f;
+
+class AADistanceFieldPathBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ typedef GrAADistanceFieldPathRenderer::ShapeData ShapeData;
+ typedef SkTDynamicHash<ShapeData, ShapeData::Key> ShapeCache;
+ typedef GrAADistanceFieldPathRenderer::ShapeDataList ShapeDataList;
+
+ AADistanceFieldPathBatch(GrColor color,
+ const GrShape& shape,
+ bool antiAlias,
+ const SkMatrix& viewMatrix,
+ GrBatchAtlas* atlas,
+ ShapeCache* shapeCache, ShapeDataList* shapeList,
+ bool gammaCorrect)
+ : INHERITED(ClassID()) {
+ SkASSERT(shape.hasUnstyledKey());
+ fBatch.fViewMatrix = viewMatrix;
+ fGeoData.emplace_back(Geometry{color, shape, antiAlias});
+
+ fAtlas = atlas;
+ fShapeCache = shapeCache;
+ fShapeList = shapeList;
+ fGammaCorrect = gammaCorrect;
+
+ // Compute bounds
+ this->setTransformedBounds(shape.bounds(), viewMatrix, HasAABloat::kYes, IsZeroArea::kNo);
+ }
+
+ const char* name() const override { return "AADistanceFieldPathBatch"; }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ color->setKnownFourComponents(fGeoData[0].fColor);
+ coverage->setUnknownSingleComponent();
+ }
+
+private:
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ // Handle any color overrides
+ if (!overrides.readsColor()) {
+ fGeoData[0].fColor = GrColor_ILLEGAL;
+ }
+ overrides.getOverrideColorIfSet(&fGeoData[0].fColor);
+
+ // setup batch properties
+ fBatch.fColorIgnored = !overrides.readsColor();
+ fBatch.fUsesLocalCoords = overrides.readsLocalCoords();
+ fBatch.fCoverageIgnored = !overrides.readsCoverage();
+ }
+
+ struct FlushInfo {
+ SkAutoTUnref<const GrBuffer> fVertexBuffer;
+ SkAutoTUnref<const GrBuffer> fIndexBuffer;
+ sk_sp<GrGeometryProcessor> fGeometryProcessor;
+ int fVertexOffset;
+ int fInstancesToFlush;
+ };
+
+ void onPrepareDraws(Target* target) const override {
+ int instanceCount = fGeoData.count();
+
+ SkMatrix invert;
+ if (this->usesLocalCoords() && !this->viewMatrix().invert(&invert)) {
+ SkDebugf("Could not invert viewmatrix\n");
+ return;
+ }
+
+ const SkMatrix& ctm = this->viewMatrix();
+ uint32_t flags = 0;
+ flags |= ctm.isScaleTranslate() ? kScaleOnly_DistanceFieldEffectFlag : 0;
+ flags |= ctm.isSimilarity() ? kSimilarity_DistanceFieldEffectFlag : 0;
+ flags |= fGammaCorrect ? kGammaCorrect_DistanceFieldEffectFlag : 0;
+
+ GrTextureParams params(SkShader::kRepeat_TileMode, GrTextureParams::kBilerp_FilterMode);
+
+ FlushInfo flushInfo;
+
+ // Setup GrGeometryProcessor
+ GrBatchAtlas* atlas = fAtlas;
+ flushInfo.fGeometryProcessor = GrDistanceFieldPathGeoProc::Make(this->color(),
+ this->viewMatrix(),
+ atlas->getTexture(),
+ params,
+ flags,
+ this->usesLocalCoords());
+
+ // allocate vertices
+ size_t vertexStride = flushInfo.fGeometryProcessor->getVertexStride();
+ SkASSERT(vertexStride == 2 * sizeof(SkPoint) + sizeof(GrColor));
+
+ const GrBuffer* vertexBuffer;
+ void* vertices = target->makeVertexSpace(vertexStride,
+ kVerticesPerQuad * instanceCount,
+ &vertexBuffer,
+ &flushInfo.fVertexOffset);
+ flushInfo.fVertexBuffer.reset(SkRef(vertexBuffer));
+ flushInfo.fIndexBuffer.reset(target->resourceProvider()->refQuadIndexBuffer());
+ if (!vertices || !flushInfo.fIndexBuffer) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ flushInfo.fInstancesToFlush = 0;
+ // Pointer to the next set of vertices to write.
+ intptr_t offset = reinterpret_cast<intptr_t>(vertices);
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& args = fGeoData[i];
+
+ // get mip level
+ SkScalar maxScale = this->viewMatrix().getMaxScale();
+ const SkRect& bounds = args.fShape.bounds();
+ SkScalar maxDim = SkMaxScalar(bounds.width(), bounds.height());
+ SkScalar size = maxScale * maxDim;
+ uint32_t desiredDimension;
+ if (size <= kSmallMIP) {
+ desiredDimension = kSmallMIP;
+ } else if (size <= kMediumMIP) {
+ desiredDimension = kMediumMIP;
+ } else {
+ desiredDimension = kLargeMIP;
+ }
+
+ // check to see if path is cached
+ ShapeData::Key key(args.fShape, desiredDimension);
+ ShapeData* shapeData = fShapeCache->find(key);
+ if (nullptr == shapeData || !atlas->hasID(shapeData->fID)) {
+ // Remove the stale cache entry
+ if (shapeData) {
+ fShapeCache->remove(shapeData->fKey);
+ fShapeList->remove(shapeData);
+ delete shapeData;
+ }
+ SkScalar scale = desiredDimension/maxDim;
+ shapeData = new ShapeData;
+ if (!this->addPathToAtlas(target,
+ &flushInfo,
+ atlas,
+ shapeData,
+ args.fShape,
+ args.fAntiAlias,
+ desiredDimension,
+ scale)) {
+ delete shapeData;
+ SkDebugf("Can't rasterize path\n");
+ continue;
+ }
+ }
+
+ atlas->setLastUseToken(shapeData->fID, target->nextDrawToken());
+
+ this->writePathVertices(target,
+ atlas,
+ offset,
+ args.fColor,
+ vertexStride,
+ this->viewMatrix(),
+ shapeData);
+ offset += kVerticesPerQuad * vertexStride;
+ flushInfo.fInstancesToFlush++;
+ }
+
+ this->flush(target, &flushInfo);
+ }
+
+ bool addPathToAtlas(GrVertexBatch::Target* target,
+ FlushInfo* flushInfo,
+ GrBatchAtlas* atlas,
+ ShapeData* shapeData,
+ const GrShape& shape,
+ bool antiAlias,
+ uint32_t dimension,
+ SkScalar scale) const {
+ const SkRect& bounds = shape.bounds();
+
+ // generate bounding rect for bitmap draw
+ SkRect scaledBounds = bounds;
+ // scale to mip level size
+ scaledBounds.fLeft *= scale;
+ scaledBounds.fTop *= scale;
+ scaledBounds.fRight *= scale;
+ scaledBounds.fBottom *= scale;
+ // move the origin to an integer boundary (gives better results)
+ SkScalar dx = SkScalarFraction(scaledBounds.fLeft);
+ SkScalar dy = SkScalarFraction(scaledBounds.fTop);
+ scaledBounds.offset(-dx, -dy);
+ // get integer boundary
+ SkIRect devPathBounds;
+ scaledBounds.roundOut(&devPathBounds);
+ // pad to allow room for antialiasing
+ const int intPad = SkScalarCeilToInt(kAntiAliasPad);
+ // pre-move origin (after outset, will be 0,0)
+ int width = devPathBounds.width();
+ int height = devPathBounds.height();
+ devPathBounds.fLeft = intPad;
+ devPathBounds.fTop = intPad;
+ devPathBounds.fRight = intPad + width;
+ devPathBounds.fBottom = intPad + height;
+ devPathBounds.outset(intPad, intPad);
+
+ // draw path to bitmap
+ SkMatrix drawMatrix;
+ drawMatrix.setTranslate(-bounds.left(), -bounds.top());
+ drawMatrix.postScale(scale, scale);
+ drawMatrix.postTranslate(kAntiAliasPad, kAntiAliasPad);
+
+ // setup bitmap backing
+ SkASSERT(devPathBounds.fLeft == 0);
+ SkASSERT(devPathBounds.fTop == 0);
+ SkAutoPixmapStorage dst;
+ if (!dst.tryAlloc(SkImageInfo::MakeA8(devPathBounds.width(),
+ devPathBounds.height()))) {
+ return false;
+ }
+ sk_bzero(dst.writable_addr(), dst.getSafeSize());
+
+ // rasterize path
+ SkPaint paint;
+ paint.setStyle(SkPaint::kFill_Style);
+ paint.setAntiAlias(antiAlias);
+
+ SkDraw draw;
+ sk_bzero(&draw, sizeof(draw));
+
+ SkRasterClip rasterClip;
+ rasterClip.setRect(devPathBounds);
+ draw.fRC = &rasterClip;
+ draw.fMatrix = &drawMatrix;
+ draw.fDst = dst;
+
+ SkPath path;
+ shape.asPath(&path);
+ draw.drawPathCoverage(path, paint);
+
+ // generate signed distance field
+ devPathBounds.outset(SK_DistanceFieldPad, SK_DistanceFieldPad);
+ width = devPathBounds.width();
+ height = devPathBounds.height();
+ // TODO We should really generate this directly into the plot somehow
+ SkAutoSMalloc<1024> dfStorage(width * height * sizeof(unsigned char));
+
+ // Generate signed distance field
+ SkGenerateDistanceFieldFromA8Image((unsigned char*)dfStorage.get(),
+ (const unsigned char*)dst.addr(),
+ dst.width(), dst.height(), dst.rowBytes());
+
+ // add to atlas
+ SkIPoint16 atlasLocation;
+ GrBatchAtlas::AtlasID id;
+ if (!atlas->addToAtlas(&id, target, width, height, dfStorage.get(), &atlasLocation)) {
+ this->flush(target, flushInfo);
+ if (!atlas->addToAtlas(&id, target, width, height, dfStorage.get(), &atlasLocation)) {
+ return false;
+ }
+ }
+
+ // add to cache
+ shapeData->fKey.set(shape, dimension);
+ shapeData->fScale = scale;
+ shapeData->fID = id;
+ // change the scaled rect to match the size of the inset distance field
+ scaledBounds.fRight = scaledBounds.fLeft +
+ SkIntToScalar(devPathBounds.width() - 2*SK_DistanceFieldInset);
+ scaledBounds.fBottom = scaledBounds.fTop +
+ SkIntToScalar(devPathBounds.height() - 2*SK_DistanceFieldInset);
+ // shift the origin to the correct place relative to the distance field
+ // need to also restore the fractional translation
+ scaledBounds.offset(-SkIntToScalar(SK_DistanceFieldInset) - kAntiAliasPad + dx,
+ -SkIntToScalar(SK_DistanceFieldInset) - kAntiAliasPad + dy);
+ shapeData->fBounds = scaledBounds;
+ // origin we render from is inset from distance field edge
+ atlasLocation.fX += SK_DistanceFieldInset;
+ atlasLocation.fY += SK_DistanceFieldInset;
+ shapeData->fAtlasLocation = atlasLocation;
+
+ fShapeCache->add(shapeData);
+ fShapeList->addToTail(shapeData);
+#ifdef DF_PATH_TRACKING
+        ++g_NumCachedShapes;
+#endif
+ return true;
+ }
+
+ void writePathVertices(GrDrawBatch::Target* target,
+ GrBatchAtlas* atlas,
+ intptr_t offset,
+ GrColor color,
+ size_t vertexStride,
+ const SkMatrix& viewMatrix,
+ const ShapeData* shapeData) const {
+ GrTexture* texture = atlas->getTexture();
+
+ SkScalar dx = shapeData->fBounds.fLeft;
+ SkScalar dy = shapeData->fBounds.fTop;
+ SkScalar width = shapeData->fBounds.width();
+ SkScalar height = shapeData->fBounds.height();
+
+ SkScalar invScale = 1.0f / shapeData->fScale;
+ dx *= invScale;
+ dy *= invScale;
+ width *= invScale;
+ height *= invScale;
+
+ SkPoint* positions = reinterpret_cast<SkPoint*>(offset);
+
+ // vertex positions
+ // TODO make the vertex attributes a struct
+ SkRect r = SkRect::MakeXYWH(dx, dy, width, height);
+ positions->setRectFan(r.left(), r.top(), r.right(), r.bottom(), vertexStride);
+
+ // colors
+ for (int i = 0; i < kVerticesPerQuad; i++) {
+ GrColor* colorPtr = (GrColor*)(offset + sizeof(SkPoint) + i * vertexStride);
+ *colorPtr = color;
+ }
+
+ const SkScalar tx = SkIntToScalar(shapeData->fAtlasLocation.fX);
+ const SkScalar ty = SkIntToScalar(shapeData->fAtlasLocation.fY);
+
+ // vertex texture coords
+ SkPoint* textureCoords = (SkPoint*)(offset + sizeof(SkPoint) + sizeof(GrColor));
+ textureCoords->setRectFan(tx / texture->width(),
+ ty / texture->height(),
+ (tx + shapeData->fBounds.width()) / texture->width(),
+ (ty + shapeData->fBounds.height()) / texture->height(),
+ vertexStride);
+ }
+
+ void flush(GrVertexBatch::Target* target, FlushInfo* flushInfo) const {
+ if (flushInfo->fInstancesToFlush) {
+ GrMesh mesh;
+ int maxInstancesPerDraw =
+ static_cast<int>(flushInfo->fIndexBuffer->gpuMemorySize() / sizeof(uint16_t) / 6);
+ mesh.initInstanced(kTriangles_GrPrimitiveType, flushInfo->fVertexBuffer,
+ flushInfo->fIndexBuffer, flushInfo->fVertexOffset, kVerticesPerQuad,
+ kIndicesPerQuad, flushInfo->fInstancesToFlush, maxInstancesPerDraw);
+ target->draw(flushInfo->fGeometryProcessor.get(), mesh);
+ flushInfo->fVertexOffset += kVerticesPerQuad * flushInfo->fInstancesToFlush;
+ flushInfo->fInstancesToFlush = 0;
+ }
+ }
+
+ GrColor color() const { return fGeoData[0].fColor; }
+ const SkMatrix& viewMatrix() const { return fBatch.fViewMatrix; }
+ bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ AADistanceFieldPathBatch* that = t->cast<AADistanceFieldPathBatch>();
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ // TODO We can position on the cpu
+ if (!this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return false;
+ }
+
+ fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
+ this->joinBounds(*that);
+ return true;
+ }
+
+ struct BatchTracker {
+ SkMatrix fViewMatrix;
+ bool fUsesLocalCoords;
+ bool fColorIgnored;
+ bool fCoverageIgnored;
+ };
+
+ struct Geometry {
+ GrColor fColor;
+ GrShape fShape;
+ bool fAntiAlias;
+ };
+
+ BatchTracker fBatch;
+ SkSTArray<1, Geometry> fGeoData;
+ GrBatchAtlas* fAtlas;
+ ShapeCache* fShapeCache;
+ ShapeDataList* fShapeList;
+ bool fGammaCorrect;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+bool GrAADistanceFieldPathRenderer::onDrawPath(const DrawPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fDrawContext->auditTrail(),
+ "GrAADistanceFieldPathRenderer::onDrawPath");
+ SkASSERT(!args.fDrawContext->isUnifiedMultisampled());
+ SkASSERT(args.fShape->style().isSimpleFill());
+
+ // we've already bailed on inverse filled paths, so this is safe
+ SkASSERT(!args.fShape->isEmpty());
+ SkASSERT(args.fShape->hasUnstyledKey());
+ if (!fAtlas) {
+ fAtlas = args.fResourceProvider->createAtlas(kAlpha_8_GrPixelConfig,
+ ATLAS_TEXTURE_WIDTH, ATLAS_TEXTURE_HEIGHT,
+ NUM_PLOTS_X, NUM_PLOTS_Y,
+ &GrAADistanceFieldPathRenderer::HandleEviction,
+ (void*)this);
+ if (!fAtlas) {
+ return false;
+ }
+ }
+
+ SkAutoTUnref<GrDrawBatch> batch(new AADistanceFieldPathBatch(args.fPaint->getColor(),
+ *args.fShape,
+ args.fAntiAlias, *args.fViewMatrix,
+ fAtlas, &fShapeCache, &fShapeList,
+ args.fGammaCorrect));
+
+ GrPipelineBuilder pipelineBuilder(*args.fPaint);
+ pipelineBuilder.setUserStencil(args.fUserStencilSettings);
+
+ args.fDrawContext->drawBatch(pipelineBuilder, *args.fClip, batch);
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_TEST_UTILS
+
+struct PathTestStruct {
+ typedef GrAADistanceFieldPathRenderer::ShapeCache ShapeCache;
+ typedef GrAADistanceFieldPathRenderer::ShapeData ShapeData;
+ typedef GrAADistanceFieldPathRenderer::ShapeDataList ShapeDataList;
+ PathTestStruct() : fContextID(SK_InvalidGenID), fAtlas(nullptr) {}
+ ~PathTestStruct() { this->reset(); }
+
+ void reset() {
+ ShapeDataList::Iter iter;
+ iter.init(fShapeList, ShapeDataList::Iter::kHead_IterStart);
+ ShapeData* shapeData;
+ while ((shapeData = iter.get())) {
+ iter.next();
+ fShapeList.remove(shapeData);
+ delete shapeData;
+ }
+ delete fAtlas;
+ fShapeCache.reset();
+ }
+
+ static void HandleEviction(GrBatchAtlas::AtlasID id, void* pr) {
+ PathTestStruct* dfpr = (PathTestStruct*)pr;
+ // remove any paths that use this plot
+ ShapeDataList::Iter iter;
+ iter.init(dfpr->fShapeList, ShapeDataList::Iter::kHead_IterStart);
+ ShapeData* shapeData;
+ while ((shapeData = iter.get())) {
+ iter.next();
+ if (id == shapeData->fID) {
+ dfpr->fShapeCache.remove(shapeData->fKey);
+ dfpr->fShapeList.remove(shapeData);
+ delete shapeData;
+ }
+ }
+ }
+
+ uint32_t fContextID;
+ GrBatchAtlas* fAtlas;
+ ShapeCache fShapeCache;
+ ShapeDataList fShapeList;
+};
+
+DRAW_BATCH_TEST_DEFINE(AADistanceFieldPathBatch) {
+ static PathTestStruct gTestStruct;
+
+ if (context->uniqueID() != gTestStruct.fContextID) {
+ gTestStruct.fContextID = context->uniqueID();
+ gTestStruct.reset();
+ gTestStruct.fAtlas =
+ context->resourceProvider()->createAtlas(kAlpha_8_GrPixelConfig,
+ ATLAS_TEXTURE_WIDTH, ATLAS_TEXTURE_HEIGHT,
+ NUM_PLOTS_X, NUM_PLOTS_Y,
+ &PathTestStruct::HandleEviction,
+ (void*)&gTestStruct);
+ }
+
+ SkMatrix viewMatrix = GrTest::TestMatrix(random);
+ GrColor color = GrRandomColor(random);
+ bool gammaCorrect = random->nextBool();
+
+ // This path renderer only allows fill styles.
+ GrShape shape(GrTest::TestPath(random), GrStyle::SimpleFill());
+ bool antiAlias = random->nextBool();
+
+ return new AADistanceFieldPathBatch(color,
+ shape,
+ antiAlias,
+ viewMatrix,
+ gTestStruct.fAtlas,
+ &gTestStruct.fShapeCache,
+ &gTestStruct.fShapeList,
+ gammaCorrect);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrAADistanceFieldPathRenderer.h b/gfx/skia/skia/src/gpu/batches/GrAADistanceFieldPathRenderer.h
new file mode 100755
index 000000000..985b2f153
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrAADistanceFieldPathRenderer.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAADistanceFieldPathRenderer_DEFINED
+#define GrAADistanceFieldPathRenderer_DEFINED
+
+#include "GrBatchAtlas.h"
+#include "GrPathRenderer.h"
+#include "GrRect.h"
+#include "GrShape.h"
+
+#include "SkOpts.h"
+#include "SkTDynamicHash.h"
+
+class GrContext;
+
+class GrAADistanceFieldPathRenderer : public GrPathRenderer {
+public:
+ GrAADistanceFieldPathRenderer();
+ virtual ~GrAADistanceFieldPathRenderer();
+
+private:
+ StencilSupport onGetStencilSupport(const GrShape&) const override {
+ return GrPathRenderer::kNoSupport_StencilSupport;
+ }
+
+ bool onCanDrawPath(const CanDrawPathArgs&) const override;
+
+ bool onDrawPath(const DrawPathArgs&) override;
+
+ struct ShapeData {
+ class Key {
+ public:
+ Key() {}
+ Key(const Key& that) { *this = that; }
+ Key(const GrShape& shape, uint32_t dim) { this->set(shape, dim); }
+
+ Key& operator=(const Key& that) {
+ fKey.reset(that.fKey.count());
+ memcpy(fKey.get(), that.fKey.get(), fKey.count() * sizeof(uint32_t));
+ return *this;
+ }
+
+ void set(const GrShape& shape, uint32_t dim) {
+ // Shapes' keys are for their pre-style geometry, but by now we shouldn't have any
+ // relevant styling information.
+ SkASSERT(shape.style().isSimpleFill());
+ SkASSERT(shape.hasUnstyledKey());
+ int shapeKeySize = shape.unstyledKeySize();
+ fKey.reset(1 + shapeKeySize);
+ fKey[0] = dim;
+ shape.writeUnstyledKey(&fKey[1]);
+ }
+
+ bool operator==(const Key& that) const {
+ return fKey.count() == that.fKey.count() &&
+ 0 == memcmp(fKey.get(), that.fKey.get(), sizeof(uint32_t) * fKey.count());
+ }
+
+ int count32() const { return fKey.count(); }
+ const uint32_t* data() const { return fKey.get(); }
+
+ private:
+            // The key is composed of the dimension of the distance field generated for the
+            // path (the mip size chosen by the renderer) and the GrShape's key.
+ SkAutoSTArray<24, uint32_t> fKey;
+ };
+ Key fKey;
+ SkScalar fScale;
+ GrBatchAtlas::AtlasID fID;
+ SkRect fBounds;
+ SkIPoint16 fAtlasLocation;
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(ShapeData);
+
+ static inline const Key& GetKey(const ShapeData& data) {
+ return data.fKey;
+ }
+
+ static inline uint32_t Hash(Key key) {
+ return SkOpts::hash(key.data(), sizeof(uint32_t) * key.count32());
+ }
+ };
+
+ static void HandleEviction(GrBatchAtlas::AtlasID, void*);
+
+ typedef SkTDynamicHash<ShapeData, ShapeData::Key> ShapeCache;
+ typedef SkTInternalLList<ShapeData> ShapeDataList;
+
+ GrBatchAtlas* fAtlas;
+ ShapeCache fShapeCache;
+ ShapeDataList fShapeList;
+
+ typedef GrPathRenderer INHERITED;
+
+ friend class AADistanceFieldPathBatch;
+ friend struct PathTestStruct;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrAAFillRectBatch.cpp b/gfx/skia/skia/src/gpu/batches/GrAAFillRectBatch.cpp
new file mode 100644
index 000000000..4f93adf07
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrAAFillRectBatch.cpp
@@ -0,0 +1,408 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrAAFillRectBatch.h"
+
+#include "GrBatchFlushState.h"
+#include "GrColor.h"
+#include "GrDefaultGeoProcFactory.h"
+#include "GrResourceKey.h"
+#include "GrResourceProvider.h"
+#include "GrTypes.h"
+#include "GrVertexBatch.h"
+#include "SkMatrix.h"
+#include "SkRect.h"
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gAAFillRectIndexBufferKey);
+
+static void set_inset_fan(SkPoint* pts, size_t stride,
+ const SkRect& r, SkScalar dx, SkScalar dy) {
+ pts->setRectFan(r.fLeft + dx, r.fTop + dy,
+ r.fRight - dx, r.fBottom - dy, stride);
+}
+
+static const int kNumAAFillRectsInIndexBuffer = 256;
+static const int kVertsPerAAFillRect = 8;
+static const int kIndicesPerAAFillRect = 30;
+
+static const GrBuffer* get_index_buffer(GrResourceProvider* resourceProvider) {
+ GR_DEFINE_STATIC_UNIQUE_KEY(gAAFillRectIndexBufferKey);
+
+ static const uint16_t gFillAARectIdx[] = {
+ 0, 1, 5, 5, 4, 0,
+ 1, 2, 6, 6, 5, 1,
+ 2, 3, 7, 7, 6, 2,
+ 3, 0, 4, 4, 7, 3,
+ 4, 5, 6, 6, 7, 4,
+ };
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gFillAARectIdx) == kIndicesPerAAFillRect);
+ return resourceProvider->findOrCreateInstancedIndexBuffer(gFillAARectIdx,
+ kIndicesPerAAFillRect, kNumAAFillRectsInIndexBuffer, kVertsPerAAFillRect,
+ gAAFillRectIndexBufferKey);
+}
+
+static void generate_aa_fill_rect_geometry(intptr_t verts,
+ size_t vertexStride,
+ GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkRect& devRect,
+ const GrXPOverridesForBatch& overrides,
+ const SkMatrix* localMatrix) {
+ SkPoint* fan0Pos = reinterpret_cast<SkPoint*>(verts);
+ SkPoint* fan1Pos = reinterpret_cast<SkPoint*>(verts + 4 * vertexStride);
+
+ SkScalar inset;
+
+ if (viewMatrix.rectStaysRect()) {
+ inset = SkMinScalar(devRect.width(), SK_Scalar1);
+ inset = SK_ScalarHalf * SkMinScalar(inset, devRect.height());
+
+ set_inset_fan(fan0Pos, vertexStride, devRect, -SK_ScalarHalf, -SK_ScalarHalf);
+ set_inset_fan(fan1Pos, vertexStride, devRect, inset, inset);
+ } else {
+ // compute transformed (1, 0) and (0, 1) vectors
+ SkVector vec[2] = {
+ { viewMatrix[SkMatrix::kMScaleX], viewMatrix[SkMatrix::kMSkewY] },
+ { viewMatrix[SkMatrix::kMSkewX], viewMatrix[SkMatrix::kMScaleY] }
+ };
+
+ SkScalar len1 = SkPoint::Normalize(&vec[0]);
+ vec[0].scale(SK_ScalarHalf);
+ SkScalar len2 = SkPoint::Normalize(&vec[1]);
+ vec[1].scale(SK_ScalarHalf);
+
+ inset = SkMinScalar(len1 * rect.width(), SK_Scalar1);
+ inset = SK_ScalarHalf * SkMinScalar(inset, len2 * rect.height());
+
+ // create the rotated rect
+ fan0Pos->setRectFan(rect.fLeft, rect.fTop,
+ rect.fRight, rect.fBottom, vertexStride);
+ viewMatrix.mapPointsWithStride(fan0Pos, vertexStride, 4);
+
+ // Now create the inset points and then outset the original
+ // rotated points
+
+ // TL
+ *((SkPoint*)((intptr_t)fan1Pos + 0 * vertexStride)) =
+ *((SkPoint*)((intptr_t)fan0Pos + 0 * vertexStride)) + vec[0] + vec[1];
+ *((SkPoint*)((intptr_t)fan0Pos + 0 * vertexStride)) -= vec[0] + vec[1];
+ // BL
+ *((SkPoint*)((intptr_t)fan1Pos + 1 * vertexStride)) =
+ *((SkPoint*)((intptr_t)fan0Pos + 1 * vertexStride)) + vec[0] - vec[1];
+ *((SkPoint*)((intptr_t)fan0Pos + 1 * vertexStride)) -= vec[0] - vec[1];
+ // BR
+ *((SkPoint*)((intptr_t)fan1Pos + 2 * vertexStride)) =
+ *((SkPoint*)((intptr_t)fan0Pos + 2 * vertexStride)) - vec[0] - vec[1];
+ *((SkPoint*)((intptr_t)fan0Pos + 2 * vertexStride)) += vec[0] + vec[1];
+ // TR
+ *((SkPoint*)((intptr_t)fan1Pos + 3 * vertexStride)) =
+ *((SkPoint*)((intptr_t)fan0Pos + 3 * vertexStride)) - vec[0] + vec[1];
+ *((SkPoint*)((intptr_t)fan0Pos + 3 * vertexStride)) += vec[0] - vec[1];
+ }
+
+ if (localMatrix) {
+ SkMatrix invViewMatrix;
+ if (!viewMatrix.invert(&invViewMatrix)) {
+ SkDebugf("View matrix is non-invertible, local coords will be wrong.");
+ invViewMatrix = SkMatrix::I();
+ }
+ SkMatrix localCoordMatrix;
+ localCoordMatrix.setConcat(*localMatrix, invViewMatrix);
+ SkPoint* fan0Loc = reinterpret_cast<SkPoint*>(verts + sizeof(SkPoint) + sizeof(GrColor));
+ localCoordMatrix.mapPointsWithStride(fan0Loc, fan0Pos, vertexStride, 8);
+ }
+
+ bool tweakAlphaForCoverage = overrides.canTweakAlphaForCoverage();
+
+ // Make verts point to vertex color and then set all the color and coverage vertex attrs
+ // values.
+ verts += sizeof(SkPoint);
+
+ // The coverage offset is always the last vertex attribute
+ intptr_t coverageOffset = vertexStride - sizeof(GrColor) - sizeof(SkPoint);
+ for (int i = 0; i < 4; ++i) {
+ if (tweakAlphaForCoverage) {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = 0;
+ } else {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
+ *reinterpret_cast<float*>(verts + i * vertexStride + coverageOffset) = 0;
+ }
+ }
+
+ int scale;
+ if (inset < SK_ScalarHalf) {
+ scale = SkScalarFloorToInt(512.0f * inset / (inset + SK_ScalarHalf));
+ SkASSERT(scale >= 0 && scale <= 255);
+ } else {
+ scale = 0xff;
+ }
+
+ verts += 4 * vertexStride;
+
+ float innerCoverage = GrNormalizeByteToFloat(scale);
+ GrColor scaledColor = (0xff == scale) ? color : SkAlphaMulQ(color, scale);
+
+ for (int i = 0; i < 4; ++i) {
+ if (tweakAlphaForCoverage) {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = scaledColor;
+ } else {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
+ *reinterpret_cast<float*>(verts + i * vertexStride +
+ coverageOffset) = innerCoverage;
+ }
+ }
+}
+class AAFillRectBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ AAFillRectBatch(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkRect& devRect,
+ const SkMatrix* localMatrix) : INHERITED(ClassID()) {
+ if (localMatrix) {
+ void* mem = fRectData.push_back_n(sizeof(RectWithLocalMatrixInfo));
+ new (mem) RectWithLocalMatrixInfo(color, viewMatrix, rect, devRect, *localMatrix);
+ } else {
+ void* mem = fRectData.push_back_n(sizeof(RectInfo));
+ new (mem) RectInfo(color, viewMatrix, rect, devRect);
+ }
+ IsZeroArea zeroArea = (!rect.width() || !rect.height()) ? IsZeroArea::kYes
+ : IsZeroArea::kNo;
+ this->setBounds(devRect, HasAABloat::kYes, zeroArea);
+ fRectCnt = 1;
+ }
+
+ const char* name() const override { return "AAFillRectBatch"; }
+
+ SkString dumpInfo() const override {
+ SkString str;
+ str.appendf("# batched: %d\n", fRectCnt);
+ const RectInfo* info = this->first();
+ for (int i = 0; i < fRectCnt; ++i) {
+ const SkRect& rect = info->rect();
+ str.appendf("%d: Color: 0x%08x, Rect [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
+ i, info->color(), rect.fLeft, rect.fTop, rect.fRight, rect.fBottom);
+ info = this->next(info);
+ }
+ str.append(INHERITED::dumpInfo());
+ return str;
+ }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one rect
+ color->setKnownFourComponents(this->first()->color());
+ coverage->setUnknownSingleComponent();
+ }
+
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ GrColor color;
+ if (overrides.getOverrideColorIfSet(&color)) {
+ this->first()->setColor(color);
+ }
+ fOverrides = overrides;
+ }
+
+private:
+ void onPrepareDraws(Target* target) const override {
+ bool needLocalCoords = fOverrides.readsLocalCoords();
+ using namespace GrDefaultGeoProcFactory;
+
+ Color color(Color::kAttribute_Type);
+ Coverage::Type coverageType;
+ if (fOverrides.canTweakAlphaForCoverage()) {
+ coverageType = Coverage::kSolid_Type;
+ } else {
+ coverageType = Coverage::kAttribute_Type;
+ }
+ Coverage coverage(coverageType);
+ LocalCoords lc = needLocalCoords ? LocalCoords::kHasExplicit_Type
+ : LocalCoords::kUnused_Type;
+ sk_sp<GrGeometryProcessor> gp = GrDefaultGeoProcFactory::Make(color, coverage, lc,
+ SkMatrix::I());
+ if (!gp) {
+ SkDebugf("Couldn't create GrGeometryProcessor\n");
+ return;
+ }
+
+ size_t vertexStride = gp->getVertexStride();
+
+ SkAutoTUnref<const GrBuffer> indexBuffer(get_index_buffer(target->resourceProvider()));
+ InstancedHelper helper;
+ void* vertices = helper.init(target, kTriangles_GrPrimitiveType, vertexStride,
+ indexBuffer, kVertsPerAAFillRect,
+ kIndicesPerAAFillRect, fRectCnt);
+ if (!vertices || !indexBuffer) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ const RectInfo* info = this->first();
+ const SkMatrix* localMatrix = nullptr;
+ for (int i = 0; i < fRectCnt; i++) {
+ intptr_t verts = reinterpret_cast<intptr_t>(vertices) +
+ i * kVertsPerAAFillRect * vertexStride;
+ if (needLocalCoords) {
+ if (info->hasLocalMatrix()) {
+ localMatrix = &static_cast<const RectWithLocalMatrixInfo*>(info)->localMatrix();
+ } else {
+ localMatrix = &SkMatrix::I();
+ }
+ }
+ generate_aa_fill_rect_geometry(verts, vertexStride, info->color(),
+ info->viewMatrix(), info->rect(),
+ info->devRect(), fOverrides, localMatrix);
+ info = this->next(info);
+ }
+ helper.recordDraw(target, gp.get());
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ AAFillRectBatch* that = t->cast<AAFillRectBatch>();
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+        // If one batch can tweak alpha for coverage and the other cannot, just fall back to
+        // not tweaking.
+ if (fOverrides.canTweakAlphaForCoverage() && !that->fOverrides.canTweakAlphaForCoverage()) {
+ fOverrides = that->fOverrides;
+ }
+
+ fRectData.push_back_n(that->fRectData.count(), that->fRectData.begin());
+ fRectCnt += that->fRectCnt;
+ this->joinBounds(*that);
+ return true;
+ }
+
+ struct RectInfo {
+ public:
+ RectInfo(GrColor color, const SkMatrix& viewMatrix, const SkRect& rect,
+ const SkRect& devRect)
+ : RectInfo(color, viewMatrix, rect, devRect, HasLocalMatrix::kNo) {}
+ bool hasLocalMatrix() const { return HasLocalMatrix::kYes == fHasLocalMatrix; }
+ GrColor color() const { return fColor; }
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+ const SkRect& rect() const { return fRect; }
+ const SkRect& devRect() const { return fDevRect; }
+
+ void setColor(GrColor color) { fColor = color; }
+ protected:
+ enum class HasLocalMatrix : uint32_t { kNo, kYes };
+
+ RectInfo(GrColor color, const SkMatrix& viewMatrix, const SkRect& rect,
+ const SkRect& devRect, HasLocalMatrix hasLM)
+ : fHasLocalMatrix(hasLM)
+ , fColor(color)
+ , fViewMatrix(viewMatrix)
+ , fRect(rect)
+ , fDevRect(devRect) {}
+
+ HasLocalMatrix fHasLocalMatrix;
+ GrColor fColor;
+ SkMatrix fViewMatrix;
+ SkRect fRect;
+ SkRect fDevRect;
+ };
+
+ struct RectWithLocalMatrixInfo : public RectInfo {
+ public:
+ RectWithLocalMatrixInfo(GrColor color, const SkMatrix& viewMatrix, const SkRect& rect,
+ const SkRect& devRect, const SkMatrix& localMatrix)
+ : RectInfo(color, viewMatrix, rect, devRect, HasLocalMatrix::kYes)
+ , fLocalMatrix(localMatrix) {}
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+ private:
+ SkMatrix fLocalMatrix;
+ };
+
+ RectInfo* first() { return reinterpret_cast<RectInfo*>(fRectData.begin()); }
+ const RectInfo* first() const { return reinterpret_cast<const RectInfo*>(fRectData.begin()); }
+ const RectInfo* next(const RectInfo* prev) const {
+ intptr_t next = reinterpret_cast<intptr_t>(prev) +
+ (prev->hasLocalMatrix() ? sizeof(RectWithLocalMatrixInfo)
+ : sizeof(RectInfo));
+ return reinterpret_cast<const RectInfo*>(next);
+ }
+
+ GrXPOverridesForBatch fOverrides;
+ SkSTArray<4 * sizeof(RectWithLocalMatrixInfo), uint8_t, true> fRectData;
+ int fRectCnt;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+namespace GrAAFillRectBatch {
+
+GrDrawBatch* Create(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkRect& devRect) {
+ return new AAFillRectBatch(color, viewMatrix, rect, devRect, nullptr);
+}
+
+GrDrawBatch* Create(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& localMatrix,
+ const SkRect& rect,
+ const SkRect& devRect) {
+ return new AAFillRectBatch(color, viewMatrix, rect, devRect, &localMatrix);
+}
+
+GrDrawBatch* Create(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& localMatrix,
+ const SkRect& rect) {
+ SkRect devRect;
+ viewMatrix.mapRect(&devRect, rect);
+ return Create(color, viewMatrix, localMatrix, rect, devRect);
+}
+
+GrDrawBatch* CreateWithLocalRect(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkRect& localRect) {
+ SkRect devRect;
+ viewMatrix.mapRect(&devRect, rect);
+ SkMatrix localMatrix;
+ if (!localMatrix.setRectToRect(rect, localRect, SkMatrix::kFill_ScaleToFit)) {
+ return nullptr;
+ }
+ return Create(color, viewMatrix, localMatrix, rect, devRect);
+}
+
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_TEST_UTILS
+
+#include "GrBatchTest.h"
+
+DRAW_BATCH_TEST_DEFINE(AAFillRectBatch) {
+ GrColor color = GrRandomColor(random);
+ SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
+ SkRect rect = GrTest::TestRect(random);
+ SkRect devRect = GrTest::TestRect(random);
+ return GrAAFillRectBatch::Create(color, viewMatrix, rect, devRect);
+}
+
+DRAW_BATCH_TEST_DEFINE(AAFillRectBatchLocalMatrix) {
+ GrColor color = GrRandomColor(random);
+ SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
+ SkMatrix localMatrix = GrTest::TestMatrix(random);
+ SkRect rect = GrTest::TestRect(random);
+ SkRect devRect = GrTest::TestRect(random);
+ return GrAAFillRectBatch::Create(color, viewMatrix, localMatrix, rect, devRect);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrAAFillRectBatch.h b/gfx/skia/skia/src/gpu/batches/GrAAFillRectBatch.h
new file mode 100644
index 000000000..1dbec995f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrAAFillRectBatch.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAAFillRectBatch_DEFINED
+#define GrAAFillRectBatch_DEFINED
+
+#include "GrColor.h"
+
+class GrBatch;
+class GrDrawBatch;
+class SkMatrix;
+struct SkRect;
+
+namespace GrAAFillRectBatch {
+GrDrawBatch* Create(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkRect& devRect);
+
+GrDrawBatch* Create(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& localMatrix,
+ const SkRect& rect);
+
+GrDrawBatch* Create(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& localMatrix,
+ const SkRect& rect,
+ const SkRect& devRect);
+
+GrDrawBatch* CreateWithLocalRect(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkRect& localRect);
+};
+
+#endif
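For context, a minimal sketch of how the factory functions declared above might be used; `color`, `viewMatrix`, and the draw-context plumbing are assumed to exist, and the snippet simply mirrors the batch-handling pattern used elsewhere in this patch.

    // map the user-space rect to device space, then build the batch
    SkRect rect = SkRect::MakeWH(100.0f, 50.0f);
    SkRect devRect;
    viewMatrix.mapRect(&devRect, rect);
    SkAutoTUnref<GrDrawBatch> batch(
            GrAAFillRectBatch::Create(color, viewMatrix, rect, devRect));
    // the batch is then submitted through a GrPipelineBuilder / drawBatch() call,
    // as GrAADistanceFieldPathRenderer::onDrawPath does above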
diff --git a/gfx/skia/skia/src/gpu/batches/GrAAHairLinePathRenderer.cpp b/gfx/skia/skia/src/gpu/batches/GrAAHairLinePathRenderer.cpp
new file mode 100644
index 000000000..9d73cf4f1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrAAHairLinePathRenderer.cpp
@@ -0,0 +1,993 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrAAHairLinePathRenderer.h"
+
+#include "GrBatchFlushState.h"
+#include "GrBatchTest.h"
+#include "GrBuffer.h"
+#include "GrCaps.h"
+#include "GrContext.h"
+#include "GrDefaultGeoProcFactory.h"
+#include "GrPathUtils.h"
+#include "GrPipelineBuilder.h"
+#include "GrProcessor.h"
+#include "GrResourceProvider.h"
+#include "SkGeometry.h"
+#include "SkStroke.h"
+#include "SkTemplates.h"
+
+#include "batches/GrVertexBatch.h"
+
+#include "effects/GrBezierEffect.h"
+
+#define PREALLOC_PTARRAY(N) SkSTArray<(N),SkPoint, true>
+
+// quadratics are rendered as 5-sided polys in order to bound the
+// AA stroke around the center-curve. See comments in push_quad_index_buffer and
+// bloat_quad. Quadratics and conics share an index buffer
+
+// lines are rendered as:
+// *______________*
+// |\ -_______ /|
+// | \ \ / |
+// | *--------* |
+// | / ______/ \ |
+// */_-__________\*
+// For: 6 vertices and 18 indices (for 6 triangles)
+
+// Each quadratic is rendered as a five sided polygon. This poly bounds
+// the quadratic's bounding triangle but has been expanded so that the
+// 1-pixel wide area around the curve is inside the poly.
+// If a,b,c are the original control points then the poly a0,b0,c0,c1,a1
+// that is rendered would look like this:
+// b0
+// b
+//
+// a0 c0
+// a c
+// a1 c1
+// Each is drawn as three triangles ((a0,a1,b0), (b0,c1,c0), (a1,c1,b0))
+// specified by these 9 indices:
+static const uint16_t kQuadIdxBufPattern[] = {
+ 0, 1, 2,
+ 2, 4, 3,
+ 1, 4, 2
+};
+
+static const int kIdxsPerQuad = SK_ARRAY_COUNT(kQuadIdxBufPattern);
+static const int kQuadNumVertices = 5;
+static const int kQuadsNumInIdxBuffer = 256;
+GR_DECLARE_STATIC_UNIQUE_KEY(gQuadsIndexBufferKey);
+
+static const GrBuffer* ref_quads_index_buffer(GrResourceProvider* resourceProvider) {
+ GR_DEFINE_STATIC_UNIQUE_KEY(gQuadsIndexBufferKey);
+ return resourceProvider->findOrCreateInstancedIndexBuffer(
+ kQuadIdxBufPattern, kIdxsPerQuad, kQuadsNumInIdxBuffer, kQuadNumVertices,
+ gQuadsIndexBufferKey);
+}
+
+
+// Each line segment is rendered as two quads and two triangles.
+// p0 and p1 have alpha = 1 while all other points have alpha = 0.
+// The four external points are offset 1 pixel perpendicular to the
+// line and half a pixel parallel to the line.
+//
+// p4 p5
+// p0 p1
+// p2 p3
+//
+// Each is drawn as six triangles specified by these 18 indices:
+
+static const uint16_t kLineSegIdxBufPattern[] = {
+ 0, 1, 3,
+ 0, 3, 2,
+ 0, 4, 5,
+ 0, 5, 1,
+ 0, 2, 4,
+ 1, 5, 3
+};
+
+static const int kIdxsPerLineSeg = SK_ARRAY_COUNT(kLineSegIdxBufPattern);
+static const int kLineSegNumVertices = 6;
+static const int kLineSegsNumInIdxBuffer = 256;
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gLinesIndexBufferKey);
+
+static const GrBuffer* ref_lines_index_buffer(GrResourceProvider* resourceProvider) {
+ GR_DEFINE_STATIC_UNIQUE_KEY(gLinesIndexBufferKey);
+ return resourceProvider->findOrCreateInstancedIndexBuffer(
+ kLineSegIdxBufPattern, kIdxsPerLineSeg, kLineSegsNumInIdxBuffer, kLineSegNumVertices,
+ gLinesIndexBufferKey);
+}
+
+// Takes about 1/178th the time of logf on Z600 / VC2010
+static int get_float_exp(float x) {
+ GR_STATIC_ASSERT(sizeof(int) == sizeof(float));
+#ifdef SK_DEBUG
+ static bool tested;
+ if (!tested) {
+ tested = true;
+ SkASSERT(get_float_exp(0.25f) == -2);
+ SkASSERT(get_float_exp(0.3f) == -2);
+ SkASSERT(get_float_exp(0.5f) == -1);
+ SkASSERT(get_float_exp(1.f) == 0);
+ SkASSERT(get_float_exp(2.f) == 1);
+ SkASSERT(get_float_exp(2.5f) == 1);
+ SkASSERT(get_float_exp(8.f) == 3);
+ SkASSERT(get_float_exp(100.f) == 6);
+ SkASSERT(get_float_exp(1000.f) == 9);
+ SkASSERT(get_float_exp(1024.f) == 10);
+ SkASSERT(get_float_exp(3000000.f) == 21);
+ }
+#endif
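+ // Reinterpret the float's bits, mask off the biased exponent field (bits 23..30)
+ // and subtract the IEEE-754 bias of 127 to get floor(log2(x)).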
+ const int* iptr = (const int*)&x;
+ return (((*iptr) & 0x7f800000) >> 23) - 127;
+}
+
+// Uses the max curvature function for quads to estimate
+// where to chop the conic. If the max curvature is not
+// found along the curve segment it will return 1 and
+// dst[0] is the original conic. If it returns 2 then dst[0]
+// and dst[1] are the two new conics.
+static int split_conic(const SkPoint src[3], SkConic dst[2], const SkScalar weight) {
+ SkScalar t = SkFindQuadMaxCurvature(src);
+ if (t == 0) {
+ if (dst) {
+ dst[0].set(src, weight);
+ }
+ return 1;
+ } else {
+ if (dst) {
+ SkConic conic;
+ conic.set(src, weight);
+ if (!conic.chopAt(t, dst)) {
+ dst[0].set(src, weight);
+ return 1;
+ }
+ }
+ return 2;
+ }
+}
+
+// Calls split_conic on the entire conic and then once more on each subsection.
+// Most cases will result in either 1 conic (chop point is not within t range)
+// or 3 conics (split once and then one subsection is split again).
+static int chop_conic(const SkPoint src[3], SkConic dst[4], const SkScalar weight) {
+ SkConic dstTemp[2];
+ int conicCnt = split_conic(src, dstTemp, weight);
+ if (2 == conicCnt) {
+ int conicCnt2 = split_conic(dstTemp[0].fPts, dst, dstTemp[0].fW);
+ conicCnt = conicCnt2 + split_conic(dstTemp[1].fPts, &dst[conicCnt2], dstTemp[1].fW);
+ } else {
+ dst[0] = dstTemp[0];
+ }
+ return conicCnt;
+}
+
+// returns 1 if the quad/conic is degenerate or close to it,
+// in which case the path should be approximated with lines;
+// otherwise returns 0
+static int is_degen_quad_or_conic(const SkPoint p[3], SkScalar* dsqd) {
+ static const SkScalar gDegenerateToLineTol = GrPathUtils::kDefaultTolerance;
+ static const SkScalar gDegenerateToLineTolSqd =
+ SkScalarMul(gDegenerateToLineTol, gDegenerateToLineTol);
+
+ if (p[0].distanceToSqd(p[1]) < gDegenerateToLineTolSqd ||
+ p[1].distanceToSqd(p[2]) < gDegenerateToLineTolSqd) {
+ return 1;
+ }
+
+ *dsqd = p[1].distanceToLineBetweenSqd(p[0], p[2]);
+ if (*dsqd < gDegenerateToLineTolSqd) {
+ return 1;
+ }
+
+ if (p[2].distanceToLineBetweenSqd(p[1], p[0]) < gDegenerateToLineTolSqd) {
+ return 1;
+ }
+ return 0;
+}
+
+static int is_degen_quad_or_conic(const SkPoint p[3]) {
+ SkScalar dsqd;
+ return is_degen_quad_or_conic(p, &dsqd);
+}
+
+// we subdivide the quads to avoid huge overfill
+// if it returns -1 then should be drawn as lines
+static int num_quad_subdivs(const SkPoint p[3]) {
+ SkScalar dsqd;
+ if (is_degen_quad_or_conic(p, &dsqd)) {
+ return -1;
+ }
+
+ // tolerance of triangle height in pixels
+ // tuned on windows Quadro FX 380 / Z600
+ // trade off of fill vs cpu time on verts
+ // maybe different when do this using gpu (geo or tess shaders)
+ static const SkScalar gSubdivTol = 175 * SK_Scalar1;
+
+ if (dsqd <= SkScalarMul(gSubdivTol, gSubdivTol)) {
+ return 0;
+ } else {
+ static const int kMaxSub = 4;
+ // subdividing the quad reduces d by 4. so we want x = log4(d/tol)
+ // = log4(d*d/tol*tol)/2
+ // = log2(d*d/tol*tol)
+
+ // +1 since we're ignoring the mantissa contribution.
+ int log = get_float_exp(dsqd/(gSubdivTol*gSubdivTol)) + 1;
+ log = SkTMin(SkTMax(0, log), kMaxSub);
+ return log;
+ }
+}
+
+/**
+ * Generates the lines and quads to be rendered. Lines are always recorded in
+ * device space. We will do a device space bloat to account for the 1pixel
+ * thickness.
+ * Quads are recorded in device space unless m contains
+ * perspective, in which case they are in src space. We do this because we will
+ * subdivide large quads to reduce over-fill. This subdivision has to be
+ * performed before applying the perspective matrix.
+ */
+static int gather_lines_and_quads(const SkPath& path,
+ const SkMatrix& m,
+ const SkIRect& devClipBounds,
+ GrAAHairLinePathRenderer::PtArray* lines,
+ GrAAHairLinePathRenderer::PtArray* quads,
+ GrAAHairLinePathRenderer::PtArray* conics,
+ GrAAHairLinePathRenderer::IntArray* quadSubdivCnts,
+ GrAAHairLinePathRenderer::FloatArray* conicWeights) {
+ SkPath::Iter iter(path, false);
+
+ int totalQuadCount = 0;
+ SkRect bounds;
+ SkIRect ibounds;
+
+ bool persp = m.hasPerspective();
+
+ for (;;) {
+ SkPoint pathPts[4];
+ SkPoint devPts[4];
+ SkPath::Verb verb = iter.next(pathPts);
+ switch (verb) {
+ case SkPath::kConic_Verb: {
+ SkConic dst[4];
+ // We chop the conics to create tighter clipping to hide error
+ // that appears near max curvature of very thin conics. Thin
+ // hyperbolas with high weight still show error.
+ int conicCnt = chop_conic(pathPts, dst, iter.conicWeight());
+ for (int i = 0; i < conicCnt; ++i) {
+ SkPoint* chopPnts = dst[i].fPts;
+ m.mapPoints(devPts, chopPnts, 3);
+ bounds.setBounds(devPts, 3);
+ bounds.outset(SK_Scalar1, SK_Scalar1);
+ bounds.roundOut(&ibounds);
+ if (SkIRect::Intersects(devClipBounds, ibounds)) {
+ if (is_degen_quad_or_conic(devPts)) {
+ SkPoint* pts = lines->push_back_n(4);
+ pts[0] = devPts[0];
+ pts[1] = devPts[1];
+ pts[2] = devPts[1];
+ pts[3] = devPts[2];
+ } else {
+ // when in perspective keep conics in src space
+ SkPoint* cPts = persp ? chopPnts : devPts;
+ SkPoint* pts = conics->push_back_n(3);
+ pts[0] = cPts[0];
+ pts[1] = cPts[1];
+ pts[2] = cPts[2];
+ conicWeights->push_back() = dst[i].fW;
+ }
+ }
+ }
+ break;
+ }
+ case SkPath::kMove_Verb:
+ break;
+ case SkPath::kLine_Verb:
+ m.mapPoints(devPts, pathPts, 2);
+ bounds.setBounds(devPts, 2);
+ bounds.outset(SK_Scalar1, SK_Scalar1);
+ bounds.roundOut(&ibounds);
+ if (SkIRect::Intersects(devClipBounds, ibounds)) {
+ SkPoint* pts = lines->push_back_n(2);
+ pts[0] = devPts[0];
+ pts[1] = devPts[1];
+ }
+ break;
+ case SkPath::kQuad_Verb: {
+ SkPoint choppedPts[5];
+ // Chopping the quad helps when the quad is either degenerate or nearly degenerate.
+ // When it is degenerate it allows the approximation with lines to work since the
+ // chop point (if there is one) will be at the parabola's vertex. In the nearly
+ // degenerate case, the QuadUVMatrix computed for the points is almost singular, which
+ // can cause rendering artifacts.
+ int n = SkChopQuadAtMaxCurvature(pathPts, choppedPts);
+ for (int i = 0; i < n; ++i) {
+ SkPoint* quadPts = choppedPts + i * 2;
+ m.mapPoints(devPts, quadPts, 3);
+ bounds.setBounds(devPts, 3);
+ bounds.outset(SK_Scalar1, SK_Scalar1);
+ bounds.roundOut(&ibounds);
+
+ if (SkIRect::Intersects(devClipBounds, ibounds)) {
+ int subdiv = num_quad_subdivs(devPts);
+ SkASSERT(subdiv >= -1);
+ if (-1 == subdiv) {
+ SkPoint* pts = lines->push_back_n(4);
+ pts[0] = devPts[0];
+ pts[1] = devPts[1];
+ pts[2] = devPts[1];
+ pts[3] = devPts[2];
+ } else {
+ // when in perspective keep quads in src space
+ SkPoint* qPts = persp ? quadPts : devPts;
+ SkPoint* pts = quads->push_back_n(3);
+ pts[0] = qPts[0];
+ pts[1] = qPts[1];
+ pts[2] = qPts[2];
+ quadSubdivCnts->push_back() = subdiv;
+ totalQuadCount += 1 << subdiv;
+ }
+ }
+ }
+ break;
+ }
+ case SkPath::kCubic_Verb:
+ m.mapPoints(devPts, pathPts, 4);
+ bounds.setBounds(devPts, 4);
+ bounds.outset(SK_Scalar1, SK_Scalar1);
+ bounds.roundOut(&ibounds);
+ if (SkIRect::Intersects(devClipBounds, ibounds)) {
+ PREALLOC_PTARRAY(32) q;
+ // We convert cubics to quadratics (for now).
+ // In perspective we have to do the conversion in src space.
+ if (persp) {
+ SkScalar tolScale =
+ GrPathUtils::scaleToleranceToSrc(SK_Scalar1, m, path.getBounds());
+ GrPathUtils::convertCubicToQuads(pathPts, tolScale, &q);
+ } else {
+ GrPathUtils::convertCubicToQuads(devPts, SK_Scalar1, &q);
+ }
+ for (int i = 0; i < q.count(); i += 3) {
+ SkPoint* qInDevSpace;
+ // bounds has to be calculated in device space, but q is
+ // in src space when there is perspective.
+ if (persp) {
+ m.mapPoints(devPts, &q[i], 3);
+ bounds.setBounds(devPts, 3);
+ qInDevSpace = devPts;
+ } else {
+ bounds.setBounds(&q[i], 3);
+ qInDevSpace = &q[i];
+ }
+ bounds.outset(SK_Scalar1, SK_Scalar1);
+ bounds.roundOut(&ibounds);
+ if (SkIRect::Intersects(devClipBounds, ibounds)) {
+ int subdiv = num_quad_subdivs(qInDevSpace);
+ SkASSERT(subdiv >= -1);
+ if (-1 == subdiv) {
+ SkPoint* pts = lines->push_back_n(4);
+ // lines should always be in device coords
+ pts[0] = qInDevSpace[0];
+ pts[1] = qInDevSpace[1];
+ pts[2] = qInDevSpace[1];
+ pts[3] = qInDevSpace[2];
+ } else {
+ SkPoint* pts = quads->push_back_n(3);
+ // q is already in device coords when there is no
+ // perspective and in src space otherwise.
+ pts[0] = q[0 + i];
+ pts[1] = q[1 + i];
+ pts[2] = q[2 + i];
+ quadSubdivCnts->push_back() = subdiv;
+ totalQuadCount += 1 << subdiv;
+ }
+ }
+ }
+ }
+ break;
+ case SkPath::kClose_Verb:
+ break;
+ case SkPath::kDone_Verb:
+ return totalQuadCount;
+ }
+ }
+}
+
+struct LineVertex {
+ SkPoint fPos;
+ float fCoverage;
+};
+
+struct BezierVertex {
+ SkPoint fPos;
+ union {
+ struct {
+ SkScalar fK;
+ SkScalar fL;
+ SkScalar fM;
+ } fConic;
+ SkVector fQuadCoord;
+ struct {
+ SkScalar fBogus[4];
+ };
+ };
+};
+
+GR_STATIC_ASSERT(sizeof(BezierVertex) == 3 * sizeof(SkPoint));
+
+static void intersect_lines(const SkPoint& ptA, const SkVector& normA,
+ const SkPoint& ptB, const SkVector& normB,
+ SkPoint* result) {
+
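+ // Each input line is expressed implicitly as N . P + w = 0; lineAW and lineBW
+ // are the two w terms. The intersection point solves the resulting 2x2 linear
+ // system via Cramer's rule, with wInv holding 1 / determinant.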
+ SkScalar lineAW = -normA.dot(ptA);
+ SkScalar lineBW = -normB.dot(ptB);
+
+ SkScalar wInv = SkScalarMul(normA.fX, normB.fY) -
+ SkScalarMul(normA.fY, normB.fX);
+ wInv = SkScalarInvert(wInv);
+
+ result->fX = SkScalarMul(normA.fY, lineBW) - SkScalarMul(lineAW, normB.fY);
+ result->fX = SkScalarMul(result->fX, wInv);
+
+ result->fY = SkScalarMul(lineAW, normB.fX) - SkScalarMul(normA.fX, lineBW);
+ result->fY = SkScalarMul(result->fY, wInv);
+}
+
+static void set_uv_quad(const SkPoint qpts[3], BezierVertex verts[kQuadNumVertices]) {
+ // this should be in the src space, not dev coords, when we have perspective
+ GrPathUtils::QuadUVMatrix DevToUV(qpts);
+ DevToUV.apply<kQuadNumVertices, sizeof(BezierVertex), sizeof(SkPoint)>(verts);
+}
+
+static void bloat_quad(const SkPoint qpts[3], const SkMatrix* toDevice,
+ const SkMatrix* toSrc, BezierVertex verts[kQuadNumVertices]) {
+ SkASSERT(!toDevice == !toSrc);
+ // original quad is specified by tri a,b,c
+ SkPoint a = qpts[0];
+ SkPoint b = qpts[1];
+ SkPoint c = qpts[2];
+
+ if (toDevice) {
+ toDevice->mapPoints(&a, 1);
+ toDevice->mapPoints(&b, 1);
+ toDevice->mapPoints(&c, 1);
+ }
+ // make a new poly where we replace a and c by 1-pixel wide edges orthogonal
+ // to edges ab and bc:
+ //
+ // before | after
+ // | b0
+ // b |
+ // |
+ // | a0 c0
+ // a c | a1 c1
+ //
+ // edges a0->b0 and b0->c0 are parallel to original edges a->b and b->c,
+ // respectively.
+ BezierVertex& a0 = verts[0];
+ BezierVertex& a1 = verts[1];
+ BezierVertex& b0 = verts[2];
+ BezierVertex& c0 = verts[3];
+ BezierVertex& c1 = verts[4];
+
+ SkVector ab = b;
+ ab -= a;
+ SkVector ac = c;
+ ac -= a;
+ SkVector cb = b;
+ cb -= c;
+
+ // We should have already handled degenerates
+ SkASSERT(ab.length() > 0 && cb.length() > 0);
+
+ ab.normalize();
+ SkVector abN;
+ abN.setOrthog(ab, SkVector::kLeft_Side);
+ if (abN.dot(ac) > 0) {
+ abN.negate();
+ }
+
+ cb.normalize();
+ SkVector cbN;
+ cbN.setOrthog(cb, SkVector::kLeft_Side);
+ if (cbN.dot(ac) < 0) {
+ cbN.negate();
+ }
+
+ a0.fPos = a;
+ a0.fPos += abN;
+ a1.fPos = a;
+ a1.fPos -= abN;
+
+ c0.fPos = c;
+ c0.fPos += cbN;
+ c1.fPos = c;
+ c1.fPos -= cbN;
+
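+ // b0 is placed where the two offset edges meet, closing the 5-sided bounding poly.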
+ intersect_lines(a0.fPos, abN, c0.fPos, cbN, &b0.fPos);
+
+ if (toSrc) {
+ toSrc->mapPointsWithStride(&verts[0].fPos, sizeof(BezierVertex), kQuadNumVertices);
+ }
+}
+
+// Equations based off of Loop-Blinn Quadratic GPU Rendering
+// Input Parametric:
+// P(t) = (P0*(1-t)^2 + 2*w*P1*t*(1-t) + P2*t^2) / ((1-t)^2 + 2*w*t*(1-t) + t^2)
+// Output Implicit:
+// f(x, y, w) = f(P) = K^2 - LM
+// K = dot(k, P), L = dot(l, P), M = dot(m, P)
+// k, l, m are calculated in function GrPathUtils::getConicKLM
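+// Each vertex is assigned (K, L, M) here so the rasterizer can interpolate them
+// and the conic effect can evaluate the implicit f = K^2 - LM per pixel.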
+static void set_conic_coeffs(const SkPoint p[3], BezierVertex verts[kQuadNumVertices],
+ const SkScalar weight) {
+ SkScalar klm[9];
+
+ GrPathUtils::getConicKLM(p, weight, klm);
+
+ for (int i = 0; i < kQuadNumVertices; ++i) {
+ const SkPoint pnt = verts[i].fPos;
+ verts[i].fConic.fK = pnt.fX * klm[0] + pnt.fY * klm[1] + klm[2];
+ verts[i].fConic.fL = pnt.fX * klm[3] + pnt.fY * klm[4] + klm[5];
+ verts[i].fConic.fM = pnt.fX * klm[6] + pnt.fY * klm[7] + klm[8];
+ }
+}
+
+static void add_conics(const SkPoint p[3],
+ const SkScalar weight,
+ const SkMatrix* toDevice,
+ const SkMatrix* toSrc,
+ BezierVertex** vert) {
+ bloat_quad(p, toDevice, toSrc, *vert);
+ set_conic_coeffs(p, *vert, weight);
+ *vert += kQuadNumVertices;
+}
+
+static void add_quads(const SkPoint p[3],
+ int subdiv,
+ const SkMatrix* toDevice,
+ const SkMatrix* toSrc,
+ BezierVertex** vert) {
+ SkASSERT(subdiv >= 0);
+ if (subdiv) {
+ SkPoint newP[5];
+ SkChopQuadAtHalf(p, newP);
+ add_quads(newP + 0, subdiv-1, toDevice, toSrc, vert);
+ add_quads(newP + 2, subdiv-1, toDevice, toSrc, vert);
+ } else {
+ bloat_quad(p, toDevice, toSrc, *vert);
+ set_uv_quad(p, *vert);
+ *vert += kQuadNumVertices;
+ }
+}
+
+static void add_line(const SkPoint p[2],
+ const SkMatrix* toSrc,
+ uint8_t coverage,
+ LineVertex** vert) {
+ const SkPoint& a = p[0];
+ const SkPoint& b = p[1];
+
+ SkVector ortho, vec = b;
+ vec -= a;
+
+ if (vec.setLength(SK_ScalarHalf)) {
+ // Create a vector orthogonal to 'vec' and of unit length
+ ortho.fX = 2.0f * vec.fY;
+ ortho.fY = -2.0f * vec.fX;
+
+ float floatCoverage = GrNormalizeByteToFloat(coverage);
+
+ (*vert)[0].fPos = a;
+ (*vert)[0].fCoverage = floatCoverage;
+ (*vert)[1].fPos = b;
+ (*vert)[1].fCoverage = floatCoverage;
+ (*vert)[2].fPos = a - vec + ortho;
+ (*vert)[2].fCoverage = 0;
+ (*vert)[3].fPos = b + vec + ortho;
+ (*vert)[3].fCoverage = 0;
+ (*vert)[4].fPos = a - vec - ortho;
+ (*vert)[4].fCoverage = 0;
+ (*vert)[5].fPos = b + vec - ortho;
+ (*vert)[5].fCoverage = 0;
+
+ if (toSrc) {
+ toSrc->mapPointsWithStride(&(*vert)->fPos,
+ sizeof(LineVertex),
+ kLineSegNumVertices);
+ }
+ } else {
+ // just make it degenerate and likely offscreen
+ for (int i = 0; i < kLineSegNumVertices; ++i) {
+ (*vert)[i].fPos.set(SK_ScalarMax, SK_ScalarMax);
+ }
+ }
+
+ *vert += kLineSegNumVertices;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool GrAAHairLinePathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ if (!args.fAntiAlias) {
+ return false;
+ }
+
+ if (!IsStrokeHairlineOrEquivalent(args.fShape->style(), *args.fViewMatrix, nullptr)) {
+ return false;
+ }
+
+ // We don't currently handle dashing in this class though perhaps we should.
+ if (args.fShape->style().pathEffect()) {
+ return false;
+ }
+
+ if (SkPath::kLine_SegmentMask == args.fShape->segmentMask() ||
+ args.fShaderCaps->shaderDerivativeSupport()) {
+ return true;
+ }
+
+ return false;
+}
+
+template <class VertexType>
+bool check_bounds(const SkMatrix& viewMatrix, const SkRect& devBounds, void* vertices, int vCount)
+{
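+ // Maps each generated vertex back through the view matrix and checks that it falls
+ // within the expected device-space bounds (with a small tolerance under perspective).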
+ SkRect tolDevBounds = devBounds;
+ // The bounds ought to be tight, but in perspective the below code runs the verts
+ // through the view matrix to get back to dev coords, which can introduce imprecision.
+ if (viewMatrix.hasPerspective()) {
+ tolDevBounds.outset(SK_Scalar1 / 1000, SK_Scalar1 / 1000);
+ } else {
+ // Non-persp matrices cause this path renderer to draw in device space.
+ SkASSERT(viewMatrix.isIdentity());
+ }
+ SkRect actualBounds;
+
+ VertexType* verts = reinterpret_cast<VertexType*>(vertices);
+ bool first = true;
+ for (int i = 0; i < vCount; ++i) {
+ SkPoint pos = verts[i].fPos;
+ // This is a hack to work around the fact that we move some degenerate segments offscreen.
+ if (SK_ScalarMax == pos.fX) {
+ continue;
+ }
+ viewMatrix.mapPoints(&pos, 1);
+ if (first) {
+ actualBounds.set(pos.fX, pos.fY, pos.fX, pos.fY);
+ first = false;
+ } else {
+ actualBounds.growToInclude(pos.fX, pos.fY);
+ }
+ }
+ if (!first) {
+ return tolDevBounds.contains(actualBounds);
+ }
+
+ return true;
+}
+
+class AAHairlineBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ AAHairlineBatch(GrColor color,
+ uint8_t coverage,
+ const SkMatrix& viewMatrix,
+ const SkPath& path,
+ SkIRect devClipBounds) : INHERITED(ClassID()) {
+ fGeoData.emplace_back(Geometry{color, coverage, viewMatrix, path, devClipBounds});
+
+ this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kYes,
+ IsZeroArea::kYes);
+ }
+
+ const char* name() const override { return "AAHairlineBatch"; }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one geometry bundle
+ color->setKnownFourComponents(fGeoData[0].fColor);
+ coverage->setUnknownSingleComponent();
+ }
+
+private:
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ // Handle any color overrides
+ if (!overrides.readsColor()) {
+ fGeoData[0].fColor = GrColor_ILLEGAL;
+ }
+ overrides.getOverrideColorIfSet(&fGeoData[0].fColor);
+
+ // setup batch properties
+ fBatch.fColorIgnored = !overrides.readsColor();
+ fBatch.fColor = fGeoData[0].fColor;
+ fBatch.fUsesLocalCoords = overrides.readsLocalCoords();
+ fBatch.fCoverageIgnored = !overrides.readsCoverage();
+ fBatch.fCoverage = fGeoData[0].fCoverage;
+ }
+
+ void onPrepareDraws(Target*) const override;
+
+ typedef SkTArray<SkPoint, true> PtArray;
+ typedef SkTArray<int, true> IntArray;
+ typedef SkTArray<float, true> FloatArray;
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ AAHairlineBatch* that = t->cast<AAHairlineBatch>();
+
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ if (this->viewMatrix().hasPerspective() != that->viewMatrix().hasPerspective()) {
+ return false;
+ }
+
+ // We go to identity if we don't have perspective
+ if (this->viewMatrix().hasPerspective() &&
+ !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return false;
+ }
+
+ // TODO we can actually batch hairlines if they are the same color in a kind of bulk method
+ // but we haven't implemented this yet
+ // TODO investigate going to vertex color and coverage?
+ if (this->coverage() != that->coverage()) {
+ return false;
+ }
+
+ if (this->color() != that->color()) {
+ return false;
+ }
+
+ SkASSERT(this->usesLocalCoords() == that->usesLocalCoords());
+ if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return false;
+ }
+
+ fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
+ this->joinBounds(*that);
+ return true;
+ }
+
+ GrColor color() const { return fBatch.fColor; }
+ uint8_t coverage() const { return fBatch.fCoverage; }
+ bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
+ const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
+ bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
+
+
+ struct Geometry {
+ GrColor fColor;
+ uint8_t fCoverage;
+ SkMatrix fViewMatrix;
+ SkPath fPath;
+ SkIRect fDevClipBounds;
+ };
+
+ struct BatchTracker {
+ GrColor fColor;
+ uint8_t fCoverage;
+ SkRect fDevBounds;
+ bool fUsesLocalCoords;
+ bool fColorIgnored;
+ bool fCoverageIgnored;
+ };
+
+ BatchTracker fBatch;
+ SkSTArray<1, Geometry, true> fGeoData;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+void AAHairlineBatch::onPrepareDraws(Target* target) const {
+ // Setup the viewmatrix and localmatrix for the GrGeometryProcessor.
+ SkMatrix invert;
+ if (!this->viewMatrix().invert(&invert)) {
+ return;
+ }
+
+ // we will transform to identity space if the viewmatrix does not have perspective
+ bool hasPerspective = this->viewMatrix().hasPerspective();
+ const SkMatrix* geometryProcessorViewM = &SkMatrix::I();
+ const SkMatrix* geometryProcessorLocalM = &invert;
+ const SkMatrix* toDevice = nullptr;
+ const SkMatrix* toSrc = nullptr;
+ if (hasPerspective) {
+ geometryProcessorViewM = &this->viewMatrix();
+ geometryProcessorLocalM = &SkMatrix::I();
+ toDevice = &this->viewMatrix();
+ toSrc = &invert;
+ }
+
+ // This is hand inlined for maximum performance.
+ PREALLOC_PTARRAY(128) lines;
+ PREALLOC_PTARRAY(128) quads;
+ PREALLOC_PTARRAY(128) conics;
+ IntArray qSubdivs;
+ FloatArray cWeights;
+ int quadCount = 0;
+
+ int instanceCount = fGeoData.count();
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& args = fGeoData[i];
+ quadCount += gather_lines_and_quads(args.fPath, args.fViewMatrix, args.fDevClipBounds,
+ &lines, &quads, &conics, &qSubdivs, &cWeights);
+ }
+
+ int lineCount = lines.count() / 2;
+ int conicCount = conics.count() / 3;
+
+ // do lines first
+ if (lineCount) {
+ sk_sp<GrGeometryProcessor> lineGP;
+ {
+ using namespace GrDefaultGeoProcFactory;
+
+ Color color(this->color());
+ Coverage coverage(Coverage::kAttribute_Type);
+ LocalCoords localCoords(this->usesLocalCoords() ? LocalCoords::kUsePosition_Type :
+ LocalCoords::kUnused_Type);
+ localCoords.fMatrix = geometryProcessorLocalM;
+ lineGP = GrDefaultGeoProcFactory::Make(color, coverage, localCoords,
+ *geometryProcessorViewM);
+ }
+
+ SkAutoTUnref<const GrBuffer> linesIndexBuffer(
+ ref_lines_index_buffer(target->resourceProvider()));
+
+ const GrBuffer* vertexBuffer;
+ int firstVertex;
+
+ size_t vertexStride = lineGP->getVertexStride();
+ int vertexCount = kLineSegNumVertices * lineCount;
+ LineVertex* verts = reinterpret_cast<LineVertex*>(
+ target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer, &firstVertex));
+
+ if (!verts || !linesIndexBuffer) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ SkASSERT(lineGP->getVertexStride() == sizeof(LineVertex));
+
+ for (int i = 0; i < lineCount; ++i) {
+ add_line(&lines[2*i], toSrc, this->coverage(), &verts);
+ }
+
+ GrMesh mesh;
+ mesh.initInstanced(kTriangles_GrPrimitiveType, vertexBuffer, linesIndexBuffer,
+ firstVertex, kLineSegNumVertices, kIdxsPerLineSeg, lineCount,
+ kLineSegsNumInIdxBuffer);
+ target->draw(lineGP.get(), mesh);
+ }
+
+ if (quadCount || conicCount) {
+ sk_sp<GrGeometryProcessor> quadGP(
+ GrQuadEffect::Make(this->color(),
+ *geometryProcessorViewM,
+ kHairlineAA_GrProcessorEdgeType,
+ target->caps(),
+ *geometryProcessorLocalM,
+ this->usesLocalCoords(),
+ this->coverage()));
+
+ sk_sp<GrGeometryProcessor> conicGP(
+ GrConicEffect::Make(this->color(),
+ *geometryProcessorViewM,
+ kHairlineAA_GrProcessorEdgeType,
+ target->caps(),
+ *geometryProcessorLocalM,
+ this->usesLocalCoords(),
+ this->coverage()));
+
+ const GrBuffer* vertexBuffer;
+ int firstVertex;
+
+ SkAutoTUnref<const GrBuffer> quadsIndexBuffer(
+ ref_quads_index_buffer(target->resourceProvider()));
+
+ size_t vertexStride = sizeof(BezierVertex);
+ int vertexCount = kQuadNumVertices * quadCount + kQuadNumVertices * conicCount;
+ void *vertices = target->makeVertexSpace(vertexStride, vertexCount,
+ &vertexBuffer, &firstVertex);
+
+ if (!vertices || !quadsIndexBuffer) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ // Setup vertices
+ BezierVertex* bezVerts = reinterpret_cast<BezierVertex*>(vertices);
+
+ int unsubdivQuadCnt = quads.count() / 3;
+ for (int i = 0; i < unsubdivQuadCnt; ++i) {
+ SkASSERT(qSubdivs[i] >= 0);
+ add_quads(&quads[3*i], qSubdivs[i], toDevice, toSrc, &bezVerts);
+ }
+
+ // Start Conics
+ for (int i = 0; i < conicCount; ++i) {
+ add_conics(&conics[3*i], cWeights[i], toDevice, toSrc, &bezVerts);
+ }
+
+ if (quadCount > 0) {
+ GrMesh mesh;
+ mesh.initInstanced(kTriangles_GrPrimitiveType, vertexBuffer, quadsIndexBuffer,
+ firstVertex, kQuadNumVertices, kIdxsPerQuad, quadCount,
+ kQuadsNumInIdxBuffer);
+ target->draw(quadGP.get(), mesh);
+ firstVertex += quadCount * kQuadNumVertices;
+ }
+
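+ // Conics share the quads index buffer and start right after the quad vertices.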
+ if (conicCount > 0) {
+ GrMesh mesh;
+ mesh.initInstanced(kTriangles_GrPrimitiveType, vertexBuffer, quadsIndexBuffer,
+ firstVertex, kQuadNumVertices, kIdxsPerQuad, conicCount,
+ kQuadsNumInIdxBuffer);
+ target->draw(conicGP.get(), mesh);
+ }
+ }
+}
+
+static GrDrawBatch* create_hairline_batch(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkPath& path,
+ const GrStyle& style,
+ const SkIRect& devClipBounds) {
+ SkScalar hairlineCoverage;
+ uint8_t newCoverage = 0xff;
+ if (GrPathRenderer::IsStrokeHairlineOrEquivalent(style, viewMatrix, &hairlineCoverage)) {
+ newCoverage = SkScalarRoundToInt(hairlineCoverage * 0xff);
+ }
+
+ return new AAHairlineBatch(color, newCoverage, viewMatrix, path, devClipBounds);
+}
+
+bool GrAAHairLinePathRenderer::onDrawPath(const DrawPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fDrawContext->auditTrail(),
+ "GrAAHairlinePathRenderer::onDrawPath");
+ SkASSERT(!args.fDrawContext->isUnifiedMultisampled());
+
+ SkIRect devClipBounds;
+ args.fClip->getConservativeBounds(args.fDrawContext->width(), args.fDrawContext->height(),
+ &devClipBounds);
+
+ SkPath path;
+ args.fShape->asPath(&path);
+ SkAutoTUnref<GrDrawBatch> batch(create_hairline_batch(args.fPaint->getColor(),
+ *args.fViewMatrix, path,
+ args.fShape->style(), devClipBounds));
+
+ GrPipelineBuilder pipelineBuilder(*args.fPaint);
+ pipelineBuilder.setUserStencil(args.fUserStencilSettings);
+ args.fDrawContext->drawBatch(pipelineBuilder, *args.fClip, batch);
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_TEST_UTILS
+
+DRAW_BATCH_TEST_DEFINE(AAHairlineBatch) {
+ GrColor color = GrRandomColor(random);
+ SkMatrix viewMatrix = GrTest::TestMatrix(random);
+ SkPath path = GrTest::TestPath(random);
+ SkIRect devClipBounds;
+ devClipBounds.setEmpty();
+ return create_hairline_batch(color, viewMatrix, path, GrStyle::SimpleHairline(), devClipBounds);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrAAHairLinePathRenderer.h b/gfx/skia/skia/src/gpu/batches/GrAAHairLinePathRenderer.h
new file mode 100644
index 000000000..e2406a5c0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrAAHairLinePathRenderer.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAAHairLinePathRenderer_DEFINED
+#define GrAAHairLinePathRenderer_DEFINED
+
+#include "GrPathRenderer.h"
+
+class GrAAHairLinePathRenderer : public GrPathRenderer {
+public:
+ GrAAHairLinePathRenderer() {}
+
+ typedef SkTArray<SkPoint, true> PtArray;
+ typedef SkTArray<int, true> IntArray;
+ typedef SkTArray<float, true> FloatArray;
+
+private:
+ bool onCanDrawPath(const CanDrawPathArgs&) const override;
+
+ bool onDrawPath(const DrawPathArgs&) override;
+
+ typedef GrPathRenderer INHERITED;
+};
+
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp b/gfx/skia/skia/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
new file mode 100644
index 000000000..c2873b6a4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
@@ -0,0 +1,393 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrAALinearizingConvexPathRenderer.h"
+
+#include "GrAAConvexTessellator.h"
+#include "GrBatchFlushState.h"
+#include "GrBatchTest.h"
+#include "GrContext.h"
+#include "GrDefaultGeoProcFactory.h"
+#include "GrGeometryProcessor.h"
+#include "GrInvariantOutput.h"
+#include "GrPathUtils.h"
+#include "GrProcessor.h"
+#include "GrPipelineBuilder.h"
+#include "GrStyle.h"
+#include "SkGeometry.h"
+#include "SkString.h"
+#include "SkTraceEvent.h"
+#include "SkPathPriv.h"
+#include "batches/GrVertexBatch.h"
+#include "glsl/GrGLSLGeometryProcessor.h"
+
+static const int DEFAULT_BUFFER_SIZE = 100;
+
+// The thicker the stroke, the harder it is to produce high-quality results using tessellation. For
+// the time being, we simply drop back to software rendering above this stroke width.
+static const SkScalar kMaxStrokeWidth = 20.0;
+
+GrAALinearizingConvexPathRenderer::GrAALinearizingConvexPathRenderer() {
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool GrAALinearizingConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ if (!args.fAntiAlias) {
+ return false;
+ }
+ if (!args.fShape->knownToBeConvex()) {
+ return false;
+ }
+ if (args.fShape->style().pathEffect()) {
+ return false;
+ }
+ if (args.fShape->inverseFilled()) {
+ return false;
+ }
+ const SkStrokeRec& stroke = args.fShape->style().strokeRec();
+
+ if (stroke.getStyle() == SkStrokeRec::kStroke_Style ||
+ stroke.getStyle() == SkStrokeRec::kStrokeAndFill_Style) {
+ if (!args.fViewMatrix->isSimilarity()) {
+ return false;
+ }
+ SkScalar strokeWidth = args.fViewMatrix->getMaxScale() * stroke.getWidth();
+ if (strokeWidth < 1.0f && stroke.getStyle() == SkStrokeRec::kStroke_Style) {
+ return false;
+ }
+ return strokeWidth <= kMaxStrokeWidth &&
+ args.fShape->knownToBeClosed() &&
+ stroke.getJoin() != SkPaint::Join::kRound_Join;
+ }
+ return stroke.getStyle() == SkStrokeRec::kFill_Style;
+}
+
+// extract the result vertices and indices from the GrAAConvexTessellator
+static void extract_verts(const GrAAConvexTessellator& tess,
+ void* vertices,
+ size_t vertexStride,
+ GrColor color,
+ uint16_t firstIndex,
+ uint16_t* idxs,
+ bool tweakAlphaForCoverage) {
+ intptr_t verts = reinterpret_cast<intptr_t>(vertices);
+
+ for (int i = 0; i < tess.numPts(); ++i) {
+ *((SkPoint*)((intptr_t)verts + i * vertexStride)) = tess.point(i);
+ }
+
+ // Make 'verts' point to the colors
+ verts += sizeof(SkPoint);
+ for (int i = 0; i < tess.numPts(); ++i) {
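+ // If coverage can be folded into alpha, bake it into the per-vertex color;
+ // otherwise write it as a separate float attribute after the color.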
+ if (tweakAlphaForCoverage) {
+ SkASSERT(SkScalarRoundToInt(255.0f * tess.coverage(i)) <= 255);
+ unsigned scale = SkScalarRoundToInt(255.0f * tess.coverage(i));
+ GrColor scaledColor = (0xff == scale) ? color : SkAlphaMulQ(color, scale);
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = scaledColor;
+ } else {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
+ *reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) =
+ tess.coverage(i);
+ }
+ }
+
+ for (int i = 0; i < tess.numIndices(); ++i) {
+ idxs[i] = tess.index(i) + firstIndex;
+ }
+}
+
+static sk_sp<GrGeometryProcessor> create_fill_gp(bool tweakAlphaForCoverage,
+ const SkMatrix& viewMatrix,
+ bool usesLocalCoords,
+ bool coverageIgnored) {
+ using namespace GrDefaultGeoProcFactory;
+
+ Color color(Color::kAttribute_Type);
+ Coverage::Type coverageType;
+ // TODO remove coverage if coverage is ignored
+ /*if (coverageIgnored) {
+ coverageType = Coverage::kNone_Type;
+ } else*/ if (tweakAlphaForCoverage) {
+ coverageType = Coverage::kSolid_Type;
+ } else {
+ coverageType = Coverage::kAttribute_Type;
+ }
+ Coverage coverage(coverageType);
+ LocalCoords localCoords(usesLocalCoords ? LocalCoords::kUsePosition_Type :
+ LocalCoords::kUnused_Type);
+ return MakeForDeviceSpace(color, coverage, localCoords, viewMatrix);
+}
+
+class AAFlatteningConvexPathBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ AAFlatteningConvexPathBatch(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkPath& path,
+ SkScalar strokeWidth,
+ SkStrokeRec::Style style,
+ SkPaint::Join join,
+ SkScalar miterLimit) : INHERITED(ClassID()) {
+ fGeoData.emplace_back(Geometry{ color, viewMatrix, path,
+ strokeWidth, style, join, miterLimit });
+
+ // compute bounds
+ SkRect bounds = path.getBounds();
+ SkScalar w = strokeWidth;
+ if (w > 0) {
+ w /= 2;
+ // If the half stroke width is < 1 then we effectively fall back to bevel joins.
+ if (SkPaint::kMiter_Join == join && w > 1.f) {
+ w *= miterLimit;
+ }
+ bounds.outset(w, w);
+ }
+ this->setTransformedBounds(bounds, viewMatrix, HasAABloat::kYes, IsZeroArea::kNo);
+ }
+
+ const char* name() const override { return "AAConvexBatch"; }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one geometry bundle
+ color->setKnownFourComponents(fGeoData[0].fColor);
+ coverage->setUnknownSingleComponent();
+ }
+
+private:
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ // Handle any color overrides
+ if (!overrides.readsColor()) {
+ fGeoData[0].fColor = GrColor_ILLEGAL;
+ }
+ overrides.getOverrideColorIfSet(&fGeoData[0].fColor);
+
+ // setup batch properties
+ fBatch.fColorIgnored = !overrides.readsColor();
+ fBatch.fColor = fGeoData[0].fColor;
+ fBatch.fUsesLocalCoords = overrides.readsLocalCoords();
+ fBatch.fCoverageIgnored = !overrides.readsCoverage();
+ fBatch.fLinesOnly = SkPath::kLine_SegmentMask == fGeoData[0].fPath.getSegmentMasks();
+ fBatch.fCanTweakAlphaForCoverage = overrides.canTweakAlphaForCoverage();
+ }
+
+ void draw(GrVertexBatch::Target* target, const GrGeometryProcessor* gp, int vertexCount,
+ size_t vertexStride, void* vertices, int indexCount, uint16_t* indices) const {
+ if (vertexCount == 0 || indexCount == 0) {
+ return;
+ }
+ const GrBuffer* vertexBuffer;
+ GrMesh mesh;
+ int firstVertex;
+ void* verts = target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer,
+ &firstVertex);
+ if (!verts) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+ memcpy(verts, vertices, vertexCount * vertexStride);
+
+ const GrBuffer* indexBuffer;
+ int firstIndex;
+ uint16_t* idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
+ if (!idxs) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+ memcpy(idxs, indices, indexCount * sizeof(uint16_t));
+ mesh.initIndexed(kTriangles_GrPrimitiveType, vertexBuffer, indexBuffer, firstVertex,
+ firstIndex, vertexCount, indexCount);
+ target->draw(gp, mesh);
+ }
+
+ void onPrepareDraws(Target* target) const override {
+ bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
+
+ // Setup GrGeometryProcessor
+ sk_sp<GrGeometryProcessor> gp(create_fill_gp(canTweakAlphaForCoverage,
+ this->viewMatrix(),
+ this->usesLocalCoords(),
+ this->coverageIgnored()));
+ if (!gp) {
+ SkDebugf("Couldn't create a GrGeometryProcessor\n");
+ return;
+ }
+
+ size_t vertexStride = gp->getVertexStride();
+
+ SkASSERT(canTweakAlphaForCoverage ?
+ vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorAttr) :
+ vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorCoverageAttr));
+
+ int instanceCount = fGeoData.count();
+
+ int vertexCount = 0;
+ int indexCount = 0;
+ int maxVertices = DEFAULT_BUFFER_SIZE;
+ int maxIndices = DEFAULT_BUFFER_SIZE;
+ uint8_t* vertices = (uint8_t*) sk_malloc_throw(maxVertices * vertexStride);
+ uint16_t* indices = (uint16_t*) sk_malloc_throw(maxIndices * sizeof(uint16_t));
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& args = fGeoData[i];
+ GrAAConvexTessellator tess(args.fStyle, args.fStrokeWidth,
+ args.fJoin, args.fMiterLimit);
+
+ if (!tess.tessellate(args.fViewMatrix, args.fPath)) {
+ continue;
+ }
+
+ int currentIndices = tess.numIndices();
+ SkASSERT(currentIndices <= UINT16_MAX);
+ if (indexCount + currentIndices > UINT16_MAX) {
+ // if we added the current instance, we would overflow the indices we can store in a
+ // uint16_t. Draw what we've got so far and reset.
+ this->draw(target, gp.get(),
+ vertexCount, vertexStride, vertices, indexCount, indices);
+ vertexCount = 0;
+ indexCount = 0;
+ }
+ int currentVertices = tess.numPts();
+ if (vertexCount + currentVertices > maxVertices) {
+ maxVertices = SkTMax(vertexCount + currentVertices, maxVertices * 2);
+ vertices = (uint8_t*) sk_realloc_throw(vertices, maxVertices * vertexStride);
+ }
+ if (indexCount + currentIndices > maxIndices) {
+ maxIndices = SkTMax(indexCount + currentIndices, maxIndices * 2);
+ indices = (uint16_t*) sk_realloc_throw(indices, maxIndices * sizeof(uint16_t));
+ }
+
+ extract_verts(tess, vertices + vertexStride * vertexCount, vertexStride, args.fColor,
+ vertexCount, indices + indexCount, canTweakAlphaForCoverage);
+ vertexCount += currentVertices;
+ indexCount += currentIndices;
+ }
+ this->draw(target, gp.get(), vertexCount, vertexStride, vertices, indexCount, indices);
+ sk_free(vertices);
+ sk_free(indices);
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ AAFlatteningConvexPathBatch* that = t->cast<AAFlatteningConvexPathBatch>();
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ SkASSERT(this->usesLocalCoords() == that->usesLocalCoords());
+ if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return false;
+ }
+
+ // In the event of two batches, one that can tweak alpha and one that cannot, we just
+ // fall back to not tweaking
+ if (this->canTweakAlphaForCoverage() != that->canTweakAlphaForCoverage()) {
+ fBatch.fCanTweakAlphaForCoverage = false;
+ }
+
+ fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
+ this->joinBounds(*that);
+ return true;
+ }
+
+ GrColor color() const { return fBatch.fColor; }
+ bool linesOnly() const { return fBatch.fLinesOnly; }
+ bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
+ bool canTweakAlphaForCoverage() const { return fBatch.fCanTweakAlphaForCoverage; }
+ const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
+ bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
+
+ struct BatchTracker {
+ GrColor fColor;
+ bool fUsesLocalCoords;
+ bool fColorIgnored;
+ bool fCoverageIgnored;
+ bool fLinesOnly;
+ bool fCanTweakAlphaForCoverage;
+ };
+
+ struct Geometry {
+ GrColor fColor;
+ SkMatrix fViewMatrix;
+ SkPath fPath;
+ SkScalar fStrokeWidth;
+ SkStrokeRec::Style fStyle;
+ SkPaint::Join fJoin;
+ SkScalar fMiterLimit;
+ };
+
+ BatchTracker fBatch;
+ SkSTArray<1, Geometry, true> fGeoData;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+bool GrAALinearizingConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fDrawContext->auditTrail(),
+ "GrAALinearizingConvexPathRenderer::onDrawPath");
+ SkASSERT(!args.fDrawContext->isUnifiedMultisampled());
+ SkASSERT(!args.fShape->isEmpty());
+ SkASSERT(!args.fShape->style().pathEffect());
+
+ SkPath path;
+ args.fShape->asPath(&path);
+ bool fill = args.fShape->style().isSimpleFill();
+ const SkStrokeRec& stroke = args.fShape->style().strokeRec();
+ SkScalar strokeWidth = fill ? -1.0f : stroke.getWidth();
+ SkPaint::Join join = fill ? SkPaint::Join::kMiter_Join : stroke.getJoin();
+ SkScalar miterLimit = stroke.getMiter();
+
+ SkAutoTUnref<GrDrawBatch> batch(new AAFlatteningConvexPathBatch(args.fPaint->getColor(),
+ *args.fViewMatrix,
+ path, strokeWidth,
+ stroke.getStyle(),
+ join, miterLimit));
+
+ GrPipelineBuilder pipelineBuilder(*args.fPaint);
+ pipelineBuilder.setUserStencil(args.fUserStencilSettings);
+
+ args.fDrawContext->drawBatch(pipelineBuilder, *args.fClip, batch);
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_TEST_UTILS
+
+DRAW_BATCH_TEST_DEFINE(AAFlatteningConvexPathBatch) {
+ GrColor color = GrRandomColor(random);
+ SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
+ SkPath path = GrTest::TestPathConvex(random);
+
+ SkStrokeRec::Style styles[3] = { SkStrokeRec::kFill_Style,
+ SkStrokeRec::kStroke_Style,
+ SkStrokeRec::kStrokeAndFill_Style };
+
+ SkStrokeRec::Style style = styles[random->nextU() % 3];
+
+ SkScalar strokeWidth = -1.f;
+ SkPaint::Join join = SkPaint::kMiter_Join;
+ SkScalar miterLimit = 0.5f;
+
+ if (SkStrokeRec::kFill_Style != style) {
+ strokeWidth = random->nextRangeF(1.0f, 10.0f);
+ if (random->nextBool()) {
+ join = SkPaint::kMiter_Join;
+ } else {
+ join = SkPaint::kBevel_Join;
+ }
+ miterLimit = random->nextRangeF(0.5f, 2.0f);
+ }
+
+ return new AAFlatteningConvexPathBatch(color, viewMatrix, path, strokeWidth,
+ style, join, miterLimit);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrAALinearizingConvexPathRenderer.h b/gfx/skia/skia/src/gpu/batches/GrAALinearizingConvexPathRenderer.h
new file mode 100644
index 000000000..afee5db4d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrAALinearizingConvexPathRenderer.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAALinearizingConvexPathRenderer_DEFINED
+#define GrAALinearizingConvexPathRenderer_DEFINED
+
+#include "GrPathRenderer.h"
+
+class GrAALinearizingConvexPathRenderer : public GrPathRenderer {
+public:
+ GrAALinearizingConvexPathRenderer();
+
+private:
+ bool onCanDrawPath(const CanDrawPathArgs&) const override;
+
+ bool onDrawPath(const DrawPathArgs&) override;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrAAStrokeRectBatch.cpp b/gfx/skia/skia/src/gpu/batches/GrAAStrokeRectBatch.cpp
new file mode 100644
index 000000000..bbee3f878
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrAAStrokeRectBatch.cpp
@@ -0,0 +1,622 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrAAStrokeRectBatch.h"
+
+#include "GrBatchFlushState.h"
+#include "GrDefaultGeoProcFactory.h"
+#include "GrResourceKey.h"
+#include "GrResourceProvider.h"
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gMiterIndexBufferKey);
+GR_DECLARE_STATIC_UNIQUE_KEY(gBevelIndexBufferKey);
+
+static void set_inset_fan(SkPoint* pts, size_t stride,
+ const SkRect& r, SkScalar dx, SkScalar dy) {
+ pts->setRectFan(r.fLeft + dx, r.fTop + dy,
+ r.fRight - dx, r.fBottom - dy, stride);
+}
+
+// We support all hairlines, bevels, and miters, but not round joins. Also, check whether the miter
+// limit makes a miter join effectively beveled.
+inline static bool allowed_stroke(const SkStrokeRec& stroke, bool* isMiter) {
+ SkASSERT(stroke.getStyle() == SkStrokeRec::kStroke_Style ||
+ stroke.getStyle() == SkStrokeRec::kHairline_Style);
+ // For hairlines, make bevel and round joins appear the same as mitered ones.
+ if (!stroke.getWidth()) {
+ *isMiter = true;
+ return true;
+ }
+ if (stroke.getJoin() == SkPaint::kBevel_Join) {
+ *isMiter = false;
+ return true;
+ }
+ if (stroke.getJoin() == SkPaint::kMiter_Join) {
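+ // A rect's joins are 90 degrees, so a miter limit below sqrt(2) would bevel
+ // every corner anyway; treat such strokes as non-miter.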
+ *isMiter = stroke.getMiter() >= SK_ScalarSqrt2;
+ return true;
+ }
+ return false;
+}
+
+static void compute_rects(SkRect* devOutside, SkRect* devOutsideAssist, SkRect* devInside,
+ bool* isDegenerate, const SkMatrix& viewMatrix, const SkRect& rect,
+ SkScalar strokeWidth, bool miterStroke) {
+ SkRect devRect;
+ viewMatrix.mapRect(&devRect, rect);
+
+ SkVector devStrokeSize;
+ if (strokeWidth > 0) {
+ devStrokeSize.set(strokeWidth, strokeWidth);
+ viewMatrix.mapVectors(&devStrokeSize, 1);
+ devStrokeSize.setAbs(devStrokeSize);
+ } else {
+ devStrokeSize.set(SK_Scalar1, SK_Scalar1);
+ }
+
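+ // rx/ry are half the device-space stroke size; the outer rect is outset and the
+ // inner rect inset by these amounts around the mapped rect.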
+ const SkScalar dx = devStrokeSize.fX;
+ const SkScalar dy = devStrokeSize.fY;
+ const SkScalar rx = SkScalarMul(dx, SK_ScalarHalf);
+ const SkScalar ry = SkScalarMul(dy, SK_ScalarHalf);
+
+ *devOutside = devRect;
+ *devOutsideAssist = devRect;
+ *devInside = devRect;
+
+ devOutside->outset(rx, ry);
+ devInside->inset(rx, ry);
+
+ // If we have a degenerate stroking rect (i.e. the stroke is larger than the inner rect) then we
+ // make a degenerate inside rect to avoid double hitting. We will also jam all of the points
+ // together when we render these rects.
+ SkScalar spare;
+ {
+ SkScalar w = devRect.width() - dx;
+ SkScalar h = devRect.height() - dy;
+ spare = SkTMin(w, h);
+ }
+
+ *isDegenerate = spare <= 0;
+ if (*isDegenerate) {
+ devInside->fLeft = devInside->fRight = devRect.centerX();
+ devInside->fTop = devInside->fBottom = devRect.centerY();
+ }
+
+ // For a bevel stroke, use 2 SkRect instances (devOutside and devOutsideAssist)
+ // to draw the outside of the octagon, because there are 8 vertices on the outer
+ // edge while the inner edge has only 4, the same as a miter stroke.
+ if (!miterStroke) {
+ devOutside->inset(0, ry);
+ devOutsideAssist->outset(0, ry);
+ }
+}
+
+static sk_sp<GrGeometryProcessor> create_stroke_rect_gp(bool tweakAlphaForCoverage,
+ const SkMatrix& viewMatrix,
+ bool usesLocalCoords,
+ bool coverageIgnored) {
+ using namespace GrDefaultGeoProcFactory;
+
+ Color color(Color::kAttribute_Type);
+ Coverage::Type coverageType;
+ // TODO remove coverage if coverage is ignored
+ /*if (coverageIgnored) {
+ coverageType = Coverage::kNone_Type;
+ } else*/ if (tweakAlphaForCoverage) {
+ coverageType = Coverage::kSolid_Type;
+ } else {
+ coverageType = Coverage::kAttribute_Type;
+ }
+ Coverage coverage(coverageType);
+ LocalCoords localCoords(usesLocalCoords ? LocalCoords::kUsePosition_Type :
+ LocalCoords::kUnused_Type);
+ return MakeForDeviceSpace(color, coverage, localCoords, viewMatrix);
+}
+
+class AAStrokeRectBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ AAStrokeRectBatch(GrColor color, const SkMatrix& viewMatrix,
+ const SkRect& devOutside, const SkRect& devInside)
+ : INHERITED(ClassID())
+ , fViewMatrix(viewMatrix) {
+ SkASSERT(!devOutside.isEmpty());
+ SkASSERT(!devInside.isEmpty());
+
+ fGeoData.emplace_back(Geometry{color, devOutside, devOutside, devInside, false});
+ this->setBounds(devOutside, HasAABloat::kYes, IsZeroArea::kNo);
+ fMiterStroke = true;
+ }
+
+ static GrDrawBatch* Create(GrColor color, const SkMatrix& viewMatrix, const SkRect& rect,
+ const SkStrokeRec& stroke) {
+ bool isMiter;
+ if (!allowed_stroke(stroke, &isMiter)) {
+ return nullptr;
+ }
+
+ AAStrokeRectBatch* batch = new AAStrokeRectBatch();
+ batch->fMiterStroke = isMiter;
+ Geometry& geo = batch->fGeoData.push_back();
+ compute_rects(&geo.fDevOutside, &geo.fDevOutsideAssist, &geo.fDevInside, &geo.fDegenerate,
+ viewMatrix, rect, stroke.getWidth(), isMiter);
+ geo.fColor = color;
+ batch->setBounds(geo.fDevOutside, HasAABloat::kYes, IsZeroArea::kNo);
+ batch->fViewMatrix = viewMatrix;
+ return batch;
+ }
+
+ const char* name() const override { return "AAStrokeRect"; }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one geometry bundle
+ color->setKnownFourComponents(fGeoData[0].fColor);
+ coverage->setUnknownSingleComponent();
+ }
+
+private:
+ AAStrokeRectBatch() : INHERITED(ClassID()) {}
+
+ void onPrepareDraws(Target*) const override;
+ void initBatchTracker(const GrXPOverridesForBatch&) override;
+
+ static const int kMiterIndexCnt = 3 * 24;
+ static const int kMiterVertexCnt = 16;
+ static const int kNumMiterRectsInIndexBuffer = 256;
+
+ static const int kBevelIndexCnt = 48 + 36 + 24;
+ static const int kBevelVertexCnt = 24;
+ static const int kNumBevelRectsInIndexBuffer = 256;
+
+ static const GrBuffer* GetIndexBuffer(GrResourceProvider* resourceProvider, bool miterStroke);
+
+ GrColor color() const { return fBatch.fColor; }
+ bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
+ bool canTweakAlphaForCoverage() const { return fBatch.fCanTweakAlphaForCoverage; }
+ bool colorIgnored() const { return fBatch.fColorIgnored; }
+ bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+ bool miterStroke() const { return fMiterStroke; }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
+
+ void generateAAStrokeRectGeometry(void* vertices,
+ size_t offset,
+ size_t vertexStride,
+ int outerVertexNum,
+ int innerVertexNum,
+ GrColor color,
+ const SkRect& devOutside,
+ const SkRect& devOutsideAssist,
+ const SkRect& devInside,
+ bool miterStroke,
+ bool degenerate,
+ bool tweakAlphaForCoverage) const;
+
+ struct BatchTracker {
+ GrColor fColor;
+ bool fUsesLocalCoords;
+ bool fColorIgnored;
+ bool fCoverageIgnored;
+ bool fCanTweakAlphaForCoverage;
+ };
+
+ // TODO support AA rotated stroke rects by copying around view matrices
+ struct Geometry {
+ GrColor fColor;
+ SkRect fDevOutside;
+ SkRect fDevOutsideAssist;
+ SkRect fDevInside;
+ bool fDegenerate;
+ };
+
+ BatchTracker fBatch;
+ SkSTArray<1, Geometry, true> fGeoData;
+ SkMatrix fViewMatrix;
+ bool fMiterStroke;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+void AAStrokeRectBatch::initBatchTracker(const GrXPOverridesForBatch& overrides) {
+ // Handle any color overrides
+ if (!overrides.readsColor()) {
+ fGeoData[0].fColor = GrColor_ILLEGAL;
+ }
+ overrides.getOverrideColorIfSet(&fGeoData[0].fColor);
+
+ // setup batch properties
+ fBatch.fColorIgnored = !overrides.readsColor();
+ fBatch.fColor = fGeoData[0].fColor;
+ fBatch.fUsesLocalCoords = overrides.readsLocalCoords();
+ fBatch.fCoverageIgnored = !overrides.readsCoverage();
+ fBatch.fCanTweakAlphaForCoverage = overrides.canTweakAlphaForCoverage();
+}
+
+void AAStrokeRectBatch::onPrepareDraws(Target* target) const {
+ bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
+
+ sk_sp<GrGeometryProcessor> gp(create_stroke_rect_gp(canTweakAlphaForCoverage,
+ this->viewMatrix(),
+ this->usesLocalCoords(),
+ this->coverageIgnored()));
+ if (!gp) {
+ SkDebugf("Couldn't create GrGeometryProcessor\n");
+ return;
+ }
+
+ size_t vertexStride = gp->getVertexStride();
+
+ SkASSERT(canTweakAlphaForCoverage ?
+ vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorAttr) :
+ vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorCoverageAttr));
+ int innerVertexNum = 4;
+ int outerVertexNum = this->miterStroke() ? 4 : 8;
+ int verticesPerInstance = (outerVertexNum + innerVertexNum) * 2;
+ int indicesPerInstance = this->miterStroke() ? kMiterIndexCnt : kBevelIndexCnt;
+ int instanceCount = fGeoData.count();
+
+ const SkAutoTUnref<const GrBuffer> indexBuffer(
+ GetIndexBuffer(target->resourceProvider(), this->miterStroke()));
+ InstancedHelper helper;
+ void* vertices = helper.init(target, kTriangles_GrPrimitiveType, vertexStride,
+ indexBuffer, verticesPerInstance, indicesPerInstance,
+ instanceCount);
+ if (!vertices || !indexBuffer) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& args = fGeoData[i];
+ this->generateAAStrokeRectGeometry(vertices,
+ i * verticesPerInstance * vertexStride,
+ vertexStride,
+ outerVertexNum,
+ innerVertexNum,
+ args.fColor,
+ args.fDevOutside,
+ args.fDevOutsideAssist,
+ args.fDevInside,
+ fMiterStroke,
+ args.fDegenerate,
+ canTweakAlphaForCoverage);
+ }
+ helper.recordDraw(target, gp.get());
+}
+
+const GrBuffer* AAStrokeRectBatch::GetIndexBuffer(GrResourceProvider* resourceProvider,
+ bool miterStroke) {
+
+ if (miterStroke) {
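+ // Miter stroke: 16 vertices form four nested rects (outer AA, outer edge,
+ // inner edge, inner AA); each block of 24 indices below joins one adjacent
+ // pair of rects into a ring of 8 triangles.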
+ static const uint16_t gMiterIndices[] = {
+ 0 + 0, 1 + 0, 5 + 0, 5 + 0, 4 + 0, 0 + 0,
+ 1 + 0, 2 + 0, 6 + 0, 6 + 0, 5 + 0, 1 + 0,
+ 2 + 0, 3 + 0, 7 + 0, 7 + 0, 6 + 0, 2 + 0,
+ 3 + 0, 0 + 0, 4 + 0, 4 + 0, 7 + 0, 3 + 0,
+
+ 0 + 4, 1 + 4, 5 + 4, 5 + 4, 4 + 4, 0 + 4,
+ 1 + 4, 2 + 4, 6 + 4, 6 + 4, 5 + 4, 1 + 4,
+ 2 + 4, 3 + 4, 7 + 4, 7 + 4, 6 + 4, 2 + 4,
+ 3 + 4, 0 + 4, 4 + 4, 4 + 4, 7 + 4, 3 + 4,
+
+ 0 + 8, 1 + 8, 5 + 8, 5 + 8, 4 + 8, 0 + 8,
+ 1 + 8, 2 + 8, 6 + 8, 6 + 8, 5 + 8, 1 + 8,
+ 2 + 8, 3 + 8, 7 + 8, 7 + 8, 6 + 8, 2 + 8,
+ 3 + 8, 0 + 8, 4 + 8, 4 + 8, 7 + 8, 3 + 8,
+ };
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gMiterIndices) == kMiterIndexCnt);
+ GR_DEFINE_STATIC_UNIQUE_KEY(gMiterIndexBufferKey);
+ return resourceProvider->findOrCreateInstancedIndexBuffer(gMiterIndices,
+ kMiterIndexCnt, kNumMiterRectsInIndexBuffer, kMiterVertexCnt,
+ gMiterIndexBufferKey);
+ } else {
+ /**
+ * As in miter-stroke, index = a + b, and a is the current index, b is the shift
+ * from the first index. The index layout:
+ * outer AA line: 0~3, 4~7
+ * outer edge: 8~11, 12~15
+ * inner edge: 16~19
+ * inner AA line: 20~23
+ * Following comes a bevel-stroke rect and its indices:
+ *
+ * 4 7
+ * *********************************
+ * * ______________________________ *
+ * * / 12 15 \ *
+ * * / \ *
+ * 0 * |8 16_____________________19 11 | * 3
+ * * | | | | *
+ * * | | **************** | | *
+ * * | | * 20 23 * | | *
+ * * | | * * | | *
+ * * | | * 21 22 * | | *
+ * * | | **************** | | *
+ * * | |____________________| | *
+ * 1 * |9 17 18 10| * 2
+ * * \ / *
+ * * \13 __________________________14/ *
+ * * *
+ * **********************************
+ * 5 6
+ */
+ static const uint16_t gBevelIndices[] = {
+ // Draw outer AA, from outer AA line to outer edge, shift is 0.
+ 0 + 0, 1 + 0, 9 + 0, 9 + 0, 8 + 0, 0 + 0,
+ 1 + 0, 5 + 0, 13 + 0, 13 + 0, 9 + 0, 1 + 0,
+ 5 + 0, 6 + 0, 14 + 0, 14 + 0, 13 + 0, 5 + 0,
+ 6 + 0, 2 + 0, 10 + 0, 10 + 0, 14 + 0, 6 + 0,
+ 2 + 0, 3 + 0, 11 + 0, 11 + 0, 10 + 0, 2 + 0,
+ 3 + 0, 7 + 0, 15 + 0, 15 + 0, 11 + 0, 3 + 0,
+ 7 + 0, 4 + 0, 12 + 0, 12 + 0, 15 + 0, 7 + 0,
+ 4 + 0, 0 + 0, 8 + 0, 8 + 0, 12 + 0, 4 + 0,
+
+ // Draw the stroke, from outer edge to inner edge, shift is 8.
+ 0 + 8, 1 + 8, 9 + 8, 9 + 8, 8 + 8, 0 + 8,
+ 1 + 8, 5 + 8, 9 + 8,
+ 5 + 8, 6 + 8, 10 + 8, 10 + 8, 9 + 8, 5 + 8,
+ 6 + 8, 2 + 8, 10 + 8,
+ 2 + 8, 3 + 8, 11 + 8, 11 + 8, 10 + 8, 2 + 8,
+ 3 + 8, 7 + 8, 11 + 8,
+ 7 + 8, 4 + 8, 8 + 8, 8 + 8, 11 + 8, 7 + 8,
+ 4 + 8, 0 + 8, 8 + 8,
+
+ // Draw the inner AA, from inner edge to inner AA line, shift is 16.
+ 0 + 16, 1 + 16, 5 + 16, 5 + 16, 4 + 16, 0 + 16,
+ 1 + 16, 2 + 16, 6 + 16, 6 + 16, 5 + 16, 1 + 16,
+ 2 + 16, 3 + 16, 7 + 16, 7 + 16, 6 + 16, 2 + 16,
+ 3 + 16, 0 + 16, 4 + 16, 4 + 16, 7 + 16, 3 + 16,
+ };
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gBevelIndices) == kBevelIndexCnt);
+
+ GR_DEFINE_STATIC_UNIQUE_KEY(gBevelIndexBufferKey);
+ return resourceProvider->findOrCreateInstancedIndexBuffer(gBevelIndices,
+ kBevelIndexCnt, kNumBevelRectsInIndexBuffer, kBevelVertexCnt,
+ gBevelIndexBufferKey);
+ }
+}
+
+bool AAStrokeRectBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+ AAStrokeRectBatch* that = t->cast<AAStrokeRectBatch>();
+
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ // TODO batch across miterstroke changes
+ if (this->miterStroke() != that->miterStroke()) {
+ return false;
+ }
+
+ // We apply the viewmatrix to the rect points on the cpu. However, if the pipeline uses
+ // local coords then we won't be able to batch. We could actually upload the viewmatrix
+ // using vertex attributes in these cases, but haven't investigated that
+ if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return false;
+ }
+
+ // In the event of two batches, one of which can tweak alpha for coverage and one of
+ // which cannot, we just fall back to not tweaking.
+ if (this->canTweakAlphaForCoverage() != that->canTweakAlphaForCoverage()) {
+ fBatch.fCanTweakAlphaForCoverage = false;
+ }
+
+ if (this->color() != that->color()) {
+ fBatch.fColor = GrColor_ILLEGAL;
+ }
+ fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
+ this->joinBounds(*that);
+ return true;
+}
+
+static void setup_scale(int* scale, SkScalar inset) {
+ if (inset < SK_ScalarHalf) {
+ *scale = SkScalarFloorToInt(512.0f * inset / (inset + SK_ScalarHalf));
+ SkASSERT(*scale >= 0 && *scale <= 255);
+ } else {
+ *scale = 0xff;
+ }
+}
+
+void AAStrokeRectBatch::generateAAStrokeRectGeometry(void* vertices,
+ size_t offset,
+ size_t vertexStride,
+ int outerVertexNum,
+ int innerVertexNum,
+ GrColor color,
+ const SkRect& devOutside,
+ const SkRect& devOutsideAssist,
+ const SkRect& devInside,
+ bool miterStroke,
+ bool degenerate,
+ bool tweakAlphaForCoverage) const {
+ intptr_t verts = reinterpret_cast<intptr_t>(vertices) + offset;
+
+ // We create vertices for four nested rectangles. There are two ramps from 0 to full
+ // coverage, one on the exterior of the stroke and the other on the interior.
+ // The following pointers refer to the four rects, from outermost to innermost.
+ SkPoint* fan0Pos = reinterpret_cast<SkPoint*>(verts);
+ SkPoint* fan1Pos = reinterpret_cast<SkPoint*>(verts + outerVertexNum * vertexStride);
+ SkPoint* fan2Pos = reinterpret_cast<SkPoint*>(verts + 2 * outerVertexNum * vertexStride);
+ SkPoint* fan3Pos = reinterpret_cast<SkPoint*>(verts +
+ (2 * outerVertexNum + innerVertexNum) *
+ vertexStride);
+
+#ifndef SK_IGNORE_THIN_STROKED_RECT_FIX
+ // TODO: this only really works if the X & Y margins are the same all around
+ // the rect (or if they are all >= 1.0).
+ SkScalar inset;
+ if (!degenerate) {
+ inset = SkMinScalar(SK_Scalar1, devOutside.fRight - devInside.fRight);
+ inset = SkMinScalar(inset, devInside.fLeft - devOutside.fLeft);
+ inset = SkMinScalar(inset, devInside.fTop - devOutside.fTop);
+ if (miterStroke) {
+ inset = SK_ScalarHalf * SkMinScalar(inset, devOutside.fBottom - devInside.fBottom);
+ } else {
+ inset = SK_ScalarHalf * SkMinScalar(inset, devOutsideAssist.fBottom -
+ devInside.fBottom);
+ }
+ SkASSERT(inset >= 0);
+ } else {
+ // TODO use real devRect here
+ inset = SkMinScalar(devOutside.width(), SK_Scalar1);
+ inset = SK_ScalarHalf * SkMinScalar(inset, SkTMax(devOutside.height(),
+ devOutsideAssist.height()));
+ }
+#else
+ SkScalar inset;
+ if (!degenerate) {
+ inset = SK_ScalarHalf;
+ } else {
+ // TODO use real devRect here
+ inset = SkMinScalar(devOutside.width(), SK_Scalar1);
+ inset = SK_ScalarHalf * SkMinScalar(inset, SkTMax(devOutside.height(),
+ devOutsideAssist.height()));
+ }
+#endif
+
+ if (miterStroke) {
+ // outermost
+ set_inset_fan(fan0Pos, vertexStride, devOutside, -SK_ScalarHalf, -SK_ScalarHalf);
+ // inner two
+ set_inset_fan(fan1Pos, vertexStride, devOutside, inset, inset);
+ if (!degenerate) {
+ set_inset_fan(fan2Pos, vertexStride, devInside, -inset, -inset);
+ // innermost
+ set_inset_fan(fan3Pos, vertexStride, devInside, SK_ScalarHalf, SK_ScalarHalf);
+ } else {
+ // When the interior rect has become degenerate we smoosh to a single point
+ SkASSERT(devInside.fLeft == devInside.fRight &&
+ devInside.fTop == devInside.fBottom);
+ fan2Pos->setRectFan(devInside.fLeft, devInside.fTop,
+ devInside.fRight, devInside.fBottom, vertexStride);
+ fan3Pos->setRectFan(devInside.fLeft, devInside.fTop,
+ devInside.fRight, devInside.fBottom, vertexStride);
+ }
+ } else {
+ SkPoint* fan0AssistPos = reinterpret_cast<SkPoint*>(verts + 4 * vertexStride);
+ SkPoint* fan1AssistPos = reinterpret_cast<SkPoint*>(verts +
+ (outerVertexNum + 4) *
+ vertexStride);
+ // outermost
+ set_inset_fan(fan0Pos, vertexStride, devOutside, -SK_ScalarHalf, -SK_ScalarHalf);
+ set_inset_fan(fan0AssistPos, vertexStride, devOutsideAssist, -SK_ScalarHalf,
+ -SK_ScalarHalf);
+ // outer one of the inner two
+ set_inset_fan(fan1Pos, vertexStride, devOutside, inset, inset);
+ set_inset_fan(fan1AssistPos, vertexStride, devOutsideAssist, inset, inset);
+ if (!degenerate) {
+ // inner one of the inner two
+ set_inset_fan(fan2Pos, vertexStride, devInside, -inset, -inset);
+ // innermost
+ set_inset_fan(fan3Pos, vertexStride, devInside, SK_ScalarHalf, SK_ScalarHalf);
+ } else {
+ // When the interior rect has become degenerate we smoosh to a single point
+ SkASSERT(devInside.fLeft == devInside.fRight &&
+ devInside.fTop == devInside.fBottom);
+ fan2Pos->setRectFan(devInside.fLeft, devInside.fTop,
+ devInside.fRight, devInside.fBottom, vertexStride);
+ fan3Pos->setRectFan(devInside.fLeft, devInside.fTop,
+ devInside.fRight, devInside.fBottom, vertexStride);
+ }
+ }
+
+ // Make verts point to the vertex color and then set all the color and coverage vertex
+ // attribute values. The outermost rect has 0 coverage.
+ verts += sizeof(SkPoint);
+ for (int i = 0; i < outerVertexNum; ++i) {
+ if (tweakAlphaForCoverage) {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = 0;
+ } else {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
+ *reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) = 0;
+ }
+ }
+
+ // scale is the coverage for the inner two rects.
+ int scale;
+ setup_scale(&scale, inset);
+
+ float innerCoverage = GrNormalizeByteToFloat(scale);
+ GrColor scaledColor = (0xff == scale) ? color : SkAlphaMulQ(color, scale);
+
+ verts += outerVertexNum * vertexStride;
+ for (int i = 0; i < outerVertexNum + innerVertexNum; ++i) {
+ if (tweakAlphaForCoverage) {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = scaledColor;
+ } else {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
+ *reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) = innerCoverage;
+ }
+ }
+
+ // The innermost rect has 0 coverage, unless we are degenerate, in which case we must apply the
+ // scaled coverage
+ verts += (outerVertexNum + innerVertexNum) * vertexStride;
+ if (!degenerate) {
+ innerCoverage = 0;
+ scaledColor = 0;
+ }
+
+ for (int i = 0; i < innerVertexNum; ++i) {
+ if (tweakAlphaForCoverage) {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = scaledColor;
+ } else {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
+ *reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) = innerCoverage;
+ }
+ }
+}
+
+namespace GrAAStrokeRectBatch {
+
+GrDrawBatch* CreateFillBetweenRects(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& devOutside,
+ const SkRect& devInside) {
+ return new AAStrokeRectBatch(color, viewMatrix, devOutside, devInside);
+}
+
+GrDrawBatch* Create(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkStrokeRec& stroke) {
+ return AAStrokeRectBatch::Create(color, viewMatrix, rect, stroke);
+}
+
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_TEST_UTILS
+
+#include "GrBatchTest.h"
+
+DRAW_BATCH_TEST_DEFINE(AAStrokeRectBatch) {
+ bool miterStroke = random->nextBool();
+
+ // Create either an empty rect or a non-empty rect.
+ SkRect rect = random->nextBool() ? SkRect::MakeXYWH(10, 10, 50, 40) :
+ SkRect::MakeXYWH(6, 7, 0, 0);
+ SkScalar minDim = SkMinScalar(rect.width(), rect.height());
+ SkScalar strokeWidth = random->nextUScalar1() * minDim;
+
+ GrColor color = GrRandomColor(random);
+
+ SkStrokeRec rec(SkStrokeRec::kFill_InitStyle);
+ rec.setStrokeStyle(strokeWidth);
+ rec.setStrokeParams(SkPaint::kButt_Cap,
+ miterStroke ? SkPaint::kMiter_Join : SkPaint::kBevel_Join,
+ 1.f);
+ SkMatrix matrix = GrTest::TestMatrixRectStaysRect(random);
+ return GrAAStrokeRectBatch::Create(color, matrix, rect, rec);
+}
+
+#endif
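For reference, a minimal standalone sketch of the thin-stroke coverage cap that setup_scale() above computes; std::floor and a plain division stand in for SkScalarFloorToInt and GrNormalizeByteToFloat, and the sample insets are arbitrary.

    #include <cmath>
    #include <cstdio>

    // Mirrors setup_scale() in GrAAStrokeRectBatch.cpp: full coverage (0xff) once the inset
    // reaches half a pixel, otherwise a proportional cap in [0, 255].
    static int thin_stroke_scale(float inset) {
        if (inset < 0.5f) {
            return static_cast<int>(std::floor(512.0f * inset / (inset + 0.5f)));
        }
        return 0xff;
    }

    int main() {
        const float insets[] = {0.1f, 0.25f, 0.49f, 1.0f};
        for (float inset : insets) {
            int scale = thin_stroke_scale(inset);
            // GrNormalizeByteToFloat(scale) in the batch corresponds to scale / 255.0f here.
            std::printf("inset %.2f -> scale %3d (interior coverage %.3f)\n",
                        inset, scale, scale / 255.0f);
        }
        return 0;
    }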
diff --git a/gfx/skia/skia/src/gpu/batches/GrAAStrokeRectBatch.h b/gfx/skia/skia/src/gpu/batches/GrAAStrokeRectBatch.h
new file mode 100644
index 000000000..964cc5b4b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrAAStrokeRectBatch.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAAStrokeRectBatch_DEFINED
+#define GrAAStrokeRectBatch_DEFINED
+
+#include "GrColor.h"
+
+class GrBatch;
+class GrDrawBatch;
+class GrResourceProvider;
+class SkMatrix;
+struct SkRect;
+class SkStrokeRec;
+
+namespace GrAAStrokeRectBatch {
+
+GrDrawBatch* CreateFillBetweenRects(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& devOutside,
+ const SkRect& devInside);
+
+GrDrawBatch* Create(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkStrokeRec& stroke);
+
+}
+
+#endif
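A hedged usage sketch for the two factories declared above; the rects, color, and matrix are placeholder values, and CreateFillBetweenRects() is assumed to take device-space outer/inner rects as its parameter names suggest.

    // Assumes the relevant Skia headers (GrAAStrokeRectBatch.h, GrColor.h, SkMatrix.h,
    // SkRect.h, SkRefCnt.h) are included; all values are placeholders.
    SkRect devOutside = SkRect::MakeLTRB(10, 10, 110, 110);
    SkRect devInside  = SkRect::MakeLTRB(20, 20, 100, 100);
    SkAutoTUnref<GrDrawBatch> fillBetween(
            GrAAStrokeRectBatch::CreateFillBetweenRects(GrColor_WHITE, SkMatrix::I(),
                                                        devOutside, devInside));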
diff --git a/gfx/skia/skia/src/gpu/batches/GrAnalyticRectBatch.cpp b/gfx/skia/skia/src/gpu/batches/GrAnalyticRectBatch.cpp
new file mode 100644
index 000000000..311c65213
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrAnalyticRectBatch.cpp
@@ -0,0 +1,409 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrAnalyticRectBatch.h"
+
+#include "GrBatchFlushState.h"
+#include "GrBatchTest.h"
+#include "GrGeometryProcessor.h"
+#include "GrInvariantOutput.h"
+#include "GrProcessor.h"
+#include "GrResourceProvider.h"
+#include "SkRRect.h"
+#include "SkStrokeRec.h"
+#include "batches/GrVertexBatch.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLGeometryProcessor.h"
+#include "glsl/GrGLSLGeometryProcessor.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLVarying.h"
+#include "glsl/GrGLSLVertexShaderBuilder.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "glsl/GrGLSLUtil.h"
+
+namespace {
+
+struct RectVertex {
+ SkPoint fPos;
+ GrColor fColor;
+ SkPoint fCenter;
+ SkVector fDownDir;
+ SkScalar fHalfWidth;
+ SkScalar fHalfHeight;
+};
+
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * The output of this effect is the input color and coverage for an arbitrarily oriented rect. The
+ * rect is specified as:
+ * Center of the rect
+ * Unit vector pointing down the height of the rect
+ * Half width + 0.5
+ * Half height + 0.5
+ * The center and vector are stored in a vec4 varying ("RectEdge") with the
+ * center in the xy components and the vector in the zw components.
+ * The munged width and height are stored in a vec2 varying ("WidthHeight")
+ * with the width in x and the height in y.
+ */
+class RectGeometryProcessor : public GrGeometryProcessor {
+public:
+ RectGeometryProcessor(const SkMatrix& localMatrix) : fLocalMatrix(localMatrix) {
+ this->initClassID<RectGeometryProcessor>();
+ fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ fInColor = &this->addVertexAttrib("inColor", kVec4ub_GrVertexAttribType);
+ fInRectEdge = &this->addVertexAttrib("inRectEdge", kVec4f_GrVertexAttribType);
+ fInWidthHeight = &this->addVertexAttrib("inWidthHeight", kVec2f_GrVertexAttribType);
+ }
+
+ bool implementsDistanceVector() const override { return true; }
+
+ const Attribute* inPosition() const { return fInPosition; }
+ const Attribute* inColor() const { return fInColor; }
+ const Attribute* inRectEdge() const { return fInRectEdge; }
+ const Attribute* inWidthHeight() const { return fInWidthHeight; }
+
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+
+ virtual ~RectGeometryProcessor() {}
+
+ const char* name() const override { return "RectEdge"; }
+
+ class GLSLProcessor : public GrGLSLGeometryProcessor {
+ public:
+ GLSLProcessor() {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const RectGeometryProcessor& rgp = args.fGP.cast<RectGeometryProcessor>();
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(rgp);
+
+ // setup the varying for the position
+ GrGLSLVertToFrag positionVary(kVec2f_GrSLType);
+ varyingHandler->addVarying("Position", &positionVary);
+ vertBuilder->codeAppendf("%s = %s;", positionVary.vsOut(), rgp.inPosition()->fName);
+
+ // setup the varying for the center point and the unit vector that points down the
+ // height of the rect
+ GrGLSLVertToFrag rectEdgeVary(kVec4f_GrSLType);
+ varyingHandler->addVarying("RectEdge", &rectEdgeVary);
+ vertBuilder->codeAppendf("%s = %s;", rectEdgeVary.vsOut(), rgp.inRectEdge()->fName);
+
+ // setup the varying for the width/2+.5 and height/2+.5
+ GrGLSLVertToFrag widthHeightVary(kVec2f_GrSLType);
+ varyingHandler->addVarying("WidthHeight", &widthHeightVary);
+ vertBuilder->codeAppendf("%s = %s;",
+ widthHeightVary.vsOut(), rgp.inWidthHeight()->fName);
+
+ GrGLSLPPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ // setup pass through color
+ varyingHandler->addPassThroughAttribute(rgp.inColor(), args.fOutputColor);
+
+ // Setup position
+ this->setupPosition(vertBuilder, gpArgs, rgp.inPosition()->fName);
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ gpArgs->fPositionVar,
+ rgp.inPosition()->fName,
+ rgp.localMatrix(),
+ args.fFPCoordTransformHandler);
+
+ // TODO: compute all these offsets, spans, and scales in the VS
+ fragBuilder->codeAppendf("float insetW = min(1.0, %s.x) - 0.5;",
+ widthHeightVary.fsIn());
+ fragBuilder->codeAppendf("float insetH = min(1.0, %s.y) - 0.5;",
+ widthHeightVary.fsIn());
+ fragBuilder->codeAppend("float outset = 0.5;");
+ // For rects > 1 pixel wide and tall the spans are no-ops (i.e., 1.0). For rects
+ // < 1 pixel wide or tall they serve to normalize the < 1 ramp to a 0 .. 1 range.
+ fragBuilder->codeAppend("float spanW = insetW + outset;");
+ fragBuilder->codeAppend("float spanH = insetH + outset;");
+ // For rects < 1 pixel wide or tall, these scale factors are used to cap the maximum
+ // value of coverage that is used. In other words it is the coverage that is
+ // used in the interior of the rect after the ramp.
+ fragBuilder->codeAppend("float scaleW = min(1.0, 2.0*insetW/spanW);");
+ fragBuilder->codeAppend("float scaleH = min(1.0, 2.0*insetH/spanH);");
+ // Compute the coverage for the rect's width
+ fragBuilder->codeAppendf("vec2 offset = %s.xy - %s.xy;",
+ positionVary.fsIn(), rectEdgeVary.fsIn());
+ fragBuilder->codeAppendf("float perpDot = abs(offset.x * %s.w - offset.y * %s.z);",
+ rectEdgeVary.fsIn(), rectEdgeVary.fsIn());
+
+ if (args.fDistanceVectorName) {
+ fragBuilder->codeAppendf("float widthDistance = %s.x - perpDot;",
+ widthHeightVary.fsIn());
+ }
+
+ fragBuilder->codeAppendf(
+ "float coverage = scaleW*clamp((%s.x-perpDot)/spanW, 0.0, 1.0);",
+ widthHeightVary.fsIn());
+ // Compute the coverage for the rect's height and merge with the width
+ fragBuilder->codeAppendf("perpDot = abs(dot(offset, %s.zw));",
+ rectEdgeVary.fsIn());
+
+ if (args.fDistanceVectorName) {
+ fragBuilder->codeAppendf("float heightDistance = %s.y - perpDot;",
+ widthHeightVary.fsIn());
+ }
+
+ fragBuilder->codeAppendf(
+ "coverage = coverage*scaleH*clamp((%s.y-perpDot)/spanH, 0.0, 1.0);",
+ widthHeightVary.fsIn());
+
+ fragBuilder->codeAppendf("%s = vec4(coverage);", args.fOutputCoverage);
+
+ if (args.fDistanceVectorName) {
+ fragBuilder->codeAppend( "// Calculating distance vector\n");
+ fragBuilder->codeAppend( "vec2 dvAxis;");
+ fragBuilder->codeAppend( "float dvLength;");
+
+ fragBuilder->codeAppend( "if (heightDistance < widthDistance) {");
+ fragBuilder->codeAppendf(" dvAxis = %s.zw;", rectEdgeVary.fsIn());
+ fragBuilder->codeAppend( " dvLength = heightDistance;");
+ fragBuilder->codeAppend( "} else {");
+ fragBuilder->codeAppendf(" dvAxis = vec2(-%s.w, %s.z);",
+ rectEdgeVary.fsIn(), rectEdgeVary.fsIn());
+ fragBuilder->codeAppend( " dvLength = widthDistance;");
+ fragBuilder->codeAppend( "}");
+
+ fragBuilder->codeAppend( "float dvSign = sign(dot(offset, dvAxis));");
+ fragBuilder->codeAppendf("%s = vec4(dvSign * dvAxis, dvLength, 0.0);",
+ args.fDistanceVectorName);
+
+ }
+ }
+
+ static void GenKey(const GrGeometryProcessor& gp,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ b->add32(0x0);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& primProc,
+ FPCoordTransformIter&& transformIter) override {
+ const RectGeometryProcessor& rgp = primProc.cast<RectGeometryProcessor>();
+ this->setTransformDataHelper(rgp.fLocalMatrix, pdman, &transformIter);
+ }
+
+ private:
+ typedef GrGLSLGeometryProcessor INHERITED;
+ };
+
+ void getGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GLSLProcessor::GenKey(*this, caps, b);
+ }
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override {
+ return new GLSLProcessor();
+ }
+
+private:
+ SkMatrix fLocalMatrix;
+
+ const Attribute* fInPosition;
+ const Attribute* fInColor;
+ const Attribute* fInRectEdge;
+ const Attribute* fInWidthHeight;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(RectGeometryProcessor);
+
+sk_sp<GrGeometryProcessor> RectGeometryProcessor::TestCreate(GrProcessorTestData* d) {
+ return sk_sp<GrGeometryProcessor>(
+ new RectGeometryProcessor(GrTest::TestMatrix(d->fRandom)));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class AnalyticRectBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ AnalyticRectBatch(GrColor color, const SkMatrix& viewMatrix, const SkRect& rect,
+ const SkRect& croppedRect, const SkRect& bounds)
+ : INHERITED(ClassID())
+ , fViewMatrixIfUsingLocalCoords(viewMatrix) {
+ SkPoint center = SkPoint::Make(rect.centerX(), rect.centerY());
+ viewMatrix.mapPoints(&center, 1);
+ SkScalar halfWidth = viewMatrix.mapRadius(SkScalarHalf(rect.width()));
+ SkScalar halfHeight = viewMatrix.mapRadius(SkScalarHalf(rect.height()));
+ SkVector downDir = viewMatrix.mapVector(0.0f, 1.0f);
+ downDir.normalize();
+
+ SkRect deviceSpaceCroppedRect = croppedRect;
+ viewMatrix.mapRect(&deviceSpaceCroppedRect);
+
+ fGeoData.emplace_back(Geometry {color, center, downDir, halfWidth, halfHeight,
+ deviceSpaceCroppedRect});
+
+ this->setBounds(bounds, HasAABloat::kYes, IsZeroArea::kNo);
+ }
+
+ const char* name() const override { return "AnalyticRectBatch"; }
+
+ SkString dumpInfo() const override {
+ SkString string;
+ for (int i = 0; i < fGeoData.count(); ++i) {
+ string.appendf("Color: 0x%08x Rect [C:(%.2f, %.2f) D:<%.2f,%.3f> W/2:%.2f H/2:%.2f]\n",
+ fGeoData[i].fColor,
+ fGeoData[i].fCenter.x(), fGeoData[i].fCenter.y(),
+ fGeoData[i].fDownDir.x(), fGeoData[i].fDownDir.y(),
+ fGeoData[i].fHalfWidth,
+ fGeoData[i].fHalfHeight);
+ }
+ string.append(INHERITED::dumpInfo());
+ return string;
+ }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one geometry bundle
+ color->setKnownFourComponents(fGeoData[0].fColor);
+ coverage->setUnknownSingleComponent();
+ }
+
+private:
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ // Handle any overrides that affect our GP.
+ overrides.getOverrideColorIfSet(&fGeoData[0].fColor);
+ if (!overrides.readsLocalCoords()) {
+ fViewMatrixIfUsingLocalCoords.reset();
+ }
+ }
+
+ void onPrepareDraws(Target* target) const override {
+ SkMatrix localMatrix;
+ if (!fViewMatrixIfUsingLocalCoords.invert(&localMatrix)) {
+ return;
+ }
+
+ // Setup geometry processor
+ SkAutoTUnref<GrGeometryProcessor> gp(new RectGeometryProcessor(localMatrix));
+
+ int instanceCount = fGeoData.count();
+ size_t vertexStride = gp->getVertexStride();
+ SkASSERT(vertexStride == sizeof(RectVertex));
+ QuadHelper helper;
+ RectVertex* verts = reinterpret_cast<RectVertex*>(helper.init(target, vertexStride,
+ instanceCount));
+ if (!verts) {
+ return;
+ }
+
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& geom = fGeoData[i];
+
+ GrColor color = geom.fColor;
+ SkPoint center = geom.fCenter;
+ SkVector downDir = geom.fDownDir;
+ SkScalar halfWidth = geom.fHalfWidth;
+ SkScalar halfHeight = geom.fHalfHeight;
+ SkRect croppedRect = geom.fCroppedRect;
+
+ SkVector rightDir;
+ downDir.rotateCCW(&rightDir);
+
+ verts[0].fPos = {croppedRect.fLeft, croppedRect.fTop};
+ verts[0].fColor = color;
+ verts[0].fCenter = center;
+ verts[0].fDownDir = downDir;
+ verts[0].fHalfWidth = halfWidth;
+ verts[0].fHalfHeight = halfHeight;
+
+ verts[1].fPos = {croppedRect.fRight, croppedRect.fTop};
+ verts[1].fColor = color;
+ verts[1].fCenter = center;
+ verts[1].fDownDir = downDir;
+ verts[1].fHalfWidth = halfWidth;
+ verts[1].fHalfHeight = halfHeight;
+
+ verts[2].fPos = {croppedRect.fRight, croppedRect.fBottom};
+ verts[2].fColor = color;
+ verts[2].fCenter = center;
+ verts[2].fDownDir = downDir;
+ verts[2].fHalfWidth = halfWidth;
+ verts[2].fHalfHeight = halfHeight;
+
+ verts[3].fPos = {croppedRect.fLeft, croppedRect.fBottom};
+ verts[3].fColor = color;
+ verts[3].fCenter = center;
+ verts[3].fDownDir = downDir;
+ verts[3].fHalfWidth = halfWidth;
+ verts[3].fHalfHeight = halfHeight;
+
+ verts += kVerticesPerQuad;
+ }
+ helper.recordDraw(target, gp);
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ AnalyticRectBatch* that = t->cast<AnalyticRectBatch>();
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ if (!fViewMatrixIfUsingLocalCoords.cheapEqualTo(that->fViewMatrixIfUsingLocalCoords)) {
+ return false;
+ }
+
+ fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
+ this->joinBounds(*that);
+ return true;
+ }
+
+ struct Geometry {
+ GrColor fColor;
+ SkPoint fCenter;
+ SkVector fDownDir;
+ SkScalar fHalfWidth;
+ SkScalar fHalfHeight;
+ SkRect fCroppedRect;
+ };
+
+ SkMatrix fViewMatrixIfUsingLocalCoords;
+ SkSTArray<1, Geometry, true> fGeoData;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+GrDrawBatch* GrAnalyticRectBatch::CreateAnalyticRectBatch(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkRect& croppedRect,
+ const SkRect& bounds) {
+ return new AnalyticRectBatch(color, viewMatrix, rect, croppedRect, bounds);
+}
+
+#ifdef GR_TEST_UTILS
+
+DRAW_BATCH_TEST_DEFINE(AnalyticRectBatch) {
+ SkMatrix viewMatrix = GrTest::TestMatrix(random);
+ GrColor color = GrRandomColor(random);
+ SkRect rect = GrTest::TestSquare(random);
+ SkRect croppedRect = GrTest::TestSquare(random);
+ SkRect bounds = GrTest::TestSquare(random);
+ return new AnalyticRectBatch(color, viewMatrix, rect, croppedRect, bounds);
+}
+
+#endif
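A rough CPU transliteration of the per-pixel coverage the fragment shader above emits, written as plain C++ (Vec2 and clamp01 are local stand-ins, not Skia types); it is a sketch of the math only, not code from the library.

    #include <algorithm>
    #include <cmath>

    struct Vec2 { float x, y; };

    static float clamp01(float v) { return std::min(std::max(v, 0.0f), 1.0f); }

    // pos, center, and downDir are device-space; halfWidthPlusHalf / halfHeightPlusHalf are
    // the "munged" extents (half width/height + 0.5) carried in the WidthHeight varying.
    static float analytic_rect_coverage(Vec2 pos, Vec2 center, Vec2 downDir,
                                        float halfWidthPlusHalf, float halfHeightPlusHalf) {
        float insetW = std::min(1.0f, halfWidthPlusHalf) - 0.5f;
        float insetH = std::min(1.0f, halfHeightPlusHalf) - 0.5f;
        float outset = 0.5f;
        float spanW = insetW + outset;
        float spanH = insetH + outset;
        float scaleW = std::min(1.0f, 2.0f * insetW / spanW);
        float scaleH = std::min(1.0f, 2.0f * insetH / spanH);

        float offX = pos.x - center.x;
        float offY = pos.y - center.y;
        // Distance from the center measured across the width (perpendicular to downDir).
        float perpDot = std::fabs(offX * downDir.y - offY * downDir.x);
        float coverage = scaleW * clamp01((halfWidthPlusHalf - perpDot) / spanW);
        // Distance from the center measured along the height (parallel to downDir).
        perpDot = std::fabs(offX * downDir.x + offY * downDir.y);
        coverage *= scaleH * clamp01((halfHeightPlusHalf - perpDot) / spanH);
        return coverage;
    }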
diff --git a/gfx/skia/skia/src/gpu/batches/GrAnalyticRectBatch.h b/gfx/skia/skia/src/gpu/batches/GrAnalyticRectBatch.h
new file mode 100644
index 000000000..cdb6118c6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrAnalyticRectBatch.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAnalyticRectBatch_DEFINED
+#define GrAnalyticRectBatch_DEFINED
+
+#include "GrColor.h"
+
+class GrDrawBatch;
+class SkMatrix;
+struct SkRect;
+
+/*
+ * This class wraps helper functions that draw rects analytically. Used when a shader requires a
+ * distance vector.
+ *
+ * @param color the shape's color
+ * @param viewMatrix the shape's local matrix
+ * @param rect the shape in source space
+ * @param croppedRect the shape in device space, clipped to the device's bounds
+ * @param bounds the axis aligned bounds of the shape in device space
+ */
+class GrAnalyticRectBatch {
+public:
+ static GrDrawBatch* CreateAnalyticRectBatch(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkRect& croppedRect,
+ const SkRect& bounds);
+};
+
+#endif // GrAnalyticRectBatch_DEFINED
diff --git a/gfx/skia/skia/src/gpu/batches/GrAtlasTextBatch.cpp b/gfx/skia/skia/src/gpu/batches/GrAtlasTextBatch.cpp
new file mode 100644
index 000000000..6427bc084
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrAtlasTextBatch.cpp
@@ -0,0 +1,313 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrAtlasTextBatch.h"
+
+#include "GrBatchFlushState.h"
+#include "GrResourceProvider.h"
+
+#include "SkGlyphCache.h"
+#include "SkMathPriv.h"
+
+#include "effects/GrBitmapTextGeoProc.h"
+#include "effects/GrDistanceFieldGeoProc.h"
+#include "text/GrBatchFontCache.h"
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static inline GrColor skcolor_to_grcolor_nopremultiply(SkColor c) {
+ unsigned r = SkColorGetR(c);
+ unsigned g = SkColorGetG(c);
+ unsigned b = SkColorGetB(c);
+ return GrColorPackRGBA(r, g, b, 0xff);
+}
+
+static const int kDistanceAdjustLumShift = 5;
+
+SkString GrAtlasTextBatch::dumpInfo() const {
+ SkString str;
+
+ for (int i = 0; i < fGeoCount; ++i) {
+ str.appendf("%d: Color: 0x%08x Trans: %.2f,%.2f Runs: %d\n",
+ i,
+ fGeoData[i].fColor,
+ fGeoData[i].fX,
+ fGeoData[i].fY,
+ fGeoData[i].fBlob->runCount());
+ }
+
+ str.append(INHERITED::dumpInfo());
+ return str;
+}
+
+void GrAtlasTextBatch::computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const {
+ if (kColorBitmapMask_MaskType == fMaskType) {
+ color->setUnknownFourComponents();
+ } else {
+ color->setKnownFourComponents(fBatch.fColor);
+ }
+ switch (fMaskType) {
+ case kGrayscaleDistanceField_MaskType:
+ case kGrayscaleCoverageMask_MaskType:
+ coverage->setUnknownSingleComponent();
+ break;
+ case kLCDCoverageMask_MaskType:
+ case kLCDDistanceField_MaskType:
+ coverage->setUnknownOpaqueFourComponents();
+ coverage->setUsingLCDCoverage();
+ break;
+ case kColorBitmapMask_MaskType:
+ coverage->setKnownSingleComponent(0xff);
+ }
+}
+
+void GrAtlasTextBatch::initBatchTracker(const GrXPOverridesForBatch& overrides) {
+ // Handle any color overrides
+ if (!overrides.readsColor()) {
+ fGeoData[0].fColor = GrColor_ILLEGAL;
+ }
+ overrides.getOverrideColorIfSet(&fGeoData[0].fColor);
+
+ // setup batch properties
+ fBatch.fColorIgnored = !overrides.readsColor();
+ fBatch.fColor = fGeoData[0].fColor;
+ fBatch.fUsesLocalCoords = overrides.readsLocalCoords();
+ fBatch.fCoverageIgnored = !overrides.readsCoverage();
+}
+
+void GrAtlasTextBatch::onPrepareDraws(Target* target) const {
+ // if we have RGB, then we won't have any SkShaders so no need to use a localmatrix.
+ // TODO actually only invert if we don't have RGBA
+ SkMatrix localMatrix;
+ if (this->usesLocalCoords() && !this->viewMatrix().invert(&localMatrix)) {
+ SkDebugf("Cannot invert viewmatrix\n");
+ return;
+ }
+
+ GrTexture* texture = fFontCache->getTexture(this->maskFormat());
+ if (!texture) {
+ SkDebugf("Could not allocate backing texture for atlas\n");
+ return;
+ }
+
+ GrMaskFormat maskFormat = this->maskFormat();
+
+ FlushInfo flushInfo;
+ if (this->usesDistanceFields()) {
+ flushInfo.fGeometryProcessor =
+ this->setupDfProcessor(this->viewMatrix(), fFilteredColor, this->color(), texture);
+ } else {
+ GrTextureParams params(SkShader::kClamp_TileMode, GrTextureParams::kNone_FilterMode);
+ flushInfo.fGeometryProcessor = GrBitmapTextGeoProc::Make(this->color(),
+ texture,
+ params,
+ maskFormat,
+ localMatrix,
+ this->usesLocalCoords());
+ }
+
+ flushInfo.fGlyphsToFlush = 0;
+ size_t vertexStride = flushInfo.fGeometryProcessor->getVertexStride();
+ SkASSERT(vertexStride == GrAtlasTextBlob::GetVertexStride(maskFormat));
+
+ int glyphCount = this->numGlyphs();
+ const GrBuffer* vertexBuffer;
+
+ void* vertices = target->makeVertexSpace(vertexStride,
+ glyphCount * kVerticesPerGlyph,
+ &vertexBuffer,
+ &flushInfo.fVertexOffset);
+ flushInfo.fVertexBuffer.reset(SkRef(vertexBuffer));
+ flushInfo.fIndexBuffer.reset(target->resourceProvider()->refQuadIndexBuffer());
+ if (!vertices || !flushInfo.fVertexBuffer) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ unsigned char* currVertex = reinterpret_cast<unsigned char*>(vertices);
+
+ GrBlobRegenHelper helper(this, target, &flushInfo);
+ SkAutoGlyphCache glyphCache;
+ for (int i = 0; i < fGeoCount; i++) {
+ const Geometry& args = fGeoData[i];
+ Blob* blob = args.fBlob;
+ size_t byteCount;
+ void* blobVertices;
+ int subRunGlyphCount;
+ blob->regenInBatch(target, fFontCache, &helper, args.fRun, args.fSubRun, &glyphCache,
+ vertexStride, args.fViewMatrix, args.fX, args.fY, args.fColor,
+ &blobVertices, &byteCount, &subRunGlyphCount);
+
+ // now copy all vertices
+ memcpy(currVertex, blobVertices, byteCount);
+
+#ifdef SK_DEBUG
+ // bounds sanity check
+ SkRect rect;
+ rect.setLargestInverted();
+ SkPoint* vertex = (SkPoint*) ((char*)blobVertices);
+ rect.growToInclude(vertex, vertexStride, kVerticesPerGlyph * subRunGlyphCount);
+
+ if (this->usesDistanceFields()) {
+ args.fViewMatrix.mapRect(&rect);
+ }
+ // Allow for small numerical error in the bounds.
+ SkRect bounds = this->bounds();
+ bounds.outset(0.001f, 0.001f);
+ SkASSERT(bounds.contains(rect));
+#endif
+
+ currVertex += byteCount;
+ }
+
+ this->flush(target, &flushInfo);
+}
+
+void GrAtlasTextBatch::flush(GrVertexBatch::Target* target, FlushInfo* flushInfo) const {
+ GrMesh mesh;
+ int maxGlyphsPerDraw =
+ static_cast<int>(flushInfo->fIndexBuffer->gpuMemorySize() / sizeof(uint16_t) / 6);
+ mesh.initInstanced(kTriangles_GrPrimitiveType, flushInfo->fVertexBuffer,
+ flushInfo->fIndexBuffer, flushInfo->fVertexOffset,
+ kVerticesPerGlyph, kIndicesPerGlyph, flushInfo->fGlyphsToFlush,
+ maxGlyphsPerDraw);
+ target->draw(flushInfo->fGeometryProcessor.get(), mesh);
+ flushInfo->fVertexOffset += kVerticesPerGlyph * flushInfo->fGlyphsToFlush;
+ flushInfo->fGlyphsToFlush = 0;
+}
+
+bool GrAtlasTextBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+ GrAtlasTextBatch* that = t->cast<GrAtlasTextBatch>();
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ if (fMaskType != that->fMaskType) {
+ return false;
+ }
+
+ if (!this->usesDistanceFields()) {
+ if (kColorBitmapMask_MaskType == fMaskType && this->color() != that->color()) {
+ return false;
+ }
+ if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return false;
+ }
+ } else {
+ if (!this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return false;
+ }
+
+ if (fFilteredColor != that->fFilteredColor) {
+ return false;
+ }
+
+ if (fUseBGR != that->fUseBGR) {
+ return false;
+ }
+ }
+
+ fBatch.fNumGlyphs += that->numGlyphs();
+
+ // Reallocate space for geo data if necessary and then import that's geo data.
+ int newGeoCount = that->fGeoCount + fGeoCount;
+ // We assume (and here enforce) that the allocation size is the smallest power of two that
+ // is greater than or equal to the number of geometries (and at least
+ // kMinGeometryAllocated).
+ int newAllocSize = GrNextPow2(newGeoCount);
+ int currAllocSize = SkTMax<int>(kMinGeometryAllocated, GrNextPow2(fGeoCount));
+
+ if (newGeoCount > currAllocSize) {
+ fGeoData.realloc(newAllocSize);
+ }
+
+ memcpy(&fGeoData[fGeoCount], that->fGeoData.get(), that->fGeoCount * sizeof(Geometry));
+ // We steal the ref on the blobs from the other TextBatch and set its count to 0 so that
+ // it doesn't try to unref them.
+#ifdef SK_DEBUG
+ for (int i = 0; i < that->fGeoCount; ++i) {
+ that->fGeoData.get()[i].fBlob = (Blob*)0x1;
+ }
+#endif
+ that->fGeoCount = 0;
+ fGeoCount = newGeoCount;
+
+ this->joinBounds(*that);
+ return true;
+}
+
+// TODO just use class params
+// TODO trying to figure out why lcd is so whack
+sk_sp<GrGeometryProcessor> GrAtlasTextBatch::setupDfProcessor(const SkMatrix& viewMatrix,
+ SkColor filteredColor,
+ GrColor color,
+ GrTexture* texture) const {
+ GrTextureParams params(SkShader::kClamp_TileMode, GrTextureParams::kBilerp_FilterMode);
+ bool isLCD = this->isLCD();
+ // set up any flags
+ uint32_t flags = viewMatrix.isSimilarity() ? kSimilarity_DistanceFieldEffectFlag : 0;
+ flags |= viewMatrix.isScaleTranslate() ? kScaleOnly_DistanceFieldEffectFlag : 0;
+ flags |= fUseGammaCorrectDistanceTable ? kGammaCorrect_DistanceFieldEffectFlag : 0;
+
+ // see if we need to create a new effect
+ if (isLCD) {
+ flags |= kUseLCD_DistanceFieldEffectFlag;
+ flags |= fUseBGR ? kBGR_DistanceFieldEffectFlag : 0;
+
+ GrColor colorNoPreMul = skcolor_to_grcolor_nopremultiply(filteredColor);
+
+ float redCorrection = fDistanceAdjustTable->getAdjustment(
+ GrColorUnpackR(colorNoPreMul) >> kDistanceAdjustLumShift,
+ fUseGammaCorrectDistanceTable);
+ float greenCorrection = fDistanceAdjustTable->getAdjustment(
+ GrColorUnpackG(colorNoPreMul) >> kDistanceAdjustLumShift,
+ fUseGammaCorrectDistanceTable);
+ float blueCorrection = fDistanceAdjustTable->getAdjustment(
+ GrColorUnpackB(colorNoPreMul) >> kDistanceAdjustLumShift,
+ fUseGammaCorrectDistanceTable);
+ GrDistanceFieldLCDTextGeoProc::DistanceAdjust widthAdjust =
+ GrDistanceFieldLCDTextGeoProc::DistanceAdjust::Make(redCorrection,
+ greenCorrection,
+ blueCorrection);
+
+ return GrDistanceFieldLCDTextGeoProc::Make(color,
+ viewMatrix,
+ texture,
+ params,
+ widthAdjust,
+ flags,
+ this->usesLocalCoords());
+ } else {
+#ifdef SK_GAMMA_APPLY_TO_A8
+ U8CPU lum = SkColorSpaceLuminance::computeLuminance(SK_GAMMA_EXPONENT, filteredColor);
+ float correction = fDistanceAdjustTable->getAdjustment(
+ lum >> kDistanceAdjustLumShift, fUseGammaCorrectDistanceTable);
+ return GrDistanceFieldA8TextGeoProc::Make(color,
+ viewMatrix,
+ texture,
+ params,
+ correction,
+ flags,
+ this->usesLocalCoords());
+#else
+ return GrDistanceFieldA8TextGeoProc::Make(color,
+ viewMatrix,
+ texture,
+ params,
+ flags,
+ this->usesLocalCoords());
+#endif
+ }
+
+}
+
+void GrBlobRegenHelper::flush() {
+ fBatch->flush(fTarget, fFlushInfo);
+}
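A small illustrative sketch of the table-index math in the LCD branch of setupDfProcessor() above: each channel of the non-premultiplied filtered color is shifted right by kDistanceAdjustLumShift to pick an entry from the distance-adjust table. The channel values below are arbitrary examples.

    #include <cstdint>
    #include <cstdio>

    static const int kDistanceAdjustLumShift = 5;  // same shift as in GrAtlasTextBatch.cpp

    int main() {
        // Arbitrary example channels of a non-premultiplied filtered color.
        uint8_t r = 0xE0, g = 0x80, b = 0x20;
        std::printf("distance-adjust table indices: R=%d G=%d B=%d\n",
                    r >> kDistanceAdjustLumShift,
                    g >> kDistanceAdjustLumShift,
                    b >> kDistanceAdjustLumShift);
        return 0;
    }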
diff --git a/gfx/skia/skia/src/gpu/batches/GrAtlasTextBatch.h b/gfx/skia/skia/src/gpu/batches/GrAtlasTextBatch.h
new file mode 100644
index 000000000..32771832a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrAtlasTextBatch.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAtlasTextBatch_DEFINED
+#define GrAtlasTextBatch_DEFINED
+
+#include "batches/GrVertexBatch.h"
+
+#include "text/GrAtlasTextContext.h"
+#include "text/GrDistanceFieldAdjustTable.h"
+
+class GrAtlasTextBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ static const int kVerticesPerGlyph = GrAtlasTextBlob::kVerticesPerGlyph;
+ static const int kIndicesPerGlyph = 6;
+
+ typedef GrAtlasTextBlob Blob;
+ struct Geometry {
+ SkMatrix fViewMatrix;
+ Blob* fBlob;
+ SkScalar fX;
+ SkScalar fY;
+ int fRun;
+ int fSubRun;
+ GrColor fColor;
+ };
+
+ static GrAtlasTextBatch* CreateBitmap(GrMaskFormat maskFormat, int glyphCount,
+ GrBatchFontCache* fontCache) {
+ GrAtlasTextBatch* batch = new GrAtlasTextBatch;
+
+ batch->fFontCache = fontCache;
+ switch (maskFormat) {
+ case kA8_GrMaskFormat:
+ batch->fMaskType = kGrayscaleCoverageMask_MaskType;
+ break;
+ case kA565_GrMaskFormat:
+ batch->fMaskType = kLCDCoverageMask_MaskType;
+ break;
+ case kARGB_GrMaskFormat:
+ batch->fMaskType = kColorBitmapMask_MaskType;
+ break;
+ }
+ batch->fBatch.fNumGlyphs = glyphCount;
+ batch->fGeoCount = 1;
+ batch->fFilteredColor = 0;
+ batch->fUseBGR = false;
+ return batch;
+ }
+
+ static GrAtlasTextBatch* CreateDistanceField(
+ int glyphCount, GrBatchFontCache* fontCache,
+ const GrDistanceFieldAdjustTable* distanceAdjustTable,
+ bool useGammaCorrectDistanceTable,
+ SkColor filteredColor, bool isLCD,
+ bool useBGR) {
+ GrAtlasTextBatch* batch = new GrAtlasTextBatch;
+
+ batch->fFontCache = fontCache;
+ batch->fMaskType = isLCD ? kLCDDistanceField_MaskType : kGrayscaleDistanceField_MaskType;
+ batch->fDistanceAdjustTable.reset(SkRef(distanceAdjustTable));
+ batch->fUseGammaCorrectDistanceTable = useGammaCorrectDistanceTable;
+ batch->fFilteredColor = filteredColor;
+ batch->fUseBGR = useBGR;
+ batch->fBatch.fNumGlyphs = glyphCount;
+ batch->fGeoCount = 1;
+ return batch;
+ }
+
+ // to avoid even the initial copy of the struct, we have a getter for the first item which
+ // is used to seed the batch with its initial geometry. After seeding, the client should call
+ // init() so the Batch can initialize itself
+ Geometry& geometry() { return fGeoData[0]; }
+
+ void init() {
+ const Geometry& geo = fGeoData[0];
+ fBatch.fColor = geo.fColor;
+ SkRect bounds;
+ geo.fBlob->computeSubRunBounds(&bounds, geo.fRun, geo.fSubRun, geo.fViewMatrix, geo.fX,
+ geo.fY);
+ // We don't have tight bounds on the glyph paths in device space. For the purposes of bounds
+ // we treat this as a set of non-AA rects rendered with a texture.
+ this->setBounds(bounds, HasAABloat::kNo, IsZeroArea::kNo);
+ }
+
+ const char* name() const override { return "TextBatch"; }
+
+ SkString dumpInfo() const override;
+
+protected:
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override;
+
+
+private:
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override;
+
+ struct FlushInfo {
+ SkAutoTUnref<const GrBuffer> fVertexBuffer;
+ SkAutoTUnref<const GrBuffer> fIndexBuffer;
+ sk_sp<GrGeometryProcessor> fGeometryProcessor;
+ int fGlyphsToFlush;
+ int fVertexOffset;
+ };
+
+ void onPrepareDraws(Target* target) const override;
+
+ GrAtlasTextBatch() : INHERITED(ClassID()) {} // initialized in factory functions.
+
+ ~GrAtlasTextBatch() {
+ for (int i = 0; i < fGeoCount; i++) {
+ fGeoData[i].fBlob->unref();
+ }
+ }
+
+ GrMaskFormat maskFormat() const {
+ switch (fMaskType) {
+ case kLCDCoverageMask_MaskType:
+ return kA565_GrMaskFormat;
+ case kColorBitmapMask_MaskType:
+ return kARGB_GrMaskFormat;
+ case kGrayscaleCoverageMask_MaskType:
+ case kGrayscaleDistanceField_MaskType:
+ case kLCDDistanceField_MaskType:
+ return kA8_GrMaskFormat;
+ }
+ return kA8_GrMaskFormat; // suppress warning
+ }
+
+ bool usesDistanceFields() const {
+ return kGrayscaleDistanceField_MaskType == fMaskType ||
+ kLCDDistanceField_MaskType == fMaskType;
+ }
+
+ bool isLCD() const {
+ return kLCDCoverageMask_MaskType == fMaskType ||
+ kLCDDistanceField_MaskType == fMaskType;
+ }
+
+ inline void flush(GrVertexBatch::Target* target, FlushInfo* flushInfo) const;
+
+ GrColor color() const { return fBatch.fColor; }
+ const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
+ bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
+ int numGlyphs() const { return fBatch.fNumGlyphs; }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override;
+
+ // TODO just use class params
+ // TODO trying to figure out why lcd is so whack
+ sk_sp<GrGeometryProcessor> setupDfProcessor(const SkMatrix& viewMatrix, SkColor filteredColor,
+ GrColor color, GrTexture* texture) const;
+
+ struct BatchTracker {
+ GrColor fColor;
+ bool fUsesLocalCoords;
+ bool fColorIgnored;
+ bool fCoverageIgnored;
+ int fNumGlyphs;
+ };
+
+ BatchTracker fBatch;
+ // The minimum number of Geometry we will try to allocate.
+ enum { kMinGeometryAllocated = 4 };
+ SkAutoSTMalloc<kMinGeometryAllocated, Geometry> fGeoData;
+ int fGeoCount;
+
+ enum MaskType {
+ kGrayscaleCoverageMask_MaskType,
+ kLCDCoverageMask_MaskType,
+ kColorBitmapMask_MaskType,
+ kGrayscaleDistanceField_MaskType,
+ kLCDDistanceField_MaskType,
+ } fMaskType;
+ bool fUseBGR; // fold this into the enum?
+
+ GrBatchFontCache* fFontCache;
+
+ // Distance field properties
+ SkAutoTUnref<const GrDistanceFieldAdjustTable> fDistanceAdjustTable;
+ SkColor fFilteredColor;
+ bool fUseGammaCorrectDistanceTable;
+
+ friend class GrBlobRegenHelper; // Needs to trigger flushes
+
+ typedef GrVertexBatch INHERITED;
+};
+
+/*
+ * A simple helper class to abstract the interface GrAtlasTextBlob needs to regenerate itself.
+ * It'd be nicer if this was nested, but we need to forward declare it in GrAtlasTextBlob.h
+ */
+class GrBlobRegenHelper {
+public:
+ GrBlobRegenHelper(const GrAtlasTextBatch* batch,
+ GrVertexBatch::Target* target,
+ GrAtlasTextBatch::FlushInfo* flushInfo)
+ : fBatch(batch)
+ , fTarget(target)
+ , fFlushInfo(flushInfo) {}
+
+ void flush();
+
+ void incGlyphCount(int glyphCount = 1) {
+ fFlushInfo->fGlyphsToFlush += glyphCount;
+ }
+
+private:
+ const GrAtlasTextBatch* fBatch;
+ GrVertexBatch::Target* fTarget;
+ GrAtlasTextBatch::FlushInfo* fFlushInfo;
+};
+
+#endif
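A hypothetical caller-side sketch of the seed-then-init pattern the comments above describe, assuming the Skia types are in scope; the function name and its arguments are placeholders, not code from GrAtlasTextContext.

    GrAtlasTextBatch* make_bitmap_text_batch(GrAtlasTextBlob* blob, int run, int subRun,
                                             const SkMatrix& viewMatrix, SkScalar x, SkScalar y,
                                             GrColor color, int glyphCount,
                                             GrBatchFontCache* fontCache) {
        GrAtlasTextBatch* batch = GrAtlasTextBatch::CreateBitmap(kA8_GrMaskFormat, glyphCount,
                                                                 fontCache);
        GrAtlasTextBatch::Geometry& geometry = batch->geometry();  // seed the first Geometry
        geometry.fViewMatrix = viewMatrix;
        geometry.fBlob = SkRef(blob);   // the batch unrefs its blobs in its destructor
        geometry.fRun = run;
        geometry.fSubRun = subRun;
        geometry.fColor = color;
        geometry.fX = x;
        geometry.fY = y;
        batch->init();                  // computes sub-run bounds from the seeded geometry
        return batch;
    }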
diff --git a/gfx/skia/skia/src/gpu/batches/GrBatch.cpp b/gfx/skia/skia/src/gpu/batches/GrBatch.cpp
new file mode 100644
index 000000000..6755cf94f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrBatch.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrBatch.h"
+
+#include "GrMemoryPool.h"
+#include "SkSpinlock.h"
+
+// TODO I noticed a small benefit to using a larger exclusive pool for batches. It's very small,
+// but seems to be mostly consistent. There is a lot in flux right now, but we should really
+// revisit this when batch is everywhere
+
+
+// We use a global pool protected by a mutex (spinlock). Chrome may use the same GrContext on
+// different threads. The GrContext is not used concurrently on different threads and there is a
+// memory barrier between accesses of a context on different threads. Also, there may be multiple
+// GrContexts and those contexts may be in use concurrently on different threads.
+namespace {
+static SkSpinlock gBatchSpinlock;
+class MemoryPoolAccessor {
+public:
+
+// We know in the Android framework there is only one GrContext.
+#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
+ MemoryPoolAccessor() {}
+ ~MemoryPoolAccessor() {}
+#else
+ MemoryPoolAccessor() { gBatchSpinlock.acquire(); }
+ ~MemoryPoolAccessor() { gBatchSpinlock.release(); }
+#endif
+
+ GrMemoryPool* pool() const {
+ static GrMemoryPool gPool(16384, 16384);
+ return &gPool;
+ }
+};
+}
+
+int32_t GrBatch::gCurrBatchClassID = GrBatch::kIllegalBatchID;
+
+int32_t GrBatch::gCurrBatchUniqueID = GrBatch::kIllegalBatchID;
+
+void* GrBatch::operator new(size_t size) {
+ return MemoryPoolAccessor().pool()->allocate(size);
+}
+
+void GrBatch::operator delete(void* target) {
+ return MemoryPoolAccessor().pool()->release(target);
+}
+
+GrBatch::GrBatch(uint32_t classID)
+ : fClassID(classID)
+ , fUniqueID(kIllegalBatchID) {
+ SkASSERT(classID == SkToU32(fClassID));
+ SkDEBUGCODE(fUsed = false;)
+ SkDEBUGCODE(fBoundsFlags = kUninitialized_BoundsFlag);
+}
+
+GrBatch::~GrBatch() {}
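A simplified standalone analogue of the pooled allocation pattern above, with std::mutex standing in for SkSpinlock and a trivial malloc/free wrapper standing in for GrMemoryPool; it only illustrates the RAII-accessor idiom, not the real allocator.

    #include <cstddef>
    #include <cstdlib>
    #include <mutex>

    // Trivial malloc/free pool standing in for GrMemoryPool.
    class Pool {
    public:
        void* allocate(std::size_t size) { return std::malloc(size); }
        void  release(void* p)           { std::free(p); }
    };

    // RAII accessor: the lock is held for the lifetime of the temporary, i.e. across the
    // allocate()/release() call, mirroring MemoryPoolAccessor above.
    class PoolAccessor {
    public:
        PoolAccessor()  { Lock().lock(); }
        ~PoolAccessor() { Lock().unlock(); }
        Pool* pool() const {
            static Pool gPool;
            return &gPool;
        }
    private:
        static std::mutex& Lock() {
            static std::mutex gLock;
            return gLock;
        }
    };

    class Batch {
    public:
        void* operator new(std::size_t size) { return PoolAccessor().pool()->allocate(size); }
        void  operator delete(void* target)  { PoolAccessor().pool()->release(target); }
        int fPayload = 0;
    };

    int main() {
        Batch* b = new Batch;  // routed through the shared pool, under the lock
        delete b;              // released back through the same path
        return 0;
    }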
diff --git a/gfx/skia/skia/src/gpu/batches/GrBatch.h b/gfx/skia/skia/src/gpu/batches/GrBatch.h
new file mode 100644
index 000000000..8dafe9fba
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrBatch.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBatch_DEFINED
+#define GrBatch_DEFINED
+
+#include "../private/SkAtomics.h"
+#include "GrNonAtomicRef.h"
+#include "SkMatrix.h"
+#include "SkRect.h"
+#include "SkString.h"
+
+#include <new>
+
+class GrCaps;
+class GrGpuCommandBuffer;
+class GrBatchFlushState;
+class GrRenderTarget;
+
+/**
+ * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate
+ * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it
+ * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch
+ * subclasses complete freedom to decide how / what they can batch.
+ *
+ * Batches are created when GrContext processes a draw call. Batches of the same subclass may be
+ * merged using combineIfPossible. When two batches merge, one takes on the union of the data
+ * and the other is left empty. The merged batch becomes responsible for drawing the data from both
+ * the original batches.
+ *
+ * If there are any possible optimizations which might require knowing more about the full state of
+ * the draw, i.e. whether or not the GrBatch is allowed to tweak alpha for coverage, then this
+ * information will be communicated to the GrBatch prior to geometry generation.
+ *
+ * The bounds of the batch must contain all the vertices in device space *irrespective* of the clip.
+ * The bounds are used in determining which clip elements must be applied and thus the bounds cannot
+ * in turn depend upon the clip.
+ */
+#define GR_BATCH_SPEW 0
+#if GR_BATCH_SPEW
+ #define GrBATCH_INFO(...) SkDebugf(__VA_ARGS__)
+ #define GrBATCH_SPEW(code) code
+#else
+ #define GrBATCH_SPEW(code)
+ #define GrBATCH_INFO(...)
+#endif
+
+// A helper macro to generate a class static id
+#define DEFINE_BATCH_CLASS_ID \
+ static uint32_t ClassID() { \
+ static uint32_t kClassID = GenBatchClassID(); \
+ return kClassID; \
+ }
+
+class GrBatch : public GrNonAtomicRef<GrBatch> {
+public:
+ GrBatch(uint32_t classID);
+ virtual ~GrBatch();
+
+ virtual const char* name() const = 0;
+
+ bool combineIfPossible(GrBatch* that, const GrCaps& caps) {
+ if (this->classID() != that->classID()) {
+ return false;
+ }
+
+ return this->onCombineIfPossible(that, caps);
+ }
+
+ const SkRect& bounds() const {
+ SkASSERT(kUninitialized_BoundsFlag != fBoundsFlags);
+ return fBounds;
+ }
+
+ bool hasAABloat() const {
+ SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
+ return SkToBool(fBoundsFlags & kAABloat_BoundsFlag);
+ }
+
+ bool hasZeroArea() const {
+ SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
+ return SkToBool(fBoundsFlags & kZeroArea_BoundsFlag);
+ }
+
+ void* operator new(size_t size);
+ void operator delete(void* target);
+
+ void* operator new(size_t size, void* placement) {
+ return ::operator new(size, placement);
+ }
+ void operator delete(void* target, void* placement) {
+ ::operator delete(target, placement);
+ }
+
+ /**
+ * Helper for safely down-casting to a GrBatch subclass
+ */
+ template <typename T> const T& cast() const {
+ SkASSERT(T::ClassID() == this->classID());
+ return *static_cast<const T*>(this);
+ }
+
+ template <typename T> T* cast() {
+ SkASSERT(T::ClassID() == this->classID());
+ return static_cast<T*>(this);
+ }
+
+ uint32_t classID() const { SkASSERT(kIllegalBatchID != fClassID); return fClassID; }
+
+ // We lazily initialize the uniqueID because currently the only user is GrAuditTrail
+ uint32_t uniqueID() const {
+ if (kIllegalBatchID == fUniqueID) {
+ fUniqueID = GenBatchID();
+ }
+ return fUniqueID;
+ }
+ SkDEBUGCODE(bool isUsed() const { return fUsed; })
+
+ /** Called prior to drawing. The batch should perform any resource creation necessary
+ to quickly issue its draw when draw is called. */
+ void prepare(GrBatchFlushState* state) { this->onPrepare(state); }
+
+ /** Issues the batch's commands to GrGpu. */
+ void draw(GrBatchFlushState* state) { this->onDraw(state); }
+
+ /** Used to block batching across render target changes. Remove this once we store
+ GrBatches for different RTs in different targets. */
+ virtual uint32_t renderTargetUniqueID() const = 0;
+
+ /** Used for spewing information about batches when debugging. */
+ virtual SkString dumpInfo() const {
+ SkString string;
+ string.appendf("BatchBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
+ fBounds.fLeft, fBounds.fTop, fBounds.fRight, fBounds.fBottom);
+ return string;
+ }
+
+ /** Can remove this when multi-draw-buffer lands */
+ virtual GrRenderTarget* renderTarget() const = 0;
+
+protected:
+ /**
+ * Indicates that the batch will produce geometry that extends beyond its bounds for the
+ * purpose of ensuring that the fragment shader runs on partially covered pixels for
+ * non-MSAA antialiasing.
+ */
+ enum class HasAABloat {
+ kYes,
+ kNo
+ };
+ /**
+ * Indicates that the geometry represented by the batch has zero area (i.e. it is hairline
+ * or points).
+ */
+ enum class IsZeroArea {
+ kYes,
+ kNo
+ };
+ void setBounds(const SkRect& newBounds, HasAABloat aabloat, IsZeroArea zeroArea) {
+ fBounds = newBounds;
+ this->setBoundsFlags(aabloat, zeroArea);
+ }
+ void setTransformedBounds(const SkRect& srcBounds, const SkMatrix& m,
+ HasAABloat aabloat, IsZeroArea zeroArea) {
+ m.mapRect(&fBounds, srcBounds);
+ this->setBoundsFlags(aabloat, zeroArea);
+ }
+
+ void joinBounds(const GrBatch& that) {
+ if (that.hasAABloat()) {
+ fBoundsFlags |= kAABloat_BoundsFlag;
+ }
+ if (that.hasZeroArea()) {
+ fBoundsFlags |= kZeroArea_BoundsFlag;
+ }
+ return fBounds.joinPossiblyEmptyRect(that.fBounds);
+ }
+
+ void replaceBounds(const GrBatch& that) {
+ fBounds = that.fBounds;
+ fBoundsFlags = that.fBoundsFlags;
+ }
+
+ static uint32_t GenBatchClassID() { return GenID(&gCurrBatchClassID); }
+
+private:
+ virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0;
+
+ virtual void onPrepare(GrBatchFlushState*) = 0;
+ virtual void onDraw(GrBatchFlushState*) = 0;
+
+ static uint32_t GenID(int32_t* idCounter) {
+ // The atomic inc returns the old value not the incremented value. So we add
+ // 1 to the returned value.
+ uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1;
+ if (!id) {
+ SkFAIL("This should never wrap as it should only be called once for each GrBatch "
+ "subclass.");
+ }
+ return id;
+ }
+
+ void setBoundsFlags(HasAABloat aabloat, IsZeroArea zeroArea) {
+ fBoundsFlags = 0;
+ fBoundsFlags |= (HasAABloat::kYes == aabloat) ? kAABloat_BoundsFlag : 0;
+ fBoundsFlags |= (IsZeroArea ::kYes == zeroArea) ? kZeroArea_BoundsFlag : 0;
+ }
+
+ enum {
+ kIllegalBatchID = 0,
+ };
+
+ enum BoundsFlags {
+ kAABloat_BoundsFlag = 0x1,
+ kZeroArea_BoundsFlag = 0x2,
+ SkDEBUGCODE(kUninitialized_BoundsFlag = 0x4)
+ };
+
+ SkDEBUGCODE(bool fUsed;)
+ const uint16_t fClassID;
+ uint16_t fBoundsFlags;
+
+ static uint32_t GenBatchID() { return GenID(&gCurrBatchUniqueID); }
+ mutable uint32_t fUniqueID;
+ SkRect fBounds;
+
+ static int32_t gCurrBatchUniqueID;
+ static int32_t gCurrBatchClassID;
+};
+
+#endif
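A hypothetical minimal subclass sketch showing the contract the header describes (class ID, bounds flags, and a combine hook that only ever sees batches of the same class); it assumes GrBatch.h and its dependencies are available and is not a real Ganesh batch.

    #include "GrBatch.h"

    class HypotheticalNoopBatch : public GrBatch {
    public:
        DEFINE_BATCH_CLASS_ID

        HypotheticalNoopBatch(const SkRect& bounds) : INHERITED(ClassID()) {
            this->setBounds(bounds, HasAABloat::kNo, IsZeroArea::kNo);
        }

        const char* name() const override { return "HypotheticalNoopBatch"; }
        uint32_t renderTargetUniqueID() const override { return 0; }
        GrRenderTarget* renderTarget() const override { return nullptr; }

    private:
        bool onCombineIfPossible(GrBatch* t, const GrCaps&) override {
            // combineIfPossible() has already checked classID(), so the cast is safe.
            HypotheticalNoopBatch* that = t->cast<HypotheticalNoopBatch>();
            this->joinBounds(*that);   // take the union of the two batches' bounds
            return true;
        }
        void onPrepare(GrBatchFlushState*) override {}
        void onDraw(GrBatchFlushState*) override {}

        typedef GrBatch INHERITED;
    };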
diff --git a/gfx/skia/skia/src/gpu/batches/GrClearBatch.h b/gfx/skia/skia/src/gpu/batches/GrClearBatch.h
new file mode 100644
index 000000000..16f1ddd7f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrClearBatch.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrClearBatch_DEFINED
+#define GrClearBatch_DEFINED
+
+#include "GrBatch.h"
+#include "GrBatchFlushState.h"
+#include "GrFixedClip.h"
+#include "GrGpu.h"
+#include "GrGpuCommandBuffer.h"
+#include "GrRenderTarget.h"
+
+class GrClearBatch final : public GrBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ static sk_sp<GrClearBatch> Make(const GrFixedClip& clip, GrColor color, GrRenderTarget* rt) {
+ sk_sp<GrClearBatch> batch(new GrClearBatch(clip, color, rt));
+ if (!batch->renderTarget()) {
+ return nullptr; // The clip did not contain any pixels within the render target.
+ }
+ return batch;
+ }
+
+ const char* name() const override { return "Clear"; }
+
+ uint32_t renderTargetUniqueID() const override { return fRenderTarget.get()->uniqueID(); }
+ GrRenderTarget* renderTarget() const override { return fRenderTarget.get(); }
+
+ SkString dumpInfo() const override {
+ SkString string("Scissor [");
+ if (fClip.scissorEnabled()) {
+ const SkIRect& r = fClip.scissorRect();
+ string.appendf("L: %d, T: %d, R: %d, B: %d", r.fLeft, r.fTop, r.fRight, r.fBottom);
+ }
+ string.appendf("], Color: 0x%08x, RT: %d", fColor, fRenderTarget.get()->uniqueID());
+ string.append(INHERITED::dumpInfo());
+ return string;
+ }
+
+ void setColor(GrColor color) { fColor = color; }
+
+private:
+ GrClearBatch(const GrFixedClip& clip, GrColor color, GrRenderTarget* rt)
+ : INHERITED(ClassID())
+ , fClip(clip)
+ , fColor(color) {
+ SkIRect rtRect = SkIRect::MakeWH(rt->width(), rt->height());
+ if (fClip.scissorEnabled()) {
+ // Don't let scissors extend outside the RT. This may improve batching.
+ if (!fClip.intersect(rtRect)) {
+ return;
+ }
+ if (fClip.scissorRect() == rtRect) {
+ fClip.disableScissor();
+ }
+ }
+ this->setBounds(SkRect::Make(fClip.scissorEnabled() ? fClip.scissorRect() : rtRect),
+ HasAABloat::kNo, IsZeroArea::kNo);
+ fRenderTarget.reset(rt);
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ // This could be much more complicated. Currently we look at cases where the new clear
+ // contains the old clear, or when the new clear is a subset of the old clear and is the
+ // same color.
+ GrClearBatch* cb = t->cast<GrClearBatch>();
+ SkASSERT(cb->fRenderTarget == fRenderTarget);
+ if (!fClip.windowRectsState().cheapEqualTo(cb->fClip.windowRectsState())) {
+ return false;
+ }
+ if (cb->contains(this)) {
+ fClip = cb->fClip;
+ this->replaceBounds(*t);
+ fColor = cb->fColor;
+ return true;
+ } else if (cb->fColor == fColor && this->contains(cb)) {
+ return true;
+ }
+ return false;
+ }
+
+ bool contains(const GrClearBatch* that) const {
+ // The constructor ensures that scissor gets disabled on any clip that fills the entire RT.
+ return !fClip.scissorEnabled() ||
+ (that->fClip.scissorEnabled() &&
+ fClip.scissorRect().contains(that->fClip.scissorRect()));
+ }
+
+ void onPrepare(GrBatchFlushState*) override {}
+
+ void onDraw(GrBatchFlushState* state) override {
+ state->commandBuffer()->clear(fClip, fColor, fRenderTarget.get());
+ }
+
+ GrFixedClip fClip;
+ GrColor fColor;
+ GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
+
+ typedef GrBatch INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrClearStencilClipBatch.h b/gfx/skia/skia/src/gpu/batches/GrClearStencilClipBatch.h
new file mode 100644
index 000000000..e51082193
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrClearStencilClipBatch.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrClearStencilClipBatch_DEFINED
+#define GrClearStencilClipBatch_DEFINED
+
+#include "GrBatch.h"
+#include "GrBatchFlushState.h"
+#include "GrFixedClip.h"
+#include "GrGpu.h"
+#include "GrGpuCommandBuffer.h"
+#include "GrRenderTarget.h"
+
+class GrClearStencilClipBatch final : public GrBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ GrClearStencilClipBatch(const GrFixedClip& clip, bool insideStencilMask, GrRenderTarget* rt)
+ : INHERITED(ClassID())
+ , fClip(clip)
+ , fInsideStencilMask(insideStencilMask)
+ , fRenderTarget(rt) {
+ const SkRect& bounds = fClip.scissorEnabled() ? SkRect::Make(fClip.scissorRect())
+ : SkRect::MakeIWH(rt->width(), rt->height());
+ this->setBounds(bounds, HasAABloat::kNo, IsZeroArea::kNo);
+ }
+
+ const char* name() const override { return "ClearStencilClip"; }
+
+ uint32_t renderTargetUniqueID() const override { return fRenderTarget.get()->uniqueID(); }
+ GrRenderTarget* renderTarget() const override { return fRenderTarget.get(); }
+
+ SkString dumpInfo() const override {
+ SkString string("Scissor [");
+ if (fClip.scissorEnabled()) {
+ const SkIRect& r = fClip.scissorRect();
+ string.appendf("L: %d, T: %d, R: %d, B: %d", r.fLeft, r.fTop, r.fRight, r.fBottom);
+ }
+ string.appendf("], IC: %d, RT: %d", fInsideStencilMask, fRenderTarget.get()->uniqueID());
+ string.append(INHERITED::dumpInfo());
+ return string;
+ }
+
+private:
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override { return false; }
+
+ void onPrepare(GrBatchFlushState*) override {}
+
+ void onDraw(GrBatchFlushState* state) override {
+ state->commandBuffer()->clearStencilClip(fClip, fInsideStencilMask, fRenderTarget.get());
+ }
+
+ const GrFixedClip fClip;
+ const bool fInsideStencilMask;
+ GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
+
+ typedef GrBatch INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrCopySurfaceBatch.cpp b/gfx/skia/skia/src/gpu/batches/GrCopySurfaceBatch.cpp
new file mode 100644
index 000000000..a59ed38f5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrCopySurfaceBatch.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrCopySurfaceBatch.h"
+
+// returns true if the read/written rect intersects the src/dst and false if not.
+bool GrCopySurfaceBatch::ClipSrcRectAndDstPoint(const GrSurface* dst,
+ const GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint,
+ SkIRect* clippedSrcRect,
+ SkIPoint* clippedDstPoint) {
+ *clippedSrcRect = srcRect;
+ *clippedDstPoint = dstPoint;
+
+ // clip the left edge to src and dst bounds, adjusting dstPoint if necessary
+ if (clippedSrcRect->fLeft < 0) {
+ clippedDstPoint->fX -= clippedSrcRect->fLeft;
+ clippedSrcRect->fLeft = 0;
+ }
+ if (clippedDstPoint->fX < 0) {
+ clippedSrcRect->fLeft -= clippedDstPoint->fX;
+ clippedDstPoint->fX = 0;
+ }
+
+ // clip the top edge to src and dst bounds, adjusting dstPoint if necessary
+ if (clippedSrcRect->fTop < 0) {
+ clippedDstPoint->fY -= clippedSrcRect->fTop;
+ clippedSrcRect->fTop = 0;
+ }
+ if (clippedDstPoint->fY < 0) {
+ clippedSrcRect->fTop -= clippedDstPoint->fY;
+ clippedDstPoint->fY = 0;
+ }
+
+ // clip the right edge to the src and dst bounds.
+ if (clippedSrcRect->fRight > src->width()) {
+ clippedSrcRect->fRight = src->width();
+ }
+ if (clippedDstPoint->fX + clippedSrcRect->width() > dst->width()) {
+ clippedSrcRect->fRight = clippedSrcRect->fLeft + dst->width() - clippedDstPoint->fX;
+ }
+
+ // clip the bottom edge to the src and dst bounds.
+ if (clippedSrcRect->fBottom > src->height()) {
+ clippedSrcRect->fBottom = src->height();
+ }
+ if (clippedDstPoint->fY + clippedSrcRect->height() > dst->height()) {
+ clippedSrcRect->fBottom = clippedSrcRect->fTop + dst->height() - clippedDstPoint->fY;
+ }
+
+ // The above clipping steps may have inverted the rect if it didn't intersect either the src or
+ // dst bounds.
+ return !clippedSrcRect->isEmpty();
+}
+
+GrBatch* GrCopySurfaceBatch::Create(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ SkASSERT(dst);
+ SkASSERT(src);
+
+ SkIRect clippedSrcRect;
+ SkIPoint clippedDstPoint;
+ // If the rect is outside the src or dst then we've already succeeded.
+ if (!ClipSrcRectAndDstPoint(dst, src, srcRect, dstPoint, &clippedSrcRect, &clippedDstPoint)) {
+ return nullptr;
+ }
+ return new GrCopySurfaceBatch(dst, src, clippedSrcRect, clippedDstPoint);
+}
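[Editorial note, not part of the patch] A concrete walk-through of ClipSrcRectAndDstPoint() above; the surface sizes and rect values are invented purely for illustration:

// Illustrative only; assumes src is 64x64 and dst is 32x32.
SkIRect srcRect = SkIRect::MakeLTRB(-10, 0, 50, 40);
SkIPoint dstPoint = SkIPoint::Make(0, 0);
// Left edge:   fLeft < 0, so dstPoint.fX becomes 10 and fLeft becomes 0.
// Right edge:  dstPoint.fX + width = 10 + 50 = 60 exceeds dst->width() = 32,
//              so fRight is pulled in to fLeft + 32 - 10 = 22.
// Bottom edge: dstPoint.fY + height = 40 exceeds dst->height() = 32,
//              so fBottom becomes fTop + 32 - 0 = 32.
// Result: clippedSrcRect = {0, 0, 22, 32}, clippedDstPoint = (10, 0);
//         the rect is non-empty, so the function returns true.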
diff --git a/gfx/skia/skia/src/gpu/batches/GrCopySurfaceBatch.h b/gfx/skia/skia/src/gpu/batches/GrCopySurfaceBatch.h
new file mode 100644
index 000000000..fea8aae2f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrCopySurfaceBatch.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCopySurfaceBatch_DEFINED
+#define GrCopySurfaceBatch_DEFINED
+
+#include "GrBatch.h"
+#include "GrBatchFlushState.h"
+#include "GrGpu.h"
+#include "GrRenderTarget.h"
+
+class GrCopySurfaceBatch final : public GrBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ /** This should not really be exposed as Create() will apply this clipping, but there is
+ * currently a workaround in GrContext::copySurface() for non-render target dsts that relies
+ * on it. */
+ static bool ClipSrcRectAndDstPoint(const GrSurface* dst,
+ const GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint,
+ SkIRect* clippedSrcRect,
+ SkIPoint* clippedDstPoint);
+
+ static GrBatch* Create(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ const char* name() const override { return "CopySurface"; }
+
+ uint32_t renderTargetUniqueID() const override {
+ GrRenderTarget* rt = fDst.get()->asRenderTarget();
+ return rt ? rt->uniqueID() : 0;
+ }
+ GrRenderTarget* renderTarget() const override { return nullptr; }
+
+ SkString dumpInfo() const override {
+ SkString string;
+ string.printf("SRC: 0x%p, DST: 0x%p, SRECT: [L: %d, T: %d, R: %d, B: %d], "
+ "DPT:[X: %d, Y: %d]",
+ fDst.get(), fSrc.get(), fSrcRect.fLeft, fSrcRect.fTop, fSrcRect.fRight,
+ fSrcRect.fBottom, fDstPoint.fX, fDstPoint.fY);
+ string.append(INHERITED::dumpInfo());
+ return string;
+ }
+
+private:
+ GrCopySurfaceBatch(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint)
+ : INHERITED(ClassID())
+ , fDst(dst)
+ , fSrc(src)
+ , fSrcRect(srcRect)
+ , fDstPoint(dstPoint) {
+ SkRect bounds =
+ SkRect::MakeXYWH(SkIntToScalar(dstPoint.fX), SkIntToScalar(dstPoint.fY),
+ SkIntToScalar(srcRect.width()), SkIntToScalar(srcRect.height()));
+ this->setBounds(bounds, HasAABloat::kNo, IsZeroArea::kNo);
+ }
+
+ bool onCombineIfPossible(GrBatch* that, const GrCaps& caps) override { return false; }
+
+ void onPrepare(GrBatchFlushState*) override {}
+
+ void onDraw(GrBatchFlushState* state) override {
+ if (!state->commandBuffer()) {
+ state->gpu()->copySurface(fDst.get(), fSrc.get(), fSrcRect, fDstPoint);
+ } else {
+ // currently we are not sending copies through the GrGpuCommandBuffer
+ SkASSERT(false);
+ }
+ }
+
+ GrPendingIOResource<GrSurface, kWrite_GrIOType> fDst;
+ GrPendingIOResource<GrSurface, kRead_GrIOType> fSrc;
+ SkIRect fSrcRect;
+ SkIPoint fDstPoint;
+
+ typedef GrBatch INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrDashLinePathRenderer.cpp b/gfx/skia/skia/src/gpu/batches/GrDashLinePathRenderer.cpp
new file mode 100644
index 000000000..f2b75be10
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrDashLinePathRenderer.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrDashLinePathRenderer.h"
+
+#include "GrAuditTrail.h"
+#include "GrGpu.h"
+#include "GrPipelineBuilder.h"
+#include "effects/GrDashingEffect.h"
+
+bool GrDashLinePathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ SkPoint pts[2];
+ bool inverted;
+ if (args.fShape->style().isDashed() && args.fShape->asLine(pts, &inverted)) {
+ // We should never have an inverse dashed case.
+ SkASSERT(!inverted);
+ return GrDashingEffect::CanDrawDashLine(pts, args.fShape->style(), *args.fViewMatrix);
+ }
+ return false;
+}
+
+bool GrDashLinePathRenderer::onDrawPath(const DrawPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fDrawContext->auditTrail(),
+ "GrDashLinePathRenderer::onDrawPath");
+ bool useHWAA = args.fDrawContext->isUnifiedMultisampled();
+ GrDashingEffect::AAMode aaMode;
+ if (useHWAA) {
+        // We ignore args.fAntiAlias here and force anti-aliasing when using MSAA. Otherwise,
+ // we can wind up with external edges antialiased and internal edges unantialiased.
+ aaMode = GrDashingEffect::AAMode::kCoverageWithMSAA;
+ } else if (args.fAntiAlias) {
+ aaMode = GrDashingEffect::AAMode::kCoverage;
+ } else {
+ aaMode = GrDashingEffect::AAMode::kNone;
+ }
+ SkPoint pts[2];
+ SkAssertResult(args.fShape->asLine(pts, nullptr));
+ SkAutoTUnref<GrDrawBatch> batch(GrDashingEffect::CreateDashLineBatch(args.fPaint->getColor(),
+ *args.fViewMatrix,
+ pts,
+ aaMode,
+ args.fShape->style()));
+ if (!batch) {
+ return false;
+ }
+
+ GrPipelineBuilder pipelineBuilder(*args.fPaint, useHWAA);
+ pipelineBuilder.setUserStencil(args.fUserStencilSettings);
+
+ args.fDrawContext->drawBatch(pipelineBuilder, *args.fClip, batch);
+ return true;
+}
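[Editorial note, not part of the patch] The AA-mode selection in onDrawPath() above reduces to a small decision table. A hypothetical helper restating the same logic (not part of the source):

// MSAA render targets force the coverage-with-MSAA mode so internal and
// external edges are treated consistently; otherwise the paint's flag decides.
static GrDashingEffect::AAMode pick_aa_mode(bool useHWAA, bool antiAlias) {
    if (useHWAA) {
        return GrDashingEffect::AAMode::kCoverageWithMSAA;
    }
    return antiAlias ? GrDashingEffect::AAMode::kCoverage
                     : GrDashingEffect::AAMode::kNone;
}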
diff --git a/gfx/skia/skia/src/gpu/batches/GrDashLinePathRenderer.h b/gfx/skia/skia/src/gpu/batches/GrDashLinePathRenderer.h
new file mode 100644
index 000000000..d95942177
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrDashLinePathRenderer.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDashLinePathRenderer_DEFINED
+#define GrDashLinePathRenderer_DEFINED
+
+#include "GrPathRenderer.h"
+
+class GrDashLinePathRenderer : public GrPathRenderer {
+private:
+ bool onCanDrawPath(const CanDrawPathArgs&) const override;
+
+ StencilSupport onGetStencilSupport(const GrShape&) const override {
+ return kNoSupport_StencilSupport;
+ }
+
+ bool onDrawPath(const DrawPathArgs&) override;
+
+ SkAutoTUnref<GrGpu> fGpu;
+ typedef GrPathRenderer INHERITED;
+};
+
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrDefaultPathRenderer.cpp b/gfx/skia/skia/src/gpu/batches/GrDefaultPathRenderer.cpp
new file mode 100644
index 000000000..7dbdd4b48
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrDefaultPathRenderer.cpp
@@ -0,0 +1,638 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrDefaultPathRenderer.h"
+
+#include "GrBatchFlushState.h"
+#include "GrBatchTest.h"
+#include "GrContext.h"
+#include "GrDefaultGeoProcFactory.h"
+#include "GrFixedClip.h"
+#include "GrMesh.h"
+#include "GrPathUtils.h"
+#include "GrPipelineBuilder.h"
+#include "SkGeometry.h"
+#include "SkString.h"
+#include "SkStrokeRec.h"
+#include "SkTLazy.h"
+#include "SkTraceEvent.h"
+
+#include "batches/GrRectBatchFactory.h"
+#include "batches/GrVertexBatch.h"
+
+GrDefaultPathRenderer::GrDefaultPathRenderer(bool separateStencilSupport,
+ bool stencilWrapOpsSupport)
+ : fSeparateStencil(separateStencilSupport)
+ , fStencilWrapOps(stencilWrapOpsSupport) {
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Helpers for drawPath
+
+#define STENCIL_OFF 0 // Always disable stencil (even when needed)
+
+static inline bool single_pass_shape(const GrShape& shape) {
+#if STENCIL_OFF
+ return true;
+#else
+ // Inverse fill is always two pass.
+ if (shape.inverseFilled()) {
+ return false;
+ }
+ // This path renderer only accepts simple fill paths or stroke paths that are either hairline
+ // or have a stroke width small enough to treat as hairline. Hairline paths are always single
+ // pass. Filled paths are single pass if they're convex.
+ if (shape.style().isSimpleFill()) {
+ return shape.knownToBeConvex();
+ }
+ return true;
+#endif
+}
+
+GrPathRenderer::StencilSupport
+GrDefaultPathRenderer::onGetStencilSupport(const GrShape& shape) const {
+ if (single_pass_shape(shape)) {
+ return GrPathRenderer::kNoRestriction_StencilSupport;
+ } else {
+ return GrPathRenderer::kStencilOnly_StencilSupport;
+ }
+}
+
+static inline void append_countour_edge_indices(bool hairLine,
+ uint16_t fanCenterIdx,
+ uint16_t edgeV0Idx,
+ uint16_t** indices) {
+ // when drawing lines we're appending line segments along
+ // the contour. When applying the other fill rules we're
+ // drawing triangle fans around fanCenterIdx.
+ if (!hairLine) {
+ *((*indices)++) = fanCenterIdx;
+ }
+ *((*indices)++) = edgeV0Idx;
+ *((*indices)++) = edgeV0Idx + 1;
+}
+
+static inline void add_quad(SkPoint** vert, const SkPoint* base, const SkPoint pts[],
+ SkScalar srcSpaceTolSqd, SkScalar srcSpaceTol, bool indexed,
+ bool isHairline, uint16_t subpathIdxStart, int offset, uint16_t** idx) {
+ // first pt of quad is the pt we ended on in previous step
+ uint16_t firstQPtIdx = (uint16_t)(*vert - base) - 1 + offset;
+ uint16_t numPts = (uint16_t)
+ GrPathUtils::generateQuadraticPoints(
+ pts[0], pts[1], pts[2],
+ srcSpaceTolSqd, vert,
+ GrPathUtils::quadraticPointCount(pts, srcSpaceTol));
+ if (indexed) {
+ for (uint16_t i = 0; i < numPts; ++i) {
+ append_countour_edge_indices(isHairline, subpathIdxStart,
+ firstQPtIdx + i, idx);
+ }
+ }
+}
+
+class DefaultPathBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ DefaultPathBatch(GrColor color, const SkPath& path, SkScalar tolerance,
+ uint8_t coverage, const SkMatrix& viewMatrix, bool isHairline,
+ const SkRect& devBounds)
+ : INHERITED(ClassID()) {
+ fBatch.fCoverage = coverage;
+ fBatch.fIsHairline = isHairline;
+ fBatch.fViewMatrix = viewMatrix;
+ fGeoData.emplace_back(Geometry{color, path, tolerance});
+
+ this->setBounds(devBounds, HasAABloat::kNo,
+ isHairline ? IsZeroArea::kYes : IsZeroArea::kNo);
+ }
+
+ const char* name() const override { return "DefaultPathBatch"; }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one geometry bundle
+ color->setKnownFourComponents(fGeoData[0].fColor);
+ coverage->setKnownSingleComponent(this->coverage());
+ }
+
+private:
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ // Handle any color overrides
+ if (!overrides.readsColor()) {
+ fGeoData[0].fColor = GrColor_ILLEGAL;
+ }
+ overrides.getOverrideColorIfSet(&fGeoData[0].fColor);
+
+ // setup batch properties
+ fBatch.fColorIgnored = !overrides.readsColor();
+ fBatch.fColor = fGeoData[0].fColor;
+ fBatch.fUsesLocalCoords = overrides.readsLocalCoords();
+ fBatch.fCoverageIgnored = !overrides.readsCoverage();
+ }
+
+ void onPrepareDraws(Target* target) const override {
+ sk_sp<GrGeometryProcessor> gp;
+ {
+ using namespace GrDefaultGeoProcFactory;
+ Color color(this->color());
+ Coverage coverage(this->coverage());
+ if (this->coverageIgnored()) {
+ coverage.fType = Coverage::kNone_Type;
+ }
+ LocalCoords localCoords(this->usesLocalCoords() ? LocalCoords::kUsePosition_Type :
+ LocalCoords::kUnused_Type);
+ gp = GrDefaultGeoProcFactory::Make(color, coverage, localCoords, this->viewMatrix());
+ }
+
+ size_t vertexStride = gp->getVertexStride();
+ SkASSERT(vertexStride == sizeof(SkPoint));
+
+ int instanceCount = fGeoData.count();
+
+ // compute number of vertices
+ int maxVertices = 0;
+
+ // We will use index buffers if we have multiple paths or one path with multiple contours
+ bool isIndexed = instanceCount > 1;
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& args = fGeoData[i];
+
+ int contourCount;
+ maxVertices += GrPathUtils::worstCasePointCount(args.fPath, &contourCount,
+ args.fTolerance);
+
+ isIndexed = isIndexed || contourCount > 1;
+ }
+
+ if (maxVertices == 0 || maxVertices > ((int)SK_MaxU16 + 1)) {
+ //SkDebugf("Cannot render path (%d)\n", maxVertices);
+ return;
+ }
+
+ // determine primitiveType
+ int maxIndices = 0;
+ GrPrimitiveType primitiveType;
+ if (this->isHairline()) {
+ if (isIndexed) {
+ maxIndices = 2 * maxVertices;
+ primitiveType = kLines_GrPrimitiveType;
+ } else {
+ primitiveType = kLineStrip_GrPrimitiveType;
+ }
+ } else {
+ if (isIndexed) {
+ maxIndices = 3 * maxVertices;
+ primitiveType = kTriangles_GrPrimitiveType;
+ } else {
+ primitiveType = kTriangleFan_GrPrimitiveType;
+ }
+ }
+
+ // allocate vertex / index buffers
+ const GrBuffer* vertexBuffer;
+ int firstVertex;
+
+ void* verts = target->makeVertexSpace(vertexStride, maxVertices,
+ &vertexBuffer, &firstVertex);
+
+ if (!verts) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ const GrBuffer* indexBuffer = nullptr;
+ int firstIndex = 0;
+
+ void* indices = nullptr;
+ if (isIndexed) {
+ indices = target->makeIndexSpace(maxIndices, &indexBuffer, &firstIndex);
+
+ if (!indices) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+ }
+
+ // fill buffers
+ int vertexOffset = 0;
+ int indexOffset = 0;
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& args = fGeoData[i];
+
+ int vertexCnt = 0;
+ int indexCnt = 0;
+ if (!this->createGeom(verts,
+ vertexOffset,
+ indices,
+ indexOffset,
+ &vertexCnt,
+ &indexCnt,
+ args.fPath,
+ args.fTolerance,
+ isIndexed)) {
+ return;
+ }
+
+ vertexOffset += vertexCnt;
+ indexOffset += indexCnt;
+ SkASSERT(vertexOffset <= maxVertices && indexOffset <= maxIndices);
+ }
+
+ GrMesh mesh;
+ if (isIndexed) {
+ mesh.initIndexed(primitiveType, vertexBuffer, indexBuffer, firstVertex, firstIndex,
+ vertexOffset, indexOffset);
+ } else {
+ mesh.init(primitiveType, vertexBuffer, firstVertex, vertexOffset);
+ }
+ target->draw(gp.get(), mesh);
+
+ // put back reserves
+ target->putBackIndices((size_t)(maxIndices - indexOffset));
+ target->putBackVertices((size_t)(maxVertices - vertexOffset), (size_t)vertexStride);
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ DefaultPathBatch* that = t->cast<DefaultPathBatch>();
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ if (this->color() != that->color()) {
+ return false;
+ }
+
+ if (this->coverage() != that->coverage()) {
+ return false;
+ }
+
+ if (!this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return false;
+ }
+
+ if (this->isHairline() != that->isHairline()) {
+ return false;
+ }
+
+ fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
+ this->joinBounds(*that);
+ return true;
+ }
+
+ bool createGeom(void* vertices,
+ size_t vertexOffset,
+ void* indices,
+ size_t indexOffset,
+ int* vertexCnt,
+ int* indexCnt,
+ const SkPath& path,
+ SkScalar srcSpaceTol,
+ bool isIndexed) const {
+ {
+ SkScalar srcSpaceTolSqd = SkScalarMul(srcSpaceTol, srcSpaceTol);
+
+ uint16_t indexOffsetU16 = (uint16_t)indexOffset;
+ uint16_t vertexOffsetU16 = (uint16_t)vertexOffset;
+
+ uint16_t* idxBase = reinterpret_cast<uint16_t*>(indices) + indexOffsetU16;
+ uint16_t* idx = idxBase;
+ uint16_t subpathIdxStart = vertexOffsetU16;
+
+ SkPoint* base = reinterpret_cast<SkPoint*>(vertices) + vertexOffset;
+ SkPoint* vert = base;
+
+ SkPoint pts[4];
+
+ bool first = true;
+ int subpath = 0;
+
+ SkPath::Iter iter(path, false);
+
+ bool done = false;
+ while (!done) {
+ SkPath::Verb verb = iter.next(pts);
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ if (!first) {
+ uint16_t currIdx = (uint16_t) (vert - base) + vertexOffsetU16;
+ subpathIdxStart = currIdx;
+ ++subpath;
+ }
+ *vert = pts[0];
+ vert++;
+ break;
+ case SkPath::kLine_Verb:
+ if (isIndexed) {
+ uint16_t prevIdx = (uint16_t)(vert - base) - 1 + vertexOffsetU16;
+ append_countour_edge_indices(this->isHairline(), subpathIdxStart,
+ prevIdx, &idx);
+ }
+ *(vert++) = pts[1];
+ break;
+ case SkPath::kConic_Verb: {
+ SkScalar weight = iter.conicWeight();
+ SkAutoConicToQuads converter;
+                    // Converting in src-space, hence the finer tolerance (0.25)
+ // TODO: find a way to do this in dev-space so the tolerance means something
+ const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.25f);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ add_quad(&vert, base, quadPts + i*2, srcSpaceTolSqd, srcSpaceTol,
+ isIndexed, this->isHairline(), subpathIdxStart,
+ (int)vertexOffset, &idx);
+ }
+ break;
+ }
+ case SkPath::kQuad_Verb:
+ add_quad(&vert, base, pts, srcSpaceTolSqd, srcSpaceTol, isIndexed,
+ this->isHairline(), subpathIdxStart, (int)vertexOffset, &idx);
+ break;
+ case SkPath::kCubic_Verb: {
+ // first pt of cubic is the pt we ended on in previous step
+ uint16_t firstCPtIdx = (uint16_t)(vert - base) - 1 + vertexOffsetU16;
+ uint16_t numPts = (uint16_t) GrPathUtils::generateCubicPoints(
+ pts[0], pts[1], pts[2], pts[3],
+ srcSpaceTolSqd, &vert,
+ GrPathUtils::cubicPointCount(pts, srcSpaceTol));
+ if (isIndexed) {
+ for (uint16_t i = 0; i < numPts; ++i) {
+ append_countour_edge_indices(this->isHairline(), subpathIdxStart,
+ firstCPtIdx + i, &idx);
+ }
+ }
+ break;
+ }
+ case SkPath::kClose_Verb:
+ break;
+ case SkPath::kDone_Verb:
+ done = true;
+ }
+ first = false;
+ }
+
+ *vertexCnt = static_cast<int>(vert - base);
+ *indexCnt = static_cast<int>(idx - idxBase);
+
+ }
+ return true;
+ }
+
+ GrColor color() const { return fBatch.fColor; }
+ uint8_t coverage() const { return fBatch.fCoverage; }
+ bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
+ const SkMatrix& viewMatrix() const { return fBatch.fViewMatrix; }
+ bool isHairline() const { return fBatch.fIsHairline; }
+ bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
+
+ struct BatchTracker {
+ GrColor fColor;
+ uint8_t fCoverage;
+ SkMatrix fViewMatrix;
+ bool fUsesLocalCoords;
+ bool fColorIgnored;
+ bool fCoverageIgnored;
+ bool fIsHairline;
+ };
+
+ struct Geometry {
+ GrColor fColor;
+ SkPath fPath;
+ SkScalar fTolerance;
+ };
+
+ BatchTracker fBatch;
+ SkSTArray<1, Geometry, true> fGeoData;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+bool GrDefaultPathRenderer::internalDrawPath(GrDrawContext* drawContext,
+ const GrPaint& paint,
+ const GrUserStencilSettings& userStencilSettings,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const GrShape& shape,
+ bool stencilOnly) {
+ SkPath path;
+ shape.asPath(&path);
+
+ SkScalar hairlineCoverage;
+ uint8_t newCoverage = 0xff;
+ bool isHairline = false;
+ if (IsStrokeHairlineOrEquivalent(shape.style(), viewMatrix, &hairlineCoverage)) {
+ newCoverage = SkScalarRoundToInt(hairlineCoverage * 0xff);
+ isHairline = true;
+ } else {
+ SkASSERT(shape.style().isSimpleFill());
+ }
+
+ int passCount = 0;
+ const GrUserStencilSettings* passes[3];
+ GrDrawFace drawFace[3];
+ bool reverse = false;
+ bool lastPassIsBounds;
+
+ if (isHairline) {
+ passCount = 1;
+ if (stencilOnly) {
+ passes[0] = &gDirectToStencil;
+ } else {
+ passes[0] = &userStencilSettings;
+ }
+ lastPassIsBounds = false;
+ drawFace[0] = GrDrawFace::kBoth;
+ } else {
+ if (single_pass_shape(shape)) {
+ passCount = 1;
+ if (stencilOnly) {
+ passes[0] = &gDirectToStencil;
+ } else {
+ passes[0] = &userStencilSettings;
+ }
+ drawFace[0] = GrDrawFace::kBoth;
+ lastPassIsBounds = false;
+ } else {
+ switch (path.getFillType()) {
+ case SkPath::kInverseEvenOdd_FillType:
+ reverse = true;
+ // fallthrough
+ case SkPath::kEvenOdd_FillType:
+ passes[0] = &gEOStencilPass;
+ if (stencilOnly) {
+ passCount = 1;
+ lastPassIsBounds = false;
+ } else {
+ passCount = 2;
+ lastPassIsBounds = true;
+ if (reverse) {
+ passes[1] = &gInvEOColorPass;
+ } else {
+ passes[1] = &gEOColorPass;
+ }
+ }
+ drawFace[0] = drawFace[1] = GrDrawFace::kBoth;
+ break;
+
+ case SkPath::kInverseWinding_FillType:
+ reverse = true;
+ // fallthrough
+ case SkPath::kWinding_FillType:
+ if (fSeparateStencil) {
+ if (fStencilWrapOps) {
+ passes[0] = &gWindStencilSeparateWithWrap;
+ } else {
+ passes[0] = &gWindStencilSeparateNoWrap;
+ }
+ passCount = 2;
+ drawFace[0] = GrDrawFace::kBoth;
+ } else {
+ if (fStencilWrapOps) {
+ passes[0] = &gWindSingleStencilWithWrapInc;
+ passes[1] = &gWindSingleStencilWithWrapDec;
+ } else {
+ passes[0] = &gWindSingleStencilNoWrapInc;
+ passes[1] = &gWindSingleStencilNoWrapDec;
+ }
+ // which is cw and which is ccw is arbitrary.
+ drawFace[0] = GrDrawFace::kCW;
+ drawFace[1] = GrDrawFace::kCCW;
+ passCount = 3;
+ }
+ if (stencilOnly) {
+ lastPassIsBounds = false;
+ --passCount;
+ } else {
+ lastPassIsBounds = true;
+ drawFace[passCount-1] = GrDrawFace::kBoth;
+ if (reverse) {
+ passes[passCount-1] = &gInvWindColorPass;
+ } else {
+ passes[passCount-1] = &gWindColorPass;
+ }
+ }
+ break;
+ default:
+ SkDEBUGFAIL("Unknown path fFill!");
+ return false;
+ }
+ }
+ }
+
+ SkScalar tol = GrPathUtils::kDefaultTolerance;
+ SkScalar srcSpaceTol = GrPathUtils::scaleToleranceToSrc(tol, viewMatrix, path.getBounds());
+
+ SkRect devBounds;
+ GetPathDevBounds(path, drawContext->width(), drawContext->height(), viewMatrix, &devBounds);
+
+ for (int p = 0; p < passCount; ++p) {
+ if (lastPassIsBounds && (p == passCount-1)) {
+ SkRect bounds;
+ SkMatrix localMatrix = SkMatrix::I();
+ if (reverse) {
+ // draw over the dev bounds (which will be the whole dst surface for inv fill).
+ bounds = devBounds;
+ SkMatrix vmi;
+ // mapRect through persp matrix may not be correct
+ if (!viewMatrix.hasPerspective() && viewMatrix.invert(&vmi)) {
+ vmi.mapRect(&bounds);
+ } else {
+ if (!viewMatrix.invert(&localMatrix)) {
+ return false;
+ }
+ }
+ } else {
+ bounds = path.getBounds();
+ }
+ const SkMatrix& viewM = (reverse && viewMatrix.hasPerspective()) ? SkMatrix::I() :
+ viewMatrix;
+ SkAutoTUnref<GrDrawBatch> batch(
+ GrRectBatchFactory::CreateNonAAFill(paint.getColor(), viewM, bounds, nullptr,
+ &localMatrix));
+
+ SkASSERT(GrDrawFace::kBoth == drawFace[p]);
+ GrPipelineBuilder pipelineBuilder(paint, drawContext->mustUseHWAA(paint));
+ pipelineBuilder.setDrawFace(drawFace[p]);
+ pipelineBuilder.setUserStencil(passes[p]);
+
+ drawContext->drawBatch(pipelineBuilder, clip, batch);
+ } else {
+ SkAutoTUnref<GrDrawBatch> batch(new DefaultPathBatch(paint.getColor(), path,
+ srcSpaceTol,
+ newCoverage, viewMatrix,
+ isHairline, devBounds));
+
+ GrPipelineBuilder pipelineBuilder(paint, drawContext->mustUseHWAA(paint));
+ pipelineBuilder.setDrawFace(drawFace[p]);
+ pipelineBuilder.setUserStencil(passes[p]);
+ if (passCount > 1) {
+ pipelineBuilder.setDisableColorXPFactory();
+ }
+
+ drawContext->drawBatch(pipelineBuilder, clip, batch);
+ }
+ }
+ return true;
+}
+
+bool GrDefaultPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ // this class can draw any path with any simple fill style but doesn't do any anti-aliasing.
+ return !args.fAntiAlias &&
+ (args.fShape->style().isSimpleFill() ||
+ IsStrokeHairlineOrEquivalent(args.fShape->style(), *args.fViewMatrix, nullptr));
+}
+
+bool GrDefaultPathRenderer::onDrawPath(const DrawPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fDrawContext->auditTrail(),
+ "GrDefaultPathRenderer::onDrawPath");
+ return this->internalDrawPath(args.fDrawContext,
+ *args.fPaint,
+ *args.fUserStencilSettings,
+ *args.fClip,
+ *args.fViewMatrix,
+ *args.fShape,
+ false);
+}
+
+void GrDefaultPathRenderer::onStencilPath(const StencilPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fDrawContext->auditTrail(),
+ "GrDefaultPathRenderer::onStencilPath");
+ SkASSERT(!args.fShape->inverseFilled());
+
+ GrPaint paint;
+ paint.setXPFactory(GrDisableColorXPFactory::Make());
+ paint.setAntiAlias(args.fIsAA);
+
+ this->internalDrawPath(args.fDrawContext, paint, GrUserStencilSettings::kUnused, *args.fClip,
+ *args.fViewMatrix, *args.fShape, true);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_TEST_UTILS
+
+DRAW_BATCH_TEST_DEFINE(DefaultPathBatch) {
+ GrColor color = GrRandomColor(random);
+ SkMatrix viewMatrix = GrTest::TestMatrix(random);
+
+ // For now just hairlines because the other types of draws require two batches.
+ // TODO we should figure out a way to combine the stencil and cover steps into one batch
+ GrStyle style(SkStrokeRec::kHairline_InitStyle);
+ SkPath path = GrTest::TestPath(random);
+
+ // Compute srcSpaceTol
+ SkRect bounds = path.getBounds();
+ SkScalar tol = GrPathUtils::kDefaultTolerance;
+ SkScalar srcSpaceTol = GrPathUtils::scaleToleranceToSrc(tol, viewMatrix, bounds);
+
+ viewMatrix.mapRect(&bounds);
+ uint8_t coverage = GrRandomCoverage(random);
+ return new DefaultPathBatch(color, path, srcSpaceTol, coverage, viewMatrix, true, bounds);
+}
+
+#endif
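[Editorial note, not part of the patch] To make the index layout produced by append_countour_edge_indices() above concrete, here is an invented illustration for a single four-vertex subpath (vertices 0,1,2,3, subpathIdxStart == 0):

// Index streams emitted per lineTo, as produced by append_countour_edge_indices():
//
//   hairline (kLines):     (0,1), (1,2), (2,3)          // one segment per edge
//   fill     (kTriangles): (0,0,1), (0,1,2), (0,2,3)    // fan around vertex 0;
//                                                       // the first triangle is
//                                                       // degenerate and rasterizes
//                                                       // to nothing
//
// In the hairline case each call appends {edgeV0Idx, edgeV0Idx + 1}; in the fill
// case it prepends fanCenterIdx, yielding {fanCenter, edgeV0Idx, edgeV0Idx + 1}.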
diff --git a/gfx/skia/skia/src/gpu/batches/GrDefaultPathRenderer.h b/gfx/skia/skia/src/gpu/batches/GrDefaultPathRenderer.h
new file mode 100644
index 000000000..9ae23e48d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrDefaultPathRenderer.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDefaultPathRenderer_DEFINED
+#define GrDefaultPathRenderer_DEFINED
+
+#include "GrPathRenderer.h"
+#include "GrPathStencilSettings.h"
+#include "SkTypes.h"
+
+/**
+ * Subclass that renders the path using the stencil buffer to resolve fill rules
+ * (e.g. winding, even-odd)
+ */
+class SK_API GrDefaultPathRenderer : public GrPathRenderer {
+public:
+ GrDefaultPathRenderer(bool separateStencilSupport, bool stencilWrapOpsSupport);
+
+private:
+
+ StencilSupport onGetStencilSupport(const GrShape&) const override;
+
+ bool onCanDrawPath(const CanDrawPathArgs&) const override;
+
+ bool onDrawPath(const DrawPathArgs&) override;
+
+ void onStencilPath(const StencilPathArgs&) override;
+
+ bool internalDrawPath(GrDrawContext*,
+ const GrPaint&,
+ const GrUserStencilSettings&,
+ const GrClip&,
+ const SkMatrix& viewMatrix,
+ const GrShape&,
+ bool stencilOnly);
+
+ bool fSeparateStencil;
+ bool fStencilWrapOps;
+
+ typedef GrPathRenderer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrDiscardBatch.h b/gfx/skia/skia/src/gpu/batches/GrDiscardBatch.h
new file mode 100644
index 000000000..04e0ddbce
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrDiscardBatch.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDiscardBatch_DEFINED
+#define GrDiscardBatch_DEFINED
+
+#include "GrBatch.h"
+#include "GrBatchFlushState.h"
+#include "GrGpu.h"
+#include "GrRenderTarget.h"
+
+class GrDiscardBatch final : public GrBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ GrDiscardBatch(GrRenderTarget* rt)
+ : INHERITED(ClassID())
+ , fRenderTarget(rt) {
+ this->setBounds(SkRect::MakeIWH(rt->width(), rt->height()), HasAABloat::kNo,
+ IsZeroArea::kNo);
+ }
+
+ const char* name() const override { return "Discard"; }
+
+ uint32_t renderTargetUniqueID() const override { return fRenderTarget.get()->uniqueID(); }
+ GrRenderTarget* renderTarget() const override { return fRenderTarget.get(); }
+
+ SkString dumpInfo() const override {
+ SkString string;
+ string.printf("RT: %d", fRenderTarget.get()->uniqueID());
+ string.append(INHERITED::dumpInfo());
+ return string;
+ }
+
+private:
+ bool onCombineIfPossible(GrBatch* that, const GrCaps& caps) override {
+ return this->renderTargetUniqueID() == that->renderTargetUniqueID();
+ }
+
+ void onPrepare(GrBatchFlushState*) override {}
+
+ void onDraw(GrBatchFlushState* state) override {
+ state->commandBuffer()->discard(fRenderTarget.get());
+ }
+
+ GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
+
+ typedef GrBatch INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrDrawAtlasBatch.cpp b/gfx/skia/skia/src/gpu/batches/GrDrawAtlasBatch.cpp
new file mode 100644
index 000000000..6f1bfedfe
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrDrawAtlasBatch.cpp
@@ -0,0 +1,262 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrDrawAtlasBatch.h"
+#include "GrBatchFlushState.h"
+#include "GrBatchTest.h"
+#include "SkGr.h"
+#include "SkRandom.h"
+#include "SkRSXform.h"
+
+void GrDrawAtlasBatch::initBatchTracker(const GrXPOverridesForBatch& overrides) {
+ SkASSERT(fGeoData.count() == 1);
+ // Handle any color overrides
+ if (!overrides.readsColor()) {
+ fGeoData[0].fColor = GrColor_ILLEGAL;
+ }
+ if (overrides.getOverrideColorIfSet(&fGeoData[0].fColor) && fHasColors) {
+ size_t vertexStride = sizeof(SkPoint) + sizeof(SkPoint) +
+ (this->hasColors() ? sizeof(GrColor) : 0);
+ uint8_t* currVertex = fGeoData[0].fVerts.begin();
+ for (int i = 0; i < 4*fQuadCount; ++i) {
+ *(reinterpret_cast<GrColor*>(currVertex + sizeof(SkPoint))) = fGeoData[0].fColor;
+ currVertex += vertexStride;
+ }
+ }
+
+ // setup batch properties
+ fColorIgnored = !overrides.readsColor();
+ fColor = fGeoData[0].fColor;
+ // We'd like to assert this, but we can't because of GLPrograms test
+ //SkASSERT(init.readsLocalCoords());
+ fCoverageIgnored = !overrides.readsCoverage();
+}
+
+static sk_sp<GrGeometryProcessor> set_vertex_attributes(bool hasColors,
+ GrColor color,
+ const SkMatrix& viewMatrix,
+ bool coverageIgnored) {
+ using namespace GrDefaultGeoProcFactory;
+ Color gpColor(color);
+ if (hasColors) {
+ gpColor.fType = Color::kAttribute_Type;
+ }
+
+ Coverage coverage(coverageIgnored ? Coverage::kNone_Type : Coverage::kSolid_Type);
+ LocalCoords localCoords(LocalCoords::kHasExplicit_Type);
+ return GrDefaultGeoProcFactory::Make(gpColor, coverage, localCoords, viewMatrix);
+}
+
+void GrDrawAtlasBatch::onPrepareDraws(Target* target) const {
+ // Setup geometry processor
+ sk_sp<GrGeometryProcessor> gp(set_vertex_attributes(this->hasColors(),
+ this->color(),
+ this->viewMatrix(),
+ this->coverageIgnored()));
+
+ int instanceCount = fGeoData.count();
+ size_t vertexStride = gp->getVertexStride();
+ SkASSERT(vertexStride == sizeof(SkPoint) + sizeof(SkPoint)
+ + (this->hasColors() ? sizeof(GrColor) : 0));
+
+ QuadHelper helper;
+ int numQuads = this->quadCount();
+ void* verts = helper.init(target, vertexStride, numQuads);
+ if (!verts) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ uint8_t* vertPtr = reinterpret_cast<uint8_t*>(verts);
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& args = fGeoData[i];
+
+ size_t allocSize = args.fVerts.count();
+ memcpy(vertPtr, args.fVerts.begin(), allocSize);
+ vertPtr += allocSize;
+ }
+ helper.recordDraw(target, gp.get());
+}
+
+GrDrawAtlasBatch::GrDrawAtlasBatch(GrColor color, const SkMatrix& viewMatrix, int spriteCount,
+ const SkRSXform* xforms, const SkRect* rects,
+ const SkColor* colors)
+ : INHERITED(ClassID()) {
+ SkASSERT(xforms);
+ SkASSERT(rects);
+
+ fViewMatrix = viewMatrix;
+ Geometry& installedGeo = fGeoData.push_back();
+ installedGeo.fColor = color;
+
+ // Figure out stride and offsets
+ // Order within the vertex is: position [color] texCoord
+ size_t texOffset = sizeof(SkPoint);
+ size_t vertexStride = 2*sizeof(SkPoint);
+ fHasColors = SkToBool(colors);
+ if (colors) {
+ texOffset += sizeof(GrColor);
+ vertexStride += sizeof(GrColor);
+ }
+
+ // Compute buffer size and alloc buffer
+ fQuadCount = spriteCount;
+ int allocSize = static_cast<int>(4*vertexStride*spriteCount);
+ installedGeo.fVerts.reset(allocSize);
+ uint8_t* currVertex = installedGeo.fVerts.begin();
+
+ SkRect bounds;
+ bounds.setLargestInverted();
+ int paintAlpha = GrColorUnpackA(installedGeo.fColor);
+ for (int spriteIndex = 0; spriteIndex < spriteCount; ++spriteIndex) {
+ // Transform rect
+ SkPoint quad[4];
+ const SkRect& currRect = rects[spriteIndex];
+ xforms[spriteIndex].toQuad(currRect.width(), currRect.height(), quad);
+
+ // Copy colors if necessary
+ if (colors) {
+ // convert to GrColor
+ SkColor color = colors[spriteIndex];
+ if (paintAlpha != 255) {
+ color = SkColorSetA(color, SkMulDiv255Round(SkColorGetA(color), paintAlpha));
+ }
+ GrColor grColor = SkColorToPremulGrColor(color);
+
+ *(reinterpret_cast<GrColor*>(currVertex+sizeof(SkPoint))) = grColor;
+ *(reinterpret_cast<GrColor*>(currVertex+vertexStride+sizeof(SkPoint))) = grColor;
+ *(reinterpret_cast<GrColor*>(currVertex+2*vertexStride+sizeof(SkPoint))) = grColor;
+ *(reinterpret_cast<GrColor*>(currVertex+3*vertexStride+sizeof(SkPoint))) = grColor;
+ }
+
+ // Copy position and uv to verts
+ *(reinterpret_cast<SkPoint*>(currVertex)) = quad[0];
+ *(reinterpret_cast<SkPoint*>(currVertex+texOffset)) = SkPoint::Make(currRect.fLeft,
+ currRect.fTop);
+ bounds.growToInclude(quad[0].fX, quad[0].fY);
+ currVertex += vertexStride;
+
+ *(reinterpret_cast<SkPoint*>(currVertex)) = quad[1];
+ *(reinterpret_cast<SkPoint*>(currVertex+texOffset)) = SkPoint::Make(currRect.fRight,
+ currRect.fTop);
+ bounds.growToInclude(quad[1].fX, quad[1].fY);
+ currVertex += vertexStride;
+
+ *(reinterpret_cast<SkPoint*>(currVertex)) = quad[2];
+ *(reinterpret_cast<SkPoint*>(currVertex+texOffset)) = SkPoint::Make(currRect.fRight,
+ currRect.fBottom);
+ bounds.growToInclude(quad[2].fX, quad[2].fY);
+ currVertex += vertexStride;
+
+ *(reinterpret_cast<SkPoint*>(currVertex)) = quad[3];
+ *(reinterpret_cast<SkPoint*>(currVertex+texOffset)) = SkPoint::Make(currRect.fLeft,
+ currRect.fBottom);
+ bounds.growToInclude(quad[3].fX, quad[3].fY);
+ currVertex += vertexStride;
+ }
+
+ this->setTransformedBounds(bounds, viewMatrix, HasAABloat::kNo, IsZeroArea::kNo);
+}
+
+bool GrDrawAtlasBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+ GrDrawAtlasBatch* that = t->cast<GrDrawAtlasBatch>();
+
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ // We currently use a uniform viewmatrix for this batch
+ if (!this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return false;
+ }
+
+ if (this->hasColors() != that->hasColors()) {
+ return false;
+ }
+
+ if (!this->hasColors() && this->color() != that->color()) {
+ return false;
+ }
+
+ if (this->color() != that->color()) {
+ fColor = GrColor_ILLEGAL;
+ }
+ fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
+ fQuadCount += that->quadCount();
+
+ this->joinBounds(*that);
+ return true;
+}
+
+#ifdef GR_TEST_UTILS
+
+static SkRSXform random_xform(SkRandom* random) {
+ static const SkScalar kMinExtent = -100.f;
+ static const SkScalar kMaxExtent = 100.f;
+ static const SkScalar kMinScale = 0.1f;
+ static const SkScalar kMaxScale = 100.f;
+ static const SkScalar kMinRotate = -SK_ScalarPI;
+ static const SkScalar kMaxRotate = SK_ScalarPI;
+
+ SkRSXform xform = SkRSXform::MakeFromRadians(random->nextRangeScalar(kMinScale, kMaxScale),
+ random->nextRangeScalar(kMinRotate, kMaxRotate),
+ random->nextRangeScalar(kMinExtent, kMaxExtent),
+ random->nextRangeScalar(kMinExtent, kMaxExtent),
+ random->nextRangeScalar(kMinExtent, kMaxExtent),
+ random->nextRangeScalar(kMinExtent, kMaxExtent));
+ return xform;
+}
+
+static SkRect random_texRect(SkRandom* random) {
+ static const SkScalar kMinCoord = 0.0f;
+ static const SkScalar kMaxCoord = 1024.f;
+
+ SkRect texRect = SkRect::MakeLTRB(random->nextRangeScalar(kMinCoord, kMaxCoord),
+ random->nextRangeScalar(kMinCoord, kMaxCoord),
+ random->nextRangeScalar(kMinCoord, kMaxCoord),
+ random->nextRangeScalar(kMinCoord, kMaxCoord));
+ texRect.sort();
+ return texRect;
+}
+
+static void randomize_params(uint32_t count, SkRandom* random,
+ SkTArray<SkRSXform>* xforms,
+ SkTArray<SkRect>* texRects,
+ SkTArray<GrColor>* colors, bool hasColors) {
+ for (uint32_t v = 0; v < count; v++) {
+ xforms->push_back(random_xform(random));
+ texRects->push_back(random_texRect(random));
+ if (hasColors) {
+ colors->push_back(GrRandomColor(random));
+ }
+ }
+}
+
+DRAW_BATCH_TEST_DEFINE(GrDrawAtlasBatch) {
+ uint32_t spriteCount = random->nextRangeU(1, 100);
+
+ SkTArray<SkRSXform> xforms(spriteCount);
+ SkTArray<SkRect> texRects(spriteCount);
+ SkTArray<GrColor> colors;
+
+ bool hasColors = random->nextBool();
+
+ randomize_params(spriteCount,
+ random,
+ &xforms,
+ &texRects,
+ &colors, hasColors);
+
+ SkMatrix viewMatrix = GrTest::TestMatrix(random);
+
+ GrColor color = GrRandomColor(random);
+ return new GrDrawAtlasBatch(color, viewMatrix, spriteCount, xforms.begin(), texRects.begin(),
+ hasColors ? colors.begin() : nullptr);
+}
+
+#endif
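[Editorial note, not part of the patch] The per-vertex layout built in the GrDrawAtlasBatch constructor above ("position [color] texCoord") corresponds to one of two interleaved layouts, depending on whether per-sprite colors were supplied. A sketch of the assumed layouts (struct names are illustrative, not from the source; SkPoint and GrColor come from SkPoint.h and GrColor.h):

struct AtlasVertexNoColor {      // stride = 2 * sizeof(SkPoint) = 16 bytes
    SkPoint fPosition;
    SkPoint fTexCoord;           // texOffset = sizeof(SkPoint)
};

struct AtlasVertexWithColor {    // stride = 2 * sizeof(SkPoint) + sizeof(GrColor) = 20 bytes
    SkPoint fPosition;
    GrColor fColor;
    SkPoint fTexCoord;           // texOffset = sizeof(SkPoint) + sizeof(GrColor)
};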
diff --git a/gfx/skia/skia/src/gpu/batches/GrDrawAtlasBatch.h b/gfx/skia/skia/src/gpu/batches/GrDrawAtlasBatch.h
new file mode 100644
index 000000000..d9adf22a6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrDrawAtlasBatch.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDrawAtlasBatch_DEFINED
+#define GrDrawAtlasBatch_DEFINED
+
+#include "GrColor.h"
+#include "GrDefaultGeoProcFactory.h"
+#include "GrVertexBatch.h"
+
+class GrDrawAtlasBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ GrDrawAtlasBatch(GrColor color, const SkMatrix& viewMatrix, int spriteCount,
+ const SkRSXform* xforms, const SkRect* rects, const SkColor* colors);
+
+ const char* name() const override { return "DrawAtlasBatch"; }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one geometry bundle
+ if (this->hasColors()) {
+ color->setUnknownFourComponents();
+ } else {
+ color->setKnownFourComponents(fGeoData[0].fColor);
+ }
+ coverage->setKnownSingleComponent(0xff);
+ }
+
+private:
+ void onPrepareDraws(Target*) const override;
+
+ void initBatchTracker(const GrXPOverridesForBatch&) override;
+
+ GrColor color() const { return fColor; }
+ bool colorIgnored() const { return fColorIgnored; }
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+ bool hasColors() const { return fHasColors; }
+ int quadCount() const { return fQuadCount; }
+ bool coverageIgnored() const { return fCoverageIgnored; }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
+
+ struct Geometry {
+ GrColor fColor;
+ SkTArray<uint8_t, true> fVerts;
+ };
+
+ SkSTArray<1, Geometry, true> fGeoData;
+
+ SkMatrix fViewMatrix;
+ GrColor fColor;
+ int fQuadCount;
+ bool fColorIgnored;
+ bool fCoverageIgnored;
+ bool fHasColors;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrDrawBatch.cpp b/gfx/skia/skia/src/gpu/batches/GrDrawBatch.cpp
new file mode 100644
index 000000000..b73f7515e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrDrawBatch.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrDrawBatch.h"
+
+GrDrawBatch::GrDrawBatch(uint32_t classID) : INHERITED(classID), fPipelineInstalled(false) { }
+
+GrDrawBatch::~GrDrawBatch() {
+ if (fPipelineInstalled) {
+ this->pipeline()->~GrPipeline();
+ }
+}
+
+void GrDrawBatch::getPipelineOptimizations(GrPipelineOptimizations* opt) const {
+ GrInitInvariantOutput color;
+ GrInitInvariantOutput coverage;
+ this->computePipelineOptimizations(&color, &coverage, &opt->fOverrides);
+ opt->fColorPOI.initUsingInvariantOutput(color);
+ opt->fCoveragePOI.initUsingInvariantOutput(coverage);
+}
+
+bool GrDrawBatch::installPipeline(const GrPipeline::CreateArgs& args) {
+ GrXPOverridesForBatch overrides;
+ void* location = fPipelineStorage.get();
+ if (!GrPipeline::CreateAt(location, args, &overrides)) {
+ return false;
+ }
+ fPipelineInstalled = true;
+ this->initBatchTracker(overrides);
+ return true;
+}
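[Editorial note, not part of the patch] installPipeline() above constructs the GrPipeline in place inside fPipelineStorage, which is why ~GrDrawBatch() invokes the GrPipeline destructor explicitly instead of deleting anything. A generic sketch of that placement-new / manual-destroy pattern, independent of the GrPipeline API:

#include <new>
#include <type_traits>
#include <utility>

// Illustration only: raw, suitably aligned storage plus explicit construction
// and destruction, mirroring fPipelineStorage / fPipelineInstalled above.
template <typename T> class InPlaceSlot {
public:
    template <typename... Args> T* construct(Args&&... args) {
        return new (&fStorage) T(std::forward<Args>(args)...);  // placement new
    }
    void destroy() { this->get()->~T(); }                       // manual dtor call
    T* get() { return reinterpret_cast<T*>(&fStorage); }
private:
    typename std::aligned_storage<sizeof(T), alignof(T)>::type fStorage;
};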
diff --git a/gfx/skia/skia/src/gpu/batches/GrDrawBatch.h b/gfx/skia/skia/src/gpu/batches/GrDrawBatch.h
new file mode 100644
index 000000000..5f37b7b00
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrDrawBatch.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDrawBatch_DEFINED
+#define GrDrawBatch_DEFINED
+
+#include <functional>
+#include "GrBatch.h"
+#include "GrPipeline.h"
+
+struct GrInitInvariantOutput;
+
+/**
+ * GrDrawBatches are flushed in two phases (preDraw and draw). In preDraw, uploads to GrGpuResources
+ * and draws are determined and scheduled; they are issued in the draw phase. GrBatchDrawToken is
+ * used to sequence the uploads relative to each other and to draws.
+ **/
+
+class GrBatchDrawToken {
+public:
+ static GrBatchDrawToken AlreadyFlushedToken() { return GrBatchDrawToken(0); }
+
+ GrBatchDrawToken(const GrBatchDrawToken& that) : fSequenceNumber(that.fSequenceNumber) {}
+ GrBatchDrawToken& operator =(const GrBatchDrawToken& that) {
+ fSequenceNumber = that.fSequenceNumber;
+ return *this;
+ }
+ bool operator==(const GrBatchDrawToken& that) const {
+ return fSequenceNumber == that.fSequenceNumber;
+ }
+ bool operator!=(const GrBatchDrawToken& that) const { return !(*this == that); }
+
+private:
+ GrBatchDrawToken();
+ explicit GrBatchDrawToken(uint64_t sequenceNumber) : fSequenceNumber(sequenceNumber) {}
+ friend class GrBatchFlushState;
+ uint64_t fSequenceNumber;
+};
+
+/**
+ * Base class for GrBatches that draw. These batches have a GrPipeline installed by GrDrawTarget.
+ */
+class GrDrawBatch : public GrBatch {
+public:
+ /** Method that performs an upload on behalf of a DeferredUploadFn. */
+ using WritePixelsFn = std::function<bool(GrSurface* texture,
+ int left, int top, int width, int height,
+ GrPixelConfig config, const void* buffer,
+ size_t rowBytes)>;
+ /** See comments before GrDrawBatch::Target definition on how deferred uploaders work. */
+ using DeferredUploadFn = std::function<void(WritePixelsFn&)>;
+
+ class Target;
+
+ GrDrawBatch(uint32_t classID);
+ ~GrDrawBatch() override;
+
+ /**
+ * Fills in a structure informing the XP of overrides to its normal behavior.
+ */
+ void getPipelineOptimizations(GrPipelineOptimizations* override) const;
+
+ const GrPipeline* pipeline() const {
+ SkASSERT(fPipelineInstalled);
+ return reinterpret_cast<const GrPipeline*>(fPipelineStorage.get());
+ }
+
+ bool installPipeline(const GrPipeline::CreateArgs&);
+
+ // TODO no GrPrimitiveProcessors yet read fragment position
+ bool willReadFragmentPosition() const { return false; }
+
+ uint32_t renderTargetUniqueID() const final {
+ SkASSERT(fPipelineInstalled);
+ return this->pipeline()->getRenderTarget()->uniqueID();
+ }
+
+ GrRenderTarget* renderTarget() const final {
+ SkASSERT(fPipelineInstalled);
+ return this->pipeline()->getRenderTarget();
+ }
+
+ SkString dumpInfo() const override {
+ SkString string;
+ string.appendf("RT: %d\n", this->renderTargetUniqueID());
+ string.append("ColorStages:\n");
+ for (int i = 0; i < this->pipeline()->numColorFragmentProcessors(); i++) {
+ string.appendf("\t\t%s\n\t\t%s\n",
+ this->pipeline()->getColorFragmentProcessor(i).name(),
+ this->pipeline()->getColorFragmentProcessor(i).dumpInfo().c_str());
+ }
+ string.append("CoverageStages:\n");
+ for (int i = 0; i < this->pipeline()->numCoverageFragmentProcessors(); i++) {
+ string.appendf("\t\t%s\n\t\t%s\n",
+ this->pipeline()->getCoverageFragmentProcessor(i).name(),
+ this->pipeline()->getCoverageFragmentProcessor(i).dumpInfo().c_str());
+ }
+ string.appendf("XP: %s\n", this->pipeline()->getXferProcessor().name());
+
+ bool scissorEnabled = this->pipeline()->getScissorState().enabled();
+ string.appendf("Scissor: ");
+ if (scissorEnabled) {
+ string.appendf("[L: %d, T: %d, R: %d, B: %d]\n",
+ this->pipeline()->getScissorState().rect().fLeft,
+ this->pipeline()->getScissorState().rect().fTop,
+ this->pipeline()->getScissorState().rect().fRight,
+ this->pipeline()->getScissorState().rect().fBottom);
+ } else {
+ string.appendf("<disabled>\n");
+ }
+ string.append(INHERITED::dumpInfo());
+
+ return string;
+ }
+
+protected:
+ virtual void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const = 0;
+
+private:
+ /**
+     * initBatchTracker is a hook for some additional overrides / optimization possibilities
+ * from the GrXferProcessor.
+ */
+ virtual void initBatchTracker(const GrXPOverridesForBatch&) = 0;
+
+protected:
+ struct QueuedUpload {
+ QueuedUpload(DeferredUploadFn&& upload, GrBatchDrawToken token)
+ : fUpload(std::move(upload))
+ , fUploadBeforeToken(token) {}
+ DeferredUploadFn fUpload;
+ GrBatchDrawToken fUploadBeforeToken;
+ };
+ SkTArray<QueuedUpload> fInlineUploads;
+
+private:
+ SkAlignedSTStorage<1, GrPipeline> fPipelineStorage;
+ bool fPipelineInstalled;
+ typedef GrBatch INHERITED;
+};
+
+#endif
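[Editorial note, not part of the patch] The DeferredUploadFn / WritePixelsFn pair declared above lets a batch record an upload and have the flush state invoke it later with a pixel-writing callback. A hypothetical example of building such a function object (the texture, dimensions, and pixel data are invented; only the signatures come from the header):

// Hypothetical: capture what to upload now, forward to the WritePixelsFn that
// the flush state supplies when the deferred upload actually runs.
GrDrawBatch::DeferredUploadFn make_deferred_upload(GrSurface* texture,
                                                   int width, int height,
                                                   GrPixelConfig config,
                                                   const void* pixels,
                                                   size_t rowBytes) {
    return [=](GrDrawBatch::WritePixelsFn& writePixels) {
        writePixels(texture, 0, 0, width, height, config, pixels, rowBytes);
    };
}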
diff --git a/gfx/skia/skia/src/gpu/batches/GrDrawPathBatch.cpp b/gfx/skia/skia/src/gpu/batches/GrDrawPathBatch.cpp
new file mode 100644
index 000000000..815fe7463
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrDrawPathBatch.cpp
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrDrawPathBatch.h"
+
+#include "GrRenderTargetPriv.h"
+
+static void pre_translate_transform_values(const float* xforms,
+ GrPathRendering::PathTransformType type, int count,
+ SkScalar x, SkScalar y, float* dst);
+
+void GrDrawPathBatchBase::onPrepare(GrBatchFlushState*) {
+ const GrRenderTargetPriv& rtPriv = this->pipeline()->getRenderTarget()->renderTargetPriv();
+ fStencilPassSettings.reset(GrPathRendering::GetStencilPassSettings(fFillType),
+ this->pipeline()->hasStencilClip(), rtPriv.numStencilBits());
+}
+
+SkString GrDrawPathBatch::dumpInfo() const {
+ SkString string;
+ string.printf("PATH: 0x%p", fPath.get());
+ string.append(INHERITED::dumpInfo());
+ return string;
+}
+
+void GrDrawPathBatch::onDraw(GrBatchFlushState* state) {
+ GrProgramDesc desc;
+
+ SkAutoTUnref<GrPathProcessor> pathProc(GrPathProcessor::Create(this->color(),
+ this->overrides(),
+ this->viewMatrix()));
+ state->gpu()->pathRendering()->drawPath(*this->pipeline(), *pathProc,
+ this->stencilPassSettings(), fPath.get());
+}
+
+SkString GrDrawPathRangeBatch::dumpInfo() const {
+ SkString string;
+ string.printf("RANGE: 0x%p COUNTS: [", fPathRange.get());
+ for (DrawList::Iter iter(fDraws); iter.get(); iter.next()) {
+ string.appendf("%d, ", iter.get()->fInstanceData->count());
+ }
+ string.remove(string.size() - 2, 2);
+ string.append("]");
+ string.append(INHERITED::dumpInfo());
+ return string;
+}
+
+GrDrawPathRangeBatch::GrDrawPathRangeBatch(const SkMatrix& viewMatrix, SkScalar scale, SkScalar x,
+ SkScalar y, GrColor color,
+ GrPathRendering::FillType fill, GrPathRange* range,
+ const InstanceData* instanceData, const SkRect& bounds)
+ : INHERITED(ClassID(), viewMatrix, color, fill)
+ , fPathRange(range)
+ , fTotalPathCount(instanceData->count())
+ , fScale(scale) {
+ fDraws.addToHead()->set(instanceData, x, y);
+ this->setBounds(bounds, HasAABloat::kNo, IsZeroArea::kNo);
+}
+
+bool GrDrawPathRangeBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+ GrDrawPathRangeBatch* that = t->cast<GrDrawPathRangeBatch>();
+ if (this->fPathRange.get() != that->fPathRange.get() ||
+ this->transformType() != that->transformType() ||
+ this->fScale != that->fScale ||
+ this->color() != that->color() ||
+ !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return false;
+ }
+ if (!GrPipeline::AreEqual(*this->pipeline(), *that->pipeline())) {
+ return false;
+ }
+ switch (fDraws.head()->fInstanceData->transformType()) {
+ case GrPathRendering::kNone_PathTransformType:
+ if (this->fDraws.head()->fX != that->fDraws.head()->fX ||
+ this->fDraws.head()->fY != that->fDraws.head()->fY) {
+ return false;
+ }
+ break;
+ case GrPathRendering::kTranslateX_PathTransformType:
+ if (this->fDraws.head()->fY != that->fDraws.head()->fY) {
+ return false;
+ }
+ break;
+ case GrPathRendering::kTranslateY_PathTransformType:
+ if (this->fDraws.head()->fX != that->fDraws.head()->fX) {
+ return false;
+ }
+ break;
+ default: break;
+ }
+ // TODO: Check some other things here. (winding, opaque, pathProc color, vm, ...)
+ // Try to combine this call with the previous DrawPaths. We do this by stenciling all the
+ // paths together and then covering them in a single pass. This is not equivalent to two
+ // separate draw calls, so we can only do it if there is no blending (no overlap would also
+ // work). Note that it's also possible for overlapping paths to cancel each other's winding
+ // numbers, and we only partially account for this by not allowing even/odd paths to be
+ // combined. (Glyphs in the same font tend to wind the same direction so it works out OK.)
+ if (GrPathRendering::kWinding_FillType != this->fillType() ||
+ GrPathRendering::kWinding_FillType != that->fillType() ||
+ this->overrides().willColorBlendWithDst()) {
+ return false;
+ }
+ SkASSERT(!that->overrides().willColorBlendWithDst());
+ fTotalPathCount += that->fTotalPathCount;
+ while (Draw* head = that->fDraws.head()) {
+ Draw* draw = fDraws.addToTail();
+ draw->fInstanceData.reset(head->fInstanceData.release());
+ draw->fX = head->fX;
+ draw->fY = head->fY;
+ that->fDraws.popHead();
+ }
+ this->joinBounds(*that);
+ return true;
+}
+
+void GrDrawPathRangeBatch::onDraw(GrBatchFlushState* state) {
+ const Draw& head = *fDraws.head();
+
+ SkMatrix drawMatrix(this->viewMatrix());
+ drawMatrix.preScale(fScale, fScale);
+ drawMatrix.preTranslate(head.fX, head.fY);
+
+ SkMatrix localMatrix;
+ localMatrix.setScale(fScale, fScale);
+ localMatrix.preTranslate(head.fX, head.fY);
+
+ SkAutoTUnref<GrPathProcessor> pathProc(GrPathProcessor::Create(this->color(),
+ this->overrides(),
+ drawMatrix,
+ localMatrix));
+
+ if (fDraws.count() == 1) {
+ const InstanceData& instances = *head.fInstanceData;
+ state->gpu()->pathRendering()->drawPaths(*this->pipeline(),
+ *pathProc,
+ this->stencilPassSettings(),
+ fPathRange.get(),
+ instances.indices(),
+ GrPathRange::kU16_PathIndexType,
+ instances.transformValues(),
+ instances.transformType(),
+ instances.count());
+ } else {
+ int floatsPerTransform = GrPathRendering::PathTransformSize(this->transformType());
+ SkAutoSTMalloc<4096, float> transformStorage(floatsPerTransform * fTotalPathCount);
+ SkAutoSTMalloc<2048, uint16_t> indexStorage(fTotalPathCount);
+ int idx = 0;
+ for (DrawList::Iter iter(fDraws); iter.get(); iter.next()) {
+ const Draw& draw = *iter.get();
+ const InstanceData& instances = *draw.fInstanceData;
+ memcpy(&indexStorage[idx], instances.indices(), instances.count() * sizeof(uint16_t));
+ pre_translate_transform_values(instances.transformValues(), this->transformType(),
+ instances.count(),
+ draw.fX - head.fX, draw.fY - head.fY,
+ &transformStorage[floatsPerTransform * idx]);
+ idx += instances.count();
+
+ // TODO: Support mismatched transform types if we start using more types other than 2D.
+ SkASSERT(instances.transformType() == this->transformType());
+ }
+ SkASSERT(idx == fTotalPathCount);
+
+ state->gpu()->pathRendering()->drawPaths(*this->pipeline(),
+ *pathProc,
+ this->stencilPassSettings(),
+ fPathRange.get(),
+ indexStorage,
+ GrPathRange::kU16_PathIndexType,
+ transformStorage,
+ this->transformType(),
+ fTotalPathCount);
+ }
+}
+
+inline void pre_translate_transform_values(const float* xforms,
+ GrPathRendering::PathTransformType type, int count,
+ SkScalar x, SkScalar y, float* dst) {
+ if (0 == x && 0 == y) {
+ memcpy(dst, xforms, count * GrPathRendering::PathTransformSize(type) * sizeof(float));
+ return;
+ }
+ switch (type) {
+ case GrPathRendering::kNone_PathTransformType:
+ SkFAIL("Cannot pre-translate kNone_PathTransformType.");
+ break;
+ case GrPathRendering::kTranslateX_PathTransformType:
+ SkASSERT(0 == y);
+ for (int i = 0; i < count; i++) {
+ dst[i] = xforms[i] + x;
+ }
+ break;
+ case GrPathRendering::kTranslateY_PathTransformType:
+ SkASSERT(0 == x);
+ for (int i = 0; i < count; i++) {
+ dst[i] = xforms[i] + y;
+ }
+ break;
+ case GrPathRendering::kTranslate_PathTransformType:
+ for (int i = 0; i < 2 * count; i += 2) {
+ dst[i] = xforms[i] + x;
+ dst[i + 1] = xforms[i + 1] + y;
+ }
+ break;
+ case GrPathRendering::kAffine_PathTransformType:
+ for (int i = 0; i < 6 * count; i += 6) {
+ dst[i] = xforms[i];
+ dst[i + 1] = xforms[i + 1];
+ dst[i + 2] = xforms[i] * x + xforms[i + 1] * y + xforms[i + 2];
+ dst[i + 3] = xforms[i + 3];
+ dst[i + 4] = xforms[i + 4];
+ dst[i + 5] = xforms[i + 3] * x + xforms[i + 4] * y + xforms[i + 5];
+ }
+ break;
+ default:
+ SkFAIL("Unknown transform type.");
+ break;
+ }
+}
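[Editorial note, not part of the patch] The affine case of pre_translate_transform_values() composes each 2x3 transform with a translation on the right, so only the translation column changes: with the six floats laid out row-major as [m00 m01 m02; m10 m11 m12], pre-translating by (x, y) yields m02' = m00*x + m01*y + m02 and m12' = m10*x + m11*y + m12, exactly as the loop computes. A numeric check with invented values:

// xforms = {2, 0, 5,  0, 3, 7}, pre-translate by (x, y) = (1, 2):
//   dst[2] = 2*1 + 0*2 + 5 = 7
//   dst[5] = 0*1 + 3*2 + 7 = 13
// => dst = {2, 0, 7,  0, 3, 13}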
diff --git a/gfx/skia/skia/src/gpu/batches/GrDrawPathBatch.h b/gfx/skia/skia/src/gpu/batches/GrDrawPathBatch.h
new file mode 100644
index 000000000..33bf678eb
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrDrawPathBatch.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDrawPathBatch_DEFINED
+#define GrDrawPathBatch_DEFINED
+
+#include "GrBatchFlushState.h"
+#include "GrDrawBatch.h"
+#include "GrGpu.h"
+#include "GrPath.h"
+#include "GrPathRendering.h"
+#include "GrPathProcessor.h"
+
+#include "SkTLList.h"
+
+class GrDrawPathBatchBase : public GrDrawBatch {
+public:
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ color->setKnownFourComponents(fColor);
+ coverage->setKnownSingleComponent(0xff);
+ }
+
+protected:
+ GrDrawPathBatchBase(uint32_t classID, const SkMatrix& viewMatrix, GrColor initialColor,
+ GrPathRendering::FillType fill)
+ : INHERITED(classID)
+ , fViewMatrix(viewMatrix)
+ , fColor(initialColor)
+ , fFillType(fill) {}
+
+ const GrStencilSettings& stencilPassSettings() const {
+ SkASSERT(!fStencilPassSettings.isDisabled()); // This shouldn't be called before onPrepare.
+ return fStencilPassSettings;
+ }
+ const GrXPOverridesForBatch& overrides() const { return fOverrides; }
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+ GrColor color() const { return fColor; }
+ GrPathRendering::FillType fillType() const { return fFillType; }
+
+private:
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ overrides.getOverrideColorIfSet(&fColor);
+ fOverrides = overrides;
+ }
+
+ void onPrepare(GrBatchFlushState*) override; // Initializes fStencilPassSettings.
+
+ SkMatrix fViewMatrix;
+ GrColor fColor;
+ GrPathRendering::FillType fFillType;
+ GrStencilSettings fStencilPassSettings;
+ GrXPOverridesForBatch fOverrides;
+
+ typedef GrDrawBatch INHERITED;
+};
+
+class GrDrawPathBatch final : public GrDrawPathBatchBase {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ static GrDrawBatch* Create(const SkMatrix& viewMatrix, GrColor color, const GrPath* path) {
+ return new GrDrawPathBatch(viewMatrix, color, path);
+ }
+
+ const char* name() const override { return "DrawPath"; }
+
+ SkString dumpInfo() const override;
+
+private:
+ GrDrawPathBatch(const SkMatrix& viewMatrix, GrColor color, const GrPath* path)
+ : INHERITED(ClassID(), viewMatrix, color, path->getFillType())
+ , fPath(path) {
+ this->setTransformedBounds(path->getBounds(), viewMatrix, HasAABloat::kNo, IsZeroArea::kNo);
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override { return false; }
+
+ void onDraw(GrBatchFlushState* state) override;
+
+ GrPendingIOResource<const GrPath, kRead_GrIOType> fPath;
+
+ typedef GrDrawPathBatchBase INHERITED;
+};
+
+// Template this if we decide to support index types other than 16bit
+class GrDrawPathRangeBatch final : public GrDrawPathBatchBase {
+public:
+ typedef GrPathRendering::PathTransformType TransformType;
+
+ DEFINE_BATCH_CLASS_ID
+
+ struct InstanceData : public SkNoncopyable {
+ public:
+ static InstanceData* Alloc(TransformType transformType, int reserveCnt) {
+ int transformSize = GrPathRendering::PathTransformSize(transformType);
+ uint8_t* ptr = (uint8_t*)sk_malloc_throw(Align32(sizeof(InstanceData)) +
+ Align32(reserveCnt * sizeof(uint16_t)) +
+ reserveCnt * transformSize * sizeof(float));
+ InstanceData* instanceData = (InstanceData*)ptr;
+ instanceData->fIndices = (uint16_t*)&ptr[Align32(sizeof(InstanceData))];
+ instanceData->fTransformValues = (float*)&ptr[Align32(sizeof(InstanceData)) +
+ Align32(reserveCnt * sizeof(uint16_t))];
+ instanceData->fTransformType = transformType;
+ instanceData->fInstanceCount = 0;
+ instanceData->fRefCnt = 1;
+ SkDEBUGCODE(instanceData->fReserveCnt = reserveCnt;)
+ return instanceData;
+ }
+
+ // Overload this method if we start using other transform types.
+ void append(uint16_t index, float x, float y) {
+ SkASSERT(GrPathRendering::kTranslate_PathTransformType == fTransformType);
+ SkASSERT(fInstanceCount < fReserveCnt);
+ fIndices[fInstanceCount] = index;
+ fTransformValues[2 * fInstanceCount] = x;
+ fTransformValues[2 * fInstanceCount + 1] = y;
+ ++fInstanceCount;
+ }
+
+ TransformType transformType() const { return fTransformType; }
+ int count() const { return fInstanceCount; }
+
+ const uint16_t* indices() const { return fIndices; }
+ uint16_t* indices() { return fIndices; }
+
+ const float* transformValues() const { return fTransformValues; }
+ float* transformValues() { return fTransformValues; }
+
+ void ref() const { ++fRefCnt; }
+
+ void unref() const {
+ if (0 == --fRefCnt) {
+ sk_free(const_cast<InstanceData*>(this));
+ }
+ }
+
+ private:
+ static int Align32(int sizeInBytes) { return (sizeInBytes + 3) & ~3; }
+
+ InstanceData() {}
+ ~InstanceData() {}
+
+ uint16_t* fIndices;
+ float* fTransformValues;
+ TransformType fTransformType;
+ int fInstanceCount;
+ mutable int fRefCnt;
+ SkDEBUGCODE(int fReserveCnt;)
+ };
+
+ static GrDrawBatch* Create(const SkMatrix& viewMatrix, SkScalar scale, SkScalar x, SkScalar y,
+ GrColor color, GrPathRendering::FillType fill, GrPathRange* range,
+ const InstanceData* instanceData, const SkRect& bounds) {
+ return new GrDrawPathRangeBatch(viewMatrix, scale, x, y, color, fill, range, instanceData,
+ bounds);
+ }
+
+ const char* name() const override { return "DrawPathRange"; }
+
+ SkString dumpInfo() const override;
+
+private:
+ GrDrawPathRangeBatch(const SkMatrix& viewMatrix, SkScalar scale, SkScalar x, SkScalar y,
+ GrColor color, GrPathRendering::FillType fill, GrPathRange* range,
+ const InstanceData* instanceData, const SkRect& bounds);
+
+ TransformType transformType() const { return fDraws.head()->fInstanceData->transformType(); }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override;
+
+ void onDraw(GrBatchFlushState* state) override;
+
+ struct Draw {
+ void set(const InstanceData* instanceData, SkScalar x, SkScalar y) {
+ fInstanceData.reset(SkRef(instanceData));
+ fX = x;
+ fY = y;
+ }
+
+ SkAutoTUnref<const InstanceData> fInstanceData;
+ SkScalar fX, fY;
+ };
+
+ typedef GrPendingIOResource<const GrPathRange, kRead_GrIOType> PendingPathRange;
+ typedef SkTLList<Draw, 4> DrawList;
+
+ PendingPathRange fPathRange;
+ DrawList fDraws;
+ int fTotalPathCount;
+ SkScalar fScale;
+
+ typedef GrDrawPathBatchBase INHERITED;
+};
+
+#endif
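InstanceData::Alloc above packs the header, the uint16_t index array, and the float transform values into a single sk_malloc_throw block, rounding each section up to four bytes so the arrays stay aligned. A standalone sketch of the same single-allocation layout, with hypothetical names and no Skia types:

#include <cstdint>
#include <cstdlib>

struct Blob {
    uint16_t* indices;
    float*    transforms;
    int       count;
};

static size_t align4(size_t n) { return (n + 3) & ~(size_t)3; }

// One allocation: [Blob header | padded index array | transform floats].
static Blob* alloc_blob(size_t reserveCnt, size_t floatsPerTransform) {
    size_t headerBytes = align4(sizeof(Blob));
    size_t indexBytes  = align4(reserveCnt * sizeof(uint16_t));
    uint8_t* p = (uint8_t*)malloc(headerBytes + indexBytes +
                                  reserveCnt * floatsPerTransform * sizeof(float));
    if (!p) return nullptr;
    Blob* b = (Blob*)p;
    b->indices    = (uint16_t*)(p + headerBytes);
    b->transforms = (float*)(p + headerBytes + indexBytes);
    b->count      = 0;
    return b;  // release with free(b), mirroring InstanceData::unref
}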
diff --git a/gfx/skia/skia/src/gpu/batches/GrDrawVerticesBatch.cpp b/gfx/skia/skia/src/gpu/batches/GrDrawVerticesBatch.cpp
new file mode 100644
index 000000000..e56502205
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrDrawVerticesBatch.cpp
@@ -0,0 +1,326 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrDrawVerticesBatch.h"
+
+#include "GrBatchFlushState.h"
+#include "GrInvariantOutput.h"
+#include "GrDefaultGeoProcFactory.h"
+
+static sk_sp<GrGeometryProcessor> set_vertex_attributes(bool hasLocalCoords,
+ int* colorOffset,
+ int* texOffset,
+ const SkMatrix& viewMatrix,
+ bool coverageIgnored) {
+ using namespace GrDefaultGeoProcFactory;
+ *texOffset = -1;
+ *colorOffset = -1;
+
+ Coverage coverage(coverageIgnored ? Coverage::kNone_Type : Coverage::kSolid_Type);
+ LocalCoords localCoords(hasLocalCoords ? LocalCoords::kHasExplicit_Type :
+ LocalCoords::kUsePosition_Type);
+ *colorOffset = sizeof(SkPoint);
+ if (hasLocalCoords) {
+ *texOffset = sizeof(SkPoint) + sizeof(GrColor);
+ }
+ return GrDefaultGeoProcFactory::Make(Color(Color::kAttribute_Type),
+ coverage, localCoords, viewMatrix);
+}
+
+GrDrawVerticesBatch::GrDrawVerticesBatch(GrColor color, GrPrimitiveType primitiveType,
+ const SkMatrix& viewMatrix,
+ const SkPoint* positions, int vertexCount,
+ const uint16_t* indices, int indexCount,
+ const GrColor* colors, const SkPoint* localCoords,
+ const SkRect& bounds)
+ : INHERITED(ClassID()) {
+ SkASSERT(positions);
+
+ fViewMatrix = viewMatrix;
+ Mesh& mesh = fMeshes.push_back();
+ mesh.fColor = color;
+
+ mesh.fPositions.append(vertexCount, positions);
+ if (indices) {
+ mesh.fIndices.append(indexCount, indices);
+ }
+
+ if (colors) {
+ fVariableColor = true;
+ mesh.fColors.append(vertexCount, colors);
+ } else {
+ fVariableColor = false;
+ }
+
+ if (localCoords) {
+ mesh.fLocalCoords.append(vertexCount, localCoords);
+ }
+ fVertexCount = vertexCount;
+ fIndexCount = indexCount;
+ fPrimitiveType = primitiveType;
+
+ IsZeroArea zeroArea;
+ if (GrIsPrimTypeLines(primitiveType) || kPoints_GrPrimitiveType == primitiveType) {
+ zeroArea = IsZeroArea::kYes;
+ } else {
+ zeroArea = IsZeroArea::kNo;
+ }
+ this->setBounds(bounds, HasAABloat::kNo, zeroArea);
+}
+
+void GrDrawVerticesBatch::computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const {
+ // When this is called on a batch, there is only one mesh
+ if (fVariableColor) {
+ color->setUnknownFourComponents();
+ } else {
+ color->setKnownFourComponents(fMeshes[0].fColor);
+ }
+ coverage->setKnownSingleComponent(0xff);
+}
+
+void GrDrawVerticesBatch::initBatchTracker(const GrXPOverridesForBatch& overrides) {
+ SkASSERT(fMeshes.count() == 1);
+ GrColor overrideColor;
+ if (overrides.getOverrideColorIfSet(&overrideColor)) {
+ fMeshes[0].fColor = overrideColor;
+ fMeshes[0].fColors.reset();
+ fVariableColor = false;
+ }
+ fCoverageIgnored = !overrides.readsCoverage();
+ if (!overrides.readsLocalCoords()) {
+ fMeshes[0].fLocalCoords.reset();
+ }
+}
+
+void GrDrawVerticesBatch::onPrepareDraws(Target* target) const {
+ bool hasLocalCoords = !fMeshes[0].fLocalCoords.isEmpty();
+ int colorOffset = -1, texOffset = -1;
+ sk_sp<GrGeometryProcessor> gp(set_vertex_attributes(hasLocalCoords, &colorOffset, &texOffset,
+ fViewMatrix, fCoverageIgnored));
+ size_t vertexStride = gp->getVertexStride();
+
+ SkASSERT(vertexStride == sizeof(SkPoint) + (hasLocalCoords ? sizeof(SkPoint) : 0)
+ + sizeof(GrColor));
+
+ int instanceCount = fMeshes.count();
+
+ const GrBuffer* vertexBuffer;
+ int firstVertex;
+
+ void* verts = target->makeVertexSpace(vertexStride, fVertexCount, &vertexBuffer, &firstVertex);
+
+ if (!verts) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ const GrBuffer* indexBuffer = nullptr;
+ int firstIndex = 0;
+
+ uint16_t* indices = nullptr;
+ if (!fMeshes[0].fIndices.isEmpty()) {
+ indices = target->makeIndexSpace(fIndexCount, &indexBuffer, &firstIndex);
+
+ if (!indices) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+ }
+
+ int indexOffset = 0;
+ int vertexOffset = 0;
+ for (int i = 0; i < instanceCount; i++) {
+ const Mesh& mesh = fMeshes[i];
+
+        // TODO: we could cache this data already interleaved and then just memcpy it
+ if (indices) {
+ for (int j = 0; j < mesh.fIndices.count(); ++j, ++indexOffset) {
+ *(indices + indexOffset) = mesh.fIndices[j] + vertexOffset;
+ }
+ }
+
+ for (int j = 0; j < mesh.fPositions.count(); ++j) {
+ *((SkPoint*)verts) = mesh.fPositions[j];
+ if (mesh.fColors.isEmpty()) {
+ *(GrColor*)((intptr_t)verts + colorOffset) = mesh.fColor;
+ } else {
+ *(GrColor*)((intptr_t)verts + colorOffset) = mesh.fColors[j];
+ }
+ if (hasLocalCoords) {
+ *(SkPoint*)((intptr_t)verts + texOffset) = mesh.fLocalCoords[j];
+ }
+ verts = (void*)((intptr_t)verts + vertexStride);
+ vertexOffset++;
+ }
+ }
+
+ GrMesh mesh;
+ if (indices) {
+ mesh.initIndexed(this->primitiveType(), vertexBuffer, indexBuffer, firstVertex,
+ firstIndex, fVertexCount, fIndexCount);
+
+ } else {
+ mesh.init(this->primitiveType(), vertexBuffer, firstVertex, fVertexCount);
+ }
+ target->draw(gp.get(), mesh);
+}
+
+bool GrDrawVerticesBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+ GrDrawVerticesBatch* that = t->cast<GrDrawVerticesBatch>();
+
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ if (!this->batchablePrimitiveType() || this->primitiveType() != that->primitiveType()) {
+ return false;
+ }
+
+ // We currently use a uniform viewmatrix for this batch
+ if (!fViewMatrix.cheapEqualTo(that->fViewMatrix)) {
+ return false;
+ }
+
+ if (fMeshes[0].fIndices.isEmpty() != that->fMeshes[0].fIndices.isEmpty()) {
+ return false;
+ }
+
+ if (fMeshes[0].fLocalCoords.isEmpty() != that->fMeshes[0].fLocalCoords.isEmpty()) {
+ return false;
+ }
+
+ if (!fVariableColor) {
+ if (that->fVariableColor || that->fMeshes[0].fColor != fMeshes[0].fColor) {
+ fVariableColor = true;
+ }
+ }
+
+ fMeshes.push_back_n(that->fMeshes.count(), that->fMeshes.begin());
+ fVertexCount += that->fVertexCount;
+ fIndexCount += that->fIndexCount;
+
+ this->joinBounds(*that);
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_TEST_UTILS
+
+#include "GrBatchTest.h"
+
+static uint32_t seed_vertices(GrPrimitiveType type) {
+ switch (type) {
+ case kTriangles_GrPrimitiveType:
+ case kTriangleStrip_GrPrimitiveType:
+ case kTriangleFan_GrPrimitiveType:
+ return 3;
+ case kPoints_GrPrimitiveType:
+ return 1;
+ case kLines_GrPrimitiveType:
+ case kLineStrip_GrPrimitiveType:
+ return 2;
+ }
+ SkFAIL("Incomplete switch\n");
+ return 0;
+}
+
+static uint32_t primitive_vertices(GrPrimitiveType type) {
+ switch (type) {
+ case kTriangles_GrPrimitiveType:
+ return 3;
+ case kLines_GrPrimitiveType:
+ return 2;
+ case kTriangleStrip_GrPrimitiveType:
+ case kTriangleFan_GrPrimitiveType:
+ case kPoints_GrPrimitiveType:
+ case kLineStrip_GrPrimitiveType:
+ return 1;
+ }
+ SkFAIL("Incomplete switch\n");
+ return 0;
+}
+
+static SkPoint random_point(SkRandom* random, SkScalar min, SkScalar max) {
+ SkPoint p;
+ p.fX = random->nextRangeScalar(min, max);
+ p.fY = random->nextRangeScalar(min, max);
+ return p;
+}
+
+static void randomize_params(size_t count, size_t maxVertex, SkScalar min, SkScalar max,
+ SkRandom* random,
+ SkTArray<SkPoint>* positions,
+ SkTArray<SkPoint>* texCoords, bool hasTexCoords,
+ SkTArray<GrColor>* colors, bool hasColors,
+ SkTArray<uint16_t>* indices, bool hasIndices) {
+ for (uint32_t v = 0; v < count; v++) {
+ positions->push_back(random_point(random, min, max));
+ if (hasTexCoords) {
+ texCoords->push_back(random_point(random, min, max));
+ }
+ if (hasColors) {
+ colors->push_back(GrRandomColor(random));
+ }
+ if (hasIndices) {
+ SkASSERT(maxVertex <= SK_MaxU16);
+ indices->push_back(random->nextULessThan((uint16_t)maxVertex));
+ }
+ }
+}
+
+DRAW_BATCH_TEST_DEFINE(VerticesBatch) {
+ GrPrimitiveType type = GrPrimitiveType(random->nextULessThan(kLast_GrPrimitiveType + 1));
+ uint32_t primitiveCount = random->nextRangeU(1, 100);
+
+ // TODO make 'sensible' indexbuffers
+ SkTArray<SkPoint> positions;
+ SkTArray<SkPoint> texCoords;
+ SkTArray<GrColor> colors;
+ SkTArray<uint16_t> indices;
+
+ bool hasTexCoords = random->nextBool();
+ bool hasIndices = random->nextBool();
+ bool hasColors = random->nextBool();
+
+ uint32_t vertexCount = seed_vertices(type) + (primitiveCount - 1) * primitive_vertices(type);
+
+ static const SkScalar kMinVertExtent = -100.f;
+ static const SkScalar kMaxVertExtent = 100.f;
+ randomize_params(seed_vertices(type), vertexCount, kMinVertExtent, kMaxVertExtent,
+ random,
+ &positions,
+ &texCoords, hasTexCoords,
+ &colors, hasColors,
+ &indices, hasIndices);
+
+ for (uint32_t i = 1; i < primitiveCount; i++) {
+ randomize_params(primitive_vertices(type), vertexCount, kMinVertExtent, kMaxVertExtent,
+ random,
+ &positions,
+ &texCoords, hasTexCoords,
+ &colors, hasColors,
+ &indices, hasIndices);
+ }
+
+ SkMatrix viewMatrix = GrTest::TestMatrix(random);
+ SkRect bounds;
+ SkDEBUGCODE(bool result = ) bounds.setBoundsCheck(positions.begin(), vertexCount);
+ SkASSERT(result);
+
+ viewMatrix.mapRect(&bounds);
+
+ GrColor color = GrRandomColor(random);
+ return new GrDrawVerticesBatch(color, type, viewMatrix, positions.begin(), vertexCount,
+ indices.begin(), hasIndices ? vertexCount : 0,
+ colors.begin(), texCoords.begin(), bounds);
+}
+
+#endif
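onPrepareDraws above writes every mesh into one interleaved vertex buffer (position, then color, then the optional local coordinate) and rebases each mesh's indices by the running vertex count. A small standalone sketch of that interleaving and rebasing, assuming a position-plus-color layout and hypothetical names (stride must be at least sizeof(Pt) + sizeof(uint32_t)):

#include <cstdint>
#include <cstring>
#include <vector>

struct Pt { float x, y; };

// Append one mesh to the combined buffers, offsetting its indices by the number
// of vertices already written (the role of 'vertexOffset' in the batch above).
static void append_mesh(std::vector<uint8_t>* verts, std::vector<uint16_t>* indices,
                        size_t stride,
                        const Pt* pos, const uint32_t* colors, int vertexCount,
                        const uint16_t* idx, int indexCount) {
    uint16_t base = (uint16_t)(verts->size() / stride);
    for (int i = 0; i < indexCount; ++i) {
        indices->push_back((uint16_t)(idx[i] + base));
    }
    size_t old = verts->size();
    verts->resize(old + stride * vertexCount);
    uint8_t* p = verts->data() + old;
    for (int i = 0; i < vertexCount; ++i, p += stride) {
        memcpy(p, &pos[i], sizeof(Pt));                        // position at offset 0
        memcpy(p + sizeof(Pt), &colors[i], sizeof(uint32_t));  // color right after it
    }
}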
diff --git a/gfx/skia/skia/src/gpu/batches/GrDrawVerticesBatch.h b/gfx/skia/skia/src/gpu/batches/GrDrawVerticesBatch.h
new file mode 100644
index 000000000..9665c1a90
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrDrawVerticesBatch.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDrawVerticesBatch_DEFINED
+#define GrDrawVerticesBatch_DEFINED
+
+#include "GrColor.h"
+#include "GrTypes.h"
+#include "GrVertexBatch.h"
+#include "SkMatrix.h"
+#include "SkRect.h"
+#include "SkTDArray.h"
+
+class GrBatchFlushState;
+struct GrInitInvariantOutput;
+
+class GrDrawVerticesBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+
+ GrDrawVerticesBatch(GrColor color, GrPrimitiveType primitiveType,
+ const SkMatrix& viewMatrix,
+ const SkPoint* positions, int vertexCount,
+ const uint16_t* indices, int indexCount,
+ const GrColor* colors, const SkPoint* localCoords, const SkRect& bounds);
+
+ const char* name() const override { return "DrawVerticesBatch"; }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override;
+
+private:
+ void onPrepareDraws(Target*) const override;
+ void initBatchTracker(const GrXPOverridesForBatch&) override;
+
+ GrPrimitiveType primitiveType() const { return fPrimitiveType; }
+ bool batchablePrimitiveType() const {
+ return kTriangles_GrPrimitiveType == fPrimitiveType ||
+ kLines_GrPrimitiveType == fPrimitiveType ||
+ kPoints_GrPrimitiveType == fPrimitiveType;
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
+
+ struct Mesh {
+ GrColor fColor; // Only used if there are no per-vertex colors
+ SkTDArray<SkPoint> fPositions;
+ SkTDArray<uint16_t> fIndices;
+ SkTDArray<GrColor> fColors;
+ SkTDArray<SkPoint> fLocalCoords;
+ };
+
+ GrPrimitiveType fPrimitiveType;
+ SkMatrix fViewMatrix;
+ bool fVariableColor;
+ int fVertexCount;
+ int fIndexCount;
+ bool fCoverageIgnored; // comes from initBatchTracker.
+
+ SkSTArray<1, Mesh, true> fMeshes;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrMSAAPathRenderer.cpp b/gfx/skia/skia/src/gpu/batches/GrMSAAPathRenderer.cpp
new file mode 100644
index 000000000..d01323535
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrMSAAPathRenderer.cpp
@@ -0,0 +1,716 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrMSAAPathRenderer.h"
+
+#include "GrAuditTrail.h"
+#include "GrBatchFlushState.h"
+#include "GrClip.h"
+#include "GrDefaultGeoProcFactory.h"
+#include "GrFixedClip.h"
+#include "GrPathStencilSettings.h"
+#include "GrPathUtils.h"
+#include "GrPipelineBuilder.h"
+#include "GrMesh.h"
+#include "SkGeometry.h"
+#include "SkTraceEvent.h"
+#include "glsl/GrGLSLGeometryProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLVertexShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUtil.h"
+#include "gl/GrGLVaryingHandler.h"
+#include "batches/GrRectBatchFactory.h"
+#include "batches/GrVertexBatch.h"
+
+static const float kTolerance = 0.5f;
+
+////////////////////////////////////////////////////////////////////////////////
+// Helpers for drawPath
+
+static inline bool single_pass_shape(const GrShape& shape) {
+ if (!shape.inverseFilled()) {
+ return shape.knownToBeConvex();
+ }
+ return false;
+}
+
+GrPathRenderer::StencilSupport GrMSAAPathRenderer::onGetStencilSupport(const GrShape& shape) const {
+ if (single_pass_shape(shape)) {
+ return GrPathRenderer::kNoRestriction_StencilSupport;
+ } else {
+ return GrPathRenderer::kStencilOnly_StencilSupport;
+ }
+}
+
+struct MSAALineVertices {
+ struct Vertex {
+ SkPoint fPosition;
+ SkColor fColor;
+ };
+ Vertex* vertices;
+ Vertex* nextVertex;
+#ifdef SK_DEBUG
+ Vertex* verticesEnd;
+#endif
+ uint16_t* indices;
+ uint16_t* nextIndex;
+};
+
+struct MSAAQuadVertices {
+ struct Vertex {
+ SkPoint fPosition;
+ SkPoint fUV;
+ SkColor fColor;
+ };
+ Vertex* vertices;
+ Vertex* nextVertex;
+#ifdef SK_DEBUG
+ Vertex* verticesEnd;
+#endif
+ uint16_t* indices;
+ uint16_t* nextIndex;
+};
+
+static inline void append_contour_edge_indices(uint16_t fanCenterIdx,
+ uint16_t edgeV0Idx,
+ MSAALineVertices& lines) {
+ *(lines.nextIndex++) = fanCenterIdx;
+ *(lines.nextIndex++) = edgeV0Idx;
+ *(lines.nextIndex++) = edgeV0Idx + 1;
+}
+
+static inline void add_quad(MSAALineVertices& lines, MSAAQuadVertices& quads, const SkPoint pts[],
+ SkColor color, bool indexed, uint16_t subpathLineIdxStart) {
+ SkASSERT(lines.nextVertex < lines.verticesEnd);
+ *lines.nextVertex = { pts[2], color };
+ if (indexed) {
+ int prevIdx = (uint16_t) (lines.nextVertex - lines.vertices - 1);
+ if (prevIdx > subpathLineIdxStart) {
+ append_contour_edge_indices(subpathLineIdxStart, prevIdx, lines);
+ }
+ }
+ lines.nextVertex++;
+
+ SkASSERT(quads.nextVertex + 2 < quads.verticesEnd);
+    // the texture coordinates come from the Loop-Blinn curve rendering algorithm
+ *(quads.nextVertex++) = { pts[0], SkPoint::Make(0.0, 0.0), color };
+ *(quads.nextVertex++) = { pts[1], SkPoint::Make(0.5, 0.0), color };
+ *(quads.nextVertex++) = { pts[2], SkPoint::Make(1.0, 1.0), color };
+ if (indexed) {
+ uint16_t offset = (uint16_t) (quads.nextVertex - quads.vertices) - 3;
+ *(quads.nextIndex++) = offset++;
+ *(quads.nextIndex++) = offset++;
+ *(quads.nextIndex++) = offset++;
+ }
+}
+
+class MSAAQuadProcessor : public GrGeometryProcessor {
+public:
+ static GrGeometryProcessor* Create(const SkMatrix& viewMatrix) {
+ return new MSAAQuadProcessor(viewMatrix);
+ }
+
+ virtual ~MSAAQuadProcessor() {}
+
+ const char* name() const override { return "MSAAQuadProcessor"; }
+
+ const Attribute* inPosition() const { return fInPosition; }
+ const Attribute* inUV() const { return fInUV; }
+ const Attribute* inColor() const { return fInColor; }
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+
+ class GLSLProcessor : public GrGLSLGeometryProcessor {
+ public:
+ GLSLProcessor(const GrGeometryProcessor& qpr) {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const MSAAQuadProcessor& qp = args.fGP.cast<MSAAQuadProcessor>();
+ GrGLSLVertexBuilder* vsBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(qp);
+ varyingHandler->addPassThroughAttribute(qp.inColor(), args.fOutputColor);
+
+ GrGLSLVertToFrag uv(kVec2f_GrSLType);
+ varyingHandler->addVarying("uv", &uv, kHigh_GrSLPrecision);
+ vsBuilder->codeAppendf("%s = %s;", uv.vsOut(), qp.inUV()->fName);
+
+ // Setup position
+ this->setupPosition(vsBuilder, uniformHandler, gpArgs, qp.inPosition()->fName,
+ qp.viewMatrix(), &fViewMatrixUniform);
+
+ // emit transforms
+ this->emitTransforms(vsBuilder, varyingHandler, uniformHandler, gpArgs->fPositionVar,
+ qp.inPosition()->fName, SkMatrix::I(),
+ args.fFPCoordTransformHandler);
+
+ GrGLSLPPFragmentBuilder* fsBuilder = args.fFragBuilder;
+ fsBuilder->codeAppendf("if (%s.x * %s.x >= %s.y) discard;", uv.fsIn(), uv.fsIn(),
+ uv.fsIn());
+ fsBuilder->codeAppendf("%s = vec4(1.0);", args.fOutputCoverage);
+ }
+
+ static inline void GenKey(const GrGeometryProcessor& gp,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const MSAAQuadProcessor& qp = gp.cast<MSAAQuadProcessor>();
+ uint32_t key = 0;
+ key |= qp.viewMatrix().hasPerspective() ? 0x1 : 0x0;
+ key |= qp.viewMatrix().isIdentity() ? 0x2: 0x0;
+ b->add32(key);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& gp,
+ FPCoordTransformIter&& transformIter) override {
+ const MSAAQuadProcessor& qp = gp.cast<MSAAQuadProcessor>();
+ if (!qp.viewMatrix().isIdentity()) {
+ float viewMatrix[3 * 3];
+ GrGLSLGetMatrix<3>(viewMatrix, qp.viewMatrix());
+ pdman.setMatrix3f(fViewMatrixUniform, viewMatrix);
+ }
+ this->setTransformDataHelper(SkMatrix::I(), pdman, &transformIter);
+ }
+
+ private:
+ typedef GrGLSLGeometryProcessor INHERITED;
+
+ UniformHandle fViewMatrixUniform;
+ };
+
+ virtual void getGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const override {
+ GLSLProcessor::GenKey(*this, caps, b);
+ }
+
+ virtual GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override {
+ return new GLSLProcessor(*this);
+ }
+
+private:
+ MSAAQuadProcessor(const SkMatrix& viewMatrix)
+ : fViewMatrix(viewMatrix) {
+ this->initClassID<MSAAQuadProcessor>();
+ fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ fInUV = &this->addVertexAttrib("inUV", kVec2f_GrVertexAttribType, kHigh_GrSLPrecision);
+ fInColor = &this->addVertexAttrib("inColor", kVec4ub_GrVertexAttribType);
+ this->setSampleShading(1.0f);
+ }
+
+ const Attribute* fInPosition;
+ const Attribute* fInUV;
+ const Attribute* fInColor;
+ SkMatrix fViewMatrix;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+class MSAAPathBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ MSAAPathBatch(GrColor color, const SkPath& path, const SkMatrix& viewMatrix,
+ const SkRect& devBounds)
+ : INHERITED(ClassID())
+ , fViewMatrix(viewMatrix) {
+ fPaths.emplace_back(PathInfo{color, path});
+ this->setBounds(devBounds, HasAABloat::kNo, IsZeroArea::kNo);
+ int contourCount;
+ this->computeWorstCasePointCount(path, &contourCount, &fMaxLineVertices, &fMaxQuadVertices);
+ fMaxLineIndices = fMaxLineVertices * 3;
+ fMaxQuadIndices = fMaxQuadVertices * 3;
+ fIsIndexed = contourCount > 1;
+ }
+
+ const char* name() const override { return "MSAAPathBatch"; }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one path
+ color->setKnownFourComponents(fPaths[0].fColor);
+ coverage->setKnownSingleComponent(0xff);
+ }
+
+ bool isValid() const {
+ return !fIsIndexed || fMaxLineIndices <= SK_MaxU16;
+ }
+
+private:
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ // Handle any color overrides
+ if (!overrides.readsColor()) {
+ fPaths[0].fColor = GrColor_ILLEGAL;
+ }
+ overrides.getOverrideColorIfSet(&fPaths[0].fColor);
+ }
+
+ void computeWorstCasePointCount(const SkPath& path, int* subpaths, int* outLinePointCount,
+ int* outQuadPointCount) const {
+ int linePointCount = 0;
+ int quadPointCount = 0;
+ *subpaths = 1;
+
+ bool first = true;
+
+ SkPath::Iter iter(path, true);
+ SkPath::Verb verb;
+
+ SkPoint pts[4];
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kLine_Verb:
+ linePointCount += 1;
+ break;
+ case SkPath::kConic_Verb: {
+ SkScalar weight = iter.conicWeight();
+ SkAutoConicToQuads converter;
+ converter.computeQuads(pts, weight, kTolerance);
+ int quadPts = converter.countQuads();
+ linePointCount += quadPts;
+ quadPointCount += 3 * quadPts;
+ }
+ case SkPath::kQuad_Verb:
+ linePointCount += 1;
+ quadPointCount += 3;
+ break;
+ case SkPath::kCubic_Verb: {
+ SkSTArray<15, SkPoint, true> quadPts;
+ GrPathUtils::convertCubicToQuads(pts, kTolerance, &quadPts);
+ int count = quadPts.count();
+ linePointCount += count / 3;
+ quadPointCount += count;
+ break;
+ }
+ case SkPath::kMove_Verb:
+ linePointCount += 1;
+ if (!first) {
+ ++(*subpaths);
+ }
+ break;
+ default:
+ break;
+ }
+ first = false;
+ }
+ *outLinePointCount = linePointCount;
+ *outQuadPointCount = quadPointCount;
+ }
+
+ void onPrepareDraws(Target* target) const override {
+ SkASSERT(this->isValid());
+ if (fMaxLineVertices == 0) {
+ SkASSERT(fMaxQuadVertices == 0);
+ return;
+ }
+
+ GrPrimitiveType primitiveType = fIsIndexed ? kTriangles_GrPrimitiveType
+ : kTriangleFan_GrPrimitiveType;
+
+ // allocate vertex / index buffers
+ const GrBuffer* lineVertexBuffer;
+ int firstLineVertex;
+ MSAALineVertices lines;
+ size_t lineVertexStride = sizeof(MSAALineVertices::Vertex);
+ lines.vertices = (MSAALineVertices::Vertex*) target->makeVertexSpace(lineVertexStride,
+ fMaxLineVertices,
+ &lineVertexBuffer,
+ &firstLineVertex);
+ if (!lines.vertices) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+ lines.nextVertex = lines.vertices;
+ SkDEBUGCODE(lines.verticesEnd = lines.vertices + fMaxLineVertices;)
+
+ MSAAQuadVertices quads;
+ size_t quadVertexStride = sizeof(MSAAQuadVertices::Vertex);
+ SkAutoFree quadVertexPtr(sk_malloc_throw(fMaxQuadVertices * quadVertexStride));
+ quads.vertices = (MSAAQuadVertices::Vertex*) quadVertexPtr.get();
+ quads.nextVertex = quads.vertices;
+ SkDEBUGCODE(quads.verticesEnd = quads.vertices + fMaxQuadVertices;)
+
+ const GrBuffer* lineIndexBuffer = nullptr;
+ int firstLineIndex;
+ if (fIsIndexed) {
+ lines.indices = target->makeIndexSpace(fMaxLineIndices, &lineIndexBuffer,
+ &firstLineIndex);
+ if (!lines.indices) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+ lines.nextIndex = lines.indices;
+ } else {
+ lines.indices = nullptr;
+ lines.nextIndex = nullptr;
+ }
+
+ SkAutoFree quadIndexPtr;
+ if (fIsIndexed) {
+ quads.indices = (uint16_t*) sk_malloc_throw(fMaxQuadIndices * sizeof(uint16_t));
+ quadIndexPtr.set(quads.indices);
+ quads.nextIndex = quads.indices;
+ } else {
+ quads.indices = nullptr;
+ quads.nextIndex = nullptr;
+ }
+
+ // fill buffers
+ for (int i = 0; i < fPaths.count(); i++) {
+ const PathInfo& pathInfo = fPaths[i];
+
+ if (!this->createGeom(lines,
+ quads,
+ pathInfo.fPath,
+ fViewMatrix,
+ pathInfo.fColor,
+ fIsIndexed)) {
+ return;
+ }
+ }
+ int lineVertexOffset = (int) (lines.nextVertex - lines.vertices);
+ int lineIndexOffset = (int) (lines.nextIndex - lines.indices);
+ SkASSERT(lineVertexOffset <= fMaxLineVertices && lineIndexOffset <= fMaxLineIndices);
+ int quadVertexOffset = (int) (quads.nextVertex - quads.vertices);
+ int quadIndexOffset = (int) (quads.nextIndex - quads.indices);
+ SkASSERT(quadVertexOffset <= fMaxQuadVertices && quadIndexOffset <= fMaxQuadIndices);
+
+ if (lineVertexOffset) {
+ sk_sp<GrGeometryProcessor> lineGP;
+ {
+ using namespace GrDefaultGeoProcFactory;
+ lineGP = GrDefaultGeoProcFactory::Make(Color(Color::kAttribute_Type),
+ Coverage(255),
+ LocalCoords(LocalCoords::kUnused_Type),
+ fViewMatrix);
+ }
+ SkASSERT(lineVertexStride == lineGP->getVertexStride());
+
+ GrMesh lineMeshes;
+ if (fIsIndexed) {
+ lineMeshes.initIndexed(primitiveType, lineVertexBuffer, lineIndexBuffer,
+ firstLineVertex, firstLineIndex, lineVertexOffset,
+ lineIndexOffset);
+ } else {
+ lineMeshes.init(primitiveType, lineVertexBuffer, firstLineVertex,
+ lineVertexOffset);
+ }
+ target->draw(lineGP.get(), lineMeshes);
+ }
+
+ if (quadVertexOffset) {
+ SkAutoTUnref<const GrGeometryProcessor> quadGP(MSAAQuadProcessor::Create(fViewMatrix));
+ SkASSERT(quadVertexStride == quadGP->getVertexStride());
+
+ const GrBuffer* quadVertexBuffer;
+ int firstQuadVertex;
+ MSAAQuadVertices::Vertex* quadVertices = (MSAAQuadVertices::Vertex*)
+ target->makeVertexSpace(quadVertexStride, quadVertexOffset, &quadVertexBuffer,
+ &firstQuadVertex);
+ memcpy(quadVertices, quads.vertices, quadVertexStride * quadVertexOffset);
+ GrMesh quadMeshes;
+ if (fIsIndexed) {
+ const GrBuffer* quadIndexBuffer;
+ int firstQuadIndex;
+ uint16_t* quadIndices = (uint16_t*) target->makeIndexSpace(quadIndexOffset,
+ &quadIndexBuffer,
+ &firstQuadIndex);
+ memcpy(quadIndices, quads.indices, sizeof(uint16_t) * quadIndexOffset);
+ quadMeshes.initIndexed(kTriangles_GrPrimitiveType, quadVertexBuffer,
+ quadIndexBuffer, firstQuadVertex, firstQuadIndex,
+ quadVertexOffset, quadIndexOffset);
+ } else {
+ quadMeshes.init(kTriangles_GrPrimitiveType, quadVertexBuffer, firstQuadVertex,
+ quadVertexOffset);
+ }
+ target->draw(quadGP, quadMeshes);
+ }
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ MSAAPathBatch* that = t->cast<MSAAPathBatch>();
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ if (!fViewMatrix.cheapEqualTo(that->fViewMatrix)) {
+ return false;
+ }
+
+ if ((fMaxLineIndices + that->fMaxLineIndices > SK_MaxU16) ||
+ (fMaxQuadIndices + that->fMaxQuadIndices > SK_MaxU16)) {
+ return false;
+ }
+
+ fPaths.push_back_n(that->fPaths.count(), that->fPaths.begin());
+ this->joinBounds(*that);
+ fIsIndexed = true;
+ fMaxLineVertices += that->fMaxLineVertices;
+ fMaxQuadVertices += that->fMaxQuadVertices;
+ fMaxLineIndices += that->fMaxLineIndices;
+ fMaxQuadIndices += that->fMaxQuadIndices;
+ return true;
+ }
+
+ bool createGeom(MSAALineVertices& lines,
+ MSAAQuadVertices& quads,
+ const SkPath& path,
+ const SkMatrix& m,
+ SkColor color,
+ bool isIndexed) const {
+ {
+ uint16_t subpathIdxStart = (uint16_t) (lines.nextVertex - lines.vertices);
+
+ SkPoint pts[4];
+
+ bool first = true;
+ SkPath::Iter iter(path, true);
+
+ bool done = false;
+ while (!done) {
+ SkPath::Verb verb = iter.next(pts);
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ if (!first) {
+ uint16_t currIdx = (uint16_t) (lines.nextVertex - lines.vertices);
+ subpathIdxStart = currIdx;
+ }
+ SkASSERT(lines.nextVertex < lines.verticesEnd);
+ *(lines.nextVertex++) = { pts[0], color };
+ break;
+ case SkPath::kLine_Verb:
+ if (isIndexed) {
+ uint16_t prevIdx = (uint16_t) (lines.nextVertex - lines.vertices - 1);
+ if (prevIdx > subpathIdxStart) {
+ append_contour_edge_indices(subpathIdxStart, prevIdx, lines);
+ }
+ }
+ SkASSERT(lines.nextVertex < lines.verticesEnd);
+ *(lines.nextVertex++) = { pts[1], color };
+ break;
+ case SkPath::kConic_Verb: {
+ SkScalar weight = iter.conicWeight();
+ SkAutoConicToQuads converter;
+ const SkPoint* quadPts = converter.computeQuads(pts, weight, kTolerance);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ add_quad(lines, quads, quadPts + i * 2, color, isIndexed,
+ subpathIdxStart);
+ }
+ break;
+ }
+ case SkPath::kQuad_Verb: {
+ add_quad(lines, quads, pts, color, isIndexed, subpathIdxStart);
+ break;
+ }
+ case SkPath::kCubic_Verb: {
+ SkSTArray<15, SkPoint, true> quadPts;
+ GrPathUtils::convertCubicToQuads(pts, kTolerance, &quadPts);
+ int count = quadPts.count();
+ for (int i = 0; i < count; i += 3) {
+ add_quad(lines, quads, &quadPts[i], color, isIndexed, subpathIdxStart);
+ }
+ break;
+ }
+ case SkPath::kClose_Verb:
+ break;
+ case SkPath::kDone_Verb:
+ done = true;
+ }
+ first = false;
+ }
+ }
+ return true;
+ }
+
+ struct PathInfo {
+ GrColor fColor;
+ SkPath fPath;
+ };
+
+ SkSTArray<1, PathInfo, true> fPaths;
+
+ SkMatrix fViewMatrix;
+ int fMaxLineVertices;
+ int fMaxQuadVertices;
+ int fMaxLineIndices;
+ int fMaxQuadIndices;
+ bool fIsIndexed;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+bool GrMSAAPathRenderer::internalDrawPath(GrDrawContext* drawContext,
+ const GrPaint& paint,
+ const GrUserStencilSettings& userStencilSettings,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const GrShape& shape,
+ bool stencilOnly) {
+ SkASSERT(shape.style().isSimpleFill());
+ SkPath path;
+ shape.asPath(&path);
+
+ static const int kMaxNumPasses = 2;
+
+ int passCount = 0;
+ const GrUserStencilSettings* passes[kMaxNumPasses];
+ bool reverse = false;
+ bool lastPassIsBounds;
+
+ if (single_pass_shape(shape)) {
+ passCount = 1;
+ if (stencilOnly) {
+ passes[0] = &gDirectToStencil;
+ } else {
+ passes[0] = &userStencilSettings;
+ }
+ lastPassIsBounds = false;
+ } else {
+ switch (path.getFillType()) {
+ case SkPath::kInverseEvenOdd_FillType:
+ reverse = true;
+ // fallthrough
+ case SkPath::kEvenOdd_FillType:
+ passes[0] = &gEOStencilPass;
+ if (stencilOnly) {
+ passCount = 1;
+ lastPassIsBounds = false;
+ } else {
+ passCount = 2;
+ lastPassIsBounds = true;
+ if (reverse) {
+ passes[1] = &gInvEOColorPass;
+ } else {
+ passes[1] = &gEOColorPass;
+ }
+ }
+ break;
+
+ case SkPath::kInverseWinding_FillType:
+ reverse = true;
+ // fallthrough
+ case SkPath::kWinding_FillType:
+ passes[0] = &gWindStencilSeparateWithWrap;
+ passCount = 2;
+ if (stencilOnly) {
+ lastPassIsBounds = false;
+ passCount = 1;
+ } else {
+ lastPassIsBounds = true;
+ if (reverse) {
+ passes[1] = &gInvWindColorPass;
+ } else {
+ passes[1] = &gWindColorPass;
+ }
+ }
+ break;
+ default:
+                SkDEBUGFAIL("Unknown path fill type!");
+ return false;
+ }
+ }
+
+ SkRect devBounds;
+ GetPathDevBounds(path, drawContext->width(), drawContext->height(), viewMatrix, &devBounds);
+
+ SkASSERT(passCount <= kMaxNumPasses);
+
+ for (int p = 0; p < passCount; ++p) {
+ if (lastPassIsBounds && (p == passCount-1)) {
+ SkRect bounds;
+ SkMatrix localMatrix = SkMatrix::I();
+ if (reverse) {
+ // draw over the dev bounds (which will be the whole dst surface for inv fill).
+ bounds = devBounds;
+ SkMatrix vmi;
+ // mapRect through persp matrix may not be correct
+ if (!viewMatrix.hasPerspective() && viewMatrix.invert(&vmi)) {
+ vmi.mapRect(&bounds);
+ } else {
+ if (!viewMatrix.invert(&localMatrix)) {
+ return false;
+ }
+ }
+ } else {
+ bounds = path.getBounds();
+ }
+ const SkMatrix& viewM = (reverse && viewMatrix.hasPerspective()) ? SkMatrix::I() :
+ viewMatrix;
+ SkAutoTUnref<GrDrawBatch> batch(
+ GrRectBatchFactory::CreateNonAAFill(paint.getColor(), viewM, bounds, nullptr,
+ &localMatrix));
+
+ GrPipelineBuilder pipelineBuilder(paint, drawContext->mustUseHWAA(paint));
+ pipelineBuilder.setUserStencil(passes[p]);
+
+ drawContext->drawBatch(pipelineBuilder, clip, batch);
+ } else {
+ SkAutoTUnref<MSAAPathBatch> batch(new MSAAPathBatch(paint.getColor(), path,
+ viewMatrix, devBounds));
+ if (!batch->isValid()) {
+ return false;
+ }
+
+ GrPipelineBuilder pipelineBuilder(paint, drawContext->mustUseHWAA(paint));
+ pipelineBuilder.setUserStencil(passes[p]);
+ if (passCount > 1) {
+ pipelineBuilder.setDisableColorXPFactory();
+ }
+
+ drawContext->drawBatch(pipelineBuilder, clip, batch);
+ }
+ }
+ return true;
+}
+
+bool GrMSAAPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ // This path renderer only fills and relies on MSAA for antialiasing. Stroked shapes are
+ // handled by passing on the original shape and letting the caller compute the stroked shape
+ // which will have a fill style.
+ return args.fShape->style().isSimpleFill() && !args.fAntiAlias;
+}
+
+bool GrMSAAPathRenderer::onDrawPath(const DrawPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fDrawContext->auditTrail(),
+ "GrMSAAPathRenderer::onDrawPath");
+ SkTLazy<GrShape> tmpShape;
+ const GrShape* shape = args.fShape;
+ if (shape->style().applies()) {
+ SkScalar styleScale = GrStyle::MatrixToScaleFactor(*args.fViewMatrix);
+ tmpShape.init(args.fShape->applyStyle(GrStyle::Apply::kPathEffectAndStrokeRec, styleScale));
+ shape = tmpShape.get();
+ }
+ return this->internalDrawPath(args.fDrawContext,
+ *args.fPaint,
+ *args.fUserStencilSettings,
+ *args.fClip,
+ *args.fViewMatrix,
+ *shape,
+ false);
+}
+
+void GrMSAAPathRenderer::onStencilPath(const StencilPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fDrawContext->auditTrail(),
+ "GrMSAAPathRenderer::onStencilPath");
+ SkASSERT(args.fShape->style().isSimpleFill());
+ SkASSERT(!args.fShape->mayBeInverseFilledAfterStyling());
+
+ GrPaint paint;
+ paint.setXPFactory(GrDisableColorXPFactory::Make());
+ paint.setAntiAlias(args.fIsAA);
+
+ this->internalDrawPath(args.fDrawContext, paint, GrUserStencilSettings::kUnused, *args.fClip,
+ *args.fViewMatrix, *args.fShape, true);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
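The quad pass above depends on the fragment test in MSAAQuadProcessor, 'if (uv.x * uv.x >= uv.y) discard;'. With the UVs that add_quad assigns to the three control points, (0,0), (0.5,0) and (1,1), the interpolated (u, v) at a point on the quadratic Bezier is (t, t*t), so u*u == v exactly on the curve and u*u < v on the chord side; only the sliver between the chord and the curve survives the quad pass (the Loop-Blinn implicit test). A CPU-side sketch of the same check:

#include <cstdio>

// True for interpolated (u, v) that the quad pass keeps; the shader discards
// the complement, u*u >= v.
static bool keeps_fragment(float u, float v) {
    return u * u < v;
}

int main() {
    printf("%d\n", keeps_fragment(0.5f, 0.25f));  // 0: exactly on the curve, discarded
    printf("%d\n", keeps_fragment(0.5f, 0.5f));   // 1: on the chord, kept
    return 0;
}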
diff --git a/gfx/skia/skia/src/gpu/batches/GrMSAAPathRenderer.h b/gfx/skia/skia/src/gpu/batches/GrMSAAPathRenderer.h
new file mode 100644
index 000000000..3bc4ee6f7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrMSAAPathRenderer.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMSAAPathRenderer_DEFINED
+#define GrMSAAPathRenderer_DEFINED
+
+#include "GrPathRenderer.h"
+#include "SkTypes.h"
+
+class SK_API GrMSAAPathRenderer : public GrPathRenderer {
+private:
+ StencilSupport onGetStencilSupport(const GrShape&) const override;
+
+ bool onCanDrawPath(const CanDrawPathArgs&) const override;
+
+ bool onDrawPath(const DrawPathArgs&) override;
+
+ void onStencilPath(const StencilPathArgs&) override;
+
+ bool internalDrawPath(GrDrawContext*,
+ const GrPaint&,
+ const GrUserStencilSettings&,
+ const GrClip&,
+ const SkMatrix& viewMatrix,
+ const GrShape&,
+ bool stencilOnly);
+
+ typedef GrPathRenderer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrNinePatch.cpp b/gfx/skia/skia/src/gpu/batches/GrNinePatch.cpp
new file mode 100644
index 000000000..522e775c3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrNinePatch.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrNinePatch.h"
+
+#include "GrBatchFlushState.h"
+#include "GrDefaultGeoProcFactory.h"
+#include "GrResourceProvider.h"
+#include "GrVertexBatch.h"
+#include "SkBitmap.h"
+#include "SkLatticeIter.h"
+#include "SkRect.h"
+
+static sk_sp<GrGeometryProcessor> create_gp(bool readsCoverage) {
+ using namespace GrDefaultGeoProcFactory;
+ Color color(Color::kAttribute_Type);
+ Coverage coverage(readsCoverage ? Coverage::kSolid_Type : Coverage::kNone_Type);
+ LocalCoords localCoords(LocalCoords::kHasExplicit_Type);
+ return GrDefaultGeoProcFactory::Make(color, coverage, localCoords, SkMatrix::I());
+}
+
+class GrNonAANinePatchBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ static const int kVertsPerRect = 4;
+ static const int kIndicesPerRect = 6;
+
+ GrNonAANinePatchBatch(GrColor color, const SkMatrix& viewMatrix, int imageWidth,
+ int imageHeight, std::unique_ptr<SkLatticeIter> iter, const SkRect &dst)
+ : INHERITED(ClassID()) {
+ Patch& patch = fPatches.push_back();
+ patch.fViewMatrix = viewMatrix;
+ patch.fColor = color;
+ patch.fIter = std::move(iter);
+ patch.fDst = dst;
+
+ fImageWidth = imageWidth;
+ fImageHeight = imageHeight;
+
+ // setup bounds
+ this->setTransformedBounds(patch.fDst, viewMatrix, HasAABloat::kNo, IsZeroArea::kNo);
+ }
+
+ const char* name() const override { return "NonAANinePatchBatch"; }
+
+ SkString dumpInfo() const override {
+ SkString str;
+
+ for (int i = 0; i < fPatches.count(); ++i) {
+ str.appendf("%d: Color: 0x%08x Dst [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
+ i,
+ fPatches[i].fColor,
+ fPatches[i].fDst.fLeft, fPatches[i].fDst.fTop,
+ fPatches[i].fDst.fRight, fPatches[i].fDst.fBottom);
+ }
+
+ str.append(INHERITED::dumpInfo());
+ return str;
+ }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ color->setUnknownFourComponents();
+ coverage->setKnownSingleComponent(0xff);
+ }
+
+private:
+ void onPrepareDraws(Target* target) const override {
+ sk_sp<GrGeometryProcessor> gp(create_gp(fOverrides.readsCoverage()));
+ if (!gp) {
+ SkDebugf("Couldn't create GrGeometryProcessor\n");
+ return;
+ }
+
+ size_t vertexStride = gp->getVertexStride();
+ int patchCnt = fPatches.count();
+ int numRects = 0;
+ for (int i = 0; i < patchCnt; i++) {
+ numRects += fPatches[i].fIter->numRectsToDraw();
+ }
+
+ SkAutoTUnref<const GrBuffer> indexBuffer(
+ target->resourceProvider()->refQuadIndexBuffer());
+ InstancedHelper helper;
+ void* vertices = helper.init(target, kTriangles_GrPrimitiveType, vertexStride,
+ indexBuffer, kVertsPerRect,
+ kIndicesPerRect, numRects);
+ if (!vertices || !indexBuffer) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ intptr_t verts = reinterpret_cast<intptr_t>(vertices);
+ for (int i = 0; i < patchCnt; i++) {
+ const Patch& patch = fPatches[i];
+
+ // Apply the view matrix here if it is scale-translate. Otherwise, we need to
+ // wait until we've created the dst rects.
+ bool isScaleTranslate = patch.fViewMatrix.isScaleTranslate();
+ if (isScaleTranslate) {
+ patch.fIter->mapDstScaleTranslate(patch.fViewMatrix);
+ }
+
+ SkRect srcR, dstR;
+ intptr_t patchVerts = verts;
+ while (patch.fIter->next(&srcR, &dstR)) {
+ SkPoint* positions = reinterpret_cast<SkPoint*>(verts);
+ positions->setRectFan(dstR.fLeft, dstR.fTop,
+ dstR.fRight, dstR.fBottom, vertexStride);
+
+ // Setup local coords
+ static const int kLocalOffset = sizeof(SkPoint) + sizeof(GrColor);
+ SkPoint* coords = reinterpret_cast<SkPoint*>(verts + kLocalOffset);
+ coords->setRectFan(srcR.fLeft, srcR.fTop, srcR.fRight, srcR.fBottom, vertexStride);
+
+ static const int kColorOffset = sizeof(SkPoint);
+ GrColor* vertColor = reinterpret_cast<GrColor*>(verts + kColorOffset);
+ for (int j = 0; j < 4; ++j) {
+ *vertColor = patch.fColor;
+ vertColor = (GrColor*) ((intptr_t) vertColor + vertexStride);
+ }
+ verts += kVertsPerRect * vertexStride;
+ }
+
+ // If we didn't handle it above, apply the matrix here.
+ if (!isScaleTranslate) {
+ SkPoint* positions = reinterpret_cast<SkPoint*>(patchVerts);
+ patch.fViewMatrix.mapPointsWithStride(positions, vertexStride,
+ kVertsPerRect * patch.fIter->numRectsToDraw());
+ }
+ }
+ helper.recordDraw(target, gp.get());
+ }
+
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ overrides.getOverrideColorIfSet(&fPatches[0].fColor);
+ fOverrides = overrides;
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ GrNonAANinePatchBatch* that = t->cast<GrNonAANinePatchBatch>();
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ SkASSERT(this->fImageWidth == that->fImageWidth &&
+ this->fImageHeight == that->fImageHeight);
+
+        // If one batch can tweak alpha for coverage and the other cannot, fall back to
+        // not tweaking for the combined batch
+ if (fOverrides.canTweakAlphaForCoverage() && !that->fOverrides.canTweakAlphaForCoverage()) {
+ fOverrides = that->fOverrides;
+ }
+
+ fPatches.move_back_n(that->fPatches.count(), that->fPatches.begin());
+ this->joinBounds(*that);
+ return true;
+ }
+
+ struct Patch {
+ SkMatrix fViewMatrix;
+ std::unique_ptr<SkLatticeIter> fIter;
+ SkRect fDst;
+ GrColor fColor;
+ };
+
+ GrXPOverridesForBatch fOverrides;
+ int fImageWidth;
+ int fImageHeight;
+ SkSTArray<1, Patch, true> fPatches;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+namespace GrNinePatch {
+GrDrawBatch* CreateNonAA(GrColor color, const SkMatrix& viewMatrix, int imageWidth, int imageHeight,
+ std::unique_ptr<SkLatticeIter> iter, const SkRect& dst) {
+ return new GrNonAANinePatchBatch(color, viewMatrix, imageWidth, imageHeight, std::move(iter),
+ dst);
+}
+};
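GrNonAANinePatchBatch above delegates the per-cell source/destination rectangles to SkLatticeIter; the nine-patch rule it implements is that the four corner cells keep their source size while the edge and center cells absorb all of the stretch. A standalone sketch of that rule for one axis (hypothetical helper, not the Skia API; it assumes the destination is at least as wide as the two fixed columns):

#include <algorithm>

// Split a source width imgW at x0 < x1 into three columns and compute the
// matching destination column boundaries in [dstL, dstR]: the outer columns
// keep their width, the middle column stretches.
static void nine_patch_columns(float dstL, float dstR, float imgW,
                               float x0, float x1, float outX[4]) {
    float fixedWidth = x0 + (imgW - x1);
    float middle = std::max(0.0f, (dstR - dstL) - fixedWidth);
    outX[0] = dstL;
    outX[1] = dstL + x0;
    outX[2] = dstL + x0 + middle;
    outX[3] = dstR;
}
// Rows follow the same rule; the nine destination cells are the products of the
// column and row intervals.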
diff --git a/gfx/skia/skia/src/gpu/batches/GrNinePatch.h b/gfx/skia/skia/src/gpu/batches/GrNinePatch.h
new file mode 100644
index 000000000..02664c644
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrNinePatch.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrNinePatch_DEFINED
+#define GrNinePatch_DEFINED
+
+#include "GrColor.h"
+#include "SkCanvas.h"
+
+class GrDrawBatch;
+class SkBitmap;
+class SkLatticeIter;
+class SkMatrix;
+struct SkIRect;
+struct SkRect;
+
+namespace GrNinePatch {
+GrDrawBatch* CreateNonAA(GrColor color, const SkMatrix& viewMatrix, int imageWidth, int imageHeight,
+ std::unique_ptr<SkLatticeIter> iter, const SkRect& dst);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrNonAAFillRectBatch.cpp b/gfx/skia/skia/src/gpu/batches/GrNonAAFillRectBatch.cpp
new file mode 100644
index 000000000..1422951d2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrNonAAFillRectBatch.cpp
@@ -0,0 +1,224 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrNonAAFillRectBatch.h"
+
+#include "GrBatchFlushState.h"
+#include "GrColor.h"
+#include "GrDefaultGeoProcFactory.h"
+#include "GrPrimitiveProcessor.h"
+#include "GrResourceProvider.h"
+#include "GrQuad.h"
+#include "GrVertexBatch.h"
+
+#include "SkMatrixPriv.h"
+
+static const int kVertsPerInstance = 4;
+static const int kIndicesPerInstance = 6;
+
+/** We always use per-vertex colors so that rects can be batched across color changes. Sometimes
+ we have explicit local coords and sometimes not. We *could* always provide explicit local
+ coords and just duplicate the positions when the caller hasn't provided a local coord rect,
+ but we haven't seen a use case which frequently switches between local rect and no local
+ rect draws.
+
+ The vertex attrib order is always pos, color, [local coords].
+ */
+static sk_sp<GrGeometryProcessor> make_gp(bool readsCoverage) {
+ using namespace GrDefaultGeoProcFactory;
+ Color color(Color::kAttribute_Type);
+ Coverage coverage(readsCoverage ? Coverage::kSolid_Type : Coverage::kNone_Type);
+
+ LocalCoords localCoords(LocalCoords::kHasExplicit_Type);
+ return GrDefaultGeoProcFactory::Make(color, coverage, localCoords, SkMatrix::I());
+}
+
+static void tesselate(intptr_t vertices,
+ size_t vertexStride,
+ GrColor color,
+ const SkMatrix* viewMatrix,
+ const SkRect& rect,
+ const GrQuad* localQuad) {
+ SkPoint* positions = reinterpret_cast<SkPoint*>(vertices);
+
+ positions->setRectFan(rect.fLeft, rect.fTop,
+ rect.fRight, rect.fBottom, vertexStride);
+
+ if (viewMatrix) {
+ SkMatrixPriv::MapPointsWithStride(*viewMatrix, positions, vertexStride, kVertsPerInstance);
+ }
+
+ // Setup local coords
+ // TODO we should only do this if local coords are being read
+ if (localQuad) {
+ static const int kLocalOffset = sizeof(SkPoint) + sizeof(GrColor);
+ for (int i = 0; i < kVertsPerInstance; i++) {
+ SkPoint* coords = reinterpret_cast<SkPoint*>(vertices + kLocalOffset +
+ i * vertexStride);
+ *coords = localQuad->point(i);
+ }
+ }
+
+ static const int kColorOffset = sizeof(SkPoint);
+ GrColor* vertColor = reinterpret_cast<GrColor*>(vertices + kColorOffset);
+ for (int j = 0; j < 4; ++j) {
+ *vertColor = color;
+ vertColor = (GrColor*) ((intptr_t) vertColor + vertexStride);
+ }
+}
+
+class NonAAFillRectBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ NonAAFillRectBatch(GrColor color, const SkMatrix& viewMatrix, const SkRect& rect,
+ const SkRect* localRect, const SkMatrix* localMatrix)
+ : INHERITED(ClassID()) {
+ SkASSERT(!viewMatrix.hasPerspective() && (!localMatrix ||
+ !localMatrix->hasPerspective()));
+ RectInfo& info = fRects.push_back();
+ info.fColor = color;
+ info.fViewMatrix = viewMatrix;
+ info.fRect = rect;
+ if (localRect && localMatrix) {
+ info.fLocalQuad.setFromMappedRect(*localRect, *localMatrix);
+ } else if (localRect) {
+ info.fLocalQuad.set(*localRect);
+ } else if (localMatrix) {
+ info.fLocalQuad.setFromMappedRect(rect, *localMatrix);
+ } else {
+ info.fLocalQuad.set(rect);
+ }
+ this->setTransformedBounds(fRects[0].fRect, viewMatrix, HasAABloat::kNo, IsZeroArea::kNo);
+ }
+
+ const char* name() const override { return "NonAAFillRectBatch"; }
+
+ SkString dumpInfo() const override {
+ SkString str;
+ str.appendf("# batched: %d\n", fRects.count());
+ for (int i = 0; i < fRects.count(); ++i) {
+ const RectInfo& info = fRects[i];
+ str.appendf("%d: Color: 0x%08x, Rect [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
+ i, info.fColor,
+ info.fRect.fLeft, info.fRect.fTop, info.fRect.fRight, info.fRect.fBottom);
+ }
+ str.append(INHERITED::dumpInfo());
+ return str;
+ }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one geometry bundle
+ color->setKnownFourComponents(fRects[0].fColor);
+ coverage->setKnownSingleComponent(0xff);
+ }
+
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ overrides.getOverrideColorIfSet(&fRects[0].fColor);
+ fOverrides = overrides;
+ }
+
+private:
+ NonAAFillRectBatch() : INHERITED(ClassID()) {}
+
+ void onPrepareDraws(Target* target) const override {
+ sk_sp<GrGeometryProcessor> gp = make_gp(fOverrides.readsCoverage());
+ if (!gp) {
+ SkDebugf("Couldn't create GrGeometryProcessor\n");
+ return;
+ }
+ SkASSERT(gp->getVertexStride() ==
+ sizeof(GrDefaultGeoProcFactory::PositionColorLocalCoordAttr));
+
+ size_t vertexStride = gp->getVertexStride();
+ int instanceCount = fRects.count();
+
+ SkAutoTUnref<const GrBuffer> indexBuffer(target->resourceProvider()->refQuadIndexBuffer());
+ InstancedHelper helper;
+ void* vertices = helper.init(target, kTriangles_GrPrimitiveType, vertexStride,
+ indexBuffer, kVertsPerInstance,
+ kIndicesPerInstance, instanceCount);
+ if (!vertices || !indexBuffer) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ for (int i = 0; i < instanceCount; i++) {
+ intptr_t verts = reinterpret_cast<intptr_t>(vertices) +
+ i * kVertsPerInstance * vertexStride;
+ tesselate(verts, vertexStride, fRects[i].fColor, &fRects[i].fViewMatrix,
+ fRects[i].fRect, &fRects[i].fLocalQuad);
+ }
+ helper.recordDraw(target, gp.get());
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ NonAAFillRectBatch* that = t->cast<NonAAFillRectBatch>();
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+        // If one batch can tweak alpha for coverage and the other cannot, fall back to
+        // not tweaking for the combined batch
+ if (fOverrides.canTweakAlphaForCoverage() && !that->fOverrides.canTweakAlphaForCoverage()) {
+ fOverrides = that->fOverrides;
+ }
+
+ fRects.push_back_n(that->fRects.count(), that->fRects.begin());
+ this->joinBounds(*that);
+ return true;
+ }
+
+ struct RectInfo {
+ GrColor fColor;
+ SkMatrix fViewMatrix;
+ SkRect fRect;
+ GrQuad fLocalQuad;
+ };
+
+ GrXPOverridesForBatch fOverrides;
+ SkSTArray<1, RectInfo, true> fRects;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+namespace GrNonAAFillRectBatch {
+
+GrDrawBatch* Create(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkRect* localRect,
+ const SkMatrix* localMatrix) {
+ return new NonAAFillRectBatch(color, viewMatrix, rect, localRect, localMatrix);
+}
+
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_TEST_UTILS
+
+#include "GrBatchTest.h"
+
+DRAW_BATCH_TEST_DEFINE(RectBatch) {
+ GrColor color = GrRandomColor(random);
+ SkRect rect = GrTest::TestRect(random);
+ SkRect localRect = GrTest::TestRect(random);
+ SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
+ SkMatrix localMatrix = GrTest::TestMatrix(random);
+
+ bool hasLocalRect = random->nextBool();
+ bool hasLocalMatrix = random->nextBool();
+ return GrNonAAFillRectBatch::Create(color, viewMatrix, rect,
+ hasLocalRect ? &localRect : nullptr,
+ hasLocalMatrix ? &localMatrix : nullptr);
+}
+
+#endif
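Both this batch and the nine-patch batch above draw their rects through refQuadIndexBuffer(), a shared index buffer in which every group of four vertices becomes two triangles. A sketch of how such a buffer can be generated; the {0, 1, 2, 0, 2, 3} pattern is an assumption for illustration, chosen to match a rect-fan vertex order:

#include <cstdint>
#include <vector>

static std::vector<uint16_t> make_quad_indices(int quadCount) {
    static const uint16_t kPattern[6] = { 0, 1, 2, 0, 2, 3 };
    std::vector<uint16_t> indices;
    indices.reserve((size_t)quadCount * 6);
    for (int q = 0; q < quadCount; ++q) {
        for (uint16_t i : kPattern) {
            indices.push_back((uint16_t)(i + 4 * q));   // 4 vertices per rect
        }
    }
    return indices;
}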
diff --git a/gfx/skia/skia/src/gpu/batches/GrNonAAFillRectBatch.h b/gfx/skia/skia/src/gpu/batches/GrNonAAFillRectBatch.h
new file mode 100644
index 000000000..ac28d0fb4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrNonAAFillRectBatch.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrNonAAFillRectBatch_DEFINED
+#define GrNonAAFillRectBatch_DEFINED
+
+#include "GrColor.h"
+
+class GrDrawBatch;
+class SkMatrix;
+struct SkRect;
+
+namespace GrNonAAFillRectBatch {
+
+GrDrawBatch* Create(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkRect* localRect,
+ const SkMatrix* localMatrix);
+
+GrDrawBatch* CreateWithPerspective(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkRect* localRect,
+ const SkMatrix* localMatrix);
+
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp b/gfx/skia/skia/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp
new file mode 100644
index 000000000..aa5a4203c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrNonAAFillRectBatch.h"
+
+#include "GrBatchFlushState.h"
+#include "GrColor.h"
+#include "GrDefaultGeoProcFactory.h"
+#include "GrPrimitiveProcessor.h"
+#include "GrResourceProvider.h"
+#include "GrQuad.h"
+#include "GrVertexBatch.h"
+
+static const int kVertsPerInstance = 4;
+static const int kIndicesPerInstance = 6;
+
+/** We always use per-vertex colors so that rects can be batched across color changes. Sometimes
+ we have explicit local coords and sometimes not. We *could* always provide explicit local
+ coords and just duplicate the positions when the caller hasn't provided a local coord rect,
+ but we haven't seen a use case which frequently switches between local rect and no local
+ rect draws.
+
+ The vertex attrib order is always pos, color, [local coords].
+ */
+static sk_sp<GrGeometryProcessor> make_persp_gp(const SkMatrix& viewMatrix,
+ bool readsCoverage,
+ bool hasExplicitLocalCoords,
+ const SkMatrix* localMatrix) {
+ SkASSERT(viewMatrix.hasPerspective() || (localMatrix && localMatrix->hasPerspective()));
+
+ using namespace GrDefaultGeoProcFactory;
+ Color color(Color::kAttribute_Type);
+ Coverage coverage(readsCoverage ? Coverage::kSolid_Type : Coverage::kNone_Type);
+
+ // If we have perspective on the viewMatrix then we won't map on the CPU, nor will we map
+ // the local rect on the cpu (in case the localMatrix also has perspective).
+ // Otherwise, if we have a local rect, then we apply the localMatrix directly to the localRect
+ // to generate vertex local coords
+ if (viewMatrix.hasPerspective()) {
+ LocalCoords localCoords(hasExplicitLocalCoords ? LocalCoords::kHasExplicit_Type :
+ LocalCoords::kUsePosition_Type,
+ localMatrix);
+ return GrDefaultGeoProcFactory::Make(color, coverage, localCoords, viewMatrix);
+ } else if (hasExplicitLocalCoords) {
+ LocalCoords localCoords(LocalCoords::kHasExplicit_Type, localMatrix);
+ return GrDefaultGeoProcFactory::Make(color, coverage, localCoords, SkMatrix::I());
+ } else {
+ LocalCoords localCoords(LocalCoords::kUsePosition_Type, localMatrix);
+ return GrDefaultGeoProcFactory::MakeForDeviceSpace(color, coverage, localCoords,
+ viewMatrix);
+ }
+}
+
+static void tesselate(intptr_t vertices,
+ size_t vertexStride,
+ GrColor color,
+ const SkMatrix* viewMatrix,
+ const SkRect& rect,
+ const GrQuad* localQuad) {
+ SkPoint* positions = reinterpret_cast<SkPoint*>(vertices);
+
+ positions->setRectFan(rect.fLeft, rect.fTop,
+ rect.fRight, rect.fBottom, vertexStride);
+
+ if (viewMatrix) {
+ viewMatrix->mapPointsWithStride(positions, vertexStride, kVertsPerInstance);
+ }
+
+ // Setup local coords
+ // TODO we should only do this if local coords are being read
+ if (localQuad) {
+ static const int kLocalOffset = sizeof(SkPoint) + sizeof(GrColor);
+ for (int i = 0; i < kVertsPerInstance; i++) {
+ SkPoint* coords = reinterpret_cast<SkPoint*>(vertices + kLocalOffset +
+ i * vertexStride);
+ *coords = localQuad->point(i);
+ }
+ }
+
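+    // Colors are interleaved immediately after each position, so write one color per vertex at
+    // that offset.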
+ static const int kColorOffset = sizeof(SkPoint);
+ GrColor* vertColor = reinterpret_cast<GrColor*>(vertices + kColorOffset);
+ for (int j = 0; j < 4; ++j) {
+ *vertColor = color;
+ vertColor = (GrColor*) ((intptr_t) vertColor + vertexStride);
+ }
+}
+
+// We handle perspective in the local matrix or viewmatrix with special batches
+class GrNonAAFillRectPerspectiveBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ GrNonAAFillRectPerspectiveBatch(GrColor color, const SkMatrix& viewMatrix, const SkRect& rect,
+ const SkRect* localRect, const SkMatrix* localMatrix)
+ : INHERITED(ClassID())
+ , fViewMatrix(viewMatrix) {
+ SkASSERT(viewMatrix.hasPerspective() || (localMatrix &&
+ localMatrix->hasPerspective()));
+ RectInfo& info = fRects.push_back();
+ info.fColor = color;
+ info.fRect = rect;
+ fHasLocalRect = SkToBool(localRect);
+ fHasLocalMatrix = SkToBool(localMatrix);
+ if (fHasLocalMatrix) {
+ fLocalMatrix = *localMatrix;
+ }
+ if (fHasLocalRect) {
+ info.fLocalRect = *localRect;
+ }
+ this->setTransformedBounds(rect, viewMatrix, HasAABloat::kNo, IsZeroArea::kNo);
+ }
+
+ const char* name() const override { return "NonAAFillRectPerspectiveBatch"; }
+
+ SkString dumpInfo() const override {
+ SkString str;
+ str.appendf("# batched: %d\n", fRects.count());
+ for (int i = 0; i < fRects.count(); ++i) {
+            const RectInfo& geo = fRects[i];
+ str.appendf("%d: Color: 0x%08x, Rect [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
+ i, geo.fColor,
+ geo.fRect.fLeft, geo.fRect.fTop, geo.fRect.fRight, geo.fRect.fBottom);
+ }
+ str.append(INHERITED::dumpInfo());
+ return str;
+ }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one geometry bundle
+ color->setKnownFourComponents(fRects[0].fColor);
+ coverage->setKnownSingleComponent(0xff);
+ }
+
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ overrides.getOverrideColorIfSet(&fRects[0].fColor);
+ fOverrides = overrides;
+ }
+
+private:
+ GrNonAAFillRectPerspectiveBatch() : INHERITED(ClassID()) {}
+
+ void onPrepareDraws(Target* target) const override {
+ sk_sp<GrGeometryProcessor> gp = make_persp_gp(fViewMatrix,
+ fOverrides.readsCoverage(),
+ fHasLocalRect,
+ fHasLocalMatrix ? &fLocalMatrix : nullptr);
+ if (!gp) {
+ SkDebugf("Couldn't create GrGeometryProcessor\n");
+ return;
+ }
+ SkASSERT(fHasLocalRect
+ ? gp->getVertexStride() ==
+ sizeof(GrDefaultGeoProcFactory::PositionColorLocalCoordAttr)
+ : gp->getVertexStride() == sizeof(GrDefaultGeoProcFactory::PositionColorAttr));
+
+ size_t vertexStride = gp->getVertexStride();
+ int instanceCount = fRects.count();
+
+ SkAutoTUnref<const GrBuffer> indexBuffer(target->resourceProvider()->refQuadIndexBuffer());
+ InstancedHelper helper;
+ void* vertices = helper.init(target, kTriangles_GrPrimitiveType, vertexStride,
+ indexBuffer, kVertsPerInstance,
+ kIndicesPerInstance, instanceCount);
+ if (!vertices || !indexBuffer) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ for (int i = 0; i < instanceCount; i++) {
+ const RectInfo& info = fRects[i];
+ intptr_t verts = reinterpret_cast<intptr_t>(vertices) +
+ i * kVertsPerInstance * vertexStride;
+ if (fHasLocalRect) {
+ GrQuad quad(info.fLocalRect);
+ tesselate(verts, vertexStride, info.fColor, nullptr, info.fRect, &quad);
+ } else {
+ tesselate(verts, vertexStride, info.fColor, nullptr, info.fRect, nullptr);
+ }
+ }
+ helper.recordDraw(target, gp.get());
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ GrNonAAFillRectPerspectiveBatch* that = t->cast<GrNonAAFillRectPerspectiveBatch>();
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ // We could batch across perspective vm changes if we really wanted to
+ if (!fViewMatrix.cheapEqualTo(that->fViewMatrix)) {
+ return false;
+ }
+ if (fHasLocalRect != that->fHasLocalRect) {
+ return false;
+ }
+ if (fHasLocalMatrix && !fLocalMatrix.cheapEqualTo(that->fLocalMatrix)) {
+ return false;
+ }
+
+        // If one batch can tweak alpha for coverage and the other cannot, we just fall back to
+        // not tweaking.
+ if (fOverrides.canTweakAlphaForCoverage() && !that->fOverrides.canTweakAlphaForCoverage()) {
+ fOverrides = that->fOverrides;
+ }
+
+ fRects.push_back_n(that->fRects.count(), that->fRects.begin());
+ this->joinBounds(*that);
+ return true;
+ }
+
+ struct RectInfo {
+ SkRect fRect;
+ GrColor fColor;
+ SkRect fLocalRect;
+ };
+
+ GrXPOverridesForBatch fOverrides;
+ SkSTArray<1, RectInfo, true> fRects;
+ bool fHasLocalMatrix;
+ bool fHasLocalRect;
+ SkMatrix fLocalMatrix;
+ SkMatrix fViewMatrix;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+namespace GrNonAAFillRectBatch {
+
+GrDrawBatch* CreateWithPerspective(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkRect* localRect,
+ const SkMatrix* localMatrix) {
+ return new GrNonAAFillRectPerspectiveBatch(color, viewMatrix, rect, localRect, localMatrix);
+}
+
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_TEST_UTILS
+
+#include "GrBatchTest.h"
+
+DRAW_BATCH_TEST_DEFINE(PerspRectBatch) {
+ GrColor color = GrRandomColor(random);
+ SkRect rect = GrTest::TestRect(random);
+ SkRect localRect = GrTest::TestRect(random);
+ SkMatrix viewMatrix = GrTest::TestMatrix(random);
+ bool hasLocalMatrix = random->nextBool();
+ SkMatrix localMatrix;
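+    // The perspective batch requires perspective in at least one matrix, so force a perspective
+    // local matrix when the view matrix doesn't have one.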
+ if (!viewMatrix.hasPerspective()) {
+ localMatrix = GrTest::TestMatrixPerspective(random);
+ hasLocalMatrix = true;
+ }
+
+ bool hasLocalRect = random->nextBool();
+ return GrNonAAFillRectBatch::CreateWithPerspective(color, viewMatrix, rect,
+ hasLocalRect ? &localRect : nullptr,
+ hasLocalMatrix ? &localMatrix : nullptr);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrNonAAStrokeRectBatch.cpp b/gfx/skia/skia/src/gpu/batches/GrNonAAStrokeRectBatch.cpp
new file mode 100644
index 000000000..f443b32f1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrNonAAStrokeRectBatch.cpp
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrNonAAStrokeRectBatch.h"
+
+#include "GrBatchTest.h"
+#include "GrBatchFlushState.h"
+#include "GrColor.h"
+#include "GrDefaultGeoProcFactory.h"
+#include "GrVertexBatch.h"
+#include "SkRandom.h"
+
+/* Create a triangle strip that strokes the specified rect. There are 8
+   unique vertices, but we repeat the last 2 to close up. Alternatively we
+   could use an index array and send only 8 verts, but it isn't clear that
+   would be faster.
+ */
+static void init_stroke_rect_strip(SkPoint verts[10], const SkRect& rect, SkScalar width) {
+ const SkScalar rad = SkScalarHalf(width);
+    // TODO: we should be able to enable this assert, but we'd have to filter out these draws
+    // first; as written this is a bug.
+ //SkASSERT(rad < rect.width() / 2 && rad < rect.height() / 2);
+
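+    // Vertices alternate between the inner and outer edges of the stroke as we walk the corners;
+    // the last two repeat the first pair to close the strip.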
+ verts[0].set(rect.fLeft + rad, rect.fTop + rad);
+ verts[1].set(rect.fLeft - rad, rect.fTop - rad);
+ verts[2].set(rect.fRight - rad, rect.fTop + rad);
+ verts[3].set(rect.fRight + rad, rect.fTop - rad);
+ verts[4].set(rect.fRight - rad, rect.fBottom - rad);
+ verts[5].set(rect.fRight + rad, rect.fBottom + rad);
+ verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
+ verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
+ verts[8] = verts[0];
+ verts[9] = verts[1];
+}
+
+// Allow all hairlines and all miters, so long as the miter limit doesn't produce beveled corners.
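+// (A rect's corners are 90 degrees, so a miter join needs a limit of at least sqrt(2); any
+// smaller limit would bevel the corners.)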
+inline static bool allowed_stroke(const SkStrokeRec& stroke) {
+ SkASSERT(stroke.getStyle() == SkStrokeRec::kStroke_Style ||
+ stroke.getStyle() == SkStrokeRec::kHairline_Style);
+ return !stroke.getWidth() ||
+ (stroke.getJoin() == SkPaint::kMiter_Join && stroke.getMiter() > SK_ScalarSqrt2);
+}
+
+class NonAAStrokeRectBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ const char* name() const override { return "NonAAStrokeRectBatch"; }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one geometry bundle
+ color->setKnownFourComponents(fColor);
+ coverage->setKnownSingleComponent(0xff);
+ }
+
+ static GrDrawBatch* Create(GrColor color, const SkMatrix& viewMatrix, const SkRect& rect,
+ const SkStrokeRec& stroke, bool snapToPixelCenters) {
+ if (!allowed_stroke(stroke)) {
+ return nullptr;
+ }
+ NonAAStrokeRectBatch* batch = new NonAAStrokeRectBatch();
+ batch->fColor = color;
+ batch->fViewMatrix = viewMatrix;
+ batch->fRect = rect;
+ // Sort the rect for hairlines
+ batch->fRect.sort();
+ batch->fStrokeWidth = stroke.getWidth();
+
+ SkScalar rad = SkScalarHalf(batch->fStrokeWidth);
+ SkRect bounds = rect;
+ bounds.outset(rad, rad);
+
+ // If our caller snaps to pixel centers then we have to round out the bounds
+ if (snapToPixelCenters) {
+ viewMatrix.mapRect(&bounds);
+ // We want to be consistent with how we snap non-aa lines. To match what we do in
+ // GrGLSLVertexShaderBuilder, we first floor all the vertex values and then add half a
+ // pixel to force us to pixel centers.
+ bounds.set(SkScalarFloorToScalar(bounds.fLeft),
+ SkScalarFloorToScalar(bounds.fTop),
+ SkScalarFloorToScalar(bounds.fRight),
+ SkScalarFloorToScalar(bounds.fBottom));
+ bounds.offset(0.5f, 0.5f);
+ batch->setBounds(bounds, HasAABloat::kNo, IsZeroArea::kNo);
+ } else {
+            batch->setTransformedBounds(bounds, batch->fViewMatrix, HasAABloat::kNo,
+ IsZeroArea::kNo);
+ }
+ return batch;
+ }
+
+private:
+ NonAAStrokeRectBatch() : INHERITED(ClassID()) {}
+
+ void onPrepareDraws(Target* target) const override {
+ sk_sp<GrGeometryProcessor> gp;
+ {
+ using namespace GrDefaultGeoProcFactory;
+ Color color(fColor);
+ Coverage coverage(fOverrides.readsCoverage() ? Coverage::kSolid_Type
+ : Coverage::kNone_Type);
+ LocalCoords localCoords(fOverrides.readsLocalCoords() ? LocalCoords::kUsePosition_Type :
+ LocalCoords::kUnused_Type);
+ gp = GrDefaultGeoProcFactory::Make(color, coverage, localCoords, fViewMatrix);
+ }
+
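+        // The color is a uniform on the geometry processor, so each vertex carries only a
+        // position.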
+ size_t vertexStride = gp->getVertexStride();
+
+ SkASSERT(vertexStride == sizeof(GrDefaultGeoProcFactory::PositionAttr));
+
+ int vertexCount = kVertsPerHairlineRect;
+ if (fStrokeWidth > 0) {
+ vertexCount = kVertsPerStrokeRect;
+ }
+
+ const GrBuffer* vertexBuffer;
+ int firstVertex;
+
+ void* verts = target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer,
+ &firstVertex);
+
+ if (!verts) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ SkPoint* vertex = reinterpret_cast<SkPoint*>(verts);
+
+ GrPrimitiveType primType;
+ if (fStrokeWidth > 0) {
+ primType = kTriangleStrip_GrPrimitiveType;
+ init_stroke_rect_strip(vertex, fRect, fStrokeWidth);
+ } else {
+ // hairline
+ primType = kLineStrip_GrPrimitiveType;
+ vertex[0].set(fRect.fLeft, fRect.fTop);
+ vertex[1].set(fRect.fRight, fRect.fTop);
+ vertex[2].set(fRect.fRight, fRect.fBottom);
+ vertex[3].set(fRect.fLeft, fRect.fBottom);
+ vertex[4].set(fRect.fLeft, fRect.fTop);
+ }
+
+ GrMesh mesh;
+ mesh.init(primType, vertexBuffer, firstVertex, vertexCount);
+ target->draw(gp.get(), mesh);
+ }
+
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ overrides.getOverrideColorIfSet(&fColor);
+ fOverrides = overrides;
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps&) override {
+ // NonAA stroke rects cannot batch right now
+ // TODO make these batchable
+ return false;
+ }
+
+ GrColor fColor;
+ SkMatrix fViewMatrix;
+ SkRect fRect;
+ SkScalar fStrokeWidth;
+
+ GrXPOverridesForBatch fOverrides;
+
+ const static int kVertsPerHairlineRect = 5;
+ const static int kVertsPerStrokeRect = 10;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+namespace GrNonAAStrokeRectBatch {
+
+GrDrawBatch* Create(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkStrokeRec& stroke,
+ bool snapToPixelCenters) {
+ return NonAAStrokeRectBatch::Create(color, viewMatrix, rect, stroke, snapToPixelCenters);
+}
+
+}
+
+#ifdef GR_TEST_UTILS
+
+DRAW_BATCH_TEST_DEFINE(NonAAStrokeRectBatch) {
+ SkMatrix viewMatrix = GrTest::TestMatrix(random);
+ GrColor color = GrRandomColor(random);
+ SkRect rect = GrTest::TestRect(random);
+ SkScalar strokeWidth = random->nextBool() ? 0.0f : 2.0f;
+ SkPaint paint;
+ paint.setStrokeWidth(strokeWidth);
+ paint.setStyle(SkPaint::kStroke_Style);
+ paint.setStrokeJoin(SkPaint::kMiter_Join);
+ SkStrokeRec strokeRec(paint);
+ return GrNonAAStrokeRectBatch::Create(color, viewMatrix, rect, strokeRec, random->nextBool());
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrNonAAStrokeRectBatch.h b/gfx/skia/skia/src/gpu/batches/GrNonAAStrokeRectBatch.h
new file mode 100644
index 000000000..4d94337cc
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrNonAAStrokeRectBatch.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrNonAAStrokeRectBatch_DEFINED
+#define GrNonAAStrokeRectBatch_DEFINED
+
+#include "GrColor.h"
+
+#include "SkTypes.h"
+
+class GrDrawBatch;
+struct SkRect;
+class SkStrokeRec;
+class SkMatrix;
+
+namespace GrNonAAStrokeRectBatch {
+
+GrDrawBatch* Create(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkStrokeRec&,
+ bool snapToPixelCenters);
+
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrPLSPathRenderer.cpp b/gfx/skia/skia/src/gpu/batches/GrPLSPathRenderer.cpp
new file mode 100644
index 000000000..e8711c09d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrPLSPathRenderer.cpp
@@ -0,0 +1,959 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrPLSPathRenderer.h"
+
+#include "SkChunkAlloc.h"
+#include "SkGeometry.h"
+#include "SkPathPriv.h"
+#include "SkString.h"
+#include "SkTSort.h"
+#include "SkTraceEvent.h"
+#include "GrBatchFlushState.h"
+#include "GrBatchTest.h"
+#include "GrCaps.h"
+#include "GrContext.h"
+#include "GrDefaultGeoProcFactory.h"
+#include "GrPLSGeometryProcessor.h"
+#include "GrInvariantOutput.h"
+#include "GrPathUtils.h"
+#include "GrProcessor.h"
+#include "GrPipelineBuilder.h"
+#include "GrStyle.h"
+#include "GrTessellator.h"
+#include "batches/GrVertexBatch.h"
+#include "glsl/GrGLSLGeometryProcessor.h"
+#include "gl/builders/GrGLProgramBuilder.h"
+#include "glsl/GrGLSLPLSPathRendering.h"
+
+GrPLSPathRenderer::GrPLSPathRenderer() {
+}
+
+struct PLSVertex {
+ SkPoint fPos;
+ // for triangles, these are the three triangle vertices
+ // for quads, vert1 is the texture UV coords, and vert2 and vert3 are the line segment
+ // comprising the flat edge of the quad
+ SkPoint fVert1;
+ SkPoint fVert2;
+ SkPoint fVert3;
+ int fWinding;
+};
+typedef SkTArray<PLSVertex, true> PLSVertices;
+
+typedef SkTArray<SkPoint, true> FinishVertices;
+
+static const float kCubicTolerance = 0.5f;
+static const float kConicTolerance = 0.5f;
+
+static const float kBloatSize = 1.0f;
+
+static const float kBloatLimit = 640000.0f;
+
+#define kQuadNumVertices 5
+static void add_quad(SkPoint pts[3], PLSVertices& vertices) {
+ SkPoint normal = SkPoint::Make(pts[0].fY - pts[2].fY,
+ pts[2].fX - pts[0].fX);
+ normal.setLength(kBloatSize);
+ SkScalar cross = (pts[1] - pts[0]).cross(pts[2] - pts[0]);
+ if (cross < 0) {
+ normal = -normal;
+ }
+ PLSVertex quad[kQuadNumVertices];
+ quad[0].fPos = pts[0] + normal;
+ quad[1].fPos = pts[0] - normal;
+ quad[2].fPos = pts[1] - normal;
+ quad[3].fPos = pts[2] - normal;
+ quad[4].fPos = pts[2] + normal;
+ for (int i = 0; i < kQuadNumVertices; i++) {
+ quad[i].fWinding = cross < 0 ? 1 : -1;
+ if (cross > 0.0) {
+ quad[i].fVert2 = pts[0];
+ quad[i].fVert3 = pts[2];
+ }
+ else {
+ quad[i].fVert2 = pts[2];
+ quad[i].fVert3 = pts[0];
+ }
+ }
+ GrPathUtils::QuadUVMatrix DevToUV(pts);
+ DevToUV.apply<kQuadNumVertices, sizeof(PLSVertex), sizeof(SkPoint)>(quad);
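+    // Emit the bloated quad as a fan of triangles sharing quad[0].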
+ for (int i = 2; i < kQuadNumVertices; i++) {
+ vertices.push_back(quad[0]);
+ vertices.push_back(quad[i - 1]);
+ vertices.push_back(quad[i]);
+ }
+}
+
+/* Used by bloat_tri; outsets a single point. */
+static bool outset(SkPoint* p1, SkPoint line1, SkPoint line2) {
+ // rotate the two line vectors 90 degrees to form the normals, and compute
+ // the dot product of the normals
+ SkScalar dotProd = line1.fY * line2.fY + line1.fX * line2.fX;
+ SkScalar lengthSq = 1.0f / ((1.0f - dotProd) / 2.0f);
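+    // (1 - dot) / 2 == sin^2(halfAngle), so lengthSq is the squared distance the corner must
+    // move along the bisector to push both edges out by one unit.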
+ if (lengthSq > kBloatLimit) {
+ return false;
+ }
+ SkPoint bisector = line1 + line2;
+ bisector.setLength(SkScalarSqrt(lengthSq) * kBloatSize);
+ *p1 += bisector;
+ return true;
+}
+
+/* Bloats a triangle so as to create a border kBloatSize pixels wide all around it. */
+static bool bloat_tri(SkPoint pts[3]) {
+ SkPoint line1 = pts[0] - pts[1];
+ line1.normalize();
+ SkPoint line2 = pts[0] - pts[2];
+ line2.normalize();
+ SkPoint line3 = pts[1] - pts[2];
+ line3.normalize();
+
+ SkPoint result[3];
+ result[0] = pts[0];
+ if (!outset(&result[0], line1, line2)) {
+ return false;
+ }
+ result[1] = pts[1];
+ if (!outset(&result[1], -line1, line3)) {
+ return false;
+ }
+ result[2] = pts[2];
+ if (!outset(&result[2], -line3, -line2)) {
+ return false;
+ }
+ pts[0] = result[0];
+ pts[1] = result[1];
+ pts[2] = result[2];
+ return true;
+}
+
+static bool get_geometry(const SkPath& path, const SkMatrix& m, PLSVertices& triVertices,
+ PLSVertices& quadVertices, GrResourceProvider* resourceProvider,
+ SkRect bounds) {
+ SkScalar screenSpaceTol = GrPathUtils::kDefaultTolerance;
+ SkScalar tol = GrPathUtils::scaleToleranceToSrc(screenSpaceTol, m, bounds);
+ int contourCnt;
+ int maxPts = GrPathUtils::worstCasePointCount(path, &contourCnt, tol);
+ if (maxPts <= 0) {
+        return false;
+ }
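+    // Walk the path, accumulating a lines-only copy (for triangulation) plus a flat list of quad
+    // control points; cubics and conics are converted to quads along the way.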
+ SkPath linesOnlyPath;
+ linesOnlyPath.setFillType(path.getFillType());
+ SkSTArray<15, SkPoint, true> quadPoints;
+ SkPath::Iter iter(path, true);
+ bool done = false;
+ while (!done) {
+ SkPoint pts[4];
+ SkPath::Verb verb = iter.next(pts);
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ SkASSERT(quadPoints.count() % 3 == 0);
+ for (int i = 0; i < quadPoints.count(); i += 3) {
+ add_quad(&quadPoints[i], quadVertices);
+ }
+ quadPoints.reset();
+ m.mapPoints(&pts[0], 1);
+ linesOnlyPath.moveTo(pts[0]);
+ break;
+ case SkPath::kLine_Verb:
+ m.mapPoints(&pts[1], 1);
+ linesOnlyPath.lineTo(pts[1]);
+ break;
+ case SkPath::kQuad_Verb:
+ m.mapPoints(pts, 3);
+ linesOnlyPath.lineTo(pts[2]);
+ quadPoints.push_back(pts[0]);
+ quadPoints.push_back(pts[1]);
+ quadPoints.push_back(pts[2]);
+ break;
+ case SkPath::kCubic_Verb: {
+ m.mapPoints(pts, 4);
+ SkSTArray<15, SkPoint, true> quads;
+ GrPathUtils::convertCubicToQuads(pts, kCubicTolerance, &quads);
+ int count = quads.count();
+ for (int q = 0; q < count; q += 3) {
+ linesOnlyPath.lineTo(quads[q + 2]);
+ quadPoints.push_back(quads[q]);
+ quadPoints.push_back(quads[q + 1]);
+ quadPoints.push_back(quads[q + 2]);
+ }
+ break;
+ }
+ case SkPath::kConic_Verb: {
+ m.mapPoints(pts, 3);
+ SkScalar weight = iter.conicWeight();
+ SkAutoConicToQuads converter;
+ const SkPoint* quads = converter.computeQuads(pts, weight, kConicTolerance);
+ int count = converter.countQuads();
+ for (int i = 0; i < count; ++i) {
+ linesOnlyPath.lineTo(quads[2 * i + 2]);
+ quadPoints.push_back(quads[2 * i]);
+ quadPoints.push_back(quads[2 * i + 1]);
+ quadPoints.push_back(quads[2 * i + 2]);
+ }
+ break;
+ }
+ case SkPath::kClose_Verb:
+ linesOnlyPath.close();
+ break;
+ case SkPath::kDone_Verb:
+ done = true;
+ break;
+ default: SkASSERT(false);
+ }
+ }
+ SkASSERT(quadPoints.count() % 3 == 0);
+ for (int i = 0; i < quadPoints.count(); i += 3) {
+ add_quad(&quadPoints[i], quadVertices);
+ }
+
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey key;
+ GrUniqueKey::Builder builder(&key, kDomain, 2);
+ builder[0] = path.getGenerationID();
+ builder[1] = path.getFillType();
+ builder.finish();
+ GrTessellator::WindingVertex* windingVertices;
+ int triVertexCount = GrTessellator::PathToVertices(linesOnlyPath, 0, bounds, &windingVertices);
+ if (triVertexCount > 0) {
+ for (int i = 0; i < triVertexCount; i += 3) {
+ SkPoint p1 = windingVertices[i].fPos;
+ SkPoint p2 = windingVertices[i + 1].fPos;
+ SkPoint p3 = windingVertices[i + 2].fPos;
+ int winding = windingVertices[i].fWinding;
+ SkASSERT(windingVertices[i + 1].fWinding == winding);
+ SkASSERT(windingVertices[i + 2].fWinding == winding);
+ SkScalar cross = (p2 - p1).cross(p3 - p1);
+ SkPoint bloated[3] = { p1, p2, p3 };
+ if (cross < 0.0f) {
+ SkTSwap(p1, p3);
+ }
+ if (bloat_tri(bloated)) {
+ triVertices.push_back({ bloated[0], p1, p2, p3, winding });
+ triVertices.push_back({ bloated[1], p1, p2, p3, winding });
+ triVertices.push_back({ bloated[2], p1, p2, p3, winding });
+ }
+ else {
+ SkScalar minX = SkTMin(p1.fX, SkTMin(p2.fX, p3.fX)) - 1.0f;
+ SkScalar minY = SkTMin(p1.fY, SkTMin(p2.fY, p3.fY)) - 1.0f;
+ SkScalar maxX = SkTMax(p1.fX, SkTMax(p2.fX, p3.fX)) + 1.0f;
+ SkScalar maxY = SkTMax(p1.fY, SkTMax(p2.fY, p3.fY)) + 1.0f;
+ triVertices.push_back({ { minX, minY }, p1, p2, p3, winding });
+ triVertices.push_back({ { maxX, minY }, p1, p2, p3, winding });
+ triVertices.push_back({ { minX, maxY }, p1, p2, p3, winding });
+ triVertices.push_back({ { maxX, minY }, p1, p2, p3, winding });
+ triVertices.push_back({ { maxX, maxY }, p1, p2, p3, winding });
+ triVertices.push_back({ { minX, maxY }, p1, p2, p3, winding });
+ }
+ }
+ delete[] windingVertices;
+ }
+ return triVertexCount > 0 || quadVertices.count() > 0;
+}
+
+class PLSAATriangleEffect : public GrPLSGeometryProcessor {
+public:
+
+ static GrPLSGeometryProcessor* Create(const SkMatrix& localMatrix,
+ bool usesLocalCoords) {
+ return new PLSAATriangleEffect(localMatrix, usesLocalCoords);
+ }
+
+ virtual ~PLSAATriangleEffect() {}
+
+ const char* name() const override { return "PLSAATriangle"; }
+
+ const Attribute* inPosition() const { return fInPosition; }
+ const Attribute* inVertex1() const { return fInVertex1; }
+ const Attribute* inVertex2() const { return fInVertex2; }
+ const Attribute* inVertex3() const { return fInVertex3; }
+ const Attribute* inWindings() const { return fInWindings; }
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+
+ class GLSLProcessor : public GrGLSLGeometryProcessor {
+ public:
+ GLSLProcessor(const GrGeometryProcessor&) {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const PLSAATriangleEffect& te = args.fGP.cast<PLSAATriangleEffect>();
+ GrGLSLVertexBuilder* vsBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ varyingHandler->emitAttributes(te);
+
+ this->setupPosition(vsBuilder, gpArgs, te.inPosition()->fName);
+
+ GrGLSLVertToFrag v1(kVec2f_GrSLType);
+ varyingHandler->addVarying("Vertex1", &v1, kHigh_GrSLPrecision);
+ vsBuilder->codeAppendf("%s = vec2(%s.x, %s.y);",
+ v1.vsOut(),
+ te.inVertex1()->fName,
+ te.inVertex1()->fName);
+
+ GrGLSLVertToFrag v2(kVec2f_GrSLType);
+ varyingHandler->addVarying("Vertex2", &v2, kHigh_GrSLPrecision);
+ vsBuilder->codeAppendf("%s = vec2(%s.x, %s.y);",
+ v2.vsOut(),
+ te.inVertex2()->fName,
+ te.inVertex2()->fName);
+
+ GrGLSLVertToFrag v3(kVec2f_GrSLType);
+ varyingHandler->addVarying("Vertex3", &v3, kHigh_GrSLPrecision);
+ vsBuilder->codeAppendf("%s = vec2(%s.x, %s.y);",
+ v3.vsOut(),
+ te.inVertex3()->fName,
+ te.inVertex3()->fName);
+
+ GrGLSLVertToFrag delta1(kVec2f_GrSLType);
+ varyingHandler->addVarying("delta1", &delta1, kHigh_GrSLPrecision);
+ vsBuilder->codeAppendf("%s = vec2(%s.x - %s.x, %s.y - %s.y) * 0.5;",
+ delta1.vsOut(), v1.vsOut(), v2.vsOut(), v2.vsOut(), v1.vsOut());
+
+ GrGLSLVertToFrag delta2(kVec2f_GrSLType);
+ varyingHandler->addVarying("delta2", &delta2, kHigh_GrSLPrecision);
+ vsBuilder->codeAppendf("%s = vec2(%s.x - %s.x, %s.y - %s.y) * 0.5;",
+ delta2.vsOut(), v2.vsOut(), v3.vsOut(), v3.vsOut(), v2.vsOut());
+
+ GrGLSLVertToFrag delta3(kVec2f_GrSLType);
+ varyingHandler->addVarying("delta3", &delta3, kHigh_GrSLPrecision);
+ vsBuilder->codeAppendf("%s = vec2(%s.x - %s.x, %s.y - %s.y) * 0.5;",
+ delta3.vsOut(), v3.vsOut(), v1.vsOut(), v1.vsOut(), v3.vsOut());
+
+ GrGLSLVertToFrag windings(kInt_GrSLType);
+ varyingHandler->addFlatVarying("windings", &windings, kLow_GrSLPrecision);
+ vsBuilder->codeAppendf("%s = %s;",
+ windings.vsOut(), te.inWindings()->fName);
+
+ // emit transforms
+ this->emitTransforms(vsBuilder, varyingHandler, uniformHandler, gpArgs->fPositionVar,
+ te.inPosition()->fName, te.localMatrix(),
+ args.fFPCoordTransformHandler);
+
+ GrGLSLPPFragmentBuilder* fsBuilder = args.fFragBuilder;
+ SkAssertResult(fsBuilder->enableFeature(
+ GrGLSLFragmentShaderBuilder::kPixelLocalStorage_GLSLFeature));
+ SkAssertResult(fsBuilder->enableFeature(
+ GrGLSLFragmentShaderBuilder::kStandardDerivatives_GLSLFeature));
+ fsBuilder->declAppendf(GR_GL_PLS_PATH_DATA_DECL);
+ // Compute four subsamples, each shifted a quarter pixel along x and y from
+ // gl_FragCoord. The oriented box positioning of the subsamples is of course not
+ // optimal, but it greatly simplifies the math and this simplification is necessary for
+ // performance reasons.
+ fsBuilder->codeAppendf("highp vec2 firstSample = %s.xy - vec2(0.25);",
+ fsBuilder->fragmentPosition());
+ fsBuilder->codeAppendf("highp vec2 delta1 = %s;", delta1.fsIn());
+ fsBuilder->codeAppendf("highp vec2 delta2 = %s;", delta2.fsIn());
+ fsBuilder->codeAppendf("highp vec2 delta3 = %s;", delta3.fsIn());
+ // Check whether first sample is inside the triangle by computing three dot products. If
+ // all are < 0, we're inside. The first vector in each case is half of what it is
+ // "supposed" to be, because we re-use them later as adjustment factors for which half
+ // is the correct value, so we multiply the dots by two to compensate.
+ fsBuilder->codeAppendf("highp float d1 = dot(delta1, (firstSample - %s).yx) * 2.0;",
+ v1.fsIn());
+ fsBuilder->codeAppendf("highp float d2 = dot(delta2, (firstSample - %s).yx) * 2.0;",
+ v2.fsIn());
+ fsBuilder->codeAppendf("highp float d3 = dot(delta3, (firstSample - %s).yx) * 2.0;",
+ v3.fsIn());
+ fsBuilder->codeAppend("highp float dmax = max(d1, max(d2, d3));");
+ fsBuilder->codeAppendf("pls.windings[0] += (dmax <= 0.0) ? %s : 0;", windings.fsIn());
+ // for subsequent samples, we don't recalculate the entire dot product -- just adjust it
+ // to the value it would have if we did recompute it.
+ fsBuilder->codeAppend("d1 += delta1.x;");
+ fsBuilder->codeAppend("d2 += delta2.x;");
+ fsBuilder->codeAppend("d3 += delta3.x;");
+ fsBuilder->codeAppend("dmax = max(d1, max(d2, d3));");
+ fsBuilder->codeAppendf("pls.windings[1] += (dmax <= 0.0) ? %s : 0;", windings.fsIn());
+ fsBuilder->codeAppend("d1 += delta1.y;");
+ fsBuilder->codeAppend("d2 += delta2.y;");
+ fsBuilder->codeAppend("d3 += delta3.y;");
+ fsBuilder->codeAppend("dmax = max(d1, max(d2, d3));");
+ fsBuilder->codeAppendf("pls.windings[2] += (dmax <= 0.0) ? %s : 0;", windings.fsIn());
+ fsBuilder->codeAppend("d1 -= delta1.x;");
+ fsBuilder->codeAppend("d2 -= delta2.x;");
+ fsBuilder->codeAppend("d3 -= delta3.x;");
+ fsBuilder->codeAppend("dmax = max(d1, max(d2, d3));");
+ fsBuilder->codeAppendf("pls.windings[3] += (dmax <= 0.0) ? %s : 0;", windings.fsIn());
+ }
+
+ static inline void GenKey(const GrGeometryProcessor& gp,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const PLSAATriangleEffect& te = gp.cast<PLSAATriangleEffect>();
+ uint32_t key = 0;
+ key |= te.localMatrix().hasPerspective() ? 0x1 : 0x0;
+ b->add32(key);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& gp,
+ FPCoordTransformIter&& transformIter) override {
+ this->setTransformDataHelper(gp.cast<PLSAATriangleEffect>().fLocalMatrix, pdman,
+ &transformIter);
+ }
+
+ private:
+ typedef GrGLSLGeometryProcessor INHERITED;
+ };
+
+ virtual void getGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const override {
+ GLSLProcessor::GenKey(*this, caps, b);
+ }
+
+ virtual GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override {
+ return new GLSLProcessor(*this);
+ }
+
+private:
+ PLSAATriangleEffect(const SkMatrix& localMatrix, bool usesLocalCoords)
+ : fLocalMatrix(localMatrix)
+ , fUsesLocalCoords(usesLocalCoords) {
+ this->initClassID<PLSAATriangleEffect>();
+ fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ fInVertex1 = &this->addVertexAttrib("inVertex1", kVec2f_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ fInVertex2 = &this->addVertexAttrib("inVertex2", kVec2f_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ fInVertex3 = &this->addVertexAttrib("inVertex3", kVec2f_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ fInWindings = &this->addVertexAttrib("inWindings", kInt_GrVertexAttribType,
+ kLow_GrSLPrecision);
+ this->setWillReadFragmentPosition();
+ }
+
+ const Attribute* fInPosition;
+ const Attribute* fInVertex1;
+ const Attribute* fInVertex2;
+ const Attribute* fInVertex3;
+ const Attribute* fInWindings;
+ SkMatrix fLocalMatrix;
+ bool fUsesLocalCoords;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+/*
+ * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
+ * two components of the vertex attribute. Coverage is based on signed
+ * distance with negative being inside, positive outside. The edge is specified in
+ * window space (y-down). If either the third or fourth component of the interpolated
+ * vertex coord is > 0 then the pixel is considered outside the edge. This is used to
+ * attempt to trim to a portion of the infinite quad.
+ * Requires shader derivative instruction support.
+ */
+
+class PLSQuadEdgeEffect : public GrPLSGeometryProcessor {
+public:
+
+ static GrPLSGeometryProcessor* Create(const SkMatrix& localMatrix,
+ bool usesLocalCoords) {
+ return new PLSQuadEdgeEffect(localMatrix, usesLocalCoords);
+ }
+
+ virtual ~PLSQuadEdgeEffect() {}
+
+ const char* name() const override { return "PLSQuadEdge"; }
+
+ const Attribute* inPosition() const { return fInPosition; }
+ const Attribute* inUV() const { return fInUV; }
+ const Attribute* inEndpoint1() const { return fInEndpoint1; }
+ const Attribute* inEndpoint2() const { return fInEndpoint2; }
+ const Attribute* inWindings() const { return fInWindings; }
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+
+ class GLSLProcessor : public GrGLSLGeometryProcessor {
+ public:
+ GLSLProcessor(const GrGeometryProcessor&) {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const PLSQuadEdgeEffect& qe = args.fGP.cast<PLSQuadEdgeEffect>();
+ GrGLSLVertexBuilder* vsBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(qe);
+
+ GrGLSLVertToFrag uv(kVec2f_GrSLType);
+ varyingHandler->addVarying("uv", &uv, kHigh_GrSLPrecision);
+ vsBuilder->codeAppendf("%s = %s;", uv.vsOut(), qe.inUV()->fName);
+
+ GrGLSLVertToFrag ep1(kVec2f_GrSLType);
+ varyingHandler->addVarying("endpoint1", &ep1, kHigh_GrSLPrecision);
+ vsBuilder->codeAppendf("%s = vec2(%s.x, %s.y);", ep1.vsOut(),
+ qe.inEndpoint1()->fName, qe.inEndpoint1()->fName);
+
+ GrGLSLVertToFrag ep2(kVec2f_GrSLType);
+ varyingHandler->addVarying("endpoint2", &ep2, kHigh_GrSLPrecision);
+ vsBuilder->codeAppendf("%s = vec2(%s.x, %s.y);", ep2.vsOut(),
+ qe.inEndpoint2()->fName, qe.inEndpoint2()->fName);
+
+ GrGLSLVertToFrag delta(kVec2f_GrSLType);
+ varyingHandler->addVarying("delta", &delta, kHigh_GrSLPrecision);
+ vsBuilder->codeAppendf("%s = vec2(%s.x - %s.x, %s.y - %s.y) * 0.5;",
+ delta.vsOut(), ep1.vsOut(), ep2.vsOut(), ep2.vsOut(),
+ ep1.vsOut());
+
+ GrGLSLVertToFrag windings(kInt_GrSLType);
+ varyingHandler->addFlatVarying("windings", &windings, kLow_GrSLPrecision);
+ vsBuilder->codeAppendf("%s = %s;",
+ windings.vsOut(), qe.inWindings()->fName);
+
+ // Setup position
+ this->setupPosition(vsBuilder, gpArgs, qe.inPosition()->fName);
+
+ // emit transforms
+ this->emitTransforms(vsBuilder, varyingHandler, uniformHandler, gpArgs->fPositionVar,
+ qe.inPosition()->fName, qe.localMatrix(),
+ args.fFPCoordTransformHandler);
+
+ GrGLSLPPFragmentBuilder* fsBuilder = args.fFragBuilder;
+ SkAssertResult(fsBuilder->enableFeature(
+ GrGLSLFragmentShaderBuilder::kPixelLocalStorage_GLSLFeature));
+ SkAssertResult(fsBuilder->enableFeature(
+ GrGLSLFragmentShaderBuilder::kStandardDerivatives_GLSLFeature));
+ static const int QUAD_ARGS = 2;
+ GrGLSLShaderVar inQuadArgs[QUAD_ARGS] = {
+ GrGLSLShaderVar("dot", kFloat_GrSLType, 0, kHigh_GrSLPrecision),
+ GrGLSLShaderVar("uv", kVec2f_GrSLType, 0, kHigh_GrSLPrecision)
+ };
+ SkString inQuadName;
+
+ const char* inQuadCode = "if (uv.x * uv.x <= uv.y) {"
+ "return dot >= 0.0;"
+ "} else {"
+ "return false;"
+ "}";
+ fsBuilder->emitFunction(kBool_GrSLType, "in_quad", QUAD_ARGS, inQuadArgs, inQuadCode,
+ &inQuadName);
+ fsBuilder->declAppendf(GR_GL_PLS_PATH_DATA_DECL);
+ // keep the derivative instructions outside the conditional
+ fsBuilder->codeAppendf("highp vec2 uvdX = dFdx(%s);", uv.fsIn());
+ fsBuilder->codeAppendf("highp vec2 uvdY = dFdy(%s);", uv.fsIn());
+ fsBuilder->codeAppend("highp vec2 uvIncX = uvdX * 0.45 + uvdY * -0.1;");
+ fsBuilder->codeAppend("highp vec2 uvIncY = uvdX * 0.1 + uvdY * 0.55;");
+ fsBuilder->codeAppendf("highp vec2 uv = %s.xy - uvdX * 0.35 - uvdY * 0.25;",
+ uv.fsIn());
+ fsBuilder->codeAppendf("highp vec2 firstSample = %s.xy - vec2(0.25);",
+ fsBuilder->fragmentPosition());
+ fsBuilder->codeAppendf("highp float d = dot(%s, (firstSample - %s).yx) * 2.0;",
+ delta.fsIn(), ep1.fsIn());
+ fsBuilder->codeAppendf("pls.windings[0] += %s(d, uv) ? %s : 0;", inQuadName.c_str(),
+ windings.fsIn());
+ fsBuilder->codeAppend("uv += uvIncX;");
+ fsBuilder->codeAppendf("d += %s.x;", delta.fsIn());
+ fsBuilder->codeAppendf("pls.windings[1] += %s(d, uv) ? %s : 0;", inQuadName.c_str(),
+ windings.fsIn());
+ fsBuilder->codeAppend("uv += uvIncY;");
+ fsBuilder->codeAppendf("d += %s.y;", delta.fsIn());
+ fsBuilder->codeAppendf("pls.windings[2] += %s(d, uv) ? %s : 0;", inQuadName.c_str(),
+ windings.fsIn());
+ fsBuilder->codeAppend("uv -= uvIncX;");
+ fsBuilder->codeAppendf("d -= %s.x;", delta.fsIn());
+ fsBuilder->codeAppendf("pls.windings[3] += %s(d, uv) ? %s : 0;", inQuadName.c_str(),
+ windings.fsIn());
+ }
+
+ static inline void GenKey(const GrGeometryProcessor& gp,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const PLSQuadEdgeEffect& qee = gp.cast<PLSQuadEdgeEffect>();
+ uint32_t key = 0;
+ key |= qee.usesLocalCoords() && qee.localMatrix().hasPerspective() ? 0x1 : 0x0;
+ b->add32(key);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& gp,
+ FPCoordTransformIter&& transformIter) override {
+ this->setTransformDataHelper(gp.cast<PLSQuadEdgeEffect>().fLocalMatrix, pdman,
+ &transformIter);
+ }
+
+ private:
+ typedef GrGLSLGeometryProcessor INHERITED;
+ };
+
+ virtual void getGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const override {
+ GLSLProcessor::GenKey(*this, caps, b);
+ }
+
+ virtual GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override {
+ return new GLSLProcessor(*this);
+ }
+
+private:
+ PLSQuadEdgeEffect(const SkMatrix& localMatrix, bool usesLocalCoords)
+ : fLocalMatrix(localMatrix)
+ , fUsesLocalCoords(usesLocalCoords) {
+ this->initClassID<PLSQuadEdgeEffect>();
+ fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ fInUV = &this->addVertexAttrib("inUV", kVec2f_GrVertexAttribType, kHigh_GrSLPrecision);
+ fInEndpoint1 = &this->addVertexAttrib("inEndpoint1", kVec2f_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ fInEndpoint2 = &this->addVertexAttrib("inEndpoint2", kVec2f_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ fInWindings = &this->addVertexAttrib("inWindings", kInt_GrVertexAttribType,
+ kLow_GrSLPrecision);
+ this->setWillReadFragmentPosition();
+ }
+
+ const Attribute* fInPosition;
+ const Attribute* fInUV;
+ const Attribute* fInEndpoint1;
+ const Attribute* fInEndpoint2;
+ const Attribute* fInWindings;
+ SkMatrix fLocalMatrix;
+ bool fUsesLocalCoords;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+class PLSFinishEffect : public GrGeometryProcessor {
+public:
+
+ static GrGeometryProcessor* Create(GrColor color, bool useEvenOdd, const SkMatrix& localMatrix,
+ bool usesLocalCoords) {
+ return new PLSFinishEffect(color, useEvenOdd, localMatrix, usesLocalCoords);
+ }
+
+ virtual ~PLSFinishEffect() {}
+
+ const char* name() const override { return "PLSFinish"; }
+
+ const Attribute* inPosition() const { return fInPosition; }
+ GrColor color() const { return fColor; }
+ bool colorIgnored() const { return GrColor_ILLEGAL == fColor; }
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+
+ GrPixelLocalStorageState getPixelLocalStorageState() const override {
+ return GrPixelLocalStorageState::kFinish_GrPixelLocalStorageState;
+ }
+
+ const char* getDestColorOverride() const override {
+ return GR_GL_PLS_DSTCOLOR_NAME;
+ }
+
+ class GLSLProcessor : public GrGLSLGeometryProcessor {
+ public:
+ GLSLProcessor(const GrGeometryProcessor&) {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const PLSFinishEffect& fe = args.fGP.cast<PLSFinishEffect>();
+ GrGLSLVertexBuilder* vsBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ fUseEvenOdd = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType, kLow_GrSLPrecision,
+ "useEvenOdd");
+ const char* useEvenOdd = uniformHandler->getUniformCStr(fUseEvenOdd);
+
+ varyingHandler->emitAttributes(fe);
+ this->setupPosition(vsBuilder, gpArgs, fe.inPosition()->fName);
+ this->emitTransforms(vsBuilder, varyingHandler, uniformHandler, gpArgs->fPositionVar,
+ fe.inPosition()->fName, fe.localMatrix(),
+ args.fFPCoordTransformHandler);
+
+ GrGLSLPPFragmentBuilder* fsBuilder = args.fFragBuilder;
+ SkAssertResult(fsBuilder->enableFeature(
+ GrGLSLFragmentShaderBuilder::kPixelLocalStorage_GLSLFeature));
+ fsBuilder->declAppendf(GR_GL_PLS_PATH_DATA_DECL);
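+            // Resolve the four per-sample winding counts from pixel local storage into coverage:
+            // even/odd uses winding parity, nonzero uses winding != 0, each sample contributing
+            // 0.25.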
+ fsBuilder->codeAppend("float coverage;");
+ fsBuilder->codeAppendf("if (%s != 0.0) {", useEvenOdd);
+ fsBuilder->codeAppend("coverage = float(abs(pls.windings[0]) % 2) * 0.25;");
+ fsBuilder->codeAppend("coverage += float(abs(pls.windings[1]) % 2) * 0.25;");
+ fsBuilder->codeAppend("coverage += float(abs(pls.windings[2]) % 2) * 0.25;");
+ fsBuilder->codeAppend("coverage += float(abs(pls.windings[3]) % 2) * 0.25;");
+ fsBuilder->codeAppend("} else {");
+ fsBuilder->codeAppend("coverage = pls.windings[0] != 0 ? 0.25 : 0.0;");
+ fsBuilder->codeAppend("coverage += pls.windings[1] != 0 ? 0.25 : 0.0;");
+ fsBuilder->codeAppend("coverage += pls.windings[2] != 0 ? 0.25 : 0.0;");
+ fsBuilder->codeAppend("coverage += pls.windings[3] != 0 ? 0.25 : 0.0;");
+ fsBuilder->codeAppend("}");
+ if (!fe.colorIgnored()) {
+ this->setupUniformColor(fsBuilder, uniformHandler, args.fOutputColor,
+ &fColorUniform);
+ }
+ fsBuilder->codeAppendf("%s = vec4(coverage);", args.fOutputCoverage);
+ fsBuilder->codeAppendf("%s = vec4(1.0, 0.0, 1.0, 1.0);", args.fOutputColor);
+ }
+
+ static inline void GenKey(const GrGeometryProcessor& gp,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const PLSFinishEffect& fe = gp.cast<PLSFinishEffect>();
+ uint32_t key = 0;
+ key |= fe.usesLocalCoords() && fe.localMatrix().hasPerspective() ? 0x1 : 0x0;
+ b->add32(key);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& gp,
+ FPCoordTransformIter&& transformIter) override {
+ const PLSFinishEffect& fe = gp.cast<PLSFinishEffect>();
+ pdman.set1f(fUseEvenOdd, fe.fUseEvenOdd);
+ if (fe.color() != fColor && !fe.colorIgnored()) {
+ GrGLfloat c[4];
+ GrColorToRGBAFloat(fe.color(), c);
+ pdman.set4fv(fColorUniform, 1, c);
+ fColor = fe.color();
+ }
+ this->setTransformDataHelper(fe.fLocalMatrix, pdman, &transformIter);
+ }
+
+ private:
+ GrColor fColor;
+ UniformHandle fColorUniform;
+ UniformHandle fUseEvenOdd;
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+ };
+
+ virtual void getGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const override {
+ GLSLProcessor::GenKey(*this, caps, b);
+ }
+
+ virtual GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override {
+ return new GLSLProcessor(*this);
+ }
+
+private:
+ PLSFinishEffect(GrColor color, bool useEvenOdd, const SkMatrix& localMatrix,
+ bool usesLocalCoords)
+ : fColor(color)
+ , fUseEvenOdd(useEvenOdd)
+ , fLocalMatrix(localMatrix)
+ , fUsesLocalCoords(usesLocalCoords) {
+ this->initClassID<PLSFinishEffect>();
+ fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ }
+
+ const Attribute* fInPosition;
+ GrColor fColor;
+ bool fUseEvenOdd;
+ SkMatrix fLocalMatrix;
+ bool fUsesLocalCoords;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool GrPLSPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ // We have support for even-odd rendering, but are having some troublesome
+ // seams. Disable in the presence of even-odd for now.
+ SkPath path;
+ args.fShape->asPath(&path);
+ return args.fShaderCaps->shaderDerivativeSupport() && args.fAntiAlias &&
+ args.fShape->style().isSimpleFill() && !path.isInverseFillType() &&
+ path.getFillType() == SkPath::FillType::kWinding_FillType;
+}
+
+class PLSPathBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+ PLSPathBatch(GrColor color, const SkPath& path, const SkMatrix& viewMatrix)
+ : INHERITED(ClassID())
+ , fColor(color)
+ , fPath(path)
+ , fViewMatrix(viewMatrix) {
+ // compute bounds
+ this->setTransformedBounds(path.getBounds(), fViewMatrix, HasAABloat::kYes,
+ IsZeroArea::kNo);
+ }
+
+ const char* name() const override { return "PLSBatch"; }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one geometry bundle
+ color->setKnownFourComponents(fColor);
+ coverage->setUnknownSingleComponent();
+ overrides->fUsePLSDstRead = true;
+ }
+
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ // Handle any color overrides
+ if (!overrides.readsColor()) {
+ fColor = GrColor_ILLEGAL;
+ }
+ overrides.getOverrideColorIfSet(&fColor);
+
+ // setup batch properties
+ fUsesLocalCoords = overrides.readsLocalCoords();
+ }
+
+ void onPrepareDraws(Target* target) const override {
+
+ SkMatrix invert;
+ if (fUsesLocalCoords && !fViewMatrix.invert(&invert)) {
+ SkDebugf("Could not invert viewmatrix\n");
+ return;
+ }
+
+ // Setup GrGeometryProcessors
+ SkAutoTUnref<GrPLSGeometryProcessor> triangleProcessor(
+ PLSAATriangleEffect::Create(invert, fUsesLocalCoords));
+ SkAutoTUnref<GrPLSGeometryProcessor> quadProcessor(
+ PLSQuadEdgeEffect::Create(invert, fUsesLocalCoords));
+
+ GrResourceProvider* rp = target->resourceProvider();
+ SkRect bounds;
+ this->bounds().roundOut(&bounds);
+ triangleProcessor->setBounds(bounds);
+ quadProcessor->setBounds(bounds);
+
+        // We rely on SkPath::transform to subdivide the path when the view matrix has
+        // perspective. Otherwise, we apply the view matrix while copying to the segment
+        // representation.
+ const SkMatrix* viewMatrix = &fViewMatrix;
+
+ // We avoid initializing the path unless we have to
+ const SkPath* pathPtr = &fPath;
+ SkTLazy<SkPath> tmpPath;
+ if (viewMatrix->hasPerspective()) {
+ SkPath* tmpPathPtr = tmpPath.init(*pathPtr);
+ tmpPathPtr->setIsVolatile(true);
+ tmpPathPtr->transform(*viewMatrix);
+ viewMatrix = &SkMatrix::I();
+ pathPtr = tmpPathPtr;
+ }
+
+ GrMesh mesh;
+
+ PLSVertices triVertices;
+ PLSVertices quadVertices;
+ if (!get_geometry(*pathPtr, *viewMatrix, triVertices, quadVertices, rp, bounds)) {
+ return;
+ }
+
+ if (triVertices.count()) {
+ const GrBuffer* triVertexBuffer;
+ int firstTriVertex;
+ size_t triStride = triangleProcessor->getVertexStride();
+ PLSVertex* triVerts = reinterpret_cast<PLSVertex*>(target->makeVertexSpace(
+ triStride, triVertices.count(), &triVertexBuffer, &firstTriVertex));
+ if (!triVerts) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+ for (int i = 0; i < triVertices.count(); ++i) {
+ triVerts[i] = triVertices[i];
+ }
+ mesh.init(kTriangles_GrPrimitiveType, triVertexBuffer, firstTriVertex,
+ triVertices.count());
+ target->draw(triangleProcessor, mesh);
+ }
+
+ if (quadVertices.count()) {
+ const GrBuffer* quadVertexBuffer;
+ int firstQuadVertex;
+ size_t quadStride = quadProcessor->getVertexStride();
+ PLSVertex* quadVerts = reinterpret_cast<PLSVertex*>(target->makeVertexSpace(
+ quadStride, quadVertices.count(), &quadVertexBuffer, &firstQuadVertex));
+ if (!quadVerts) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+ for (int i = 0; i < quadVertices.count(); ++i) {
+ quadVerts[i] = quadVertices[i];
+ }
+ mesh.init(kTriangles_GrPrimitiveType, quadVertexBuffer, firstQuadVertex,
+ quadVertices.count());
+ target->draw(quadProcessor, mesh);
+ }
+
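+        // Final pass: draw a rect covering the bounds with the finish effect, which converts the
+        // accumulated winding counts into a coverage value.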
+ SkAutoTUnref<GrGeometryProcessor> finishProcessor(
+ PLSFinishEffect::Create(fColor,
+ pathPtr->getFillType() ==
+ SkPath::FillType::kEvenOdd_FillType,
+ invert,
+ fUsesLocalCoords));
+ const GrBuffer* rectVertexBuffer;
+ size_t finishStride = finishProcessor->getVertexStride();
+ int firstRectVertex;
+ static const int kRectVertexCount = 6;
+ SkPoint* rectVerts = reinterpret_cast<SkPoint*>(target->makeVertexSpace(
+ finishStride, kRectVertexCount, &rectVertexBuffer, &firstRectVertex));
+ if (!rectVerts) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+ rectVerts[0] = { bounds.fLeft, bounds.fTop };
+ rectVerts[1] = { bounds.fLeft, bounds.fBottom };
+ rectVerts[2] = { bounds.fRight, bounds.fBottom };
+ rectVerts[3] = { bounds.fLeft, bounds.fTop };
+ rectVerts[4] = { bounds.fRight, bounds.fTop };
+ rectVerts[5] = { bounds.fRight, bounds.fBottom };
+
+ mesh.init(kTriangles_GrPrimitiveType, rectVertexBuffer, firstRectVertex,
+ kRectVertexCount);
+ target->draw(finishProcessor, mesh);
+ }
+
+private:
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ return false;
+ }
+
+ bool fUsesLocalCoords;
+
+ GrColor fColor;
+ SkPath fPath;
+ SkMatrix fViewMatrix;
+ typedef GrVertexBatch INHERITED;
+};
+
+SkDEBUGCODE(bool inPLSDraw = false;)
+bool GrPLSPathRenderer::onDrawPath(const DrawPathArgs& args) {
+ SkASSERT(!args.fShape->isEmpty());
+ SkASSERT(!inPLSDraw);
+ SkDEBUGCODE(inPLSDraw = true;)
+ SkPath path;
+ args.fShape->asPath(&path);
+
+ SkAutoTUnref<GrDrawBatch> batch(new PLSPathBatch(args.fPaint->getColor(),
+ path, *args.fViewMatrix));
+
+ GrPipelineBuilder pipelineBuilder(*args.fPaint, args.fDrawContext->mustUseHWAA(*args.fPaint));
+ pipelineBuilder.setUserStencil(args.fUserStencilSettings);
+
+ args.fDrawContext->drawBatch(pipelineBuilder, *args.fClip, batch);
+
+ SkDEBUGCODE(inPLSDraw = false;)
+ return true;
+
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_TEST_UTILS
+
+DRAW_BATCH_TEST_DEFINE(PLSPathBatch) {
+ GrColor color = GrRandomColor(random);
+ SkMatrix vm = GrTest::TestMatrixInvertible(random);
+ SkPath path = GrTest::TestPathConvex(random);
+
+ return new PLSPathBatch(color, path, vm);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrPLSPathRenderer.h b/gfx/skia/skia/src/gpu/batches/GrPLSPathRenderer.h
new file mode 100644
index 000000000..39f21ba68
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrPLSPathRenderer.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPLSPathRenderer_DEFINED
+#define GrPLSPathRenderer_DEFINED
+
+#include "GrPathRenderer.h"
+
+/*
+ * Renders arbitrary antialiased paths using pixel local storage as a scratch buffer. The overall
+ * technique is very similar to the approach presented in "Resolution independent rendering of
+ * deformable vector objects using graphics hardware" by Kokojima et al.
+ *
+ * We first render the straight-line portions of the path (essentially pretending as if all segments
+ * were kLine_Verb) as a triangle fan, using a fragment shader which updates the winding counts
+ * appropriately. We then render the curved portions of the path using a Loop-Blinn shader which
+ * calculates which portion of the triangle is covered by the quad (conics and cubics are split down
+ * to quads). Where we diverge from Kokojima is that, instead of rendering into the stencil buffer
+ * and using built-in MSAA to handle straight-line antialiasing, we use the pixel local storage area
+ * and calculate the MSAA ourselves in the fragment shader. Essentially, we manually evaluate the
+ * coverage of each pixel four times, storing four winding counts into the pixel local storage area,
+ * and compute the final coverage based on those winding counts.
+ *
+ * Our approach is complicated by the need to perform antialiasing on straight edges as well,
+ * without relying on hardware MSAA. We instead bloat the triangles to ensure complete coverage,
+ * pass the original (un-bloated) vertices in to the fragment shader, and then have the fragment
+ * shader use these vertices to evaluate whether a given sample is located within the triangle or
+ * not. This gives us MSAA4 edges on triangles which line up nicely with no seams. We similarly face
+ * problems on the back (flat) edges of quads, where we have to ensure that the back edge is
+ * antialiased in the same way. Similar to the triangle case, we pass in the two (unbloated)
+ * vertices defining the back edge of the quad and the fragment shader uses these vertex coordinates
+ * to discard samples falling on the other side of the quad's back edge.
+ */
+class GrPLSPathRenderer : public GrPathRenderer {
+public:
+ GrPLSPathRenderer();
+
+ bool onCanDrawPath(const CanDrawPathArgs& args) const override;
+
+protected:
+ bool onDrawPath(const DrawPathArgs& args) override;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrPathStencilSettings.h b/gfx/skia/skia/src/gpu/batches/GrPathStencilSettings.h
new file mode 100644
index 000000000..f37d1b2f1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrPathStencilSettings.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrPathStencilSettings_DEFINED
+#define GrPathStencilSettings_DEFINED
+
+#include "GrUserStencilSettings.h"
+
+////////////////////////////////////////////////////////////////////////////////
+// Stencil rules for paths
+
+////// Even/Odd
+
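+// Each covered sample inverts the stencil value, so pixels covered an odd number of times end up
+// nonzero after the stencil pass.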
+static constexpr GrUserStencilSettings gEOStencilPass(
+ GrUserStencilSettings::StaticInit<
+ 0xffff,
+ GrUserStencilTest::kAlwaysIfInClip,
+ 0xffff,
+ GrUserStencilOp::kInvert,
+ GrUserStencilOp::kKeep,
+ 0xffff>()
+);
+
+// ok not to check clip b/c stencil pass only wrote inside clip
+static constexpr GrUserStencilSettings gEOColorPass(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kNotEqual,
+ 0xffff,
+ GrUserStencilOp::kZero,
+ GrUserStencilOp::kZero,
+ 0xffff>()
+);
+
+// have to check clip b/c outside clip will always be zero.
+static constexpr GrUserStencilSettings gInvEOColorPass(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kEqualIfInClip,
+ 0xffff,
+ GrUserStencilOp::kZero,
+ GrUserStencilOp::kZero,
+ 0xffff>()
+);
+
+////// Winding
+
+// When we have two-sided stencil we increment on front faces and decrement on back faces.
+// When we don't have wrap incr and decr, we use the stencil test to simulate them.
+
+static constexpr GrUserStencilSettings gWindStencilSeparateWithWrap(
+ GrUserStencilSettings::StaticInitSeparate<
+ 0xffff, 0xffff,
+ GrUserStencilTest::kAlwaysIfInClip, GrUserStencilTest::kAlwaysIfInClip,
+ 0xffff, 0xffff,
+ GrUserStencilOp::kIncWrap, GrUserStencilOp::kDecWrap,
+ GrUserStencilOp::kKeep, GrUserStencilOp::kKeep,
+ 0xffff, 0xffff>()
+);
+
+// if inc'ing the max value, invert to make 0
+// if dec'ing zero invert to make all ones.
+// we can't avoid touching the stencil on both passing and
+// failing, so we can't restrict ourselves to the clip.
+static constexpr GrUserStencilSettings gWindStencilSeparateNoWrap(
+ GrUserStencilSettings::StaticInitSeparate<
+ 0xffff, 0x0000,
+ GrUserStencilTest::kEqual, GrUserStencilTest::kEqual,
+ 0xffff, 0xffff,
+ GrUserStencilOp::kInvert, GrUserStencilOp::kInvert,
+ GrUserStencilOp::kIncMaybeClamp, GrUserStencilOp::kDecMaybeClamp,
+ 0xffff, 0xffff>()
+);
+
+// When there are no separate faces we do two passes to setup the winding rule
+// stencil. First we draw the front faces and inc, then we draw the back faces
+// and dec. These are the same as the above two, split into the incrementing and
+// decrementing passes.
+static constexpr GrUserStencilSettings gWindSingleStencilWithWrapInc(
+ GrUserStencilSettings::StaticInit<
+ 0xffff,
+ GrUserStencilTest::kAlwaysIfInClip,
+ 0xffff,
+ GrUserStencilOp::kIncWrap,
+ GrUserStencilOp::kKeep,
+ 0xffff>()
+);
+
+static constexpr GrUserStencilSettings gWindSingleStencilWithWrapDec(
+ GrUserStencilSettings::StaticInit<
+ 0xffff,
+ GrUserStencilTest::kAlwaysIfInClip,
+ 0xffff,
+ GrUserStencilOp::kDecWrap,
+ GrUserStencilOp::kKeep,
+ 0xffff>()
+);
+
+static constexpr GrUserStencilSettings gWindSingleStencilNoWrapInc(
+ GrUserStencilSettings::StaticInit<
+ 0xffff,
+ GrUserStencilTest::kEqual,
+ 0xffff,
+ GrUserStencilOp::kInvert,
+ GrUserStencilOp::kIncMaybeClamp,
+ 0xffff>()
+);
+
+static constexpr GrUserStencilSettings gWindSingleStencilNoWrapDec(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kEqual,
+ 0xffff,
+ GrUserStencilOp::kInvert,
+ GrUserStencilOp::kDecMaybeClamp,
+ 0xffff>()
+);
+
+// Color passes are the same whether we use the two-sided stencil or two passes
+
+static constexpr GrUserStencilSettings gWindColorPass(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kLessIfInClip, // "0 < stencil" is equivalent to "0 != stencil".
+ 0xffff,
+ GrUserStencilOp::kZero,
+ GrUserStencilOp::kZero,
+ 0xffff>()
+);
+
+static constexpr GrUserStencilSettings gInvWindColorPass(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kEqualIfInClip,
+ 0xffff,
+ GrUserStencilOp::kZero,
+ GrUserStencilOp::kZero,
+ 0xffff>()
+);
+
+////// Normal render to stencil
+
+// Sometimes the default path renderer can draw a path directly to the stencil
+// buffer without having to first resolve the interior / exterior.
+static constexpr GrUserStencilSettings gDirectToStencil(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kAlwaysIfInClip,
+ 0xffff,
+ GrUserStencilOp::kZero,
+ GrUserStencilOp::kIncMaybeClamp,
+ 0xffff>()
+);
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrRectBatchFactory.cpp b/gfx/skia/skia/src/gpu/batches/GrRectBatchFactory.cpp
new file mode 100644
index 000000000..d2ba7f444
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrRectBatchFactory.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrRectBatchFactory.h"
+
+#include "GrAAStrokeRectBatch.h"
+
+#include "SkStrokeRec.h"
+
+namespace GrRectBatchFactory {
+
+GrDrawBatch* CreateAAFillNestedRects(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect rects[2]) {
+ SkASSERT(viewMatrix.rectStaysRect());
+ SkASSERT(!rects[0].isEmpty() && !rects[1].isEmpty());
+
+ SkRect devOutside, devInside;
+ viewMatrix.mapRect(&devOutside, rects[0]);
+ viewMatrix.mapRect(&devInside, rects[1]);
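+    // A degenerate inner rect means there is no hole: fill the entire outer rect, or draw nothing
+    // if the outer rect is degenerate too.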
+ if (devInside.isEmpty()) {
+ if (devOutside.isEmpty()) {
+ return nullptr;
+ }
+ return GrAAFillRectBatch::Create(color, viewMatrix, devOutside, devOutside);
+ }
+
+ return GrAAStrokeRectBatch::CreateFillBetweenRects(color, viewMatrix, devOutside, devInside);
+}
+
+};
diff --git a/gfx/skia/skia/src/gpu/batches/GrRectBatchFactory.h b/gfx/skia/skia/src/gpu/batches/GrRectBatchFactory.h
new file mode 100644
index 000000000..c9b684359
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrRectBatchFactory.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRectBatchFactory_DEFINED
+#define GrRectBatchFactory_DEFINED
+
+#include "GrAAFillRectBatch.h"
+#include "GrAAStrokeRectBatch.h"
+#include "GrAnalyticRectBatch.h"
+#include "GrColor.h"
+#include "GrNonAAFillRectBatch.h"
+#include "GrNonAAStrokeRectBatch.h"
+#include "GrPaint.h"
+#include "SkMatrix.h"
+
+class GrBatch;
+struct SkRect;
+class SkStrokeRec;
+
+/*
+ * A factory for returning batches which can draw rectangles.
+ */
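+//
+// A minimal usage sketch (hypothetical caller; color, viewMatrix and rect are assumed to be in
+// scope):
+//
+//   SkAutoTUnref<GrDrawBatch> batch(
+//       GrRectBatchFactory::CreateNonAAFill(color, viewMatrix, rect, nullptr, nullptr));
+//   // The returned batch is then handed to a draw context for drawing.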
+namespace GrRectBatchFactory {
+
+inline GrDrawBatch* CreateNonAAFill(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkRect* localRect,
+ const SkMatrix* localMatrix) {
+ if (viewMatrix.hasPerspective() || (localMatrix && localMatrix->hasPerspective())) {
+ return GrNonAAFillRectBatch::CreateWithPerspective(color, viewMatrix, rect, localRect,
+ localMatrix);
+ } else {
+ return GrNonAAFillRectBatch::Create(color, viewMatrix, rect, localRect, localMatrix);
+ }
+}
+
+inline GrDrawBatch* CreateAAFill(const GrPaint& paint,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkRect& croppedRect,
+ const SkRect& devRect) {
+ if (!paint.usesDistanceVectorField()) {
+ return GrAAFillRectBatch::Create(paint.getColor(), viewMatrix, croppedRect, devRect);
+ } else {
+ return GrAnalyticRectBatch::CreateAnalyticRectBatch(paint.getColor(), viewMatrix, rect,
+ croppedRect, devRect);
+ }
+}
+
+inline GrDrawBatch* CreateAAFill(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkMatrix& localMatrix,
+ const SkRect& rect,
+ const SkRect& devRect) {
+ return GrAAFillRectBatch::Create(color, viewMatrix, localMatrix, rect, devRect);
+}
+
+inline GrDrawBatch* CreateNonAAStroke(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkStrokeRec& strokeRec,
+ bool snapToPixelCenters) {
+ return GrNonAAStrokeRectBatch::Create(color, viewMatrix, rect, strokeRec, snapToPixelCenters);
+}
+
+inline GrDrawBatch* CreateAAStroke(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkStrokeRec& stroke) {
+ return GrAAStrokeRectBatch::Create(color, viewMatrix, rect, stroke);
+}
+
+// First rect is outer; second rect is inner
+GrDrawBatch* CreateAAFillNestedRects(GrColor,
+ const SkMatrix& viewMatrix,
+ const SkRect rects[2]);
+
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrRegionBatch.cpp b/gfx/skia/skia/src/gpu/batches/GrRegionBatch.cpp
new file mode 100644
index 000000000..058baf0ef
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrRegionBatch.cpp
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrRegionBatch.h"
+
+#include "GrDefaultGeoProcFactory.h"
+#include "GrBatchFlushState.h"
+#include "GrResourceProvider.h"
+#include "GrVertexBatch.h"
+#include "SkMatrixPriv.h"
+#include "SkRegion.h"
+
+static const int kVertsPerInstance = 4;
+static const int kIndicesPerInstance = 6;
+
+static sk_sp<GrGeometryProcessor> make_gp(bool readsCoverage, const SkMatrix& viewMatrix) {
+ using namespace GrDefaultGeoProcFactory;
+ Color color(Color::kAttribute_Type);
+ Coverage coverage(readsCoverage ? Coverage::kSolid_Type : Coverage::kNone_Type);
+
+ LocalCoords localCoords(LocalCoords::kUsePosition_Type);
+ return GrDefaultGeoProcFactory::Make(color, coverage, localCoords, viewMatrix);
+}
+
+static void tesselate_region(intptr_t vertices,
+ size_t vertexStride,
+ GrColor color,
+ const SkRegion& region) {
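+ // Emit one 4-vertex fan per rect in the region: setRectFan() writes the four positions at
+ // vertexStride intervals, then the batch color is written at kColorOffset within each vertex.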
+ SkRegion::Iterator iter(region);
+
+ intptr_t verts = vertices;
+ while (!iter.done()) {
+ SkRect rect = SkRect::Make(iter.rect());
+ SkPoint* position = (SkPoint*) verts;
+ position->setRectFan(rect.fLeft, rect.fTop, rect.fRight, rect.fBottom, vertexStride);
+
+ static const int kColorOffset = sizeof(SkPoint);
+ GrColor* vertColor = reinterpret_cast<GrColor*>(verts + kColorOffset);
+ for (int i = 0; i < kVertsPerInstance; i++) {
+ *vertColor = color;
+ vertColor = (GrColor*) ((intptr_t) vertColor + vertexStride);
+ }
+
+ verts += vertexStride * kVertsPerInstance;
+ iter.next();
+ }
+}
+
+class RegionBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ RegionBatch(GrColor color, const SkMatrix& viewMatrix, const SkRegion& region)
+ : INHERITED(ClassID())
+ , fViewMatrix(viewMatrix)
+ {
+ RegionInfo& info = fRegions.push_back();
+ info.fColor = color;
+ info.fRegion = region;
+
+ SkRect bounds = SkRect::Make(region.getBounds());
+ this->setTransformedBounds(bounds, viewMatrix, HasAABloat::kNo, IsZeroArea::kNo);
+ }
+
+ const char* name() const override { return "GrRegionBatch"; }
+
+ SkString dumpInfo() const override {
+ SkString str;
+ str.appendf("# batched: %d\n", fRegions.count());
+ for (int i = 0; i < fRegions.count(); ++i) {
+ const RegionInfo& info = fRegions[i];
+ str.appendf("%d: Color: 0x%08x, Region with %d rects\n",
+ i, info.fColor, info.fRegion.computeRegionComplexity());
+ }
+ str.append(INHERITED::dumpInfo());
+ return str;
+ }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one region.
+ color->setKnownFourComponents(fRegions[0].fColor);
+ coverage->setKnownSingleComponent(0xff);
+ }
+
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ overrides.getOverrideColorIfSet(&fRegions[0].fColor);
+ fOverrides = overrides;
+ }
+
+private:
+
+ void onPrepareDraws(Target* target) const override {
+ sk_sp<GrGeometryProcessor> gp = make_gp(fOverrides.readsCoverage(), fViewMatrix);
+ if (!gp) {
+ SkDebugf("Couldn't create GrGeometryProcessor\n");
+ return;
+ }
+ SkASSERT(gp->getVertexStride() == sizeof(GrDefaultGeoProcFactory::PositionColorAttr));
+
+ int numRegions = fRegions.count();
+ int numRects = 0;
+ for (int i = 0; i < numRegions; i++) {
+ numRects += fRegions[i].fRegion.computeRegionComplexity();
+ }
+
+ size_t vertexStride = gp->getVertexStride();
+ SkAutoTUnref<const GrBuffer> indexBuffer(target->resourceProvider()->refQuadIndexBuffer());
+ InstancedHelper helper;
+ void* vertices = helper.init(target, kTriangles_GrPrimitiveType, vertexStride,
+ indexBuffer, kVertsPerInstance, kIndicesPerInstance, numRects);
+ if (!vertices || !indexBuffer) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ intptr_t verts = reinterpret_cast<intptr_t>(vertices);
+ for (int i = 0; i < numRegions; i++) {
+ tesselate_region(verts, vertexStride, fRegions[i].fColor, fRegions[i].fRegion);
+ int numRectsInRegion = fRegions[i].fRegion.computeRegionComplexity();
+ verts += numRectsInRegion * kVertsPerInstance * vertexStride;
+ }
+ helper.recordDraw(target, gp.get());
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ RegionBatch* that = t->cast<RegionBatch>();
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ if (fViewMatrix != that->fViewMatrix) {
+ return false;
+ }
+
+ fRegions.push_back_n(that->fRegions.count(), that->fRegions.begin());
+ this->joinBounds(*that);
+ return true;
+ }
+
+ struct RegionInfo {
+ GrColor fColor;
+ SkRegion fRegion;
+ };
+
+ SkMatrix fViewMatrix;
+ GrXPOverridesForBatch fOverrides;
+ SkSTArray<1, RegionInfo, true> fRegions;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+namespace GrRegionBatch {
+
+GrDrawBatch* Create(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRegion& region) {
+ return new RegionBatch(color, viewMatrix, region);
+}
+
+};
diff --git a/gfx/skia/skia/src/gpu/batches/GrRegionBatch.h b/gfx/skia/skia/src/gpu/batches/GrRegionBatch.h
new file mode 100644
index 000000000..d928d0a9c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrRegionBatch.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRegionBatch_DEFINED
+#define GrRegionBatch_DEFINED
+
+#include "GrColor.h"
+
+class GrDrawBatch;
+class SkMatrix;
+class SkRegion;
+
+namespace GrRegionBatch {
+
+GrDrawBatch* Create(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRegion& region);
+
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrStencilAndCoverPathRenderer.cpp b/gfx/skia/skia/src/gpu/batches/GrStencilAndCoverPathRenderer.cpp
new file mode 100644
index 000000000..3bbd1574a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrStencilAndCoverPathRenderer.cpp
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrStencilAndCoverPathRenderer.h"
+#include "GrCaps.h"
+#include "GrContext.h"
+#include "GrDrawContextPriv.h"
+#include "GrDrawPathBatch.h"
+#include "GrFixedClip.h"
+#include "GrGpu.h"
+#include "GrPath.h"
+#include "GrPipelineBuilder.h"
+#include "GrRenderTarget.h"
+#include "GrResourceProvider.h"
+#include "GrStencilPathBatch.h"
+#include "GrStyle.h"
+#include "batches/GrRectBatchFactory.h"
+
+GrPathRenderer* GrStencilAndCoverPathRenderer::Create(GrResourceProvider* resourceProvider,
+ const GrCaps& caps) {
+ if (caps.shaderCaps()->pathRenderingSupport()) {
+ return new GrStencilAndCoverPathRenderer(resourceProvider);
+ } else {
+ return nullptr;
+ }
+}
+
+GrStencilAndCoverPathRenderer::GrStencilAndCoverPathRenderer(GrResourceProvider* resourceProvider)
+ : fResourceProvider(resourceProvider) {
+}
+
+bool GrStencilAndCoverPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ // GrPath doesn't support hairline paths. An arbitrary path effect could produce a hairline
+ // path.
+ if (args.fShape->style().strokeRec().isHairlineStyle() ||
+ args.fShape->style().hasNonDashPathEffect()) {
+ return false;
+ }
+ if (args.fHasUserStencilSettings) {
+ return false;
+ }
+ if (args.fAntiAlias) {
+ return args.fIsStencilBufferMSAA;
+ } else {
+ return true; // doesn't do per-path AA, relies on the target having MSAA
+ }
+}
+
+static GrPath* get_gr_path(GrResourceProvider* resourceProvider, const GrShape& shape) {
+ GrUniqueKey key;
+ bool isVolatile;
+ GrPath::ComputeKey(shape, &key, &isVolatile);
+ sk_sp<GrPath> path;
+ if (!isVolatile) {
+ path.reset(
+ static_cast<GrPath*>(resourceProvider->findAndRefResourceByUniqueKey(key)));
+ }
+ if (!path) {
+ SkPath skPath;
+ shape.asPath(&skPath);
+ path.reset(resourceProvider->createPath(skPath, shape.style()));
+ if (!isVolatile) {
+ resourceProvider->assignUniqueKeyToResource(key, path.get());
+ }
+ } else {
+#ifdef SK_DEBUG
+ SkPath skPath;
+ shape.asPath(&skPath);
+ SkASSERT(path->isEqualTo(skPath, shape.style()));
+#endif
+ }
+ return path.release();
+}
+
+void GrStencilAndCoverPathRenderer::onStencilPath(const StencilPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fDrawContext->auditTrail(),
+ "GrStencilAndCoverPathRenderer::onStencilPath");
+ SkASSERT(!args.fIsAA || args.fDrawContext->isStencilBufferMultisampled());
+
+ SkAutoTUnref<GrPath> p(get_gr_path(fResourceProvider, *args.fShape));
+ args.fDrawContext->drawContextPriv().stencilPath(*args.fClip, args.fIsAA, *args.fViewMatrix, p);
+}
+
+bool GrStencilAndCoverPathRenderer::onDrawPath(const DrawPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fDrawContext->auditTrail(),
+ "GrStencilAndCoverPathRenderer::onDrawPath");
+ SkASSERT(!args.fPaint->isAntiAlias() || args.fDrawContext->isStencilBufferMultisampled());
+ SkASSERT(!args.fShape->style().strokeRec().isHairlineStyle());
+
+ const SkMatrix& viewMatrix = *args.fViewMatrix;
+
+
+ SkAutoTUnref<GrPath> path(get_gr_path(fResourceProvider, *args.fShape));
+
+ if (args.fShape->inverseFilled()) {
+ SkMatrix invert = SkMatrix::I();
+ SkRect bounds =
+ SkRect::MakeLTRB(0, 0,
+ SkIntToScalar(args.fDrawContext->width()),
+ SkIntToScalar(args.fDrawContext->height()));
+ SkMatrix vmi;
+ // mapRect through persp matrix may not be correct
+ if (!viewMatrix.hasPerspective() && viewMatrix.invert(&vmi)) {
+ vmi.mapRect(&bounds);
+ // theoretically could set bloat = 0, instead leave it because of matrix inversion
+ // precision.
+ SkScalar bloat = viewMatrix.getMaxScale() * SK_ScalarHalf;
+ bounds.outset(bloat, bloat);
+ } else {
+ if (!viewMatrix.invert(&invert)) {
+ return false;
+ }
+ }
+ const SkMatrix& viewM = viewMatrix.hasPerspective() ? SkMatrix::I() : viewMatrix;
+
+ SkAutoTUnref<GrDrawBatch> coverBatch(
+ GrRectBatchFactory::CreateNonAAFill(args.fPaint->getColor(), viewM, bounds,
+ nullptr, &invert));
+
+ // fake inverse with a stencil and cover
+ args.fDrawContext->drawContextPriv().stencilPath(*args.fClip, args.fPaint->isAntiAlias(),
+ viewMatrix, path);
+
+ {
+ static constexpr GrUserStencilSettings kInvertedCoverPass(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ // We know our rect will hit pixels outside the clip and the user bits will
+ // be 0 outside the clip. So we can't just fill where the user bits are 0. We
+ // also need to check that the clip bit is set.
+ GrUserStencilTest::kEqualIfInClip,
+ 0xffff,
+ GrUserStencilOp::kKeep,
+ GrUserStencilOp::kZero,
+ 0xffff>()
+ );
+
+ GrPipelineBuilder pipelineBuilder(*args.fPaint,
+ args.fPaint->isAntiAlias() &&
+ !args.fDrawContext->hasMixedSamples());
+ pipelineBuilder.setUserStencil(&kInvertedCoverPass);
+
+ args.fDrawContext->drawBatch(pipelineBuilder, *args.fClip, coverBatch);
+ }
+ } else {
+ static constexpr GrUserStencilSettings kCoverPass(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kNotEqual,
+ 0xffff,
+ GrUserStencilOp::kZero,
+ GrUserStencilOp::kKeep,
+ 0xffff>()
+ );
+
+ SkAutoTUnref<GrDrawBatch> batch(GrDrawPathBatch::Create(viewMatrix, args.fPaint->getColor(),
+ path));
+
+ GrPipelineBuilder pipelineBuilder(*args.fPaint, args.fPaint->isAntiAlias());
+ pipelineBuilder.setUserStencil(&kCoverPass);
+ if (args.fAntiAlias) {
+ SkASSERT(args.fDrawContext->isStencilBufferMultisampled());
+ pipelineBuilder.enableState(GrPipelineBuilder::kHWAntialias_Flag);
+ }
+
+ args.fDrawContext->drawBatch(pipelineBuilder, *args.fClip, batch);
+ }
+
+ return true;
+}
diff --git a/gfx/skia/skia/src/gpu/batches/GrStencilAndCoverPathRenderer.h b/gfx/skia/skia/src/gpu/batches/GrStencilAndCoverPathRenderer.h
new file mode 100644
index 000000000..c896e6154
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrStencilAndCoverPathRenderer.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBuiltInPathRenderer_DEFINED
+#define GrBuiltInPathRenderer_DEFINED
+
+#include "GrPathRenderer.h"
+
+class GrContext;
+class GrGpu;
+
+/**
+ * Uses GrGpu::stencilPath followed by a cover rectangle. This subclass doesn't apply AA; it relies
+ * on the target having MSAA if AA is desired.
+ */
+class GrStencilAndCoverPathRenderer : public GrPathRenderer {
+public:
+
+ static GrPathRenderer* Create(GrResourceProvider*, const GrCaps&);
+
+
+private:
+ StencilSupport onGetStencilSupport(const GrShape&) const override {
+ return GrPathRenderer::kStencilOnly_StencilSupport;
+ }
+
+ bool onCanDrawPath(const CanDrawPathArgs&) const override;
+
+ bool onDrawPath(const DrawPathArgs&) override;
+
+ void onStencilPath(const StencilPathArgs&) override;
+
+ GrStencilAndCoverPathRenderer(GrResourceProvider*);
+
+ GrResourceProvider* fResourceProvider;
+
+ typedef GrPathRenderer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrStencilPathBatch.h b/gfx/skia/skia/src/gpu/batches/GrStencilPathBatch.h
new file mode 100644
index 000000000..f505a531d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrStencilPathBatch.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrStencilPathBatch_DEFINED
+#define GrStencilPathBatch_DEFINED
+
+#include "GrBatch.h"
+#include "GrBatchFlushState.h"
+#include "GrGpu.h"
+#include "GrPath.h"
+#include "GrPathRendering.h"
+#include "GrRenderTarget.h"
+
+class GrStencilPathBatch final : public GrBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ static GrBatch* Create(const SkMatrix& viewMatrix,
+ bool useHWAA,
+ GrPathRendering::FillType fillType,
+ bool hasStencilClip,
+ int numStencilBits,
+ const GrScissorState& scissor,
+ GrRenderTarget* renderTarget,
+ const GrPath* path) {
+ return new GrStencilPathBatch(viewMatrix, useHWAA, fillType, hasStencilClip,
+ numStencilBits, scissor, renderTarget, path);
+ }
+
+ const char* name() const override { return "StencilPath"; }
+
+ uint32_t renderTargetUniqueID() const override { return fRenderTarget.get()->uniqueID(); }
+ GrRenderTarget* renderTarget() const override { return fRenderTarget.get(); }
+
+ SkString dumpInfo() const override {
+ SkString string;
+ string.printf("PATH: 0x%p, AA:%d", fPath.get(), fUseHWAA);
+ string.append(INHERITED::dumpInfo());
+ return string;
+ }
+
+private:
+ GrStencilPathBatch(const SkMatrix& viewMatrix,
+ bool useHWAA,
+ GrPathRendering::FillType fillType,
+ bool hasStencilClip,
+ int numStencilBits,
+ const GrScissorState& scissor,
+ GrRenderTarget* renderTarget,
+ const GrPath* path)
+ : INHERITED(ClassID())
+ , fViewMatrix(viewMatrix)
+ , fUseHWAA(useHWAA)
+ , fStencil(GrPathRendering::GetStencilPassSettings(fillType), hasStencilClip, numStencilBits)
+ , fScissor(scissor)
+ , fRenderTarget(renderTarget)
+ , fPath(path) {
+ this->setBounds(path->getBounds(), HasAABloat::kNo, IsZeroArea::kNo);
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override { return false; }
+
+ void onPrepare(GrBatchFlushState*) override {}
+
+ void onDraw(GrBatchFlushState* state) override {
+ GrPathRendering::StencilPathArgs args(fUseHWAA, fRenderTarget.get(), &fViewMatrix,
+ &fScissor, &fStencil);
+ state->gpu()->pathRendering()->stencilPath(args, fPath.get());
+ }
+
+ SkMatrix fViewMatrix;
+ bool fUseHWAA;
+ GrStencilSettings fStencil;
+ GrScissorState fScissor;
+ GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
+ GrPendingIOResource<const GrPath, kRead_GrIOType> fPath;
+
+ typedef GrBatch INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrTessellatingPathRenderer.cpp b/gfx/skia/skia/src/gpu/batches/GrTessellatingPathRenderer.cpp
new file mode 100644
index 000000000..57eac3c4b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrTessellatingPathRenderer.cpp
@@ -0,0 +1,393 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrTessellatingPathRenderer.h"
+
+#include "GrAuditTrail.h"
+#include "GrBatchFlushState.h"
+#include "GrBatchTest.h"
+#include "GrClip.h"
+#include "GrDefaultGeoProcFactory.h"
+#include "GrDrawTarget.h"
+#include "GrMesh.h"
+#include "GrPathUtils.h"
+#include "GrPipelineBuilder.h"
+#include "GrResourceCache.h"
+#include "GrResourceProvider.h"
+#include "GrTessellator.h"
+#include "SkGeometry.h"
+
+#include "batches/GrVertexBatch.h"
+
+#include <stdio.h>
+
+/*
+ * This path renderer tessellates the path into triangles using GrTessellator, uploads the
+ * triangles to a vertex buffer, and renders them with a single draw call. It can do screenspace
+ * antialiasing with a one-pixel coverage ramp.
+ */
+namespace {
+
+struct TessInfo {
+ SkScalar fTolerance;
+ int fCount;
+};
+
+// When the SkPathRef genID changes, invalidate a corresponding GrResource described by key.
+class PathInvalidator : public SkPathRef::GenIDChangeListener {
+public:
+ explicit PathInvalidator(const GrUniqueKey& key) : fMsg(key) {}
+private:
+ GrUniqueKeyInvalidatedMessage fMsg;
+
+ void onChange() override {
+ SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(fMsg);
+ }
+};
+
+bool cache_match(GrBuffer* vertexBuffer, SkScalar tol, int* actualCount) {
+ if (!vertexBuffer) {
+ return false;
+ }
+ const SkData* data = vertexBuffer->getUniqueKey().getCustomData();
+ SkASSERT(data);
+ const TessInfo* info = static_cast<const TessInfo*>(data->data());
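+ // A cached tolerance of 0 means the source path was linear, so the cached tessellation is
+ // exact at any tolerance; otherwise reuse it only if it was generated at a tolerance finer
+ // than 3x the requested one.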
+ if (info->fTolerance == 0 || info->fTolerance < 3.0f * tol) {
+ *actualCount = info->fCount;
+ return true;
+ }
+ return false;
+}
+
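+// Allocates the tessellated vertices in a static GrBuffer so they can be cached and reused. When
+// the caps allow buffer mapping, the vertices are written directly into the mapped buffer;
+// otherwise they are staged in CPU memory and uploaded via updateData() on unlock().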
+class StaticVertexAllocator : public GrTessellator::VertexAllocator {
+public:
+ StaticVertexAllocator(size_t stride, GrResourceProvider* resourceProvider, bool canMapVB)
+ : VertexAllocator(stride)
+ , fResourceProvider(resourceProvider)
+ , fCanMapVB(canMapVB)
+ , fVertices(nullptr) {
+ }
+ void* lock(int vertexCount) override {
+ size_t size = vertexCount * stride();
+ fVertexBuffer.reset(fResourceProvider->createBuffer(
+ size, kVertex_GrBufferType, kStatic_GrAccessPattern, 0));
+ if (!fVertexBuffer.get()) {
+ return nullptr;
+ }
+ if (fCanMapVB) {
+ fVertices = fVertexBuffer->map();
+ } else {
+ fVertices = sk_malloc_throw(vertexCount * stride());
+ }
+ return fVertices;
+ }
+ void unlock(int actualCount) override {
+ if (fCanMapVB) {
+ fVertexBuffer->unmap();
+ } else {
+ fVertexBuffer->updateData(fVertices, actualCount * stride());
+ sk_free(fVertices);
+ }
+ fVertices = nullptr;
+ }
+ GrBuffer* vertexBuffer() { return fVertexBuffer.get(); }
+private:
+ SkAutoTUnref<GrBuffer> fVertexBuffer;
+ GrResourceProvider* fResourceProvider;
+ bool fCanMapVB;
+ void* fVertices;
+};
+
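+// Allocates vertices out of the batch target's per-flush vertex space (nothing is cached); any
+// vertices left unused are returned with putBackVertices() in unlock().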
+class DynamicVertexAllocator : public GrTessellator::VertexAllocator {
+public:
+ DynamicVertexAllocator(size_t stride, GrVertexBatch::Target* target)
+ : VertexAllocator(stride)
+ , fTarget(target)
+ , fVertexBuffer(nullptr)
+ , fVertices(nullptr) {
+ }
+ void* lock(int vertexCount) override {
+ fVertexCount = vertexCount;
+ fVertices = fTarget->makeVertexSpace(stride(), vertexCount, &fVertexBuffer, &fFirstVertex);
+ return fVertices;
+ }
+ void unlock(int actualCount) override {
+ fTarget->putBackVertices(fVertexCount - actualCount, stride());
+ fVertices = nullptr;
+ }
+ const GrBuffer* vertexBuffer() const { return fVertexBuffer; }
+ int firstVertex() const { return fFirstVertex; }
+private:
+ GrVertexBatch::Target* fTarget;
+ const GrBuffer* fVertexBuffer;
+ int fVertexCount;
+ int fFirstVertex;
+ void* fVertices;
+};
+
+} // namespace
+
+GrTessellatingPathRenderer::GrTessellatingPathRenderer() {
+}
+
+bool GrTessellatingPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ // This path renderer can draw fill styles, and can do screenspace antialiasing via a
+ // one-pixel coverage ramp. It can do convex and concave paths, but we'll leave the convex
+ // ones to simpler algorithms. We pass on paths that have styles, though they may come back
+ // around after applying the styling information to the geometry to create a filled path. In
+ // the non-AA case, we skip paths that don't have a key since the real advantage of this path
+ // renderer comes from caching the tessellated geometry. In the AA case, we do not cache, so we
+ // accept paths without keys.
+ if (!args.fShape->style().isSimpleFill() || args.fShape->knownToBeConvex()) {
+ return false;
+ }
+ if (args.fAntiAlias) {
+#ifdef SK_DISABLE_SCREENSPACE_TESS_AA_PATH_RENDERER
+ return false;
+#else
+ SkPath path;
+ args.fShape->asPath(&path);
+ if (path.countVerbs() > 10) {
+ return false;
+ }
+#endif
+ } else if (!args.fShape->hasUnstyledKey()) {
+ return false;
+ }
+ return true;
+}
+
+class TessellatingPathBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ static GrDrawBatch* Create(const GrColor& color,
+ const GrShape& shape,
+ const SkMatrix& viewMatrix,
+ SkIRect devClipBounds,
+ bool antiAlias) {
+ return new TessellatingPathBatch(color, shape, viewMatrix, devClipBounds, antiAlias);
+ }
+
+ const char* name() const override { return "TessellatingPathBatch"; }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ color->setKnownFourComponents(fColor);
+ coverage->setUnknownSingleComponent();
+ }
+
+private:
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ // Handle any color overrides
+ if (!overrides.readsColor()) {
+ fColor = GrColor_ILLEGAL;
+ }
+ overrides.getOverrideColorIfSet(&fColor);
+ fPipelineInfo = overrides;
+ }
+
+ SkPath getPath() const {
+ SkASSERT(!fShape.style().applies());
+ SkPath path;
+ fShape.asPath(&path);
+ return path;
+ }
+
+ void draw(Target* target, const GrGeometryProcessor* gp) const {
+ SkASSERT(!fAntiAlias);
+ GrResourceProvider* rp = target->resourceProvider();
+ bool inverseFill = fShape.inverseFilled();
+ // construct a cache key from the shape's unstyled key and, for inverse fills, the clip bounds
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey key;
+ static constexpr int kClipBoundsCnt = sizeof(fDevClipBounds) / sizeof(uint32_t);
+ int shapeKeyDataCnt = fShape.unstyledKeySize();
+ SkASSERT(shapeKeyDataCnt >= 0);
+ GrUniqueKey::Builder builder(&key, kDomain, shapeKeyDataCnt + kClipBoundsCnt);
+ fShape.writeUnstyledKey(&builder[0]);
+ // For inverse fills, the tessellation is dependent on clip bounds.
+ if (inverseFill) {
+ memcpy(&builder[shapeKeyDataCnt], &fDevClipBounds, sizeof(fDevClipBounds));
+ } else {
+ memset(&builder[shapeKeyDataCnt], 0, sizeof(fDevClipBounds));
+ }
+ builder.finish();
+ SkAutoTUnref<GrBuffer> cachedVertexBuffer(rp->findAndRefTByUniqueKey<GrBuffer>(key));
+ int actualCount;
+ SkScalar tol = GrPathUtils::kDefaultTolerance;
+ tol = GrPathUtils::scaleToleranceToSrc(tol, fViewMatrix, fShape.bounds());
+ if (cache_match(cachedVertexBuffer.get(), tol, &actualCount)) {
+ this->drawVertices(target, gp, cachedVertexBuffer.get(), 0, actualCount);
+ return;
+ }
+
+ SkRect clipBounds = SkRect::Make(fDevClipBounds);
+
+ SkMatrix vmi;
+ if (!fViewMatrix.invert(&vmi)) {
+ return;
+ }
+ vmi.mapRect(&clipBounds);
+ bool isLinear;
+ bool canMapVB = GrCaps::kNone_MapFlags != target->caps().mapBufferFlags();
+ StaticVertexAllocator allocator(gp->getVertexStride(), rp, canMapVB);
+ int count = GrTessellator::PathToTriangles(getPath(), tol, clipBounds, &allocator,
+ false, GrColor(), false, &isLinear);
+ if (count == 0) {
+ return;
+ }
+ this->drawVertices(target, gp, allocator.vertexBuffer(), 0, count);
+ TessInfo info;
+ info.fTolerance = isLinear ? 0 : tol;
+ info.fCount = count;
+ key.setCustomData(SkData::MakeWithCopy(&info, sizeof(info)));
+ rp->assignUniqueKeyToResource(key, allocator.vertexBuffer());
+ }
+
+ void drawAA(Target* target, const GrGeometryProcessor* gp) const {
+ SkASSERT(fAntiAlias);
+ SkPath path = getPath();
+ if (path.isEmpty()) {
+ return;
+ }
+ SkRect clipBounds = SkRect::Make(fDevClipBounds);
+ path.transform(fViewMatrix);
+ SkScalar tol = GrPathUtils::kDefaultTolerance;
+ bool isLinear;
+ DynamicVertexAllocator allocator(gp->getVertexStride(), target);
+ bool canTweakAlphaForCoverage = fPipelineInfo.canTweakAlphaForCoverage();
+ int count = GrTessellator::PathToTriangles(path, tol, clipBounds, &allocator,
+ true, fColor, canTweakAlphaForCoverage,
+ &isLinear);
+ if (count == 0) {
+ return;
+ }
+ drawVertices(target, gp, allocator.vertexBuffer(), allocator.firstVertex(), count);
+ }
+
+ void onPrepareDraws(Target* target) const override {
+ sk_sp<GrGeometryProcessor> gp;
+ {
+ using namespace GrDefaultGeoProcFactory;
+
+ Color color(fColor);
+ LocalCoords localCoords(fPipelineInfo.readsLocalCoords() ?
+ LocalCoords::kUsePosition_Type :
+ LocalCoords::kUnused_Type);
+ Coverage::Type coverageType;
+ if (fAntiAlias) {
+ color = Color(Color::kAttribute_Type);
+ if (fPipelineInfo.canTweakAlphaForCoverage()) {
+ coverageType = Coverage::kSolid_Type;
+ } else {
+ coverageType = Coverage::kAttribute_Type;
+ }
+ } else if (fPipelineInfo.readsCoverage()) {
+ coverageType = Coverage::kSolid_Type;
+ } else {
+ coverageType = Coverage::kNone_Type;
+ }
+ Coverage coverage(coverageType);
+ if (fAntiAlias) {
+ gp = GrDefaultGeoProcFactory::MakeForDeviceSpace(color, coverage, localCoords,
+ fViewMatrix);
+ } else {
+ gp = GrDefaultGeoProcFactory::Make(color, coverage, localCoords, fViewMatrix);
+ }
+ }
+ if (fAntiAlias) {
+ this->drawAA(target, gp.get());
+ } else {
+ this->draw(target, gp.get());
+ }
+ }
+
+ void drawVertices(Target* target, const GrGeometryProcessor* gp, const GrBuffer* vb,
+ int firstVertex, int count) const {
+ GrPrimitiveType primitiveType = TESSELLATOR_WIREFRAME ? kLines_GrPrimitiveType
+ : kTriangles_GrPrimitiveType;
+ GrMesh mesh;
+ mesh.init(primitiveType, vb, firstVertex, count);
+ target->draw(gp, mesh);
+ }
+
+ bool onCombineIfPossible(GrBatch*, const GrCaps&) override { return false; }
+
+ TessellatingPathBatch(const GrColor& color,
+ const GrShape& shape,
+ const SkMatrix& viewMatrix,
+ const SkIRect& devClipBounds,
+ bool antiAlias)
+ : INHERITED(ClassID())
+ , fColor(color)
+ , fShape(shape)
+ , fViewMatrix(viewMatrix)
+ , fDevClipBounds(devClipBounds)
+ , fAntiAlias(antiAlias) {
+ SkRect devBounds;
+ viewMatrix.mapRect(&devBounds, shape.bounds());
+ if (shape.inverseFilled()) {
+ // Because the clip bounds are used to add a contour for inverse fills, they must also
+ // include the path bounds.
+ devBounds.join(SkRect::Make(fDevClipBounds));
+ }
+ this->setBounds(devBounds, HasAABloat::kNo, IsZeroArea::kNo);
+ }
+
+ GrColor fColor;
+ GrShape fShape;
+ SkMatrix fViewMatrix;
+ SkIRect fDevClipBounds;
+ bool fAntiAlias;
+ GrXPOverridesForBatch fPipelineInfo;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+bool GrTessellatingPathRenderer::onDrawPath(const DrawPathArgs& args) {
+ GR_AUDIT_TRAIL_AUTO_FRAME(args.fDrawContext->auditTrail(),
+ "GrTessellatingPathRenderer::onDrawPath");
+ SkIRect clipBoundsI;
+ args.fClip->getConservativeBounds(args.fDrawContext->width(), args.fDrawContext->height(),
+ &clipBoundsI);
+ SkAutoTUnref<GrDrawBatch> batch(TessellatingPathBatch::Create(args.fPaint->getColor(),
+ *args.fShape,
+ *args.fViewMatrix,
+ clipBoundsI,
+ args.fAntiAlias));
+
+ GrPipelineBuilder pipelineBuilder(*args.fPaint, args.fDrawContext->mustUseHWAA(*args.fPaint));
+ pipelineBuilder.setUserStencil(args.fUserStencilSettings);
+
+ args.fDrawContext->drawBatch(pipelineBuilder, *args.fClip, batch);
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_TEST_UTILS
+
+DRAW_BATCH_TEST_DEFINE(TesselatingPathBatch) {
+ GrColor color = GrRandomColor(random);
+ SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
+ SkPath path = GrTest::TestPath(random);
+ SkIRect devClipBounds = SkIRect::MakeLTRB(
+ random->nextU(), random->nextU(), random->nextU(), random->nextU());
+ devClipBounds.sort();
+ bool antiAlias = random->nextBool();
+ GrStyle style;
+ do {
+ GrTest::TestStyle(random, &style);
+ } while (!style.isSimpleFill());
+ GrShape shape(path, style);
+ return TessellatingPathBatch::Create(color, shape, viewMatrix, devClipBounds, antiAlias);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrTessellatingPathRenderer.h b/gfx/skia/skia/src/gpu/batches/GrTessellatingPathRenderer.h
new file mode 100644
index 000000000..d5f2c7af9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrTessellatingPathRenderer.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTessellatingPathRenderer_DEFINED
+#define GrTessellatingPathRenderer_DEFINED
+
+#include "GrPathRenderer.h"
+
+/**
+ * Subclass that renders the path by converting to screen-space trapezoids plus
+ * extra 1-pixel geometry for AA.
+ */
+class SK_API GrTessellatingPathRenderer : public GrPathRenderer {
+public:
+ GrTessellatingPathRenderer();
+
+private:
+ bool onCanDrawPath(const CanDrawPathArgs& ) const override;
+
+ StencilSupport onGetStencilSupport(const GrShape&) const override {
+ return GrPathRenderer::kNoSupport_StencilSupport;
+ }
+
+ bool onDrawPath(const DrawPathArgs&) override;
+
+ typedef GrPathRenderer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrTestBatch.h b/gfx/skia/skia/src/gpu/batches/GrTestBatch.h
new file mode 100644
index 000000000..5bac48ac0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrTestBatch.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTestBatch_DEFINED
+#define GrTestBatch_DEFINED
+
+#include "GrBatchFlushState.h"
+#include "GrGeometryProcessor.h"
+
+#include "batches/GrVertexBatch.h"
+
+/*
+ * A simple solid color batch, used only for testing, which actually doesn't batch at all. It
+ * saves having to fill out some boilerplate methods.
+ */
+class GrTestBatch : public GrVertexBatch {
+public:
+ virtual const char* name() const override = 0;
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one geometry bundle
+ color->setKnownFourComponents(fColor);
+ coverage->setUnknownSingleComponent();
+ }
+
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ overrides.getOverrideColorIfSet(&fColor);
+
+ fOptimizations.fColorIgnored = !overrides.readsColor();
+ fOptimizations.fUsesLocalCoords = overrides.readsLocalCoords();
+ fOptimizations.fCoverageIgnored = !overrides.readsCoverage();
+ }
+
+protected:
+ GrTestBatch(uint32_t classID, const SkRect& bounds, GrColor color)
+ : INHERITED(classID)
+ , fColor(color) {
+ // Choose some conservative values for aa bloat and zero area.
+ this->setBounds(bounds, HasAABloat::kYes, IsZeroArea::kYes);
+ }
+
+ struct Optimizations {
+ bool fColorIgnored = false;
+ bool fUsesLocalCoords = false;
+ bool fCoverageIgnored = false;
+ };
+
+ GrColor color() const { return fColor; }
+ const Optimizations optimizations() const { return fOptimizations; }
+
+private:
+ bool onCombineIfPossible(GrBatch* t, const GrCaps&) override {
+ return false;
+ }
+
+ GrColor fColor;
+ Optimizations fOptimizations;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/batches/GrVertexBatch.cpp b/gfx/skia/skia/src/gpu/batches/GrVertexBatch.cpp
new file mode 100644
index 000000000..af3a186e1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrVertexBatch.cpp
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVertexBatch.h"
+#include "GrBatchFlushState.h"
+#include "GrResourceProvider.h"
+
+GrVertexBatch::GrVertexBatch(uint32_t classID)
+ : INHERITED(classID)
+ , fBaseDrawToken(GrBatchDrawToken::AlreadyFlushedToken()) {
+}
+
+void GrVertexBatch::onPrepare(GrBatchFlushState* state) {
+ Target target(state, this);
+ this->onPrepareDraws(&target);
+}
+
+void* GrVertexBatch::InstancedHelper::init(Target* target, GrPrimitiveType primType,
+ size_t vertexStride, const GrBuffer* indexBuffer,
+ int verticesPerInstance, int indicesPerInstance,
+ int instancesToDraw) {
+ SkASSERT(target);
+ if (!indexBuffer) {
+ return nullptr;
+ }
+ const GrBuffer* vertexBuffer;
+ int firstVertex;
+ int vertexCount = verticesPerInstance * instancesToDraw;
+ void* vertices = target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer, &firstVertex);
+ if (!vertices) {
+ SkDebugf("Vertices could not be allocated for instanced rendering.");
+ return nullptr;
+ }
+ SkASSERT(vertexBuffer);
+ size_t ibSize = indexBuffer->gpuMemorySize();
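+ // The shared index buffer bounds how many instances a single mesh draw can index.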
+ int maxInstancesPerDraw = static_cast<int>(ibSize / (sizeof(uint16_t) * indicesPerInstance));
+
+ fMesh.initInstanced(primType, vertexBuffer, indexBuffer,
+ firstVertex, verticesPerInstance, indicesPerInstance, instancesToDraw,
+ maxInstancesPerDraw);
+ return vertices;
+}
+
+void GrVertexBatch::InstancedHelper::recordDraw(Target* target, const GrGeometryProcessor* gp) {
+ SkASSERT(fMesh.instanceCount());
+ target->draw(gp, fMesh);
+}
+
+void* GrVertexBatch::QuadHelper::init(Target* target, size_t vertexStride,
+ int quadsToDraw) {
+ SkAutoTUnref<const GrBuffer> quadIndexBuffer(
+ target->resourceProvider()->refQuadIndexBuffer());
+ if (!quadIndexBuffer) {
+ SkDebugf("Could not get quad index buffer.");
+ return nullptr;
+ }
+ return this->INHERITED::init(target, kTriangles_GrPrimitiveType, vertexStride,
+ quadIndexBuffer, kVerticesPerQuad, kIndicesPerQuad, quadsToDraw);
+}
+
+void GrVertexBatch::onDraw(GrBatchFlushState* state) {
+ int currUploadIdx = 0;
+ int currMeshIdx = 0;
+
+ SkASSERT(fQueuedDraws.empty() || fBaseDrawToken == state->nextTokenToFlush());
+
+ for (int currDrawIdx = 0; currDrawIdx < fQueuedDraws.count(); ++currDrawIdx) {
+ GrBatchDrawToken drawToken = state->nextTokenToFlush();
+ while (currUploadIdx < fInlineUploads.count() &&
+ fInlineUploads[currUploadIdx].fUploadBeforeToken == drawToken) {
+ state->doUpload(fInlineUploads[currUploadIdx++].fUpload);
+ }
+ const QueuedDraw &draw = fQueuedDraws[currDrawIdx];
+ state->commandBuffer()->draw(*this->pipeline(), *draw.fGeometryProcessor.get(),
+ fMeshes.begin() + currMeshIdx, draw.fMeshCnt);
+ currMeshIdx += draw.fMeshCnt;
+ state->flushToken();
+ }
+ SkASSERT(currUploadIdx == fInlineUploads.count());
+ SkASSERT(currMeshIdx == fMeshes.count());
+ fQueuedDraws.reset();
+ fInlineUploads.reset();
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+void GrVertexBatch::Target::draw(const GrGeometryProcessor* gp, const GrMesh& mesh) {
+ GrVertexBatch* batch = this->vertexBatch();
+ batch->fMeshes.push_back(mesh);
+ if (!batch->fQueuedDraws.empty()) {
+ // If the last draw shares a geometry processor and there are no intervening uploads,
+ // add this mesh to it.
+ GrVertexBatch::QueuedDraw& lastDraw = this->vertexBatch()->fQueuedDraws.back();
+ if (lastDraw.fGeometryProcessor == gp &&
+ (batch->fInlineUploads.empty() ||
+ batch->fInlineUploads.back().fUploadBeforeToken != this->nextDrawToken())) {
+ ++lastDraw.fMeshCnt;
+ return;
+ }
+ }
+ GrVertexBatch::QueuedDraw& draw = this->vertexBatch()->fQueuedDraws.push_back();
+ GrBatchDrawToken token = this->state()->issueDrawToken();
+ draw.fGeometryProcessor.reset(gp);
+ draw.fMeshCnt = 1;
+ if (batch->fQueuedDraws.count() == 1) {
+ batch->fBaseDrawToken = token;
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/batches/GrVertexBatch.h b/gfx/skia/skia/src/gpu/batches/GrVertexBatch.h
new file mode 100644
index 000000000..19475a7af
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/batches/GrVertexBatch.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVertexBatch_DEFINED
+#define GrVertexBatch_DEFINED
+
+#include "GrDrawBatch.h"
+#include "GrGeometryProcessor.h"
+#include "GrMesh.h"
+#include "GrPendingProgramElement.h"
+
+#include "SkTLList.h"
+
+class GrBatchFlushState;
+
+/**
+ * Base class for vertex-based GrBatches.
+ */
+class GrVertexBatch : public GrDrawBatch {
+public:
+ class Target;
+
+ GrVertexBatch(uint32_t classID);
+
+protected:
+ /** Helper for rendering instances using an instanced index buffer. This class creates the
+ space for the vertices and flushes the draws to the batch target. */
+ class InstancedHelper {
+ public:
+ InstancedHelper() {}
+ /** Returns the allocated storage for the vertices. The caller should populate the vertices
+ before calling recordDraw(). */
+ void* init(Target*, GrPrimitiveType, size_t vertexStride,
+ const GrBuffer*, int verticesPerInstance, int indicesPerInstance,
+ int instancesToDraw);
+
+ /** Call after init() to issue draws to the batch target.*/
+ void recordDraw(Target*, const GrGeometryProcessor*);
+ private:
+ GrMesh fMesh;
+ };
+
+ static const int kVerticesPerQuad = 4;
+ static const int kIndicesPerQuad = 6;
+
+ /** A specialization of InstancedHelper for quad rendering. */
+ class QuadHelper : private InstancedHelper {
+ public:
+ QuadHelper() : INHERITED() {}
+ /** Finds the cached quad index buffer and reserves vertex space. Returns nullptr on failure
+ and on success a pointer to the vertex data that the caller should populate before
+ calling recordDraw(). */
+ void* init(Target*, size_t vertexStride, int quadsToDraw);
+
+ using InstancedHelper::recordDraw;
+ private:
+ typedef InstancedHelper INHERITED;
+ };
+
+private:
+ void onPrepare(GrBatchFlushState* state) final;
+ void onDraw(GrBatchFlushState* state) final;
+
+ virtual void onPrepareDraws(Target*) const = 0;
+
+ // A set of contiguous draws that share a draw token and primitive processor. The draws all use
+ // the batch's pipeline. The meshes for the draw are stored in the fMeshes array and each
+ // QueuedDraw uses fMeshCnt meshes from the fMeshes array. The reason for coalescing meshes
+ // that share a primitive processor into a QueuedDraw is that it allows the Gpu object to set up
+ // the shared state once and then issue draws for each mesh.
+ struct QueuedDraw {
+ int fMeshCnt = 0;
+ GrPendingProgramElement<const GrGeometryProcessor> fGeometryProcessor;
+ };
+
+ // All draws in all the vertex batches have implicit tokens based on the order they are
+ // enqueued globally across all batches. This is the offset of the first entry in fQueuedDraws.
+ // fQueuedDraws[i]'s token is fBaseDrawToken + i.
+ GrBatchDrawToken fBaseDrawToken;
+
+ SkSTArray<4, GrMesh> fMeshes;
+ SkSTArray<4, QueuedDraw, true> fQueuedDraws;
+
+ typedef GrDrawBatch INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/Gr1DKernelEffect.h b/gfx/skia/skia/src/gpu/effects/Gr1DKernelEffect.h
new file mode 100644
index 000000000..d7402e8c4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/Gr1DKernelEffect.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Gr1DKernelEffect_DEFINED
+#define Gr1DKernelEffect_DEFINED
+
+#include "GrSingleTextureEffect.h"
+#include "SkMatrix.h"
+
+/**
+ * Base class for 1D kernel effects. The kernel operates either in X or Y and
+ * has a pixel radius. The kernel is specified in the src texture's space
+ * and the kernel center is pinned to a texel's center. The radius specifies
+ * the number of texels on either side of the center texel in X or Y that are
+ * read. Since the center pixel is also read, the total width is one larger than
+ * two times the radius.
+ */
+
+class Gr1DKernelEffect : public GrSingleTextureEffect {
+
+public:
+ enum Direction {
+ kX_Direction,
+ kY_Direction,
+ };
+
+ Gr1DKernelEffect(GrTexture* texture,
+ Direction direction,
+ int radius)
+ : INHERITED(texture, nullptr, GrCoordTransform::MakeDivByTextureWHMatrix(texture))
+ , fDirection(direction)
+ , fRadius(radius) {}
+
+ virtual ~Gr1DKernelEffect() {}
+
+ static int WidthFromRadius(int radius) { return 2 * radius + 1; }
+
+ int radius() const { return fRadius; }
+ int width() const { return WidthFromRadius(fRadius); }
+ Direction direction() const { return fDirection; }
+
+ SkString dumpInfo() const override {
+ SkString str;
+ str.appendf("Direction: %s, Radius: %d ", kX_Direction == fDirection ? "X" : "Y", fRadius);
+ str.append(INHERITED::dumpInfo());
+ return str;
+ }
+
+private:
+
+ Direction fDirection;
+ int fRadius;
+
+ typedef GrSingleTextureEffect INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrBezierEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrBezierEffect.cpp
new file mode 100644
index 000000000..798695d4e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrBezierEffect.cpp
@@ -0,0 +1,712 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrBezierEffect.h"
+
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLGeometryProcessor.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "glsl/GrGLSLUtil.h"
+#include "glsl/GrGLSLVarying.h"
+#include "glsl/GrGLSLVertexShaderBuilder.h"
+
+class GrGLConicEffect : public GrGLSLGeometryProcessor {
+public:
+ GrGLConicEffect(const GrGeometryProcessor&);
+
+ void onEmitCode(EmitArgs&, GrGPArgs*) override;
+
+ static inline void GenKey(const GrGeometryProcessor&,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder*);
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& primProc,
+ FPCoordTransformIter&& transformIter) override {
+ const GrConicEffect& ce = primProc.cast<GrConicEffect>();
+
+ if (!ce.viewMatrix().isIdentity() && !fViewMatrix.cheapEqualTo(ce.viewMatrix())) {
+ fViewMatrix = ce.viewMatrix();
+ float viewMatrix[3 * 3];
+ GrGLSLGetMatrix<3>(viewMatrix, fViewMatrix);
+ pdman.setMatrix3f(fViewMatrixUniform, viewMatrix);
+ }
+
+ if (ce.color() != fColor) {
+ float c[4];
+ GrColorToRGBAFloat(ce.color(), c);
+ pdman.set4fv(fColorUniform, 1, c);
+ fColor = ce.color();
+ }
+
+ if (ce.coverageScale() != 0xff && ce.coverageScale() != fCoverageScale) {
+ pdman.set1f(fCoverageScaleUniform, GrNormalizeByteToFloat(ce.coverageScale()));
+ fCoverageScale = ce.coverageScale();
+ }
+ this->setTransformDataHelper(ce.localMatrix(), pdman, &transformIter);
+ }
+
+private:
+ SkMatrix fViewMatrix;
+ GrColor fColor;
+ uint8_t fCoverageScale;
+ GrPrimitiveEdgeType fEdgeType;
+ UniformHandle fColorUniform;
+ UniformHandle fCoverageScaleUniform;
+ UniformHandle fViewMatrixUniform;
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
+
+GrGLConicEffect::GrGLConicEffect(const GrGeometryProcessor& processor)
+ : fViewMatrix(SkMatrix::InvalidMatrix()), fColor(GrColor_ILLEGAL), fCoverageScale(0xff) {
+ const GrConicEffect& ce = processor.cast<GrConicEffect>();
+ fEdgeType = ce.getEdgeType();
+}
+
+void GrGLConicEffect::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) {
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ const GrConicEffect& gp = args.fGP.cast<GrConicEffect>();
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(gp);
+
+ GrGLSLVertToFrag v(kVec4f_GrSLType);
+ varyingHandler->addVarying("ConicCoeffs", &v, kHigh_GrSLPrecision);
+ vertBuilder->codeAppendf("%s = %s;", v.vsOut(), gp.inConicCoeffs()->fName);
+
+ GrGLSLPPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ // Setup pass through color
+ if (!gp.colorIgnored()) {
+ this->setupUniformColor(fragBuilder, uniformHandler, args.fOutputColor, &fColorUniform);
+ }
+
+ // Setup position
+ this->setupPosition(vertBuilder,
+ uniformHandler,
+ gpArgs,
+ gp.inPosition()->fName,
+ gp.viewMatrix(),
+ &fViewMatrixUniform);
+
+ // emit transforms with position
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ gpArgs->fPositionVar,
+ gp.inPosition()->fName,
+ gp.localMatrix(),
+ args.fFPCoordTransformHandler);
+
+ // TODO: this precision check should actually be a check on the number of bits
+ // high and medium provide and the selection of the lowest level that suffices.
+ // Additionally we should assert that the upstream code only lets us get here if
+ // either high or medium provides the required number of bits.
+ GrSLPrecision precision = kHigh_GrSLPrecision;
+ const GrShaderCaps::PrecisionInfo& highP = args.fGLSLCaps->getFloatShaderPrecisionInfo(
+ kFragment_GrShaderType,
+ kHigh_GrSLPrecision);
+ if (!highP.supported()) {
+ precision = kMedium_GrSLPrecision;
+ }
+
+ GrGLSLShaderVar edgeAlpha("edgeAlpha", kFloat_GrSLType, 0, precision);
+ GrGLSLShaderVar dklmdx("dklmdx", kVec3f_GrSLType, 0, precision);
+ GrGLSLShaderVar dklmdy("dklmdy", kVec3f_GrSLType, 0, precision);
+ GrGLSLShaderVar dfdx("dfdx", kFloat_GrSLType, 0, precision);
+ GrGLSLShaderVar dfdy("dfdy", kFloat_GrSLType, 0, precision);
+ GrGLSLShaderVar gF("gF", kVec2f_GrSLType, 0, precision);
+ GrGLSLShaderVar gFM("gFM", kFloat_GrSLType, 0, precision);
+ GrGLSLShaderVar func("func", kFloat_GrSLType, 0, precision);
+
+ fragBuilder->declAppend(edgeAlpha);
+ fragBuilder->declAppend(dklmdx);
+ fragBuilder->declAppend(dklmdy);
+ fragBuilder->declAppend(dfdx);
+ fragBuilder->declAppend(dfdy);
+ fragBuilder->declAppend(gF);
+ fragBuilder->declAppend(gFM);
+ fragBuilder->declAppend(func);
+
+ switch (fEdgeType) {
+ case kHairlineAA_GrProcessorEdgeType: {
+ SkAssertResult(fragBuilder->enableFeature(
+ GrGLSLFragmentShaderBuilder::kStandardDerivatives_GLSLFeature));
+ fragBuilder->codeAppendf("%s = dFdx(%s.xyz);", dklmdx.c_str(), v.fsIn());
+ fragBuilder->codeAppendf("%s = dFdy(%s.xyz);", dklmdy.c_str(), v.fsIn());
+ fragBuilder->codeAppendf("%s = 2.0 * %s.x * %s.x - %s.y * %s.z - %s.z * %s.y;",
+ dfdx.c_str(),
+ v.fsIn(), dklmdx.c_str(),
+ v.fsIn(), dklmdx.c_str(),
+ v.fsIn(), dklmdx.c_str());
+ fragBuilder->codeAppendf("%s = 2.0 * %s.x * %s.x - %s.y * %s.z - %s.z * %s.y;",
+ dfdy.c_str(),
+ v.fsIn(), dklmdy.c_str(),
+ v.fsIn(), dklmdy.c_str(),
+ v.fsIn(), dklmdy.c_str());
+ fragBuilder->codeAppendf("%s = vec2(%s, %s);", gF.c_str(), dfdx.c_str(), dfdy.c_str());
+ fragBuilder->codeAppendf("%s = sqrt(dot(%s, %s));",
+ gFM.c_str(), gF.c_str(), gF.c_str());
+ fragBuilder->codeAppendf("%s = %s.x*%s.x - %s.y*%s.z;",
+ func.c_str(), v.fsIn(), v.fsIn(), v.fsIn(), v.fsIn());
+ fragBuilder->codeAppendf("%s = abs(%s);", func.c_str(), func.c_str());
+ fragBuilder->codeAppendf("%s = %s / %s;",
+ edgeAlpha.c_str(), func.c_str(), gFM.c_str());
+ fragBuilder->codeAppendf("%s = max(1.0 - %s, 0.0);",
+ edgeAlpha.c_str(), edgeAlpha.c_str());
+ // Add line below for smooth cubic ramp
+ // fragBuilder->codeAppend("edgeAlpha = edgeAlpha*edgeAlpha*(3.0-2.0*edgeAlpha);");
+ break;
+ }
+ case kFillAA_GrProcessorEdgeType: {
+ SkAssertResult(fragBuilder->enableFeature(
+ GrGLSLFragmentShaderBuilder::kStandardDerivatives_GLSLFeature));
+ fragBuilder->codeAppendf("%s = dFdx(%s.xyz);", dklmdx.c_str(), v.fsIn());
+ fragBuilder->codeAppendf("%s = dFdy(%s.xyz);", dklmdy.c_str(), v.fsIn());
+ fragBuilder->codeAppendf("%s ="
+ "2.0 * %s.x * %s.x - %s.y * %s.z - %s.z * %s.y;",
+ dfdx.c_str(),
+ v.fsIn(), dklmdx.c_str(),
+ v.fsIn(), dklmdx.c_str(),
+ v.fsIn(), dklmdx.c_str());
+ fragBuilder->codeAppendf("%s ="
+ "2.0 * %s.x * %s.x - %s.y * %s.z - %s.z * %s.y;",
+ dfdy.c_str(),
+ v.fsIn(), dklmdy.c_str(),
+ v.fsIn(), dklmdy.c_str(),
+ v.fsIn(), dklmdy.c_str());
+ fragBuilder->codeAppendf("%s = vec2(%s, %s);", gF.c_str(), dfdx.c_str(), dfdy.c_str());
+ fragBuilder->codeAppendf("%s = sqrt(dot(%s, %s));",
+ gFM.c_str(), gF.c_str(), gF.c_str());
+ fragBuilder->codeAppendf("%s = %s.x * %s.x - %s.y * %s.z;",
+ func.c_str(), v.fsIn(), v.fsIn(), v.fsIn(), v.fsIn());
+ fragBuilder->codeAppendf("%s = %s / %s;",
+ edgeAlpha.c_str(), func.c_str(), gFM.c_str());
+ fragBuilder->codeAppendf("%s = clamp(1.0 - %s, 0.0, 1.0);",
+ edgeAlpha.c_str(), edgeAlpha.c_str());
+ // Add line below for smooth cubic ramp
+ // fragBuilder->codeAppend("edgeAlpha = edgeAlpha*edgeAlpha*(3.0-2.0*edgeAlpha);");
+ break;
+ }
+ case kFillBW_GrProcessorEdgeType: {
+ fragBuilder->codeAppendf("%s = %s.x * %s.x - %s.y * %s.z;",
+ edgeAlpha.c_str(), v.fsIn(), v.fsIn(), v.fsIn(), v.fsIn());
+ fragBuilder->codeAppendf("%s = float(%s < 0.0);",
+ edgeAlpha.c_str(), edgeAlpha.c_str());
+ break;
+ }
+ default:
+ SkFAIL("Shouldn't get here");
+ }
+
+ // TODO should we really be doing this?
+ if (gp.coverageScale() != 0xff) {
+ const char* coverageScale;
+ fCoverageScaleUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType,
+ kHigh_GrSLPrecision,
+ "Coverage",
+ &coverageScale);
+ fragBuilder->codeAppendf("%s = vec4(%s * %s);",
+ args.fOutputCoverage, coverageScale, edgeAlpha.c_str());
+ } else {
+ fragBuilder->codeAppendf("%s = vec4(%s);", args.fOutputCoverage, edgeAlpha.c_str());
+ }
+}
+
+void GrGLConicEffect::GenKey(const GrGeometryProcessor& gp,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrConicEffect& ce = gp.cast<GrConicEffect>();
+ uint32_t key = ce.isAntiAliased() ? (ce.isFilled() ? 0x0 : 0x1) : 0x2;
+ key |= GrColor_ILLEGAL != ce.color() ? 0x4 : 0x0;
+ key |= 0xff != ce.coverageScale() ? 0x8 : 0x0;
+ key |= ce.usesLocalCoords() && ce.localMatrix().hasPerspective() ? 0x10 : 0x0;
+ key |= ComputePosKey(ce.viewMatrix()) << 5;
+ b->add32(key);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GrConicEffect::~GrConicEffect() {}
+
+void GrConicEffect::getGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLConicEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLPrimitiveProcessor* GrConicEffect::createGLSLInstance(const GrGLSLCaps&) const {
+ return new GrGLConicEffect(*this);
+}
+
+GrConicEffect::GrConicEffect(GrColor color, const SkMatrix& viewMatrix, uint8_t coverage,
+ GrPrimitiveEdgeType edgeType, const SkMatrix& localMatrix,
+ bool usesLocalCoords)
+ : fColor(color)
+ , fViewMatrix(viewMatrix)
+ , fLocalMatrix(viewMatrix)
+ , fUsesLocalCoords(usesLocalCoords)
+ , fCoverageScale(coverage)
+ , fEdgeType(edgeType) {
+ this->initClassID<GrConicEffect>();
+ fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ fInConicCoeffs = &this->addVertexAttrib("inConicCoeffs", kVec4f_GrVertexAttribType);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(GrConicEffect);
+
+sk_sp<GrGeometryProcessor> GrConicEffect::TestCreate(GrProcessorTestData* d) {
+ sk_sp<GrGeometryProcessor> gp;
+ do {
+ GrPrimitiveEdgeType edgeType =
+ static_cast<GrPrimitiveEdgeType>(
+ d->fRandom->nextULessThan(kGrProcessorEdgeTypeCnt));
+ gp = GrConicEffect::Make(GrRandomColor(d->fRandom), GrTest::TestMatrix(d->fRandom),
+ edgeType, *d->fCaps,
+ GrTest::TestMatrix(d->fRandom), d->fRandom->nextBool());
+ } while (nullptr == gp);
+ return gp;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// Quad
+//////////////////////////////////////////////////////////////////////////////
+
+class GrGLQuadEffect : public GrGLSLGeometryProcessor {
+public:
+ GrGLQuadEffect(const GrGeometryProcessor&);
+
+ void onEmitCode(EmitArgs&, GrGPArgs*) override;
+
+ static inline void GenKey(const GrGeometryProcessor&,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder*);
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& primProc,
+ FPCoordTransformIter&& transformIter) override {
+ const GrQuadEffect& qe = primProc.cast<GrQuadEffect>();
+
+ if (!qe.viewMatrix().isIdentity() && !fViewMatrix.cheapEqualTo(qe.viewMatrix())) {
+ fViewMatrix = qe.viewMatrix();
+ float viewMatrix[3 * 3];
+ GrGLSLGetMatrix<3>(viewMatrix, fViewMatrix);
+ pdman.setMatrix3f(fViewMatrixUniform, viewMatrix);
+ }
+
+ if (qe.color() != fColor) {
+ float c[4];
+ GrColorToRGBAFloat(qe.color(), c);
+ pdman.set4fv(fColorUniform, 1, c);
+ fColor = qe.color();
+ }
+
+ if (qe.coverageScale() != 0xff && qe.coverageScale() != fCoverageScale) {
+ pdman.set1f(fCoverageScaleUniform, GrNormalizeByteToFloat(qe.coverageScale()));
+ fCoverageScale = qe.coverageScale();
+ }
+ this->setTransformDataHelper(qe.localMatrix(), pdman, &transformIter);
+ }
+
+private:
+ SkMatrix fViewMatrix;
+ GrColor fColor;
+ uint8_t fCoverageScale;
+ GrPrimitiveEdgeType fEdgeType;
+ UniformHandle fColorUniform;
+ UniformHandle fCoverageScaleUniform;
+ UniformHandle fViewMatrixUniform;
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
+
+GrGLQuadEffect::GrGLQuadEffect(const GrGeometryProcessor& processor)
+ : fViewMatrix(SkMatrix::InvalidMatrix()), fColor(GrColor_ILLEGAL), fCoverageScale(0xff) {
+ const GrQuadEffect& ce = processor.cast<GrQuadEffect>();
+ fEdgeType = ce.getEdgeType();
+}
+
+void GrGLQuadEffect::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) {
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ const GrQuadEffect& gp = args.fGP.cast<GrQuadEffect>();
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(gp);
+
+ GrGLSLVertToFrag v(kVec4f_GrSLType);
+ varyingHandler->addVarying("HairQuadEdge", &v);
+ vertBuilder->codeAppendf("%s = %s;", v.vsOut(), gp.inHairQuadEdge()->fName);
+
+ GrGLSLPPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ // Setup pass through color
+ if (!gp.colorIgnored()) {
+ this->setupUniformColor(fragBuilder, uniformHandler, args.fOutputColor, &fColorUniform);
+ }
+
+ // Setup position
+ this->setupPosition(vertBuilder,
+ uniformHandler,
+ gpArgs,
+ gp.inPosition()->fName,
+ gp.viewMatrix(),
+ &fViewMatrixUniform);
+
+ // emit transforms with position
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ gpArgs->fPositionVar,
+ gp.inPosition()->fName,
+ gp.localMatrix(),
+ args.fFPCoordTransformHandler);
+
+ fragBuilder->codeAppendf("float edgeAlpha;");
+
+ switch (fEdgeType) {
+ case kHairlineAA_GrProcessorEdgeType: {
+ SkAssertResult(fragBuilder->enableFeature(
+ GrGLSLFragmentShaderBuilder::kStandardDerivatives_GLSLFeature));
+ fragBuilder->codeAppendf("vec2 duvdx = dFdx(%s.xy);", v.fsIn());
+ fragBuilder->codeAppendf("vec2 duvdy = dFdy(%s.xy);", v.fsIn());
+ fragBuilder->codeAppendf("vec2 gF = vec2(2.0 * %s.x * duvdx.x - duvdx.y,"
+ " 2.0 * %s.x * duvdy.x - duvdy.y);",
+ v.fsIn(), v.fsIn());
+ fragBuilder->codeAppendf("edgeAlpha = (%s.x * %s.x - %s.y);",
+ v.fsIn(), v.fsIn(), v.fsIn());
+ fragBuilder->codeAppend("edgeAlpha = sqrt(edgeAlpha * edgeAlpha / dot(gF, gF));");
+ fragBuilder->codeAppend("edgeAlpha = max(1.0 - edgeAlpha, 0.0);");
+ // Add line below for smooth cubic ramp
+ // fragBuilder->codeAppend("edgeAlpha = edgeAlpha*edgeAlpha*(3.0-2.0*edgeAlpha);");
+ break;
+ }
+ case kFillAA_GrProcessorEdgeType: {
+ SkAssertResult(fragBuilder->enableFeature(
+ GrGLSLFragmentShaderBuilder::kStandardDerivatives_GLSLFeature));
+ fragBuilder->codeAppendf("vec2 duvdx = dFdx(%s.xy);", v.fsIn());
+ fragBuilder->codeAppendf("vec2 duvdy = dFdy(%s.xy);", v.fsIn());
+ fragBuilder->codeAppendf("vec2 gF = vec2(2.0 * %s.x * duvdx.x - duvdx.y,"
+ " 2.0 * %s.x * duvdy.x - duvdy.y);",
+ v.fsIn(), v.fsIn());
+ fragBuilder->codeAppendf("edgeAlpha = (%s.x * %s.x - %s.y);",
+ v.fsIn(), v.fsIn(), v.fsIn());
+ fragBuilder->codeAppend("edgeAlpha = edgeAlpha / sqrt(dot(gF, gF));");
+ fragBuilder->codeAppend("edgeAlpha = clamp(1.0 - edgeAlpha, 0.0, 1.0);");
+ // Add line below for smooth cubic ramp
+ // fragBuilder->codeAppend("edgeAlpha = edgeAlpha*edgeAlpha*(3.0-2.0*edgeAlpha);");
+ break;
+ }
+ case kFillBW_GrProcessorEdgeType: {
+ fragBuilder->codeAppendf("edgeAlpha = (%s.x * %s.x - %s.y);",
+ v.fsIn(), v.fsIn(), v.fsIn());
+ fragBuilder->codeAppend("edgeAlpha = float(edgeAlpha < 0.0);");
+ break;
+ }
+ default:
+ SkFAIL("Shouldn't get here");
+ }
+
+ if (0xff != gp.coverageScale()) {
+ const char* coverageScale;
+ fCoverageScaleUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType,
+ kDefault_GrSLPrecision,
+ "Coverage",
+ &coverageScale);
+ fragBuilder->codeAppendf("%s = vec4(%s * edgeAlpha);", args.fOutputCoverage, coverageScale);
+ } else {
+ fragBuilder->codeAppendf("%s = vec4(edgeAlpha);", args.fOutputCoverage);
+ }
+}
+
+void GrGLQuadEffect::GenKey(const GrGeometryProcessor& gp,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrQuadEffect& ce = gp.cast<GrQuadEffect>();
+ uint32_t key = ce.isAntiAliased() ? (ce.isFilled() ? 0x0 : 0x1) : 0x2;
+ key |= ce.color() != GrColor_ILLEGAL ? 0x4 : 0x0;
+ key |= ce.coverageScale() != 0xff ? 0x8 : 0x0;
+ key |= ce.usesLocalCoords() && ce.localMatrix().hasPerspective() ? 0x10 : 0x0;
+ key |= ComputePosKey(ce.viewMatrix()) << 5;
+ b->add32(key);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GrQuadEffect::~GrQuadEffect() {}
+
+void GrQuadEffect::getGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLQuadEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLPrimitiveProcessor* GrQuadEffect::createGLSLInstance(const GrGLSLCaps&) const {
+ return new GrGLQuadEffect(*this);
+}
+
+GrQuadEffect::GrQuadEffect(GrColor color, const SkMatrix& viewMatrix, uint8_t coverage,
+ GrPrimitiveEdgeType edgeType, const SkMatrix& localMatrix,
+ bool usesLocalCoords)
+ : fColor(color)
+ , fViewMatrix(viewMatrix)
+ , fLocalMatrix(localMatrix)
+ , fUsesLocalCoords(usesLocalCoords)
+ , fCoverageScale(coverage)
+ , fEdgeType(edgeType) {
+ this->initClassID<GrQuadEffect>();
+ fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ fInHairQuadEdge = &this->addVertexAttrib("inHairQuadEdge", kVec4f_GrVertexAttribType);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(GrQuadEffect);
+
+sk_sp<GrGeometryProcessor> GrQuadEffect::TestCreate(GrProcessorTestData* d) {
+ sk_sp<GrGeometryProcessor> gp;
+ do {
+ GrPrimitiveEdgeType edgeType = static_cast<GrPrimitiveEdgeType>(
+ d->fRandom->nextULessThan(kGrProcessorEdgeTypeCnt));
+ gp = GrQuadEffect::Make(GrRandomColor(d->fRandom),
+ GrTest::TestMatrix(d->fRandom),
+ edgeType, *d->fCaps,
+ GrTest::TestMatrix(d->fRandom),
+ d->fRandom->nextBool());
+ } while (nullptr == gp);
+ return gp;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// Cubic
+//////////////////////////////////////////////////////////////////////////////
+
+class GrGLCubicEffect : public GrGLSLGeometryProcessor {
+public:
+ GrGLCubicEffect(const GrGeometryProcessor&);
+
+ void onEmitCode(EmitArgs&, GrGPArgs*) override;
+
+ static inline void GenKey(const GrGeometryProcessor&,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder*);
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& primProc,
+ FPCoordTransformIter&& transformIter) override {
+ const GrCubicEffect& ce = primProc.cast<GrCubicEffect>();
+
+ if (!ce.viewMatrix().isIdentity() && !fViewMatrix.cheapEqualTo(ce.viewMatrix())) {
+ fViewMatrix = ce.viewMatrix();
+ float viewMatrix[3 * 3];
+ GrGLSLGetMatrix<3>(viewMatrix, fViewMatrix);
+ pdman.setMatrix3f(fViewMatrixUniform, viewMatrix);
+ }
+
+ if (ce.color() != fColor) {
+ float c[4];
+ GrColorToRGBAFloat(ce.color(), c);
+ pdman.set4fv(fColorUniform, 1, c);
+ fColor = ce.color();
+ }
+ this->setTransformDataHelper(SkMatrix::I(), pdman, &transformIter);
+ }
+
+private:
+ SkMatrix fViewMatrix;
+ GrColor fColor;
+ GrPrimitiveEdgeType fEdgeType;
+ UniformHandle fColorUniform;
+ UniformHandle fViewMatrixUniform;
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
+
+GrGLCubicEffect::GrGLCubicEffect(const GrGeometryProcessor& processor)
+ : fViewMatrix(SkMatrix::InvalidMatrix()), fColor(GrColor_ILLEGAL) {
+ const GrCubicEffect& ce = processor.cast<GrCubicEffect>();
+ fEdgeType = ce.getEdgeType();
+}
+
+void GrGLCubicEffect::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) {
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ const GrCubicEffect& gp = args.fGP.cast<GrCubicEffect>();
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(gp);
+
+ GrGLSLVertToFrag v(kVec4f_GrSLType);
+ varyingHandler->addVarying("CubicCoeffs", &v, kHigh_GrSLPrecision);
+ vertBuilder->codeAppendf("%s = %s;", v.vsOut(), gp.inCubicCoeffs()->fName);
+
+ GrGLSLPPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ // Setup pass through color
+ if (!gp.colorIgnored()) {
+ this->setupUniformColor(fragBuilder, uniformHandler, args.fOutputColor, &fColorUniform);
+ }
+
+ // Setup position
+ this->setupPosition(vertBuilder,
+ uniformHandler,
+ gpArgs,
+ gp.inPosition()->fName,
+ gp.viewMatrix(),
+ &fViewMatrixUniform);
+
+ // emit transforms with position
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ gpArgs->fPositionVar,
+ gp.inPosition()->fName,
+ args.fFPCoordTransformHandler);
+
+
+ GrGLSLShaderVar edgeAlpha("edgeAlpha", kFloat_GrSLType, 0, kHigh_GrSLPrecision);
+ GrGLSLShaderVar dklmdx("dklmdx", kVec3f_GrSLType, 0, kHigh_GrSLPrecision);
+ GrGLSLShaderVar dklmdy("dklmdy", kVec3f_GrSLType, 0, kHigh_GrSLPrecision);
+ GrGLSLShaderVar dfdx("dfdx", kFloat_GrSLType, 0, kHigh_GrSLPrecision);
+ GrGLSLShaderVar dfdy("dfdy", kFloat_GrSLType, 0, kHigh_GrSLPrecision);
+ GrGLSLShaderVar gF("gF", kVec2f_GrSLType, 0, kHigh_GrSLPrecision);
+ GrGLSLShaderVar gFM("gFM", kFloat_GrSLType, 0, kHigh_GrSLPrecision);
+ GrGLSLShaderVar func("func", kFloat_GrSLType, 0, kHigh_GrSLPrecision);
+
+ fragBuilder->declAppend(edgeAlpha);
+ fragBuilder->declAppend(dklmdx);
+ fragBuilder->declAppend(dklmdy);
+ fragBuilder->declAppend(dfdx);
+ fragBuilder->declAppend(dfdy);
+ fragBuilder->declAppend(gF);
+ fragBuilder->declAppend(gFM);
+ fragBuilder->declAppend(func);
+
+ switch (fEdgeType) {
+ case kHairlineAA_GrProcessorEdgeType: {
+ SkAssertResult(fragBuilder->enableFeature(
+ GrGLSLFragmentShaderBuilder::kStandardDerivatives_GLSLFeature));
+ fragBuilder->codeAppendf("%s = dFdx(%s.xyz);", dklmdx.c_str(), v.fsIn());
+ fragBuilder->codeAppendf("%s = dFdy(%s.xyz);", dklmdy.c_str(), v.fsIn());
+ fragBuilder->codeAppendf("%s = 3.0 * %s.x * %s.x * %s.x - %s.y * %s.z - %s.z * %s.y;",
+ dfdx.c_str(), v.fsIn(), v.fsIn(), dklmdx.c_str(), v.fsIn(),
+ dklmdx.c_str(), v.fsIn(), dklmdx.c_str());
+ fragBuilder->codeAppendf("%s = 3.0 * %s.x * %s.x * %s.x - %s.y * %s.z - %s.z * %s.y;",
+ dfdy.c_str(), v.fsIn(), v.fsIn(), dklmdy.c_str(), v.fsIn(),
+ dklmdy.c_str(), v.fsIn(), dklmdy.c_str());
+ fragBuilder->codeAppendf("%s = vec2(%s, %s);", gF.c_str(), dfdx.c_str(), dfdy.c_str());
+ fragBuilder->codeAppendf("%s = sqrt(dot(%s, %s));",
+ gFM.c_str(), gF.c_str(), gF.c_str());
+ fragBuilder->codeAppendf("%s = %s.x * %s.x * %s.x - %s.y * %s.z;",
+ func.c_str(), v.fsIn(), v.fsIn(),
+ v.fsIn(), v.fsIn(), v.fsIn());
+ fragBuilder->codeAppendf("%s = abs(%s);", func.c_str(), func.c_str());
+ fragBuilder->codeAppendf("%s = %s / %s;",
+ edgeAlpha.c_str(), func.c_str(), gFM.c_str());
+ fragBuilder->codeAppendf("%s = max(1.0 - %s, 0.0);",
+ edgeAlpha.c_str(), edgeAlpha.c_str());
+ // Add line below for smooth cubic ramp
+ // fragBuilder->codeAppendf("%s = %s * %s * (3.0 - 2.0 * %s);",
+ // edgeAlpha.c_str(), edgeAlpha.c_str(), edgeAlpha.c_str(),
+ // edgeAlpha.c_str());
+ break;
+ }
+ case kFillAA_GrProcessorEdgeType: {
+ SkAssertResult(fragBuilder->enableFeature(
+ GrGLSLFragmentShaderBuilder::kStandardDerivatives_GLSLFeature));
+ fragBuilder->codeAppendf("%s = dFdx(%s.xyz);", dklmdx.c_str(), v.fsIn());
+ fragBuilder->codeAppendf("%s = dFdy(%s.xyz);", dklmdy.c_str(), v.fsIn());
+ fragBuilder->codeAppendf("%s ="
+ "3.0 * %s.x * %s.x * %s.x - %s.y * %s.z - %s.z * %s.y;",
+ dfdx.c_str(), v.fsIn(), v.fsIn(), dklmdx.c_str(), v.fsIn(),
+ dklmdx.c_str(), v.fsIn(), dklmdx.c_str());
+ fragBuilder->codeAppendf("%s = 3.0 * %s.x * %s.x * %s.x - %s.y * %s.z - %s.z * %s.y;",
+ dfdy.c_str(), v.fsIn(), v.fsIn(), dklmdy.c_str(), v.fsIn(),
+ dklmdy.c_str(), v.fsIn(), dklmdy.c_str());
+ fragBuilder->codeAppendf("%s = vec2(%s, %s);", gF.c_str(), dfdx.c_str(), dfdy.c_str());
+ fragBuilder->codeAppendf("%s = sqrt(dot(%s, %s));",
+ gFM.c_str(), gF.c_str(), gF.c_str());
+ fragBuilder->codeAppendf("%s = %s.x * %s.x * %s.x - %s.y * %s.z;",
+ func.c_str(),
+ v.fsIn(), v.fsIn(), v.fsIn(), v.fsIn(), v.fsIn());
+ fragBuilder->codeAppendf("%s = %s / %s;",
+ edgeAlpha.c_str(), func.c_str(), gFM.c_str());
+ fragBuilder->codeAppendf("%s = clamp(1.0 - %s, 0.0, 1.0);",
+ edgeAlpha.c_str(), edgeAlpha.c_str());
+ // Add line below for smooth cubic ramp
+ // fragBuilder->codeAppendf("%s = %s * %s * (3.0 - 2.0 * %s);",
+ // edgeAlpha.c_str(), edgeAlpha.c_str(), edgeAlpha.c_str(),
+ // edgeAlpha.c_str());
+ break;
+ }
+ case kFillBW_GrProcessorEdgeType: {
+ fragBuilder->codeAppendf("%s = %s.x * %s.x * %s.x - %s.y * %s.z;",
+ edgeAlpha.c_str(), v.fsIn(), v.fsIn(),
+ v.fsIn(), v.fsIn(), v.fsIn());
+ fragBuilder->codeAppendf("%s = float(%s < 0.0);", edgeAlpha.c_str(), edgeAlpha.c_str());
+ break;
+ }
+ default:
+ SkFAIL("Shouldn't get here");
+ }
+
+
+ fragBuilder->codeAppendf("%s = vec4(%s);", args.fOutputCoverage, edgeAlpha.c_str());
+}
+
+void GrGLCubicEffect::GenKey(const GrGeometryProcessor& gp,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrCubicEffect& ce = gp.cast<GrCubicEffect>();
+ uint32_t key = ce.isAntiAliased() ? (ce.isFilled() ? 0x0 : 0x1) : 0x2;
+ key |= ce.color() != GrColor_ILLEGAL ? 0x4 : 0x8;
+ key |= ComputePosKey(ce.viewMatrix()) << 5;
+ b->add32(key);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GrCubicEffect::~GrCubicEffect() {}
+
+void GrCubicEffect::getGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const {
+ GrGLCubicEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLPrimitiveProcessor* GrCubicEffect::createGLSLInstance(const GrGLSLCaps&) const {
+ return new GrGLCubicEffect(*this);
+}
+
+GrCubicEffect::GrCubicEffect(GrColor color, const SkMatrix& viewMatrix,
+ GrPrimitiveEdgeType edgeType)
+ : fColor(color)
+ , fViewMatrix(viewMatrix)
+ , fEdgeType(edgeType) {
+ this->initClassID<GrCubicEffect>();
+ fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ fInCubicCoeffs = &this->addVertexAttrib("inCubicCoeffs", kVec4f_GrVertexAttribType);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(GrCubicEffect);
+
+sk_sp<GrGeometryProcessor> GrCubicEffect::TestCreate(GrProcessorTestData* d) {
+ sk_sp<GrGeometryProcessor> gp;
+ do {
+ GrPrimitiveEdgeType edgeType =
+ static_cast<GrPrimitiveEdgeType>(
+ d->fRandom->nextULessThan(kGrProcessorEdgeTypeCnt));
+ gp = GrCubicEffect::Make(GrRandomColor(d->fRandom),
+ GrTest::TestMatrix(d->fRandom), edgeType, *d->fCaps);
+ } while (nullptr == gp);
+ return gp;
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrBezierEffect.h b/gfx/skia/skia/src/gpu/effects/GrBezierEffect.h
new file mode 100644
index 000000000..50dca9924
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrBezierEffect.h
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBezierEffect_DEFINED
+#define GrBezierEffect_DEFINED
+
+#include "GrCaps.h"
+#include "GrProcessor.h"
+#include "GrGeometryProcessor.h"
+#include "GrInvariantOutput.h"
+#include "GrTypesPriv.h"
+
+/**
+ * Shader is based off of Loop-Blinn Quadratic GPU Rendering
+ * The output of this effect is a hairline edge for conics.
+ * Conics are specified by the implicit equation K^2 - LM.
+ * K, L, and M, are the first three values of the vertex attribute,
+ * the fourth value is not used. Distance is calculated using a
+ * first order approximation from the Taylor series.
+ * Coverage for AA is max(0, 1-distance).
+ *
+ * Tests were also run using a second order distance approximation.
+ * There were two versions of the second order approx. The first version
+ * is of roughly the form:
+ * f(q) = |f(p)| - ||f'(p)||*||q-p|| - ||f''(p)||*||q-p||^2.
+ * The second is similar:
+ * f(q) = |f(p)| + ||f'(p)||*||q-p|| + ||f''(p)||*||q-p||^2.
+ * The exact version of the equations can be found in the paper
+ * "Distance Approximations for Rasterizing Implicit Curves" by Gabriel Taubin
+ *
+ * In both versions we solve the quadratic for ||q-p||.
+ * Version 1:
+ * gFM is magnitude of first partials and gF2M is magnitude of 2nd partials (as derived from paper)
+ * builder->fsCodeAppend("\t\tedgeAlpha = (sqrt(gFM*gFM+4.0*func*gF2M) - gFM)/(2.0*gF2M);\n");
+ * Version 2:
+ * builder->fsCodeAppend("\t\tedgeAlpha = (gFM - sqrt(gFM*gFM-4.0*func*gF2M))/(2.0*gF2M);\n");
+ *
+ * Also note that 2nd partials of k,l,m are zero
+ *
+ * When comparing the two second order approximations to the first order approximations,
+ * the following results were found. Version 1 tends to underestimate the distances, thus it
+ * basically increases all the error that we were already seeing in the first order
+ * approx. So this version is not the one to use. Version 2 has the opposite effect
+ * and tends to overestimate the distances. This is much closer to what we are
+ * looking for. It is able to render ellipses (even thin ones) without the need to chop.
+ * However, it cannot handle thin hyperbolas well and thus would still rely on
+ * chopping to tighten the clipping. Another side effect of the overestimating is
+ * that the curves become much thinner and "ropey". If all that was ever rendered
+ * were "not too thin" curves and ellipses then 2nd order may have an advantage since
+ * only one geometry would need to be rendered. However no benches were run comparing
+ * chopped first order and non chopped 2nd order.
+ */
+class GrGLConicEffect;
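For reference, the first order approximation described in the comment above amounts to evaluating the implicit conic function and dividing by the magnitude of its gradient (a sketch of the algebra only; k, l, m are the vertex-attribute values named in the comment):

\[ f(k,l,m) = k^2 - lm, \qquad d \approx \frac{|f|}{\lVert \nabla f \rVert}, \qquad \text{coverage} = \max(0,\ 1 - d). \]

The quoted "Version 1" formula is the positive root of the quadratic that second order approximation sets up, with gFM = ||f'|| and gF2M = ||f''||:

\[ gF2M\,d^2 + gFM\,d - |f| = 0 \;\Longrightarrow\; d = \frac{\sqrt{gFM^2 + 4\,|f|\,gF2M} - gFM}{2\,gF2M}. \]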
+
+class GrConicEffect : public GrGeometryProcessor {
+public:
+ static sk_sp<GrGeometryProcessor> Make(GrColor color,
+ const SkMatrix& viewMatrix,
+ const GrPrimitiveEdgeType edgeType,
+ const GrCaps& caps,
+ const SkMatrix& localMatrix,
+ bool usesLocalCoords,
+ uint8_t coverage = 0xff) {
+ switch (edgeType) {
+ case kFillAA_GrProcessorEdgeType:
+ if (!caps.shaderCaps()->shaderDerivativeSupport()) {
+ return nullptr;
+ }
+ return sk_sp<GrGeometryProcessor>(
+ new GrConicEffect(color, viewMatrix, coverage, kFillAA_GrProcessorEdgeType,
+ localMatrix, usesLocalCoords));
+ case kHairlineAA_GrProcessorEdgeType:
+ if (!caps.shaderCaps()->shaderDerivativeSupport()) {
+ return nullptr;
+ }
+ return sk_sp<GrGeometryProcessor>(
+ new GrConicEffect(color, viewMatrix, coverage,
+ kHairlineAA_GrProcessorEdgeType, localMatrix,
+ usesLocalCoords));
+ case kFillBW_GrProcessorEdgeType:
+ return sk_sp<GrGeometryProcessor>(
+ new GrConicEffect(color, viewMatrix, coverage, kFillBW_GrProcessorEdgeType,
+ localMatrix, usesLocalCoords));
+ default:
+ return nullptr;
+ }
+ }
+
+ virtual ~GrConicEffect();
+
+ const char* name() const override { return "Conic"; }
+
+ inline const Attribute* inPosition() const { return fInPosition; }
+ inline const Attribute* inConicCoeffs() const { return fInConicCoeffs; }
+ inline bool isAntiAliased() const { return GrProcessorEdgeTypeIsAA(fEdgeType); }
+ inline bool isFilled() const { return GrProcessorEdgeTypeIsFill(fEdgeType); }
+ inline GrPrimitiveEdgeType getEdgeType() const { return fEdgeType; }
+ GrColor color() const { return fColor; }
+ bool colorIgnored() const { return GrColor_ILLEGAL == fColor; }
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+ uint8_t coverageScale() const { return fCoverageScale; }
+
+ void getGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override;
+
+private:
+ GrConicEffect(GrColor, const SkMatrix& viewMatrix, uint8_t coverage, GrPrimitiveEdgeType,
+ const SkMatrix& localMatrix, bool usesLocalCoords);
+
+ GrColor fColor;
+ SkMatrix fViewMatrix;
+ SkMatrix fLocalMatrix;
+ bool fUsesLocalCoords;
+ uint8_t fCoverageScale;
+ GrPrimitiveEdgeType fEdgeType;
+ const Attribute* fInPosition;
+ const Attribute* fInConicCoeffs;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+/**
+ * The output of this effect is a hairline edge for quadratics.
+ * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
+ * two components of the vertex attribute. At the three control points that define
+ * the Quadratic, u, v have the values {0,0}, {1/2, 0}, and {1, 1} respectively.
+ * Coverage for AA is max(0, 1-distance). The 3rd & 4th components are unused.
+ * Requires shader derivative instruction support.
+ */
+class GrGLQuadEffect;
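Written out, the AA fragment code emitted by GrGLQuadEffect::onEmitCode above implements the same gradient-based distance for this canonical quadratic (a sketch; u, v are the varying components and u_x, v_x, u_y, v_y their dFdx/dFdy screen-space derivatives):

\[ f(u,v) = u^2 - v, \qquad \nabla f = \bigl(2u\,u_x - v_x,\; 2u\,u_y - v_y\bigr), \]
\[ \text{hairline: } \alpha = \max\Bigl(0,\ 1 - \tfrac{|f|}{\lVert \nabla f \rVert}\Bigr), \qquad \text{fill AA: } \alpha = \operatorname{clamp}\Bigl(1 - \tfrac{f}{\lVert \nabla f \rVert},\ 0,\ 1\Bigr), \]

with the signed f in the fill case so that pixels inside the curve saturate to full coverage.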
+
+class GrQuadEffect : public GrGeometryProcessor {
+public:
+ static sk_sp<GrGeometryProcessor> Make(GrColor color,
+ const SkMatrix& viewMatrix,
+ const GrPrimitiveEdgeType edgeType,
+ const GrCaps& caps,
+ const SkMatrix& localMatrix,
+ bool usesLocalCoords,
+ uint8_t coverage = 0xff) {
+ switch (edgeType) {
+ case kFillAA_GrProcessorEdgeType:
+ if (!caps.shaderCaps()->shaderDerivativeSupport()) {
+ return nullptr;
+ }
+ return sk_sp<GrGeometryProcessor>(
+ new GrQuadEffect(color, viewMatrix, coverage, kFillAA_GrProcessorEdgeType,
+ localMatrix, usesLocalCoords));
+ case kHairlineAA_GrProcessorEdgeType:
+ if (!caps.shaderCaps()->shaderDerivativeSupport()) {
+ return nullptr;
+ }
+ return sk_sp<GrGeometryProcessor>(
+ new GrQuadEffect(color, viewMatrix, coverage,
+ kHairlineAA_GrProcessorEdgeType, localMatrix,
+ usesLocalCoords));
+ case kFillBW_GrProcessorEdgeType:
+ return sk_sp<GrGeometryProcessor>(
+ new GrQuadEffect(color, viewMatrix, coverage, kFillBW_GrProcessorEdgeType,
+ localMatrix, usesLocalCoords));
+ default:
+ return nullptr;
+ }
+ }
+
+ virtual ~GrQuadEffect();
+
+ const char* name() const override { return "Quad"; }
+
+ inline const Attribute* inPosition() const { return fInPosition; }
+ inline const Attribute* inHairQuadEdge() const { return fInHairQuadEdge; }
+ inline bool isAntiAliased() const { return GrProcessorEdgeTypeIsAA(fEdgeType); }
+ inline bool isFilled() const { return GrProcessorEdgeTypeIsFill(fEdgeType); }
+ inline GrPrimitiveEdgeType getEdgeType() const { return fEdgeType; }
+ GrColor color() const { return fColor; }
+ bool colorIgnored() const { return GrColor_ILLEGAL == fColor; }
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+ uint8_t coverageScale() const { return fCoverageScale; }
+
+ void getGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override;
+
+private:
+ GrQuadEffect(GrColor, const SkMatrix& viewMatrix, uint8_t coverage, GrPrimitiveEdgeType,
+ const SkMatrix& localMatrix, bool usesLocalCoords);
+
+ GrColor fColor;
+ SkMatrix fViewMatrix;
+ SkMatrix fLocalMatrix;
+ bool fUsesLocalCoords;
+ uint8_t fCoverageScale;
+ GrPrimitiveEdgeType fEdgeType;
+ const Attribute* fInPosition;
+ const Attribute* fInHairQuadEdge;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+//////////////////////////////////////////////////////////////////////////////
+/**
+ * Shader is based off of "Resolution Independent Curve Rendering using
+ * Programmable Graphics Hardware" by Loop and Blinn.
+ * The output of this effect is a hairline edge for non rational cubics.
+ * Cubics are specified by implicit equation K^3 - LM.
+ * K, L, and M, are the first three values of the vertex attribute,
+ * the fourth value is not used. Distance is calculated using a
+ * first order approximation from the Taylor series.
+ * Coverage for AA is max(0, 1-distance).
+ */
+class GrGLCubicEffect;
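Concretely, the partial derivatives that GrGLCubicEffect::onEmitCode builds above (the dfdx/dfdy temporaries) follow from the product and chain rules applied to the cubic's implicit function (a sketch; k, l, m are the varying components and k_x, l_x, m_x, k_y, l_y, m_y their dFdx/dFdy derivatives):

\[ f(k,l,m) = k^3 - lm, \qquad \frac{\partial f}{\partial x} = 3k^2 k_x - l\,m_x - m\,l_x, \qquad \frac{\partial f}{\partial y} = 3k^2 k_y - l\,m_y - m\,l_y, \]

with the distance and coverage then formed exactly as in the conic and quad cases above.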
+
+class GrCubicEffect : public GrGeometryProcessor {
+public:
+ static sk_sp<GrGeometryProcessor> Make(GrColor color,
+ const SkMatrix& viewMatrix,
+ const GrPrimitiveEdgeType edgeType,
+ const GrCaps& caps) {
+ switch (edgeType) {
+ case kFillAA_GrProcessorEdgeType:
+ if (!caps.shaderCaps()->shaderDerivativeSupport()) {
+ return nullptr;
+ }
+ return sk_sp<GrGeometryProcessor>(
+ new GrCubicEffect(color, viewMatrix, kFillAA_GrProcessorEdgeType));
+ case kHairlineAA_GrProcessorEdgeType:
+ if (!caps.shaderCaps()->shaderDerivativeSupport()) {
+ return nullptr;
+ }
+ return sk_sp<GrGeometryProcessor>(
+ new GrCubicEffect(color, viewMatrix, kHairlineAA_GrProcessorEdgeType));
+ case kFillBW_GrProcessorEdgeType:
+ return sk_sp<GrGeometryProcessor>(
+ new GrCubicEffect(color, viewMatrix, kFillBW_GrProcessorEdgeType));
+ default:
+ return nullptr;
+ }
+ }
+
+ virtual ~GrCubicEffect();
+
+ const char* name() const override { return "Cubic"; }
+
+ inline const Attribute* inPosition() const { return fInPosition; }
+ inline const Attribute* inCubicCoeffs() const { return fInCubicCoeffs; }
+ inline bool isAntiAliased() const { return GrProcessorEdgeTypeIsAA(fEdgeType); }
+ inline bool isFilled() const { return GrProcessorEdgeTypeIsFill(fEdgeType); }
+ inline GrPrimitiveEdgeType getEdgeType() const { return fEdgeType; }
+ GrColor color() const { return fColor; }
+ bool colorIgnored() const { return GrColor_ILLEGAL == fColor; }
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+
+ void getGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override;
+
+private:
+ GrCubicEffect(GrColor, const SkMatrix& viewMatrix, GrPrimitiveEdgeType);
+
+ GrColor fColor;
+ SkMatrix fViewMatrix;
+ GrPrimitiveEdgeType fEdgeType;
+ const Attribute* fInPosition;
+ const Attribute* fInCubicCoeffs;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrBicubicEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrBicubicEffect.cpp
new file mode 100644
index 000000000..737625ffe
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrBicubicEffect.cpp
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrBicubicEffect.h"
+#include "GrInvariantOutput.h"
+#include "glsl/GrGLSLColorSpaceXformHelper.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+
+#define DS(x) SkDoubleToScalar(x)
+
+const SkScalar GrBicubicEffect::gMitchellCoefficients[16] = {
+ DS( 1.0 / 18.0), DS(-9.0 / 18.0), DS( 15.0 / 18.0), DS( -7.0 / 18.0),
+ DS(16.0 / 18.0), DS( 0.0 / 18.0), DS(-36.0 / 18.0), DS( 21.0 / 18.0),
+ DS( 1.0 / 18.0), DS( 9.0 / 18.0), DS( 27.0 / 18.0), DS(-21.0 / 18.0),
+ DS( 0.0 / 18.0), DS( 0.0 / 18.0), DS( -6.0 / 18.0), DS( 7.0 / 18.0),
+};
+
+
+class GrGLBicubicEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor& effect, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrBicubicEffect& bicubicEffect = effect.cast<GrBicubicEffect>();
+ b->add32(GrTextureDomain::GLDomain::DomainKey(bicubicEffect.domain()));
+ b->add32(GrColorSpaceXform::XformKey(bicubicEffect.colorSpaceXform()));
+ }
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+private:
+ typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+
+ UniformHandle fCoefficientsUni;
+ UniformHandle fImageIncrementUni;
+ UniformHandle fColorSpaceXformUni;
+ GrTextureDomain::GLDomain fDomain;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GrGLBicubicEffect::emitCode(EmitArgs& args) {
+ const GrBicubicEffect& bicubicEffect = args.fFp.cast<GrBicubicEffect>();
+
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ fCoefficientsUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kMat44f_GrSLType, kDefault_GrSLPrecision,
+ "Coefficients");
+ fImageIncrementUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType, kDefault_GrSLPrecision,
+ "ImageIncrement");
+
+ const char* imgInc = uniformHandler->getUniformCStr(fImageIncrementUni);
+ const char* coeff = uniformHandler->getUniformCStr(fCoefficientsUni);
+
+ GrGLSLColorSpaceXformHelper colorSpaceHelper(uniformHandler, bicubicEffect.colorSpaceXform(),
+ &fColorSpaceXformUni);
+
+ SkString cubicBlendName;
+
+ static const GrGLSLShaderVar gCubicBlendArgs[] = {
+ GrGLSLShaderVar("coefficients", kMat44f_GrSLType),
+ GrGLSLShaderVar("t", kFloat_GrSLType),
+ GrGLSLShaderVar("c0", kVec4f_GrSLType),
+ GrGLSLShaderVar("c1", kVec4f_GrSLType),
+ GrGLSLShaderVar("c2", kVec4f_GrSLType),
+ GrGLSLShaderVar("c3", kVec4f_GrSLType),
+ };
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString coords2D = fragBuilder->ensureCoords2D(args.fTransformedCoords[0]);
+ fragBuilder->emitFunction(kVec4f_GrSLType,
+ "cubicBlend",
+ SK_ARRAY_COUNT(gCubicBlendArgs),
+ gCubicBlendArgs,
+ "\tvec4 ts = vec4(1.0, t, t * t, t * t * t);\n"
+ "\tvec4 c = coefficients * ts;\n"
+ "\treturn c.x * c0 + c.y * c1 + c.z * c2 + c.w * c3;\n",
+ &cubicBlendName);
+ fragBuilder->codeAppendf("\tvec2 coord = %s - %s * vec2(0.5);\n", coords2D.c_str(), imgInc);
+ // We unnormalize the coord in order to determine our fractional offset (f) within the texel
+ // We then snap coord to a texel center and renormalize. The snap prevents cases where the
+ // starting coords are near a texel boundary and accumulations of imgInc would cause us to skip/
+ // double hit a texel.
+ fragBuilder->codeAppendf("\tcoord /= %s;\n", imgInc);
+ fragBuilder->codeAppend("\tvec2 f = fract(coord);\n");
+ fragBuilder->codeAppendf("\tcoord = (coord - f + vec2(0.5)) * %s;\n", imgInc);
+ fragBuilder->codeAppend("\tvec4 rowColors[4];\n");
+ for (int y = 0; y < 4; ++y) {
+ for (int x = 0; x < 4; ++x) {
+ SkString coord;
+ coord.printf("coord + %s * vec2(%d, %d)", imgInc, x - 1, y - 1);
+ SkString sampleVar;
+ sampleVar.printf("rowColors[%d]", x);
+ fDomain.sampleTexture(fragBuilder,
+ args.fUniformHandler,
+ args.fGLSLCaps,
+ bicubicEffect.domain(),
+ sampleVar.c_str(),
+ coord,
+ args.fTexSamplers[0]);
+ }
+ fragBuilder->codeAppendf(
+ "\tvec4 s%d = %s(%s, f.x, rowColors[0], rowColors[1], rowColors[2], rowColors[3]);\n",
+ y, cubicBlendName.c_str(), coeff);
+ }
+ SkString bicubicColor;
+ bicubicColor.printf("%s(%s, f.y, s0, s1, s2, s3)", cubicBlendName.c_str(), coeff);
+ if (colorSpaceHelper.getXformMatrix()) {
+ SkString xformedColor;
+ fragBuilder->appendColorGamutXform(&xformedColor, bicubicColor.c_str(), &colorSpaceHelper);
+ bicubicColor.swap(xformedColor);
+ }
+ fragBuilder->codeAppendf("\t%s = %s;\n",
+ args.fOutputColor, (GrGLSLExpr4(bicubicColor.c_str()) *
+ GrGLSLExpr4(args.fInputColor)).c_str());
+}
+
+void GrGLBicubicEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& processor) {
+ const GrBicubicEffect& bicubicEffect = processor.cast<GrBicubicEffect>();
+ const GrTexture& texture = *processor.texture(0);
+ float imageIncrement[2];
+ imageIncrement[0] = 1.0f / texture.width();
+ imageIncrement[1] = 1.0f / texture.height();
+ pdman.set2fv(fImageIncrementUni, 1, imageIncrement);
+ pdman.setMatrix4f(fCoefficientsUni, bicubicEffect.coefficients());
+ fDomain.setData(pdman, bicubicEffect.domain(), texture.origin());
+ if (SkToBool(bicubicEffect.colorSpaceXform())) {
+ pdman.setSkMatrix44(fColorSpaceXformUni, bicubicEffect.colorSpaceXform()->srcToDst());
+ }
+}
+
+static inline void convert_row_major_scalar_coeffs_to_column_major_floats(float dst[16],
+ const SkScalar src[16]) {
+ for (int y = 0; y < 4; y++) {
+ for (int x = 0; x < 4; x++) {
+ dst[x * 4 + y] = SkScalarToFloat(src[y * 4 + x]);
+ }
+ }
+}
+
+GrBicubicEffect::GrBicubicEffect(GrTexture* texture,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkScalar coefficients[16],
+ const SkMatrix &matrix,
+ const SkShader::TileMode tileModes[2])
+ : INHERITED(texture, nullptr, matrix,
+ GrTextureParams(tileModes, GrTextureParams::kNone_FilterMode))
+ , fDomain(GrTextureDomain::IgnoredDomain())
+ , fColorSpaceXform(std::move(colorSpaceXform)) {
+ this->initClassID<GrBicubicEffect>();
+ convert_row_major_scalar_coeffs_to_column_major_floats(fCoefficients, coefficients);
+}
+
+GrBicubicEffect::GrBicubicEffect(GrTexture* texture,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkScalar coefficients[16],
+ const SkMatrix &matrix,
+ const SkRect& domain)
+ : INHERITED(texture, nullptr, matrix,
+ GrTextureParams(SkShader::kClamp_TileMode, GrTextureParams::kNone_FilterMode))
+ , fDomain(domain, GrTextureDomain::kClamp_Mode)
+ , fColorSpaceXform(std::move(colorSpaceXform)) {
+ this->initClassID<GrBicubicEffect>();
+ convert_row_major_scalar_coeffs_to_column_major_floats(fCoefficients, coefficients);
+}
+
+GrBicubicEffect::~GrBicubicEffect() {
+}
+
+void GrBicubicEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLBicubicEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrBicubicEffect::onCreateGLSLInstance() const {
+ return new GrGLBicubicEffect;
+}
+
+bool GrBicubicEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrBicubicEffect& s = sBase.cast<GrBicubicEffect>();
+ return !memcmp(fCoefficients, s.coefficients(), 16) &&
+ fDomain == s.fDomain;
+}
+
+void GrBicubicEffect::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ // FIXME: Perhaps we can do better.
+ inout->mulByUnknownSingleComponent();
+}
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrBicubicEffect);
+
+sk_sp<GrFragmentProcessor> GrBicubicEffect::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx :
+ GrProcessorUnitTest::kAlphaTextureIdx;
+ SkScalar coefficients[16];
+ for (int i = 0; i < 16; i++) {
+ coefficients[i] = d->fRandom->nextSScalar1();
+ }
+ auto colorSpaceXform = GrTest::TestColorXform(d->fRandom);
+ return GrBicubicEffect::Make(d->fTextures[texIdx], colorSpaceXform, coefficients);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+bool GrBicubicEffect::ShouldUseBicubic(const SkMatrix& matrix,
+ GrTextureParams::FilterMode* filterMode) {
+ if (matrix.isIdentity()) {
+ *filterMode = GrTextureParams::kNone_FilterMode;
+ return false;
+ }
+
+ SkScalar scales[2];
+ if (!matrix.getMinMaxScales(scales) || scales[0] < SK_Scalar1) {
+ // Bicubic doesn't handle arbitrary minimization well, as src texels can be skipped
+ // entirely, so fall back to mipmap filtering instead.
+ *filterMode = GrTextureParams::kMipMap_FilterMode;
+ return false;
+ }
+ // At this point if scales[1] == SK_Scalar1 then the matrix doesn't do any scaling.
+ if (scales[1] == SK_Scalar1) {
+ if (matrix.rectStaysRect() && SkScalarIsInt(matrix.getTranslateX()) &&
+ SkScalarIsInt(matrix.getTranslateY())) {
+ *filterMode = GrTextureParams::kNone_FilterMode;
+ } else {
+ // Use bilerp to handle rotation or fractional translation.
+ *filterMode = GrTextureParams::kBilerp_FilterMode;
+ }
+ return false;
+ }
+ // When we use the bicubic filtering effect each sample is read from the texture using
+ // nearest neighbor sampling.
+ *filterMode = GrTextureParams::kNone_FilterMode;
+ return true;
+}
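To make the intended division of labor concrete, a hypothetical call site for the ShouldUseBicubic helper above and the Make() factories declared in GrBicubicEffect.h might look like the following sketch (localToDevice, texture, colorSpaceXform, textureMatrix and tileModes are illustrative placeholders, not names from this patch):

    // Illustrative sketch only: choose bicubic vs. an ordinary filter mode for a draw.
    GrTextureParams::FilterMode filterMode;
    if (GrBicubicEffect::ShouldUseBicubic(localToDevice, &filterMode)) {
        // Magnifying: sample through the Mitchell-kernel bicubic effect.
        sk_sp<GrFragmentProcessor> fp =
                GrBicubicEffect::Make(texture, colorSpaceXform, textureMatrix, tileModes);
        // ... install fp on the paint.
    } else {
        // Not magnifying: use the fallback mode chosen above (kNone, kBilerp or kMipMap)
        // with a plain texture sampler instead.
        GrTextureParams params(tileModes, filterMode);
        // ... build an ordinary texture effect with 'params'.
    }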
diff --git a/gfx/skia/skia/src/gpu/effects/GrBicubicEffect.h b/gfx/skia/skia/src/gpu/effects/GrBicubicEffect.h
new file mode 100644
index 000000000..58bb068d5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrBicubicEffect.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBicubicTextureEffect_DEFINED
+#define GrBicubicTextureEffect_DEFINED
+
+#include "GrSingleTextureEffect.h"
+#include "GrTextureDomain.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+
+class GrGLBicubicEffect;
+class GrInvariantOutput;
+
+class GrBicubicEffect : public GrSingleTextureEffect {
+public:
+ enum {
+ kFilterTexelPad = 2, // Given a src rect in texels to be filtered, this number of
+ // surrounding texels are needed by the kernel in x and y.
+ };
+ virtual ~GrBicubicEffect();
+
+ const float* coefficients() const { return fCoefficients; }
+
+ const char* name() const override { return "Bicubic"; }
+
+ const GrTextureDomain& domain() const { return fDomain; }
+
+ GrColorSpaceXform* colorSpaceXform() const { return fColorSpaceXform.get(); }
+
+ /**
+ * Create a simple filter effect with custom bicubic coefficients and optional domain.
+ */
+ static sk_sp<GrFragmentProcessor> Make(GrTexture* tex,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkScalar coefficients[16],
+ const SkRect* domain = nullptr) {
+ if (nullptr == domain) {
+ static const SkShader::TileMode kTileModes[] = { SkShader::kClamp_TileMode,
+ SkShader::kClamp_TileMode };
+ return Make(tex, std::move(colorSpaceXform), coefficients,
+ GrCoordTransform::MakeDivByTextureWHMatrix(tex), kTileModes);
+ } else {
+ return sk_sp<GrFragmentProcessor>(
+ new GrBicubicEffect(tex, std::move(colorSpaceXform), coefficients,
+ GrCoordTransform::MakeDivByTextureWHMatrix(tex), *domain));
+ }
+ }
+
+ /**
+ * Create a Mitchell filter effect with specified texture matrix and x/y tile modes.
+ */
+ static sk_sp<GrFragmentProcessor> Make(GrTexture* tex,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkMatrix& matrix,
+ const SkShader::TileMode tileModes[2]) {
+ return Make(tex, std::move(colorSpaceXform), gMitchellCoefficients, matrix, tileModes);
+ }
+
+ /**
+ * Create a filter effect with custom bicubic coefficients, the texture matrix, and the x/y
+ * tilemodes.
+ */
+ static sk_sp<GrFragmentProcessor> Make(GrTexture* tex,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkScalar coefficients[16],
+ const SkMatrix& matrix,
+ const SkShader::TileMode tileModes[2]) {
+ return sk_sp<GrFragmentProcessor>(new GrBicubicEffect(tex, std::move(colorSpaceXform),
+ coefficients, matrix, tileModes));
+ }
+
+ /**
+ * Create a Mitchell filter effect with a texture matrix and a domain.
+ */
+ static sk_sp<GrFragmentProcessor> Make(GrTexture* tex,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkMatrix& matrix,
+ const SkRect& domain) {
+ return sk_sp<GrFragmentProcessor>(new GrBicubicEffect(tex, std::move(colorSpaceXform),
+ gMitchellCoefficients, matrix,
+ domain));
+ }
+
+ /**
+ * Determines whether the bicubic effect should be used based on the transformation from the
+ * local coords to the device. Returns true if the bicubic effect should be used. filterMode
+ * is set to appropriate filtering mode to use regardless of the return result (e.g. when this
+ * returns false it may indicate that the best fallback is to use kMipMap, kBilerp, or
+ * kNearest).
+ */
+ static bool ShouldUseBicubic(const SkMatrix& localCoordsToDevice,
+ GrTextureParams::FilterMode* filterMode);
+
+private:
+ GrBicubicEffect(GrTexture*, sk_sp<GrColorSpaceXform>, const SkScalar coefficients[16],
+ const SkMatrix &matrix, const SkShader::TileMode tileModes[2]);
+ GrBicubicEffect(GrTexture*, sk_sp<GrColorSpaceXform>, const SkScalar coefficients[16],
+ const SkMatrix &matrix, const SkRect& domain);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ float fCoefficients[16];
+ GrTextureDomain fDomain;
+ sk_sp<GrColorSpaceXform> fColorSpaceXform;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ static const SkScalar gMitchellCoefficients[16];
+
+ typedef GrSingleTextureEffect INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrBitmapTextGeoProc.cpp b/gfx/skia/skia/src/gpu/effects/GrBitmapTextGeoProc.cpp
new file mode 100644
index 000000000..7f5366363
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrBitmapTextGeoProc.cpp
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrBitmapTextGeoProc.h"
+#include "GrInvariantOutput.h"
+#include "GrTexture.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLGeometryProcessor.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "glsl/GrGLSLVarying.h"
+#include "glsl/GrGLSLVertexShaderBuilder.h"
+
+class GrGLBitmapTextGeoProc : public GrGLSLGeometryProcessor {
+public:
+ GrGLBitmapTextGeoProc() : fColor(GrColor_ILLEGAL) {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const GrBitmapTextGeoProc& cte = args.fGP.cast<GrBitmapTextGeoProc>();
+
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(cte);
+
+ // compute numbers to be hardcoded to convert texture coordinates from int to float
+ SkASSERT(cte.numTextures() == 1);
+ SkDEBUGCODE(GrTexture* atlas = cte.textureAccess(0).getTexture());
+ SkASSERT(atlas && SkIsPow2(atlas->width()) && SkIsPow2(atlas->height()));
+
+ GrGLSLVertToFrag v(kVec2f_GrSLType);
+ varyingHandler->addVarying("TextureCoords", &v, kHigh_GrSLPrecision);
+ vertBuilder->codeAppendf("%s = %s;", v.vsOut(),
+ cte.inTextureCoords()->fName);
+
+ GrGLSLPPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ // Setup pass through color
+ if (!cte.colorIgnored()) {
+ if (cte.hasVertexColor()) {
+ varyingHandler->addPassThroughAttribute(cte.inColor(), args.fOutputColor);
+ } else {
+ this->setupUniformColor(fragBuilder, uniformHandler, args.fOutputColor,
+ &fColorUniform);
+ }
+ }
+
+ // Setup position
+ this->setupPosition(vertBuilder, gpArgs, cte.inPosition()->fName);
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ gpArgs->fPositionVar,
+ cte.inPosition()->fName,
+ cte.localMatrix(),
+ args.fFPCoordTransformHandler);
+
+ if (cte.maskFormat() == kARGB_GrMaskFormat) {
+ fragBuilder->codeAppendf("%s = ", args.fOutputColor);
+ fragBuilder->appendTextureLookupAndModulate(args.fOutputColor,
+ args.fTexSamplers[0],
+ v.fsIn(),
+ kVec2f_GrSLType);
+ fragBuilder->codeAppend(";");
+ fragBuilder->codeAppendf("%s = vec4(1);", args.fOutputCoverage);
+ } else {
+ fragBuilder->codeAppendf("%s = ", args.fOutputCoverage);
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], v.fsIn(), kVec2f_GrSLType);
+ fragBuilder->codeAppend(";");
+ if (cte.maskFormat() == kA565_GrMaskFormat) {
+ // set alpha to be max of rgb coverage
+ fragBuilder->codeAppendf("%s.a = max(max(%s.r, %s.g), %s.b);",
+ args.fOutputCoverage, args.fOutputCoverage,
+ args.fOutputCoverage, args.fOutputCoverage);
+ }
+ }
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& gp,
+ FPCoordTransformIter&& transformIter) override {
+ const GrBitmapTextGeoProc& btgp = gp.cast<GrBitmapTextGeoProc>();
+ if (btgp.color() != fColor && !btgp.hasVertexColor()) {
+ float c[4];
+ GrColorToRGBAFloat(btgp.color(), c);
+ pdman.set4fv(fColorUniform, 1, c);
+ fColor = btgp.color();
+ }
+ this->setTransformDataHelper(btgp.localMatrix(), pdman, &transformIter);
+ }
+
+ static inline void GenKey(const GrGeometryProcessor& proc,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrBitmapTextGeoProc& gp = proc.cast<GrBitmapTextGeoProc>();
+ uint32_t key = 0;
+ key |= gp.usesLocalCoords() && gp.localMatrix().hasPerspective() ? 0x1 : 0x0;
+ key |= gp.colorIgnored() ? 0x2 : 0x0;
+ key |= gp.maskFormat() << 3;
+ b->add32(key);
+
+ // Currently we hardcode numbers to convert atlas coordinates to normalized floating point
+ SkASSERT(gp.numTextures() == 1);
+ GrTexture* atlas = gp.textureAccess(0).getTexture();
+ SkASSERT(atlas);
+ b->add32(atlas->width());
+ b->add32(atlas->height());
+ }
+
+private:
+ GrColor fColor;
+ UniformHandle fColorUniform;
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrBitmapTextGeoProc::GrBitmapTextGeoProc(GrColor color, GrTexture* texture,
+ const GrTextureParams& params, GrMaskFormat format,
+ const SkMatrix& localMatrix, bool usesLocalCoords)
+ : fColor(color)
+ , fLocalMatrix(localMatrix)
+ , fUsesLocalCoords(usesLocalCoords)
+ , fTextureAccess(texture, params)
+ , fInColor(nullptr)
+ , fMaskFormat(format) {
+ this->initClassID<GrBitmapTextGeoProc>();
+ fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType);
+
+ bool hasVertexColor = kA8_GrMaskFormat == fMaskFormat ||
+ kA565_GrMaskFormat == fMaskFormat;
+ if (hasVertexColor) {
+ fInColor = &this->addVertexAttrib("inColor", kVec4ub_GrVertexAttribType);
+ }
+ fInTextureCoords = &this->addVertexAttrib("inTextureCoords", kVec2us_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ this->addTextureAccess(&fTextureAccess);
+}
+
+void GrBitmapTextGeoProc::getGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLBitmapTextGeoProc::GenKey(*this, caps, b);
+}
+
+GrGLSLPrimitiveProcessor* GrBitmapTextGeoProc::createGLSLInstance(const GrGLSLCaps& caps) const {
+ return new GrGLBitmapTextGeoProc();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(GrBitmapTextGeoProc);
+
+sk_sp<GrGeometryProcessor> GrBitmapTextGeoProc::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx :
+ GrProcessorUnitTest::kAlphaTextureIdx;
+ static const SkShader::TileMode kTileModes[] = {
+ SkShader::kClamp_TileMode,
+ SkShader::kRepeat_TileMode,
+ SkShader::kMirror_TileMode,
+ };
+ SkShader::TileMode tileModes[] = {
+ kTileModes[d->fRandom->nextULessThan(SK_ARRAY_COUNT(kTileModes))],
+ kTileModes[d->fRandom->nextULessThan(SK_ARRAY_COUNT(kTileModes))],
+ };
+ GrTextureParams params(tileModes, d->fRandom->nextBool() ? GrTextureParams::kBilerp_FilterMode :
+ GrTextureParams::kNone_FilterMode);
+
+ GrMaskFormat format = kARGB_GrMaskFormat; // init to avoid warning
+ switch (d->fRandom->nextULessThan(3)) {
+ case 0:
+ format = kA8_GrMaskFormat;
+ break;
+ case 1:
+ format = kA565_GrMaskFormat;
+ break;
+ case 2:
+ format = kARGB_GrMaskFormat;
+ break;
+ }
+
+ return GrBitmapTextGeoProc::Make(GrRandomColor(d->fRandom), d->fTextures[texIdx], params,
+ format, GrTest::TestMatrix(d->fRandom),
+ d->fRandom->nextBool());
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrBitmapTextGeoProc.h b/gfx/skia/skia/src/gpu/effects/GrBitmapTextGeoProc.h
new file mode 100644
index 000000000..226ae770e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrBitmapTextGeoProc.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBitmapTextGeoProc_DEFINED
+#define GrBitmapTextGeoProc_DEFINED
+
+#include "GrProcessor.h"
+#include "GrGeometryProcessor.h"
+
+class GrGLBitmapTextGeoProc;
+class GrInvariantOutput;
+
+/**
+ * The output color of this effect is a modulation of the input color and a sample from a texture.
+ * It allows explicit specification of the filtering and wrap modes (GrTextureParams). The input
+ * coords are a custom attribute.
+ */
+class GrBitmapTextGeoProc : public GrGeometryProcessor {
+public:
+ static sk_sp<GrGeometryProcessor> Make(GrColor color, GrTexture* tex, const GrTextureParams& p,
+ GrMaskFormat format, const SkMatrix& localMatrix,
+ bool usesLocalCoords) {
+ return sk_sp<GrGeometryProcessor>(
+ new GrBitmapTextGeoProc(color, tex, p, format, localMatrix, usesLocalCoords));
+ }
+
+ virtual ~GrBitmapTextGeoProc() {}
+
+ const char* name() const override { return "Texture"; }
+
+ const Attribute* inPosition() const { return fInPosition; }
+ const Attribute* inColor() const { return fInColor; }
+ const Attribute* inTextureCoords() const { return fInTextureCoords; }
+ GrMaskFormat maskFormat() const { return fMaskFormat; }
+ GrColor color() const { return fColor; }
+ bool colorIgnored() const { return GrColor_ILLEGAL == fColor; }
+ bool hasVertexColor() const { return SkToBool(fInColor); }
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+
+ void getGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps& caps) const override;
+
+private:
+ GrBitmapTextGeoProc(GrColor, GrTexture* texture, const GrTextureParams& params,
+ GrMaskFormat format, const SkMatrix& localMatrix, bool usesLocalCoords);
+
+ GrColor fColor;
+ SkMatrix fLocalMatrix;
+ bool fUsesLocalCoords;
+ GrTextureAccess fTextureAccess;
+ const Attribute* fInPosition;
+ const Attribute* fInColor;
+ const Attribute* fInTextureCoords;
+ GrMaskFormat fMaskFormat;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrConfigConversionEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrConfigConversionEffect.cpp
new file mode 100644
index 000000000..80a0314ac
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrConfigConversionEffect.cpp
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrConfigConversionEffect.h"
+#include "GrContext.h"
+#include "GrDrawContext.h"
+#include "GrInvariantOutput.h"
+#include "GrSimpleTextureEffect.h"
+#include "SkMatrix.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+
+class GrGLConfigConversionEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs& args) override {
+ const GrConfigConversionEffect& cce = args.fFp.cast<GrConfigConversionEffect>();
+ const GrSwizzle& swizzle = cce.swizzle();
+ GrConfigConversionEffect::PMConversion pmConversion = cce.pmConversion();
+
+ // Using highp for GLES here in order to avoid some precision issues on specific GPUs.
+ GrGLSLShaderVar tmpVar("tmpColor", kVec4f_GrSLType, 0, kHigh_GrSLPrecision);
+ SkString tmpDecl;
+ tmpVar.appendDecl(args.fGLSLCaps, &tmpDecl);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ fragBuilder->codeAppendf("%s;", tmpDecl.c_str());
+
+ fragBuilder->codeAppendf("%s = ", tmpVar.c_str());
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], args.fTransformedCoords[0].c_str(),
+ args.fTransformedCoords[0].getType());
+ fragBuilder->codeAppend(";");
+
+ if (GrConfigConversionEffect::kNone_PMConversion == pmConversion) {
+ SkASSERT(GrSwizzle::RGBA() != swizzle);
+ fragBuilder->codeAppendf("%s = %s.%s;", args.fOutputColor, tmpVar.c_str(),
+ swizzle.c_str());
+ } else {
+ switch (pmConversion) {
+ case GrConfigConversionEffect::kMulByAlpha_RoundUp_PMConversion:
+ fragBuilder->codeAppendf(
+ "%s = vec4(ceil(%s.rgb * %s.a * 255.0) / 255.0, %s.a);",
+ tmpVar.c_str(), tmpVar.c_str(), tmpVar.c_str(), tmpVar.c_str());
+ break;
+ case GrConfigConversionEffect::kMulByAlpha_RoundDown_PMConversion:
+ // Add a compensation(0.001) here to avoid the side effect of the floor operation.
+ // In Intel GPUs, the integer value converted from floor(%s.r * 255.0) / 255.0
+ // is less than the integer value converted from %s.r by 1 when the %s.r is
+ // converted from the integer value 2^n, such as 1, 2, 4, 8, etc.
+ fragBuilder->codeAppendf(
+ "%s = vec4(floor(%s.rgb * %s.a * 255.0 + 0.001) / 255.0, %s.a);",
+ tmpVar.c_str(), tmpVar.c_str(), tmpVar.c_str(), tmpVar.c_str());
+
+ break;
+ case GrConfigConversionEffect::kDivByAlpha_RoundUp_PMConversion:
+ fragBuilder->codeAppendf(
+ "%s = %s.a <= 0.0 ? vec4(0,0,0,0) : vec4(ceil(%s.rgb / %s.a * 255.0) / 255.0, %s.a);",
+ tmpVar.c_str(), tmpVar.c_str(), tmpVar.c_str(), tmpVar.c_str(),
+ tmpVar.c_str());
+ break;
+ case GrConfigConversionEffect::kDivByAlpha_RoundDown_PMConversion:
+ fragBuilder->codeAppendf(
+ "%s = %s.a <= 0.0 ? vec4(0,0,0,0) : vec4(floor(%s.rgb / %s.a * 255.0) / 255.0, %s.a);",
+ tmpVar.c_str(), tmpVar.c_str(), tmpVar.c_str(), tmpVar.c_str(),
+ tmpVar.c_str());
+ break;
+ default:
+ SkFAIL("Unknown conversion op.");
+ break;
+ }
+ fragBuilder->codeAppendf("%s = %s.%s;", args.fOutputColor, tmpVar.c_str(),
+ swizzle.c_str());
+ }
+ SkString modulate;
+ GrGLSLMulVarBy4f(&modulate, args.fOutputColor, args.fInputColor);
+ fragBuilder->codeAppend(modulate.c_str());
+ }
+
+ static inline void GenKey(const GrProcessor& processor, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrConfigConversionEffect& cce = processor.cast<GrConfigConversionEffect>();
+ uint32_t key = (cce.swizzle().asKey()) | (cce.pmConversion() << 16);
+ b->add32(key);
+ }
+
+private:
+ typedef GrGLSLFragmentProcessor INHERITED;
+
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrConfigConversionEffect::GrConfigConversionEffect(GrTexture* texture,
+ const GrSwizzle& swizzle,
+ PMConversion pmConversion,
+ const SkMatrix& matrix)
+ : INHERITED(texture, nullptr, matrix)
+ , fSwizzle(swizzle)
+ , fPMConversion(pmConversion) {
+ this->initClassID<GrConfigConversionEffect>();
+ // We expect to get here with non-BGRA/RGBA only if we're not doing a premul/unpremul
+ // conversion.
+ SkASSERT((kRGBA_8888_GrPixelConfig == texture->config() ||
+ kBGRA_8888_GrPixelConfig == texture->config()) ||
+ kNone_PMConversion == pmConversion);
+ // Why did we pollute our texture cache instead of using a GrSingleTextureEffect?
+ SkASSERT(swizzle != GrSwizzle::RGBA() || kNone_PMConversion != pmConversion);
+}
+
+bool GrConfigConversionEffect::onIsEqual(const GrFragmentProcessor& s) const {
+ const GrConfigConversionEffect& other = s.cast<GrConfigConversionEffect>();
+ return other.fSwizzle == fSwizzle &&
+ other.fPMConversion == fPMConversion;
+}
+
+void GrConfigConversionEffect::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ this->updateInvariantOutputForModulation(inout);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrConfigConversionEffect);
+
+#if !defined(__clang__) && _MSC_FULL_VER >= 190024213
+// Work around VS 2015 Update 3 optimizer bug that causes internal compiler error
+//https://connect.microsoft.com/VisualStudio/feedback/details/3100520/internal-compiler-error
+#pragma optimize("t", off)
+#endif
+
+sk_sp<GrFragmentProcessor> GrConfigConversionEffect::TestCreate(GrProcessorTestData* d) {
+ PMConversion pmConv = static_cast<PMConversion>(d->fRandom->nextULessThan(kPMConversionCnt));
+ GrSwizzle swizzle;
+ do {
+ swizzle = GrSwizzle::CreateRandom(d->fRandom);
+ } while (pmConv == kNone_PMConversion && swizzle == GrSwizzle::RGBA());
+ return sk_sp<GrFragmentProcessor>(
+ new GrConfigConversionEffect(d->fTextures[GrProcessorUnitTest::kSkiaPMTextureIdx],
+ swizzle, pmConv, GrTest::TestMatrix(d->fRandom)));
+}
+
+#if !defined(__clang__) && _MSC_FULL_VER >= 190024213
+// Restore optimization settings.
+#pragma optimize("", on)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrConfigConversionEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLConfigConversionEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrConfigConversionEffect::onCreateGLSLInstance() const {
+ return new GrGLConfigConversionEffect();
+}
+
+
+
+void GrConfigConversionEffect::TestForPreservingPMConversions(GrContext* context,
+ PMConversion* pmToUPMRule,
+ PMConversion* upmToPMRule) {
+ *pmToUPMRule = kNone_PMConversion;
+ *upmToPMRule = kNone_PMConversion;
+ static constexpr int kSize = 256;
+ static constexpr GrPixelConfig kConfig = kRGBA_8888_GrPixelConfig;
+ SkAutoTMalloc<uint32_t> data(kSize * kSize * 3);
+ uint32_t* srcData = data.get();
+ uint32_t* firstRead = data.get() + kSize * kSize;
+ uint32_t* secondRead = data.get() + 2 * kSize * kSize;
+
+ // Fill with every possible premultiplied A, color channel value. There will be 256-y duplicate
+ // values in row y. We set r,g, and b to the same value since they are handled identically.
+ for (int y = 0; y < kSize; ++y) {
+ for (int x = 0; x < kSize; ++x) {
+ uint8_t* color = reinterpret_cast<uint8_t*>(&srcData[kSize*y + x]);
+ color[3] = y;
+ color[2] = SkTMin(x, y);
+ color[1] = SkTMin(x, y);
+ color[0] = SkTMin(x, y);
+ }
+ }
+
+ sk_sp<GrDrawContext> readDC(context->makeDrawContext(SkBackingFit::kExact, kSize, kSize,
+ kConfig, nullptr));
+ sk_sp<GrDrawContext> tempDC(context->makeDrawContext(SkBackingFit::kExact, kSize, kSize,
+ kConfig, nullptr));
+ if (!readDC || !tempDC) {
+ return;
+ }
+ GrSurfaceDesc desc;
+ desc.fWidth = kSize;
+ desc.fHeight = kSize;
+ desc.fConfig = kConfig;
+ SkAutoTUnref<GrTexture> dataTex(context->textureProvider()->createTexture(
+ desc, SkBudgeted::kYes, data, 0));
+ if (!dataTex.get()) {
+ return;
+ }
+
+ static const PMConversion kConversionRules[][2] = {
+ {kDivByAlpha_RoundDown_PMConversion, kMulByAlpha_RoundUp_PMConversion},
+ {kDivByAlpha_RoundUp_PMConversion, kMulByAlpha_RoundDown_PMConversion},
+ };
+
+ bool failed = true;
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(kConversionRules) && failed; ++i) {
+ *pmToUPMRule = kConversionRules[i][0];
+ *upmToPMRule = kConversionRules[i][1];
+
+ static const SkRect kDstRect = SkRect::MakeIWH(kSize, kSize);
+ static const SkRect kSrcRect = SkRect::MakeIWH(1, 1);
+ // We do a PM->UPM draw from dataTex to readTex and read the data. Then we do a UPM->PM draw
+ // from readTex to tempTex followed by a PM->UPM draw to readTex and finally read the data.
+ // We then verify that two reads produced the same values.
+
+ GrPaint paint1;
+ GrPaint paint2;
+ GrPaint paint3;
+ sk_sp<GrFragmentProcessor> pmToUPM1(new GrConfigConversionEffect(
+ dataTex, GrSwizzle::RGBA(), *pmToUPMRule, SkMatrix::I()));
+ sk_sp<GrFragmentProcessor> upmToPM(new GrConfigConversionEffect(
+ readDC->asTexture().get(), GrSwizzle::RGBA(), *upmToPMRule, SkMatrix::I()));
+ sk_sp<GrFragmentProcessor> pmToUPM2(new GrConfigConversionEffect(
+ tempDC->asTexture().get(), GrSwizzle::RGBA(), *pmToUPMRule, SkMatrix::I()));
+
+ paint1.addColorFragmentProcessor(std::move(pmToUPM1));
+ paint1.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ readDC->fillRectToRect(GrNoClip(), paint1, SkMatrix::I(), kDstRect, kSrcRect);
+
+ readDC->asTexture()->readPixels(0, 0, kSize, kSize, kConfig, firstRead);
+
+ paint2.addColorFragmentProcessor(std::move(upmToPM));
+ paint2.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ tempDC->fillRectToRect(GrNoClip(), paint2, SkMatrix::I(), kDstRect, kSrcRect);
+
+ paint3.addColorFragmentProcessor(std::move(pmToUPM2));
+ paint3.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ readDC->fillRectToRect(GrNoClip(), paint3, SkMatrix::I(), kDstRect, kSrcRect);
+
+ readDC->asTexture()->readPixels(0, 0, kSize, kSize, kConfig, secondRead);
+
+ failed = false;
+ for (int y = 0; y < kSize && !failed; ++y) {
+ for (int x = 0; x <= y; ++x) {
+ if (firstRead[kSize * y + x] != secondRead[kSize * y + x]) {
+ failed = true;
+ break;
+ }
+ }
+ }
+ }
+ if (failed) {
+ *pmToUPMRule = kNone_PMConversion;
+ *upmToPMRule = kNone_PMConversion;
+ }
+}
+
+sk_sp<GrFragmentProcessor> GrConfigConversionEffect::Make(GrTexture* texture,
+ const GrSwizzle& swizzle,
+ PMConversion pmConversion,
+ const SkMatrix& matrix) {
+ if (swizzle == GrSwizzle::RGBA() && kNone_PMConversion == pmConversion) {
+ // If we returned a GrConfigConversionEffect that was equivalent to a GrSimpleTextureEffect,
+ // we might pollute our texture cache with redundant shaders. So when no conversion is
+ // requested we instead return a GrSimpleTextureEffect.
+ return GrSimpleTextureEffect::Make(texture, nullptr, matrix);
+ } else {
+ if (kRGBA_8888_GrPixelConfig != texture->config() &&
+ kBGRA_8888_GrPixelConfig != texture->config() &&
+ kNone_PMConversion != pmConversion) {
+ // The PM conversions assume colors are 0..255
+ return nullptr;
+ }
+ return sk_sp<GrFragmentProcessor>(
+ new GrConfigConversionEffect(texture, swizzle, pmConversion, matrix));
+ }
+}
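
For readers following TestForPreservingPMConversions() above, this is a minimal CPU-side sketch of the invariant the GPU test probes, assuming one of the two rule pairs it tries (divide-by-alpha rounds down, multiply-by-alpha rounds up); the helpers and their rounding details are illustrative, not part of the patch:

    #include <cmath>
    #include <cstdint>

    // PM -> UPM, rounding down (kDivByAlpha_RoundDown_PMConversion).
    static uint8_t pm_to_upm_round_down(uint8_t c, uint8_t a) {
        return a ? static_cast<uint8_t>(std::floor(c * 255.0 / a)) : 0;
    }

    // UPM -> PM, rounding up (kMulByAlpha_RoundUp_PMConversion).
    static uint8_t upm_to_pm_round_up(uint8_t c, uint8_t a) {
        return static_cast<uint8_t>(std::ceil(c * a / 255.0));
    }

    // True if a PM->UPM->PM->UPM round trip reproduces the first UPM value for a
    // valid premultiplied pixel (c <= a); this is the property the two
    // readPixels() results are compared for above.
    static bool round_trip_preserved(uint8_t c, uint8_t a) {
        uint8_t upm1 = pm_to_upm_round_down(c, a);
        uint8_t pm2  = upm_to_pm_round_up(upm1, a);
        return pm_to_upm_round_down(pm2, a) == upm1;
    }
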
diff --git a/gfx/skia/skia/src/gpu/effects/GrConfigConversionEffect.h b/gfx/skia/skia/src/gpu/effects/GrConfigConversionEffect.h
new file mode 100644
index 000000000..93b49aa65
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrConfigConversionEffect.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrConfigConversionEffect_DEFINED
+#define GrConfigConversionEffect_DEFINED
+
+#include "GrSingleTextureEffect.h"
+#include "GrSwizzle.h"
+
+class GrInvariantOutput;
+
+/**
+ * This class is used to perform config conversions. Clients may want to read/write data that is
+ * unpremultiplied. Additionally, the channels may also be swizzled for optimal readback/upload
+ * performance.
+ */
+class GrConfigConversionEffect : public GrSingleTextureEffect {
+public:
+ /**
+ * The PM->UPM or UPM->PM conversions to apply.
+ */
+ enum PMConversion {
+ kNone_PMConversion = 0,
+ kMulByAlpha_RoundUp_PMConversion,
+ kMulByAlpha_RoundDown_PMConversion,
+ kDivByAlpha_RoundUp_PMConversion,
+ kDivByAlpha_RoundDown_PMConversion,
+
+ kPMConversionCnt
+ };
+
+ static sk_sp<GrFragmentProcessor> Make(GrTexture*, const GrSwizzle&, PMConversion,
+ const SkMatrix&);
+
+ const char* name() const override { return "Config Conversion"; }
+
+ const GrSwizzle& swizzle() const { return fSwizzle; }
+ PMConversion pmConversion() const { return fPMConversion; }
+
+ // This function determines whether it is possible to choose PM->UPM and UPM->PM conversions
+ // for which any PM->UPM->PM->UPM sequence yields the same UPM value both times. This means that
+ // if pixels are read back to a UPM buffer, written back to the GPU as PM, and read back again,
+ // both reads will produce the same result. This test is quite expensive and should not be run
+ // multiple times for a given context.
+ static void TestForPreservingPMConversions(GrContext* context,
+ PMConversion* PMToUPMRule,
+ PMConversion* UPMToPMRule);
+
+private:
+ GrConfigConversionEffect(GrTexture*,
+ const GrSwizzle&,
+ PMConversion pmConversion,
+ const SkMatrix& matrix);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ GrSwizzle fSwizzle;
+ PMConversion fPMConversion;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef GrSingleTextureEffect INHERITED;
+};
+
+#endif
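
A hedged usage sketch of the Make() factory declared above ('texture' is a hypothetical GrTexture* owned by the caller). As the .cpp notes, requesting no swizzle and no PM conversion deliberately yields a plain GrSimpleTextureEffect rather than a redundant conversion shader:

    sk_sp<GrFragmentProcessor> fp = GrConfigConversionEffect::Make(
            texture, GrSwizzle::RGBA(),
            GrConfigConversionEffect::kNone_PMConversion, SkMatrix::I());
    // With these arguments the returned processor behaves like
    // GrSimpleTextureEffect::Make(texture, nullptr, SkMatrix::I()).
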
diff --git a/gfx/skia/skia/src/gpu/effects/GrConstColorProcessor.cpp b/gfx/skia/skia/src/gpu/effects/GrConstColorProcessor.cpp
new file mode 100644
index 000000000..0684c9cdb
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrConstColorProcessor.cpp
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "effects/GrConstColorProcessor.h"
+#include "GrInvariantOutput.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+
+class GLConstColorProcessor : public GrGLSLFragmentProcessor {
+public:
+ GLConstColorProcessor() : fPrevColor(GrColor_ILLEGAL) {}
+
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const char* colorUni;
+ fColorUniform = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType, kMedium_GrSLPrecision,
+ "constantColor",
+ &colorUni);
+ GrConstColorProcessor::InputMode mode = args.fFp.cast<GrConstColorProcessor>().inputMode();
+ if (!args.fInputColor) {
+ mode = GrConstColorProcessor::kIgnore_InputMode;
+ }
+ switch (mode) {
+ case GrConstColorProcessor::kIgnore_InputMode:
+ fragBuilder->codeAppendf("%s = %s;", args.fOutputColor, colorUni);
+ break;
+ case GrConstColorProcessor::kModulateRGBA_InputMode:
+ fragBuilder->codeAppendf("%s = %s * %s;", args.fOutputColor, args.fInputColor,
+ colorUni);
+ break;
+ case GrConstColorProcessor::kModulateA_InputMode:
+ fragBuilder->codeAppendf("%s = %s.a * %s;", args.fOutputColor, args.fInputColor,
+ colorUni);
+ break;
+ }
+ }
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager& pdm, const GrProcessor& processor) override {
+ GrColor color = processor.cast<GrConstColorProcessor>().color();
+ // We use the "illegal" color value as an uninitialized sentinel. However, it isn't inherently
+ // illegal to use this processor with unpremul colors, so we still handle the case where the
+ // "illegal" color is actually used; we just always re-upload it.
+ if (GrColor_ILLEGAL == color || fPrevColor != color) {
+ static const float scale = 1.f / 255.f;
+ float floatColor[4] = {
+ GrColorUnpackR(color) * scale,
+ GrColorUnpackG(color) * scale,
+ GrColorUnpackB(color) * scale,
+ GrColorUnpackA(color) * scale,
+ };
+ pdm.set4fv(fColorUniform, 1, floatColor);
+ fPrevColor = color;
+ }
+ }
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fColorUniform;
+ GrColor fPrevColor;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrConstColorProcessor::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ if (kIgnore_InputMode == fMode) {
+ inout->setToOther(kRGBA_GrColorComponentFlags, fColor,
+ GrInvariantOutput::kWillNot_ReadInput);
+ } else {
+ GrColor r = GrColorUnpackR(fColor);
+ bool colorIsSingleChannel = r == GrColorUnpackG(fColor) && r == GrColorUnpackB(fColor) &&
+ r == GrColorUnpackA(fColor);
+ if (kModulateRGBA_InputMode == fMode) {
+ if (colorIsSingleChannel) {
+ inout->mulByKnownSingleComponent(r);
+ } else {
+ inout->mulByKnownFourComponents(fColor);
+ }
+ } else {
+ if (colorIsSingleChannel) {
+ inout->mulAlphaByKnownSingleComponent(r);
+ } else {
+ inout->mulAlphaByKnownFourComponents(fColor);
+ }
+ }
+ }
+}
+
+void GrConstColorProcessor::onGetGLSLProcessorKey(const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) const {
+ b->add32(fMode);
+}
+
+GrGLSLFragmentProcessor* GrConstColorProcessor::onCreateGLSLInstance() const {
+ return new GLConstColorProcessor;
+}
+
+bool GrConstColorProcessor::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrConstColorProcessor& that = other.cast<GrConstColorProcessor>();
+ return fMode == that.fMode && fColor == that.fColor;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrConstColorProcessor);
+
+sk_sp<GrFragmentProcessor> GrConstColorProcessor::TestCreate(GrProcessorTestData* d) {
+ GrColor color SK_INIT_TO_AVOID_WARNING;
+ int colorPicker = d->fRandom->nextULessThan(3);
+ switch (colorPicker) {
+ case 0: {
+ uint32_t a = d->fRandom->nextULessThan(0x100);
+ uint32_t r = d->fRandom->nextULessThan(a+1);
+ uint32_t g = d->fRandom->nextULessThan(a+1);
+ uint32_t b = d->fRandom->nextULessThan(a+1);
+ color = GrColorPackRGBA(r, g, b, a);
+ break;
+ }
+ case 1:
+ color = 0;
+ break;
+ case 2:
+ color = d->fRandom->nextULessThan(0x100);
+ color = color | (color << 8) | (color << 16) | (color << 24);
+ break;
+ }
+ InputMode mode = static_cast<InputMode>(d->fRandom->nextULessThan(kInputModeCnt));
+ return GrConstColorProcessor::Make(color, mode);
+}
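
A hedged usage sketch of the processor defined above; Make() and the input-mode enum live in the corresponding header (not part of this hunk), and the premultiplied gray used here is purely illustrative:

    // Multiply the incoming fragment color by a constant 50%-opaque gray.
    GrColor gray = GrColorPackRGBA(0x40, 0x40, 0x40, 0x80);
    sk_sp<GrFragmentProcessor> fp = GrConstColorProcessor::Make(
            gray, GrConstColorProcessor::kModulateRGBA_InputMode);
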
diff --git a/gfx/skia/skia/src/gpu/effects/GrConvexPolyEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrConvexPolyEffect.cpp
new file mode 100644
index 000000000..5ce786799
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrConvexPolyEffect.cpp
@@ -0,0 +1,377 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrConvexPolyEffect.h"
+#include "GrInvariantOutput.h"
+#include "SkPathPriv.h"
+#include "effects/GrConstColorProcessor.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+
+//////////////////////////////////////////////////////////////////////////////
+class AARectEffect : public GrFragmentProcessor {
+public:
+ const SkRect& getRect() const { return fRect; }
+
+ static sk_sp<GrFragmentProcessor> Make(GrPrimitiveEdgeType edgeType, const SkRect& rect) {
+ return sk_sp<GrFragmentProcessor>(new AARectEffect(edgeType, rect));
+ }
+
+ GrPrimitiveEdgeType getEdgeType() const { return fEdgeType; }
+
+ const char* name() const override { return "AARect"; }
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+private:
+ AARectEffect(GrPrimitiveEdgeType edgeType, const SkRect& rect)
+ : fRect(rect), fEdgeType(edgeType) {
+ this->initClassID<AARectEffect>();
+ this->setWillReadFragmentPosition();
+ }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ bool onIsEqual(const GrFragmentProcessor& other) const override {
+ const AARectEffect& aare = other.cast<AARectEffect>();
+ return fRect == aare.fRect;
+ }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ if (fRect.isEmpty()) {
+ // An empty rect will have no coverage anywhere.
+ inout->mulByKnownSingleComponent(0);
+ } else {
+ inout->mulByUnknownSingleComponent();
+ }
+ }
+
+ SkRect fRect;
+ GrPrimitiveEdgeType fEdgeType;
+
+ typedef GrFragmentProcessor INHERITED;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+};
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(AARectEffect);
+
+sk_sp<GrFragmentProcessor> AARectEffect::TestCreate(GrProcessorTestData* d) {
+ SkRect rect = SkRect::MakeLTRB(d->fRandom->nextSScalar1(),
+ d->fRandom->nextSScalar1(),
+ d->fRandom->nextSScalar1(),
+ d->fRandom->nextSScalar1());
+ sk_sp<GrFragmentProcessor> fp;
+ do {
+ GrPrimitiveEdgeType edgeType = static_cast<GrPrimitiveEdgeType>(
+ d->fRandom->nextULessThan(kGrProcessorEdgeTypeCnt));
+
+ fp = AARectEffect::Make(edgeType, rect);
+ } while (nullptr == fp);
+ return fp;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GLAARectEffect : public GrGLSLFragmentProcessor {
+public:
+ GLAARectEffect() {
+ fPrevRect.fLeft = SK_ScalarNaN;
+ }
+
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrGLSLCaps&, GrProcessorKeyBuilder*);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fRectUniform;
+ SkRect fPrevRect;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GLAARectEffect::emitCode(EmitArgs& args) {
+ const AARectEffect& aare = args.fFp.cast<AARectEffect>();
+ const char *rectName;
+ // The rect uniform's xyzw refer to (left + 0.5, top + 0.5, right - 0.5, bottom - 0.5),
+ // respectively.
+ fRectUniform = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType,
+ kDefault_GrSLPrecision,
+ "rect",
+ &rectName);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const char* fragmentPos = fragBuilder->fragmentPosition();
+ if (GrProcessorEdgeTypeIsAA(aare.getEdgeType())) {
+ // The amount of coverage removed in x and y by the edges is computed as a pair of negative
+ // numbers, xSub and ySub.
+ fragBuilder->codeAppend("\t\tfloat xSub, ySub;\n");
+ fragBuilder->codeAppendf("\t\txSub = min(%s.x - %s.x, 0.0);\n", fragmentPos, rectName);
+ fragBuilder->codeAppendf("\t\txSub += min(%s.z - %s.x, 0.0);\n", rectName, fragmentPos);
+ fragBuilder->codeAppendf("\t\tySub = min(%s.y - %s.y, 0.0);\n", fragmentPos, rectName);
+ fragBuilder->codeAppendf("\t\tySub += min(%s.w - %s.y, 0.0);\n", rectName, fragmentPos);
+ // Now compute coverage in x and y and multiply them to get the fraction of the pixel
+ // covered.
+ fragBuilder->codeAppendf("\t\tfloat alpha = (1.0 + max(xSub, -1.0)) * (1.0 + max(ySub, -1.0));\n");
+ } else {
+ fragBuilder->codeAppendf("\t\tfloat alpha = 1.0;\n");
+ fragBuilder->codeAppendf("\t\talpha *= (%s.x - %s.x) > -0.5 ? 1.0 : 0.0;\n", fragmentPos, rectName);
+ fragBuilder->codeAppendf("\t\talpha *= (%s.z - %s.x) > -0.5 ? 1.0 : 0.0;\n", rectName, fragmentPos);
+ fragBuilder->codeAppendf("\t\talpha *= (%s.y - %s.y) > -0.5 ? 1.0 : 0.0;\n", fragmentPos, rectName);
+ fragBuilder->codeAppendf("\t\talpha *= (%s.w - %s.y) > -0.5 ? 1.0 : 0.0;\n", rectName, fragmentPos);
+ }
+
+ if (GrProcessorEdgeTypeIsInverseFill(aare.getEdgeType())) {
+ fragBuilder->codeAppend("\t\talpha = 1.0 - alpha;\n");
+ }
+ fragBuilder->codeAppendf("\t\t%s = %s;\n", args.fOutputColor,
+ (GrGLSLExpr4(args.fInputColor) * GrGLSLExpr1("alpha")).c_str());
+}
+
+void GLAARectEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& processor) {
+ const AARectEffect& aare = processor.cast<AARectEffect>();
+ const SkRect& rect = aare.getRect();
+ if (rect != fPrevRect) {
+ pdman.set4f(fRectUniform, rect.fLeft + 0.5f, rect.fTop + 0.5f,
+ rect.fRight - 0.5f, rect.fBottom - 0.5f);
+ fPrevRect = rect;
+ }
+}
+
+void GLAARectEffect::GenKey(const GrProcessor& processor, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const AARectEffect& aare = processor.cast<AARectEffect>();
+ b->add32(aare.getEdgeType());
+}
+
+void AARectEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const {
+ GLAARectEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* AARectEffect::onCreateGLSLInstance() const {
+ return new GLAARectEffect;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GrGLConvexPolyEffect : public GrGLSLFragmentProcessor {
+public:
+ GrGLConvexPolyEffect() {
+ fPrevEdges[0] = SK_ScalarNaN;
+ }
+
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrGLSLCaps&, GrProcessorKeyBuilder*);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fEdgeUniform;
+ SkScalar fPrevEdges[3 * GrConvexPolyEffect::kMaxEdges];
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GrGLConvexPolyEffect::emitCode(EmitArgs& args) {
+ const GrConvexPolyEffect& cpe = args.fFp.cast<GrConvexPolyEffect>();
+
+ const char *edgeArrayName;
+ fEdgeUniform = args.fUniformHandler->addUniformArray(kFragment_GrShaderFlag,
+ kVec3f_GrSLType,
+ kDefault_GrSLPrecision,
+ "edges",
+ cpe.getEdgeCount(),
+ &edgeArrayName);
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ fragBuilder->codeAppend("\t\tfloat alpha = 1.0;\n");
+ fragBuilder->codeAppend("\t\tfloat edge;\n");
+ const char* fragmentPos = fragBuilder->fragmentPosition();
+ for (int i = 0; i < cpe.getEdgeCount(); ++i) {
+ fragBuilder->codeAppendf("\t\tedge = dot(%s[%d], vec3(%s.x, %s.y, 1));\n",
+ edgeArrayName, i, fragmentPos, fragmentPos);
+ if (GrProcessorEdgeTypeIsAA(cpe.getEdgeType())) {
+ fragBuilder->codeAppend("\t\tedge = clamp(edge, 0.0, 1.0);\n");
+ } else {
+ fragBuilder->codeAppend("\t\tedge = edge >= 0.5 ? 1.0 : 0.0;\n");
+ }
+ fragBuilder->codeAppend("\t\talpha *= edge;\n");
+ }
+
+ if (GrProcessorEdgeTypeIsInverseFill(cpe.getEdgeType())) {
+ fragBuilder->codeAppend("\talpha = 1.0 - alpha;\n");
+ }
+ fragBuilder->codeAppendf("\t%s = %s;\n", args.fOutputColor,
+ (GrGLSLExpr4(args.fInputColor) * GrGLSLExpr1("alpha")).c_str());
+}
+
+void GrGLConvexPolyEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& effect) {
+ const GrConvexPolyEffect& cpe = effect.cast<GrConvexPolyEffect>();
+ size_t byteSize = 3 * cpe.getEdgeCount() * sizeof(SkScalar);
+ if (0 != memcmp(fPrevEdges, cpe.getEdges(), byteSize)) {
+ pdman.set3fv(fEdgeUniform, cpe.getEdgeCount(), cpe.getEdges());
+ memcpy(fPrevEdges, cpe.getEdges(), byteSize);
+ }
+}
+
+void GrGLConvexPolyEffect::GenKey(const GrProcessor& processor, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrConvexPolyEffect& cpe = processor.cast<GrConvexPolyEffect>();
+ GR_STATIC_ASSERT(kGrProcessorEdgeTypeCnt <= 8);
+ uint32_t key = (cpe.getEdgeCount() << 3) | cpe.getEdgeType();
+ b->add32(key);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> GrConvexPolyEffect::Make(GrPrimitiveEdgeType type, const SkPath& path,
+ const SkVector* offset) {
+ if (kHairlineAA_GrProcessorEdgeType == type) {
+ return nullptr;
+ }
+ if (path.getSegmentMasks() != SkPath::kLine_SegmentMask ||
+ !path.isConvex()) {
+ return nullptr;
+ }
+
+ SkPathPriv::FirstDirection dir;
+ // The only way this should fail is if the clip is effectively an infinitely thin line. In that
+ // case nothing is inside the clip. It'd be nice to detect this at a higher level and either
+ // skip the draw or omit the clip element.
+ if (!SkPathPriv::CheapComputeFirstDirection(path, &dir)) {
+ if (GrProcessorEdgeTypeIsInverseFill(type)) {
+ return GrConstColorProcessor::Make(0xFFFFFFFF,
+ GrConstColorProcessor::kModulateRGBA_InputMode);
+ }
+ return GrConstColorProcessor::Make(0, GrConstColorProcessor::kIgnore_InputMode);
+ }
+
+ SkVector t;
+ if (nullptr == offset) {
+ t.set(0, 0);
+ } else {
+ t = *offset;
+ }
+
+ SkScalar edges[3 * kMaxEdges];
+ SkPoint pts[4];
+ SkPath::Verb verb;
+ SkPath::Iter iter(path, true);
+
+ // SkPath considers itself convex so long as there is a convex contour within it,
+ // regardless of any degenerate contours such as a string of moveTos before it.
+ // Iterate here to consume any degenerate contours and only process the points
+ // on the actual convex contour.
+ int n = 0;
+ while ((verb = iter.next(pts, true, true)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ SkASSERT(n == 0);
+ case SkPath::kClose_Verb:
+ break;
+ case SkPath::kLine_Verb: {
+ if (n >= kMaxEdges) {
+ return nullptr;
+ }
+ SkVector v = pts[1] - pts[0];
+ v.normalize();
+ if (SkPathPriv::kCCW_FirstDirection == dir) {
+ edges[3 * n] = v.fY;
+ edges[3 * n + 1] = -v.fX;
+ } else {
+ edges[3 * n] = -v.fY;
+ edges[3 * n + 1] = v.fX;
+ }
+ SkPoint p = pts[1] + t;
+ edges[3 * n + 2] = -(edges[3 * n] * p.fX + edges[3 * n + 1] * p.fY);
+ ++n;
+ break;
+ }
+ default:
+ return nullptr;
+ }
+ }
+
+ if (path.isInverseFillType()) {
+ type = GrInvertProcessorEdgeType(type);
+ }
+ return Make(type, n, edges);
+}
+
+sk_sp<GrFragmentProcessor> GrConvexPolyEffect::Make(GrPrimitiveEdgeType edgeType,
+ const SkRect& rect) {
+ if (kHairlineAA_GrProcessorEdgeType == edgeType) {
+ return nullptr;
+ }
+ return AARectEffect::Make(edgeType, rect);
+}
+
+GrConvexPolyEffect::~GrConvexPolyEffect() {}
+
+void GrConvexPolyEffect::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ inout->mulByUnknownSingleComponent();
+}
+
+void GrConvexPolyEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLConvexPolyEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrConvexPolyEffect::onCreateGLSLInstance() const {
+ return new GrGLConvexPolyEffect;
+}
+
+GrConvexPolyEffect::GrConvexPolyEffect(GrPrimitiveEdgeType edgeType, int n, const SkScalar edges[])
+ : fEdgeType(edgeType)
+ , fEdgeCount(n) {
+ this->initClassID<GrConvexPolyEffect>();
+ // Factory function should have already ensured this.
+ SkASSERT(n <= kMaxEdges);
+ memcpy(fEdges, edges, 3 * n * sizeof(SkScalar));
+ // Outset the edges by 0.5 so that a pixel with center on an edge is 50% covered in the AA case
+ // and 100% covered in the non-AA case.
+ for (int i = 0; i < n; ++i) {
+ fEdges[3 * i + 2] += SK_ScalarHalf;
+ }
+ this->setWillReadFragmentPosition();
+}
+
+bool GrConvexPolyEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const GrConvexPolyEffect& cpe = other.cast<GrConvexPolyEffect>();
+ // ignore the fact that 0 == -0 and just use memcmp.
+ return (cpe.fEdgeType == fEdgeType && cpe.fEdgeCount == fEdgeCount &&
+ 0 == memcmp(cpe.fEdges, fEdges, 3 * fEdgeCount * sizeof(SkScalar)));
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrConvexPolyEffect);
+
+sk_sp<GrFragmentProcessor> GrConvexPolyEffect::TestCreate(GrProcessorTestData* d) {
+ int count = d->fRandom->nextULessThan(kMaxEdges) + 1;
+ SkScalar edges[kMaxEdges * 3];
+ for (int i = 0; i < 3 * count; ++i) {
+ edges[i] = d->fRandom->nextSScalar1();
+ }
+
+ sk_sp<GrFragmentProcessor> fp;
+ do {
+ GrPrimitiveEdgeType edgeType = static_cast<GrPrimitiveEdgeType>(
+ d->fRandom->nextULessThan(kGrProcessorEdgeTypeCnt));
+ fp = GrConvexPolyEffect::Make(edgeType, count, edges);
+ } while (nullptr == fp);
+ return fp;
+}
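
To make the edge packing in Make(path) above easier to follow, here is the same computation for a single edge of a CCW contour in isolation (p0 and p1 are consecutive contour points; the optional offset is omitted). The triple (a, b, c) is chosen so that a*x + b*y + c >= 0 holds on the inside of the polygon, matching the convention documented in the header:

    SkVector v = p1 - p0;
    v.normalize();                         // (a, b) must be a unit vector
    SkScalar a = v.fY;                     // inward-facing normal for a CCW contour
    SkScalar b = -v.fX;
    SkScalar c = -(a * p1.fX + b * p1.fY); // the edge line passes through p1
    // The constructor later adds SK_ScalarHalf to c so that a pixel whose
    // center lies exactly on the edge receives 50% coverage in the AA case.
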
diff --git a/gfx/skia/skia/src/gpu/effects/GrConvexPolyEffect.h b/gfx/skia/skia/src/gpu/effects/GrConvexPolyEffect.h
new file mode 100644
index 000000000..8fc76feb5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrConvexPolyEffect.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrConvexPolyEffect_DEFINED
+#define GrConvexPolyEffect_DEFINED
+
+#include "GrCaps.h"
+#include "GrFragmentProcessor.h"
+#include "GrProcessor.h"
+#include "GrTypesPriv.h"
+
+class GrInvariantOutput;
+class SkPath;
+
+/**
+ * An effect that renders a convex polygon. It is intended to be used as a coverage effect.
+ * Bounding geometry is rendered and the effect computes coverage based on the fragment's
+ * position relative to the polygon.
+ */
+class GrConvexPolyEffect : public GrFragmentProcessor {
+public:
+ enum {
+ kMaxEdges = 8,
+ };
+
+ /**
+ * edges is a set of n edge equations where n is limited to kMaxEdges. It contains 3*n values.
+ * The edges should form a convex polygon. The positive half-plane is considered to be the
+ * inside. The equations should be normalized such that the first two coefficients are a unit
+ * 2d vector.
+ *
+ * Currently the edges are specified in device space. In the future we may prefer to specify
+ * them in src space. There are a number of ways this could be accomplished but we'd probably
+ * have to modify the effect/shaderbuilder interface to make it possible (e.g. give access
+ * to the view matrix or untransformed positions in the fragment shader).
+ */
+ static sk_sp<GrFragmentProcessor> Make(GrPrimitiveEdgeType edgeType, int n,
+ const SkScalar edges[]) {
+ if (n <= 0 || n > kMaxEdges || kHairlineAA_GrProcessorEdgeType == edgeType) {
+ return nullptr;
+ }
+ return sk_sp<GrFragmentProcessor>(new GrConvexPolyEffect(edgeType, n, edges));
+ }
+
+ /**
+ * Creates an effect that clips against the path. If the path is not a convex polygon, is
+ * inverse filled, or has too many edges, this will return nullptr. If offset is non-null,
+ * the path is translated by that vector.
+ */
+ static sk_sp<GrFragmentProcessor> Make(GrPrimitiveEdgeType, const SkPath&,
+ const SkVector* offset = nullptr);
+
+ /**
+ * Creates an effect that fills inside the rect with AA edges.
+ */
+ static sk_sp<GrFragmentProcessor> Make(GrPrimitiveEdgeType, const SkRect&);
+
+ virtual ~GrConvexPolyEffect();
+
+ const char* name() const override { return "ConvexPoly"; }
+
+ GrPrimitiveEdgeType getEdgeType() const { return fEdgeType; }
+
+ int getEdgeCount() const { return fEdgeCount; }
+
+ const SkScalar* getEdges() const { return fEdges; }
+
+private:
+ GrConvexPolyEffect(GrPrimitiveEdgeType edgeType, int n, const SkScalar edges[]);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor& other) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ GrPrimitiveEdgeType fEdgeType;
+ int fEdgeCount;
+ SkScalar fEdges[3 * kMaxEdges];
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrConvolutionEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrConvolutionEffect.cpp
new file mode 100644
index 000000000..59f7ab19e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrConvolutionEffect.cpp
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrConvolutionEffect.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+
+// For brevity
+typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+
+class GrGLConvolutionEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrGLSLCaps&, GrProcessorKeyBuilder*);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager& pdman, const GrProcessor&) override;
+
+private:
+ UniformHandle fKernelUni;
+ UniformHandle fImageIncrementUni;
+ UniformHandle fBoundsUni;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GrGLConvolutionEffect::emitCode(EmitArgs& args) {
+ const GrConvolutionEffect& ce = args.fFp.cast<GrConvolutionEffect>();
+
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ fImageIncrementUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType, kDefault_GrSLPrecision,
+ "ImageIncrement");
+ if (ce.useBounds()) {
+ fBoundsUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType, kDefault_GrSLPrecision,
+ "Bounds");
+ }
+
+ int width = Gr1DKernelEffect::WidthFromRadius(ce.radius());
+
+ int arrayCount = (width + 3) / 4;
+ SkASSERT(4 * arrayCount >= width);
+
+ fKernelUni = uniformHandler->addUniformArray(kFragment_GrShaderFlag,
+ kVec4f_GrSLType, kDefault_GrSLPrecision,
+ "Kernel", arrayCount);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString coords2D = fragBuilder->ensureCoords2D(args.fTransformedCoords[0]);
+
+ fragBuilder->codeAppendf("%s = vec4(0, 0, 0, 0);", args.fOutputColor);
+
+ const GrGLSLShaderVar& kernel = uniformHandler->getUniformVariable(fKernelUni);
+ const char* imgInc = uniformHandler->getUniformCStr(fImageIncrementUni);
+
+ fragBuilder->codeAppendf("vec2 coord = %s - %d.0 * %s;", coords2D.c_str(), ce.radius(), imgInc);
+
+ // Manually unroll loop because some drivers don't; yields 20-30% speedup.
+ const char* kVecSuffix[4] = { ".x", ".y", ".z", ".w" };
+ for (int i = 0; i < width; i++) {
+ SkString index;
+ SkString kernelIndex;
+ index.appendS32(i/4);
+ kernel.appendArrayAccess(index.c_str(), &kernelIndex);
+ kernelIndex.append(kVecSuffix[i & 0x3]);
+
+ if (ce.useBounds()) {
+ // We used to compute a bool indicating whether we're in bounds or not, cast it to a
+ // float, and then multiply weight*texture_sample by that float. However, the Adreno 430
+ // seems to have a bug that causes corruption.
+ const char* bounds = uniformHandler->getUniformCStr(fBoundsUni);
+ const char* component = ce.direction() == Gr1DKernelEffect::kY_Direction ? "y" : "x";
+ fragBuilder->codeAppendf("if (coord.%s >= %s.x && coord.%s <= %s.y) {",
+ component, bounds, component, bounds);
+ }
+ fragBuilder->codeAppendf("\t\t%s += ", args.fOutputColor);
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], "coord");
+ fragBuilder->codeAppendf(" * %s;\n", kernelIndex.c_str());
+ if (ce.useBounds()) {
+ fragBuilder->codeAppend("}");
+ }
+ fragBuilder->codeAppendf("\t\tcoord += %s;\n", imgInc);
+ }
+
+ SkString modulate;
+ GrGLSLMulVarBy4f(&modulate, args.fOutputColor, args.fInputColor);
+ fragBuilder->codeAppend(modulate.c_str());
+}
+
+void GrGLConvolutionEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& processor) {
+ const GrConvolutionEffect& conv = processor.cast<GrConvolutionEffect>();
+ GrTexture& texture = *conv.texture(0);
+
+ float imageIncrement[2] = { 0 };
+ float ySign = texture.origin() != kTopLeft_GrSurfaceOrigin ? 1.0f : -1.0f;
+ switch (conv.direction()) {
+ case Gr1DKernelEffect::kX_Direction:
+ imageIncrement[0] = 1.0f / texture.width();
+ break;
+ case Gr1DKernelEffect::kY_Direction:
+ imageIncrement[1] = ySign / texture.height();
+ break;
+ default:
+ SkFAIL("Unknown filter direction.");
+ }
+ pdman.set2fv(fImageIncrementUni, 1, imageIncrement);
+ if (conv.useBounds()) {
+ const float* bounds = conv.bounds();
+ if (Gr1DKernelEffect::kY_Direction == conv.direction() &&
+ texture.origin() != kTopLeft_GrSurfaceOrigin) {
+ pdman.set2f(fBoundsUni, 1.0f - bounds[1], 1.0f - bounds[0]);
+ } else {
+ pdman.set2f(fBoundsUni, bounds[0], bounds[1]);
+ }
+ }
+ int width = Gr1DKernelEffect::WidthFromRadius(conv.radius());
+
+ int arrayCount = (width + 3) / 4;
+ SkASSERT(4 * arrayCount >= width);
+ pdman.set4fv(fKernelUni, arrayCount, conv.kernel());
+}
+
+void GrGLConvolutionEffect::GenKey(const GrProcessor& processor, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrConvolutionEffect& conv = processor.cast<GrConvolutionEffect>();
+ uint32_t key = conv.radius();
+ key <<= 2;
+ if (conv.useBounds()) {
+ key |= 0x2;
+ key |= GrConvolutionEffect::kY_Direction == conv.direction() ? 0x1 : 0x0;
+ }
+ b->add32(key);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrConvolutionEffect::GrConvolutionEffect(GrTexture* texture,
+ Direction direction,
+ int radius,
+ const float* kernel,
+ bool useBounds,
+ float bounds[2])
+ : INHERITED(texture, direction, radius), fUseBounds(useBounds) {
+ this->initClassID<GrConvolutionEffect>();
+ SkASSERT(radius <= kMaxKernelRadius);
+ SkASSERT(kernel);
+ int width = this->width();
+ for (int i = 0; i < width; i++) {
+ fKernel[i] = kernel[i];
+ }
+ memcpy(fBounds, bounds, sizeof(fBounds));
+}
+
+GrConvolutionEffect::GrConvolutionEffect(GrTexture* texture,
+ Direction direction,
+ int radius,
+ float gaussianSigma,
+ bool useBounds,
+ float bounds[2])
+ : INHERITED(texture, direction, radius), fUseBounds(useBounds) {
+ this->initClassID<GrConvolutionEffect>();
+ SkASSERT(radius <= kMaxKernelRadius);
+ int width = this->width();
+
+ float sum = 0.0f;
+ float denom = 1.0f / (2.0f * gaussianSigma * gaussianSigma);
+ for (int i = 0; i < width; ++i) {
+ float x = static_cast<float>(i - this->radius());
+ // Note that the constant term (1/sqrt(2*pi*sigma^2)) of the Gaussian
+ // is dropped here, since we renormalize the kernel below.
+ fKernel[i] = sk_float_exp(- x * x * denom);
+ sum += fKernel[i];
+ }
+ // Normalize the kernel
+ float scale = 1.0f / sum;
+ for (int i = 0; i < width; ++i) {
+ fKernel[i] *= scale;
+ }
+ memcpy(fBounds, bounds, sizeof(fBounds));
+}
+
+GrConvolutionEffect::~GrConvolutionEffect() {
+}
+
+void GrConvolutionEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLConvolutionEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrConvolutionEffect::onCreateGLSLInstance() const {
+ return new GrGLConvolutionEffect;
+}
+
+bool GrConvolutionEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrConvolutionEffect& s = sBase.cast<GrConvolutionEffect>();
+ return (this->radius() == s.radius() &&
+ this->direction() == s.direction() &&
+ this->useBounds() == s.useBounds() &&
+ 0 == memcmp(fBounds, s.fBounds, sizeof(fBounds)) &&
+ 0 == memcmp(fKernel, s.fKernel, this->width() * sizeof(float)));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrConvolutionEffect);
+
+sk_sp<GrFragmentProcessor> GrConvolutionEffect::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx :
+ GrProcessorUnitTest::kAlphaTextureIdx;
+ Direction dir = d->fRandom->nextBool() ? kX_Direction : kY_Direction;
+ int radius = d->fRandom->nextRangeU(1, kMaxKernelRadius);
+ float kernel[kMaxKernelWidth];
+ for (size_t i = 0; i < SK_ARRAY_COUNT(kernel); ++i) {
+ kernel[i] = d->fRandom->nextSScalar1();
+ }
+ float bounds[2];
+ for (size_t i = 0; i < SK_ARRAY_COUNT(bounds); ++i) {
+ bounds[i] = d->fRandom->nextF();
+ }
+
+ bool useBounds = d->fRandom->nextBool();
+ return GrConvolutionEffect::Make(d->fTextures[texIdx],
+ dir,
+ radius,
+ kernel,
+ useBounds,
+ bounds);
+}
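
A hedged usage sketch of the Gaussian factory exercised above ('texture' is a hypothetical GrTexture*; the sigma and bounds values are illustrative). The bounds pair is expressed in normalized texture coordinates along the filter direction, as onSetData() implies:

    // Horizontal Gaussian blur pass: halfWidth = 12 (kMaxKernelRadius) gives a
    // 25-tap kernel; these bounds clamp sampling to the full texture extent.
    float bounds[2] = { 0.0f, 1.0f };
    sk_sp<GrFragmentProcessor> blurX = GrConvolutionEffect::MakeGaussian(
            texture, Gr1DKernelEffect::kX_Direction, /*halfWidth=*/12,
            /*gaussianSigma=*/3.0f, /*useBounds=*/true, bounds);
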
diff --git a/gfx/skia/skia/src/gpu/effects/GrConvolutionEffect.h b/gfx/skia/skia/src/gpu/effects/GrConvolutionEffect.h
new file mode 100644
index 000000000..c353542d0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrConvolutionEffect.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrConvolutionEffect_DEFINED
+#define GrConvolutionEffect_DEFINED
+
+#include "Gr1DKernelEffect.h"
+#include "GrInvariantOutput.h"
+
+/**
+ * A convolution effect. The kernel is specified as an array of 2 * half-width
+ * + 1 weights. Each texel is multiplied by its weight and summed to determine
+ * the output color. The output color is modulated by the input color.
+ */
+class GrConvolutionEffect : public Gr1DKernelEffect {
+
+public:
+
+ /// Convolve with an arbitrary user-specified kernel
+ static sk_sp<GrFragmentProcessor> Make(GrTexture* tex,
+ Direction dir,
+ int halfWidth,
+ const float* kernel,
+ bool useBounds,
+ float bounds[2]) {
+ return sk_sp<GrFragmentProcessor>(
+ new GrConvolutionEffect(tex, dir, halfWidth, kernel, useBounds, bounds));
+ }
+
+ /// Convolve with a Gaussian kernel
+ static sk_sp<GrFragmentProcessor> MakeGaussian(GrTexture* tex,
+ Direction dir,
+ int halfWidth,
+ float gaussianSigma,
+ bool useBounds,
+ float bounds[2]) {
+ return sk_sp<GrFragmentProcessor>(
+ new GrConvolutionEffect(tex, dir, halfWidth, gaussianSigma, useBounds, bounds));
+ }
+
+ virtual ~GrConvolutionEffect();
+
+ const float* kernel() const { return fKernel; }
+
+ const float* bounds() const { return fBounds; }
+ bool useBounds() const { return fUseBounds; }
+
+ const char* name() const override { return "Convolution"; }
+
+ enum {
+ // This was decided based on the min allowed value for the max texture
+ // samples per fragment program run in DX9SM2 (32). A sigma param of 4.0
+ // on a blur filter gives a kernel width of 25 while a sigma of 5.0
+ // would exceed a 32 wide kernel.
+ kMaxKernelRadius = 12,
+ // With C++11 we could have a constexpr version of WidthFromRadius()
+ // and not have to duplicate this calculation.
+ kMaxKernelWidth = 2 * kMaxKernelRadius + 1,
+ };
+
+protected:
+
+ float fKernel[kMaxKernelWidth];
+ bool fUseBounds;
+ float fBounds[2];
+
+private:
+ GrConvolutionEffect(GrTexture*, Direction,
+ int halfWidth,
+ const float* kernel,
+ bool useBounds,
+ float bounds[2]);
+
+ /// Convolve with a Gaussian kernel
+ GrConvolutionEffect(GrTexture*, Direction,
+ int halfWidth,
+ float gaussianSigma,
+ bool useBounds,
+ float bounds[2]);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ // If the texture were opaque we could know the output color if we knew the sum of the
+ // kernel values.
+ inout->mulByUnknownFourComponents();
+ }
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef Gr1DKernelEffect INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrCoverageSetOpXP.cpp b/gfx/skia/skia/src/gpu/effects/GrCoverageSetOpXP.cpp
new file mode 100644
index 000000000..c6abc696c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrCoverageSetOpXP.cpp
@@ -0,0 +1,344 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "effects/GrCoverageSetOpXP.h"
+#include "GrCaps.h"
+#include "GrColor.h"
+#include "GrDrawContext.h"
+#include "GrPipeline.h"
+#include "GrProcessor.h"
+#include "GrProcOptInfo.h"
+#include "glsl/GrGLSLBlend.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "glsl/GrGLSLXferProcessor.h"
+
+class CoverageSetOpXP : public GrXferProcessor {
+public:
+ static GrXferProcessor* Create(SkRegion::Op regionOp, bool invertCoverage) {
+ return new CoverageSetOpXP(regionOp, invertCoverage);
+ }
+
+ ~CoverageSetOpXP() override;
+
+ const char* name() const override { return "Coverage Set Op"; }
+
+ GrGLSLXferProcessor* createGLSLInstance() const override;
+
+ bool invertCoverage() const { return fInvertCoverage; }
+
+private:
+ CoverageSetOpXP(SkRegion::Op regionOp, bool invertCoverage);
+
+ GrXferProcessor::OptFlags onGetOptimizations(const GrPipelineOptimizations& optimizations,
+ bool doesStencilWrite,
+ GrColor* color,
+ const GrCaps& caps) const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ void onGetBlendInfo(GrXferProcessor::BlendInfo* blendInfo) const override;
+
+ bool onIsEqual(const GrXferProcessor& xpBase) const override {
+ const CoverageSetOpXP& xp = xpBase.cast<CoverageSetOpXP>();
+ return (fRegionOp == xp.fRegionOp &&
+ fInvertCoverage == xp.fInvertCoverage);
+ }
+
+ SkRegion::Op fRegionOp;
+ bool fInvertCoverage;
+
+ typedef GrXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GLCoverageSetOpXP : public GrGLSLXferProcessor {
+public:
+ GLCoverageSetOpXP(const GrProcessor&) {}
+
+ ~GLCoverageSetOpXP() override {}
+
+ static void GenKey(const GrProcessor& processor, const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) {
+ const CoverageSetOpXP& xp = processor.cast<CoverageSetOpXP>();
+ uint32_t key = xp.invertCoverage() ? 0x0 : 0x1;
+ b->add32(key);
+ }
+
+private:
+ void emitOutputsForBlendState(const EmitArgs& args) override {
+ const CoverageSetOpXP& xp = args.fXP.cast<CoverageSetOpXP>();
+ GrGLSLXPFragmentBuilder* fragBuilder = args.fXPFragBuilder;
+
+ if (xp.invertCoverage()) {
+ fragBuilder->codeAppendf("%s = 1.0 - %s;", args.fOutputPrimary, args.fInputCoverage);
+ } else {
+ fragBuilder->codeAppendf("%s = %s;", args.fOutputPrimary, args.fInputCoverage);
+ }
+ }
+
+ void onSetData(const GrGLSLProgramDataManager&, const GrXferProcessor&) override {}
+
+ typedef GrGLSLXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+CoverageSetOpXP::CoverageSetOpXP(SkRegion::Op regionOp, bool invertCoverage)
+ : fRegionOp(regionOp)
+ , fInvertCoverage(invertCoverage) {
+ this->initClassID<CoverageSetOpXP>();
+}
+
+CoverageSetOpXP::~CoverageSetOpXP() {
+}
+
+void CoverageSetOpXP::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GLCoverageSetOpXP::GenKey(*this, caps, b);
+}
+
+GrGLSLXferProcessor* CoverageSetOpXP::createGLSLInstance() const {
+ return new GLCoverageSetOpXP(*this);
+}
+
+GrXferProcessor::OptFlags
+CoverageSetOpXP::onGetOptimizations(const GrPipelineOptimizations& optimizations,
+ bool doesStencilWrite,
+ GrColor* color,
+ const GrCaps& caps) const {
+ // We never look at the color input
+ return GrXferProcessor::kIgnoreColor_OptFlag;
+}
+
+void CoverageSetOpXP::onGetBlendInfo(GrXferProcessor::BlendInfo* blendInfo) const {
+ switch (fRegionOp) {
+ case SkRegion::kReplace_Op:
+ blendInfo->fSrcBlend = kOne_GrBlendCoeff;
+ blendInfo->fDstBlend = kZero_GrBlendCoeff;
+ break;
+ case SkRegion::kIntersect_Op:
+ blendInfo->fSrcBlend = kDC_GrBlendCoeff;
+ blendInfo->fDstBlend = kZero_GrBlendCoeff;
+ break;
+ case SkRegion::kUnion_Op:
+ blendInfo->fSrcBlend = kOne_GrBlendCoeff;
+ blendInfo->fDstBlend = kISC_GrBlendCoeff;
+ break;
+ case SkRegion::kXOR_Op:
+ blendInfo->fSrcBlend = kIDC_GrBlendCoeff;
+ blendInfo->fDstBlend = kISC_GrBlendCoeff;
+ break;
+ case SkRegion::kDifference_Op:
+ blendInfo->fSrcBlend = kZero_GrBlendCoeff;
+ blendInfo->fDstBlend = kISC_GrBlendCoeff;
+ break;
+ case SkRegion::kReverseDifference_Op:
+ blendInfo->fSrcBlend = kIDC_GrBlendCoeff;
+ blendInfo->fDstBlend = kZero_GrBlendCoeff;
+ break;
+ }
+ blendInfo->fBlendConstant = 0;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class ShaderCSOXferProcessor : public GrXferProcessor {
+public:
+ ShaderCSOXferProcessor(const DstTexture* dstTexture,
+ bool hasMixedSamples,
+ SkRegion::Op regionOp,
+ bool invertCoverage)
+ : INHERITED(dstTexture, true, hasMixedSamples)
+ , fRegionOp(regionOp)
+ , fInvertCoverage(invertCoverage) {
+ this->initClassID<ShaderCSOXferProcessor>();
+ }
+
+ const char* name() const override { return "Coverage Set Op Shader"; }
+
+ GrGLSLXferProcessor* createGLSLInstance() const override;
+
+ SkRegion::Op regionOp() const { return fRegionOp; }
+ bool invertCoverage() const { return fInvertCoverage; }
+
+private:
+ GrXferProcessor::OptFlags onGetOptimizations(const GrPipelineOptimizations&, bool, GrColor*,
+ const GrCaps&) const override {
+ // We never look at the color input
+ return GrXferProcessor::kIgnoreColor_OptFlag;
+ }
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ bool onIsEqual(const GrXferProcessor& xpBase) const override {
+ const ShaderCSOXferProcessor& xp = xpBase.cast<ShaderCSOXferProcessor>();
+ return (fRegionOp == xp.fRegionOp &&
+ fInvertCoverage == xp.fInvertCoverage);
+ }
+
+ SkRegion::Op fRegionOp;
+ bool fInvertCoverage;
+
+ typedef GrXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GLShaderCSOXferProcessor : public GrGLSLXferProcessor {
+public:
+ static void GenKey(const GrProcessor& processor, GrProcessorKeyBuilder* b) {
+ const ShaderCSOXferProcessor& xp = processor.cast<ShaderCSOXferProcessor>();
+ b->add32(xp.regionOp());
+ uint32_t key = xp.invertCoverage() ? 0x0 : 0x1;
+ b->add32(key);
+ }
+
+private:
+ void emitBlendCodeForDstRead(GrGLSLXPFragmentBuilder* fragBuilder,
+ GrGLSLUniformHandler* uniformHandler,
+ const char* srcColor,
+ const char* srcCoverage,
+ const char* dstColor,
+ const char* outColor,
+ const char* outColorSecondary,
+ const GrXferProcessor& proc) override {
+ const ShaderCSOXferProcessor& xp = proc.cast<ShaderCSOXferProcessor>();
+
+ if (xp.invertCoverage()) {
+ fragBuilder->codeAppendf("%s = 1.0 - %s;", outColor, srcCoverage);
+ } else {
+ fragBuilder->codeAppendf("%s = %s;", outColor, srcCoverage);
+ }
+
+ GrGLSLBlend::AppendRegionOp(fragBuilder, outColor, dstColor, outColor, xp.regionOp());
+ }
+
+ void onSetData(const GrGLSLProgramDataManager&, const GrXferProcessor&) override {}
+
+ typedef GrGLSLXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+void ShaderCSOXferProcessor::onGetGLSLProcessorKey(const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) const {
+ GLShaderCSOXferProcessor::GenKey(*this, b);
+}
+
+GrGLSLXferProcessor* ShaderCSOXferProcessor::createGLSLInstance() const {
+ return new GLShaderCSOXferProcessor;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+//
+GrCoverageSetOpXPFactory::GrCoverageSetOpXPFactory(SkRegion::Op regionOp, bool invertCoverage)
+ : fRegionOp(regionOp)
+ , fInvertCoverage(invertCoverage) {
+ this->initClassID<GrCoverageSetOpXPFactory>();
+}
+
+sk_sp<GrXPFactory> GrCoverageSetOpXPFactory::Make(SkRegion::Op regionOp, bool invertCoverage) {
+ switch (regionOp) {
+ case SkRegion::kReplace_Op: {
+ if (invertCoverage) {
+ static GrCoverageSetOpXPFactory gReplaceCDXPFI(regionOp, invertCoverage);
+ return sk_sp<GrXPFactory>(SkRef(&gReplaceCDXPFI));
+ } else {
+ static GrCoverageSetOpXPFactory gReplaceCDXPF(regionOp, invertCoverage);
+ return sk_sp<GrXPFactory>(SkRef(&gReplaceCDXPF));
+ }
+ break;
+ }
+ case SkRegion::kIntersect_Op: {
+ if (invertCoverage) {
+ static GrCoverageSetOpXPFactory gIntersectCDXPFI(regionOp, invertCoverage);
+ return sk_sp<GrXPFactory>(SkRef(&gIntersectCDXPFI));
+ } else {
+ static GrCoverageSetOpXPFactory gIntersectCDXPF(regionOp, invertCoverage);
+ return sk_sp<GrXPFactory>(SkRef(&gIntersectCDXPF));
+ }
+ break;
+ }
+ case SkRegion::kUnion_Op: {
+ if (invertCoverage) {
+ static GrCoverageSetOpXPFactory gUnionCDXPFI(regionOp, invertCoverage);
+ return sk_sp<GrXPFactory>(SkRef(&gUnionCDXPFI));
+ } else {
+ static GrCoverageSetOpXPFactory gUnionCDXPF(regionOp, invertCoverage);
+ return sk_sp<GrXPFactory>(SkRef(&gUnionCDXPF));
+ }
+ break;
+ }
+ case SkRegion::kXOR_Op: {
+ if (invertCoverage) {
+ static GrCoverageSetOpXPFactory gXORCDXPFI(regionOp, invertCoverage);
+ return sk_sp<GrXPFactory>(SkRef(&gXORCDXPFI));
+ } else {
+ static GrCoverageSetOpXPFactory gXORCDXPF(regionOp, invertCoverage);
+ return sk_sp<GrXPFactory>(SkRef(&gXORCDXPF));
+ }
+ break;
+ }
+ case SkRegion::kDifference_Op: {
+ if (invertCoverage) {
+ static GrCoverageSetOpXPFactory gDifferenceCDXPFI(regionOp, invertCoverage);
+ return sk_sp<GrXPFactory>(SkRef(&gDifferenceCDXPFI));
+ } else {
+ static GrCoverageSetOpXPFactory gDifferenceCDXPF(regionOp, invertCoverage);
+ return sk_sp<GrXPFactory>(SkRef(&gDifferenceCDXPF));
+ }
+ break;
+ }
+ case SkRegion::kReverseDifference_Op: {
+ if (invertCoverage) {
+ static GrCoverageSetOpXPFactory gRevDiffCDXPFI(regionOp, invertCoverage);
+ return sk_sp<GrXPFactory>(SkRef(&gRevDiffCDXPFI));
+ } else {
+ static GrCoverageSetOpXPFactory gRevDiffCDXPF(regionOp, invertCoverage);
+ return sk_sp<GrXPFactory>(SkRef(&gRevDiffCDXPF));
+ }
+ break;
+ }
+ default:
+ return nullptr;
+ }
+}
+
+GrXferProcessor*
+GrCoverageSetOpXPFactory::onCreateXferProcessor(const GrCaps& caps,
+ const GrPipelineOptimizations& optimizations,
+ bool hasMixedSamples,
+ const DstTexture* dst) const {
+ // We don't support inverting coverage with mixed samples. We don't expect to ever want this in
+ // the future; however, we could at some point make it work using an inverted coverage
+ // modulation table. Note that an inverted table still won't work if there are coverage procs.
+ if (fInvertCoverage && hasMixedSamples) {
+ SkASSERT(false);
+ return nullptr;
+ }
+
+ if (optimizations.fOverrides.fUsePLSDstRead) {
+ return new ShaderCSOXferProcessor(dst, hasMixedSamples, fRegionOp, fInvertCoverage);
+ }
+ return CoverageSetOpXP::Create(fRegionOp, fInvertCoverage);
+}
+
+void GrCoverageSetOpXPFactory::getInvariantBlendedColor(const GrProcOptInfo& colorPOI,
+ InvariantBlendedColor* blendedColor) const {
+ blendedColor->fWillBlendWithDst = SkRegion::kReplace_Op != fRegionOp;
+ blendedColor->fKnownColorFlags = kNone_GrColorComponentFlags;
+}
+
+GR_DEFINE_XP_FACTORY_TEST(GrCoverageSetOpXPFactory);
+
+sk_sp<GrXPFactory> GrCoverageSetOpXPFactory::TestCreate(GrProcessorTestData* d) {
+ SkRegion::Op regionOp = SkRegion::Op(d->fRandom->nextULessThan(SkRegion::kLastOp + 1));
+ bool invertCoverage = !d->fDrawContext->hasMixedSamples() && d->fRandom->nextBool();
+ return GrCoverageSetOpXPFactory::Make(regionOp, invertCoverage);
+}
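
A hedged usage sketch of the factory above; because Make() hands back one of the per-op static singletons, the call is cheap to repeat:

    // Replace the destination with the coverage value, without inverting it.
    sk_sp<GrXPFactory> xpf = GrCoverageSetOpXPFactory::Make(
            SkRegion::kReplace_Op, /*invertCoverage=*/false);
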
diff --git a/gfx/skia/skia/src/gpu/effects/GrCustomXfermode.cpp b/gfx/skia/skia/src/gpu/effects/GrCustomXfermode.cpp
new file mode 100644
index 000000000..1b94a6bee
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrCustomXfermode.cpp
@@ -0,0 +1,400 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "effects/GrCustomXfermode.h"
+
+#include "GrCoordTransform.h"
+#include "GrContext.h"
+#include "GrFragmentProcessor.h"
+#include "GrInvariantOutput.h"
+#include "GrPipeline.h"
+#include "GrProcessor.h"
+#include "GrTexture.h"
+#include "GrTextureAccess.h"
+#include "SkXfermode.h"
+#include "glsl/GrGLSLBlend.h"
+#include "glsl/GrGLSLCaps.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "glsl/GrGLSLXferProcessor.h"
+
+bool GrCustomXfermode::IsSupportedMode(SkXfermode::Mode mode) {
+ return mode > SkXfermode::kLastCoeffMode && mode <= SkXfermode::kLastMode;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Static helpers
+///////////////////////////////////////////////////////////////////////////////
+
+static GrBlendEquation hw_blend_equation(SkXfermode::Mode mode) {
+ enum { kOffset = kOverlay_GrBlendEquation - SkXfermode::kOverlay_Mode };
+ return static_cast<GrBlendEquation>(mode + kOffset);
+
+ GR_STATIC_ASSERT(kOverlay_GrBlendEquation == SkXfermode::kOverlay_Mode + kOffset);
+ GR_STATIC_ASSERT(kDarken_GrBlendEquation == SkXfermode::kDarken_Mode + kOffset);
+ GR_STATIC_ASSERT(kLighten_GrBlendEquation == SkXfermode::kLighten_Mode + kOffset);
+ GR_STATIC_ASSERT(kColorDodge_GrBlendEquation == SkXfermode::kColorDodge_Mode + kOffset);
+ GR_STATIC_ASSERT(kColorBurn_GrBlendEquation == SkXfermode::kColorBurn_Mode + kOffset);
+ GR_STATIC_ASSERT(kHardLight_GrBlendEquation == SkXfermode::kHardLight_Mode + kOffset);
+ GR_STATIC_ASSERT(kSoftLight_GrBlendEquation == SkXfermode::kSoftLight_Mode + kOffset);
+ GR_STATIC_ASSERT(kDifference_GrBlendEquation == SkXfermode::kDifference_Mode + kOffset);
+ GR_STATIC_ASSERT(kExclusion_GrBlendEquation == SkXfermode::kExclusion_Mode + kOffset);
+ GR_STATIC_ASSERT(kMultiply_GrBlendEquation == SkXfermode::kMultiply_Mode + kOffset);
+ GR_STATIC_ASSERT(kHSLHue_GrBlendEquation == SkXfermode::kHue_Mode + kOffset);
+ GR_STATIC_ASSERT(kHSLSaturation_GrBlendEquation == SkXfermode::kSaturation_Mode + kOffset);
+ GR_STATIC_ASSERT(kHSLColor_GrBlendEquation == SkXfermode::kColor_Mode + kOffset);
+ GR_STATIC_ASSERT(kHSLLuminosity_GrBlendEquation == SkXfermode::kLuminosity_Mode + kOffset);
+ GR_STATIC_ASSERT(kGrBlendEquationCnt == SkXfermode::kLastMode + 1 + kOffset);
+}
+
+static bool can_use_hw_blend_equation(GrBlendEquation equation,
+ const GrPipelineOptimizations& opt,
+ const GrCaps& caps) {
+ if (!caps.advancedBlendEquationSupport()) {
+ return false;
+ }
+ if (opt.fOverrides.fUsePLSDstRead) {
+ return false;
+ }
+ if (opt.fCoveragePOI.isFourChannelOutput()) {
+ return false; // LCD coverage must be applied after the blend equation.
+ }
+ if (!caps.canUseAdvancedBlendEquation(equation)) {
+ return false;
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Xfer Processor
+///////////////////////////////////////////////////////////////////////////////
+
+class CustomXP : public GrXferProcessor {
+public:
+ CustomXP(SkXfermode::Mode mode, GrBlendEquation hwBlendEquation)
+ : fMode(mode),
+ fHWBlendEquation(hwBlendEquation) {
+ this->initClassID<CustomXP>();
+ }
+
+ CustomXP(const DstTexture* dstTexture, bool hasMixedSamples, SkXfermode::Mode mode)
+ : INHERITED(dstTexture, true, hasMixedSamples),
+ fMode(mode),
+ fHWBlendEquation(static_cast<GrBlendEquation>(-1)) {
+ this->initClassID<CustomXP>();
+ }
+
+ const char* name() const override { return "Custom Xfermode"; }
+
+ GrGLSLXferProcessor* createGLSLInstance() const override;
+
+ SkXfermode::Mode mode() const { return fMode; }
+ bool hasHWBlendEquation() const { return -1 != static_cast<int>(fHWBlendEquation); }
+
+ GrBlendEquation hwBlendEquation() const {
+ SkASSERT(this->hasHWBlendEquation());
+ return fHWBlendEquation;
+ }
+
+private:
+ GrXferProcessor::OptFlags onGetOptimizations(const GrPipelineOptimizations& optimizations,
+ bool doesStencilWrite,
+ GrColor* overrideColor,
+ const GrCaps& caps) const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ GrXferBarrierType onXferBarrier(const GrRenderTarget*, const GrCaps&) const override;
+
+ void onGetBlendInfo(BlendInfo*) const override;
+
+ bool onIsEqual(const GrXferProcessor& xpBase) const override;
+
+ const SkXfermode::Mode fMode;
+ const GrBlendEquation fHWBlendEquation;
+
+ typedef GrXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GLCustomXP : public GrGLSLXferProcessor {
+public:
+ GLCustomXP(const GrXferProcessor&) {}
+ ~GLCustomXP() override {}
+
+ static void GenKey(const GrXferProcessor& p, const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) {
+ const CustomXP& xp = p.cast<CustomXP>();
+ uint32_t key = 0;
+ if (xp.hasHWBlendEquation()) {
+ SkASSERT(caps.advBlendEqInteraction() > 0); // 0 will mean !xp.hasHWBlendEquation().
+ key |= caps.advBlendEqInteraction();
+ GR_STATIC_ASSERT(GrGLSLCaps::kLast_AdvBlendEqInteraction < 4);
+ }
+ if (!xp.hasHWBlendEquation() || caps.mustEnableSpecificAdvBlendEqs()) {
+ key |= xp.mode() << 3;
+ }
+ b->add32(key);
+ }
+
+private:
+ void emitOutputsForBlendState(const EmitArgs& args) override {
+ const CustomXP& xp = args.fXP.cast<CustomXP>();
+ SkASSERT(xp.hasHWBlendEquation());
+
+ GrGLSLXPFragmentBuilder* fragBuilder = args.fXPFragBuilder;
+ fragBuilder->enableAdvancedBlendEquationIfNeeded(xp.hwBlendEquation());
+
+ // Apply coverage by multiplying it into the src color before blending. Mixed samples will
+ // "just work" automatically. (See onGetOptimizations())
+ if (args.fInputCoverage) {
+ fragBuilder->codeAppendf("%s = %s * %s;",
+ args.fOutputPrimary, args.fInputCoverage, args.fInputColor);
+ } else {
+ fragBuilder->codeAppendf("%s = %s;", args.fOutputPrimary, args.fInputColor);
+ }
+ }
+
+ void emitBlendCodeForDstRead(GrGLSLXPFragmentBuilder* fragBuilder,
+ GrGLSLUniformHandler* uniformHandler,
+ const char* srcColor,
+ const char* srcCoverage,
+ const char* dstColor,
+ const char* outColor,
+ const char* outColorSecondary,
+ const GrXferProcessor& proc) override {
+ const CustomXP& xp = proc.cast<CustomXP>();
+ SkASSERT(!xp.hasHWBlendEquation());
+
+ GrGLSLBlend::AppendMode(fragBuilder, srcColor, dstColor, outColor, xp.mode());
+
+ // Apply coverage.
+ INHERITED::DefaultCoverageModulation(fragBuilder, srcCoverage, dstColor, outColor,
+ outColorSecondary, xp);
+ }
+
+ void onSetData(const GrGLSLProgramDataManager&, const GrXferProcessor&) override {}
+
+ typedef GrGLSLXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+void CustomXP::onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const {
+ GLCustomXP::GenKey(*this, caps, b);
+}
+
+GrGLSLXferProcessor* CustomXP::createGLSLInstance() const {
+ SkASSERT(this->willReadDstColor() != this->hasHWBlendEquation());
+ return new GLCustomXP(*this);
+}
+
+bool CustomXP::onIsEqual(const GrXferProcessor& other) const {
+ const CustomXP& s = other.cast<CustomXP>();
+ return fMode == s.fMode && fHWBlendEquation == s.fHWBlendEquation;
+}
+
+GrXferProcessor::OptFlags CustomXP::onGetOptimizations(const GrPipelineOptimizations& optimizations,
+ bool doesStencilWrite,
+ GrColor* overrideColor,
+ const GrCaps& caps) const {
+ /*
+ Most of the optimizations we do here are based on tweaking alpha for coverage.
+
+ The general SVG blend equation is defined in the spec as follows:
+
+ Dca' = B(Sc, Dc) * Sa * Da + Y * Sca * (1-Da) + Z * Dca * (1-Sa)
+ Da' = X * Sa * Da + Y * Sa * (1-Da) + Z * Da * (1-Sa)
+
+ (Note that Sca, Dca indicate RGB vectors that are premultiplied by alpha,
+ and that B(Sc, Dc) is a mode-specific function that accepts non-multiplied
+ RGB colors.)
+
+ For every blend mode supported by this class, i.e. the "advanced" blend
+ modes, X=Y=Z=1 and this equation reduces to the PDF blend equation.
+
+ It can be shown that when X=Y=Z=1, these equations can modulate alpha for
+ coverage.
+
+
+ == Color ==
+
+ We substitute Y=Z=1 and define a blend() function that calculates Dca' in
+ terms of premultiplied alpha only:
+
+ blend(Sca, Dca, Sa, Da) = {Dca : if Sa == 0,
+ Sca : if Da == 0,
+ B(Sca/Sa, Dca/Da) * Sa * Da + Sca * (1-Da) + Dca * (1-Sa) : if Sa,Da != 0}
+
+ And for coverage modulation, we use a post blend src-over model:
+
+ Dca'' = f * blend(Sca, Dca, Sa, Da) + (1-f) * Dca
+
+ (Where f is the fractional coverage.)
+
+ Next we show that canTweakAlphaForCoverage() is true by proving the
+ following relationship:
+
+ blend(f*Sca, Dca, f*Sa, Da) == f * blend(Sca, Dca, Sa, Da) + (1-f) * Dca
+
+ General case (f,Sa,Da != 0):
+
+ f * blend(Sca, Dca, Sa, Da) + (1-f) * Dca
+ = f * (B(Sca/Sa, Dca/Da) * Sa * Da + Sca * (1-Da) + Dca * (1-Sa)) + (1-f) * Dca [Sa,Da != 0, definition of blend()]
+ = B(Sca/Sa, Dca/Da) * f*Sa * Da + f*Sca * (1-Da) + f*Dca * (1-Sa) + Dca - f*Dca
+ = B(Sca/Sa, Dca/Da) * f*Sa * Da + f*Sca - f*Sca * Da + f*Dca - f*Dca * Sa + Dca - f*Dca
+ = B(Sca/Sa, Dca/Da) * f*Sa * Da + f*Sca - f*Sca * Da - f*Dca * Sa + Dca
+ = B(Sca/Sa, Dca/Da) * f*Sa * Da + f*Sca * (1-Da) - f*Dca * Sa + Dca
+ = B(Sca/Sa, Dca/Da) * f*Sa * Da + f*Sca * (1-Da) + Dca * (1 - f*Sa)
+ = B(f*Sca/f*Sa, Dca/Da) * f*Sa * Da + f*Sca * (1-Da) + Dca * (1 - f*Sa) [f!=0]
+ = blend(f*Sca, Dca, f*Sa, Da) [definition of blend()]
+
+ Corner cases (Sa=0, Da=0, and f=0):
+
+ Sa=0: f * blend(Sca, Dca, Sa, Da) + (1-f) * Dca
+ = f * Dca + (1-f) * Dca [Sa=0, definition of blend()]
+ = Dca
+ = blend(0, Dca, 0, Da) [definition of blend()]
+ = blend(f*Sca, Dca, f*Sa, Da) [Sa=0]
+
+ Da=0: f * blend(Sca, Dca, Sa, Da) + (1-f) * Dca
+ = f * Sca + (1-f) * Dca [Da=0, definition of blend()]
+ = f * Sca [Da=0]
+ = blend(f*Sca, 0, f*Sa, 0) [definition of blend()]
+ = blend(f*Sca, Dca, f*Sa, Da) [Da=0]
+
+ f=0: f * blend(Sca, Dca, Sa, Da) + (1-f) * Dca
+ = Dca [f=0]
+ = blend(0, Dca, 0, Da) [definition of blend()]
+ = blend(f*Sca, Dca, f*Sa, Da) [f=0]
+
+ == Alpha ==
+
+ We substitute X=Y=Z=1 and define a blend() function that calculates Da':
+
+ blend(Sa, Da) = Sa * Da + Sa * (1-Da) + Da * (1-Sa)
+ = Sa * Da + Sa - Sa * Da + Da - Da * Sa
+ = Sa + Da - Sa * Da
+
+ We use the same model for coverage modulation as we did with color:
+
+ Da'' = f * blend(Sa, Da) + (1-f) * Da
+
+ And show that canTweakAlphaForCoverage() is true by proving the following
+ relationship:
+
+ blend(f*Sa, Da) == f * blend(Sa, Da) + (1-f) * Da
+
+
+ f * blend(Sa, Da) + (1-f) * Da
+ = f * (Sa + Da - Sa * Da) + (1-f) * Da
+ = f*Sa + f*Da - f*Sa * Da + Da - f*Da
+ = f*Sa - f*Sa * Da + Da
+ = f*Sa + Da - f*Sa * Da
+ = blend(f*Sa, Da)
+ */
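+
+    /*
+       A quick numeric spot-check of the color identity above (illustrative only; the values and
+       the choice of B are arbitrary). Take f = 0.5, Sa = Da = 1, Sca = 0.4, Dca = 0.6 and pick
+       B(Sc, Dc) = Sc * Dc for concreteness:
+
+         blend(Sca, Dca, Sa, Da)      = B(0.4, 0.6) = 0.24
+         f * blend(...) + (1-f) * Dca = 0.5 * 0.24 + 0.5 * 0.6 = 0.42
+         blend(f*Sca, Dca, f*Sa, Da)  = B(0.4, 0.6) * 0.5 * 1 + 0.2 * (1-1) + 0.6 * (1-0.5) = 0.42
+
+       Both sides agree, as the derivation requires.
+    */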
+
+ OptFlags flags = kNone_OptFlags;
+ if (optimizations.fColorPOI.allStagesMultiplyInput()) {
+ flags |= kCanTweakAlphaForCoverage_OptFlag;
+ }
+ if (this->hasHWBlendEquation() && optimizations.fCoveragePOI.isSolidWhite()) {
+ flags |= kIgnoreCoverage_OptFlag;
+ }
+ return flags;
+}
+
+GrXferBarrierType CustomXP::onXferBarrier(const GrRenderTarget* rt, const GrCaps& caps) const {
+ if (this->hasHWBlendEquation() && !caps.advancedCoherentBlendEquationSupport()) {
+ return kBlend_GrXferBarrierType;
+ }
+ return kNone_GrXferBarrierType;
+}
+
+void CustomXP::onGetBlendInfo(BlendInfo* blendInfo) const {
+ if (this->hasHWBlendEquation()) {
+ blendInfo->fEquation = this->hwBlendEquation();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+class CustomXPFactory : public GrXPFactory {
+public:
+ CustomXPFactory(SkXfermode::Mode mode);
+
+ void getInvariantBlendedColor(const GrProcOptInfo& colorPOI,
+ GrXPFactory::InvariantBlendedColor*) const override;
+
+private:
+ GrXferProcessor* onCreateXferProcessor(const GrCaps& caps,
+ const GrPipelineOptimizations& optimizations,
+ bool hasMixedSamples,
+ const DstTexture*) const override;
+
+ bool onWillReadDstColor(const GrCaps&, const GrPipelineOptimizations&) const override;
+
+ bool onIsEqual(const GrXPFactory& xpfBase) const override {
+ const CustomXPFactory& xpf = xpfBase.cast<CustomXPFactory>();
+ return fMode == xpf.fMode;
+ }
+
+ GR_DECLARE_XP_FACTORY_TEST;
+
+ SkXfermode::Mode fMode;
+ GrBlendEquation fHWBlendEquation;
+
+ typedef GrXPFactory INHERITED;
+};
+
+CustomXPFactory::CustomXPFactory(SkXfermode::Mode mode)
+ : fMode(mode),
+ fHWBlendEquation(hw_blend_equation(mode)) {
+ SkASSERT(GrCustomXfermode::IsSupportedMode(fMode));
+ this->initClassID<CustomXPFactory>();
+}
+
+GrXferProcessor* CustomXPFactory::onCreateXferProcessor(const GrCaps& caps,
+ const GrPipelineOptimizations& opt,
+ bool hasMixedSamples,
+ const DstTexture* dstTexture) const {
+ if (can_use_hw_blend_equation(fHWBlendEquation, opt, caps)) {
+ SkASSERT(!dstTexture || !dstTexture->texture());
+ return new CustomXP(fMode, fHWBlendEquation);
+ }
+ return new CustomXP(dstTexture, hasMixedSamples, fMode);
+}
+
+bool CustomXPFactory::onWillReadDstColor(const GrCaps& caps,
+ const GrPipelineOptimizations& optimizations) const {
+ return !can_use_hw_blend_equation(fHWBlendEquation, optimizations, caps);
+}
+
+void CustomXPFactory::getInvariantBlendedColor(const GrProcOptInfo& colorPOI,
+ InvariantBlendedColor* blendedColor) const {
+ blendedColor->fWillBlendWithDst = true;
+ blendedColor->fKnownColorFlags = kNone_GrColorComponentFlags;
+}
+
+GR_DEFINE_XP_FACTORY_TEST(CustomXPFactory);
+sk_sp<GrXPFactory> CustomXPFactory::TestCreate(GrProcessorTestData* d) {
+ int mode = d->fRandom->nextRangeU(SkXfermode::kLastCoeffMode + 1,
+ SkXfermode::kLastSeparableMode);
+
+ return sk_sp<GrXPFactory>(new CustomXPFactory(static_cast<SkXfermode::Mode>(mode)));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrXPFactory> GrCustomXfermode::MakeXPFactory(SkXfermode::Mode mode) {
+ if (!GrCustomXfermode::IsSupportedMode(mode)) {
+ return nullptr;
+ } else {
+ return sk_sp<GrXPFactory>(new CustomXPFactory(mode));
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrDashingEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrDashingEffect.cpp
new file mode 100644
index 000000000..9ce725b7e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrDashingEffect.cpp
@@ -0,0 +1,1281 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrDashingEffect.h"
+
+#include "GrBatchFlushState.h"
+#include "GrBatchTest.h"
+#include "GrCaps.h"
+#include "GrGeometryProcessor.h"
+#include "GrContext.h"
+#include "GrCoordTransform.h"
+#include "GrDefaultGeoProcFactory.h"
+#include "GrInvariantOutput.h"
+#include "GrProcessor.h"
+#include "GrStyle.h"
+#include "SkGr.h"
+#include "batches/GrVertexBatch.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLGeometryProcessor.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "glsl/GrGLSLVarying.h"
+#include "glsl/GrGLSLVertexShaderBuilder.h"
+
+using AAMode = GrDashingEffect::AAMode;
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Returns whether or not the gpu can fast path the dash line effect.
+bool GrDashingEffect::CanDrawDashLine(const SkPoint pts[2], const GrStyle& style,
+ const SkMatrix& viewMatrix) {
+ // Pts must be either horizontal or vertical in src space
+ if (pts[0].fX != pts[1].fX && pts[0].fY != pts[1].fY) {
+ return false;
+ }
+
+    // We may be able to relax this to include skew. For now we cannot handle perspective
+    // because bloating a rect requires non-uniform scaling.
+ if (!viewMatrix.preservesRightAngles()) {
+ return false;
+ }
+
+ if (!style.isDashed() || 2 != style.dashIntervalCnt()) {
+ return false;
+ }
+
+ const SkScalar* intervals = style.dashIntervals();
+ if (0 == intervals[0] && 0 == intervals[1]) {
+ return false;
+ }
+
+ SkPaint::Cap cap = style.strokeRec().getCap();
+    // Currently we don't handle round cap dashes with a nonzero on interval (i.e. anything
+    // other than a dotted line)
+ if (SkPaint::kRound_Cap == cap && intervals[0] != 0.f) {
+ return false;
+ }
+
+ return true;
+}
+
+namespace {
+struct DashLineVertex {
+ SkPoint fPos;
+ SkPoint fDashPos;
+ SkScalar fIntervalLength;
+ SkRect fRect;
+};
+struct DashCircleVertex {
+ SkPoint fPos;
+ SkPoint fDashPos;
+ SkScalar fIntervalLength;
+ SkScalar fRadius;
+ SkScalar fCenterX;
+};
+}  // namespace
+
+static void calc_dash_scaling(SkScalar* parallelScale, SkScalar* perpScale,
+ const SkMatrix& viewMatrix, const SkPoint pts[2]) {
+ SkVector vecSrc = pts[1] - pts[0];
+ SkScalar magSrc = vecSrc.length();
+ SkScalar invSrc = magSrc ? SkScalarInvert(magSrc) : 0;
+ vecSrc.scale(invSrc);
+
+ SkVector vecSrcPerp;
+ vecSrc.rotateCW(&vecSrcPerp);
+ viewMatrix.mapVectors(&vecSrc, 1);
+ viewMatrix.mapVectors(&vecSrcPerp, 1);
+
+ // parallelScale tells how much to scale along the line parallel to the dash line
+ // perpScale tells how much to scale in the direction perpendicular to the dash line
+ *parallelScale = vecSrc.length();
+ *perpScale = vecSrcPerp.length();
+}
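+
+// For example (illustrative only): with a view matrix that is an axis-aligned scale of (2, 3) and
+// a horizontal line, parallelScale comes out as 2 and perpScale as 3, so a 1-unit dash interval in
+// src space maps to 2 device pixels and a 1-unit stroke width maps to 3 device pixels.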
+
+// calculates the rotation needed to align pts to the x axis with pts[0] < pts[1]
+// Stores the rotation matrix in rotMatrix, and the mapped points in ptsRot
+static void align_to_x_axis(const SkPoint pts[2], SkMatrix* rotMatrix, SkPoint ptsRot[2] = nullptr) {
+ SkVector vec = pts[1] - pts[0];
+ SkScalar mag = vec.length();
+ SkScalar inv = mag ? SkScalarInvert(mag) : 0;
+
+ vec.scale(inv);
+ rotMatrix->setSinCos(-vec.fY, vec.fX, pts[0].fX, pts[0].fY);
+ if (ptsRot) {
+ rotMatrix->mapPoints(ptsRot, pts, 2);
+ // correction for numerical issues if map doesn't make ptsRot exactly horizontal
+ ptsRot[1].fY = pts[0].fY;
+ }
+}
+
+// Assumes phase < sum of all intervals
+static SkScalar calc_start_adjustment(const SkScalar intervals[2], SkScalar phase) {
+ SkASSERT(phase < intervals[0] + intervals[1]);
+ if (phase >= intervals[0] && phase != 0) {
+ SkScalar srcIntervalLen = intervals[0] + intervals[1];
+ return srcIntervalLen - phase;
+ }
+ return 0;
+}
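+
+// For example, with intervals {on = 2, off = 3} and phase = 4 the line starts in the middle of an
+// off interval, so this returns 5 - 4 = 1 and the caller advances the start point by 1 to the
+// beginning of the next on interval (illustrative values only).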
+
+static SkScalar calc_end_adjustment(const SkScalar intervals[2], const SkPoint pts[2],
+ SkScalar phase, SkScalar* endingInt) {
+ if (pts[1].fX <= pts[0].fX) {
+ return 0;
+ }
+ SkScalar srcIntervalLen = intervals[0] + intervals[1];
+ SkScalar totalLen = pts[1].fX - pts[0].fX;
+ SkScalar temp = totalLen / srcIntervalLen;
+ SkScalar numFullIntervals = SkScalarFloorToScalar(temp);
+ *endingInt = totalLen - numFullIntervals * srcIntervalLen + phase;
+ temp = *endingInt / srcIntervalLen;
+ *endingInt = *endingInt - SkScalarFloorToScalar(temp) * srcIntervalLen;
+ if (0 == *endingInt) {
+ *endingInt = srcIntervalLen;
+ }
+ if (*endingInt > intervals[0]) {
+ if (0 == intervals[0]) {
+            *endingInt -= 0.01f; // make sure we capture the last zero-size point (used if the line has caps)
+ }
+ return *endingInt - intervals[0];
+ }
+ return 0;
+}
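+
+// For example (illustrative values, phase already normalized): with intervals {on = 2, off = 3},
+// phase = 0 and a 13-unit line, two full 5-unit intervals end at x = 10, so *endingInt becomes 3
+// (2 of on plus 1 of off) and the function returns 1, trimming the trailing sliver of off interval
+// from the bounding geometry.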
+
+enum DashCap {
+ kRound_DashCap,
+ kNonRound_DashCap,
+};
+
+static int kDashVertices = 4;
+
+template <typename T>
+void setup_dashed_rect_common(const SkRect& rect, const SkMatrix& matrix, T* vertices, int idx,
+ SkScalar offset, SkScalar bloatX, SkScalar bloatY, SkScalar len,
+ SkScalar stroke) {
+ SkScalar startDashX = offset - bloatX;
+ SkScalar endDashX = offset + len + bloatX;
+ SkScalar startDashY = -stroke - bloatY;
+ SkScalar endDashY = stroke + bloatY;
+    vertices[idx].fDashPos = SkPoint::Make(startDashX, startDashY);
+ vertices[idx + 1].fDashPos = SkPoint::Make(startDashX, endDashY);
+ vertices[idx + 2].fDashPos = SkPoint::Make(endDashX, endDashY);
+ vertices[idx + 3].fDashPos = SkPoint::Make(endDashX, startDashY);
+
+ vertices[idx].fPos = SkPoint::Make(rect.fLeft, rect.fTop);
+ vertices[idx + 1].fPos = SkPoint::Make(rect.fLeft, rect.fBottom);
+ vertices[idx + 2].fPos = SkPoint::Make(rect.fRight, rect.fBottom);
+ vertices[idx + 3].fPos = SkPoint::Make(rect.fRight, rect.fTop);
+
+ matrix.mapPointsWithStride(&vertices[idx].fPos, sizeof(T), 4);
+}
+
+static void setup_dashed_rect(const SkRect& rect, void* vertices, int idx,
+ const SkMatrix& matrix, SkScalar offset, SkScalar bloatX,
+ SkScalar bloatY, SkScalar len, SkScalar stroke,
+ SkScalar startInterval, SkScalar endInterval, SkScalar strokeWidth,
+ DashCap cap, const size_t vertexStride) {
+ SkScalar intervalLength = startInterval + endInterval;
+
+ if (kRound_DashCap == cap) {
+ SkASSERT(vertexStride == sizeof(DashCircleVertex));
+ DashCircleVertex* verts = reinterpret_cast<DashCircleVertex*>(vertices);
+
+ setup_dashed_rect_common<DashCircleVertex>(rect, matrix, verts, idx, offset, bloatX,
+ bloatY, len, stroke);
+
+ SkScalar radius = SkScalarHalf(strokeWidth) - 0.5f;
+ SkScalar centerX = SkScalarHalf(endInterval);
+
+ for (int i = 0; i < kDashVertices; i++) {
+ verts[idx + i].fIntervalLength = intervalLength;
+ verts[idx + i].fRadius = radius;
+ verts[idx + i].fCenterX = centerX;
+ }
+
+ } else {
+ SkASSERT(kNonRound_DashCap == cap && vertexStride == sizeof(DashLineVertex));
+ DashLineVertex* verts = reinterpret_cast<DashLineVertex*>(vertices);
+
+ setup_dashed_rect_common<DashLineVertex>(rect, matrix, verts, idx, offset, bloatX,
+ bloatY, len, stroke);
+
+ SkScalar halfOffLen = SkScalarHalf(endInterval);
+ SkScalar halfStroke = SkScalarHalf(strokeWidth);
+ SkRect rectParam;
+ rectParam.set(halfOffLen + 0.5f, -halfStroke + 0.5f,
+ halfOffLen + startInterval - 0.5f, halfStroke - 0.5f);
+ for (int i = 0; i < kDashVertices; i++) {
+ verts[idx + i].fIntervalLength = intervalLength;
+ verts[idx + i].fRect = rectParam;
+ }
+ }
+}
+
+static void setup_dashed_rect_pos(const SkRect& rect, int idx, const SkMatrix& matrix,
+ SkPoint* verts) {
+ verts[idx] = SkPoint::Make(rect.fLeft, rect.fTop);
+ verts[idx + 1] = SkPoint::Make(rect.fLeft, rect.fBottom);
+ verts[idx + 2] = SkPoint::Make(rect.fRight, rect.fBottom);
+ verts[idx + 3] = SkPoint::Make(rect.fRight, rect.fTop);
+ matrix.mapPoints(&verts[idx], 4);
+}
+
+
+/**
+ * A GrGeometryProcessor that renders a dashed line.
+ * This GrGeometryProcessor is meant for dashed lines that only have a single on/off interval pair.
+ * Bounding geometry is rendered and the effect computes coverage based on the fragment's
+ * position relative to the dashed line.
+ */
+static sk_sp<GrGeometryProcessor> make_dash_gp(GrColor,
+ AAMode aaMode,
+ DashCap cap,
+ const SkMatrix& localMatrix,
+ bool usesLocalCoords);
+
+class DashBatch : public GrVertexBatch {
+public:
+ DEFINE_BATCH_CLASS_ID
+ struct Geometry {
+ SkMatrix fViewMatrix;
+ SkMatrix fSrcRotInv;
+ SkPoint fPtsRot[2];
+ SkScalar fSrcStrokeWidth;
+ SkScalar fPhase;
+ SkScalar fIntervals[2];
+ SkScalar fParallelScale;
+ SkScalar fPerpendicularScale;
+ GrColor fColor;
+ };
+
+ static GrDrawBatch* Create(const Geometry& geometry, SkPaint::Cap cap, AAMode aaMode,
+ bool fullDash) {
+ return new DashBatch(geometry, cap, aaMode, fullDash);
+ }
+
+ const char* name() const override { return "DashBatch"; }
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const override {
+ // When this is called on a batch, there is only one geometry bundle
+ color->setKnownFourComponents(fGeoData[0].fColor);
+ coverage->setUnknownSingleComponent();
+ }
+
+ SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
+
+private:
+ DashBatch(const Geometry& geometry, SkPaint::Cap cap, AAMode aaMode, bool fullDash)
+ : INHERITED(ClassID()) {
+ fGeoData.push_back(geometry);
+
+ fBatch.fAAMode = aaMode;
+ fBatch.fCap = cap;
+ fBatch.fFullDash = fullDash;
+
+ // compute bounds
+ SkScalar halfStrokeWidth = 0.5f * geometry.fSrcStrokeWidth;
+ SkScalar xBloat = SkPaint::kButt_Cap == cap ? 0 : halfStrokeWidth;
+ SkRect bounds;
+ bounds.set(geometry.fPtsRot[0], geometry.fPtsRot[1]);
+ bounds.outset(xBloat, halfStrokeWidth);
+
+        // Note: we create the combined matrix here and cache it, saving the work of recomputing it
+ SkMatrix& combinedMatrix = fGeoData[0].fSrcRotInv;
+ combinedMatrix.postConcat(geometry.fViewMatrix);
+
+ IsZeroArea zeroArea = geometry.fSrcStrokeWidth ? IsZeroArea::kNo : IsZeroArea::kYes;
+        HasAABloat aaBloat = (aaMode == AAMode::kNone) ? HasAABloat::kNo : HasAABloat::kYes;
+ this->setTransformedBounds(bounds, combinedMatrix, aaBloat, zeroArea);
+ }
+
+ void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
+ // Handle any color overrides
+ if (!overrides.readsColor()) {
+ fGeoData[0].fColor = GrColor_ILLEGAL;
+ }
+ overrides.getOverrideColorIfSet(&fGeoData[0].fColor);
+
+ // setup batch properties
+ fBatch.fColorIgnored = !overrides.readsColor();
+ fBatch.fColor = fGeoData[0].fColor;
+ fBatch.fUsesLocalCoords = overrides.readsLocalCoords();
+ fBatch.fCoverageIgnored = !overrides.readsCoverage();
+ }
+
+ struct DashDraw {
+ DashDraw(const Geometry& geo) {
+ memcpy(fPtsRot, geo.fPtsRot, sizeof(geo.fPtsRot));
+ memcpy(fIntervals, geo.fIntervals, sizeof(geo.fIntervals));
+ fPhase = geo.fPhase;
+ }
+ SkPoint fPtsRot[2];
+ SkScalar fIntervals[2];
+ SkScalar fPhase;
+ SkScalar fStartOffset;
+ SkScalar fStrokeWidth;
+ SkScalar fLineLength;
+ SkScalar fHalfDevStroke;
+ SkScalar fDevBloatX;
+ SkScalar fDevBloatY;
+ bool fLineDone;
+ bool fHasStartRect;
+ bool fHasEndRect;
+ };
+
+ void onPrepareDraws(Target* target) const override {
+ int instanceCount = fGeoData.count();
+ SkPaint::Cap cap = this->cap();
+ bool isRoundCap = SkPaint::kRound_Cap == cap;
+ DashCap capType = isRoundCap ? kRound_DashCap : kNonRound_DashCap;
+
+ sk_sp<GrGeometryProcessor> gp;
+ if (this->fullDash()) {
+ gp = make_dash_gp(this->color(), this->aaMode(), capType, this->viewMatrix(),
+ this->usesLocalCoords());
+ } else {
+ // Set up the vertex data for the line and start/end dashes
+ using namespace GrDefaultGeoProcFactory;
+ Color color(this->color());
+ Coverage coverage(this->coverageIgnored() ? Coverage::kNone_Type :
+ Coverage::kSolid_Type);
+ LocalCoords localCoords(this->usesLocalCoords() ? LocalCoords::kUsePosition_Type :
+ LocalCoords::kUnused_Type);
+ gp = MakeForDeviceSpace(color, coverage, localCoords, this->viewMatrix());
+ }
+
+ if (!gp) {
+ SkDebugf("Could not create GrGeometryProcessor\n");
+ return;
+ }
+
+ // useAA here means Edge AA or MSAA
+ bool useAA = this->aaMode() != AAMode::kNone;
+ bool fullDash = this->fullDash();
+
+        // We do two passes over all of the dashes. First we set up the start, end, and bounds
+        // rectangles. We preserve all of this work in the rects / draws arrays below. Then we
+        // iterate again over these decomposed dashes to generate the vertices.
+ static const int kNumStackDashes = 128;
+ SkSTArray<kNumStackDashes, SkRect, true> rects;
+ SkSTArray<kNumStackDashes, DashDraw, true> draws;
+
+ int totalRectCount = 0;
+ int rectOffset = 0;
+ rects.push_back_n(3 * instanceCount);
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& args = fGeoData[i];
+
+ DashDraw& draw = draws.push_back(args);
+
+ bool hasCap = SkPaint::kButt_Cap != cap && 0 != args.fSrcStrokeWidth;
+
+ // We always want to at least stroke out half a pixel on each side in device space
+ // so 0.5f / perpScale gives us this min in src space
+ SkScalar halfSrcStroke =
+ SkMaxScalar(args.fSrcStrokeWidth * 0.5f, 0.5f / args.fPerpendicularScale);
+
+ SkScalar strokeAdj;
+ if (!hasCap) {
+ strokeAdj = 0.f;
+ } else {
+ strokeAdj = halfSrcStroke;
+ }
+
+ SkScalar startAdj = 0;
+
+ bool lineDone = false;
+
+            // To simplify the algorithm, we always push back rects for the start and end rects.
+            // Otherwise we'd have to track start / end rects for each individual geometry.
+ SkRect& bounds = rects[rectOffset++];
+ SkRect& startRect = rects[rectOffset++];
+ SkRect& endRect = rects[rectOffset++];
+
+ bool hasStartRect = false;
+ // If we are using AA, check to see if we are drawing a partial dash at the start. If so
+ // draw it separately here and adjust our start point accordingly
+ if (useAA) {
+ if (draw.fPhase > 0 && draw.fPhase < draw.fIntervals[0]) {
+ SkPoint startPts[2];
+ startPts[0] = draw.fPtsRot[0];
+ startPts[1].fY = startPts[0].fY;
+ startPts[1].fX = SkMinScalar(startPts[0].fX + draw.fIntervals[0] - draw.fPhase,
+ draw.fPtsRot[1].fX);
+ startRect.set(startPts, 2);
+ startRect.outset(strokeAdj, halfSrcStroke);
+
+ hasStartRect = true;
+ startAdj = draw.fIntervals[0] + draw.fIntervals[1] - draw.fPhase;
+ }
+ }
+
+ // adjustments for start and end of bounding rect so we only draw dash intervals
+ // contained in the original line segment.
+ startAdj += calc_start_adjustment(draw.fIntervals, draw.fPhase);
+ if (startAdj != 0) {
+ draw.fPtsRot[0].fX += startAdj;
+ draw.fPhase = 0;
+ }
+ SkScalar endingInterval = 0;
+ SkScalar endAdj = calc_end_adjustment(draw.fIntervals, draw.fPtsRot, draw.fPhase,
+ &endingInterval);
+ draw.fPtsRot[1].fX -= endAdj;
+ if (draw.fPtsRot[0].fX >= draw.fPtsRot[1].fX) {
+ lineDone = true;
+ }
+
+ bool hasEndRect = false;
+            // If we are using AA, check to see if we are drawing a partial dash at the end. If so,
+ // draw it separately here and adjust our end point accordingly
+ if (useAA && !lineDone) {
+ // If we adjusted the end then we will not be drawing a partial dash at the end.
+ // If we didn't adjust the end point then we just need to make sure the ending
+ // dash isn't a full dash
+ if (0 == endAdj && endingInterval != draw.fIntervals[0]) {
+ SkPoint endPts[2];
+ endPts[1] = draw.fPtsRot[1];
+ endPts[0].fY = endPts[1].fY;
+ endPts[0].fX = endPts[1].fX - endingInterval;
+
+ endRect.set(endPts, 2);
+ endRect.outset(strokeAdj, halfSrcStroke);
+
+ hasEndRect = true;
+ endAdj = endingInterval + draw.fIntervals[1];
+
+ draw.fPtsRot[1].fX -= endAdj;
+ if (draw.fPtsRot[0].fX >= draw.fPtsRot[1].fX) {
+ lineDone = true;
+ }
+ }
+ }
+
+ if (startAdj != 0) {
+ draw.fPhase = 0;
+ }
+
+ // Change the dashing info from src space into device space
+ SkScalar* devIntervals = draw.fIntervals;
+ devIntervals[0] = draw.fIntervals[0] * args.fParallelScale;
+ devIntervals[1] = draw.fIntervals[1] * args.fParallelScale;
+ SkScalar devPhase = draw.fPhase * args.fParallelScale;
+ SkScalar strokeWidth = args.fSrcStrokeWidth * args.fPerpendicularScale;
+
+ if ((strokeWidth < 1.f && useAA) || 0.f == strokeWidth) {
+ strokeWidth = 1.f;
+ }
+
+ SkScalar halfDevStroke = strokeWidth * 0.5f;
+
+ if (SkPaint::kSquare_Cap == cap && 0 != args.fSrcStrokeWidth) {
+ // add cap to on interval and remove from off interval
+ devIntervals[0] += strokeWidth;
+ devIntervals[1] -= strokeWidth;
+ }
+ SkScalar startOffset = devIntervals[1] * 0.5f + devPhase;
+
+ // For EdgeAA, we bloat in X & Y for both square and round caps.
+ // For MSAA, we don't bloat at all for square caps, and bloat in Y only for round caps.
+ SkScalar devBloatX = this->aaMode() == AAMode::kCoverage ? 0.5f : 0.0f;
+ SkScalar devBloatY;
+ if (SkPaint::kRound_Cap == cap && this->aaMode() == AAMode::kCoverageWithMSAA) {
+ devBloatY = 0.5f;
+ } else {
+ devBloatY = devBloatX;
+ }
+
+ SkScalar bloatX = devBloatX / args.fParallelScale;
+ SkScalar bloatY = devBloatY / args.fPerpendicularScale;
+
+ if (devIntervals[1] <= 0.f && useAA) {
+                // Case where we end up drawing a solid AA rect.
+                // Reset the start rect to draw this single solid rect,
+                // but that requires uploading new interval values so we can mimic
+                // one giant dash.
+ draw.fPtsRot[0].fX -= hasStartRect ? startAdj : 0;
+ draw.fPtsRot[1].fX += hasEndRect ? endAdj : 0;
+ startRect.set(draw.fPtsRot, 2);
+ startRect.outset(strokeAdj, halfSrcStroke);
+ hasStartRect = true;
+ hasEndRect = false;
+ lineDone = true;
+
+ SkPoint devicePts[2];
+ args.fViewMatrix.mapPoints(devicePts, draw.fPtsRot, 2);
+ SkScalar lineLength = SkPoint::Distance(devicePts[0], devicePts[1]);
+ if (hasCap) {
+ lineLength += 2.f * halfDevStroke;
+ }
+ devIntervals[0] = lineLength;
+ }
+
+ totalRectCount += !lineDone ? 1 : 0;
+ totalRectCount += hasStartRect ? 1 : 0;
+ totalRectCount += hasEndRect ? 1 : 0;
+
+ if (SkPaint::kRound_Cap == cap && 0 != args.fSrcStrokeWidth) {
+ // need to adjust this for round caps to correctly set the dashPos attrib on
+ // vertices
+ startOffset -= halfDevStroke;
+ }
+
+ if (!lineDone) {
+ SkPoint devicePts[2];
+ args.fViewMatrix.mapPoints(devicePts, draw.fPtsRot, 2);
+ draw.fLineLength = SkPoint::Distance(devicePts[0], devicePts[1]);
+ if (hasCap) {
+ draw.fLineLength += 2.f * halfDevStroke;
+ }
+
+ bounds.set(draw.fPtsRot[0].fX, draw.fPtsRot[0].fY,
+ draw.fPtsRot[1].fX, draw.fPtsRot[1].fY);
+ bounds.outset(bloatX + strokeAdj, bloatY + halfSrcStroke);
+ }
+
+ if (hasStartRect) {
+ SkASSERT(useAA); // so that we know bloatX and bloatY have been set
+ startRect.outset(bloatX, bloatY);
+ }
+
+ if (hasEndRect) {
+ SkASSERT(useAA); // so that we know bloatX and bloatY have been set
+ endRect.outset(bloatX, bloatY);
+ }
+
+ draw.fStartOffset = startOffset;
+ draw.fDevBloatX = devBloatX;
+ draw.fDevBloatY = devBloatY;
+ draw.fHalfDevStroke = halfDevStroke;
+ draw.fStrokeWidth = strokeWidth;
+ draw.fHasStartRect = hasStartRect;
+ draw.fLineDone = lineDone;
+ draw.fHasEndRect = hasEndRect;
+ }
+
+ if (!totalRectCount) {
+ return;
+ }
+
+ QuadHelper helper;
+ void* vertices = helper.init(target, gp->getVertexStride(), totalRectCount);
+ if (!vertices) {
+ return;
+ }
+
+ int curVIdx = 0;
+ int rectIndex = 0;
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& geom = fGeoData[i];
+
+ if (!draws[i].fLineDone) {
+ if (fullDash) {
+ setup_dashed_rect(rects[rectIndex], vertices, curVIdx, geom.fSrcRotInv,
+ draws[i].fStartOffset, draws[i].fDevBloatX,
+ draws[i].fDevBloatY, draws[i].fLineLength,
+ draws[i].fHalfDevStroke, draws[i].fIntervals[0],
+ draws[i].fIntervals[1], draws[i].fStrokeWidth,
+ capType, gp->getVertexStride());
+ } else {
+ SkPoint* verts = reinterpret_cast<SkPoint*>(vertices);
+ SkASSERT(gp->getVertexStride() == sizeof(SkPoint));
+ setup_dashed_rect_pos(rects[rectIndex], curVIdx, geom.fSrcRotInv, verts);
+ }
+ curVIdx += 4;
+ }
+ rectIndex++;
+
+ if (draws[i].fHasStartRect) {
+ if (fullDash) {
+ setup_dashed_rect(rects[rectIndex], vertices, curVIdx, geom.fSrcRotInv,
+ draws[i].fStartOffset, draws[i].fDevBloatX,
+ draws[i].fDevBloatY, draws[i].fIntervals[0],
+ draws[i].fHalfDevStroke, draws[i].fIntervals[0],
+ draws[i].fIntervals[1], draws[i].fStrokeWidth, capType,
+ gp->getVertexStride());
+ } else {
+ SkPoint* verts = reinterpret_cast<SkPoint*>(vertices);
+ SkASSERT(gp->getVertexStride() == sizeof(SkPoint));
+ setup_dashed_rect_pos(rects[rectIndex], curVIdx, geom.fSrcRotInv, verts);
+ }
+ curVIdx += 4;
+ }
+ rectIndex++;
+
+ if (draws[i].fHasEndRect) {
+ if (fullDash) {
+ setup_dashed_rect(rects[rectIndex], vertices, curVIdx, geom.fSrcRotInv,
+ draws[i].fStartOffset, draws[i].fDevBloatX,
+ draws[i].fDevBloatY, draws[i].fIntervals[0],
+ draws[i].fHalfDevStroke, draws[i].fIntervals[0],
+ draws[i].fIntervals[1], draws[i].fStrokeWidth, capType,
+ gp->getVertexStride());
+ } else {
+ SkPoint* verts = reinterpret_cast<SkPoint*>(vertices);
+ SkASSERT(gp->getVertexStride() == sizeof(SkPoint));
+ setup_dashed_rect_pos(rects[rectIndex], curVIdx, geom.fSrcRotInv, verts);
+ }
+ curVIdx += 4;
+ }
+ rectIndex++;
+ }
+ SkASSERT(0 == (curVIdx % 4) && (curVIdx / 4) == totalRectCount);
+ helper.recordDraw(target, gp.get());
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ DashBatch* that = t->cast<DashBatch>();
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ if (this->aaMode() != that->aaMode()) {
+ return false;
+ }
+
+ if (this->fullDash() != that->fullDash()) {
+ return false;
+ }
+
+ if (this->cap() != that->cap()) {
+ return false;
+ }
+
+ // TODO vertex color
+ if (this->color() != that->color()) {
+ return false;
+ }
+
+ SkASSERT(this->usesLocalCoords() == that->usesLocalCoords());
+ if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return false;
+ }
+
+ fGeoData.push_back_n(that->geoData()->count(), that->geoData()->begin());
+ this->joinBounds(*that);
+ return true;
+ }
+
+ GrColor color() const { return fBatch.fColor; }
+ bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
+ const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
+ AAMode aaMode() const { return fBatch.fAAMode; }
+ bool fullDash() const { return fBatch.fFullDash; }
+ SkPaint::Cap cap() const { return fBatch.fCap; }
+ bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
+
+ struct BatchTracker {
+ GrColor fColor;
+ bool fUsesLocalCoords;
+ bool fColorIgnored;
+ bool fCoverageIgnored;
+ SkPaint::Cap fCap;
+ AAMode fAAMode;
+ bool fFullDash;
+ };
+
+ static const int kVertsPerDash = 4;
+ static const int kIndicesPerDash = 6;
+
+ BatchTracker fBatch;
+ SkSTArray<1, Geometry, true> fGeoData;
+
+ typedef GrVertexBatch INHERITED;
+};
+
+GrDrawBatch* GrDashingEffect::CreateDashLineBatch(GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkPoint pts[2],
+ AAMode aaMode,
+ const GrStyle& style) {
+ SkASSERT(GrDashingEffect::CanDrawDashLine(pts, style, viewMatrix));
+ const SkScalar* intervals = style.dashIntervals();
+ SkScalar phase = style.dashPhase();
+
+ SkPaint::Cap cap = style.strokeRec().getCap();
+
+ DashBatch::Geometry geometry;
+ geometry.fSrcStrokeWidth = style.strokeRec().getWidth();
+
+ // the phase should be normalized to be [0, sum of all intervals)
+ SkASSERT(phase >= 0 && phase < intervals[0] + intervals[1]);
+
+ // Rotate the src pts so they are aligned horizontally with pts[0].fX < pts[1].fX
+ if (pts[0].fY != pts[1].fY || pts[0].fX > pts[1].fX) {
+ SkMatrix rotMatrix;
+ align_to_x_axis(pts, &rotMatrix, geometry.fPtsRot);
+        if (!rotMatrix.invert(&geometry.fSrcRotInv)) {
+ SkDebugf("Failed to create invertible rotation matrix!\n");
+ return nullptr;
+ }
+ } else {
+ geometry.fSrcRotInv.reset();
+ memcpy(geometry.fPtsRot, pts, 2 * sizeof(SkPoint));
+ }
+
+ // Scale corrections of intervals and stroke from view matrix
+ calc_dash_scaling(&geometry.fParallelScale, &geometry.fPerpendicularScale, viewMatrix,
+ geometry.fPtsRot);
+
+ SkScalar offInterval = intervals[1] * geometry.fParallelScale;
+ SkScalar strokeWidth = geometry.fSrcStrokeWidth * geometry.fPerpendicularScale;
+
+ if (SkPaint::kSquare_Cap == cap && 0 != geometry.fSrcStrokeWidth) {
+        // add cap to on interval and remove from off interval
+ offInterval -= strokeWidth;
+ }
+
+ // TODO we can do a real rect call if not using fulldash(ie no off interval, not using AA)
+ bool fullDash = offInterval > 0.f || aaMode != AAMode::kNone;
+
+ geometry.fColor = color;
+ geometry.fViewMatrix = viewMatrix;
+ geometry.fPhase = phase;
+ geometry.fIntervals[0] = intervals[0];
+ geometry.fIntervals[1] = intervals[1];
+
+ return DashBatch::Create(geometry, cap, aaMode, fullDash);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GLDashingCircleEffect;
+
+/*
+ * This effect will draw a dotted line (defined as a dashed line with round caps and no on
+ * interval). The radius of the dots is given by the strokeWidth and the spacing by the DashInfo.
+ * Both of these parameters are in device space. This effect also requires a vec2 vertex attribute
+ * for the four corners of the bounding rect. This attribute is the "dash position" of each vertex:
+ * the vertex coords (in device space) if we transform the line to be horizontal, with the start of
+ * the line at the origin, then shifted to the right by half the off interval. The line then goes
+ * in the positive x direction.
+ */
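+/*
+ * As a rough illustration: for a dotted line with a 4px off interval, the fragment shader below
+ * wraps each fragment's dash position modulo the 4px interval length and tests it against a
+ * circle centered at x = 2 (half the off interval), which is how it decides whether the fragment
+ * lies inside a dot.
+ */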
+class DashingCircleEffect : public GrGeometryProcessor {
+public:
+ typedef SkPathEffect::DashInfo DashInfo;
+
+ static sk_sp<GrGeometryProcessor> Make(GrColor,
+ AAMode aaMode,
+ const SkMatrix& localMatrix,
+ bool usesLocalCoords);
+
+ const char* name() const override { return "DashingCircleEffect"; }
+
+ const Attribute* inPosition() const { return fInPosition; }
+
+ const Attribute* inDashParams() const { return fInDashParams; }
+
+ const Attribute* inCircleParams() const { return fInCircleParams; }
+
+ AAMode aaMode() const { return fAAMode; }
+
+ GrColor color() const { return fColor; }
+
+ bool colorIgnored() const { return GrColor_ILLEGAL == fColor; }
+
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+
+ void getGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder* b) const override;
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override;
+
+private:
+ DashingCircleEffect(GrColor, AAMode aaMode, const SkMatrix& localMatrix,
+ bool usesLocalCoords);
+
+ GrColor fColor;
+ SkMatrix fLocalMatrix;
+ bool fUsesLocalCoords;
+ AAMode fAAMode;
+ const Attribute* fInPosition;
+ const Attribute* fInDashParams;
+ const Attribute* fInCircleParams;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GLDashingCircleEffect : public GrGLSLGeometryProcessor {
+public:
+ GLDashingCircleEffect();
+
+ void onEmitCode(EmitArgs&, GrGPArgs*) override;
+
+ static inline void GenKey(const GrGeometryProcessor&,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder*);
+
+ void setData(const GrGLSLProgramDataManager&, const GrPrimitiveProcessor&,
+ FPCoordTransformIter&& transformIter) override;
+private:
+ UniformHandle fParamUniform;
+ UniformHandle fColorUniform;
+ GrColor fColor;
+ SkScalar fPrevRadius;
+ SkScalar fPrevCenterX;
+ SkScalar fPrevIntervalLength;
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
+
+GLDashingCircleEffect::GLDashingCircleEffect() {
+ fColor = GrColor_ILLEGAL;
+ fPrevRadius = SK_ScalarMin;
+ fPrevCenterX = SK_ScalarMin;
+ fPrevIntervalLength = SK_ScalarMax;
+}
+
+void GLDashingCircleEffect::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) {
+ const DashingCircleEffect& dce = args.fGP.cast<DashingCircleEffect>();
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(dce);
+
+ // XY are dashPos, Z is dashInterval
+ GrGLSLVertToFrag dashParams(kVec3f_GrSLType);
+ varyingHandler->addVarying("DashParam", &dashParams);
+ vertBuilder->codeAppendf("%s = %s;", dashParams.vsOut(), dce.inDashParams()->fName);
+
+    // x refers to circle radius - 0.5, y refers to circle's center x coord
+ GrGLSLVertToFrag circleParams(kVec2f_GrSLType);
+ varyingHandler->addVarying("CircleParams", &circleParams);
+ vertBuilder->codeAppendf("%s = %s;", circleParams.vsOut(), dce.inCircleParams()->fName);
+
+ GrGLSLPPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ // Setup pass through color
+ if (!dce.colorIgnored()) {
+ this->setupUniformColor(fragBuilder, uniformHandler, args.fOutputColor, &fColorUniform);
+ }
+
+ // Setup position
+ this->setupPosition(vertBuilder, gpArgs, dce.inPosition()->fName);
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ gpArgs->fPositionVar,
+ dce.inPosition()->fName,
+ dce.localMatrix(),
+ args.fFPCoordTransformHandler);
+
+ // transforms all points so that we can compare them to our test circle
+ fragBuilder->codeAppendf("float xShifted = %s.x - floor(%s.x / %s.z) * %s.z;",
+ dashParams.fsIn(), dashParams.fsIn(), dashParams.fsIn(),
+ dashParams.fsIn());
+ fragBuilder->codeAppendf("vec2 fragPosShifted = vec2(xShifted, %s.y);", dashParams.fsIn());
+ fragBuilder->codeAppendf("vec2 center = vec2(%s.y, 0.0);", circleParams.fsIn());
+ fragBuilder->codeAppend("float dist = length(center - fragPosShifted);");
+ if (dce.aaMode() != AAMode::kNone) {
+ fragBuilder->codeAppendf("float diff = dist - %s.x;", circleParams.fsIn());
+ fragBuilder->codeAppend("diff = 1.0 - diff;");
+ fragBuilder->codeAppend("float alpha = clamp(diff, 0.0, 1.0);");
+ } else {
+ fragBuilder->codeAppendf("float alpha = 1.0;");
+ fragBuilder->codeAppendf("alpha *= dist < %s.x + 0.5 ? 1.0 : 0.0;", circleParams.fsIn());
+ }
+ fragBuilder->codeAppendf("%s = vec4(alpha);", args.fOutputCoverage);
+}
+
+void GLDashingCircleEffect::setData(const GrGLSLProgramDataManager& pdman,
+ const GrPrimitiveProcessor& processor,
+ FPCoordTransformIter&& transformIter) {
+ const DashingCircleEffect& dce = processor.cast<DashingCircleEffect>();
+ if (dce.color() != fColor) {
+ float c[4];
+ GrColorToRGBAFloat(dce.color(), c);
+ pdman.set4fv(fColorUniform, 1, c);
+ fColor = dce.color();
+ }
+ this->setTransformDataHelper(dce.localMatrix(), pdman, &transformIter);
+}
+
+void GLDashingCircleEffect::GenKey(const GrGeometryProcessor& gp,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const DashingCircleEffect& dce = gp.cast<DashingCircleEffect>();
+ uint32_t key = 0;
+ key |= dce.usesLocalCoords() && dce.localMatrix().hasPerspective() ? 0x1 : 0x0;
+ key |= dce.colorIgnored() ? 0x2 : 0x0;
+ key |= static_cast<uint32_t>(dce.aaMode()) << 8;
+ b->add32(key);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrGeometryProcessor> DashingCircleEffect::Make(GrColor color,
+ AAMode aaMode,
+ const SkMatrix& localMatrix,
+ bool usesLocalCoords) {
+ return sk_sp<GrGeometryProcessor>(
+ new DashingCircleEffect(color, aaMode, localMatrix, usesLocalCoords));
+}
+
+void DashingCircleEffect::getGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GLDashingCircleEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLPrimitiveProcessor* DashingCircleEffect::createGLSLInstance(const GrGLSLCaps&) const {
+ return new GLDashingCircleEffect();
+}
+
+DashingCircleEffect::DashingCircleEffect(GrColor color,
+ AAMode aaMode,
+ const SkMatrix& localMatrix,
+ bool usesLocalCoords)
+ : fColor(color)
+ , fLocalMatrix(localMatrix)
+ , fUsesLocalCoords(usesLocalCoords)
+ , fAAMode(aaMode) {
+ this->initClassID<DashingCircleEffect>();
+ fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType);
+ fInDashParams = &this->addVertexAttrib("inDashParams", kVec3f_GrVertexAttribType);
+ fInCircleParams = &this->addVertexAttrib("inCircleParams", kVec2f_GrVertexAttribType);
+}
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(DashingCircleEffect);
+
+sk_sp<GrGeometryProcessor> DashingCircleEffect::TestCreate(GrProcessorTestData* d) {
+ AAMode aaMode = static_cast<AAMode>(d->fRandom->nextULessThan(GrDashingEffect::kAAModeCnt));
+ return DashingCircleEffect::Make(GrRandomColor(d->fRandom),
+ aaMode, GrTest::TestMatrix(d->fRandom),
+ d->fRandom->nextBool());
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GLDashingLineEffect;
+
+/*
+ * This effect will draw a dashed line. The width of the dash is given by the strokeWidth and the
+ * length and spacing by the DashInfo. Both of these parameters are in device space. This effect
+ * also requires a vec2 vertex attribute for the four corners of the bounding rect. This attribute
+ * is the "dash position" of each vertex: the vertex coords (in device space) if we transform the
+ * line to be horizontal, with the start of the line at the origin, then shifted to the right by
+ * half the off interval. The line then goes in the positive x direction.
+ */
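+/*
+ * As a rough illustration (assuming butt caps, so the intervals are used unmodified): for a dash
+ * pattern with a 6px on interval, a 4px off interval and a 2px stroke, dash positions wrap every
+ * 10px and the per-vertex rect parameter works out to (2.5, -0.5, 7.5, 0.5) in dash space, i.e.
+ * the on-rect inset by half a pixel on each side for the coverage ramp.
+ */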
+class DashingLineEffect : public GrGeometryProcessor {
+public:
+ typedef SkPathEffect::DashInfo DashInfo;
+
+ static sk_sp<GrGeometryProcessor> Make(GrColor,
+ AAMode aaMode,
+ const SkMatrix& localMatrix,
+ bool usesLocalCoords);
+
+ const char* name() const override { return "DashingEffect"; }
+
+ const Attribute* inPosition() const { return fInPosition; }
+
+ const Attribute* inDashParams() const { return fInDashParams; }
+
+ const Attribute* inRectParams() const { return fInRectParams; }
+
+ AAMode aaMode() const { return fAAMode; }
+
+ GrColor color() const { return fColor; }
+
+ bool colorIgnored() const { return GrColor_ILLEGAL == fColor; }
+
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+
+ void getGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override;
+
+private:
+ DashingLineEffect(GrColor, AAMode aaMode, const SkMatrix& localMatrix,
+ bool usesLocalCoords);
+
+ GrColor fColor;
+ SkMatrix fLocalMatrix;
+ bool fUsesLocalCoords;
+ AAMode fAAMode;
+ const Attribute* fInPosition;
+ const Attribute* fInDashParams;
+ const Attribute* fInRectParams;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GLDashingLineEffect : public GrGLSLGeometryProcessor {
+public:
+ GLDashingLineEffect();
+
+ void onEmitCode(EmitArgs&, GrGPArgs*) override;
+
+ static inline void GenKey(const GrGeometryProcessor&,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder*);
+
+ void setData(const GrGLSLProgramDataManager&, const GrPrimitiveProcessor&,
+ FPCoordTransformIter&& iter) override;
+
+private:
+ GrColor fColor;
+ UniformHandle fColorUniform;
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
+
+GLDashingLineEffect::GLDashingLineEffect() {
+ fColor = GrColor_ILLEGAL;
+}
+
+void GLDashingLineEffect::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) {
+ const DashingLineEffect& de = args.fGP.cast<DashingLineEffect>();
+
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(de);
+
+ // XY refers to dashPos, Z is the dash interval length
+ GrGLSLVertToFrag inDashParams(kVec3f_GrSLType);
+ varyingHandler->addVarying("DashParams", &inDashParams, GrSLPrecision::kHigh_GrSLPrecision);
+ vertBuilder->codeAppendf("%s = %s;", inDashParams.vsOut(), de.inDashParams()->fName);
+
+    // The rect varying's xyzw refer to (left + 0.5, top + 0.5, right - 0.5, bottom - 0.5),
+    // respectively.
+ GrGLSLVertToFrag inRectParams(kVec4f_GrSLType);
+ varyingHandler->addVarying("RectParams", &inRectParams, GrSLPrecision::kHigh_GrSLPrecision);
+ vertBuilder->codeAppendf("%s = %s;", inRectParams.vsOut(), de.inRectParams()->fName);
+
+ GrGLSLPPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ // Setup pass through color
+ if (!de.colorIgnored()) {
+ this->setupUniformColor(fragBuilder, uniformHandler, args.fOutputColor, &fColorUniform);
+ }
+
+ // Setup position
+ this->setupPosition(vertBuilder, gpArgs, de.inPosition()->fName);
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ gpArgs->fPositionVar,
+ de.inPosition()->fName,
+ de.localMatrix(),
+ args.fFPCoordTransformHandler);
+
+ // transforms all points so that we can compare them to our test rect
+ fragBuilder->codeAppendf("float xShifted = %s.x - floor(%s.x / %s.z) * %s.z;",
+ inDashParams.fsIn(), inDashParams.fsIn(), inDashParams.fsIn(),
+ inDashParams.fsIn());
+ fragBuilder->codeAppendf("vec2 fragPosShifted = vec2(xShifted, %s.y);", inDashParams.fsIn());
+ if (de.aaMode() == AAMode::kCoverage) {
+ // The amount of coverage removed in x and y by the edges is computed as a pair of negative
+ // numbers, xSub and ySub.
+ fragBuilder->codeAppend("float xSub, ySub;");
+ fragBuilder->codeAppendf("xSub = min(fragPosShifted.x - %s.x, 0.0);", inRectParams.fsIn());
+ fragBuilder->codeAppendf("xSub += min(%s.z - fragPosShifted.x, 0.0);", inRectParams.fsIn());
+ fragBuilder->codeAppendf("ySub = min(fragPosShifted.y - %s.y, 0.0);", inRectParams.fsIn());
+ fragBuilder->codeAppendf("ySub += min(%s.w - fragPosShifted.y, 0.0);", inRectParams.fsIn());
+ // Now compute coverage in x and y and multiply them to get the fraction of the pixel
+ // covered.
+ fragBuilder->codeAppendf(
+ "float alpha = (1.0 + max(xSub, -1.0)) * (1.0 + max(ySub, -1.0));");
+ } else if (de.aaMode() == AAMode::kCoverageWithMSAA) {
+        // For MSAA, we don't modulate the alpha by the Y distance, since MSAA coverage will handle
+        // AA on the top and bottom edges. The shader is only responsible for intra-dash alpha.
+ fragBuilder->codeAppend("float xSub;");
+ fragBuilder->codeAppendf("xSub = min(fragPosShifted.x - %s.x, 0.0);", inRectParams.fsIn());
+ fragBuilder->codeAppendf("xSub += min(%s.z - fragPosShifted.x, 0.0);", inRectParams.fsIn());
+ // Now compute coverage in x to get the fraction of the pixel covered.
+ fragBuilder->codeAppendf("float alpha = (1.0 + max(xSub, -1.0));");
+ } else {
+ // Assuming the bounding geometry is tight so no need to check y values
+ fragBuilder->codeAppendf("float alpha = 1.0;");
+ fragBuilder->codeAppendf("alpha *= (fragPosShifted.x - %s.x) > -0.5 ? 1.0 : 0.0;",
+ inRectParams.fsIn());
+ fragBuilder->codeAppendf("alpha *= (%s.z - fragPosShifted.x) >= -0.5 ? 1.0 : 0.0;",
+ inRectParams.fsIn());
+ }
+ fragBuilder->codeAppendf("%s = vec4(alpha);", args.fOutputCoverage);
+}
+
+void GLDashingLineEffect::setData(const GrGLSLProgramDataManager& pdman,
+ const GrPrimitiveProcessor& processor,
+ FPCoordTransformIter&& transformIter) {
+ const DashingLineEffect& de = processor.cast<DashingLineEffect>();
+ if (de.color() != fColor) {
+ float c[4];
+ GrColorToRGBAFloat(de.color(), c);
+ pdman.set4fv(fColorUniform, 1, c);
+ fColor = de.color();
+ }
+ this->setTransformDataHelper(de.localMatrix(), pdman, &transformIter);
+}
+
+void GLDashingLineEffect::GenKey(const GrGeometryProcessor& gp,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const DashingLineEffect& de = gp.cast<DashingLineEffect>();
+ uint32_t key = 0;
+ key |= de.usesLocalCoords() && de.localMatrix().hasPerspective() ? 0x1 : 0x0;
+ key |= de.colorIgnored() ? 0x2 : 0x0;
+ key |= static_cast<int>(de.aaMode()) << 8;
+ b->add32(key);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrGeometryProcessor> DashingLineEffect::Make(GrColor color,
+ AAMode aaMode,
+ const SkMatrix& localMatrix,
+ bool usesLocalCoords) {
+ return sk_sp<GrGeometryProcessor>(
+ new DashingLineEffect(color, aaMode, localMatrix, usesLocalCoords));
+}
+
+void DashingLineEffect::getGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GLDashingLineEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLPrimitiveProcessor* DashingLineEffect::createGLSLInstance(const GrGLSLCaps&) const {
+ return new GLDashingLineEffect();
+}
+
+DashingLineEffect::DashingLineEffect(GrColor color,
+ AAMode aaMode,
+ const SkMatrix& localMatrix,
+ bool usesLocalCoords)
+ : fColor(color)
+ , fLocalMatrix(localMatrix)
+ , fUsesLocalCoords(usesLocalCoords)
+ , fAAMode(aaMode) {
+ this->initClassID<DashingLineEffect>();
+ fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType);
+ fInDashParams = &this->addVertexAttrib("inDashParams", kVec3f_GrVertexAttribType);
+ fInRectParams = &this->addVertexAttrib("inRect", kVec4f_GrVertexAttribType);
+}
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(DashingLineEffect);
+
+sk_sp<GrGeometryProcessor> DashingLineEffect::TestCreate(GrProcessorTestData* d) {
+ AAMode aaMode = static_cast<AAMode>(d->fRandom->nextULessThan(GrDashingEffect::kAAModeCnt));
+ return DashingLineEffect::Make(GrRandomColor(d->fRandom),
+ aaMode, GrTest::TestMatrix(d->fRandom),
+ d->fRandom->nextBool());
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+static sk_sp<GrGeometryProcessor> make_dash_gp(GrColor color,
+ AAMode aaMode,
+ DashCap cap,
+ const SkMatrix& viewMatrix,
+ bool usesLocalCoords) {
+ SkMatrix invert;
+ if (usesLocalCoords && !viewMatrix.invert(&invert)) {
+ SkDebugf("Failed to invert\n");
+ return nullptr;
+ }
+
+ switch (cap) {
+ case kRound_DashCap:
+ return DashingCircleEffect::Make(color, aaMode, invert, usesLocalCoords);
+ case kNonRound_DashCap:
+ return DashingLineEffect::Make(color, aaMode, invert, usesLocalCoords);
+ }
+ return nullptr;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_TEST_UTILS
+
+DRAW_BATCH_TEST_DEFINE(DashBatch) {
+ GrColor color = GrRandomColor(random);
+ SkMatrix viewMatrix = GrTest::TestMatrixPreservesRightAngles(random);
+ AAMode aaMode = static_cast<AAMode>(random->nextULessThan(GrDashingEffect::kAAModeCnt));
+
+ // We can only dash either horizontal or vertical lines
+ SkPoint pts[2];
+ if (random->nextBool()) {
+ // vertical
+ pts[0].fX = 1.f;
+ pts[0].fY = random->nextF() * 10.f;
+ pts[1].fX = 1.f;
+ pts[1].fY = random->nextF() * 10.f;
+ } else {
+ // horizontal
+ pts[0].fX = random->nextF() * 10.f;
+ pts[0].fY = 1.f;
+ pts[1].fX = random->nextF() * 10.f;
+ pts[1].fY = 1.f;
+ }
+
+ // pick random cap
+ SkPaint::Cap cap = SkPaint::Cap(random->nextULessThan(SkPaint::kCapCount));
+
+ SkScalar intervals[2];
+
+ // We can only dash with the following intervals
+ enum Intervals {
+ kOpenOpen_Intervals ,
+ kOpenClose_Intervals,
+ kCloseOpen_Intervals,
+ };
+
+    Intervals intervalType = SkPaint::kRound_Cap == cap ?
+ kOpenClose_Intervals :
+ Intervals(random->nextULessThan(kCloseOpen_Intervals + 1));
+ static const SkScalar kIntervalMin = 0.1f;
+ static const SkScalar kIntervalMax = 10.f;
+ switch (intervalType) {
+ case kOpenOpen_Intervals:
+ intervals[0] = random->nextRangeScalar(kIntervalMin, kIntervalMax);
+ intervals[1] = random->nextRangeScalar(kIntervalMin, kIntervalMax);
+ break;
+ case kOpenClose_Intervals:
+ intervals[0] = 0.f;
+ intervals[1] = random->nextRangeScalar(kIntervalMin, kIntervalMax);
+ break;
+ case kCloseOpen_Intervals:
+ intervals[0] = random->nextRangeScalar(kIntervalMin, kIntervalMax);
+ intervals[1] = 0.f;
+ break;
+
+ }
+
+    // phase is in [0, sum(i0, i1))
+ SkScalar phase = random->nextRangeScalar(0, intervals[0] + intervals[1]);
+
+ SkPaint p;
+ p.setStyle(SkPaint::kStroke_Style);
+ p.setStrokeWidth(SkIntToScalar(1));
+ p.setStrokeCap(cap);
+ p.setPathEffect(GrTest::TestDashPathEffect::Make(intervals, 2, phase));
+
+ GrStyle style(p);
+
+ return GrDashingEffect::CreateDashLineBatch(color, viewMatrix, pts, aaMode, style);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrDashingEffect.h b/gfx/skia/skia/src/gpu/effects/GrDashingEffect.h
new file mode 100644
index 000000000..b2d052347
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrDashingEffect.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDashingEffect_DEFINED
+#define GrDashingEffect_DEFINED
+
+#include "GrColor.h"
+#include "GrTypesPriv.h"
+#include "SkPathEffect.h"
+
+class GrClip;
+class GrDrawBatch;
+class GrStyle;
+
+namespace GrDashingEffect {
+ enum class AAMode {
+ kNone,
+ kCoverage,
+ kCoverageWithMSAA,
+ };
+ static const int kAAModeCnt = static_cast<int>(AAMode::kCoverageWithMSAA) + 1;
+
+ GrDrawBatch* CreateDashLineBatch(GrColor,
+ const SkMatrix& viewMatrix,
+ const SkPoint pts[2],
+ AAMode,
+ const GrStyle& style);
+ bool CanDrawDashLine(const SkPoint pts[2], const GrStyle& style,
+ const SkMatrix& viewMatrix);
+}
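+
+// A minimal usage sketch (illustrative; error handling and batch ownership follow the caller's
+// usual conventions):
+//
+//   if (GrDashingEffect::CanDrawDashLine(pts, style, viewMatrix)) {
+//       GrDrawBatch* batch =
+//           GrDashingEffect::CreateDashLineBatch(color, viewMatrix, pts, aaMode, style);
+//       // CreateDashLineBatch can still return nullptr (e.g. if the rotation matrix is not
+//       // invertible), so check the result before recording the batch.
+//   }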
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrDisableColorXP.cpp b/gfx/skia/skia/src/gpu/effects/GrDisableColorXP.cpp
new file mode 100644
index 000000000..35009e1a0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrDisableColorXP.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "effects/GrDisableColorXP.h"
+#include "GrPipeline.h"
+#include "GrProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLXferProcessor.h"
+
+/**
+ * This xfer processor disables color writing. Thus color and coverage are ignored and no blending
+ * occurs. This XP is useful for things like stenciling.
+ */
+class DisableColorXP : public GrXferProcessor {
+public:
+ static GrXferProcessor* Create() { return new DisableColorXP; }
+
+ ~DisableColorXP() override {}
+
+ const char* name() const override { return "Disable Color"; }
+
+ GrGLSLXferProcessor* createGLSLInstance() const override;
+
+private:
+ DisableColorXP();
+
+ GrXferProcessor::OptFlags onGetOptimizations(const GrPipelineOptimizations& optimizations,
+ bool doesStencilWrite,
+ GrColor* color,
+ const GrCaps& caps) const override {
+ return GrXferProcessor::kIgnoreColor_OptFlag | GrXferProcessor::kIgnoreCoverage_OptFlag;
+ }
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ void onGetBlendInfo(GrXferProcessor::BlendInfo* blendInfo) const override;
+
+ bool onIsEqual(const GrXferProcessor& xpBase) const override {
+ return true;
+ }
+
+ typedef GrXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GLDisableColorXP : public GrGLSLXferProcessor {
+public:
+ GLDisableColorXP(const GrProcessor&) {}
+
+ ~GLDisableColorXP() override {}
+
+ static void GenKey(const GrProcessor&, const GrGLSLCaps&, GrProcessorKeyBuilder*) {}
+
+private:
+ void emitOutputsForBlendState(const EmitArgs& args) override {
+        // This emit code should be empty. However, on the Nexus 6 there is a driver bug where, if
+        // you do not give gl_FragColor a value, the GL context is lost and we end up drawing
+        // nothing. So this fix just sets gl_FragColor arbitrarily to 0.
+ GrGLSLXPFragmentBuilder* fragBuilder = args.fXPFragBuilder;
+ fragBuilder->codeAppendf("%s = vec4(0);", args.fOutputPrimary);
+ }
+
+ void onSetData(const GrGLSLProgramDataManager&, const GrXferProcessor&) override {}
+
+ typedef GrGLSLXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+DisableColorXP::DisableColorXP() {
+ this->initClassID<DisableColorXP>();
+}
+
+void DisableColorXP::onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const {
+ GLDisableColorXP::GenKey(*this, caps, b);
+}
+
+GrGLSLXferProcessor* DisableColorXP::createGLSLInstance() const { return new GLDisableColorXP(*this); }
+
+void DisableColorXP::onGetBlendInfo(GrXferProcessor::BlendInfo* blendInfo) const {
+ blendInfo->fWriteColor = false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrDisableColorXPFactory::GrDisableColorXPFactory() {
+ this->initClassID<GrDisableColorXPFactory>();
+}
+
+GrXferProcessor*
+GrDisableColorXPFactory::onCreateXferProcessor(const GrCaps& caps,
+ const GrPipelineOptimizations& optimizations,
+ bool hasMixedSamples,
+ const DstTexture* dst) const {
+ SkASSERT(!optimizations.fOverrides.fUsePLSDstRead);
+ return DisableColorXP::Create();
+}
+
+GR_DEFINE_XP_FACTORY_TEST(GrDisableColorXPFactory);
+
+sk_sp<GrXPFactory> GrDisableColorXPFactory::TestCreate(GrProcessorTestData*) {
+ return GrDisableColorXPFactory::Make();
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrDisableColorXP.h b/gfx/skia/skia/src/gpu/effects/GrDisableColorXP.h
new file mode 100644
index 000000000..4aed6b671
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrDisableColorXP.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDisableColorXP_DEFINED
+#define GrDisableColorXP_DEFINED
+
+#include "GrTypes.h"
+#include "GrXferProcessor.h"
+#include "SkRefCnt.h"
+
+class GrProcOptInfo;
+
+class GrDisableColorXPFactory : public GrXPFactory {
+public:
+ static sk_sp<GrXPFactory> Make() { return sk_sp<GrXPFactory>(new GrDisableColorXPFactory); }
+
+ void getInvariantBlendedColor(const GrProcOptInfo& colorPOI,
+ GrXPFactory::InvariantBlendedColor* blendedColor) const override {
+ blendedColor->fKnownColorFlags = kNone_GrColorComponentFlags;
+ blendedColor->fWillBlendWithDst = false;
+ }
+
+private:
+ GrDisableColorXPFactory();
+
+ GrXferProcessor* onCreateXferProcessor(const GrCaps& caps,
+ const GrPipelineOptimizations& optimizations,
+ bool hasMixedSamples,
+ const DstTexture* dstTexture) const override;
+
+ bool onWillReadDstColor(const GrCaps&, const GrPipelineOptimizations&) const override {
+ return false;
+ }
+
+ bool onIsEqual(const GrXPFactory& xpfBase) const override {
+ return true;
+ }
+
+ GR_DECLARE_XP_FACTORY_TEST;
+
+ typedef GrXPFactory INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrDistanceFieldGeoProc.cpp b/gfx/skia/skia/src/gpu/effects/GrDistanceFieldGeoProc.cpp
new file mode 100644
index 000000000..5404b0c80
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrDistanceFieldGeoProc.cpp
@@ -0,0 +1,850 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrDistanceFieldGeoProc.h"
+#include "GrInvariantOutput.h"
+#include "GrTexture.h"
+
+#include "SkDistanceFieldGen.h"
+
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLGeometryProcessor.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "glsl/GrGLSLUtil.h"
+#include "glsl/GrGLSLVarying.h"
+#include "glsl/GrGLSLVertexShaderBuilder.h"
+
+// Assuming a radius of a little less than the diagonal of the fragment
+#define SK_DistanceFieldAAFactor "0.65"
+
+class GrGLDistanceFieldA8TextGeoProc : public GrGLSLGeometryProcessor {
+public:
+ GrGLDistanceFieldA8TextGeoProc()
+ : fViewMatrix(SkMatrix::InvalidMatrix())
+#ifdef SK_GAMMA_APPLY_TO_A8
+ , fDistanceAdjust(-1.0f)
+#endif
+ {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override{
+ const GrDistanceFieldA8TextGeoProc& dfTexEffect =
+ args.fGP.cast<GrDistanceFieldA8TextGeoProc>();
+ GrGLSLPPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkAssertResult(fragBuilder->enableFeature(
+ GrGLSLFragmentShaderBuilder::kStandardDerivatives_GLSLFeature));
+
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(dfTexEffect);
+
+#ifdef SK_GAMMA_APPLY_TO_A8
+ // adjust based on gamma
+ const char* distanceAdjustUniName = nullptr;
+        // single-float adjustment applied to the distance in the fragment shader
+ fDistanceAdjustUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType, kDefault_GrSLPrecision,
+ "DistanceAdjust", &distanceAdjustUniName);
+#endif
+
+ // Setup pass through color
+ if (!dfTexEffect.colorIgnored()) {
+ varyingHandler->addPassThroughAttribute(dfTexEffect.inColor(), args.fOutputColor);
+ }
+
+ // Setup position
+ this->setupPosition(vertBuilder,
+ uniformHandler,
+ gpArgs,
+ dfTexEffect.inPosition()->fName,
+ dfTexEffect.viewMatrix(),
+ &fViewMatrixUniform);
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ gpArgs->fPositionVar,
+ dfTexEffect.inPosition()->fName,
+ args.fFPCoordTransformHandler);
+
+ // add varyings
+ GrGLSLVertToFrag recipScale(kFloat_GrSLType);
+ GrGLSLVertToFrag uv(kVec2f_GrSLType);
+ bool isUniformScale = (dfTexEffect.getFlags() & kUniformScale_DistanceFieldEffectMask) ==
+ kUniformScale_DistanceFieldEffectMask;
+ bool isSimilarity = SkToBool(dfTexEffect.getFlags() & kSimilarity_DistanceFieldEffectFlag);
+ bool isGammaCorrect =
+ SkToBool(dfTexEffect.getFlags() & kGammaCorrect_DistanceFieldEffectFlag);
+ varyingHandler->addVarying("TextureCoords", &uv, kHigh_GrSLPrecision);
+ vertBuilder->codeAppendf("%s = %s;", uv.vsOut(), dfTexEffect.inTextureCoords()->fName);
+
+ // compute numbers to be hardcoded to convert texture coordinates from float to int
+ SkASSERT(dfTexEffect.numTextures() == 1);
+ GrTexture* atlas = dfTexEffect.textureAccess(0).getTexture();
+ SkASSERT(atlas && SkIsPow2(atlas->width()) && SkIsPow2(atlas->height()));
+
+ GrGLSLVertToFrag st(kVec2f_GrSLType);
+ varyingHandler->addVarying("IntTextureCoords", &st, kHigh_GrSLPrecision);
+ vertBuilder->codeAppendf("%s = vec2(%d, %d) * %s;", st.vsOut(),
+ atlas->width(), atlas->height(),
+ dfTexEffect.inTextureCoords()->fName);
+
+ // Use highp to work around aliasing issues
+ fragBuilder->appendPrecisionModifier(kHigh_GrSLPrecision);
+ fragBuilder->codeAppendf("vec2 uv = %s;\n", uv.fsIn());
+
+ fragBuilder->codeAppend("\tfloat texColor = ");
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0],
+ "uv",
+ kVec2f_GrSLType);
+ fragBuilder->codeAppend(".r;\n");
+ fragBuilder->codeAppend("\tfloat distance = "
+ SK_DistanceFieldMultiplier "*(texColor - " SK_DistanceFieldThreshold ");");
+#ifdef SK_GAMMA_APPLY_TO_A8
+ // adjust width based on gamma
+ fragBuilder->codeAppendf("distance -= %s;", distanceAdjustUniName);
+#endif
+
+ fragBuilder->codeAppend("float afwidth;");
+ if (isUniformScale) {
+ // For uniform scale, we adjust for the effect of the transformation on the distance
+ // by using the length of the gradient of the t coordinate in the y direction.
+ // We use st coordinates to ensure we're mapping 1:1 from texel space to pixel space.
+
+ // this gives us a smooth step across approximately one fragment
+#ifdef SK_VULKAN
+ fragBuilder->codeAppendf("afwidth = abs(" SK_DistanceFieldAAFactor "*dFdx(%s.x));",
+ st.fsIn());
+#else
+ // We use the y gradient because there is a bug in the Mali 400 in the x direction.
+ fragBuilder->codeAppendf("afwidth = abs(" SK_DistanceFieldAAFactor "*dFdy(%s.y));",
+ st.fsIn());
+#endif
+ } else if (isSimilarity) {
+ // For similarity transform, we adjust the effect of the transformation on the distance
+ // by using the length of the gradient of the texture coordinates. We use st coordinates
+ // to ensure we're mapping 1:1 from texel space to pixel space.
+ // We use the y gradient because there is a bug in the Mali 400 in the x direction.
+
+ // this gives us a smooth step across approximately one fragment
+#ifdef SK_VULKAN
+ fragBuilder->codeAppendf("float st_grad_len = length(dFdx(%s));", st.fsIn());
+#else
+ // We use the y gradient because there is a bug in the Mali 400 in the x direction.
+ fragBuilder->codeAppendf("float st_grad_len = length(dFdy(%s));", st.fsIn());
+#endif
+ fragBuilder->codeAppend("afwidth = abs(" SK_DistanceFieldAAFactor "*st_grad_len);");
+ } else {
+ // For general transforms, to determine the amount of correction we multiply a unit
+ // vector pointing along the SDF gradient direction by the Jacobian of the st coords
+ // (which is the inverse transform for this fragment) and take the length of the result.
+ fragBuilder->codeAppend("vec2 dist_grad = vec2(dFdx(distance), dFdy(distance));");
+ // the length of the gradient may be 0, so we need to check for this
+ // this also compensates for the Adreno, which likes to drop tiles on division by 0
+ fragBuilder->codeAppend("float dg_len2 = dot(dist_grad, dist_grad);");
+ fragBuilder->codeAppend("if (dg_len2 < 0.0001) {");
+ fragBuilder->codeAppend("dist_grad = vec2(0.7071, 0.7071);");
+ fragBuilder->codeAppend("} else {");
+ fragBuilder->codeAppend("dist_grad = dist_grad*inversesqrt(dg_len2);");
+ fragBuilder->codeAppend("}");
+
+ fragBuilder->codeAppendf("vec2 Jdx = dFdx(%s);", st.fsIn());
+ fragBuilder->codeAppendf("vec2 Jdy = dFdy(%s);", st.fsIn());
+ fragBuilder->codeAppend("vec2 grad = vec2(dist_grad.x*Jdx.x + dist_grad.y*Jdy.x,");
+ fragBuilder->codeAppend(" dist_grad.x*Jdx.y + dist_grad.y*Jdy.y);");
+
+ // this gives us a smooth step across approximately one fragment
+ fragBuilder->codeAppend("afwidth = " SK_DistanceFieldAAFactor "*length(grad);");
+ }
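+        // Illustrative sketch of the fragment snippet assembled by the branch above,
+        // with "st" standing in for the IntTextureCoords varying and 0.65 substituted
+        // for SK_DistanceFieldAAFactor:
+        //   vec2 dist_grad = vec2(dFdx(distance), dFdy(distance));
+        //   if (dot(dist_grad, dist_grad) < 0.0001) dist_grad = vec2(0.7071);
+        //   else                                    dist_grad = normalize(dist_grad);
+        //   vec2 Jdx = dFdx(st);  vec2 Jdy = dFdy(st);
+        //   vec2 grad = vec2(dist_grad.x*Jdx.x + dist_grad.y*Jdy.x,   // Jacobian of st
+        //                    dist_grad.x*Jdx.y + dist_grad.y*Jdy.y);  // applied to dist_grad
+        //   afwidth = 0.65*length(grad);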
+
+ // The smoothstep falloff compensates for the non-linear sRGB response curve. If we are
+ // doing gamma-correct rendering (to an sRGB or F16 buffer), then we actually want distance
+ // mapped linearly to coverage, so use a linear step:
+ if (isGammaCorrect) {
+ fragBuilder->codeAppend(
+ "float val = clamp(distance + afwidth / (2.0 * afwidth), 0.0, 1.0);");
+ } else {
+ fragBuilder->codeAppend("float val = smoothstep(-afwidth, afwidth, distance);");
+ }
+
+ fragBuilder->codeAppendf("%s = vec4(val);", args.fOutputCoverage);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& proc,
+ FPCoordTransformIter&& transformIter) override {
+#ifdef SK_GAMMA_APPLY_TO_A8
+ const GrDistanceFieldA8TextGeoProc& dfTexEffect = proc.cast<GrDistanceFieldA8TextGeoProc>();
+ float distanceAdjust = dfTexEffect.getDistanceAdjust();
+ if (distanceAdjust != fDistanceAdjust) {
+ pdman.set1f(fDistanceAdjustUni, distanceAdjust);
+ fDistanceAdjust = distanceAdjust;
+ }
+#endif
+ const GrDistanceFieldA8TextGeoProc& dfa8gp = proc.cast<GrDistanceFieldA8TextGeoProc>();
+
+ if (!dfa8gp.viewMatrix().isIdentity() && !fViewMatrix.cheapEqualTo(dfa8gp.viewMatrix())) {
+ fViewMatrix = dfa8gp.viewMatrix();
+ float viewMatrix[3 * 3];
+ GrGLSLGetMatrix<3>(viewMatrix, fViewMatrix);
+ pdman.setMatrix3f(fViewMatrixUniform, viewMatrix);
+ }
+ this->setTransformDataHelper(SkMatrix::I(), pdman, &transformIter);
+ }
+
+ static inline void GenKey(const GrGeometryProcessor& gp,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrDistanceFieldA8TextGeoProc& dfTexEffect = gp.cast<GrDistanceFieldA8TextGeoProc>();
+ uint32_t key = dfTexEffect.getFlags();
+ key |= dfTexEffect.colorIgnored() << 16;
+ key |= ComputePosKey(dfTexEffect.viewMatrix()) << 25;
+ b->add32(key);
+
+ // Currently we hardcode numbers to convert atlas coordinates to normalized floating point
+ SkASSERT(gp.numTextures() == 1);
+ GrTexture* atlas = gp.textureAccess(0).getTexture();
+ SkASSERT(atlas);
+ b->add32(atlas->width());
+ b->add32(atlas->height());
+ }
+
+private:
+ SkMatrix fViewMatrix;
+ UniformHandle fViewMatrixUniform;
+#ifdef SK_GAMMA_APPLY_TO_A8
+ float fDistanceAdjust;
+ UniformHandle fDistanceAdjustUni;
+#endif
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrDistanceFieldA8TextGeoProc::GrDistanceFieldA8TextGeoProc(GrColor color,
+ const SkMatrix& viewMatrix,
+ GrTexture* texture,
+ const GrTextureParams& params,
+#ifdef SK_GAMMA_APPLY_TO_A8
+ float distanceAdjust,
+#endif
+ uint32_t flags,
+ bool usesLocalCoords)
+ : fColor(color)
+ , fViewMatrix(viewMatrix)
+ , fTextureAccess(texture, params)
+#ifdef SK_GAMMA_APPLY_TO_A8
+ , fDistanceAdjust(distanceAdjust)
+#endif
+ , fFlags(flags & kNonLCD_DistanceFieldEffectMask)
+ , fInColor(nullptr)
+ , fUsesLocalCoords(usesLocalCoords) {
+ SkASSERT(!(flags & ~kNonLCD_DistanceFieldEffectMask));
+ this->initClassID<GrDistanceFieldA8TextGeoProc>();
+ fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ fInColor = &this->addVertexAttrib("inColor", kVec4ub_GrVertexAttribType);
+ fInTextureCoords = &this->addVertexAttrib("inTextureCoords", kVec2us_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ this->addTextureAccess(&fTextureAccess);
+}
+
+void GrDistanceFieldA8TextGeoProc::getGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLDistanceFieldA8TextGeoProc::GenKey(*this, caps, b);
+}
+
+GrGLSLPrimitiveProcessor* GrDistanceFieldA8TextGeoProc::createGLSLInstance(const GrGLSLCaps&) const {
+ return new GrGLDistanceFieldA8TextGeoProc();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(GrDistanceFieldA8TextGeoProc);
+
+sk_sp<GrGeometryProcessor> GrDistanceFieldA8TextGeoProc::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx :
+ GrProcessorUnitTest::kAlphaTextureIdx;
+ static const SkShader::TileMode kTileModes[] = {
+ SkShader::kClamp_TileMode,
+ SkShader::kRepeat_TileMode,
+ SkShader::kMirror_TileMode,
+ };
+ SkShader::TileMode tileModes[] = {
+ kTileModes[d->fRandom->nextULessThan(SK_ARRAY_COUNT(kTileModes))],
+ kTileModes[d->fRandom->nextULessThan(SK_ARRAY_COUNT(kTileModes))],
+ };
+ GrTextureParams params(tileModes, d->fRandom->nextBool() ? GrTextureParams::kBilerp_FilterMode :
+ GrTextureParams::kNone_FilterMode);
+
+ uint32_t flags = 0;
+ flags |= d->fRandom->nextBool() ? kSimilarity_DistanceFieldEffectFlag : 0;
+ if (flags & kSimilarity_DistanceFieldEffectFlag) {
+ flags |= d->fRandom->nextBool() ? kScaleOnly_DistanceFieldEffectFlag : 0;
+ }
+
+ return GrDistanceFieldA8TextGeoProc::Make(GrRandomColor(d->fRandom),
+ GrTest::TestMatrix(d->fRandom),
+ d->fTextures[texIdx], params,
+#ifdef SK_GAMMA_APPLY_TO_A8
+ d->fRandom->nextF(),
+#endif
+ flags,
+ d->fRandom->nextBool());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLDistanceFieldPathGeoProc : public GrGLSLGeometryProcessor {
+public:
+ GrGLDistanceFieldPathGeoProc()
+ : fViewMatrix(SkMatrix::InvalidMatrix())
+ , fTextureSize(SkISize::Make(-1, -1)) {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override{
+ const GrDistanceFieldPathGeoProc& dfTexEffect = args.fGP.cast<GrDistanceFieldPathGeoProc>();
+
+ GrGLSLPPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkAssertResult(fragBuilder->enableFeature(
+ GrGLSLFragmentShaderBuilder::kStandardDerivatives_GLSLFeature));
+
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(dfTexEffect);
+
+ GrGLSLVertToFrag v(kVec2f_GrSLType);
+ varyingHandler->addVarying("TextureCoords", &v, kHigh_GrSLPrecision);
+
+ // setup pass through color
+ if (!dfTexEffect.colorIgnored()) {
+ varyingHandler->addPassThroughAttribute(dfTexEffect.inColor(), args.fOutputColor);
+ }
+ vertBuilder->codeAppendf("%s = %s;", v.vsOut(), dfTexEffect.inTextureCoords()->fName);
+
+ // Setup position
+ this->setupPosition(vertBuilder,
+ uniformHandler,
+ gpArgs,
+ dfTexEffect.inPosition()->fName,
+ dfTexEffect.viewMatrix(),
+ &fViewMatrixUniform);
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ gpArgs->fPositionVar,
+ dfTexEffect.inPosition()->fName,
+ args.fFPCoordTransformHandler);
+
+ const char* textureSizeUniName = nullptr;
+ fTextureSizeUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType, kDefault_GrSLPrecision,
+ "TextureSize", &textureSizeUniName);
+
+ // Use highp to work around aliasing issues
+ fragBuilder->appendPrecisionModifier(kHigh_GrSLPrecision);
+ fragBuilder->codeAppendf("vec2 uv = %s;", v.fsIn());
+
+ fragBuilder->codeAppend("float texColor = ");
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0],
+ "uv",
+ kVec2f_GrSLType);
+ fragBuilder->codeAppend(".r;");
+ fragBuilder->codeAppend("float distance = "
+ SK_DistanceFieldMultiplier "*(texColor - " SK_DistanceFieldThreshold ");");
+
+ fragBuilder->appendPrecisionModifier(kHigh_GrSLPrecision);
+ fragBuilder->codeAppendf("vec2 st = uv*%s;", textureSizeUniName);
+ fragBuilder->codeAppend("float afwidth;");
+ bool isUniformScale = (dfTexEffect.getFlags() & kUniformScale_DistanceFieldEffectMask) ==
+ kUniformScale_DistanceFieldEffectMask;
+ bool isSimilarity = SkToBool(dfTexEffect.getFlags() & kSimilarity_DistanceFieldEffectFlag);
+ bool isGammaCorrect =
+ SkToBool(dfTexEffect.getFlags() & kGammaCorrect_DistanceFieldEffectFlag);
+ if (isUniformScale) {
+ // For uniform scale, we adjust for the effect of the transformation on the distance
+ // by using the length of the gradient of the t coordinate in the y direction.
+ // We use st coordinates to ensure we're mapping 1:1 from texel space to pixel space.
+
+ // this gives us a smooth step across approximately one fragment
+#ifdef SK_VULKAN
+ fragBuilder->codeAppend("afwidth = abs(" SK_DistanceFieldAAFactor "*dFdx(st.x));");
+#else
+ // We use the y gradient because there is a bug in the Mali 400 in the x direction.
+ fragBuilder->codeAppend("afwidth = abs(" SK_DistanceFieldAAFactor "*dFdy(st.y));");
+#endif
+ } else if (isSimilarity) {
+ // For similarity transform, we adjust the effect of the transformation on the distance
+ // by using the length of the gradient of the texture coordinates. We use st coordinates
+ // to ensure we're mapping 1:1 from texel space to pixel space.
+
+ // this gives us a smooth step across approximately one fragment
+#ifdef SK_VULKAN
+ fragBuilder->codeAppend("float st_grad_len = length(dFdx(st));");
+#else
+ // We use the y gradient because there is a bug in the Mali 400 in the x direction.
+ fragBuilder->codeAppend("float st_grad_len = length(dFdy(st));");
+#endif
+ fragBuilder->codeAppend("afwidth = abs(" SK_DistanceFieldAAFactor "*st_grad_len);");
+ } else {
+ // For general transforms, to determine the amount of correction we multiply a unit
+ // vector pointing along the SDF gradient direction by the Jacobian of the st coords
+ // (which is the inverse transform for this fragment) and take the length of the result.
+ fragBuilder->codeAppend("vec2 dist_grad = vec2(dFdx(distance), dFdy(distance));");
+ // the length of the gradient may be 0, so we need to check for this
+ // this also compensates for the Adreno, which likes to drop tiles on division by 0
+ fragBuilder->codeAppend("float dg_len2 = dot(dist_grad, dist_grad);");
+ fragBuilder->codeAppend("if (dg_len2 < 0.0001) {");
+ fragBuilder->codeAppend("dist_grad = vec2(0.7071, 0.7071);");
+ fragBuilder->codeAppend("} else {");
+ fragBuilder->codeAppend("dist_grad = dist_grad*inversesqrt(dg_len2);");
+ fragBuilder->codeAppend("}");
+
+ fragBuilder->codeAppend("vec2 Jdx = dFdx(st);");
+ fragBuilder->codeAppend("vec2 Jdy = dFdy(st);");
+ fragBuilder->codeAppend("vec2 grad = vec2(dist_grad.x*Jdx.x + dist_grad.y*Jdy.x,");
+ fragBuilder->codeAppend(" dist_grad.x*Jdx.y + dist_grad.y*Jdy.y);");
+
+ // this gives us a smooth step across approximately one fragment
+ fragBuilder->codeAppend("afwidth = " SK_DistanceFieldAAFactor "*length(grad);");
+ }
+ // The smoothstep falloff compensates for the non-linear sRGB response curve. If we are
+ // doing gamma-correct rendering (to an sRGB or F16 buffer), then we actually want distance
+ // mapped linearly to coverage, so use a linear step:
+ if (isGammaCorrect) {
+ fragBuilder->codeAppend(
+ "float val = clamp(distance + afwidth / (2.0 * afwidth), 0.0, 1.0);");
+ } else {
+ fragBuilder->codeAppend("float val = smoothstep(-afwidth, afwidth, distance);");
+ }
+
+ fragBuilder->codeAppendf("%s = vec4(val);", args.fOutputCoverage);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& proc,
+ FPCoordTransformIter&& transformIter) override {
+ SkASSERT(fTextureSizeUni.isValid());
+
+ GrTexture* texture = proc.texture(0);
+ if (texture->width() != fTextureSize.width() ||
+ texture->height() != fTextureSize.height()) {
+ fTextureSize = SkISize::Make(texture->width(), texture->height());
+ pdman.set2f(fTextureSizeUni,
+ SkIntToScalar(fTextureSize.width()),
+ SkIntToScalar(fTextureSize.height()));
+ }
+
+ const GrDistanceFieldPathGeoProc& dfpgp = proc.cast<GrDistanceFieldPathGeoProc>();
+
+ if (!dfpgp.viewMatrix().isIdentity() && !fViewMatrix.cheapEqualTo(dfpgp.viewMatrix())) {
+ fViewMatrix = dfpgp.viewMatrix();
+ float viewMatrix[3 * 3];
+ GrGLSLGetMatrix<3>(viewMatrix, fViewMatrix);
+ pdman.setMatrix3f(fViewMatrixUniform, viewMatrix);
+ }
+ this->setTransformDataHelper(SkMatrix::I(), pdman, &transformIter);
+ }
+
+ static inline void GenKey(const GrGeometryProcessor& gp,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrDistanceFieldPathGeoProc& dfTexEffect = gp.cast<GrDistanceFieldPathGeoProc>();
+
+ uint32_t key = dfTexEffect.getFlags();
+ key |= dfTexEffect.colorIgnored() << 16;
+ key |= ComputePosKey(dfTexEffect.viewMatrix()) << 25;
+ b->add32(key);
+ }
+
+private:
+ UniformHandle fTextureSizeUni;
+ UniformHandle fViewMatrixUniform;
+ SkMatrix fViewMatrix;
+ SkISize fTextureSize;
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrDistanceFieldPathGeoProc::GrDistanceFieldPathGeoProc(
+ GrColor color,
+ const SkMatrix& viewMatrix,
+ GrTexture* texture,
+ const GrTextureParams& params,
+ uint32_t flags,
+ bool usesLocalCoords)
+ : fColor(color)
+ , fViewMatrix(viewMatrix)
+ , fTextureAccess(texture, params)
+ , fFlags(flags & kNonLCD_DistanceFieldEffectMask)
+ , fInColor(nullptr)
+ , fUsesLocalCoords(usesLocalCoords) {
+ SkASSERT(!(flags & ~kNonLCD_DistanceFieldEffectMask));
+ this->initClassID<GrDistanceFieldPathGeoProc>();
+ fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ fInColor = &this->addVertexAttrib("inColor", kVec4ub_GrVertexAttribType);
+ fInTextureCoords = &this->addVertexAttrib("inTextureCoords", kVec2f_GrVertexAttribType);
+ this->addTextureAccess(&fTextureAccess);
+}
+
+void GrDistanceFieldPathGeoProc::getGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLDistanceFieldPathGeoProc::GenKey(*this, caps, b);
+}
+
+GrGLSLPrimitiveProcessor* GrDistanceFieldPathGeoProc::createGLSLInstance(const GrGLSLCaps&) const {
+ return new GrGLDistanceFieldPathGeoProc();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(GrDistanceFieldPathGeoProc);
+
+sk_sp<GrGeometryProcessor> GrDistanceFieldPathGeoProc::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx
+ : GrProcessorUnitTest::kAlphaTextureIdx;
+ static const SkShader::TileMode kTileModes[] = {
+ SkShader::kClamp_TileMode,
+ SkShader::kRepeat_TileMode,
+ SkShader::kMirror_TileMode,
+ };
+ SkShader::TileMode tileModes[] = {
+ kTileModes[d->fRandom->nextULessThan(SK_ARRAY_COUNT(kTileModes))],
+ kTileModes[d->fRandom->nextULessThan(SK_ARRAY_COUNT(kTileModes))],
+ };
+ GrTextureParams params(tileModes, d->fRandom->nextBool() ? GrTextureParams::kBilerp_FilterMode
+ : GrTextureParams::kNone_FilterMode);
+
+ uint32_t flags = 0;
+ flags |= d->fRandom->nextBool() ? kSimilarity_DistanceFieldEffectFlag : 0;
+ if (flags & kSimilarity_DistanceFieldEffectFlag) {
+ flags |= d->fRandom->nextBool() ? kScaleOnly_DistanceFieldEffectFlag : 0;
+ }
+
+ return GrDistanceFieldPathGeoProc::Make(GrRandomColor(d->fRandom),
+ GrTest::TestMatrix(d->fRandom),
+ d->fTextures[texIdx],
+ params,
+ flags,
+ d->fRandom->nextBool());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrGLDistanceFieldLCDTextGeoProc : public GrGLSLGeometryProcessor {
+public:
+ GrGLDistanceFieldLCDTextGeoProc()
+ : fViewMatrix(SkMatrix::InvalidMatrix()) {
+ fDistanceAdjust = GrDistanceFieldLCDTextGeoProc::DistanceAdjust::Make(1.0f, 1.0f, 1.0f);
+ }
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override{
+ const GrDistanceFieldLCDTextGeoProc& dfTexEffect =
+ args.fGP.cast<GrDistanceFieldLCDTextGeoProc>();
+
+ GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ // emit attributes
+ varyingHandler->emitAttributes(dfTexEffect);
+
+ GrGLSLPPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ // setup pass through color
+ if (!dfTexEffect.colorIgnored()) {
+ varyingHandler->addPassThroughAttribute(dfTexEffect.inColor(), args.fOutputColor);
+ }
+
+ // Setup position
+ this->setupPosition(vertBuilder,
+ uniformHandler,
+ gpArgs,
+ dfTexEffect.inPosition()->fName,
+ dfTexEffect.viewMatrix(),
+ &fViewMatrixUniform);
+
+ // emit transforms
+ this->emitTransforms(vertBuilder,
+ varyingHandler,
+ uniformHandler,
+ gpArgs->fPositionVar,
+ dfTexEffect.inPosition()->fName,
+ args.fFPCoordTransformHandler);
+
+ // set up varyings
+ bool isUniformScale = (dfTexEffect.getFlags() & kUniformScale_DistanceFieldEffectMask) ==
+ kUniformScale_DistanceFieldEffectMask;
+ bool isSimilarity = SkToBool(dfTexEffect.getFlags() & kSimilarity_DistanceFieldEffectFlag);
+ bool isGammaCorrect =
+ SkToBool(dfTexEffect.getFlags() & kGammaCorrect_DistanceFieldEffectFlag);
+ GrGLSLVertToFrag recipScale(kFloat_GrSLType);
+ GrGLSLVertToFrag uv(kVec2f_GrSLType);
+ varyingHandler->addVarying("TextureCoords", &uv, kHigh_GrSLPrecision);
+ vertBuilder->codeAppendf("%s = %s;", uv.vsOut(), dfTexEffect.inTextureCoords()->fName);
+
+ // compute numbers to be hardcoded to convert texture coordinates from float to int
+ SkASSERT(dfTexEffect.numTextures() == 1);
+ GrTexture* atlas = dfTexEffect.textureAccess(0).getTexture();
+ SkASSERT(atlas && SkIsPow2(atlas->width()) && SkIsPow2(atlas->height()));
+
+ GrGLSLVertToFrag st(kVec2f_GrSLType);
+ varyingHandler->addVarying("IntTextureCoords", &st, kHigh_GrSLPrecision);
+ vertBuilder->codeAppendf("%s = vec2(%d, %d) * %s;", st.vsOut(),
+ atlas->width(), atlas->height(),
+ dfTexEffect.inTextureCoords()->fName);
+
+ // add frag shader code
+
+ SkAssertResult(fragBuilder->enableFeature(
+ GrGLSLFragmentShaderBuilder::kStandardDerivatives_GLSLFeature));
+
+ // create LCD offset adjusted by inverse of transform
+ // Use highp to work around aliasing issues
+ fragBuilder->appendPrecisionModifier(kHigh_GrSLPrecision);
+ fragBuilder->codeAppendf("vec2 uv = %s;\n", uv.fsIn());
+ fragBuilder->appendPrecisionModifier(kHigh_GrSLPrecision);
+
+ SkScalar lcdDelta = 1.0f / (3.0f * atlas->width());
+ if (dfTexEffect.getFlags() & kBGR_DistanceFieldEffectFlag) {
+ fragBuilder->codeAppendf("float delta = -%.*f;\n", SK_FLT_DECIMAL_DIG, lcdDelta);
+ } else {
+ fragBuilder->codeAppendf("float delta = %.*f;\n", SK_FLT_DECIMAL_DIG, lcdDelta);
+ }
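+        // Worked example (illustrative): delta is one third of a texel in u, so for a
+        // hypothetical 2048-wide atlas lcdDelta = 1/(3*2048) ~= 0.000163; the negative
+        // sign simply swaps the left/right subpixel samples for BGR panels.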
+ if (isUniformScale) {
+#ifdef SK_VULKAN
+ fragBuilder->codeAppendf("float st_grad_len = abs(dFdx(%s.x));", st.fsIn());
+#else
+ // We use the y gradient because there is a bug in the Mali 400 in the x direction.
+ fragBuilder->codeAppendf("float st_grad_len = abs(dFdy(%s.y));", st.fsIn());
+#endif
+ fragBuilder->codeAppend("vec2 offset = vec2(st_grad_len*delta, 0.0);");
+ } else if (isSimilarity) {
+ // For a similarity matrix with rotation, the gradient will not be aligned
+ // with the texel coordinate axes, so we need to calculate it.
+#ifdef SK_VULKAN
+ fragBuilder->codeAppendf("vec2 st_grad = dFdx(%s);", st.fsIn());
+ fragBuilder->codeAppend("vec2 offset = delta*st_grad;");
+#else
+ // We use dFdy because of a Mali 400 bug, and rotate -90 degrees to
+ // get the gradient in the x direction.
+ fragBuilder->codeAppendf("vec2 st_grad = dFdy(%s);", st.fsIn());
+ fragBuilder->codeAppend("vec2 offset = delta*vec2(st_grad.y, -st_grad.x);");
+#endif
+ fragBuilder->codeAppend("float st_grad_len = length(st_grad);");
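+            // (In the non-Vulkan branch above, rotating (x, y) by -90 degrees gives
+            // (y, -x), hence the vec2(st_grad.y, -st_grad.x) offset.)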
+ } else {
+ fragBuilder->codeAppendf("vec2 st = %s;\n", st.fsIn());
+
+ fragBuilder->codeAppend("vec2 Jdx = dFdx(st);");
+ fragBuilder->codeAppend("vec2 Jdy = dFdy(st);");
+ fragBuilder->codeAppend("vec2 offset = delta*Jdx;");
+ }
+
+ // green is distance to uv center
+ fragBuilder->codeAppend("\tvec4 texColor = ");
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], "uv", kVec2f_GrSLType);
+ fragBuilder->codeAppend(";\n");
+ fragBuilder->codeAppend("\tvec3 distance;\n");
+ fragBuilder->codeAppend("\tdistance.y = texColor.r;\n");
+ // red is distance to left offset
+ fragBuilder->codeAppend("\tvec2 uv_adjusted = uv - offset;\n");
+ fragBuilder->codeAppend("\ttexColor = ");
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], "uv_adjusted", kVec2f_GrSLType);
+ fragBuilder->codeAppend(";\n");
+ fragBuilder->codeAppend("\tdistance.x = texColor.r;\n");
+ // blue is distance to right offset
+ fragBuilder->codeAppend("\tuv_adjusted = uv + offset;\n");
+ fragBuilder->codeAppend("\ttexColor = ");
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], "uv_adjusted", kVec2f_GrSLType);
+ fragBuilder->codeAppend(";\n");
+ fragBuilder->codeAppend("\tdistance.z = texColor.r;\n");
+
+ fragBuilder->codeAppend("\tdistance = "
+ "vec3(" SK_DistanceFieldMultiplier ")*(distance - vec3(" SK_DistanceFieldThreshold"));");
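+        // At this point distance.xyz holds the signed distances sampled at the left,
+        // center, and right subpixel positions (uv - offset, uv, uv + offset), already
+        // remapped by the multiplier/threshold constants above.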
+
+ // adjust width based on gamma
+ const char* distanceAdjustUniName = nullptr;
+ fDistanceAdjustUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec3f_GrSLType, kDefault_GrSLPrecision,
+ "DistanceAdjust", &distanceAdjustUniName);
+ fragBuilder->codeAppendf("distance -= %s;", distanceAdjustUniName);
+
+ // To be strictly correct, we should compute the anti-aliasing factor separately
+ // for each color component. However, this is only important when using perspective
+ // transformations, and even then using a single factor seems like a reasonable
+ // trade-off between quality and speed.
+ fragBuilder->codeAppend("float afwidth;");
+ if (isSimilarity) {
+ // For similarity transform (uniform scale-only is a subset of this), we adjust for the
+ // effect of the transformation on the distance by using the length of the gradient of
+ // the texture coordinates. We use st coordinates to ensure we're mapping 1:1 from texel
+ // space to pixel space.
+
+ // this gives us a smooth step across approximately one fragment
+ fragBuilder->codeAppend("afwidth = " SK_DistanceFieldAAFactor "*st_grad_len;");
+ } else {
+ // For general transforms, to determine the amount of correction we multiply a unit
+ // vector pointing along the SDF gradient direction by the Jacobian of the st coords
+ // (which is the inverse transform for this fragment) and take the length of the result.
+ fragBuilder->codeAppend("vec2 dist_grad = vec2(dFdx(distance.r), dFdy(distance.r));");
+ // the length of the gradient may be 0, so we need to check for this
+ // this also compensates for the Adreno, which likes to drop tiles on division by 0
+ fragBuilder->codeAppend("float dg_len2 = dot(dist_grad, dist_grad);");
+ fragBuilder->codeAppend("if (dg_len2 < 0.0001) {");
+ fragBuilder->codeAppend("dist_grad = vec2(0.7071, 0.7071);");
+ fragBuilder->codeAppend("} else {");
+ fragBuilder->codeAppend("dist_grad = dist_grad*inversesqrt(dg_len2);");
+ fragBuilder->codeAppend("}");
+ fragBuilder->codeAppend("vec2 grad = vec2(dist_grad.x*Jdx.x + dist_grad.y*Jdy.x,");
+ fragBuilder->codeAppend(" dist_grad.x*Jdx.y + dist_grad.y*Jdy.y);");
+
+ // this gives us a smooth step across approximately one fragment
+ fragBuilder->codeAppend("afwidth = " SK_DistanceFieldAAFactor "*length(grad);");
+ }
+
+ // The smoothstep falloff compensates for the non-linear sRGB response curve. If we are
+ // doing gamma-correct rendering (to an sRGB or F16 buffer), then we actually want distance
+ // mapped linearly to coverage, so use a linear step:
+ if (isGammaCorrect) {
+ fragBuilder->codeAppend("vec4 val = "
+ "vec4(clamp(distance + vec3(afwidth) / vec3(2.0 * afwidth), 0.0, 1.0), 1.0);");
+ } else {
+ fragBuilder->codeAppend(
+ "vec4 val = vec4(smoothstep(vec3(-afwidth), vec3(afwidth), distance), 1.0);");
+ }
+
+ // set alpha to be max of rgb coverage
+ fragBuilder->codeAppend("val.a = max(max(val.r, val.g), val.b);");
+
+ fragBuilder->codeAppendf("%s = val;", args.fOutputCoverage);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& processor,
+ FPCoordTransformIter&& transformIter) override {
+ SkASSERT(fDistanceAdjustUni.isValid());
+
+ const GrDistanceFieldLCDTextGeoProc& dflcd = processor.cast<GrDistanceFieldLCDTextGeoProc>();
+ GrDistanceFieldLCDTextGeoProc::DistanceAdjust wa = dflcd.getDistanceAdjust();
+ if (wa != fDistanceAdjust) {
+ pdman.set3f(fDistanceAdjustUni,
+ wa.fR,
+ wa.fG,
+ wa.fB);
+ fDistanceAdjust = wa;
+ }
+
+ if (!dflcd.viewMatrix().isIdentity() && !fViewMatrix.cheapEqualTo(dflcd.viewMatrix())) {
+ fViewMatrix = dflcd.viewMatrix();
+ float viewMatrix[3 * 3];
+ GrGLSLGetMatrix<3>(viewMatrix, fViewMatrix);
+ pdman.setMatrix3f(fViewMatrixUniform, viewMatrix);
+ }
+ this->setTransformDataHelper(SkMatrix::I(), pdman, &transformIter);
+ }
+
+ static inline void GenKey(const GrGeometryProcessor& gp,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrDistanceFieldLCDTextGeoProc& dfTexEffect = gp.cast<GrDistanceFieldLCDTextGeoProc>();
+
+ uint32_t key = dfTexEffect.getFlags();
+ key |= dfTexEffect.colorIgnored() << 16;
+ key |= ComputePosKey(dfTexEffect.viewMatrix()) << 25;
+ b->add32(key);
+
+ // Currently we hardcode numbers to convert atlas coordinates to normalized floating point
+ SkASSERT(gp.numTextures() == 1);
+ GrTexture* atlas = gp.textureAccess(0).getTexture();
+ SkASSERT(atlas);
+ b->add32(atlas->width());
+ b->add32(atlas->height());
+ }
+
+private:
+ SkMatrix fViewMatrix;
+ UniformHandle fViewMatrixUniform;
+ UniformHandle fColorUniform;
+ GrDistanceFieldLCDTextGeoProc::DistanceAdjust fDistanceAdjust;
+ UniformHandle fDistanceAdjustUni;
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrDistanceFieldLCDTextGeoProc::GrDistanceFieldLCDTextGeoProc(
+ GrColor color, const SkMatrix& viewMatrix,
+ GrTexture* texture, const GrTextureParams& params,
+ DistanceAdjust distanceAdjust,
+ uint32_t flags, bool usesLocalCoords)
+ : fColor(color)
+ , fViewMatrix(viewMatrix)
+ , fTextureAccess(texture, params)
+ , fDistanceAdjust(distanceAdjust)
+ , fFlags(flags & kLCD_DistanceFieldEffectMask)
+ , fUsesLocalCoords(usesLocalCoords) {
+ SkASSERT(!(flags & ~kLCD_DistanceFieldEffectMask) && (flags & kUseLCD_DistanceFieldEffectFlag));
+ this->initClassID<GrDistanceFieldLCDTextGeoProc>();
+ fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ fInColor = &this->addVertexAttrib("inColor", kVec4ub_GrVertexAttribType);
+ fInTextureCoords = &this->addVertexAttrib("inTextureCoords", kVec2us_GrVertexAttribType,
+ kHigh_GrSLPrecision);
+ this->addTextureAccess(&fTextureAccess);
+}
+
+void GrDistanceFieldLCDTextGeoProc::getGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLDistanceFieldLCDTextGeoProc::GenKey(*this, caps, b);
+}
+
+GrGLSLPrimitiveProcessor* GrDistanceFieldLCDTextGeoProc::createGLSLInstance(const GrGLSLCaps&) const {
+ return new GrGLDistanceFieldLCDTextGeoProc();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(GrDistanceFieldLCDTextGeoProc);
+
+sk_sp<GrGeometryProcessor> GrDistanceFieldLCDTextGeoProc::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx :
+ GrProcessorUnitTest::kAlphaTextureIdx;
+ static const SkShader::TileMode kTileModes[] = {
+ SkShader::kClamp_TileMode,
+ SkShader::kRepeat_TileMode,
+ SkShader::kMirror_TileMode,
+ };
+ SkShader::TileMode tileModes[] = {
+ kTileModes[d->fRandom->nextULessThan(SK_ARRAY_COUNT(kTileModes))],
+ kTileModes[d->fRandom->nextULessThan(SK_ARRAY_COUNT(kTileModes))],
+ };
+ GrTextureParams params(tileModes, d->fRandom->nextBool() ? GrTextureParams::kBilerp_FilterMode :
+ GrTextureParams::kNone_FilterMode);
+ DistanceAdjust wa = { 0.0f, 0.1f, -0.1f };
+ uint32_t flags = kUseLCD_DistanceFieldEffectFlag;
+ flags |= d->fRandom->nextBool() ? kSimilarity_DistanceFieldEffectFlag : 0;
+ if (flags & kSimilarity_DistanceFieldEffectFlag) {
+ flags |= d->fRandom->nextBool() ? kScaleOnly_DistanceFieldEffectFlag : 0;
+ }
+ flags |= d->fRandom->nextBool() ? kBGR_DistanceFieldEffectFlag : 0;
+ return GrDistanceFieldLCDTextGeoProc::Make(GrRandomColor(d->fRandom),
+ GrTest::TestMatrix(d->fRandom),
+ d->fTextures[texIdx], params,
+ wa,
+ flags,
+ d->fRandom->nextBool());
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrDistanceFieldGeoProc.h b/gfx/skia/skia/src/gpu/effects/GrDistanceFieldGeoProc.h
new file mode 100644
index 000000000..339c063b3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrDistanceFieldGeoProc.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDistanceFieldGeoProc_DEFINED
+#define GrDistanceFieldGeoProc_DEFINED
+
+#include "GrProcessor.h"
+#include "GrGeometryProcessor.h"
+
+class GrGLDistanceFieldA8TextGeoProc;
+class GrGLDistanceFieldPathGeoProc;
+class GrGLDistanceFieldLCDTextGeoProc;
+class GrInvariantOutput;
+
+enum GrDistanceFieldEffectFlags {
+ kSimilarity_DistanceFieldEffectFlag = 0x01, // ctm is similarity matrix
+ kScaleOnly_DistanceFieldEffectFlag = 0x02, // ctm has only scale and translate
+ kUseLCD_DistanceFieldEffectFlag = 0x04, // use lcd text
+ kBGR_DistanceFieldEffectFlag = 0x08, // lcd display has bgr order
+ kPortrait_DistanceFieldEffectFlag = 0x10, // lcd display is in portrait mode (not used yet)
+ kGammaCorrect_DistanceFieldEffectFlag = 0x20, // assume gamma-correct output (linear blending)
+
+ kInvalid_DistanceFieldEffectFlag = 0x80, // invalid state (for initialization)
+
+ kUniformScale_DistanceFieldEffectMask = kSimilarity_DistanceFieldEffectFlag |
+ kScaleOnly_DistanceFieldEffectFlag,
+ // The subset of the flags relevant to GrDistanceFieldA8TextGeoProc
+ kNonLCD_DistanceFieldEffectMask = kSimilarity_DistanceFieldEffectFlag |
+ kScaleOnly_DistanceFieldEffectFlag |
+ kGammaCorrect_DistanceFieldEffectFlag,
+ // The subset of the flags relevant to GrDistanceFieldLCDTextGeoProc
+ kLCD_DistanceFieldEffectMask = kSimilarity_DistanceFieldEffectFlag |
+ kScaleOnly_DistanceFieldEffectFlag |
+ kUseLCD_DistanceFieldEffectFlag |
+ kBGR_DistanceFieldEffectFlag |
+ kGammaCorrect_DistanceFieldEffectFlag,
+};
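+// Example of how the masks compose (illustrative): kUniformScale_DistanceFieldEffectMask
+// is 0x01 | 0x02 = 0x03, so a flags value with only the similarity bit set (0x01) passes
+// an "& kSimilarity" test but does not equal the full uniform-scale mask.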
+
+/**
+ * The output color of this effect is a modulation of the input color and a sample from a
+ * distance field texture (using a smoothed step function near 0.5).
+ * It allows explicit specification of the filtering and wrap modes (GrTextureParams). The input
+ * coords are a custom attribute. Gamma correction is handled via a texture LUT.
+ */
+class GrDistanceFieldA8TextGeoProc : public GrGeometryProcessor {
+public:
+#ifdef SK_GAMMA_APPLY_TO_A8
+ static sk_sp<GrGeometryProcessor> Make(GrColor color, const SkMatrix& viewMatrix,
+ GrTexture* tex, const GrTextureParams& params,
+ float lum, uint32_t flags, bool usesLocalCoords) {
+ return sk_sp<GrGeometryProcessor>(
+ new GrDistanceFieldA8TextGeoProc(color, viewMatrix, tex, params, lum, flags,
+ usesLocalCoords));
+ }
+#else
+ static sk_sp<GrGeometryProcessor> Make(GrColor color, const SkMatrix& viewMatrix,
+ GrTexture* tex, const GrTextureParams& params,
+ uint32_t flags, bool usesLocalCoords) {
+ return sk_sp<GrGeometryProcessor>(
+ new GrDistanceFieldA8TextGeoProc(color, viewMatrix, tex, params, flags,
+ usesLocalCoords));
+ }
+#endif
+
+ virtual ~GrDistanceFieldA8TextGeoProc() {}
+
+ const char* name() const override { return "DistanceFieldA8Text"; }
+
+ const Attribute* inPosition() const { return fInPosition; }
+ const Attribute* inColor() const { return fInColor; }
+ const Attribute* inTextureCoords() const { return fInTextureCoords; }
+ GrColor color() const { return fColor; }
+ bool colorIgnored() const { return GrColor_ILLEGAL == fColor; }
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+#ifdef SK_GAMMA_APPLY_TO_A8
+ float getDistanceAdjust() const { return fDistanceAdjust; }
+#endif
+ uint32_t getFlags() const { return fFlags; }
+
+ void getGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override;
+
+private:
+ GrDistanceFieldA8TextGeoProc(GrColor, const SkMatrix& viewMatrix,
+ GrTexture* texture, const GrTextureParams& params,
+#ifdef SK_GAMMA_APPLY_TO_A8
+ float distanceAdjust,
+#endif
+ uint32_t flags, bool usesLocalCoords);
+
+ GrColor fColor;
+ SkMatrix fViewMatrix;
+ GrTextureAccess fTextureAccess;
+#ifdef SK_GAMMA_APPLY_TO_A8
+ float fDistanceAdjust;
+#endif
+ uint32_t fFlags;
+ const Attribute* fInPosition;
+ const Attribute* fInColor;
+ const Attribute* fInTextureCoords;
+ bool fUsesLocalCoords;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+
+/**
+* The output color of this effect is a modulation of the input color and a sample from a
+* distance field texture (using a smoothed step function near 0.5).
+* It allows explicit specification of the filtering and wrap modes (GrTextureParams). The input
+* coords are a custom attribute. No gamma correct blending is applied. Used for paths only.
+*/
+class GrDistanceFieldPathGeoProc : public GrGeometryProcessor {
+public:
+ static sk_sp<GrGeometryProcessor> Make(GrColor color, const SkMatrix& viewMatrix,
+ GrTexture* tex, const GrTextureParams& params,
+ uint32_t flags, bool usesLocalCoords) {
+ return sk_sp<GrGeometryProcessor>(
+ new GrDistanceFieldPathGeoProc(color, viewMatrix, tex, params, flags, usesLocalCoords));
+ }
+
+ virtual ~GrDistanceFieldPathGeoProc() {}
+
+ const char* name() const override { return "DistanceFieldPath"; }
+
+ const Attribute* inPosition() const { return fInPosition; }
+ const Attribute* inColor() const { return fInColor; }
+ const Attribute* inTextureCoords() const { return fInTextureCoords; }
+ GrColor color() const { return fColor; }
+ bool colorIgnored() const { return GrColor_ILLEGAL == fColor; }
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+ uint32_t getFlags() const { return fFlags; }
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+
+ void getGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override;
+
+private:
+ GrDistanceFieldPathGeoProc(GrColor, const SkMatrix& viewMatrix, GrTexture* texture,
+ const GrTextureParams& params, uint32_t flags,
+ bool usesLocalCoords);
+
+ GrColor fColor;
+ SkMatrix fViewMatrix;
+ GrTextureAccess fTextureAccess;
+ uint32_t fFlags;
+ const Attribute* fInPosition;
+ const Attribute* fInColor;
+ const Attribute* fInTextureCoords;
+ bool fUsesLocalCoords;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+/**
+ * The output color of this effect is a modulation of the input color and samples from a
+ * distance field texture (using a smoothed step function near 0.5), adjusted for LCD displays.
+ * It allows explicit specification of the filtering and wrap modes (GrTextureParams). The input
+ * coords are a custom attribute. Gamma correction is handled via a texture LUT.
+ */
+class GrDistanceFieldLCDTextGeoProc : public GrGeometryProcessor {
+public:
+ struct DistanceAdjust {
+ SkScalar fR, fG, fB;
+ static DistanceAdjust Make(SkScalar r, SkScalar g, SkScalar b) {
+ DistanceAdjust result;
+ result.fR = r; result.fG = g; result.fB = b;
+ return result;
+ }
+ bool operator==(const DistanceAdjust& wa) const {
+ return (fR == wa.fR && fG == wa.fG && fB == wa.fB);
+ }
+ bool operator!=(const DistanceAdjust& wa) const {
+ return !(*this == wa);
+ }
+ };
+
+ static sk_sp<GrGeometryProcessor> Make(GrColor color, const SkMatrix& viewMatrix,
+ GrTexture* tex, const GrTextureParams& params,
+ DistanceAdjust distanceAdjust, uint32_t flags,
+ bool usesLocalCoords) {
+ return sk_sp<GrGeometryProcessor>(
+ new GrDistanceFieldLCDTextGeoProc(color, viewMatrix, tex, params, distanceAdjust,
+ flags, usesLocalCoords));
+ }
+
+ virtual ~GrDistanceFieldLCDTextGeoProc() {}
+
+ const char* name() const override { return "DistanceFieldLCDText"; }
+
+ const Attribute* inPosition() const { return fInPosition; }
+ const Attribute* inColor() const { return fInColor; }
+ const Attribute* inTextureCoords() const { return fInTextureCoords; }
+ DistanceAdjust getDistanceAdjust() const { return fDistanceAdjust; }
+ GrColor color() const { return fColor; }
+ bool colorIgnored() const { return GrColor_ILLEGAL == fColor; }
+ const SkMatrix& viewMatrix() const { return fViewMatrix; }
+ uint32_t getFlags() const { return fFlags; }
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+
+ void getGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override;
+
+private:
+ GrDistanceFieldLCDTextGeoProc(GrColor, const SkMatrix& viewMatrix,
+ GrTexture* texture, const GrTextureParams& params,
+ DistanceAdjust wa, uint32_t flags,
+ bool usesLocalCoords);
+
+ GrColor fColor;
+ SkMatrix fViewMatrix;
+ GrTextureAccess fTextureAccess;
+ DistanceAdjust fDistanceAdjust;
+ uint32_t fFlags;
+ const Attribute* fInPosition;
+ const Attribute* fInColor;
+ const Attribute* fInTextureCoords;
+ bool fUsesLocalCoords;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrDitherEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrDitherEffect.cpp
new file mode 100644
index 000000000..1fddec662
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrDitherEffect.cpp
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrDitherEffect.h"
+#include "GrFragmentProcessor.h"
+#include "GrInvariantOutput.h"
+#include "SkRect.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+
+//////////////////////////////////////////////////////////////////////////////
+
+class DitherEffect : public GrFragmentProcessor {
+public:
+ static sk_sp<GrFragmentProcessor> Make() {
+ return sk_sp<GrFragmentProcessor>(new DitherEffect);
+ }
+
+ virtual ~DitherEffect() {}
+
+ const char* name() const override { return "Dither"; }
+
+private:
+ DitherEffect() {
+ this->initClassID<DitherEffect>();
+ this->setWillReadFragmentPosition();
+ }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ // All dither effects are equal
+ bool onIsEqual(const GrFragmentProcessor&) const override { return true; }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+void DitherEffect::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ inout->setToUnknown(GrInvariantOutput::kWill_ReadInput);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(DitherEffect);
+
+sk_sp<GrFragmentProcessor> DitherEffect::TestCreate(GrProcessorTestData*) {
+ return DitherEffect::Make();
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GLDitherEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs& args) override;
+
+private:
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GLDitherEffect::emitCode(EmitArgs& args) {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ // Generate a random number based on the fragment position. For this
+ // random number generator, we use the "GLSL rand" function
+ // that seems to be floating around on the internet. It works under
+ // the assumption that sin(<big number>) oscillates with high frequency
+ // and sampling it will generate "randomness". Since we're using this
+ // for rendering and not cryptography it should be OK.
+
+ // For each channel c, add the random offset to the pixel to either bump
+ // it up or let it remain constant during quantization.
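+    // The two appends below assemble, approximately (using the processor's actual
+    // output/input variable names and gl_FragCoord as the fragment position):
+    //   float r = fract(sin(dot(gl_FragCoord.xy, vec2(12.9898, 78.233))) * 43758.5453);
+    //   outColor = (1.0/255.0) * vec4(r, r, r, r) + inColor;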
+ fragBuilder->codeAppendf("\t\tfloat r = "
+ "fract(sin(dot(%s.xy ,vec2(12.9898,78.233))) * 43758.5453);\n",
+ fragBuilder->fragmentPosition());
+ fragBuilder->codeAppendf("\t\t%s = (1.0/255.0) * vec4(r, r, r, r) + %s;\n",
+ args.fOutputColor, GrGLSLExpr4(args.fInputColor).c_str());
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+void DitherEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GLDitherEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* DitherEffect::onCreateGLSLInstance() const {
+ return new GLDitherEffect;
+}
+
+sk_sp<GrFragmentProcessor> GrDitherEffect::Make() { return DitherEffect::Make(); }
diff --git a/gfx/skia/skia/src/gpu/effects/GrDitherEffect.h b/gfx/skia/skia/src/gpu/effects/GrDitherEffect.h
new file mode 100644
index 000000000..b92723d33
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrDitherEffect.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDitherEffect_DEFINED
+#define GrDitherEffect_DEFINED
+
+#include "GrTypes.h"
+#include "GrTypesPriv.h"
+#include "SkRefCnt.h"
+
+class GrFragmentProcessor;
+
+namespace GrDitherEffect {
+ /**
+ * Creates an effect that dithers the resulting color to an RGBA8 framebuffer
+ */
+ sk_sp<GrFragmentProcessor> Make();
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrGammaEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrGammaEffect.cpp
new file mode 100644
index 000000000..63ffc3242
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrGammaEffect.cpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGammaEffect.h"
+
+#include "GrContext.h"
+#include "GrCoordTransform.h"
+#include "GrFragmentProcessor.h"
+#include "GrInvariantOutput.h"
+#include "GrProcessor.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+
+class GrGLGammaEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs& args) override {
+ const GrGammaEffect& ge = args.fFp.cast<GrGammaEffect>();
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+
+ const char* gammaUniName = nullptr;
+ if (GrGammaEffect::Mode::kExponential == ge.mode()) {
+ fGammaUni = uniformHandler->addUniform(kFragment_GrShaderFlag, kFloat_GrSLType,
+ kDefault_GrSLPrecision, "Gamma", &gammaUniName);
+ }
+
+ SkString srgbFuncName;
+ static const GrGLSLShaderVar gSrgbArgs[] = {
+ GrGLSLShaderVar("x", kFloat_GrSLType),
+ };
+ switch (ge.mode()) {
+ case GrGammaEffect::Mode::kLinearToSRGB:
+ fragBuilder->emitFunction(kFloat_GrSLType,
+ "linear_to_srgb",
+ SK_ARRAY_COUNT(gSrgbArgs),
+ gSrgbArgs,
+ "return (x <= 0.0031308) ? (x * 12.92) "
+ ": (1.055 * pow(x, 0.416666667) - 0.055);",
+ &srgbFuncName);
+ break;
+ case GrGammaEffect::Mode::kSRGBToLinear:
+ fragBuilder->emitFunction(kFloat_GrSLType,
+ "srgb_to_linear",
+ SK_ARRAY_COUNT(gSrgbArgs),
+ gSrgbArgs,
+ "return (x <= 0.04045) ? (x / 12.92) "
+ ": pow((x + 0.055) / 1.055, 2.4);",
+ &srgbFuncName);
+                break;
+            default:
+ // No helper function needed
+ break;
+ }
+
+ if (nullptr == args.fInputColor) {
+ args.fInputColor = "vec4(1)";
+ }
+
+ if (GrGammaEffect::Mode::kExponential == ge.mode()) {
+ fragBuilder->codeAppendf("%s = vec4(pow(%s.rgb, vec3(%s)), %s.a);",
+ args.fOutputColor, args.fInputColor, gammaUniName,
+ args.fInputColor);
+ } else {
+ fragBuilder->codeAppendf("%s = vec4(%s(%s.r), %s(%s.g), %s(%s.b), %s.a);",
+ args.fOutputColor,
+ srgbFuncName.c_str(), args.fInputColor,
+ srgbFuncName.c_str(), args.fInputColor,
+ srgbFuncName.c_str(), args.fInputColor,
+ args.fInputColor);
+ }
+ }
+
+ void onSetData(const GrGLSLProgramDataManager& pdman, const GrProcessor& proc) override {
+ const GrGammaEffect& ge = proc.cast<GrGammaEffect>();
+ if (GrGammaEffect::Mode::kExponential == ge.mode()) {
+ pdman.set1f(fGammaUni, ge.gamma());
+ }
+ }
+
+ static inline void GenKey(const GrProcessor& processor, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrGammaEffect& ge = processor.cast<GrGammaEffect>();
+ uint32_t key = static_cast<uint32_t>(ge.mode());
+ b->add32(key);
+ }
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fGammaUni;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrGammaEffect::GrGammaEffect(Mode mode, SkScalar gamma)
+ : fMode(mode)
+ , fGamma(gamma) {
+ this->initClassID<GrGammaEffect>();
+}
+
+bool GrGammaEffect::onIsEqual(const GrFragmentProcessor& s) const {
+ const GrGammaEffect& other = s.cast<GrGammaEffect>();
+ return
+ other.fMode == fMode &&
+ (fMode != Mode::kExponential || other.fGamma == fGamma);
+}
+
+void GrGammaEffect::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ inout->setToUnknown(GrInvariantOutput::kWill_ReadInput);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrGammaEffect);
+
+sk_sp<GrFragmentProcessor> GrGammaEffect::TestCreate(GrProcessorTestData* d) {
+ // We want to be sure and test sRGB sometimes
+ Mode testMode = static_cast<Mode>(d->fRandom->nextRangeU(0, 2));
+ SkScalar gamma = d->fRandom->nextRangeScalar(0.5f, 2.0f);
+ return sk_sp<GrFragmentProcessor>(new GrGammaEffect(testMode, gamma));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGammaEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLGammaEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrGammaEffect::onCreateGLSLInstance() const {
+ return new GrGLGammaEffect();
+}
+
+sk_sp<GrFragmentProcessor> GrGammaEffect::Make(SkScalar gamma) {
+ // TODO: Once our public-facing API for specifying gamma curves settles down, expose this,
+ // and allow clients to explicitly request sRGB, rather than inferring from the exponent.
+    // Note that AdobeRGB (for example) is specified as x^2.2, not the Rec.709 curves.
+ if (SkScalarNearlyEqual(gamma, 2.2f)) {
+ return sk_sp<GrFragmentProcessor>(new GrGammaEffect(Mode::kSRGBToLinear, 2.2f));
+ } else if (SkScalarNearlyEqual(gamma, 1.0f / 2.2f)) {
+ return sk_sp<GrFragmentProcessor>(new GrGammaEffect(Mode::kLinearToSRGB, 1.0f / 2.2f));
+ } else {
+ return sk_sp<GrFragmentProcessor>(new GrGammaEffect(Mode::kExponential, gamma));
+ }
+}
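+
+// For illustration: Make(2.2f) selects the kSRGBToLinear path, Make(1.0f / 2.2f) selects
+// kLinearToSRGB, and any other exponent (e.g. 1.8f) takes the kExponential path, which
+// uploads the exponent as the "Gamma" uniform.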
diff --git a/gfx/skia/skia/src/gpu/effects/GrGammaEffect.h b/gfx/skia/skia/src/gpu/effects/GrGammaEffect.h
new file mode 100644
index 000000000..3f84ac91e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrGammaEffect.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGammaEffect_DEFINED
+#define GrGammaEffect_DEFINED
+
+#include "GrFragmentProcessor.h"
+
+class GrGammaEffect : public GrFragmentProcessor {
+public:
+ enum class Mode {
+ kLinearToSRGB,
+ kSRGBToLinear,
+ kExponential,
+ };
+
+ /**
+ * Creates an effect that applies a gamma curve.
+ */
+ static sk_sp<GrFragmentProcessor> Make(SkScalar gamma);
+
+ const char* name() const override { return "Gamma"; }
+
+ Mode mode() const { return fMode; }
+ SkScalar gamma() const { return fGamma; }
+
+private:
+ GrGammaEffect(Mode mode, SkScalar gamma);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ Mode fMode;
+ SkScalar fGamma;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrMatrixConvolutionEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrMatrixConvolutionEffect.cpp
new file mode 100644
index 000000000..a07b67128
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrMatrixConvolutionEffect.cpp
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "GrMatrixConvolutionEffect.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+
+class GrGLMatrixConvolutionEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrGLSLCaps&, GrProcessorKeyBuilder*);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+private:
+ typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+
+ UniformHandle fKernelUni;
+ UniformHandle fImageIncrementUni;
+ UniformHandle fKernelOffsetUni;
+ UniformHandle fGainUni;
+ UniformHandle fBiasUni;
+ GrTextureDomain::GLDomain fDomain;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GrGLMatrixConvolutionEffect::emitCode(EmitArgs& args) {
+ const GrMatrixConvolutionEffect& mce = args.fFp.cast<GrMatrixConvolutionEffect>();
+ const GrTextureDomain& domain = mce.domain();
+
+ int kWidth = mce.kernelSize().width();
+ int kHeight = mce.kernelSize().height();
+
+ int arrayCount = (kWidth * kHeight + 3) / 4;
+ SkASSERT(4 * arrayCount >= kWidth * kHeight);
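+    // Packing example (illustrative): a 5x5 kernel has 25 taps, so arrayCount =
+    // (25 + 3) / 4 = 7 vec4 uniforms (28 floats); tap i is read back below as
+    // Kernel[i / 4], component i % 4.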
+
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ fImageIncrementUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType, kDefault_GrSLPrecision,
+ "ImageIncrement");
+ fKernelUni = uniformHandler->addUniformArray(kFragment_GrShaderFlag,
+ kVec4f_GrSLType, kDefault_GrSLPrecision,
+ "Kernel",
+ arrayCount);
+ fKernelOffsetUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType, kDefault_GrSLPrecision,
+ "KernelOffset");
+ fGainUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType, kDefault_GrSLPrecision, "Gain");
+ fBiasUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kFloat_GrSLType, kDefault_GrSLPrecision, "Bias");
+
+ const char* kernelOffset = uniformHandler->getUniformCStr(fKernelOffsetUni);
+ const char* imgInc = uniformHandler->getUniformCStr(fImageIncrementUni);
+ const char* kernel = uniformHandler->getUniformCStr(fKernelUni);
+ const char* gain = uniformHandler->getUniformCStr(fGainUni);
+ const char* bias = uniformHandler->getUniformCStr(fBiasUni);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString coords2D = fragBuilder->ensureCoords2D(args.fTransformedCoords[0]);
+ fragBuilder->codeAppend("vec4 sum = vec4(0, 0, 0, 0);");
+ fragBuilder->codeAppendf("vec2 coord = %s - %s * %s;", coords2D.c_str(), kernelOffset, imgInc);
+ fragBuilder->codeAppend("vec4 c;");
+
+ const char* kVecSuffix[4] = { ".x", ".y", ".z", ".w" };
+ for (int y = 0; y < kHeight; y++) {
+ for (int x = 0; x < kWidth; x++) {
+ GrGLSLShaderBuilder::ShaderBlock block(fragBuilder);
+ int offset = y*kWidth + x;
+
+ fragBuilder->codeAppendf("float k = %s[%d]%s;", kernel, offset / 4,
+ kVecSuffix[offset & 0x3]);
+ SkString coord;
+ coord.printf("coord + vec2(%d, %d) * %s", x, y, imgInc);
+ fDomain.sampleTexture(fragBuilder,
+ uniformHandler,
+ args.fGLSLCaps,
+ domain,
+ "c",
+ coord,
+ args.fTexSamplers[0]);
+ if (!mce.convolveAlpha()) {
+ fragBuilder->codeAppend("c.rgb /= c.a;");
+ fragBuilder->codeAppend("c.rgb = clamp(c.rgb, 0.0, 1.0);");
+ }
+ fragBuilder->codeAppend("sum += c * k;");
+ }
+ }
+ if (mce.convolveAlpha()) {
+ fragBuilder->codeAppendf("%s = sum * %s + %s;", args.fOutputColor, gain, bias);
+ fragBuilder->codeAppendf("%s.rgb = clamp(%s.rgb, 0.0, %s.a);",
+ args.fOutputColor, args.fOutputColor, args.fOutputColor);
+ } else {
+ fDomain.sampleTexture(fragBuilder,
+ uniformHandler,
+ args.fGLSLCaps,
+ domain,
+ "c",
+ coords2D,
+ args.fTexSamplers[0]);
+ fragBuilder->codeAppendf("%s.a = c.a;", args.fOutputColor);
+ fragBuilder->codeAppendf("%s.rgb = sum.rgb * %s + %s;", args.fOutputColor, gain, bias);
+ fragBuilder->codeAppendf("%s.rgb *= %s.a;", args.fOutputColor, args.fOutputColor);
+ }
+
+ SkString modulate;
+ GrGLSLMulVarBy4f(&modulate, args.fOutputColor, args.fInputColor);
+ fragBuilder->codeAppend(modulate.c_str());
+}
+
+void GrGLMatrixConvolutionEffect::GenKey(const GrProcessor& processor,
+ const GrGLSLCaps&, GrProcessorKeyBuilder* b) {
+ const GrMatrixConvolutionEffect& m = processor.cast<GrMatrixConvolutionEffect>();
+ SkASSERT(m.kernelSize().width() <= 0x7FFF && m.kernelSize().height() <= 0xFFFF);
+ uint32_t key = m.kernelSize().width() << 16 | m.kernelSize().height();
+ key |= m.convolveAlpha() ? 1U << 31 : 0;
+ b->add32(key);
+ b->add32(GrTextureDomain::GLDomain::DomainKey(m.domain()));
+}
+
+void GrGLMatrixConvolutionEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& processor) {
+ const GrMatrixConvolutionEffect& conv = processor.cast<GrMatrixConvolutionEffect>();
+ GrTexture& texture = *conv.texture(0);
+
+ float imageIncrement[2];
+ float ySign = texture.origin() == kTopLeft_GrSurfaceOrigin ? 1.0f : -1.0f;
+ imageIncrement[0] = 1.0f / texture.width();
+ imageIncrement[1] = ySign / texture.height();
+ pdman.set2fv(fImageIncrementUni, 1, imageIncrement);
+ pdman.set2fv(fKernelOffsetUni, 1, conv.kernelOffset());
+ int kernelCount = conv.kernelSize().width() * conv.kernelSize().height();
+ int arrayCount = (kernelCount + 3) / 4;
+ SkASSERT(4 * arrayCount >= kernelCount);
+ pdman.set4fv(fKernelUni, arrayCount, conv.kernel());
+ pdman.set1f(fGainUni, conv.gain());
+ pdman.set1f(fBiasUni, conv.bias());
+ fDomain.setData(pdman, conv.domain(), texture.origin());
+}
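+
+// To make the packing used above concrete: a w x h scalar kernel is uploaded as (w*h + 3) / 4
+// vec4 uniforms, so element i lives in slot i / 4, component i % 4. For an illustrative 3x3
+// kernel, arrayCount is (9 + 3) / 4 = 3 and element 6 (x = 0, y = 2) is read in the shader as
+// slot 1, component .z, exactly what kVecSuffix[6 & 0x3] selects in emitCode() above.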
+
+GrMatrixConvolutionEffect::GrMatrixConvolutionEffect(GrTexture* texture,
+ const SkIRect& bounds,
+ const SkISize& kernelSize,
+ const SkScalar* kernel,
+ SkScalar gain,
+ SkScalar bias,
+ const SkIPoint& kernelOffset,
+ GrTextureDomain::Mode tileMode,
+ bool convolveAlpha)
+ : INHERITED(texture, nullptr, GrCoordTransform::MakeDivByTextureWHMatrix(texture)),
+ fKernelSize(kernelSize),
+ fGain(SkScalarToFloat(gain)),
+ fBias(SkScalarToFloat(bias) / 255.0f),
+ fConvolveAlpha(convolveAlpha),
+ fDomain(GrTextureDomain::MakeTexelDomainForMode(texture, bounds, tileMode), tileMode) {
+ this->initClassID<GrMatrixConvolutionEffect>();
+ for (int i = 0; i < kernelSize.width() * kernelSize.height(); i++) {
+ fKernel[i] = SkScalarToFloat(kernel[i]);
+ }
+ fKernelOffset[0] = static_cast<float>(kernelOffset.x());
+ fKernelOffset[1] = static_cast<float>(kernelOffset.y());
+}
+
+void GrMatrixConvolutionEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLMatrixConvolutionEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrMatrixConvolutionEffect::onCreateGLSLInstance() const {
+ return new GrGLMatrixConvolutionEffect;
+}
+
+bool GrMatrixConvolutionEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrMatrixConvolutionEffect& s = sBase.cast<GrMatrixConvolutionEffect>();
+ return fKernelSize == s.kernelSize() &&
+ !memcmp(fKernel, s.kernel(),
+ fKernelSize.width() * fKernelSize.height() * sizeof(float)) &&
+ fGain == s.gain() &&
+ fBias == s.bias() &&
+ fKernelOffset == s.kernelOffset() &&
+ fConvolveAlpha == s.convolveAlpha() &&
+ fDomain == s.domain();
+}
+
+// Static function to create a 2D convolution effect with a Gaussian kernel.
+sk_sp<GrFragmentProcessor>
+GrMatrixConvolutionEffect::MakeGaussian(GrTexture* texture,
+ const SkIRect& bounds,
+ const SkISize& kernelSize,
+ SkScalar gain,
+ SkScalar bias,
+ const SkIPoint& kernelOffset,
+ GrTextureDomain::Mode tileMode,
+ bool convolveAlpha,
+ SkScalar sigmaX,
+ SkScalar sigmaY) {
+ float kernel[MAX_KERNEL_SIZE];
+ int width = kernelSize.width();
+ int height = kernelSize.height();
+ SkASSERT(width * height <= MAX_KERNEL_SIZE);
+ float sum = 0.0f;
+ float sigmaXDenom = 1.0f / (2.0f * SkScalarToFloat(SkScalarSquare(sigmaX)));
+ float sigmaYDenom = 1.0f / (2.0f * SkScalarToFloat(SkScalarSquare(sigmaY)));
+ int xRadius = width / 2;
+ int yRadius = height / 2;
+ for (int x = 0; x < width; x++) {
+ float xTerm = static_cast<float>(x - xRadius);
+ xTerm = xTerm * xTerm * sigmaXDenom;
+ for (int y = 0; y < height; y++) {
+ float yTerm = static_cast<float>(y - yRadius);
+ float xyTerm = sk_float_exp(-(xTerm + yTerm * yTerm * sigmaYDenom));
+ // Note that the constant term (1/(sqrt(2*pi*sigma^2)) of the Gaussian
+ // is dropped here, since we renormalize the kernel below.
+ kernel[y * width + x] = xyTerm;
+ sum += xyTerm;
+ }
+ }
+ // Normalize the kernel
+ float scale = 1.0f / sum;
+ for (int i = 0; i < width * height; ++i) {
+ kernel[i] *= scale;
+ }
+ return sk_sp<GrFragmentProcessor>(
+ new GrMatrixConvolutionEffect(texture, bounds, kernelSize, kernel, gain, bias,
+ kernelOffset, tileMode, convolveAlpha));
+}
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrMatrixConvolutionEffect);
+
+sk_sp<GrFragmentProcessor> GrMatrixConvolutionEffect::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx :
+ GrProcessorUnitTest::kAlphaTextureIdx;
+ int width = d->fRandom->nextRangeU(1, MAX_KERNEL_SIZE);
+ int height = d->fRandom->nextRangeU(1, MAX_KERNEL_SIZE / width);
+ SkISize kernelSize = SkISize::Make(width, height);
+ SkAutoTDeleteArray<SkScalar> kernel(new SkScalar[width * height]);
+ for (int i = 0; i < width * height; i++) {
+ kernel.get()[i] = d->fRandom->nextSScalar1();
+ }
+ SkScalar gain = d->fRandom->nextSScalar1();
+ SkScalar bias = d->fRandom->nextSScalar1();
+ SkIPoint kernelOffset = SkIPoint::Make(d->fRandom->nextRangeU(0, kernelSize.width()),
+ d->fRandom->nextRangeU(0, kernelSize.height()));
+ SkIRect bounds = SkIRect::MakeXYWH(d->fRandom->nextRangeU(0, d->fTextures[texIdx]->width()),
+ d->fRandom->nextRangeU(0, d->fTextures[texIdx]->height()),
+ d->fRandom->nextRangeU(0, d->fTextures[texIdx]->width()),
+ d->fRandom->nextRangeU(0, d->fTextures[texIdx]->height()));
+ GrTextureDomain::Mode tileMode =
+ static_cast<GrTextureDomain::Mode>(d->fRandom->nextRangeU(0, 2));
+ bool convolveAlpha = d->fRandom->nextBool();
+ return GrMatrixConvolutionEffect::Make(d->fTextures[texIdx],
+ bounds,
+ kernelSize,
+ kernel.get(),
+ gain,
+ bias,
+ kernelOffset,
+ tileMode,
+ convolveAlpha);
+}
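+
+// A usage sketch for MakeGaussian (illustrative only; the texture is assumed to come from the
+// caller, the 5x5 kernel keeps width * height within MAX_KERNEL_SIZE, and the tile mode is just
+// one GrTextureDomain mode picked for the example):
+//
+//     sk_sp<GrFragmentProcessor> blurFP = GrMatrixConvolutionEffect::MakeGaussian(
+//             texture,
+//             SkIRect::MakeWH(texture->width(), texture->height()),
+//             SkISize::Make(5, 5),
+//             SkIntToScalar(1),                     // gain
+//             SkIntToScalar(0),                     // bias
+//             SkIPoint::Make(2, 2),                 // center the kernel
+//             GrTextureDomain::kClamp_Mode,
+//             false,                                // convolveAlpha
+//             SkIntToScalar(2), SkIntToScalar(2));  // sigmaX, sigmaY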
diff --git a/gfx/skia/skia/src/gpu/effects/GrMatrixConvolutionEffect.h b/gfx/skia/skia/src/gpu/effects/GrMatrixConvolutionEffect.h
new file mode 100644
index 000000000..b8df43768
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrMatrixConvolutionEffect.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMatrixConvolutionEffect_DEFINED
+#define GrMatrixConvolutionEffect_DEFINED
+
+#include "GrSingleTextureEffect.h"
+#include "GrInvariantOutput.h"
+#include "GrTextureDomain.h"
+
+// A little bit less than the minimum # uniforms required by DX9SM2 (32).
+// Allows for a 5x5 kernel (or 25x1, for that matter).
+#define MAX_KERNEL_SIZE 25
+
+class GrMatrixConvolutionEffect : public GrSingleTextureEffect {
+public:
+ static sk_sp<GrFragmentProcessor> Make(GrTexture* texture,
+ const SkIRect& bounds,
+ const SkISize& kernelSize,
+ const SkScalar* kernel,
+ SkScalar gain,
+ SkScalar bias,
+ const SkIPoint& kernelOffset,
+ GrTextureDomain::Mode tileMode,
+ bool convolveAlpha) {
+ return sk_sp<GrFragmentProcessor>(
+ new GrMatrixConvolutionEffect(texture, bounds, kernelSize, kernel, gain, bias,
+ kernelOffset, tileMode, convolveAlpha));
+ }
+
+ static sk_sp<GrFragmentProcessor> MakeGaussian(GrTexture* texture,
+ const SkIRect& bounds,
+ const SkISize& kernelSize,
+ SkScalar gain,
+ SkScalar bias,
+ const SkIPoint& kernelOffset,
+ GrTextureDomain::Mode tileMode,
+ bool convolveAlpha,
+ SkScalar sigmaX,
+ SkScalar sigmaY);
+
+ const SkIRect& bounds() const { return fBounds; }
+ const SkISize& kernelSize() const { return fKernelSize; }
+ const float* kernelOffset() const { return fKernelOffset; }
+ const float* kernel() const { return fKernel; }
+ float gain() const { return fGain; }
+ float bias() const { return fBias; }
+ bool convolveAlpha() const { return fConvolveAlpha; }
+ const GrTextureDomain& domain() const { return fDomain; }
+
+ const char* name() const override { return "MatrixConvolution"; }
+
+private:
+ GrMatrixConvolutionEffect(GrTexture*,
+ const SkIRect& bounds,
+ const SkISize& kernelSize,
+ const SkScalar* kernel,
+ SkScalar gain,
+ SkScalar bias,
+ const SkIPoint& kernelOffset,
+ GrTextureDomain::Mode tileMode,
+ bool convolveAlpha);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ // TODO: Try to do better?
+ inout->mulByUnknownFourComponents();
+ }
+
+ SkIRect fBounds;
+ SkISize fKernelSize;
+ float fKernel[MAX_KERNEL_SIZE];
+ float fGain;
+ float fBias;
+ float fKernelOffset[2];
+ bool fConvolveAlpha;
+ GrTextureDomain fDomain;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef GrSingleTextureEffect INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrOvalEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrOvalEffect.cpp
new file mode 100644
index 000000000..97ea1e6d0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrOvalEffect.cpp
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrOvalEffect.h"
+
+#include "GrFragmentProcessor.h"
+#include "GrInvariantOutput.h"
+#include "SkRect.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+
+//////////////////////////////////////////////////////////////////////////////
+
+class CircleEffect : public GrFragmentProcessor {
+public:
+ static sk_sp<GrFragmentProcessor> Make(GrPrimitiveEdgeType, const SkPoint& center,
+ SkScalar radius);
+
+ virtual ~CircleEffect() {}
+
+ const char* name() const override { return "Circle"; }
+
+ const SkPoint& getCenter() const { return fCenter; }
+ SkScalar getRadius() const { return fRadius; }
+
+ GrPrimitiveEdgeType getEdgeType() const { return fEdgeType; }
+
+private:
+ CircleEffect(GrPrimitiveEdgeType, const SkPoint& center, SkScalar radius);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ SkPoint fCenter;
+ SkScalar fRadius;
+ GrPrimitiveEdgeType fEdgeType;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+sk_sp<GrFragmentProcessor> CircleEffect::Make(GrPrimitiveEdgeType edgeType, const SkPoint& center,
+ SkScalar radius) {
+ SkASSERT(radius >= 0);
+ return sk_sp<GrFragmentProcessor>(new CircleEffect(edgeType, center, radius));
+}
+
+void CircleEffect::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ inout->mulByUnknownSingleComponent();
+}
+
+CircleEffect::CircleEffect(GrPrimitiveEdgeType edgeType, const SkPoint& c, SkScalar r)
+ : fCenter(c)
+ , fRadius(r)
+ , fEdgeType(edgeType) {
+ this->initClassID<CircleEffect>();
+ this->setWillReadFragmentPosition();
+}
+
+bool CircleEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const CircleEffect& ce = other.cast<CircleEffect>();
+ return fEdgeType == ce.fEdgeType && fCenter == ce.fCenter && fRadius == ce.fRadius;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(CircleEffect);
+
+sk_sp<GrFragmentProcessor> CircleEffect::TestCreate(GrProcessorTestData* d) {
+ SkPoint center;
+ center.fX = d->fRandom->nextRangeScalar(0.f, 1000.f);
+ center.fY = d->fRandom->nextRangeScalar(0.f, 1000.f);
+ SkScalar radius = d->fRandom->nextRangeF(0.f, 1000.f);
+ GrPrimitiveEdgeType et;
+ do {
+ et = (GrPrimitiveEdgeType)d->fRandom->nextULessThan(kGrProcessorEdgeTypeCnt);
+ } while (kHairlineAA_GrProcessorEdgeType == et);
+ return CircleEffect::Make(et, center, radius);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GLCircleEffect : public GrGLSLFragmentProcessor {
+public:
+ GLCircleEffect() : fPrevRadius(-1.0f) { }
+
+ virtual void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrGLSLCaps&, GrProcessorKeyBuilder*);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fCircleUniform;
+ SkPoint fPrevCenter;
+ SkScalar fPrevRadius;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GLCircleEffect::emitCode(EmitArgs& args) {
+ const CircleEffect& ce = args.fFp.cast<CircleEffect>();
+ const char *circleName;
+ // The circle uniform is (center.x, center.y, radius + 0.5, 1 / (radius + 0.5)) for regular
+ // fills and (..., radius - 0.5, 1 / (radius - 0.5)) for inverse fills.
+ fCircleUniform = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType, kDefault_GrSLPrecision,
+ "circle",
+ &circleName);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const char* fragmentPos = fragBuilder->fragmentPosition();
+
+ SkASSERT(kHairlineAA_GrProcessorEdgeType != ce.getEdgeType());
+ // TODO: Right now the distance to circle calculation is performed in a space normalized to the
+ // radius and then denormalized. This is to prevent overflow on devices that have a "real"
+ // mediump. It'd be nice to only do this on mediump devices but we currently don't have the
+ // caps here.
+ if (GrProcessorEdgeTypeIsInverseFill(ce.getEdgeType())) {
+ fragBuilder->codeAppendf("float d = (length((%s.xy - %s.xy) * %s.w) - 1.0) * %s.z;",
+ circleName, fragmentPos, circleName, circleName);
+ } else {
+ fragBuilder->codeAppendf("float d = (1.0 - length((%s.xy - %s.xy) * %s.w)) * %s.z;",
+ circleName, fragmentPos, circleName, circleName);
+ }
+ if (GrProcessorEdgeTypeIsAA(ce.getEdgeType())) {
+ fragBuilder->codeAppend("d = clamp(d, 0.0, 1.0);");
+ } else {
+ fragBuilder->codeAppend("d = d > 0.5 ? 1.0 : 0.0;");
+ }
+
+ fragBuilder->codeAppendf("%s = %s;", args.fOutputColor,
+ (GrGLSLExpr4(args.fInputColor) * GrGLSLExpr1("d")).c_str());
+}
+
+void GLCircleEffect::GenKey(const GrProcessor& processor, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const CircleEffect& ce = processor.cast<CircleEffect>();
+ b->add32(ce.getEdgeType());
+}
+
+void GLCircleEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& processor) {
+ const CircleEffect& ce = processor.cast<CircleEffect>();
+ if (ce.getRadius() != fPrevRadius || ce.getCenter() != fPrevCenter) {
+ SkScalar radius = ce.getRadius();
+ if (GrProcessorEdgeTypeIsInverseFill(ce.getEdgeType())) {
+ radius -= 0.5f;
+ } else {
+ radius += 0.5f;
+ }
+ pdman.set4f(fCircleUniform, ce.getCenter().fX, ce.getCenter().fY, radius,
+ SkScalarInvert(radius));
+ fPrevCenter = ce.getCenter();
+ fPrevRadius = ce.getRadius();
+ }
+}
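+
+// For reference, the normalization above leaves the shader math equivalent to a plain signed
+// distance: with the uniform set to (cx, cy, r + 0.5, 1 / (r + 0.5)), the regular-fill branch in
+// emitCode() computes
+//
+//     d = (1.0 - length(center - pos) / (r + 0.5)) * (r + 0.5) = (r + 0.5) - distance(pos, center)
+//
+// so clamping d to [0, 1] produces a one-pixel anti-aliasing ramp across the circle edge.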
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void CircleEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GLCircleEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* CircleEffect::onCreateGLSLInstance() const {
+ return new GLCircleEffect;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class EllipseEffect : public GrFragmentProcessor {
+public:
+ static sk_sp<GrFragmentProcessor> Make(GrPrimitiveEdgeType, const SkPoint& center,
+ SkScalar rx, SkScalar ry);
+
+ virtual ~EllipseEffect() {}
+
+ const char* name() const override { return "Ellipse"; }
+
+ const SkPoint& getCenter() const { return fCenter; }
+ SkVector getRadii() const { return fRadii; }
+
+ GrPrimitiveEdgeType getEdgeType() const { return fEdgeType; }
+
+private:
+ EllipseEffect(GrPrimitiveEdgeType, const SkPoint& center, SkScalar rx, SkScalar ry);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ SkPoint fCenter;
+ SkVector fRadii;
+ GrPrimitiveEdgeType fEdgeType;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+sk_sp<GrFragmentProcessor> EllipseEffect::Make(GrPrimitiveEdgeType edgeType,
+ const SkPoint& center,
+ SkScalar rx,
+ SkScalar ry) {
+ SkASSERT(rx >= 0 && ry >= 0);
+ return sk_sp<GrFragmentProcessor>(new EllipseEffect(edgeType, center, rx, ry));
+}
+
+void EllipseEffect::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ inout->mulByUnknownSingleComponent();
+}
+
+EllipseEffect::EllipseEffect(GrPrimitiveEdgeType edgeType, const SkPoint& c, SkScalar rx, SkScalar ry)
+ : fCenter(c)
+ , fRadii(SkVector::Make(rx, ry))
+ , fEdgeType(edgeType) {
+ this->initClassID<EllipseEffect>();
+ this->setWillReadFragmentPosition();
+}
+
+bool EllipseEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const EllipseEffect& ee = other.cast<EllipseEffect>();
+ return fEdgeType == ee.fEdgeType && fCenter == ee.fCenter && fRadii == ee.fRadii;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(EllipseEffect);
+
+sk_sp<GrFragmentProcessor> EllipseEffect::TestCreate(GrProcessorTestData* d) {
+ SkPoint center;
+ center.fX = d->fRandom->nextRangeScalar(0.f, 1000.f);
+ center.fY = d->fRandom->nextRangeScalar(0.f, 1000.f);
+ SkScalar rx = d->fRandom->nextRangeF(0.f, 1000.f);
+ SkScalar ry = d->fRandom->nextRangeF(0.f, 1000.f);
+ GrPrimitiveEdgeType et;
+ do {
+ et = (GrPrimitiveEdgeType)d->fRandom->nextULessThan(kGrProcessorEdgeTypeCnt);
+ } while (kHairlineAA_GrProcessorEdgeType == et);
+ return EllipseEffect::Make(et, center, rx, ry);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GLEllipseEffect : public GrGLSLFragmentProcessor {
+public:
+ GLEllipseEffect() {
+ fPrevRadii.fX = -1.0f;
+ }
+
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrGLSLCaps&, GrProcessorKeyBuilder*);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fEllipseUniform;
+ GrGLSLProgramDataManager::UniformHandle fScaleUniform;
+ SkPoint fPrevCenter;
+ SkVector fPrevRadii;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GLEllipseEffect::emitCode(EmitArgs& args) {
+ const EllipseEffect& ee = args.fFp.cast<EllipseEffect>();
+ const char *ellipseName;
+ // The ellipse uniform is (center.x, center.y, 1 / rx^2, 1 / ry^2)
+ // The last two terms can underflow on mediump, so we use highp.
+ fEllipseUniform = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType, kHigh_GrSLPrecision,
+ "ellipse",
+ &ellipseName);
+ // If we're on a device with a "real" mediump then we'll do the distance computation in a space
+ // that is normalized by the larger radius. The scale uniform will be scale, 1/scale. The
+ // inverse squared radii uniform values are already in this normalized space. The center is
+ // not.
+ const char* scaleName = nullptr;
+ if (args.fGLSLCaps->floatPrecisionVaries()) {
+ fScaleUniform = args.fUniformHandler->addUniform(
+ kFragment_GrShaderFlag, kVec2f_GrSLType, kDefault_GrSLPrecision,
+ "scale", &scaleName);
+ }
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const char* fragmentPos = fragBuilder->fragmentPosition();
+
+ // d is the offset to the ellipse center
+ fragBuilder->codeAppendf("vec2 d = %s.xy - %s.xy;", fragmentPos, ellipseName);
+ if (scaleName) {
+ fragBuilder->codeAppendf("d *= %s.y;", scaleName);
+ }
+ fragBuilder->codeAppendf("vec2 Z = d * %s.zw;", ellipseName);
+ // implicit is the evaluation of (x/rx)^2 + (y/ry)^2 - 1.
+ fragBuilder->codeAppend("float implicit = dot(Z, d) - 1.0;");
+ // grad_dot is the squared length of the gradient of the implicit.
+ fragBuilder->codeAppendf("float grad_dot = 4.0 * dot(Z, Z);");
+ // Avoid calling inversesqrt on zero.
+ fragBuilder->codeAppend("grad_dot = max(grad_dot, 1.0e-4);");
+ fragBuilder->codeAppendf("float approx_dist = implicit * inversesqrt(grad_dot);");
+ if (scaleName) {
+ fragBuilder->codeAppendf("approx_dist *= %s.x;", scaleName);
+ }
+
+ switch (ee.getEdgeType()) {
+ case kFillAA_GrProcessorEdgeType:
+ fragBuilder->codeAppend("float alpha = clamp(0.5 - approx_dist, 0.0, 1.0);");
+ break;
+ case kInverseFillAA_GrProcessorEdgeType:
+ fragBuilder->codeAppend("float alpha = clamp(0.5 + approx_dist, 0.0, 1.0);");
+ break;
+ case kFillBW_GrProcessorEdgeType:
+ fragBuilder->codeAppend("float alpha = approx_dist > 0.0 ? 0.0 : 1.0;");
+ break;
+ case kInverseFillBW_GrProcessorEdgeType:
+ fragBuilder->codeAppend("float alpha = approx_dist > 0.0 ? 1.0 : 0.0;");
+ break;
+ case kHairlineAA_GrProcessorEdgeType:
+ SkFAIL("Hairline not expected here.");
+ }
+
+ fragBuilder->codeAppendf("%s = %s;", args.fOutputColor,
+ (GrGLSLExpr4(args.fInputColor) * GrGLSLExpr1("alpha")).c_str());
+}
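+
+// Note on the math above: with Z = d * (1/rx^2, 1/ry^2), "implicit" is
+// F(d) = (d.x/rx)^2 + (d.y/ry)^2 - 1 and "grad_dot" is |grad F|^2 = 4 * dot(Z, Z), so approx_dist
+// is the usual first-order distance estimate F / |grad F|. Near the ellipse edge this is close to
+// the true distance in fragment units, which is what the half-pixel AA offsets in the switch rely
+// on.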
+
+void GLEllipseEffect::GenKey(const GrProcessor& effect, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const EllipseEffect& ee = effect.cast<EllipseEffect>();
+ b->add32(ee.getEdgeType());
+}
+
+void GLEllipseEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& effect) {
+ const EllipseEffect& ee = effect.cast<EllipseEffect>();
+ if (ee.getRadii() != fPrevRadii || ee.getCenter() != fPrevCenter) {
+ float invRXSqd;
+ float invRYSqd;
+ // If we're using a scale factor to work around precision issues, choose the larger radius
+ // as the scale factor. The inv radii need to be pre-adjusted by the scale factor.
+ if (fScaleUniform.isValid()) {
+ if (ee.getRadii().fX > ee.getRadii().fY) {
+ invRXSqd = 1.f;
+ invRYSqd = (ee.getRadii().fX * ee.getRadii().fX) /
+ (ee.getRadii().fY * ee.getRadii().fY);
+ pdman.set2f(fScaleUniform, ee.getRadii().fX, 1.f / ee.getRadii().fX);
+ } else {
+ invRXSqd = (ee.getRadii().fY * ee.getRadii().fY) /
+ (ee.getRadii().fX * ee.getRadii().fX);
+ invRYSqd = 1.f;
+ pdman.set2f(fScaleUniform, ee.getRadii().fY, 1.f / ee.getRadii().fY);
+ }
+ } else {
+ invRXSqd = 1.f / (ee.getRadii().fX * ee.getRadii().fX);
+ invRYSqd = 1.f / (ee.getRadii().fY * ee.getRadii().fY);
+ }
+ pdman.set4f(fEllipseUniform, ee.getCenter().fX, ee.getCenter().fY, invRXSqd, invRYSqd);
+ fPrevCenter = ee.getCenter();
+ fPrevRadii = ee.getRadii();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void EllipseEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GLEllipseEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* EllipseEffect::onCreateGLSLInstance() const {
+ return new GLEllipseEffect;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> GrOvalEffect::Make(GrPrimitiveEdgeType edgeType, const SkRect& oval) {
+ if (kHairlineAA_GrProcessorEdgeType == edgeType) {
+ return nullptr;
+ }
+ SkScalar w = oval.width();
+ SkScalar h = oval.height();
+ if (SkScalarNearlyEqual(w, h)) {
+ w /= 2;
+ return CircleEffect::Make(edgeType, SkPoint::Make(oval.fLeft + w, oval.fTop + w), w);
+ } else {
+ w /= 2;
+ h /= 2;
+ return EllipseEffect::Make(edgeType, SkPoint::Make(oval.fLeft + w, oval.fTop + h), w, h);
+ }
+
+ return nullptr;
+}
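+
+// A usage sketch for Make (illustrative only; the rect values are arbitrary):
+//
+//     // A 20x20 oval is treated as a circle of radius 10 centered at (10, 10).
+//     sk_sp<GrFragmentProcessor> clipFP =
+//             GrOvalEffect::Make(kFillAA_GrProcessorEdgeType, SkRect::MakeWH(20, 20));
+//     // A 40x20 rect would take the EllipseEffect path instead, and kHairlineAA_GrProcessorEdgeType
+//     // is the one edge type that is rejected and yields nullptr.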
diff --git a/gfx/skia/skia/src/gpu/effects/GrOvalEffect.h b/gfx/skia/skia/src/gpu/effects/GrOvalEffect.h
new file mode 100644
index 000000000..3ff241a65
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrOvalEffect.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrOvalEffect_DEFINED
+#define GrOvalEffect_DEFINED
+
+#include "GrTypes.h"
+#include "GrTypesPriv.h"
+#include "SkRefCnt.h"
+
+class GrFragmentProcessor;
+struct SkRect;
+
+namespace GrOvalEffect {
+ /**
+ * Creates an effect that performs clipping against an oval.
+ */
+ sk_sp<GrFragmentProcessor> Make(GrPrimitiveEdgeType, const SkRect&);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrPorterDuffXferProcessor.cpp b/gfx/skia/skia/src/gpu/effects/GrPorterDuffXferProcessor.cpp
new file mode 100644
index 000000000..f51e94ba8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrPorterDuffXferProcessor.cpp
@@ -0,0 +1,923 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "effects/GrPorterDuffXferProcessor.h"
+
+#include "GrBlend.h"
+#include "GrCaps.h"
+#include "GrPipeline.h"
+#include "GrProcessor.h"
+#include "GrProcOptInfo.h"
+#include "GrTypes.h"
+#include "GrXferProcessor.h"
+#include "glsl/GrGLSLBlend.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "glsl/GrGLSLXferProcessor.h"
+#include <utility>
+
+/**
+ * Wraps the shader outputs and HW blend state that comprise a Porter Duff blend mode with coverage.
+ */
+struct BlendFormula {
+public:
+ /**
+ * Values the shader can write to primary and secondary outputs. These must all be modulated by
+ * coverage to support mixed samples. The XP will ignore the multiplies when not using coverage.
+ */
+ enum OutputType {
+ kNone_OutputType, //!< 0
+ kCoverage_OutputType, //!< inputCoverage
+ kModulate_OutputType, //!< inputColor * inputCoverage
+ kSAModulate_OutputType, //!< inputColor.a * inputCoverage
+ kISAModulate_OutputType, //!< (1 - inputColor.a) * inputCoverage
+ kISCModulate_OutputType, //!< (1 - inputColor) * inputCoverage
+
+ kLast_OutputType = kISCModulate_OutputType
+ };
+
+ enum Properties {
+ kModifiesDst_Property = 1,
+ kUsesDstColor_Property = 1 << 1,
+ kUsesInputColor_Property = 1 << 2,
+ kCanTweakAlphaForCoverage_Property = 1 << 3,
+
+ kLast_Property = kCanTweakAlphaForCoverage_Property
+ };
+
+ BlendFormula& operator =(const BlendFormula& other) {
+ fData = other.fData;
+ return *this;
+ }
+
+ bool operator ==(const BlendFormula& other) const {
+ return fData == other.fData;
+ }
+
+ bool hasSecondaryOutput() const { return kNone_OutputType != fSecondaryOutputType; }
+ bool modifiesDst() const { return SkToBool(fProps & kModifiesDst_Property); }
+ bool usesDstColor() const { return SkToBool(fProps & kUsesDstColor_Property); }
+ bool usesInputColor() const { return SkToBool(fProps & kUsesInputColor_Property); }
+ bool canTweakAlphaForCoverage() const {
+ return SkToBool(fProps & kCanTweakAlphaForCoverage_Property);
+ }
+
+ /**
+ * Deduce the properties of a compile-time constant BlendFormula.
+ */
+ template<OutputType PrimaryOut, OutputType SecondaryOut,
+ GrBlendEquation BlendEquation, GrBlendCoeff SrcCoeff, GrBlendCoeff DstCoeff>
+ struct get_properties : std::integral_constant<Properties, static_cast<Properties>(
+
+ (GR_BLEND_MODIFIES_DST(BlendEquation, SrcCoeff, DstCoeff) ?
+ kModifiesDst_Property : 0) |
+
+ (GR_BLEND_COEFFS_USE_DST_COLOR(SrcCoeff, DstCoeff) ?
+ kUsesDstColor_Property : 0) |
+
+ ((PrimaryOut >= kModulate_OutputType && GR_BLEND_COEFFS_USE_SRC_COLOR(SrcCoeff,DstCoeff)) ||
+ (SecondaryOut >= kModulate_OutputType && GR_BLEND_COEFF_REFS_SRC2(DstCoeff)) ?
+ kUsesInputColor_Property : 0) | // We assert later that SrcCoeff doesn't ref src2.
+
+ (kModulate_OutputType == PrimaryOut &&
+ kNone_OutputType == SecondaryOut &&
+ GR_BLEND_CAN_TWEAK_ALPHA_FOR_COVERAGE(BlendEquation, SrcCoeff, DstCoeff) ?
+ kCanTweakAlphaForCoverage_Property : 0))> {
+
+ // The provided formula should already be optimized.
+ GR_STATIC_ASSERT((kNone_OutputType == PrimaryOut) ==
+ !GR_BLEND_COEFFS_USE_SRC_COLOR(SrcCoeff, DstCoeff));
+ GR_STATIC_ASSERT(!GR_BLEND_COEFF_REFS_SRC2(SrcCoeff));
+ GR_STATIC_ASSERT((kNone_OutputType == SecondaryOut) ==
+ !GR_BLEND_COEFF_REFS_SRC2(DstCoeff));
+ GR_STATIC_ASSERT(PrimaryOut != SecondaryOut || kNone_OutputType == PrimaryOut);
+ GR_STATIC_ASSERT(kNone_OutputType != PrimaryOut || kNone_OutputType == SecondaryOut);
+ };
+
+ union {
+ struct {
+ // We allot the enums one more bit than they require because MSVC seems to sign-extend
+ // them when the top bit is set. (This is in violation of the C++03 standard 9.6/4)
+ OutputType fPrimaryOutputType : 4;
+ OutputType fSecondaryOutputType : 4;
+ GrBlendEquation fBlendEquation : 6;
+ GrBlendCoeff fSrcCoeff : 6;
+ GrBlendCoeff fDstCoeff : 6;
+ Properties fProps : 32 - (4 + 4 + 6 + 6 + 6);
+ };
+ uint32_t fData;
+ };
+
+ GR_STATIC_ASSERT(kLast_OutputType < (1 << 3));
+ GR_STATIC_ASSERT(kLast_GrBlendEquation < (1 << 5));
+ GR_STATIC_ASSERT(kLast_GrBlendCoeff < (1 << 5));
+ GR_STATIC_ASSERT(kLast_Property < (1 << 6));
+};
+
+GR_STATIC_ASSERT(4 == sizeof(BlendFormula));
+
+GR_MAKE_BITFIELD_OPS(BlendFormula::Properties);
+
+/**
+ * Initialize a compile-time constant BlendFormula and automatically deduce fProps.
+ */
+#define INIT_BLEND_FORMULA(PRIMARY_OUT, SECONDARY_OUT, BLEND_EQUATION, SRC_COEFF, DST_COEFF) \
+ {{{PRIMARY_OUT, \
+ SECONDARY_OUT, \
+ BLEND_EQUATION, SRC_COEFF, DST_COEFF, \
+ BlendFormula::get_properties<PRIMARY_OUT, SECONDARY_OUT, \
+ BLEND_EQUATION, SRC_COEFF, DST_COEFF>::value}}}
+
+/**
+ * When there is no coverage, or the blend mode can tweak alpha for coverage, we use the standard
+ * Porter Duff formula.
+ */
+#define COEFF_FORMULA(SRC_COEFF, DST_COEFF) \
+ INIT_BLEND_FORMULA(BlendFormula::kModulate_OutputType, \
+ BlendFormula::kNone_OutputType, \
+ kAdd_GrBlendEquation, SRC_COEFF, DST_COEFF)
+
+/**
+ * Basic coeff formula similar to COEFF_FORMULA but we will make the src f*Sa. This is used in
+ * LCD dst-out.
+ */
+#define COEFF_FORMULA_SA_MODULATE(SRC_COEFF, DST_COEFF) \
+ INIT_BLEND_FORMULA(BlendFormula::kSAModulate_OutputType, \
+ BlendFormula::kNone_OutputType, \
+ kAdd_GrBlendEquation, SRC_COEFF, DST_COEFF)
+
+/**
+ * When the coeffs are (Zero, Zero), we clear the dst. This formula has its own macro so we can set
+ * the primary output type to none.
+ */
+#define DST_CLEAR_FORMULA \
+ INIT_BLEND_FORMULA(BlendFormula::kNone_OutputType, \
+ BlendFormula::kNone_OutputType, \
+ kAdd_GrBlendEquation, kZero_GrBlendCoeff, kZero_GrBlendCoeff)
+
+/**
+ * When the coeffs are (Zero, One), we don't write to the dst at all. This formula has its own macro
+ * so we can set the primary output type to none.
+ */
+#define NO_DST_WRITE_FORMULA \
+ INIT_BLEND_FORMULA(BlendFormula::kNone_OutputType, \
+ BlendFormula::kNone_OutputType, \
+ kAdd_GrBlendEquation, kZero_GrBlendCoeff, kOne_GrBlendCoeff)
+
+/**
+ * When there is coverage, the equation with f=coverage is:
+ *
+ * D' = f * (S * srcCoeff + D * dstCoeff) + (1-f) * D
+ *
+ * Distributing f and collecting the D terms (f * dstCoeff + 1 - f = 1 - [f * (1 - dstCoeff)]), this
+ * can be rewritten as:
+ *
+ * D' = f * S * srcCoeff + D * (1 - [f * (1 - dstCoeff)])
+ *
+ * To implement this formula, we output [f * (1 - dstCoeff)] for the secondary color and replace the
+ * HW dst coeff with IS2C.
+ *
+ * Xfer modes: dst-atop (Sa!=1)
+ */
+#define COVERAGE_FORMULA(ONE_MINUS_DST_COEFF_MODULATE_OUTPUT, SRC_COEFF) \
+ INIT_BLEND_FORMULA(BlendFormula::kModulate_OutputType, \
+ ONE_MINUS_DST_COEFF_MODULATE_OUTPUT, \
+ kAdd_GrBlendEquation, SRC_COEFF, kIS2C_GrBlendCoeff)
+
+/**
+ * When there is coverage and the src coeff is Zero, the equation with f=coverage becomes:
+ *
+ * D' = f * D * dstCoeff + (1-f) * D
+ *
+ * This can be rewritten as:
+ *
+ * D' = D - D * [f * (1 - dstCoeff)]
+ *
+ * To implement this formula, we output [f * (1 - dstCoeff)] for the primary color and use a reverse
+ * subtract HW blend equation with coeffs of (DC, One).
+ *
+ * Xfer modes: clear, dst-out (Sa=1), dst-in (Sa!=1), modulate (Sc!=1)
+ */
+#define COVERAGE_SRC_COEFF_ZERO_FORMULA(ONE_MINUS_DST_COEFF_MODULATE_OUTPUT) \
+ INIT_BLEND_FORMULA(ONE_MINUS_DST_COEFF_MODULATE_OUTPUT, \
+ BlendFormula::kNone_OutputType, \
+ kReverseSubtract_GrBlendEquation, kDC_GrBlendCoeff, kOne_GrBlendCoeff)
+
+/**
+ * When there is coverage and the dst coeff is Zero, the equation with f=coverage becomes:
+ *
+ * D' = f * S * srcCoeff + (1-f) * D
+ *
+ * To implement this formula, we output [f] for the secondary color and replace the HW dst coeff
+ * with IS2A. (Note that we can avoid dual source blending when Sa=1 by using ISA.)
+ *
+ * Xfer modes (Sa!=1): src, src-in, src-out
+ */
+#define COVERAGE_DST_COEFF_ZERO_FORMULA(SRC_COEFF) \
+ INIT_BLEND_FORMULA(BlendFormula::kModulate_OutputType, \
+ BlendFormula::kCoverage_OutputType, \
+ kAdd_GrBlendEquation, SRC_COEFF, kIS2A_GrBlendCoeff)
+
+/**
+ * This table outlines the blend formulas we will use with each xfermode, with and without coverage,
+ * with and without an opaque input color. Optimization properties are deduced at compile time so we
+ * can make runtime decisions quickly. RGB coverage is not supported.
+ */
+static const BlendFormula gBlendTable[2][2][SkXfermode::kLastCoeffMode + 1] = {
+
+ /*>> No coverage, input color unknown <<*/ {{
+
+ /* clear */ DST_CLEAR_FORMULA,
+ /* src */ COEFF_FORMULA( kOne_GrBlendCoeff, kZero_GrBlendCoeff),
+ /* dst */ NO_DST_WRITE_FORMULA,
+ /* src-over */ COEFF_FORMULA( kOne_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* dst-over */ COEFF_FORMULA( kIDA_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* src-in */ COEFF_FORMULA( kDA_GrBlendCoeff, kZero_GrBlendCoeff),
+ /* dst-in */ COEFF_FORMULA( kZero_GrBlendCoeff, kSA_GrBlendCoeff),
+ /* src-out */ COEFF_FORMULA( kIDA_GrBlendCoeff, kZero_GrBlendCoeff),
+ /* dst-out */ COEFF_FORMULA( kZero_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* src-atop */ COEFF_FORMULA( kDA_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* dst-atop */ COEFF_FORMULA( kIDA_GrBlendCoeff, kSA_GrBlendCoeff),
+ /* xor */ COEFF_FORMULA( kIDA_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* plus */ COEFF_FORMULA( kOne_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* modulate */ COEFF_FORMULA( kZero_GrBlendCoeff, kSC_GrBlendCoeff),
+ /* screen */ COEFF_FORMULA( kOne_GrBlendCoeff, kISC_GrBlendCoeff),
+
+ }, /*>> Has coverage, input color unknown <<*/ {
+
+ /* clear */ COVERAGE_SRC_COEFF_ZERO_FORMULA(BlendFormula::kCoverage_OutputType),
+ /* src */ COVERAGE_DST_COEFF_ZERO_FORMULA(kOne_GrBlendCoeff),
+ /* dst */ NO_DST_WRITE_FORMULA,
+ /* src-over */ COEFF_FORMULA( kOne_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* dst-over */ COEFF_FORMULA( kIDA_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* src-in */ COVERAGE_DST_COEFF_ZERO_FORMULA(kDA_GrBlendCoeff),
+ /* dst-in */ COVERAGE_SRC_COEFF_ZERO_FORMULA(BlendFormula::kISAModulate_OutputType),
+ /* src-out */ COVERAGE_DST_COEFF_ZERO_FORMULA(kIDA_GrBlendCoeff),
+ /* dst-out */ COEFF_FORMULA( kZero_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* src-atop */ COEFF_FORMULA( kDA_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* dst-atop */ COVERAGE_FORMULA(BlendFormula::kISAModulate_OutputType, kIDA_GrBlendCoeff),
+ /* xor */ COEFF_FORMULA( kIDA_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* plus */ COEFF_FORMULA( kOne_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* modulate */ COVERAGE_SRC_COEFF_ZERO_FORMULA(BlendFormula::kISCModulate_OutputType),
+ /* screen */ COEFF_FORMULA( kOne_GrBlendCoeff, kISC_GrBlendCoeff),
+
+ }}, /*>> No coverage, input color opaque <<*/ {{
+
+ /* clear */ DST_CLEAR_FORMULA,
+ /* src */ COEFF_FORMULA( kOne_GrBlendCoeff, kZero_GrBlendCoeff),
+ /* dst */ NO_DST_WRITE_FORMULA,
+ /* src-over */ COEFF_FORMULA( kOne_GrBlendCoeff, kZero_GrBlendCoeff),
+ /* dst-over */ COEFF_FORMULA( kIDA_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* src-in */ COEFF_FORMULA( kDA_GrBlendCoeff, kZero_GrBlendCoeff),
+ /* dst-in */ NO_DST_WRITE_FORMULA,
+ /* src-out */ COEFF_FORMULA( kIDA_GrBlendCoeff, kZero_GrBlendCoeff),
+ /* dst-out */ DST_CLEAR_FORMULA,
+ /* src-atop */ COEFF_FORMULA( kDA_GrBlendCoeff, kZero_GrBlendCoeff),
+ /* dst-atop */ COEFF_FORMULA( kIDA_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* xor */ COEFF_FORMULA( kIDA_GrBlendCoeff, kZero_GrBlendCoeff),
+ /* plus */ COEFF_FORMULA( kOne_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* modulate */ COEFF_FORMULA( kZero_GrBlendCoeff, kSC_GrBlendCoeff),
+ /* screen */ COEFF_FORMULA( kOne_GrBlendCoeff, kISC_GrBlendCoeff),
+
+ }, /*>> Has coverage, input color opaque <<*/ {
+
+ /* clear */ COVERAGE_SRC_COEFF_ZERO_FORMULA(BlendFormula::kCoverage_OutputType),
+ /* src */ COEFF_FORMULA( kOne_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* dst */ NO_DST_WRITE_FORMULA,
+ /* src-over */ COEFF_FORMULA( kOne_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* dst-over */ COEFF_FORMULA( kIDA_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* src-in */ COEFF_FORMULA( kDA_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* dst-in */ NO_DST_WRITE_FORMULA,
+ /* src-out */ COEFF_FORMULA( kIDA_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* dst-out */ COVERAGE_SRC_COEFF_ZERO_FORMULA(BlendFormula::kCoverage_OutputType),
+ /* src-atop */ COEFF_FORMULA( kDA_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* dst-atop */ COEFF_FORMULA( kIDA_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* xor */ COEFF_FORMULA( kIDA_GrBlendCoeff, kISA_GrBlendCoeff),
+ /* plus */ COEFF_FORMULA( kOne_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* modulate */ COVERAGE_SRC_COEFF_ZERO_FORMULA(BlendFormula::kISCModulate_OutputType),
+ /* screen */ COEFF_FORMULA( kOne_GrBlendCoeff, kISC_GrBlendCoeff),
+}}};
+
+static const BlendFormula gLCDBlendTable[SkXfermode::kLastCoeffMode + 1] = {
+ /* clear */ COVERAGE_SRC_COEFF_ZERO_FORMULA(BlendFormula::kCoverage_OutputType),
+ /* src */ COVERAGE_FORMULA(BlendFormula::kCoverage_OutputType, kOne_GrBlendCoeff),
+ /* dst */ NO_DST_WRITE_FORMULA,
+ /* src-over */ COVERAGE_FORMULA(BlendFormula::kSAModulate_OutputType, kOne_GrBlendCoeff),
+ /* dst-over */ COEFF_FORMULA( kIDA_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* src-in */ COVERAGE_FORMULA(BlendFormula::kCoverage_OutputType, kDA_GrBlendCoeff),
+ /* dst-in */ COVERAGE_SRC_COEFF_ZERO_FORMULA(BlendFormula::kISAModulate_OutputType),
+ /* src-out */ COVERAGE_FORMULA(BlendFormula::kCoverage_OutputType, kIDA_GrBlendCoeff),
+ /* dst-out */ COEFF_FORMULA_SA_MODULATE( kZero_GrBlendCoeff, kISC_GrBlendCoeff),
+ /* src-atop */ COVERAGE_FORMULA(BlendFormula::kSAModulate_OutputType, kDA_GrBlendCoeff),
+ /* dst-atop */ COVERAGE_FORMULA(BlendFormula::kISAModulate_OutputType, kIDA_GrBlendCoeff),
+ /* xor */ COVERAGE_FORMULA(BlendFormula::kSAModulate_OutputType, kIDA_GrBlendCoeff),
+ /* plus */ COEFF_FORMULA( kOne_GrBlendCoeff, kOne_GrBlendCoeff),
+ /* modulate */ COVERAGE_SRC_COEFF_ZERO_FORMULA(BlendFormula::kISCModulate_OutputType),
+ /* screen */ COEFF_FORMULA( kOne_GrBlendCoeff, kISC_GrBlendCoeff),
+};
+
+static BlendFormula get_blend_formula(const GrProcOptInfo& colorPOI,
+ const GrProcOptInfo& coveragePOI,
+ bool hasMixedSamples,
+ SkXfermode::Mode xfermode) {
+ SkASSERT(xfermode >= 0 && xfermode <= SkXfermode::kLastCoeffMode);
+ SkASSERT(!coveragePOI.isFourChannelOutput());
+
+ bool conflatesCoverage = !coveragePOI.isSolidWhite() || hasMixedSamples;
+ return gBlendTable[colorPOI.isOpaque()][conflatesCoverage][xfermode];
+}
+
+static BlendFormula get_lcd_blend_formula(const GrProcOptInfo& coveragePOI,
+ SkXfermode::Mode xfermode) {
+ SkASSERT(xfermode >= 0 && xfermode <= SkXfermode::kLastCoeffMode);
+ SkASSERT(coveragePOI.isFourChannelOutput());
+
+ return gLCDBlendTable[xfermode];
+}
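+
+// To make the indexing concrete (values read straight off gBlendTable above): an opaque input
+// color with solid-white coverage and SkXfermode::kSrcOver_Mode selects
+// gBlendTable[1][0][kSrcOver_Mode], i.e. COEFF_FORMULA(kOne_GrBlendCoeff, kZero_GrBlendCoeff):
+// src-over of an opaque color degenerates to overwriting the dst, so no dst read and no secondary
+// output are needed.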
+
+///////////////////////////////////////////////////////////////////////////////
+
+class PorterDuffXferProcessor : public GrXferProcessor {
+public:
+ PorterDuffXferProcessor(BlendFormula blendFormula) : fBlendFormula(blendFormula) {
+ this->initClassID<PorterDuffXferProcessor>();
+ }
+
+ const char* name() const override { return "Porter Duff"; }
+
+ GrGLSLXferProcessor* createGLSLInstance() const override;
+
+ BlendFormula getBlendFormula() const { return fBlendFormula; }
+
+private:
+ GrXferProcessor::OptFlags onGetOptimizations(const GrPipelineOptimizations& optimizations,
+ bool doesStencilWrite,
+ GrColor* overrideColor,
+ const GrCaps& caps) const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ bool onHasSecondaryOutput() const override { return fBlendFormula.hasSecondaryOutput(); }
+
+ void onGetBlendInfo(GrXferProcessor::BlendInfo* blendInfo) const override {
+ blendInfo->fEquation = fBlendFormula.fBlendEquation;
+ blendInfo->fSrcBlend = fBlendFormula.fSrcCoeff;
+ blendInfo->fDstBlend = fBlendFormula.fDstCoeff;
+ blendInfo->fWriteColor = fBlendFormula.modifiesDst();
+ }
+
+ bool onIsEqual(const GrXferProcessor& xpBase) const override {
+ const PorterDuffXferProcessor& xp = xpBase.cast<PorterDuffXferProcessor>();
+ return fBlendFormula == xp.fBlendFormula;
+ }
+
+ const BlendFormula fBlendFormula;
+
+ typedef GrXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void append_color_output(const PorterDuffXferProcessor& xp,
+ GrGLSLXPFragmentBuilder* fragBuilder,
+ BlendFormula::OutputType outputType, const char* output,
+ const char* inColor, const char* inCoverage) {
+ switch (outputType) {
+ case BlendFormula::kNone_OutputType:
+ fragBuilder->codeAppendf("%s = vec4(0.0);", output);
+ break;
+ case BlendFormula::kCoverage_OutputType:
+ // We can have a coverage formula while not reading coverage if there are mixed samples.
+ if (inCoverage) {
+ fragBuilder->codeAppendf("%s = %s;", output, inCoverage);
+ } else {
+ fragBuilder->codeAppendf("%s = vec4(1.0);", output);
+ }
+ break;
+ case BlendFormula::kModulate_OutputType:
+ if (inCoverage) {
+ fragBuilder->codeAppendf("%s = %s * %s;", output, inColor, inCoverage);
+ } else {
+ fragBuilder->codeAppendf("%s = %s;", output, inColor);
+ }
+ break;
+ case BlendFormula::kSAModulate_OutputType:
+ if (inCoverage) {
+ fragBuilder->codeAppendf("%s = %s.a * %s;", output, inColor, inCoverage);
+ } else {
+ fragBuilder->codeAppendf("%s = %s;", output, inColor);
+ }
+ break;
+ case BlendFormula::kISAModulate_OutputType:
+ if (inCoverage) {
+ fragBuilder->codeAppendf("%s = (1.0 - %s.a) * %s;", output, inColor, inCoverage);
+ } else {
+ fragBuilder->codeAppendf("%s = vec4(1.0 - %s.a);", output, inColor);
+ }
+ break;
+ case BlendFormula::kISCModulate_OutputType:
+ if (inCoverage) {
+ fragBuilder->codeAppendf("%s = (vec4(1.0) - %s) * %s;", output, inColor, inCoverage);
+ } else {
+ fragBuilder->codeAppendf("%s = vec4(1.0) - %s;", output, inColor);
+ }
+ break;
+ default:
+ SkFAIL("Unsupported output type.");
+ break;
+ }
+}
+
+class GLPorterDuffXferProcessor : public GrGLSLXferProcessor {
+public:
+ static void GenKey(const GrProcessor& processor, GrProcessorKeyBuilder* b) {
+ const PorterDuffXferProcessor& xp = processor.cast<PorterDuffXferProcessor>();
+ b->add32(xp.getBlendFormula().fPrimaryOutputType |
+ (xp.getBlendFormula().fSecondaryOutputType << 3));
+ GR_STATIC_ASSERT(BlendFormula::kLast_OutputType < 8);
+ }
+
+private:
+ void emitOutputsForBlendState(const EmitArgs& args) override {
+ const PorterDuffXferProcessor& xp = args.fXP.cast<PorterDuffXferProcessor>();
+ GrGLSLXPFragmentBuilder* fragBuilder = args.fXPFragBuilder;
+
+ BlendFormula blendFormula = xp.getBlendFormula();
+ if (blendFormula.hasSecondaryOutput()) {
+ append_color_output(xp, fragBuilder, blendFormula.fSecondaryOutputType,
+ args.fOutputSecondary, args.fInputColor, args.fInputCoverage);
+ }
+ append_color_output(xp, fragBuilder, blendFormula.fPrimaryOutputType,
+ args.fOutputPrimary, args.fInputColor, args.fInputCoverage);
+ }
+
+ void onSetData(const GrGLSLProgramDataManager&, const GrXferProcessor&) override {}
+
+ typedef GrGLSLXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+void PorterDuffXferProcessor::onGetGLSLProcessorKey(const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) const {
+ GLPorterDuffXferProcessor::GenKey(*this, b);
+}
+
+GrGLSLXferProcessor* PorterDuffXferProcessor::createGLSLInstance() const {
+ return new GLPorterDuffXferProcessor;
+}
+
+GrXferProcessor::OptFlags
+PorterDuffXferProcessor::onGetOptimizations(const GrPipelineOptimizations& optimizations,
+ bool doesStencilWrite,
+ GrColor* overrideColor,
+ const GrCaps& caps) const {
+ GrXferProcessor::OptFlags optFlags = GrXferProcessor::kNone_OptFlags;
+ if (!fBlendFormula.modifiesDst()) {
+ if (!doesStencilWrite) {
+ optFlags |= GrXferProcessor::kSkipDraw_OptFlag;
+ }
+ optFlags |= (GrXferProcessor::kIgnoreColor_OptFlag |
+ GrXferProcessor::kIgnoreCoverage_OptFlag |
+ GrXferProcessor::kCanTweakAlphaForCoverage_OptFlag);
+ } else {
+ if (!fBlendFormula.usesInputColor()) {
+ optFlags |= GrXferProcessor::kIgnoreColor_OptFlag;
+ }
+ if (optimizations.fCoveragePOI.isSolidWhite()) {
+ optFlags |= GrXferProcessor::kIgnoreCoverage_OptFlag;
+ }
+ if (optimizations.fColorPOI.allStagesMultiplyInput() &&
+ fBlendFormula.canTweakAlphaForCoverage() &&
+ !optimizations.fCoveragePOI.isFourChannelOutput()) {
+ optFlags |= GrXferProcessor::kCanTweakAlphaForCoverage_OptFlag;
+ }
+ }
+ return optFlags;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class ShaderPDXferProcessor : public GrXferProcessor {
+public:
+ ShaderPDXferProcessor(const DstTexture* dstTexture,
+ bool hasMixedSamples,
+ SkXfermode::Mode xfermode)
+ : INHERITED(dstTexture, true, hasMixedSamples)
+ , fXfermode(xfermode) {
+ this->initClassID<ShaderPDXferProcessor>();
+ }
+
+ const char* name() const override { return "Porter Duff Shader"; }
+
+ GrGLSLXferProcessor* createGLSLInstance() const override;
+
+ SkXfermode::Mode getXfermode() const { return fXfermode; }
+
+private:
+ GrXferProcessor::OptFlags onGetOptimizations(const GrPipelineOptimizations&, bool, GrColor*,
+ const GrCaps&) const override {
+ return kNone_OptFlags;
+ }
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ bool onIsEqual(const GrXferProcessor& xpBase) const override {
+ const ShaderPDXferProcessor& xp = xpBase.cast<ShaderPDXferProcessor>();
+ return fXfermode == xp.fXfermode;
+ }
+
+ const SkXfermode::Mode fXfermode;
+
+ typedef GrXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GLShaderPDXferProcessor : public GrGLSLXferProcessor {
+public:
+ static void GenKey(const GrProcessor& processor, GrProcessorKeyBuilder* b) {
+ const ShaderPDXferProcessor& xp = processor.cast<ShaderPDXferProcessor>();
+ b->add32(xp.getXfermode());
+ }
+
+private:
+ void emitBlendCodeForDstRead(GrGLSLXPFragmentBuilder* fragBuilder,
+ GrGLSLUniformHandler* uniformHandler,
+ const char* srcColor,
+ const char* srcCoverage,
+ const char* dstColor,
+ const char* outColor,
+ const char* outColorSecondary,
+ const GrXferProcessor& proc) override {
+ const ShaderPDXferProcessor& xp = proc.cast<ShaderPDXferProcessor>();
+
+ GrGLSLBlend::AppendMode(fragBuilder, srcColor, dstColor, outColor, xp.getXfermode());
+
+ // Apply coverage.
+ INHERITED::DefaultCoverageModulation(fragBuilder, srcCoverage, dstColor, outColor,
+ outColorSecondary, xp);
+ }
+
+ void onSetData(const GrGLSLProgramDataManager&, const GrXferProcessor&) override {}
+
+ typedef GrGLSLXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+void ShaderPDXferProcessor::onGetGLSLProcessorKey(const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) const {
+ GLShaderPDXferProcessor::GenKey(*this, b);
+}
+
+GrGLSLXferProcessor* ShaderPDXferProcessor::createGLSLInstance() const {
+ return new GLShaderPDXferProcessor;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class PDLCDXferProcessor : public GrXferProcessor {
+public:
+ static GrXferProcessor* Create(SkXfermode::Mode xfermode, const GrProcOptInfo& colorPOI);
+
+ ~PDLCDXferProcessor() override;
+
+ const char* name() const override { return "Porter Duff LCD"; }
+
+ GrGLSLXferProcessor* createGLSLInstance() const override;
+
+private:
+ PDLCDXferProcessor(GrColor blendConstant, uint8_t alpha);
+
+ GrXferProcessor::OptFlags onGetOptimizations(const GrPipelineOptimizations& optimizations,
+ bool doesStencilWrite,
+ GrColor* overrideColor,
+ const GrCaps& caps) const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
+
+ void onGetBlendInfo(GrXferProcessor::BlendInfo* blendInfo) const override {
+ blendInfo->fSrcBlend = kConstC_GrBlendCoeff;
+ blendInfo->fDstBlend = kISC_GrBlendCoeff;
+ blendInfo->fBlendConstant = fBlendConstant;
+ }
+
+ bool onIsEqual(const GrXferProcessor& xpBase) const override {
+ const PDLCDXferProcessor& xp = xpBase.cast<PDLCDXferProcessor>();
+ if (fBlendConstant != xp.fBlendConstant ||
+ fAlpha != xp.fAlpha) {
+ return false;
+ }
+ return true;
+ }
+
+ GrColor fBlendConstant;
+ uint8_t fAlpha;
+
+ typedef GrXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GLPDLCDXferProcessor : public GrGLSLXferProcessor {
+public:
+ GLPDLCDXferProcessor(const GrProcessor&) {}
+
+ virtual ~GLPDLCDXferProcessor() {}
+
+ static void GenKey(const GrProcessor& processor, const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) {}
+
+private:
+ void emitOutputsForBlendState(const EmitArgs& args) override {
+ GrGLSLXPFragmentBuilder* fragBuilder = args.fXPFragBuilder;
+ SkASSERT(args.fInputCoverage);
+ fragBuilder->codeAppendf("%s = %s * %s;", args.fOutputPrimary, args.fInputColor,
+ args.fInputCoverage);
+ }
+
+ void onSetData(const GrGLSLProgramDataManager&, const GrXferProcessor&) override {}
+
+ typedef GrGLSLXferProcessor INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+PDLCDXferProcessor::PDLCDXferProcessor(GrColor blendConstant, uint8_t alpha)
+ : fBlendConstant(blendConstant)
+ , fAlpha(alpha) {
+ this->initClassID<PDLCDXferProcessor>();
+}
+
+GrXferProcessor* PDLCDXferProcessor::Create(SkXfermode::Mode xfermode,
+ const GrProcOptInfo& colorPOI) {
+ if (SkXfermode::kSrcOver_Mode != xfermode) {
+ return nullptr;
+ }
+
+ if (kRGBA_GrColorComponentFlags != colorPOI.validFlags()) {
+ return nullptr;
+ }
+
+ GrColor blendConstant = GrUnpremulColor(colorPOI.color());
+ uint8_t alpha = GrColorUnpackA(blendConstant);
+ blendConstant |= (0xff << GrColor_SHIFT_A);
+
+ return new PDLCDXferProcessor(blendConstant, alpha);
+}
+
+PDLCDXferProcessor::~PDLCDXferProcessor() {
+}
+
+void PDLCDXferProcessor::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GLPDLCDXferProcessor::GenKey(*this, caps, b);
+}
+
+GrGLSLXferProcessor* PDLCDXferProcessor::createGLSLInstance() const {
+ return new GLPDLCDXferProcessor(*this);
+}
+
+GrXferProcessor::OptFlags
+PDLCDXferProcessor::onGetOptimizations(const GrPipelineOptimizations& optimizations,
+ bool doesStencilWrite,
+ GrColor* overrideColor,
+ const GrCaps& caps) const {
+ // We want to force our primary output to be alpha * Coverage, where alpha is the alpha
+ // value of the blend constant. We should already have valid blend coeffs if we are at
+ // a point where we have RGB coverage. We don't need any color stages since the known color
+ // output is already baked into the blendConstant.
+ *overrideColor = GrColorPackRGBA(fAlpha, fAlpha, fAlpha, fAlpha);
+ return GrXferProcessor::kOverrideColor_OptFlag;
+}
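+
+// Working through the pieces above: the blend constant is the unpremultiplied paint color with its
+// alpha forced to 0xff, the shader's primary output is overridden to (a, a, a, a) * coverage, and
+// the HW blend is ConstC * src + ISC * dst. Per channel that evaluates to
+//
+//     dst' = unpremulColor * a * coverage + (1 - a * coverage) * dst
+//          = premulColor * coverage + (1 - a * coverage) * dst
+//
+// which is per-channel src-over against the LCD coverage without requiring dual source blending.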
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrPorterDuffXPFactory::GrPorterDuffXPFactory(SkXfermode::Mode xfermode)
+ : fXfermode(xfermode) {
+ SkASSERT(fXfermode <= SkXfermode::kLastCoeffMode);
+ this->initClassID<GrPorterDuffXPFactory>();
+}
+
+sk_sp<GrXPFactory> GrPorterDuffXPFactory::Make(SkXfermode::Mode xfermode) {
+ static GrPorterDuffXPFactory gClearPDXPF(SkXfermode::kClear_Mode);
+ static GrPorterDuffXPFactory gSrcPDXPF(SkXfermode::kSrc_Mode);
+ static GrPorterDuffXPFactory gDstPDXPF(SkXfermode::kDst_Mode);
+ static GrPorterDuffXPFactory gSrcOverPDXPF(SkXfermode::kSrcOver_Mode);
+ static GrPorterDuffXPFactory gDstOverPDXPF(SkXfermode::kDstOver_Mode);
+ static GrPorterDuffXPFactory gSrcInPDXPF(SkXfermode::kSrcIn_Mode);
+ static GrPorterDuffXPFactory gDstInPDXPF(SkXfermode::kDstIn_Mode);
+ static GrPorterDuffXPFactory gSrcOutPDXPF(SkXfermode::kSrcOut_Mode);
+ static GrPorterDuffXPFactory gDstOutPDXPF(SkXfermode::kDstOut_Mode);
+ static GrPorterDuffXPFactory gSrcATopPDXPF(SkXfermode::kSrcATop_Mode);
+ static GrPorterDuffXPFactory gDstATopPDXPF(SkXfermode::kDstATop_Mode);
+ static GrPorterDuffXPFactory gXorPDXPF(SkXfermode::kXor_Mode);
+ static GrPorterDuffXPFactory gPlusPDXPF(SkXfermode::kPlus_Mode);
+ static GrPorterDuffXPFactory gModulatePDXPF(SkXfermode::kModulate_Mode);
+ static GrPorterDuffXPFactory gScreenPDXPF(SkXfermode::kScreen_Mode);
+
+ static GrPorterDuffXPFactory* gFactories[] = {
+ &gClearPDXPF, &gSrcPDXPF, &gDstPDXPF, &gSrcOverPDXPF, &gDstOverPDXPF, &gSrcInPDXPF,
+ &gDstInPDXPF, &gSrcOutPDXPF, &gDstOutPDXPF, &gSrcATopPDXPF, &gDstATopPDXPF, &gXorPDXPF,
+ &gPlusPDXPF, &gModulatePDXPF, &gScreenPDXPF
+ };
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gFactories) == SkXfermode::kLastCoeffMode + 1);
+
+ if (xfermode < 0 || xfermode > SkXfermode::kLastCoeffMode) {
+ return nullptr;
+ }
+ return sk_sp<GrXPFactory>(SkRef(gFactories[xfermode]));
+}
+
+GrXferProcessor*
+GrPorterDuffXPFactory::onCreateXferProcessor(const GrCaps& caps,
+ const GrPipelineOptimizations& optimizations,
+ bool hasMixedSamples,
+ const DstTexture* dstTexture) const {
+ if (optimizations.fOverrides.fUsePLSDstRead) {
+ return new ShaderPDXferProcessor(dstTexture, hasMixedSamples, fXfermode);
+ }
+ BlendFormula blendFormula;
+ if (optimizations.fCoveragePOI.isFourChannelOutput()) {
+ if (SkXfermode::kSrcOver_Mode == fXfermode &&
+ kRGBA_GrColorComponentFlags == optimizations.fColorPOI.validFlags() &&
+ !caps.shaderCaps()->dualSourceBlendingSupport() &&
+ !caps.shaderCaps()->dstReadInShaderSupport()) {
+ // If we don't have dual source blending or in-shader dst reads, we fall back to this
+ // trick for rendering SrcOver LCD text instead of doing a dst copy.
+ SkASSERT(!dstTexture || !dstTexture->texture());
+ return PDLCDXferProcessor::Create(fXfermode, optimizations.fColorPOI);
+ }
+ blendFormula = get_lcd_blend_formula(optimizations.fCoveragePOI, fXfermode);
+ } else {
+ blendFormula = get_blend_formula(optimizations.fColorPOI, optimizations.fCoveragePOI,
+ hasMixedSamples, fXfermode);
+ }
+
+ if (blendFormula.hasSecondaryOutput() && !caps.shaderCaps()->dualSourceBlendingSupport()) {
+ return new ShaderPDXferProcessor(dstTexture, hasMixedSamples, fXfermode);
+ }
+
+ SkASSERT(!dstTexture || !dstTexture->texture());
+ return new PorterDuffXferProcessor(blendFormula);
+}
+
+void GrPorterDuffXPFactory::getInvariantBlendedColor(const GrProcOptInfo& colorPOI,
+ InvariantBlendedColor* blendedColor) const {
+ // Find the blended color info based on the formula that does not have coverage.
+ BlendFormula colorFormula = gBlendTable[colorPOI.isOpaque()][0][fXfermode];
+ if (colorFormula.usesDstColor()) {
+ blendedColor->fWillBlendWithDst = true;
+ blendedColor->fKnownColorFlags = kNone_GrColorComponentFlags;
+ return;
+ }
+
+ blendedColor->fWillBlendWithDst = false;
+
+ SkASSERT(kAdd_GrBlendEquation == colorFormula.fBlendEquation);
+
+ switch (colorFormula.fSrcCoeff) {
+ case kZero_GrBlendCoeff:
+ blendedColor->fKnownColor = 0;
+ blendedColor->fKnownColorFlags = kRGBA_GrColorComponentFlags;
+ return;
+
+ case kOne_GrBlendCoeff:
+ blendedColor->fKnownColor = colorPOI.color();
+ blendedColor->fKnownColorFlags = colorPOI.validFlags();
+ return;
+
+ default:
+ blendedColor->fKnownColorFlags = kNone_GrColorComponentFlags;
+ return;
+ }
+}
+
+bool GrPorterDuffXPFactory::onWillReadDstColor(const GrCaps& caps,
+ const GrPipelineOptimizations& optimizations) const {
+ if (caps.shaderCaps()->dualSourceBlendingSupport()) {
+ return false;
+ }
+
+ // When we have four channel coverage we always need to read the dst in order to correctly
+ // blend. The one exception is when we are using srcover mode and we know the input color into
+ // the XP.
+ if (optimizations.fCoveragePOI.isFourChannelOutput()) {
+ if (SkXfermode::kSrcOver_Mode == fXfermode &&
+ kRGBA_GrColorComponentFlags == optimizations.fColorPOI.validFlags() &&
+ !caps.shaderCaps()->dstReadInShaderSupport()) {
+ return false;
+ }
+ return get_lcd_blend_formula(optimizations.fCoveragePOI, fXfermode).hasSecondaryOutput();
+ }
+
+    // We fall back on the shader XP when the blend formula would use dual source blending but we
+    // don't have support for it.
+ static const bool kHasMixedSamples = false;
+ SkASSERT(!caps.usesMixedSamples()); // We never use mixed samples without dual source blending.
+ return get_blend_formula(optimizations.fColorPOI, optimizations.fCoveragePOI, kHasMixedSamples,
+ fXfermode).hasSecondaryOutput();
+}
+
+GR_DEFINE_XP_FACTORY_TEST(GrPorterDuffXPFactory);
+
+sk_sp<GrXPFactory> GrPorterDuffXPFactory::TestCreate(GrProcessorTestData* d) {
+ SkXfermode::Mode mode = SkXfermode::Mode(d->fRandom->nextULessThan(SkXfermode::kLastCoeffMode));
+ return GrPorterDuffXPFactory::Make(mode);
+}
+
+void GrPorterDuffXPFactory::TestGetXPOutputTypes(const GrXferProcessor* xp,
+ int* outPrimary,
+ int* outSecondary) {
+ if (!!strcmp(xp->name(), "Porter Duff")) {
+ *outPrimary = *outSecondary = -1;
+ return;
+ }
+ BlendFormula blendFormula = static_cast<const PorterDuffXferProcessor*>(xp)->getBlendFormula();
+ *outPrimary = blendFormula.fPrimaryOutputType;
+ *outSecondary = blendFormula.fSecondaryOutputType;
+}
+
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+// SrcOver Global functions
+////////////////////////////////////////////////////////////////////////////////////////////////
+const GrXferProcessor& GrPorterDuffXPFactory::SimpleSrcOverXP() {
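+    // kOne / kISA is the classic src-over formula: dst = src + (1 - srcAlpha) * dst.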
+ static BlendFormula gSrcOverBlendFormula = COEFF_FORMULA(kOne_GrBlendCoeff,
+ kISA_GrBlendCoeff);
+ static PorterDuffXferProcessor gSrcOverXP(gSrcOverBlendFormula);
+ return gSrcOverXP;
+}
+
+GrXferProcessor* GrPorterDuffXPFactory::CreateSrcOverXferProcessor(
+ const GrCaps& caps,
+ const GrPipelineOptimizations& optimizations,
+ bool hasMixedSamples,
+ const GrXferProcessor::DstTexture* dstTexture) {
+ if (optimizations.fOverrides.fUsePLSDstRead) {
+ return new ShaderPDXferProcessor(dstTexture, hasMixedSamples, SkXfermode::kSrcOver_Mode);
+ }
+
+    // We want to avoid creating an xfer processor if possible. Thus, for the simple case where we
+    // are not doing lcd blending we just use our global SimpleSrcOverXP. This differs slightly from
+    // the general case, where we convert a src-over blend that has solid coverage and an opaque
+    // color to src-mode, which allows blending to be disabled.
+ if (!optimizations.fCoveragePOI.isFourChannelOutput()) {
+        // We return nullptr here, which our caller interprets as meaning "use SimpleSrcOverXP".
+        // We don't simply return the address of that XP here because our caller would then have to
+        // unref it, and that is unsafe: it is a global object and GrProgramElement's ref-counting
+        // system is not thread safe.
+ return nullptr;
+ }
+
+ if (kRGBA_GrColorComponentFlags == optimizations.fColorPOI.validFlags() &&
+ !caps.shaderCaps()->dualSourceBlendingSupport() &&
+ !caps.shaderCaps()->dstReadInShaderSupport()) {
+        // If we don't have dual source blending or in-shader dst reads, we fall
+        // back to this trick for rendering SrcOver LCD text instead of doing a
+        // dst copy.
+ SkASSERT(!dstTexture || !dstTexture->texture());
+ return PDLCDXferProcessor::Create(SkXfermode::kSrcOver_Mode, optimizations.fColorPOI);
+ }
+
+ BlendFormula blendFormula;
+ blendFormula = get_lcd_blend_formula(optimizations.fCoveragePOI, SkXfermode::kSrcOver_Mode);
+ if (blendFormula.hasSecondaryOutput() && !caps.shaderCaps()->dualSourceBlendingSupport()) {
+ return new ShaderPDXferProcessor(dstTexture, hasMixedSamples, SkXfermode::kSrcOver_Mode);
+ }
+
+ SkASSERT(!dstTexture || !dstTexture->texture());
+ return new PorterDuffXferProcessor(blendFormula);
+}
+
+bool GrPorterDuffXPFactory::SrcOverWillNeedDstTexture(const GrCaps& caps,
+ const GrPipelineOptimizations& optimizations) {
+ if (caps.shaderCaps()->dstReadInShaderSupport() ||
+ caps.shaderCaps()->dualSourceBlendingSupport()) {
+ return false;
+ }
+
+ // When we have four channel coverage we always need to read the dst in order to correctly
+ // blend. The one exception is when we are using srcover mode and we know the input color
+ // into the XP.
+ if (optimizations.fCoveragePOI.isFourChannelOutput()) {
+ if (kRGBA_GrColorComponentFlags == optimizations.fColorPOI.validFlags() &&
+ !caps.shaderCaps()->dstReadInShaderSupport()) {
+ return false;
+ }
+ return get_lcd_blend_formula(optimizations.fCoveragePOI,
+ SkXfermode::kSrcOver_Mode).hasSecondaryOutput();
+ }
+
+    // We fall back on the shader XP when the blend formula would use dual source blending but we
+    // don't have support for it.
+ static const bool kHasMixedSamples = false;
+ SkASSERT(!caps.usesMixedSamples()); // We never use mixed samples without dual source blending.
+ return get_blend_formula(optimizations.fColorPOI, optimizations.fCoveragePOI,
+ kHasMixedSamples, SkXfermode::kSrcOver_Mode).hasSecondaryOutput();
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrRRectEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrRRectEffect.cpp
new file mode 100644
index 000000000..37bb3f884
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrRRectEffect.cpp
@@ -0,0 +1,779 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrRRectEffect.h"
+
+#include "GrConvexPolyEffect.h"
+#include "GrFragmentProcessor.h"
+#include "GrInvariantOutput.h"
+#include "GrOvalEffect.h"
+#include "SkRRect.h"
+#include "SkTLazy.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+
+// The effects defined here only handle rrect radii >= kRadiusMin.
+static const SkScalar kRadiusMin = SK_ScalarHalf;
+
+//////////////////////////////////////////////////////////////////////////////
+
+class CircularRRectEffect : public GrFragmentProcessor {
+public:
+
+ enum CornerFlags {
+ kTopLeft_CornerFlag = (1 << SkRRect::kUpperLeft_Corner),
+ kTopRight_CornerFlag = (1 << SkRRect::kUpperRight_Corner),
+ kBottomRight_CornerFlag = (1 << SkRRect::kLowerRight_Corner),
+ kBottomLeft_CornerFlag = (1 << SkRRect::kLowerLeft_Corner),
+
+ kLeft_CornerFlags = kTopLeft_CornerFlag | kBottomLeft_CornerFlag,
+ kTop_CornerFlags = kTopLeft_CornerFlag | kTopRight_CornerFlag,
+ kRight_CornerFlags = kTopRight_CornerFlag | kBottomRight_CornerFlag,
+ kBottom_CornerFlags = kBottomLeft_CornerFlag | kBottomRight_CornerFlag,
+
+ kAll_CornerFlags = kTopLeft_CornerFlag | kTopRight_CornerFlag |
+ kBottomLeft_CornerFlag | kBottomRight_CornerFlag,
+
+ kNone_CornerFlags = 0
+ };
+
+    // The flags are used to indicate which corners are circular (unflagged corners are assumed to
+    // be square).
+ static sk_sp<GrFragmentProcessor> Make(GrPrimitiveEdgeType, uint32_t circularCornerFlags,
+ const SkRRect&);
+
+ virtual ~CircularRRectEffect() {}
+
+ const char* name() const override { return "CircularRRect"; }
+
+ const SkRRect& getRRect() const { return fRRect; }
+
+ uint32_t getCircularCornerFlags() const { return fCircularCornerFlags; }
+
+ GrPrimitiveEdgeType getEdgeType() const { return fEdgeType; }
+
+private:
+ CircularRRectEffect(GrPrimitiveEdgeType, uint32_t circularCornerFlags, const SkRRect&);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor& other) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ SkRRect fRRect;
+ GrPrimitiveEdgeType fEdgeType;
+ uint32_t fCircularCornerFlags;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+sk_sp<GrFragmentProcessor> CircularRRectEffect::Make(GrPrimitiveEdgeType edgeType,
+ uint32_t circularCornerFlags,
+ const SkRRect& rrect) {
+ if (kFillAA_GrProcessorEdgeType != edgeType && kInverseFillAA_GrProcessorEdgeType != edgeType) {
+ return nullptr;
+ }
+ return sk_sp<GrFragmentProcessor>(
+ new CircularRRectEffect(edgeType, circularCornerFlags, rrect));
+}
+
+void CircularRRectEffect::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ inout->mulByUnknownSingleComponent();
+}
+
+CircularRRectEffect::CircularRRectEffect(GrPrimitiveEdgeType edgeType, uint32_t circularCornerFlags,
+ const SkRRect& rrect)
+ : fRRect(rrect)
+ , fEdgeType(edgeType)
+ , fCircularCornerFlags(circularCornerFlags) {
+ this->initClassID<CircularRRectEffect>();
+ this->setWillReadFragmentPosition();
+}
+
+bool CircularRRectEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const CircularRRectEffect& crre = other.cast<CircularRRectEffect>();
+ // The corner flags are derived from fRRect, so no need to check them.
+ return fEdgeType == crre.fEdgeType && fRRect == crre.fRRect;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(CircularRRectEffect);
+
+sk_sp<GrFragmentProcessor> CircularRRectEffect::TestCreate(GrProcessorTestData* d) {
+ SkScalar w = d->fRandom->nextRangeScalar(20.f, 1000.f);
+ SkScalar h = d->fRandom->nextRangeScalar(20.f, 1000.f);
+ SkScalar r = d->fRandom->nextRangeF(kRadiusMin, 9.f);
+ SkRRect rrect;
+ rrect.setRectXY(SkRect::MakeWH(w, h), r, r);
+ sk_sp<GrFragmentProcessor> fp;
+ do {
+ GrPrimitiveEdgeType et =
+ (GrPrimitiveEdgeType)d->fRandom->nextULessThan(kGrProcessorEdgeTypeCnt);
+ fp = GrRRectEffect::Make(et, rrect);
+ } while (nullptr == fp);
+ return fp;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GLCircularRRectEffect : public GrGLSLFragmentProcessor {
+public:
+ GLCircularRRectEffect() {
+ fPrevRRect.setEmpty();
+ }
+
+ virtual void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrGLSLCaps&, GrProcessorKeyBuilder*);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fInnerRectUniform;
+ GrGLSLProgramDataManager::UniformHandle fRadiusPlusHalfUniform;
+ SkRRect fPrevRRect;
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GLCircularRRectEffect::emitCode(EmitArgs& args) {
+ const CircularRRectEffect& crre = args.fFp.cast<CircularRRectEffect>();
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ const char *rectName;
+ const char *radiusPlusHalfName;
+ // The inner rect is the rrect bounds inset by the radius. Its left, top, right, and bottom
+ // edges correspond to components x, y, z, and w, respectively. When a side of the rrect has
+ // only rectangular corners, that side's value corresponds to the rect edge's value outset by
+ // half a pixel.
+ fInnerRectUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType, kDefault_GrSLPrecision,
+ "innerRect",
+ &rectName);
+ // x is (r + .5) and y is 1/(r + .5)
+ fRadiusPlusHalfUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType, kDefault_GrSLPrecision,
+ "radiusPlusHalf",
+ &radiusPlusHalfName);
+
+ // If we're on a device with a "real" mediump then the length calculation could overflow.
+ SkString clampedCircleDistance;
+ if (args.fGLSLCaps->floatPrecisionVaries()) {
+ clampedCircleDistance.printf("clamp(%s.x * (1.0 - length(dxy * %s.y)), 0.0, 1.0);",
+ radiusPlusHalfName, radiusPlusHalfName);
+ } else {
+ clampedCircleDistance.printf("clamp(%s.x - length(dxy), 0.0, 1.0);", radiusPlusHalfName);
+ }
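+    // Both forms evaluate to clamp((r + .5) - length(dxy), 0.0, 1.0); the normalized variant keeps
+    // the argument of length() near 1 so intermediate values stay within mediump range.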
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const char* fragmentPos = fragBuilder->fragmentPosition();
+ // At each quarter-circle corner we compute a vector that is the offset of the fragment position
+ // from the circle center. The vector is pinned in x and y to be in the quarter-plane relevant
+    // to that corner. This means that points in the interior near the rrect top edge will have
+    // a vector that points straight up for both the TL and TR corners. Computing an
+ // alpha from this vector at either the TR or TL corner will give the correct result. Similarly,
+ // fragments near the other three edges will get the correct AA. Fragments in the interior of
+ // the rrect will have a (0,0) vector at all four corners. So long as the radius > 0.5 they will
+ // correctly produce an alpha value of 1 at all four corners. We take the min of all the alphas.
+    // The code below is a simplified version of the above that takes maxes on the vector
+    // components before computing distances and alpha values, so that only one distance
+    // computation is needed to determine the min alpha.
+ //
+ // For the cases where one half of the rrect is rectangular we drop one of the x or y
+ // computations, compute a separate rect edge alpha for the rect side, and mul the two computed
+ // alphas together.
+ switch (crre.getCircularCornerFlags()) {
+ case CircularRRectEffect::kAll_CornerFlags:
+ fragBuilder->codeAppendf("vec2 dxy0 = %s.xy - %s.xy;", rectName, fragmentPos);
+ fragBuilder->codeAppendf("vec2 dxy1 = %s.xy - %s.zw;", fragmentPos, rectName);
+ fragBuilder->codeAppend("vec2 dxy = max(max(dxy0, dxy1), 0.0);");
+ fragBuilder->codeAppendf("float alpha = %s;", clampedCircleDistance.c_str());
+ break;
+ case CircularRRectEffect::kTopLeft_CornerFlag:
+ fragBuilder->codeAppendf("vec2 dxy = max(%s.xy - %s.xy, 0.0);",
+ rectName, fragmentPos);
+ fragBuilder->codeAppendf("float rightAlpha = clamp(%s.z - %s.x, 0.0, 1.0);",
+ rectName, fragmentPos);
+ fragBuilder->codeAppendf("float bottomAlpha = clamp(%s.w - %s.y, 0.0, 1.0);",
+ rectName, fragmentPos);
+ fragBuilder->codeAppendf("float alpha = bottomAlpha * rightAlpha * %s;",
+ clampedCircleDistance.c_str());
+ break;
+ case CircularRRectEffect::kTopRight_CornerFlag:
+ fragBuilder->codeAppendf("vec2 dxy = max(vec2(%s.x - %s.z, %s.y - %s.y), 0.0);",
+ fragmentPos, rectName, rectName, fragmentPos);
+ fragBuilder->codeAppendf("float leftAlpha = clamp(%s.x - %s.x, 0.0, 1.0);",
+ fragmentPos, rectName);
+ fragBuilder->codeAppendf("float bottomAlpha = clamp(%s.w - %s.y, 0.0, 1.0);",
+ rectName, fragmentPos);
+ fragBuilder->codeAppendf("float alpha = bottomAlpha * leftAlpha * %s;",
+ clampedCircleDistance.c_str());
+ break;
+ case CircularRRectEffect::kBottomRight_CornerFlag:
+ fragBuilder->codeAppendf("vec2 dxy = max(%s.xy - %s.zw, 0.0);",
+ fragmentPos, rectName);
+ fragBuilder->codeAppendf("float leftAlpha = clamp(%s.x - %s.x, 0.0, 1.0);",
+ fragmentPos, rectName);
+ fragBuilder->codeAppendf("float topAlpha = clamp(%s.y - %s.y, 0.0, 1.0);",
+ fragmentPos, rectName);
+ fragBuilder->codeAppendf("float alpha = topAlpha * leftAlpha * %s;",
+ clampedCircleDistance.c_str());
+ break;
+ case CircularRRectEffect::kBottomLeft_CornerFlag:
+ fragBuilder->codeAppendf("vec2 dxy = max(vec2(%s.x - %s.x, %s.y - %s.w), 0.0);",
+ rectName, fragmentPos, fragmentPos, rectName);
+ fragBuilder->codeAppendf("float rightAlpha = clamp(%s.z - %s.x, 0.0, 1.0);",
+ rectName, fragmentPos);
+ fragBuilder->codeAppendf("float topAlpha = clamp(%s.y - %s.y, 0.0, 1.0);",
+ fragmentPos, rectName);
+ fragBuilder->codeAppendf("float alpha = topAlpha * rightAlpha * %s;",
+ clampedCircleDistance.c_str());
+ break;
+ case CircularRRectEffect::kLeft_CornerFlags:
+ fragBuilder->codeAppendf("vec2 dxy0 = %s.xy - %s.xy;", rectName, fragmentPos);
+ fragBuilder->codeAppendf("float dy1 = %s.y - %s.w;", fragmentPos, rectName);
+ fragBuilder->codeAppend("vec2 dxy = max(vec2(dxy0.x, max(dxy0.y, dy1)), 0.0);");
+ fragBuilder->codeAppendf("float rightAlpha = clamp(%s.z - %s.x, 0.0, 1.0);",
+ rectName, fragmentPos);
+ fragBuilder->codeAppendf("float alpha = rightAlpha * %s;",
+ clampedCircleDistance.c_str());
+ break;
+ case CircularRRectEffect::kTop_CornerFlags:
+ fragBuilder->codeAppendf("vec2 dxy0 = %s.xy - %s.xy;", rectName, fragmentPos);
+ fragBuilder->codeAppendf("float dx1 = %s.x - %s.z;", fragmentPos, rectName);
+ fragBuilder->codeAppend("vec2 dxy = max(vec2(max(dxy0.x, dx1), dxy0.y), 0.0);");
+ fragBuilder->codeAppendf("float bottomAlpha = clamp(%s.w - %s.y, 0.0, 1.0);",
+ rectName, fragmentPos);
+ fragBuilder->codeAppendf("float alpha = bottomAlpha * %s;",
+ clampedCircleDistance.c_str());
+ break;
+ case CircularRRectEffect::kRight_CornerFlags:
+ fragBuilder->codeAppendf("float dy0 = %s.y - %s.y;", rectName, fragmentPos);
+ fragBuilder->codeAppendf("vec2 dxy1 = %s.xy - %s.zw;", fragmentPos, rectName);
+ fragBuilder->codeAppend("vec2 dxy = max(vec2(dxy1.x, max(dy0, dxy1.y)), 0.0);");
+ fragBuilder->codeAppendf("float leftAlpha = clamp(%s.x - %s.x, 0.0, 1.0);",
+ fragmentPos, rectName);
+ fragBuilder->codeAppendf("float alpha = leftAlpha * %s;",
+ clampedCircleDistance.c_str());
+ break;
+ case CircularRRectEffect::kBottom_CornerFlags:
+ fragBuilder->codeAppendf("float dx0 = %s.x - %s.x;", rectName, fragmentPos);
+ fragBuilder->codeAppendf("vec2 dxy1 = %s.xy - %s.zw;", fragmentPos, rectName);
+ fragBuilder->codeAppend("vec2 dxy = max(vec2(max(dx0, dxy1.x), dxy1.y), 0.0);");
+ fragBuilder->codeAppendf("float topAlpha = clamp(%s.y - %s.y, 0.0, 1.0);",
+ fragmentPos, rectName);
+ fragBuilder->codeAppendf("float alpha = topAlpha * %s;",
+ clampedCircleDistance.c_str());
+ break;
+ }
+
+ if (kInverseFillAA_GrProcessorEdgeType == crre.getEdgeType()) {
+ fragBuilder->codeAppend("alpha = 1.0 - alpha;");
+ }
+
+ fragBuilder->codeAppendf("%s = %s;", args.fOutputColor,
+ (GrGLSLExpr4(args.fInputColor) * GrGLSLExpr1("alpha")).c_str());
+}
+
+void GLCircularRRectEffect::GenKey(const GrProcessor& processor, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const CircularRRectEffect& crre = processor.cast<CircularRRectEffect>();
+ GR_STATIC_ASSERT(kGrProcessorEdgeTypeCnt <= 8);
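+    // The edge type occupies the low 3 bits (the assert above guarantees it fits); the corner
+    // flags are packed above it.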
+ b->add32((crre.getCircularCornerFlags() << 3) | crre.getEdgeType());
+}
+
+void GLCircularRRectEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& processor) {
+ const CircularRRectEffect& crre = processor.cast<CircularRRectEffect>();
+ const SkRRect& rrect = crre.getRRect();
+ if (rrect != fPrevRRect) {
+ SkRect rect = rrect.getBounds();
+ SkScalar radius = 0;
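+        // Build the inner rect described in emitCode(): inset each side that has circular corners
+        // by the radius and outset each purely rectangular side by half a pixel.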
+ switch (crre.getCircularCornerFlags()) {
+ case CircularRRectEffect::kAll_CornerFlags:
+ SkASSERT(rrect.isSimpleCircular());
+ radius = rrect.getSimpleRadii().fX;
+ SkASSERT(radius >= kRadiusMin);
+ rect.inset(radius, radius);
+ break;
+ case CircularRRectEffect::kTopLeft_CornerFlag:
+ radius = rrect.radii(SkRRect::kUpperLeft_Corner).fX;
+ rect.fLeft += radius;
+ rect.fTop += radius;
+ rect.fRight += 0.5f;
+ rect.fBottom += 0.5f;
+ break;
+ case CircularRRectEffect::kTopRight_CornerFlag:
+ radius = rrect.radii(SkRRect::kUpperRight_Corner).fX;
+ rect.fLeft -= 0.5f;
+ rect.fTop += radius;
+ rect.fRight -= radius;
+ rect.fBottom += 0.5f;
+ break;
+ case CircularRRectEffect::kBottomRight_CornerFlag:
+ radius = rrect.radii(SkRRect::kLowerRight_Corner).fX;
+ rect.fLeft -= 0.5f;
+ rect.fTop -= 0.5f;
+ rect.fRight -= radius;
+ rect.fBottom -= radius;
+ break;
+ case CircularRRectEffect::kBottomLeft_CornerFlag:
+ radius = rrect.radii(SkRRect::kLowerLeft_Corner).fX;
+ rect.fLeft += radius;
+ rect.fTop -= 0.5f;
+ rect.fRight += 0.5f;
+ rect.fBottom -= radius;
+ break;
+ case CircularRRectEffect::kLeft_CornerFlags:
+ radius = rrect.radii(SkRRect::kUpperLeft_Corner).fX;
+ rect.fLeft += radius;
+ rect.fTop += radius;
+ rect.fRight += 0.5f;
+ rect.fBottom -= radius;
+ break;
+ case CircularRRectEffect::kTop_CornerFlags:
+ radius = rrect.radii(SkRRect::kUpperLeft_Corner).fX;
+ rect.fLeft += radius;
+ rect.fTop += radius;
+ rect.fRight -= radius;
+ rect.fBottom += 0.5f;
+ break;
+ case CircularRRectEffect::kRight_CornerFlags:
+ radius = rrect.radii(SkRRect::kUpperRight_Corner).fX;
+ rect.fLeft -= 0.5f;
+ rect.fTop += radius;
+ rect.fRight -= radius;
+ rect.fBottom -= radius;
+ break;
+ case CircularRRectEffect::kBottom_CornerFlags:
+ radius = rrect.radii(SkRRect::kLowerLeft_Corner).fX;
+ rect.fLeft += radius;
+ rect.fTop -= 0.5f;
+ rect.fRight -= radius;
+ rect.fBottom -= radius;
+ break;
+ default:
+ SkFAIL("Should have been one of the above cases.");
+ }
+ pdman.set4f(fInnerRectUniform, rect.fLeft, rect.fTop, rect.fRight, rect.fBottom);
+ radius += 0.5f;
+ pdman.set2f(fRadiusPlusHalfUniform, radius, 1.f / radius);
+ fPrevRRect = rrect;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+void CircularRRectEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GLCircularRRectEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* CircularRRectEffect::onCreateGLSLInstance() const {
+ return new GLCircularRRectEffect;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class EllipticalRRectEffect : public GrFragmentProcessor {
+public:
+ static sk_sp<GrFragmentProcessor> Make(GrPrimitiveEdgeType, const SkRRect&);
+
+ virtual ~EllipticalRRectEffect() {}
+
+ const char* name() const override { return "EllipticalRRect"; }
+
+ const SkRRect& getRRect() const { return fRRect; }
+
+ GrPrimitiveEdgeType getEdgeType() const { return fEdgeType; }
+
+private:
+ EllipticalRRectEffect(GrPrimitiveEdgeType, const SkRRect&);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor& other) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ SkRRect fRRect;
+ GrPrimitiveEdgeType fEdgeType;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+sk_sp<GrFragmentProcessor>
+EllipticalRRectEffect::Make(GrPrimitiveEdgeType edgeType, const SkRRect& rrect) {
+ if (kFillAA_GrProcessorEdgeType != edgeType && kInverseFillAA_GrProcessorEdgeType != edgeType) {
+ return nullptr;
+ }
+ return sk_sp<GrFragmentProcessor>(new EllipticalRRectEffect(edgeType, rrect));
+}
+
+void EllipticalRRectEffect::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ inout->mulByUnknownSingleComponent();
+}
+
+EllipticalRRectEffect::EllipticalRRectEffect(GrPrimitiveEdgeType edgeType, const SkRRect& rrect)
+ : fRRect(rrect)
+ , fEdgeType(edgeType) {
+ this->initClassID<EllipticalRRectEffect>();
+ this->setWillReadFragmentPosition();
+}
+
+bool EllipticalRRectEffect::onIsEqual(const GrFragmentProcessor& other) const {
+ const EllipticalRRectEffect& erre = other.cast<EllipticalRRectEffect>();
+ return fEdgeType == erre.fEdgeType && fRRect == erre.fRRect;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(EllipticalRRectEffect);
+
+sk_sp<GrFragmentProcessor> EllipticalRRectEffect::TestCreate(GrProcessorTestData* d) {
+ SkScalar w = d->fRandom->nextRangeScalar(20.f, 1000.f);
+ SkScalar h = d->fRandom->nextRangeScalar(20.f, 1000.f);
+ SkVector r[4];
+ r[SkRRect::kUpperLeft_Corner].fX = d->fRandom->nextRangeF(kRadiusMin, 9.f);
+ // ensure at least one corner really is elliptical
+ do {
+ r[SkRRect::kUpperLeft_Corner].fY = d->fRandom->nextRangeF(kRadiusMin, 9.f);
+ } while (r[SkRRect::kUpperLeft_Corner].fY == r[SkRRect::kUpperLeft_Corner].fX);
+
+ SkRRect rrect;
+ if (d->fRandom->nextBool()) {
+ // half the time create a four-radii rrect.
+ r[SkRRect::kLowerRight_Corner].fX = d->fRandom->nextRangeF(kRadiusMin, 9.f);
+ r[SkRRect::kLowerRight_Corner].fY = d->fRandom->nextRangeF(kRadiusMin, 9.f);
+
+ r[SkRRect::kUpperRight_Corner].fX = r[SkRRect::kLowerRight_Corner].fX;
+ r[SkRRect::kUpperRight_Corner].fY = r[SkRRect::kUpperLeft_Corner].fY;
+
+ r[SkRRect::kLowerLeft_Corner].fX = r[SkRRect::kUpperLeft_Corner].fX;
+ r[SkRRect::kLowerLeft_Corner].fY = r[SkRRect::kLowerRight_Corner].fY;
+
+ rrect.setRectRadii(SkRect::MakeWH(w, h), r);
+ } else {
+ rrect.setRectXY(SkRect::MakeWH(w, h), r[SkRRect::kUpperLeft_Corner].fX,
+ r[SkRRect::kUpperLeft_Corner].fY);
+ }
+ sk_sp<GrFragmentProcessor> fp;
+ do {
+ GrPrimitiveEdgeType et =
+ (GrPrimitiveEdgeType)d->fRandom->nextULessThan(kGrProcessorEdgeTypeCnt);
+ fp = GrRRectEffect::Make(et, rrect);
+ } while (nullptr == fp);
+ return fp;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GLEllipticalRRectEffect : public GrGLSLFragmentProcessor {
+public:
+ GLEllipticalRRectEffect() {
+ fPrevRRect.setEmpty();
+ }
+
+ void emitCode(EmitArgs&) override;
+
+ static inline void GenKey(const GrProcessor&, const GrGLSLCaps&, GrProcessorKeyBuilder*);
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) override;
+
+private:
+ GrGLSLProgramDataManager::UniformHandle fInnerRectUniform;
+ GrGLSLProgramDataManager::UniformHandle fInvRadiiSqdUniform;
+ GrGLSLProgramDataManager::UniformHandle fScaleUniform;
+ SkRRect fPrevRRect;
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+void GLEllipticalRRectEffect::emitCode(EmitArgs& args) {
+ const EllipticalRRectEffect& erre = args.fFp.cast<EllipticalRRectEffect>();
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ const char *rectName;
+ // The inner rect is the rrect bounds inset by the x/y radii
+ fInnerRectUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType, kDefault_GrSLPrecision,
+ "innerRect",
+ &rectName);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const char* fragmentPos = fragBuilder->fragmentPosition();
+    // At each quarter-ellipse corner we compute a vector that is the offset of the fragment pos
+    // from the ellipse center. The vector is pinned in x and y to be in the quarter-plane relevant
+    // to that corner. This means that points in the interior near the rrect top edge will have
+    // a vector that points straight up for both the TL and TR corners. Computing an
+ // alpha from this vector at either the TR or TL corner will give the correct result. Similarly,
+ // fragments near the other three edges will get the correct AA. Fragments in the interior of
+ // the rrect will have a (0,0) vector at all four corners. So long as the radii > 0.5 they will
+ // correctly produce an alpha value of 1 at all four corners. We take the min of all the alphas.
+ //
+    // The code below is a simplified version of the above that takes maxes on the vector
+    // components before computing distances and alpha values, so that only one distance
+    // computation is needed to determine the min alpha.
+ fragBuilder->codeAppendf("vec2 dxy0 = %s.xy - %s.xy;", rectName, fragmentPos);
+ fragBuilder->codeAppendf("vec2 dxy1 = %s.xy - %s.zw;", fragmentPos, rectName);
+
+ // If we're on a device with a "real" mediump then we'll do the distance computation in a space
+ // that is normalized by the largest radius. The scale uniform will be scale, 1/scale. The
+ // radii uniform values are already in this normalized space.
+ const char* scaleName = nullptr;
+ if (args.fGLSLCaps->floatPrecisionVaries()) {
+ fScaleUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType, kDefault_GrSLPrecision,
+ "scale", &scaleName);
+ }
+
+ // The uniforms with the inv squared radii are highp to prevent underflow.
+ switch (erre.getRRect().getType()) {
+ case SkRRect::kSimple_Type: {
+ const char *invRadiiXYSqdName;
+ fInvRadiiSqdUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType,
+ kDefault_GrSLPrecision,
+ "invRadiiXY",
+ &invRadiiXYSqdName);
+ fragBuilder->codeAppend("vec2 dxy = max(max(dxy0, dxy1), 0.0);");
+ if (scaleName) {
+ fragBuilder->codeAppendf("dxy *= %s.y;", scaleName);
+ }
+ // Z is the x/y offsets divided by squared radii.
+ fragBuilder->codeAppendf("vec2 Z = dxy * %s.xy;", invRadiiXYSqdName);
+ break;
+ }
+ case SkRRect::kNinePatch_Type: {
+ const char *invRadiiLTRBSqdName;
+ fInvRadiiSqdUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType,
+ kDefault_GrSLPrecision,
+ "invRadiiLTRB",
+ &invRadiiLTRBSqdName);
+ if (scaleName) {
+ fragBuilder->codeAppendf("dxy0 *= %s.y;", scaleName);
+ fragBuilder->codeAppendf("dxy1 *= %s.y;", scaleName);
+ }
+ fragBuilder->codeAppend("vec2 dxy = max(max(dxy0, dxy1), 0.0);");
+ // Z is the x/y offsets divided by squared radii. We only care about the (at most) one
+ // corner where both the x and y offsets are positive, hence the maxes. (The inverse
+ // squared radii will always be positive.)
+ fragBuilder->codeAppendf("vec2 Z = max(max(dxy0 * %s.xy, dxy1 * %s.zw), 0.0);",
+ invRadiiLTRBSqdName, invRadiiLTRBSqdName);
+
+ break;
+ }
+ default:
+ SkFAIL("RRect should always be simple or nine-patch.");
+ }
+ // implicit is the evaluation of (x/a)^2 + (y/b)^2 - 1.
+ fragBuilder->codeAppend("float implicit = dot(Z, dxy) - 1.0;");
+ // grad_dot is the squared length of the gradient of the implicit.
+ fragBuilder->codeAppend("float grad_dot = 4.0 * dot(Z, Z);");
+ // avoid calling inversesqrt on zero.
+ fragBuilder->codeAppend("grad_dot = max(grad_dot, 1.0e-4);");
+ fragBuilder->codeAppend("float approx_dist = implicit * inversesqrt(grad_dot);");
+ if (scaleName) {
+ fragBuilder->codeAppendf("approx_dist *= %s.x;", scaleName);
+ }
+
+ if (kFillAA_GrProcessorEdgeType == erre.getEdgeType()) {
+ fragBuilder->codeAppend("float alpha = clamp(0.5 - approx_dist, 0.0, 1.0);");
+ } else {
+ fragBuilder->codeAppend("float alpha = clamp(0.5 + approx_dist, 0.0, 1.0);");
+ }
+
+ fragBuilder->codeAppendf("%s = %s;", args.fOutputColor,
+ (GrGLSLExpr4(args.fInputColor) * GrGLSLExpr1("alpha")).c_str());
+}
+
+void GLEllipticalRRectEffect::GenKey(const GrProcessor& effect, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const EllipticalRRectEffect& erre = effect.cast<EllipticalRRectEffect>();
+ GR_STATIC_ASSERT(kLast_GrProcessorEdgeType < (1 << 3));
+ b->add32(erre.getRRect().getType() | erre.getEdgeType() << 3);
+}
+
+void GLEllipticalRRectEffect::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& effect) {
+ const EllipticalRRectEffect& erre = effect.cast<EllipticalRRectEffect>();
+ const SkRRect& rrect = erre.getRRect();
+ // If we're using a scale factor to work around precision issues, choose the largest radius
+ // as the scale factor. The inv radii need to be pre-adjusted by the scale factor.
+ if (rrect != fPrevRRect) {
+ SkRect rect = rrect.getBounds();
+ const SkVector& r0 = rrect.radii(SkRRect::kUpperLeft_Corner);
+ SkASSERT(r0.fX >= kRadiusMin);
+ SkASSERT(r0.fY >= kRadiusMin);
+ switch (erre.getRRect().getType()) {
+ case SkRRect::kSimple_Type:
+ rect.inset(r0.fX, r0.fY);
+ if (fScaleUniform.isValid()) {
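+                    // The larger radius is used as the scale factor, so its own inverse squared
+                    // radius normalizes to exactly 1.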
+ if (r0.fX > r0.fY) {
+ pdman.set2f(fInvRadiiSqdUniform, 1.f, (r0.fX * r0.fX) / (r0.fY * r0.fY));
+ pdman.set2f(fScaleUniform, r0.fX, 1.f / r0.fX);
+ } else {
+ pdman.set2f(fInvRadiiSqdUniform, (r0.fY * r0.fY) / (r0.fX * r0.fX), 1.f);
+ pdman.set2f(fScaleUniform, r0.fY, 1.f / r0.fY);
+ }
+ } else {
+ pdman.set2f(fInvRadiiSqdUniform, 1.f / (r0.fX * r0.fX),
+ 1.f / (r0.fY * r0.fY));
+ }
+ break;
+ case SkRRect::kNinePatch_Type: {
+ const SkVector& r1 = rrect.radii(SkRRect::kLowerRight_Corner);
+ SkASSERT(r1.fX >= kRadiusMin);
+ SkASSERT(r1.fY >= kRadiusMin);
+ rect.fLeft += r0.fX;
+ rect.fTop += r0.fY;
+ rect.fRight -= r1.fX;
+ rect.fBottom -= r1.fY;
+ if (fScaleUniform.isValid()) {
+ float scale = SkTMax(SkTMax(r0.fX, r0.fY), SkTMax(r1.fX, r1.fY));
+ float scaleSqd = scale * scale;
+ pdman.set4f(fInvRadiiSqdUniform, scaleSqd / (r0.fX * r0.fX),
+ scaleSqd / (r0.fY * r0.fY),
+ scaleSqd / (r1.fX * r1.fX),
+ scaleSqd / (r1.fY * r1.fY));
+ pdman.set2f(fScaleUniform, scale, 1.f / scale);
+ } else {
+ pdman.set4f(fInvRadiiSqdUniform, 1.f / (r0.fX * r0.fX),
+ 1.f / (r0.fY * r0.fY),
+ 1.f / (r1.fX * r1.fX),
+ 1.f / (r1.fY * r1.fY));
+ }
+ break;
+ }
+ default:
+ SkFAIL("RRect should always be simple or nine-patch.");
+ }
+ pdman.set4f(fInnerRectUniform, rect.fLeft, rect.fTop, rect.fRight, rect.fBottom);
+ fPrevRRect = rrect;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+void EllipticalRRectEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GLEllipticalRRectEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* EllipticalRRectEffect::onCreateGLSLInstance() const {
+ return new GLEllipticalRRectEffect;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> GrRRectEffect::Make(GrPrimitiveEdgeType edgeType, const SkRRect& rrect) {
+ if (rrect.isRect()) {
+ return GrConvexPolyEffect::Make(edgeType, rrect.getBounds());
+ }
+
+ if (rrect.isOval()) {
+ return GrOvalEffect::Make(edgeType, rrect.getBounds());
+ }
+
+ if (rrect.isSimple()) {
+ if (rrect.getSimpleRadii().fX < kRadiusMin || rrect.getSimpleRadii().fY < kRadiusMin) {
+ // In this case the corners are extremely close to rectangular and we collapse the
+ // clip to a rectangular clip.
+ return GrConvexPolyEffect::Make(edgeType, rrect.getBounds());
+ }
+ if (rrect.getSimpleRadii().fX == rrect.getSimpleRadii().fY) {
+ return CircularRRectEffect::Make(edgeType, CircularRRectEffect::kAll_CornerFlags,
+ rrect);
+ } else {
+ return EllipticalRRectEffect::Make(edgeType, rrect);
+ }
+ }
+
+ if (rrect.isComplex() || rrect.isNinePatch()) {
+ // Check for the "tab" cases - two adjacent circular corners and two square corners.
+ SkScalar circularRadius = 0;
+ uint32_t cornerFlags = 0;
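+        // cornerFlags accumulates one bit per circular corner; it is set to ~0 when the radii
+        // cannot be handled by the circular effect (unequal x/y radii or mismatched corner radii).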
+
+ SkVector radii[4];
+ bool squashedRadii = false;
+ for (int c = 0; c < 4; ++c) {
+ radii[c] = rrect.radii((SkRRect::Corner)c);
+ SkASSERT((0 == radii[c].fX) == (0 == radii[c].fY));
+ if (0 == radii[c].fX) {
+ // The corner is square, so no need to squash or flag as circular.
+ continue;
+ }
+ if (radii[c].fX < kRadiusMin || radii[c].fY < kRadiusMin) {
+ radii[c].set(0, 0);
+ squashedRadii = true;
+ continue;
+ }
+ if (radii[c].fX != radii[c].fY) {
+ cornerFlags = ~0U;
+ break;
+ }
+ if (!cornerFlags) {
+ circularRadius = radii[c].fX;
+ cornerFlags = 1 << c;
+ } else {
+ if (radii[c].fX != circularRadius) {
+ cornerFlags = ~0U;
+ break;
+ }
+ cornerFlags |= 1 << c;
+ }
+ }
+
+ switch (cornerFlags) {
+ case CircularRRectEffect::kAll_CornerFlags:
+                // This rrect should have been caught in the simple case above. However, it would
+                // still be handled correctly by the fall-through code.
+ SkASSERT(false);
+ case CircularRRectEffect::kTopLeft_CornerFlag:
+ case CircularRRectEffect::kTopRight_CornerFlag:
+ case CircularRRectEffect::kBottomRight_CornerFlag:
+ case CircularRRectEffect::kBottomLeft_CornerFlag:
+ case CircularRRectEffect::kLeft_CornerFlags:
+ case CircularRRectEffect::kTop_CornerFlags:
+ case CircularRRectEffect::kRight_CornerFlags:
+ case CircularRRectEffect::kBottom_CornerFlags: {
+ SkTCopyOnFirstWrite<SkRRect> rr(rrect);
+ if (squashedRadii) {
+ rr.writable()->setRectRadii(rrect.getBounds(), radii);
+ }
+ return CircularRRectEffect::Make(edgeType, cornerFlags, *rr);
+ }
+ case CircularRRectEffect::kNone_CornerFlags:
+ return GrConvexPolyEffect::Make(edgeType, rrect.getBounds());
+ default: {
+ if (squashedRadii) {
+                    // If we got here then we squashed some but not all of the radii to zero. (If
+                    // all had been squashed, cornerFlags would be 0.) The elliptical effect doesn't
+                    // support a mix of rounded and square corners.
+ return nullptr;
+ }
+ if (rrect.isNinePatch()) {
+ return EllipticalRRectEffect::Make(edgeType, rrect);
+ }
+ return nullptr;
+ }
+ }
+ }
+
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrRRectEffect.h b/gfx/skia/skia/src/gpu/effects/GrRRectEffect.h
new file mode 100644
index 000000000..6ff2cc973
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrRRectEffect.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRRectEffect_DEFINED
+#define GrRRectEffect_DEFINED
+
+#include "GrTypes.h"
+#include "GrTypesPriv.h"
+#include "SkRefCnt.h"
+
+class GrFragmentProcessor;
+class GrProcessor;
+class SkRRect;
+
+namespace GrRRectEffect {
+ /**
+ * Creates an effect that performs anti-aliased clipping against a SkRRect. It doesn't support
+ * all varieties of SkRRect so the caller must check for a nullptr return.
+ */
+ sk_sp<GrFragmentProcessor> Make(GrPrimitiveEdgeType, const SkRRect&);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrSimpleTextureEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrSimpleTextureEffect.cpp
new file mode 100644
index 000000000..beb64d582
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrSimpleTextureEffect.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrSimpleTextureEffect.h"
+#include "GrInvariantOutput.h"
+#include "GrTexture.h"
+#include "glsl/GrGLSLColorSpaceXformHelper.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+
+class GrGLSimpleTextureEffect : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs& args) override {
+ const GrSimpleTextureEffect& textureEffect = args.fFp.cast<GrSimpleTextureEffect>();
+ GrGLSLColorSpaceXformHelper colorSpaceHelper(args.fUniformHandler,
+ textureEffect.colorSpaceXform(),
+ &fColorSpaceXformUni);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ fragBuilder->codeAppendf("%s = ", args.fOutputColor);
+ fragBuilder->appendTextureLookupAndModulate(args.fInputColor,
+ args.fTexSamplers[0],
+ args.fTransformedCoords[0].c_str(),
+ args.fTransformedCoords[0].getType(),
+ &colorSpaceHelper);
+ fragBuilder->codeAppend(";");
+ }
+
+ static inline void GenKey(const GrProcessor& effect, const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const GrSimpleTextureEffect& textureEffect = effect.cast<GrSimpleTextureEffect>();
+ b->add32(GrColorSpaceXform::XformKey(textureEffect.colorSpaceXform()));
+ }
+
+protected:
+ void onSetData(const GrGLSLProgramDataManager& pdman, const GrProcessor& processor) override {
+ const GrSimpleTextureEffect& textureEffect = processor.cast<GrSimpleTextureEffect>();
+ if (SkToBool(textureEffect.colorSpaceXform())) {
+ pdman.setSkMatrix44(fColorSpaceXformUni, textureEffect.colorSpaceXform()->srcToDst());
+ }
+ }
+
+private:
+ typedef GrGLSLFragmentProcessor INHERITED;
+
+ UniformHandle fColorSpaceXformUni;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrSimpleTextureEffect::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ this->updateInvariantOutputForModulation(inout);
+}
+
+void GrSimpleTextureEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ GrGLSimpleTextureEffect::GenKey(*this, caps, b);
+}
+
+GrGLSLFragmentProcessor* GrSimpleTextureEffect::onCreateGLSLInstance() const {
+ return new GrGLSimpleTextureEffect;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrSimpleTextureEffect);
+
+sk_sp<GrFragmentProcessor> GrSimpleTextureEffect::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx :
+ GrProcessorUnitTest::kAlphaTextureIdx;
+ static const SkShader::TileMode kTileModes[] = {
+ SkShader::kClamp_TileMode,
+ SkShader::kRepeat_TileMode,
+ SkShader::kMirror_TileMode,
+ };
+ SkShader::TileMode tileModes[] = {
+ kTileModes[d->fRandom->nextULessThan(SK_ARRAY_COUNT(kTileModes))],
+ kTileModes[d->fRandom->nextULessThan(SK_ARRAY_COUNT(kTileModes))],
+ };
+ GrTextureParams params(tileModes, d->fRandom->nextBool() ? GrTextureParams::kBilerp_FilterMode :
+ GrTextureParams::kNone_FilterMode);
+
+ const SkMatrix& matrix = GrTest::TestMatrix(d->fRandom);
+ auto colorSpaceXform = GrTest::TestColorXform(d->fRandom);
+ return GrSimpleTextureEffect::Make(d->fTextures[texIdx], colorSpaceXform, matrix);
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrSimpleTextureEffect.h b/gfx/skia/skia/src/gpu/effects/GrSimpleTextureEffect.h
new file mode 100644
index 000000000..8242362a9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrSimpleTextureEffect.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSimpleTextureEffect_DEFINED
+#define GrSimpleTextureEffect_DEFINED
+
+#include "GrSingleTextureEffect.h"
+
+class GrInvariantOutput;
+
+/**
+ * The output color of this effect is a modulation of the input color and a sample from a texture.
+ * It allows explicit specification of the filtering and wrap modes (GrTextureParams) and accepts
+ * a matrix that is used to compute texture coordinates from local coordinates.
+ */
+class GrSimpleTextureEffect : public GrSingleTextureEffect {
+public:
+ /* unfiltered, clamp mode */
+ static sk_sp<GrFragmentProcessor> Make(GrTexture* tex,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkMatrix& matrix) {
+ return sk_sp<GrFragmentProcessor>(
+ new GrSimpleTextureEffect(tex, std::move(colorSpaceXform), matrix,
+ GrTextureParams::kNone_FilterMode));
+ }
+
+ /* clamp mode */
+ static sk_sp<GrFragmentProcessor> Make(GrTexture* tex,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkMatrix& matrix,
+ GrTextureParams::FilterMode filterMode) {
+ return sk_sp<GrFragmentProcessor>(
+ new GrSimpleTextureEffect(tex, std::move(colorSpaceXform), matrix, filterMode));
+ }
+
+ static sk_sp<GrFragmentProcessor> Make(GrTexture* tex,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkMatrix& matrix,
+ const GrTextureParams& p) {
+ return sk_sp<GrFragmentProcessor>(new GrSimpleTextureEffect(tex, std::move(colorSpaceXform),
+ matrix, p));
+ }
+
+ virtual ~GrSimpleTextureEffect() {}
+
+ const char* name() const override { return "SimpleTexture"; }
+
+private:
+ GrSimpleTextureEffect(GrTexture* texture,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkMatrix& matrix,
+ GrTextureParams::FilterMode filterMode)
+ : GrSingleTextureEffect(texture, std::move(colorSpaceXform), matrix, filterMode) {
+ this->initClassID<GrSimpleTextureEffect>();
+ }
+
+ GrSimpleTextureEffect(GrTexture* texture,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkMatrix& matrix,
+ const GrTextureParams& params)
+ : GrSingleTextureEffect(texture, std::move(colorSpaceXform), matrix, params) {
+ this->initClassID<GrSimpleTextureEffect>();
+ }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor& other) const override { return true; }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef GrSingleTextureEffect INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrSingleTextureEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrSingleTextureEffect.cpp
new file mode 100644
index 000000000..268378943
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrSingleTextureEffect.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "effects/GrSingleTextureEffect.h"
+
+GrSingleTextureEffect::GrSingleTextureEffect(GrTexture* texture,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkMatrix& m)
+ : fCoordTransform(m, texture, GrTextureParams::kNone_FilterMode)
+ , fTextureAccess(texture)
+ , fColorSpaceXform(std::move(colorSpaceXform)) {
+ this->addCoordTransform(&fCoordTransform);
+ this->addTextureAccess(&fTextureAccess);
+}
+
+GrSingleTextureEffect::GrSingleTextureEffect(GrTexture* texture,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkMatrix& m,
+ GrTextureParams::FilterMode filterMode)
+ : fCoordTransform(m, texture, filterMode)
+ , fTextureAccess(texture, filterMode)
+ , fColorSpaceXform(std::move(colorSpaceXform)) {
+ this->addCoordTransform(&fCoordTransform);
+ this->addTextureAccess(&fTextureAccess);
+}
+
+GrSingleTextureEffect::GrSingleTextureEffect(GrTexture* texture,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkMatrix& m,
+ const GrTextureParams& params)
+ : fCoordTransform(m, texture, params.filterMode())
+ , fTextureAccess(texture, params)
+ , fColorSpaceXform(std::move(colorSpaceXform)) {
+ this->addCoordTransform(&fCoordTransform);
+ this->addTextureAccess(&fTextureAccess);
+}
+
+GrSingleTextureEffect::~GrSingleTextureEffect() {
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrSingleTextureEffect.h b/gfx/skia/skia/src/gpu/effects/GrSingleTextureEffect.h
new file mode 100644
index 000000000..7d110bf2a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrSingleTextureEffect.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSingleTextureEffect_DEFINED
+#define GrSingleTextureEffect_DEFINED
+
+#include "GrFragmentProcessor.h"
+#include "GrColorSpaceXform.h"
+#include "GrCoordTransform.h"
+#include "GrInvariantOutput.h"
+#include "SkMatrix.h"
+
+class GrTexture;
+
+/**
+ * A base class for effects that draw a single texture with a texture matrix. This effect has no
+ * backend implementations. One must be provided by the subclass.
+ */
+class GrSingleTextureEffect : public GrFragmentProcessor {
+public:
+ ~GrSingleTextureEffect() override;
+
+ SkString dumpInfo() const override {
+ SkString str;
+ str.appendf("Texture: %d", fTextureAccess.getTexture()->uniqueID());
+ return str;
+ }
+
+ GrColorSpaceXform* colorSpaceXform() const { return fColorSpaceXform.get(); }
+
+protected:
+ /** unfiltered, clamp mode */
+ GrSingleTextureEffect(GrTexture*, sk_sp<GrColorSpaceXform>, const SkMatrix&);
+ /** clamp mode */
+ GrSingleTextureEffect(GrTexture*, sk_sp<GrColorSpaceXform>, const SkMatrix&,
+ GrTextureParams::FilterMode filterMode);
+ GrSingleTextureEffect(GrTexture*,
+ sk_sp<GrColorSpaceXform>,
+ const SkMatrix&,
+ const GrTextureParams&);
+
+ /**
+ * Can be used as a helper to implement subclass onComputeInvariantOutput(). It assumes that
+ * the subclass output color will be a modulation of the input color with a value read from the
+ * texture.
+ */
+ void updateInvariantOutputForModulation(GrInvariantOutput* inout) const {
+ if (GrPixelConfigIsAlphaOnly(this->texture(0)->config())) {
+ inout->mulByUnknownSingleComponent();
+ } else if (GrPixelConfigIsOpaque(this->texture(0)->config())) {
+ inout->mulByUnknownOpaqueFourComponents();
+ } else {
+ inout->mulByUnknownFourComponents();
+ }
+ }
+
+private:
+ GrCoordTransform fCoordTransform;
+ GrTextureAccess fTextureAccess;
+ sk_sp<GrColorSpaceXform> fColorSpaceXform;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrTextureDomain.cpp b/gfx/skia/skia/src/gpu/effects/GrTextureDomain.cpp
new file mode 100644
index 000000000..55d69bf5c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrTextureDomain.cpp
@@ -0,0 +1,393 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrTextureDomain.h"
+#include "GrInvariantOutput.h"
+#include "GrSimpleTextureEffect.h"
+#include "SkFloatingPoint.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLSampler.h"
+#include "glsl/GrGLSLShaderBuilder.h"
+#include "glsl/GrGLSLUniformHandler.h"
+
+GrTextureDomain::GrTextureDomain(const SkRect& domain, Mode mode, int index)
+ : fIndex(index) {
+
+ static const SkRect kFullRect = {0, 0, SK_Scalar1, SK_Scalar1};
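+    // Clamping to a domain that covers the entire [0..1]x[0..1] texture is a no-op, so such a
+    // domain is demoted to kIgnore_Mode.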
+ if (domain.contains(kFullRect) && kClamp_Mode == mode) {
+ fMode = kIgnore_Mode;
+ } else {
+ fMode = mode;
+ }
+
+ if (fMode != kIgnore_Mode) {
+ // We don't currently handle domains that are empty or don't intersect the texture.
+ // It is OK if the domain rect is a line or point, but it should not be inverted. We do not
+ // handle rects that do not intersect the [0..1]x[0..1] rect.
+ SkASSERT(domain.fLeft <= domain.fRight);
+ SkASSERT(domain.fTop <= domain.fBottom);
+ fDomain.fLeft = SkScalarPin(domain.fLeft, kFullRect.fLeft, kFullRect.fRight);
+ fDomain.fRight = SkScalarPin(domain.fRight, kFullRect.fLeft, kFullRect.fRight);
+ fDomain.fTop = SkScalarPin(domain.fTop, kFullRect.fTop, kFullRect.fBottom);
+ fDomain.fBottom = SkScalarPin(domain.fBottom, kFullRect.fTop, kFullRect.fBottom);
+ SkASSERT(fDomain.fLeft <= fDomain.fRight);
+ SkASSERT(fDomain.fTop <= fDomain.fBottom);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+void GrTextureDomain::GLDomain::sampleTexture(GrGLSLShaderBuilder* builder,
+ GrGLSLUniformHandler* uniformHandler,
+ const GrGLSLCaps* glslCaps,
+ const GrTextureDomain& textureDomain,
+ const char* outColor,
+ const SkString& inCoords,
+ GrGLSLFragmentProcessor::SamplerHandle sampler,
+ const char* inModulateColor) {
+ SkASSERT((Mode)-1 == fMode || textureDomain.mode() == fMode);
+ SkDEBUGCODE(fMode = textureDomain.mode();)
+
+ if (textureDomain.mode() != kIgnore_Mode && !fDomainUni.isValid()) {
+ const char* name;
+ SkString uniName("TexDom");
+ if (textureDomain.fIndex >= 0) {
+ uniName.appendS32(textureDomain.fIndex);
+ }
+ fDomainUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType, kDefault_GrSLPrecision,
+ uniName.c_str(), &name);
+ fDomainName = name;
+ }
+
+ switch (textureDomain.mode()) {
+ case kIgnore_Mode: {
+ builder->codeAppendf("%s = ", outColor);
+ builder->appendTextureLookupAndModulate(inModulateColor, sampler,
+ inCoords.c_str());
+ builder->codeAppend(";");
+ break;
+ }
+ case kClamp_Mode: {
+ SkString clampedCoords;
+ clampedCoords.appendf("clamp(%s, %s.xy, %s.zw)",
+ inCoords.c_str(), fDomainName.c_str(), fDomainName.c_str());
+
+ builder->codeAppendf("%s = ", outColor);
+ builder->appendTextureLookupAndModulate(inModulateColor, sampler,
+ clampedCoords.c_str());
+ builder->codeAppend(";");
+ break;
+ }
+ case kDecal_Mode: {
+ // Add a block since we're going to declare variables.
+ GrGLSLShaderBuilder::ShaderBlock block(builder);
+
+ const char* domain = fDomainName.c_str();
+ if (!glslCaps->canUseAnyFunctionInShader()) {
+ // On the NexusS and GalaxyNexus, the other path (with the 'any'
+ // call) causes the compilation error "Calls to any function that
+ // may require a gradient calculation inside a conditional block
+ // may return undefined results". This appears to be an issue with
+ // the 'any' call since even the simple "result=black; if (any())
+ // result=white;" code fails to compile.
+ builder->codeAppend("vec4 outside = vec4(0.0, 0.0, 0.0, 0.0);");
+ builder->codeAppend("vec4 inside = ");
+ builder->appendTextureLookupAndModulate(inModulateColor, sampler,
+ inCoords.c_str());
+ builder->codeAppend(";");
+
+ builder->appendPrecisionModifier(kHigh_GrSLPrecision);
+ builder->codeAppendf("float x = (%s).x;", inCoords.c_str());
+ builder->appendPrecisionModifier(kHigh_GrSLPrecision);
+ builder->codeAppendf("float y = (%s).y;", inCoords.c_str());
+
+ builder->codeAppendf("x = abs(2.0*(x - %s.x)/(%s.z - %s.x) - 1.0);",
+ domain, domain, domain);
+ builder->codeAppendf("y = abs(2.0*(y - %s.y)/(%s.w - %s.y) - 1.0);",
+ domain, domain, domain);
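+                // x and y now lie in [0,1] inside the domain and exceed 1 outside it, so step()
+                // selects the transparent color in the same places the 'any(outside)' path would.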
+ builder->codeAppend("float blend = step(1.0, max(x, y));");
+ builder->codeAppendf("%s = mix(inside, outside, blend);", outColor);
+ } else {
+ builder->codeAppend("bvec4 outside;\n");
+ builder->codeAppendf("outside.xy = lessThan(%s, %s.xy);", inCoords.c_str(),
+ domain);
+ builder->codeAppendf("outside.zw = greaterThan(%s, %s.zw);", inCoords.c_str(),
+ domain);
+ builder->codeAppendf("%s = any(outside) ? vec4(0.0, 0.0, 0.0, 0.0) : ",
+ outColor);
+ builder->appendTextureLookupAndModulate(inModulateColor, sampler,
+ inCoords.c_str());
+ builder->codeAppend(";");
+ }
+ break;
+ }
+ case kRepeat_Mode: {
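+            // Wrap the coords into the domain: offset by the domain origin, take the remainder
+            // against the domain size, then translate back.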
+ SkString clampedCoords;
+ clampedCoords.printf("mod(%s - %s.xy, %s.zw - %s.xy) + %s.xy",
+ inCoords.c_str(), fDomainName.c_str(), fDomainName.c_str(),
+ fDomainName.c_str(), fDomainName.c_str());
+
+ builder->codeAppendf("%s = ", outColor);
+ builder->appendTextureLookupAndModulate(inModulateColor, sampler,
+ clampedCoords.c_str());
+ builder->codeAppend(";");
+ break;
+ }
+ }
+}
+
+void GrTextureDomain::GLDomain::setData(const GrGLSLProgramDataManager& pdman,
+ const GrTextureDomain& textureDomain,
+ GrSurfaceOrigin textureOrigin) {
+ SkASSERT(textureDomain.mode() == fMode);
+ if (kIgnore_Mode != textureDomain.mode()) {
+ float values[kPrevDomainCount] = {
+ SkScalarToFloat(textureDomain.domain().left()),
+ SkScalarToFloat(textureDomain.domain().top()),
+ SkScalarToFloat(textureDomain.domain().right()),
+ SkScalarToFloat(textureDomain.domain().bottom())
+ };
+ // vertical flip if necessary
+ if (kBottomLeft_GrSurfaceOrigin == textureOrigin) {
+ values[1] = 1.0f - values[1];
+ values[3] = 1.0f - values[3];
+ // The top and bottom were just flipped, so correct the ordering
+ // of elements so that values = (l, t, r, b).
+ SkTSwap(values[1], values[3]);
+ }
+ if (0 != memcmp(values, fPrevDomain, kPrevDomainCount * sizeof(float))) {
+ pdman.set4fv(fDomainUni, 1, values);
+ memcpy(fPrevDomain, values, kPrevDomainCount * sizeof(float));
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> GrTextureDomainEffect::Make(GrTexture* texture,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkMatrix& matrix,
+ const SkRect& domain,
+ GrTextureDomain::Mode mode,
+ GrTextureParams::FilterMode filterMode) {
+ static const SkRect kFullRect = {0, 0, SK_Scalar1, SK_Scalar1};
+ if (GrTextureDomain::kIgnore_Mode == mode ||
+ (GrTextureDomain::kClamp_Mode == mode && domain.contains(kFullRect))) {
+ return GrSimpleTextureEffect::Make(texture, std::move(colorSpaceXform), matrix, filterMode);
+ } else {
+ return sk_sp<GrFragmentProcessor>(
+ new GrTextureDomainEffect(texture, std::move(colorSpaceXform), matrix, domain, mode,
+ filterMode));
+ }
+}
+
+GrTextureDomainEffect::GrTextureDomainEffect(GrTexture* texture,
+ sk_sp<GrColorSpaceXform> colorSpaceXform,
+ const SkMatrix& matrix,
+ const SkRect& domain,
+ GrTextureDomain::Mode mode,
+ GrTextureParams::FilterMode filterMode)
+ : GrSingleTextureEffect(texture, std::move(colorSpaceXform), matrix, filterMode)
+ , fTextureDomain(domain, mode) {
+ SkASSERT(mode != GrTextureDomain::kRepeat_Mode ||
+ filterMode == GrTextureParams::kNone_FilterMode);
+ this->initClassID<GrTextureDomainEffect>();
+}
+
+void GrTextureDomainEffect::onGetGLSLProcessorKey(const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const {
+ b->add32(GrTextureDomain::GLDomain::DomainKey(fTextureDomain));
+}
+
+GrGLSLFragmentProcessor* GrTextureDomainEffect::onCreateGLSLInstance() const {
+ class GLSLProcessor : public GrGLSLFragmentProcessor {
+ public:
+ void emitCode(EmitArgs& args) override {
+ const GrTextureDomainEffect& tde = args.fFp.cast<GrTextureDomainEffect>();
+ const GrTextureDomain& domain = tde.fTextureDomain;
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString coords2D = fragBuilder->ensureCoords2D(args.fTransformedCoords[0]);
+ fGLDomain.sampleTexture(fragBuilder,
+ args.fUniformHandler,
+ args.fGLSLCaps,
+ domain,
+ args.fOutputColor,
+ coords2D,
+ args.fTexSamplers[0],
+ args.fInputColor);
+ }
+
+ protected:
+ void onSetData(const GrGLSLProgramDataManager& pdman, const GrProcessor& fp) override {
+ const GrTextureDomainEffect& tde = fp.cast<GrTextureDomainEffect>();
+ const GrTextureDomain& domain = tde.fTextureDomain;
+ fGLDomain.setData(pdman, domain, tde.texture(0)->origin());
+ }
+
+ private:
+ GrTextureDomain::GLDomain fGLDomain;
+
+ };
+
+ return new GLSLProcessor;
+}
+
+bool GrTextureDomainEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrTextureDomainEffect& s = sBase.cast<GrTextureDomainEffect>();
+ return this->fTextureDomain == s.fTextureDomain && s.texture(0) == this->texture(0) &&
+ s.textureAccess(0).getParams().filterMode() ==
+ this->textureAccess(0).getParams().filterMode();
+}
+
+void GrTextureDomainEffect::onComputeInvariantOutput(GrInvariantOutput* inout) const {
+ if (GrTextureDomain::kDecal_Mode == fTextureDomain.mode()) {
+ if (GrPixelConfigIsAlphaOnly(this->texture(0)->config())) {
+ inout->mulByUnknownSingleComponent();
+ } else {
+ inout->mulByUnknownFourComponents();
+ }
+ } else {
+ this->updateInvariantOutputForModulation(inout);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrTextureDomainEffect);
+
+sk_sp<GrFragmentProcessor> GrTextureDomainEffect::TestCreate(GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx :
+ GrProcessorUnitTest::kAlphaTextureIdx;
+ SkRect domain;
+ domain.fLeft = d->fRandom->nextUScalar1();
+ domain.fRight = d->fRandom->nextRangeScalar(domain.fLeft, SK_Scalar1);
+ domain.fTop = d->fRandom->nextUScalar1();
+ domain.fBottom = d->fRandom->nextRangeScalar(domain.fTop, SK_Scalar1);
+ GrTextureDomain::Mode mode =
+ (GrTextureDomain::Mode) d->fRandom->nextULessThan(GrTextureDomain::kModeCount);
+ const SkMatrix& matrix = GrTest::TestMatrix(d->fRandom);
+ bool bilerp = mode != GrTextureDomain::kRepeat_Mode ? d->fRandom->nextBool() : false;
+ auto colorSpaceXform = GrTest::TestColorXform(d->fRandom);
+ return GrTextureDomainEffect::Make(
+ d->fTextures[texIdx],
+ colorSpaceXform,
+ matrix,
+ domain,
+ mode,
+ bilerp ? GrTextureParams::kBilerp_FilterMode : GrTextureParams::kNone_FilterMode);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> GrDeviceSpaceTextureDecalFragmentProcessor::Make(GrTexture* texture,
+ const SkIRect& subset, const SkIPoint& deviceSpaceOffset) {
+ return sk_sp<GrFragmentProcessor>(new GrDeviceSpaceTextureDecalFragmentProcessor(
+ texture, subset, deviceSpaceOffset));
+}
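+
+// Illustrative usage sketch (assumed caller-side code, not from upstream): this processor
+// decals a texel subset at a fixed device-space position; 'texture' is a placeholder.
+//
+//   SkIRect subset = SkIRect::MakeXYWH(0, 0, 64, 64);
+//   SkIPoint deviceOffset = SkIPoint::Make(100, 200);
+//   sk_sp<GrFragmentProcessor> fp =
+//           GrDeviceSpaceTextureDecalFragmentProcessor::Make(texture, subset, deviceOffset);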
+
+GrDeviceSpaceTextureDecalFragmentProcessor::GrDeviceSpaceTextureDecalFragmentProcessor(
+ GrTexture* texture, const SkIRect& subset, const SkIPoint& deviceSpaceOffset)
+ : fTextureAccess(texture, GrTextureParams::ClampNoFilter())
+ , fTextureDomain(GrTextureDomain::MakeTexelDomain(texture, subset),
+ GrTextureDomain::kDecal_Mode) {
+ this->addTextureAccess(&fTextureAccess);
+ fDeviceSpaceOffset.fX = deviceSpaceOffset.fX - subset.fLeft;
+ fDeviceSpaceOffset.fY = deviceSpaceOffset.fY - subset.fTop;
+ this->initClassID<GrDeviceSpaceTextureDecalFragmentProcessor>();
+ this->setWillReadFragmentPosition();
+}
+
+GrGLSLFragmentProcessor* GrDeviceSpaceTextureDecalFragmentProcessor::onCreateGLSLInstance() const {
+ class GLSLProcessor : public GrGLSLFragmentProcessor {
+ public:
+ void emitCode(EmitArgs& args) override {
+ const GrDeviceSpaceTextureDecalFragmentProcessor& dstdfp =
+ args.fFp.cast<GrDeviceSpaceTextureDecalFragmentProcessor>();
+ const char* scaleAndTranslateName;
+ fScaleAndTranslateUni = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType,
+ kDefault_GrSLPrecision,
+ "scaleAndTranslate",
+ &scaleAndTranslateName);
+ args.fFragBuilder->codeAppendf("vec2 coords = %s.xy * %s.xy + %s.zw;",
+ args.fFragBuilder->fragmentPosition(),
+ scaleAndTranslateName, scaleAndTranslateName);
+ fGLDomain.sampleTexture(args.fFragBuilder,
+ args.fUniformHandler,
+ args.fGLSLCaps,
+ dstdfp.fTextureDomain,
+ args.fOutputColor,
+ SkString("coords"),
+ args.fTexSamplers[0],
+ args.fInputColor);
+ }
+
+ protected:
+ void onSetData(const GrGLSLProgramDataManager& pdman, const GrProcessor& fp) override {
+ const GrDeviceSpaceTextureDecalFragmentProcessor& dstdfp =
+ fp.cast<GrDeviceSpaceTextureDecalFragmentProcessor>();
+ fGLDomain.setData(pdman, dstdfp.fTextureDomain, dstdfp.texture(0)->origin());
+ float iw = 1.f / dstdfp.texture(0)->width();
+ float ih = 1.f / dstdfp.texture(0)->height();
+ float scaleAndTransData[4] = {
+ iw, ih,
+ -dstdfp.fDeviceSpaceOffset.fX * iw, -dstdfp.fDeviceSpaceOffset.fY * ih
+ };
+ if (dstdfp.texture(0)->origin() == kBottomLeft_GrSurfaceOrigin) {
+ scaleAndTransData[1] = -scaleAndTransData[1];
+ scaleAndTransData[3] = 1 - scaleAndTransData[3];
+ }
+ pdman.set4fv(fScaleAndTranslateUni, 1, scaleAndTransData);
+ }
+
+ private:
+ GrTextureDomain::GLDomain fGLDomain;
+ UniformHandle fScaleAndTranslateUni;
+ };
+
+ return new GLSLProcessor;
+}
+
+bool GrDeviceSpaceTextureDecalFragmentProcessor::onIsEqual(const GrFragmentProcessor& fp) const {
+ const GrDeviceSpaceTextureDecalFragmentProcessor& dstdfp =
+ fp.cast<GrDeviceSpaceTextureDecalFragmentProcessor>();
+ return dstdfp.fTextureAccess.getTexture() == fTextureAccess.getTexture() &&
+ dstdfp.fDeviceSpaceOffset == fDeviceSpaceOffset &&
+ dstdfp.fTextureDomain == fTextureDomain;
+}
+
+void GrDeviceSpaceTextureDecalFragmentProcessor::onComputeInvariantOutput(
+ GrInvariantOutput* inout) const {
+ if (GrPixelConfigIsAlphaOnly(this->texture(0)->config())) {
+ inout->mulByUnknownSingleComponent();
+ } else {
+ inout->mulByUnknownFourComponents();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrDeviceSpaceTextureDecalFragmentProcessor);
+
+sk_sp<GrFragmentProcessor> GrDeviceSpaceTextureDecalFragmentProcessor::TestCreate(
+ GrProcessorTestData* d) {
+ int texIdx = d->fRandom->nextBool() ? GrProcessorUnitTest::kSkiaPMTextureIdx
+ : GrProcessorUnitTest::kAlphaTextureIdx;
+ SkIRect subset;
+ subset.fLeft = d->fRandom->nextULessThan(d->fTextures[texIdx]->width() - 1);
+ subset.fRight = d->fRandom->nextRangeU(subset.fLeft, d->fTextures[texIdx]->width());
+ subset.fTop = d->fRandom->nextULessThan(d->fTextures[texIdx]->height() - 1);
+ subset.fBottom = d->fRandom->nextRangeU(subset.fTop, d->fTextures[texIdx]->height());
+ SkIPoint pt;
+ pt.fX = d->fRandom->nextULessThan(2048);
+ pt.fY = d->fRandom->nextULessThan(2048);
+ return GrDeviceSpaceTextureDecalFragmentProcessor::Make(d->fTextures[texIdx], subset, pt);
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrTextureDomain.h b/gfx/skia/skia/src/gpu/effects/GrTextureDomain.h
new file mode 100644
index 000000000..82ff73c06
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrTextureDomain.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextureDomainEffect_DEFINED
+#define GrTextureDomainEffect_DEFINED
+
+#include "GrSingleTextureEffect.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+
+class GrGLProgramBuilder;
+class GrGLSLShaderBuilder;
+class GrInvariantOutput;
+class GrGLSLSampler;
+class GrGLSLUniformHandler;
+struct SkRect;
+
+/**
+ * Limits a texture's lookup coordinates to a domain. Samples outside the domain are either clamped
+ * to the edge of the domain or result in a vec4 of zeros (decal mode). The domain is clipped to
+ * normalized texture coords ([0,1]x[0,1] square). Bilinear filtering can cause texels outside the
+ * domain to affect the read value unless the caller considers this when calculating the domain.
+ */
+class GrTextureDomain {
+public:
+ enum Mode {
+ // Ignore the texture domain rectangle.
+ kIgnore_Mode,
+ // Clamp texture coords to the domain rectangle.
+ kClamp_Mode,
+ // Treat the area outside the domain rectangle as fully transparent.
+ kDecal_Mode,
+ // Wrap texture coordinates. NOTE: filtering may not work as expected because Bilerp will
+ // read texels outside of the domain. We could perform additional texture reads and filter
+ // in the shader, but are not currently doing this for performance reasons.
+ kRepeat_Mode,
+
+ kLastMode = kRepeat_Mode
+ };
+ static const int kModeCount = kLastMode + 1;
+
+ static const GrTextureDomain& IgnoredDomain() {
+ static const SkRect gDummyRect = {0, 0, 0, 0};
+ static const GrTextureDomain gDomain(gDummyRect, kIgnore_Mode);
+ return gDomain;
+ }
+
+ /**
+ * @param index Pass a value >= 0 if using multiple texture domains in the same effect.
+ * It is used to keep inserted variables from causing name collisions.
+ */
+ GrTextureDomain(const SkRect& domain, Mode, int index = -1);
+
+ const SkRect& domain() const { return fDomain; }
+ Mode mode() const { return fMode; }
+
+ /* Computes a domain that bounds all the texels in texelRect. Note that with bilerp enabled
+ texels neighboring the domain may be read. */
+ static const SkRect MakeTexelDomain(const GrTexture* texture, const SkIRect& texelRect) {
+ SkScalar wInv = SK_Scalar1 / texture->width();
+ SkScalar hInv = SK_Scalar1 / texture->height();
+ SkRect result = {
+ texelRect.fLeft * wInv,
+ texelRect.fTop * hInv,
+ texelRect.fRight * wInv,
+ texelRect.fBottom * hInv
+ };
+ return result;
+ }
+
+ static const SkRect MakeTexelDomainForMode(const GrTexture* texture, const SkIRect& texelRect, Mode mode) {
+ // For Clamp mode, inset by half a texel.
+ SkScalar wInv = SK_Scalar1 / texture->width();
+ SkScalar hInv = SK_Scalar1 / texture->height();
+ SkScalar inset = (mode == kClamp_Mode && !texelRect.isEmpty()) ? SK_ScalarHalf : 0;
+ return SkRect::MakeLTRB(
+ (texelRect.fLeft + inset) * wInv,
+ (texelRect.fTop + inset) * hInv,
+ (texelRect.fRight - inset) * wInv,
+ (texelRect.fBottom - inset) * hInv
+ );
+ }
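+
+ // Worked example (illustrative): for a 256x256 texture and texelRect = {16, 16, 48, 48},
+ // MakeTexelDomain() returns the normalized rect {16/256, 16/256, 48/256, 48/256}, while
+ // MakeTexelDomainForMode(..., kClamp_Mode) insets each edge by half a texel first, giving
+ // {16.5/256, 16.5/256, 47.5/256, 47.5/256}, so bilerp never reads outside the subset.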
+
+ bool operator==(const GrTextureDomain& that) const {
+ return fMode == that.fMode && (kIgnore_Mode == fMode || fDomain == that.fDomain);
+ }
+
+ /**
+ * A GrGLSLFragmentProcessor subclass that corresponds to a GrProcessor subclass that uses
+ * GrTextureDomain should include this helper. It generates the texture domain GLSL, produces
+ * the part of the effect key that reflects the texture domain code, and performs the uniform
+ * uploads necessary for texture domains.
+ */
+ class GLDomain {
+ public:
+ GLDomain() {
+ for (int i = 0; i < kPrevDomainCount; i++) {
+ fPrevDomain[i] = SK_FloatNaN;
+ }
+ SkDEBUGCODE(fMode = (Mode) -1;)
+ }
+
+ /**
+ * Call this from GrGLSLFragmentProcessor::emitCode() to sample the texture W.R.T. the
+ * domain and mode.
+ *
+ * @param outColor name of vec4 variable to hold the sampled color.
+ * @param inCoords name of vec2 variable containing the coords to be used with the domain.
+ * It is assumed that this is a variable and not an expression.
+ * @param inModulateColor if non-nullptr the sampled color will be modulated with this
+ * expression before being written to outColor.
+ */
+ void sampleTexture(GrGLSLShaderBuilder* builder,
+ GrGLSLUniformHandler* uniformHandler,
+ const GrGLSLCaps* glslCaps,
+ const GrTextureDomain& textureDomain,
+ const char* outColor,
+ const SkString& inCoords,
+ GrGLSLFragmentProcessor::SamplerHandle sampler,
+ const char* inModulateColor = nullptr);
+
+ /**
+ * Call this from GrGLSLFragmentProcessor::setData() to upload uniforms necessary for the
+ * texture domain. The rectangle is automatically adjusted to account for the texture's
+ * origin.
+ */
+ void setData(const GrGLSLProgramDataManager& pdman, const GrTextureDomain& textureDomain,
+ GrSurfaceOrigin textureOrigin);
+
+ enum {
+ kDomainKeyBits = 2, // See DomainKey().
+ };
+
+ /**
+ * GrGLSLFragmentProcessor::GenKey() must call this and include the returned value in its
+ * computed key. The returned value will be limited to the lower kDomainKeyBits bits.
+ */
+ static uint32_t DomainKey(const GrTextureDomain& domain) {
+ GR_STATIC_ASSERT(kModeCount <= (1 << kDomainKeyBits));
+ return domain.mode();
+ }
+
+ private:
+ static const int kPrevDomainCount = 4;
+ SkDEBUGCODE(Mode fMode;)
+ GrGLSLProgramDataManager::UniformHandle fDomainUni;
+ SkString fDomainName;
+ float fPrevDomain[kPrevDomainCount];
+ };
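+
+ // Illustrative note (mirrors GrTextureDomainEffect in GrTextureDomain.cpp): a processor
+ // using this helper typically owns a GLDomain in its GLSL instance and forwards DomainKey()
+ // from its key function, e.g.
+ //
+ //   void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder* b) const override {
+ //       b->add32(GrTextureDomain::GLDomain::DomainKey(fTextureDomain));
+ //   }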
+
+protected:
+ Mode fMode;
+ SkRect fDomain;
+ int fIndex;
+
+ typedef GrSingleTextureEffect INHERITED;
+};
+
+/**
+ * A basic texture effect that uses GrTextureDomain.
+ */
+class GrTextureDomainEffect : public GrSingleTextureEffect {
+
+public:
+ static sk_sp<GrFragmentProcessor> Make(GrTexture*,
+ sk_sp<GrColorSpaceXform>,
+ const SkMatrix&,
+ const SkRect& domain,
+ GrTextureDomain::Mode,
+ GrTextureParams::FilterMode filterMode);
+
+ const char* name() const override { return "TextureDomain"; }
+
+ SkString dumpInfo() const override {
+ SkString str;
+ str.appendf("Domain: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]",
+ fTextureDomain.domain().fLeft, fTextureDomain.domain().fTop,
+ fTextureDomain.domain().fRight, fTextureDomain.domain().fBottom);
+ str.append(INHERITED::dumpInfo());
+ return str;
+ }
+
+private:
+ GrTextureDomain fTextureDomain;
+
+ GrTextureDomainEffect(GrTexture*,
+ sk_sp<GrColorSpaceXform>,
+ const SkMatrix&,
+ const SkRect& domain,
+ GrTextureDomain::Mode,
+ GrTextureParams::FilterMode);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef GrSingleTextureEffect INHERITED;
+};
+
+class GrDeviceSpaceTextureDecalFragmentProcessor : public GrFragmentProcessor {
+public:
+ static sk_sp<GrFragmentProcessor> Make(GrTexture*, const SkIRect& subset,
+ const SkIPoint& deviceSpaceOffset);
+
+ const char* name() const override { return "GrDeviceSpaceTextureDecalFragmentProcessor"; }
+
+ SkString dumpInfo() const override {
+ SkString str;
+ str.appendf("Domain: [L: %.2f, T: %.2f, R: %.2f, B: %.2f] Offset: [%d %d]",
+ fTextureDomain.domain().fLeft, fTextureDomain.domain().fTop,
+ fTextureDomain.domain().fRight, fTextureDomain.domain().fBottom,
+ fDeviceSpaceOffset.fX, fDeviceSpaceOffset.fY);
+ str.append(INHERITED::dumpInfo());
+ return str;
+ }
+
+private:
+ GrTextureAccess fTextureAccess;
+ GrTextureDomain fTextureDomain;
+ SkIPoint fDeviceSpaceOffset;
+
+ GrDeviceSpaceTextureDecalFragmentProcessor(GrTexture*, const SkIRect&, const SkIPoint&);
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ // Since we always use decal mode, there is no need for key data.
+ void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override {}
+
+ bool onIsEqual(const GrFragmentProcessor& fp) const override;
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrTextureStripAtlas.cpp b/gfx/skia/skia/src/gpu/effects/GrTextureStripAtlas.cpp
new file mode 100644
index 000000000..2c882508f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrTextureStripAtlas.cpp
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrTextureStripAtlas.h"
+#include "GrContext.h"
+#include "GrTexture.h"
+#include "SkGr.h"
+#include "SkPixelRef.h"
+#include "SkTSearch.h"
+
+#ifdef SK_DEBUG
+ #define VALIDATE this->validate()
+#else
+ #define VALIDATE
+#endif
+
+class GrTextureStripAtlas::Hash : public SkTDynamicHash<GrTextureStripAtlas::AtlasEntry,
+ GrTextureStripAtlas::Desc> {};
+
+int32_t GrTextureStripAtlas::gCacheCount = 0;
+
+GrTextureStripAtlas::Hash* GrTextureStripAtlas::gAtlasCache = nullptr;
+
+GrTextureStripAtlas::Hash* GrTextureStripAtlas::GetCache() {
+
+ if (nullptr == gAtlasCache) {
+ gAtlasCache = new Hash;
+ }
+
+ return gAtlasCache;
+}
+
+// Remove the specified atlas from the cache
+void GrTextureStripAtlas::CleanUp(const GrContext*, void* info) {
+ SkASSERT(info);
+
+ AtlasEntry* entry = static_cast<AtlasEntry*>(info);
+
+ // remove the cache entry
+ GetCache()->remove(entry->fDesc);
+
+ // remove the actual entry
+ delete entry;
+
+ if (0 == GetCache()->count()) {
+ delete gAtlasCache;
+ gAtlasCache = nullptr;
+ }
+}
+
+GrTextureStripAtlas* GrTextureStripAtlas::GetAtlas(const GrTextureStripAtlas::Desc& desc) {
+ AtlasEntry* entry = GetCache()->find(desc);
+ if (nullptr == entry) {
+ entry = new AtlasEntry;
+
+ entry->fAtlas = new GrTextureStripAtlas(desc);
+ entry->fDesc = desc;
+
+ desc.fContext->addCleanUp(CleanUp, entry);
+
+ GetCache()->add(entry);
+ }
+
+ return entry->fAtlas;
+}
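+
+// Illustrative usage sketch (assumed caller-side code, not from upstream): a client looks up
+// an atlas by Desc, locks a bitmap into a row, and unlocks the row when finished; 'desc' and
+// 'bitmap' are placeholders.
+//
+//   GrTextureStripAtlas* atlas = GrTextureStripAtlas::GetAtlas(desc);
+//   int row = atlas->lockRow(bitmap);   // returns -1 if the texture could not be created
+//   if (row >= 0) {
+//       // ... sample the atlas texture at the row's y offset ...
+//       atlas->unlockRow(row);
+//   }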
+
+GrTextureStripAtlas::GrTextureStripAtlas(GrTextureStripAtlas::Desc desc)
+ : fCacheKey(sk_atomic_inc(&gCacheCount))
+ , fLockedRows(0)
+ , fDesc(desc)
+ , fNumRows(desc.fHeight / desc.fRowHeight)
+ , fTexture(nullptr)
+ , fRows(new AtlasRow[fNumRows])
+ , fLRUFront(nullptr)
+ , fLRUBack(nullptr) {
+ SkASSERT(fNumRows * fDesc.fRowHeight == fDesc.fHeight);
+ this->initLRU();
+ fNormalizedYHeight = SK_Scalar1 / fDesc.fHeight;
+ VALIDATE;
+}
+
+GrTextureStripAtlas::~GrTextureStripAtlas() { delete[] fRows; }
+
+int GrTextureStripAtlas::lockRow(const SkBitmap& data) {
+ VALIDATE;
+ if (0 == fLockedRows) {
+ this->lockTexture();
+ if (!fTexture) {
+ return -1;
+ }
+ }
+
+ int key = data.getGenerationID();
+ int rowNumber = -1;
+ int index = this->searchByKey(key);
+
+ if (index >= 0) {
+ // We already have the data in a row, so we can just return that row
+ AtlasRow* row = fKeyTable[index];
+ if (0 == row->fLocks) {
+ this->removeFromLRU(row);
+ }
+ ++row->fLocks;
+ ++fLockedRows;
+
+ // Since all the rows are always stored in a contiguous array, we can save the memory
+ // required for storing row numbers and just compute it with some pointer arithmetic
+ rowNumber = static_cast<int>(row - fRows);
+ } else {
+ // ~index is the index where we will insert the new key to keep things sorted
+ index = ~index;
+
+ // We don't have this data cached, so pick the least recently used row to copy into
+ AtlasRow* row = this->getLRU();
+
+ ++fLockedRows;
+
+ if (nullptr == row) {
+ // force a flush, which should unlock all the rows; then try again
+ fDesc.fContext->flush();
+ row = this->getLRU();
+ if (nullptr == row) {
+ --fLockedRows;
+ return -1;
+ }
+ }
+
+ this->removeFromLRU(row);
+
+ uint32_t oldKey = row->fKey;
+
+ // If we are writing into a row that already held bitmap data, we need to remove the
+ // reference to that genID which is stored in our sorted table of key values.
+ if (oldKey != kEmptyAtlasRowKey) {
+
+ // Find the entry in the list; if it's before the index where we plan on adding the new
+ // entry, we decrement since it will shift elements ahead of it back by one.
+ int oldIndex = this->searchByKey(oldKey);
+ if (oldIndex < index) {
+ --index;
+ }
+
+ fKeyTable.remove(oldIndex);
+ }
+
+ row->fKey = key;
+ row->fLocks = 1;
+ fKeyTable.insert(index, 1, &row);
+ rowNumber = static_cast<int>(row - fRows);
+
+ SkAutoLockPixels lock(data);
+
+ // Pass in the kDontFlush flag, since we know we're writing to a part of this texture
+ // that is not currently in use
+ fTexture->writePixels(0, rowNumber * fDesc.fRowHeight,
+ fDesc.fWidth, fDesc.fRowHeight,
+ SkImageInfo2GrPixelConfig(data.info(), *this->getContext()->caps()),
+ data.getPixels(),
+ data.rowBytes(),
+ GrContext::kDontFlush_PixelOpsFlag);
+ }
+
+ SkASSERT(rowNumber >= 0);
+ VALIDATE;
+ return rowNumber;
+}
+
+void GrTextureStripAtlas::unlockRow(int row) {
+ VALIDATE;
+ --fRows[row].fLocks;
+ --fLockedRows;
+ SkASSERT(fRows[row].fLocks >= 0 && fLockedRows >= 0);
+ if (0 == fRows[row].fLocks) {
+ this->appendLRU(fRows + row);
+ }
+ if (0 == fLockedRows) {
+ this->unlockTexture();
+ }
+ VALIDATE;
+}
+
+GrTextureStripAtlas::AtlasRow* GrTextureStripAtlas::getLRU() {
+ // Front is least-recently-used
+ AtlasRow* row = fLRUFront;
+ return row;
+}
+
+void GrTextureStripAtlas::lockTexture() {
+ GrSurfaceDesc texDesc;
+ texDesc.fWidth = fDesc.fWidth;
+ texDesc.fHeight = fDesc.fHeight;
+ texDesc.fConfig = fDesc.fConfig;
+ texDesc.fIsMipMapped = false;
+
+ static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+ GrUniqueKey key;
+ GrUniqueKey::Builder builder(&key, kDomain, 1);
+ builder[0] = static_cast<uint32_t>(fCacheKey);
+ builder.finish();
+
+ fTexture = fDesc.fContext->textureProvider()->findAndRefTextureByUniqueKey(key);
+ if (nullptr == fTexture) {
+ fTexture = fDesc.fContext->textureProvider()->createTexture(texDesc, SkBudgeted::kYes,
+ nullptr, 0);
+ if (!fTexture) {
+ return;
+ }
+
+ // We will be issuing writes to the surface using kDontFlush_PixelOpsFlag, so we
+ // need to make sure any existing IO is flushed
+ fDesc.fContext->flushSurfaceIO(fTexture);
+ fDesc.fContext->textureProvider()->assignUniqueKeyToTexture(key, fTexture);
+ // This is a new texture, so all of our cache info is now invalid
+ this->initLRU();
+ fKeyTable.rewind();
+ }
+ SkASSERT(fTexture);
+}
+
+void GrTextureStripAtlas::unlockTexture() {
+ SkASSERT(fTexture && 0 == fLockedRows);
+ fTexture->unref();
+ fTexture = nullptr;
+}
+
+void GrTextureStripAtlas::initLRU() {
+ fLRUFront = nullptr;
+ fLRUBack = nullptr;
+ // Initially all the rows are in the LRU list
+ for (int i = 0; i < fNumRows; ++i) {
+ fRows[i].fKey = kEmptyAtlasRowKey;
+ fRows[i].fNext = nullptr;
+ fRows[i].fPrev = nullptr;
+ this->appendLRU(fRows + i);
+ }
+ SkASSERT(nullptr == fLRUFront || nullptr == fLRUFront->fPrev);
+ SkASSERT(nullptr == fLRUBack || nullptr == fLRUBack->fNext);
+}
+
+void GrTextureStripAtlas::appendLRU(AtlasRow* row) {
+ SkASSERT(nullptr == row->fPrev && nullptr == row->fNext);
+ if (nullptr == fLRUFront && nullptr == fLRUBack) {
+ fLRUFront = row;
+ fLRUBack = row;
+ } else {
+ row->fPrev = fLRUBack;
+ fLRUBack->fNext = row;
+ fLRUBack = row;
+ }
+}
+
+void GrTextureStripAtlas::removeFromLRU(AtlasRow* row) {
+ SkASSERT(row);
+ if (row->fNext && row->fPrev) {
+ row->fPrev->fNext = row->fNext;
+ row->fNext->fPrev = row->fPrev;
+ } else {
+ if (nullptr == row->fNext) {
+ SkASSERT(row == fLRUBack);
+ fLRUBack = row->fPrev;
+ if (fLRUBack) {
+ fLRUBack->fNext = nullptr;
+ }
+ }
+ if (nullptr == row->fPrev) {
+ SkASSERT(row == fLRUFront);
+ fLRUFront = row->fNext;
+ if (fLRUFront) {
+ fLRUFront->fPrev = nullptr;
+ }
+ }
+ }
+ row->fNext = nullptr;
+ row->fPrev = nullptr;
+}
+
+int GrTextureStripAtlas::searchByKey(uint32_t key) {
+ AtlasRow target;
+ target.fKey = key;
+ return SkTSearch<const AtlasRow,
+ GrTextureStripAtlas::KeyLess>((const AtlasRow**)fKeyTable.begin(),
+ fKeyTable.count(),
+ &target,
+ sizeof(AtlasRow*));
+}
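+
+// Note on the return convention (mirrors SkTSearch): searchByKey() returns the index of the
+// matching row when the key is present, and the bitwise complement of the insertion index
+// when it is not, which is why lockRow() above recovers the slot with:
+//
+//   int index = this->searchByKey(key);
+//   if (index < 0) {
+//       index = ~index;  // position that keeps fKeyTable sorted
+//   }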
+
+#ifdef SK_DEBUG
+void GrTextureStripAtlas::validate() {
+
+ // Our key table should be sorted
+ uint32_t prev = 1 > fKeyTable.count() ? 0 : fKeyTable[0]->fKey;
+ for (int i = 1; i < fKeyTable.count(); ++i) {
+ SkASSERT(prev < fKeyTable[i]->fKey);
+ SkASSERT(fKeyTable[i]->fKey != kEmptyAtlasRowKey);
+ prev = fKeyTable[i]->fKey;
+ }
+
+ int lruCount = 0;
+ // Validate LRU pointers, and count LRU entries
+ SkASSERT(nullptr == fLRUFront || nullptr == fLRUFront->fPrev);
+ SkASSERT(nullptr == fLRUBack || nullptr == fLRUBack->fNext);
+ for (AtlasRow* r = fLRUFront; r != nullptr; r = r->fNext) {
+ if (nullptr == r->fNext) {
+ SkASSERT(r == fLRUBack);
+ } else {
+ SkASSERT(r->fNext->fPrev == r);
+ }
+ ++lruCount;
+ }
+
+ int rowLocks = 0;
+ int freeRows = 0;
+
+ for (int i = 0; i < fNumRows; ++i) {
+ rowLocks += fRows[i].fLocks;
+ if (0 == fRows[i].fLocks) {
+ ++freeRows;
+ bool inLRU = false;
+ // Step through the LRU and make sure it's present
+ for (AtlasRow* r = fLRUFront; r != nullptr; r = r->fNext) {
+ if (r == &fRows[i]) {
+ inLRU = true;
+ break;
+ }
+ }
+ SkASSERT(inLRU);
+ } else {
+ // If we are locked, we should have a key
+ SkASSERT(kEmptyAtlasRowKey != fRows[i].fKey);
+ }
+
+ // If we have a key != kEmptyAtlasRowKey, it should be in the key table
+ SkASSERT(fRows[i].fKey == kEmptyAtlasRowKey || this->searchByKey(fRows[i].fKey) >= 0);
+ }
+
+ // Our count of locks should equal the sum of row locks, unless we ran out of rows and flushed,
+ // in which case we'll have one more lock than recorded in the rows (to represent the pending
+ // lock of a row, which ensures we don't unlock the texture prematurely).
+ SkASSERT(rowLocks == fLockedRows || rowLocks + 1 == fLockedRows);
+
+ // We should have one lru entry for each free row
+ SkASSERT(freeRows == lruCount);
+
+ // If we have locked rows, we should have a locked texture, otherwise
+ // it should be unlocked
+ if (fLockedRows == 0) {
+ SkASSERT(nullptr == fTexture);
+ } else {
+ SkASSERT(fTexture);
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/effects/GrXfermodeFragmentProcessor.cpp b/gfx/skia/skia/src/gpu/effects/GrXfermodeFragmentProcessor.cpp
new file mode 100644
index 000000000..051061ffa
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrXfermodeFragmentProcessor.cpp
@@ -0,0 +1,312 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "effects/GrXfermodeFragmentProcessor.h"
+
+#include "GrFragmentProcessor.h"
+#include "GrInvariantOutput.h"
+#include "effects/GrConstColorProcessor.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLBlend.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "SkGrPriv.h"
+
+class ComposeTwoFragmentProcessor : public GrFragmentProcessor {
+public:
+ ComposeTwoFragmentProcessor(sk_sp<GrFragmentProcessor> src, sk_sp<GrFragmentProcessor> dst,
+ SkXfermode::Mode mode)
+ : fMode(mode) {
+ this->initClassID<ComposeTwoFragmentProcessor>();
+ SkDEBUGCODE(int shaderAChildIndex = )this->registerChildProcessor(std::move(src));
+ SkDEBUGCODE(int shaderBChildIndex = )this->registerChildProcessor(std::move(dst));
+ SkASSERT(0 == shaderAChildIndex);
+ SkASSERT(1 == shaderBChildIndex);
+ }
+
+ const char* name() const override { return "ComposeTwo"; }
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override {
+ b->add32(fMode);
+ }
+
+ SkXfermode::Mode getMode() const { return fMode; }
+
+protected:
+ bool onIsEqual(const GrFragmentProcessor& other) const override {
+ const ComposeTwoFragmentProcessor& cs = other.cast<ComposeTwoFragmentProcessor>();
+ return fMode == cs.fMode;
+ }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ inout->setToUnknown(GrInvariantOutput::kWill_ReadInput);
+ }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ SkXfermode::Mode fMode;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+/////////////////////////////////////////////////////////////////////
+
+class GLComposeTwoFragmentProcessor : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs&) override;
+
+private:
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+/////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(ComposeTwoFragmentProcessor);
+
+sk_sp<GrFragmentProcessor> ComposeTwoFragmentProcessor::TestCreate(GrProcessorTestData* d) {
+ // Create two random frag procs.
+ sk_sp<GrFragmentProcessor> fpA(GrProcessorUnitTest::MakeChildFP(d));
+ sk_sp<GrFragmentProcessor> fpB(GrProcessorUnitTest::MakeChildFP(d));
+
+ SkXfermode::Mode mode = static_cast<SkXfermode::Mode>(
+ d->fRandom->nextRangeU(0, SkXfermode::kLastMode));
+ return sk_sp<GrFragmentProcessor>(
+ new ComposeTwoFragmentProcessor(std::move(fpA), std::move(fpB), mode));
+}
+
+GrGLSLFragmentProcessor* ComposeTwoFragmentProcessor::onCreateGLSLInstance() const{
+ return new GLComposeTwoFragmentProcessor;
+}
+
+/////////////////////////////////////////////////////////////////////
+
+void GLComposeTwoFragmentProcessor::emitCode(EmitArgs& args) {
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const ComposeTwoFragmentProcessor& cs = args.fFp.cast<ComposeTwoFragmentProcessor>();
+
+ const char* inputColor = nullptr;
+ if (args.fInputColor) {
+ inputColor = "inputColor";
+ fragBuilder->codeAppendf("vec4 inputColor = vec4(%s.rgb, 1.0);", args.fInputColor);
+ }
+
+ // declare outputColor and emit the code for each of the two children
+ SkString srcColor("src");
+ this->emitChild(0, inputColor, &srcColor, args);
+
+ SkString dstColor("dst");
+ this->emitChild(1, inputColor, &dstColor, args);
+
+ // emit blend code
+ SkXfermode::Mode mode = cs.getMode();
+ fragBuilder->codeAppendf("// Compose Xfer Mode: %s\n", SkXfermode::ModeName(mode));
+ GrGLSLBlend::AppendMode(fragBuilder,
+ srcColor.c_str(),
+ dstColor.c_str(),
+ args.fOutputColor,
+ mode);
+
+ // re-multiply the output color by the input color's alpha
+ if (args.fInputColor) {
+ fragBuilder->codeAppendf("%s *= %s.a;", args.fOutputColor, args.fInputColor);
+ }
+}
+
+sk_sp<GrFragmentProcessor> GrXfermodeFragmentProcessor::MakeFromTwoProcessors(
+ sk_sp<GrFragmentProcessor> src, sk_sp<GrFragmentProcessor> dst, SkXfermode::Mode mode) {
+ switch (mode) {
+ case SkXfermode::kClear_Mode:
+ return GrConstColorProcessor::Make(GrColor_TRANSPARENT_BLACK,
+ GrConstColorProcessor::kIgnore_InputMode);
+ case SkXfermode::kSrc_Mode:
+ return src;
+ case SkXfermode::kDst_Mode:
+ return dst;
+ default:
+ return sk_sp<GrFragmentProcessor>(
+ new ComposeTwoFragmentProcessor(std::move(src), std::move(dst), mode));
+ }
+}
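+
+// Illustrative usage sketch (assumed caller-side code, not from upstream): composing two
+// already-built fragment processors with a blend mode; 'fpA' and 'fpB' are placeholders.
+//
+//   sk_sp<GrFragmentProcessor> composed = GrXfermodeFragmentProcessor::MakeFromTwoProcessors(
+//           std::move(fpA), std::move(fpB), SkXfermode::kMultiply_Mode);
+//
+// kClear_Mode, kSrc_Mode, and kDst_Mode are special-cased above and never allocate a
+// ComposeTwoFragmentProcessor.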
+
+//////////////////////////////////////////////////////////////////////////////
+
+class ComposeOneFragmentProcessor : public GrFragmentProcessor {
+public:
+ enum Child {
+ kDst_Child,
+ kSrc_Child,
+ };
+
+ ComposeOneFragmentProcessor(sk_sp<GrFragmentProcessor> dst, SkXfermode::Mode mode, Child child)
+ : fMode(mode)
+ , fChild(child) {
+ this->initClassID<ComposeOneFragmentProcessor>();
+ SkDEBUGCODE(int dstIndex = )this->registerChildProcessor(std::move(dst));
+ SkASSERT(0 == dstIndex);
+ }
+
+ const char* name() const override { return "ComposeOne"; }
+
+ SkString dumpInfo() const override {
+ SkString str;
+
+ for (int i = 0; i < this->numChildProcessors(); ++i) {
+ str.append(this->childProcessor(i).dumpInfo());
+ }
+ return str;
+ }
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override {
+ GR_STATIC_ASSERT((SkXfermode::kLastMode & SK_MaxU16) == SkXfermode::kLastMode);
+ b->add32(fMode | (fChild << 16));
+ }
+
+ SkXfermode::Mode mode() const { return fMode; }
+
+ Child child() const { return fChild; }
+
+protected:
+ bool onIsEqual(const GrFragmentProcessor& that) const override {
+ return fMode == that.cast<ComposeOneFragmentProcessor>().fMode;
+ }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ SkXfermode::Coeff skSrcCoeff, skDstCoeff;
+ if (SkXfermode::ModeAsCoeff(fMode, &skSrcCoeff, &skDstCoeff)) {
+ GrBlendCoeff srcCoeff = SkXfermodeCoeffToGrBlendCoeff(skSrcCoeff);
+ GrBlendCoeff dstCoeff = SkXfermodeCoeffToGrBlendCoeff(skDstCoeff);
+ GrInvariantOutput childOutput(0xFFFFFFFF, kRGBA_GrColorComponentFlags, false);
+ this->childProcessor(0).computeInvariantOutput(&childOutput);
+ GrColor blendColor;
+ GrColorComponentFlags blendFlags;
+ if (kDst_Child == fChild) {
+ GrGetCoeffBlendKnownComponents(srcCoeff, dstCoeff,
+ inout->color(), inout->validFlags(),
+ childOutput.color(), childOutput.validFlags(),
+ &blendColor, &blendFlags);
+ } else {
+ GrGetCoeffBlendKnownComponents(srcCoeff, dstCoeff,
+ childOutput.color(), childOutput.validFlags(),
+ inout->color(), inout->validFlags(),
+ &blendColor, &blendFlags);
+ }
+ // will the shader code reference the input color?
+ GrInvariantOutput::ReadInput readsInput = GrInvariantOutput::kWillNot_ReadInput;
+ if (kDst_Child == fChild) {
+ if (kZero_GrBlendCoeff != srcCoeff || GrBlendCoeffRefsSrc(dstCoeff)) {
+ readsInput = GrInvariantOutput::kWill_ReadInput;
+ }
+ } else {
+ if (kZero_GrBlendCoeff != dstCoeff || GrBlendCoeffRefsDst(srcCoeff)) {
+ readsInput = GrInvariantOutput::kWill_ReadInput;
+ }
+ }
+ inout->setToOther(blendFlags, blendColor, readsInput);
+ } else {
+ inout->setToUnknown(GrInvariantOutput::kWill_ReadInput);
+ }
+ }
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+ SkXfermode::Mode fMode;
+ Child fChild;
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+//////////////////////////////////////////////////////////////////////////////
+
+class GLComposeOneFragmentProcessor : public GrGLSLFragmentProcessor {
+public:
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkXfermode::Mode mode = args.fFp.cast<ComposeOneFragmentProcessor>().mode();
+ ComposeOneFragmentProcessor::Child child =
+ args.fFp.cast<ComposeOneFragmentProcessor>().child();
+ SkString childColor("child");
+ this->emitChild(0, nullptr, &childColor, args);
+
+ const char* inputColor = args.fInputColor;
+ // We don't try to optimize for this case at all
+ if (!inputColor) {
+ fragBuilder->codeAppendf("const vec4 ones = vec4(1);");
+ inputColor = "ones";
+ }
+
+ // emit blend code
+ fragBuilder->codeAppendf("// Compose Xfer Mode: %s\n", SkXfermode::ModeName(mode));
+ const char* childStr = childColor.c_str();
+ if (ComposeOneFragmentProcessor::kDst_Child == child) {
+ GrGLSLBlend::AppendMode(fragBuilder, inputColor, childStr, args.fOutputColor, mode);
+ } else {
+ GrGLSLBlend::AppendMode(fragBuilder, childStr, inputColor, args.fOutputColor, mode);
+ }
+ }
+
+private:
+ typedef GrGLSLFragmentProcessor INHERITED;
+};
+
+/////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(ComposeOneFragmentProcessor);
+
+sk_sp<GrFragmentProcessor> ComposeOneFragmentProcessor::TestCreate(GrProcessorTestData* d) {
+ // Create one random frag proc.
+ // For now, we prevent the child from being a processor with children, to avoid building an
+ // arbitrarily large tree of procs.
+ sk_sp<GrFragmentProcessor> dst(GrProcessorUnitTest::MakeChildFP(d));
+ SkXfermode::Mode mode = static_cast<SkXfermode::Mode>(
+ d->fRandom->nextRangeU(0, SkXfermode::kLastMode));
+ ComposeOneFragmentProcessor::Child child = d->fRandom->nextBool() ?
+ ComposeOneFragmentProcessor::kDst_Child :
+ ComposeOneFragmentProcessor::kSrc_Child;
+ return sk_sp<GrFragmentProcessor>(new ComposeOneFragmentProcessor(std::move(dst), mode, child));
+}
+
+GrGLSLFragmentProcessor* ComposeOneFragmentProcessor::onCreateGLSLInstance() const {
+ return new GLComposeOneFragmentProcessor;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> GrXfermodeFragmentProcessor::MakeFromDstProcessor(
+ sk_sp<GrFragmentProcessor> dst, SkXfermode::Mode mode) {
+ switch (mode) {
+ case SkXfermode::kClear_Mode:
+ return GrConstColorProcessor::Make(GrColor_TRANSPARENT_BLACK,
+ GrConstColorProcessor::kIgnore_InputMode);
+ case SkXfermode::kSrc_Mode:
+ return nullptr;
+ default:
+ return sk_sp<GrFragmentProcessor>(
+ new ComposeOneFragmentProcessor(std::move(dst), mode,
+ ComposeOneFragmentProcessor::kDst_Child));
+ }
+}
+
+sk_sp<GrFragmentProcessor> GrXfermodeFragmentProcessor::MakeFromSrcProcessor(
+ sk_sp<GrFragmentProcessor> src, SkXfermode::Mode mode) {
+ switch (mode) {
+ case SkXfermode::kClear_Mode:
+ return GrConstColorProcessor::Make(GrColor_TRANSPARENT_BLACK,
+ GrConstColorProcessor::kIgnore_InputMode);
+ case SkXfermode::kDst_Mode:
+ return nullptr;
+ default:
+ return sk_sp<GrFragmentProcessor>(
+ new ComposeOneFragmentProcessor(src, mode,
+ ComposeOneFragmentProcessor::kSrc_Child));
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrYUVEffect.cpp b/gfx/skia/skia/src/gpu/effects/GrYUVEffect.cpp
new file mode 100644
index 000000000..cbe25e82f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrYUVEffect.cpp
@@ -0,0 +1,407 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrYUVEffect.h"
+
+#include "GrCoordTransform.h"
+#include "GrFragmentProcessor.h"
+#include "GrInvariantOutput.h"
+#include "GrProcessor.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+
+namespace {
+
+static const float kJPEGConversionMatrix[16] = {
+ 1.0f, 0.0f, 1.402f, -0.701f,
+ 1.0f, -0.34414f, -0.71414f, 0.529f,
+ 1.0f, 1.772f, 0.0f, -0.886f,
+ 0.0f, 0.0f, 0.0f, 1.0f
+};
+
+static const float kRec601ConversionMatrix[16] = {
+ 1.164f, 0.0f, 1.596f, -0.87075f,
+ 1.164f, -0.391f, -0.813f, 0.52925f,
+ 1.164f, 2.018f, 0.0f, -1.08175f,
+ 0.0f, 0.0f, 0.0f, 1.0f
+};
+
+static const float kRec709ConversionMatrix[16] = {
+ 1.164f, 0.0f, 1.793f, -0.96925f,
+ 1.164f, -0.213f, -0.533f, 0.30025f,
+ 1.164f, 2.112f, 0.0f, -1.12875f,
+ 0.0f, 0.0f, 0.0f, 1.0f
+};
+
+static const float kJPEGInverseConversionMatrix[16] = {
+ 0.299001f, 0.586998f, 0.114001f, 0.0000821798f,
+ -0.168736f, -0.331263f, 0.499999f, 0.499954f,
+ 0.499999f, -0.418686f, -0.0813131f, 0.499941f,
+ 0.f, 0.f, 0.f, 1.f
+};
+
+static const float kRec601InverseConversionMatrix[16] = {
+ 0.256951f, 0.504421f, 0.0977346f, 0.0625f,
+ -0.148212f, -0.290954f, 0.439166f, 0.5f,
+ 0.439166f, -0.367886f, -0.0712802f, 0.5f,
+ 0.f, 0.f, 0.f, 1.f
+};
+
+static const float kRec709InverseConversionMatrix[16] = {
+ 0.182663f, 0.614473f, 0.061971f, 0.0625f,
+ -0.100672f, -0.338658f, 0.43933f, 0.5f,
+ 0.439142f, -0.39891f, -0.040231f, 0.5f,
+ 0.f, 0.f, 0.f, 1.f
+};
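+
+// Layout note (illustrative): in each matrix above, every group of four consecutive floats
+// holds the coefficients applied to (y, u, v, 1) for one output channel; e.g. the first row
+// of kJPEGConversionMatrix encodes r = y + 1.402*v - 0.701. The inverse matrices hold the
+// corresponding RGB -> YUV dot products consumed by RGBToYUVEffect below.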
+
+class YUVtoRGBEffect : public GrFragmentProcessor {
+public:
+ static sk_sp<GrFragmentProcessor> Make(GrTexture* yTexture, GrTexture* uTexture,
+ GrTexture* vTexture, const SkISize sizes[3],
+ SkYUVColorSpace colorSpace, bool nv12) {
+ SkScalar w[3], h[3];
+ w[0] = SkIntToScalar(sizes[0].fWidth) / SkIntToScalar(yTexture->width());
+ h[0] = SkIntToScalar(sizes[0].fHeight) / SkIntToScalar(yTexture->height());
+ w[1] = SkIntToScalar(sizes[1].fWidth) / SkIntToScalar(uTexture->width());
+ h[1] = SkIntToScalar(sizes[1].fHeight) / SkIntToScalar(uTexture->height());
+ w[2] = SkIntToScalar(sizes[2].fWidth) / SkIntToScalar(vTexture->width());
+ h[2] = SkIntToScalar(sizes[2].fHeight) / SkIntToScalar(vTexture->height());
+ SkMatrix yuvMatrix[3];
+ yuvMatrix[0] = GrCoordTransform::MakeDivByTextureWHMatrix(yTexture);
+ yuvMatrix[1] = yuvMatrix[0];
+ yuvMatrix[1].preScale(w[1] / w[0], h[1] / h[0]);
+ yuvMatrix[2] = yuvMatrix[0];
+ yuvMatrix[2].preScale(w[2] / w[0], h[2] / h[0]);
+ GrTextureParams::FilterMode uvFilterMode =
+ ((sizes[1].fWidth != sizes[0].fWidth) ||
+ (sizes[1].fHeight != sizes[0].fHeight) ||
+ (sizes[2].fWidth != sizes[0].fWidth) ||
+ (sizes[2].fHeight != sizes[0].fHeight)) ?
+ GrTextureParams::kBilerp_FilterMode :
+ GrTextureParams::kNone_FilterMode;
+ return sk_sp<GrFragmentProcessor>(new YUVtoRGBEffect(
+ yTexture, uTexture, vTexture, yuvMatrix, uvFilterMode, colorSpace, nv12));
+ }
+
+ const char* name() const override { return "YUV to RGB"; }
+
+ SkYUVColorSpace getColorSpace() const { return fColorSpace; }
+
+ bool isNV12() const {
+ return fNV12;
+ }
+
+ class GLSLProcessor : public GrGLSLFragmentProcessor {
+ public:
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ const YUVtoRGBEffect& effect = args.fFp.cast<YUVtoRGBEffect>();
+
+ const char* colorSpaceMatrix = nullptr;
+ fMatrixUni = args.fUniformHandler->addUniform(kFragment_GrShaderFlag,
+ kMat44f_GrSLType, kDefault_GrSLPrecision,
+ "ColorSpaceMatrix", &colorSpaceMatrix);
+ fragBuilder->codeAppendf("%s = vec4(", args.fOutputColor);
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0],
+ args.fTransformedCoords[0].c_str(),
+ args.fTransformedCoords[0].getType());
+ fragBuilder->codeAppend(".r,");
+ fragBuilder->appendTextureLookup(args.fTexSamplers[1],
+ args.fTransformedCoords[1].c_str(),
+ args.fTransformedCoords[1].getType());
+ if (effect.fNV12) {
+ fragBuilder->codeAppendf(".rg,");
+ } else {
+ fragBuilder->codeAppend(".r,");
+ fragBuilder->appendTextureLookup(args.fTexSamplers[2],
+ args.fTransformedCoords[2].c_str(),
+ args.fTransformedCoords[2].getType());
+ fragBuilder->codeAppendf(".g,");
+ }
+ fragBuilder->codeAppendf("1.0) * %s;", colorSpaceMatrix);
+ }
+
+ protected:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& processor) override {
+ const YUVtoRGBEffect& yuvEffect = processor.cast<YUVtoRGBEffect>();
+ switch (yuvEffect.getColorSpace()) {
+ case kJPEG_SkYUVColorSpace:
+ pdman.setMatrix4f(fMatrixUni, kJPEGConversionMatrix);
+ break;
+ case kRec601_SkYUVColorSpace:
+ pdman.setMatrix4f(fMatrixUni, kRec601ConversionMatrix);
+ break;
+ case kRec709_SkYUVColorSpace:
+ pdman.setMatrix4f(fMatrixUni, kRec709ConversionMatrix);
+ break;
+ }
+ }
+
+ private:
+ GrGLSLProgramDataManager::UniformHandle fMatrixUni;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+ };
+
+private:
+ YUVtoRGBEffect(GrTexture* yTexture, GrTexture* uTexture, GrTexture* vTexture,
+ const SkMatrix yuvMatrix[3], GrTextureParams::FilterMode uvFilterMode,
+ SkYUVColorSpace colorSpace, bool nv12)
+ : fYTransform(yuvMatrix[0], yTexture, GrTextureParams::kNone_FilterMode)
+ , fYAccess(yTexture)
+ , fUTransform(yuvMatrix[1], uTexture, uvFilterMode)
+ , fUAccess(uTexture, uvFilterMode)
+ , fVAccess(vTexture, uvFilterMode)
+ , fColorSpace(colorSpace)
+ , fNV12(nv12) {
+ this->initClassID<YUVtoRGBEffect>();
+ this->addCoordTransform(&fYTransform);
+ this->addTextureAccess(&fYAccess);
+ this->addCoordTransform(&fUTransform);
+ this->addTextureAccess(&fUAccess);
+ if (!fNV12) {
+ fVTransform = GrCoordTransform(yuvMatrix[2], vTexture, uvFilterMode);
+ this->addCoordTransform(&fVTransform);
+ this->addTextureAccess(&fVAccess);
+ }
+ }
+
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override {
+ return new GLSLProcessor;
+ }
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override {
+ b->add32(fNV12);
+ }
+
+ bool onIsEqual(const GrFragmentProcessor& sBase) const override {
+ const YUVtoRGBEffect& s = sBase.cast<YUVtoRGBEffect>();
+ return (fColorSpace == s.getColorSpace()) && (fNV12 == s.isNV12());
+ }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ // YUV is opaque
+ inout->setToOther(kA_GrColorComponentFlag, 0xFF << GrColor_SHIFT_A,
+ GrInvariantOutput::kWillNot_ReadInput);
+ }
+
+ GrCoordTransform fYTransform;
+ GrTextureAccess fYAccess;
+ GrCoordTransform fUTransform;
+ GrTextureAccess fUAccess;
+ GrCoordTransform fVTransform;
+ GrTextureAccess fVAccess;
+ SkYUVColorSpace fColorSpace;
+ bool fNV12;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+
+class RGBToYUVEffect : public GrFragmentProcessor {
+public:
+ enum OutputChannels {
+ // output color r = y, g = u, b = v, a = a
+ kYUV_OutputChannels,
+ // output color rgba = y
+ kY_OutputChannels,
+ // output color r = u, g = v, b = 0, a = a
+ kUV_OutputChannels,
+ // output color rgba = u
+ kU_OutputChannels,
+ // output color rgba = v
+ kV_OutputChannels
+ };
+
+ RGBToYUVEffect(sk_sp<GrFragmentProcessor> rgbFP, SkYUVColorSpace colorSpace,
+ OutputChannels output)
+ : fColorSpace(colorSpace)
+ , fOutputChannels(output) {
+ this->initClassID<RGBToYUVEffect>();
+ this->registerChildProcessor(std::move(rgbFP));
+ }
+
+ const char* name() const override { return "RGBToYUV"; }
+
+ SkYUVColorSpace getColorSpace() const { return fColorSpace; }
+
+ OutputChannels outputChannels() const { return fOutputChannels; }
+
+ class GLSLProcessor : public GrGLSLFragmentProcessor {
+ public:
+ GLSLProcessor() : fLastColorSpace(-1), fLastOutputChannels(-1) {}
+
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ OutputChannels oc = args.fFp.cast<RGBToYUVEffect>().outputChannels();
+
+ SkString outputColor("rgbColor");
+ this->emitChild(0, args.fInputColor, &outputColor, args);
+
+ const char* uniName;
+ switch (oc) {
+ case kYUV_OutputChannels:
+ fRGBToYUVUni = args.fUniformHandler->addUniformArray(
+ kFragment_GrShaderFlag,
+ kVec4f_GrSLType, kDefault_GrSLPrecision,
+ "RGBToYUV", 3, &uniName);
+ fragBuilder->codeAppendf("%s = vec4(dot(rgbColor.rgb, %s[0].rgb) + %s[0].a,"
+ "dot(rgbColor.rgb, %s[1].rgb) + %s[1].a,"
+ "dot(rgbColor.rgb, %s[2].rgb) + %s[2].a,"
+ "rgbColor.a);",
+ args.fOutputColor, uniName, uniName, uniName, uniName,
+ uniName, uniName);
+ break;
+ case kUV_OutputChannels:
+ fRGBToYUVUni = args.fUniformHandler->addUniformArray(
+ kFragment_GrShaderFlag,
+ kVec4f_GrSLType, kDefault_GrSLPrecision,
+ "RGBToUV", 2, &uniName);
+ fragBuilder->codeAppendf("%s = vec4(dot(rgbColor.rgb, %s[0].rgb) + %s[0].a,"
+ "dot(rgbColor.rgb, %s[1].rgb) + %s[1].a,"
+ "0.0,"
+ "rgbColor.a);",
+ args.fOutputColor, uniName, uniName, uniName, uniName);
+ break;
+ case kY_OutputChannels:
+ case kU_OutputChannels:
+ case kV_OutputChannels:
+ fRGBToYUVUni = args.fUniformHandler->addUniform(
+ kFragment_GrShaderFlag,
+ kVec4f_GrSLType, kDefault_GrSLPrecision,
+ "RGBToYUorV", &uniName);
+ fragBuilder->codeAppendf("%s = vec4(dot(rgbColor.rgb, %s.rgb) + %s.a);\n",
+ args.fOutputColor, uniName, uniName);
+ break;
+ }
+ }
+
+ private:
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrProcessor& processor) override {
+ const RGBToYUVEffect& effect = processor.cast<RGBToYUVEffect>();
+ OutputChannels oc = effect.outputChannels();
+ if (effect.getColorSpace() != fLastColorSpace || oc != fLastOutputChannels) {
+
+ const float* matrix = nullptr;
+ switch (effect.getColorSpace()) {
+ case kJPEG_SkYUVColorSpace:
+ matrix = kJPEGInverseConversionMatrix;
+ break;
+ case kRec601_SkYUVColorSpace:
+ matrix = kRec601InverseConversionMatrix;
+ break;
+ case kRec709_SkYUVColorSpace:
+ matrix = kRec709InverseConversionMatrix;
+ break;
+ }
+ switch (oc) {
+ case kYUV_OutputChannels:
+ pdman.set4fv(fRGBToYUVUni, 3, matrix);
+ break;
+ case kUV_OutputChannels:
+ pdman.set4fv(fRGBToYUVUni, 2, matrix + 4);
+ break;
+ case kY_OutputChannels:
+ pdman.set4fv(fRGBToYUVUni, 1, matrix);
+ break;
+ case kU_OutputChannels:
+ pdman.set4fv(fRGBToYUVUni, 1, matrix + 4);
+ break;
+ case kV_OutputChannels:
+ pdman.set4fv(fRGBToYUVUni, 1, matrix + 8);
+ break;
+ }
+ fLastColorSpace = effect.getColorSpace();
+ fLastOutputChannels = oc;  // cache the channel selection so the check above works
+ }
+ }
+ GrGLSLProgramDataManager::UniformHandle fRGBToYUVUni;
+ int fLastColorSpace;
+ int fLastOutputChannels;
+
+ typedef GrGLSLFragmentProcessor INHERITED;
+ };
+
+private:
+ GrGLSLFragmentProcessor* onCreateGLSLInstance() const override {
+ return new GLSLProcessor;
+ }
+
+ void onGetGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override {
+ // kY, kU, and kV all generate the same code, just upload different coefficients.
+ if (kU_OutputChannels == fOutputChannels || kV_OutputChannels == fOutputChannels) {
+ b->add32(kY_OutputChannels);
+ } else {
+ b->add32(fOutputChannels);
+ }
+ }
+
+ bool onIsEqual(const GrFragmentProcessor& sBase) const override {
+ const RGBToYUVEffect& s = sBase.cast<RGBToYUVEffect>();
+ return fColorSpace == s.getColorSpace() && fOutputChannels == s.outputChannels();
+ }
+
+ void onComputeInvariantOutput(GrInvariantOutput* inout) const override {
+ inout->setToUnknown(GrInvariantOutput::kWillNot_ReadInput);
+ }
+
+ GrCoordTransform fTransform;
+ GrTextureAccess fAccess;
+ SkYUVColorSpace fColorSpace;
+ OutputChannels fOutputChannels;
+
+ typedef GrFragmentProcessor INHERITED;
+};
+
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+sk_sp<GrFragmentProcessor> GrYUVEffect::MakeYUVToRGB(GrTexture* yTexture, GrTexture* uTexture,
+ GrTexture* vTexture, const SkISize sizes[3],
+ SkYUVColorSpace colorSpace, bool nv12) {
+ SkASSERT(yTexture && uTexture && vTexture && sizes);
+ return YUVtoRGBEffect::Make(yTexture, uTexture, vTexture, sizes, colorSpace, nv12);
+}
+
+sk_sp<GrFragmentProcessor>
+GrYUVEffect::MakeRGBToYUV(sk_sp<GrFragmentProcessor> rgbFP, SkYUVColorSpace colorSpace) {
+ SkASSERT(rgbFP);
+ return sk_sp<GrFragmentProcessor>(
+ new RGBToYUVEffect(std::move(rgbFP), colorSpace, RGBToYUVEffect::kYUV_OutputChannels));
+}
+
+sk_sp<GrFragmentProcessor>
+GrYUVEffect::MakeRGBToY(sk_sp<GrFragmentProcessor> rgbFP, SkYUVColorSpace colorSpace) {
+ SkASSERT(rgbFP);
+ return sk_sp<GrFragmentProcessor>(
+ new RGBToYUVEffect(std::move(rgbFP), colorSpace, RGBToYUVEffect::kY_OutputChannels));
+}
+
+sk_sp<GrFragmentProcessor>
+GrYUVEffect::MakeRGBToUV(sk_sp<GrFragmentProcessor> rgbFP, SkYUVColorSpace colorSpace) {
+ SkASSERT(rgbFP);
+ return sk_sp<GrFragmentProcessor>(
+ new RGBToYUVEffect(std::move(rgbFP), colorSpace, RGBToYUVEffect::kUV_OutputChannels));
+}
+
+sk_sp<GrFragmentProcessor>
+GrYUVEffect::MakeRGBToU(sk_sp<GrFragmentProcessor> rgbFP, SkYUVColorSpace colorSpace) {
+ SkASSERT(rgbFP);
+ return sk_sp<GrFragmentProcessor>(
+ new RGBToYUVEffect(std::move(rgbFP), colorSpace, RGBToYUVEffect::kU_OutputChannels));
+}
+
+sk_sp<GrFragmentProcessor>
+GrYUVEffect::MakeRGBToV(sk_sp<GrFragmentProcessor> rgbFP, SkYUVColorSpace colorSpace) {
+ SkASSERT(rgbFP);
+ return sk_sp<GrFragmentProcessor>(
+ new RGBToYUVEffect(std::move(rgbFP), colorSpace, RGBToYUVEffect::kV_OutputChannels));
+}
diff --git a/gfx/skia/skia/src/gpu/effects/GrYUVEffect.h b/gfx/skia/skia/src/gpu/effects/GrYUVEffect.h
new file mode 100644
index 000000000..902a181fd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/effects/GrYUVEffect.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrYUVEffect_DEFINED
+#define GrYUVEffect_DEFINED
+
+#include "SkImageInfo.h"
+
+class GrFragmentProcessor;
+class GrTexture;
+
+namespace GrYUVEffect {
+ /**
+ * Creates an effect that performs color conversion from YUV to RGB. The input textures are
+ * assumed to be kA8_GrPixelConfig.
+ */
+ sk_sp<GrFragmentProcessor> MakeYUVToRGB(GrTexture* yTexture, GrTexture* uTexture,
+ GrTexture* vTexture, const SkISize sizes[3],
+ SkYUVColorSpace colorSpace, bool nv12);
+
+ /**
+ * Creates a processor that performs color conversion from the passed in processor's RGB
+ * channels to Y, U, and V channels. The output color is (y, u, v, a) where a is the passed in
+ * processor's alpha output.
+ */
+ sk_sp<GrFragmentProcessor> MakeRGBToYUV(sk_sp<GrFragmentProcessor>,
+ SkYUVColorSpace colorSpace);
+
+ /**
+ * Creates a processor that performs color conversion from the passed in processor's RGB
+ * channels to U and V channels. The output color is (u, v, 0, a) where a is the passed in
+ * processor's alpha output.
+ */
+ sk_sp<GrFragmentProcessor> MakeRGBToUV(sk_sp<GrFragmentProcessor>,
+ SkYUVColorSpace colorSpace);
+ /**
+ * Creates a processor that performs color conversion from the passed in fragment processor's
+ * RGB channels to Y, U, or V (replicated across all four output color channels). The alpha
+ * output of the passed in fragment processor is ignored.
+ */
+ sk_sp<GrFragmentProcessor> MakeRGBToY(sk_sp<GrFragmentProcessor>, SkYUVColorSpace colorSpace);
+ sk_sp<GrFragmentProcessor> MakeRGBToU(sk_sp<GrFragmentProcessor>, SkYUVColorSpace colorSpace);
+ sk_sp<GrFragmentProcessor> MakeRGBToV(sk_sp<GrFragmentProcessor>, SkYUVColorSpace colorSpace);
+};
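+
+// Illustrative usage sketch (assumed caller-side code, not upstream documentation): converting
+// three planar A8 textures to RGB; texture pointers and plane sizes are placeholders.
+//
+//   SkISize sizes[3] = {{width, height}, {width / 2, height / 2}, {width / 2, height / 2}};
+//   sk_sp<GrFragmentProcessor> yuvToRgb = GrYUVEffect::MakeYUVToRGB(
+//           yTex, uTex, vTex, sizes, kRec709_SkYUVColorSpace, false /* nv12 */);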
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLAssembleInterface.cpp b/gfx/skia/skia/src/gpu/gl/GrGLAssembleInterface.cpp
new file mode 100644
index 000000000..b435655a5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLAssembleInterface.cpp
@@ -0,0 +1,938 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "gl/GrGLAssembleInterface.h"
+#include "GrGLUtil.h"
+
+#define GET_PROC(F) functions->f ## F = (GrGL ## F ## Proc) get(ctx, "gl" #F)
+#define GET_PROC_SUFFIX(F, S) functions->f ## F = (GrGL ## F ## Proc) get(ctx, "gl" #F #S)
+#define GET_PROC_LOCAL(F) GrGL ## F ## Proc F = (GrGL ## F ## Proc) get(ctx, "gl" #F)
+
+#define GET_EGL_PROC_SUFFIX(F, S) functions->fEGL ## F = (GrEGL ## F ## Proc) get(ctx, "egl" #F #S)
+
+const GrGLInterface* GrGLAssembleInterface(void* ctx, GrGLGetProc get) {
+ GET_PROC_LOCAL(GetString);
+ if (nullptr == GetString) {
+ return nullptr;
+ }
+
+ const char* verStr = reinterpret_cast<const char*>(GetString(GR_GL_VERSION));
+ if (nullptr == verStr) {
+ return nullptr;
+ }
+
+ GrGLStandard standard = GrGLGetStandardInUseFromString(verStr);
+
+ if (kGLES_GrGLStandard == standard) {
+ return GrGLAssembleGLESInterface(ctx, get);
+ } else if (kGL_GrGLStandard == standard) {
+ return GrGLAssembleGLInterface(ctx, get);
+ }
+ return nullptr;
+}
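+
+// Illustrative usage sketch (an assumption; GrGLFuncPtr and eglGetProcAddress come from the
+// GL/EGL headers, not from this file): an embedder typically passes a thin wrapper around its
+// platform's GetProcAddress as the GrGLGetProc, e.g.
+//
+//   static GrGLFuncPtr egl_get(void* ctx, const char name[]) {
+//       return (GrGLFuncPtr) eglGetProcAddress(name);
+//   }
+//   const GrGLInterface* interface = GrGLAssembleInterface(nullptr, egl_get);
+//
+// The assembled interface is nullptr when glGetString is unavailable or the GL standard
+// cannot be determined, as handled above.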
+
+static void get_egl_query_and_display(GrEGLQueryStringProc* queryString, GrEGLDisplay* display,
+ void* ctx, GrGLGetProc get) {
+ *queryString = (GrEGLQueryStringProc) get(ctx, "eglQueryString");
+ *display = GR_EGL_NO_DISPLAY;
+ if (*queryString) {
+ GrEGLGetCurrentDisplayProc getCurrentDisplay =
+ (GrEGLGetCurrentDisplayProc) get(ctx, "eglGetCurrentDisplay");
+ if (getCurrentDisplay) {
+ *display = getCurrentDisplay();
+ } else {
+ *queryString = nullptr;
+ }
+ }
+}
+
+const GrGLInterface* GrGLAssembleGLInterface(void* ctx, GrGLGetProc get) {
+ GET_PROC_LOCAL(GetString);
+ GET_PROC_LOCAL(GetStringi);
+ GET_PROC_LOCAL(GetIntegerv);
+
+ // GetStringi may be nullptr depending on the GL version.
+ if (nullptr == GetString || nullptr == GetIntegerv) {
+ return nullptr;
+ }
+
+ const char* versionString = (const char*) GetString(GR_GL_VERSION);
+ GrGLVersion glVer = GrGLGetVersionFromString(versionString);
+
+ if (glVer < GR_GL_VER(1,5) || GR_GL_INVALID_VER == glVer) {
+ // We must have array and element_array buffer objects.
+ return nullptr;
+ }
+
+ GrEGLQueryStringProc queryString;
+ GrEGLDisplay display;
+ get_egl_query_and_display(&queryString, &display, ctx, get);
+ GrGLExtensions extensions;
+ if (!extensions.init(kGL_GrGLStandard, GetString, GetStringi, GetIntegerv, queryString,
+ display)) {
+ return nullptr;
+ }
+
+ GrGLInterface* interface = new GrGLInterface();
+ GrGLInterface::Functions* functions = &interface->fFunctions;
+
+ GET_PROC(ActiveTexture);
+ GET_PROC(AttachShader);
+ GET_PROC(BindAttribLocation);
+ GET_PROC(BindBuffer);
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(BindFragDataLocation);
+ }
+ GET_PROC(BeginQuery);
+ GET_PROC(BindTexture);
+
+ if (extensions.has("GL_KHR_blend_equation_advanced")) {
+ GET_PROC_SUFFIX(BlendBarrier, KHR);
+ } else if (extensions.has("GL_NV_blend_equation_advanced")) {
+ GET_PROC_SUFFIX(BlendBarrier, NV);
+ }
+
+ if (glVer >= GR_GL_VER(1,4) ||
+ extensions.has("GL_ARB_imaging")) {
+ GET_PROC(BlendColor);
+ } else if (extensions.has("GL_EXT_blend_color")) {
+ GET_PROC_SUFFIX(BlendColor, EXT);
+ }
+
+ if (glVer >= GR_GL_VER(1,4) ||
+ extensions.has("GL_ARB_imaging")) {
+ GET_PROC(BlendEquation);
+ } else if (extensions.has("GL_EXT_blend_subtract")) {
+ GET_PROC_SUFFIX(BlendEquation, EXT);
+ }
+
+ GET_PROC(BlendFunc);
+ GET_PROC(BufferData);
+ GET_PROC(BufferSubData);
+ GET_PROC(Clear);
+ GET_PROC(ClearColor);
+ GET_PROC(ClearStencil);
+ GET_PROC(ColorMask);
+ GET_PROC(CompileShader);
+ GET_PROC(CompressedTexImage2D);
+ GET_PROC(CompressedTexSubImage2D);
+ GET_PROC(CopyTexSubImage2D);
+ GET_PROC(CreateProgram);
+ GET_PROC(CreateShader);
+ GET_PROC(CullFace);
+ GET_PROC(DeleteBuffers);
+ GET_PROC(DeleteProgram);
+ GET_PROC(DeleteQueries);
+ GET_PROC(DeleteShader);
+ GET_PROC(DeleteTextures);
+ GET_PROC(DepthMask);
+ GET_PROC(Disable);
+ GET_PROC(DisableVertexAttribArray);
+ GET_PROC(DrawArrays);
+ GET_PROC(DrawBuffer);
+ GET_PROC(DrawBuffers);
+ GET_PROC(DrawElements);
+
+ if (glVer >= GR_GL_VER(3,1) || extensions.has("GL_ARB_draw_instanced") ||
+ extensions.has("GL_EXT_draw_instanced")) {
+ GET_PROC(DrawArraysInstanced);
+ GET_PROC(DrawElementsInstanced);
+ }
+
+ if (glVer >= GR_GL_VER(4,0) || extensions.has("GL_ARB_draw_indirect")) {
+ GET_PROC(DrawArraysIndirect);
+ GET_PROC(DrawElementsIndirect);
+ }
+ if (glVer >= GR_GL_VER(2,0)) {
+ GET_PROC(DrawRangeElements);
+ }
+ GET_PROC(Enable);
+ GET_PROC(EnableVertexAttribArray);
+ GET_PROC(EndQuery);
+ GET_PROC(Finish);
+ GET_PROC(Flush);
+ GET_PROC(FrontFace);
+ GET_PROC(GenBuffers);
+ GET_PROC(GenerateMipmap);
+ GET_PROC(GetBufferParameteriv);
+ GET_PROC(GetError);
+ GET_PROC(GetIntegerv);
+ if (glVer >= GR_GL_VER(3,2) || extensions.has("GL_ARB_texture_multisample")) {
+ GET_PROC(GetMultisamplefv);
+ }
+ GET_PROC(GetQueryObjectiv);
+ GET_PROC(GetQueryObjectuiv);
+ if (glVer >= GR_GL_VER(3,3) || extensions.has("GL_ARB_timer_query")) {
+ GET_PROC(GetQueryObjecti64v);
+ GET_PROC(GetQueryObjectui64v);
+ GET_PROC(QueryCounter);
+ } else if (extensions.has("GL_EXT_timer_query")) {
+ GET_PROC_SUFFIX(GetQueryObjecti64v, EXT);
+ GET_PROC_SUFFIX(GetQueryObjectui64v, EXT);
+ }
+ GET_PROC(GetQueryiv);
+ GET_PROC(GetProgramInfoLog);
+ GET_PROC(GetProgramiv);
+ GET_PROC(GetShaderInfoLog);
+ GET_PROC(GetShaderiv);
+ GET_PROC(GetString);
+ GET_PROC(GetStringi);
+ GET_PROC(GetShaderPrecisionFormat);
+ GET_PROC(GetTexLevelParameteriv);
+ GET_PROC(GenQueries);
+ GET_PROC(GenTextures);
+ GET_PROC(GetUniformLocation);
+ GET_PROC(IsTexture);
+ GET_PROC(LineWidth);
+ GET_PROC(LinkProgram);
+ GET_PROC(MapBuffer);
+
+ if (glVer >= GR_GL_VER(4,3) || extensions.has("GL_ARB_multi_draw_indirect")) {
+ GET_PROC(MultiDrawArraysIndirect);
+ GET_PROC(MultiDrawElementsIndirect);
+ }
+
+ GET_PROC(PixelStorei);
+ if (extensions.has("GL_EXT_raster_multisample")) {
+ GET_PROC_SUFFIX(RasterSamples, EXT);
+ }
+ GET_PROC(ReadBuffer);
+ GET_PROC(ReadPixels);
+ GET_PROC(Scissor);
+ GET_PROC(ShaderSource);
+ GET_PROC(StencilFunc);
+ GET_PROC(StencilFuncSeparate);
+ GET_PROC(StencilMask);
+ GET_PROC(StencilMaskSeparate);
+ GET_PROC(StencilOp);
+ GET_PROC(StencilOpSeparate);
+ if (glVer >= GR_GL_VER(3,1)) {
+ GET_PROC(TexBuffer);
+ }
+ if (glVer >= GR_GL_VER(4,3)) {
+ GET_PROC(TexBufferRange);
+ }
+ GET_PROC(TexImage2D);
+ GET_PROC(TexParameteri);
+ GET_PROC(TexParameteriv);
+ if (glVer >= GR_GL_VER(4,2) || extensions.has("GL_ARB_texture_storage")) {
+ GET_PROC(TexStorage2D);
+ } else if (extensions.has("GL_EXT_texture_storage")) {
+ GET_PROC_SUFFIX(TexStorage2D, EXT);
+ }
+ GET_PROC(TexSubImage2D);
+ if (glVer >= GR_GL_VER(4,5) || extensions.has("GL_ARB_texture_barrier")) {
+ GET_PROC(TextureBarrier);
+ } else if (extensions.has("GL_NV_texture_barrier")) {
+ GET_PROC_SUFFIX(TextureBarrier, NV);
+ }
+ GET_PROC(Uniform1f);
+ GET_PROC(Uniform1i);
+ GET_PROC(Uniform1fv);
+ GET_PROC(Uniform1iv);
+ GET_PROC(Uniform2f);
+ GET_PROC(Uniform2i);
+ GET_PROC(Uniform2fv);
+ GET_PROC(Uniform2iv);
+ GET_PROC(Uniform3f);
+ GET_PROC(Uniform3i);
+ GET_PROC(Uniform3fv);
+ GET_PROC(Uniform3iv);
+ GET_PROC(Uniform4f);
+ GET_PROC(Uniform4i);
+ GET_PROC(Uniform4fv);
+ GET_PROC(Uniform4iv);
+ GET_PROC(UniformMatrix2fv);
+ GET_PROC(UniformMatrix3fv);
+ GET_PROC(UniformMatrix4fv);
+ GET_PROC(UnmapBuffer);
+ GET_PROC(UseProgram);
+ GET_PROC(VertexAttrib1f);
+ GET_PROC(VertexAttrib2fv);
+ GET_PROC(VertexAttrib3fv);
+ GET_PROC(VertexAttrib4fv);
+
+ if (glVer >= GR_GL_VER(3,2) || extensions.has("GL_ARB_instanced_arrays")) {
+ GET_PROC(VertexAttribDivisor);
+ }
+
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC(VertexAttribIPointer);
+ }
+
+ GET_PROC(VertexAttribPointer);
+ GET_PROC(Viewport);
+ GET_PROC(BindFragDataLocationIndexed);
+
+ if (glVer >= GR_GL_VER(3,0) || extensions.has("GL_ARB_vertex_array_object")) {
+ // no ARB suffix for GL_ARB_vertex_array_object
+ GET_PROC(BindVertexArray);
+ GET_PROC(GenVertexArrays);
+ GET_PROC(DeleteVertexArrays);
+ } else if (extensions.has("GL_APPLE_vertex_array_object")) {
+ GET_PROC_SUFFIX(BindVertexArray, APPLE);
+ GET_PROC_SUFFIX(GenVertexArrays, APPLE);
+ GET_PROC_SUFFIX(DeleteVertexArrays, APPLE);
+ }
+
+ if (glVer >= GR_GL_VER(3,0) || extensions.has("GL_ARB_map_buffer_range")) {
+ GET_PROC(MapBufferRange);
+ GET_PROC(FlushMappedBufferRange);
+ }
+
+    // First look for GL 3.0 FBO support or GL_ARB_framebuffer_object (the entry points are
+    // the same, since GL_ARB_framebuffer_object doesn't use the ARB suffix.)
+ if (glVer >= GR_GL_VER(3,0) || extensions.has("GL_ARB_framebuffer_object")) {
+ GET_PROC(GenFramebuffers);
+ GET_PROC(GetFramebufferAttachmentParameteriv);
+ GET_PROC(GetRenderbufferParameteriv);
+ GET_PROC(BindFramebuffer);
+ GET_PROC(FramebufferTexture2D);
+ GET_PROC(CheckFramebufferStatus);
+ GET_PROC(DeleteFramebuffers);
+ GET_PROC(RenderbufferStorage);
+ GET_PROC(GenRenderbuffers);
+ GET_PROC(DeleteRenderbuffers);
+ GET_PROC(FramebufferRenderbuffer);
+ GET_PROC(BindRenderbuffer);
+ GET_PROC(RenderbufferStorageMultisample);
+ GET_PROC(BlitFramebuffer);
+ } else if (extensions.has("GL_EXT_framebuffer_object")) {
+ GET_PROC_SUFFIX(GenFramebuffers, EXT);
+ GET_PROC_SUFFIX(GetFramebufferAttachmentParameteriv, EXT);
+ GET_PROC_SUFFIX(GetRenderbufferParameteriv, EXT);
+ GET_PROC_SUFFIX(BindFramebuffer, EXT);
+ GET_PROC_SUFFIX(FramebufferTexture2D, EXT);
+ GET_PROC_SUFFIX(CheckFramebufferStatus, EXT);
+ GET_PROC_SUFFIX(DeleteFramebuffers, EXT);
+ GET_PROC_SUFFIX(RenderbufferStorage, EXT);
+ GET_PROC_SUFFIX(GenRenderbuffers, EXT);
+ GET_PROC_SUFFIX(DeleteRenderbuffers, EXT);
+ GET_PROC_SUFFIX(FramebufferRenderbuffer, EXT);
+ GET_PROC_SUFFIX(BindRenderbuffer, EXT);
+ if (extensions.has("GL_EXT_framebuffer_multisample")) {
+ GET_PROC_SUFFIX(RenderbufferStorageMultisample, EXT);
+ }
+ if (extensions.has("GL_EXT_framebuffer_blit")) {
+ GET_PROC_SUFFIX(BlitFramebuffer, EXT);
+ }
+ } else {
+ // we must have FBOs
+ delete interface;
+ return nullptr;
+ }
+
+ if (extensions.has("GL_NV_path_rendering")) {
+ GET_PROC_SUFFIX(MatrixLoadf, EXT);
+ GET_PROC_SUFFIX(MatrixLoadIdentity, EXT);
+ GET_PROC_SUFFIX(PathCommands, NV);
+ GET_PROC_SUFFIX(PathParameteri, NV);
+ GET_PROC_SUFFIX(PathParameterf, NV);
+ GET_PROC_SUFFIX(GenPaths, NV);
+ GET_PROC_SUFFIX(DeletePaths, NV);
+ GET_PROC_SUFFIX(IsPath, NV);
+ GET_PROC_SUFFIX(PathStencilFunc, NV);
+ GET_PROC_SUFFIX(StencilFillPath, NV);
+ GET_PROC_SUFFIX(StencilStrokePath, NV);
+ GET_PROC_SUFFIX(StencilFillPathInstanced, NV);
+ GET_PROC_SUFFIX(StencilStrokePathInstanced, NV);
+ GET_PROC_SUFFIX(CoverFillPath, NV);
+ GET_PROC_SUFFIX(CoverStrokePath, NV);
+ GET_PROC_SUFFIX(CoverFillPathInstanced, NV);
+ GET_PROC_SUFFIX(CoverStrokePathInstanced, NV);
+ GET_PROC_SUFFIX(StencilThenCoverFillPath, NV);
+ GET_PROC_SUFFIX(StencilThenCoverStrokePath, NV);
+ GET_PROC_SUFFIX(StencilThenCoverFillPathInstanced, NV);
+ GET_PROC_SUFFIX(StencilThenCoverStrokePathInstanced, NV);
+ GET_PROC_SUFFIX(ProgramPathFragmentInputGen, NV);
+ }
+
+ if (extensions.has("GL_NV_framebuffer_mixed_samples")) {
+ GET_PROC_SUFFIX(CoverageModulation, NV);
+ }
+
+ if (extensions.has("GL_EXT_debug_marker")) {
+ GET_PROC_SUFFIX(InsertEventMarker, EXT);
+ GET_PROC_SUFFIX(PushGroupMarker, EXT);
+ GET_PROC_SUFFIX(PopGroupMarker, EXT);
+ }
+
+ if (glVer >= GR_GL_VER(4,3) || extensions.has("GL_ARB_invalidate_subdata")) {
+ GET_PROC(InvalidateBufferData);
+ GET_PROC(InvalidateBufferSubData);
+ GET_PROC(InvalidateFramebuffer);
+ GET_PROC(InvalidateSubFramebuffer);
+ GET_PROC(InvalidateTexImage);
+ GET_PROC(InvalidateTexSubImage);
+ }
+
+ if (glVer >= GR_GL_VER(4,3) || extensions.has("GL_ARB_program_interface_query")) {
+ GET_PROC(GetProgramResourceLocation);
+ }
+
+ if (extensions.has("GL_NV_bindless_texture")) {
+ GET_PROC_SUFFIX(GetTextureHandle, NV);
+ GET_PROC_SUFFIX(GetTextureSamplerHandle, NV);
+ GET_PROC_SUFFIX(MakeTextureHandleResident, NV);
+ GET_PROC_SUFFIX(MakeTextureHandleNonResident, NV);
+ GET_PROC_SUFFIX(GetImageHandle, NV);
+ GET_PROC_SUFFIX(MakeImageHandleResident, NV);
+ GET_PROC_SUFFIX(MakeImageHandleNonResident, NV);
+ GET_PROC_SUFFIX(IsTextureHandleResident, NV);
+ GET_PROC_SUFFIX(IsImageHandleResident, NV);
+ GET_PROC_SUFFIX(UniformHandleui64, NV);
+ GET_PROC_SUFFIX(UniformHandleui64v, NV);
+ GET_PROC_SUFFIX(ProgramUniformHandleui64, NV);
+ GET_PROC_SUFFIX(ProgramUniformHandleui64v, NV);
+ }
+
+ if (extensions.has("GL_EXT_direct_state_access")) {
+ GET_PROC_SUFFIX(TextureParameteri, EXT);
+ GET_PROC_SUFFIX(TextureParameteriv, EXT);
+ GET_PROC_SUFFIX(TextureParameterf, EXT);
+ GET_PROC_SUFFIX(TextureParameterfv, EXT);
+ GET_PROC_SUFFIX(TextureImage1D, EXT);
+ GET_PROC_SUFFIX(TextureImage2D, EXT);
+ GET_PROC_SUFFIX(TextureSubImage1D, EXT);
+ GET_PROC_SUFFIX(TextureSubImage2D, EXT);
+ GET_PROC_SUFFIX(CopyTextureImage1D, EXT);
+ GET_PROC_SUFFIX(CopyTextureImage2D, EXT);
+ GET_PROC_SUFFIX(CopyTextureSubImage1D, EXT);
+ GET_PROC_SUFFIX(CopyTextureSubImage2D, EXT);
+ GET_PROC_SUFFIX(GetTextureImage, EXT);
+ GET_PROC_SUFFIX(GetTextureParameterfv, EXT);
+ GET_PROC_SUFFIX(GetTextureParameteriv, EXT);
+ GET_PROC_SUFFIX(GetTextureLevelParameterfv, EXT);
+ GET_PROC_SUFFIX(GetTextureLevelParameteriv, EXT);
+ if (glVer >= GR_GL_VER(1,2)) {
+ GET_PROC_SUFFIX(TextureImage3D, EXT);
+ GET_PROC_SUFFIX(TextureSubImage3D, EXT);
+ GET_PROC_SUFFIX(CopyTextureSubImage3D, EXT);
+ GET_PROC_SUFFIX(CompressedTextureImage3D, EXT);
+ GET_PROC_SUFFIX(CompressedTextureImage2D, EXT);
+ GET_PROC_SUFFIX(CompressedTextureImage1D, EXT);
+ GET_PROC_SUFFIX(CompressedTextureSubImage3D, EXT);
+ GET_PROC_SUFFIX(CompressedTextureSubImage2D, EXT);
+ GET_PROC_SUFFIX(CompressedTextureSubImage1D, EXT);
+ GET_PROC_SUFFIX(GetCompressedTextureImage, EXT);
+ }
+ if (glVer >= GR_GL_VER(1,5)) {
+ GET_PROC_SUFFIX(NamedBufferData, EXT);
+ GET_PROC_SUFFIX(NamedBufferSubData, EXT);
+ GET_PROC_SUFFIX(MapNamedBuffer, EXT);
+ GET_PROC_SUFFIX(UnmapNamedBuffer, EXT);
+ GET_PROC_SUFFIX(GetNamedBufferParameteriv, EXT);
+ GET_PROC_SUFFIX(GetNamedBufferPointerv, EXT);
+ GET_PROC_SUFFIX(GetNamedBufferSubData, EXT);
+ }
+ if (glVer >= GR_GL_VER(2,0)) {
+ GET_PROC_SUFFIX(ProgramUniform1f, EXT);
+ GET_PROC_SUFFIX(ProgramUniform2f, EXT);
+ GET_PROC_SUFFIX(ProgramUniform3f, EXT);
+ GET_PROC_SUFFIX(ProgramUniform4f, EXT);
+ GET_PROC_SUFFIX(ProgramUniform1i, EXT);
+ GET_PROC_SUFFIX(ProgramUniform2i, EXT);
+ GET_PROC_SUFFIX(ProgramUniform3i, EXT);
+ GET_PROC_SUFFIX(ProgramUniform4i, EXT);
+ GET_PROC_SUFFIX(ProgramUniform1fv, EXT);
+ GET_PROC_SUFFIX(ProgramUniform2fv, EXT);
+ GET_PROC_SUFFIX(ProgramUniform3fv, EXT);
+ GET_PROC_SUFFIX(ProgramUniform4fv, EXT);
+ GET_PROC_SUFFIX(ProgramUniform1iv, EXT);
+ GET_PROC_SUFFIX(ProgramUniform2iv, EXT);
+ GET_PROC_SUFFIX(ProgramUniform3iv, EXT);
+ GET_PROC_SUFFIX(ProgramUniform4iv, EXT);
+ GET_PROC_SUFFIX(ProgramUniformMatrix2fv, EXT);
+ GET_PROC_SUFFIX(ProgramUniformMatrix3fv, EXT);
+ GET_PROC_SUFFIX(ProgramUniformMatrix4fv, EXT);
+ }
+ if (glVer >= GR_GL_VER(2,1)) {
+ GET_PROC_SUFFIX(ProgramUniformMatrix2x3fv, EXT);
+ GET_PROC_SUFFIX(ProgramUniformMatrix3x2fv, EXT);
+ GET_PROC_SUFFIX(ProgramUniformMatrix2x4fv, EXT);
+ GET_PROC_SUFFIX(ProgramUniformMatrix4x2fv, EXT);
+ GET_PROC_SUFFIX(ProgramUniformMatrix3x4fv, EXT);
+ GET_PROC_SUFFIX(ProgramUniformMatrix4x3fv, EXT);
+ }
+ if (glVer >= GR_GL_VER(3,0)) {
+ GET_PROC_SUFFIX(NamedRenderbufferStorage, EXT);
+ GET_PROC_SUFFIX(GetNamedRenderbufferParameteriv, EXT);
+ GET_PROC_SUFFIX(NamedRenderbufferStorageMultisample, EXT);
+ GET_PROC_SUFFIX(CheckNamedFramebufferStatus, EXT);
+ GET_PROC_SUFFIX(NamedFramebufferTexture1D, EXT);
+ GET_PROC_SUFFIX(NamedFramebufferTexture2D, EXT);
+ GET_PROC_SUFFIX(NamedFramebufferTexture3D, EXT);
+ GET_PROC_SUFFIX(NamedFramebufferRenderbuffer, EXT);
+ GET_PROC_SUFFIX(GetNamedFramebufferAttachmentParameteriv, EXT);
+ GET_PROC_SUFFIX(GenerateTextureMipmap, EXT);
+ GET_PROC_SUFFIX(FramebufferDrawBuffer, EXT);
+ GET_PROC_SUFFIX(FramebufferDrawBuffers, EXT);
+ GET_PROC_SUFFIX(FramebufferReadBuffer, EXT);
+ GET_PROC_SUFFIX(GetFramebufferParameteriv, EXT);
+ GET_PROC_SUFFIX(NamedCopyBufferSubData, EXT);
+ GET_PROC_SUFFIX(VertexArrayVertexOffset, EXT);
+ GET_PROC_SUFFIX(VertexArrayColorOffset, EXT);
+ GET_PROC_SUFFIX(VertexArrayEdgeFlagOffset, EXT);
+ GET_PROC_SUFFIX(VertexArrayIndexOffset, EXT);
+ GET_PROC_SUFFIX(VertexArrayNormalOffset, EXT);
+ GET_PROC_SUFFIX(VertexArrayTexCoordOffset, EXT);
+ GET_PROC_SUFFIX(VertexArrayMultiTexCoordOffset, EXT);
+ GET_PROC_SUFFIX(VertexArrayFogCoordOffset, EXT);
+ GET_PROC_SUFFIX(VertexArraySecondaryColorOffset, EXT);
+ GET_PROC_SUFFIX(VertexArrayVertexAttribOffset, EXT);
+ GET_PROC_SUFFIX(VertexArrayVertexAttribIOffset, EXT);
+ GET_PROC_SUFFIX(EnableVertexArray, EXT);
+ GET_PROC_SUFFIX(DisableVertexArray, EXT);
+ GET_PROC_SUFFIX(EnableVertexArrayAttrib, EXT);
+ GET_PROC_SUFFIX(DisableVertexArrayAttrib, EXT);
+ GET_PROC_SUFFIX(GetVertexArrayIntegerv, EXT);
+ GET_PROC_SUFFIX(GetVertexArrayPointerv, EXT);
+ GET_PROC_SUFFIX(GetVertexArrayIntegeri_v, EXT);
+ GET_PROC_SUFFIX(GetVertexArrayPointeri_v, EXT);
+ GET_PROC_SUFFIX(MapNamedBufferRange, EXT);
+ GET_PROC_SUFFIX(FlushMappedNamedBufferRange, EXT);
+ }
+ if (glVer >= GR_GL_VER(3,1)) {
+ GET_PROC_SUFFIX(TextureBuffer, EXT);
+ }
+ }
+
+ if (glVer >= GR_GL_VER(4,3) || extensions.has("GL_KHR_debug")) {
+ // KHR_debug defines these methods to have no suffix in an OpenGL (not ES) context.
+ GET_PROC(DebugMessageControl);
+ GET_PROC(DebugMessageInsert);
+ GET_PROC(DebugMessageCallback);
+ GET_PROC(GetDebugMessageLog);
+ GET_PROC(PushDebugGroup);
+ GET_PROC(PopDebugGroup);
+ GET_PROC(ObjectLabel);
+ }
+
+ if (extensions.has("GL_EXT_window_rectangles")) {
+ GET_PROC_SUFFIX(WindowRectangles, EXT);
+ }
+
+ if (extensions.has("EGL_KHR_image") || extensions.has("EGL_KHR_image_base")) {
+ GET_EGL_PROC_SUFFIX(CreateImage, KHR);
+ GET_EGL_PROC_SUFFIX(DestroyImage, KHR);
+ }
+
+ if (glVer >= GR_GL_VER(4, 0) || extensions.has("GL_ARB_sample_shading")) {
+ GET_PROC(MinSampleShading);
+ }
+
+ if (glVer >= GR_GL_VER(3, 2) || extensions.has("GL_ARB_sync")) {
+ GET_PROC(FenceSync);
+ GET_PROC(ClientWaitSync);
+ GET_PROC(DeleteSync);
+ }
+
+ interface->fStandard = kGL_GrGLStandard;
+ interface->fExtensions.swap(&extensions);
+
+ return interface;
+}
+
+const GrGLInterface* GrGLAssembleGLESInterface(void* ctx, GrGLGetProc get) {
+ GET_PROC_LOCAL(GetString);
+ if (nullptr == GetString) {
+ return nullptr;
+ }
+
+ const char* verStr = reinterpret_cast<const char*>(GetString(GR_GL_VERSION));
+ GrGLVersion version = GrGLGetVersionFromString(verStr);
+
+ if (version < GR_GL_VER(2,0)) {
+ return nullptr;
+ }
+
+ GET_PROC_LOCAL(GetIntegerv);
+ GET_PROC_LOCAL(GetStringi);
+ GrEGLQueryStringProc queryString;
+ GrEGLDisplay display;
+ get_egl_query_and_display(&queryString, &display, ctx, get);
+ GrGLExtensions extensions;
+ if (!extensions.init(kGLES_GrGLStandard, GetString, GetStringi, GetIntegerv, queryString,
+ display)) {
+ return nullptr;
+ }
+
+ GrGLInterface* interface = new GrGLInterface;
+ GrGLInterface::Functions* functions = &interface->fFunctions;
+
+ GET_PROC(ActiveTexture);
+ GET_PROC(AttachShader);
+ GET_PROC(BindAttribLocation);
+ GET_PROC(BindBuffer);
+ GET_PROC(BindTexture);
+ GET_PROC_SUFFIX(BindVertexArray, OES);
+
+ if (version >= GR_GL_VER(3,0) && extensions.has("GL_EXT_blend_func_extended")) {
+ GET_PROC_SUFFIX(BindFragDataLocation, EXT);
+ GET_PROC_SUFFIX(BindFragDataLocationIndexed, EXT);
+ }
+
+ if (extensions.has("GL_KHR_blend_equation_advanced")) {
+ GET_PROC_SUFFIX(BlendBarrier, KHR);
+ } else if (extensions.has("GL_NV_blend_equation_advanced")) {
+ GET_PROC_SUFFIX(BlendBarrier, NV);
+ }
+
+ GET_PROC(BlendColor);
+ GET_PROC(BlendEquation);
+ GET_PROC(BlendFunc);
+ GET_PROC(BufferData);
+ GET_PROC(BufferSubData);
+ GET_PROC(Clear);
+ GET_PROC(ClearColor);
+ GET_PROC(ClearStencil);
+ GET_PROC(ColorMask);
+ GET_PROC(CompileShader);
+ GET_PROC(CompressedTexImage2D);
+ GET_PROC(CompressedTexSubImage2D);
+ GET_PROC(CopyTexSubImage2D);
+ GET_PROC(CreateProgram);
+ GET_PROC(CreateShader);
+ GET_PROC(CullFace);
+ GET_PROC(DeleteBuffers);
+ GET_PROC(DeleteProgram);
+ GET_PROC(DeleteShader);
+ GET_PROC(DeleteTextures);
+ GET_PROC_SUFFIX(DeleteVertexArrays, OES);
+ GET_PROC(DepthMask);
+ GET_PROC(Disable);
+ GET_PROC(DisableVertexAttribArray);
+ GET_PROC(DrawArrays);
+
+ if (version >= GR_GL_VER(3,0)) {
+ GET_PROC(DrawArraysInstanced);
+ GET_PROC(DrawElementsInstanced);
+ } else if (extensions.has("GL_EXT_draw_instanced")) {
+ GET_PROC_SUFFIX(DrawArraysInstanced, EXT);
+ GET_PROC_SUFFIX(DrawElementsInstanced, EXT);
+ }
+
+ if (version >= GR_GL_VER(3,1)) {
+ GET_PROC(DrawArraysIndirect);
+ GET_PROC(DrawElementsIndirect);
+ }
+
+ GET_PROC(DrawElements);
+ if (version >= GR_GL_VER(3,0)) {
+ GET_PROC(DrawRangeElements);
+ }
+ GET_PROC(Enable);
+ GET_PROC(EnableVertexAttribArray);
+ GET_PROC(Finish);
+ GET_PROC(Flush);
+ GET_PROC(FrontFace);
+ GET_PROC(GenBuffers);
+ GET_PROC(GenerateMipmap);
+ GET_PROC(GenTextures);
+ GET_PROC_SUFFIX(GenVertexArrays, OES);
+ GET_PROC(GetBufferParameteriv);
+ GET_PROC(GetError);
+ GET_PROC(GetIntegerv);
+
+ if (version >= GR_GL_VER(3,1)) {
+ GET_PROC(GetMultisamplefv);
+ }
+
+ GET_PROC(GetProgramInfoLog);
+ GET_PROC(GetProgramiv);
+ GET_PROC(GetShaderInfoLog);
+ GET_PROC(GetShaderPrecisionFormat);
+ GET_PROC(GetShaderiv);
+ GET_PROC(GetString);
+ GET_PROC(GetStringi);
+ GET_PROC(GetUniformLocation);
+ GET_PROC(IsTexture);
+ GET_PROC(LineWidth);
+ GET_PROC(LinkProgram);
+
+ if (extensions.has("GL_EXT_multi_draw_indirect")) {
+ GET_PROC_SUFFIX(MultiDrawArraysIndirect, EXT);
+ GET_PROC_SUFFIX(MultiDrawElementsIndirect, EXT);
+ }
+
+ GET_PROC(PixelStorei);
+
+ if (extensions.has("GL_EXT_raster_multisample")) {
+ GET_PROC_SUFFIX(RasterSamples, EXT);
+ }
+
+ GET_PROC(ReadPixels);
+ GET_PROC(Scissor);
+ GET_PROC(ShaderSource);
+ GET_PROC(StencilFunc);
+ GET_PROC(StencilFuncSeparate);
+ GET_PROC(StencilMask);
+ GET_PROC(StencilMaskSeparate);
+ GET_PROC(StencilOp);
+ GET_PROC(StencilOpSeparate);
+
+ if (version >= GR_GL_VER(3,2)) {
+ GET_PROC(TexBuffer);
+ GET_PROC(TexBufferRange);
+ } else if (extensions.has("GL_OES_texture_buffer")) {
+ GET_PROC_SUFFIX(TexBuffer, OES);
+ GET_PROC_SUFFIX(TexBufferRange, OES);
+ } else if (extensions.has("GL_EXT_texture_buffer")) {
+ GET_PROC_SUFFIX(TexBuffer, EXT);
+ GET_PROC_SUFFIX(TexBufferRange, EXT);
+ }
+
+ GET_PROC(TexImage2D);
+ GET_PROC(TexParameteri);
+ GET_PROC(TexParameteriv);
+ GET_PROC(TexSubImage2D);
+
+ if (version >= GR_GL_VER(3,0)) {
+ GET_PROC(TexStorage2D);
+ } else {
+ GET_PROC_SUFFIX(TexStorage2D, EXT);
+ }
+
+ if (extensions.has("GL_NV_texture_barrier")) {
+ GET_PROC_SUFFIX(TextureBarrier, NV);
+ }
+
+ GET_PROC_SUFFIX(DiscardFramebuffer, EXT);
+ GET_PROC(Uniform1f);
+ GET_PROC(Uniform1i);
+ GET_PROC(Uniform1fv);
+ GET_PROC(Uniform1iv);
+ GET_PROC(Uniform2f);
+ GET_PROC(Uniform2i);
+ GET_PROC(Uniform2fv);
+ GET_PROC(Uniform2iv);
+ GET_PROC(Uniform3f);
+ GET_PROC(Uniform3i);
+ GET_PROC(Uniform3fv);
+ GET_PROC(Uniform3iv);
+ GET_PROC(Uniform4f);
+ GET_PROC(Uniform4i);
+ GET_PROC(Uniform4fv);
+ GET_PROC(Uniform4iv);
+ GET_PROC(UniformMatrix2fv);
+ GET_PROC(UniformMatrix3fv);
+ GET_PROC(UniformMatrix4fv);
+ GET_PROC(UseProgram);
+ GET_PROC(VertexAttrib1f);
+ GET_PROC(VertexAttrib2fv);
+ GET_PROC(VertexAttrib3fv);
+ GET_PROC(VertexAttrib4fv);
+
+ if (version >= GR_GL_VER(3,0)) {
+ GET_PROC(VertexAttribDivisor);
+ } else if (extensions.has("GL_EXT_instanced_arrays")) {
+ GET_PROC_SUFFIX(VertexAttribDivisor, EXT);
+ }
+
+ if (version >= GR_GL_VER(3,0)) {
+ GET_PROC(VertexAttribIPointer);
+ }
+
+ GET_PROC(VertexAttribPointer);
+ GET_PROC(Viewport);
+ GET_PROC(BindFramebuffer);
+ GET_PROC(BindRenderbuffer);
+ GET_PROC(CheckFramebufferStatus);
+ GET_PROC(DeleteFramebuffers);
+ GET_PROC(DeleteRenderbuffers);
+ GET_PROC(FramebufferRenderbuffer);
+ GET_PROC(FramebufferTexture2D);
+
+ if (extensions.has("GL_CHROMIUM_framebuffer_multisample")) {
+ GET_PROC_SUFFIX(RenderbufferStorageMultisample, CHROMIUM);
+ GET_PROC_SUFFIX(BlitFramebuffer, CHROMIUM);
+ } else if (version >= GR_GL_VER(3,0)) {
+ GET_PROC(RenderbufferStorageMultisample);
+ GET_PROC(BlitFramebuffer);
+ }
+
+ if (extensions.has("GL_CHROMIUM_map_sub")) {
+ GET_PROC_SUFFIX(MapBufferSubData, CHROMIUM);
+ GET_PROC_SUFFIX(MapTexSubImage2D, CHROMIUM);
+ GET_PROC_SUFFIX(UnmapBufferSubData, CHROMIUM);
+ GET_PROC_SUFFIX(UnmapTexSubImage2D, CHROMIUM);
+ }
+
+ if (extensions.has("GL_EXT_multisampled_render_to_texture")) {
+ GET_PROC_SUFFIX(FramebufferTexture2DMultisample, EXT);
+ functions->fRenderbufferStorageMultisampleES2EXT = (GrGLRenderbufferStorageMultisampleProc) get(ctx, "glRenderbufferStorageMultisampleEXT");
+ } else if (extensions.has("GL_IMG_multisampled_render_to_texture")) {
+ GET_PROC_SUFFIX(FramebufferTexture2DMultisample, IMG);
+ functions->fRenderbufferStorageMultisampleES2EXT = (GrGLRenderbufferStorageMultisampleProc) get(ctx, "glRenderbufferStorageMultisampleIMG");
+ } else if (extensions.has("GL_APPLE_framebuffer_multisample")) {
+ functions->fRenderbufferStorageMultisampleES2APPLE = (GrGLRenderbufferStorageMultisampleProc) get(ctx, "glRenderbufferStorageMultisampleAPPLE");
+ GET_PROC_SUFFIX(ResolveMultisampleFramebuffer, APPLE);
+ }
+
+ GET_PROC(GenFramebuffers);
+ GET_PROC(GenRenderbuffers);
+ GET_PROC(GetFramebufferAttachmentParameteriv);
+ GET_PROC(GetRenderbufferParameteriv);
+ GET_PROC(RenderbufferStorage);
+
+ GET_PROC_SUFFIX(MapBuffer, OES);
+ GET_PROC_SUFFIX(UnmapBuffer, OES);
+
+ if (version >= GR_GL_VER(3,0)) {
+ GET_PROC(MapBufferRange);
+ GET_PROC(FlushMappedBufferRange);
+ } else if (extensions.has("GL_EXT_map_buffer_range")) {
+ GET_PROC_SUFFIX(MapBufferRange, EXT);
+ GET_PROC_SUFFIX(FlushMappedBufferRange, EXT);
+ }
+
+ if (extensions.has("GL_EXT_debug_marker")) {
+ GET_PROC(InsertEventMarker);
+ GET_PROC(PushGroupMarker);
+ GET_PROC(PopGroupMarker);
+        // This check exists because a device has been found that advertises the extension string
+        // but returns nullptr from eglGetProcAddress for these functions.
+ if (nullptr == functions->fInsertEventMarker ||
+ nullptr == functions->fPushGroupMarker ||
+ nullptr == functions->fPopGroupMarker) {
+ extensions.remove("GL_EXT_debug_marker");
+ }
+ }
+
+ GET_PROC(InvalidateFramebuffer);
+ GET_PROC(InvalidateSubFramebuffer);
+ GET_PROC(InvalidateBufferData);
+ GET_PROC(InvalidateBufferSubData);
+ GET_PROC(InvalidateTexImage);
+ GET_PROC(InvalidateTexSubImage);
+
+ if (version >= GR_GL_VER(3,1)) {
+ GET_PROC(GetProgramResourceLocation);
+ }
+
+ if (extensions.has("GL_NV_path_rendering")) {
+ GET_PROC_SUFFIX(MatrixLoadf, EXT);
+ GET_PROC_SUFFIX(MatrixLoadIdentity, EXT);
+ GET_PROC_SUFFIX(PathCommands, NV);
+ GET_PROC_SUFFIX(PathParameteri, NV);
+ GET_PROC_SUFFIX(PathParameterf, NV);
+ GET_PROC_SUFFIX(GenPaths, NV);
+ GET_PROC_SUFFIX(DeletePaths, NV);
+ GET_PROC_SUFFIX(IsPath, NV);
+ GET_PROC_SUFFIX(PathStencilFunc, NV);
+ GET_PROC_SUFFIX(StencilFillPath, NV);
+ GET_PROC_SUFFIX(StencilStrokePath, NV);
+ GET_PROC_SUFFIX(StencilFillPathInstanced, NV);
+ GET_PROC_SUFFIX(StencilStrokePathInstanced, NV);
+ GET_PROC_SUFFIX(CoverFillPath, NV);
+ GET_PROC_SUFFIX(CoverStrokePath, NV);
+ GET_PROC_SUFFIX(CoverFillPathInstanced, NV);
+ GET_PROC_SUFFIX(CoverStrokePathInstanced, NV);
+ GET_PROC_SUFFIX(StencilThenCoverFillPath, NV);
+ GET_PROC_SUFFIX(StencilThenCoverStrokePath, NV);
+ GET_PROC_SUFFIX(StencilThenCoverFillPathInstanced, NV);
+ GET_PROC_SUFFIX(StencilThenCoverStrokePathInstanced, NV);
+ GET_PROC_SUFFIX(ProgramPathFragmentInputGen, NV);
+ }
+
+ if (extensions.has("GL_CHROMIUM_path_rendering")) {
+ GET_PROC_SUFFIX(MatrixLoadf, CHROMIUM);
+ GET_PROC_SUFFIX(MatrixLoadIdentity, CHROMIUM);
+ GET_PROC_SUFFIX(PathCommands, CHROMIUM);
+ GET_PROC_SUFFIX(PathParameteri, CHROMIUM);
+ GET_PROC_SUFFIX(PathParameterf, CHROMIUM);
+ GET_PROC_SUFFIX(GenPaths, CHROMIUM);
+ GET_PROC_SUFFIX(DeletePaths, CHROMIUM);
+ GET_PROC_SUFFIX(IsPath, CHROMIUM);
+ GET_PROC_SUFFIX(PathStencilFunc, CHROMIUM);
+ GET_PROC_SUFFIX(StencilFillPath, CHROMIUM);
+ GET_PROC_SUFFIX(StencilStrokePath, CHROMIUM);
+ GET_PROC_SUFFIX(StencilFillPathInstanced, CHROMIUM);
+ GET_PROC_SUFFIX(StencilStrokePathInstanced, CHROMIUM);
+ GET_PROC_SUFFIX(CoverFillPath, CHROMIUM);
+ GET_PROC_SUFFIX(CoverStrokePath, CHROMIUM);
+ GET_PROC_SUFFIX(CoverFillPathInstanced, CHROMIUM);
+ GET_PROC_SUFFIX(CoverStrokePathInstanced, CHROMIUM);
+ GET_PROC_SUFFIX(StencilThenCoverFillPath, CHROMIUM);
+ GET_PROC_SUFFIX(StencilThenCoverStrokePath, CHROMIUM);
+ GET_PROC_SUFFIX(StencilThenCoverFillPathInstanced, CHROMIUM);
+ GET_PROC_SUFFIX(StencilThenCoverStrokePathInstanced, CHROMIUM);
+ GET_PROC_SUFFIX(ProgramPathFragmentInputGen, CHROMIUM);
+ // GL_CHROMIUM_path_rendering additions:
+ GET_PROC_SUFFIX(BindFragmentInputLocation, CHROMIUM);
+ }
+
+ if (extensions.has("GL_NV_framebuffer_mixed_samples")) {
+ GET_PROC_SUFFIX(CoverageModulation, NV);
+ }
+ if (extensions.has("GL_CHROMIUM_framebuffer_mixed_samples")) {
+ GET_PROC_SUFFIX(CoverageModulation, CHROMIUM);
+ }
+
+ if (extensions.has("GL_NV_bindless_texture")) {
+ GET_PROC_SUFFIX(GetTextureHandle, NV);
+ GET_PROC_SUFFIX(GetTextureSamplerHandle, NV);
+ GET_PROC_SUFFIX(MakeTextureHandleResident, NV);
+ GET_PROC_SUFFIX(MakeTextureHandleNonResident, NV);
+ GET_PROC_SUFFIX(GetImageHandle, NV);
+ GET_PROC_SUFFIX(MakeImageHandleResident, NV);
+ GET_PROC_SUFFIX(MakeImageHandleNonResident, NV);
+ GET_PROC_SUFFIX(IsTextureHandleResident, NV);
+ GET_PROC_SUFFIX(IsImageHandleResident, NV);
+ GET_PROC_SUFFIX(UniformHandleui64, NV);
+ GET_PROC_SUFFIX(UniformHandleui64v, NV);
+ GET_PROC_SUFFIX(ProgramUniformHandleui64, NV);
+ GET_PROC_SUFFIX(ProgramUniformHandleui64v, NV);
+ }
+
+ if (extensions.has("GL_KHR_debug")) {
+ GET_PROC_SUFFIX(DebugMessageControl, KHR);
+ GET_PROC_SUFFIX(DebugMessageInsert, KHR);
+ GET_PROC_SUFFIX(DebugMessageCallback, KHR);
+ GET_PROC_SUFFIX(GetDebugMessageLog, KHR);
+ GET_PROC_SUFFIX(PushDebugGroup, KHR);
+ GET_PROC_SUFFIX(PopDebugGroup, KHR);
+ GET_PROC_SUFFIX(ObjectLabel, KHR);
+ // In general we have a policy against removing extension strings when the driver does
+ // not provide function pointers for an advertised extension. However, because there is a
+        // known device that advertises GL_KHR_debug but fails to provide the functions, and this is
+        // a debugging-only extension, we've made an exception. This can also happen when using
+ // APITRACE.
+ if (!interface->fFunctions.fDebugMessageControl) {
+ extensions.remove("GL_KHR_debug");
+ }
+ }
+
+ if (extensions.has("GL_CHROMIUM_bind_uniform_location")) {
+ GET_PROC_SUFFIX(BindUniformLocation, CHROMIUM);
+ }
+
+ if (extensions.has("GL_EXT_window_rectangles")) {
+ GET_PROC_SUFFIX(WindowRectangles, EXT);
+ }
+
+ if (extensions.has("EGL_KHR_image") || extensions.has("EGL_KHR_image_base")) {
+ GET_EGL_PROC_SUFFIX(CreateImage, KHR);
+ GET_EGL_PROC_SUFFIX(DestroyImage, KHR);
+ }
+
+ if (extensions.has("GL_OES_sample_shading")) {
+ GET_PROC_SUFFIX(MinSampleShading, OES);
+ }
+
+ if (version >= GR_GL_VER(3, 0)) {
+ GET_PROC(FenceSync);
+ GET_PROC(ClientWaitSync);
+ GET_PROC(DeleteSync);
+ }
+
+ interface->fStandard = kGLES_GrGLStandard;
+ interface->fExtensions.swap(&extensions);
+
+ return interface;
+}
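Both assembly functions above follow the same shape: query the version and extension strings, then bind every entry point through the caller-supplied `get` callback, preferring the unsuffixed core name when the detected version provides it and falling back to a suffixed extension name otherwise. The GET_PROC family of macros is defined earlier in GrGLAssembleInterface.cpp, outside this excerpt; the sketch below uses hypothetical macro bodies and reuses the local variables of the assembly function (functions, get, ctx, glVer, extensions), so treat it as illustrative rather than the exact upstream definitions.

    // Illustrative sketch only; the real macros live earlier in this file and
    // may differ in detail.
    #define GET_PROC(F) \
        functions->f##F = (GrGL##F##Proc) get(ctx, "gl" #F)
    #define GET_PROC_SUFFIX(F, S) \
        functions->f##F = (GrGL##F##Proc) get(ctx, "gl" #F #S)

    // Typical gating, as it appears throughout the functions above: prefer the
    // core entry point, otherwise bind the extension's suffixed name.
    if (glVer >= GR_GL_VER(3,0)) {
        GET_PROC(BindVertexArray);               // looks up "glBindVertexArray"
    } else if (extensions.has("GL_APPLE_vertex_array_object")) {
        GET_PROC_SUFFIX(BindVertexArray, APPLE); // looks up "glBindVertexArrayAPPLE"
    }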
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLBuffer.cpp b/gfx/skia/skia/src/gpu/gl/GrGLBuffer.cpp
new file mode 100644
index 000000000..96226b912
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLBuffer.cpp
@@ -0,0 +1,288 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLBuffer.h"
+#include "GrGLGpu.h"
+#include "SkTraceMemoryDump.h"
+
+#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
+#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glGpu()->glInterface(), RET, X)
+
+#if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
+ #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface)
+ #define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call)
+ #define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface)
+#else
+ #define CLEAR_ERROR_BEFORE_ALLOC(iface)
+ #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call)
+ #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR
+#endif
+
+#ifdef SK_DEBUG
+#define VALIDATE() this->validate()
+#else
+#define VALIDATE() do {} while(false)
+#endif
+
+GrGLBuffer* GrGLBuffer::Create(GrGLGpu* gpu, size_t size, GrBufferType intendedType,
+ GrAccessPattern accessPattern, const void* data) {
+ SkAutoTUnref<GrGLBuffer> buffer(new GrGLBuffer(gpu, size, intendedType, accessPattern, data));
+ if (0 == buffer->bufferID()) {
+ return nullptr;
+ }
+ return buffer.release();
+}
+
+// GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
+// objects are implemented as client-side-arrays on tile-deferred architectures.
+#define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW
+
+inline static GrGLenum gr_to_gl_access_pattern(GrBufferType bufferType,
+ GrAccessPattern accessPattern) {
+ static const GrGLenum drawUsages[] = {
+ DYNAMIC_DRAW_PARAM, // TODO: Do we really want to use STREAM_DRAW here on non-Chromium?
+ GR_GL_STATIC_DRAW, // kStatic_GrAccessPattern
+ GR_GL_STREAM_DRAW // kStream_GrAccessPattern
+ };
+
+ static const GrGLenum readUsages[] = {
+ GR_GL_DYNAMIC_READ, // kDynamic_GrAccessPattern
+ GR_GL_STATIC_READ, // kStatic_GrAccessPattern
+ GR_GL_STREAM_READ // kStream_GrAccessPattern
+ };
+
+ GR_STATIC_ASSERT(0 == kDynamic_GrAccessPattern);
+ GR_STATIC_ASSERT(1 == kStatic_GrAccessPattern);
+ GR_STATIC_ASSERT(2 == kStream_GrAccessPattern);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(drawUsages) == 1 + kLast_GrAccessPattern);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(readUsages) == 1 + kLast_GrAccessPattern);
+
+ static GrGLenum const* const usageTypes[] = {
+ drawUsages, // kVertex_GrBufferType,
+ drawUsages, // kIndex_GrBufferType,
+ drawUsages, // kTexel_GrBufferType,
+ drawUsages, // kDrawIndirect_GrBufferType,
+ drawUsages, // kXferCpuToGpu_GrBufferType,
+ readUsages // kXferGpuToCpu_GrBufferType,
+ };
+
+ GR_STATIC_ASSERT(0 == kVertex_GrBufferType);
+ GR_STATIC_ASSERT(1 == kIndex_GrBufferType);
+ GR_STATIC_ASSERT(2 == kTexel_GrBufferType);
+ GR_STATIC_ASSERT(3 == kDrawIndirect_GrBufferType);
+ GR_STATIC_ASSERT(4 == kXferCpuToGpu_GrBufferType);
+ GR_STATIC_ASSERT(5 == kXferGpuToCpu_GrBufferType);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(usageTypes) == kGrBufferTypeCount);
+
+ SkASSERT(bufferType >= 0 && bufferType <= kLast_GrBufferType);
+ SkASSERT(accessPattern >= 0 && accessPattern <= kLast_GrAccessPattern);
+
+ return usageTypes[bufferType][accessPattern];
+}
+
+GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, size_t size, GrBufferType intendedType,
+ GrAccessPattern accessPattern, const void* data)
+ : INHERITED(gpu, size, intendedType, accessPattern),
+ fIntendedType(intendedType),
+ fBufferID(0),
+ fUsage(gr_to_gl_access_pattern(intendedType, accessPattern)),
+ fGLSizeInBytes(0),
+ fHasAttachedToTexture(false) {
+ GL_CALL(GenBuffers(1, &fBufferID));
+ if (fBufferID) {
+ GrGLenum target = gpu->bindBuffer(fIntendedType, this);
+ CLEAR_ERROR_BEFORE_ALLOC(gpu->glInterface());
+ // make sure driver can allocate memory for this buffer
+ GL_ALLOC_CALL(gpu->glInterface(), BufferData(target,
+ (GrGLsizeiptr) size,
+ data,
+ fUsage));
+ if (CHECK_ALLOC_ERROR(gpu->glInterface()) != GR_GL_NO_ERROR) {
+ GL_CALL(DeleteBuffers(1, &fBufferID));
+ fBufferID = 0;
+ } else {
+ fGLSizeInBytes = size;
+ }
+ }
+ VALIDATE();
+ this->registerWithCache(SkBudgeted::kYes);
+}
+
+inline GrGLGpu* GrGLBuffer::glGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrGLGpu*>(this->getGpu());
+}
+
+inline const GrGLCaps& GrGLBuffer::glCaps() const {
+ return this->glGpu()->glCaps();
+}
+
+void GrGLBuffer::onRelease() {
+ if (!this->wasDestroyed()) {
+ VALIDATE();
+ // make sure we've not been abandoned or already released
+ if (fBufferID) {
+ GL_CALL(DeleteBuffers(1, &fBufferID));
+ fBufferID = 0;
+ fGLSizeInBytes = 0;
+ this->glGpu()->notifyBufferReleased(this);
+ }
+ fMapPtr = nullptr;
+ VALIDATE();
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrGLBuffer::onAbandon() {
+ fBufferID = 0;
+ fGLSizeInBytes = 0;
+ fMapPtr = nullptr;
+ VALIDATE();
+ INHERITED::onAbandon();
+}
+
+void GrGLBuffer::onMap() {
+ if (this->wasDestroyed()) {
+ return;
+ }
+
+ VALIDATE();
+ SkASSERT(!this->isMapped());
+
+ // TODO: Make this a function parameter.
+ bool readOnly = (kXferGpuToCpu_GrBufferType == fIntendedType);
+
+ // Handling dirty context is done in the bindBuffer call
+ switch (this->glCaps().mapBufferType()) {
+ case GrGLCaps::kNone_MapBufferType:
+ break;
+ case GrGLCaps::kMapBuffer_MapBufferType: {
+ GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
+ // Let driver know it can discard the old data
+ if (GR_GL_USE_BUFFER_DATA_NULL_HINT || fGLSizeInBytes != this->sizeInBytes()) {
+ GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage));
+ }
+ GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
+ break;
+ }
+ case GrGLCaps::kMapBufferRange_MapBufferType: {
+ GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
+ // Make sure the GL buffer size agrees with fDesc before mapping.
+ if (fGLSizeInBytes != this->sizeInBytes()) {
+ GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage));
+ }
+ GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT;
+ if (kXferCpuToGpu_GrBufferType != fIntendedType) {
+ // TODO: Make this a function parameter.
+ writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
+ }
+ GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->sizeInBytes(),
+ readOnly ? GR_GL_MAP_READ_BIT : writeAccess));
+ break;
+ }
+ case GrGLCaps::kChromium_MapBufferType: {
+ GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
+ // Make sure the GL buffer size agrees with fDesc before mapping.
+ if (fGLSizeInBytes != this->sizeInBytes()) {
+ GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage));
+ }
+ GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->sizeInBytes(),
+ readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
+ break;
+ }
+ }
+ fGLSizeInBytes = this->sizeInBytes();
+ VALIDATE();
+}
+
+void GrGLBuffer::onUnmap() {
+ if (this->wasDestroyed()) {
+ return;
+ }
+
+ VALIDATE();
+ SkASSERT(this->isMapped());
+ if (0 == fBufferID) {
+ fMapPtr = nullptr;
+ return;
+ }
+ // bind buffer handles the dirty context
+ switch (this->glCaps().mapBufferType()) {
+ case GrGLCaps::kNone_MapBufferType:
+ SkDEBUGFAIL("Shouldn't get here.");
+ return;
+ case GrGLCaps::kMapBuffer_MapBufferType: // fall through
+ case GrGLCaps::kMapBufferRange_MapBufferType: {
+ GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
+ GL_CALL(UnmapBuffer(target));
+ break;
+ }
+ case GrGLCaps::kChromium_MapBufferType:
+ this->glGpu()->bindBuffer(fIntendedType, this); // TODO: Is this needed?
+ GL_CALL(UnmapBufferSubData(fMapPtr));
+ break;
+ }
+ fMapPtr = nullptr;
+}
+
+bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
+ if (this->wasDestroyed()) {
+ return false;
+ }
+
+ SkASSERT(!this->isMapped());
+ VALIDATE();
+ if (srcSizeInBytes > this->sizeInBytes()) {
+ return false;
+ }
+ SkASSERT(srcSizeInBytes <= this->sizeInBytes());
+    // bindBuffer handles the dirty context
+ GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
+
+#if GR_GL_USE_BUFFER_DATA_NULL_HINT
+ if (this->sizeInBytes() == srcSizeInBytes) {
+ GL_CALL(BufferData(target, (GrGLsizeiptr) srcSizeInBytes, src, fUsage));
+ } else {
+ // Before we call glBufferSubData we give the driver a hint using
+ // glBufferData with nullptr. This makes the old buffer contents
+ // inaccessible to future draws. The GPU may still be processing
+ // draws that reference the old contents. With this hint it can
+ // assign a different allocation for the new contents to avoid
+        // flushing the GPU past draws consuming the old contents.
+        // TODO: I think we actually want to try calling bufferData here.
+ GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage));
+ GL_CALL(BufferSubData(target, 0, (GrGLsizeiptr) srcSizeInBytes, src));
+ }
+ fGLSizeInBytes = this->sizeInBytes();
+#else
+ // Note that we're cheating on the size here. Currently no methods
+ // allow a partial update that preserves contents of non-updated
+ // portions of the buffer (map() does a glBufferData(..size, nullptr..))
+ GL_CALL(BufferData(target, srcSizeInBytes, src, fUsage));
+ fGLSizeInBytes = srcSizeInBytes;
+#endif
+ VALIDATE();
+ return true;
+}
+
+void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& dumpName) const {
+ SkString buffer_id;
+ buffer_id.appendU32(this->bufferID());
+ traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer",
+ buffer_id.c_str());
+}
+
+#ifdef SK_DEBUG
+
+void GrGLBuffer::validate() const {
+ SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes);
+ SkASSERT(nullptr == fMapPtr || fGLSizeInBytes <= this->sizeInBytes());
+}
+
+#endif
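The comment in GrGLBuffer::onUpdateData above describes the buffer-orphaning idiom: re-specifying the data store with glBufferData(nullptr) before glBufferSubData lets the driver hand the buffer a fresh allocation instead of stalling on in-flight draws that still read the old contents. A minimal standalone sketch of that idiom in plain OpenGL calls follows; it assumes standard GL headers, a buffer already bound to `target`, and a `capacity` matching the size originally passed to glBufferData, and it is illustrative rather than part of this patch.

    // Orphan-then-update sketch. `srcSize` must not exceed `capacity`.
    void orphanAndUpdate(GLenum target, GLsizeiptr capacity,
                         const void* src, GLsizeiptr srcSize, GLenum usage) {
        // Re-specify the store with no data: the driver may assign new storage
        // rather than wait for pending draws that reference the old contents.
        glBufferData(target, capacity, nullptr, usage);
        // Upload the new contents into the (possibly new) allocation.
        glBufferSubData(target, 0, srcSize, src);
    }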
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLBuffer.h b/gfx/skia/skia/src/gpu/gl/GrGLBuffer.h
new file mode 100644
index 000000000..6a90d0334
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLBuffer.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLBuffer_DEFINED
+#define GrGLBuffer_DEFINED
+
+#include "GrBuffer.h"
+#include "gl/GrGLTypes.h"
+
+class GrGLGpu;
+class GrGLCaps;
+
+class GrGLBuffer : public GrBuffer {
+public:
+ static GrGLBuffer* Create(GrGLGpu*, size_t size, GrBufferType intendedType, GrAccessPattern,
+ const void* data = nullptr);
+
+ ~GrGLBuffer() {
+ // either release or abandon should have been called by the owner of this object.
+ SkASSERT(0 == fBufferID);
+ }
+
+ GrGLuint bufferID() const { return fBufferID; }
+
+ /**
+ * Returns the actual size of the underlying GL buffer object. In certain cases we may make this
+ * smaller than the size reported by GrBuffer.
+ */
+ size_t glSizeInBytes() const { return fGLSizeInBytes; }
+
+ void setHasAttachedToTexture() { fHasAttachedToTexture = true; }
+ bool hasAttachedToTexture() const { return fHasAttachedToTexture; }
+
+protected:
+ GrGLBuffer(GrGLGpu*, size_t size, GrBufferType intendedType, GrAccessPattern, const void* data);
+
+ void onAbandon() override;
+ void onRelease() override;
+ void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& dumpName) const override;
+
+private:
+ GrGLGpu* glGpu() const;
+ const GrGLCaps& glCaps() const;
+
+ void onMap() override;
+ void onUnmap() override;
+ bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
+
+#ifdef SK_DEBUG
+ void validate() const;
+#endif
+
+ GrBufferType fIntendedType;
+ GrGLuint fBufferID;
+ GrGLenum fUsage;
+ size_t fGLSizeInBytes;
+ bool fHasAttachedToTexture;
+
+ typedef GrBuffer INHERITED;
+};
+
+#endif
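In practice GrGLBuffer objects are created and owned through GrGLGpu's resource paths, so the snippet below is only a hypothetical usage sketch: it assumes `gpu` is a valid GrGLGpu* and relies on the reference counting GrBuffer inherits from GrGpuResource.

    // Hypothetical usage sketch; not part of this patch.
    static const uint16_t kQuadIndices[] = { 0, 1, 2, 2, 1, 3 };
    GrGLBuffer* buffer = GrGLBuffer::Create(gpu, sizeof(kQuadIndices),
                                            kIndex_GrBufferType,
                                            kStatic_GrAccessPattern,
                                            kQuadIndices);
    if (buffer) {
        // Create() returns nullptr if the GL buffer could not be allocated.
        SkASSERT(buffer->bufferID() != 0);
        // ... record draws that index through buffer->bufferID() ...
        buffer->unref(); // drop the creating reference when done
    }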
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLCaps.cpp b/gfx/skia/skia/src/gpu/gl/GrGLCaps.cpp
new file mode 100644
index 000000000..1a7b105a2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLCaps.cpp
@@ -0,0 +1,1972 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrGLCaps.h"
+
+#include "GrContextOptions.h"
+#include "GrGLContext.h"
+#include "GrGLRenderTarget.h"
+#include "glsl/GrGLSLCaps.h"
+#include "instanced/GLInstancedRendering.h"
+#include "SkTSearch.h"
+#include "SkTSort.h"
+
+GrGLCaps::GrGLCaps(const GrContextOptions& contextOptions,
+ const GrGLContextInfo& ctxInfo,
+ const GrGLInterface* glInterface) : INHERITED(contextOptions) {
+ fStandard = ctxInfo.standard();
+
+ fStencilFormats.reset();
+ fMSFBOType = kNone_MSFBOType;
+ fInvalidateFBType = kNone_InvalidateFBType;
+ fMapBufferType = kNone_MapBufferType;
+ fTransferBufferType = kNone_TransferBufferType;
+ fMaxFragmentUniformVectors = 0;
+ fUnpackRowLengthSupport = false;
+ fUnpackFlipYSupport = false;
+ fPackRowLengthSupport = false;
+ fPackFlipYSupport = false;
+ fTextureUsageSupport = false;
+ fTextureRedSupport = false;
+ fImagingSupport = false;
+ fVertexArrayObjectSupport = false;
+ fDirectStateAccessSupport = false;
+ fDebugSupport = false;
+ fES2CompatibilitySupport = false;
+ fDrawInstancedSupport = false;
+ fDrawIndirectSupport = false;
+ fMultiDrawIndirectSupport = false;
+ fBaseInstanceSupport = false;
+ fIsCoreProfile = false;
+ fBindFragDataLocationSupport = false;
+ fRectangleTextureSupport = false;
+ fTextureSwizzleSupport = false;
+ fRGBA8888PixelsOpsAreSlow = false;
+ fPartialFBOReadIsSlow = false;
+ fMipMapLevelAndLodControlSupport = false;
+ fRGBAToBGRAReadbackConversionsAreSlow = false;
+ fDoManualMipmapping = false;
+
+ fBlitFramebufferSupport = kNone_BlitFramebufferSupport;
+
+ fShaderCaps.reset(new GrGLSLCaps(contextOptions));
+
+ this->init(contextOptions, ctxInfo, glInterface);
+}
+
+void GrGLCaps::init(const GrContextOptions& contextOptions,
+ const GrGLContextInfo& ctxInfo,
+ const GrGLInterface* gli) {
+ GrGLStandard standard = ctxInfo.standard();
+ GrGLVersion version = ctxInfo.version();
+
+ /**************************************************************************
+ * Caps specific to GrGLCaps
+ **************************************************************************/
+
+ if (kGLES_GrGLStandard == standard) {
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_FRAGMENT_UNIFORM_VECTORS,
+ &fMaxFragmentUniformVectors);
+ } else {
+ SkASSERT(kGL_GrGLStandard == standard);
+ GrGLint max;
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_FRAGMENT_UNIFORM_COMPONENTS, &max);
+ fMaxFragmentUniformVectors = max / 4;
+ if (version >= GR_GL_VER(3, 2)) {
+ GrGLint profileMask;
+ GR_GL_GetIntegerv(gli, GR_GL_CONTEXT_PROFILE_MASK, &profileMask);
+ fIsCoreProfile = SkToBool(profileMask & GR_GL_CONTEXT_CORE_PROFILE_BIT);
+ }
+ }
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_VERTEX_ATTRIBS, &fMaxVertexAttributes);
+
+ if (kGL_GrGLStandard == standard) {
+ fUnpackRowLengthSupport = true;
+ fUnpackFlipYSupport = false;
+ fPackRowLengthSupport = true;
+ fPackFlipYSupport = false;
+ } else {
+ fUnpackRowLengthSupport = version >= GR_GL_VER(3,0) ||
+ ctxInfo.hasExtension("GL_EXT_unpack_subimage");
+ fUnpackFlipYSupport = ctxInfo.hasExtension("GL_CHROMIUM_flipy");
+ fPackRowLengthSupport = version >= GR_GL_VER(3,0) ||
+ ctxInfo.hasExtension("GL_NV_pack_subimage");
+ fPackFlipYSupport =
+ ctxInfo.hasExtension("GL_ANGLE_pack_reverse_row_order");
+ }
+
+ fTextureUsageSupport = (kGLES_GrGLStandard == standard) &&
+ ctxInfo.hasExtension("GL_ANGLE_texture_usage");
+
+ if (kGL_GrGLStandard == standard) {
+ fTextureBarrierSupport = version >= GR_GL_VER(4,5) ||
+ ctxInfo.hasExtension("GL_ARB_texture_barrier") ||
+ ctxInfo.hasExtension("GL_NV_texture_barrier");
+ } else {
+ fTextureBarrierSupport = ctxInfo.hasExtension("GL_NV_texture_barrier");
+ }
+
+ if (kGL_GrGLStandard == standard) {
+ fSampleLocationsSupport = version >= GR_GL_VER(3,2) ||
+ ctxInfo.hasExtension("GL_ARB_texture_multisample");
+ } else {
+ fSampleLocationsSupport = version >= GR_GL_VER(3,1);
+ }
+
+ // ARB_texture_rg is part of OpenGL 3.0, but mesa doesn't support GL_RED
+ // and GL_RG on FBO textures.
+ if (kMesa_GrGLDriver != ctxInfo.driver()) {
+ if (kGL_GrGLStandard == standard) {
+ fTextureRedSupport = version >= GR_GL_VER(3,0) ||
+ ctxInfo.hasExtension("GL_ARB_texture_rg");
+ } else {
+ fTextureRedSupport = version >= GR_GL_VER(3,0) ||
+ ctxInfo.hasExtension("GL_EXT_texture_rg");
+ }
+ }
+ fImagingSupport = kGL_GrGLStandard == standard &&
+ ctxInfo.hasExtension("GL_ARB_imaging");
+
+    // A driver bug on the Nexus 6 causes incorrect dst copies when invalidate is called beforehand.
+ // Thus we are blacklisting this extension for now on Adreno4xx devices.
+ if (kAdreno4xx_GrGLRenderer != ctxInfo.renderer() &&
+ ((kGL_GrGLStandard == standard && version >= GR_GL_VER(4,3)) ||
+ (kGLES_GrGLStandard == standard && version >= GR_GL_VER(3,0)) ||
+ ctxInfo.hasExtension("GL_ARB_invalidate_subdata"))) {
+ fDiscardRenderTargetSupport = true;
+ fInvalidateFBType = kInvalidate_InvalidateFBType;
+ } else if (ctxInfo.hasExtension("GL_EXT_discard_framebuffer")) {
+ fDiscardRenderTargetSupport = true;
+ fInvalidateFBType = kDiscard_InvalidateFBType;
+ }
+
+ if (kARM_GrGLVendor == ctxInfo.vendor() || kImagination_GrGLVendor == ctxInfo.vendor()) {
+ fFullClearIsFree = true;
+ }
+
+ if (kGL_GrGLStandard == standard) {
+ fVertexArrayObjectSupport = version >= GR_GL_VER(3, 0) ||
+ ctxInfo.hasExtension("GL_ARB_vertex_array_object") ||
+ ctxInfo.hasExtension("GL_APPLE_vertex_array_object");
+ } else {
+ fVertexArrayObjectSupport = version >= GR_GL_VER(3, 0) ||
+ ctxInfo.hasExtension("GL_OES_vertex_array_object");
+ }
+
+ if (kGL_GrGLStandard == standard) {
+ fDirectStateAccessSupport = ctxInfo.hasExtension("GL_EXT_direct_state_access");
+ } else {
+ fDirectStateAccessSupport = false;
+ }
+
+ if (kGL_GrGLStandard == standard && version >= GR_GL_VER(4,3)) {
+ fDebugSupport = true;
+ } else {
+ fDebugSupport = ctxInfo.hasExtension("GL_KHR_debug");
+ }
+
+ if (kGL_GrGLStandard == standard) {
+ fES2CompatibilitySupport = ctxInfo.hasExtension("GL_ARB_ES2_compatibility");
+ }
+ else {
+ fES2CompatibilitySupport = true;
+ }
+
+ if (kGL_GrGLStandard == standard) {
+ fMultisampleDisableSupport = true;
+ } else {
+ fMultisampleDisableSupport = ctxInfo.hasExtension("GL_EXT_multisample_compatibility");
+ }
+
+ if (kGL_GrGLStandard == standard) {
+ if (version >= GR_GL_VER(3, 0)) {
+ fBindFragDataLocationSupport = true;
+ }
+ } else {
+ if (version >= GR_GL_VER(3, 0) && ctxInfo.hasExtension("GL_EXT_blend_func_extended")) {
+ fBindFragDataLocationSupport = true;
+ }
+ }
+
+ fBindUniformLocationSupport = ctxInfo.hasExtension("GL_CHROMIUM_bind_uniform_location");
+
+ if (kGL_GrGLStandard == standard) {
+ if (version >= GR_GL_VER(3, 1) || ctxInfo.hasExtension("GL_ARB_texture_rectangle")) {
+ // We also require textureSize() support for rectangle 2D samplers which was added in
+ // GLSL 1.40.
+ if (ctxInfo.glslGeneration() >= k140_GrGLSLGeneration) {
+ fRectangleTextureSupport = true;
+ }
+ }
+ } else {
+ // Command buffer exposes this in GL ES context for Chromium reasons,
+ // but it should not be used. Also, at the time of writing command buffer
+ // lacks TexImage2D support and ANGLE lacks GL ES 3.0 support.
+ }
+
+ if (kGL_GrGLStandard == standard) {
+ if (version >= GR_GL_VER(3,3) || ctxInfo.hasExtension("GL_ARB_texture_swizzle")) {
+ fTextureSwizzleSupport = true;
+ }
+ } else {
+ if (version >= GR_GL_VER(3,0)) {
+ fTextureSwizzleSupport = true;
+ }
+ }
+
+ if (kGL_GrGLStandard == standard) {
+ fMipMapLevelAndLodControlSupport = true;
+ } else if (kGLES_GrGLStandard == standard) {
+ if (version >= GR_GL_VER(3,0)) {
+ fMipMapLevelAndLodControlSupport = true;
+ }
+ }
+
+#ifdef SK_BUILD_FOR_WIN
+ // We're assuming that on Windows Chromium we're using ANGLE.
+ bool isANGLE = kANGLE_GrGLDriver == ctxInfo.driver() ||
+ kChromium_GrGLDriver == ctxInfo.driver();
+    // ANGLE has slow read/write pixel paths for 32-bit RGBA (but fast for BGRA).
+ fRGBA8888PixelsOpsAreSlow = isANGLE;
+ // On DX9 ANGLE reading a partial FBO is slow. TODO: Check whether this is still true and
+ // check DX11 ANGLE.
+ fPartialFBOReadIsSlow = isANGLE;
+#endif
+
+ bool isMESA = kMesa_GrGLDriver == ctxInfo.driver();
+ bool isMAC = false;
+#ifdef SK_BUILD_FOR_MAC
+ isMAC = true;
+#endif
+
+    // Both Mesa and Mac have reduced performance when reading back an RGBA framebuffer as BGRA or
+    // vice versa.
+ fRGBAToBGRAReadbackConversionsAreSlow = isMESA || isMAC;
+
+ /**************************************************************************
+ * GrShaderCaps fields
+ **************************************************************************/
+
+ // This must be called after fCoreProfile is set on the GrGLCaps
+ this->initGLSL(ctxInfo);
+ GrGLSLCaps* glslCaps = static_cast<GrGLSLCaps*>(fShaderCaps.get());
+
+ glslCaps->fPathRenderingSupport = this->hasPathRenderingSupport(ctxInfo, gli);
+
+ // For now these two are equivalent but we could have dst read in shader via some other method.
+ // Before setting this, initGLSL() must have been called.
+ glslCaps->fDstReadInShaderSupport = glslCaps->fFBFetchSupport;
+
+ // Enable supported shader-related caps
+ if (kGL_GrGLStandard == standard) {
+ glslCaps->fDualSourceBlendingSupport = (ctxInfo.version() >= GR_GL_VER(3, 3) ||
+ ctxInfo.hasExtension("GL_ARB_blend_func_extended")) &&
+ GrGLSLSupportsNamedFragmentShaderOutputs(ctxInfo.glslGeneration());
+ glslCaps->fShaderDerivativeSupport = true;
+ // we don't support GL_ARB_geometry_shader4, just GL 3.2+ GS
+ glslCaps->fGeometryShaderSupport = ctxInfo.version() >= GR_GL_VER(3, 2) &&
+ ctxInfo.glslGeneration() >= k150_GrGLSLGeneration;
+ glslCaps->fIntegerSupport = ctxInfo.version() >= GR_GL_VER(3, 0) &&
+ ctxInfo.glslGeneration() >= k130_GrGLSLGeneration;
+ }
+ else {
+ glslCaps->fDualSourceBlendingSupport = ctxInfo.hasExtension("GL_EXT_blend_func_extended");
+
+ glslCaps->fShaderDerivativeSupport = ctxInfo.version() >= GR_GL_VER(3, 0) ||
+ ctxInfo.hasExtension("GL_OES_standard_derivatives");
+
+ glslCaps->fIntegerSupport = ctxInfo.version() >= GR_GL_VER(3, 0) &&
+ ctxInfo.glslGeneration() >= k330_GrGLSLGeneration; // We use this value for GLSL ES 3.0.
+ }
+
+ if (ctxInfo.hasExtension("GL_EXT_shader_pixel_local_storage")) {
+ #define GL_MAX_SHADER_PIXEL_LOCAL_STORAGE_FAST_SIZE_EXT 0x8F63
+ GR_GL_GetIntegerv(gli, GL_MAX_SHADER_PIXEL_LOCAL_STORAGE_FAST_SIZE_EXT,
+ &glslCaps->fPixelLocalStorageSize);
+ glslCaps->fPLSPathRenderingSupport = glslCaps->fFBFetchSupport;
+ }
+ else {
+ glslCaps->fPixelLocalStorageSize = 0;
+ glslCaps->fPLSPathRenderingSupport = false;
+ }
+
+ // Protect ourselves against tracking huge amounts of texture state.
+ static const uint8_t kMaxSaneSamplers = 32;
+ GrGLint maxSamplers;
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS, &maxSamplers);
+ glslCaps->fMaxVertexSamplers = SkTMin<GrGLint>(kMaxSaneSamplers, maxSamplers);
+ if (glslCaps->fGeometryShaderSupport) {
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS, &maxSamplers);
+ glslCaps->fMaxGeometrySamplers = SkTMin<GrGLint>(kMaxSaneSamplers, maxSamplers);
+ }
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_TEXTURE_IMAGE_UNITS, &maxSamplers);
+ glslCaps->fMaxFragmentSamplers = SkTMin<GrGLint>(kMaxSaneSamplers, maxSamplers);
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, &maxSamplers);
+ glslCaps->fMaxCombinedSamplers = SkTMin<GrGLint>(kMaxSaneSamplers, maxSamplers);
+
+ /**************************************************************************
+ * GrCaps fields
+ **************************************************************************/
+
+ // We need dual source blending and the ability to disable multisample in order to support mixed
+ // samples in every corner case.
+ if (fMultisampleDisableSupport &&
+ glslCaps->dualSourceBlendingSupport() &&
+ fShaderCaps->pathRenderingSupport()) {
+ fUsesMixedSamples = ctxInfo.hasExtension("GL_NV_framebuffer_mixed_samples") ||
+ ctxInfo.hasExtension("GL_CHROMIUM_framebuffer_mixed_samples");
+ // Workaround NVIDIA bug related to glInvalidateFramebuffer and mixed samples.
+ if (fUsesMixedSamples && (kNVIDIA_GrGLDriver == ctxInfo.driver() ||
+ kChromium_GrGLDriver == ctxInfo.driver())) {
+ fDiscardRenderTargetSupport = false;
+ fInvalidateFBType = kNone_InvalidateFBType;
+ }
+ }
+
+    // SGX and Mali GPUs are based on a tiled-deferred architecture and have trouble with
+ // frequently changing VBOs. We've measured a performance increase using non-VBO vertex
+ // data for dynamic content on these GPUs. Perhaps we should read the renderer string and
+ // limit this decision to specific GPU families rather than basing it on the vendor alone.
+ if (!GR_GL_MUST_USE_VBO &&
+ !fIsCoreProfile &&
+ (kARM_GrGLVendor == ctxInfo.vendor() ||
+ kImagination_GrGLVendor == ctxInfo.vendor() ||
+ kQualcomm_GrGLVendor == ctxInfo.vendor())) {
+ fPreferClientSideDynamicBuffers = true;
+ }
+
+ // fUsesMixedSamples must be set before calling initFSAASupport.
+ this->initFSAASupport(ctxInfo, gli);
+ this->initBlendEqationSupport(ctxInfo);
+ this->initStencilFormats(ctxInfo);
+
+ if (kGL_GrGLStandard == standard) {
+ // we could also look for GL_ATI_separate_stencil extension or
+ // GL_EXT_stencil_two_side but they use different function signatures
+ // than GL2.0+ (and than each other).
+ fTwoSidedStencilSupport = (ctxInfo.version() >= GR_GL_VER(2,0));
+ // supported on GL 1.4 and higher or by extension
+ fStencilWrapOpsSupport = (ctxInfo.version() >= GR_GL_VER(1,4)) ||
+ ctxInfo.hasExtension("GL_EXT_stencil_wrap");
+ } else {
+ // ES 2 has two sided stencil and stencil wrap
+ fTwoSidedStencilSupport = true;
+ fStencilWrapOpsSupport = true;
+ }
+
+ if (kGL_GrGLStandard == standard) {
+ fMapBufferFlags = kCanMap_MapFlag; // we require VBO support and the desktop VBO
+ // extension includes glMapBuffer.
+ if (version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_ARB_map_buffer_range")) {
+ fMapBufferFlags |= kSubset_MapFlag;
+ fMapBufferType = kMapBufferRange_MapBufferType;
+ } else {
+ fMapBufferType = kMapBuffer_MapBufferType;
+ }
+ } else {
+ // Unextended GLES2 doesn't have any buffer mapping.
+        fMapBufferFlags = kNone_MapFlags;
+ if (ctxInfo.hasExtension("GL_CHROMIUM_map_sub")) {
+ fMapBufferFlags = kCanMap_MapFlag | kSubset_MapFlag;
+ fMapBufferType = kChromium_MapBufferType;
+ } else if (version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_EXT_map_buffer_range")) {
+ fMapBufferFlags = kCanMap_MapFlag | kSubset_MapFlag;
+ fMapBufferType = kMapBufferRange_MapBufferType;
+ } else if (ctxInfo.hasExtension("GL_OES_mapbuffer")) {
+ fMapBufferFlags = kCanMap_MapFlag;
+ fMapBufferType = kMapBuffer_MapBufferType;
+ }
+ }
+
+ if (kGL_GrGLStandard == standard) {
+ if (version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_ARB_pixel_buffer_object")) {
+ fTransferBufferType = kPBO_TransferBufferType;
+ }
+ } else {
+ if (version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_NV_pixel_buffer_object")) {
+ fTransferBufferType = kPBO_TransferBufferType;
+ } else if (ctxInfo.hasExtension("GL_CHROMIUM_pixel_transfer_buffer_object")) {
+ fTransferBufferType = kChromium_TransferBufferType;
+ }
+ }
+
+ // On many GPUs, map memory is very expensive, so we effectively disable it here by setting the
+ // threshold to the maximum unless the client gives us a hint that map memory is cheap.
+ if (fBufferMapThreshold < 0) {
+ // We think mapping on Chromium will be cheaper once we know ahead of time how much space
+ // we will use for all GrBatchs. Right now we might wind up mapping a large buffer and using
+ // a small subset.
+#if 0
+ fBufferMapThreshold = kChromium_GrGLDriver == ctxInfo.driver() ? 0 : SK_MaxS32;
+#else
+ fBufferMapThreshold = SK_MaxS32;
+#endif
+ }
+
+ if (kGL_GrGLStandard == standard) {
+ SkASSERT(ctxInfo.version() >= GR_GL_VER(2,0) ||
+ ctxInfo.hasExtension("GL_ARB_texture_non_power_of_two"));
+ fNPOTTextureTileSupport = true;
+ fMipMapSupport = true;
+ } else {
+ // Unextended ES2 supports NPOT textures with clamp_to_edge and non-mip filters only
+ // ES3 has no limitations.
+ fNPOTTextureTileSupport = ctxInfo.version() >= GR_GL_VER(3,0) ||
+ ctxInfo.hasExtension("GL_OES_texture_npot");
+ // ES2 supports MIP mapping for POT textures but our caps don't allow for limited MIP
+        // support. The OES extension or ES 3.0 allows for MIPs on NPOT textures. So, apparently,
+        // does the undocumented GL_IMG_texture_npot extension. This extension does not seem
+        // to allow arbitrary wrap modes, however.
+ fMipMapSupport = fNPOTTextureTileSupport || ctxInfo.hasExtension("GL_IMG_texture_npot");
+ }
+
+ // Using MIPs on this GPU seems to be a source of trouble.
+ if (kPowerVR54x_GrGLRenderer == ctxInfo.renderer()) {
+ fMipMapSupport = false;
+ }
+
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_TEXTURE_SIZE, &fMaxTextureSize);
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_RENDERBUFFER_SIZE, &fMaxRenderTargetSize);
+ // Our render targets are always created with textures as the color
+ // attachment, hence this min:
+ fMaxRenderTargetSize = SkTMin(fMaxTextureSize, fMaxRenderTargetSize);
+
+ fGpuTracingSupport = ctxInfo.hasExtension("GL_EXT_debug_marker");
+
+ // Disable scratch texture reuse on Mali and Adreno devices
+ fReuseScratchTextures = kARM_GrGLVendor != ctxInfo.vendor();
+
+#if 0
+ fReuseScratchBuffers = kARM_GrGLVendor != ctxInfo.vendor() &&
+ kQualcomm_GrGLVendor != ctxInfo.vendor();
+#endif
+
+ // initFSAASupport() must have been called before this point
+ if (GrGLCaps::kES_IMG_MsToTexture_MSFBOType == fMSFBOType) {
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_SAMPLES_IMG, &fMaxStencilSampleCount);
+ } else if (GrGLCaps::kNone_MSFBOType != fMSFBOType) {
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_SAMPLES, &fMaxStencilSampleCount);
+ }
+ // We only have a use for raster multisample if there is coverage modulation from mixed samples.
+ if (fUsesMixedSamples && ctxInfo.hasExtension("GL_EXT_raster_multisample")) {
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_RASTER_SAMPLES, &fMaxRasterSamples);
+ // This is to guard against platforms that may not support as many samples for
+ // glRasterSamples as they do for framebuffers.
+ fMaxStencilSampleCount = SkTMin(fMaxStencilSampleCount, fMaxRasterSamples);
+ }
+ fMaxColorSampleCount = fMaxStencilSampleCount;
+
+ if (ctxInfo.hasExtension("GL_EXT_window_rectangles")) {
+ GR_GL_GetIntegerv(gli, GR_GL_MAX_WINDOW_RECTANGLES, &fMaxWindowRectangles);
+ }
+
+ if (kPowerVR54x_GrGLRenderer == ctxInfo.renderer() ||
+ kPowerVRRogue_GrGLRenderer == ctxInfo.renderer() ||
+ kAdreno3xx_GrGLRenderer == ctxInfo.renderer()) {
+ fUseDrawInsteadOfClear = true;
+ }
+
+ if (kAdreno4xx_GrGLRenderer == ctxInfo.renderer()) {
+ fUseDrawInsteadOfPartialRenderTargetWrite = true;
+ }
+
+    // Texture uploads sometimes seem to be ignored for textures bound to FBOs on Tegra3.
+ if (kTegra3_GrGLRenderer == ctxInfo.renderer()) {
+ fUseDrawInsteadOfPartialRenderTargetWrite = true;
+ fUseDrawInsteadOfAllRenderTargetWrites = true;
+ }
+
+#ifdef SK_BUILD_FOR_WIN
+ // On ANGLE deferring flushes can lead to GPU starvation
+ fPreferVRAMUseOverFlushes = !isANGLE;
+#endif
+
+ if (kChromium_GrGLDriver == ctxInfo.driver()) {
+ fMustClearUploadedBufferData = true;
+ }
+
+ if (kGL_GrGLStandard == standard) {
+ // ARB allows mixed size FBO attachments, EXT does not.
+ if (ctxInfo.version() >= GR_GL_VER(3, 0) ||
+ ctxInfo.hasExtension("GL_ARB_framebuffer_object")) {
+ fOversizedStencilSupport = true;
+ } else {
+ SkASSERT(ctxInfo.hasExtension("GL_EXT_framebuffer_object"));
+ }
+ } else {
+ // ES 3.0 supports mixed size FBO attachments, 2.0 does not.
+ fOversizedStencilSupport = ctxInfo.version() >= GR_GL_VER(3, 0);
+ }
+
+ if (kGL_GrGLStandard == standard) {
+ // 3.1 has draw_instanced but not instanced_arrays. For the time being we only care about
+ // instanced arrays, but we could make this more granular if we wanted to.
+ fDrawInstancedSupport =
+ version >= GR_GL_VER(3, 2) ||
+ (ctxInfo.hasExtension("GL_ARB_draw_instanced") &&
+ ctxInfo.hasExtension("GL_ARB_instanced_arrays"));
+ } else {
+ fDrawInstancedSupport =
+ version >= GR_GL_VER(3, 0) ||
+ (ctxInfo.hasExtension("GL_EXT_draw_instanced") &&
+ ctxInfo.hasExtension("GL_EXT_instanced_arrays"));
+ }
+
+ if (kGL_GrGLStandard == standard) {
+ fDrawIndirectSupport = version >= GR_GL_VER(4,0) ||
+ ctxInfo.hasExtension("GL_ARB_draw_indirect");
+ fBaseInstanceSupport = version >= GR_GL_VER(4,2);
+ fMultiDrawIndirectSupport = version >= GR_GL_VER(4,3) ||
+ (fDrawIndirectSupport &&
+ !fBaseInstanceSupport && // The ARB extension has no base inst.
+ ctxInfo.hasExtension("GL_ARB_multi_draw_indirect"));
+ fDrawRangeElementsSupport = version >= GR_GL_VER(2,0);
+ } else {
+ fDrawIndirectSupport = version >= GR_GL_VER(3,1);
+ fMultiDrawIndirectSupport = fDrawIndirectSupport &&
+ ctxInfo.hasExtension("GL_EXT_multi_draw_indirect");
+ fBaseInstanceSupport = fDrawIndirectSupport &&
+ ctxInfo.hasExtension("GL_EXT_base_instance");
+ fDrawRangeElementsSupport = version >= GR_GL_VER(3,0);
+ }
+
+ this->initShaderPrecisionTable(ctxInfo, gli, glslCaps);
+
+ if (contextOptions.fUseShaderSwizzling) {
+ fTextureSwizzleSupport = false;
+ }
+
+ if (kGL_GrGLStandard == standard) {
+ if ((version >= GR_GL_VER(4, 0) || ctxInfo.hasExtension("GL_ARB_sample_shading")) &&
+ ctxInfo.vendor() != kIntel_GrGLVendor) {
+ fSampleShadingSupport = true;
+ }
+ } else if (ctxInfo.hasExtension("GL_OES_sample_shading")) {
+ fSampleShadingSupport = true;
+ }
+
+ // TODO: support CHROMIUM_sync_point and maybe KHR_fence_sync
+ if (kGL_GrGLStandard == standard) {
+ if (version >= GR_GL_VER(3, 2) || ctxInfo.hasExtension("GL_ARB_sync")) {
+ fFenceSyncSupport = true;
+ }
+ } else if (version >= GR_GL_VER(3, 0)) {
+ fFenceSyncSupport = true;
+ }
+
+ // We support manual mip-map generation (via iterative downsampling draw calls). This fixes
+ // bugs on some cards/drivers that produce incorrect mip-maps for sRGB textures when using
+ // glGenerateMipmap. Our implementation requires mip-level sampling control. Additionally,
+ // it can be much slower (especially on mobile GPUs), so we opt in only when necessary:
+ if (fMipMapLevelAndLodControlSupport &&
+ (contextOptions.fDoManualMipmapping ||
+ (kIntel_GrGLVendor == ctxInfo.vendor()) ||
+ (kNVIDIA_GrGLDriver == ctxInfo.driver() && isMAC) ||
+ (kATI_GrGLVendor == ctxInfo.vendor()))) {
+ fDoManualMipmapping = true;
+ }
+
+ // Requires that fTextureRedSupport, fTextureSwizzleSupport, MSAA support, and ES compatibility
+ // have already been detected.
+ this->initConfigTable(ctxInfo, gli, glslCaps);
+
+ this->applyOptionsOverrides(contextOptions);
+ glslCaps->applyOptionsOverrides(contextOptions);
+}
+
+const char* get_glsl_version_decl_string(GrGLStandard standard, GrGLSLGeneration generation,
+ bool isCoreProfile) {
+ switch (generation) {
+ case k110_GrGLSLGeneration:
+ if (kGLES_GrGLStandard == standard) {
+ // ES2's shader language is based on version 1.20 but is version
+ // 1.00 of the ES language.
+ return "#version 100\n";
+ } else {
+ SkASSERT(kGL_GrGLStandard == standard);
+ return "#version 110\n";
+ }
+ case k130_GrGLSLGeneration:
+ SkASSERT(kGL_GrGLStandard == standard);
+ return "#version 130\n";
+ case k140_GrGLSLGeneration:
+ SkASSERT(kGL_GrGLStandard == standard);
+ return "#version 140\n";
+ case k150_GrGLSLGeneration:
+ SkASSERT(kGL_GrGLStandard == standard);
+ if (isCoreProfile) {
+ return "#version 150\n";
+ } else {
+ return "#version 150 compatibility\n";
+ }
+ case k330_GrGLSLGeneration:
+ if (kGLES_GrGLStandard == standard) {
+ return "#version 300 es\n";
+ } else {
+ SkASSERT(kGL_GrGLStandard == standard);
+ if (isCoreProfile) {
+ return "#version 330\n";
+ } else {
+ return "#version 330 compatibility\n";
+ }
+ }
+ case k400_GrGLSLGeneration:
+ SkASSERT(kGL_GrGLStandard == standard);
+ if (isCoreProfile) {
+ return "#version 400\n";
+ } else {
+ return "#version 400 compatibility\n";
+ }
+ case k310es_GrGLSLGeneration:
+ SkASSERT(kGLES_GrGLStandard == standard);
+ return "#version 310 es\n";
+ case k320es_GrGLSLGeneration:
+ SkASSERT(kGLES_GrGLStandard == standard);
+ return "#version 320 es\n";
+ }
+ return "<no version>";
+}
+
+void GrGLCaps::initGLSL(const GrGLContextInfo& ctxInfo) {
+ GrGLStandard standard = ctxInfo.standard();
+ GrGLVersion version = ctxInfo.version();
+
+ /**************************************************************************
+ * Caps specific to GrGLSLCaps
+ **************************************************************************/
+
+ GrGLSLCaps* glslCaps = static_cast<GrGLSLCaps*>(fShaderCaps.get());
+ glslCaps->fGLSLGeneration = ctxInfo.glslGeneration();
+ if (kGLES_GrGLStandard == standard) {
+ if (ctxInfo.hasExtension("GL_EXT_shader_framebuffer_fetch")) {
+ glslCaps->fFBFetchNeedsCustomOutput = (version >= GR_GL_VER(3, 0));
+ glslCaps->fFBFetchSupport = true;
+ glslCaps->fFBFetchColorName = "gl_LastFragData[0]";
+ glslCaps->fFBFetchExtensionString = "GL_EXT_shader_framebuffer_fetch";
+ }
+ else if (ctxInfo.hasExtension("GL_NV_shader_framebuffer_fetch")) {
+ // Actually, we haven't seen an ES3.0 device with this extension yet, so we don't know
+ // whether it requires a custom output there.
+ glslCaps->fFBFetchNeedsCustomOutput = false;
+ glslCaps->fFBFetchSupport = true;
+ glslCaps->fFBFetchColorName = "gl_LastFragData[0]";
+ glslCaps->fFBFetchExtensionString = "GL_NV_shader_framebuffer_fetch";
+ }
+ else if (ctxInfo.hasExtension("GL_ARM_shader_framebuffer_fetch")) {
+ // The ARM extension also requires an additional flag, which we set in onResetContext.
+ glslCaps->fFBFetchNeedsCustomOutput = false;
+ glslCaps->fFBFetchSupport = true;
+ glslCaps->fFBFetchColorName = "gl_LastFragColorARM";
+ glslCaps->fFBFetchExtensionString = "GL_ARM_shader_framebuffer_fetch";
+ }
+ glslCaps->fUsesPrecisionModifiers = true;
+ }
+
+ // Currently the extension is advertised but fb fetch is broken on 500 series Adrenos like the
+ // Galaxy S7.
+ // TODO: Once this is fixed we can update the check here to look at a driver version number too.
+ if (kAdreno5xx_GrGLRenderer == ctxInfo.renderer()) {
+ glslCaps->fFBFetchSupport = false;
+ }
+
+ glslCaps->fBindlessTextureSupport = ctxInfo.hasExtension("GL_NV_bindless_texture");
+
+ if (kGL_GrGLStandard == standard) {
+ glslCaps->fFlatInterpolationSupport = ctxInfo.glslGeneration() >= k130_GrGLSLGeneration;
+ } else {
+ glslCaps->fFlatInterpolationSupport =
+ ctxInfo.glslGeneration() >= k330_GrGLSLGeneration; // This is the value for GLSL ES 3.0.
+ }
+
+ if (kGL_GrGLStandard == standard) {
+ glslCaps->fNoPerspectiveInterpolationSupport =
+ ctxInfo.glslGeneration() >= k130_GrGLSLGeneration;
+ } else {
+ if (ctxInfo.hasExtension("GL_NV_shader_noperspective_interpolation")) {
+ glslCaps->fNoPerspectiveInterpolationSupport = true;
+ glslCaps->fNoPerspectiveInterpolationExtensionString =
+ "GL_NV_shader_noperspective_interpolation";
+ }
+ }
+
+ if (kGL_GrGLStandard == standard) {
+ glslCaps->fMultisampleInterpolationSupport =
+ ctxInfo.glslGeneration() >= k400_GrGLSLGeneration;
+ } else {
+ if (ctxInfo.glslGeneration() >= k320es_GrGLSLGeneration) {
+ glslCaps->fMultisampleInterpolationSupport = true;
+ } else if (ctxInfo.hasExtension("GL_OES_shader_multisample_interpolation")) {
+ glslCaps->fMultisampleInterpolationSupport = true;
+ glslCaps->fMultisampleInterpolationExtensionString =
+ "GL_OES_shader_multisample_interpolation";
+ }
+ }
+
+ if (kGL_GrGLStandard == standard) {
+ glslCaps->fSampleVariablesSupport = ctxInfo.glslGeneration() >= k400_GrGLSLGeneration;
+ } else {
+ if (ctxInfo.glslGeneration() >= k320es_GrGLSLGeneration) {
+ glslCaps->fSampleVariablesSupport = true;
+ } else if (ctxInfo.hasExtension("GL_OES_sample_variables")) {
+ glslCaps->fSampleVariablesSupport = true;
+ glslCaps->fSampleVariablesExtensionString = "GL_OES_sample_variables";
+ }
+ }
+
+ if (glslCaps->fSampleVariablesSupport &&
+ ctxInfo.hasExtension("GL_NV_sample_mask_override_coverage")) {
+ // Pre-361 NVIDIA has a bug with NV_sample_mask_override_coverage.
+ glslCaps->fSampleMaskOverrideCoverageSupport =
+ kNVIDIA_GrGLDriver != ctxInfo.driver() ||
+ ctxInfo.driverVersion() >= GR_GL_DRIVER_VER(361,00);
+ }
+
+ // Adreno GPUs have a tendency to drop tiles when there is a divide-by-zero in a shader
+ glslCaps->fDropsTileOnZeroDivide = kQualcomm_GrGLVendor == ctxInfo.vendor();
+
+ // On the NexusS and GalaxyNexus, the use of 'any' causes the compilation error "Calls to any
+ // function that may require a gradient calculation inside a conditional block may return
+ // undefined results". This appears to be an issue with the 'any' call since even the simple
+ // "result=black; if (any()) result=white;" code fails to compile. This issue comes into play
+ // from our GrTextureDomain processor.
+ glslCaps->fCanUseAnyFunctionInShader = kImagination_GrGLVendor != ctxInfo.vendor();
+
+ glslCaps->fVersionDeclString = get_glsl_version_decl_string(standard, glslCaps->fGLSLGeneration,
+ fIsCoreProfile);
+
+ if (kGLES_GrGLStandard == standard && k110_GrGLSLGeneration == glslCaps->fGLSLGeneration) {
+ glslCaps->fShaderDerivativeExtensionString = "GL_OES_standard_derivatives";
+ }
+
+ // Frag Coords Convention support is not part of ES
+ // Known issue on at least some Intel platforms:
+ // http://code.google.com/p/skia/issues/detail?id=946
+ if (kIntel_GrGLVendor != ctxInfo.vendor() &&
+ kGLES_GrGLStandard != standard &&
+ (ctxInfo.glslGeneration() >= k150_GrGLSLGeneration ||
+ ctxInfo.hasExtension("GL_ARB_fragment_coord_conventions"))) {
+ glslCaps->fFragCoordConventionsExtensionString = "GL_ARB_fragment_coord_conventions";
+ }
+
+ if (kGLES_GrGLStandard == standard) {
+ glslCaps->fSecondaryOutputExtensionString = "GL_EXT_blend_func_extended";
+ }
+
+ if (ctxInfo.hasExtension("GL_OES_EGL_image_external")) {
+ if (ctxInfo.glslGeneration() == k110_GrGLSLGeneration) {
+ glslCaps->fExternalTextureSupport = true;
+ } else if (ctxInfo.hasExtension("GL_OES_EGL_image_external_essl3") ||
+ ctxInfo.hasExtension("OES_EGL_image_external_essl3")) {
+ // At least one driver has been found that has this extension without the "GL_" prefix.
+ glslCaps->fExternalTextureSupport = true;
+ }
+ }
+
+ if (glslCaps->fExternalTextureSupport) {
+ if (ctxInfo.glslGeneration() == k110_GrGLSLGeneration) {
+ glslCaps->fExternalTextureExtensionString = "GL_OES_EGL_image_external";
+ } else {
+ glslCaps->fExternalTextureExtensionString = "GL_OES_EGL_image_external_essl3";
+ }
+ }
+
+ if (kGL_GrGLStandard == standard) {
+ glslCaps->fTexelFetchSupport = ctxInfo.glslGeneration() >= k130_GrGLSLGeneration;
+ } else {
+ glslCaps->fTexelFetchSupport =
+ ctxInfo.glslGeneration() >= k330_GrGLSLGeneration; // We use this value for GLSL ES 3.0.
+ }
+
+ if (glslCaps->fTexelFetchSupport) {
+ if (kGL_GrGLStandard == standard) {
+ glslCaps->fTexelBufferSupport = ctxInfo.version() >= GR_GL_VER(3, 1) &&
+ ctxInfo.glslGeneration() >= k330_GrGLSLGeneration;
+ } else {
+ if (ctxInfo.version() >= GR_GL_VER(3, 2) &&
+ ctxInfo.glslGeneration() >= k320es_GrGLSLGeneration) {
+ glslCaps->fTexelBufferSupport = true;
+ } else if (ctxInfo.hasExtension("GL_OES_texture_buffer")) {
+ glslCaps->fTexelBufferSupport = true;
+ glslCaps->fTexelBufferExtensionString = "GL_OES_texture_buffer";
+ } else if (ctxInfo.hasExtension("GL_EXT_texture_buffer")) {
+ glslCaps->fTexelBufferSupport = true;
+ glslCaps->fTexelBufferExtensionString = "GL_EXT_texture_buffer";
+ }
+ }
+ }
+
+ // The Tegra3 compiler will sometimes never return if we have min(abs(x), 1.0), so we must do
+ // the abs first in a separate expression.
+ if (kTegra3_GrGLRenderer == ctxInfo.renderer()) {
+ glslCaps->fCanUseMinAndAbsTogether = false;
+ }
+
+ // On Intel GPUs there is an issue where the second argument to atan, "- %s.x", is read as an
+ // int, so we must use -1.0 * %s.x to work correctly.
+ if (kIntel_GrGLVendor == ctxInfo.vendor()) {
+ glslCaps->fMustForceNegatedAtanParamToFloat = true;
+ }
+
+ // On Adreno devices with framebuffer fetch support, there is a bug where they always return
+ // the original dst color when reading the outColor even after being written to. By using a
+ // local outColor we can work around this bug.
+ if (glslCaps->fFBFetchSupport && kQualcomm_GrGLVendor == ctxInfo.vendor()) {
+ glslCaps->fRequiresLocalOutputColorForFBFetch = true;
+ }
+}
+
+bool GrGLCaps::hasPathRenderingSupport(const GrGLContextInfo& ctxInfo, const GrGLInterface* gli) {
+ bool hasChromiumPathRendering = ctxInfo.hasExtension("GL_CHROMIUM_path_rendering");
+
+ if (!(ctxInfo.hasExtension("GL_NV_path_rendering") || hasChromiumPathRendering)) {
+ return false;
+ }
+
+ if (kGL_GrGLStandard == ctxInfo.standard()) {
+ if (ctxInfo.version() < GR_GL_VER(4, 3) &&
+ !ctxInfo.hasExtension("GL_ARB_program_interface_query")) {
+ return false;
+ }
+ } else {
+ if (!hasChromiumPathRendering &&
+ ctxInfo.version() < GR_GL_VER(3, 1)) {
+ return false;
+ }
+ }
+ // We only support v1.3+ of GL_NV_path_rendering which allows us to
+ // set individual fragment inputs with ProgramPathFragmentInputGen. The API
+ // additions are detected by checking the existence of the function.
+ // We also use *Then* functions that not all drivers might have. Check
+ // them for consistency.
+ if (!gli->fFunctions.fStencilThenCoverFillPath ||
+ !gli->fFunctions.fStencilThenCoverStrokePath ||
+ !gli->fFunctions.fStencilThenCoverFillPathInstanced ||
+ !gli->fFunctions.fStencilThenCoverStrokePathInstanced ||
+ !gli->fFunctions.fProgramPathFragmentInputGen) {
+ return false;
+ }
+ return true;
+}
+
+bool GrGLCaps::readPixelsSupported(GrPixelConfig rtConfig,
+ GrPixelConfig readConfig,
+ std::function<void (GrGLenum, GrGLint*)> getIntegerv,
+ std::function<bool ()> bindRenderTarget) const {
+ // If it's not possible to even have a render target of rtConfig then read pixels is
+ // not supported regardless of readConfig.
+ if (!this->isConfigRenderable(rtConfig, false)) {
+ return false;
+ }
+
+ GrGLenum readFormat;
+ GrGLenum readType;
+ if (!this->getReadPixelsFormat(rtConfig, readConfig, &readFormat, &readType)) {
+ return false;
+ }
+
+ if (kGL_GrGLStandard == fStandard) {
+ // Some OpenGL implementations allow GL_ALPHA as a format to glReadPixels. However,
+ // the manual (https://www.opengl.org/sdk/docs/man/) says only these formats are allowed:
+ // GL_STENCIL_INDEX, GL_DEPTH_COMPONENT, GL_DEPTH_STENCIL, GL_RED, GL_GREEN, GL_BLUE,
+ // GL_RGB, GL_BGR, GL_RGBA, and GL_BGRA. We check for the subset that we would use.
+ if (readFormat != GR_GL_RED && readFormat != GR_GL_RGB && readFormat != GR_GL_RGBA &&
+ readFormat != GR_GL_BGRA) {
+ return false;
+ }
+ // There is also a set of allowed types, but all the types we use are in the set:
+ // GL_UNSIGNED_BYTE, GL_BYTE, GL_UNSIGNED_SHORT, GL_SHORT, GL_UNSIGNED_INT, GL_INT,
+ // GL_HALF_FLOAT, GL_FLOAT, GL_UNSIGNED_BYTE_3_3_2, GL_UNSIGNED_BYTE_2_3_3_REV,
+ // GL_UNSIGNED_SHORT_5_6_5, GL_UNSIGNED_SHORT_5_6_5_REV, GL_UNSIGNED_SHORT_4_4_4_4,
+ // GL_UNSIGNED_SHORT_4_4_4_4_REV, GL_UNSIGNED_SHORT_5_5_5_1, GL_UNSIGNED_SHORT_1_5_5_5_REV,
+ // GL_UNSIGNED_INT_8_8_8_8, GL_UNSIGNED_INT_8_8_8_8_REV,GL_UNSIGNED_INT_10_10_10_2,
+ // GL_UNSIGNED_INT_2_10_10_10_REV, GL_UNSIGNED_INT_24_8, GL_UNSIGNED_INT_10F_11F_11F_REV,
+ // GL_UNSIGNED_INT_5_9_9_9_REV, or GL_FLOAT_32_UNSIGNED_INT_24_8_REV.
+ return true;
+ }
+
+ // See Section 16.1.2 in the ES 3.2 specification.
+
+ if (kNormalizedFixedPoint_FormatType == fConfigTable[rtConfig].fFormatType) {
+ if (GR_GL_RGBA == readFormat && GR_GL_UNSIGNED_BYTE == readType) {
+ return true;
+ }
+ } else {
+ SkASSERT(kFloat_FormatType == fConfigTable[rtConfig].fFormatType);
+ if (GR_GL_RGBA == readFormat && GR_GL_FLOAT == readType) {
+ return true;
+ }
+ }
+
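+ // Lazily query and cache the implementation-defined secondary read format/type the first
+ // time this render target config is checked.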
+ if (0 == fConfigTable[rtConfig].fSecondReadPixelsFormat.fFormat) {
+ ReadPixelsFormat* rpFormat =
+ const_cast<ReadPixelsFormat*>(&fConfigTable[rtConfig].fSecondReadPixelsFormat);
+ GrGLint format = 0, type = 0;
+ if (!bindRenderTarget()) {
+ return false;
+ }
+ getIntegerv(GR_GL_IMPLEMENTATION_COLOR_READ_FORMAT, &format);
+ getIntegerv(GR_GL_IMPLEMENTATION_COLOR_READ_TYPE, &type);
+ rpFormat->fFormat = format;
+ rpFormat->fType = type;
+ }
+
+ return fConfigTable[rtConfig].fSecondReadPixelsFormat.fFormat == readFormat &&
+ fConfigTable[rtConfig].fSecondReadPixelsFormat.fType == readType;
+}
+
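+// Determines which MSAA framebuffer approach (core or extension) to use and whether
+// glBlitFramebuffer is usable.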
+void GrGLCaps::initFSAASupport(const GrGLContextInfo& ctxInfo, const GrGLInterface* gli) {
+
+ fMSFBOType = kNone_MSFBOType;
+ if (kGL_GrGLStandard != ctxInfo.standard()) {
+ // We prefer the EXT/IMG extension over ES3 MSAA because we've observed
+ // ES3 driver bugs on at least one device with a tiled GPU (N10).
+ if (ctxInfo.hasExtension("GL_EXT_multisampled_render_to_texture")) {
+ fMSFBOType = kES_EXT_MsToTexture_MSFBOType;
+ } else if (ctxInfo.hasExtension("GL_IMG_multisampled_render_to_texture")) {
+ fMSFBOType = kES_IMG_MsToTexture_MSFBOType;
+ } else if (fUsesMixedSamples) {
+ fMSFBOType = kMixedSamples_MSFBOType;
+ } else if (ctxInfo.version() >= GR_GL_VER(3,0)) {
+ fMSFBOType = GrGLCaps::kES_3_0_MSFBOType;
+ } else if (ctxInfo.hasExtension("GL_CHROMIUM_framebuffer_multisample")) {
+ // Chrome's extension is equivalent to the EXT msaa
+ // and fbo_blit extensions.
+ fMSFBOType = kDesktop_EXT_MSFBOType;
+ } else if (ctxInfo.hasExtension("GL_APPLE_framebuffer_multisample")) {
+ fMSFBOType = kES_Apple_MSFBOType;
+ }
+
+ // Above we determined the preferred MSAA approach; now decide whether glBlitFramebuffer
+ // is available.
+ if (ctxInfo.version() >= GR_GL_VER(3, 0)) {
+ fBlitFramebufferSupport = kFull_BlitFramebufferSupport;
+ } else if (ctxInfo.hasExtension("GL_CHROMIUM_framebuffer_multisample")) {
+ // The CHROMIUM extension uses the ANGLE version of glBlitFramebuffer and includes its
+ // limitations.
+ fBlitFramebufferSupport = kNoScalingNoMirroring_BlitFramebufferSupport;
+ }
+ } else {
+ if (fUsesMixedSamples) {
+ fMSFBOType = kMixedSamples_MSFBOType;
+ fBlitFramebufferSupport = kFull_BlitFramebufferSupport;
+ } else if ((ctxInfo.version() >= GR_GL_VER(3,0)) ||
+ ctxInfo.hasExtension("GL_ARB_framebuffer_object")) {
+ fMSFBOType = GrGLCaps::kDesktop_ARB_MSFBOType;
+ fBlitFramebufferSupport = kFull_BlitFramebufferSupport;
+ } else if (ctxInfo.hasExtension("GL_EXT_framebuffer_multisample") &&
+ ctxInfo.hasExtension("GL_EXT_framebuffer_blit")) {
+ fMSFBOType = GrGLCaps::kDesktop_EXT_MSFBOType;
+ fBlitFramebufferSupport = kFull_BlitFramebufferSupport;
+ }
+ }
+}
+
+void GrGLCaps::initBlendEqationSupport(const GrGLContextInfo& ctxInfo) {
+ GrGLSLCaps* glslCaps = static_cast<GrGLSLCaps*>(fShaderCaps.get());
+
+ // Disabling advanced blend on various platforms with major known issues. We also block Chrome
+ // for now until its own blacklists can be updated.
+ if (kAdreno4xx_GrGLRenderer == ctxInfo.renderer() ||
+ kIntel_GrGLDriver == ctxInfo.driver() ||
+ kChromium_GrGLDriver == ctxInfo.driver()) {
+ return;
+ }
+
+ if (ctxInfo.hasExtension("GL_NV_blend_equation_advanced_coherent")) {
+ fBlendEquationSupport = kAdvancedCoherent_BlendEquationSupport;
+ glslCaps->fAdvBlendEqInteraction = GrGLSLCaps::kAutomatic_AdvBlendEqInteraction;
+ } else if (ctxInfo.hasExtension("GL_KHR_blend_equation_advanced_coherent")) {
+ fBlendEquationSupport = kAdvancedCoherent_BlendEquationSupport;
+ glslCaps->fAdvBlendEqInteraction = GrGLSLCaps::kGeneralEnable_AdvBlendEqInteraction;
+ } else if (kNVIDIA_GrGLDriver == ctxInfo.driver() &&
+ ctxInfo.driverVersion() < GR_GL_DRIVER_VER(337,00)) {
+ // Non-coherent advanced blend has an issue on NVIDIA pre 337.00.
+ return;
+ } else if (ctxInfo.hasExtension("GL_NV_blend_equation_advanced")) {
+ fBlendEquationSupport = kAdvanced_BlendEquationSupport;
+ glslCaps->fAdvBlendEqInteraction = GrGLSLCaps::kAutomatic_AdvBlendEqInteraction;
+ } else if (ctxInfo.hasExtension("GL_KHR_blend_equation_advanced")) {
+ fBlendEquationSupport = kAdvanced_BlendEquationSupport;
+ glslCaps->fAdvBlendEqInteraction = GrGLSLCaps::kGeneralEnable_AdvBlendEqInteraction;
+ // TODO: Use kSpecificEnables_AdvBlendEqInteraction if "blend_support_all_equations" is
+ // slow on a particular platform.
+ } else {
+ return; // No advanced blend support.
+ }
+
+ SkASSERT(this->advancedBlendEquationSupport());
+
+ if (kNVIDIA_GrGLDriver == ctxInfo.driver() &&
+ ctxInfo.driverVersion() < GR_GL_DRIVER_VER(355,00)) {
+ // Blacklist color-dodge and color-burn on pre-355.00 NVIDIA.
+ fAdvBlendEqBlacklist |= (1 << kColorDodge_GrBlendEquation) |
+ (1 << kColorBurn_GrBlendEquation);
+ }
+ if (kARM_GrGLVendor == ctxInfo.vendor()) {
+ // Blacklist color-burn on ARM until the fix is released.
+ fAdvBlendEqBlacklist |= (1 << kColorBurn_GrBlendEquation);
+ }
+}
+
+namespace {
+const GrGLuint kUnknownBitCount = GrGLStencilAttachment::kUnknownBitCount;
+}
+
+void GrGLCaps::initStencilFormats(const GrGLContextInfo& ctxInfo) {
+
+ // Build up list of legal stencil formats (though perhaps not supported on
+ // the particular gpu/driver) from most preferred to least.
+
+ // these consts are in order of most preferred to least preferred
+ // we don't bother with GL_STENCIL_INDEX1 or GL_DEPTH32F_STENCIL8
+
+ static const StencilFormat
+ // internal Format stencil bits total bits packed?
+ gS8 = {GR_GL_STENCIL_INDEX8, 8, 8, false},
+ gS16 = {GR_GL_STENCIL_INDEX16, 16, 16, false},
+ gD24S8 = {GR_GL_DEPTH24_STENCIL8, 8, 32, true },
+ gS4 = {GR_GL_STENCIL_INDEX4, 4, 4, false},
+ // gS = {GR_GL_STENCIL_INDEX, kUnknownBitCount, kUnknownBitCount, false},
+ gDS = {GR_GL_DEPTH_STENCIL, kUnknownBitCount, kUnknownBitCount, true };
+
+ if (kGL_GrGLStandard == ctxInfo.standard()) {
+ bool supportsPackedDS =
+ ctxInfo.version() >= GR_GL_VER(3,0) ||
+ ctxInfo.hasExtension("GL_EXT_packed_depth_stencil") ||
+ ctxInfo.hasExtension("GL_ARB_framebuffer_object");
+
+ // S1 through S16 formats are in GL 3.0+, EXT_FBO, and ARB_FBO. Since we
+ // require FBO support we can expect these are legal formats and don't
+ // check. These also all support the unsized GL_STENCIL_INDEX.
+ fStencilFormats.push_back() = gS8;
+ fStencilFormats.push_back() = gS16;
+ if (supportsPackedDS) {
+ fStencilFormats.push_back() = gD24S8;
+ }
+ fStencilFormats.push_back() = gS4;
+ if (supportsPackedDS) {
+ fStencilFormats.push_back() = gDS;
+ }
+ } else {
+ // ES2 has STENCIL_INDEX8 without extensions but requires extensions
+ // for other formats.
+ // ES doesn't support using the unsized format.
+
+ fStencilFormats.push_back() = gS8;
+ //fStencilFormats.push_back() = gS16;
+ if (ctxInfo.version() >= GR_GL_VER(3,0) ||
+ ctxInfo.hasExtension("GL_OES_packed_depth_stencil")) {
+ fStencilFormats.push_back() = gD24S8;
+ }
+ if (ctxInfo.hasExtension("GL_OES_stencil4")) {
+ fStencilFormats.push_back() = gS4;
+ }
+ }
+}
+
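+// Appends the GL-specific caps to the base Caps dump for debugging output.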
+SkString GrGLCaps::dump() const {
+
+ SkString r = INHERITED::dump();
+
+ r.appendf("--- GL-Specific ---\n");
+ for (int i = 0; i < fStencilFormats.count(); ++i) {
+ r.appendf("Stencil Format %d, stencil bits: %02d, total bits: %02d\n",
+ i,
+ fStencilFormats[i].fStencilBits,
+ fStencilFormats[i].fTotalBits);
+ }
+
+ static const char* kMSFBOExtStr[] = {
+ "None",
+ "ARB",
+ "EXT",
+ "ES 3.0",
+ "Apple",
+ "IMG MS To Texture",
+ "EXT MS To Texture",
+ "MixedSamples",
+ };
+ GR_STATIC_ASSERT(0 == kNone_MSFBOType);
+ GR_STATIC_ASSERT(1 == kDesktop_ARB_MSFBOType);
+ GR_STATIC_ASSERT(2 == kDesktop_EXT_MSFBOType);
+ GR_STATIC_ASSERT(3 == kES_3_0_MSFBOType);
+ GR_STATIC_ASSERT(4 == kES_Apple_MSFBOType);
+ GR_STATIC_ASSERT(5 == kES_IMG_MsToTexture_MSFBOType);
+ GR_STATIC_ASSERT(6 == kES_EXT_MsToTexture_MSFBOType);
+ GR_STATIC_ASSERT(7 == kMixedSamples_MSFBOType);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kMSFBOExtStr) == kLast_MSFBOType + 1);
+
+ static const char* kInvalidateFBTypeStr[] = {
+ "None",
+ "Discard",
+ "Invalidate",
+ };
+ GR_STATIC_ASSERT(0 == kNone_InvalidateFBType);
+ GR_STATIC_ASSERT(1 == kDiscard_InvalidateFBType);
+ GR_STATIC_ASSERT(2 == kInvalidate_InvalidateFBType);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kInvalidateFBTypeStr) == kLast_InvalidateFBType + 1);
+
+ static const char* kMapBufferTypeStr[] = {
+ "None",
+ "MapBuffer",
+ "MapBufferRange",
+ "Chromium",
+ };
+ GR_STATIC_ASSERT(0 == kNone_MapBufferType);
+ GR_STATIC_ASSERT(1 == kMapBuffer_MapBufferType);
+ GR_STATIC_ASSERT(2 == kMapBufferRange_MapBufferType);
+ GR_STATIC_ASSERT(3 == kChromium_MapBufferType);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kMapBufferTypeStr) == kLast_MapBufferType + 1);
+
+ r.appendf("Core Profile: %s\n", (fIsCoreProfile ? "YES" : "NO"));
+ r.appendf("MSAA Type: %s\n", kMSFBOExtStr[fMSFBOType]);
+ r.appendf("Invalidate FB Type: %s\n", kInvalidateFBTypeStr[fInvalidateFBType]);
+ r.appendf("Map Buffer Type: %s\n", kMapBufferTypeStr[fMapBufferType]);
+ r.appendf("Max FS Uniform Vectors: %d\n", fMaxFragmentUniformVectors);
+ r.appendf("Unpack Row length support: %s\n", (fUnpackRowLengthSupport ? "YES": "NO"));
+ r.appendf("Unpack Flip Y support: %s\n", (fUnpackFlipYSupport ? "YES": "NO"));
+ r.appendf("Pack Row length support: %s\n", (fPackRowLengthSupport ? "YES": "NO"));
+ r.appendf("Pack Flip Y support: %s\n", (fPackFlipYSupport ? "YES": "NO"));
+
+ r.appendf("Texture Usage support: %s\n", (fTextureUsageSupport ? "YES": "NO"));
+ r.appendf("GL_R support: %s\n", (fTextureRedSupport ? "YES": "NO"));
+ r.appendf("GL_ARB_imaging support: %s\n", (fImagingSupport ? "YES": "NO"));
+ r.appendf("Vertex array object support: %s\n", (fVertexArrayObjectSupport ? "YES": "NO"));
+ r.appendf("Direct state access support: %s\n", (fDirectStateAccessSupport ? "YES": "NO"));
+ r.appendf("Debug support: %s\n", (fDebugSupport ? "YES": "NO"));
+ r.appendf("Draw instanced support: %s\n", (fDrawInstancedSupport ? "YES" : "NO"));
+ r.appendf("Draw indirect support: %s\n", (fDrawIndirectSupport ? "YES" : "NO"));
+ r.appendf("Multi draw indirect support: %s\n", (fMultiDrawIndirectSupport ? "YES" : "NO"));
+ r.appendf("Base instance support: %s\n", (fBaseInstanceSupport ? "YES" : "NO"));
+ r.appendf("RGBA 8888 pixel ops are slow: %s\n", (fRGBA8888PixelsOpsAreSlow ? "YES" : "NO"));
+ r.appendf("Partial FBO read is slow: %s\n", (fPartialFBOReadIsSlow ? "YES" : "NO"));
+ r.appendf("Bind uniform location support: %s\n", (fBindUniformLocationSupport ? "YES" : "NO"));
+ r.appendf("Rectangle texture support: %s\n", (fRectangleTextureSupport? "YES" : "NO"));
+ r.appendf("Texture swizzle support: %s\n", (fTextureSwizzleSupport ? "YES" : "NO"));
+ r.appendf("BGRA to RGBA readback conversions are slow: %s\n",
+ (fRGBAToBGRAReadbackConversionsAreSlow ? "YES" : "NO"));
+
+ r.append("Configs\n-------\n");
+ for (int i = 0; i < kGrPixelConfigCnt; ++i) {
+ r.appendf(" cfg: %d flags: 0x%04x, b_internal: 0x%08x s_internal: 0x%08x, e_format: "
+ "0x%08x, e_format_teximage: 0x%08x, e_type: 0x%08x, i_for_teximage: 0x%08x, "
+ "i_for_renderbuffer: 0x%08x\n",
+ i,
+ fConfigTable[i].fFlags,
+ fConfigTable[i].fFormats.fBaseInternalFormat,
+ fConfigTable[i].fFormats.fSizedInternalFormat,
+ fConfigTable[i].fFormats.fExternalFormat[kOther_ExternalFormatUsage],
+ fConfigTable[i].fFormats.fExternalFormat[kTexImage_ExternalFormatUsage],
+ fConfigTable[i].fFormats.fExternalType,
+ fConfigTable[i].fFormats.fInternalFormatTexImage,
+ fConfigTable[i].fFormats.fInternalFormatRenderbuffer);
+ }
+
+ return r;
+}
+
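+// Maps a GrSLPrecision to the GL float precision enum expected by glGetShaderPrecisionFormat.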
+static GrGLenum precision_to_gl_float_type(GrSLPrecision p) {
+ switch (p) {
+ case kLow_GrSLPrecision:
+ return GR_GL_LOW_FLOAT;
+ case kMedium_GrSLPrecision:
+ return GR_GL_MEDIUM_FLOAT;
+ case kHigh_GrSLPrecision:
+ return GR_GL_HIGH_FLOAT;
+ }
+ SkFAIL("Unknown precision.");
+ return -1;
+}
+
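+// Maps a GrShaderType to the corresponding GL shader stage enum.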
+static GrGLenum shader_type_to_gl_shader(GrShaderType type) {
+ switch (type) {
+ case kVertex_GrShaderType:
+ return GR_GL_VERTEX_SHADER;
+ case kGeometry_GrShaderType:
+ return GR_GL_GEOMETRY_SHADER;
+ case kFragment_GrShaderType:
+ return GR_GL_FRAGMENT_SHADER;
+ }
+ SkFAIL("Unknown shader type.");
+ return -1;
+}
+
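+// Fills in the per-stage float precision table, querying the driver where the API supports it
+// and otherwise assuming 32-bit float precision on desktop GL.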
+void GrGLCaps::initShaderPrecisionTable(const GrGLContextInfo& ctxInfo,
+ const GrGLInterface* intf,
+ GrGLSLCaps* glslCaps) {
+ if (kGLES_GrGLStandard == ctxInfo.standard() || ctxInfo.version() >= GR_GL_VER(4, 1) ||
+ ctxInfo.hasExtension("GL_ARB_ES2_compatibility")) {
+ for (int s = 0; s < kGrShaderTypeCount; ++s) {
+ if (kGeometry_GrShaderType != s) {
+ GrShaderType shaderType = static_cast<GrShaderType>(s);
+ GrGLenum glShader = shader_type_to_gl_shader(shaderType);
+ GrShaderCaps::PrecisionInfo* first = nullptr;
+ glslCaps->fShaderPrecisionVaries = false;
+ for (int p = 0; p < kGrSLPrecisionCount; ++p) {
+ GrSLPrecision precision = static_cast<GrSLPrecision>(p);
+ GrGLenum glPrecision = precision_to_gl_float_type(precision);
+ GrGLint range[2];
+ GrGLint bits;
+ GR_GL_GetShaderPrecisionFormat(intf, glShader, glPrecision, range, &bits);
+ if (bits) {
+ glslCaps->fFloatPrecisions[s][p].fLogRangeLow = range[0];
+ glslCaps->fFloatPrecisions[s][p].fLogRangeHigh = range[1];
+ glslCaps->fFloatPrecisions[s][p].fBits = bits;
+ if (!first) {
+ first = &glslCaps->fFloatPrecisions[s][p];
+ }
+ else if (!glslCaps->fShaderPrecisionVaries) {
+ glslCaps->fShaderPrecisionVaries =
+ (*first != glslCaps->fFloatPrecisions[s][p]);
+ }
+ }
+ }
+ }
+ }
+ }
+ else {
+ // We're on a desktop GL that doesn't have precision info. Assume they're all 32bit float.
+ glslCaps->fShaderPrecisionVaries = false;
+ for (int s = 0; s < kGrShaderTypeCount; ++s) {
+ if (kGeometry_GrShaderType != s) {
+ for (int p = 0; p < kGrSLPrecisionCount; ++p) {
+ glslCaps->fFloatPrecisions[s][p].fLogRangeLow = 127;
+ glslCaps->fFloatPrecisions[s][p].fLogRangeHigh = 127;
+ glslCaps->fFloatPrecisions[s][p].fBits = 23;
+ }
+ }
+ }
+ }
+ // GetShaderPrecisionFormat doesn't accept GL_GEOMETRY_SHADER as a shader type. Assume they're
+ // the same as the vertex shader. Only fragment shaders were ever allowed to omit support for
+ // highp. Geometry shaders were added to GL after GetShaderPrecisionFormat had already been
+ // put on the list of features that are recommended against.
+ if (glslCaps->fGeometryShaderSupport) {
+ for (int p = 0; p < kGrSLPrecisionCount; ++p) {
+ glslCaps->fFloatPrecisions[kGeometry_GrShaderType][p] =
+ glslCaps->fFloatPrecisions[kVertex_GrShaderType][p];
+ }
+ }
+ glslCaps->initSamplerPrecisionTable();
+}
+
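+// Reports whether BGRA textures are allocated with a BGRA base internal format rather than RGBA.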
+bool GrGLCaps::bgraIsInternalFormat() const {
+ return fConfigTable[kBGRA_8888_GrPixelConfig].fFormats.fBaseInternalFormat == GR_GL_BGRA;
+}
+
+bool GrGLCaps::getTexImageFormats(GrPixelConfig surfaceConfig, GrPixelConfig externalConfig,
+ GrGLenum* internalFormat, GrGLenum* externalFormat,
+ GrGLenum* externalType) const {
+ if (!this->getExternalFormat(surfaceConfig, externalConfig, kTexImage_ExternalFormatUsage,
+ externalFormat, externalType)) {
+ return false;
+ }
+ *internalFormat = fConfigTable[surfaceConfig].fFormats.fInternalFormatTexImage;
+ return true;
+}
+
+bool GrGLCaps::getCompressedTexImageFormats(GrPixelConfig surfaceConfig,
+ GrGLenum* internalFormat) const {
+ if (!GrPixelConfigIsCompressed(surfaceConfig)) {
+ return false;
+ }
+ *internalFormat = fConfigTable[surfaceConfig].fFormats.fInternalFormatTexImage;
+ return true;
+}
+
+bool GrGLCaps::getReadPixelsFormat(GrPixelConfig surfaceConfig, GrPixelConfig externalConfig,
+ GrGLenum* externalFormat, GrGLenum* externalType) const {
+ if (!this->getExternalFormat(surfaceConfig, externalConfig, kOther_ExternalFormatUsage,
+ externalFormat, externalType)) {
+ return false;
+ }
+ return true;
+}
+
+bool GrGLCaps::getRenderbufferFormat(GrPixelConfig config, GrGLenum* internalFormat) const {
+ if (GrPixelConfigIsCompressed(config)) {
+ return false;
+ }
+ *internalFormat = fConfigTable[config].fFormats.fInternalFormatRenderbuffer;
+ return true;
+}
+
+bool GrGLCaps::getExternalFormat(GrPixelConfig surfaceConfig, GrPixelConfig memoryConfig,
+ ExternalFormatUsage usage, GrGLenum* externalFormat,
+ GrGLenum* externalType) const {
+ SkASSERT(externalFormat && externalType);
+ if (GrPixelConfigIsCompressed(memoryConfig)) {
+ return false;
+ }
+
+ bool surfaceIsAlphaOnly = GrPixelConfigIsAlphaOnly(surfaceConfig);
+ bool memoryIsAlphaOnly = GrPixelConfigIsAlphaOnly(memoryConfig);
+
+ // We don't currently support moving RGBA data into and out of ALPHA surfaces. It could be
+ // made to work in many cases using glPixelStore and what not but is not needed currently.
+ if (surfaceIsAlphaOnly && !memoryIsAlphaOnly) {
+ return false;
+ }
+
+ *externalFormat = fConfigTable[memoryConfig].fFormats.fExternalFormat[usage];
+ *externalType = fConfigTable[memoryConfig].fFormats.fExternalType;
+
+ // When GL_RED is supported as a texture format, our alpha-only textures are stored using
+ // GL_RED and we swizzle in order to map all components to 'r'. However, in this case the
+ // surface is not alpha-only and we want alpha to really mean the alpha component of the
+ // texture, not the red component.
+ if (memoryIsAlphaOnly && !surfaceIsAlphaOnly) {
+ if (this->textureRedSupport()) {
+ SkASSERT(GR_GL_RED == *externalFormat);
+ *externalFormat = GR_GL_ALPHA;
+ }
+ }
+
+ return true;
+}
+
+void GrGLCaps::initConfigTable(const GrGLContextInfo& ctxInfo, const GrGLInterface* gli,
+ GrGLSLCaps* glslCaps) {
+ /*
+ Comments on renderability of configs on various GL versions.
+ OpenGL < 3.0:
+ no built in support for render targets.
+ GL_EXT_framebuffer_object adds possible support for any sized format with base internal
+ format RGB, RGBA and NV float formats we don't use.
+ This is the following:
+ R3_G3_B2, RGB4, RGB5, RGB8, RGB10, RGB12, RGB16, RGBA2, RGBA4, RGB5_A1, RGBA8
+ RGB10_A2, RGBA12,RGBA16
+ Though, it is hard to believe the more obscure formats such as RGBA12 would work
+ since they aren't required by later standards and the driver can simply return
+ FRAMEBUFFER_UNSUPPORTED for anything it doesn't allow.
+ GL_ARB_framebuffer_object adds everything added by the EXT extension and additionally
+ any sized internal format with a base internal format of ALPHA, LUMINANCE,
+ LUMINANCE_ALPHA, INTENSITY, RED, and RG.
+ This adds a lot of additional renderable sized formats, including ALPHA8.
+ The GL_ARB_texture_rg extension brings in the RED and RG formats (8, 8I, 8UI, 16, 16I, 16UI,
+ 16F, 32I, 32UI, and 32F variants).
+ Again, the driver has an escape hatch via FRAMEBUFFER_UNSUPPORTED.
+
+ For both the above extensions we limit ourselves to those that are also required by
+ OpenGL 3.0.
+
+ OpenGL 3.0:
+ Any format with base internal format ALPHA, RED, RG, RGB or RGBA is "color-renderable"
+ but is not required to be supported as a renderable texture/renderbuffer.
+ Required renderable color formats:
+ - RGBA32F, RGBA32I, RGBA32UI, RGBA16, RGBA16F, RGBA16I,
+ RGBA16UI, RGBA8, RGBA8I, RGBA8UI, SRGB8_ALPHA8, and
+ RGB10_A2.
+ - R11F_G11F_B10F.
+ - RG32F, RG32I, RG32UI, RG16, RG16F, RG16I, RG16UI, RG8, RG8I,
+ and RG8UI.
+ - R32F, R32I, R32UI, R16F, R16I, R16UI, R16, R8, R8I, and R8UI.
+ - ALPHA8
+
+ OpenGL 3.1, 3.2, 3.3
+ Same as 3.0 except ALPHA8 requires GL_ARB_compatibility/compatibility profile.
+ OpenGL 3.3, 4.0, 4.1
+ Adds RGB10_A2UI.
+ OpenGL 4.2
+ Adds
+ - RGB5_A1, RGBA4
+ - RGB565
+ OpenGL 4.4
+ Does away with the separate list and adds a column to the sized internal color format
+ table. However, no new formats become required color renderable.
+
+ ES 2.0
+ color renderable: RGBA4, RGB5_A1, RGB565
+ GL_EXT_texture_rg adds support for R8 and RG8 as color render targets
+ GL_OES_rgb8_rgba8 adds support for RGB8 and RGBA8
+ GL_ARM_rgba8 adds support for RGBA8 (but not RGB8)
+ GL_EXT_texture_format_BGRA8888 does not add renderbuffer support
+ GL_CHROMIUM_renderbuffer_format_BGRA8888 adds BGRA8 as color-renderable
+ GL_APPLE_texture_format_BGRA8888 does not add renderbuffer support
+
+ ES 3.0
+ - RGBA32I, RGBA32UI, RGBA16I, RGBA16UI, RGBA8, RGBA8I,
+ RGBA8UI, SRGB8_ALPHA8, RGB10_A2, RGB10_A2UI, RGBA4, and
+ RGB5_A1.
+ - RGB8 and RGB565.
+ - RG32I, RG32UI, RG16I, RG16UI, RG8, RG8I, and RG8UI.
+ - R32I, R32UI, R16I, R16UI, R8, R8I, and R8UI
+ ES 3.1
+ Adds RGB10_A2, RGB10_A2UI,
+ ES 3.2
+ Adds R16F, RG16F, RGBA16F, R32F, RG32F, RGBA32F, R11F_G11F_B10F.
+ */
+ uint32_t allRenderFlags = ConfigInfo::kRenderable_Flag;
+ if (kNone_MSFBOType != fMSFBOType) {
+ allRenderFlags |= ConfigInfo::kRenderableWithMSAA_Flag;
+ }
+
+ GrGLStandard standard = ctxInfo.standard();
+ GrGLVersion version = ctxInfo.version();
+
+ bool texStorageSupported = false;
+ if (kGL_GrGLStandard == standard) {
+ // The EXT version can apply to either GL or GLES.
+ texStorageSupported = version >= GR_GL_VER(4,2) ||
+ ctxInfo.hasExtension("GL_ARB_texture_storage") ||
+ ctxInfo.hasExtension("GL_EXT_texture_storage");
+ } else {
+ // Qualcomm Adreno drivers appear to have issues with texture storage.
+ texStorageSupported = (version >= GR_GL_VER(3,0) &&
+ kQualcomm_GrGLVendor != ctxInfo.vendor()) ||
+ ctxInfo.hasExtension("GL_EXT_texture_storage");
+ }
+
+ // TODO: remove after command buffer supports full ES 3.0
+ if (kGLES_GrGLStandard == standard && version >= GR_GL_VER(3,0) &&
+ kChromium_GrGLDriver == ctxInfo.driver()) {
+ texStorageSupported = false;
+ }
+
+ bool texelBufferSupport = this->shaderCaps()->texelBufferSupport();
+
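+ // The unknown config has no corresponding GL formats; its format fields are all set to zero.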
+ fConfigTable[kUnknown_GrPixelConfig].fFormats.fBaseInternalFormat = 0;
+ fConfigTable[kUnknown_GrPixelConfig].fFormats.fSizedInternalFormat = 0;
+ fConfigTable[kUnknown_GrPixelConfig].fFormats.fExternalFormat[kOther_ExternalFormatUsage] = 0;
+ fConfigTable[kUnknown_GrPixelConfig].fFormats.fExternalType = 0;
+ fConfigTable[kUnknown_GrPixelConfig].fFormatType = kNormalizedFixedPoint_FormatType;
+ fConfigTable[kUnknown_GrPixelConfig].fSwizzle = GrSwizzle::RGBA();
+
+ fConfigTable[kRGBA_8888_GrPixelConfig].fFormats.fBaseInternalFormat = GR_GL_RGBA;
+ fConfigTable[kRGBA_8888_GrPixelConfig].fFormats.fSizedInternalFormat = GR_GL_RGBA8;
+ fConfigTable[kRGBA_8888_GrPixelConfig].fFormats.fExternalFormat[kOther_ExternalFormatUsage] =
+ GR_GL_RGBA;
+ fConfigTable[kRGBA_8888_GrPixelConfig].fFormats.fExternalType = GR_GL_UNSIGNED_BYTE;
+ fConfigTable[kRGBA_8888_GrPixelConfig].fFormatType = kNormalizedFixedPoint_FormatType;
+ fConfigTable[kRGBA_8888_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag;
+ if (kGL_GrGLStandard == standard) {
+ // We require some form of FBO support and all GLs with FBO support can render to RGBA8
+ fConfigTable[kRGBA_8888_GrPixelConfig].fFlags |= allRenderFlags;
+ } else {
+ if (version >= GR_GL_VER(3,0) || ctxInfo.hasExtension("GL_OES_rgb8_rgba8") ||
+ ctxInfo.hasExtension("GL_ARM_rgba8")) {
+ fConfigTable[kRGBA_8888_GrPixelConfig].fFlags |= allRenderFlags;
+ }
+ }
+ if (texStorageSupported) {
+ fConfigTable[kRGBA_8888_GrPixelConfig].fFlags |= ConfigInfo::kCanUseTexStorage_Flag;
+ }
+ if (texelBufferSupport) {
+ fConfigTable[kRGBA_8888_GrPixelConfig].fFlags |= ConfigInfo::kCanUseWithTexelBuffer_Flag;
+ }
+ fConfigTable[kRGBA_8888_GrPixelConfig].fSwizzle = GrSwizzle::RGBA();
+
+ fConfigTable[kBGRA_8888_GrPixelConfig].fFormats.fExternalFormat[kOther_ExternalFormatUsage] =
+ GR_GL_BGRA;
+ fConfigTable[kBGRA_8888_GrPixelConfig].fFormats.fExternalType = GR_GL_UNSIGNED_BYTE;
+ fConfigTable[kBGRA_8888_GrPixelConfig].fFormatType = kNormalizedFixedPoint_FormatType;
+ if (kGL_GrGLStandard == standard) {
+ fConfigTable[kBGRA_8888_GrPixelConfig].fFormats.fBaseInternalFormat = GR_GL_RGBA;
+ fConfigTable[kBGRA_8888_GrPixelConfig].fFormats.fSizedInternalFormat = GR_GL_RGBA8;
+ if (version >= GR_GL_VER(1, 2) || ctxInfo.hasExtension("GL_EXT_bgra")) {
+ // Since the internal format is RGBA8, it is also renderable.
+ fConfigTable[kBGRA_8888_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag |
+ allRenderFlags;
+ }
+ } else {
+ fConfigTable[kBGRA_8888_GrPixelConfig].fFormats.fBaseInternalFormat = GR_GL_BGRA;
+ fConfigTable[kBGRA_8888_GrPixelConfig].fFormats.fSizedInternalFormat = GR_GL_BGRA8;
+ if (ctxInfo.hasExtension("GL_APPLE_texture_format_BGRA8888")) {
+ // The APPLE extension doesn't make this renderable.
+ fConfigTable[kBGRA_8888_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag;
+ if (version < GR_GL_VER(3,0) && !ctxInfo.hasExtension("GL_EXT_texture_storage")) {
+ // On ES2 the internal format of a BGRA texture is RGBA with the APPLE extension.
+ // However, that does not seem to be the case if the texture storage extension is
+ // present. The specs don't exactly make that clear.
+ fConfigTable[kBGRA_8888_GrPixelConfig].fFormats.fBaseInternalFormat = GR_GL_RGBA;
+ fConfigTable[kBGRA_8888_GrPixelConfig].fFormats.fSizedInternalFormat = GR_GL_RGBA8;
+ }
+ } else if (ctxInfo.hasExtension("GL_EXT_texture_format_BGRA8888")) {
+ fConfigTable[kBGRA_8888_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag |
+ ConfigInfo::kRenderable_Flag;
+ if (ctxInfo.hasExtension("GL_CHROMIUM_renderbuffer_format_BGRA8888") &&
+ (this->usesMSAARenderBuffers() || this->fMSFBOType == kMixedSamples_MSFBOType)) {
+ fConfigTable[kBGRA_8888_GrPixelConfig].fFlags |=
+ ConfigInfo::kRenderableWithMSAA_Flag;
+ }
+ }
+ }
+ if (texStorageSupported) {
+ fConfigTable[kBGRA_8888_GrPixelConfig].fFlags |= ConfigInfo::kCanUseTexStorage_Flag;
+ }
+ fConfigTable[kBGRA_8888_GrPixelConfig].fSwizzle = GrSwizzle::RGBA();
+
+ // We only enable srgb support if both textures and FBOs support srgb,
+ // *and* we can disable sRGB decode-on-read, to support "legacy" mode.
+ if (kGL_GrGLStandard == standard) {
+ if (ctxInfo.version() >= GR_GL_VER(3,0)) {
+ fSRGBSupport = true;
+ } else if (ctxInfo.hasExtension("GL_EXT_texture_sRGB")) {
+ if (ctxInfo.hasExtension("GL_ARB_framebuffer_sRGB") ||
+ ctxInfo.hasExtension("GL_EXT_framebuffer_sRGB")) {
+ fSRGBSupport = true;
+ }
+ }
+ // All the above srgb extensions support toggling srgb writes
+ if (fSRGBSupport) {
+ fSRGBWriteControl = true;
+ }
+ } else {
+ // See https://bug.skia.org/4148 for PowerVR issue.
+ fSRGBSupport = kPowerVRRogue_GrGLRenderer != ctxInfo.renderer() &&
+ (ctxInfo.version() >= GR_GL_VER(3,0) || ctxInfo.hasExtension("GL_EXT_sRGB"));
+ // ES through 3.1 requires EXT_srgb_write_control to support toggling
+ // sRGB writing for destinations.
+ // See https://bug.skia.org/5329 for Adreno4xx issue.
+ fSRGBWriteControl = kAdreno4xx_GrGLRenderer != ctxInfo.renderer() &&
+ ctxInfo.hasExtension("GL_EXT_sRGB_write_control");
+ }
+ if (!ctxInfo.hasExtension("GL_EXT_texture_sRGB_decode")) {
+ // To support "legacy" L32 mode, we require the ability to turn off sRGB decode:
+ fSRGBSupport = false;
+ }
+ fConfigTable[kSRGBA_8888_GrPixelConfig].fFormats.fBaseInternalFormat = GR_GL_SRGB_ALPHA;
+ fConfigTable[kSRGBA_8888_GrPixelConfig].fFormats.fSizedInternalFormat = GR_GL_SRGB8_ALPHA8;
+ // GL does not do srgb<->rgb conversions when transferring between cpu and gpu. Thus, the
+ // external format is GL_RGBA. See below for note about ES2.0 and glTex[Sub]Image.
+ fConfigTable[kSRGBA_8888_GrPixelConfig].fFormats.fExternalFormat[kOther_ExternalFormatUsage] =
+ GR_GL_RGBA;
+ fConfigTable[kSRGBA_8888_GrPixelConfig].fFormats.fExternalType = GR_GL_UNSIGNED_BYTE;
+ fConfigTable[kSRGBA_8888_GrPixelConfig].fFormatType = kNormalizedFixedPoint_FormatType;
+ if (fSRGBSupport) {
+ fConfigTable[kSRGBA_8888_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag |
+ allRenderFlags;
+ }
+ if (texStorageSupported) {
+ fConfigTable[kSRGBA_8888_GrPixelConfig].fFlags |= ConfigInfo::kCanUseTexStorage_Flag;
+ }
+ fConfigTable[kSRGBA_8888_GrPixelConfig].fSwizzle = GrSwizzle::RGBA();
+
+ // sBGRA is not a "real" thing in OpenGL, but GPUs support it, and on platforms where
+ // kN32 == BGRA, we need some way to work with it. (The default framebuffer on Windows
+ // is in this format, for example).
+ fConfigTable[kSBGRA_8888_GrPixelConfig].fFormats.fBaseInternalFormat = GR_GL_SRGB_ALPHA;
+ fConfigTable[kSBGRA_8888_GrPixelConfig].fFormats.fSizedInternalFormat = GR_GL_SRGB8_ALPHA8;
+ // GL does not do srgb<->rgb conversions when transferring between cpu and gpu. Thus, the
+ // external format is GL_BGRA.
+ fConfigTable[kSBGRA_8888_GrPixelConfig].fFormats.fExternalFormat[kOther_ExternalFormatUsage] =
+ GR_GL_BGRA;
+ fConfigTable[kSBGRA_8888_GrPixelConfig].fFormats.fExternalType = GR_GL_UNSIGNED_BYTE;
+ fConfigTable[kSBGRA_8888_GrPixelConfig].fFormatType = kNormalizedFixedPoint_FormatType;
+ if (fSRGBSupport) {
+ fConfigTable[kSBGRA_8888_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag |
+ allRenderFlags;
+ }
+ if (texStorageSupported) {
+ fConfigTable[kSBGRA_8888_GrPixelConfig].fFlags |= ConfigInfo::kCanUseTexStorage_Flag;
+ }
+ fConfigTable[kSBGRA_8888_GrPixelConfig].fSwizzle = GrSwizzle::RGBA();
+
+ fConfigTable[kRGB_565_GrPixelConfig].fFormats.fBaseInternalFormat = GR_GL_RGB;
+ if (this->ES2CompatibilitySupport()) {
+ fConfigTable[kRGB_565_GrPixelConfig].fFormats.fSizedInternalFormat = GR_GL_RGB565;
+ } else {
+ fConfigTable[kRGB_565_GrPixelConfig].fFormats.fSizedInternalFormat = GR_GL_RGB5;
+ }
+ fConfigTable[kRGB_565_GrPixelConfig].fFormats.fExternalFormat[kOther_ExternalFormatUsage] =
+ GR_GL_RGB;
+ fConfigTable[kRGB_565_GrPixelConfig].fFormats.fExternalType = GR_GL_UNSIGNED_SHORT_5_6_5;
+ fConfigTable[kRGB_565_GrPixelConfig].fFormatType = kNormalizedFixedPoint_FormatType;
+ fConfigTable[kRGB_565_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag;
+ if (kGL_GrGLStandard == standard) {
+ if (version >= GR_GL_VER(4, 2) || ctxInfo.hasExtension("GL_ES2_compatibility")) {
+ fConfigTable[kRGB_565_GrPixelConfig].fFlags |= allRenderFlags;
+ }
+ } else {
+ fConfigTable[kRGB_565_GrPixelConfig].fFlags |= allRenderFlags;
+ }
+ // 565 is not a sized internal format on desktop GL. So on desktop with
+ // 565 we always use an unsized internal format to let the system pick
+ // the best sized format to convert the 565 data to. Since TexStorage
+ // only allows sized internal formats we disallow it.
+ //
+ // TODO: As of 4.2, regular GL supports 565. This logic is due for an
+ // update.
+ if (texStorageSupported && kGL_GrGLStandard != standard) {
+ fConfigTable[kRGB_565_GrPixelConfig].fFlags |= ConfigInfo::kCanUseTexStorage_Flag;
+ }
+ fConfigTable[kRGB_565_GrPixelConfig].fSwizzle = GrSwizzle::RGBA();
+
+ fConfigTable[kRGBA_4444_GrPixelConfig].fFormats.fBaseInternalFormat = GR_GL_RGBA;
+ fConfigTable[kRGBA_4444_GrPixelConfig].fFormats.fSizedInternalFormat = GR_GL_RGBA4;
+ fConfigTable[kRGBA_4444_GrPixelConfig].fFormats.fExternalFormat[kOther_ExternalFormatUsage] =
+ GR_GL_RGBA;
+ fConfigTable[kRGBA_4444_GrPixelConfig].fFormats.fExternalType = GR_GL_UNSIGNED_SHORT_4_4_4_4;
+ fConfigTable[kRGBA_4444_GrPixelConfig].fFormatType = kNormalizedFixedPoint_FormatType;
+ fConfigTable[kRGBA_4444_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag;
+ if (kGL_GrGLStandard == standard) {
+ if (version >= GR_GL_VER(4, 2)) {
+ fConfigTable[kRGBA_4444_GrPixelConfig].fFlags |= allRenderFlags;
+ }
+ } else {
+ fConfigTable[kRGBA_4444_GrPixelConfig].fFlags |= allRenderFlags;
+ }
+ if (texStorageSupported) {
+ fConfigTable[kRGBA_4444_GrPixelConfig].fFlags |= ConfigInfo::kCanUseTexStorage_Flag;
+ }
+ fConfigTable[kRGBA_4444_GrPixelConfig].fSwizzle = GrSwizzle::RGBA();
+
+ if (this->textureRedSupport()) {
+ fConfigTable[kAlpha_8_GrPixelConfig].fFormats.fBaseInternalFormat = GR_GL_RED;
+ fConfigTable[kAlpha_8_GrPixelConfig].fFormats.fSizedInternalFormat = GR_GL_R8;
+ fConfigTable[kAlpha_8_GrPixelConfig].fFormats.fExternalFormat[kOther_ExternalFormatUsage] =
+ GR_GL_RED;
+ fConfigTable[kAlpha_8_GrPixelConfig].fSwizzle = GrSwizzle::RRRR();
+ if (texelBufferSupport) {
+ fConfigTable[kAlpha_8_GrPixelConfig].fFlags |= ConfigInfo::kCanUseWithTexelBuffer_Flag;
+ }
+ } else {
+ fConfigTable[kAlpha_8_GrPixelConfig].fFormats.fBaseInternalFormat = GR_GL_ALPHA;
+ fConfigTable[kAlpha_8_GrPixelConfig].fFormats.fSizedInternalFormat = GR_GL_ALPHA8;
+ fConfigTable[kAlpha_8_GrPixelConfig].fFormats.fExternalFormat[kOther_ExternalFormatUsage] =
+ GR_GL_ALPHA;
+ fConfigTable[kAlpha_8_GrPixelConfig].fSwizzle = GrSwizzle::AAAA();
+ }
+ fConfigTable[kAlpha_8_GrPixelConfig].fFormats.fExternalType = GR_GL_UNSIGNED_BYTE;
+ fConfigTable[kAlpha_8_GrPixelConfig].fFormatType = kNormalizedFixedPoint_FormatType;
+ fConfigTable[kAlpha_8_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag;
+ if (this->textureRedSupport() ||
+ (kDesktop_ARB_MSFBOType == this->msFBOType() &&
+ ctxInfo.renderer() != kOSMesa_GrGLRenderer)) {
+ // The desktop ARB extension/3.0+ supports ALPHA8 as renderable.
+ // However, OSMesa fails if it is used even when GL_ARB_framebuffer_object is present.
+ // Core profile removes ALPHA8 support, but we should have chosen R8 in that case.
+ fConfigTable[kAlpha_8_GrPixelConfig].fFlags |= allRenderFlags;
+ }
+ if (texStorageSupported) {
+ fConfigTable[kAlpha_8_GrPixelConfig].fFlags |= ConfigInfo::kCanUseTexStorage_Flag;
+ }
+
+ // Check for [half] floating point texture support
+ // NOTE: We disallow floating point textures on ES devices if linear filtering modes are not
+ // supported. This is for simplicity, but a more granular approach is possible. Coincidentally,
+ // [half] floating point textures became part of the standard in ES3.1 / OGL 3.0.
+ bool hasFPTextures = false;
+ bool hasHalfFPTextures = false;
+ // for now we don't support floating point MSAA on ES
+ uint32_t fpRenderFlags = (kGL_GrGLStandard == standard) ?
+ allRenderFlags : (uint32_t)ConfigInfo::kRenderable_Flag;
+
+ if (kGL_GrGLStandard == standard) {
+ if (version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_ARB_texture_float")) {
+ hasFPTextures = true;
+ hasHalfFPTextures = true;
+ }
+ } else {
+ if (version >= GR_GL_VER(3, 1)) {
+ hasFPTextures = true;
+ hasHalfFPTextures = true;
+ } else {
+ if (ctxInfo.hasExtension("GL_OES_texture_float_linear") &&
+ ctxInfo.hasExtension("GL_OES_texture_float")) {
+ hasFPTextures = true;
+ }
+ if (ctxInfo.hasExtension("GL_OES_texture_half_float_linear") &&
+ ctxInfo.hasExtension("GL_OES_texture_half_float")) {
+ hasHalfFPTextures = true;
+ }
+ }
+ }
+
+ fConfigTable[kRGBA_float_GrPixelConfig].fFormats.fBaseInternalFormat = GR_GL_RGBA;
+ fConfigTable[kRGBA_float_GrPixelConfig].fFormats.fSizedInternalFormat = GR_GL_RGBA32F;
+ fConfigTable[kRGBA_float_GrPixelConfig].fFormats.fExternalFormat[kOther_ExternalFormatUsage] =
+ GR_GL_RGBA;
+ fConfigTable[kRGBA_float_GrPixelConfig].fFormats.fExternalType = GR_GL_FLOAT;
+ fConfigTable[kRGBA_float_GrPixelConfig].fFormatType = kFloat_FormatType;
+ if (hasFPTextures) {
+ fConfigTable[kRGBA_float_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag;
+ // For now we only enable rendering to float on desktop, because on ES we'd have to solve
+ // many precision issues and no clients actually want this yet.
+ if (kGL_GrGLStandard == standard /* || version >= GR_GL_VER(3,2) ||
+ ctxInfo.hasExtension("GL_EXT_color_buffer_float")*/) {
+ fConfigTable[kRGBA_float_GrPixelConfig].fFlags |= fpRenderFlags;
+ }
+ }
+ if (texStorageSupported) {
+ fConfigTable[kRGBA_float_GrPixelConfig].fFlags |= ConfigInfo::kCanUseTexStorage_Flag;
+ }
+ if (texelBufferSupport) {
+ fConfigTable[kRGBA_float_GrPixelConfig].fFlags |= ConfigInfo::kCanUseWithTexelBuffer_Flag;
+ }
+ fConfigTable[kRGBA_float_GrPixelConfig].fSwizzle = GrSwizzle::RGBA();
+
+ if (this->textureRedSupport()) {
+ fConfigTable[kAlpha_half_GrPixelConfig].fFormats.fBaseInternalFormat = GR_GL_RED;
+ fConfigTable[kAlpha_half_GrPixelConfig].fFormats.fSizedInternalFormat = GR_GL_R16F;
+ fConfigTable[kAlpha_half_GrPixelConfig].fFormats.fExternalFormat[kOther_ExternalFormatUsage]
+ = GR_GL_RED;
+ fConfigTable[kAlpha_half_GrPixelConfig].fSwizzle = GrSwizzle::RRRR();
+ if (texelBufferSupport) {
+ fConfigTable[kAlpha_half_GrPixelConfig].fFlags |=
+ ConfigInfo::kCanUseWithTexelBuffer_Flag;
+ }
+ } else {
+ fConfigTable[kAlpha_half_GrPixelConfig].fFormats.fBaseInternalFormat = GR_GL_ALPHA;
+ fConfigTable[kAlpha_half_GrPixelConfig].fFormats.fSizedInternalFormat = GR_GL_ALPHA16F;
+ fConfigTable[kAlpha_half_GrPixelConfig].fFormats.fExternalFormat[kOther_ExternalFormatUsage]
+ = GR_GL_ALPHA;
+ fConfigTable[kAlpha_half_GrPixelConfig].fSwizzle = GrSwizzle::AAAA();
+ }
+ if (kGL_GrGLStandard == ctxInfo.standard() || ctxInfo.version() >= GR_GL_VER(3, 0)) {
+ fConfigTable[kAlpha_half_GrPixelConfig].fFormats.fExternalType = GR_GL_HALF_FLOAT;
+ } else {
+ fConfigTable[kAlpha_half_GrPixelConfig].fFormats.fExternalType = GR_GL_HALF_FLOAT_OES;
+ }
+ fConfigTable[kAlpha_half_GrPixelConfig].fFormatType = kFloat_FormatType;
+ if (hasHalfFPTextures) {
+ fConfigTable[kAlpha_half_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag;
+ // ES requires either 3.2 or the combination of EXT_color_buffer_half_float and support for
+ // GL_RED internal format.
+ if (kGL_GrGLStandard == standard || version >= GR_GL_VER(3,2) ||
+ (this->textureRedSupport() &&
+ ctxInfo.hasExtension("GL_EXT_color_buffer_half_float"))) {
+ fConfigTable[kAlpha_half_GrPixelConfig].fFlags |= fpRenderFlags;
+ }
+ }
+ if (texStorageSupported) {
+ fConfigTable[kAlpha_half_GrPixelConfig].fFlags |= ConfigInfo::kCanUseTexStorage_Flag;
+ }
+
+ fConfigTable[kRGBA_half_GrPixelConfig].fFormats.fBaseInternalFormat = GR_GL_RGBA;
+ fConfigTable[kRGBA_half_GrPixelConfig].fFormats.fSizedInternalFormat = GR_GL_RGBA16F;
+ fConfigTable[kRGBA_half_GrPixelConfig].fFormats.fExternalFormat[kOther_ExternalFormatUsage] =
+ GR_GL_RGBA;
+ if (kGL_GrGLStandard == ctxInfo.standard() || ctxInfo.version() >= GR_GL_VER(3, 0)) {
+ fConfigTable[kRGBA_half_GrPixelConfig].fFormats.fExternalType = GR_GL_HALF_FLOAT;
+ } else {
+ fConfigTable[kRGBA_half_GrPixelConfig].fFormats.fExternalType = GR_GL_HALF_FLOAT_OES;
+ }
+ fConfigTable[kRGBA_half_GrPixelConfig].fFormatType = kFloat_FormatType;
+ if (hasHalfFPTextures) {
+ fConfigTable[kRGBA_half_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag;
+ // ES requires 3.2 or EXT_color_buffer_half_float.
+ if (kGL_GrGLStandard == standard || version >= GR_GL_VER(3,2) ||
+ ctxInfo.hasExtension("GL_EXT_color_buffer_half_float")) {
+ fConfigTable[kRGBA_half_GrPixelConfig].fFlags |= fpRenderFlags;
+ }
+ }
+ if (texStorageSupported) {
+ fConfigTable[kRGBA_half_GrPixelConfig].fFlags |= ConfigInfo::kCanUseTexStorage_Flag;
+ }
+ if (texelBufferSupport) {
+ fConfigTable[kRGBA_half_GrPixelConfig].fFlags |= ConfigInfo::kCanUseWithTexelBuffer_Flag;
+ }
+ fConfigTable[kRGBA_half_GrPixelConfig].fSwizzle = GrSwizzle::RGBA();
+
+ // Compressed texture support
+
+ // glCompressedTexImage2D is available on all OpenGL ES devices. It is available on standard
+ // OpenGL in version 1.3 and later. We'll assume at least that level of OpenGL support.
+
+ // TODO: Fix command buffer bindings and remove this.
+ fCompressedTexSubImageSupport = SkToBool(gli->fFunctions.fCompressedTexSubImage2D);
+
+ // No sized/unsized internal format distinction for compressed formats, no external format.
+ // Below we set the external formats and types to 0.
+
+ fConfigTable[kIndex_8_GrPixelConfig].fFormats.fBaseInternalFormat = GR_GL_PALETTE8_RGBA8;
+ fConfigTable[kIndex_8_GrPixelConfig].fFormats.fSizedInternalFormat = GR_GL_PALETTE8_RGBA8;
+ fConfigTable[kIndex_8_GrPixelConfig].fFormats.fExternalFormat[kOther_ExternalFormatUsage] = 0;
+ fConfigTable[kIndex_8_GrPixelConfig].fFormats.fExternalType = 0;
+ fConfigTable[kIndex_8_GrPixelConfig].fFormatType = kNormalizedFixedPoint_FormatType;
+ // Disable this for now, while we investigate https://bug.skia.org/4333
+ if (false) {
+ // Check for 8-bit palette..
+ GrGLint numFormats;
+ GR_GL_GetIntegerv(gli, GR_GL_NUM_COMPRESSED_TEXTURE_FORMATS, &numFormats);
+ if (numFormats) {
+ SkAutoSTMalloc<10, GrGLint> formats(numFormats);
+ GR_GL_GetIntegerv(gli, GR_GL_COMPRESSED_TEXTURE_FORMATS, formats);
+ for (int i = 0; i < numFormats; ++i) {
+ if (GR_GL_PALETTE8_RGBA8 == formats[i]) {
+ fConfigTable[kIndex_8_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag;
+ break;
+ }
+ }
+ }
+ }
+ fConfigTable[kIndex_8_GrPixelConfig].fSwizzle = GrSwizzle::RGBA();
+
+ // May change the internal format based on extensions.
+ fConfigTable[kLATC_GrPixelConfig].fFormats.fBaseInternalFormat =
+ GR_GL_COMPRESSED_LUMINANCE_LATC1;
+ fConfigTable[kLATC_GrPixelConfig].fFormats.fSizedInternalFormat =
+ GR_GL_COMPRESSED_LUMINANCE_LATC1;
+ if (ctxInfo.hasExtension("GL_EXT_texture_compression_latc") ||
+ ctxInfo.hasExtension("GL_NV_texture_compression_latc")) {
+ fConfigTable[kLATC_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag;
+ } else if ((kGL_GrGLStandard == standard && version >= GR_GL_VER(3, 0)) ||
+ ctxInfo.hasExtension("GL_EXT_texture_compression_rgtc") ||
+ ctxInfo.hasExtension("GL_ARB_texture_compression_rgtc")) {
+ // RGTC is identical to LATC and is available on OpenGL 3.0+ as well as via extensions.
+ fConfigTable[kLATC_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag;
+ fConfigTable[kLATC_GrPixelConfig].fFormats.fBaseInternalFormat =
+ GR_GL_COMPRESSED_RED_RGTC1;
+ fConfigTable[kLATC_GrPixelConfig].fFormats.fSizedInternalFormat =
+ GR_GL_COMPRESSED_RED_RGTC1;
+ } else if (ctxInfo.hasExtension("GL_AMD_compressed_3DC_texture")) {
+ fConfigTable[kLATC_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag;
+ fConfigTable[kLATC_GrPixelConfig].fFormats.fBaseInternalFormat = GR_GL_COMPRESSED_3DC_X;
+        fConfigTable[kLATC_GrPixelConfig].fFormats.fSizedInternalFormat =
+            GR_GL_COMPRESSED_3DC_X;
+    }
+ fConfigTable[kLATC_GrPixelConfig].fFormats.fExternalFormat[kOther_ExternalFormatUsage] = 0;
+ fConfigTable[kLATC_GrPixelConfig].fFormats.fExternalType = 0;
+ fConfigTable[kLATC_GrPixelConfig].fFormatType = kNormalizedFixedPoint_FormatType;
+ fConfigTable[kLATC_GrPixelConfig].fSwizzle = GrSwizzle::RRRR();
+
+ fConfigTable[kETC1_GrPixelConfig].fFormats.fBaseInternalFormat = GR_GL_COMPRESSED_ETC1_RGB8;
+ fConfigTable[kETC1_GrPixelConfig].fFormats.fSizedInternalFormat = GR_GL_COMPRESSED_ETC1_RGB8;
+ fConfigTable[kETC1_GrPixelConfig].fFormats.fExternalFormat[kOther_ExternalFormatUsage] = 0;
+ fConfigTable[kETC1_GrPixelConfig].fFormats.fExternalType = 0;
+ fConfigTable[kETC1_GrPixelConfig].fFormatType = kNormalizedFixedPoint_FormatType;
+ if (kGL_GrGLStandard == standard) {
+ if (version >= GR_GL_VER(4, 3) || ctxInfo.hasExtension("GL_ARB_ES3_compatibility")) {
+ fConfigTable[kETC1_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag;
+ }
+ } else {
+ if (version >= GR_GL_VER(3, 0) ||
+ ctxInfo.hasExtension("GL_OES_compressed_ETC1_RGB8_texture") ||
+ // ETC2 is a superset of ETC1, so we can just check for that, too.
+ (ctxInfo.hasExtension("GL_OES_compressed_ETC2_RGB8_texture") &&
+ ctxInfo.hasExtension("GL_OES_compressed_ETC2_RGBA8_texture"))) {
+ fConfigTable[kETC1_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag;
+ }
+ }
+ fConfigTable[kETC1_GrPixelConfig].fSwizzle = GrSwizzle::RGBA();
+
+ fConfigTable[kR11_EAC_GrPixelConfig].fFormats.fBaseInternalFormat = GR_GL_COMPRESSED_R11_EAC;
+ fConfigTable[kR11_EAC_GrPixelConfig].fFormats.fSizedInternalFormat = GR_GL_COMPRESSED_R11_EAC;
+ fConfigTable[kR11_EAC_GrPixelConfig].fFormats.fExternalFormat[kOther_ExternalFormatUsage] = 0;
+ fConfigTable[kR11_EAC_GrPixelConfig].fFormats.fExternalType = 0;
+ fConfigTable[kR11_EAC_GrPixelConfig].fFormatType = kNormalizedFixedPoint_FormatType;
+    // Check for R11_EAC. We don't support R11_EAC on desktop, as most cards default to
+    // decompressing the textures in the driver, which is generally slower.
+ if (kGLES_GrGLStandard == standard && version >= GR_GL_VER(3,0)) {
+ fConfigTable[kR11_EAC_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag;
+ }
+ fConfigTable[kR11_EAC_GrPixelConfig].fSwizzle = GrSwizzle::RRRR();
+
+ fConfigTable[kASTC_12x12_GrPixelConfig].fFormats.fBaseInternalFormat =
+ GR_GL_COMPRESSED_RGBA_ASTC_12x12;
+ fConfigTable[kASTC_12x12_GrPixelConfig].fFormats.fSizedInternalFormat =
+ GR_GL_COMPRESSED_RGBA_ASTC_12x12;
+ fConfigTable[kASTC_12x12_GrPixelConfig].fFormats.fExternalFormat[kOther_ExternalFormatUsage] =
+ 0;
+ fConfigTable[kASTC_12x12_GrPixelConfig].fFormats.fExternalType = 0;
+ fConfigTable[kASTC_12x12_GrPixelConfig].fFormatType = kNormalizedFixedPoint_FormatType;
+ if (ctxInfo.hasExtension("GL_KHR_texture_compression_astc_hdr") ||
+ ctxInfo.hasExtension("GL_KHR_texture_compression_astc_ldr") ||
+ ctxInfo.hasExtension("GL_OES_texture_compression_astc")) {
+ fConfigTable[kASTC_12x12_GrPixelConfig].fFlags = ConfigInfo::kTextureable_Flag;
+ }
+ fConfigTable[kASTC_12x12_GrPixelConfig].fSwizzle = GrSwizzle::RGBA();
+
+ // Bulk populate the texture internal/external formats here and then deal with exceptions below.
+
+ // ES 2.0 requires that the internal/external formats match.
+ bool useSizedTexFormats = (kGL_GrGLStandard == ctxInfo.standard() ||
+ ctxInfo.version() >= GR_GL_VER(3,0));
+ // All ES versions (thus far) require sized internal formats for render buffers.
+ // TODO: Always use sized internal format?
+ bool useSizedRbFormats = kGLES_GrGLStandard == ctxInfo.standard();
+
+ for (int i = 0; i < kGrPixelConfigCnt; ++i) {
+ // Almost always we want to pass fExternalFormat[kOther_ExternalFormatUsage] as the <format>
+ // param to glTex[Sub]Image.
+ fConfigTable[i].fFormats.fExternalFormat[kTexImage_ExternalFormatUsage] =
+ fConfigTable[i].fFormats.fExternalFormat[kOther_ExternalFormatUsage];
+ fConfigTable[i].fFormats.fInternalFormatTexImage = useSizedTexFormats ?
+ fConfigTable[i].fFormats.fSizedInternalFormat :
+ fConfigTable[i].fFormats.fBaseInternalFormat;
+ fConfigTable[i].fFormats.fInternalFormatRenderbuffer = useSizedRbFormats ?
+ fConfigTable[i].fFormats.fSizedInternalFormat :
+ fConfigTable[i].fFormats.fBaseInternalFormat;
+ }
+ // OpenGL ES 2.0 + GL_EXT_sRGB allows GL_SRGB_ALPHA to be specified as the <format>
+ // param to Tex(Sub)Image. ES 2.0 requires the <internalFormat> and <format> params to match.
+ // Thus, on ES 2.0 we will use GL_SRGB_ALPHA as the <format> param.
+ // On OpenGL and ES 3.0+ GL_SRGB_ALPHA does not work for the <format> param to glTexImage.
+ if (ctxInfo.standard() == kGLES_GrGLStandard && ctxInfo.version() == GR_GL_VER(2,0)) {
+ fConfigTable[kSRGBA_8888_GrPixelConfig].fFormats.fExternalFormat[kTexImage_ExternalFormatUsage] =
+ GR_GL_SRGB_ALPHA;
+
+ // Additionally, because we had to "invent" sBGRA, there is no way to make it work
+ // in ES 2.0, because there is no <internalFormat> we can use. So just make that format
+ // unsupported. (If we have no sRGB support at all, this will get overwritten below).
+ fConfigTable[kSBGRA_8888_GrPixelConfig].fFlags = 0;
+ }
+
+ // If BGRA is supported as an internal format it must always be specified to glTex[Sub]Image
+ // as a base format.
+ // GL_EXT_texture_format_BGRA8888:
+    //      This extension adds GL_BGRA as an unsized internal format. However, it is written against ES
+ // 2.0 and therefore doesn't define a value for GL_BGRA8 as ES 2.0 uses unsized internal
+ // formats.
+ // GL_APPLE_texture_format_BGRA8888:
+ // ES 2.0: the extension makes BGRA an external format but not an internal format.
+ // ES 3.0: the extension explicitly states GL_BGRA8 is not a valid internal format for
+ // glTexImage (just for glTexStorage).
+ if (useSizedTexFormats && this->bgraIsInternalFormat()) {
+ fConfigTable[kBGRA_8888_GrPixelConfig].fFormats.fInternalFormatTexImage = GR_GL_BGRA;
+ }
+
+ // If we don't have texture swizzle support then the shader generator must insert the
+ // swizzle into shader code.
+ if (!this->textureSwizzleSupport()) {
+ for (int i = 0; i < kGrPixelConfigCnt; ++i) {
+ glslCaps->fConfigTextureSwizzle[i] = fConfigTable[i].fSwizzle;
+ }
+ }
+
+    // Shader output swizzles will default to RGBA. When we've used GL_RED instead of GL_ALPHA to
+ // implement kAlpha_8_GrPixelConfig we need to swizzle the shader outputs so the alpha channel
+ // gets written to the single component.
+ if (this->textureRedSupport()) {
+ for (int i = 0; i < kGrPixelConfigCnt; ++i) {
+ GrPixelConfig config = static_cast<GrPixelConfig>(i);
+ if (GrPixelConfigIsAlphaOnly(config) &&
+ fConfigTable[i].fFormats.fBaseInternalFormat == GR_GL_RED) {
+ glslCaps->fConfigOutputSwizzle[i] = GrSwizzle::AAAA();
+ }
+ }
+ }
+
+#ifdef SK_DEBUG
+ // Make sure we initialized everything.
+ ConfigInfo defaultEntry;
+ for (int i = 0; i < kGrPixelConfigCnt; ++i) {
+ SkASSERT(defaultEntry.fFormats.fBaseInternalFormat !=
+ fConfigTable[i].fFormats.fBaseInternalFormat);
+ SkASSERT(defaultEntry.fFormats.fSizedInternalFormat !=
+ fConfigTable[i].fFormats.fSizedInternalFormat);
+ for (int j = 0; j < kExternalFormatUsageCnt; ++j) {
+ SkASSERT(defaultEntry.fFormats.fExternalFormat[j] !=
+ fConfigTable[i].fFormats.fExternalFormat[j]);
+ }
+ SkASSERT(defaultEntry.fFormats.fExternalType != fConfigTable[i].fFormats.fExternalType);
+ }
+#endif
+}
+
+void GrGLCaps::onApplyOptionsOverrides(const GrContextOptions& options) {
+ if (options.fEnableInstancedRendering) {
+ fInstancedSupport = gr_instanced::GLInstancedRendering::CheckSupport(*this);
+#ifndef SK_BUILD_FOR_MAC
+ // OS X doesn't seem to write correctly to floating point textures when using
+ // glDraw*Indirect, regardless of the underlying GPU.
+ fAvoidInstancedDrawsToFPTargets = true;
+#endif
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLCaps.h b/gfx/skia/skia/src/gpu/gl/GrGLCaps.h
new file mode 100644
index 000000000..f0b09407a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLCaps.h
@@ -0,0 +1,498 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGLCaps_DEFINED
+#define GrGLCaps_DEFINED
+
+#include <functional>
+
+#include "glsl/GrGLSL.h"
+#include "GrCaps.h"
+#include "GrGLStencilAttachment.h"
+#include "GrSwizzle.h"
+#include "SkChecksum.h"
+#include "SkTHash.h"
+#include "SkTArray.h"
+
+class GrGLContextInfo;
+class GrGLSLCaps;
+class GrGLRenderTarget;
+
+/**
+ * Stores some capabilities of a GL context. Most are determined by the GL
+ * version and the extensions string. It also tracks formats that have passed
+ * the FBO completeness test.
+ */
+class GrGLCaps : public GrCaps {
+public:
+ typedef GrGLStencilAttachment::Format StencilFormat;
+
+ /**
+     * The type of MSAA support for FBOs. Different extensions have different
+     * semantics for how/when a resolve is performed.
+ */
+ enum MSFBOType {
+ /**
+ * no support for MSAA FBOs
+ */
+ kNone_MSFBOType = 0,
+ /**
+ * GL3.0-style MSAA FBO (GL_ARB_framebuffer_object).
+ */
+ kDesktop_ARB_MSFBOType,
+ /**
+ * earlier GL_EXT_framebuffer* extensions
+ */
+ kDesktop_EXT_MSFBOType,
+ /**
+ * Similar to kDesktop_ARB but with additional restrictions on glBlitFramebuffer.
+ */
+ kES_3_0_MSFBOType,
+ /**
+ * GL_APPLE_framebuffer_multisample ES extension
+ */
+ kES_Apple_MSFBOType,
+ /**
+ * GL_IMG_multisampled_render_to_texture. This variation does not have MSAA renderbuffers.
+ * Instead the texture is multisampled when bound to the FBO and then resolved automatically
+ * when read. It also defines an alternate value for GL_MAX_SAMPLES (which we call
+ * GR_GL_MAX_SAMPLES_IMG).
+ */
+ kES_IMG_MsToTexture_MSFBOType,
+ /**
+ * GL_EXT_multisampled_render_to_texture. Same as the IMG one above but uses the standard
+ * GL_MAX_SAMPLES value.
+ */
+ kES_EXT_MsToTexture_MSFBOType,
+ /**
+ * GL_NV_framebuffer_mixed_samples.
+ */
+ kMixedSamples_MSFBOType,
+
+ kLast_MSFBOType = kMixedSamples_MSFBOType
+ };
+
+ enum BlitFramebufferSupport {
+ kNone_BlitFramebufferSupport,
+ /**
+ * ANGLE exposes a limited blit framebuffer extension that does not allow for stretching
+ * or mirroring.
+ */
+ kNoScalingNoMirroring_BlitFramebufferSupport,
+ kFull_BlitFramebufferSupport
+ };
+
+ enum InvalidateFBType {
+ kNone_InvalidateFBType,
+        kDiscard_InvalidateFBType,    //!< glDiscardFramebuffer()
+        kInvalidate_InvalidateFBType, //!< glInvalidateFramebuffer()
+
+ kLast_InvalidateFBType = kInvalidate_InvalidateFBType
+ };
+
+ enum MapBufferType {
+ kNone_MapBufferType,
+ kMapBuffer_MapBufferType, // glMapBuffer()
+ kMapBufferRange_MapBufferType, // glMapBufferRange()
+ kChromium_MapBufferType, // GL_CHROMIUM_map_sub
+
+ kLast_MapBufferType = kChromium_MapBufferType,
+ };
+
+ enum TransferBufferType {
+ kNone_TransferBufferType,
+ kPBO_TransferBufferType, // ARB_pixel_buffer_object
+ kChromium_TransferBufferType, // CHROMIUM_pixel_transfer_buffer_object
+
+ kLast_TransferBufferType = kChromium_TransferBufferType,
+ };
+
+ /**
+ * Initializes the GrGLCaps to the set of features supported in the current
+ * OpenGL context accessible via ctxInfo.
+ */
+ GrGLCaps(const GrContextOptions& contextOptions, const GrGLContextInfo& ctxInfo,
+ const GrGLInterface* glInterface);
+
+ bool isConfigTexturable(GrPixelConfig config) const override {
+ return SkToBool(fConfigTable[config].fFlags & ConfigInfo::kTextureable_Flag);
+ }
+
+ bool isConfigRenderable(GrPixelConfig config, bool withMSAA) const override {
+ if (withMSAA) {
+ return SkToBool(fConfigTable[config].fFlags & ConfigInfo::kRenderableWithMSAA_Flag);
+ } else {
+ return SkToBool(fConfigTable[config].fFlags & ConfigInfo::kRenderable_Flag);
+ }
+ }
+
+ bool isConfigTexSupportEnabled(GrPixelConfig config) const {
+ return SkToBool(fConfigTable[config].fFlags & ConfigInfo::kCanUseTexStorage_Flag);
+ }
+
+ bool canUseConfigWithTexelBuffer(GrPixelConfig config) const {
+ return SkToBool(fConfigTable[config].fFlags & ConfigInfo::kCanUseWithTexelBuffer_Flag);
+ }
+
+ /** Returns the mapping between GrPixelConfig components and GL internal format components. */
+ const GrSwizzle& configSwizzle(GrPixelConfig config) const {
+ return fConfigTable[config].fSwizzle;
+ }
+
+ GrGLenum configSizedInternalFormat(GrPixelConfig config) const {
+ return fConfigTable[config].fFormats.fSizedInternalFormat;
+ }
+
+ bool getTexImageFormats(GrPixelConfig surfaceConfig, GrPixelConfig externalConfig,
+ GrGLenum* internalFormat, GrGLenum* externalFormat,
+ GrGLenum* externalType) const;
+
+ bool getCompressedTexImageFormats(GrPixelConfig surfaceConfig, GrGLenum* internalFormat) const;
+
+ bool getReadPixelsFormat(GrPixelConfig surfaceConfig, GrPixelConfig externalConfig,
+ GrGLenum* externalFormat, GrGLenum* externalType) const;
+
+ bool getRenderbufferFormat(GrPixelConfig config, GrGLenum* internalFormat) const;
+
+ /**
+ * Gets an array of legal stencil formats. These formats are not guaranteed
+ * to be supported by the driver but are legal GLenum names given the GL
+ * version and extensions supported.
+ */
+ const SkTArray<StencilFormat, true>& stencilFormats() const {
+ return fStencilFormats;
+ }
+
+ /**
+ * Has a stencil format index been found for the config (or we've found that no format works).
+ */
+ bool hasStencilFormatBeenDeterminedForConfig(GrPixelConfig config) const {
+ return fConfigTable[config].fStencilFormatIndex != ConfigInfo::kUnknown_StencilIndex;
+ }
+
+ /**
+ * Gets the stencil format index for the config. This assumes
+ * hasStencilFormatBeenDeterminedForConfig has already been checked. Returns a value < 0 if
+     * no stencil format is supported with the config. Otherwise, the returned index refers to the array
+ * returned by stencilFormats().
+ */
+ int getStencilFormatIndexForConfig(GrPixelConfig config) const {
+ SkASSERT(this->hasStencilFormatBeenDeterminedForConfig(config));
+ return fConfigTable[config].fStencilFormatIndex;
+ }
+
+ /**
+ * If index is >= 0 this records an index into stencilFormats() as the best stencil format for
+ * the config. If < 0 it records that the config has no supported stencil format index.
+ */
+ void setStencilFormatIndexForConfig(GrPixelConfig config, int index) {
+ SkASSERT(!this->hasStencilFormatBeenDeterminedForConfig(config));
+ if (index < 0) {
+ fConfigTable[config].fStencilFormatIndex = ConfigInfo::kUnsupported_StencilFormatIndex;
+ } else {
+ fConfigTable[config].fStencilFormatIndex = index;
+ }
+ }
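+
+    // Typical probe-and-record flow (a minimal sketch; 'caps' is a populated GrGLCaps and the
+    // actual FBO probing is assumed to happen elsewhere):
+    //   if (!caps->hasStencilFormatBeenDeterminedForConfig(config)) {
+    //       int index = -1;  // ... try attaching entries from caps->stencilFormats() ...
+    //       caps->setStencilFormatIndexForConfig(config, index);
+    //   }
+    //   if (caps->getStencilFormatIndexForConfig(config) >= 0) {
+    //       const StencilFormat& fmt =
+    //               caps->stencilFormats()[caps->getStencilFormatIndexForConfig(config)];
+    //   }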
+
+ /**
+ * Call to note that a color config has been verified as a valid color
+ * attachment. This may save future calls to glCheckFramebufferStatus
+ * using isConfigVerifiedColorAttachment().
+ */
+ void markConfigAsValidColorAttachment(GrPixelConfig config) {
+ fConfigTable[config].fFlags |= ConfigInfo::kVerifiedColorAttachment_Flag;
+ }
+
+ /**
+ * Call to check whether a config has been verified as a valid color
+ * attachment.
+ */
+ bool isConfigVerifiedColorAttachment(GrPixelConfig config) const {
+ return SkToBool(fConfigTable[config].fFlags & ConfigInfo::kVerifiedColorAttachment_Flag);
+ }
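+
+    // Typical usage (a minimal sketch; the framebuffer-completeness probe itself lives elsewhere):
+    //   if (!caps->isConfigVerifiedColorAttachment(config)) {
+    //       // ... attach a surface of 'config' and call glCheckFramebufferStatus ...
+    //       if (complete) {   // 'complete' stands in for the result of that check
+    //           caps->markConfigAsValidColorAttachment(config);
+    //       }
+    //   }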
+
+ /**
+ * Reports the type of MSAA FBO support.
+ */
+ MSFBOType msFBOType() const { return fMSFBOType; }
+
+ /**
+ * Does the preferred MSAA FBO extension have MSAA renderbuffers?
+ */
+ bool usesMSAARenderBuffers() const {
+ return kNone_MSFBOType != fMSFBOType &&
+ kES_IMG_MsToTexture_MSFBOType != fMSFBOType &&
+ kES_EXT_MsToTexture_MSFBOType != fMSFBOType &&
+ kMixedSamples_MSFBOType != fMSFBOType;
+ }
+
+ /**
+ * What functionality is supported by glBlitFramebuffer.
+ */
+ BlitFramebufferSupport blitFramebufferSupport() const { return fBlitFramebufferSupport; }
+
+ /**
+ * Is the MSAA FBO extension one where the texture is multisampled when bound to an FBO and
+ * then implicitly resolved when read.
+ */
+ bool usesImplicitMSAAResolve() const {
+ return kES_IMG_MsToTexture_MSFBOType == fMSFBOType ||
+ kES_EXT_MsToTexture_MSFBOType == fMSFBOType;
+ }
+
+ InvalidateFBType invalidateFBType() const { return fInvalidateFBType; }
+
+ /// What type of buffer mapping is supported?
+ MapBufferType mapBufferType() const { return fMapBufferType; }
+
+ /// What type of transfer buffer is supported?
+ TransferBufferType transferBufferType() const { return fTransferBufferType; }
+
+ /// The maximum number of fragment uniform vectors (GLES has min. 16).
+ int maxFragmentUniformVectors() const { return fMaxFragmentUniformVectors; }
+
+ /**
+     * Depending on the ES extensions present, the BGRA external format may
+ * correspond to either a BGRA or RGBA internalFormat. On desktop GL it is
+ * RGBA.
+ */
+ bool bgraIsInternalFormat() const;
+
+ /// Is there support for GL_UNPACK_ROW_LENGTH
+ bool unpackRowLengthSupport() const { return fUnpackRowLengthSupport; }
+
+ /// Is there support for GL_UNPACK_FLIP_Y
+ bool unpackFlipYSupport() const { return fUnpackFlipYSupport; }
+
+ /// Is there support for GL_PACK_ROW_LENGTH
+ bool packRowLengthSupport() const { return fPackRowLengthSupport; }
+
+ /// Is there support for GL_PACK_REVERSE_ROW_ORDER
+ bool packFlipYSupport() const { return fPackFlipYSupport; }
+
+ /// Is there support for texture parameter GL_TEXTURE_USAGE
+ bool textureUsageSupport() const { return fTextureUsageSupport; }
+
+ /// Is there support for GL_RED and GL_R8
+ bool textureRedSupport() const { return fTextureRedSupport; }
+
+ /// Is GL_ARB_IMAGING supported
+ bool imagingSupport() const { return fImagingSupport; }
+
+ /// Is there support for Vertex Array Objects?
+ bool vertexArrayObjectSupport() const { return fVertexArrayObjectSupport; }
+
+ /// Is there support for GL_EXT_direct_state_access?
+ bool directStateAccessSupport() const { return fDirectStateAccessSupport; }
+
+ /// Is there support for GL_KHR_debug?
+ bool debugSupport() const { return fDebugSupport; }
+
+    /// Is there support for ES2 compatibility?
+ bool ES2CompatibilitySupport() const { return fES2CompatibilitySupport; }
+
+ /// Is there support for glDraw*Instanced?
+ bool drawInstancedSupport() const { return fDrawInstancedSupport; }
+
+ /// Is there support for glDraw*Indirect? Note that the baseInstance fields of indirect draw
+ /// commands cannot be used unless we have base instance support.
+ bool drawIndirectSupport() const { return fDrawIndirectSupport; }
+
+ /// Is there support for glMultiDraw*Indirect? Note that the baseInstance fields of indirect
+ /// draw commands cannot be used unless we have base instance support.
+ bool multiDrawIndirectSupport() const { return fMultiDrawIndirectSupport; }
+
+ /// Is there support for glDrawRangeElements?
+ bool drawRangeElementsSupport() const { return fDrawRangeElementsSupport; }
+
+ /// Are the baseInstance fields supported in indirect draw commands?
+ bool baseInstanceSupport() const { return fBaseInstanceSupport; }
+
+ /// Use indices or vertices in CPU arrays rather than VBOs for dynamic content.
+ bool useNonVBOVertexAndIndexDynamicData() const { return fUseNonVBOVertexAndIndexDynamicData; }
+
+ /// Does ReadPixels support reading readConfig pixels from a FBO that is renderTargetConfig?
+ bool readPixelsSupported(GrPixelConfig renderTargetConfig,
+ GrPixelConfig readConfig,
+ std::function<void (GrGLenum, GrGLint*)> getIntegerv,
+ std::function<bool ()> bindRenderTarget) const;
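+    // Example call (a minimal sketch; 'gl' stands in for a valid GrGLInterface* and the second
+    // lambda is expected to bind the render target that would be read from):
+    //   bool ok = caps->readPixelsSupported(
+    //           kRGBA_8888_GrPixelConfig, kBGRA_8888_GrPixelConfig,
+    //           [&](GrGLenum query, GrGLint* value) { GR_GL_GetIntegerv(gl, query, value); },
+    //           [&]() { /* bind the FBO to read from */ return true; });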
+
+ bool isCoreProfile() const { return fIsCoreProfile; }
+
+ bool bindFragDataLocationSupport() const { return fBindFragDataLocationSupport; }
+
+ bool bindUniformLocationSupport() const { return fBindUniformLocationSupport; }
+
+ /// Are textures with GL_TEXTURE_RECTANGLE type supported.
+ bool rectangleTextureSupport() const { return fRectangleTextureSupport; }
+
+ /// GL_ARB_texture_swizzle
+ bool textureSwizzleSupport() const { return fTextureSwizzleSupport; }
+
+ bool mipMapLevelAndLodControlSupport() const { return fMipMapLevelAndLodControlSupport; }
+
+ bool doManualMipmapping() const { return fDoManualMipmapping; }
+
+ /**
+ * Returns a string containing the caps info.
+ */
+ SkString dump() const override;
+
+ bool rgba8888PixelsOpsAreSlow() const { return fRGBA8888PixelsOpsAreSlow; }
+ bool partialFBOReadIsSlow() const { return fPartialFBOReadIsSlow; }
+ bool rgbaToBgraReadbackConversionsAreSlow() const {
+ return fRGBAToBGRAReadbackConversionsAreSlow;
+ }
+
+ const GrGLSLCaps* glslCaps() const { return reinterpret_cast<GrGLSLCaps*>(fShaderCaps.get()); }
+
+private:
+ enum ExternalFormatUsage {
+ kTexImage_ExternalFormatUsage,
+ kOther_ExternalFormatUsage,
+
+ kLast_ExternalFormatUsage = kOther_ExternalFormatUsage
+ };
+ static const int kExternalFormatUsageCnt = kLast_ExternalFormatUsage + 1;
+ bool getExternalFormat(GrPixelConfig surfaceConfig, GrPixelConfig memoryConfig,
+ ExternalFormatUsage usage, GrGLenum* externalFormat,
+ GrGLenum* externalType) const;
+
+ void init(const GrContextOptions&, const GrGLContextInfo&, const GrGLInterface*);
+ void initGLSL(const GrGLContextInfo&);
+ bool hasPathRenderingSupport(const GrGLContextInfo&, const GrGLInterface*);
+
+ void onApplyOptionsOverrides(const GrContextOptions& options) override;
+
+ void initFSAASupport(const GrGLContextInfo&, const GrGLInterface*);
+ void initBlendEqationSupport(const GrGLContextInfo&);
+ void initStencilFormats(const GrGLContextInfo&);
+ // This must be called after initFSAASupport().
+ void initConfigTable(const GrGLContextInfo&, const GrGLInterface* gli, GrGLSLCaps* glslCaps);
+
+ void initShaderPrecisionTable(const GrGLContextInfo& ctxInfo,
+ const GrGLInterface* intf,
+ GrGLSLCaps* glslCaps);
+
+ GrGLStandard fStandard;
+
+ SkTArray<StencilFormat, true> fStencilFormats;
+
+ int fMaxFragmentUniformVectors;
+
+ MSFBOType fMSFBOType;
+ InvalidateFBType fInvalidateFBType;
+ MapBufferType fMapBufferType;
+ TransferBufferType fTransferBufferType;
+
+ bool fUnpackRowLengthSupport : 1;
+ bool fUnpackFlipYSupport : 1;
+ bool fPackRowLengthSupport : 1;
+ bool fPackFlipYSupport : 1;
+ bool fTextureUsageSupport : 1;
+ bool fTextureRedSupport : 1;
+ bool fImagingSupport : 1;
+ bool fVertexArrayObjectSupport : 1;
+ bool fDirectStateAccessSupport : 1;
+ bool fDebugSupport : 1;
+ bool fES2CompatibilitySupport : 1;
+ bool fDrawInstancedSupport : 1;
+ bool fDrawIndirectSupport : 1;
+ bool fDrawRangeElementsSupport : 1;
+ bool fMultiDrawIndirectSupport : 1;
+ bool fBaseInstanceSupport : 1;
+ bool fUseNonVBOVertexAndIndexDynamicData : 1;
+ bool fIsCoreProfile : 1;
+ bool fBindFragDataLocationSupport : 1;
+ bool fRGBA8888PixelsOpsAreSlow : 1;
+ bool fPartialFBOReadIsSlow : 1;
+ bool fBindUniformLocationSupport : 1;
+ bool fRectangleTextureSupport : 1;
+ bool fTextureSwizzleSupport : 1;
+ bool fMipMapLevelAndLodControlSupport : 1;
+ bool fRGBAToBGRAReadbackConversionsAreSlow : 1;
+ bool fDoManualMipmapping : 1;
+
+ BlitFramebufferSupport fBlitFramebufferSupport;
+
+    /** Number type of the components (without considering the number of bits). */
+ enum FormatType {
+ kNormalizedFixedPoint_FormatType,
+ kFloat_FormatType,
+ };
+
+ struct ReadPixelsFormat {
+ ReadPixelsFormat() : fFormat(0), fType(0) {}
+ GrGLenum fFormat;
+ GrGLenum fType;
+ };
+
+ struct ConfigFormats {
+ ConfigFormats() {
+ // Inits to known bad GL enum values.
+ memset(this, 0xAB, sizeof(ConfigFormats));
+ }
+ GrGLenum fBaseInternalFormat;
+ GrGLenum fSizedInternalFormat;
+
+ /** The external format and type are to be used when uploading/downloading data using this
+ config where both the CPU data and GrSurface are the same config. To get the external
+ format and type when converting between configs while copying to/from memory use
+ getExternalFormat().
+ The kTexImage external format is usually the same as kOther except for kSRGBA on some
+ GL contexts. */
+ GrGLenum fExternalFormat[kExternalFormatUsageCnt];
+ GrGLenum fExternalType;
+
+
+ // Either the base or sized internal format depending on the GL and config.
+ GrGLenum fInternalFormatTexImage;
+ GrGLenum fInternalFormatRenderbuffer;
+ };
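+
+    // Callers normally go through the public helpers rather than reading these fields directly.
+    // A minimal sketch (assuming 'caps' is a fully initialized GrGLCaps):
+    //   GrGLenum internalFormat, externalFormat, externalType;
+    //   if (caps->getTexImageFormats(kRGBA_8888_GrPixelConfig, kRGBA_8888_GrPixelConfig,
+    //                                &internalFormat, &externalFormat, &externalType)) {
+    //       // values suitable for glTexImage2D / glTexSubImage2D with this config
+    //   }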
+
+ struct ConfigInfo {
+ ConfigInfo() : fStencilFormatIndex(kUnknown_StencilIndex), fFlags(0) {}
+
+ ConfigFormats fFormats;
+
+ FormatType fFormatType;
+
+        // On ES contexts there are restrictions on the type/format that may be used for
+ // ReadPixels. One is implicitly specified by the current FBO's format. The other is
+ // queryable. This stores the queried option (lazily).
+ ReadPixelsFormat fSecondReadPixelsFormat;
+
+ enum {
+ // This indicates that a stencil format has not yet been determined for the config.
+ kUnknown_StencilIndex = -1,
+ // This indicates that there is no supported stencil format for the config.
+ kUnsupported_StencilFormatIndex = -2
+ };
+
+        // Index into fStencilFormats.
+ int fStencilFormatIndex;
+
+ enum {
+ kVerifiedColorAttachment_Flag = 0x1,
+ kTextureable_Flag = 0x2,
+ kRenderable_Flag = 0x4,
+ kRenderableWithMSAA_Flag = 0x8,
+ kCanUseTexStorage_Flag = 0x10,
+ kCanUseWithTexelBuffer_Flag = 0x20,
+ };
+ uint32_t fFlags;
+
+ GrSwizzle fSwizzle;
+ };
+
+ ConfigInfo fConfigTable[kGrPixelConfigCnt];
+
+ typedef GrCaps INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLContext.cpp b/gfx/skia/skia/src/gpu/gl/GrGLContext.cpp
new file mode 100644
index 000000000..9e70b472c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLContext.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLContext.h"
+#include "GrGLGLSL.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrGLContext* GrGLContext::Create(const GrGLInterface* interface, const GrContextOptions& options) {
+ // We haven't validated the GrGLInterface yet, so check for GetString function pointer
+ if (!interface->fFunctions.fGetString) {
+ return nullptr;
+ }
+ ConstructorArgs args;
+ args.fInterface = interface;
+
+ const GrGLubyte* verUByte;
+ GR_GL_CALL_RET(interface, verUByte, GetString(GR_GL_VERSION));
+ const char* ver = reinterpret_cast<const char*>(verUByte);
+
+ const GrGLubyte* rendererUByte;
+ GR_GL_CALL_RET(interface, rendererUByte, GetString(GR_GL_RENDERER));
+ const char* renderer = reinterpret_cast<const char*>(rendererUByte);
+
+ if (!interface->validate()) {
+ return nullptr;
+ }
+
+ args.fGLVersion = GrGLGetVersionFromString(ver);
+ if (GR_GL_INVALID_VER == args.fGLVersion) {
+ return nullptr;
+ }
+
+ if (!GrGLGetGLSLGeneration(interface, &args.fGLSLGeneration)) {
+ return nullptr;
+ }
+
+ args.fVendor = GrGLGetVendor(interface);
+
+ args.fRenderer = GrGLGetRendererFromString(renderer);
+
+ /*
+     * Some Qualcomm drivers for the Adreno 3xx series have a horrendous bug. Though they
+     * claim to support GLES 3.00, some perfectly valid GLSL 3.00 shaders will only compile with
+ * #version 100, and will fail to compile with #version 300 es. In the long term, we
+ * need to lock this down to a specific driver version.
+ * ?????/2015 - This bug is still present in Lollipop pre-mr1
+ * 06/18/2015 - This bug does not affect the nexus 6 (which has an Adreno 4xx).
+ */
+ if (kAdreno3xx_GrGLRenderer == args.fRenderer) {
+ args.fGLSLGeneration = k110_GrGLSLGeneration;
+ }
+
+ GrGLGetDriverInfo(interface->fStandard, args.fVendor, renderer, ver,
+ &args.fDriver, &args.fDriverVersion);
+
+ args.fContextOptions = &options;
+
+ return new GrGLContext(args);
+}
+
+GrGLContextInfo::GrGLContextInfo(const ConstructorArgs& args) {
+ fInterface.reset(SkRef(args.fInterface));
+ fGLVersion = args.fGLVersion;
+ fGLSLGeneration = args.fGLSLGeneration;
+ fVendor = args.fVendor;
+ fRenderer = args.fRenderer;
+ fDriver = args.fDriver;
+ fDriverVersion = args.fDriverVersion;
+
+ fGLCaps.reset(new GrGLCaps(*args.fContextOptions, *this, fInterface));
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLContext.h b/gfx/skia/skia/src/gpu/gl/GrGLContext.h
new file mode 100644
index 000000000..6016f6859
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLContext.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGLContext_DEFINED
+#define GrGLContext_DEFINED
+
+#include "gl/GrGLExtensions.h"
+#include "gl/GrGLInterface.h"
+#include "GrGLCaps.h"
+#include "GrGLUtil.h"
+
+struct GrContextOptions;
+
+/**
+ * Encapsulates information about an OpenGL context including the OpenGL
+ * version, the GrGLStandard type of the context, and the GLSL version.
+ */
+class GrGLContextInfo : public SkRefCnt {
+public:
+ GrGLStandard standard() const { return fInterface->fStandard; }
+ GrGLVersion version() const { return fGLVersion; }
+ GrGLSLGeneration glslGeneration() const { return fGLSLGeneration; }
+ GrGLVendor vendor() const { return fVendor; }
+ GrGLRenderer renderer() const { return fRenderer; }
+ /** What driver is running our GL implementation? This is not necessarily related to the vendor.
+ (e.g. Intel GPU being driven by Mesa) */
+ GrGLDriver driver() const { return fDriver; }
+ GrGLDriverVersion driverVersion() const { return fDriverVersion; }
+ const GrGLCaps* caps() const { return fGLCaps.get(); }
+ GrGLCaps* caps() { return fGLCaps; }
+ bool hasExtension(const char* ext) const {
+ return fInterface->hasExtension(ext);
+ }
+
+ const GrGLExtensions& extensions() const { return fInterface->fExtensions; }
+
+protected:
+ struct ConstructorArgs {
+ const GrGLInterface* fInterface;
+ GrGLVersion fGLVersion;
+ GrGLSLGeneration fGLSLGeneration;
+ GrGLVendor fVendor;
+ GrGLRenderer fRenderer;
+ GrGLDriver fDriver;
+ GrGLDriverVersion fDriverVersion;
+ const GrContextOptions* fContextOptions;
+ };
+
+ GrGLContextInfo(const ConstructorArgs& args);
+
+ SkAutoTUnref<const GrGLInterface> fInterface;
+ GrGLVersion fGLVersion;
+ GrGLSLGeneration fGLSLGeneration;
+ GrGLVendor fVendor;
+ GrGLRenderer fRenderer;
+ GrGLDriver fDriver;
+ GrGLDriverVersion fDriverVersion;
+ SkAutoTUnref<GrGLCaps> fGLCaps;
+};
+
+/**
+ * Extension of GrGLContextInfo that also provides access to GrGLInterface.
+ */
+class GrGLContext : public GrGLContextInfo {
+public:
+ /**
+ * Creates a GrGLContext from a GrGLInterface and the currently
+ * bound OpenGL context accessible by the GrGLInterface.
+ */
+ static GrGLContext* Create(const GrGLInterface* interface, const GrContextOptions& options);
+
+ const GrGLInterface* interface() const { return fInterface; }
+
+private:
+ GrGLContext(const ConstructorArgs& args) : INHERITED(args) {}
+
+ typedef GrGLContextInfo INHERITED;
+};
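+
+// Minimal usage sketch (assumes a GL backend whose GrGLCreateNativeInterface() returns a
+// non-null interface for the currently bound context):
+//   const GrGLInterface* iface = GrGLCreateNativeInterface();
+//   if (iface) {
+//       GrContextOptions options;
+//       if (GrGLContext* ctx = GrGLContext::Create(iface, options)) {
+//           const GrGLCaps* caps = ctx->caps();
+//           SkDebugf("%s\n", caps->dump().c_str());
+//           ctx->unref();
+//       }
+//       iface->unref();
+//   }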
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLCreateNativeInterface_none.cpp b/gfx/skia/skia/src/gpu/gl/GrGLCreateNativeInterface_none.cpp
new file mode 100644
index 000000000..0a0f279f1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLCreateNativeInterface_none.cpp
@@ -0,0 +1,12 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "gl/GrGLInterface.h"
+
+const GrGLInterface* GrGLCreateNativeInterface() {
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLCreateNullInterface.cpp b/gfx/skia/skia/src/gpu/gl/GrGLCreateNullInterface.cpp
new file mode 100644
index 000000000..61f638b68
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLCreateNullInterface.cpp
@@ -0,0 +1,822 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrNonAtomicRef.h"
+#include "gl/GrGLInterface.h"
+#include "GrGLTestInterface.h"
+#include "SkMutex.h"
+#include "SkTDArray.h"
+#include <type_traits>
+
+// added to suppress 'no previous prototype' warning and because this code is duplicated in
+// SkNullGLContext.cpp
+namespace {
+
+class GLObject : public GrNonAtomicRef<GLObject> {
+public:
+ GLObject(GrGLuint id) : fID(id) {}
+ virtual ~GLObject() {}
+
+ GrGLuint id() const { return fID; }
+
+private:
+ GrGLuint fID;
+};
+
+// This class maintains a sparsely populated array of object pointers; object ids are indices
+// into the array. A short usage sketch follows the class definition.
+template<typename T> class TGLObjectManager {
+ static_assert(std::is_convertible<T*, GLObject*>::value, "T must be a subclass of GLObject");
+
+public:
+ TGLObjectManager() : fFreeListHead(kFreeListEnd) {
+ *fGLObjects.append() = nullptr; // 0 is not a valid GL object id.
+ }
+
+ ~TGLObjectManager() {
+ // nullptr out the entries that are really free list links rather than ptrs before deleting.
+ intptr_t curr = fFreeListHead;
+ while (kFreeListEnd != curr) {
+ intptr_t next = reinterpret_cast<intptr_t>(fGLObjects[SkToS32(curr)]);
+ fGLObjects[SkToS32(curr)] = nullptr;
+ curr = next;
+ }
+
+ fGLObjects.safeUnrefAll();
+ }
+
+ T* lookUp(GrGLuint id) {
+ T* object = fGLObjects[id];
+ SkASSERT(object && object->id() == id);
+ return object;
+ }
+
+ T* create() {
+ GrGLuint id;
+ T* object;
+
+ if (kFreeListEnd == fFreeListHead) {
+ // no free slots - create a new one
+ id = fGLObjects.count();
+ object = new T(id);
+ *fGLObjects.append() = object;
+ } else {
+ // grab the head of the free list and advance the head to the next free slot.
+ id = static_cast<GrGLuint>(fFreeListHead);
+ fFreeListHead = reinterpret_cast<intptr_t>(fGLObjects[id]);
+
+ object = new T(id);
+ fGLObjects[id] = object;
+ }
+
+ return object;
+ }
+
+ void free(T* object) {
+ SkASSERT(object);
+ SkASSERT(fGLObjects.count() > 0);
+
+ GrGLuint id = object->id();
+ object->unref();
+
+ fGLObjects[id] = reinterpret_cast<T*>(fFreeListHead);
+ fFreeListHead = id;
+ }
+
+private:
+ static const intptr_t kFreeListEnd = -1;
+    // Index of the first free slot in fGLObjects. Each free slot stores the index of the next
+    // free slot (reinterpreted as a pointer). The last free slot holds kFreeListEnd.
+ intptr_t fFreeListHead;
+ SkTDArray<T*> fGLObjects;
+};
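+
+// A minimal usage sketch (assumes a GLObject subclass such as the Buffer class defined below):
+//   TGLObjectManager<Buffer> manager;
+//   Buffer* buffer = manager.create();   // allocates a fresh id or reuses a freed slot
+//   GrGLuint id = buffer->id();
+//   SkASSERT(manager.lookUp(id) == buffer);
+//   manager.free(buffer);                // the id goes back on the free list for reuse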
+
+class Buffer : public GLObject {
+public:
+ Buffer(GrGLuint id) : INHERITED(id), fDataPtr(nullptr), fSize(0), fMapped(false) {}
+ ~Buffer() { delete[] fDataPtr; }
+
+    // Note: the incoming data is not copied; only an uninitialized allocation of the requested
+    // size is kept.
+    void allocate(GrGLsizeiptr size, const GrGLchar* dataPtr) {
+ if (fDataPtr) {
+ SkASSERT(0 != fSize);
+ delete[] fDataPtr;
+ }
+
+ fSize = size;
+ fDataPtr = new char[size];
+ }
+
+ GrGLchar* dataPtr() { return fDataPtr; }
+ GrGLsizeiptr size() const { return fSize; }
+
+ void setMapped(bool mapped) { fMapped = mapped; }
+ bool mapped() const { return fMapped; }
+
+private:
+ GrGLchar* fDataPtr;
+ GrGLsizeiptr fSize; // size in bytes
+ bool fMapped;
+
+ typedef GLObject INHERITED;
+};
+
+class FramebufferAttachment : public GLObject {
+public:
+ int numSamples() const { return fNumSamples; }
+
+protected:
+ FramebufferAttachment(int id) : INHERITED(id), fNumSamples(1) {}
+
+ int fNumSamples;
+
+ typedef GLObject INHERITED;
+};
+
+class Renderbuffer : public FramebufferAttachment {
+public:
+ Renderbuffer(int id) : INHERITED(id) {}
+ void setNumSamples(int numSamples) { fNumSamples = numSamples; }
+
+private:
+ typedef FramebufferAttachment INHERITED;
+};
+
+class Texture : public FramebufferAttachment {
+public:
+ Texture() : INHERITED(1) {}
+
+private:
+ typedef FramebufferAttachment INHERITED;
+};
+
+class Framebuffer : public GLObject {
+public:
+ Framebuffer(int id) : INHERITED(id) {}
+
+ void setAttachment(GrGLenum attachmentPoint, const FramebufferAttachment* attachment) {
+ switch (attachmentPoint) {
+ default:
+ SK_ABORT("Invalid framebuffer attachment.");
+ break;
+ case GR_GL_STENCIL_ATTACHMENT:
+ fAttachments[(int)AttachmentPoint::kStencil].reset(SkRef(attachment));
+ break;
+ case GR_GL_DEPTH_ATTACHMENT:
+ fAttachments[(int)AttachmentPoint::kDepth].reset(SkRef(attachment));
+ break;
+ case GR_GL_COLOR_ATTACHMENT0:
+ fAttachments[(int)AttachmentPoint::kColor].reset(SkRef(attachment));
+ break;
+ }
+ }
+
+ void notifyAttachmentDeleteWhileBound(const FramebufferAttachment* deleted) {
+ for (auto& attachment : fAttachments) {
+ if (attachment == deleted) {
+ attachment.reset(nullptr);
+ }
+ }
+ }
+
+ int numSamples() const {
+ int numSamples = 0;
+ for (auto& attachment : fAttachments) {
+ if (!attachment) {
+ continue;
+ }
+ if (numSamples) {
+ GrAlwaysAssert(attachment->numSamples() == numSamples);
+ continue;
+ }
+ numSamples = attachment->numSamples();
+ }
+ GrAlwaysAssert(numSamples);
+ return numSamples;
+ }
+
+private:
+ enum AttachmentPoint {
+ kStencil,
+ kDepth,
+ kColor
+ };
+    static constexpr int kNumAttachmentPoints = 1 + (int)AttachmentPoint::kColor;
+
+ SkAutoTUnref<const FramebufferAttachment> fAttachments[kNumAttachmentPoints];
+
+ typedef GLObject INHERITED;
+};
+
+/** Null interface implementation */
+class NullInterface : public GrGLTestInterface {
+public:
+ NullInterface(bool enableNVPR)
+ : fCurrDrawFramebuffer(0)
+ , fCurrReadFramebuffer(0)
+ , fCurrRenderbuffer(0)
+ , fCurrProgramID(0)
+ , fCurrShaderID(0)
+ , fCurrGenericID(0)
+ , fCurrUniformLocation(0)
+ , fCurrPathID(0) {
+ memset(fBoundBuffers, 0, sizeof(fBoundBuffers));
+ fExtensions.push_back("GL_ARB_framebuffer_object");
+ fExtensions.push_back("GL_ARB_blend_func_extended");
+ fExtensions.push_back("GL_ARB_timer_query");
+ fExtensions.push_back("GL_ARB_draw_buffers");
+ fExtensions.push_back("GL_ARB_occlusion_query");
+ fExtensions.push_back("GL_EXT_stencil_wrap");
+ if (enableNVPR) {
+ fExtensions.push_back("GL_NV_path_rendering");
+ fExtensions.push_back("GL_ARB_program_interface_query");
+ }
+ fExtensions.push_back(nullptr);
+
+ this->init(kGL_GrGLStandard);
+ }
+
+ GrGLenum checkFramebufferStatus(GrGLenum target) override {
+ return GR_GL_FRAMEBUFFER_COMPLETE;
+ }
+
+ GrGLvoid genBuffers(GrGLsizei n, GrGLuint* ids) override {
+ for (int i = 0; i < n; ++i) {
+ Buffer* buffer = fBufferManager.create();
+ ids[i] = buffer->id();
+ }
+ }
+
+ GrGLvoid bufferData(GrGLenum target, GrGLsizeiptr size, const GrGLvoid* data,
+ GrGLenum usage) override {
+ GrGLuint id = fBoundBuffers[GetBufferIndex(target)];
+ if (id > 0) {
+ Buffer* buffer = fBufferManager.lookUp(id);
+ buffer->allocate(size, (const GrGLchar*) data);
+ }
+ }
+
+ GrGLuint createProgram() override {
+ return ++fCurrProgramID;
+ }
+
+ GrGLuint createShader(GrGLenum type) override {
+ return ++fCurrShaderID;
+ }
+
+ GrGLvoid bindBuffer(GrGLenum target, GrGLuint buffer) override {
+ fBoundBuffers[GetBufferIndex(target)] = buffer;
+ }
+
+ // deleting a bound buffer has the side effect of binding 0
+ GrGLvoid deleteBuffers(GrGLsizei n, const GrGLuint* ids) override {
+ // First potentially unbind the buffers.
+ for (int buffIdx = 0; buffIdx < kNumBufferTargets; ++buffIdx) {
+ if (!fBoundBuffers[buffIdx]) {
+ continue;
+ }
+ for (int i = 0; i < n; ++i) {
+ if (ids[i] == fBoundBuffers[buffIdx]) {
+ fBoundBuffers[buffIdx] = 0;
+ break;
+ }
+ }
+ }
+
+ // Then actually "delete" the buffers.
+ for (int i = 0; i < n; ++i) {
+ if (ids[i] > 0) {
+ Buffer* buffer = fBufferManager.lookUp(ids[i]);
+ fBufferManager.free(buffer);
+ }
+ }
+ }
+
+ GrGLvoid genFramebuffers(GrGLsizei n, GrGLuint *framebuffers) override {
+ for (int i = 0; i < n; ++i) {
+ Framebuffer* framebuffer = fFramebufferManager.create();
+ framebuffers[i] = framebuffer->id();
+ }
+ }
+
+ GrGLvoid bindFramebuffer(GrGLenum target, GrGLuint framebuffer) override {
+ SkASSERT(GR_GL_FRAMEBUFFER == target || GR_GL_DRAW_FRAMEBUFFER == target ||
+ GR_GL_READ_FRAMEBUFFER == target);
+ if (GR_GL_READ_FRAMEBUFFER != target) {
+ fCurrDrawFramebuffer = framebuffer;
+ }
+ if (GR_GL_DRAW_FRAMEBUFFER != target) {
+ fCurrReadFramebuffer = framebuffer;
+ }
+ }
+
+ GrGLvoid deleteFramebuffers(GrGLsizei n, const GrGLuint* ids) override {
+ for (int i = 0; i < n; ++i) {
+ if (ids[i] == fCurrDrawFramebuffer) {
+ fCurrDrawFramebuffer = 0;
+ }
+ if (ids[i] == fCurrReadFramebuffer) {
+ fCurrReadFramebuffer = 0;
+ }
+
+ if (ids[i] > 0) {
+ Framebuffer* framebuffer = fFramebufferManager.lookUp(ids[i]);
+ fFramebufferManager.free(framebuffer);
+ }
+ }
+ }
+
+ GrGLvoid genQueries(GrGLsizei n, GrGLuint *ids) override { this->genGenericIds(n, ids); }
+
+ GrGLvoid genRenderbuffers(GrGLsizei n, GrGLuint *renderbuffers) override {
+ for (int i = 0; i < n; ++i) {
+ Renderbuffer* renderbuffer = fRenderbufferManager.create();
+ renderbuffers[i] = renderbuffer->id();
+ }
+ }
+
+ GrGLvoid bindRenderbuffer(GrGLenum target, GrGLuint renderbuffer) override {
+ SkASSERT(GR_GL_RENDERBUFFER == target);
+ fCurrRenderbuffer = renderbuffer;
+ }
+
+ GrGLvoid deleteRenderbuffers(GrGLsizei n, const GrGLuint* ids) override {
+ for (int i = 0; i < n; ++i) {
+ if (ids[i] <= 0) {
+ continue;
+ }
+ if (ids[i] == fCurrRenderbuffer) {
+ fCurrRenderbuffer = 0;
+ }
+ Renderbuffer* renderbuffer = fRenderbufferManager.lookUp(ids[i]);
+
+ if (fCurrDrawFramebuffer) {
+ Framebuffer* drawFramebuffer = fFramebufferManager.lookUp(fCurrDrawFramebuffer);
+ drawFramebuffer->notifyAttachmentDeleteWhileBound(renderbuffer);
+ }
+ if (fCurrReadFramebuffer) {
+ Framebuffer* readFramebuffer = fFramebufferManager.lookUp(fCurrReadFramebuffer);
+ readFramebuffer->notifyAttachmentDeleteWhileBound(renderbuffer);
+ }
+
+ fRenderbufferManager.free(renderbuffer);
+ }
+ }
+
+ GrGLvoid renderbufferStorage(GrGLenum target, GrGLenum internalformat, GrGLsizei width,
+ GrGLsizei height) override {
+ GrAlwaysAssert(GR_GL_RENDERBUFFER == target);
+ GrAlwaysAssert(fCurrRenderbuffer);
+ Renderbuffer* renderbuffer = fRenderbufferManager.lookUp(fCurrRenderbuffer);
+ renderbuffer->setNumSamples(1);
+ }
+
+ GrGLvoid renderbufferStorageMultisample(GrGLenum target, GrGLsizei samples,
+ GrGLenum internalformat, GrGLsizei width,
+ GrGLsizei height) override {
+ GrAlwaysAssert(GR_GL_RENDERBUFFER == target);
+ GrAlwaysAssert(samples > 0);
+ GrAlwaysAssert(fCurrRenderbuffer);
+ Renderbuffer* renderbuffer = fRenderbufferManager.lookUp(fCurrRenderbuffer);
+ renderbuffer->setNumSamples(samples);
+ }
+
+ GrGLvoid namedRenderbufferStorage(GrGLuint renderbuffer, GrGLenum GrGLinternalformat,
+ GrGLsizei width, GrGLsizei height) override {
+ SK_ABORT("Not implemented");
+ }
+
+ GrGLvoid namedRenderbufferStorageMultisample(GrGLuint renderbuffer, GrGLsizei samples,
+ GrGLenum GrGLinternalformat, GrGLsizei width,
+ GrGLsizei height) override {
+ SK_ABORT("Not implemented");
+ }
+
+ GrGLvoid framebufferRenderbuffer(GrGLenum target, GrGLenum attachment,
+ GrGLenum renderbuffertarget,
+ GrGLuint renderBufferID) override {
+ GrGLuint id = this->getBoundFramebufferID(target);
+ GrAlwaysAssert(id);
+ Framebuffer* framebuffer = fFramebufferManager.lookUp(id);
+
+ GrAlwaysAssert(GR_GL_RENDERBUFFER == renderbuffertarget);
+ GrAlwaysAssert(fCurrRenderbuffer);
+ Renderbuffer* renderbuffer = fRenderbufferManager.lookUp(fCurrRenderbuffer);
+
+ framebuffer->setAttachment(attachment, renderbuffer);
+ }
+
+ GrGLvoid namedFramebufferRenderbuffer(GrGLuint framebuffer, GrGLenum attachment,
+ GrGLenum renderbuffertarget,
+ GrGLuint renderbuffer) override {
+ SK_ABORT("Not implemented");
+ }
+
+ GrGLvoid genTextures(GrGLsizei n, GrGLuint *textures) override {
+ this->genGenericIds(n, textures);
+ }
+
+ GrGLvoid framebufferTexture2D(GrGLenum target, GrGLenum attachment, GrGLenum textarget,
+ GrGLuint textureID, GrGLint level) override {
+ GrGLuint id = this->getBoundFramebufferID(target);
+ GrAlwaysAssert(id);
+ Framebuffer* framebuffer = fFramebufferManager.lookUp(id);
+ framebuffer->setAttachment(attachment, this->getSingleTextureObject());
+ }
+
+ GrGLvoid framebufferTexture2DMultisample(GrGLenum target, GrGLenum attachment,
+ GrGLenum textarget, GrGLuint texture, GrGLint level,
+ GrGLsizei samples) override {
+ SK_ABORT("Not implemented");
+ }
+
+ GrGLvoid namedFramebufferTexture1D(GrGLuint framebuffer, GrGLenum attachment,
+ GrGLenum textarget, GrGLuint texture,
+ GrGLint level) override {
+ SK_ABORT("Not implemented");
+ }
+
+ GrGLvoid namedFramebufferTexture2D(GrGLuint framebuffer, GrGLenum attachment,
+ GrGLenum textarget, GrGLuint texture,
+ GrGLint level) override {
+ SK_ABORT("Not implemented");
+ }
+
+ GrGLvoid namedFramebufferTexture3D(GrGLuint framebuffer, GrGLenum attachment,
+ GrGLenum textarget, GrGLuint texture, GrGLint level,
+ GrGLint zoffset) override {
+ SK_ABORT("Not implemented");
+ }
+
+ GrGLvoid genVertexArrays(GrGLsizei n, GrGLuint *arrays) override {
+ this->genGenericIds(n, arrays);
+ }
+
+ GrGLenum getError() override { return GR_GL_NO_ERROR; }
+
+ GrGLvoid getIntegerv(GrGLenum pname, GrGLint* params) override {
+ // TODO: remove from Ganesh the #defines for gets we don't use.
+ // We would like to minimize gets overall due to performance issues
+ switch (pname) {
+ case GR_GL_CONTEXT_PROFILE_MASK:
+ *params = GR_GL_CONTEXT_COMPATIBILITY_PROFILE_BIT;
+ break;
+ case GR_GL_STENCIL_BITS:
+ *params = 8;
+ break;
+ case GR_GL_SAMPLES: {
+ GrAlwaysAssert(fCurrDrawFramebuffer);
+ Framebuffer* framebuffer = fFramebufferManager.lookUp(fCurrDrawFramebuffer);
+ *params = framebuffer->numSamples();
+ break;
+ }
+ case GR_GL_FRAMEBUFFER_BINDING:
+ *params = 0;
+ break;
+ case GR_GL_VIEWPORT:
+ params[0] = 0;
+ params[1] = 0;
+ params[2] = 800;
+ params[3] = 600;
+ break;
+ case GR_GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS:
+ case GR_GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS:
+ case GR_GL_MAX_TEXTURE_IMAGE_UNITS:
+ case GR_GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS:
+ *params = 8;
+ break;
+ case GR_GL_MAX_TEXTURE_COORDS:
+ *params = 8;
+ break;
+ case GR_GL_MAX_VERTEX_UNIFORM_VECTORS:
+ *params = kDefaultMaxVertexUniformVectors;
+ break;
+ case GR_GL_MAX_FRAGMENT_UNIFORM_VECTORS:
+ *params = kDefaultMaxFragmentUniformVectors;
+ break;
+ case GR_GL_MAX_FRAGMENT_UNIFORM_COMPONENTS:
+ *params = 16 * 4;
+ break;
+ case GR_GL_NUM_COMPRESSED_TEXTURE_FORMATS:
+ *params = 0;
+ break;
+ case GR_GL_COMPRESSED_TEXTURE_FORMATS:
+ break;
+ case GR_GL_MAX_TEXTURE_SIZE:
+ *params = 8192;
+ break;
+ case GR_GL_MAX_RENDERBUFFER_SIZE:
+ *params = 8192;
+ break;
+ case GR_GL_MAX_SAMPLES:
+ *params = 32;
+ break;
+ case GR_GL_MAX_VERTEX_ATTRIBS:
+ *params = kDefaultMaxVertexAttribs;
+ break;
+ case GR_GL_MAX_VARYING_VECTORS:
+ *params = kDefaultMaxVaryingVectors;
+ break;
+ case GR_GL_NUM_EXTENSIONS: {
+                // Count entries up to, but not including, the nullptr terminator.
+                GrGLint i = 0;
+                while (fExtensions[i]) {
+                    ++i;
+                }
+                *params = i;
+ break;
+ }
+ default:
+ SkFAIL("Unexpected pname to GetIntegerv");
+ }
+ }
+
+ GrGLvoid getProgramiv(GrGLuint program, GrGLenum pname, GrGLint* params) override {
+ this->getShaderOrProgramiv(program, pname, params);
+ }
+
+ GrGLvoid getProgramInfoLog(GrGLuint program, GrGLsizei bufsize, GrGLsizei* length,
+ char* infolog) override {
+ this->getInfoLog(program, bufsize, length, infolog);
+ }
+
+ GrGLvoid getMultisamplefv(GrGLenum pname, GrGLuint index, GrGLfloat* val) override {
+ val[0] = val[1] = 0.5f;
+ }
+
+ GrGLvoid getQueryiv(GrGLenum GLtarget, GrGLenum pname, GrGLint *params) override {
+ switch (pname) {
+ case GR_GL_CURRENT_QUERY:
+ *params = 0;
+ break;
+ case GR_GL_QUERY_COUNTER_BITS:
+ *params = 32;
+ break;
+ default:
+ SkFAIL("Unexpected pname passed GetQueryiv.");
+ }
+ }
+
+ GrGLvoid getQueryObjecti64v(GrGLuint id, GrGLenum pname, GrGLint64 *params) override {
+ this->queryResult(id, pname, params);
+ }
+
+ GrGLvoid getQueryObjectiv(GrGLuint id, GrGLenum pname, GrGLint *params) override {
+ this->queryResult(id, pname, params);
+ }
+
+ GrGLvoid getQueryObjectui64v(GrGLuint id, GrGLenum pname, GrGLuint64 *params) override {
+ this->queryResult(id, pname, params);
+ }
+
+ GrGLvoid getQueryObjectuiv(GrGLuint id, GrGLenum pname, GrGLuint *params) override {
+ this->queryResult(id, pname, params);
+ }
+
+ GrGLvoid getShaderiv(GrGLuint shader, GrGLenum pname, GrGLint* params) override {
+ this->getShaderOrProgramiv(shader, pname, params);
+ }
+
+ GrGLvoid getShaderInfoLog(GrGLuint shader, GrGLsizei bufsize, GrGLsizei* length,
+ char* infolog) override {
+ this->getInfoLog(shader, bufsize, length, infolog);
+ }
+
+ const GrGLubyte* getString(GrGLenum name) override {
+ switch (name) {
+ case GR_GL_EXTENSIONS:
+ return CombinedExtensionString();
+ case GR_GL_VERSION:
+ return (const GrGLubyte*)"4.0 Null GL";
+ case GR_GL_SHADING_LANGUAGE_VERSION:
+ return (const GrGLubyte*)"4.20.8 Null GLSL";
+ case GR_GL_VENDOR:
+ return (const GrGLubyte*)"Null Vendor";
+ case GR_GL_RENDERER:
+ return (const GrGLubyte*)"The Null (Non-)Renderer";
+ default:
+ SkFAIL("Unexpected name passed to GetString");
+ return nullptr;
+ }
+ }
+
+ const GrGLubyte* getStringi(GrGLenum name, GrGLuint i) override {
+ switch (name) {
+ case GR_GL_EXTENSIONS: {
+ GrGLint count;
+ this->getIntegerv(GR_GL_NUM_EXTENSIONS, &count);
+                if ((GrGLint)i < count) {
+ return (const GrGLubyte*) fExtensions[i];
+ } else {
+ return nullptr;
+ }
+ }
+ default:
+ SkFAIL("Unexpected name passed to GetStringi");
+ return nullptr;
+ }
+ }
+
+ GrGLint getUniformLocation(GrGLuint program, const char* name) override {
+ return ++fCurrUniformLocation;
+ }
+
+ GrGLvoid* mapBufferRange(GrGLenum target, GrGLintptr offset, GrGLsizeiptr length,
+ GrGLbitfield access) override {
+ GrGLuint id = fBoundBuffers[GetBufferIndex(target)];
+ if (id > 0) {
+ // We just ignore the offset and length here.
+ Buffer* buffer = fBufferManager.lookUp(id);
+ SkASSERT(!buffer->mapped());
+ buffer->setMapped(true);
+ return buffer->dataPtr();
+ }
+ return nullptr;
+ }
+
+ GrGLvoid* mapBuffer(GrGLenum target, GrGLenum access) override {
+ GrGLuint id = fBoundBuffers[GetBufferIndex(target)];
+ if (id > 0) {
+ Buffer* buffer = fBufferManager.lookUp(id);
+ SkASSERT(!buffer->mapped());
+ buffer->setMapped(true);
+ return buffer->dataPtr();
+ }
+
+ SkASSERT(false);
+ return nullptr; // no buffer bound to target
+ }
+
+ GrGLboolean unmapBuffer(GrGLenum target) override {
+ GrGLuint id = fBoundBuffers[GetBufferIndex(target)];
+ if (id > 0) {
+ Buffer* buffer = fBufferManager.lookUp(id);
+ SkASSERT(buffer->mapped());
+ buffer->setMapped(false);
+ return GR_GL_TRUE;
+ }
+
+ GrAlwaysAssert(false);
+ return GR_GL_FALSE; // GR_GL_INVALID_OPERATION;
+ }
+
+ GrGLvoid getBufferParameteriv(GrGLenum target, GrGLenum pname, GrGLint* params) override {
+ switch (pname) {
+ case GR_GL_BUFFER_MAPPED: {
+ *params = GR_GL_FALSE;
+ GrGLuint id = fBoundBuffers[GetBufferIndex(target)];
+ if (id > 0) {
+ Buffer* buffer = fBufferManager.lookUp(id);
+ if (buffer->mapped()) {
+ *params = GR_GL_TRUE;
+ }
+ }
+ break; }
+ default:
+                SkFAIL("Unexpected pname to GetBufferParameteriv");
+ break;
+ }
+ }
+
+ // NV_path_rendering
+ GrGLuint genPaths(GrGLsizei range) override {
+ return ++fCurrPathID;
+ }
+
+
+private:
+    static inline int GetBufferIndex(GrGLenum glTarget) {
+ switch (glTarget) {
+ default: SkFAIL("Unexpected GL target to GetBufferIndex");
+ case GR_GL_ARRAY_BUFFER: return 0;
+ case GR_GL_ELEMENT_ARRAY_BUFFER: return 1;
+ case GR_GL_TEXTURE_BUFFER: return 2;
+ case GR_GL_DRAW_INDIRECT_BUFFER: return 3;
+ case GR_GL_PIXEL_PACK_BUFFER: return 4;
+ case GR_GL_PIXEL_UNPACK_BUFFER: return 5;
+ }
+ }
+    static constexpr int kNumBufferTargets = 6;
+
+ TGLObjectManager<Buffer> fBufferManager;
+ GrGLuint fBoundBuffers[kNumBufferTargets];
+ TGLObjectManager<Framebuffer> fFramebufferManager;
+ GrGLuint fCurrDrawFramebuffer;
+ GrGLuint fCurrReadFramebuffer;
+ TGLObjectManager<Renderbuffer> fRenderbufferManager;
+ GrGLuint fCurrRenderbuffer;
+ GrGLuint fCurrProgramID;
+ GrGLuint fCurrShaderID;
+ GrGLuint fCurrGenericID;
+ GrGLuint fCurrUniformLocation;
+ GrGLuint fCurrPathID;
+ SkAutoTUnref<const Texture> fSingleTextureObject;
+ SkTArray<const char*> fExtensions;
+
+    // the OpenGL ES 2.0 spec says this must be >= 128
+    static const GrGLint kDefaultMaxVertexUniformVectors = 128;
+
+    // the OpenGL ES 2.0 spec says this must be >= 16
+    static const GrGLint kDefaultMaxFragmentUniformVectors = 16;
+
+    // the OpenGL ES 2.0 spec says this must be >= 8
+    static const GrGLint kDefaultMaxVertexAttribs = 8;
+
+    // the OpenGL ES 2.0 spec says this must be >= 8
+    static const GrGLint kDefaultMaxVaryingVectors = 8;
+
+ GrGLuint getBoundFramebufferID(GrGLenum target) {
+ switch (target) {
+ case GR_GL_FRAMEBUFFER:
+ case GR_GL_DRAW_FRAMEBUFFER:
+ return fCurrDrawFramebuffer;
+ case GR_GL_READ_FRAMEBUFFER:
+ return fCurrReadFramebuffer;
+ default:
+ SK_ABORT("Invalid framebuffer target.");
+ return 0;
+ }
+ }
+
+ const Texture* getSingleTextureObject() {
+ // We currently only use FramebufferAttachment objects for a sample count, and all textures
+ // in Skia have one sample, so there is no need as of yet to track individual textures. This
+ // also works around a bug in chromium's cc_unittests where they send us texture IDs that
+ // were generated by cc::TestGLES2Interface.
+ if (!fSingleTextureObject) {
+ fSingleTextureObject.reset(new Texture);
+ }
+ return fSingleTextureObject;
+ }
+
+ const GrGLubyte* CombinedExtensionString() {
+ static SkString gExtString;
+ static SkMutex gMutex;
+ gMutex.acquire();
+ if (0 == gExtString.size()) {
+ int i = 0;
+ while (fExtensions[i]) {
+ if (i > 0) {
+ gExtString.append(" ");
+ }
+ gExtString.append(fExtensions[i]);
+ ++i;
+ }
+ }
+ gMutex.release();
+ return (const GrGLubyte*) gExtString.c_str();
+ }
+
+ GrGLvoid genGenericIds(GrGLsizei n, GrGLuint* ids) {
+ for (int i = 0; i < n; ++i) {
+ ids[i] = ++fCurrGenericID;
+ }
+ }
+
+ GrGLvoid getInfoLog(GrGLuint object, GrGLsizei bufsize, GrGLsizei* length,
+ char* infolog) {
+ if (length) {
+ *length = 0;
+ }
+ if (bufsize > 0) {
+ *infolog = 0;
+ }
+ }
+
+ GrGLvoid getShaderOrProgramiv(GrGLuint object, GrGLenum pname, GrGLint* params) {
+ switch (pname) {
+ case GR_GL_LINK_STATUS: // fallthru
+ case GR_GL_COMPILE_STATUS:
+ *params = GR_GL_TRUE;
+ break;
+ case GR_GL_INFO_LOG_LENGTH:
+ *params = 0;
+ break;
+ // we don't expect any other pnames
+ default:
+ SkFAIL("Unexpected pname to GetProgramiv");
+ break;
+ }
+ }
+
+ template <typename T>
+ void queryResult(GrGLenum GLtarget, GrGLenum pname, T *params) {
+ switch (pname) {
+ case GR_GL_QUERY_RESULT_AVAILABLE:
+ *params = GR_GL_TRUE;
+ break;
+ case GR_GL_QUERY_RESULT:
+ *params = 0;
+ break;
+ default:
+ SkFAIL("Unexpected pname passed to GetQueryObject.");
+ break;
+ }
+ }
+
+ typedef GrGLTestInterface INHERITED;
+};
+
+} // anonymous namespace
+
+const GrGLInterface* GrGLCreateNullInterface(bool enableNVPR) { return new NullInterface(enableNVPR); }
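+
+// Minimal usage sketch (the caller owns the returned ref):
+//   const GrGLInterface* gl = GrGLCreateNullInterface(false /* enableNVPR */);
+//   // ... back a test GrGLContext / GrContext with 'gl' ...
+//   gl->unref();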
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLDefaultInterface_native.cpp b/gfx/skia/skia/src/gpu/gl/GrGLDefaultInterface_native.cpp
new file mode 100644
index 000000000..e695f15a1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLDefaultInterface_native.cpp
@@ -0,0 +1,12 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "gl/GrGLInterface.h"
+
+const GrGLInterface* GrGLDefaultInterface() {
+ return GrGLCreateNativeInterface();
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLDefaultInterface_none.cpp b/gfx/skia/skia/src/gpu/gl/GrGLDefaultInterface_none.cpp
new file mode 100644
index 000000000..f9e52470b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLDefaultInterface_none.cpp
@@ -0,0 +1,12 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "gl/GrGLInterface.h"
+
+const GrGLInterface* GrGLDefaultInterface() {
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLDefines.h b/gfx/skia/skia/src/gpu/gl/GrGLDefines.h
new file mode 100644
index 000000000..8dc7af142
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLDefines.h
@@ -0,0 +1,989 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrGLDefines_DEFINED
+#define GrGLDefines_DEFINED
+
+/* Profiles */
+#define GR_GL_CONTEXT_PROFILE_MASK 0x9126
+#define GR_GL_CONTEXT_CORE_PROFILE_BIT 0x00000001
+#define GR_GL_CONTEXT_COMPATIBILITY_PROFILE_BIT 0x00000002
+
+// The following constants consist of the intersection of GL constants
+// exported by GLES 1.0, GLES 2.0, and desktop GL required by the system.
+
+#define GR_GL_DEPTH_BUFFER_BIT 0x00000100
+#define GR_GL_STENCIL_BUFFER_BIT 0x00000400
+#define GR_GL_COLOR_BUFFER_BIT 0x00004000
+
+/* Boolean */
+#define GR_GL_FALSE 0
+#define GR_GL_TRUE 1
+
+/* BeginMode */
+#define GR_GL_POINTS 0x0000
+#define GR_GL_LINES 0x0001
+#define GR_GL_LINE_LOOP 0x0002
+#define GR_GL_LINE_STRIP 0x0003
+#define GR_GL_TRIANGLES 0x0004
+#define GR_GL_TRIANGLE_STRIP 0x0005
+#define GR_GL_TRIANGLE_FAN 0x0006
+
+/* AlphaFunction (not supported in ES20) */
+/* GL_NEVER */
+/* GL_LESS */
+/* GL_EQUAL */
+/* GL_LEQUAL */
+/* GL_GREATER */
+/* GL_NOTEQUAL */
+/* GL_GEQUAL */
+/* GL_ALWAYS */
+
+/* Basic OpenGL blend equations */
+#define GR_GL_FUNC_ADD 0x8006
+#define GR_GL_FUNC_SUBTRACT 0x800A
+#define GR_GL_FUNC_REVERSE_SUBTRACT 0x800B
+
+/* GL_KHR_blend_equation_advanced */
+#define GR_GL_SCREEN 0x9295
+#define GR_GL_OVERLAY 0x9296
+#define GR_GL_DARKEN 0x9297
+#define GR_GL_LIGHTEN 0x9298
+#define GR_GL_COLORDODGE 0x9299
+#define GR_GL_COLORBURN 0x929A
+#define GR_GL_HARDLIGHT 0x929B
+#define GR_GL_SOFTLIGHT 0x929C
+#define GR_GL_DIFFERENCE 0x929E
+#define GR_GL_EXCLUSION 0x92A0
+#define GR_GL_MULTIPLY 0x9294
+#define GR_GL_HSL_HUE 0x92AD
+#define GR_GL_HSL_SATURATION 0x92AE
+#define GR_GL_HSL_COLOR 0x92AF
+#define GR_GL_HSL_LUMINOSITY 0x92B0
+
+/* BlendingFactorDest */
+#define GR_GL_ZERO 0
+#define GR_GL_ONE 1
+#define GR_GL_SRC_COLOR 0x0300
+#define GR_GL_ONE_MINUS_SRC_COLOR 0x0301
+#define GR_GL_SRC_ALPHA 0x0302
+#define GR_GL_ONE_MINUS_SRC_ALPHA 0x0303
+#define GR_GL_DST_ALPHA 0x0304
+#define GR_GL_ONE_MINUS_DST_ALPHA 0x0305
+
+/* BlendingFactorSrc */
+/* GL_ZERO */
+/* GL_ONE */
+#define GR_GL_DST_COLOR 0x0306
+#define GR_GL_ONE_MINUS_DST_COLOR 0x0307
+#define GR_GL_SRC_ALPHA_SATURATE 0x0308
+/* GL_SRC_ALPHA */
+/* GL_ONE_MINUS_SRC_ALPHA */
+/* GL_DST_ALPHA */
+/* GL_ONE_MINUS_DST_ALPHA */
+
+/* ExtendedBlendFactors */
+#define GR_GL_SRC1_COLOR 0x88F9
+#define GR_GL_ONE_MINUS_SRC1_COLOR 0x88FA
+/* GL_SRC1_ALPHA */
+#define GR_GL_ONE_MINUS_SRC1_ALPHA 0x88FB
+
+/* Separate Blend Functions */
+#define GR_GL_BLEND_DST_RGB 0x80C8
+#define GR_GL_BLEND_SRC_RGB 0x80C9
+#define GR_GL_BLEND_DST_ALPHA 0x80CA
+#define GR_GL_BLEND_SRC_ALPHA 0x80CB
+#define GR_GL_CONSTANT_COLOR 0x8001
+#define GR_GL_ONE_MINUS_CONSTANT_COLOR 0x8002
+#define GR_GL_CONSTANT_ALPHA 0x8003
+#define GR_GL_ONE_MINUS_CONSTANT_ALPHA 0x8004
+#define GR_GL_BLEND_COLOR 0x8005
+
+/* Buffer Objects */
+#define GR_GL_ARRAY_BUFFER 0x8892
+#define GR_GL_ELEMENT_ARRAY_BUFFER 0x8893
+#define GR_GL_DRAW_INDIRECT_BUFFER 0x8F3F
+#define GR_GL_TEXTURE_BUFFER 0x8C2A
+#define GR_GL_ARRAY_BUFFER_BINDING 0x8894
+#define GR_GL_ELEMENT_ARRAY_BUFFER_BINDING 0x8895
+#define GR_GL_DRAW_INDIRECT_BUFFER_BINDING 0x8F43
+#define GR_GL_PIXEL_PACK_BUFFER 0x88EB
+#define GR_GL_PIXEL_UNPACK_BUFFER 0x88EC
+
+#define GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM 0x78EC
+#define GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM 0x78ED
+
+#define GR_GL_STREAM_DRAW 0x88E0
+#define GR_GL_STREAM_READ 0x88E1
+#define GR_GL_STATIC_DRAW 0x88E4
+#define GR_GL_STATIC_READ 0x88E5
+#define GR_GL_DYNAMIC_DRAW 0x88E8
+#define GR_GL_DYNAMIC_READ 0x88E9
+
+#define GR_GL_BUFFER_SIZE 0x8764
+#define GR_GL_BUFFER_USAGE 0x8765
+
+#define GR_GL_CURRENT_VERTEX_ATTRIB 0x8626
+
+/* CullFaceMode */
+#define GR_GL_FRONT 0x0404
+#define GR_GL_BACK 0x0405
+#define GR_GL_FRONT_AND_BACK 0x0408
+
+/* DepthFunction */
+/* GL_NEVER */
+/* GL_LESS */
+/* GL_EQUAL */
+/* GL_LEQUAL */
+/* GL_GREATER */
+/* GL_NOTEQUAL */
+/* GL_GEQUAL */
+/* GL_ALWAYS */
+
+/* EnableCap */
+#define GR_GL_TEXTURE_2D 0x0DE1
+#define GR_GL_CULL_FACE 0x0B44
+#define GR_GL_BLEND 0x0BE2
+#define GR_GL_DITHER 0x0BD0
+#define GR_GL_STENCIL_TEST 0x0B90
+#define GR_GL_DEPTH_TEST 0x0B71
+#define GR_GL_SCISSOR_TEST 0x0C11
+#define GR_GL_POLYGON_OFFSET_FILL 0x8037
+#define GR_GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E
+#define GR_GL_SAMPLE_COVERAGE 0x80A0
+#define GR_GL_POLYGON_OFFSET_FILL 0x8037
+#define GR_GL_POLYGON_SMOOTH 0x0B41
+#define GR_GL_POLYGON_STIPPLE 0x0B42
+#define GR_GL_COLOR_LOGIC_OP 0x0BF2
+#define GR_GL_COLOR_TABLE 0x80D0
+#define GR_GL_INDEX_LOGIC_OP 0x0BF1
+#define GR_GL_VERTEX_PROGRAM_POINT_SIZE 0x8642
+#define GR_GL_LINE_STIPPLE 0x0B24
+#define GR_GL_FRAMEBUFFER_SRGB 0x8DB9
+#define GR_GL_SHADER_PIXEL_LOCAL_STORAGE 0x8F64
+#define GR_GL_SAMPLE_SHADING 0x8C36
+
+/* ErrorCode */
+#define GR_GL_NO_ERROR 0
+#define GR_GL_INVALID_ENUM 0x0500
+#define GR_GL_INVALID_VALUE 0x0501
+#define GR_GL_INVALID_OPERATION 0x0502
+#define GR_GL_OUT_OF_MEMORY 0x0505
+#define GR_GL_CONTEXT_LOST 0x300E // TODO(gman): What value?
+
+/* FrontFaceDirection */
+#define GR_GL_CW 0x0900
+#define GR_GL_CCW 0x0901
+
+/* GetPName */
+#define GR_GL_LINE_WIDTH 0x0B21
+#define GR_GL_ALIASED_POINT_SIZE_RANGE 0x846D
+#define GR_GL_ALIASED_LINE_WIDTH_RANGE 0x846E
+#define GR_GL_CULL_FACE_MODE 0x0B45
+#define GR_GL_FRONT_FACE 0x0B46
+#define GR_GL_DEPTH_RANGE 0x0B70
+#define GR_GL_DEPTH_WRITEMASK 0x0B72
+#define GR_GL_DEPTH_CLEAR_VALUE 0x0B73
+#define GR_GL_DEPTH_FUNC 0x0B74
+#define GR_GL_STENCIL_CLEAR_VALUE 0x0B91
+#define GR_GL_STENCIL_FUNC 0x0B92
+#define GR_GL_STENCIL_FAIL 0x0B94
+#define GR_GL_STENCIL_PASS_DEPTH_FAIL 0x0B95
+#define GR_GL_STENCIL_PASS_DEPTH_PASS 0x0B96
+#define GR_GL_STENCIL_REF 0x0B97
+#define GR_GL_STENCIL_VALUE_MASK 0x0B93
+#define GR_GL_STENCIL_WRITEMASK 0x0B98
+#define GR_GL_STENCIL_BACK_FUNC 0x8800
+#define GR_GL_STENCIL_BACK_FAIL 0x8801
+#define GR_GL_STENCIL_BACK_PASS_DEPTH_FAIL 0x8802
+#define GR_GL_STENCIL_BACK_PASS_DEPTH_PASS 0x8803
+#define GR_GL_STENCIL_BACK_REF 0x8CA3
+#define GR_GL_STENCIL_BACK_VALUE_MASK 0x8CA4
+#define GR_GL_STENCIL_BACK_WRITEMASK 0x8CA5
+#define GR_GL_VIEWPORT 0x0BA2
+#define GR_GL_SCISSOR_BOX 0x0C10
+/* GL_SCISSOR_TEST */
+#define GR_GL_COLOR_CLEAR_VALUE 0x0C22
+#define GR_GL_COLOR_WRITEMASK 0x0C23
+#define GR_GL_UNPACK_ALIGNMENT 0x0CF5
+#define GR_GL_UNPACK_FLIP_Y 0x9240
+#define GR_GL_PACK_ALIGNMENT 0x0D05
+#define GR_GL_PACK_REVERSE_ROW_ORDER 0x93A4
+#define GR_GL_MAX_TEXTURE_SIZE 0x0D33
+#define GR_GL_TEXTURE_MIN_LOD 0x813A
+#define GR_GL_TEXTURE_MAX_LOD 0x813B
+#define GR_GL_TEXTURE_BASE_LEVEL 0x813C
+#define GR_GL_TEXTURE_MAX_LEVEL 0x813D
+#define GR_GL_MAX_VIEWPORT_DIMS 0x0D3A
+#define GR_GL_SUBPIXEL_BITS 0x0D50
+#define GR_GL_RED_BITS 0x0D52
+#define GR_GL_GREEN_BITS 0x0D53
+#define GR_GL_BLUE_BITS 0x0D54
+#define GR_GL_ALPHA_BITS 0x0D55
+#define GR_GL_DEPTH_BITS 0x0D56
+#define GR_GL_STENCIL_BITS 0x0D57
+#define GR_GL_POLYGON_OFFSET_UNITS 0x2A00
+/* GL_POLYGON_OFFSET_FILL */
+#define GR_GL_POLYGON_OFFSET_FACTOR 0x8038
+#define GR_GL_TEXTURE_BINDING_2D 0x8069
+#define GR_GL_SAMPLE_BUFFERS 0x80A8
+#define GR_GL_SAMPLES 0x80A9
+#define GR_GL_SAMPLE_COVERAGE_VALUE 0x80AA
+#define GR_GL_SAMPLE_COVERAGE_INVERT 0x80AB
+#define GR_GL_RENDERBUFFER_COVERAGE_SAMPLES 0x8CAB
+#define GR_GL_RENDERBUFFER_COLOR_SAMPLES 0x8E10
+#define GR_GL_MAX_MULTISAMPLE_COVERAGE_MODES 0x8E11
+#define GR_GL_MULTISAMPLE_COVERAGE_MODES 0x8E12
+#define GR_GL_MAX_TEXTURE_BUFFER_SIZE 0x8C2B
+
+/* GetTextureParameter */
+/* GL_TEXTURE_MAG_FILTER */
+/* GL_TEXTURE_MIN_FILTER */
+/* GL_TEXTURE_WRAP_S */
+/* GL_TEXTURE_WRAP_T */
+
+#define GR_GL_NUM_COMPRESSED_TEXTURE_FORMATS 0x86A2
+#define GR_GL_COMPRESSED_TEXTURE_FORMATS 0x86A3
+
+/* Compressed Texture Formats */
+#define GR_GL_COMPRESSED_RGB_S3TC_DXT1_EXT 0x83F0
+#define GR_GL_COMPRESSED_RGBA_S3TC_DXT1_EXT 0x83F1
+#define GR_GL_COMPRESSED_RGBA_S3TC_DXT3_EXT 0x83F2
+#define GR_GL_COMPRESSED_RGBA_S3TC_DXT5_EXT 0x83F3
+
+#define GR_GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG 0x8C00
+#define GR_GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG 0x8C01
+#define GR_GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG 0x8C02
+#define GR_GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG 0x8C03
+
+#define GR_GL_COMPRESSED_RGBA_PVRTC_2BPPV2_IMG 0x9137
+#define GR_GL_COMPRESSED_RGBA_PVRTC_4BPPV2_IMG 0x9138
+
+#define GR_GL_COMPRESSED_ETC1_RGB8 0x8D64
+
+#define GR_GL_COMPRESSED_R11_EAC 0x9270
+#define GR_GL_COMPRESSED_SIGNED_R11_EAC 0x9271
+#define GR_GL_COMPRESSED_RG11_EAC 0x9272
+#define GR_GL_COMPRESSED_SIGNED_RG11_EAC 0x9273
+
+#define GR_GL_COMPRESSED_RGB8_ETC2 0x9274
+#define GR_GL_COMPRESSED_SRGB8 0x9275
+#define GR_GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1 0x9276
+#define GR_GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1 0x9277
+#define GR_GL_COMPRESSED_RGBA8_ETC2 0x9278
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ETC2 0x9279
+
+#define GR_GL_COMPRESSED_LUMINANCE_LATC1 0x8C70
+#define GR_GL_COMPRESSED_SIGNED_LUMINANCE_LATC1 0x8C71
+#define GR_GL_COMPRESSED_LUMINANCE_ALPHA_LATC2 0x8C72
+#define GR_GL_COMPRESSED_SIGNED_LUMINANCE_ALPHA_LATC2 0x8C73
+
+#define GR_GL_COMPRESSED_RED_RGTC1 0x8DBB
+#define GR_GL_COMPRESSED_SIGNED_RED_RGTC1 0x8DBC
+#define GR_GL_COMPRESSED_RED_GREEN_RGTC2 0x8DBD
+#define GR_GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2 0x8DBE
+
+#define GR_GL_COMPRESSED_3DC_X 0x87F9
+#define GR_GL_COMPRESSED_3DC_XY 0x87FA
+
+#define GR_GL_COMPRESSED_RGBA_BPTC_UNORM 0x8E8C
+#define GR_GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM 0x8E8D
+#define GR_GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT 0x8E8E
+#define GR_GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT 0x8E8F
+
+#define GR_GL_COMPRESSED_RGBA_ASTC_4x4 0x93B0
+#define GR_GL_COMPRESSED_RGBA_ASTC_5x4 0x93B1
+#define GR_GL_COMPRESSED_RGBA_ASTC_5x5 0x93B2
+#define GR_GL_COMPRESSED_RGBA_ASTC_6x5 0x93B3
+#define GR_GL_COMPRESSED_RGBA_ASTC_6x6 0x93B4
+#define GR_GL_COMPRESSED_RGBA_ASTC_8x5 0x93B5
+#define GR_GL_COMPRESSED_RGBA_ASTC_8x6 0x93B6
+#define GR_GL_COMPRESSED_RGBA_ASTC_8x8 0x93B7
+#define GR_GL_COMPRESSED_RGBA_ASTC_10x5 0x93B8
+#define GR_GL_COMPRESSED_RGBA_ASTC_10x6 0x93B9
+#define GR_GL_COMPRESSED_RGBA_ASTC_10x8 0x93BA
+#define GR_GL_COMPRESSED_RGBA_ASTC_10x10 0x93BB
+#define GR_GL_COMPRESSED_RGBA_ASTC_12x10 0x93BC
+#define GR_GL_COMPRESSED_RGBA_ASTC_12x12 0x93BD
+
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4 0x93D0
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4 0x93D1
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5 0x93D2
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5 0x93D3
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6 0x93D4
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5 0x93D5
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6 0x93D6
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8 0x93D7
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5 0x93D8
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6 0x93D9
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8 0x93DA
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10 0x93DB
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10 0x93DC
+#define GR_GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12 0x93DD
+
+/* HintMode */
+#define GR_GL_DONT_CARE 0x1100
+#define GR_GL_FASTEST 0x1101
+#define GR_GL_NICEST 0x1102
+
+/* HintTarget */
+#define GR_GL_GENERATE_MIPMAP_HINT 0x8192
+
+/* DataType */
+#define GR_GL_BYTE 0x1400
+#define GR_GL_UNSIGNED_BYTE 0x1401
+#define GR_GL_SHORT 0x1402
+#define GR_GL_UNSIGNED_SHORT 0x1403
+#define GR_GL_INT 0x1404
+#define GR_GL_UNSIGNED_INT 0x1405
+#define GR_GL_FLOAT 0x1406
+#define GR_GL_HALF_FLOAT 0x140B
+#define GR_GL_FIXED 0x140C
+#define GR_GL_HALF_FLOAT_OES 0x8D61
+
+/* Lighting */
+#define GR_GL_LIGHTING 0x0B50
+#define GR_GL_LIGHT0 0x4000
+#define GR_GL_LIGHT1 0x4001
+#define GR_GL_LIGHT2 0x4002
+#define GR_GL_LIGHT3 0x4003
+#define GR_GL_LIGHT4 0x4004
+#define GR_GL_LIGHT5 0x4005
+#define GR_GL_LIGHT6 0x4006
+#define GR_GL_LIGHT7 0x4007
+#define GR_GL_SPOT_EXPONENT 0x1205
+#define GR_GL_SPOT_CUTOFF 0x1206
+#define GR_GL_CONSTANT_ATTENUATION 0x1207
+#define GR_GL_LINEAR_ATTENUATION 0x1208
+#define GR_GL_QUADRATIC_ATTENUATION 0x1209
+#define GR_GL_AMBIENT 0x1200
+#define GR_GL_DIFFUSE 0x1201
+#define GR_GL_SPECULAR 0x1202
+#define GR_GL_SHININESS 0x1601
+#define GR_GL_EMISSION 0x1600
+#define GR_GL_POSITION 0x1203
+#define GR_GL_SPOT_DIRECTION 0x1204
+#define GR_GL_AMBIENT_AND_DIFFUSE 0x1602
+#define GR_GL_COLOR_INDEXES 0x1603
+#define GR_GL_LIGHT_MODEL_TWO_SIDE 0x0B52
+#define GR_GL_LIGHT_MODEL_LOCAL_VIEWER 0x0B51
+#define GR_GL_LIGHT_MODEL_AMBIENT 0x0B53
+#define GR_GL_FRONT_AND_BACK 0x0408
+#define GR_GL_SHADE_MODEL 0x0B54
+#define GR_GL_FLAT 0x1D00
+#define GR_GL_SMOOTH 0x1D01
+#define GR_GL_COLOR_MATERIAL 0x0B57
+#define GR_GL_COLOR_MATERIAL_FACE 0x0B55
+#define GR_GL_COLOR_MATERIAL_PARAMETER 0x0B56
+#define GR_GL_NORMALIZE 0x0BA1
+
+/* Matrix Mode */
+#define GR_GL_MATRIX_MODE 0x0BA0
+#define GR_GL_MODELVIEW 0x1700
+#define GR_GL_PROJECTION 0x1701
+#define GR_GL_TEXTURE 0x1702
+
+/* multisample */
+#define GR_GL_MULTISAMPLE 0x809D
+#define GR_GL_SAMPLE_POSITION 0x8E50
+
+/* Points */
+#define GR_GL_POINT_SMOOTH 0x0B10
+#define GR_GL_POINT_SIZE 0x0B11
+#define GR_GL_POINT_SIZE_GRANULARITY 0x0B13
+#define GR_GL_POINT_SIZE_RANGE 0x0B12
+
+/* Lines */
+#define GR_GL_LINE_SMOOTH 0x0B20
+#define GR_GL_LINE_STIPPLE 0x0B24
+#define GR_GL_LINE_STIPPLE_PATTERN 0x0B25
+#define GR_GL_LINE_STIPPLE_REPEAT 0x0B26
+#define GR_GL_LINE_WIDTH 0x0B21
+#define GR_GL_LINE_WIDTH_GRANULARITY 0x0B23
+#define GR_GL_LINE_WIDTH_RANGE 0x0B22
+
+/* PixelFormat */
+#define GR_GL_DEPTH_COMPONENT 0x1902
+#define GR_GL_RED 0x1903
+#define GR_GL_GREEN 0x1904
+#define GR_GL_BLUE 0x1905
+#define GR_GL_ALPHA 0x1906
+#define GR_GL_RGB 0x1907
+#define GR_GL_RGBA 0x1908
+#define GR_GL_BGRA 0x80E1
+#define GR_GL_LUMINANCE 0x1909
+#define GR_GL_LUMINANCE_ALPHA 0x190A
+#define GR_GL_PALETTE8_RGBA8 0x8B96
+#define GR_GL_ALPHA8 0x803C
+
+#define GR_GL_R8 0x8229
+#define GR_GL_R16F 0x822D
+#define GR_GL_RGBA16F 0x881A
+#define GR_GL_ALPHA16F 0x881C
+
+/* PixelType */
+/* GL_UNSIGNED_BYTE */
+#define GR_GL_UNSIGNED_SHORT_4_4_4_4 0x8033
+#define GR_GL_UNSIGNED_SHORT_5_5_5_1 0x8034
+#define GR_GL_UNSIGNED_SHORT_5_6_5 0x8363
+
+/* Shaders */
+#define GR_GL_FRAGMENT_SHADER 0x8B30
+#define GR_GL_VERTEX_SHADER 0x8B31
+#define GR_GL_GEOMETRY_SHADER 0x8DD9
+#define GR_GL_MAX_VERTEX_ATTRIBS 0x8869
+#define GR_GL_MAX_VERTEX_UNIFORM_VECTORS 0x8DFB
+#define GR_GL_MAX_VARYING_VECTORS 0x8DFC
+#define GR_GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS 0x8B4D
+#define GR_GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS 0x8B4C
+#define GR_GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS 0x8C29
+#define GR_GL_MAX_TEXTURE_IMAGE_UNITS 0x8872
+#define GR_GL_MAX_FRAGMENT_UNIFORM_VECTORS 0x8DFD
+#define GR_GL_SHADER_TYPE 0x8B4F
+#define GR_GL_DELETE_STATUS 0x8B80
+#define GR_GL_LINK_STATUS 0x8B82
+#define GR_GL_VALIDATE_STATUS 0x8B83
+#define GR_GL_ATTACHED_SHADERS 0x8B85
+#define GR_GL_ACTIVE_UNIFORMS 0x8B86
+#define GR_GL_ACTIVE_UNIFORM_MAX_LENGTH 0x8B87
+#define GR_GL_ACTIVE_ATTRIBUTES 0x8B89
+#define GR_GL_ACTIVE_ATTRIBUTE_MAX_LENGTH 0x8B8A
+#define GR_GL_SHADING_LANGUAGE_VERSION 0x8B8C
+#define GR_GL_CURRENT_PROGRAM 0x8B8D
+#define GR_GL_MAX_FRAGMENT_UNIFORM_COMPONENTS 0x8B49
+#define GR_GL_MAX_VERTEX_UNIFORM_COMPONENTS 0x8B4A
+#define GR_GL_MAX_SHADER_PIXEL_LOCAL_STORAGE_FAST_SIZE 0x8F63
+
+/* StencilFunction */
+#define GR_GL_NEVER 0x0200
+#define GR_GL_LESS 0x0201
+#define GR_GL_EQUAL 0x0202
+#define GR_GL_LEQUAL 0x0203
+#define GR_GL_GREATER 0x0204
+#define GR_GL_NOTEQUAL 0x0205
+#define GR_GL_GEQUAL 0x0206
+#define GR_GL_ALWAYS 0x0207
+
+/* StencilOp */
+/* GL_ZERO */
+#define GR_GL_KEEP 0x1E00
+#define GR_GL_REPLACE 0x1E01
+#define GR_GL_INCR 0x1E02
+#define GR_GL_DECR 0x1E03
+#define GR_GL_INVERT 0x150A
+#define GR_GL_INCR_WRAP 0x8507
+#define GR_GL_DECR_WRAP 0x8508
+
+/* StringName */
+#define GR_GL_VENDOR 0x1F00
+#define GR_GL_RENDERER 0x1F01
+#define GR_GL_VERSION 0x1F02
+#define GR_GL_EXTENSIONS 0x1F03
+
+/* StringCounts */
+#define GR_GL_NUM_EXTENSIONS 0x821D
+
+/* Pixel Mode / Transfer */
+#define GR_GL_UNPACK_ROW_LENGTH 0x0CF2
+#define GR_GL_PACK_ROW_LENGTH 0x0D02
+
+
+/* TextureMagFilter */
+#define GR_GL_NEAREST 0x2600
+#define GR_GL_LINEAR 0x2601
+
+/* TextureMinFilter */
+/* GL_NEAREST */
+/* GL_LINEAR */
+#define GR_GL_NEAREST_MIPMAP_NEAREST 0x2700
+#define GR_GL_LINEAR_MIPMAP_NEAREST 0x2701
+#define GR_GL_NEAREST_MIPMAP_LINEAR 0x2702
+#define GR_GL_LINEAR_MIPMAP_LINEAR 0x2703
+
+/* TextureUsage */
+#define GR_GL_FRAMEBUFFER_ATTACHMENT 0x93A3
+
+/* TextureSRGBDecode */
+#define GR_GL_DECODE_EXT 0x8A49
+#define GR_GL_SKIP_DECODE_EXT 0x8A4A
+
+/* TextureParameterName */
+#define GR_GL_TEXTURE_MAG_FILTER 0x2800
+#define GR_GL_TEXTURE_MIN_FILTER 0x2801
+#define GR_GL_TEXTURE_WRAP_S 0x2802
+#define GR_GL_TEXTURE_WRAP_T 0x2803
+#define GR_GL_TEXTURE_USAGE 0x93A2
+#define GR_GL_TEXTURE_SRGB_DECODE_EXT 0x8A48
+
+/* TextureTarget */
+/* GL_TEXTURE_2D */
+#define GR_GL_TEXTURE 0x1702
+#define GR_GL_TEXTURE_CUBE_MAP 0x8513
+#define GR_GL_TEXTURE_BINDING_CUBE_MAP 0x8514
+#define GR_GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x8515
+#define GR_GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x8516
+#define GR_GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x8517
+#define GR_GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x8518
+#define GR_GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x8519
+#define GR_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x851A
+#define GR_GL_MAX_CUBE_MAP_TEXTURE_SIZE 0x851C
+
+/* TextureUnit */
+#define GR_GL_TEXTURE0 0x84C0
+#define GR_GL_TEXTURE1 0x84C1
+#define GR_GL_TEXTURE2 0x84C2
+#define GR_GL_TEXTURE3 0x84C3
+#define GR_GL_TEXTURE4 0x84C4
+#define GR_GL_TEXTURE5 0x84C5
+#define GR_GL_TEXTURE6 0x84C6
+#define GR_GL_TEXTURE7 0x84C7
+#define GR_GL_TEXTURE8 0x84C8
+#define GR_GL_TEXTURE9 0x84C9
+#define GR_GL_TEXTURE10 0x84CA
+#define GR_GL_TEXTURE11 0x84CB
+#define GR_GL_TEXTURE12 0x84CC
+#define GR_GL_TEXTURE13 0x84CD
+#define GR_GL_TEXTURE14 0x84CE
+#define GR_GL_TEXTURE15 0x84CF
+#define GR_GL_TEXTURE16 0x84D0
+#define GR_GL_TEXTURE17 0x84D1
+#define GR_GL_TEXTURE18 0x84D2
+#define GR_GL_TEXTURE19 0x84D3
+#define GR_GL_TEXTURE20 0x84D4
+#define GR_GL_TEXTURE21 0x84D5
+#define GR_GL_TEXTURE22 0x84D6
+#define GR_GL_TEXTURE23 0x84D7
+#define GR_GL_TEXTURE24 0x84D8
+#define GR_GL_TEXTURE25 0x84D9
+#define GR_GL_TEXTURE26 0x84DA
+#define GR_GL_TEXTURE27 0x84DB
+#define GR_GL_TEXTURE28 0x84DC
+#define GR_GL_TEXTURE29 0x84DD
+#define GR_GL_TEXTURE30 0x84DE
+#define GR_GL_TEXTURE31 0x84DF
+#define GR_GL_ACTIVE_TEXTURE 0x84E0
+#define GR_GL_MAX_TEXTURE_UNITS 0x84E2
+#define GR_GL_MAX_TEXTURE_COORDS 0x8871
+
+/* TextureWrapMode */
+#define GR_GL_REPEAT 0x2901
+#define GR_GL_CLAMP_TO_EDGE 0x812F
+#define GR_GL_MIRRORED_REPEAT 0x8370
+
+/* Texture Swizzle */
+#define GR_GL_TEXTURE_SWIZZLE_R 0x8E42
+#define GR_GL_TEXTURE_SWIZZLE_G 0x8E43
+#define GR_GL_TEXTURE_SWIZZLE_B 0x8E44
+#define GR_GL_TEXTURE_SWIZZLE_A 0x8E45
+#define GR_GL_TEXTURE_SWIZZLE_RGBA 0x8E46
+
+/* Texture mapping */
+#define GR_GL_TEXTURE_ENV 0x2300
+#define GR_GL_TEXTURE_ENV_MODE 0x2200
+#define GR_GL_TEXTURE_1D 0x0DE0
+/* GL_TEXTURE_2D */
+/* GL_TEXTURE_WRAP_S */
+/* GL_TEXTURE_WRAP_T */
+/* GL_TEXTURE_MAG_FILTER */
+/* GL_TEXTURE_MIN_FILTER */
+#define GR_GL_TEXTURE_ENV_COLOR 0x2201
+#define GR_GL_TEXTURE_GEN_S 0x0C60
+#define GR_GL_TEXTURE_GEN_T 0x0C61
+#define GR_GL_TEXTURE_GEN_R 0x0C62
+#define GR_GL_TEXTURE_GEN_Q 0x0C63
+#define GR_GL_TEXTURE_GEN_MODE 0x2500
+#define GR_GL_TEXTURE_BORDER_COLOR 0x1004
+#define GR_GL_TEXTURE_WIDTH 0x1000
+#define GR_GL_TEXTURE_HEIGHT 0x1001
+#define GR_GL_TEXTURE_BORDER 0x1005
+#define GR_GL_TEXTURE_COMPONENTS 0x1003
+#define GR_GL_TEXTURE_RED_SIZE 0x805C
+#define GR_GL_TEXTURE_GREEN_SIZE 0x805D
+#define GR_GL_TEXTURE_BLUE_SIZE 0x805E
+#define GR_GL_TEXTURE_ALPHA_SIZE 0x805F
+#define GR_GL_TEXTURE_LUMINANCE_SIZE 0x8060
+#define GR_GL_TEXTURE_INTENSITY_SIZE 0x8061
+#define GR_GL_TEXTURE_INTERNAL_FORMAT 0x1003
+/* GL_NEAREST_MIPMAP_NEAREST */
+/* GL_NEAREST_MIPMAP_LINEAR */
+/* GL_LINEAR_MIPMAP_NEAREST */
+/* GL_LINEAR_MIPMAP_LINEAR */
+#define GR_GL_OBJECT_LINEAR 0x2401
+#define GR_GL_OBJECT_PLANE 0x2501
+#define GR_GL_EYE_LINEAR 0x2400
+#define GR_GL_EYE_PLANE 0x2502
+#define GR_GL_SPHERE_MAP 0x2402
+#define GR_GL_DECAL 0x2101
+#define GR_GL_MODULATE 0x2100
+/* GL_NEAREST */
+/* GL_REPEAT */
+#define GR_GL_CLAMP 0x2900
+#define GR_GL_S 0x2000
+#define GR_GL_T 0x2001
+#define GR_GL_R 0x2002
+#define GR_GL_Q 0x2003
+#define GR_GL_TEXTURE_GEN_R 0x0C62
+#define GR_GL_TEXTURE_GEN_Q 0x0C63
+
+/* texture_env_combine */
+#define GR_GL_COMBINE 0x8570
+#define GR_GL_COMBINE_RGB 0x8571
+#define GR_GL_COMBINE_ALPHA 0x8572
+#define GR_GL_SOURCE0_RGB 0x8580
+#define GR_GL_SOURCE1_RGB 0x8581
+#define GR_GL_SOURCE2_RGB 0x8582
+#define GR_GL_SOURCE0_ALPHA 0x8588
+#define GR_GL_SOURCE1_ALPHA 0x8589
+#define GR_GL_SOURCE2_ALPHA 0x858A
+#define GR_GL_OPERAND0_RGB 0x8590
+#define GR_GL_OPERAND1_RGB 0x8591
+#define GR_GL_OPERAND2_RGB 0x8592
+#define GR_GL_OPERAND0_ALPHA 0x8598
+#define GR_GL_OPERAND1_ALPHA 0x8599
+#define GR_GL_OPERAND2_ALPHA 0x859A
+#define GR_GL_RGB_SCALE 0x8573
+#define GR_GL_ADD_SIGNED 0x8574
+#define GR_GL_INTERPOLATE 0x8575
+#define GR_GL_SUBTRACT 0x84E7
+#define GR_GL_CONSTANT 0x8576
+#define GR_GL_PRIMARY_COLOR 0x8577
+#define GR_GL_PREVIOUS 0x8578
+#define GR_GL_SRC0_RGB 0x8580
+#define GR_GL_SRC1_RGB 0x8581
+#define GR_GL_SRC2_RGB 0x8582
+#define GR_GL_SRC0_ALPHA 0x8588
+#define GR_GL_SRC1_ALPHA 0x8589
+#define GR_GL_SRC2_ALPHA 0x858A
+
+/* Uniform Types */
+#define GR_GL_FLOAT_VEC2 0x8B50
+#define GR_GL_FLOAT_VEC3 0x8B51
+#define GR_GL_FLOAT_VEC4 0x8B52
+#define GR_GL_INT_VEC2 0x8B53
+#define GR_GL_INT_VEC3 0x8B54
+#define GR_GL_INT_VEC4 0x8B55
+#define GR_GL_BOOL 0x8B56
+#define GR_GL_BOOL_VEC2 0x8B57
+#define GR_GL_BOOL_VEC3 0x8B58
+#define GR_GL_BOOL_VEC4 0x8B59
+#define GR_GL_FLOAT_MAT2 0x8B5A
+#define GR_GL_FLOAT_MAT3 0x8B5B
+#define GR_GL_FLOAT_MAT4 0x8B5C
+#define GR_GL_SAMPLER_2D 0x8B5E
+#define GR_GL_SAMPLER_CUBE 0x8B60
+
+/* Vertex Arrays */
+#define GR_GL_VERTEX_ATTRIB_ARRAY_ENABLED 0x8622
+#define GR_GL_VERTEX_ATTRIB_ARRAY_SIZE 0x8623
+#define GR_GL_VERTEX_ATTRIB_ARRAY_STRIDE 0x8624
+#define GR_GL_VERTEX_ATTRIB_ARRAY_TYPE 0x8625
+#define GR_GL_VERTEX_ATTRIB_ARRAY_NORMALIZED 0x886A
+#define GR_GL_VERTEX_ATTRIB_ARRAY_POINTER 0x8645
+#define GR_GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING 0x889F
+#define GR_GL_VERTEX_ARRAY 0x8074
+#define GR_GL_NORMAL_ARRAY 0x8075
+#define GR_GL_COLOR_ARRAY 0x8076
+#define GR_GL_SECONDARY_COLOR_ARRAY 0x845E
+#define GR_GL_INDEX_ARRAY 0x8077
+#define GR_GL_TEXTURE_COORD_ARRAY 0x8078
+#define GR_GL_EDGE_FLAG_ARRAY 0x8079
+#define GR_GL_VERTEX_ARRAY_SIZE 0x807A
+#define GR_GL_VERTEX_ARRAY_TYPE 0x807B
+#define GR_GL_VERTEX_ARRAY_STRIDE 0x807C
+#define GR_GL_NORMAL_ARRAY_TYPE 0x807E
+#define GR_GL_NORMAL_ARRAY_STRIDE 0x807F
+#define GR_GL_COLOR_ARRAY_SIZE 0x8081
+#define GR_GL_COLOR_ARRAY_TYPE 0x8082
+#define GR_GL_COLOR_ARRAY_STRIDE 0x8083
+#define GR_GL_INDEX_ARRAY_TYPE 0x8085
+#define GR_GL_INDEX_ARRAY_STRIDE 0x8086
+#define GR_GL_TEXTURE_COORD_ARRAY_SIZE 0x8088
+#define GR_GL_TEXTURE_COORD_ARRAY_TYPE 0x8089
+#define GR_GL_TEXTURE_COORD_ARRAY_STRIDE 0x808A
+#define GR_GL_EDGE_FLAG_ARRAY_STRIDE 0x808C
+#define GR_GL_VERTEX_ARRAY_POINTER 0x808E
+#define GR_GL_NORMAL_ARRAY_POINTER 0x808F
+#define GR_GL_COLOR_ARRAY_POINTER 0x8090
+#define GR_GL_INDEX_ARRAY_POINTER 0x8091
+#define GR_GL_TEXTURE_COORD_ARRAY_POINTER 0x8092
+#define GR_GL_EDGE_FLAG_ARRAY_POINTER 0x8093
+#define GR_GL_V2F 0x2A20
+#define GR_GL_V3F 0x2A21
+#define GR_GL_C4UB_V2F 0x2A22
+#define GR_GL_C4UB_V3F 0x2A23
+#define GR_GL_C3F_V3F 0x2A24
+#define GR_GL_N3F_V3F 0x2A25
+#define GR_GL_C4F_N3F_V3F 0x2A26
+#define GR_GL_T2F_V3F 0x2A27
+#define GR_GL_T4F_V4F 0x2A28
+#define GR_GL_T2F_C4UB_V3F 0x2A29
+#define GR_GL_T2F_C3F_V3F 0x2A2A
+#define GR_GL_T2F_N3F_V3F 0x2A2B
+#define GR_GL_T2F_C4F_N3F_V3F 0x2A2C
+#define GR_GL_T4F_C4F_N3F_V4F 0x2A2D
+
+/* Buffer Object */
+#define GR_GL_READ_ONLY 0x88B8
+#define GR_GL_WRITE_ONLY 0x88B9
+#define GR_GL_BUFFER_MAPPED 0x88BC
+
+#define GR_GL_MAP_READ_BIT 0x0001
+#define GR_GL_MAP_WRITE_BIT 0x0002
+#define GR_GL_MAP_INVALIDATE_RANGE_BIT 0x0004
+#define GR_GL_MAP_INVALIDATE_BUFFER_BIT 0x0008
+#define GR_GL_MAP_FLUSH_EXPLICIT_BIT 0x0010
+#define GR_GL_MAP_UNSYNCHRONIZED_BIT 0x0020
+
+/* Read Format */
+#define GR_GL_IMPLEMENTATION_COLOR_READ_TYPE 0x8B9A
+#define GR_GL_IMPLEMENTATION_COLOR_READ_FORMAT 0x8B9B
+
+/* Shader Source */
+#define GR_GL_COMPILE_STATUS 0x8B81
+#define GR_GL_INFO_LOG_LENGTH 0x8B84
+#define GR_GL_SHADER_SOURCE_LENGTH 0x8B88
+#define GR_GL_SHADER_COMPILER 0x8DFA
+
+/* Shader Binary */
+#define GR_GL_SHADER_BINARY_FORMATS 0x8DF8
+#define GR_GL_NUM_SHADER_BINARY_FORMATS 0x8DF9
+
+/* Shader Precision-Specified Types */
+#define GR_GL_LOW_FLOAT 0x8DF0
+#define GR_GL_MEDIUM_FLOAT 0x8DF1
+#define GR_GL_HIGH_FLOAT 0x8DF2
+#define GR_GL_LOW_INT 0x8DF3
+#define GR_GL_MEDIUM_INT 0x8DF4
+#define GR_GL_HIGH_INT 0x8DF5
+
+/* Queries */
+#define GR_GL_QUERY_COUNTER_BITS 0x8864
+#define GR_GL_CURRENT_QUERY 0x8865
+#define GR_GL_QUERY_RESULT 0x8866
+#define GR_GL_QUERY_RESULT_AVAILABLE 0x8867
+#define GR_GL_SAMPLES_PASSED 0x8914
+#define GR_GL_ANY_SAMPLES_PASSED 0x8C2F
+#define GR_GL_TIME_ELAPSED 0x88BF
+#define GR_GL_TIMESTAMP 0x8E28
+#define GR_GL_PRIMITIVES_GENERATED 0x8C87
+#define GR_GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN 0x8C88
+
+
+/* Framebuffer Object. */
+#define GR_GL_FRAMEBUFFER 0x8D40
+#define GR_GL_READ_FRAMEBUFFER 0x8CA8
+#define GR_GL_DRAW_FRAMEBUFFER 0x8CA9
+
+#define GR_GL_RENDERBUFFER 0x8D41
+
+#define GR_GL_RGBA4 0x8056
+#define GR_GL_RGB5_A1 0x8057
+#define GR_GL_RGB565 0x8D62
+#define GR_GL_RGBA8 0x8058
+#define GR_GL_RGBA32F 0x8814
+#define GR_GL_RGB5 0x8050
+#define GR_GL_RGB8 0x8051
+#define GR_GL_BGRA8 0x93A1
+#define GR_GL_SRGB 0x8C40
+#define GR_GL_SRGB8 0x8C41
+#define GR_GL_SRGB_ALPHA 0x8C42
+#define GR_GL_SRGB8_ALPHA8 0x8C43
+#define GR_GL_DEPTH_COMPONENT16 0x81A5
+#define GR_GL_STENCIL_INDEX 0x1901
+#define GR_GL_STENCIL_INDEX4 0x8D47
+#define GR_GL_STENCIL_INDEX8 0x8D48
+#define GR_GL_STENCIL_INDEX16 0x8D49
+#define GR_GL_DEPTH_STENCIL 0x84F9
+#define GR_GL_DEPTH24_STENCIL8 0x88F0
+
+#define GR_GL_MAX_SAMPLES 0x8D57
+// GL_IMG_multisampled_render_to_texture uses a different value for GL_MAX_SAMPLES
+#define GR_GL_MAX_SAMPLES_IMG 0x9135
+
+#define GR_GL_RENDERBUFFER_WIDTH 0x8D42
+#define GR_GL_RENDERBUFFER_HEIGHT 0x8D43
+#define GR_GL_RENDERBUFFER_INTERNAL_FORMAT 0x8D44
+#define GR_GL_RENDERBUFFER_RED_SIZE 0x8D50
+#define GR_GL_RENDERBUFFER_GREEN_SIZE 0x8D51
+#define GR_GL_RENDERBUFFER_BLUE_SIZE 0x8D52
+#define GR_GL_RENDERBUFFER_ALPHA_SIZE 0x8D53
+#define GR_GL_RENDERBUFFER_DEPTH_SIZE 0x8D54
+#define GR_GL_RENDERBUFFER_STENCIL_SIZE 0x8D55
+
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE 0x8CD0
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME 0x8CD1
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL 0x8CD2
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE 0x8CD3
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER 0x8CD4
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING 0x8210
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE 0x8211
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE 0x8212
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE 0x8213
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE 0x8214
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE 0x8215
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE 0x8216
+#define GR_GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE 0x8217
+
+#define GR_GL_COLOR_ATTACHMENT0 0x8CE0
+#define GR_GL_DEPTH_ATTACHMENT 0x8D00
+#define GR_GL_STENCIL_ATTACHMENT 0x8D20
+
+// GL_EXT_discard_framebuffer
+#define GR_GL_COLOR 0x1800
+#define GR_GL_DEPTH 0x1801
+#define GR_GL_STENCIL 0x1802
+
+#define GR_GL_NONE 0
+#define GR_GL_FRAMEBUFFER_DEFAULT 0x8218
+
+#define GR_GL_FRAMEBUFFER_COMPLETE 0x8CD5
+#define GR_GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT 0x8CD6
+#define GR_GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT 0x8CD7
+#define GR_GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS 0x8CD9
+#define GR_GL_FRAMEBUFFER_UNSUPPORTED 0x8CDD
+
+#define GR_GL_FRAMEBUFFER_BINDING 0x8CA6
+#define GR_GL_RENDERBUFFER_BINDING 0x8CA7
+#define GR_GL_MAX_RENDERBUFFER_SIZE 0x84E8
+
+#define GR_GL_INVALID_FRAMEBUFFER_OPERATION 0x0506
+
+/* Path Rendering */
+// commands
+#define GR_GL_CLOSE_PATH 0x00
+#define GR_GL_MOVE_TO 0x02
+#define GR_GL_LINE_TO 0x04
+#define GR_GL_QUADRATIC_CURVE_TO 0x0A
+#define GR_GL_CUBIC_CURVE_TO 0x0C
+#define GR_GL_CONIC_CURVE_TO 0x1A
+
+// path parameters
+#define GR_GL_PATH_STROKE_WIDTH 0x9075
+#define GR_GL_PATH_END_CAPS 0x9076
+#define GR_GL_PATH_JOIN_STYLE 0x9079
+#define GR_GL_PATH_MITER_LIMIT 0x907A
+#define GR_GL_PATH_STROKE_BOUND 0x9086
+
+// fill modes
+#define GR_GL_COUNT_UP 0x9088
+
+// cover mode
+#define GR_GL_BOUNDING_BOX 0x908D
+#define GR_GL_BOUNDING_BOX_OF_BOUNDING_BOXES 0x909C
+
+// transform type
+#define GR_GL_TRANSLATE_X 0x908E
+#define GR_GL_TRANSLATE_Y 0x908F
+#define GR_GL_TRANSLATE_2D 0x9090
+#define GR_GL_TRANSPOSE_AFFINE_2D 0x9096
+
+// cap/dash values
+#define GR_GL_SQUARE 0x90A3
+#define GR_GL_ROUND 0x90A4
+
+// join values
+#define GR_GL_BEVEL 0x90A6
+#define GR_GL_MITER_REVERT 0x90A7
+
+// glyph loading values
+#define GR_GL_STANDARD_FONT_FORMAT 0x936C
+#define GR_GL_FONT_GLYPHS_AVAILABLE 0x9368
+
+// NV_path_rendering extension to ARB_program_interface_query:
+// .. corresponds to the set of active input variables used by the fragment
+// shader stage of <program> (if a fragment stage exists).
+#define GR_GL_FRAGMENT_INPUT 0x936D
+
+// NV_path_rendering extension to EXT_direct_state_access:
+// [the matrix functions] must support the PATH_PROJECTION_NV and
+// PATH_MODELVIEW_NV tokens for matrixMode.
+#define GR_GL_PATH_PROJECTION 0x1701
+#define GR_GL_PATH_MODELVIEW 0x1700
+
+/* ARM specific define for MSAA support on framebuffer fetch */
+#define GR_GL_FETCH_PER_SAMPLE_ARM 0x8F65
+
+/* GL_EXT_raster_multisample */
+#define GR_GL_RASTER_MULTISAMPLE 0x9327
+#define GR_GL_RASTER_SAMPLES 0x9328
+#define GR_GL_MAX_RASTER_SAMPLES 0x9329
+#define GR_GL_RASTER_FIXED_SAMPLE_LOCATIONS 0x932A
+#define GR_GL_MULTISAMPLE_RASTERIZATION_ALLOWED 0x932B
+#define GR_GL_EFFECTIVE_RASTER_SAMPLES 0x932C
+
+/* GL_KHR_debug */
+#define GR_GL_DEBUG_OUTPUT 0x92E0
+#define GR_GL_DEBUG_OUTPUT_SYNCHRONOUS 0x8242
+#define GR_GL_CONTEXT_FLAG_DEBUG_BIT 0x00000002
+#define GR_GL_MAX_DEBUG_MESSAGE_LENGTH 0x9143
+#define GR_GL_MAX_DEBUG_LOGGED_MESSAGES 0x9144
+#define GR_GL_DEBUG_LOGGED_MESSAGES 0x9145
+#define GR_GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH 0x8243
+#define GR_GL_MAX_DEBUG_GROUP_STACK_DEPTH 0x826C
+#define GR_GL_DEBUG_GROUP_STACK_DEPTH 0x826D
+#define GR_GL_MAX_LABEL_LENGTH 0x82E8
+#define GR_GL_DEBUG_SOURCE_API 0x8246
+#define GR_GL_DEBUG_SOURCE_WINDOW_SYSTEM 0x8247
+#define GR_GL_DEBUG_SOURCE_SHADER_COMPILER 0x8248
+#define GR_GL_DEBUG_SOURCE_THIRD_PARTY 0x8249
+#define GR_GL_DEBUG_SOURCE_APPLICATION 0x824A
+#define GR_GL_DEBUG_SOURCE_OTHER 0x824B
+#define GR_GL_DEBUG_TYPE_ERROR 0x824C
+#define GR_GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR 0x824D
+#define GR_GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR 0x824E
+#define GR_GL_DEBUG_TYPE_PORTABILITY 0x824F
+#define GR_GL_DEBUG_TYPE_PERFORMANCE 0x8250
+#define GR_GL_DEBUG_TYPE_OTHER 0x8251
+#define GR_GL_DEBUG_TYPE_MARKER 0x8268
+#define GR_GL_DEBUG_TYPE_PUSH_GROUP 0x8269
+#define GR_GL_DEBUG_TYPE_POP_GROUP 0x826A
+#define GR_GL_DEBUG_SEVERITY_HIGH 0x9146
+#define GR_GL_DEBUG_SEVERITY_MEDIUM 0x9147
+#define GR_GL_DEBUG_SEVERITY_LOW 0x9148
+#define GR_GL_DEBUG_SEVERITY_NOTIFICATION 0x826B
+#define GR_GL_STACK_UNDERFLOW 0x0504
+#define GR_GL_STACK_OVERFLOW 0x0503
+#define GR_GL_BUFFER 0x82E0
+#define GR_GL_SHADER 0x82E1
+#define GR_GL_PROGRAM 0x82E2
+#define GR_GL_QUERY 0x82E3
+#define GR_GL_PROGRAM_PIPELINE 0x82E4
+#define GR_GL_SAMPLER 0x82E6
+
+/* GL_OES_EGL_image_external */
+#define GR_GL_TEXTURE_EXTERNAL 0x8D65
+
+/* GL_ARB_texture_rectangle */
+#define GR_GL_TEXTURE_RECTANGLE 0x84F5
+
+/* GL_EXT_window_rectangles */
+#define GR_GL_MAX_WINDOW_RECTANGLES 0x8f14
+#define GR_GL_INCLUSIVE 0x8f10
+#define GR_GL_EXCLUSIVE 0x8f11
+
+/* GL_ARB_sync */
+#define GR_GL_SYNC_GPU_COMMANDS_COMPLETE 0x9117
+#define GR_GL_ALREADY_SIGNALED 0x911A
+#define GR_GL_TIMEOUT_EXPIRED 0x911B
+#define GR_GL_CONDITION_SATISFIED 0x911C
+#define GR_GL_WAIT_FAILED 0x911D
+#define GR_GL_SYNC_FLUSH_COMMANDS_BIT 0x00000001
+
+/* EGL Defines */
+#define GR_EGL_NO_DISPLAY ((GrEGLDisplay)0)
+#define GR_EGL_EXTENSIONS 0x3055
+#define GR_EGL_GL_TEXTURE_2D 0x30B1
+#define GR_EGL_GL_TEXTURE_LEVEL 0x30BC
+#define GR_EGL_IMAGE_PRESERVED 0x30D2
+#define GR_EGL_FALSE 0x0
+#define GR_EGL_TRUE 0x1
+#define GR_EGL_NONE 0x3038
+#define GR_EGL_NO_IMAGE ((GrEGLImage)0)
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLExtensions.cpp b/gfx/skia/skia/src/gpu/gl/GrGLExtensions.cpp
new file mode 100644
index 000000000..43a147d76
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLExtensions.cpp
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "gl/GrGLExtensions.h"
+#include "gl/GrGLDefines.h"
+#include "gl/GrGLUtil.h"
+
+#include "SkTSearch.h"
+#include "SkTSort.h"
+
+namespace { // This cannot be static because it is used as a template parameter.
+inline bool extension_compare(const SkString& a, const SkString& b) {
+ return strcmp(a.c_str(), b.c_str()) < 0;
+}
+}
+
+// Finds the index of ext in strings, or returns a negative value if ext is not found.
+static int find_string(const SkTArray<SkString>& strings, const char ext[]) {
+ if (strings.empty()) {
+ return -1;
+ }
+ SkString extensionStr(ext);
+ int idx = SkTSearch<SkString, extension_compare>(&strings.front(),
+ strings.count(),
+ extensionStr,
+ sizeof(SkString));
+ return idx;
+}
+
+GrGLExtensions::GrGLExtensions(const GrGLExtensions& that) : fStrings(new SkTArray<SkString>) {
+ *this = that;
+}
+
+GrGLExtensions& GrGLExtensions::operator=(const GrGLExtensions& that) {
+ *fStrings = *that.fStrings;
+ fInitialized = that.fInitialized;
+ return *this;
+}
+
+static void eat_space_sep_strings(SkTArray<SkString>* out, const char in[]) {
+ if (!in) {
+ return;
+ }
+ while (true) {
+ // skip over multiple spaces between extensions
+ while (' ' == *in) {
+ ++in;
+ }
+ // quit once we reach the end of the string.
+ if ('\0' == *in) {
+ break;
+ }
+ // we found an extension
+ size_t length = strcspn(in, " ");
+ out->push_back().set(in, length);
+ in += length;
+ }
+}
+
+bool GrGLExtensions::init(GrGLStandard standard,
+ GrGLFunction<GrGLGetStringProc> getString,
+ GrGLFunction<GrGLGetStringiProc> getStringi,
+ GrGLFunction<GrGLGetIntegervProc> getIntegerv,
+ GrGLFunction<GrEGLQueryStringProc> queryString,
+ GrEGLDisplay eglDisplay) {
+ fInitialized = false;
+ fStrings->reset();
+
+ if (!getString) {
+ return false;
+ }
+
+ // glGetStringi and indexed extensions were added in version 3.0 of desktop GL and ES.
+ const GrGLubyte* verString = getString(GR_GL_VERSION);
+ GrGLVersion version = GrGLGetVersionFromString((const char*) verString);
+ if (GR_GL_INVALID_VER == version) {
+ return false;
+ }
+
+ bool indexed = version >= GR_GL_VER(3, 0);
+
+ if (indexed) {
+ if (!getStringi || !getIntegerv) {
+ return false;
+ }
+ GrGLint extensionCnt = 0;
+ getIntegerv(GR_GL_NUM_EXTENSIONS, &extensionCnt);
+ fStrings->push_back_n(extensionCnt);
+ for (int i = 0; i < extensionCnt; ++i) {
+ const char* ext = (const char*) getStringi(GR_GL_EXTENSIONS, i);
+ (*fStrings)[i] = ext;
+ }
+ } else {
+ const char* extensions = (const char*) getString(GR_GL_EXTENSIONS);
+ if (!extensions) {
+ return false;
+ }
+ eat_space_sep_strings(fStrings, extensions);
+ }
+ if (queryString) {
+ const char* extensions = queryString(eglDisplay, GR_EGL_EXTENSIONS);
+
+ eat_space_sep_strings(fStrings, extensions);
+ }
+ if (!fStrings->empty()) {
+ SkTLessFunctionToFunctorAdaptor<SkString, extension_compare> cmp;
+ SkTQSort(&fStrings->front(), &fStrings->back(), cmp);
+ }
+ fInitialized = true;
+ return true;
+}
+
+bool GrGLExtensions::has(const char ext[]) const {
+ SkASSERT(fInitialized);
+ return find_string(*fStrings, ext) >= 0;
+}
+
+bool GrGLExtensions::remove(const char ext[]) {
+ SkASSERT(fInitialized);
+ int idx = find_string(*fStrings, ext);
+ if (idx >= 0) {
+ // This is not terribly efficient, but we really only expect this function to be called at
+ // most a handful of times when our test programs start.
+ SkAutoTDelete< SkTArray<SkString> > oldStrings(fStrings.release());
+ fStrings.reset(new SkTArray<SkString>(oldStrings->count() - 1));
+ fStrings->push_back_n(idx, &oldStrings->front());
+ fStrings->push_back_n(oldStrings->count() - idx - 1, &(*oldStrings)[idx] + 1);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+void GrGLExtensions::add(const char ext[]) {
+ int idx = find_string(*fStrings, ext);
+ if (idx < 0) {
+ // This is not the most efficient approach, since we end up doing a full sort of the
+ // extensions after the add.
+ fStrings->push_back().set(ext);
+ SkTLessFunctionToFunctorAdaptor<SkString, extension_compare> cmp;
+ SkTQSort(&fStrings->front(), &fStrings->back(), cmp);
+ }
+}
+
+void GrGLExtensions::print(const char* sep) const {
+ if (nullptr == sep) {
+ sep = " ";
+ }
+ int cnt = fStrings->count();
+ for (int i = 0; i < cnt; ++i) {
+ SkDebugf("%s%s", (*fStrings)[i].c_str(), (i < cnt - 1) ? sep : "");
+ }
+}
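For orientation on how the class added above is consumed, here is a minimal sketch (illustrative only, not part of the imported sources). It assumes a valid GrGLInterface* whose fStandard and fFunctions members are already populated; those member names are assumptions, as they are not shown in this patch.

    // Illustrative sketch only -- not part of the imported Skia sources.
    // Assumes "iface" is a valid, initialized GrGLInterface (an assumption here).
    #include "gl/GrGLDefines.h"
    #include "gl/GrGLExtensions.h"
    #include "gl/GrGLInterface.h"

    static bool supports_extension(const GrGLInterface* iface, const char ext[]) {
        GrGLExtensions extensions;
        // Passing a null EGL query callback and GR_EGL_NO_DISPLAY skips the
        // optional EGL extension string (init() above tolerates this).
        if (!extensions.init(iface->fStandard,
                             iface->fFunctions.fGetString,
                             iface->fFunctions.fGetStringi,
                             iface->fFunctions.fGetIntegerv,
                             nullptr,
                             GR_EGL_NO_DISPLAY)) {
            return false;  // the GL version string could not be parsed
        }
        return extensions.has(ext);
    }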
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLGLSL.cpp b/gfx/skia/skia/src/gpu/gl/GrGLGLSL.cpp
new file mode 100755
index 000000000..e5aed0355
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLGLSL.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLGLSL.h"
+#include "GrGLContext.h"
+#include "GrGLUtil.h"
+#include "SkString.h"
+
+bool GrGLGetGLSLGeneration(const GrGLInterface* gl, GrGLSLGeneration* generation) {
+ SkASSERT(generation);
+ GrGLSLVersion ver = GrGLGetGLSLVersion(gl);
+ if (GR_GLSL_INVALID_VER == ver) {
+ return false;
+ }
+ switch (gl->fStandard) {
+ case kGL_GrGLStandard:
+ SkASSERT(ver >= GR_GLSL_VER(1,10));
+ if (ver >= GR_GLSL_VER(4,00)) {
+ *generation = k400_GrGLSLGeneration;
+ } else if (ver >= GR_GLSL_VER(3,30)) {
+ *generation = k330_GrGLSLGeneration;
+ } else if (ver >= GR_GLSL_VER(1,50)) {
+ *generation = k150_GrGLSLGeneration;
+ } else if (ver >= GR_GLSL_VER(1,40)) {
+ *generation = k140_GrGLSLGeneration;
+ } else if (ver >= GR_GLSL_VER(1,30)) {
+ *generation = k130_GrGLSLGeneration;
+ } else {
+ *generation = k110_GrGLSLGeneration;
+ }
+ return true;
+ case kGLES_GrGLStandard:
+ SkASSERT(ver >= GR_GL_VER(1,00));
+ if (ver >= GR_GLSL_VER(3,20)) {
+ *generation = k320es_GrGLSLGeneration;
+ } else if (ver >= GR_GLSL_VER(3,10)) {
+ *generation = k310es_GrGLSLGeneration;
+ } else if (ver >= GR_GLSL_VER(3,00)) {
+ *generation = k330_GrGLSLGeneration;
+ } else {
+ *generation = k110_GrGLSLGeneration;
+ }
+ return true;
+ default:
+ SkFAIL("Unknown GL Standard");
+ return false;
+ }
+}
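As a quick usage reference for the helper above, a hedged sketch follows (the fallback to k110_GrGLSLGeneration on failure is an assumption made for illustration, not behavior defined by this patch):

    // Illustrative sketch only -- not part of the imported Skia sources.
    #include "GrGLGLSL.h"

    static GrGLSLGeneration glsl_generation_or_oldest(const GrGLInterface* gl) {
        GrGLSLGeneration generation;
        if (!GrGLGetGLSLGeneration(gl, &generation)) {
            // The GLSL version string could not be parsed; assume the oldest generation.
            generation = k110_GrGLSLGeneration;
        }
        return generation;
    }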
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLGLSL.h b/gfx/skia/skia/src/gpu/gl/GrGLGLSL.h
new file mode 100755
index 000000000..31e2de7d7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLGLSL.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLInitGLSL_DEFINED
+#define GrGLInitGLSL_DEFINED
+
+#include "gl/GrGLInterface.h"
+#include "glsl/GrGLSL.h"
+#include "GrColor.h"
+#include "GrTypesPriv.h"
+#include "SkString.h"
+
+class GrGLContextInfo;
+
+/**
+ * Gets the most recent GLSL Generation compatible with the OpenGL context.
+ */
+bool GrGLGetGLSLGeneration(const GrGLInterface* gl, GrGLSLGeneration* generation);
+
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLGpu.cpp b/gfx/skia/skia/src/gpu/gl/GrGLGpu.cpp
new file mode 100644
index 000000000..9341355f9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLGpu.cpp
@@ -0,0 +1,4712 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLGpu.h"
+#include "GrGLBuffer.h"
+#include "GrGLGLSL.h"
+#include "GrGLGpuCommandBuffer.h"
+#include "GrGLStencilAttachment.h"
+#include "GrGLTextureRenderTarget.h"
+#include "GrFixedClip.h"
+#include "GrGpuResourcePriv.h"
+#include "GrMesh.h"
+#include "GrPipeline.h"
+#include "GrPLSGeometryProcessor.h"
+#include "GrRenderTargetPriv.h"
+#include "GrSurfacePriv.h"
+#include "GrTexturePriv.h"
+#include "GrTypes.h"
+#include "builders/GrGLShaderStringBuilder.h"
+#include "glsl/GrGLSL.h"
+#include "glsl/GrGLSLCaps.h"
+#include "glsl/GrGLSLPLSPathRendering.h"
+#include "instanced/GLInstancedRendering.h"
+#include "SkMipMap.h"
+#include "SkPixmap.h"
+#include "SkStrokeRec.h"
+#include "SkTemplates.h"
+#include "SkTypes.h"
+
+#define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
+#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)
+
+#define SKIP_CACHE_CHECK true
+
+#if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
+ #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface)
+ #define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call)
+ #define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface)
+#else
+ #define CLEAR_ERROR_BEFORE_ALLOC(iface)
+ #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call)
+ #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+using gr_instanced::InstancedRendering;
+using gr_instanced::GLInstancedRendering;
+
+static const GrGLenum gXfermodeEquation2Blend[] = {
+ // Basic OpenGL blend equations.
+ GR_GL_FUNC_ADD,
+ GR_GL_FUNC_SUBTRACT,
+ GR_GL_FUNC_REVERSE_SUBTRACT,
+
+ // GL_KHR_blend_equation_advanced.
+ GR_GL_SCREEN,
+ GR_GL_OVERLAY,
+ GR_GL_DARKEN,
+ GR_GL_LIGHTEN,
+ GR_GL_COLORDODGE,
+ GR_GL_COLORBURN,
+ GR_GL_HARDLIGHT,
+ GR_GL_SOFTLIGHT,
+ GR_GL_DIFFERENCE,
+ GR_GL_EXCLUSION,
+ GR_GL_MULTIPLY,
+ GR_GL_HSL_HUE,
+ GR_GL_HSL_SATURATION,
+ GR_GL_HSL_COLOR,
+ GR_GL_HSL_LUMINOSITY
+};
+GR_STATIC_ASSERT(0 == kAdd_GrBlendEquation);
+GR_STATIC_ASSERT(1 == kSubtract_GrBlendEquation);
+GR_STATIC_ASSERT(2 == kReverseSubtract_GrBlendEquation);
+GR_STATIC_ASSERT(3 == kScreen_GrBlendEquation);
+GR_STATIC_ASSERT(4 == kOverlay_GrBlendEquation);
+GR_STATIC_ASSERT(5 == kDarken_GrBlendEquation);
+GR_STATIC_ASSERT(6 == kLighten_GrBlendEquation);
+GR_STATIC_ASSERT(7 == kColorDodge_GrBlendEquation);
+GR_STATIC_ASSERT(8 == kColorBurn_GrBlendEquation);
+GR_STATIC_ASSERT(9 == kHardLight_GrBlendEquation);
+GR_STATIC_ASSERT(10 == kSoftLight_GrBlendEquation);
+GR_STATIC_ASSERT(11 == kDifference_GrBlendEquation);
+GR_STATIC_ASSERT(12 == kExclusion_GrBlendEquation);
+GR_STATIC_ASSERT(13 == kMultiply_GrBlendEquation);
+GR_STATIC_ASSERT(14 == kHSLHue_GrBlendEquation);
+GR_STATIC_ASSERT(15 == kHSLSaturation_GrBlendEquation);
+GR_STATIC_ASSERT(16 == kHSLColor_GrBlendEquation);
+GR_STATIC_ASSERT(17 == kHSLLuminosity_GrBlendEquation);
+GR_STATIC_ASSERT(SK_ARRAY_COUNT(gXfermodeEquation2Blend) == kGrBlendEquationCnt);
+
+static const GrGLenum gXfermodeCoeff2Blend[] = {
+ GR_GL_ZERO,
+ GR_GL_ONE,
+ GR_GL_SRC_COLOR,
+ GR_GL_ONE_MINUS_SRC_COLOR,
+ GR_GL_DST_COLOR,
+ GR_GL_ONE_MINUS_DST_COLOR,
+ GR_GL_SRC_ALPHA,
+ GR_GL_ONE_MINUS_SRC_ALPHA,
+ GR_GL_DST_ALPHA,
+ GR_GL_ONE_MINUS_DST_ALPHA,
+ GR_GL_CONSTANT_COLOR,
+ GR_GL_ONE_MINUS_CONSTANT_COLOR,
+ GR_GL_CONSTANT_ALPHA,
+ GR_GL_ONE_MINUS_CONSTANT_ALPHA,
+
+ // extended blend coeffs
+ GR_GL_SRC1_COLOR,
+ GR_GL_ONE_MINUS_SRC1_COLOR,
+ GR_GL_SRC1_ALPHA,
+ GR_GL_ONE_MINUS_SRC1_ALPHA,
+};
+
+bool GrGLGpu::BlendCoeffReferencesConstant(GrBlendCoeff coeff) {
+ static const bool gCoeffReferencesBlendConst[] = {
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ true,
+ true,
+ true,
+ true,
+
+ // extended blend coeffs
+ false,
+ false,
+ false,
+ false,
+ };
+ return gCoeffReferencesBlendConst[coeff];
+ GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gCoeffReferencesBlendConst));
+
+ GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff);
+ GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff);
+ GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff);
+ GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff);
+ GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff);
+ GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff);
+ GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff);
+ GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff);
+ GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff);
+ GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff);
+ GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff);
+ GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff);
+ GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff);
+ GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff);
+
+ GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff);
+ GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff);
+ GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff);
+ GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff);
+
+ // The assertion for gXfermodeCoeff2Blend has to be in GrGpu scope.
+ GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gXfermodeCoeff2Blend));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+
+GrGpu* GrGLGpu::Create(GrBackendContext backendContext, const GrContextOptions& options,
+ GrContext* context) {
+ SkAutoTUnref<const GrGLInterface> glInterface(
+ reinterpret_cast<const GrGLInterface*>(backendContext));
+ if (!glInterface) {
+ glInterface.reset(GrGLDefaultInterface());
+ } else {
+ glInterface->ref();
+ }
+ if (!glInterface) {
+ return nullptr;
+ }
+ GrGLContext* glContext = GrGLContext::Create(glInterface, options);
+ if (glContext) {
+ return new GrGLGpu(glContext, context);
+ }
+ return nullptr;
+}
+
+static bool gPrintStartupSpew;
+
+GrGLGpu::GrGLGpu(GrGLContext* ctx, GrContext* context)
+ : GrGpu(context)
+ , fGLContext(ctx)
+ , fProgramCache(new ProgramCache(this))
+ , fHWProgramID(0)
+ , fTempSrcFBOID(0)
+ , fTempDstFBOID(0)
+ , fStencilClearFBOID(0)
+ , fHWMaxUsedBufferTextureUnit(-1)
+ , fHWPLSEnabled(false)
+ , fPLSHasBeenUsed(false)
+ , fHWMinSampleShading(0.0) {
+ for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
+ fCopyPrograms[i].fProgram = 0;
+ }
+ for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
+ fMipmapPrograms[i].fProgram = 0;
+ }
+ fWireRectProgram.fProgram = 0;
+ fPLSSetupProgram.fProgram = 0;
+
+ SkASSERT(ctx);
+ fCaps.reset(SkRef(ctx->caps()));
+
+ fHWBoundTextureUniqueIDs.reset(this->glCaps().glslCaps()->maxCombinedSamplers());
+
+ fHWBufferState[kVertex_GrBufferType].fGLTarget = GR_GL_ARRAY_BUFFER;
+ fHWBufferState[kIndex_GrBufferType].fGLTarget = GR_GL_ELEMENT_ARRAY_BUFFER;
+ fHWBufferState[kTexel_GrBufferType].fGLTarget = GR_GL_TEXTURE_BUFFER;
+ fHWBufferState[kDrawIndirect_GrBufferType].fGLTarget = GR_GL_DRAW_INDIRECT_BUFFER;
+ if (GrGLCaps::kChromium_TransferBufferType == this->glCaps().transferBufferType()) {
+ fHWBufferState[kXferCpuToGpu_GrBufferType].fGLTarget =
+ GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
+ fHWBufferState[kXferGpuToCpu_GrBufferType].fGLTarget =
+ GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
+ } else {
+ fHWBufferState[kXferCpuToGpu_GrBufferType].fGLTarget = GR_GL_PIXEL_UNPACK_BUFFER;
+ fHWBufferState[kXferGpuToCpu_GrBufferType].fGLTarget = GR_GL_PIXEL_PACK_BUFFER;
+ }
+ GR_STATIC_ASSERT(6 == SK_ARRAY_COUNT(fHWBufferState));
+
+ if (this->caps()->shaderCaps()->texelBufferSupport()) {
+ fHWBufferTextures.reset(this->glCaps().glslCaps()->maxCombinedSamplers());
+ }
+
+ if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
+ fPathRendering.reset(new GrGLPathRendering(this));
+ }
+
+ GrGLClearErr(this->glInterface());
+ if (gPrintStartupSpew) {
+ const GrGLubyte* vendor;
+ const GrGLubyte* renderer;
+ const GrGLubyte* version;
+ GL_CALL_RET(vendor, GetString(GR_GL_VENDOR));
+ GL_CALL_RET(renderer, GetString(GR_GL_RENDERER));
+ GL_CALL_RET(version, GetString(GR_GL_VERSION));
+ SkDebugf("------------------------- create GrGLGpu %p --------------\n",
+ this);
+ SkDebugf("------ VENDOR %s\n", vendor);
+ SkDebugf("------ RENDERER %s\n", renderer);
+ SkDebugf("------ VERSION %s\n", version);
+ SkDebugf("------ EXTENSIONS\n");
+ this->glContext().extensions().print();
+ SkDebugf("\n");
+ SkDebugf("%s", this->glCaps().dump().c_str());
+ }
+}
+
+GrGLGpu::~GrGLGpu() {
+ // Ensure any GrGpuResource objects get deleted first, since they may require a working GrGLGpu
+ // to release the resources held by the objects themselves.
+ fPathRendering.reset();
+ fCopyProgramArrayBuffer.reset();
+ fMipmapProgramArrayBuffer.reset();
+ fWireRectArrayBuffer.reset();
+ fPLSSetupProgram.fArrayBuffer.reset();
+
+ if (0 != fHWProgramID) {
+ // detach the current program so there is no confusion on OpenGL's part
+ // that we want it to be deleted
+ GL_CALL(UseProgram(0));
+ }
+
+ if (0 != fTempSrcFBOID) {
+ GL_CALL(DeleteFramebuffers(1, &fTempSrcFBOID));
+ }
+ if (0 != fTempDstFBOID) {
+ GL_CALL(DeleteFramebuffers(1, &fTempDstFBOID));
+ }
+ if (0 != fStencilClearFBOID) {
+ GL_CALL(DeleteFramebuffers(1, &fStencilClearFBOID));
+ }
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
+ if (0 != fCopyPrograms[i].fProgram) {
+ GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
+ }
+ }
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
+ if (0 != fMipmapPrograms[i].fProgram) {
+ GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
+ }
+ }
+
+ if (0 != fWireRectProgram.fProgram) {
+ GL_CALL(DeleteProgram(fWireRectProgram.fProgram));
+ }
+
+ if (0 != fPLSSetupProgram.fProgram) {
+ GL_CALL(DeleteProgram(fPLSSetupProgram.fProgram));
+ }
+
+ delete fProgramCache;
+}
+
+bool GrGLGpu::createPLSSetupProgram() {
+ if (!fPLSSetupProgram.fArrayBuffer) {
+ static const GrGLfloat vdata[] = {
+ 0, 0,
+ 0, 1,
+ 1, 0,
+ 1, 1
+ };
+ fPLSSetupProgram.fArrayBuffer.reset(GrGLBuffer::Create(this, sizeof(vdata),
+ kVertex_GrBufferType,
+ kStatic_GrAccessPattern, vdata));
+ if (!fPLSSetupProgram.fArrayBuffer) {
+ return false;
+ }
+ }
+
+ SkASSERT(!fPLSSetupProgram.fProgram);
+ GL_CALL_RET(fPLSSetupProgram.fProgram, CreateProgram());
+ if (!fPLSSetupProgram.fProgram) {
+ return false;
+ }
+
+ const GrGLSLCaps* glslCaps = this->glCaps().glslCaps();
+ const char* version = glslCaps->versionDeclString();
+
+ GrGLSLShaderVar aVertex("a_vertex", kVec2f_GrSLType, GrShaderVar::kAttribute_TypeModifier);
+ GrGLSLShaderVar uTexCoordXform("u_texCoordXform", kVec4f_GrSLType,
+ GrShaderVar::kUniform_TypeModifier);
+ GrGLSLShaderVar uPosXform("u_posXform", kVec4f_GrSLType, GrShaderVar::kUniform_TypeModifier);
+ GrGLSLShaderVar uTexture("u_texture", kTexture2DSampler_GrSLType,
+ GrShaderVar::kUniform_TypeModifier);
+ GrGLSLShaderVar vTexCoord("v_texCoord", kVec2f_GrSLType, GrShaderVar::kVaryingOut_TypeModifier);
+
+ SkString vshaderTxt(version);
+ if (glslCaps->noperspectiveInterpolationSupport()) {
+ if (const char* extension = glslCaps->noperspectiveInterpolationExtensionString()) {
+ vshaderTxt.appendf("#extension %s : require\n", extension);
+ }
+ vTexCoord.addModifier("noperspective");
+ }
+ aVertex.appendDecl(glslCaps, &vshaderTxt);
+ vshaderTxt.append(";");
+ uTexCoordXform.appendDecl(glslCaps, &vshaderTxt);
+ vshaderTxt.append(";");
+ uPosXform.appendDecl(glslCaps, &vshaderTxt);
+ vshaderTxt.append(";");
+ vTexCoord.appendDecl(glslCaps, &vshaderTxt);
+ vshaderTxt.append(";");
+
+ vshaderTxt.append(
+ "// PLS Setup Program VS\n"
+ "void main() {"
+ " gl_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;"
+ " gl_Position.zw = vec2(0, 1);"
+ "}"
+ );
+
+ SkString fshaderTxt(version);
+ if (glslCaps->noperspectiveInterpolationSupport()) {
+ if (const char* extension = glslCaps->noperspectiveInterpolationExtensionString()) {
+ fshaderTxt.appendf("#extension %s : require\n", extension);
+ }
+ }
+ fshaderTxt.append("#extension ");
+ fshaderTxt.append(glslCaps->fbFetchExtensionString());
+ fshaderTxt.append(" : require\n");
+ fshaderTxt.append("#extension GL_EXT_shader_pixel_local_storage : require\n");
+ GrGLSLAppendDefaultFloatPrecisionDeclaration(kDefault_GrSLPrecision, *glslCaps, &fshaderTxt);
+ vTexCoord.setTypeModifier(GrShaderVar::kVaryingIn_TypeModifier);
+ vTexCoord.appendDecl(glslCaps, &fshaderTxt);
+ fshaderTxt.append(";");
+ uTexture.appendDecl(glslCaps, &fshaderTxt);
+ fshaderTxt.append(";");
+
+ fshaderTxt.appendf(
+ "// PLS Setup Program FS\n"
+ GR_GL_PLS_PATH_DATA_DECL
+ "void main() {\n"
+ " " GR_GL_PLS_DSTCOLOR_NAME " = gl_LastFragColorARM;\n"
+ " pls.windings = ivec4(0, 0, 0, 0);\n"
+ "}"
+ );
+
+ const char* str;
+ GrGLint length;
+
+ str = vshaderTxt.c_str();
+ length = SkToInt(vshaderTxt.size());
+ GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fPLSSetupProgram.fProgram,
+ GR_GL_VERTEX_SHADER, &str, &length, 1, &fStats);
+
+ str = fshaderTxt.c_str();
+ length = SkToInt(fshaderTxt.size());
+ GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fPLSSetupProgram.fProgram,
+ GR_GL_FRAGMENT_SHADER, &str, &length, 1, &fStats);
+
+ GL_CALL(LinkProgram(fPLSSetupProgram.fProgram));
+
+ GL_CALL_RET(fPLSSetupProgram.fPosXformUniform, GetUniformLocation(fPLSSetupProgram.fProgram,
+ "u_posXform"));
+
+ GL_CALL(BindAttribLocation(fPLSSetupProgram.fProgram, 0, "a_vertex"));
+
+ GL_CALL(DeleteShader(vshader));
+ GL_CALL(DeleteShader(fshader));
+
+ return true;
+}
+
+void GrGLGpu::disconnect(DisconnectType type) {
+ INHERITED::disconnect(type);
+ if (DisconnectType::kCleanup == type) {
+ if (fHWProgramID) {
+ GL_CALL(UseProgram(0));
+ }
+ if (fTempSrcFBOID) {
+ GL_CALL(DeleteFramebuffers(1, &fTempSrcFBOID));
+ }
+ if (fTempDstFBOID) {
+ GL_CALL(DeleteFramebuffers(1, &fTempDstFBOID));
+ }
+ if (fStencilClearFBOID) {
+ GL_CALL(DeleteFramebuffers(1, &fStencilClearFBOID));
+ }
+ for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
+ if (fCopyPrograms[i].fProgram) {
+ GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
+ }
+ }
+ for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
+ if (fMipmapPrograms[i].fProgram) {
+ GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
+ }
+ }
+ if (fWireRectProgram.fProgram) {
+ GL_CALL(DeleteProgram(fWireRectProgram.fProgram));
+ }
+ if (fPLSSetupProgram.fProgram) {
+ GL_CALL(DeleteProgram(fPLSSetupProgram.fProgram));
+ }
+ } else {
+ if (fProgramCache) {
+ fProgramCache->abandon();
+ }
+ }
+
+ delete fProgramCache;
+ fProgramCache = nullptr;
+
+ fHWProgramID = 0;
+ fTempSrcFBOID = 0;
+ fTempDstFBOID = 0;
+ fStencilClearFBOID = 0;
+ fCopyProgramArrayBuffer.reset();
+ for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
+ fCopyPrograms[i].fProgram = 0;
+ }
+ fMipmapProgramArrayBuffer.reset();
+ for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
+ fMipmapPrograms[i].fProgram = 0;
+ }
+ fWireRectProgram.fProgram = 0;
+ fWireRectArrayBuffer.reset();
+ fPLSSetupProgram.fProgram = 0;
+ fPLSSetupProgram.fArrayBuffer.reset();
+ if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
+ this->glPathRendering()->disconnect(type);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGLGpu::onResetContext(uint32_t resetBits) {
+ // We don't use the depth buffer (zb) at all.
+ if (resetBits & kMisc_GrGLBackendState) {
+ GL_CALL(Disable(GR_GL_DEPTH_TEST));
+ GL_CALL(DepthMask(GR_GL_FALSE));
+
+ fHWBufferState[kTexel_GrBufferType].invalidate();
+ fHWBufferState[kDrawIndirect_GrBufferType].invalidate();
+ fHWBufferState[kXferCpuToGpu_GrBufferType].invalidate();
+ fHWBufferState[kXferGpuToCpu_GrBufferType].invalidate();
+
+ fHWDrawFace = GrDrawFace::kInvalid;
+
+ if (kGL_GrGLStandard == this->glStandard()) {
+ // Desktop-only state that we never change
+ if (!this->glCaps().isCoreProfile()) {
+ GL_CALL(Disable(GR_GL_POINT_SMOOTH));
+ GL_CALL(Disable(GR_GL_LINE_SMOOTH));
+ GL_CALL(Disable(GR_GL_POLYGON_SMOOTH));
+ GL_CALL(Disable(GR_GL_POLYGON_STIPPLE));
+ GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP));
+ GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP));
+ }
+ // The Windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
+ // core profile. This seems like a bug, since the core spec removes any mention of
+ // GL_ARB_imaging.
+ if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) {
+ GL_CALL(Disable(GR_GL_COLOR_TABLE));
+ }
+ GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL));
+ // Since ES doesn't support glPointSize at all, we always use the VS to
+ // set the point size.
+ GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE));
+
+ // We should set glPolygonMode(FRONT_AND_BACK,FILL) here, too. It isn't
+ // currently part of our gl interface. There are probably others as
+ // well.
+ }
+
+ if (kGLES_GrGLStandard == this->glStandard() &&
+ this->hasExtension("GL_ARM_shader_framebuffer_fetch")) {
+ // The ARM extension requires specifically enabling MSAA fetching per sample.
+ // On some devices this may have a performance hit. Also, multiple render targets are disabled.
+ GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE_ARM));
+ }
+ fHWWriteToColor = kUnknown_TriState;
+ // we only ever use lines in hairline mode
+ GL_CALL(LineWidth(1));
+ GL_CALL(Disable(GR_GL_DITHER));
+ }
+
+ if (resetBits & kMSAAEnable_GrGLBackendState) {
+ fMSAAEnabled = kUnknown_TriState;
+
+ if (this->caps()->usesMixedSamples()) {
+ if (0 != this->caps()->maxRasterSamples()) {
+ fHWRasterMultisampleEnabled = kUnknown_TriState;
+ fHWNumRasterSamples = 0;
+ }
+
+ // The skia blend modes all use premultiplied alpha and therefore expect RGBA coverage
+ // modulation. This state has no effect when not rendering to a mixed sampled target.
+ GL_CALL(CoverageModulation(GR_GL_RGBA));
+ }
+ }
+
+ fHWActiveTextureUnitIdx = -1; // invalid
+
+ if (resetBits & kTextureBinding_GrGLBackendState) {
+ for (int s = 0; s < fHWBoundTextureUniqueIDs.count(); ++s) {
+ fHWBoundTextureUniqueIDs[s] = SK_InvalidUniqueID;
+ }
+ for (int b = 0; b < fHWBufferTextures.count(); ++b) {
+ SkASSERT(this->caps()->shaderCaps()->texelBufferSupport());
+ fHWBufferTextures[b].fKnownBound = false;
+ }
+ }
+
+ if (resetBits & kBlend_GrGLBackendState) {
+ fHWBlendState.invalidate();
+ }
+
+ if (resetBits & kView_GrGLBackendState) {
+ fHWScissorSettings.invalidate();
+ fHWWindowRectsState.invalidate();
+ fHWViewport.invalidate();
+ }
+
+ if (resetBits & kStencil_GrGLBackendState) {
+ fHWStencilSettings.invalidate();
+ fHWStencilTestEnabled = kUnknown_TriState;
+ }
+
+ // Vertex
+ if (resetBits & kVertex_GrGLBackendState) {
+ fHWVertexArrayState.invalidate();
+ fHWBufferState[kVertex_GrBufferType].invalidate();
+ fHWBufferState[kIndex_GrBufferType].invalidate();
+ }
+
+ if (resetBits & kRenderTarget_GrGLBackendState) {
+ fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
+ fHWSRGBFramebuffer = kUnknown_TriState;
+ }
+
+ if (resetBits & kPathRendering_GrGLBackendState) {
+ if (this->caps()->shaderCaps()->pathRenderingSupport()) {
+ this->glPathRendering()->resetContext();
+ }
+ }
+
+ // we assume these values
+ if (resetBits & kPixelStore_GrGLBackendState) {
+ if (this->glCaps().unpackRowLengthSupport()) {
+ GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
+ }
+ if (this->glCaps().packRowLengthSupport()) {
+ GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
+ }
+ if (this->glCaps().unpackFlipYSupport()) {
+ GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
+ }
+ if (this->glCaps().packFlipYSupport()) {
+ GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE));
+ }
+ }
+
+ if (resetBits & kProgram_GrGLBackendState) {
+ fHWProgramID = 0;
+ }
+}
+
+static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin, bool renderTarget) {
+ // By default, GrRenderTargets are GL's normal orientation so that they
+ // can be drawn to by the outside world without the client having
+ // to render upside down.
+ if (kDefault_GrSurfaceOrigin == origin) {
+ return renderTarget ? kBottomLeft_GrSurfaceOrigin : kTopLeft_GrSurfaceOrigin;
+ } else {
+ return origin;
+ }
+}
+
+GrTexture* GrGLGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
+ GrWrapOwnership ownership) {
+#ifdef SK_IGNORE_GL_TEXTURE_TARGET
+ if (!desc.fTextureHandle) {
+ return nullptr;
+ }
+#else
+ const GrGLTextureInfo* info = reinterpret_cast<const GrGLTextureInfo*>(desc.fTextureHandle);
+ if (!info || !info->fID) {
+ return nullptr;
+ }
+#endif
+
+ // next line relies on GrBackendTextureDesc's flags matching GrTexture's
+ bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
+
+ GrGLTexture::IDDesc idDesc;
+ GrSurfaceDesc surfDesc;
+
+#ifdef SK_IGNORE_GL_TEXTURE_TARGET
+ idDesc.fInfo.fID = static_cast<GrGLuint>(desc.fTextureHandle);
+    // When we create the texture, we only create GL_TEXTURE_2D at the moment.
+    // External clients can do something different.
+    idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D;
+#else
+ idDesc.fInfo = *info;
+#endif
+
+ if (GR_GL_TEXTURE_EXTERNAL == idDesc.fInfo.fTarget) {
+ if (renderTarget) {
+ // This combination is not supported.
+ return nullptr;
+ }
+ if (!this->glCaps().glslCaps()->externalTextureSupport()) {
+ return nullptr;
+ }
+ } else if (GR_GL_TEXTURE_RECTANGLE == idDesc.fInfo.fTarget) {
+ if (!this->glCaps().rectangleTextureSupport()) {
+ return nullptr;
+ }
+ } else if (GR_GL_TEXTURE_2D != idDesc.fInfo.fTarget) {
+ return nullptr;
+ }
+
+ // Sample count is interpreted to mean the number of samples that Gr code should allocate
+ // for a render buffer that resolves to the texture. We don't support MSAA textures.
+ if (desc.fSampleCnt && !renderTarget) {
+ return nullptr;
+ }
+
+ if (kAdopt_GrWrapOwnership == ownership) {
+ idDesc.fOwnership = GrBackendObjectOwnership::kOwned;
+ } else {
+ idDesc.fOwnership = GrBackendObjectOwnership::kBorrowed;
+ }
+
+ surfDesc.fFlags = (GrSurfaceFlags) desc.fFlags;
+ surfDesc.fWidth = desc.fWidth;
+ surfDesc.fHeight = desc.fHeight;
+ surfDesc.fConfig = desc.fConfig;
+ surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
+ // FIXME: this should be calling resolve_origin(), but Chrome code is currently
+ // assuming the old behaviour, which is that backend textures are always
+ // BottomLeft, even for non-RT's. Once Chrome is fixed, change this to:
+    // surfDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
+ if (kDefault_GrSurfaceOrigin == desc.fOrigin) {
+ surfDesc.fOrigin = kBottomLeft_GrSurfaceOrigin;
+ } else {
+ surfDesc.fOrigin = desc.fOrigin;
+ }
+
+ GrGLTexture* texture = nullptr;
+ if (renderTarget) {
+ GrGLRenderTarget::IDDesc rtIDDesc;
+ if (!this->createRenderTargetObjects(surfDesc, idDesc.fInfo, &rtIDDesc)) {
+ return nullptr;
+ }
+ texture = GrGLTextureRenderTarget::CreateWrapped(this, surfDesc, idDesc, rtIDDesc);
+ } else {
+ texture = GrGLTexture::CreateWrapped(this, surfDesc, idDesc);
+ }
+ if (nullptr == texture) {
+ return nullptr;
+ }
+
+ return texture;
+}
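+
+// Illustration only (not part of Skia): how a client might describe an existing GL
+// texture for the wrapping path above. The GrGLTextureInfo and GrBackendTextureDesc
+// fields are the ones consumed above; the handle type name (GrBackendObject) and the
+// public wrap entry point vary by Skia revision, so treat them as assumptions. The
+// GrGLTextureInfo must outlive any use of the desc (with SK_IGNORE_GL_TEXTURE_TARGET
+// the raw texture ID is passed instead). Wrapped in #if 0 so it has no effect here.
+#if 0
+static GrBackendTextureDesc make_wrapped_texture_desc(const GrGLTextureInfo* info,
+                                                      int width, int height) {
+    GrBackendTextureDesc desc;
+    desc.fFlags = kRenderTarget_GrBackendTextureFlag;   // request render target wrapping
+    desc.fWidth = width;
+    desc.fHeight = height;
+    desc.fConfig = kRGBA_8888_GrPixelConfig;
+    desc.fSampleCnt = 0;                                 // see the sample-count note above
+    desc.fOrigin = kBottomLeft_GrSurfaceOrigin;
+    desc.fTextureHandle = reinterpret_cast<GrBackendObject>(info);
+    return desc;                                         // hand to the context's wrap call
+}
+#endif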
+
+GrRenderTarget* GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
+ GrWrapOwnership ownership) {
+ GrGLRenderTarget::IDDesc idDesc;
+ idDesc.fRTFBOID = static_cast<GrGLuint>(wrapDesc.fRenderTargetHandle);
+ idDesc.fMSColorRenderbufferID = 0;
+ idDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID;
+ if (kAdopt_GrWrapOwnership == ownership) {
+ idDesc.fRTFBOOwnership = GrBackendObjectOwnership::kOwned;
+ } else {
+ idDesc.fRTFBOOwnership = GrBackendObjectOwnership::kBorrowed;
+ }
+ idDesc.fIsMixedSampled = false;
+
+ GrSurfaceDesc desc;
+ desc.fConfig = wrapDesc.fConfig;
+ desc.fFlags = kCheckAllocation_GrSurfaceFlag | kRenderTarget_GrSurfaceFlag;
+ desc.fWidth = wrapDesc.fWidth;
+ desc.fHeight = wrapDesc.fHeight;
+ desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());
+ desc.fOrigin = resolve_origin(wrapDesc.fOrigin, true);
+
+ return GrGLRenderTarget::CreateWrapped(this, desc, idDesc, wrapDesc.fStencilBits);
+}
+
+GrRenderTarget* GrGLGpu::onWrapBackendTextureAsRenderTarget(const GrBackendTextureDesc& desc) {
+#ifdef SK_IGNORE_GL_TEXTURE_TARGET
+ if (!desc.fTextureHandle) {
+ return nullptr;
+ }
+#else
+ const GrGLTextureInfo* info = reinterpret_cast<const GrGLTextureInfo*>(desc.fTextureHandle);
+ if (!info || !info->fID) {
+ return nullptr;
+ }
+#endif
+
+ GrGLTextureInfo texInfo;
+ GrSurfaceDesc surfDesc;
+
+#ifdef SK_IGNORE_GL_TEXTURE_TARGET
+ texInfo.fID = static_cast<GrGLuint>(desc.fTextureHandle);
+ // We only support GL_TEXTURE_2D at the moment.
+ texInfo.fTarget = GR_GL_TEXTURE_2D;
+#else
+ texInfo = *info;
+#endif
+
+ if (GR_GL_TEXTURE_RECTANGLE != texInfo.fTarget &&
+ GR_GL_TEXTURE_2D != texInfo.fTarget) {
+ // Only texture rectangle and texture 2d are supported. We do not check whether texture
+ // rectangle is supported by Skia - if the caller provided us with a texture rectangle,
+ // we assume the necessary support exists.
+ return nullptr;
+ }
+
+ surfDesc.fFlags = (GrSurfaceFlags) desc.fFlags;
+ surfDesc.fWidth = desc.fWidth;
+ surfDesc.fHeight = desc.fHeight;
+ surfDesc.fConfig = desc.fConfig;
+ surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
+ // FIXME: this should be calling resolve_origin(), but Chrome code is currently
+ // assuming the old behaviour, which is that backend textures are always
+ // BottomLeft, even for non-RT's. Once Chrome is fixed, change this to:
+    // surfDesc.fOrigin = resolve_origin(desc.fOrigin, true /* renderTarget */);
+ if (kDefault_GrSurfaceOrigin == desc.fOrigin) {
+ surfDesc.fOrigin = kBottomLeft_GrSurfaceOrigin;
+ } else {
+ surfDesc.fOrigin = desc.fOrigin;
+ }
+
+ GrGLRenderTarget::IDDesc rtIDDesc;
+ if (!this->createRenderTargetObjects(surfDesc, texInfo, &rtIDDesc)) {
+ return nullptr;
+ }
+ return GrGLRenderTarget::CreateWrapped(this, surfDesc, rtIDDesc, 0);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool GrGLGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
+ GrPixelConfig srcConfig,
+ DrawPreference* drawPreference,
+ WritePixelTempDrawInfo* tempDrawInfo) {
+ if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
+ return false;
+ }
+
+ // This subclass only allows writes to textures. If the dst is not a texture we have to draw
+ // into it. We could use glDrawPixels on GLs that have it, but we don't today.
+ if (!dstSurface->asTexture()) {
+ ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
+ } else {
+ GrGLTexture* texture = static_cast<GrGLTexture*>(dstSurface->asTexture());
+ if (GR_GL_TEXTURE_EXTERNAL == texture->target()) {
+ // We don't currently support writing pixels to EXTERNAL textures.
+ return false;
+ }
+ }
+
+ if (GrPixelConfigIsSRGB(dstSurface->config()) != GrPixelConfigIsSRGB(srcConfig)) {
+ ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
+ }
+
+ // Start off assuming no swizzling
+ tempDrawInfo->fSwizzle = GrSwizzle::RGBA();
+ tempDrawInfo->fWriteConfig = srcConfig;
+
+ // These settings we will always want if a temp draw is performed. Initially set the config
+ // to srcConfig, though that may be modified if we decide to do a R/G swap.
+ tempDrawInfo->fTempSurfaceDesc.fFlags = kNone_GrSurfaceFlags;
+ tempDrawInfo->fTempSurfaceDesc.fConfig = srcConfig;
+ tempDrawInfo->fTempSurfaceDesc.fWidth = width;
+ tempDrawInfo->fTempSurfaceDesc.fHeight = height;
+ tempDrawInfo->fTempSurfaceDesc.fSampleCnt = 0;
+ tempDrawInfo->fTempSurfaceDesc.fOrigin = kTopLeft_GrSurfaceOrigin; // no CPU y-flip for TL.
+
+ bool configsAreRBSwaps = GrPixelConfigSwapRAndB(srcConfig) == dstSurface->config();
+
+ if (configsAreRBSwaps) {
+ if (!this->caps()->isConfigTexturable(srcConfig)) {
+ ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
+ tempDrawInfo->fTempSurfaceDesc.fConfig = dstSurface->config();
+ tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
+ tempDrawInfo->fWriteConfig = dstSurface->config();
+ } else if (this->glCaps().rgba8888PixelsOpsAreSlow() &&
+ kRGBA_8888_GrPixelConfig == srcConfig) {
+ ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference);
+ tempDrawInfo->fTempSurfaceDesc.fConfig = dstSurface->config();
+ tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
+ tempDrawInfo->fWriteConfig = dstSurface->config();
+ } else if (kGLES_GrGLStandard == this->glStandard() &&
+ this->glCaps().bgraIsInternalFormat()) {
+            // The internal and external formats must match for texture uploads, so we can't
+            // swizzle while uploading when BGRA is a distinct internal format.
+ ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
+ tempDrawInfo->fTempSurfaceDesc.fConfig = dstSurface->config();
+ tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
+ tempDrawInfo->fWriteConfig = dstSurface->config();
+ }
+ }
+
+ if (!this->glCaps().unpackFlipYSupport() &&
+ kBottomLeft_GrSurfaceOrigin == dstSurface->origin()) {
+ ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference);
+ }
+
+ return true;
+}
+
+static bool check_write_and_transfer_input(GrGLTexture* glTex, GrSurface* surface,
+ GrPixelConfig config) {
+ if (!glTex) {
+ return false;
+ }
+
+ // OpenGL doesn't do sRGB <-> linear conversions when reading and writing pixels.
+ if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
+ return false;
+ }
+
+ // Write or transfer of pixels is not implemented for TEXTURE_EXTERNAL textures
+ if (GR_GL_TEXTURE_EXTERNAL == glTex->target()) {
+ return false;
+ }
+
+ return true;
+}
+
+bool GrGLGpu::onWritePixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config,
+ const SkTArray<GrMipLevel>& texels) {
+ GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());
+
+ if (!check_write_and_transfer_input(glTex, surface, config)) {
+ return false;
+ }
+
+ this->setScratchTextureUnit();
+ GL_CALL(BindTexture(glTex->target(), glTex->textureID()));
+
+ bool success = false;
+ if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) {
+ // We check that config == desc.fConfig in GrGLGpu::canWriteTexturePixels()
+ SkASSERT(config == glTex->desc().fConfig);
+ success = this->uploadCompressedTexData(glTex->desc(), glTex->target(), texels,
+ kWrite_UploadType, left, top, width, height);
+ } else {
+ success = this->uploadTexData(glTex->desc(), glTex->target(), kWrite_UploadType,
+ left, top, width, height, config, texels);
+ }
+
+ return success;
+}
+
+bool GrGLGpu::onTransferPixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config, GrBuffer* transferBuffer,
+ size_t offset, size_t rowBytes) {
+ GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());
+
+ if (!check_write_and_transfer_input(glTex, surface, config)) {
+ return false;
+ }
+
+ // For the moment, can't transfer compressed data
+ if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) {
+ return false;
+ }
+
+ this->setScratchTextureUnit();
+ GL_CALL(BindTexture(glTex->target(), glTex->textureID()));
+
+ SkASSERT(!transferBuffer->isMapped());
+ SkASSERT(!transferBuffer->isCPUBacked());
+ const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer);
+ this->bindBuffer(kXferCpuToGpu_GrBufferType, glBuffer);
+
+ bool success = false;
+ GrMipLevel mipLevel;
+ mipLevel.fPixels = transferBuffer;
+ mipLevel.fRowBytes = rowBytes;
+ SkSTArray<1, GrMipLevel> texels;
+ texels.push_back(mipLevel);
+ success = this->uploadTexData(glTex->desc(), glTex->target(), kTransfer_UploadType,
+ left, top, width, height, config, texels);
+ return success;
+}
+
+// For GL_[UN]PACK_ALIGNMENT.
+static inline GrGLint config_alignment(GrPixelConfig config) {
+ SkASSERT(!GrPixelConfigIsCompressed(config));
+ switch (config) {
+ case kAlpha_8_GrPixelConfig:
+ return 1;
+ case kRGB_565_GrPixelConfig:
+ case kRGBA_4444_GrPixelConfig:
+ case kAlpha_half_GrPixelConfig:
+ case kRGBA_half_GrPixelConfig:
+ return 2;
+ case kRGBA_8888_GrPixelConfig:
+ case kBGRA_8888_GrPixelConfig:
+ case kSRGBA_8888_GrPixelConfig:
+ case kSBGRA_8888_GrPixelConfig:
+ case kRGBA_float_GrPixelConfig:
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+static inline GrGLenum check_alloc_error(const GrSurfaceDesc& desc,
+ const GrGLInterface* interface) {
+ if (SkToBool(desc.fFlags & kCheckAllocation_GrSurfaceFlag)) {
+ return GR_GL_GET_ERROR(interface);
+ } else {
+ return CHECK_ALLOC_ERROR(interface);
+ }
+}
+
+/**
+ * Creates storage space for the texture and fills it with texels.
+ *
+ * @param desc The surface descriptor for the texture being created.
+ * @param interface The GL interface in use.
+ * @param caps The capabilities of the GL device.
+ * @param internalFormat The data format used for the internal storage of the texture. May be sized.
+ * @param internalFormatForTexStorage The data format used for the TexStorage API. Must be sized.
+ * @param externalFormat The data format used for the external storage of the texture.
+ * @param externalType The type of the data used for the external storage of the texture.
+ * @param texels The texel data of the texture being created.
+ * @param baseWidth The width of the texture's base mipmap level
+ * @param baseHeight The height of the texture's base mipmap level
+ * @return true if allocating and populating the texture completed without error.
+ */
+static bool allocate_and_populate_uncompressed_texture(const GrSurfaceDesc& desc,
+ const GrGLInterface& interface,
+ const GrGLCaps& caps,
+ GrGLenum target,
+ GrGLenum internalFormat,
+ GrGLenum internalFormatForTexStorage,
+ GrGLenum externalFormat,
+ GrGLenum externalType,
+ const SkTArray<GrMipLevel>& texels,
+ int baseWidth, int baseHeight) {
+ CLEAR_ERROR_BEFORE_ALLOC(&interface);
+
+ bool useTexStorage = caps.isConfigTexSupportEnabled(desc.fConfig);
+ // We can only use TexStorage if we know we will not later change the storage requirements.
+ // This means if we may later want to add mipmaps, we cannot use TexStorage.
+ // Right now, we cannot know if we will later add mipmaps or not.
+ // The only time we can use TexStorage is when we already have the
+ // mipmaps.
+ useTexStorage &= texels.count() > 1;
+
+ if (useTexStorage) {
+ // We never resize or change formats of textures.
+ GL_ALLOC_CALL(&interface,
+ TexStorage2D(target,
+ texels.count(),
+ internalFormatForTexStorage,
+ desc.fWidth, desc.fHeight));
+ GrGLenum error = check_alloc_error(desc, &interface);
+ if (error != GR_GL_NO_ERROR) {
+ return false;
+ } else {
+ for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
+ const void* currentMipData = texels[currentMipLevel].fPixels;
+ if (currentMipData == nullptr) {
+ continue;
+ }
+ int twoToTheMipLevel = 1 << currentMipLevel;
+ int currentWidth = SkTMax(1, desc.fWidth / twoToTheMipLevel);
+ int currentHeight = SkTMax(1, desc.fHeight / twoToTheMipLevel);
+
+ GR_GL_CALL(&interface,
+ TexSubImage2D(target,
+ currentMipLevel,
+ 0, // left
+ 0, // top
+ currentWidth,
+ currentHeight,
+ externalFormat, externalType,
+ currentMipData));
+ }
+ return true;
+ }
+ } else {
+ if (texels.empty()) {
+ GL_ALLOC_CALL(&interface,
+ TexImage2D(target,
+ 0,
+ internalFormat,
+ baseWidth,
+ baseHeight,
+ 0, // border
+ externalFormat, externalType,
+ nullptr));
+ GrGLenum error = check_alloc_error(desc, &interface);
+ if (error != GR_GL_NO_ERROR) {
+ return false;
+ }
+ } else {
+ for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
+ int twoToTheMipLevel = 1 << currentMipLevel;
+ int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
+ int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);
+ const void* currentMipData = texels[currentMipLevel].fPixels;
+                // Even if currentMipData is nullptr, continue to call TexImage2D.
+ // This will allocate texture memory which we can later populate.
+ GL_ALLOC_CALL(&interface,
+ TexImage2D(target,
+ currentMipLevel,
+ internalFormat,
+ currentWidth,
+ currentHeight,
+ 0, // border
+ externalFormat, externalType,
+ currentMipData));
+ GrGLenum error = check_alloc_error(desc, &interface);
+ if (error != GR_GL_NO_ERROR) {
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
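+
+// Illustration only (not part of Skia): the two raw-GL allocation patterns the helper
+// above chooses between, written against plain GL entry points instead of the
+// GrGLInterface wrappers. Assumes a GL 4.2 / ES 3.0 header for glTexStorage2D; sizes
+// and formats are placeholders (w and h assumed even). Wrapped in #if 0 so it has no
+// effect on this file.
+#if 0
+static void allocate_rgba8_two_mip_levels(const void* level0, const void* level1,
+                                           int w, int h) {
+    // Immutable storage path (TexStorage): allocate every level up front with a
+    // sized internal format, then fill each level with TexSubImage2D.
+    glTexStorage2D(GL_TEXTURE_2D, 2, GL_RGBA8, w, h);
+    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, level0);
+    glTexSubImage2D(GL_TEXTURE_2D, 1, 0, 0, w / 2, h / 2, GL_RGBA, GL_UNSIGNED_BYTE, level1);
+}
+
+static void allocate_rgba8_mutable(const void* level0, const void* level1, int w, int h) {
+    // Mutable storage path (TexImage): allocate (and optionally fill) each level
+    // individually; a null pointer still reserves the memory for later population.
+    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, level0);
+    glTexImage2D(GL_TEXTURE_2D, 1, GL_RGBA8, w / 2, h / 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, level1);
+}
+#endif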
+
+/**
+ * Creates storage space for the texture and fills it with texels.
+ *
+ * @param desc The surface descriptor for the texture being created.
+ * @param interface The GL interface in use.
+ * @param caps The capabilities of the GL device.
+ * @param internalFormat The data format used for the internal storage of the texture.
+ * @param texels The texel data of the texture being created.
+ */
+static bool allocate_and_populate_compressed_texture(const GrSurfaceDesc& desc,
+ const GrGLInterface& interface,
+ const GrGLCaps& caps,
+ GrGLenum target, GrGLenum internalFormat,
+ const SkTArray<GrMipLevel>& texels,
+ int baseWidth, int baseHeight) {
+ CLEAR_ERROR_BEFORE_ALLOC(&interface);
+
+ bool useTexStorage = caps.isConfigTexSupportEnabled(desc.fConfig);
+ // We can only use TexStorage if we know we will not later change the storage requirements.
+ // This means if we may later want to add mipmaps, we cannot use TexStorage.
+ // Right now, we cannot know if we will later add mipmaps or not.
+ // The only time we can use TexStorage is when we already have the
+ // mipmaps.
+ useTexStorage &= texels.count() > 1;
+
+ if (useTexStorage) {
+ // We never resize or change formats of textures.
+ GL_ALLOC_CALL(&interface,
+ TexStorage2D(target,
+ texels.count(),
+ internalFormat,
+ baseWidth, baseHeight));
+ GrGLenum error = check_alloc_error(desc, &interface);
+ if (error != GR_GL_NO_ERROR) {
+ return false;
+ } else {
+ for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
+ const void* currentMipData = texels[currentMipLevel].fPixels;
+ if (currentMipData == nullptr) {
+ continue;
+ }
+
+ int twoToTheMipLevel = 1 << currentMipLevel;
+ int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
+ int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);
+
+                // Make sure that the width and height that we pass to OpenGL
+                // are multiples of the block size.
+ size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, currentWidth,
+ currentHeight);
+ GR_GL_CALL(&interface, CompressedTexSubImage2D(target,
+ currentMipLevel,
+ 0, // left
+ 0, // top
+ currentWidth,
+ currentHeight,
+ internalFormat,
+ SkToInt(dataSize),
+ currentMipData));
+ }
+ }
+ } else {
+ for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
+ int twoToTheMipLevel = 1 << currentMipLevel;
+ int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
+ int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);
+
+            // Make sure that the width and height that we pass to OpenGL
+            // are multiples of the block size.
+            size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, currentWidth,
+                                                         currentHeight);
+
+ GL_ALLOC_CALL(&interface,
+ CompressedTexImage2D(target,
+ currentMipLevel,
+ internalFormat,
+ currentWidth,
+ currentHeight,
+ 0, // border
+ SkToInt(dataSize),
+ texels[currentMipLevel].fPixels));
+
+ GrGLenum error = check_alloc_error(desc, &interface);
+ if (error != GR_GL_NO_ERROR) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+/**
+ * After a texture is created, any state which was altered during its creation
+ * needs to be restored.
+ *
+ * @param interface The GL interface to use.
+ * @param caps The capabilities of the GL device.
+ * @param restoreGLRowLength Should the row length unpacking be restored?
+ * @param glFlipY Did GL flip the texture vertically?
+ */
+static void restore_pixelstore_state(const GrGLInterface& interface, const GrGLCaps& caps,
+ bool restoreGLRowLength, bool glFlipY) {
+ if (restoreGLRowLength) {
+ SkASSERT(caps.unpackRowLengthSupport());
+ GR_GL_CALL(&interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
+ }
+ if (glFlipY) {
+ GR_GL_CALL(&interface, PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
+ }
+}
+
+bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
+ GrGLenum target,
+ UploadType uploadType,
+ int left, int top, int width, int height,
+ GrPixelConfig dataConfig,
+ const SkTArray<GrMipLevel>& texels) {
+ // If we're uploading compressed data then we should be using uploadCompressedTexData
+ SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
+
+ SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
+
+ // texels is const.
+ // But we may need to flip the texture vertically to prepare it.
+ // Rather than flip in place and alter the incoming data,
+ // we allocate a new buffer to flip into.
+ // This means we need to make a non-const shallow copy of texels.
+ SkTArray<GrMipLevel> texelsShallowCopy(texels);
+
+ for (int currentMipLevel = texelsShallowCopy.count() - 1; currentMipLevel >= 0;
+ currentMipLevel--) {
+ SkASSERT(texelsShallowCopy[currentMipLevel].fPixels || kTransfer_UploadType == uploadType);
+ }
+
+ const GrGLInterface* interface = this->glInterface();
+ const GrGLCaps& caps = this->glCaps();
+
+ size_t bpp = GrBytesPerPixel(dataConfig);
+
+ if (width == 0 || height == 0) {
+ return false;
+ }
+
+ for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
+ int twoToTheMipLevel = 1 << currentMipLevel;
+ int currentWidth = SkTMax(1, width / twoToTheMipLevel);
+ int currentHeight = SkTMax(1, height / twoToTheMipLevel);
+
+ if (currentHeight > SK_MaxS32 ||
+ currentWidth > SK_MaxS32) {
+ return false;
+ }
+ if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
+ &currentWidth,
+ &currentHeight,
+ &texelsShallowCopy[currentMipLevel].fPixels,
+ &texelsShallowCopy[currentMipLevel].fRowBytes)) {
+ return false;
+ }
+ if (currentWidth < 0 || currentHeight < 0) {
+ return false;
+ }
+ }
+
+ // Internal format comes from the texture desc.
+ GrGLenum internalFormat;
+ // External format and type come from the upload data.
+ GrGLenum externalFormat;
+ GrGLenum externalType;
+ if (!this->glCaps().getTexImageFormats(desc.fConfig, dataConfig, &internalFormat,
+ &externalFormat, &externalType)) {
+ return false;
+ }
+    // TexStorage requires a sized format, and internalFormat may or may not be one.
+ GrGLenum internalFormatForTexStorage = this->glCaps().configSizedInternalFormat(desc.fConfig);
+
+ /*
+ * Check whether to allocate a temporary buffer for flipping y or
+ * because our srcData has extra bytes past each row. If so, we need
+ * to trim those off here, since GL ES may not let us specify
+ * GL_UNPACK_ROW_LENGTH.
+ */
+ bool restoreGLRowLength = false;
+ bool swFlipY = false;
+ bool glFlipY = false;
+
+ if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin && !texelsShallowCopy.empty()) {
+ if (caps.unpackFlipYSupport()) {
+ glFlipY = true;
+ } else {
+ swFlipY = true;
+ }
+ }
+
+ // in case we need a temporary, trimmed copy of the src pixels
+ SkAutoMalloc tempStorage;
+
+ // find the combined size of all the mip levels and the relative offset of
+ // each into the collective buffer
+ size_t combined_buffer_size = 0;
+ SkTArray<size_t> individual_mip_offsets(texelsShallowCopy.count());
+ for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
+ int twoToTheMipLevel = 1 << currentMipLevel;
+ int currentWidth = SkTMax(1, width / twoToTheMipLevel);
+ int currentHeight = SkTMax(1, height / twoToTheMipLevel);
+ const size_t trimmedSize = currentWidth * bpp * currentHeight;
+ individual_mip_offsets.push_back(combined_buffer_size);
+ combined_buffer_size += trimmedSize;
+ }
+ char* buffer = (char*)tempStorage.reset(combined_buffer_size);
+
+ for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
+ int twoToTheMipLevel = 1 << currentMipLevel;
+ int currentWidth = SkTMax(1, width / twoToTheMipLevel);
+ int currentHeight = SkTMax(1, height / twoToTheMipLevel);
+ const size_t trimRowBytes = currentWidth * bpp;
+
+ /*
+ * check whether to allocate a temporary buffer for flipping y or
+ * because our srcData has extra bytes past each row. If so, we need
+ * to trim those off here, since GL ES may not let us specify
+ * GL_UNPACK_ROW_LENGTH.
+ */
+ restoreGLRowLength = false;
+
+ const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;
+
+ // TODO: This optimization should be enabled with or without mips.
+ // For use with mips, we must set GR_GL_UNPACK_ROW_LENGTH once per
+ // mip level, before calling glTexImage2D.
+ const bool usesMips = texelsShallowCopy.count() > 1;
+ if (caps.unpackRowLengthSupport() && !swFlipY && !usesMips) {
+ // can't use this for flipping, only non-neg values allowed. :(
+ if (rowBytes != trimRowBytes) {
+ GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
+ GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
+ restoreGLRowLength = true;
+ }
+ } else if (kTransfer_UploadType != uploadType) {
+ if (trimRowBytes != rowBytes || swFlipY) {
+ // copy data into our new storage, skipping the trailing bytes
+ const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
+ if (swFlipY && currentHeight >= 1) {
+ src += (currentHeight - 1) * rowBytes;
+ }
+ char* dst = buffer + individual_mip_offsets[currentMipLevel];
+ for (int y = 0; y < currentHeight; y++) {
+ memcpy(dst, src, trimRowBytes);
+ if (swFlipY) {
+ src -= rowBytes;
+ } else {
+ src += rowBytes;
+ }
+ dst += trimRowBytes;
+ }
+ // now point data to our copied version
+ texelsShallowCopy[currentMipLevel].fPixels = buffer +
+ individual_mip_offsets[currentMipLevel];
+ texelsShallowCopy[currentMipLevel].fRowBytes = trimRowBytes;
+ }
+ } else {
+ return false;
+ }
+ }
+
+ if (!texelsShallowCopy.empty()) {
+ if (glFlipY) {
+ GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE));
+ }
+ GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ALIGNMENT,
+ config_alignment(desc.fConfig)));
+ }
+
+ bool succeeded = true;
+ if (kNewTexture_UploadType == uploadType &&
+ 0 == left && 0 == top &&
+ desc.fWidth == width && desc.fHeight == height) {
+ succeeded = allocate_and_populate_uncompressed_texture(desc, *interface, caps, target,
+ internalFormat,
+ internalFormatForTexStorage,
+ externalFormat, externalType,
+ texelsShallowCopy, width, height);
+ } else {
+ if (swFlipY || glFlipY) {
+ top = desc.fHeight - (top + height);
+ }
+ for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count();
+ currentMipLevel++) {
+ int twoToTheMipLevel = 1 << currentMipLevel;
+ int currentWidth = SkTMax(1, width / twoToTheMipLevel);
+ int currentHeight = SkTMax(1, height / twoToTheMipLevel);
+
+ GL_CALL(TexSubImage2D(target,
+ currentMipLevel,
+ left, top,
+ currentWidth,
+ currentHeight,
+ externalFormat, externalType,
+ texelsShallowCopy[currentMipLevel].fPixels));
+ }
+ }
+
+ restore_pixelstore_state(*interface, caps, restoreGLRowLength, glFlipY);
+
+ return succeeded;
+}
+
+// TODO: This function uses a lot of wonky semantics, like "if width == -1
+// then set width = desc.fWidth ... blah". A better way to do it might be to
+// create a CompressedTexData struct that takes a desc/ptr and figures out
+// the proper upload semantics. Then users can construct such a struct however
+// they see fit if they want to go against the "standard" way to do it. (A
+// hypothetical sketch of that struct follows this function.)
+bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc,
+ GrGLenum target,
+ const SkTArray<GrMipLevel>& texels,
+ UploadType uploadType,
+ int left, int top, int width, int height) {
+ SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
+
+ // No support for software flip y, yet...
+ SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin);
+
+ const GrGLInterface* interface = this->glInterface();
+ const GrGLCaps& caps = this->glCaps();
+
+ if (-1 == width) {
+ width = desc.fWidth;
+ }
+#ifdef SK_DEBUG
+ else {
+ SkASSERT(width <= desc.fWidth);
+ }
+#endif
+
+ if (-1 == height) {
+ height = desc.fHeight;
+ }
+#ifdef SK_DEBUG
+ else {
+ SkASSERT(height <= desc.fHeight);
+ }
+#endif
+
+ // We only need the internal format for compressed 2D textures.
+ GrGLenum internalFormat;
+ if (!caps.getCompressedTexImageFormats(desc.fConfig, &internalFormat)) {
+ return false;
+ }
+
+ if (kNewTexture_UploadType == uploadType) {
+ return allocate_and_populate_compressed_texture(desc, *interface, caps, target,
+ internalFormat, texels, width, height);
+ } else {
+ // Paletted textures can't be updated.
+ if (GR_GL_PALETTE8_RGBA8 == internalFormat) {
+ return false;
+ }
+ for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
+ SkASSERT(texels[currentMipLevel].fPixels || kTransfer_UploadType == uploadType);
+
+ int twoToTheMipLevel = 1 << currentMipLevel;
+ int currentWidth = SkTMax(1, width / twoToTheMipLevel);
+ int currentHeight = SkTMax(1, height / twoToTheMipLevel);
+
+            // Make sure that the width and height that we pass to OpenGL
+            // are multiples of the block size.
+ size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, currentWidth,
+ currentHeight);
+ GL_CALL(CompressedTexSubImage2D(target,
+ currentMipLevel,
+ left, top,
+ currentWidth,
+ currentHeight,
+ internalFormat,
+ SkToInt(dataSize),
+ texels[currentMipLevel].fPixels));
+ }
+ }
+
+ return true;
+}
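+
+// Illustration only (not part of Skia): a hypothetical sketch of the CompressedTexData
+// struct imagined by the TODO above this function. It resolves the "-1 means use the
+// full surface size" convention once, so callers and the uploader no longer share that
+// implicit contract. All names are invented for illustration; wrapped in #if 0 so it
+// has no effect on this file.
+#if 0
+struct CompressedTexData {
+    GrGLenum fTarget;
+    int      fLeft;
+    int      fTop;
+    int      fWidth;
+    int      fHeight;
+
+    CompressedTexData(const GrSurfaceDesc& desc, GrGLenum target,
+                      int left, int top, int width, int height)
+        : fTarget(target)
+        , fLeft(left)
+        , fTop(top)
+        , fWidth(-1 == width ? desc.fWidth : width)
+        , fHeight(-1 == height ? desc.fHeight : height) {
+        SkASSERT(fWidth <= desc.fWidth && fHeight <= desc.fHeight);
+    }
+};
+#endif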
+
+static bool renderbuffer_storage_msaa(const GrGLContext& ctx,
+ int sampleCount,
+ GrGLenum format,
+ int width, int height) {
+ CLEAR_ERROR_BEFORE_ALLOC(ctx.interface());
+ SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType());
+ switch (ctx.caps()->msFBOType()) {
+ case GrGLCaps::kDesktop_ARB_MSFBOType:
+ case GrGLCaps::kDesktop_EXT_MSFBOType:
+ case GrGLCaps::kMixedSamples_MSFBOType:
+ case GrGLCaps::kES_3_0_MSFBOType:
+ GL_ALLOC_CALL(ctx.interface(),
+ RenderbufferStorageMultisample(GR_GL_RENDERBUFFER,
+ sampleCount,
+ format,
+ width, height));
+ break;
+ case GrGLCaps::kES_Apple_MSFBOType:
+ GL_ALLOC_CALL(ctx.interface(),
+ RenderbufferStorageMultisampleES2APPLE(GR_GL_RENDERBUFFER,
+ sampleCount,
+ format,
+ width, height));
+ break;
+ case GrGLCaps::kES_EXT_MsToTexture_MSFBOType:
+ case GrGLCaps::kES_IMG_MsToTexture_MSFBOType:
+ GL_ALLOC_CALL(ctx.interface(),
+ RenderbufferStorageMultisampleES2EXT(GR_GL_RENDERBUFFER,
+ sampleCount,
+ format,
+ width, height));
+ break;
+ case GrGLCaps::kNone_MSFBOType:
+ SkFAIL("Shouldn't be here if we don't support multisampled renderbuffers.");
+ break;
+ }
+ return (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface()));
+}
+
+bool GrGLGpu::createRenderTargetObjects(const GrSurfaceDesc& desc,
+ const GrGLTextureInfo& texInfo,
+ GrGLRenderTarget::IDDesc* idDesc) {
+ idDesc->fMSColorRenderbufferID = 0;
+ idDesc->fRTFBOID = 0;
+ idDesc->fRTFBOOwnership = GrBackendObjectOwnership::kOwned;
+ idDesc->fTexFBOID = 0;
+ SkASSERT((GrGLCaps::kMixedSamples_MSFBOType == this->glCaps().msFBOType()) ==
+ this->caps()->usesMixedSamples());
+ idDesc->fIsMixedSampled = desc.fSampleCnt > 0 && this->caps()->usesMixedSamples();
+
+ GrGLenum status;
+
+ GrGLenum colorRenderbufferFormat = 0; // suppress warning
+
+ if (desc.fSampleCnt > 0 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) {
+ goto FAILED;
+ }
+
+ GL_CALL(GenFramebuffers(1, &idDesc->fTexFBOID));
+ if (!idDesc->fTexFBOID) {
+ goto FAILED;
+ }
+
+    // If we are using multisampling we will create two FBOs. We render to one and then resolve to
+    // the texture bound to the other. The exception is the IMG multisample extension. With this
+    // extension the texture is multisampled when rendered to and then auto-resolves when it is
+    // rendered from. (An illustrative resolve sketch follows this function.)
+ if (desc.fSampleCnt > 0 && this->glCaps().usesMSAARenderBuffers()) {
+ GL_CALL(GenFramebuffers(1, &idDesc->fRTFBOID));
+ GL_CALL(GenRenderbuffers(1, &idDesc->fMSColorRenderbufferID));
+ if (!idDesc->fRTFBOID ||
+ !idDesc->fMSColorRenderbufferID) {
+ goto FAILED;
+ }
+ if (!this->glCaps().getRenderbufferFormat(desc.fConfig, &colorRenderbufferFormat)) {
+ return false;
+ }
+ } else {
+ idDesc->fRTFBOID = idDesc->fTexFBOID;
+ }
+
+ // below here we may bind the FBO
+ fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
+ if (idDesc->fRTFBOID != idDesc->fTexFBOID) {
+ SkASSERT(desc.fSampleCnt > 0);
+ GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, idDesc->fMSColorRenderbufferID));
+ if (!renderbuffer_storage_msaa(*fGLContext,
+ desc.fSampleCnt,
+ colorRenderbufferFormat,
+ desc.fWidth, desc.fHeight)) {
+ goto FAILED;
+ }
+ fStats.incRenderTargetBinds();
+ GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, idDesc->fRTFBOID));
+ GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_COLOR_ATTACHMENT0,
+ GR_GL_RENDERBUFFER,
+ idDesc->fMSColorRenderbufferID));
+ if ((desc.fFlags & kCheckAllocation_GrSurfaceFlag) ||
+ !this->glCaps().isConfigVerifiedColorAttachment(desc.fConfig)) {
+ GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+ if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
+ goto FAILED;
+ }
+ fGLContext->caps()->markConfigAsValidColorAttachment(desc.fConfig);
+ }
+ }
+ fStats.incRenderTargetBinds();
+ GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, idDesc->fTexFBOID));
+
+ if (this->glCaps().usesImplicitMSAAResolve() && desc.fSampleCnt > 0) {
+ GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER,
+ GR_GL_COLOR_ATTACHMENT0,
+ texInfo.fTarget,
+ texInfo.fID, 0, desc.fSampleCnt));
+ } else {
+ GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
+ GR_GL_COLOR_ATTACHMENT0,
+ texInfo.fTarget,
+ texInfo.fID, 0));
+ }
+ if ((desc.fFlags & kCheckAllocation_GrSurfaceFlag) ||
+ !this->glCaps().isConfigVerifiedColorAttachment(desc.fConfig)) {
+ GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+ if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
+ goto FAILED;
+ }
+ fGLContext->caps()->markConfigAsValidColorAttachment(desc.fConfig);
+ }
+
+ return true;
+
+FAILED:
+ if (idDesc->fMSColorRenderbufferID) {
+ GL_CALL(DeleteRenderbuffers(1, &idDesc->fMSColorRenderbufferID));
+ }
+ if (idDesc->fRTFBOID != idDesc->fTexFBOID) {
+ GL_CALL(DeleteFramebuffers(1, &idDesc->fRTFBOID));
+ }
+ if (idDesc->fTexFBOID) {
+ GL_CALL(DeleteFramebuffers(1, &idDesc->fTexFBOID));
+ }
+ return false;
+}
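+
+// Illustration only (not part of Skia): what the two-FBO scheme described inside
+// createRenderTargetObjects implies at resolve time when plain MSAA renderbuffers are
+// used: blit from the multisampled FBO (fRTFBOID) into the FBO that has the texture
+// attached (fTexFBOID). Skia's actual resolve happens elsewhere; this uses raw GL 3.0 /
+// ES 3.0 entry points and placeholder IDs, and is wrapped in #if 0 so it has no effect.
+#if 0
+static void resolve_msaa_to_texture(GLuint rtFBOID, GLuint texFBOID, int w, int h) {
+    glBindFramebuffer(GL_READ_FRAMEBUFFER, rtFBOID);   // multisampled color renderbuffer
+    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, texFBOID);  // single-sample texture attachment
+    glBlitFramebuffer(0, 0, w, h, 0, 0, w, h, GL_COLOR_BUFFER_BIT, GL_NEAREST);
+}
+#endif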
+
+// good to set a break-point here to know when createTexture fails
+static GrTexture* return_null_texture() {
+// SkDEBUGFAIL("null texture");
+ return nullptr;
+}
+
+#if 0 && defined(SK_DEBUG)
+static size_t as_size_t(int x) {
+ return x;
+}
+#endif
+
+static GrGLTexture::IDDesc generate_gl_texture(const GrGLInterface* interface) {
+ GrGLTexture::IDDesc idDesc;
+ idDesc.fInfo.fID = 0;
+ GR_GL_CALL(interface, GenTextures(1, &idDesc.fInfo.fID));
+ idDesc.fOwnership = GrBackendObjectOwnership::kOwned;
+    // When we create the texture, we only create GL_TEXTURE_2D at the moment.
+    // External clients can do something different.
+ idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D;
+ return idDesc;
+}
+
+static void set_initial_texture_params(const GrGLInterface* interface,
+ const GrGLTextureInfo& info,
+ GrGLTexture::TexParams* initialTexParams) {
+ // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
+ // drivers have a bug where an FBO won't be complete if it includes a
+ // texture that is not mipmap complete (considering the filter in use).
+ // we only set a subset here so invalidate first
+ initialTexParams->invalidate();
+ initialTexParams->fMinFilter = GR_GL_NEAREST;
+ initialTexParams->fMagFilter = GR_GL_NEAREST;
+ initialTexParams->fWrapS = GR_GL_CLAMP_TO_EDGE;
+ initialTexParams->fWrapT = GR_GL_CLAMP_TO_EDGE;
+ GR_GL_CALL(interface, TexParameteri(info.fTarget,
+ GR_GL_TEXTURE_MAG_FILTER,
+ initialTexParams->fMagFilter));
+ GR_GL_CALL(interface, TexParameteri(info.fTarget,
+ GR_GL_TEXTURE_MIN_FILTER,
+ initialTexParams->fMinFilter));
+ GR_GL_CALL(interface, TexParameteri(info.fTarget,
+ GR_GL_TEXTURE_WRAP_S,
+ initialTexParams->fWrapS));
+ GR_GL_CALL(interface, TexParameteri(info.fTarget,
+ GR_GL_TEXTURE_WRAP_T,
+ initialTexParams->fWrapT));
+}
+
+GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
+ SkBudgeted budgeted,
+ const SkTArray<GrMipLevel>& texels) {
+    // We fail if MSAA was requested but is not available.
+ if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) {
+ //SkDebugf("MSAA RT requested but not supported on this platform.");
+ return return_null_texture();
+ }
+
+ bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
+
+ GrGLTexture::IDDesc idDesc;
+ idDesc.fOwnership = GrBackendObjectOwnership::kOwned;
+ GrGLTexture::TexParams initialTexParams;
+ if (!this->createTextureImpl(desc, &idDesc.fInfo, renderTarget, &initialTexParams, texels)) {
+ return return_null_texture();
+ }
+
+ GrGLTexture* tex;
+ if (renderTarget) {
+ // unbind the texture from the texture unit before binding it to the frame buffer
+ GL_CALL(BindTexture(idDesc.fInfo.fTarget, 0));
+ GrGLRenderTarget::IDDesc rtIDDesc;
+
+ if (!this->createRenderTargetObjects(desc, idDesc.fInfo, &rtIDDesc)) {
+ GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID));
+ return return_null_texture();
+ }
+ tex = new GrGLTextureRenderTarget(this, budgeted, desc, idDesc, rtIDDesc);
+ } else {
+ bool wasMipMapDataProvided = false;
+ if (texels.count() > 1) {
+ wasMipMapDataProvided = true;
+ }
+ tex = new GrGLTexture(this, budgeted, desc, idDesc, wasMipMapDataProvided);
+ }
+ tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
+#ifdef TRACE_TEXTURE_CREATION
+ SkDebugf("--- new texture [%d] size=(%d %d) config=%d\n",
+ idDesc.fInfo.fID, desc.fWidth, desc.fHeight, desc.fConfig);
+#endif
+ return tex;
+}
+
+GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& desc,
+ SkBudgeted budgeted,
+ const SkTArray<GrMipLevel>& texels) {
+ // Make sure that we're not flipping Y.
+ if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
+ return return_null_texture();
+ }
+
+ GrGLTexture::IDDesc idDesc = generate_gl_texture(this->glInterface());
+ if (!idDesc.fInfo.fID) {
+ return return_null_texture();
+ }
+
+ this->setScratchTextureUnit();
+ GL_CALL(BindTexture(idDesc.fInfo.fTarget, idDesc.fInfo.fID));
+
+ GrGLTexture::TexParams initialTexParams;
+ set_initial_texture_params(this->glInterface(), idDesc.fInfo, &initialTexParams);
+
+ if (!this->uploadCompressedTexData(desc, idDesc.fInfo.fTarget, texels)) {
+ GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID));
+ return return_null_texture();
+ }
+
+ GrGLTexture* tex;
+ tex = new GrGLTexture(this, budgeted, desc, idDesc);
+ tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
+#ifdef TRACE_TEXTURE_CREATION
+ SkDebugf("--- new compressed texture [%d] size=(%d %d) config=%d\n",
+ idDesc.fInfo.fID, desc.fWidth, desc.fHeight, desc.fConfig);
+#endif
+ return tex;
+}
+
+namespace {
+
+const GrGLuint kUnknownBitCount = GrGLStencilAttachment::kUnknownBitCount;
+
+void inline get_stencil_rb_sizes(const GrGLInterface* gl,
+ GrGLStencilAttachment::Format* format) {
+
+ // we shouldn't ever know one size and not the other
+ SkASSERT((kUnknownBitCount == format->fStencilBits) ==
+ (kUnknownBitCount == format->fTotalBits));
+ if (kUnknownBitCount == format->fStencilBits) {
+ GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
+ GR_GL_RENDERBUFFER_STENCIL_SIZE,
+ (GrGLint*)&format->fStencilBits);
+ if (format->fPacked) {
+ GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
+ GR_GL_RENDERBUFFER_DEPTH_SIZE,
+ (GrGLint*)&format->fTotalBits);
+ format->fTotalBits += format->fStencilBits;
+ } else {
+ format->fTotalBits = format->fStencilBits;
+ }
+ }
+}
+}
+
+int GrGLGpu::getCompatibleStencilIndex(GrPixelConfig config) {
+ static const int kSize = 16;
+ SkASSERT(this->caps()->isConfigRenderable(config, false));
+ if (!this->glCaps().hasStencilFormatBeenDeterminedForConfig(config)) {
+ // Default to unsupported, set this if we find a stencil format that works.
+ int firstWorkingStencilFormatIndex = -1;
+ // Create color texture
+ GrGLuint colorID = 0;
+ GL_CALL(GenTextures(1, &colorID));
+ this->setScratchTextureUnit();
+ GL_CALL(BindTexture(GR_GL_TEXTURE_2D, colorID));
+ GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
+ GR_GL_TEXTURE_MAG_FILTER,
+ GR_GL_NEAREST));
+ GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
+ GR_GL_TEXTURE_MIN_FILTER,
+ GR_GL_NEAREST));
+ GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
+ GR_GL_TEXTURE_WRAP_S,
+ GR_GL_CLAMP_TO_EDGE));
+ GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
+ GR_GL_TEXTURE_WRAP_T,
+ GR_GL_CLAMP_TO_EDGE));
+
+ GrGLenum internalFormat;
+ GrGLenum externalFormat;
+ GrGLenum externalType;
+ if (!this->glCaps().getTexImageFormats(config, config, &internalFormat, &externalFormat,
+ &externalType)) {
+ return false;
+ }
+ CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
+ GL_ALLOC_CALL(this->glInterface(), TexImage2D(GR_GL_TEXTURE_2D,
+ 0,
+ internalFormat,
+ kSize,
+ kSize,
+ 0,
+ externalFormat,
+ externalType,
+ NULL));
+ if (GR_GL_NO_ERROR != CHECK_ALLOC_ERROR(this->glInterface())) {
+ GL_CALL(DeleteTextures(1, &colorID));
+ return -1;
+ }
+
+ // unbind the texture from the texture unit before binding it to the frame buffer
+ GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0));
+
+ // Create Framebuffer
+ GrGLuint fb = 0;
+ GL_CALL(GenFramebuffers(1, &fb));
+ GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, fb));
+ fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
+ GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
+ GR_GL_COLOR_ATTACHMENT0,
+ GR_GL_TEXTURE_2D,
+ colorID,
+ 0));
+ GrGLuint sbRBID = 0;
+ GL_CALL(GenRenderbuffers(1, &sbRBID));
+
+ // look over formats till I find a compatible one
+ int stencilFmtCnt = this->glCaps().stencilFormats().count();
+ if (sbRBID) {
+ GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbRBID));
+ for (int i = 0; i < stencilFmtCnt && sbRBID; ++i) {
+ const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[i];
+ CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
+ GL_ALLOC_CALL(this->glInterface(), RenderbufferStorage(GR_GL_RENDERBUFFER,
+ sFmt.fInternalFormat,
+ kSize, kSize));
+ if (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(this->glInterface())) {
+ GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_STENCIL_ATTACHMENT,
+ GR_GL_RENDERBUFFER, sbRBID));
+ if (sFmt.fPacked) {
+ GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_DEPTH_ATTACHMENT,
+ GR_GL_RENDERBUFFER, sbRBID));
+ } else {
+ GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_DEPTH_ATTACHMENT,
+ GR_GL_RENDERBUFFER, 0));
+ }
+ GrGLenum status;
+ GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+ if (status == GR_GL_FRAMEBUFFER_COMPLETE) {
+ firstWorkingStencilFormatIndex = i;
+ break;
+ }
+ GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_STENCIL_ATTACHMENT,
+ GR_GL_RENDERBUFFER, 0));
+ if (sFmt.fPacked) {
+ GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_DEPTH_ATTACHMENT,
+ GR_GL_RENDERBUFFER, 0));
+ }
+ }
+ }
+ GL_CALL(DeleteRenderbuffers(1, &sbRBID));
+ }
+ GL_CALL(DeleteTextures(1, &colorID));
+ GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, 0));
+ GL_CALL(DeleteFramebuffers(1, &fb));
+ fGLContext->caps()->setStencilFormatIndexForConfig(config, firstWorkingStencilFormatIndex);
+ }
+ return this->glCaps().getStencilFormatIndexForConfig(config);
+}
+
+bool GrGLGpu::createTextureImpl(const GrSurfaceDesc& desc, GrGLTextureInfo* info,
+ bool renderTarget, GrGLTexture::TexParams* initialTexParams,
+ const SkTArray<GrMipLevel>& texels) {
+ info->fID = 0;
+ info->fTarget = GR_GL_TEXTURE_2D;
+ GL_CALL(GenTextures(1, &(info->fID)));
+
+ if (!info->fID) {
+ return false;
+ }
+
+ this->setScratchTextureUnit();
+ GL_CALL(BindTexture(info->fTarget, info->fID));
+
+ if (renderTarget && this->glCaps().textureUsageSupport()) {
+ // provides a hint about how this texture will be used
+ GL_CALL(TexParameteri(info->fTarget,
+ GR_GL_TEXTURE_USAGE,
+ GR_GL_FRAMEBUFFER_ATTACHMENT));
+ }
+
+ if (info) {
+ set_initial_texture_params(this->glInterface(), *info, initialTexParams);
+ }
+ if (!this->uploadTexData(desc, info->fTarget, kNewTexture_UploadType, 0, 0,
+ desc.fWidth, desc.fHeight,
+ desc.fConfig, texels)) {
+ GL_CALL(DeleteTextures(1, &(info->fID)));
+ return false;
+ }
+ return true;
+}
+
+GrStencilAttachment* GrGLGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
+ int width,
+ int height) {
+ SkASSERT(width >= rt->width());
+ SkASSERT(height >= rt->height());
+
+ int samples = rt->numStencilSamples();
+ GrGLStencilAttachment::IDDesc sbDesc;
+
+ int sIdx = this->getCompatibleStencilIndex(rt->config());
+ if (sIdx < 0) {
+ return nullptr;
+ }
+
+ if (!sbDesc.fRenderbufferID) {
+ GL_CALL(GenRenderbuffers(1, &sbDesc.fRenderbufferID));
+ }
+ if (!sbDesc.fRenderbufferID) {
+ return nullptr;
+ }
+ GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbDesc.fRenderbufferID));
+ const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[sIdx];
+ CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
+ // we do this "if" so that we don't call the multisample
+ // version on a GL that doesn't have an MSAA extension.
+ if (samples > 0) {
+ SkAssertResult(renderbuffer_storage_msaa(*fGLContext,
+ samples,
+ sFmt.fInternalFormat,
+ width, height));
+ } else {
+ GL_ALLOC_CALL(this->glInterface(), RenderbufferStorage(GR_GL_RENDERBUFFER,
+ sFmt.fInternalFormat,
+ width, height));
+ SkASSERT(GR_GL_NO_ERROR == check_alloc_error(rt->desc(), this->glInterface()));
+ }
+ fStats.incStencilAttachmentCreates();
+ // After sized formats we attempt an unsized format and take
+ // whatever sizes GL gives us. In that case we query for the size.
+ GrGLStencilAttachment::Format format = sFmt;
+ get_stencil_rb_sizes(this->glInterface(), &format);
+ GrGLStencilAttachment* stencil = new GrGLStencilAttachment(this,
+ sbDesc,
+ width,
+ height,
+ samples,
+ format);
+ return stencil;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
+// objects are implemented as client-side-arrays on tile-deferred architectures.
+#define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW
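+
+// Illustration only (not part of Skia): where a usage hint like the one above ends up,
+// namely the last argument of glBufferData. Uses raw GL entry points and placeholder
+// sizes; wrapped in #if 0 so it has no effect on this file.
+#if 0
+static void upload_stream_vertices(GLuint vbo, const void* verts, size_t bytes) {
+    glBindBuffer(GL_ARRAY_BUFFER, vbo);
+    // Respecify the whole buffer each time; GL_STREAM_DRAW is the hint that lets
+    // Chromium's GPU process back such buffers with client-side arrays (see comment above).
+    glBufferData(GL_ARRAY_BUFFER, (GLsizeiptr)bytes, verts, GL_STREAM_DRAW);
+}
+#endif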
+
+GrBuffer* GrGLGpu::onCreateBuffer(size_t size, GrBufferType intendedType,
+ GrAccessPattern accessPattern, const void* data) {
+ return GrGLBuffer::Create(this, size, intendedType, accessPattern, data);
+}
+
+InstancedRendering* GrGLGpu::onCreateInstancedRendering() {
+ return new GLInstancedRendering(this);
+}
+
+void GrGLGpu::flushScissor(const GrScissorState& scissorState,
+ const GrGLIRect& rtViewport,
+ GrSurfaceOrigin rtOrigin) {
+ if (scissorState.enabled()) {
+ GrGLIRect scissor;
+ scissor.setRelativeTo(rtViewport,
+ scissorState.rect().fLeft,
+ scissorState.rect().fTop,
+ scissorState.rect().width(),
+ scissorState.rect().height(),
+ rtOrigin);
+ // if the scissor fully contains the viewport then we fall through and
+ // disable the scissor test.
+ if (!scissor.contains(rtViewport)) {
+ if (fHWScissorSettings.fRect != scissor) {
+ scissor.pushToGLScissor(this->glInterface());
+ fHWScissorSettings.fRect = scissor;
+ }
+ if (kYes_TriState != fHWScissorSettings.fEnabled) {
+ GL_CALL(Enable(GR_GL_SCISSOR_TEST));
+ fHWScissorSettings.fEnabled = kYes_TriState;
+ }
+ return;
+ }
+ }
+
+ // See fall through note above
+ this->disableScissor();
+}
+
+void GrGLGpu::flushWindowRectangles(const GrWindowRectsState& windowState,
+ const GrGLRenderTarget* rt) {
+ typedef GrWindowRectsState::Mode Mode;
+ SkASSERT(!windowState.enabled() || rt->renderFBOID()); // Window rects can't be used on-screen.
+ SkASSERT(windowState.numWindows() <= this->caps()->maxWindowRectangles());
+
+ if (!this->caps()->maxWindowRectangles() ||
+ fHWWindowRectsState.knownEqualTo(rt->origin(), rt->getViewport(), windowState)) {
+ return;
+ }
+
+ // This is purely a workaround for a spurious warning generated by gcc. Otherwise the above
+ // assert would be sufficient. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=5912
+ int numWindows = SkTMin(windowState.numWindows(), int(GrWindowRectangles::kMaxWindows));
+ SkASSERT(windowState.numWindows() == numWindows);
+
+ GrGLIRect glwindows[GrWindowRectangles::kMaxWindows];
+ const SkIRect* skwindows = windowState.windows().data();
+ int dx = -windowState.origin().x(), dy = -windowState.origin().y();
+ for (int i = 0; i < numWindows; ++i) {
+ const SkIRect& skwindow = skwindows[i].makeOffset(dx, dy);
+ glwindows[i].setRelativeTo(rt->getViewport(), skwindow, rt->origin());
+ }
+
+ GrGLenum glmode = (Mode::kExclusive == windowState.mode()) ? GR_GL_EXCLUSIVE : GR_GL_INCLUSIVE;
+ GL_CALL(WindowRectangles(glmode, numWindows, glwindows->asInts()));
+
+ fHWWindowRectsState.set(rt->origin(), rt->getViewport(), windowState);
+}
+
+void GrGLGpu::disableWindowRectangles() {
+ if (!this->caps()->maxWindowRectangles() || fHWWindowRectsState.knownDisabled()) {
+ return;
+ }
+ GL_CALL(WindowRectangles(GR_GL_EXCLUSIVE, 0, nullptr));
+ fHWWindowRectsState.setDisabled();
+}
+
+void GrGLGpu::flushMinSampleShading(float minSampleShading) {
+ if (fHWMinSampleShading != minSampleShading) {
+        if (minSampleShading > 0.0) {
+            GL_CALL(Enable(GR_GL_SAMPLE_SHADING));
+            GL_CALL(MinSampleShading(minSampleShading));
+        } else {
+            GL_CALL(Disable(GR_GL_SAMPLE_SHADING));
+        }
+ fHWMinSampleShading = minSampleShading;
+ }
+}
+
+bool GrGLGpu::flushGLState(const GrPipeline& pipeline, const GrPrimitiveProcessor& primProc,
+ bool willDrawPoints) {
+ SkAutoTUnref<GrGLProgram> program(fProgramCache->refProgram(this, pipeline, primProc,
+ willDrawPoints));
+ if (!program) {
+ GrCapsDebugf(this->caps(), "Failed to create program!\n");
+ return false;
+ }
+
+ program->generateMipmaps(primProc, pipeline);
+
+ GrXferProcessor::BlendInfo blendInfo;
+ pipeline.getXferProcessor().getBlendInfo(&blendInfo);
+
+ this->flushColorWrite(blendInfo.fWriteColor);
+ this->flushDrawFace(pipeline.getDrawFace());
+ this->flushMinSampleShading(primProc.getSampleShading());
+
+ GrGLuint programID = program->programID();
+ if (fHWProgramID != programID) {
+ GL_CALL(UseProgram(programID));
+ fHWProgramID = programID;
+ }
+
+ if (blendInfo.fWriteColor) {
+ // Swizzle the blend to match what the shader will output.
+ const GrSwizzle& swizzle = this->glCaps().glslCaps()->configOutputSwizzle(
+ pipeline.getRenderTarget()->config());
+ this->flushBlend(blendInfo, swizzle);
+ }
+
+ program->setData(primProc, pipeline);
+
+ GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(pipeline.getRenderTarget());
+ this->flushStencil(pipeline.getStencil());
+ this->flushScissor(pipeline.getScissorState(), glRT->getViewport(), glRT->origin());
+ this->flushWindowRectangles(pipeline.getWindowRectsState(), glRT);
+ this->flushHWAAState(glRT, pipeline.isHWAntialiasState(), !pipeline.getStencil().isDisabled());
+
+ // This must come after textures are flushed because a texture may need
+ // to be msaa-resolved (which will modify bound FBO state).
+ this->flushRenderTarget(glRT, nullptr, pipeline.getDisableOutputConversionToSRGB());
+
+ return true;
+}
+
+void GrGLGpu::setupGeometry(const GrPrimitiveProcessor& primProc,
+ const GrNonInstancedMesh& mesh,
+ size_t* indexOffsetInBytes) {
+ const GrBuffer* vbuf = mesh.vertexBuffer();
+ SkASSERT(vbuf);
+ SkASSERT(!vbuf->isMapped());
+
+ GrGLAttribArrayState* attribState;
+ if (mesh.isIndexed()) {
+ SkASSERT(indexOffsetInBytes);
+
+ *indexOffsetInBytes = 0;
+ const GrBuffer* ibuf = mesh.indexBuffer();
+ SkASSERT(ibuf);
+ SkASSERT(!ibuf->isMapped());
+ *indexOffsetInBytes += ibuf->baseOffset();
+ attribState = fHWVertexArrayState.bindInternalVertexArray(this, ibuf);
+ } else {
+ attribState = fHWVertexArrayState.bindInternalVertexArray(this);
+ }
+
+ int vaCount = primProc.numAttribs();
+ if (vaCount > 0) {
+
+ GrGLsizei stride = static_cast<GrGLsizei>(primProc.getVertexStride());
+
+ size_t vertexOffsetInBytes = stride * mesh.startVertex();
+
+ vertexOffsetInBytes += vbuf->baseOffset();
+
+ uint32_t usedAttribArraysMask = 0;
+ size_t offset = 0;
+
+ for (int attribIndex = 0; attribIndex < vaCount; attribIndex++) {
+ const GrGeometryProcessor::Attribute& attrib = primProc.getAttrib(attribIndex);
+ usedAttribArraysMask |= (1 << attribIndex);
+ GrVertexAttribType attribType = attrib.fType;
+ attribState->set(this,
+ attribIndex,
+ vbuf,
+ attribType,
+ stride,
+ reinterpret_cast<GrGLvoid*>(vertexOffsetInBytes + offset));
+ offset += attrib.fOffset;
+ }
+ attribState->disableUnusedArrays(this, usedAttribArraysMask);
+ }
+}
+
+GrGLenum GrGLGpu::bindBuffer(GrBufferType type, const GrBuffer* buffer) {
+ this->handleDirtyContext();
+
+ // Index buffer state is tied to the vertex array.
+ if (kIndex_GrBufferType == type) {
+ this->bindVertexArray(0);
+ }
+
+ SkASSERT(type >= 0 && type <= kLast_GrBufferType);
+ auto& bufferState = fHWBufferState[type];
+
+ if (buffer->uniqueID() != bufferState.fBoundBufferUniqueID) {
+ if (buffer->isCPUBacked()) {
+ if (!bufferState.fBufferZeroKnownBound) {
+ GL_CALL(BindBuffer(bufferState.fGLTarget, 0));
+ }
+ } else {
+ const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer);
+ GL_CALL(BindBuffer(bufferState.fGLTarget, glBuffer->bufferID()));
+ }
+ bufferState.fBufferZeroKnownBound = buffer->isCPUBacked();
+ bufferState.fBoundBufferUniqueID = buffer->uniqueID();
+ }
+
+ return bufferState.fGLTarget;
+}
+
+void GrGLGpu::notifyBufferReleased(const GrGLBuffer* buffer) {
+ if (buffer->hasAttachedToTexture()) {
+ // Detach this buffer from any textures to ensure the underlying memory is freed.
+ uint32_t uniqueID = buffer->uniqueID();
+ for (int i = fHWMaxUsedBufferTextureUnit; i >= 0; --i) {
+ auto& buffTex = fHWBufferTextures[i];
+ if (uniqueID != buffTex.fAttachedBufferUniqueID) {
+ continue;
+ }
+ if (i == fHWMaxUsedBufferTextureUnit) {
+ --fHWMaxUsedBufferTextureUnit;
+ }
+
+ this->setTextureUnit(i);
+ if (!buffTex.fKnownBound) {
+ SkASSERT(buffTex.fTextureID);
+ GL_CALL(BindTexture(GR_GL_TEXTURE_BUFFER, buffTex.fTextureID));
+ buffTex.fKnownBound = true;
+ }
+ GL_CALL(TexBuffer(GR_GL_TEXTURE_BUFFER,
+ this->glCaps().configSizedInternalFormat(buffTex.fTexelConfig), 0));
+ }
+ }
+}
+
+void GrGLGpu::disableScissor() {
+ if (kNo_TriState != fHWScissorSettings.fEnabled) {
+ GL_CALL(Disable(GR_GL_SCISSOR_TEST));
+ fHWScissorSettings.fEnabled = kNo_TriState;
+ return;
+ }
+}
+
+void GrGLGpu::clear(const GrFixedClip& clip, GrColor color, GrRenderTarget* target) {
+ this->handleDirtyContext();
+
+ // parent class should never let us get here with no RT
+ SkASSERT(target);
+ GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
+
+ this->flushRenderTarget(glRT, clip.scissorEnabled() ? &clip.scissorRect() : nullptr);
+ this->flushScissor(clip.scissorState(), glRT->getViewport(), glRT->origin());
+ this->flushWindowRectangles(clip.windowRectsState(), glRT);
+
+ GrGLfloat r, g, b, a;
+ static const GrGLfloat scale255 = 1.f / 255.f;
+ a = GrColorUnpackA(color) * scale255;
+ GrGLfloat scaleRGB = scale255;
+ r = GrColorUnpackR(color) * scaleRGB;
+ g = GrColorUnpackG(color) * scaleRGB;
+ b = GrColorUnpackB(color) * scaleRGB;
+
+ GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
+ fHWWriteToColor = kYes_TriState;
+ GL_CALL(ClearColor(r, g, b, a));
+ GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
+}
+
+void GrGLGpu::clearStencil(GrRenderTarget* target) {
+ if (nullptr == target) {
+ return;
+ }
+ GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
+ this->flushRenderTarget(glRT, &SkIRect::EmptyIRect());
+
+ this->disableScissor();
+ this->disableWindowRectangles();
+
+ GL_CALL(StencilMask(0xffffffff));
+ GL_CALL(ClearStencil(0));
+ GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
+ fHWStencilSettings.invalidate();
+}
+
+void GrGLGpu::clearStencilClip(const GrFixedClip& clip,
+ bool insideStencilMask,
+ GrRenderTarget* target) {
+ SkASSERT(target);
+ this->handleDirtyContext();
+
+ GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
+ // this should only be called internally when we know we have a
+ // stencil buffer.
+ SkASSERT(sb);
+ GrGLint stencilBitCount = sb->bits();
+#if 0
+ SkASSERT(stencilBitCount > 0);
+ GrGLint clipStencilMask = (1 << (stencilBitCount - 1));
+#else
+ // we could just clear the clip bit but when we go through
+ // ANGLE a partial stencil mask will cause clears to be
+ // turned into draws. Our contract on GrDrawTarget says that
+ // changing the clip between stencil passes may or may not
+ // zero the client's clip bits. So we just clear the whole thing.
+ static const GrGLint clipStencilMask = ~0;
+#endif
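+ // Clearing to "inside the stencil mask" writes the clip bit (the high bit of the stencil
+ // buffer); clearing to "outside" writes zero.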
+ GrGLint value;
+ if (insideStencilMask) {
+ value = (1 << (stencilBitCount - 1));
+ } else {
+ value = 0;
+ }
+ GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
+ this->flushRenderTarget(glRT, &SkIRect::EmptyIRect());
+
+ this->flushScissor(clip.scissorState(), glRT->getViewport(), glRT->origin());
+ this->flushWindowRectangles(clip.windowRectsState(), glRT);
+
+ GL_CALL(StencilMask((uint32_t) clipStencilMask));
+ GL_CALL(ClearStencil(value));
+ GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
+ fHWStencilSettings.invalidate();
+}
+
+static bool read_pixels_pays_for_y_flip(GrRenderTarget* renderTarget, const GrGLCaps& caps,
+ int width, int height, GrPixelConfig config,
+ size_t rowBytes) {
+ // If this render target is already TopLeft, we don't need to flip.
+ if (kTopLeft_GrSurfaceOrigin == renderTarget->origin()) {
+ return false;
+ }
+
+ // If the read is really small or smaller than the min texture size, don't force a draw.
+ static const int kMinSize = 32;
+ if (width < kMinSize || height < kMinSize) {
+ return false;
+ }
+
+ // if GL can do the flip then we'll never pay for it.
+ if (caps.packFlipYSupport()) {
+ return false;
+ }
+
+ // If we have to do memcpy to handle non-tight rowBytes then we
+ // get the flip for free. Otherwise it costs.
+ // Note that we're assuming that 0 rowBytes has already been handled and that the width has been
+ // clipped.
+ return caps.packRowLengthSupport() || GrBytesPerPixel(config) * width == rowBytes;
+}
+
+bool GrGLGpu::readPixelsSupported(GrRenderTarget* target, GrPixelConfig readConfig) {
+ auto bindRenderTarget = [this, target]() -> bool {
+ this->flushRenderTarget(static_cast<GrGLRenderTarget*>(target), &SkIRect::EmptyIRect());
+ return true;
+ };
+ auto getIntegerv = [this](GrGLenum query, GrGLint* value) {
+ GR_GL_GetIntegerv(this->glInterface(), query, value);
+ };
+ GrPixelConfig rtConfig = target->config();
+ return this->glCaps().readPixelsSupported(rtConfig, readConfig, getIntegerv, bindRenderTarget);
+}
+
+bool GrGLGpu::readPixelsSupported(GrPixelConfig rtConfig, GrPixelConfig readConfig) {
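+ // With only a config (and no surface) to test against, create a small scratch render target of
+ // that config so an appropriate framebuffer is bound while the caps query runs.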
+ auto bindRenderTarget = [this, rtConfig]() -> bool {
+ GrTextureDesc desc;
+ desc.fConfig = rtConfig;
+ desc.fWidth = desc.fHeight = 16;
+ desc.fFlags = kRenderTarget_GrSurfaceFlag;
+ SkAutoTUnref<GrTexture> temp(this->createTexture(desc,
+ SkBudgeted::kNo));
+ if (!temp) {
+ return false;
+ }
+ GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(temp->asRenderTarget());
+ this->flushRenderTarget(glrt, &SkIRect::EmptyIRect());
+ return true;
+ };
+ auto getIntegerv = [this](GrGLenum query, GrGLint* value) {
+ GR_GL_GetIntegerv(this->glInterface(), query, value);
+ };
+ return this->glCaps().readPixelsSupported(rtConfig, readConfig, getIntegerv, bindRenderTarget);
+}
+
+bool GrGLGpu::readPixelsSupported(GrSurface* surfaceForConfig, GrPixelConfig readConfig) {
+ if (GrRenderTarget* rt = surfaceForConfig->asRenderTarget()) {
+ return this->readPixelsSupported(rt, readConfig);
+ } else {
+ GrPixelConfig config = surfaceForConfig->config();
+ return this->readPixelsSupported(config, readConfig);
+ }
+}
+
+static bool requires_srgb_conversion(GrPixelConfig a, GrPixelConfig b) {
+ if (GrPixelConfigIsSRGB(a)) {
+ return !GrPixelConfigIsSRGB(b) && !GrPixelConfigIsAlphaOnly(b);
+ } else if (GrPixelConfigIsSRGB(b)) {
+ return !GrPixelConfigIsSRGB(a) && !GrPixelConfigIsAlphaOnly(a);
+ }
+ return false;
+}
+
+bool GrGLGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
+ GrPixelConfig readConfig, DrawPreference* drawPreference,
+ ReadPixelTempDrawInfo* tempDrawInfo) {
+ GrPixelConfig srcConfig = srcSurface->config();
+
+ // These settings we will always want if a temp draw is performed.
+ tempDrawInfo->fTempSurfaceDesc.fFlags = kRenderTarget_GrSurfaceFlag;
+ tempDrawInfo->fTempSurfaceDesc.fWidth = width;
+ tempDrawInfo->fTempSurfaceDesc.fHeight = height;
+ tempDrawInfo->fTempSurfaceDesc.fSampleCnt = 0;
+ tempDrawInfo->fTempSurfaceDesc.fOrigin = kTopLeft_GrSurfaceOrigin; // no CPU y-flip for TL.
+ tempDrawInfo->fTempSurfaceFit = this->glCaps().partialFBOReadIsSlow() ? SkBackingFit::kExact
+ : SkBackingFit::kApprox;
+ // For now assume no swizzling, we may change that below.
+ tempDrawInfo->fSwizzle = GrSwizzle::RGBA();
+
+ // Depends on why we need/want a temp draw. Start off assuming no change, the surface we read
+ // from will be srcConfig and we will read readConfig pixels from it.
+ // Note that if we require a draw and return a non-renderable format for the temp surface the
+ // base class will fail for us.
+ tempDrawInfo->fTempSurfaceDesc.fConfig = srcConfig;
+ tempDrawInfo->fReadConfig = readConfig;
+
+ if (requires_srgb_conversion(srcConfig, readConfig)) {
+ if (!this->readPixelsSupported(readConfig, readConfig)) {
+ return false;
+ }
+ // Draw to do srgb to linear conversion or vice versa.
+ ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
+ tempDrawInfo->fTempSurfaceDesc.fConfig = readConfig;
+ tempDrawInfo->fReadConfig = readConfig;
+ return true;
+ }
+
+ GrRenderTarget* srcAsRT = srcSurface->asRenderTarget();
+ if (!srcAsRT) {
+ // For now keep assuming the draw is not a format transformation, just a draw to get to a
+ // RT. We may add additional transformations below.
+ ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
+ }
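+ // Where RGBA8888 pixel ops are slow, prefer a swizzling draw into a BGRA surface and read that
+ // back as BGRA instead.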
+ if (this->glCaps().rgba8888PixelsOpsAreSlow() && kRGBA_8888_GrPixelConfig == readConfig &&
+ this->readPixelsSupported(kBGRA_8888_GrPixelConfig, kBGRA_8888_GrPixelConfig)) {
+ tempDrawInfo->fTempSurfaceDesc.fConfig = kBGRA_8888_GrPixelConfig;
+ tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
+ tempDrawInfo->fReadConfig = kBGRA_8888_GrPixelConfig;
+ ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference);
+ } else if (this->glCaps().rgbaToBgraReadbackConversionsAreSlow() &&
+ GrBytesPerPixel(readConfig) == 4 &&
+ GrPixelConfigSwapRAndB(readConfig) == srcConfig &&
+ this->readPixelsSupported(srcSurface, srcConfig)) {
+ // Mesa 3D takes a slow path when reading back BGRA from an RGBA surface and vice-versa.
+ // Better to do a draw with a R/B swap and then read as the original config.
+ tempDrawInfo->fTempSurfaceDesc.fConfig = srcConfig;
+ tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
+ tempDrawInfo->fReadConfig = srcConfig;
+ ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference);
+ } else if (!this->readPixelsSupported(srcSurface, readConfig)) {
+ if (readConfig == kBGRA_8888_GrPixelConfig &&
+ this->glCaps().isConfigRenderable(kRGBA_8888_GrPixelConfig, false) &&
+ this->readPixelsSupported(kRGBA_8888_GrPixelConfig, kRGBA_8888_GrPixelConfig)) {
+ // We're trying to read BGRA but it's not supported. If RGBA is renderable and
+ // we can read it back, then do a swizzling draw to a RGBA and read it back (which
+ // will effectively be BGRA).
+ tempDrawInfo->fTempSurfaceDesc.fConfig = kRGBA_8888_GrPixelConfig;
+ tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
+ tempDrawInfo->fReadConfig = kRGBA_8888_GrPixelConfig;
+ ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
+ } else if (readConfig == kSBGRA_8888_GrPixelConfig &&
+ this->glCaps().isConfigRenderable(kSRGBA_8888_GrPixelConfig, false) &&
+ this->readPixelsSupported(kSRGBA_8888_GrPixelConfig, kSRGBA_8888_GrPixelConfig)) {
+ // We're trying to read sBGRA but it's not supported. If sRGBA is renderable and
+ // we can read it back, then do a swizzling draw to a sRGBA and read it back (which
+ // will effectively be sBGRA).
+ tempDrawInfo->fTempSurfaceDesc.fConfig = kSRGBA_8888_GrPixelConfig;
+ tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
+ tempDrawInfo->fReadConfig = kSRGBA_8888_GrPixelConfig;
+ ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
+ } else if (readConfig == kAlpha_8_GrPixelConfig) {
+ // onReadPixels implements a fallback for cases where we want to read kAlpha_8, it's
+ // unsupported, but 32bit RGBA reads are supported.
+ // Don't attempt to do any srgb conversions since we only care about alpha.
+ GrPixelConfig cpuTempConfig = kRGBA_8888_GrPixelConfig;
+ if (GrPixelConfigIsSRGB(srcSurface->config())) {
+ cpuTempConfig = kSRGBA_8888_GrPixelConfig;
+ }
+ if (!this->readPixelsSupported(srcSurface, cpuTempConfig)) {
+ // If we can't read RGBA from the src try to draw to a kRGBA_8888 (or kSRGBA_8888)
+ // first and then onReadPixels will read that to a 32bit temporary buffer.
+ if (this->caps()->isConfigRenderable(cpuTempConfig, false)) {
+ ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
+ tempDrawInfo->fTempSurfaceDesc.fConfig = cpuTempConfig;
+ tempDrawInfo->fReadConfig = kAlpha_8_GrPixelConfig;
+ } else {
+ return false;
+ }
+ } else {
+ SkASSERT(tempDrawInfo->fTempSurfaceDesc.fConfig == srcConfig);
+ SkASSERT(tempDrawInfo->fReadConfig == kAlpha_8_GrPixelConfig);
+ }
+ } else if (this->caps()->isConfigRenderable(readConfig, false) &&
+ this->readPixelsSupported(readConfig, readConfig)) {
+ // Do a draw to convert from the src config to the read config.
+ ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
+ tempDrawInfo->fTempSurfaceDesc.fConfig = readConfig;
+ tempDrawInfo->fReadConfig = readConfig;
+ } else {
+ return false;
+ }
+ }
+
+ if (srcAsRT &&
+ read_pixels_pays_for_y_flip(srcAsRT, this->glCaps(), width, height, readConfig, rowBytes)) {
+ ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference);
+ }
+
+ return true;
+}
+
+bool GrGLGpu::onReadPixels(GrSurface* surface,
+ int left, int top,
+ int width, int height,
+ GrPixelConfig config,
+ void* buffer,
+ size_t rowBytes) {
+ SkASSERT(surface);
+
+ GrGLRenderTarget* renderTarget = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
+ if (!renderTarget) {
+ return false;
+ }
+
+ // OpenGL doesn't do sRGB <-> linear conversions when reading and writing pixels.
+ if (requires_srgb_conversion(surface->config(), config)) {
+ return false;
+ }
+
+ // We have a special case fallback for reading eight bit alpha. We will read back all four 8
+ // bit channels as RGBA and then extract A.
+ if (!this->readPixelsSupported(renderTarget, config)) {
+ // Don't attempt to do any srgb conversions since we only care about alpha.
+ GrPixelConfig tempConfig = kRGBA_8888_GrPixelConfig;
+ if (GrPixelConfigIsSRGB(renderTarget->config())) {
+ tempConfig = kSRGBA_8888_GrPixelConfig;
+ }
+ if (kAlpha_8_GrPixelConfig == config &&
+ this->readPixelsSupported(renderTarget, tempConfig)) {
+ SkAutoTDeleteArray<uint32_t> temp(new uint32_t[width * height * 4]);
+ if (this->onReadPixels(renderTarget, left, top, width, height, tempConfig, temp.get(),
+ width*4)) {
+ uint8_t* dst = reinterpret_cast<uint8_t*>(buffer);
+ for (int j = 0; j < height; ++j) {
+ for (int i = 0; i < width; ++i) {
+ dst[j*rowBytes + i] = (0xFF000000U & temp[j*width+i]) >> 24;
+ }
+ }
+ return true;
+ }
+ }
+ return false;
+ }
+
+ GrGLenum externalFormat;
+ GrGLenum externalType;
+ if (!this->glCaps().getReadPixelsFormat(renderTarget->config(), config, &externalFormat,
+ &externalType)) {
+ return false;
+ }
+ bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
+
+ // resolve the render target if necessary
+ switch (renderTarget->getResolveType()) {
+ case GrGLRenderTarget::kCantResolve_ResolveType:
+ return false;
+ case GrGLRenderTarget::kAutoResolves_ResolveType:
+ this->flushRenderTarget(renderTarget, &SkIRect::EmptyIRect());
+ break;
+ case GrGLRenderTarget::kCanResolve_ResolveType:
+ this->onResolveRenderTarget(renderTarget);
+ // we don't track the state of the READ FBO ID.
+ fStats.incRenderTargetBinds();
+ GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, renderTarget->textureFBOID()));
+ break;
+ default:
+ SkFAIL("Unknown resolve type");
+ }
+
+ const GrGLIRect& glvp = renderTarget->getViewport();
+
+ // the read rect is viewport-relative
+ GrGLIRect readRect;
+ readRect.setRelativeTo(glvp, left, top, width, height, renderTarget->origin());
+
+ size_t bytesPerPixel = GrBytesPerPixel(config);
+ size_t tightRowBytes = bytesPerPixel * width;
+
+ size_t readDstRowBytes = tightRowBytes;
+ void* readDst = buffer;
+
+ // determine if GL can read using the passed rowBytes or if we need
+ // a scratch buffer.
+ SkAutoMalloc scratch;
+ if (rowBytes != tightRowBytes) {
+ if (this->glCaps().packRowLengthSupport() && !(rowBytes % bytesPerPixel)) {
+ GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH,
+ static_cast<GrGLint>(rowBytes / bytesPerPixel)));
+ readDstRowBytes = rowBytes;
+ } else {
+ scratch.reset(tightRowBytes * height);
+ readDst = scratch.get();
+ }
+ }
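+ // If the driver can reverse the row order during the read, let it handle the y-flip for us.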
+ if (flipY && this->glCaps().packFlipYSupport()) {
+ GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 1));
+ }
+ GL_CALL(PixelStorei(GR_GL_PACK_ALIGNMENT, config_alignment(config)));
+
+ GL_CALL(ReadPixels(readRect.fLeft, readRect.fBottom,
+ readRect.fWidth, readRect.fHeight,
+ externalFormat, externalType, readDst));
+ if (readDstRowBytes != tightRowBytes) {
+ SkASSERT(this->glCaps().packRowLengthSupport());
+ GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
+ }
+ if (flipY && this->glCaps().packFlipYSupport()) {
+ GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 0));
+ flipY = false;
+ }
+
+ // now reverse the order of the rows, since GL's are bottom-to-top, but our
+ // API presents top-to-bottom. We must preserve the padding contents. Note
+ // that the above readPixels did not overwrite the padding.
+ if (readDst == buffer) {
+ SkASSERT(rowBytes == readDstRowBytes);
+ if (flipY) {
+ scratch.reset(tightRowBytes);
+ void* tmpRow = scratch.get();
+ // flip y in-place by rows
+ const int halfY = height >> 1;
+ char* top = reinterpret_cast<char*>(buffer);
+ char* bottom = top + (height - 1) * rowBytes;
+ for (int y = 0; y < halfY; y++) {
+ memcpy(tmpRow, top, tightRowBytes);
+ memcpy(top, bottom, tightRowBytes);
+ memcpy(bottom, tmpRow, tightRowBytes);
+ top += rowBytes;
+ bottom -= rowBytes;
+ }
+ }
+ } else {
+ SkASSERT(readDst != buffer);
+ SkASSERT(rowBytes != tightRowBytes);
+ // copy from readDst to buffer while flipping y
+ // const int halfY = height >> 1;
+ const char* src = reinterpret_cast<const char*>(readDst);
+ char* dst = reinterpret_cast<char*>(buffer);
+ if (flipY) {
+ dst += (height-1) * rowBytes;
+ }
+ for (int y = 0; y < height; y++) {
+ memcpy(dst, src, tightRowBytes);
+ src += readDstRowBytes;
+ if (!flipY) {
+ dst += rowBytes;
+ } else {
+ dst -= rowBytes;
+ }
+ }
+ }
+ return true;
+}
+
+GrGpuCommandBuffer* GrGLGpu::createCommandBuffer(
+ GrRenderTarget* target,
+ const GrGpuCommandBuffer::LoadAndStoreInfo& colorInfo,
+ const GrGpuCommandBuffer::LoadAndStoreInfo& stencilInfo) {
+ return new GrGLGpuCommandBuffer(this);
+}
+
+void GrGLGpu::finishDrawTarget() {
+ if (fPLSHasBeenUsed) {
+ /* There is an ARM driver bug where if we use PLS, and then draw a frame which does not
+ * use PLS, it leaves garbage all over the place. As a workaround, we use PLS in a
+ * trivial way every frame. And since we use it every frame, there's never a point at which
+ * it becomes safe to stop using this workaround once we start.
+ */
+ this->disableScissor();
+ this->disableWindowRectangles();
+ // using PLS in the presence of MSAA results in GL_INVALID_OPERATION
+ this->flushHWAAState(nullptr, false, false);
+ SkASSERT(!fHWPLSEnabled);
+ SkASSERT(fMSAAEnabled != kYes_TriState);
+ GL_CALL(Enable(GR_GL_SHADER_PIXEL_LOCAL_STORAGE));
+ this->stampPLSSetupRect(SkRect::MakeXYWH(-100.0f, -100.0f, 0.01f, 0.01f));
+ GL_CALL(Disable(GR_GL_SHADER_PIXEL_LOCAL_STORAGE));
+ }
+}
+
+void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target, const SkIRect* bounds, bool disableSRGB) {
+ SkASSERT(target);
+
+ uint32_t rtID = target->uniqueID();
+ if (fHWBoundRenderTargetUniqueID != rtID) {
+ fStats.incRenderTargetBinds();
+ GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, target->renderFBOID()));
+#ifdef SK_DEBUG
+ // don't do this check in Chromium -- this is causing
+ // lots of repeated command buffer flushes when the compositor is
+ // rendering with Ganesh, which is really slow; even too slow for
+ // Debug mode.
+ if (kChromium_GrGLDriver != this->glContext().driver()) {
+ GrGLenum status;
+ GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+ if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
+ SkDebugf("GrGLGpu::flushRenderTarget glCheckFramebufferStatus %x\n", status);
+ }
+ }
+#endif
+ fHWBoundRenderTargetUniqueID = rtID;
+ this->flushViewport(target->getViewport());
+ }
+
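+ // Keep GL_FRAMEBUFFER_SRGB in sync with whether the bound target's config is sRGB (unless sRGB
+ // writes were explicitly disabled for this flush).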
+ if (this->glCaps().srgbWriteControl()) {
+ this->flushFramebufferSRGB(GrPixelConfigIsSRGB(target->config()) && !disableSRGB);
+ }
+
+ this->didWriteToSurface(target, bounds);
+}
+
+void GrGLGpu::flushFramebufferSRGB(bool enable) {
+ if (enable && kYes_TriState != fHWSRGBFramebuffer) {
+ GL_CALL(Enable(GR_GL_FRAMEBUFFER_SRGB));
+ fHWSRGBFramebuffer = kYes_TriState;
+ } else if (!enable && kNo_TriState != fHWSRGBFramebuffer) {
+ GL_CALL(Disable(GR_GL_FRAMEBUFFER_SRGB));
+ fHWSRGBFramebuffer = kNo_TriState;
+ }
+}
+
+void GrGLGpu::flushViewport(const GrGLIRect& viewport) {
+ if (fHWViewport != viewport) {
+ viewport.pushToGLViewport(this->glInterface());
+ fHWViewport = viewport;
+ }
+}
+
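+// Indexed by GrPrimitiveType; entries must stay in that enum's order.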
+GrGLenum gPrimitiveType2GLMode[] = {
+ GR_GL_TRIANGLES,
+ GR_GL_TRIANGLE_STRIP,
+ GR_GL_TRIANGLE_FAN,
+ GR_GL_POINTS,
+ GR_GL_LINES,
+ GR_GL_LINE_STRIP
+};
+
+#define SWAP_PER_DRAW 0
+
+#if SWAP_PER_DRAW
+ #if defined(SK_BUILD_FOR_MAC)
+ #include <AGL/agl.h>
+ #elif defined(SK_BUILD_FOR_WIN32)
+ #include <gl/GL.h>
+ void SwapBuf() {
+ DWORD procID = GetCurrentProcessId();
+ HWND hwnd = GetTopWindow(GetDesktopWindow());
+ while(hwnd) {
+ DWORD wndProcID = 0;
+ GetWindowThreadProcessId(hwnd, &wndProcID);
+ if(wndProcID == procID) {
+ SwapBuffers(GetDC(hwnd));
+ }
+ hwnd = GetNextWindow(hwnd, GW_HWNDNEXT);
+ }
+ }
+ #endif
+#endif
+
+void GrGLGpu::draw(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ const GrMesh meshes[],
+ int meshCount) {
+ this->handleDirtyContext();
+
+ bool hasPoints = false;
+ for (int i = 0; i < meshCount; ++i) {
+ if (meshes[i].primitiveType() == kPoints_GrPrimitiveType) {
+ hasPoints = true;
+ break;
+ }
+ }
+ if (!this->flushGLState(pipeline, primProc, hasPoints)) {
+ return;
+ }
+ GrPixelLocalStorageState plsState = primProc.getPixelLocalStorageState();
+ if (!fHWPLSEnabled && plsState !=
+ GrPixelLocalStorageState::kDisabled_GrPixelLocalStorageState) {
+ GL_CALL(Enable(GR_GL_SHADER_PIXEL_LOCAL_STORAGE));
+ this->setupPixelLocalStorage(pipeline, primProc);
+ fHWPLSEnabled = true;
+ }
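+ // The finishing PLS draw writes the accumulated result to the color buffer, so flush a
+ // disabled stencil state before issuing it.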
+ if (plsState == GrPixelLocalStorageState::kFinish_GrPixelLocalStorageState) {
+ GrStencilSettings stencil;
+ stencil.setDisabled();
+ this->flushStencil(stencil);
+ }
+
+ for (int i = 0; i < meshCount; ++i) {
+ if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*this->caps())) {
+ this->xferBarrier(pipeline.getRenderTarget(), barrierType);
+ }
+
+ const GrMesh& mesh = meshes[i];
+ GrMesh::Iterator iter;
+ const GrNonInstancedMesh* nonInstMesh = iter.init(mesh);
+ do {
+ size_t indexOffsetInBytes = 0;
+ this->setupGeometry(primProc, *nonInstMesh, &indexOffsetInBytes);
+ if (nonInstMesh->isIndexed()) {
+ GrGLvoid* indices =
+ reinterpret_cast<GrGLvoid*>(indexOffsetInBytes +
+ sizeof(uint16_t) * nonInstMesh->startIndex());
+ // nonInstMesh->startVertex() was accounted for by setupGeometry.
+ if (this->glCaps().drawRangeElementsSupport()) {
+ // We assume here that the batch that generated the mesh used the full
+ // 0..vertexCount()-1 range.
+ int start = 0;
+ int end = nonInstMesh->vertexCount() - 1;
+ GL_CALL(DrawRangeElements(gPrimitiveType2GLMode[nonInstMesh->primitiveType()],
+ start, end,
+ nonInstMesh->indexCount(),
+ GR_GL_UNSIGNED_SHORT,
+ indices));
+ } else {
+ GL_CALL(DrawElements(gPrimitiveType2GLMode[nonInstMesh->primitiveType()],
+ nonInstMesh->indexCount(),
+ GR_GL_UNSIGNED_SHORT,
+ indices));
+ }
+ } else {
+ // Pass 0 for the 'first' parameter. We have to adjust glVertexAttribPointer() to account
+ // for startVertex in the DrawElements case, so we always rely on setupGeometry to
+ // have accounted for startVertex.
+ GL_CALL(DrawArrays(gPrimitiveType2GLMode[nonInstMesh->primitiveType()], 0,
+ nonInstMesh->vertexCount()));
+ }
+ fStats.incNumDraws();
+ } while ((nonInstMesh = iter.next()));
+ }
+
+ if (fHWPLSEnabled && plsState == GrPixelLocalStorageState::kFinish_GrPixelLocalStorageState) {
+ // PLS draws always involve multiple draws, finishing up with a non-PLS
+ // draw that writes to the color buffer. That draw ends up here; we wait
+ // until after it is complete to actually disable PLS.
+ GL_CALL(Disable(GR_GL_SHADER_PIXEL_LOCAL_STORAGE));
+ fHWPLSEnabled = false;
+ this->disableScissor();
+ this->disableWindowRectangles();
+ }
+
+#if SWAP_PER_DRAW
+ glFlush();
+ #if defined(SK_BUILD_FOR_MAC)
+ aglSwapBuffers(aglGetCurrentContext());
+ int set_a_break_pt_here = 9;
+ aglSwapBuffers(aglGetCurrentContext());
+ #elif defined(SK_BUILD_FOR_WIN32)
+ SwapBuf();
+ int set_a_break_pt_here = 9;
+ SwapBuf();
+ #endif
+#endif
+}
+
+void GrGLGpu::stampPLSSetupRect(const SkRect& bounds) {
+ SkASSERT(this->glCaps().glslCaps()->plsPathRenderingSupport());
+
+ if (!fPLSSetupProgram.fProgram) {
+ if (!this->createPLSSetupProgram()) {
+ SkDebugf("Failed to create PLS setup program.\n");
+ return;
+ }
+ }
+
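+ // Draw a single quad with the dedicated PLS setup program, then restore the previously bound
+ // program and stencil enable below.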
+ GL_CALL(UseProgram(fPLSSetupProgram.fProgram));
+ this->fHWVertexArrayState.setVertexArrayID(this, 0);
+
+ GrGLAttribArrayState* attribs = this->fHWVertexArrayState.bindInternalVertexArray(this);
+ attribs->set(this, 0, fPLSSetupProgram.fArrayBuffer, kVec2f_GrVertexAttribType,
+ 2 * sizeof(GrGLfloat), 0);
+ attribs->disableUnusedArrays(this, 0x1);
+
+ GL_CALL(Uniform4f(fPLSSetupProgram.fPosXformUniform, bounds.width(), bounds.height(),
+ bounds.left(), bounds.top()));
+
+ GrXferProcessor::BlendInfo blendInfo;
+ blendInfo.reset();
+ this->flushBlend(blendInfo, GrSwizzle());
+ this->flushColorWrite(true);
+ this->flushDrawFace(GrDrawFace::kBoth);
+ if (!fHWStencilSettings.isDisabled()) {
+ GL_CALL(Disable(GR_GL_STENCIL_TEST));
+ }
+ GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
+ GL_CALL(UseProgram(fHWProgramID));
+ if (!fHWStencilSettings.isDisabled()) {
+ GL_CALL(Enable(GR_GL_STENCIL_TEST));
+ }
+}
+
+void GrGLGpu::setupPixelLocalStorage(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc) {
+ fPLSHasBeenUsed = true;
+ const SkRect& bounds =
+ static_cast<const GrPLSGeometryProcessor&>(primProc).getBounds();
+ // setup pixel local storage -- this means capturing and storing the current framebuffer color
+ // and initializing the winding counts to zero
+ GrRenderTarget* rt = pipeline.getRenderTarget();
+ SkScalar width = SkIntToScalar(rt->width());
+ SkScalar height = SkIntToScalar(rt->height());
+ // dst rect edges in NDC (-1 to 1)
+ // having some issues with rounding, just expand the bounds by 1 and trust the scissor to keep
+ // it contained properly
+ GrGLfloat dx0 = 2.0f * (bounds.left() - 1) / width - 1.0f;
+ GrGLfloat dx1 = 2.0f * (bounds.right() + 1) / width - 1.0f;
+ GrGLfloat dy0 = -2.0f * (bounds.top() - 1) / height + 1.0f;
+ GrGLfloat dy1 = -2.0f * (bounds.bottom() + 1) / height + 1.0f;
+ SkRect deviceBounds = SkRect::MakeXYWH(dx0, dy0, dx1 - dx0, dy1 - dy0);
+
+ GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE_ARM));
+ this->stampPLSSetupRect(deviceBounds);
+}
+
+void GrGLGpu::onResolveRenderTarget(GrRenderTarget* target) {
+ GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(target);
+ if (rt->needsResolve()) {
+ // Some extensions automatically resolve the texture when it is read.
+ if (this->glCaps().usesMSAARenderBuffers()) {
+ SkASSERT(rt->textureFBOID() != rt->renderFBOID());
+ fStats.incRenderTargetBinds();
+ fStats.incRenderTargetBinds();
+ GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID()));
+ GL_CALL(BindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID()));
+ // make sure we go through flushRenderTarget() since we've modified
+ // the bound DRAW FBO ID.
+ fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
+ const GrGLIRect& vp = rt->getViewport();
+ const SkIRect dirtyRect = rt->getResolveRect();
+
+ if (GrGLCaps::kES_Apple_MSFBOType == this->glCaps().msFBOType()) {
+ // Apple's extension uses the scissor as the blit bounds.
+ GrScissorState scissorState;
+ scissorState.set(dirtyRect);
+ this->flushScissor(scissorState, vp, rt->origin());
+ this->disableWindowRectangles();
+ GL_CALL(ResolveMultisampleFramebuffer());
+ } else {
+ GrGLIRect r;
+ r.setRelativeTo(vp, dirtyRect.fLeft, dirtyRect.fTop,
+ dirtyRect.width(), dirtyRect.height(), target->origin());
+
+ int right = r.fLeft + r.fWidth;
+ int top = r.fBottom + r.fHeight;
+
+ // BlitFrameBuffer respects the scissor, so disable it.
+ this->disableScissor();
+ this->disableWindowRectangles();
+ GL_CALL(BlitFramebuffer(r.fLeft, r.fBottom, right, top,
+ r.fLeft, r.fBottom, right, top,
+ GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
+ }
+ }
+ rt->flagAsResolved();
+ }
+}
+
+namespace {
+
+GrGLenum gr_to_gl_stencil_op(GrStencilOp op) {
+ static const GrGLenum gTable[kGrStencilOpCount] = {
+ GR_GL_KEEP, // kKeep
+ GR_GL_ZERO, // kZero
+ GR_GL_REPLACE, // kReplace
+ GR_GL_INVERT, // kInvert
+ GR_GL_INCR_WRAP, // kIncWrap
+ GR_GL_DECR_WRAP, // kDecWrap
+ GR_GL_INCR, // kIncClamp
+ GR_GL_DECR, // kDecClamp
+ };
+ GR_STATIC_ASSERT(0 == (int)GrStencilOp::kKeep);
+ GR_STATIC_ASSERT(1 == (int)GrStencilOp::kZero);
+ GR_STATIC_ASSERT(2 == (int)GrStencilOp::kReplace);
+ GR_STATIC_ASSERT(3 == (int)GrStencilOp::kInvert);
+ GR_STATIC_ASSERT(4 == (int)GrStencilOp::kIncWrap);
+ GR_STATIC_ASSERT(5 == (int)GrStencilOp::kDecWrap);
+ GR_STATIC_ASSERT(6 == (int)GrStencilOp::kIncClamp);
+ GR_STATIC_ASSERT(7 == (int)GrStencilOp::kDecClamp);
+ SkASSERT(op < (GrStencilOp)kGrStencilOpCount);
+ return gTable[(int)op];
+}
+
+void set_gl_stencil(const GrGLInterface* gl,
+ const GrStencilSettings::Face& face,
+ GrGLenum glFace) {
+ GrGLenum glFunc = GrToGLStencilFunc(face.fTest);
+ GrGLenum glFailOp = gr_to_gl_stencil_op(face.fFailOp);
+ GrGLenum glPassOp = gr_to_gl_stencil_op(face.fPassOp);
+
+ GrGLint ref = face.fRef;
+ GrGLint mask = face.fTestMask;
+ GrGLint writeMask = face.fWriteMask;
+
+ if (GR_GL_FRONT_AND_BACK == glFace) {
+ // we call the combined func just in case separate stencil is not
+ // supported.
+ GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask));
+ GR_GL_CALL(gl, StencilMask(writeMask));
+ GR_GL_CALL(gl, StencilOp(glFailOp, GR_GL_KEEP, glPassOp));
+ } else {
+ GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask));
+ GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask));
+ GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, GR_GL_KEEP, glPassOp));
+ }
+}
+}
+
+void GrGLGpu::flushStencil(const GrStencilSettings& stencilSettings) {
+ if (fHWStencilSettings != stencilSettings) {
+ if (stencilSettings.isDisabled()) {
+ if (kNo_TriState != fHWStencilTestEnabled) {
+ GL_CALL(Disable(GR_GL_STENCIL_TEST));
+ fHWStencilTestEnabled = kNo_TriState;
+ }
+ } else {
+ if (kYes_TriState != fHWStencilTestEnabled) {
+ GL_CALL(Enable(GR_GL_STENCIL_TEST));
+ fHWStencilTestEnabled = kYes_TriState;
+ }
+ }
+ if (!stencilSettings.isDisabled()) {
+ if (stencilSettings.isTwoSided()) {
+ SkASSERT(this->caps()->twoSidedStencilSupport());
+ set_gl_stencil(this->glInterface(),
+ stencilSettings.front(),
+ GR_GL_FRONT);
+ set_gl_stencil(this->glInterface(),
+ stencilSettings.back(),
+ GR_GL_BACK);
+ } else {
+ set_gl_stencil(this->glInterface(),
+ stencilSettings.front(),
+ GR_GL_FRONT_AND_BACK);
+ }
+ }
+ fHWStencilSettings = stencilSettings;
+ }
+}
+
+void GrGLGpu::flushHWAAState(GrRenderTarget* rt, bool useHWAA, bool stencilEnabled) {
+ // rt is only optional if useHWAA is false.
+ SkASSERT(rt || !useHWAA);
+ SkASSERT(!useHWAA || rt->isStencilBufferMultisampled());
+
+ if (this->caps()->multisampleDisableSupport()) {
+ if (useHWAA) {
+ if (kYes_TriState != fMSAAEnabled) {
+ GL_CALL(Enable(GR_GL_MULTISAMPLE));
+ fMSAAEnabled = kYes_TriState;
+ }
+ } else {
+ if (kNo_TriState != fMSAAEnabled) {
+ GL_CALL(Disable(GR_GL_MULTISAMPLE));
+ fMSAAEnabled = kNo_TriState;
+ }
+ }
+ }
+
+ if (0 != this->caps()->maxRasterSamples()) {
+ if (useHWAA && rt->isMixedSampled() && !stencilEnabled) {
+ // Since stencil is disabled and we want more samples than are in the color buffer, we
+ // need to tell the rasterizer explicitly how many samples to run.
+ if (kYes_TriState != fHWRasterMultisampleEnabled) {
+ GL_CALL(Enable(GR_GL_RASTER_MULTISAMPLE));
+ fHWRasterMultisampleEnabled = kYes_TriState;
+ }
+ if (rt->numStencilSamples() != fHWNumRasterSamples) {
+ SkASSERT(rt->numStencilSamples() <= this->caps()->maxRasterSamples());
+ GL_CALL(RasterSamples(rt->numStencilSamples(), GR_GL_TRUE));
+ fHWNumRasterSamples = rt->numStencilSamples();
+ }
+ } else {
+ if (kNo_TriState != fHWRasterMultisampleEnabled) {
+ GL_CALL(Disable(GR_GL_RASTER_MULTISAMPLE));
+ fHWRasterMultisampleEnabled = kNo_TriState;
+ }
+ }
+ } else {
+ SkASSERT(!useHWAA || !rt->isMixedSampled() || stencilEnabled);
+ }
+}
+
+void GrGLGpu::flushBlend(const GrXferProcessor::BlendInfo& blendInfo, const GrSwizzle& swizzle) {
+ // Any optimization to disable blending should have already been applied and
+ // tweaked the equation to "add" or "subtract", and the coeffs to (1, 0).
+
+ GrBlendEquation equation = blendInfo.fEquation;
+ GrBlendCoeff srcCoeff = blendInfo.fSrcBlend;
+ GrBlendCoeff dstCoeff = blendInfo.fDstBlend;
+ bool blendOff = (kAdd_GrBlendEquation == equation || kSubtract_GrBlendEquation == equation) &&
+ kOne_GrBlendCoeff == srcCoeff && kZero_GrBlendCoeff == dstCoeff;
+ if (blendOff) {
+ if (kNo_TriState != fHWBlendState.fEnabled) {
+ GL_CALL(Disable(GR_GL_BLEND));
+
+ // Workaround for the ARM KHR_blend_equation_advanced blacklist issue
+ // https://code.google.com/p/skia/issues/detail?id=3943
+ if (kARM_GrGLVendor == this->ctxInfo().vendor() &&
+ GrBlendEquationIsAdvanced(fHWBlendState.fEquation)) {
+ SkASSERT(this->caps()->advancedBlendEquationSupport());
+ // Set to any basic blending equation.
+ GrBlendEquation blend_equation = kAdd_GrBlendEquation;
+ GL_CALL(BlendEquation(gXfermodeEquation2Blend[blend_equation]));
+ fHWBlendState.fEquation = blend_equation;
+ }
+
+ fHWBlendState.fEnabled = kNo_TriState;
+ }
+ return;
+ }
+
+ if (kYes_TriState != fHWBlendState.fEnabled) {
+ GL_CALL(Enable(GR_GL_BLEND));
+ fHWBlendState.fEnabled = kYes_TriState;
+ }
+
+ if (fHWBlendState.fEquation != equation) {
+ GL_CALL(BlendEquation(gXfermodeEquation2Blend[equation]));
+ fHWBlendState.fEquation = equation;
+ }
+
+ if (GrBlendEquationIsAdvanced(equation)) {
+ SkASSERT(this->caps()->advancedBlendEquationSupport());
+ // Advanced equations have no other blend state.
+ return;
+ }
+
+ if (fHWBlendState.fSrcCoeff != srcCoeff || fHWBlendState.fDstCoeff != dstCoeff) {
+ GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff],
+ gXfermodeCoeff2Blend[dstCoeff]));
+ fHWBlendState.fSrcCoeff = srcCoeff;
+ fHWBlendState.fDstCoeff = dstCoeff;
+ }
+
+ if ((BlendCoeffReferencesConstant(srcCoeff) || BlendCoeffReferencesConstant(dstCoeff))) {
+ GrColor blendConst = blendInfo.fBlendConstant;
+ blendConst = swizzle.applyTo(blendConst);
+ if (!fHWBlendState.fConstColorValid || fHWBlendState.fConstColor != blendConst) {
+ GrGLfloat c[4];
+ GrColorToRGBAFloat(blendConst, c);
+ GL_CALL(BlendColor(c[0], c[1], c[2], c[3]));
+ fHWBlendState.fConstColor = blendConst;
+ fHWBlendState.fConstColorValid = true;
+ }
+ }
+}
+
+static inline GrGLenum tile_to_gl_wrap(SkShader::TileMode tm) {
+ static const GrGLenum gWrapModes[] = {
+ GR_GL_CLAMP_TO_EDGE,
+ GR_GL_REPEAT,
+ GR_GL_MIRRORED_REPEAT
+ };
+ GR_STATIC_ASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gWrapModes));
+ GR_STATIC_ASSERT(0 == SkShader::kClamp_TileMode);
+ GR_STATIC_ASSERT(1 == SkShader::kRepeat_TileMode);
+ GR_STATIC_ASSERT(2 == SkShader::kMirror_TileMode);
+ return gWrapModes[tm];
+}
+
+static GrGLenum get_component_enum_from_char(char component) {
+ switch (component) {
+ case 'r':
+ return GR_GL_RED;
+ case 'g':
+ return GR_GL_GREEN;
+ case 'b':
+ return GR_GL_BLUE;
+ case 'a':
+ return GR_GL_ALPHA;
+ default:
+ SkFAIL("Unsupported component");
+ return 0;
+ }
+}
+
+/** If texture swizzling is available using tex parameters then it is preferred over mangling
+ the generated shader code. This potentially allows greater reuse of cached shaders. */
+static void get_tex_param_swizzle(GrPixelConfig config,
+ const GrGLCaps& caps,
+ GrGLenum* glSwizzle) {
+ const GrSwizzle& swizzle = caps.configSwizzle(config);
+ for (int i = 0; i < 4; ++i) {
+ glSwizzle[i] = get_component_enum_from_char(swizzle.c_str()[i]);
+ }
+}
+
+void GrGLGpu::bindTexture(int unitIdx, const GrTextureParams& params, bool allowSRGBInputs,
+ GrGLTexture* texture) {
+ SkASSERT(texture);
+
+#ifdef SK_DEBUG
+ if (!this->caps()->npotTextureTileSupport()) {
+ const bool tileX = SkShader::kClamp_TileMode != params.getTileModeX();
+ const bool tileY = SkShader::kClamp_TileMode != params.getTileModeY();
+ if (tileX || tileY) {
+ const int w = texture->width();
+ const int h = texture->height();
+ SkASSERT(SkIsPow2(w) && SkIsPow2(h));
+ }
+ }
+#endif
+
+ // If we created a rt/tex and rendered to it without using a texture and now we're texturing
+ // from the rt it will still be the last bound texture, but it needs resolving. So keep this
+ // out of the "last != next" check.
+ GrGLRenderTarget* texRT = static_cast<GrGLRenderTarget*>(texture->asRenderTarget());
+ if (texRT) {
+ this->onResolveRenderTarget(texRT);
+ }
+
+ uint32_t textureID = texture->uniqueID();
+ GrGLenum target = texture->target();
+ if (fHWBoundTextureUniqueIDs[unitIdx] != textureID) {
+ this->setTextureUnit(unitIdx);
+ GL_CALL(BindTexture(target, texture->textureID()));
+ fHWBoundTextureUniqueIDs[unitIdx] = textureID;
+ }
+
+ ResetTimestamp timestamp;
+ const GrGLTexture::TexParams& oldTexParams = texture->getCachedTexParams(&timestamp);
+ bool setAll = timestamp < this->getResetTimestamp();
+ GrGLTexture::TexParams newTexParams;
+
+ static GrGLenum glMinFilterModes[] = {
+ GR_GL_NEAREST,
+ GR_GL_LINEAR,
+ GR_GL_LINEAR_MIPMAP_LINEAR
+ };
+ static GrGLenum glMagFilterModes[] = {
+ GR_GL_NEAREST,
+ GR_GL_LINEAR,
+ GR_GL_LINEAR
+ };
+ GrTextureParams::FilterMode filterMode = params.filterMode();
+
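+ // Fall back to bilerp when mip mapping isn't possible (no mip map support or a compressed
+ // config).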
+ if (GrTextureParams::kMipMap_FilterMode == filterMode) {
+ if (!this->caps()->mipMapSupport() || GrPixelConfigIsCompressed(texture->config())) {
+ filterMode = GrTextureParams::kBilerp_FilterMode;
+ }
+ }
+
+ newTexParams.fMinFilter = glMinFilterModes[filterMode];
+ newTexParams.fMagFilter = glMagFilterModes[filterMode];
+
+ if (GrPixelConfigIsSRGB(texture->config())) {
+ newTexParams.fSRGBDecode = allowSRGBInputs ? GR_GL_DECODE_EXT : GR_GL_SKIP_DECODE_EXT;
+ if (setAll || newTexParams.fSRGBDecode != oldTexParams.fSRGBDecode) {
+ this->setTextureUnit(unitIdx);
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SRGB_DECODE_EXT, newTexParams.fSRGBDecode));
+ }
+ }
+
+#ifdef SK_DEBUG
+ // We were supposed to ensure MipMaps were up-to-date and built correctly before getting here.
+ if (GrTextureParams::kMipMap_FilterMode == filterMode) {
+ SkASSERT(!texture->texturePriv().mipMapsAreDirty());
+ if (GrPixelConfigIsSRGB(texture->config())) {
+ SkSourceGammaTreatment gammaTreatment = allowSRGBInputs ?
+ SkSourceGammaTreatment::kRespect : SkSourceGammaTreatment::kIgnore;
+ SkASSERT(texture->texturePriv().gammaTreatment() == gammaTreatment);
+ }
+ }
+#endif
+
+ newTexParams.fMaxMipMapLevel = texture->texturePriv().maxMipMapLevel();
+
+ newTexParams.fWrapS = tile_to_gl_wrap(params.getTileModeX());
+ newTexParams.fWrapT = tile_to_gl_wrap(params.getTileModeY());
+ get_tex_param_swizzle(texture->config(), this->glCaps(), newTexParams.fSwizzleRGBA);
+ if (setAll || newTexParams.fMagFilter != oldTexParams.fMagFilter) {
+ this->setTextureUnit(unitIdx);
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, newTexParams.fMagFilter));
+ }
+ if (setAll || newTexParams.fMinFilter != oldTexParams.fMinFilter) {
+ this->setTextureUnit(unitIdx);
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, newTexParams.fMinFilter));
+ }
+ if (setAll || newTexParams.fMaxMipMapLevel != oldTexParams.fMaxMipMapLevel) {
+ // These are not supported in ES2 contexts
+ if (this->glCaps().mipMapLevelAndLodControlSupport()) {
+ if (newTexParams.fMaxMipMapLevel != 0) {
+ this->setTextureUnit(unitIdx);
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_LOD, 0));
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_BASE_LEVEL, 0));
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LOD,
+ newTexParams.fMaxMipMapLevel));
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LEVEL,
+ newTexParams.fMaxMipMapLevel));
+ }
+ }
+ }
+ if (setAll || newTexParams.fWrapS != oldTexParams.fWrapS) {
+ this->setTextureUnit(unitIdx);
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_S, newTexParams.fWrapS));
+ }
+ if (setAll || newTexParams.fWrapT != oldTexParams.fWrapT) {
+ this->setTextureUnit(unitIdx);
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_T, newTexParams.fWrapT));
+ }
+ if (this->glCaps().textureSwizzleSupport() &&
+ (setAll || memcmp(newTexParams.fSwizzleRGBA,
+ oldTexParams.fSwizzleRGBA,
+ sizeof(newTexParams.fSwizzleRGBA)))) {
+ this->setTextureSwizzle(unitIdx, target, newTexParams.fSwizzleRGBA);
+ }
+ texture->setCachedTexParams(newTexParams, this->getResetTimestamp());
+}
+
+void GrGLGpu::bindTexelBuffer(int unitIdx, GrPixelConfig texelConfig, GrGLBuffer* buffer) {
+ SkASSERT(this->glCaps().canUseConfigWithTexelBuffer(texelConfig));
+ SkASSERT(unitIdx >= 0 && unitIdx < fHWBufferTextures.count());
+
+ BufferTexture& buffTex = fHWBufferTextures[unitIdx];
+
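+ // Lazily create the buffer texture object for this unit and bind it the first time it is used.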
+ if (!buffTex.fKnownBound) {
+ if (!buffTex.fTextureID) {
+ GL_CALL(GenTextures(1, &buffTex.fTextureID));
+ if (!buffTex.fTextureID) {
+ return;
+ }
+ }
+
+ this->setTextureUnit(unitIdx);
+ GL_CALL(BindTexture(GR_GL_TEXTURE_BUFFER, buffTex.fTextureID));
+
+ buffTex.fKnownBound = true;
+ }
+
+ if (buffer->uniqueID() != buffTex.fAttachedBufferUniqueID ||
+ buffTex.fTexelConfig != texelConfig) {
+
+ this->setTextureUnit(unitIdx);
+ GL_CALL(TexBuffer(GR_GL_TEXTURE_BUFFER,
+ this->glCaps().configSizedInternalFormat(texelConfig),
+ buffer->bufferID()));
+
+ buffTex.fTexelConfig = texelConfig;
+ buffTex.fAttachedBufferUniqueID = buffer->uniqueID();
+
+ if (this->glCaps().textureSwizzleSupport() &&
+ this->glCaps().configSwizzle(texelConfig) != buffTex.fSwizzle) {
+ GrGLenum glSwizzle[4];
+ get_tex_param_swizzle(texelConfig, this->glCaps(), glSwizzle);
+ this->setTextureSwizzle(unitIdx, GR_GL_TEXTURE_BUFFER, glSwizzle);
+ buffTex.fSwizzle = this->glCaps().configSwizzle(texelConfig);
+ }
+
+ buffer->setHasAttachedToTexture();
+ fHWMaxUsedBufferTextureUnit = SkTMax(unitIdx, fHWMaxUsedBufferTextureUnit);
+ }
+}
+
+void GrGLGpu::generateMipmaps(const GrTextureParams& params, bool allowSRGBInputs,
+ GrGLTexture* texture) {
+ SkASSERT(texture);
+
+ // First, figure out if we need mips for this texture at all:
+ GrTextureParams::FilterMode filterMode = params.filterMode();
+
+ if (GrTextureParams::kMipMap_FilterMode == filterMode) {
+ if (!this->caps()->mipMapSupport() || GrPixelConfigIsCompressed(texture->config())) {
+ filterMode = GrTextureParams::kBilerp_FilterMode;
+ }
+ }
+
+ if (GrTextureParams::kMipMap_FilterMode != filterMode) {
+ return;
+ }
+
+ // If this is an sRGB texture and the mips were previously built the "other" way
+ // (gamma-correct vs. not), then we need to rebuild them. We don't need to check for
+ // srgbSupport - we'll *never* get an sRGB pixel config if we don't support it.
+ SkSourceGammaTreatment gammaTreatment = allowSRGBInputs
+ ? SkSourceGammaTreatment::kRespect : SkSourceGammaTreatment::kIgnore;
+ if (GrPixelConfigIsSRGB(texture->config()) &&
+ gammaTreatment != texture->texturePriv().gammaTreatment()) {
+ texture->texturePriv().dirtyMipMaps(true);
+ }
+
+ // If the mips aren't dirty, we're done:
+ if (!texture->texturePriv().mipMapsAreDirty()) {
+ return;
+ }
+
+ // If we created a rt/tex and rendered to it without using a texture and now we're texturing
+ // from the rt it will still be the last bound texture, but it needs resolving.
+ GrGLRenderTarget* texRT = static_cast<GrGLRenderTarget*>(texture->asRenderTarget());
+ if (texRT) {
+ this->onResolveRenderTarget(texRT);
+ }
+
+ GrGLenum target = texture->target();
+ this->setScratchTextureUnit();
+ GL_CALL(BindTexture(target, texture->textureID()));
+
+ // Configure sRGB decode, if necessary. This state is the only thing needed for the driver
+ // call (glGenerateMipmap) to work correctly. Our manual method dirties other state, too.
+ if (GrPixelConfigIsSRGB(texture->config())) {
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SRGB_DECODE_EXT,
+ allowSRGBInputs ? GR_GL_DECODE_EXT : GR_GL_SKIP_DECODE_EXT));
+ }
+
+ // Either do manual mipmap generation or (if that fails), just rely on the driver:
+ if (!this->generateMipmap(texture, allowSRGBInputs)) {
+ GL_CALL(GenerateMipmap(target));
+ }
+
+ texture->texturePriv().dirtyMipMaps(false);
+ texture->texturePriv().setMaxMipMapLevel(SkMipMap::ComputeLevelCount(
+ texture->width(), texture->height()));
+ texture->texturePriv().setGammaTreatment(gammaTreatment);
+
+ // We have potentially set lots of state on the texture. Easiest to dirty it all:
+ texture->textureParamsModified();
+}
+
+void GrGLGpu::setTextureSwizzle(int unitIdx, GrGLenum target, const GrGLenum swizzle[]) {
+ this->setTextureUnit(unitIdx);
+ if (this->glStandard() == kGLES_GrGLStandard) {
+ // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA.
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_R, swizzle[0]));
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_G, swizzle[1]));
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_B, swizzle[2]));
+ GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_A, swizzle[3]));
+ } else {
+ GR_STATIC_ASSERT(sizeof(swizzle[0]) == sizeof(GrGLint));
+ GL_CALL(TexParameteriv(target, GR_GL_TEXTURE_SWIZZLE_RGBA,
+ reinterpret_cast<const GrGLint*>(swizzle)));
+ }
+}
+
+void GrGLGpu::flushColorWrite(bool writeColor) {
+ if (!writeColor) {
+ if (kNo_TriState != fHWWriteToColor) {
+ GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE,
+ GR_GL_FALSE, GR_GL_FALSE));
+ fHWWriteToColor = kNo_TriState;
+ }
+ } else {
+ if (kYes_TriState != fHWWriteToColor) {
+ GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
+ fHWWriteToColor = kYes_TriState;
+ }
+ }
+}
+
+void GrGLGpu::flushDrawFace(GrDrawFace face) {
+ if (fHWDrawFace != face) {
+ switch (face) {
+ case GrDrawFace::kCCW:
+ GL_CALL(Enable(GR_GL_CULL_FACE));
+ GL_CALL(CullFace(GR_GL_BACK));
+ break;
+ case GrDrawFace::kCW:
+ GL_CALL(Enable(GR_GL_CULL_FACE));
+ GL_CALL(CullFace(GR_GL_FRONT));
+ break;
+ case GrDrawFace::kBoth:
+ GL_CALL(Disable(GR_GL_CULL_FACE));
+ break;
+ default:
+ SkFAIL("Unknown draw face.");
+ }
+ fHWDrawFace = face;
+ }
+}
+
+void GrGLGpu::setTextureUnit(int unit) {
+ SkASSERT(unit >= 0 && unit < fHWBoundTextureUniqueIDs.count());
+ if (unit != fHWActiveTextureUnitIdx) {
+ GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit));
+ fHWActiveTextureUnitIdx = unit;
+ }
+}
+
+void GrGLGpu::setScratchTextureUnit() {
+ // Bind the last texture unit since it is the least likely to be used by GrGLProgram.
+ int lastUnitIdx = fHWBoundTextureUniqueIDs.count() - 1;
+ if (lastUnitIdx != fHWActiveTextureUnitIdx) {
+ GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx));
+ fHWActiveTextureUnitIdx = lastUnitIdx;
+ }
+ // Clear out this field so that if a program does use this unit it will rebind the correct
+ // texture.
+ fHWBoundTextureUniqueIDs[lastUnitIdx] = SK_InvalidUniqueID;
+}
+
+// Determines whether glBlitFramebuffer could be used between src and dst.
+static inline bool can_blit_framebuffer(const GrSurface* dst,
+ const GrSurface* src,
+ const GrGLGpu* gpu) {
+ if (gpu->glCaps().isConfigRenderable(dst->config(), dst->desc().fSampleCnt > 0) &&
+ gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0)) {
+ switch (gpu->glCaps().blitFramebufferSupport()) {
+ case GrGLCaps::kNone_BlitFramebufferSupport:
+ return false;
+ case GrGLCaps::kNoScalingNoMirroring_BlitFramebufferSupport:
+ // Our copy surface doesn't support scaling so just check for mirroring.
+ if (dst->origin() != src->origin()) {
+ return false;
+ }
+ break;
+ case GrGLCaps::kFull_BlitFramebufferSupport:
+ break;
+ }
+ // ES3 doesn't allow framebuffer blits when the src has MSAA and the configs don't match
+ // or the rects are not the same (not just the same size but have the same edges).
+ if (GrGLCaps::kES_3_0_MSFBOType == gpu->glCaps().msFBOType() &&
+ (src->desc().fSampleCnt > 0 || src->config() != dst->config())) {
+ return false;
+ }
+ const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture());
+ if (dstTex && dstTex->target() != GR_GL_TEXTURE_2D) {
+ return false;
+ }
+ const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture());
+ if (srcTex && srcTex->target() != GR_GL_TEXTURE_2D) {
+ return false;
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static inline bool can_copy_texsubimage(const GrSurface* dst,
+ const GrSurface* src,
+ const GrGLGpu* gpu) {
+ // Table 3.9 of the ES2 spec indicates the supported formats with CopyTexSubImage
+ // and BGRA isn't in the spec. There doesn't appear to be any extension that adds it. Perhaps
+ // many drivers would allow it to work, but ANGLE does not.
+ if (kGLES_GrGLStandard == gpu->glStandard() && gpu->glCaps().bgraIsInternalFormat() &&
+ (kBGRA_8888_GrPixelConfig == dst->config() || kBGRA_8888_GrPixelConfig == src->config())) {
+ return false;
+ }
+ const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget());
+ // If dst is multisampled (and uses an extension where there is a separate MSAA renderbuffer)
+ // then we don't want to copy to the texture but to the MSAA buffer.
+ if (dstRT && dstRT->renderFBOID() != dstRT->textureFBOID()) {
+ return false;
+ }
+ const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
+ // If the src is multisampled (and uses an extension where there is a separate MSAA
+ // renderbuffer) then it is an invalid operation to call CopyTexSubImage
+ if (srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) {
+ return false;
+ }
+
+ const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture());
+ // CopyTex(Sub)Image writes to a texture and we have no way of dynamically wrapping a RT in a
+ // texture.
+ if (!dstTex) {
+ return false;
+ }
+
+ const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture());
+
+ // Check that we could wrap the source in an FBO, that the dst is TEXTURE_2D, that no mirroring
+ // is required.
+ if (gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0) &&
+ !GrPixelConfigIsCompressed(src->config()) &&
+ (!srcTex || srcTex->target() == GR_GL_TEXTURE_2D) &&
+ dstTex->target() == GR_GL_TEXTURE_2D &&
+ dst->origin() == src->origin()) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+// Binds the surface's FBO (creating and binding a temporary FBO if the surface is not a render
+// target) for use in a copy. The viewport that the copy rect is relative to is output.
+void GrGLGpu::bindSurfaceFBOForCopy(GrSurface* surface, GrGLenum fboTarget, GrGLIRect* viewport,
+ TempFBOTarget tempFBOTarget) {
+ GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
+ if (!rt) {
+ SkASSERT(surface->asTexture());
+ GrGLuint texID = static_cast<GrGLTexture*>(surface->asTexture())->textureID();
+ GrGLenum target = static_cast<GrGLTexture*>(surface->asTexture())->target();
+ GrGLuint* tempFBOID;
+ tempFBOID = kSrc_TempFBOTarget == tempFBOTarget ? &fTempSrcFBOID : &fTempDstFBOID;
+
+ if (0 == *tempFBOID) {
+ GR_GL_CALL(this->glInterface(), GenFramebuffers(1, tempFBOID));
+ }
+
+ fStats.incRenderTargetBinds();
+ GR_GL_CALL(this->glInterface(), BindFramebuffer(fboTarget, *tempFBOID));
+ GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget,
+ GR_GL_COLOR_ATTACHMENT0,
+ target,
+ texID,
+ 0));
+ viewport->fLeft = 0;
+ viewport->fBottom = 0;
+ viewport->fWidth = surface->width();
+ viewport->fHeight = surface->height();
+ } else {
+ fStats.incRenderTargetBinds();
+ GR_GL_CALL(this->glInterface(), BindFramebuffer(fboTarget, rt->renderFBOID()));
+ *viewport = rt->getViewport();
+ }
+}
+
+void GrGLGpu::unbindTextureFBOForCopy(GrGLenum fboTarget, GrSurface* surface) {
+ // bindSurfaceFBOForCopy temporarily binds textures that are not render targets to a temporary
+ // FBO, so detach the texture from that FBO here.
+ if (!surface->asRenderTarget()) {
+ SkASSERT(surface->asTexture());
+ GrGLenum textureTarget = static_cast<GrGLTexture*>(surface->asTexture())->target();
+ GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget,
+ GR_GL_COLOR_ATTACHMENT0,
+ textureTarget,
+ 0,
+ 0));
+ }
+}
+
+bool GrGLGpu::initDescForDstCopy(const GrRenderTarget* src, GrSurfaceDesc* desc) const {
+ // If the src is a texture, we can implement the blit as a draw assuming the config is
+ // renderable.
+ if (src->asTexture() && this->caps()->isConfigRenderable(src->config(), false)) {
+ desc->fOrigin = kDefault_GrSurfaceOrigin;
+ desc->fFlags = kRenderTarget_GrSurfaceFlag;
+ desc->fConfig = src->config();
+ return true;
+ }
+
+ const GrGLTexture* srcTexture = static_cast<const GrGLTexture*>(src->asTexture());
+ if (srcTexture && srcTexture->target() != GR_GL_TEXTURE_2D) {
+ // Not supported for FBO blit or CopyTexSubImage
+ return false;
+ }
+
+ // We look for opportunities to use CopyTexSubImage, or fbo blit. If neither is
+ // possible we return false to fall back to creating a render target dst for render-to-
+ // texture. This code prefers CopyTexSubImage to fbo blit and avoids triggering temporary fbo
+ // creation. It isn't clear that avoiding temporary fbo creation is actually optimal.
+
+ GrSurfaceOrigin originForBlitFramebuffer = kDefault_GrSurfaceOrigin;
+ if (this->glCaps().blitFramebufferSupport() ==
+ GrGLCaps::kNoScalingNoMirroring_BlitFramebufferSupport) {
+ originForBlitFramebuffer = src->origin();
+ }
+
+ // Check for format issues with glCopyTexSubImage2D
+ if (kGLES_GrGLStandard == this->glStandard() && this->glCaps().bgraIsInternalFormat() &&
+ kBGRA_8888_GrPixelConfig == src->config()) {
+ // glCopyTexSubImage2D doesn't work with this config. If the bgra can be used with fbo blit
+ // then we set up for that, otherwise fail.
+ if (this->caps()->isConfigRenderable(kBGRA_8888_GrPixelConfig, false)) {
+ desc->fOrigin = originForBlitFramebuffer;
+ desc->fFlags = kRenderTarget_GrSurfaceFlag;
+ desc->fConfig = kBGRA_8888_GrPixelConfig;
+ return true;
+ }
+ return false;
+ }
+
+ const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src);
+ if (srcRT->renderFBOID() != srcRT->textureFBOID()) {
+ // It's illegal to call CopyTexSubImage2D on a MSAA renderbuffer. Set up for FBO blit or
+ // fail.
+ if (this->caps()->isConfigRenderable(src->config(), false)) {
+ desc->fOrigin = originForBlitFramebuffer;
+ desc->fFlags = kRenderTarget_GrSurfaceFlag;
+ desc->fConfig = src->config();
+ return true;
+ }
+ return false;
+ }
+
+ // We'll do a CopyTexSubImage. Make the dst a plain old texture.
+ desc->fConfig = src->config();
+ desc->fOrigin = src->origin();
+ desc->fFlags = kNone_GrSurfaceFlags;
+ return true;
+}
+
+bool GrGLGpu::onCopySurface(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ // None of our copy methods can handle a swizzle. TODO: Make copySurfaceAsDraw handle the
+ // swizzle.
+ if (this->glCaps().glslCaps()->configOutputSwizzle(src->config()) !=
+ this->glCaps().glslCaps()->configOutputSwizzle(dst->config())) {
+ return false;
+ }
+ // Don't prefer copying as a draw if the dst doesn't already have an FBO object.
+ bool preferCopy = SkToBool(dst->asRenderTarget());
+ if (preferCopy && src->asTexture()) {
+ if (this->copySurfaceAsDraw(dst, src, srcRect, dstPoint)) {
+ return true;
+ }
+ }
+
+ if (can_copy_texsubimage(dst, src, this)) {
+ this->copySurfaceAsCopyTexSubImage(dst, src, srcRect, dstPoint);
+ return true;
+ }
+
+ if (can_blit_framebuffer(dst, src, this)) {
+ return this->copySurfaceAsBlitFramebuffer(dst, src, srcRect, dstPoint);
+ }
+
+ if (!preferCopy && src->asTexture()) {
+ if (this->copySurfaceAsDraw(dst, src, srcRect, dstPoint)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool GrGLGpu::createCopyProgram(int progIdx) {
+ const GrGLSLCaps* glslCaps = this->glCaps().glslCaps();
+ static const GrSLType kSamplerTypes[3] = { kTexture2DSampler_GrSLType,
+ kTextureExternalSampler_GrSLType,
+ kTexture2DRectSampler_GrSLType };
+ if (kTextureExternalSampler_GrSLType == kSamplerTypes[progIdx] &&
+ !this->glCaps().glslCaps()->externalTextureSupport()) {
+ return false;
+ }
+ if (kTexture2DRectSampler_GrSLType == kSamplerTypes[progIdx] &&
+ !this->glCaps().rectangleTextureSupport()) {
+ return false;
+ }
+
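+ // All the copy programs draw a unit quad; lazily create the shared vertex buffer holding its
+ // four corners.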
+ if (!fCopyProgramArrayBuffer) {
+ static const GrGLfloat vdata[] = {
+ 0, 0,
+ 0, 1,
+ 1, 0,
+ 1, 1
+ };
+ fCopyProgramArrayBuffer.reset(GrGLBuffer::Create(this, sizeof(vdata), kVertex_GrBufferType,
+ kStatic_GrAccessPattern, vdata));
+ }
+ if (!fCopyProgramArrayBuffer) {
+ return false;
+ }
+
+ SkASSERT(!fCopyPrograms[progIdx].fProgram);
+ GL_CALL_RET(fCopyPrograms[progIdx].fProgram, CreateProgram());
+ if (!fCopyPrograms[progIdx].fProgram) {
+ return false;
+ }
+
+ const char* version = glslCaps->versionDeclString();
+ GrGLSLShaderVar aVertex("a_vertex", kVec2f_GrSLType, GrShaderVar::kAttribute_TypeModifier);
+ GrGLSLShaderVar uTexCoordXform("u_texCoordXform", kVec4f_GrSLType,
+ GrShaderVar::kUniform_TypeModifier);
+ GrGLSLShaderVar uPosXform("u_posXform", kVec4f_GrSLType,
+ GrShaderVar::kUniform_TypeModifier);
+ GrGLSLShaderVar uTexture("u_texture", kSamplerTypes[progIdx],
+ GrShaderVar::kUniform_TypeModifier);
+ GrGLSLShaderVar vTexCoord("v_texCoord", kVec2f_GrSLType,
+ GrShaderVar::kVaryingOut_TypeModifier);
+ GrGLSLShaderVar oFragColor("o_FragColor", kVec4f_GrSLType,
+ GrShaderVar::kOut_TypeModifier);
+
+ SkString vshaderTxt(version);
+ if (glslCaps->noperspectiveInterpolationSupport()) {
+ if (const char* extension = glslCaps->noperspectiveInterpolationExtensionString()) {
+ vshaderTxt.appendf("#extension %s : require\n", extension);
+ }
+ vTexCoord.addModifier("noperspective");
+ }
+
+ aVertex.appendDecl(glslCaps, &vshaderTxt);
+ vshaderTxt.append(";");
+ uTexCoordXform.appendDecl(glslCaps, &vshaderTxt);
+ vshaderTxt.append(";");
+ uPosXform.appendDecl(glslCaps, &vshaderTxt);
+ vshaderTxt.append(";");
+ vTexCoord.appendDecl(glslCaps, &vshaderTxt);
+ vshaderTxt.append(";");
+
+ vshaderTxt.append(
+ "// Copy Program VS\n"
+ "void main() {"
+ " v_texCoord = a_vertex.xy * u_texCoordXform.xy + u_texCoordXform.zw;"
+ " gl_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;"
+ " gl_Position.zw = vec2(0, 1);"
+ "}"
+ );
+
+ SkString fshaderTxt(version);
+ if (glslCaps->noperspectiveInterpolationSupport()) {
+ if (const char* extension = glslCaps->noperspectiveInterpolationExtensionString()) {
+ fshaderTxt.appendf("#extension %s : require\n", extension);
+ }
+ }
+ if (kSamplerTypes[progIdx] == kTextureExternalSampler_GrSLType) {
+ fshaderTxt.appendf("#extension %s : require\n",
+ glslCaps->externalTextureExtensionString());
+ }
+ GrGLSLAppendDefaultFloatPrecisionDeclaration(kDefault_GrSLPrecision, *glslCaps,
+ &fshaderTxt);
+ vTexCoord.setTypeModifier(GrShaderVar::kVaryingIn_TypeModifier);
+ vTexCoord.appendDecl(glslCaps, &fshaderTxt);
+ fshaderTxt.append(";");
+ uTexture.appendDecl(glslCaps, &fshaderTxt);
+ fshaderTxt.append(";");
+ const char* fsOutName;
+ if (glslCaps->mustDeclareFragmentShaderOutput()) {
+ oFragColor.appendDecl(glslCaps, &fshaderTxt);
+ fshaderTxt.append(";");
+ fsOutName = oFragColor.c_str();
+ } else {
+ fsOutName = "gl_FragColor";
+ }
+ fshaderTxt.appendf(
+ "// Copy Program FS\n"
+ "void main() {"
+ " %s = %s(u_texture, v_texCoord);"
+ "}",
+ fsOutName,
+ GrGLSLTexture2DFunctionName(kVec2f_GrSLType, kSamplerTypes[progIdx], this->glslGeneration())
+ );
+
+ const char* str;
+ GrGLint length;
+
+ str = vshaderTxt.c_str();
+ length = SkToInt(vshaderTxt.size());
+ GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[progIdx].fProgram,
+ GR_GL_VERTEX_SHADER, &str, &length, 1,
+ &fStats);
+
+ str = fshaderTxt.c_str();
+ length = SkToInt(fshaderTxt.size());
+ GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[progIdx].fProgram,
+ GR_GL_FRAGMENT_SHADER, &str, &length, 1,
+ &fStats);
+
+ GL_CALL(LinkProgram(fCopyPrograms[progIdx].fProgram));
+
+ GL_CALL_RET(fCopyPrograms[progIdx].fTextureUniform,
+ GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texture"));
+ GL_CALL_RET(fCopyPrograms[progIdx].fPosXformUniform,
+ GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_posXform"));
+ GL_CALL_RET(fCopyPrograms[progIdx].fTexCoordXformUniform,
+ GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texCoordXform"));
+
+ GL_CALL(BindAttribLocation(fCopyPrograms[progIdx].fProgram, 0, "a_vertex"));
+
+ GL_CALL(DeleteShader(vshader));
+ GL_CALL(DeleteShader(fshader));
+
+ return true;
+}
+
+bool GrGLGpu::createMipmapProgram(int progIdx) {
+ const bool oddWidth = SkToBool(progIdx & 0x2);
+ const bool oddHeight = SkToBool(progIdx & 0x1);
+ const int numTaps = (oddWidth ? 2 : 1) * (oddHeight ? 2 : 1);
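+    // e.g. progIdx 0 (even width and height) needs a single tap, progIdx 1 or 2 (one odd
+    // dimension) needs two taps, and progIdx 3 (both odd) needs all four taps below.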
+
+ const GrGLSLCaps* glslCaps = this->glCaps().glslCaps();
+
+ SkASSERT(!fMipmapPrograms[progIdx].fProgram);
+ GL_CALL_RET(fMipmapPrograms[progIdx].fProgram, CreateProgram());
+ if (!fMipmapPrograms[progIdx].fProgram) {
+ return false;
+ }
+
+ const char* version = glslCaps->versionDeclString();
+ GrGLSLShaderVar aVertex("a_vertex", kVec2f_GrSLType, GrShaderVar::kAttribute_TypeModifier);
+ GrGLSLShaderVar uTexCoordXform("u_texCoordXform", kVec4f_GrSLType,
+ GrShaderVar::kUniform_TypeModifier);
+ GrGLSLShaderVar uTexture("u_texture", kTexture2DSampler_GrSLType,
+ GrShaderVar::kUniform_TypeModifier);
+ // We need 1, 2, or 4 texture coordinates (depending on parity of each dimension):
+ GrGLSLShaderVar vTexCoords[] = {
+ GrGLSLShaderVar("v_texCoord0", kVec2f_GrSLType, GrShaderVar::kVaryingOut_TypeModifier),
+ GrGLSLShaderVar("v_texCoord1", kVec2f_GrSLType, GrShaderVar::kVaryingOut_TypeModifier),
+ GrGLSLShaderVar("v_texCoord2", kVec2f_GrSLType, GrShaderVar::kVaryingOut_TypeModifier),
+ GrGLSLShaderVar("v_texCoord3", kVec2f_GrSLType, GrShaderVar::kVaryingOut_TypeModifier),
+ };
+ GrGLSLShaderVar oFragColor("o_FragColor", kVec4f_GrSLType,
+ GrShaderVar::kOut_TypeModifier);
+
+ SkString vshaderTxt(version);
+ if (glslCaps->noperspectiveInterpolationSupport()) {
+ if (const char* extension = glslCaps->noperspectiveInterpolationExtensionString()) {
+ vshaderTxt.appendf("#extension %s : require\n", extension);
+ }
+ vTexCoords[0].addModifier("noperspective");
+ vTexCoords[1].addModifier("noperspective");
+ vTexCoords[2].addModifier("noperspective");
+ vTexCoords[3].addModifier("noperspective");
+ }
+
+ aVertex.appendDecl(glslCaps, &vshaderTxt);
+ vshaderTxt.append(";");
+ uTexCoordXform.appendDecl(glslCaps, &vshaderTxt);
+ vshaderTxt.append(";");
+ for (int i = 0; i < numTaps; ++i) {
+ vTexCoords[i].appendDecl(glslCaps, &vshaderTxt);
+ vshaderTxt.append(";");
+ }
+
+ vshaderTxt.append(
+ "// Mipmap Program VS\n"
+ "void main() {"
+ " gl_Position.xy = a_vertex * vec2(2, 2) - vec2(1, 1);"
+ " gl_Position.zw = vec2(0, 1);"
+ );
+
+ // Insert texture coordinate computation:
+ if (oddWidth && oddHeight) {
+ vshaderTxt.append(
+ " v_texCoord0 = a_vertex.xy * u_texCoordXform.yw;"
+ " v_texCoord1 = a_vertex.xy * u_texCoordXform.yw + vec2(u_texCoordXform.x, 0);"
+ " v_texCoord2 = a_vertex.xy * u_texCoordXform.yw + vec2(0, u_texCoordXform.z);"
+ " v_texCoord3 = a_vertex.xy * u_texCoordXform.yw + u_texCoordXform.xz;"
+ );
+ } else if (oddWidth) {
+ vshaderTxt.append(
+ " v_texCoord0 = a_vertex.xy * vec2(u_texCoordXform.y, 1);"
+ " v_texCoord1 = a_vertex.xy * vec2(u_texCoordXform.y, 1) + vec2(u_texCoordXform.x, 0);"
+ );
+ } else if (oddHeight) {
+ vshaderTxt.append(
+ " v_texCoord0 = a_vertex.xy * vec2(1, u_texCoordXform.w);"
+ " v_texCoord1 = a_vertex.xy * vec2(1, u_texCoordXform.w) + vec2(0, u_texCoordXform.z);"
+ );
+ } else {
+ vshaderTxt.append(
+ " v_texCoord0 = a_vertex.xy;"
+ );
+ }
+
+ vshaderTxt.append("}");
+
+ SkString fshaderTxt(version);
+ if (glslCaps->noperspectiveInterpolationSupport()) {
+ if (const char* extension = glslCaps->noperspectiveInterpolationExtensionString()) {
+ fshaderTxt.appendf("#extension %s : require\n", extension);
+ }
+ }
+ GrGLSLAppendDefaultFloatPrecisionDeclaration(kDefault_GrSLPrecision, *glslCaps,
+ &fshaderTxt);
+ for (int i = 0; i < numTaps; ++i) {
+ vTexCoords[i].setTypeModifier(GrShaderVar::kVaryingIn_TypeModifier);
+ vTexCoords[i].appendDecl(glslCaps, &fshaderTxt);
+ fshaderTxt.append(";");
+ }
+ uTexture.appendDecl(glslCaps, &fshaderTxt);
+ fshaderTxt.append(";");
+ const char* fsOutName;
+ if (glslCaps->mustDeclareFragmentShaderOutput()) {
+ oFragColor.appendDecl(glslCaps, &fshaderTxt);
+ fshaderTxt.append(";");
+ fsOutName = oFragColor.c_str();
+ } else {
+ fsOutName = "gl_FragColor";
+ }
+ const char* sampleFunction = GrGLSLTexture2DFunctionName(kVec2f_GrSLType,
+ kTexture2DSampler_GrSLType,
+ this->glslGeneration());
+ fshaderTxt.append(
+ "// Mipmap Program FS\n"
+ "void main() {"
+ );
+
+ if (oddWidth && oddHeight) {
+ fshaderTxt.appendf(
+ " %s = (%s(u_texture, v_texCoord0) + %s(u_texture, v_texCoord1) + "
+ " %s(u_texture, v_texCoord2) + %s(u_texture, v_texCoord3)) * 0.25;",
+ fsOutName, sampleFunction, sampleFunction, sampleFunction, sampleFunction
+ );
+ } else if (oddWidth || oddHeight) {
+ fshaderTxt.appendf(
+ " %s = (%s(u_texture, v_texCoord0) + %s(u_texture, v_texCoord1)) * 0.5;",
+ fsOutName, sampleFunction, sampleFunction
+ );
+ } else {
+ fshaderTxt.appendf(
+ " %s = %s(u_texture, v_texCoord0);",
+ fsOutName, sampleFunction
+ );
+ }
+
+ fshaderTxt.append("}");
+
+ const char* str;
+ GrGLint length;
+
+ str = vshaderTxt.c_str();
+ length = SkToInt(vshaderTxt.size());
+ GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fMipmapPrograms[progIdx].fProgram,
+ GR_GL_VERTEX_SHADER, &str, &length, 1,
+ &fStats);
+
+ str = fshaderTxt.c_str();
+ length = SkToInt(fshaderTxt.size());
+ GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fMipmapPrograms[progIdx].fProgram,
+ GR_GL_FRAGMENT_SHADER, &str, &length, 1,
+ &fStats);
+
+ GL_CALL(LinkProgram(fMipmapPrograms[progIdx].fProgram));
+
+ GL_CALL_RET(fMipmapPrograms[progIdx].fTextureUniform,
+ GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texture"));
+ GL_CALL_RET(fMipmapPrograms[progIdx].fTexCoordXformUniform,
+ GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texCoordXform"));
+
+ GL_CALL(BindAttribLocation(fMipmapPrograms[progIdx].fProgram, 0, "a_vertex"));
+
+ GL_CALL(DeleteShader(vshader));
+ GL_CALL(DeleteShader(fshader));
+
+ return true;
+}
+
+bool GrGLGpu::createWireRectProgram() {
+ if (!fWireRectArrayBuffer) {
+ static const GrGLfloat vdata[] = {
+ 0, 0,
+ 0, 1,
+ 1, 1,
+ 1, 0
+ };
+ fWireRectArrayBuffer.reset(GrGLBuffer::Create(this, sizeof(vdata), kVertex_GrBufferType,
+ kStatic_GrAccessPattern, vdata));
+ if (!fWireRectArrayBuffer) {
+ return false;
+ }
+ }
+
+ SkASSERT(!fWireRectProgram.fProgram);
+ GL_CALL_RET(fWireRectProgram.fProgram, CreateProgram());
+ if (!fWireRectProgram.fProgram) {
+ return false;
+ }
+
+ GrGLSLShaderVar uColor("u_color", kVec4f_GrSLType, GrShaderVar::kUniform_TypeModifier);
+ GrGLSLShaderVar uRect("u_rect", kVec4f_GrSLType, GrShaderVar::kUniform_TypeModifier);
+ GrGLSLShaderVar aVertex("a_vertex", kVec2f_GrSLType, GrShaderVar::kAttribute_TypeModifier);
+ const char* version = this->glCaps().glslCaps()->versionDeclString();
+
+ // The rect uniform specifies the rectangle in NDC space as a vec4 (left,top,right,bottom). The
+ // program is used with a vbo containing the unit square. Vertices are computed from the rect
+ // uniform using the 4 vbo vertices.
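+    // For example, the unit-square vertex (1, 0) maps to (u_rect.z, u_rect.y), i.e. the
+    // (right, top) corner of the rectangle.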
+ SkString vshaderTxt(version);
+ aVertex.appendDecl(this->glCaps().glslCaps(), &vshaderTxt);
+ vshaderTxt.append(";");
+ uRect.appendDecl(this->glCaps().glslCaps(), &vshaderTxt);
+ vshaderTxt.append(";");
+ vshaderTxt.append(
+ "// Wire Rect Program VS\n"
+ "void main() {"
+ " gl_Position.x = u_rect.x + a_vertex.x * (u_rect.z - u_rect.x);"
+ " gl_Position.y = u_rect.y + a_vertex.y * (u_rect.w - u_rect.y);"
+ " gl_Position.zw = vec2(0, 1);"
+ "}"
+ );
+
+ GrGLSLShaderVar oFragColor("o_FragColor", kVec4f_GrSLType, GrShaderVar::kOut_TypeModifier);
+
+ SkString fshaderTxt(version);
+ GrGLSLAppendDefaultFloatPrecisionDeclaration(kDefault_GrSLPrecision,
+ *this->glCaps().glslCaps(),
+ &fshaderTxt);
+ uColor.appendDecl(this->glCaps().glslCaps(), &fshaderTxt);
+ fshaderTxt.append(";");
+ const char* fsOutName;
+ if (this->glCaps().glslCaps()->mustDeclareFragmentShaderOutput()) {
+ oFragColor.appendDecl(this->glCaps().glslCaps(), &fshaderTxt);
+ fshaderTxt.append(";");
+ fsOutName = oFragColor.c_str();
+ } else {
+ fsOutName = "gl_FragColor";
+ }
+ fshaderTxt.appendf(
+        "// Wire Rect Program FS\n"
+ "void main() {"
+ " %s = %s;"
+ "}",
+ fsOutName,
+ uColor.c_str()
+ );
+
+ const char* str;
+ GrGLint length;
+
+ str = vshaderTxt.c_str();
+ length = SkToInt(vshaderTxt.size());
+ GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fWireRectProgram.fProgram,
+ GR_GL_VERTEX_SHADER, &str, &length, 1,
+ &fStats);
+
+ str = fshaderTxt.c_str();
+ length = SkToInt(fshaderTxt.size());
+ GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fWireRectProgram.fProgram,
+ GR_GL_FRAGMENT_SHADER, &str, &length, 1,
+ &fStats);
+
+ GL_CALL(LinkProgram(fWireRectProgram.fProgram));
+
+ GL_CALL_RET(fWireRectProgram.fColorUniform,
+ GetUniformLocation(fWireRectProgram.fProgram, "u_color"));
+ GL_CALL_RET(fWireRectProgram.fRectUniform,
+ GetUniformLocation(fWireRectProgram.fProgram, "u_rect"));
+ GL_CALL(BindAttribLocation(fWireRectProgram.fProgram, 0, "a_vertex"));
+
+ GL_CALL(DeleteShader(vshader));
+ GL_CALL(DeleteShader(fshader));
+
+ return true;
+}
+
+void GrGLGpu::drawDebugWireRect(GrRenderTarget* rt, const SkIRect& rect, GrColor color) {
+ // TODO: This should swizzle the output to match dst's config, though it is a debugging
+ // visualization.
+
+ this->handleDirtyContext();
+ if (!fWireRectProgram.fProgram) {
+ if (!this->createWireRectProgram()) {
+ SkDebugf("Failed to create wire rect program.\n");
+ return;
+ }
+ }
+
+ int w = rt->width();
+ int h = rt->height();
+
+    // Compute the edges of the rectangle (left,top,right,bottom) in NDC space. Must consider
+    // whether the render target is flipped or not.
+ GrGLfloat edges[4];
+ edges[0] = SkIntToScalar(rect.fLeft) + 0.5f;
+ edges[2] = SkIntToScalar(rect.fRight) - 0.5f;
+ if (kBottomLeft_GrSurfaceOrigin == rt->origin()) {
+ edges[1] = h - (SkIntToScalar(rect.fTop) + 0.5f);
+ edges[3] = h - (SkIntToScalar(rect.fBottom) - 0.5f);
+ } else {
+ edges[1] = SkIntToScalar(rect.fTop) + 0.5f;
+ edges[3] = SkIntToScalar(rect.fBottom) - 0.5f;
+ }
+ edges[0] = 2 * edges[0] / w - 1.0f;
+ edges[1] = 2 * edges[1] / h - 1.0f;
+ edges[2] = 2 * edges[2] / w - 1.0f;
+ edges[3] = 2 * edges[3] / h - 1.0f;
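+    // (A left edge at pixel column 0 thus becomes x = 2 * 0.5 / w - 1, half a pixel inside the
+    // left clip plane, so the line loop lands on the centers of the border pixels.)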
+
+ GrGLfloat channels[4];
+ static const GrGLfloat scale255 = 1.f / 255.f;
+ channels[0] = GrColorUnpackR(color) * scale255;
+ channels[1] = GrColorUnpackG(color) * scale255;
+ channels[2] = GrColorUnpackB(color) * scale255;
+ channels[3] = GrColorUnpackA(color) * scale255;
+
+ GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(rt->asRenderTarget());
+ this->flushRenderTarget(glRT, &rect);
+
+ GL_CALL(UseProgram(fWireRectProgram.fProgram));
+ fHWProgramID = fWireRectProgram.fProgram;
+
+ fHWVertexArrayState.setVertexArrayID(this, 0);
+
+ GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
+ attribs->set(this, 0, fWireRectArrayBuffer, kVec2f_GrVertexAttribType, 2 * sizeof(GrGLfloat),
+ 0);
+ attribs->disableUnusedArrays(this, 0x1);
+
+ GL_CALL(Uniform4fv(fWireRectProgram.fRectUniform, 1, edges));
+ GL_CALL(Uniform4fv(fWireRectProgram.fColorUniform, 1, channels));
+
+ GrXferProcessor::BlendInfo blendInfo;
+ blendInfo.reset();
+ this->flushBlend(blendInfo, GrSwizzle::RGBA());
+ this->flushColorWrite(true);
+ this->flushDrawFace(GrDrawFace::kBoth);
+ this->flushHWAAState(glRT, false, false);
+ this->disableScissor();
+ this->disableWindowRectangles();
+ GrStencilSettings stencil;
+ stencil.setDisabled();
+ this->flushStencil(stencil);
+
+ GL_CALL(DrawArrays(GR_GL_LINE_LOOP, 0, 4));
+}
+
+
+bool GrGLGpu::copySurfaceAsDraw(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ GrGLTexture* srcTex = static_cast<GrGLTexture*>(src->asTexture());
+ int progIdx = TextureTargetToCopyProgramIdx(srcTex->target());
+
+ if (!fCopyPrograms[progIdx].fProgram) {
+ if (!this->createCopyProgram(progIdx)) {
+ SkDebugf("Failed to create copy program.\n");
+ return false;
+ }
+ }
+
+ int w = srcRect.width();
+ int h = srcRect.height();
+
+ GrTextureParams params(SkShader::kClamp_TileMode, GrTextureParams::kNone_FilterMode);
+ this->bindTexture(0, params, true, srcTex);
+
+ GrGLIRect dstVP;
+ this->bindSurfaceFBOForCopy(dst, GR_GL_FRAMEBUFFER, &dstVP, kDst_TempFBOTarget);
+ this->flushViewport(dstVP);
+ fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
+
+ SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, w, h);
+
+ GL_CALL(UseProgram(fCopyPrograms[progIdx].fProgram));
+ fHWProgramID = fCopyPrograms[progIdx].fProgram;
+
+ fHWVertexArrayState.setVertexArrayID(this, 0);
+
+ GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
+ attribs->set(this, 0, fCopyProgramArrayBuffer, kVec2f_GrVertexAttribType, 2 * sizeof(GrGLfloat),
+ 0);
+ attribs->disableUnusedArrays(this, 0x1);
+
+ // dst rect edges in NDC (-1 to 1)
+ int dw = dst->width();
+ int dh = dst->height();
+ GrGLfloat dx0 = 2.f * dstPoint.fX / dw - 1.f;
+ GrGLfloat dx1 = 2.f * (dstPoint.fX + w) / dw - 1.f;
+ GrGLfloat dy0 = 2.f * dstPoint.fY / dh - 1.f;
+ GrGLfloat dy1 = 2.f * (dstPoint.fY + h) / dh - 1.f;
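+    // e.g. copying a 50-wide src rect to x = 0 of a 100-wide dst gives dx0 = -1 and dx1 = 0.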
+ if (kBottomLeft_GrSurfaceOrigin == dst->origin()) {
+ dy0 = -dy0;
+ dy1 = -dy1;
+ }
+
+ GrGLfloat sx0 = (GrGLfloat)srcRect.fLeft;
+ GrGLfloat sx1 = (GrGLfloat)(srcRect.fLeft + w);
+ GrGLfloat sy0 = (GrGLfloat)srcRect.fTop;
+ GrGLfloat sy1 = (GrGLfloat)(srcRect.fTop + h);
+ int sh = src->height();
+ if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
+ sy0 = sh - sy0;
+ sy1 = sh - sy1;
+ }
+ // src rect edges in normalized texture space (0 to 1) unless we're using a RECTANGLE texture.
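+    // (RECTANGLE textures are sampled with unnormalized texel coordinates, so the divide by the
+    // texture dimensions below is skipped for them.)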
+ GrGLenum srcTarget = srcTex->target();
+ if (GR_GL_TEXTURE_RECTANGLE != srcTarget) {
+ int sw = src->width();
+ sx0 /= sw;
+ sx1 /= sw;
+ sy0 /= sh;
+ sy1 /= sh;
+ }
+
+ GL_CALL(Uniform4f(fCopyPrograms[progIdx].fPosXformUniform, dx1 - dx0, dy1 - dy0, dx0, dy0));
+ GL_CALL(Uniform4f(fCopyPrograms[progIdx].fTexCoordXformUniform,
+ sx1 - sx0, sy1 - sy0, sx0, sy0));
+ GL_CALL(Uniform1i(fCopyPrograms[progIdx].fTextureUniform, 0));
+
+ GrXferProcessor::BlendInfo blendInfo;
+ blendInfo.reset();
+ this->flushBlend(blendInfo, GrSwizzle::RGBA());
+ this->flushColorWrite(true);
+ this->flushDrawFace(GrDrawFace::kBoth);
+ this->flushHWAAState(nullptr, false, false);
+ this->disableScissor();
+ this->disableWindowRectangles();
+ GrStencilSettings stencil;
+ stencil.setDisabled();
+ this->flushStencil(stencil);
+
+ GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
+ this->unbindTextureFBOForCopy(GR_GL_FRAMEBUFFER, dst);
+ this->didWriteToSurface(dst, &dstRect);
+
+ return true;
+}
+
+void GrGLGpu::copySurfaceAsCopyTexSubImage(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ SkASSERT(can_copy_texsubimage(dst, src, this));
+ GrGLIRect srcVP;
+ this->bindSurfaceFBOForCopy(src, GR_GL_FRAMEBUFFER, &srcVP, kSrc_TempFBOTarget);
+ GrGLTexture* dstTex = static_cast<GrGLTexture *>(dst->asTexture());
+ SkASSERT(dstTex);
+ // We modified the bound FBO
+ fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
+ GrGLIRect srcGLRect;
+ srcGLRect.setRelativeTo(srcVP,
+ srcRect.fLeft,
+ srcRect.fTop,
+ srcRect.width(),
+ srcRect.height(),
+ src->origin());
+
+ this->setScratchTextureUnit();
+ GL_CALL(BindTexture(dstTex->target(), dstTex->textureID()));
+ GrGLint dstY;
+ if (kBottomLeft_GrSurfaceOrigin == dst->origin()) {
+ dstY = dst->height() - (dstPoint.fY + srcGLRect.fHeight);
+ } else {
+ dstY = dstPoint.fY;
+ }
+ GL_CALL(CopyTexSubImage2D(dstTex->target(), 0,
+ dstPoint.fX, dstY,
+ srcGLRect.fLeft, srcGLRect.fBottom,
+ srcGLRect.fWidth, srcGLRect.fHeight));
+ this->unbindTextureFBOForCopy(GR_GL_FRAMEBUFFER, src);
+ SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
+ srcRect.width(), srcRect.height());
+ this->didWriteToSurface(dst, &dstRect);
+}
+
+bool GrGLGpu::copySurfaceAsBlitFramebuffer(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ SkASSERT(can_blit_framebuffer(dst, src, this));
+ SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
+ srcRect.width(), srcRect.height());
+ if (dst == src) {
+ if (SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect)) {
+ return false;
+ }
+ }
+
+ GrGLIRect dstVP;
+ GrGLIRect srcVP;
+ this->bindSurfaceFBOForCopy(dst, GR_GL_DRAW_FRAMEBUFFER, &dstVP, kDst_TempFBOTarget);
+ this->bindSurfaceFBOForCopy(src, GR_GL_READ_FRAMEBUFFER, &srcVP, kSrc_TempFBOTarget);
+ // We modified the bound FBO
+ fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
+ GrGLIRect srcGLRect;
+ GrGLIRect dstGLRect;
+ srcGLRect.setRelativeTo(srcVP,
+ srcRect.fLeft,
+ srcRect.fTop,
+ srcRect.width(),
+ srcRect.height(),
+ src->origin());
+ dstGLRect.setRelativeTo(dstVP,
+ dstRect.fLeft,
+ dstRect.fTop,
+ dstRect.width(),
+ dstRect.height(),
+ dst->origin());
+
+ // BlitFrameBuffer respects the scissor, so disable it.
+ this->disableScissor();
+ this->disableWindowRectangles();
+
+ GrGLint srcY0;
+ GrGLint srcY1;
+ // Does the blit need to y-mirror or not?
+ if (src->origin() == dst->origin()) {
+ srcY0 = srcGLRect.fBottom;
+ srcY1 = srcGLRect.fBottom + srcGLRect.fHeight;
+ } else {
+ srcY0 = srcGLRect.fBottom + srcGLRect.fHeight;
+ srcY1 = srcGLRect.fBottom;
+ }
+ GL_CALL(BlitFramebuffer(srcGLRect.fLeft,
+ srcY0,
+ srcGLRect.fLeft + srcGLRect.fWidth,
+ srcY1,
+ dstGLRect.fLeft,
+ dstGLRect.fBottom,
+ dstGLRect.fLeft + dstGLRect.fWidth,
+ dstGLRect.fBottom + dstGLRect.fHeight,
+ GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
+ this->unbindTextureFBOForCopy(GR_GL_DRAW_FRAMEBUFFER, dst);
+ this->unbindTextureFBOForCopy(GR_GL_READ_FRAMEBUFFER, src);
+ this->didWriteToSurface(dst, &dstRect);
+ return true;
+}
+
+// Manual implementation of mipmap generation, to work around driver bugs w/sRGB.
+// Uses draw calls to do a series of downsample operations to successive mips.
+// If this returns false, then the calling code falls back to using glGenerateMipmap.
+bool GrGLGpu::generateMipmap(GrGLTexture* texture, bool gammaCorrect) {
+ // Our iterative downsample requires the ability to limit which level we're sampling:
+ if (!this->glCaps().doManualMipmapping()) {
+ return false;
+ }
+
+ // Mipmaps are only supported on 2D textures:
+ if (GR_GL_TEXTURE_2D != texture->target()) {
+ return false;
+ }
+
+ // We need to be able to render to the texture for this to work:
+ if (!this->caps()->isConfigRenderable(texture->config(), false)) {
+ return false;
+ }
+
+ // If we're mipping an sRGB texture, we need to ensure FB sRGB is correct:
+ if (GrPixelConfigIsSRGB(texture->config())) {
+ // If we have write-control, just set the state that we want:
+ if (this->glCaps().srgbWriteControl()) {
+ this->flushFramebufferSRGB(gammaCorrect);
+ } else if (!gammaCorrect) {
+ // If we don't have write-control we can't do non-gamma-correct mipmapping:
+ return false;
+ }
+ }
+
+ int width = texture->width();
+ int height = texture->height();
+ int levelCount = SkMipMap::ComputeLevelCount(width, height) + 1;
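+    // e.g. a 100x50 texture ends up with levelCount == 7: the base level plus the six halved
+    // levels 50x25, 25x12, 12x6, 6x3, 3x1, and 1x1.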
+
+ // Define all mips, if we haven't previously done so:
+ if (0 == texture->texturePriv().maxMipMapLevel()) {
+ GrGLenum internalFormat;
+ GrGLenum externalFormat;
+ GrGLenum externalType;
+ if (!this->glCaps().getTexImageFormats(texture->config(), texture->config(),
+ &internalFormat, &externalFormat, &externalType)) {
+ return false;
+ }
+
+ for (GrGLint level = 1; level < levelCount; ++level) {
+ // Define the next mip:
+ width = SkTMax(1, width / 2);
+ height = SkTMax(1, height / 2);
+ GL_ALLOC_CALL(this->glInterface(), TexImage2D(GR_GL_TEXTURE_2D, level, internalFormat,
+ width, height, 0,
+ externalFormat, externalType, nullptr));
+ }
+ }
+
+ // Create (if necessary), then bind temporary FBO:
+ if (0 == fTempDstFBOID) {
+ GL_CALL(GenFramebuffers(1, &fTempDstFBOID));
+ }
+ GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, fTempDstFBOID));
+ fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
+
+ // Bind the texture, to get things configured for filtering.
+ // We'll be changing our base level further below:
+ this->setTextureUnit(0);
+ GrTextureParams params(SkShader::kClamp_TileMode, GrTextureParams::kBilerp_FilterMode);
+ this->bindTexture(0, params, gammaCorrect, texture);
+
+ // Vertex data:
+ if (!fMipmapProgramArrayBuffer) {
+ static const GrGLfloat vdata[] = {
+ 0, 0,
+ 0, 1,
+ 1, 0,
+ 1, 1
+ };
+ fMipmapProgramArrayBuffer.reset(GrGLBuffer::Create(this, sizeof(vdata),
+ kVertex_GrBufferType,
+ kStatic_GrAccessPattern, vdata));
+ }
+ if (!fMipmapProgramArrayBuffer) {
+ return false;
+ }
+
+ fHWVertexArrayState.setVertexArrayID(this, 0);
+
+ GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
+ attribs->set(this, 0, fMipmapProgramArrayBuffer, kVec2f_GrVertexAttribType,
+ 2 * sizeof(GrGLfloat), 0);
+ attribs->disableUnusedArrays(this, 0x1);
+
+ // Set "simple" state once:
+ GrXferProcessor::BlendInfo blendInfo;
+ blendInfo.reset();
+ this->flushBlend(blendInfo, GrSwizzle::RGBA());
+ this->flushColorWrite(true);
+ this->flushDrawFace(GrDrawFace::kBoth);
+ this->flushHWAAState(nullptr, false, false);
+ this->disableScissor();
+ this->disableWindowRectangles();
+ GrStencilSettings stencil;
+ stencil.setDisabled();
+ this->flushStencil(stencil);
+
+ // Do all the blits:
+ width = texture->width();
+ height = texture->height();
+ GrGLIRect viewport;
+ viewport.fLeft = 0;
+ viewport.fBottom = 0;
+ for (GrGLint level = 1; level < levelCount; ++level) {
+ // Get and bind the program for this particular downsample (filter shape can vary):
+ int progIdx = TextureSizeToMipmapProgramIdx(width, height);
+ if (!fMipmapPrograms[progIdx].fProgram) {
+ if (!this->createMipmapProgram(progIdx)) {
+ SkDebugf("Failed to create mipmap program.\n");
+ return false;
+ }
+ }
+ GL_CALL(UseProgram(fMipmapPrograms[progIdx].fProgram));
+ fHWProgramID = fMipmapPrograms[progIdx].fProgram;
+
+ // Texcoord uniform is expected to contain (1/w, (w-1)/w, 1/h, (h-1)/h)
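+        // e.g. for a 7x5 source level this is (1/7, 6/7, 1/5, 4/5); the vertex shader uses the
+        // .x/.z components to offset the extra taps by exactly one source texel.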
+ const float invWidth = 1.0f / width;
+ const float invHeight = 1.0f / height;
+ GL_CALL(Uniform4f(fMipmapPrograms[progIdx].fTexCoordXformUniform,
+ invWidth, (width - 1) * invWidth, invHeight, (height - 1) * invHeight));
+ GL_CALL(Uniform1i(fMipmapPrograms[progIdx].fTextureUniform, 0));
+
+ // Only sample from previous mip
+ GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_BASE_LEVEL, level - 1));
+
+ GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
+ GR_GL_TEXTURE_2D, texture->textureID(), level));
+
+ width = SkTMax(1, width / 2);
+ height = SkTMax(1, height / 2);
+ viewport.fWidth = width;
+ viewport.fHeight = height;
+ this->flushViewport(viewport);
+
+ GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
+ }
+
+ // Unbind:
+ GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
+ GR_GL_TEXTURE_2D, 0, 0));
+
+ return true;
+}
+
+void GrGLGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings& stencil,
+ int* effectiveSampleCnt, SamplePattern* samplePattern) {
+ SkASSERT(!rt->isMixedSampled() || rt->renderTargetPriv().getStencilAttachment() ||
+ stencil.isDisabled());
+
+ this->flushStencil(stencil);
+ this->flushHWAAState(rt, true, !stencil.isDisabled());
+ this->flushRenderTarget(static_cast<GrGLRenderTarget*>(rt), &SkIRect::EmptyIRect());
+
+ if (0 != this->caps()->maxRasterSamples()) {
+ GR_GL_GetIntegerv(this->glInterface(), GR_GL_EFFECTIVE_RASTER_SAMPLES, effectiveSampleCnt);
+ } else {
+ GR_GL_GetIntegerv(this->glInterface(), GR_GL_SAMPLES, effectiveSampleCnt);
+ }
+
+ SkASSERT(*effectiveSampleCnt >= rt->desc().fSampleCnt);
+
+ if (this->caps()->sampleLocationsSupport()) {
+ samplePattern->reset(*effectiveSampleCnt);
+ for (int i = 0; i < *effectiveSampleCnt; ++i) {
+ GrGLfloat pos[2];
+ GL_CALL(GetMultisamplefv(GR_GL_SAMPLE_POSITION, i, pos));
+ if (kTopLeft_GrSurfaceOrigin == rt->origin()) {
+ (*samplePattern)[i].set(pos[0], pos[1]);
+ } else {
+ (*samplePattern)[i].set(pos[0], 1 - pos[1]);
+ }
+ }
+ }
+}
+
+void GrGLGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType type) {
+ SkASSERT(type);
+ switch (type) {
+ case kTexture_GrXferBarrierType: {
+ GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt);
+ if (glrt->textureFBOID() != glrt->renderFBOID()) {
+ // The render target uses separate storage so no need for glTextureBarrier.
+ // FIXME: The render target will resolve automatically when its texture is bound,
+ // but we could resolve only the bounds that will be read if we do it here instead.
+ return;
+ }
+ SkASSERT(this->caps()->textureBarrierSupport());
+ GL_CALL(TextureBarrier());
+ return;
+ }
+ case kBlend_GrXferBarrierType:
+ SkASSERT(GrCaps::kAdvanced_BlendEquationSupport ==
+ this->caps()->blendEquationSupport());
+ GL_CALL(BlendBarrier());
+ return;
+ default: break; // placate compiler warnings that kNone not handled
+ }
+}
+
+GrBackendObject GrGLGpu::createTestingOnlyBackendTexture(void* pixels, int w, int h,
+ GrPixelConfig config, bool /*isRT*/) {
+ if (!this->caps()->isConfigTexturable(config)) {
+ return false;
+ }
+ GrGLTextureInfo* info = new GrGLTextureInfo;
+ info->fTarget = GR_GL_TEXTURE_2D;
+ info->fID = 0;
+ GL_CALL(GenTextures(1, &info->fID));
+ GL_CALL(ActiveTexture(GR_GL_TEXTURE0));
+ GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1));
+ GL_CALL(BindTexture(info->fTarget, info->fID));
+ fHWBoundTextureUniqueIDs[0] = 0;
+ GL_CALL(TexParameteri(info->fTarget, GR_GL_TEXTURE_MAG_FILTER, GR_GL_NEAREST));
+ GL_CALL(TexParameteri(info->fTarget, GR_GL_TEXTURE_MIN_FILTER, GR_GL_NEAREST));
+ GL_CALL(TexParameteri(info->fTarget, GR_GL_TEXTURE_WRAP_S, GR_GL_CLAMP_TO_EDGE));
+ GL_CALL(TexParameteri(info->fTarget, GR_GL_TEXTURE_WRAP_T, GR_GL_CLAMP_TO_EDGE));
+
+ GrGLenum internalFormat;
+ GrGLenum externalFormat;
+ GrGLenum externalType;
+
+ if (!this->glCaps().getTexImageFormats(config, config, &internalFormat, &externalFormat,
+ &externalType)) {
+ delete info;
+#ifdef SK_IGNORE_GL_TEXTURE_TARGET
+ return 0;
+#else
+ return reinterpret_cast<GrBackendObject>(nullptr);
+#endif
+ }
+
+ GL_CALL(TexImage2D(info->fTarget, 0, internalFormat, w, h, 0, externalFormat,
+ externalType, pixels));
+
+#ifdef SK_IGNORE_GL_TEXTURE_TARGET
+ GrGLuint id = info->fID;
+ delete info;
+ return id;
+#else
+ return reinterpret_cast<GrBackendObject>(info);
+#endif
+}
+
+bool GrGLGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
+#ifdef SK_IGNORE_GL_TEXTURE_TARGET
+ GrGLuint texID = (GrGLuint)id;
+#else
+ GrGLuint texID = reinterpret_cast<const GrGLTextureInfo*>(id)->fID;
+#endif
+
+ GrGLboolean result;
+ GL_CALL_RET(result, IsTexture(texID));
+
+ return (GR_GL_TRUE == result);
+}
+
+void GrGLGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandonTexture) {
+#ifdef SK_IGNORE_GL_TEXTURE_TARGET
+ GrGLuint texID = (GrGLuint)id;
+#else
+ const GrGLTextureInfo* info = reinterpret_cast<const GrGLTextureInfo*>(id);
+ GrGLuint texID = info->fID;
+#endif
+
+ if (!abandonTexture) {
+ GL_CALL(DeleteTextures(1, &texID));
+ }
+
+#ifndef SK_IGNORE_GL_TEXTURE_TARGET
+ delete info;
+#endif
+}
+
+void GrGLGpu::resetShaderCacheForTesting() const {
+ fProgramCache->abandon();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrGLAttribArrayState* GrGLGpu::HWVertexArrayState::bindInternalVertexArray(GrGLGpu* gpu,
+ const GrBuffer* ibuf) {
+ GrGLAttribArrayState* attribState;
+
+ if (gpu->glCaps().isCoreProfile()) {
+ if (!fCoreProfileVertexArray) {
+ GrGLuint arrayID;
+ GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID));
+ int attrCount = gpu->glCaps().maxVertexAttributes();
+ fCoreProfileVertexArray = new GrGLVertexArray(arrayID, attrCount);
+ }
+ if (ibuf) {
+ attribState = fCoreProfileVertexArray->bindWithIndexBuffer(gpu, ibuf);
+ } else {
+ attribState = fCoreProfileVertexArray->bind(gpu);
+ }
+ } else {
+ if (ibuf) {
+ // bindBuffer implicitly binds VAO 0 when binding an index buffer.
+ gpu->bindBuffer(kIndex_GrBufferType, ibuf);
+ } else {
+ this->setVertexArrayID(gpu, 0);
+ }
+ int attrCount = gpu->glCaps().maxVertexAttributes();
+ if (fDefaultVertexArrayAttribState.count() != attrCount) {
+ fDefaultVertexArrayAttribState.resize(attrCount);
+ }
+ attribState = &fDefaultVertexArrayAttribState;
+ }
+ return attribState;
+}
+
+bool GrGLGpu::onMakeCopyForTextureParams(GrTexture* texture, const GrTextureParams& textureParams,
+ GrTextureProducer::CopyParams* copyParams) const {
+ if (textureParams.isTiled() ||
+ GrTextureParams::kMipMap_FilterMode == textureParams.filterMode()) {
+ GrGLTexture* glTexture = static_cast<GrGLTexture*>(texture);
+ if (GR_GL_TEXTURE_EXTERNAL == glTexture->target() ||
+ GR_GL_TEXTURE_RECTANGLE == glTexture->target()) {
+ copyParams->fFilter = GrTextureParams::kNone_FilterMode;
+ copyParams->fWidth = texture->width();
+ copyParams->fHeight = texture->height();
+ return true;
+ }
+ }
+ return false;
+}
+
+GrFence SK_WARN_UNUSED_RESULT GrGLGpu::insertFence() const {
+ GrGLsync fence;
+ GL_CALL_RET(fence, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
+ return (GrFence)fence;
+}
+
+bool GrGLGpu::waitFence(GrFence fence, uint64_t timeout) const {
+ GrGLenum result;
+ GL_CALL_RET(result, ClientWaitSync((GrGLsync)fence, GR_GL_SYNC_FLUSH_COMMANDS_BIT, timeout));
+ return (GR_GL_CONDITION_SATISFIED == result);
+}
+
+void GrGLGpu::deleteFence(GrFence fence) const {
+ GL_CALL(DeleteSync((GrGLsync)fence));
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLGpu.h b/gfx/skia/skia/src/gpu/gl/GrGLGpu.h
new file mode 100644
index 000000000..7ba79b2ee
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLGpu.h
@@ -0,0 +1,651 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLGpu_DEFINED
+#define GrGLGpu_DEFINED
+
+#include "GrGLContext.h"
+#include "GrGLIRect.h"
+#include "GrGLPathRendering.h"
+#include "GrGLProgram.h"
+#include "GrGLRenderTarget.h"
+#include "GrGLStencilAttachment.h"
+#include "GrGLTexture.h"
+#include "GrGLVertexArray.h"
+#include "GrGpu.h"
+#include "GrTypes.h"
+#include "GrWindowRectsState.h"
+#include "GrXferProcessor.h"
+#include "SkTArray.h"
+#include "SkTypes.h"
+
+class GrGLBuffer;
+class GrPipeline;
+class GrNonInstancedMesh;
+class GrSwizzle;
+
+namespace gr_instanced { class GLInstancedRendering; }
+
+#ifdef SK_DEBUG
+#define PROGRAM_CACHE_STATS
+#endif
+
+class GrGLGpu final : public GrGpu {
+public:
+ static GrGpu* Create(GrBackendContext backendContext, const GrContextOptions& options,
+ GrContext* context);
+ ~GrGLGpu() override;
+
+ void disconnect(DisconnectType) override;
+
+ const GrGLContext& glContext() const { return *fGLContext; }
+
+ const GrGLInterface* glInterface() const { return fGLContext->interface(); }
+ const GrGLContextInfo& ctxInfo() const { return *fGLContext; }
+ GrGLStandard glStandard() const { return fGLContext->standard(); }
+ GrGLVersion glVersion() const { return fGLContext->version(); }
+ GrGLSLGeneration glslGeneration() const { return fGLContext->glslGeneration(); }
+ const GrGLCaps& glCaps() const { return *fGLContext->caps(); }
+
+ GrGLPathRendering* glPathRendering() {
+ SkASSERT(glCaps().shaderCaps()->pathRenderingSupport());
+ return static_cast<GrGLPathRendering*>(pathRendering());
+ }
+
+ // Used by GrGLProgram to configure OpenGL state.
+ void bindTexture(int unitIdx, const GrTextureParams& params, bool allowSRGBInputs,
+ GrGLTexture* texture);
+
+ void bindTexelBuffer(int unitIdx, GrPixelConfig, GrGLBuffer*);
+
+ void generateMipmaps(const GrTextureParams& params, bool allowSRGBInputs, GrGLTexture* texture);
+
+ bool onGetReadPixelsInfo(GrSurface* srcSurface, int readWidth, int readHeight, size_t rowBytes,
+ GrPixelConfig readConfig, DrawPreference*,
+ ReadPixelTempDrawInfo*) override;
+
+ bool onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
+ GrPixelConfig srcConfig, DrawPreference*,
+ WritePixelTempDrawInfo*) override;
+
+ bool initDescForDstCopy(const GrRenderTarget* src, GrSurfaceDesc* desc) const override;
+
+ // These functions should be used to bind GL objects. They track the GL state and skip redundant
+ // bindings. Making the equivalent glBind calls directly will confuse the state tracking.
+ void bindVertexArray(GrGLuint id) {
+ fHWVertexArrayState.setVertexArrayID(this, id);
+ }
+
+ // These callbacks update state tracking when GL objects are deleted. They are called from
+ // GrGLResource onRelease functions.
+ void notifyVertexArrayDelete(GrGLuint id) {
+ fHWVertexArrayState.notifyVertexArrayDelete(id);
+ }
+
+ // Binds a buffer to the GL target corresponding to 'type', updates internal state tracking, and
+ // returns the GL target the buffer was bound to.
+ // When 'type' is kIndex_GrBufferType, this function will also implicitly bind the default VAO.
+ // If the caller wishes to bind an index buffer to a specific VAO, it can call glBind directly.
+ GrGLenum bindBuffer(GrBufferType type, const GrBuffer*);
+
+ // Called by GrGLBuffer after its buffer object has been destroyed.
+ void notifyBufferReleased(const GrGLBuffer*);
+
+ // The GrGLGpuCommandBuffer does not buffer up draws before submitting them to the gpu.
+ // Thus this is the implementation of the draw call for the corresponding passthrough function
+ // on GrGLGpuCommandBuffer.
+ void draw(const GrPipeline&,
+ const GrPrimitiveProcessor&,
+ const GrMesh*,
+ int meshCount);
+
+ // The GrGLGpuCommandBuffer does not buffer up draws before submitting them to the gpu.
+ // Thus this is the implementation of the clear call for the corresponding passthrough function
+ // on GrGLGpuCommandBuffer.
+ void clear(const GrFixedClip&, GrColor, GrRenderTarget*);
+
+ // The GrGLGpuCommandBuffer does not buffer up draws before submitting them to the gpu.
+ // Thus this is the implementation of the clearStencil call for the corresponding passthrough
+ // function on GrGLGpuCommandBuffer.
+ void clearStencilClip(const GrFixedClip&, bool insideStencilMask, GrRenderTarget*);
+
+ const GrGLContext* glContextForTesting() const override {
+ return &this->glContext();
+ }
+
+ void clearStencil(GrRenderTarget*) override;
+
+ GrGpuCommandBuffer* createCommandBuffer(
+ GrRenderTarget* target,
+ const GrGpuCommandBuffer::LoadAndStoreInfo& colorInfo,
+ const GrGpuCommandBuffer::LoadAndStoreInfo& stencilInfo) override;
+
+ void invalidateBoundRenderTarget() {
+ fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
+ }
+
+ GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
+ int width,
+ int height) override;
+
+ GrBackendObject createTestingOnlyBackendTexture(void* pixels, int w, int h,
+ GrPixelConfig config,
+ bool isRenderTarget = false) override;
+ bool isTestingOnlyBackendTexture(GrBackendObject) const override;
+ void deleteTestingOnlyBackendTexture(GrBackendObject, bool abandonTexture) override;
+
+ void resetShaderCacheForTesting() const override;
+
+ void drawDebugWireRect(GrRenderTarget*, const SkIRect&, GrColor) override;
+
+ void finishDrawTarget() override;
+
+ GrFence SK_WARN_UNUSED_RESULT insertFence() const override;
+ bool waitFence(GrFence, uint64_t timeout) const override;
+ void deleteFence(GrFence) const override;
+
+private:
+ GrGLGpu(GrGLContext* ctx, GrContext* context);
+
+ // GrGpu overrides
+ void onResetContext(uint32_t resetBits) override;
+
+ void xferBarrier(GrRenderTarget*, GrXferBarrierType) override;
+
+ GrTexture* onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
+ const SkTArray<GrMipLevel>& texels) override;
+ GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc,
+ SkBudgeted budgeted,
+ const SkTArray<GrMipLevel>& texels) override;
+
+ GrBuffer* onCreateBuffer(size_t size, GrBufferType intendedType, GrAccessPattern,
+ const void* data) override;
+ GrTexture* onWrapBackendTexture(const GrBackendTextureDesc&, GrWrapOwnership) override;
+ GrRenderTarget* onWrapBackendRenderTarget(const GrBackendRenderTargetDesc&,
+ GrWrapOwnership) override;
+ GrRenderTarget* onWrapBackendTextureAsRenderTarget(const GrBackendTextureDesc&) override;
+
+ gr_instanced::InstancedRendering* onCreateInstancedRendering() override;
+
+    // Given a GrPixelConfig, return the index into the stencil format array on GrGLCaps of a
+    // compatible stencil format, or a negative value if there is no compatible stencil format.
+ int getCompatibleStencilIndex(GrPixelConfig config);
+
+
+ // Returns whether the texture is successfully created. On success, the
+ // result is stored in |info|.
+ // The texture is populated with |texels|, if it exists.
+ // The texture parameters are cached in |initialTexParams|.
+ bool createTextureImpl(const GrSurfaceDesc& desc, GrGLTextureInfo* info,
+ bool renderTarget, GrGLTexture::TexParams* initialTexParams,
+ const SkTArray<GrMipLevel>& texels);
+
+ bool onMakeCopyForTextureParams(GrTexture*, const GrTextureParams&,
+ GrTextureProducer::CopyParams*) const override;
+
+ // Checks whether glReadPixels can be called to get pixel values in readConfig from the
+ // render target.
+ bool readPixelsSupported(GrRenderTarget* target, GrPixelConfig readConfig);
+
+ // Checks whether glReadPixels can be called to get pixel values in readConfig from a
+ // render target that has renderTargetConfig. This may have to create a temporary
+ // render target and thus is less preferable than the variant that takes a render target.
+ bool readPixelsSupported(GrPixelConfig renderTargetConfig, GrPixelConfig readConfig);
+
+ // Checks whether glReadPixels can be called to get pixel values in readConfig from a
+    // render target that has the same config as surfaceForConfig. Calls one of the two
+ // variations above, depending on whether the surface is a render target or not.
+ bool readPixelsSupported(GrSurface* surfaceForConfig, GrPixelConfig readConfig);
+
+ bool onReadPixels(GrSurface*,
+ int left, int top,
+ int width, int height,
+ GrPixelConfig,
+ void* buffer,
+ size_t rowBytes) override;
+
+ bool onWritePixels(GrSurface*,
+ int left, int top, int width, int height,
+ GrPixelConfig config,
+ const SkTArray<GrMipLevel>& texels) override;
+
+ bool onTransferPixels(GrSurface*,
+ int left, int top, int width, int height,
+ GrPixelConfig config, GrBuffer* transferBuffer,
+ size_t offset, size_t rowBytes) override;
+
+ void onResolveRenderTarget(GrRenderTarget* target) override;
+
+ bool onCopySurface(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) override;
+
+ void onGetMultisampleSpecs(GrRenderTarget*, const GrStencilSettings&,
+ int* effectiveSampleCnt, SamplePattern*) override;
+
+ // binds texture unit in GL
+ void setTextureUnit(int unitIdx);
+
+ void setTextureSwizzle(int unitIdx, GrGLenum target, const GrGLenum swizzle[]);
+
+ // Flushes state from GrPipeline to GL. Returns false if the state couldn't be set.
+ // willDrawPoints must be true if point primitives will be rendered after setting the GL state.
+ bool flushGLState(const GrPipeline&, const GrPrimitiveProcessor&, bool willDrawPoints);
+
+    // Sets up vertex attribute pointers and strides. On return indexOffsetInBytes gives the offset
+    // into the index buffer. It does not account for vertices.startIndex(); rather, the start
+    // index is relative to the returned offset.
+ void setupGeometry(const GrPrimitiveProcessor&,
+ const GrNonInstancedMesh& mesh,
+ size_t* indexOffsetInBytes);
+
+ void flushBlend(const GrXferProcessor::BlendInfo& blendInfo, const GrSwizzle&);
+
+ bool hasExtension(const char* ext) const { return fGLContext->hasExtension(ext); }
+
+ bool copySurfaceAsDraw(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+ void copySurfaceAsCopyTexSubImage(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+ bool copySurfaceAsBlitFramebuffer(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+ bool generateMipmap(GrGLTexture* texture, bool gammaCorrect);
+
+ void stampPLSSetupRect(const SkRect& bounds);
+
+ void setupPixelLocalStorage(const GrPipeline&, const GrPrimitiveProcessor&);
+
+ static bool BlendCoeffReferencesConstant(GrBlendCoeff coeff);
+
+ class ProgramCache : public ::SkNoncopyable {
+ public:
+ ProgramCache(GrGLGpu* gpu);
+ ~ProgramCache();
+
+ void abandon();
+ GrGLProgram* refProgram(const GrGLGpu*, const GrPipeline&, const GrPrimitiveProcessor&,
+ bool hasPointSize);
+
+ private:
+ enum {
+ // We may actually have kMaxEntries+1 shaders in the GL context because we create a new
+ // shader before evicting from the cache.
+ kMaxEntries = 128,
+ kHashBits = 6,
+ };
+
+ struct Entry;
+
+ struct ProgDescLess;
+
+        // binary search for entry matching desc. returns the index into fEntries that matches
+        // desc, or the bitwise-NOT of the index where it should be inserted.
+ int search(const GrProgramDesc& desc) const;
+
+ // sorted array of all the entries
+ Entry* fEntries[kMaxEntries];
+ // hash table based on lowest kHashBits bits of the program key. Used to avoid binary
+ // searching fEntries.
+ Entry* fHashTable[1 << kHashBits];
+
+ int fCount;
+ unsigned int fCurrLRUStamp;
+ GrGLGpu* fGpu;
+#ifdef PROGRAM_CACHE_STATS
+ int fTotalRequests;
+ int fCacheMisses;
+ int fHashMisses; // cache hit but hash table missed
+#endif
+ };
+
+ void flushColorWrite(bool writeColor);
+ void flushDrawFace(GrDrawFace face);
+
+ // flushes the scissor. see the note on flushBoundTextureAndParams about
+ // flushing the scissor after that function is called.
+ void flushScissor(const GrScissorState&,
+ const GrGLIRect& rtViewport,
+ GrSurfaceOrigin rtOrigin);
+
+ // disables the scissor
+ void disableScissor();
+
+ void flushWindowRectangles(const GrWindowRectsState&, const GrGLRenderTarget*);
+ void disableWindowRectangles();
+
+ void initFSAASupport();
+
+ // determines valid stencil formats
+ void initStencilFormats();
+
+ // sets a texture unit to use for texture operations other than binding a texture to a program.
+ // ensures that such operations don't negatively interact with tracking bound textures.
+ void setScratchTextureUnit();
+
+    // bounds is the region that may be modified.
+ // nullptr means whole target. Can be an empty rect.
+ void flushRenderTarget(GrGLRenderTarget*, const SkIRect* bounds, bool disableSRGB = false);
+
+ // Need not be called if flushRenderTarget is used.
+ void flushViewport(const GrGLIRect&);
+
+ void flushStencil(const GrStencilSettings&);
+
+ // rt is used only if useHWAA is true.
+ void flushHWAAState(GrRenderTarget* rt, bool useHWAA, bool stencilEnabled);
+
+ void flushMinSampleShading(float minSampleShading);
+
+ void flushFramebufferSRGB(bool enable);
+
+ // helper for onCreateTexture and writeTexturePixels
+ enum UploadType {
+ kNewTexture_UploadType, // we are creating a new texture
+ kWrite_UploadType, // we are using TexSubImage2D to copy data to an existing texture
+ kTransfer_UploadType, // we are using a transfer buffer to copy data
+ };
+ bool uploadTexData(const GrSurfaceDesc& desc,
+ GrGLenum target,
+ UploadType uploadType,
+ int left, int top, int width, int height,
+ GrPixelConfig dataConfig,
+ const SkTArray<GrMipLevel>& texels);
+
+ // helper for onCreateCompressedTexture. If width and height are
+ // set to -1, then this function will use desc.fWidth and desc.fHeight
+    // for the size of the data. Pass kNewTexture_UploadType as uploadType whenever a new
+    // texture needs to be created. Otherwise, we assume that
+ // the texture is already in GPU memory and that it's going to be updated
+ // with new data.
+ bool uploadCompressedTexData(const GrSurfaceDesc& desc,
+ GrGLenum target,
+ const SkTArray<GrMipLevel>& texels,
+ UploadType uploadType = kNewTexture_UploadType,
+ int left = 0, int top = 0,
+ int width = -1, int height = -1);
+
+ bool createRenderTargetObjects(const GrSurfaceDesc&, const GrGLTextureInfo& texInfo,
+ GrGLRenderTarget::IDDesc*);
+
+ enum TempFBOTarget {
+ kSrc_TempFBOTarget,
+ kDst_TempFBOTarget
+ };
+
+    // Binds a surface as an FBO for a copy operation. If the surface already owns an FBO ID then
+    // that ID is bound. If not, the surface is attached to a temporary FBO and that FBO is bound.
+    // This must be paired with a call to unbindTextureFBOForCopy().
+ void bindSurfaceFBOForCopy(GrSurface* surface, GrGLenum fboTarget, GrGLIRect* viewport,
+ TempFBOTarget tempFBOTarget);
+
+ // Must be called if bindSurfaceFBOForCopy was used to bind a surface for copying.
+ void unbindTextureFBOForCopy(GrGLenum fboTarget, GrSurface* surface);
+
+ SkAutoTUnref<GrGLContext> fGLContext;
+
+ bool createCopyProgram(int progIdx);
+ bool createMipmapProgram(int progIdx);
+ bool createWireRectProgram();
+ bool createPLSSetupProgram();
+
+ // GL program-related state
+ ProgramCache* fProgramCache;
+
+ ///////////////////////////////////////////////////////////////////////////
+ ///@name Caching of GL State
+ ///@{
+ int fHWActiveTextureUnitIdx;
+ GrGLuint fHWProgramID;
+
+ enum TriState {
+ kNo_TriState,
+ kYes_TriState,
+ kUnknown_TriState
+ };
+
+ GrGLuint fTempSrcFBOID;
+ GrGLuint fTempDstFBOID;
+
+ GrGLuint fStencilClearFBOID;
+
+ // last scissor / viewport scissor state seen by the GL.
+ struct {
+ TriState fEnabled;
+ GrGLIRect fRect;
+ void invalidate() {
+ fEnabled = kUnknown_TriState;
+ fRect.invalidate();
+ }
+ } fHWScissorSettings;
+
+ class {
+ public:
+ bool valid() const { return kInvalidSurfaceOrigin != fRTOrigin; }
+ void invalidate() { fRTOrigin = kInvalidSurfaceOrigin; }
+ bool knownDisabled() const { return this->valid() && !fWindowState.enabled(); }
+ void setDisabled() {
+ fRTOrigin = kDefault_GrSurfaceOrigin;
+ fWindowState.setDisabled();
+ }
+
+ void set(GrSurfaceOrigin rtOrigin, const GrGLIRect& viewport,
+ const GrWindowRectsState& windowState) {
+ fRTOrigin = rtOrigin;
+ fViewport = viewport;
+ fWindowState = windowState;
+ }
+
+ bool knownEqualTo(GrSurfaceOrigin rtOrigin, const GrGLIRect& viewport,
+ const GrWindowRectsState& windowState) const {
+ if (!this->valid()) {
+ return false;
+ }
+ if (fWindowState.numWindows() && (fRTOrigin != rtOrigin || fViewport != viewport)) {
+ return false;
+ }
+ return fWindowState.cheapEqualTo(windowState);
+ }
+
+ private:
+ enum { kInvalidSurfaceOrigin = -1 };
+
+ int fRTOrigin;
+ GrGLIRect fViewport;
+ GrWindowRectsState fWindowState;
+ } fHWWindowRectsState;
+
+ GrGLIRect fHWViewport;
+
+ /**
+ * Tracks vertex attrib array state.
+ */
+ class HWVertexArrayState {
+ public:
+ HWVertexArrayState() : fCoreProfileVertexArray(nullptr) { this->invalidate(); }
+
+ ~HWVertexArrayState() { delete fCoreProfileVertexArray; }
+
+ void invalidate() {
+ fBoundVertexArrayIDIsValid = false;
+ fDefaultVertexArrayAttribState.invalidate();
+ if (fCoreProfileVertexArray) {
+ fCoreProfileVertexArray->invalidateCachedState();
+ }
+ }
+
+ void notifyVertexArrayDelete(GrGLuint id) {
+ if (fBoundVertexArrayIDIsValid && fBoundVertexArrayID == id) {
+ // Does implicit bind to 0
+ fBoundVertexArrayID = 0;
+ }
+ }
+
+ void setVertexArrayID(GrGLGpu* gpu, GrGLuint arrayID) {
+ if (!gpu->glCaps().vertexArrayObjectSupport()) {
+ SkASSERT(0 == arrayID);
+ return;
+ }
+ if (!fBoundVertexArrayIDIsValid || arrayID != fBoundVertexArrayID) {
+ GR_GL_CALL(gpu->glInterface(), BindVertexArray(arrayID));
+ fBoundVertexArrayIDIsValid = true;
+ fBoundVertexArrayID = arrayID;
+ }
+ }
+
+ /**
+ * Binds the vertex array that should be used for internal draws, and returns its attrib
+ * state. This binds the default VAO (ID=zero) unless we are on a core profile, in which
+ * case we use a dummy array instead.
+ *
+         * If an index buffer is provided, it will be bound to the vertex array. Otherwise the
+ * index buffer binding will be left unchanged.
+ *
+ * The returned GrGLAttribArrayState should be used to set vertex attribute arrays.
+ */
+ GrGLAttribArrayState* bindInternalVertexArray(GrGLGpu*, const GrBuffer* ibuff = nullptr);
+
+ private:
+ GrGLuint fBoundVertexArrayID;
+ bool fBoundVertexArrayIDIsValid;
+
+        // We return a non-const pointer to this from bindInternalVertexArray when vertex array 0
+ // is bound. However, this class is internal to GrGLGpu and this object never leaks out of
+ // GrGLGpu.
+ GrGLAttribArrayState fDefaultVertexArrayAttribState;
+
+ // This is used when we're using a core profile.
+ GrGLVertexArray* fCoreProfileVertexArray;
+ } fHWVertexArrayState;
+
+ struct {
+ GrGLenum fGLTarget;
+ uint32_t fBoundBufferUniqueID;
+ bool fBufferZeroKnownBound;
+
+ void invalidate() {
+ fBoundBufferUniqueID = SK_InvalidUniqueID;
+ fBufferZeroKnownBound = false;
+ }
+ } fHWBufferState[kGrBufferTypeCount];
+
+ struct {
+ GrBlendEquation fEquation;
+ GrBlendCoeff fSrcCoeff;
+ GrBlendCoeff fDstCoeff;
+ GrColor fConstColor;
+ bool fConstColorValid;
+ TriState fEnabled;
+
+ void invalidate() {
+ fEquation = static_cast<GrBlendEquation>(-1);
+ fSrcCoeff = static_cast<GrBlendCoeff>(-1);
+ fDstCoeff = static_cast<GrBlendCoeff>(-1);
+ fConstColorValid = false;
+ fEnabled = kUnknown_TriState;
+ }
+ } fHWBlendState;
+
+ TriState fMSAAEnabled;
+
+ GrStencilSettings fHWStencilSettings;
+ TriState fHWStencilTestEnabled;
+
+
+ GrDrawFace fHWDrawFace;
+ TriState fHWWriteToColor;
+ uint32_t fHWBoundRenderTargetUniqueID;
+ TriState fHWSRGBFramebuffer;
+ SkTArray<uint32_t, true> fHWBoundTextureUniqueIDs;
+
+ struct BufferTexture {
+ BufferTexture() : fTextureID(0), fKnownBound(false),
+ fAttachedBufferUniqueID(SK_InvalidUniqueID),
+ fSwizzle(GrSwizzle::RGBA()) {}
+
+ GrGLuint fTextureID;
+ bool fKnownBound;
+ GrPixelConfig fTexelConfig;
+ uint32_t fAttachedBufferUniqueID;
+ GrSwizzle fSwizzle;
+ };
+
+ SkTArray<BufferTexture, true> fHWBufferTextures;
+ int fHWMaxUsedBufferTextureUnit;
+
+ // EXT_raster_multisample.
+ TriState fHWRasterMultisampleEnabled;
+ int fHWNumRasterSamples;
+ ///@}
+
+ /** IDs for copy surface program. */
+ struct {
+ GrGLuint fProgram;
+ GrGLint fTextureUniform;
+ GrGLint fTexCoordXformUniform;
+ GrGLint fPosXformUniform;
+ } fCopyPrograms[3];
+ SkAutoTUnref<GrGLBuffer> fCopyProgramArrayBuffer;
+
+ /** IDs for texture mipmap program. (4 filter configurations) */
+ struct {
+ GrGLuint fProgram;
+ GrGLint fTextureUniform;
+ GrGLint fTexCoordXformUniform;
+ } fMipmapPrograms[4];
+ SkAutoTUnref<GrGLBuffer> fMipmapProgramArrayBuffer;
+
+ struct {
+ GrGLuint fProgram;
+ GrGLint fColorUniform;
+ GrGLint fRectUniform;
+ } fWireRectProgram;
+ SkAutoTUnref<GrGLBuffer> fWireRectArrayBuffer;
+
+ static int TextureTargetToCopyProgramIdx(GrGLenum target) {
+ switch (target) {
+ case GR_GL_TEXTURE_2D:
+ return 0;
+ case GR_GL_TEXTURE_EXTERNAL:
+ return 1;
+ case GR_GL_TEXTURE_RECTANGLE:
+ return 2;
+ default:
+ SkFAIL("Unexpected texture target type.");
+ return 0;
+ }
+ }
+
+ static int TextureSizeToMipmapProgramIdx(int width, int height) {
+ const bool wide = (width > 1) && SkToBool(width & 0x1);
+ const bool tall = (height > 1) && SkToBool(height & 0x1);
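+        // e.g. 7x4 -> 0x2 (odd width only), 8x5 -> 0x1 (odd height only), 7x5 -> 0x3, 8x8 -> 0x0.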
+ return (wide ? 0x2 : 0x0) | (tall ? 0x1 : 0x0);
+ }
+
+ struct {
+ GrGLuint fProgram;
+ GrGLint fPosXformUniform;
+ SkAutoTUnref<GrGLBuffer> fArrayBuffer;
+ } fPLSSetupProgram;
+
+ bool fHWPLSEnabled;
+ bool fPLSHasBeenUsed;
+
+ float fHWMinSampleShading;
+
+ typedef GrGpu INHERITED;
+ friend class GrGLPathRendering; // For accessing setTextureUnit.
+ friend class gr_instanced::GLInstancedRendering; // For accessing flushGLState.
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLGpuCommandBuffer.h b/gfx/skia/skia/src/gpu/gl/GrGLGpuCommandBuffer.h
new file mode 100644
index 000000000..4ad2b13ad
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLGpuCommandBuffer.h
@@ -0,0 +1,58 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrGLGpuCommandBuffer_DEFINED
+#define GrGLGpuCommandBuffer_DEFINED
+
+#include "GrGpuCommandBuffer.h"
+
+#include "GrGLGpu.h"
+
+class GrGLGpuCommandBuffer : public GrGpuCommandBuffer {
+/**
+ * We do not actually buffer up draws or do any work in this class for GL. Instead, commands
+ * are immediately sent to the gpu to execute. Thus all the commands in this class are simply
+ * pass-through functions to corresponding calls in the GrGLGpu class.
+ */
+public:
+ GrGLGpuCommandBuffer(GrGLGpu* gpu) : fGpu(gpu) {}
+
+ virtual ~GrGLGpuCommandBuffer() {}
+
+ void end() override {}
+
+ void discard(GrRenderTarget* rt) override {}
+
+private:
+ GrGpu* gpu() override { return fGpu; }
+
+ void onSubmit(const SkIRect& bounds) override {}
+
+ void onDraw(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ const GrMesh* mesh,
+ int meshCount) override {
+ fGpu->draw(pipeline, primProc, mesh, meshCount);
+ }
+
+ void onClear(GrRenderTarget* rt, const GrFixedClip& clip, GrColor color) override {
+ fGpu->clear(clip, color, rt);
+ }
+
+ void onClearStencilClip(GrRenderTarget* rt,
+ const GrFixedClip& clip,
+ bool insideStencilMask) override {
+ fGpu->clearStencilClip(clip, insideStencilMask, rt);
+ }
+
+ GrGLGpu* fGpu;
+
+ typedef GrGpuCommandBuffer INHERITED;
+};
+
+#endif
+
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLGpuProgramCache.cpp b/gfx/skia/skia/src/gpu/gl/GrGLGpuProgramCache.cpp
new file mode 100644
index 000000000..260e256db
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLGpuProgramCache.cpp
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLGpu.h"
+
+#include "builders/GrGLProgramBuilder.h"
+#include "GrProcessor.h"
+#include "GrProgramDesc.h"
+#include "GrGLPathRendering.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "SkTSearch.h"
+
+#ifdef PROGRAM_CACHE_STATS
+// Display program cache usage
+static const bool c_DisplayCache{false};
+#endif
+
+typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+
+struct GrGLGpu::ProgramCache::Entry {
+
+ Entry() : fProgram(nullptr), fLRUStamp(0) {}
+
+ SkAutoTUnref<GrGLProgram> fProgram;
+ unsigned int fLRUStamp;
+};
+
+struct GrGLGpu::ProgramCache::ProgDescLess {
+ bool operator() (const GrProgramDesc& desc, const Entry* entry) {
+ SkASSERT(entry->fProgram.get());
+ return GrProgramDesc::Less(desc, entry->fProgram->getDesc());
+ }
+
+ bool operator() (const Entry* entry, const GrProgramDesc& desc) {
+ SkASSERT(entry->fProgram.get());
+ return GrProgramDesc::Less(entry->fProgram->getDesc(), desc);
+ }
+};
+
+GrGLGpu::ProgramCache::ProgramCache(GrGLGpu* gpu)
+ : fCount(0)
+ , fCurrLRUStamp(0)
+ , fGpu(gpu)
+#ifdef PROGRAM_CACHE_STATS
+ , fTotalRequests(0)
+ , fCacheMisses(0)
+ , fHashMisses(0)
+#endif
+{
+ for (int i = 0; i < 1 << kHashBits; ++i) {
+ fHashTable[i] = nullptr;
+ }
+}
+
+GrGLGpu::ProgramCache::~ProgramCache() {
+ for (int i = 0; i < fCount; ++i){
+ delete fEntries[i];
+ }
+ // dump stats
+#ifdef PROGRAM_CACHE_STATS
+ if (c_DisplayCache) {
+ SkDebugf("--- Program Cache ---\n");
+ SkDebugf("Total requests: %d\n", fTotalRequests);
+ SkDebugf("Cache misses: %d\n", fCacheMisses);
+ SkDebugf("Cache miss %%: %f\n", (fTotalRequests > 0) ?
+ 100.f * fCacheMisses / fTotalRequests :
+ 0.f);
+ int cacheHits = fTotalRequests - fCacheMisses;
+ SkDebugf("Hash miss %%: %f\n", (cacheHits > 0) ? 100.f * fHashMisses / cacheHits : 0.f);
+ SkDebugf("---------------------\n");
+ }
+#endif
+}
+
+void GrGLGpu::ProgramCache::abandon() {
+ for (int i = 0; i < fCount; ++i) {
+ SkASSERT(fEntries[i]->fProgram.get());
+ fEntries[i]->fProgram->abandon();
+ delete fEntries[i];
+ fEntries[i] = nullptr;
+ }
+ fCount = 0;
+
+ // zero out hash table
+ for (int i = 0; i < 1 << kHashBits; i++) {
+ fHashTable[i] = nullptr;
+ }
+
+ fCurrLRUStamp = 0;
+#ifdef PROGRAM_CACHE_STATS
+ fTotalRequests = 0;
+ fCacheMisses = 0;
+ fHashMisses = 0;
+#endif
+}
+
+int GrGLGpu::ProgramCache::search(const GrProgramDesc& desc) const {
+ ProgDescLess less;
+ return SkTSearch(fEntries, fCount, desc, sizeof(Entry*), less);
+}
+
+GrGLProgram* GrGLGpu::ProgramCache::refProgram(const GrGLGpu* gpu,
+ const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ bool isPoints) {
+#ifdef PROGRAM_CACHE_STATS
+ ++fTotalRequests;
+#endif
+
+ // Get GrGLProgramDesc
+ GrProgramDesc desc;
+ if (!GrProgramDesc::Build(&desc, primProc, isPoints, pipeline, *gpu->glCaps().glslCaps())) {
+        GrCapsDebugf(gpu->caps(), "Failed to build GL program descriptor!\n");
+ return nullptr;
+ }
+ desc.finalize();
+
+ Entry* entry = nullptr;
+
+ uint32_t hashIdx = desc.getChecksum();
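+    // Fold the upper checksum bits down so they contribute to the kHashBits-wide table index.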
+ hashIdx ^= hashIdx >> 16;
+ if (kHashBits <= 8) {
+ hashIdx ^= hashIdx >> 8;
+ }
+    hashIdx &= ((1 << kHashBits) - 1);
+ Entry* hashedEntry = fHashTable[hashIdx];
+ if (hashedEntry && hashedEntry->fProgram->getDesc() == desc) {
+ SkASSERT(hashedEntry->fProgram);
+ entry = hashedEntry;
+ }
+
+ int entryIdx;
+ if (nullptr == entry) {
+ entryIdx = this->search(desc);
+ if (entryIdx >= 0) {
+ entry = fEntries[entryIdx];
+#ifdef PROGRAM_CACHE_STATS
+ ++fHashMisses;
+#endif
+ }
+ }
+
+ if (nullptr == entry) {
+ // We have a cache miss
+#ifdef PROGRAM_CACHE_STATS
+ ++fCacheMisses;
+#endif
+ GrGLProgram* program = GrGLProgramBuilder::CreateProgram(pipeline, primProc, desc, fGpu);
+ if (nullptr == program) {
+ return nullptr;
+ }
+ int purgeIdx = 0;
+ if (fCount < kMaxEntries) {
+ entry = new Entry;
+ purgeIdx = fCount++;
+ fEntries[purgeIdx] = entry;
+ } else {
+ SkASSERT(fCount == kMaxEntries);
+ purgeIdx = 0;
+ for (int i = 1; i < kMaxEntries; ++i) {
+ if (fEntries[i]->fLRUStamp < fEntries[purgeIdx]->fLRUStamp) {
+ purgeIdx = i;
+ }
+ }
+ entry = fEntries[purgeIdx];
+ int purgedHashIdx = entry->fProgram->getDesc().getChecksum() & ((1 << kHashBits) - 1);
+ if (fHashTable[purgedHashIdx] == entry) {
+ fHashTable[purgedHashIdx] = nullptr;
+ }
+ }
+ SkASSERT(fEntries[purgeIdx] == entry);
+ entry->fProgram.reset(program);
+ // We need to shift fEntries around so that the entry currently at purgeIdx is placed
+ // just before the entry at ~entryIdx (in order to keep fEntries sorted by descriptor).
+ entryIdx = ~entryIdx;
+ if (entryIdx < purgeIdx) {
+ // Let E and P be the entries at index entryIdx and purgeIdx, respectively.
+ // If the entries array looks like this:
+ // aaaaEbbbbbPccccc
+ // we rearrange it to look like this:
+ // aaaaPEbbbbbccccc
+ size_t copySize = (purgeIdx - entryIdx) * sizeof(Entry*);
+ memmove(fEntries + entryIdx + 1, fEntries + entryIdx, copySize);
+ fEntries[entryIdx] = entry;
+ } else if (purgeIdx < entryIdx) {
+ // If the entries array looks like this:
+ // aaaaPbbbbbEccccc
+ // we rearrange it to look like this:
+ // aaaabbbbbPEccccc
+ size_t copySize = (entryIdx - purgeIdx - 1) * sizeof(Entry*);
+ memmove(fEntries + purgeIdx, fEntries + purgeIdx + 1, copySize);
+ fEntries[entryIdx - 1] = entry;
+ }
+#ifdef SK_DEBUG
+ SkASSERT(fEntries[0]->fProgram.get());
+ for (int i = 0; i < fCount - 1; ++i) {
+ SkASSERT(fEntries[i + 1]->fProgram.get());
+ const GrProgramDesc& a = fEntries[i]->fProgram->getDesc();
+ const GrProgramDesc& b = fEntries[i + 1]->fProgram->getDesc();
+ SkASSERT(GrProgramDesc::Less(a, b));
+ SkASSERT(!GrProgramDesc::Less(b, a));
+ }
+#endif
+ }
+
+ fHashTable[hashIdx] = entry;
+ entry->fLRUStamp = fCurrLRUStamp;
+
+ if (SK_MaxU32 == fCurrLRUStamp) {
+ // wrap around! just trash our LRU, one time hit.
+ for (int i = 0; i < fCount; ++i) {
+ fEntries[i]->fLRUStamp = 0;
+ }
+ }
+ ++fCurrLRUStamp;
+ return SkRef(entry->fProgram.get());
+}
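
The cache above layers three structures: a small direct-mapped hash table keyed by a mixed-down checksum of the program descriptor, a descriptor-sorted entry array that is binary-searched on a hash miss, and an LRU stamp used to pick a victim once kMaxEntries programs exist. The following is a minimal, self-contained C++ sketch of that lookup-and-evict flow, not Skia code: an integer key and value stand in for GrProgramDesc and GrGLProgram, the hash-miss path is a plain linear scan, and the memmove that keeps the real array sorted is omitted.

// A minimal sketch of the ProgramCache idea; integer key/value stand in for
// GrProgramDesc and GrGLProgram, and the sorted-array search is simplified
// to a linear scan.
#include <cstdint>

class MiniProgramCache {
public:
    static const int kHashBits   = 6;
    static const int kMaxEntries = 32;

    // Returns the cached value for 'desc', invoking 'build(desc)' only on a
    // true cache miss (possibly evicting the least-recently-used entry).
    template <typename BuildFn>
    int refProgram(uint32_t desc, BuildFn build) {
        uint32_t hashIdx = (desc ^ (desc >> 16)) & ((1u << kHashBits) - 1);
        Entry* entry = fHashTable[hashIdx];
        if (entry && entry->fDesc != desc) {
            entry = nullptr;                       // hash collision
        }
        if (!entry) {
            for (int i = 0; i < fCount; ++i) {     // hash miss: scan the entries
                if (fEntries[i].fDesc == desc) { entry = &fEntries[i]; break; }
            }
        }
        if (!entry) {                              // cache miss: build and store
            int slot = fCount;
            if (fCount < kMaxEntries) {
                ++fCount;
            } else {                               // full: evict the LRU entry
                slot = 0;
                for (int i = 1; i < kMaxEntries; ++i) {
                    if (fEntries[i].fLRUStamp < fEntries[slot].fLRUStamp) { slot = i; }
                }
                uint32_t oldHash = (fEntries[slot].fDesc ^ (fEntries[slot].fDesc >> 16)) &
                                   ((1u << kHashBits) - 1);
                if (fHashTable[oldHash] == &fEntries[slot]) {
                    fHashTable[oldHash] = nullptr; // drop the stale hash slot
                }
            }
            fEntries[slot] = Entry{desc, 0u, build(desc)};
            entry = &fEntries[slot];
        }
        fHashTable[hashIdx] = entry;               // refresh hash slot and LRU stamp
        entry->fLRUStamp = ++fCurrLRUStamp;
        return entry->fProgram;
    }

private:
    struct Entry {
        uint32_t fDesc;
        unsigned fLRUStamp;
        int      fProgram;
    };
    Entry    fEntries[kMaxEntries];
    Entry*   fHashTable[1 << kHashBits] = {};
    int      fCount = 0;
    unsigned fCurrLRUStamp = 0;
};

A caller would write cache.refProgram(descChecksum, buildFn); buildFn runs only when neither the hash slot nor the entry array already holds a program for that descriptor.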
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLIRect.h b/gfx/skia/skia/src/gpu/gl/GrGLIRect.h
new file mode 100644
index 000000000..a699ae36b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLIRect.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrGLIRect_DEFINED
+#define GrGLIRect_DEFINED
+
+#include "gl/GrGLInterface.h"
+#include "GrGLUtil.h"
+
+/**
+ * Helper struct for dealing with the fact that Ganesh and GL use different
+ * window coordinate systems (top-down vs bottom-up)
+ */
+struct GrGLIRect {
+ GrGLint fLeft;
+ GrGLint fBottom;
+ GrGLsizei fWidth;
+ GrGLsizei fHeight;
+
+ /**
+ * cast-safe way to treat the rect as an array of (4) ints.
+ */
+ const int* asInts() const {
+ return &fLeft;
+
+ GR_STATIC_ASSERT(0 == offsetof(GrGLIRect, fLeft));
+ GR_STATIC_ASSERT(4 == offsetof(GrGLIRect, fBottom));
+ GR_STATIC_ASSERT(8 == offsetof(GrGLIRect, fWidth));
+ GR_STATIC_ASSERT(12 == offsetof(GrGLIRect, fHeight));
+ GR_STATIC_ASSERT(16 == sizeof(GrGLIRect)); // For an array of GrGLIRect.
+ }
+ int* asInts() { return &fLeft; }
+
+ void pushToGLViewport(const GrGLInterface* gl) const {
+ GR_GL_CALL(gl, Viewport(fLeft, fBottom, fWidth, fHeight));
+ }
+
+ void pushToGLScissor(const GrGLInterface* gl) const {
+ GR_GL_CALL(gl, Scissor(fLeft, fBottom, fWidth, fHeight));
+ }
+
+ void setFromGLViewport(const GrGLInterface* gl) {
+ GR_STATIC_ASSERT(sizeof(GrGLIRect) == 4*sizeof(GrGLint));
+ GR_GL_GetIntegerv(gl, GR_GL_VIEWPORT, (GrGLint*) this);
+ }
+
+ // sometimes we have a SkIRect from the client that we
+ // want to simultaneously make relative to GL's viewport
+ // and (optionally) convert from top-down to bottom-up.
+ void setRelativeTo(const GrGLIRect& glViewport, const SkIRect& devRect, GrSurfaceOrigin org) {
+ this->setRelativeTo(glViewport, devRect.x(), devRect.y(), devRect.width(), devRect.height(),
+ org);
+ }
+
+ void setRelativeTo(const GrGLIRect& glRect,
+ int leftOffset,
+ int topOffset,
+ int width,
+ int height,
+ GrSurfaceOrigin origin) {
+ fLeft = glRect.fLeft + leftOffset;
+ fWidth = width;
+ if (kBottomLeft_GrSurfaceOrigin == origin) {
+ fBottom = glRect.fBottom + (glRect.fHeight - topOffset - height);
+ } else {
+ fBottom = glRect.fBottom + topOffset;
+ }
+ fHeight = height;
+
+ SkASSERT(fWidth >= 0);
+ SkASSERT(fHeight >= 0);
+ }
+
+ bool contains(const GrGLIRect& glRect) const {
+ return fLeft <= glRect.fLeft &&
+ fBottom <= glRect.fBottom &&
+ fLeft + fWidth >= glRect.fLeft + glRect.fWidth &&
+ fBottom + fHeight >= glRect.fBottom + glRect.fHeight;
+ }
+
+ void invalidate() {fLeft = fWidth = fBottom = fHeight = -1;}
+
+ bool operator ==(const GrGLIRect& glRect) const {
+ return 0 == memcmp(this, &glRect, sizeof(GrGLIRect));
+ }
+
+ bool operator !=(const GrGLIRect& glRect) const {return !(*this == glRect);}
+};
+
+#endif
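
GrGLIRect::setRelativeTo is where Ganesh's top-down rectangles get converted into the bottom-up coordinates that glViewport and glScissor expect: when the surface origin is bottom-left, a y offset measured from the top becomes viewportHeight - topOffset - rectHeight measured from the bottom. Below is a small standalone sketch of that arithmetic with hypothetical names; it only covers the bottom-left-origin case.

#include <cassert>
#include <cstdio>

struct GLRect { int left, bottom, width, height; };

// 'viewport' is the render target's GL viewport (bottom-up); 'topOffset' is the
// y of the rect's top edge measured from the top of the target.
GLRect topDownToGL(const GLRect& viewport, int leftOffset, int topOffset,
                   int width, int height) {
    GLRect r;
    r.left   = viewport.left + leftOffset;
    r.bottom = viewport.bottom + (viewport.height - topOffset - height);
    r.width  = width;
    r.height = height;
    assert(r.width >= 0 && r.height >= 0);
    return r;
}

int main() {
    GLRect viewport{0, 0, 640, 480};
    // A 100x50 rect whose top edge is 10px from the top of a 480px-tall target
    // has its bottom edge 420px above the bottom in GL coordinates.
    GLRect r = topDownToGL(viewport, 20, 10, 100, 50);
    std::printf("glScissor(%d, %d, %d, %d)\n", r.left, r.bottom, r.width, r.height);
    return 0;  // prints glScissor(20, 420, 100, 50)
}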
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLInterface.cpp b/gfx/skia/skia/src/gpu/gl/GrGLInterface.cpp
new file mode 100644
index 000000000..0a157dda1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLInterface.cpp
@@ -0,0 +1,825 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "gl/GrGLInterface.h"
+#include "gl/GrGLExtensions.h"
+#include "gl/GrGLUtil.h"
+
+#include <stdio.h>
+
+const GrGLInterface* GrGLInterfaceAddTestDebugMarker(const GrGLInterface* interface,
+ GrGLInsertEventMarkerProc insertEventMarkerFn,
+ GrGLPushGroupMarkerProc pushGroupMarkerFn,
+ GrGLPopGroupMarkerProc popGroupMarkerFn) {
+ GrGLInterface* newInterface = GrGLInterface::NewClone(interface);
+
+ if (!newInterface->fExtensions.has("GL_EXT_debug_marker")) {
+ newInterface->fExtensions.add("GL_EXT_debug_marker");
+ }
+
+ newInterface->fFunctions.fInsertEventMarker = insertEventMarkerFn;
+ newInterface->fFunctions.fPushGroupMarker = pushGroupMarkerFn;
+ newInterface->fFunctions.fPopGroupMarker = popGroupMarkerFn;
+
+ return newInterface;
+}
+
+const GrGLInterface* GrGLInterfaceRemoveNVPR(const GrGLInterface* interface) {
+ GrGLInterface* newInterface = GrGLInterface::NewClone(interface);
+
+ newInterface->fExtensions.remove("GL_NV_path_rendering");
+ newInterface->fExtensions.remove("GL_CHROMIUM_path_rendering");
+ newInterface->fFunctions.fMatrixLoadf = nullptr;
+ newInterface->fFunctions.fMatrixLoadIdentity = nullptr;
+ newInterface->fFunctions.fPathCommands = nullptr;
+ newInterface->fFunctions.fPathParameteri = nullptr;
+ newInterface->fFunctions.fPathParameterf = nullptr;
+ newInterface->fFunctions.fGenPaths = nullptr;
+ newInterface->fFunctions.fDeletePaths = nullptr;
+ newInterface->fFunctions.fIsPath = nullptr;
+ newInterface->fFunctions.fPathStencilFunc = nullptr;
+ newInterface->fFunctions.fStencilFillPath = nullptr;
+ newInterface->fFunctions.fStencilStrokePath = nullptr;
+ newInterface->fFunctions.fStencilFillPathInstanced = nullptr;
+ newInterface->fFunctions.fStencilStrokePathInstanced = nullptr;
+ newInterface->fFunctions.fCoverFillPath = nullptr;
+ newInterface->fFunctions.fCoverStrokePath = nullptr;
+ newInterface->fFunctions.fCoverFillPathInstanced = nullptr;
+ newInterface->fFunctions.fCoverStrokePathInstanced = nullptr;
+ newInterface->fFunctions.fStencilThenCoverFillPath = nullptr;
+ newInterface->fFunctions.fStencilThenCoverStrokePath = nullptr;
+ newInterface->fFunctions.fStencilThenCoverFillPathInstanced = nullptr;
+ newInterface->fFunctions.fStencilThenCoverStrokePathInstanced = nullptr;
+ newInterface->fFunctions.fProgramPathFragmentInputGen = nullptr;
+ newInterface->fFunctions.fBindFragmentInputLocation = nullptr;
+ return newInterface;
+}
+
+GrGLInterface::GrGLInterface() {
+ fStandard = kNone_GrGLStandard;
+}
+
+GrGLInterface* GrGLInterface::NewClone(const GrGLInterface* interface) {
+ SkASSERT(interface);
+
+ GrGLInterface* clone = new GrGLInterface;
+ clone->fStandard = interface->fStandard;
+ clone->fExtensions = interface->fExtensions;
+ clone->fFunctions = interface->fFunctions;
+ return clone;
+}
+
+#ifdef SK_DEBUG
+ static int kIsDebug = 1;
+#else
+ static int kIsDebug = 0;
+#endif
+
+#define RETURN_FALSE_INTERFACE \
+ if (kIsDebug) { SkDebugf("%s:%d GrGLInterface::validate() failed.\n", __FILE__, __LINE__); } \
+ return false;
+
+bool GrGLInterface::validate() const {
+
+ if (kNone_GrGLStandard == fStandard) {
+ RETURN_FALSE_INTERFACE
+ }
+
+ if (!fExtensions.isInitialized()) {
+ RETURN_FALSE_INTERFACE
+ }
+
+ // functions that are always required
+ if (nullptr == fFunctions.fActiveTexture ||
+ nullptr == fFunctions.fAttachShader ||
+ nullptr == fFunctions.fBindAttribLocation ||
+ nullptr == fFunctions.fBindBuffer ||
+ nullptr == fFunctions.fBindTexture ||
+ nullptr == fFunctions.fBlendColor || // -> GL >= 1.4 or extension, ES >= 2.0
+ nullptr == fFunctions.fBlendEquation || // -> GL >= 1.4 or extension, ES >= 2.0
+ nullptr == fFunctions.fBlendFunc ||
+ nullptr == fFunctions.fBufferData ||
+ nullptr == fFunctions.fBufferSubData ||
+ nullptr == fFunctions.fClear ||
+ nullptr == fFunctions.fClearColor ||
+ nullptr == fFunctions.fClearStencil ||
+ nullptr == fFunctions.fColorMask ||
+ nullptr == fFunctions.fCompileShader ||
+ nullptr == fFunctions.fCopyTexSubImage2D ||
+ nullptr == fFunctions.fCreateProgram ||
+ nullptr == fFunctions.fCreateShader ||
+ nullptr == fFunctions.fCullFace ||
+ nullptr == fFunctions.fDeleteBuffers ||
+ nullptr == fFunctions.fDeleteProgram ||
+ nullptr == fFunctions.fDeleteShader ||
+ nullptr == fFunctions.fDeleteTextures ||
+ nullptr == fFunctions.fDepthMask ||
+ nullptr == fFunctions.fDisable ||
+ nullptr == fFunctions.fDisableVertexAttribArray ||
+ nullptr == fFunctions.fDrawArrays ||
+ nullptr == fFunctions.fDrawElements ||
+ nullptr == fFunctions.fEnable ||
+ nullptr == fFunctions.fEnableVertexAttribArray ||
+ nullptr == fFunctions.fFrontFace ||
+ nullptr == fFunctions.fGenBuffers ||
+ nullptr == fFunctions.fGenTextures ||
+ nullptr == fFunctions.fGetBufferParameteriv ||
+ nullptr == fFunctions.fGenerateMipmap ||
+ nullptr == fFunctions.fGetError ||
+ nullptr == fFunctions.fGetIntegerv ||
+ nullptr == fFunctions.fGetProgramInfoLog ||
+ nullptr == fFunctions.fGetProgramiv ||
+ nullptr == fFunctions.fGetShaderInfoLog ||
+ nullptr == fFunctions.fGetShaderiv ||
+ nullptr == fFunctions.fGetString ||
+ nullptr == fFunctions.fGetUniformLocation ||
+#if 0 // Not included in Chrome yet
+ nullptr == fFunctions.fIsTexture ||
+#endif
+ nullptr == fFunctions.fLinkProgram ||
+ nullptr == fFunctions.fLineWidth ||
+ nullptr == fFunctions.fPixelStorei ||
+ nullptr == fFunctions.fReadPixels ||
+ nullptr == fFunctions.fScissor ||
+ nullptr == fFunctions.fShaderSource ||
+ nullptr == fFunctions.fStencilFunc ||
+ nullptr == fFunctions.fStencilMask ||
+ nullptr == fFunctions.fStencilOp ||
+ nullptr == fFunctions.fTexImage2D ||
+ nullptr == fFunctions.fTexParameteri ||
+ nullptr == fFunctions.fTexParameteriv ||
+ nullptr == fFunctions.fTexSubImage2D ||
+ nullptr == fFunctions.fUniform1f ||
+ nullptr == fFunctions.fUniform1i ||
+ nullptr == fFunctions.fUniform1fv ||
+ nullptr == fFunctions.fUniform1iv ||
+ nullptr == fFunctions.fUniform2f ||
+ nullptr == fFunctions.fUniform2i ||
+ nullptr == fFunctions.fUniform2fv ||
+ nullptr == fFunctions.fUniform2iv ||
+ nullptr == fFunctions.fUniform3f ||
+ nullptr == fFunctions.fUniform3i ||
+ nullptr == fFunctions.fUniform3fv ||
+ nullptr == fFunctions.fUniform3iv ||
+ nullptr == fFunctions.fUniform4f ||
+ nullptr == fFunctions.fUniform4i ||
+ nullptr == fFunctions.fUniform4fv ||
+ nullptr == fFunctions.fUniform4iv ||
+ nullptr == fFunctions.fUniformMatrix2fv ||
+ nullptr == fFunctions.fUniformMatrix3fv ||
+ nullptr == fFunctions.fUniformMatrix4fv ||
+ nullptr == fFunctions.fUseProgram ||
+ nullptr == fFunctions.fVertexAttrib1f ||
+ nullptr == fFunctions.fVertexAttrib2fv ||
+ nullptr == fFunctions.fVertexAttrib3fv ||
+ nullptr == fFunctions.fVertexAttrib4fv ||
+ nullptr == fFunctions.fVertexAttribPointer ||
+ nullptr == fFunctions.fViewport ||
+ nullptr == fFunctions.fBindFramebuffer ||
+ nullptr == fFunctions.fBindRenderbuffer ||
+ nullptr == fFunctions.fCheckFramebufferStatus ||
+ nullptr == fFunctions.fDeleteFramebuffers ||
+ nullptr == fFunctions.fDeleteRenderbuffers ||
+ nullptr == fFunctions.fFinish ||
+ nullptr == fFunctions.fFlush ||
+ nullptr == fFunctions.fFramebufferRenderbuffer ||
+ nullptr == fFunctions.fFramebufferTexture2D ||
+ nullptr == fFunctions.fGetFramebufferAttachmentParameteriv ||
+ nullptr == fFunctions.fGetRenderbufferParameteriv ||
+ nullptr == fFunctions.fGenFramebuffers ||
+ nullptr == fFunctions.fGenRenderbuffers ||
+ nullptr == fFunctions.fRenderbufferStorage) {
+ RETURN_FALSE_INTERFACE
+ }
+
+ GrGLVersion glVer = GrGLGetVersion(this);
+ if (GR_GL_INVALID_VER == glVer) {
+ RETURN_FALSE_INTERFACE
+ }
+
+ // Now check that baseline ES/Desktop fns not covered above are present
+ // and that we have fn pointers for any advertised fExtensions that we will
+ // try to use.
+
+    // These functions are part of ES2, so we assume they are available.
+    // On the desktop we assume they are available if the extension
+    // is present or the GL version is high enough.
+ if (kGLES_GrGLStandard == fStandard) {
+ if (nullptr == fFunctions.fStencilFuncSeparate ||
+ nullptr == fFunctions.fStencilMaskSeparate ||
+ nullptr == fFunctions.fStencilOpSeparate) {
+ RETURN_FALSE_INTERFACE
+ }
+ } else if (kGL_GrGLStandard == fStandard) {
+
+ if (glVer >= GR_GL_VER(2,0)) {
+ if (nullptr == fFunctions.fStencilFuncSeparate ||
+ nullptr == fFunctions.fStencilMaskSeparate ||
+ nullptr == fFunctions.fStencilOpSeparate) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ if (glVer >= GR_GL_VER(3,0) && nullptr == fFunctions.fBindFragDataLocation) {
+ RETURN_FALSE_INTERFACE
+ }
+ if (glVer >= GR_GL_VER(2,0) || fExtensions.has("GL_ARB_draw_buffers")) {
+ if (nullptr == fFunctions.fDrawBuffers) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ if (glVer >= GR_GL_VER(1,5) || fExtensions.has("GL_ARB_occlusion_query")) {
+ if (nullptr == fFunctions.fGenQueries ||
+ nullptr == fFunctions.fDeleteQueries ||
+ nullptr == fFunctions.fBeginQuery ||
+ nullptr == fFunctions.fEndQuery ||
+ nullptr == fFunctions.fGetQueryiv ||
+ nullptr == fFunctions.fGetQueryObjectiv ||
+ nullptr == fFunctions.fGetQueryObjectuiv) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ if (glVer >= GR_GL_VER(3,3) ||
+ fExtensions.has("GL_ARB_timer_query") ||
+ fExtensions.has("GL_EXT_timer_query")) {
+ if (nullptr == fFunctions.fGetQueryObjecti64v ||
+ nullptr == fFunctions.fGetQueryObjectui64v) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ if (glVer >= GR_GL_VER(3,3) || fExtensions.has("GL_ARB_timer_query")) {
+ if (nullptr == fFunctions.fQueryCounter) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ }
+
+ // optional function on desktop before 1.3
+ if (kGL_GrGLStandard != fStandard ||
+ (glVer >= GR_GL_VER(1,3)) ||
+ fExtensions.has("GL_ARB_texture_compression")) {
+ if (nullptr == fFunctions.fCompressedTexImage2D
+#if 0
+ || nullptr == fFunctions.fCompressedTexSubImage2D
+#endif
+ ) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ // part of desktop GL, but not ES
+ if (kGL_GrGLStandard == fStandard &&
+ (nullptr == fFunctions.fGetTexLevelParameteriv ||
+ nullptr == fFunctions.fDrawBuffer ||
+ nullptr == fFunctions.fReadBuffer)) {
+ RETURN_FALSE_INTERFACE
+ }
+
+    // Texture storage is core in desktop GL 4.2.
+    // There is a desktop ARB extension and an ES+desktop EXT extension.
+ if (kGL_GrGLStandard == fStandard) {
+ if (glVer >= GR_GL_VER(4,2) ||
+ fExtensions.has("GL_ARB_texture_storage") ||
+ fExtensions.has("GL_EXT_texture_storage")) {
+ if (nullptr == fFunctions.fTexStorage2D) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ } else if (glVer >= GR_GL_VER(3,0) || fExtensions.has("GL_EXT_texture_storage")) {
+ if (nullptr == fFunctions.fTexStorage2D) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ // glTextureBarrier is part of desktop 4.5. There are also ARB and NV extensions.
+ if (kGL_GrGLStandard == fStandard) {
+ if (glVer >= GR_GL_VER(4,5) ||
+ fExtensions.has("GL_ARB_texture_barrier") ||
+ fExtensions.has("GL_NV_texture_barrier")) {
+ if (nullptr == fFunctions.fTextureBarrier) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ } else if (fExtensions.has("GL_NV_texture_barrier")) {
+ if (nullptr == fFunctions.fTextureBarrier) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ if (fExtensions.has("GL_KHR_blend_equation_advanced") ||
+ fExtensions.has("GL_NV_blend_equation_advanced")) {
+ if (nullptr == fFunctions.fBlendBarrier) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ if (fExtensions.has("GL_EXT_discard_framebuffer")) {
+ if (nullptr == fFunctions.fDiscardFramebuffer) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ // FBO MSAA
+ if (kGL_GrGLStandard == fStandard) {
+ // GL 3.0 and the ARB extension have multisample + blit
+ if (glVer >= GR_GL_VER(3,0) || fExtensions.has("GL_ARB_framebuffer_object")) {
+ if (nullptr == fFunctions.fRenderbufferStorageMultisample ||
+ nullptr == fFunctions.fBlitFramebuffer) {
+ RETURN_FALSE_INTERFACE
+ }
+ } else {
+ if (fExtensions.has("GL_EXT_framebuffer_blit") &&
+ nullptr == fFunctions.fBlitFramebuffer) {
+ RETURN_FALSE_INTERFACE
+ }
+ if (fExtensions.has("GL_EXT_framebuffer_multisample") &&
+ nullptr == fFunctions.fRenderbufferStorageMultisample) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ } else {
+ if (glVer >= GR_GL_VER(3,0) || fExtensions.has("GL_CHROMIUM_framebuffer_multisample")) {
+ if (nullptr == fFunctions.fRenderbufferStorageMultisample ||
+ nullptr == fFunctions.fBlitFramebuffer) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ if (fExtensions.has("GL_APPLE_framebuffer_multisample")) {
+ if (nullptr == fFunctions.fRenderbufferStorageMultisampleES2APPLE ||
+ nullptr == fFunctions.fResolveMultisampleFramebuffer) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ if (fExtensions.has("GL_IMG_multisampled_render_to_texture") ||
+ fExtensions.has("GL_EXT_multisampled_render_to_texture")) {
+ if (nullptr == fFunctions.fRenderbufferStorageMultisampleES2EXT ||
+ nullptr == fFunctions.fFramebufferTexture2DMultisample) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ }
+
+ // On ES buffer mapping is an extension. On Desktop
+ // buffer mapping was part of original VBO extension
+ // which we require.
+ if (kGL_GrGLStandard == fStandard || fExtensions.has("GL_OES_mapbuffer")) {
+ if (nullptr == fFunctions.fMapBuffer ||
+ nullptr == fFunctions.fUnmapBuffer) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ // Dual source blending
+ if (kGL_GrGLStandard == fStandard) {
+ if (glVer >= GR_GL_VER(3,3) || fExtensions.has("GL_ARB_blend_func_extended")) {
+ if (nullptr == fFunctions.fBindFragDataLocationIndexed) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ } else {
+ if (glVer >= GR_GL_VER(3,0) && fExtensions.has("GL_EXT_blend_func_extended")) {
+ if (nullptr == fFunctions.fBindFragDataLocation ||
+ nullptr == fFunctions.fBindFragDataLocationIndexed) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ }
+
+
+ // glGetStringi was added in version 3.0 of both desktop and ES.
+ if (glVer >= GR_GL_VER(3, 0)) {
+ if (nullptr == fFunctions.fGetStringi) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ // glVertexAttribIPointer was added in version 3.0 of both desktop and ES.
+ if (glVer >= GR_GL_VER(3, 0)) {
+ if (NULL == fFunctions.fVertexAttribIPointer) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ if (kGL_GrGLStandard == fStandard) {
+ if (glVer >= GR_GL_VER(3,1)) {
+ if (nullptr == fFunctions.fTexBuffer) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+ if (glVer >= GR_GL_VER(4,3)) {
+ if (nullptr == fFunctions.fTexBufferRange) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+ } else {
+ if (glVer >= GR_GL_VER(3,2) || fExtensions.has("GL_OES_texture_buffer") ||
+ fExtensions.has("GL_EXT_texture_buffer")) {
+ if (nullptr == fFunctions.fTexBuffer ||
+ nullptr == fFunctions.fTexBufferRange) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+ }
+
+ if (kGL_GrGLStandard == fStandard) {
+ if (glVer >= GR_GL_VER(3, 0) || fExtensions.has("GL_ARB_vertex_array_object")) {
+ if (nullptr == fFunctions.fBindVertexArray ||
+ nullptr == fFunctions.fDeleteVertexArrays ||
+ nullptr == fFunctions.fGenVertexArrays) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ } else {
+ if (glVer >= GR_GL_VER(3,0) || fExtensions.has("GL_OES_vertex_array_object")) {
+ if (nullptr == fFunctions.fBindVertexArray ||
+ nullptr == fFunctions.fDeleteVertexArrays ||
+ nullptr == fFunctions.fGenVertexArrays) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ }
+
+ if (fExtensions.has("GL_EXT_debug_marker")) {
+ if (nullptr == fFunctions.fInsertEventMarker ||
+ nullptr == fFunctions.fPushGroupMarker ||
+ nullptr == fFunctions.fPopGroupMarker) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ if ((kGL_GrGLStandard == fStandard && glVer >= GR_GL_VER(4,3)) ||
+ fExtensions.has("GL_ARB_invalidate_subdata")) {
+ if (nullptr == fFunctions.fInvalidateBufferData ||
+ nullptr == fFunctions.fInvalidateBufferSubData ||
+ nullptr == fFunctions.fInvalidateFramebuffer ||
+ nullptr == fFunctions.fInvalidateSubFramebuffer ||
+ nullptr == fFunctions.fInvalidateTexImage ||
+ nullptr == fFunctions.fInvalidateTexSubImage) {
+ RETURN_FALSE_INTERFACE;
+ }
+ } else if (kGLES_GrGLStandard == fStandard && glVer >= GR_GL_VER(3,0)) {
+ // ES 3.0 adds the framebuffer functions but not the others.
+ if (nullptr == fFunctions.fInvalidateFramebuffer ||
+ nullptr == fFunctions.fInvalidateSubFramebuffer) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if (kGLES_GrGLStandard == fStandard && fExtensions.has("GL_CHROMIUM_map_sub")) {
+ if (nullptr == fFunctions.fMapBufferSubData ||
+ nullptr == fFunctions.fMapTexSubImage2D ||
+ nullptr == fFunctions.fUnmapBufferSubData ||
+ nullptr == fFunctions.fUnmapTexSubImage2D) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ // These functions are added to the 3.0 version of both GLES and GL.
+ if (glVer >= GR_GL_VER(3,0) ||
+ (kGLES_GrGLStandard == fStandard && fExtensions.has("GL_EXT_map_buffer_range")) ||
+ (kGL_GrGLStandard == fStandard && fExtensions.has("GL_ARB_map_buffer_range"))) {
+ if (nullptr == fFunctions.fMapBufferRange ||
+ nullptr == fFunctions.fFlushMappedBufferRange) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ if ((kGL_GrGLStandard == fStandard &&
+ (glVer >= GR_GL_VER(3,2) || fExtensions.has("GL_ARB_texture_multisample"))) ||
+ (kGLES_GrGLStandard == fStandard && glVer >= GR_GL_VER(3,1))) {
+ if (NULL == fFunctions.fGetMultisamplefv) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ if ((kGL_GrGLStandard == fStandard &&
+ (glVer >= GR_GL_VER(4,3) || fExtensions.has("GL_ARB_program_interface_query"))) ||
+ (kGLES_GrGLStandard == fStandard && glVer >= GR_GL_VER(3,1))) {
+ if (nullptr == fFunctions.fGetProgramResourceLocation) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ if (kGLES_GrGLStandard == fStandard || glVer >= GR_GL_VER(4,1) ||
+ fExtensions.has("GL_ARB_ES2_compatibility")) {
+ if (nullptr == fFunctions.fGetShaderPrecisionFormat) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ if (fExtensions.has("GL_NV_path_rendering") || fExtensions.has("GL_CHROMIUM_path_rendering")) {
+ if (nullptr == fFunctions.fMatrixLoadf ||
+ nullptr == fFunctions.fMatrixLoadIdentity ||
+ nullptr == fFunctions.fPathCommands ||
+ nullptr == fFunctions.fPathParameteri ||
+ nullptr == fFunctions.fPathParameterf ||
+ nullptr == fFunctions.fGenPaths ||
+ nullptr == fFunctions.fDeletePaths ||
+ nullptr == fFunctions.fIsPath ||
+ nullptr == fFunctions.fPathStencilFunc ||
+ nullptr == fFunctions.fStencilFillPath ||
+ nullptr == fFunctions.fStencilStrokePath ||
+ nullptr == fFunctions.fStencilFillPathInstanced ||
+ nullptr == fFunctions.fStencilStrokePathInstanced ||
+ nullptr == fFunctions.fCoverFillPath ||
+ nullptr == fFunctions.fCoverStrokePath ||
+ nullptr == fFunctions.fCoverFillPathInstanced ||
+ nullptr == fFunctions.fCoverStrokePathInstanced
+#if 0
+ // List of functions that Skia uses, but which have been added since the initial release
+ // of NV_path_rendering driver. We do not want to fail interface validation due to
+ // missing features, we will just not use the extension.
+ // Update this list -> update GrGLCaps::hasPathRenderingSupport too.
+ || nullptr == fFunctions.fStencilThenCoverFillPath ||
+ nullptr == fFunctions.fStencilThenCoverStrokePath ||
+ nullptr == fFunctions.fStencilThenCoverFillPathInstanced ||
+ nullptr == fFunctions.fStencilThenCoverStrokePathInstanced ||
+ nullptr == fFunctions.fProgramPathFragmentInputGen
+#endif
+ ) {
+ RETURN_FALSE_INTERFACE
+ }
+ if (fExtensions.has("GL_CHROMIUM_path_rendering")) {
+ if (nullptr == fFunctions.fBindFragmentInputLocation) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ }
+
+ if (fExtensions.has("GL_EXT_raster_multisample")) {
+ if (nullptr == fFunctions.fRasterSamples) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ if (fExtensions.has("GL_NV_framebuffer_mixed_samples") ||
+ fExtensions.has("GL_CHROMIUM_framebuffer_mixed_samples")) {
+ if (nullptr == fFunctions.fCoverageModulation) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ if (kGL_GrGLStandard == fStandard) {
+ if (glVer >= GR_GL_VER(3,1) ||
+ fExtensions.has("GL_EXT_draw_instanced") || fExtensions.has("GL_ARB_draw_instanced")) {
+ if (nullptr == fFunctions.fDrawArraysInstanced ||
+ nullptr == fFunctions.fDrawElementsInstanced) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ } else if (kGLES_GrGLStandard == fStandard) {
+ if (glVer >= GR_GL_VER(3,0) || fExtensions.has("GL_EXT_draw_instanced")) {
+ if (nullptr == fFunctions.fDrawArraysInstanced ||
+ nullptr == fFunctions.fDrawElementsInstanced) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ }
+
+ if (kGL_GrGLStandard == fStandard) {
+ if (glVer >= GR_GL_VER(3,2) || fExtensions.has("GL_ARB_instanced_arrays")) {
+ if (nullptr == fFunctions.fVertexAttribDivisor) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ } else if (kGLES_GrGLStandard == fStandard) {
+ if (glVer >= GR_GL_VER(3,0) || fExtensions.has("GL_EXT_instanced_arrays")) {
+ if (nullptr == fFunctions.fVertexAttribDivisor) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ }
+
+ if ((kGL_GrGLStandard == fStandard &&
+ (glVer >= GR_GL_VER(4,0) || fExtensions.has("GL_ARB_draw_indirect"))) ||
+ (kGLES_GrGLStandard == fStandard && glVer >= GR_GL_VER(3,1))) {
+ if (NULL == fFunctions.fDrawArraysIndirect ||
+ NULL == fFunctions.fDrawElementsIndirect) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ if ((kGL_GrGLStandard == fStandard &&
+ (glVer >= GR_GL_VER(4,3) || fExtensions.has("GL_ARB_multi_draw_indirect"))) ||
+ (kGLES_GrGLStandard == fStandard && fExtensions.has("GL_EXT_multi_draw_indirect"))) {
+ if (NULL == fFunctions.fMultiDrawArraysIndirect ||
+ NULL == fFunctions.fMultiDrawElementsIndirect) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ if (fExtensions.has("GL_NV_bindless_texture")) {
+ if (nullptr == fFunctions.fGetTextureHandle ||
+ nullptr == fFunctions.fGetTextureSamplerHandle ||
+ nullptr == fFunctions.fMakeTextureHandleResident ||
+ nullptr == fFunctions.fMakeTextureHandleNonResident ||
+ nullptr == fFunctions.fGetImageHandle ||
+ nullptr == fFunctions.fMakeImageHandleResident ||
+ nullptr == fFunctions.fMakeImageHandleNonResident ||
+ nullptr == fFunctions.fIsTextureHandleResident ||
+ nullptr == fFunctions.fIsImageHandleResident ||
+ nullptr == fFunctions.fUniformHandleui64 ||
+ nullptr == fFunctions.fUniformHandleui64v ||
+ nullptr == fFunctions.fProgramUniformHandleui64 ||
+ nullptr == fFunctions.fProgramUniformHandleui64v) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ if (kGL_GrGLStandard == fStandard && fExtensions.has("GL_EXT_direct_state_access")) {
+ if (nullptr == fFunctions.fTextureParameteri ||
+ nullptr == fFunctions.fTextureParameteriv ||
+ nullptr == fFunctions.fTextureParameterf ||
+ nullptr == fFunctions.fTextureParameterfv ||
+ nullptr == fFunctions.fTextureImage1D ||
+ nullptr == fFunctions.fTextureImage2D ||
+ nullptr == fFunctions.fTextureSubImage1D ||
+ nullptr == fFunctions.fTextureSubImage2D ||
+ nullptr == fFunctions.fCopyTextureImage1D ||
+ nullptr == fFunctions.fCopyTextureImage2D ||
+ nullptr == fFunctions.fCopyTextureSubImage1D ||
+ nullptr == fFunctions.fCopyTextureSubImage2D ||
+ nullptr == fFunctions.fGetTextureImage ||
+ nullptr == fFunctions.fGetTextureParameterfv ||
+ nullptr == fFunctions.fGetTextureParameteriv ||
+ nullptr == fFunctions.fGetTextureLevelParameterfv ||
+ nullptr == fFunctions.fGetTextureLevelParameteriv) {
+ RETURN_FALSE_INTERFACE
+ }
+ if (glVer >= GR_GL_VER(1,2)) {
+ if (nullptr == fFunctions.fTextureImage3D ||
+ nullptr == fFunctions.fTextureSubImage3D ||
+ nullptr == fFunctions.fCopyTextureSubImage3D ||
+ nullptr == fFunctions.fCompressedTextureImage3D ||
+ nullptr == fFunctions.fCompressedTextureImage2D ||
+ nullptr == fFunctions.fCompressedTextureImage1D ||
+ nullptr == fFunctions.fCompressedTextureSubImage3D ||
+ nullptr == fFunctions.fCompressedTextureSubImage2D ||
+ nullptr == fFunctions.fCompressedTextureSubImage1D ||
+ nullptr == fFunctions.fGetCompressedTextureImage) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ if (glVer >= GR_GL_VER(1,5)) {
+ if (nullptr == fFunctions.fNamedBufferData ||
+ nullptr == fFunctions.fNamedBufferSubData ||
+ nullptr == fFunctions.fMapNamedBuffer ||
+ nullptr == fFunctions.fUnmapNamedBuffer ||
+ nullptr == fFunctions.fGetNamedBufferParameteriv ||
+ nullptr == fFunctions.fGetNamedBufferPointerv ||
+ nullptr == fFunctions.fGetNamedBufferSubData) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ if (glVer >= GR_GL_VER(2,0)) {
+ if (nullptr == fFunctions.fProgramUniform1f ||
+ nullptr == fFunctions.fProgramUniform2f ||
+ nullptr == fFunctions.fProgramUniform3f ||
+ nullptr == fFunctions.fProgramUniform4f ||
+ nullptr == fFunctions.fProgramUniform1i ||
+ nullptr == fFunctions.fProgramUniform2i ||
+ nullptr == fFunctions.fProgramUniform3i ||
+ nullptr == fFunctions.fProgramUniform4i ||
+ nullptr == fFunctions.fProgramUniform1fv ||
+ nullptr == fFunctions.fProgramUniform2fv ||
+ nullptr == fFunctions.fProgramUniform3fv ||
+ nullptr == fFunctions.fProgramUniform4fv ||
+ nullptr == fFunctions.fProgramUniform1iv ||
+ nullptr == fFunctions.fProgramUniform2iv ||
+ nullptr == fFunctions.fProgramUniform3iv ||
+ nullptr == fFunctions.fProgramUniform4iv ||
+ nullptr == fFunctions.fProgramUniformMatrix2fv ||
+ nullptr == fFunctions.fProgramUniformMatrix3fv ||
+ nullptr == fFunctions.fProgramUniformMatrix4fv) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ if (glVer >= GR_GL_VER(2,1)) {
+ if (nullptr == fFunctions.fProgramUniformMatrix2x3fv ||
+ nullptr == fFunctions.fProgramUniformMatrix3x2fv ||
+ nullptr == fFunctions.fProgramUniformMatrix2x4fv ||
+ nullptr == fFunctions.fProgramUniformMatrix4x2fv ||
+ nullptr == fFunctions.fProgramUniformMatrix3x4fv ||
+ nullptr == fFunctions.fProgramUniformMatrix4x3fv) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ if (glVer >= GR_GL_VER(3,0)) {
+ if (nullptr == fFunctions.fNamedRenderbufferStorage ||
+ nullptr == fFunctions.fGetNamedRenderbufferParameteriv ||
+ nullptr == fFunctions.fNamedRenderbufferStorageMultisample ||
+ nullptr == fFunctions.fCheckNamedFramebufferStatus ||
+ nullptr == fFunctions.fNamedFramebufferTexture1D ||
+ nullptr == fFunctions.fNamedFramebufferTexture2D ||
+ nullptr == fFunctions.fNamedFramebufferTexture3D ||
+ nullptr == fFunctions.fNamedFramebufferRenderbuffer ||
+ nullptr == fFunctions.fGetNamedFramebufferAttachmentParameteriv ||
+ nullptr == fFunctions.fGenerateTextureMipmap ||
+ nullptr == fFunctions.fFramebufferDrawBuffer ||
+ nullptr == fFunctions.fFramebufferDrawBuffers ||
+ nullptr == fFunctions.fFramebufferReadBuffer ||
+ nullptr == fFunctions.fGetFramebufferParameteriv ||
+ nullptr == fFunctions.fNamedCopyBufferSubData ||
+ nullptr == fFunctions.fVertexArrayVertexOffset ||
+ nullptr == fFunctions.fVertexArrayColorOffset ||
+ nullptr == fFunctions.fVertexArrayEdgeFlagOffset ||
+ nullptr == fFunctions.fVertexArrayIndexOffset ||
+ nullptr == fFunctions.fVertexArrayNormalOffset ||
+ nullptr == fFunctions.fVertexArrayTexCoordOffset ||
+ nullptr == fFunctions.fVertexArrayMultiTexCoordOffset ||
+ nullptr == fFunctions.fVertexArrayFogCoordOffset ||
+ nullptr == fFunctions.fVertexArraySecondaryColorOffset ||
+ nullptr == fFunctions.fVertexArrayVertexAttribOffset ||
+ nullptr == fFunctions.fVertexArrayVertexAttribIOffset ||
+ nullptr == fFunctions.fEnableVertexArray ||
+ nullptr == fFunctions.fDisableVertexArray ||
+ nullptr == fFunctions.fEnableVertexArrayAttrib ||
+ nullptr == fFunctions.fDisableVertexArrayAttrib ||
+ nullptr == fFunctions.fGetVertexArrayIntegerv ||
+ nullptr == fFunctions.fGetVertexArrayPointerv ||
+ nullptr == fFunctions.fGetVertexArrayIntegeri_v ||
+ nullptr == fFunctions.fGetVertexArrayPointeri_v ||
+ nullptr == fFunctions.fMapNamedBufferRange ||
+ nullptr == fFunctions.fFlushMappedNamedBufferRange) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ if (glVer >= GR_GL_VER(3,1)) {
+ if (nullptr == fFunctions.fTextureBuffer) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+ }
+
+ if ((kGL_GrGLStandard == fStandard && glVer >= GR_GL_VER(4,3)) ||
+ fExtensions.has("GL_KHR_debug")) {
+ if (nullptr == fFunctions.fDebugMessageControl ||
+ nullptr == fFunctions.fDebugMessageInsert ||
+ nullptr == fFunctions.fDebugMessageCallback ||
+ nullptr == fFunctions.fGetDebugMessageLog ||
+ nullptr == fFunctions.fPushDebugGroup ||
+ nullptr == fFunctions.fPopDebugGroup ||
+ nullptr == fFunctions.fObjectLabel) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ if (fExtensions.has("GL_EXT_window_rectangles")) {
+ if (nullptr == fFunctions.fWindowRectangles) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ if ((kGL_GrGLStandard == fStandard && glVer >= GR_GL_VER(4,0)) ||
+ fExtensions.has("GL_ARB_sample_shading")) {
+ if (nullptr == fFunctions.fMinSampleShading) {
+ RETURN_FALSE_INTERFACE
+ }
+ } else if (kGLES_GrGLStandard == fStandard && fExtensions.has("GL_OES_sample_shading")) {
+ if (nullptr == fFunctions.fMinSampleShading) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ if (kGL_GrGLStandard == fStandard) {
+ if (glVer >= GR_GL_VER(3, 2) || fExtensions.has("GL_ARB_sync")) {
+ if (nullptr == fFunctions.fFenceSync ||
+ nullptr == fFunctions.fClientWaitSync ||
+ nullptr == fFunctions.fDeleteSync) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ } else if (kGLES_GrGLStandard == fStandard) {
+ if (glVer >= GR_GL_VER(3, 0)) {
+ if (nullptr == fFunctions.fFenceSync ||
+ nullptr == fFunctions.fClientWaitSync ||
+ nullptr == fFunctions.fDeleteSync) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+ }
+
+ if (fExtensions.has("EGL_KHR_image") || fExtensions.has("EGL_KHR_image_base")) {
+ if (nullptr == fFunctions.fEGLCreateImage ||
+ nullptr == fFunctions.fEGLDestroyImage) {
+ RETURN_FALSE_INTERFACE
+ }
+ }
+
+ if (kGL_GrGLStandard == fStandard && glVer >= GR_GL_VER(2,0)) {
+ if (nullptr == fFunctions.fDrawRangeElements) {
+ RETURN_FALSE_INTERFACE;
+ }
+ } else if (kGLES_GrGLStandard == fStandard && glVer >= GR_GL_VER(3,0)) {
+ if (nullptr == fFunctions.fDrawRangeElements) {
+ RETURN_FALSE_INTERFACE;
+ }
+ }
+
+ return true;
+}
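
validate() is one long application of a single rule: for each feature, decide from the GL standard, context version, and extension list whether Skia will call a given entry point, and fail if the corresponding function pointer is null. The sketch below expresses that rule as a reusable helper and uses the desktop branch of the "Dual source blending" check above as the worked case; the names are hypothetical stand-ins, not Skia API.

#include <cstdio>
#include <set>
#include <string>

struct GLFeatureInfo {
    bool isES;                         // GLES vs. desktop GL
    int  major, minor;                 // context version
    std::set<std::string> extensions;

    bool version(int maj, int min) const {
        return major > maj || (major == maj && minor >= min);
    }
    bool has(const char* ext) const { return extensions.count(ext) != 0; }
};

// Fails (and logs, like RETURN_FALSE_INTERFACE) when a required pointer is null.
template <typename Fn>
bool requireIf(bool required, Fn fn, const char* name) {
    if (required && nullptr == fn) {
        std::fprintf(stderr, "validate() failed: missing %s\n", name);
        return false;
    }
    return true;
}

// Worked case: the desktop branch of the "Dual source blending" check above.
bool validateDualSourceBlending(const GLFeatureInfo& gl,
                                void (*bindFragDataLocationIndexed)()) {
    bool required = !gl.isES &&
                    (gl.version(3, 3) || gl.has("GL_ARB_blend_func_extended"));
    return requireIf(required, bindFragDataLocationIndexed,
                     "glBindFragDataLocationIndexed");
}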
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLPath.cpp b/gfx/skia/skia/src/gpu/gl/GrGLPath.cpp
new file mode 100644
index 000000000..05460187e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLPath.cpp
@@ -0,0 +1,345 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLPath.h"
+#include "GrGLPathRendering.h"
+#include "GrGLGpu.h"
+#include "GrStyle.h"
+
+namespace {
+inline GrGLubyte verb_to_gl_path_cmd(SkPath::Verb verb) {
+ static const GrGLubyte gTable[] = {
+ GR_GL_MOVE_TO,
+ GR_GL_LINE_TO,
+ GR_GL_QUADRATIC_CURVE_TO,
+ GR_GL_CONIC_CURVE_TO,
+ GR_GL_CUBIC_CURVE_TO,
+ GR_GL_CLOSE_PATH,
+ };
+ GR_STATIC_ASSERT(0 == SkPath::kMove_Verb);
+ GR_STATIC_ASSERT(1 == SkPath::kLine_Verb);
+ GR_STATIC_ASSERT(2 == SkPath::kQuad_Verb);
+ GR_STATIC_ASSERT(3 == SkPath::kConic_Verb);
+ GR_STATIC_ASSERT(4 == SkPath::kCubic_Verb);
+ GR_STATIC_ASSERT(5 == SkPath::kClose_Verb);
+
+ SkASSERT(verb >= 0 && (size_t)verb < SK_ARRAY_COUNT(gTable));
+ return gTable[verb];
+}
+
+#ifdef SK_DEBUG
+inline int num_coords(SkPath::Verb verb) {
+ static const int gTable[] = {
+ 2, // move
+ 2, // line
+ 4, // quad
+ 5, // conic
+ 6, // cubic
+ 0, // close
+ };
+ GR_STATIC_ASSERT(0 == SkPath::kMove_Verb);
+ GR_STATIC_ASSERT(1 == SkPath::kLine_Verb);
+ GR_STATIC_ASSERT(2 == SkPath::kQuad_Verb);
+ GR_STATIC_ASSERT(3 == SkPath::kConic_Verb);
+ GR_STATIC_ASSERT(4 == SkPath::kCubic_Verb);
+ GR_STATIC_ASSERT(5 == SkPath::kClose_Verb);
+
+ SkASSERT(verb >= 0 && (size_t)verb < SK_ARRAY_COUNT(gTable));
+ return gTable[verb];
+}
+#endif
+
+inline GrGLenum join_to_gl_join(SkPaint::Join join) {
+ static GrGLenum gSkJoinsToGrGLJoins[] = {
+ GR_GL_MITER_REVERT,
+ GR_GL_ROUND,
+ GR_GL_BEVEL
+ };
+ return gSkJoinsToGrGLJoins[join];
+ GR_STATIC_ASSERT(0 == SkPaint::kMiter_Join);
+ GR_STATIC_ASSERT(1 == SkPaint::kRound_Join);
+ GR_STATIC_ASSERT(2 == SkPaint::kBevel_Join);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gSkJoinsToGrGLJoins) == SkPaint::kJoinCount);
+}
+
+inline GrGLenum cap_to_gl_cap(SkPaint::Cap cap) {
+ static GrGLenum gSkCapsToGrGLCaps[] = {
+ GR_GL_FLAT,
+ GR_GL_ROUND,
+ GR_GL_SQUARE
+ };
+ return gSkCapsToGrGLCaps[cap];
+ GR_STATIC_ASSERT(0 == SkPaint::kButt_Cap);
+ GR_STATIC_ASSERT(1 == SkPaint::kRound_Cap);
+ GR_STATIC_ASSERT(2 == SkPaint::kSquare_Cap);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gSkCapsToGrGLCaps) == SkPaint::kCapCount);
+}
+
+#ifdef SK_DEBUG
+inline void verify_floats(const float* floats, int count) {
+ for (int i = 0; i < count; ++i) {
+ SkASSERT(!SkScalarIsNaN(SkFloatToScalar(floats[i])));
+ }
+}
+#endif
+
+inline void points_to_coords(const SkPoint points[], size_t first_point, size_t amount,
+ GrGLfloat coords[]) {
+ for (size_t i = 0; i < amount; ++i) {
+ coords[i * 2] = SkScalarToFloat(points[first_point + i].fX);
+ coords[i * 2 + 1] = SkScalarToFloat(points[first_point + i].fY);
+ }
+}
+
+template<bool checkForDegenerates>
+inline bool init_path_object_for_general_path(GrGLGpu* gpu, GrGLuint pathID,
+ const SkPath& skPath) {
+ SkDEBUGCODE(int numCoords = 0);
+ int verbCnt = skPath.countVerbs();
+ int pointCnt = skPath.countPoints();
+ int minCoordCnt = pointCnt * 2;
+
+ SkSTArray<16, GrGLubyte, true> pathCommands(verbCnt);
+ SkSTArray<16, GrGLfloat, true> pathCoords(minCoordCnt);
+ bool lastVerbWasMove = true; // A path with just "close;" means "moveto(0,0); close;"
+ SkPoint points[4];
+ SkPath::RawIter iter(skPath);
+ SkPath::Verb verb;
+ while ((verb = iter.next(points)) != SkPath::kDone_Verb) {
+ pathCommands.push_back(verb_to_gl_path_cmd(verb));
+ GrGLfloat coords[6];
+ int coordsForVerb;
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ if (checkForDegenerates) {
+ lastVerbWasMove = true;
+ }
+ points_to_coords(points, 0, 1, coords);
+ coordsForVerb = 2;
+ break;
+ case SkPath::kLine_Verb:
+ if (checkForDegenerates) {
+ if (SkPath::IsLineDegenerate(points[0], points[1], true)) {
+ return false;
+ }
+ lastVerbWasMove = false;
+ }
+
+ points_to_coords(points, 1, 1, coords);
+ coordsForVerb = 2;
+ break;
+ case SkPath::kConic_Verb:
+ if (checkForDegenerates) {
+ if (SkPath::IsQuadDegenerate(points[0], points[1], points[2], true)) {
+ return false;
+ }
+ lastVerbWasMove = false;
+ }
+ points_to_coords(points, 1, 2, coords);
+ coords[4] = SkScalarToFloat(iter.conicWeight());
+ coordsForVerb = 5;
+ break;
+ case SkPath::kQuad_Verb:
+ if (checkForDegenerates) {
+ if (SkPath::IsQuadDegenerate(points[0], points[1], points[2], true)) {
+ return false;
+ }
+ lastVerbWasMove = false;
+ }
+ points_to_coords(points, 1, 2, coords);
+ coordsForVerb = 4;
+ break;
+ case SkPath::kCubic_Verb:
+ if (checkForDegenerates) {
+ if (SkPath::IsCubicDegenerate(points[0], points[1], points[2], points[3],
+ true)) {
+ return false;
+ }
+ lastVerbWasMove = false;
+ }
+ points_to_coords(points, 1, 3, coords);
+ coordsForVerb = 6;
+ break;
+ case SkPath::kClose_Verb:
+ if (checkForDegenerates) {
+ if (lastVerbWasMove) {
+                        // Interpret "move(x,y);close;" as "move(x,y);lineto(x,y);close;",
+                        // which produces a degenerate segment.
+ return false;
+ }
+ }
+ continue;
+ default:
+ SkASSERT(false); // Not reached.
+ continue;
+ }
+ SkDEBUGCODE(numCoords += num_coords(verb));
+ SkDEBUGCODE(verify_floats(coords, coordsForVerb));
+ pathCoords.push_back_n(coordsForVerb, coords);
+ }
+ SkASSERT(verbCnt == pathCommands.count());
+ SkASSERT(numCoords == pathCoords.count());
+
+ GR_GL_CALL(gpu->glInterface(),
+ PathCommands(pathID, pathCommands.count(), pathCommands.begin(),
+ pathCoords.count(), GR_GL_FLOAT, pathCoords.begin()));
+ return true;
+}
+
+/*
+ * For now paths only natively support winding and even odd fill types
+ */
+static GrPathRendering::FillType convert_skpath_filltype(SkPath::FillType fill) {
+ switch (fill) {
+ default:
+ SkFAIL("Incomplete Switch\n");
+ case SkPath::kWinding_FillType:
+ case SkPath::kInverseWinding_FillType:
+ return GrPathRendering::kWinding_FillType;
+ case SkPath::kEvenOdd_FillType:
+ case SkPath::kInverseEvenOdd_FillType:
+ return GrPathRendering::kEvenOdd_FillType;
+ }
+}
+
+} // namespace
+
+bool GrGLPath::InitPathObjectPathDataCheckingDegenerates(GrGLGpu* gpu, GrGLuint pathID,
+ const SkPath& skPath) {
+ return init_path_object_for_general_path<true>(gpu, pathID, skPath);
+}
+
+void GrGLPath::InitPathObjectPathData(GrGLGpu* gpu,
+ GrGLuint pathID,
+ const SkPath& skPath) {
+ SkASSERT(!skPath.isEmpty());
+
+#ifdef SK_SCALAR_IS_FLOAT
+ // This branch does type punning, converting SkPoint* to GrGLfloat*.
+ if ((skPath.getSegmentMasks() & SkPath::kConic_SegmentMask) == 0) {
+ int verbCnt = skPath.countVerbs();
+ int pointCnt = skPath.countPoints();
+ int coordCnt = pointCnt * 2;
+ SkSTArray<16, GrGLubyte, true> pathCommands(verbCnt);
+ SkSTArray<16, GrGLfloat, true> pathCoords(coordCnt);
+
+ static_assert(sizeof(SkPoint) == sizeof(GrGLfloat) * 2, "sk_point_not_two_floats");
+
+ pathCommands.resize_back(verbCnt);
+ pathCoords.resize_back(coordCnt);
+ skPath.getPoints(reinterpret_cast<SkPoint*>(&pathCoords[0]), pointCnt);
+ skPath.getVerbs(&pathCommands[0], verbCnt);
+
+ SkDEBUGCODE(int verbCoordCnt = 0);
+ for (int i = 0; i < verbCnt; ++i) {
+ SkPath::Verb v = static_cast<SkPath::Verb>(pathCommands[i]);
+ pathCommands[i] = verb_to_gl_path_cmd(v);
+ SkDEBUGCODE(verbCoordCnt += num_coords(v));
+ }
+ SkASSERT(verbCnt == pathCommands.count());
+ SkASSERT(verbCoordCnt == pathCoords.count());
+ SkDEBUGCODE(verify_floats(&pathCoords[0], pathCoords.count()));
+ GR_GL_CALL(gpu->glInterface(), PathCommands(pathID, pathCommands.count(), &pathCommands[0],
+ pathCoords.count(), GR_GL_FLOAT,
+ &pathCoords[0]));
+ return;
+ }
+#endif
+ SkAssertResult(init_path_object_for_general_path<false>(gpu, pathID, skPath));
+}
+
+void GrGLPath::InitPathObjectStroke(GrGLGpu* gpu, GrGLuint pathID, const SkStrokeRec& stroke) {
+ SkASSERT(!stroke.isHairlineStyle());
+ GR_GL_CALL(gpu->glInterface(),
+ PathParameterf(pathID, GR_GL_PATH_STROKE_WIDTH, SkScalarToFloat(stroke.getWidth())));
+ GR_GL_CALL(gpu->glInterface(),
+ PathParameterf(pathID, GR_GL_PATH_MITER_LIMIT, SkScalarToFloat(stroke.getMiter())));
+ GrGLenum join = join_to_gl_join(stroke.getJoin());
+ GR_GL_CALL(gpu->glInterface(), PathParameteri(pathID, GR_GL_PATH_JOIN_STYLE, join));
+ GrGLenum cap = cap_to_gl_cap(stroke.getCap());
+ GR_GL_CALL(gpu->glInterface(), PathParameteri(pathID, GR_GL_PATH_END_CAPS, cap));
+ GR_GL_CALL(gpu->glInterface(), PathParameterf(pathID, GR_GL_PATH_STROKE_BOUND, 0.02f));
+}
+
+void GrGLPath::InitPathObjectEmptyPath(GrGLGpu* gpu, GrGLuint pathID) {
+ GR_GL_CALL(gpu->glInterface(), PathCommands(pathID, 0, nullptr, 0, GR_GL_FLOAT, nullptr));
+}
+
+GrGLPath::GrGLPath(GrGLGpu* gpu, const SkPath& origSkPath, const GrStyle& style)
+ : INHERITED(gpu, origSkPath, style),
+ fPathID(gpu->glPathRendering()->genPaths(1)) {
+
+ if (origSkPath.isEmpty()) {
+ InitPathObjectEmptyPath(gpu, fPathID);
+ fShouldStroke = false;
+ fShouldFill = false;
+ } else {
+ const SkPath* skPath = &origSkPath;
+ SkTLazy<SkPath> tmpPath;
+ SkStrokeRec stroke(SkStrokeRec::kFill_InitStyle);
+
+ if (style.pathEffect()) {
+ // Skia stroking and NVPR stroking differ with respect to dashing
+ // pattern.
+ // Convert a dashing (or other path effect) to either a stroke or a fill.
+ if (style.applyPathEffectToPath(tmpPath.init(), &stroke, *skPath, SK_Scalar1)) {
+ skPath = tmpPath.get();
+ }
+ } else {
+ stroke = style.strokeRec();
+ }
+
+ bool didInit = false;
+ if (stroke.needToApply() && stroke.getCap() != SkPaint::kButt_Cap) {
+ // Skia stroking and NVPR stroking differ with respect to stroking
+ // end caps of empty subpaths.
+ // Convert stroke to fill if path contains empty subpaths.
+ didInit = InitPathObjectPathDataCheckingDegenerates(gpu, fPathID, *skPath);
+ if (!didInit) {
+ if (!tmpPath.isValid()) {
+ tmpPath.init();
+ }
+ SkAssertResult(stroke.applyToPath(tmpPath.get(), *skPath));
+ skPath = tmpPath.get();
+ stroke.setFillStyle();
+ }
+ }
+
+ if (!didInit) {
+ InitPathObjectPathData(gpu, fPathID, *skPath);
+ }
+
+ fShouldStroke = stroke.needToApply();
+ fShouldFill = stroke.isFillStyle() ||
+ stroke.getStyle() == SkStrokeRec::kStrokeAndFill_Style;
+
+ fFillType = convert_skpath_filltype(skPath->getFillType());
+ fBounds = skPath->getBounds();
+ SkScalar radius = stroke.getInflationRadius();
+ fBounds.outset(radius, radius);
+ if (fShouldStroke) {
+ InitPathObjectStroke(gpu, fPathID, stroke);
+ }
+ }
+
+ this->registerWithCache(SkBudgeted::kYes);
+}
+
+void GrGLPath::onRelease() {
+ if (0 != fPathID) {
+ static_cast<GrGLGpu*>(this->getGpu())->glPathRendering()->deletePaths(fPathID, 1);
+ fPathID = 0;
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrGLPath::onAbandon() {
+ fPathID = 0;
+
+ INHERITED::onAbandon();
+}
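
init_path_object_for_general_path builds the two parallel arrays that the NV_path_rendering PathCommands call consumes: one command byte per verb, plus a packed float run whose length depends on the verb (2 for move and line, 4 for quad, 5 for conic including its weight, 6 for cubic, 0 for close). Below is a simplified, self-contained sketch of that flattening step with hypothetical stand-ins for the Skia and GL types; it omits the degenerate-segment checks.

#include <cstdint>
#include <vector>

enum class Verb : uint8_t { kMove, kLine, kQuad, kConic, kCubic, kClose };
struct Point { float x, y; };

struct PathData {
    std::vector<uint8_t> commands;   // one command byte per verb
    std::vector<float>   coords;     // packed x,y pairs (+ conic weights)
};

// Floats consumed per verb, matching num_coords() above.
static int coordsPerVerb(Verb v) {
    switch (v) {
        case Verb::kMove:  case Verb::kLine: return 2;
        case Verb::kQuad:                    return 4;
        case Verb::kConic:                   return 5;  // 2 points + weight
        case Verb::kCubic:                   return 6;
        case Verb::kClose:                   return 0;
    }
    return 0;
}

// 'points' holds only the points each verb introduces (the current point carried
// over from the previous verb is not repeated); conic weights arrive separately,
// as they do from SkPath::RawIter.
PathData flatten(const std::vector<Verb>& verbs,
                 const std::vector<Point>& points,
                 const std::vector<float>& conicWeights) {
    PathData out;
    size_t pt = 0, cw = 0;
    for (Verb v : verbs) {
        out.commands.push_back(static_cast<uint8_t>(v));  // real code emits GR_GL_* bytes
        int nFloats = coordsPerVerb(v);
        int nPoints = (v == Verb::kConic) ? 2 : nFloats / 2;
        for (int i = 0; i < nPoints; ++i, ++pt) {
            out.coords.push_back(points[pt].x);
            out.coords.push_back(points[pt].y);
        }
        if (v == Verb::kConic) {
            out.coords.push_back(conicWeights[cw++]);
        }
    }
    return out;
}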
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLPath.h b/gfx/skia/skia/src/gpu/gl/GrGLPath.h
new file mode 100644
index 000000000..ddcee533e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLPath.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLPath_DEFINED
+#define GrGLPath_DEFINED
+
+#include "../GrPath.h"
+#include "gl/GrGLTypes.h"
+
+class GrGLGpu;
+class GrStyle;
+
+/**
+ * Currently this represents a path built using GL_NV_path_rendering. If we
+ * support other GL path extensions then this would have to have a type enum
+ * and/or be subclassed.
+ */
+
+class GrGLPath : public GrPath {
+public:
+ static bool InitPathObjectPathDataCheckingDegenerates(GrGLGpu*,
+ GrGLuint pathID,
+ const SkPath&);
+ static void InitPathObjectPathData(GrGLGpu*,
+ GrGLuint pathID,
+ const SkPath&);
+ static void InitPathObjectStroke(GrGLGpu*, GrGLuint pathID, const SkStrokeRec&);
+
+ static void InitPathObjectEmptyPath(GrGLGpu*, GrGLuint pathID);
+
+
+ GrGLPath(GrGLGpu*, const SkPath&, const GrStyle&);
+ GrGLuint pathID() const { return fPathID; }
+
+ bool shouldStroke() const { return fShouldStroke; }
+ bool shouldFill() const { return fShouldFill; }
+protected:
+ void onRelease() override;
+ void onAbandon() override;
+
+private:
+ // TODO: Figure out how to get an approximate size of the path in Gpu memory.
+ size_t onGpuMemorySize() const override { return 100; }
+
+ GrGLuint fPathID;
+ bool fShouldStroke;
+ bool fShouldFill;
+
+ typedef GrPath INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLPathRange.cpp b/gfx/skia/skia/src/gpu/gl/GrGLPathRange.cpp
new file mode 100644
index 000000000..da1e9fe70
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLPathRange.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLPathRange.h"
+#include "GrGLPath.h"
+#include "GrGLPathRendering.h"
+#include "GrGLGpu.h"
+
+GrGLPathRange::GrGLPathRange(GrGLGpu* gpu, PathGenerator* pathGenerator, const GrStyle& style)
+ : INHERITED(gpu, pathGenerator),
+ fStyle(style),
+ fBasePathID(gpu->glPathRendering()->genPaths(this->getNumPaths())),
+ fGpuMemorySize(0) {
+ this->init();
+ this->registerWithCache(SkBudgeted::kYes);
+}
+
+GrGLPathRange::GrGLPathRange(GrGLGpu* gpu,
+ GrGLuint basePathID,
+ int numPaths,
+ size_t gpuMemorySize,
+ const GrStyle& style)
+ : INHERITED(gpu, numPaths),
+ fStyle(style),
+ fBasePathID(basePathID),
+ fGpuMemorySize(gpuMemorySize) {
+ this->init();
+ this->registerWithCache(SkBudgeted::kYes);
+}
+
+void GrGLPathRange::init() {
+ const SkStrokeRec& stroke = fStyle.strokeRec();
+ // Must force fill:
+ // * dashing: NVPR stroke dashing is different to Skia.
+ // * end caps: NVPR stroking degenerate contours with end caps is different to Skia.
+ bool forceFill = fStyle.pathEffect() ||
+ (stroke.needToApply() && stroke.getCap() != SkPaint::kButt_Cap);
+
+ if (forceFill) {
+ fShouldStroke = false;
+ fShouldFill = true;
+ } else {
+ fShouldStroke = stroke.needToApply();
+ fShouldFill = stroke.isFillStyle() ||
+ stroke.getStyle() == SkStrokeRec::kStrokeAndFill_Style;
+ }
+}
+
+void GrGLPathRange::onInitPath(int index, const SkPath& origSkPath) const {
+ GrGLGpu* gpu = static_cast<GrGLGpu*>(this->getGpu());
+ if (nullptr == gpu) {
+ return;
+ }
+ // Make sure the path at this index hasn't been initted already.
+ SkDEBUGCODE(
+ GrGLboolean isPath;
+ GR_GL_CALL_RET(gpu->glInterface(), isPath, IsPath(fBasePathID + index)));
+ SkASSERT(GR_GL_FALSE == isPath);
+
+ if (origSkPath.isEmpty()) {
+ GrGLPath::InitPathObjectEmptyPath(gpu, fBasePathID + index);
+ } else if (fShouldStroke) {
+ GrGLPath::InitPathObjectPathData(gpu, fBasePathID + index, origSkPath);
+ GrGLPath::InitPathObjectStroke(gpu, fBasePathID + index, fStyle.strokeRec());
+ } else {
+ const SkPath* skPath = &origSkPath;
+ SkTLazy<SkPath> tmpPath;
+ if (!fStyle.isSimpleFill()) {
+ SkStrokeRec::InitStyle fill;
+ // The path effect must be applied to the path. However, if a path effect is present,
+ // we must convert all the paths to fills. The path effect application may leave
+ // simple paths as strokes but converts other paths to fills.
+ // Thus we must stroke the strokes here, so that all paths in the
+ // path range are using the same style.
+ if (!fStyle.applyToPath(tmpPath.init(), &fill, *skPath, SK_Scalar1)) {
+ return;
+ }
+ // We shouldn't have allowed hairlines or arbitrary path effect styles to get here
+ // so after application we better have a filled path.
+ SkASSERT(SkStrokeRec::kFill_InitStyle == fill);
+ skPath = tmpPath.get();
+
+ }
+ GrGLPath::InitPathObjectPathData(gpu, fBasePathID + index, *skPath);
+ }
+ // TODO: Use a better approximation for the individual path sizes.
+ fGpuMemorySize += 100;
+}
+
+void GrGLPathRange::onRelease() {
+ SkASSERT(this->getGpu());
+
+ if (0 != fBasePathID) {
+ static_cast<GrGLGpu*>(this->getGpu())->glPathRendering()->deletePaths(fBasePathID,
+ this->getNumPaths());
+ fBasePathID = 0;
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrGLPathRange::onAbandon() {
+ fBasePathID = 0;
+
+ INHERITED::onAbandon();
+}
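
A GrGLPathRange reserves a contiguous block of path IDs up front but, as onInitPath shows, only uploads the data for fBasePathID + index the first time that index is drawn. Below is a minimal sketch of that lazy-upload idea, assuming a caller-supplied upload callback; note that in the real code the GrPathRange base class tracks which indices are initialized, whereas this sketch keeps its own flags.

#include <functional>
#include <utility>
#include <vector>

class LazyPathRange {
public:
    LazyPathRange(unsigned basePathID, int numPaths,
                  std::function<void(unsigned pathID, int index)> uploadPath)
        : fBasePathID(basePathID)
        , fUploadPath(std::move(uploadPath))
        , fInitialized(numPaths, false) {}

    // Called just before 'index' is drawn (cf. onInitPath above): uploads the
    // path data on first use only, then hands back the GL path ID.
    unsigned preparePath(int index) {
        if (!fInitialized[index]) {
            fUploadPath(fBasePathID + index, index);  // e.g. a PathCommands upload
            fInitialized[index] = true;
        }
        return fBasePathID + index;
    }

private:
    unsigned fBasePathID;
    std::function<void(unsigned, int)> fUploadPath;
    std::vector<bool> fInitialized;
};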
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLPathRange.h b/gfx/skia/skia/src/gpu/gl/GrGLPathRange.h
new file mode 100644
index 000000000..7d920105a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLPathRange.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLPathRange_DEFINED
+#define GrGLPathRange_DEFINED
+
+#include "../GrPathRange.h"
+#include "GrStyle.h"
+#include "gl/GrGLTypes.h"
+
+class GrGLGpu;
+
+/**
+ * Currently this represents a range of GL_NV_path_rendering Path IDs. If we
+ * support other GL path extensions then this would have to have a type enum
+ * and/or be subclassed.
+ */
+
+class GrGLPathRange : public GrPathRange {
+public:
+ /**
+ * Initialize a GL path range from a PathGenerator. This class will allocate
+ * the GPU path objects and initialize them lazily.
+ */
+ GrGLPathRange(GrGLGpu*, PathGenerator*, const GrStyle&);
+
+ /**
+ * Initialize a GL path range from an existing range of pre-initialized GPU
+ * path objects. This class assumes ownership of the GPU path objects and
+ * will delete them when done.
+ */
+ GrGLPathRange(GrGLGpu*,
+ GrGLuint basePathID,
+ int numPaths,
+ size_t gpuMemorySize,
+ const GrStyle&);
+
+ GrGLuint basePathID() const { return fBasePathID; }
+
+ bool shouldStroke() const { return fShouldStroke; }
+ bool shouldFill() const { return fShouldFill; }
+
+protected:
+ void onInitPath(int index, const SkPath&) const override;
+
+ void onRelease() override;
+ void onAbandon() override;
+
+private:
+ void init();
+ size_t onGpuMemorySize() const override { return fGpuMemorySize; }
+
+ const GrStyle fStyle;
+ GrGLuint fBasePathID;
+ mutable size_t fGpuMemorySize;
+ bool fShouldStroke;
+ bool fShouldFill;
+
+ typedef GrPathRange INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLPathRendering.cpp b/gfx/skia/skia/src/gpu/gl/GrGLPathRendering.cpp
new file mode 100644
index 000000000..cbf204125
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLPathRendering.cpp
@@ -0,0 +1,339 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "gl/GrGLPathRendering.h"
+#include "gl/GrGLUtil.h"
+#include "gl/GrGLGpu.h"
+
+#include "GrGLPath.h"
+#include "GrGLPathRange.h"
+#include "GrGLPathRendering.h"
+
+#include "SkStream.h"
+#include "SkTypeface.h"
+
+#define GL_CALL(X) GR_GL_CALL(this->gpu()->glInterface(), X)
+#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->gpu()->glInterface(), RET, X)
+
+// Number of paths to allocate per glGenPaths call. The call can be overly slow on the command
+// buffer GL implementation. The call has a result value, so we have to wait for it to complete.
+static const GrGLsizei kPathIDPreallocationAmount = 65536;
+
+static const GrGLenum gIndexType2GLType[] = {
+ GR_GL_UNSIGNED_BYTE,
+ GR_GL_UNSIGNED_SHORT,
+ GR_GL_UNSIGNED_INT
+};
+
+GR_STATIC_ASSERT(0 == GrPathRange::kU8_PathIndexType);
+GR_STATIC_ASSERT(1 == GrPathRange::kU16_PathIndexType);
+GR_STATIC_ASSERT(2 == GrPathRange::kU32_PathIndexType);
+GR_STATIC_ASSERT(GrPathRange::kU32_PathIndexType == GrPathRange::kLast_PathIndexType);
+
+static const GrGLenum gXformType2GLType[] = {
+ GR_GL_NONE,
+ GR_GL_TRANSLATE_X,
+ GR_GL_TRANSLATE_Y,
+ GR_GL_TRANSLATE_2D,
+ GR_GL_TRANSPOSE_AFFINE_2D
+};
+
+GR_STATIC_ASSERT(0 == GrPathRendering::kNone_PathTransformType);
+GR_STATIC_ASSERT(1 == GrPathRendering::kTranslateX_PathTransformType);
+GR_STATIC_ASSERT(2 == GrPathRendering::kTranslateY_PathTransformType);
+GR_STATIC_ASSERT(3 == GrPathRendering::kTranslate_PathTransformType);
+GR_STATIC_ASSERT(4 == GrPathRendering::kAffine_PathTransformType);
+GR_STATIC_ASSERT(GrPathRendering::kAffine_PathTransformType == GrPathRendering::kLast_PathTransformType);
+
+#ifdef SK_DEBUG
+static const GrGLenum gXformType2ComponentCount[] = {
+ 0,
+ 1,
+ 1,
+ 2,
+ 6
+};
+
+static void verify_floats(const float* floats, int count) {
+ for (int i = 0; i < count; ++i) {
+ SkASSERT(!SkScalarIsNaN(SkFloatToScalar(floats[i])));
+ }
+}
+#endif
+
+static GrGLenum gr_stencil_op_to_gl_path_rendering_fill_mode(GrStencilOp op) {
+ switch (op) {
+ default:
+ SkFAIL("Unexpected path fill.");
+ /* fallthrough */;
+ case GrStencilOp::kIncWrap:
+ return GR_GL_COUNT_UP;
+ case GrStencilOp::kInvert:
+ return GR_GL_INVERT;
+ }
+}
+
+GrGLPathRendering::GrGLPathRendering(GrGLGpu* gpu)
+ : GrPathRendering(gpu)
+ , fPreallocatedPathCount(0) {
+ const GrGLInterface* glInterface = gpu->glInterface();
+ fCaps.bindFragmentInputSupport =
+ nullptr != glInterface->fFunctions.fBindFragmentInputLocation;
+}
+
+GrGLPathRendering::~GrGLPathRendering() {
+ if (fPreallocatedPathCount > 0) {
+ this->deletePaths(fFirstPreallocatedPathID, fPreallocatedPathCount);
+ }
+}
+
+void GrGLPathRendering::disconnect(GrGpu::DisconnectType type) {
+ if (GrGpu::DisconnectType::kCleanup == type) {
+ this->deletePaths(fFirstPreallocatedPathID, fPreallocatedPathCount);
+    }
+ fPreallocatedPathCount = 0;
+}
+
+void GrGLPathRendering::resetContext() {
+ fHWProjectionMatrixState.invalidate();
+ // we don't use the model view matrix.
+ GL_CALL(MatrixLoadIdentity(GR_GL_PATH_MODELVIEW));
+
+ fHWPathStencilSettings.invalidate();
+}
+
+GrPath* GrGLPathRendering::createPath(const SkPath& inPath, const GrStyle& style) {
+ return new GrGLPath(this->gpu(), inPath, style);
+}
+
+GrPathRange* GrGLPathRendering::createPathRange(GrPathRange::PathGenerator* pathGenerator,
+ const GrStyle& style) {
+ return new GrGLPathRange(this->gpu(), pathGenerator, style);
+}
+
+void GrGLPathRendering::onStencilPath(const StencilPathArgs& args, const GrPath* path) {
+ GrGLGpu* gpu = this->gpu();
+ SkASSERT(gpu->caps()->shaderCaps()->pathRenderingSupport());
+ gpu->flushColorWrite(false);
+ gpu->flushDrawFace(GrDrawFace::kBoth);
+
+ GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(args.fRenderTarget);
+ SkISize size = SkISize::Make(rt->width(), rt->height());
+ this->setProjectionMatrix(*args.fViewMatrix, size, rt->origin());
+ gpu->flushScissor(*args.fScissor, rt->getViewport(), rt->origin());
+ gpu->flushHWAAState(rt, args.fUseHWAA, true);
+ gpu->flushRenderTarget(rt, nullptr);
+
+ const GrGLPath* glPath = static_cast<const GrGLPath*>(path);
+
+ this->flushPathStencilSettings(*args.fStencil);
+ SkASSERT(!fHWPathStencilSettings.isTwoSided());
+
+ GrGLenum fillMode =
+ gr_stencil_op_to_gl_path_rendering_fill_mode(fHWPathStencilSettings.front().fPassOp);
+ GrGLint writeMask = fHWPathStencilSettings.front().fWriteMask;
+
+ if (glPath->shouldFill()) {
+ GL_CALL(StencilFillPath(glPath->pathID(), fillMode, writeMask));
+ }
+ if (glPath->shouldStroke()) {
+ GL_CALL(StencilStrokePath(glPath->pathID(), 0xffff, writeMask));
+ }
+}
+
+void GrGLPathRendering::onDrawPath(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ const GrStencilSettings& stencilPassSettings,
+ const GrPath* path) {
+ if (!this->gpu()->flushGLState(pipeline, primProc, false)) {
+ return;
+ }
+ const GrGLPath* glPath = static_cast<const GrGLPath*>(path);
+
+ this->flushPathStencilSettings(stencilPassSettings);
+ SkASSERT(!fHWPathStencilSettings.isTwoSided());
+
+ GrGLenum fillMode =
+ gr_stencil_op_to_gl_path_rendering_fill_mode(fHWPathStencilSettings.front().fPassOp);
+ GrGLint writeMask = fHWPathStencilSettings.front().fWriteMask;
+
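+ // If the path has both a fill and a stroke, stencil the fill first and then stencil-and-cover
+ // the stroke; otherwise a single StencilThenCoverFillPath call covers the fill.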
+ if (glPath->shouldStroke()) {
+ if (glPath->shouldFill()) {
+ GL_CALL(StencilFillPath(glPath->pathID(), fillMode, writeMask));
+ }
+ GL_CALL(StencilThenCoverStrokePath(glPath->pathID(), 0xffff, writeMask,
+ GR_GL_BOUNDING_BOX));
+ } else {
+ GL_CALL(StencilThenCoverFillPath(glPath->pathID(), fillMode, writeMask,
+ GR_GL_BOUNDING_BOX));
+ }
+}
+
+void GrGLPathRendering::onDrawPaths(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ const GrStencilSettings& stencilPassSettings,
+ const GrPathRange* pathRange, const void* indices,
+ PathIndexType indexType, const float transformValues[],
+ PathTransformType transformType, int count) {
+ SkDEBUGCODE(verify_floats(transformValues, gXformType2ComponentCount[transformType] * count));
+
+ if (!this->gpu()->flushGLState(pipeline, primProc, false)) {
+ return;
+ }
+ this->flushPathStencilSettings(stencilPassSettings);
+ SkASSERT(!fHWPathStencilSettings.isTwoSided());
+
+
+ const GrGLPathRange* glPathRange = static_cast<const GrGLPathRange*>(pathRange);
+
+ GrGLenum fillMode =
+ gr_stencil_op_to_gl_path_rendering_fill_mode(fHWPathStencilSettings.front().fPassOp);
+ GrGLint writeMask = fHWPathStencilSettings.front().fWriteMask;
+
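+ // Same fill/stroke ordering as onDrawPath, but through the instanced NVPR entry points, with
+ // per-path transforms taken from transformValues.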
+ if (glPathRange->shouldStroke()) {
+ if (glPathRange->shouldFill()) {
+ GL_CALL(StencilFillPathInstanced(
+ count, gIndexType2GLType[indexType], indices, glPathRange->basePathID(),
+ fillMode, writeMask, gXformType2GLType[transformType],
+ transformValues));
+ }
+ GL_CALL(StencilThenCoverStrokePathInstanced(
+ count, gIndexType2GLType[indexType], indices, glPathRange->basePathID(),
+ 0xffff, writeMask, GR_GL_BOUNDING_BOX_OF_BOUNDING_BOXES,
+ gXformType2GLType[transformType], transformValues));
+ } else {
+ GL_CALL(StencilThenCoverFillPathInstanced(
+ count, gIndexType2GLType[indexType], indices, glPathRange->basePathID(),
+ fillMode, writeMask, GR_GL_BOUNDING_BOX_OF_BOUNDING_BOXES,
+ gXformType2GLType[transformType], transformValues));
+ }
+}
+
+void GrGLPathRendering::setProgramPathFragmentInputTransform(GrGLuint program, GrGLint location,
+ GrGLenum genMode, GrGLint components,
+ const SkMatrix& matrix) {
+ float coefficients[3 * 3];
+ SkASSERT(components >= 1 && components <= 3);
+
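+ // Upload the first 'components' rows of the 3x3 SkMatrix as the object-linear gen coefficients.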
+ coefficients[0] = SkScalarToFloat(matrix[SkMatrix::kMScaleX]);
+ coefficients[1] = SkScalarToFloat(matrix[SkMatrix::kMSkewX]);
+ coefficients[2] = SkScalarToFloat(matrix[SkMatrix::kMTransX]);
+
+ if (components >= 2) {
+ coefficients[3] = SkScalarToFloat(matrix[SkMatrix::kMSkewY]);
+ coefficients[4] = SkScalarToFloat(matrix[SkMatrix::kMScaleY]);
+ coefficients[5] = SkScalarToFloat(matrix[SkMatrix::kMTransY]);
+ }
+
+ if (components >= 3) {
+ coefficients[6] = SkScalarToFloat(matrix[SkMatrix::kMPersp0]);
+ coefficients[7] = SkScalarToFloat(matrix[SkMatrix::kMPersp1]);
+ coefficients[8] = SkScalarToFloat(matrix[SkMatrix::kMPersp2]);
+ }
+ SkDEBUGCODE(verify_floats(coefficients, components * 3));
+
+ GL_CALL(ProgramPathFragmentInputGen(program, location, genMode, components, coefficients));
+}
+
+void GrGLPathRendering::setProjectionMatrix(const SkMatrix& matrix,
+ const SkISize& renderTargetSize,
+ GrSurfaceOrigin renderTargetOrigin) {
+
+ SkASSERT(this->gpu()->glCaps().shaderCaps()->pathRenderingSupport());
+
+ if (renderTargetOrigin == fHWProjectionMatrixState.fRenderTargetOrigin &&
+ renderTargetSize == fHWProjectionMatrixState.fRenderTargetSize &&
+ matrix.cheapEqualTo(fHWProjectionMatrixState.fViewMatrix)) {
+ return;
+ }
+
+ fHWProjectionMatrixState.fViewMatrix = matrix;
+ fHWProjectionMatrixState.fRenderTargetSize = renderTargetSize;
+ fHWProjectionMatrixState.fRenderTargetOrigin = renderTargetOrigin;
+
+ float glMatrix[4 * 4];
+ fHWProjectionMatrixState.getRTAdjustedGLMatrix<4>(glMatrix);
+ SkDEBUGCODE(verify_floats(glMatrix, SK_ARRAY_COUNT(glMatrix)));
+ GL_CALL(MatrixLoadf(GR_GL_PATH_PROJECTION, glMatrix));
+}
+
+GrGLuint GrGLPathRendering::genPaths(GrGLsizei range) {
+ SkASSERT(range > 0);
+ GrGLuint firstID;
+ if (fPreallocatedPathCount >= range) {
+ firstID = fFirstPreallocatedPathID;
+ fPreallocatedPathCount -= range;
+ fFirstPreallocatedPathID += range;
+ return firstID;
+ }
+ // Allocate range plus enough to top up the preallocation amount. If the allocation succeeds,
+ // either join it with the existing preallocation range, or delete the existing range and use
+ // the new (potentially partial) preallocation range.
+ GrGLsizei allocAmount = range + (kPathIDPreallocationAmount - fPreallocatedPathCount);
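+ // The top-up term is non-negative, so allocAmount can only be smaller than range if the
+ // addition overflowed; in that case skip the block below and allocate just the range.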
+ if (allocAmount >= range) {
+ GL_CALL_RET(firstID, GenPaths(allocAmount));
+
+ if (firstID != 0) {
+ if (fPreallocatedPathCount > 0 &&
+ firstID == fFirstPreallocatedPathID + fPreallocatedPathCount) {
+ firstID = fFirstPreallocatedPathID;
+ fPreallocatedPathCount += allocAmount - range;
+ fFirstPreallocatedPathID += range;
+ return firstID;
+ }
+
+ if (allocAmount > range) {
+ if (fPreallocatedPathCount > 0) {
+ this->deletePaths(fFirstPreallocatedPathID, fPreallocatedPathCount);
+ }
+ fFirstPreallocatedPathID = firstID + range;
+ fPreallocatedPathCount = allocAmount - range;
+ }
+ // Special case: if allocAmount == range, the existing preallocation is already full, so leave it as is.
+ return firstID;
+ }
+ }
+ // Failed to allocate with preallocation. Remove existing preallocation and try to allocate just
+ // the range.
+ if (fPreallocatedPathCount > 0) {
+ this->deletePaths(fFirstPreallocatedPathID, fPreallocatedPathCount);
+ fPreallocatedPathCount = 0;
+ }
+
+ GL_CALL_RET(firstID, GenPaths(range));
+ if (firstID == 0) {
+ SkDebugf("Warning: Failed to allocate path\n");
+ }
+ return firstID;
+}
+
+void GrGLPathRendering::deletePaths(GrGLuint path, GrGLsizei range) {
+ GL_CALL(DeletePaths(path, range));
+}
+
+void GrGLPathRendering::flushPathStencilSettings(const GrStencilSettings& stencilSettings) {
+ if (fHWPathStencilSettings != stencilSettings) {
+ SkASSERT(stencilSettings.isValid());
+ // Only the func, ref, and test mask are set here. The op and write mask are parameters to
+ // the call that draws the path to the stencil buffer (e.g. glStencilFillPath).
+ uint16_t ref = stencilSettings.front().fRef;
+ GrStencilTest test = stencilSettings.front().fTest;
+ uint16_t testMask = stencilSettings.front().fTestMask;
+
+ if (!fHWPathStencilSettings.isValid() ||
+ ref != fHWPathStencilSettings.front().fRef ||
+ test != fHWPathStencilSettings.front().fTest ||
+ testMask != fHWPathStencilSettings.front().fTestMask) {
+ GL_CALL(PathStencilFunc(GrToGLStencilFunc(test), ref, testMask));
+ }
+ fHWPathStencilSettings = stencilSettings;
+ }
+}
+
+inline GrGLGpu* GrGLPathRendering::gpu() {
+ return static_cast<GrGLGpu*>(fGpu);
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLPathRendering.h b/gfx/skia/skia/src/gpu/gl/GrGLPathRendering.h
new file mode 100644
index 000000000..40f72ccd7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLPathRendering.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLPathRendering_DEFINED
+#define GrGLPathRendering_DEFINED
+
+#include "SkRefCnt.h"
+#include "GrPathRendering.h"
+#include "gl/GrGLTypes.h"
+#include "glsl/GrGLSLUtil.h"
+
+class GrGLNameAllocator;
+class GrGLGpu;
+class GrStencilSettings;
+class GrStyle;
+
+/**
+ * This class wraps the NV_path_rendering extension and manages its various
+ * API versions. If a method is not present in the GrGLInterface of the GrGLGpu
+ * (because the driver version is old), it tries to provide a backup
+ * implementation. But if a backup implementation is not practical, it marks the
+ * method as not supported.
+ */
+class GrGLPathRendering : public GrPathRendering {
+public:
+ /**
+ * Create a new GrGLPathRendering object from a given GrGLGpu.
+ */
+ GrGLPathRendering(GrGLGpu* gpu);
+ virtual ~GrGLPathRendering();
+
+ // GrPathRendering implementations.
+ GrPath* createPath(const SkPath&, const GrStyle&) override;
+ virtual GrPathRange* createPathRange(GrPathRange::PathGenerator*,
+ const GrStyle&) override;
+
+ /* Called when the 3D context state is unknown. */
+ void resetContext();
+
+ /**
+ * Called when the context either is about to be lost or is lost. DisconnectType indicates
+ * whether GPU resources should be cleaned up or abandoned when this is called.
+ */
+ void disconnect(GrGpu::DisconnectType);
+
+ bool shouldBindFragmentInputs() const {
+ return fCaps.bindFragmentInputSupport;
+ }
+
+ // Functions for "separable shader" texturing support.
+ void setProgramPathFragmentInputTransform(GrGLuint program, GrGLint location,
+ GrGLenum genMode, GrGLint components,
+ const SkMatrix&);
+
+ /* Sets the projection matrix for path rendering */
+ void setProjectionMatrix(const SkMatrix& matrix,
+ const SkISize& renderTargetSize,
+ GrSurfaceOrigin renderTargetOrigin);
+
+ GrGLuint genPaths(GrGLsizei range);
+ GrGLvoid deletePaths(GrGLuint path, GrGLsizei range);
+
+protected:
+ void onStencilPath(const StencilPathArgs&, const GrPath*) override;
+ void onDrawPath(const GrPipeline&,
+ const GrPrimitiveProcessor&,
+ const GrStencilSettings&,
+ const GrPath*) override;
+ void onDrawPaths(const GrPipeline&,
+ const GrPrimitiveProcessor&,
+ const GrStencilSettings&,
+ const GrPathRange*,
+ const void* indices,
+ PathIndexType,
+ const float transformValues[],
+ PathTransformType,
+ int count) override;
+private:
+ /**
+ * Records which optional driver functionality is supported.
+ */
+ struct Caps {
+ bool bindFragmentInputSupport : 1;
+ };
+
+ void flushPathStencilSettings(const GrStencilSettings&);
+
+ struct MatrixState {
+ SkMatrix fViewMatrix;
+ SkISize fRenderTargetSize;
+ GrSurfaceOrigin fRenderTargetOrigin;
+
+ MatrixState() { this->invalidate(); }
+ void invalidate() {
+ fViewMatrix = SkMatrix::InvalidMatrix();
+ fRenderTargetSize.fWidth = -1;
+ fRenderTargetSize.fHeight = -1;
+ fRenderTargetOrigin = (GrSurfaceOrigin) -1;
+ }
+
+ /**
+ * Gets a matrix that goes from local coordinates to GL normalized device coords.
+ */
+ template<int Size> void getRTAdjustedGLMatrix(float* destMatrix) {
+ SkMatrix combined;
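+ // Build a matrix mapping Skia device space to GL normalized device coords, flipping y when
+ // the render target origin is bottom-left, then pre-concatenate the view matrix so local
+ // coordinates are transformed first.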
+ if (kBottomLeft_GrSurfaceOrigin == fRenderTargetOrigin) {
+ combined.setAll(SkIntToScalar(2) / fRenderTargetSize.fWidth, 0, -SK_Scalar1,
+ 0, -SkIntToScalar(2) / fRenderTargetSize.fHeight, SK_Scalar1,
+ 0, 0, 1);
+ } else {
+ combined.setAll(SkIntToScalar(2) / fRenderTargetSize.fWidth, 0, -SK_Scalar1,
+ 0, SkIntToScalar(2) / fRenderTargetSize.fHeight, -SK_Scalar1,
+ 0, 0, 1);
+ }
+ combined.preConcat(fViewMatrix);
+ GrGLSLGetMatrix<Size>(destMatrix, combined);
+ }
+ };
+ GrGLGpu* gpu();
+
+ GrGLuint fFirstPreallocatedPathID;
+ GrGLsizei fPreallocatedPathCount;
+ MatrixState fHWProjectionMatrixState;
+ GrStencilSettings fHWPathStencilSettings;
+ Caps fCaps;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLProgram.cpp b/gfx/skia/skia/src/gpu/gl/GrGLProgram.cpp
new file mode 100644
index 000000000..86b237266
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLProgram.cpp
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLProgram.h"
+
+#include "GrAllocator.h"
+#include "GrProcessor.h"
+#include "GrCoordTransform.h"
+#include "GrGLGpu.h"
+#include "GrGLBuffer.h"
+#include "GrGLPathRendering.h"
+#include "GrPathProcessor.h"
+#include "GrPipeline.h"
+#include "GrXferProcessor.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLGeometryProcessor.h"
+#include "glsl/GrGLSLXferProcessor.h"
+#include "SkXfermode.h"
+
+#define GL_CALL(X) GR_GL_CALL(fGpu->glInterface(), X)
+#define GL_CALL_RET(R, X) GR_GL_CALL_RET(fGpu->glInterface(), R, X)
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+GrGLProgram::GrGLProgram(GrGLGpu* gpu,
+ const GrProgramDesc& desc,
+ const BuiltinUniformHandles& builtinUniforms,
+ GrGLuint programID,
+ const UniformInfoArray& uniforms,
+ const SkTArray<GrGLSampler>& samplers,
+ const VaryingInfoArray& pathProcVaryings,
+ GrGLSLPrimitiveProcessor* geometryProcessor,
+ GrGLSLXferProcessor* xferProcessor,
+ const GrGLSLFragProcs& fragmentProcessors)
+ : fBuiltinUniformHandles(builtinUniforms)
+ , fProgramID(programID)
+ , fGeometryProcessor(geometryProcessor)
+ , fXferProcessor(xferProcessor)
+ , fFragmentProcessors(fragmentProcessors)
+ , fDesc(desc)
+ , fGpu(gpu)
+ , fProgramDataManager(gpu, programID, uniforms, pathProcVaryings) {
+ // Assign texture units to sampler uniforms one time up front.
+ GL_CALL(UseProgram(fProgramID));
+ fProgramDataManager.setSamplers(samplers);
+}
+
+GrGLProgram::~GrGLProgram() {
+ if (fProgramID) {
+ GL_CALL(DeleteProgram(fProgramID));
+ }
+ for (int i = 0; i < fFragmentProcessors.count(); ++i) {
+ delete fFragmentProcessors[i];
+ }
+}
+
+void GrGLProgram::abandon() {
+ fProgramID = 0;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGLProgram::setData(const GrPrimitiveProcessor& primProc, const GrPipeline& pipeline) {
+ this->setRenderTargetState(primProc, pipeline);
+
+ // We set the textures and uniforms for installed processors in a generic way, but subclasses
+ // of GrGLProgram determine how to set coord transforms.
+ int nextSamplerIdx = 0;
+ fGeometryProcessor->setData(fProgramDataManager, primProc,
+ GrFragmentProcessor::CoordTransformIter(pipeline));
+ this->bindTextures(primProc, pipeline.getAllowSRGBInputs(), &nextSamplerIdx);
+
+ this->setFragmentData(primProc, pipeline, &nextSamplerIdx);
+
+ if (primProc.getPixelLocalStorageState() !=
+ GrPixelLocalStorageState::kDraw_GrPixelLocalStorageState) {
+ const GrXferProcessor& xp = pipeline.getXferProcessor();
+ fXferProcessor->setData(fProgramDataManager, xp);
+ this->bindTextures(xp, pipeline.getAllowSRGBInputs(), &nextSamplerIdx);
+ }
+}
+
+void GrGLProgram::generateMipmaps(const GrPrimitiveProcessor& primProc,
+ const GrPipeline& pipeline) {
+ this->generateMipmaps(primProc, pipeline.getAllowSRGBInputs());
+
+ GrFragmentProcessor::Iter iter(pipeline);
+ while (const GrFragmentProcessor* fp = iter.next()) {
+ this->generateMipmaps(*fp, pipeline.getAllowSRGBInputs());
+ }
+
+ if (primProc.getPixelLocalStorageState() !=
+ GrPixelLocalStorageState::kDraw_GrPixelLocalStorageState) {
+ const GrXferProcessor& xp = pipeline.getXferProcessor();
+ this->generateMipmaps(xp, pipeline.getAllowSRGBInputs());
+ }
+}
+
+void GrGLProgram::setFragmentData(const GrPrimitiveProcessor& primProc,
+ const GrPipeline& pipeline,
+ int* nextSamplerIdx) {
+ GrFragmentProcessor::Iter iter(pipeline);
+ GrGLSLFragmentProcessor::Iter glslIter(fFragmentProcessors.begin(),
+ fFragmentProcessors.count());
+ const GrFragmentProcessor* fp = iter.next();
+ GrGLSLFragmentProcessor* glslFP = glslIter.next();
+ while (fp && glslFP) {
+ glslFP->setData(fProgramDataManager, *fp);
+ this->bindTextures(*fp, pipeline.getAllowSRGBInputs(), nextSamplerIdx);
+ fp = iter.next();
+ glslFP = glslIter.next();
+ }
+ SkASSERT(!fp && !glslFP);
+}
+
+
+void GrGLProgram::setRenderTargetState(const GrPrimitiveProcessor& primProc,
+ const GrPipeline& pipeline) {
+ // Load the RT height uniform if it is needed to y-flip gl_FragCoord.
+ if (fBuiltinUniformHandles.fRTHeightUni.isValid() &&
+ fRenderTargetState.fRenderTargetSize.fHeight != pipeline.getRenderTarget()->height()) {
+ fProgramDataManager.set1f(fBuiltinUniformHandles.fRTHeightUni,
+ SkIntToScalar(pipeline.getRenderTarget()->height()));
+ }
+
+ // set RT adjustment
+ const GrRenderTarget* rt = pipeline.getRenderTarget();
+ SkISize size;
+ size.set(rt->width(), rt->height());
+ if (!primProc.isPathRendering()) {
+ if (fRenderTargetState.fRenderTargetOrigin != rt->origin() ||
+ fRenderTargetState.fRenderTargetSize != size) {
+ fRenderTargetState.fRenderTargetSize = size;
+ fRenderTargetState.fRenderTargetOrigin = rt->origin();
+
+ float rtAdjustmentVec[4];
+ fRenderTargetState.getRTAdjustmentVec(rtAdjustmentVec);
+ fProgramDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, rtAdjustmentVec);
+ }
+ } else {
+ SkASSERT(fGpu->glCaps().shaderCaps()->pathRenderingSupport());
+ const GrPathProcessor& pathProc = primProc.cast<GrPathProcessor>();
+ fGpu->glPathRendering()->setProjectionMatrix(pathProc.viewMatrix(),
+ size, rt->origin());
+ }
+}
+
+void GrGLProgram::bindTextures(const GrProcessor& processor,
+ bool allowSRGBInputs,
+ int* nextSamplerIdx) {
+ for (int i = 0; i < processor.numTextures(); ++i) {
+ const GrTextureAccess& access = processor.textureAccess(i);
+ fGpu->bindTexture((*nextSamplerIdx)++, access.getParams(),
+ allowSRGBInputs, static_cast<GrGLTexture*>(access.getTexture()));
+ }
+ for (int i = 0; i < processor.numBuffers(); ++i) {
+ const GrBufferAccess& access = processor.bufferAccess(i);
+ fGpu->bindTexelBuffer((*nextSamplerIdx)++, access.texelConfig(),
+ static_cast<GrGLBuffer*>(access.buffer()));
+ }
+}
+
+void GrGLProgram::generateMipmaps(const GrProcessor& processor,
+ bool allowSRGBInputs) {
+ for (int i = 0; i < processor.numTextures(); ++i) {
+ const GrTextureAccess& access = processor.textureAccess(i);
+ fGpu->generateMipmaps(access.getParams(), allowSRGBInputs,
+ static_cast<GrGLTexture*>(access.getTexture()));
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLProgram.h b/gfx/skia/skia/src/gpu/gl/GrGLProgram.h
new file mode 100644
index 000000000..34037a240
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLProgram.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGLProgram_DEFINED
+#define GrGLProgram_DEFINED
+
+#include "GrGLContext.h"
+#include "GrProgramDesc.h"
+#include "GrGLTexture.h"
+#include "GrGLProgramDataManager.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+
+#include "SkString.h"
+#include "SkXfermode.h"
+
+#include "builders/GrGLProgramBuilder.h"
+
+class GrGLInstalledProcessors;
+class GrGLProgramBuilder;
+class GrPipeline;
+
+/**
+ * This class manages a GPU program and records per-program information.
+ * We can specify the attribute locations so that they are constant
+ * across our shaders. But the driver determines the uniform locations
+ * at link time. We don't need to remember the sampler uniform location
+ * because we will bind a texture slot to it and never change it
+ * Uniforms are program-local so we can't rely on fHWState to hold the
+ * previous uniform state after a program change.
+ */
+class GrGLProgram : public SkRefCnt {
+public:
+ typedef GrGLSLProgramBuilder::BuiltinUniformHandles BuiltinUniformHandles;
+
+ ~GrGLProgram();
+
+ /**
+ * Call to abandon GL objects owned by this program.
+ */
+ void abandon();
+
+ const GrProgramDesc& getDesc() { return fDesc; }
+
+ /**
+ * Gets the GL program ID for this program.
+ */
+ GrGLuint programID() const { return fProgramID; }
+
+ /**
+ * We use the RT's size and origin to adjust from Skia device space to OpenGL normalized device
+ * space and to make device space positions have the correct origin for processors that require
+ * them.
+ */
+ struct RenderTargetState {
+ SkISize fRenderTargetSize;
+ GrSurfaceOrigin fRenderTargetOrigin;
+
+ RenderTargetState() { this->invalidate(); }
+ void invalidate() {
+ fRenderTargetSize.fWidth = -1;
+ fRenderTargetSize.fHeight = -1;
+ fRenderTargetOrigin = (GrSurfaceOrigin) -1;
+ }
+
+ /**
+ * Gets a vec4 that adjusts the position from Skia device coords to GL's normalized device
+ * coords. Assuming the transformed position, pos, is a homogeneous vec3, the vec, v, is
+ * applied as such:
+ * pos.x = dot(v.xy, pos.xz)
+ * pos.y = dot(v.zw, pos.yz)
+ */
+ void getRTAdjustmentVec(float* destVec) {
+ destVec[0] = 2.f / fRenderTargetSize.fWidth;
+ destVec[1] = -1.f;
+ if (kBottomLeft_GrSurfaceOrigin == fRenderTargetOrigin) {
+ destVec[2] = -2.f / fRenderTargetSize.fHeight;
+ destVec[3] = 1.f;
+ } else {
+ destVec[2] = 2.f / fRenderTargetSize.fHeight;
+ destVec[3] = -1.f;
+ }
+ }
+ };
+
+ /**
+ * This function uploads uniforms and calls each GrGL*Processor's setData. It also binds the
+ * textures used by each processor to successive GL texture units through the GrGLGpu. The
+ * caller is responsible for ensuring the program is bound before calling; no further texture
+ * binding is required on return.
+ */
+ void setData(const GrPrimitiveProcessor&, const GrPipeline&);
+
+ /**
+ * This function retrieves the textures that need to be used by each GrGL*Processor, and
+ * ensures that any textures requiring mipmaps have their mipmaps correctly built.
+ */
+ void generateMipmaps(const GrPrimitiveProcessor&, const GrPipeline&);
+
+protected:
+ typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+ typedef GrGLProgramDataManager::UniformInfoArray UniformInfoArray;
+ typedef GrGLProgramDataManager::VaryingInfoArray VaryingInfoArray;
+
+ GrGLProgram(GrGLGpu*,
+ const GrProgramDesc&,
+ const BuiltinUniformHandles&,
+ GrGLuint programID,
+ const UniformInfoArray&,
+ const SkTArray<GrGLSampler>&,
+ const VaryingInfoArray&, // used for NVPR only currently
+ GrGLSLPrimitiveProcessor* geometryProcessor,
+ GrGLSLXferProcessor* xferProcessor,
+ const GrGLSLFragProcs& fragmentProcessors);
+
+ // A helper to loop over effects, set the transforms (via subclass) and bind textures
+ void setFragmentData(const GrPrimitiveProcessor&, const GrPipeline&, int* nextSamplerIdx);
+
+ // Helper for setData() that sets the view matrix and loads the render target height uniform
+ void setRenderTargetState(const GrPrimitiveProcessor&, const GrPipeline&);
+
+ // Helper for setData() that binds textures and texel buffers to the appropriate texture units
+ void bindTextures(const GrProcessor&, bool allowSRGBInputs, int* nextSamplerIdx);
+
+ // Helper for generateMipmaps() that ensures mipmaps are up to date
+ void generateMipmaps(const GrProcessor&, bool allowSRGBInputs);
+
+ // these reflect the current values of uniforms (GL uniform values travel with program)
+ RenderTargetState fRenderTargetState;
+ BuiltinUniformHandles fBuiltinUniformHandles;
+ GrGLuint fProgramID;
+
+ // the installed effects
+ SkAutoTDelete<GrGLSLPrimitiveProcessor> fGeometryProcessor;
+ SkAutoTDelete<GrGLSLXferProcessor> fXferProcessor;
+ GrGLSLFragProcs fFragmentProcessors;
+
+ GrProgramDesc fDesc;
+ GrGLGpu* fGpu;
+ GrGLProgramDataManager fProgramDataManager;
+
+ friend class GrGLProgramBuilder;
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLProgramDataManager.cpp b/gfx/skia/skia/src/gpu/gl/GrGLProgramDataManager.cpp
new file mode 100644
index 000000000..9fe7d3b4f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLProgramDataManager.cpp
@@ -0,0 +1,325 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMatrix.h"
+#include "gl/GrGLProgramDataManager.h"
+#include "gl/GrGLGpu.h"
+#include "glsl/GrGLSLUniformHandler.h"
+
+#define ASSERT_ARRAY_UPLOAD_IN_BOUNDS(UNI, COUNT) \
+ SkASSERT((COUNT) <= (UNI).fArrayCount || \
+ (1 == (COUNT) && GrGLSLShaderVar::kNonArray == (UNI).fArrayCount))
+
+GrGLProgramDataManager::GrGLProgramDataManager(GrGLGpu* gpu, GrGLuint programID,
+ const UniformInfoArray& uniforms,
+ const VaryingInfoArray& pathProcVaryings)
+ : fGpu(gpu)
+ , fProgramID(programID) {
+ int count = uniforms.count();
+ fUniforms.push_back_n(count);
+ for (int i = 0; i < count; i++) {
+ Uniform& uniform = fUniforms[i];
+ const UniformInfo& builderUniform = uniforms[i];
+ SkASSERT(GrGLSLShaderVar::kNonArray == builderUniform.fVariable.getArrayCount() ||
+ builderUniform.fVariable.getArrayCount() > 0);
+ SkDEBUGCODE(
+ uniform.fArrayCount = builderUniform.fVariable.getArrayCount();
+ uniform.fType = builderUniform.fVariable.getType();
+ );
+ // TODO: Move the Xoom uniform array in both FS and VS bug workaround here.
+
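+ // Record the GL location once per stage that can see the uniform; the setters below use
+ // these to skip uploads for stages that don't reference it.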
+ if (kVertex_GrShaderFlag & builderUniform.fVisibility) {
+ uniform.fVSLocation = builderUniform.fLocation;
+ } else {
+ uniform.fVSLocation = kUnusedUniform;
+ }
+ if (kFragment_GrShaderFlag & builderUniform.fVisibility) {
+ uniform.fFSLocation = builderUniform.fLocation;
+ } else {
+ uniform.fFSLocation = kUnusedUniform;
+ }
+ }
+
+ // NVPR programs have separable varyings
+ count = pathProcVaryings.count();
+ fPathProcVaryings.push_back_n(count);
+ for (int i = 0; i < count; i++) {
+ SkASSERT(fGpu->glCaps().shaderCaps()->pathRenderingSupport());
+ PathProcVarying& pathProcVarying = fPathProcVaryings[i];
+ const VaryingInfo& builderPathProcVarying = pathProcVaryings[i];
+ SkASSERT(GrGLSLShaderVar::kNonArray == builderPathProcVarying.fVariable.getArrayCount() ||
+ builderPathProcVarying.fVariable.getArrayCount() > 0);
+ SkDEBUGCODE(
+ pathProcVarying.fArrayCount = builderPathProcVarying.fVariable.getArrayCount();
+ pathProcVarying.fType = builderPathProcVarying.fVariable.getType();
+ );
+ pathProcVarying.fLocation = builderPathProcVarying.fLocation;
+ }
+}
+
+void GrGLProgramDataManager::setSamplers(const SkTArray<GrGLSampler>& samplers) const {
+ for (int i = 0; i < samplers.count(); ++i) {
+ GrGLint vsLocation;
+ GrGLint fsLocation;
+ const GrGLSampler& sampler = samplers[i];
+ if (kVertex_GrShaderFlag & sampler.visibility()) {
+ vsLocation = sampler.location();
+ } else {
+ vsLocation = kUnusedUniform;
+ }
+ if (kFragment_GrShaderFlag & sampler.visibility()) {
+ fsLocation = sampler.location();
+ } else {
+ fsLocation = kUnusedUniform;
+ }
+ // FIXME: We still insert a single sampler uniform for every stage. If the shader does not
+ // reference the sampler then the compiler may have optimized it out. Uncomment this assert
+ // once stages insert their own samplers.
+ // this->printUnused(uni);
+ if (kUnusedUniform != fsLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform1i(fsLocation, i));
+ }
+ if (kUnusedUniform != vsLocation && vsLocation != fsLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform1i(vsLocation, i));
+ }
+ }
+}
+
+void GrGLProgramDataManager::set1i(UniformHandle u, int32_t i) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ SkDEBUGCODE(this->printUnused(uni));
+ if (kUnusedUniform != uni.fFSLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform1i(uni.fFSLocation, i));
+ }
+ if (kUnusedUniform != uni.fVSLocation && uni.fVSLocation != uni.fFSLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform1i(uni.fVSLocation, i));
+ }
+}
+
+void GrGLProgramDataManager::set1iv(UniformHandle u,
+ int arrayCount,
+ const int v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt_GrSLType);
+ SkASSERT(arrayCount > 0);
+ ASSERT_ARRAY_UPLOAD_IN_BOUNDS(uni, arrayCount);
+ if (kUnusedUniform != uni.fFSLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform1iv(uni.fFSLocation, arrayCount, v));
+ }
+ if (kUnusedUniform != uni.fVSLocation && uni.fVSLocation != uni.fFSLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform1iv(uni.fVSLocation, arrayCount, v));
+ }
+}
+
+void GrGLProgramDataManager::set1f(UniformHandle u, float v0) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ SkDEBUGCODE(this->printUnused(uni);)
+ if (kUnusedUniform != uni.fFSLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform1f(uni.fFSLocation, v0));
+ }
+ if (kUnusedUniform != uni.fVSLocation && uni.fVSLocation != uni.fFSLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform1f(uni.fVSLocation, v0));
+ }
+}
+
+void GrGLProgramDataManager::set1fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat_GrSLType);
+ SkASSERT(arrayCount > 0);
+ ASSERT_ARRAY_UPLOAD_IN_BOUNDS(uni, arrayCount);
+ // This assert fires in some instances of the two-pt gradient for its VSParams.
+ // Once the uniform manager is responsible for inserting the duplicate uniform
+ // arrays in VS and FS driver bug workaround, this can be enabled.
+ // this->printUni(uni);
+ if (kUnusedUniform != uni.fFSLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform1fv(uni.fFSLocation, arrayCount, v));
+ }
+ if (kUnusedUniform != uni.fVSLocation && uni.fVSLocation != uni.fFSLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform1fv(uni.fVSLocation, arrayCount, v));
+ }
+}
+
+void GrGLProgramDataManager::set2f(UniformHandle u, float v0, float v1) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec2f_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ SkDEBUGCODE(this->printUnused(uni);)
+ if (kUnusedUniform != uni.fFSLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform2f(uni.fFSLocation, v0, v1));
+ }
+ if (kUnusedUniform != uni.fVSLocation && uni.fVSLocation != uni.fFSLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform2f(uni.fVSLocation, v0, v1));
+ }
+}
+
+void GrGLProgramDataManager::set2fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec2f_GrSLType);
+ SkASSERT(arrayCount > 0);
+ ASSERT_ARRAY_UPLOAD_IN_BOUNDS(uni, arrayCount);
+ SkDEBUGCODE(this->printUnused(uni);)
+ if (kUnusedUniform != uni.fFSLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform2fv(uni.fFSLocation, arrayCount, v));
+ }
+ if (kUnusedUniform != uni.fVSLocation && uni.fVSLocation != uni.fFSLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform2fv(uni.fVSLocation, arrayCount, v));
+ }
+}
+
+void GrGLProgramDataManager::set3f(UniformHandle u, float v0, float v1, float v2) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec3f_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ SkDEBUGCODE(this->printUnused(uni);)
+ if (kUnusedUniform != uni.fFSLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform3f(uni.fFSLocation, v0, v1, v2));
+ }
+ if (kUnusedUniform != uni.fVSLocation && uni.fVSLocation != uni.fFSLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform3f(uni.fVSLocation, v0, v1, v2));
+ }
+}
+
+void GrGLProgramDataManager::set3fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec3f_GrSLType);
+ SkASSERT(arrayCount > 0);
+ ASSERT_ARRAY_UPLOAD_IN_BOUNDS(uni, arrayCount);
+ SkDEBUGCODE(this->printUnused(uni);)
+ if (kUnusedUniform != uni.fFSLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform3fv(uni.fFSLocation, arrayCount, v));
+ }
+ if (kUnusedUniform != uni.fVSLocation && uni.fVSLocation != uni.fFSLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform3fv(uni.fVSLocation, arrayCount, v));
+ }
+}
+
+void GrGLProgramDataManager::set4f(UniformHandle u,
+ float v0,
+ float v1,
+ float v2,
+ float v3) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec4f_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ SkDEBUGCODE(this->printUnused(uni);)
+ if (kUnusedUniform != uni.fFSLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform4f(uni.fFSLocation, v0, v1, v2, v3));
+ }
+ if (kUnusedUniform != uni.fVSLocation && uni.fVSLocation != uni.fFSLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform4f(uni.fVSLocation, v0, v1, v2, v3));
+ }
+}
+
+void GrGLProgramDataManager::set4fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec4f_GrSLType);
+ SkASSERT(arrayCount > 0);
+ ASSERT_ARRAY_UPLOAD_IN_BOUNDS(uni, arrayCount);
+ SkDEBUGCODE(this->printUnused(uni);)
+ if (kUnusedUniform != uni.fFSLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform4fv(uni.fFSLocation, arrayCount, v));
+ }
+ if (kUnusedUniform != uni.fVSLocation && uni.fVSLocation != uni.fFSLocation) {
+ GR_GL_CALL(fGpu->glInterface(), Uniform4fv(uni.fVSLocation, arrayCount, v));
+ }
+}
+
+void GrGLProgramDataManager::setMatrix2f(UniformHandle u, const float matrix[]) const {
+ this->setMatrices<2>(u, 1, matrix);
+}
+
+void GrGLProgramDataManager::setMatrix3f(UniformHandle u, const float matrix[]) const {
+ this->setMatrices<3>(u, 1, matrix);
+}
+
+void GrGLProgramDataManager::setMatrix4f(UniformHandle u, const float matrix[]) const {
+ this->setMatrices<4>(u, 1, matrix);
+}
+
+void GrGLProgramDataManager::setMatrix2fv(UniformHandle u, int arrayCount, const float m[]) const {
+ this->setMatrices<2>(u, arrayCount, m);
+}
+
+void GrGLProgramDataManager::setMatrix3fv(UniformHandle u, int arrayCount, const float m[]) const {
+ this->setMatrices<3>(u, arrayCount, m);
+}
+
+void GrGLProgramDataManager::setMatrix4fv(UniformHandle u, int arrayCount, const float m[]) const {
+ this->setMatrices<4>(u, arrayCount, m);
+}
+
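+// Forward declaration; specialized below for 2x2, 3x3 and 4x4 so setMatrices<N> dispatches to
+// the matching UniformMatrix{2,3,4}fv call.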
+template<int N> struct set_uniform_matrix;
+
+template<int N> inline void GrGLProgramDataManager::setMatrices(UniformHandle u,
+ int arrayCount,
+ const float matrices[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kMat22f_GrSLType + (N - 2));
+ SkASSERT(arrayCount > 0);
+ ASSERT_ARRAY_UPLOAD_IN_BOUNDS(uni, arrayCount);
+ SkDEBUGCODE(this->printUnused(uni);)
+ if (kUnusedUniform != uni.fFSLocation) {
+ set_uniform_matrix<N>::set(fGpu->glInterface(), uni.fFSLocation, arrayCount, matrices);
+ }
+ if (kUnusedUniform != uni.fVSLocation && uni.fVSLocation != uni.fFSLocation) {
+ set_uniform_matrix<N>::set(fGpu->glInterface(), uni.fVSLocation, arrayCount, matrices);
+ }
+}
+
+template<> struct set_uniform_matrix<2> {
+ inline static void set(const GrGLInterface* gli, const GrGLint loc, int cnt, const float m[]) {
+ GR_GL_CALL(gli, UniformMatrix2fv(loc, cnt, false, m));
+ }
+};
+
+template<> struct set_uniform_matrix<3> {
+ inline static void set(const GrGLInterface* gli, const GrGLint loc, int cnt, const float m[]) {
+ GR_GL_CALL(gli, UniformMatrix3fv(loc, cnt, false, m));
+ }
+};
+
+template<> struct set_uniform_matrix<4> {
+ inline static void set(const GrGLInterface* gli, const GrGLint loc, int cnt, const float m[]) {
+ GR_GL_CALL(gli, UniformMatrix4fv(loc, cnt, false, m));
+ }
+};
+
+void GrGLProgramDataManager::setPathFragmentInputTransform(VaryingHandle u,
+ int components,
+ const SkMatrix& matrix) const {
+ SkASSERT(fGpu->glCaps().shaderCaps()->pathRenderingSupport());
+ const PathProcVarying& fragmentInput = fPathProcVaryings[u.toIndex()];
+
+ SkASSERT((components == 2 && fragmentInput.fType == kVec2f_GrSLType) ||
+ (components == 3 && fragmentInput.fType == kVec3f_GrSLType));
+
+ fGpu->glPathRendering()->setProgramPathFragmentInputTransform(fProgramID,
+ fragmentInput.fLocation,
+ GR_GL_OBJECT_LINEAR,
+ components,
+ matrix);
+}
+
+#ifdef SK_DEBUG
+void GrGLProgramDataManager::printUnused(const Uniform& uni) const {
+ if (kUnusedUniform == uni.fFSLocation && kUnusedUniform == uni.fVSLocation) {
+ GrCapsDebugf(fGpu->caps(), "Unused uniform in shader\n");
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLProgramDataManager.h b/gfx/skia/skia/src/gpu/gl/GrGLProgramDataManager.h
new file mode 100644
index 000000000..30c4a63eb
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLProgramDataManager.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLProgramDataManager_DEFINED
+#define GrGLProgramDataManager_DEFINED
+
+#include "glsl/GrGLSLProgramDataManager.h"
+
+#include "GrAllocator.h"
+#include "gl/GrGLSampler.h"
+#include "gl/GrGLTypes.h"
+#include "glsl/GrGLSLShaderVar.h"
+
+#include "SkTArray.h"
+
+class GrGLGpu;
+class SkMatrix;
+class GrGLProgram;
+
+/** Manages the resources used by a shader program.
+ * The resources are objects the program uses to communicate with the
+ * application code.
+ */
+class GrGLProgramDataManager : public GrGLSLProgramDataManager {
+public:
+ struct UniformInfo {
+ GrGLSLShaderVar fVariable;
+ uint32_t fVisibility;
+ GrGLint fLocation;
+ };
+
+ struct VaryingInfo {
+ GrGLSLShaderVar fVariable;
+ GrGLint fLocation;
+ };
+
+ // This uses an allocator rather than array so that the GrGLSLShaderVars don't move in memory
+ // after they are inserted. Users of GrGLShaderBuilder get refs to the vars and ptrs to their
+ // name strings. Otherwise, we'd have to hand out copies.
+ typedef GrTAllocator<UniformInfo> UniformInfoArray;
+ typedef GrTAllocator<VaryingInfo> VaryingInfoArray;
+
+ GrGLProgramDataManager(GrGLGpu*, GrGLuint programID, const UniformInfoArray&,
+ const VaryingInfoArray&);
+
+
+ void setSamplers(const SkTArray<GrGLSampler>& samplers) const;
+
+ /** Functions for uploading uniform values. The varieties ending in v can be used to upload to an
+ * array of uniforms. arrayCount must be <= the array count of the uniform.
+ */
+ void set1i(UniformHandle, int32_t) const override;
+ void set1iv(UniformHandle, int arrayCount, const int v[]) const override;
+ void set1f(UniformHandle, float v0) const override;
+ void set1fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set2f(UniformHandle, float, float) const override;
+ void set2fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set3f(UniformHandle, float, float, float) const override;
+ void set3fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set4f(UniformHandle, float, float, float, float) const override;
+ void set4fv(UniformHandle, int arrayCount, const float v[]) const override;
+ // Matrices are column-major. The first three upload a single matrix; the latter three upload
+ // arrayCount matrices into a uniform array.
+ void setMatrix2f(UniformHandle, const float matrix[]) const override;
+ void setMatrix3f(UniformHandle, const float matrix[]) const override;
+ void setMatrix4f(UniformHandle, const float matrix[]) const override;
+ void setMatrix2fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+ void setMatrix3fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+ void setMatrix4fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+
+ // for nvpr only
+ void setPathFragmentInputTransform(VaryingHandle u, int components,
+ const SkMatrix& matrix) const override;
+
+private:
+ enum {
+ kUnusedUniform = -1,
+ };
+
+ struct Uniform {
+ GrGLint fVSLocation;
+ GrGLint fFSLocation;
+ SkDEBUGCODE(
+ GrSLType fType;
+ int fArrayCount;
+ );
+ };
+
+ enum {
+ kUnusedPathProcVarying = -1,
+ };
+ struct PathProcVarying {
+ GrGLint fLocation;
+ SkDEBUGCODE(
+ GrSLType fType;
+ int fArrayCount;
+ );
+ };
+
+ SkDEBUGCODE(void printUnused(const Uniform&) const;)
+
+ template<int N> inline void setMatrices(UniformHandle, int arrayCount,
+ const float matrices[]) const;
+
+ SkTArray<Uniform, true> fUniforms;
+ SkTArray<PathProcVarying, true> fPathProcVaryings;
+ GrGLGpu* fGpu;
+ GrGLuint fProgramID;
+
+ typedef GrGLSLProgramDataManager INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLRenderTarget.cpp b/gfx/skia/skia/src/gpu/gl/GrGLRenderTarget.cpp
new file mode 100644
index 000000000..f6ad3ba61
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLRenderTarget.cpp
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLRenderTarget.h"
+
+#include "GrGLGpu.h"
+#include "GrGLUtil.h"
+#include "GrGpuResourcePriv.h"
+#include "GrRenderTargetPriv.h"
+#include "SkTraceMemoryDump.h"
+
+#define GPUGL static_cast<GrGLGpu*>(this->getGpu())
+#define GL_CALL(X) GR_GL_CALL(GPUGL->glInterface(), X)
+
+// Because this class is virtually derived from GrSurface we must explicitly call its constructor.
+// Constructor for wrapped render targets.
+GrGLRenderTarget::GrGLRenderTarget(GrGLGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const IDDesc& idDesc,
+ GrGLStencilAttachment* stencil)
+ : GrSurface(gpu, desc)
+ , INHERITED(gpu, desc, ComputeFlags(gpu->glCaps(), idDesc), stencil) {
+ this->init(desc, idDesc);
+ this->registerWithCacheWrapped();
+}
+
+GrGLRenderTarget::GrGLRenderTarget(GrGLGpu* gpu, const GrSurfaceDesc& desc,
+ const IDDesc& idDesc)
+ : GrSurface(gpu, desc)
+ , INHERITED(gpu, desc, ComputeFlags(gpu->glCaps(), idDesc)) {
+ this->init(desc, idDesc);
+}
+
+inline GrRenderTarget::Flags GrGLRenderTarget::ComputeFlags(const GrGLCaps& glCaps,
+ const IDDesc& idDesc) {
+ Flags flags = Flags::kNone;
+ if (idDesc.fIsMixedSampled) {
+ SkASSERT(glCaps.usesMixedSamples() && idDesc.fRTFBOID); // FBO 0 can't be mixed sampled.
+ flags |= Flags::kMixedSampled;
+ }
+ if (glCaps.maxWindowRectangles() > 0 && idDesc.fRTFBOID) {
+ flags |= Flags::kWindowRectsSupport;
+ }
+ return flags;
+}
+
+void GrGLRenderTarget::init(const GrSurfaceDesc& desc, const IDDesc& idDesc) {
+ fRTFBOID = idDesc.fRTFBOID;
+ fTexFBOID = idDesc.fTexFBOID;
+ fMSColorRenderbufferID = idDesc.fMSColorRenderbufferID;
+ fRTFBOOwnership = idDesc.fRTFBOOwnership;
+
+ fViewport.fLeft = 0;
+ fViewport.fBottom = 0;
+ fViewport.fWidth = desc.fWidth;
+ fViewport.fHeight = desc.fHeight;
+
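+ // Approximate the VRAM footprint as one full-size color buffer per sample, counting the
+ // resolve texture as an extra sample when we own it (see totalSamples()).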
+ fGpuMemorySize = this->totalSamples() * this->totalBytesPerSample();
+
+ SkASSERT(fGpuMemorySize <= WorstCaseSize(desc));
+}
+
+GrGLRenderTarget* GrGLRenderTarget::CreateWrapped(GrGLGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const IDDesc& idDesc,
+ int stencilBits) {
+ GrGLStencilAttachment* sb = nullptr;
+ if (stencilBits) {
+ GrGLStencilAttachment::IDDesc sbDesc;
+ GrGLStencilAttachment::Format format;
+ format.fInternalFormat = GrGLStencilAttachment::kUnknownInternalFormat;
+ format.fPacked = false;
+ format.fStencilBits = stencilBits;
+ format.fTotalBits = stencilBits;
+ // Ownership of sb is passed to the GrRenderTarget, so it doesn't need to be deleted here.
+ sb = new GrGLStencilAttachment(gpu, sbDesc, desc.fWidth, desc.fHeight,
+ desc.fSampleCnt, format);
+ }
+ return (new GrGLRenderTarget(gpu, desc, idDesc, sb));
+}
+
+size_t GrGLRenderTarget::onGpuMemorySize() const {
+ return fGpuMemorySize;
+}
+
+bool GrGLRenderTarget::completeStencilAttachment() {
+ GrGLGpu* gpu = this->getGLGpu();
+ const GrGLInterface* interface = gpu->glInterface();
+ GrStencilAttachment* stencil = this->renderTargetPriv().getStencilAttachment();
+ if (nullptr == stencil) {
+ GR_GL_CALL(interface, FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_STENCIL_ATTACHMENT,
+ GR_GL_RENDERBUFFER, 0));
+ GR_GL_CALL(interface, FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_DEPTH_ATTACHMENT,
+ GR_GL_RENDERBUFFER, 0));
+#ifdef SK_DEBUG
+ if (kChromium_GrGLDriver != gpu->glContext().driver()) {
+ // This check can cause problems in Chromium if the context has been asynchronously
+ // abandoned (see skbug.com/5200)
+ GrGLenum status;
+ GR_GL_CALL_RET(interface, status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+ SkASSERT(GR_GL_FRAMEBUFFER_COMPLETE == status);
+ }
+#endif
+ return true;
+ } else {
+ const GrGLStencilAttachment* glStencil = static_cast<const GrGLStencilAttachment*>(stencil);
+ GrGLuint rb = glStencil->renderbufferID();
+
+ gpu->invalidateBoundRenderTarget();
+ gpu->stats()->incRenderTargetBinds();
+ GR_GL_CALL(interface, BindFramebuffer(GR_GL_FRAMEBUFFER, this->renderFBOID()));
+ GR_GL_CALL(interface, FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_STENCIL_ATTACHMENT,
+ GR_GL_RENDERBUFFER, rb));
+ if (glStencil->format().fPacked) {
+ GR_GL_CALL(interface, FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_DEPTH_ATTACHMENT,
+ GR_GL_RENDERBUFFER, rb));
+ } else {
+ GR_GL_CALL(interface, FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+ GR_GL_DEPTH_ATTACHMENT,
+ GR_GL_RENDERBUFFER, 0));
+ }
+
+#ifdef SK_DEBUG
+ if (kChromium_GrGLDriver != gpu->glContext().driver()) {
+ // This check can cause problems in Chromium if the context has been asynchronously
+ // abandoned (see skbug.com/5200)
+ GrGLenum status;
+ GR_GL_CALL_RET(interface, status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+ SkASSERT(GR_GL_FRAMEBUFFER_COMPLETE == status);
+ }
+#endif
+ return true;
+ }
+}
+
+void GrGLRenderTarget::onRelease() {
+ if (GrBackendObjectOwnership::kBorrowed != fRTFBOOwnership) {
+ if (fTexFBOID) {
+ GL_CALL(DeleteFramebuffers(1, &fTexFBOID));
+ }
+ if (fRTFBOID && fRTFBOID != fTexFBOID) {
+ GL_CALL(DeleteFramebuffers(1, &fRTFBOID));
+ }
+ if (fMSColorRenderbufferID) {
+ GL_CALL(DeleteRenderbuffers(1, &fMSColorRenderbufferID));
+ }
+ }
+ fRTFBOID = 0;
+ fTexFBOID = 0;
+ fMSColorRenderbufferID = 0;
+ INHERITED::onRelease();
+}
+
+void GrGLRenderTarget::onAbandon() {
+ fRTFBOID = 0;
+ fTexFBOID = 0;
+ fMSColorRenderbufferID = 0;
+ INHERITED::onAbandon();
+}
+
+GrGLGpu* GrGLRenderTarget::getGLGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrGLGpu*>(this->getGpu());
+}
+
+bool GrGLRenderTarget::canAttemptStencilAttachment() const {
+ // Only modify the FBO's attachments if we have created the FBO. Public APIs do not currently
+ // allow for borrowed FBO ownership, so we can safely assume that if an object is owned,
+ // Skia created it.
+ return this->fRTFBOOwnership == GrBackendObjectOwnership::kOwned;
+}
+
+void GrGLRenderTarget::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
+ // Don't log the backing texture's contribution to the memory size. This will be handled by the
+ // texture object.
+
+ // Log any renderbuffer's contribution to memory. We only do this if we own the renderbuffer
+ // (have a fMSColorRenderbufferID).
+ if (fMSColorRenderbufferID) {
+ size_t size = this->msaaSamples() * this->totalBytesPerSample();
+
+ // Due to this resource having both a texture and a renderbuffer component, dump as
+ // skia/gpu_resources/resource_#/renderbuffer
+ SkString dumpName("skia/gpu_resources/resource_");
+ dumpName.appendS32(this->uniqueID());
+ dumpName.append("/renderbuffer");
+
+ traceMemoryDump->dumpNumericValue(dumpName.c_str(), "size", "bytes", size);
+
+ if (this->isPurgeable()) {
+ traceMemoryDump->dumpNumericValue(dumpName.c_str(), "purgeable_size", "bytes", size);
+ }
+
+ SkString renderbuffer_id;
+ renderbuffer_id.appendU32(fMSColorRenderbufferID);
+ traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_renderbuffer",
+ renderbuffer_id.c_str());
+ }
+}
+
+size_t GrGLRenderTarget::totalBytesPerSample() const {
+ SkASSERT(kUnknown_GrPixelConfig != fDesc.fConfig);
+ SkASSERT(!GrPixelConfigIsCompressed(fDesc.fConfig));
+ size_t colorBytes = GrBytesPerPixel(fDesc.fConfig);
+ SkASSERT(colorBytes > 0);
+
+ return fDesc.fWidth * fDesc.fHeight * colorBytes;
+}
+
+int GrGLRenderTarget::msaaSamples() const {
+ if (fTexFBOID == kUnresolvableFBOID || fTexFBOID != fRTFBOID) {
+ // If the render target's FBO is external (fTexFBOID == kUnresolvableFBOID), or if we own
+ // the render target's FBO (fTexFBOID != fRTFBOID), then we use the provided sample count.
+ return SkTMax(1, fDesc.fSampleCnt);
+ }
+
+ // When fTexFBOID == fRTFBOID, we either are not using MSAA, or MSAA is auto resolving, so use
+ // 0 for the sample count.
+ return 0;
+}
+
+int GrGLRenderTarget::totalSamples() const {
+ int total_samples = this->msaaSamples();
+
+ if (fTexFBOID != kUnresolvableFBOID) {
+ // If we own the resolve buffer then that is one more sample per pixel.
+ total_samples += 1;
+ }
+
+ return total_samples;
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLRenderTarget.h b/gfx/skia/skia/src/gpu/gl/GrGLRenderTarget.h
new file mode 100644
index 000000000..85e377f69
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLRenderTarget.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGLRenderTarget_DEFINED
+#define GrGLRenderTarget_DEFINED
+
+#include "GrGLIRect.h"
+#include "GrRenderTarget.h"
+#include "SkScalar.h"
+
+class GrGLCaps;
+class GrGLGpu;
+class GrGLStencilAttachment;
+
+class GrGLRenderTarget : public GrRenderTarget {
+public:
+ // set fTexFBOID to this value to indicate that it is multisampled but
+ // Gr doesn't know how to resolve it.
+ enum { kUnresolvableFBOID = 0 };
+
+ struct IDDesc {
+ GrGLuint fRTFBOID;
+ GrBackendObjectOwnership fRTFBOOwnership;
+ GrGLuint fTexFBOID;
+ GrGLuint fMSColorRenderbufferID;
+ bool fIsMixedSampled;
+ };
+
+ static GrGLRenderTarget* CreateWrapped(GrGLGpu*,
+ const GrSurfaceDesc&,
+ const IDDesc&,
+ int stencilBits);
+
+ void setViewport(const GrGLIRect& rect) { fViewport = rect; }
+ const GrGLIRect& getViewport() const { return fViewport; }
+
+ // The following two functions return the same ID when a
+ // texture/render target is not multisampled, and different IDs
+ // when it is.
+ // FBO ID used to render into
+ GrGLuint renderFBOID() const { return fRTFBOID; }
+ // FBO ID that has texture ID attached.
+ GrGLuint textureFBOID() const { return fTexFBOID; }
+
+ // override of GrRenderTarget
+ ResolveType getResolveType() const override {
+ if (!this->isUnifiedMultisampled() ||
+ fRTFBOID == fTexFBOID) {
+ // catches FBO 0 and non MSAA case
+ return kAutoResolves_ResolveType;
+ } else if (kUnresolvableFBOID == fTexFBOID) {
+ return kCantResolve_ResolveType;
+ } else {
+ return kCanResolve_ResolveType;
+ }
+ }
+
+ GrBackendObject getRenderTargetHandle() const override { return fRTFBOID; }
+
+ bool canAttemptStencilAttachment() const override;
+
+ // GrGLRenderTarget overrides dumpMemoryStatistics so it can log its texture and renderbuffer
+ // components separately.
+ void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const override;
+
+protected:
+ // Constructor for subclasses.
+ GrGLRenderTarget(GrGLGpu*, const GrSurfaceDesc&, const IDDesc&);
+
+ void init(const GrSurfaceDesc&, const IDDesc&);
+
+ void onAbandon() override;
+ void onRelease() override;
+
+ // This is protected because the subclass GrGLTextureRenderTarget calls this version.
+ size_t onGpuMemorySize() const override;
+
+private:
+ // Constructor for instances wrapping backend objects.
+ GrGLRenderTarget(GrGLGpu*, const GrSurfaceDesc&, const IDDesc&, GrGLStencilAttachment*);
+
+ static Flags ComputeFlags(const GrGLCaps&, const IDDesc&);
+
+ GrGLGpu* getGLGpu() const;
+ bool completeStencilAttachment() override;
+
+ // The total size of the resource (including all pixels) for a single sample.
+ size_t totalBytesPerSample() const;
+ int msaaSamples() const;
+ // The total number of samples, including both MSAA and resolve texture samples.
+ int totalSamples() const;
+
+ GrGLuint fRTFBOID;
+ GrGLuint fTexFBOID;
+ GrGLuint fMSColorRenderbufferID;
+
+ GrBackendObjectOwnership fRTFBOOwnership;
+
+ // When we switch to this render target we want to set the viewport to render only to the
+ // content area (as opposed to the whole allocation), and we want the rendering to be at the
+ // top left (GL has its origin in the bottom left).
+ GrGLIRect fViewport;
+
+ // onGpuMemorySize() needs to know the VRAM footprint of the FBO(s). However, abandon and
+ // release zero out the IDs and the cache needs to know the size even after those actions.
+ size_t fGpuMemorySize;
+
+ typedef GrRenderTarget INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLSampler.h b/gfx/skia/skia/src/gpu/gl/GrGLSampler.h
new file mode 100644
index 000000000..1f67ac9a5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLSampler.h
@@ -0,0 +1,45 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrGLSampler_DEFINED
+#define GrGLSampler_DEFINED
+
+#include "glsl/GrGLSLSampler.h"
+
+#include "gl/GrGLTypes.h"
+#include "glsl/GrGLSLShaderVar.h"
+
+class GrGLSampler : public GrGLSLSampler {
+public:
+ GrGLSampler(uint32_t visibility,
+ GrPixelConfig config,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name) : INHERITED(visibility, config) {
+ SkASSERT(GrSLTypeIsCombinedSamplerType(type));
+ fShaderVar.setType(type);
+ fShaderVar.setTypeModifier(GrGLSLShaderVar::kUniform_TypeModifier);
+ fShaderVar.setPrecision(precision);
+ fShaderVar.accessName()->set(name);
+ }
+
+ GrGLint location() const { return fLocation; }
+ GrSLType type() const override { return fShaderVar.getType(); }
+
+ const char* onGetSamplerNameForTexture2D() const override { return fShaderVar.c_str(); }
+ const char* getSamplerNameForTexelFetch() const override { return fShaderVar.c_str(); }
+
+private:
+ GrGLSLShaderVar fShaderVar;
+ GrGLint fLocation;
+
+ friend class GrGLUniformHandler;
+
+ typedef GrGLSLSampler INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLStencilAttachment.cpp b/gfx/skia/skia/src/gpu/gl/GrGLStencilAttachment.cpp
new file mode 100644
index 000000000..aa813ed50
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLStencilAttachment.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrGLStencilAttachment.h"
+#include "GrGLGpu.h"
+#include "SkTraceMemoryDump.h"
+
+size_t GrGLStencilAttachment::onGpuMemorySize() const {
+ uint64_t size = this->width();
+ size *= this->height();
+ size *= fFormat.fTotalBits;
+ size *= SkTMax(1,this->numSamples());
+ return static_cast<size_t>(size / 8);
+}
+
+void GrGLStencilAttachment::onRelease() {
+ if (0 != fRenderbufferID) {
+ GrGLGpu* gpuGL = (GrGLGpu*) this->getGpu();
+ const GrGLInterface* gl = gpuGL->glInterface();
+ GR_GL_CALL(gl, DeleteRenderbuffers(1, &fRenderbufferID));
+ fRenderbufferID = 0;
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrGLStencilAttachment::onAbandon() {
+ fRenderbufferID = 0;
+
+ INHERITED::onAbandon();
+}
+
+void GrGLStencilAttachment::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& dumpName) const {
+ SkString renderbuffer_id;
+ renderbuffer_id.appendU32(this->renderbufferID());
+ traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_renderbuffer",
+ renderbuffer_id.c_str());
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLStencilAttachment.h b/gfx/skia/skia/src/gpu/gl/GrGLStencilAttachment.h
new file mode 100644
index 000000000..f578bf80b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLStencilAttachment.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGLStencilAttachment_DEFINED
+#define GrGLStencilAttachment_DEFINED
+
+#include "gl/GrGLInterface.h"
+#include "GrStencilAttachment.h"
+
+class GrGLStencilAttachment : public GrStencilAttachment {
+public:
+ static const GrGLenum kUnknownInternalFormat = ~0U;
+ static const GrGLuint kUnknownBitCount = ~0U;
+ struct Format {
+ GrGLenum fInternalFormat;
+ GrGLuint fStencilBits;
+ GrGLuint fTotalBits;
+ bool fPacked;
+ };
+
+ struct IDDesc {
+ IDDesc() : fRenderbufferID(0) {}
+ GrGLuint fRenderbufferID;
+ };
+
+ GrGLStencilAttachment(GrGpu* gpu,
+ const IDDesc& idDesc,
+ int width, int height,
+ int sampleCnt,
+ const Format& format)
+ : GrStencilAttachment(gpu, width, height, format.fStencilBits, sampleCnt)
+ , fFormat(format)
+ , fRenderbufferID(idDesc.fRenderbufferID) {
+ this->registerWithCache(SkBudgeted::kYes);
+ }
+
+ GrGLuint renderbufferID() const {
+ return fRenderbufferID;
+ }
+
+ const Format& format() const { return fFormat; }
+
+protected:
+ // overrides of GrResource
+ void onRelease() override;
+ void onAbandon() override;
+ void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& dumpName) const override;
+
+private:
+ size_t onGpuMemorySize() const override;
+
+ Format fFormat;
+ // may be zero for external SBs associated with external RTs
+ // (we don't require the client to give us the id, just tell
+ // us how many bits of stencil there are).
+ GrGLuint fRenderbufferID;
+
+ typedef GrStencilAttachment INHERITED;
+};
+
+#endif
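
The Format struct in this header records the GL internal format together with the stencil and total bit counts, and fPacked says whether the stencil bits share storage with depth. As a concrete illustration (not part of the patch; the struct is re-declared and the GL enum values are written out only for the example), a packed GL_DEPTH24_STENCIL8 renderbuffer versus a pure GL_STENCIL_INDEX8 one would be described roughly like this:

    #include <cstdio>

    // Stand-in for GrGLStencilAttachment::Format, re-declared for illustration only.
    struct StencilFormat {
        unsigned internalFormat;  // GL internal format enum value
        unsigned stencilBits;     // bits of stencil per pixel
        unsigned totalBits;       // total bits per pixel (stencil + any packed depth)
        bool     packed;          // true when stencil is packed with depth
    };

    int main() {
        StencilFormat d24s8 = { 0x88F0 /* GL_DEPTH24_STENCIL8 */, 8, 32, true };
        StencilFormat s8    = { 0x8D48 /* GL_STENCIL_INDEX8 */,   8,  8, false };
        std::printf("packed: %u total bits, pure stencil: %u total bits\n",
                    d24s8.totalBits, s8.totalBits);
        return 0;
    }
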
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLTestInterface.cpp b/gfx/skia/skia/src/gpu/gl/GrGLTestInterface.cpp
new file mode 100644
index 000000000..d871ef628
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLTestInterface.cpp
@@ -0,0 +1,325 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <functional>
+#include "GrGLTestInterface.h"
+
+namespace {
+template<typename R, typename... A>
+std::function<R(A...)> bind_to_member(GrGLTestInterface* interface, R (GrGLTestInterface::*member)(A...)) {
+ return [interface, member] (A... a) -> R { return (interface->*member)(a...); };
+}
+} // anonymous namespace
+
+GrGLTestInterface::GrGLTestInterface() {
+ fFunctions.fActiveTexture = bind_to_member(this, &GrGLTestInterface::activeTexture);
+ fFunctions.fAttachShader = bind_to_member(this, &GrGLTestInterface::attachShader);
+ fFunctions.fBeginQuery = bind_to_member(this, &GrGLTestInterface::beginQuery);
+ fFunctions.fBindAttribLocation = bind_to_member(this, &GrGLTestInterface::bindAttribLocation);
+ fFunctions.fBindBuffer = bind_to_member(this, &GrGLTestInterface::bindBuffer);
+ fFunctions.fBindFramebuffer = bind_to_member(this, &GrGLTestInterface::bindFramebuffer);
+ fFunctions.fBindRenderbuffer = bind_to_member(this, &GrGLTestInterface::bindRenderbuffer);
+ fFunctions.fBindTexture = bind_to_member(this, &GrGLTestInterface::bindTexture);
+ fFunctions.fBindFragDataLocation = bind_to_member(this, &GrGLTestInterface::bindFragDataLocation);
+ fFunctions.fBindFragDataLocationIndexed = bind_to_member(this, &GrGLTestInterface::bindFragDataLocationIndexed);
+ fFunctions.fBindVertexArray = bind_to_member(this, &GrGLTestInterface::bindVertexArray);
+ fFunctions.fBlendBarrier = bind_to_member(this, &GrGLTestInterface::blendBarrier);
+ fFunctions.fBlendColor = bind_to_member(this, &GrGLTestInterface::blendColor);
+ fFunctions.fBlendEquation = bind_to_member(this, &GrGLTestInterface::blendEquation);
+ fFunctions.fBlendFunc = bind_to_member(this, &GrGLTestInterface::blendFunc);
+ fFunctions.fBlitFramebuffer = bind_to_member(this, &GrGLTestInterface::blitFramebuffer);
+ fFunctions.fBufferData = bind_to_member(this, &GrGLTestInterface::bufferData);
+ fFunctions.fBufferSubData = bind_to_member(this, &GrGLTestInterface::bufferSubData);
+ fFunctions.fCheckFramebufferStatus = bind_to_member(this, &GrGLTestInterface::checkFramebufferStatus);
+ fFunctions.fClear = bind_to_member(this, &GrGLTestInterface::clear);
+ fFunctions.fClearColor = bind_to_member(this, &GrGLTestInterface::clearColor);
+ fFunctions.fClearStencil = bind_to_member(this, &GrGLTestInterface::clearStencil);
+ fFunctions.fColorMask = bind_to_member(this, &GrGLTestInterface::colorMask);
+ fFunctions.fCompileShader = bind_to_member(this, &GrGLTestInterface::compileShader);
+ fFunctions.fCompressedTexImage2D = bind_to_member(this, &GrGLTestInterface::compressedTexImage2D);
+ fFunctions.fCompressedTexSubImage2D = bind_to_member(this, &GrGLTestInterface::compressedTexSubImage2D);
+ fFunctions.fCopyTexSubImage2D = bind_to_member(this, &GrGLTestInterface::copyTexSubImage2D);
+ fFunctions.fCreateProgram = bind_to_member(this, &GrGLTestInterface::createProgram);
+ fFunctions.fCreateShader = bind_to_member(this, &GrGLTestInterface::createShader);
+ fFunctions.fCullFace = bind_to_member(this, &GrGLTestInterface::cullFace);
+ fFunctions.fDeleteBuffers = bind_to_member(this, &GrGLTestInterface::deleteBuffers);
+ fFunctions.fDeleteFramebuffers = bind_to_member(this, &GrGLTestInterface::deleteFramebuffers);
+ fFunctions.fDeleteProgram = bind_to_member(this, &GrGLTestInterface::deleteProgram);
+ fFunctions.fDeleteQueries = bind_to_member(this, &GrGLTestInterface::deleteQueries);
+ fFunctions.fDeleteRenderbuffers = bind_to_member(this, &GrGLTestInterface::deleteRenderbuffers);
+ fFunctions.fDeleteShader = bind_to_member(this, &GrGLTestInterface::deleteShader);
+ fFunctions.fDeleteTextures = bind_to_member(this, &GrGLTestInterface::deleteTextures);
+ fFunctions.fDeleteVertexArrays = bind_to_member(this, &GrGLTestInterface::deleteVertexArrays);
+ fFunctions.fDepthMask = bind_to_member(this, &GrGLTestInterface::depthMask);
+ fFunctions.fDisable = bind_to_member(this, &GrGLTestInterface::disable);
+ fFunctions.fDisableVertexAttribArray = bind_to_member(this, &GrGLTestInterface::disableVertexAttribArray);
+ fFunctions.fDrawArrays = bind_to_member(this, &GrGLTestInterface::drawArrays);
+ fFunctions.fDrawArraysInstanced = bind_to_member(this, &GrGLTestInterface::drawArraysInstanced);
+ fFunctions.fDrawArraysIndirect = bind_to_member(this, &GrGLTestInterface::drawArraysIndirect);
+ fFunctions.fDrawBuffer = bind_to_member(this, &GrGLTestInterface::drawBuffer);
+ fFunctions.fDrawBuffers = bind_to_member(this, &GrGLTestInterface::drawBuffers);
+ fFunctions.fDrawElements = bind_to_member(this, &GrGLTestInterface::drawElements);
+ fFunctions.fDrawElementsInstanced = bind_to_member(this, &GrGLTestInterface::drawElementsInstanced);
+ fFunctions.fDrawElementsIndirect = bind_to_member(this, &GrGLTestInterface::drawElementsIndirect);
+ fFunctions.fDrawRangeElements = bind_to_member(this, &GrGLTestInterface::drawRangeElements);
+ fFunctions.fEnable = bind_to_member(this, &GrGLTestInterface::enable);
+ fFunctions.fEnableVertexAttribArray = bind_to_member(this, &GrGLTestInterface::enableVertexAttribArray);
+ fFunctions.fEndQuery = bind_to_member(this, &GrGLTestInterface::endQuery);
+ fFunctions.fFinish = bind_to_member(this, &GrGLTestInterface::finish);
+ fFunctions.fFlush = bind_to_member(this, &GrGLTestInterface::flush);
+ fFunctions.fFlushMappedBufferRange = bind_to_member(this, &GrGLTestInterface::flushMappedBufferRange);
+ fFunctions.fFramebufferRenderbuffer = bind_to_member(this, &GrGLTestInterface::framebufferRenderbuffer);
+ fFunctions.fFramebufferTexture2D = bind_to_member(this, &GrGLTestInterface::framebufferTexture2D);
+ fFunctions.fFramebufferTexture2DMultisample = bind_to_member(this, &GrGLTestInterface::framebufferTexture2DMultisample);
+ fFunctions.fFrontFace = bind_to_member(this, &GrGLTestInterface::frontFace);
+ fFunctions.fGenBuffers = bind_to_member(this, &GrGLTestInterface::genBuffers);
+ fFunctions.fGenFramebuffers = bind_to_member(this, &GrGLTestInterface::genFramebuffers);
+ fFunctions.fGenerateMipmap = bind_to_member(this, &GrGLTestInterface::generateMipmap);
+ fFunctions.fGenQueries = bind_to_member(this, &GrGLTestInterface::genQueries);
+ fFunctions.fGenRenderbuffers = bind_to_member(this, &GrGLTestInterface::genRenderbuffers);
+ fFunctions.fGenTextures = bind_to_member(this, &GrGLTestInterface::genTextures);
+ fFunctions.fGenVertexArrays = bind_to_member(this, &GrGLTestInterface::genVertexArrays);
+ fFunctions.fGetBufferParameteriv = bind_to_member(this, &GrGLTestInterface::getBufferParameteriv);
+ fFunctions.fGetError = bind_to_member(this, &GrGLTestInterface::getError);
+ fFunctions.fGetFramebufferAttachmentParameteriv = bind_to_member(this, &GrGLTestInterface::getFramebufferAttachmentParameteriv);
+ fFunctions.fGetIntegerv = bind_to_member(this, &GrGLTestInterface::getIntegerv);
+ fFunctions.fGetMultisamplefv = bind_to_member(this, &GrGLTestInterface::getMultisamplefv);
+ fFunctions.fGetProgramInfoLog = bind_to_member(this, &GrGLTestInterface::getProgramInfoLog);
+ fFunctions.fGetProgramiv = bind_to_member(this, &GrGLTestInterface::getProgramiv);
+ fFunctions.fGetQueryiv = bind_to_member(this, &GrGLTestInterface::getQueryiv);
+ fFunctions.fGetQueryObjecti64v = bind_to_member(this, &GrGLTestInterface::getQueryObjecti64v);
+ fFunctions.fGetQueryObjectiv = bind_to_member(this, &GrGLTestInterface::getQueryObjectiv);
+ fFunctions.fGetQueryObjectui64v = bind_to_member(this, &GrGLTestInterface::getQueryObjectui64v);
+ fFunctions.fGetQueryObjectuiv = bind_to_member(this, &GrGLTestInterface::getQueryObjectuiv);
+ fFunctions.fGetRenderbufferParameteriv = bind_to_member(this, &GrGLTestInterface::getRenderbufferParameteriv);
+ fFunctions.fGetShaderInfoLog = bind_to_member(this, &GrGLTestInterface::getShaderInfoLog);
+ fFunctions.fGetShaderiv = bind_to_member(this, &GrGLTestInterface::getShaderiv);
+ fFunctions.fGetShaderPrecisionFormat = bind_to_member(this, &GrGLTestInterface::getShaderPrecisionFormat);
+ fFunctions.fGetString = bind_to_member(this, &GrGLTestInterface::getString);
+ fFunctions.fGetStringi = bind_to_member(this, &GrGLTestInterface::getStringi);
+ fFunctions.fGetTexLevelParameteriv = bind_to_member(this, &GrGLTestInterface::getTexLevelParameteriv);
+ fFunctions.fGetUniformLocation = bind_to_member(this, &GrGLTestInterface::getUniformLocation);
+ fFunctions.fInsertEventMarker = bind_to_member(this, &GrGLTestInterface::insertEventMarker);
+ fFunctions.fInvalidateBufferData = bind_to_member(this, &GrGLTestInterface::invalidateBufferData);
+ fFunctions.fInvalidateBufferSubData = bind_to_member(this, &GrGLTestInterface::invalidateBufferSubData);
+ fFunctions.fInvalidateFramebuffer = bind_to_member(this, &GrGLTestInterface::invalidateFramebuffer);
+ fFunctions.fInvalidateSubFramebuffer = bind_to_member(this, &GrGLTestInterface::invalidateSubFramebuffer);
+ fFunctions.fInvalidateTexImage = bind_to_member(this, &GrGLTestInterface::invalidateTexImage);
+ fFunctions.fInvalidateTexSubImage = bind_to_member(this, &GrGLTestInterface::invalidateTexSubImage);
+ fFunctions.fIsTexture = bind_to_member(this, &GrGLTestInterface::isTexture);
+ fFunctions.fLineWidth = bind_to_member(this, &GrGLTestInterface::lineWidth);
+ fFunctions.fLinkProgram = bind_to_member(this, &GrGLTestInterface::linkProgram);
+ fFunctions.fMapBuffer = bind_to_member(this, &GrGLTestInterface::mapBuffer);
+ fFunctions.fMapBufferRange = bind_to_member(this, &GrGLTestInterface::mapBufferRange);
+ fFunctions.fMapBufferSubData = bind_to_member(this, &GrGLTestInterface::mapBufferSubData);
+ fFunctions.fMapTexSubImage2D = bind_to_member(this, &GrGLTestInterface::mapTexSubImage2D);
+ fFunctions.fMinSampleShading = bind_to_member(this, &GrGLTestInterface::minSampleShading);
+ fFunctions.fPixelStorei = bind_to_member(this, &GrGLTestInterface::pixelStorei);
+ fFunctions.fPopGroupMarker = bind_to_member(this, &GrGLTestInterface::popGroupMarker);
+ fFunctions.fPushGroupMarker = bind_to_member(this, &GrGLTestInterface::pushGroupMarker);
+ fFunctions.fQueryCounter = bind_to_member(this, &GrGLTestInterface::queryCounter);
+ fFunctions.fRasterSamples = bind_to_member(this, &GrGLTestInterface::rasterSamples);
+ fFunctions.fReadBuffer = bind_to_member(this, &GrGLTestInterface::readBuffer);
+ fFunctions.fReadPixels = bind_to_member(this, &GrGLTestInterface::readPixels);
+ fFunctions.fRenderbufferStorage = bind_to_member(this, &GrGLTestInterface::renderbufferStorage);
+ fFunctions.fRenderbufferStorageMultisample = bind_to_member(this, &GrGLTestInterface::renderbufferStorageMultisample);
+ fFunctions.fResolveMultisampleFramebuffer = bind_to_member(this, &GrGLTestInterface::resolveMultisampleFramebuffer);
+ fFunctions.fScissor = bind_to_member(this, &GrGLTestInterface::scissor);
+ fFunctions.fBindUniformLocation = bind_to_member(this, &GrGLTestInterface::bindUniformLocation);
+ fFunctions.fShaderSource = bind_to_member(this, &GrGLTestInterface::shaderSource);
+ fFunctions.fStencilFunc = bind_to_member(this, &GrGLTestInterface::stencilFunc);
+ fFunctions.fStencilFuncSeparate = bind_to_member(this, &GrGLTestInterface::stencilFuncSeparate);
+ fFunctions.fStencilMask = bind_to_member(this, &GrGLTestInterface::stencilMask);
+ fFunctions.fStencilMaskSeparate = bind_to_member(this, &GrGLTestInterface::stencilMaskSeparate);
+ fFunctions.fStencilOp = bind_to_member(this, &GrGLTestInterface::stencilOp);
+ fFunctions.fStencilOpSeparate = bind_to_member(this, &GrGLTestInterface::stencilOpSeparate);
+ fFunctions.fTexBuffer = bind_to_member(this, &GrGLTestInterface::texBuffer);
+ fFunctions.fTexImage2D = bind_to_member(this, &GrGLTestInterface::texImage2D);
+ fFunctions.fTexParameteri = bind_to_member(this, &GrGLTestInterface::texParameteri);
+ fFunctions.fTexParameteriv = bind_to_member(this, &GrGLTestInterface::texParameteriv);
+ fFunctions.fTexStorage2D = bind_to_member(this, &GrGLTestInterface::texStorage2D);
+ fFunctions.fDiscardFramebuffer = bind_to_member(this, &GrGLTestInterface::discardFramebuffer);
+ fFunctions.fTexSubImage2D = bind_to_member(this, &GrGLTestInterface::texSubImage2D);
+ fFunctions.fTextureBarrier = bind_to_member(this, &GrGLTestInterface::textureBarrier);
+ fFunctions.fUniform1f = bind_to_member(this, &GrGLTestInterface::uniform1f);
+ fFunctions.fUniform1i = bind_to_member(this, &GrGLTestInterface::uniform1i);
+ fFunctions.fUniform1fv = bind_to_member(this, &GrGLTestInterface::uniform1fv);
+ fFunctions.fUniform1iv = bind_to_member(this, &GrGLTestInterface::uniform1iv);
+ fFunctions.fUniform2f = bind_to_member(this, &GrGLTestInterface::uniform2f);
+ fFunctions.fUniform2i = bind_to_member(this, &GrGLTestInterface::uniform2i);
+ fFunctions.fUniform2fv = bind_to_member(this, &GrGLTestInterface::uniform2fv);
+ fFunctions.fUniform2iv = bind_to_member(this, &GrGLTestInterface::uniform2iv);
+ fFunctions.fUniform3f = bind_to_member(this, &GrGLTestInterface::uniform3f);
+ fFunctions.fUniform3i = bind_to_member(this, &GrGLTestInterface::uniform3i);
+ fFunctions.fUniform3fv = bind_to_member(this, &GrGLTestInterface::uniform3fv);
+ fFunctions.fUniform3iv = bind_to_member(this, &GrGLTestInterface::uniform3iv);
+ fFunctions.fUniform4f = bind_to_member(this, &GrGLTestInterface::uniform4f);
+ fFunctions.fUniform4i = bind_to_member(this, &GrGLTestInterface::uniform4i);
+ fFunctions.fUniform4fv = bind_to_member(this, &GrGLTestInterface::uniform4fv);
+ fFunctions.fUniform4iv = bind_to_member(this, &GrGLTestInterface::uniform4iv);
+ fFunctions.fUniformMatrix2fv = bind_to_member(this, &GrGLTestInterface::uniformMatrix2fv);
+ fFunctions.fUniformMatrix3fv = bind_to_member(this, &GrGLTestInterface::uniformMatrix3fv);
+ fFunctions.fUniformMatrix4fv = bind_to_member(this, &GrGLTestInterface::uniformMatrix4fv);
+ fFunctions.fUnmapBuffer = bind_to_member(this, &GrGLTestInterface::unmapBuffer);
+ fFunctions.fUnmapBufferSubData = bind_to_member(this, &GrGLTestInterface::unmapBufferSubData);
+ fFunctions.fUnmapTexSubImage2D = bind_to_member(this, &GrGLTestInterface::unmapTexSubImage2D);
+ fFunctions.fUseProgram = bind_to_member(this, &GrGLTestInterface::useProgram);
+ fFunctions.fVertexAttrib1f = bind_to_member(this, &GrGLTestInterface::vertexAttrib1f);
+ fFunctions.fVertexAttrib2fv = bind_to_member(this, &GrGLTestInterface::vertexAttrib2fv);
+ fFunctions.fVertexAttrib3fv = bind_to_member(this, &GrGLTestInterface::vertexAttrib3fv);
+ fFunctions.fVertexAttrib4fv = bind_to_member(this, &GrGLTestInterface::vertexAttrib4fv);
+ fFunctions.fVertexAttribDivisor = bind_to_member(this, &GrGLTestInterface::vertexAttribDivisor);
+ fFunctions.fVertexAttribIPointer = bind_to_member(this, &GrGLTestInterface::vertexAttribIPointer);
+ fFunctions.fVertexAttribPointer = bind_to_member(this, &GrGLTestInterface::vertexAttribPointer);
+ fFunctions.fViewport = bind_to_member(this, &GrGLTestInterface::viewport);
+ fFunctions.fMatrixLoadf = bind_to_member(this, &GrGLTestInterface::matrixLoadf);
+ fFunctions.fMatrixLoadIdentity = bind_to_member(this, &GrGLTestInterface::matrixLoadIdentity);
+ fFunctions.fPathCommands = bind_to_member(this, &GrGLTestInterface::pathCommands);
+ fFunctions.fPathParameteri = bind_to_member(this, &GrGLTestInterface::pathParameteri);
+ fFunctions.fPathParameterf = bind_to_member(this, &GrGLTestInterface::pathParameterf);
+ fFunctions.fGenPaths = bind_to_member(this, &GrGLTestInterface::genPaths);
+ fFunctions.fDeletePaths = bind_to_member(this, &GrGLTestInterface::deletePaths);
+ fFunctions.fIsPath = bind_to_member(this, &GrGLTestInterface::isPath);
+ fFunctions.fPathStencilFunc = bind_to_member(this, &GrGLTestInterface::pathStencilFunc);
+ fFunctions.fStencilFillPath = bind_to_member(this, &GrGLTestInterface::stencilFillPath);
+ fFunctions.fStencilStrokePath = bind_to_member(this, &GrGLTestInterface::stencilStrokePath);
+ fFunctions.fStencilFillPathInstanced = bind_to_member(this, &GrGLTestInterface::stencilFillPathInstanced);
+ fFunctions.fStencilStrokePathInstanced = bind_to_member(this, &GrGLTestInterface::stencilStrokePathInstanced);
+ fFunctions.fCoverFillPath = bind_to_member(this, &GrGLTestInterface::coverFillPath);
+ fFunctions.fCoverStrokePath = bind_to_member(this, &GrGLTestInterface::coverStrokePath);
+ fFunctions.fCoverFillPathInstanced = bind_to_member(this, &GrGLTestInterface::coverFillPathInstanced);
+ fFunctions.fCoverStrokePathInstanced = bind_to_member(this, &GrGLTestInterface::coverStrokePathInstanced);
+ fFunctions.fStencilThenCoverFillPath = bind_to_member(this, &GrGLTestInterface::stencilThenCoverFillPath);
+ fFunctions.fStencilThenCoverStrokePath = bind_to_member(this, &GrGLTestInterface::stencilThenCoverStrokePath);
+ fFunctions.fStencilThenCoverFillPathInstanced = bind_to_member(this, &GrGLTestInterface::stencilThenCoverFillPathInstanced);
+ fFunctions.fStencilThenCoverStrokePathInstanced = bind_to_member(this, &GrGLTestInterface::stencilThenCoverStrokePathInstanced);
+ fFunctions.fProgramPathFragmentInputGen = bind_to_member(this, &GrGLTestInterface::programPathFragmentInputGen);
+ fFunctions.fBindFragmentInputLocation = bind_to_member(this, &GrGLTestInterface::bindFragmentInputLocation);
+ fFunctions.fGetProgramResourceLocation = bind_to_member(this, &GrGLTestInterface::getProgramResourceLocation);
+ fFunctions.fCoverageModulation = bind_to_member(this, &GrGLTestInterface::coverageModulation);
+ fFunctions.fMultiDrawArraysIndirect = bind_to_member(this, &GrGLTestInterface::multiDrawArraysIndirect);
+ fFunctions.fMultiDrawElementsIndirect = bind_to_member(this, &GrGLTestInterface::multiDrawElementsIndirect);
+ fFunctions.fGetTextureHandle = bind_to_member(this, &GrGLTestInterface::getTextureHandle);
+ fFunctions.fGetTextureSamplerHandle = bind_to_member(this, &GrGLTestInterface::getTextureSamplerHandle);
+ fFunctions.fMakeTextureHandleResident = bind_to_member(this, &GrGLTestInterface::makeTextureHandleResident);
+ fFunctions.fMakeTextureHandleNonResident = bind_to_member(this, &GrGLTestInterface::makeTextureHandleNonResident);
+ fFunctions.fGetImageHandle = bind_to_member(this, &GrGLTestInterface::getImageHandle);
+ fFunctions.fMakeImageHandleResident = bind_to_member(this, &GrGLTestInterface::makeImageHandleResident);
+ fFunctions.fMakeImageHandleNonResident = bind_to_member(this, &GrGLTestInterface::makeImageHandleNonResident);
+ fFunctions.fIsTextureHandleResident = bind_to_member(this, &GrGLTestInterface::isTextureHandleResident);
+ fFunctions.fIsImageHandleResident = bind_to_member(this, &GrGLTestInterface::isImageHandleResident);
+ fFunctions.fUniformHandleui64 = bind_to_member(this, &GrGLTestInterface::uniformHandleui64);
+ fFunctions.fUniformHandleui64v = bind_to_member(this, &GrGLTestInterface::uniformHandleui64v);
+ fFunctions.fProgramUniformHandleui64 = bind_to_member(this, &GrGLTestInterface::programUniformHandleui64);
+ fFunctions.fProgramUniformHandleui64v = bind_to_member(this, &GrGLTestInterface::programUniformHandleui64v);
+ fFunctions.fTextureParameteri = bind_to_member(this, &GrGLTestInterface::textureParameteri);
+ fFunctions.fTextureParameteriv = bind_to_member(this, &GrGLTestInterface::textureParameteriv);
+ fFunctions.fTextureParameterf = bind_to_member(this, &GrGLTestInterface::textureParameterf);
+ fFunctions.fTextureParameterfv = bind_to_member(this, &GrGLTestInterface::textureParameterfv);
+ fFunctions.fTextureImage1D = bind_to_member(this, &GrGLTestInterface::textureImage1D);
+ fFunctions.fTextureImage2D = bind_to_member(this, &GrGLTestInterface::textureImage2D);
+ fFunctions.fTextureSubImage1D = bind_to_member(this, &GrGLTestInterface::textureSubImage1D);
+ fFunctions.fTextureSubImage2D = bind_to_member(this, &GrGLTestInterface::textureSubImage2D);
+ fFunctions.fCopyTextureImage1D = bind_to_member(this, &GrGLTestInterface::copyTextureImage1D);
+ fFunctions.fCopyTextureImage2D = bind_to_member(this, &GrGLTestInterface::copyTextureImage2D);
+ fFunctions.fCopyTextureSubImage1D = bind_to_member(this, &GrGLTestInterface::copyTextureSubImage1D);
+ fFunctions.fCopyTextureSubImage2D = bind_to_member(this, &GrGLTestInterface::copyTextureSubImage2D);
+ fFunctions.fGetTextureImage = bind_to_member(this, &GrGLTestInterface::getTextureImage);
+ fFunctions.fGetTextureParameterfv = bind_to_member(this, &GrGLTestInterface::getTextureParameterfv);
+ fFunctions.fGetTextureParameteriv = bind_to_member(this, &GrGLTestInterface::getTextureParameteriv);
+ fFunctions.fGetTextureLevelParameterfv = bind_to_member(this, &GrGLTestInterface::getTextureLevelParameterfv);
+ fFunctions.fGetTextureLevelParameteriv = bind_to_member(this, &GrGLTestInterface::getTextureLevelParameteriv);
+ fFunctions.fTextureImage3D = bind_to_member(this, &GrGLTestInterface::textureImage3D);
+ fFunctions.fTextureSubImage3D = bind_to_member(this, &GrGLTestInterface::textureSubImage3D);
+ fFunctions.fCopyTextureSubImage3D = bind_to_member(this, &GrGLTestInterface::copyTextureSubImage3D);
+ fFunctions.fCompressedTextureImage3D = bind_to_member(this, &GrGLTestInterface::compressedTextureImage3D);
+ fFunctions.fCompressedTextureImage2D = bind_to_member(this, &GrGLTestInterface::compressedTextureImage2D);
+ fFunctions.fCompressedTextureImage1D = bind_to_member(this, &GrGLTestInterface::compressedTextureImage1D);
+ fFunctions.fCompressedTextureSubImage3D = bind_to_member(this, &GrGLTestInterface::compressedTextureSubImage3D);
+ fFunctions.fCompressedTextureSubImage2D = bind_to_member(this, &GrGLTestInterface::compressedTextureSubImage2D);
+ fFunctions.fCompressedTextureSubImage1D = bind_to_member(this, &GrGLTestInterface::compressedTextureSubImage1D);
+ fFunctions.fGetCompressedTextureImage = bind_to_member(this, &GrGLTestInterface::getCompressedTextureImage);
+ fFunctions.fNamedBufferData = bind_to_member(this, &GrGLTestInterface::namedBufferData);
+ fFunctions.fNamedBufferSubData = bind_to_member(this, &GrGLTestInterface::namedBufferSubData);
+ fFunctions.fMapNamedBuffer = bind_to_member(this, &GrGLTestInterface::mapNamedBuffer);
+ fFunctions.fUnmapNamedBuffer = bind_to_member(this, &GrGLTestInterface::unmapNamedBuffer);
+ fFunctions.fGetNamedBufferParameteriv = bind_to_member(this, &GrGLTestInterface::getNamedBufferParameteriv);
+ fFunctions.fGetNamedBufferPointerv = bind_to_member(this, &GrGLTestInterface::getNamedBufferPointerv);
+ fFunctions.fGetNamedBufferSubData = bind_to_member(this, &GrGLTestInterface::getNamedBufferSubData);
+ fFunctions.fProgramUniform1f = bind_to_member(this, &GrGLTestInterface::programUniform1f);
+ fFunctions.fProgramUniform2f = bind_to_member(this, &GrGLTestInterface::programUniform2f);
+ fFunctions.fProgramUniform3f = bind_to_member(this, &GrGLTestInterface::programUniform3f);
+ fFunctions.fProgramUniform4f = bind_to_member(this, &GrGLTestInterface::programUniform4f);
+ fFunctions.fProgramUniform1i = bind_to_member(this, &GrGLTestInterface::programUniform1i);
+ fFunctions.fProgramUniform2i = bind_to_member(this, &GrGLTestInterface::programUniform2i);
+ fFunctions.fProgramUniform3i = bind_to_member(this, &GrGLTestInterface::programUniform3i);
+ fFunctions.fProgramUniform4i = bind_to_member(this, &GrGLTestInterface::programUniform4i);
+ fFunctions.fProgramUniform1fv = bind_to_member(this, &GrGLTestInterface::programUniform1fv);
+ fFunctions.fProgramUniform2fv = bind_to_member(this, &GrGLTestInterface::programUniform2fv);
+ fFunctions.fProgramUniform3fv = bind_to_member(this, &GrGLTestInterface::programUniform3fv);
+ fFunctions.fProgramUniform4fv = bind_to_member(this, &GrGLTestInterface::programUniform4fv);
+ fFunctions.fProgramUniform1iv = bind_to_member(this, &GrGLTestInterface::programUniform1iv);
+ fFunctions.fProgramUniform2iv = bind_to_member(this, &GrGLTestInterface::programUniform2iv);
+ fFunctions.fProgramUniform3iv = bind_to_member(this, &GrGLTestInterface::programUniform3iv);
+ fFunctions.fProgramUniform4iv = bind_to_member(this, &GrGLTestInterface::programUniform4iv);
+ fFunctions.fProgramUniformMatrix2fv = bind_to_member(this, &GrGLTestInterface::programUniformMatrix2fv);
+ fFunctions.fProgramUniformMatrix3fv = bind_to_member(this, &GrGLTestInterface::programUniformMatrix3fv);
+ fFunctions.fProgramUniformMatrix4fv = bind_to_member(this, &GrGLTestInterface::programUniformMatrix4fv);
+ fFunctions.fProgramUniformMatrix2x3fv = bind_to_member(this, &GrGLTestInterface::programUniformMatrix2x3fv);
+ fFunctions.fProgramUniformMatrix3x2fv = bind_to_member(this, &GrGLTestInterface::programUniformMatrix3x2fv);
+ fFunctions.fProgramUniformMatrix2x4fv = bind_to_member(this, &GrGLTestInterface::programUniformMatrix2x4fv);
+ fFunctions.fProgramUniformMatrix4x2fv = bind_to_member(this, &GrGLTestInterface::programUniformMatrix4x2fv);
+ fFunctions.fProgramUniformMatrix3x4fv = bind_to_member(this, &GrGLTestInterface::programUniformMatrix3x4fv);
+ fFunctions.fProgramUniformMatrix4x3fv = bind_to_member(this, &GrGLTestInterface::programUniformMatrix4x3fv);
+ fFunctions.fNamedRenderbufferStorage = bind_to_member(this, &GrGLTestInterface::namedRenderbufferStorage);
+ fFunctions.fGetNamedRenderbufferParameteriv = bind_to_member(this, &GrGLTestInterface::getNamedRenderbufferParameteriv);
+ fFunctions.fNamedRenderbufferStorageMultisample = bind_to_member(this, &GrGLTestInterface::namedRenderbufferStorageMultisample);
+ fFunctions.fCheckNamedFramebufferStatus = bind_to_member(this, &GrGLTestInterface::checkNamedFramebufferStatus);
+ fFunctions.fNamedFramebufferTexture1D = bind_to_member(this, &GrGLTestInterface::namedFramebufferTexture1D);
+ fFunctions.fNamedFramebufferTexture2D = bind_to_member(this, &GrGLTestInterface::namedFramebufferTexture2D);
+ fFunctions.fNamedFramebufferTexture3D = bind_to_member(this, &GrGLTestInterface::namedFramebufferTexture3D);
+ fFunctions.fNamedFramebufferRenderbuffer = bind_to_member(this, &GrGLTestInterface::namedFramebufferRenderbuffer);
+ fFunctions.fGetNamedFramebufferAttachmentParameteriv = bind_to_member(this, &GrGLTestInterface::getNamedFramebufferAttachmentParameteriv);
+ fFunctions.fGenerateTextureMipmap = bind_to_member(this, &GrGLTestInterface::generateTextureMipmap);
+ fFunctions.fFramebufferDrawBuffer = bind_to_member(this, &GrGLTestInterface::framebufferDrawBuffer);
+ fFunctions.fFramebufferDrawBuffers = bind_to_member(this, &GrGLTestInterface::framebufferDrawBuffers);
+ fFunctions.fFramebufferReadBuffer = bind_to_member(this, &GrGLTestInterface::framebufferReadBuffer);
+ fFunctions.fGetFramebufferParameteriv = bind_to_member(this, &GrGLTestInterface::getFramebufferParameteriv);
+ fFunctions.fNamedCopyBufferSubData = bind_to_member(this, &GrGLTestInterface::namedCopyBufferSubData);
+ fFunctions.fVertexArrayVertexOffset = bind_to_member(this, &GrGLTestInterface::vertexArrayVertexOffset);
+ fFunctions.fVertexArrayColorOffset = bind_to_member(this, &GrGLTestInterface::vertexArrayColorOffset);
+ fFunctions.fVertexArrayEdgeFlagOffset = bind_to_member(this, &GrGLTestInterface::vertexArrayEdgeFlagOffset);
+ fFunctions.fVertexArrayIndexOffset = bind_to_member(this, &GrGLTestInterface::vertexArrayIndexOffset);
+ fFunctions.fVertexArrayNormalOffset = bind_to_member(this, &GrGLTestInterface::vertexArrayNormalOffset);
+ fFunctions.fVertexArrayTexCoordOffset = bind_to_member(this, &GrGLTestInterface::vertexArrayTexCoordOffset);
+ fFunctions.fVertexArrayMultiTexCoordOffset = bind_to_member(this, &GrGLTestInterface::vertexArrayMultiTexCoordOffset);
+ fFunctions.fVertexArrayFogCoordOffset = bind_to_member(this, &GrGLTestInterface::vertexArrayFogCoordOffset);
+ fFunctions.fVertexArraySecondaryColorOffset = bind_to_member(this, &GrGLTestInterface::vertexArraySecondaryColorOffset);
+ fFunctions.fVertexArrayVertexAttribOffset = bind_to_member(this, &GrGLTestInterface::vertexArrayVertexAttribOffset);
+ fFunctions.fVertexArrayVertexAttribIOffset = bind_to_member(this, &GrGLTestInterface::vertexArrayVertexAttribIOffset);
+ fFunctions.fEnableVertexArray = bind_to_member(this, &GrGLTestInterface::enableVertexArray);
+ fFunctions.fDisableVertexArray = bind_to_member(this, &GrGLTestInterface::disableVertexArray);
+ fFunctions.fEnableVertexArrayAttrib = bind_to_member(this, &GrGLTestInterface::enableVertexArrayAttrib);
+ fFunctions.fDisableVertexArrayAttrib = bind_to_member(this, &GrGLTestInterface::disableVertexArrayAttrib);
+ fFunctions.fGetVertexArrayIntegerv = bind_to_member(this, &GrGLTestInterface::getVertexArrayIntegerv);
+ fFunctions.fGetVertexArrayPointerv = bind_to_member(this, &GrGLTestInterface::getVertexArrayPointerv);
+ fFunctions.fGetVertexArrayIntegeri_v = bind_to_member(this, &GrGLTestInterface::getVertexArrayIntegeri_v);
+ fFunctions.fGetVertexArrayPointeri_v = bind_to_member(this, &GrGLTestInterface::getVertexArrayPointeri_v);
+ fFunctions.fMapNamedBufferRange = bind_to_member(this, &GrGLTestInterface::mapNamedBufferRange);
+ fFunctions.fFlushMappedNamedBufferRange = bind_to_member(this, &GrGLTestInterface::flushMappedNamedBufferRange);
+ fFunctions.fTextureBuffer = bind_to_member(this, &GrGLTestInterface::textureBuffer);
+ fFunctions.fFenceSync = bind_to_member(this, &GrGLTestInterface::fenceSync);
+ fFunctions.fClientWaitSync = bind_to_member(this, &GrGLTestInterface::clientWaitSync);
+ fFunctions.fDeleteSync = bind_to_member(this, &GrGLTestInterface::deleteSync);
+ fFunctions.fDebugMessageControl = bind_to_member(this, &GrGLTestInterface::debugMessageControl);
+ fFunctions.fDebugMessageInsert = bind_to_member(this, &GrGLTestInterface::debugMessageInsert);
+ fFunctions.fDebugMessageCallback = bind_to_member(this, &GrGLTestInterface::debugMessageCallback);
+ fFunctions.fGetDebugMessageLog = bind_to_member(this, &GrGLTestInterface::getDebugMessageLog);
+ fFunctions.fPushDebugGroup = bind_to_member(this, &GrGLTestInterface::pushDebugGroup);
+ fFunctions.fPopDebugGroup = bind_to_member(this, &GrGLTestInterface::popDebugGroup);
+ fFunctions.fObjectLabel = bind_to_member(this, &GrGLTestInterface::objectLabel);
+}
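
The constructor above routes every entry of GrGLInterface's fFunctions table through bind_to_member, so a test subclass only has to override the virtuals it cares about and the std::function wrappers dispatch to those overrides. A minimal standalone sketch of that pattern (Backend, FunctionTable, and FailingBackend are hypothetical names, not Skia types):

    #include <cstdio>
    #include <functional>

    // Same shape as the anonymous-namespace helper above: capture an object
    // pointer and a pointer-to-member, expose them as a plain std::function.
    template <typename R, typename C, typename... A>
    std::function<R(A...)> bind_to_member(C* obj, R (C::*member)(A...)) {
        return [obj, member](A... a) -> R { return (obj->*member)(a...); };
    }

    struct Backend {                              // stand-in for GrGLTestInterface
        virtual ~Backend() {}
        virtual int  getError() { return 0; }     // default: "no error"
        virtual void clear(unsigned mask) {}      // default: no-op
    };

    struct FunctionTable {                        // stand-in for fFunctions
        std::function<int()>          fGetError;
        std::function<void(unsigned)> fClear;
    };

    struct FailingBackend : Backend {             // a test double overriding one call
        int getError() override { return 0x0505; }   // GL_OUT_OF_MEMORY
    };

    int main() {
        FailingBackend impl;
        Backend* backend = &impl;                 // bind through the base class
        FunctionTable table;
        table.fGetError = bind_to_member(backend, &Backend::getError);
        table.fClear    = bind_to_member(backend, &Backend::clear);

        table.fClear(0);                          // dispatches to the base no-op
        std::printf("error = %d\n", table.fGetError());  // virtual dispatch -> 1285
        return 0;
    }
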
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLTestInterface.h b/gfx/skia/skia/src/gpu/gl/GrGLTestInterface.h
new file mode 100644
index 000000000..ef00df397
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLTestInterface.h
@@ -0,0 +1,341 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLTestInterface_DEFINED
+#define GrGLTestInterface_DEFINED
+
+#include "gl/GrGLInterface.h"
+#include "GrGLDefines.h"
+
+/**
+ * Base class for interfaces used for Skia testing. We would like to move this to tools/gpu/gl
+ * when Chromium is no longer using GrGLCreateNullInterface in its unit testing.
+ */
+class GrGLTestInterface : public GrGLInterface {
+public:
+ virtual GrGLvoid activeTexture(GrGLenum texture) {}
+ virtual GrGLvoid attachShader(GrGLuint program, GrGLuint shader) {}
+ virtual GrGLvoid beginQuery(GrGLenum target, GrGLuint id) {}
+ virtual GrGLvoid bindAttribLocation(GrGLuint program, GrGLuint index, const char* name) {}
+ virtual GrGLvoid bindBuffer(GrGLenum target, GrGLuint buffer) {}
+ virtual GrGLvoid bindFramebuffer(GrGLenum target, GrGLuint framebuffer) {}
+ virtual GrGLvoid bindRenderbuffer(GrGLenum target, GrGLuint renderbuffer) {}
+ virtual GrGLvoid bindTexture(GrGLenum target, GrGLuint texture) {}
+ virtual GrGLvoid bindFragDataLocation(GrGLuint program, GrGLuint colorNumber, const GrGLchar* name) {}
+ virtual GrGLvoid bindFragDataLocationIndexed(GrGLuint program, GrGLuint colorNumber, GrGLuint index, const GrGLchar * name) {}
+ virtual GrGLvoid bindVertexArray(GrGLuint array) {}
+ virtual GrGLvoid blendBarrier() {}
+ virtual GrGLvoid blendColor(GrGLclampf red, GrGLclampf green, GrGLclampf blue, GrGLclampf alpha) {}
+ virtual GrGLvoid blendEquation(GrGLenum mode) {}
+ virtual GrGLvoid blendFunc(GrGLenum sfactor, GrGLenum dfactor) {}
+ virtual GrGLvoid blitFramebuffer(GrGLint srcX0, GrGLint srcY0, GrGLint srcX1, GrGLint srcY1, GrGLint dstX0, GrGLint dstY0, GrGLint dstX1, GrGLint dstY1, GrGLbitfield mask, GrGLenum filter) {}
+ virtual GrGLvoid bufferData(GrGLenum target, GrGLsizeiptr size, const GrGLvoid* data, GrGLenum usage) {}
+ virtual GrGLvoid bufferSubData(GrGLenum target, GrGLintptr offset, GrGLsizeiptr size, const GrGLvoid* data) {}
+ virtual GrGLenum checkFramebufferStatus(GrGLenum target) { return GR_GL_FRAMEBUFFER_COMPLETE; }
+ virtual GrGLvoid clear(GrGLbitfield mask) {}
+ virtual GrGLvoid clearColor(GrGLclampf red, GrGLclampf green, GrGLclampf blue, GrGLclampf alpha) {}
+ virtual GrGLvoid clearStencil(GrGLint s) {}
+ virtual GrGLvoid colorMask(GrGLboolean red, GrGLboolean green, GrGLboolean blue, GrGLboolean alpha) {}
+ virtual GrGLvoid compileShader(GrGLuint shader) {}
+ virtual GrGLvoid compressedTexImage2D(GrGLenum target, GrGLint level, GrGLenum internalformat, GrGLsizei width, GrGLsizei height, GrGLint border, GrGLsizei imageSize, const GrGLvoid* data) {}
+ virtual GrGLvoid compressedTexSubImage2D(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLsizei imageSize, const GrGLvoid* data) {}
+ virtual GrGLvoid copyTexSubImage2D(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height) {}
+ virtual GrGLuint createProgram() { return 0; }
+ virtual GrGLuint createShader(GrGLenum type) { return 0; }
+ virtual GrGLvoid cullFace(GrGLenum mode) {}
+ virtual GrGLvoid deleteBuffers(GrGLsizei n, const GrGLuint* buffers) {}
+ virtual GrGLvoid deleteFramebuffers(GrGLsizei n, const GrGLuint *framebuffers) {}
+ virtual GrGLvoid deleteProgram(GrGLuint program) {}
+ virtual GrGLvoid deleteQueries(GrGLsizei n, const GrGLuint *ids) {}
+ virtual GrGLvoid deleteRenderbuffers(GrGLsizei n, const GrGLuint *renderbuffers) {}
+ virtual GrGLvoid deleteShader(GrGLuint shader) {}
+ virtual GrGLvoid deleteTextures(GrGLsizei n, const GrGLuint* textures) {}
+ virtual GrGLvoid deleteVertexArrays(GrGLsizei n, const GrGLuint *arrays) {}
+ virtual GrGLvoid depthMask(GrGLboolean flag) {}
+ virtual GrGLvoid disable(GrGLenum cap) {}
+ virtual GrGLvoid disableVertexAttribArray(GrGLuint index) {}
+ virtual GrGLvoid drawArrays(GrGLenum mode, GrGLint first, GrGLsizei count) {}
+ virtual GrGLvoid drawArraysInstanced(GrGLenum mode, GrGLint first, GrGLsizei count, GrGLsizei primcount) {}
+ virtual GrGLvoid drawArraysIndirect(GrGLenum mode, const GrGLvoid* indirect) {}
+ virtual GrGLvoid drawBuffer(GrGLenum mode) {}
+ virtual GrGLvoid drawBuffers(GrGLsizei n, const GrGLenum* bufs) {}
+ virtual GrGLvoid drawElements(GrGLenum mode, GrGLsizei count, GrGLenum type, const GrGLvoid* indices) {}
+ virtual GrGLvoid drawElementsInstanced(GrGLenum mode, GrGLsizei count, GrGLenum type, const GrGLvoid *indices, GrGLsizei primcount) {}
+ virtual GrGLvoid drawElementsIndirect(GrGLenum mode, GrGLenum type, const GrGLvoid* indirect) {}
+ virtual GrGLvoid drawRangeElements(GrGLenum mode, GrGLuint start, GrGLuint end, GrGLsizei count, GrGLenum type, const GrGLvoid* indices) {}
+ virtual GrGLvoid enable(GrGLenum cap) {}
+ virtual GrGLvoid enableVertexAttribArray(GrGLuint index) {}
+ virtual GrGLvoid endQuery(GrGLenum target) {}
+ virtual GrGLvoid finish() {}
+ virtual GrGLvoid flush() {}
+ virtual GrGLvoid flushMappedBufferRange(GrGLenum target, GrGLintptr offset, GrGLsizeiptr length) {}
+ virtual GrGLvoid framebufferRenderbuffer(GrGLenum target, GrGLenum attachment, GrGLenum renderbuffertarget, GrGLuint renderbuffer) {}
+ virtual GrGLvoid framebufferTexture2D(GrGLenum target, GrGLenum attachment, GrGLenum textarget, GrGLuint texture, GrGLint level) {}
+ virtual GrGLvoid framebufferTexture2DMultisample(GrGLenum target, GrGLenum attachment, GrGLenum textarget, GrGLuint texture, GrGLint level, GrGLsizei samples) {}
+ virtual GrGLvoid frontFace(GrGLenum mode) {}
+ virtual GrGLvoid genBuffers(GrGLsizei n, GrGLuint* buffers) {}
+ virtual GrGLvoid genFramebuffers(GrGLsizei n, GrGLuint *framebuffers) {}
+ virtual GrGLvoid generateMipmap(GrGLenum target) {}
+ virtual GrGLvoid genQueries(GrGLsizei n, GrGLuint *ids) {}
+ virtual GrGLvoid genRenderbuffers(GrGLsizei n, GrGLuint *renderbuffers) {}
+ virtual GrGLvoid genTextures(GrGLsizei n, GrGLuint* textures) {}
+ virtual GrGLvoid genVertexArrays(GrGLsizei n, GrGLuint *arrays) {}
+ virtual GrGLvoid getBufferParameteriv(GrGLenum target, GrGLenum pname, GrGLint* params) {}
+ virtual GrGLenum getError() { return GR_GL_NO_ERROR; }
+ virtual GrGLvoid getFramebufferAttachmentParameteriv(GrGLenum target, GrGLenum attachment, GrGLenum pname, GrGLint* params) {}
+ virtual GrGLvoid getIntegerv(GrGLenum pname, GrGLint* params) {}
+ virtual GrGLvoid getMultisamplefv(GrGLenum pname, GrGLuint index, GrGLfloat* val) {}
+ virtual GrGLvoid getProgramInfoLog(GrGLuint program, GrGLsizei bufsize, GrGLsizei* length, char* infolog) {}
+ virtual GrGLvoid getProgramiv(GrGLuint program, GrGLenum pname, GrGLint* params) {}
+ virtual GrGLvoid getQueryiv(GrGLenum GLtarget, GrGLenum pname, GrGLint *params) {}
+ virtual GrGLvoid getQueryObjecti64v(GrGLuint id, GrGLenum pname, GrGLint64 *params) {}
+ virtual GrGLvoid getQueryObjectiv(GrGLuint id, GrGLenum pname, GrGLint *params) {}
+ virtual GrGLvoid getQueryObjectui64v(GrGLuint id, GrGLenum pname, GrGLuint64 *params) {}
+ virtual GrGLvoid getQueryObjectuiv(GrGLuint id, GrGLenum pname, GrGLuint *params) {}
+ virtual GrGLvoid getRenderbufferParameteriv(GrGLenum target, GrGLenum pname, GrGLint* params) {}
+ virtual GrGLvoid getShaderInfoLog(GrGLuint shader, GrGLsizei bufsize, GrGLsizei* length, char* infolog) {}
+ virtual GrGLvoid getShaderiv(GrGLuint shader, GrGLenum pname, GrGLint* params) {}
+ virtual GrGLvoid getShaderPrecisionFormat(GrGLenum shadertype, GrGLenum precisiontype, GrGLint *range, GrGLint *precision) {}
+ virtual const GrGLubyte* getString(GrGLenum name) { return nullptr; }
+ virtual const GrGLubyte* getStringi(GrGLenum name, GrGLuint index) { return nullptr; }
+ virtual GrGLvoid getTexLevelParameteriv(GrGLenum target, GrGLint level, GrGLenum pname, GrGLint* params) {}
+ virtual GrGLint getUniformLocation(GrGLuint program, const char* name) { return 0; }
+ virtual GrGLvoid insertEventMarker(GrGLsizei length, const char* marker) {}
+ virtual GrGLvoid invalidateBufferData(GrGLuint buffer) {}
+ virtual GrGLvoid invalidateBufferSubData(GrGLuint buffer, GrGLintptr offset, GrGLsizeiptr length) {}
+ virtual GrGLvoid invalidateFramebuffer(GrGLenum target, GrGLsizei numAttachments, const GrGLenum *attachments) {}
+ virtual GrGLvoid invalidateSubFramebuffer(GrGLenum target, GrGLsizei numAttachments, const GrGLenum *attachments, GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height) {}
+ virtual GrGLvoid invalidateTexImage(GrGLuint texture, GrGLint level) {}
+ virtual GrGLvoid invalidateTexSubImage(GrGLuint texture, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint zoffset, GrGLsizei width, GrGLsizei height, GrGLsizei depth) {}
+ virtual GrGLboolean isTexture(GrGLuint texture) { return GR_GL_FALSE; }
+ virtual GrGLvoid lineWidth(GrGLfloat width) {}
+ virtual GrGLvoid linkProgram(GrGLuint program) {}
+ virtual GrGLvoid* mapBuffer(GrGLenum target, GrGLenum access) { return nullptr; }
+ virtual GrGLvoid* mapBufferRange(GrGLenum target, GrGLintptr offset, GrGLsizeiptr length, GrGLbitfield access) { return nullptr; }
+ virtual GrGLvoid* mapBufferSubData(GrGLuint target, GrGLintptr offset, GrGLsizeiptr size, GrGLenum access) { return nullptr; }
+ virtual GrGLvoid* mapTexSubImage2D(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, GrGLenum access) { return nullptr; }
+ virtual GrGLvoid minSampleShading(GrGLfloat value) {}
+ virtual GrGLvoid pixelStorei(GrGLenum pname, GrGLint param) {}
+ virtual GrGLvoid popGroupMarker() {}
+ virtual GrGLvoid pushGroupMarker(GrGLsizei length, const char* marker) {}
+ virtual GrGLvoid queryCounter(GrGLuint id, GrGLenum target) {}
+ virtual GrGLvoid rasterSamples(GrGLuint samples, GrGLboolean fixedsamplelocations) {}
+ virtual GrGLvoid readBuffer(GrGLenum src) {}
+ virtual GrGLvoid readPixels(GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, GrGLvoid* pixels) {}
+ virtual GrGLvoid renderbufferStorage(GrGLenum target, GrGLenum internalformat, GrGLsizei width, GrGLsizei height) {}
+ virtual GrGLvoid renderbufferStorageMultisample(GrGLenum target, GrGLsizei samples, GrGLenum internalformat, GrGLsizei width, GrGLsizei height) {}
+ virtual GrGLvoid resolveMultisampleFramebuffer() {}
+ virtual GrGLvoid scissor(GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height) {}
+ virtual GrGLvoid bindUniformLocation(GrGLuint program, GrGLint location, const char* name) {}
+#if GR_GL_USE_NEW_SHADER_SOURCE_SIGNATURE
+ virtual GrGLvoid shaderSource(GrGLuint shader, GrGLsizei count, const char* const * str, const GrGLint* length) {}
+#else
+ virtual GrGLvoid shaderSource(GrGLuint shader, GrGLsizei count, const char** str, const GrGLint* length) {}
+#endif
+ virtual GrGLvoid stencilFunc(GrGLenum func, GrGLint ref, GrGLuint mask) {}
+ virtual GrGLvoid stencilFuncSeparate(GrGLenum face, GrGLenum func, GrGLint ref, GrGLuint mask) {}
+ virtual GrGLvoid stencilMask(GrGLuint mask) {}
+ virtual GrGLvoid stencilMaskSeparate(GrGLenum face, GrGLuint mask) {}
+ virtual GrGLvoid stencilOp(GrGLenum fail, GrGLenum zfail, GrGLenum zpass) {}
+ virtual GrGLvoid stencilOpSeparate(GrGLenum face, GrGLenum fail, GrGLenum zfail, GrGLenum zpass) {}
+ virtual GrGLvoid texBuffer(GrGLenum target, GrGLenum internalformat, GrGLuint buffer) {}
+ virtual GrGLvoid texImage2D(GrGLenum target, GrGLint level, GrGLint internalformat, GrGLsizei width, GrGLsizei height, GrGLint border, GrGLenum format, GrGLenum type, const GrGLvoid* pixels) {}
+ virtual GrGLvoid texParameteri(GrGLenum target, GrGLenum pname, GrGLint param) {}
+ virtual GrGLvoid texParameteriv(GrGLenum target, GrGLenum pname, const GrGLint* params) {}
+ virtual GrGLvoid texStorage2D(GrGLenum target, GrGLsizei levels, GrGLenum internalformat, GrGLsizei width, GrGLsizei height) {}
+ virtual GrGLvoid discardFramebuffer(GrGLenum target, GrGLsizei numAttachments, const GrGLenum* attachments) {}
+ virtual GrGLvoid texSubImage2D(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, const GrGLvoid* pixels) {}
+ virtual GrGLvoid textureBarrier() {}
+ virtual GrGLvoid uniform1f(GrGLint location, GrGLfloat v0) {}
+ virtual GrGLvoid uniform1i(GrGLint location, GrGLint v0) {}
+ virtual GrGLvoid uniform1fv(GrGLint location, GrGLsizei count, const GrGLfloat* v) {}
+ virtual GrGLvoid uniform1iv(GrGLint location, GrGLsizei count, const GrGLint* v) {}
+ virtual GrGLvoid uniform2f(GrGLint location, GrGLfloat v0, GrGLfloat v1) {}
+ virtual GrGLvoid uniform2i(GrGLint location, GrGLint v0, GrGLint v1) {}
+ virtual GrGLvoid uniform2fv(GrGLint location, GrGLsizei count, const GrGLfloat* v) {}
+ virtual GrGLvoid uniform2iv(GrGLint location, GrGLsizei count, const GrGLint* v) {}
+ virtual GrGLvoid uniform3f(GrGLint location, GrGLfloat v0, GrGLfloat v1, GrGLfloat v2) {}
+ virtual GrGLvoid uniform3i(GrGLint location, GrGLint v0, GrGLint v1, GrGLint v2) {}
+ virtual GrGLvoid uniform3fv(GrGLint location, GrGLsizei count, const GrGLfloat* v) {}
+ virtual GrGLvoid uniform3iv(GrGLint location, GrGLsizei count, const GrGLint* v) {}
+ virtual GrGLvoid uniform4f(GrGLint location, GrGLfloat v0, GrGLfloat v1, GrGLfloat v2, GrGLfloat v3) {}
+ virtual GrGLvoid uniform4i(GrGLint location, GrGLint v0, GrGLint v1, GrGLint v2, GrGLint v3) {}
+ virtual GrGLvoid uniform4fv(GrGLint location, GrGLsizei count, const GrGLfloat* v) {}
+ virtual GrGLvoid uniform4iv(GrGLint location, GrGLsizei count, const GrGLint* v) {}
+ virtual GrGLvoid uniformMatrix2fv(GrGLint location, GrGLsizei count, GrGLboolean transpose, const GrGLfloat* value) {}
+ virtual GrGLvoid uniformMatrix3fv(GrGLint location, GrGLsizei count, GrGLboolean transpose, const GrGLfloat* value) {}
+ virtual GrGLvoid uniformMatrix4fv(GrGLint location, GrGLsizei count, GrGLboolean transpose, const GrGLfloat* value) {}
+ virtual GrGLboolean unmapBuffer(GrGLenum target) { return GR_GL_TRUE; }
+ virtual GrGLvoid unmapBufferSubData(const GrGLvoid* mem) {}
+ virtual GrGLvoid unmapTexSubImage2D(const GrGLvoid* mem) {}
+ virtual GrGLvoid useProgram(GrGLuint program) {}
+ virtual GrGLvoid vertexAttrib1f(GrGLuint indx, const GrGLfloat value) {}
+ virtual GrGLvoid vertexAttrib2fv(GrGLuint indx, const GrGLfloat* values) {}
+ virtual GrGLvoid vertexAttrib3fv(GrGLuint indx, const GrGLfloat* values) {}
+ virtual GrGLvoid vertexAttrib4fv(GrGLuint indx, const GrGLfloat* values) {}
+ virtual GrGLvoid vertexAttribDivisor(GrGLuint index, GrGLuint divisor) {}
+ virtual GrGLvoid vertexAttribIPointer(GrGLuint indx, GrGLint size, GrGLenum type, GrGLsizei stride, const GrGLvoid* ptr) {}
+ virtual GrGLvoid vertexAttribPointer(GrGLuint indx, GrGLint size, GrGLenum type, GrGLboolean normalized, GrGLsizei stride, const GrGLvoid* ptr) {}
+ virtual GrGLvoid viewport(GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height) {}
+ virtual GrGLvoid matrixLoadf(GrGLenum matrixMode, const GrGLfloat* m) {}
+ virtual GrGLvoid matrixLoadIdentity(GrGLenum) {}
+ virtual GrGLvoid pathCommands(GrGLuint path, GrGLsizei numCommands, const GrGLubyte *commands, GrGLsizei numCoords, GrGLenum coordType, const GrGLvoid *coords) {}
+ virtual GrGLvoid pathParameteri(GrGLuint path, GrGLenum pname, GrGLint value) {}
+ virtual GrGLvoid pathParameterf(GrGLuint path, GrGLenum pname, GrGLfloat value) {}
+ virtual GrGLuint genPaths(GrGLsizei range) { return 0; }
+ virtual GrGLvoid deletePaths(GrGLuint path, GrGLsizei range) {}
+ virtual GrGLboolean isPath(GrGLuint path) { return true; }
+ virtual GrGLvoid pathStencilFunc(GrGLenum func, GrGLint ref, GrGLuint mask) {}
+ virtual GrGLvoid stencilFillPath(GrGLuint path, GrGLenum fillMode, GrGLuint mask) {}
+ virtual GrGLvoid stencilStrokePath(GrGLuint path, GrGLint reference, GrGLuint mask) {}
+ virtual GrGLvoid stencilFillPathInstanced(GrGLsizei numPaths, GrGLenum pathNameType, const GrGLvoid *paths, GrGLuint pathBase, GrGLenum fillMode, GrGLuint mask, GrGLenum transformType, const GrGLfloat *transformValues) {}
+ virtual GrGLvoid stencilStrokePathInstanced(GrGLsizei numPaths, GrGLenum pathNameType, const GrGLvoid *paths, GrGLuint pathBase, GrGLint reference, GrGLuint mask, GrGLenum transformType, const GrGLfloat *transformValues) {}
+ virtual GrGLvoid coverFillPath(GrGLuint path, GrGLenum coverMode) {}
+ virtual GrGLvoid coverStrokePath(GrGLuint name, GrGLenum coverMode) {}
+ virtual GrGLvoid coverFillPathInstanced(GrGLsizei numPaths, GrGLenum pathNameType, const GrGLvoid *paths, GrGLuint pathBase, GrGLenum coverMode, GrGLenum transformType, const GrGLfloat *transformValues) {}
+ virtual GrGLvoid coverStrokePathInstanced(GrGLsizei numPaths, GrGLenum pathNameType, const GrGLvoid *paths, GrGLuint pathBase, GrGLenum coverMode, GrGLenum transformType, const GrGLfloat* transformValues) {}
+ virtual GrGLvoid stencilThenCoverFillPath(GrGLuint path, GrGLenum fillMode, GrGLuint mask, GrGLenum coverMode) {}
+ virtual GrGLvoid stencilThenCoverStrokePath(GrGLuint path, GrGLint reference, GrGLuint mask, GrGLenum coverMode) {}
+ virtual GrGLvoid stencilThenCoverFillPathInstanced(GrGLsizei numPaths, GrGLenum pathNameType, const GrGLvoid *paths, GrGLuint pathBase, GrGLenum fillMode, GrGLuint mask, GrGLenum coverMode, GrGLenum transformType, const GrGLfloat *transformValues) {}
+ virtual GrGLvoid stencilThenCoverStrokePathInstanced(GrGLsizei numPaths, GrGLenum pathNameType, const GrGLvoid *paths, GrGLuint pathBase, GrGLint reference, GrGLuint mask, GrGLenum coverMode, GrGLenum transformType, const GrGLfloat *transformValues) {}
+ virtual GrGLvoid programPathFragmentInputGen(GrGLuint program, GrGLint location, GrGLenum genMode, GrGLint components,const GrGLfloat *coeffs) {}
+ virtual GrGLvoid bindFragmentInputLocation(GrGLuint program, GrGLint location, const GrGLchar* name) {}
+ virtual GrGLint getProgramResourceLocation(GrGLuint program, GrGLenum programInterface, const GrGLchar *name) { return 0; }
+ virtual GrGLvoid coverageModulation(GrGLenum components) {}
+ virtual GrGLvoid multiDrawArraysIndirect(GrGLenum mode, const GrGLvoid *indirect, GrGLsizei drawcount, GrGLsizei stride) {}
+ virtual GrGLvoid multiDrawElementsIndirect(GrGLenum mode, GrGLenum type, const GrGLvoid *indirect, GrGLsizei drawcount, GrGLsizei stride) {}
+ virtual GrGLuint64 getTextureHandle(GrGLuint texture) { return 0; }
+ virtual GrGLuint64 getTextureSamplerHandle(GrGLuint texture, GrGLuint sampler) { return 0; }
+ virtual GrGLvoid makeTextureHandleResident(GrGLuint64 handle) {}
+ virtual GrGLvoid makeTextureHandleNonResident(GrGLuint64 handle) {}
+ virtual GrGLuint64 getImageHandle(GrGLuint texture, GrGLint level, GrGLboolean layered, GrGLint layer, GrGLint format) { return 0; }
+ virtual GrGLvoid makeImageHandleResident(GrGLuint64 handle, GrGLenum access) {}
+ virtual GrGLvoid makeImageHandleNonResident(GrGLuint64 handle) {}
+ virtual GrGLboolean isTextureHandleResident(GrGLuint64 handle) { return GR_GL_FALSE; }
+ virtual GrGLboolean isImageHandleResident(GrGLuint64 handle) { return GR_GL_FALSE; }
+ virtual GrGLvoid uniformHandleui64(GrGLint location, GrGLuint64 v0) {}
+ virtual GrGLvoid uniformHandleui64v(GrGLint location, GrGLsizei count, const GrGLuint64 *value) {}
+ virtual GrGLvoid programUniformHandleui64(GrGLuint program, GrGLint location, GrGLuint64 v0) {}
+ virtual GrGLvoid programUniformHandleui64v(GrGLuint program, GrGLint location, GrGLsizei count, const GrGLuint64 *value) {}
+ virtual GrGLvoid textureParameteri(GrGLuint texture, GrGLenum target, GrGLenum pname, GrGLint param) {}
+ virtual GrGLvoid textureParameteriv(GrGLuint texture, GrGLenum target, GrGLenum pname, const GrGLint *param) {}
+ virtual GrGLvoid textureParameterf(GrGLuint texture, GrGLenum target, GrGLenum pname, float param) {}
+ virtual GrGLvoid textureParameterfv(GrGLuint texture, GrGLenum target, GrGLenum pname, const float *param) {}
+ virtual GrGLvoid textureImage1D(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint GrGLinternalformat, GrGLsizei width, GrGLint border, GrGLenum format, GrGLenum type, const GrGLvoid *pixels) {}
+ virtual GrGLvoid textureImage2D(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint GrGLinternalformat, GrGLsizei width, GrGLsizei height, GrGLint border, GrGLenum format, GrGLenum type, const GrGLvoid *pixels) {}
+ virtual GrGLvoid textureSubImage1D(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint xoffset, GrGLsizei width, GrGLenum format, GrGLenum type, const GrGLvoid *pixels) {}
+ virtual GrGLvoid textureSubImage2D(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, const GrGLvoid *pixels) {}
+ virtual GrGLvoid copyTextureImage1D(GrGLuint texture, GrGLenum target, GrGLint level, GrGLenum GrGLinternalformat, GrGLint x, GrGLint y, GrGLsizei width, GrGLint border) {}
+ virtual GrGLvoid copyTextureImage2D(GrGLuint texture, GrGLenum target, GrGLint level, GrGLenum GrGLinternalformat, GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height, GrGLint border) {}
+ virtual GrGLvoid copyTextureSubImage1D(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint x, GrGLint y, GrGLsizei width) {}
+ virtual GrGLvoid copyTextureSubImage2D(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height) {}
+ virtual GrGLvoid getTextureImage(GrGLuint texture, GrGLenum target, GrGLint level, GrGLenum format, GrGLenum type, GrGLvoid *pixels) {}
+ virtual GrGLvoid getTextureParameterfv(GrGLuint texture, GrGLenum target, GrGLenum pname, float *params) {}
+ virtual GrGLvoid getTextureParameteriv(GrGLuint texture, GrGLenum target, GrGLenum pname, GrGLint *params) {}
+ virtual GrGLvoid getTextureLevelParameterfv(GrGLuint texture, GrGLenum target, GrGLint level, GrGLenum pname, float *params) {}
+ virtual GrGLvoid getTextureLevelParameteriv(GrGLuint texture, GrGLenum target, GrGLint level, GrGLenum pname, GrGLint *params) {}
+ virtual GrGLvoid textureImage3D(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint GrGLinternalformat, GrGLsizei width, GrGLsizei height, GrGLsizei depth, GrGLint border, GrGLenum format, GrGLenum type, const GrGLvoid *pixels) {}
+ virtual GrGLvoid textureSubImage3D(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint zoffset, GrGLsizei width, GrGLsizei height, GrGLsizei depth, GrGLenum format, GrGLenum type, const GrGLvoid *pixels) {}
+ virtual GrGLvoid copyTextureSubImage3D(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint zoffset, GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height) {}
+ virtual GrGLvoid compressedTextureImage3D(GrGLuint texture, GrGLenum target, GrGLint level, GrGLenum GrGLinternalformat, GrGLsizei width, GrGLsizei height, GrGLsizei depth, GrGLint border, GrGLsizei imageSize, const GrGLvoid *data) {}
+ virtual GrGLvoid compressedTextureImage2D(GrGLuint texture, GrGLenum target, GrGLint level, GrGLenum GrGLinternalformat, GrGLsizei width, GrGLsizei height, GrGLint border, GrGLsizei imageSize, const GrGLvoid *data) {}
+ virtual GrGLvoid compressedTextureImage1D(GrGLuint texture, GrGLenum target, GrGLint level, GrGLenum GrGLinternalformat, GrGLsizei width, GrGLint border, GrGLsizei imageSize, const GrGLvoid *data) {}
+ virtual GrGLvoid compressedTextureSubImage3D(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint zoffset, GrGLsizei width, GrGLsizei height, GrGLsizei depth, GrGLenum format, GrGLsizei imageSize, const GrGLvoid *data) {}
+ virtual GrGLvoid compressedTextureSubImage2D(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLsizei imageSize, const GrGLvoid *data) {}
+ virtual GrGLvoid compressedTextureSubImage1D(GrGLuint texture, GrGLenum target, GrGLint level, GrGLint xoffset, GrGLsizei width, GrGLenum format, GrGLsizei imageSize, const GrGLvoid *data) {}
+ virtual GrGLvoid getCompressedTextureImage(GrGLuint texture, GrGLenum target, GrGLint level, GrGLvoid *img) {}
+ virtual GrGLvoid namedBufferData(GrGLuint buffer, GrGLsizeiptr size, const GrGLvoid *data, GrGLenum usage) {}
+ virtual GrGLvoid namedBufferSubData(GrGLuint buffer, GrGLintptr offset, GrGLsizeiptr size, const GrGLvoid *data) {}
+ virtual GrGLvoid* mapNamedBuffer(GrGLuint buffer, GrGLenum access) { return nullptr; }
+ virtual GrGLboolean unmapNamedBuffer(GrGLuint buffer) { return GR_GL_FALSE; }
+ virtual GrGLvoid getNamedBufferParameteriv(GrGLuint buffer, GrGLenum pname, GrGLint *params) {}
+ virtual GrGLvoid getNamedBufferPointerv(GrGLuint buffer, GrGLenum pname, GrGLvoid* *params) {}
+ virtual GrGLvoid getNamedBufferSubData(GrGLuint buffer, GrGLintptr offset, GrGLsizeiptr size, GrGLvoid *data) {}
+ virtual GrGLvoid programUniform1f(GrGLuint program, GrGLint location, float v0) {}
+ virtual GrGLvoid programUniform2f(GrGLuint program, GrGLint location, float v0, float v1) {}
+ virtual GrGLvoid programUniform3f(GrGLuint program, GrGLint location, float v0, float v1, float v2) {}
+ virtual GrGLvoid programUniform4f(GrGLuint program, GrGLint location, float v0, float v1, float v2, float v3) {}
+ virtual GrGLvoid programUniform1i(GrGLuint program, GrGLint location, GrGLint v0) {}
+ virtual GrGLvoid programUniform2i(GrGLuint program, GrGLint location, GrGLint v0, GrGLint v1) {}
+ virtual GrGLvoid programUniform3i(GrGLuint program, GrGLint location, GrGLint v0, GrGLint v1, GrGLint v2) {}
+ virtual GrGLvoid programUniform4i(GrGLuint program, GrGLint location, GrGLint v0, GrGLint v1, GrGLint v2, GrGLint v3) {}
+ virtual GrGLvoid programUniform1fv(GrGLuint program, GrGLint location, GrGLsizei count, const float *value) {}
+ virtual GrGLvoid programUniform2fv(GrGLuint program, GrGLint location, GrGLsizei count, const float *value) {}
+ virtual GrGLvoid programUniform3fv(GrGLuint program, GrGLint location, GrGLsizei count, const float *value) {}
+ virtual GrGLvoid programUniform4fv(GrGLuint program, GrGLint location, GrGLsizei count, const float *value) {}
+ virtual GrGLvoid programUniform1iv(GrGLuint program, GrGLint location, GrGLsizei count, const GrGLint *value) {}
+ virtual GrGLvoid programUniform2iv(GrGLuint program, GrGLint location, GrGLsizei count, const GrGLint *value) {}
+ virtual GrGLvoid programUniform3iv(GrGLuint program, GrGLint location, GrGLsizei count, const GrGLint *value) {}
+ virtual GrGLvoid programUniform4iv(GrGLuint program, GrGLint location, GrGLsizei count, const GrGLint *value) {}
+ virtual GrGLvoid programUniformMatrix2fv(GrGLuint program, GrGLint location, GrGLsizei count, GrGLboolean transpose, const float *value) {}
+ virtual GrGLvoid programUniformMatrix3fv(GrGLuint program, GrGLint location, GrGLsizei count, GrGLboolean transpose, const float *value) {}
+ virtual GrGLvoid programUniformMatrix4fv(GrGLuint program, GrGLint location, GrGLsizei count, GrGLboolean transpose, const float *value) {}
+ virtual GrGLvoid programUniformMatrix2x3fv(GrGLuint program, GrGLint location, GrGLsizei count, GrGLboolean transpose, const float *value) {}
+ virtual GrGLvoid programUniformMatrix3x2fv(GrGLuint program, GrGLint location, GrGLsizei count, GrGLboolean transpose, const float *value) {}
+ virtual GrGLvoid programUniformMatrix2x4fv(GrGLuint program, GrGLint location, GrGLsizei count, GrGLboolean transpose, const float *value) {}
+ virtual GrGLvoid programUniformMatrix4x2fv(GrGLuint program, GrGLint location, GrGLsizei count, GrGLboolean transpose, const float *value) {}
+ virtual GrGLvoid programUniformMatrix3x4fv(GrGLuint program, GrGLint location, GrGLsizei count, GrGLboolean transpose, const float *value) {}
+ virtual GrGLvoid programUniformMatrix4x3fv(GrGLuint program, GrGLint location, GrGLsizei count, GrGLboolean transpose, const float *value) {}
+ virtual GrGLvoid namedRenderbufferStorage(GrGLuint renderbuffer, GrGLenum GrGLinternalformat, GrGLsizei width, GrGLsizei height) {}
+ virtual GrGLvoid getNamedRenderbufferParameteriv(GrGLuint renderbuffer, GrGLenum pname, GrGLint *params) {}
+ virtual GrGLvoid namedRenderbufferStorageMultisample(GrGLuint renderbuffer, GrGLsizei samples, GrGLenum GrGLinternalformat, GrGLsizei width, GrGLsizei height) {}
+ virtual GrGLenum checkNamedFramebufferStatus(GrGLuint framebuffer, GrGLenum target) { return GR_GL_FRAMEBUFFER_COMPLETE; }
+ virtual GrGLvoid namedFramebufferTexture1D(GrGLuint framebuffer, GrGLenum attachment, GrGLenum textarget, GrGLuint texture, GrGLint level) {}
+ virtual GrGLvoid namedFramebufferTexture2D(GrGLuint framebuffer, GrGLenum attachment, GrGLenum textarget, GrGLuint texture, GrGLint level) {}
+ virtual GrGLvoid namedFramebufferTexture3D(GrGLuint framebuffer, GrGLenum attachment, GrGLenum textarget, GrGLuint texture, GrGLint level, GrGLint zoffset) {}
+ virtual GrGLvoid namedFramebufferRenderbuffer(GrGLuint framebuffer, GrGLenum attachment, GrGLenum renderbuffertarget, GrGLuint renderbuffer) {}
+ virtual GrGLvoid getNamedFramebufferAttachmentParameteriv(GrGLuint framebuffer, GrGLenum attachment, GrGLenum pname, GrGLint *params) {}
+ virtual GrGLvoid generateTextureMipmap(GrGLuint texture, GrGLenum target) {}
+ virtual GrGLvoid framebufferDrawBuffer(GrGLuint framebuffer, GrGLenum mode) {}
+ virtual GrGLvoid framebufferDrawBuffers(GrGLuint framebuffer, GrGLsizei n, const GrGLenum *bufs) {}
+ virtual GrGLvoid framebufferReadBuffer(GrGLuint framebuffer, GrGLenum mode) {}
+ virtual GrGLvoid getFramebufferParameteriv(GrGLuint framebuffer, GrGLenum pname, GrGLint *param) {}
+ virtual GrGLvoid namedCopyBufferSubData(GrGLuint readBuffer, GrGLuint writeBuffer, GrGLintptr readOffset, GrGLintptr writeOffset, GrGLsizeiptr size) {}
+ virtual GrGLvoid vertexArrayVertexOffset(GrGLuint vaobj, GrGLuint buffer, GrGLint size, GrGLenum type, GrGLsizei stride, GrGLintptr offset) {}
+ virtual GrGLvoid vertexArrayColorOffset(GrGLuint vaobj, GrGLuint buffer, GrGLint size, GrGLenum type, GrGLsizei stride, GrGLintptr offset) {}
+ virtual GrGLvoid vertexArrayEdgeFlagOffset(GrGLuint vaobj, GrGLuint buffer, GrGLsizei stride, GrGLintptr offset) {}
+ virtual GrGLvoid vertexArrayIndexOffset(GrGLuint vaobj, GrGLuint buffer, GrGLenum type, GrGLsizei stride, GrGLintptr offset) {}
+ virtual GrGLvoid vertexArrayNormalOffset(GrGLuint vaobj, GrGLuint buffer, GrGLenum type, GrGLsizei stride, GrGLintptr offset) {}
+ virtual GrGLvoid vertexArrayTexCoordOffset(GrGLuint vaobj, GrGLuint buffer, GrGLint size, GrGLenum type, GrGLsizei stride, GrGLintptr offset) {}
+ virtual GrGLvoid vertexArrayMultiTexCoordOffset(GrGLuint vaobj, GrGLuint buffer, GrGLenum texunit, GrGLint size, GrGLenum type, GrGLsizei stride, GrGLintptr offset) {}
+ virtual GrGLvoid vertexArrayFogCoordOffset(GrGLuint vaobj, GrGLuint buffer, GrGLenum type, GrGLsizei stride, GrGLintptr offset) {}
+ virtual GrGLvoid vertexArraySecondaryColorOffset(GrGLuint vaobj, GrGLuint buffer, GrGLint size, GrGLenum type, GrGLsizei stride, GrGLintptr offset) {}
+ virtual GrGLvoid vertexArrayVertexAttribOffset(GrGLuint vaobj, GrGLuint buffer, GrGLuint index, GrGLint size, GrGLenum type, GrGLboolean normalized, GrGLsizei stride, GrGLintptr offset) {}
+ virtual GrGLvoid vertexArrayVertexAttribIOffset(GrGLuint vaobj, GrGLuint buffer, GrGLuint index, GrGLint size, GrGLenum type, GrGLsizei stride, GrGLintptr offset) {}
+ virtual GrGLvoid enableVertexArray(GrGLuint vaobj, GrGLenum array) {}
+ virtual GrGLvoid disableVertexArray(GrGLuint vaobj, GrGLenum array) {}
+ virtual GrGLvoid enableVertexArrayAttrib(GrGLuint vaobj, GrGLuint index) {}
+ virtual GrGLvoid disableVertexArrayAttrib(GrGLuint vaobj, GrGLuint index) {}
+ virtual GrGLvoid getVertexArrayIntegerv(GrGLuint vaobj, GrGLenum pname, GrGLint *param) {}
+ virtual GrGLvoid getVertexArrayPointerv(GrGLuint vaobj, GrGLenum pname, GrGLvoid **param) {}
+ virtual GrGLvoid getVertexArrayIntegeri_v(GrGLuint vaobj, GrGLuint index, GrGLenum pname, GrGLint *param) {}
+ virtual GrGLvoid getVertexArrayPointeri_v(GrGLuint vaobj, GrGLuint index, GrGLenum pname, GrGLvoid **param) {}
+ virtual GrGLvoid* mapNamedBufferRange(GrGLuint buffer, GrGLintptr offset, GrGLsizeiptr length, GrGLbitfield access) { return nullptr; }
+ virtual GrGLvoid flushMappedNamedBufferRange(GrGLuint buffer, GrGLintptr offset, GrGLsizeiptr length) {}
+ virtual GrGLvoid textureBuffer(GrGLuint texture, GrGLenum target, GrGLenum internalformat, GrGLuint buffer) {}
+ virtual GrGLsync fenceSync(GrGLenum condition, GrGLbitfield flags) { return nullptr; }
+ virtual GrGLenum clientWaitSync(GrGLsync sync, GrGLbitfield flags, GrGLuint64 timeout) { return GR_GL_WAIT_FAILED; }
+ virtual GrGLvoid deleteSync(GrGLsync sync) {}
+ virtual GrGLvoid debugMessageControl(GrGLenum source, GrGLenum type, GrGLenum severity, GrGLsizei count, const GrGLuint* ids, GrGLboolean enabled) {}
+ virtual GrGLvoid debugMessageInsert(GrGLenum source, GrGLenum type, GrGLuint id, GrGLenum severity, GrGLsizei length, const GrGLchar* buf) {}
+ virtual GrGLvoid debugMessageCallback(GRGLDEBUGPROC callback, const GrGLvoid* userParam) {}
+ virtual GrGLuint getDebugMessageLog(GrGLuint count, GrGLsizei bufSize, GrGLenum* sources, GrGLenum* types, GrGLuint* ids, GrGLenum* severities, GrGLsizei* lengths, GrGLchar* messageLog) { return 0; }
+ virtual GrGLvoid pushDebugGroup(GrGLenum source, GrGLuint id, GrGLsizei length, const GrGLchar * message) {}
+ virtual GrGLvoid popDebugGroup() {}
+ virtual GrGLvoid objectLabel(GrGLenum identifier, GrGLuint name, GrGLsizei length, const GrGLchar *label) {}
+
+protected:
+    // This must be called by the leaf class.
+ void init(GrGLStandard standard) {
+ fStandard = standard;
+ fExtensions.init(standard, fFunctions.fGetString, fFunctions.fGetStringi,
+ fFunctions.fGetIntegerv, nullptr, GR_EGL_NO_DISPLAY);
+ }
+ GrGLTestInterface();
+};
+
+#endif
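
Editor's note: GrGLTestInterface above gives every GL entry point a no-op virtual default with a safe return value, so a test subclass only overrides the calls it wants to observe. A minimal standalone sketch of that null-interface pattern follows; the class and function names are invented for illustration and are not part of the imported sources.

#include <cstdio>

// Base class: every entry point defaults to "do nothing / return a harmless value".
class NullGLInterface {
public:
    virtual ~NullGLInterface() = default;
    virtual void clear(unsigned mask) {}
    virtual unsigned getError() { return 0; }  // pretend GL_NO_ERROR
};

// A test double overrides only what it needs to count or log.
class CountingGLInterface : public NullGLInterface {
public:
    void clear(unsigned mask) override { ++fClearCount; std::printf("clear(0x%x)\n", mask); }
    int fClearCount = 0;
};

int main() {
    CountingGLInterface gl;
    gl.clear(0x4000);      // observed by the test double
    return gl.getError();  // inherited no-op default
}
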
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLTexture.cpp b/gfx/skia/skia/src/gpu/gl/GrGLTexture.cpp
new file mode 100644
index 000000000..9fd9ad878
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLTexture.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLTexture.h"
+#include "GrGLGpu.h"
+#include "SkTraceMemoryDump.h"
+
+#define GPUGL static_cast<GrGLGpu*>(this->getGpu())
+#define GL_CALL(X) GR_GL_CALL(GPUGL->glInterface(), X)
+
+inline static GrSLType sampler_type(const GrGLTexture::IDDesc& idDesc, const GrGLGpu* gpu) {
+ if (idDesc.fInfo.fTarget == GR_GL_TEXTURE_EXTERNAL) {
+ SkASSERT(gpu->glCaps().glslCaps()->externalTextureSupport());
+ return kTextureExternalSampler_GrSLType;
+ } else if (idDesc.fInfo.fTarget == GR_GL_TEXTURE_RECTANGLE) {
+ SkASSERT(gpu->glCaps().rectangleTextureSupport());
+ return kTexture2DRectSampler_GrSLType;
+ } else {
+ SkASSERT(idDesc.fInfo.fTarget == GR_GL_TEXTURE_2D);
+ return kTexture2DSampler_GrSLType;
+ }
+}
+
+// Because this class is virtually derived from GrSurface we must explicitly call its constructor.
+GrGLTexture::GrGLTexture(GrGLGpu* gpu, SkBudgeted budgeted, const GrSurfaceDesc& desc,
+ const IDDesc& idDesc)
+ : GrSurface(gpu, desc)
+ , INHERITED(gpu, desc, sampler_type(idDesc, gpu), false) {
+ this->init(desc, idDesc);
+ this->registerWithCache(budgeted);
+}
+
+GrGLTexture::GrGLTexture(GrGLGpu* gpu, SkBudgeted budgeted, const GrSurfaceDesc& desc,
+ const IDDesc& idDesc,
+ bool wasMipMapDataProvided)
+ : GrSurface(gpu, desc)
+ , INHERITED(gpu, desc, sampler_type(idDesc, gpu), wasMipMapDataProvided) {
+ this->init(desc, idDesc);
+ this->registerWithCache(budgeted);
+}
+
+GrGLTexture::GrGLTexture(GrGLGpu* gpu, Wrapped, const GrSurfaceDesc& desc, const IDDesc& idDesc)
+ : GrSurface(gpu, desc)
+ , INHERITED(gpu, desc, sampler_type(idDesc, gpu), false) {
+ this->init(desc, idDesc);
+ this->registerWithCacheWrapped();
+}
+
+GrGLTexture::GrGLTexture(GrGLGpu* gpu, const GrSurfaceDesc& desc, const IDDesc& idDesc)
+ : GrSurface(gpu, desc)
+ , INHERITED(gpu, desc, sampler_type(idDesc, gpu), false) {
+ this->init(desc, idDesc);
+}
+
+void GrGLTexture::init(const GrSurfaceDesc& desc, const IDDesc& idDesc) {
+ SkASSERT(0 != idDesc.fInfo.fID);
+ fTexParams.invalidate();
+ fTexParamsTimestamp = GrGpu::kExpiredTimestamp;
+ fInfo = idDesc.fInfo;
+ fTextureIDOwnership = idDesc.fOwnership;
+}
+
+void GrGLTexture::onRelease() {
+ if (fInfo.fID) {
+ if (GrBackendObjectOwnership::kBorrowed != fTextureIDOwnership) {
+ GL_CALL(DeleteTextures(1, &fInfo.fID));
+ }
+ fInfo.fID = 0;
+ }
+ INHERITED::onRelease();
+}
+
+void GrGLTexture::onAbandon() {
+ fInfo.fTarget = 0;
+ fInfo.fID = 0;
+ INHERITED::onAbandon();
+}
+
+GrBackendObject GrGLTexture::getTextureHandle() const {
+#ifdef SK_IGNORE_GL_TEXTURE_TARGET
+ return static_cast<GrBackendObject>(this->textureID());
+#else
+ return reinterpret_cast<GrBackendObject>(&fInfo);
+#endif
+}
+
+void GrGLTexture::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& dumpName) const {
+ SkString texture_id;
+ texture_id.appendU32(this->textureID());
+ traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_texture",
+ texture_id.c_str());
+}
+
+GrGLTexture* GrGLTexture::CreateWrapped(GrGLGpu* gpu, const GrSurfaceDesc& desc,
+ const IDDesc& idDesc) {
+ return new GrGLTexture(gpu, kWrapped, desc, idDesc);
+}
+
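
Editor's note: onRelease() above issues DeleteTextures only when the texture ID is owned rather than borrowed, and onAbandon() drops the ID without touching GL at all. A standalone sketch of that ownership-aware release pattern follows; the enum and the printed "delete" call are illustrative stand-ins, not the Skia or GL API.

#include <cstdio>

enum class Ownership { kOwned, kBorrowed };

struct GLTextureHandle {
    unsigned  fID = 0;
    Ownership fOwnership = Ownership::kOwned;

    // release(): delete the GL object only if we own it, then forget the ID.
    void release() {
        if (fID && fOwnership == Ownership::kOwned) {
            std::printf("glDeleteTextures(1, {%u})\n", fID);  // stand-in for the real GL call
        }
        fID = 0;
    }

    // abandon(): the context is gone, so never call GL; just drop the ID.
    void abandon() { fID = 0; }
};

int main() {
    GLTextureHandle owned{7, Ownership::kOwned};
    GLTextureHandle wrapped{8, Ownership::kBorrowed};
    owned.release();    // prints a delete
    wrapped.release();  // no GL call: the client still owns ID 8
    return 0;
}
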
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLTexture.h b/gfx/skia/skia/src/gpu/gl/GrGLTexture.h
new file mode 100644
index 000000000..05d26c8de
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLTexture.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGLTexture_DEFINED
+#define GrGLTexture_DEFINED
+
+#include "GrGpu.h"
+#include "GrTexture.h"
+#include "GrGLUtil.h"
+
+class GrGLGpu;
+
+class GrGLTexture : public GrTexture {
+public:
+ struct TexParams {
+ GrGLenum fMinFilter;
+ GrGLenum fMagFilter;
+ GrGLenum fWrapS;
+ GrGLenum fWrapT;
+ GrGLenum fMaxMipMapLevel;
+ GrGLenum fSwizzleRGBA[4];
+ GrGLenum fSRGBDecode;
+ void invalidate() { memset(this, 0xff, sizeof(TexParams)); }
+ };
+
+ struct IDDesc {
+ GrGLTextureInfo fInfo;
+ GrBackendObjectOwnership fOwnership;
+ };
+ GrGLTexture(GrGLGpu*, SkBudgeted, const GrSurfaceDesc&, const IDDesc&);
+ GrGLTexture(GrGLGpu*, SkBudgeted, const GrSurfaceDesc&, const IDDesc&,
+ bool wasMipMapDataProvided);
+
+ GrBackendObject getTextureHandle() const override;
+
+ void textureParamsModified() override { fTexParams.invalidate(); }
+
+ // These functions are used to track the texture parameters associated with the texture.
+ const TexParams& getCachedTexParams(GrGpu::ResetTimestamp* timestamp) const {
+ *timestamp = fTexParamsTimestamp;
+ return fTexParams;
+ }
+
+ void setCachedTexParams(const TexParams& texParams,
+ GrGpu::ResetTimestamp timestamp) {
+ fTexParams = texParams;
+ fTexParamsTimestamp = timestamp;
+ }
+
+ GrGLuint textureID() const { return fInfo.fID; }
+
+ GrGLenum target() const { return fInfo.fTarget; }
+
+ static GrGLTexture* CreateWrapped(GrGLGpu*, const GrSurfaceDesc&, const IDDesc&);
+protected:
+ // Constructor for subclasses.
+ GrGLTexture(GrGLGpu*, const GrSurfaceDesc&, const IDDesc&);
+
+ enum Wrapped { kWrapped };
+ // Constructor for instances wrapping backend objects.
+ GrGLTexture(GrGLGpu*, Wrapped, const GrSurfaceDesc&, const IDDesc&);
+
+ void init(const GrSurfaceDesc&, const IDDesc&);
+
+ void onAbandon() override;
+ void onRelease() override;
+ void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& dumpName) const override;
+
+private:
+ TexParams fTexParams;
+ GrGpu::ResetTimestamp fTexParamsTimestamp;
+ // Holds the texture target and ID. A pointer to this may be shared to external clients for
+ // direct interaction with the GL object.
+ GrGLTextureInfo fInfo;
+ GrBackendObjectOwnership fTextureIDOwnership;
+
+ typedef GrTexture INHERITED;
+};
+
+#endif
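
Editor's note: TexParams::invalidate() above fills the cached parameter struct with 0xff so the next comparison against any real GL enum value is guaranteed to mismatch and force the parameter to be re-set. A standalone sketch of that trick follows; the struct and the enum value are invented for illustration.

#include <cstring>
#include <cstdio>

struct CachedTexParams {
    unsigned fMinFilter;
    unsigned fWrapS;
    // Fill with 0xff: no valid GL enum is 0xffffffff, so any real value will differ.
    void invalidate() { std::memset(this, 0xff, sizeof(CachedTexParams)); }
};

int main() {
    CachedTexParams cached;
    cached.invalidate();

    const unsigned kDesiredMinFilter = 0x2601;  // e.g. GL_LINEAR
    if (cached.fMinFilter != kDesiredMinFilter) {
        std::printf("glTexParameteri(MIN_FILTER, 0x%x)\n", kDesiredMinFilter);  // stand-in
        cached.fMinFilter = kDesiredMinFilter;
    }
    return 0;
}
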
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLTextureRenderTarget.cpp b/gfx/skia/skia/src/gpu/gl/GrGLTextureRenderTarget.cpp
new file mode 100644
index 000000000..2ba469a9f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLTextureRenderTarget.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLTextureRenderTarget.h"
+
+#include "SkTraceMemoryDump.h"
+
+// GrGLTextureRenderTarget must dump both of its superclasses.
+void GrGLTextureRenderTarget::dumpMemoryStatistics(
+ SkTraceMemoryDump* traceMemoryDump) const {
+ GrGLRenderTarget::dumpMemoryStatistics(traceMemoryDump);
+
+    // Also dump the GrGLTexture's memory. Due to this resource having both a
+    // texture and a renderbuffer component, dump as
+    // skia/gpu_resources/resource_#/texture
+ SkString dumpName("skia/gpu_resources/resource_");
+ dumpName.appendS32(this->uniqueID());
+ dumpName.append("/texture");
+
+ // Use the texture's gpuMemorySize, not our own, which includes the
+ // renderbuffer as well.
+ size_t size = GrGLTexture::gpuMemorySize();
+
+ traceMemoryDump->dumpNumericValue(dumpName.c_str(), "size", "bytes", size);
+
+ if (this->isPurgeable()) {
+ traceMemoryDump->dumpNumericValue(dumpName.c_str(), "purgeable_size",
+ "bytes", size);
+ }
+
+ SkString texture_id;
+ texture_id.appendU32(this->textureID());
+ traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_texture",
+ texture_id.c_str());
+}
+
+bool GrGLTextureRenderTarget::canAttemptStencilAttachment() const {
+ // The RT FBO of GrGLTextureRenderTarget is never created from a
+ // wrapped FBO.
+ return true;
+}
+
+GrGLTextureRenderTarget* GrGLTextureRenderTarget::CreateWrapped(GrGLGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const GrGLTexture::IDDesc& texIDDesc,
+ const GrGLRenderTarget::IDDesc& rtIDDesc) {
+ return new GrGLTextureRenderTarget(gpu, desc, texIDDesc, rtIDDesc);
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLTextureRenderTarget.h b/gfx/skia/skia/src/gpu/gl/GrGLTextureRenderTarget.h
new file mode 100644
index 000000000..0826cf3a7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLTextureRenderTarget.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGLTextureRenderTarget_DEFINED
+#define GrGLTextureRenderTarget_DEFINED
+
+#include "GrGLGpu.h"
+#include "GrGLTexture.h"
+#include "GrGLRenderTarget.h"
+
+class GrGLGpu;
+
+#ifdef SK_BUILD_FOR_WIN
+// Windows gives bogus warnings about inheriting asTexture/asRenderTarget via dominance.
+#pragma warning(push)
+#pragma warning(disable: 4250)
+#endif
+
+class GrGLTextureRenderTarget : public GrGLTexture, public GrGLRenderTarget {
+public:
+ // We're virtually derived from GrSurface (via both GrGLTexture and GrGLRenderTarget) so its
+ // constructor must be explicitly called.
+ GrGLTextureRenderTarget(GrGLGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ const GrGLTexture::IDDesc& texIDDesc,
+ const GrGLRenderTarget::IDDesc& rtIDDesc)
+ : GrSurface(gpu, desc)
+ , GrGLTexture(gpu, desc, texIDDesc)
+ , GrGLRenderTarget(gpu, desc, rtIDDesc) {
+ this->registerWithCache(budgeted);
+ }
+
+ bool canAttemptStencilAttachment() const override;
+
+ void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const override;
+
+ static GrGLTextureRenderTarget* CreateWrapped(GrGLGpu* gpu, const GrSurfaceDesc& desc,
+ const GrGLTexture::IDDesc& texIDDesc,
+ const GrGLRenderTarget::IDDesc& rtIDDesc);
+protected:
+ void onAbandon() override {
+ GrGLRenderTarget::onAbandon();
+ GrGLTexture::onAbandon();
+ }
+
+ void onRelease() override {
+ GrGLRenderTarget::onRelease();
+ GrGLTexture::onRelease();
+ }
+
+private:
+ // Constructor for instances wrapping backend objects.
+ GrGLTextureRenderTarget(GrGLGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const GrGLTexture::IDDesc& texIDDesc,
+ const GrGLRenderTarget::IDDesc& rtIDDesc)
+ : GrSurface(gpu, desc)
+ , GrGLTexture(gpu, desc, texIDDesc)
+ , GrGLRenderTarget(gpu, desc, rtIDDesc) {
+ this->registerWithCacheWrapped();
+ }
+
+ // GrGLRenderTarget accounts for the texture's memory and any MSAA renderbuffer's memory.
+ size_t onGpuMemorySize() const override {
+ return GrGLRenderTarget::onGpuMemorySize();
+ }
+
+};
+
+#ifdef SK_BUILD_FOR_WIN
+#pragma warning(pop)
+#endif
+
+#endif
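
Editor's note: GrGLTextureRenderTarget inherits GrSurface along two paths (through GrGLTexture and GrGLRenderTarget), so GrSurface is a virtual base and the most-derived class must call its constructor directly, exactly as the constructors above do. A standalone sketch of that diamond layout follows; the class names are invented for illustration.

#include <cstdio>

class Surface {  // shared virtual base: exactly one subobject in the diamond
public:
    explicit Surface(int w) : fWidth(w) { std::printf("Surface(%d)\n", w); }
    int width() const { return fWidth; }
private:
    int fWidth;
};

class Texture : public virtual Surface {
public:
    explicit Texture(int w) : Surface(w) {}   // ignored when Texture is not most-derived
};

class RenderTarget : public virtual Surface {
public:
    explicit RenderTarget(int w) : Surface(w) {}
};

class TextureRenderTarget : public Texture, public RenderTarget {
public:
    // The most-derived class must initialize the virtual base itself.
    explicit TextureRenderTarget(int w) : Surface(w), Texture(w), RenderTarget(w) {}
};

int main() {
    TextureRenderTarget trt(256);
    std::printf("width = %d\n", trt.width());  // "Surface(256)" is printed exactly once
    return 0;
}
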
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLUniformHandler.cpp b/gfx/skia/skia/src/gpu/gl/GrGLUniformHandler.cpp
new file mode 100644
index 000000000..c388e4621
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLUniformHandler.cpp
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "gl/GrGLUniformHandler.h"
+
+#include "gl/GrGLCaps.h"
+#include "gl/GrGLGpu.h"
+#include "gl/builders/GrGLProgramBuilder.h"
+
+#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
+#define GL_CALL_RET(R, X) GR_GL_CALL_RET(this->glGpu()->glInterface(), R, X)
+
+GrGLSLUniformHandler::UniformHandle GrGLUniformHandler::internalAddUniformArray(
+ uint32_t visibility,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name,
+ bool mangleName,
+ int arrayCount,
+ const char** outName) {
+ SkASSERT(name && strlen(name));
+ SkDEBUGCODE(static const uint32_t kVisMask = kVertex_GrShaderFlag | kFragment_GrShaderFlag);
+ SkASSERT(0 == (~kVisMask & visibility));
+ SkASSERT(0 != visibility);
+ SkASSERT(kDefault_GrSLPrecision == precision || GrSLTypeAcceptsPrecision(type));
+
+ UniformInfo& uni = fUniforms.push_back();
+ uni.fVariable.setType(type);
+ uni.fVariable.setTypeModifier(GrGLSLShaderVar::kUniform_TypeModifier);
+    // TODO: this is a bit hacky; let's think of a better way. Basically we need to be able to
+    // use the uniform view matrix name in the GP, and the GP is immutable, so it has to tell the
+    // PB exactly what name it wants to use for the uniform view matrix. If we prefix anything,
+    // the names will mismatch. The correct solution is probably to have every GP that needs the
+    // uniform view matrix upload the view matrix in its setData along with its regular
+    // uniforms.
+ char prefix = 'u';
+ if ('u' == name[0]) {
+ prefix = '\0';
+ }
+ fProgramBuilder->nameVariable(uni.fVariable.accessName(), prefix, name, mangleName);
+ uni.fVariable.setArrayCount(arrayCount);
+ uni.fVisibility = visibility;
+ uni.fVariable.setPrecision(precision);
+
+ if (outName) {
+ *outName = uni.fVariable.c_str();
+ }
+ return GrGLSLUniformHandler::UniformHandle(fUniforms.count() - 1);
+}
+
+GrGLSLUniformHandler::SamplerHandle GrGLUniformHandler::internalAddSampler(uint32_t visibility,
+ GrPixelConfig config,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name) {
+ SkASSERT(name && strlen(name));
+ SkDEBUGCODE(static const uint32_t kVisMask = kVertex_GrShaderFlag | kFragment_GrShaderFlag);
+ SkASSERT(0 == (~kVisMask & visibility));
+ SkASSERT(0 != visibility);
+ SkString mangleName;
+ char prefix = 'u';
+ fProgramBuilder->nameVariable(&mangleName, prefix, name, true);
+ fSamplers.emplace_back(visibility, config, type, precision, mangleName.c_str());
+ return GrGLSLUniformHandler::SamplerHandle(fSamplers.count() - 1);
+}
+
+void GrGLUniformHandler::appendUniformDecls(GrShaderFlags visibility, SkString* out) const {
+ for (int i = 0; i < fUniforms.count(); ++i) {
+ if (fUniforms[i].fVisibility & visibility) {
+ fUniforms[i].fVariable.appendDecl(fProgramBuilder->glslCaps(), out);
+ out->append(";\n");
+ }
+ }
+ for (int i = 0; i < fSamplers.count(); ++i) {
+ if (fSamplers[i].visibility() & visibility) {
+ fSamplers[i].fShaderVar.appendDecl(fProgramBuilder->glslCaps(), out);
+ out->append(";\n");
+ }
+ }
+}
+
+void GrGLUniformHandler::bindUniformLocations(GrGLuint programID, const GrGLCaps& caps) {
+ if (caps.bindUniformLocationSupport()) {
+ int uniformCnt = fUniforms.count();
+ for (int i = 0; i < uniformCnt; ++i) {
+ GL_CALL(BindUniformLocation(programID, i, fUniforms[i].fVariable.c_str()));
+ fUniforms[i].fLocation = i;
+ }
+ for (int i = 0; i < fSamplers.count(); ++i) {
+ GrGLint location = i + uniformCnt;
+ GL_CALL(BindUniformLocation(programID, location, fSamplers[i].fShaderVar.c_str()));
+ fSamplers[i].fLocation = location;
+ }
+ }
+}
+
+void GrGLUniformHandler::getUniformLocations(GrGLuint programID, const GrGLCaps& caps) {
+ if (!caps.bindUniformLocationSupport()) {
+ int count = fUniforms.count();
+ for (int i = 0; i < count; ++i) {
+ GrGLint location;
+ GL_CALL_RET(location, GetUniformLocation(programID, fUniforms[i].fVariable.c_str()));
+ fUniforms[i].fLocation = location;
+ }
+ for (int i = 0; i < fSamplers.count(); ++i) {
+ GrGLint location;
+ GL_CALL_RET(location, GetUniformLocation(programID, fSamplers[i].fShaderVar.c_str()));
+ fSamplers[i].fLocation = location;
+ }
+ }
+}
+
+const GrGLGpu* GrGLUniformHandler::glGpu() const {
+ GrGLProgramBuilder* glPB = (GrGLProgramBuilder*) fProgramBuilder;
+ return glPB->gpu();
+}
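
Editor's note: the handler above either assigns uniform locations itself before linking (BindUniformLocation, when the cap reports support) or asks GL for them afterwards (GetUniformLocation). A standalone sketch of those two strategies against a faked program object follows; everything below is illustrative and is not the GL or Skia API.

#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Faked "program" that records bound locations and can be queried after "linking".
struct FakeProgram {
    std::map<std::string, int> fLocations;
    void bindUniformLocation(int loc, const std::string& name) { fLocations[name] = loc; }
    int getUniformLocation(const std::string& name) const {
        auto it = fLocations.find(name);
        return it == fLocations.end() ? -1 : it->second;
    }
};

int main() {
    std::vector<std::string> uniforms = {"uViewMatrix", "uColor", "uSampler"};
    FakeProgram program;
    bool bindSupported = true;

    std::vector<int> locations(uniforms.size(), -1);
    if (bindSupported) {
        // Strategy 1: choose sequential locations up front and tell the program.
        for (size_t i = 0; i < uniforms.size(); ++i) {
            program.bindUniformLocation((int)i, uniforms[i]);
            locations[i] = (int)i;
        }
    } else {
        // Strategy 2: let the linker choose and query afterwards.
        for (size_t i = 0; i < uniforms.size(); ++i) {
            locations[i] = program.getUniformLocation(uniforms[i]);
        }
    }
    for (size_t i = 0; i < uniforms.size(); ++i) {
        std::printf("%s -> %d\n", uniforms[i].c_str(), locations[i]);
    }
    return 0;
}
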
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLUniformHandler.h b/gfx/skia/skia/src/gpu/gl/GrGLUniformHandler.h
new file mode 100644
index 000000000..3656d3a10
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLUniformHandler.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLUniformHandler_DEFINED
+#define GrGLUniformHandler_DEFINED
+
+#include "glsl/GrGLSLUniformHandler.h"
+
+#include "gl/GrGLProgramDataManager.h"
+#include "gl/GrGLSampler.h"
+
+class GrGLCaps;
+
+class GrGLUniformHandler : public GrGLSLUniformHandler {
+public:
+ static const int kUniformsPerBlock = 8;
+
+ const GrGLSLShaderVar& getUniformVariable(UniformHandle u) const override {
+ return fUniforms[u.toIndex()].fVariable;
+ }
+
+ const char* getUniformCStr(UniformHandle u) const override {
+ return this->getUniformVariable(u).c_str();
+ }
+private:
+ explicit GrGLUniformHandler(GrGLSLProgramBuilder* program)
+ : INHERITED(program)
+ , fUniforms(kUniformsPerBlock) {}
+
+ UniformHandle internalAddUniformArray(uint32_t visibility,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name,
+ bool mangleName,
+ int arrayCount,
+ const char** outName) override;
+
+ SamplerHandle internalAddSampler(uint32_t visibility,
+ GrPixelConfig config,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name) override;
+
+ int numSamplers() const override { return fSamplers.count(); }
+ const GrGLSLSampler& getSampler(SamplerHandle handle) const override {
+ return fSamplers[handle.toIndex()];
+ }
+
+ void appendUniformDecls(GrShaderFlags visibility, SkString*) const override;
+
+ // Manually set uniform locations for all our uniforms.
+ void bindUniformLocations(GrGLuint programID, const GrGLCaps& caps);
+
+    // Updates the locations of the uniforms if we cannot bind uniform locations manually.
+ void getUniformLocations(GrGLuint programID, const GrGLCaps& caps);
+
+ const GrGLGpu* glGpu() const;
+
+ typedef GrGLProgramDataManager::UniformInfo UniformInfo;
+ typedef GrGLProgramDataManager::UniformInfoArray UniformInfoArray;
+
+ UniformInfoArray fUniforms;
+
+ SkTArray<GrGLSampler> fSamplers;
+
+ friend class GrGLProgramBuilder;
+
+ typedef GrGLSLUniformHandler INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLUtil.cpp b/gfx/skia/skia/src/gpu/gl/GrGLUtil.cpp
new file mode 100644
index 000000000..23544fee9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLUtil.cpp
@@ -0,0 +1,365 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrGLUtil.h"
+#include "SkMatrix.h"
+#include <stdio.h>
+
+void GrGLClearErr(const GrGLInterface* gl) {
+ while (GR_GL_NO_ERROR != gl->fFunctions.fGetError()) {}
+}
+
+namespace {
+const char *get_error_string(uint32_t err) {
+ switch (err) {
+ case GR_GL_NO_ERROR:
+ return "";
+ case GR_GL_INVALID_ENUM:
+ return "Invalid Enum";
+ case GR_GL_INVALID_VALUE:
+ return "Invalid Value";
+ case GR_GL_INVALID_OPERATION:
+ return "Invalid Operation";
+ case GR_GL_OUT_OF_MEMORY:
+ return "Out of Memory";
+ case GR_GL_CONTEXT_LOST:
+ return "Context Lost";
+ }
+ return "Unknown";
+}
+}
+
+void GrGLCheckErr(const GrGLInterface* gl,
+ const char* location,
+ const char* call) {
+ uint32_t err = GR_GL_GET_ERROR(gl);
+ if (GR_GL_NO_ERROR != err) {
+ SkDebugf("---- glGetError 0x%x(%s)", err, get_error_string(err));
+ if (location) {
+ SkDebugf(" at\n\t%s", location);
+ }
+ if (call) {
+ SkDebugf("\n\t\t%s", call);
+ }
+ SkDebugf("\n");
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if GR_GL_LOG_CALLS
+ bool gLogCallsGL = !!(GR_GL_LOG_CALLS_START);
+#endif
+
+#if GR_GL_CHECK_ERROR
+ bool gCheckErrorGL = !!(GR_GL_CHECK_ERROR_START);
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrGLStandard GrGLGetStandardInUseFromString(const char* versionString) {
+ if (nullptr == versionString) {
+ SkDebugf("nullptr GL version string.");
+ return kNone_GrGLStandard;
+ }
+
+ int major, minor;
+
+ // check for desktop
+ int n = sscanf(versionString, "%d.%d", &major, &minor);
+ if (2 == n) {
+ return kGL_GrGLStandard;
+ }
+
+ // check for ES 1
+ char profile[2];
+ n = sscanf(versionString, "OpenGL ES-%c%c %d.%d", profile, profile+1, &major, &minor);
+ if (4 == n) {
+ // we no longer support ES1.
+ return kNone_GrGLStandard;
+ }
+
+ // check for ES2
+ n = sscanf(versionString, "OpenGL ES %d.%d", &major, &minor);
+ if (2 == n) {
+ return kGLES_GrGLStandard;
+ }
+ return kNone_GrGLStandard;
+}
+
+void GrGLGetDriverInfo(GrGLStandard standard,
+ GrGLVendor vendor,
+ const char* rendererString,
+ const char* versionString,
+ GrGLDriver* outDriver,
+ GrGLDriverVersion* outVersion) {
+ int major, minor, rev, driverMajor, driverMinor;
+
+ *outDriver = kUnknown_GrGLDriver;
+ *outVersion = GR_GL_DRIVER_UNKNOWN_VER;
+ // These null checks are for test GL contexts that return nullptr in their
+ // glGetString implementation.
+ if (!rendererString) {
+ rendererString = "";
+ }
+ if (!versionString) {
+ versionString = "";
+ }
+
+ if (0 == strcmp(rendererString, "Chromium")) {
+ *outDriver = kChromium_GrGLDriver;
+ return;
+ }
+
+ if (standard == kGL_GrGLStandard) {
+ if (kNVIDIA_GrGLVendor == vendor) {
+ *outDriver = kNVIDIA_GrGLDriver;
+ int n = sscanf(versionString, "%d.%d.%d NVIDIA %d.%d",
+ &major, &minor, &rev, &driverMajor, &driverMinor);
+ // Some older NVIDIA drivers don't report the driver version.
+ if (5 == n) {
+ *outVersion = GR_GL_DRIVER_VER(driverMajor, driverMinor);
+ }
+ return;
+ }
+ int n = sscanf(versionString, "%d.%d Mesa %d.%d",
+ &major, &minor, &driverMajor, &driverMinor);
+ if (4 == n) {
+ *outDriver = kMesa_GrGLDriver;
+ *outVersion = GR_GL_DRIVER_VER(driverMajor, driverMinor);
+ return;
+ }
+ }
+ else {
+ if (kNVIDIA_GrGLVendor == vendor) {
+ *outDriver = kNVIDIA_GrGLDriver;
+ int n = sscanf(versionString, "OpenGL ES %d.%d NVIDIA %d.%d",
+ &major, &minor, &driverMajor, &driverMinor);
+ // Some older NVIDIA drivers don't report the driver version.
+ if (4 == n) {
+ *outVersion = GR_GL_DRIVER_VER(driverMajor, driverMinor);
+ }
+ return;
+ }
+
+ int n = sscanf(versionString, "OpenGL ES %d.%d Mesa %d.%d",
+ &major, &minor, &driverMajor, &driverMinor);
+ if (4 == n) {
+ *outDriver = kMesa_GrGLDriver;
+ *outVersion = GR_GL_DRIVER_VER(driverMajor, driverMinor);
+ return;
+ }
+ if (0 == strncmp("ANGLE", rendererString, 5)) {
+ *outDriver = kANGLE_GrGLDriver;
+ n = sscanf(versionString, "OpenGL ES %d.%d (ANGLE %d.%d", &major, &minor, &driverMajor,
+ &driverMinor);
+ if (4 == n) {
+ *outVersion = GR_GL_DRIVER_VER(driverMajor, driverMinor);
+ }
+ return;
+ }
+ }
+
+ if (kIntel_GrGLVendor == vendor) {
+ // We presume we're on the Intel driver since it hasn't identified itself as Mesa.
+ *outDriver = kIntel_GrGLDriver;
+ }
+}
+
+GrGLVersion GrGLGetVersionFromString(const char* versionString) {
+ if (nullptr == versionString) {
+ SkDebugf("nullptr GL version string.");
+ return GR_GL_INVALID_VER;
+ }
+
+ int major, minor;
+
+ // check for mesa
+ int mesaMajor, mesaMinor;
+ int n = sscanf(versionString, "%d.%d Mesa %d.%d", &major, &minor, &mesaMajor, &mesaMinor);
+ if (4 == n) {
+ return GR_GL_VER(major, minor);
+ }
+
+ n = sscanf(versionString, "%d.%d", &major, &minor);
+ if (2 == n) {
+ return GR_GL_VER(major, minor);
+ }
+
+ char profile[2];
+ n = sscanf(versionString, "OpenGL ES-%c%c %d.%d", profile, profile+1,
+ &major, &minor);
+ if (4 == n) {
+ return GR_GL_VER(major, minor);
+ }
+
+ n = sscanf(versionString, "OpenGL ES %d.%d", &major, &minor);
+ if (2 == n) {
+ return GR_GL_VER(major, minor);
+ }
+
+ return GR_GL_INVALID_VER;
+}
+
+GrGLSLVersion GrGLGetGLSLVersionFromString(const char* versionString) {
+ if (nullptr == versionString) {
+ SkDebugf("nullptr GLSL version string.");
+ return GR_GLSL_INVALID_VER;
+ }
+
+ int major, minor;
+
+ int n = sscanf(versionString, "%d.%d", &major, &minor);
+ if (2 == n) {
+ return GR_GLSL_VER(major, minor);
+ }
+
+ n = sscanf(versionString, "OpenGL ES GLSL ES %d.%d", &major, &minor);
+ if (2 == n) {
+ return GR_GLSL_VER(major, minor);
+ }
+
+#ifdef SK_BUILD_FOR_ANDROID
+    // Android hack until the GPU vendor updates their drivers.
+ n = sscanf(versionString, "OpenGL ES GLSL %d.%d", &major, &minor);
+ if (2 == n) {
+ return GR_GLSL_VER(major, minor);
+ }
+#endif
+
+ return GR_GLSL_INVALID_VER;
+}
+
+GrGLVendor GrGLGetVendorFromString(const char* vendorString) {
+ if (vendorString) {
+ if (0 == strcmp(vendorString, "ARM")) {
+ return kARM_GrGLVendor;
+ }
+ if (0 == strcmp(vendorString, "Imagination Technologies")) {
+ return kImagination_GrGLVendor;
+ }
+ if (0 == strncmp(vendorString, "Intel ", 6) || 0 == strcmp(vendorString, "Intel")) {
+ return kIntel_GrGLVendor;
+ }
+ if (0 == strcmp(vendorString, "Qualcomm")) {
+ return kQualcomm_GrGLVendor;
+ }
+ if (0 == strcmp(vendorString, "NVIDIA Corporation")) {
+ return kNVIDIA_GrGLVendor;
+ }
+ if (0 == strcmp(vendorString, "ATI Technologies Inc.")) {
+ return kATI_GrGLVendor;
+ }
+ }
+ return kOther_GrGLVendor;
+}
+
+GrGLRenderer GrGLGetRendererFromString(const char* rendererString) {
+ if (rendererString) {
+ if (0 == strcmp(rendererString, "NVIDIA Tegra 3")) {
+ return kTegra3_GrGLRenderer;
+ } else if (0 == strcmp(rendererString, "NVIDIA Tegra")) {
+ return kTegra2_GrGLRenderer;
+ }
+ int lastDigit;
+ int n = sscanf(rendererString, "PowerVR SGX 54%d", &lastDigit);
+ if (1 == n && lastDigit >= 0 && lastDigit <= 9) {
+ return kPowerVR54x_GrGLRenderer;
+ }
+ // certain iOS devices also use PowerVR54x GPUs
+ static const char kAppleA4Str[] = "Apple A4";
+ static const char kAppleA5Str[] = "Apple A5";
+ static const char kAppleA6Str[] = "Apple A6";
+ if (0 == strncmp(rendererString, kAppleA4Str,
+ SK_ARRAY_COUNT(kAppleA4Str)-1) ||
+ 0 == strncmp(rendererString, kAppleA5Str,
+ SK_ARRAY_COUNT(kAppleA5Str)-1) ||
+ 0 == strncmp(rendererString, kAppleA6Str,
+ SK_ARRAY_COUNT(kAppleA6Str)-1)) {
+ return kPowerVR54x_GrGLRenderer;
+ }
+ static const char kPowerVRRogueStr[] = "PowerVR Rogue";
+ static const char kAppleA7Str[] = "Apple A7";
+ static const char kAppleA8Str[] = "Apple A8";
+ if (0 == strncmp(rendererString, kPowerVRRogueStr,
+ SK_ARRAY_COUNT(kPowerVRRogueStr)-1) ||
+ 0 == strncmp(rendererString, kAppleA7Str,
+ SK_ARRAY_COUNT(kAppleA7Str)-1) ||
+ 0 == strncmp(rendererString, kAppleA8Str,
+ SK_ARRAY_COUNT(kAppleA8Str)-1)) {
+ return kPowerVRRogue_GrGLRenderer;
+ }
+ int adrenoNumber;
+ n = sscanf(rendererString, "Adreno (TM) %d", &adrenoNumber);
+ if (1 == n) {
+ if (adrenoNumber >= 300) {
+ if (adrenoNumber < 400) {
+ return kAdreno3xx_GrGLRenderer;
+ }
+ if (adrenoNumber < 500) {
+ return kAdreno4xx_GrGLRenderer;
+ }
+ if (adrenoNumber < 600) {
+ return kAdreno5xx_GrGLRenderer;
+ }
+ }
+ }
+ if (strcmp("Mesa Offscreen", rendererString)) {
+ return kOSMesa_GrGLRenderer;
+ }
+ }
+ return kOther_GrGLRenderer;
+}
+
+GrGLVersion GrGLGetVersion(const GrGLInterface* gl) {
+ const GrGLubyte* v;
+ GR_GL_CALL_RET(gl, v, GetString(GR_GL_VERSION));
+ return GrGLGetVersionFromString((const char*) v);
+}
+
+GrGLSLVersion GrGLGetGLSLVersion(const GrGLInterface* gl) {
+ const GrGLubyte* v;
+ GR_GL_CALL_RET(gl, v, GetString(GR_GL_SHADING_LANGUAGE_VERSION));
+ return GrGLGetGLSLVersionFromString((const char*) v);
+}
+
+GrGLVendor GrGLGetVendor(const GrGLInterface* gl) {
+ const GrGLubyte* v;
+ GR_GL_CALL_RET(gl, v, GetString(GR_GL_VENDOR));
+ return GrGLGetVendorFromString((const char*) v);
+}
+
+GrGLRenderer GrGLGetRenderer(const GrGLInterface* gl) {
+ const GrGLubyte* v;
+ GR_GL_CALL_RET(gl, v, GetString(GR_GL_RENDERER));
+ return GrGLGetRendererFromString((const char*) v);
+}
+
+GrGLenum GrToGLStencilFunc(GrStencilTest test) {
+ static const GrGLenum gTable[kGrStencilTestCount] = {
+ GR_GL_ALWAYS, // kAlways
+ GR_GL_NEVER, // kNever
+ GR_GL_GREATER, // kGreater
+ GR_GL_GEQUAL, // kGEqual
+ GR_GL_LESS, // kLess
+ GR_GL_LEQUAL, // kLEqual
+ GR_GL_EQUAL, // kEqual
+ GR_GL_NOTEQUAL, // kNotEqual
+ };
+ GR_STATIC_ASSERT(0 == (int)GrStencilTest::kAlways);
+ GR_STATIC_ASSERT(1 == (int)GrStencilTest::kNever);
+ GR_STATIC_ASSERT(2 == (int)GrStencilTest::kGreater);
+ GR_STATIC_ASSERT(3 == (int)GrStencilTest::kGEqual);
+ GR_STATIC_ASSERT(4 == (int)GrStencilTest::kLess);
+ GR_STATIC_ASSERT(5 == (int)GrStencilTest::kLEqual);
+ GR_STATIC_ASSERT(6 == (int)GrStencilTest::kEqual);
+ GR_STATIC_ASSERT(7 == (int)GrStencilTest::kNotEqual);
+ SkASSERT(test < (GrStencilTest)kGrStencilTestCount);
+
+ return gTable[(int)test];
+}
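
Editor's note: GrGLGetVersionFromString above distinguishes Mesa, desktop GL, ES 1 and ES 2+ contexts purely by trying sscanf patterns in a fixed order. A standalone sketch of that idea follows; the helper, its encoding and its return values are illustrative, not the Skia function.

#include <cstdio>

// Returns major*100 + minor, or -1 on failure; tries the version-string patterns in order.
static int parse_gl_version(const char* s) {
    int major = 0, minor = 0;
    char profile[2];
    if (!s) return -1;
    if (std::sscanf(s, "OpenGL ES-%c%c %d.%d", profile, profile + 1, &major, &minor) == 4) {
        return major * 100 + minor;   // ES 1.x, e.g. "OpenGL ES-CM 1.1"
    }
    if (std::sscanf(s, "OpenGL ES %d.%d", &major, &minor) == 2) {
        return major * 100 + minor;   // ES 2.0+, e.g. "OpenGL ES 3.1 V@145.0"
    }
    if (std::sscanf(s, "%d.%d", &major, &minor) == 2) {
        return major * 100 + minor;   // desktop, e.g. "4.5.0 NVIDIA 384.130"
    }
    return -1;
}

int main() {
    std::printf("%d\n", parse_gl_version("OpenGL ES 3.1 V@145.0"));  // 301
    std::printf("%d\n", parse_gl_version("4.5.0 NVIDIA 384.130"));   // 405
    return 0;
}
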
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLUtil.h b/gfx/skia/skia/src/gpu/gl/GrGLUtil.h
new file mode 100644
index 000000000..750337109
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLUtil.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLUtil_DEFINED
+#define GrGLUtil_DEFINED
+
+#include "gl/GrGLInterface.h"
+#include "GrGLDefines.h"
+#include "GrStencilSettings.h"
+
+class SkMatrix;
+
+////////////////////////////////////////////////////////////////////////////////
+
+typedef uint32_t GrGLVersion;
+typedef uint32_t GrGLSLVersion;
+typedef uint32_t GrGLDriverVersion;
+
+#define GR_GL_VER(major, minor) ((static_cast<int>(major) << 16) | \
+ static_cast<int>(minor))
+#define GR_GLSL_VER(major, minor) ((static_cast<int>(major) << 16) | \
+ static_cast<int>(minor))
+#define GR_GL_DRIVER_VER(major, minor) ((static_cast<int>(major) << 16) | \
+ static_cast<int>(minor))
+
+#define GR_GL_INVALID_VER GR_GL_VER(0, 0)
+#define GR_GLSL_INVALID_VER GR_GLSL_VER(0, 0)
+#define GR_GL_DRIVER_UNKNOWN_VER GR_GL_DRIVER_VER(0, 0)
+
+/**
+ * The Vendor and Renderer enum values are lazily updated as required.
+ */
+enum GrGLVendor {
+ kARM_GrGLVendor,
+ kImagination_GrGLVendor,
+ kIntel_GrGLVendor,
+ kQualcomm_GrGLVendor,
+ kNVIDIA_GrGLVendor,
+ kATI_GrGLVendor,
+
+ kOther_GrGLVendor
+};
+
+enum GrGLRenderer {
+ kTegra2_GrGLRenderer,
+ kTegra3_GrGLRenderer,
+ kPowerVR54x_GrGLRenderer,
+ kPowerVRRogue_GrGLRenderer,
+ kAdreno3xx_GrGLRenderer,
+ kAdreno4xx_GrGLRenderer,
+ kAdreno5xx_GrGLRenderer,
+ kOSMesa_GrGLRenderer,
+ kOther_GrGLRenderer
+};
+
+enum GrGLDriver {
+ kMesa_GrGLDriver,
+ kChromium_GrGLDriver,
+ kNVIDIA_GrGLDriver,
+ kIntel_GrGLDriver,
+ kANGLE_GrGLDriver,
+ kUnknown_GrGLDriver
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Some drivers want the var-int arg to be zero-initialized on input.
+ */
+#define GR_GL_INIT_ZERO 0
+#define GR_GL_GetIntegerv(gl, e, p) \
+ do { \
+ *(p) = GR_GL_INIT_ZERO; \
+ GR_GL_CALL(gl, GetIntegerv(e, p)); \
+ } while (0)
+
+#define GR_GL_GetFramebufferAttachmentParameteriv(gl, t, a, pname, p) \
+ do { \
+ *(p) = GR_GL_INIT_ZERO; \
+ GR_GL_CALL(gl, GetFramebufferAttachmentParameteriv(t, a, pname, p)); \
+ } while (0)
+
+#define GR_GL_GetNamedFramebufferAttachmentParameteriv(gl, fb, a, pname, p) \
+ do { \
+ *(p) = GR_GL_INIT_ZERO; \
+ GR_GL_CALL(gl, GetNamedFramebufferAttachmentParameteriv(fb, a, pname, p)); \
+ } while (0)
+
+#define GR_GL_GetRenderbufferParameteriv(gl, t, pname, p) \
+ do { \
+ *(p) = GR_GL_INIT_ZERO; \
+ GR_GL_CALL(gl, GetRenderbufferParameteriv(t, pname, p)); \
+ } while (0)
+
+#define GR_GL_GetTexLevelParameteriv(gl, t, l, pname, p) \
+ do { \
+ *(p) = GR_GL_INIT_ZERO; \
+ GR_GL_CALL(gl, GetTexLevelParameteriv(t, l, pname, p)); \
+ } while (0)
+
+#define GR_GL_GetShaderPrecisionFormat(gl, st, pt, range, precision) \
+ do { \
+ (range)[0] = GR_GL_INIT_ZERO; \
+ (range)[1] = GR_GL_INIT_ZERO; \
+ (*precision) = GR_GL_INIT_ZERO; \
+ GR_GL_CALL(gl, GetShaderPrecisionFormat(st, pt, range, precision)); \
+ } while (0)
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Helpers for glGetString()
+ */
+
+// these variants assume caller already has a string from glGetString()
+GrGLVersion GrGLGetVersionFromString(const char* versionString);
+GrGLStandard GrGLGetStandardInUseFromString(const char* versionString);
+GrGLSLVersion GrGLGetGLSLVersionFromString(const char* versionString);
+GrGLVendor GrGLGetVendorFromString(const char* vendorString);
+GrGLRenderer GrGLGetRendererFromString(const char* rendererString);
+
+void GrGLGetDriverInfo(GrGLStandard standard,
+ GrGLVendor vendor,
+ const char* rendererString,
+ const char* versionString,
+ GrGLDriver* outDriver,
+ GrGLDriverVersion* outVersion);
+
+// these variants call glGetString()
+GrGLVersion GrGLGetVersion(const GrGLInterface*);
+GrGLSLVersion GrGLGetGLSLVersion(const GrGLInterface*);
+GrGLVendor GrGLGetVendor(const GrGLInterface*);
+GrGLRenderer GrGLGetRenderer(const GrGLInterface*);
+
+
+/**
+ * Helpers for glGetError()
+ */
+
+void GrGLCheckErr(const GrGLInterface* gl,
+ const char* location,
+ const char* call);
+
+void GrGLClearErr(const GrGLInterface* gl);
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Macros for using GrGLInterface to make GL calls
+ */
+
+// internal macro to conditionally call glGetError based on compile-time and
+// run-time flags.
+#if GR_GL_CHECK_ERROR
+ extern bool gCheckErrorGL;
+ #define GR_GL_CHECK_ERROR_IMPL(IFACE, X) \
+ if (gCheckErrorGL) \
+ GrGLCheckErr(IFACE, GR_FILE_AND_LINE_STR, #X)
+#else
+ #define GR_GL_CHECK_ERROR_IMPL(IFACE, X)
+#endif
+
+// internal macro to conditionally log the gl call using SkDebugf based on
+// compile-time and run-time flags.
+#if GR_GL_LOG_CALLS
+ extern bool gLogCallsGL;
+ #define GR_GL_LOG_CALLS_IMPL(X) \
+ if (gLogCallsGL) \
+ SkDebugf(GR_FILE_AND_LINE_STR "GL: " #X "\n")
+#else
+ #define GR_GL_LOG_CALLS_IMPL(X)
+#endif
+
+// makes a GL call on the interface and does any error checking and logging
+#define GR_GL_CALL(IFACE, X) \
+ do { \
+ GR_GL_CALL_NOERRCHECK(IFACE, X); \
+ GR_GL_CHECK_ERROR_IMPL(IFACE, X); \
+ } while (false)
+
+// Variant of above that always skips the error check. This is useful when
+// the caller wants to do its own glGetError() call and examine the error value.
+#define GR_GL_CALL_NOERRCHECK(IFACE, X) \
+ do { \
+ (IFACE)->fFunctions.f##X; \
+ GR_GL_LOG_CALLS_IMPL(X); \
+ } while (false)
+
+// same as GR_GL_CALL but stores the return value of the gl call in RET
+#define GR_GL_CALL_RET(IFACE, RET, X) \
+ do { \
+ GR_GL_CALL_RET_NOERRCHECK(IFACE, RET, X); \
+ GR_GL_CHECK_ERROR_IMPL(IFACE, X); \
+ } while (false)
+
+// same as GR_GL_CALL_RET but always skips the error check.
+#define GR_GL_CALL_RET_NOERRCHECK(IFACE, RET, X) \
+ do { \
+ (RET) = (IFACE)->fFunctions.f##X; \
+ GR_GL_LOG_CALLS_IMPL(X); \
+ } while (false)
+
+// call glGetError without doing a redundant error check or logging.
+#define GR_GL_GET_ERROR(IFACE) (IFACE)->fFunctions.fGetError()
+
+GrGLenum GrToGLStencilFunc(GrStencilTest test);
+
+
+#endif
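
Editor's note: GR_GL_VER, GR_GLSL_VER and GR_GL_DRIVER_VER above all pack (major, minor) into a single integer as (major << 16) | minor, so versions compare like ordinary integers. A standalone sketch of the packing and a comparison follows; the constexpr helpers are invented for illustration.

#include <cstdint>
#include <cstdio>

constexpr uint32_t pack_ver(uint32_t major, uint32_t minor) { return (major << 16) | minor; }
constexpr uint32_t ver_major(uint32_t v) { return v >> 16; }
constexpr uint32_t ver_minor(uint32_t v) { return v & 0xffff; }

int main() {
    constexpr uint32_t kGL33 = pack_ver(3, 3);
    constexpr uint32_t kGL45 = pack_ver(4, 5);
    static_assert(kGL45 > kGL33, "packed versions compare like (major, minor) pairs");
    std::printf("%u.%u < %u.%u\n", ver_major(kGL33), ver_minor(kGL33),
                ver_major(kGL45), ver_minor(kGL45));
    return 0;
}
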
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLVaryingHandler.cpp b/gfx/skia/skia/src/gpu/gl/GrGLVaryingHandler.cpp
new file mode 100644
index 000000000..b7c872930
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLVaryingHandler.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "gl/GrGLVaryingHandler.h"
+
+#include "gl/GrGLGpu.h"
+#include "gl/builders/GrGLProgramBuilder.h"
+
+
+GrGLSLVaryingHandler::VaryingHandle GrGLVaryingHandler::addPathProcessingVarying(
+ const char* name,
+ GrGLSLVertToFrag* v,
+ GrSLPrecision fsPrecision) {
+#ifdef SK_DEBUG
+ GrGLProgramBuilder* glPB = (GrGLProgramBuilder*) fProgramBuilder;
+ // This call is not used for non-NVPR backends.
+ SkASSERT(glPB->gpu()->glCaps().shaderCaps()->pathRenderingSupport() &&
+ glPB->fPrimProc.isPathRendering() &&
+ !glPB->fPrimProc.willUseGeoShader() &&
+ glPB->fPrimProc.numAttribs() == 0);
+#endif
+ this->addVarying(name, v, fsPrecision);
+    auto& varyingInfo = fPathProcVaryingInfos.push_back();
+ varyingInfo.fLocation = fPathProcVaryingInfos.count() - 1;
+ return VaryingHandle(varyingInfo.fLocation);
+}
+
+void GrGLVaryingHandler::onFinalize() {
+ SkASSERT(fPathProcVaryingInfos.empty() || fPathProcVaryingInfos.count() == fFragInputs.count());
+ for (int i = 0; i < fPathProcVaryingInfos.count(); ++i) {
+ fPathProcVaryingInfos[i].fVariable = fFragInputs[i];
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLVaryingHandler.h b/gfx/skia/skia/src/gpu/gl/GrGLVaryingHandler.h
new file mode 100644
index 000000000..e08a6c615
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLVaryingHandler.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLVaryingHandler_DEFINED
+#define GrGLVaryingHandler_DEFINED
+
+#include "glsl/GrGLSLVarying.h"
+#include "GrTypesPriv.h"
+#include "gl/GrGLProgramDataManager.h"
+
+class GrGLVaryingHandler : public GrGLSLVaryingHandler {
+public:
+ GrGLVaryingHandler(GrGLSLProgramBuilder* program)
+ : INHERITED(program),
+ fPathProcVaryingInfos(kVaryingsPerBlock) {}
+
+ // This function is used by the NVPR PathProcessor to add a varying directly into the fragment
+ // shader since there is no vertex shader.
+ VaryingHandle addPathProcessingVarying(const char* name, GrGLSLVertToFrag*,
+ GrSLPrecision fsPrecision = kDefault_GrSLPrecision);
+
+private:
+ void onFinalize() override;
+
+ GrGLProgramDataManager::VaryingInfoArray fPathProcVaryingInfos;
+
+ friend class GrGLProgramBuilder;
+
+ typedef GrGLSLVaryingHandler INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLVertexArray.cpp b/gfx/skia/skia/src/gpu/gl/GrGLVertexArray.cpp
new file mode 100644
index 000000000..04299d785
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLVertexArray.cpp
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLVertexArray.h"
+#include "GrGLBuffer.h"
+#include "GrGLGpu.h"
+
+struct AttribLayout {
+ GrGLint fCount;
+ GrGLenum fType;
+ GrGLboolean fNormalized; // Only used by floating point types.
+};
+
+static const AttribLayout gLayouts[kGrVertexAttribTypeCount] = {
+ {1, GR_GL_FLOAT, false}, // kFloat_GrVertexAttribType
+ {2, GR_GL_FLOAT, false}, // kVec2f_GrVertexAttribType
+ {3, GR_GL_FLOAT, false}, // kVec3f_GrVertexAttribType
+ {4, GR_GL_FLOAT, false}, // kVec4f_GrVertexAttribType
+ {1, GR_GL_UNSIGNED_BYTE, true}, // kUByte_GrVertexAttribType
+ {4, GR_GL_UNSIGNED_BYTE, true}, // kVec4ub_GrVertexAttribType
+    {2, GR_GL_UNSIGNED_SHORT, true}, // kVec2us_GrVertexAttribType
+ {1, GR_GL_INT, false}, // kInt_GrVertexAttribType
+ {1, GR_GL_UNSIGNED_INT, false}, // kUint_GrVertexAttribType
+};
+
+GR_STATIC_ASSERT(0 == kFloat_GrVertexAttribType);
+GR_STATIC_ASSERT(1 == kVec2f_GrVertexAttribType);
+GR_STATIC_ASSERT(2 == kVec3f_GrVertexAttribType);
+GR_STATIC_ASSERT(3 == kVec4f_GrVertexAttribType);
+GR_STATIC_ASSERT(4 == kUByte_GrVertexAttribType);
+GR_STATIC_ASSERT(5 == kVec4ub_GrVertexAttribType);
+GR_STATIC_ASSERT(6 == kVec2us_GrVertexAttribType);
+GR_STATIC_ASSERT(7 == kInt_GrVertexAttribType);
+GR_STATIC_ASSERT(8 == kUint_GrVertexAttribType);
+
+void GrGLAttribArrayState::set(GrGLGpu* gpu,
+ int index,
+ const GrBuffer* vertexBuffer,
+ GrVertexAttribType type,
+ GrGLsizei stride,
+ GrGLvoid* offset) {
+ SkASSERT(index >= 0 && index < fAttribArrayStates.count());
+ AttribArrayState* array = &fAttribArrayStates[index];
+ if (!array->fEnableIsValid || !array->fEnabled) {
+ GR_GL_CALL(gpu->glInterface(), EnableVertexAttribArray(index));
+ array->fEnableIsValid = true;
+ array->fEnabled = true;
+ }
+ if (array->fVertexBufferUniqueID != vertexBuffer->uniqueID() ||
+ array->fType != type ||
+ array->fStride != stride ||
+ array->fOffset != offset) {
+ gpu->bindBuffer(kVertex_GrBufferType, vertexBuffer);
+ const AttribLayout& layout = gLayouts[type];
+ if (!GrVertexAttribTypeIsIntType(type)) {
+ GR_GL_CALL(gpu->glInterface(), VertexAttribPointer(index,
+ layout.fCount,
+ layout.fType,
+ layout.fNormalized,
+ stride,
+ offset));
+ } else {
+ SkASSERT(gpu->caps()->shaderCaps()->integerSupport());
+ SkASSERT(!layout.fNormalized);
+ GR_GL_CALL(gpu->glInterface(), VertexAttribIPointer(index,
+ layout.fCount,
+ layout.fType,
+ stride,
+ offset));
+ }
+ array->fVertexBufferUniqueID = vertexBuffer->uniqueID();
+ array->fType = type;
+ array->fStride = stride;
+ array->fOffset = offset;
+ }
+}
+
+void GrGLAttribArrayState::disableUnusedArrays(const GrGLGpu* gpu, uint64_t usedMask) {
+ int count = fAttribArrayStates.count();
+ for (int i = 0; i < count; ++i) {
+ if (!(usedMask & 0x1)) {
+ if (!fAttribArrayStates[i].fEnableIsValid || fAttribArrayStates[i].fEnabled) {
+ GR_GL_CALL(gpu->glInterface(), DisableVertexAttribArray(i));
+ fAttribArrayStates[i].fEnableIsValid = true;
+ fAttribArrayStates[i].fEnabled = false;
+ }
+ } else {
+ SkASSERT(fAttribArrayStates[i].fEnableIsValid && fAttribArrayStates[i].fEnabled);
+ }
+ // if the count is greater than 64 then this will become 0 and we will disable arrays 64+.
+ usedMask >>= 1;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+GrGLVertexArray::GrGLVertexArray(GrGLint id, int attribCount)
+ : fID(id)
+ , fAttribArrays(attribCount)
+ , fIndexBufferUniqueID(SK_InvalidUniqueID) {
+}
+
+GrGLAttribArrayState* GrGLVertexArray::bind(GrGLGpu* gpu) {
+ if (0 == fID) {
+ return nullptr;
+ }
+ gpu->bindVertexArray(fID);
+ return &fAttribArrays;
+}
+
+GrGLAttribArrayState* GrGLVertexArray::bindWithIndexBuffer(GrGLGpu* gpu, const GrBuffer* ibuff) {
+ GrGLAttribArrayState* state = this->bind(gpu);
+ if (state && fIndexBufferUniqueID != ibuff->uniqueID()) {
+ if (ibuff->isCPUBacked()) {
+ GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, 0));
+ } else {
+ const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(ibuff);
+ GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER,
+ glBuffer->bufferID()));
+ }
+ fIndexBufferUniqueID = ibuff->uniqueID();
+ }
+ return state;
+}
+
+void GrGLVertexArray::invalidateCachedState() {
+ fAttribArrays.invalidate();
+ fIndexBufferUniqueID = SK_InvalidUniqueID;
+}
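
Editor's note: GrGLAttribArrayState::set() above skips the GL calls entirely when the cached buffer ID, type, stride and offset already match, which is the classic redundant-state-elimination pattern. A standalone sketch of that pattern follows; the printed call is a stand-in, not GL.

#include <cstdio>

struct AttribState {
    unsigned fBufferID = 0;
    int      fStride   = -1;
    bool set(unsigned bufferID, int stride) {
        if (fBufferID == bufferID && fStride == stride) {
            return false;   // cache hit: no GL call issued
        }
        std::printf("glVertexAttribPointer(buffer=%u, stride=%d)\n", bufferID, stride);
        fBufferID = bufferID;
        fStride   = stride;
        return true;
    }
};

int main() {
    AttribState state;
    state.set(3, 32);   // first use: issues the call
    state.set(3, 32);   // identical state: skipped
    state.set(3, 16);   // stride changed: issues the call again
    return 0;
}
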
diff --git a/gfx/skia/skia/src/gpu/gl/GrGLVertexArray.h b/gfx/skia/skia/src/gpu/gl/GrGLVertexArray.h
new file mode 100644
index 000000000..639892690
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/GrGLVertexArray.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLVertexArray_DEFINED
+#define GrGLVertexArray_DEFINED
+
+#include "GrTypesPriv.h"
+#include "gl/GrGLDefines.h"
+#include "gl/GrGLTypes.h"
+#include "SkTArray.h"
+
+class GrBuffer;
+class GrGLGpu;
+
+/**
+ * This sets and tracks the vertex attribute array state. It is used internally by GrGLVertexArray
+ * (below) but is separate because it is also used to track the state of vertex array object 0.
+ */
+class GrGLAttribArrayState {
+public:
+ explicit GrGLAttribArrayState(int arrayCount = 0) {
+ this->resize(arrayCount);
+ }
+
+ void resize(int newCount) {
+ fAttribArrayStates.resize_back(newCount);
+ for (int i = 0; i < newCount; ++i) {
+ fAttribArrayStates[i].invalidate();
+ }
+ }
+
+ /**
+ * This function enables and sets vertex attrib state for the specified attrib index. It is
+ * assumed that the GrGLAttribArrayState is tracking the state of the currently bound vertex
+ * array object.
+ */
+ void set(GrGLGpu*,
+ int attribIndex,
+ const GrBuffer* vertexBuffer,
+ GrVertexAttribType type,
+ GrGLsizei stride,
+ GrGLvoid* offset);
+
+ /**
+ * This function disables vertex attribs not present in the mask. It is assumed that the
+ * GrGLAttribArrayState is tracking the state of the currently bound vertex array object.
+ */
+ void disableUnusedArrays(const GrGLGpu*, uint64_t usedAttribArrayMask);
+
+ void invalidate() {
+ int count = fAttribArrayStates.count();
+ for (int i = 0; i < count; ++i) {
+ fAttribArrayStates[i].invalidate();
+ }
+ }
+
+ /**
+ * The number of attrib arrays that this object is configured to track.
+ */
+ int count() const { return fAttribArrayStates.count(); }
+
+private:
+ /**
+ * Tracks the state of glVertexAttribArray for an attribute index.
+ */
+ struct AttribArrayState {
+ void invalidate() {
+ fEnableIsValid = false;
+ fVertexBufferUniqueID = SK_InvalidUniqueID;
+ }
+
+ bool fEnableIsValid;
+ bool fEnabled;
+ uint32_t fVertexBufferUniqueID;
+ GrVertexAttribType fType;
+ GrGLsizei fStride;
+ GrGLvoid* fOffset;
+ };
+
+ SkSTArray<16, AttribArrayState, true> fAttribArrayStates;
+};
+
+/**
+ * This class represents an OpenGL vertex array object. It manages the lifetime of the vertex array
+ * and is used to track the state of the vertex array to avoid redundant GL calls.
+ */
+class GrGLVertexArray {
+public:
+ GrGLVertexArray(GrGLint id, int attribCount);
+
+ /**
+ * Binds this vertex array. If the ID has been deleted or abandoned then nullptr is returned.
+ * Otherwise, the GrGLAttribArrayState that is tracking this vertex array's attrib bindings is
+ * returned.
+ */
+ GrGLAttribArrayState* bind(GrGLGpu*);
+
+ /**
+ * This is a version of the above function that also binds an index buffer to the vertex
+ * array object.
+ */
+ GrGLAttribArrayState* bindWithIndexBuffer(GrGLGpu* gpu, const GrBuffer* indexBuffer);
+
+ GrGLuint arrayID() const { return fID; }
+
+ void invalidateCachedState();
+
+private:
+ GrGLuint fID;
+ GrGLAttribArrayState fAttribArrays;
+ uint32_t fIndexBufferUniqueID;
+};
+
+#endif
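
Editor's note: disableUnusedArrays() declared above walks usedAttribArrayMask one bit per attribute index, disabling any array whose bit is clear. A standalone sketch of that bit-walk follows; the printed disable call is a stand-in, not GL.

#include <cstdint>
#include <cstdio>

// Disable every attribute index in [0, count) whose bit is not set in usedMask.
static void disable_unused(uint64_t usedMask, int count) {
    for (int i = 0; i < count; ++i) {
        if (!(usedMask & 0x1)) {
            std::printf("glDisableVertexAttribArray(%d)\n", i);  // stand-in for the GL call
        }
        usedMask >>= 1;   // past 64 attributes the mask is exhausted and all are disabled
    }
}

int main() {
    // Attributes 0 and 2 are in use; 1 and 3 get disabled.
    disable_unused(0b0101, 4);
    return 0;
}
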
diff --git a/gfx/skia/skia/src/gpu/gl/android/GrGLCreateNativeInterface_android.cpp b/gfx/skia/skia/src/gpu/gl/android/GrGLCreateNativeInterface_android.cpp
new file mode 100644
index 000000000..f5949175f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/android/GrGLCreateNativeInterface_android.cpp
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_ANDROID)
+
+#include "gl/GrGLInterface.h"
+#include "gl/GrGLAssembleInterface.h"
+#include "gl/GrGLUtil.h"
+
+#include <EGL/egl.h>
+#include <GLES2/gl2.h>
+
+static GrGLFuncPtr android_get_gl_proc(void* ctx, const char name[]) {
+ SkASSERT(nullptr == ctx);
+    // Some older drivers on Android have busted eglGetProcAddress functions that
+    // will return the wrong pointer for built-in GLES2 functions. This set of functions
+ // was generated on a Xoom by finding mismatches between the function pulled in via gl2.h and
+ // the address returned by eglGetProcAddress.
+ if (0 == strcmp("glActiveTexture", name)) {
+ return (GrGLFuncPtr) glActiveTexture;
+ } else if (0 == strcmp("glAttachShader", name)) {
+ return (GrGLFuncPtr) glAttachShader;
+ } else if (0 == strcmp("glBindAttribLocation", name)) {
+ return (GrGLFuncPtr) glBindAttribLocation;
+ } else if (0 == strcmp("glBindBuffer", name)) {
+ return (GrGLFuncPtr) glBindBuffer;
+ } else if (0 == strcmp("glBindTexture", name)) {
+ return (GrGLFuncPtr) glBindTexture;
+ } else if (0 == strcmp("glBlendColor", name)) {
+ return (GrGLFuncPtr) glBlendColor;
+ } else if (0 == strcmp("glBlendEquation", name)) {
+ return (GrGLFuncPtr) glBlendEquation;
+ } else if (0 == strcmp("glBlendFunc", name)) {
+ return (GrGLFuncPtr) glBlendFunc;
+ } else if (0 == strcmp("glBufferData", name)) {
+ return (GrGLFuncPtr) glBufferData;
+ } else if (0 == strcmp("glBufferSubData", name)) {
+ return (GrGLFuncPtr) glBufferSubData;
+ } else if (0 == strcmp("glClear", name)) {
+ return (GrGLFuncPtr) glClear;
+ } else if (0 == strcmp("glClearColor", name)) {
+ return (GrGLFuncPtr) glClearColor;
+ } else if (0 == strcmp("glClearStencil", name)) {
+ return (GrGLFuncPtr) glClearStencil;
+ } else if (0 == strcmp("glColorMask", name)) {
+ return (GrGLFuncPtr) glColorMask;
+ } else if (0 == strcmp("glCompileShader", name)) {
+ return (GrGLFuncPtr) glCompileShader;
+ } else if (0 == strcmp("glCompressedTexImage2D", name)) {
+ return (GrGLFuncPtr) glCompressedTexImage2D;
+ } else if (0 == strcmp("glCompressedTexSubImage2D", name)) {
+ return (GrGLFuncPtr) glCompressedTexSubImage2D;
+ } else if (0 == strcmp("glCopyTexSubImage2D", name)) {
+ return (GrGLFuncPtr) glCopyTexSubImage2D;
+ } else if (0 == strcmp("glCreateProgram", name)) {
+ return (GrGLFuncPtr) glCreateProgram;
+ } else if (0 == strcmp("glCreateShader", name)) {
+ return (GrGLFuncPtr) glCreateShader;
+ } else if (0 == strcmp("glCullFace", name)) {
+ return (GrGLFuncPtr) glCullFace;
+ } else if (0 == strcmp("glDeleteBuffers", name)) {
+ return (GrGLFuncPtr) glDeleteBuffers;
+ } else if (0 == strcmp("glDeleteProgram", name)) {
+ return (GrGLFuncPtr) glDeleteProgram;
+ } else if (0 == strcmp("glDeleteShader", name)) {
+ return (GrGLFuncPtr) glDeleteShader;
+ } else if (0 == strcmp("glDeleteTextures", name)) {
+ return (GrGLFuncPtr) glDeleteTextures;
+ } else if (0 == strcmp("glDepthMask", name)) {
+ return (GrGLFuncPtr) glDepthMask;
+ } else if (0 == strcmp("glDisable", name)) {
+ return (GrGLFuncPtr) glDisable;
+ } else if (0 == strcmp("glDisableVertexAttribArray", name)) {
+ return (GrGLFuncPtr) glDisableVertexAttribArray;
+ } else if (0 == strcmp("glDrawArrays", name)) {
+ return (GrGLFuncPtr) glDrawArrays;
+ } else if (0 == strcmp("glDrawElements", name)) {
+ return (GrGLFuncPtr) glDrawElements;
+ } else if (0 == strcmp("glEnable", name)) {
+ return (GrGLFuncPtr) glEnable;
+ } else if (0 == strcmp("glEnableVertexAttribArray", name)) {
+ return (GrGLFuncPtr) glEnableVertexAttribArray;
+ } else if (0 == strcmp("glFinish", name)) {
+ return (GrGLFuncPtr) glFinish;
+ } else if (0 == strcmp("glFlush", name)) {
+ return (GrGLFuncPtr) glFlush;
+ } else if (0 == strcmp("glFrontFace", name)) {
+ return (GrGLFuncPtr) glFrontFace;
+ } else if (0 == strcmp("glGenBuffers", name)) {
+ return (GrGLFuncPtr) glGenBuffers;
+ } else if (0 == strcmp("glGenerateMipmap", name)) {
+ return (GrGLFuncPtr) glGenerateMipmap;
+ } else if (0 == strcmp("glGenTextures", name)) {
+ return (GrGLFuncPtr) glGenTextures;
+ } else if (0 == strcmp("glGetBufferParameteriv", name)) {
+ return (GrGLFuncPtr) glGetBufferParameteriv;
+ } else if (0 == strcmp("glGetError", name)) {
+ return (GrGLFuncPtr) glGetError;
+ } else if (0 == strcmp("glGetIntegerv", name)) {
+ return (GrGLFuncPtr) glGetIntegerv;
+ } else if (0 == strcmp("glGetProgramInfoLog", name)) {
+ return (GrGLFuncPtr) glGetProgramInfoLog;
+ } else if (0 == strcmp("glGetProgramiv", name)) {
+ return (GrGLFuncPtr) glGetProgramiv;
+ } else if (0 == strcmp("glGetShaderInfoLog", name)) {
+ return (GrGLFuncPtr) glGetShaderInfoLog;
+ } else if (0 == strcmp("glGetShaderiv", name)) {
+ return (GrGLFuncPtr) glGetShaderiv;
+ } else if (0 == strcmp("glGetString", name)) {
+ return (GrGLFuncPtr) glGetString;
+ } else if (0 == strcmp("glGetUniformLocation", name)) {
+ return (GrGLFuncPtr) glGetUniformLocation;
+ } else if (0 == strcmp("glLineWidth", name)) {
+ return (GrGLFuncPtr) glLineWidth;
+ } else if (0 == strcmp("glLinkProgram", name)) {
+ return (GrGLFuncPtr) glLinkProgram;
+ } else if (0 == strcmp("glPixelStorei", name)) {
+ return (GrGLFuncPtr) glPixelStorei;
+ } else if (0 == strcmp("glReadPixels", name)) {
+ return (GrGLFuncPtr) glReadPixels;
+ } else if (0 == strcmp("glScissor", name)) {
+ return (GrGLFuncPtr) glScissor;
+ } else if (0 == strcmp("glShaderSource", name)) {
+ return (GrGLFuncPtr) glShaderSource;
+ } else if (0 == strcmp("glStencilFunc", name)) {
+ return (GrGLFuncPtr) glStencilFunc;
+ } else if (0 == strcmp("glStencilFuncSeparate", name)) {
+ return (GrGLFuncPtr) glStencilFuncSeparate;
+ } else if (0 == strcmp("glStencilMask", name)) {
+ return (GrGLFuncPtr) glStencilMask;
+ } else if (0 == strcmp("glStencilMaskSeparate", name)) {
+ return (GrGLFuncPtr) glStencilMaskSeparate;
+ } else if (0 == strcmp("glStencilOp", name)) {
+ return (GrGLFuncPtr) glStencilOp;
+ } else if (0 == strcmp("glStencilOpSeparate", name)) {
+ return (GrGLFuncPtr) glStencilOpSeparate;
+ } else if (0 == strcmp("glTexImage2D", name)) {
+ return (GrGLFuncPtr) glTexImage2D;
+ } else if (0 == strcmp("glTexParameteri", name)) {
+ return (GrGLFuncPtr) glTexParameteri;
+ } else if (0 == strcmp("glTexParameteriv", name)) {
+ return (GrGLFuncPtr) glTexParameteriv;
+ } else if (0 == strcmp("glTexSubImage2D", name)) {
+ return (GrGLFuncPtr) glTexSubImage2D;
+ } else if (0 == strcmp("glUniform1f", name)) {
+ return (GrGLFuncPtr) glUniform1f;
+ } else if (0 == strcmp("glUniform1i", name)) {
+ return (GrGLFuncPtr) glUniform1i;
+ } else if (0 == strcmp("glUniform1fv", name)) {
+ return (GrGLFuncPtr) glUniform1fv;
+ } else if (0 == strcmp("glUniform1iv", name)) {
+ return (GrGLFuncPtr) glUniform1iv;
+ } else if (0 == strcmp("glUniform2f", name)) {
+ return (GrGLFuncPtr) glUniform2f;
+ } else if (0 == strcmp("glUniform2i", name)) {
+ return (GrGLFuncPtr) glUniform2i;
+ } else if (0 == strcmp("glUniform2fv", name)) {
+ return (GrGLFuncPtr) glUniform2fv;
+ } else if (0 == strcmp("glUniform2iv", name)) {
+ return (GrGLFuncPtr) glUniform2iv;
+ } else if (0 == strcmp("glUniform3f", name)) {
+ return (GrGLFuncPtr) glUniform3f;
+ } else if (0 == strcmp("glUniform3i", name)) {
+ return (GrGLFuncPtr) glUniform3i;
+ } else if (0 == strcmp("glUniform3fv", name)) {
+ return (GrGLFuncPtr) glUniform3fv;
+ } else if (0 == strcmp("glUniform3iv", name)) {
+ return (GrGLFuncPtr) glUniform3iv;
+ } else if (0 == strcmp("glUniform4f", name)) {
+ return (GrGLFuncPtr) glUniform4f;
+ } else if (0 == strcmp("glUniform4i", name)) {
+ return (GrGLFuncPtr) glUniform4i;
+ } else if (0 == strcmp("glUniform4fv", name)) {
+ return (GrGLFuncPtr) glUniform4fv;
+ } else if (0 == strcmp("glUniform4iv", name)) {
+ return (GrGLFuncPtr) glUniform4iv;
+ } else if (0 == strcmp("glUniformMatrix2fv", name)) {
+ return (GrGLFuncPtr) glUniformMatrix2fv;
+ } else if (0 == strcmp("glUniformMatrix3fv", name)) {
+ return (GrGLFuncPtr) glUniformMatrix3fv;
+ } else if (0 == strcmp("glUniformMatrix4fv", name)) {
+ return (GrGLFuncPtr) glUniformMatrix4fv;
+ } else if (0 == strcmp("glUseProgram", name)) {
+ return (GrGLFuncPtr) glUseProgram;
+ } else if (0 == strcmp("glVertexAttrib1f", name)) {
+ return (GrGLFuncPtr) glVertexAttrib1f;
+ } else if (0 == strcmp("glVertexAttrib2fv", name)) {
+ return (GrGLFuncPtr) glVertexAttrib2fv;
+ } else if (0 == strcmp("glVertexAttrib3fv", name)) {
+ return (GrGLFuncPtr) glVertexAttrib3fv;
+ } else if (0 == strcmp("glVertexAttrib4fv", name)) {
+ return (GrGLFuncPtr) glVertexAttrib4fv;
+ } else if (0 == strcmp("glVertexAttribPointer", name)) {
+ return (GrGLFuncPtr) glVertexAttribPointer;
+ } else if (0 == strcmp("glViewport", name)) {
+ return (GrGLFuncPtr) glViewport;
+ } else if (0 == strcmp("glBindFramebuffer", name)) {
+ return (GrGLFuncPtr) glBindFramebuffer;
+ } else if (0 == strcmp("glBindRenderbuffer", name)) {
+ return (GrGLFuncPtr) glBindRenderbuffer;
+ } else if (0 == strcmp("glCheckFramebufferStatus", name)) {
+ return (GrGLFuncPtr) glCheckFramebufferStatus;
+ } else if (0 == strcmp("glDeleteFramebuffers", name)) {
+ return (GrGLFuncPtr) glDeleteFramebuffers;
+ } else if (0 == strcmp("glDeleteRenderbuffers", name)) {
+ return (GrGLFuncPtr) glDeleteRenderbuffers;
+ } else if (0 == strcmp("glFramebufferRenderbuffer", name)) {
+ return (GrGLFuncPtr) glFramebufferRenderbuffer;
+ } else if (0 == strcmp("glFramebufferTexture2D", name)) {
+ return (GrGLFuncPtr) glFramebufferTexture2D;
+ } else if (0 == strcmp("glGenFramebuffers", name)) {
+ return (GrGLFuncPtr) glGenFramebuffers;
+ } else if (0 == strcmp("glGenRenderbuffers", name)) {
+ return (GrGLFuncPtr) glGenRenderbuffers;
+ } else if (0 == strcmp("glGetFramebufferAttachmentParameteriv", name)) {
+ return (GrGLFuncPtr) glGetFramebufferAttachmentParameteriv;
+ } else if (0 == strcmp("glGetRenderbufferParameteriv", name)) {
+ return (GrGLFuncPtr) glGetRenderbufferParameteriv;
+ } else if (0 == strcmp("glRenderbufferStorage", name)) {
+ return (GrGLFuncPtr) glRenderbufferStorage;
+ } else if (0 == strcmp("eglQueryString", name)) {
+ return (GrGLFuncPtr) eglQueryString;
+ } else if (0 == strcmp("eglGetCurrentDisplay", name)) {
+ return (GrGLFuncPtr) eglGetCurrentDisplay;
+ }
+ return eglGetProcAddress(name);
+}
+
+const GrGLInterface* GrGLCreateNativeInterface() {
+ return GrGLAssembleInterface(nullptr, android_get_gl_proc);
+}
+
+#endif//defined(SK_BUILD_FOR_ANDROID)
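
Illustrative sketch, not part of the patch: the file above works around the broken eglGetProcAddress by special-casing every core GLES2 entry point in one long if/else chain. Assuming the same headers the file already includes (GrGLFuncPtr, SkASSERT, strcmp, the GLES2/EGL prototypes), the identical behaviour could be written as a lookup table; only the control flow changes.

    struct ProcOverride {
        const char* fName;
        GrGLFuncPtr fFunc;
    };

    // Only a handful of entries shown; the full table would list every function above.
    static const ProcOverride kAndroidOverrides[] = {
        { "glActiveTexture",      (GrGLFuncPtr) glActiveTexture },
        { "glAttachShader",       (GrGLFuncPtr) glAttachShader },
        { "glBindAttribLocation", (GrGLFuncPtr) glBindAttribLocation },
        { "eglGetCurrentDisplay", (GrGLFuncPtr) eglGetCurrentDisplay },
    };

    static GrGLFuncPtr android_get_gl_proc_table(void* ctx, const char name[]) {
        SkASSERT(nullptr == ctx);
        for (const ProcOverride& entry : kAndroidOverrides) {
            if (0 == strcmp(entry.fName, name)) {
                return entry.fFunc;
            }
        }
        // Everything not pinned in the table still goes through the driver.
        return eglGetProcAddress(name);
    }
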
diff --git a/gfx/skia/skia/src/gpu/gl/builders/GrGLProgramBuilder.cpp b/gfx/skia/skia/src/gpu/gl/builders/GrGLProgramBuilder.cpp
new file mode 100644
index 000000000..370cee6a0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/builders/GrGLProgramBuilder.cpp
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLProgramBuilder.h"
+
+#include "GrAutoLocaleSetter.h"
+#include "GrCoordTransform.h"
+#include "GrGLProgramBuilder.h"
+#include "GrProgramDesc.h"
+#include "GrSwizzle.h"
+#include "GrTexture.h"
+#include "SkTraceEvent.h"
+#include "gl/GrGLGpu.h"
+#include "gl/GrGLProgram.h"
+#include "gl/GrGLSLPrettyPrint.h"
+#include "gl/builders/GrGLShaderStringBuilder.h"
+#include "glsl/GrGLSLCaps.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLGeometryProcessor.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLSampler.h"
+#include "glsl/GrGLSLXferProcessor.h"
+
+#define GL_CALL(X) GR_GL_CALL(this->gpu()->glInterface(), X)
+#define GL_CALL_RET(R, X) GR_GL_CALL_RET(this->gpu()->glInterface(), R, X)
+
+GrGLProgram* GrGLProgramBuilder::CreateProgram(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ const GrProgramDesc& desc,
+ GrGLGpu* gpu) {
+ GrAutoLocaleSetter als("C");
+
+ // create a builder. This will be handed off to effects so they can use it to add
+ // uniforms, varyings, textures, etc
+ GrGLProgramBuilder builder(gpu, pipeline, primProc, desc);
+
+ // TODO: Once all stages can handle taking a float or vec4 and correctly handling them we can
+ // seed correctly here
+ GrGLSLExpr4 inputColor;
+ GrGLSLExpr4 inputCoverage;
+
+ if (!builder.emitAndInstallProcs(&inputColor, &inputCoverage)) {
+ builder.cleanupFragmentProcessors();
+ return nullptr;
+ }
+
+ return builder.finalize();
+}
+
+/////////////////////////////////////////////////////////////////////////////
+
+GrGLProgramBuilder::GrGLProgramBuilder(GrGLGpu* gpu,
+ const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ const GrProgramDesc& desc)
+ : INHERITED(pipeline, primProc, desc)
+ , fGpu(gpu)
+ , fVaryingHandler(this)
+ , fUniformHandler(this) {
+}
+
+const GrCaps* GrGLProgramBuilder::caps() const {
+ return fGpu->caps();
+}
+
+const GrGLSLCaps* GrGLProgramBuilder::glslCaps() const {
+ return fGpu->ctxInfo().caps()->glslCaps();
+}
+
+bool GrGLProgramBuilder::compileAndAttachShaders(GrGLSLShaderBuilder& shader,
+ GrGLuint programId,
+ GrGLenum type,
+ SkTDArray<GrGLuint>* shaderIds) {
+ GrGLGpu* gpu = this->gpu();
+ GrGLuint shaderId = GrGLCompileAndAttachShader(gpu->glContext(),
+ programId,
+ type,
+ shader.fCompilerStrings.begin(),
+ shader.fCompilerStringLengths.begin(),
+ shader.fCompilerStrings.count(),
+ gpu->stats());
+
+ if (!shaderId) {
+ return false;
+ }
+
+ *shaderIds->append() = shaderId;
+
+ return true;
+}
+
+GrGLProgram* GrGLProgramBuilder::finalize() {
+ // verify we can get a program id
+ GrGLuint programID;
+ GL_CALL_RET(programID, CreateProgram());
+ if (0 == programID) {
+ this->cleanupFragmentProcessors();
+ return nullptr;
+ }
+
+ this->finalizeShaders();
+
+ // compile shaders and bind attributes / uniforms
+ SkTDArray<GrGLuint> shadersToDelete;
+ if (!this->compileAndAttachShaders(fVS, programID, GR_GL_VERTEX_SHADER, &shadersToDelete)) {
+ this->cleanupProgram(programID, shadersToDelete);
+ return nullptr;
+ }
+
+ // NVPR actually requires a vertex shader to compile
+ bool useNvpr = primitiveProcessor().isPathRendering();
+ if (!useNvpr) {
+ const GrPrimitiveProcessor& primProc = this->primitiveProcessor();
+
+ int vaCount = primProc.numAttribs();
+ for (int i = 0; i < vaCount; i++) {
+ GL_CALL(BindAttribLocation(programID, i, primProc.getAttrib(i).fName));
+ }
+ }
+
+ if (!this->compileAndAttachShaders(fFS, programID, GR_GL_FRAGMENT_SHADER, &shadersToDelete)) {
+ this->cleanupProgram(programID, shadersToDelete);
+ return nullptr;
+ }
+
+ this->bindProgramResourceLocations(programID);
+
+ GL_CALL(LinkProgram(programID));
+
+ // Calling GetProgramiv is expensive in Chromium. Assume success in release builds.
+ bool checkLinked = kChromium_GrGLDriver != fGpu->ctxInfo().driver();
+#ifdef SK_DEBUG
+ checkLinked = true;
+#endif
+ if (checkLinked) {
+ checkLinkStatus(programID);
+ }
+ this->resolveProgramResourceLocations(programID);
+
+ this->cleanupShaders(shadersToDelete);
+
+ return this->createProgram(programID);
+}
+
+void GrGLProgramBuilder::bindProgramResourceLocations(GrGLuint programID) {
+ fUniformHandler.bindUniformLocations(programID, fGpu->glCaps());
+
+ const GrGLCaps& caps = this->gpu()->glCaps();
+ if (fFS.hasCustomColorOutput() && caps.bindFragDataLocationSupport()) {
+ GL_CALL(BindFragDataLocation(programID, 0,
+ GrGLSLFragmentShaderBuilder::DeclaredColorOutputName()));
+ }
+ if (fFS.hasSecondaryOutput() && caps.glslCaps()->mustDeclareFragmentShaderOutput()) {
+ GL_CALL(BindFragDataLocationIndexed(programID, 0, 1,
+ GrGLSLFragmentShaderBuilder::DeclaredSecondaryColorOutputName()));
+ }
+
+ // handle NVPR separable varyings
+ if (!fGpu->glCaps().shaderCaps()->pathRenderingSupport() ||
+ !fGpu->glPathRendering()->shouldBindFragmentInputs()) {
+ return;
+ }
+ int count = fVaryingHandler.fPathProcVaryingInfos.count();
+ for (int i = 0; i < count; ++i) {
+ GL_CALL(BindFragmentInputLocation(programID, i,
+ fVaryingHandler.fPathProcVaryingInfos[i].fVariable.c_str()));
+ fVaryingHandler.fPathProcVaryingInfos[i].fLocation = i;
+ }
+}
+
+bool GrGLProgramBuilder::checkLinkStatus(GrGLuint programID) {
+ GrGLint linked = GR_GL_INIT_ZERO;
+ GL_CALL(GetProgramiv(programID, GR_GL_LINK_STATUS, &linked));
+ if (!linked) {
+ GrGLint infoLen = GR_GL_INIT_ZERO;
+ GL_CALL(GetProgramiv(programID, GR_GL_INFO_LOG_LENGTH, &infoLen));
+ SkAutoMalloc log(sizeof(char)*(infoLen+1)); // outside if for debugger
+ if (infoLen > 0) {
+ // Retrieve the length even though we don't need it, to work around a
+ // bug in the Chrome command buffer's param validation.
+ GrGLsizei length = GR_GL_INIT_ZERO;
+ GL_CALL(GetProgramInfoLog(programID,
+ infoLen+1,
+ &length,
+ (char*)log.get()));
+ SkDebugf("%s", (char*)log.get());
+ }
+ SkDEBUGFAIL("Error linking program");
+ GL_CALL(DeleteProgram(programID));
+ programID = 0;
+ }
+ return SkToBool(linked);
+}
+
+void GrGLProgramBuilder::resolveProgramResourceLocations(GrGLuint programID) {
+ fUniformHandler.getUniformLocations(programID, fGpu->glCaps());
+
+ // handle NVPR separable varyings
+ if (!fGpu->glCaps().shaderCaps()->pathRenderingSupport() ||
+ fGpu->glPathRendering()->shouldBindFragmentInputs()) {
+ return;
+ }
+ int count = fVaryingHandler.fPathProcVaryingInfos.count();
+ for (int i = 0; i < count; ++i) {
+ GrGLint location;
+ GL_CALL_RET(location, GetProgramResourceLocation(
+ programID,
+ GR_GL_FRAGMENT_INPUT,
+ fVaryingHandler.fPathProcVaryingInfos[i].fVariable.c_str()));
+ fVaryingHandler.fPathProcVaryingInfos[i].fLocation = location;
+ }
+}
+
+void GrGLProgramBuilder::cleanupProgram(GrGLuint programID, const SkTDArray<GrGLuint>& shaderIDs) {
+ GL_CALL(DeleteProgram(programID));
+ this->cleanupShaders(shaderIDs);
+ this->cleanupFragmentProcessors();
+}
+void GrGLProgramBuilder::cleanupShaders(const SkTDArray<GrGLuint>& shaderIDs) {
+ for (int i = 0; i < shaderIDs.count(); ++i) {
+ GL_CALL(DeleteShader(shaderIDs[i]));
+ }
+}
+
+GrGLProgram* GrGLProgramBuilder::createProgram(GrGLuint programID) {
+ return new GrGLProgram(fGpu,
+ this->desc(),
+ fUniformHandles,
+ programID,
+ fUniformHandler.fUniforms,
+ fUniformHandler.fSamplers,
+ fVaryingHandler.fPathProcVaryingInfos,
+ fGeometryProcessor,
+ fXferProcessor,
+ fFragmentProcessors);
+}
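
For readers less familiar with the GL side, finalize() above is essentially a wrapper over the standard program-link sequence, plus the NVPR and Chromium special cases. A rough sketch of the equivalent raw GLES2 calls (assumes <GLES2/gl2.h>; error logging omitted and the helper name is made up):

    GLuint link_program_sketch(GLuint vertexShader, GLuint fragmentShader,
                               const char* attrib0Name) {
        GLuint program = glCreateProgram();
        if (0 == program) {
            return 0;
        }
        glAttachShader(program, vertexShader);
        glAttachShader(program, fragmentShader);
        // One call per vertex attribute, mirroring the BindAttribLocation loop above.
        glBindAttribLocation(program, 0, attrib0Name);
        glLinkProgram(program);

        GLint linked = 0;
        glGetProgramiv(program, GL_LINK_STATUS, &linked);  // skipped on Chromium release builds above
        if (!linked) {
            glDeleteProgram(program);
            return 0;
        }
        // Once linked, the attached shaders can be deleted; see cleanupShaders().
        glDeleteShader(vertexShader);
        glDeleteShader(fragmentShader);
        return program;
    }
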
diff --git a/gfx/skia/skia/src/gpu/gl/builders/GrGLProgramBuilder.h b/gfx/skia/skia/src/gpu/gl/builders/GrGLProgramBuilder.h
new file mode 100644
index 000000000..253f9e632
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/builders/GrGLProgramBuilder.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLProgramBuilder_DEFINED
+#define GrGLProgramBuilder_DEFINED
+
+#include "GrPipeline.h"
+#include "gl/GrGLProgramDataManager.h"
+#include "gl/GrGLUniformHandler.h"
+#include "gl/GrGLVaryingHandler.h"
+#include "glsl/GrGLSLProgramBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+
+class GrFragmentProcessor;
+class GrGLContextInfo;
+class GrProgramDesc;
+class GrGLSLShaderBuilder;
+class GrGLSLCaps;
+
+class GrGLProgramBuilder : public GrGLSLProgramBuilder {
+public:
+ /** Generates a shader program.
+ *
+ * The program implements what is specified in the stages given as input.
+ * After successful generation, the builder result objects are available
+ * to be used.
+ * @return the created GrGLProgram, or nullptr if generation failed.
+ */
+ static GrGLProgram* CreateProgram(const GrPipeline&,
+ const GrPrimitiveProcessor&,
+ const GrProgramDesc&,
+ GrGLGpu*);
+
+ const GrCaps* caps() const override;
+ const GrGLSLCaps* glslCaps() const override;
+
+ GrGLGpu* gpu() const { return fGpu; }
+
+private:
+ GrGLProgramBuilder(GrGLGpu*, const GrPipeline&, const GrPrimitiveProcessor&,
+ const GrProgramDesc&);
+
+ bool compileAndAttachShaders(GrGLSLShaderBuilder& shader,
+ GrGLuint programId,
+ GrGLenum type,
+ SkTDArray<GrGLuint>* shaderIds);
+ GrGLProgram* finalize();
+ void bindProgramResourceLocations(GrGLuint programID);
+ bool checkLinkStatus(GrGLuint programID);
+ void resolveProgramResourceLocations(GrGLuint programID);
+ void cleanupProgram(GrGLuint programID, const SkTDArray<GrGLuint>& shaderIDs);
+ void cleanupShaders(const SkTDArray<GrGLuint>& shaderIDs);
+
+ // Subclasses create different programs
+ GrGLProgram* createProgram(GrGLuint programID);
+
+ GrGLSLUniformHandler* uniformHandler() override { return &fUniformHandler; }
+ const GrGLSLUniformHandler* uniformHandler() const override { return &fUniformHandler; }
+ GrGLSLVaryingHandler* varyingHandler() override { return &fVaryingHandler; }
+
+
+ GrGLGpu* fGpu;
+ GrGLVaryingHandler fVaryingHandler;
+ GrGLUniformHandler fUniformHandler;
+
+ typedef GrGLSLProgramBuilder INHERITED;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/builders/GrGLSLPrettyPrint.cpp b/gfx/skia/skia/src/gpu/gl/builders/GrGLSLPrettyPrint.cpp
new file mode 100644
index 000000000..02802987c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/builders/GrGLSLPrettyPrint.cpp
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "gl/GrGLSLPrettyPrint.h"
+
+namespace GrGLSLPrettyPrint {
+
+class GLSLPrettyPrint {
+public:
+ GLSLPrettyPrint() {}
+
+ SkString prettify(const char** strings,
+ int* lengths,
+ int count,
+ bool countlines) {
+ fCountlines = countlines;
+ fTabs = 0;
+ fLinecount = 1;
+ fFreshline = true;
+
+ // If a string ends while we are in the middle of a 'parse until', we need to continue
+ // parsing in the next string
+ fInParseUntilNewline = false;
+ fInParseUntil = false;
+
+ int parensDepth = 0;
+
+ // number 1st line
+ this->lineNumbering();
+ for (int i = 0; i < count; i++) {
+ // setup pretty state
+ fIndex = 0;
+ fLength = lengths[i];
+ fInput = strings[i];
+
+ while (fLength > fIndex) {
+ /* the heart and soul of our prettification algorithm. The rules should hopefully
+ * be self-explanatory. For '#' and '//' tokens we parse until we reach a newline.
+ *
+ * For long style comments like this one, we search for the ending token. We also
+ * preserve whitespace in these comments WITH THE CAVEAT that we do the newlines
+ * ourselves. This allows us to remain in control of line numbers and matching
+ * tabs. Existing tabs in the input string are copied over too, but this will look
+ * funny
+ *
+ * '{' and '}' are handled in basically the same way. We add a newline if we aren't
+ * on a fresh line, dirty the line, then add a second newline, ie braces are always
+ * on their own lines indented properly. The one funkiness here is structs print
+ * with the semicolon on its own line. It's not a problem for a GLSL compiler, though
+ *
+ * '(' and ')' are basically ignored, except as a sign that we need to ignore ';',
+ * as we do inside for loops.
+ *
+ * ';' means add a new line
+ *
+ * '\t' and '\n' are ignored in general parsing for backwards compatibility with
+ * existing shader code and we also have a special case for handling whitespace
+ * at the beginning of fresh lines.
+ *
+ * Otherwise just add the new character to the pretty string, indenting if necessary.
+ */
+ if (fInParseUntilNewline) {
+ this->parseUntilNewline();
+ } else if (fInParseUntil) {
+ this->parseUntil(fInParseUntilToken);
+ } else if (this->hasToken("#") || this->hasToken("//")) {
+ this->parseUntilNewline();
+ } else if (this->hasToken("/*")) {
+ this->parseUntil("*/");
+ } else if ('{' == fInput[fIndex]) {
+ this->newline();
+ this->appendChar('{');
+ fTabs++;
+ this->newline();
+ } else if ('}' == fInput[fIndex]) {
+ fTabs--;
+ this->newline();
+ this->appendChar('}');
+ this->newline();
+ } else if (this->hasToken(")")) {
+ parensDepth--;
+ } else if (this->hasToken("(")) {
+ parensDepth++;
+ } else if (!parensDepth && this->hasToken(";")) {
+ this->newline();
+ } else if ('\t' == fInput[fIndex] || '\n' == fInput[fIndex] ||
+ (fFreshline && ' ' == fInput[fIndex])) {
+ fIndex++;
+ } else {
+ this->appendChar(fInput[fIndex]);
+ }
+ }
+ }
+ return fPretty;
+ }
+private:
+ void appendChar(char c) {
+ this->tabString();
+ fPretty.appendf("%c", fInput[fIndex++]);
+ fFreshline = false;
+ }
+
+ // hasToken automatically consumes the next token, if it is a match, and then tabs
+ // if necessary, before inserting the token into the pretty string
+ bool hasToken(const char* token) {
+ size_t i = fIndex;
+ for (size_t j = 0; token[j] && fLength > i; i++, j++) {
+ if (token[j] != fInput[i]) {
+ return false;
+ }
+ }
+ this->tabString();
+ fIndex = i;
+ fPretty.append(token);
+ fFreshline = false;
+ return true;
+ }
+
+ void parseUntilNewline() {
+ while (fLength > fIndex) {
+ if ('\n' == fInput[fIndex]) {
+ fIndex++;
+ this->newline();
+ fInParseUntilNewline = false;
+ break;
+ }
+ fPretty.appendf("%c", fInput[fIndex++]);
+ fInParseUntilNewline = true;
+ }
+ }
+
+ // this code assumes it is not actually searching for a newline. If you need to search for a
+ // newline, then use the function above. If you do search for a newline with this function
+ // it will consume the entire string and the output will certainly not be prettified
+ void parseUntil(const char* token) {
+ while (fLength > fIndex) {
+ // For embedded newlines, this code will make sure to embed the newline in the
+ // pretty string, increase the linecount, and tab out the next line to the appropriate
+ // place
+ if ('\n' == fInput[fIndex]) {
+ this->newline();
+ this->tabString();
+ fIndex++;
+ }
+ if (this->hasToken(token)) {
+ fInParseUntil = false;
+ break;
+ }
+ fFreshline = false;
+ fPretty.appendf("%c", fInput[fIndex++]);
+ fInParseUntil = true;
+ fInParseUntilToken = token;
+ }
+ }
+
+ // We only tab if on a newline, otherwise consider the line tabbed
+ void tabString() {
+ if (fFreshline) {
+ for (int t = 0; t < fTabs; t++) {
+ fPretty.append("\t");
+ }
+ }
+ }
+
+ // newline is really a request to add a newline; if we are already on a fresh line there
+ // is no reason to add another one
+ void newline() {
+ if (!fFreshline) {
+ fFreshline = true;
+ fPretty.append("\n");
+ this->lineNumbering();
+ }
+ }
+
+ void lineNumbering() {
+ if (fCountlines) {
+ fPretty.appendf("%4d\t", fLinecount++);
+ }
+ }
+
+ bool fCountlines, fFreshline;
+ int fTabs, fLinecount;
+ size_t fIndex, fLength;
+ const char* fInput;
+ SkString fPretty;
+
+ // Some helpers for parseUntil when we go over a string length
+ bool fInParseUntilNewline;
+ bool fInParseUntil;
+ const char* fInParseUntilToken;
+};
+
+SkString PrettyPrintGLSL(const char** strings,
+ int* lengths,
+ int count,
+ bool countlines) {
+ GLSLPrettyPrint pp;
+ return pp.prettify(strings, lengths, count, countlines);
+}
+
+} // end namespace
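
A small usage sketch for the pretty printer just added (not part of the patch; the shader string and function name are made up, and SkDebugf/strlen are assumed available via SkTypes.h and <string.h>):

    void dump_shader_sketch() {
        const char* src = "void main(){gl_FragColor=vec4(1.0);}";
        int len = (int) strlen(src);
        // countlines == true prefixes each output line with a 1-based line number.
        SkString pretty = GrGLSLPrettyPrint::PrettyPrintGLSL(&src, &len, 1, true);
        SkDebugf("%s\n", pretty.c_str());
    }
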
diff --git a/gfx/skia/skia/src/gpu/gl/builders/GrGLShaderStringBuilder.cpp b/gfx/skia/skia/src/gpu/gl/builders/GrGLShaderStringBuilder.cpp
new file mode 100644
index 000000000..d2e49a5cf
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/builders/GrGLShaderStringBuilder.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLShaderStringBuilder.h"
+#include "gl/GrGLGpu.h"
+#include "gl/GrGLSLPrettyPrint.h"
+#include "SkTraceEvent.h"
+
+#define GL_CALL(X) GR_GL_CALL(gpu->glInterface(), X)
+#define GL_CALL_RET(R, X) GR_GL_CALL_RET(gpu->glInterface(), R, X)
+
+// Print the source code for all shaders generated.
+static const bool c_PrintShaders{false};
+
+static void print_shader_source(const char** strings, int* lengths, int count);
+
+GrGLuint GrGLCompileAndAttachShader(const GrGLContext& glCtx,
+ GrGLuint programId,
+ GrGLenum type,
+ const char** strings,
+ int* lengths,
+ int count,
+ GrGpu::Stats* stats) {
+ const GrGLInterface* gli = glCtx.interface();
+
+ GrGLuint shaderId;
+ GR_GL_CALL_RET(gli, shaderId, CreateShader(type));
+ if (0 == shaderId) {
+ return 0;
+ }
+
+#ifdef SK_DEBUG
+ SkString prettySource = GrGLSLPrettyPrint::PrettyPrintGLSL(strings, lengths, count, false);
+ const GrGLchar* sourceStr = prettySource.c_str();
+ GrGLint sourceLength = static_cast<GrGLint>(prettySource.size());
+ GR_GL_CALL(gli, ShaderSource(shaderId, 1, &sourceStr, &sourceLength));
+#else
+ GR_GL_CALL(gli, ShaderSource(shaderId, count, strings, lengths));
+#endif
+
+ // If tracing is enabled in Chrome then we pretty print
+ bool traceShader;
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), &traceShader);
+ if (traceShader) {
+ SkString shader = GrGLSLPrettyPrint::PrettyPrintGLSL(strings, lengths, count, false);
+ TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "skia_gpu::GLShader",
+ TRACE_EVENT_SCOPE_THREAD, "shader", TRACE_STR_COPY(shader.c_str()));
+ }
+
+ stats->incShaderCompilations();
+ GR_GL_CALL(gli, CompileShader(shaderId));
+
+ // Calling GetShaderiv in Chromium is quite expensive. Assume success in release builds.
+ bool checkCompiled = kChromium_GrGLDriver != glCtx.driver();
+#ifdef SK_DEBUG
+ checkCompiled = true;
+#endif
+ if (checkCompiled) {
+ GrGLint compiled = GR_GL_INIT_ZERO;
+ GR_GL_CALL(gli, GetShaderiv(shaderId, GR_GL_COMPILE_STATUS, &compiled));
+
+ if (!compiled) {
+ GrGLint infoLen = GR_GL_INIT_ZERO;
+ GR_GL_CALL(gli, GetShaderiv(shaderId, GR_GL_INFO_LOG_LENGTH, &infoLen));
+ SkAutoMalloc log(sizeof(char)*(infoLen+1)); // outside if for debugger
+ if (infoLen > 0) {
+ // Retrieve the length even though we don't need it, to work around a bug in the
+ // Chromium command buffer's param validation.
+ GrGLsizei length = GR_GL_INIT_ZERO;
+ GR_GL_CALL(gli, GetShaderInfoLog(shaderId, infoLen+1, &length, (char*)log.get()));
+ print_shader_source(strings, lengths, count);
+ SkDebugf("\n%s", (const char*)log.get());
+ }
+ SkDEBUGFAIL("Shader compilation failed!");
+ GR_GL_CALL(gli, DeleteShader(shaderId));
+ return 0;
+ }
+ }
+
+ if (c_PrintShaders) {
+ print_shader_source(strings, lengths, count);
+ }
+
+ // Attach the shader, but defer deletion until after we have linked the program.
+ // This works around a bug in the Android emulator's GLES2 wrapper which
+ // will immediately delete the shader object and free its memory even though it's
+ // attached to a program, which then causes glLinkProgram to fail.
+ GR_GL_CALL(gli, AttachShader(programId, shaderId));
+
+ return shaderId;
+}
+
+static void print_shader_source(const char** strings, int* lengths, int count) {
+ const SkString& pretty = GrGLSLPrettyPrint::PrettyPrintGLSL(strings, lengths, count, true);
+ SkTArray<SkString> lines;
+ SkStrSplit(pretty.c_str(), "\n", &lines);
+ for (const SkString& line : lines) {
+ // Print the shader one line at the time so it doesn't get truncated by the adb log.
+ SkDebugf("%s\n", line.c_str());
+ }
+}
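
For reference, a sketch of the raw GLES2 sequence that GrGLCompileAndAttachShader wraps, minus the pretty printing, tracing, and Chromium shortcut (assumes <GLES2/gl2.h>; the helper name is made up and this is an illustration, not part of the patch):

    GLuint compile_and_attach_sketch(GLuint program, GLenum type, const char* source) {
        GLuint shader = glCreateShader(type);
        if (0 == shader) {
            return 0;
        }
        glShaderSource(shader, 1, &source, nullptr);
        glCompileShader(shader);

        GLint compiled = 0;
        glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
        if (!compiled) {
            char log[1024];
            glGetShaderInfoLog(shader, sizeof(log), nullptr, log);
            // Print 'log' here before bailing out.
            glDeleteShader(shader);
            return 0;
        }
        // Attach now, delete only after linking: some wrappers (see the Android emulator
        // note above) free the shader immediately on glDeleteShader even while attached.
        glAttachShader(program, shader);
        return shader;
    }
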
diff --git a/gfx/skia/skia/src/gpu/gl/builders/GrGLShaderStringBuilder.h b/gfx/skia/skia/src/gpu/gl/builders/GrGLShaderStringBuilder.h
new file mode 100644
index 000000000..062e229cd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/builders/GrGLShaderStringBuilder.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLShaderStringBuilder_DEFINED
+#define GrGLShaderStringBuilder_DEFINED
+
+#include "GrAllocator.h"
+#include "GrGpu.h"
+#include "gl/GrGLContext.h"
+#include "SkTypes.h"
+
+GrGLuint GrGLCompileAndAttachShader(const GrGLContext& glCtx,
+ GrGLuint programId,
+ GrGLenum type,
+ const char** strings,
+ int* lengths,
+ int count,
+ GrGpu::Stats*);
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/gl/egl/GrGLCreateNativeInterface_egl.cpp b/gfx/skia/skia/src/gpu/gl/egl/GrGLCreateNativeInterface_egl.cpp
new file mode 100644
index 000000000..4f427820b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/egl/GrGLCreateNativeInterface_egl.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "gl/GrGLInterface.h"
+#include "gl/GrGLAssembleInterface.h"
+#include "gl/GrGLUtil.h"
+
+#include <EGL/egl.h>
+#include <GLES2/gl2.h>
+
+static GrGLFuncPtr egl_get_gl_proc(void* ctx, const char name[]) {
+ SkASSERT(nullptr == ctx);
+ GrGLFuncPtr ptr = eglGetProcAddress(name);
+ if (!ptr) {
+ if (0 == strcmp("eglQueryString", name)) {
+ return (GrGLFuncPtr)eglQueryString;
+ } else if (0 == strcmp("eglGetCurrentDisplay", name)) {
+ return (GrGLFuncPtr)eglGetCurrentDisplay;
+ }
+ }
+ return ptr;
+}
+
+const GrGLInterface* GrGLCreateNativeInterface() {
+ return GrGLAssembleInterface(nullptr, egl_get_gl_proc);
+}
diff --git a/gfx/skia/skia/src/gpu/gl/glfw/GrGLCreateNativeInterface_glfw.cpp b/gfx/skia/skia/src/gpu/gl/glfw/GrGLCreateNativeInterface_glfw.cpp
new file mode 100644
index 000000000..e2f6e668c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/glfw/GrGLCreateNativeInterface_glfw.cpp
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "gl/GrGLInterface.h"
+#include "gl/GrGLAssembleInterface.h"
+#include "gl/GrGLUtil.h"
+
+#include <GLFW/glfw3.h>
+
+static GrGLFuncPtr glfw_get(void* ctx, const char name[]) {
+ SkASSERT(nullptr == ctx);
+ SkASSERT(glfwGetCurrentContext());
+ return glfwGetProcAddress(name);
+}
+
+const GrGLInterface* GrGLCreateNativeInterface() {
+ if (nullptr == glfwGetCurrentContext()) {
+ return nullptr;
+ }
+
+ return GrGLAssembleInterface(nullptr, glfw_get);
+}
diff --git a/gfx/skia/skia/src/gpu/gl/glx/GrGLCreateNativeInterface_glx.cpp b/gfx/skia/skia/src/gpu/gl/glx/GrGLCreateNativeInterface_glx.cpp
new file mode 100644
index 000000000..4a204123e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/glx/GrGLCreateNativeInterface_glx.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "gl/GrGLInterface.h"
+#include "gl/GrGLAssembleInterface.h"
+#include "gl/GrGLUtil.h"
+
+#include <GL/glx.h>
+
+static GrGLFuncPtr glx_get(void* ctx, const char name[]) {
+ // Avoid calling glXGetProcAddress() for EGL procs.
+ // We don't expect it to ever succeed, but sometimes it returns non-null anyway.
+ if (0 == strncmp(name, "egl", 3)) {
+ return nullptr;
+ }
+
+ SkASSERT(nullptr == ctx);
+ SkASSERT(glXGetCurrentContext());
+ return glXGetProcAddress(reinterpret_cast<const GLubyte*>(name));
+}
+
+const GrGLInterface* GrGLCreateNativeInterface() {
+ if (nullptr == glXGetCurrentContext()) {
+ return nullptr;
+ }
+
+ return GrGLAssembleInterface(nullptr, glx_get);
+}
diff --git a/gfx/skia/skia/src/gpu/gl/iOS/GrGLCreateNativeInterface_iOS.cpp b/gfx/skia/skia/src/gpu/gl/iOS/GrGLCreateNativeInterface_iOS.cpp
new file mode 100644
index 000000000..c7eb46b73
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/iOS/GrGLCreateNativeInterface_iOS.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "gl/GrGLInterface.h"
+#include "gl/GrGLAssembleInterface.h"
+#include <dlfcn.h>
+
+class GLLoader {
+public:
+ GLLoader() {
+ fLibrary = dlopen(
+ "/System/Library/Frameworks/OpenGL.framework/Versions/A/Libraries/libGL.dylib",
+ RTLD_LAZY);
+ }
+
+ ~GLLoader() {
+ if (fLibrary) {
+ dlclose(fLibrary);
+ }
+ }
+
+ void* handle() const {
+ return nullptr == fLibrary ? RTLD_DEFAULT : fLibrary;
+ }
+
+private:
+ void* fLibrary;
+};
+
+class GLProcGetter {
+public:
+ GLProcGetter() {}
+
+ GrGLFuncPtr getProc(const char name[]) const {
+ return (GrGLFuncPtr) dlsym(fLoader.handle(), name);
+ }
+
+private:
+ GLLoader fLoader;
+};
+
+static GrGLFuncPtr ios_get_gl_proc(void* ctx, const char name[]) {
+ SkASSERT(ctx);
+ const GLProcGetter* getter = (const GLProcGetter*) ctx;
+ return getter->getProc(name);
+}
+
+const GrGLInterface* GrGLCreateNativeInterface() {
+ GLProcGetter getter;
+ return GrGLAssembleGLESInterface(&getter, ios_get_gl_proc);
+}
diff --git a/gfx/skia/skia/src/gpu/gl/mac/GrGLCreateNativeInterface_mac.cpp b/gfx/skia/skia/src/gpu/gl/mac/GrGLCreateNativeInterface_mac.cpp
new file mode 100644
index 000000000..c843d0e8c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/mac/GrGLCreateNativeInterface_mac.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC)
+
+
+#include "gl/GrGLInterface.h"
+#include "gl/GrGLAssembleInterface.h"
+
+#include <dlfcn.h>
+
+class GLLoader {
+public:
+ GLLoader() {
+ fLibrary = dlopen(
+ "/System/Library/Frameworks/OpenGL.framework/Versions/A/Libraries/libGL.dylib",
+ RTLD_LAZY);
+ }
+
+ ~GLLoader() {
+ if (fLibrary) {
+ dlclose(fLibrary);
+ }
+ }
+
+ void* handle() const {
+ return nullptr == fLibrary ? RTLD_DEFAULT : fLibrary;
+ }
+
+private:
+ void* fLibrary;
+};
+
+class GLProcGetter {
+public:
+ GLProcGetter() {}
+
+ GrGLFuncPtr getProc(const char name[]) const {
+ return (GrGLFuncPtr) dlsym(fLoader.handle(), name);
+ }
+
+private:
+ GLLoader fLoader;
+};
+
+static GrGLFuncPtr mac_get_gl_proc(void* ctx, const char name[]) {
+ SkASSERT(ctx);
+ const GLProcGetter* getter = (const GLProcGetter*) ctx;
+ return getter->getProc(name);
+}
+
+const GrGLInterface* GrGLCreateNativeInterface() {
+ GLProcGetter getter;
+ return GrGLAssembleGLInterface(&getter, mac_get_gl_proc);
+}
+
+#endif//defined(SK_BUILD_FOR_MAC)
diff --git a/gfx/skia/skia/src/gpu/gl/mesa/osmesa_wrapper.h b/gfx/skia/skia/src/gpu/gl/mesa/osmesa_wrapper.h
new file mode 100644
index 000000000..4bb70a473
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/mesa/osmesa_wrapper.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Older versions of XQuartz have a bug where a header included by osmesa.h
+// defines GL_GLEXT_PROTOTYPES. This will cause a redefinition warning if
+// the file that includes osmesa.h already defined it. XCode 3 uses a version
+// of gcc (4.2.1) that does not support the diagnostic pragma to disable a
+// warning (added in 4.2.4). So we use the system_header pragma to shut GCC
+// up about warnings in osmesa.h
+#pragma GCC system_header
+#include <GL/osmesa.h>
diff --git a/gfx/skia/skia/src/gpu/gl/win/GrGLCreateNativeInterface_win.cpp b/gfx/skia/skia/src/gpu/gl/win/GrGLCreateNativeInterface_win.cpp
new file mode 100644
index 000000000..5f26fdde3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/gl/win/GrGLCreateNativeInterface_win.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32)
+
+#include "SkLeanWindows.h"
+
+#include "gl/GrGLInterface.h"
+#include "gl/GrGLAssembleInterface.h"
+#include "gl/GrGLUtil.h"
+
+class AutoLibraryUnload {
+public:
+ AutoLibraryUnload(const char* moduleName) {
+ fModule = LoadLibraryA(moduleName);
+ }
+ ~AutoLibraryUnload() {
+ if (fModule) {
+ FreeLibrary(fModule);
+ }
+ }
+ HMODULE get() const { return fModule; }
+
+private:
+ HMODULE fModule;
+};
+
+class GLProcGetter {
+public:
+ GLProcGetter() : fGLLib("opengl32.dll") {}
+
+ bool isInitialized() const { return SkToBool(fGLLib.get()); }
+
+ GrGLFuncPtr getProc(const char name[]) const {
+ GrGLFuncPtr proc;
+ if ((proc = (GrGLFuncPtr) GetProcAddress(fGLLib.get(), name))) {
+ return proc;
+ }
+ if ((proc = (GrGLFuncPtr) wglGetProcAddress(name))) {
+ return proc;
+ }
+ return nullptr;
+ }
+
+private:
+ AutoLibraryUnload fGLLib;
+};
+
+static GrGLFuncPtr win_get_gl_proc(void* ctx, const char name[]) {
+ SkASSERT(ctx);
+ SkASSERT(wglGetCurrentContext());
+ const GLProcGetter* getter = (const GLProcGetter*) ctx;
+ return getter->getProc(name);
+}
+
+/*
+ * Windows makes the GL funcs all be __stdcall instead of __cdecl :(
+ * This implementation will only work if GR_GL_FUNCTION_TYPE is __stdcall.
+ * Otherwise, a springboard would be needed that hides the calling convention.
+ */
+const GrGLInterface* GrGLCreateNativeInterface() {
+ if (nullptr == wglGetCurrentContext()) {
+ return nullptr;
+ }
+
+ GLProcGetter getter;
+ if (!getter.isInitialized()) {
+ return nullptr;
+ }
+
+ GrGLGetStringProc getString = (GrGLGetStringProc)getter.getProc("glGetString");
+ if (nullptr == getString) {
+ return nullptr;
+ }
+ const char* verStr = reinterpret_cast<const char*>(getString(GR_GL_VERSION));
+ GrGLStandard standard = GrGLGetStandardInUseFromString(verStr);
+
+ if (kGLES_GrGLStandard == standard) {
+ return GrGLAssembleGLESInterface(&getter, win_get_gl_proc);
+ } else if (kGL_GrGLStandard == standard) {
+ return GrGLAssembleGLInterface(&getter, win_get_gl_proc);
+ }
+ return nullptr;
+}
+
+#endif//defined(SK_BUILD_FOR_WIN32)
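
The comment above notes that a "springboard" would be needed if GR_GL_FUNCTION_TYPE were not __stdcall. Purely as an illustration (not something this patch does), such a springboard is just a per-function forwarding wrapper that hides the driver's calling convention behind the one Skia expects:

    // __stdcall pointer obtained from GetProcAddress / wglGetProcAddress; must be
    // initialized before the springboard is called.
    typedef void (__stdcall* StdcallClearProc)(unsigned int /* GLbitfield */);
    static StdcallClearProc gStdcallClear = nullptr;

    // Default-convention wrapper with the signature Skia would store; it simply forwards.
    static void springboard_clear(unsigned int mask) {
        gStdcallClear(mask);
    }
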
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSL.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSL.cpp
new file mode 100644
index 000000000..bec4784db
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSL.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLSL.h"
+#include "GrGLSLCaps.h"
+#include "SkString.h"
+
+bool GrGLSLSupportsNamedFragmentShaderOutputs(GrGLSLGeneration gen) {
+ switch (gen) {
+ case k110_GrGLSLGeneration:
+ return false;
+ case k130_GrGLSLGeneration:
+ case k140_GrGLSLGeneration:
+ case k150_GrGLSLGeneration:
+ case k330_GrGLSLGeneration:
+ case k400_GrGLSLGeneration:
+ case k310es_GrGLSLGeneration:
+ case k320es_GrGLSLGeneration:
+ return true;
+ }
+ return false;
+}
+
+void GrGLSLAppendDefaultFloatPrecisionDeclaration(GrSLPrecision p,
+ const GrGLSLCaps& glslCaps,
+ SkString* out) {
+ if (glslCaps.usesPrecisionModifiers()) {
+ switch (p) {
+ case kHigh_GrSLPrecision:
+ out->append("precision highp float;\n");
+ break;
+ case kMedium_GrSLPrecision:
+ out->append("precision mediump float;\n");
+ break;
+ case kLow_GrSLPrecision:
+ out->append("precision lowp float;\n");
+ break;
+ default:
+ SkFAIL("Unknown precision value.");
+ }
+ }
+}
+
+void GrGLSLMulVarBy4f(SkString* outAppend, const char* vec4VarName, const GrGLSLExpr4& mulFactor) {
+ if (mulFactor.isOnes()) {
+ *outAppend = SkString();
+ }
+
+ if (mulFactor.isZeros()) {
+ outAppend->appendf("%s = vec4(0);", vec4VarName);
+ } else {
+ outAppend->appendf("%s *= %s;", vec4VarName, mulFactor.c_str());
+ }
+}
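
A brief usage sketch for GrGLSLMulVarBy4f as defined above (not part of the patch; the variable names are made up, and the commented results follow directly from the two branches of the function):

    void mul_var_by_4f_sketch() {
        SkString code;
        GrGLSLExpr4 coverage("myCoverage");              // a full (non-folded) expression
        GrGLSLMulVarBy4f(&code, "outputColor", coverage);
        // code now contains:  outputColor *= myCoverage;

        SkString zeroCase;
        GrGLSLMulVarBy4f(&zeroCase, "outputColor", GrGLSLExpr4(0));
        // zeroCase now contains:  outputColor = vec4(0);
    }
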
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSL.h b/gfx/skia/skia/src/gpu/glsl/GrGLSL.h
new file mode 100644
index 000000000..e4e165b28
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSL.h
@@ -0,0 +1,390 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSL_DEFINED
+#define GrGLSL_DEFINED
+
+#include "GrTypesPriv.h"
+#include "SkString.h"
+
+class GrGLSLCaps;
+
+// Limited set of GLSL versions we build shaders for. Caller should round
+// down the GLSL version to one of these enums.
+enum GrGLSLGeneration {
+ /**
+ * Desktop GLSL 1.10 and ES2 shading language (based on desktop GLSL 1.20)
+ */
+ k110_GrGLSLGeneration,
+ /**
+ * Desktop GLSL 1.30
+ */
+ k130_GrGLSLGeneration,
+ /**
+ * Desktop GLSL 1.40
+ */
+ k140_GrGLSLGeneration,
+ /**
+ * Desktop GLSL 1.50
+ */
+ k150_GrGLSLGeneration,
+ /**
+ * Desktop GLSL 3.30, and ES GLSL 3.00
+ */
+ k330_GrGLSLGeneration,
+ /**
+ * Desktop GLSL 4.00
+ */
+ k400_GrGLSLGeneration,
+ /**
+ * ES GLSL 3.10 only TODO Make GLSLCap objects to make this more granular
+ */
+ k310es_GrGLSLGeneration,
+ /**
+ * ES GLSL 3.20
+ */
+ k320es_GrGLSLGeneration,
+};
+
+bool GrGLSLSupportsNamedFragmentShaderOutputs(GrGLSLGeneration);
+
+/**
+ * Gets the name of the function that should be used to sample a 2D texture. Coord type is used
+ * to indicate whether the texture is sampled using projective texturing (kVec3f) or not (kVec2f).
+ */
+inline const char* GrGLSLTexture2DFunctionName(GrSLType coordType, GrSLType samplerType,
+ GrGLSLGeneration glslGen) {
+ SkASSERT(GrSLTypeIs2DCombinedSamplerType(samplerType));
+ SkASSERT(kVec2f_GrSLType == coordType || kVec3f_GrSLType == coordType);
+ // GL_TEXTURE_RECTANGLE_ARB is written against OpenGL 2.0/GLSL 1.10. At that time there were
+ // separate texture*() functions. In OpenGL 3.0/GLSL 1.30 the different texture*() functions
+ // were deprecated in favor of the unified texture() function. RECTANGLE textures became
+ // standard in OpenGL 3.2/GLSL 1.50 and use texture(). It isn't completely clear what function
+ // should be used for RECTANGLE textures in GLSL versions >= 1.30 && < 1.50. We're going with
+ // using texture().
+ if (glslGen >= k130_GrGLSLGeneration) {
+ return (kVec2f_GrSLType == coordType) ? "texture" : "textureProj";
+ }
+ if (kVec2f_GrSLType == coordType) {
+ return (samplerType == kTexture2DRectSampler_GrSLType) ? "texture2DRect" : "texture2D";
+ } else {
+ return (samplerType == kTexture2DRectSampler_GrSLType) ? "texture2DRectProj"
+ : "texture2DProj";
+ }
+}
+
+/**
+ * Adds a line of GLSL code to declare the default precision for float types.
+ */
+void GrGLSLAppendDefaultFloatPrecisionDeclaration(GrSLPrecision,
+ const GrGLSLCaps& glslCaps,
+ SkString* out);
+
+/**
+ * Converts a GrSLPrecision to its corresponding GLSL precision qualifier.
+ */
+static inline const char* GrGLSLPrecisionString(GrSLPrecision p) {
+ switch (p) {
+ case kLow_GrSLPrecision:
+ return "lowp";
+ case kMedium_GrSLPrecision:
+ return "mediump";
+ case kHigh_GrSLPrecision:
+ return "highp";
+ default:
+ SkFAIL("Unexpected precision type.");
+ return "";
+ }
+}
+
+/**
+ * Converts a GrSLType to a string containing the name of the equivalent GLSL type.
+ */
+static inline const char* GrGLSLTypeString(GrSLType t) {
+ switch (t) {
+ case kVoid_GrSLType:
+ return "void";
+ case kFloat_GrSLType:
+ return "float";
+ case kVec2f_GrSLType:
+ return "vec2";
+ case kVec3f_GrSLType:
+ return "vec3";
+ case kVec4f_GrSLType:
+ return "vec4";
+ case kMat22f_GrSLType:
+ return "mat2";
+ case kMat33f_GrSLType:
+ return "mat3";
+ case kMat44f_GrSLType:
+ return "mat4";
+ case kTexture2DSampler_GrSLType:
+ return "sampler2D";
+ case kTextureExternalSampler_GrSLType:
+ return "samplerExternalOES";
+ case kTexture2DRectSampler_GrSLType:
+ return "sampler2DRect";
+ case kTextureBufferSampler_GrSLType:
+ return "samplerBuffer";
+ case kBool_GrSLType:
+ return "bool";
+ case kInt_GrSLType:
+ return "int";
+ case kUint_GrSLType:
+ return "uint";
+ case kTexture2D_GrSLType:
+ return "texture2D";
+ case kSampler_GrSLType:
+ return "sampler";
+ default:
+ SkFAIL("Unknown shader var type.");
+ return ""; // suppress warning
+ }
+}
+
+/** A generic base-class representing a GLSL expression.
+ * The instance can be a variable name, an expression, or vecN(0) / vecN(1). Does simple
+ * constant folding with the help of 1 and 0.
+ *
+ * Clients should not use this class, rather the specific instantiations defined
+ * later, for example GrGLSLExpr4.
+ */
+template <typename Self>
+class GrGLSLExpr {
+public:
+ bool isOnes() const { return kOnes_ExprType == fType; }
+ bool isZeros() const { return kZeros_ExprType == fType; }
+
+ const char* c_str() const {
+ if (kZeros_ExprType == fType) {
+ return Self::ZerosStr();
+ } else if (kOnes_ExprType == fType) {
+ return Self::OnesStr();
+ }
+ SkASSERT(!fExpr.isEmpty()); // Empty expressions should not be used.
+ return fExpr.c_str();
+ }
+
+ bool isValid() const {
+ return kFullExpr_ExprType != fType || !fExpr.isEmpty();
+ }
+
+protected:
+ /** Constructs an invalid expression.
+ * Useful only as a return value from functions that never actually return
+ * this and instances that will be assigned to later. */
+ GrGLSLExpr()
+ : fType(kFullExpr_ExprType) {
+ // The only constructor that is allowed to build an empty expression.
+ SkASSERT(!this->isValid());
+ }
+
+ /** Constructs an expression with all components as value v */
+ explicit GrGLSLExpr(int v) {
+ if (v == 0) {
+ fType = kZeros_ExprType;
+ } else if (v == 1) {
+ fType = kOnes_ExprType;
+ } else {
+ fType = kFullExpr_ExprType;
+ fExpr.appendf(Self::CastIntStr(), v);
+ }
+ }
+
+ /** Constructs an expression from a string.
+ * Argument expr is a simple expression or a parenthesized expression. */
+ // TODO: make explicit once effects input Exprs.
+ GrGLSLExpr(const char expr[]) {
+ if (nullptr == expr) { // TODO: remove this once effects input Exprs.
+ fType = kOnes_ExprType;
+ } else {
+ fType = kFullExpr_ExprType;
+ fExpr = expr;
+ }
+ SkASSERT(this->isValid());
+ }
+
+ /** Constructs an expression from a string.
+ * Argument expr is a simple expression or a parenthesized expression. */
+ // TODO: make explicit once effects input Exprs.
+ GrGLSLExpr(const SkString& expr) {
+ if (expr.isEmpty()) { // TODO: remove this once effects input Exprs.
+ fType = kOnes_ExprType;
+ } else {
+ fType = kFullExpr_ExprType;
+ fExpr = expr;
+ }
+ SkASSERT(this->isValid());
+ }
+
+ /** Constructs an expression from a string with one substitution. */
+ GrGLSLExpr(const char format[], const char in0[])
+ : fType(kFullExpr_ExprType) {
+ fExpr.appendf(format, in0);
+ }
+
+ /** Constructs an expression from a string with two substitutions. */
+ GrGLSLExpr(const char format[], const char in0[], const char in1[])
+ : fType(kFullExpr_ExprType) {
+ fExpr.appendf(format, in0, in1);
+ }
+
+ /** Returns the expression cast to another type.
+ * Generic implementation that is called for non-trivial cases of casts. */
+ template <typename T>
+ static Self VectorCastImpl(const T& other);
+
+ /** Returns a GLSL multiplication: component-wise or component-by-scalar.
+ * The multiplication will be component-wise or multiply each component by a scalar.
+ *
+ * The returned expression will compute the value of:
+ * vecN(in0.x * in1.x, ...) if dim(T0) == dim(T1) (component-wise)
+ * vecN(in0.x * in1, ...) if dim(T1) == 1 (vector by scalar)
+ * vecN(in0 * in1.x, ...) if dim(T0) == 1 (scalar by vector)
+ */
+ template <typename T0, typename T1>
+ static Self Mul(T0 in0, T1 in1);
+
+ /** Returns a GLSL addition: component-wise or add a scalar to each component.
+ * Return value computes:
+ * vecN(in0.x + in1.x, ...) or vecN(in0.x + in1, ...) or vecN(in0 + in1.x, ...).
+ */
+ template <typename T0, typename T1>
+ static Self Add(T0 in0, T1 in1);
+
+ /** Returns a GLSL subtraction: component-wise or subtract components by a scalar.
+ * Return value computes
+ * vecN(in0.x - in1.x, ...) or vecN(in0.x - in1, ...) or vecN(in0 - in1.x, ...).
+ */
+ template <typename T0, typename T1>
+ static Self Sub(T0 in0, T1 in1);
+
+ /** Returns expression that accesses component(s) of the expression.
+ * format should be the form "%s.x" where 'x' is the component(s) to access.
+ * Caller is responsible for making sure the number of components in the
+ * format string is equal to dim(T).
+ */
+ template <typename T>
+ T extractComponents(const char format[]) const;
+
+private:
+ enum ExprType {
+ kZeros_ExprType,
+ kOnes_ExprType,
+ kFullExpr_ExprType,
+ };
+ ExprType fType;
+ SkString fExpr;
+};
+
+class GrGLSLExpr1;
+class GrGLSLExpr4;
+
+/** Class representing a float GLSL expression. */
+class GrGLSLExpr1 : public GrGLSLExpr<GrGLSLExpr1> {
+public:
+ GrGLSLExpr1()
+ : INHERITED() {
+ }
+ explicit GrGLSLExpr1(int v)
+ : INHERITED(v) {
+ }
+ GrGLSLExpr1(const char* expr)
+ : INHERITED(expr) {
+ }
+ GrGLSLExpr1(const SkString& expr)
+ : INHERITED(expr) {
+ }
+
+ static GrGLSLExpr1 VectorCast(const GrGLSLExpr1& expr);
+
+private:
+ GrGLSLExpr1(const char format[], const char in0[])
+ : INHERITED(format, in0) {
+ }
+ GrGLSLExpr1(const char format[], const char in0[], const char in1[])
+ : INHERITED(format, in0, in1) {
+ }
+
+ static const char* ZerosStr();
+ static const char* OnesStr();
+ static const char* CastStr();
+ static const char* CastIntStr();
+
+ friend GrGLSLExpr1 operator*(const GrGLSLExpr1& in0, const GrGLSLExpr1&in1);
+ friend GrGLSLExpr1 operator+(const GrGLSLExpr1& in0, const GrGLSLExpr1&in1);
+ friend GrGLSLExpr1 operator-(const GrGLSLExpr1& in0, const GrGLSLExpr1&in1);
+
+ friend class GrGLSLExpr<GrGLSLExpr1>;
+ friend class GrGLSLExpr<GrGLSLExpr4>;
+
+ typedef GrGLSLExpr<GrGLSLExpr1> INHERITED;
+};
+
+/** Class representing a float vector (vec4) GLSL expression. */
+class GrGLSLExpr4 : public GrGLSLExpr<GrGLSLExpr4> {
+public:
+ GrGLSLExpr4()
+ : INHERITED() {
+ }
+ explicit GrGLSLExpr4(int v)
+ : INHERITED(v) {
+ }
+ GrGLSLExpr4(const char* expr)
+ : INHERITED(expr) {
+ }
+ GrGLSLExpr4(const SkString& expr)
+ : INHERITED(expr) {
+ }
+
+ typedef GrGLSLExpr1 AExpr;
+ AExpr a() const;
+
+ /** GLSL vec4 cast / constructor, eg vec4(floatv) -> vec4(floatv, floatv, floatv, floatv) */
+ static GrGLSLExpr4 VectorCast(const GrGLSLExpr1& expr);
+ static GrGLSLExpr4 VectorCast(const GrGLSLExpr4& expr);
+
+private:
+ GrGLSLExpr4(const char format[], const char in0[])
+ : INHERITED(format, in0) {
+ }
+ GrGLSLExpr4(const char format[], const char in0[], const char in1[])
+ : INHERITED(format, in0, in1) {
+ }
+
+ static const char* ZerosStr();
+ static const char* OnesStr();
+ static const char* CastStr();
+ static const char* CastIntStr();
+
+ // The vector-by-scalar and scalar-by-vector binary operations.
+ friend GrGLSLExpr4 operator*(const GrGLSLExpr1& in0, const GrGLSLExpr4&in1);
+ friend GrGLSLExpr4 operator+(const GrGLSLExpr1& in0, const GrGLSLExpr4&in1);
+ friend GrGLSLExpr4 operator-(const GrGLSLExpr1& in0, const GrGLSLExpr4&in1);
+ friend GrGLSLExpr4 operator*(const GrGLSLExpr4& in0, const GrGLSLExpr1&in1);
+ friend GrGLSLExpr4 operator+(const GrGLSLExpr4& in0, const GrGLSLExpr1&in1);
+ friend GrGLSLExpr4 operator-(const GrGLSLExpr4& in0, const GrGLSLExpr1&in1);
+
+ // The vector-by-vector, i.e. component-wise, binary operations.
+ friend GrGLSLExpr4 operator*(const GrGLSLExpr4& in0, const GrGLSLExpr4&in1);
+ friend GrGLSLExpr4 operator+(const GrGLSLExpr4& in0, const GrGLSLExpr4&in1);
+ friend GrGLSLExpr4 operator-(const GrGLSLExpr4& in0, const GrGLSLExpr4&in1);
+
+ friend class GrGLSLExpr<GrGLSLExpr4>;
+
+ typedef GrGLSLExpr<GrGLSLExpr4> INHERITED;
+};
+
+/**
+ * Does an inplace mul, *=, of vec4VarName by mulFactor.
+ * A semicolon is added after the assignment.
+ */
+void GrGLSLMulVarBy4f(SkString* outAppend, const char* vec4VarName, const GrGLSLExpr4& mulFactor);
+
+#include "GrGLSL_impl.h"
+
+#endif
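
A short sketch of the constant folding GrGLSLExpr provides (illustration only, not part of the patch; assumes the header just added plus SkTypes.h for SkASSERT):

    void expr_folding_sketch() {
        GrGLSLExpr4 ones(1);             // folded to the canonical "ones" expression
        GrGLSLExpr4 zeros(0);            // folded to the canonical "zeros" expression
        GrGLSLExpr4 src("srcColor");     // full expression; c_str() returns "srcColor"

        SkASSERT(ones.isOnes());
        SkASSERT(zeros.isZeros());
        SkASSERT(!src.isOnes() && !src.isZeros());
        // Callers such as GrGLSLMulVarBy4f branch on isOnes()/isZeros() to special-case
        // these values instead of emitting a full multiply.
    }
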
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLBlend.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLBlend.cpp
new file mode 100644
index 000000000..d2c364035
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLBlend.cpp
@@ -0,0 +1,481 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "GrGLSLBlend.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+
+//////////////////////////////////////////////////////////////////////////////
+// Advanced (non-coeff) blend helpers
+//////////////////////////////////////////////////////////////////////////////
+
+static void hard_light(GrGLSLFragmentBuilder* fsBuilder,
+ const char* final,
+ const char* src,
+ const char* dst) {
+ static const char kComponents[] = { 'r', 'g', 'b' };
+ for (size_t i = 0; i < SK_ARRAY_COUNT(kComponents); ++i) {
+ char component = kComponents[i];
+ fsBuilder->codeAppendf("if (2.0 * %s.%c <= %s.a) {", src, component, src);
+ fsBuilder->codeAppendf("%s.%c = 2.0 * %s.%c * %s.%c;",
+ final, component, src, component, dst, component);
+ fsBuilder->codeAppend("} else {");
+ fsBuilder->codeAppendf("%s.%c = %s.a * %s.a - 2.0 * (%s.a - %s.%c) * (%s.a - %s.%c);",
+ final, component, src, dst, dst, dst, component, src, src,
+ component);
+ fsBuilder->codeAppend("}");
+ }
+ fsBuilder->codeAppendf("%s.rgb += %s.rgb * (1.0 - %s.a) + %s.rgb * (1.0 - %s.a);",
+ final, src, dst, dst, src);
+}
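
For reference (a summary of the code above, not part of the patch): the loop emits the standard premultiplied hard-light blend. Writing $S$, $D$ for the premultiplied source and destination colors and $S_a$, $D_a$ for their alphas, each channel $c$ computes

$$\mathrm{final}_c=\begin{cases}2\,S_cD_c, & 2S_c\le S_a\\[2pt] S_aD_a-2(D_a-D_c)(S_a-S_c), & \text{otherwise}\end{cases}\;+\;S_c(1-D_a)+D_c(1-S_a).$$
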
+
+// Does one component of color-dodge
+static void color_dodge_component(GrGLSLFragmentBuilder* fsBuilder,
+ const char* final,
+ const char* src,
+ const char* dst,
+ const char component) {
+ fsBuilder->codeAppendf("if (0.0 == %s.%c) {", dst, component);
+ fsBuilder->codeAppendf("%s.%c = %s.%c * (1.0 - %s.a);",
+ final, component, src, component, dst);
+ fsBuilder->codeAppend("} else {");
+ fsBuilder->codeAppendf("float d = %s.a - %s.%c;", src, src, component);
+ fsBuilder->codeAppend("if (0.0 == d) {");
+ fsBuilder->codeAppendf("%s.%c = %s.a * %s.a + %s.%c * (1.0 - %s.a) + %s.%c * (1.0 - %s.a);",
+ final, component, src, dst, src, component, dst, dst, component,
+ src);
+ fsBuilder->codeAppend("} else {");
+ fsBuilder->codeAppendf("d = min(%s.a, %s.%c * %s.a / d);",
+ dst, dst, component, src);
+ fsBuilder->codeAppendf("%s.%c = d * %s.a + %s.%c * (1.0 - %s.a) + %s.%c * (1.0 - %s.a);",
+ final, component, src, src, component, dst, dst, component, src);
+ fsBuilder->codeAppend("}");
+ fsBuilder->codeAppend("}");
+}
+
+// Does one component of color-burn
+static void color_burn_component(GrGLSLFragmentBuilder* fsBuilder,
+ const char* final,
+ const char* src,
+ const char* dst,
+ const char component) {
+ fsBuilder->codeAppendf("if (%s.a == %s.%c) {", dst, dst, component);
+ fsBuilder->codeAppendf("%s.%c = %s.a * %s.a + %s.%c * (1.0 - %s.a) + %s.%c * (1.0 - %s.a);",
+ final, component, src, dst, src, component, dst, dst, component,
+ src);
+ fsBuilder->codeAppendf("} else if (0.0 == %s.%c) {", src, component);
+ fsBuilder->codeAppendf("%s.%c = %s.%c * (1.0 - %s.a);",
+ final, component, dst, component, src);
+ fsBuilder->codeAppend("} else {");
+ fsBuilder->codeAppendf("float d = max(0.0, %s.a - (%s.a - %s.%c) * %s.a / %s.%c);",
+ dst, dst, dst, component, src, src, component);
+ fsBuilder->codeAppendf("%s.%c = %s.a * d + %s.%c * (1.0 - %s.a) + %s.%c * (1.0 - %s.a);",
+ final, component, src, src, component, dst, dst, component, src);
+ fsBuilder->codeAppend("}");
+}
+
+// Does one component of soft-light. Caller should have already checked that dst alpha > 0.
+static void soft_light_component_pos_dst_alpha(GrGLSLFragmentBuilder* fsBuilder,
+ const char* final,
+ const char* src,
+ const char* dst,
+ const char component) {
+ // if (2S < Sa)
+ fsBuilder->codeAppendf("if (2.0 * %s.%c <= %s.a) {", src, component, src);
+ // (D^2 (Sa-2 S))/Da+(1-Da) S+D (-Sa+2 S+1)
+ fsBuilder->codeAppendf("%s.%c = (%s.%c*%s.%c*(%s.a - 2.0*%s.%c)) / %s.a +"
+ "(1.0 - %s.a) * %s.%c + %s.%c*(-%s.a + 2.0*%s.%c + 1.0);",
+ final, component, dst, component, dst, component, src, src,
+ component, dst, dst, src, component, dst, component, src, src,
+ component);
+ // else if (4D < Da)
+ fsBuilder->codeAppendf("} else if (4.0 * %s.%c <= %s.a) {",
+ dst, component, dst);
+ fsBuilder->codeAppendf("float DSqd = %s.%c * %s.%c;",
+ dst, component, dst, component);
+ fsBuilder->codeAppendf("float DCub = DSqd * %s.%c;", dst, component);
+ fsBuilder->codeAppendf("float DaSqd = %s.a * %s.a;", dst, dst);
+ fsBuilder->codeAppendf("float DaCub = DaSqd * %s.a;", dst);
+ // (Da^3 (-S)+Da^2 (S-D (3 Sa-6 S-1))+12 Da D^2 (Sa-2 S)-16 D^3 (Sa-2 S))/Da^2
+ fsBuilder->codeAppendf("%s.%c ="
+ "(DaSqd*(%s.%c - %s.%c * (3.0*%s.a - 6.0*%s.%c - 1.0)) +"
+ " 12.0*%s.a*DSqd*(%s.a - 2.0*%s.%c) - 16.0*DCub * (%s.a - 2.0*%s.%c) -"
+ " DaCub*%s.%c) / DaSqd;",
+ final, component, src, component, dst, component,
+ src, src, component, dst, src, src, component, src, src,
+ component, src, component);
+ fsBuilder->codeAppendf("} else {");
+ // -sqrt(Da * D) (Sa-2 S)-Da S+D (Sa-2 S+1)+S
+ fsBuilder->codeAppendf("%s.%c = %s.%c*(%s.a - 2.0*%s.%c + 1.0) + %s.%c -"
+ " sqrt(%s.a*%s.%c)*(%s.a - 2.0*%s.%c) - %s.a*%s.%c;",
+ final, component, dst, component, src, src, component, src, component,
+ dst, dst, component, src, src, component, dst, src, component);
+ fsBuilder->codeAppendf("}");
+}
+
+// Adds a function that takes two colors and an alpha as input. It produces a color with the
+// hue and saturation of the first color, the luminosity of the second color, and the input
+// alpha. It has this signature:
+// vec3 set_luminance(vec3 hueSatColor, float alpha, vec3 lumColor).
+static void add_lum_function(GrGLSLFragmentBuilder* fsBuilder, SkString* setLumFunction) {
+ // Emit a helper that gets the luminance of a color.
+ SkString getFunction;
+ GrGLSLShaderVar getLumArgs[] = {
+ GrGLSLShaderVar("color", kVec3f_GrSLType),
+ };
+ SkString getLumBody("return dot(vec3(0.3, 0.59, 0.11), color);");
+ fsBuilder->emitFunction(kFloat_GrSLType,
+ "luminance",
+ SK_ARRAY_COUNT(getLumArgs), getLumArgs,
+ getLumBody.c_str(),
+ &getFunction);
+
+ // Emit the set luminance function.
+ GrGLSLShaderVar setLumArgs[] = {
+ GrGLSLShaderVar("hueSat", kVec3f_GrSLType),
+ GrGLSLShaderVar("alpha", kFloat_GrSLType),
+ GrGLSLShaderVar("lumColor", kVec3f_GrSLType),
+ };
+ SkString setLumBody;
+ setLumBody.printf("float diff = %s(lumColor - hueSat);", getFunction.c_str());
+ setLumBody.append("vec3 outColor = hueSat + diff;");
+ setLumBody.appendf("float outLum = %s(outColor);", getFunction.c_str());
+ setLumBody.append("float minComp = min(min(outColor.r, outColor.g), outColor.b);"
+ "float maxComp = max(max(outColor.r, outColor.g), outColor.b);"
+ "if (minComp < 0.0 && outLum != minComp) {"
+ "outColor = outLum + ((outColor - vec3(outLum, outLum, outLum)) * outLum) /"
+ "(outLum - minComp);"
+ "}"
+ "if (maxComp > alpha && maxComp != outLum) {"
+ "outColor = outLum +"
+ "((outColor - vec3(outLum, outLum, outLum)) * (alpha - outLum)) /"
+ "(maxComp - outLum);"
+ "}"
+ "return outColor;");
+ fsBuilder->emitFunction(kVec3f_GrSLType,
+ "set_luminance",
+ SK_ARRAY_COUNT(setLumArgs), setLumArgs,
+ setLumBody.c_str(),
+ setLumFunction);
+}
+
+// Adds a function that creates a color with the hue and luminosity of one input color and
+// the saturation of another color. It will have this signature:
+ // vec3 set_saturation(vec3 hueLumColor, vec3 satColor)
+static void add_sat_function(GrGLSLFragmentBuilder* fsBuilder, SkString* setSatFunction) {
+ // Emit a helper that gets the saturation of a color
+ SkString getFunction;
+ GrGLSLShaderVar getSatArgs[] = { GrGLSLShaderVar("color", kVec3f_GrSLType) };
+ SkString getSatBody;
+ getSatBody.printf("return max(max(color.r, color.g), color.b) - "
+ "min(min(color.r, color.g), color.b);");
+ fsBuilder->emitFunction(kFloat_GrSLType,
+ "saturation",
+ SK_ARRAY_COUNT(getSatArgs), getSatArgs,
+ getSatBody.c_str(),
+ &getFunction);
+
+ // Emit a helper that sets the saturation given sorted input channels. This used
+ // to use inout params for min, mid, and max components but that seems to cause
+ // problems on PowerVR drivers. So instead it returns a vec3 where r, g, b are the
+ // adjusted min, mid, and max inputs, respectively.
+ SkString helperFunction;
+ GrGLSLShaderVar helperArgs[] = {
+ GrGLSLShaderVar("minComp", kFloat_GrSLType),
+ GrGLSLShaderVar("midComp", kFloat_GrSLType),
+ GrGLSLShaderVar("maxComp", kFloat_GrSLType),
+ GrGLSLShaderVar("sat", kFloat_GrSLType),
+ };
+ static const char kHelperBody[] = "if (minComp < maxComp) {"
+ "vec3 result;"
+ "result.r = 0.0;"
+ "result.g = sat * (midComp - minComp) / (maxComp - minComp);"
+ "result.b = sat;"
+ "return result;"
+ "} else {"
+ "return vec3(0, 0, 0);"
+ "}";
+ fsBuilder->emitFunction(kVec3f_GrSLType,
+ "set_saturation_helper",
+ SK_ARRAY_COUNT(helperArgs), helperArgs,
+ kHelperBody,
+ &helperFunction);
+
+ GrGLSLShaderVar setSatArgs[] = {
+ GrGLSLShaderVar("hueLumColor", kVec3f_GrSLType),
+ GrGLSLShaderVar("satColor", kVec3f_GrSLType),
+ };
+ const char* helpFunc = helperFunction.c_str();
+ SkString setSatBody;
+ setSatBody.appendf("float sat = %s(satColor);"
+ "if (hueLumColor.r <= hueLumColor.g) {"
+ "if (hueLumColor.g <= hueLumColor.b) {"
+ "hueLumColor.rgb = %s(hueLumColor.r, hueLumColor.g, hueLumColor.b, sat);"
+ "} else if (hueLumColor.r <= hueLumColor.b) {"
+ "hueLumColor.rbg = %s(hueLumColor.r, hueLumColor.b, hueLumColor.g, sat);"
+ "} else {"
+ "hueLumColor.brg = %s(hueLumColor.b, hueLumColor.r, hueLumColor.g, sat);"
+ "}"
+ "} else if (hueLumColor.r <= hueLumColor.b) {"
+ "hueLumColor.grb = %s(hueLumColor.g, hueLumColor.r, hueLumColor.b, sat);"
+ "} else if (hueLumColor.g <= hueLumColor.b) {"
+ "hueLumColor.gbr = %s(hueLumColor.g, hueLumColor.b, hueLumColor.r, sat);"
+ "} else {"
+ "hueLumColor.bgr = %s(hueLumColor.b, hueLumColor.g, hueLumColor.r, sat);"
+ "}"
+ "return hueLumColor;",
+ getFunction.c_str(), helpFunc, helpFunc, helpFunc, helpFunc,
+ helpFunc, helpFunc);
+ fsBuilder->emitFunction(kVec3f_GrSLType,
+ "set_saturation",
+ SK_ARRAY_COUNT(setSatArgs), setSatArgs,
+ setSatBody.c_str(),
+ setSatFunction);
+}
+
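The set_luminance helper above is the standard SetLum/ClipColor step from the non-separable blend definitions. As a cross-check of the emitted GLSL, the same math evaluated on the CPU looks like the following standalone sketch (plain C++, not Skia API; the sample inputs are arbitrary):

    // CPU sketch of the set_luminance() logic emitted above: shift hueSat to the target
    // luminance, then clip the result back into [0, alpha] while preserving that luminance.
    #include <algorithm>
    #include <cstdio>

    struct Vec3 { float r, g, b; };

    static float luminance(Vec3 c) { return 0.3f * c.r + 0.59f * c.g + 0.11f * c.b; }

    static Vec3 setLuminance(Vec3 hueSat, float alpha, Vec3 lumColor) {
        float diff = luminance(lumColor) - luminance(hueSat);
        Vec3 out = { hueSat.r + diff, hueSat.g + diff, hueSat.b + diff };
        float outLum = luminance(out);
        float minComp = std::min({out.r, out.g, out.b});
        float maxComp = std::max({out.r, out.g, out.b});
        if (minComp < 0.0f && outLum != minComp) {
            float s = outLum / (outLum - minComp);
            out = { outLum + (out.r - outLum) * s,
                    outLum + (out.g - outLum) * s,
                    outLum + (out.b - outLum) * s };
        }
        if (maxComp > alpha && maxComp != outLum) {
            float s = (alpha - outLum) / (maxComp - outLum);
            out = { outLum + (out.r - outLum) * s,
                    outLum + (out.g - outLum) * s,
                    outLum + (out.b - outLum) * s };
        }
        return out;
    }

    int main() {
        Vec3 result = setLuminance({0.9f, 0.1f, 0.1f}, 1.0f, {0.5f, 0.5f, 0.5f});
        std::printf("%.3f %.3f %.3f\n", result.r, result.g, result.b);  // ~1.000 0.286 0.286
        return 0;
    }
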
+static void emit_advanced_xfermode_code(GrGLSLFragmentBuilder* fsBuilder, const char* srcColor,
+ const char* dstColor, const char* outputColor,
+ SkXfermode::Mode mode) {
+ SkASSERT(srcColor);
+ SkASSERT(dstColor);
+ SkASSERT(outputColor);
+ // These all perform src-over on the alpha channel.
+ fsBuilder->codeAppendf("%s.a = %s.a + (1.0 - %s.a) * %s.a;",
+ outputColor, srcColor, srcColor, dstColor);
+
+ switch (mode) {
+ case SkXfermode::kOverlay_Mode:
+ // Overlay is Hard-Light with the src and dst reversed
+ hard_light(fsBuilder, outputColor, dstColor, srcColor);
+ break;
+ case SkXfermode::kDarken_Mode:
+ fsBuilder->codeAppendf("%s.rgb = min((1.0 - %s.a) * %s.rgb + %s.rgb, "
+ "(1.0 - %s.a) * %s.rgb + %s.rgb);",
+ outputColor,
+ srcColor, dstColor, srcColor,
+ dstColor, srcColor, dstColor);
+ break;
+ case SkXfermode::kLighten_Mode:
+ fsBuilder->codeAppendf("%s.rgb = max((1.0 - %s.a) * %s.rgb + %s.rgb, "
+ "(1.0 - %s.a) * %s.rgb + %s.rgb);",
+ outputColor,
+ srcColor, dstColor, srcColor,
+ dstColor, srcColor, dstColor);
+ break;
+ case SkXfermode::kColorDodge_Mode:
+ color_dodge_component(fsBuilder, outputColor, srcColor, dstColor, 'r');
+ color_dodge_component(fsBuilder, outputColor, srcColor, dstColor, 'g');
+ color_dodge_component(fsBuilder, outputColor, srcColor, dstColor, 'b');
+ break;
+ case SkXfermode::kColorBurn_Mode:
+ color_burn_component(fsBuilder, outputColor, srcColor, dstColor, 'r');
+ color_burn_component(fsBuilder, outputColor, srcColor, dstColor, 'g');
+ color_burn_component(fsBuilder, outputColor, srcColor, dstColor, 'b');
+ break;
+ case SkXfermode::kHardLight_Mode:
+ hard_light(fsBuilder, outputColor, srcColor, dstColor);
+ break;
+ case SkXfermode::kSoftLight_Mode:
+ fsBuilder->codeAppendf("if (0.0 == %s.a) {", dstColor);
+ fsBuilder->codeAppendf("%s.rgba = %s;", outputColor, srcColor);
+ fsBuilder->codeAppendf("} else {");
+ soft_light_component_pos_dst_alpha(fsBuilder, outputColor, srcColor, dstColor, 'r');
+ soft_light_component_pos_dst_alpha(fsBuilder, outputColor, srcColor, dstColor, 'g');
+ soft_light_component_pos_dst_alpha(fsBuilder, outputColor, srcColor, dstColor, 'b');
+ fsBuilder->codeAppendf("}");
+ break;
+ case SkXfermode::kDifference_Mode:
+ fsBuilder->codeAppendf("%s.rgb = %s.rgb + %s.rgb -"
+ "2.0 * min(%s.rgb * %s.a, %s.rgb * %s.a);",
+ outputColor, srcColor, dstColor, srcColor, dstColor,
+ dstColor, srcColor);
+ break;
+ case SkXfermode::kExclusion_Mode:
+ fsBuilder->codeAppendf("%s.rgb = %s.rgb + %s.rgb - "
+ "2.0 * %s.rgb * %s.rgb;",
+ outputColor, dstColor, srcColor, dstColor, srcColor);
+ break;
+ case SkXfermode::kMultiply_Mode:
+ fsBuilder->codeAppendf("%s.rgb = (1.0 - %s.a) * %s.rgb + "
+ "(1.0 - %s.a) * %s.rgb + "
+ "%s.rgb * %s.rgb;",
+ outputColor, srcColor, dstColor, dstColor, srcColor,
+ srcColor, dstColor);
+ break;
+ case SkXfermode::kHue_Mode: {
+ // SetLum(SetSat(S * Da, Sat(D * Sa)), Sa*Da, D*Sa) + (1 - Sa) * D + (1 - Da) * S
+ SkString setSat, setLum;
+ add_sat_function(fsBuilder, &setSat);
+ add_lum_function(fsBuilder, &setLum);
+ fsBuilder->codeAppendf("vec4 dstSrcAlpha = %s * %s.a;",
+ dstColor, srcColor);
+ fsBuilder->codeAppendf("%s.rgb = %s(%s(%s.rgb * %s.a, dstSrcAlpha.rgb),"
+ "dstSrcAlpha.a, dstSrcAlpha.rgb);",
+ outputColor, setLum.c_str(), setSat.c_str(), srcColor,
+ dstColor);
+ fsBuilder->codeAppendf("%s.rgb += (1.0 - %s.a) * %s.rgb + (1.0 - %s.a) * %s.rgb;",
+ outputColor, srcColor, dstColor, dstColor, srcColor);
+ break;
+ }
+ case SkXfermode::kSaturation_Mode: {
+ // SetLum(SetSat(D * Sa, Sat(S * Da)), Sa*Da, D*Sa)) + (1 - Sa) * D + (1 - Da) * S
+ SkString setSat, setLum;
+ add_sat_function(fsBuilder, &setSat);
+ add_lum_function(fsBuilder, &setLum);
+ fsBuilder->codeAppendf("vec4 dstSrcAlpha = %s * %s.a;",
+ dstColor, srcColor);
+ fsBuilder->codeAppendf("%s.rgb = %s(%s(dstSrcAlpha.rgb, %s.rgb * %s.a),"
+ "dstSrcAlpha.a, dstSrcAlpha.rgb);",
+ outputColor, setLum.c_str(), setSat.c_str(), srcColor,
+ dstColor);
+ fsBuilder->codeAppendf("%s.rgb += (1.0 - %s.a) * %s.rgb + (1.0 - %s.a) * %s.rgb;",
+ outputColor, srcColor, dstColor, dstColor, srcColor);
+ break;
+ }
+ case SkXfermode::kColor_Mode: {
+ // SetLum(S * Da, Sa* Da, D * Sa) + (1 - Sa) * D + (1 - Da) * S
+ SkString setLum;
+ add_lum_function(fsBuilder, &setLum);
+ fsBuilder->codeAppendf("vec4 srcDstAlpha = %s * %s.a;",
+ srcColor, dstColor);
+ fsBuilder->codeAppendf("%s.rgb = %s(srcDstAlpha.rgb, srcDstAlpha.a, %s.rgb * %s.a);",
+ outputColor, setLum.c_str(), dstColor, srcColor);
+ fsBuilder->codeAppendf("%s.rgb += (1.0 - %s.a) * %s.rgb + (1.0 - %s.a) * %s.rgb;",
+ outputColor, srcColor, dstColor, dstColor, srcColor);
+ break;
+ }
+ case SkXfermode::kLuminosity_Mode: {
+ // SetLum(D * Sa, Sa* Da, S * Da) + (1 - Sa) * D + (1 - Da) * S
+ SkString setLum;
+ add_lum_function(fsBuilder, &setLum);
+ fsBuilder->codeAppendf("vec4 srcDstAlpha = %s * %s.a;",
+ srcColor, dstColor);
+ fsBuilder->codeAppendf("%s.rgb = %s(%s.rgb * %s.a, srcDstAlpha.a, srcDstAlpha.rgb);",
+ outputColor, setLum.c_str(), dstColor, srcColor);
+ fsBuilder->codeAppendf("%s.rgb += (1.0 - %s.a) * %s.rgb + (1.0 - %s.a) * %s.rgb;",
+ outputColor, srcColor, dstColor, dstColor, srcColor);
+ break;
+ }
+ default:
+ SkFAIL("Unknown Custom Xfer mode.");
+ break;
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// Porter-Duff blend helper
+//////////////////////////////////////////////////////////////////////////////
+
+static bool append_porterduff_term(GrGLSLFragmentBuilder* fsBuilder, SkXfermode::Coeff coeff,
+ const char* colorName, const char* srcColorName,
+ const char* dstColorName, bool hasPrevious) {
+ if (SkXfermode::kZero_Coeff == coeff) {
+ return hasPrevious;
+ } else {
+ if (hasPrevious) {
+ fsBuilder->codeAppend(" + ");
+ }
+ fsBuilder->codeAppendf("%s", colorName);
+ switch (coeff) {
+ case SkXfermode::kOne_Coeff:
+ break;
+ case SkXfermode::kSC_Coeff:
+ fsBuilder->codeAppendf(" * %s", srcColorName);
+ break;
+ case SkXfermode::kISC_Coeff:
+ fsBuilder->codeAppendf(" * (vec4(1.0) - %s)", srcColorName);
+ break;
+ case SkXfermode::kDC_Coeff:
+ fsBuilder->codeAppendf(" * %s", dstColorName);
+ break;
+ case SkXfermode::kIDC_Coeff:
+ fsBuilder->codeAppendf(" * (vec4(1.0) - %s)", dstColorName);
+ break;
+ case SkXfermode::kSA_Coeff:
+ fsBuilder->codeAppendf(" * %s.a", srcColorName);
+ break;
+ case SkXfermode::kISA_Coeff:
+ fsBuilder->codeAppendf(" * (1.0 - %s.a)", srcColorName);
+ break;
+ case SkXfermode::kDA_Coeff:
+ fsBuilder->codeAppendf(" * %s.a", dstColorName);
+ break;
+ case SkXfermode::kIDA_Coeff:
+ fsBuilder->codeAppendf(" * (1.0 - %s.a)", dstColorName);
+ break;
+ default:
+ SkFAIL("Unsupported Blend Coeff");
+ }
+ return true;
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+void GrGLSLBlend::AppendMode(GrGLSLFragmentBuilder* fsBuilder, const char* srcColor,
+ const char* dstColor, const char* outColor,
+ SkXfermode::Mode mode) {
+
+ SkXfermode::Coeff srcCoeff, dstCoeff;
+ if (SkXfermode::ModeAsCoeff(mode, &srcCoeff, &dstCoeff)) {
+ fsBuilder->codeAppendf("%s = ", outColor);
+ // append src blend
+ bool didAppend = append_porterduff_term(fsBuilder, srcCoeff, srcColor, srcColor, dstColor,
+ false);
+ // append dst blend
+ if(!append_porterduff_term(fsBuilder, dstCoeff, dstColor, srcColor, dstColor, didAppend)) {
+ fsBuilder->codeAppend("vec4(0, 0, 0, 0)");
+ }
+ fsBuilder->codeAppend(";");
+ } else {
+ emit_advanced_xfermode_code(fsBuilder, srcColor, dstColor, outColor, mode);
+ }
+}
+
+void GrGLSLBlend::AppendRegionOp(GrGLSLFragmentBuilder* fsBuilder, const char* srcColor,
+ const char* dstColor, const char* outColor,
+ SkRegion::Op regionOp) {
+ SkXfermode::Coeff srcCoeff, dstCoeff;
+ switch (regionOp) {
+ case SkRegion::kReplace_Op:
+ srcCoeff = SkXfermode::kOne_Coeff;
+ dstCoeff = SkXfermode::kZero_Coeff;
+ break;
+ case SkRegion::kIntersect_Op:
+ srcCoeff = SkXfermode::kDC_Coeff;
+ dstCoeff = SkXfermode::kZero_Coeff;
+ break;
+ case SkRegion::kUnion_Op:
+ srcCoeff = SkXfermode::kOne_Coeff;
+ dstCoeff = SkXfermode::kISC_Coeff;
+ break;
+ case SkRegion::kXOR_Op:
+ srcCoeff = SkXfermode::kIDC_Coeff;
+ dstCoeff = SkXfermode::kISC_Coeff;
+ break;
+ case SkRegion::kDifference_Op:
+ srcCoeff = SkXfermode::kZero_Coeff;
+ dstCoeff = SkXfermode::kISC_Coeff;
+ break;
+ case SkRegion::kReverseDifference_Op:
+ srcCoeff = SkXfermode::kIDC_Coeff;
+ dstCoeff = SkXfermode::kZero_Coeff;
+ break;
+ default:
+ SkFAIL("Unsupported Op");
+ // We should never get here, but set the coefficients to keep the compiler happy.
+ srcCoeff = SkXfermode::kZero_Coeff;
+ dstCoeff = SkXfermode::kZero_Coeff;
+ }
+ fsBuilder->codeAppendf("%s = ", outColor);
+ // append src blend
+ bool didAppend = append_porterduff_term(fsBuilder, srcCoeff, srcColor, srcColor, dstColor,
+ false);
+ // append dst blend
+ if(!append_porterduff_term(fsBuilder, dstCoeff, dstColor, srcColor, dstColor, didAppend)) {
+ fsBuilder->codeAppend("vec4(0, 0, 0, 0)");
+ }
+ fsBuilder->codeAppend(";");
+}
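
For the coefficient-based path handled by append_porterduff_term(), the following standalone sketch mirrors the string assembly with plain std::string in place of the fragment builder; the srcColor/dstColor/outColor names are placeholders and only a subset of the coefficients is modeled. For src-over (srcCoeff = kOne, dstCoeff = kISA) it builds the same GLSL assignment that AppendMode() emits:

    #include <cstdio>
    #include <string>

    enum class Coeff { kZero, kOne, kSA, kISA };

    // Mirrors append_porterduff_term(): append one "color * coefficient" term, or nothing
    // when the coefficient is zero. Returns whether anything has been appended so far.
    static bool appendTerm(std::string* code, Coeff coeff, const char* color,
                           const char* srcColor, bool hasPrevious) {
        if (coeff == Coeff::kZero) {
            return hasPrevious;
        }
        if (hasPrevious) {
            *code += " + ";
        }
        *code += color;
        switch (coeff) {
            case Coeff::kOne: break;
            case Coeff::kSA:  *code += std::string(" * ") + srcColor + ".a"; break;
            case Coeff::kISA: *code += std::string(" * (1.0 - ") + srcColor + ".a)"; break;
            default:          break;
        }
        return true;
    }

    int main() {
        std::string glsl = "outColor = ";
        bool did = appendTerm(&glsl, Coeff::kOne, "srcColor", "srcColor", false);
        if (!appendTerm(&glsl, Coeff::kISA, "dstColor", "srcColor", did)) {
            glsl += "vec4(0, 0, 0, 0)";
        }
        glsl += ";";
        // Prints: outColor = srcColor + dstColor * (1.0 - srcColor.a);
        std::printf("%s\n", glsl.c_str());
        return 0;
    }
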
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLBlend.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLBlend.h
new file mode 100644
index 000000000..c8047f8af
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLBlend.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLBlend_DEFINED
+#define GrGLBlend_DEFINED
+
+#include "SkRegion.h"
+#include "SkXfermode.h"
+
+class GrGLSLFragmentBuilder;
+
+namespace GrGLSLBlend {
+ /*
+ * Appends GLSL code to fsBuilder that assigns a specified blend of the srcColor and dstColor
+ * variables to the outColor variable.
+ */
+ void AppendMode(GrGLSLFragmentBuilder* fsBuilder, const char* srcColor,
+ const char* dstColor, const char* outColor, SkXfermode::Mode mode);
+
+ void AppendRegionOp(GrGLSLFragmentBuilder* fsBuilder, const char* srcColor,
+ const char* dstColor, const char* outColor, SkRegion::Op regionOp);
+};
+
+#endif
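
A call to AppendMode(), sketched as it might appear inside a fragment processor's emitCode(); the fsBuilder pointer and the three GLSL variable names are assumed to be provided by the surrounding program builder:

    // Hypothetical usage; "srcColor", "dstColor" and "outColor" are placeholder GLSL
    // variable names assumed to be declared by the caller.
    void appendMultiplyBlend(GrGLSLFragmentBuilder* fsBuilder) {
        GrGLSLBlend::AppendMode(fsBuilder, "srcColor", "dstColor", "outColor",
                                SkXfermode::kMultiply_Mode);
    }
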
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLCaps.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLCaps.cpp
new file mode 100755
index 000000000..b33e3082e
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLCaps.cpp
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrGLSLCaps.h"
+
+#include "GrContextOptions.h"
+
+////////////////////////////////////////////////////////////////////////////////////////////
+
+GrGLSLCaps::GrGLSLCaps(const GrContextOptions& options) {
+ fGLSLGeneration = k330_GrGLSLGeneration;
+
+ fDropsTileOnZeroDivide = false;
+ fFBFetchSupport = false;
+ fFBFetchNeedsCustomOutput = false;
+ fBindlessTextureSupport = false;
+ fUsesPrecisionModifiers = false;
+ fCanUseAnyFunctionInShader = true;
+ fCanUseMinAndAbsTogether = true;
+ fMustForceNegatedAtanParamToFloat = false;
+ fRequiresLocalOutputColorForFBFetch = false;
+ fFlatInterpolationSupport = false;
+ fNoPerspectiveInterpolationSupport = false;
+ fMultisampleInterpolationSupport = false;
+ fSampleVariablesSupport = false;
+ fSampleMaskOverrideCoverageSupport = false;
+ fExternalTextureSupport = false;
+ fTexelFetchSupport = false;
+ fVersionDeclString = nullptr;
+ fShaderDerivativeExtensionString = nullptr;
+ fFragCoordConventionsExtensionString = nullptr;
+ fSecondaryOutputExtensionString = nullptr;
+ fExternalTextureExtensionString = nullptr;
+ fTexelBufferExtensionString = nullptr;
+ fNoPerspectiveInterpolationExtensionString = nullptr;
+ fMultisampleInterpolationExtensionString = nullptr;
+ fSampleVariablesExtensionString = nullptr;
+ fFBFetchColorName = nullptr;
+ fFBFetchExtensionString = nullptr;
+ fMaxVertexSamplers = 0;
+ fMaxGeometrySamplers = 0;
+ fMaxFragmentSamplers = 0;
+ fMaxCombinedSamplers = 0;
+ fAdvBlendEqInteraction = kNotSupported_AdvBlendEqInteraction;
+}
+
+SkString GrGLSLCaps::dump() const {
+ SkString r = INHERITED::dump();
+
+ static const char* kAdvBlendEqInteractionStr[] = {
+ "Not Supported",
+ "Automatic",
+ "General Enable",
+ "Specific Enables",
+ };
+ GR_STATIC_ASSERT(0 == kNotSupported_AdvBlendEqInteraction);
+ GR_STATIC_ASSERT(1 == kAutomatic_AdvBlendEqInteraction);
+ GR_STATIC_ASSERT(2 == kGeneralEnable_AdvBlendEqInteraction);
+ GR_STATIC_ASSERT(3 == kSpecificEnables_AdvBlendEqInteraction);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kAdvBlendEqInteractionStr) == kLast_AdvBlendEqInteraction + 1);
+
+ r.appendf("--- GLSL-Specific ---\n");
+
+ r.appendf("FB Fetch Support: %s\n", (fFBFetchSupport ? "YES" : "NO"));
+ r.appendf("Drops tile on zero divide: %s\n", (fDropsTileOnZeroDivide ? "YES" : "NO"));
+ r.appendf("Bindless texture support: %s\n", (fBindlessTextureSupport ? "YES" : "NO"));
+ r.appendf("Uses precision modifiers: %s\n", (fUsesPrecisionModifiers ? "YES" : "NO"));
+ r.appendf("Can use any() function: %s\n", (fCanUseAnyFunctionInShader ? "YES" : "NO"));
+ r.appendf("Can use min() and abs() together: %s\n", (fCanUseMinAndAbsTogether ? "YES" : "NO"));
+ r.appendf("Must force negated atan param to float: %s\n", (fMustForceNegatedAtanParamToFloat ?
+ "YES" : "NO"));
+ r.appendf("Must use local out color for FBFetch: %s\n", (fRequiresLocalOutputColorForFBFetch ?
+ "YES" : "NO"));
+ r.appendf("Flat interpolation support: %s\n", (fFlatInterpolationSupport ? "YES" : "NO"));
+ r.appendf("No perspective interpolation support: %s\n", (fNoPerspectiveInterpolationSupport ?
+ "YES" : "NO"));
+ r.appendf("Multisample interpolation support: %s\n", (fMultisampleInterpolationSupport ?
+ "YES" : "NO"));
+ r.appendf("Sample variables support: %s\n", (fSampleVariablesSupport ? "YES" : "NO"));
+ r.appendf("Sample mask override coverage support: %s\n", (fSampleMaskOverrideCoverageSupport ?
+ "YES" : "NO"));
+ r.appendf("External texture support: %s\n", (fExternalTextureSupport ? "YES" : "NO"));
+ r.appendf("texelFetch support: %s\n", (fTexelFetchSupport ? "YES" : "NO"));
+ r.appendf("Max VS Samplers: %d\n", fMaxVertexSamplers);
+ r.appendf("Max GS Samplers: %d\n", fMaxGeometrySamplers);
+ r.appendf("Max FS Samplers: %d\n", fMaxFragmentSamplers);
+ r.appendf("Max Combined Samplers: %d\n", fMaxFragmentSamplers);
+ r.appendf("Advanced blend equation interaction: %s\n",
+ kAdvBlendEqInteractionStr[fAdvBlendEqInteraction]);
+ return r;
+}
+
+void GrGLSLCaps::initSamplerPrecisionTable() {
+ // Determine the largest precision qualifiers that are effectively the same as lowp/mediump.
+ // e.g. if lowp == mediump, then use mediump instead of lowp.
+ GrSLPrecision effectiveMediumP[kGrShaderTypeCount];
+ GrSLPrecision effectiveLowP[kGrShaderTypeCount];
+ for (int s = 0; s < kGrShaderTypeCount; ++s) {
+ const PrecisionInfo* info = fFloatPrecisions[s];
+ effectiveMediumP[s] = info[kHigh_GrSLPrecision] == info[kMedium_GrSLPrecision] ?
+ kHigh_GrSLPrecision : kMedium_GrSLPrecision;
+ effectiveLowP[s] = info[kMedium_GrSLPrecision] == info[kLow_GrSLPrecision] ?
+ effectiveMediumP[s] : kLow_GrSLPrecision;
+ }
+
+ // Determine which precision qualifiers should be used with samplers.
+ for (int visibility = 0; visibility < (1 << kGrShaderTypeCount); ++visibility) {
+ GrSLPrecision mediump = kHigh_GrSLPrecision;
+ GrSLPrecision lowp = kHigh_GrSLPrecision;
+ for (int s = 0; s < kGrShaderTypeCount; ++s) {
+ if (visibility & (1 << s)) {
+ mediump = SkTMin(mediump, effectiveMediumP[s]);
+ lowp = SkTMin(lowp, effectiveLowP[s]);
+ }
+
+ GR_STATIC_ASSERT(0 == kLow_GrSLPrecision);
+ GR_STATIC_ASSERT(1 == kMedium_GrSLPrecision);
+ GR_STATIC_ASSERT(2 == kHigh_GrSLPrecision);
+
+ GR_STATIC_ASSERT((1 << kVertex_GrShaderType) == kVertex_GrShaderFlag);
+ GR_STATIC_ASSERT((1 << kGeometry_GrShaderType) == kGeometry_GrShaderFlag);
+ GR_STATIC_ASSERT((1 << kFragment_GrShaderType) == kFragment_GrShaderFlag);
+ GR_STATIC_ASSERT(3 == kGrShaderTypeCount);
+ }
+
+ uint8_t* table = fSamplerPrecisions[visibility];
+ table[kUnknown_GrPixelConfig] = kDefault_GrSLPrecision;
+ table[kAlpha_8_GrPixelConfig] = lowp;
+ table[kIndex_8_GrPixelConfig] = lowp;
+ table[kRGB_565_GrPixelConfig] = lowp;
+ table[kRGBA_4444_GrPixelConfig] = lowp;
+ table[kRGBA_8888_GrPixelConfig] = lowp;
+ table[kBGRA_8888_GrPixelConfig] = lowp;
+ table[kSRGBA_8888_GrPixelConfig] = lowp;
+ table[kSBGRA_8888_GrPixelConfig] = lowp;
+ table[kETC1_GrPixelConfig] = lowp;
+ table[kLATC_GrPixelConfig] = lowp;
+ table[kR11_EAC_GrPixelConfig] = lowp;
+ table[kASTC_12x12_GrPixelConfig] = lowp;
+ table[kRGBA_float_GrPixelConfig] = kHigh_GrSLPrecision;
+ table[kAlpha_half_GrPixelConfig] = mediump;
+ table[kRGBA_half_GrPixelConfig] = mediump;
+
+ GR_STATIC_ASSERT(16 == kGrPixelConfigCnt);
+ }
+}
+
+void GrGLSLCaps::onApplyOptionsOverrides(const GrContextOptions& options) {
+}
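
The two steps in initSamplerPrecisionTable() above (collapsing qualifiers that are indistinguishable on a given stage, then taking the minimum effective precision across the stages in a visibility mask) can be sketched in isolation as follows; this is plain C++ with invented per-stage precision values, whereas the real code compares PrecisionInfo structs rather than ints:

    #include <algorithm>
    #include <cstdio>

    enum Precision { kLow = 0, kMedium = 1, kHigh = 2 };
    constexpr int kStageCount = 3;  // vertex, geometry, fragment

    int main() {
        // Invented per-stage float precision "info" for the low/medium/high qualifiers.
        int info[kStageCount][3] = {
            {10, 23, 23},  // vertex: mediump == highp
            {10, 23, 23},  // geometry: mediump == highp
            {10, 10, 23},  // fragment: lowp == mediump
        };

        // Step 1: use the higher qualifier wherever two qualifiers are effectively equal.
        Precision effectiveMedium[kStageCount], effectiveLow[kStageCount];
        for (int s = 0; s < kStageCount; ++s) {
            effectiveMedium[s] = (info[s][kHigh] == info[s][kMedium]) ? kHigh : kMedium;
            effectiveLow[s] = (info[s][kMedium] == info[s][kLow]) ? effectiveMedium[s] : kLow;
        }

        // Step 2: for a sampler visible to the vertex and fragment stages,
        // the usable qualifier is the minimum across those stages.
        int visibility = (1 << 0) | (1 << 2);  // vertex | fragment
        Precision mediump = kHigh, lowp = kHigh;
        for (int s = 0; s < kStageCount; ++s) {
            if (visibility & (1 << s)) {
                mediump = std::min(mediump, effectiveMedium[s]);
                lowp = std::min(lowp, effectiveLow[s]);
            }
        }
        std::printf("mediump -> %d, lowp -> %d\n", mediump, lowp);  // mediump -> 1, lowp -> 0
        return 0;
    }
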
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLCaps.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLCaps.h
new file mode 100755
index 000000000..ac409a36b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLCaps.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGLSLCaps_DEFINED
+#define GrGLSLCaps_DEFINED
+
+#include "GrCaps.h"
+#include "GrGLSL.h"
+#include "GrSwizzle.h"
+
+class GrGLSLCaps : public GrShaderCaps {
+public:
+
+
+ /**
+ * Indicates how GLSL must interact with advanced blend equations. The KHR extension requires
+ * special layout qualifiers in the fragment shader.
+ */
+ enum AdvBlendEqInteraction {
+ kNotSupported_AdvBlendEqInteraction, //!< No _blend_equation_advanced extension
+ kAutomatic_AdvBlendEqInteraction, //!< No interaction required
+ kGeneralEnable_AdvBlendEqInteraction, //!< layout(blend_support_all_equations) out
+ kSpecificEnables_AdvBlendEqInteraction, //!< Specific layout qualifiers per equation
+
+ kLast_AdvBlendEqInteraction = kSpecificEnables_AdvBlendEqInteraction
+ };
+
+ /**
+ * Initializes the GrGLSLCaps to a default set of features
+ */
+ GrGLSLCaps(const GrContextOptions&);
+
+ /**
+ * Some helper functions for encapsulating various extensions to read the framebuffer on OpenGL ES
+ *
+ * TODO(joshualitt) On desktop opengl 4.2+ we can achieve something similar to this effect
+ */
+ bool fbFetchSupport() const { return fFBFetchSupport; }
+
+ bool fbFetchNeedsCustomOutput() const { return fFBFetchNeedsCustomOutput; }
+
+ bool bindlessTextureSupport() const { return fBindlessTextureSupport; }
+
+ const char* versionDeclString() const { return fVersionDeclString; }
+
+ const char* fbFetchColorName() const { return fFBFetchColorName; }
+
+ const char* fbFetchExtensionString() const { return fFBFetchExtensionString; }
+
+ bool dropsTileOnZeroDivide() const { return fDropsTileOnZeroDivide; }
+
+ bool flatInterpolationSupport() const { return fFlatInterpolationSupport; }
+
+ bool noperspectiveInterpolationSupport() const { return fNoPerspectiveInterpolationSupport; }
+
+ bool multisampleInterpolationSupport() const { return fMultisampleInterpolationSupport; }
+
+ bool sampleVariablesSupport() const { return fSampleVariablesSupport; }
+
+ bool sampleMaskOverrideCoverageSupport() const { return fSampleMaskOverrideCoverageSupport; }
+
+ bool externalTextureSupport() const { return fExternalTextureSupport; }
+
+ bool texelFetchSupport() const { return fTexelFetchSupport; }
+
+ AdvBlendEqInteraction advBlendEqInteraction() const { return fAdvBlendEqInteraction; }
+
+ bool mustEnableAdvBlendEqs() const {
+ return fAdvBlendEqInteraction >= kGeneralEnable_AdvBlendEqInteraction;
+ }
+
+ bool mustEnableSpecificAdvBlendEqs() const {
+ return fAdvBlendEqInteraction == kSpecificEnables_AdvBlendEqInteraction;
+ }
+
+ bool mustDeclareFragmentShaderOutput() const {
+ return fGLSLGeneration > k110_GrGLSLGeneration;
+ }
+
+ bool usesPrecisionModifiers() const { return fUsesPrecisionModifiers; }
+
+ // Returns whether we can use the glsl function any() in our shader code.
+ bool canUseAnyFunctionInShader() const { return fCanUseAnyFunctionInShader; }
+
+ bool canUseMinAndAbsTogether() const { return fCanUseMinAndAbsTogether; }
+
+ bool mustForceNegatedAtanParamToFloat() const { return fMustForceNegatedAtanParamToFloat; }
+
+ bool requiresLocalOutputColorForFBFetch() const { return fRequiresLocalOutputColorForFBFetch; }
+
+ // Returns the string of an extension that must be enabled in the shader to support
+ // derivatives. If nullptr is returned then no extension needs to be enabled. Before calling
+ // this function, the caller should check that shaderDerivativeSupport exists.
+ const char* shaderDerivativeExtensionString() const {
+ SkASSERT(this->shaderDerivativeSupport());
+ return fShaderDerivativeExtensionString;
+ }
+
+ // Returns the string of an extension that will do all necessary coord transformations needed
+ // when reading the fragment position. If such an extension does not exist, this function
+ // returns a nullptr, and all transforms of the frag position must be done manually in the
+ // shader.
+ const char* fragCoordConventionsExtensionString() const {
+ return fFragCoordConventionsExtensionString;
+ }
+
+ // This returns the name of an extension that must be enabled in the shader, if such a thing is
+ // required in order to use a secondary output in the shader. This returns a nullptr if no such
+ // extension is required. However, the return value of this function does not say whether dual
+ // source blending is supported.
+ const char* secondaryOutputExtensionString() const {
+ return fSecondaryOutputExtensionString;
+ }
+
+ const char* externalTextureExtensionString() const {
+ SkASSERT(this->externalTextureSupport());
+ return fExternalTextureExtensionString;
+ }
+
+ const char* texelBufferExtensionString() const {
+ SkASSERT(this->texelBufferSupport());
+ return fTexelBufferExtensionString;
+ }
+
+ const char* noperspectiveInterpolationExtensionString() const {
+ SkASSERT(this->noperspectiveInterpolationSupport());
+ return fNoPerspectiveInterpolationExtensionString;
+ }
+
+ const char* multisampleInterpolationExtensionString() const {
+ SkASSERT(this->multisampleInterpolationSupport());
+ return fMultisampleInterpolationExtensionString;
+ }
+
+ const char* sampleVariablesExtensionString() const {
+ SkASSERT(this->sampleVariablesSupport());
+ return fSampleVariablesExtensionString;
+ }
+
+ int maxVertexSamplers() const { return fMaxVertexSamplers; }
+
+ int maxGeometrySamplers() const { return fMaxGeometrySamplers; }
+
+ int maxFragmentSamplers() const { return fMaxFragmentSamplers; }
+
+ int maxCombinedSamplers() const { return fMaxCombinedSamplers; }
+
+ /**
+ * Given a texture's config, this determines what swizzle must be appended to accesses to the
+ * texture in generated shader code. Swizzling may be implemented in texture parameters or a
+ * sampler rather than in the shader. In this case the returned swizzle will always be "rgba".
+ */
+ const GrSwizzle& configTextureSwizzle(GrPixelConfig config) const {
+ return fConfigTextureSwizzle[config];
+ }
+
+ /** Swizzle that should occur on the fragment shader outputs for a given config. */
+ const GrSwizzle& configOutputSwizzle(GrPixelConfig config) const {
+ return fConfigOutputSwizzle[config];
+ }
+
+ /** Precision qualifier that should be used with a sampler, given its config and visibility. */
+ GrSLPrecision samplerPrecision(GrPixelConfig config, GrShaderFlags visibility) const {
+ return static_cast<GrSLPrecision>(fSamplerPrecisions[visibility][config]);
+ }
+
+ GrGLSLGeneration generation() const { return fGLSLGeneration; }
+
+ /**
+ * Returns a string containing the caps info.
+ */
+ SkString dump() const override;
+
+private:
+ /** GrCaps subclasses must call this after filling in the shader precision table. */
+ void initSamplerPrecisionTable();
+
+ void onApplyOptionsOverrides(const GrContextOptions& options) override;
+
+ GrGLSLGeneration fGLSLGeneration;
+
+ bool fDropsTileOnZeroDivide : 1;
+ bool fFBFetchSupport : 1;
+ bool fFBFetchNeedsCustomOutput : 1;
+ bool fBindlessTextureSupport : 1;
+ bool fUsesPrecisionModifiers : 1;
+ bool fCanUseAnyFunctionInShader : 1;
+ bool fFlatInterpolationSupport : 1;
+ bool fNoPerspectiveInterpolationSupport : 1;
+ bool fMultisampleInterpolationSupport : 1;
+ bool fSampleVariablesSupport : 1;
+ bool fSampleMaskOverrideCoverageSupport : 1;
+ bool fExternalTextureSupport : 1;
+ bool fTexelFetchSupport : 1;
+
+ // Used for specific driver bug workarounds
+ bool fCanUseMinAndAbsTogether : 1;
+ bool fMustForceNegatedAtanParamToFloat : 1;
+ bool fRequiresLocalOutputColorForFBFetch : 1;
+
+ const char* fVersionDeclString;
+
+ const char* fShaderDerivativeExtensionString;
+ const char* fFragCoordConventionsExtensionString;
+ const char* fSecondaryOutputExtensionString;
+ const char* fExternalTextureExtensionString;
+ const char* fTexelBufferExtensionString;
+ const char* fNoPerspectiveInterpolationExtensionString;
+ const char* fMultisampleInterpolationExtensionString;
+ const char* fSampleVariablesExtensionString;
+
+ const char* fFBFetchColorName;
+ const char* fFBFetchExtensionString;
+
+ int fMaxVertexSamplers;
+ int fMaxGeometrySamplers;
+ int fMaxFragmentSamplers;
+ int fMaxCombinedSamplers;
+
+ AdvBlendEqInteraction fAdvBlendEqInteraction;
+
+ GrSwizzle fConfigTextureSwizzle[kGrPixelConfigCnt];
+ GrSwizzle fConfigOutputSwizzle[kGrPixelConfigCnt];
+
+ uint8_t fSamplerPrecisions[(1 << kGrShaderTypeCount)][kGrPixelConfigCnt];
+
+ friend class GrGLCaps; // For initialization.
+ friend class GrVkCaps;
+
+ typedef GrShaderCaps INHERITED;
+};
+
+#endif
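
A representative query against the table built by initSamplerPrecisionTable(), assuming the caller already holds an initialized GrGLSLCaps:

    // Hypothetical lookup; "caps" is assumed to be a const GrGLSLCaps& obtained elsewhere.
    GrSLPrecision precision = caps.samplerPrecision(kAlpha_8_GrPixelConfig,
                                                    kFragment_GrShaderFlag);
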
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLColorSpaceXformHelper.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLColorSpaceXformHelper.h
new file mode 100644
index 000000000..5e112f9f9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLColorSpaceXformHelper.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLColorSpaceXformHelper_DEFINED
+#define GrGLSLColorSpaceXformHelper_DEFINED
+
+#include "GrColorSpaceXform.h"
+#include "GrGLSLUniformHandler.h"
+
+/**
+ * Stack helper class to assist with using GrColorSpaceXform within an FP's emitCode function.
+ * This injects the uniform declaration, and stores the information needed to generate correct
+ * gamut-transformation shader code.
+ */
+class GrGLSLColorSpaceXformHelper : public SkNoncopyable {
+public:
+ GrGLSLColorSpaceXformHelper(GrGLSLUniformHandler* uniformHandler,
+ GrColorSpaceXform* colorSpaceXform,
+ GrGLSLProgramDataManager::UniformHandle* handle) {
+ SkASSERT(uniformHandler && handle);
+ if (colorSpaceXform) {
+ *handle = uniformHandler->addUniform(kFragment_GrShaderFlag, kMat44f_GrSLType,
+ kDefault_GrSLPrecision, "ColorXform",
+ &fXformMatrix);
+ } else {
+ fXformMatrix = nullptr;
+ }
+ }
+
+ const char* getXformMatrix() const { return fXformMatrix; }
+
+private:
+ const char* fXformMatrix;
+};
+
+#endif
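
A hypothetical use of the helper inside a fragment processor's emitCode(); apart from the helper's constructor and getXformMatrix(), every name here (args, fragBuilder, fColorSpaceXform, fColorXformUni) is an assumed convention from the surrounding code rather than something defined in this header:

    // Declares the ColorXform uniform (if a transform is present) and applies it to the
    // rgb channels of the output color.
    GrGLSLColorSpaceXformHelper colorSpaceHelper(args.fUniformHandler, fColorSpaceXform.get(),
                                                 &fColorXformUni);
    if (colorSpaceHelper.getXformMatrix()) {
        fragBuilder->codeAppendf("%s.rgb = (%s * vec4(%s.rgb, 1.0)).rgb;", args.fOutputColor,
                                 colorSpaceHelper.getXformMatrix(), args.fOutputColor);
    }
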
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentProcessor.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentProcessor.cpp
new file mode 100644
index 000000000..5ae7fee7d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentProcessor.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLSLFragmentProcessor.h"
+#include "GrFragmentProcessor.h"
+#include "GrProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLUniformHandler.h"
+
+void GrGLSLFragmentProcessor::setData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& processor) {
+ this->onSetData(pdman, processor);
+ SkASSERT(fChildProcessors.count() == processor.numChildProcessors());
+ for (int i = 0; i < fChildProcessors.count(); ++i) {
+ fChildProcessors[i]->setData(pdman, processor.childProcessor(i));
+ }
+}
+
+void GrGLSLFragmentProcessor::emitChild(int childIndex, const char* inputColor, EmitArgs& args) {
+ this->internalEmitChild(childIndex, inputColor, args.fOutputColor, args);
+}
+
+void GrGLSLFragmentProcessor::emitChild(int childIndex, const char* inputColor,
+ SkString* outputColor, EmitArgs& args) {
+
+ SkASSERT(outputColor);
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ outputColor->append(fragBuilder->getMangleString());
+ fragBuilder->codeAppendf("vec4 %s;", outputColor->c_str());
+ this->internalEmitChild(childIndex, inputColor, outputColor->c_str(), args);
+}
+
+void GrGLSLFragmentProcessor::internalEmitChild(int childIndex, const char* inputColor,
+ const char* outputColor, EmitArgs& args) {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ fragBuilder->onBeforeChildProcEmitCode(); // call first so mangleString is updated
+
+ const GrFragmentProcessor& childProc = args.fFp.childProcessor(childIndex);
+
+ // emit the code for the child in its own scope
+ fragBuilder->codeAppend("{\n");
+ fragBuilder->codeAppendf("// Child Index %d (mangle: %s): %s\n", childIndex,
+ fragBuilder->getMangleString().c_str(), childProc.name());
+ TransformedCoordVars coordVars = args.fTransformedCoords.childInputs(childIndex);
+ TextureSamplers textureSamplers = args.fTexSamplers.childInputs(childIndex);
+ BufferSamplers bufferSamplers = args.fBufferSamplers.childInputs(childIndex);
+ EmitArgs childArgs(fragBuilder,
+ args.fUniformHandler,
+ args.fGLSLCaps,
+ childProc,
+ outputColor,
+ inputColor,
+ coordVars,
+ textureSamplers,
+ bufferSamplers,
+ args.fGpImplementsDistanceVector);
+ this->childProcessor(childIndex)->emitCode(childArgs);
+ fragBuilder->codeAppend("}\n");
+
+ fragBuilder->onAfterChildProcEmitCode();
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+GrGLSLFragmentProcessor* GrGLSLFragmentProcessor::Iter::next() {
+ if (fFPStack.empty()) {
+ return nullptr;
+ }
+ GrGLSLFragmentProcessor* back = fFPStack.back();
+ fFPStack.pop_back();
+ for (int i = back->numChildProcessors() - 1; i >= 0; --i) {
+ fFPStack.push_back(back->childProcessor(i));
+ }
+ return back;
+}
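
Iter::next() above walks the processor tree in pre-order by popping the back of a stack and pushing that node's children in reverse. The same traversal on a plain node tree, as a standalone sketch:

    #include <cstdio>
    #include <vector>

    struct Node {
        const char* name;
        std::vector<Node*> children;
    };

    int main() {
        Node leafA{"A", {}}, leafB{"B", {}};
        Node mid{"mid", {&leafA, &leafB}};
        Node root{"root", {&mid}};

        std::vector<Node*> stack{&root};
        while (!stack.empty()) {
            Node* back = stack.back();
            stack.pop_back();
            // Push children in reverse so the leftmost child is visited first.
            for (int i = (int)back->children.size() - 1; i >= 0; --i) {
                stack.push_back(back->children[i]);
            }
            std::printf("%s ", back->name);  // visits: root mid A B
        }
        std::printf("\n");
        return 0;
    }
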
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentProcessor.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentProcessor.h
new file mode 100644
index 000000000..d2f00f8b3
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentProcessor.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLFragmentProcessor_DEFINED
+#define GrGLSLFragmentProcessor_DEFINED
+
+#include "GrFragmentProcessor.h"
+#include "GrShaderVar.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLSampler.h"
+
+class GrProcessor;
+class GrProcessorKeyBuilder;
+class GrGLSLCaps;
+class GrGLSLFPBuilder;
+class GrGLSLFPFragmentBuilder;
+class GrGLSLUniformHandler;
+
+class GrGLSLFragmentProcessor {
+public:
+ GrGLSLFragmentProcessor() {}
+
+ virtual ~GrGLSLFragmentProcessor() {
+ for (int i = 0; i < fChildProcessors.count(); ++i) {
+ delete fChildProcessors[i];
+ }
+ }
+
+ typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+ typedef GrGLSLProgramDataManager::UniformHandle SamplerHandle;
+
+private:
+ /**
+ * This class allows the shader builder to provide each GrGLSLFragmentProcessor with an array of
+ * generated variables where each generated variable corresponds to an element of an array on
+ * the GrFragmentProcessor that generated the GLSLFP. For example, this is used to provide a
+ * variable holding transformed coords for each GrCoordTransform owned by the FP.
+ */
+ template <typename T, typename FPBASE, int (FPBASE::*COUNT)() const>
+ class BuilderInputProvider {
+ public:
+ BuilderInputProvider(const GrFragmentProcessor* fp, const T* ts) : fFP(fp) , fTs(ts) {}
+
+ const T& operator[] (int i) const {
+ SkASSERT(i >= 0 && i < (fFP->*COUNT)());
+ return fTs[i];
+ }
+
+ BuilderInputProvider childInputs(int childIdx) const {
+ const GrFragmentProcessor* child = &fFP->childProcessor(childIdx);
+ GrFragmentProcessor::Iter iter(fFP);
+ int numToSkip = 0;
+ while (true) {
+ const GrFragmentProcessor* fp = iter.next();
+ if (fp == child) {
+ return BuilderInputProvider(child, fTs + numToSkip);
+ }
+ numToSkip += (fp->*COUNT)();
+ }
+ }
+
+ private:
+ const GrFragmentProcessor* fFP;
+ const T* fTs;
+ };
+
+public:
+ using TransformedCoordVars = BuilderInputProvider<GrShaderVar, GrFragmentProcessor,
+ &GrFragmentProcessor::numCoordTransforms>;
+ using TextureSamplers = BuilderInputProvider<SamplerHandle, GrProcessor,
+ &GrProcessor::numTextures>;
+ using BufferSamplers = BuilderInputProvider<SamplerHandle, GrProcessor,
+ &GrProcessor::numBuffers>;
+
+ /** Called when the program stage should insert its code into the shaders. The code in each
+ shader will be in its own block ({}) and so locally scoped names will not collide across
+ stages.
+
+ @param fragBuilder Interface used to emit code in the shaders.
+ @param fp The processor that generated this program stage.
+ @param key The key that was computed by GenKey() from the generating
+ GrProcessor.
+ @param outputColor A predefined vec4 in the FS in which the stage should place its
+ output color (or coverage).
+ @param inputColor A vec4 that holds the input color to the stage in the FS. This may
+ be nullptr in which case the implied input is solid white (all
+ ones). TODO: Better system for communicating optimization info
+ (e.g. input color is solid white, trans black, known to be opaque,
+ etc.) that allows the processor to communicate back similar known
+ info about its output.
+ @param transformedCoords Fragment shader variables containing the coords computed using
+ each of the GrFragmentProcessor's GrCoordTransforms.
+ @param texSamplers Contains one entry for each GrTextureAccess of the GrProcessor.
+ These can be passed to the builder to emit texture reads in the
+ generated code.
+ @param bufferSamplers Contains one entry for each GrBufferAccess of the GrProcessor.
+ These can be passed to the builder to emit buffer reads in the
+ generated code.
+ */
+ struct EmitArgs {
+ EmitArgs(GrGLSLFPFragmentBuilder* fragBuilder,
+ GrGLSLUniformHandler* uniformHandler,
+ const GrGLSLCaps* caps,
+ const GrFragmentProcessor& fp,
+ const char* outputColor,
+ const char* inputColor,
+ const TransformedCoordVars& transformedCoordVars,
+ const TextureSamplers& textureSamplers,
+ const BufferSamplers& bufferSamplers,
+ bool gpImplementsDistanceVector)
+ : fFragBuilder(fragBuilder)
+ , fUniformHandler(uniformHandler)
+ , fGLSLCaps(caps)
+ , fFp(fp)
+ , fOutputColor(outputColor)
+ , fInputColor(inputColor)
+ , fTransformedCoords(transformedCoordVars)
+ , fTexSamplers(textureSamplers)
+ , fBufferSamplers(bufferSamplers)
+ , fGpImplementsDistanceVector(gpImplementsDistanceVector) {}
+ GrGLSLFPFragmentBuilder* fFragBuilder;
+ GrGLSLUniformHandler* fUniformHandler;
+ const GrGLSLCaps* fGLSLCaps;
+ const GrFragmentProcessor& fFp;
+ const char* fOutputColor;
+ const char* fInputColor;
+ const TransformedCoordVars& fTransformedCoords;
+ const TextureSamplers& fTexSamplers;
+ const BufferSamplers& fBufferSamplers;
+ bool fGpImplementsDistanceVector;
+ };
+
+ virtual void emitCode(EmitArgs&) = 0;
+
+ void setData(const GrGLSLProgramDataManager& pdman, const GrFragmentProcessor& processor);
+
+ static void GenKey(const GrProcessor&, const GrGLSLCaps&, GrProcessorKeyBuilder*) {}
+
+ int numChildProcessors() const { return fChildProcessors.count(); }
+
+ GrGLSLFragmentProcessor* childProcessor(int index) {
+ return fChildProcessors[index];
+ }
+
+ /** Will emit the code of a child proc in its own scope. Pass in the parent's EmitArgs and
+ * emitChild will automatically extract the coords and samplers of that child and pass them
+ * on to the child's emitCode(). Also, any uniforms or functions emitted by the child will
+ * have their names mangled to prevent redefinitions. The output color name is also mangled,
+ * and is therefore an in/out param; it will be declared in mangled form by emitChild(). It is
+ * legal to pass nullptr as inputColor, since all fragment processors are required to work
+ * without an input color.
+ */
+ void emitChild(int childIndex, const char* inputColor, SkString* outputColor,
+ EmitArgs& parentArgs);
+
+ /** Variation that uses the parent's output color variable to hold the child's output.*/
+ void emitChild(int childIndex, const char* inputColor, EmitArgs& parentArgs);
+
+ /**
+ * Pre-order traversal of a GLSLFP hierarchy, or of multiple trees with roots in an array of
+ * GLSLFPS. This agrees with the traversal order of GrFragmentProcessor::Iter
+ */
+ class Iter : public SkNoncopyable {
+ public:
+ explicit Iter(GrGLSLFragmentProcessor* fp) { fFPStack.push_back(fp); }
+ explicit Iter(GrGLSLFragmentProcessor* fps[], int cnt) {
+ for (int i = cnt - 1; i >= 0; --i) {
+ fFPStack.push_back(fps[i]);
+ }
+ }
+ GrGLSLFragmentProcessor* next();
+
+ private:
+ SkSTArray<4, GrGLSLFragmentProcessor*, true> fFPStack;
+ };
+
+protected:
+ /** A GrGLSLFragmentProcessor instance can be reused with any GrFragmentProcessor that produces
+ the same stage key; this function reads data from a GrFragmentProcessor and uploads any
+ uniform variables required by the shaders created in emitCode(). The GrFragmentProcessor
+ parameter is guaranteed to be of the same type that created this GrGLSLFragmentProcessor and
+ to have an identical processor key as the one that created this GrGLSLFragmentProcessor. */
+ // TODO update this to pass in GrFragmentProcessor
+ virtual void onSetData(const GrGLSLProgramDataManager&, const GrProcessor&) {}
+
+private:
+ void internalEmitChild(int, const char*, const char*, EmitArgs&);
+
+ SkTArray<GrGLSLFragmentProcessor*, true> fChildProcessors;
+
+ friend class GrFragmentProcessor;
+};
+
+#endif
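
A minimal subclass, sketched from the interface above; the class name and the half-brightness math are illustrative only, and the matching GrFragmentProcessor that would create it is assumed to exist elsewhere:

    // Hypothetical GLSL fragment processor that emits code scaling its input color by 0.5.
    class GrGLSLDimFP : public GrGLSLFragmentProcessor {
    public:
        void emitCode(EmitArgs& args) override {
            GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
            // A null input color means the implied input is solid white.
            fragBuilder->codeAppendf("%s = 0.5 * %s;", args.fOutputColor,
                                     args.fInputColor ? args.fInputColor : "vec4(1.0)");
        }
    };
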
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentShaderBuilder.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentShaderBuilder.cpp
new file mode 100644
index 000000000..d35730f63
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentShaderBuilder.cpp
@@ -0,0 +1,397 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLSLFragmentShaderBuilder.h"
+#include "GrRenderTarget.h"
+#include "GrRenderTargetPriv.h"
+#include "gl/GrGLGpu.h"
+#include "glsl/GrGLSL.h"
+#include "glsl/GrGLSLCaps.h"
+#include "glsl/GrGLSLProgramBuilder.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "glsl/GrGLSLVarying.h"
+
+const char* GrGLSLFragmentShaderBuilder::kDstColorName = "_dstColor";
+
+static const char* sample_offset_array_name(GrGLSLFPFragmentBuilder::Coordinates coords) {
+ static const char* kArrayNames[] = {
+ "deviceSpaceSampleOffsets",
+ "windowSpaceSampleOffsets"
+ };
+ return kArrayNames[coords];
+
+ GR_STATIC_ASSERT(0 == GrGLSLFPFragmentBuilder::kSkiaDevice_Coordinates);
+ GR_STATIC_ASSERT(1 == GrGLSLFPFragmentBuilder::kGLSLWindow_Coordinates);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kArrayNames) == GrGLSLFPFragmentBuilder::kLast_Coordinates + 1);
+}
+
+static const char* specific_layout_qualifier_name(GrBlendEquation equation) {
+ SkASSERT(GrBlendEquationIsAdvanced(equation));
+
+ static const char* kLayoutQualifierNames[] = {
+ "blend_support_screen",
+ "blend_support_overlay",
+ "blend_support_darken",
+ "blend_support_lighten",
+ "blend_support_colordodge",
+ "blend_support_colorburn",
+ "blend_support_hardlight",
+ "blend_support_softlight",
+ "blend_support_difference",
+ "blend_support_exclusion",
+ "blend_support_multiply",
+ "blend_support_hsl_hue",
+ "blend_support_hsl_saturation",
+ "blend_support_hsl_color",
+ "blend_support_hsl_luminosity"
+ };
+ return kLayoutQualifierNames[equation - kFirstAdvancedGrBlendEquation];
+
+ GR_STATIC_ASSERT(0 == kScreen_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(1 == kOverlay_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(2 == kDarken_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(3 == kLighten_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(4 == kColorDodge_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(5 == kColorBurn_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(6 == kHardLight_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(7 == kSoftLight_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(8 == kDifference_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(9 == kExclusion_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(10 == kMultiply_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(11 == kHSLHue_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(12 == kHSLSaturation_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(13 == kHSLColor_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(14 == kHSLLuminosity_GrBlendEquation - kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kLayoutQualifierNames) ==
+ kGrBlendEquationCnt - kFirstAdvancedGrBlendEquation);
+}
+
+uint8_t GrGLSLFragmentShaderBuilder::KeyForSurfaceOrigin(GrSurfaceOrigin origin) {
+ SkASSERT(kTopLeft_GrSurfaceOrigin == origin || kBottomLeft_GrSurfaceOrigin == origin);
+ return origin;
+
+ GR_STATIC_ASSERT(1 == kTopLeft_GrSurfaceOrigin);
+ GR_STATIC_ASSERT(2 == kBottomLeft_GrSurfaceOrigin);
+}
+
+GrGLSLFragmentShaderBuilder::GrGLSLFragmentShaderBuilder(GrGLSLProgramBuilder* program)
+ : GrGLSLFragmentBuilder(program)
+ , fSetupFragPosition(false)
+ , fHasCustomColorOutput(false)
+ , fCustomColorOutputIndex(-1)
+ , fHasSecondaryOutput(false)
+ , fUsedSampleOffsetArrays(0)
+ , fHasInitializedSampleMask(false) {
+ fSubstageIndices.push_back(0);
+#ifdef SK_DEBUG
+ fUsedProcessorFeatures = GrProcessor::kNone_RequiredFeatures;
+ fHasReadDstColor = false;
+#endif
+}
+
+bool GrGLSLFragmentShaderBuilder::enableFeature(GLSLFeature feature) {
+ const GrGLSLCaps& glslCaps = *fProgramBuilder->glslCaps();
+ switch (feature) {
+ case kStandardDerivatives_GLSLFeature:
+ if (!glslCaps.shaderDerivativeSupport()) {
+ return false;
+ }
+ if (const char* extension = glslCaps.shaderDerivativeExtensionString()) {
+ this->addFeature(1 << kStandardDerivatives_GLSLFeature, extension);
+ }
+ return true;
+ case kPixelLocalStorage_GLSLFeature:
+ if (glslCaps.pixelLocalStorageSize() <= 0) {
+ return false;
+ }
+ this->addFeature(1 << kPixelLocalStorage_GLSLFeature,
+ "GL_EXT_shader_pixel_local_storage");
+ return true;
+ case kMultisampleInterpolation_GLSLFeature:
+ if (!glslCaps.multisampleInterpolationSupport()) {
+ return false;
+ }
+ if (const char* extension = glslCaps.multisampleInterpolationExtensionString()) {
+ this->addFeature(1 << kMultisampleInterpolation_GLSLFeature, extension);
+ }
+ return true;
+ default:
+ SkFAIL("Unexpected GLSLFeature requested.");
+ return false;
+ }
+}
+
+SkString GrGLSLFragmentShaderBuilder::ensureCoords2D(const GrShaderVar& coords) {
+ if (kVec3f_GrSLType != coords.getType()) {
+ SkASSERT(kVec2f_GrSLType == coords.getType());
+ return coords.getName();
+ }
+
+ SkString coords2D;
+ coords2D.printf("%s_ensure2D", coords.c_str());
+ this->codeAppendf("\tvec2 %s = %s.xy / %s.z;", coords2D.c_str(), coords.c_str(),
+ coords.c_str());
+ return coords2D;
+}
+
+const char* GrGLSLFragmentShaderBuilder::fragmentPosition() {
+ SkDEBUGCODE(fUsedProcessorFeatures |= GrProcessor::kFragmentPosition_RequiredFeature;)
+
+ const GrGLSLCaps* glslCaps = fProgramBuilder->glslCaps();
+ // We only declare "gl_FragCoord" when we're in the case where we want to use layout qualifiers
+ // to reverse y. Otherwise it isn't necessary and whether the "in" qualifier appears in the
+ // declaration varies in earlier GLSL specs. So it is simpler to omit it.
+ if (kTopLeft_GrSurfaceOrigin == this->getSurfaceOrigin()) {
+ fSetupFragPosition = true;
+ return "gl_FragCoord";
+ } else if (const char* extension = glslCaps->fragCoordConventionsExtensionString()) {
+ if (!fSetupFragPosition) {
+ if (glslCaps->generation() < k150_GrGLSLGeneration) {
+ this->addFeature(1 << kFragCoordConventions_GLSLPrivateFeature,
+ extension);
+ }
+ fInputs.push_back().set(kVec4f_GrSLType,
+ GrGLSLShaderVar::kIn_TypeModifier,
+ "gl_FragCoord",
+ kDefault_GrSLPrecision,
+ "origin_upper_left");
+ fSetupFragPosition = true;
+ }
+ return "gl_FragCoord";
+ } else {
+ static const char* kTempName = "tmpXYFragCoord";
+ static const char* kCoordName = "fragCoordYDown";
+ if (!fSetupFragPosition) {
+ const char* rtHeightName;
+
+ fProgramBuilder->addRTHeightUniform("RTHeight", &rtHeightName);
+
+ // The Adreno compiler seems to be very touchy about access to "gl_FragCoord".
+ // Accessing glFragCoord.zw can cause a program to fail to link. Additionally,
+ // depending on the surrounding code, accessing .xy with a uniform involved can
+ // do the same thing. Copying gl_FragCoord.xy into a temp vec2 beforehand
+ // (and only accessing .xy) seems to "fix" things.
+ const char* precision = glslCaps->usesPrecisionModifiers() ? "highp " : "";
+ this->codePrependf("\t%svec4 %s = vec4(%s.x, %s - %s.y, 1.0, 1.0);\n",
+ precision, kCoordName, kTempName, rtHeightName, kTempName);
+ this->codePrependf("%svec2 %s = gl_FragCoord.xy;", precision, kTempName);
+ fSetupFragPosition = true;
+ }
+ SkASSERT(fProgramBuilder->fUniformHandles.fRTHeightUni.isValid());
+ return kCoordName;
+ }
+}
+
+const char* GrGLSLFragmentShaderBuilder::distanceVectorName() const {
+ return "fsDistanceVector";
+}
+
+void GrGLSLFragmentShaderBuilder::appendOffsetToSample(const char* sampleIdx, Coordinates coords) {
+ SkASSERT(fProgramBuilder->header().fSamplePatternKey);
+ SkDEBUGCODE(fUsedProcessorFeatures |= GrProcessor::kSampleLocations_RequiredFeature);
+ if (kTopLeft_GrSurfaceOrigin == this->getSurfaceOrigin()) {
+ // With a top left origin, device and window space are equal, so we only use device coords.
+ coords = kSkiaDevice_Coordinates;
+ }
+ this->codeAppendf("%s[%s]", sample_offset_array_name(coords), sampleIdx);
+ fUsedSampleOffsetArrays |= (1 << coords);
+}
+
+void GrGLSLFragmentShaderBuilder::maskSampleCoverage(const char* mask, bool invert) {
+ const GrGLSLCaps& glslCaps = *fProgramBuilder->glslCaps();
+ if (!glslCaps.sampleVariablesSupport()) {
+ SkDEBUGFAIL("Attempted to mask sample coverage without support.");
+ return;
+ }
+ if (const char* extension = glslCaps.sampleVariablesExtensionString()) {
+ this->addFeature(1 << kSampleVariables_GLSLPrivateFeature, extension);
+ }
+ if (!fHasInitializedSampleMask) {
+ this->codePrependf("gl_SampleMask[0] = -1;");
+ fHasInitializedSampleMask = true;
+ }
+ if (invert) {
+ this->codeAppendf("gl_SampleMask[0] &= ~(%s);", mask);
+ } else {
+ this->codeAppendf("gl_SampleMask[0] &= %s;", mask);
+ }
+}
+
+void GrGLSLFragmentShaderBuilder::overrideSampleCoverage(const char* mask) {
+ const GrGLSLCaps& glslCaps = *fProgramBuilder->glslCaps();
+ if (!glslCaps.sampleMaskOverrideCoverageSupport()) {
+ SkDEBUGFAIL("Attempted to override sample coverage without support.");
+ return;
+ }
+ SkASSERT(glslCaps.sampleVariablesSupport());
+ if (const char* extension = glslCaps.sampleVariablesExtensionString()) {
+ this->addFeature(1 << kSampleVariables_GLSLPrivateFeature, extension);
+ }
+ if (this->addFeature(1 << kSampleMaskOverrideCoverage_GLSLPrivateFeature,
+ "GL_NV_sample_mask_override_coverage")) {
+ // Redeclare gl_SampleMask with layout(override_coverage) if we haven't already.
+ fOutputs.push_back().set(kInt_GrSLType, GrShaderVar::kOut_TypeModifier,
+ "gl_SampleMask", 1, kHigh_GrSLPrecision,
+ "override_coverage");
+ }
+ this->codeAppendf("gl_SampleMask[0] = %s;", mask);
+ fHasInitializedSampleMask = true;
+}
+
+const char* GrGLSLFragmentShaderBuilder::dstColor() {
+ SkDEBUGCODE(fHasReadDstColor = true;)
+
+ const char* override = fProgramBuilder->primitiveProcessor().getDestColorOverride();
+ if (override != nullptr) {
+ return override;
+ }
+
+ const GrGLSLCaps* glslCaps = fProgramBuilder->glslCaps();
+ if (glslCaps->fbFetchSupport()) {
+ this->addFeature(1 << kFramebufferFetch_GLSLPrivateFeature,
+ glslCaps->fbFetchExtensionString());
+
+ // Some versions of this extension string require declaring custom color output on ES 3.0+
+ const char* fbFetchColorName = glslCaps->fbFetchColorName();
+ if (glslCaps->fbFetchNeedsCustomOutput()) {
+ this->enableCustomOutput();
+ fOutputs[fCustomColorOutputIndex].setTypeModifier(GrShaderVar::kInOut_TypeModifier);
+ fbFetchColorName = DeclaredColorOutputName();
+ // Set the dstColor to an intermediate variable so we don't override it with the output
+ this->codeAppendf("vec4 %s = %s;", kDstColorName, fbFetchColorName);
+ } else {
+ return fbFetchColorName;
+ }
+ }
+ return kDstColorName;
+}
+
+void GrGLSLFragmentShaderBuilder::enableAdvancedBlendEquationIfNeeded(GrBlendEquation equation) {
+ SkASSERT(GrBlendEquationIsAdvanced(equation));
+
+ const GrGLSLCaps& caps = *fProgramBuilder->glslCaps();
+ if (!caps.mustEnableAdvBlendEqs()) {
+ return;
+ }
+
+ this->addFeature(1 << kBlendEquationAdvanced_GLSLPrivateFeature,
+ "GL_KHR_blend_equation_advanced");
+ if (caps.mustEnableSpecificAdvBlendEqs()) {
+ this->addLayoutQualifier(specific_layout_qualifier_name(equation), kOut_InterfaceQualifier);
+ } else {
+ this->addLayoutQualifier("blend_support_all_equations", kOut_InterfaceQualifier);
+ }
+}
+
+void GrGLSLFragmentShaderBuilder::enableCustomOutput() {
+ if (!fHasCustomColorOutput) {
+ fHasCustomColorOutput = true;
+ fCustomColorOutputIndex = fOutputs.count();
+ fOutputs.push_back().set(kVec4f_GrSLType,
+ GrGLSLShaderVar::kOut_TypeModifier,
+ DeclaredColorOutputName());
+ fProgramBuilder->finalizeFragmentOutputColor(fOutputs.back());
+ }
+}
+
+void GrGLSLFragmentShaderBuilder::enableSecondaryOutput() {
+ SkASSERT(!fHasSecondaryOutput);
+ fHasSecondaryOutput = true;
+ const GrGLSLCaps& caps = *fProgramBuilder->glslCaps();
+ if (const char* extension = caps.secondaryOutputExtensionString()) {
+ this->addFeature(1 << kBlendFuncExtended_GLSLPrivateFeature, extension);
+ }
+
+ // If the primary output is declared, we must also declare the secondary output
+ // and vice versa, since it is not allowed to use a built-in gl_FragColor and a custom
+ // output. The condition also coincides with the condition in which GLES SL 2.0
+ // requires the built-in gl_SecondaryFragColorEXT, whereas 3.0 requires a custom output.
+ if (caps.mustDeclareFragmentShaderOutput()) {
+ fOutputs.push_back().set(kVec4f_GrSLType, GrGLSLShaderVar::kOut_TypeModifier,
+ DeclaredSecondaryColorOutputName());
+ fProgramBuilder->finalizeFragmentSecondaryColor(fOutputs.back());
+ }
+}
+
+const char* GrGLSLFragmentShaderBuilder::getPrimaryColorOutputName() const {
+ return fHasCustomColorOutput ? DeclaredColorOutputName() : "gl_FragColor";
+}
+
+void GrGLSLFragmentBuilder::declAppendf(const char* fmt, ...) {
+ va_list argp;
+ va_start(argp, fmt);
+ inputs().appendVAList(fmt, argp);
+ va_end(argp);
+}
+
+const char* GrGLSLFragmentShaderBuilder::getSecondaryColorOutputName() const {
+ const GrGLSLCaps& caps = *fProgramBuilder->glslCaps();
+ return caps.mustDeclareFragmentShaderOutput() ? DeclaredSecondaryColorOutputName()
+ : "gl_SecondaryFragColorEXT";
+}
+
+GrSurfaceOrigin GrGLSLFragmentShaderBuilder::getSurfaceOrigin() const {
+ SkASSERT(fProgramBuilder->header().fSurfaceOriginKey);
+ return static_cast<GrSurfaceOrigin>(fProgramBuilder->header().fSurfaceOriginKey);
+
+ GR_STATIC_ASSERT(1 == kTopLeft_GrSurfaceOrigin);
+ GR_STATIC_ASSERT(2 == kBottomLeft_GrSurfaceOrigin);
+}
+
+void GrGLSLFragmentShaderBuilder::onFinalize() {
+ fProgramBuilder->varyingHandler()->getFragDecls(&this->inputs(), &this->outputs());
+ GrGLSLAppendDefaultFloatPrecisionDeclaration(kDefault_GrSLPrecision,
+ *fProgramBuilder->glslCaps(),
+ &this->precisionQualifier());
+ if (fUsedSampleOffsetArrays & (1 << kSkiaDevice_Coordinates)) {
+ this->defineSampleOffsetArray(sample_offset_array_name(kSkiaDevice_Coordinates),
+ SkMatrix::MakeTrans(-0.5f, -0.5f));
+ }
+ if (fUsedSampleOffsetArrays & (1 << kGLSLWindow_Coordinates)) {
+ // With a top left origin, device and window space are equal, so we only use device coords.
+ SkASSERT(kBottomLeft_GrSurfaceOrigin == this->getSurfaceOrigin());
+ SkMatrix m;
+ m.setScale(1, -1);
+ m.preTranslate(-0.5f, -0.5f);
+ this->defineSampleOffsetArray(sample_offset_array_name(kGLSLWindow_Coordinates), m);
+ }
+}
+
+void GrGLSLFragmentShaderBuilder::defineSampleOffsetArray(const char* name, const SkMatrix& m) {
+ SkASSERT(fProgramBuilder->caps()->sampleLocationsSupport());
+ const GrPipeline& pipeline = fProgramBuilder->pipeline();
+ const GrRenderTargetPriv& rtp = pipeline.getRenderTarget()->renderTargetPriv();
+ const GrGpu::MultisampleSpecs& specs = rtp.getMultisampleSpecs(pipeline.getStencil());
+ SkSTArray<16, SkPoint, true> offsets;
+ offsets.push_back_n(specs.fEffectiveSampleCnt);
+ m.mapPoints(offsets.begin(), specs.fSampleLocations, specs.fEffectiveSampleCnt);
+ this->definitions().append("const ");
+ if (fProgramBuilder->glslCaps()->usesPrecisionModifiers()) {
+ this->definitions().append("highp ");
+ }
+ this->definitions().appendf("vec2 %s[] = vec2[](", name);
+ for (int i = 0; i < specs.fEffectiveSampleCnt; ++i) {
+ this->definitions().appendf("vec2(%f, %f)", offsets[i].x(), offsets[i].y());
+ this->definitions().append(i + 1 != specs.fEffectiveSampleCnt ? ", " : ");\n");
+ }
+}
+
+void GrGLSLFragmentShaderBuilder::onBeforeChildProcEmitCode() {
+ SkASSERT(fSubstageIndices.count() >= 1);
+ fSubstageIndices.push_back(0);
+ // second-to-last value in the fSubstageIndices stack is the index of the child proc
+ // at that level which is currently emitting code.
+ fMangleString.appendf("_c%d", fSubstageIndices[fSubstageIndices.count() - 2]);
+}
+
+void GrGLSLFragmentShaderBuilder::onAfterChildProcEmitCode() {
+ SkASSERT(fSubstageIndices.count() >= 2);
+ fSubstageIndices.pop_back();
+ fSubstageIndices.back()++;
+ int removeAt = fMangleString.findLastOf('_');
+ fMangleString.remove(removeAt, fMangleString.size() - removeAt);
+}
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentShaderBuilder.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentShaderBuilder.h
new file mode 100644
index 000000000..ecb6d455d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLFragmentShaderBuilder.h
@@ -0,0 +1,255 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLFragmentShaderBuilder_DEFINED
+#define GrGLSLFragmentShaderBuilder_DEFINED
+
+#include "GrGLSLShaderBuilder.h"
+
+#include "GrProcessor.h"
+
+class GrRenderTarget;
+class GrGLSLVarying;
+
+/*
+ * This base class encapsulates the common functionality which all processors use to build fragment
+ * shaders.
+ */
+class GrGLSLFragmentBuilder : public GrGLSLShaderBuilder {
+public:
+ GrGLSLFragmentBuilder(GrGLSLProgramBuilder* program) : INHERITED(program) {}
+ virtual ~GrGLSLFragmentBuilder() {}
+
+ /**
+ * Use of these features may require a GLSL extension to be enabled. Shaders may not compile
+ * if code is added that uses one of these features without calling enableFeature()
+ */
+ enum GLSLFeature {
+ kStandardDerivatives_GLSLFeature = kLastGLSLPrivateFeature + 1,
+ kPixelLocalStorage_GLSLFeature,
+ kMultisampleInterpolation_GLSLFeature
+ };
+
+ /**
+ * If the feature is supported then true is returned and any necessary #extension declarations
+ * are added to the shaders. If the feature is not supported then false will be returned.
+ */
+ virtual bool enableFeature(GLSLFeature) = 0;
+
+ /**
+     * This returns a variable name to access the 2D, perspective-correct version of the coords in
+     * the fragment shader. The passed-in coordinates must be of type kVec2f or kVec3f. If the
+     * coordinates are 3-dimensional, a perspective divide (xy / z) is emitted into the fragment
+     * shader to convert them to 2D.
+ */
+ virtual SkString ensureCoords2D(const GrShaderVar&) = 0;
+
+
+ /** Returns a variable name that represents the position of the fragment in the FS. The position
+ is in device space (e.g. 0,0 is the top left and pixel centers are at half-integers). */
+ virtual const char* fragmentPosition() = 0;
+
+ // TODO: remove this method.
+ void declAppendf(const char* fmt, ...);
+
+private:
+ typedef GrGLSLShaderBuilder INHERITED;
+};
+
+/*
+ * This class is used by fragment processors to build their fragment code.
+ */
+class GrGLSLFPFragmentBuilder : virtual public GrGLSLFragmentBuilder {
+public:
+ /** Appease the compiler; the derived class initializes GrGLSLFragmentBuilder. */
+ GrGLSLFPFragmentBuilder() : GrGLSLFragmentBuilder(nullptr) {}
+
+ enum Coordinates {
+ kSkiaDevice_Coordinates,
+ kGLSLWindow_Coordinates,
+
+ kLast_Coordinates = kGLSLWindow_Coordinates
+ };
+
+ /**
+ * Appends the offset from the center of the pixel to a specified sample.
+ *
+ * @param sampleIdx GLSL expression of the sample index.
+ * @param Coordinates Coordinate space in which to emit the offset.
+ *
+ * A processor must call setWillUseSampleLocations in its constructor before using this method.
+ */
+ virtual void appendOffsetToSample(const char* sampleIdx, Coordinates) = 0;
+
+ /**
+ * Subtracts sample coverage from the fragment. Any sample whose corresponding bit is not found
+ * in the mask will not be written out to the framebuffer.
+ *
+ * @param mask int that contains the sample mask. Bit N corresponds to the Nth sample.
+ * @param invert perform a bit-wise NOT on the provided mask before applying it?
+ *
+ * Requires GLSL support for sample variables.
+ */
+ virtual void maskSampleCoverage(const char* mask, bool invert = false) = 0;
+
+ /** Returns a variable name that represents a vector to the nearest edge of the shape, in source
+ space coordinates. */
+ virtual const char* distanceVectorName() const = 0;
+
+ /**
+ * Fragment procs with child procs should call these functions before/after calling emitCode
+ * on a child proc.
+ */
+ virtual void onBeforeChildProcEmitCode() = 0;
+ virtual void onAfterChildProcEmitCode() = 0;
+
+ virtual const SkString& getMangleString() const = 0;
+};
+
+/*
+ * This class is used by primitive processors to build their fragment code.
+ */
+class GrGLSLPPFragmentBuilder : public GrGLSLFPFragmentBuilder {
+public:
+ /** Appease the compiler; the derived class initializes GrGLSLFragmentBuilder. */
+ GrGLSLPPFragmentBuilder() : GrGLSLFragmentBuilder(nullptr) {}
+
+ /**
+ * Overrides the fragment's sample coverage. The provided mask determines which samples will now
+ * be written out to the framebuffer. Note that this mask can be reduced by a future call to
+ * maskSampleCoverage.
+ *
+ * If a primitive processor uses this method, it must guarantee that every codepath through the
+ * shader overrides the sample mask at some point.
+ *
+ * @param mask int that contains the new coverage mask. Bit N corresponds to the Nth sample.
+ *
+ * Requires NV_sample_mask_override_coverage.
+ */
+ virtual void overrideSampleCoverage(const char* mask) = 0;
+};
+
+/*
+ * This class is used by Xfer processors to build their fragment code.
+ */
+class GrGLSLXPFragmentBuilder : virtual public GrGLSLFragmentBuilder {
+public:
+ /** Appease the compiler; the derived class initializes GrGLSLFragmentBuilder. */
+ GrGLSLXPFragmentBuilder() : GrGLSLFragmentBuilder(nullptr) {}
+
+ virtual bool hasCustomColorOutput() const = 0;
+ virtual bool hasSecondaryOutput() const = 0;
+
+ /** Returns the variable name that holds the color of the destination pixel. This may be nullptr
+ * if no effect advertised that it will read the destination. */
+ virtual const char* dstColor() = 0;
+
+ /** Adds any necessary layout qualifiers in order to legalize the supplied blend equation with
+ this shader. It is only legal to call this method with an advanced blend equation, and only
+ if these equations are supported. */
+ virtual void enableAdvancedBlendEquationIfNeeded(GrBlendEquation) = 0;
+};
+
+/*
+ * This class implements the various fragment builder interfaces.
+ */
+class GrGLSLFragmentShaderBuilder : public GrGLSLPPFragmentBuilder, public GrGLSLXPFragmentBuilder {
+public:
+ /** Returns a nonzero key for a surface's origin. This should only be called if a processor will
+ use the fragment position and/or sample locations. */
+ static uint8_t KeyForSurfaceOrigin(GrSurfaceOrigin);
+
+ GrGLSLFragmentShaderBuilder(GrGLSLProgramBuilder* program);
+
+ // Shared GrGLSLFragmentBuilder interface.
+ bool enableFeature(GLSLFeature) override;
+ virtual SkString ensureCoords2D(const GrShaderVar&) override;
+ const char* fragmentPosition() override;
+ const char* distanceVectorName() const override;
+
+ // GrGLSLFPFragmentBuilder interface.
+ void appendOffsetToSample(const char* sampleIdx, Coordinates) override;
+ void maskSampleCoverage(const char* mask, bool invert = false) override;
+ void overrideSampleCoverage(const char* mask) override;
+ const SkString& getMangleString() const override { return fMangleString; }
+ void onBeforeChildProcEmitCode() override;
+ void onAfterChildProcEmitCode() override;
+
+ // GrGLSLXPFragmentBuilder interface.
+ bool hasCustomColorOutput() const override { return fHasCustomColorOutput; }
+ bool hasSecondaryOutput() const override { return fHasSecondaryOutput; }
+ const char* dstColor() override;
+ void enableAdvancedBlendEquationIfNeeded(GrBlendEquation) override;
+
+private:
+    // Private interface, used by GrGLProgramBuilder (a friend class) to build a fragment shader
+ void enableCustomOutput();
+ void enableSecondaryOutput();
+ const char* getPrimaryColorOutputName() const;
+ const char* getSecondaryColorOutputName() const;
+
+#ifdef SK_DEBUG
+ // As GLSLProcessors emit code, there are some conditions we need to verify. We use the below
+ // state to track this. The reset call is called per processor emitted.
+ GrProcessor::RequiredFeatures usedProcessorFeatures() const { return fUsedProcessorFeatures; }
+ bool hasReadDstColor() const { return fHasReadDstColor; }
+ void resetVerification() {
+ fUsedProcessorFeatures = GrProcessor::kNone_RequiredFeatures;
+ fHasReadDstColor = false;
+ }
+#endif
+
+ static const char* DeclaredColorOutputName() { return "fsColorOut"; }
+ static const char* DeclaredSecondaryColorOutputName() { return "fsSecondaryColorOut"; }
+
+ GrSurfaceOrigin getSurfaceOrigin() const;
+
+ void onFinalize() override;
+ void defineSampleOffsetArray(const char* name, const SkMatrix&);
+
+ static const char* kDstColorName;
+
+ /*
+ * State that tracks which child proc in the proc tree is currently emitting code. This is
+ * used to update the fMangleString, which is used to mangle the names of uniforms and functions
+ * emitted by the proc. fSubstageIndices is a stack: its count indicates how many levels deep
+ * we are in the tree, and its second-to-last value is the index of the child proc at that
+ * level which is currently emitting code. For example, if fSubstageIndices = [3, 1, 2, 0], that
+ * means we're currently emitting code for the base proc's 3rd child's 1st child's 2nd child.
+ */
+ SkTArray<int> fSubstageIndices;
+
+ /*
+ * The mangle string is used to mangle the names of uniforms/functions emitted by the child
+ * procs so no duplicate uniforms/functions appear in the generated shader program. The mangle
+ * string is simply based on fSubstageIndices. For example, if fSubstageIndices = [3, 1, 2, 0],
+ * then the manglestring will be "_c3_c1_c2", and any uniform/function emitted by that proc will
+ * have "_c3_c1_c2" appended to its name, which can be interpreted as "base proc's 3rd child's
+ * 1st child's 2nd child".
+ */
+ SkString fMangleString;
+
+ bool fSetupFragPosition;
+ bool fHasCustomColorOutput;
+ int fCustomColorOutputIndex;
+ bool fHasSecondaryOutput;
+ uint8_t fUsedSampleOffsetArrays;
+ bool fHasInitializedSampleMask;
+ SkString fDistanceVectorOutput;
+
+#ifdef SK_DEBUG
+ // some state to verify shaders and effects are consistent, this is reset between effects by
+ // the program creator
+ GrProcessor::RequiredFeatures fUsedProcessorFeatures;
+ bool fHasReadDstColor;
+#endif
+
+ friend class GrGLSLProgramBuilder;
+ friend class GrGLProgramBuilder;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryProcessor.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryProcessor.cpp
new file mode 100644
index 000000000..0d5ed93ed
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryProcessor.cpp
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLSLGeometryProcessor.h"
+
+#include "GrCoordTransform.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "glsl/GrGLSLVarying.h"
+#include "glsl/GrGLSLVertexShaderBuilder.h"
+
+void GrGLSLGeometryProcessor::emitCode(EmitArgs& args) {
+ GrGLSLVertexBuilder* vBuilder = args.fVertBuilder;
+ GrGPArgs gpArgs;
+ this->onEmitCode(args, &gpArgs);
+ vBuilder->transformToNormalizedDeviceSpace(gpArgs.fPositionVar);
+ if (kVec2f_GrSLType == gpArgs.fPositionVar.getType()) {
+ args.fVaryingHandler->setNoPerspective();
+ }
+}
+
+void GrGLSLGeometryProcessor::emitTransforms(GrGLSLVertexBuilder* vb,
+ GrGLSLVaryingHandler* varyingHandler,
+ GrGLSLUniformHandler* uniformHandler,
+ const GrShaderVar& posVar,
+ const char* localCoords,
+ const SkMatrix& localMatrix,
+ FPCoordTransformHandler* handler) {
+ int i = 0;
+ while (const GrCoordTransform* coordTransform = handler->nextCoordTransform()) {
+ SkString strUniName;
+ strUniName.printf("CoordTransformMatrix_%d", i);
+ GrSLType varyingType;
+
+ uint32_t type = coordTransform->getMatrix().getType();
+ type |= localMatrix.getType();
+
+ varyingType = SkToBool(SkMatrix::kPerspective_Mask & type) ? kVec3f_GrSLType :
+ kVec2f_GrSLType;
+ GrSLPrecision precision = coordTransform->precision();
+
+ const char* uniName;
+
+ fInstalledTransforms.push_back().fHandle = uniformHandler->addUniform(kVertex_GrShaderFlag,
+ kMat33f_GrSLType,
+ precision,
+ strUniName.c_str(),
+ &uniName).toIndex();
+ SkString strVaryingName;
+ strVaryingName.printf("TransformedCoords_%d", i);
+
+ GrGLSLVertToFrag v(varyingType);
+ varyingHandler->addVarying(strVaryingName.c_str(), &v, precision);
+
+ SkASSERT(kVec2f_GrSLType == varyingType || kVec3f_GrSLType == varyingType);
+ handler->specifyCoordsForCurrCoordTransform(SkString(v.fsIn()), varyingType);
+
+ if (kVec2f_GrSLType == varyingType) {
+ vb->codeAppendf("%s = (%s * vec3(%s, 1)).xy;", v.vsOut(), uniName, localCoords);
+ } else {
+ vb->codeAppendf("%s = %s * vec3(%s, 1);", v.vsOut(), uniName, localCoords);
+ }
+ ++i;
+ }
+}
+
+void GrGLSLGeometryProcessor::setTransformDataHelper(const SkMatrix& localMatrix,
+ const GrGLSLProgramDataManager& pdman,
+ FPCoordTransformIter* transformIter) {
+ int i = 0;
+ while (const GrCoordTransform* coordTransform = transformIter->next()) {
+ const SkMatrix& m = GetTransformMatrix(localMatrix, *coordTransform);
+ if (!fInstalledTransforms[i].fCurrentValue.cheapEqualTo(m)) {
+ pdman.setSkMatrix(fInstalledTransforms[i].fHandle.toIndex(), m);
+ fInstalledTransforms[i].fCurrentValue = m;
+ }
+ ++i;
+ }
+ SkASSERT(i == fInstalledTransforms.count());
+}
+
+void GrGLSLGeometryProcessor::setupPosition(GrGLSLVertexBuilder* vertBuilder,
+ GrGPArgs* gpArgs,
+ const char* posName) {
+ gpArgs->fPositionVar.set(kVec2f_GrSLType, "pos2");
+ vertBuilder->codeAppendf("vec2 %s = %s;", gpArgs->fPositionVar.c_str(), posName);
+}
+
+void GrGLSLGeometryProcessor::setupPosition(GrGLSLVertexBuilder* vertBuilder,
+ GrGLSLUniformHandler* uniformHandler,
+ GrGPArgs* gpArgs,
+ const char* posName,
+ const SkMatrix& mat,
+ UniformHandle* viewMatrixUniform) {
+ if (mat.isIdentity()) {
+ gpArgs->fPositionVar.set(kVec2f_GrSLType, "pos2");
+ vertBuilder->codeAppendf("vec2 %s = %s;", gpArgs->fPositionVar.c_str(), posName);
+ } else {
+ const char* viewMatrixName;
+ *viewMatrixUniform = uniformHandler->addUniform(kVertex_GrShaderFlag,
+ kMat33f_GrSLType, kHigh_GrSLPrecision,
+ "uViewM",
+ &viewMatrixName);
+ if (!mat.hasPerspective()) {
+ gpArgs->fPositionVar.set(kVec2f_GrSLType, "pos2");
+ vertBuilder->codeAppendf("vec2 %s = vec2(%s * vec3(%s, 1));",
+ gpArgs->fPositionVar.c_str(), viewMatrixName, posName);
+ } else {
+ gpArgs->fPositionVar.set(kVec3f_GrSLType, "pos3");
+ vertBuilder->codeAppendf("vec3 %s = %s * vec3(%s, 1);",
+ gpArgs->fPositionVar.c_str(), viewMatrixName, posName);
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryProcessor.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryProcessor.h
new file mode 100644
index 000000000..6777620a8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryProcessor.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLGeometryProcessor_DEFINED
+#define GrGLSLGeometryProcessor_DEFINED
+
+#include "GrGLSLPrimitiveProcessor.h"
+
+class GrGLSLGPBuilder;
+
+/**
+ * If a GL effect needs a GrGLFullShaderBuilder* object to emit vertex code, then it must inherit
+ * from this class. Since paths don't have vertices, this class is only meant to be used internally
+ * by skia, for special cases.
+ */
+class GrGLSLGeometryProcessor : public GrGLSLPrimitiveProcessor {
+public:
+ /* Any general emit code goes in the base class emitCode. Subclasses override onEmitCode */
+ void emitCode(EmitArgs&) override;
+
+protected:
+ // A helper which subclasses can use if needed and used above in the default setTransformData().
+ void setTransformDataHelper(const SkMatrix& localMatrix,
+ const GrGLSLProgramDataManager& pdman,
+ FPCoordTransformIter*);
+
+ // Emit a uniform matrix for each coord transform.
+ void emitTransforms(GrGLSLVertexBuilder* vb,
+ GrGLSLVaryingHandler* varyingHandler,
+ GrGLSLUniformHandler* uniformHandler,
+ const GrShaderVar& posVar,
+ const char* localCoords,
+ FPCoordTransformHandler* handler) {
+ this->emitTransforms(vb, varyingHandler, uniformHandler,
+ posVar, localCoords, SkMatrix::I(), handler);
+ }
+
+ // Emit pre-transformed coords as a vertex attribute per coord-transform.
+ void emitTransforms(GrGLSLVertexBuilder*,
+ GrGLSLVaryingHandler*,
+ GrGLSLUniformHandler*,
+ const GrShaderVar& posVar,
+ const char* localCoords,
+ const SkMatrix& localMatrix,
+ FPCoordTransformHandler*);
+
+ struct GrGPArgs {
+ // The variable used by a GP to store its position. It can be
+ // either a vec2 or a vec3 depending on the presence of perspective.
+ GrShaderVar fPositionVar;
+ };
+
+ // Create the correct type of position variable given the CTM
+ void setupPosition(GrGLSLVertexBuilder*, GrGPArgs*, const char* posName);
+ void setupPosition(GrGLSLVertexBuilder*,
+ GrGLSLUniformHandler* uniformHandler,
+ GrGPArgs*,
+ const char* posName,
+ const SkMatrix& mat,
+ UniformHandle* viewMatrixUniform);
+
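+    // Small key describing the view matrix (0x0 identity, 0x01 affine with no perspective,
+    // 0x02 perspective), typically folded into a processor key.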
+ static uint32_t ComputePosKey(const SkMatrix& mat) {
+ if (mat.isIdentity()) {
+ return 0x0;
+ } else if (!mat.hasPerspective()) {
+ return 0x01;
+ } else {
+ return 0x02;
+ }
+ }
+
+private:
+ virtual void onEmitCode(EmitArgs&, GrGPArgs*) = 0;
+
+ struct TransformUniform {
+ UniformHandle fHandle;
+ SkMatrix fCurrentValue = SkMatrix::InvalidMatrix();
+ };
+
+ SkTArray<TransformUniform, true> fInstalledTransforms;
+
+ typedef GrGLSLPrimitiveProcessor INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryShaderBuilder.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryShaderBuilder.cpp
new file mode 100644
index 000000000..eddd69f7c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryShaderBuilder.cpp
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLSLGeometryShaderBuilder.h"
+#include "GrGLSLProgramBuilder.h"
+#include "GrGLSLVarying.h"
+
+GrGLSLGeometryBuilder::GrGLSLGeometryBuilder(GrGLSLProgramBuilder* program)
+ : INHERITED(program) {
+
+}
+
+void GrGLSLGeometryBuilder::onFinalize() {
+ fProgramBuilder->varyingHandler()->getGeomDecls(&this->inputs(), &this->outputs());
+}
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryShaderBuilder.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryShaderBuilder.h
new file mode 100644
index 000000000..f5e09f11a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLGeometryShaderBuilder.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLGeometryShaderBuilder_DEFINED
+#define GrGLSLGeometryShaderBuilder_DEFINED
+
+#include "GrGLSLShaderBuilder.h"
+
+class GrGLSLVarying;
+
+class GrGLSLGeometryBuilder : public GrGLSLShaderBuilder {
+public:
+ GrGLSLGeometryBuilder(GrGLSLProgramBuilder* program);
+
+private:
+ void onFinalize() override;
+
+ friend class GrGLProgramBuilder;
+
+ typedef GrGLSLShaderBuilder INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLPLSPathRendering.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLPLSPathRendering.h
new file mode 100644
index 000000000..60889e98d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLPLSPathRendering.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
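+// GLSL snippets for pixel local storage (EXT_shader_pixel_local_storage) path rendering:
+// a per-pixel block holding winding counts and the destination color.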
+#define GR_GL_PLS_DSTCOLOR_NAME "pls.dstColor"
+#define GR_GL_PLS_PATH_DATA_DECL "__pixel_localEXT PLSData {\n"\
+ " layout(rgba8i) ivec4 windings;\n"\
+ " layout(rgba8) vec4 dstColor;\n"\
+ "} pls;\n"
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLPrimitiveProcessor.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLPrimitiveProcessor.cpp
new file mode 100644
index 000000000..24f21ffe7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLPrimitiveProcessor.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLSLPrimitiveProcessor.h"
+
+#include "GrCoordTransform.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "glsl/GrGLSLVertexShaderBuilder.h"
+
+SkMatrix GrGLSLPrimitiveProcessor::GetTransformMatrix(const SkMatrix& localMatrix,
+ const GrCoordTransform& coordTransform) {
+ SkMatrix combined;
+ combined.setConcat(coordTransform.getMatrix(), localMatrix);
+ if (coordTransform.reverseY()) {
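+        // Equivalent to the commented-out postScale/postTranslate below: fold the y-flip
+        // (y' = 1 - y) into the matrix by rewriting the y row as (perspective row) - (y row).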
+ // combined.postScale(1,-1);
+ // combined.postTranslate(0,1);
+ combined.set(SkMatrix::kMSkewY,
+ combined[SkMatrix::kMPersp0] - combined[SkMatrix::kMSkewY]);
+ combined.set(SkMatrix::kMScaleY,
+ combined[SkMatrix::kMPersp1] - combined[SkMatrix::kMScaleY]);
+ combined.set(SkMatrix::kMTransY,
+ combined[SkMatrix::kMPersp2] - combined[SkMatrix::kMTransY]);
+ }
+ return combined;
+}
+
+void GrGLSLPrimitiveProcessor::setupUniformColor(GrGLSLPPFragmentBuilder* fragBuilder,
+ GrGLSLUniformHandler* uniformHandler,
+ const char* outputName,
+ UniformHandle* colorUniform) {
+ SkASSERT(colorUniform);
+ const char* stagedLocalVarName;
+ *colorUniform = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec4f_GrSLType,
+ kDefault_GrSLPrecision,
+ "Color",
+ &stagedLocalVarName);
+ fragBuilder->codeAppendf("%s = %s;", outputName, stagedLocalVarName);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+const GrCoordTransform* GrGLSLPrimitiveProcessor::FPCoordTransformHandler::nextCoordTransform() {
+#ifdef SK_DEBUG
+ SkASSERT(nullptr == fCurr || fAddedCoord);
+ fAddedCoord = false;
+ fCurr = fIter.next();
+ return fCurr;
+#else
+ return fIter.next();
+#endif
+}
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLPrimitiveProcessor.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLPrimitiveProcessor.h
new file mode 100644
index 000000000..d270fa18f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLPrimitiveProcessor.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLPrimitiveProcessor_DEFINED
+#define GrGLSLPrimitiveProcessor_DEFINED
+
+#include "GrFragmentProcessor.h"
+#include "GrPrimitiveProcessor.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLSampler.h"
+
+class GrBatchTracker;
+class GrPrimitiveProcessor;
+class GrGLSLCaps;
+class GrGLSLPPFragmentBuilder;
+class GrGLSLGPBuilder;
+class GrGLSLUniformHandler;
+class GrGLSLVaryingHandler;
+class GrGLSLVertexBuilder;
+
+class GrGLSLPrimitiveProcessor {
+public:
+ using FPCoordTransformIter = GrFragmentProcessor::CoordTransformIter;
+
+ virtual ~GrGLSLPrimitiveProcessor() {}
+
+ typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+ typedef GrGLSLProgramDataManager::UniformHandle SamplerHandle;
+
+ /**
+ * This class provides access to the GrCoordTransforms across all GrFragmentProcessors in a
+ * GrPipeline. It is also used by the primitive processor to specify the fragment shader
+ * variable that will hold the transformed coords for each GrCoordTransform. It is required that
+ * the primitive processor iterate over each coord transform and insert a shader var result for
+ * each. The GrGLSLFragmentProcessors will reference these variables in their fragment code.
+ */
+ class FPCoordTransformHandler : public SkNoncopyable {
+ public:
+ FPCoordTransformHandler(const GrPipeline& pipeline,
+ SkTArray<GrShaderVar>* transformedCoordVars)
+ : fIter(pipeline)
+ , fTransformedCoordVars(transformedCoordVars) {}
+
+ ~FPCoordTransformHandler() { SkASSERT(!this->nextCoordTransform());}
+
+ const GrCoordTransform* nextCoordTransform();
+
+ // 'args' are constructor params to GrShaderVar.
+ template<typename... Args>
+ void specifyCoordsForCurrCoordTransform(Args&&... args) {
+ SkASSERT(!fAddedCoord);
+ fTransformedCoordVars->emplace_back(std::forward<Args>(args)...);
+ SkDEBUGCODE(fAddedCoord = true;)
+ }
+
+ private:
+ GrFragmentProcessor::CoordTransformIter fIter;
+ SkDEBUGCODE(bool fAddedCoord = false;)
+ SkDEBUGCODE(const GrCoordTransform* fCurr = nullptr;)
+ SkTArray<GrShaderVar>* fTransformedCoordVars;
+ };
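+
+    // Typical use, mirroring GrGLSLGeometryProcessor::emitTransforms (sketch):
+    //   while (const GrCoordTransform* ct = handler->nextCoordTransform()) {
+    //       // ... add a varying for ct's transformed coords ...
+    //       handler->specifyCoordsForCurrCoordTransform(SkString(v.fsIn()), varyingType);
+    //   }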
+
+ struct EmitArgs {
+ EmitArgs(GrGLSLVertexBuilder* vertBuilder,
+ GrGLSLPPFragmentBuilder* fragBuilder,
+ GrGLSLVaryingHandler* varyingHandler,
+ GrGLSLUniformHandler* uniformHandler,
+ const GrGLSLCaps* caps,
+ const GrPrimitiveProcessor& gp,
+ const char* outputColor,
+ const char* outputCoverage,
+ const char* distanceVectorName,
+ const SamplerHandle* texSamplers,
+ const SamplerHandle* bufferSamplers,
+ FPCoordTransformHandler* transformHandler)
+ : fVertBuilder(vertBuilder)
+ , fFragBuilder(fragBuilder)
+ , fVaryingHandler(varyingHandler)
+ , fUniformHandler(uniformHandler)
+ , fGLSLCaps(caps)
+ , fGP(gp)
+ , fOutputColor(outputColor)
+ , fOutputCoverage(outputCoverage)
+ , fDistanceVectorName(distanceVectorName)
+ , fTexSamplers(texSamplers)
+ , fBufferSamplers(bufferSamplers)
+ , fFPCoordTransformHandler(transformHandler) {}
+ GrGLSLVertexBuilder* fVertBuilder;
+ GrGLSLPPFragmentBuilder* fFragBuilder;
+ GrGLSLVaryingHandler* fVaryingHandler;
+ GrGLSLUniformHandler* fUniformHandler;
+ const GrGLSLCaps* fGLSLCaps;
+ const GrPrimitiveProcessor& fGP;
+ const char* fOutputColor;
+ const char* fOutputCoverage;
+ const char* fDistanceVectorName;
+ const SamplerHandle* fTexSamplers;
+ const SamplerHandle* fBufferSamplers;
+ FPCoordTransformHandler* fFPCoordTransformHandler;
+ };
+
+ /**
+ * This is similar to emitCode() in the base class, except it takes a full shader builder.
+ * This allows the effect subclass to emit vertex code.
+ */
+ virtual void emitCode(EmitArgs&) = 0;
+
+ /**
+ * A GrGLSLPrimitiveProcessor instance can be reused with any GrGLSLPrimitiveProcessor that
+ * produces the same stage key; this function reads data from a GrGLSLPrimitiveProcessor and
+ * uploads any uniform variables required by the shaders created in emitCode(). The
+ * GrPrimitiveProcessor parameter is guaranteed to be of the same type and to have an
+ * identical processor key as the GrPrimitiveProcessor that created this
+ * GrGLSLPrimitiveProcessor.
+ * The subclass may use the transform iterator to perform any setup required for the particular
+ * set of fp transform matrices, such as uploading via uniforms. The iterator will iterate over
+ * the transforms in the same order as the TransformHandler passed to emitCode.
+ */
+ virtual void setData(const GrGLSLProgramDataManager&, const GrPrimitiveProcessor&,
+ FPCoordTransformIter&&) = 0;
+
+ static SkMatrix GetTransformMatrix(const SkMatrix& localMatrix, const GrCoordTransform&);
+
+protected:
+ void setupUniformColor(GrGLSLPPFragmentBuilder* fragBuilder,
+ GrGLSLUniformHandler* uniformHandler,
+ const char* outputName,
+ UniformHandle* colorUniform);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramBuilder.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramBuilder.cpp
new file mode 100644
index 000000000..abfeafda0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramBuilder.cpp
@@ -0,0 +1,427 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "glsl/GrGLSLProgramBuilder.h"
+
+#include "GrPipeline.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLGeometryProcessor.h"
+#include "glsl/GrGLSLVarying.h"
+#include "glsl/GrGLSLXferProcessor.h"
+
+const int GrGLSLProgramBuilder::kVarsPerBlock = 8;
+
+GrGLSLProgramBuilder::GrGLSLProgramBuilder(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ const GrProgramDesc& desc)
+ : fVS(this)
+ , fGS(this)
+ , fFS(this)
+ , fStageIndex(-1)
+ , fPipeline(pipeline)
+ , fPrimProc(primProc)
+ , fDesc(desc)
+ , fGeometryProcessor(nullptr)
+ , fXferProcessor(nullptr)
+ , fNumVertexSamplers(0)
+ , fNumGeometrySamplers(0)
+ , fNumFragmentSamplers(0) {
+}
+
+void GrGLSLProgramBuilder::addFeature(GrShaderFlags shaders,
+ uint32_t featureBit,
+ const char* extensionName) {
+ if (shaders & kVertex_GrShaderFlag) {
+ fVS.addFeature(featureBit, extensionName);
+ }
+ if (shaders & kGeometry_GrShaderFlag) {
+ SkASSERT(this->glslCaps()->geometryShaderSupport());
+ fGS.addFeature(featureBit, extensionName);
+ }
+ if (shaders & kFragment_GrShaderFlag) {
+ fFS.addFeature(featureBit, extensionName);
+ }
+}
+
+bool GrGLSLProgramBuilder::emitAndInstallProcs(GrGLSLExpr4* inputColor,
+ GrGLSLExpr4* inputCoverage) {
+ // First we loop over all of the installed processors and collect coord transforms. These will
+ // be sent to the GrGLSLPrimitiveProcessor in its emitCode function
+ const GrPrimitiveProcessor& primProc = this->primitiveProcessor();
+
+ this->emitAndInstallPrimProc(primProc, inputColor, inputCoverage);
+
+ this->emitAndInstallFragProcs(inputColor, inputCoverage);
+ if (primProc.getPixelLocalStorageState() !=
+ GrPixelLocalStorageState::kDraw_GrPixelLocalStorageState) {
+ this->emitAndInstallXferProc(this->pipeline().getXferProcessor(), *inputColor,
+ *inputCoverage, this->pipeline().ignoresCoverage(),
+ primProc.getPixelLocalStorageState());
+ this->emitFSOutputSwizzle(this->pipeline().getXferProcessor().hasSecondaryOutput());
+ }
+
+ return this->checkSamplerCounts();
+}
+
+void GrGLSLProgramBuilder::emitAndInstallPrimProc(const GrPrimitiveProcessor& proc,
+ GrGLSLExpr4* outputColor,
+ GrGLSLExpr4* outputCoverage) {
+ // Program builders have a bit of state we need to clear with each effect
+ AutoStageAdvance adv(this);
+ this->nameExpression(outputColor, "outputColor");
+ this->nameExpression(outputCoverage, "outputCoverage");
+
+ const char* distanceVectorName = nullptr;
+ if (this->fPipeline.usesDistanceVectorField() && proc.implementsDistanceVector()) {
+ // Each individual user (FP) of the distance vector must be able to handle having this
+ // variable be undeclared. There is no single default value that will yield a reasonable
+ // result for all users.
+ distanceVectorName = fFS.distanceVectorName();
+ fFS.codeAppend( "// Normalized vector to the closest geometric edge (in device space)\n");
+ fFS.codeAppend( "// Distance to the edge encoded in the z-component\n");
+ fFS.codeAppendf("vec4 %s;", distanceVectorName);
+ }
+
+ // Enclose custom code in a block to avoid namespace conflicts
+ SkString openBrace;
+ openBrace.printf("{ // Stage %d, %s\n", fStageIndex, proc.name());
+ fFS.codeAppend(openBrace.c_str());
+ fVS.codeAppendf("// Primitive Processor %s\n", proc.name());
+
+ SkASSERT(!fGeometryProcessor);
+ fGeometryProcessor = proc.createGLSLInstance(*this->glslCaps());
+
+ SkSTArray<4, SamplerHandle> texSamplers(proc.numTextures());
+ SkSTArray<2, SamplerHandle> bufferSamplers(proc.numBuffers());
+ this->emitSamplers(proc, &texSamplers, &bufferSamplers);
+
+ GrGLSLPrimitiveProcessor::FPCoordTransformHandler transformHandler(fPipeline,
+ &fTransformedCoordVars);
+ GrGLSLGeometryProcessor::EmitArgs args(&fVS,
+ &fFS,
+ this->varyingHandler(),
+ this->uniformHandler(),
+ this->glslCaps(),
+ proc,
+ outputColor->c_str(),
+ outputCoverage->c_str(),
+ distanceVectorName,
+ texSamplers.begin(),
+ bufferSamplers.begin(),
+ &transformHandler);
+ fGeometryProcessor->emitCode(args);
+
+ // We have to check that effects and the code they emit are consistent, ie if an effect
+ // asks for dst color, then the emit code needs to follow suit
+ SkDEBUGCODE(verify(proc);)
+
+ fFS.codeAppend("}");
+}
+
+void GrGLSLProgramBuilder::emitAndInstallFragProcs(GrGLSLExpr4* color, GrGLSLExpr4* coverage) {
+ int transformedCoordVarsIdx = 0;
+ GrGLSLExpr4** inOut = &color;
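+    // The first numColorFragmentProcessors() FPs chain into the color expression; the
+    // remaining FPs chain into coverage.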
+ for (int i = 0; i < this->pipeline().numFragmentProcessors(); ++i) {
+ if (i == this->pipeline().numColorFragmentProcessors()) {
+ inOut = &coverage;
+ }
+ GrGLSLExpr4 output;
+ const GrFragmentProcessor& fp = this->pipeline().getFragmentProcessor(i);
+ this->emitAndInstallFragProc(fp, i, transformedCoordVarsIdx, **inOut, &output);
+ GrFragmentProcessor::Iter iter(&fp);
+ while (const GrFragmentProcessor* fp = iter.next()) {
+ transformedCoordVarsIdx += fp->numCoordTransforms();
+ }
+ **inOut = output;
+ }
+}
+
+// TODO: Processors cannot output zeros because an empty string is interpreted as all 1s;
+// the fix is to allow effects to take the GrGLSLExpr4 directly.
+void GrGLSLProgramBuilder::emitAndInstallFragProc(const GrFragmentProcessor& fp,
+ int index,
+ int transformedCoordVarsIdx,
+ const GrGLSLExpr4& input,
+ GrGLSLExpr4* output) {
+ // Program builders have a bit of state we need to clear with each effect
+ AutoStageAdvance adv(this);
+ this->nameExpression(output, "output");
+
+ // Enclose custom code in a block to avoid namespace conflicts
+ SkString openBrace;
+ openBrace.printf("{ // Stage %d, %s\n", fStageIndex, fp.name());
+ fFS.codeAppend(openBrace.c_str());
+
+ GrGLSLFragmentProcessor* fragProc = fp.createGLSLInstance();
+
+ SkSTArray<4, SamplerHandle> textureSamplerArray(fp.numTextures());
+ SkSTArray<2, SamplerHandle> bufferSamplerArray(fp.numBuffers());
+ GrFragmentProcessor::Iter iter(&fp);
+ while (const GrFragmentProcessor* subFP = iter.next()) {
+ this->emitSamplers(*subFP, &textureSamplerArray, &bufferSamplerArray);
+ }
+
+ const GrShaderVar* coordVars = fTransformedCoordVars.begin() + transformedCoordVarsIdx;
+ GrGLSLFragmentProcessor::TransformedCoordVars coords(&fp, coordVars);
+ GrGLSLFragmentProcessor::TextureSamplers textureSamplers(&fp, textureSamplerArray.begin());
+ GrGLSLFragmentProcessor::BufferSamplers bufferSamplers(&fp, bufferSamplerArray.begin());
+ GrGLSLFragmentProcessor::EmitArgs args(&fFS,
+ this->uniformHandler(),
+ this->glslCaps(),
+ fp,
+ output->c_str(),
+ input.isOnes() ? nullptr : input.c_str(),
+ coords,
+ textureSamplers,
+ bufferSamplers,
+ this->primitiveProcessor().implementsDistanceVector());
+
+ fragProc->emitCode(args);
+
+ // We have to check that effects and the code they emit are consistent, ie if an effect
+ // asks for dst color, then the emit code needs to follow suit
+ SkDEBUGCODE(verify(fp);)
+ fFragmentProcessors.push_back(fragProc);
+
+ fFS.codeAppend("}");
+}
+
+void GrGLSLProgramBuilder::emitAndInstallXferProc(const GrXferProcessor& xp,
+ const GrGLSLExpr4& colorIn,
+ const GrGLSLExpr4& coverageIn,
+ bool ignoresCoverage,
+ GrPixelLocalStorageState plsState) {
+ // Program builders have a bit of state we need to clear with each effect
+ AutoStageAdvance adv(this);
+
+ SkASSERT(!fXferProcessor);
+ fXferProcessor = xp.createGLSLInstance();
+
+ // Enable dual source secondary output if we have one
+ if (xp.hasSecondaryOutput()) {
+ fFS.enableSecondaryOutput();
+ }
+
+ if (this->glslCaps()->mustDeclareFragmentShaderOutput()) {
+ fFS.enableCustomOutput();
+ }
+
+ SkString openBrace;
+ openBrace.printf("{ // Xfer Processor: %s\n", xp.name());
+ fFS.codeAppend(openBrace.c_str());
+
+ SkSTArray<4, SamplerHandle> texSamplers(xp.numTextures());
+ SkSTArray<2, SamplerHandle> bufferSamplers(xp.numBuffers());
+ this->emitSamplers(xp, &texSamplers, &bufferSamplers);
+
+ bool usePLSDstRead = (plsState == GrPixelLocalStorageState::kFinish_GrPixelLocalStorageState);
+ GrGLSLXferProcessor::EmitArgs args(&fFS,
+ this->uniformHandler(),
+ this->glslCaps(),
+ xp, colorIn.c_str(),
+ ignoresCoverage ? nullptr : coverageIn.c_str(),
+ fFS.getPrimaryColorOutputName(),
+ fFS.getSecondaryColorOutputName(),
+ texSamplers.begin(),
+ bufferSamplers.begin(),
+ usePLSDstRead);
+ fXferProcessor->emitCode(args);
+
+ // We have to check that effects and the code they emit are consistent, ie if an effect
+ // asks for dst color, then the emit code needs to follow suit
+ SkDEBUGCODE(verify(xp);)
+ fFS.codeAppend("}");
+}
+
+void GrGLSLProgramBuilder::emitSamplers(const GrProcessor& processor,
+ SkTArray<SamplerHandle>* outTexSamplers,
+ SkTArray<SamplerHandle>* outBufferSamplers) {
+ SkString name;
+ int numTextures = processor.numTextures();
+ for (int t = 0; t < numTextures; ++t) {
+ const GrTextureAccess& access = processor.textureAccess(t);
+ GrSLType samplerType = access.getTexture()->samplerType();
+ if (kTextureExternalSampler_GrSLType == samplerType) {
+ const char* externalFeatureString = this->glslCaps()->externalTextureExtensionString();
+ // We shouldn't ever create a GrGLTexture that requires external sampler type
+ SkASSERT(externalFeatureString);
+ this->addFeature(access.getVisibility(),
+ 1 << GrGLSLShaderBuilder::kExternalTexture_GLSLPrivateFeature,
+ externalFeatureString);
+ }
+ name.printf("TextureSampler_%d", outTexSamplers->count());
+ this->emitSampler(samplerType, access.getTexture()->config(),
+ name.c_str(), access.getVisibility(), outTexSamplers);
+ }
+
+ if (int numBuffers = processor.numBuffers()) {
+ SkASSERT(this->glslCaps()->texelBufferSupport());
+ GrShaderFlags texelBufferVisibility = kNone_GrShaderFlags;
+
+ for (int b = 0; b < numBuffers; ++b) {
+ const GrBufferAccess& access = processor.bufferAccess(b);
+ name.printf("BufferSampler_%d", outBufferSamplers->count());
+ this->emitSampler(kTextureBufferSampler_GrSLType, access.texelConfig(), name.c_str(),
+ access.visibility(), outBufferSamplers);
+ texelBufferVisibility |= access.visibility();
+ }
+
+ if (const char* extension = this->glslCaps()->texelBufferExtensionString()) {
+ this->addFeature(texelBufferVisibility,
+ 1 << GrGLSLShaderBuilder::kTexelBuffer_GLSLPrivateFeature,
+ extension);
+ }
+ }
+}
+
+void GrGLSLProgramBuilder::emitSampler(GrSLType samplerType,
+ GrPixelConfig config,
+ const char* name,
+ GrShaderFlags visibility,
+ SkTArray<SamplerHandle>* outSamplers) {
+ if (visibility & kVertex_GrShaderFlag) {
+ ++fNumVertexSamplers;
+ }
+ if (visibility & kGeometry_GrShaderFlag) {
+ SkASSERT(this->primitiveProcessor().willUseGeoShader());
+ ++fNumGeometrySamplers;
+ }
+ if (visibility & kFragment_GrShaderFlag) {
+ ++fNumFragmentSamplers;
+ }
+ GrSLPrecision precision = this->glslCaps()->samplerPrecision(config, visibility);
+ SamplerHandle handle = this->uniformHandler()->addSampler(visibility,
+ config,
+ samplerType,
+ precision,
+ name);
+ outSamplers->emplace_back(handle);
+}
+
+void GrGLSLProgramBuilder::emitFSOutputSwizzle(bool hasSecondaryOutput) {
+ // Swizzle the fragment shader outputs if necessary.
+ GrSwizzle swizzle;
+ swizzle.setFromKey(this->desc().header().fOutputSwizzle);
+ if (swizzle != GrSwizzle::RGBA()) {
+ fFS.codeAppendf("%s = %s.%s;", fFS.getPrimaryColorOutputName(),
+ fFS.getPrimaryColorOutputName(),
+ swizzle.c_str());
+ if (hasSecondaryOutput) {
+ fFS.codeAppendf("%s = %s.%s;", fFS.getSecondaryColorOutputName(),
+ fFS.getSecondaryColorOutputName(),
+ swizzle.c_str());
+ }
+ }
+}
+
+bool GrGLSLProgramBuilder::checkSamplerCounts() {
+ const GrGLSLCaps& glslCaps = *this->glslCaps();
+ if (fNumVertexSamplers > glslCaps.maxVertexSamplers()) {
+ GrCapsDebugf(this->caps(), "Program would use too many vertex samplers\n");
+ return false;
+ }
+ if (fNumGeometrySamplers > glslCaps.maxGeometrySamplers()) {
+ GrCapsDebugf(this->caps(), "Program would use too many geometry samplers\n");
+ return false;
+ }
+ if (fNumFragmentSamplers > glslCaps.maxFragmentSamplers()) {
+ GrCapsDebugf(this->caps(), "Program would use too many fragment samplers\n");
+ return false;
+ }
+ // If the same sampler is used in two different shaders, it counts as two combined samplers.
+ int numCombinedSamplers = fNumVertexSamplers + fNumGeometrySamplers + fNumFragmentSamplers;
+ if (numCombinedSamplers > glslCaps.maxCombinedSamplers()) {
+ GrCapsDebugf(this->caps(), "Program would use too many combined samplers\n");
+ return false;
+ }
+ return true;
+}
+
+#ifdef SK_DEBUG
+void GrGLSLProgramBuilder::verify(const GrPrimitiveProcessor& gp) {
+ SkASSERT(fFS.usedProcessorFeatures() == gp.requiredFeatures());
+}
+
+void GrGLSLProgramBuilder::verify(const GrXferProcessor& xp) {
+ SkASSERT(fFS.usedProcessorFeatures() == xp.requiredFeatures());
+ SkASSERT(fFS.hasReadDstColor() == xp.willReadDstColor());
+}
+
+void GrGLSLProgramBuilder::verify(const GrFragmentProcessor& fp) {
+ SkASSERT(fFS.usedProcessorFeatures() == fp.requiredFeatures());
+}
+#endif
+
+void GrGLSLProgramBuilder::nameVariable(SkString* out, char prefix, const char* name, bool mangle) {
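+    // Illustrative result, assuming fStageIndex == 1 and an empty mangle string:
+    //   nameVariable(&s, 'u', "Color") -> "uColor_Stage1"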
+ if ('\0' == prefix) {
+ *out = name;
+ } else {
+ out->printf("%c%s", prefix, name);
+ }
+ if (mangle) {
+ if (out->endsWith('_')) {
+ // Names containing "__" are reserved.
+ out->append("x");
+ }
+ out->appendf("_Stage%d%s", fStageIndex, fFS.getMangleString().c_str());
+ }
+}
+
+void GrGLSLProgramBuilder::nameExpression(GrGLSLExpr4* output, const char* baseName) {
+    // Create a var to hold the stage result. If we already have a valid output name, just use
+    // that; otherwise create a new mangled one. This name is only valid if we are reordering
+    // stages and have to tell the stage exactly where to put its output.
+ SkString outName;
+ if (output->isValid()) {
+ outName = output->c_str();
+ } else {
+ this->nameVariable(&outName, '\0', baseName);
+ }
+ fFS.codeAppendf("vec4 %s;", outName.c_str());
+ *output = outName;
+}
+
+void GrGLSLProgramBuilder::appendUniformDecls(GrShaderFlags visibility, SkString* out) const {
+ this->uniformHandler()->appendUniformDecls(visibility, out);
+}
+
+const GrGLSLSampler& GrGLSLProgramBuilder::getSampler(SamplerHandle handle) const {
+ return this->uniformHandler()->getSampler(handle);
+}
+
+void GrGLSLProgramBuilder::addRTAdjustmentUniform(GrSLPrecision precision,
+ const char* name,
+ const char** outName) {
+ SkASSERT(!fUniformHandles.fRTAdjustmentUni.isValid());
+ fUniformHandles.fRTAdjustmentUni =
+ this->uniformHandler()->addUniform(kVertex_GrShaderFlag,
+ kVec4f_GrSLType,
+ precision,
+ name,
+ outName);
+}
+
+void GrGLSLProgramBuilder::addRTHeightUniform(const char* name, const char** outName) {
+ SkASSERT(!fUniformHandles.fRTHeightUni.isValid());
+ GrGLSLUniformHandler* uniformHandler = this->uniformHandler();
+ fUniformHandles.fRTHeightUni =
+ uniformHandler->internalAddUniformArray(kFragment_GrShaderFlag,
+ kFloat_GrSLType, kDefault_GrSLPrecision,
+ name, false, 0, outName);
+}
+
+void GrGLSLProgramBuilder::cleanupFragmentProcessors() {
+ for (int i = 0; i < fFragmentProcessors.count(); ++i) {
+ delete fFragmentProcessors[i];
+ }
+}
+
+void GrGLSLProgramBuilder::finalizeShaders() {
+ this->varyingHandler()->finalize();
+ fVS.finalize(kVertex_GrShaderFlag);
+ fFS.finalize(kFragment_GrShaderFlag);
+}
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramBuilder.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramBuilder.h
new file mode 100644
index 000000000..8a8cff55a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramBuilder.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLProgramBuilder_DEFINED
+#define GrGLSLProgramBuilder_DEFINED
+
+#include "GrGeometryProcessor.h"
+#include "GrGpu.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLGeometryShaderBuilder.h"
+#include "glsl/GrGLSLPrimitiveProcessor.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "glsl/GrGLSLSampler.h"
+#include "glsl/GrGLSLVertexShaderBuilder.h"
+#include "glsl/GrGLSLXferProcessor.h"
+
+class GrGLSLCaps;
+class GrGLSLShaderVar;
+class GrGLSLVaryingHandler;
+
+typedef SkSTArray<8, GrGLSLFragmentProcessor*, true> GrGLSLFragProcs;
+
+class GrGLSLProgramBuilder {
+public:
+ typedef GrGLSLUniformHandler::UniformHandle UniformHandle;
+
+ virtual ~GrGLSLProgramBuilder() {}
+
+ virtual const GrCaps* caps() const = 0;
+ virtual const GrGLSLCaps* glslCaps() const = 0;
+
+ const GrPrimitiveProcessor& primitiveProcessor() const { return fPrimProc; }
+ const GrPipeline& pipeline() const { return fPipeline; }
+ const GrProgramDesc& desc() const { return fDesc; }
+ const GrProgramDesc::KeyHeader& header() const { return fDesc.header(); }
+
+ void appendUniformDecls(GrShaderFlags visibility, SkString*) const;
+
+ typedef GrGLSLUniformHandler::SamplerHandle SamplerHandle;
+
+ const GrGLSLSampler& getSampler(SamplerHandle handle) const;
+
+ // Handles for program uniforms (other than per-effect uniforms)
+ struct BuiltinUniformHandles {
+ UniformHandle fRTAdjustmentUni;
+
+ // We use the render target height to provide a y-down frag coord when specifying
+ // origin_upper_left is not supported.
+ UniformHandle fRTHeightUni;
+ };
+
+ // Used to add a uniform in the vertex shader for transforming into normalized device space.
+ void addRTAdjustmentUniform(GrSLPrecision precision, const char* name, const char** outName);
+ const char* rtAdjustment() const { return "rtAdjustment"; }
+
+ // Used to add a uniform for the RenderTarget height (used for frag position) without mangling
+ // the name of the uniform inside of a stage.
+ void addRTHeightUniform(const char* name, const char** outName);
+
+ // Generates a name for a variable. The generated string will be name prefixed by the prefix
+ // char (unless the prefix is '\0'). It also will mangle the name to be stage-specific unless
+ // explicitly asked not to.
+ void nameVariable(SkString* out, char prefix, const char* name, bool mangle = true);
+
+ virtual GrGLSLUniformHandler* uniformHandler() = 0;
+ virtual const GrGLSLUniformHandler* uniformHandler() const = 0;
+ virtual GrGLSLVaryingHandler* varyingHandler() = 0;
+
+ // Used for backend customization of the output color and secondary color variables from the
+ // fragment processor. Only used if the outputs are explicitly declared in the shaders
+ virtual void finalizeFragmentOutputColor(GrGLSLShaderVar& outputColor) {}
+ virtual void finalizeFragmentSecondaryColor(GrGLSLShaderVar& outputColor) {}
+
+ // number of each input/output type in a single allocation block, used by many builders
+ static const int kVarsPerBlock;
+
+ GrGLSLVertexBuilder fVS;
+ GrGLSLGeometryBuilder fGS;
+ GrGLSLFragmentShaderBuilder fFS;
+
+ int fStageIndex;
+
+ const GrPipeline& fPipeline;
+ const GrPrimitiveProcessor& fPrimProc;
+ const GrProgramDesc& fDesc;
+
+ BuiltinUniformHandles fUniformHandles;
+
+ GrGLSLPrimitiveProcessor* fGeometryProcessor;
+ GrGLSLXferProcessor* fXferProcessor;
+ GrGLSLFragProcs fFragmentProcessors;
+
+protected:
+ explicit GrGLSLProgramBuilder(const GrPipeline&,
+ const GrPrimitiveProcessor&,
+ const GrProgramDesc&);
+
+ void addFeature(GrShaderFlags shaders, uint32_t featureBit, const char* extensionName);
+
+ bool emitAndInstallProcs(GrGLSLExpr4* inputColor, GrGLSLExpr4* inputCoverage);
+
+ void cleanupFragmentProcessors();
+
+ void finalizeShaders();
+
+private:
+ // reset is called by program creator between each processor's emit code. It increments the
+    // stage offset for variable name mangling, and also ensures verification variables in the
+ // fragment shader are cleared.
+ void reset() {
+ this->addStage();
+ SkDEBUGCODE(fFS.resetVerification();)
+ }
+ void addStage() { fStageIndex++; }
+
+ class AutoStageAdvance {
+ public:
+ AutoStageAdvance(GrGLSLProgramBuilder* pb)
+ : fPB(pb) {
+ fPB->reset();
+ // Each output to the fragment processor gets its own code section
+ fPB->fFS.nextStage();
+ }
+ ~AutoStageAdvance() {}
+ private:
+ GrGLSLProgramBuilder* fPB;
+ };
+
+ // Generates a possibly mangled name for a stage variable and writes it to the fragment shader.
+ // If GrGLSLExpr4 has a valid name then it will use that instead
+ void nameExpression(GrGLSLExpr4*, const char* baseName);
+
+ void emitAndInstallPrimProc(const GrPrimitiveProcessor&,
+ GrGLSLExpr4* outputColor,
+ GrGLSLExpr4* outputCoverage);
+ void emitAndInstallFragProcs(GrGLSLExpr4* colorInOut, GrGLSLExpr4* coverageInOut);
+ void emitAndInstallFragProc(const GrFragmentProcessor&,
+ int index,
+ int transformedCoordVarsIdx,
+ const GrGLSLExpr4& input,
+ GrGLSLExpr4* output);
+ void emitAndInstallXferProc(const GrXferProcessor&,
+ const GrGLSLExpr4& colorIn,
+ const GrGLSLExpr4& coverageIn,
+ bool ignoresCoverage,
+ GrPixelLocalStorageState plsState);
+
+ void emitSamplers(const GrProcessor& processor,
+ SkTArray<SamplerHandle>* outTexSamplers,
+ SkTArray<SamplerHandle>* outBufferSamplers);
+ void emitSampler(GrSLType samplerType,
+ GrPixelConfig,
+ const char* name,
+ GrShaderFlags visibility,
+ SkTArray<SamplerHandle>* outSamplers);
+ void emitFSOutputSwizzle(bool hasSecondaryOutput);
+ bool checkSamplerCounts();
+
+#ifdef SK_DEBUG
+ void verify(const GrPrimitiveProcessor&);
+ void verify(const GrXferProcessor&);
+ void verify(const GrFragmentProcessor&);
+#endif
+
+ int fNumVertexSamplers;
+ int fNumGeometrySamplers;
+ int fNumFragmentSamplers;
+ SkSTArray<4, GrShaderVar> fTransformedCoordVars;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramDataManager.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramDataManager.cpp
new file mode 100644
index 000000000..0803f7af6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramDataManager.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "glsl/GrGLSLProgramDataManager.h"
+
+#include "SkMatrix.h"
+#include "SkMatrix44.h"
+
+void GrGLSLProgramDataManager::setSkMatrix(UniformHandle u, const SkMatrix& matrix) const {
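+    // SkMatrix stores its nine values row-major; the array below reorders them into the
+    // column-major layout that setMatrix3f (and GLSL mat3 uniforms) expects.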
+ float mt[] = {
+ matrix.get(SkMatrix::kMScaleX),
+ matrix.get(SkMatrix::kMSkewY),
+ matrix.get(SkMatrix::kMPersp0),
+ matrix.get(SkMatrix::kMSkewX),
+ matrix.get(SkMatrix::kMScaleY),
+ matrix.get(SkMatrix::kMPersp1),
+ matrix.get(SkMatrix::kMTransX),
+ matrix.get(SkMatrix::kMTransY),
+ matrix.get(SkMatrix::kMPersp2),
+ };
+ this->setMatrix3f(u, mt);
+}
+
+void GrGLSLProgramDataManager::setSkMatrix44(UniformHandle u, const SkMatrix44& matrix) const {
+ // TODO: We could skip this temporary buffer if we had direct access to the matrix storage
+ float m[16];
+ matrix.asColMajorf(m);
+ this->setMatrix4f(u, m);
+}
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramDataManager.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramDataManager.h
new file mode 100644
index 000000000..8d58fc8b9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLProgramDataManager.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLProgramDataManager_DEFINED
+#define GrGLSLProgramDataManager_DEFINED
+
+#include "GrResourceHandle.h"
+#include "SkTypes.h"
+
+class SkMatrix;
+class SkMatrix44;
+
+/** Manages the resources used by a shader program.
+ * The resources are objects the program uses to communicate with the
+ * application code.
+ */
+class GrGLSLProgramDataManager : SkNoncopyable {
+public:
+ GR_DEFINE_RESOURCE_HANDLE_CLASS(UniformHandle);
+
+ virtual ~GrGLSLProgramDataManager() {}
+
+    /** Functions for uploading uniform values. The varieties ending in v can be used to upload to
+     *  an
+ * array of uniforms. arrayCount must be <= the array count of the uniform.
+ */
+ virtual void set1i(UniformHandle, int32_t) const = 0;
+ virtual void set1iv(UniformHandle, int arrayCount, const int v[]) const = 0;
+ virtual void set1f(UniformHandle, float v0) const = 0;
+ virtual void set1fv(UniformHandle, int arrayCount, const float v[]) const = 0;
+ virtual void set2f(UniformHandle, float, float) const = 0;
+ virtual void set2fv(UniformHandle, int arrayCount, const float v[]) const = 0;
+ virtual void set3f(UniformHandle, float, float, float) const = 0;
+ virtual void set3fv(UniformHandle, int arrayCount, const float v[]) const = 0;
+ virtual void set4f(UniformHandle, float, float, float, float) const = 0;
+ virtual void set4fv(UniformHandle, int arrayCount, const float v[]) const = 0;
+ // matrices are column-major, the first three upload a single matrix, the latter three upload
+ // arrayCount matrices into a uniform array.
+ virtual void setMatrix2f(UniformHandle, const float matrix[]) const = 0;
+ virtual void setMatrix3f(UniformHandle, const float matrix[]) const = 0;
+ virtual void setMatrix4f(UniformHandle, const float matrix[]) const = 0;
+ virtual void setMatrix2fv(UniformHandle, int arrayCount, const float matrices[]) const = 0;
+ virtual void setMatrix3fv(UniformHandle, int arrayCount, const float matrices[]) const = 0;
+ virtual void setMatrix4fv(UniformHandle, int arrayCount, const float matrices[]) const = 0;
+
+ // convenience method for uploading a SkMatrix to a 3x3 matrix uniform
+ void setSkMatrix(UniformHandle, const SkMatrix&) const;
+
+ // convenience method for uploading a SkMatrix44 to a 4x4 matrix uniform
+ void setSkMatrix44(UniformHandle, const SkMatrix44&) const;
+
+ // for nvpr only
+ GR_DEFINE_RESOURCE_HANDLE_CLASS(VaryingHandle);
+ virtual void setPathFragmentInputTransform(VaryingHandle u, int components,
+ const SkMatrix& matrix) const = 0;
+
+protected:
+ GrGLSLProgramDataManager() {}
+
+private:
+ typedef SkNoncopyable INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLSampler.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLSampler.h
new file mode 100644
index 000000000..bc9076929
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLSampler.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLSampler_DEFINED
+#define GrGLSLSampler_DEFINED
+
+#include "GrTypes.h"
+#include "GrTypesPriv.h"
+#include "SkString.h"
+
+class GrGLSLSampler {
+public:
+ virtual ~GrGLSLSampler() {}
+
+ explicit GrGLSLSampler(uint32_t visibility, GrPixelConfig config)
+ : fVisibility(visibility)
+ , fConfig(config) {
+ SkASSERT(kUnknown_GrPixelConfig != fConfig);
+ }
+
+ uint32_t visibility() const { return fVisibility; }
+ GrPixelConfig config() const { return fConfig; }
+ virtual GrSLType type() const = 0;
+
+ // Returns the string to be used for the sampler in glsl 2D texture functions (texture,
+ // texture2D, etc.)
+ const char* getSamplerNameForTexture2D() const {
+ SkASSERT(GrSLTypeIs2DCombinedSamplerType(this->type()));
+ return this->onGetSamplerNameForTexture2D();
+ }
+
+ // Returns the string to be used for the sampler in glsl texelFetch.
+ virtual const char* getSamplerNameForTexelFetch() const = 0;
+
+private:
+ virtual const char* onGetSamplerNameForTexture2D() const = 0;
+ uint32_t fVisibility;
+ GrPixelConfig fConfig;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLShaderBuilder.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLShaderBuilder.cpp
new file mode 100644
index 000000000..6d77bdbf9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLShaderBuilder.cpp
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrSwizzle.h"
+#include "glsl/GrGLSLShaderBuilder.h"
+#include "glsl/GrGLSLCaps.h"
+#include "glsl/GrGLSLColorSpaceXformHelper.h"
+#include "glsl/GrGLSLShaderVar.h"
+#include "glsl/GrGLSLSampler.h"
+#include "glsl/GrGLSLProgramBuilder.h"
+
+GrGLSLShaderBuilder::GrGLSLShaderBuilder(GrGLSLProgramBuilder* program)
+ : fProgramBuilder(program)
+ , fInputs(GrGLSLProgramBuilder::kVarsPerBlock)
+ , fOutputs(GrGLSLProgramBuilder::kVarsPerBlock)
+ , fFeaturesAddedMask(0)
+ , fCodeIndex(kCode)
+ , fFinalized(false) {
+ // We push back some dummy pointers which will later become our header
+ for (int i = 0; i <= kCode; i++) {
+ fShaderStrings.push_back();
+ fCompilerStrings.push_back(nullptr);
+ fCompilerStringLengths.push_back(0);
+ }
+
+ this->main() = "void main() {";
+}
+
+void GrGLSLShaderBuilder::declAppend(const GrGLSLShaderVar& var) {
+ SkString tempDecl;
+ var.appendDecl(fProgramBuilder->glslCaps(), &tempDecl);
+ this->codeAppendf("%s;", tempDecl.c_str());
+}
+
+void GrGLSLShaderBuilder::appendPrecisionModifier(GrSLPrecision precision) {
+ if (fProgramBuilder->glslCaps()->usesPrecisionModifiers()) {
+ this->codeAppendf("%s ", GrGLSLPrecisionString(precision));
+ }
+}
+
+void GrGLSLShaderBuilder::emitFunction(GrSLType returnType,
+ const char* name,
+ int argCnt,
+ const GrGLSLShaderVar* args,
+ const char* body,
+ SkString* outName) {
+ this->functions().append(GrGLSLTypeString(returnType));
+ fProgramBuilder->nameVariable(outName, '\0', name);
+ this->functions().appendf(" %s", outName->c_str());
+ this->functions().append("(");
+ for (int i = 0; i < argCnt; ++i) {
+ args[i].appendDecl(fProgramBuilder->glslCaps(), &this->functions());
+ if (i < argCnt - 1) {
+ this->functions().append(", ");
+ }
+ }
+ this->functions().append(") {\n");
+ this->functions().append(body);
+ this->functions().append("}\n\n");
+}
+
+void GrGLSLShaderBuilder::appendTextureLookup(SkString* out,
+ SamplerHandle samplerHandle,
+ const char* coordName,
+ GrSLType varyingType) const {
+ const GrGLSLCaps* glslCaps = fProgramBuilder->glslCaps();
+ const GrGLSLSampler& sampler = fProgramBuilder->getSampler(samplerHandle);
+ GrSLType samplerType = sampler.type();
+ if (samplerType == kTexture2DRectSampler_GrSLType) {
+ if (varyingType == kVec2f_GrSLType) {
+ out->appendf("%s(%s, textureSize(%s) * %s)",
+ GrGLSLTexture2DFunctionName(varyingType, samplerType,
+ glslCaps->generation()),
+ sampler.getSamplerNameForTexture2D(),
+ sampler.getSamplerNameForTexture2D(),
+ coordName);
+ } else {
+ out->appendf("%s(%s, vec3(textureSize(%s) * %s.xy, %s.z))",
+ GrGLSLTexture2DFunctionName(varyingType, samplerType,
+ glslCaps->generation()),
+ sampler.getSamplerNameForTexture2D(),
+ sampler.getSamplerNameForTexture2D(),
+ coordName,
+ coordName);
+ }
+ } else {
+ out->appendf("%s(%s, %s)",
+ GrGLSLTexture2DFunctionName(varyingType, samplerType, glslCaps->generation()),
+ sampler.getSamplerNameForTexture2D(),
+ coordName);
+ }
+
+ this->appendTextureSwizzle(out, sampler.config());
+}
+
+void GrGLSLShaderBuilder::appendTextureLookup(SamplerHandle samplerHandle,
+ const char* coordName,
+ GrSLType varyingType,
+ GrGLSLColorSpaceXformHelper* colorXformHelper) {
+ if (colorXformHelper && colorXformHelper->getXformMatrix()) {
+ // With a color gamut transform, we need to wrap the lookup in another function call
+ SkString lookup;
+ this->appendTextureLookup(&lookup, samplerHandle, coordName, varyingType);
+ this->appendColorGamutXform(lookup.c_str(), colorXformHelper);
+ } else {
+ this->appendTextureLookup(&this->code(), samplerHandle, coordName, varyingType);
+ }
+}
+
+void GrGLSLShaderBuilder::appendTextureLookupAndModulate(
+ const char* modulation,
+ SamplerHandle samplerHandle,
+ const char* coordName,
+ GrSLType varyingType,
+ GrGLSLColorSpaceXformHelper* colorXformHelper) {
+ SkString lookup;
+ this->appendTextureLookup(&lookup, samplerHandle, coordName, varyingType);
+ if (colorXformHelper && colorXformHelper->getXformMatrix()) {
+ SkString xform;
+ this->appendColorGamutXform(&xform, lookup.c_str(), colorXformHelper);
+ this->codeAppend((GrGLSLExpr4(modulation) * GrGLSLExpr4(xform)).c_str());
+ } else {
+ this->codeAppend((GrGLSLExpr4(modulation) * GrGLSLExpr4(lookup)).c_str());
+ }
+}
+
+void GrGLSLShaderBuilder::appendColorGamutXform(SkString* out,
+ const char* srcColor,
+ GrGLSLColorSpaceXformHelper* colorXformHelper) {
+ // Our color is (r, g, b, a), but we want to multiply (r, g, b, 1) by our matrix, then
+ // re-insert the original alpha. The supplied srcColor is likely to be of the form
+ // "texture(...)", and we don't want to evaluate that twice, so wrap everything in a function.
+ static const GrGLSLShaderVar gColorGamutXformArgs[] = {
+ GrGLSLShaderVar("color", kVec4f_GrSLType),
+ GrGLSLShaderVar("xform", kMat44f_GrSLType),
+ };
+ SkString functionBody;
+ // Gamut xform, clamp to destination gamut
+ functionBody.append("\tcolor.rgb = clamp((xform * vec4(color.rgb, 1.0)).rgb, 0.0, 1.0);\n");
+ functionBody.append("\treturn color;");
+ SkString colorGamutXformFuncName;
+ this->emitFunction(kVec4f_GrSLType,
+ "colorGamutXform",
+ SK_ARRAY_COUNT(gColorGamutXformArgs),
+ gColorGamutXformArgs,
+ functionBody.c_str(),
+ &colorGamutXformFuncName);
+
+ out->appendf("%s(%s, %s)", colorGamutXformFuncName.c_str(), srcColor,
+ colorXformHelper->getXformMatrix());
+}
+
+void GrGLSLShaderBuilder::appendColorGamutXform(const char* srcColor,
+ GrGLSLColorSpaceXformHelper* colorXformHelper) {
+ SkString xform;
+ this->appendColorGamutXform(&xform, srcColor, colorXformHelper);
+ this->codeAppend(xform.c_str());
+}
+
+void GrGLSLShaderBuilder::appendTexelFetch(SkString* out,
+ SamplerHandle samplerHandle,
+ const char* coordExpr) const {
+ const GrGLSLSampler& sampler = fProgramBuilder->getSampler(samplerHandle);
+ SkASSERT(fProgramBuilder->glslCaps()->texelFetchSupport());
+ SkASSERT(GrSLTypeIsCombinedSamplerType(sampler.type()));
+
+ out->appendf("texelFetch(%s, %s)", sampler.getSamplerNameForTexelFetch(), coordExpr);
+
+ this->appendTextureSwizzle(out, sampler.config());
+}
+
+void GrGLSLShaderBuilder::appendTexelFetch(SamplerHandle samplerHandle, const char* coordExpr) {
+ this->appendTexelFetch(&this->code(), samplerHandle, coordExpr);
+}
+
+void GrGLSLShaderBuilder::appendTextureSwizzle(SkString* out, GrPixelConfig config) const {
+ const GrSwizzle& configSwizzle = fProgramBuilder->glslCaps()->configTextureSwizzle(config);
+
+ if (configSwizzle != GrSwizzle::RGBA()) {
+ out->appendf(".%s", configSwizzle.c_str());
+ }
+}
+
+bool GrGLSLShaderBuilder::addFeature(uint32_t featureBit, const char* extensionName) {
+ if (featureBit & fFeaturesAddedMask) {
+ return false;
+ }
+ this->extensions().appendf("#extension %s: require\n", extensionName);
+ fFeaturesAddedMask |= featureBit;
+ return true;
+}
+
+void GrGLSLShaderBuilder::appendDecls(const VarArray& vars, SkString* out) const {
+ for (int i = 0; i < vars.count(); ++i) {
+ vars[i].appendDecl(fProgramBuilder->glslCaps(), out);
+ out->append(";\n");
+ }
+}
+
+void GrGLSLShaderBuilder::addLayoutQualifier(const char* param, InterfaceQualifier interface) {
+ SkASSERT(fProgramBuilder->glslCaps()->generation() >= k330_GrGLSLGeneration ||
+ fProgramBuilder->glslCaps()->mustEnableAdvBlendEqs());
+ fLayoutParams[interface].push_back() = param;
+}
+
+void GrGLSLShaderBuilder::compileAndAppendLayoutQualifiers() {
+ static const char* interfaceQualifierNames[] = {
+ "out"
+ };
+
+ for (int interface = 0; interface <= kLastInterfaceQualifier; ++interface) {
+ const SkTArray<SkString>& params = fLayoutParams[interface];
+ if (params.empty()) {
+ continue;
+ }
+ this->layoutQualifiers().appendf("layout(%s", params[0].c_str());
+ for (int i = 1; i < params.count(); ++i) {
+ this->layoutQualifiers().appendf(", %s", params[i].c_str());
+ }
+ this->layoutQualifiers().appendf(") %s;\n", interfaceQualifierNames[interface]);
+ }
+
+ GR_STATIC_ASSERT(0 == GrGLSLShaderBuilder::kOut_InterfaceQualifier);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(interfaceQualifierNames) == kLastInterfaceQualifier + 1);
+}
+
+void GrGLSLShaderBuilder::finalize(uint32_t visibility) {
+ SkASSERT(!fFinalized);
+ this->versionDecl() = fProgramBuilder->glslCaps()->versionDeclString();
+ this->compileAndAppendLayoutQualifiers();
+ SkASSERT(visibility);
+ fProgramBuilder->appendUniformDecls((GrShaderFlags) visibility, &this->uniforms());
+ this->appendDecls(fInputs, &this->inputs());
+ this->appendDecls(fOutputs, &this->outputs());
+ this->onFinalize();
+ // append the 'footer' to code
+ this->code().append("}");
+
+ for (int i = 0; i <= fCodeIndex; i++) {
+ fCompilerStrings[i] = fShaderStrings[i].c_str();
+ fCompilerStringLengths[i] = (int)fShaderStrings[i].size();
+ }
+
+ fFinalized = true;
+}
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLShaderBuilder.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLShaderBuilder.h
new file mode 100644
index 000000000..a6ff96dd1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLShaderBuilder.h
@@ -0,0 +1,263 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLShaderBuilder_DEFINED
+#define GrGLSLShaderBuilder_DEFINED
+
+#include "GrAllocator.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "glsl/GrGLSLShaderVar.h"
+#include "SkTDArray.h"
+
+#include <stdarg.h>
+
+class GrGLSLColorSpaceXformHelper;
+
+/**
+ Base class for all shader builders.
+*/
+class GrGLSLShaderBuilder {
+public:
+ GrGLSLShaderBuilder(GrGLSLProgramBuilder* program);
+ virtual ~GrGLSLShaderBuilder() {}
+
+ typedef GrGLSLUniformHandler::SamplerHandle SamplerHandle;
+
+ /** Appends a 2D texture sample with projection if necessary. coordType must either be Vec2f or
+ Vec3f. The latter is interpreted as projective texture coords. The vec length and swizzle
+ order of the result depends on the GrTextureAccess associated with the GrGLSLSampler.
+ */
+ void appendTextureLookup(SkString* out,
+ SamplerHandle,
+ const char* coordName,
+ GrSLType coordType = kVec2f_GrSLType) const;
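+    // Editorial sketch, not upstream documentation: for an ordinary 2D sampler named
+    // "uSampler0" (hypothetical) and a vec2 coordinate "vTexCoord", this appends roughly
+    //     texture(uSampler0, vTexCoord)      (GLSL 1.30 and later)
+    //     texture2D(uSampler0, vTexCoord)    (GLSL 1.10/1.20)
+    // followed by any swizzle required by the sampler's pixel config.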
+
+ /** Version of above that appends the result to the shader code instead.*/
+ void appendTextureLookup(SamplerHandle,
+ const char* coordName,
+ GrSLType coordType = kVec2f_GrSLType,
+ GrGLSLColorSpaceXformHelper* colorXformHelper = nullptr);
+
+
+ /** Does the work of appendTextureLookup and modulates the result by modulation. The result is
+ always a vec4. modulation and the swizzle specified by GrGLSLSampler must both be
+        vec4 or float. If modulation is "" or nullptr, this function acts as though
+ appendTextureLookup were called. */
+ void appendTextureLookupAndModulate(const char* modulation,
+ SamplerHandle,
+ const char* coordName,
+ GrSLType coordType = kVec2f_GrSLType,
+ GrGLSLColorSpaceXformHelper* colorXformHelper = nullptr);
+
+ /** Adds a helper function to facilitate color gamut transformation, and produces code that
+ returns the srcColor transformed into a new gamut (via multiplication by the xform from
+ colorXformHelper). Premultiplied sources are also handled correctly (colorXformHelper
+        determines if the source is premultiplied or not). */
+ void appendColorGamutXform(SkString* out, const char* srcColor,
+ GrGLSLColorSpaceXformHelper* colorXformHelper);
+
+ /** Version of above that appends the result to the shader code instead. */
+ void appendColorGamutXform(const char* srcColor, GrGLSLColorSpaceXformHelper* colorXformHelper);
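+    // Editorial sketch of the emitted expression (identifiers are hypothetical):
+    //     colorGamutXform_0(texture(uSampler0, vTexCoord), uColorXform)
+    // where colorGamutXform_0 is the helper added via emitFunction() and uColorXform is the
+    // mat4 uniform named by colorXformHelper->getXformMatrix().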
+
+ /** Fetches an unfiltered texel from a sampler at integer coordinates. coordExpr must match the
+ dimensionality of the sampler and must be within the sampler's range. coordExpr is emitted
+ exactly once, so expressions like "idx++" are acceptable. */
+ void appendTexelFetch(SkString* out, SamplerHandle, const char* coordExpr) const;
+
+ /** Version of above that appends the result to the shader code instead.*/
+ void appendTexelFetch(SamplerHandle, const char* coordExpr);
+
+ /**
+ * Adds a #define directive to the top of the shader.
+ */
+ void define(const char* macro, const char* replacement) {
+ this->definitions().appendf("#define %s %s\n", macro, replacement);
+ }
+
+ void define(const char* macro, int replacement) {
+ this->definitions().appendf("#define %s %i\n", macro, replacement);
+ }
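+    // Illustrative calls (macro names are hypothetical, not from upstream):
+    //     this->define("PI", "3.14159");  // emits "#define PI 3.14159"
+    //     this->define("NUM_TAPS", 8);    // emits "#define NUM_TAPS 8"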
+
+ void definef(const char* macro, const char* replacement, ...) {
+ this->definitions().appendf("#define %s ", macro);
+ va_list args;
+ va_start(args, replacement);
+ this->definitions().appendVAList(replacement, args);
+ va_end(args);
+ this->definitions().append("\n");
+ }
+
+ /**
+ * Called by GrGLSLProcessors to add code to one of the shaders.
+ */
+ void codeAppendf(const char format[], ...) SK_PRINTF_LIKE(2, 3) {
+ va_list args;
+ va_start(args, format);
+ this->code().appendVAList(format, args);
+ va_end(args);
+ }
+
+ void codeAppend(const char* str) { this->code().append(str); }
+
+ void codePrependf(const char format[], ...) SK_PRINTF_LIKE(2, 3) {
+ va_list args;
+ va_start(args, format);
+ this->code().prependVAList(format, args);
+ va_end(args);
+ }
+
+ /**
+ * Appends a variable declaration to one of the shaders
+ */
+ void declAppend(const GrGLSLShaderVar& var);
+
+ /**
+ * Appends a precision qualifier followed by a space, if relevant for the GLSL version.
+ */
+ void appendPrecisionModifier(GrSLPrecision);
+
+ /** Emits a helper function outside of main() in the fragment shader. */
+ void emitFunction(GrSLType returnType,
+ const char* name,
+ int argCnt,
+ const GrGLSLShaderVar* args,
+ const char* body,
+ SkString* outName);
+
+ /*
+ * Combines the various parts of the shader to create a single finalized shader string.
+ */
+ void finalize(uint32_t visibility);
+
+ /*
+ * Get parent builder for adding uniforms
+ */
+ GrGLSLProgramBuilder* getProgramBuilder() { return fProgramBuilder; }
+
+ /**
+     * Helper for beginning and ending a block in the shader code.
+ */
+ class ShaderBlock {
+ public:
+ ShaderBlock(GrGLSLShaderBuilder* builder) : fBuilder(builder) {
+ SkASSERT(builder);
+ fBuilder->codeAppend("{");
+ }
+
+ ~ShaderBlock() {
+ fBuilder->codeAppend("}");
+ }
+ private:
+ GrGLSLShaderBuilder* fBuilder;
+ };
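+    // Usage sketch (editorial; fragBuilder is a hypothetical GrGLSLShaderBuilder*):
+    //     {
+    //         GrGLSLShaderBuilder::ShaderBlock block(fragBuilder);  // appends "{"
+    //         fragBuilder->codeAppend("color *= 0.5;");
+    //     }                                                         // appends "}" on destruction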
+
+protected:
+ typedef GrTAllocator<GrGLSLShaderVar> VarArray;
+ void appendDecls(const VarArray& vars, SkString* out) const;
+
+ /**
+ * Features that should only be enabled internally by the builders.
+ */
+ enum GLSLPrivateFeature {
+ kFragCoordConventions_GLSLPrivateFeature,
+ kBlendEquationAdvanced_GLSLPrivateFeature,
+ kBlendFuncExtended_GLSLPrivateFeature,
+ kExternalTexture_GLSLPrivateFeature,
+ kTexelBuffer_GLSLPrivateFeature,
+ kFramebufferFetch_GLSLPrivateFeature,
+ kNoPerspectiveInterpolation_GLSLPrivateFeature,
+ kSampleVariables_GLSLPrivateFeature,
+ kSampleMaskOverrideCoverage_GLSLPrivateFeature,
+ kLastGLSLPrivateFeature = kSampleMaskOverrideCoverage_GLSLPrivateFeature
+ };
+
+ /*
+ * A general function which enables an extension in a shader if the feature bit is not present
+ *
+ * @return true if the feature bit was not yet present, false otherwise.
+ */
+ bool addFeature(uint32_t featureBit, const char* extensionName);
+
+ enum InterfaceQualifier {
+ kOut_InterfaceQualifier,
+ kLastInterfaceQualifier = kOut_InterfaceQualifier
+ };
+
+ /*
+ * A low level function to build default layout qualifiers.
+ *
+ * e.g. layout(param1, param2, ...) out;
+ *
+ * GLSL allows default layout qualifiers for in, out, and uniform.
+ */
+ void addLayoutQualifier(const char* param, InterfaceQualifier);
+
+ void compileAndAppendLayoutQualifiers();
+
+ /* Appends any swizzling we may need to get from some backend internal format to the format used
+     * in GrPixelConfig. If this is implemented by the GrGpu object, then the swizzle will be rgba. For
+ * shader prettiness we omit the swizzle rather than appending ".rgba".
+ */
+ void appendTextureSwizzle(SkString* out, GrPixelConfig) const;
+
+ void nextStage() {
+ fShaderStrings.push_back();
+ fCompilerStrings.push_back(this->code().c_str());
+ fCompilerStringLengths.push_back((int)this->code().size());
+ fCodeIndex++;
+ }
+
+ SkString& versionDecl() { return fShaderStrings[kVersionDecl]; }
+ SkString& extensions() { return fShaderStrings[kExtensions]; }
+ SkString& definitions() { return fShaderStrings[kDefinitions]; }
+ SkString& precisionQualifier() { return fShaderStrings[kPrecisionQualifier]; }
+ SkString& layoutQualifiers() { return fShaderStrings[kLayoutQualifiers]; }
+ SkString& uniforms() { return fShaderStrings[kUniforms]; }
+ SkString& inputs() { return fShaderStrings[kInputs]; }
+ SkString& outputs() { return fShaderStrings[kOutputs]; }
+ SkString& functions() { return fShaderStrings[kFunctions]; }
+ SkString& main() { return fShaderStrings[kMain]; }
+ SkString& code() { return fShaderStrings[fCodeIndex]; }
+
+ virtual void onFinalize() = 0;
+
+ enum {
+ kVersionDecl,
+ kExtensions,
+ kDefinitions,
+ kPrecisionQualifier,
+ kLayoutQualifiers,
+ kUniforms,
+ kInputs,
+ kOutputs,
+ kFunctions,
+ kMain,
+ kCode,
+ };
+
+ GrGLSLProgramBuilder* fProgramBuilder;
+ SkSTArray<kCode, const char*, true> fCompilerStrings;
+ SkSTArray<kCode, int, true> fCompilerStringLengths;
+ SkSTArray<kCode, SkString> fShaderStrings;
+ SkString fCode;
+ SkString fFunctions;
+ SkString fExtensions;
+
+ VarArray fInputs;
+ VarArray fOutputs;
+ uint32_t fFeaturesAddedMask;
+ SkSTArray<1, SkString> fLayoutParams[kLastInterfaceQualifier + 1];
+ int fCodeIndex;
+ bool fFinalized;
+
+ friend class GrGLSLProgramBuilder;
+ friend class GrGLProgramBuilder;
+ friend class GrGLSLVaryingHandler; // to access noperspective interpolation feature.
+ friend class GrGLPathProgramBuilder; // to access fInputs.
+ friend class GrVkPipelineStateBuilder;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLShaderVar.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLShaderVar.h
new file mode 100644
index 000000000..9d162ecaa
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLShaderVar.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLShaderVar_DEFINED
+#define GrGLSLShaderVar_DEFINED
+
+#include "GrShaderVar.h"
+#include "../glsl/GrGLSL.h"
+#include "../glsl/GrGLSLCaps.h"
+
+#define USE_UNIFORM_FLOAT_ARRAYS true
+
+/**
+ * Represents a variable in a shader
+ */
+class GrGLSLShaderVar : public GrShaderVar {
+public:
+ /**
+ * Defaults to a float with no precision specifier
+ */
+ GrGLSLShaderVar()
+ : GrShaderVar()
+ , fUseUniformFloatArrays(USE_UNIFORM_FLOAT_ARRAYS) {
+ }
+
+ GrGLSLShaderVar(const char* name, GrSLType type, int arrayCount = kNonArray,
+ GrSLPrecision precision = kDefault_GrSLPrecision)
+ : GrShaderVar(name, type, arrayCount, precision)
+ , fUseUniformFloatArrays(USE_UNIFORM_FLOAT_ARRAYS) {
+ SkASSERT(kVoid_GrSLType != type);
+ fUseUniformFloatArrays = USE_UNIFORM_FLOAT_ARRAYS;
+ }
+
+ GrGLSLShaderVar(const char* name, GrSLType type, TypeModifier typeModifier,
+ int arrayCount = kNonArray, GrSLPrecision precision = kDefault_GrSLPrecision)
+ : GrShaderVar(name, type, typeModifier, arrayCount, precision)
+ , fUseUniformFloatArrays(USE_UNIFORM_FLOAT_ARRAYS) {
+ SkASSERT(kVoid_GrSLType != type);
+ }
+
+ GrGLSLShaderVar(const GrShaderVar& var)
+ : GrShaderVar(var)
+ , fUseUniformFloatArrays(USE_UNIFORM_FLOAT_ARRAYS) {
+ SkASSERT(kVoid_GrSLType != var.getType());
+ }
+
+ GrGLSLShaderVar(const GrGLSLShaderVar& var)
+ : GrShaderVar(var.c_str(), var.getType(), var.getTypeModifier(),
+ var.getArrayCount(), var.getPrecision())
+ , fUseUniformFloatArrays(var.fUseUniformFloatArrays)
+ , fLayoutQualifier(var.fLayoutQualifier)
+ , fExtraModifiers(var.fExtraModifiers) {
+ SkASSERT(kVoid_GrSLType != var.getType());
+ }
+
+ /**
+ * Values for array count that have special meaning. We allow 1-sized arrays.
+ */
+ enum {
+ kNonArray = 0, // not an array
+ kUnsizedArray = -1, // an unsized array (declared with [])
+ };
+
+ /**
+ * Sets as a non-array.
+ */
+ void set(GrSLType type,
+ TypeModifier typeModifier,
+ const SkString& name,
+ GrSLPrecision precision = kDefault_GrSLPrecision,
+ const char* layoutQualifier = nullptr,
+ const char* extraModifiers = nullptr,
+ bool useUniformFloatArrays = USE_UNIFORM_FLOAT_ARRAYS) {
+ SkASSERT(kVoid_GrSLType != type);
+ SkASSERT(kDefault_GrSLPrecision == precision || GrSLTypeAcceptsPrecision(type));
+ INHERITED::set(type, name, typeModifier, precision);
+ fLayoutQualifier = layoutQualifier;
+ if (extraModifiers) {
+ fExtraModifiers.printf("%s ", extraModifiers);
+ }
+ fUseUniformFloatArrays = useUniformFloatArrays;
+ }
+
+ /**
+ * Sets as a non-array.
+ */
+ void set(GrSLType type,
+ TypeModifier typeModifier,
+ const char* name,
+ GrSLPrecision precision = kDefault_GrSLPrecision,
+ const char* layoutQualifier = nullptr,
+ const char* extraModifiers = nullptr,
+ bool useUniformFloatArrays = USE_UNIFORM_FLOAT_ARRAYS) {
+ SkASSERT(kVoid_GrSLType != type);
+ SkASSERT(kDefault_GrSLPrecision == precision || GrSLTypeAcceptsPrecision(type));
+ INHERITED::set(type, name, typeModifier, precision);
+ fLayoutQualifier = layoutQualifier;
+ if (extraModifiers) {
+ fExtraModifiers.printf("%s ", extraModifiers);
+ }
+ fUseUniformFloatArrays = useUniformFloatArrays;
+ }
+
+ /**
+ * Set all var options
+ */
+ void set(GrSLType type,
+ TypeModifier typeModifier,
+ const SkString& name,
+ int count,
+ GrSLPrecision precision = kDefault_GrSLPrecision,
+ const char* layoutQualifier = nullptr,
+ const char* extraModifiers = nullptr,
+ bool useUniformFloatArrays = USE_UNIFORM_FLOAT_ARRAYS) {
+ SkASSERT(kVoid_GrSLType != type);
+ SkASSERT(kDefault_GrSLPrecision == precision || GrSLTypeAcceptsPrecision(type));
+ INHERITED::set(type, name, typeModifier, precision, count);
+ fLayoutQualifier = layoutQualifier;
+ if (extraModifiers) {
+ fExtraModifiers.printf("%s ", extraModifiers);
+ }
+ fUseUniformFloatArrays = useUniformFloatArrays;
+ }
+
+ /**
+ * Set all var options
+ */
+ void set(GrSLType type,
+ TypeModifier typeModifier,
+ const char* name,
+ int count,
+ GrSLPrecision precision = kDefault_GrSLPrecision,
+ const char* layoutQualifier = nullptr,
+ const char* extraModifiers = nullptr,
+ bool useUniformFloatArrays = USE_UNIFORM_FLOAT_ARRAYS) {
+ SkASSERT(kVoid_GrSLType != type);
+ SkASSERT(kDefault_GrSLPrecision == precision || GrSLTypeAcceptsPrecision(type));
+ INHERITED::set(type, name, typeModifier, precision, count);
+ fLayoutQualifier = layoutQualifier;
+ if (extraModifiers) {
+ fExtraModifiers.printf("%s ", extraModifiers);
+ }
+ fUseUniformFloatArrays = useUniformFloatArrays;
+ }
+
+ /**
+ * Set the layout qualifier
+ */
+ void setLayoutQualifier(const char* layoutQualifier) {
+ fLayoutQualifier = layoutQualifier;
+ }
+
+ void addModifier(const char* modifier) {
+ if (modifier) {
+ fExtraModifiers.appendf("%s ", modifier);
+ }
+ }
+
+ /**
+ * Write a declaration of this variable to out.
+ */
+ void appendDecl(const GrGLSLCaps* glslCaps, SkString* out) const {
+ SkASSERT(kDefault_GrSLPrecision == fPrecision || GrSLTypeAcceptsPrecision(fType));
+ if (!fLayoutQualifier.isEmpty()) {
+ out->appendf("layout(%s) ", fLayoutQualifier.c_str());
+ }
+ out->append(fExtraModifiers);
+ if (this->getTypeModifier() != kNone_TypeModifier) {
+ out->append(TypeModifierString(glslCaps, this->getTypeModifier()));
+ out->append(" ");
+ }
+ GrSLType effectiveType = this->getType();
+ if (glslCaps->usesPrecisionModifiers() && GrSLTypeAcceptsPrecision(effectiveType)) {
+ // Desktop GLSL has added precision qualifiers but they don't do anything.
+ out->appendf("%s ", GrGLSLPrecisionString(fPrecision));
+ }
+ if (this->isArray()) {
+ if (this->isUnsizedArray()) {
+ out->appendf("%s %s[]",
+ GrGLSLTypeString(effectiveType),
+ this->getName().c_str());
+ } else {
+ SkASSERT(this->getArrayCount() > 0);
+ out->appendf("%s %s[%d]",
+ GrGLSLTypeString(effectiveType),
+ this->getName().c_str(),
+ this->getArrayCount());
+ }
+ } else {
+ out->appendf("%s %s",
+ GrGLSLTypeString(effectiveType),
+ this->getName().c_str());
+ }
+ }
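+    // Editorial example of the emitted text (name and qualifiers hypothetical): a vec2 var named
+    // "inPosition" with kIn_TypeModifier, kHigh_GrSLPrecision and layout qualifier "location = 0"
+    // yields, when precision modifiers are in use (e.g. GLSL ES):
+    //     layout(location = 0) in highp vec2 inPosition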
+
+ void appendArrayAccess(int index, SkString* out) const {
+ out->appendf("%s[%d]%s",
+ this->getName().c_str(),
+ index,
+ fUseUniformFloatArrays ? "" : ".x");
+ }
+
+ void appendArrayAccess(const char* indexName, SkString* out) const {
+ out->appendf("%s[%s]%s",
+ this->getName().c_str(),
+ indexName,
+ fUseUniformFloatArrays ? "" : ".x");
+ }
+
+private:
+ static const char* TypeModifierString(const GrGLSLCaps* glslCaps, TypeModifier t) {
+ GrGLSLGeneration gen = glslCaps->generation();
+ switch (t) {
+ case kNone_TypeModifier:
+ return "";
+ case kIn_TypeModifier:
+ return "in";
+ case kInOut_TypeModifier:
+ return "inout";
+ case kOut_TypeModifier:
+ return "out";
+ case kUniform_TypeModifier:
+ return "uniform";
+ case kAttribute_TypeModifier:
+ return k110_GrGLSLGeneration == gen ? "attribute" : "in";
+ case kVaryingIn_TypeModifier:
+ return k110_GrGLSLGeneration == gen ? "varying" : "in";
+ case kVaryingOut_TypeModifier:
+ return k110_GrGLSLGeneration == gen ? "varying" : "out";
+ default:
+ SkFAIL("Unknown shader variable type modifier.");
+ return ""; // suppress warning
+ }
+ }
+
+    /// Work around bugs in some drivers that don't correctly
+    /// support uniform float []
+ bool fUseUniformFloatArrays;
+
+ SkString fLayoutQualifier;
+ SkString fExtraModifiers;
+
+ typedef GrShaderVar INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLUniformHandler.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLUniformHandler.h
new file mode 100644
index 000000000..d7b213869
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLUniformHandler.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLUniformHandler_DEFINED
+#define GrGLSLUniformHandler_DEFINED
+
+#include "GrGLSLProgramDataManager.h"
+#include "GrGLSLShaderVar.h"
+
+class GrGLSLProgramBuilder;
+class GrGLSLSampler;
+
+class GrGLSLUniformHandler {
+public:
+ virtual ~GrGLSLUniformHandler() {}
+
+ typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+ typedef GrGLSLProgramDataManager::UniformHandle SamplerHandle;
+
+    /** Add a uniform variable to the current program that has visibility in one or more shaders.
+ visibility is a bitfield of GrShaderFlag values indicating from which shaders the uniform
+ should be accessible. At least one bit must be set. Geometry shader uniforms are not
+ supported at this time. The actual uniform name will be mangled. If outName is not nullptr
+ then it will refer to the final uniform name after return. Use the addUniformArray variant
+ to add an array of uniforms. */
+ UniformHandle addUniform(uint32_t visibility,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name,
+ const char** outName = nullptr) {
+ SkASSERT(!GrSLTypeIsCombinedSamplerType(type));
+ return this->addUniformArray(visibility, type, precision, name, 0, outName);
+ }
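+    // Illustrative call (editorial; the uniform name "Color" is hypothetical):
+    //     const char* uniName;
+    //     UniformHandle h = uniformHandler->addUniform(kFragment_GrShaderFlag, kVec4f_GrSLType,
+    //                                                  kDefault_GrSLPrecision, "Color", &uniName);
+    //     // uniName now refers to the mangled GLSL name to use in emitted shader code.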
+
+ UniformHandle addUniformArray(uint32_t visibility,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name,
+ int arrayCount,
+ const char** outName = nullptr) {
+ SkASSERT(!GrSLTypeIsCombinedSamplerType(type));
+ return this->internalAddUniformArray(visibility, type, precision, name, true, arrayCount,
+ outName);
+ }
+
+ virtual const GrGLSLShaderVar& getUniformVariable(UniformHandle u) const = 0;
+
+ /**
+ * Shortcut for getUniformVariable(u).c_str()
+ */
+ virtual const char* getUniformCStr(UniformHandle u) const = 0;
+
+protected:
+ explicit GrGLSLUniformHandler(GrGLSLProgramBuilder* program) : fProgramBuilder(program) {}
+
+ // This is not owned by the class
+ GrGLSLProgramBuilder* fProgramBuilder;
+
+private:
+ virtual int numSamplers() const = 0;
+ virtual const GrGLSLSampler& getSampler(SamplerHandle handle) const = 0;
+
+ SamplerHandle addSampler(uint32_t visibility,
+ GrPixelConfig config,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name) {
+ return this->internalAddSampler(visibility, config, type, precision, name);
+ }
+
+ virtual SamplerHandle internalAddSampler(uint32_t visibility,
+ GrPixelConfig config,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name) = 0;
+
+ virtual UniformHandle internalAddUniformArray(uint32_t visibility,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name,
+ bool mangleName,
+ int arrayCount,
+ const char** outName) = 0;
+
+ virtual void appendUniformDecls(GrShaderFlags visibility, SkString*) const = 0;
+
+ friend class GrGLSLProgramBuilder;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLUtil.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLUtil.cpp
new file mode 100644
index 000000000..5a333244b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLUtil.cpp
@@ -0,0 +1,52 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrGLSLUtil.h"
+#include "SkMatrix.h"
+
+template<> void GrGLSLGetMatrix<3>(float* dest, const SkMatrix& src) {
+ // Col 0
+ dest[0] = SkScalarToFloat(src[SkMatrix::kMScaleX]);
+ dest[1] = SkScalarToFloat(src[SkMatrix::kMSkewY]);
+ dest[2] = SkScalarToFloat(src[SkMatrix::kMPersp0]);
+
+ // Col 1
+ dest[3] = SkScalarToFloat(src[SkMatrix::kMSkewX]);
+ dest[4] = SkScalarToFloat(src[SkMatrix::kMScaleY]);
+ dest[5] = SkScalarToFloat(src[SkMatrix::kMPersp1]);
+
+ // Col 2
+ dest[6] = SkScalarToFloat(src[SkMatrix::kMTransX]);
+ dest[7] = SkScalarToFloat(src[SkMatrix::kMTransY]);
+ dest[8] = SkScalarToFloat(src[SkMatrix::kMPersp2]);
+}
+
+template<> void GrGLSLGetMatrix<4>(float* dest, const SkMatrix& src) {
+ // Col 0
+ dest[0] = SkScalarToFloat(src[SkMatrix::kMScaleX]);
+ dest[1] = SkScalarToFloat(src[SkMatrix::kMSkewY]);
+ dest[2] = 0;
+ dest[3] = SkScalarToFloat(src[SkMatrix::kMPersp0]);
+
+ // Col 1
+ dest[4] = SkScalarToFloat(src[SkMatrix::kMSkewX]);
+ dest[5] = SkScalarToFloat(src[SkMatrix::kMScaleY]);
+ dest[6] = 0;
+ dest[7] = SkScalarToFloat(src[SkMatrix::kMPersp1]);
+
+ // Col 2
+ dest[8] = 0;
+ dest[9] = 0;
+ dest[10] = 1;
+ dest[11] = 0;
+
+ // Col 3
+ dest[12] = SkScalarToFloat(src[SkMatrix::kMTransX]);
+ dest[13] = SkScalarToFloat(src[SkMatrix::kMTransY]);
+ dest[14] = 0;
+ dest[15] = SkScalarToFloat(src[SkMatrix::kMPersp2]);
+}
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLUtil.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLUtil.h
new file mode 100644
index 000000000..0d2b7e742
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLUtil.h
@@ -0,0 +1,19 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrGLSLUtil_DEFINED
+#define GrGLSLUtil_DEFINED
+
+class SkMatrix;
+
+/**
+ * Helper for converting SkMatrix to a column-major float array. We assume that all GLSL backends
+ * use a column major representation for matrices.
+ */
+template<int MatrixSize> void GrGLSLGetMatrix(float* dest, const SkMatrix& src);
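+// Caller-side sketch (editorial; variable names are hypothetical). The destination must hold
+// MatrixSize * MatrixSize floats:
+//     float mat[16];
+//     GrGLSLGetMatrix<4>(mat, viewMatrix);  // column-major floats, ready for a mat4 uniform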
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLVarying.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLVarying.cpp
new file mode 100644
index 000000000..f3b0405ff
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLVarying.cpp
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "glsl/GrGLSLVarying.h"
+
+#include "glsl/GrGLSLProgramBuilder.h"
+
+void GrGLSLVaryingHandler::addPassThroughAttribute(const GrGeometryProcessor::Attribute* input,
+ const char* output, GrSLPrecision precision) {
+ GrSLType type = GrVertexAttribTypeToSLType(input->fType);
+ GrGLSLVertToFrag v(type);
+ this->addVarying(input->fName, &v, precision);
+ this->writePassThroughAttribute(input, output, v);
+}
+
+void GrGLSLVaryingHandler::addFlatPassThroughAttribute(const GrGeometryProcessor::Attribute* input,
+ const char* output,
+ GrSLPrecision precision) {
+ GrSLType type = GrVertexAttribTypeToSLType(input->fType);
+ GrGLSLVertToFrag v(type);
+ this->addFlatVarying(input->fName, &v, precision);
+ this->writePassThroughAttribute(input, output, v);
+}
+
+void GrGLSLVaryingHandler::writePassThroughAttribute(const GrGeometryProcessor::Attribute* input,
+ const char* output, const GrGLSLVarying& v) {
+ fProgramBuilder->fVS.codeAppendf("%s = %s;", v.vsOut(), input->fName);
+
+ if (fProgramBuilder->primitiveProcessor().willUseGeoShader()) {
+ fProgramBuilder->fGS.codeAppendf("%s = %s[0];", v.gsOut(), v.gsIn());
+ }
+
+ fProgramBuilder->fFS.codeAppendf("%s = %s;", output, v.fsIn());
+}
+
+void GrGLSLVaryingHandler::internalAddVarying(const char* name,
+ GrGLSLVarying* varying,
+ GrSLPrecision precision,
+ bool flat) {
+ bool willUseGeoShader = fProgramBuilder->primitiveProcessor().willUseGeoShader();
+ VaryingInfo& v = fVaryings.push_back();
+
+ SkASSERT(varying);
+ v.fType = varying->fType;
+ v.fPrecision = precision;
+ v.fIsFlat = flat;
+ fProgramBuilder->nameVariable(&v.fVsOut, 'v', name);
+ v.fVisibility = kNone_GrShaderFlags;
+ if (varying->vsVarying()) {
+ varying->fVsOut = v.fVsOut.c_str();
+ v.fVisibility |= kVertex_GrShaderFlag;
+ }
+ if (willUseGeoShader) {
+ fProgramBuilder->nameVariable(&v.fGsOut, 'g', name);
+ varying->fGsIn = v.fVsOut.c_str();
+ varying->fGsOut = v.fGsOut.c_str();
+ v.fVisibility |= kGeometry_GrShaderFlag;
+ }
+ if (varying->fsVarying()) {
+ varying->fFsIn = (willUseGeoShader ? v.fGsOut : v.fVsOut).c_str();
+ v.fVisibility |= kFragment_GrShaderFlag;
+ }
+}
+
+void GrGLSLVaryingHandler::emitAttributes(const GrGeometryProcessor& gp) {
+ int vaCount = gp.numAttribs();
+ for (int i = 0; i < vaCount; i++) {
+ const GrGeometryProcessor::Attribute& attr = gp.getAttrib(i);
+ this->addAttribute(GrShaderVar(attr.fName,
+ GrVertexAttribTypeToSLType(attr.fType),
+ GrShaderVar::kAttribute_TypeModifier,
+ GrShaderVar::kNonArray,
+ attr.fPrecision));
+ }
+}
+
+void GrGLSLVaryingHandler::addAttribute(const GrShaderVar& var) {
+ SkASSERT(GrShaderVar::kAttribute_TypeModifier == var.getTypeModifier());
+ for (int j = 0; j < fVertexInputs.count(); ++j) {
+ const GrGLSLShaderVar& attr = fVertexInputs[j];
+ // if attribute already added, don't add it again
+ if (attr.getName().equals(var.getName())) {
+ return;
+ }
+ }
+ fVertexInputs.push_back(var);
+}
+
+void GrGLSLVaryingHandler::setNoPerspective() {
+ const GrGLSLCaps& caps = *fProgramBuilder->glslCaps();
+ if (!caps.noperspectiveInterpolationSupport()) {
+ return;
+ }
+ if (const char* extension = caps.noperspectiveInterpolationExtensionString()) {
+ int bit = 1 << GrGLSLFragmentBuilder::kNoPerspectiveInterpolation_GLSLPrivateFeature;
+ fProgramBuilder->fVS.addFeature(bit, extension);
+ if (fProgramBuilder->primitiveProcessor().willUseGeoShader()) {
+ fProgramBuilder->fGS.addFeature(bit, extension);
+ }
+ fProgramBuilder->fFS.addFeature(bit, extension);
+ }
+ fDefaultInterpolationModifier = "noperspective";
+}
+
+void GrGLSLVaryingHandler::finalize() {
+ for (int i = 0; i < fVaryings.count(); ++i) {
+ const VaryingInfo& v = this->fVaryings[i];
+ const char* modifier = v.fIsFlat ? "flat" : fDefaultInterpolationModifier;
+ if (v.fVisibility & kVertex_GrShaderFlag) {
+ fVertexOutputs.push_back().set(v.fType, GrShaderVar::kVaryingOut_TypeModifier, v.fVsOut,
+ v.fPrecision, nullptr, modifier);
+ if (v.fVisibility & kGeometry_GrShaderFlag) {
+ fGeomInputs.push_back().set(v.fType, GrShaderVar::kVaryingIn_TypeModifier, v.fVsOut,
+ GrShaderVar::kUnsizedArray, v.fPrecision, nullptr,
+ modifier);
+ }
+ }
+ if (v.fVisibility & kFragment_GrShaderFlag) {
+ const char* fsIn = v.fVsOut.c_str();
+ if (v.fVisibility & kGeometry_GrShaderFlag) {
+ fGeomOutputs.push_back().set(v.fType, GrGLSLShaderVar::kVaryingOut_TypeModifier,
+ v.fGsOut, v.fPrecision, nullptr, modifier);
+ fsIn = v.fGsOut.c_str();
+ }
+ fFragInputs.push_back().set(v.fType, GrShaderVar::kVaryingIn_TypeModifier, fsIn,
+ v.fPrecision, nullptr, modifier);
+ }
+ }
+ this->onFinalize();
+}
+
+void GrGLSLVaryingHandler::appendDecls(const VarArray& vars, SkString* out) const {
+ for (int i = 0; i < vars.count(); ++i) {
+ vars[i].appendDecl(fProgramBuilder->glslCaps(), out);
+ out->append(";");
+ }
+}
+
+void GrGLSLVaryingHandler::getVertexDecls(SkString* inputDecls, SkString* outputDecls) const {
+ this->appendDecls(fVertexInputs, inputDecls);
+ this->appendDecls(fVertexOutputs, outputDecls);
+}
+
+void GrGLSLVaryingHandler::getGeomDecls(SkString* inputDecls, SkString* outputDecls) const {
+ this->appendDecls(fGeomInputs, inputDecls);
+ this->appendDecls(fGeomOutputs, outputDecls);
+}
+
+void GrGLSLVaryingHandler::getFragDecls(SkString* inputDecls, SkString* outputDecls) const {
+ // We should not have any outputs in the fragment shader when using version 1.10
+ SkASSERT(k110_GrGLSLGeneration != fProgramBuilder->glslCaps()->generation() ||
+ fFragOutputs.empty());
+ this->appendDecls(fFragInputs, inputDecls);
+ this->appendDecls(fFragOutputs, outputDecls);
+}
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLVarying.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLVarying.h
new file mode 100644
index 000000000..5867361ce
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLVarying.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLVarying_DEFINED
+#define GrGLSLVarying_DEFINED
+
+#include "GrAllocator.h"
+#include "GrGeometryProcessor.h"
+#include "GrTypesPriv.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLShaderVar.h"
+
+class GrGLSLProgramBuilder;
+
+class GrGLSLVarying {
+public:
+ bool vsVarying() const { return kVertToFrag_Varying == fVarying ||
+ kVertToGeo_Varying == fVarying; }
+ bool fsVarying() const { return kVertToFrag_Varying == fVarying ||
+ kGeoToFrag_Varying == fVarying; }
+ const char* vsOut() const { return fVsOut; }
+ const char* gsIn() const { return fGsIn; }
+ const char* gsOut() const { return fGsOut; }
+ const char* fsIn() const { return fFsIn; }
+ GrSLType type() const { return fType; }
+
+protected:
+ enum Varying {
+ kVertToFrag_Varying,
+ kVertToGeo_Varying,
+ kGeoToFrag_Varying,
+ };
+
+ GrGLSLVarying(GrSLType type, Varying varying)
+ : fVarying(varying), fType(type), fVsOut(nullptr), fGsIn(nullptr), fGsOut(nullptr),
+ fFsIn(nullptr) {}
+
+ Varying fVarying;
+
+private:
+ GrSLType fType;
+ const char* fVsOut;
+ const char* fGsIn;
+ const char* fGsOut;
+ const char* fFsIn;
+
+ friend class GrGLSLVaryingHandler;
+};
+
+struct GrGLSLVertToFrag : public GrGLSLVarying {
+ GrGLSLVertToFrag(GrSLType type)
+ : GrGLSLVarying(type, kVertToFrag_Varying) {}
+};
+
+struct GrGLSLVertToGeo : public GrGLSLVarying {
+ GrGLSLVertToGeo(GrSLType type)
+ : GrGLSLVarying(type, kVertToGeo_Varying) {}
+};
+
+struct GrGLSLGeoToFrag : public GrGLSLVarying {
+ GrGLSLGeoToFrag(GrSLType type)
+ : GrGLSLVarying(type, kGeoToFrag_Varying) {}
+};
+
+static const int kVaryingsPerBlock = 8;
+
+class GrGLSLVaryingHandler {
+public:
+ explicit GrGLSLVaryingHandler(GrGLSLProgramBuilder* program)
+ : fVaryings(kVaryingsPerBlock)
+ , fVertexInputs(kVaryingsPerBlock)
+ , fVertexOutputs(kVaryingsPerBlock)
+ , fGeomInputs(kVaryingsPerBlock)
+ , fGeomOutputs(kVaryingsPerBlock)
+ , fFragInputs(kVaryingsPerBlock)
+ , fFragOutputs(kVaryingsPerBlock)
+ , fProgramBuilder(program)
+ , fDefaultInterpolationModifier(nullptr) {}
+
+ virtual ~GrGLSLVaryingHandler() {}
+
+ /*
+ * Notifies the varying handler that this shader will never emit geometry in perspective and
+ * therefore does not require perspective-correct interpolation. When supported, this allows
+ * varyings to use the "noperspective" keyword, which means the GPU can use cheaper math for
+ * interpolation.
+ */
+ void setNoPerspective();
+
+ /*
+     * addVarying allows fine-grained control for setting up varyings between stages. Calling this
+     * function will make sure all necessary decls are set up for the client. The client, however,
+     * is responsible for setting up all shader code (e.g. "vOut = vIn;"). If you just need to take an
+ * attribute and pass it through to an output value in a fragment shader, use
+ * addPassThroughAttribute.
+ * TODO convert most uses of addVarying to addPassThroughAttribute
+ */
+ void addVarying(const char* name,
+ GrGLSLVarying* varying,
+ GrSLPrecision precision = kDefault_GrSLPrecision) {
+ SkASSERT(GrSLTypeIsFloatType(varying->type())); // Integers must use addFlatVarying.
+ this->internalAddVarying(name, varying, precision, false /*flat*/);
+ }
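+    // Sketch of typical use from a processor's emit code (editorial; names are hypothetical):
+    //     GrGLSLVertToFrag v(kVec2f_GrSLType);
+    //     varyingHandler->addVarying("TexCoord", &v);
+    //     vertBuilder->codeAppendf("%s = inTexCoord;", v.vsOut());
+    //     fragBuilder->codeAppendf("vec2 uv = %s;", v.fsIn());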
+
+ /*
+ * addFlatVarying sets up a varying whose value is constant across every fragment. The graphics
+ * pipeline will pull its value from the final vertex of the draw primitive (provoking vertex).
+ * Flat interpolation is not always supported and the user must check the caps before using.
+ * TODO: Some platforms can change the provoking vertex. Should we be resetting this knob?
+ */
+ void addFlatVarying(const char* name,
+ GrGLSLVarying* varying,
+ GrSLPrecision precision = kDefault_GrSLPrecision) {
+ this->internalAddVarying(name, varying, precision, true /*flat*/);
+ }
+
+ /*
+ * The GP can use these calls to pass an attribute through all shaders directly to 'output' in
+ * the fragment shader. Though these calls affect both the vertex shader and fragment shader,
+ * they expect 'output' to be defined in the fragment shader before the call is made. If there
+ * is a geometry shader, we will simply take the value of the varying from the first vertex and
+ * that will be set as the output varying for all emitted vertices.
+ * TODO it might be nicer behavior to have a flag to declare output inside these calls
+ */
+ void addPassThroughAttribute(const GrGeometryProcessor::Attribute*, const char* output,
+ GrSLPrecision = kDefault_GrSLPrecision);
+ void addFlatPassThroughAttribute(const GrGeometryProcessor::Attribute*, const char* output,
+ GrSLPrecision = kDefault_GrSLPrecision);
+
+ void emitAttributes(const GrGeometryProcessor& gp);
+
+ // This should be called once all attributes and varyings have been added to the
+    // GrGLSLVaryingHandler and before getting/adding any of the declarations to the shaders.
+ void finalize();
+
+ void getVertexDecls(SkString* inputDecls, SkString* outputDecls) const;
+ void getGeomDecls(SkString* inputDecls, SkString* outputDecls) const;
+ void getFragDecls(SkString* inputDecls, SkString* outputDecls) const;
+
+protected:
+ struct VaryingInfo {
+ GrSLType fType;
+ GrSLPrecision fPrecision;
+ bool fIsFlat;
+ SkString fVsOut;
+ SkString fGsOut;
+ GrShaderFlags fVisibility;
+ };
+
+ typedef GrTAllocator<VaryingInfo> VaryingList;
+ typedef GrTAllocator<GrGLSLShaderVar> VarArray;
+ typedef GrGLSLProgramDataManager::VaryingHandle VaryingHandle;
+
+ VaryingList fVaryings;
+ VarArray fVertexInputs;
+ VarArray fVertexOutputs;
+ VarArray fGeomInputs;
+ VarArray fGeomOutputs;
+ VarArray fFragInputs;
+ VarArray fFragOutputs;
+
+ // This is not owned by the class
+ GrGLSLProgramBuilder* fProgramBuilder;
+
+private:
+ void internalAddVarying(const char* name, GrGLSLVarying*, GrSLPrecision, bool flat);
+ void writePassThroughAttribute(const GrGeometryProcessor::Attribute*, const char* output,
+ const GrGLSLVarying&);
+
+ void addAttribute(const GrShaderVar& var);
+
+ virtual void onFinalize() = 0;
+
+ // helper function for get*Decls
+ void appendDecls(const VarArray& vars, SkString* out) const;
+
+ const char* fDefaultInterpolationModifier;
+
+ friend class GrGLSLProgramBuilder;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLVertexShaderBuilder.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLVertexShaderBuilder.cpp
new file mode 100644
index 000000000..f8302b38f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLVertexShaderBuilder.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLSLVertexShaderBuilder.h"
+#include "glsl/GrGLSLProgramBuilder.h"
+#include "glsl/GrGLSLUniformHandler.h"
+#include "glsl/GrGLSLVarying.h"
+
+GrGLSLVertexBuilder::GrGLSLVertexBuilder(GrGLSLProgramBuilder* program)
+ : INHERITED(program)
+ , fRtAdjustName(nullptr) {
+}
+
+void GrGLSLVertexBuilder::transformToNormalizedDeviceSpace(const GrShaderVar& posVar) {
+ SkASSERT(!fRtAdjustName);
+
+ // setup RT Uniform
+ fProgramBuilder->addRTAdjustmentUniform(kHigh_GrSLPrecision,
+ fProgramBuilder->rtAdjustment(),
+ &fRtAdjustName);
+ if (this->getProgramBuilder()->desc().header().fSnapVerticesToPixelCenters) {
+ if (kVec3f_GrSLType == posVar.getType()) {
+ const char* p = posVar.c_str();
+ this->codeAppendf("{vec2 _posTmp = vec2(%s.x/%s.z, %s.y/%s.z);", p, p, p, p);
+ } else {
+ SkASSERT(kVec2f_GrSLType == posVar.getType());
+ this->codeAppendf("{vec2 _posTmp = %s;", posVar.c_str());
+ }
+ this->codeAppendf("_posTmp = floor(_posTmp) + vec2(0.5, 0.5);"
+ "gl_Position = vec4(_posTmp.x * %s.x + %s.y,"
+ "_posTmp.y * %s.z + %s.w, 0, 1);}",
+ fRtAdjustName, fRtAdjustName, fRtAdjustName, fRtAdjustName);
+ } else if (kVec3f_GrSLType == posVar.getType()) {
+ this->codeAppendf("gl_Position = vec4(dot(%s.xz, %s.xy), dot(%s.yz, %s.zw), 0, %s.z);",
+ posVar.c_str(), fRtAdjustName,
+ posVar.c_str(), fRtAdjustName,
+ posVar.c_str());
+ } else {
+ SkASSERT(kVec2f_GrSLType == posVar.getType());
+ this->codeAppendf("gl_Position = vec4(%s.x * %s.x + %s.y, %s.y * %s.z + %s.w, 0, 1);",
+ posVar.c_str(), fRtAdjustName, fRtAdjustName,
+ posVar.c_str(), fRtAdjustName, fRtAdjustName);
+ }
+    // We could have the GrGeometryProcessor do this, but it's just easier to have it performed
+    // here. If we ever need to set a variable point size, then we can reinvestigate.
+ if (this->getProgramBuilder()->desc().header().fHasPointSize) {
+ this->codeAppend("gl_PointSize = 1.0;");
+ }
+}
+
+void GrGLSLVertexBuilder::onFinalize() {
+ fProgramBuilder->varyingHandler()->getVertexDecls(&this->inputs(), &this->outputs());
+}
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLVertexShaderBuilder.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLVertexShaderBuilder.h
new file mode 100644
index 000000000..af8d10c2c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLVertexShaderBuilder.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLVertexShader_DEFINED
+#define GrGLSLVertexShader_DEFINED
+
+#include "GrGLSLShaderBuilder.h"
+#include "GrGeometryProcessor.h"
+
+class GrGLSLVarying;
+
+// Enough precision to represent 1 / 2048 accurately in printf
+#define GR_SIGNIFICANT_POW2_DECIMAL_DIG 11
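+// (1/2048 = 0.00048828125, which has 11 digits after the decimal point.)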
+
+class GrGLSLVertexBuilder : public GrGLSLShaderBuilder {
+public:
+ GrGLSLVertexBuilder(GrGLSLProgramBuilder* program);
+
+ void transformToNormalizedDeviceSpace(const GrShaderVar& posVar);
+private:
+ void onFinalize() override;
+
+ const char* fRtAdjustName;
+
+ friend class GrGLProgramBuilder;
+
+ typedef GrGLSLShaderBuilder INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLXferProcessor.cpp b/gfx/skia/skia/src/gpu/glsl/GrGLSLXferProcessor.cpp
new file mode 100644
index 000000000..0f7a3db71
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLXferProcessor.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "glsl/GrGLSLXferProcessor.h"
+
+#include "GrXferProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLUniformHandler.h"
+
+void GrGLSLXferProcessor::emitCode(const EmitArgs& args) {
+ if (!args.fXP.willReadDstColor()) {
+ this->emitOutputsForBlendState(args);
+ return;
+ }
+
+ GrGLSLXPFragmentBuilder* fragBuilder = args.fXPFragBuilder;
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ const char* dstColor = fragBuilder->dstColor();
+
+ bool needsLocalOutColor = false;
+
+ if (args.fXP.getDstTexture()) {
+ bool topDown = kTopLeft_GrSurfaceOrigin == args.fXP.getDstTexture()->origin();
+
+ if (args.fInputCoverage) {
+ // We don't think any shaders actually output negative coverage, but just as a safety
+ // check for floating point precision errors we compare with <= here
+ fragBuilder->codeAppendf("if (all(lessThanEqual(%s, vec4(0)))) {"
+ " discard;"
+ "}", args.fInputCoverage);
+ }
+
+ const char* dstTopLeftName;
+ const char* dstCoordScaleName;
+
+ fDstTopLeftUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType,
+ kDefault_GrSLPrecision,
+ "DstTextureUpperLeft",
+ &dstTopLeftName);
+ fDstScaleUni = uniformHandler->addUniform(kFragment_GrShaderFlag,
+ kVec2f_GrSLType,
+ kDefault_GrSLPrecision,
+ "DstTextureCoordScale",
+ &dstCoordScaleName);
+ const char* fragPos = fragBuilder->fragmentPosition();
+
+ fragBuilder->codeAppend("// Read color from copy of the destination.\n");
+ fragBuilder->codeAppendf("vec2 _dstTexCoord = (%s.xy - %s) * %s;",
+ fragPos, dstTopLeftName, dstCoordScaleName);
+
+ if (!topDown) {
+ fragBuilder->codeAppend("_dstTexCoord.y = 1.0 - _dstTexCoord.y;");
+ }
+
+ fragBuilder->codeAppendf("vec4 %s = ", dstColor);
+ fragBuilder->appendTextureLookup(args.fTexSamplers[0], "_dstTexCoord", kVec2f_GrSLType);
+ fragBuilder->codeAppend(";");
+ } else {
+ needsLocalOutColor = args.fGLSLCaps->requiresLocalOutputColorForFBFetch();
+ }
+
+ const char* outColor = "_localColorOut";
+ if (!needsLocalOutColor) {
+ outColor = args.fOutputPrimary;
+ } else {
+ fragBuilder->codeAppendf("vec4 %s;", outColor);
+ }
+
+ this->emitBlendCodeForDstRead(fragBuilder,
+ uniformHandler,
+ args.fInputColor,
+ args.fInputCoverage,
+ dstColor,
+ outColor,
+ args.fOutputSecondary,
+ args.fXP);
+ if (needsLocalOutColor) {
+ fragBuilder->codeAppendf("%s = %s;", args.fOutputPrimary, outColor);
+ }
+}
+
+void GrGLSLXferProcessor::setData(const GrGLSLProgramDataManager& pdm, const GrXferProcessor& xp) {
+ if (xp.getDstTexture()) {
+ if (fDstTopLeftUni.isValid()) {
+ pdm.set2f(fDstTopLeftUni, static_cast<float>(xp.dstTextureOffset().fX),
+ static_cast<float>(xp.dstTextureOffset().fY));
+ pdm.set2f(fDstScaleUni, 1.f / xp.getDstTexture()->width(),
+ 1.f / xp.getDstTexture()->height());
+ } else {
+ SkASSERT(!fDstScaleUni.isValid());
+ }
+ } else {
+ SkASSERT(!fDstTopLeftUni.isValid());
+ SkASSERT(!fDstScaleUni.isValid());
+ }
+ this->onSetData(pdm, xp);
+}
+
+void GrGLSLXferProcessor::DefaultCoverageModulation(GrGLSLXPFragmentBuilder* fragBuilder,
+ const char* srcCoverage,
+ const char* dstColor,
+ const char* outColor,
+ const char* outColorSecondary,
+ const GrXferProcessor& proc) {
+ if (proc.dstReadUsesMixedSamples()) {
+ if (srcCoverage) {
+ fragBuilder->codeAppendf("%s *= %s;", outColor, srcCoverage);
+ fragBuilder->codeAppendf("%s = %s;", outColorSecondary, srcCoverage);
+ } else {
+ fragBuilder->codeAppendf("%s = vec4(1.0);", outColorSecondary);
+ }
+ } else if (srcCoverage) {
+ fragBuilder->codeAppendf("%s = %s * %s + (vec4(1.0) - %s) * %s;",
+ outColor, srcCoverage, outColor, srcCoverage, dstColor);
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSLXferProcessor.h b/gfx/skia/skia/src/gpu/glsl/GrGLSLXferProcessor.h
new file mode 100644
index 000000000..f4a8ebdfa
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSLXferProcessor.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSLXferProcessor_DEFINED
+#define GrGLSLXferProcessor_DEFINED
+
+#include "glsl/GrGLSLProgramDataManager.h"
+#include "glsl/GrGLSLSampler.h"
+
+class GrXferProcessor;
+class GrGLSLCaps;
+class GrGLSLUniformHandler;
+class GrGLSLXPBuilder;
+class GrGLSLXPFragmentBuilder;
+
+class GrGLSLXferProcessor {
+public:
+ GrGLSLXferProcessor() {}
+ virtual ~GrGLSLXferProcessor() {}
+
+ typedef GrGLSLProgramDataManager::UniformHandle SamplerHandle;
+
+ struct EmitArgs {
+ EmitArgs(GrGLSLXPFragmentBuilder* fragBuilder,
+ GrGLSLUniformHandler* uniformHandler,
+ const GrGLSLCaps* caps,
+ const GrXferProcessor& xp,
+ const char* inputColor,
+ const char* inputCoverage,
+ const char* outputPrimary,
+ const char* outputSecondary,
+ const SamplerHandle* texSamplers,
+ const SamplerHandle* bufferSamplers,
+ const bool usePLSDstRead)
+ : fXPFragBuilder(fragBuilder)
+ , fUniformHandler(uniformHandler)
+ , fGLSLCaps(caps)
+ , fXP(xp)
+ , fInputColor(inputColor)
+ , fInputCoverage(inputCoverage)
+ , fOutputPrimary(outputPrimary)
+ , fOutputSecondary(outputSecondary)
+ , fTexSamplers(texSamplers)
+ , fBufferSamplers(bufferSamplers)
+ , fUsePLSDstRead(usePLSDstRead) {}
+
+ GrGLSLXPFragmentBuilder* fXPFragBuilder;
+ GrGLSLUniformHandler* fUniformHandler;
+ const GrGLSLCaps* fGLSLCaps;
+ const GrXferProcessor& fXP;
+ const char* fInputColor;
+ const char* fInputCoverage;
+ const char* fOutputPrimary;
+ const char* fOutputSecondary;
+ const SamplerHandle* fTexSamplers;
+ const SamplerHandle* fBufferSamplers;
+ bool fUsePLSDstRead;
+ };
+ /**
+     * Emits the fragment shader code for this xfer processor. Depending on whether the
+     * GrXferProcessor reads the dst color, this dispatches to emitOutputsForBlendState() or to
+     * the dst-read blend path that calls emitBlendCodeForDstRead().
+ */
+ void emitCode(const EmitArgs&);
+
+    /** A GrGLSLXferProcessor instance can be reused with any GrXferProcessor that produces
+        the same stage key; this function reads data from a GrXferProcessor and uploads any
+        uniform variables required by the shaders created in emitCode(). The GrXferProcessor
+        parameter is guaranteed to be of the same type that created this GrGLSLXferProcessor and
+        to have an identical processor key as the one that created this GrGLSLXferProcessor. This
+        function calls onSetData on the subclass of GrGLSLXferProcessor.
+ */
+ void setData(const GrGLSLProgramDataManager& pdm, const GrXferProcessor& xp);
+
+protected:
+ static void DefaultCoverageModulation(GrGLSLXPFragmentBuilder* fragBuilder,
+ const char* srcCoverage,
+ const char* dstColor,
+ const char* outColor,
+ const char* outColorSecondary,
+ const GrXferProcessor& proc);
+
+private:
+ /**
+ * Called by emitCode() when the XP will not be performing a dst read. This method is
+ * responsible for both blending and coverage. A subclass only needs to implement this method if
+ * it can construct a GrXferProcessor that will not read the dst color.
+ */
+ virtual void emitOutputsForBlendState(const EmitArgs&) {
+ SkFAIL("emitOutputsForBlendState not implemented.");
+ }
+
+ /**
+ * Called by emitCode() when the XP will perform a dst read. This method only needs to supply
+ * the blending logic. The base class applies coverage. A subclass only needs to implement this
+ * method if it can construct a GrXferProcessor that reads the dst color.
+ */
+ virtual void emitBlendCodeForDstRead(GrGLSLXPFragmentBuilder*,
+ GrGLSLUniformHandler*,
+ const char* srcColor,
+ const char* srcCoverage,
+ const char* dstColor,
+ const char* outColor,
+ const char* outColorSecondary,
+ const GrXferProcessor&) {
+ SkFAIL("emitBlendCodeForDstRead not implemented.");
+ }
+
+ virtual void onSetData(const GrGLSLProgramDataManager&, const GrXferProcessor&) = 0;
+
+ GrGLSLProgramDataManager::UniformHandle fDstTopLeftUni;
+ GrGLSLProgramDataManager::UniformHandle fDstScaleUni;
+};
+#endif
diff --git a/gfx/skia/skia/src/gpu/glsl/GrGLSL_impl.h b/gfx/skia/skia/src/gpu/glsl/GrGLSL_impl.h
new file mode 100644
index 000000000..bdd69cc76
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/glsl/GrGLSL_impl.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLSL_impl_DEFINED
+#define GrGLSL_impl_DEFINED
+
+template<typename Self>
+template<typename T>
+inline Self GrGLSLExpr<Self>::VectorCastImpl(const T& expr) {
+ if (expr.isZeros()) {
+ return Self(0);
+ }
+ if (expr.isOnes()) {
+ return Self(1);
+ }
+ return Self(Self::CastStr(), expr.c_str());
+}
+
+template<typename Self>
+template<typename T0, typename T1>
+inline Self GrGLSLExpr<Self>::Mul(T0 in0, T1 in1) {
+ if (in0.isZeros() || in1.isZeros()) {
+ return Self(0);
+ }
+ if (in0.isOnes()) {
+ return Self::VectorCast(in1);
+ }
+ if (in1.isOnes()) {
+ return Self::VectorCast(in0);
+ }
+ return Self("(%s * %s)", in0.c_str(), in1.c_str());
+}
+
+template<typename Self>
+template<typename T0, typename T1>
+inline Self GrGLSLExpr<Self>::Add(T0 in0, T1 in1) {
+ if (in1.isZeros()) {
+ return Self::VectorCast(in0);
+ }
+ if (in0.isZeros()) {
+ return Self::VectorCast(in1);
+ }
+ if (in0.isOnes() && in1.isOnes()) {
+ return Self(2);
+ }
+ return Self("(%s + %s)", in0.c_str(), in1.c_str());
+}
+
+template<typename Self>
+template<typename T0, typename T1>
+inline Self GrGLSLExpr<Self>::Sub(T0 in0, T1 in1) {
+ if (in1.isZeros()) {
+ return Self::VectorCast(in0);
+ }
+ if (in1.isOnes()) {
+ if (in0.isOnes()) {
+ return Self(0);
+ }
+ }
+
+ return Self("(%s - %s)", in0.c_str(), in1.c_str());
+}
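+// Editorial note on the folding above: when an operand is a known zeros/ones expression,
+// Mul/Add/Sub fold rather than emitting GLSL text. For example, multiplying an expression by a
+// ones value returns that expression's string unchanged, while multiplying by a zeros value
+// returns a zeros expression (whose c_str() is "vec4(0)" for GrGLSLExpr4).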
+
+template <typename Self>
+template <typename T>
+T GrGLSLExpr<Self>::extractComponents(const char format[]) const {
+ if (this->isZeros()) {
+ return T(0);
+ }
+ if (this->isOnes()) {
+ return T(1);
+ }
+ return T(format, this->c_str());
+}
+
+inline GrGLSLExpr1 GrGLSLExpr1::VectorCast(const GrGLSLExpr1& expr) {
+ return expr;
+}
+
+inline const char* GrGLSLExpr1::ZerosStr() {
+ return "0";
+}
+
+inline const char* GrGLSLExpr1::OnesStr() {
+ return "1.0";
+}
+
+// GrGLSLExpr1::CastStr() is unimplemented because using it is likely an
+// error. This is now caught at compile time.
+
+inline const char* GrGLSLExpr1::CastIntStr() {
+ return "%d";
+}
+
+inline GrGLSLExpr1 operator*(const GrGLSLExpr1& in0, const GrGLSLExpr1& in1) {
+ return GrGLSLExpr1::Mul(in0, in1);
+}
+
+inline GrGLSLExpr1 operator+(const GrGLSLExpr1& in0, const GrGLSLExpr1& in1) {
+ return GrGLSLExpr1::Add(in0, in1);
+}
+
+inline GrGLSLExpr1 operator-(const GrGLSLExpr1& in0, const GrGLSLExpr1& in1) {
+ return GrGLSLExpr1::Sub(in0, in1);
+}
+
+inline const char* GrGLSLExpr4::ZerosStr() {
+ return "vec4(0)";
+}
+
+inline const char* GrGLSLExpr4::OnesStr() {
+ return "vec4(1)";
+}
+
+inline const char* GrGLSLExpr4::CastStr() {
+ return "vec4(%s)";
+}
+
+inline const char* GrGLSLExpr4::CastIntStr() {
+ return "vec4(%d)";
+}
+
+inline GrGLSLExpr4 GrGLSLExpr4::VectorCast(const GrGLSLExpr1& expr) {
+ return INHERITED::VectorCastImpl(expr);
+}
+
+inline GrGLSLExpr4 GrGLSLExpr4::VectorCast(const GrGLSLExpr4& expr) {
+ return expr;
+}
+
+inline GrGLSLExpr4::AExpr GrGLSLExpr4::a() const {
+ return this->extractComponents<GrGLSLExpr4::AExpr>("%s.a");
+}
+
+inline GrGLSLExpr4 operator*(const GrGLSLExpr1& in0, const GrGLSLExpr4& in1) {
+ return GrGLSLExpr4::Mul(in0, in1);
+}
+
+inline GrGLSLExpr4 operator+(const GrGLSLExpr1& in0, const GrGLSLExpr4& in1) {
+ return GrGLSLExpr4::Add(in0, in1);
+}
+
+inline GrGLSLExpr4 operator-(const GrGLSLExpr1& in0, const GrGLSLExpr4& in1) {
+ return GrGLSLExpr4::Sub(in0, in1);
+}
+
+inline GrGLSLExpr4 operator*(const GrGLSLExpr4& in0, const GrGLSLExpr1& in1) {
+ return GrGLSLExpr4::Mul(in0, in1);
+}
+
+inline GrGLSLExpr4 operator+(const GrGLSLExpr4& in0, const GrGLSLExpr1& in1) {
+ return GrGLSLExpr4::Add(in0, in1);
+}
+
+inline GrGLSLExpr4 operator-(const GrGLSLExpr4& in0, const GrGLSLExpr1& in1) {
+ return GrGLSLExpr4::Sub(in0, in1);
+}
+
+inline GrGLSLExpr4 operator*(const GrGLSLExpr4& in0, const GrGLSLExpr4& in1) {
+ return GrGLSLExpr4::Mul(in0, in1);
+}
+
+inline GrGLSLExpr4 operator+(const GrGLSLExpr4& in0, const GrGLSLExpr4& in1) {
+ return GrGLSLExpr4::Add(in0, in1);
+}
+
+inline GrGLSLExpr4 operator-(const GrGLSLExpr4& in0, const GrGLSLExpr4& in1) {
+ return GrGLSLExpr4::Sub(in0, in1);
+}
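+
+// Usage note (an illustrative sketch, assuming the string constructors declared in GrGLSL.h):
+// these operators fold constants, so (GrGLSLExpr4("src") * GrGLSLExpr1(1)).c_str() is simply
+// "src", and adding GrGLSLExpr4(0) likewise leaves the other operand's GLSL unchanged.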
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/instanced/GLInstancedRendering.cpp b/gfx/skia/skia/src/gpu/instanced/GLInstancedRendering.cpp
new file mode 100644
index 000000000..dabfe4e02
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/instanced/GLInstancedRendering.cpp
@@ -0,0 +1,324 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GLInstancedRendering.h"
+
+#include "GrResourceProvider.h"
+#include "gl/GrGLGpu.h"
+#include "instanced/InstanceProcessor.h"
+
+#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
+
+namespace gr_instanced {
+
+class GLInstancedRendering::GLBatch : public InstancedRendering::Batch {
+public:
+ DEFINE_BATCH_CLASS_ID
+
+ GLBatch(GLInstancedRendering* instRendering) : INHERITED(ClassID(), instRendering) {}
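+    // One GL draw command per run of draws that share the same geometry: the initial run plus
+    // one per geometry change recorded for this batch.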
+ int numGLCommands() const { return 1 + fNumChangesInGeometry; }
+
+private:
+ int fEmulatedBaseInstance;
+ int fGLDrawCmdsIdx;
+
+ friend class GLInstancedRendering;
+
+ typedef Batch INHERITED;
+};
+
+GrCaps::InstancedSupport GLInstancedRendering::CheckSupport(const GrGLCaps& glCaps) {
+ // This method is only intended to be used for initializing fInstancedSupport in the caps.
+ SkASSERT(GrCaps::InstancedSupport::kNone == glCaps.instancedSupport());
+ if (!glCaps.vertexArrayObjectSupport() ||
+ (!glCaps.drawIndirectSupport() && !glCaps.drawInstancedSupport())) {
+ return GrCaps::InstancedSupport::kNone;
+ }
+ return InstanceProcessor::CheckSupport(*glCaps.glslCaps(), glCaps);
+}
+
+GLInstancedRendering::GLInstancedRendering(GrGLGpu* gpu)
+ : INHERITED(gpu),
+ fVertexArrayID(0),
+ fGLDrawCmdsInfo(0),
+ fInstanceAttribsBufferUniqueId(SK_InvalidUniqueID) {
+ SkASSERT(GrCaps::InstancedSupport::kNone != this->gpu()->caps()->instancedSupport());
+}
+
+GLInstancedRendering::~GLInstancedRendering() {
+ if (fVertexArrayID) {
+ GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
+ this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
+ }
+}
+
+inline GrGLGpu* GLInstancedRendering::glGpu() const {
+ return static_cast<GrGLGpu*>(this->gpu());
+}
+
+InstancedRendering::Batch* GLInstancedRendering::createBatch() {
+ return new GLBatch(this);
+}
+
+void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) {
+ // Count what there is to draw.
+ BatchList::Iter iter;
+ iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart);
+ int numGLInstances = 0;
+ int numGLDrawCmds = 0;
+ while (Batch* b = iter.get()) {
+ GLBatch* batch = static_cast<GLBatch*>(b);
+ iter.next();
+
+ numGLInstances += batch->fNumDraws;
+ numGLDrawCmds += batch->numGLCommands();
+ }
+ if (!numGLDrawCmds) {
+ return;
+ }
+ SkASSERT(numGLInstances);
+
+ // Lazily create a vertex array object.
+ if (!fVertexArrayID) {
+ GL_CALL(GenVertexArrays(1, &fVertexArrayID));
+ if (!fVertexArrayID) {
+ return;
+ }
+ this->glGpu()->bindVertexArray(fVertexArrayID);
+
+ // Attach our index buffer to the vertex array.
+ SkASSERT(!this->indexBuffer()->isCPUBacked());
+ GL_CALL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER,
+ static_cast<const GrGLBuffer*>(this->indexBuffer())->bufferID()));
+
+ // Set up the non-instanced attribs.
+ this->glGpu()->bindBuffer(kVertex_GrBufferType, this->vertexBuffer());
+ GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeCoords));
+ GL_CALL(VertexAttribPointer((int)Attrib::kShapeCoords, 2, GR_GL_FLOAT, GR_GL_FALSE,
+ sizeof(ShapeVertex), (void*) offsetof(ShapeVertex, fX)));
+ GL_CALL(EnableVertexAttribArray((int)Attrib::kVertexAttrs));
+ GL_CALL(VertexAttribIPointer((int)Attrib::kVertexAttrs, 1, GR_GL_INT, sizeof(ShapeVertex),
+ (void*) offsetof(ShapeVertex, fAttrs)));
+
+ SkASSERT(SK_InvalidUniqueID == fInstanceAttribsBufferUniqueId);
+ }
+
+ // Create and map instance and draw-indirect buffers.
+ SkASSERT(!fInstanceBuffer);
+ fInstanceBuffer.reset(
+ rp->createBuffer(sizeof(Instance) * numGLInstances, kVertex_GrBufferType,
+ kDynamic_GrAccessPattern,
+ GrResourceProvider::kNoPendingIO_Flag |
+ GrResourceProvider::kRequireGpuMemory_Flag));
+ if (!fInstanceBuffer) {
+ return;
+ }
+
+ SkASSERT(!fDrawIndirectBuffer);
+ if (this->glGpu()->glCaps().drawIndirectSupport()) {
+ fDrawIndirectBuffer.reset(
+ rp->createBuffer(sizeof(GrGLDrawElementsIndirectCommand) * numGLDrawCmds,
+ kDrawIndirect_GrBufferType, kDynamic_GrAccessPattern,
+ GrResourceProvider::kNoPendingIO_Flag |
+ GrResourceProvider::kRequireGpuMemory_Flag));
+ if (!fDrawIndirectBuffer) {
+ return;
+ }
+ }
+
+ Instance* glMappedInstances = static_cast<Instance*>(fInstanceBuffer->map());
+ SkASSERT(glMappedInstances);
+ int glInstancesIdx = 0;
+
+ GrGLDrawElementsIndirectCommand* glMappedCmds = nullptr;
+ int glDrawCmdsIdx = 0;
+ if (fDrawIndirectBuffer) {
+ glMappedCmds = static_cast<GrGLDrawElementsIndirectCommand*>(fDrawIndirectBuffer->map());
+ SkASSERT(glMappedCmds);
+ }
+
+ bool baseInstanceSupport = this->glGpu()->glCaps().baseInstanceSupport();
+ SkASSERT(!baseInstanceSupport || fDrawIndirectBuffer);
+
+ SkASSERT(!fGLDrawCmdsInfo);
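+    // Record per-command info when instanced-batch logging is enabled or base instances must
+    // be emulated with separate draw calls.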
+ if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) {
+ fGLDrawCmdsInfo.reset(numGLDrawCmds);
+ }
+
+ // Generate the instance and draw-indirect buffer contents based on the tracked batches.
+ iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart);
+ while (Batch* b = iter.get()) {
+ GLBatch* batch = static_cast<GLBatch*>(b);
+ iter.next();
+
+ batch->fEmulatedBaseInstance = baseInstanceSupport ? 0 : glInstancesIdx;
+ batch->fGLDrawCmdsIdx = glDrawCmdsIdx;
+
+ const Batch::Draw* draw = batch->fHeadDraw;
+ SkASSERT(draw);
+ do {
+ int instanceCount = 0;
+ IndexRange geometry = draw->fGeometry;
+ SkASSERT(!geometry.isEmpty());
+
+ do {
+ glMappedInstances[glInstancesIdx + instanceCount++] = draw->fInstance;
+ draw = draw->fNext;
+ } while (draw && draw->fGeometry == geometry);
+
+ if (fDrawIndirectBuffer) {
+ GrGLDrawElementsIndirectCommand& glCmd = glMappedCmds[glDrawCmdsIdx];
+ glCmd.fCount = geometry.fCount;
+ glCmd.fInstanceCount = instanceCount;
+ glCmd.fFirstIndex = geometry.fStart;
+ glCmd.fBaseVertex = 0;
+ glCmd.fBaseInstance = baseInstanceSupport ? glInstancesIdx : 0;
+ }
+
+ if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) {
+ GLDrawCmdInfo& cmdInfo = fGLDrawCmdsInfo[glDrawCmdsIdx];
+ cmdInfo.fGeometry = geometry;
+ cmdInfo.fInstanceCount = instanceCount;
+ }
+
+ glInstancesIdx += instanceCount;
+ ++glDrawCmdsIdx;
+ } while (draw);
+ }
+
+ SkASSERT(glDrawCmdsIdx == numGLDrawCmds);
+ if (fDrawIndirectBuffer) {
+ fDrawIndirectBuffer->unmap();
+ }
+
+ SkASSERT(glInstancesIdx == numGLInstances);
+ fInstanceBuffer->unmap();
+}
+
+void GLInstancedRendering::onDraw(const GrPipeline& pipeline, const InstanceProcessor& instProc,
+ const Batch* baseBatch) {
+ if (!fDrawIndirectBuffer && !fGLDrawCmdsInfo) {
+ return; // beginFlush was not successful.
+ }
+ if (!this->glGpu()->flushGLState(pipeline, instProc, false)) {
+ return;
+ }
+
+ if (fDrawIndirectBuffer) {
+ this->glGpu()->bindBuffer(kDrawIndirect_GrBufferType, fDrawIndirectBuffer.get());
+ }
+
+ const GrGLCaps& glCaps = this->glGpu()->glCaps();
+ const GLBatch* batch = static_cast<const GLBatch*>(baseBatch);
+ int numCommands = batch->numGLCommands();
+
+#if GR_GL_LOG_INSTANCED_BATCHES
+ SkASSERT(fGLDrawCmdsInfo);
+ SkDebugf("Instanced batch: [");
+ for (int i = 0; i < numCommands; ++i) {
+ int glCmdIdx = batch->fGLDrawCmdsIdx + i;
+ SkDebugf("%s%i * %s", (i ? ", " : ""), fGLDrawCmdsInfo[glCmdIdx].fInstanceCount,
+ InstanceProcessor::GetNameOfIndexRange(fGLDrawCmdsInfo[glCmdIdx].fGeometry));
+ }
+ SkDebugf("]\n");
+#else
+ SkASSERT(SkToBool(fGLDrawCmdsInfo) == !glCaps.baseInstanceSupport());
+#endif
+
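+    // Fast path: issue the entire batch with one multi-draw when indirect multi-draws and base
+    // instances are both supported.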
+ if (numCommands > 1 && glCaps.multiDrawIndirectSupport() && glCaps.baseInstanceSupport()) {
+ SkASSERT(fDrawIndirectBuffer);
+ int glCmdsIdx = batch->fGLDrawCmdsIdx;
+ this->flushInstanceAttribs(batch->fEmulatedBaseInstance);
+ GL_CALL(MultiDrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
+ (GrGLDrawElementsIndirectCommand*) nullptr + glCmdsIdx,
+ numCommands, 0));
+ return;
+ }
+
+ int emulatedBaseInstance = batch->fEmulatedBaseInstance;
+ for (int i = 0; i < numCommands; ++i) {
+ int glCmdIdx = batch->fGLDrawCmdsIdx + i;
+ this->flushInstanceAttribs(emulatedBaseInstance);
+ if (fDrawIndirectBuffer) {
+ GL_CALL(DrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
+ (GrGLDrawElementsIndirectCommand*) nullptr + glCmdIdx));
+ } else {
+ const GLDrawCmdInfo& cmdInfo = fGLDrawCmdsInfo[glCmdIdx];
+ GL_CALL(DrawElementsInstanced(GR_GL_TRIANGLES, cmdInfo.fGeometry.fCount,
+ GR_GL_UNSIGNED_BYTE,
+ (GrGLubyte*) nullptr + cmdInfo.fGeometry.fStart,
+ cmdInfo.fInstanceCount));
+ }
+ if (!glCaps.baseInstanceSupport()) {
+ const GLDrawCmdInfo& cmdInfo = fGLDrawCmdsInfo[glCmdIdx];
+ emulatedBaseInstance += cmdInfo.fInstanceCount;
+ }
+ }
+}
+
+void GLInstancedRendering::flushInstanceAttribs(int baseInstance) {
+ SkASSERT(fVertexArrayID);
+ this->glGpu()->bindVertexArray(fVertexArrayID);
+
+ SkASSERT(fInstanceBuffer);
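+    // Only re-specify the instance attrib pointers when the instance buffer or the emulated
+    // base instance has changed since the previous flush.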
+ if (fInstanceAttribsBufferUniqueId != fInstanceBuffer->uniqueID() ||
+ fInstanceAttribsBaseInstance != baseInstance) {
+ Instance* offsetInBuffer = (Instance*) nullptr + baseInstance;
+
+ this->glGpu()->bindBuffer(kVertex_GrBufferType, fInstanceBuffer.get());
+
+ // Info attrib.
+ GL_CALL(EnableVertexAttribArray((int)Attrib::kInstanceInfo));
+ GL_CALL(VertexAttribIPointer((int)Attrib::kInstanceInfo, 1, GR_GL_UNSIGNED_INT,
+ sizeof(Instance), &offsetInBuffer->fInfo));
+ GL_CALL(VertexAttribDivisor((int)Attrib::kInstanceInfo, 1));
+
+ // Shape matrix attrib.
+ GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeMatrixX));
+ GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeMatrixY));
+ GL_CALL(VertexAttribPointer((int)Attrib::kShapeMatrixX, 3, GR_GL_FLOAT, GR_GL_FALSE,
+ sizeof(Instance), &offsetInBuffer->fShapeMatrix2x3[0]));
+ GL_CALL(VertexAttribPointer((int)Attrib::kShapeMatrixY, 3, GR_GL_FLOAT, GR_GL_FALSE,
+ sizeof(Instance), &offsetInBuffer->fShapeMatrix2x3[3]));
+ GL_CALL(VertexAttribDivisor((int)Attrib::kShapeMatrixX, 1));
+ GL_CALL(VertexAttribDivisor((int)Attrib::kShapeMatrixY, 1));
+
+ // Color attrib.
+ GL_CALL(EnableVertexAttribArray((int)Attrib::kColor));
+ GL_CALL(VertexAttribPointer((int)Attrib::kColor, 4, GR_GL_UNSIGNED_BYTE, GR_GL_TRUE,
+ sizeof(Instance), &offsetInBuffer->fColor));
+ GL_CALL(VertexAttribDivisor((int)Attrib::kColor, 1));
+
+ // Local rect attrib.
+ GL_CALL(EnableVertexAttribArray((int)Attrib::kLocalRect));
+ GL_CALL(VertexAttribPointer((int)Attrib::kLocalRect, 4, GR_GL_FLOAT, GR_GL_FALSE,
+ sizeof(Instance), &offsetInBuffer->fLocalRect));
+ GL_CALL(VertexAttribDivisor((int)Attrib::kLocalRect, 1));
+
+ fInstanceAttribsBufferUniqueId = fInstanceBuffer->uniqueID();
+ fInstanceAttribsBaseInstance = baseInstance;
+ }
+}
+
+void GLInstancedRendering::onEndFlush() {
+ fInstanceBuffer.reset();
+ fDrawIndirectBuffer.reset();
+ fGLDrawCmdsInfo.reset(0);
+}
+
+void GLInstancedRendering::onResetGpuResources(ResetType resetType) {
+ if (fVertexArrayID && ResetType::kDestroy == resetType) {
+ GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
+ this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
+ }
+ fVertexArrayID = 0;
+ fInstanceBuffer.reset();
+ fDrawIndirectBuffer.reset();
+ fInstanceAttribsBufferUniqueId = SK_InvalidUniqueID;
+}
+
+}
diff --git a/gfx/skia/skia/src/gpu/instanced/GLInstancedRendering.h b/gfx/skia/skia/src/gpu/instanced/GLInstancedRendering.h
new file mode 100644
index 000000000..ce1638c7f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/instanced/GLInstancedRendering.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef gr_instanced_GLInstancedRendering_DEFINED
+#define gr_instanced_GLInstancedRendering_DEFINED
+
+#include "GrCaps.h"
+#include "gl/GrGLBuffer.h"
+#include "instanced/InstancedRendering.h"
+
+class GrGLCaps;
+class GrGLGpu;
+
+#define GR_GL_LOG_INSTANCED_BATCHES 0
+
+namespace gr_instanced {
+
+class GLInstancedRendering final : public InstancedRendering {
+public:
+ GLInstancedRendering(GrGLGpu*);
+ ~GLInstancedRendering() override;
+
+private:
+ /**
+ * Called by GrGLCaps to determine the level of support this class can offer for instanced
+ * rendering on the current platform.
+ */
+ static GrCaps::InstancedSupport CheckSupport(const GrGLCaps&);
+
+ GrGLGpu* glGpu() const;
+
+ Batch* createBatch() override;
+
+ void onBeginFlush(GrResourceProvider*) override;
+ void onDraw(const GrPipeline&, const InstanceProcessor&, const Batch*) override;
+ void onEndFlush() override;
+ void onResetGpuResources(ResetType) override;
+
+ void flushInstanceAttribs(int baseInstance);
+
+ struct GLDrawCmdInfo {
+ IndexRange fGeometry;
+ int fInstanceCount;
+ };
+
+ GrGLuint fVertexArrayID;
+ SkAutoTUnref<GrBuffer> fInstanceBuffer;
+ SkAutoTUnref<GrBuffer> fDrawIndirectBuffer;
+ SkAutoSTMalloc<1024, GLDrawCmdInfo> fGLDrawCmdsInfo;
+ uint32_t fInstanceAttribsBufferUniqueId;
+ int fInstanceAttribsBaseInstance;
+
+ class GLBatch;
+
+ friend class ::GrGLCaps; // For CheckSupport.
+
+ typedef InstancedRendering INHERITED;
+};
+
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/instanced/InstanceProcessor.cpp b/gfx/skia/skia/src/gpu/instanced/InstanceProcessor.cpp
new file mode 100644
index 000000000..480155b68
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/instanced/InstanceProcessor.cpp
@@ -0,0 +1,2123 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "InstanceProcessor.h"
+
+#include "GrContext.h"
+#include "GrRenderTargetPriv.h"
+#include "GrResourceCache.h"
+#include "GrResourceProvider.h"
+#include "glsl/GrGLSLGeometryProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLProgramBuilder.h"
+#include "glsl/GrGLSLVarying.h"
+
+namespace gr_instanced {
+
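+// Reports the highest instancing tier the given caps can support, stepping down from
+// kMixedSampled through kMultisampled and kBasic to kNone as required features drop out.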
+GrCaps::InstancedSupport InstanceProcessor::CheckSupport(const GrGLSLCaps& glslCaps,
+ const GrCaps& caps) {
+ if (!glslCaps.canUseAnyFunctionInShader() ||
+ !glslCaps.flatInterpolationSupport() ||
+ !glslCaps.integerSupport() ||
+ 0 == glslCaps.maxVertexSamplers() ||
+ !caps.shaderCaps()->texelBufferSupport() ||
+ caps.maxVertexAttributes() < kNumAttribs) {
+ return GrCaps::InstancedSupport::kNone;
+ }
+ if (!caps.sampleLocationsSupport() ||
+ !glslCaps.sampleVariablesSupport() ||
+ !glslCaps.shaderDerivativeSupport()) {
+ return GrCaps::InstancedSupport::kBasic;
+ }
+ if (0 == caps.maxRasterSamples() ||
+ !glslCaps.sampleMaskOverrideCoverageSupport()) {
+ return GrCaps::InstancedSupport::kMultisampled;
+ }
+ return GrCaps::InstancedSupport::kMixedSampled;
+}
+
+InstanceProcessor::InstanceProcessor(BatchInfo batchInfo, GrBuffer* paramsBuffer)
+ : fBatchInfo(batchInfo) {
+ this->initClassID<InstanceProcessor>();
+
+ this->addVertexAttrib("shapeCoords", kVec2f_GrVertexAttribType, kHigh_GrSLPrecision);
+ this->addVertexAttrib("vertexAttrs", kInt_GrVertexAttribType);
+ this->addVertexAttrib("instanceInfo", kUint_GrVertexAttribType);
+ this->addVertexAttrib("shapeMatrixX", kVec3f_GrVertexAttribType, kHigh_GrSLPrecision);
+ this->addVertexAttrib("shapeMatrixY", kVec3f_GrVertexAttribType, kHigh_GrSLPrecision);
+ this->addVertexAttrib("color", kVec4f_GrVertexAttribType, kLow_GrSLPrecision);
+ this->addVertexAttrib("localRect", kVec4f_GrVertexAttribType, kHigh_GrSLPrecision);
+
+ GR_STATIC_ASSERT(0 == (int)Attrib::kShapeCoords);
+ GR_STATIC_ASSERT(1 == (int)Attrib::kVertexAttrs);
+ GR_STATIC_ASSERT(2 == (int)Attrib::kInstanceInfo);
+ GR_STATIC_ASSERT(3 == (int)Attrib::kShapeMatrixX);
+ GR_STATIC_ASSERT(4 == (int)Attrib::kShapeMatrixY);
+ GR_STATIC_ASSERT(5 == (int)Attrib::kColor);
+ GR_STATIC_ASSERT(6 == (int)Attrib::kLocalRect);
+ GR_STATIC_ASSERT(7 == kNumAttribs);
+
+ if (fBatchInfo.fHasParams) {
+ SkASSERT(paramsBuffer);
+ fParamsAccess.reset(kRGBA_float_GrPixelConfig, paramsBuffer, kVertex_GrShaderFlag);
+ this->addBufferAccess(&fParamsAccess);
+ }
+
+ if (fBatchInfo.fAntialiasMode >= AntialiasMode::kMSAA) {
+ if (!fBatchInfo.isSimpleRects() ||
+ AntialiasMode::kMixedSamples == fBatchInfo.fAntialiasMode) {
+ this->setWillUseSampleLocations();
+ }
+ }
+}
+
+class GLSLInstanceProcessor : public GrGLSLGeometryProcessor {
+public:
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override;
+
+private:
+ void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor&,
+ FPCoordTransformIter&& transformIter) override {
+ this->setTransformDataHelper(SkMatrix::I(), pdman, &transformIter);
+ }
+
+ class VertexInputs;
+ class Backend;
+ class BackendNonAA;
+ class BackendCoverage;
+ class BackendMultisample;
+
+ typedef GrGLSLGeometryProcessor INHERITED;
+};
+
+GrGLSLPrimitiveProcessor* InstanceProcessor::createGLSLInstance(const GrGLSLCaps&) const {
+ return new GLSLInstanceProcessor();
+}
+
+class GLSLInstanceProcessor::VertexInputs {
+public:
+ VertexInputs(const InstanceProcessor& instProc, GrGLSLVertexBuilder* vertexBuilder)
+ : fInstProc(instProc),
+ fVertexBuilder(vertexBuilder) {
+ }
+
+ void initParams(const SamplerHandle paramsBuffer) {
+ fParamsBuffer = paramsBuffer;
+ fVertexBuilder->definef("PARAMS_IDX_MASK", "0x%xu", kParamsIdx_InfoMask);
+ fVertexBuilder->appendPrecisionModifier(kHigh_GrSLPrecision);
+ fVertexBuilder->codeAppendf("int paramsIdx = int(%s & PARAMS_IDX_MASK);",
+ this->attr(Attrib::kInstanceInfo));
+ }
+
+ const char* attr(Attrib attr) const { return fInstProc.getAttrib((int)attr).fName; }
+
+ void fetchNextParam(GrSLType type = kVec4f_GrSLType) const {
+ SkASSERT(fParamsBuffer.isValid());
+ if (type != kVec4f_GrSLType) {
+ fVertexBuilder->codeAppendf("%s(", GrGLSLTypeString(type));
+ }
+ fVertexBuilder->appendTexelFetch(fParamsBuffer, "paramsIdx++");
+ if (type != kVec4f_GrSLType) {
+ fVertexBuilder->codeAppend(")");
+ }
+ }
+
+ void skipParams(unsigned n) const {
+ SkASSERT(fParamsBuffer.isValid());
+ fVertexBuilder->codeAppendf("paramsIdx += %u;", n);
+ }
+
+private:
+ const InstanceProcessor& fInstProc;
+ GrGLSLVertexBuilder* fVertexBuilder;
+ SamplerHandle fParamsBuffer;
+};
+
+class GLSLInstanceProcessor::Backend {
+public:
+ static Backend* SK_WARN_UNUSED_RESULT Create(const GrPipeline&, BatchInfo, const VertexInputs&);
+ virtual ~Backend() {}
+
+ void init(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*);
+ virtual void setupRect(GrGLSLVertexBuilder*) = 0;
+ virtual void setupOval(GrGLSLVertexBuilder*) = 0;
+ void setupRRect(GrGLSLVertexBuilder*, int* usedShapeDefinitions);
+
+ void initInnerShape(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*);
+ virtual void setupInnerRect(GrGLSLVertexBuilder*) = 0;
+ virtual void setupInnerOval(GrGLSLVertexBuilder*) = 0;
+ void setupInnerSimpleRRect(GrGLSLVertexBuilder*);
+
+ const char* outShapeCoords() {
+ return fModifiedShapeCoords ? fModifiedShapeCoords : fInputs.attr(Attrib::kShapeCoords);
+ }
+
+ void emitCode(GrGLSLVertexBuilder*, GrGLSLPPFragmentBuilder*, const char* outCoverage,
+ const char* outColor);
+
+protected:
+ Backend(BatchInfo batchInfo, const VertexInputs& inputs)
+ : fBatchInfo(batchInfo),
+ fInputs(inputs),
+ fModifiesCoverage(false),
+ fModifiesColor(false),
+ fNeedsNeighborRadii(false),
+ fColor(kVec4f_GrSLType),
+ fTriangleIsArc(kInt_GrSLType),
+ fArcCoords(kVec2f_GrSLType),
+ fInnerShapeCoords(kVec2f_GrSLType),
+ fInnerRRect(kVec4f_GrSLType),
+ fModifiedShapeCoords(nullptr) {
+ if (fBatchInfo.fShapeTypes & kRRect_ShapesMask) {
+ fModifiedShapeCoords = "adjustedShapeCoords";
+ }
+ }
+
+ virtual void onInit(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) = 0;
+ virtual void adjustRRectVertices(GrGLSLVertexBuilder*);
+ virtual void onSetupRRect(GrGLSLVertexBuilder*) {}
+
+ virtual void onInitInnerShape(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) = 0;
+ virtual void onSetupInnerSimpleRRect(GrGLSLVertexBuilder*) = 0;
+
+ virtual void onEmitCode(GrGLSLVertexBuilder*, GrGLSLPPFragmentBuilder*,
+ const char* outCoverage, const char* outColor) = 0;
+
+ void setupSimpleRadii(GrGLSLVertexBuilder*);
+ void setupNinePatchRadii(GrGLSLVertexBuilder*);
+ void setupComplexRadii(GrGLSLVertexBuilder*);
+
+ const BatchInfo fBatchInfo;
+ const VertexInputs& fInputs;
+ bool fModifiesCoverage;
+ bool fModifiesColor;
+ bool fNeedsNeighborRadii;
+ GrGLSLVertToFrag fColor;
+ GrGLSLVertToFrag fTriangleIsArc;
+ GrGLSLVertToFrag fArcCoords;
+ GrGLSLVertToFrag fInnerShapeCoords;
+ GrGLSLVertToFrag fInnerRRect;
+ const char* fModifiedShapeCoords;
+};
+
+void GLSLInstanceProcessor::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) {
+ const GrPipeline& pipeline = args.fVertBuilder->getProgramBuilder()->pipeline();
+ const InstanceProcessor& ip = args.fGP.cast<InstanceProcessor>();
+ GrGLSLUniformHandler* uniHandler = args.fUniformHandler;
+ GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
+ GrGLSLVertexBuilder* v = args.fVertBuilder;
+ GrGLSLPPFragmentBuilder* f = args.fFragBuilder;
+
+ varyingHandler->emitAttributes(ip);
+
+ VertexInputs inputs(ip, v);
+ if (ip.batchInfo().fHasParams) {
+ SkASSERT(1 == ip.numBuffers());
+ inputs.initParams(args.fBufferSamplers[0]);
+ }
+
+ if (!ip.batchInfo().fHasPerspective) {
+ v->codeAppendf("mat2x3 shapeMatrix = mat2x3(%s, %s);",
+ inputs.attr(Attrib::kShapeMatrixX), inputs.attr(Attrib::kShapeMatrixY));
+ } else {
+ v->definef("PERSPECTIVE_FLAG", "0x%xu", kPerspective_InfoFlag);
+ v->codeAppendf("mat3 shapeMatrix = mat3(%s, %s, vec3(0, 0, 1));",
+ inputs.attr(Attrib::kShapeMatrixX), inputs.attr(Attrib::kShapeMatrixY));
+ v->codeAppendf("if (0u != (%s & PERSPECTIVE_FLAG)) {",
+ inputs.attr(Attrib::kInstanceInfo));
+ v->codeAppend ( "shapeMatrix[2] = ");
+ inputs.fetchNextParam(kVec3f_GrSLType);
+ v->codeAppend ( ";");
+ v->codeAppend ("}");
+ }
+
+ bool hasSingleShapeType = SkIsPow2(ip.batchInfo().fShapeTypes);
+ if (!hasSingleShapeType) {
+ v->define("SHAPE_TYPE_BIT", kShapeType_InfoBit);
+ v->codeAppendf("uint shapeType = %s >> SHAPE_TYPE_BIT;",
+ inputs.attr(Attrib::kInstanceInfo));
+ }
+
+ SkAutoTDelete<Backend> backend(Backend::Create(pipeline, ip.batchInfo(), inputs));
+ backend->init(varyingHandler, v);
+
+ int usedShapeDefinitions = 0;
+
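+    // Vertex-shader setup for the outer shape; branch on shapeType only when the batch mixes
+    // multiple shape types.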
+ if (hasSingleShapeType || !(ip.batchInfo().fShapeTypes & ~kRRect_ShapesMask)) {
+ if (kRect_ShapeFlag == ip.batchInfo().fShapeTypes) {
+ backend->setupRect(v);
+ } else if (kOval_ShapeFlag == ip.batchInfo().fShapeTypes) {
+ backend->setupOval(v);
+ } else {
+ backend->setupRRect(v, &usedShapeDefinitions);
+ }
+ } else {
+ if (ip.batchInfo().fShapeTypes & kRRect_ShapesMask) {
+ v->codeAppend ("if (shapeType >= SIMPLE_R_RECT_SHAPE_TYPE) {");
+ backend->setupRRect(v, &usedShapeDefinitions);
+ v->codeAppend ("}");
+ usedShapeDefinitions |= kSimpleRRect_ShapeFlag;
+ }
+ if (ip.batchInfo().fShapeTypes & kOval_ShapeFlag) {
+ if (ip.batchInfo().fShapeTypes & kRect_ShapeFlag) {
+ if (ip.batchInfo().fShapeTypes & kRRect_ShapesMask) {
+ v->codeAppend ("else ");
+ }
+ v->codeAppend ("if (OVAL_SHAPE_TYPE == shapeType) {");
+ usedShapeDefinitions |= kOval_ShapeFlag;
+ } else {
+ v->codeAppend ("else {");
+ }
+ backend->setupOval(v);
+ v->codeAppend ("}");
+ }
+ if (ip.batchInfo().fShapeTypes & kRect_ShapeFlag) {
+ v->codeAppend ("else {");
+ backend->setupRect(v);
+ v->codeAppend ("}");
+ }
+ }
+
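+    // Optional inner shape (e.g. for a DRRect): map the outer shape coords into the inner
+    // shape's normalized space, then dispatch on the inner shape type.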
+ if (ip.batchInfo().fInnerShapeTypes) {
+ bool hasSingleInnerShapeType = SkIsPow2(ip.batchInfo().fInnerShapeTypes);
+ if (!hasSingleInnerShapeType) {
+ v->definef("INNER_SHAPE_TYPE_MASK", "0x%xu", kInnerShapeType_InfoMask);
+ v->define("INNER_SHAPE_TYPE_BIT", kInnerShapeType_InfoBit);
+ v->codeAppendf("uint innerShapeType = ((%s & INNER_SHAPE_TYPE_MASK) >> "
+ "INNER_SHAPE_TYPE_BIT);",
+ inputs.attr(Attrib::kInstanceInfo));
+ }
+ // Here we take advantage of the fact that outerRect == localRect in recordDRRect.
+ v->codeAppendf("vec4 outer = %s;", inputs.attr(Attrib::kLocalRect));
+ v->codeAppend ("vec4 inner = ");
+ inputs.fetchNextParam();
+ v->codeAppend (";");
+ // outer2Inner is a transform from shape coords to inner shape coords:
+ // e.g. innerShapeCoords = shapeCoords * outer2Inner.xy + outer2Inner.zw
+ v->codeAppend ("vec4 outer2Inner = vec4(outer.zw - outer.xy, "
+ "outer.xy + outer.zw - inner.xy - inner.zw) / "
+ "(inner.zw - inner.xy).xyxy;");
+ v->codeAppendf("vec2 innerShapeCoords = %s * outer2Inner.xy + outer2Inner.zw;",
+ backend->outShapeCoords());
+
+ backend->initInnerShape(varyingHandler, v);
+
+ SkASSERT(0 == (ip.batchInfo().fInnerShapeTypes & kRRect_ShapesMask) ||
+ kSimpleRRect_ShapeFlag == (ip.batchInfo().fInnerShapeTypes & kRRect_ShapesMask));
+
+ if (hasSingleInnerShapeType) {
+ if (kRect_ShapeFlag == ip.batchInfo().fInnerShapeTypes) {
+ backend->setupInnerRect(v);
+ } else if (kOval_ShapeFlag == ip.batchInfo().fInnerShapeTypes) {
+ backend->setupInnerOval(v);
+ } else {
+ backend->setupInnerSimpleRRect(v);
+ }
+ } else {
+ if (ip.batchInfo().fInnerShapeTypes & kSimpleRRect_ShapeFlag) {
+ v->codeAppend ("if (SIMPLE_R_RECT_SHAPE_TYPE == innerShapeType) {");
+ backend->setupInnerSimpleRRect(v);
+ v->codeAppend("}");
+ usedShapeDefinitions |= kSimpleRRect_ShapeFlag;
+ }
+ if (ip.batchInfo().fInnerShapeTypes & kOval_ShapeFlag) {
+ if (ip.batchInfo().fInnerShapeTypes & kRect_ShapeFlag) {
+ if (ip.batchInfo().fInnerShapeTypes & kSimpleRRect_ShapeFlag) {
+ v->codeAppend ("else ");
+ }
+ v->codeAppend ("if (OVAL_SHAPE_TYPE == innerShapeType) {");
+ usedShapeDefinitions |= kOval_ShapeFlag;
+ } else {
+ v->codeAppend ("else {");
+ }
+ backend->setupInnerOval(v);
+ v->codeAppend("}");
+ }
+ if (ip.batchInfo().fInnerShapeTypes & kRect_ShapeFlag) {
+ v->codeAppend("else {");
+ backend->setupInnerRect(v);
+ v->codeAppend("}");
+ }
+ }
+ }
+
+ if (usedShapeDefinitions & kOval_ShapeFlag) {
+ v->definef("OVAL_SHAPE_TYPE", "%du", (int)ShapeType::kOval);
+ }
+ if (usedShapeDefinitions & kSimpleRRect_ShapeFlag) {
+ v->definef("SIMPLE_R_RECT_SHAPE_TYPE", "%du", (int)ShapeType::kSimpleRRect);
+ }
+ if (usedShapeDefinitions & kNinePatch_ShapeFlag) {
+ v->definef("NINE_PATCH_SHAPE_TYPE", "%du", (int)ShapeType::kNinePatch);
+ }
+ SkASSERT(!(usedShapeDefinitions & (kRect_ShapeFlag | kComplexRRect_ShapeFlag)));
+
+ backend->emitCode(v, f, pipeline.ignoresCoverage() ? nullptr : args.fOutputCoverage,
+ args.fOutputColor);
+
+ const char* localCoords = nullptr;
+ if (ip.batchInfo().fUsesLocalCoords) {
+ localCoords = "localCoords";
+ v->codeAppendf("vec2 t = 0.5 * (%s + vec2(1));", backend->outShapeCoords());
+ v->codeAppendf("vec2 localCoords = (1.0 - t) * %s.xy + t * %s.zw;",
+ inputs.attr(Attrib::kLocalRect), inputs.attr(Attrib::kLocalRect));
+ }
+ if (ip.batchInfo().fHasLocalMatrix && ip.batchInfo().fHasParams) {
+ v->definef("LOCAL_MATRIX_FLAG", "0x%xu", kLocalMatrix_InfoFlag);
+ v->codeAppendf("if (0u != (%s & LOCAL_MATRIX_FLAG)) {",
+ inputs.attr(Attrib::kInstanceInfo));
+ if (!ip.batchInfo().fUsesLocalCoords) {
+ inputs.skipParams(2);
+ } else {
+ v->codeAppendf( "mat2x3 localMatrix;");
+ v->codeAppend ( "localMatrix[0] = ");
+ inputs.fetchNextParam(kVec3f_GrSLType);
+ v->codeAppend ( ";");
+ v->codeAppend ( "localMatrix[1] = ");
+ inputs.fetchNextParam(kVec3f_GrSLType);
+ v->codeAppend ( ";");
+ v->codeAppend ( "localCoords = (vec3(localCoords, 1) * localMatrix).xy;");
+ }
+ v->codeAppend("}");
+ }
+
+ GrSLType positionType = ip.batchInfo().fHasPerspective ? kVec3f_GrSLType : kVec2f_GrSLType;
+ v->codeAppendf("%s deviceCoords = vec3(%s, 1) * shapeMatrix;",
+ GrGLSLTypeString(positionType), backend->outShapeCoords());
+ gpArgs->fPositionVar.set(positionType, "deviceCoords");
+
+ this->emitTransforms(v, varyingHandler, uniHandler, gpArgs->fPositionVar, localCoords,
+ args.fFPCoordTransformHandler);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+void GLSLInstanceProcessor::Backend::init(GrGLSLVaryingHandler* varyingHandler,
+ GrGLSLVertexBuilder* v) {
+ if (fModifiedShapeCoords) {
+ v->codeAppendf("vec2 %s = %s;", fModifiedShapeCoords, fInputs.attr(Attrib::kShapeCoords));
+ }
+
+ this->onInit(varyingHandler, v);
+
+ if (!fColor.vsOut()) {
+ varyingHandler->addFlatVarying("color", &fColor, kLow_GrSLPrecision);
+ v->codeAppendf("%s = %s;", fColor.vsOut(), fInputs.attr(Attrib::kColor));
+ }
+}
+
+void GLSLInstanceProcessor::Backend::setupRRect(GrGLSLVertexBuilder* v, int* usedShapeDefinitions) {
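+    // Decode which corner of the rrect this vertex belongs to from the low two bits of the
+    // vertex attrs.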
+ v->codeAppendf("uvec2 corner = uvec2(%s & 1, (%s >> 1) & 1);",
+ fInputs.attr(Attrib::kVertexAttrs), fInputs.attr(Attrib::kVertexAttrs));
+ v->codeAppend ("vec2 cornerSign = vec2(corner) * 2.0 - 1.0;");
+ v->codeAppendf("vec2 radii%s;", fNeedsNeighborRadii ? ", neighborRadii" : "");
+ v->codeAppend ("mat2 p = ");
+ fInputs.fetchNextParam(kMat22f_GrSLType);
+ v->codeAppend (";");
+ uint8_t types = fBatchInfo.fShapeTypes & kRRect_ShapesMask;
+ if (0 == (types & (types - 1))) {
+ if (kSimpleRRect_ShapeFlag == types) {
+ this->setupSimpleRadii(v);
+ } else if (kNinePatch_ShapeFlag == types) {
+ this->setupNinePatchRadii(v);
+ } else if (kComplexRRect_ShapeFlag == types) {
+ this->setupComplexRadii(v);
+ }
+ } else {
+ if (types & kSimpleRRect_ShapeFlag) {
+ v->codeAppend ("if (SIMPLE_R_RECT_SHAPE_TYPE == shapeType) {");
+ this->setupSimpleRadii(v);
+ v->codeAppend ("}");
+ *usedShapeDefinitions |= kSimpleRRect_ShapeFlag;
+ }
+ if (types & kNinePatch_ShapeFlag) {
+ if (types & kComplexRRect_ShapeFlag) {
+ if (types & kSimpleRRect_ShapeFlag) {
+ v->codeAppend ("else ");
+ }
+ v->codeAppend ("if (NINE_PATCH_SHAPE_TYPE == shapeType) {");
+ *usedShapeDefinitions |= kNinePatch_ShapeFlag;
+ } else {
+ v->codeAppend ("else {");
+ }
+ this->setupNinePatchRadii(v);
+ v->codeAppend ("}");
+ }
+ if (types & kComplexRRect_ShapeFlag) {
+ v->codeAppend ("else {");
+ this->setupComplexRadii(v);
+ v->codeAppend ("}");
+ }
+ }
+
+ this->adjustRRectVertices(v);
+
+ if (fArcCoords.vsOut()) {
+ v->codeAppendf("%s = (cornerSign * %s + radii - vec2(1)) / radii;",
+ fArcCoords.vsOut(), fModifiedShapeCoords);
+ }
+ if (fTriangleIsArc.vsOut()) {
+ v->codeAppendf("%s = int(all(equal(vec2(1), abs(%s))));",
+ fTriangleIsArc.vsOut(), fInputs.attr(Attrib::kShapeCoords));
+ }
+
+ this->onSetupRRect(v);
+}
+
+void GLSLInstanceProcessor::Backend::setupSimpleRadii(GrGLSLVertexBuilder* v) {
+ if (fNeedsNeighborRadii) {
+ v->codeAppend ("neighborRadii = ");
+ }
+ v->codeAppend("radii = p[0] * 2.0 / p[1];");
+}
+
+void GLSLInstanceProcessor::Backend::setupNinePatchRadii(GrGLSLVertexBuilder* v) {
+ v->codeAppend("radii = vec2(p[0][corner.x], p[1][corner.y]);");
+ if (fNeedsNeighborRadii) {
+ v->codeAppend("neighborRadii = vec2(p[0][1u - corner.x], p[1][1u - corner.y]);");
+ }
+}
+
+void GLSLInstanceProcessor::Backend::setupComplexRadii(GrGLSLVertexBuilder* v) {
+ /**
+ * The x and y radii of each arc are stored in separate vectors,
+ * in the following order:
+ *
+ * __x1 _ _ _ x3__
+ *
+ * y1 | | y2
+ *
+ * | |
+ *
+ * y3 |__ _ _ _ __| y4
+ * x2 x4
+ *
+ */
+ v->codeAppend("mat2 p2 = ");
+ fInputs.fetchNextParam(kMat22f_GrSLType);
+ v->codeAppend(";");
+ v->codeAppend("radii = vec2(p[corner.x][corner.y], p2[corner.y][corner.x]);");
+ if (fNeedsNeighborRadii) {
+ v->codeAppend("neighborRadii = vec2(p[1u - corner.x][corner.y], "
+ "p2[1u - corner.y][corner.x]);");
+ }
+}
+
+void GLSLInstanceProcessor::Backend::adjustRRectVertices(GrGLSLVertexBuilder* v) {
+ // Resize the 4 triangles that arcs are drawn into so they match their corresponding radii.
+ // 0.5 is a special value that indicates the edge of an arc triangle.
+ v->codeAppendf("if (abs(%s.x) == 0.5)"
+ "%s.x = cornerSign.x * (1.0 - radii.x);",
+ fInputs.attr(Attrib::kShapeCoords), fModifiedShapeCoords);
+ v->codeAppendf("if (abs(%s.y) == 0.5) "
+ "%s.y = cornerSign.y * (1.0 - radii.y);",
+ fInputs.attr(Attrib::kShapeCoords), fModifiedShapeCoords);
+}
+
+void GLSLInstanceProcessor::Backend::initInnerShape(GrGLSLVaryingHandler* varyingHandler,
+ GrGLSLVertexBuilder* v) {
+ SkASSERT(!(fBatchInfo.fInnerShapeTypes & (kNinePatch_ShapeFlag | kComplexRRect_ShapeFlag)));
+
+ this->onInitInnerShape(varyingHandler, v);
+
+ if (fInnerShapeCoords.vsOut()) {
+ v->codeAppendf("%s = innerShapeCoords;", fInnerShapeCoords.vsOut());
+ }
+}
+
+void GLSLInstanceProcessor::Backend::setupInnerSimpleRRect(GrGLSLVertexBuilder* v) {
+ v->codeAppend("mat2 innerP = ");
+ fInputs.fetchNextParam(kMat22f_GrSLType);
+ v->codeAppend(";");
+ v->codeAppend("vec2 innerRadii = innerP[0] * 2.0 / innerP[1];");
+ this->onSetupInnerSimpleRRect(v);
+}
+
+void GLSLInstanceProcessor::Backend::emitCode(GrGLSLVertexBuilder* v, GrGLSLPPFragmentBuilder* f,
+ const char* outCoverage, const char* outColor) {
+ SkASSERT(!fModifiesCoverage || outCoverage);
+ this->onEmitCode(v, f, fModifiesCoverage ? outCoverage : nullptr,
+ fModifiesColor ? outColor : nullptr);
+ if (outCoverage && !fModifiesCoverage) {
+ // Even though the subclass doesn't use coverage, we are expected to assign some value.
+ f->codeAppendf("%s = vec4(1);", outCoverage);
+ }
+ if (!fModifiesColor) {
+ // The subclass didn't assign a value to the output color.
+ f->codeAppendf("%s = %s;", outColor, fColor.fsIn());
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+class GLSLInstanceProcessor::BackendNonAA : public Backend {
+public:
+ BackendNonAA(BatchInfo batchInfo, const VertexInputs& inputs)
+ : INHERITED(batchInfo, inputs) {
+ if (fBatchInfo.fCannotDiscard && !fBatchInfo.isSimpleRects()) {
+ fModifiesColor = !fBatchInfo.fCannotTweakAlphaForCoverage;
+ fModifiesCoverage = !fModifiesColor;
+ }
+ }
+
+private:
+ void onInit(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) override;
+ void setupRect(GrGLSLVertexBuilder*) override;
+ void setupOval(GrGLSLVertexBuilder*) override;
+
+ void onInitInnerShape(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) override;
+ void setupInnerRect(GrGLSLVertexBuilder*) override;
+ void setupInnerOval(GrGLSLVertexBuilder*) override;
+ void onSetupInnerSimpleRRect(GrGLSLVertexBuilder*) override;
+
+ void onEmitCode(GrGLSLVertexBuilder*, GrGLSLPPFragmentBuilder*, const char*,
+ const char*) override;
+
+ typedef Backend INHERITED;
+};
+
+void GLSLInstanceProcessor::BackendNonAA::onInit(GrGLSLVaryingHandler* varyingHandler,
+ GrGLSLVertexBuilder*) {
+ if (kRect_ShapeFlag != fBatchInfo.fShapeTypes) {
+ varyingHandler->addFlatVarying("triangleIsArc", &fTriangleIsArc, kLow_GrSLPrecision);
+ varyingHandler->addVarying("arcCoords", &fArcCoords, kMedium_GrSLPrecision);
+ }
+}
+
+void GLSLInstanceProcessor::BackendNonAA::setupRect(GrGLSLVertexBuilder* v) {
+ if (fTriangleIsArc.vsOut()) {
+ v->codeAppendf("%s = 0;", fTriangleIsArc.vsOut());
+ }
+}
+
+void GLSLInstanceProcessor::BackendNonAA::setupOval(GrGLSLVertexBuilder* v) {
+ SkASSERT(fArcCoords.vsOut());
+ SkASSERT(fTriangleIsArc.vsOut());
+ v->codeAppendf("%s = %s;", fArcCoords.vsOut(), this->outShapeCoords());
+ v->codeAppendf("%s = %s & 1;", fTriangleIsArc.vsOut(), fInputs.attr(Attrib::kVertexAttrs));
+}
+
+void GLSLInstanceProcessor::BackendNonAA::onInitInnerShape(GrGLSLVaryingHandler* varyingHandler,
+ GrGLSLVertexBuilder*) {
+ varyingHandler->addVarying("innerShapeCoords", &fInnerShapeCoords, kMedium_GrSLPrecision);
+ if (kRect_ShapeFlag != fBatchInfo.fInnerShapeTypes &&
+ kOval_ShapeFlag != fBatchInfo.fInnerShapeTypes) {
+ varyingHandler->addFlatVarying("innerRRect", &fInnerRRect, kMedium_GrSLPrecision);
+ }
+}
+
+void GLSLInstanceProcessor::BackendNonAA::setupInnerRect(GrGLSLVertexBuilder* v) {
+ if (fInnerRRect.vsOut()) {
+ v->codeAppendf("%s = vec4(1);", fInnerRRect.vsOut());
+ }
+}
+
+void GLSLInstanceProcessor::BackendNonAA::setupInnerOval(GrGLSLVertexBuilder* v) {
+ if (fInnerRRect.vsOut()) {
+ v->codeAppendf("%s = vec4(0, 0, 1, 1);", fInnerRRect.vsOut());
+ }
+}
+
+void GLSLInstanceProcessor::BackendNonAA::onSetupInnerSimpleRRect(GrGLSLVertexBuilder* v) {
+ v->codeAppendf("%s = vec4(1.0 - innerRadii, 1.0 / innerRadii);", fInnerRRect.vsOut());
+}
+
+void GLSLInstanceProcessor::BackendNonAA::onEmitCode(GrGLSLVertexBuilder*,
+ GrGLSLPPFragmentBuilder* f,
+ const char* outCoverage,
+ const char* outColor) {
+ const char* dropFragment = nullptr;
+ if (!fBatchInfo.fCannotDiscard) {
+ dropFragment = "discard";
+ } else if (fModifiesCoverage) {
+ f->appendPrecisionModifier(kLow_GrSLPrecision);
+ f->codeAppend ("float covered = 1.0;");
+ dropFragment = "covered = 0.0";
+ } else if (fModifiesColor) {
+ f->appendPrecisionModifier(kLow_GrSLPrecision);
+ f->codeAppendf("vec4 color = %s;", fColor.fsIn());
+ dropFragment = "color = vec4(0)";
+ }
+ if (fTriangleIsArc.fsIn()) {
+ SkASSERT(dropFragment);
+ f->codeAppendf("if (%s != 0 && dot(%s, %s) > 1.0) %s;",
+ fTriangleIsArc.fsIn(), fArcCoords.fsIn(), fArcCoords.fsIn(), dropFragment);
+ }
+ if (fBatchInfo.fInnerShapeTypes) {
+ SkASSERT(dropFragment);
+ f->codeAppendf("// Inner shape.\n");
+ if (kRect_ShapeFlag == fBatchInfo.fInnerShapeTypes) {
+ f->codeAppendf("if (all(lessThanEqual(abs(%s), vec2(1)))) %s;",
+ fInnerShapeCoords.fsIn(), dropFragment);
+ } else if (kOval_ShapeFlag == fBatchInfo.fInnerShapeTypes) {
+ f->codeAppendf("if ((dot(%s, %s) <= 1.0)) %s;",
+ fInnerShapeCoords.fsIn(), fInnerShapeCoords.fsIn(), dropFragment);
+ } else {
+ f->codeAppendf("if (all(lessThan(abs(%s), vec2(1)))) {", fInnerShapeCoords.fsIn());
+ f->codeAppendf( "vec2 distanceToArcEdge = abs(%s) - %s.xy;",
+ fInnerShapeCoords.fsIn(), fInnerRRect.fsIn());
+ f->codeAppend ( "if (any(lessThan(distanceToArcEdge, vec2(0)))) {");
+ f->codeAppendf( "%s;", dropFragment);
+ f->codeAppend ( "} else {");
+ f->codeAppendf( "vec2 rrectCoords = distanceToArcEdge * %s.zw;",
+ fInnerRRect.fsIn());
+ f->codeAppend ( "if (dot(rrectCoords, rrectCoords) <= 1.0) {");
+ f->codeAppendf( "%s;", dropFragment);
+ f->codeAppend ( "}");
+ f->codeAppend ( "}");
+ f->codeAppend ("}");
+ }
+ }
+ if (fModifiesCoverage) {
+ f->codeAppendf("%s = vec4(covered);", outCoverage);
+ } else if (fModifiesColor) {
+ f->codeAppendf("%s = color;", outColor);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+class GLSLInstanceProcessor::BackendCoverage : public Backend {
+public:
+ BackendCoverage(BatchInfo batchInfo, const VertexInputs& inputs)
+ : INHERITED(batchInfo, inputs),
+ fColorTimesRectCoverage(kVec4f_GrSLType),
+ fRectCoverage(kFloat_GrSLType),
+ fEllipseCoords(kVec2f_GrSLType),
+ fEllipseName(kVec2f_GrSLType),
+ fBloatedRadius(kFloat_GrSLType),
+ fDistanceToInnerEdge(kVec2f_GrSLType),
+ fInnerShapeBloatedHalfSize(kVec2f_GrSLType),
+ fInnerEllipseCoords(kVec2f_GrSLType),
+ fInnerEllipseName(kVec2f_GrSLType) {
+ fShapeIsCircle = !fBatchInfo.fNonSquare && !(fBatchInfo.fShapeTypes & kRRect_ShapesMask);
+ fTweakAlphaForCoverage = !fBatchInfo.fCannotTweakAlphaForCoverage &&
+ !fBatchInfo.fInnerShapeTypes;
+ fModifiesCoverage = !fTweakAlphaForCoverage;
+ fModifiesColor = fTweakAlphaForCoverage;
+ fModifiedShapeCoords = "bloatedShapeCoords";
+ }
+
+private:
+ void onInit(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) override;
+ void setupRect(GrGLSLVertexBuilder*) override;
+ void setupOval(GrGLSLVertexBuilder*) override;
+ void adjustRRectVertices(GrGLSLVertexBuilder*) override;
+ void onSetupRRect(GrGLSLVertexBuilder*) override;
+
+ void onInitInnerShape(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) override;
+ void setupInnerRect(GrGLSLVertexBuilder*) override;
+ void setupInnerOval(GrGLSLVertexBuilder*) override;
+ void onSetupInnerSimpleRRect(GrGLSLVertexBuilder*) override;
+
+ void onEmitCode(GrGLSLVertexBuilder*, GrGLSLPPFragmentBuilder*, const char* outCoverage,
+ const char* outColor) override;
+
+ void emitRect(GrGLSLPPFragmentBuilder*, const char* outCoverage, const char* outColor);
+ void emitCircle(GrGLSLPPFragmentBuilder*, const char* outCoverage);
+ void emitArc(GrGLSLPPFragmentBuilder* f, const char* ellipseCoords, const char* ellipseName,
+ bool ellipseCoordsNeedClamp, bool ellipseCoordsMayBeNegative,
+ const char* outCoverage);
+ void emitInnerRect(GrGLSLPPFragmentBuilder*, const char* outCoverage);
+
+ GrGLSLVertToFrag fColorTimesRectCoverage;
+ GrGLSLVertToFrag fRectCoverage;
+ GrGLSLVertToFrag fEllipseCoords;
+ GrGLSLVertToFrag fEllipseName;
+ GrGLSLVertToFrag fBloatedRadius;
+ GrGLSLVertToFrag fDistanceToInnerEdge;
+ GrGLSLVertToFrag fInnerShapeBloatedHalfSize;
+ GrGLSLVertToFrag fInnerEllipseCoords;
+ GrGLSLVertToFrag fInnerEllipseName;
+ bool fShapeIsCircle;
+ bool fTweakAlphaForCoverage;
+
+ typedef Backend INHERITED;
+};
+
+void GLSLInstanceProcessor::BackendCoverage::onInit(GrGLSLVaryingHandler* varyingHandler,
+ GrGLSLVertexBuilder* v) {
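+    // Compute the shape's half-size in device space and a half-pixel "bloat" in normalized
+    // shape coords; bloatedShapeCoords pushes each vertex outward by half a pixel for AA.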
+ v->codeAppend ("mat2 shapeTransposeMatrix = transpose(mat2(shapeMatrix));");
+ v->codeAppend ("vec2 shapeHalfSize = vec2(length(shapeTransposeMatrix[0]), "
+ "length(shapeTransposeMatrix[1]));");
+ v->codeAppend ("vec2 bloat = 0.5 / shapeHalfSize;");
+ v->codeAppendf("bloatedShapeCoords = %s * (1.0 + bloat);", fInputs.attr(Attrib::kShapeCoords));
+
+ if (kOval_ShapeFlag != fBatchInfo.fShapeTypes) {
+ if (fTweakAlphaForCoverage) {
+ varyingHandler->addVarying("colorTimesRectCoverage", &fColorTimesRectCoverage,
+ kLow_GrSLPrecision);
+ if (kRect_ShapeFlag == fBatchInfo.fShapeTypes) {
+ fColor = fColorTimesRectCoverage;
+ }
+ } else {
+ varyingHandler->addVarying("rectCoverage", &fRectCoverage, kLow_GrSLPrecision);
+ }
+ v->codeAppend("float rectCoverage = 0.0;");
+ }
+ if (kRect_ShapeFlag != fBatchInfo.fShapeTypes) {
+ varyingHandler->addFlatVarying("triangleIsArc", &fTriangleIsArc, kLow_GrSLPrecision);
+ if (!fShapeIsCircle) {
+ varyingHandler->addVarying("ellipseCoords", &fEllipseCoords, kMedium_GrSLPrecision);
+ varyingHandler->addFlatVarying("ellipseName", &fEllipseName, kHigh_GrSLPrecision);
+ } else {
+ varyingHandler->addVarying("circleCoords", &fEllipseCoords, kHigh_GrSLPrecision);
+ varyingHandler->addFlatVarying("bloatedRadius", &fBloatedRadius, kHigh_GrSLPrecision);
+ }
+ }
+}
+
+void GLSLInstanceProcessor::BackendCoverage::setupRect(GrGLSLVertexBuilder* v) {
+    // Make the border one pixel wide. Inner vs outer is indicated by the vertex attrs.
+ v->codeAppendf("vec2 rectBloat = (%s != 0) ? bloat : -bloat;",
+ fInputs.attr(Attrib::kVertexAttrs));
+    // Here we use the absolute value, because when the rect is thinner than a pixel, this marks
+    // the spot where the pixel center is within half a pixel of the *opposite* edge. This,
+    // combined with the "maxCoverage" logic below, gives us mathematically correct coverage even
+    // for subpixel rectangles.
+ v->codeAppendf("bloatedShapeCoords = %s * abs(vec2(1.0 + rectBloat));",
+ fInputs.attr(Attrib::kShapeCoords));
+
+ // Determine coverage at the vertex. Coverage naturally ramps from 0 to 1 unless the rect is
+ // narrower than a pixel.
+ v->codeAppend ("float maxCoverage = 4.0 * min(0.5, shapeHalfSize.x) *"
+ "min(0.5, shapeHalfSize.y);");
+ v->codeAppendf("rectCoverage = (%s != 0) ? 0.0 : maxCoverage;",
+ fInputs.attr(Attrib::kVertexAttrs));
+
+ if (fTriangleIsArc.vsOut()) {
+ v->codeAppendf("%s = 0;", fTriangleIsArc.vsOut());
+ }
+}
+
+void GLSLInstanceProcessor::BackendCoverage::setupOval(GrGLSLVertexBuilder* v) {
+    // Offset the inner and outer octagons by one pixel. Inner vs outer is indicated by the vertex attrs.
+ v->codeAppendf("vec2 ovalBloat = (%s != 0) ? bloat : -bloat;",
+ fInputs.attr(Attrib::kVertexAttrs));
+ v->codeAppendf("bloatedShapeCoords = %s * max(vec2(1.0 + ovalBloat), vec2(0));",
+ fInputs.attr(Attrib::kShapeCoords));
+ v->codeAppendf("%s = bloatedShapeCoords * shapeHalfSize;", fEllipseCoords.vsOut());
+ if (fEllipseName.vsOut()) {
+ v->codeAppendf("%s = 1.0 / (shapeHalfSize * shapeHalfSize);", fEllipseName.vsOut());
+ }
+ if (fBloatedRadius.vsOut()) {
+ SkASSERT(fShapeIsCircle);
+ v->codeAppendf("%s = shapeHalfSize.x + 0.5;", fBloatedRadius.vsOut());
+ }
+ if (fTriangleIsArc.vsOut()) {
+ v->codeAppendf("%s = int(%s != 0);",
+ fTriangleIsArc.vsOut(), fInputs.attr(Attrib::kVertexAttrs));
+ }
+ if (fColorTimesRectCoverage.vsOut() || fRectCoverage.vsOut()) {
+ v->codeAppendf("rectCoverage = 1.0;");
+ }
+}
+
+void GLSLInstanceProcessor::BackendCoverage::adjustRRectVertices(GrGLSLVertexBuilder* v) {
+ // We try to let the AA borders line up with the arc edges on their particular side, but we
+ // can't allow them to get closer than one half pixel to the edge or they might overlap with
+ // their neighboring border.
+ v->codeAppend("vec2 innerEdge = max(1.0 - bloat, vec2(0));");
+ v->codeAppend ("vec2 borderEdge = cornerSign * clamp(1.0 - radii, -innerEdge, innerEdge);");
+ // 0.5 is a special value that indicates this vertex is an arc edge.
+ v->codeAppendf("if (abs(%s.x) == 0.5)"
+ "bloatedShapeCoords.x = borderEdge.x;", fInputs.attr(Attrib::kShapeCoords));
+ v->codeAppendf("if (abs(%s.y) == 0.5)"
+ "bloatedShapeCoords.y = borderEdge.y;", fInputs.attr(Attrib::kShapeCoords));
+
+ // Adjust the interior border vertices to make the border one pixel wide. 0.75 is a special
+ // value to indicate these points.
+ v->codeAppendf("if (abs(%s.x) == 0.75) "
+ "bloatedShapeCoords.x = cornerSign.x * innerEdge.x;",
+ fInputs.attr(Attrib::kShapeCoords));
+ v->codeAppendf("if (abs(%s.y) == 0.75) "
+ "bloatedShapeCoords.y = cornerSign.y * innerEdge.y;",
+ fInputs.attr(Attrib::kShapeCoords));
+}
+
+void GLSLInstanceProcessor::BackendCoverage::onSetupRRect(GrGLSLVertexBuilder* v) {
+ // The geometry is laid out in such a way that rectCoverage will be 0 and 1 on the vertices, but
+ // we still need to recompute this value because when the rrect gets thinner than one pixel, the
+ // interior edge of the border will necessarily clamp, and we need to match the AA behavior of
+ // the arc segments (i.e. distance from bloated edge only; ignoring the fact that the pixel
+    // actually has less coverage because it's not completely inside the opposite edge.)
+ v->codeAppend("vec2 d = shapeHalfSize + 0.5 - abs(bloatedShapeCoords) * shapeHalfSize;");
+ v->codeAppend("rectCoverage = min(d.x, d.y);");
+
+ SkASSERT(!fShapeIsCircle);
+ // The AA border does not get closer than one half pixel to the edge of the rect, so to get a
+ // smooth transition from flat edge to arc, we don't allow the radii to be smaller than one half
+ // pixel. (We don't worry about the transition on the opposite side when a radius is so large
+ // that the border clamped on that side.)
+ v->codeAppendf("vec2 clampedRadii = max(radii, bloat);");
+ v->codeAppendf("%s = (cornerSign * bloatedShapeCoords + clampedRadii - vec2(1)) * "
+ "shapeHalfSize;", fEllipseCoords.vsOut());
+ v->codeAppendf("%s = 1.0 / (clampedRadii * clampedRadii * shapeHalfSize * shapeHalfSize);",
+ fEllipseName.vsOut());
+}
+
+void GLSLInstanceProcessor::BackendCoverage::onInitInnerShape(GrGLSLVaryingHandler* varyingHandler,
+ GrGLSLVertexBuilder* v) {
+ v->codeAppend("vec2 innerShapeHalfSize = shapeHalfSize / outer2Inner.xy;");
+
+ if (kOval_ShapeFlag == fBatchInfo.fInnerShapeTypes) {
+ varyingHandler->addVarying("innerEllipseCoords", &fInnerEllipseCoords,
+ kMedium_GrSLPrecision);
+ varyingHandler->addFlatVarying("innerEllipseName", &fInnerEllipseName, kHigh_GrSLPrecision);
+ } else {
+ varyingHandler->addVarying("distanceToInnerEdge", &fDistanceToInnerEdge,
+ kMedium_GrSLPrecision);
+ varyingHandler->addFlatVarying("innerShapeBloatedHalfSize", &fInnerShapeBloatedHalfSize,
+ kMedium_GrSLPrecision);
+ if (kRect_ShapeFlag != fBatchInfo.fInnerShapeTypes) {
+ varyingHandler->addVarying("innerShapeCoords", &fInnerShapeCoords,
+ kMedium_GrSLPrecision);
+ varyingHandler->addFlatVarying("innerEllipseName", &fInnerEllipseName,
+ kHigh_GrSLPrecision);
+ varyingHandler->addFlatVarying("innerRRect", &fInnerRRect, kMedium_GrSLPrecision);
+ }
+ }
+}
+
+void GLSLInstanceProcessor::BackendCoverage::setupInnerRect(GrGLSLVertexBuilder* v) {
+ if (fInnerRRect.vsOut()) {
+ // The fragment shader will generalize every inner shape as a round rect. Since this one
+ // is a rect, we simply emit bogus parameters for the round rect (effectively negative
+ // radii) that ensure the fragment shader always takes the "emitRect" codepath.
+ v->codeAppendf("%s.xy = abs(outer2Inner.xy) * (1.0 + bloat) + abs(outer2Inner.zw);",
+ fInnerRRect.vsOut());
+ }
+}
+
+void GLSLInstanceProcessor::BackendCoverage::setupInnerOval(GrGLSLVertexBuilder* v) {
+ v->codeAppendf("%s = 1.0 / (innerShapeHalfSize * innerShapeHalfSize);",
+ fInnerEllipseName.vsOut());
+ if (fInnerEllipseCoords.vsOut()) {
+ v->codeAppendf("%s = innerShapeCoords * innerShapeHalfSize;", fInnerEllipseCoords.vsOut());
+ }
+ if (fInnerRRect.vsOut()) {
+ v->codeAppendf("%s = vec4(0, 0, innerShapeHalfSize);", fInnerRRect.vsOut());
+ }
+}
+
+void GLSLInstanceProcessor::BackendCoverage::onSetupInnerSimpleRRect(GrGLSLVertexBuilder* v) {
+    // The distance-to-ellipse formula doesn't work well when the radii are less than half a pixel.
+ v->codeAppend ("innerRadii = max(innerRadii, bloat);");
+ v->codeAppendf("%s = 1.0 / (innerRadii * innerRadii * innerShapeHalfSize * "
+ "innerShapeHalfSize);",
+ fInnerEllipseName.vsOut());
+ v->codeAppendf("%s = vec4(1.0 - innerRadii, innerShapeHalfSize);", fInnerRRect.vsOut());
+}
+
+void GLSLInstanceProcessor::BackendCoverage::onEmitCode(GrGLSLVertexBuilder* v,
+ GrGLSLPPFragmentBuilder* f,
+ const char* outCoverage,
+ const char* outColor) {
+ if (fColorTimesRectCoverage.vsOut()) {
+ SkASSERT(!fRectCoverage.vsOut());
+ v->codeAppendf("%s = %s * rectCoverage;",
+ fColorTimesRectCoverage.vsOut(), fInputs.attr(Attrib::kColor));
+ }
+ if (fRectCoverage.vsOut()) {
+ SkASSERT(!fColorTimesRectCoverage.vsOut());
+ v->codeAppendf("%s = rectCoverage;", fRectCoverage.vsOut());
+ }
+
+ SkString coverage("float coverage");
+ if (f->getProgramBuilder()->glslCaps()->usesPrecisionModifiers()) {
+ coverage.prependf("lowp ");
+ }
+ if (fBatchInfo.fInnerShapeTypes || (!fTweakAlphaForCoverage && fTriangleIsArc.fsIn())) {
+ f->codeAppendf("%s;", coverage.c_str());
+ coverage = "coverage";
+ }
+ if (fTriangleIsArc.fsIn()) {
+ f->codeAppendf("if (%s == 0) {", fTriangleIsArc.fsIn());
+ this->emitRect(f, coverage.c_str(), outColor);
+ f->codeAppend ("} else {");
+ if (fShapeIsCircle) {
+ this->emitCircle(f, coverage.c_str());
+ } else {
+ bool ellipseCoordsMayBeNegative = SkToBool(fBatchInfo.fShapeTypes & kOval_ShapeFlag);
+ this->emitArc(f, fEllipseCoords.fsIn(), fEllipseName.fsIn(),
+ true /*ellipseCoordsNeedClamp*/, ellipseCoordsMayBeNegative,
+ coverage.c_str());
+ }
+ if (fTweakAlphaForCoverage) {
+ f->codeAppendf("%s = %s * coverage;", outColor, fColor.fsIn());
+ }
+ f->codeAppend ("}");
+ } else {
+ this->emitRect(f, coverage.c_str(), outColor);
+ }
+
+ if (fBatchInfo.fInnerShapeTypes) {
+ f->codeAppendf("// Inner shape.\n");
+ SkString innerCoverageDecl("float innerCoverage");
+ if (f->getProgramBuilder()->glslCaps()->usesPrecisionModifiers()) {
+ innerCoverageDecl.prependf("lowp ");
+ }
+ if (kOval_ShapeFlag == fBatchInfo.fInnerShapeTypes) {
+ this->emitArc(f, fInnerEllipseCoords.fsIn(), fInnerEllipseName.fsIn(),
+ true /*ellipseCoordsNeedClamp*/, true /*ellipseCoordsMayBeNegative*/,
+ innerCoverageDecl.c_str());
+ } else {
+ v->codeAppendf("%s = innerShapeCoords * innerShapeHalfSize;",
+ fDistanceToInnerEdge.vsOut());
+ v->codeAppendf("%s = innerShapeHalfSize + 0.5;", fInnerShapeBloatedHalfSize.vsOut());
+
+ if (kRect_ShapeFlag == fBatchInfo.fInnerShapeTypes) {
+ this->emitInnerRect(f, innerCoverageDecl.c_str());
+ } else {
+ f->codeAppendf("%s = 0.0;", innerCoverageDecl.c_str());
+ f->appendPrecisionModifier(kMedium_GrSLPrecision);
+ f->codeAppendf("vec2 distanceToArcEdge = abs(%s) - %s.xy;",
+ fInnerShapeCoords.fsIn(), fInnerRRect.fsIn());
+ f->codeAppend ("if (any(lessThan(distanceToArcEdge, vec2(1e-5)))) {");
+ this->emitInnerRect(f, "innerCoverage");
+ f->codeAppend ("} else {");
+ f->appendPrecisionModifier(kMedium_GrSLPrecision);
+ f->codeAppendf( "vec2 ellipseCoords = distanceToArcEdge * %s.zw;",
+ fInnerRRect.fsIn());
+ this->emitArc(f, "ellipseCoords", fInnerEllipseName.fsIn(),
+ false /*ellipseCoordsNeedClamp*/,
+ false /*ellipseCoordsMayBeNegative*/, "innerCoverage");
+ f->codeAppend ("}");
+ }
+ }
+ f->codeAppendf("%s = vec4(max(coverage - innerCoverage, 0.0));", outCoverage);
+ } else if (!fTweakAlphaForCoverage) {
+ f->codeAppendf("%s = vec4(coverage);", outCoverage);
+ }
+}
+
+void GLSLInstanceProcessor::BackendCoverage::emitRect(GrGLSLPPFragmentBuilder* f,
+ const char* outCoverage,
+ const char* outColor) {
+ if (fColorTimesRectCoverage.fsIn()) {
+ f->codeAppendf("%s = %s;", outColor, fColorTimesRectCoverage.fsIn());
+ } else if (fTweakAlphaForCoverage) {
+ // We are drawing just ovals. The interior rect always has 100% coverage.
+ f->codeAppendf("%s = %s;", outColor, fColor.fsIn());
+ } else if (fRectCoverage.fsIn()) {
+ f->codeAppendf("%s = %s;", outCoverage, fRectCoverage.fsIn());
+ } else {
+ f->codeAppendf("%s = 1.0;", outCoverage);
+ }
+}
+
+void GLSLInstanceProcessor::BackendCoverage::emitCircle(GrGLSLPPFragmentBuilder* f,
+ const char* outCoverage) {
+ // TODO: circleCoords = max(circleCoords, 0) if we decide to do this optimization on rrects.
+ SkASSERT(!(kRRect_ShapesMask & fBatchInfo.fShapeTypes));
+ f->appendPrecisionModifier(kMedium_GrSLPrecision);
+ f->codeAppendf("float distanceToEdge = %s - length(%s);",
+ fBloatedRadius.fsIn(), fEllipseCoords.fsIn());
+ f->codeAppendf("%s = clamp(distanceToEdge, 0.0, 1.0);", outCoverage);
+}
+
+void GLSLInstanceProcessor::BackendCoverage::emitArc(GrGLSLPPFragmentBuilder* f,
+ const char* ellipseCoords,
+ const char* ellipseName,
+ bool ellipseCoordsNeedClamp,
+ bool ellipseCoordsMayBeNegative,
+ const char* outCoverage) {
+ SkASSERT(!ellipseCoordsMayBeNegative || ellipseCoordsNeedClamp);
+ if (ellipseCoordsNeedClamp) {
+ // This serves two purposes:
+ // - To restrict the arcs of rounded rects to their positive quadrants.
+ // - To avoid inversesqrt(0) in the ellipse formula.
+ f->appendPrecisionModifier(kMedium_GrSLPrecision);
+ if (ellipseCoordsMayBeNegative) {
+ f->codeAppendf("vec2 ellipseClampedCoords = max(abs(%s), vec2(1e-4));", ellipseCoords);
+ } else {
+ f->codeAppendf("vec2 ellipseClampedCoords = max(%s, vec2(1e-4));", ellipseCoords);
+ }
+ ellipseCoords = "ellipseClampedCoords";
+ }
+ // ellipseCoords are in pixel space and ellipseName is 1 / rx^2, 1 / ry^2.
+ f->appendPrecisionModifier(kHigh_GrSLPrecision);
+ f->codeAppendf("vec2 Z = %s * %s;", ellipseCoords, ellipseName);
+ // implicit is the evaluation of (x/rx)^2 + (y/ry)^2 - 1.
+ f->appendPrecisionModifier(kHigh_GrSLPrecision);
+ f->codeAppendf("float implicit = dot(Z, %s) - 1.0;", ellipseCoords);
+ // gradDot is the squared length of the gradient of the implicit.
+ f->appendPrecisionModifier(kHigh_GrSLPrecision);
+ f->codeAppendf("float gradDot = 4.0 * dot(Z, Z);");
+ f->appendPrecisionModifier(kMedium_GrSLPrecision);
+ f->codeAppend ("float approxDist = implicit * inversesqrt(gradDot);");
+ f->codeAppendf("%s = clamp(0.5 - approxDist, 0.0, 1.0);", outCoverage);
+}
+
+void GLSLInstanceProcessor::BackendCoverage::emitInnerRect(GrGLSLPPFragmentBuilder* f,
+ const char* outCoverage) {
+ f->appendPrecisionModifier(kLow_GrSLPrecision);
+ f->codeAppendf("vec2 c = %s - abs(%s);",
+ fInnerShapeBloatedHalfSize.fsIn(), fDistanceToInnerEdge.fsIn());
+ f->codeAppendf("%s = clamp(min(c.x, c.y), 0.0, 1.0);", outCoverage);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+class GLSLInstanceProcessor::BackendMultisample : public Backend {
+public:
+ BackendMultisample(BatchInfo batchInfo, const VertexInputs& inputs, int effectiveSampleCnt)
+ : INHERITED(batchInfo, inputs),
+ fEffectiveSampleCnt(effectiveSampleCnt),
+ fShapeCoords(kVec2f_GrSLType),
+ fShapeInverseMatrix(kMat22f_GrSLType),
+ fFragShapeHalfSpan(kVec2f_GrSLType),
+ fArcTest(kVec2f_GrSLType),
+ fArcInverseMatrix(kMat22f_GrSLType),
+ fFragArcHalfSpan(kVec2f_GrSLType),
+ fEarlyAccept(kInt_GrSLType),
+ fInnerShapeInverseMatrix(kMat22f_GrSLType),
+ fFragInnerShapeHalfSpan(kVec2f_GrSLType) {
+ fRectTrianglesMaySplit = fBatchInfo.fHasPerspective;
+ fNeedsNeighborRadii = this->isMixedSampled() && !fBatchInfo.fHasPerspective;
+ }
+
+private:
+ bool isMixedSampled() const { return AntialiasMode::kMixedSamples == fBatchInfo.fAntialiasMode; }
+
+ void onInit(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) override;
+ void setupRect(GrGLSLVertexBuilder*) override;
+ void setupOval(GrGLSLVertexBuilder*) override;
+ void adjustRRectVertices(GrGLSLVertexBuilder*) override;
+ void onSetupRRect(GrGLSLVertexBuilder*) override;
+
+ void onInitInnerShape(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) override;
+ void setupInnerRect(GrGLSLVertexBuilder*) override;
+ void setupInnerOval(GrGLSLVertexBuilder*) override;
+ void onSetupInnerSimpleRRect(GrGLSLVertexBuilder*) override;
+
+ void onEmitCode(GrGLSLVertexBuilder*, GrGLSLPPFragmentBuilder*, const char*,
+ const char*) override;
+
+ struct EmitShapeCoords {
+ const GrGLSLVarying* fVarying;
+ const char* fInverseMatrix;
+ const char* fFragHalfSpan;
+ };
+
+ struct EmitShapeOpts {
+ bool fIsTightGeometry;
+ bool fResolveMixedSamples;
+ bool fInvertCoverage;
+ };
+
+ void emitRect(GrGLSLPPFragmentBuilder*, const EmitShapeCoords&, const EmitShapeOpts&);
+ void emitArc(GrGLSLPPFragmentBuilder*, const EmitShapeCoords&, bool coordsMayBeNegative,
+ bool clampCoords, const EmitShapeOpts&);
+ void emitSimpleRRect(GrGLSLPPFragmentBuilder*, const EmitShapeCoords&, const char* rrect,
+ const EmitShapeOpts&);
+ void interpolateAtSample(GrGLSLPPFragmentBuilder*, const GrGLSLVarying&, const char* sampleIdx,
+ const char* interpolationMatrix);
+ void acceptOrRejectWholeFragment(GrGLSLPPFragmentBuilder*, bool inside, const EmitShapeOpts&);
+ void acceptCoverageMask(GrGLSLPPFragmentBuilder*, const char* shapeMask, const EmitShapeOpts&,
+ bool maybeSharedEdge = true);
+
+ int fEffectiveSampleCnt;
+ bool fRectTrianglesMaySplit;
+ GrGLSLVertToFrag fShapeCoords;
+ GrGLSLVertToFrag fShapeInverseMatrix;
+ GrGLSLVertToFrag fFragShapeHalfSpan;
+ GrGLSLVertToFrag fArcTest;
+ GrGLSLVertToFrag fArcInverseMatrix;
+ GrGLSLVertToFrag fFragArcHalfSpan;
+ GrGLSLVertToFrag fEarlyAccept;
+ GrGLSLVertToFrag fInnerShapeInverseMatrix;
+ GrGLSLVertToFrag fFragInnerShapeHalfSpan;
+ SkString fSquareFun;
+
+ typedef Backend INHERITED;
+};
+
+void GLSLInstanceProcessor::BackendMultisample::onInit(GrGLSLVaryingHandler* varyingHandler,
+ GrGLSLVertexBuilder* v) {
+ if (!this->isMixedSampled()) {
+ if (kRect_ShapeFlag != fBatchInfo.fShapeTypes) {
+ varyingHandler->addFlatVarying("triangleIsArc", &fTriangleIsArc, kLow_GrSLPrecision);
+ varyingHandler->addVarying("arcCoords", &fArcCoords, kHigh_GrSLPrecision);
+ if (!fBatchInfo.fHasPerspective) {
+ varyingHandler->addFlatVarying("arcInverseMatrix", &fArcInverseMatrix,
+ kHigh_GrSLPrecision);
+ varyingHandler->addFlatVarying("fragArcHalfSpan", &fFragArcHalfSpan,
+ kHigh_GrSLPrecision);
+ }
+ } else if (!fBatchInfo.fInnerShapeTypes) {
+ return;
+ }
+ } else {
+ varyingHandler->addVarying("shapeCoords", &fShapeCoords, kHigh_GrSLPrecision);
+ if (!fBatchInfo.fHasPerspective) {
+ varyingHandler->addFlatVarying("shapeInverseMatrix", &fShapeInverseMatrix,
+ kHigh_GrSLPrecision);
+ varyingHandler->addFlatVarying("fragShapeHalfSpan", &fFragShapeHalfSpan,
+ kHigh_GrSLPrecision);
+ }
+ if (fBatchInfo.fShapeTypes & kRRect_ShapesMask) {
+ varyingHandler->addVarying("arcCoords", &fArcCoords, kHigh_GrSLPrecision);
+ varyingHandler->addVarying("arcTest", &fArcTest, kHigh_GrSLPrecision);
+ if (!fBatchInfo.fHasPerspective) {
+ varyingHandler->addFlatVarying("arcInverseMatrix", &fArcInverseMatrix,
+ kHigh_GrSLPrecision);
+ varyingHandler->addFlatVarying("fragArcHalfSpan", &fFragArcHalfSpan,
+ kHigh_GrSLPrecision);
+ }
+ } else if (fBatchInfo.fShapeTypes & kOval_ShapeFlag) {
+ fArcCoords = fShapeCoords;
+ fArcInverseMatrix = fShapeInverseMatrix;
+ fFragArcHalfSpan = fFragShapeHalfSpan;
+ if (fBatchInfo.fShapeTypes & kRect_ShapeFlag) {
+ varyingHandler->addFlatVarying("triangleIsArc", &fTriangleIsArc,
+ kLow_GrSLPrecision);
+ }
+ }
+ if (kRect_ShapeFlag != fBatchInfo.fShapeTypes) {
+ v->definef("SAMPLE_MASK_ALL", "0x%x", (1 << fEffectiveSampleCnt) - 1);
+ varyingHandler->addFlatVarying("earlyAccept", &fEarlyAccept, kHigh_GrSLPrecision);
+ }
+ }
+ if (!fBatchInfo.fHasPerspective) {
+ v->codeAppend("mat2 shapeInverseMatrix = inverse(mat2(shapeMatrix));");
+ v->codeAppend("vec2 fragShapeSpan = abs(vec4(shapeInverseMatrix).xz) + "
+ "abs(vec4(shapeInverseMatrix).yw);");
+ }
+}
+
+void GLSLInstanceProcessor::BackendMultisample::setupRect(GrGLSLVertexBuilder* v) {
+ if (fShapeCoords.vsOut()) {
+ v->codeAppendf("%s = %s;", fShapeCoords.vsOut(), this->outShapeCoords());
+ }
+ if (fShapeInverseMatrix.vsOut()) {
+ v->codeAppendf("%s = shapeInverseMatrix;", fShapeInverseMatrix.vsOut());
+ }
+ if (fFragShapeHalfSpan.vsOut()) {
+ v->codeAppendf("%s = 0.5 * fragShapeSpan;", fFragShapeHalfSpan.vsOut());
+ }
+ if (fArcTest.vsOut()) {
+ // Pick a value that is not > 0.
+ v->codeAppendf("%s = vec2(0);", fArcTest.vsOut());
+ }
+ if (fTriangleIsArc.vsOut()) {
+ v->codeAppendf("%s = 0;", fTriangleIsArc.vsOut());
+ }
+ if (fEarlyAccept.vsOut()) {
+ v->codeAppendf("%s = SAMPLE_MASK_ALL;", fEarlyAccept.vsOut());
+ }
+}
+
+void GLSLInstanceProcessor::BackendMultisample::setupOval(GrGLSLVertexBuilder* v) {
+ v->codeAppendf("%s = abs(%s);", fArcCoords.vsOut(), this->outShapeCoords());
+ if (fArcInverseMatrix.vsOut()) {
+ v->codeAppendf("vec2 s = sign(%s);", this->outShapeCoords());
+ v->codeAppendf("%s = shapeInverseMatrix * mat2(s.x, 0, 0 , s.y);",
+ fArcInverseMatrix.vsOut());
+ }
+ if (fFragArcHalfSpan.vsOut()) {
+ v->codeAppendf("%s = 0.5 * fragShapeSpan;", fFragArcHalfSpan.vsOut());
+ }
+ if (fArcTest.vsOut()) {
+ // Pick a value that is > 0.
+ v->codeAppendf("%s = vec2(1);", fArcTest.vsOut());
+ }
+ if (fTriangleIsArc.vsOut()) {
+ if (!this->isMixedSampled()) {
+ v->codeAppendf("%s = %s & 1;",
+ fTriangleIsArc.vsOut(), fInputs.attr(Attrib::kVertexAttrs));
+ } else {
+ v->codeAppendf("%s = 1;", fTriangleIsArc.vsOut());
+ }
+ }
+ if (fEarlyAccept.vsOut()) {
+ v->codeAppendf("%s = ~%s & SAMPLE_MASK_ALL;",
+ fEarlyAccept.vsOut(), fInputs.attr(Attrib::kVertexAttrs));
+ }
+}
+
+void GLSLInstanceProcessor::BackendMultisample::adjustRRectVertices(GrGLSLVertexBuilder* v) {
+ if (!this->isMixedSampled()) {
+ INHERITED::adjustRRectVertices(v);
+ return;
+ }
+
+ if (!fBatchInfo.fHasPerspective) {
+ // For the mixed samples algorithm it's best to bloat the corner triangles a bit so that
+ // more of the pixels that cross into the arc region are completely inside the shared edges.
+ // We also snap to a regular rect if the radii shrink smaller than a pixel.
+ v->codeAppend ("vec2 midpt = 0.5 * (neighborRadii - radii);");
+ v->codeAppend ("vec2 cornerSize = any(lessThan(radii, fragShapeSpan)) ? "
+ "vec2(0) : min(radii + 0.5 * fragShapeSpan, 1.0 - midpt);");
+ } else {
+ // TODO: We could still bloat the corner triangle in the perspective case; we would just
+ // need to find the screen-space derivative of shape coords at this particular point.
+ v->codeAppend ("vec2 cornerSize = any(lessThan(radii, vec2(1e-3))) ? vec2(0) : radii;");
+ }
+
+ v->codeAppendf("if (abs(%s.x) == 0.5)"
+ "%s.x = cornerSign.x * (1.0 - cornerSize.x);",
+ fInputs.attr(Attrib::kShapeCoords), fModifiedShapeCoords);
+ v->codeAppendf("if (abs(%s.y) == 0.5)"
+ "%s.y = cornerSign.y * (1.0 - cornerSize.y);",
+ fInputs.attr(Attrib::kShapeCoords), fModifiedShapeCoords);
+}
+
+void GLSLInstanceProcessor::BackendMultisample::onSetupRRect(GrGLSLVertexBuilder* v) {
+ if (fShapeCoords.vsOut()) {
+ v->codeAppendf("%s = %s;", fShapeCoords.vsOut(), this->outShapeCoords());
+ }
+ if (fShapeInverseMatrix.vsOut()) {
+ v->codeAppendf("%s = shapeInverseMatrix;", fShapeInverseMatrix.vsOut());
+ }
+ if (fFragShapeHalfSpan.vsOut()) {
+ v->codeAppendf("%s = 0.5 * fragShapeSpan;", fFragShapeHalfSpan.vsOut());
+ }
+ if (fArcInverseMatrix.vsOut()) {
+ v->codeAppend ("vec2 s = cornerSign / radii;");
+ v->codeAppendf("%s = shapeInverseMatrix * mat2(s.x, 0, 0, s.y);",
+ fArcInverseMatrix.vsOut());
+ }
+ if (fFragArcHalfSpan.vsOut()) {
+ v->codeAppendf("%s = 0.5 * (abs(vec4(%s).xz) + abs(vec4(%s).yw));",
+ fFragArcHalfSpan.vsOut(), fArcInverseMatrix.vsOut(),
+ fArcInverseMatrix.vsOut());
+ }
+ if (fArcTest.vsOut()) {
+        // The interior triangles are laid out as a fan. fArcTest holds the two distances from the
+        // shared edges of a fan triangle to a point within that triangle. It is used to check if a
+        // fragment is too close to either shared edge, in which case we point sample the shape as a
+        // rect at that point in order to guarantee the mixed samples discard logic works correctly.
+ v->codeAppendf("%s = (cornerSize == vec2(0)) ? vec2(0) : "
+ "cornerSign * %s * mat2(1, cornerSize.x - 1.0, cornerSize.y - 1.0, 1);",
+ fArcTest.vsOut(), fModifiedShapeCoords);
+ if (!fBatchInfo.fHasPerspective) {
+ // Shift the point at which distances to edges are measured from the center of the pixel
+ // to the corner. This way the sign of fArcTest will quickly tell us whether a pixel
+ // is completely inside the shared edge. Perspective mode will accomplish this same task
+ // by finding the derivatives in the fragment shader.
+ v->codeAppendf("%s -= 0.5 * (fragShapeSpan.yx * abs(radii - 1.0) + fragShapeSpan);",
+ fArcTest.vsOut());
+ }
+ }
+ if (fEarlyAccept.vsOut()) {
+ SkASSERT(this->isMixedSampled());
+ v->codeAppendf("%s = all(equal(vec2(1), abs(%s))) ? 0 : SAMPLE_MASK_ALL;",
+ fEarlyAccept.vsOut(), fInputs.attr(Attrib::kShapeCoords));
+ }
+}
+
+void
+GLSLInstanceProcessor::BackendMultisample::onInitInnerShape(GrGLSLVaryingHandler* varyingHandler,
+ GrGLSLVertexBuilder* v) {
+ varyingHandler->addVarying("innerShapeCoords", &fInnerShapeCoords, kHigh_GrSLPrecision);
+ if (kOval_ShapeFlag != fBatchInfo.fInnerShapeTypes &&
+ kRect_ShapeFlag != fBatchInfo.fInnerShapeTypes) {
+ varyingHandler->addFlatVarying("innerRRect", &fInnerRRect, kHigh_GrSLPrecision);
+ }
+ if (!fBatchInfo.fHasPerspective) {
+ varyingHandler->addFlatVarying("innerShapeInverseMatrix", &fInnerShapeInverseMatrix,
+ kHigh_GrSLPrecision);
+ v->codeAppendf("%s = shapeInverseMatrix * mat2(outer2Inner.x, 0, 0, outer2Inner.y);",
+ fInnerShapeInverseMatrix.vsOut());
+ varyingHandler->addFlatVarying("fragInnerShapeHalfSpan", &fFragInnerShapeHalfSpan,
+ kHigh_GrSLPrecision);
+ v->codeAppendf("%s = 0.5 * fragShapeSpan * outer2Inner.xy;",
+ fFragInnerShapeHalfSpan.vsOut());
+ }
+}
+
+void GLSLInstanceProcessor::BackendMultisample::setupInnerRect(GrGLSLVertexBuilder* v) {
+ if (fInnerRRect.vsOut()) {
+ // The fragment shader will generalize every inner shape as a round rect. Since this one
+ // is a rect, we simply emit bogus parameters for the round rect (negative radii) that
+ // ensure the fragment shader always takes the "sample as rect" codepath.
+ v->codeAppendf("%s = vec4(2.0 * (inner.zw - inner.xy) / (outer.zw - outer.xy), vec2(0));",
+ fInnerRRect.vsOut());
+ }
+}
+
+void GLSLInstanceProcessor::BackendMultisample::setupInnerOval(GrGLSLVertexBuilder* v) {
+ if (fInnerRRect.vsOut()) {
+ v->codeAppendf("%s = vec4(0, 0, 1, 1);", fInnerRRect.vsOut());
+ }
+}
+
+void GLSLInstanceProcessor::BackendMultisample::onSetupInnerSimpleRRect(GrGLSLVertexBuilder* v) {
+ // Avoid numeric instability by not allowing the inner radii to get smaller than 1/10th pixel.
+ if (fFragInnerShapeHalfSpan.vsOut()) {
+ v->codeAppendf("innerRadii = max(innerRadii, 2e-1 * %s);", fFragInnerShapeHalfSpan.vsOut());
+ } else {
+ v->codeAppend ("innerRadii = max(innerRadii, vec2(1e-4));");
+ }
+ v->codeAppendf("%s = vec4(1.0 - innerRadii, 1.0 / innerRadii);", fInnerRRect.vsOut());
+}
+
+void GLSLInstanceProcessor::BackendMultisample::onEmitCode(GrGLSLVertexBuilder*,
+ GrGLSLPPFragmentBuilder* f,
+ const char*, const char*) {
+ f->define("SAMPLE_COUNT", fEffectiveSampleCnt);
+ if (this->isMixedSampled()) {
+ f->definef("SAMPLE_MASK_ALL", "0x%x", (1 << fEffectiveSampleCnt) - 1);
+ f->definef("SAMPLE_MASK_MSB", "0x%x", 1 << (fEffectiveSampleCnt - 1));
+ }
+
+ if (kRect_ShapeFlag != (fBatchInfo.fShapeTypes | fBatchInfo.fInnerShapeTypes)) {
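+        // Emit a small helper: square(v) == dot(v, v). The arc and rrect tests below compare its
+        // result against 1.0.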
+ GrGLSLShaderVar x("x", kVec2f_GrSLType, GrGLSLShaderVar::kNonArray, kHigh_GrSLPrecision);
+ f->emitFunction(kFloat_GrSLType, "square", 1, &x, "return dot(x, x);", &fSquareFun);
+ }
+
+ EmitShapeCoords shapeCoords;
+ shapeCoords.fVarying = &fShapeCoords;
+ shapeCoords.fInverseMatrix = fShapeInverseMatrix.fsIn();
+ shapeCoords.fFragHalfSpan = fFragShapeHalfSpan.fsIn();
+
+ EmitShapeCoords arcCoords;
+ arcCoords.fVarying = &fArcCoords;
+ arcCoords.fInverseMatrix = fArcInverseMatrix.fsIn();
+ arcCoords.fFragHalfSpan = fFragArcHalfSpan.fsIn();
+ bool clampArcCoords = this->isMixedSampled() && (fBatchInfo.fShapeTypes & kRRect_ShapesMask);
+
+ EmitShapeOpts opts;
+ opts.fIsTightGeometry = true;
+ opts.fResolveMixedSamples = this->isMixedSampled();
+ opts.fInvertCoverage = false;
+
+ if (fBatchInfo.fHasPerspective && fBatchInfo.fInnerShapeTypes) {
+ // This determines if the fragment should consider the inner shape in its sample mask.
+ // We take the derivative early in case discards may occur before we get to the inner shape.
+ f->appendPrecisionModifier(kHigh_GrSLPrecision);
+ f->codeAppendf("vec2 fragInnerShapeApproxHalfSpan = 0.5 * fwidth(%s);",
+ fInnerShapeCoords.fsIn());
+ }
+
+ if (!this->isMixedSampled()) {
+ SkASSERT(!fArcTest.fsIn());
+ if (fTriangleIsArc.fsIn()) {
+ f->codeAppendf("if (%s != 0) {", fTriangleIsArc.fsIn());
+ this->emitArc(f, arcCoords, false, clampArcCoords, opts);
+
+ f->codeAppend ("}");
+ }
+ } else {
+ const char* arcTest = fArcTest.fsIn();
+ if (arcTest && fBatchInfo.fHasPerspective) {
+ // The non-perspective version accounts for fwidth() in the vertex shader.
+ // We make sure to take the derivative here, before a neighbor pixel may early accept.
+ f->enableFeature(GrGLSLPPFragmentBuilder::kStandardDerivatives_GLSLFeature);
+ f->appendPrecisionModifier(kHigh_GrSLPrecision);
+ f->codeAppendf("vec2 arcTest = %s - 0.5 * fwidth(%s);",
+ fArcTest.fsIn(), fArcTest.fsIn());
+ arcTest = "arcTest";
+ }
+ const char* earlyAccept = fEarlyAccept.fsIn() ? fEarlyAccept.fsIn() : "SAMPLE_MASK_ALL";
+ f->codeAppendf("if (gl_SampleMaskIn[0] == %s) {", earlyAccept);
+ f->overrideSampleCoverage(earlyAccept);
+ f->codeAppend ("} else {");
+ if (arcTest) {
+ // At this point, if the sample mask is all set it means we are inside an arc triangle.
+ f->codeAppendf("if (gl_SampleMaskIn[0] == SAMPLE_MASK_ALL || "
+ "all(greaterThan(%s, vec2(0)))) {", arcTest);
+ this->emitArc(f, arcCoords, false, clampArcCoords, opts);
+ f->codeAppend ("} else {");
+ this->emitRect(f, shapeCoords, opts);
+ f->codeAppend ("}");
+ } else if (fTriangleIsArc.fsIn()) {
+ f->codeAppendf("if (%s == 0) {", fTriangleIsArc.fsIn());
+ this->emitRect(f, shapeCoords, opts);
+ f->codeAppend ("} else {");
+ this->emitArc(f, arcCoords, false, clampArcCoords, opts);
+ f->codeAppend ("}");
+ } else if (fBatchInfo.fShapeTypes == kOval_ShapeFlag) {
+ this->emitArc(f, arcCoords, false, clampArcCoords, opts);
+ } else {
+ SkASSERT(fBatchInfo.fShapeTypes == kRect_ShapeFlag);
+ this->emitRect(f, shapeCoords, opts);
+ }
+ f->codeAppend ("}");
+ }
+
+ if (fBatchInfo.fInnerShapeTypes) {
+ f->codeAppendf("// Inner shape.\n");
+
+ EmitShapeCoords innerShapeCoords;
+ innerShapeCoords.fVarying = &fInnerShapeCoords;
+ if (!fBatchInfo.fHasPerspective) {
+ innerShapeCoords.fInverseMatrix = fInnerShapeInverseMatrix.fsIn();
+ innerShapeCoords.fFragHalfSpan = fFragInnerShapeHalfSpan.fsIn();
+ }
+
+ EmitShapeOpts innerOpts;
+ innerOpts.fIsTightGeometry = false;
+ innerOpts.fResolveMixedSamples = false; // Mixed samples are resolved in the outer shape.
+ innerOpts.fInvertCoverage = true;
+
+ if (kOval_ShapeFlag == fBatchInfo.fInnerShapeTypes) {
+ this->emitArc(f, innerShapeCoords, true, false, innerOpts);
+ } else {
+ f->codeAppendf("if (all(lessThan(abs(%s), 1.0 + %s))) {", fInnerShapeCoords.fsIn(),
+ !fBatchInfo.fHasPerspective ? innerShapeCoords.fFragHalfSpan
+ : "fragInnerShapeApproxHalfSpan"); // Above.
+ if (kRect_ShapeFlag == fBatchInfo.fInnerShapeTypes) {
+ this->emitRect(f, innerShapeCoords, innerOpts);
+ } else {
+ this->emitSimpleRRect(f, innerShapeCoords, fInnerRRect.fsIn(), innerOpts);
+ }
+ f->codeAppend ("}");
+ }
+ }
+}
+
+void GLSLInstanceProcessor::BackendMultisample::emitRect(GrGLSLPPFragmentBuilder* f,
+ const EmitShapeCoords& coords,
+ const EmitShapeOpts& opts) {
+ // Full MSAA doesn't need to do anything to draw a rect.
+ SkASSERT(!opts.fIsTightGeometry || opts.fResolveMixedSamples);
+ if (coords.fFragHalfSpan) {
+ f->codeAppendf("if (all(lessThanEqual(abs(%s), 1.0 - %s))) {",
+ coords.fVarying->fsIn(), coords.fFragHalfSpan);
+ // The entire pixel is inside the rect.
+ this->acceptOrRejectWholeFragment(f, true, opts);
+ f->codeAppend ("} else ");
+ if (opts.fIsTightGeometry && !fRectTrianglesMaySplit) {
+ f->codeAppendf("if (any(lessThan(abs(%s), 1.0 - %s))) {",
+ coords.fVarying->fsIn(), coords.fFragHalfSpan);
+ // The pixel falls on an edge of the rectangle and is known to not be on a shared edge.
+ this->acceptCoverageMask(f, "gl_SampleMaskIn[0]", opts, false);
+ f->codeAppend ("} else");
+ }
+ f->codeAppend ("{");
+ }
+ f->codeAppend ("int rectMask = 0;");
+ f->codeAppend ("for (int i = 0; i < SAMPLE_COUNT; i++) {");
+ f->appendPrecisionModifier(kHigh_GrSLPrecision);
+ f->codeAppend ( "vec2 pt = ");
+ this->interpolateAtSample(f, *coords.fVarying, "i", coords.fInverseMatrix);
+ f->codeAppend ( ";");
+ f->codeAppend ( "if (all(lessThan(abs(pt), vec2(1)))) rectMask |= (1 << i);");
+ f->codeAppend ("}");
+ this->acceptCoverageMask(f, "rectMask", opts);
+ if (coords.fFragHalfSpan) {
+ f->codeAppend ("}");
+ }
+}
+
+void GLSLInstanceProcessor::BackendMultisample::emitArc(GrGLSLPPFragmentBuilder* f,
+ const EmitShapeCoords& coords,
+ bool coordsMayBeNegative, bool clampCoords,
+ const EmitShapeOpts& opts) {
+ if (coords.fFragHalfSpan) {
+ SkString absArcCoords;
+ absArcCoords.printf(coordsMayBeNegative ? "abs(%s)" : "%s", coords.fVarying->fsIn());
+ if (clampCoords) {
+ f->codeAppendf("if (%s(max(%s + %s, vec2(0))) < 1.0) {",
+ fSquareFun.c_str(), absArcCoords.c_str(), coords.fFragHalfSpan);
+ } else {
+ f->codeAppendf("if (%s(%s + %s) < 1.0) {",
+ fSquareFun.c_str(), absArcCoords.c_str(), coords.fFragHalfSpan);
+ }
+ // The entire pixel is inside the arc.
+ this->acceptOrRejectWholeFragment(f, true, opts);
+ f->codeAppendf("} else if (%s(max(%s - %s, vec2(0))) >= 1.0) {",
+ fSquareFun.c_str(), absArcCoords.c_str(), coords.fFragHalfSpan);
+ // The entire pixel is outside the arc.
+ this->acceptOrRejectWholeFragment(f, false, opts);
+ f->codeAppend ("} else {");
+ }
+ f->codeAppend ( "int arcMask = 0;");
+ f->codeAppend ( "for (int i = 0; i < SAMPLE_COUNT; i++) {");
+ f->appendPrecisionModifier(kHigh_GrSLPrecision);
+ f->codeAppend ( "vec2 pt = ");
+ this->interpolateAtSample(f, *coords.fVarying, "i", coords.fInverseMatrix);
+ f->codeAppend ( ";");
+ if (clampCoords) {
+ SkASSERT(!coordsMayBeNegative);
+ f->codeAppend ( "pt = max(pt, vec2(0));");
+ }
+ f->codeAppendf( "if (%s(pt) < 1.0) arcMask |= (1 << i);", fSquareFun.c_str());
+ f->codeAppend ( "}");
+ this->acceptCoverageMask(f, "arcMask", opts);
+ if (coords.fFragHalfSpan) {
+ f->codeAppend ("}");
+ }
+}
+
+void GLSLInstanceProcessor::BackendMultisample::emitSimpleRRect(GrGLSLPPFragmentBuilder* f,
+ const EmitShapeCoords& coords,
+ const char* rrect,
+ const EmitShapeOpts& opts) {
+ f->appendPrecisionModifier(kHigh_GrSLPrecision);
+ f->codeAppendf("vec2 distanceToArcEdge = abs(%s) - %s.xy;", coords.fVarying->fsIn(), rrect);
+ f->codeAppend ("if (any(lessThan(distanceToArcEdge, vec2(0)))) {");
+ this->emitRect(f, coords, opts);
+ f->codeAppend ("} else {");
+ if (coords.fInverseMatrix && coords.fFragHalfSpan) {
+ f->appendPrecisionModifier(kHigh_GrSLPrecision);
+ f->codeAppendf("vec2 rrectCoords = distanceToArcEdge * %s.zw;", rrect);
+ f->appendPrecisionModifier(kHigh_GrSLPrecision);
+ f->codeAppendf("vec2 fragRRectHalfSpan = %s * %s.zw;", coords.fFragHalfSpan, rrect);
+ f->codeAppendf("if (%s(rrectCoords + fragRRectHalfSpan) <= 1.0) {", fSquareFun.c_str());
+ // The entire pixel is inside the round rect.
+ this->acceptOrRejectWholeFragment(f, true, opts);
+ f->codeAppendf("} else if (%s(max(rrectCoords - fragRRectHalfSpan, vec2(0))) >= 1.0) {",
+ fSquareFun.c_str());
+ // The entire pixel is outside the round rect.
+ this->acceptOrRejectWholeFragment(f, false, opts);
+ f->codeAppend ("} else {");
+ f->appendPrecisionModifier(kHigh_GrSLPrecision);
+ f->codeAppendf( "vec2 s = %s.zw * sign(%s);", rrect, coords.fVarying->fsIn());
+ f->appendPrecisionModifier(kHigh_GrSLPrecision);
+ f->codeAppendf( "mat2 innerRRectInverseMatrix = %s * mat2(s.x, 0, 0, s.y);",
+ coords.fInverseMatrix);
+ f->appendPrecisionModifier(kHigh_GrSLPrecision);
+ f->codeAppend ( "int rrectMask = 0;");
+ f->codeAppend ( "for (int i = 0; i < SAMPLE_COUNT; i++) {");
+ f->appendPrecisionModifier(kHigh_GrSLPrecision);
+ f->codeAppend ( "vec2 pt = rrectCoords + ");
+ f->appendOffsetToSample("i", GrGLSLFPFragmentBuilder::kSkiaDevice_Coordinates);
+ f->codeAppend ( "* innerRRectInverseMatrix;");
+ f->codeAppendf( "if (%s(max(pt, vec2(0))) < 1.0) rrectMask |= (1 << i);",
+ fSquareFun.c_str());
+ f->codeAppend ( "}");
+ this->acceptCoverageMask(f, "rrectMask", opts);
+ f->codeAppend ("}");
+ } else {
+ f->codeAppend ("int rrectMask = 0;");
+ f->codeAppend ("for (int i = 0; i < SAMPLE_COUNT; i++) {");
+ f->appendPrecisionModifier(kHigh_GrSLPrecision);
+ f->codeAppend ( "vec2 shapePt = ");
+ this->interpolateAtSample(f, *coords.fVarying, "i", nullptr);
+ f->codeAppend ( ";");
+ f->appendPrecisionModifier(kHigh_GrSLPrecision);
+ f->codeAppendf( "vec2 rrectPt = max(abs(shapePt) - %s.xy, vec2(0)) * %s.zw;",
+ rrect, rrect);
+ f->codeAppendf( "if (%s(rrectPt) < 1.0) rrectMask |= (1 << i);", fSquareFun.c_str());
+ f->codeAppend ("}");
+ this->acceptCoverageMask(f, "rrectMask", opts);
+ }
+ f->codeAppend ("}");
+}
+
+void GLSLInstanceProcessor::BackendMultisample::interpolateAtSample(GrGLSLPPFragmentBuilder* f,
+ const GrGLSLVarying& varying,
+ const char* sampleIdx,
+ const char* interpolationMatrix) {
+ if (interpolationMatrix) {
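+        // Offset the varying manually: map the sample's device-space offset into the varying's
+        // space with the interpolation (inverse) matrix and add it. This avoids requiring the
+        // GLSL multisample interpolation extension.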
+ f->codeAppendf("(%s + ", varying.fsIn());
+ f->appendOffsetToSample(sampleIdx, GrGLSLFPFragmentBuilder::kSkiaDevice_Coordinates);
+ f->codeAppendf(" * %s)", interpolationMatrix);
+ } else {
+ SkAssertResult(
+ f->enableFeature(GrGLSLFragmentBuilder::kMultisampleInterpolation_GLSLFeature));
+ f->codeAppendf("interpolateAtOffset(%s, ", varying.fsIn());
+ f->appendOffsetToSample(sampleIdx, GrGLSLFPFragmentBuilder::kGLSLWindow_Coordinates);
+ f->codeAppend(")");
+ }
+}
+
+void
+GLSLInstanceProcessor::BackendMultisample::acceptOrRejectWholeFragment(GrGLSLPPFragmentBuilder* f,
+ bool inside,
+ const EmitShapeOpts& opts) {
+ if (inside != opts.fInvertCoverage) { // Accept the entire fragment.
+ if (opts.fResolveMixedSamples) {
+ // This is a mixed sampled fragment in the interior of the shape. Reassign 100% coverage
+ // to one fragment, and drop all other fragments that may fall on this same pixel. Since
+            // our geometry is watertight and non-overlapping, we can take advantage of the
+ // properties that (1) the incoming sample masks will be disjoint across fragments that
+ // fall on a common pixel, and (2) since the entire fragment is inside the shape, each
+ // sample's corresponding bit will be set in the incoming sample mask of exactly one
+ // fragment.
+ f->codeAppend("if ((gl_SampleMaskIn[0] & SAMPLE_MASK_MSB) == 0) {");
+ // Drop this fragment.
+ if (!fBatchInfo.fCannotDiscard) {
+ f->codeAppend("discard;");
+ } else {
+ f->overrideSampleCoverage("0");
+ }
+ f->codeAppend("} else {");
+ // Override the lone surviving fragment to full coverage.
+ f->overrideSampleCoverage("-1");
+ f->codeAppend("}");
+ }
+ } else { // Reject the entire fragment.
+ if (!fBatchInfo.fCannotDiscard) {
+ f->codeAppend("discard;");
+ } else if (opts.fResolveMixedSamples) {
+ f->overrideSampleCoverage("0");
+ } else {
+ f->maskSampleCoverage("0");
+ }
+ }
+}
+
+void GLSLInstanceProcessor::BackendMultisample::acceptCoverageMask(GrGLSLPPFragmentBuilder* f,
+ const char* shapeMask,
+ const EmitShapeOpts& opts,
+ bool maybeSharedEdge) {
+ if (opts.fResolveMixedSamples) {
+ if (maybeSharedEdge) {
+ // This is a mixed sampled fragment, potentially on the outer edge of the shape, with
+ // only partial shape coverage. Override the coverage of one fragment to "shapeMask",
+ // and drop all other fragments that may fall on this same pixel. Since our geometry is
+            // watertight, non-overlapping, and completely contains the shape, this means that each
+ // "on" bit from shapeMask is guaranteed to be set in the incoming sample mask of one,
+ // and only one, fragment that falls on this same pixel.
+ SkASSERT(!opts.fInvertCoverage);
+ f->codeAppendf("if ((gl_SampleMaskIn[0] & (1 << findMSB(%s))) == 0) {", shapeMask);
+ // Drop this fragment.
+ if (!fBatchInfo.fCannotDiscard) {
+ f->codeAppend ("discard;");
+ } else {
+ f->overrideSampleCoverage("0");
+ }
+ f->codeAppend ("} else {");
+ // Override the coverage of the lone surviving fragment to "shapeMask".
+ f->overrideSampleCoverage(shapeMask);
+ f->codeAppend ("}");
+ } else {
+ f->overrideSampleCoverage(shapeMask);
+ }
+ } else {
+ f->maskSampleCoverage(shapeMask, opts.fInvertCoverage);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+GLSLInstanceProcessor::Backend*
+GLSLInstanceProcessor::Backend::Create(const GrPipeline& pipeline, BatchInfo batchInfo,
+ const VertexInputs& inputs) {
+ switch (batchInfo.fAntialiasMode) {
+ default:
+ SkFAIL("Unexpected antialias mode.");
+ case AntialiasMode::kNone:
+ return new BackendNonAA(batchInfo, inputs);
+ case AntialiasMode::kCoverage:
+ return new BackendCoverage(batchInfo, inputs);
+ case AntialiasMode::kMSAA:
+ case AntialiasMode::kMixedSamples: {
+ const GrRenderTargetPriv& rtp = pipeline.getRenderTarget()->renderTargetPriv();
+ const GrGpu::MultisampleSpecs& specs = rtp.getMultisampleSpecs(pipeline.getStencil());
+ return new BackendMultisample(batchInfo, inputs, specs.fEffectiveSampleCnt);
+ }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+const ShapeVertex kVertexData[] = {
+ // Rectangle.
+ {+1, +1, ~0}, /*0*/
+ {-1, +1, ~0}, /*1*/
+ {-1, -1, ~0}, /*2*/
+ {+1, -1, ~0}, /*3*/
+ // The next 4 are for the bordered version.
+ {+1, +1, 0}, /*4*/
+ {-1, +1, 0}, /*5*/
+ {-1, -1, 0}, /*6*/
+ {+1, -1, 0}, /*7*/
+
+ // Octagon that inscribes the unit circle, cut by an interior unit octagon.
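+    // (0.414214 ~= tan(pi/8): vertices with attr ~0 are corners of the octagon circumscribing the
+    //  unit circle; vertices with attr 0 lie on the circle itself.)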
+ {+1.000000f, 0.000000f, 0}, /* 8*/
+ {+1.000000f, +0.414214f, ~0}, /* 9*/
+ {+0.707106f, +0.707106f, 0}, /*10*/
+ {+0.414214f, +1.000000f, ~0}, /*11*/
+ { 0.000000f, +1.000000f, 0}, /*12*/
+ {-0.414214f, +1.000000f, ~0}, /*13*/
+ {-0.707106f, +0.707106f, 0}, /*14*/
+ {-1.000000f, +0.414214f, ~0}, /*15*/
+ {-1.000000f, 0.000000f, 0}, /*16*/
+ {-1.000000f, -0.414214f, ~0}, /*17*/
+ {-0.707106f, -0.707106f, 0}, /*18*/
+ {-0.414214f, -1.000000f, ~0}, /*19*/
+ { 0.000000f, -1.000000f, 0}, /*20*/
+ {+0.414214f, -1.000000f, ~0}, /*21*/
+ {+0.707106f, -0.707106f, 0}, /*22*/
+ {+1.000000f, -0.414214f, ~0}, /*23*/
+ // This vertex is for the fanned versions.
+ { 0.000000f, 0.000000f, ~0}, /*24*/
+
+ // Rectangle with disjoint corner segments.
+ {+1.0, +0.5, 0x3}, /*25*/
+ {+1.0, +1.0, 0x3}, /*26*/
+ {+0.5, +1.0, 0x3}, /*27*/
+ {-0.5, +1.0, 0x2}, /*28*/
+ {-1.0, +1.0, 0x2}, /*29*/
+ {-1.0, +0.5, 0x2}, /*30*/
+ {-1.0, -0.5, 0x0}, /*31*/
+ {-1.0, -1.0, 0x0}, /*32*/
+ {-0.5, -1.0, 0x0}, /*33*/
+ {+0.5, -1.0, 0x1}, /*34*/
+ {+1.0, -1.0, 0x1}, /*35*/
+ {+1.0, -0.5, 0x1}, /*36*/
+ // The next 4 are for the fanned version.
+ { 0.0, 0.0, 0x3}, /*37*/
+ { 0.0, 0.0, 0x2}, /*38*/
+ { 0.0, 0.0, 0x0}, /*39*/
+ { 0.0, 0.0, 0x1}, /*40*/
+ // The next 8 are for the bordered version.
+ {+0.75, +0.50, 0x3}, /*41*/
+ {+0.50, +0.75, 0x3}, /*42*/
+ {-0.50, +0.75, 0x2}, /*43*/
+ {-0.75, +0.50, 0x2}, /*44*/
+ {-0.75, -0.50, 0x0}, /*45*/
+ {-0.50, -0.75, 0x0}, /*46*/
+ {+0.50, -0.75, 0x1}, /*47*/
+ {+0.75, -0.50, 0x1}, /*48*/
+
+ // 16-gon that inscribes the unit circle, cut by an interior unit 16-gon.
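+    // (Same construction as the octagon above, but finer: 0.198913 ~= tan(pi/16).)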
+ {+1.000000f, +0.000000f, 0}, /*49*/
+ {+1.000000f, +0.198913f, ~0}, /*50*/
+ {+0.923879f, +0.382683f, 0}, /*51*/
+ {+0.847760f, +0.566455f, ~0}, /*52*/
+ {+0.707106f, +0.707106f, 0}, /*53*/
+ {+0.566455f, +0.847760f, ~0}, /*54*/
+ {+0.382683f, +0.923879f, 0}, /*55*/
+ {+0.198913f, +1.000000f, ~0}, /*56*/
+ {+0.000000f, +1.000000f, 0}, /*57*/
+ {-0.198913f, +1.000000f, ~0}, /*58*/
+ {-0.382683f, +0.923879f, 0}, /*59*/
+ {-0.566455f, +0.847760f, ~0}, /*60*/
+ {-0.707106f, +0.707106f, 0}, /*61*/
+ {-0.847760f, +0.566455f, ~0}, /*62*/
+ {-0.923879f, +0.382683f, 0}, /*63*/
+ {-1.000000f, +0.198913f, ~0}, /*64*/
+ {-1.000000f, +0.000000f, 0}, /*65*/
+ {-1.000000f, -0.198913f, ~0}, /*66*/
+ {-0.923879f, -0.382683f, 0}, /*67*/
+ {-0.847760f, -0.566455f, ~0}, /*68*/
+ {-0.707106f, -0.707106f, 0}, /*69*/
+ {-0.566455f, -0.847760f, ~0}, /*70*/
+ {-0.382683f, -0.923879f, 0}, /*71*/
+ {-0.198913f, -1.000000f, ~0}, /*72*/
+ {-0.000000f, -1.000000f, 0}, /*73*/
+ {+0.198913f, -1.000000f, ~0}, /*74*/
+ {+0.382683f, -0.923879f, 0}, /*75*/
+ {+0.566455f, -0.847760f, ~0}, /*76*/
+ {+0.707106f, -0.707106f, 0}, /*77*/
+ {+0.847760f, -0.566455f, ~0}, /*78*/
+ {+0.923879f, -0.382683f, 0}, /*79*/
+ {+1.000000f, -0.198913f, ~0}, /*80*/
+};
+
+const uint8_t kIndexData[] = {
+ // Rectangle.
+ 0, 1, 2,
+ 0, 2, 3,
+
+ // Rectangle with a border.
+ 0, 1, 5,
+ 5, 4, 0,
+ 1, 2, 6,
+ 6, 5, 1,
+ 2, 3, 7,
+ 7, 6, 2,
+ 3, 0, 4,
+ 4, 7, 3,
+ 4, 5, 6,
+ 6, 7, 4,
+
+ // Octagon that inscribes the unit circle, cut by an interior unit octagon.
+ 10, 8, 9,
+ 12, 10, 11,
+ 14, 12, 13,
+ 16, 14, 15,
+ 18, 16, 17,
+ 20, 18, 19,
+ 22, 20, 21,
+ 8, 22, 23,
+ 8, 10, 12,
+ 12, 14, 16,
+ 16, 18, 20,
+ 20, 22, 8,
+ 8, 12, 16,
+ 16, 20, 8,
+
+ // Same octagons, but with the interior arranged as a fan. Used by mixed samples.
+ 10, 8, 9,
+ 12, 10, 11,
+ 14, 12, 13,
+ 16, 14, 15,
+ 18, 16, 17,
+ 20, 18, 19,
+ 22, 20, 21,
+ 8, 22, 23,
+ 24, 8, 10,
+ 12, 24, 10,
+ 24, 12, 14,
+ 16, 24, 14,
+ 24, 16, 18,
+ 20, 24, 18,
+ 24, 20, 22,
+ 8, 24, 22,
+
+ // Same octagons, but with the inner and outer disjoint. Used by coverage AA.
+ 8, 22, 23,
+ 9, 8, 23,
+ 10, 8, 9,
+ 11, 10, 9,
+ 12, 10, 11,
+ 13, 12, 11,
+ 14, 12, 13,
+ 15, 14, 13,
+ 16, 14, 15,
+ 17, 16, 15,
+ 18, 16, 17,
+ 19, 18, 17,
+ 20, 18, 19,
+ 21, 20, 19,
+ 22, 20, 21,
+ 23, 22, 21,
+ 22, 8, 10,
+ 10, 12, 14,
+ 14, 16, 18,
+ 18, 20, 22,
+ 22, 10, 14,
+ 14, 18, 22,
+
+ // Rectangle with disjoint corner segments.
+ 27, 25, 26,
+ 30, 28, 29,
+ 33, 31, 32,
+ 36, 34, 35,
+ 25, 27, 28,
+ 28, 30, 31,
+ 31, 33, 34,
+ 34, 36, 25,
+ 25, 28, 31,
+ 31, 34, 25,
+
+ // Same rectangle with disjoint corners, but with the interior arranged as a fan. Used by
+ // mixed samples.
+ 27, 25, 26,
+ 30, 28, 29,
+ 33, 31, 32,
+ 36, 34, 35,
+ 27, 37, 25,
+ 28, 37, 27,
+ 30, 38, 28,
+ 31, 38, 30,
+ 33, 39, 31,
+ 34, 39, 33,
+ 36, 40, 34,
+ 25, 40, 36,
+
+ // Same rectangle with disjoint corners, with a border as well. Used by coverage AA.
+ 41, 25, 26,
+ 42, 41, 26,
+ 27, 42, 26,
+ 43, 28, 29,
+ 44, 43, 29,
+ 30, 44, 29,
+ 45, 31, 32,
+ 46, 45, 32,
+ 33, 46, 32,
+ 47, 34, 35,
+ 48, 47, 35,
+ 36, 48, 35,
+ 27, 28, 42,
+ 42, 28, 43,
+ 30, 31, 44,
+ 44, 31, 45,
+ 33, 34, 46,
+ 46, 34, 47,
+ 36, 25, 48,
+ 48, 25, 41,
+ 41, 42, 43,
+ 43, 44, 45,
+ 45, 46, 47,
+ 47, 48, 41,
+ 41, 43, 45,
+ 45, 47, 41,
+
+ // Same as the disjoint octagons, but with 16-gons instead. Used by coverage AA when the oval is
+ // sufficiently large.
+ 49, 79, 80,
+ 50, 49, 80,
+ 51, 49, 50,
+ 52, 51, 50,
+ 53, 51, 52,
+ 54, 53, 52,
+ 55, 53, 54,
+ 56, 55, 54,
+ 57, 55, 56,
+ 58, 57, 56,
+ 59, 57, 58,
+ 60, 59, 58,
+ 61, 59, 60,
+ 62, 61, 60,
+ 63, 61, 62,
+ 64, 63, 62,
+ 65, 63, 64,
+ 66, 65, 64,
+ 67, 65, 66,
+ 68, 67, 66,
+ 69, 67, 68,
+ 70, 69, 68,
+ 71, 69, 70,
+ 72, 71, 70,
+ 73, 71, 72,
+ 74, 73, 72,
+ 75, 73, 74,
+ 76, 75, 74,
+ 77, 75, 76,
+ 78, 77, 76,
+ 79, 77, 78,
+ 80, 79, 78,
+ 49, 51, 53,
+ 53, 55, 57,
+ 57, 59, 61,
+ 61, 63, 65,
+ 65, 67, 69,
+ 69, 71, 73,
+ 73, 75, 77,
+ 77, 79, 49,
+ 49, 53, 57,
+ 57, 61, 65,
+ 65, 69, 73,
+ 73, 77, 49,
+ 49, 57, 65,
+ 65, 73, 49,
+};
+
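+// First index and triangle count for each sub-range of kIndexData above. Each FirstIndex equals
+// the running total of 3 * TriCount over the preceding ranges (e.g. kOctagons_FirstIndex ==
+// 6 + 30 == 36).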
+enum {
+ kRect_FirstIndex = 0,
+ kRect_TriCount = 2,
+
+ kFramedRect_FirstIndex = 6,
+ kFramedRect_TriCount = 10,
+
+ kOctagons_FirstIndex = 36,
+ kOctagons_TriCount = 14,
+
+ kOctagonsFanned_FirstIndex = 78,
+ kOctagonsFanned_TriCount = 16,
+
+ kDisjointOctagons_FirstIndex = 126,
+ kDisjointOctagons_TriCount = 22,
+
+ kCorneredRect_FirstIndex = 192,
+ kCorneredRect_TriCount = 10,
+
+ kCorneredRectFanned_FirstIndex = 222,
+ kCorneredRectFanned_TriCount = 12,
+
+ kCorneredFramedRect_FirstIndex = 258,
+ kCorneredFramedRect_TriCount = 26,
+
+ kDisjoint16Gons_FirstIndex = 336,
+ kDisjoint16Gons_TriCount = 46,
+};
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gShapeVertexBufferKey);
+
+const GrBuffer* InstanceProcessor::FindOrCreateVertexBuffer(GrGpu* gpu) {
+ GR_DEFINE_STATIC_UNIQUE_KEY(gShapeVertexBufferKey);
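+    // The canonical shape geometry is shared: look the buffer up in the resource cache by its
+    // static unique key, and only create (and register) it on a cache miss.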
+ GrResourceCache* cache = gpu->getContext()->getResourceCache();
+ if (GrGpuResource* cached = cache->findAndRefUniqueResource(gShapeVertexBufferKey)) {
+ return static_cast<GrBuffer*>(cached);
+ }
+ if (GrBuffer* buffer = gpu->createBuffer(sizeof(kVertexData), kVertex_GrBufferType,
+ kStatic_GrAccessPattern, kVertexData)) {
+ buffer->resourcePriv().setUniqueKey(gShapeVertexBufferKey);
+ return buffer;
+ }
+ return nullptr;
+}
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gShapeIndexBufferKey);
+
+const GrBuffer* InstanceProcessor::FindOrCreateIndex8Buffer(GrGpu* gpu) {
+ GR_DEFINE_STATIC_UNIQUE_KEY(gShapeIndexBufferKey);
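+    // Same caching scheme as FindOrCreateVertexBuffer above.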
+ GrResourceCache* cache = gpu->getContext()->getResourceCache();
+ if (GrGpuResource* cached = cache->findAndRefUniqueResource(gShapeIndexBufferKey)) {
+ return static_cast<GrBuffer*>(cached);
+ }
+ if (GrBuffer* buffer = gpu->createBuffer(sizeof(kIndexData), kIndex_GrBufferType,
+ kStatic_GrAccessPattern, kIndexData)) {
+ buffer->resourcePriv().setUniqueKey(gShapeIndexBufferKey);
+ return buffer;
+ }
+ return nullptr;
+}
+
+IndexRange InstanceProcessor::GetIndexRangeForRect(AntialiasMode aa) {
+ static constexpr IndexRange kRectRanges[kNumAntialiasModes] = {
+ {kRect_FirstIndex, 3 * kRect_TriCount}, // kNone
+ {kFramedRect_FirstIndex, 3 * kFramedRect_TriCount}, // kCoverage
+ {kRect_FirstIndex, 3 * kRect_TriCount}, // kMSAA
+ {kRect_FirstIndex, 3 * kRect_TriCount} // kMixedSamples
+ };
+
+ SkASSERT(aa >= AntialiasMode::kNone && aa <= AntialiasMode::kMixedSamples);
+ return kRectRanges[(int)aa];
+
+ GR_STATIC_ASSERT(0 == (int)AntialiasMode::kNone);
+ GR_STATIC_ASSERT(1 == (int)AntialiasMode::kCoverage);
+ GR_STATIC_ASSERT(2 == (int)AntialiasMode::kMSAA);
+ GR_STATIC_ASSERT(3 == (int)AntialiasMode::kMixedSamples);
+}
+
+IndexRange InstanceProcessor::GetIndexRangeForOval(AntialiasMode aa, const SkRect& devBounds) {
+ if (AntialiasMode::kCoverage == aa && devBounds.height() * devBounds.width() >= 256 * 256) {
+ // This threshold was chosen quasi-scientifically on Tegra X1.
+ return {kDisjoint16Gons_FirstIndex, 3 * kDisjoint16Gons_TriCount};
+ }
+
+ static constexpr IndexRange kOvalRanges[kNumAntialiasModes] = {
+ {kOctagons_FirstIndex, 3 * kOctagons_TriCount}, // kNone
+ {kDisjointOctagons_FirstIndex, 3 * kDisjointOctagons_TriCount}, // kCoverage
+ {kOctagons_FirstIndex, 3 * kOctagons_TriCount}, // kMSAA
+ {kOctagonsFanned_FirstIndex, 3 * kOctagonsFanned_TriCount} // kMixedSamples
+ };
+
+ SkASSERT(aa >= AntialiasMode::kNone && aa <= AntialiasMode::kMixedSamples);
+ return kOvalRanges[(int)aa];
+
+ GR_STATIC_ASSERT(0 == (int)AntialiasMode::kNone);
+ GR_STATIC_ASSERT(1 == (int)AntialiasMode::kCoverage);
+ GR_STATIC_ASSERT(2 == (int)AntialiasMode::kMSAA);
+ GR_STATIC_ASSERT(3 == (int)AntialiasMode::kMixedSamples);
+}
+
+IndexRange InstanceProcessor::GetIndexRangeForRRect(AntialiasMode aa) {
+ static constexpr IndexRange kRRectRanges[kNumAntialiasModes] = {
+ {kCorneredRect_FirstIndex, 3 * kCorneredRect_TriCount}, // kNone
+ {kCorneredFramedRect_FirstIndex, 3 * kCorneredFramedRect_TriCount}, // kCoverage
+ {kCorneredRect_FirstIndex, 3 * kCorneredRect_TriCount}, // kMSAA
+ {kCorneredRectFanned_FirstIndex, 3 * kCorneredRectFanned_TriCount} // kMixedSamples
+ };
+
+ SkASSERT(aa >= AntialiasMode::kNone && aa <= AntialiasMode::kMixedSamples);
+ return kRRectRanges[(int)aa];
+
+ GR_STATIC_ASSERT(0 == (int)AntialiasMode::kNone);
+ GR_STATIC_ASSERT(1 == (int)AntialiasMode::kCoverage);
+ GR_STATIC_ASSERT(2 == (int)AntialiasMode::kMSAA);
+ GR_STATIC_ASSERT(3 == (int)AntialiasMode::kMixedSamples);
+}
+
+const char* InstanceProcessor::GetNameOfIndexRange(IndexRange range) {
+ switch (range.fStart) {
+ case kRect_FirstIndex: return "basic_rect";
+ case kFramedRect_FirstIndex: return "coverage_rect";
+
+ case kOctagons_FirstIndex: return "basic_oval";
+ case kDisjointOctagons_FirstIndex: return "coverage_oval";
+ case kDisjoint16Gons_FirstIndex: return "coverage_large_oval";
+ case kOctagonsFanned_FirstIndex: return "mixed_samples_oval";
+
+ case kCorneredRect_FirstIndex: return "basic_round_rect";
+ case kCorneredFramedRect_FirstIndex: return "coverage_round_rect";
+ case kCorneredRectFanned_FirstIndex: return "mixed_samples_round_rect";
+
+ default: return "unknown";
+ }
+}
+
+}
diff --git a/gfx/skia/skia/src/gpu/instanced/InstanceProcessor.h b/gfx/skia/skia/src/gpu/instanced/InstanceProcessor.h
new file mode 100644
index 000000000..0b3a16d5a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/instanced/InstanceProcessor.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef gr_instanced_InstanceProcessor_DEFINED
+#define gr_instanced_InstanceProcessor_DEFINED
+
+#include "GrCaps.h"
+#include "GrBufferAccess.h"
+#include "GrGeometryProcessor.h"
+#include "instanced/InstancedRenderingTypes.h"
+
+namespace gr_instanced {
+
+/**
+ * This class provides a GP implementation that uses instanced rendering. It sends geometry in as
+ * basic, pre-baked canonical shapes, and uses instanced vertex attribs to control how these shapes
+ * are transformed and drawn. MSAA is accomplished with the sample mask rather than finely
+ * tessellated geometry.
+ */
+class InstanceProcessor : public GrGeometryProcessor {
+public:
+ InstanceProcessor(BatchInfo, GrBuffer* paramsBuffer);
+
+ const char* name() const override { return "Instance Processor"; }
+ BatchInfo batchInfo() const { return fBatchInfo; }
+
+ void getGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder* b) const override {
+ b->add32(fBatchInfo.fData);
+ }
+ GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override;
+
+ /**
+ * Returns a buffer of ShapeVertex that defines the canonical instanced geometry.
+ */
+ static const GrBuffer* SK_WARN_UNUSED_RESULT FindOrCreateVertexBuffer(GrGpu*);
+
+ /**
+ * Returns a buffer of 8-bit indices for the canonical instanced geometry. The client can call
+ * GetIndexRangeForXXX to know which indices to use for a specific shape.
+ */
+ static const GrBuffer* SK_WARN_UNUSED_RESULT FindOrCreateIndex8Buffer(GrGpu*);
+
+ static IndexRange GetIndexRangeForRect(AntialiasMode);
+ static IndexRange GetIndexRangeForOval(AntialiasMode, const SkRect& devBounds);
+ static IndexRange GetIndexRangeForRRect(AntialiasMode);
+
+ static const char* GetNameOfIndexRange(IndexRange);
+
+private:
+ /**
+ * Called by the platform-specific instanced rendering implementation to determine the level of
+ * support this class can offer on the given GLSL platform.
+ */
+ static GrCaps::InstancedSupport CheckSupport(const GrGLSLCaps&, const GrCaps&);
+
+ const BatchInfo fBatchInfo;
+ GrBufferAccess fParamsAccess;
+
+ friend class GLInstancedRendering; // For CheckSupport.
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/instanced/InstancedRendering.cpp b/gfx/skia/skia/src/gpu/instanced/InstancedRendering.cpp
new file mode 100644
index 000000000..66e53dd5d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/instanced/InstancedRendering.cpp
@@ -0,0 +1,496 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "InstancedRendering.h"
+
+#include "GrBatchFlushState.h"
+#include "GrCaps.h"
+#include "GrPipeline.h"
+#include "GrResourceProvider.h"
+#include "instanced/InstanceProcessor.h"
+
+namespace gr_instanced {
+
+InstancedRendering::InstancedRendering(GrGpu* gpu)
+ : fGpu(SkRef(gpu)),
+ fState(State::kRecordingDraws),
+ fDrawPool(1024 * sizeof(Batch::Draw), 1024 * sizeof(Batch::Draw)) {
+}
+
+GrDrawBatch* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
+ GrColor color, bool antialias,
+ const GrInstancedPipelineInfo& info, bool* useHWAA) {
+ return this->recordShape(ShapeType::kRect, rect, viewMatrix, color, rect, antialias, info,
+ useHWAA);
+}
+
+GrDrawBatch* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
+ GrColor color, const SkRect& localRect, bool antialias,
+ const GrInstancedPipelineInfo& info, bool* useHWAA) {
+ return this->recordShape(ShapeType::kRect, rect, viewMatrix, color, localRect, antialias, info,
+ useHWAA);
+}
+
+GrDrawBatch* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
+ GrColor color, const SkMatrix& localMatrix,
+ bool antialias, const GrInstancedPipelineInfo& info,
+ bool* useHWAA) {
+ if (localMatrix.hasPerspective()) {
+ return nullptr; // Perspective is not yet supported in the local matrix.
+ }
+ if (Batch* batch = this->recordShape(ShapeType::kRect, rect, viewMatrix, color, rect, antialias,
+ info, useHWAA)) {
+ batch->getSingleInstance().fInfo |= kLocalMatrix_InfoFlag;
+ batch->appendParamsTexel(localMatrix.getScaleX(), localMatrix.getSkewX(),
+ localMatrix.getTranslateX());
+ batch->appendParamsTexel(localMatrix.getSkewY(), localMatrix.getScaleY(),
+ localMatrix.getTranslateY());
+ batch->fInfo.fHasLocalMatrix = true;
+ return batch;
+ }
+ return nullptr;
+}
+
+GrDrawBatch* InstancedRendering::recordOval(const SkRect& oval, const SkMatrix& viewMatrix,
+ GrColor color, bool antialias,
+ const GrInstancedPipelineInfo& info, bool* useHWAA) {
+ return this->recordShape(ShapeType::kOval, oval, viewMatrix, color, oval, antialias, info,
+ useHWAA);
+}
+
+GrDrawBatch* InstancedRendering::recordRRect(const SkRRect& rrect, const SkMatrix& viewMatrix,
+ GrColor color, bool antialias,
+ const GrInstancedPipelineInfo& info, bool* useHWAA) {
+ if (Batch* batch = this->recordShape(GetRRectShapeType(rrect), rrect.rect(), viewMatrix, color,
+ rrect.rect(), antialias, info, useHWAA)) {
+ batch->appendRRectParams(rrect);
+ return batch;
+ }
+ return nullptr;
+}
+
+GrDrawBatch* InstancedRendering::recordDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkMatrix& viewMatrix, GrColor color,
+ bool antialias, const GrInstancedPipelineInfo& info,
+ bool* useHWAA) {
+ if (inner.getType() > SkRRect::kSimple_Type) {
+ return nullptr; // Complex inner round rects are not yet supported.
+ }
+ if (SkRRect::kEmpty_Type == inner.getType()) {
+ return this->recordRRect(outer, viewMatrix, color, antialias, info, useHWAA);
+ }
+ if (Batch* batch = this->recordShape(GetRRectShapeType(outer), outer.rect(), viewMatrix, color,
+ outer.rect(), antialias, info, useHWAA)) {
+ batch->appendRRectParams(outer);
+ ShapeType innerShapeType = GetRRectShapeType(inner);
+ batch->fInfo.fInnerShapeTypes |= GetShapeFlag(innerShapeType);
+ batch->getSingleInstance().fInfo |= ((int)innerShapeType << kInnerShapeType_InfoBit);
+ batch->appendParamsTexel(inner.rect().asScalars(), 4);
+ batch->appendRRectParams(inner);
+ return batch;
+ }
+ return nullptr;
+}
+
+InstancedRendering::Batch* InstancedRendering::recordShape(ShapeType type, const SkRect& bounds,
+ const SkMatrix& viewMatrix,
+ GrColor color, const SkRect& localRect,
+ bool antialias,
+ const GrInstancedPipelineInfo& info,
+ bool* useHWAA) {
+ SkASSERT(State::kRecordingDraws == fState);
+
+ if (info.fIsRenderingToFloat && fGpu->caps()->avoidInstancedDrawsToFPTargets()) {
+ return nullptr;
+ }
+
+ AntialiasMode antialiasMode;
+ if (!this->selectAntialiasMode(viewMatrix, antialias, info, useHWAA, &antialiasMode)) {
+ return nullptr;
+ }
+
+ Batch* batch = this->createBatch();
+ batch->fInfo.fAntialiasMode = antialiasMode;
+ batch->fInfo.fShapeTypes = GetShapeFlag(type);
+ batch->fInfo.fCannotDiscard = !info.fCanDiscard;
+
+ Instance& instance = batch->getSingleInstance();
+ instance.fInfo = (int)type << kShapeType_InfoBit;
+
+ Batch::HasAABloat aaBloat = (antialiasMode == AntialiasMode::kCoverage)
+ ? Batch::HasAABloat::kYes
+ : Batch::HasAABloat::kNo;
+ Batch::IsZeroArea zeroArea = (bounds.isEmpty()) ? Batch::IsZeroArea::kYes
+ : Batch::IsZeroArea::kNo;
+
+ // The instanced shape renderer draws rectangles of [-1, -1, +1, +1], so we find the matrix that
+ // will map this rectangle to the same device coordinates as "viewMatrix * bounds".
+ float sx = 0.5f * bounds.width();
+ float sy = 0.5f * bounds.height();
+ float tx = sx + bounds.fLeft;
+ float ty = sy + bounds.fTop;
+ if (!viewMatrix.hasPerspective()) {
+ float* m = instance.fShapeMatrix2x3;
+ m[0] = viewMatrix.getScaleX() * sx;
+ m[1] = viewMatrix.getSkewX() * sy;
+ m[2] = viewMatrix.getTranslateX() +
+ viewMatrix.getScaleX() * tx + viewMatrix.getSkewX() * ty;
+
+ m[3] = viewMatrix.getSkewY() * sx;
+ m[4] = viewMatrix.getScaleY() * sy;
+ m[5] = viewMatrix.getTranslateY() +
+ viewMatrix.getSkewY() * tx + viewMatrix.getScaleY() * ty;
+
+ // Since 'm' is a 2x3 matrix that maps the rect [-1, +1] into the shape's device-space quad,
+ // it's quite simple to find the bounding rectangle:
+ float devBoundsHalfWidth = fabsf(m[0]) + fabsf(m[1]);
+ float devBoundsHalfHeight = fabsf(m[3]) + fabsf(m[4]);
+ SkRect batchBounds;
+ batchBounds.fLeft = m[2] - devBoundsHalfWidth;
+ batchBounds.fRight = m[2] + devBoundsHalfWidth;
+ batchBounds.fTop = m[5] - devBoundsHalfHeight;
+ batchBounds.fBottom = m[5] + devBoundsHalfHeight;
+ batch->setBounds(batchBounds, aaBloat, zeroArea);
+
+ // TODO: Is this worth the CPU overhead?
+ batch->fInfo.fNonSquare =
+ fabsf(devBoundsHalfHeight - devBoundsHalfWidth) > 0.5f || // Early out.
+ fabs(m[0] * m[3] + m[1] * m[4]) > 1e-3f || // Skew?
+ fabs(m[0] * m[0] + m[1] * m[1] - m[3] * m[3] - m[4] * m[4]) > 1e-2f; // Diff. lengths?
+ } else {
+ SkMatrix shapeMatrix(viewMatrix);
+ shapeMatrix.preTranslate(tx, ty);
+ shapeMatrix.preScale(sx, sy);
+ instance.fInfo |= kPerspective_InfoFlag;
+
+ float* m = instance.fShapeMatrix2x3;
+ m[0] = SkScalarToFloat(shapeMatrix.getScaleX());
+ m[1] = SkScalarToFloat(shapeMatrix.getSkewX());
+ m[2] = SkScalarToFloat(shapeMatrix.getTranslateX());
+ m[3] = SkScalarToFloat(shapeMatrix.getSkewY());
+ m[4] = SkScalarToFloat(shapeMatrix.getScaleY());
+ m[5] = SkScalarToFloat(shapeMatrix.getTranslateY());
+
+ // Send the perspective column as a param.
+ batch->appendParamsTexel(shapeMatrix[SkMatrix::kMPersp0], shapeMatrix[SkMatrix::kMPersp1],
+ shapeMatrix[SkMatrix::kMPersp2]);
+ batch->fInfo.fHasPerspective = true;
+
+ batch->setBounds(bounds, aaBloat, zeroArea);
+ batch->fInfo.fNonSquare = true;
+ }
+
+ instance.fColor = color;
+
+ const float* rectAsFloats = localRect.asScalars(); // Ensure SkScalar == float.
+ memcpy(&instance.fLocalRect, rectAsFloats, 4 * sizeof(float));
+
+ batch->fPixelLoad = batch->bounds().height() * batch->bounds().width();
+ return batch;
+}
+
+inline bool InstancedRendering::selectAntialiasMode(const SkMatrix& viewMatrix, bool antialias,
+ const GrInstancedPipelineInfo& info,
+ bool* useHWAA, AntialiasMode* antialiasMode) {
+ SkASSERT(!info.fColorDisabled || info.fDrawingShapeToStencil);
+ SkASSERT(!info.fIsMixedSampled || info.fIsMultisampled);
+ SkASSERT(GrCaps::InstancedSupport::kNone != fGpu->caps()->instancedSupport());
+
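+    // The non-AA and coverage-AA paths render without hardware MSAA, so they are only candidates
+    // when the target is single sampled or multisampling can be disabled.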
+ if (!info.fIsMultisampled || fGpu->caps()->multisampleDisableSupport()) {
+ if (!antialias) {
+ if (info.fDrawingShapeToStencil && !info.fCanDiscard) {
+ // We can't draw to the stencil buffer without discard (or sample mask if MSAA).
+ return false;
+ }
+ *antialiasMode = AntialiasMode::kNone;
+ *useHWAA = false;
+ return true;
+ }
+
+ if (info.canUseCoverageAA() && viewMatrix.preservesRightAngles()) {
+ *antialiasMode = AntialiasMode::kCoverage;
+ *useHWAA = false;
+ return true;
+ }
+ }
+
+ if (info.fIsMultisampled &&
+ fGpu->caps()->instancedSupport() >= GrCaps::InstancedSupport::kMultisampled) {
+ if (!info.fIsMixedSampled || info.fColorDisabled) {
+ *antialiasMode = AntialiasMode::kMSAA;
+ *useHWAA = true;
+ return true;
+ }
+ if (fGpu->caps()->instancedSupport() >= GrCaps::InstancedSupport::kMixedSampled) {
+ *antialiasMode = AntialiasMode::kMixedSamples;
+ *useHWAA = true;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+InstancedRendering::Batch::Batch(uint32_t classID, InstancedRendering* ir)
+ : INHERITED(classID),
+ fInstancedRendering(ir),
+ fIsTracked(false),
+ fNumDraws(1),
+ fNumChangesInGeometry(0) {
+ fHeadDraw = fTailDraw = (Draw*)fInstancedRendering->fDrawPool.allocate(sizeof(Draw));
+#ifdef SK_DEBUG
+ fHeadDraw->fGeometry = {-1, 0};
+#endif
+ fHeadDraw->fNext = nullptr;
+}
+
+InstancedRendering::Batch::~Batch() {
+ if (fIsTracked) {
+ fInstancedRendering->fTrackedBatches.remove(this);
+ }
+
+ Draw* draw = fHeadDraw;
+ while (draw) {
+ Draw* next = draw->fNext;
+ fInstancedRendering->fDrawPool.release(draw);
+ draw = next;
+ }
+}
+
+void InstancedRendering::Batch::appendRRectParams(const SkRRect& rrect) {
+ SkASSERT(!fIsTracked);
+ switch (rrect.getType()) {
+ case SkRRect::kSimple_Type: {
+ const SkVector& radii = rrect.getSimpleRadii();
+ this->appendParamsTexel(radii.x(), radii.y(), rrect.width(), rrect.height());
+ return;
+ }
+ case SkRRect::kNinePatch_Type: {
+ float twoOverW = 2 / rrect.width();
+ float twoOverH = 2 / rrect.height();
+ const SkVector& radiiTL = rrect.radii(SkRRect::kUpperLeft_Corner);
+ const SkVector& radiiBR = rrect.radii(SkRRect::kLowerRight_Corner);
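+            // Pack the radii normalized to the shape's [-1, +1] coordinate space (hence the
+            // 2 / width and 2 / height factors).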
+ this->appendParamsTexel(radiiTL.x() * twoOverW, radiiBR.x() * twoOverW,
+ radiiTL.y() * twoOverH, radiiBR.y() * twoOverH);
+ return;
+ }
+ case SkRRect::kComplex_Type: {
+ /**
+ * The x and y radii of each arc are stored in separate vectors,
+ * in the following order:
+ *
+ * __x1 _ _ _ x3__
+ * y1 | | y2
+ *
+ * | |
+ *
+ * y3 |__ _ _ _ __| y4
+ * x2 x4
+ *
+ */
+ float twoOverW = 2 / rrect.width();
+ float twoOverH = 2 / rrect.height();
+ const SkVector& radiiTL = rrect.radii(SkRRect::kUpperLeft_Corner);
+ const SkVector& radiiTR = rrect.radii(SkRRect::kUpperRight_Corner);
+ const SkVector& radiiBR = rrect.radii(SkRRect::kLowerRight_Corner);
+ const SkVector& radiiBL = rrect.radii(SkRRect::kLowerLeft_Corner);
+ this->appendParamsTexel(radiiTL.x() * twoOverW, radiiBL.x() * twoOverW,
+ radiiTR.x() * twoOverW, radiiBR.x() * twoOverW);
+ this->appendParamsTexel(radiiTL.y() * twoOverH, radiiTR.y() * twoOverH,
+ radiiBL.y() * twoOverH, radiiBR.y() * twoOverH);
+ return;
+ }
+ default: return;
+ }
+}
+
+void InstancedRendering::Batch::appendParamsTexel(const SkScalar* vals, int count) {
+ SkASSERT(!fIsTracked);
+ SkASSERT(count <= 4 && count >= 0);
+ const float* valsAsFloats = vals; // Ensure SkScalar == float.
+ memcpy(&fParams.push_back(), valsAsFloats, count * sizeof(float));
+ fInfo.fHasParams = true;
+}
+
+void InstancedRendering::Batch::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w) {
+ SkASSERT(!fIsTracked);
+ ParamsTexel& texel = fParams.push_back();
+ texel.fX = SkScalarToFloat(x);
+ texel.fY = SkScalarToFloat(y);
+ texel.fZ = SkScalarToFloat(z);
+ texel.fW = SkScalarToFloat(w);
+ fInfo.fHasParams = true;
+}
+
+void InstancedRendering::Batch::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z) {
+ SkASSERT(!fIsTracked);
+ ParamsTexel& texel = fParams.push_back();
+ texel.fX = SkScalarToFloat(x);
+ texel.fY = SkScalarToFloat(y);
+ texel.fZ = SkScalarToFloat(z);
+ fInfo.fHasParams = true;
+}
+
+void InstancedRendering::Batch::computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides* overrides) const {
+ color->setKnownFourComponents(this->getSingleInstance().fColor);
+
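+    // Coverage is only fractional when doing coverage AA, or when a non-AA batch of non-trivial
+    // shapes cannot discard and must instead output zero coverage outside the shape.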
+ if (AntialiasMode::kCoverage == fInfo.fAntialiasMode ||
+ (AntialiasMode::kNone == fInfo.fAntialiasMode &&
+ !fInfo.isSimpleRects() && fInfo.fCannotDiscard)) {
+ coverage->setUnknownSingleComponent();
+ } else {
+ coverage->setKnownSingleComponent(255);
+ }
+}
+
+void InstancedRendering::Batch::initBatchTracker(const GrXPOverridesForBatch& overrides) {
+ Draw& draw = this->getSingleDraw(); // This will assert if we have > 1 command.
+ SkASSERT(draw.fGeometry.isEmpty());
+ SkASSERT(SkIsPow2(fInfo.fShapeTypes));
+ SkASSERT(!fIsTracked);
+
+ if (kRect_ShapeFlag == fInfo.fShapeTypes) {
+ draw.fGeometry = InstanceProcessor::GetIndexRangeForRect(fInfo.fAntialiasMode);
+ } else if (kOval_ShapeFlag == fInfo.fShapeTypes) {
+ draw.fGeometry = InstanceProcessor::GetIndexRangeForOval(fInfo.fAntialiasMode,
+ this->bounds());
+ } else {
+ draw.fGeometry = InstanceProcessor::GetIndexRangeForRRect(fInfo.fAntialiasMode);
+ }
+
+ if (!fParams.empty()) {
+ SkASSERT(fInstancedRendering->fParams.count() < (int)kParamsIdx_InfoMask); // TODO: cleaner.
+ this->getSingleInstance().fInfo |= fInstancedRendering->fParams.count();
+ fInstancedRendering->fParams.push_back_n(fParams.count(), fParams.begin());
+ }
+
+ GrColor overrideColor;
+ if (overrides.getOverrideColorIfSet(&overrideColor)) {
+ SkASSERT(State::kRecordingDraws == fInstancedRendering->fState);
+ this->getSingleInstance().fColor = overrideColor;
+ }
+ fInfo.fUsesLocalCoords = overrides.readsLocalCoords();
+ fInfo.fCannotTweakAlphaForCoverage = !overrides.canTweakAlphaForCoverage();
+
+ fInstancedRendering->fTrackedBatches.addToTail(this);
+ fIsTracked = true;
+}
+
+bool InstancedRendering::Batch::onCombineIfPossible(GrBatch* other, const GrCaps& caps) {
+ Batch* that = static_cast<Batch*>(other);
+ SkASSERT(fInstancedRendering == that->fInstancedRendering);
+ SkASSERT(fTailDraw);
+ SkASSERT(that->fTailDraw);
+
+ if (!BatchInfo::CanCombine(fInfo, that->fInfo) ||
+ !GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
+ *that->pipeline(), that->bounds(), caps)) {
+ return false;
+ }
+
+ BatchInfo combinedInfo = fInfo | that->fInfo;
+ if (!combinedInfo.isSimpleRects()) {
+ // This threshold was chosen with the "shapes_mixed" bench on a MacBook with Intel graphics.
+ // There seems to be a wide range where it doesn't matter if we combine or not. What matters
+ // is that the itty bitty rects combine with other shapes and the giant ones don't.
+ constexpr SkScalar kMaxPixelsToGeneralizeRects = 256 * 256;
+ if (fInfo.isSimpleRects() && fPixelLoad > kMaxPixelsToGeneralizeRects) {
+ return false;
+ }
+ if (that->fInfo.isSimpleRects() && that->fPixelLoad > kMaxPixelsToGeneralizeRects) {
+ return false;
+ }
+ }
+
+ this->joinBounds(*that);
+ fInfo = combinedInfo;
+ fPixelLoad += that->fPixelLoad;
+
+ // Adopt the other batch's draws.
+ fNumDraws += that->fNumDraws;
+ fNumChangesInGeometry += that->fNumChangesInGeometry;
+ if (fTailDraw->fGeometry != that->fHeadDraw->fGeometry) {
+ ++fNumChangesInGeometry;
+ }
+ fTailDraw->fNext = that->fHeadDraw;
+ fTailDraw = that->fTailDraw;
+
+ that->fHeadDraw = that->fTailDraw = nullptr;
+
+ return true;
+}
+
+void InstancedRendering::beginFlush(GrResourceProvider* rp) {
+ SkASSERT(State::kRecordingDraws == fState);
+ fState = State::kFlushing;
+
+ if (fTrackedBatches.isEmpty()) {
+ return;
+ }
+
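+    // Lazily fetch or create the shared canonical geometry buffers, upload this flush's
+    // accumulated params, then defer to the backend-specific onBeginFlush().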
+ if (!fVertexBuffer) {
+ fVertexBuffer.reset(InstanceProcessor::FindOrCreateVertexBuffer(fGpu));
+ if (!fVertexBuffer) {
+ return;
+ }
+ }
+
+ if (!fIndexBuffer) {
+ fIndexBuffer.reset(InstanceProcessor::FindOrCreateIndex8Buffer(fGpu));
+ if (!fIndexBuffer) {
+ return;
+ }
+ }
+
+ if (!fParams.empty()) {
+ fParamsBuffer.reset(rp->createBuffer(fParams.count() * sizeof(ParamsTexel),
+ kTexel_GrBufferType, kDynamic_GrAccessPattern,
+ GrResourceProvider::kNoPendingIO_Flag |
+ GrResourceProvider::kRequireGpuMemory_Flag,
+ fParams.begin()));
+ if (!fParamsBuffer) {
+ return;
+ }
+ }
+
+ this->onBeginFlush(rp);
+}
+
+void InstancedRendering::Batch::onDraw(GrBatchFlushState* state) {
+ SkASSERT(State::kFlushing == fInstancedRendering->fState);
+ SkASSERT(state->gpu() == fInstancedRendering->gpu());
+
+ state->gpu()->handleDirtyContext();
+ if (GrXferBarrierType barrierType = this->pipeline()->xferBarrierType(*state->gpu()->caps())) {
+ state->gpu()->xferBarrier(this->pipeline()->getRenderTarget(), barrierType);
+ }
+
+ InstanceProcessor instProc(fInfo, fInstancedRendering->fParamsBuffer);
+ fInstancedRendering->onDraw(*this->pipeline(), instProc, this);
+}
+
+void InstancedRendering::endFlush() {
+ // The caller is expected to delete all tracked batches (i.e. batches whose initBatchTracker
+ // method has been called) before ending the flush.
+ SkASSERT(fTrackedBatches.isEmpty());
+ fParams.reset();
+ fParamsBuffer.reset();
+ this->onEndFlush();
+ fState = State::kRecordingDraws;
+ // Hold on to the shape coords and index buffers.
+}
+
+void InstancedRendering::resetGpuResources(ResetType resetType) {
+ fVertexBuffer.reset();
+ fIndexBuffer.reset();
+ fParamsBuffer.reset();
+ this->onResetGpuResources(resetType);
+}
+
+}
diff --git a/gfx/skia/skia/src/gpu/instanced/InstancedRendering.h b/gfx/skia/skia/src/gpu/instanced/InstancedRendering.h
new file mode 100644
index 000000000..b2c360b25
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/instanced/InstancedRendering.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef gr_instanced_InstancedRendering_DEFINED
+#define gr_instanced_InstancedRendering_DEFINED
+
+#include "GrMemoryPool.h"
+#include "SkTInternalLList.h"
+#include "batches/GrDrawBatch.h"
+#include "instanced/InstancedRenderingTypes.h"
+#include "../private/GrInstancedPipelineInfo.h"
+
+class GrResourceProvider;
+
+namespace gr_instanced {
+
+class InstanceProcessor;
+
+/**
+ * This class serves as a centralized clearinghouse for instanced rendering. It accumulates data for
+ * instanced draws into one location, and creates special batches that pull from this data. The
+ * nature of instanced rendering allows these batches to combine well and render efficiently.
+ *
+ * During a flush, this class assembles the accumulated draw data into a single vertex and texel
+ * buffer, and its subclass draws the batches using backend-specific instanced rendering APIs.
+ *
+ * This class is responsible for the CPU side of instanced rendering. Shaders are implemented by
+ * InstanceProcessor.
+ */
+class InstancedRendering : public SkNoncopyable {
+public:
+ virtual ~InstancedRendering() { SkASSERT(State::kRecordingDraws == fState); }
+
+ GrGpu* gpu() const { return fGpu; }
+
+ /**
+ * These methods make a new record internally for an instanced draw, and return a batch that is
+ * effectively just an index to that record. The returned batch is not self-contained, but
+ * rather relies on this class to handle the rendering. The client must call beginFlush() on
+ * this class before attempting to flush batches returned by it. It is invalid to record new
+ * draws between beginFlush() and endFlush().
+ */
+ GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
+ bool antialias, const GrInstancedPipelineInfo&,
+ bool* useHWAA);
+
+ GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
+ const SkRect& localRect, bool antialias,
+ const GrInstancedPipelineInfo&, bool* useHWAA);
+
+ GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
+ const SkMatrix& localMatrix, bool antialias,
+ const GrInstancedPipelineInfo&, bool* useHWAA);
+
+ GrDrawBatch* SK_WARN_UNUSED_RESULT recordOval(const SkRect&, const SkMatrix&, GrColor,
+ bool antialias, const GrInstancedPipelineInfo&,
+ bool* useHWAA);
+
+ GrDrawBatch* SK_WARN_UNUSED_RESULT recordRRect(const SkRRect&, const SkMatrix&, GrColor,
+ bool antialias, const GrInstancedPipelineInfo&,
+ bool* useHWAA);
+
+ GrDrawBatch* SK_WARN_UNUSED_RESULT recordDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkMatrix&, GrColor, bool antialias,
+ const GrInstancedPipelineInfo&, bool* useHWAA);
+
+ /**
+ * Compiles all recorded draws into GPU buffers and allows the client to begin flushing the
+ * batches created by this class.
+ */
+ void beginFlush(GrResourceProvider*);
+
+ /**
+ * Called once the batches created previously by this class have all been released. Allows the
+ * client to begin recording draws again.
+ */
+ void endFlush();
+
+ enum class ResetType : bool {
+ kDestroy,
+ kAbandon
+ };
+
+ /**
+ * Resets all GPU resources, including those that are held long term. They will be lazily
+ * reinitialized if the class begins to be used again.
+ */
+ void resetGpuResources(ResetType);
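+
+    // Typical client sequence (sketch): record*() draws while in the recording state, call
+    // beginFlush(), flush and release the returned batches, then call endFlush() to begin
+    // recording again.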
+
+protected:
+ class Batch : public GrDrawBatch {
+ public:
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(Batch);
+
+ ~Batch() override;
+ const char* name() const override { return "Instanced Batch"; }
+
+ struct Draw {
+ Instance fInstance;
+ IndexRange fGeometry;
+ Draw* fNext;
+ };
+
+ Draw& getSingleDraw() const { SkASSERT(fHeadDraw && !fHeadDraw->fNext); return *fHeadDraw; }
+ Instance& getSingleInstance() const { return this->getSingleDraw().fInstance; }
+
+ void appendRRectParams(const SkRRect&);
+ void appendParamsTexel(const SkScalar* vals, int count);
+ void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w);
+ void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z);
+
+ protected:
+ Batch(uint32_t classID, InstancedRendering* ir);
+
+ void initBatchTracker(const GrXPOverridesForBatch&) override;
+ bool onCombineIfPossible(GrBatch* other, const GrCaps& caps) override;
+
+ void computePipelineOptimizations(GrInitInvariantOutput* color,
+ GrInitInvariantOutput* coverage,
+ GrBatchToXPOverrides*) const override;
+
+ void onPrepare(GrBatchFlushState*) override {}
+ void onDraw(GrBatchFlushState*) override;
+
+ InstancedRendering* const fInstancedRendering;
+ BatchInfo fInfo;
+ SkScalar fPixelLoad;
+ SkSTArray<5, ParamsTexel, true> fParams;
+ bool fIsTracked;
+ int fNumDraws;
+ int fNumChangesInGeometry;
+ Draw* fHeadDraw;
+ Draw* fTailDraw;
+
+ typedef GrDrawBatch INHERITED;
+
+ friend class InstancedRendering;
+ };
+
+ typedef SkTInternalLList<Batch> BatchList;
+
+ InstancedRendering(GrGpu* gpu);
+
+ const BatchList& trackedBatches() const { return fTrackedBatches; }
+ const GrBuffer* vertexBuffer() const { SkASSERT(fVertexBuffer); return fVertexBuffer; }
+ const GrBuffer* indexBuffer() const { SkASSERT(fIndexBuffer); return fIndexBuffer; }
+
+ virtual void onBeginFlush(GrResourceProvider*) = 0;
+ virtual void onDraw(const GrPipeline&, const InstanceProcessor&, const Batch*) = 0;
+ virtual void onEndFlush() = 0;
+ virtual void onResetGpuResources(ResetType) = 0;
+
+private:
+ enum class State : bool {
+ kRecordingDraws,
+ kFlushing
+ };
+
+ Batch* SK_WARN_UNUSED_RESULT recordShape(ShapeType, const SkRect& bounds,
+ const SkMatrix& viewMatrix, GrColor,
+ const SkRect& localRect, bool antialias,
+ const GrInstancedPipelineInfo&, bool* requireHWAA);
+
+ bool selectAntialiasMode(const SkMatrix& viewMatrix, bool antialias,
+ const GrInstancedPipelineInfo&, bool* useHWAA, AntialiasMode*);
+
+ virtual Batch* createBatch() = 0;
+
+ const SkAutoTUnref<GrGpu> fGpu;
+ State fState;
+ GrMemoryPool fDrawPool;
+ SkSTArray<1024, ParamsTexel, true> fParams;
+ BatchList fTrackedBatches;
+ SkAutoTUnref<const GrBuffer> fVertexBuffer;
+ SkAutoTUnref<const GrBuffer> fIndexBuffer;
+ SkAutoTUnref<GrBuffer> fParamsBuffer;
+};
+
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/instanced/InstancedRenderingTypes.h b/gfx/skia/skia/src/gpu/instanced/InstancedRenderingTypes.h
new file mode 100644
index 000000000..97f8946d0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/instanced/InstancedRenderingTypes.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef gr_instanced_InstancedRenderingTypes_DEFINED
+#define gr_instanced_InstancedRenderingTypes_DEFINED
+
+#include "GrTypes.h"
+#include "SkRRect.h"
+
+namespace gr_instanced {
+
+/**
+ * Per-vertex data. These values get fed into normal vertex attribs.
+ */
+struct ShapeVertex {
+ float fX, fY; //!< Shape coordinates.
+ int32_t fAttrs; //!< Shape-specific vertex attributes, if needed.
+};
+
+/**
+ * Per-instance data. These values get fed into instanced vertex attribs.
+ */
+struct Instance {
+ uint32_t fInfo; //!< Packed info about the instance. See InfoBits.
+ float fShapeMatrix2x3[6]; //!< Maps canonical shape coords -> device space coords.
+ uint32_t fColor; //!< Color to be written out by the primitive processor.
+ float fLocalRect[4]; //!< Local coords rect that spans [-1, +1] in shape coords.
+};
+
+enum class Attrib : uint8_t {
+ kShapeCoords,
+ kVertexAttrs,
+ kInstanceInfo,
+ kShapeMatrixX,
+ kShapeMatrixY,
+ kColor,
+ kLocalRect
+};
+constexpr int kNumAttribs = 1 + (int)Attrib::kLocalRect;
+
+enum class AntialiasMode : uint8_t {
+ kNone,
+ kCoverage,
+ kMSAA,
+ kMixedSamples
+};
+constexpr int kNumAntialiasModes = 1 + (int)AntialiasMode::kMixedSamples;
+
+enum class ShapeType : uint8_t {
+ kRect,
+ kOval,
+ kSimpleRRect,
+ kNinePatch,
+ kComplexRRect
+};
+constexpr int kNumShapeTypes = 1 + (int)ShapeType::kComplexRRect;
+
+inline static ShapeType GetRRectShapeType(const SkRRect& rrect) {
+ SkASSERT(rrect.getType() >= SkRRect::kRect_Type &&
+ rrect.getType() <= SkRRect::kComplex_Type);
+ return static_cast<ShapeType>(rrect.getType() - 1);
+
+ GR_STATIC_ASSERT((int)ShapeType::kRect == SkRRect::kRect_Type - 1);
+ GR_STATIC_ASSERT((int)ShapeType::kOval == SkRRect::kOval_Type - 1);
+ GR_STATIC_ASSERT((int)ShapeType::kSimpleRRect == SkRRect::kSimple_Type - 1);
+ GR_STATIC_ASSERT((int)ShapeType::kNinePatch == SkRRect::kNinePatch_Type - 1);
+ GR_STATIC_ASSERT((int)ShapeType::kComplexRRect == SkRRect::kComplex_Type - 1);
+ GR_STATIC_ASSERT(kNumShapeTypes == SkRRect::kComplex_Type);
+}
+
+enum ShapeFlag {
+ kRect_ShapeFlag = (1 << (int)ShapeType::kRect),
+ kOval_ShapeFlag = (1 << (int)ShapeType::kOval),
+ kSimpleRRect_ShapeFlag = (1 << (int)ShapeType::kSimpleRRect),
+ kNinePatch_ShapeFlag = (1 << (int)ShapeType::kNinePatch),
+ kComplexRRect_ShapeFlag = (1 << (int)ShapeType::kComplexRRect),
+
+ kRRect_ShapesMask = kSimpleRRect_ShapeFlag | kNinePatch_ShapeFlag | kComplexRRect_ShapeFlag
+};
+
+constexpr uint8_t GetShapeFlag(ShapeType type) { return 1 << (int)type; }
+
+/**
+ * Defines what data is stored at which bits in the fInfo field of the instanced data.
+ */
+enum InfoBits {
+ kShapeType_InfoBit = 29,
+ kInnerShapeType_InfoBit = 27,
+ kPerspective_InfoBit = 26,
+ kLocalMatrix_InfoBit = 25,
+ kParamsIdx_InfoBit = 0
+};
+
+enum InfoMasks {
+ kShapeType_InfoMask = 0u - (1 << kShapeType_InfoBit),
+ kInnerShapeType_InfoMask = (1 << kShapeType_InfoBit) - (1 << kInnerShapeType_InfoBit),
+ kPerspective_InfoFlag = (1 << kPerspective_InfoBit),
+ kLocalMatrix_InfoFlag = (1 << kLocalMatrix_InfoBit),
+ kParamsIdx_InfoMask = (1 << kLocalMatrix_InfoBit) - 1
+};
+
+GR_STATIC_ASSERT((kNumShapeTypes - 1) <= (uint32_t)kShapeType_InfoMask >> kShapeType_InfoBit);
+GR_STATIC_ASSERT((int)ShapeType::kSimpleRRect <=
+ kInnerShapeType_InfoMask >> kInnerShapeType_InfoBit);
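+
+// For illustration, an instance whose outer shape is a simple round rect, whose inner shape is an
+// oval, that has a local matrix, and whose params start at texel index 7 would pack fInfo roughly
+// as: ((uint32_t)ShapeType::kSimpleRRect << kShapeType_InfoBit) |
+//     ((uint32_t)ShapeType::kOval << kInnerShapeType_InfoBit) | kLocalMatrix_InfoFlag | 7.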
+
+/**
+ * Additional parameters required by some instances (e.g. round rect radii, perspective column,
+ * local matrix). These are accessed via texel buffer.
+ */
+struct ParamsTexel {
+ float fX, fY, fZ, fW;
+};
+
+GR_STATIC_ASSERT(0 == offsetof(ParamsTexel, fX));
+GR_STATIC_ASSERT(4 * 4 == sizeof(ParamsTexel));
+
+/**
+ * Tracks all information needed in order to draw a batch of instances. This struct also serves
+ * as an all-in-one shader key for the batch.
+ */
+struct BatchInfo {
+ BatchInfo() : fData(0) {}
+ explicit BatchInfo(uint32_t data) : fData(data) {}
+
+ static bool CanCombine(const BatchInfo& a, const BatchInfo& b);
+
+ bool isSimpleRects() const {
+ return !((fShapeTypes & ~kRect_ShapeFlag) | fInnerShapeTypes);
+ }
+
+ union {
+ struct {
+ AntialiasMode fAntialiasMode;
+ uint8_t fShapeTypes;
+ uint8_t fInnerShapeTypes;
+ bool fHasPerspective : 1;
+ bool fHasLocalMatrix : 1;
+ bool fHasParams : 1;
+ bool fNonSquare : 1;
+ bool fUsesLocalCoords : 1;
+ bool fCannotTweakAlphaForCoverage : 1;
+ bool fCannotDiscard : 1;
+ };
+ uint32_t fData;
+ };
+};
+
+inline bool BatchInfo::CanCombine(const BatchInfo& a, const BatchInfo& b) {
+ if (a.fAntialiasMode != b.fAntialiasMode) {
+ return false;
+ }
+ if (SkToBool(a.fInnerShapeTypes) != SkToBool(b.fInnerShapeTypes)) {
+ // GrInstanceProcessor can't currently combine draws with and without inner shapes.
+ return false;
+ }
+ if (a.fCannotDiscard != b.fCannotDiscard) {
+ // For stencil draws, the use of discard can be a requirement.
+ return false;
+ }
+ return true;
+}
+
+inline BatchInfo operator|(const BatchInfo& a, const BatchInfo& b) {
+ SkASSERT(BatchInfo::CanCombine(a, b));
+ return BatchInfo(a.fData | b.fData);
+}
+
+// This is required since all the data must fit into 32 bits of a shader key.
+GR_STATIC_ASSERT(sizeof(uint32_t) == sizeof(BatchInfo));
+GR_STATIC_ASSERT(kNumShapeTypes <= 8);
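+
+// For illustration: combining a rect batch with an oval batch (assuming CanCombine() passes)
+// simply ORs the two keys, so the result has fShapeTypes == (kRect_ShapeFlag | kOval_ShapeFlag)
+// and the union of the remaining flag bits.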
+
+struct IndexRange {
+ bool operator ==(const IndexRange& that) const {
+ SkASSERT(fStart != that.fStart || fCount == that.fCount);
+ return fStart == that.fStart;
+ }
+ bool operator !=(const IndexRange& that) const { return !(*this == that); }
+
+ bool isEmpty() const { return fCount <= 0; }
+ int end() { return fStart + fCount; }
+
+ int16_t fStart;
+ int16_t fCount;
+};
+
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/text/GrAtlasTextBlob.cpp b/gfx/skia/skia/src/gpu/text/GrAtlasTextBlob.cpp
new file mode 100644
index 000000000..12f35a38c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrAtlasTextBlob.cpp
@@ -0,0 +1,568 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrAtlasTextBlob.h"
+
+#include "GrBlurUtils.h"
+#include "GrContext.h"
+#include "GrDrawContext.h"
+#include "GrPipelineBuilder.h"
+#include "GrTextUtils.h"
+#include "SkColorFilter.h"
+#include "SkDrawFilter.h"
+#include "SkGlyphCache.h"
+#include "SkTextBlobRunIterator.h"
+#include "batches/GrAtlasTextBatch.h"
+
+GrAtlasTextBlob* GrAtlasTextBlob::Create(GrMemoryPool* pool, int glyphCount, int runCount) {
+    // We allocate space for the GrAtlasTextBlob itself, plus space for the vertices array,
+    // the glyphIds array, and the runs array.
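+    // Rough layout of the single allocation (sketch):
+    //   [ GrAtlasTextBlob | vertices: glyphCount * kVerticesPerGlyph * kMaxVASize |
+    //     GrGlyph*[glyphCount] | Run[runCount] ]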
+ size_t verticesCount = glyphCount * kVerticesPerGlyph * kMaxVASize;
+ size_t size = sizeof(GrAtlasTextBlob) +
+ verticesCount +
+ glyphCount * sizeof(GrGlyph**) +
+ sizeof(GrAtlasTextBlob::Run) * runCount;
+
+ void* allocation = pool->allocate(size);
+ if (CACHE_SANITY_CHECK) {
+ sk_bzero(allocation, size);
+ }
+
+ GrAtlasTextBlob* cacheBlob = new (allocation) GrAtlasTextBlob;
+ cacheBlob->fSize = size;
+
+ // setup offsets for vertices / glyphs
+    cacheBlob->fVertices = reinterpret_cast<unsigned char*>(cacheBlob) + sizeof(GrAtlasTextBlob);
+ cacheBlob->fGlyphs = reinterpret_cast<GrGlyph**>(cacheBlob->fVertices + verticesCount);
+ cacheBlob->fRuns = reinterpret_cast<GrAtlasTextBlob::Run*>(cacheBlob->fGlyphs + glyphCount);
+
+ // Initialize runs
+ for (int i = 0; i < runCount; i++) {
+ new (&cacheBlob->fRuns[i]) GrAtlasTextBlob::Run;
+ }
+ cacheBlob->fRunCount = runCount;
+ cacheBlob->fPool = pool;
+ return cacheBlob;
+}
+
+SkGlyphCache* GrAtlasTextBlob::setupCache(int runIndex,
+ const SkSurfaceProps& props,
+ uint32_t scalerContextFlags,
+ const SkPaint& skPaint,
+ const SkMatrix* viewMatrix) {
+ GrAtlasTextBlob::Run* run = &fRuns[runIndex];
+
+ // if we have an override descriptor for the run, then we should use that
+ SkAutoDescriptor* desc = run->fOverrideDescriptor.get() ? run->fOverrideDescriptor.get() :
+ &run->fDescriptor;
+ SkScalerContextEffects effects;
+ skPaint.getScalerContextDescriptor(&effects, desc, props, scalerContextFlags, viewMatrix);
+ run->fTypeface.reset(SkSafeRef(skPaint.getTypeface()));
+ run->fPathEffect = sk_ref_sp(effects.fPathEffect);
+ run->fRasterizer = sk_ref_sp(effects.fRasterizer);
+ run->fMaskFilter = sk_ref_sp(effects.fMaskFilter);
+ return SkGlyphCache::DetachCache(run->fTypeface, effects, desc->getDesc());
+}
+
+void GrAtlasTextBlob::appendGlyph(int runIndex,
+ const SkRect& positions,
+ GrColor color,
+ GrBatchTextStrike* strike,
+ GrGlyph* glyph,
+ SkGlyphCache* cache, const SkGlyph& skGlyph,
+ SkScalar x, SkScalar y, SkScalar scale, bool applyVM) {
+
+ // If the glyph is too large we fall back to paths
+ if (glyph->fTooLargeForAtlas) {
+ this->appendLargeGlyph(glyph, cache, skGlyph, x, y, scale, applyVM);
+ return;
+ }
+
+ Run& run = fRuns[runIndex];
+ GrMaskFormat format = glyph->fMaskFormat;
+
+ Run::SubRunInfo* subRun = &run.fSubRunInfo.back();
+ if (run.fInitialized && subRun->maskFormat() != format) {
+ subRun = &run.push_back();
+ subRun->setStrike(strike);
+ } else if (!run.fInitialized) {
+ subRun->setStrike(strike);
+ }
+
+ run.fInitialized = true;
+
+ size_t vertexStride = GetVertexStride(format);
+
+ subRun->setMaskFormat(format);
+
+ subRun->joinGlyphBounds(positions);
+ subRun->setColor(color);
+
+ intptr_t vertex = reinterpret_cast<intptr_t>(this->fVertices + subRun->vertexEndIndex());
+
+ if (kARGB_GrMaskFormat != glyph->fMaskFormat) {
+ // V0
+ SkPoint* position = reinterpret_cast<SkPoint*>(vertex);
+ position->set(positions.fLeft, positions.fTop);
+ SkColor* colorPtr = reinterpret_cast<SkColor*>(vertex + sizeof(SkPoint));
+ *colorPtr = color;
+ vertex += vertexStride;
+
+ // V1
+ position = reinterpret_cast<SkPoint*>(vertex);
+ position->set(positions.fLeft, positions.fBottom);
+ colorPtr = reinterpret_cast<SkColor*>(vertex + sizeof(SkPoint));
+ *colorPtr = color;
+ vertex += vertexStride;
+
+ // V2
+ position = reinterpret_cast<SkPoint*>(vertex);
+ position->set(positions.fRight, positions.fBottom);
+ colorPtr = reinterpret_cast<SkColor*>(vertex + sizeof(SkPoint));
+ *colorPtr = color;
+ vertex += vertexStride;
+
+ // V3
+ position = reinterpret_cast<SkPoint*>(vertex);
+ position->set(positions.fRight, positions.fTop);
+ colorPtr = reinterpret_cast<SkColor*>(vertex + sizeof(SkPoint));
+ *colorPtr = color;
+ } else {
+ // V0
+ SkPoint* position = reinterpret_cast<SkPoint*>(vertex);
+ position->set(positions.fLeft, positions.fTop);
+ vertex += vertexStride;
+
+ // V1
+ position = reinterpret_cast<SkPoint*>(vertex);
+ position->set(positions.fLeft, positions.fBottom);
+ vertex += vertexStride;
+
+ // V2
+ position = reinterpret_cast<SkPoint*>(vertex);
+ position->set(positions.fRight, positions.fBottom);
+ vertex += vertexStride;
+
+ // V3
+ position = reinterpret_cast<SkPoint*>(vertex);
+ position->set(positions.fRight, positions.fTop);
+ }
+ subRun->appendVertices(vertexStride);
+ fGlyphs[subRun->glyphEndIndex()] = glyph;
+ subRun->glyphAppended();
+}
+
+void GrAtlasTextBlob::appendLargeGlyph(GrGlyph* glyph, SkGlyphCache* cache, const SkGlyph& skGlyph,
+ SkScalar x, SkScalar y, SkScalar scale, bool applyVM) {
+ if (nullptr == glyph->fPath) {
+ const SkPath* glyphPath = cache->findPath(skGlyph);
+ if (!glyphPath) {
+ return;
+ }
+
+ glyph->fPath = new SkPath(*glyphPath);
+ }
+ fBigGlyphs.push_back(GrAtlasTextBlob::BigGlyph(*glyph->fPath, x, y, scale, applyVM));
+}
+
+bool GrAtlasTextBlob::mustRegenerate(const SkPaint& paint,
+ GrColor color, const SkMaskFilter::BlurRec& blurRec,
+ const SkMatrix& viewMatrix, SkScalar x, SkScalar y) {
+    // If we have LCD text then our canonical color will be set to transparent. In this case we
+    // have to regenerate the blob on any color change.
+    // We use the grPaint color because it already has any color filter applied.
+ if (fKey.fCanonicalColor == SK_ColorTRANSPARENT &&
+ fPaintColor != color) {
+ return true;
+ }
+
+ if (fInitialViewMatrix.hasPerspective() != viewMatrix.hasPerspective()) {
+ return true;
+ }
+
+ if (fInitialViewMatrix.hasPerspective() && !fInitialViewMatrix.cheapEqualTo(viewMatrix)) {
+ return true;
+ }
+
+ // We only cache one masked version
+ if (fKey.fHasBlur &&
+ (fBlurRec.fSigma != blurRec.fSigma ||
+ fBlurRec.fStyle != blurRec.fStyle ||
+ fBlurRec.fQuality != blurRec.fQuality)) {
+ return true;
+ }
+
+ // Similarly, we only cache one version for each style
+ if (fKey.fStyle != SkPaint::kFill_Style &&
+ (fStrokeInfo.fFrameWidth != paint.getStrokeWidth() ||
+ fStrokeInfo.fMiterLimit != paint.getStrokeMiter() ||
+ fStrokeInfo.fJoin != paint.getStrokeJoin())) {
+ return true;
+ }
+
+ // Mixed blobs must be regenerated. We could probably figure out a way to do integer scrolls
+ // for mixed blobs if this becomes an issue.
+ if (this->hasBitmap() && this->hasDistanceField()) {
+ // Identical viewmatrices and we can reuse in all cases
+ if (fInitialViewMatrix.cheapEqualTo(viewMatrix) && x == fInitialX && y == fInitialY) {
+ return false;
+ }
+ return true;
+ }
+
+ if (this->hasBitmap()) {
+ if (fInitialViewMatrix.getScaleX() != viewMatrix.getScaleX() ||
+ fInitialViewMatrix.getScaleY() != viewMatrix.getScaleY() ||
+ fInitialViewMatrix.getSkewX() != viewMatrix.getSkewX() ||
+ fInitialViewMatrix.getSkewY() != viewMatrix.getSkewY()) {
+ return true;
+ }
+
+        // We can update the positions in the cached text blobs without regenerating the whole
+        // blob, but only for integer translations.
+        // This cool bit of math determines the necessary translation to apply to the already
+        // generated vertex coordinates to move them to the correct position.
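+        // (Given the matching scale/skew checked above, this is effectively the translation of
+        // viewMatrix*(x, y) relative to fInitialViewMatrix*(fInitialX, fInitialY).)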
+ SkScalar transX = viewMatrix.getTranslateX() +
+ viewMatrix.getScaleX() * (x - fInitialX) +
+ viewMatrix.getSkewX() * (y - fInitialY) -
+ fInitialViewMatrix.getTranslateX();
+ SkScalar transY = viewMatrix.getTranslateY() +
+ viewMatrix.getSkewY() * (x - fInitialX) +
+ viewMatrix.getScaleY() * (y - fInitialY) -
+ fInitialViewMatrix.getTranslateY();
+ if (!SkScalarIsInt(transX) || !SkScalarIsInt(transY)) {
+ return true;
+ }
+ } else if (this->hasDistanceField()) {
+ // A scale outside of [blob.fMaxMinScale, blob.fMinMaxScale] would result in a different
+ // distance field being generated, so we have to regenerate in those cases
+ SkScalar newMaxScale = viewMatrix.getMaxScale();
+ SkScalar oldMaxScale = fInitialViewMatrix.getMaxScale();
+ SkScalar scaleAdjust = newMaxScale / oldMaxScale;
+ if (scaleAdjust < fMaxMinScale || scaleAdjust > fMinMaxScale) {
+ return true;
+ }
+ }
+
+    // It is possible that a blob has neither distance field text nor bitmap text. This is the
+    // case when all of the runs inside the blob are drawn as paths. In this case, we always
+    // regenerate the blob anyway at flush time, so there is no need to regenerate it explicitly.
+ return false;
+}
+
+inline GrDrawBatch* GrAtlasTextBlob::createBatch(
+ const Run::SubRunInfo& info,
+ int glyphCount, int run, int subRun,
+ const SkMatrix& viewMatrix, SkScalar x, SkScalar y,
+ GrColor color,
+ const SkPaint& skPaint, const SkSurfaceProps& props,
+ const GrDistanceFieldAdjustTable* distanceAdjustTable,
+ bool useGammaCorrectDistanceTable,
+ GrBatchFontCache* cache) {
+ GrMaskFormat format = info.maskFormat();
+ GrColor subRunColor;
+ if (kARGB_GrMaskFormat == format) {
+ uint8_t paintAlpha = skPaint.getAlpha();
+ subRunColor = SkColorSetARGB(paintAlpha, paintAlpha, paintAlpha, paintAlpha);
+ } else {
+ subRunColor = color;
+ }
+
+ GrAtlasTextBatch* batch;
+ if (info.drawAsDistanceFields()) {
+ SkColor filteredColor;
+ SkColorFilter* colorFilter = skPaint.getColorFilter();
+ if (colorFilter) {
+ filteredColor = colorFilter->filterColor(skPaint.getColor());
+ } else {
+ filteredColor = skPaint.getColor();
+ }
+ bool useBGR = SkPixelGeometryIsBGR(props.pixelGeometry());
+ batch = GrAtlasTextBatch::CreateDistanceField(glyphCount, cache,
+ distanceAdjustTable,
+ useGammaCorrectDistanceTable,
+ filteredColor, info.hasUseLCDText(), useBGR);
+ } else {
+ batch = GrAtlasTextBatch::CreateBitmap(format, glyphCount, cache);
+ }
+ GrAtlasTextBatch::Geometry& geometry = batch->geometry();
+ geometry.fViewMatrix = viewMatrix;
+ geometry.fBlob = SkRef(this);
+ geometry.fRun = run;
+ geometry.fSubRun = subRun;
+ geometry.fColor = subRunColor;
+ geometry.fX = x;
+ geometry.fY = y;
+ batch->init();
+
+ return batch;
+}
+
+inline
+void GrAtlasTextBlob::flushRun(GrDrawContext* dc, const GrPaint& grPaint,
+ const GrClip& clip, int run, const SkMatrix& viewMatrix, SkScalar x,
+ SkScalar y,
+ const SkPaint& skPaint, const SkSurfaceProps& props,
+ const GrDistanceFieldAdjustTable* distanceAdjustTable,
+ GrBatchFontCache* cache) {
+ for (int subRun = 0; subRun < fRuns[run].fSubRunInfo.count(); subRun++) {
+ const Run::SubRunInfo& info = fRuns[run].fSubRunInfo[subRun];
+ int glyphCount = info.glyphCount();
+ if (0 == glyphCount) {
+ continue;
+ }
+
+ GrColor color = grPaint.getColor();
+
+ SkAutoTUnref<GrDrawBatch> batch(this->createBatch(info, glyphCount, run,
+ subRun, viewMatrix, x, y, color,
+ skPaint, props,
+ distanceAdjustTable, dc->isGammaCorrect(),
+ cache));
+
+ GrPipelineBuilder pipelineBuilder(grPaint, dc->mustUseHWAA(grPaint));
+
+ dc->drawBatch(pipelineBuilder, clip, batch);
+ }
+}
+
+static void calculate_translation(bool applyVM,
+ const SkMatrix& newViewMatrix, SkScalar newX, SkScalar newY,
+ const SkMatrix& currentViewMatrix, SkScalar currentX,
+ SkScalar currentY, SkScalar* transX, SkScalar* transY) {
+ if (applyVM) {
+ *transX = newViewMatrix.getTranslateX() +
+ newViewMatrix.getScaleX() * (newX - currentX) +
+ newViewMatrix.getSkewX() * (newY - currentY) -
+ currentViewMatrix.getTranslateX();
+
+ *transY = newViewMatrix.getTranslateY() +
+ newViewMatrix.getSkewY() * (newX - currentX) +
+ newViewMatrix.getScaleY() * (newY - currentY) -
+ currentViewMatrix.getTranslateY();
+ } else {
+ *transX = newX - currentX;
+ *transY = newY - currentY;
+ }
+}
+
+
+void GrAtlasTextBlob::flushBigGlyphs(GrContext* context, GrDrawContext* dc,
+ const GrClip& clip, const SkPaint& skPaint,
+ const SkMatrix& viewMatrix, SkScalar x, SkScalar y,
+ const SkIRect& clipBounds) {
+ SkScalar transX, transY;
+ for (int i = 0; i < fBigGlyphs.count(); i++) {
+ GrAtlasTextBlob::BigGlyph& bigGlyph = fBigGlyphs[i];
+ calculate_translation(bigGlyph.fApplyVM, viewMatrix, x, y,
+ fInitialViewMatrix, fInitialX, fInitialY, &transX, &transY);
+ SkMatrix ctm;
+ ctm.setScale(bigGlyph.fScale, bigGlyph.fScale);
+ ctm.postTranslate(bigGlyph.fX + transX, bigGlyph.fY + transY);
+ if (bigGlyph.fApplyVM) {
+ ctm.postConcat(viewMatrix);
+ }
+
+ GrBlurUtils::drawPathWithMaskFilter(context, dc, clip, bigGlyph.fPath,
+ skPaint, ctm, nullptr, clipBounds, false);
+ }
+}
+
+void GrAtlasTextBlob::flushRunAsPaths(GrContext* context, GrDrawContext* dc,
+ const SkSurfaceProps& props,
+ const SkTextBlobRunIterator& it,
+ const GrClip& clip, const SkPaint& skPaint,
+ SkDrawFilter* drawFilter, const SkMatrix& viewMatrix,
+ const SkIRect& clipBounds, SkScalar x, SkScalar y) {
+ SkPaint runPaint = skPaint;
+
+ size_t textLen = it.glyphCount() * sizeof(uint16_t);
+ const SkPoint& offset = it.offset();
+
+ it.applyFontToPaint(&runPaint);
+
+ if (drawFilter && !drawFilter->filter(&runPaint, SkDrawFilter::kText_Type)) {
+ return;
+ }
+
+ runPaint.setFlags(GrTextUtils::FilterTextFlags(props, runPaint));
+
+ switch (it.positioning()) {
+ case SkTextBlob::kDefault_Positioning:
+ GrTextUtils::DrawTextAsPath(context, dc, clip, runPaint, viewMatrix,
+ (const char *)it.glyphs(),
+ textLen, x + offset.x(), y + offset.y(), clipBounds);
+ break;
+ case SkTextBlob::kHorizontal_Positioning:
+ GrTextUtils::DrawPosTextAsPath(context, dc, props, clip, runPaint, viewMatrix,
+ (const char*)it.glyphs(),
+ textLen, it.pos(), 1, SkPoint::Make(x, y + offset.y()),
+ clipBounds);
+ break;
+ case SkTextBlob::kFull_Positioning:
+ GrTextUtils::DrawPosTextAsPath(context, dc, props, clip, runPaint, viewMatrix,
+ (const char*)it.glyphs(),
+ textLen, it.pos(), 2, SkPoint::Make(x, y), clipBounds);
+ break;
+ }
+}
+
+void GrAtlasTextBlob::flushCached(GrContext* context,
+ GrDrawContext* dc,
+ const SkTextBlob* blob,
+ const SkSurfaceProps& props,
+ const GrDistanceFieldAdjustTable* distanceAdjustTable,
+ const SkPaint& skPaint,
+ const GrPaint& grPaint,
+ SkDrawFilter* drawFilter,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkIRect& clipBounds,
+ SkScalar x, SkScalar y) {
+ // We loop through the runs of the blob, flushing each. If any run is too large, then we flush
+ // it as paths
+ SkTextBlobRunIterator it(blob);
+ for (int run = 0; !it.done(); it.next(), run++) {
+ if (fRuns[run].fDrawAsPaths) {
+ this->flushRunAsPaths(context, dc, props, it, clip, skPaint,
+ drawFilter, viewMatrix, clipBounds, x, y);
+ continue;
+ }
+ this->flushRun(dc, grPaint, clip, run, viewMatrix, x, y, skPaint, props,
+ distanceAdjustTable, context->getBatchFontCache());
+ }
+
+ // Now flush big glyphs
+ this->flushBigGlyphs(context, dc, clip, skPaint, viewMatrix, x, y, clipBounds);
+}
+
+void GrAtlasTextBlob::flushThrowaway(GrContext* context,
+ GrDrawContext* dc,
+ const SkSurfaceProps& props,
+ const GrDistanceFieldAdjustTable* distanceAdjustTable,
+ const SkPaint& skPaint,
+ const GrPaint& grPaint,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkIRect& clipBounds,
+ SkScalar x, SkScalar y) {
+ for (int run = 0; run < fRunCount; run++) {
+ this->flushRun(dc, grPaint, clip, run, viewMatrix, x, y, skPaint, props,
+ distanceAdjustTable, context->getBatchFontCache());
+ }
+
+ // Now flush big glyphs
+ this->flushBigGlyphs(context, dc, clip, skPaint, viewMatrix, x, y, clipBounds);
+}
+
+GrDrawBatch* GrAtlasTextBlob::test_createBatch(
+ int glyphCount, int run, int subRun,
+ const SkMatrix& viewMatrix, SkScalar x, SkScalar y,
+ GrColor color,
+ const SkPaint& skPaint, const SkSurfaceProps& props,
+ const GrDistanceFieldAdjustTable* distanceAdjustTable,
+ GrBatchFontCache* cache) {
+ const GrAtlasTextBlob::Run::SubRunInfo& info = fRuns[run].fSubRunInfo[subRun];
+ return this->createBatch(info, glyphCount, run, subRun, viewMatrix, x, y, color, skPaint,
+ props, distanceAdjustTable, false, cache);
+}
+
+void GrAtlasTextBlob::AssertEqual(const GrAtlasTextBlob& l, const GrAtlasTextBlob& r) {
+ SkASSERT_RELEASE(l.fSize == r.fSize);
+ SkASSERT_RELEASE(l.fPool == r.fPool);
+
+ SkASSERT_RELEASE(l.fBlurRec.fSigma == r.fBlurRec.fSigma);
+ SkASSERT_RELEASE(l.fBlurRec.fStyle == r.fBlurRec.fStyle);
+ SkASSERT_RELEASE(l.fBlurRec.fQuality == r.fBlurRec.fQuality);
+
+ SkASSERT_RELEASE(l.fStrokeInfo.fFrameWidth == r.fStrokeInfo.fFrameWidth);
+ SkASSERT_RELEASE(l.fStrokeInfo.fMiterLimit == r.fStrokeInfo.fMiterLimit);
+ SkASSERT_RELEASE(l.fStrokeInfo.fJoin == r.fStrokeInfo.fJoin);
+
+ SkASSERT_RELEASE(l.fBigGlyphs.count() == r.fBigGlyphs.count());
+ for (int i = 0; i < l.fBigGlyphs.count(); i++) {
+ const BigGlyph& lBigGlyph = l.fBigGlyphs[i];
+ const BigGlyph& rBigGlyph = r.fBigGlyphs[i];
+
+ SkASSERT_RELEASE(lBigGlyph.fPath == rBigGlyph.fPath);
+ // We can't assert that these have the same translations
+ }
+
+ SkASSERT_RELEASE(l.fKey == r.fKey);
+ //SkASSERT_RELEASE(l.fPaintColor == r.fPaintColor); // Colors might not actually be identical
+ SkASSERT_RELEASE(l.fMaxMinScale == r.fMaxMinScale);
+ SkASSERT_RELEASE(l.fMinMaxScale == r.fMinMaxScale);
+ SkASSERT_RELEASE(l.fTextType == r.fTextType);
+
+ SkASSERT_RELEASE(l.fRunCount == r.fRunCount);
+ for (int i = 0; i < l.fRunCount; i++) {
+ const Run& lRun = l.fRuns[i];
+ const Run& rRun = r.fRuns[i];
+
+ if (lRun.fTypeface.get()) {
+ SkASSERT_RELEASE(rRun.fTypeface.get());
+ SkASSERT_RELEASE(SkTypeface::Equal(lRun.fTypeface, rRun.fTypeface));
+ } else {
+ SkASSERT_RELEASE(!rRun.fTypeface.get());
+ }
+
+
+ SkASSERT_RELEASE(lRun.fDescriptor.getDesc());
+ SkASSERT_RELEASE(rRun.fDescriptor.getDesc());
+ SkASSERT_RELEASE(*lRun.fDescriptor.getDesc() == *rRun.fDescriptor.getDesc());
+
+ if (lRun.fOverrideDescriptor.get()) {
+ SkASSERT_RELEASE(lRun.fOverrideDescriptor->getDesc());
+ SkASSERT_RELEASE(rRun.fOverrideDescriptor.get() && rRun.fOverrideDescriptor->getDesc());
+ SkASSERT_RELEASE(*lRun.fOverrideDescriptor->getDesc() ==
+ *rRun.fOverrideDescriptor->getDesc());
+ } else {
+ SkASSERT_RELEASE(!rRun.fOverrideDescriptor.get());
+ }
+
+ // color can be changed
+ //SkASSERT(lRun.fColor == rRun.fColor);
+ SkASSERT_RELEASE(lRun.fInitialized == rRun.fInitialized);
+ SkASSERT_RELEASE(lRun.fDrawAsPaths == rRun.fDrawAsPaths);
+
+ SkASSERT_RELEASE(lRun.fSubRunInfo.count() == rRun.fSubRunInfo.count());
+ for(int j = 0; j < lRun.fSubRunInfo.count(); j++) {
+ const Run::SubRunInfo& lSubRun = lRun.fSubRunInfo[j];
+ const Run::SubRunInfo& rSubRun = rRun.fSubRunInfo[j];
+
+ // TODO we can do this check, but we have to apply the VM to the old vertex bounds
+ //SkASSERT_RELEASE(lSubRun.vertexBounds() == rSubRun.vertexBounds());
+
+ if (lSubRun.strike()) {
+ SkASSERT_RELEASE(rSubRun.strike());
+ SkASSERT_RELEASE(GrBatchTextStrike::GetKey(*lSubRun.strike()) ==
+ GrBatchTextStrike::GetKey(*rSubRun.strike()));
+
+ } else {
+ SkASSERT_RELEASE(!rSubRun.strike());
+ }
+
+ SkASSERT_RELEASE(lSubRun.vertexStartIndex() == rSubRun.vertexStartIndex());
+ SkASSERT_RELEASE(lSubRun.vertexEndIndex() == rSubRun.vertexEndIndex());
+ SkASSERT_RELEASE(lSubRun.glyphStartIndex() == rSubRun.glyphStartIndex());
+ SkASSERT_RELEASE(lSubRun.glyphEndIndex() == rSubRun.glyphEndIndex());
+ SkASSERT_RELEASE(lSubRun.maskFormat() == rSubRun.maskFormat());
+ SkASSERT_RELEASE(lSubRun.drawAsDistanceFields() == rSubRun.drawAsDistanceFields());
+ SkASSERT_RELEASE(lSubRun.hasUseLCDText() == rSubRun.hasUseLCDText());
+ }
+ }
+}
+
+void GrAtlasTextBlob::Run::SubRunInfo::computeTranslation(const SkMatrix& viewMatrix,
+ SkScalar x, SkScalar y, SkScalar* transX,
+ SkScalar* transY) {
+ calculate_translation(!this->drawAsDistanceFields(), viewMatrix, x, y,
+ fCurrentViewMatrix, fX, fY, transX, transY);
+ fCurrentViewMatrix = viewMatrix;
+ fX = x;
+ fY = y;
+}
diff --git a/gfx/skia/skia/src/gpu/text/GrAtlasTextBlob.h b/gfx/skia/skia/src/gpu/text/GrAtlasTextBlob.h
new file mode 100644
index 000000000..afc11a9bd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrAtlasTextBlob.h
@@ -0,0 +1,567 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAtlasTextBlob_DEFINED
+#define GrAtlasTextBlob_DEFINED
+
+#include "GrBatchAtlas.h"
+#include "GrBatchFontCache.h"
+#include "GrColor.h"
+#include "GrMemoryPool.h"
+#include "SkDescriptor.h"
+#include "SkMaskFilter.h"
+#include "SkOpts.h"
+#include "SkPathEffect.h"
+#include "SkRasterizer.h"
+#include "SkSurfaceProps.h"
+#include "SkTInternalLList.h"
+
+class GrBlobRegenHelper;
+struct GrDistanceFieldAdjustTable;
+class GrMemoryPool;
+class SkDrawFilter;
+class SkTextBlob;
+class SkTextBlobRunIterator;
+
+// With this flag enabled, the GrAtlasTextContext will, as a sanity check, regenerate every blob
+// that comes in to verify the integrity of its cache
+#define CACHE_SANITY_CHECK 0
+
+/*
+ * A GrAtlasTextBlob contains a fully processed SkTextBlob, suitable for nearly immediate drawing
+ * on the GPU. These are initially created with valid positions and colors, but invalid
+ * texture coordinates. The GrAtlasTextBlob itself has a few Blob-wide properties, and also
+ * consists of a number of runs. Runs inside a blob are flushed individually so they can be
+ * reordered.
+ *
+ * The only thing (aside from a memcpy) required to flush a GrAtlasTextBlob is to ensure that
+ * the GrAtlas will not evict anything the Blob needs.
+ *
+ * Note: This struct should really be named GrCachedAtlasTextBlob, but that is too verbose.
+ *
+ * *WARNING* If you add new fields to this struct, then you may need to update AssertEqual
+ */
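+// Rough lifecycle (sketch): Create() -> initReusableBlob()/initThrowawayBlob() -> setupCache() and
+// appendGlyph() per run -> flushCached()/flushThrowaway(); mustRegenerate() decides, on reuse,
+// whether the recorded geometry is still valid or the blob must be rebuilt.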
+class GrAtlasTextBlob : public SkNVRefCnt<GrAtlasTextBlob> {
+public:
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrAtlasTextBlob);
+
+ static GrAtlasTextBlob* Create(GrMemoryPool* pool, int glyphCount, int runCount);
+
+ struct Key {
+ Key() {
+ sk_bzero(this, sizeof(Key));
+ }
+ uint32_t fUniqueID;
+ // Color may affect the gamma of the mask we generate, but in a fairly limited way.
+        // Each color is assigned to one of a fixed number of buckets based on its
+        // luminance. For each luminance bucket there is a "canonical color" that
+        // represents the bucket. This functionality is currently only supported for A8.
+ SkColor fCanonicalColor;
+ SkPaint::Style fStyle;
+ SkPixelGeometry fPixelGeometry;
+ bool fHasBlur;
+ uint32_t fScalerContextFlags;
+
+ bool operator==(const Key& other) const {
+ return 0 == memcmp(this, &other, sizeof(Key));
+ }
+ };
+
+ void setupKey(const GrAtlasTextBlob::Key& key,
+ const SkMaskFilter::BlurRec& blurRec,
+ const SkPaint& paint) {
+ fKey = key;
+ if (key.fHasBlur) {
+ fBlurRec = blurRec;
+ }
+ if (key.fStyle != SkPaint::kFill_Style) {
+ fStrokeInfo.fFrameWidth = paint.getStrokeWidth();
+ fStrokeInfo.fMiterLimit = paint.getStrokeMiter();
+ fStrokeInfo.fJoin = paint.getStrokeJoin();
+ }
+ }
+
+ static const Key& GetKey(const GrAtlasTextBlob& blob) {
+ return blob.fKey;
+ }
+
+ static uint32_t Hash(const Key& key) {
+ return SkOpts::hash(&key, sizeof(Key));
+ }
+
+ void operator delete(void* p) {
+ GrAtlasTextBlob* blob = reinterpret_cast<GrAtlasTextBlob*>(p);
+ blob->fPool->release(p);
+ }
+ void* operator new(size_t) {
+ SkFAIL("All blobs are created by placement new.");
+ return sk_malloc_throw(0);
+ }
+
+ void* operator new(size_t, void* p) { return p; }
+ void operator delete(void* target, void* placement) {
+ ::operator delete(target, placement);
+ }
+
+ bool hasDistanceField() const { return SkToBool(fTextType & kHasDistanceField_TextType); }
+ bool hasBitmap() const { return SkToBool(fTextType & kHasBitmap_TextType); }
+ void setHasDistanceField() { fTextType |= kHasDistanceField_TextType; }
+ void setHasBitmap() { fTextType |= kHasBitmap_TextType; }
+
+ int runCount() const { return fRunCount; }
+
+ void push_back_run(int currRun) {
+ SkASSERT(currRun < fRunCount);
+ if (currRun > 0) {
+ Run::SubRunInfo& newRun = fRuns[currRun].fSubRunInfo.back();
+ Run::SubRunInfo& lastRun = fRuns[currRun - 1].fSubRunInfo.back();
+ newRun.setAsSuccessor(lastRun);
+ }
+ }
+
+ // sets the last subrun of runIndex to use distance field text
+ void setSubRunHasDistanceFields(int runIndex, bool hasLCD) {
+ Run& run = fRuns[runIndex];
+ Run::SubRunInfo& subRun = run.fSubRunInfo.back();
+ subRun.setUseLCDText(hasLCD);
+ subRun.setDrawAsDistanceFields();
+ }
+
+ void setRunDrawAsPaths(int runIndex) {
+ fRuns[runIndex].fDrawAsPaths = true;
+ }
+
+ void setMinAndMaxScale(SkScalar scaledMax, SkScalar scaledMin) {
+ // we init fMaxMinScale and fMinMaxScale in the constructor
+ fMaxMinScale = SkMaxScalar(scaledMax, fMaxMinScale);
+ fMinMaxScale = SkMinScalar(scaledMin, fMinMaxScale);
+ }
+
+ // inits the override descriptor on the current run. All following subruns must use this
+ // descriptor
+ void initOverride(int runIndex) {
+ Run& run = fRuns[runIndex];
+ // Push back a new subrun to fill and set the override descriptor
+ run.push_back();
+ run.fOverrideDescriptor.reset(new SkAutoDescriptor);
+ }
+
+ SkGlyphCache* setupCache(int runIndex,
+ const SkSurfaceProps& props,
+ uint32_t scalerContextFlags,
+ const SkPaint& skPaint,
+ const SkMatrix* viewMatrix);
+
+ // Appends a glyph to the blob. If the glyph is too large, the glyph will be appended
+ // as a path.
+ void appendGlyph(int runIndex,
+ const SkRect& positions,
+ GrColor color,
+ GrBatchTextStrike* strike,
+ GrGlyph* glyph,
+ SkGlyphCache*, const SkGlyph& skGlyph,
+ SkScalar x, SkScalar y, SkScalar scale, bool applyVM);
+
+ static size_t GetVertexStride(GrMaskFormat maskFormat) {
+ switch (maskFormat) {
+ case kA8_GrMaskFormat:
+ return kGrayTextVASize;
+ case kARGB_GrMaskFormat:
+ return kColorTextVASize;
+ default:
+ return kLCDTextVASize;
+ }
+ }
+
+ bool mustRegenerate(const SkPaint& paint, GrColor color, const SkMaskFilter::BlurRec& blurRec,
+ const SkMatrix& viewMatrix, SkScalar x, SkScalar y);
+
+ // flush a GrAtlasTextBlob associated with a SkTextBlob
+ void flushCached(GrContext* context,
+ GrDrawContext* dc,
+ const SkTextBlob* blob,
+ const SkSurfaceProps& props,
+ const GrDistanceFieldAdjustTable* distanceAdjustTable,
+ const SkPaint& skPaint,
+ const GrPaint& grPaint,
+ SkDrawFilter* drawFilter,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkIRect& clipBounds,
+ SkScalar x, SkScalar y);
+
+ // flush a throwaway GrAtlasTextBlob *not* associated with an SkTextBlob
+ void flushThrowaway(GrContext* context,
+ GrDrawContext* dc,
+ const SkSurfaceProps& props,
+ const GrDistanceFieldAdjustTable* distanceAdjustTable,
+ const SkPaint& skPaint,
+ const GrPaint& grPaint,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkIRect& clipBounds,
+ SkScalar x, SkScalar y);
+
+ void computeSubRunBounds(SkRect* outBounds, int runIndex, int subRunIndex,
+ const SkMatrix& viewMatrix, SkScalar x, SkScalar y) {
+ // We don't yet position distance field text on the cpu, so we have to map the vertex bounds
+ // into device space.
+ // We handle vertex bounds differently for distance field text and bitmap text because
+ // the vertex bounds of bitmap text are in device space. If we are flushing multiple runs
+ // from one blob then we are going to pay the price here of mapping the rect for each run.
+ const Run& run = fRuns[runIndex];
+ const Run::SubRunInfo& subRun = run.fSubRunInfo[subRunIndex];
+ *outBounds = subRun.vertexBounds();
+ if (subRun.drawAsDistanceFields()) {
+ // Distance field text is positioned with the (X,Y) as part of the glyph position,
+ // and currently the view matrix is applied on the GPU
+ outBounds->offset(x - fInitialX, y - fInitialY);
+ viewMatrix.mapRect(outBounds);
+ } else {
+ // Bitmap text is fully positioned on the CPU, and offset by an (X,Y) translate in
+ // device space.
+ SkMatrix boundsMatrix = fInitialViewMatrixInverse;
+
+ boundsMatrix.postTranslate(-fInitialX, -fInitialY);
+
+ boundsMatrix.postTranslate(x, y);
+
+ boundsMatrix.postConcat(viewMatrix);
+ boundsMatrix.mapRect(outBounds);
+
+ // Due to floating point numerical inaccuracies, we have to round out here
+ outBounds->roundOut(outBounds);
+ }
+ }
+
+ // position + local coord
+ static const size_t kColorTextVASize = sizeof(SkPoint) + sizeof(SkIPoint16);
+ static const size_t kGrayTextVASize = sizeof(SkPoint) + sizeof(GrColor) + sizeof(SkIPoint16);
+ static const size_t kLCDTextVASize = kGrayTextVASize;
+ static const size_t kMaxVASize = kGrayTextVASize;
+ static const int kVerticesPerGlyph = 4;
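+    // i.e. (sketch) A8/LCD vertices are { SkPoint position, GrColor color, SkIPoint16 texCoord },
+    // while ARGB vertices omit the color: { SkPoint position, SkIPoint16 texCoord }.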
+
+ static void AssertEqual(const GrAtlasTextBlob&, const GrAtlasTextBlob&);
+
+ // The color here is the GrPaint color, and it is used to determine whether we
+ // have to regenerate LCD text blobs.
+    // We use this color rather than the SkPaint color because it has the color filter applied.
+ void initReusableBlob(GrColor color, const SkMatrix& viewMatrix, SkScalar x, SkScalar y) {
+ fPaintColor = color;
+ this->setupViewMatrix(viewMatrix, x, y);
+ }
+
+ void initThrowawayBlob(const SkMatrix& viewMatrix, SkScalar x, SkScalar y) {
+ this->setupViewMatrix(viewMatrix, x, y);
+ }
+
+ /**
+ * Consecutive calls to regenInBatch often use the same SkGlyphCache. If the same instance of
+ * SkAutoGlyphCache is passed to multiple calls of regenInBatch then it can save the cost of
+ * multiple detach/attach operations of SkGlyphCache.
+ */
+ void regenInBatch(GrDrawBatch::Target* target, GrBatchFontCache* fontCache,
+ GrBlobRegenHelper *helper, int run, int subRun, SkAutoGlyphCache*,
+ size_t vertexStride, const SkMatrix& viewMatrix, SkScalar x, SkScalar y,
+ GrColor color, void** vertices, size_t* byteCount, int* glyphCount);
+
+ const Key& key() const { return fKey; }
+
+ ~GrAtlasTextBlob() {
+ for (int i = 0; i < fRunCount; i++) {
+ fRuns[i].~Run();
+ }
+ }
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////
+ // Internal test methods
+ GrDrawBatch* test_createBatch(int glyphCount, int run, int subRun,
+ const SkMatrix& viewMatrix, SkScalar x, SkScalar y, GrColor color,
+ const SkPaint& skPaint, const SkSurfaceProps& props,
+ const GrDistanceFieldAdjustTable* distanceAdjustTable,
+ GrBatchFontCache* cache);
+
+private:
+ GrAtlasTextBlob()
+ : fMaxMinScale(-SK_ScalarMax)
+ , fMinMaxScale(SK_ScalarMax)
+ , fTextType(0) {}
+
+ void appendLargeGlyph(GrGlyph* glyph, SkGlyphCache* cache, const SkGlyph& skGlyph,
+ SkScalar x, SkScalar y, SkScalar scale, bool applyVM);
+
+ inline void flushRun(GrDrawContext* dc, const GrPaint&, const GrClip&,
+ int run, const SkMatrix& viewMatrix, SkScalar x, SkScalar y,
+ const SkPaint& skPaint, const SkSurfaceProps& props,
+ const GrDistanceFieldAdjustTable* distanceAdjustTable,
+ GrBatchFontCache* cache);
+
+ void flushBigGlyphs(GrContext* context, GrDrawContext* dc,
+ const GrClip& clip, const SkPaint& skPaint,
+ const SkMatrix& viewMatrix, SkScalar x, SkScalar y,
+ const SkIRect& clipBounds);
+
+ void flushRunAsPaths(GrContext* context,
+ GrDrawContext* dc,
+ const SkSurfaceProps& props,
+ const SkTextBlobRunIterator& it,
+ const GrClip& clip, const SkPaint& skPaint,
+ SkDrawFilter* drawFilter, const SkMatrix& viewMatrix,
+ const SkIRect& clipBounds, SkScalar x, SkScalar y);
+
+ // This function will only be called when we are generating a blob from scratch. We record the
+    // initial view matrix and initial offsets (x, y), because we record vertex bounds relative to
+ // these numbers. When blobs are reused with new matrices, we need to return to model space so
+ // we can update the vertex bounds appropriately.
+ void setupViewMatrix(const SkMatrix& viewMatrix, SkScalar x, SkScalar y) {
+ fInitialViewMatrix = viewMatrix;
+ if (!viewMatrix.invert(&fInitialViewMatrixInverse)) {
+ fInitialViewMatrixInverse = SkMatrix::I();
+ SkDebugf("Could not invert viewmatrix\n");
+ }
+ fInitialX = x;
+ fInitialY = y;
+
+ // make sure all initial subruns have the correct VM and X/Y applied
+ for (int i = 0; i < fRunCount; i++) {
+ fRuns[i].fSubRunInfo[0].init(fInitialViewMatrix, x, y);
+ }
+ }
+
+ /*
+ * Each Run inside of the blob can have its texture coordinates regenerated if required.
+ * To determine if regeneration is necessary, fAtlasGeneration is used. If there have been
+ * any evictions inside of the atlas, then we will simply regenerate Runs. We could track
+     * this at a more fine-grained level, but it's not clear if this is worth it, as evictions
+ * should be fairly rare.
+ *
+     * One additional point: each run can contain glyphs with any of the three mask formats.
+     * We call these SubRuns. Because a subrun must be a contiguous range, we have to create
+     * a new subrun each time the mask format changes in a run. In theory, a run can have as
+     * many SubRuns as it has glyphs, i.e. if a run alternates between color emoji and A8. In
+ * practice, the vast majority of runs have only a single subrun.
+ *
+ * Finally, for runs where the entire thing is too large for the GrAtlasTextContext to
+     * handle, we have a bit to mark the run as flushable via rendering as paths. It is worth
+     * pointing out that it would be a bit expensive to figure out ahead of time whether or not
+     * a run can flush in this manner, so we always allocate vertices for the run, regardless of
+ * whether or not it is too large. The benefit of this strategy is that we can always reuse
+ * a blob allocation regardless of viewmatrix changes. We could store positions for these
+     * glyphs. However, it's not clear if this is a win because we'd still have to either go to
+     * the glyph cache to get the path at flush time, or hold onto the path in the cache, which
+ * would greatly increase the memory of these cached items.
+ */
+ struct Run {
+ Run()
+ : fInitialized(false)
+ , fDrawAsPaths(false) {
+            // To ensure we always have one subrun, we push back a fresh subrun here
+ fSubRunInfo.push_back();
+ }
+ struct SubRunInfo {
+ SubRunInfo()
+ : fAtlasGeneration(GrBatchAtlas::kInvalidAtlasGeneration)
+ , fVertexStartIndex(0)
+ , fVertexEndIndex(0)
+ , fGlyphStartIndex(0)
+ , fGlyphEndIndex(0)
+ , fColor(GrColor_ILLEGAL)
+ , fMaskFormat(kA8_GrMaskFormat)
+ , fDrawAsDistanceFields(false)
+ , fUseLCDText(false) {
+ fVertexBounds.setLargestInverted();
+ }
+ SubRunInfo(const SubRunInfo& that)
+ : fBulkUseToken(that.fBulkUseToken)
+ , fStrike(SkSafeRef(that.fStrike.get()))
+ , fCurrentViewMatrix(that.fCurrentViewMatrix)
+ , fVertexBounds(that.fVertexBounds)
+ , fAtlasGeneration(that.fAtlasGeneration)
+ , fVertexStartIndex(that.fVertexStartIndex)
+ , fVertexEndIndex(that.fVertexEndIndex)
+ , fGlyphStartIndex(that.fGlyphStartIndex)
+ , fGlyphEndIndex(that.fGlyphEndIndex)
+ , fX(that.fX)
+ , fY(that.fY)
+ , fColor(that.fColor)
+ , fMaskFormat(that.fMaskFormat)
+ , fDrawAsDistanceFields(that.fDrawAsDistanceFields)
+ , fUseLCDText(that.fUseLCDText) {
+ }
+
+ // TODO when this object is more internal, drop the privacy
+ void resetBulkUseToken() { fBulkUseToken.reset(); }
+ GrBatchAtlas::BulkUseTokenUpdater* bulkUseToken() { return &fBulkUseToken; }
+ void setStrike(GrBatchTextStrike* strike) { fStrike.reset(SkRef(strike)); }
+ GrBatchTextStrike* strike() const { return fStrike.get(); }
+
+ void setAtlasGeneration(uint64_t atlasGeneration) { fAtlasGeneration = atlasGeneration;}
+ uint64_t atlasGeneration() const { return fAtlasGeneration; }
+
+ size_t byteCount() const { return fVertexEndIndex - fVertexStartIndex; }
+ size_t vertexStartIndex() const { return fVertexStartIndex; }
+ size_t vertexEndIndex() const { return fVertexEndIndex; }
+ void appendVertices(size_t vertexStride) {
+ fVertexEndIndex += vertexStride * kVerticesPerGlyph;
+ }
+
+ uint32_t glyphCount() const { return fGlyphEndIndex - fGlyphStartIndex; }
+ uint32_t glyphStartIndex() const { return fGlyphStartIndex; }
+ uint32_t glyphEndIndex() const { return fGlyphEndIndex; }
+ void glyphAppended() { fGlyphEndIndex++; }
+ void setColor(GrColor color) { fColor = color; }
+ GrColor color() const { return fColor; }
+ void setMaskFormat(GrMaskFormat format) { fMaskFormat = format; }
+ GrMaskFormat maskFormat() const { return fMaskFormat; }
+
+ void setAsSuccessor(const SubRunInfo& prev) {
+ fGlyphStartIndex = prev.glyphEndIndex();
+ fGlyphEndIndex = prev.glyphEndIndex();
+
+ fVertexStartIndex = prev.vertexEndIndex();
+ fVertexEndIndex = prev.vertexEndIndex();
+
+ // copy over viewmatrix settings
+ this->init(prev.fCurrentViewMatrix, prev.fX, prev.fY);
+ }
+
+ const SkRect& vertexBounds() const { return fVertexBounds; }
+ void joinGlyphBounds(const SkRect& glyphBounds) {
+ fVertexBounds.joinNonEmptyArg(glyphBounds);
+ }
+
+ void init(const SkMatrix& viewMatrix, SkScalar x, SkScalar y) {
+ fCurrentViewMatrix = viewMatrix;
+ fX = x;
+ fY = y;
+ }
+
+ // This function assumes the translation will be applied before it is called again
+ void computeTranslation(const SkMatrix& viewMatrix, SkScalar x, SkScalar y,
+                                    SkScalar* transX, SkScalar* transY);
+
+ // df properties
+ void setUseLCDText(bool useLCDText) { fUseLCDText = useLCDText; }
+ bool hasUseLCDText() const { return fUseLCDText; }
+ void setDrawAsDistanceFields() { fDrawAsDistanceFields = true; }
+ bool drawAsDistanceFields() const { return fDrawAsDistanceFields; }
+
+ private:
+ GrBatchAtlas::BulkUseTokenUpdater fBulkUseToken;
+ SkAutoTUnref<GrBatchTextStrike> fStrike;
+ SkMatrix fCurrentViewMatrix;
+ SkRect fVertexBounds;
+ uint64_t fAtlasGeneration;
+ size_t fVertexStartIndex;
+ size_t fVertexEndIndex;
+ uint32_t fGlyphStartIndex;
+ uint32_t fGlyphEndIndex;
+ SkScalar fX;
+ SkScalar fY;
+ GrColor fColor;
+ GrMaskFormat fMaskFormat;
+ bool fDrawAsDistanceFields; // df property
+ bool fUseLCDText; // df property
+ };
+
+ SubRunInfo& push_back() {
+ // Forward glyph / vertex information to seed the new sub run
+ SubRunInfo& newSubRun = fSubRunInfo.push_back();
+ const SubRunInfo& prevSubRun = fSubRunInfo.fromBack(1);
+
+ newSubRun.setAsSuccessor(prevSubRun);
+ return newSubRun;
+ }
+ static const int kMinSubRuns = 1;
+ SkAutoTUnref<SkTypeface> fTypeface;
+ SkSTArray<kMinSubRuns, SubRunInfo> fSubRunInfo;
+ SkAutoDescriptor fDescriptor;
+
+ // Effects from the paint that are used to build a SkScalerContext.
+ sk_sp<SkPathEffect> fPathEffect;
+ sk_sp<SkRasterizer> fRasterizer;
+ sk_sp<SkMaskFilter> fMaskFilter;
+
+        // Distance field text cannot draw color emoji, and so has to fall back. However,
+        // though the distance field text and the color emoji may share the same run, they
+        // will have different descriptors. If fOverrideDescriptor is non-nullptr, then it
+        // will be used in place of the run's descriptor to regenerate texture coords.
+ SkAutoTDelete<SkAutoDescriptor> fOverrideDescriptor; // df properties
+ bool fInitialized;
+ bool fDrawAsPaths;
+ };
+
+ template <bool regenPos, bool regenCol, bool regenTexCoords, bool regenGlyphs>
+ void regenInBatch(GrDrawBatch::Target* target,
+ GrBatchFontCache* fontCache,
+ GrBlobRegenHelper* helper,
+ Run* run, Run::SubRunInfo* info,
+ SkAutoGlyphCache*, int glyphCount,
+ size_t vertexStride,
+ GrColor color, SkScalar transX,
+ SkScalar transY) const;
+
+ inline GrDrawBatch* createBatch(const Run::SubRunInfo& info,
+ int glyphCount, int run, int subRun,
+ const SkMatrix& viewMatrix, SkScalar x, SkScalar y,
+ GrColor color,
+ const SkPaint& skPaint, const SkSurfaceProps& props,
+ const GrDistanceFieldAdjustTable* distanceAdjustTable,
+ bool useGammaCorrectDistanceTable,
+ GrBatchFontCache* cache);
+
+ struct BigGlyph {
+ BigGlyph(const SkPath& path, SkScalar vx, SkScalar vy, SkScalar scale, bool applyVM)
+ : fPath(path)
+ , fScale(scale)
+ , fX(vx)
+ , fY(vy)
+ , fApplyVM(applyVM) {}
+ SkPath fPath;
+ SkScalar fScale;
+ SkScalar fX;
+ SkScalar fY;
+ bool fApplyVM;
+ };
+
+ struct StrokeInfo {
+ SkScalar fFrameWidth;
+ SkScalar fMiterLimit;
+ SkPaint::Join fJoin;
+ };
+
+ enum TextType {
+ kHasDistanceField_TextType = 0x1,
+ kHasBitmap_TextType = 0x2,
+ };
+
+ // all glyph / vertex offsets are into these pools.
+ unsigned char* fVertices;
+ GrGlyph** fGlyphs;
+ Run* fRuns;
+ GrMemoryPool* fPool;
+ SkMaskFilter::BlurRec fBlurRec;
+ StrokeInfo fStrokeInfo;
+ SkTArray<BigGlyph> fBigGlyphs;
+ Key fKey;
+ SkMatrix fInitialViewMatrix;
+ SkMatrix fInitialViewMatrixInverse;
+ size_t fSize;
+ GrColor fPaintColor;
+ SkScalar fInitialX;
+ SkScalar fInitialY;
+
+ // We can reuse distance field text, but only if the new viewmatrix would not result in
+ // a mip change. Because there can be multiple runs in a blob, we track the overall
+    // maximum minimum scale and minimum maximum scale we can support before we need to regen.
+ SkScalar fMaxMinScale;
+ SkScalar fMinMaxScale;
+ int fRunCount;
+ uint8_t fTextType;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/text/GrAtlasTextBlob_regenInBatch.cpp b/gfx/skia/skia/src/gpu/text/GrAtlasTextBlob_regenInBatch.cpp
new file mode 100644
index 000000000..59df1fa81
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrAtlasTextBlob_regenInBatch.cpp
@@ -0,0 +1,314 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrAtlasTextBlob.h"
+
+#include "GrBatchFlushState.h"
+#include "GrTextUtils.h"
+
+#include "SkDistanceFieldGen.h"
+#include "SkGlyphCache.h"
+
+#include "batches/GrAtlasTextBatch.h"
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// A large template to handle regenerating the vertices of a textblob with as few branches as
+// possible
+template <bool regenPos, bool regenCol, bool regenTexCoords>
+inline void regen_vertices(intptr_t vertex, const GrGlyph* glyph, size_t vertexStride,
+ bool useDistanceFields, SkScalar transX, SkScalar transY,
+ int32_t log2Width, int32_t log2Height,
+ GrColor color) {
+ int u0, v0, u1, v1;
+ if (regenTexCoords) {
+ SkASSERT(glyph);
+ int width = glyph->fBounds.width();
+ int height = glyph->fBounds.height();
+
+ if (useDistanceFields) {
+ u0 = glyph->fAtlasLocation.fX + SK_DistanceFieldInset;
+ v0 = glyph->fAtlasLocation.fY + SK_DistanceFieldInset;
+ u1 = u0 + width - 2 * SK_DistanceFieldInset;
+ v1 = v0 + height - 2 * SK_DistanceFieldInset;
+ } else {
+ u0 = glyph->fAtlasLocation.fX;
+ v0 = glyph->fAtlasLocation.fY;
+ u1 = u0 + width;
+ v1 = v0 + height;
+ }
+
+ // normalize
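+        // (Sketch) This maps atlas texel coords to 16-bit normalized texture coords:
+        // u_norm = u * 65535 / atlasWidth, where atlasWidth == (1 << log2Width); likewise for v.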
+ u0 *= 65535;
+ u0 >>= log2Width;
+ u1 *= 65535;
+ u1 >>= log2Width;
+ v0 *= 65535;
+ v0 >>= log2Height;
+ v1 *= 65535;
+ v1 >>= log2Height;
+ SkASSERT(u0 >= 0 && u0 <= 65535);
+ SkASSERT(u1 >= 0 && u1 <= 65535);
+ SkASSERT(v0 >= 0 && v0 <= 65535);
+ SkASSERT(v1 >= 0 && v1 <= 65535);
+ }
+
+ // This is a bit wonky, but sometimes we have LCD text, in which case we won't have color
+ // vertices, hence vertexStride - sizeof(SkIPoint16)
+ intptr_t colorOffset = sizeof(SkPoint);
+ intptr_t texCoordOffset = vertexStride - sizeof(SkIPoint16);
+
+ // V0
+ if (regenPos) {
+ SkPoint* point = reinterpret_cast<SkPoint*>(vertex);
+ point->fX += transX;
+ point->fY += transY;
+ }
+
+ if (regenCol) {
+ SkColor* vcolor = reinterpret_cast<SkColor*>(vertex + colorOffset);
+ *vcolor = color;
+ }
+
+ if (regenTexCoords) {
+ uint16_t* textureCoords = reinterpret_cast<uint16_t*>(vertex + texCoordOffset);
+ textureCoords[0] = (uint16_t) u0;
+ textureCoords[1] = (uint16_t) v0;
+ }
+ vertex += vertexStride;
+
+ // V1
+ if (regenPos) {
+ SkPoint* point = reinterpret_cast<SkPoint*>(vertex);
+ point->fX += transX;
+ point->fY += transY;
+ }
+
+ if (regenCol) {
+ SkColor* vcolor = reinterpret_cast<SkColor*>(vertex + colorOffset);
+ *vcolor = color;
+ }
+
+ if (regenTexCoords) {
+ uint16_t* textureCoords = reinterpret_cast<uint16_t*>(vertex + texCoordOffset);
+ textureCoords[0] = (uint16_t)u0;
+ textureCoords[1] = (uint16_t)v1;
+ }
+ vertex += vertexStride;
+
+ // V2
+ if (regenPos) {
+ SkPoint* point = reinterpret_cast<SkPoint*>(vertex);
+ point->fX += transX;
+ point->fY += transY;
+ }
+
+ if (regenCol) {
+ SkColor* vcolor = reinterpret_cast<SkColor*>(vertex + colorOffset);
+ *vcolor = color;
+ }
+
+ if (regenTexCoords) {
+ uint16_t* textureCoords = reinterpret_cast<uint16_t*>(vertex + texCoordOffset);
+ textureCoords[0] = (uint16_t)u1;
+ textureCoords[1] = (uint16_t)v1;
+ }
+ vertex += vertexStride;
+
+ // V3
+ if (regenPos) {
+ SkPoint* point = reinterpret_cast<SkPoint*>(vertex);
+ point->fX += transX;
+ point->fY += transY;
+ }
+
+ if (regenCol) {
+ SkColor* vcolor = reinterpret_cast<SkColor*>(vertex + colorOffset);
+ *vcolor = color;
+ }
+
+ if (regenTexCoords) {
+ uint16_t* textureCoords = reinterpret_cast<uint16_t*>(vertex + texCoordOffset);
+ textureCoords[0] = (uint16_t)u1;
+ textureCoords[1] = (uint16_t)v0;
+ }
+}
+
+template <bool regenPos, bool regenCol, bool regenTexCoords, bool regenGlyphs>
+void GrAtlasTextBlob::regenInBatch(GrDrawBatch::Target* target,
+ GrBatchFontCache* fontCache,
+ GrBlobRegenHelper *helper,
+ Run* run,
+ Run::SubRunInfo* info,
+ SkAutoGlyphCache* lazyCache,
+ int glyphCount, size_t vertexStride,
+ GrColor color, SkScalar transX,
+ SkScalar transY) const {
+ SkASSERT(lazyCache);
+    static_assert(!regenGlyphs || regenTexCoords, "must regenTexCoords along with regenGlyphs");
+ GrBatchTextStrike* strike = nullptr;
+ if (regenTexCoords) {
+ info->resetBulkUseToken();
+
+ const SkDescriptor* desc = (run->fOverrideDescriptor && !info->drawAsDistanceFields())
+ ? run->fOverrideDescriptor->getDesc()
+ : run->fDescriptor.getDesc();
+
+ if (!*lazyCache || (*lazyCache)->getDescriptor() != *desc) {
+ SkScalerContextEffects effects;
+ effects.fPathEffect = run->fPathEffect.get();
+ effects.fRasterizer = run->fRasterizer.get();
+ effects.fMaskFilter = run->fMaskFilter.get();
+ lazyCache->reset(SkGlyphCache::DetachCache(run->fTypeface, effects, desc));
+ }
+
+ if (regenGlyphs) {
+ strike = fontCache->getStrike(lazyCache->get());
+ } else {
+ strike = info->strike();
+ }
+ }
+
+ bool brokenRun = false;
+ for (int glyphIdx = 0; glyphIdx < glyphCount; glyphIdx++) {
+ GrGlyph* glyph = nullptr;
+ int log2Width = 0, log2Height = 0;
+ if (regenTexCoords) {
+ size_t glyphOffset = glyphIdx + info->glyphStartIndex();
+
+ if (regenGlyphs) {
+ // Get the id from the old glyph, and use the new strike to lookup
+ // the glyph.
+ GrGlyph::PackedID id = fGlyphs[glyphOffset]->fPackedID;
+ fGlyphs[glyphOffset] = strike->getGlyph(id, info->maskFormat(), lazyCache->get());
+ SkASSERT(id == fGlyphs[glyphOffset]->fPackedID);
+ }
+ glyph = fGlyphs[glyphOffset];
+ SkASSERT(glyph && glyph->fMaskFormat == info->maskFormat());
+
+ if (!fontCache->hasGlyph(glyph) &&
+ !strike->addGlyphToAtlas(target, glyph, lazyCache->get(), info->maskFormat())) {
+ helper->flush();
+ brokenRun = glyphIdx > 0;
+
+ SkDEBUGCODE(bool success =) strike->addGlyphToAtlas(target,
+ glyph,
+ lazyCache->get(),
+ info->maskFormat());
+ SkASSERT(success);
+ }
+ fontCache->addGlyphToBulkAndSetUseToken(info->bulkUseToken(), glyph,
+ target->nextDrawToken());
+ log2Width = fontCache->log2Width(info->maskFormat());
+ log2Height = fontCache->log2Height(info->maskFormat());
+ }
+
+ intptr_t vertex = reinterpret_cast<intptr_t>(fVertices);
+ vertex += info->vertexStartIndex();
+ vertex += vertexStride * glyphIdx * GrAtlasTextBatch::kVerticesPerGlyph;
+ regen_vertices<regenPos, regenCol, regenTexCoords>(vertex, glyph, vertexStride,
+ info->drawAsDistanceFields(), transX,
+ transY, log2Width, log2Height, color);
+ helper->incGlyphCount();
+ }
+
+ // We may have changed the color so update it here
+ info->setColor(color);
+ if (regenTexCoords) {
+ if (regenGlyphs) {
+ info->setStrike(strike);
+ }
+ info->setAtlasGeneration(brokenRun ? GrBatchAtlas::kInvalidAtlasGeneration :
+ fontCache->atlasGeneration(info->maskFormat()));
+ }
+}
+
+enum RegenMask {
+ kNoRegen = 0x0,
+ kRegenPos = 0x1,
+ kRegenCol = 0x2,
+ kRegenTex = 0x4,
+ kRegenGlyph = 0x8 | kRegenTex, // we have to regenerate the texture coords when we regen glyphs
+
+ // combinations
+ kRegenPosCol = kRegenPos | kRegenCol,
+ kRegenPosTex = kRegenPos | kRegenTex,
+ kRegenPosTexGlyph = kRegenPos | kRegenGlyph,
+ kRegenPosColTex = kRegenPos | kRegenCol | kRegenTex,
+ kRegenPosColTexGlyph = kRegenPos | kRegenCol | kRegenGlyph,
+ kRegenColTex = kRegenCol | kRegenTex,
+ kRegenColTexGlyph = kRegenCol | kRegenGlyph,
+};
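+// For example, a blob that has only been translated (transX/transY != 0) resolves to kRegenPos
+// and takes the position-only path below, while an evicted strike forces kRegenGlyph, which also
+// implies kRegenTex so the texture coords are rewritten against the new atlas locations.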
+
+#define REGEN_ARGS target, fontCache, helper, &run, &info, lazyCache, \
+ *glyphCount, vertexStride, color, transX, transY
+
+void GrAtlasTextBlob::regenInBatch(GrDrawBatch::Target* target,
+ GrBatchFontCache* fontCache,
+ GrBlobRegenHelper *helper,
+ int runIndex, int subRunIndex, SkAutoGlyphCache* lazyCache,
+ size_t vertexStride, const SkMatrix& viewMatrix,
+ SkScalar x, SkScalar y, GrColor color,
+ void** vertices, size_t* byteCount, int* glyphCount) {
+ Run& run = fRuns[runIndex];
+ Run::SubRunInfo& info = run.fSubRunInfo[subRunIndex];
+
+ uint64_t currentAtlasGen = fontCache->atlasGeneration(info.maskFormat());
+
+ // Compute translation if any
+ SkScalar transX, transY;
+ info.computeTranslation(viewMatrix, x, y, &transX, &transY);
+
+    // Because the GrBatchFontCache may evict the strike that a blob depends on for generating
+    // its texture coords, we have to track whether or not the strike has been abandoned.
+    // If it hasn't been abandoned, then we can use the GrGlyph*s as-is; otherwise we have to
+    // get the new strike and use that to look up the correct glyphs.
+    // Because we do not have the packed ids, and thus can't look up our glyphs in the
+    // new strike, we instead keep our ref to the old strike and use the packed ids from
+    // it. These ids will still be valid as long as we hold the ref. When we are done
+    // updating our cache of the GrGlyph*s, we drop our ref on the old strike.
+ bool regenerateGlyphs = info.strike()->isAbandoned();
+ bool regenerateTextureCoords = info.atlasGeneration() != currentAtlasGen ||
+ regenerateGlyphs;
+ bool regenerateColors = kARGB_GrMaskFormat != info.maskFormat() &&
+ info.color() != color;
+ bool regeneratePositions = transX != 0.f || transY != 0.f;
+ *glyphCount = info.glyphCount();
+
+ uint32_t regenMaskBits = kNoRegen;
+ regenMaskBits |= regeneratePositions ? kRegenPos : 0;
+ regenMaskBits |= regenerateColors ? kRegenCol : 0;
+ regenMaskBits |= regenerateTextureCoords ? kRegenTex : 0;
+ regenMaskBits |= regenerateGlyphs ? kRegenGlyph : 0;
+ RegenMask regenMask = (RegenMask)regenMaskBits;
+
+ switch (regenMask) {
+ case kRegenPos: this->regenInBatch<true, false, false, false>(REGEN_ARGS); break;
+ case kRegenCol: this->regenInBatch<false, true, false, false>(REGEN_ARGS); break;
+ case kRegenTex: this->regenInBatch<false, false, true, false>(REGEN_ARGS); break;
+ case kRegenGlyph: this->regenInBatch<false, false, true, true>(REGEN_ARGS); break;
+
+ // combinations
+ case kRegenPosCol: this->regenInBatch<true, true, false, false>(REGEN_ARGS); break;
+ case kRegenPosTex: this->regenInBatch<true, false, true, false>(REGEN_ARGS); break;
+ case kRegenPosTexGlyph: this->regenInBatch<true, false, true, true>(REGEN_ARGS); break;
+ case kRegenPosColTex: this->regenInBatch<true, true, true, false>(REGEN_ARGS); break;
+ case kRegenPosColTexGlyph: this->regenInBatch<true, true, true, true>(REGEN_ARGS); break;
+ case kRegenColTex: this->regenInBatch<false, true, true, false>(REGEN_ARGS); break;
+ case kRegenColTexGlyph: this->regenInBatch<false, true, true, true>(REGEN_ARGS); break;
+ case kNoRegen:
+ helper->incGlyphCount(*glyphCount);
+
+ // set use tokens for all of the glyphs in our subrun. This is only valid if we
+ // have a valid atlas generation
+ fontCache->setUseTokenBulk(*info.bulkUseToken(), target->nextDrawToken(),
+ info.maskFormat());
+ break;
+ }
+
+ *byteCount = info.byteCount();
+ *vertices = fVertices + info.vertexStartIndex();
+}
diff --git a/gfx/skia/skia/src/gpu/text/GrAtlasTextContext.cpp b/gfx/skia/skia/src/gpu/text/GrAtlasTextContext.cpp
new file mode 100644
index 000000000..285aea4d2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrAtlasTextContext.cpp
@@ -0,0 +1,436 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "GrAtlasTextContext.h"
+
+#include "GrContext.h"
+#include "GrDrawContext.h"
+#include "GrTextBlobCache.h"
+#include "GrTextUtils.h"
+
+#include "SkDraw.h"
+#include "SkDrawFilter.h"
+#include "SkGrPriv.h"
+
+GrAtlasTextContext::GrAtlasTextContext()
+ : fDistanceAdjustTable(new GrDistanceFieldAdjustTable) {
+}
+
+
+GrAtlasTextContext* GrAtlasTextContext::Create() {
+ return new GrAtlasTextContext();
+}
+
+bool GrAtlasTextContext::canDraw(const SkPaint& skPaint,
+ const SkMatrix& viewMatrix,
+ const SkSurfaceProps& props,
+ const GrShaderCaps& shaderCaps) {
+ return GrTextUtils::CanDrawAsDistanceFields(skPaint, viewMatrix, props, shaderCaps) ||
+ !SkDraw::ShouldDrawTextAsPaths(skPaint, viewMatrix);
+}
+
+GrColor GrAtlasTextContext::ComputeCanonicalColor(const SkPaint& paint, bool lcd) {
+ GrColor canonicalColor = paint.computeLuminanceColor();
+ if (lcd) {
+ // This is the correct computation, but there are tons of cases where LCD can be overridden.
+ // For now we just regenerate if any run in a textblob has LCD.
+ // TODO figure out where all of these overrides are and see if we can incorporate that logic
+ // at a higher level *OR* use sRGB
+ SkASSERT(false);
+ //canonicalColor = SkMaskGamma::CanonicalColor(canonicalColor);
+ } else {
+        // A8, though it can have mixed BMP text; it shouldn't matter because BMP text won't
+        // have gamma-corrected masks anyway, nor color.
+ U8CPU lum = SkComputeLuminance(SkColorGetR(canonicalColor),
+ SkColorGetG(canonicalColor),
+ SkColorGetB(canonicalColor));
+ // reduce to our finite number of bits
+ canonicalColor = SkMaskGamma::CanonicalColor(SkColorSetRGB(lum, lum, lum));
+ }
+ return canonicalColor;
+}
+
+uint32_t GrAtlasTextContext::ComputeScalerContextFlags(GrDrawContext* dc) {
+ // If we're doing gamma-correct rendering, then we can disable the gamma hacks.
+ // Otherwise, leave them on. In either case, we still want the contrast boost:
+ if (dc->isGammaCorrect()) {
+ return SkPaint::kBoostContrast_ScalerContextFlag;
+ } else {
+ return SkPaint::kFakeGammaAndBoostContrast_ScalerContextFlags;
+ }
+}
+
+// TODO if this function ever shows up in profiling, then we can compute this value when the
+// textblob is being built and cache it. However, for the time being textblobs mostly only have 1
+// run so this is not a big deal to compute here.
+bool GrAtlasTextContext::HasLCD(const SkTextBlob* blob) {
+ SkTextBlobRunIterator it(blob);
+ for (; !it.done(); it.next()) {
+ if (it.isLCD()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void GrAtlasTextContext::drawTextBlob(GrContext* context, GrDrawContext* dc,
+ const GrClip& clip, const SkPaint& skPaint,
+ const SkMatrix& viewMatrix,
+ const SkSurfaceProps& props, const SkTextBlob* blob,
+ SkScalar x, SkScalar y,
+ SkDrawFilter* drawFilter, const SkIRect& clipBounds) {
+ // If we have been abandoned, then don't draw
+ if (context->abandoned()) {
+ return;
+ }
+
+ SkAutoTUnref<GrAtlasTextBlob> cacheBlob;
+ SkMaskFilter::BlurRec blurRec;
+ GrAtlasTextBlob::Key key;
+    // It might be worth caching these things, but it's not clear at this time.
+    // TODO for animated mask filters, this will fill up our cache.  We need a safeguard here.
+ const SkMaskFilter* mf = skPaint.getMaskFilter();
+ bool canCache = !(skPaint.getPathEffect() ||
+ (mf && !mf->asABlur(&blurRec)) ||
+ drawFilter);
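+    // So, for example, a blob drawn with a path effect, a non-blur mask filter, or a draw filter
+    // is never cached; it gets an uncached blob below and is rebuilt on every draw.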
+ uint32_t scalerContextFlags = ComputeScalerContextFlags(dc);
+
+ GrTextBlobCache* cache = context->getTextBlobCache();
+ if (canCache) {
+ bool hasLCD = HasLCD(blob);
+
+ // We canonicalize all non-lcd draws to use kUnknown_SkPixelGeometry
+ SkPixelGeometry pixelGeometry = hasLCD ? props.pixelGeometry() :
+ kUnknown_SkPixelGeometry;
+
+ // TODO we want to figure out a way to be able to use the canonical color on LCD text,
+ // see the note on ComputeCanonicalColor above. We pick a dummy value for LCD text to
+ // ensure we always match the same key
+ GrColor canonicalColor = hasLCD ? SK_ColorTRANSPARENT :
+ ComputeCanonicalColor(skPaint, hasLCD);
+
+ key.fPixelGeometry = pixelGeometry;
+ key.fUniqueID = blob->uniqueID();
+ key.fStyle = skPaint.getStyle();
+ key.fHasBlur = SkToBool(mf);
+ key.fCanonicalColor = canonicalColor;
+ key.fScalerContextFlags = scalerContextFlags;
+ cacheBlob.reset(SkSafeRef(cache->find(key)));
+ }
+
+ // Though for the time being runs in the textblob can override the paint, they only touch font
+ // info.
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(context, dc, skPaint, viewMatrix, &grPaint)) {
+ return;
+ }
+
+ if (cacheBlob) {
+ if (cacheBlob->mustRegenerate(skPaint, grPaint.getColor(), blurRec, viewMatrix, x, y)) {
+ // We have to remake the blob because changes may invalidate our masks.
+            // TODO we could probably get away with reusing it most of the time if the pointer
+            // is unique, but we'd have to clear the subrun information.
+ cache->remove(cacheBlob);
+ cacheBlob.reset(SkRef(cache->createCachedBlob(blob, key, blurRec, skPaint)));
+ RegenerateTextBlob(cacheBlob, context->getBatchFontCache(),
+ *context->caps()->shaderCaps(), skPaint, grPaint.getColor(),
+ scalerContextFlags, viewMatrix, props,
+ blob, x, y, drawFilter);
+ } else {
+ cache->makeMRU(cacheBlob);
+
+ if (CACHE_SANITY_CHECK) {
+ int glyphCount = 0;
+ int runCount = 0;
+ GrTextBlobCache::BlobGlyphCount(&glyphCount, &runCount, blob);
+ SkAutoTUnref<GrAtlasTextBlob> sanityBlob(cache->createBlob(glyphCount, runCount));
+ sanityBlob->setupKey(key, blurRec, skPaint);
+ RegenerateTextBlob(sanityBlob, context->getBatchFontCache(),
+ *context->caps()->shaderCaps(), skPaint,
+ grPaint.getColor(), scalerContextFlags, viewMatrix, props,
+ blob, x, y, drawFilter);
+ GrAtlasTextBlob::AssertEqual(*sanityBlob, *cacheBlob);
+ }
+ }
+ } else {
+ if (canCache) {
+ cacheBlob.reset(SkRef(cache->createCachedBlob(blob, key, blurRec, skPaint)));
+ } else {
+ cacheBlob.reset(cache->createBlob(blob));
+ }
+ RegenerateTextBlob(cacheBlob, context->getBatchFontCache(),
+ *context->caps()->shaderCaps(), skPaint, grPaint.getColor(),
+ scalerContextFlags, viewMatrix, props,
+ blob, x, y, drawFilter);
+ }
+
+ cacheBlob->flushCached(context, dc, blob, props, fDistanceAdjustTable, skPaint,
+ grPaint, drawFilter, clip, viewMatrix, clipBounds, x, y);
+}
+
+void GrAtlasTextContext::RegenerateTextBlob(GrAtlasTextBlob* cacheBlob,
+ GrBatchFontCache* fontCache,
+ const GrShaderCaps& shaderCaps,
+ const SkPaint& skPaint, GrColor color,
+ uint32_t scalerContextFlags,
+ const SkMatrix& viewMatrix,
+ const SkSurfaceProps& props,
+ const SkTextBlob* blob, SkScalar x, SkScalar y,
+ SkDrawFilter* drawFilter) {
+ cacheBlob->initReusableBlob(color, viewMatrix, x, y);
+
+ // Regenerate textblob
+ SkPaint runPaint = skPaint;
+ SkTextBlobRunIterator it(blob);
+ for (int run = 0; !it.done(); it.next(), run++) {
+ int glyphCount = it.glyphCount();
+ size_t textLen = glyphCount * sizeof(uint16_t);
+ const SkPoint& offset = it.offset();
+ // applyFontToPaint() always overwrites the exact same attributes,
+ // so it is safe to not re-seed the paint for this reason.
+ it.applyFontToPaint(&runPaint);
+
+ if (drawFilter && !drawFilter->filter(&runPaint, SkDrawFilter::kText_Type)) {
+ // A false return from filter() means we should abort the current draw.
+ runPaint = skPaint;
+ continue;
+ }
+
+ runPaint.setFlags(GrTextUtils::FilterTextFlags(props, runPaint));
+
+ cacheBlob->push_back_run(run);
+
+ if (GrTextUtils::CanDrawAsDistanceFields(runPaint, viewMatrix, props, shaderCaps)) {
+ switch (it.positioning()) {
+ case SkTextBlob::kDefault_Positioning: {
+ GrTextUtils::DrawDFText(cacheBlob, run, fontCache,
+ props, runPaint, color, scalerContextFlags,
+ viewMatrix, (const char *)it.glyphs(), textLen,
+ x + offset.x(), y + offset.y());
+ break;
+ }
+ case SkTextBlob::kHorizontal_Positioning: {
+ SkPoint dfOffset = SkPoint::Make(x, y + offset.y());
+ GrTextUtils::DrawDFPosText(cacheBlob, run, fontCache,
+ props, runPaint, color, scalerContextFlags,
+ viewMatrix, (const char*)it.glyphs(), textLen,
+ it.pos(), 1, dfOffset);
+ break;
+ }
+ case SkTextBlob::kFull_Positioning: {
+ SkPoint dfOffset = SkPoint::Make(x, y);
+ GrTextUtils::DrawDFPosText(cacheBlob, run, fontCache,
+ props, runPaint, color, scalerContextFlags,
+ viewMatrix, (const char*)it.glyphs(), textLen,
+ it.pos(), 2, dfOffset);
+ break;
+ }
+ }
+ } else if (SkDraw::ShouldDrawTextAsPaths(runPaint, viewMatrix)) {
+ cacheBlob->setRunDrawAsPaths(run);
+ } else {
+ switch (it.positioning()) {
+ case SkTextBlob::kDefault_Positioning:
+ GrTextUtils::DrawBmpText(cacheBlob, run, fontCache,
+ props, runPaint, color, scalerContextFlags,
+ viewMatrix, (const char *)it.glyphs(), textLen,
+ x + offset.x(), y + offset.y());
+ break;
+ case SkTextBlob::kHorizontal_Positioning:
+ GrTextUtils::DrawBmpPosText(cacheBlob, run, fontCache,
+ props, runPaint, color, scalerContextFlags,
+ viewMatrix, (const char*)it.glyphs(), textLen,
+ it.pos(), 1, SkPoint::Make(x, y + offset.y()));
+ break;
+ case SkTextBlob::kFull_Positioning:
+ GrTextUtils::DrawBmpPosText(cacheBlob, run, fontCache,
+ props, runPaint, color, scalerContextFlags,
+ viewMatrix, (const char*)it.glyphs(), textLen,
+ it.pos(), 2, SkPoint::Make(x, y));
+ break;
+ }
+ }
+
+ if (drawFilter) {
+ // A draw filter may change the paint arbitrarily, so we must re-seed in this case.
+ runPaint = skPaint;
+ }
+ }
+}
+
+inline GrAtlasTextBlob*
+GrAtlasTextContext::CreateDrawTextBlob(GrTextBlobCache* blobCache,
+ GrBatchFontCache* fontCache,
+ const GrShaderCaps& shaderCaps,
+ const GrPaint& paint,
+ const SkPaint& skPaint,
+ uint32_t scalerContextFlags,
+ const SkMatrix& viewMatrix,
+ const SkSurfaceProps& props,
+ const char text[], size_t byteLength,
+ SkScalar x, SkScalar y) {
+ int glyphCount = skPaint.countText(text, byteLength);
+
+ GrAtlasTextBlob* blob = blobCache->createBlob(glyphCount, 1);
+ blob->initThrowawayBlob(viewMatrix, x, y);
+
+ if (GrTextUtils::CanDrawAsDistanceFields(skPaint, viewMatrix, props, shaderCaps)) {
+ GrTextUtils::DrawDFText(blob, 0, fontCache, props, skPaint, paint.getColor(),
+ scalerContextFlags, viewMatrix, text, byteLength, x, y);
+ } else {
+ GrTextUtils::DrawBmpText(blob, 0, fontCache, props, skPaint, paint.getColor(),
+ scalerContextFlags, viewMatrix, text, byteLength, x, y);
+ }
+ return blob;
+}
+
+inline GrAtlasTextBlob*
+GrAtlasTextContext::CreateDrawPosTextBlob(GrTextBlobCache* blobCache, GrBatchFontCache* fontCache,
+ const GrShaderCaps& shaderCaps, const GrPaint& paint,
+ const SkPaint& skPaint, uint32_t scalerContextFlags,
+ const SkMatrix& viewMatrix, const SkSurfaceProps& props,
+ const char text[], size_t byteLength,
+ const SkScalar pos[], int scalarsPerPosition,
+ const SkPoint& offset) {
+ int glyphCount = skPaint.countText(text, byteLength);
+
+ GrAtlasTextBlob* blob = blobCache->createBlob(glyphCount, 1);
+ blob->initThrowawayBlob(viewMatrix, offset.x(), offset.y());
+
+ if (GrTextUtils::CanDrawAsDistanceFields(skPaint, viewMatrix, props, shaderCaps)) {
+ GrTextUtils::DrawDFPosText(blob, 0, fontCache, props,
+ skPaint, paint.getColor(), scalerContextFlags, viewMatrix, text,
+ byteLength, pos, scalarsPerPosition, offset);
+ } else {
+ GrTextUtils::DrawBmpPosText(blob, 0, fontCache, props, skPaint,
+ paint.getColor(), scalerContextFlags, viewMatrix, text,
+ byteLength, pos, scalarsPerPosition, offset);
+ }
+ return blob;
+}
+
+void GrAtlasTextContext::drawText(GrContext* context,
+ GrDrawContext* dc,
+ const GrClip& clip,
+ const GrPaint& paint, const SkPaint& skPaint,
+ const SkMatrix& viewMatrix,
+ const SkSurfaceProps& props,
+ const char text[], size_t byteLength,
+ SkScalar x, SkScalar y, const SkIRect& regionClipBounds) {
+ if (context->abandoned()) {
+ return;
+ } else if (this->canDraw(skPaint, viewMatrix, props, *context->caps()->shaderCaps())) {
+ SkAutoTUnref<GrAtlasTextBlob> blob(
+ CreateDrawTextBlob(context->getTextBlobCache(), context->getBatchFontCache(),
+ *context->caps()->shaderCaps(),
+ paint, skPaint,
+ ComputeScalerContextFlags(dc),
+ viewMatrix, props,
+ text, byteLength, x, y));
+ blob->flushThrowaway(context, dc, props, fDistanceAdjustTable, skPaint, paint,
+ clip, viewMatrix, regionClipBounds, x, y);
+ return;
+ }
+
+ // fall back to drawing as a path
+ GrTextUtils::DrawTextAsPath(context, dc, clip, skPaint, viewMatrix, text, byteLength, x, y,
+ regionClipBounds);
+}
+
+void GrAtlasTextContext::drawPosText(GrContext* context,
+ GrDrawContext* dc,
+ const GrClip& clip,
+ const GrPaint& paint, const SkPaint& skPaint,
+ const SkMatrix& viewMatrix,
+ const SkSurfaceProps& props,
+ const char text[], size_t byteLength,
+ const SkScalar pos[], int scalarsPerPosition,
+ const SkPoint& offset, const SkIRect& regionClipBounds) {
+ if (context->abandoned()) {
+ return;
+ } else if (this->canDraw(skPaint, viewMatrix, props, *context->caps()->shaderCaps())) {
+ SkAutoTUnref<GrAtlasTextBlob> blob(
+ CreateDrawPosTextBlob(context->getTextBlobCache(),
+ context->getBatchFontCache(),
+ *context->caps()->shaderCaps(),
+ paint, skPaint,
+ ComputeScalerContextFlags(dc),
+ viewMatrix, props,
+ text, byteLength,
+ pos, scalarsPerPosition,
+ offset));
+ blob->flushThrowaway(context, dc, props, fDistanceAdjustTable, skPaint, paint,
+ clip, viewMatrix, regionClipBounds, offset.fX, offset.fY);
+ return;
+ }
+
+ // fall back to drawing as a path
+ GrTextUtils::DrawPosTextAsPath(context, dc, props, clip, skPaint, viewMatrix, text,
+ byteLength, pos, scalarsPerPosition, offset, regionClipBounds);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_TEST_UTILS
+
+DRAW_BATCH_TEST_DEFINE(TextBlobBatch) {
+ static uint32_t gContextID = SK_InvalidGenID;
+ static GrAtlasTextContext* gTextContext = nullptr;
+ static SkSurfaceProps gSurfaceProps(SkSurfaceProps::kLegacyFontHost_InitType);
+
+ if (context->uniqueID() != gContextID) {
+ gContextID = context->uniqueID();
+ delete gTextContext;
+
+ gTextContext = GrAtlasTextContext::Create();
+ }
+
+ // Setup dummy SkPaint / GrPaint / GrDrawContext
+ sk_sp<GrDrawContext> drawContext(context->makeDrawContext(SkBackingFit::kApprox, 1024, 1024,
+ kSkia8888_GrPixelConfig, nullptr));
+
+ GrColor color = GrRandomColor(random);
+ SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
+ SkPaint skPaint;
+ skPaint.setColor(color);
+ skPaint.setLCDRenderText(random->nextBool());
+ skPaint.setAntiAlias(skPaint.isLCDRenderText() ? true : random->nextBool());
+ skPaint.setSubpixelText(random->nextBool());
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(context, drawContext.get(), skPaint, viewMatrix, &grPaint)) {
+ SkFAIL("couldn't convert paint\n");
+ }
+
+ const char* text = "The quick brown fox jumps over the lazy dog.";
+ int textLen = (int)strlen(text);
+
+ // create some random x/y offsets, including negative offsets
+ static const int kMaxTrans = 1024;
+ int xPos = (random->nextU() % 2) * 2 - 1;
+ int yPos = (random->nextU() % 2) * 2 - 1;
+ int xInt = (random->nextU() % kMaxTrans) * xPos;
+ int yInt = (random->nextU() % kMaxTrans) * yPos;
+ SkScalar x = SkIntToScalar(xInt);
+ SkScalar y = SkIntToScalar(yInt);
+
+ // right now we don't handle textblobs, nor do we handle drawPosText. Since we only
+ // intend to test the batch with this unit test, that is okay.
+ SkAutoTUnref<GrAtlasTextBlob> blob(
+ GrAtlasTextContext::CreateDrawTextBlob(context->getTextBlobCache(),
+ context->getBatchFontCache(),
+ *context->caps()->shaderCaps(), grPaint, skPaint,
+ GrAtlasTextContext::kTextBlobBatchScalerContextFlags,
+ viewMatrix,
+ gSurfaceProps, text,
+ static_cast<size_t>(textLen), x, y));
+
+ return blob->test_createBatch(textLen, 0, 0, viewMatrix, x, y, color, skPaint,
+ gSurfaceProps, gTextContext->dfAdjustTable(),
+ context->getBatchFontCache());
+}
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/text/GrAtlasTextContext.h b/gfx/skia/skia/src/gpu/text/GrAtlasTextContext.h
new file mode 100644
index 000000000..5bf7662c4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrAtlasTextContext.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAtlasTextContext_DEFINED
+#define GrAtlasTextContext_DEFINED
+
+#include "GrAtlasTextBlob.h"
+#include "GrDistanceFieldAdjustTable.h"
+#include "GrGeometryProcessor.h"
+#include "SkTextBlobRunIterator.h"
+
+#ifdef GR_TEST_UTILS
+#include "GrBatchTest.h"
+#endif
+
+class GrDrawBatch;
+class GrDrawContext;
+class GrPipelineBuilder;
+class GrTextBlobCache;
+class SkGlyph;
+
+/*
+ * Renders text using some kind of an atlas, i.e. BitmapText or DistanceField text.
+ */
+class GrAtlasTextContext {
+public:
+ static GrAtlasTextContext* Create();
+
+ bool canDraw(const SkPaint&, const SkMatrix& viewMatrix, const SkSurfaceProps&,
+ const GrShaderCaps&);
+ void drawText(GrContext*, GrDrawContext*, const GrClip&, const GrPaint&, const SkPaint&,
+ const SkMatrix& viewMatrix, const SkSurfaceProps&, const char text[],
+ size_t byteLength, SkScalar x, SkScalar y,
+ const SkIRect& regionClipBounds);
+ void drawPosText(GrContext*, GrDrawContext*, const GrClip&, const GrPaint&,
+ const SkPaint&, const SkMatrix& viewMatrix, const SkSurfaceProps&,
+ const char text[], size_t byteLength,
+ const SkScalar pos[], int scalarsPerPosition,
+ const SkPoint& offset, const SkIRect& regionClipBounds);
+ void drawTextBlob(GrContext*, GrDrawContext*, const GrClip&, const SkPaint&,
+ const SkMatrix& viewMatrix, const SkSurfaceProps&, const SkTextBlob*,
+ SkScalar x, SkScalar y,
+ SkDrawFilter*, const SkIRect& clipBounds);
+
+private:
+ GrAtlasTextContext();
+
+ // sets up the descriptor on the blob and returns a detached cache. Client must attach
+ inline static GrColor ComputeCanonicalColor(const SkPaint&, bool lcd);
+ // Determines if we need to use fake gamma (and contrast boost):
+ inline static uint32_t ComputeScalerContextFlags(GrDrawContext*);
+ static void RegenerateTextBlob(GrAtlasTextBlob* bmp,
+ GrBatchFontCache*,
+ const GrShaderCaps&,
+ const SkPaint& skPaint, GrColor,
+ uint32_t scalerContextFlags,
+ const SkMatrix& viewMatrix,
+ const SkSurfaceProps&,
+ const SkTextBlob* blob, SkScalar x, SkScalar y,
+ SkDrawFilter* drawFilter);
+ inline static bool HasLCD(const SkTextBlob*);
+
+ static inline GrAtlasTextBlob* CreateDrawTextBlob(GrTextBlobCache*,
+ GrBatchFontCache*, const GrShaderCaps&,
+ const GrPaint&,
+ const SkPaint&,
+ uint32_t scalerContextFlags,
+ const SkMatrix& viewMatrix,
+ const SkSurfaceProps&,
+ const char text[], size_t byteLength,
+ SkScalar x, SkScalar y);
+ static inline GrAtlasTextBlob* CreateDrawPosTextBlob(GrTextBlobCache*, GrBatchFontCache*,
+ const GrShaderCaps&,
+ const GrPaint&,
+ const SkPaint&,
+ uint32_t scalerContextFlags,
+ const SkMatrix& viewMatrix,
+ const SkSurfaceProps&,
+ const char text[], size_t byteLength,
+ const SkScalar pos[],
+ int scalarsPerPosition,
+ const SkPoint& offset);
+ const GrDistanceFieldAdjustTable* dfAdjustTable() const { return fDistanceAdjustTable; }
+
+ SkAutoTUnref<const GrDistanceFieldAdjustTable> fDistanceAdjustTable;
+
+#ifdef GR_TEST_UTILS
+ static const uint32_t kTextBlobBatchScalerContextFlags =
+ SkPaint::kFakeGammaAndBoostContrast_ScalerContextFlags;
+ DRAW_BATCH_TEST_FRIEND(TextBlobBatch);
+#endif
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/text/GrBatchFontCache.cpp b/gfx/skia/skia/src/gpu/text/GrBatchFontCache.cpp
new file mode 100644
index 000000000..3e212cd17
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrBatchFontCache.cpp
@@ -0,0 +1,390 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrBatchFontCache.h"
+#include "GrContext.h"
+#include "GrGpu.h"
+#include "GrRectanizer.h"
+#include "GrResourceProvider.h"
+#include "GrSurfacePriv.h"
+#include "SkString.h"
+
+#include "SkDistanceFieldGen.h"
+
+bool GrBatchFontCache::initAtlas(GrMaskFormat format) {
+ int index = MaskFormatToAtlasIndex(format);
+ if (!fAtlases[index]) {
+ GrPixelConfig config = MaskFormatToPixelConfig(format, *fContext->caps());
+ int width = fAtlasConfigs[index].fWidth;
+ int height = fAtlasConfigs[index].fHeight;
+ int numPlotsX = fAtlasConfigs[index].numPlotsX();
+ int numPlotsY = fAtlasConfigs[index].numPlotsY();
+
+ fAtlases[index] =
+ fContext->resourceProvider()->createAtlas(config, width, height,
+ numPlotsX, numPlotsY,
+ &GrBatchFontCache::HandleEviction,
+ (void*)this);
+ if (!fAtlases[index]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+GrBatchFontCache::GrBatchFontCache(GrContext* context)
+ : fContext(context)
+ , fPreserveStrike(nullptr) {
+ for (int i = 0; i < kMaskFormatCount; ++i) {
+ fAtlases[i] = nullptr;
+ }
+
+ // setup default atlas configs
+ fAtlasConfigs[kA8_GrMaskFormat].fWidth = 2048;
+ fAtlasConfigs[kA8_GrMaskFormat].fHeight = 2048;
+ fAtlasConfigs[kA8_GrMaskFormat].fLog2Width = 11;
+ fAtlasConfigs[kA8_GrMaskFormat].fLog2Height = 11;
+ fAtlasConfigs[kA8_GrMaskFormat].fPlotWidth = 512;
+ fAtlasConfigs[kA8_GrMaskFormat].fPlotHeight = 256;
+
+ fAtlasConfigs[kA565_GrMaskFormat].fWidth = 1024;
+ fAtlasConfigs[kA565_GrMaskFormat].fHeight = 2048;
+ fAtlasConfigs[kA565_GrMaskFormat].fLog2Width = 10;
+ fAtlasConfigs[kA565_GrMaskFormat].fLog2Height = 11;
+ fAtlasConfigs[kA565_GrMaskFormat].fPlotWidth = 256;
+ fAtlasConfigs[kA565_GrMaskFormat].fPlotHeight = 256;
+
+ fAtlasConfigs[kARGB_GrMaskFormat].fWidth = 1024;
+ fAtlasConfigs[kARGB_GrMaskFormat].fHeight = 2048;
+ fAtlasConfigs[kARGB_GrMaskFormat].fLog2Width = 10;
+ fAtlasConfigs[kARGB_GrMaskFormat].fLog2Height = 11;
+ fAtlasConfigs[kARGB_GrMaskFormat].fPlotWidth = 256;
+ fAtlasConfigs[kARGB_GrMaskFormat].fPlotHeight = 256;
+}
+
+GrBatchFontCache::~GrBatchFontCache() {
+ StrikeHash::Iter iter(&fCache);
+ while (!iter.done()) {
+ (*iter).fIsAbandoned = true;
+ (*iter).unref();
+ ++iter;
+ }
+ for (int i = 0; i < kMaskFormatCount; ++i) {
+ delete fAtlases[i];
+ }
+}
+
+void GrBatchFontCache::freeAll() {
+ StrikeHash::Iter iter(&fCache);
+ while (!iter.done()) {
+ (*iter).fIsAbandoned = true;
+ (*iter).unref();
+ ++iter;
+ }
+ fCache.rewind();
+ for (int i = 0; i < kMaskFormatCount; ++i) {
+ delete fAtlases[i];
+ fAtlases[i] = nullptr;
+ }
+}
+
+void GrBatchFontCache::HandleEviction(GrBatchAtlas::AtlasID id, void* ptr) {
+ GrBatchFontCache* fontCache = reinterpret_cast<GrBatchFontCache*>(ptr);
+
+ StrikeHash::Iter iter(&fontCache->fCache);
+ for (; !iter.done(); ++iter) {
+ GrBatchTextStrike* strike = &*iter;
+ strike->removeID(id);
+
+ // clear out any empty strikes. We will preserve the strike whose call to addToAtlas
+ // triggered the eviction
+ if (strike != fontCache->fPreserveStrike && 0 == strike->fAtlasedGlyphs) {
+ fontCache->fCache.remove(GrBatchTextStrike::GetKey(*strike));
+ strike->fIsAbandoned = true;
+ strike->unref();
+ }
+ }
+}
+
+void GrBatchFontCache::dump() const {
+ static int gDumpCount = 0;
+ for (int i = 0; i < kMaskFormatCount; ++i) {
+ if (fAtlases[i]) {
+ GrTexture* texture = fAtlases[i]->getTexture();
+ if (texture) {
+ SkString filename;
+#ifdef SK_BUILD_FOR_ANDROID
+ filename.printf("/sdcard/fontcache_%d%d.png", gDumpCount, i);
+#else
+ filename.printf("fontcache_%d%d.png", gDumpCount, i);
+#endif
+ texture->surfacePriv().savePixels(filename.c_str());
+ }
+ }
+ }
+ ++gDumpCount;
+}
+
+void GrBatchFontCache::setAtlasSizes_ForTesting(const GrBatchAtlasConfig configs[3]) {
+    // Delete any old atlases. This should be safe to do as long as we are not in the middle of
+    // a flush.
+ for (int i = 0; i < kMaskFormatCount; i++) {
+ if (fAtlases[i]) {
+ delete fAtlases[i];
+ fAtlases[i] = nullptr;
+ }
+ }
+ memcpy(fAtlasConfigs, configs, sizeof(fAtlasConfigs));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline GrMaskFormat get_packed_glyph_mask_format(const SkGlyph& glyph) {
+ SkMask::Format format = static_cast<SkMask::Format>(glyph.fMaskFormat);
+ switch (format) {
+ case SkMask::kBW_Format:
+ // fall through to kA8 -- we store BW glyphs in our 8-bit cache
+ case SkMask::kA8_Format:
+ return kA8_GrMaskFormat;
+ case SkMask::kLCD16_Format:
+ return kA565_GrMaskFormat;
+ case SkMask::kARGB32_Format:
+ return kARGB_GrMaskFormat;
+ default:
+ SkDEBUGFAIL("unsupported SkMask::Format");
+ return kA8_GrMaskFormat;
+ }
+}
+
+static inline bool get_packed_glyph_bounds(SkGlyphCache* cache, const SkGlyph& glyph,
+ SkIRect* bounds) {
+#if 1
+ // crbug:510931
+ // Retrieving the image from the cache can actually change the mask format.
+ cache->findImage(glyph);
+#endif
+ bounds->setXYWH(glyph.fLeft, glyph.fTop, glyph.fWidth, glyph.fHeight);
+
+ return true;
+}
+
+static inline bool get_packed_glyph_df_bounds(SkGlyphCache* cache, const SkGlyph& glyph,
+ SkIRect* bounds) {
+#if 1
+ // crbug:510931
+ // Retrieving the image from the cache can actually change the mask format.
+ cache->findImage(glyph);
+#endif
+ bounds->setXYWH(glyph.fLeft, glyph.fTop, glyph.fWidth, glyph.fHeight);
+ bounds->outset(SK_DistanceFieldPad, SK_DistanceFieldPad);
+
+ return true;
+}
+
+// expands each bit in a bitmask to 0 or ~0 of type INT_TYPE. Used to expand a BW glyph mask to
+// A8, RGB565, or RGBA8888.
+template <typename INT_TYPE>
+static void expand_bits(INT_TYPE* dst,
+ const uint8_t* src,
+ int width,
+ int height,
+ int dstRowBytes,
+ int srcRowBytes) {
+ for (int i = 0; i < height; ++i) {
+ int rowWritesLeft = width;
+ const uint8_t* s = src;
+ INT_TYPE* d = dst;
+ while (rowWritesLeft > 0) {
+ unsigned mask = *s++;
+ for (int i = 7; i >= 0 && rowWritesLeft; --i, --rowWritesLeft) {
+ *d++ = (mask & (1 << i)) ? (INT_TYPE)(~0UL) : 0;
+ }
+ }
+ dst = reinterpret_cast<INT_TYPE*>(reinterpret_cast<intptr_t>(dst) + dstRowBytes);
+ src += srcRowBytes;
+ }
+}
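+// For example, expanding the BW source byte 0b10110000 with width == 4 writes the four values
+// ~0, 0, ~0, ~0 into dst (one INT_TYPE per pixel), which is how a 1-bit mask becomes A8/565 data.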
+
+static bool get_packed_glyph_image(SkGlyphCache* cache, const SkGlyph& glyph, int width,
+ int height, int dstRB, GrMaskFormat expectedMaskFormat,
+ void* dst) {
+ SkASSERT(glyph.fWidth == width);
+ SkASSERT(glyph.fHeight == height);
+ const void* src = cache->findImage(glyph);
+ if (nullptr == src) {
+ return false;
+ }
+
+ // crbug:510931
+ // Retrieving the image from the cache can actually change the mask format. This case is very
+ // uncommon so for now we just draw a clear box for these glyphs.
+ if (get_packed_glyph_mask_format(glyph) != expectedMaskFormat) {
+ const int bpp = GrMaskFormatBytesPerPixel(expectedMaskFormat);
+ for (int y = 0; y < height; y++) {
+ sk_bzero(dst, width * bpp);
+ dst = (char*)dst + dstRB;
+ }
+ return true;
+ }
+
+ int srcRB = glyph.rowBytes();
+ // The windows font host sometimes has BW glyphs in a non-BW strike. So it is important here to
+ // check the glyph's format, not the strike's format, and to be able to convert to any of the
+ // GrMaskFormats.
+ if (SkMask::kBW_Format == glyph.fMaskFormat) {
+ // expand bits to our mask type
+ const uint8_t* bits = reinterpret_cast<const uint8_t*>(src);
+ switch (expectedMaskFormat) {
+ case kA8_GrMaskFormat:{
+ uint8_t* bytes = reinterpret_cast<uint8_t*>(dst);
+ expand_bits(bytes, bits, width, height, dstRB, srcRB);
+ break;
+ }
+ case kA565_GrMaskFormat: {
+ uint16_t* rgb565 = reinterpret_cast<uint16_t*>(dst);
+ expand_bits(rgb565, bits, width, height, dstRB, srcRB);
+ break;
+ }
+ default:
+ SkFAIL("Invalid GrMaskFormat");
+ }
+ } else if (srcRB == dstRB) {
+ memcpy(dst, src, dstRB * height);
+ } else {
+ const int bbp = GrMaskFormatBytesPerPixel(expectedMaskFormat);
+ for (int y = 0; y < height; y++) {
+ memcpy(dst, src, width * bbp);
+ src = (const char*)src + srcRB;
+ dst = (char*)dst + dstRB;
+ }
+ }
+ return true;
+}
+
+static bool get_packed_glyph_df_image(SkGlyphCache* cache, const SkGlyph& glyph,
+ int width, int height, void* dst) {
+ SkASSERT(glyph.fWidth + 2*SK_DistanceFieldPad == width);
+ SkASSERT(glyph.fHeight + 2*SK_DistanceFieldPad == height);
+ const void* image = cache->findImage(glyph);
+ if (nullptr == image) {
+ return false;
+ }
+ // now generate the distance field
+ SkASSERT(dst);
+ SkMask::Format maskFormat = static_cast<SkMask::Format>(glyph.fMaskFormat);
+ if (SkMask::kA8_Format == maskFormat) {
+ // make the distance field from the image
+ SkGenerateDistanceFieldFromA8Image((unsigned char*)dst,
+ (unsigned char*)image,
+ glyph.fWidth, glyph.fHeight,
+ glyph.rowBytes());
+ } else if (SkMask::kBW_Format == maskFormat) {
+ // make the distance field from the image
+ SkGenerateDistanceFieldFromBWImage((unsigned char*)dst,
+ (unsigned char*)image,
+ glyph.fWidth, glyph.fHeight,
+ glyph.rowBytes());
+ } else {
+ return false;
+ }
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/*
+ The text strike is specific to a given font/style/matrix setup, which is
+ represented by the GrHostFontScaler object we are given in getGlyph().
+
+    We map a 32-bit glyphID to a GrGlyph record, which in turn points to an
+    atlas and a position within that texture.
+ */
+
+GrBatchTextStrike::GrBatchTextStrike(GrBatchFontCache* owner, const SkDescriptor& key)
+ : fFontScalerKey(key)
+ , fPool(9/*start allocations at 512 bytes*/)
+ , fBatchFontCache(owner) // no need to ref, it won't go away before we do
+ , fAtlasedGlyphs(0)
+ , fIsAbandoned(false) {}
+
+GrBatchTextStrike::~GrBatchTextStrike() {
+ SkTDynamicHash<GrGlyph, GrGlyph::PackedID>::Iter iter(&fCache);
+ while (!iter.done()) {
+ (*iter).reset();
+ ++iter;
+ }
+}
+
+GrGlyph* GrBatchTextStrike::generateGlyph(const SkGlyph& skGlyph, GrGlyph::PackedID packed,
+ SkGlyphCache* cache) {
+ SkIRect bounds;
+ if (GrGlyph::kDistance_MaskStyle == GrGlyph::UnpackMaskStyle(packed)) {
+ if (!get_packed_glyph_df_bounds(cache, skGlyph, &bounds)) {
+ return nullptr;
+ }
+ } else {
+ if (!get_packed_glyph_bounds(cache, skGlyph, &bounds)) {
+ return nullptr;
+ }
+ }
+ GrMaskFormat format = get_packed_glyph_mask_format(skGlyph);
+
+ GrGlyph* glyph = (GrGlyph*)fPool.alloc(sizeof(GrGlyph));
+ glyph->init(packed, bounds, format);
+ fCache.add(glyph);
+ return glyph;
+}
+
+void GrBatchTextStrike::removeID(GrBatchAtlas::AtlasID id) {
+ SkTDynamicHash<GrGlyph, GrGlyph::PackedID>::Iter iter(&fCache);
+ while (!iter.done()) {
+ if (id == (*iter).fID) {
+ (*iter).fID = GrBatchAtlas::kInvalidAtlasID;
+ fAtlasedGlyphs--;
+ SkASSERT(fAtlasedGlyphs >= 0);
+ }
+ ++iter;
+ }
+}
+
+bool GrBatchTextStrike::addGlyphToAtlas(GrDrawBatch::Target* target,
+ GrGlyph* glyph,
+ SkGlyphCache* cache,
+ GrMaskFormat expectedMaskFormat) {
+ SkASSERT(glyph);
+ SkASSERT(cache);
+ SkASSERT(fCache.find(glyph->fPackedID));
+
+ int bytesPerPixel = GrMaskFormatBytesPerPixel(expectedMaskFormat);
+
+ size_t size = glyph->fBounds.area() * bytesPerPixel;
+ SkAutoSMalloc<1024> storage(size);
+
+ const SkGlyph& skGlyph = GrToSkGlyph(cache, glyph->fPackedID);
+ if (GrGlyph::kDistance_MaskStyle == GrGlyph::UnpackMaskStyle(glyph->fPackedID)) {
+ if (!get_packed_glyph_df_image(cache, skGlyph, glyph->width(), glyph->height(),
+ storage.get())) {
+ return false;
+ }
+ } else {
+ if (!get_packed_glyph_image(cache, skGlyph, glyph->width(), glyph->height(),
+ glyph->width() * bytesPerPixel, expectedMaskFormat,
+ storage.get())) {
+ return false;
+ }
+ }
+
+ bool success = fBatchFontCache->addToAtlas(this, &glyph->fID, target, expectedMaskFormat,
+ glyph->width(), glyph->height(),
+ storage.get(), &glyph->fAtlasLocation);
+ if (success) {
+ SkASSERT(GrBatchAtlas::kInvalidAtlasID != glyph->fID);
+ fAtlasedGlyphs++;
+ }
+ return success;
+}
diff --git a/gfx/skia/skia/src/gpu/text/GrBatchFontCache.h b/gfx/skia/skia/src/gpu/text/GrBatchFontCache.h
new file mode 100644
index 000000000..9e08c5303
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrBatchFontCache.h
@@ -0,0 +1,241 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBatchFontCache_DEFINED
+#define GrBatchFontCache_DEFINED
+
+#include "GrBatchAtlas.h"
+#include "GrCaps.h"
+#include "GrGlyph.h"
+#include "SkGlyphCache.h"
+#include "SkTDynamicHash.h"
+#include "SkVarAlloc.h"
+
+class GrBatchFontCache;
+class GrGpu;
+
+/**
+ * The GrBatchTextStrike manages a pool of CPU backing memory for GrGlyphs. This backing memory
+ * is indexed by a PackedID and SkGlyphCache. The SkGlyphCache is what actually creates the mask.
+ * The GrBatchTextStrike may outlive the generating SkGlyphCache. However, it retains a copy
+ * of its SkDescriptor as a key to access (or regenerate) the SkGlyphCache. GrBatchTextStrikes are
+ * created by and owned by a GrBatchFontCache.
+ */
+class GrBatchTextStrike : public SkNVRefCnt<GrBatchTextStrike> {
+public:
+ /** Owner is the cache that owns this strike. */
+ GrBatchTextStrike(GrBatchFontCache* owner, const SkDescriptor& fontScalerKey);
+ ~GrBatchTextStrike();
+
+ inline GrGlyph* getGlyph(const SkGlyph& skGlyph, GrGlyph::PackedID packed,
+ SkGlyphCache* cache) {
+ GrGlyph* glyph = fCache.find(packed);
+ if (nullptr == glyph) {
+ glyph = this->generateGlyph(skGlyph, packed, cache);
+ }
+ return glyph;
+ }
+
+ // This variant of the above function is called by TextBatch. At this point, it is possible
+ // that the maskformat of the glyph differs from what we expect. In these cases we will just
+ // draw a clear square.
+ // skbug:4143 crbug:510931
+ inline GrGlyph* getGlyph(GrGlyph::PackedID packed,
+ GrMaskFormat expectedMaskFormat,
+ SkGlyphCache* cache) {
+ GrGlyph* glyph = fCache.find(packed);
+ if (nullptr == glyph) {
+ // We could return this to the caller, but in practice it adds code complexity for
+            // potentially little benefit (i.e. if the glyph is not in our font cache, then it's
+            // not in the atlas and we're going to be doing a texture upload anyway).
+ const SkGlyph& skGlyph = GrToSkGlyph(cache, packed);
+ glyph = this->generateGlyph(skGlyph, packed, cache);
+ glyph->fMaskFormat = expectedMaskFormat;
+ }
+ return glyph;
+ }
+
+ // returns true if glyph successfully added to texture atlas, false otherwise. If the glyph's
+ // mask format has changed, then addGlyphToAtlas will draw a clear box. This will almost never
+ // happen.
+ // TODO we can handle some of these cases if we really want to, but the long term solution is to
+ // get the actual glyph image itself when we get the glyph metrics.
+ bool addGlyphToAtlas(GrDrawBatch::Target*, GrGlyph*, SkGlyphCache*,
+ GrMaskFormat expectedMaskFormat);
+
+ // testing
+ int countGlyphs() const { return fCache.count(); }
+
+ // remove any references to this plot
+ void removeID(GrBatchAtlas::AtlasID);
+
+ // If a TextStrike is abandoned by the cache, then the caller must get a new strike
+ bool isAbandoned() const { return fIsAbandoned; }
+
+ static const SkDescriptor& GetKey(const GrBatchTextStrike& ts) {
+ return *ts.fFontScalerKey.getDesc();
+ }
+
+ static uint32_t Hash(const SkDescriptor& desc) { return desc.getChecksum(); }
+
+private:
+ SkTDynamicHash<GrGlyph, GrGlyph::PackedID> fCache;
+ SkAutoDescriptor fFontScalerKey;
+ SkVarAlloc fPool;
+
+ GrBatchFontCache* fBatchFontCache;
+ int fAtlasedGlyphs;
+ bool fIsAbandoned;
+
+ static const SkGlyph& GrToSkGlyph(SkGlyphCache* cache, GrGlyph::PackedID id) {
+ return cache->getGlyphIDMetrics(GrGlyph::UnpackID(id),
+ GrGlyph::UnpackFixedX(id),
+ GrGlyph::UnpackFixedY(id));
+ }
+
+ GrGlyph* generateGlyph(const SkGlyph&, GrGlyph::PackedID, SkGlyphCache*);
+
+ friend class GrBatchFontCache;
+};
+
+/*
+ * GrBatchFontCache manages strikes which are indexed by a SkGlyphCache. These strikes can then
+ * be used to generate individual Glyph Masks. The GrBatchFontCache also manages GrBatchAtlases,
+ * though this is more or less transparent to the client (aside from atlasGeneration, described
+ * below).
+ * Note - we used to initialize the backing atlas for the GrBatchFontCache at initialization time.
+ * However, this caused a regression, even when the GrBatchFontCache was unused. We now initialize
+ * the backing atlases lazily. It's not immediately clear why this improves the situation.
+ */
+class GrBatchFontCache {
+public:
+ GrBatchFontCache(GrContext*);
+ ~GrBatchFontCache();
+ // The user of the cache may hold a long-lived ref to the returned strike. However, actions by
+ // another client of the cache may cause the strike to be purged while it is still reffed.
+ // Therefore, the caller must check GrBatchTextStrike::isAbandoned() if there are other
+ // interactions with the cache since the strike was received.
+ inline GrBatchTextStrike* getStrike(const SkGlyphCache* cache) {
+ GrBatchTextStrike* strike = fCache.find(cache->getDescriptor());
+ if (nullptr == strike) {
+ strike = this->generateStrike(cache);
+ }
+ return strike;
+ }
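+    // Sketch of the intended pattern (see GrAtlasTextBlob_regenInBatch.cpp): a blob remembers the
+    // GrBatchTextStrike* it used, checks isAbandoned() before reusing its cached GrGlyph pointers,
+    // and fetches a replacement strike from this cache when the old one has been purged.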
+
+ void freeAll();
+
+ // if getTexture returns nullptr, the client must not try to use other functions on the
+ // GrBatchFontCache which use the atlas. This function *must* be called first, before other
+ // functions which use the atlas.
+ GrTexture* getTexture(GrMaskFormat format) {
+ if (this->initAtlas(format)) {
+ return this->getAtlas(format)->getTexture();
+ }
+ return nullptr;
+ }
+
+ bool hasGlyph(GrGlyph* glyph) {
+ SkASSERT(glyph);
+ return this->getAtlas(glyph->fMaskFormat)->hasID(glyph->fID);
+ }
+
+ // To ensure the GrBatchAtlas does not evict the Glyph Mask from its texture backing store,
+ // the client must pass in the current batch token along with the GrGlyph.
+ // A BulkUseTokenUpdater is used to manage bulk last use token updating in the Atlas.
+ // For convenience, this function will also set the use token for the current glyph if required
+ // NOTE: the bulk uploader is only valid if the subrun has a valid atlasGeneration
+ void addGlyphToBulkAndSetUseToken(GrBatchAtlas::BulkUseTokenUpdater* updater,
+ GrGlyph* glyph, GrBatchDrawToken token) {
+ SkASSERT(glyph);
+ updater->add(glyph->fID);
+ this->getAtlas(glyph->fMaskFormat)->setLastUseToken(glyph->fID, token);
+ }
+
+ void setUseTokenBulk(const GrBatchAtlas::BulkUseTokenUpdater& updater,
+ GrBatchDrawToken token,
+ GrMaskFormat format) {
+ this->getAtlas(format)->setLastUseTokenBulk(updater, token);
+ }
+
+ // add to texture atlas that matches this format
+ bool addToAtlas(GrBatchTextStrike* strike, GrBatchAtlas::AtlasID* id,
+ GrDrawBatch::Target* target,
+ GrMaskFormat format, int width, int height, const void* image,
+ SkIPoint16* loc) {
+ fPreserveStrike = strike;
+ return this->getAtlas(format)->addToAtlas(id, target, width, height, image, loc);
+ }
+
+ // Some clients may wish to verify the integrity of the texture backing store of the
+    // GrBatchAtlas. The atlasGeneration returned below is a monotonically increasing number which
+    // changes every time something is removed from the texture backing store.
+ uint64_t atlasGeneration(GrMaskFormat format) const {
+ return this->getAtlas(format)->atlasGeneration();
+ }
+
+ int log2Width(GrMaskFormat format) { return fAtlasConfigs[format].fLog2Width; }
+ int log2Height(GrMaskFormat format) { return fAtlasConfigs[format].fLog2Height; }
+
+ ///////////////////////////////////////////////////////////////////////////
+    // Functions intended for debug only
+ void dump() const;
+
+ void setAtlasSizes_ForTesting(const GrBatchAtlasConfig configs[3]);
+
+private:
+ static GrPixelConfig MaskFormatToPixelConfig(GrMaskFormat format, const GrCaps& caps) {
+ switch (format) {
+ case kA8_GrMaskFormat:
+ return kAlpha_8_GrPixelConfig;
+ case kA565_GrMaskFormat:
+ return kRGB_565_GrPixelConfig;
+ case kARGB_GrMaskFormat:
+ return caps.srgbSupport() ? kSkiaGamma8888_GrPixelConfig : kSkia8888_GrPixelConfig;
+ default:
+ SkDEBUGFAIL("unsupported GrMaskFormat");
+ return kAlpha_8_GrPixelConfig;
+ }
+ }
+
+ // There is a 1:1 mapping between GrMaskFormats and atlas indices
+ static int MaskFormatToAtlasIndex(GrMaskFormat format) {
+ static const int sAtlasIndices[] = {
+ kA8_GrMaskFormat,
+ kA565_GrMaskFormat,
+ kARGB_GrMaskFormat,
+ };
+ static_assert(SK_ARRAY_COUNT(sAtlasIndices) == kMaskFormatCount, "array_size_mismatch");
+
+ SkASSERT(sAtlasIndices[format] < kMaskFormatCount);
+ return sAtlasIndices[format];
+ }
+
+ bool initAtlas(GrMaskFormat);
+
+ GrBatchTextStrike* generateStrike(const SkGlyphCache* cache) {
+ GrBatchTextStrike* strike = new GrBatchTextStrike(this, cache->getDescriptor());
+ fCache.add(strike);
+ return strike;
+ }
+
+ GrBatchAtlas* getAtlas(GrMaskFormat format) const {
+ int atlasIndex = MaskFormatToAtlasIndex(format);
+ SkASSERT(fAtlases[atlasIndex]);
+ return fAtlases[atlasIndex];
+ }
+
+ static void HandleEviction(GrBatchAtlas::AtlasID, void*);
+
+ using StrikeHash = SkTDynamicHash<GrBatchTextStrike, SkDescriptor>;
+ GrContext* fContext;
+ StrikeHash fCache;
+ GrBatchAtlas* fAtlases[kMaskFormatCount];
+ GrBatchTextStrike* fPreserveStrike;
+ GrBatchAtlasConfig fAtlasConfigs[kMaskFormatCount];
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/text/GrDistanceFieldAdjustTable.cpp b/gfx/skia/skia/src/gpu/text/GrDistanceFieldAdjustTable.cpp
new file mode 100644
index 000000000..3aa96b5f2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrDistanceFieldAdjustTable.cpp
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrDistanceFieldAdjustTable.h"
+
+#include "SkScalerContext.h"
+
+SkDEBUGCODE(static const int kExpectedDistanceAdjustTableSize = 8;)
+
+SkScalar* build_distance_adjust_table(SkScalar paintGamma, SkScalar deviceGamma) {
+ // This is used for an approximation of the mask gamma hack, used by raster and bitmap
+ // text. The mask gamma hack is based off of guessing what the blend color is going to
+    // be, and adjusting the mask so that, when run through the linear blend, it will
+ // produce the value closest to the desired result. However, in practice this means
+ // that the 'adjusted' mask is just increasing or decreasing the coverage of
+ // the mask depending on what it is thought it will blit against. For black (on
+ // assumed white) this means that coverages are decreased (on a curve). For white (on
+    // assumed black) this means that coverages are increased (on a curve). At
+ // middle (perceptual) gray (which could be blit against anything) the coverages
+ // remain the same.
+ //
+ // The idea here is that instead of determining the initial (real) coverage and
+ // then adjusting that coverage, we determine an adjusted coverage directly by
+ // essentially manipulating the geometry (in this case, the distance to the glyph
+ // edge). So for black (on assumed white) this thins a bit; for white (on
+ // assumed black) this fake bolds the geometry a bit.
+ //
+ // The distance adjustment is calculated by determining the actual coverage value which
+    // when fed into the mask gamma table gives us an 'adjusted coverage' value of 0.5. This
+    // actual coverage value (assuming it's between 0 and 1) corresponds to a distance from the
+    // actual edge. So by subtracting this distance adjustment and computing without the
+    // coverage adjustment we should get 0.5 coverage at the same point.
+ //
+ // This has several implications:
+ // For non-gray lcd smoothed text, each subpixel essentially is using a
+ // slightly different geometry.
+ //
+ // For black (on assumed white) this may not cover some pixels which were
+ // previously covered; however those pixels would have been only slightly
+ // covered and that slight coverage would have been decreased anyway. Also, some pixels
+ // which were previously fully covered may no longer be fully covered.
+ //
+ // For white (on assumed black) this may cover some pixels which weren't
+ // previously covered at all.
+
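+    // Concretely (illustrative numbers only): if the search below finds the 0.5-coverage crossing
+    // for some row at column 63 with interp == 0.5, then borderAlpha == 63.5 / 255 ~= 0.25, the
+    // approximate smoothstep inverse gives t ~= 0.31, and the stored adjustment is
+    // d = 2 * 0.65 * t - 0.65 ~= -0.24 distance-field units for that gamma row.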
+ int width, height;
+ size_t size;
+
+#ifdef SK_GAMMA_CONTRAST
+ SkScalar contrast = SK_GAMMA_CONTRAST;
+#else
+ SkScalar contrast = 0.5f;
+#endif
+
+ size = SkScalerContext::GetGammaLUTSize(contrast, paintGamma, deviceGamma,
+ &width, &height);
+
+ SkASSERT(kExpectedDistanceAdjustTableSize == height);
+ SkScalar* table = new SkScalar[height];
+
+ SkAutoTArray<uint8_t> data((int)size);
+ SkScalerContext::GetGammaLUTData(contrast, paintGamma, deviceGamma, data.get());
+
+ // find the inverse points where we cross 0.5
+ // binsearch might be better, but we only need to do this once on creation
+ for (int row = 0; row < height; ++row) {
+ uint8_t* rowPtr = data.get() + row*width;
+ for (int col = 0; col < width - 1; ++col) {
+ if (rowPtr[col] <= 127 && rowPtr[col + 1] >= 128) {
+ // compute point where a mask value will give us a result of 0.5
+ float interp = (127.5f - rowPtr[col]) / (rowPtr[col + 1] - rowPtr[col]);
+ float borderAlpha = (col + interp) / 255.f;
+
+ // compute t value for that alpha
+ // this is an approximate inverse for smoothstep()
+ float t = borderAlpha*(borderAlpha*(4.0f*borderAlpha - 6.0f) + 5.0f) / 3.0f;
+
+ // compute distance which gives us that t value
+ const float kDistanceFieldAAFactor = 0.65f; // should match SK_DistanceFieldAAFactor
+ float d = 2.0f*kDistanceFieldAAFactor*t - kDistanceFieldAAFactor;
+
+ table[row] = d;
+ break;
+ }
+ }
+ }
+
+ return table;
+}
+
+void GrDistanceFieldAdjustTable::buildDistanceAdjustTables() {
+ fTable = build_distance_adjust_table(SK_GAMMA_EXPONENT, SK_GAMMA_EXPONENT);
+ fGammaCorrectTable = build_distance_adjust_table(SK_Scalar1, SK_Scalar1);
+}
diff --git a/gfx/skia/skia/src/gpu/text/GrDistanceFieldAdjustTable.h b/gfx/skia/skia/src/gpu/text/GrDistanceFieldAdjustTable.h
new file mode 100644
index 000000000..0f5c6f29a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrDistanceFieldAdjustTable.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDistanceFieldAdjustTable_DEFINED
+#define GrDistanceFieldAdjustTable_DEFINED
+
+#include "SkRefCnt.h"
+#include "SkScalar.h"
+
+// Distance field text needs this table to compute a value for use in the fragment shader.
+// Because the GrAtlasTextContext can go out of scope before the final flush, this needs to be
+// refcnted and malloced
+struct GrDistanceFieldAdjustTable : public SkNVRefCnt<GrDistanceFieldAdjustTable> {
+ GrDistanceFieldAdjustTable() { this->buildDistanceAdjustTables(); }
+ ~GrDistanceFieldAdjustTable() {
+ delete[] fTable;
+ delete[] fGammaCorrectTable;
+ }
+
+ const SkScalar& getAdjustment(int i, bool useGammaCorrectTable) const {
+ return useGammaCorrectTable ? fGammaCorrectTable[i] : fTable[i];
+ }
+
+private:
+ void buildDistanceAdjustTables();
+
+ SkScalar* fTable;
+ SkScalar* fGammaCorrectTable;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/text/GrStencilAndCoverTextContext.cpp b/gfx/skia/skia/src/gpu/text/GrStencilAndCoverTextContext.cpp
new file mode 100644
index 000000000..32d80218d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrStencilAndCoverTextContext.cpp
@@ -0,0 +1,739 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrStencilAndCoverTextContext.h"
+#include "GrAtlasTextContext.h"
+#include "GrContext.h"
+#include "GrDrawContext.h"
+#include "GrPath.h"
+#include "GrPathRange.h"
+#include "GrPipelineBuilder.h"
+#include "GrResourceProvider.h"
+#include "GrTextUtils.h"
+#include "SkAutoKern.h"
+#include "SkDraw.h"
+#include "SkDrawProcs.h"
+#include "SkGlyphCache.h"
+#include "SkGrPriv.h"
+#include "SkDrawFilter.h"
+#include "SkPath.h"
+#include "SkTextBlobRunIterator.h"
+#include "SkTextMapStateProc.h"
+#include "SkTextFormatParams.h"
+
+#include "batches/GrDrawPathBatch.h"
+
+template<typename Key, typename Val> static void delete_hash_map_entry(const Key&, Val* val) {
+ SkASSERT(*val);
+ delete *val;
+}
+
+template<typename T> static void delete_hash_table_entry(T* val) {
+ SkASSERT(*val);
+ delete *val;
+}
+
+GrStencilAndCoverTextContext::GrStencilAndCoverTextContext(GrAtlasTextContext* fallbackTextContext)
+ : fFallbackTextContext(fallbackTextContext)
+ , fCacheSize(0) {
+}
+
+GrStencilAndCoverTextContext*
+GrStencilAndCoverTextContext::Create(GrAtlasTextContext* fallbackTextContext) {
+    return new GrStencilAndCoverTextContext(fallbackTextContext);
+}
+
+GrStencilAndCoverTextContext::~GrStencilAndCoverTextContext() {
+ fBlobIdCache.foreach(delete_hash_map_entry<uint32_t, TextBlob*>);
+ fBlobKeyCache.foreach(delete_hash_table_entry<TextBlob*>);
+}
+
+bool GrStencilAndCoverTextContext::internalCanDraw(const SkPaint& skPaint) {
+ if (skPaint.getRasterizer()) {
+ return false;
+ }
+ if (skPaint.getMaskFilter()) {
+ return false;
+ }
+ if (SkPathEffect* pe = skPaint.getPathEffect()) {
+ if (pe->asADash(nullptr) != SkPathEffect::kDash_DashType) {
+ return false;
+ }
+ }
+ // No hairlines. They would require new paths with customized strokes for every new draw matrix.
+ return SkPaint::kStroke_Style != skPaint.getStyle() || 0 != skPaint.getStrokeWidth();
+}
+
+void GrStencilAndCoverTextContext::drawText(GrContext* context, GrDrawContext* dc,
+ const GrClip& clip, const GrPaint& paint,
+ const SkPaint& skPaint, const SkMatrix& viewMatrix,
+ const SkSurfaceProps& props,
+ const char text[], size_t byteLength,
+ SkScalar x, SkScalar y, const SkIRect& clipBounds) {
+ if (context->abandoned()) {
+ return;
+ } else if (this->canDraw(skPaint, viewMatrix)) {
+ if (skPaint.getTextSize() > 0) {
+ TextRun run(skPaint);
+ run.setText(text, byteLength, x, y);
+ run.draw(context, dc, paint, clip, viewMatrix, props, 0, 0,
+ clipBounds, fFallbackTextContext, skPaint);
+ }
+ return;
+ } else if (fFallbackTextContext->canDraw(skPaint, viewMatrix, props,
+ *context->caps()->shaderCaps())) {
+ fFallbackTextContext->drawText(context, dc, clip, paint, skPaint, viewMatrix, props, text,
+ byteLength, x, y, clipBounds);
+ return;
+ }
+
+ // fall back to drawing as a path
+ GrTextUtils::DrawTextAsPath(context, dc, clip, skPaint, viewMatrix, text, byteLength, x, y,
+ clipBounds);
+}
+
+void GrStencilAndCoverTextContext::drawPosText(GrContext* context, GrDrawContext* dc,
+ const GrClip& clip,
+ const GrPaint& paint,
+ const SkPaint& skPaint,
+ const SkMatrix& viewMatrix,
+ const SkSurfaceProps& props,
+ const char text[],
+ size_t byteLength,
+ const SkScalar pos[],
+ int scalarsPerPosition,
+ const SkPoint& offset,
+ const SkIRect& clipBounds) {
+ if (context->abandoned()) {
+ return;
+ } else if (this->canDraw(skPaint, viewMatrix)) {
+ if (skPaint.getTextSize() > 0) {
+ TextRun run(skPaint);
+ run.setPosText(text, byteLength, pos, scalarsPerPosition, offset);
+ run.draw(context, dc, paint, clip, viewMatrix, props, 0, 0,
+ clipBounds, fFallbackTextContext, skPaint);
+ }
+ return;
+ } else if (fFallbackTextContext->canDraw(skPaint, viewMatrix, props,
+ *context->caps()->shaderCaps())) {
+ fFallbackTextContext->drawPosText(context, dc, clip, paint, skPaint, viewMatrix, props,
+ text, byteLength, pos,
+ scalarsPerPosition, offset, clipBounds);
+ return;
+ }
+
+ // fall back to drawing as a path
+ GrTextUtils::DrawPosTextAsPath(context, dc, props, clip, skPaint, viewMatrix, text,
+ byteLength, pos, scalarsPerPosition, offset, clipBounds);
+}
+
+void GrStencilAndCoverTextContext::uncachedDrawTextBlob(GrContext* context,
+ GrDrawContext* dc,
+ const GrClip& clip,
+ const SkPaint& skPaint,
+ const SkMatrix& viewMatrix,
+ const SkSurfaceProps& props,
+ const SkTextBlob* blob,
+ SkScalar x, SkScalar y,
+ SkDrawFilter* drawFilter,
+ const SkIRect& clipBounds) {
+ SkPaint runPaint = skPaint;
+
+ SkTextBlobRunIterator it(blob);
+ for (;!it.done(); it.next()) {
+ size_t textLen = it.glyphCount() * sizeof(uint16_t);
+ const SkPoint& offset = it.offset();
+
+ // applyFontToPaint() always overwrites the exact same attributes,
+ // so it is safe to not re-seed the paint for this reason.
+ it.applyFontToPaint(&runPaint);
+
+ if (drawFilter && !drawFilter->filter(&runPaint, SkDrawFilter::kText_Type)) {
+ // A false return from filter() means we should abort the current draw.
+ runPaint = skPaint;
+ continue;
+ }
+
+ runPaint.setFlags(GrTextUtils::FilterTextFlags(props, runPaint));
+
+ GrPaint grPaint;
+ if (!SkPaintToGrPaint(context, dc, runPaint, viewMatrix, &grPaint)) {
+ return;
+ }
+
+ switch (it.positioning()) {
+ case SkTextBlob::kDefault_Positioning:
+ this->drawText(context, dc, clip, grPaint, runPaint, viewMatrix, props,
+ (const char *)it.glyphs(),
+ textLen, x + offset.x(), y + offset.y(), clipBounds);
+ break;
+ case SkTextBlob::kHorizontal_Positioning:
+ this->drawPosText(context, dc, clip, grPaint, runPaint, viewMatrix, props,
+ (const char*)it.glyphs(),
+ textLen, it.pos(), 1, SkPoint::Make(x, y + offset.y()),
+ clipBounds);
+ break;
+ case SkTextBlob::kFull_Positioning:
+ this->drawPosText(context, dc, clip, grPaint, runPaint, viewMatrix, props,
+ (const char*)it.glyphs(),
+ textLen, it.pos(), 2, SkPoint::Make(x, y), clipBounds);
+ break;
+ }
+
+ if (drawFilter) {
+ // A draw filter may change the paint arbitrarily, so we must re-seed in this case.
+ runPaint = skPaint;
+ }
+ }
+}
+
+void GrStencilAndCoverTextContext::drawTextBlob(GrContext* context, GrDrawContext* dc,
+ const GrClip& clip, const SkPaint& skPaint,
+ const SkMatrix& viewMatrix,
+ const SkSurfaceProps& props,
+ const SkTextBlob* skBlob, SkScalar x, SkScalar y,
+ SkDrawFilter* drawFilter,
+ const SkIRect& clipBounds) {
+ if (context->abandoned()) {
+ return;
+ }
+
+ if (!this->internalCanDraw(skPaint)) {
+ fFallbackTextContext->drawTextBlob(context, dc, clip, skPaint, viewMatrix, props, skBlob,
+ x, y, drawFilter, clipBounds);
+ return;
+ }
+
+ if (drawFilter || skPaint.getPathEffect()) {
+ // This draw can't be cached.
+ this->uncachedDrawTextBlob(context, dc, clip, skPaint, viewMatrix, props, skBlob, x, y,
+ drawFilter, clipBounds);
+ return;
+ }
+
+ GrPaint paint;
+ if (!SkPaintToGrPaint(context, dc, skPaint, viewMatrix, &paint)) {
+ return;
+ }
+
+ const TextBlob& blob = this->findOrCreateTextBlob(skBlob, skPaint);
+
+ TextBlob::Iter iter(blob);
+ for (TextRun* run = iter.get(); run; run = iter.next()) {
+ // The run's "font" overrides the anti-aliasing of the passed in paint!
+ paint.setAntiAlias(run->isAntiAlias());
+ run->draw(context, dc, paint, clip, viewMatrix, props, x, y,
+ clipBounds, fFallbackTextContext, skPaint);
+ run->releaseGlyphCache();
+ }
+}
+
+static inline int style_key_cnt(const GrStyle& style) {
+ int cnt = GrStyle::KeySize(style, GrStyle::Apply::kPathEffectAndStrokeRec);
+ // We should be able to make a key because we filtered out arbitrary path effects.
+ SkASSERT(cnt > 0);
+ return cnt;
+}
+
+static inline void write_style_key(uint32_t* dst, const GrStyle& style) {
+    // Pass 1 for the scale since the GPU will apply the style, not GrStyle::applyToPath().
+ GrStyle::WriteKey(dst, style, GrStyle::Apply::kPathEffectAndStrokeRec, SK_Scalar1);
+}
+
+const GrStencilAndCoverTextContext::TextBlob&
+GrStencilAndCoverTextContext::findOrCreateTextBlob(const SkTextBlob* skBlob,
+ const SkPaint& skPaint) {
+ // The font-related parameters are baked into the text blob and will override this skPaint, so
+ // the only remaining properties that can affect a TextBlob are the ones related to stroke.
+ if (SkPaint::kFill_Style == skPaint.getStyle()) { // Fast path.
+ if (TextBlob** found = fBlobIdCache.find(skBlob->uniqueID())) {
+ fLRUList.remove(*found);
+ fLRUList.addToTail(*found);
+ return **found;
+ }
+ TextBlob* blob = new TextBlob(skBlob->uniqueID(), skBlob, skPaint);
+ this->purgeToFit(*blob);
+ fBlobIdCache.set(skBlob->uniqueID(), blob);
+ fLRUList.addToTail(blob);
+ fCacheSize += blob->cpuMemorySize();
+ return *blob;
+ } else {
+ GrStyle style(skPaint);
+ SkSTArray<4, uint32_t, true> key;
+ key.reset(1 + style_key_cnt(style));
+ key[0] = skBlob->uniqueID();
+ write_style_key(&key[1], style);
+ if (TextBlob** found = fBlobKeyCache.find(key)) {
+ fLRUList.remove(*found);
+ fLRUList.addToTail(*found);
+ return **found;
+ }
+ TextBlob* blob = new TextBlob(key, skBlob, skPaint);
+ this->purgeToFit(*blob);
+ fBlobKeyCache.set(blob);
+ fLRUList.addToTail(blob);
+ fCacheSize += blob->cpuMemorySize();
+ return *blob;
+ }
+}
+
+void GrStencilAndCoverTextContext::purgeToFit(const TextBlob& blob) {
+ static const size_t maxCacheSize = 4 * 1024 * 1024; // Allow up to 4 MB for caching text blobs.
+
+ size_t maxSizeForNewBlob = maxCacheSize - blob.cpuMemorySize();
+ while (fCacheSize && fCacheSize > maxSizeForNewBlob) {
+ TextBlob* lru = fLRUList.head();
+ if (1 == lru->key().count()) {
+            // 1-length keys are understood to be the blob id.
+ fBlobIdCache.remove(lru->key()[0]);
+ } else {
+ fBlobKeyCache.remove(lru->key());
+ }
+ fLRUList.remove(lru);
+ fCacheSize -= lru->cpuMemorySize();
+ delete lru;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+void GrStencilAndCoverTextContext::TextBlob::init(const SkTextBlob* skBlob,
+ const SkPaint& skPaint) {
+ fCpuMemorySize = sizeof(TextBlob);
+ SkPaint runPaint(skPaint);
+ for (SkTextBlobRunIterator iter(skBlob); !iter.done(); iter.next()) {
+ iter.applyFontToPaint(&runPaint); // No need to re-seed the paint.
+ if (runPaint.getTextSize() <= 0) {
+ continue;
+ }
+ TextRun* run = this->addToTail(runPaint);
+
+ const char* text = reinterpret_cast<const char*>(iter.glyphs());
+ size_t byteLength = sizeof(uint16_t) * iter.glyphCount();
+ const SkPoint& runOffset = iter.offset();
+
+ switch (iter.positioning()) {
+ case SkTextBlob::kDefault_Positioning:
+ run->setText(text, byteLength, runOffset.fX, runOffset.fY);
+ break;
+ case SkTextBlob::kHorizontal_Positioning:
+ run->setPosText(text, byteLength, iter.pos(), 1, SkPoint::Make(0, runOffset.fY));
+ break;
+ case SkTextBlob::kFull_Positioning:
+ run->setPosText(text, byteLength, iter.pos(), 2, SkPoint::Make(0, 0));
+ break;
+ }
+
+ fCpuMemorySize += run->computeSizeInCache();
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+class GrStencilAndCoverTextContext::FallbackBlobBuilder {
+public:
+ FallbackBlobBuilder() : fBuffIdx(0), fCount(0) {}
+
+ bool isInitialized() const { return fBuilder != nullptr; }
+
+ void init(const SkPaint& font, SkScalar textRatio);
+
+ void appendGlyph(uint16_t glyphId, const SkPoint& pos);
+
+ sk_sp<SkTextBlob> makeIfNeeded(int* count);
+
+private:
+ enum { kWriteBufferSize = 1024 };
+
+ void flush();
+
+ SkAutoTDelete<SkTextBlobBuilder> fBuilder;
+ SkPaint fFont;
+ int fBuffIdx;
+ int fCount;
+ uint16_t fGlyphIds[kWriteBufferSize];
+ SkPoint fPositions[kWriteBufferSize];
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+GrStencilAndCoverTextContext::TextRun::TextRun(const SkPaint& fontAndStroke)
+ : fStyle(fontAndStroke)
+ , fFont(fontAndStroke)
+ , fTotalGlyphCount(0)
+ , fFallbackGlyphCount(0)
+ , fDetachedGlyphCache(nullptr)
+ , fLastDrawnGlyphsID(SK_InvalidUniqueID) {
+ SkASSERT(fFont.getTextSize() > 0);
+ SkASSERT(!fStyle.hasNonDashPathEffect()); // Arbitrary path effects not supported.
+ SkASSERT(!fStyle.isSimpleHairline()); // Hairlines are not supported.
+
+ // Setting to "fill" ensures that no strokes get baked into font outlines. (We use the GPU path
+ // rendering API for stroking).
+ fFont.setStyle(SkPaint::kFill_Style);
+
+ if (fFont.isFakeBoldText() && fStyle.isSimpleFill()) {
+ const SkStrokeRec& stroke = fStyle.strokeRec();
+ // Instead of letting fake bold get baked into the glyph outlines, do it with GPU stroke.
+ SkScalar fakeBoldScale = SkScalarInterpFunc(fFont.getTextSize(),
+ kStdFakeBoldInterpKeys,
+ kStdFakeBoldInterpValues,
+ kStdFakeBoldInterpLength);
+ SkScalar extra = SkScalarMul(fFont.getTextSize(), fakeBoldScale);
+
+ SkStrokeRec strokeRec(SkStrokeRec::kFill_InitStyle);
+ strokeRec.setStrokeStyle(stroke.needToApply() ? stroke.getWidth() + extra : extra,
+ true /*strokeAndFill*/);
+ fStyle = GrStyle(strokeRec, fStyle.pathEffect());
+ fFont.setFakeBoldText(false);
+ }
+
+ if (!fFont.getPathEffect() && !fStyle.isDashed()) {
+ const SkStrokeRec& stroke = fStyle.strokeRec();
+ // We can draw the glyphs from canonically sized paths.
+ fTextRatio = fFont.getTextSize() / SkPaint::kCanonicalTextSizeForPaths;
+ fTextInverseRatio = SkPaint::kCanonicalTextSizeForPaths / fFont.getTextSize();
+
+ // Compensate for the glyphs being scaled by fTextRatio.
+ if (!fStyle.isSimpleFill()) {
+ SkStrokeRec strokeRec(SkStrokeRec::kFill_InitStyle);
+ strokeRec.setStrokeStyle(stroke.getWidth() / fTextRatio,
+ SkStrokeRec::kStrokeAndFill_Style == stroke.getStyle());
+ fStyle = GrStyle(strokeRec, fStyle.pathEffect());
+ }
+
+ fFont.setLinearText(true);
+ fFont.setLCDRenderText(false);
+ fFont.setAutohinted(false);
+ fFont.setHinting(SkPaint::kNo_Hinting);
+ fFont.setSubpixelText(true);
+ fFont.setTextSize(SkIntToScalar(SkPaint::kCanonicalTextSizeForPaths));
+
+ fUsingRawGlyphPaths = SK_Scalar1 == fFont.getTextScaleX() &&
+ 0 == fFont.getTextSkewX() &&
+ !fFont.isFakeBoldText() &&
+ !fFont.isVerticalText();
+ } else {
+ fTextRatio = fTextInverseRatio = 1.0f;
+ fUsingRawGlyphPaths = false;
+ }
+
+ // Generate the key that will be used to cache the GPU glyph path objects.
+ if (fUsingRawGlyphPaths && fStyle.isSimpleFill()) {
+ static const GrUniqueKey::Domain kRawFillPathGlyphDomain = GrUniqueKey::GenerateDomain();
+
+ const SkTypeface* typeface = fFont.getTypeface();
+ GrUniqueKey::Builder builder(&fGlyphPathsKey, kRawFillPathGlyphDomain, 1);
+ reinterpret_cast<uint32_t&>(builder[0]) = typeface ? typeface->uniqueID() : 0;
+ } else {
+ static const GrUniqueKey::Domain kPathGlyphDomain = GrUniqueKey::GenerateDomain();
+
+ int styleDataCount = GrStyle::KeySize(fStyle, GrStyle::Apply::kPathEffectAndStrokeRec);
+ // Key should be valid since we opted out of drawing arbitrary path effects.
+ SkASSERT(styleDataCount >= 0);
+ if (fUsingRawGlyphPaths) {
+ const SkTypeface* typeface = fFont.getTypeface();
+ GrUniqueKey::Builder builder(&fGlyphPathsKey, kPathGlyphDomain, 2 + styleDataCount);
+ reinterpret_cast<uint32_t&>(builder[0]) = typeface ? typeface->uniqueID() : 0;
+ reinterpret_cast<uint32_t&>(builder[1]) = styleDataCount;
+ if (styleDataCount) {
+ write_style_key(&builder[2], fStyle);
+ }
+ } else {
+ SkGlyphCache* glyphCache = this->getGlyphCache();
+ const SkTypeface* typeface = glyphCache->getScalerContext()->getTypeface();
+ const SkDescriptor* desc = &glyphCache->getDescriptor();
+ int descDataCount = (desc->getLength() + 3) / 4;
+ GrUniqueKey::Builder builder(&fGlyphPathsKey, kPathGlyphDomain,
+ 2 + styleDataCount + descDataCount);
+ reinterpret_cast<uint32_t&>(builder[0]) = typeface ? typeface->uniqueID() : 0;
+ reinterpret_cast<uint32_t&>(builder[1]) = styleDataCount | (descDataCount << 16);
+ if (styleDataCount) {
+ write_style_key(&builder[2], fStyle);
+ }
+ memcpy(&builder[2 + styleDataCount], desc, desc->getLength());
+ }
+ }
+}
+
+GrStencilAndCoverTextContext::TextRun::~TextRun() {
+ this->releaseGlyphCache();
+}
+
+void GrStencilAndCoverTextContext::TextRun::setText(const char text[], size_t byteLength,
+ SkScalar x, SkScalar y) {
+ SkASSERT(byteLength == 0 || text != nullptr);
+
+ SkGlyphCache* glyphCache = this->getGlyphCache();
+ SkPaint::GlyphCacheProc glyphCacheProc = SkPaint::GetGlyphCacheProc(fFont.getTextEncoding(),
+ fFont.isDevKernText(),
+ true);
+
+ fTotalGlyphCount = fFont.countText(text, byteLength);
+ fInstanceData.reset(InstanceData::Alloc(GrPathRendering::kTranslate_PathTransformType,
+ fTotalGlyphCount));
+
+ const char* stop = text + byteLength;
+
+ // Measure first if needed.
+ if (fFont.getTextAlign() != SkPaint::kLeft_Align) {
+ SkScalar stopX = 0;
+ SkScalar stopY = 0;
+
+ const char* textPtr = text;
+ while (textPtr < stop) {
+ // We don't need x, y here, since all subpixel variants will have the
+ // same advance.
+ const SkGlyph& glyph = glyphCacheProc(glyphCache, &textPtr);
+
+ stopX += SkFloatToScalar(glyph.fAdvanceX);
+ stopY += SkFloatToScalar(glyph.fAdvanceY);
+ }
+ SkASSERT(textPtr == stop);
+
+ SkScalar alignX = stopX * fTextRatio;
+ SkScalar alignY = stopY * fTextRatio;
+
+ if (fFont.getTextAlign() == SkPaint::kCenter_Align) {
+ alignX = SkScalarHalf(alignX);
+ alignY = SkScalarHalf(alignY);
+ }
+
+ x -= alignX;
+ y -= alignY;
+ }
+
+ SkAutoKern autokern;
+
+ FallbackBlobBuilder fallback;
+ while (text < stop) {
+ const SkGlyph& glyph = glyphCacheProc(glyphCache, &text);
+ x += autokern.adjust(glyph) * fTextRatio;
+ if (glyph.fWidth) {
+ this->appendGlyph(glyph, SkPoint::Make(x, y), &fallback);
+ }
+
+ x += SkFloatToScalar(glyph.fAdvanceX) * fTextRatio;
+ y += SkFloatToScalar(glyph.fAdvanceY) * fTextRatio;
+ }
+
+ fFallbackTextBlob = fallback.makeIfNeeded(&fFallbackGlyphCount);
+}
+
+void GrStencilAndCoverTextContext::TextRun::setPosText(const char text[], size_t byteLength,
+ const SkScalar pos[], int scalarsPerPosition,
+ const SkPoint& offset) {
+ SkASSERT(byteLength == 0 || text != nullptr);
+ SkASSERT(1 == scalarsPerPosition || 2 == scalarsPerPosition);
+
+ SkGlyphCache* glyphCache = this->getGlyphCache();
+ SkPaint::GlyphCacheProc glyphCacheProc = SkPaint::GetGlyphCacheProc(fFont.getTextEncoding(),
+ fFont.isDevKernText(),
+ true);
+
+ fTotalGlyphCount = fFont.countText(text, byteLength);
+ fInstanceData.reset(InstanceData::Alloc(GrPathRendering::kTranslate_PathTransformType,
+ fTotalGlyphCount));
+
+ const char* stop = text + byteLength;
+
+ SkTextMapStateProc tmsProc(SkMatrix::I(), offset, scalarsPerPosition);
+ SkTextAlignProc alignProc(fFont.getTextAlign());
+ FallbackBlobBuilder fallback;
+ while (text < stop) {
+ const SkGlyph& glyph = glyphCacheProc(glyphCache, &text);
+ if (glyph.fWidth) {
+ SkPoint tmsLoc;
+ tmsProc(pos, &tmsLoc);
+ SkPoint loc;
+ alignProc(tmsLoc, glyph, &loc);
+
+ this->appendGlyph(glyph, loc, &fallback);
+ }
+ pos += scalarsPerPosition;
+ }
+
+ fFallbackTextBlob = fallback.makeIfNeeded(&fFallbackGlyphCount);
+}
+
+GrPathRange* GrStencilAndCoverTextContext::TextRun::createGlyphs(GrContext* ctx) const {
+ GrPathRange* glyphs = static_cast<GrPathRange*>(
+ ctx->resourceProvider()->findAndRefResourceByUniqueKey(fGlyphPathsKey));
+ if (nullptr == glyphs) {
+ if (fUsingRawGlyphPaths) {
+ SkScalerContextEffects noeffects;
+ glyphs = ctx->resourceProvider()->createGlyphs(fFont.getTypeface(), noeffects,
+ nullptr, fStyle);
+ } else {
+ SkGlyphCache* cache = this->getGlyphCache();
+ glyphs = ctx->resourceProvider()->createGlyphs(cache->getScalerContext()->getTypeface(),
+ cache->getScalerContext()->getEffects(),
+ &cache->getDescriptor(),
+ fStyle);
+ }
+ ctx->resourceProvider()->assignUniqueKeyToResource(fGlyphPathsKey, glyphs);
+ }
+ return glyphs;
+}
+
+inline void GrStencilAndCoverTextContext::TextRun::appendGlyph(const SkGlyph& glyph,
+ const SkPoint& pos,
+ FallbackBlobBuilder* fallback) {
+ // Stick the glyphs we can't draw into the fallback text blob.
+ if (SkMask::kARGB32_Format == glyph.fMaskFormat) {
+ if (!fallback->isInitialized()) {
+ fallback->init(fFont, fTextRatio);
+ }
+ fallback->appendGlyph(glyph.getGlyphID(), pos);
+ } else {
+ fInstanceData->append(glyph.getGlyphID(), fTextInverseRatio * pos.x(),
+ fTextInverseRatio * pos.y());
+ }
+}
+
+void GrStencilAndCoverTextContext::TextRun::draw(GrContext* ctx,
+ GrDrawContext* drawContext,
+ const GrPaint& grPaint,
+ const GrClip& clip,
+ const SkMatrix& viewMatrix,
+ const SkSurfaceProps& props,
+ SkScalar x, SkScalar y,
+ const SkIRect& clipBounds,
+ GrAtlasTextContext* fallbackTextContext,
+ const SkPaint& originalSkPaint) const {
+ SkASSERT(fInstanceData);
+ SkASSERT(drawContext->isStencilBufferMultisampled() || !grPaint.isAntiAlias());
+
+ if (fInstanceData->count()) {
+ static constexpr GrUserStencilSettings kCoverPass(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kNotEqual, // Stencil pass accounts for clip.
+ 0xffff,
+ GrUserStencilOp::kZero,
+ GrUserStencilOp::kKeep,
+ 0xffff>()
+ );
+
+ SkAutoTUnref<GrPathRange> glyphs(this->createGlyphs(ctx));
+ if (fLastDrawnGlyphsID != glyphs->uniqueID()) {
+ // Either this is the first draw or the glyphs object was purged since last draw.
+ glyphs->loadPathsIfNeeded(fInstanceData->indices(), fInstanceData->count());
+ fLastDrawnGlyphsID = glyphs->uniqueID();
+ }
+
+        // Don't compute a bounding box. For the dst copy texture, we'll simply opt to copy the
+        // entire dst. Realistically this is a moot point, because any context that supports
+ // NV_path_rendering will also support NV_blend_equation_advanced.
+ // For clipping we'll just skip any optimizations based on the bounds. This does, however,
+ // hurt batching.
+ const SkRect bounds = SkRect::MakeIWH(drawContext->width(), drawContext->height());
+
+ SkAutoTUnref<GrDrawBatch> batch(
+ GrDrawPathRangeBatch::Create(viewMatrix, fTextRatio, fTextInverseRatio * x,
+ fTextInverseRatio * y, grPaint.getColor(),
+ GrPathRendering::kWinding_FillType, glyphs, fInstanceData,
+ bounds));
+
+ GrPipelineBuilder pipelineBuilder(grPaint);
+ pipelineBuilder.setState(GrPipelineBuilder::kHWAntialias_Flag, grPaint.isAntiAlias());
+ pipelineBuilder.setUserStencil(&kCoverPass);
+
+ drawContext->drawBatch(pipelineBuilder, clip, batch);
+ }
+
+ if (fFallbackTextBlob) {
+ SkPaint fallbackSkPaint(originalSkPaint);
+ fStyle.strokeRec().applyToPaint(&fallbackSkPaint);
+ if (!fStyle.isSimpleFill()) {
+ fallbackSkPaint.setStrokeWidth(fStyle.strokeRec().getWidth() * fTextRatio);
+ }
+
+ fallbackTextContext->drawTextBlob(ctx, drawContext, clip, fallbackSkPaint, viewMatrix,
+ props, fFallbackTextBlob.get(), x, y, nullptr,
+ clipBounds);
+ }
+}
+
+SkGlyphCache* GrStencilAndCoverTextContext::TextRun::getGlyphCache() const {
+ if (!fDetachedGlyphCache) {
+ fDetachedGlyphCache = fFont.detachCache(nullptr, SkPaint::kNone_ScalerContextFlags,
+ nullptr);
+ }
+ return fDetachedGlyphCache;
+}
+
+
+void GrStencilAndCoverTextContext::TextRun::releaseGlyphCache() const {
+ if (fDetachedGlyphCache) {
+ SkGlyphCache::AttachCache(fDetachedGlyphCache);
+ fDetachedGlyphCache = nullptr;
+ }
+}
+
+size_t GrStencilAndCoverTextContext::TextRun::computeSizeInCache() const {
+ size_t size = sizeof(TextRun) + fGlyphPathsKey.size();
+ // The instance data always reserves enough space for every glyph.
+ size += (fTotalGlyphCount + fFallbackGlyphCount) * (sizeof(uint16_t) + 2 * sizeof(float));
+ if (fInstanceData) {
+ size += sizeof(InstanceData);
+ }
+ if (fFallbackTextBlob) {
+ size += sizeof(SkTextBlob);
+ }
+ return size;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+void GrStencilAndCoverTextContext::FallbackBlobBuilder::init(const SkPaint& font,
+ SkScalar textRatio) {
+ SkASSERT(!this->isInitialized());
+ fBuilder.reset(new SkTextBlobBuilder);
+ fFont = font;
+ fFont.setTextAlign(SkPaint::kLeft_Align); // The glyph positions will already account for align.
+ fFont.setTextEncoding(SkPaint::kGlyphID_TextEncoding);
+ // No need for subpixel positioning with bitmap glyphs. TODO: revisit if non-bitmap color glyphs
+ // show up and https://code.google.com/p/skia/issues/detail?id=4408 gets resolved.
+ fFont.setSubpixelText(false);
+ fFont.setTextSize(fFont.getTextSize() * textRatio);
+ fBuffIdx = 0;
+}
+
+void GrStencilAndCoverTextContext::FallbackBlobBuilder::appendGlyph(uint16_t glyphId,
+ const SkPoint& pos) {
+ SkASSERT(this->isInitialized());
+ if (fBuffIdx >= kWriteBufferSize) {
+ this->flush();
+ }
+ fGlyphIds[fBuffIdx] = glyphId;
+ fPositions[fBuffIdx] = pos;
+ fBuffIdx++;
+ fCount++;
+}
+
+void GrStencilAndCoverTextContext::FallbackBlobBuilder::flush() {
+ SkASSERT(this->isInitialized());
+ SkASSERT(fBuffIdx <= kWriteBufferSize);
+ if (!fBuffIdx) {
+ return;
+ }
+ // This will automatically merge with previous runs since we use the same font.
+ const SkTextBlobBuilder::RunBuffer& buff = fBuilder->allocRunPos(fFont, fBuffIdx);
+ memcpy(buff.glyphs, fGlyphIds, fBuffIdx * sizeof(uint16_t));
+ memcpy(buff.pos, fPositions[0].asScalars(), fBuffIdx * 2 * sizeof(SkScalar));
+ fBuffIdx = 0;
+}
+
+sk_sp<SkTextBlob> GrStencilAndCoverTextContext::FallbackBlobBuilder::makeIfNeeded(int *count) {
+ *count = fCount;
+ if (fCount) {
+ this->flush();
+ return fBuilder->make();
+ }
+ return nullptr;
+}
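The blob cache in this file pairs two hash lookups (blob id for plain fills, blob id plus style key otherwise) with an LRU list and a fixed byte budget; purgeToFit() evicts from the LRU head until the new blob fits under the 4 MB cap. A self-contained sketch of that purge-to-fit pattern using standard containers (names are illustrative, not Skia API):

#include <cstddef>
#include <cstdint>
#include <list>
#include <unordered_map>
#include <utility>

// Generic LRU cache with a byte budget: before inserting, evict
// least-recently-used entries until the newcomer fits (cf. purgeToFit()).
struct BudgetedLruSketch {
    using Entry = std::pair<uint32_t, size_t>;        // {id, byte size}
    size_t budget = 4 * 1024 * 1024;                  // ~4 MB, like maxCacheSize
    size_t used = 0;
    std::list<Entry> lru;                             // front = most recently used
    std::unordered_map<uint32_t, std::list<Entry>::iterator> index;

    void insert(uint32_t id, size_t bytes) {
        while (used != 0 && used + bytes > budget) {
            const Entry& victim = lru.back();
            used -= victim.second;
            index.erase(victim.first);
            lru.pop_back();
        }
        lru.emplace_front(id, bytes);
        index[id] = lru.begin();
        used += bytes;
    }
};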
diff --git a/gfx/skia/skia/src/gpu/text/GrStencilAndCoverTextContext.h b/gfx/skia/skia/src/gpu/text/GrStencilAndCoverTextContext.h
new file mode 100644
index 000000000..0f6735e13
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrStencilAndCoverTextContext.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrStencilAndCoverTextContext_DEFINED
+#define GrStencilAndCoverTextContext_DEFINED
+
+#include "GrDrawContext.h"
+#include "GrStyle.h"
+#include "SkDrawFilter.h"
+#include "SkOpts.h"
+#include "SkTextBlob.h"
+#include "SkTHash.h"
+#include "SkTInternalLList.h"
+#include "SkTLList.h"
+#include "batches/GrDrawPathBatch.h"
+
+class GrAtlasTextContext;
+class GrTextStrike;
+class GrPath;
+class SkSurfaceProps;
+
+/*
+ * This class implements text rendering using stencil and cover path rendering
+ * (by the means of GrDrawTarget::drawPath).
+ */
+class GrStencilAndCoverTextContext {
+public:
+ static GrStencilAndCoverTextContext* Create(GrAtlasTextContext* fallbackTextContext);
+
+ void drawText(GrContext*, GrDrawContext* dc,
+ const GrClip&, const GrPaint&, const SkPaint&,
+ const SkMatrix& viewMatrix, const SkSurfaceProps&, const char text[],
+ size_t byteLength, SkScalar x,
+ SkScalar y, const SkIRect& clipBounds);
+ void drawPosText(GrContext*, GrDrawContext*,
+ const GrClip&, const GrPaint&, const SkPaint&,
+ const SkMatrix& viewMatrix, const SkSurfaceProps&,
+ const char text[], size_t byteLength,
+ const SkScalar pos[], int scalarsPerPosition,
+ const SkPoint& offset, const SkIRect& clipBounds);
+ void drawTextBlob(GrContext*, GrDrawContext*, const GrClip&, const SkPaint&,
+ const SkMatrix& viewMatrix, const SkSurfaceProps&, const SkTextBlob*,
+ SkScalar x, SkScalar y,
+ SkDrawFilter*, const SkIRect& clipBounds);
+
+ virtual ~GrStencilAndCoverTextContext();
+
+private:
+ GrStencilAndCoverTextContext(GrAtlasTextContext* fallbackTextContext);
+
+ bool canDraw(const SkPaint& skPaint, const SkMatrix&) {
+ return this->internalCanDraw(skPaint);
+ }
+
+ bool internalCanDraw(const SkPaint&);
+
+ void uncachedDrawTextBlob(GrContext*, GrDrawContext* dc,
+ const GrClip& clip, const SkPaint& skPaint,
+ const SkMatrix& viewMatrix,
+ const SkSurfaceProps&,
+ const SkTextBlob* blob,
+ SkScalar x, SkScalar y,
+ SkDrawFilter* drawFilter,
+ const SkIRect& clipBounds);
+
+ class FallbackBlobBuilder;
+
+ class TextRun {
+ public:
+ TextRun(const SkPaint& fontAndStroke);
+ ~TextRun();
+
+ void setText(const char text[], size_t byteLength, SkScalar x, SkScalar y);
+
+ void setPosText(const char text[], size_t byteLength, const SkScalar pos[],
+ int scalarsPerPosition, const SkPoint& offset);
+
+ void draw(GrContext*, GrDrawContext*, const GrPaint&, const GrClip&,
+ const SkMatrix&, const SkSurfaceProps&,
+ SkScalar x, SkScalar y, const SkIRect& clipBounds,
+ GrAtlasTextContext* fallbackTextContext, const SkPaint& originalSkPaint) const;
+
+ void releaseGlyphCache() const;
+
+ size_t computeSizeInCache() const;
+
+ bool isAntiAlias() const { return fFont.isAntiAlias(); }
+
+ private:
+ typedef GrDrawPathRangeBatch::InstanceData InstanceData;
+
+ SkGlyphCache* getGlyphCache() const;
+ GrPathRange* createGlyphs(GrContext*) const;
+ void appendGlyph(const SkGlyph&, const SkPoint&, FallbackBlobBuilder*);
+
+ GrStyle fStyle;
+ SkPaint fFont;
+ SkScalar fTextRatio;
+ float fTextInverseRatio;
+ bool fUsingRawGlyphPaths;
+ GrUniqueKey fGlyphPathsKey;
+ int fTotalGlyphCount;
+ SkAutoTUnref<InstanceData> fInstanceData;
+ int fFallbackGlyphCount;
+ sk_sp<SkTextBlob> fFallbackTextBlob;
+ mutable SkGlyphCache* fDetachedGlyphCache;
+ mutable uint32_t fLastDrawnGlyphsID;
+ };
+
+ // Text blobs/caches.
+
+ class TextBlob : public SkTLList<TextRun, 1> {
+ public:
+ typedef SkTArray<uint32_t, true> Key;
+
+ static const Key& GetKey(const TextBlob* blob) { return blob->key(); }
+
+ static uint32_t Hash(const Key& key) {
+ SkASSERT(key.count() > 1); // 1-length keys should be using the blob-id hash map.
+ return SkOpts::hash(key.begin(), sizeof(uint32_t) * key.count());
+ }
+
+ TextBlob(uint32_t blobId, const SkTextBlob* skBlob, const SkPaint& skPaint)
+ : fKey(&blobId, 1) { this->init(skBlob, skPaint); }
+
+ TextBlob(const Key& key, const SkTextBlob* skBlob, const SkPaint& skPaint)
+ : fKey(key) {
+            // 1-length keys are understood to be the blob id and must use the other constructor.
+ SkASSERT(fKey.count() > 1);
+ this->init(skBlob, skPaint);
+ }
+
+ const Key& key() const { return fKey; }
+
+ size_t cpuMemorySize() const { return fCpuMemorySize; }
+
+ private:
+ void init(const SkTextBlob*, const SkPaint&);
+
+ const SkSTArray<1, uint32_t, true> fKey;
+ size_t fCpuMemorySize;
+
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(TextBlob);
+ };
+
+ const TextBlob& findOrCreateTextBlob(const SkTextBlob*, const SkPaint&);
+ void purgeToFit(const TextBlob&);
+
+ GrAtlasTextContext* fFallbackTextContext;
+ SkTHashMap<uint32_t, TextBlob*> fBlobIdCache;
+ SkTHashTable<TextBlob*, const TextBlob::Key&, TextBlob> fBlobKeyCache;
+ SkTInternalLList<TextBlob> fLRUList;
+ size_t fCacheSize;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/text/GrTextBlobCache.cpp b/gfx/skia/skia/src/gpu/text/GrTextBlobCache.cpp
new file mode 100644
index 000000000..ce74977e4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrTextBlobCache.cpp
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrTextBlobCache.h"
+
+GrTextBlobCache::~GrTextBlobCache() {
+ this->freeAll();
+}
+
+void GrTextBlobCache::freeAll() {
+ SkTDynamicHash<GrAtlasTextBlob, GrAtlasTextBlob::Key>::Iter iter(&fCache);
+ while (!iter.done()) {
+ GrAtlasTextBlob* blob = &(*iter);
+ fBlobList.remove(blob);
+ blob->unref();
+ ++iter;
+ }
+ fCache.rewind();
+
+ // There should be no allocations in the memory pool at this point
+ SkASSERT(fPool.isEmpty());
+}
diff --git a/gfx/skia/skia/src/gpu/text/GrTextBlobCache.h b/gfx/skia/skia/src/gpu/text/GrTextBlobCache.h
new file mode 100644
index 000000000..e3b2ca73f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrTextBlobCache.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextBlobCache_DEFINED
+#define GrTextBlobCache_DEFINED
+
+#include "GrAtlasTextContext.h"
+#include "SkTDynamicHash.h"
+#include "SkTextBlobRunIterator.h"
+
+class GrTextBlobCache {
+public:
+ /**
+ * The callback function used by the cache when it is still over budget after a purge. The
+ * passed in 'data' is the same 'data' handed to setOverbudgetCallback.
+ */
+ typedef void (*PFOverBudgetCB)(void* data);
+
+ GrTextBlobCache(PFOverBudgetCB cb, void* data)
+ : fPool(kPreAllocSize, kMinGrowthSize)
+ , fCallback(cb)
+ , fData(data)
+ , fBudget(kDefaultBudget) {
+ SkASSERT(cb && data);
+ }
+ ~GrTextBlobCache();
+
+ // creates an uncached blob
+ GrAtlasTextBlob* createBlob(int glyphCount, int runCount) {
+ return GrAtlasTextBlob::Create(&fPool, glyphCount, runCount);
+ }
+ GrAtlasTextBlob* createBlob(const SkTextBlob* blob) {
+ int glyphCount = 0;
+ int runCount = 0;
+ BlobGlyphCount(&glyphCount, &runCount, blob);
+ GrAtlasTextBlob* cacheBlob = GrAtlasTextBlob::Create(&fPool, glyphCount, runCount);
+ return cacheBlob;
+ }
+
+ GrAtlasTextBlob* createCachedBlob(const SkTextBlob* blob,
+ const GrAtlasTextBlob::Key& key,
+ const SkMaskFilter::BlurRec& blurRec,
+ const SkPaint& paint) {
+ int glyphCount = 0;
+ int runCount = 0;
+ BlobGlyphCount(&glyphCount, &runCount, blob);
+ GrAtlasTextBlob* cacheBlob = GrAtlasTextBlob::Create(&fPool, glyphCount, runCount);
+ cacheBlob->setupKey(key, blurRec, paint);
+ this->add(cacheBlob);
+ return cacheBlob;
+ }
+
+ GrAtlasTextBlob* find(const GrAtlasTextBlob::Key& key) {
+ return fCache.find(key);
+ }
+
+ void remove(GrAtlasTextBlob* blob) {
+ fCache.remove(blob->key());
+ fBlobList.remove(blob);
+ blob->unref();
+ }
+
+ void add(GrAtlasTextBlob* blob) {
+ fCache.add(blob);
+ fBlobList.addToHead(blob);
+
+ this->checkPurge(blob);
+ }
+
+ void makeMRU(GrAtlasTextBlob* blob) {
+ if (fBlobList.head() == blob) {
+ return;
+ }
+
+ fBlobList.remove(blob);
+ fBlobList.addToHead(blob);
+ }
+
+ void freeAll();
+
+ // TODO move to SkTextBlob
+ static void BlobGlyphCount(int* glyphCount, int* runCount, const SkTextBlob* blob) {
+ SkTextBlobRunIterator itCounter(blob);
+ for (; !itCounter.done(); itCounter.next(), (*runCount)++) {
+ *glyphCount += itCounter.glyphCount();
+ }
+ }
+
+ void setBudget(size_t budget) {
+ fBudget = budget;
+ this->checkPurge();
+ }
+
+private:
+ typedef SkTInternalLList<GrAtlasTextBlob> BitmapBlobList;
+
+ void checkPurge(GrAtlasTextBlob* blob = nullptr) {
+        // If we are over budget, unref until we are below budget again.
+ if (fPool.size() > fBudget) {
+ BitmapBlobList::Iter iter;
+ iter.init(fBlobList, BitmapBlobList::Iter::kTail_IterStart);
+ GrAtlasTextBlob* lruBlob = nullptr;
+ while (fPool.size() > fBudget && (lruBlob = iter.get()) && lruBlob != blob) {
+ fCache.remove(lruBlob->key());
+
+ // Backup the iterator before removing and unrefing the blob
+ iter.prev();
+ fBlobList.remove(lruBlob);
+ lruBlob->unref();
+ }
+
+            // If we break out of the loop with lruBlob == blob, then we haven't purged enough;
+            // use the callback and try to free some more. If we are still over budget after
+            // this, then this single text blob is over our budget.
+ if (blob && lruBlob == blob) {
+ (*fCallback)(fData);
+ }
+
+#ifdef SPEW_BUDGET_MESSAGE
+ if (fPool.size() > fBudget) {
+ SkDebugf("Single textblob is larger than our whole budget");
+ }
+#endif
+ }
+ }
+
+    // The budget was chosen to be ~4 megabytes. The min-alloc and pre-alloc sizes in the pool
+    // are based on the largest cached text blob observed in the skps (a couple of kilobytes).
+ static const int kPreAllocSize = 1 << 17;
+ static const int kMinGrowthSize = 1 << 17;
+ static const int kDefaultBudget = 1 << 22;
+ BitmapBlobList fBlobList;
+ SkTDynamicHash<GrAtlasTextBlob, GrAtlasTextBlob::Key> fCache;
+ GrMemoryPool fPool;
+ PFOverBudgetCB fCallback;
+ void* fData;
+ size_t fBudget;
+};
+
+#endif
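One subtlety in checkPurge() above: if walking the LRU tail reaches the blob that was just added without getting under budget, the cache cannot free any more on its own, so it invokes the over-budget callback handed to the constructor. A reduced sketch of that escape hatch (illustrative signatures, not Skia API):

#include <cstddef>
#include <functional>

// 'evict' is assumed to free LRU entries other than 'justAdded' and return the
// bytes still in use; the callback is the last resort when that isn't enough.
static void check_purge_sketch(size_t used, size_t budget, const void* justAdded,
                               const std::function<size_t(const void*)>& evict,
                               const std::function<void()>& overBudgetCallback) {
    if (used <= budget) {
        return;
    }
    used = evict(justAdded);
    if (used > budget) {
        overBudgetCallback();   // a single blob (or the new one) exceeds the budget
    }
}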
diff --git a/gfx/skia/skia/src/gpu/text/GrTextUtils.cpp b/gfx/skia/skia/src/gpu/text/GrTextUtils.cpp
new file mode 100644
index 000000000..a5685f038
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrTextUtils.cpp
@@ -0,0 +1,570 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrTextUtils.h"
+
+#include "GrAtlasTextBlob.h"
+#include "GrBatchFontCache.h"
+#include "GrBlurUtils.h"
+#include "GrCaps.h"
+#include "GrContext.h"
+#include "GrDrawContext.h"
+
+#include "SkDistanceFieldGen.h"
+#include "SkDrawProcs.h"
+#include "SkFindAndPlaceGlyph.h"
+#include "SkGlyphCache.h"
+#include "SkPaint.h"
+#include "SkRect.h"
+#include "SkTextMapStateProc.h"
+#include "SkTextToPathIter.h"
+
+namespace {
+static const int kMinDFFontSize = 18;
+static const int kSmallDFFontSize = 32;
+static const int kSmallDFFontLimit = 32;
+static const int kMediumDFFontSize = 72;
+static const int kMediumDFFontLimit = 72;
+static const int kLargeDFFontSize = 162;
+#ifdef SK_BUILD_FOR_ANDROID
+static const int kLargeDFFontLimit = 384;
+#else
+static const int kLargeDFFontLimit = 2 * kLargeDFFontSize;
+#endif
+}  // namespace
+
+void GrTextUtils::DrawBmpText(GrAtlasTextBlob* blob, int runIndex,
+ GrBatchFontCache* fontCache,
+ const SkSurfaceProps& props, const SkPaint& skPaint,
+ GrColor color, uint32_t scalerContextFlags,
+ const SkMatrix& viewMatrix,
+ const char text[], size_t byteLength,
+ SkScalar x, SkScalar y) {
+ SkASSERT(byteLength == 0 || text != nullptr);
+
+ // nothing to draw
+ if (text == nullptr || byteLength == 0) {
+ return;
+ }
+
+ // Ensure the blob is set for bitmaptext
+ blob->setHasBitmap();
+
+ GrBatchTextStrike* currStrike = nullptr;
+
+ SkGlyphCache* cache = blob->setupCache(runIndex, props, scalerContextFlags, skPaint,
+ &viewMatrix);
+ SkFindAndPlaceGlyph::ProcessText(
+ skPaint.getTextEncoding(), text, byteLength,
+ {x, y}, viewMatrix, skPaint.getTextAlign(),
+ cache,
+ [&](const SkGlyph& glyph, SkPoint position, SkPoint rounding) {
+ position += rounding;
+ BmpAppendGlyph(
+ blob, runIndex, fontCache, &currStrike, glyph,
+ SkScalarFloorToInt(position.fX), SkScalarFloorToInt(position.fY),
+ color, cache);
+ }
+ );
+
+ SkGlyphCache::AttachCache(cache);
+}
+
+void GrTextUtils::DrawBmpPosText(GrAtlasTextBlob* blob, int runIndex,
+ GrBatchFontCache* fontCache,
+ const SkSurfaceProps& props, const SkPaint& skPaint,
+ GrColor color, uint32_t scalerContextFlags,
+ const SkMatrix& viewMatrix,
+ const char text[], size_t byteLength,
+ const SkScalar pos[], int scalarsPerPosition,
+ const SkPoint& offset) {
+ SkASSERT(byteLength == 0 || text != nullptr);
+ SkASSERT(1 == scalarsPerPosition || 2 == scalarsPerPosition);
+
+ // nothing to draw
+ if (text == nullptr || byteLength == 0) {
+ return;
+ }
+
+ // Ensure the blob is set for bitmaptext
+ blob->setHasBitmap();
+
+ GrBatchTextStrike* currStrike = nullptr;
+
+ SkGlyphCache* cache = blob->setupCache(runIndex, props, scalerContextFlags, skPaint,
+ &viewMatrix);
+
+ SkFindAndPlaceGlyph::ProcessPosText(
+ skPaint.getTextEncoding(), text, byteLength,
+ offset, viewMatrix, pos, scalarsPerPosition,
+ skPaint.getTextAlign(), cache,
+ [&](const SkGlyph& glyph, SkPoint position, SkPoint rounding) {
+ position += rounding;
+ BmpAppendGlyph(
+ blob, runIndex, fontCache, &currStrike, glyph,
+ SkScalarFloorToInt(position.fX), SkScalarFloorToInt(position.fY),
+ color, cache);
+ }
+ );
+
+ SkGlyphCache::AttachCache(cache);
+}
+
+void GrTextUtils::BmpAppendGlyph(GrAtlasTextBlob* blob, int runIndex,
+ GrBatchFontCache* fontCache,
+ GrBatchTextStrike** strike, const SkGlyph& skGlyph,
+ int vx, int vy, GrColor color, SkGlyphCache* cache) {
+ if (!*strike) {
+ *strike = fontCache->getStrike(cache);
+ }
+
+ GrGlyph::PackedID id = GrGlyph::Pack(skGlyph.getGlyphID(),
+ skGlyph.getSubXFixed(),
+ skGlyph.getSubYFixed(),
+ GrGlyph::kCoverage_MaskStyle);
+ GrGlyph* glyph = (*strike)->getGlyph(skGlyph, id, cache);
+ if (!glyph) {
+ return;
+ }
+
+ int x = vx + glyph->fBounds.fLeft;
+ int y = vy + glyph->fBounds.fTop;
+
+ // keep them as ints until we've done the clip-test
+ int width = glyph->fBounds.width();
+ int height = glyph->fBounds.height();
+
+ SkRect r;
+ r.fLeft = SkIntToScalar(x);
+ r.fTop = SkIntToScalar(y);
+ r.fRight = r.fLeft + SkIntToScalar(width);
+ r.fBottom = r.fTop + SkIntToScalar(height);
+
+ blob->appendGlyph(runIndex, r, color, *strike, glyph, cache, skGlyph,
+ SkIntToScalar(vx), SkIntToScalar(vy), 1.0f, false);
+}
+
+bool GrTextUtils::CanDrawAsDistanceFields(const SkPaint& skPaint, const SkMatrix& viewMatrix,
+ const SkSurfaceProps& props, const GrShaderCaps& caps) {
+ // TODO: support perspective (need getMaxScale replacement)
+ if (viewMatrix.hasPerspective()) {
+ return false;
+ }
+
+ SkScalar maxScale = viewMatrix.getMaxScale();
+ SkScalar scaledTextSize = maxScale*skPaint.getTextSize();
+    // Hinted text looks far better at small resolutions.
+    // Scaling up beyond 2x yields undesirable artifacts.
+ if (scaledTextSize < kMinDFFontSize ||
+ scaledTextSize > kLargeDFFontLimit) {
+ return false;
+ }
+
+ bool useDFT = props.isUseDeviceIndependentFonts();
+#if SK_FORCE_DISTANCE_FIELD_TEXT
+ useDFT = true;
+#endif
+
+ if (!useDFT && scaledTextSize < kLargeDFFontSize) {
+ return false;
+ }
+
+ // rasterizers and mask filters modify alpha, which doesn't
+ // translate well to distance
+ if (skPaint.getRasterizer() || skPaint.getMaskFilter() || !caps.shaderDerivativeSupport()) {
+ return false;
+ }
+
+ // TODO: add some stroking support
+ if (skPaint.getStyle() != SkPaint::kFill_Style) {
+ return false;
+ }
+
+ return true;
+}
+
+void GrTextUtils::InitDistanceFieldPaint(GrAtlasTextBlob* blob,
+ SkPaint* skPaint,
+ SkScalar* textRatio,
+ const SkMatrix& viewMatrix) {
+ // getMaxScale doesn't support perspective, so neither do we at the moment
+ SkASSERT(!viewMatrix.hasPerspective());
+ SkScalar maxScale = viewMatrix.getMaxScale();
+ SkScalar textSize = skPaint->getTextSize();
+ SkScalar scaledTextSize = textSize;
+ // if we have non-unity scale, we need to choose our base text size
+ // based on the SkPaint's text size multiplied by the max scale factor
+ // TODO: do we need to do this if we're scaling down (i.e. maxScale < 1)?
+ if (maxScale > 0 && !SkScalarNearlyEqual(maxScale, SK_Scalar1)) {
+ scaledTextSize *= maxScale;
+ }
+
+ // We have three sizes of distance field text, and within each size 'bucket' there is a floor
+ // and ceiling. A scale outside of this range would require regenerating the distance fields
+ SkScalar dfMaskScaleFloor;
+ SkScalar dfMaskScaleCeil;
+ if (scaledTextSize <= kSmallDFFontLimit) {
+ dfMaskScaleFloor = kMinDFFontSize;
+ dfMaskScaleCeil = kSmallDFFontLimit;
+ *textRatio = textSize / kSmallDFFontSize;
+ skPaint->setTextSize(SkIntToScalar(kSmallDFFontSize));
+ } else if (scaledTextSize <= kMediumDFFontLimit) {
+ dfMaskScaleFloor = kSmallDFFontLimit;
+ dfMaskScaleCeil = kMediumDFFontLimit;
+ *textRatio = textSize / kMediumDFFontSize;
+ skPaint->setTextSize(SkIntToScalar(kMediumDFFontSize));
+ } else {
+ dfMaskScaleFloor = kMediumDFFontLimit;
+ dfMaskScaleCeil = kLargeDFFontLimit;
+ *textRatio = textSize / kLargeDFFontSize;
+ skPaint->setTextSize(SkIntToScalar(kLargeDFFontSize));
+ }
+
+    // Because there can be multiple runs in the blob, we want the overall maxMinScale and
+    // minMaxScale to make regeneration decisions. Specifically, we want the maximum minimum scale
+    // we can tolerate before we'd drop to a lower mip size, and the minimum maximum scale we can
+    // tolerate before we'd have to move to a larger mip size. When we actually test these values
+    // we look at the delta in scale between the new view matrix and the old view matrix, and test
+    // against these values to decide whether we can reuse or not (i.e., will a given scale change
+    // our mip level).
+ SkASSERT(dfMaskScaleFloor <= scaledTextSize && scaledTextSize <= dfMaskScaleCeil);
+ blob->setMinAndMaxScale(dfMaskScaleFloor / scaledTextSize, dfMaskScaleCeil / scaledTextSize);
+
+ skPaint->setLCDRenderText(false);
+ skPaint->setAutohinted(false);
+ skPaint->setHinting(SkPaint::kNormal_Hinting);
+ skPaint->setSubpixelText(true);
+}
+
+void GrTextUtils::DrawDFText(GrAtlasTextBlob* blob, int runIndex,
+ GrBatchFontCache* fontCache, const SkSurfaceProps& props,
+ const SkPaint& skPaint, GrColor color, uint32_t scalerContextFlags,
+ const SkMatrix& viewMatrix,
+ const char text[], size_t byteLength,
+ SkScalar x, SkScalar y) {
+ SkASSERT(byteLength == 0 || text != nullptr);
+
+ // nothing to draw
+ if (text == nullptr || byteLength == 0) {
+ return;
+ }
+
+ SkPaint::GlyphCacheProc glyphCacheProc = SkPaint::GetGlyphCacheProc(skPaint.getTextEncoding(),
+ skPaint.isDevKernText(),
+ true);
+ SkAutoDescriptor desc;
+ SkScalerContextEffects effects;
+ // We apply the fake-gamma by altering the distance in the shader, so we ignore the
+ // passed-in scaler context flags. (It's only used when we fall-back to bitmap text).
+ skPaint.getScalerContextDescriptor(&effects, &desc, props, SkPaint::kNone_ScalerContextFlags,
+ nullptr);
+ SkGlyphCache* origPaintCache = SkGlyphCache::DetachCache(skPaint.getTypeface(), effects,
+ desc.getDesc());
+
+ SkTArray<SkScalar> positions;
+
+ const char* textPtr = text;
+ SkScalar stopX = 0;
+ SkScalar stopY = 0;
+ SkScalar origin = 0;
+ switch (skPaint.getTextAlign()) {
+ case SkPaint::kRight_Align: origin = SK_Scalar1; break;
+ case SkPaint::kCenter_Align: origin = SK_ScalarHalf; break;
+ case SkPaint::kLeft_Align: origin = 0; break;
+ }
+
+ SkAutoKern autokern;
+ const char* stop = text + byteLength;
+ while (textPtr < stop) {
+ // don't need x, y here, since all subpixel variants will have the
+ // same advance
+ const SkGlyph& glyph = glyphCacheProc(origPaintCache, &textPtr);
+
+ SkScalar width = SkFloatToScalar(glyph.fAdvanceX) + autokern.adjust(glyph);
+ positions.push_back(stopX + origin * width);
+
+ SkScalar height = SkFloatToScalar(glyph.fAdvanceY);
+ positions.push_back(stopY + origin * height);
+
+ stopX += width;
+ stopY += height;
+ }
+ SkASSERT(textPtr == stop);
+
+ SkGlyphCache::AttachCache(origPaintCache);
+
+ // now adjust starting point depending on alignment
+ SkScalar alignX = stopX;
+ SkScalar alignY = stopY;
+ if (skPaint.getTextAlign() == SkPaint::kCenter_Align) {
+ alignX = SkScalarHalf(alignX);
+ alignY = SkScalarHalf(alignY);
+ } else if (skPaint.getTextAlign() == SkPaint::kLeft_Align) {
+ alignX = 0;
+ alignY = 0;
+ }
+ x -= alignX;
+ y -= alignY;
+ SkPoint offset = SkPoint::Make(x, y);
+
+ DrawDFPosText(blob, runIndex, fontCache, props, skPaint, color, scalerContextFlags, viewMatrix,
+ text, byteLength, positions.begin(), 2, offset);
+}
+
+void GrTextUtils::DrawDFPosText(GrAtlasTextBlob* blob, int runIndex,
+ GrBatchFontCache* fontCache, const SkSurfaceProps& props,
+ const SkPaint& origPaint,
+ GrColor color, uint32_t scalerContextFlags,
+ const SkMatrix& viewMatrix,
+ const char text[], size_t byteLength,
+ const SkScalar pos[], int scalarsPerPosition,
+ const SkPoint& offset) {
+ SkASSERT(byteLength == 0 || text != nullptr);
+ SkASSERT(1 == scalarsPerPosition || 2 == scalarsPerPosition);
+
+ // nothing to draw
+ if (text == nullptr || byteLength == 0) {
+ return;
+ }
+
+ SkTDArray<char> fallbackTxt;
+ SkTDArray<SkScalar> fallbackPos;
+
+ // Setup distance field paint and text ratio
+ SkScalar textRatio;
+ SkPaint dfPaint(origPaint);
+ GrTextUtils::InitDistanceFieldPaint(blob, &dfPaint, &textRatio, viewMatrix);
+ blob->setHasDistanceField();
+ blob->setSubRunHasDistanceFields(runIndex, origPaint.isLCDRenderText());
+
+ GrBatchTextStrike* currStrike = nullptr;
+
+ // We apply the fake-gamma by altering the distance in the shader, so we ignore the
+ // passed-in scaler context flags. (It's only used when we fall-back to bitmap text).
+ SkGlyphCache* cache = blob->setupCache(runIndex, props, SkPaint::kNone_ScalerContextFlags,
+ dfPaint, nullptr);
+ SkPaint::GlyphCacheProc glyphCacheProc = SkPaint::GetGlyphCacheProc(dfPaint.getTextEncoding(),
+ dfPaint.isDevKernText(),
+ true);
+
+ const char* stop = text + byteLength;
+
+ if (SkPaint::kLeft_Align == dfPaint.getTextAlign()) {
+ while (text < stop) {
+ const char* lastText = text;
+ // the last 2 parameters are ignored
+ const SkGlyph& glyph = glyphCacheProc(cache, &text);
+
+ if (glyph.fWidth) {
+ SkScalar x = offset.x() + pos[0];
+ SkScalar y = offset.y() + (2 == scalarsPerPosition ? pos[1] : 0);
+
+ if (!DfAppendGlyph(blob,
+ runIndex,
+ fontCache,
+ &currStrike,
+ glyph,
+ x, y, color, cache,
+ textRatio, viewMatrix)) {
+ // couldn't append, send to fallback
+ fallbackTxt.append(SkToInt(text-lastText), lastText);
+ *fallbackPos.append() = pos[0];
+ if (2 == scalarsPerPosition) {
+ *fallbackPos.append() = pos[1];
+ }
+ }
+ }
+ pos += scalarsPerPosition;
+ }
+ } else {
+ SkScalar alignMul = SkPaint::kCenter_Align == dfPaint.getTextAlign() ? SK_ScalarHalf
+ : SK_Scalar1;
+ while (text < stop) {
+ const char* lastText = text;
+ // the last 2 parameters are ignored
+ const SkGlyph& glyph = glyphCacheProc(cache, &text);
+
+ if (glyph.fWidth) {
+ SkScalar x = offset.x() + pos[0];
+ SkScalar y = offset.y() + (2 == scalarsPerPosition ? pos[1] : 0);
+
+ SkScalar advanceX = SkFloatToScalar(glyph.fAdvanceX) * alignMul * textRatio;
+ SkScalar advanceY = SkFloatToScalar(glyph.fAdvanceY) * alignMul * textRatio;
+
+ if (!DfAppendGlyph(blob,
+ runIndex,
+ fontCache,
+ &currStrike,
+ glyph,
+ x - advanceX, y - advanceY, color,
+ cache,
+ textRatio,
+ viewMatrix)) {
+ // couldn't append, send to fallback
+ fallbackTxt.append(SkToInt(text-lastText), lastText);
+ *fallbackPos.append() = pos[0];
+ if (2 == scalarsPerPosition) {
+ *fallbackPos.append() = pos[1];
+ }
+ }
+ }
+ pos += scalarsPerPosition;
+ }
+ }
+
+ SkGlyphCache::AttachCache(cache);
+ if (fallbackTxt.count()) {
+ blob->initOverride(runIndex);
+ GrTextUtils::DrawBmpPosText(blob, runIndex, fontCache, props,
+ origPaint, origPaint.getColor(), scalerContextFlags, viewMatrix,
+ fallbackTxt.begin(), fallbackTxt.count(),
+ fallbackPos.begin(), scalarsPerPosition, offset);
+ }
+}
+
+bool GrTextUtils::DfAppendGlyph(GrAtlasTextBlob* blob, int runIndex, GrBatchFontCache* cache,
+ GrBatchTextStrike** strike, const SkGlyph& skGlyph,
+ SkScalar sx, SkScalar sy, GrColor color,
+ SkGlyphCache* glyphCache,
+ SkScalar textRatio, const SkMatrix& viewMatrix) {
+ if (!*strike) {
+ *strike = cache->getStrike(glyphCache);
+ }
+
+ GrGlyph::PackedID id = GrGlyph::Pack(skGlyph.getGlyphID(),
+ skGlyph.getSubXFixed(),
+ skGlyph.getSubYFixed(),
+ GrGlyph::kDistance_MaskStyle);
+ GrGlyph* glyph = (*strike)->getGlyph(skGlyph, id, glyphCache);
+ if (!glyph) {
+ return true;
+ }
+
+ // fallback to color glyph support
+ if (kA8_GrMaskFormat != glyph->fMaskFormat) {
+ return false;
+ }
+
+ SkScalar dx = SkIntToScalar(glyph->fBounds.fLeft + SK_DistanceFieldInset);
+ SkScalar dy = SkIntToScalar(glyph->fBounds.fTop + SK_DistanceFieldInset);
+ SkScalar width = SkIntToScalar(glyph->fBounds.width() - 2 * SK_DistanceFieldInset);
+ SkScalar height = SkIntToScalar(glyph->fBounds.height() - 2 * SK_DistanceFieldInset);
+
+ SkScalar scale = textRatio;
+ dx *= scale;
+ dy *= scale;
+ width *= scale;
+ height *= scale;
+ sx += dx;
+ sy += dy;
+ SkRect glyphRect = SkRect::MakeXYWH(sx, sy, width, height);
+
+ blob->appendGlyph(runIndex, glyphRect, color, *strike, glyph, glyphCache, skGlyph,
+ sx - dx, sy - dy, scale, true);
+ return true;
+}
+
+void GrTextUtils::DrawTextAsPath(GrContext* context, GrDrawContext* dc,
+ const GrClip& clip,
+ const SkPaint& skPaint, const SkMatrix& viewMatrix,
+ const char text[], size_t byteLength, SkScalar x, SkScalar y,
+ const SkIRect& clipBounds) {
+ SkTextToPathIter iter(text, byteLength, skPaint, true);
+
+ SkMatrix matrix;
+ matrix.setScale(iter.getPathScale(), iter.getPathScale());
+ matrix.postTranslate(x, y);
+
+ const SkPath* iterPath;
+ SkScalar xpos, prevXPos = 0;
+
+ while (iter.next(&iterPath, &xpos)) {
+ matrix.postTranslate(xpos - prevXPos, 0);
+ if (iterPath) {
+ const SkPaint& pnt = iter.getPaint();
+ GrBlurUtils::drawPathWithMaskFilter(context, dc, clip, *iterPath,
+ pnt, viewMatrix, &matrix, clipBounds, false);
+ }
+ prevXPos = xpos;
+ }
+}
+
+void GrTextUtils::DrawPosTextAsPath(GrContext* context,
+ GrDrawContext* dc,
+ const SkSurfaceProps& props,
+ const GrClip& clip,
+ const SkPaint& origPaint, const SkMatrix& viewMatrix,
+ const char text[], size_t byteLength,
+ const SkScalar pos[], int scalarsPerPosition,
+ const SkPoint& offset, const SkIRect& clipBounds) {
+    // Set up our standard paint, in hopes of getting hits in the cache.
+ SkPaint paint(origPaint);
+ SkScalar matrixScale = paint.setupForAsPaths();
+
+ SkMatrix matrix;
+ matrix.setScale(matrixScale, matrixScale);
+
+ // Temporarily jam in kFill, so we only ever ask for the raw outline from the cache.
+ paint.setStyle(SkPaint::kFill_Style);
+ paint.setPathEffect(nullptr);
+
+ SkPaint::GlyphCacheProc glyphCacheProc = SkPaint::GetGlyphCacheProc(paint.getTextEncoding(),
+ paint.isDevKernText(),
+ true);
+ SkAutoGlyphCache autoCache(paint, &props, nullptr);
+ SkGlyphCache* cache = autoCache.getCache();
+
+ const char* stop = text + byteLength;
+ SkTextAlignProc alignProc(paint.getTextAlign());
+ SkTextMapStateProc tmsProc(SkMatrix::I(), offset, scalarsPerPosition);
+
+ // Now restore the original settings, so we "draw" with whatever style/stroking.
+ paint.setStyle(origPaint.getStyle());
+ paint.setPathEffect(sk_ref_sp(origPaint.getPathEffect()));
+
+ while (text < stop) {
+ const SkGlyph& glyph = glyphCacheProc(cache, &text);
+ if (glyph.fWidth) {
+ const SkPath* path = cache->findPath(glyph);
+ if (path) {
+ SkPoint tmsLoc;
+ tmsProc(pos, &tmsLoc);
+ SkPoint loc;
+ alignProc(tmsLoc, glyph, &loc);
+
+ matrix[SkMatrix::kMTransX] = loc.fX;
+ matrix[SkMatrix::kMTransY] = loc.fY;
+ GrBlurUtils::drawPathWithMaskFilter(context, dc, clip, *path, paint,
+ viewMatrix, &matrix, clipBounds, false);
+ }
+ }
+ pos += scalarsPerPosition;
+ }
+}
+
+bool GrTextUtils::ShouldDisableLCD(const SkPaint& paint) {
+ return paint.getMaskFilter() ||
+ paint.getRasterizer() ||
+ paint.getPathEffect() ||
+ paint.isFakeBoldText() ||
+ paint.getStyle() != SkPaint::kFill_Style;
+}
+
+uint32_t GrTextUtils::FilterTextFlags(const SkSurfaceProps& surfaceProps, const SkPaint& paint) {
+ uint32_t flags = paint.getFlags();
+
+ if (!paint.isLCDRenderText() || !paint.isAntiAlias()) {
+ return flags;
+ }
+
+ if (kUnknown_SkPixelGeometry == surfaceProps.pixelGeometry() || ShouldDisableLCD(paint)) {
+ flags &= ~SkPaint::kLCDRenderText_Flag;
+ flags |= SkPaint::kGenA8FromLCD_Flag;
+ }
+
+ return flags;
+}
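InitDistanceFieldPaint() above snaps the scaled text size into one of three distance-field buckets and records a textRatio so glyphs can be drawn from the bucket's canonical size. A small sketch of that bucket selection, using the non-Android constants from the top of this file (the helper name is an assumption for illustration):

#include <utility>

// Returns {canonical DF text size, textRatio} for a given paint text size and
// the view matrix's max scale, mirroring the bucket logic above.
static std::pair<int, float> pick_df_bucket(float textSize, float maxScale) {
    const int kSmallDFFontSize = 32,  kSmallDFFontLimit = 32;
    const int kMediumDFFontSize = 72, kMediumDFFontLimit = 72;
    const int kLargeDFFontSize = 162;

    const float scaled = (maxScale > 0) ? textSize * maxScale : textSize;
    if (scaled <= kSmallDFFontLimit)  return {kSmallDFFontSize,  textSize / kSmallDFFontSize};
    if (scaled <= kMediumDFFontLimit) return {kMediumDFFontSize, textSize / kMediumDFFontSize};
    return {kLargeDFFontSize, textSize / kLargeDFFontSize};
}
// Example: textSize = 48 at maxScale = 1 lands in the medium bucket, so the
// paint is set to 72 and textRatio = 48 / 72 ≈ 0.67.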
diff --git a/gfx/skia/skia/src/gpu/text/GrTextUtils.h b/gfx/skia/skia/src/gpu/text/GrTextUtils.h
new file mode 100644
index 000000000..c218ab7da
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/text/GrTextUtils.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTextUtils_DEFINED
+#define GrTextUtils_DEFINED
+
+#include "GrColor.h"
+#include "SkPaint.h"
+#include "SkScalar.h"
+
+class GrAtlasTextBlob;
+class GrBatchFontCache;
+class GrBatchTextStrike;
+class GrClip;
+class GrContext;
+class GrDrawContext;
+class GrShaderCaps;
+class SkGlyph;
+class SkMatrix;
+struct SkIRect;
+struct SkPoint;
+class SkGlyphCache;
+class SkSurfaceProps;
+
+/*
+ * A class to house a bunch of common text utilities. This class should *ONLY* have static
+ * functions. It is not a namespace only because we wish to friend SkPaint.
+ *
+ */
+class GrTextUtils {
+public:
+ // Functions for appending BMP text to GrAtlasTextBlob
+ static void DrawBmpText(GrAtlasTextBlob*, int runIndex,
+ GrBatchFontCache*, const SkSurfaceProps&,
+ const SkPaint&,
+ GrColor, uint32_t scalerContextFlags, const SkMatrix& viewMatrix,
+ const char text[], size_t byteLength,
+ SkScalar x, SkScalar y);
+
+ static void DrawBmpPosText(GrAtlasTextBlob*, int runIndex,
+ GrBatchFontCache*, const SkSurfaceProps&, const SkPaint&,
+ GrColor, uint32_t scalerContextFlags, const SkMatrix& viewMatrix,
+ const char text[], size_t byteLength,
+ const SkScalar pos[], int scalarsPerPosition,
+ const SkPoint& offset);
+
+ // functions for appending distance field text
+ static bool CanDrawAsDistanceFields(const SkPaint& skPaint, const SkMatrix& viewMatrix,
+ const SkSurfaceProps& props, const GrShaderCaps& caps);
+
+ static void DrawDFText(GrAtlasTextBlob* blob, int runIndex,
+ GrBatchFontCache*, const SkSurfaceProps&,
+ const SkPaint& skPaint, GrColor color, uint32_t scalerContextFlags,
+ const SkMatrix& viewMatrix,
+ const char text[], size_t byteLength,
+ SkScalar x, SkScalar y);
+
+ static void DrawDFPosText(GrAtlasTextBlob* blob, int runIndex,
+ GrBatchFontCache*, const SkSurfaceProps&, const SkPaint&,
+ GrColor color, uint32_t scalerContextFlags,
+ const SkMatrix& viewMatrix,
+ const char text[], size_t byteLength,
+ const SkScalar pos[], int scalarsPerPosition,
+ const SkPoint& offset);
+
+ // Functions for drawing text as paths
+ static void DrawTextAsPath(GrContext*, GrDrawContext*, const GrClip& clip,
+ const SkPaint& origPaint, const SkMatrix& viewMatrix,
+ const char text[], size_t byteLength, SkScalar x, SkScalar y,
+ const SkIRect& clipBounds);
+
+ static void DrawPosTextAsPath(GrContext* context,
+ GrDrawContext* dc,
+ const SkSurfaceProps& props,
+ const GrClip& clip,
+ const SkPaint& origPaint, const SkMatrix& viewMatrix,
+ const char text[], size_t byteLength,
+ const SkScalar pos[], int scalarsPerPosition,
+ const SkPoint& offset, const SkIRect& clipBounds);
+
+ static bool ShouldDisableLCD(const SkPaint& paint);
+
+ static uint32_t FilterTextFlags(const SkSurfaceProps& surfaceProps, const SkPaint& paint);
+
+private:
+ static void InitDistanceFieldPaint(GrAtlasTextBlob* blob,
+ SkPaint* skPaint,
+ SkScalar* textRatio,
+ const SkMatrix& viewMatrix);
+
+ static void BmpAppendGlyph(GrAtlasTextBlob*, int runIndex, GrBatchFontCache*,
+ GrBatchTextStrike**, const SkGlyph&, int left, int top,
+ GrColor color, SkGlyphCache*);
+
+ static bool DfAppendGlyph(GrAtlasTextBlob*, int runIndex, GrBatchFontCache*,
+ GrBatchTextStrike**, const SkGlyph&,
+ SkScalar sx, SkScalar sy, GrColor color,
+ SkGlyphCache* cache,
+ SkScalar textRatio, const SkMatrix& viewMatrix);
+};
+
+#endif
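A short usage sketch for the two public helpers at the bottom of this header (the wrapper function is an assumption for illustration). FilterTextFlags() is how callers in this patch sanitize a paint before GPU text: when LCD text cannot be honored (unknown pixel geometry, or ShouldDisableLCD() returns true), it swaps kLCDRenderText_Flag for kGenA8FromLCD_Flag:

#include "GrTextUtils.h"
#include "SkPaint.h"
#include "SkSurfaceProps.h"

// Sketch: downgrade LCD text to A8 when the paint/surface can't support it.
static void filter_paint_for_gpu_text(SkPaint* paint, const SkSurfaceProps& props) {
    paint->setFlags(GrTextUtils::FilterTextFlags(props, *paint));
}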
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkBackendContext.cpp b/gfx/skia/skia/src/gpu/vk/GrVkBackendContext.cpp
new file mode 100644
index 000000000..a2165b410
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkBackendContext.cpp
@@ -0,0 +1,275 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "vk/GrVkBackendContext.h"
+#include "vk/GrVkExtensions.h"
+#include "vk/GrVkInterface.h"
+#include "vk/GrVkUtil.h"
+
+////////////////////////////////////////////////////////////////////////////////
+// Helper code to set up Vulkan context objects
+
+#ifdef SK_ENABLE_VK_LAYERS
+const char* kDebugLayerNames[] = {
+ // elements of VK_LAYER_LUNARG_standard_validation
+ "VK_LAYER_GOOGLE_threading",
+ "VK_LAYER_LUNARG_parameter_validation",
+ "VK_LAYER_LUNARG_device_limits",
+ "VK_LAYER_LUNARG_object_tracker",
+ "VK_LAYER_LUNARG_image",
+ "VK_LAYER_LUNARG_core_validation",
+ "VK_LAYER_LUNARG_swapchain",
+ "VK_LAYER_GOOGLE_unique_objects",
+ // not included in standard_validation
+ //"VK_LAYER_LUNARG_api_dump",
+ //"VK_LAYER_LUNARG_vktrace",
+ //"VK_LAYER_LUNARG_screenshot",
+};
+#endif
+
+// the minimum version of Vulkan supported
+#ifdef SK_BUILD_FOR_ANDROID
+const uint32_t kGrVkMinimumVersion = VK_MAKE_VERSION(1, 0, 3);
+#else
+const uint32_t kGrVkMinimumVersion = VK_MAKE_VERSION(1, 0, 8);
+#endif
+
+// Create the base Vulkan objects needed by the GrVkGpu object
+const GrVkBackendContext* GrVkBackendContext::Create(uint32_t* presentQueueIndexPtr,
+ CanPresentFn canPresent) {
+ VkPhysicalDevice physDev;
+ VkDevice device;
+ VkInstance inst;
+ VkResult err;
+
+ const VkApplicationInfo app_info = {
+ VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
+ nullptr, // pNext
+ "vktest", // pApplicationName
+ 0, // applicationVersion
+ "vktest", // pEngineName
+ 0, // engineVersion
+ kGrVkMinimumVersion, // apiVersion
+ };
+
+ GrVkExtensions extensions;
+ extensions.initInstance(kGrVkMinimumVersion);
+
+ SkTArray<const char*> instanceLayerNames;
+ SkTArray<const char*> instanceExtensionNames;
+ uint32_t extensionFlags = 0;
+#ifdef SK_ENABLE_VK_LAYERS
+ for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
+ if (extensions.hasInstanceLayer(kDebugLayerNames[i])) {
+ instanceLayerNames.push_back(kDebugLayerNames[i]);
+ }
+ }
+ if (extensions.hasInstanceExtension(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
+ instanceExtensionNames.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME);
+ extensionFlags |= kEXT_debug_report_GrVkExtensionFlag;
+ }
+#endif
+
+ if (extensions.hasInstanceExtension(VK_KHR_SURFACE_EXTENSION_NAME)) {
+ instanceExtensionNames.push_back(VK_KHR_SURFACE_EXTENSION_NAME);
+ extensionFlags |= kKHR_surface_GrVkExtensionFlag;
+ }
+ if (extensions.hasInstanceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
+ instanceExtensionNames.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
+ extensionFlags |= kKHR_swapchain_GrVkExtensionFlag;
+ }
+#ifdef SK_BUILD_FOR_WIN
+ if (extensions.hasInstanceExtension(VK_KHR_WIN32_SURFACE_EXTENSION_NAME)) {
+ instanceExtensionNames.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
+ extensionFlags |= kKHR_win32_surface_GrVkExtensionFlag;
+ }
+#elif defined(SK_BUILD_FOR_ANDROID)
+ if (extensions.hasInstanceExtension(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
+ instanceExtensionNames.push_back(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME);
+ extensionFlags |= kKHR_android_surface_GrVkExtensionFlag;
+ }
+#elif defined(SK_BUILD_FOR_UNIX)
+ if (extensions.hasInstanceExtension(VK_KHR_XCB_SURFACE_EXTENSION_NAME)) {
+ instanceExtensionNames.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME);
+ extensionFlags |= kKHR_xcb_surface_GrVkExtensionFlag;
+ }
+#endif
+
+ const VkInstanceCreateInfo instance_create = {
+ VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // flags
+ &app_info, // pApplicationInfo
+ (uint32_t) instanceLayerNames.count(), // enabledLayerNameCount
+ instanceLayerNames.begin(), // ppEnabledLayerNames
+ (uint32_t) instanceExtensionNames.count(), // enabledExtensionNameCount
+ instanceExtensionNames.begin(), // ppEnabledExtensionNames
+ };
+
+ err = vkCreateInstance(&instance_create, nullptr, &inst);
+ if (err < 0) {
+ SkDebugf("vkCreateInstance failed: %d\n", err);
+ return nullptr;
+ }
+
+ uint32_t gpuCount;
+ err = vkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
+ if (err) {
+ SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
+ vkDestroyInstance(inst, nullptr);
+ return nullptr;
+ }
+ SkASSERT(gpuCount > 0);
+ // Just returning the first physical device instead of getting the whole array.
+ // TODO: find best match for our needs
+ gpuCount = 1;
+ err = vkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
+ if (err) {
+ SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
+ vkDestroyInstance(inst, nullptr);
+ return nullptr;
+ }
+
+ // query to get the initial queue props size
+ uint32_t queueCount;
+ vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
+ SkASSERT(queueCount >= 1);
+
+ SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
+ // now get the actual queue props
+ VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();
+
+ vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);
+
+ // iterate to find the graphics queue
+ uint32_t graphicsQueueIndex = queueCount;
+ for (uint32_t i = 0; i < queueCount; i++) {
+ if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
+ graphicsQueueIndex = i;
+ break;
+ }
+ }
+ SkASSERT(graphicsQueueIndex < queueCount);
+
+ // iterate to find the present queue, if needed
+ uint32_t presentQueueIndex = graphicsQueueIndex;
+ if (presentQueueIndexPtr && canPresent) {
+ for (uint32_t i = 0; i < queueCount; i++) {
+ if (canPresent(inst, physDev, i)) {
+ presentQueueIndex = i;
+ break;
+ }
+ }
+ SkASSERT(presentQueueIndex < queueCount);
+ *presentQueueIndexPtr = presentQueueIndex;
+ }
+
+ extensions.initDevice(kGrVkMinimumVersion, inst, physDev);
+
+ SkTArray<const char*> deviceLayerNames;
+ SkTArray<const char*> deviceExtensionNames;
+#ifdef SK_ENABLE_VK_LAYERS
+ for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
+ if (extensions.hasDeviceLayer(kDebugLayerNames[i])) {
+ deviceLayerNames.push_back(kDebugLayerNames[i]);
+ }
+ }
+#endif
+ if (extensions.hasDeviceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
+ deviceExtensionNames.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
+ extensionFlags |= kKHR_swapchain_GrVkExtensionFlag;
+ }
+ if (extensions.hasDeviceExtension("VK_NV_glsl_shader")) {
+ deviceExtensionNames.push_back("VK_NV_glsl_shader");
+ extensionFlags |= kNV_glsl_shader_GrVkExtensionFlag;
+ }
+
+ // query to get the physical device properties
+ VkPhysicalDeviceFeatures deviceFeatures;
+ vkGetPhysicalDeviceFeatures(physDev, &deviceFeatures);
+ // this looks like it would slow things down,
+ // and we can't depend on it on all platforms
+ deviceFeatures.robustBufferAccess = VK_FALSE;
+
+ uint32_t featureFlags = 0;
+ if (deviceFeatures.geometryShader) {
+ featureFlags |= kGeometryShader_GrVkFeatureFlag;
+ }
+ if (deviceFeatures.dualSrcBlend) {
+ featureFlags |= kDualSrcBlend_GrVkFeatureFlag;
+ }
+ if (deviceFeatures.sampleRateShading) {
+ featureFlags |= kSampleRateShading_GrVkFeatureFlag;
+ }
+
+ float queuePriorities[1] = { 0.0 };
+ // Here we assume no need for swapchain queue
+ // If one is needed, the client will need its own setup code
+ const VkDeviceQueueCreateInfo queueInfo[2] = {
+ {
+ VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // VkDeviceQueueCreateFlags
+ graphicsQueueIndex, // queueFamilyIndex
+ 1, // queueCount
+ queuePriorities, // pQueuePriorities
+ },
+ {
+ VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // VkDeviceQueueCreateFlags
+ presentQueueIndex, // queueFamilyIndex
+ 1, // queueCount
+ queuePriorities, // pQueuePriorities
+ }
+ };
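+ // Only the second queue create info is consumed when presenting requires a different
+ // queue family than graphics (see queueInfoCount below).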
+ uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;
+
+ const VkDeviceCreateInfo deviceInfo = {
+ VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // VkDeviceCreateFlags
+ queueInfoCount, // queueCreateInfoCount
+ queueInfo, // pQueueCreateInfos
+ (uint32_t) deviceLayerNames.count(), // layerCount
+ deviceLayerNames.begin(), // ppEnabledLayerNames
+ (uint32_t) deviceExtensionNames.count(), // extensionCount
+ deviceExtensionNames.begin(), // ppEnabledExtensionNames
+ &deviceFeatures // ppEnabledFeatures
+ };
+
+ err = vkCreateDevice(physDev, &deviceInfo, nullptr, &device);
+ if (err) {
+ SkDebugf("CreateDevice failed: %d\n", err);
+ vkDestroyInstance(inst, nullptr);
+ return nullptr;
+ }
+
+ VkQueue queue;
+ vkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
+
+ GrVkBackendContext* ctx = new GrVkBackendContext();
+ ctx->fInstance = inst;
+ ctx->fPhysicalDevice = physDev;
+ ctx->fDevice = device;
+ ctx->fQueue = queue;
+ ctx->fGraphicsQueueIndex = graphicsQueueIndex;
+ ctx->fMinAPIVersion = kGrVkMinimumVersion;
+ ctx->fExtensions = extensionFlags;
+ ctx->fFeatures = featureFlags;
+ ctx->fInterface.reset(GrVkCreateInterface(inst, device, extensionFlags));
+
+ return ctx;
+}
+
+GrVkBackendContext::~GrVkBackendContext() {
+ vkDeviceWaitIdle(fDevice);
+ vkDestroyDevice(fDevice, nullptr);
+ fDevice = VK_NULL_HANDLE;
+ vkDestroyInstance(fInstance, nullptr);
+ fInstance = VK_NULL_HANDLE;
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkBuffer.cpp b/gfx/skia/skia/src/gpu/vk/GrVkBuffer.cpp
new file mode 100644
index 000000000..82674b4cf
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkBuffer.cpp
@@ -0,0 +1,224 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkBuffer.h"
+#include "GrVkGpu.h"
+#include "GrVkMemory.h"
+#include "GrVkUtil.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+#ifdef SK_DEBUG
+#define VALIDATE() this->validate()
+#else
+#define VALIDATE() do {} while(false)
+#endif
+
+const GrVkBuffer::Resource* GrVkBuffer::Create(const GrVkGpu* gpu, const Desc& desc) {
+ VkBuffer buffer;
+ GrVkAlloc alloc;
+
+ // create the buffer object
+ VkBufferCreateInfo bufInfo;
+ memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
+ bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ bufInfo.flags = 0;
+ bufInfo.size = desc.fSizeInBytes;
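+ // Translate the abstract buffer type into the matching Vulkan usage bit.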
+ switch (desc.fType) {
+ case kVertex_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
+ break;
+ case kIndex_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+ break;
+ case kUniform_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+ break;
+ case kCopyRead_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+ break;
+ case kCopyWrite_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+ break;
+ }
+ if (!desc.fDynamic) {
+ bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+ }
+
+ bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ bufInfo.queueFamilyIndexCount = 0;
+ bufInfo.pQueueFamilyIndices = nullptr;
+
+ VkResult err;
+ err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
+ if (err) {
+ return nullptr;
+ }
+
+ if (!GrVkMemory::AllocAndBindBufferMemory(gpu,
+ buffer,
+ desc.fType,
+ desc.fDynamic,
+ &alloc)) {
+ return nullptr;
+ }
+
+ const GrVkBuffer::Resource* resource = new GrVkBuffer::Resource(buffer, alloc, desc.fType);
+ if (!resource) {
+ VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
+ GrVkMemory::FreeBufferMemory(gpu, desc.fType, alloc);
+ return nullptr;
+ }
+
+ return resource;
+}
+
+void GrVkBuffer::addMemoryBarrier(const GrVkGpu* gpu,
+ VkAccessFlags srcAccessMask,
+ VkAccessFlags dstAccessMask,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion) const {
+ VkBufferMemoryBarrier bufferMemoryBarrier = {
+ VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType
+ NULL, // pNext
+ srcAccessMask, // srcAccessMask
+ dstAccessMask, // dstAccessMask
+ VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
+ VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
+ this->buffer(), // buffer
+ 0, // offset
+ fDesc.fSizeInBytes, // size
+ };
+
+ // TODO: restrict to area of buffer we're interested in
+ gpu->addBufferMemoryBarrier(srcStageMask, dstStageMask, byRegion, &bufferMemoryBarrier);
+}
+
+void GrVkBuffer::Resource::freeGPUData(const GrVkGpu* gpu) const {
+ SkASSERT(fBuffer);
+ SkASSERT(fAlloc.fMemory);
+ VK_CALL(gpu, DestroyBuffer(gpu->device(), fBuffer, nullptr));
+ GrVkMemory::FreeBufferMemory(gpu, fType, fAlloc);
+}
+
+void GrVkBuffer::vkRelease(const GrVkGpu* gpu) {
+ VALIDATE();
+ fResource->recycle(const_cast<GrVkGpu*>(gpu));
+ fResource = nullptr;
+ fMapPtr = nullptr;
+ VALIDATE();
+}
+
+void GrVkBuffer::vkAbandon() {
+ fResource->unrefAndAbandon();
+ fResource = nullptr;
+ fMapPtr = nullptr;
+ VALIDATE();
+}
+
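+// Maps a static buffer's type to the access mask used when inserting barriers around uploads.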
+VkAccessFlags buffer_type_to_access_flags(GrVkBuffer::Type type) {
+ switch (type) {
+ case GrVkBuffer::kIndex_Type:
+ return VK_ACCESS_INDEX_READ_BIT;
+ case GrVkBuffer::kVertex_Type:
+ return VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
+ default:
+ // This helper is only called for static buffers, so we should only ever see index or
+ // vertex buffer types
+ SkASSERT(false);
+ return 0;
+ }
+}
+
+void GrVkBuffer::internalMap(GrVkGpu* gpu, size_t size, bool* createdNewBuffer) {
+ VALIDATE();
+ SkASSERT(!this->vkIsMapped());
+
+ if (!fResource->unique()) {
+ if (fDesc.fDynamic) {
+ // in use by the command buffer, so we need to create a new one
+ fResource->recycle(gpu);
+ fResource = this->createResource(gpu, fDesc);
+ if (createdNewBuffer) {
+ *createdNewBuffer = true;
+ }
+ } else {
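+ // Static buffer still referenced by an in-flight command buffer: make the upcoming
+ // transfer write wait on any outstanding vertex/index reads.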
+ SkASSERT(fMapPtr);
+ this->addMemoryBarrier(gpu,
+ buffer_type_to_access_flags(fDesc.fType),
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+ }
+ }
+
+ if (fDesc.fDynamic) {
+ const GrVkAlloc& alloc = this->alloc();
+ VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc.fMemory,
+ alloc.fOffset + fOffset,
+ size, 0, &fMapPtr));
+ if (err) {
+ fMapPtr = nullptr;
+ }
+ } else {
+ if (!fMapPtr) {
+ fMapPtr = new unsigned char[this->size()];
+ }
+ }
+
+ VALIDATE();
+}
+
+void GrVkBuffer::internalUnmap(GrVkGpu* gpu, size_t size) {
+ VALIDATE();
+ SkASSERT(this->vkIsMapped());
+
+ if (fDesc.fDynamic) {
+ GrVkMemory::FlushMappedAlloc(gpu, this->alloc());
+ VK_CALL(gpu, UnmapMemory(gpu->device(), this->alloc().fMemory));
+ fMapPtr = nullptr;
+ } else {
+ gpu->updateBuffer(this, fMapPtr, this->offset(), size);
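+ // After the upload, make later vertex/index reads wait for the transfer write to finish.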
+ this->addMemoryBarrier(gpu,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ buffer_type_to_access_flags(fDesc.fType),
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
+ false);
+ }
+}
+
+bool GrVkBuffer::vkIsMapped() const {
+ VALIDATE();
+ return SkToBool(fMapPtr);
+}
+
+bool GrVkBuffer::vkUpdateData(GrVkGpu* gpu, const void* src, size_t srcSizeInBytes,
+ bool* createdNewBuffer) {
+ if (srcSizeInBytes > fDesc.fSizeInBytes) {
+ return false;
+ }
+
+ this->internalMap(gpu, srcSizeInBytes, createdNewBuffer);
+ if (!fMapPtr) {
+ return false;
+ }
+
+ memcpy(fMapPtr, src, srcSizeInBytes);
+
+ this->internalUnmap(gpu, srcSizeInBytes);
+
+ return true;
+}
+
+void GrVkBuffer::validate() const {
+ SkASSERT(!fResource || kVertex_Type == fDesc.fType || kIndex_Type == fDesc.fType
+ || kCopyRead_Type == fDesc.fType || kCopyWrite_Type == fDesc.fType
+ || kUniform_Type == fDesc.fType);
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkBuffer.h b/gfx/skia/skia/src/gpu/vk/GrVkBuffer.h
new file mode 100644
index 000000000..e58d5e40f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkBuffer.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkBuffer_DEFINED
+#define GrVkBuffer_DEFINED
+
+#include "GrVkResource.h"
+#include "vk/GrVkDefines.h"
+#include "vk/GrVkTypes.h"
+
+class GrVkGpu;
+
+/**
+ * This class serves as the base of GrVk*Buffer classes. It was written to avoid code
+ * duplication in those classes.
+ */
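+// Illustrative sketch (not part of the Skia sources) of how a GrVk*Buffer subclass is
+// expected to drive this base class, assuming a valid GrVkGpu* gpu:
+//
+//     void* ptr = this->vkMap(gpu);       // map dynamic memory, or a CPU-side staging copy
+//     memcpy(ptr, src, srcSizeInBytes);   // write the new contents
+//     this->vkUnmap(gpu);                 // flush, or upload + barrier for static buffers
+//
+// or, equivalently, this->vkUpdateData(gpu, src, srcSizeInBytes).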
+class GrVkBuffer : public SkNoncopyable {
+public:
+ virtual ~GrVkBuffer() {
+ // either release or abandon should have been called by the owner of this object.
+ SkASSERT(!fResource);
+ delete [] (unsigned char*)fMapPtr;
+ }
+
+ VkBuffer buffer() const { return fResource->fBuffer; }
+ const GrVkAlloc& alloc() const { return fResource->fAlloc; }
+ const GrVkRecycledResource* resource() const { return fResource; }
+ size_t size() const { return fDesc.fSizeInBytes; }
+ VkDeviceSize offset() const { return fOffset; }
+
+ void addMemoryBarrier(const GrVkGpu* gpu,
+ VkAccessFlags srcAccessMask,
+ VkAccessFlags dstAccessMask,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion) const;
+
+ enum Type {
+ kVertex_Type,
+ kIndex_Type,
+ kUniform_Type,
+ kCopyRead_Type,
+ kCopyWrite_Type,
+ };
+
+protected:
+ struct Desc {
+ size_t fSizeInBytes;
+ Type fType; // vertex buffer, index buffer, etc.
+ bool fDynamic;
+ };
+
+ class Resource : public GrVkRecycledResource {
+ public:
+ Resource(VkBuffer buf, const GrVkAlloc& alloc, Type type)
+ : INHERITED(), fBuffer(buf), fAlloc(alloc), fType(type) {}
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkBuffer: %d (%d refs)\n", fBuffer, this->getRefCnt());
+ }
+#endif
+ VkBuffer fBuffer;
+ GrVkAlloc fAlloc;
+ Type fType;
+
+ private:
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ void onRecycle(GrVkGpu* gpu) const override { this->unref(gpu); }
+
+ typedef GrVkRecycledResource INHERITED;
+ };
+
+ // convenience routine for raw buffer creation
+ static const Resource* Create(const GrVkGpu* gpu,
+ const Desc& descriptor);
+
+ GrVkBuffer(const Desc& desc, const GrVkBuffer::Resource* resource)
+ : fDesc(desc), fResource(resource), fOffset(0), fMapPtr(nullptr) {
+ }
+
+ void* vkMap(GrVkGpu* gpu) {
+ this->internalMap(gpu, fDesc.fSizeInBytes);
+ return fMapPtr;
+ }
+ void vkUnmap(GrVkGpu* gpu) { this->internalUnmap(gpu, this->size()); }
+
+ // If the caller passes in a non-null createdNewBuffer, this function will set the bool to true
+ // if it creates a new VkBuffer to upload the data to.
+ bool vkUpdateData(GrVkGpu* gpu, const void* src, size_t srcSizeInBytes,
+ bool* createdNewBuffer = nullptr);
+
+ void vkAbandon();
+ void vkRelease(const GrVkGpu* gpu);
+
+private:
+ virtual const Resource* createResource(GrVkGpu* gpu,
+ const Desc& descriptor) {
+ return Create(gpu, descriptor);
+ }
+
+ void internalMap(GrVkGpu* gpu, size_t size, bool* createdNewBuffer = nullptr);
+ void internalUnmap(GrVkGpu* gpu, size_t size);
+
+ void validate() const;
+ bool vkIsMapped() const;
+
+ Desc fDesc;
+ const Resource* fResource;
+ VkDeviceSize fOffset;
+ void* fMapPtr;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkCaps.cpp b/gfx/skia/skia/src/gpu/vk/GrVkCaps.cpp
new file mode 100644
index 000000000..d982756cc
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkCaps.cpp
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkCaps.h"
+
+#include "GrVkUtil.h"
+#include "glsl/GrGLSLCaps.h"
+#include "vk/GrVkInterface.h"
+#include "vk/GrVkBackendContext.h"
+
+GrVkCaps::GrVkCaps(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
+ VkPhysicalDevice physDev, uint32_t featureFlags, uint32_t extensionFlags)
+ : INHERITED(contextOptions) {
+ fCanUseGLSLForShaderModule = false;
+ fMustDoCopiesFromOrigin = false;
+ fAllowInitializationErrorOnTearDown = false;
+ fSupportsCopiesAsDraws = false;
+ fMustSubmitCommandsBeforeCopyOp = false;
+
+ /**************************************************************************
+ * GrDrawTargetCaps fields
+ **************************************************************************/
+ fMipMapSupport = true; // always available in Vulkan
+ fSRGBSupport = true; // always available in Vulkan
+ fNPOTTextureTileSupport = true; // always available in Vulkan
+ fTwoSidedStencilSupport = true; // always available in Vulkan
+ fStencilWrapOpsSupport = true; // always available in Vulkan
+ fDiscardRenderTargetSupport = true;
+ fReuseScratchTextures = true; //TODO: figure this out
+ fGpuTracingSupport = false; //TODO: figure this out
+ fCompressedTexSubImageSupport = false; //TODO: figure this out
+ fOversizedStencilSupport = false; //TODO: figure this out
+
+ fUseDrawInsteadOfClear = false;
+ fFenceSyncSupport = true; // always available in Vulkan
+
+ fMapBufferFlags = kNone_MapFlags; //TODO: figure this out
+ fBufferMapThreshold = SK_MaxS32; //TODO: figure this out
+
+ fMaxRenderTargetSize = 4096; // minimum required by spec
+ fMaxTextureSize = 4096; // minimum required by spec
+ fMaxColorSampleCount = 4; // minimum required by spec
+ fMaxStencilSampleCount = 4; // minimum required by spec
+
+ fShaderCaps.reset(new GrGLSLCaps(contextOptions));
+
+ this->init(contextOptions, vkInterface, physDev, featureFlags, extensionFlags);
+}
+
+void GrVkCaps::init(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
+ VkPhysicalDevice physDev, uint32_t featureFlags, uint32_t extensionFlags) {
+
+ VkPhysicalDeviceProperties properties;
+ GR_VK_CALL(vkInterface, GetPhysicalDeviceProperties(physDev, &properties));
+
+ VkPhysicalDeviceMemoryProperties memoryProperties;
+ GR_VK_CALL(vkInterface, GetPhysicalDeviceMemoryProperties(physDev, &memoryProperties));
+
+ this->initGrCaps(properties, memoryProperties, featureFlags);
+ this->initGLSLCaps(properties, featureFlags);
+ this->initConfigTable(vkInterface, physDev);
+ this->initStencilFormat(vkInterface, physDev);
+
+ if (SkToBool(extensionFlags & kNV_glsl_shader_GrVkExtensionFlag)) {
+ // Currently disabling this feature since it does not play well with validation layers, which
+ // expect a SPIR-V shader
+ // fCanUseGLSLForShaderModule = true;
+ }
+
+ if (kQualcomm_VkVendor == properties.vendorID) {
+ fMustDoCopiesFromOrigin = true;
+ fAllowInitializationErrorOnTearDown = true;
+ }
+
+ if (kNvidia_VkVendor == properties.vendorID) {
+ fSupportsCopiesAsDraws = true;
+ fMustSubmitCommandsBeforeCopyOp = true;
+ }
+
+ this->applyOptionsOverrides(contextOptions);
+ GrGLSLCaps* glslCaps = static_cast<GrGLSLCaps*>(fShaderCaps.get());
+ glslCaps->applyOptionsOverrides(contextOptions);
+}
+
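+// Converts a VkSampleCountFlags mask into the maximum sample count Skia will use: 0 when only
+// 1x is supported, otherwise the largest power of two below the first unsupported bit (capped at 64).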
+int get_max_sample_count(VkSampleCountFlags flags) {
+ SkASSERT(flags & VK_SAMPLE_COUNT_1_BIT);
+ if (!(flags & VK_SAMPLE_COUNT_2_BIT)) {
+ return 0;
+ }
+ if (!(flags & VK_SAMPLE_COUNT_4_BIT)) {
+ return 2;
+ }
+ if (!(flags & VK_SAMPLE_COUNT_8_BIT)) {
+ return 4;
+ }
+ if (!(flags & VK_SAMPLE_COUNT_16_BIT)) {
+ return 8;
+ }
+ if (!(flags & VK_SAMPLE_COUNT_32_BIT)) {
+ return 16;
+ }
+ if (!(flags & VK_SAMPLE_COUNT_64_BIT)) {
+ return 32;
+ }
+ return 64;
+}
+
+void GrVkCaps::initSampleCount(const VkPhysicalDeviceProperties& properties) {
+ VkSampleCountFlags colorSamples = properties.limits.framebufferColorSampleCounts;
+ VkSampleCountFlags stencilSamples = properties.limits.framebufferStencilSampleCounts;
+
+ fMaxColorSampleCount = get_max_sample_count(colorSamples);
+ fMaxStencilSampleCount = get_max_sample_count(stencilSamples);
+}
+
+void GrVkCaps::initGrCaps(const VkPhysicalDeviceProperties& properties,
+ const VkPhysicalDeviceMemoryProperties& memoryProperties,
+ uint32_t featureFlags) {
+ fMaxVertexAttributes = SkTMin(properties.limits.maxVertexInputAttributes, (uint32_t)INT_MAX);
+ // We could actually query and get a max size for each config; however, maxImageDimension2D will
+ // give the minimum max size across all configs. So for simplicity we will use that for now.
+ fMaxRenderTargetSize = SkTMin(properties.limits.maxImageDimension2D, (uint32_t)INT_MAX);
+ fMaxTextureSize = SkTMin(properties.limits.maxImageDimension2D, (uint32_t)INT_MAX);
+
+ this->initSampleCount(properties);
+
+ // Since we will always map in the end to upload the data, we might as well just map from the
+ // get-go. There is no hard data to suggest this is faster or slower.
+ fBufferMapThreshold = 0;
+
+ fMapBufferFlags = kCanMap_MapFlag | kSubset_MapFlag;
+
+ fStencilWrapOpsSupport = true;
+ fOversizedStencilSupport = true;
+ fSampleShadingSupport = SkToBool(featureFlags & kSampleRateShading_GrVkFeatureFlag);
+}
+
+void GrVkCaps::initGLSLCaps(const VkPhysicalDeviceProperties& properties,
+ uint32_t featureFlags) {
+ GrGLSLCaps* glslCaps = static_cast<GrGLSLCaps*>(fShaderCaps.get());
+ glslCaps->fVersionDeclString = "#version 330\n";
+
+
+ // fConfigOutputSwizzle will default to RGBA, so we only need to set it for alpha-only configs.
+ for (int i = 0; i < kGrPixelConfigCnt; ++i) {
+ GrPixelConfig config = static_cast<GrPixelConfig>(i);
+ if (GrPixelConfigIsAlphaOnly(config)) {
+ glslCaps->fConfigTextureSwizzle[i] = GrSwizzle::RRRR();
+ glslCaps->fConfigOutputSwizzle[i] = GrSwizzle::AAAA();
+ } else {
+ if (kRGBA_4444_GrPixelConfig == config) {
+ // The Vulkan spec does not require R4G4B4A4 to be supported for texturing, so we
+ // store the data in a B4G4R4A4 texture and then swizzle it when doing texture reads
+ // or writing to outputs. Since we're not actually changing the data at all, the
+ // only extra work is the swizzle in the shader for all operations.
+ glslCaps->fConfigTextureSwizzle[i] = GrSwizzle::BGRA();
+ glslCaps->fConfigOutputSwizzle[i] = GrSwizzle::BGRA();
+ } else {
+ glslCaps->fConfigTextureSwizzle[i] = GrSwizzle::RGBA();
+ }
+ }
+ }
+
+ // Vulkan is based off ES 3.0 so the following should all be supported
+ glslCaps->fUsesPrecisionModifiers = true;
+ glslCaps->fFlatInterpolationSupport = true;
+
+ // GrShaderCaps
+
+ glslCaps->fShaderDerivativeSupport = true;
+ glslCaps->fGeometryShaderSupport = SkToBool(featureFlags & kGeometryShader_GrVkFeatureFlag);
+
+ glslCaps->fDualSourceBlendingSupport = SkToBool(featureFlags & kDualSrcBlend_GrVkFeatureFlag);
+
+ glslCaps->fIntegerSupport = true;
+
+ // Assume the minimum precisions mandated by the SPIR-V spec.
+ glslCaps->fShaderPrecisionVaries = true;
+ for (int s = 0; s < kGrShaderTypeCount; ++s) {
+ auto& highp = glslCaps->fFloatPrecisions[s][kHigh_GrSLPrecision];
+ highp.fLogRangeLow = highp.fLogRangeHigh = 127;
+ highp.fBits = 23;
+
+ auto& mediump = glslCaps->fFloatPrecisions[s][kMedium_GrSLPrecision];
+ mediump.fLogRangeLow = mediump.fLogRangeHigh = 14;
+ mediump.fBits = 10;
+
+ glslCaps->fFloatPrecisions[s][kLow_GrSLPrecision] = mediump;
+ }
+ glslCaps->initSamplerPrecisionTable();
+
+ glslCaps->fMaxVertexSamplers =
+ glslCaps->fMaxGeometrySamplers =
+ glslCaps->fMaxFragmentSamplers = SkTMin(SkTMin(properties.limits.maxPerStageDescriptorSampledImages,
+ properties.limits.maxPerStageDescriptorSamplers),
+ (uint32_t)INT_MAX);
+ glslCaps->fMaxCombinedSamplers = SkTMin(SkTMin(properties.limits.maxDescriptorSetSampledImages,
+ properties.limits.maxDescriptorSetSamplers),
+ (uint32_t)INT_MAX);
+}
+
+bool stencil_format_supported(const GrVkInterface* interface,
+ VkPhysicalDevice physDev,
+ VkFormat format) {
+ VkFormatProperties props;
+ memset(&props, 0, sizeof(VkFormatProperties));
+ GR_VK_CALL(interface, GetPhysicalDeviceFormatProperties(physDev, format, &props));
+ return SkToBool(VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT & props.optimalTilingFeatures);
+}
+
+void GrVkCaps::initStencilFormat(const GrVkInterface* interface, VkPhysicalDevice physDev) {
+ // List of legal stencil formats (though perhaps not supported on
+ // the particular gpu/driver) from most preferred to least. We are guaranteed to have either
+ // VK_FORMAT_D24_UNORM_S8_UINT or VK_FORMAT_D32_SFLOAT_S8_UINT. VK_FORMAT_D32_SFLOAT_S8_UINT
+ // can optionally have 24 unused bits at the end so we assume the total bits is 64.
+ static const StencilFormat
+ // internal Format stencil bits total bits packed?
+ gS8 = { VK_FORMAT_S8_UINT, 8, 8, false },
+ gD24S8 = { VK_FORMAT_D24_UNORM_S8_UINT, 8, 32, true },
+ gD32S8 = { VK_FORMAT_D32_SFLOAT_S8_UINT, 8, 64, true };
+
+ if (stencil_format_supported(interface, physDev, VK_FORMAT_S8_UINT)) {
+ fPreferedStencilFormat = gS8;
+ } else if (stencil_format_supported(interface, physDev, VK_FORMAT_D24_UNORM_S8_UINT)) {
+ fPreferedStencilFormat = gD24S8;
+ } else {
+ SkASSERT(stencil_format_supported(interface, physDev, VK_FORMAT_D32_SFLOAT_S8_UINT));
+ fPreferedStencilFormat = gD32S8;
+ }
+}
+
+void GrVkCaps::initConfigTable(const GrVkInterface* interface, VkPhysicalDevice physDev) {
+ for (int i = 0; i < kGrPixelConfigCnt; ++i) {
+ VkFormat format;
+ if (GrPixelConfigToVkFormat(static_cast<GrPixelConfig>(i), &format)) {
+ fConfigTable[i].init(interface, physDev, format);
+ }
+ }
+}
+
+void GrVkCaps::ConfigInfo::InitConfigFlags(VkFormatFeatureFlags vkFlags, uint16_t* flags) {
+ if (SkToBool(VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT & vkFlags) &&
+ SkToBool(VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT & vkFlags)) {
+ *flags = *flags | kTextureable_Flag;
+ }
+
+ if (SkToBool(VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT & vkFlags)) {
+ *flags = *flags | kRenderable_Flag;
+ }
+
+ if (SkToBool(VK_FORMAT_FEATURE_BLIT_SRC_BIT & vkFlags)) {
+ *flags = *flags | kBlitSrc_Flag;
+ }
+
+ if (SkToBool(VK_FORMAT_FEATURE_BLIT_DST_BIT & vkFlags)) {
+ *flags = *flags | kBlitDst_Flag;
+ }
+}
+
+void GrVkCaps::ConfigInfo::init(const GrVkInterface* interface,
+ VkPhysicalDevice physDev,
+ VkFormat format) {
+ VkFormatProperties props;
+ memset(&props, 0, sizeof(VkFormatProperties));
+ GR_VK_CALL(interface, GetPhysicalDeviceFormatProperties(physDev, format, &props));
+ InitConfigFlags(props.linearTilingFeatures, &fLinearFlags);
+ InitConfigFlags(props.optimalTilingFeatures, &fOptimalFlags);
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkCaps.h b/gfx/skia/skia/src/gpu/vk/GrVkCaps.h
new file mode 100644
index 000000000..a4ce35aa0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkCaps.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkCaps_DEFINED
+#define GrVkCaps_DEFINED
+
+#include "GrCaps.h"
+#include "GrVkStencilAttachment.h"
+#include "vk/GrVkDefines.h"
+
+struct GrVkInterface;
+class GrGLSLCaps;
+
+/**
+ * Stores some capabilities of a Vk backend.
+ */
+class GrVkCaps : public GrCaps {
+public:
+ typedef GrVkStencilAttachment::Format StencilFormat;
+
+ /**
+ * Creates a GrVkCaps that is set such that nothing is supported. The init function should
+ * be called to fill out the caps.
+ */
+ GrVkCaps(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
+ VkPhysicalDevice device, uint32_t featureFlags, uint32_t extensionFlags);
+
+ bool isConfigTexturable(GrPixelConfig config) const override {
+ return SkToBool(ConfigInfo::kTextureable_Flag & fConfigTable[config].fOptimalFlags);
+ }
+
+ bool isConfigRenderable(GrPixelConfig config, bool withMSAA) const override {
+ return SkToBool(ConfigInfo::kRenderable_Flag & fConfigTable[config].fOptimalFlags);
+ }
+
+ bool isConfigTexturableLinearly(GrPixelConfig config) const {
+ return SkToBool(ConfigInfo::kTextureable_Flag & fConfigTable[config].fLinearFlags);
+ }
+
+ bool isConfigRenderableLinearly(GrPixelConfig config, bool withMSAA) const {
+ return !withMSAA && SkToBool(ConfigInfo::kRenderable_Flag &
+ fConfigTable[config].fLinearFlags);
+ }
+
+ bool configCanBeDstofBlit(GrPixelConfig config, bool linearTiled) const {
+ const uint16_t& flags = linearTiled ? fConfigTable[config].fLinearFlags :
+ fConfigTable[config].fOptimalFlags;
+ return SkToBool(ConfigInfo::kBlitDst_Flag & flags);
+ }
+
+ bool configCanBeSrcofBlit(GrPixelConfig config, bool linearTiled) const {
+ const uint16_t& flags = linearTiled ? fConfigTable[config].fLinearFlags :
+ fConfigTable[config].fOptimalFlags;
+ return SkToBool(ConfigInfo::kBlitSrc_Flag & flags);
+ }
+
+ bool canUseGLSLForShaderModule() const {
+ return fCanUseGLSLForShaderModule;
+ }
+
+ bool mustDoCopiesFromOrigin() const {
+ return fMustDoCopiesFromOrigin;
+ }
+
+ bool allowInitializationErrorOnTearDown() const {
+ return fAllowInitializationErrorOnTearDown;
+ }
+
+ bool supportsCopiesAsDraws() const {
+ return fSupportsCopiesAsDraws;
+ }
+
+ bool mustSubmitCommandsBeforeCopyOp() const {
+ return fMustSubmitCommandsBeforeCopyOp;
+ }
+
+ /**
+ * Returns both a supported and the most preferred stencil format to use in draws.
+ */
+ const StencilFormat& preferedStencilFormat() const {
+ return fPreferedStencilFormat;
+ }
+
+ GrGLSLCaps* glslCaps() const { return reinterpret_cast<GrGLSLCaps*>(fShaderCaps.get()); }
+
+private:
+ enum VkVendor {
+ kQualcomm_VkVendor = 20803,
+ kNvidia_VkVendor = 4318,
+ };
+
+ void init(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
+ VkPhysicalDevice device, uint32_t featureFlags, uint32_t extensionFlags);
+ void initGrCaps(const VkPhysicalDeviceProperties&,
+ const VkPhysicalDeviceMemoryProperties&,
+ uint32_t featureFlags);
+ void initGLSLCaps(const VkPhysicalDeviceProperties&, uint32_t featureFlags);
+ void initSampleCount(const VkPhysicalDeviceProperties& properties);
+
+
+ void initConfigTable(const GrVkInterface*, VkPhysicalDevice);
+ void initStencilFormat(const GrVkInterface* iface, VkPhysicalDevice physDev);
+
+ struct ConfigInfo {
+ ConfigInfo() : fOptimalFlags(0), fLinearFlags(0) {}
+
+ void init(const GrVkInterface*, VkPhysicalDevice, VkFormat);
+ static void InitConfigFlags(VkFormatFeatureFlags, uint16_t* flags);
+
+ enum {
+ kTextureable_Flag = 0x1,
+ kRenderable_Flag = 0x2,
+ kBlitSrc_Flag = 0x4,
+ kBlitDst_Flag = 0x8,
+ };
+
+ uint16_t fOptimalFlags;
+ uint16_t fLinearFlags;
+ };
+ ConfigInfo fConfigTable[kGrPixelConfigCnt];
+
+ StencilFormat fPreferedStencilFormat;
+
+ // Tells whether we can pass a straight GLSL string into vkCreateShaderModule
+ bool fCanUseGLSLForShaderModule;
+
+ // On Adreno Vulkan, the driver does not respect the imageOffset parameter, at least in
+ // copyImageToBuffer. This flag says that we must always do the copy starting from the origin.
+ bool fMustDoCopiesFromOrigin;
+
+ // On Adreno, there is a bug where vkQueueWaitIdle will once in a while return
+ // VK_ERROR_INITIALIZATION_FAILED instead of the required VK_SUCCESS or VK_DEVICE_LOST. This
+ // flag says we will accept VK_ERROR_INITIALIZATION_FAILED as well.
+ bool fAllowInitializationErrorOnTearDown;
+
+ // Check whether we support using draws for copies.
+ bool fSupportsCopiesAsDraws;
+
+ // On Nvidia there is a current bug where we must submit the current command buffer before copy
+ // operations or else the copy will not happen. This includes copies, blits, resolves, and
+ // copies done as draws.
+ bool fMustSubmitCommandsBeforeCopyOp;
+
+ typedef GrCaps INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkCommandBuffer.cpp b/gfx/skia/skia/src/gpu/vk/GrVkCommandBuffer.cpp
new file mode 100644
index 000000000..bc6272c0c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkCommandBuffer.cpp
@@ -0,0 +1,695 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkCommandBuffer.h"
+
+#include "GrVkFramebuffer.h"
+#include "GrVkImageView.h"
+#include "GrVkPipeline.h"
+#include "GrVkRenderPass.h"
+#include "GrVkRenderTarget.h"
+#include "GrVkPipelineState.h"
+#include "GrVkTransferBuffer.h"
+#include "GrVkUtil.h"
+#include "SkRect.h"
+
+void GrVkCommandBuffer::invalidateState() {
+ fBoundVertexBuffer = VK_NULL_HANDLE;
+ fBoundVertexBufferIsValid = false;
+ fBoundIndexBuffer = VK_NULL_HANDLE;
+ fBoundIndexBufferIsValid = false;
+
+ memset(&fCachedViewport, 0, sizeof(VkViewport));
+ fCachedViewport.width = -1.0f; // Viewport must have a width greater than 0
+
+ memset(&fCachedScissor, 0, sizeof(VkRect2D));
+ fCachedScissor.offset.x = -1; // Scissor offset must be at least 0 to be valid
+
+ for (int i = 0; i < 4; ++i) {
+ fCachedBlendConstant[i] = -1.0;
+ }
+}
+
+void GrVkCommandBuffer::freeGPUData(const GrVkGpu* gpu) const {
+ SkASSERT(!fIsActive);
+ for (int i = 0; i < fTrackedResources.count(); ++i) {
+ fTrackedResources[i]->unref(gpu);
+ }
+
+ for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
+ fTrackedRecycledResources[i]->recycle(const_cast<GrVkGpu*>(gpu));
+ }
+
+ GR_VK_CALL(gpu->vkInterface(), FreeCommandBuffers(gpu->device(), gpu->cmdPool(),
+ 1, &fCmdBuffer));
+
+ this->onFreeGPUData(gpu);
+}
+
+void GrVkCommandBuffer::abandonSubResources() const {
+ for (int i = 0; i < fTrackedResources.count(); ++i) {
+ fTrackedResources[i]->unrefAndAbandon();
+ }
+
+ for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
+ // We don't recycle resources when abandoning them.
+ fTrackedRecycledResources[i]->unrefAndAbandon();
+ }
+}
+
+void GrVkCommandBuffer::reset(GrVkGpu* gpu) {
+ SkASSERT(!fIsActive);
+ for (int i = 0; i < fTrackedResources.count(); ++i) {
+ fTrackedResources[i]->unref(gpu);
+ }
+ for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
+ fTrackedRecycledResources[i]->recycle(const_cast<GrVkGpu*>(gpu));
+ }
+
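+ // Usually just rewind the tracking arrays so their storage gets reused; every
+ // kNumRewindResetsBeforeFullReset resets, free and re-reserve them so they cannot grow unbounded.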
+ if (++fNumResets > kNumRewindResetsBeforeFullReset) {
+ fTrackedResources.reset();
+ fTrackedRecycledResources.reset();
+ fTrackedResources.setReserve(kInitialTrackedResourcesCount);
+ fTrackedRecycledResources.setReserve(kInitialTrackedResourcesCount);
+ fNumResets = 0;
+ } else {
+ fTrackedResources.rewind();
+ fTrackedRecycledResources.rewind();
+ }
+
+
+ this->invalidateState();
+
+ // we will retain resources for later use
+ VkCommandBufferResetFlags flags = 0;
+ GR_VK_CALL(gpu->vkInterface(), ResetCommandBuffer(fCmdBuffer, flags));
+
+ this->onReset(gpu);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// CommandBuffer commands
+////////////////////////////////////////////////////////////////////////////////
+
+void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ BarrierType barrierType,
+ void* barrier) const {
+ SkASSERT(fIsActive);
+ // For images we can have barriers inside of render passes but they require us to add more
+ // support in subpasses which need self dependencies to have barriers inside them. Also, we can
+ // never have buffer barriers inside of a render pass. For now we will just assert that we are
+ // not in a render pass.
+ SkASSERT(!fActiveRenderPass);
+ VkDependencyFlags dependencyFlags = byRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
+
+ switch (barrierType) {
+ case kMemory_BarrierType: {
+ const VkMemoryBarrier* barrierPtr = reinterpret_cast<VkMemoryBarrier*>(barrier);
+ GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
+ dstStageMask, dependencyFlags,
+ 1, barrierPtr,
+ 0, nullptr,
+ 0, nullptr));
+ break;
+ }
+
+ case kBufferMemory_BarrierType: {
+ const VkBufferMemoryBarrier* barrierPtr =
+ reinterpret_cast<VkBufferMemoryBarrier*>(barrier);
+ GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
+ dstStageMask, dependencyFlags,
+ 0, nullptr,
+ 1, barrierPtr,
+ 0, nullptr));
+ break;
+ }
+
+ case kImageMemory_BarrierType: {
+ const VkImageMemoryBarrier* barrierPtr =
+ reinterpret_cast<VkImageMemoryBarrier*>(barrier);
+ GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
+ dstStageMask, dependencyFlags,
+ 0, nullptr,
+ 0, nullptr,
+ 1, barrierPtr));
+ break;
+ }
+ }
+
+}
+
+void GrVkCommandBuffer::clearAttachments(const GrVkGpu* gpu,
+ int numAttachments,
+ const VkClearAttachment* attachments,
+ int numRects,
+ const VkClearRect* clearRects) const {
+ SkASSERT(fIsActive);
+ SkASSERT(fActiveRenderPass);
+ SkASSERT(numAttachments > 0);
+ SkASSERT(numRects > 0);
+#ifdef SK_DEBUG
+ for (int i = 0; i < numAttachments; ++i) {
+ if (attachments[i].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
+ uint32_t testIndex;
+ SkAssertResult(fActiveRenderPass->colorAttachmentIndex(&testIndex));
+ SkASSERT(testIndex == attachments[i].colorAttachment);
+ }
+ }
+#endif
+ GR_VK_CALL(gpu->vkInterface(), CmdClearAttachments(fCmdBuffer,
+ numAttachments,
+ attachments,
+ numRects,
+ clearRects));
+}
+
+void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
+ GrVkPipelineState* pipelineState,
+ VkPipelineLayout layout,
+ uint32_t firstSet,
+ uint32_t setCount,
+ const VkDescriptorSet* descriptorSets,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets) {
+ SkASSERT(fIsActive);
+ GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
+ VK_PIPELINE_BIND_POINT_GRAPHICS,
+ layout,
+ firstSet,
+ setCount,
+ descriptorSets,
+ dynamicOffsetCount,
+ dynamicOffsets));
+ pipelineState->addUniformResources(*this);
+}
+
+void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
+ const SkTArray<const GrVkRecycledResource*>& recycled,
+ const SkTArray<const GrVkResource*>& resources,
+ VkPipelineLayout layout,
+ uint32_t firstSet,
+ uint32_t setCount,
+ const VkDescriptorSet* descriptorSets,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets) {
+ SkASSERT(fIsActive);
+ GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
+ VK_PIPELINE_BIND_POINT_GRAPHICS,
+ layout,
+ firstSet,
+ setCount,
+ descriptorSets,
+ dynamicOffsetCount,
+ dynamicOffsets));
+ for (int i = 0; i < recycled.count(); ++i) {
+ this->addRecycledResource(recycled[i]);
+ }
+ for (int i = 0; i < resources.count(); ++i) {
+ this->addResource(resources[i]);
+ }
+}
+
+void GrVkCommandBuffer::bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline) {
+ SkASSERT(fIsActive);
+ GR_VK_CALL(gpu->vkInterface(), CmdBindPipeline(fCmdBuffer,
+ VK_PIPELINE_BIND_POINT_GRAPHICS,
+ pipeline->pipeline()));
+ this->addResource(pipeline);
+}
+
+void GrVkCommandBuffer::drawIndexed(const GrVkGpu* gpu,
+ uint32_t indexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t vertexOffset,
+ uint32_t firstInstance) const {
+ SkASSERT(fIsActive);
+ SkASSERT(fActiveRenderPass);
+ GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexed(fCmdBuffer,
+ indexCount,
+ instanceCount,
+ firstIndex,
+ vertexOffset,
+ firstInstance));
+}
+
+void GrVkCommandBuffer::draw(const GrVkGpu* gpu,
+ uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstVertex,
+ uint32_t firstInstance) const {
+ SkASSERT(fIsActive);
+ SkASSERT(fActiveRenderPass);
+ GR_VK_CALL(gpu->vkInterface(), CmdDraw(fCmdBuffer,
+ vertexCount,
+ instanceCount,
+ firstVertex,
+ firstInstance));
+}
+
+void GrVkCommandBuffer::setViewport(const GrVkGpu* gpu,
+ uint32_t firstViewport,
+ uint32_t viewportCount,
+ const VkViewport* viewports) {
+ SkASSERT(fIsActive);
+ SkASSERT(1 == viewportCount);
+ if (memcmp(viewports, &fCachedViewport, sizeof(VkViewport))) {
+ GR_VK_CALL(gpu->vkInterface(), CmdSetViewport(fCmdBuffer,
+ firstViewport,
+ viewportCount,
+ viewports));
+ fCachedViewport = viewports[0];
+ }
+}
+
+void GrVkCommandBuffer::setScissor(const GrVkGpu* gpu,
+ uint32_t firstScissor,
+ uint32_t scissorCount,
+ const VkRect2D* scissors) {
+ SkASSERT(fIsActive);
+ SkASSERT(1 == scissorCount);
+ if (memcmp(scissors, &fCachedScissor, sizeof(VkRect2D))) {
+ GR_VK_CALL(gpu->vkInterface(), CmdSetScissor(fCmdBuffer,
+ firstScissor,
+ scissorCount,
+ scissors));
+ fCachedScissor = scissors[0];
+ }
+}
+
+void GrVkCommandBuffer::setBlendConstants(const GrVkGpu* gpu,
+ const float blendConstants[4]) {
+ SkASSERT(fIsActive);
+ if (memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) {
+ GR_VK_CALL(gpu->vkInterface(), CmdSetBlendConstants(fCmdBuffer, blendConstants));
+ memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float));
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// PrimaryCommandBuffer
+////////////////////////////////////////////////////////////////////////////////
+GrVkPrimaryCommandBuffer::~GrVkPrimaryCommandBuffer() {
+ // Should have ended any render pass we're in the middle of
+ SkASSERT(!fActiveRenderPass);
+}
+
+GrVkPrimaryCommandBuffer* GrVkPrimaryCommandBuffer::Create(const GrVkGpu* gpu,
+ VkCommandPool cmdPool) {
+ const VkCommandBufferAllocateInfo cmdInfo = {
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
+ NULL, // pNext
+ cmdPool, // commandPool
+ VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
+ 1 // bufferCount
+ };
+
+ VkCommandBuffer cmdBuffer;
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateCommandBuffers(gpu->device(),
+ &cmdInfo,
+ &cmdBuffer));
+ if (err) {
+ return nullptr;
+ }
+ return new GrVkPrimaryCommandBuffer(cmdBuffer);
+}
+
+void GrVkPrimaryCommandBuffer::begin(const GrVkGpu* gpu) {
+ SkASSERT(!fIsActive);
+ VkCommandBufferBeginInfo cmdBufferBeginInfo;
+ memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
+ cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdBufferBeginInfo.pNext = nullptr;
+ cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ cmdBufferBeginInfo.pInheritanceInfo = nullptr;
+
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), BeginCommandBuffer(fCmdBuffer,
+ &cmdBufferBeginInfo));
+ fIsActive = true;
+}
+
+void GrVkPrimaryCommandBuffer::end(const GrVkGpu* gpu) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), EndCommandBuffer(fCmdBuffer));
+ this->invalidateState();
+ fIsActive = false;
+}
+
+void GrVkPrimaryCommandBuffer::beginRenderPass(const GrVkGpu* gpu,
+ const GrVkRenderPass* renderPass,
+ uint32_t clearCount,
+ const VkClearValue* clearValues,
+ const GrVkRenderTarget& target,
+ const SkIRect& bounds,
+ bool forSecondaryCB) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ SkASSERT(renderPass->isCompatible(target));
+
+ VkRenderPassBeginInfo beginInfo;
+ VkRect2D renderArea;
+ renderArea.offset = { bounds.fLeft , bounds.fTop };
+ renderArea.extent = { (uint32_t)bounds.width(), (uint32_t)bounds.height() };
+
+ memset(&beginInfo, 0, sizeof(VkRenderPassBeginInfo));
+ beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ beginInfo.pNext = nullptr;
+ beginInfo.renderPass = renderPass->vkRenderPass();
+ beginInfo.framebuffer = target.framebuffer()->framebuffer();
+ beginInfo.renderArea = renderArea;
+ beginInfo.clearValueCount = clearCount;
+ beginInfo.pClearValues = clearValues;
+
+ VkSubpassContents contents = forSecondaryCB ? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
+ : VK_SUBPASS_CONTENTS_INLINE;
+
+ GR_VK_CALL(gpu->vkInterface(), CmdBeginRenderPass(fCmdBuffer, &beginInfo, contents));
+ fActiveRenderPass = renderPass;
+ this->addResource(renderPass);
+ target.addResources(*this);
+}
+
+void GrVkPrimaryCommandBuffer::endRenderPass(const GrVkGpu* gpu) {
+ SkASSERT(fIsActive);
+ SkASSERT(fActiveRenderPass);
+ GR_VK_CALL(gpu->vkInterface(), CmdEndRenderPass(fCmdBuffer));
+ fActiveRenderPass = nullptr;
+}
+
+void GrVkPrimaryCommandBuffer::executeCommands(const GrVkGpu* gpu,
+ GrVkSecondaryCommandBuffer* buffer) {
+ SkASSERT(fIsActive);
+ SkASSERT(fActiveRenderPass);
+ SkASSERT(fActiveRenderPass->isCompatible(*buffer->fActiveRenderPass));
+
+ GR_VK_CALL(gpu->vkInterface(), CmdExecuteCommands(fCmdBuffer, 1, &buffer->fCmdBuffer));
+ buffer->ref();
+ fSecondaryCommandBuffers.push_back(buffer);
+ // When executing a secondary command buffer all state (besides render pass state) becomes
+ // invalidated and must be reset. This includes bound buffers, pipelines, dynamic state, etc.
+ this->invalidateState();
+}
+
+void GrVkPrimaryCommandBuffer::submitToQueue(const GrVkGpu* gpu,
+ VkQueue queue,
+ GrVkGpu::SyncQueue sync) {
+ SkASSERT(!fIsActive);
+
+ VkResult err;
+ if (VK_NULL_HANDLE == fSubmitFence) {
+ VkFenceCreateInfo fenceInfo;
+ memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
+ fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+ err = GR_VK_CALL(gpu->vkInterface(), CreateFence(gpu->device(), &fenceInfo, nullptr,
+ &fSubmitFence));
+ SkASSERT(!err);
+ } else {
+ GR_VK_CALL(gpu->vkInterface(), ResetFences(gpu->device(), 1, &fSubmitFence));
+ }
+
+ VkSubmitInfo submitInfo;
+ memset(&submitInfo, 0, sizeof(VkSubmitInfo));
+ submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submitInfo.pNext = nullptr;
+ submitInfo.waitSemaphoreCount = 0;
+ submitInfo.pWaitSemaphores = nullptr;
+ submitInfo.pWaitDstStageMask = 0;
+ submitInfo.commandBufferCount = 1;
+ submitInfo.pCommandBuffers = &fCmdBuffer;
+ submitInfo.signalSemaphoreCount = 0;
+ submitInfo.pSignalSemaphores = nullptr;
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), QueueSubmit(queue, 1, &submitInfo, fSubmitFence));
+
+ if (GrVkGpu::kForce_SyncQueue == sync) {
+ err = GR_VK_CALL(gpu->vkInterface(),
+ WaitForFences(gpu->device(), 1, &fSubmitFence, true, UINT64_MAX));
+ if (VK_TIMEOUT == err) {
+ SkDebugf("Fence failed to signal: %d\n", err);
+ SkFAIL("failing");
+ }
+ SkASSERT(!err);
+
+ // Destroy the fence
+ GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
+ fSubmitFence = VK_NULL_HANDLE;
+ }
+}
+
+bool GrVkPrimaryCommandBuffer::finished(const GrVkGpu* gpu) const {
+ if (VK_NULL_HANDLE == fSubmitFence) {
+ return true;
+ }
+
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), GetFenceStatus(gpu->device(), fSubmitFence));
+ switch (err) {
+ case VK_SUCCESS:
+ return true;
+
+ case VK_NOT_READY:
+ return false;
+
+ default:
+ SkDebugf("Error getting fence status: %d\n", err);
+ SkFAIL("failing");
+ break;
+ }
+
+ return false;
+}
+
+void GrVkPrimaryCommandBuffer::onReset(GrVkGpu* gpu) {
+ for (int i = 0; i < fSecondaryCommandBuffers.count(); ++i) {
+ gpu->resourceProvider().recycleSecondaryCommandBuffer(fSecondaryCommandBuffers[i]);
+ }
+ fSecondaryCommandBuffers.reset();
+}
+
+void GrVkPrimaryCommandBuffer::copyImage(const GrVkGpu* gpu,
+ GrVkImage* srcImage,
+ VkImageLayout srcLayout,
+ GrVkImage* dstImage,
+ VkImageLayout dstLayout,
+ uint32_t copyRegionCount,
+ const VkImageCopy* copyRegions) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addResource(srcImage->resource());
+ this->addResource(dstImage->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdCopyImage(fCmdBuffer,
+ srcImage->image(),
+ srcLayout,
+ dstImage->image(),
+ dstLayout,
+ copyRegionCount,
+ copyRegions));
+}
+
+void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
+ const GrVkResource* srcResource,
+ VkImage srcImage,
+ VkImageLayout srcLayout,
+ const GrVkResource* dstResource,
+ VkImage dstImage,
+ VkImageLayout dstLayout,
+ uint32_t blitRegionCount,
+ const VkImageBlit* blitRegions,
+ VkFilter filter) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addResource(srcResource);
+ this->addResource(dstResource);
+ GR_VK_CALL(gpu->vkInterface(), CmdBlitImage(fCmdBuffer,
+ srcImage,
+ srcLayout,
+ dstImage,
+ dstLayout,
+ blitRegionCount,
+ blitRegions,
+ filter));
+}
+
+void GrVkPrimaryCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
+ GrVkImage* srcImage,
+ VkImageLayout srcLayout,
+ GrVkTransferBuffer* dstBuffer,
+ uint32_t copyRegionCount,
+ const VkBufferImageCopy* copyRegions) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addResource(srcImage->resource());
+ this->addResource(dstBuffer->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdCopyImageToBuffer(fCmdBuffer,
+ srcImage->image(),
+ srcLayout,
+ dstBuffer->buffer(),
+ copyRegionCount,
+ copyRegions));
+}
+
+void GrVkPrimaryCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
+ GrVkTransferBuffer* srcBuffer,
+ GrVkImage* dstImage,
+ VkImageLayout dstLayout,
+ uint32_t copyRegionCount,
+ const VkBufferImageCopy* copyRegions) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addResource(srcBuffer->resource());
+ this->addResource(dstImage->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdCopyBufferToImage(fCmdBuffer,
+ srcBuffer->buffer(),
+ dstImage->image(),
+ dstLayout,
+ copyRegionCount,
+ copyRegions));
+}
+
+void GrVkPrimaryCommandBuffer::updateBuffer(GrVkGpu* gpu,
+ GrVkBuffer* dstBuffer,
+ VkDeviceSize dstOffset,
+ VkDeviceSize dataSize,
+ const void* data) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ SkASSERT(0 == (dstOffset & 0x03)); // four byte aligned
+ // TODO: handle larger transfer sizes
+ SkASSERT(dataSize <= 65536);
+ SkASSERT(0 == (dataSize & 0x03)); // four byte aligned
+ this->addResource(dstBuffer->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdUpdateBuffer(fCmdBuffer,
+ dstBuffer->buffer(),
+ dstOffset,
+ dataSize,
+ (const uint32_t*) data));
+}
+
+void GrVkPrimaryCommandBuffer::clearColorImage(const GrVkGpu* gpu,
+ GrVkImage* image,
+ const VkClearColorValue* color,
+ uint32_t subRangeCount,
+ const VkImageSubresourceRange* subRanges) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addResource(image->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdClearColorImage(fCmdBuffer,
+ image->image(),
+ image->currentLayout(),
+ color,
+ subRangeCount,
+ subRanges));
+}
+
+void GrVkPrimaryCommandBuffer::clearDepthStencilImage(const GrVkGpu* gpu,
+ GrVkImage* image,
+ const VkClearDepthStencilValue* color,
+ uint32_t subRangeCount,
+ const VkImageSubresourceRange* subRanges) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addResource(image->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdClearDepthStencilImage(fCmdBuffer,
+ image->image(),
+ image->currentLayout(),
+ color,
+ subRangeCount,
+ subRanges));
+}
+
+void GrVkPrimaryCommandBuffer::resolveImage(GrVkGpu* gpu,
+ const GrVkImage& srcImage,
+ const GrVkImage& dstImage,
+ uint32_t regionCount,
+ const VkImageResolve* regions) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+
+ this->addResource(srcImage.resource());
+ this->addResource(dstImage.resource());
+
+ GR_VK_CALL(gpu->vkInterface(), CmdResolveImage(fCmdBuffer,
+ srcImage.image(),
+ srcImage.currentLayout(),
+ dstImage.image(),
+ dstImage.currentLayout(),
+ regionCount,
+ regions));
+}
+
+void GrVkPrimaryCommandBuffer::onFreeGPUData(const GrVkGpu* gpu) const {
+ SkASSERT(!fActiveRenderPass);
+ // Destroy the fence, if any
+ if (VK_NULL_HANDLE != fSubmitFence) {
+ GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// SecondaryCommandBuffer
+////////////////////////////////////////////////////////////////////////////////
+
+GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(const GrVkGpu* gpu,
+ VkCommandPool cmdPool) {
+ const VkCommandBufferAllocateInfo cmdInfo = {
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
+ NULL, // pNext
+ cmdPool, // commandPool
+ VK_COMMAND_BUFFER_LEVEL_SECONDARY, // level
+ 1 // bufferCount
+ };
+
+ VkCommandBuffer cmdBuffer;
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateCommandBuffers(gpu->device(),
+ &cmdInfo,
+ &cmdBuffer));
+ if (err) {
+ return nullptr;
+ }
+ return new GrVkSecondaryCommandBuffer(cmdBuffer);
+}
+
+
+void GrVkSecondaryCommandBuffer::begin(const GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
+ const GrVkRenderPass* compatibleRenderPass) {
+ SkASSERT(!fIsActive);
+ SkASSERT(compatibleRenderPass);
+ fActiveRenderPass = compatibleRenderPass;
+
+ VkCommandBufferInheritanceInfo inheritanceInfo;
+ memset(&inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
+ inheritanceInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
+ inheritanceInfo.pNext = nullptr;
+ inheritanceInfo.renderPass = fActiveRenderPass->vkRenderPass();
+ inheritanceInfo.subpass = 0; // Currently only using 1 subpass for each render pass
+ inheritanceInfo.framebuffer = framebuffer ? framebuffer->framebuffer() : VK_NULL_HANDLE;
+ inheritanceInfo.occlusionQueryEnable = false;
+ inheritanceInfo.queryFlags = 0;
+ inheritanceInfo.pipelineStatistics = 0;
+
+ VkCommandBufferBeginInfo cmdBufferBeginInfo;
+ memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
+ cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdBufferBeginInfo.pNext = nullptr;
+ cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT |
+ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ cmdBufferBeginInfo.pInheritanceInfo = &inheritanceInfo;
+
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), BeginCommandBuffer(fCmdBuffer,
+ &cmdBufferBeginInfo));
+ fIsActive = true;
+}
+
+void GrVkSecondaryCommandBuffer::end(const GrVkGpu* gpu) {
+ SkASSERT(fIsActive);
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), EndCommandBuffer(fCmdBuffer));
+ this->invalidateState();
+ fIsActive = false;
+}
+
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkCommandBuffer.h b/gfx/skia/skia/src/gpu/vk/GrVkCommandBuffer.h
new file mode 100644
index 000000000..8020c7db1
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkCommandBuffer.h
@@ -0,0 +1,352 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkCommandBuffer_DEFINED
+#define GrVkCommandBuffer_DEFINED
+
+#include "GrVkGpu.h"
+#include "GrVkResource.h"
+#include "GrVkUtil.h"
+#include "vk/GrVkDefines.h"
+
+class GrVkFramebuffer;
+class GrVkPipeline;
+class GrVkRenderPass;
+class GrVkRenderTarget;
+class GrVkTransferBuffer;
+
+class GrVkCommandBuffer : public GrVkResource {
+public:
+ void invalidateState();
+
+ ////////////////////////////////////////////////////////////////////////////
+ // CommandBuffer commands
+ ////////////////////////////////////////////////////////////////////////////
+ enum BarrierType {
+ kMemory_BarrierType,
+ kBufferMemory_BarrierType,
+ kImageMemory_BarrierType
+ };
+
+ void pipelineBarrier(const GrVkGpu* gpu,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ BarrierType barrierType,
+ void* barrier) const;
+
+ void bindVertexBuffer(GrVkGpu* gpu, GrVkVertexBuffer* vbuffer) {
+ VkBuffer vkBuffer = vbuffer->buffer();
+ // TODO: once vbuffer->offset() no longer always returns 0, we will need to track the offset
+ // to know if we can skip binding or not.
+ if (!fBoundVertexBufferIsValid || vkBuffer != fBoundVertexBuffer) {
+ VkDeviceSize offset = vbuffer->offset();
+ GR_VK_CALL(gpu->vkInterface(), CmdBindVertexBuffers(fCmdBuffer,
+ 0,
+ 1,
+ &vkBuffer,
+ &offset));
+ fBoundVertexBufferIsValid = true;
+ fBoundVertexBuffer = vkBuffer;
+ addResource(vbuffer->resource());
+ }
+ }
+
+ void bindIndexBuffer(GrVkGpu* gpu, GrVkIndexBuffer* ibuffer) {
+ VkBuffer vkBuffer = ibuffer->buffer();
+ // TODO: once ibuffer->offset() no longer always returns 0, we will need to track the offset
+ // to know if we can skip binding or not.
+ if (!fBoundIndexBufferIsValid || vkBuffer != fBoundIndexBuffer) {
+ GR_VK_CALL(gpu->vkInterface(), CmdBindIndexBuffer(fCmdBuffer,
+ vkBuffer,
+ ibuffer->offset(),
+ VK_INDEX_TYPE_UINT16));
+ fBoundIndexBufferIsValid = true;
+ fBoundIndexBuffer = vkBuffer;
+ addResource(ibuffer->resource());
+ }
+ }
+
+ void bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline);
+
+ void bindDescriptorSets(const GrVkGpu* gpu,
+ GrVkPipelineState*,
+ VkPipelineLayout layout,
+ uint32_t firstSet,
+ uint32_t setCount,
+ const VkDescriptorSet* descriptorSets,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets);
+
+ void bindDescriptorSets(const GrVkGpu* gpu,
+ const SkTArray<const GrVkRecycledResource*>&,
+ const SkTArray<const GrVkResource*>&,
+ VkPipelineLayout layout,
+ uint32_t firstSet,
+ uint32_t setCount,
+ const VkDescriptorSet* descriptorSets,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets);
+
+ void setViewport(const GrVkGpu* gpu,
+ uint32_t firstViewport,
+ uint32_t viewportCount,
+ const VkViewport* viewports);
+
+ void setScissor(const GrVkGpu* gpu,
+ uint32_t firstScissor,
+ uint32_t scissorCount,
+ const VkRect2D* scissors);
+
+ void setBlendConstants(const GrVkGpu* gpu, const float blendConstants[4]);
+
+ // Commands that only work inside of a render pass
+ void clearAttachments(const GrVkGpu* gpu,
+ int numAttachments,
+ const VkClearAttachment* attachments,
+ int numRects,
+ const VkClearRect* clearRects) const;
+
+ void drawIndexed(const GrVkGpu* gpu,
+ uint32_t indexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t vertexOffset,
+ uint32_t firstInstance) const;
+
+ void draw(const GrVkGpu* gpu,
+ uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstVertex,
+ uint32_t firstInstance) const;
+
+    // Add a ref-counted resource that will be tracked and released when this command buffer
+    // finishes execution.
+ void addResource(const GrVkResource* resource) {
+ resource->ref();
+ fTrackedResources.append(1, &resource);
+ }
+
+    // Add a ref-counted resource that will be tracked and released when this command buffer
+    // finishes execution. When it is released, it will signal that the resource can be recycled
+    // for reuse.
+ void addRecycledResource(const GrVkRecycledResource* resource) {
+ resource->ref();
+ fTrackedRecycledResources.append(1, &resource);
+ }
+
+ void reset(GrVkGpu* gpu);
+
+protected:
+ GrVkCommandBuffer(VkCommandBuffer cmdBuffer, const GrVkRenderPass* rp = VK_NULL_HANDLE)
+ : fIsActive(false)
+ , fActiveRenderPass(rp)
+ , fCmdBuffer(cmdBuffer)
+ , fBoundVertexBufferIsValid(false)
+ , fBoundIndexBufferIsValid(false)
+ , fNumResets(0) {
+ fTrackedResources.setReserve(kInitialTrackedResourcesCount);
+ fTrackedRecycledResources.setReserve(kInitialTrackedResourcesCount);
+ this->invalidateState();
+ }
+
+ SkTDArray<const GrVkResource*> fTrackedResources;
+ SkTDArray<const GrVkRecycledResource*> fTrackedRecycledResources;
+
+    // Tracks whether we are in the middle of command buffer begin/end calls and thus can add
+    // new commands to the buffer.
+ bool fIsActive;
+
+    // Stores a pointer to the currently active render pass (i.e. begin has been called but not
+    // end). A nullptr means there is no active render pass. The GrVkCommandBuffer does not own
+ // the render pass.
+ const GrVkRenderPass* fActiveRenderPass;
+
+ VkCommandBuffer fCmdBuffer;
+
+private:
+ static const int kInitialTrackedResourcesCount = 32;
+
+ void freeGPUData(const GrVkGpu* gpu) const override;
+ virtual void onFreeGPUData(const GrVkGpu* gpu) const = 0;
+ void abandonSubResources() const override;
+
+ virtual void onReset(GrVkGpu* gpu) {}
+
+ VkBuffer fBoundVertexBuffer;
+ bool fBoundVertexBufferIsValid;
+
+ VkBuffer fBoundIndexBuffer;
+ bool fBoundIndexBufferIsValid;
+
+    // When resetting the command buffer, we remove the tracked resources from their arrays. We
+    // prefer not to free all of the arrays' memory on every reset, so usually we just rewind them.
+    // However, to keep the arrays from growing to their maximum size indefinitely, we do a full
+    // reset of the tracked resource arrays after a fixed number of rewind-only resets.
+ static const int kNumRewindResetsBeforeFullReset = 8;
+ int fNumResets;
+
+ // Cached values used for dynamic state updates
+ VkViewport fCachedViewport;
+ VkRect2D fCachedScissor;
+ float fCachedBlendConstant[4];
+};
+
+class GrVkSecondaryCommandBuffer;
+
+class GrVkPrimaryCommandBuffer : public GrVkCommandBuffer {
+public:
+ ~GrVkPrimaryCommandBuffer() override;
+
+ static GrVkPrimaryCommandBuffer* Create(const GrVkGpu* gpu, VkCommandPool cmdPool);
+
+ void begin(const GrVkGpu* gpu);
+ void end(const GrVkGpu* gpu);
+
+    // Begins a render pass on this command buffer. The framebuffer from the GrVkRenderTarget
+    // will be used in the render pass.
+ void beginRenderPass(const GrVkGpu* gpu,
+ const GrVkRenderPass* renderPass,
+ uint32_t clearCount,
+ const VkClearValue* clearValues,
+ const GrVkRenderTarget& target,
+ const SkIRect& bounds,
+ bool forSecondaryCB);
+ void endRenderPass(const GrVkGpu* gpu);
+
+ // Submits the SecondaryCommandBuffer into this command buffer. It is required that we are
+ // currently inside a render pass that is compatible with the one used to create the
+ // SecondaryCommandBuffer.
+ void executeCommands(const GrVkGpu* gpu,
+ GrVkSecondaryCommandBuffer* secondaryBuffer);
+
+ // Commands that only work outside of a render pass
+ void clearColorImage(const GrVkGpu* gpu,
+ GrVkImage* image,
+ const VkClearColorValue* color,
+ uint32_t subRangeCount,
+ const VkImageSubresourceRange* subRanges);
+
+ void clearDepthStencilImage(const GrVkGpu* gpu,
+ GrVkImage* image,
+ const VkClearDepthStencilValue* color,
+ uint32_t subRangeCount,
+ const VkImageSubresourceRange* subRanges);
+
+ void copyImage(const GrVkGpu* gpu,
+ GrVkImage* srcImage,
+ VkImageLayout srcLayout,
+ GrVkImage* dstImage,
+ VkImageLayout dstLayout,
+ uint32_t copyRegionCount,
+ const VkImageCopy* copyRegions);
+
+ void blitImage(const GrVkGpu* gpu,
+ const GrVkResource* srcResource,
+ VkImage srcImage,
+ VkImageLayout srcLayout,
+ const GrVkResource* dstResource,
+ VkImage dstImage,
+ VkImageLayout dstLayout,
+ uint32_t blitRegionCount,
+ const VkImageBlit* blitRegions,
+ VkFilter filter);
+
+ void blitImage(const GrVkGpu* gpu,
+ const GrVkImage& srcImage,
+ const GrVkImage& dstImage,
+ uint32_t blitRegionCount,
+ const VkImageBlit* blitRegions,
+ VkFilter filter) {
+ this->blitImage(gpu,
+ srcImage.resource(),
+ srcImage.image(),
+ srcImage.currentLayout(),
+ dstImage.resource(),
+ dstImage.image(),
+ dstImage.currentLayout(),
+ blitRegionCount,
+ blitRegions,
+ filter);
+ }
+
+ void copyImageToBuffer(const GrVkGpu* gpu,
+ GrVkImage* srcImage,
+ VkImageLayout srcLayout,
+ GrVkTransferBuffer* dstBuffer,
+ uint32_t copyRegionCount,
+ const VkBufferImageCopy* copyRegions);
+
+ void copyBufferToImage(const GrVkGpu* gpu,
+ GrVkTransferBuffer* srcBuffer,
+ GrVkImage* dstImage,
+ VkImageLayout dstLayout,
+ uint32_t copyRegionCount,
+ const VkBufferImageCopy* copyRegions);
+
+ void updateBuffer(GrVkGpu* gpu,
+ GrVkBuffer* dstBuffer,
+ VkDeviceSize dstOffset,
+ VkDeviceSize dataSize,
+ const void* data);
+
+ void resolveImage(GrVkGpu* gpu,
+ const GrVkImage& srcImage,
+ const GrVkImage& dstImage,
+ uint32_t regionCount,
+ const VkImageResolve* regions);
+
+ void submitToQueue(const GrVkGpu* gpu, VkQueue queue, GrVkGpu::SyncQueue sync);
+ bool finished(const GrVkGpu* gpu) const;
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkPrimaryCommandBuffer: %d (%d refs)\n", fCmdBuffer, this->getRefCnt());
+ }
+#endif
+
+private:
+ explicit GrVkPrimaryCommandBuffer(VkCommandBuffer cmdBuffer)
+ : INHERITED(cmdBuffer)
+ , fSubmitFence(VK_NULL_HANDLE) {}
+
+ void onFreeGPUData(const GrVkGpu* gpu) const override;
+
+ void onReset(GrVkGpu* gpu) override;
+
+ SkTArray<GrVkSecondaryCommandBuffer*, true> fSecondaryCommandBuffers;
+ VkFence fSubmitFence;
+
+ typedef GrVkCommandBuffer INHERITED;
+};
+
+class GrVkSecondaryCommandBuffer : public GrVkCommandBuffer {
+public:
+ static GrVkSecondaryCommandBuffer* Create(const GrVkGpu* gpu, VkCommandPool cmdPool);
+
+ void begin(const GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
+ const GrVkRenderPass* compatibleRenderPass);
+ void end(const GrVkGpu* gpu);
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkSecondaryCommandBuffer: %d (%d refs)\n", fCmdBuffer, this->getRefCnt());
+ }
+#endif
+
+private:
+ explicit GrVkSecondaryCommandBuffer(VkCommandBuffer cmdBuffer)
+ : INHERITED(cmdBuffer) {
+ }
+
+ void onFreeGPUData(const GrVkGpu* gpu) const override {}
+
+ friend class GrVkPrimaryCommandBuffer;
+
+ typedef GrVkCommandBuffer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkCopyManager.cpp b/gfx/skia/skia/src/gpu/vk/GrVkCopyManager.cpp
new file mode 100644
index 000000000..68f7c317d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkCopyManager.cpp
@@ -0,0 +1,405 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+*/
+
+#include "GrVkCopyManager.h"
+
+#include "GrSurface.h"
+#include "GrTextureParams.h"
+#include "GrTexturePriv.h"
+#include "GrVkCommandBuffer.h"
+#include "GrVkCopyPipeline.h"
+#include "GrVkDescriptorSet.h"
+#include "GrVkGpu.h"
+#include "GrVkImageView.h"
+#include "GrVkRenderTarget.h"
+#include "GrVkResourceProvider.h"
+#include "GrVkSampler.h"
+#include "GrVkTexture.h"
+#include "GrVkUniformBuffer.h"
+#include "GrVkVertexBuffer.h"
+#include "SkPoint.h"
+#include "SkRect.h"
+
+bool GrVkCopyManager::createCopyProgram(GrVkGpu* gpu) {
+ const GrGLSLCaps* glslCaps = gpu->vkCaps().glslCaps();
+ const char* version = glslCaps->versionDeclString();
+ SkString vertShaderText(version);
+ vertShaderText.append(
+ "#extension GL_ARB_separate_shader_objects : enable\n"
+ "#extension GL_ARB_shading_language_420pack : enable\n"
+
+ "layout(set = 0, binding = 0) uniform vertexUniformBuffer {"
+ "mediump vec4 uPosXform;"
+ "mediump vec4 uTexCoordXform;"
+ "};"
+ "layout(location = 0) in highp vec2 inPosition;"
+ "layout(location = 1) out mediump vec2 vTexCoord;"
+
+ "// Copy Program VS\n"
+ "void main() {"
+ "vTexCoord = inPosition * uTexCoordXform.xy + uTexCoordXform.zw;"
+ "gl_Position.xy = inPosition * uPosXform.xy + uPosXform.zw;"
+ "gl_Position.zw = vec2(0, 1);"
+ "}"
+ );
+
+ SkString fragShaderText(version);
+ fragShaderText.append(
+ "#extension GL_ARB_separate_shader_objects : enable\n"
+ "#extension GL_ARB_shading_language_420pack : enable\n"
+
+ "precision mediump float;"
+
+ "layout(set = 1, binding = 0) uniform mediump sampler2D uTextureSampler;"
+ "layout(location = 1) in mediump vec2 vTexCoord;"
+ "layout(location = 0, index = 0) out mediump vec4 fsColorOut;"
+
+ "// Copy Program FS\n"
+ "void main() {"
+ "fsColorOut = texture(uTextureSampler, vTexCoord);"
+ "}"
+ );
+
+ if (!GrCompileVkShaderModule(gpu, vertShaderText.c_str(),
+ VK_SHADER_STAGE_VERTEX_BIT,
+ &fVertShaderModule, &fShaderStageInfo[0])) {
+ this->destroyResources(gpu);
+ return false;
+ }
+
+ if (!GrCompileVkShaderModule(gpu, fragShaderText.c_str(),
+ VK_SHADER_STAGE_FRAGMENT_BIT,
+ &fFragShaderModule, &fShaderStageInfo[1])) {
+ this->destroyResources(gpu);
+ return false;
+ }
+
+ VkDescriptorSetLayout dsLayout[2];
+
+ GrVkResourceProvider& resourceProvider = gpu->resourceProvider();
+
+ dsLayout[GrVkUniformHandler::kUniformBufferDescSet] = resourceProvider.getUniformDSLayout();
+
+ uint32_t samplerVisibility = kFragment_GrShaderFlag;
+ SkTArray<uint32_t> visibilityArray(&samplerVisibility, 1);
+
+ resourceProvider.getSamplerDescriptorSetHandle(visibilityArray, &fSamplerDSHandle);
+ dsLayout[GrVkUniformHandler::kSamplerDescSet] =
+ resourceProvider.getSamplerDSLayout(fSamplerDSHandle);
+
+ // Create the VkPipelineLayout
+ VkPipelineLayoutCreateInfo layoutCreateInfo;
+    memset(&layoutCreateInfo, 0, sizeof(VkPipelineLayoutCreateInfo));
+    layoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+    layoutCreateInfo.pNext = nullptr;
+ layoutCreateInfo.flags = 0;
+ layoutCreateInfo.setLayoutCount = 2;
+ layoutCreateInfo.pSetLayouts = dsLayout;
+ layoutCreateInfo.pushConstantRangeCount = 0;
+ layoutCreateInfo.pPushConstantRanges = nullptr;
+
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), CreatePipelineLayout(gpu->device(),
+ &layoutCreateInfo,
+ nullptr,
+ &fPipelineLayout));
+ if (err) {
+ this->destroyResources(gpu);
+ return false;
+ }
+
+ static const float vdata[] = {
+ 0, 0,
+ 0, 1,
+ 1, 0,
+ 1, 1
+ };
+ fVertexBuffer.reset(GrVkVertexBuffer::Create(gpu, sizeof(vdata), false));
+ SkASSERT(fVertexBuffer.get());
+ fVertexBuffer->updateData(vdata, sizeof(vdata));
+
+ // We use 2 vec4's for uniforms
+ fUniformBuffer = GrVkUniformBuffer::Create(gpu, 8 * sizeof(float));
+ SkASSERT(fUniformBuffer);
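+    // The 8 floats are consumed as the two vec4 uniforms declared in the vertex shader above:
+    // uPosXform (xy = scale, zw = translate) followed by uTexCoordXform, filled in by
+    // copySurfaceAsDraw().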
+
+ return true;
+}
+
+bool GrVkCopyManager::copySurfaceAsDraw(GrVkGpu* gpu,
+ GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ if (!gpu->vkCaps().supportsCopiesAsDraws()) {
+ return false;
+ }
+
+ GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(dst->asRenderTarget());
+ if (!rt) {
+ return false;
+ }
+
+ GrVkTexture* srcTex = static_cast<GrVkTexture*>(src->asTexture());
+ if (!srcTex) {
+ return false;
+ }
+
+ if (VK_NULL_HANDLE == fVertShaderModule) {
+ SkASSERT(VK_NULL_HANDLE == fFragShaderModule &&
+ VK_NULL_HANDLE == fPipelineLayout &&
+ nullptr == fVertexBuffer.get() &&
+ nullptr == fUniformBuffer);
+ if (!this->createCopyProgram(gpu)) {
+ SkDebugf("Failed to create copy program.\n");
+ return false;
+ }
+ }
+
+ GrVkResourceProvider& resourceProv = gpu->resourceProvider();
+
+ GrVkCopyPipeline* pipeline = resourceProv.findOrCreateCopyPipeline(rt,
+ fShaderStageInfo,
+ fPipelineLayout);
+ if (!pipeline) {
+ return false;
+ }
+
+ // UPDATE UNIFORM DESCRIPTOR SET
+ int w = srcRect.width();
+ int h = srcRect.height();
+
+ // dst rect edges in NDC (-1 to 1)
+ int dw = dst->width();
+ int dh = dst->height();
+ float dx0 = 2.f * dstPoint.fX / dw - 1.f;
+ float dx1 = 2.f * (dstPoint.fX + w) / dw - 1.f;
+ float dy0 = 2.f * dstPoint.fY / dh - 1.f;
+ float dy1 = 2.f * (dstPoint.fY + h) / dh - 1.f;
+ if (kBottomLeft_GrSurfaceOrigin == dst->origin()) {
+ dy0 = -dy0;
+ dy1 = -dy1;
+ }
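+    // For example, with dw == 100, dstPoint.fX == 25 and w == 50, dx0 == -0.5 and dx1 == 0.5,
+    // i.e. the copied quad covers the middle half of the destination in NDC.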
+
+
+ float sx0 = (float)srcRect.fLeft;
+ float sx1 = (float)(srcRect.fLeft + w);
+ float sy0 = (float)srcRect.fTop;
+ float sy1 = (float)(srcRect.fTop + h);
+ int sh = src->height();
+ if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
+ sy0 = sh - sy0;
+ sy1 = sh - sy1;
+ }
+ // src rect edges in normalized texture space (0 to 1).
+ int sw = src->width();
+ sx0 /= sw;
+ sx1 /= sw;
+ sy0 /= sh;
+ sy1 /= sh;
+
+ float uniData[] = { dx1 - dx0, dy1 - dy0, dx0, dy0, // posXform
+ sx1 - sx0, sy1 - sy0, sx0, sy0 }; // texCoordXform
+
+ fUniformBuffer->updateData(gpu, uniData, sizeof(uniData), nullptr);
+
+ const GrVkDescriptorSet* uniformDS = resourceProv.getUniformDescriptorSet();
+ SkASSERT(uniformDS);
+
+ VkDescriptorBufferInfo uniBufferInfo;
+ uniBufferInfo.buffer = fUniformBuffer->buffer();
+ uniBufferInfo.offset = fUniformBuffer->offset();
+ uniBufferInfo.range = fUniformBuffer->size();
+
+ VkWriteDescriptorSet descriptorWrites;
+ descriptorWrites.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ descriptorWrites.pNext = nullptr;
+ descriptorWrites.dstSet = uniformDS->descriptorSet();
+ descriptorWrites.dstBinding = GrVkUniformHandler::kVertexBinding;
+ descriptorWrites.dstArrayElement = 0;
+ descriptorWrites.descriptorCount = 1;
+ descriptorWrites.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ descriptorWrites.pImageInfo = nullptr;
+ descriptorWrites.pBufferInfo = &uniBufferInfo;
+ descriptorWrites.pTexelBufferView = nullptr;
+
+ GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(),
+ 1,
+ &descriptorWrites,
+ 0, nullptr));
+
+ // UPDATE SAMPLER DESCRIPTOR SET
+ const GrVkDescriptorSet* samplerDS =
+ gpu->resourceProvider().getSamplerDescriptorSet(fSamplerDSHandle);
+
+ GrTextureParams params(SkShader::kClamp_TileMode, GrTextureParams::kNone_FilterMode);
+
+ GrVkSampler* sampler =
+ resourceProv.findOrCreateCompatibleSampler(params, srcTex->texturePriv().maxMipMapLevel());
+
+ VkDescriptorImageInfo imageInfo;
+ memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
+ imageInfo.sampler = sampler->sampler();
+ imageInfo.imageView = srcTex->textureView(true)->imageView();
+ imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+ VkWriteDescriptorSet writeInfo;
+ memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
+ writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ writeInfo.pNext = nullptr;
+ writeInfo.dstSet = samplerDS->descriptorSet();
+ writeInfo.dstBinding = 0;
+ writeInfo.dstArrayElement = 0;
+ writeInfo.descriptorCount = 1;
+ writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ writeInfo.pImageInfo = &imageInfo;
+ writeInfo.pBufferInfo = nullptr;
+ writeInfo.pTexelBufferView = nullptr;
+
+ GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(),
+ 1,
+ &writeInfo,
+ 0, nullptr));
+
+ VkDescriptorSet vkDescSets[] = { uniformDS->descriptorSet(), samplerDS->descriptorSet() };
+
+ GrVkRenderTarget* texRT = static_cast<GrVkRenderTarget*>(srcTex->asRenderTarget());
+ if (texRT) {
+ gpu->onResolveRenderTarget(texRT);
+ }
+
+ GrVkPrimaryCommandBuffer* cmdBuffer = gpu->currentCommandBuffer();
+
+ // TODO: Make tighter bounds and then adjust bounds for origin and granularity if we see
+ // any perf issues with using the whole bounds
+ SkIRect bounds = SkIRect::MakeWH(rt->width(), rt->height());
+
+ // Change layouts of rt and texture
+ GrVkImage* targetImage = rt->msaaImage() ? rt->msaaImage() : rt;
+ targetImage->setImageLayout(gpu,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
+ VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
+ false);
+
+ srcTex->setImageLayout(gpu,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+ VK_ACCESS_SHADER_READ_BIT,
+ VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
+ false);
+
+ GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ VK_ATTACHMENT_STORE_OP_STORE);
+ GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ VK_ATTACHMENT_STORE_OP_STORE);
+ const GrVkRenderPass* renderPass;
+ const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
+ rt->compatibleRenderPassHandle();
+ if (rpHandle.isValid()) {
+ renderPass = gpu->resourceProvider().findRenderPass(rpHandle,
+ vkColorOps,
+ vkStencilOps);
+ } else {
+ renderPass = gpu->resourceProvider().findRenderPass(*rt,
+ vkColorOps,
+ vkStencilOps);
+ }
+
+ SkASSERT(renderPass->isCompatible(*rt->simpleRenderPass()));
+
+
+ cmdBuffer->beginRenderPass(gpu, renderPass, 0, nullptr, *rt, bounds, false);
+ cmdBuffer->bindPipeline(gpu, pipeline);
+
+ // Uniform DescriptorSet, Sampler DescriptorSet, and vertex shader uniformBuffer
+ SkSTArray<3, const GrVkRecycledResource*> descriptorRecycledResources;
+ descriptorRecycledResources.push_back(uniformDS);
+ descriptorRecycledResources.push_back(samplerDS);
+ descriptorRecycledResources.push_back(fUniformBuffer->resource());
+
+ // One sampler, texture view, and texture
+ SkSTArray<3, const GrVkResource*> descriptorResources;
+ descriptorResources.push_back(sampler);
+ descriptorResources.push_back(srcTex->textureView(true));
+ descriptorResources.push_back(srcTex->resource());
+
+ cmdBuffer->bindDescriptorSets(gpu,
+ descriptorRecycledResources,
+ descriptorResources,
+ fPipelineLayout,
+ 0,
+ 2,
+ vkDescSets,
+ 0,
+ nullptr);
+
+    // Set the dynamic viewport and scissor state
+ // We always use one viewport the size of the RT
+ VkViewport viewport;
+ viewport.x = 0.0f;
+ viewport.y = 0.0f;
+ viewport.width = SkIntToScalar(rt->width());
+ viewport.height = SkIntToScalar(rt->height());
+ viewport.minDepth = 0.0f;
+ viewport.maxDepth = 1.0f;
+ cmdBuffer->setViewport(gpu, 0, 1, &viewport);
+
+ // We assume the scissor is not enabled so just set it to the whole RT
+ VkRect2D scissor;
+ scissor.extent.width = rt->width();
+ scissor.extent.height = rt->height();
+ scissor.offset.x = 0;
+ scissor.offset.y = 0;
+ cmdBuffer->setScissor(gpu, 0, 1, &scissor);
+
+ cmdBuffer->bindVertexBuffer(gpu, fVertexBuffer);
+ cmdBuffer->draw(gpu, 4, 1, 0, 0);
+ cmdBuffer->endRenderPass(gpu);
+
+ // Release all temp resources which should now be reffed by the cmd buffer
+ pipeline->unref(gpu);
+ uniformDS->unref(gpu);
+ samplerDS->unref(gpu);
+ sampler->unref(gpu);
+ renderPass->unref(gpu);
+
+ return true;
+}
+
+void GrVkCopyManager::destroyResources(GrVkGpu* gpu) {
+ if (VK_NULL_HANDLE != fVertShaderModule) {
+ GR_VK_CALL(gpu->vkInterface(), DestroyShaderModule(gpu->device(), fVertShaderModule,
+ nullptr));
+ fVertShaderModule = VK_NULL_HANDLE;
+ }
+
+ if (VK_NULL_HANDLE != fFragShaderModule) {
+ GR_VK_CALL(gpu->vkInterface(), DestroyShaderModule(gpu->device(), fFragShaderModule,
+ nullptr));
+ fFragShaderModule = VK_NULL_HANDLE;
+ }
+
+ if (VK_NULL_HANDLE != fPipelineLayout) {
+ GR_VK_CALL(gpu->vkInterface(), DestroyPipelineLayout(gpu->device(), fPipelineLayout,
+ nullptr));
+ fPipelineLayout = VK_NULL_HANDLE;
+ }
+
+ if (fUniformBuffer) {
+ fUniformBuffer->release(gpu);
+ fUniformBuffer = nullptr;
+ }
+}
+
+void GrVkCopyManager::abandonResources() {
+ fVertShaderModule = VK_NULL_HANDLE;
+ fFragShaderModule = VK_NULL_HANDLE;
+ fPipelineLayout = VK_NULL_HANDLE;
+
+ if (fUniformBuffer) {
+ fUniformBuffer->abandon();
+ fUniformBuffer = nullptr;
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkCopyManager.h b/gfx/skia/skia/src/gpu/vk/GrVkCopyManager.h
new file mode 100644
index 000000000..e19a14402
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkCopyManager.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+*/
+
+#ifndef GrVkCopyManager_DEFINED
+#define GrVkCopyManager_DEFINED
+
+#include "GrVkDescriptorSetManager.h"
+
+#include "vk/GrVkDefines.h"
+
+class GrSurface;
+class GrVkCopyPipeline;
+class GrVkGpu;
+class GrVkUniformBuffer;
+class GrVkVertexBuffer;
+struct SkIPoint;
+struct SkIRect;
+
+class GrVkCopyManager {
+public:
+ GrVkCopyManager()
+ : fVertShaderModule(VK_NULL_HANDLE)
+ , fFragShaderModule(VK_NULL_HANDLE)
+ , fPipelineLayout(VK_NULL_HANDLE)
+ , fUniformBuffer(nullptr) {}
+
+ bool copySurfaceAsDraw(GrVkGpu* gpu,
+ GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ void destroyResources(GrVkGpu* gpu);
+ void abandonResources();
+
+private:
+ bool createCopyProgram(GrVkGpu* gpu);
+
+ // Everything below is only created once and shared by all copy draws/pipelines
+ VkShaderModule fVertShaderModule;
+ VkShaderModule fFragShaderModule;
+ VkPipelineShaderStageCreateInfo fShaderStageInfo[2];
+
+ GrVkDescriptorSetManager::Handle fSamplerDSHandle;
+ VkPipelineLayout fPipelineLayout;
+
+ SkAutoTUnref<GrVkVertexBuffer> fVertexBuffer;
+ GrVkUniformBuffer* fUniformBuffer;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkCopyPipeline.cpp b/gfx/skia/skia/src/gpu/vk/GrVkCopyPipeline.cpp
new file mode 100644
index 000000000..8986029c7
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkCopyPipeline.cpp
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkCopyPipeline.h"
+
+#include "GrVkGpu.h"
+#include "GrVkUtil.h"
+#include "SkOnce.h"
+
+static void setup_multisample_state(int numSamples,
+ VkPipelineMultisampleStateCreateInfo* multisampleInfo) {
+ memset(multisampleInfo, 0, sizeof(VkPipelineMultisampleStateCreateInfo));
+ multisampleInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
+ multisampleInfo->pNext = nullptr;
+ multisampleInfo->flags = 0;
+ SkAssertResult(GrSampleCountToVkSampleCount(numSamples,
+ &multisampleInfo->rasterizationSamples));
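+    // e.g. a numSamples of 4 maps to VK_SAMPLE_COUNT_4_BIT; the SkAssertResult guards against
+    // sample counts that the helper cannot map to a VkSampleCountFlagBits value.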
+ multisampleInfo->sampleShadingEnable = VK_FALSE;
+ multisampleInfo->minSampleShading = 0.0f;
+ multisampleInfo->pSampleMask = nullptr;
+ multisampleInfo->alphaToCoverageEnable = VK_FALSE;
+ multisampleInfo->alphaToOneEnable = VK_FALSE;
+}
+
+GrVkCopyPipeline* GrVkCopyPipeline::Create(GrVkGpu* gpu,
+ VkPipelineShaderStageCreateInfo* shaderStageInfo,
+ VkPipelineLayout pipelineLayout,
+ int numSamples,
+ const GrVkRenderPass& renderPass,
+ VkPipelineCache cache) {
+
+ static const VkVertexInputAttributeDescription attributeDesc = {
+ 0, // location
+ 0, // binding
+ VK_FORMAT_R32G32_SFLOAT, // format
+ 0, // offset
+ };
+
+ static const VkVertexInputBindingDescription bindingDesc = {
+ 0, // binding
+ 2 * sizeof(float), // stride
+ VK_VERTEX_INPUT_RATE_VERTEX // inputRate
+ };
+
+ static const VkPipelineVertexInputStateCreateInfo vertexInputInfo = {
+ VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // flags
+ 1, // vertexBindingDescriptionCount
+ &bindingDesc, // pVertexBindingDescriptions
+        1,                                                            // vertexAttributeDescriptionCount
+ &attributeDesc, // pVertexAttributeDescriptions
+ };
+
+ static const VkPipelineInputAssemblyStateCreateInfo inputAssemblyInfo = {
+ VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // flags
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, // topology
+ VK_FALSE // primitiveRestartEnable
+ };
+
+ static const VkStencilOpState dummyStencilState = {
+ VK_STENCIL_OP_KEEP, // failOp
+ VK_STENCIL_OP_KEEP, // passOp
+ VK_STENCIL_OP_KEEP, // depthFailOp
+ VK_COMPARE_OP_NEVER, // compareOp
+ 0, // compareMask
+ 0, // writeMask
+ 0 // reference
+ };
+
+ static const VkPipelineDepthStencilStateCreateInfo stencilInfo = {
+ VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // flags
+ VK_FALSE, // depthTestEnable
+ VK_FALSE, // depthWriteEnable
+ VK_COMPARE_OP_ALWAYS, // depthCompareOp
+ VK_FALSE, // depthBoundsTestEnable
+ VK_FALSE, // stencilTestEnable
+ dummyStencilState, // front
+        dummyStencilState,                                            // back
+ 0.0f, // minDepthBounds
+ 1.0f // maxDepthBounds
+ };
+
+ static const VkPipelineViewportStateCreateInfo viewportInfo = {
+ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // flags
+ 1, // viewportCount
+ nullptr, // pViewports
+ 1, // scissorCount
+ nullptr // pScissors
+ };
+
+ static const VkPipelineColorBlendAttachmentState attachmentState = {
+        VK_TRUE,                                                      // blendEnable
+ VK_BLEND_FACTOR_ONE, // srcColorBlendFactor
+ VK_BLEND_FACTOR_ZERO, // dstColorBlendFactor
+ VK_BLEND_OP_ADD, // colorBlendOp
+ VK_BLEND_FACTOR_ONE, // srcAlphaBlendFactor
+ VK_BLEND_FACTOR_ZERO, // dstAlphaBlendFactor
+ VK_BLEND_OP_ADD, // alphaBlendOp
+ VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | // colorWriteMask
+ VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT // colorWriteMask
+ };
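+    // Although blending is enabled, a src factor of ONE and a dst factor of ZERO make the blend a
+    // plain replace of the destination color with the source color.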
+
+ static const VkPipelineColorBlendStateCreateInfo colorBlendInfo = {
+ VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // flags
+ VK_FALSE, // logicOpEnable
+ VK_LOGIC_OP_CLEAR, // logicOp
+ 1, // attachmentCount
+ &attachmentState, // pAttachments
+ { 0.f, 0.f, 0.f, 0.f } // blendConstants[4]
+ };
+
+ static const VkPipelineRasterizationStateCreateInfo rasterInfo = {
+ VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // flags
+ VK_FALSE, // depthClampEnable
+        VK_FALSE,                                                    // rasterizerDiscardEnable
+ VK_POLYGON_MODE_FILL, // polygonMode
+ VK_CULL_MODE_NONE, // cullMode
+ VK_FRONT_FACE_COUNTER_CLOCKWISE, // frontFace
+ VK_FALSE, // depthBiasEnable
+ 0.0f, // depthBiasConstantFactor
+ 0.0f, // depthBiasClamp
+ 0.0f, // depthBiasSlopeFactor
+ 1.0f // lineWidth
+ };
+
+ static const VkDynamicState dynamicStates[2] = { VK_DYNAMIC_STATE_VIEWPORT,
+ VK_DYNAMIC_STATE_SCISSOR };
+ static const VkPipelineDynamicStateCreateInfo dynamicInfo = {
+ VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // flags
+ 2, // dynamicStateCount
+ dynamicStates // pDynamicStates
+ };
+
+ VkPipelineMultisampleStateCreateInfo multisampleInfo;
+ setup_multisample_state(numSamples, &multisampleInfo);
+
+ VkGraphicsPipelineCreateInfo pipelineCreateInfo;
+ memset(&pipelineCreateInfo, 0, sizeof(VkGraphicsPipelineCreateInfo));
+ pipelineCreateInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
+ pipelineCreateInfo.pNext = nullptr;
+ pipelineCreateInfo.flags = 0;
+ pipelineCreateInfo.stageCount = 2;
+ pipelineCreateInfo.pStages = shaderStageInfo;
+ pipelineCreateInfo.pVertexInputState = &vertexInputInfo;
+ pipelineCreateInfo.pInputAssemblyState = &inputAssemblyInfo;
+ pipelineCreateInfo.pTessellationState = nullptr;
+ pipelineCreateInfo.pViewportState = &viewportInfo;
+ pipelineCreateInfo.pRasterizationState = &rasterInfo;
+ pipelineCreateInfo.pMultisampleState = &multisampleInfo;
+ pipelineCreateInfo.pDepthStencilState = &stencilInfo;
+ pipelineCreateInfo.pColorBlendState = &colorBlendInfo;
+ pipelineCreateInfo.pDynamicState = &dynamicInfo;
+ pipelineCreateInfo.layout = pipelineLayout;
+ pipelineCreateInfo.renderPass = renderPass.vkRenderPass();
+ pipelineCreateInfo.subpass = 0;
+ pipelineCreateInfo.basePipelineHandle = VK_NULL_HANDLE;
+ pipelineCreateInfo.basePipelineIndex = -1;
+
+ VkPipeline vkPipeline;
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateGraphicsPipelines(gpu->device(),
+ cache, 1,
+ &pipelineCreateInfo,
+ nullptr, &vkPipeline));
+ if (err) {
+ return nullptr;
+ }
+
+ return new GrVkCopyPipeline(vkPipeline, &renderPass);
+}
+
+bool GrVkCopyPipeline::isCompatible(const GrVkRenderPass& rp) const {
+ return rp.isCompatible(*fRenderPass);
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkCopyPipeline.h b/gfx/skia/skia/src/gpu/vk/GrVkCopyPipeline.h
new file mode 100644
index 000000000..a5c14ee4b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkCopyPipeline.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkCopyPipeline_DEFINED
+#define GrVkCopyPipeline_DEFINED
+
+#include "GrVkPipeline.h"
+
+class GrVkCopyPipeline : public GrVkPipeline {
+public:
+ // We expect the passed in renderPass to be stored on the GrVkResourceProvider and not a local
+ // object of the client.
+ static GrVkCopyPipeline* Create(GrVkGpu* gpu,
+ VkPipelineShaderStageCreateInfo* shaderStageInfo,
+ VkPipelineLayout pipelineLayout,
+ int numSamples,
+ const GrVkRenderPass& renderPass,
+ VkPipelineCache cache);
+
+ bool isCompatible(const GrVkRenderPass& rp) const;
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkCopyPipeline: %d (%d refs)\n", fPipeline, this->getRefCnt());
+ }
+#endif
+
+private:
+ GrVkCopyPipeline(VkPipeline pipeline, const GrVkRenderPass* renderPass)
+ : INHERITED(pipeline)
+ , fRenderPass(renderPass) {
+ }
+
+ const GrVkRenderPass* fRenderPass;
+
+ typedef GrVkPipeline INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkDescriptorPool.cpp b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorPool.cpp
new file mode 100644
index 000000000..b89145a73
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorPool.cpp
@@ -0,0 +1,51 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkDescriptorPool.h"
+
+#include "GrVkGpu.h"
+#include "SkTemplates.h"
+
+
+GrVkDescriptorPool::GrVkDescriptorPool(const GrVkGpu* gpu, VkDescriptorType type, uint32_t count)
+ : INHERITED()
+ , fType (type)
+ , fCount(count) {
+ VkDescriptorPoolSize poolSize;
+ memset(&poolSize, 0, sizeof(VkDescriptorPoolSize));
+ poolSize.descriptorCount = count;
+ poolSize.type = type;
+
+ VkDescriptorPoolCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkDescriptorPoolCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+    // This is a conservative over-estimate, since each set may contain more than one descriptor,
+    // in which case fewer than count sets can actually be allocated from the pool.
+ createInfo.maxSets = count;
+ createInfo.poolSizeCount = 1;
+ createInfo.pPoolSizes = &poolSize;
+
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateDescriptorPool(gpu->device(),
+ &createInfo,
+ nullptr,
+ &fDescPool));
+}
+
+bool GrVkDescriptorPool::isCompatible(VkDescriptorType type, uint32_t count) const {
+ return fType == type && count <= fCount;
+}
+
+void GrVkDescriptorPool::reset(const GrVkGpu* gpu) {
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), ResetDescriptorPool(gpu->device(), fDescPool, 0));
+}
+
+void GrVkDescriptorPool::freeGPUData(const GrVkGpu* gpu) const {
+ // Destroying the VkDescriptorPool will automatically free and delete any VkDescriptorSets
+ // allocated from the pool.
+ GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorPool(gpu->device(), fDescPool, nullptr));
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkDescriptorPool.h b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorPool.h
new file mode 100644
index 000000000..5327a7de8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorPool.h
@@ -0,0 +1,51 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkDescriptorPool_DEFINED
+#define GrVkDescriptorPool_DEFINED
+
+#include "GrVkResource.h"
+
+#include "vk/GrVkDefines.h"
+
+class GrVkGpu;
+
+/**
+ * We require that all descriptor sets are of a single descriptor type. We also use a pool to only
+ * make one type of descriptor set. Thus a single VkDescriptorPool will only allocate space for
+ * one type of descriptor.
+ */
+class GrVkDescriptorPool : public GrVkResource {
+public:
+ GrVkDescriptorPool(const GrVkGpu* gpu, VkDescriptorType type, uint32_t count);
+
+ VkDescriptorPool descPool() const { return fDescPool; }
+
+ void reset(const GrVkGpu* gpu);
+
+    // Returns whether or not this descriptor pool could be used to support the requested type and
+    // count, assuming it gets fully reset and is not in use by another draw.
+ bool isCompatible(VkDescriptorType type, uint32_t count) const;
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkDescriptorPool: %d, type %d (%d refs)\n", fDescPool, fType,
+ this->getRefCnt());
+ }
+#endif
+
+private:
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ VkDescriptorType fType;
+ uint32_t fCount;
+ VkDescriptorPool fDescPool;
+
+ typedef GrVkResource INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSet.cpp b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSet.cpp
new file mode 100644
index 000000000..47a997f55
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSet.cpp
@@ -0,0 +1,34 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkDescriptorSet.h"
+
+#include "GrVkDescriptorPool.h"
+#include "GrVkGpu.h"
+#include "GrVkResourceProvider.h"
+
+GrVkDescriptorSet::GrVkDescriptorSet(VkDescriptorSet descSet,
+ GrVkDescriptorPool* pool,
+ GrVkDescriptorSetManager::Handle handle)
+ : fDescSet(descSet)
+ , fPool(pool)
+ , fHandle(handle) {
+ fPool->ref();
+}
+
+void GrVkDescriptorSet::freeGPUData(const GrVkGpu* gpu) const {
+ fPool->unref(gpu);
+}
+
+void GrVkDescriptorSet::onRecycle(GrVkGpu* gpu) const {
+ gpu->resourceProvider().recycleDescriptorSet(this, fHandle);
+}
+
+void GrVkDescriptorSet::abandonSubResources() const {
+ fPool->unrefAndAbandon();
+}
+
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSet.h b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSet.h
new file mode 100644
index 000000000..69e2d4433
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSet.h
@@ -0,0 +1,44 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkDescriptorSet_DEFINED
+#define GrVkDescriptorSet_DEFINED
+
+#include "GrVkDescriptorSetManager.h"
+#include "GrVkResource.h"
+#include "vk/GrVkDefines.h"
+
+class GrVkDescriptorPool;
+class GrVkGpu;
+
+class GrVkDescriptorSet : public GrVkRecycledResource {
+public:
+ GrVkDescriptorSet(VkDescriptorSet descSet,
+ GrVkDescriptorPool* pool,
+ GrVkDescriptorSetManager::Handle handle);
+
+ ~GrVkDescriptorSet() override {}
+
+ VkDescriptorSet descriptorSet() const { return fDescSet; }
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkDescriptorSet: %d (%d refs)\n", fDescSet, this->getRefCnt());
+ }
+#endif
+
+private:
+ void freeGPUData(const GrVkGpu* gpu) const override;
+ void abandonSubResources() const override;
+ void onRecycle(GrVkGpu* gpu) const override;
+
+ VkDescriptorSet fDescSet;
+ SkDEBUGCODE(mutable) GrVkDescriptorPool* fPool;
+ GrVkDescriptorSetManager::Handle fHandle;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSetManager.cpp b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSetManager.cpp
new file mode 100644
index 000000000..868a5ce96
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSetManager.cpp
@@ -0,0 +1,315 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkDescriptorSetManager.h"
+
+#include "GrVkDescriptorPool.h"
+#include "GrVkDescriptorSet.h"
+#include "GrVkGpu.h"
+#include "GrVkUniformHandler.h"
+#include "glsl/GrGLSLSampler.h"
+
+GrVkDescriptorSetManager::GrVkDescriptorSetManager(GrVkGpu* gpu,
+ VkDescriptorType type,
+ const GrVkUniformHandler* uniformHandler)
+ : fPoolManager(type, gpu, uniformHandler) {
+ if (type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
+ SkASSERT(uniformHandler);
+ for (int i = 0; i < uniformHandler->numSamplers(); ++i) {
+ fBindingVisibilities.push_back(uniformHandler->getSampler(i).visibility());
+ }
+ } else {
+ SkASSERT(type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
+ // We set the visibility of the first binding to the vertex shader and the second to the
+ // fragment shader.
+ fBindingVisibilities.push_back(kVertex_GrShaderFlag);
+ fBindingVisibilities.push_back(kFragment_GrShaderFlag);
+ }
+}
+
+GrVkDescriptorSetManager::GrVkDescriptorSetManager(GrVkGpu* gpu,
+ VkDescriptorType type,
+ const SkTArray<uint32_t>& visibilities)
+ : fPoolManager(type, gpu, visibilities) {
+ if (type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
+ for (int i = 0; i < visibilities.count(); ++i) {
+ fBindingVisibilities.push_back(visibilities[i]);
+ }
+ } else {
+ SkASSERT(type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
+ SkASSERT(2 == visibilities.count() &&
+ kVertex_GrShaderFlag == visibilities[0] &&
+ kFragment_GrShaderFlag == visibilities[1]);
+ // We set the visibility of the first binding to the vertex shader and the second to the
+ // fragment shader.
+ fBindingVisibilities.push_back(kVertex_GrShaderFlag);
+ fBindingVisibilities.push_back(kFragment_GrShaderFlag);
+ }
+}
+
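+// Hands out a descriptor set for the given handle, preferring a previously recycled set from
+// fFreeSets; a new set is only allocated from the pool manager when the free list is empty.
+// Callers return sets through recycleDescriptorSet() (typically via GrVkDescriptorSet::onRecycle)
+// once they are no longer in use.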
+const GrVkDescriptorSet* GrVkDescriptorSetManager::getDescriptorSet(GrVkGpu* gpu,
+ const Handle& handle) {
+ const GrVkDescriptorSet* ds = nullptr;
+ int count = fFreeSets.count();
+ if (count > 0) {
+ ds = fFreeSets[count - 1];
+ fFreeSets.removeShuffle(count - 1);
+ } else {
+ VkDescriptorSet vkDS;
+ fPoolManager.getNewDescriptorSet(gpu, &vkDS);
+
+ ds = new GrVkDescriptorSet(vkDS, fPoolManager.fPool, handle);
+ }
+ SkASSERT(ds);
+ return ds;
+}
+
+void GrVkDescriptorSetManager::recycleDescriptorSet(const GrVkDescriptorSet* descSet) {
+ SkASSERT(descSet);
+ fFreeSets.push_back(descSet);
+}
+
+void GrVkDescriptorSetManager::release(const GrVkGpu* gpu) {
+ fPoolManager.freeGPUResources(gpu);
+
+ for (int i = 0; i < fFreeSets.count(); ++i) {
+ fFreeSets[i]->unref(gpu);
+ }
+ fFreeSets.reset();
+}
+
+void GrVkDescriptorSetManager::abandon() {
+ fPoolManager.abandonGPUResources();
+
+ for (int i = 0; i < fFreeSets.count(); ++i) {
+ fFreeSets[i]->unrefAndAbandon();
+ }
+ fFreeSets.reset();
+}
+
+bool GrVkDescriptorSetManager::isCompatible(VkDescriptorType type,
+ const GrVkUniformHandler* uniHandler) const {
+ SkASSERT(uniHandler);
+ if (type != fPoolManager.fDescType) {
+ return false;
+ }
+
+ if (type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
+ if (fBindingVisibilities.count() != uniHandler->numSamplers()) {
+ return false;
+ }
+ for (int i = 0; i < uniHandler->numSamplers(); ++i) {
+ if (uniHandler->getSampler(i).visibility() != fBindingVisibilities[i]) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+bool GrVkDescriptorSetManager::isCompatible(VkDescriptorType type,
+ const SkTArray<uint32_t>& visibilities) const {
+ if (type != fPoolManager.fDescType) {
+ return false;
+ }
+
+ if (type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
+ if (fBindingVisibilities.count() != visibilities.count()) {
+ return false;
+ }
+ for (int i = 0; i < visibilities.count(); ++i) {
+ if (visibilities[i] != fBindingVisibilities[i]) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+VkShaderStageFlags visibility_to_vk_stage_flags(uint32_t visibility) {
+ VkShaderStageFlags flags = 0;
+
+ if (visibility & kVertex_GrShaderFlag) {
+ flags |= VK_SHADER_STAGE_VERTEX_BIT;
+ }
+ if (visibility & kGeometry_GrShaderFlag) {
+ flags |= VK_SHADER_STAGE_GEOMETRY_BIT;
+ }
+ if (visibility & kFragment_GrShaderFlag) {
+ flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
+ }
+ return flags;
+}
+
+GrVkDescriptorSetManager::DescriptorPoolManager::DescriptorPoolManager(
+ VkDescriptorType type,
+ GrVkGpu* gpu,
+ const GrVkUniformHandler* uniformHandler)
+ : fDescType(type)
+ , fCurrentDescriptorCount(0)
+ , fPool(nullptr) {
+ this->init(gpu, type, uniformHandler, nullptr);
+}
+
+GrVkDescriptorSetManager::DescriptorPoolManager::DescriptorPoolManager(
+ VkDescriptorType type,
+ GrVkGpu* gpu,
+ const SkTArray<uint32_t>& visibilities)
+ : fDescType(type)
+ , fCurrentDescriptorCount(0)
+ , fPool(nullptr) {
+ this->init(gpu, type, nullptr, &visibilities);
+}
+
+void GrVkDescriptorSetManager::DescriptorPoolManager::init(GrVkGpu* gpu,
+ VkDescriptorType type,
+ const GrVkUniformHandler* uniformHandler,
+ const SkTArray<uint32_t>* visibilities) {
+ if (type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
+ SkASSERT(SkToBool(uniformHandler) != SkToBool(visibilities));
+ uint32_t numSamplers;
+ if (uniformHandler) {
+ numSamplers = (uint32_t)uniformHandler->numSamplers();
+ } else {
+ numSamplers = (uint32_t)visibilities->count();
+ }
+
+ SkAutoTDeleteArray<VkDescriptorSetLayoutBinding> dsSamplerBindings(
+ new VkDescriptorSetLayoutBinding[numSamplers]);
+ for (uint32_t i = 0; i < numSamplers; ++i) {
+ uint32_t visibility;
+ if (uniformHandler) {
+ const GrVkGLSLSampler& sampler =
+ static_cast<const GrVkGLSLSampler&>(uniformHandler->getSampler(i));
+ SkASSERT(sampler.binding() == i);
+ visibility = sampler.visibility();
+ } else {
+ visibility = (*visibilities)[i];
+ }
+ dsSamplerBindings[i].binding = i;
+ dsSamplerBindings[i].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ dsSamplerBindings[i].descriptorCount = 1;
+ dsSamplerBindings[i].stageFlags = visibility_to_vk_stage_flags(visibility);
+ dsSamplerBindings[i].pImmutableSamplers = nullptr;
+ }
+
+ VkDescriptorSetLayoutCreateInfo dsSamplerLayoutCreateInfo;
+ memset(&dsSamplerLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
+ dsSamplerLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ dsSamplerLayoutCreateInfo.pNext = nullptr;
+ dsSamplerLayoutCreateInfo.flags = 0;
+ dsSamplerLayoutCreateInfo.bindingCount = numSamplers;
+ // Setting to nullptr fixes an error in the param checker validation layer. Even though
+ // bindingCount is 0 (which is valid), it still tries to validate pBindings unless it is
+ // null.
+ dsSamplerLayoutCreateInfo.pBindings = numSamplers ? dsSamplerBindings.get() : nullptr;
+
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(),
+ CreateDescriptorSetLayout(gpu->device(),
+ &dsSamplerLayoutCreateInfo,
+ nullptr,
+ &fDescLayout));
+ fDescCountPerSet = numSamplers;
+ } else {
+ SkASSERT(type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
+ // Create Uniform Buffer Descriptor
+ // The vertex uniform buffer will have binding 0 and the fragment binding 1.
+ VkDescriptorSetLayoutBinding dsUniBindings[kUniformDescPerSet];
+ memset(&dsUniBindings, 0, 2 * sizeof(VkDescriptorSetLayoutBinding));
+ dsUniBindings[0].binding = GrVkUniformHandler::kVertexBinding;
+ dsUniBindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ dsUniBindings[0].descriptorCount = 1;
+ dsUniBindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
+ dsUniBindings[0].pImmutableSamplers = nullptr;
+ dsUniBindings[1].binding = GrVkUniformHandler::kFragBinding;
+ dsUniBindings[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ dsUniBindings[1].descriptorCount = 1;
+ dsUniBindings[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
+ dsUniBindings[1].pImmutableSamplers = nullptr;
+
+ VkDescriptorSetLayoutCreateInfo uniformLayoutCreateInfo;
+ memset(&uniformLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
+ uniformLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ uniformLayoutCreateInfo.pNext = nullptr;
+ uniformLayoutCreateInfo.flags = 0;
+ uniformLayoutCreateInfo.bindingCount = 2;
+ uniformLayoutCreateInfo.pBindings = dsUniBindings;
+
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateDescriptorSetLayout(gpu->device(),
+ &uniformLayoutCreateInfo,
+ nullptr,
+ &fDescLayout));
+ fDescCountPerSet = kUniformDescPerSet;
+ }
+
+ SkASSERT(fDescCountPerSet < kStartNumDescriptors);
+ fMaxDescriptors = kStartNumDescriptors;
+ SkASSERT(fMaxDescriptors > 0);
+ this->getNewPool(gpu);
+}
+
+void GrVkDescriptorSetManager::DescriptorPoolManager::getNewPool(GrVkGpu* gpu) {
+ if (fPool) {
+ fPool->unref(gpu);
+ uint32_t newPoolSize = fMaxDescriptors + ((fMaxDescriptors + 1) >> 1);
+ if (newPoolSize < kMaxDescriptors) {
+ fMaxDescriptors = newPoolSize;
+ } else {
+ fMaxDescriptors = kMaxDescriptors;
+ }
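+        // e.g. starting from kStartNumDescriptors (16), replacement pools hold 24, 36, 54, ...
+        // descriptors, capped at kMaxDescriptors (1024).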
+
+ }
+ fPool = gpu->resourceProvider().findOrCreateCompatibleDescriptorPool(fDescType,
+ fMaxDescriptors);
+ SkASSERT(fPool);
+}
+
+void GrVkDescriptorSetManager::DescriptorPoolManager::getNewDescriptorSet(GrVkGpu* gpu,
+ VkDescriptorSet* ds) {
+ if (!fMaxDescriptors) {
+ return;
+ }
+ fCurrentDescriptorCount += fDescCountPerSet;
+ if (fCurrentDescriptorCount > fMaxDescriptors) {
+ this->getNewPool(gpu);
+ fCurrentDescriptorCount = fDescCountPerSet;
+ }
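+    // e.g. for uniform buffers (fDescCountPerSet == 2) with the initial 16-descriptor pool, the
+    // 9th set requested pushes the count to 18 and triggers the switch to a larger pool.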
+
+ VkDescriptorSetAllocateInfo dsAllocateInfo;
+ memset(&dsAllocateInfo, 0, sizeof(VkDescriptorSetAllocateInfo));
+ dsAllocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+ dsAllocateInfo.pNext = nullptr;
+ dsAllocateInfo.descriptorPool = fPool->descPool();
+ dsAllocateInfo.descriptorSetCount = 1;
+ dsAllocateInfo.pSetLayouts = &fDescLayout;
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), AllocateDescriptorSets(gpu->device(),
+ &dsAllocateInfo,
+ ds));
+}
+
+void GrVkDescriptorSetManager::DescriptorPoolManager::freeGPUResources(const GrVkGpu* gpu) {
+ if (fDescLayout) {
+ GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorSetLayout(gpu->device(), fDescLayout,
+ nullptr));
+ fDescLayout = VK_NULL_HANDLE;
+ }
+
+ if (fPool) {
+ fPool->unref(gpu);
+ fPool = nullptr;
+ }
+}
+
+void GrVkDescriptorSetManager::DescriptorPoolManager::abandonGPUResources() {
+ fDescLayout = VK_NULL_HANDLE;
+ if (fPool) {
+ fPool->unrefAndAbandon();
+ fPool = nullptr;
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSetManager.h b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSetManager.h
new file mode 100644
index 000000000..84dd29ece
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkDescriptorSetManager.h
@@ -0,0 +1,95 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkDescriptorSetManager_DEFINED
+#define GrVkDescriptorSetManager_DEFINED
+
+#include "GrResourceHandle.h"
+#include "GrVkDescriptorPool.h"
+#include "SkRefCnt.h"
+#include "SkTArray.h"
+#include "vk/GrVkDefines.h"
+
+class GrVkDescriptorSet;
+class GrVkGpu;
+class GrVkUniformHandler;
+
+/**
+ * This class handles the allocation of descriptor sets for a given VkDescriptorSetLayout. It will
+ * try to reuse previously allocated descriptor sets if they are no longer in use by other objects.
+ */
+class GrVkDescriptorSetManager {
+public:
+ GR_DEFINE_RESOURCE_HANDLE_CLASS(Handle);
+
+ GrVkDescriptorSetManager(GrVkGpu* gpu,
+ VkDescriptorType,
+ const GrVkUniformHandler* handler = nullptr);
+
+ GrVkDescriptorSetManager(GrVkGpu* gpu,
+ VkDescriptorType,
+ const SkTArray<uint32_t>& visibilities);
+
+ ~GrVkDescriptorSetManager() {}
+
+ void abandon();
+ void release(const GrVkGpu* gpu);
+
+ VkDescriptorSetLayout layout() const { return fPoolManager.fDescLayout; }
+
+ const GrVkDescriptorSet* getDescriptorSet(GrVkGpu* gpu, const Handle& handle);
+
+ void recycleDescriptorSet(const GrVkDescriptorSet*);
+
+ bool isCompatible(VkDescriptorType type, const GrVkUniformHandler*) const;
+ bool isCompatible(VkDescriptorType type,
+ const SkTArray<uint32_t>& visibilities) const;
+
+private:
+ struct DescriptorPoolManager {
+ DescriptorPoolManager(VkDescriptorType type, GrVkGpu* gpu,
+ const GrVkUniformHandler* handler = nullptr);
+ DescriptorPoolManager(VkDescriptorType type, GrVkGpu* gpu,
+ const SkTArray<uint32_t>& visibilities);
+
+
+ ~DescriptorPoolManager() {
+ SkASSERT(!fDescLayout);
+ SkASSERT(!fPool);
+ }
+
+ void getNewDescriptorSet(GrVkGpu* gpu, VkDescriptorSet* ds);
+
+ void freeGPUResources(const GrVkGpu* gpu);
+ void abandonGPUResources();
+
+ VkDescriptorSetLayout fDescLayout;
+ VkDescriptorType fDescType;
+ uint32_t fDescCountPerSet;
+ uint32_t fMaxDescriptors;
+ uint32_t fCurrentDescriptorCount;
+ GrVkDescriptorPool* fPool;
+
+ private:
+ enum {
+ kUniformDescPerSet = 2,
+ kMaxDescriptors = 1024,
+            kStartNumDescriptors = 16, // must be less than kMaxDescriptors
+ };
+
+ void init(GrVkGpu* gpu, VkDescriptorType type, const GrVkUniformHandler* uniformHandler,
+ const SkTArray<uint32_t>* visibilities);
+
+ void getNewPool(GrVkGpu* gpu);
+ };
+
+ DescriptorPoolManager fPoolManager;
+ SkTArray<const GrVkDescriptorSet*, true> fFreeSets;
+ SkSTArray<4, uint32_t> fBindingVisibilities;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkExtensions.cpp b/gfx/skia/skia/src/gpu/vk/GrVkExtensions.cpp
new file mode 100644
index 000000000..03b453090
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkExtensions.cpp
@@ -0,0 +1,259 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "vk/GrVkExtensions.h"
+#include "vk/GrVkUtil.h"
+
+#include "SkTSearch.h"
+#include "SkTSort.h"
+
+namespace { // This cannot be static because it is used as a template parameter.
+inline bool extension_compare(const SkString& a, const SkString& b) {
+ return strcmp(a.c_str(), b.c_str()) < 0;
+}
+}
+
+// Returns the index of ext in strings, or a negative value if ext is not found.
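+// strings must already be sorted with extension_compare, since SkTSearch does a binary search.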
+static int find_string(const SkTArray<SkString>& strings, const char ext[]) {
+ if (strings.empty()) {
+ return -1;
+ }
+ SkString extensionStr(ext);
+ int idx = SkTSearch<SkString, extension_compare>(&strings.front(),
+ strings.count(),
+ extensionStr,
+ sizeof(SkString));
+ return idx;
+}
+
+#define GET_PROC_LOCAL(inst, F) PFN_vk ## F F = (PFN_vk ## F) vkGetInstanceProcAddr(inst, "vk" #F)
+
+static uint32_t remove_patch_version(uint32_t specVersion) {
+ return (specVersion >> 12) << 12;
+}
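+// e.g. a specVersion packed as VK_MAKE_VERSION(1, 0, 3) == (1 << 22) | (0 << 12) | 3 comes back
+// as (1 << 22), i.e. version 1.0.0 with the low 12 patch bits cleared.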
+
+bool GrVkExtensions::initInstance(uint32_t specVersion) {
+ uint32_t nonPatchVersion = remove_patch_version(specVersion);
+
+ GET_PROC_LOCAL(nullptr, EnumerateInstanceExtensionProperties);
+ GET_PROC_LOCAL(nullptr, EnumerateInstanceLayerProperties);
+
+ SkTLessFunctionToFunctorAdaptor<SkString, extension_compare> cmp;
+
+ if (!EnumerateInstanceExtensionProperties ||
+ !EnumerateInstanceLayerProperties) {
+ return false;
+ }
+
+ // instance layers
+ uint32_t layerCount = 0;
+ VkResult res = EnumerateInstanceLayerProperties(&layerCount, nullptr);
+ if (VK_SUCCESS != res) {
+ return false;
+ }
+ VkLayerProperties* layers = new VkLayerProperties[layerCount];
+ res = EnumerateInstanceLayerProperties(&layerCount, layers);
+ if (VK_SUCCESS != res) {
+ delete[] layers;
+ return false;
+ }
+ for (uint32_t i = 0; i < layerCount; ++i) {
+ if (nonPatchVersion >= remove_patch_version(layers[i].specVersion)) {
+ fInstanceLayerStrings->push_back() = layers[i].layerName;
+ }
+ }
+ delete[] layers;
+ if (!fInstanceLayerStrings->empty()) {
+ SkTQSort(&fInstanceLayerStrings->front(), &fInstanceLayerStrings->back(), cmp);
+ }
+
+ // instance extensions
+ // via Vulkan implementation and implicitly enabled layers
+ uint32_t extensionCount = 0;
+ res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
+ if (VK_SUCCESS != res) {
+ return false;
+ }
+ VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
+ res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
+ if (VK_SUCCESS != res) {
+ delete[] extensions;
+ return false;
+ }
+ for (uint32_t i = 0; i < extensionCount; ++i) {
+ if (nonPatchVersion >= remove_patch_version(extensions[i].specVersion)) {
+ fInstanceExtensionStrings->push_back() = extensions[i].extensionName;
+ }
+ }
+ delete [] extensions;
+ // sort so we can search
+ if (!fInstanceExtensionStrings->empty()) {
+ SkTQSort(&fInstanceExtensionStrings->front(), &fInstanceExtensionStrings->back(), cmp);
+ }
+ // via explicitly enabled layers
+ layerCount = fInstanceLayerStrings->count();
+ for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
+ uint32_t extensionCount = 0;
+ res = EnumerateInstanceExtensionProperties((*fInstanceLayerStrings)[layerIndex].c_str(),
+ &extensionCount, nullptr);
+ if (VK_SUCCESS != res) {
+ return false;
+ }
+ VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
+ res = EnumerateInstanceExtensionProperties((*fInstanceLayerStrings)[layerIndex].c_str(),
+ &extensionCount, extensions);
+ if (VK_SUCCESS != res) {
+ delete[] extensions;
+ return false;
+ }
+ for (uint32_t i = 0; i < extensionCount; ++i) {
+ // if not already in the list, add it
+ if (nonPatchVersion >= remove_patch_version(extensions[i].specVersion) &&
+ find_string(*fInstanceExtensionStrings, extensions[i].extensionName) < 0) {
+ fInstanceExtensionStrings->push_back() = extensions[i].extensionName;
+ SkTQSort(&fInstanceExtensionStrings->front(), &fInstanceExtensionStrings->back(),
+ cmp);
+ }
+ }
+ delete[] extensions;
+ }
+
+ return true;
+}
+
+bool GrVkExtensions::initDevice(uint32_t specVersion, VkInstance inst, VkPhysicalDevice physDev) {
+ uint32_t nonPatchVersion = remove_patch_version(specVersion);
+
+ GET_PROC_LOCAL(inst, EnumerateDeviceExtensionProperties);
+ GET_PROC_LOCAL(inst, EnumerateDeviceLayerProperties);
+
+ SkTLessFunctionToFunctorAdaptor<SkString, extension_compare> cmp;
+
+ if (!EnumerateDeviceExtensionProperties ||
+ !EnumerateDeviceLayerProperties) {
+ return false;
+ }
+
+ // device layers
+ uint32_t layerCount = 0;
+ VkResult res = EnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
+ if (VK_SUCCESS != res) {
+ return false;
+ }
+ VkLayerProperties* layers = new VkLayerProperties[layerCount];
+ res = EnumerateDeviceLayerProperties(physDev, &layerCount, layers);
+ if (VK_SUCCESS != res) {
+ delete[] layers;
+ return false;
+ }
+ for (uint32_t i = 0; i < layerCount; ++i) {
+ if (nonPatchVersion >= remove_patch_version(layers[i].specVersion)) {
+ fDeviceLayerStrings->push_back() = layers[i].layerName;
+ }
+ }
+ delete[] layers;
+ if (!fDeviceLayerStrings->empty()) {
+        SkTQSort(&fDeviceLayerStrings->front(), &fDeviceLayerStrings->back(), cmp);
+ }
+
+ // device extensions
+ // via Vulkan implementation and implicitly enabled layers
+ uint32_t extensionCount = 0;
+ res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, nullptr);
+ if (VK_SUCCESS != res) {
+ return false;
+ }
+ VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
+ res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, extensions);
+ if (VK_SUCCESS != res) {
+ delete[] extensions;
+ return false;
+ }
+ for (uint32_t i = 0; i < extensionCount; ++i) {
+ if (nonPatchVersion >= remove_patch_version(extensions[i].specVersion)) {
+ fDeviceExtensionStrings->push_back() = extensions[i].extensionName;
+ }
+ }
+ delete[] extensions;
+ if (!fDeviceExtensionStrings->empty()) {
+ SkTQSort(&fDeviceExtensionStrings->front(), &fDeviceExtensionStrings->back(), cmp);
+ }
+ // via explicitly enabled layers
+ layerCount = fDeviceLayerStrings->count();
+ for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
+ uint32_t extensionCount = 0;
+ res = EnumerateDeviceExtensionProperties(physDev,
+ (*fDeviceLayerStrings)[layerIndex].c_str(),
+ &extensionCount, nullptr);
+ if (VK_SUCCESS != res) {
+ return false;
+ }
+ VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
+ res = EnumerateDeviceExtensionProperties(physDev,
+ (*fDeviceLayerStrings)[layerIndex].c_str(),
+ &extensionCount, extensions);
+ if (VK_SUCCESS != res) {
+ delete[] extensions;
+ return false;
+ }
+ for (uint32_t i = 0; i < extensionCount; ++i) {
+ // if not already in the list, add it
+ if (nonPatchVersion >= remove_patch_version(extensions[i].specVersion) &&
+ find_string(*fDeviceExtensionStrings, extensions[i].extensionName) < 0) {
+ fDeviceExtensionStrings->push_back() = extensions[i].extensionName;
+ SkTQSort(&fDeviceExtensionStrings->front(), &fDeviceExtensionStrings->back(), cmp);
+ }
+ }
+ delete[] extensions;
+ }
+
+ return true;
+}
+
+bool GrVkExtensions::hasInstanceExtension(const char ext[]) const {
+ return find_string(*fInstanceExtensionStrings, ext) >= 0;
+}
+
+bool GrVkExtensions::hasDeviceExtension(const char ext[]) const {
+ return find_string(*fDeviceExtensionStrings, ext) >= 0;
+}
+
+bool GrVkExtensions::hasInstanceLayer(const char ext[]) const {
+ return find_string(*fInstanceLayerStrings, ext) >= 0;
+}
+
+bool GrVkExtensions::hasDeviceLayer(const char ext[]) const {
+ return find_string(*fDeviceLayerStrings, ext) >= 0;
+}
+
+void GrVkExtensions::print(const char* sep) const {
+ if (nullptr == sep) {
+ sep = " ";
+ }
+ int cnt = fInstanceExtensionStrings->count();
+ SkDebugf("Instance Extensions: ");
+ for (int i = 0; i < cnt; ++i) {
+ SkDebugf("%s%s", (*fInstanceExtensionStrings)[i].c_str(), (i < cnt - 1) ? sep : "");
+ }
+ cnt = fDeviceExtensionStrings->count();
+ SkDebugf("\nDevice Extensions: ");
+ for (int i = 0; i < cnt; ++i) {
+ SkDebugf("%s%s", (*fDeviceExtensionStrings)[i].c_str(), (i < cnt - 1) ? sep : "");
+ }
+ cnt = fInstanceLayerStrings->count();
+ SkDebugf("\nInstance Layers: ");
+ for (int i = 0; i < cnt; ++i) {
+ SkDebugf("%s%s", (*fInstanceLayerStrings)[i].c_str(), (i < cnt - 1) ? sep : "");
+ }
+ cnt = fDeviceLayerStrings->count();
+ SkDebugf("\nDevice Layers: ");
+ for (int i = 0; i < cnt; ++i) {
+ SkDebugf("%s%s", (*fDeviceLayerStrings)[i].c_str(), (i < cnt - 1) ? sep : "");
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkExtensions.h b/gfx/skia/skia/src/gpu/vk/GrVkExtensions.h
new file mode 100644
index 000000000..6c395fddd
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkExtensions.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkExtensions_DEFINED
+#define GrVkExtensions_DEFINED
+
+#include "../private/SkTArray.h"
+#include "SkString.h"
+#include "vk/GrVkDefines.h"
+
+/**
+ * This helper queries the Vulkan driver for the available extensions and layers, stores them,
+ * and answers queries against the stored lists. It supports both instance and device extensions
+ * and layers.
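+ *
+ * A minimal usage sketch (hypothetical caller code, not provided by this class):
+ *
+ *   GrVkExtensions extensions;
+ *   extensions.initInstance(VK_MAKE_VERSION(1, 0, 0));
+ *   if (extensions.hasInstanceLayer("VK_LAYER_LUNARG_standard_validation")) {
+ *       // request the validation layer when creating the VkInstance
+ *   }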
+ */
+class SK_API GrVkExtensions {
+public:
+ GrVkExtensions() : fInstanceExtensionStrings(new SkTArray<SkString>)
+ , fDeviceExtensionStrings(new SkTArray<SkString>)
+ , fInstanceLayerStrings(new SkTArray<SkString>)
+ , fDeviceLayerStrings(new SkTArray<SkString>) {}
+
+ bool initInstance(uint32_t specVersion);
+ bool initDevice(uint32_t specVersion, VkInstance, VkPhysicalDevice);
+
+ /**
+ * Queries whether an extension or layer is present. Will fail if not initialized.
+ */
+ bool hasInstanceExtension(const char[]) const;
+ bool hasDeviceExtension(const char[]) const;
+ bool hasInstanceLayer(const char[]) const;
+ bool hasDeviceLayer(const char[]) const;
+
+ void print(const char* sep = "\n") const;
+
+private:
+ SkAutoTDelete<SkTArray<SkString> > fInstanceExtensionStrings;
+ SkAutoTDelete<SkTArray<SkString> > fDeviceExtensionStrings;
+ SkAutoTDelete<SkTArray<SkString> > fInstanceLayerStrings;
+ SkAutoTDelete<SkTArray<SkString> > fDeviceLayerStrings;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkFramebuffer.cpp b/gfx/skia/skia/src/gpu/vk/GrVkFramebuffer.cpp
new file mode 100644
index 000000000..f9add633c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkFramebuffer.cpp
@@ -0,0 +1,57 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkFramebuffer.h"
+
+#include "GrVkGpu.h"
+#include "GrVkImageView.h"
+#include "GrVkRenderPass.h"
+
+GrVkFramebuffer* GrVkFramebuffer::Create(GrVkGpu* gpu,
+ int width, int height,
+ const GrVkRenderPass* renderPass,
+ const GrVkImageView* colorAttachment,
+ const GrVkImageView* stencilAttachment) {
+ // At the very least we need a renderPass and a colorAttachment
+ SkASSERT(renderPass);
+ SkASSERT(colorAttachment);
+
+ VkImageView attachments[3];
+ attachments[0] = colorAttachment->imageView();
+ int numAttachments = 1;
+ if (stencilAttachment) {
+ attachments[numAttachments++] = stencilAttachment->imageView();
+ }
+
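+    // The order of 'attachments' above must match the attachment order declared by the render
+    // pass (color first, then stencil here), which we assume is how GrVkRenderPass sets it up.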
+ VkFramebufferCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkFramebufferCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.renderPass = renderPass->vkRenderPass();
+ createInfo.attachmentCount = numAttachments;
+ createInfo.pAttachments = attachments;
+ createInfo.width = width;
+ createInfo.height = height;
+ createInfo.layers = 1;
+
+ VkFramebuffer framebuffer;
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateFramebuffer(gpu->device(),
+ &createInfo,
+ nullptr,
+ &framebuffer));
+ if (err) {
+ return nullptr;
+ }
+
+ return new GrVkFramebuffer(framebuffer);
+}
+
+void GrVkFramebuffer::freeGPUData(const GrVkGpu* gpu) const {
+ SkASSERT(fFramebuffer);
+ GR_VK_CALL(gpu->vkInterface(), DestroyFramebuffer(gpu->device(), fFramebuffer, nullptr));
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkFramebuffer.h b/gfx/skia/skia/src/gpu/vk/GrVkFramebuffer.h
new file mode 100644
index 000000000..b0f4beb3d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkFramebuffer.h
@@ -0,0 +1,50 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkFramebuffer_DEFINED
+#define GrVkFramebuffer_DEFINED
+
+#include "GrTypes.h"
+
+#include "GrVkResource.h"
+
+#include "vk/GrVkDefines.h"
+
+class GrVkGpu;
+class GrVkImageView;
+class GrVkRenderPass;
+
+class GrVkFramebuffer : public GrVkResource {
+public:
+ static GrVkFramebuffer* Create(GrVkGpu* gpu,
+ int width, int height,
+ const GrVkRenderPass* renderPass,
+ const GrVkImageView* colorAttachment,
+ const GrVkImageView* stencilAttachment);
+
+ VkFramebuffer framebuffer() const { return fFramebuffer; }
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkFramebuffer: %d (%d refs)\n", fFramebuffer, this->getRefCnt());
+ }
+#endif
+
+private:
+ GrVkFramebuffer(VkFramebuffer framebuffer) : INHERITED(), fFramebuffer(framebuffer) {}
+
+ GrVkFramebuffer(const GrVkFramebuffer&);
+ GrVkFramebuffer& operator=(const GrVkFramebuffer&);
+
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ VkFramebuffer fFramebuffer;
+
+ typedef GrVkResource INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkGLSLSampler.h b/gfx/skia/skia/src/gpu/vk/GrVkGLSLSampler.h
new file mode 100644
index 000000000..f0ba7fa33
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkGLSLSampler.h
@@ -0,0 +1,49 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkGLSLSampler_DEFINED
+#define GrVkGLSLSampler_DEFINED
+
+#include "glsl/GrGLSLSampler.h"
+
+#include "glsl/GrGLSLShaderVar.h"
+
+class GrVkGLSLSampler : public GrGLSLSampler {
+public:
+ GrVkGLSLSampler(uint32_t visibility,
+ GrPixelConfig config,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name,
+ uint32_t binding,
+ uint32_t set) : INHERITED(visibility, config), fBinding(binding) {
+ SkASSERT(GrSLTypeIsCombinedSamplerType(type));
+ fShaderVar.setType(type);
+ fShaderVar.setTypeModifier(GrGLSLShaderVar::kUniform_TypeModifier);
+ fShaderVar.setPrecision(precision);
+ fShaderVar.accessName()->set(name);
+ SkString layoutQualifier;
+ layoutQualifier.appendf("set=%d, binding=%d", set, binding);
+ fShaderVar.setLayoutQualifier(layoutQualifier.c_str());
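+        // This string presumably ends up in the generated shader as a GLSL
+        // "layout(set=..., binding=...)" qualifier, tying the sampler to its descriptor binding.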
+ }
+
+ GrSLType type() const override { return fShaderVar.getType(); }
+ uint32_t binding() const { return fBinding; }
+
+ const char* onGetSamplerNameForTexture2D() const override { return fShaderVar.c_str(); }
+ const char* getSamplerNameForTexelFetch() const override { return fShaderVar.c_str(); }
+
+private:
+ GrGLSLShaderVar fShaderVar;
+ uint32_t fBinding;
+
+ friend class GrVkUniformHandler;
+
+ typedef GrGLSLSampler INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkGpu.cpp b/gfx/skia/skia/src/gpu/vk/GrVkGpu.cpp
new file mode 100644
index 000000000..60a876373
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkGpu.cpp
@@ -0,0 +1,1918 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkGpu.h"
+
+#include "GrContextOptions.h"
+#include "GrGeometryProcessor.h"
+#include "GrGpuResourceCacheAccess.h"
+#include "GrMesh.h"
+#include "GrPipeline.h"
+#include "GrRenderTargetPriv.h"
+#include "GrSurfacePriv.h"
+#include "GrTexturePriv.h"
+
+#include "GrVkCommandBuffer.h"
+#include "GrVkGpuCommandBuffer.h"
+#include "GrVkImage.h"
+#include "GrVkIndexBuffer.h"
+#include "GrVkMemory.h"
+#include "GrVkPipeline.h"
+#include "GrVkPipelineState.h"
+#include "GrVkRenderPass.h"
+#include "GrVkResourceProvider.h"
+#include "GrVkTexture.h"
+#include "GrVkTextureRenderTarget.h"
+#include "GrVkTransferBuffer.h"
+#include "GrVkVertexBuffer.h"
+
+#include "SkConfig8888.h"
+#include "SkMipMap.h"
+
+#include "vk/GrVkInterface.h"
+#include "vk/GrVkTypes.h"
+
+#if USE_SKSL
+#include "SkSLCompiler.h"
+#endif
+
+#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
+#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
+#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)
+
+#ifdef SK_ENABLE_VK_LAYERS
+VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
+ VkDebugReportFlagsEXT flags,
+ VkDebugReportObjectTypeEXT objectType,
+ uint64_t object,
+ size_t location,
+ int32_t messageCode,
+ const char* pLayerPrefix,
+ const char* pMessage,
+ void* pUserData) {
+ if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
+ SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
+ } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
+ SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
+ } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
+ SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
+ } else {
+ SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
+ }
+ return VK_FALSE;
+}
+#endif
+
+GrGpu* GrVkGpu::Create(GrBackendContext backendContext, const GrContextOptions& options,
+ GrContext* context) {
+ const GrVkBackendContext* vkBackendContext =
+ reinterpret_cast<const GrVkBackendContext*>(backendContext);
+ if (!vkBackendContext) {
+ vkBackendContext = GrVkBackendContext::Create();
+ if (!vkBackendContext) {
+ return nullptr;
+ }
+ } else {
+ vkBackendContext->ref();
+ }
+
+ return new GrVkGpu(context, options, vkBackendContext);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
+ const GrVkBackendContext* backendCtx)
+ : INHERITED(context)
+ , fDevice(backendCtx->fDevice)
+ , fQueue(backendCtx->fQueue)
+ , fResourceProvider(this) {
+ fBackendContext.reset(backendCtx);
+
+#ifdef SK_ENABLE_VK_LAYERS
+ fCallback = VK_NULL_HANDLE;
+ if (backendCtx->fExtensions & kEXT_debug_report_GrVkExtensionFlag) {
+ // Setup callback creation information
+ VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
+ callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
+ callbackCreateInfo.pNext = nullptr;
+ callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
+ VK_DEBUG_REPORT_WARNING_BIT_EXT |
+ //VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
+ //VK_DEBUG_REPORT_DEBUG_BIT_EXT |
+ VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
+ callbackCreateInfo.pfnCallback = &DebugReportCallback;
+ callbackCreateInfo.pUserData = nullptr;
+
+ // Register the callback
+ GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateDebugReportCallbackEXT(
+ backendCtx->fInstance, &callbackCreateInfo, nullptr, &fCallback));
+ }
+#endif
+
+#if USE_SKSL
+ fCompiler = new SkSL::Compiler();
+#else
+ fCompiler = shaderc_compiler_initialize();
+#endif
+
+ fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendCtx->fPhysicalDevice,
+ backendCtx->fFeatures, backendCtx->fExtensions));
+ fCaps.reset(SkRef(fVkCaps.get()));
+
+ VK_CALL(GetPhysicalDeviceMemoryProperties(backendCtx->fPhysicalDevice, &fPhysDevMemProps));
+
+ const VkCommandPoolCreateInfo cmdPoolInfo = {
+ VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
+ nullptr, // pNext
+ VK_COMMAND_POOL_CREATE_TRANSIENT_BIT |
+ VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, // CmdPoolCreateFlags
+ backendCtx->fGraphicsQueueIndex, // queueFamilyIndex
+ };
+ GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateCommandPool(fDevice, &cmdPoolInfo, nullptr,
+ &fCmdPool));
+
+ // must call this after creating the CommandPool
+ fResourceProvider.init();
+ fCurrentCmdBuffer = fResourceProvider.findOrCreatePrimaryCommandBuffer();
+ SkASSERT(fCurrentCmdBuffer);
+ fCurrentCmdBuffer->begin(this);
+
+ // set up our heaps
+ fHeaps[kLinearImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 16*1024*1024));
+    // We would like the OptimalImage_Heap to use a SubAlloc_Strategy, but it occasionally causes
+    // the device to run out of memory, most likely because fragmentation in the device heap
+    // prevents further allocations. Until that is fixed, this stays on SingleAlloc.
+ fHeaps[kOptimalImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 64*1024*1024));
+ fHeaps[kSmallOptimalImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 2*1024*1024));
+ fHeaps[kVertexBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 0));
+ fHeaps[kIndexBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 0));
+ fHeaps[kUniformBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 256*1024));
+ fHeaps[kCopyReadBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 0));
+ fHeaps[kCopyWriteBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 16*1024*1024));
+}
+
+GrVkGpu::~GrVkGpu() {
+ fCurrentCmdBuffer->end(this);
+ fCurrentCmdBuffer->unref(this);
+
+ // wait for all commands to finish
+ fResourceProvider.checkCommandBuffers();
+ SkDEBUGCODE(VkResult res = ) VK_CALL(QueueWaitIdle(fQueue));
+
+    // On Windows, calls to QueueWaitIdle sometimes return before the fences on the command
+    // buffers are actually signaled, even though the buffers have completed. This causes an
+    // assert to fire when destroying the command buffers. So far this only seems to happen on
+    // Windows, so we add a sleep to make sure the fences signal.
+#ifdef SK_DEBUG
+#if defined(SK_BUILD_FOR_WIN)
+ Sleep(10); // In milliseconds
+#else
+    // Uncomment if the above bug happens on non-Windows builds.
+ // sleep(1); // In seconds
+#endif
+#endif
+
+#ifdef SK_DEBUG
+ if (this->vkCaps().allowInitializationErrorOnTearDown()) {
+ SkASSERT(VK_SUCCESS == res ||
+ VK_ERROR_DEVICE_LOST == res ||
+ VK_ERROR_INITIALIZATION_FAILED == res);
+ } else {
+ SkASSERT(VK_SUCCESS == res || VK_ERROR_DEVICE_LOST == res);
+ }
+#endif
+
+ fCopyManager.destroyResources(this);
+
+ // must call this just before we destroy the VkDevice
+ fResourceProvider.destroyResources();
+
+ VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));
+
+#if USE_SKSL
+ delete fCompiler;
+#else
+ shaderc_compiler_release(fCompiler);
+#endif
+
+#ifdef SK_ENABLE_VK_LAYERS
+ if (fCallback) {
+ VK_CALL(DestroyDebugReportCallbackEXT(fBackendContext->fInstance, fCallback, nullptr));
+ fCallback = VK_NULL_HANDLE;
+ }
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrGpuCommandBuffer* GrVkGpu::createCommandBuffer(
+ GrRenderTarget* target,
+ const GrGpuCommandBuffer::LoadAndStoreInfo& colorInfo,
+ const GrGpuCommandBuffer::LoadAndStoreInfo& stencilInfo) {
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
+ return new GrVkGpuCommandBuffer(this, vkRT, colorInfo, stencilInfo);
+}
+
+void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
+ SkASSERT(fCurrentCmdBuffer);
+ fCurrentCmdBuffer->end(this);
+
+ fCurrentCmdBuffer->submitToQueue(this, fQueue, sync);
+ fResourceProvider.checkCommandBuffers();
+
+ // Release old command buffer and create a new one
+ fCurrentCmdBuffer->unref(this);
+ fCurrentCmdBuffer = fResourceProvider.findOrCreatePrimaryCommandBuffer();
+ SkASSERT(fCurrentCmdBuffer);
+
+ fCurrentCmdBuffer->begin(this);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+GrBuffer* GrVkGpu::onCreateBuffer(size_t size, GrBufferType type, GrAccessPattern accessPattern,
+ const void* data) {
+ GrBuffer* buff;
+ switch (type) {
+ case kVertex_GrBufferType:
+ SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
+ kStatic_GrAccessPattern == accessPattern);
+ buff = GrVkVertexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
+ break;
+ case kIndex_GrBufferType:
+ SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
+ kStatic_GrAccessPattern == accessPattern);
+ buff = GrVkIndexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
+ break;
+ case kXferCpuToGpu_GrBufferType:
+ SkASSERT(kStream_GrAccessPattern == accessPattern);
+ buff = GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyRead_Type);
+ break;
+ case kXferGpuToCpu_GrBufferType:
+ SkASSERT(kStream_GrAccessPattern == accessPattern);
+ buff = GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyWrite_Type);
+ break;
+ default:
+ SkFAIL("Unknown buffer type.");
+ return nullptr;
+ }
+ if (data && buff) {
+ buff->updateData(data, size);
+ }
+ return buff;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
+ GrPixelConfig srcConfig, DrawPreference* drawPreference,
+ WritePixelTempDrawInfo* tempDrawInfo) {
+ if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
+ return false;
+ }
+
+ GrRenderTarget* renderTarget = dstSurface->asRenderTarget();
+
+ // Start off assuming no swizzling
+ tempDrawInfo->fSwizzle = GrSwizzle::RGBA();
+ tempDrawInfo->fWriteConfig = srcConfig;
+
+    // These are the settings we will always want if a temp draw is performed. Initially set the
+    // config to srcConfig, though that may be modified if we decide to do an R/B swap.
+ tempDrawInfo->fTempSurfaceDesc.fFlags = kNone_GrSurfaceFlags;
+ tempDrawInfo->fTempSurfaceDesc.fConfig = srcConfig;
+ tempDrawInfo->fTempSurfaceDesc.fWidth = width;
+ tempDrawInfo->fTempSurfaceDesc.fHeight = height;
+ tempDrawInfo->fTempSurfaceDesc.fSampleCnt = 0;
+ tempDrawInfo->fTempSurfaceDesc.fOrigin = kTopLeft_GrSurfaceOrigin;
+
+ if (dstSurface->config() == srcConfig) {
+ return true;
+ }
+
+ if (renderTarget && this->vkCaps().isConfigRenderable(renderTarget->config(),
+ renderTarget->numColorSamples() > 1)) {
+ ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
+
+ bool configsAreRBSwaps = GrPixelConfigSwapRAndB(srcConfig) == dstSurface->config();
+
+ if (!this->vkCaps().isConfigTexturable(srcConfig) && configsAreRBSwaps) {
+ if (!this->vkCaps().isConfigTexturable(dstSurface->config())) {
+ return false;
+ }
+ tempDrawInfo->fTempSurfaceDesc.fConfig = dstSurface->config();
+ tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
+ tempDrawInfo->fWriteConfig = dstSurface->config();
+ }
+ return true;
+ }
+
+ return false;
+}
+
+bool GrVkGpu::onWritePixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config,
+ const SkTArray<GrMipLevel>& texels) {
+ GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
+ if (!vkTex) {
+ return false;
+ }
+
+ // Make sure we have at least the base level
+ if (texels.empty() || !texels.begin()->fPixels) {
+ return false;
+ }
+
+ // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
+ if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
+ return false;
+ }
+
+ bool success = false;
+ if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
+ // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
+ SkASSERT(config == vkTex->desc().fConfig);
+ // TODO: add compressed texture support
+ // delete the following two lines and uncomment the two after that when ready
+ vkTex->unref();
+ return false;
+ //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
+ // height);
+ } else {
+ bool linearTiling = vkTex->isLinearTiled();
+ if (linearTiling) {
+ if (texels.count() > 1) {
+ SkDebugf("Can't upload mipmap data to linear tiled texture");
+ return false;
+ }
+ if (VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
+ // Need to change the layout to general in order to perform a host write
+ vkTex->setImageLayout(this,
+ VK_IMAGE_LAYOUT_GENERAL,
+ VK_ACCESS_HOST_WRITE_BIT,
+ VK_PIPELINE_STAGE_HOST_BIT,
+ false);
+ this->submitCommandBuffer(kForce_SyncQueue);
+ }
+ success = this->uploadTexDataLinear(vkTex, left, top, width, height, config,
+ texels.begin()->fPixels, texels.begin()->fRowBytes);
+ } else {
+ int newMipLevels = texels.count();
+ int currentMipLevels = vkTex->texturePriv().maxMipMapLevel() + 1;
+ if (newMipLevels != currentMipLevels) {
+ if (!vkTex->reallocForMipmap(this, newMipLevels)) {
+ return false;
+ }
+ }
+ success = this->uploadTexDataOptimal(vkTex, left, top, width, height, config, texels);
+ }
+ }
+
+ return success;
+}
+
+void GrVkGpu::resolveImage(GrVkRenderTarget* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ SkASSERT(dst);
+ SkASSERT(src && src->numColorSamples() > 1 && src->msaaImage());
+
+ if (this->vkCaps().mustSubmitCommandsBeforeCopyOp()) {
+ this->submitCommandBuffer(GrVkGpu::kSkip_SyncQueue);
+ }
+
+ // Flip rect if necessary
+ SkIRect srcVkRect = srcRect;
+ int32_t dstY = dstPoint.fY;
+
+ if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
+ SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
+ srcVkRect.fTop = src->height() - srcRect.fBottom;
+ srcVkRect.fBottom = src->height() - srcRect.fTop;
+ dstY = dst->height() - dstPoint.fY - srcVkRect.height();
+ }
+
+ VkImageResolve resolveInfo;
+ resolveInfo.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ resolveInfo.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
+ resolveInfo.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ resolveInfo.dstOffset = { dstPoint.fX, dstY, 0 };
+    // Per the spec, the depth of the extent should be ignored for 2D images, but certain devices
+    // (e.g. the Nexus 5X) currently fail if it is not 1.
+ resolveInfo.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };
+
+ dst->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ src->msaaImage()->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ fCurrentCmdBuffer->resolveImage(this, *src->msaaImage(), *dst, 1, &resolveInfo);
+}
+
+void GrVkGpu::onResolveRenderTarget(GrRenderTarget* target) {
+ if (target->needsResolve()) {
+ SkASSERT(target->numColorSamples() > 1);
+ GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
+ SkASSERT(rt->msaaImage());
+
+ const SkIRect& srcRect = rt->getResolveRect();
+
+ this->resolveImage(rt, rt, srcRect, SkIPoint::Make(srcRect.fLeft, srcRect.fTop));
+
+ rt->flagAsResolved();
+ }
+}
+
+bool GrVkGpu::uploadTexDataLinear(GrVkTexture* tex,
+ int left, int top, int width, int height,
+ GrPixelConfig dataConfig,
+ const void* data,
+ size_t rowBytes) {
+ SkASSERT(data);
+ SkASSERT(tex->isLinearTiled());
+
+ // If we're uploading compressed data then we should be using uploadCompressedTexData
+ SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
+
+ size_t bpp = GrBytesPerPixel(dataConfig);
+
+ const GrSurfaceDesc& desc = tex->desc();
+
+ if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
+ &width, &height, &data, &rowBytes)) {
+ return false;
+ }
+ size_t trimRowBytes = width * bpp;
+
+ SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
+ VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
+ const VkImageSubresource subres = {
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ 0, // mipLevel
+ 0, // arraySlice
+ };
+ VkSubresourceLayout layout;
+ VkResult err;
+
+ const GrVkInterface* interface = this->vkInterface();
+
+ GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
+ tex->image(),
+ &subres,
+ &layout));
+
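+    // For bottom-left-origin textures the destination rect is vertically flipped in Vulkan's
+    // top-left coordinate space, so the mapped region starts at the flipped top row and the copy
+    // loop below writes the rows in reverse.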
+ int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height : top;
+ const GrVkAlloc& alloc = tex->alloc();
+ VkDeviceSize offset = alloc.fOffset + texTop*layout.rowPitch + left*bpp;
+ VkDeviceSize size = height*layout.rowPitch;
+ void* mapPtr;
+ err = GR_VK_CALL(interface, MapMemory(fDevice, alloc.fMemory, offset, size, 0, &mapPtr));
+ if (err) {
+ return false;
+ }
+
+ if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
+ // copy into buffer by rows
+ const char* srcRow = reinterpret_cast<const char*>(data);
+ char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch;
+ for (int y = 0; y < height; y++) {
+ memcpy(dstRow, srcRow, trimRowBytes);
+ srcRow += rowBytes;
+ dstRow -= layout.rowPitch;
+ }
+ } else {
+ // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
+ if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
+ memcpy(mapPtr, data, trimRowBytes * height);
+ } else {
+ SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes, trimRowBytes,
+ height);
+ }
+ }
+
+ GrVkMemory::FlushMappedAlloc(this, alloc);
+ GR_VK_CALL(interface, UnmapMemory(fDevice, alloc.fMemory));
+
+ return true;
+}
+
+bool GrVkGpu::uploadTexDataOptimal(GrVkTexture* tex,
+ int left, int top, int width, int height,
+ GrPixelConfig dataConfig,
+ const SkTArray<GrMipLevel>& texels) {
+ SkASSERT(!tex->isLinearTiled());
+ // The assumption is either that we have no mipmaps, or that our rect is the entire texture
+ SkASSERT(1 == texels.count() ||
+ (0 == left && 0 == top && width == tex->width() && height == tex->height()));
+
+ // If we're uploading compressed data then we should be using uploadCompressedTexData
+ SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
+
+ if (width == 0 || height == 0) {
+ return false;
+ }
+
+ const GrSurfaceDesc& desc = tex->desc();
+ SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
+ size_t bpp = GrBytesPerPixel(dataConfig);
+
+    // texels is const, but we may need to adjust each level's fPixels pointer (based on the copy
+    // rect) or its fRowBytes, so we make a non-const shallow copy of texels.
+ SkTArray<GrMipLevel> texelsShallowCopy(texels);
+
+ for (int currentMipLevel = texelsShallowCopy.count() - 1; currentMipLevel >= 0;
+ currentMipLevel--) {
+ SkASSERT(texelsShallowCopy[currentMipLevel].fPixels);
+ }
+
+ // Determine whether we need to flip when we copy into the buffer
+ bool flipY = (kBottomLeft_GrSurfaceOrigin == desc.fOrigin && !texelsShallowCopy.empty());
+
+    // Adjust any params (left, top, currentWidth, currentHeight) as needed, and find the
+    // combined size of all the mip levels along with the relative offset of each level into the
+    // collective buffer. The first level is handled separately because we may need to adjust
+    // width and height (for the non-mipped case).
+ if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
+ &width,
+ &height,
+ &texelsShallowCopy[0].fPixels,
+ &texelsShallowCopy[0].fRowBytes)) {
+ return false;
+ }
+ SkTArray<size_t> individualMipOffsets(texelsShallowCopy.count());
+ individualMipOffsets.push_back(0);
+ size_t combinedBufferSize = width * bpp * height;
+ int currentWidth = width;
+ int currentHeight = height;
+ for (int currentMipLevel = 1; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
+ currentWidth = SkTMax(1, currentWidth/2);
+ currentHeight = SkTMax(1, currentHeight/2);
+ if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
+ &currentWidth,
+ &currentHeight,
+ &texelsShallowCopy[currentMipLevel].fPixels,
+ &texelsShallowCopy[currentMipLevel].fRowBytes)) {
+ return false;
+ }
+ const size_t trimmedSize = currentWidth * bpp * currentHeight;
+ individualMipOffsets.push_back(combinedBufferSize);
+ combinedBufferSize += trimmedSize;
+ }
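+    // At this point individualMipOffsets[i] holds the byte offset of mip level i within the
+    // staging buffer, and combinedBufferSize is the total tightly packed size of all levels.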
+
+ // allocate buffer to hold our mip data
+ GrVkTransferBuffer* transferBuffer =
+ GrVkTransferBuffer::Create(this, combinedBufferSize, GrVkBuffer::kCopyRead_Type);
+
+ char* buffer = (char*) transferBuffer->map();
+ SkTArray<VkBufferImageCopy> regions(texelsShallowCopy.count());
+
+ currentWidth = width;
+ currentHeight = height;
+ for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
+ const size_t trimRowBytes = currentWidth * bpp;
+ const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;
+
+ // copy data into the buffer, skipping the trailing bytes
+ char* dst = buffer + individualMipOffsets[currentMipLevel];
+ const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
+ if (flipY) {
+ src += (currentHeight - 1) * rowBytes;
+ for (int y = 0; y < currentHeight; y++) {
+ memcpy(dst, src, trimRowBytes);
+ src -= rowBytes;
+ dst += trimRowBytes;
+ }
+ } else if (trimRowBytes == rowBytes) {
+ memcpy(dst, src, trimRowBytes * currentHeight);
+ } else {
+ SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);
+ }
+
+ VkBufferImageCopy& region = regions.push_back();
+ memset(&region, 0, sizeof(VkBufferImageCopy));
+ region.bufferOffset = transferBuffer->offset() + individualMipOffsets[currentMipLevel];
+ region.bufferRowLength = currentWidth;
+ region.bufferImageHeight = currentHeight;
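+        // Note: bufferRowLength and bufferImageHeight are specified in texels; this matches the
+        // tightly packed per-level copy into the staging buffer above.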
+ region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1 };
+ region.imageOffset = { left, flipY ? tex->height() - top - currentHeight : top, 0 };
+ region.imageExtent = { (uint32_t)currentWidth, (uint32_t)currentHeight, 1 };
+
+ currentWidth = SkTMax(1, currentWidth/2);
+ currentHeight = SkTMax(1, currentHeight/2);
+ }
+
+ // no need to flush non-coherent memory, unmap will do that for us
+ transferBuffer->unmap();
+
+ // Change layout of our target so it can be copied to
+ tex->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ // Copy the buffer to the image
+ fCurrentCmdBuffer->copyBufferToImage(this,
+ transferBuffer,
+ tex,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ regions.count(),
+ regions.begin());
+ transferBuffer->unref();
+
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
+ const SkTArray<GrMipLevel>& texels) {
+ bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
+
+ VkFormat pixelFormat;
+ if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
+ return nullptr;
+ }
+
+ if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
+ return nullptr;
+ }
+
+ if (renderTarget && !fVkCaps->isConfigRenderable(desc.fConfig, false)) {
+ return nullptr;
+ }
+
+ bool linearTiling = false;
+ if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) {
+ // we can't have a linear texture with a mipmap
+ if (texels.count() > 1) {
+ SkDebugf("Trying to create linear tiled texture with mipmap");
+ return nullptr;
+ }
+ if (fVkCaps->isConfigTexturableLinearly(desc.fConfig) &&
+ (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) {
+ linearTiling = true;
+ } else {
+ return nullptr;
+ }
+ }
+
+ VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
+ if (renderTarget) {
+ usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ }
+
+    // For now we set VK_IMAGE_USAGE_TRANSFER_DST_BIT and VK_IMAGE_USAGE_TRANSFER_SRC_BIT on every
+    // texture since we do not know whether the texture will be used in a copy. This also assumes,
+    // as is currently the case, that all render targets in Vulkan are also textures. If we change
+    // this practice of setting both bits, we must make sure to set the dst bit whenever we upload
+    // srcData to the texture.
+ usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+
+ VkFlags memProps = (!texels.empty() && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
+ // requested, this ImageDesc describes the resolved texture. Therefore we always have samples set
+ // to 1.
+ int mipLevels = texels.empty() ? 1 : texels.count();
+ GrVkImage::ImageDesc imageDesc;
+ imageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ imageDesc.fFormat = pixelFormat;
+ imageDesc.fWidth = desc.fWidth;
+ imageDesc.fHeight = desc.fHeight;
+ imageDesc.fLevels = linearTiling ? 1 : mipLevels;
+ imageDesc.fSamples = 1;
+ imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
+ imageDesc.fUsageFlags = usageFlags;
+ imageDesc.fMemProps = memProps;
+
+ GrVkTexture* tex;
+ if (renderTarget) {
+ tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, budgeted, desc,
+ imageDesc);
+ } else {
+ tex = GrVkTexture::CreateNewTexture(this, budgeted, desc, imageDesc);
+ }
+
+ if (!tex) {
+ return nullptr;
+ }
+
+ if (!texels.empty()) {
+ SkASSERT(texels.begin()->fPixels);
+ bool success;
+ if (linearTiling) {
+ success = this->uploadTexDataLinear(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
+ texels.begin()->fPixels, texels.begin()->fRowBytes);
+ } else {
+ success = this->uploadTexDataOptimal(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
+ texels);
+ }
+ if (!success) {
+ tex->unref();
+ return nullptr;
+ }
+ }
+
+ return tex;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool GrVkGpu::updateBuffer(GrVkBuffer* buffer, const void* src,
+ VkDeviceSize offset, VkDeviceSize size) {
+
+ // Update the buffer
+ fCurrentCmdBuffer->updateBuffer(this, buffer, offset, size, src);
+
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) {
+ // By default, all textures in Vk use TopLeft
+ if (kDefault_GrSurfaceOrigin == origin) {
+ return kTopLeft_GrSurfaceOrigin;
+ } else {
+ return origin;
+ }
+}
+
+GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
+ GrWrapOwnership ownership) {
+ if (0 == desc.fTextureHandle) {
+ return nullptr;
+ }
+
+ int maxSize = this->caps()->maxTextureSize();
+ if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
+ return nullptr;
+ }
+
+ const GrVkImageInfo* info = reinterpret_cast<const GrVkImageInfo*>(desc.fTextureHandle);
+ if (VK_NULL_HANDLE == info->fImage || VK_NULL_HANDLE == info->fAlloc.fMemory) {
+ return nullptr;
+ }
+#ifdef SK_DEBUG
+ VkFormat format;
+ if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) {
+ return nullptr;
+ }
+ SkASSERT(format == info->fFormat);
+#endif
+
+ GrSurfaceDesc surfDesc;
+ // next line relies on GrBackendTextureDesc's flags matching GrTexture's
+ surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags;
+ surfDesc.fWidth = desc.fWidth;
+ surfDesc.fHeight = desc.fHeight;
+ surfDesc.fConfig = desc.fConfig;
+ surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
+ bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
+ // In GL, Chrome assumes all textures are BottomLeft
+ // In VK, we don't have this restriction
+ surfDesc.fOrigin = resolve_origin(desc.fOrigin);
+
+ GrVkTexture* texture = nullptr;
+ if (renderTarget) {
+ texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc,
+ ownership, info);
+ } else {
+ texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, ownership, info);
+ }
+ if (!texture) {
+ return nullptr;
+ }
+
+ return texture;
+}
+
+GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
+ GrWrapOwnership ownership) {
+
+ const GrVkImageInfo* info =
+ reinterpret_cast<const GrVkImageInfo*>(wrapDesc.fRenderTargetHandle);
+ if (VK_NULL_HANDLE == info->fImage ||
+ (VK_NULL_HANDLE == info->fAlloc.fMemory && kAdopt_GrWrapOwnership == ownership)) {
+ return nullptr;
+ }
+
+ GrSurfaceDesc desc;
+ desc.fConfig = wrapDesc.fConfig;
+ desc.fFlags = kCheckAllocation_GrSurfaceFlag;
+ desc.fWidth = wrapDesc.fWidth;
+ desc.fHeight = wrapDesc.fHeight;
+ desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());
+
+ desc.fOrigin = resolve_origin(wrapDesc.fOrigin);
+
+ GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc,
+ ownership,
+ info);
+ if (tgt && wrapDesc.fStencilBits) {
+ if (!createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) {
+ tgt->unref();
+ return nullptr;
+ }
+ }
+ return tgt;
+}
+
+void GrVkGpu::generateMipmap(GrVkTexture* tex) {
+ // don't do anything for linearly tiled textures (can't have mipmaps)
+ if (tex->isLinearTiled()) {
+ SkDebugf("Trying to create mipmap for linear tiled texture");
+ return;
+ }
+
+ // determine if we can blit to and from this format
+ const GrVkCaps& caps = this->vkCaps();
+ if (!caps.configCanBeDstofBlit(tex->config(), false) ||
+ !caps.configCanBeSrcofBlit(tex->config(), false) ||
+ !caps.mipMapSupport()) {
+ return;
+ }
+
+ if (this->vkCaps().mustSubmitCommandsBeforeCopyOp()) {
+ this->submitCommandBuffer(kSkip_SyncQueue);
+ }
+
+ // We may need to resolve the texture first if it is also a render target
+ GrVkRenderTarget* texRT = static_cast<GrVkRenderTarget*>(tex->asRenderTarget());
+ if (texRT) {
+ this->onResolveRenderTarget(texRT);
+ }
+
+ int width = tex->width();
+ int height = tex->height();
+ VkImageBlit blitRegion;
+ memset(&blitRegion, 0, sizeof(VkImageBlit));
+
+ // SkMipMap doesn't include the base level in the level count so we have to add 1
+ uint32_t levelCount = SkMipMap::ComputeLevelCount(tex->width(), tex->height()) + 1;
+ if (levelCount != tex->mipLevels()) {
+ const GrVkResource* oldResource = tex->resource();
+ oldResource->ref();
+ // grab handle to the original image resource
+ VkImage oldImage = tex->image();
+
+ // change the original image's layout so we can copy from it
+ tex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ VK_ACCESS_TRANSFER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
+
+ if (!tex->reallocForMipmap(this, levelCount)) {
+ oldResource->unref(this);
+ return;
+ }
+ // change the new image's layout so we can blit to it
+ tex->setImageLayout(this, VK_IMAGE_LAYOUT_GENERAL,
+ VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
+
+ // Blit original image to top level of new image
+ blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ blitRegion.srcOffsets[0] = { 0, 0, 0 };
+ blitRegion.srcOffsets[1] = { width, height, 1 };
+ blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ blitRegion.dstOffsets[0] = { 0, 0, 0 };
+ blitRegion.dstOffsets[1] = { width, height, 1 };
+
+ fCurrentCmdBuffer->blitImage(this,
+ oldResource,
+ oldImage,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ tex->resource(),
+ tex->image(),
+ VK_IMAGE_LAYOUT_GENERAL,
+ 1,
+ &blitRegion,
+ VK_FILTER_LINEAR);
+
+ oldResource->unref(this);
+ } else {
+ // change layout of the layers so we can write to them.
+ tex->setImageLayout(this, VK_IMAGE_LAYOUT_GENERAL,
+ VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
+ }
+
+ // setup memory barrier
+ SkASSERT(GrVkFormatToPixelConfig(tex->imageFormat(), nullptr));
+ VkImageAspectFlags aspectFlags = VK_IMAGE_ASPECT_COLOR_BIT;
+ VkImageMemoryBarrier imageMemoryBarrier = {
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
+ NULL, // pNext
+ VK_ACCESS_TRANSFER_WRITE_BIT, // srcAccessMask
+ VK_ACCESS_TRANSFER_READ_BIT, // dstAccessMask
+ VK_IMAGE_LAYOUT_GENERAL, // oldLayout
+ VK_IMAGE_LAYOUT_GENERAL, // newLayout
+ VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
+ VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
+ tex->image(), // image
+ { aspectFlags, 0, 1, 0, 1 } // subresourceRange
+ };
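+    // This barrier is reused each iteration below (with baseMipLevel updated) so that the writes
+    // to the previous mip level are visible before it is read as the blit source.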
+
+ // Blit the miplevels
+ uint32_t mipLevel = 1;
+ while (mipLevel < levelCount) {
+ int prevWidth = width;
+ int prevHeight = height;
+ width = SkTMax(1, width / 2);
+ height = SkTMax(1, height / 2);
+
+ imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
+ this->addImageMemoryBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false, &imageMemoryBarrier);
+
+ blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
+ blitRegion.srcOffsets[0] = { 0, 0, 0 };
+ blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
+ blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
+ blitRegion.dstOffsets[0] = { 0, 0, 0 };
+ blitRegion.dstOffsets[1] = { width, height, 1 };
+ fCurrentCmdBuffer->blitImage(this,
+ *tex,
+ *tex,
+ 1,
+ &blitRegion,
+ VK_FILTER_LINEAR);
+ ++mipLevel;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
+ int width,
+ int height) {
+ SkASSERT(width >= rt->width());
+ SkASSERT(height >= rt->height());
+
+ int samples = rt->numStencilSamples();
+
+ const GrVkCaps::StencilFormat& sFmt = this->vkCaps().preferedStencilFormat();
+
+ GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
+ width,
+ height,
+ samples,
+ sFmt));
+ fStats.incStencilAttachmentCreates();
+ return stencil;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool copy_testing_data(GrVkGpu* gpu, void* srcData, const GrVkAlloc& alloc,
+ size_t srcRowBytes, size_t dstRowBytes, int h) {
+ void* mapPtr;
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), MapMemory(gpu->device(),
+ alloc.fMemory,
+ alloc.fOffset,
+ dstRowBytes * h,
+ 0,
+ &mapPtr));
+ if (err) {
+ return false;
+ }
+
+    // If there is no padding on dst we can do a single memcpy.
+ // This assumes the srcData comes in with no padding.
+ if (srcRowBytes == dstRowBytes) {
+ memcpy(mapPtr, srcData, srcRowBytes * h);
+ } else {
+ SkRectMemcpy(mapPtr, static_cast<size_t>(dstRowBytes), srcData, srcRowBytes,
+ srcRowBytes, h);
+ }
+ GrVkMemory::FlushMappedAlloc(gpu, alloc);
+ GR_VK_CALL(gpu->vkInterface(), UnmapMemory(gpu->device(), alloc.fMemory));
+ return true;
+}
+
+GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
+ GrPixelConfig config,
+ bool isRenderTarget) {
+
+ VkFormat pixelFormat;
+ if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
+ return 0;
+ }
+
+ bool linearTiling = false;
+ if (!fVkCaps->isConfigTexturable(config)) {
+ return 0;
+ }
+
+ if (isRenderTarget && !fVkCaps->isConfigRenderable(config, false)) {
+ return 0;
+ }
+
+ if (fVkCaps->isConfigTexturableLinearly(config) &&
+ (!isRenderTarget || fVkCaps->isConfigRenderableLinearly(config, false))) {
+ linearTiling = true;
+ }
+
+ VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
+ usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+ usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ if (isRenderTarget) {
+ usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ }
+
+ VkImage image = VK_NULL_HANDLE;
+ GrVkAlloc alloc = { VK_NULL_HANDLE, 0, 0, 0 };
+
+ VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
+ VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling)
+ ? VK_IMAGE_LAYOUT_PREINITIALIZED
+ : VK_IMAGE_LAYOUT_UNDEFINED;
+
+ // Create Image
+ VkSampleCountFlagBits vkSamples;
+ if (!GrSampleCountToVkSampleCount(1, &vkSamples)) {
+ return 0;
+ }
+
+ const VkImageCreateInfo imageCreateInfo = {
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
+ NULL, // pNext
+ 0, // VkImageCreateFlags
+ VK_IMAGE_TYPE_2D, // VkImageType
+ pixelFormat, // VkFormat
+ { (uint32_t) w, (uint32_t) h, 1 }, // VkExtent3D
+ 1, // mipLevels
+ 1, // arrayLayers
+ vkSamples, // samples
+ imageTiling, // VkImageTiling
+ usageFlags, // VkImageUsageFlags
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode
+ 0, // queueFamilyCount
+ 0, // pQueueFamilyIndices
+ initialLayout // initialLayout
+ };
+
+ GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateImage(this->device(), &imageCreateInfo, nullptr, &image));
+
+ if (!GrVkMemory::AllocAndBindImageMemory(this, image, linearTiling, &alloc)) {
+ VK_CALL(DestroyImage(this->device(), image, nullptr));
+ return 0;
+ }
+
+ if (srcData) {
+ size_t bpp = GrBytesPerPixel(config);
+ size_t rowCopyBytes = bpp * w;
+ if (linearTiling) {
+ const VkImageSubresource subres = {
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ 0, // mipLevel
+ 0, // arraySlice
+ };
+ VkSubresourceLayout layout;
+
+ VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout));
+
+ if (!copy_testing_data(this, srcData, alloc, rowCopyBytes,
+ static_cast<size_t>(layout.rowPitch), h)) {
+ GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
+ VK_CALL(DestroyImage(fDevice, image, nullptr));
+ return 0;
+ }
+ } else {
+ SkASSERT(w && h);
+
+ VkBuffer buffer;
+ VkBufferCreateInfo bufInfo;
+ memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
+ bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ bufInfo.flags = 0;
+ bufInfo.size = rowCopyBytes * h;
+ bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+ bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ bufInfo.queueFamilyIndexCount = 0;
+ bufInfo.pQueueFamilyIndices = nullptr;
+ VkResult err;
+ err = VK_CALL(CreateBuffer(fDevice, &bufInfo, nullptr, &buffer));
+
+ if (err) {
+ GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
+ VK_CALL(DestroyImage(fDevice, image, nullptr));
+ return 0;
+ }
+
+ GrVkAlloc bufferAlloc = { VK_NULL_HANDLE, 0, 0, 0 };
+ if (!GrVkMemory::AllocAndBindBufferMemory(this, buffer, GrVkBuffer::kCopyRead_Type,
+ true, &bufferAlloc)) {
+ GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
+ VK_CALL(DestroyImage(fDevice, image, nullptr));
+ VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+ return 0;
+ }
+
+ if (!copy_testing_data(this, srcData, bufferAlloc, rowCopyBytes, rowCopyBytes, h)) {
+ GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
+ VK_CALL(DestroyImage(fDevice, image, nullptr));
+ GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
+ VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+ return 0;
+ }
+
+ const VkCommandBufferAllocateInfo cmdInfo = {
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
+ NULL, // pNext
+ fCmdPool, // commandPool
+ VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
+ 1 // bufferCount
+ };
+
+ VkCommandBuffer cmdBuffer;
+ err = VK_CALL(AllocateCommandBuffers(fDevice, &cmdInfo, &cmdBuffer));
+ if (err) {
+ GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
+ VK_CALL(DestroyImage(fDevice, image, nullptr));
+ GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
+ VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+ return 0;
+ }
+
+ VkCommandBufferBeginInfo cmdBufferBeginInfo;
+ memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
+ cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdBufferBeginInfo.pNext = nullptr;
+ cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ cmdBufferBeginInfo.pInheritanceInfo = nullptr;
+
+ err = VK_CALL(BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
+ SkASSERT(!err);
+
+ // Set image layout and add barrier
+ VkImageMemoryBarrier barrier;
+ memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
+ barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ barrier.pNext = nullptr;
+ barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout);
+ barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+ barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.image = image;
+            barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
+
+ VK_CALL(CmdPipelineBarrier(cmdBuffer,
+ GrVkMemory::LayoutToPipelineStageFlags(initialLayout),
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ 0,
+ 0, nullptr,
+ 0, nullptr,
+ 1, &barrier));
+ initialLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+
+ // Submit copy command
+ VkBufferImageCopy region;
+ memset(&region, 0, sizeof(VkBufferImageCopy));
+ region.bufferOffset = 0;
+ region.bufferRowLength = w;
+ region.bufferImageHeight = h;
+ region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ region.imageOffset = { 0, 0, 0 };
+ region.imageExtent = { (uint32_t)w, (uint32_t)h, 1 };
+
+ VK_CALL(CmdCopyBufferToImage(cmdBuffer, buffer, image, initialLayout, 1, &region));
+
+ // End CommandBuffer
+ err = VK_CALL(EndCommandBuffer(cmdBuffer));
+ SkASSERT(!err);
+
+ // Create Fence for queue
+ VkFence fence;
+ VkFenceCreateInfo fenceInfo;
+ memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
+ fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+
+ err = VK_CALL(CreateFence(fDevice, &fenceInfo, nullptr, &fence));
+ SkASSERT(!err);
+
+ VkSubmitInfo submitInfo;
+ memset(&submitInfo, 0, sizeof(VkSubmitInfo));
+ submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submitInfo.pNext = nullptr;
+ submitInfo.waitSemaphoreCount = 0;
+ submitInfo.pWaitSemaphores = nullptr;
+ submitInfo.pWaitDstStageMask = 0;
+ submitInfo.commandBufferCount = 1;
+ submitInfo.pCommandBuffers = &cmdBuffer;
+ submitInfo.signalSemaphoreCount = 0;
+ submitInfo.pSignalSemaphores = nullptr;
+ err = VK_CALL(QueueSubmit(this->queue(), 1, &submitInfo, fence));
+ SkASSERT(!err);
+
+ err = VK_CALL(WaitForFences(fDevice, 1, &fence, true, UINT64_MAX));
+ if (VK_TIMEOUT == err) {
+ GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
+ VK_CALL(DestroyImage(fDevice, image, nullptr));
+ GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
+ VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+ VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
+ VK_CALL(DestroyFence(fDevice, fence, nullptr));
+ SkDebugf("Fence failed to signal: %d\n", err);
+ SkFAIL("failing");
+ }
+ SkASSERT(!err);
+
+ // Clean up transfer resources
+ GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
+ VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+ VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
+ VK_CALL(DestroyFence(fDevice, fence, nullptr));
+ }
+ }
+
+ GrVkImageInfo* info = new GrVkImageInfo;
+ info->fImage = image;
+ info->fAlloc = alloc;
+ info->fImageTiling = imageTiling;
+ info->fImageLayout = initialLayout;
+ info->fFormat = pixelFormat;
+ info->fLevelCount = 1;
+
+ return (GrBackendObject)info;
+}
+
+bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
+ const GrVkImageInfo* backend = reinterpret_cast<const GrVkImageInfo*>(id);
+
+ if (backend && backend->fImage && backend->fAlloc.fMemory) {
+ VkMemoryRequirements req;
+ memset(&req, 0, sizeof(req));
+ GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
+ backend->fImage,
+ &req));
+ // TODO: find a better check
+ // This will probably fail with a different driver
+ return (req.size > 0) && (req.size <= 8192 * 8192);
+ }
+
+ return false;
+}
+
+void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
+ GrVkImageInfo* backend = reinterpret_cast<GrVkImageInfo*>(id);
+ if (backend) {
+ if (!abandon) {
+ // something in the command buffer may still be using this, so force submit
+ this->submitCommandBuffer(kForce_SyncQueue);
+ GrVkImage::DestroyImageInfo(this, backend);
+ }
+ delete backend;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkMemoryBarrier* barrier) const {
+ SkASSERT(fCurrentCmdBuffer);
+ fCurrentCmdBuffer->pipelineBarrier(this,
+ srcStageMask,
+ dstStageMask,
+ byRegion,
+ GrVkCommandBuffer::kMemory_BarrierType,
+ barrier);
+}
+
+void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkBufferMemoryBarrier* barrier) const {
+ SkASSERT(fCurrentCmdBuffer);
+ fCurrentCmdBuffer->pipelineBarrier(this,
+ srcStageMask,
+ dstStageMask,
+ byRegion,
+ GrVkCommandBuffer::kBufferMemory_BarrierType,
+ barrier);
+}
+
+void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkImageMemoryBarrier* barrier) const {
+ SkASSERT(fCurrentCmdBuffer);
+ fCurrentCmdBuffer->pipelineBarrier(this,
+ srcStageMask,
+ dstStageMask,
+ byRegion,
+ GrVkCommandBuffer::kImageMemory_BarrierType,
+ barrier);
+}
+
+void GrVkGpu::finishDrawTarget() {
+ // Submit the current command buffer to the Queue
+ this->submitCommandBuffer(kSkip_SyncQueue);
+}
+
+void GrVkGpu::clearStencil(GrRenderTarget* target) {
+ if (nullptr == target) {
+ return;
+ }
+ GrStencilAttachment* stencil = target->renderTargetPriv().getStencilAttachment();
+ GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
+
+
+ VkClearDepthStencilValue vkStencilColor;
+ memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
+
+ vkStencil->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ VkImageSubresourceRange subRange;
+ memset(&subRange, 0, sizeof(VkImageSubresourceRange));
+ subRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
+ subRange.baseMipLevel = 0;
+ subRange.levelCount = 1;
+ subRange.baseArrayLayer = 0;
+ subRange.layerCount = 1;
+
+    // TODO: Most of the time when we want to clear a stencil it will be at the beginning of a
+    // draw, so we should look into using the render pass load ops to clear the stencil there
+    // instead.
+ fCurrentCmdBuffer->clearDepthStencilImage(this, vkStencil, &vkStencilColor, 1, &subRange);
+}
+
+inline bool can_copy_image(const GrSurface* dst,
+ const GrSurface* src,
+ const GrVkGpu* gpu) {
+ const GrRenderTarget* dstRT = dst->asRenderTarget();
+ const GrRenderTarget* srcRT = src->asRenderTarget();
+ if (dstRT && srcRT) {
+ if (srcRT->numColorSamples() != dstRT->numColorSamples()) {
+ return false;
+ }
+ } else if (dstRT) {
+ if (dstRT->numColorSamples() > 1) {
+ return false;
+ }
+ } else if (srcRT) {
+ if (srcRT->numColorSamples() > 1) {
+ return false;
+ }
+ }
+
+    // We require that all Vulkan GrSurfaces have been created with VK_IMAGE_USAGE_TRANSFER_DST_BIT
+    // and VK_IMAGE_USAGE_TRANSFER_SRC_BIT set among their image usage flags.
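+    // vkCmdCopyImage only requires the two formats to have the same texel size (as we understand
+    // the spec), which is why a matching bytes-per-pixel check suffices here rather than an exact
+    // config match.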
+ if (src->origin() == dst->origin() &&
+ GrBytesPerPixel(src->config()) == GrBytesPerPixel(dst->config())) {
+ return true;
+ }
+
+ return false;
+}
+
+void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
+ GrSurface* src,
+ GrVkImage* dstImage,
+ GrVkImage* srcImage,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ SkASSERT(can_copy_image(dst, src, this));
+
+    // These flags are for flushing/invalidating caches; for the dst image it doesn't matter if
+    // the cache is flushed since it is only being written to.
+ dstImage->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ srcImage->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ // Flip rect if necessary
+ SkIRect srcVkRect = srcRect;
+ int32_t dstY = dstPoint.fY;
+
+ if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
+ SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
+ srcVkRect.fTop = src->height() - srcRect.fBottom;
+ srcVkRect.fBottom = src->height() - srcRect.fTop;
+ dstY = dst->height() - dstPoint.fY - srcVkRect.height();
+ }
+
+ VkImageCopy copyRegion;
+ memset(&copyRegion, 0, sizeof(VkImageCopy));
+ copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
+ copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
+    // According to the Vulkan spec, the depth value of the extent is ignored for 2D images.
+    // However, at least the Nexus 5X seems to check it, so as a workaround we must set the depth
+    // value to 1.
+ copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };
+
+ fCurrentCmdBuffer->copyImage(this,
+ srcImage,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ dstImage,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ 1,
+ &copyRegion);
+
+ SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
+ srcRect.width(), srcRect.height());
+ this->didWriteToSurface(dst, &dstRect);
+}
+
+inline bool can_copy_as_blit(const GrSurface* dst,
+ const GrSurface* src,
+ const GrVkImage* dstImage,
+ const GrVkImage* srcImage,
+ const GrVkGpu* gpu) {
+ // We require that all vulkan GrSurfaces have been created with transfer_dst and transfer_src
+ // as image usage flags.
+ const GrVkCaps& caps = gpu->vkCaps();
+ if (!caps.configCanBeDstofBlit(dst->config(), dstImage->isLinearTiled()) ||
+ !caps.configCanBeSrcofBlit(src->config(), srcImage->isLinearTiled())) {
+ return false;
+ }
+
+    // We cannot blit images that are multisampled. We will need to figure out if we can blit the
+    // resolved MSAA image, though.
+ if ((dst->asRenderTarget() && dst->asRenderTarget()->numColorSamples() > 1) ||
+ (src->asRenderTarget() && src->asRenderTarget()->numColorSamples() > 1)) {
+ return false;
+ }
+
+ return true;
+}
+
+void GrVkGpu::copySurfaceAsBlit(GrSurface* dst,
+ GrSurface* src,
+ GrVkImage* dstImage,
+ GrVkImage* srcImage,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ SkASSERT(can_copy_as_blit(dst, src, dstImage, srcImage, this));
+
+ dstImage->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ srcImage->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ // Flip rect if necessary
+ SkIRect srcVkRect;
+ srcVkRect.fLeft = srcRect.fLeft;
+ srcVkRect.fRight = srcRect.fRight;
+ SkIRect dstRect;
+ dstRect.fLeft = dstPoint.fX;
+ dstRect.fRight = dstPoint.fX + srcRect.width();
+
+ if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
+ srcVkRect.fTop = src->height() - srcRect.fBottom;
+ srcVkRect.fBottom = src->height() - srcRect.fTop;
+ } else {
+ srcVkRect.fTop = srcRect.fTop;
+ srcVkRect.fBottom = srcRect.fBottom;
+ }
+
+ if (kBottomLeft_GrSurfaceOrigin == dst->origin()) {
+ dstRect.fTop = dst->height() - dstPoint.fY - srcVkRect.height();
+ } else {
+ dstRect.fTop = dstPoint.fY;
+ }
+ dstRect.fBottom = dstRect.fTop + srcVkRect.height();
+
+    // If we have different origins, we need to flip the top and bottom of the dst rect so that we
+    // get the correct orientation of the copied data.
+ if (src->origin() != dst->origin()) {
+ SkTSwap(dstRect.fTop, dstRect.fBottom);
+ }
+
+ VkImageBlit blitRegion;
+ memset(&blitRegion, 0, sizeof(VkImageBlit));
+ blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ blitRegion.srcOffsets[0] = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
+ blitRegion.srcOffsets[1] = { srcVkRect.fRight, srcVkRect.fBottom, 0 };
+ blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
+ blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 0 };
+
+ fCurrentCmdBuffer->blitImage(this,
+ *srcImage,
+ *dstImage,
+ 1,
+ &blitRegion,
+ VK_FILTER_NEAREST); // We never scale so any filter works here
+
+ this->didWriteToSurface(dst, &dstRect);
+}
+
+inline bool can_copy_as_resolve(const GrSurface* dst,
+ const GrSurface* src,
+ const GrVkGpu* gpu) {
+ // Our src must be a multisampled render target
+ if (!src->asRenderTarget() || src->asRenderTarget()->numColorSamples() <= 1) {
+ return false;
+ }
+
+ // The dst must be a render target but not multisampled
+ if (!dst->asRenderTarget() || dst->asRenderTarget()->numColorSamples() > 1) {
+ return false;
+ }
+
+ // Surfaces must have the same origin.
+ if (src->origin() != dst->origin()) {
+ return false;
+ }
+
+ return true;
+}
+
+void GrVkGpu::copySurfaceAsResolve(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ GrVkRenderTarget* dstRT = static_cast<GrVkRenderTarget*>(dst->asRenderTarget());
+ GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
+ SkASSERT(dstRT && dstRT->numColorSamples() <= 1);
+ this->resolveImage(dstRT, srcRT, srcRect, dstPoint);
+}
+
+inline bool can_copy_as_draw(const GrSurface* dst,
+ const GrSurface* src,
+ const GrVkGpu* gpu) {
+ return false;
+}
+
+void GrVkGpu::copySurfaceAsDraw(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ SkASSERT(false);
+}
+
+bool GrVkGpu::onCopySurface(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
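+    // Try the available copy strategies in order: a resolve for multisampled sources, a draw via
+    // the copy manager, vkCmdCopyImage, and finally vkCmdBlitImage.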
+ if (can_copy_as_resolve(dst, src, this)) {
+ this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
+ return true;
+ }
+
+ if (this->vkCaps().mustSubmitCommandsBeforeCopyOp()) {
+ this->submitCommandBuffer(GrVkGpu::kSkip_SyncQueue);
+ }
+
+ if (fCopyManager.copySurfaceAsDraw(this, dst, src, srcRect, dstPoint)) {
+ return true;
+ }
+
+ GrVkImage* dstImage;
+ GrVkImage* srcImage;
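+    // Copies operate on the underlying VkImage: for a multisampled render target use its MSAA
+    // color image, otherwise use the render target or texture image itself.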
+ GrRenderTarget* dstRT = dst->asRenderTarget();
+ if (dstRT) {
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
+ dstImage = vkRT->numColorSamples() > 1 ? vkRT->msaaImage() : vkRT;
+ } else {
+ SkASSERT(dst->asTexture());
+ dstImage = static_cast<GrVkTexture*>(dst->asTexture());
+ }
+ GrRenderTarget* srcRT = src->asRenderTarget();
+ if (srcRT) {
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(srcRT);
+ srcImage = vkRT->numColorSamples() > 1 ? vkRT->msaaImage() : vkRT;
+ } else {
+ SkASSERT(src->asTexture());
+ srcImage = static_cast<GrVkTexture*>(src->asTexture());
+ }
+
+ if (can_copy_image(dst, src, this)) {
+ this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
+ return true;
+ }
+
+ if (can_copy_as_blit(dst, src, dstImage, srcImage, this)) {
+ this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstPoint);
+ return true;
+ }
+
+ return false;
+}
+
+bool GrVkGpu::initDescForDstCopy(const GrRenderTarget* src, GrSurfaceDesc* desc) const {
+    // We can always succeed here with either a CopyImage (non-MSAA src) or ResolveImage (MSAA src).
+    // For CopyImage we can make a simple texture; for ResolveImage we require the dst to be a
+    // render target as well.
+ desc->fOrigin = src->origin();
+ desc->fConfig = src->config();
+ if (src->numColorSamples() > 1 ||
+ (src->asTexture() && this->vkCaps().supportsCopiesAsDraws())) {
+ desc->fFlags = kRenderTarget_GrSurfaceFlag;
+ } else {
+ // Just going to use CopyImage here
+ desc->fFlags = kNone_GrSurfaceFlags;
+ }
+
+ return true;
+}
+
+void GrVkGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
+ int* effectiveSampleCnt, SamplePattern*) {
+ // TODO: stub.
+ SkASSERT(!this->caps()->sampleLocationsSupport());
+ *effectiveSampleCnt = rt->desc().fSampleCnt;
+}
+
+bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
+ GrPixelConfig readConfig, DrawPreference* drawPreference,
+ ReadPixelTempDrawInfo* tempDrawInfo) {
+    // We will always want these settings if a temp draw is performed.
+ tempDrawInfo->fTempSurfaceDesc.fFlags = kRenderTarget_GrSurfaceFlag;
+ tempDrawInfo->fTempSurfaceDesc.fWidth = width;
+ tempDrawInfo->fTempSurfaceDesc.fHeight = height;
+ tempDrawInfo->fTempSurfaceDesc.fSampleCnt = 0;
+ tempDrawInfo->fTempSurfaceDesc.fOrigin = kTopLeft_GrSurfaceOrigin; // no CPU y-flip for TL.
+ tempDrawInfo->fTempSurfaceFit = SkBackingFit::kApprox;
+
+ // For now assume no swizzling, we may change that below.
+ tempDrawInfo->fSwizzle = GrSwizzle::RGBA();
+
+    // Depends on why we need/want a temp draw. Start off assuming no change: the surface we read
+    // from will be srcConfig and we will read readConfig pixels from it.
+    // Note that if we require a draw and return a non-renderable format for the temp surface, the
+    // base class will fail for us.
+ tempDrawInfo->fTempSurfaceDesc.fConfig = srcSurface->config();
+ tempDrawInfo->fReadConfig = readConfig;
+
+ if (srcSurface->config() == readConfig) {
+ return true;
+ }
+
+ if (this->vkCaps().isConfigRenderable(readConfig, srcSurface->desc().fSampleCnt > 1)) {
+ ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
+ tempDrawInfo->fTempSurfaceDesc.fConfig = readConfig;
+ tempDrawInfo->fReadConfig = readConfig;
+ return true;
+ }
+
+ return false;
+}
+
+bool GrVkGpu::onReadPixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config,
+ void* buffer,
+ size_t rowBytes) {
+ VkFormat pixelFormat;
+ if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
+ return false;
+ }
+
+ GrVkImage* image = nullptr;
+ GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget());
+ if (rt) {
+ // resolve the render target if necessary
+ switch (rt->getResolveType()) {
+ case GrVkRenderTarget::kCantResolve_ResolveType:
+ return false;
+ case GrVkRenderTarget::kAutoResolves_ResolveType:
+ break;
+ case GrVkRenderTarget::kCanResolve_ResolveType:
+ this->onResolveRenderTarget(rt);
+ break;
+ default:
+ SkFAIL("Unknown resolve type");
+ }
+ image = rt;
+ } else {
+ image = static_cast<GrVkTexture*>(surface->asTexture());
+ }
+
+ if (!image) {
+ return false;
+ }
+
+    // Change the layout of our target so it can be used as a copy source
+ image->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ size_t bpp = GrBytesPerPixel(config);
+ size_t tightRowBytes = bpp * width;
+ bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
+
+ VkBufferImageCopy region;
+ memset(&region, 0, sizeof(VkBufferImageCopy));
+
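+    // Some devices require image-to-buffer copies to start at the origin. In that case we copy a
+    // larger region starting at (0, 0) and skip the extra rows and columns when reading from the
+    // mapped buffer below.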
+ bool copyFromOrigin = this->vkCaps().mustDoCopiesFromOrigin();
+ if (copyFromOrigin) {
+ region.imageOffset = { 0, 0, 0 };
+ region.imageExtent = { (uint32_t)(left + width),
+ (uint32_t)(flipY ? surface->height() - top : top + height),
+ 1
+ };
+ } else {
+ VkOffset3D offset = {
+ left,
+ flipY ? surface->height() - top - height : top,
+ 0
+ };
+ region.imageOffset = offset;
+ region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
+ }
+
+ size_t transBufferRowBytes = bpp * region.imageExtent.width;
+ GrVkTransferBuffer* transferBuffer =
+ static_cast<GrVkTransferBuffer*>(this->createBuffer(transBufferRowBytes * height,
+ kXferGpuToCpu_GrBufferType,
+ kStream_GrAccessPattern));
+
+ // Copy the image to a buffer so we can map it to cpu memory
+ region.bufferOffset = transferBuffer->offset();
+ region.bufferRowLength = 0; // Forces RowLength to be width. We handle the rowBytes below.
+ region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
+ region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+
+ fCurrentCmdBuffer->copyImageToBuffer(this,
+ image,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ transferBuffer,
+ 1,
+ &region);
+
+ // make sure the copy to buffer has finished
+ transferBuffer->addMemoryBarrier(this,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_ACCESS_HOST_READ_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ VK_PIPELINE_STAGE_HOST_BIT,
+ false);
+
+ // We need to submit the current command buffer to the Queue and make sure it finishes before
+ // we can copy the data out of the buffer.
+ this->submitCommandBuffer(kForce_SyncQueue);
+ GrVkMemory::InvalidateMappedAlloc(this, transferBuffer->alloc());
+ void* mappedMemory = transferBuffer->map();
+
+ if (copyFromOrigin) {
+ uint32_t skipRows = region.imageExtent.height - height;
+ mappedMemory = (char*)mappedMemory + transBufferRowBytes * skipRows + bpp * left;
+ }
+
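+    // For bottom-left origin surfaces, copy rows in reverse order so the caller receives top-down
+    // data.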
+ if (flipY) {
+ const char* srcRow = reinterpret_cast<const char*>(mappedMemory);
+ char* dstRow = reinterpret_cast<char*>(buffer)+(height - 1) * rowBytes;
+ for (int y = 0; y < height; y++) {
+ memcpy(dstRow, srcRow, tightRowBytes);
+ srcRow += transBufferRowBytes;
+ dstRow -= rowBytes;
+ }
+ } else {
+ if (transBufferRowBytes == rowBytes) {
+ memcpy(buffer, mappedMemory, rowBytes*height);
+ } else {
+ SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes,
+ height);
+ }
+ }
+
+ transferBuffer->unmap();
+ transferBuffer->unref();
+ return true;
+}
+
+// The RenderArea bounds we pass into BeginRenderPass must have a start x value that is a multiple
+// of the granularity. The width must also be a multiple of the granularity or equal to the width
+// of the entire attachment. Similar requirements apply to the y and height components.
+void adjust_bounds_to_granularity(SkIRect* dstBounds, const SkIRect& srcBounds,
+ const VkExtent2D& granularity, int maxWidth, int maxHeight) {
+ // Adjust Width
+ if ((0 != granularity.width && 1 != granularity.width)) {
+        // Start with the right side of rect so we know if we end up going past the maxWidth.
+ int rightAdj = srcBounds.fRight % granularity.width;
+ if (rightAdj != 0) {
+ rightAdj = granularity.width - rightAdj;
+ }
+ dstBounds->fRight = srcBounds.fRight + rightAdj;
+ if (dstBounds->fRight > maxWidth) {
+ dstBounds->fRight = maxWidth;
+ dstBounds->fLeft = 0;
+ } else {
+ dstBounds->fLeft = srcBounds.fLeft - srcBounds.fLeft % granularity.width;
+ }
+ } else {
+ dstBounds->fLeft = srcBounds.fLeft;
+ dstBounds->fRight = srcBounds.fRight;
+ }
+
+ // Adjust height
+ if ((0 != granularity.height && 1 != granularity.height)) {
+        // Start with the bottom side of rect so we know if we end up going past the maxHeight.
+ int bottomAdj = srcBounds.fBottom % granularity.height;
+ if (bottomAdj != 0) {
+ bottomAdj = granularity.height - bottomAdj;
+ }
+ dstBounds->fBottom = srcBounds.fBottom + bottomAdj;
+ if (dstBounds->fBottom > maxHeight) {
+ dstBounds->fBottom = maxHeight;
+ dstBounds->fTop = 0;
+ } else {
+ dstBounds->fTop = srcBounds.fTop - srcBounds.fTop % granularity.height;
+ }
+ } else {
+ dstBounds->fTop = srcBounds.fTop;
+ dstBounds->fBottom = srcBounds.fBottom;
+ }
+}
+
+void GrVkGpu::submitSecondaryCommandBuffer(GrVkSecondaryCommandBuffer* buffer,
+ const GrVkRenderPass* renderPass,
+ const VkClearValue* colorClear,
+ GrVkRenderTarget* target,
+ const SkIRect& bounds) {
+ const SkIRect* pBounds = &bounds;
+ SkIRect flippedBounds;
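+    // The render pass bounds are specified in Vulkan's top-left coordinate space, so flip them for
+    // bottom-left origin targets.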
+ if (kBottomLeft_GrSurfaceOrigin == target->origin()) {
+ flippedBounds = bounds;
+ flippedBounds.fTop = target->height() - bounds.fBottom;
+ flippedBounds.fBottom = target->height() - bounds.fTop;
+ pBounds = &flippedBounds;
+ }
+
+ // The bounds we use for the render pass should be of the granularity supported
+ // by the device.
+ const VkExtent2D& granularity = renderPass->granularity();
+ SkIRect adjustedBounds;
+ if ((0 != granularity.width && 1 != granularity.width) ||
+ (0 != granularity.height && 1 != granularity.height)) {
+ adjust_bounds_to_granularity(&adjustedBounds, *pBounds, granularity,
+ target->width(), target->height());
+ pBounds = &adjustedBounds;
+ }
+
+    // Currently it is fine for us to always pass in 1 for the clear count even if no attachment
+    // uses it. In the current state, we also only use LOAD_OP_CLEAR for the color attachment,
+    // which is always the first attachment.
+ fCurrentCmdBuffer->beginRenderPass(this, renderPass, 1, colorClear, *target, *pBounds, true);
+ fCurrentCmdBuffer->executeCommands(this, buffer);
+ fCurrentCmdBuffer->endRenderPass(this);
+
+ this->didWriteToSurface(target, &bounds);
+}
+
+GrFence SK_WARN_UNUSED_RESULT GrVkGpu::insertFence() const {
+ VkFenceCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkFenceCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ VkFence fence = VK_NULL_HANDLE;
+ VkResult result = GR_VK_CALL(this->vkInterface(), CreateFence(this->device(), &createInfo,
+ nullptr, &fence));
+ // TODO: verify that all QueueSubmits before this will finish before this fence signals
+ if (VK_SUCCESS == result) {
+ GR_VK_CALL(this->vkInterface(), QueueSubmit(this->queue(), 0, nullptr, fence));
+ }
+ return (GrFence)fence;
+}
+
+bool GrVkGpu::waitFence(GrFence fence, uint64_t timeout) const {
+ VkResult result = GR_VK_CALL(this->vkInterface(), WaitForFences(this->device(), 1,
+ (VkFence*)&fence,
+ VK_TRUE,
+ timeout));
+ return (VK_SUCCESS == result);
+}
+
+void GrVkGpu::deleteFence(GrFence fence) const {
+ GR_VK_CALL(this->vkInterface(), DestroyFence(this->device(), (VkFence)fence, nullptr));
+}
+
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkGpu.h b/gfx/skia/skia/src/gpu/vk/GrVkGpu.h
new file mode 100644
index 000000000..273f28c5d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkGpu.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkGpu_DEFINED
+#define GrVkGpu_DEFINED
+
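+// When USE_SKSL is set, shaders are compiled with Skia's in-tree SkSL compiler; otherwise shaderc
+// is used to compile GLSL to SPIR-V.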
+#define USE_SKSL 1
+
+#include "GrGpu.h"
+#include "GrGpuFactory.h"
+#include "vk/GrVkBackendContext.h"
+#include "GrVkCaps.h"
+#include "GrVkCopyManager.h"
+#include "GrVkIndexBuffer.h"
+#include "GrVkMemory.h"
+#include "GrVkResourceProvider.h"
+#include "GrVkVertexBuffer.h"
+#include "GrVkUtil.h"
+
+#if USE_SKSL
+namespace SkSL {
+ class Compiler;
+}
+#else
+#include "shaderc/shaderc.h"
+#endif
+
+#include "vk/GrVkDefines.h"
+
+class GrPipeline;
+class GrNonInstancedMesh;
+
+class GrVkBufferImpl;
+class GrVkPipeline;
+class GrVkPipelineState;
+class GrVkPrimaryCommandBuffer;
+class GrVkRenderPass;
+class GrVkSecondaryCommandBuffer;
+class GrVkTexture;
+struct GrVkInterface;
+
+class GrVkGpu : public GrGpu {
+public:
+ static GrGpu* Create(GrBackendContext backendContext, const GrContextOptions& options,
+ GrContext* context);
+
+ ~GrVkGpu() override;
+
+ const GrVkInterface* vkInterface() const { return fBackendContext->fInterface; }
+ const GrVkCaps& vkCaps() const { return *fVkCaps; }
+
+ VkDevice device() const { return fDevice; }
+ VkQueue queue() const { return fQueue; }
+ VkCommandPool cmdPool() const { return fCmdPool; }
+ VkPhysicalDeviceMemoryProperties physicalDeviceMemoryProperties() const {
+ return fPhysDevMemProps;
+ }
+
+ GrVkResourceProvider& resourceProvider() { return fResourceProvider; }
+
+ GrVkPrimaryCommandBuffer* currentCommandBuffer() { return fCurrentCmdBuffer; }
+
+ enum SyncQueue {
+ kForce_SyncQueue,
+ kSkip_SyncQueue
+ };
+
+ bool onGetReadPixelsInfo(GrSurface* srcSurface, int readWidth, int readHeight, size_t rowBytes,
+ GrPixelConfig readConfig, DrawPreference*,
+ ReadPixelTempDrawInfo*) override;
+
+ bool onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
+ GrPixelConfig srcConfig, DrawPreference*,
+ WritePixelTempDrawInfo*) override;
+
+ bool onCopySurface(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) override;
+
+ void onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
+ int* effectiveSampleCnt, SamplePattern*) override;
+
+ bool initDescForDstCopy(const GrRenderTarget* src, GrSurfaceDesc* desc) const override;
+
+ void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}
+
+ GrBackendObject createTestingOnlyBackendTexture(void* pixels, int w, int h,
+ GrPixelConfig config,
+ bool isRenderTarget) override;
+ bool isTestingOnlyBackendTexture(GrBackendObject id) const override;
+ void deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandonTexture) override;
+
+ GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget*,
+ int width,
+ int height) override;
+
+ void clearStencil(GrRenderTarget* target) override;
+
+ GrGpuCommandBuffer* createCommandBuffer(
+ GrRenderTarget* target,
+ const GrGpuCommandBuffer::LoadAndStoreInfo& colorInfo,
+ const GrGpuCommandBuffer::LoadAndStoreInfo& stencilInfo) override;
+
+ void drawDebugWireRect(GrRenderTarget*, const SkIRect&, GrColor) override {}
+
+ void addMemoryBarrier(VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkMemoryBarrier* barrier) const;
+ void addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkBufferMemoryBarrier* barrier) const;
+ void addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkImageMemoryBarrier* barrier) const;
+
+#if USE_SKSL
+ SkSL::Compiler* shaderCompiler() const {
+ return fCompiler;
+ }
+#else
+ shaderc_compiler_t shadercCompiler() const {
+ return fCompiler;
+ }
+#endif
+
+ void onResolveRenderTarget(GrRenderTarget* target) override;
+
+ void submitSecondaryCommandBuffer(GrVkSecondaryCommandBuffer*,
+ const GrVkRenderPass*,
+ const VkClearValue*,
+ GrVkRenderTarget*,
+ const SkIRect& bounds);
+
+ void finishDrawTarget() override;
+
+ GrFence SK_WARN_UNUSED_RESULT insertFence() const override;
+ bool waitFence(GrFence, uint64_t timeout) const override;
+ void deleteFence(GrFence) const override;
+
+ void generateMipmap(GrVkTexture* tex);
+
+ bool updateBuffer(GrVkBuffer* buffer, const void* src, VkDeviceSize offset, VkDeviceSize size);
+
+ // Heaps
+ enum Heap {
+ kLinearImage_Heap = 0,
+ // We separate out small (i.e., <= 16K) images to reduce fragmentation
+ // in the main heap.
+ kOptimalImage_Heap,
+ kSmallOptimalImage_Heap,
+ // We have separate vertex and image heaps, because it's possible that
+ // a given Vulkan driver may allocate them separately.
+ kVertexBuffer_Heap,
+ kIndexBuffer_Heap,
+ kUniformBuffer_Heap,
+ kCopyReadBuffer_Heap,
+ kCopyWriteBuffer_Heap,
+
+ kLastHeap = kCopyWriteBuffer_Heap
+ };
+ static const int kHeapCount = kLastHeap + 1;
+
+ GrVkHeap* getHeap(Heap heap) const { return fHeaps[heap]; }
+
+private:
+ GrVkGpu(GrContext* context, const GrContextOptions& options,
+ const GrVkBackendContext* backendContext);
+
+ void onResetContext(uint32_t resetBits) override {}
+
+ GrTexture* onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
+ const SkTArray<GrMipLevel>&) override;
+
+ GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc, SkBudgeted,
+ const SkTArray<GrMipLevel>&) override { return NULL; }
+
+ GrTexture* onWrapBackendTexture(const GrBackendTextureDesc&, GrWrapOwnership) override;
+
+ GrRenderTarget* onWrapBackendRenderTarget(const GrBackendRenderTargetDesc&,
+ GrWrapOwnership) override;
+ GrRenderTarget* onWrapBackendTextureAsRenderTarget(const GrBackendTextureDesc&) override { return NULL; }
+
+ GrBuffer* onCreateBuffer(size_t size, GrBufferType type, GrAccessPattern,
+ const void* data) override;
+
+ gr_instanced::InstancedRendering* onCreateInstancedRendering() override { return nullptr; }
+
+ bool onReadPixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig,
+ void* buffer,
+ size_t rowBytes) override;
+
+ bool onWritePixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config, const SkTArray<GrMipLevel>&) override;
+
+ bool onTransferPixels(GrSurface*,
+ int left, int top, int width, int height,
+ GrPixelConfig config, GrBuffer* transferBuffer,
+ size_t offset, size_t rowBytes) override { return false; }
+
+ // Ends and submits the current command buffer to the queue and then creates a new command
+ // buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all
+ // work in the queue to finish before returning.
+ void submitCommandBuffer(SyncQueue sync);
+
+ void copySurfaceAsCopyImage(GrSurface* dst,
+ GrSurface* src,
+ GrVkImage* dstImage,
+ GrVkImage* srcImage,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ void copySurfaceAsBlit(GrSurface* dst,
+ GrSurface* src,
+ GrVkImage* dstImage,
+ GrVkImage* srcImage,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ void copySurfaceAsResolve(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ void copySurfaceAsDraw(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ // helpers for onCreateTexture and writeTexturePixels
+ bool uploadTexDataLinear(GrVkTexture* tex,
+ int left, int top, int width, int height,
+ GrPixelConfig dataConfig,
+ const void* data,
+ size_t rowBytes);
+ bool uploadTexDataOptimal(GrVkTexture* tex,
+ int left, int top, int width, int height,
+ GrPixelConfig dataConfig,
+ const SkTArray<GrMipLevel>&);
+
+ void resolveImage(GrVkRenderTarget* dst,
+ GrVkRenderTarget* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ SkAutoTUnref<const GrVkBackendContext> fBackendContext;
+ SkAutoTUnref<GrVkCaps> fVkCaps;
+
+ // These Vulkan objects are provided by the client, and also stored in fBackendContext.
+ // They're copied here for convenient access.
+ VkDevice fDevice;
+ VkQueue fQueue; // Must be Graphics queue
+
+ // Created by GrVkGpu
+ GrVkResourceProvider fResourceProvider;
+ VkCommandPool fCmdPool;
+ GrVkPrimaryCommandBuffer* fCurrentCmdBuffer;
+ VkPhysicalDeviceMemoryProperties fPhysDevMemProps;
+
+ SkAutoTDelete<GrVkHeap> fHeaps[kHeapCount];
+
+ GrVkCopyManager fCopyManager;
+
+#ifdef SK_ENABLE_VK_LAYERS
+ // For reporting validation layer errors
+ VkDebugReportCallbackEXT fCallback;
+#endif
+
+#if USE_SKSL
+ SkSL::Compiler* fCompiler;
+#else
+    // Shaderc compiler used for compiling GLSL into SPIR-V. We only want to create the compiler
+    // once since there is significant overhead to the first compile of any compiler.
+ shaderc_compiler_t fCompiler;
+#endif
+
+ typedef GrGpu INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkGpuCommandBuffer.cpp b/gfx/skia/skia/src/gpu/vk/GrVkGpuCommandBuffer.cpp
new file mode 100644
index 000000000..e0cfce3dc
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkGpuCommandBuffer.cpp
@@ -0,0 +1,450 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkGpuCommandBuffer.h"
+
+#include "GrFixedClip.h"
+#include "GrMesh.h"
+#include "GrPipeline.h"
+#include "GrRenderTargetPriv.h"
+#include "GrTextureAccess.h"
+#include "GrTexturePriv.h"
+#include "GrVkCommandBuffer.h"
+#include "GrVkGpu.h"
+#include "GrVkPipeline.h"
+#include "GrVkRenderPass.h"
+#include "GrVkRenderTarget.h"
+#include "GrVkResourceProvider.h"
+#include "GrVkTexture.h"
+
+void get_vk_load_store_ops(const GrGpuCommandBuffer::LoadAndStoreInfo& info,
+ VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) {
+ switch (info.fLoadOp) {
+ case GrGpuCommandBuffer::LoadOp::kLoad:
+ *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ break;
+ case GrGpuCommandBuffer::LoadOp::kClear:
+ *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ break;
+ case GrGpuCommandBuffer::LoadOp::kDiscard:
+ *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ break;
+ default:
+ SK_ABORT("Invalid LoadOp");
+ *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ }
+
+ switch (info.fStoreOp) {
+ case GrGpuCommandBuffer::StoreOp::kStore:
+ *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ break;
+ case GrGpuCommandBuffer::StoreOp::kDiscard:
+ *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ break;
+ default:
+ SK_ABORT("Invalid StoreOp");
+ *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ }
+}
+
+GrVkGpuCommandBuffer::GrVkGpuCommandBuffer(GrVkGpu* gpu,
+ GrVkRenderTarget* target,
+ const LoadAndStoreInfo& colorInfo,
+ const LoadAndStoreInfo& stencilInfo)
+ : fGpu(gpu)
+ , fRenderTarget(target)
+ , fIsEmpty(true)
+ , fStartsWithClear(false) {
+ VkAttachmentLoadOp vkLoadOp;
+ VkAttachmentStoreOp vkStoreOp;
+
+ get_vk_load_store_ops(colorInfo, &vkLoadOp, &vkStoreOp);
+ GrVkRenderPass::LoadStoreOps vkColorOps(vkLoadOp, vkStoreOp);
+
+ get_vk_load_store_ops(stencilInfo, &vkLoadOp, &vkStoreOp);
+ GrVkRenderPass::LoadStoreOps vkStencilOps(vkLoadOp, vkStoreOp);
+
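+    // Look up a cached render pass compatible with this target and these load/store ops, using the
+    // target's compatible-render-pass handle when it has one.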
+ const GrVkResourceProvider::CompatibleRPHandle& rpHandle = target->compatibleRenderPassHandle();
+ if (rpHandle.isValid()) {
+ fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
+ vkColorOps,
+ vkStencilOps);
+ } else {
+ fRenderPass = fGpu->resourceProvider().findRenderPass(*target,
+ vkColorOps,
+ vkStencilOps);
+ }
+
+ GrColorToRGBAFloat(colorInfo.fClearColor, fColorClearValue.color.float32);
+
+ fCommandBuffer = gpu->resourceProvider().findOrCreateSecondaryCommandBuffer();
+ fCommandBuffer->begin(gpu, target->framebuffer(), fRenderPass);
+}
+
+GrVkGpuCommandBuffer::~GrVkGpuCommandBuffer() {
+ fCommandBuffer->unref(fGpu);
+ fRenderPass->unref(fGpu);
+}
+
+GrGpu* GrVkGpuCommandBuffer::gpu() { return fGpu; }
+
+void GrVkGpuCommandBuffer::end() {
+ fCommandBuffer->end(fGpu);
+}
+
+void GrVkGpuCommandBuffer::onSubmit(const SkIRect& bounds) {
+    // TODO: We can't add this optimization yet since many things create a scratch texture which
+    // adds the discard immediately but then doesn't draw to it right away. This causes the discard
+ // to be ignored and we get yelled at for loading uninitialized data. However, once MDP lands,
+ // the discard will get reordered with the rest of the draw commands and we can re-enable this.
+#if 0
+ if (fIsEmpty && !fStartsWithClear) {
+        // We have submitted no actual draw commands to the command buffer, and we are not using
+        // the render pass to do a clear, so there is no need to submit anything.
+ return;
+ }
+#endif
+
+ // Change layout of our render target so it can be used as the color attachment. Currently
+ // we don't attach the resolve to the framebuffer so no need to change its layout.
+ GrVkImage* targetImage = fRenderTarget->msaaImage() ? fRenderTarget->msaaImage()
+ : fRenderTarget;
+
+ targetImage->setImageLayout(fGpu,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
+ VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
+ false);
+
+ // If we are using a stencil attachment we also need to update its layout
+ if (GrStencilAttachment* stencil = fRenderTarget->renderTargetPriv().getStencilAttachment()) {
+ GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
+ vkStencil->setImageLayout(fGpu,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
+ VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
+ false);
+ }
+
+ fGpu->submitSecondaryCommandBuffer(fCommandBuffer, fRenderPass, &fColorClearValue,
+ fRenderTarget, bounds);
+}
+
+void GrVkGpuCommandBuffer::discard(GrRenderTarget* target) {
+ if (fIsEmpty) {
+        // We will change the render pass to use a don't-care load instead
+ GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ VK_ATTACHMENT_STORE_OP_STORE);
+ GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ VK_ATTACHMENT_STORE_OP_STORE);
+
+ const GrVkRenderPass* oldRP = fRenderPass;
+
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
+ const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
+ vkRT->compatibleRenderPassHandle();
+ if (rpHandle.isValid()) {
+ fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
+ vkColorOps,
+ vkStencilOps);
+ } else {
+ fRenderPass = fGpu->resourceProvider().findRenderPass(*vkRT,
+ vkColorOps,
+ vkStencilOps);
+ }
+
+ SkASSERT(fRenderPass->isCompatible(*oldRP));
+ oldRP->unref(fGpu);
+ fStartsWithClear = false;
+ }
+}
+
+void GrVkGpuCommandBuffer::onClearStencilClip(GrRenderTarget* target,
+ const GrFixedClip& clip,
+ bool insideStencilMask) {
+ SkASSERT(target);
+ SkASSERT(!clip.hasWindowRectangles());
+
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
+ GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
+ // this should only be called internally when we know we have a
+ // stencil buffer.
+ SkASSERT(sb);
+ int stencilBitCount = sb->bits();
+
+ // The contract with the callers does not guarantee that we preserve all bits in the stencil
+ // during this clear. Thus we will clear the entire stencil to the desired value.
+
+ VkClearDepthStencilValue vkStencilColor;
+ memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
+ if (insideStencilMask) {
+ vkStencilColor.stencil = (1 << (stencilBitCount - 1));
+ } else {
+ vkStencilColor.stencil = 0;
+ }
+
+ VkClearRect clearRect;
+ // Flip rect if necessary
+ SkIRect vkRect;
+ if (!clip.scissorEnabled()) {
+ vkRect.setXYWH(0, 0, vkRT->width(), vkRT->height());
+ } else if (kBottomLeft_GrSurfaceOrigin != vkRT->origin()) {
+ vkRect = clip.scissorRect();
+ } else {
+ const SkIRect& scissor = clip.scissorRect();
+ vkRect.setLTRB(scissor.fLeft, vkRT->height() - scissor.fBottom,
+ scissor.fRight, vkRT->height() - scissor.fTop);
+ }
+
+ clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
+ clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
+
+ clearRect.baseArrayLayer = 0;
+ clearRect.layerCount = 1;
+
+ uint32_t stencilIndex;
+ SkAssertResult(fRenderPass->stencilAttachmentIndex(&stencilIndex));
+
+ VkClearAttachment attachment;
+ attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
+ attachment.colorAttachment = 0; // this value shouldn't matter
+ attachment.clearValue.depthStencil = vkStencilColor;
+
+ fCommandBuffer->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
+ fIsEmpty = false;
+}
+
+void GrVkGpuCommandBuffer::onClear(GrRenderTarget* target, const GrFixedClip& clip, GrColor color) {
+ // parent class should never let us get here with no RT
+ SkASSERT(target);
+ SkASSERT(!clip.hasWindowRectangles());
+
+ VkClearColorValue vkColor;
+ GrColorToRGBAFloat(color, vkColor.float32);
+
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
+
+ if (fIsEmpty && !clip.scissorEnabled()) {
+ // We will change the render pass to do a clear load instead
+ GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_CLEAR,
+ VK_ATTACHMENT_STORE_OP_STORE);
+ GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
+ VK_ATTACHMENT_STORE_OP_STORE);
+
+ const GrVkRenderPass* oldRP = fRenderPass;
+
+ const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
+ vkRT->compatibleRenderPassHandle();
+ if (rpHandle.isValid()) {
+ fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
+ vkColorOps,
+ vkStencilOps);
+ } else {
+ fRenderPass = fGpu->resourceProvider().findRenderPass(*vkRT,
+ vkColorOps,
+ vkStencilOps);
+ }
+
+ SkASSERT(fRenderPass->isCompatible(*oldRP));
+ oldRP->unref(fGpu);
+
+ GrColorToRGBAFloat(color, fColorClearValue.color.float32);
+ fStartsWithClear = true;
+ return;
+ }
+
+ // We always do a sub rect clear with clearAttachments since we are inside a render pass
+ VkClearRect clearRect;
+ // Flip rect if necessary
+ SkIRect vkRect;
+ if (!clip.scissorEnabled()) {
+ vkRect.setXYWH(0, 0, vkRT->width(), vkRT->height());
+ } else if (kBottomLeft_GrSurfaceOrigin != vkRT->origin()) {
+ vkRect = clip.scissorRect();
+ } else {
+ const SkIRect& scissor = clip.scissorRect();
+ vkRect.setLTRB(scissor.fLeft, vkRT->height() - scissor.fBottom,
+ scissor.fRight, vkRT->height() - scissor.fTop);
+ }
+ clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
+ clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
+ clearRect.baseArrayLayer = 0;
+ clearRect.layerCount = 1;
+
+ uint32_t colorIndex;
+ SkAssertResult(fRenderPass->colorAttachmentIndex(&colorIndex));
+
+ VkClearAttachment attachment;
+ attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ attachment.colorAttachment = colorIndex;
+ attachment.clearValue.color = vkColor;
+
+ fCommandBuffer->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
+ fIsEmpty = false;
+ return;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrVkGpuCommandBuffer::bindGeometry(const GrPrimitiveProcessor& primProc,
+ const GrNonInstancedMesh& mesh) {
+ // There is no need to put any memory barriers to make sure host writes have finished here.
+ // When a command buffer is submitted to a queue, there is an implicit memory barrier that
+ // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
+ // an active RenderPass.
+ SkASSERT(!mesh.vertexBuffer()->isCPUBacked());
+ GrVkVertexBuffer* vbuf;
+ vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer();
+ SkASSERT(vbuf);
+ SkASSERT(!vbuf->isMapped());
+
+ fCommandBuffer->bindVertexBuffer(fGpu, vbuf);
+
+ if (mesh.isIndexed()) {
+ SkASSERT(!mesh.indexBuffer()->isCPUBacked());
+ GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)mesh.indexBuffer();
+ SkASSERT(ibuf);
+ SkASSERT(!ibuf->isMapped());
+
+ fCommandBuffer->bindIndexBuffer(fGpu, ibuf);
+ }
+}
+
+sk_sp<GrVkPipelineState> GrVkGpuCommandBuffer::prepareDrawState(
+ const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass) {
+ sk_sp<GrVkPipelineState> pipelineState =
+ fGpu->resourceProvider().findOrCreateCompatiblePipelineState(pipeline,
+ primProc,
+ primitiveType,
+ renderPass);
+ if (!pipelineState) {
+ return pipelineState;
+ }
+
+ pipelineState->setData(fGpu, primProc, pipeline);
+
+ pipelineState->bind(fGpu, fCommandBuffer);
+
+ GrVkPipeline::SetDynamicState(fGpu, fCommandBuffer, pipeline);
+
+ return pipelineState;
+}
+
+static void prepare_sampled_images(const GrProcessor& processor, GrVkGpu* gpu) {
+ for (int i = 0; i < processor.numTextures(); ++i) {
+ const GrTextureAccess& texAccess = processor.textureAccess(i);
+ GrVkTexture* vkTexture = static_cast<GrVkTexture*>(processor.texture(i));
+ SkASSERT(vkTexture);
+
+ // We may need to resolve the texture first if it is also a render target
+ GrVkRenderTarget* texRT = static_cast<GrVkRenderTarget*>(vkTexture->asRenderTarget());
+ if (texRT) {
+ gpu->onResolveRenderTarget(texRT);
+ }
+
+ const GrTextureParams& params = texAccess.getParams();
+ // Check if we need to regenerate any mip maps
+ if (GrTextureParams::kMipMap_FilterMode == params.filterMode()) {
+ if (vkTexture->texturePriv().mipMapsAreDirty()) {
+ gpu->generateMipmap(vkTexture);
+ vkTexture->texturePriv().dirtyMipMaps(false);
+ }
+ }
+
+ // TODO: If we ever decide to create the secondary command buffers ahead of time before we
+ // are actually going to submit them, we will need to track the sampled images and delay
+ // adding the layout change/barrier until we are ready to submit.
+ vkTexture->setImageLayout(gpu,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+ VK_ACCESS_SHADER_READ_BIT,
+ VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
+ false);
+ }
+}
+
+void GrVkGpuCommandBuffer::onDraw(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ const GrMesh* meshes,
+ int meshCount) {
+ if (!meshCount) {
+ return;
+ }
+ GrRenderTarget* rt = pipeline.getRenderTarget();
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
+ const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
+ SkASSERT(renderPass);
+
+ prepare_sampled_images(primProc, fGpu);
+ GrFragmentProcessor::Iter iter(pipeline);
+ while (const GrFragmentProcessor* fp = iter.next()) {
+ prepare_sampled_images(*fp, fGpu);
+ }
+ prepare_sampled_images(pipeline.getXferProcessor(), fGpu);
+
+ GrPrimitiveType primitiveType = meshes[0].primitiveType();
+ sk_sp<GrVkPipelineState> pipelineState = this->prepareDrawState(pipeline,
+ primProc,
+ primitiveType,
+ *renderPass);
+ if (!pipelineState) {
+ return;
+ }
+
+ for (int i = 0; i < meshCount; ++i) {
+ const GrMesh& mesh = meshes[i];
+ GrMesh::Iterator iter;
+ const GrNonInstancedMesh* nonIdxMesh = iter.init(mesh);
+ do {
+ if (nonIdxMesh->primitiveType() != primitiveType) {
+                // Technically we don't have to call this here (since there is a safety check in
+                // pipelineState::setData), but it allows for quicker freeing of resources if the
+                // pipelineState sits in a cache for a while.
+ pipelineState->freeTempResources(fGpu);
+ SkDEBUGCODE(pipelineState = nullptr);
+ primitiveType = nonIdxMesh->primitiveType();
+ pipelineState = this->prepareDrawState(pipeline,
+ primProc,
+ primitiveType,
+ *renderPass);
+ if (!pipelineState) {
+ return;
+ }
+ }
+ SkASSERT(pipelineState);
+ this->bindGeometry(primProc, *nonIdxMesh);
+
+ if (nonIdxMesh->isIndexed()) {
+ fCommandBuffer->drawIndexed(fGpu,
+ nonIdxMesh->indexCount(),
+ 1,
+ nonIdxMesh->startIndex(),
+ nonIdxMesh->startVertex(),
+ 0);
+ } else {
+ fCommandBuffer->draw(fGpu,
+ nonIdxMesh->vertexCount(),
+ 1,
+ nonIdxMesh->startVertex(),
+ 0);
+ }
+ fIsEmpty = false;
+
+ fGpu->stats()->incNumDraws();
+ } while ((nonIdxMesh = iter.next()));
+ }
+
+    // Technically we don't have to call this here (since there is a safety check in
+    // pipelineState::setData), but it allows for quicker freeing of resources if the
+    // pipelineState sits in a cache for a while.
+ pipelineState->freeTempResources(fGpu);
+}
+
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkGpuCommandBuffer.h b/gfx/skia/skia/src/gpu/vk/GrVkGpuCommandBuffer.h
new file mode 100644
index 000000000..d91271e8c
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkGpuCommandBuffer.h
@@ -0,0 +1,71 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkGpuCommandBuffer_DEFINED
+#define GrVkGpuCommandBuffer_DEFINED
+
+#include "GrGpuCommandBuffer.h"
+
+#include "GrColor.h"
+#include "GrTypes.h"
+#include "GrVkPipelineState.h"
+
+class GrNonInstancedMesh;
+class GrVkGpu;
+class GrVkImage;
+class GrVkRenderPass;
+class GrVkRenderTarget;
+class GrVkSecondaryCommandBuffer;
+
+class GrVkGpuCommandBuffer : public GrGpuCommandBuffer {
+public:
+ GrVkGpuCommandBuffer(GrVkGpu* gpu,
+ GrVkRenderTarget*,
+ const LoadAndStoreInfo& colorInfo,
+ const LoadAndStoreInfo& stencilInfo);
+
+ virtual ~GrVkGpuCommandBuffer();
+
+ void end() override;
+
+ void discard(GrRenderTarget* rt) override;
+
+private:
+ GrGpu* gpu() override;
+
+ void onSubmit(const SkIRect& bounds) override;
+
+ // Bind vertex and index buffers
+ void bindGeometry(const GrPrimitiveProcessor&, const GrNonInstancedMesh&);
+
+ sk_sp<GrVkPipelineState> prepareDrawState(const GrPipeline&,
+ const GrPrimitiveProcessor&,
+ GrPrimitiveType,
+ const GrVkRenderPass&);
+
+ void onDraw(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ const GrMesh* mesh,
+ int meshCount) override;
+
+ void onClear(GrRenderTarget* rt, const GrFixedClip&, GrColor color) override;
+
+ void onClearStencilClip(GrRenderTarget*, const GrFixedClip&, bool insideStencilMask) override;
+
+ const GrVkRenderPass* fRenderPass;
+ GrVkSecondaryCommandBuffer* fCommandBuffer;
+ GrVkGpu* fGpu;
+ GrVkRenderTarget* fRenderTarget;
+ VkClearValue fColorClearValue;
+
+ bool fIsEmpty;
+ bool fStartsWithClear;
+
+ typedef GrGpuCommandBuffer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkImage.cpp b/gfx/skia/skia/src/gpu/vk/GrVkImage.cpp
new file mode 100644
index 000000000..d0457ca28
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkImage.cpp
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkGpu.h"
+#include "GrVkImage.h"
+#include "GrVkMemory.h"
+#include "GrVkUtil.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
+ switch (format) {
+ case VK_FORMAT_S8_UINT:
+ return VK_IMAGE_ASPECT_STENCIL_BIT;
+ case VK_FORMAT_D24_UNORM_S8_UINT: // fallthrough
+ case VK_FORMAT_D32_SFLOAT_S8_UINT:
+ return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
+ default:
+ SkASSERT(GrVkFormatToPixelConfig(format, nullptr));
+ return VK_IMAGE_ASPECT_COLOR_BIT;
+ }
+}
+
+void GrVkImage::setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout,
+ VkAccessFlags dstAccessMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion) {
+ SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED != newLayout &&
+ VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout);
+ VkImageLayout currentLayout = this->currentLayout();
+
+    // If the old and new layout are the same, there is no reason to put in a barrier since the
+    // operations used for each layout are implicitly synchronized with each other. The one
+    // exception is if the layout is GENERAL. In this case the image could have been used for any
+    // operation, so we must respect the barrier.
+ if (newLayout == currentLayout && VK_IMAGE_LAYOUT_GENERAL != currentLayout) {
+ return;
+ }
+
+ VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(currentLayout);
+ VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(currentLayout);
+
+ VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);
+ VkImageMemoryBarrier imageMemoryBarrier = {
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
+ nullptr, // pNext
+ srcAccessMask, // outputMask
+ dstAccessMask, // inputMask
+ currentLayout, // oldLayout
+ newLayout, // newLayout
+ VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
+ VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
+ fInfo.fImage, // image
+ { aspectFlags, 0, fInfo.fLevelCount, 0, 1 } // subresourceRange
+ };
+
+ gpu->addImageMemoryBarrier(srcStageMask, dstStageMask, byRegion, &imageMemoryBarrier);
+
+ fInfo.fImageLayout = newLayout;
+}
+
+bool GrVkImage::InitImageInfo(const GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo* info) {
+ if (0 == imageDesc.fWidth || 0 == imageDesc.fHeight) {
+ return false;
+ }
+ VkImage image = 0;
+ GrVkAlloc alloc;
+
+ bool isLinear = VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling;
+ VkImageLayout initialLayout = isLinear ? VK_IMAGE_LAYOUT_PREINITIALIZED
+ : VK_IMAGE_LAYOUT_UNDEFINED;
+
+ // Create Image
+ VkSampleCountFlagBits vkSamples;
+ if (!GrSampleCountToVkSampleCount(imageDesc.fSamples, &vkSamples)) {
+ return false;
+ }
+
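+    // Only optimally tiled images may be multisampled; linear tiled images must be single sampled.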
+ SkASSERT(VK_IMAGE_TILING_OPTIMAL == imageDesc.fImageTiling ||
+ VK_SAMPLE_COUNT_1_BIT == vkSamples);
+
+ // sRGB format images may need to be aliased to linear for various reasons (legacy mode):
+ VkImageCreateFlags createFlags = GrVkFormatIsSRGB(imageDesc.fFormat, nullptr)
+ ? VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT : 0;
+
+ const VkImageCreateInfo imageCreateInfo = {
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
+ NULL, // pNext
+ createFlags, // VkImageCreateFlags
+ imageDesc.fImageType, // VkImageType
+ imageDesc.fFormat, // VkFormat
+ { imageDesc.fWidth, imageDesc.fHeight, 1 }, // VkExtent3D
+ imageDesc.fLevels, // mipLevels
+ 1, // arrayLayers
+ vkSamples, // samples
+ imageDesc.fImageTiling, // VkImageTiling
+ imageDesc.fUsageFlags, // VkImageUsageFlags
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode
+ 0, // queueFamilyCount
+ 0, // pQueueFamilyIndices
+ initialLayout // initialLayout
+ };
+
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateImage(gpu->device(), &imageCreateInfo, nullptr,
+ &image));
+
+ if (!GrVkMemory::AllocAndBindImageMemory(gpu, image, isLinear, &alloc)) {
+ VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
+ return false;
+ }
+
+ info->fImage = image;
+ info->fAlloc = alloc;
+ info->fImageTiling = imageDesc.fImageTiling;
+ info->fImageLayout = initialLayout;
+ info->fFormat = imageDesc.fFormat;
+ info->fLevelCount = imageDesc.fLevels;
+ return true;
+}
+
+void GrVkImage::DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo* info) {
+ VK_CALL(gpu, DestroyImage(gpu->device(), info->fImage, nullptr));
+ bool isLinear = VK_IMAGE_TILING_LINEAR == info->fImageTiling;
+ GrVkMemory::FreeImageMemory(gpu, isLinear, info->fAlloc);
+}
+
+void GrVkImage::setNewResource(VkImage image, const GrVkAlloc& alloc, VkImageTiling tiling) {
+ fResource = new Resource(image, alloc, tiling);
+}
+
+GrVkImage::~GrVkImage() {
+ // should have been released or abandoned first
+ SkASSERT(!fResource);
+}
+
+void GrVkImage::releaseImage(const GrVkGpu* gpu) {
+ if (fResource) {
+ fResource->unref(gpu);
+ fResource = nullptr;
+ }
+}
+
+void GrVkImage::abandonImage() {
+ if (fResource) {
+ fResource->unrefAndAbandon();
+ fResource = nullptr;
+ }
+}
+
+void GrVkImage::Resource::freeGPUData(const GrVkGpu* gpu) const {
+ VK_CALL(gpu, DestroyImage(gpu->device(), fImage, nullptr));
+ bool isLinear = (VK_IMAGE_TILING_LINEAR == fImageTiling);
+ GrVkMemory::FreeImageMemory(gpu, isLinear, fAlloc);
+}
+
+void GrVkImage::BorrowedResource::freeGPUData(const GrVkGpu* gpu) const {
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkImage.h b/gfx/skia/skia/src/gpu/vk/GrVkImage.h
new file mode 100644
index 000000000..21728c005
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkImage.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkImage_DEFINED
+#define GrVkImage_DEFINED
+
+#include "GrVkResource.h"
+
+#include "GrTypesPriv.h"
+#include "SkTypes.h"
+
+#include "vk/GrVkDefines.h"
+#include "vk/GrVkTypes.h"
+
+class GrVkGpu;
+
+class GrVkImage : SkNoncopyable {
+private:
+ class Resource;
+
+public:
+ enum Wrapped {
+ kNot_Wrapped,
+ kAdopted_Wrapped,
+ kBorrowed_Wrapped,
+ };
+
+ GrVkImage(const GrVkImageInfo& info, Wrapped wrapped)
+ : fInfo(info)
+ , fIsBorrowed(kBorrowed_Wrapped == wrapped) {
+ if (kBorrowed_Wrapped == wrapped) {
+ fResource = new BorrowedResource(info.fImage, info.fAlloc, info.fImageTiling);
+ } else {
+ fResource = new Resource(info.fImage, info.fAlloc, info.fImageTiling);
+ }
+ }
+ virtual ~GrVkImage();
+
+ VkImage image() const { return fInfo.fImage; }
+ const GrVkAlloc& alloc() const { return fInfo.fAlloc; }
+ VkFormat imageFormat() const { return fInfo.fFormat; }
+ uint32_t mipLevels() const { return fInfo.fLevelCount; }
+ const Resource* resource() const { return fResource; }
+ bool isLinearTiled() const {
+ return SkToBool(VK_IMAGE_TILING_LINEAR == fInfo.fImageTiling);
+ }
+
+ VkImageLayout currentLayout() const { return fInfo.fImageLayout; }
+
+ void setImageLayout(const GrVkGpu* gpu,
+ VkImageLayout newLayout,
+ VkAccessFlags dstAccessMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion);
+
+ struct ImageDesc {
+ VkImageType fImageType;
+ VkFormat fFormat;
+ uint32_t fWidth;
+ uint32_t fHeight;
+ uint32_t fLevels;
+ uint32_t fSamples;
+ VkImageTiling fImageTiling;
+ VkImageUsageFlags fUsageFlags;
+ VkFlags fMemProps;
+
+ ImageDesc()
+ : fImageType(VK_IMAGE_TYPE_2D)
+ , fFormat(VK_FORMAT_UNDEFINED)
+ , fWidth(0)
+ , fHeight(0)
+ , fLevels(1)
+ , fSamples(1)
+ , fImageTiling(VK_IMAGE_TILING_OPTIMAL)
+ , fUsageFlags(0)
+ , fMemProps(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {}
+ };
+
+ static bool InitImageInfo(const GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo*);
+ // Destroys the internal VkImage and VkDeviceMemory in the GrVkImageInfo
+ static void DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo*);
+
+protected:
+ void releaseImage(const GrVkGpu* gpu);
+ void abandonImage();
+
+ void setNewResource(VkImage image, const GrVkAlloc& alloc, VkImageTiling tiling);
+
+ GrVkImageInfo fInfo;
+ bool fIsBorrowed;
+
+private:
+ class Resource : public GrVkResource {
+ public:
+ Resource()
+ : INHERITED()
+ , fImage(VK_NULL_HANDLE) {
+ fAlloc.fMemory = VK_NULL_HANDLE;
+ fAlloc.fOffset = 0;
+ }
+
+ Resource(VkImage image, const GrVkAlloc& alloc, VkImageTiling tiling)
+ : fImage(image), fAlloc(alloc), fImageTiling(tiling) {}
+
+ ~Resource() override {}
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkImage: %d (%d refs)\n", fImage, this->getRefCnt());
+ }
+#endif
+ private:
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ VkImage fImage;
+ GrVkAlloc fAlloc;
+ VkImageTiling fImageTiling;
+
+ typedef GrVkResource INHERITED;
+ };
+
+ // for wrapped textures
+ class BorrowedResource : public Resource {
+ public:
+ BorrowedResource(VkImage image, const GrVkAlloc& alloc, VkImageTiling tiling)
+ : Resource(image, alloc, tiling) {
+ }
+ private:
+ void freeGPUData(const GrVkGpu* gpu) const override;
+ };
+
+ const Resource* fResource;
+
+ friend class GrVkRenderTarget;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkImageView.cpp b/gfx/skia/skia/src/gpu/vk/GrVkImageView.cpp
new file mode 100644
index 000000000..b737df5f9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkImageView.cpp
@@ -0,0 +1,45 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkImageView.h"
+#include "GrVkGpu.h"
+#include "GrVkUtil.h"
+
+const GrVkImageView* GrVkImageView::Create(const GrVkGpu* gpu, VkImage image, VkFormat format,
+ Type viewType, uint32_t miplevels) {
+ VkImageView imageView;
+
+ // Create the VkImageView
+ VkImageViewCreateInfo viewInfo = {
+ VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // sType
+ NULL, // pNext
+ 0, // flags
+ image, // image
+ VK_IMAGE_VIEW_TYPE_2D, // viewType
+ format, // format
+ { VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_IDENTITY }, // components
+ { VK_IMAGE_ASPECT_COLOR_BIT, 0, miplevels, 0, 1 }, // subresourceRange
+ };
+ if (kStencil_Type == viewType) {
+ viewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
+ }
+
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateImageView(gpu->device(), &viewInfo,
+ nullptr, &imageView));
+ if (err) {
+ return nullptr;
+ }
+
+ return new GrVkImageView(imageView);
+}
+
+void GrVkImageView::freeGPUData(const GrVkGpu* gpu) const {
+ GR_VK_CALL(gpu->vkInterface(), DestroyImageView(gpu->device(), fImageView, nullptr));
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkImageView.h b/gfx/skia/skia/src/gpu/vk/GrVkImageView.h
new file mode 100644
index 000000000..1398987f8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkImageView.h
@@ -0,0 +1,48 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkImageView_DEFINED
+#define GrVkImageView_DEFINED
+
+#include "GrTypes.h"
+
+#include "GrVkResource.h"
+
+#include "vk/GrVkDefines.h"
+
+class GrVkImageView : public GrVkResource {
+public:
+ enum Type {
+ kColor_Type,
+ kStencil_Type
+ };
+
+ static const GrVkImageView* Create(const GrVkGpu* gpu, VkImage image, VkFormat format,
+ Type viewType, uint32_t miplevels);
+
+ VkImageView imageView() const { return fImageView; }
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkImageView: %d (%d refs)\n", fImageView, this->getRefCnt());
+ }
+#endif
+
+private:
+ GrVkImageView(VkImageView imageView) : INHERITED(), fImageView(imageView) {}
+
+ GrVkImageView(const GrVkImageView&);
+ GrVkImageView& operator=(const GrVkImageView&);
+
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ VkImageView fImageView;
+
+ typedef GrVkResource INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkIndexBuffer.cpp b/gfx/skia/skia/src/gpu/vk/GrVkIndexBuffer.cpp
new file mode 100644
index 000000000..e56a525be
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkIndexBuffer.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkIndexBuffer.h"
+#include "GrVkGpu.h"
+
+GrVkIndexBuffer::GrVkIndexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* bufferResource)
+ : INHERITED(gpu, desc.fSizeInBytes, kIndex_GrBufferType,
+ desc.fDynamic ? kDynamic_GrAccessPattern : kStatic_GrAccessPattern)
+ , GrVkBuffer(desc, bufferResource) {
+ this->registerWithCache(SkBudgeted::kYes);
+}
+
+GrVkIndexBuffer* GrVkIndexBuffer::Create(GrVkGpu* gpu, size_t size, bool dynamic) {
+ GrVkBuffer::Desc desc;
+ desc.fDynamic = dynamic;
+ desc.fType = GrVkBuffer::kIndex_Type;
+ desc.fSizeInBytes = size;
+
+ const GrVkBuffer::Resource* bufferResource = GrVkBuffer::Create(gpu, desc);
+ if (!bufferResource) {
+ return nullptr;
+ }
+
+
+ GrVkIndexBuffer* buffer = new GrVkIndexBuffer(gpu, desc, bufferResource);
+ if (!buffer) {
+ bufferResource->unref(gpu);
+ }
+ return buffer;
+}
+
+void GrVkIndexBuffer::onRelease() {
+ if (!this->wasDestroyed()) {
+ this->vkRelease(this->getVkGpu());
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrVkIndexBuffer::onAbandon() {
+ this->vkAbandon();
+ INHERITED::onAbandon();
+}
+
+void GrVkIndexBuffer::onMap() {
+ if (!this->wasDestroyed()) {
+ this->GrBuffer::fMapPtr = this->vkMap(this->getVkGpu());
+ }
+}
+
+void GrVkIndexBuffer::onUnmap() {
+ if (!this->wasDestroyed()) {
+ this->vkUnmap(this->getVkGpu());
+ }
+}
+
+bool GrVkIndexBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
+ if (!this->wasDestroyed()) {
+ return this->vkUpdateData(this->getVkGpu(), src, srcSizeInBytes);
+ } else {
+ return false;
+ }
+}
+
+GrVkGpu* GrVkIndexBuffer::getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkIndexBuffer.h b/gfx/skia/skia/src/gpu/vk/GrVkIndexBuffer.h
new file mode 100644
index 000000000..cd945ac5b
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkIndexBuffer.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkIndexBuffer_DEFINED
+#define GrVkIndexBuffer_DEFINED
+
+#include "GrBuffer.h"
+#include "GrVkBuffer.h"
+
+class GrVkGpu;
+
+class GrVkIndexBuffer : public GrBuffer, public GrVkBuffer {
+
+public:
+ static GrVkIndexBuffer* Create(GrVkGpu* gpu, size_t size, bool dynamic);
+
+protected:
+ void onAbandon() override;
+ void onRelease() override;
+
+private:
+ GrVkIndexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* resource);
+
+ void onMap() override;
+ void onUnmap() override;
+ bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
+
+ GrVkGpu* getVkGpu() const;
+
+ typedef GrBuffer INHERITED;
+};
+
+#endif
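
GrVkIndexBuffer::Create() builds the shared GrVkBuffer::Resource first and only then the wrapper object, unref'ing the resource if the wrapper cannot be created. A minimal caller-side sketch follows; it assumes an initialized GrVkGpu* named gpu and the public GrBuffer::updateData() entry point that forwards to onUpdateData() above (both are assumptions, not shown in this patch):

    // Hypothetical usage sketch; compiles only inside the Skia Vulkan backend.
    static const uint16_t kQuadIndices[] = { 0, 1, 2, 0, 2, 3 };
    GrVkIndexBuffer* ib = GrVkIndexBuffer::Create(gpu, sizeof(kQuadIndices), /*dynamic=*/false);
    if (ib) {
        ib->updateData(kQuadIndices, sizeof(kQuadIndices));  // ends up in onUpdateData() above
        ib->unref();  // cache-budgeted (SkBudgeted::kYes), so the resource cache may keep it alive
    }
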
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkInterface.cpp b/gfx/skia/skia/src/gpu/vk/GrVkInterface.cpp
new file mode 100644
index 000000000..17b254eb6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkInterface.cpp
@@ -0,0 +1,318 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "vk/GrVkInterface.h"
+#include "vk/GrVkBackendContext.h"
+#include "vk/GrVkUtil.h"
+
+GrVkInterface::GrVkInterface() {
+}
+
+#define GET_PROC(F) functions->f ## F = (PFN_vk ## F) vkGetInstanceProcAddr(instance, "vk" #F)
+#define GET_PROC_LOCAL(inst, F) PFN_vk ## F F = (PFN_vk ## F) vkGetInstanceProcAddr(inst, "vk" #F)
+#define GET_DEV_PROC(F) functions->f ## F = (PFN_vk ## F) vkGetDeviceProcAddr(device, "vk" #F)
+
+const GrVkInterface* GrVkCreateInterface(VkInstance instance, VkDevice device,
+ uint32_t extensionFlags) {
+
+ GrVkInterface* interface = new GrVkInterface();
+ GrVkInterface::Functions* functions = &interface->fFunctions;
+
+ GET_PROC(CreateInstance);
+ GET_PROC(DestroyInstance);
+ GET_PROC(EnumeratePhysicalDevices);
+ GET_PROC(GetPhysicalDeviceFeatures);
+ GET_PROC(GetPhysicalDeviceFormatProperties);
+ GET_PROC(GetPhysicalDeviceImageFormatProperties);
+ GET_PROC(GetPhysicalDeviceProperties);
+ GET_PROC(GetPhysicalDeviceQueueFamilyProperties);
+ GET_PROC(GetPhysicalDeviceMemoryProperties);
+ GET_PROC(CreateDevice);
+ GET_PROC(DestroyDevice);
+ GET_PROC(EnumerateInstanceExtensionProperties);
+ GET_PROC(EnumerateDeviceExtensionProperties);
+ GET_PROC(EnumerateInstanceLayerProperties);
+ GET_PROC(EnumerateDeviceLayerProperties);
+ GET_DEV_PROC(GetDeviceQueue);
+ GET_DEV_PROC(QueueSubmit);
+ GET_DEV_PROC(QueueWaitIdle);
+ GET_DEV_PROC(DeviceWaitIdle);
+ GET_DEV_PROC(AllocateMemory);
+ GET_DEV_PROC(FreeMemory);
+ GET_DEV_PROC(MapMemory);
+ GET_DEV_PROC(UnmapMemory);
+ GET_DEV_PROC(FlushMappedMemoryRanges);
+ GET_DEV_PROC(InvalidateMappedMemoryRanges);
+ GET_DEV_PROC(GetDeviceMemoryCommitment);
+ GET_DEV_PROC(BindBufferMemory);
+ GET_DEV_PROC(BindImageMemory);
+ GET_DEV_PROC(GetBufferMemoryRequirements);
+ GET_DEV_PROC(GetImageMemoryRequirements);
+ GET_DEV_PROC(GetImageSparseMemoryRequirements);
+ GET_PROC(GetPhysicalDeviceSparseImageFormatProperties);
+ GET_DEV_PROC(QueueBindSparse);
+ GET_DEV_PROC(CreateFence);
+ GET_DEV_PROC(DestroyFence);
+ GET_DEV_PROC(ResetFences);
+ GET_DEV_PROC(GetFenceStatus);
+ GET_DEV_PROC(WaitForFences);
+ GET_DEV_PROC(CreateSemaphore);
+ GET_DEV_PROC(DestroySemaphore);
+ GET_DEV_PROC(CreateEvent);
+ GET_DEV_PROC(DestroyEvent);
+ GET_DEV_PROC(GetEventStatus);
+ GET_DEV_PROC(SetEvent);
+ GET_DEV_PROC(ResetEvent);
+ GET_DEV_PROC(CreateQueryPool);
+ GET_DEV_PROC(DestroyQueryPool);
+ GET_DEV_PROC(GetQueryPoolResults);
+ GET_DEV_PROC(CreateBuffer);
+ GET_DEV_PROC(DestroyBuffer);
+ GET_DEV_PROC(CreateBufferView);
+ GET_DEV_PROC(DestroyBufferView);
+ GET_DEV_PROC(CreateImage);
+ GET_DEV_PROC(DestroyImage);
+ GET_DEV_PROC(GetImageSubresourceLayout);
+ GET_DEV_PROC(CreateImageView);
+ GET_DEV_PROC(DestroyImageView);
+ GET_DEV_PROC(CreateShaderModule);
+ GET_DEV_PROC(DestroyShaderModule);
+ GET_DEV_PROC(CreatePipelineCache);
+ GET_DEV_PROC(DestroyPipelineCache);
+ GET_DEV_PROC(GetPipelineCacheData);
+ GET_DEV_PROC(MergePipelineCaches);
+ GET_DEV_PROC(CreateGraphicsPipelines);
+ GET_DEV_PROC(CreateComputePipelines);
+ GET_DEV_PROC(DestroyPipeline);
+ GET_DEV_PROC(CreatePipelineLayout);
+ GET_DEV_PROC(DestroyPipelineLayout);
+ GET_DEV_PROC(CreateSampler);
+ GET_DEV_PROC(DestroySampler);
+ GET_DEV_PROC(CreateDescriptorSetLayout);
+ GET_DEV_PROC(DestroyDescriptorSetLayout);
+ GET_DEV_PROC(CreateDescriptorPool);
+ GET_DEV_PROC(DestroyDescriptorPool);
+ GET_DEV_PROC(ResetDescriptorPool);
+ GET_DEV_PROC(AllocateDescriptorSets);
+ GET_DEV_PROC(FreeDescriptorSets);
+ GET_DEV_PROC(UpdateDescriptorSets);
+ GET_DEV_PROC(CreateFramebuffer);
+ GET_DEV_PROC(DestroyFramebuffer);
+ GET_DEV_PROC(CreateRenderPass);
+ GET_DEV_PROC(DestroyRenderPass);
+ GET_DEV_PROC(GetRenderAreaGranularity);
+ GET_DEV_PROC(CreateCommandPool);
+ GET_DEV_PROC(DestroyCommandPool);
+ GET_DEV_PROC(ResetCommandPool);
+ GET_DEV_PROC(AllocateCommandBuffers);
+ GET_DEV_PROC(FreeCommandBuffers);
+ GET_DEV_PROC(BeginCommandBuffer);
+ GET_DEV_PROC(EndCommandBuffer);
+ GET_DEV_PROC(ResetCommandBuffer);
+ GET_DEV_PROC(CmdBindPipeline);
+ GET_DEV_PROC(CmdSetViewport);
+ GET_DEV_PROC(CmdSetScissor);
+ GET_DEV_PROC(CmdSetLineWidth);
+ GET_DEV_PROC(CmdSetDepthBias);
+ GET_DEV_PROC(CmdSetBlendConstants);
+ GET_DEV_PROC(CmdSetDepthBounds);
+ GET_DEV_PROC(CmdSetStencilCompareMask);
+ GET_DEV_PROC(CmdSetStencilWriteMask);
+ GET_DEV_PROC(CmdSetStencilReference);
+ GET_DEV_PROC(CmdBindDescriptorSets);
+ GET_DEV_PROC(CmdBindIndexBuffer);
+ GET_DEV_PROC(CmdBindVertexBuffers);
+ GET_DEV_PROC(CmdDraw);
+ GET_DEV_PROC(CmdDrawIndexed);
+ GET_DEV_PROC(CmdDrawIndirect);
+ GET_DEV_PROC(CmdDrawIndexedIndirect);
+ GET_DEV_PROC(CmdDispatch);
+ GET_DEV_PROC(CmdDispatchIndirect);
+ GET_DEV_PROC(CmdCopyBuffer);
+ GET_DEV_PROC(CmdCopyImage);
+ GET_DEV_PROC(CmdBlitImage);
+ GET_DEV_PROC(CmdCopyBufferToImage);
+ GET_DEV_PROC(CmdCopyImageToBuffer);
+ GET_DEV_PROC(CmdUpdateBuffer);
+ GET_DEV_PROC(CmdFillBuffer);
+ GET_DEV_PROC(CmdClearColorImage);
+ GET_DEV_PROC(CmdClearDepthStencilImage);
+ GET_DEV_PROC(CmdClearAttachments);
+ GET_DEV_PROC(CmdResolveImage);
+ GET_DEV_PROC(CmdSetEvent);
+ GET_DEV_PROC(CmdResetEvent);
+ GET_DEV_PROC(CmdWaitEvents);
+ GET_DEV_PROC(CmdPipelineBarrier);
+ GET_DEV_PROC(CmdBeginQuery);
+ GET_DEV_PROC(CmdEndQuery);
+ GET_DEV_PROC(CmdResetQueryPool);
+ GET_DEV_PROC(CmdWriteTimestamp);
+ GET_DEV_PROC(CmdCopyQueryPoolResults);
+ GET_DEV_PROC(CmdPushConstants);
+ GET_DEV_PROC(CmdBeginRenderPass);
+ GET_DEV_PROC(CmdNextSubpass);
+ GET_DEV_PROC(CmdEndRenderPass);
+ GET_DEV_PROC(CmdExecuteCommands);
+
+ if (extensionFlags & kEXT_debug_report_GrVkExtensionFlag) {
+ GET_PROC(CreateDebugReportCallbackEXT);
+ GET_PROC(DebugReportMessageEXT);
+ GET_PROC(DestroyDebugReportCallbackEXT);
+ }
+
+ return interface;
+}
+
+#define RETURN_FALSE_INTERFACE \
+ if (kIsDebug) { SkDebugf("%s:%d GrVkInterface::validate() failed.\n", __FILE__, __LINE__); } \
+ return false;
+
+bool GrVkInterface::validate() const {
+ // functions that are always required
+ if (NULL == fFunctions.fCreateInstance ||
+ NULL == fFunctions.fDestroyInstance ||
+ NULL == fFunctions.fEnumeratePhysicalDevices ||
+ NULL == fFunctions.fGetPhysicalDeviceFeatures ||
+ NULL == fFunctions.fGetPhysicalDeviceFormatProperties ||
+ NULL == fFunctions.fGetPhysicalDeviceImageFormatProperties ||
+ NULL == fFunctions.fGetPhysicalDeviceProperties ||
+ NULL == fFunctions.fGetPhysicalDeviceQueueFamilyProperties ||
+ NULL == fFunctions.fGetPhysicalDeviceMemoryProperties ||
+ NULL == fFunctions.fCreateDevice ||
+ NULL == fFunctions.fDestroyDevice ||
+ NULL == fFunctions.fEnumerateInstanceExtensionProperties ||
+ NULL == fFunctions.fEnumerateDeviceExtensionProperties ||
+ NULL == fFunctions.fEnumerateInstanceLayerProperties ||
+ NULL == fFunctions.fEnumerateDeviceLayerProperties ||
+ NULL == fFunctions.fGetDeviceQueue ||
+ NULL == fFunctions.fQueueSubmit ||
+ NULL == fFunctions.fQueueWaitIdle ||
+ NULL == fFunctions.fDeviceWaitIdle ||
+ NULL == fFunctions.fAllocateMemory ||
+ NULL == fFunctions.fFreeMemory ||
+ NULL == fFunctions.fMapMemory ||
+ NULL == fFunctions.fUnmapMemory ||
+ NULL == fFunctions.fFlushMappedMemoryRanges ||
+ NULL == fFunctions.fInvalidateMappedMemoryRanges ||
+ NULL == fFunctions.fGetDeviceMemoryCommitment ||
+ NULL == fFunctions.fBindBufferMemory ||
+ NULL == fFunctions.fBindImageMemory ||
+ NULL == fFunctions.fGetBufferMemoryRequirements ||
+ NULL == fFunctions.fGetImageMemoryRequirements ||
+ NULL == fFunctions.fGetImageSparseMemoryRequirements ||
+ NULL == fFunctions.fGetPhysicalDeviceSparseImageFormatProperties ||
+ NULL == fFunctions.fQueueBindSparse ||
+ NULL == fFunctions.fCreateFence ||
+ NULL == fFunctions.fDestroyFence ||
+ NULL == fFunctions.fResetFences ||
+ NULL == fFunctions.fGetFenceStatus ||
+ NULL == fFunctions.fWaitForFences ||
+ NULL == fFunctions.fCreateSemaphore ||
+ NULL == fFunctions.fDestroySemaphore ||
+ NULL == fFunctions.fCreateEvent ||
+ NULL == fFunctions.fDestroyEvent ||
+ NULL == fFunctions.fGetEventStatus ||
+ NULL == fFunctions.fSetEvent ||
+ NULL == fFunctions.fResetEvent ||
+ NULL == fFunctions.fCreateQueryPool ||
+ NULL == fFunctions.fDestroyQueryPool ||
+ NULL == fFunctions.fGetQueryPoolResults ||
+ NULL == fFunctions.fCreateBuffer ||
+ NULL == fFunctions.fDestroyBuffer ||
+ NULL == fFunctions.fCreateBufferView ||
+ NULL == fFunctions.fDestroyBufferView ||
+ NULL == fFunctions.fCreateImage ||
+ NULL == fFunctions.fDestroyImage ||
+ NULL == fFunctions.fGetImageSubresourceLayout ||
+ NULL == fFunctions.fCreateImageView ||
+ NULL == fFunctions.fDestroyImageView ||
+ NULL == fFunctions.fCreateShaderModule ||
+ NULL == fFunctions.fDestroyShaderModule ||
+ NULL == fFunctions.fCreatePipelineCache ||
+ NULL == fFunctions.fDestroyPipelineCache ||
+ NULL == fFunctions.fGetPipelineCacheData ||
+ NULL == fFunctions.fMergePipelineCaches ||
+ NULL == fFunctions.fCreateGraphicsPipelines ||
+ NULL == fFunctions.fCreateComputePipelines ||
+ NULL == fFunctions.fDestroyPipeline ||
+ NULL == fFunctions.fCreatePipelineLayout ||
+ NULL == fFunctions.fDestroyPipelineLayout ||
+ NULL == fFunctions.fCreateSampler ||
+ NULL == fFunctions.fDestroySampler ||
+ NULL == fFunctions.fCreateDescriptorSetLayout ||
+ NULL == fFunctions.fDestroyDescriptorSetLayout ||
+ NULL == fFunctions.fCreateDescriptorPool ||
+ NULL == fFunctions.fDestroyDescriptorPool ||
+ NULL == fFunctions.fResetDescriptorPool ||
+ NULL == fFunctions.fAllocateDescriptorSets ||
+ NULL == fFunctions.fFreeDescriptorSets ||
+ NULL == fFunctions.fUpdateDescriptorSets ||
+ NULL == fFunctions.fCreateFramebuffer ||
+ NULL == fFunctions.fDestroyFramebuffer ||
+ NULL == fFunctions.fCreateRenderPass ||
+ NULL == fFunctions.fDestroyRenderPass ||
+ NULL == fFunctions.fGetRenderAreaGranularity ||
+ NULL == fFunctions.fCreateCommandPool ||
+ NULL == fFunctions.fDestroyCommandPool ||
+ NULL == fFunctions.fResetCommandPool ||
+ NULL == fFunctions.fAllocateCommandBuffers ||
+ NULL == fFunctions.fFreeCommandBuffers ||
+ NULL == fFunctions.fBeginCommandBuffer ||
+ NULL == fFunctions.fEndCommandBuffer ||
+ NULL == fFunctions.fResetCommandBuffer ||
+ NULL == fFunctions.fCmdBindPipeline ||
+ NULL == fFunctions.fCmdSetViewport ||
+ NULL == fFunctions.fCmdSetScissor ||
+ NULL == fFunctions.fCmdSetLineWidth ||
+ NULL == fFunctions.fCmdSetDepthBias ||
+ NULL == fFunctions.fCmdSetBlendConstants ||
+ NULL == fFunctions.fCmdSetDepthBounds ||
+ NULL == fFunctions.fCmdSetStencilCompareMask ||
+ NULL == fFunctions.fCmdSetStencilWriteMask ||
+ NULL == fFunctions.fCmdSetStencilReference ||
+ NULL == fFunctions.fCmdBindDescriptorSets ||
+ NULL == fFunctions.fCmdBindIndexBuffer ||
+ NULL == fFunctions.fCmdBindVertexBuffers ||
+ NULL == fFunctions.fCmdDraw ||
+ NULL == fFunctions.fCmdDrawIndexed ||
+ NULL == fFunctions.fCmdDrawIndirect ||
+ NULL == fFunctions.fCmdDrawIndexedIndirect ||
+ NULL == fFunctions.fCmdDispatch ||
+ NULL == fFunctions.fCmdDispatchIndirect ||
+ NULL == fFunctions.fCmdCopyBuffer ||
+ NULL == fFunctions.fCmdCopyImage ||
+ NULL == fFunctions.fCmdBlitImage ||
+ NULL == fFunctions.fCmdCopyBufferToImage ||
+ NULL == fFunctions.fCmdCopyImageToBuffer ||
+ NULL == fFunctions.fCmdUpdateBuffer ||
+ NULL == fFunctions.fCmdFillBuffer ||
+ NULL == fFunctions.fCmdClearColorImage ||
+ NULL == fFunctions.fCmdClearDepthStencilImage ||
+ NULL == fFunctions.fCmdClearAttachments ||
+ NULL == fFunctions.fCmdResolveImage ||
+ NULL == fFunctions.fCmdSetEvent ||
+ NULL == fFunctions.fCmdResetEvent ||
+ NULL == fFunctions.fCmdWaitEvents ||
+ NULL == fFunctions.fCmdPipelineBarrier ||
+ NULL == fFunctions.fCmdBeginQuery ||
+ NULL == fFunctions.fCmdEndQuery ||
+ NULL == fFunctions.fCmdResetQueryPool ||
+ NULL == fFunctions.fCmdWriteTimestamp ||
+ NULL == fFunctions.fCmdCopyQueryPoolResults ||
+ NULL == fFunctions.fCmdPushConstants ||
+ NULL == fFunctions.fCmdBeginRenderPass ||
+ NULL == fFunctions.fCmdNextSubpass ||
+ NULL == fFunctions.fCmdEndRenderPass ||
+ NULL == fFunctions.fCmdExecuteCommands ||
+ NULL == fFunctions.fCreateDebugReportCallbackEXT ||
+ NULL == fFunctions.fDebugReportMessageEXT ||
+ NULL == fFunctions.fDestroyDebugReportCallbackEXT) {
+
+ return false;
+ }
+ return true;
+}
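
The GET_PROC / GET_DEV_PROC macros above are plain token pasting plus stringizing: one macro invocation both names the function-table slot and builds the "vkXxx" lookup string. A standalone sketch of that pattern is below; the fake loader stands in for vkGetDeviceProcAddr and everything in it is illustrative, not backend code:

    // Standalone sketch of the GET_*_PROC token-pasting pattern used above.
    #include <cstdio>

    typedef void (*PFN_vkVoidFunction)();
    typedef PFN_vkVoidFunction PFN_vkQueueSubmit;   // placeholder signature for this sketch
    struct Functions { PFN_vkQueueSubmit fQueueSubmit; };

    // Stand-in for vkGetDeviceProcAddr: receives the generated "vkQueueSubmit" string.
    static PFN_vkVoidFunction fakeGetDeviceProcAddr(void* /*device*/, const char* name) {
        std::printf("looking up %s\n", name);       // prints "looking up vkQueueSubmit"
        return nullptr;
    }

    #define GET_DEV_PROC(F) functions->f ## F = (PFN_vk ## F) fakeGetDeviceProcAddr(device, "vk" #F)

    int main() {
        Functions table = { nullptr };
        Functions* functions = &table;
        void* device = nullptr;
        GET_DEV_PROC(QueueSubmit);  // expands to: functions->fQueueSubmit =
                                    //   (PFN_vkQueueSubmit) fakeGetDeviceProcAddr(device, "vkQueueSubmit");
        return 0;
    }
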
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkMemory.cpp b/gfx/skia/skia/src/gpu/vk/GrVkMemory.cpp
new file mode 100644
index 000000000..2853c89fe
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkMemory.cpp
@@ -0,0 +1,642 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkMemory.h"
+
+#include "GrVkGpu.h"
+#include "GrVkUtil.h"
+
+#ifdef SK_DEBUG
+// for simple tracking of how much we're using in each heap
+// last counter is for non-subheap allocations
+VkDeviceSize gHeapUsage[VK_MAX_MEMORY_HEAPS+1] = { 0 };
+#endif
+
+static bool get_valid_memory_type_index(const VkPhysicalDeviceMemoryProperties& physDevMemProps,
+ uint32_t typeBits,
+ VkMemoryPropertyFlags requestedMemFlags,
+ uint32_t* typeIndex,
+ uint32_t* heapIndex) {
+ for (uint32_t i = 0; i < physDevMemProps.memoryTypeCount; ++i) {
+ if (typeBits & (1 << i)) {
+ uint32_t supportedFlags = physDevMemProps.memoryTypes[i].propertyFlags &
+ requestedMemFlags;
+ if (supportedFlags == requestedMemFlags) {
+ *typeIndex = i;
+ *heapIndex = physDevMemProps.memoryTypes[i].heapIndex;
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+static GrVkGpu::Heap buffer_type_to_heap(GrVkBuffer::Type type) {
+ const GrVkGpu::Heap kBufferToHeap[]{
+ GrVkGpu::kVertexBuffer_Heap,
+ GrVkGpu::kIndexBuffer_Heap,
+ GrVkGpu::kUniformBuffer_Heap,
+ GrVkGpu::kCopyReadBuffer_Heap,
+ GrVkGpu::kCopyWriteBuffer_Heap,
+ };
+ GR_STATIC_ASSERT(0 == GrVkBuffer::kVertex_Type);
+ GR_STATIC_ASSERT(1 == GrVkBuffer::kIndex_Type);
+ GR_STATIC_ASSERT(2 == GrVkBuffer::kUniform_Type);
+ GR_STATIC_ASSERT(3 == GrVkBuffer::kCopyRead_Type);
+ GR_STATIC_ASSERT(4 == GrVkBuffer::kCopyWrite_Type);
+
+ return kBufferToHeap[type];
+}
+
+bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
+ VkBuffer buffer,
+ GrVkBuffer::Type type,
+ bool dynamic,
+ GrVkAlloc* alloc) {
+ const GrVkInterface* iface = gpu->vkInterface();
+ VkDevice device = gpu->device();
+
+ VkMemoryRequirements memReqs;
+ GR_VK_CALL(iface, GetBufferMemoryRequirements(device, buffer, &memReqs));
+
+ uint32_t typeIndex = 0;
+ uint32_t heapIndex = 0;
+ const VkPhysicalDeviceMemoryProperties& phDevMemProps = gpu->physicalDeviceMemoryProperties();
+ if (dynamic) {
+ // try to get cached and ideally non-coherent memory first
+ if (!get_valid_memory_type_index(phDevMemProps,
+ memReqs.memoryTypeBits,
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
+ &typeIndex,
+ &heapIndex)) {
+ // some sort of host-visible memory type should always be available for dynamic buffers
+ SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps,
+ memReqs.memoryTypeBits,
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
+ &typeIndex,
+ &heapIndex));
+ }
+
+ VkMemoryPropertyFlags mpf = phDevMemProps.memoryTypes[typeIndex].propertyFlags;
+ alloc->fFlags = mpf & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT ? 0x0
+ : GrVkAlloc::kNoncoherent_Flag;
+ } else {
+ // device-local memory should always be available for static buffers
+ SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps,
+ memReqs.memoryTypeBits,
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
+ &typeIndex,
+ &heapIndex));
+ alloc->fFlags = 0x0;
+ }
+
+ GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type));
+
+ if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) {
+ // if static, try to allocate from non-host-visible non-device-local memory instead
+ if (dynamic ||
+ !get_valid_memory_type_index(phDevMemProps, memReqs.memoryTypeBits,
+ 0, &typeIndex, &heapIndex) ||
+ !heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) {
+ SkDebugf("Failed to alloc buffer\n");
+ return false;
+ }
+ }
+
+ // Bind buffer
+ VkResult err = GR_VK_CALL(iface, BindBufferMemory(device, buffer,
+ alloc->fMemory, alloc->fOffset));
+ if (err) {
+ SkASSERT_RELEASE(heap->free(*alloc));
+ return false;
+ }
+
+ return true;
+}
+
+void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type,
+ const GrVkAlloc& alloc) {
+
+ GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type));
+ SkASSERT_RELEASE(heap->free(alloc));
+}
+
+// for debugging
+static uint64_t gTotalImageMemory = 0;
+static uint64_t gTotalImageMemoryFullPage = 0;
+
+const VkDeviceSize kMaxSmallImageSize = 16 * 1024;
+const VkDeviceSize kMinVulkanPageSize = 16 * 1024;
+
+static VkDeviceSize align_size(VkDeviceSize size, VkDeviceSize alignment) {
+ return (size + alignment - 1) & ~(alignment - 1);
+}
+
+bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
+ VkImage image,
+ bool linearTiling,
+ GrVkAlloc* alloc) {
+ const GrVkInterface* iface = gpu->vkInterface();
+ VkDevice device = gpu->device();
+
+ VkMemoryRequirements memReqs;
+ GR_VK_CALL(iface, GetImageMemoryRequirements(device, image, &memReqs));
+
+ uint32_t typeIndex = 0;
+ uint32_t heapIndex = 0;
+ GrVkHeap* heap;
+ const VkPhysicalDeviceMemoryProperties& phDevMemProps = gpu->physicalDeviceMemoryProperties();
+ if (linearTiling) {
+ VkMemoryPropertyFlags desiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+ if (!get_valid_memory_type_index(phDevMemProps,
+ memReqs.memoryTypeBits,
+ desiredMemProps,
+ &typeIndex,
+ &heapIndex)) {
+ // some sort of host-visible memory type should always be available
+ SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps,
+ memReqs.memoryTypeBits,
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
+ &typeIndex,
+ &heapIndex));
+ }
+ heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap);
+ VkMemoryPropertyFlags mpf = phDevMemProps.memoryTypes[typeIndex].propertyFlags;
+ alloc->fFlags = mpf & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT ? 0x0
+ : GrVkAlloc::kNoncoherent_Flag;
+ } else {
+ // this memory type should always be available
+ SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps,
+ memReqs.memoryTypeBits,
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
+ &typeIndex,
+ &heapIndex));
+ if (memReqs.size <= kMaxSmallImageSize) {
+ heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap);
+ } else {
+ heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap);
+ }
+ alloc->fFlags = 0x0;
+ }
+
+ if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) {
+ // if optimal, try to allocate from non-host-visible non-device-local memory instead
+ if (linearTiling ||
+ !get_valid_memory_type_index(phDevMemProps, memReqs.memoryTypeBits,
+ 0, &typeIndex, &heapIndex) ||
+ !heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) {
+ SkDebugf("Failed to alloc image\n");
+ return false;
+ }
+ }
+
+ // Bind image
+ VkResult err = GR_VK_CALL(iface, BindImageMemory(device, image,
+ alloc->fMemory, alloc->fOffset));
+ if (err) {
+ SkASSERT_RELEASE(heap->free(*alloc));
+ return false;
+ }
+
+ gTotalImageMemory += alloc->fSize;
+
+ VkDeviceSize pageAlignedSize = align_size(alloc->fSize, kMinVulkanPageSize);
+ gTotalImageMemoryFullPage += pageAlignedSize;
+
+ return true;
+}
+
+void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
+ const GrVkAlloc& alloc) {
+ GrVkHeap* heap;
+ if (linearTiling) {
+ heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap);
+ } else if (alloc.fSize <= kMaxSmallImageSize) {
+ heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap);
+ } else {
+ heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap);
+ }
+ if (!heap->free(alloc)) {
+ // must be an adopted allocation
+ GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
+ } else {
+ gTotalImageMemory -= alloc.fSize;
+ VkDeviceSize pageAlignedSize = align_size(alloc.fSize, kMinVulkanPageSize);
+ gTotalImageMemoryFullPage -= pageAlignedSize;
+ }
+}
+
+VkPipelineStageFlags GrVkMemory::LayoutToPipelineStageFlags(const VkImageLayout layout) {
+ if (VK_IMAGE_LAYOUT_GENERAL == layout) {
+ return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
+ return VK_PIPELINE_STAGE_TRANSFER_BIT;
+ } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
+ return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
+ } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
+ return VK_PIPELINE_STAGE_HOST_BIT;
+ }
+
+ SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
+ return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+}
+
+VkAccessFlags GrVkMemory::LayoutToSrcAccessMask(const VkImageLayout layout) {
+ // Currently we assume we will never be doing any explicit shader writes (this doesn't include
+ // color attachment or depth/stencil writes). So we will ignore the
+ // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.
+
+ // We can only directly access the host memory if we are in preinitialized or general layout,
+ // and the image is linear.
+ // TODO: Add check for linear here so we are not always adding host to general, and we should
+ // only be in preinitialized if we are linear
+ VkAccessFlags flags = 0;
+ if (VK_IMAGE_LAYOUT_GENERAL == layout) {
+ flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_TRANSFER_WRITE_BIT |
+ VK_ACCESS_TRANSFER_READ_BIT |
+ VK_ACCESS_SHADER_READ_BIT |
+ VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
+ } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
+ flags = VK_ACCESS_HOST_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
+ flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
+ flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
+ flags = VK_ACCESS_TRANSFER_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
+ flags = VK_ACCESS_TRANSFER_READ_BIT;
+ } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
+ flags = VK_ACCESS_SHADER_READ_BIT;
+ }
+ return flags;
+}
+
+void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
+ if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
+ VkMappedMemoryRange mappedMemoryRange;
+ memset(&mappedMemoryRange, 0, sizeof(VkMappedMemoryRange));
+ mappedMemoryRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
+ mappedMemoryRange.memory = alloc.fMemory;
+ mappedMemoryRange.offset = alloc.fOffset;
+ mappedMemoryRange.size = alloc.fSize;
+ GR_VK_CALL(gpu->vkInterface(), FlushMappedMemoryRanges(gpu->device(),
+ 1, &mappedMemoryRange));
+ }
+}
+
+void GrVkMemory::InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
+ if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
+ VkMappedMemoryRange mappedMemoryRange;
+ memset(&mappedMemoryRange, 0, sizeof(VkMappedMemoryRange));
+ mappedMemoryRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
+ mappedMemoryRange.memory = alloc.fMemory;
+ mappedMemoryRange.offset = alloc.fOffset;
+ mappedMemoryRange.size = alloc.fSize;
+ GR_VK_CALL(gpu->vkInterface(), InvalidateMappedMemoryRanges(gpu->device(),
+ 1, &mappedMemoryRange));
+ }
+}
+
+bool GrVkFreeListAlloc::alloc(VkDeviceSize requestedSize,
+ VkDeviceSize* allocOffset, VkDeviceSize* allocSize) {
+ VkDeviceSize alignedSize = align_size(requestedSize, fAlignment);
+
+ // find the smallest block big enough for our allocation
+ FreeList::Iter iter = fFreeList.headIter();
+ FreeList::Iter bestFitIter;
+ VkDeviceSize bestFitSize = fSize + 1;
+ VkDeviceSize secondLargestSize = 0;
+ VkDeviceSize secondLargestOffset = 0;
+ while (iter.get()) {
+ Block* block = iter.get();
+ // need to adjust size to match desired alignment
+ SkASSERT(align_size(block->fOffset, fAlignment) - block->fOffset == 0);
+ if (block->fSize >= alignedSize && block->fSize < bestFitSize) {
+ bestFitIter = iter;
+ bestFitSize = block->fSize;
+ }
+ if (secondLargestSize < block->fSize && block->fOffset != fLargestBlockOffset) {
+ secondLargestSize = block->fSize;
+ secondLargestOffset = block->fOffset;
+ }
+ iter.next();
+ }
+ SkASSERT(secondLargestSize <= fLargestBlockSize);
+
+ Block* bestFit = bestFitIter.get();
+ if (bestFit) {
+ SkASSERT(align_size(bestFit->fOffset, fAlignment) == bestFit->fOffset);
+ *allocOffset = bestFit->fOffset;
+ *allocSize = alignedSize;
+ // adjust or remove current block
+ VkDeviceSize originalBestFitOffset = bestFit->fOffset;
+ if (bestFit->fSize > alignedSize) {
+ bestFit->fOffset += alignedSize;
+ bestFit->fSize -= alignedSize;
+ if (fLargestBlockOffset == originalBestFitOffset) {
+ if (bestFit->fSize >= secondLargestSize) {
+ fLargestBlockSize = bestFit->fSize;
+ fLargestBlockOffset = bestFit->fOffset;
+ } else {
+ fLargestBlockSize = secondLargestSize;
+ fLargestBlockOffset = secondLargestOffset;
+ }
+ }
+#ifdef SK_DEBUG
+ VkDeviceSize largestSize = 0;
+ iter = fFreeList.headIter();
+ while (iter.get()) {
+ Block* block = iter.get();
+ if (largestSize < block->fSize) {
+ largestSize = block->fSize;
+ }
+ iter.next();
+ }
+ SkASSERT(largestSize == fLargestBlockSize);
+#endif
+ } else {
+ SkASSERT(bestFit->fSize == alignedSize);
+ if (fLargestBlockOffset == originalBestFitOffset) {
+ fLargestBlockSize = secondLargestSize;
+ fLargestBlockOffset = secondLargestOffset;
+ }
+ fFreeList.remove(bestFit);
+#ifdef SK_DEBUG
+ VkDeviceSize largestSize = 0;
+ iter = fFreeList.headIter();
+ while (iter.get()) {
+ Block* block = iter.get();
+ if (largestSize < block->fSize) {
+ largestSize = block->fSize;
+ }
+ iter.next();
+ }
+ SkASSERT(largestSize == fLargestBlockSize);
+#endif
+ }
+ fFreeSize -= alignedSize;
+ SkASSERT(*allocSize > 0);
+
+ return true;
+ }
+
+ SkDebugf("Can't allocate %llu bytes, %llu bytes available, largest free block %llu\n",
+ (unsigned long long)alignedSize, (unsigned long long)fFreeSize,
+ (unsigned long long)fLargestBlockSize);
+
+ return false;
+}
+
+void GrVkFreeListAlloc::free(VkDeviceSize allocOffset, VkDeviceSize allocSize) {
+ // find the block right after this allocation
+ FreeList::Iter iter = fFreeList.headIter();
+ FreeList::Iter prev;
+ while (iter.get() && iter.get()->fOffset < allocOffset) {
+ prev = iter;
+ iter.next();
+ }
+ // we have four cases:
+ // we exactly follow the previous one
+ Block* block;
+ if (prev.get() && prev.get()->fOffset + prev.get()->fSize == allocOffset) {
+ block = prev.get();
+ block->fSize += allocSize;
+ if (block->fOffset == fLargestBlockOffset) {
+ fLargestBlockSize = block->fSize;
+ }
+ // and additionally we may exactly precede the next one
+ if (iter.get() && iter.get()->fOffset == allocOffset + allocSize) {
+ block->fSize += iter.get()->fSize;
+ if (iter.get()->fOffset == fLargestBlockOffset) {
+ fLargestBlockOffset = block->fOffset;
+ fLargestBlockSize = block->fSize;
+ }
+ fFreeList.remove(iter.get());
+ }
+ // or we only exactly precede the next one
+ } else if (iter.get() && iter.get()->fOffset == allocOffset + allocSize) {
+ block = iter.get();
+ block->fSize += allocSize;
+ if (block->fOffset == fLargestBlockOffset) {
+ fLargestBlockOffset = allocOffset;
+ fLargestBlockSize = block->fSize;
+ }
+ block->fOffset = allocOffset;
+ // or we fall somewhere in between, with gaps
+ } else {
+ block = fFreeList.addBefore(iter);
+ block->fOffset = allocOffset;
+ block->fSize = allocSize;
+ }
+ fFreeSize += allocSize;
+ if (block->fSize > fLargestBlockSize) {
+ fLargestBlockSize = block->fSize;
+ fLargestBlockOffset = block->fOffset;
+ }
+
+#ifdef SK_DEBUG
+ VkDeviceSize largestSize = 0;
+ iter = fFreeList.headIter();
+ while (iter.get()) {
+ Block* block = iter.get();
+ if (largestSize < block->fSize) {
+ largestSize = block->fSize;
+ }
+ iter.next();
+ }
+ SkASSERT(fLargestBlockSize == largestSize);
+#endif
+}
+
+GrVkSubHeap::GrVkSubHeap(const GrVkGpu* gpu, uint32_t memoryTypeIndex, uint32_t heapIndex,
+ VkDeviceSize size, VkDeviceSize alignment)
+ : INHERITED(size, alignment)
+ , fGpu(gpu)
+#ifdef SK_DEBUG
+ , fHeapIndex(heapIndex)
+#endif
+ , fMemoryTypeIndex(memoryTypeIndex) {
+
+ VkMemoryAllocateInfo allocInfo = {
+ VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
+ NULL, // pNext
+ size, // allocationSize
+ memoryTypeIndex, // memoryTypeIndex
+ };
+
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateMemory(gpu->device(),
+ &allocInfo,
+ nullptr,
+ &fAlloc));
+ if (VK_SUCCESS != err) {
+ this->reset();
+ }
+#ifdef SK_DEBUG
+ else {
+ gHeapUsage[heapIndex] += size;
+ }
+#endif
+}
+
+GrVkSubHeap::~GrVkSubHeap() {
+ const GrVkInterface* iface = fGpu->vkInterface();
+ GR_VK_CALL(iface, FreeMemory(fGpu->device(), fAlloc, nullptr));
+#ifdef SK_DEBUG
+ gHeapUsage[fHeapIndex] -= fSize;
+#endif
+}
+
+bool GrVkSubHeap::alloc(VkDeviceSize size, GrVkAlloc* alloc) {
+ alloc->fMemory = fAlloc;
+ return INHERITED::alloc(size, &alloc->fOffset, &alloc->fSize);
+}
+
+void GrVkSubHeap::free(const GrVkAlloc& alloc) {
+ SkASSERT(alloc.fMemory == fAlloc);
+
+ INHERITED::free(alloc.fOffset, alloc.fSize);
+}
+
+bool GrVkHeap::subAlloc(VkDeviceSize size, VkDeviceSize alignment,
+ uint32_t memoryTypeIndex, uint32_t heapIndex, GrVkAlloc* alloc) {
+ VkDeviceSize alignedSize = align_size(size, alignment);
+
+ // if the requested size is larger than our subheap size, just alloc directly
+ if (alignedSize > fSubHeapSize) {
+ VkMemoryAllocateInfo allocInfo = {
+ VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
+ NULL, // pNext
+ size, // allocationSize
+ memoryTypeIndex, // memoryTypeIndex
+ };
+
+ VkResult err = GR_VK_CALL(fGpu->vkInterface(), AllocateMemory(fGpu->device(),
+ &allocInfo,
+ nullptr,
+ &alloc->fMemory));
+ if (VK_SUCCESS != err) {
+ return false;
+ }
+ alloc->fOffset = 0;
+ alloc->fSize = 0; // hint that this is not a subheap allocation
+#ifdef SK_DEBUG
+ gHeapUsage[VK_MAX_MEMORY_HEAPS] += alignedSize;
+#endif
+
+ return true;
+ }
+
+ // first try to find a subheap that fits our allocation request
+ int bestFitIndex = -1;
+ VkDeviceSize bestFitSize = 0x7FFFFFFF;
+ for (auto i = 0; i < fSubHeaps.count(); ++i) {
+ if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex &&
+ fSubHeaps[i]->alignment() == alignment) {
+ VkDeviceSize heapSize = fSubHeaps[i]->largestBlockSize();
+ if (heapSize >= alignedSize && heapSize < bestFitSize) {
+ bestFitIndex = i;
+ bestFitSize = heapSize;
+ }
+ }
+ }
+
+ if (bestFitIndex >= 0) {
+ SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment);
+ if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) {
+ fUsedSize += alloc->fSize;
+ return true;
+ }
+ return false;
+ }
+
+ // need to allocate a new subheap
+ SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back();
+ subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, heapIndex, fSubHeapSize, alignment));
+ // try to recover from failed allocation by only allocating what we need
+ if (subHeap->size() == 0) {
+ VkDeviceSize alignedSize = align_size(size, alignment);
+ subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, heapIndex, alignedSize, alignment));
+ if (subHeap->size() == 0) {
+ return false;
+ }
+ }
+ fAllocSize += fSubHeapSize;
+ if (subHeap->alloc(size, alloc)) {
+ fUsedSize += alloc->fSize;
+ return true;
+ }
+
+ return false;
+}
+
+bool GrVkHeap::singleAlloc(VkDeviceSize size, VkDeviceSize alignment,
+ uint32_t memoryTypeIndex, uint32_t heapIndex, GrVkAlloc* alloc) {
+ VkDeviceSize alignedSize = align_size(size, alignment);
+
+ // first try to find an unallocated subheap that fits our allocation request
+ int bestFitIndex = -1;
+ VkDeviceSize bestFitSize = 0x7FFFFFFF;
+ for (auto i = 0; i < fSubHeaps.count(); ++i) {
+ if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex &&
+ fSubHeaps[i]->alignment() == alignment &&
+ fSubHeaps[i]->unallocated()) {
+ VkDeviceSize heapSize = fSubHeaps[i]->size();
+ if (heapSize >= alignedSize && heapSize < bestFitSize) {
+ bestFitIndex = i;
+ bestFitSize = heapSize;
+ }
+ }
+ }
+
+ if (bestFitIndex >= 0) {
+ SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment);
+ if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) {
+ fUsedSize += alloc->fSize;
+ return true;
+ }
+ return false;
+ }
+
+ // need to allocate a new subheap
+ SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back();
+ subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, heapIndex, alignedSize, alignment));
+ fAllocSize += alignedSize;
+ if (subHeap->alloc(size, alloc)) {
+ fUsedSize += alloc->fSize;
+ return true;
+ }
+
+ return false;
+}
+
+bool GrVkHeap::free(const GrVkAlloc& alloc) {
+ // a size of 0 means we're using the system heap
+ if (0 == alloc.fSize) {
+ const GrVkInterface* iface = fGpu->vkInterface();
+ GR_VK_CALL(iface, FreeMemory(fGpu->device(), alloc.fMemory, nullptr));
+ return true;
+ }
+
+ for (auto i = 0; i < fSubHeaps.count(); ++i) {
+ if (fSubHeaps[i]->memory() == alloc.fMemory) {
+ fSubHeaps[i]->free(alloc);
+ fUsedSize -= alloc.fSize;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
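
Two small helpers carry most of the logic above: get_valid_memory_type_index() scans memoryTypeBits for a type whose propertyFlags contain every requested flag, and align_size() rounds a size up to a power-of-two alignment. A standalone sketch of both follows; the three-entry type table is made-up data, not a real VkPhysicalDeviceMemoryProperties:

    // Standalone sketch of the memory-type scan and power-of-two round-up above.
    #include <cstdint>
    #include <cstdio>

    static uint64_t align_size(uint64_t size, uint64_t alignment) {
        return (size + alignment - 1) & ~(alignment - 1);   // alignment must be a power of two
    }

    int main() {
        // propertyFlags per type (bit 0 = DEVICE_LOCAL, bit 1 = HOST_VISIBLE, bit 2 = HOST_CACHED)
        const uint32_t kTypeFlags[] = { 0x1, 0x2, 0x6 };
        const uint32_t typeBits = 0x6;          // the resource may live in types 1 or 2
        const uint32_t requested = 0x2 | 0x4;   // want HOST_VISIBLE | HOST_CACHED

        for (uint32_t i = 0; i < 3; ++i) {
            if ((typeBits & (1u << i)) && (kTypeFlags[i] & requested) == requested) {
                std::printf("picked memory type %u\n", i);  // picks type 2
                break;
            }
        }
        std::printf("aligned size: %llu\n",
                    (unsigned long long)align_size(1000, 256));  // prints 1024
        return 0;
    }
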
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkMemory.h b/gfx/skia/skia/src/gpu/vk/GrVkMemory.h
new file mode 100644
index 000000000..a1d4392eb
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkMemory.h
@@ -0,0 +1,167 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkMemory_DEFINED
+#define GrVkMemory_DEFINED
+
+#include "GrVkBuffer.h"
+#include "SkTArray.h"
+#include "SkTLList.h"
+#include "vk/GrVkDefines.h"
+#include "vk/GrVkTypes.h"
+
+class GrVkGpu;
+
+namespace GrVkMemory {
+ /**
+ * Allocates vulkan device memory and binds it to the gpu's device for the given object.
+ * Returns true if allocation succeeded.
+ */
+ bool AllocAndBindBufferMemory(const GrVkGpu* gpu,
+ VkBuffer buffer,
+ GrVkBuffer::Type type,
+ bool dynamic,
+ GrVkAlloc* alloc);
+ void FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type, const GrVkAlloc& alloc);
+
+ bool AllocAndBindImageMemory(const GrVkGpu* gpu,
+ VkImage image,
+ bool linearTiling,
+ GrVkAlloc* alloc);
+ void FreeImageMemory(const GrVkGpu* gpu, bool linearTiling, const GrVkAlloc& alloc);
+
+ VkPipelineStageFlags LayoutToPipelineStageFlags(const VkImageLayout layout);
+
+ VkAccessFlags LayoutToSrcAccessMask(const VkImageLayout layout);
+
+ void FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc);
+ void InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc);
+}
+
+class GrVkFreeListAlloc {
+public:
+ GrVkFreeListAlloc(VkDeviceSize size, VkDeviceSize alignment)
+ : fSize(size)
+ , fAlignment(alignment)
+ , fFreeSize(size)
+ , fLargestBlockSize(size)
+ , fLargestBlockOffset(0) {
+ Block* block = fFreeList.addToTail();
+ block->fOffset = 0;
+ block->fSize = fSize;
+ }
+ ~GrVkFreeListAlloc() {
+ this->reset();
+ }
+
+ VkDeviceSize size() const { return fSize; }
+ VkDeviceSize alignment() const { return fAlignment; }
+ VkDeviceSize freeSize() const { return fFreeSize; }
+ VkDeviceSize largestBlockSize() const { return fLargestBlockSize; }
+
+ bool unallocated() const { return fSize == fFreeSize; }
+
+protected:
+ bool alloc(VkDeviceSize requestedSize, VkDeviceSize* allocOffset, VkDeviceSize* allocSize);
+ void free(VkDeviceSize allocOffset, VkDeviceSize allocSize);
+
+ void reset() {
+ fSize = 0;
+ fAlignment = 0;
+ fFreeSize = 0;
+ fLargestBlockSize = 0;
+ fFreeList.reset();
+ }
+
+ struct Block {
+ VkDeviceSize fOffset;
+ VkDeviceSize fSize;
+ };
+ typedef SkTLList<Block, 16> FreeList;
+
+ VkDeviceSize fSize;
+ VkDeviceSize fAlignment;
+ VkDeviceSize fFreeSize;
+ VkDeviceSize fLargestBlockSize;
+ VkDeviceSize fLargestBlockOffset;
+ FreeList fFreeList;
+};
+
+class GrVkSubHeap : public GrVkFreeListAlloc {
+public:
+ GrVkSubHeap(const GrVkGpu* gpu, uint32_t memoryTypeIndex, uint32_t heapIndex,
+ VkDeviceSize size, VkDeviceSize alignment);
+ ~GrVkSubHeap();
+
+ uint32_t memoryTypeIndex() const { return fMemoryTypeIndex; }
+ VkDeviceMemory memory() { return fAlloc; }
+
+ bool alloc(VkDeviceSize requestedSize, GrVkAlloc* alloc);
+ void free(const GrVkAlloc& alloc);
+
+private:
+ const GrVkGpu* fGpu;
+#ifdef SK_DEBUG
+ uint32_t fHeapIndex;
+#endif
+ uint32_t fMemoryTypeIndex;
+ VkDeviceMemory fAlloc;
+
+ typedef GrVkFreeListAlloc INHERITED;
+};
+
+class GrVkHeap {
+public:
+ enum Strategy {
+ kSubAlloc_Strategy, // alloc large subheaps and suballoc within them
+ kSingleAlloc_Strategy // alloc/recycle an individual subheap per object
+ };
+
+ GrVkHeap(const GrVkGpu* gpu, Strategy strategy, VkDeviceSize subHeapSize)
+ : fGpu(gpu)
+ , fSubHeapSize(subHeapSize)
+ , fAllocSize(0)
+ , fUsedSize(0) {
+ if (strategy == kSubAlloc_Strategy) {
+ fAllocFunc = &GrVkHeap::subAlloc;
+ } else {
+ fAllocFunc = &GrVkHeap::singleAlloc;
+ }
+ }
+
+ ~GrVkHeap() {}
+
+ VkDeviceSize allocSize() const { return fAllocSize; }
+ VkDeviceSize usedSize() const { return fUsedSize; }
+
+ bool alloc(VkDeviceSize size, VkDeviceSize alignment, uint32_t memoryTypeIndex,
+ uint32_t heapIndex, GrVkAlloc* alloc) {
+ SkASSERT(size > 0);
+ return (*this.*fAllocFunc)(size, alignment, memoryTypeIndex, heapIndex, alloc);
+ }
+ bool free(const GrVkAlloc& alloc);
+
+private:
+ typedef bool (GrVkHeap::*AllocFunc)(VkDeviceSize size, VkDeviceSize alignment,
+ uint32_t memoryTypeIndex, uint32_t heapIndex,
+ GrVkAlloc* alloc);
+
+ bool subAlloc(VkDeviceSize size, VkDeviceSize alignment,
+ uint32_t memoryTypeIndex, uint32_t heapIndex,
+ GrVkAlloc* alloc);
+ bool singleAlloc(VkDeviceSize size, VkDeviceSize alignment,
+ uint32_t memoryTypeIndex, uint32_t heapIndex,
+ GrVkAlloc* alloc);
+
+ const GrVkGpu* fGpu;
+ VkDeviceSize fSubHeapSize;
+ VkDeviceSize fAllocSize;
+ VkDeviceSize fUsedSize;
+ AllocFunc fAllocFunc;
+ SkTArray<SkAutoTDelete<GrVkSubHeap>> fSubHeaps;
+};
+#endif
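
GrVkHeap picks its allocation strategy once in the constructor and stores it as a pointer-to-member function, so alloc() is a single indirect call through (*this.*fAllocFunc). A standalone sketch of that dispatch pattern follows; Heap below is a stand-in class, not the backend's:

    // Standalone sketch of the strategy dispatch used by GrVkHeap above.
    #include <cstddef>
    #include <cstdio>

    class Heap {
    public:
        enum Strategy { kSubAlloc_Strategy, kSingleAlloc_Strategy };

        explicit Heap(Strategy strategy)
            : fAllocFunc(strategy == kSubAlloc_Strategy ? &Heap::subAlloc : &Heap::singleAlloc) {}

        bool alloc(std::size_t size) { return (*this.*fAllocFunc)(size); }

    private:
        typedef bool (Heap::*AllocFunc)(std::size_t size);

        bool subAlloc(std::size_t size)    { std::printf("suballoc %zu\n", size);     return true; }
        bool singleAlloc(std::size_t size) { std::printf("single alloc %zu\n", size); return true; }

        AllocFunc fAllocFunc;
    };

    int main() {
        Heap heap(Heap::kSubAlloc_Strategy);
        heap.alloc(4096);   // dispatches through fAllocFunc to subAlloc()
        return 0;
    }
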
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkPipeline.cpp b/gfx/skia/skia/src/gpu/vk/GrVkPipeline.cpp
new file mode 100644
index 000000000..910398183
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkPipeline.cpp
@@ -0,0 +1,546 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkPipeline.h"
+
+#include "GrGeometryProcessor.h"
+#include "GrPipeline.h"
+#include "GrVkCommandBuffer.h"
+#include "GrVkGpu.h"
+#include "GrVkRenderTarget.h"
+#include "GrVkUtil.h"
+
+static inline const VkFormat& attrib_type_to_vkformat(GrVertexAttribType type) {
+ SkASSERT(type >= 0 && type < kGrVertexAttribTypeCount);
+ static const VkFormat kFormats[kGrVertexAttribTypeCount] = {
+ VK_FORMAT_R32_SFLOAT, // kFloat_GrVertexAttribType
+ VK_FORMAT_R32G32_SFLOAT, // kVec2f_GrVertexAttribType
+ VK_FORMAT_R32G32B32_SFLOAT, // kVec3f_GrVertexAttribType
+ VK_FORMAT_R32G32B32A32_SFLOAT, // kVec4f_GrVertexAttribType
+ VK_FORMAT_R8_UNORM, // kUByte_GrVertexAttribType
+ VK_FORMAT_R8G8B8A8_UNORM, // kVec4ub_GrVertexAttribType
+ VK_FORMAT_R16G16_UNORM, // kVec2us_GrVertexAttribType
+ };
+ GR_STATIC_ASSERT(0 == kFloat_GrVertexAttribType);
+ GR_STATIC_ASSERT(1 == kVec2f_GrVertexAttribType);
+ GR_STATIC_ASSERT(2 == kVec3f_GrVertexAttribType);
+ GR_STATIC_ASSERT(3 == kVec4f_GrVertexAttribType);
+ GR_STATIC_ASSERT(4 == kUByte_GrVertexAttribType);
+ GR_STATIC_ASSERT(5 == kVec4ub_GrVertexAttribType);
+ GR_STATIC_ASSERT(6 == kVec2us_GrVertexAttribType);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kFormats) == kGrVertexAttribTypeCount);
+ return kFormats[type];
+}
+
+static void setup_vertex_input_state(const GrPrimitiveProcessor& primProc,
+ VkPipelineVertexInputStateCreateInfo* vertexInputInfo,
+ VkVertexInputBindingDescription* bindingDesc,
+ int maxBindingDescCount,
+ VkVertexInputAttributeDescription* attributeDesc) {
+ // for now we have only one vertex buffer and one binding
+ memset(bindingDesc, 0, sizeof(VkVertexInputBindingDescription));
+ bindingDesc->binding = 0;
+ bindingDesc->stride = (uint32_t)primProc.getVertexStride();
+ bindingDesc->inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
+
+ // setup attribute descriptions
+ int vaCount = primProc.numAttribs();
+ if (vaCount > 0) {
+ size_t offset = 0;
+ for (int attribIndex = 0; attribIndex < vaCount; attribIndex++) {
+ const GrGeometryProcessor::Attribute& attrib = primProc.getAttrib(attribIndex);
+ GrVertexAttribType attribType = attrib.fType;
+
+ VkVertexInputAttributeDescription& vkAttrib = attributeDesc[attribIndex];
+ vkAttrib.location = attribIndex; // for now assume location = attribIndex
+ vkAttrib.binding = 0; // for now only one vertex buffer & binding
+ vkAttrib.format = attrib_type_to_vkformat(attribType);
+ vkAttrib.offset = static_cast<uint32_t>(offset);
+ offset += attrib.fOffset;
+ }
+ }
+
+ memset(vertexInputInfo, 0, sizeof(VkPipelineVertexInputStateCreateInfo));
+ vertexInputInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+ vertexInputInfo->pNext = nullptr;
+ vertexInputInfo->flags = 0;
+ vertexInputInfo->vertexBindingDescriptionCount = 1;
+ vertexInputInfo->pVertexBindingDescriptions = bindingDesc;
+ vertexInputInfo->vertexAttributeDescriptionCount = vaCount;
+ vertexInputInfo->pVertexAttributeDescriptions = attributeDesc;
+}
+
+
+static void setup_input_assembly_state(GrPrimitiveType primitiveType,
+ VkPipelineInputAssemblyStateCreateInfo* inputAssemblyInfo) {
+ static const VkPrimitiveTopology gPrimitiveType2VkTopology[] = {
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN,
+ VK_PRIMITIVE_TOPOLOGY_POINT_LIST,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST,
+ VK_PRIMITIVE_TOPOLOGY_LINE_STRIP
+ };
+
+ memset(inputAssemblyInfo, 0, sizeof(VkPipelineInputAssemblyStateCreateInfo));
+ inputAssemblyInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
+ inputAssemblyInfo->pNext = nullptr;
+ inputAssemblyInfo->flags = 0;
+ inputAssemblyInfo->primitiveRestartEnable = false;
+ inputAssemblyInfo->topology = gPrimitiveType2VkTopology[primitiveType];
+}
+
+
+static VkStencilOp stencil_op_to_vk_stencil_op(GrStencilOp op) {
+ static const VkStencilOp gTable[] = {
+ VK_STENCIL_OP_KEEP, // kKeep
+ VK_STENCIL_OP_ZERO, // kZero
+ VK_STENCIL_OP_REPLACE, // kReplace
+ VK_STENCIL_OP_INVERT, // kInvert
+ VK_STENCIL_OP_INCREMENT_AND_WRAP, // kIncWrap
+ VK_STENCIL_OP_DECREMENT_AND_WRAP, // kDecWrap
+ VK_STENCIL_OP_INCREMENT_AND_CLAMP, // kIncClamp
+ VK_STENCIL_OP_DECREMENT_AND_CLAMP, // kDecClamp
+ };
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kGrStencilOpCount);
+ GR_STATIC_ASSERT(0 == (int)GrStencilOp::kKeep);
+ GR_STATIC_ASSERT(1 == (int)GrStencilOp::kZero);
+ GR_STATIC_ASSERT(2 == (int)GrStencilOp::kReplace);
+ GR_STATIC_ASSERT(3 == (int)GrStencilOp::kInvert);
+ GR_STATIC_ASSERT(4 == (int)GrStencilOp::kIncWrap);
+ GR_STATIC_ASSERT(5 == (int)GrStencilOp::kDecWrap);
+ GR_STATIC_ASSERT(6 == (int)GrStencilOp::kIncClamp);
+ GR_STATIC_ASSERT(7 == (int)GrStencilOp::kDecClamp);
+ SkASSERT(op < (GrStencilOp)kGrStencilOpCount);
+ return gTable[(int)op];
+}
+
+static VkCompareOp stencil_func_to_vk_compare_op(GrStencilTest test) {
+ static const VkCompareOp gTable[] = {
+ VK_COMPARE_OP_ALWAYS, // kAlways
+ VK_COMPARE_OP_NEVER, // kNever
+ VK_COMPARE_OP_GREATER, // kGreater
+ VK_COMPARE_OP_GREATER_OR_EQUAL, // kGEqual
+ VK_COMPARE_OP_LESS, // kLess
+ VK_COMPARE_OP_LESS_OR_EQUAL, // kLEqual
+ VK_COMPARE_OP_EQUAL, // kEqual
+ VK_COMPARE_OP_NOT_EQUAL, // kNotEqual
+ };
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kGrStencilTestCount);
+ GR_STATIC_ASSERT(0 == (int)GrStencilTest::kAlways);
+ GR_STATIC_ASSERT(1 == (int)GrStencilTest::kNever);
+ GR_STATIC_ASSERT(2 == (int)GrStencilTest::kGreater);
+ GR_STATIC_ASSERT(3 == (int)GrStencilTest::kGEqual);
+ GR_STATIC_ASSERT(4 == (int)GrStencilTest::kLess);
+ GR_STATIC_ASSERT(5 == (int)GrStencilTest::kLEqual);
+ GR_STATIC_ASSERT(6 == (int)GrStencilTest::kEqual);
+ GR_STATIC_ASSERT(7 == (int)GrStencilTest::kNotEqual);
+ SkASSERT(test < (GrStencilTest)kGrStencilTestCount);
+
+ return gTable[(int)test];
+}
+
+static void setup_depth_stencil_state(const GrStencilSettings& stencilSettings,
+ VkPipelineDepthStencilStateCreateInfo* stencilInfo) {
+ memset(stencilInfo, 0, sizeof(VkPipelineDepthStencilStateCreateInfo));
+ stencilInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
+ stencilInfo->pNext = nullptr;
+ stencilInfo->flags = 0;
+ // set depth testing defaults
+ stencilInfo->depthTestEnable = VK_FALSE;
+ stencilInfo->depthWriteEnable = VK_FALSE;
+ stencilInfo->depthCompareOp = VK_COMPARE_OP_ALWAYS;
+ stencilInfo->depthBoundsTestEnable = VK_FALSE;
+ stencilInfo->stencilTestEnable = !stencilSettings.isDisabled();
+ if (!stencilSettings.isDisabled()) {
+ // Set front face
+ const GrStencilSettings::Face& front = stencilSettings.front();
+ stencilInfo->front.failOp = stencil_op_to_vk_stencil_op(front.fFailOp);
+ stencilInfo->front.passOp = stencil_op_to_vk_stencil_op(front.fPassOp);
+ stencilInfo->front.depthFailOp = stencilInfo->front.failOp;
+ stencilInfo->front.compareOp = stencil_func_to_vk_compare_op(front.fTest);
+ stencilInfo->front.compareMask = front.fTestMask;
+ stencilInfo->front.writeMask = front.fWriteMask;
+ stencilInfo->front.reference = front.fRef;
+
+ // Set back face
+ if (!stencilSettings.isTwoSided()) {
+ stencilInfo->back = stencilInfo->front;
+ } else {
+ const GrStencilSettings::Face& back = stencilSettings.back();
+ stencilInfo->back.failOp = stencil_op_to_vk_stencil_op(back.fFailOp);
+ stencilInfo->back.passOp = stencil_op_to_vk_stencil_op(back.fPassOp);
+ stencilInfo->back.depthFailOp = stencilInfo->back.failOp;
+ stencilInfo->back.compareOp = stencil_func_to_vk_compare_op(back.fTest);
+ stencilInfo->back.compareMask = back.fTestMask;
+ stencilInfo->back.writeMask = back.fWriteMask;
+ stencilInfo->back.reference = back.fRef;
+ }
+ }
+ stencilInfo->minDepthBounds = 0.0f;
+ stencilInfo->maxDepthBounds = 1.0f;
+}
+
+static void setup_viewport_scissor_state(VkPipelineViewportStateCreateInfo* viewportInfo) {
+ memset(viewportInfo, 0, sizeof(VkPipelineViewportStateCreateInfo));
+ viewportInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
+ viewportInfo->pNext = nullptr;
+ viewportInfo->flags = 0;
+
+ viewportInfo->viewportCount = 1;
+ viewportInfo->pViewports = nullptr; // This is set dynamically
+
+ viewportInfo->scissorCount = 1;
+ viewportInfo->pScissors = nullptr; // This is set dynamically
+
+ SkASSERT(viewportInfo->viewportCount == viewportInfo->scissorCount);
+}
+
+static void setup_multisample_state(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ const GrCaps* caps,
+ VkPipelineMultisampleStateCreateInfo* multisampleInfo) {
+ memset(multisampleInfo, 0, sizeof(VkPipelineMultisampleStateCreateInfo));
+ multisampleInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
+ multisampleInfo->pNext = nullptr;
+ multisampleInfo->flags = 0;
+ int numSamples = pipeline.getRenderTarget()->numColorSamples();
+ SkAssertResult(GrSampleCountToVkSampleCount(numSamples,
+ &multisampleInfo->rasterizationSamples));
+ float sampleShading = primProc.getSampleShading();
+ SkASSERT(sampleShading == 0.0f || caps->sampleShadingSupport());
+ multisampleInfo->sampleShadingEnable = sampleShading > 0.0f;
+ multisampleInfo->minSampleShading = sampleShading;
+ multisampleInfo->pSampleMask = nullptr;
+ multisampleInfo->alphaToCoverageEnable = VK_FALSE;
+ multisampleInfo->alphaToOneEnable = VK_FALSE;
+}
+
+static VkBlendFactor blend_coeff_to_vk_blend(GrBlendCoeff coeff) {
+ static const VkBlendFactor gTable[] = {
+ VK_BLEND_FACTOR_ZERO, // kZero_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE, // kOne_GrBlendCoeff
+ VK_BLEND_FACTOR_SRC_COLOR, // kSC_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR, // kISC_GrBlendCoeff
+ VK_BLEND_FACTOR_DST_COLOR, // kDC_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR, // kIDC_GrBlendCoeff
+ VK_BLEND_FACTOR_SRC_ALPHA, // kSA_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA, // kISA_GrBlendCoeff
+ VK_BLEND_FACTOR_DST_ALPHA, // kDA_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA, // kIDA_GrBlendCoeff
+ VK_BLEND_FACTOR_CONSTANT_COLOR, // kConstC_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR, // kIConstC_GrBlendCoeff
+ VK_BLEND_FACTOR_CONSTANT_ALPHA, // kConstA_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA, // kIConstA_GrBlendCoeff
+ VK_BLEND_FACTOR_SRC1_COLOR, // kS2C_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR, // kIS2C_GrBlendCoeff
+ VK_BLEND_FACTOR_SRC1_ALPHA, // kS2A_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA, // kIS2A_GrBlendCoeff
+
+ };
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kGrBlendCoeffCnt);
+ GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff);
+ GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff);
+ GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff);
+ GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff);
+ GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff);
+ GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff);
+ GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff);
+ GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff);
+ GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff);
+ GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff);
+ GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff);
+ GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff);
+ GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff);
+ GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff);
+ GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff);
+ GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff);
+ GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff);
+ GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff);
+
+ SkASSERT((unsigned)coeff < kGrBlendCoeffCnt);
+ return gTable[coeff];
+}
+
+
+static VkBlendOp blend_equation_to_vk_blend_op(GrBlendEquation equation) {
+ static const VkBlendOp gTable[] = {
+ VK_BLEND_OP_ADD, // kAdd_GrBlendEquation
+ VK_BLEND_OP_SUBTRACT, // kSubtract_GrBlendEquation
+ VK_BLEND_OP_REVERSE_SUBTRACT, // kReverseSubtract_GrBlendEquation
+ };
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(0 == kAdd_GrBlendEquation);
+ GR_STATIC_ASSERT(1 == kSubtract_GrBlendEquation);
+ GR_STATIC_ASSERT(2 == kReverseSubtract_GrBlendEquation);
+
+ SkASSERT((unsigned)equation < kGrBlendCoeffCnt);
+ return gTable[equation];
+}
+
+static bool blend_coeff_refs_constant(GrBlendCoeff coeff) {
+ static const bool gCoeffReferencesBlendConst[] = {
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ true,
+ true,
+ true,
+ true,
+
+ // extended blend coeffs
+ false,
+ false,
+ false,
+ false,
+ };
+ return gCoeffReferencesBlendConst[coeff];
+ GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gCoeffReferencesBlendConst));
+ // Individual enum asserts already made in blend_coeff_to_vk_blend
+}
+
+static void setup_color_blend_state(const GrPipeline& pipeline,
+ VkPipelineColorBlendStateCreateInfo* colorBlendInfo,
+ VkPipelineColorBlendAttachmentState* attachmentState) {
+ GrXferProcessor::BlendInfo blendInfo;
+ pipeline.getXferProcessor().getBlendInfo(&blendInfo);
+
+ GrBlendEquation equation = blendInfo.fEquation;
+ GrBlendCoeff srcCoeff = blendInfo.fSrcBlend;
+ GrBlendCoeff dstCoeff = blendInfo.fDstBlend;
+ bool blendOff = (kAdd_GrBlendEquation == equation || kSubtract_GrBlendEquation == equation) &&
+ kOne_GrBlendCoeff == srcCoeff && kZero_GrBlendCoeff == dstCoeff;
+
+ memset(attachmentState, 0, sizeof(VkPipelineColorBlendAttachmentState));
+ attachmentState->blendEnable = !blendOff;
+ if (!blendOff) {
+ attachmentState->srcColorBlendFactor = blend_coeff_to_vk_blend(srcCoeff);
+ attachmentState->dstColorBlendFactor = blend_coeff_to_vk_blend(dstCoeff);
+ attachmentState->colorBlendOp = blend_equation_to_vk_blend_op(equation);
+ attachmentState->srcAlphaBlendFactor = blend_coeff_to_vk_blend(srcCoeff);
+ attachmentState->dstAlphaBlendFactor = blend_coeff_to_vk_blend(dstCoeff);
+ attachmentState->alphaBlendOp = blend_equation_to_vk_blend_op(equation);
+ }
+
+ if (!blendInfo.fWriteColor) {
+ attachmentState->colorWriteMask = 0;
+ } else {
+ attachmentState->colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
+ VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
+ }
+
+ memset(colorBlendInfo, 0, sizeof(VkPipelineColorBlendStateCreateInfo));
+ colorBlendInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
+ colorBlendInfo->pNext = nullptr;
+ colorBlendInfo->flags = 0;
+ colorBlendInfo->logicOpEnable = VK_FALSE;
+ colorBlendInfo->attachmentCount = 1;
+ colorBlendInfo->pAttachments = attachmentState;
+ // colorBlendInfo->blendConstants is set dynamically
+}
+
+static VkCullModeFlags draw_face_to_vk_cull_mode(GrDrawFace drawFace) {
+ // Assumes that we've set the front face to be ccw
+ static const VkCullModeFlags gTable[] = {
+ VK_CULL_MODE_NONE, // kBoth_DrawFace
+ VK_CULL_MODE_BACK_BIT, // kCCW_DrawFace, cull back face
+ VK_CULL_MODE_FRONT_BIT, // kCW_DrawFace, cull front face
+ };
+ GR_STATIC_ASSERT(0 == (int)GrDrawFace::kBoth);
+ GR_STATIC_ASSERT(1 == (int)GrDrawFace::kCCW);
+ GR_STATIC_ASSERT(2 == (int)GrDrawFace::kCW);
+ SkASSERT(-1 < (int)drawFace && (int)drawFace <= 2);
+
+ return gTable[(int)drawFace];
+}
+
+static void setup_raster_state(const GrPipeline& pipeline,
+ VkPipelineRasterizationStateCreateInfo* rasterInfo) {
+ memset(rasterInfo, 0, sizeof(VkPipelineRasterizationStateCreateInfo));
+ rasterInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
+ rasterInfo->pNext = nullptr;
+ rasterInfo->flags = 0;
+ rasterInfo->depthClampEnable = VK_FALSE;
+ rasterInfo->rasterizerDiscardEnable = VK_FALSE;
+ rasterInfo->polygonMode = VK_POLYGON_MODE_FILL;
+ rasterInfo->cullMode = draw_face_to_vk_cull_mode(pipeline.getDrawFace());
+ rasterInfo->frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
+ rasterInfo->depthBiasEnable = VK_FALSE;
+ rasterInfo->depthBiasConstantFactor = 0.0f;
+ rasterInfo->depthBiasClamp = 0.0f;
+ rasterInfo->depthBiasSlopeFactor = 0.0f;
+ rasterInfo->lineWidth = 1.0f;
+}
+
+static void setup_dynamic_state(VkPipelineDynamicStateCreateInfo* dynamicInfo,
+ VkDynamicState* dynamicStates) {
+ memset(dynamicInfo, 0, sizeof(VkPipelineDynamicStateCreateInfo));
+ dynamicInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
+ dynamicInfo->pNext = nullptr;
+ dynamicInfo->flags = 0;
+ dynamicStates[0] = VK_DYNAMIC_STATE_VIEWPORT;
+ dynamicStates[1] = VK_DYNAMIC_STATE_SCISSOR;
+ dynamicStates[2] = VK_DYNAMIC_STATE_BLEND_CONSTANTS;
+ dynamicInfo->dynamicStateCount = 3;
+ dynamicInfo->pDynamicStates = dynamicStates;
+}
+
+GrVkPipeline* GrVkPipeline::Create(GrVkGpu* gpu, const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ VkPipelineShaderStageCreateInfo* shaderStageInfo,
+ int shaderStageCount,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass,
+ VkPipelineLayout layout,
+ VkPipelineCache cache) {
+ VkPipelineVertexInputStateCreateInfo vertexInputInfo;
+ VkVertexInputBindingDescription bindingDesc;
+ SkSTArray<16, VkVertexInputAttributeDescription> attributeDesc;
+ SkASSERT(primProc.numAttribs() <= gpu->vkCaps().maxVertexAttributes());
+ VkVertexInputAttributeDescription* pAttribs = attributeDesc.push_back_n(primProc.numAttribs());
+ setup_vertex_input_state(primProc, &vertexInputInfo, &bindingDesc, 1, pAttribs);
+
+ VkPipelineInputAssemblyStateCreateInfo inputAssemblyInfo;
+ setup_input_assembly_state(primitiveType, &inputAssemblyInfo);
+
+ VkPipelineDepthStencilStateCreateInfo depthStencilInfo;
+ setup_depth_stencil_state(pipeline.getStencil(), &depthStencilInfo);
+
+ VkPipelineViewportStateCreateInfo viewportInfo;
+ setup_viewport_scissor_state(&viewportInfo);
+
+ VkPipelineMultisampleStateCreateInfo multisampleInfo;
+ setup_multisample_state(pipeline, primProc, gpu->caps(), &multisampleInfo);
+
+ // We will only have one color attachment per pipeline.
+ VkPipelineColorBlendAttachmentState attachmentStates[1];
+ VkPipelineColorBlendStateCreateInfo colorBlendInfo;
+ setup_color_blend_state(pipeline, &colorBlendInfo, attachmentStates);
+
+ VkPipelineRasterizationStateCreateInfo rasterInfo;
+ setup_raster_state(pipeline, &rasterInfo);
+
+ VkDynamicState dynamicStates[3];
+ VkPipelineDynamicStateCreateInfo dynamicInfo;
+ setup_dynamic_state(&dynamicInfo, dynamicStates);
+
+ VkGraphicsPipelineCreateInfo pipelineCreateInfo;
+ memset(&pipelineCreateInfo, 0, sizeof(VkGraphicsPipelineCreateInfo));
+ pipelineCreateInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
+ pipelineCreateInfo.pNext = nullptr;
+ pipelineCreateInfo.flags = 0;
+ pipelineCreateInfo.stageCount = shaderStageCount;
+ pipelineCreateInfo.pStages = shaderStageInfo;
+ pipelineCreateInfo.pVertexInputState = &vertexInputInfo;
+ pipelineCreateInfo.pInputAssemblyState = &inputAssemblyInfo;
+ pipelineCreateInfo.pTessellationState = nullptr;
+ pipelineCreateInfo.pViewportState = &viewportInfo;
+ pipelineCreateInfo.pRasterizationState = &rasterInfo;
+ pipelineCreateInfo.pMultisampleState = &multisampleInfo;
+ pipelineCreateInfo.pDepthStencilState = &depthStencilInfo;
+ pipelineCreateInfo.pColorBlendState = &colorBlendInfo;
+ pipelineCreateInfo.pDynamicState = &dynamicInfo;
+ pipelineCreateInfo.layout = layout;
+ pipelineCreateInfo.renderPass = renderPass.vkRenderPass();
+ pipelineCreateInfo.subpass = 0;
+ pipelineCreateInfo.basePipelineHandle = VK_NULL_HANDLE;
+ pipelineCreateInfo.basePipelineIndex = -1;
+
+ VkPipeline vkPipeline;
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateGraphicsPipelines(gpu->device(),
+ cache, 1,
+ &pipelineCreateInfo,
+ nullptr, &vkPipeline));
+ if (err) {
+ return nullptr;
+ }
+
+ return new GrVkPipeline(vkPipeline);
+}
+
+void GrVkPipeline::freeGPUData(const GrVkGpu* gpu) const {
+ GR_VK_CALL(gpu->vkInterface(), DestroyPipeline(gpu->device(), fPipeline, nullptr));
+}
+
+static void set_dynamic_scissor_state(GrVkGpu* gpu,
+ GrVkCommandBuffer* cmdBuffer,
+ const GrPipeline& pipeline,
+ const GrRenderTarget& target) {
+ // We always use one scissor and if it is disabled we just make it the size of the RT
+ const GrScissorState& scissorState = pipeline.getScissorState();
+ VkRect2D scissor;
+ if (scissorState.enabled() &&
+ !scissorState.rect().contains(0, 0, target.width(), target.height())) {
+ // This all assumes the scissorState has previously been clipped to the device space render
+ // target.
+ scissor.offset.x = SkTMax(scissorState.rect().fLeft, 0);
+ scissor.extent.width = scissorState.rect().width();
+ if (kTopLeft_GrSurfaceOrigin == target.origin()) {
+ scissor.offset.y = scissorState.rect().fTop;
+ } else {
+ SkASSERT(kBottomLeft_GrSurfaceOrigin == target.origin());
+ scissor.offset.y = target.height() - scissorState.rect().fBottom;
+ }
+ scissor.offset.y = SkTMax(scissor.offset.y, 0);
+ scissor.extent.height = scissorState.rect().height();
+
+ SkASSERT(scissor.offset.x >= 0);
+ SkASSERT(scissor.offset.y >= 0);
+ } else {
+ scissor.extent.width = target.width();
+ scissor.extent.height = target.height();
+ scissor.offset.x = 0;
+ scissor.offset.y = 0;
+ }
+ cmdBuffer->setScissor(gpu, 0, 1, &scissor);
+}
+
+static void set_dynamic_viewport_state(GrVkGpu* gpu,
+ GrVkCommandBuffer* cmdBuffer,
+ const GrRenderTarget& target) {
+ // We always use one viewport the size of the RT
+ VkViewport viewport;
+ viewport.x = 0.0f;
+ viewport.y = 0.0f;
+ viewport.width = SkIntToScalar(target.width());
+ viewport.height = SkIntToScalar(target.height());
+ viewport.minDepth = 0.0f;
+ viewport.maxDepth = 1.0f;
+ cmdBuffer->setViewport(gpu, 0, 1, &viewport);
+}
+
+static void set_dynamic_blend_constant_state(GrVkGpu* gpu,
+ GrVkCommandBuffer* cmdBuffer,
+ const GrPipeline& pipeline) {
+ GrXferProcessor::BlendInfo blendInfo;
+ pipeline.getXferProcessor().getBlendInfo(&blendInfo);
+ GrBlendCoeff srcCoeff = blendInfo.fSrcBlend;
+ GrBlendCoeff dstCoeff = blendInfo.fDstBlend;
+ float floatColors[4];
+ if (blend_coeff_refs_constant(srcCoeff) || blend_coeff_refs_constant(dstCoeff)) {
+ GrColorToRGBAFloat(blendInfo.fBlendConstant, floatColors);
+ } else {
+ memset(floatColors, 0, 4 * sizeof(float));
+ }
+ cmdBuffer->setBlendConstants(gpu, floatColors);
+}
+
+void GrVkPipeline::SetDynamicState(GrVkGpu* gpu,
+ GrVkCommandBuffer* cmdBuffer,
+ const GrPipeline& pipeline) {
+ const GrRenderTarget& target = *pipeline.getRenderTarget();
+ set_dynamic_scissor_state(gpu, cmdBuffer, pipeline, target);
+ set_dynamic_viewport_state(gpu, cmdBuffer, target);
+ set_dynamic_blend_constant_state(gpu, cmdBuffer, pipeline);
+}
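
For orientation, a minimal caller-side sketch (not part of the patch) of how the two entry points in this file fit together; the gpu, pipeline, primProc, shaderStages, renderPass, layout, pipelineCache, and cmdBuffer objects are assumed to exist elsewhere and are purely illustrative:

    // Build the immutable pipeline object once; viewport, scissor, and blend
    // constants are left dynamic (see setup_dynamic_state above).
    GrVkPipeline* vkPipeline = GrVkPipeline::Create(gpu, pipeline, primProc,
                                                    shaderStages, 2,
                                                    kTriangles_GrPrimitiveType,
                                                    renderPass, layout, pipelineCache);
    if (vkPipeline) {
        // Per draw, the dynamic pieces are (re)issued on the command buffer.
        GrVkPipeline::SetDynamicState(gpu, cmdBuffer, pipeline);
    }
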
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkPipeline.h b/gfx/skia/skia/src/gpu/vk/GrVkPipeline.h
new file mode 100644
index 000000000..256518571
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkPipeline.h
@@ -0,0 +1,57 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkPipeline_DEFINED
+#define GrVkPipeline_DEFINED
+
+#include "GrTypes.h"
+
+#include "GrVkResource.h"
+
+#include "vk/GrVkDefines.h"
+
+class GrNonInstancedVertices;
+class GrPipeline;
+class GrPrimitiveProcessor;
+class GrVkCommandBuffer;
+class GrVkGpu;
+class GrVkRenderPass;
+
+class GrVkPipeline : public GrVkResource {
+public:
+ static GrVkPipeline* Create(GrVkGpu* gpu,
+ const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ VkPipelineShaderStageCreateInfo* shaderStageInfo,
+ int shaderStageCount,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass,
+ VkPipelineLayout layout,
+ VkPipelineCache cache);
+
+ VkPipeline pipeline() const { return fPipeline; }
+
+ static void SetDynamicState(GrVkGpu*, GrVkCommandBuffer*, const GrPipeline&);
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkPipeline: %d (%d refs)\n", fPipeline, this->getRefCnt());
+ }
+#endif
+
+protected:
+ GrVkPipeline(VkPipeline pipeline) : INHERITED(), fPipeline(pipeline) {}
+
+ VkPipeline fPipeline;
+
+private:
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ typedef GrVkResource INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkPipelineState.cpp b/gfx/skia/skia/src/gpu/vk/GrVkPipelineState.cpp
new file mode 100644
index 000000000..71ce831d6
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkPipelineState.cpp
@@ -0,0 +1,516 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkPipelineState.h"
+
+#include "GrPipeline.h"
+#include "GrTexturePriv.h"
+#include "GrVkCommandBuffer.h"
+#include "GrVkDescriptorPool.h"
+#include "GrVkDescriptorSet.h"
+#include "GrVkGpu.h"
+#include "GrVkImageView.h"
+#include "GrVkMemory.h"
+#include "GrVkPipeline.h"
+#include "GrVkRenderTarget.h"
+#include "GrVkSampler.h"
+#include "GrVkTexture.h"
+#include "GrVkUniformBuffer.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLGeometryProcessor.h"
+#include "glsl/GrGLSLXferProcessor.h"
+#include "SkMipMap.h"
+
+GrVkPipelineState::GrVkPipelineState(GrVkGpu* gpu,
+ const GrVkPipelineState::Desc& desc,
+ GrVkPipeline* pipeline,
+ VkPipelineLayout layout,
+ const GrVkDescriptorSetManager::Handle& samplerDSHandle,
+ const BuiltinUniformHandles& builtinUniformHandles,
+ const UniformInfoArray& uniforms,
+ uint32_t vertexUniformSize,
+ uint32_t fragmentUniformSize,
+ uint32_t numSamplers,
+ GrGLSLPrimitiveProcessor* geometryProcessor,
+ GrGLSLXferProcessor* xferProcessor,
+ const GrGLSLFragProcs& fragmentProcessors)
+ : fPipeline(pipeline)
+ , fPipelineLayout(layout)
+ , fUniformDescriptorSet(nullptr)
+ , fSamplerDescriptorSet(nullptr)
+ , fSamplerDSHandle(samplerDSHandle)
+ , fStartDS(SK_MaxS32)
+ , fDSCount(0)
+ , fBuiltinUniformHandles(builtinUniformHandles)
+ , fGeometryProcessor(geometryProcessor)
+ , fXferProcessor(xferProcessor)
+ , fFragmentProcessors(fragmentProcessors)
+ , fDesc(desc)
+ , fDataManager(uniforms, vertexUniformSize, fragmentUniformSize) {
+ fSamplers.setReserve(numSamplers);
+ fTextureViews.setReserve(numSamplers);
+ fTextures.setReserve(numSamplers);
+
+ fDescriptorSets[0] = VK_NULL_HANDLE;
+ fDescriptorSets[1] = VK_NULL_HANDLE;
+
+ // Currently we are always binding a descriptor set for uniform buffers.
+ if (vertexUniformSize || fragmentUniformSize) {
+ fDSCount++;
+ fStartDS = GrVkUniformHandler::kUniformBufferDescSet;
+ }
+ if (numSamplers) {
+ fDSCount++;
+ fStartDS = SkTMin(fStartDS, (int)GrVkUniformHandler::kSamplerDescSet);
+ }
+
+ fVertexUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, vertexUniformSize));
+ fFragmentUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, fragmentUniformSize));
+
+ fNumSamplers = numSamplers;
+}
+
+GrVkPipelineState::~GrVkPipelineState() {
+ // Must have freed all GPU resources before this is destroyed
+ SkASSERT(!fPipeline);
+ SkASSERT(!fPipelineLayout);
+ SkASSERT(!fSamplers.count());
+ SkASSERT(!fTextureViews.count());
+ SkASSERT(!fTextures.count());
+ for (int i = 0; i < fFragmentProcessors.count(); ++i) {
+ delete fFragmentProcessors[i];
+ }
+}
+
+void GrVkPipelineState::freeTempResources(const GrVkGpu* gpu) {
+ for (int i = 0; i < fSamplers.count(); ++i) {
+ fSamplers[i]->unref(gpu);
+ }
+ fSamplers.rewind();
+
+ for (int i = 0; i < fTextureViews.count(); ++i) {
+ fTextureViews[i]->unref(gpu);
+ }
+ fTextureViews.rewind();
+
+ for (int i = 0; i < fTextures.count(); ++i) {
+ fTextures[i]->unref(gpu);
+ }
+ fTextures.rewind();
+}
+
+void GrVkPipelineState::freeGPUResources(const GrVkGpu* gpu) {
+ if (fPipeline) {
+ fPipeline->unref(gpu);
+ fPipeline = nullptr;
+ }
+
+ if (fPipelineLayout) {
+ GR_VK_CALL(gpu->vkInterface(), DestroyPipelineLayout(gpu->device(),
+ fPipelineLayout,
+ nullptr));
+ fPipelineLayout = VK_NULL_HANDLE;
+ }
+
+ if (fVertexUniformBuffer) {
+ fVertexUniformBuffer->release(gpu);
+ }
+
+ if (fFragmentUniformBuffer) {
+ fFragmentUniformBuffer->release(gpu);
+ }
+
+ if (fUniformDescriptorSet) {
+ fUniformDescriptorSet->recycle(const_cast<GrVkGpu*>(gpu));
+ fUniformDescriptorSet = nullptr;
+ }
+
+ if (fSamplerDescriptorSet) {
+ fSamplerDescriptorSet->recycle(const_cast<GrVkGpu*>(gpu));
+ fSamplerDescriptorSet = nullptr;
+ }
+
+ this->freeTempResources(gpu);
+}
+
+void GrVkPipelineState::abandonGPUResources() {
+ fPipeline->unrefAndAbandon();
+ fPipeline = nullptr;
+
+ fPipelineLayout = VK_NULL_HANDLE;
+
+ fVertexUniformBuffer->abandon();
+ fFragmentUniformBuffer->abandon();
+
+ for (int i = 0; i < fSamplers.count(); ++i) {
+ fSamplers[i]->unrefAndAbandon();
+ }
+ fSamplers.rewind();
+
+ for (int i = 0; i < fTextureViews.count(); ++i) {
+ fTextureViews[i]->unrefAndAbandon();
+ }
+ fTextureViews.rewind();
+
+ for (int i = 0; i < fTextures.count(); ++i) {
+ fTextures[i]->unrefAndAbandon();
+ }
+ fTextures.rewind();
+
+ if (fUniformDescriptorSet) {
+ fUniformDescriptorSet->unrefAndAbandon();
+ fUniformDescriptorSet = nullptr;
+ }
+
+ if (fSamplerDescriptorSet) {
+ fSamplerDescriptorSet->unrefAndAbandon();
+ fSamplerDescriptorSet = nullptr;
+ }
+}
+
+static void append_texture_bindings(const GrProcessor& processor,
+ SkTArray<const GrTextureAccess*>* textureBindings) {
+ if (int numTextures = processor.numTextures()) {
+ const GrTextureAccess** bindings = textureBindings->push_back_n(numTextures);
+ int i = 0;
+ do {
+ bindings[i] = &processor.textureAccess(i);
+ } while (++i < numTextures);
+ }
+}
+
+void GrVkPipelineState::setData(GrVkGpu* gpu,
+ const GrPrimitiveProcessor& primProc,
+ const GrPipeline& pipeline) {
+ // This is here to protect against someone calling setData multiple times in a row without
+ // freeing the tempData between calls.
+ this->freeTempResources(gpu);
+
+ this->setRenderTargetState(pipeline);
+
+ SkSTArray<8, const GrTextureAccess*> textureBindings;
+
+ fGeometryProcessor->setData(fDataManager, primProc,
+ GrFragmentProcessor::CoordTransformIter(pipeline));
+ append_texture_bindings(primProc, &textureBindings);
+
+ GrFragmentProcessor::Iter iter(pipeline);
+ GrGLSLFragmentProcessor::Iter glslIter(fFragmentProcessors.begin(),
+ fFragmentProcessors.count());
+ const GrFragmentProcessor* fp = iter.next();
+ GrGLSLFragmentProcessor* glslFP = glslIter.next();
+ while (fp && glslFP) {
+ glslFP->setData(fDataManager, *fp);
+ append_texture_bindings(*fp, &textureBindings);
+ fp = iter.next();
+ glslFP = glslIter.next();
+ }
+ SkASSERT(!fp && !glslFP);
+
+ fXferProcessor->setData(fDataManager, pipeline.getXferProcessor());
+ append_texture_bindings(pipeline.getXferProcessor(), &textureBindings);
+
+ // Get new descriptor sets
+ if (fNumSamplers) {
+ if (fSamplerDescriptorSet) {
+ fSamplerDescriptorSet->recycle(gpu);
+ }
+ fSamplerDescriptorSet = gpu->resourceProvider().getSamplerDescriptorSet(fSamplerDSHandle);
+ int samplerDSIdx = GrVkUniformHandler::kSamplerDescSet;
+ fDescriptorSets[samplerDSIdx] = fSamplerDescriptorSet->descriptorSet();
+ this->writeSamplers(gpu, textureBindings, pipeline.getAllowSRGBInputs());
+ }
+
+ if (fVertexUniformBuffer.get() || fFragmentUniformBuffer.get()) {
+ if (fDataManager.uploadUniformBuffers(gpu, fVertexUniformBuffer, fFragmentUniformBuffer) ||
+ !fUniformDescriptorSet) {
+ if (fUniformDescriptorSet) {
+ fUniformDescriptorSet->recycle(gpu);
+ }
+ fUniformDescriptorSet = gpu->resourceProvider().getUniformDescriptorSet();
+ int uniformDSIdx = GrVkUniformHandler::kUniformBufferDescSet;
+ fDescriptorSets[uniformDSIdx] = fUniformDescriptorSet->descriptorSet();
+ this->writeUniformBuffers(gpu);
+ }
+ }
+}
+
+void GrVkPipelineState::writeUniformBuffers(const GrVkGpu* gpu) {
+ VkWriteDescriptorSet descriptorWrites[2];
+ memset(descriptorWrites, 0, 2 * sizeof(VkWriteDescriptorSet));
+
+ uint32_t firstUniformWrite = 0;
+ uint32_t uniformBindingUpdateCount = 0;
+
+ VkDescriptorBufferInfo vertBufferInfo;
+ // Vertex Uniform Buffer
+ if (fVertexUniformBuffer.get()) {
+ ++uniformBindingUpdateCount;
+ memset(&vertBufferInfo, 0, sizeof(VkDescriptorBufferInfo));
+ vertBufferInfo.buffer = fVertexUniformBuffer->buffer();
+ vertBufferInfo.offset = fVertexUniformBuffer->offset();
+ vertBufferInfo.range = fVertexUniformBuffer->size();
+
+ descriptorWrites[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ descriptorWrites[0].pNext = nullptr;
+ descriptorWrites[0].dstSet = fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet];
+ descriptorWrites[0].dstBinding = GrVkUniformHandler::kVertexBinding;
+ descriptorWrites[0].dstArrayElement = 0;
+ descriptorWrites[0].descriptorCount = 1;
+ descriptorWrites[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ descriptorWrites[0].pImageInfo = nullptr;
+ descriptorWrites[0].pBufferInfo = &vertBufferInfo;
+ descriptorWrites[0].pTexelBufferView = nullptr;
+ }
+
+ VkDescriptorBufferInfo fragBufferInfo;
+ // Fragment Uniform Buffer
+ if (fFragmentUniformBuffer.get()) {
+ if (0 == uniformBindingUpdateCount) {
+ firstUniformWrite = 1;
+ }
+ ++uniformBindingUpdateCount;
+ memset(&fragBufferInfo, 0, sizeof(VkDescriptorBufferInfo));
+ fragBufferInfo.buffer = fFragmentUniformBuffer->buffer();
+ fragBufferInfo.offset = fFragmentUniformBuffer->offset();
+ fragBufferInfo.range = fFragmentUniformBuffer->size();
+
+ descriptorWrites[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ descriptorWrites[1].pNext = nullptr;
+ descriptorWrites[1].dstSet = fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet];
+ descriptorWrites[1].dstBinding = GrVkUniformHandler::kFragBinding;
+ descriptorWrites[1].dstArrayElement = 0;
+ descriptorWrites[1].descriptorCount = 1;
+ descriptorWrites[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ descriptorWrites[1].pImageInfo = nullptr;
+ descriptorWrites[1].pBufferInfo = &fragBufferInfo;
+ descriptorWrites[1].pTexelBufferView = nullptr;
+ }
+
+ if (uniformBindingUpdateCount) {
+ GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(),
+ uniformBindingUpdateCount,
+ &descriptorWrites[firstUniformWrite],
+ 0, nullptr));
+ }
+}
+
+void GrVkPipelineState::writeSamplers(GrVkGpu* gpu,
+ const SkTArray<const GrTextureAccess*>& textureBindings,
+ bool allowSRGBInputs) {
+ SkASSERT(fNumSamplers == textureBindings.count());
+
+ for (int i = 0; i < textureBindings.count(); ++i) {
+ const GrTextureParams& params = textureBindings[i]->getParams();
+
+ GrVkTexture* texture = static_cast<GrVkTexture*>(textureBindings[i]->getTexture());
+
+ fSamplers.push(gpu->resourceProvider().findOrCreateCompatibleSampler(params,
+ texture->texturePriv().maxMipMapLevel()));
+
+ const GrVkResource* textureResource = texture->resource();
+ textureResource->ref();
+ fTextures.push(textureResource);
+
+ const GrVkImageView* textureView = texture->textureView(allowSRGBInputs);
+ textureView->ref();
+ fTextureViews.push(textureView);
+
+ VkDescriptorImageInfo imageInfo;
+ memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
+ imageInfo.sampler = fSamplers[i]->sampler();
+ imageInfo.imageView = textureView->imageView();
+ imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+ VkWriteDescriptorSet writeInfo;
+ memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
+ writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ writeInfo.pNext = nullptr;
+ writeInfo.dstSet = fDescriptorSets[GrVkUniformHandler::kSamplerDescSet];
+ writeInfo.dstBinding = i;
+ writeInfo.dstArrayElement = 0;
+ writeInfo.descriptorCount = 1;
+ writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ writeInfo.pImageInfo = &imageInfo;
+ writeInfo.pBufferInfo = nullptr;
+ writeInfo.pTexelBufferView = nullptr;
+
+ GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(),
+ 1,
+ &writeInfo,
+ 0,
+ nullptr));
+ }
+}
+
+void GrVkPipelineState::setRenderTargetState(const GrPipeline& pipeline) {
+ // Load the RT height uniform if it is needed to y-flip gl_FragCoord.
+ if (fBuiltinUniformHandles.fRTHeightUni.isValid() &&
+ fRenderTargetState.fRenderTargetSize.fHeight != pipeline.getRenderTarget()->height()) {
+ fDataManager.set1f(fBuiltinUniformHandles.fRTHeightUni,
+ SkIntToScalar(pipeline.getRenderTarget()->height()));
+ }
+
+ // set RT adjustment
+ const GrRenderTarget* rt = pipeline.getRenderTarget();
+ SkISize size;
+ size.set(rt->width(), rt->height());
+ SkASSERT(fBuiltinUniformHandles.fRTAdjustmentUni.isValid());
+ if (fRenderTargetState.fRenderTargetOrigin != rt->origin() ||
+ fRenderTargetState.fRenderTargetSize != size) {
+ fRenderTargetState.fRenderTargetSize = size;
+ fRenderTargetState.fRenderTargetOrigin = rt->origin();
+
+ float rtAdjustmentVec[4];
+ fRenderTargetState.getRTAdjustmentVec(rtAdjustmentVec);
+ fDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, rtAdjustmentVec);
+ }
+}
+
+void GrVkPipelineState::bind(const GrVkGpu* gpu, GrVkCommandBuffer* commandBuffer) {
+ commandBuffer->bindPipeline(gpu, fPipeline);
+
+ if (fDSCount) {
+ commandBuffer->bindDescriptorSets(gpu, this, fPipelineLayout, fStartDS, fDSCount,
+ &fDescriptorSets[fStartDS], 0, nullptr);
+ }
+}
+
+void GrVkPipelineState::addUniformResources(GrVkCommandBuffer& commandBuffer) {
+ if (fUniformDescriptorSet) {
+ commandBuffer.addRecycledResource(fUniformDescriptorSet);
+ }
+ if (fSamplerDescriptorSet) {
+ commandBuffer.addRecycledResource(fSamplerDescriptorSet);
+ }
+
+ if (fVertexUniformBuffer.get()) {
+ commandBuffer.addRecycledResource(fVertexUniformBuffer->resource());
+ }
+ if (fFragmentUniformBuffer.get()) {
+ commandBuffer.addRecycledResource(fFragmentUniformBuffer->resource());
+ }
+
+ for (int i = 0; i < fSamplers.count(); ++i) {
+ commandBuffer.addResource(fSamplers[i]);
+ }
+
+ for (int i = 0; i < fTextureViews.count(); ++i) {
+ commandBuffer.addResource(fTextureViews[i]);
+ }
+
+ for (int i = 0; i < fTextures.count(); ++i) {
+ commandBuffer.addResource(fTextures[i]);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrVkPipelineState::DescriptorPoolManager::getNewPool(GrVkGpu* gpu) {
+ if (fPool) {
+ fPool->unref(gpu);
+ uint32_t newPoolSize = fMaxDescriptors + ((fMaxDescriptors + 1) >> 1);
+ if (newPoolSize < kMaxDescLimit) {
+ fMaxDescriptors = newPoolSize;
+ } else {
+ fMaxDescriptors = kMaxDescLimit;
+ }
+
+ }
+ if (fMaxDescriptors) {
+ fPool = gpu->resourceProvider().findOrCreateCompatibleDescriptorPool(fDescType,
+ fMaxDescriptors);
+ }
+ SkASSERT(fPool || !fMaxDescriptors);
+}
+
+void GrVkPipelineState::DescriptorPoolManager::getNewDescriptorSet(GrVkGpu* gpu,
+ VkDescriptorSet* ds) {
+ if (!fMaxDescriptors) {
+ return;
+ }
+ fCurrentDescriptorCount += fDescCountPerSet;
+ if (fCurrentDescriptorCount > fMaxDescriptors) {
+ this->getNewPool(gpu);
+ fCurrentDescriptorCount = fDescCountPerSet;
+ }
+
+ VkDescriptorSetAllocateInfo dsAllocateInfo;
+ memset(&dsAllocateInfo, 0, sizeof(VkDescriptorSetAllocateInfo));
+ dsAllocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+ dsAllocateInfo.pNext = nullptr;
+ dsAllocateInfo.descriptorPool = fPool->descPool();
+ dsAllocateInfo.descriptorSetCount = 1;
+ dsAllocateInfo.pSetLayouts = &fDescLayout;
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), AllocateDescriptorSets(gpu->device(),
+ &dsAllocateInfo,
+ ds));
+}
+
+void GrVkPipelineState::DescriptorPoolManager::freeGPUResources(const GrVkGpu* gpu) {
+ if (fDescLayout) {
+ GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorSetLayout(gpu->device(), fDescLayout,
+ nullptr));
+ fDescLayout = VK_NULL_HANDLE;
+ }
+
+ if (fPool) {
+ fPool->unref(gpu);
+ fPool = nullptr;
+ }
+}
+
+void GrVkPipelineState::DescriptorPoolManager::abandonGPUResources() {
+ fDescLayout = VK_NULL_HANDLE;
+ if (fPool) {
+ fPool->unrefAndAbandon();
+ fPool = nullptr;
+ }
+}
+
+uint32_t get_blend_info_key(const GrPipeline& pipeline) {
+ GrXferProcessor::BlendInfo blendInfo;
+ pipeline.getXferProcessor().getBlendInfo(&blendInfo);
+
+ static const uint32_t kBlendWriteShift = 1;
+ static const uint32_t kBlendCoeffShift = 5;
+ GR_STATIC_ASSERT(kLast_GrBlendCoeff < (1 << kBlendCoeffShift));
+ GR_STATIC_ASSERT(kFirstAdvancedGrBlendEquation - 1 < 4);
+
+ uint32_t key = blendInfo.fWriteColor;
+ key |= (blendInfo.fSrcBlend << kBlendWriteShift);
+ key |= (blendInfo.fDstBlend << (kBlendWriteShift + kBlendCoeffShift));
+ key |= (blendInfo.fEquation << (kBlendWriteShift + 2 * kBlendCoeffShift));
+
+ return key;
+}
+
+bool GrVkPipelineState::Desc::Build(Desc* desc,
+ const GrPrimitiveProcessor& primProc,
+ const GrPipeline& pipeline,
+ GrPrimitiveType primitiveType,
+ const GrGLSLCaps& caps) {
+ if (!INHERITED::Build(desc, primProc, primitiveType == kPoints_GrPrimitiveType, pipeline,
+ caps)) {
+ return false;
+ }
+
+ GrProcessorKeyBuilder b(&desc->key());
+ GrVkRenderTarget* vkRT = (GrVkRenderTarget*)pipeline.getRenderTarget();
+ vkRT->simpleRenderPass()->genKey(&b);
+
+ pipeline.getStencil().genKey(&b);
+
+ SkASSERT(sizeof(GrDrawFace) <= sizeof(uint32_t));
+ b.add32((int32_t)pipeline.getDrawFace());
+
+ b.add32(get_blend_info_key(pipeline));
+
+ b.add32(primitiveType);
+
+ return true;
+}
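
As a worked illustration of get_blend_info_key above (the field values are schematic, not taken from the patch), the 32-bit key packs the blend state as one write-color bit followed by two 5-bit coefficient fields and the equation:

    // bit 0       : blendInfo.fWriteColor
    // bits 1 - 5  : blendInfo.fSrcBlend    (kBlendCoeffShift == 5)
    // bits 6 - 10 : blendInfo.fDstBlend
    // bits 11+    : blendInfo.fEquation
    uint32_t key = writeColor | (srcBlend << 1) | (dstBlend << 6) | (equation << 11);
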
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkPipelineState.h b/gfx/skia/skia/src/gpu/vk/GrVkPipelineState.h
new file mode 100644
index 000000000..d15fef487
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkPipelineState.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrVkPipelineState_DEFINED
+#define GrVkPipelineState_DEFINED
+
+#include "GrProgramDesc.h"
+#include "GrStencilSettings.h"
+#include "GrVkDescriptorSetManager.h"
+#include "GrVkImage.h"
+#include "GrVkPipelineStateDataManager.h"
+#include "glsl/GrGLSLProgramBuilder.h"
+
+#include "vk/GrVkDefines.h"
+
+class GrPipeline;
+class GrVkCommandBuffer;
+class GrVkDescriptorPool;
+class GrVkDescriptorSet;
+class GrVkGpu;
+class GrVkImageView;
+class GrVkPipeline;
+class GrVkSampler;
+class GrVkUniformBuffer;
+
+/**
+ * This class holds onto a GrVkPipeline object that we use for draws. Besides storing the actual
+ * GrVkPipeline object, this class is also responsible for handling all uniforms, descriptors, samplers,
+ * and other similar objects that are used along with the VkPipeline in the draw. This includes both
+ * allocating and freeing these objects, as well as updating their values.
+ */
+class GrVkPipelineState : public SkRefCnt {
+public:
+ typedef GrGLSLProgramBuilder::BuiltinUniformHandles BuiltinUniformHandles;
+
+ ~GrVkPipelineState();
+
+ GrVkPipeline* vkPipeline() const { return fPipeline; }
+
+ void setData(GrVkGpu*, const GrPrimitiveProcessor&, const GrPipeline&);
+
+ void bind(const GrVkGpu* gpu, GrVkCommandBuffer* commandBuffer);
+
+ void addUniformResources(GrVkCommandBuffer&);
+
+ void freeGPUResources(const GrVkGpu* gpu);
+
+ // This releases resources that only a given instance of a GrVkPipelineState needs to hold onto
+ // and that do not need to survive across new uses of the GrVkPipelineState.
+ void freeTempResources(const GrVkGpu* gpu);
+
+ void abandonGPUResources();
+
+ /**
+ * For Vulkan we want to cache the entire VkPipeline for reuse across draws. The Desc here holds all
+ * the information needed to differentiate one pipeline from another.
+ *
+ * The GrProgramDesc contains all the information needed to create the actual shaders for the
+ * pipeline.
+ *
+ * For Vulkan we need to add to the GrProgramDesc to include the rest of the state on the
+ * pipeline. This includes stencil settings, blending information, render pass format, draw face
+ * information, and primitive type. Note that some state is set dynamically on the pipeline for
+ * each draw and thus is not included in this descriptor. This includes the viewport, scissor,
+ * and blend constant.
+ */
+ class Desc : public GrProgramDesc {
+ public:
+ static bool Build(Desc*,
+ const GrPrimitiveProcessor&,
+ const GrPipeline&,
+ GrPrimitiveType primitiveType,
+ const GrGLSLCaps&);
+ private:
+ typedef GrProgramDesc INHERITED;
+ };
+
+ const Desc& getDesc() { return fDesc; }
+
+private:
+ typedef GrVkPipelineStateDataManager::UniformInfoArray UniformInfoArray;
+ typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+
+ GrVkPipelineState(GrVkGpu* gpu,
+ const GrVkPipelineState::Desc&,
+ GrVkPipeline* pipeline,
+ VkPipelineLayout layout,
+ const GrVkDescriptorSetManager::Handle& samplerDSHandle,
+ const BuiltinUniformHandles& builtinUniformHandles,
+ const UniformInfoArray& uniforms,
+ uint32_t vertexUniformSize,
+ uint32_t fragmentUniformSize,
+ uint32_t numSamplers,
+ GrGLSLPrimitiveProcessor* geometryProcessor,
+ GrGLSLXferProcessor* xferProcessor,
+ const GrGLSLFragProcs& fragmentProcessors);
+
+ // Each pool will manage one type of descriptor. Thus all descriptors in a given set will be of
+ // one VkDescriptorType.
+ struct DescriptorPoolManager {
+ DescriptorPoolManager(VkDescriptorSetLayout layout, VkDescriptorType type,
+ uint32_t descCount, GrVkGpu* gpu)
+ : fDescLayout(layout)
+ , fDescType(type)
+ , fDescCountPerSet(descCount)
+ , fCurrentDescriptorCount(0)
+ , fPool(nullptr) {
+ SkASSERT(descCount < kMaxDescLimit >> 2);
+ fMaxDescriptors = fDescCountPerSet << 2;
+ this->getNewPool(gpu);
+ }
+
+ ~DescriptorPoolManager() {
+ SkASSERT(!fDescLayout);
+ SkASSERT(!fPool);
+ }
+
+ void getNewDescriptorSet(GrVkGpu* gpu, VkDescriptorSet* ds);
+
+ void freeGPUResources(const GrVkGpu* gpu);
+ void abandonGPUResources();
+
+ VkDescriptorSetLayout fDescLayout;
+ VkDescriptorType fDescType;
+ uint32_t fDescCountPerSet;
+ uint32_t fMaxDescriptors;
+ uint32_t fCurrentDescriptorCount;
+ GrVkDescriptorPool* fPool;
+
+ private:
+ static const uint32_t kMaxDescLimit = 1 << 10;
+
+ void getNewPool(GrVkGpu* gpu);
+ };
+
+ void writeUniformBuffers(const GrVkGpu* gpu);
+
+ void writeSamplers(GrVkGpu* gpu, const SkTArray<const GrTextureAccess*>& textureBindings,
+ bool allowSRGBInputs);
+
+ /**
+ * We use the RT's size and origin to adjust from Skia device space to Vulkan normalized device
+ * space and to make device space positions have the correct origin for processors that require
+ * them.
+ */
+ struct RenderTargetState {
+ SkISize fRenderTargetSize;
+ GrSurfaceOrigin fRenderTargetOrigin;
+
+ RenderTargetState() { this->invalidate(); }
+ void invalidate() {
+ fRenderTargetSize.fWidth = -1;
+ fRenderTargetSize.fHeight = -1;
+ fRenderTargetOrigin = (GrSurfaceOrigin)-1;
+ }
+
+ /**
+ * Gets a vec4 that adjusts the position from Skia device coords to Vulkan's normalized device
+ * coords. Assuming the transformed position, pos, is a homogeneous vec3, the vec, v, is
+ * applied as such:
+ * pos.x = dot(v.xy, pos.xz)
+ * pos.y = dot(v.zw, pos.yz)
+ */
+ void getRTAdjustmentVec(float* destVec) {
+ destVec[0] = 2.f / fRenderTargetSize.fWidth;
+ destVec[1] = -1.f;
+ if (kBottomLeft_GrSurfaceOrigin == fRenderTargetOrigin) {
+ destVec[2] = -2.f / fRenderTargetSize.fHeight;
+ destVec[3] = 1.f;
+ } else {
+ destVec[2] = 2.f / fRenderTargetSize.fHeight;
+ destVec[3] = -1.f;
+ }
+ }
+ };
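
As a concrete example of getRTAdjustmentVec (illustrative numbers only): a 200x100 render target with kBottomLeft_GrSurfaceOrigin produces destVec = { 2/200, -1, -2/100, 1 } = { 0.01, -1, -0.02, 1 }. Applied as documented above with w == 1, the Skia device-space corner (0, 0) maps to Vulkan NDC (0.01*0 + -1*1, -0.02*0 + 1*1) = (-1, +1), the bottom-left of the viewport, while (200, 100) maps to (+1, -1), the top-right.
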
+
+ // Helper for setData() that sets the view matrix and loads the render target height uniform
+ void setRenderTargetState(const GrPipeline&);
+
+ // GrVkResources
+ GrVkPipeline* fPipeline;
+
+ // Used for binding DescriptorSets to the command buffer but does not need to survive during
+ // command buffer execution. Thus this does not need to be a GrVkResource.
+ VkPipelineLayout fPipelineLayout;
+
+ // The DescriptorSets need to survive until the gpu has finished all draws that use them.
+ // However, they will only be freed by the descriptor pool. Thus by simply keeping the
+ // descriptor pool alive through the draw, the descriptor sets will also stay alive. Thus we do
+ // not need GrVkResource versions of VkDescriptorSet. We hold on to these in the
+ // GrVkPipelineState since we update the descriptor sets and bind them at separate times.
+ VkDescriptorSet fDescriptorSets[2];
+
+ // Once we move samplers over to use the resource provider for descriptor sets we will not need
+ // the above array and instead just use GrVkDescriptorSet like the uniform one here.
+ const GrVkDescriptorSet* fUniformDescriptorSet;
+ const GrVkDescriptorSet* fSamplerDescriptorSet;
+
+ const GrVkDescriptorSetManager::Handle fSamplerDSHandle;
+
+ // Meta data so we know which descriptor sets we are using and need to bind.
+ int fStartDS;
+ int fDSCount;
+
+ SkAutoTDelete<GrVkUniformBuffer> fVertexUniformBuffer;
+ SkAutoTDelete<GrVkUniformBuffer> fFragmentUniformBuffer;
+
+ // GrVkResources used for sampling textures
+ SkTDArray<GrVkSampler*> fSamplers;
+ SkTDArray<const GrVkImageView*> fTextureViews;
+ SkTDArray<const GrVkResource*> fTextures;
+
+ // Tracks the current render target uniforms stored in the vertex buffer.
+ RenderTargetState fRenderTargetState;
+ BuiltinUniformHandles fBuiltinUniformHandles;
+
+ // Processors in the GrVkPipelineState
+ SkAutoTDelete<GrGLSLPrimitiveProcessor> fGeometryProcessor;
+ SkAutoTDelete<GrGLSLXferProcessor> fXferProcessor;
+ GrGLSLFragProcs fFragmentProcessors;
+
+ Desc fDesc;
+
+ GrVkPipelineStateDataManager fDataManager;
+
+ int fNumSamplers;
+
+ friend class GrVkPipelineStateBuilder;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateBuilder.cpp b/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateBuilder.cpp
new file mode 100644
index 000000000..69ae4a498
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateBuilder.cpp
@@ -0,0 +1,169 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "vk/GrVkPipelineStateBuilder.h"
+
+#include "vk/GrVkDescriptorSetManager.h"
+#include "vk/GrVkGpu.h"
+#include "vk/GrVkRenderPass.h"
+
+GrVkPipelineState* GrVkPipelineStateBuilder::CreatePipelineState(
+ GrVkGpu* gpu,
+ const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ GrPrimitiveType primitiveType,
+ const GrVkPipelineState::Desc& desc,
+ const GrVkRenderPass& renderPass) {
+ // create a builder. This will be handed off to effects so they can use it to add
+ // uniforms, varyings, textures, etc
+ GrVkPipelineStateBuilder builder(gpu, pipeline, primProc, desc);
+
+ GrGLSLExpr4 inputColor;
+ GrGLSLExpr4 inputCoverage;
+
+ if (!builder.emitAndInstallProcs(&inputColor, &inputCoverage)) {
+ builder.cleanupFragmentProcessors();
+ return nullptr;
+ }
+
+ return builder.finalize(primitiveType, renderPass, desc);
+}
+
+GrVkPipelineStateBuilder::GrVkPipelineStateBuilder(GrVkGpu* gpu,
+ const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ const GrProgramDesc& desc)
+ : INHERITED(pipeline, primProc, desc)
+ , fGpu(gpu)
+ , fVaryingHandler(this)
+ , fUniformHandler(this) {
+}
+
+const GrCaps* GrVkPipelineStateBuilder::caps() const {
+ return fGpu->caps();
+}
+const GrGLSLCaps* GrVkPipelineStateBuilder::glslCaps() const {
+ return fGpu->vkCaps().glslCaps();
+}
+
+void GrVkPipelineStateBuilder::finalizeFragmentOutputColor(GrGLSLShaderVar& outputColor) {
+ outputColor.setLayoutQualifier("location = 0, index = 0");
+}
+
+void GrVkPipelineStateBuilder::finalizeFragmentSecondaryColor(GrGLSLShaderVar& outputColor) {
+ outputColor.setLayoutQualifier("location = 0, index = 1");
+}
+
+bool GrVkPipelineStateBuilder::CreateVkShaderModule(const GrVkGpu* gpu,
+ VkShaderStageFlagBits stage,
+ const GrGLSLShaderBuilder& builder,
+ VkShaderModule* shaderModule,
+ VkPipelineShaderStageCreateInfo* stageInfo) {
+ SkString shaderString;
+ for (int i = 0; i < builder.fCompilerStrings.count(); ++i) {
+ if (builder.fCompilerStrings[i]) {
+ shaderString.append(builder.fCompilerStrings[i]);
+ shaderString.append("\n");
+ }
+ }
+ return GrCompileVkShaderModule(gpu, shaderString.c_str(), stage, shaderModule, stageInfo);
+}
+
+GrVkPipelineState* GrVkPipelineStateBuilder::finalize(GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass,
+ const GrVkPipelineState::Desc& desc) {
+ VkDescriptorSetLayout dsLayout[2];
+ VkPipelineLayout pipelineLayout;
+ VkShaderModule vertShaderModule;
+ VkShaderModule fragShaderModule;
+
+ GrVkResourceProvider& resourceProvider = fGpu->resourceProvider();
+ // This layout is not owned by the PipelineStateBuilder and thus should not be destroyed
+ dsLayout[GrVkUniformHandler::kUniformBufferDescSet] = resourceProvider.getUniformDSLayout();
+
+ GrVkDescriptorSetManager::Handle samplerDSHandle;
+ resourceProvider.getSamplerDescriptorSetHandle(fUniformHandler, &samplerDSHandle);
+ dsLayout[GrVkUniformHandler::kSamplerDescSet] =
+ resourceProvider.getSamplerDSLayout(samplerDSHandle);
+
+ // Create the VkPipelineLayout
+ VkPipelineLayoutCreateInfo layoutCreateInfo;
+ memset(&layoutCreateInfo, 0, sizeof(VkPipelineLayoutCreateInfo));
+ layoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+ layoutCreateInfo.pNext = nullptr;
+ layoutCreateInfo.flags = 0;
+ layoutCreateInfo.setLayoutCount = 2;
+ layoutCreateInfo.pSetLayouts = dsLayout;
+ layoutCreateInfo.pushConstantRangeCount = 0;
+ layoutCreateInfo.pPushConstantRanges = nullptr;
+
+ GR_VK_CALL_ERRCHECK(fGpu->vkInterface(), CreatePipelineLayout(fGpu->device(),
+ &layoutCreateInfo,
+ nullptr,
+ &pipelineLayout));
+
+ // We need to enable the following extensions so that the compiler can correctly make SPIR-V
+ // from our GLSL shaders.
+ fVS.extensions().appendf("#extension GL_ARB_separate_shader_objects : enable\n");
+ fFS.extensions().appendf("#extension GL_ARB_separate_shader_objects : enable\n");
+ fVS.extensions().appendf("#extension GL_ARB_shading_language_420pack : enable\n");
+ fFS.extensions().appendf("#extension GL_ARB_shading_language_420pack : enable\n");
+
+ this->finalizeShaders();
+
+ VkPipelineShaderStageCreateInfo shaderStageInfo[2];
+ SkAssertResult(CreateVkShaderModule(fGpu,
+ VK_SHADER_STAGE_VERTEX_BIT,
+ fVS,
+ &vertShaderModule,
+ &shaderStageInfo[0]));
+
+ SkAssertResult(CreateVkShaderModule(fGpu,
+ VK_SHADER_STAGE_FRAGMENT_BIT,
+ fFS,
+ &fragShaderModule,
+ &shaderStageInfo[1]));
+
+ GrVkPipeline* pipeline = resourceProvider.createPipeline(fPipeline,
+ fPrimProc,
+ shaderStageInfo,
+ 2,
+ primitiveType,
+ renderPass,
+ pipelineLayout);
+ GR_VK_CALL(fGpu->vkInterface(), DestroyShaderModule(fGpu->device(), vertShaderModule,
+ nullptr));
+ GR_VK_CALL(fGpu->vkInterface(), DestroyShaderModule(fGpu->device(), fragShaderModule,
+ nullptr));
+
+ if (!pipeline) {
+ GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineLayout(fGpu->device(), pipelineLayout,
+ nullptr));
+ GR_VK_CALL(fGpu->vkInterface(),
+ DestroyDescriptorSetLayout(fGpu->device(),
+ dsLayout[GrVkUniformHandler::kSamplerDescSet],
+ nullptr));
+
+ this->cleanupFragmentProcessors();
+ return nullptr;
+ }
+
+ return new GrVkPipelineState(fGpu,
+ desc,
+ pipeline,
+ pipelineLayout,
+ samplerDSHandle,
+ fUniformHandles,
+ fUniformHandler.fUniforms,
+ fUniformHandler.fCurrentVertexUBOOffset,
+ fUniformHandler.fCurrentFragmentUBOOffset,
+ (uint32_t)fUniformHandler.numSamplers(),
+ fGeometryProcessor,
+ fXferProcessor,
+ fFragmentProcessors);
+}
+
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateBuilder.h b/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateBuilder.h
new file mode 100644
index 000000000..c887e36d4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateBuilder.h
@@ -0,0 +1,74 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkPipelineStateBuilder_DEFINED
+#define GrVkPipelineStateBuilder_DEFINED
+
+#include "glsl/GrGLSLProgramBuilder.h"
+
+#include "GrPipeline.h"
+#include "GrVkPipelineState.h"
+#include "GrVkUniformHandler.h"
+#include "GrVkVaryingHandler.h"
+
+#include "vk/GrVkDefines.h"
+
+class GrProgramDesc;
+class GrVkGpu;
+class GrVkRenderPass;
+
+class GrVkPipelineStateBuilder : public GrGLSLProgramBuilder {
+public:
+ /** Generates a pipeline state.
+ *
+ * The GrVkPipelineState implements what is specified in the GrPipeline and GrPrimitiveProcessor
+ * as input. After successful generation, the builder result objects are available to be used.
+ * @return the created GrVkPipelineState if generation was successful; otherwise nullptr.
+ */
+ static GrVkPipelineState* CreatePipelineState(GrVkGpu*,
+ const GrPipeline&,
+ const GrPrimitiveProcessor&,
+ GrPrimitiveType,
+ const GrVkPipelineState::Desc&,
+ const GrVkRenderPass& renderPass);
+
+ const GrCaps* caps() const override;
+ const GrGLSLCaps* glslCaps() const override;
+
+ GrVkGpu* gpu() const { return fGpu; }
+
+ void finalizeFragmentOutputColor(GrGLSLShaderVar& outputColor) override;
+ void finalizeFragmentSecondaryColor(GrGLSLShaderVar& outputColor) override;
+
+private:
+ GrVkPipelineStateBuilder(GrVkGpu*,
+ const GrPipeline&,
+ const GrPrimitiveProcessor&,
+ const GrProgramDesc&);
+
+ GrVkPipelineState* finalize(GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass,
+ const GrVkPipelineState::Desc&);
+
+ static bool CreateVkShaderModule(const GrVkGpu* gpu,
+ VkShaderStageFlagBits stage,
+ const GrGLSLShaderBuilder& builder,
+ VkShaderModule* shaderModule,
+ VkPipelineShaderStageCreateInfo* stageInfo);
+
+ GrGLSLUniformHandler* uniformHandler() override { return &fUniformHandler; }
+ const GrGLSLUniformHandler* uniformHandler() const override { return &fUniformHandler; }
+ GrGLSLVaryingHandler* varyingHandler() override { return &fVaryingHandler; }
+
+ GrVkGpu* fGpu;
+ GrVkVaryingHandler fVaryingHandler;
+ GrVkUniformHandler fUniformHandler;
+
+ typedef GrGLSLProgramBuilder INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateCache.cpp b/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateCache.cpp
new file mode 100644
index 000000000..2e6a85bb2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateCache.cpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkResourceProvider.h"
+
+#include "GrVkGpu.h"
+#include "GrProcessor.h"
+#include "GrVkPipelineState.h"
+#include "GrVkPipelineStateBuilder.h"
+#include "SkOpts.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLProgramDataManager.h"
+
+#ifdef GR_PIPELINE_STATE_CACHE_STATS
+// Display pipeline state cache usage
+static const bool c_DisplayVkPipelineCache{false};
+#endif
+
+struct GrVkResourceProvider::PipelineStateCache::Entry {
+
+ Entry() : fPipelineState(nullptr) {}
+
+ static const GrVkPipelineState::Desc& GetKey(const Entry* entry) {
+ return entry->fPipelineState->getDesc();
+ }
+
+ static uint32_t Hash(const GrVkPipelineState::Desc& key) {
+ return key.getChecksum();
+ }
+
+ sk_sp<GrVkPipelineState> fPipelineState;
+
+private:
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(Entry);
+};
+
+GrVkResourceProvider::PipelineStateCache::PipelineStateCache(GrVkGpu* gpu)
+ : fCount(0)
+ , fGpu(gpu)
+#ifdef GR_PIPELINE_STATE_CACHE_STATS
+ , fTotalRequests(0)
+ , fCacheMisses(0)
+#endif
+{}
+
+GrVkResourceProvider::PipelineStateCache::~PipelineStateCache() {
+ SkASSERT(0 == fCount);
+ // dump stats
+#ifdef GR_PIPELINE_STATE_CACHE_STATS
+ if (c_DisplayVkPipelineCache) {
+ SkDebugf("--- Pipeline State Cache ---\n");
+ SkDebugf("Total requests: %d\n", fTotalRequests);
+ SkDebugf("Cache misses: %d\n", fCacheMisses);
+ SkDebugf("Cache miss %%: %f\n", (fTotalRequests > 0) ?
+ 100.f * fCacheMisses / fTotalRequests :
+ 0.f);
+ SkDebugf("---------------------\n");
+ }
+#endif
+}
+
+void GrVkResourceProvider::PipelineStateCache::reset() {
+ fHashTable.foreach([](Entry** entry) {
+ delete *entry;
+ });
+ fHashTable.reset();
+ fCount = 0;
+}
+
+void GrVkResourceProvider::PipelineStateCache::abandon() {
+ fHashTable.foreach([](Entry** entry) {
+ SkASSERT((*entry)->fPipelineState.get());
+ (*entry)->fPipelineState->abandonGPUResources();
+ });
+
+ this->reset();
+}
+
+void GrVkResourceProvider::PipelineStateCache::release() {
+ fHashTable.foreach([this](Entry** entry) {
+ SkASSERT((*entry)->fPipelineState.get());
+ (*entry)->fPipelineState->freeGPUResources(fGpu);
+ });
+
+ this->reset();
+}
+
+sk_sp<GrVkPipelineState> GrVkResourceProvider::PipelineStateCache::refPipelineState(
+ const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass) {
+#ifdef GR_PIPELINE_STATE_CACHE_STATS
+ ++fTotalRequests;
+#endif
+ // Get GrVkProgramDesc
+ GrVkPipelineState::Desc desc;
+ if (!GrVkPipelineState::Desc::Build(&desc, primProc, pipeline, primitiveType,
+ *fGpu->vkCaps().glslCaps())) {
+ GrCapsDebugf(fGpu->caps(), "Failed to build vk program descriptor!\n");
+ return nullptr;
+ }
+ desc.finalize();
+
+ Entry* entry = nullptr;
+ if (Entry** entryptr = fHashTable.find(desc)) {
+ SkASSERT(*entryptr);
+ entry = *entryptr;
+ }
+ if (!entry) {
+#ifdef GR_PIPELINE_STATE_CACHE_STATS
+ ++fCacheMisses;
+#endif
+ sk_sp<GrVkPipelineState> pipelineState(
+ GrVkPipelineStateBuilder::CreatePipelineState(fGpu,
+ pipeline,
+ primProc,
+ primitiveType,
+ desc,
+ renderPass));
+ if (nullptr == pipelineState) {
+ return nullptr;
+ }
+ if (fCount < kMaxEntries) {
+ entry = new Entry;
+ fCount++;
+ } else {
+ SkASSERT(fCount == kMaxEntries);
+ entry = fLRUList.head();
+ fLRUList.remove(entry);
+ entry->fPipelineState->freeGPUResources(fGpu);
+ fHashTable.remove(entry->fPipelineState->getDesc());
+ }
+ entry->fPipelineState = std::move(pipelineState);
+ fHashTable.set(entry);
+ fLRUList.addToTail(entry);
+ return entry->fPipelineState;
+ } else {
+ fLRUList.remove(entry);
+ fLRUList.addToTail(entry);
+ }
+ return entry->fPipelineState;
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateDataManager.cpp b/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateDataManager.cpp
new file mode 100644
index 000000000..ef75bd3b9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateDataManager.cpp
@@ -0,0 +1,286 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkPipelineStateDataManager.h"
+
+#include "GrVkGpu.h"
+#include "GrVkUniformBuffer.h"
+
+GrVkPipelineStateDataManager::GrVkPipelineStateDataManager(const UniformInfoArray& uniforms,
+ uint32_t vertexUniformSize,
+ uint32_t fragmentUniformSize)
+ : fVertexUniformSize(vertexUniformSize)
+ , fFragmentUniformSize(fragmentUniformSize)
+ , fVertexUniformsDirty(false)
+ , fFragmentUniformsDirty(false) {
+ fVertexUniformData.reset(vertexUniformSize);
+ fFragmentUniformData.reset(fragmentUniformSize);
+ int count = uniforms.count();
+ fUniforms.push_back_n(count);
+ // We must add uniforms in the same order as the UniformInfoArray so that UniformHandles already
+ // owned by other objects will still match up here.
+ for (int i = 0; i < count; i++) {
+ Uniform& uniform = fUniforms[i];
+ const GrVkUniformHandler::UniformInfo uniformInfo = uniforms[i];
+ SkASSERT(GrGLSLShaderVar::kNonArray == uniformInfo.fVariable.getArrayCount() ||
+ uniformInfo.fVariable.getArrayCount() > 0);
+ SkDEBUGCODE(
+ uniform.fArrayCount = uniformInfo.fVariable.getArrayCount();
+ uniform.fType = uniformInfo.fVariable.getType();
+ );
+ uniform.fBinding =
+ (kVertex_GrShaderFlag == uniformInfo.fVisibility) ? GrVkUniformHandler::kVertexBinding
+ : GrVkUniformHandler::kFragBinding;
+ uniform.fOffset = uniformInfo.fUBOffset;
+ }
+}
+
+void* GrVkPipelineStateDataManager::getBufferPtrAndMarkDirty(const Uniform& uni) const {
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ fVertexUniformsDirty = true;
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ fFragmentUniformsDirty = true;
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ return buffer;
+}
+
+void GrVkPipelineStateDataManager::set1i(UniformHandle u, int32_t i) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ memcpy(buffer, &i, sizeof(int32_t));
+}
+
+void GrVkPipelineStateDataManager::set1iv(UniformHandle u,
+ int arrayCount,
+ const int32_t v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kInt_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrGLSLShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(int32_t) == 4);
+ for (int i = 0; i < arrayCount; ++i) {
+ const int32_t* curVec = &v[i];
+ memcpy(buffer, curVec, sizeof(int32_t));
+ buffer = static_cast<char*>(buffer) + 4*sizeof(int32_t);
+ }
+}
+
+void GrVkPipelineStateDataManager::set1f(UniformHandle u, float v0) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, &v0, sizeof(float));
+}
+
+void GrVkPipelineStateDataManager::set1fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrGLSLShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ for (int i = 0; i < arrayCount; ++i) {
+ const float* curVec = &v[i];
+ memcpy(buffer, curVec, sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4*sizeof(float);
+ }
+}
+
+void GrVkPipelineStateDataManager::set2f(UniformHandle u, float v0, float v1) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec2f_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ float v[2] = { v0, v1 };
+ memcpy(buffer, v, 2 * sizeof(float));
+}
+
+void GrVkPipelineStateDataManager::set2fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec2f_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrGLSLShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ for (int i = 0; i < arrayCount; ++i) {
+ const float* curVec = &v[2 * i];
+ memcpy(buffer, curVec, 2 * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4*sizeof(float);
+ }
+}
+
+void GrVkPipelineStateDataManager::set3f(UniformHandle u, float v0, float v1, float v2) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec3f_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ float v[3] = { v0, v1, v2 };
+ memcpy(buffer, v, 3 * sizeof(float));
+}
+
+void GrVkPipelineStateDataManager::set3fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec3f_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrGLSLShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ for (int i = 0; i < arrayCount; ++i) {
+ const float* curVec = &v[3 * i];
+ memcpy(buffer, curVec, 3 * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4*sizeof(float);
+ }
+}
+
+void GrVkPipelineStateDataManager::set4f(UniformHandle u,
+ float v0,
+ float v1,
+ float v2,
+ float v3) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec4f_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ float v[4] = { v0, v1, v2, v3 };
+ memcpy(buffer, v, 4 * sizeof(float));
+}
+
+void GrVkPipelineStateDataManager::set4fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec4f_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrGLSLShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer = this->getBufferPtrAndMarkDirty(uni);
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, v, arrayCount * 4 * sizeof(float));
+}
+
+void GrVkPipelineStateDataManager::setMatrix2f(UniformHandle u, const float matrix[]) const {
+ this->setMatrices<2>(u, 1, matrix);
+}
+
+void GrVkPipelineStateDataManager::setMatrix2fv(UniformHandle u,
+ int arrayCount,
+ const float m[]) const {
+ this->setMatrices<2>(u, arrayCount, m);
+}
+
+void GrVkPipelineStateDataManager::setMatrix3f(UniformHandle u, const float matrix[]) const {
+ this->setMatrices<3>(u, 1, matrix);
+}
+
+void GrVkPipelineStateDataManager::setMatrix3fv(UniformHandle u,
+ int arrayCount,
+ const float m[]) const {
+ this->setMatrices<3>(u, arrayCount, m);
+}
+
+void GrVkPipelineStateDataManager::setMatrix4f(UniformHandle u, const float matrix[]) const {
+ this->setMatrices<4>(u, 1, matrix);
+}
+
+void GrVkPipelineStateDataManager::setMatrix4fv(UniformHandle u,
+ int arrayCount,
+ const float m[]) const {
+ this->setMatrices<4>(u, arrayCount, m);
+}
+
+template<int N> struct set_uniform_matrix;
+
+template<int N> inline void GrVkPipelineStateDataManager::setMatrices(UniformHandle u,
+ int arrayCount,
+ const float matrices[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kMat22f_GrSLType + (N - 2));
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrGLSLShaderVar::kNonArray == uni.fArrayCount));
+
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ fVertexUniformsDirty = true;
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ fFragmentUniformsDirty = true;
+ }
+
+ set_uniform_matrix<N>::set(buffer, uni.fOffset, arrayCount, matrices);
+}
+
+template<int N> struct set_uniform_matrix {
+ inline static void set(void* buffer, int uniformOffset, int count, const float matrices[]) {
+ GR_STATIC_ASSERT(sizeof(float) == 4);
+ buffer = static_cast<char*>(buffer) + uniformOffset;
+ for (int i = 0; i < count; ++i) {
+ const float* matrix = &matrices[N * N * i];
+ for (int j = 0; j < N; ++j) {
+ memcpy(buffer, &matrix[j * N], N * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4 * sizeof(float);
+ }
+ }
+ }
+};
+
+template<> struct set_uniform_matrix<4> {
+ inline static void set(void* buffer, int uniformOffset, int count, const float matrices[]) {
+ GR_STATIC_ASSERT(sizeof(float) == 4);
+ buffer = static_cast<char*>(buffer) + uniformOffset;
+ memcpy(buffer, matrices, count * 16 * sizeof(float));
+ }
+};
+
+bool GrVkPipelineStateDataManager::uploadUniformBuffers(GrVkGpu* gpu,
+ GrVkUniformBuffer* vertexBuffer,
+ GrVkUniformBuffer* fragmentBuffer) const {
+ bool updatedBuffer = false;
+ if (vertexBuffer && fVertexUniformsDirty) {
+ SkAssertResult(vertexBuffer->updateData(gpu, fVertexUniformData.get(), fVertexUniformSize,
+ &updatedBuffer));
+ fVertexUniformsDirty = false;
+ }
+
+ if (fragmentBuffer && fFragmentUniformsDirty) {
+ SkAssertResult(fragmentBuffer->updateData(gpu, fFragmentUniformData.get(),
+ fFragmentUniformSize, &updatedBuffer));
+ fFragmentUniformsDirty = false;
+ }
+ return updatedBuffer;
+}
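
To make the write pattern above concrete (a sketch, not part of the patch): the *fv setters advance the destination pointer by 4 * sizeof(float) per element, so scalar, vec2, and vec3 array elements each occupy a full 16-byte slot, and the generic set_uniform_matrix copies one column at a time into its own 16-byte slot, while the 4x4 specialization memcpys the whole array densely:

    // float v[3] via set1fv             // 3x3 matrix via setMatrix3f (column-major)
    // offset + 0x00: v[0] (12B unused)  // offset + 0x00: m[0] m[1] m[2] (4B unused)
    // offset + 0x10: v[1] (12B unused)  // offset + 0x10: m[3] m[4] m[5] (4B unused)
    // offset + 0x20: v[2] (12B unused)  // offset + 0x20: m[6] m[7] m[8] (4B unused)
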
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateDataManager.h b/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateDataManager.h
new file mode 100644
index 000000000..312c6c659
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkPipelineStateDataManager.h
@@ -0,0 +1,83 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkPipelineStateDataManager_DEFINED
+#define GrVkPipelineStateDataManager_DEFINED
+
+#include "glsl/GrGLSLProgramDataManager.h"
+
+#include "vk/GrVkUniformHandler.h"
+
+class GrVkGpu;
+class GrVkUniformBuffer;
+
+class GrVkPipelineStateDataManager : public GrGLSLProgramDataManager {
+public:
+ typedef GrVkUniformHandler::UniformInfoArray UniformInfoArray;
+
+ GrVkPipelineStateDataManager(const UniformInfoArray&,
+ uint32_t vertexUniformSize,
+ uint32_t fragmentUniformSize);
+
+ void set1i(UniformHandle, int32_t) const override;
+ void set1iv(UniformHandle, int arrayCount, const int32_t v[]) const override;
+ void set1f(UniformHandle, float v0) const override;
+ void set1fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set2f(UniformHandle, float, float) const override;
+ void set2fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set3f(UniformHandle, float, float, float) const override;
+ void set3fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set4f(UniformHandle, float, float, float, float) const override;
+ void set4fv(UniformHandle, int arrayCount, const float v[]) const override;
+ // matrices are column-major, the first three upload a single matrix, the latter three upload
+ // arrayCount matrices into a uniform array.
+ void setMatrix2f(UniformHandle, const float matrix[]) const override;
+ void setMatrix3f(UniformHandle, const float matrix[]) const override;
+ void setMatrix4f(UniformHandle, const float matrix[]) const override;
+ void setMatrix2fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+ void setMatrix3fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+ void setMatrix4fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+
+ // for nvpr only
+ void setPathFragmentInputTransform(VaryingHandle u, int components,
+ const SkMatrix& matrix) const override {
+ SkFAIL("Only supported in NVPR, which is not in vulkan");
+ }
+
+ // Returns true if either the vertex or fragment buffer needed to generate a new underlying
+ // VkBuffer object in order to upload data. If true is returned, this is a signal to the caller
+ // that they will need to update the descriptor set that is using these buffers.
+ bool uploadUniformBuffers(GrVkGpu* gpu,
+ GrVkUniformBuffer* vertexBuffer,
+ GrVkUniformBuffer* fragmentBuffer) const;
+private:
+ struct Uniform {
+ uint32_t fBinding;
+ uint32_t fOffset;
+ SkDEBUGCODE(
+ GrSLType fType;
+ int fArrayCount;
+ );
+ };
+
+ template<int N> inline void setMatrices(UniformHandle, int arrayCount,
+ const float matrices[]) const;
+
+ void* getBufferPtrAndMarkDirty(const Uniform& uni) const;
+
+ uint32_t fVertexUniformSize;
+ uint32_t fFragmentUniformSize;
+
+ SkTArray<Uniform, true> fUniforms;
+
+ mutable SkAutoMalloc fVertexUniformData;
+ mutable SkAutoMalloc fFragmentUniformData;
+ mutable bool fVertexUniformsDirty;
+ mutable bool fFragmentUniformsDirty;
+};
+
+#endif
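
The manager declared above keeps CPU-side shadow copies of the vertex and fragment uniform blocks (fVertexUniformData / fFragmentUniformData) and flips the matching dirty flag when a setter writes; uploadUniformBuffers() then pushes only the halves that changed. Below is a small standalone sketch of that dirty-tracking pattern, with illustrative types in place of the Skia/Vulkan ones.

    #include <cstdio>
    #include <cstring>
    #include <vector>

    // CPU shadow of one uniform block: setters write into local memory and set
    // a dirty flag; upload() copies out only when something actually changed.
    class ShadowUniformBlock {
    public:
        explicit ShadowUniformBlock(size_t size) : fData(size, 0), fDirty(false) {}

        void set(size_t offset, const void* src, size_t bytes) {
            std::memcpy(fData.data() + offset, src, bytes);
            fDirty = true;
        }

        // Returns true if an upload happened (stand-in for updating a VkBuffer).
        bool upload(std::vector<char>* gpuCopy) {
            if (!fDirty) {
                return false;
            }
            *gpuCopy = fData;
            fDirty = false;
            return true;
        }

    private:
        std::vector<char> fData;
        bool fDirty;
    };

    int main() {
        ShadowUniformBlock vertexBlock(64);
        std::vector<char> gpuSide;
        const float viewMatrixStandIn[4] = {1.f, 0.f, 0.f, 1.f};
        vertexBlock.set(0, viewMatrixStandIn, sizeof(viewMatrixStandIn));
        std::printf("first upload: %d\n", vertexBlock.upload(&gpuSide));   // 1: data was dirty
        std::printf("second upload: %d\n", vertexBlock.upload(&gpuSide));  // 0: nothing changed
        return 0;
    }
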
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkRenderPass.cpp b/gfx/skia/skia/src/gpu/vk/GrVkRenderPass.cpp
new file mode 100644
index 000000000..ee2d3d9f0
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkRenderPass.cpp
@@ -0,0 +1,266 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkRenderPass.h"
+
+#include "GrProcessor.h"
+#include "GrVkFramebuffer.h"
+#include "GrVkGpu.h"
+#include "GrVkRenderTarget.h"
+#include "GrVkUtil.h"
+
+typedef GrVkRenderPass::AttachmentsDescriptor::AttachmentDesc AttachmentDesc;
+
+void setup_vk_attachment_description(VkAttachmentDescription* attachment,
+ const AttachmentDesc& desc,
+ VkImageLayout layout) {
+ attachment->flags = 0;
+ attachment->format = desc.fFormat;
+ SkAssertResult(GrSampleCountToVkSampleCount(desc.fSamples, &attachment->samples));
+ switch (layout) {
+ case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+ attachment->loadOp = desc.fLoadStoreOps.fLoadOp;
+ attachment->storeOp = desc.fLoadStoreOps.fStoreOp;
+ attachment->stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ attachment->stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ break;
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+ attachment->loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ attachment->storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ attachment->stencilLoadOp = desc.fLoadStoreOps.fLoadOp;
+ attachment->stencilStoreOp = desc.fLoadStoreOps.fStoreOp;
+ break;
+ default:
+ SkFAIL("Unexpected attachment layout");
+ }
+
+ attachment->initialLayout = layout;
+ attachment->finalLayout = layout;
+}
+
+void GrVkRenderPass::initSimple(const GrVkGpu* gpu, const GrVkRenderTarget& target) {
+ static const GrVkRenderPass::LoadStoreOps kBasicLoadStoreOps(VK_ATTACHMENT_LOAD_OP_LOAD,
+ VK_ATTACHMENT_STORE_OP_STORE);
+
+ this->init(gpu, target, kBasicLoadStoreOps, kBasicLoadStoreOps);
+}
+
+void GrVkRenderPass::init(const GrVkGpu* gpu,
+ const LoadStoreOps& colorOp,
+ const LoadStoreOps& stencilOp) {
+ uint32_t numAttachments = fAttachmentsDescriptor.fAttachmentCount;
+ // Attachment descriptions to be set on the render pass
+ SkTArray<VkAttachmentDescription> attachments(numAttachments);
+ attachments.reset(numAttachments);
+ memset(attachments.begin(), 0, numAttachments * sizeof(VkAttachmentDescription));
+
+ // Refs to attachments on the render pass (as described by the VkAttachmentDescriptions above)
+ // that are used by the subpass.
+ VkAttachmentReference colorRef;
+ VkAttachmentReference stencilRef;
+ uint32_t currentAttachment = 0;
+
+ // Go through each of the attachment types (color, stencil) and set the necessary fields
+ // on the various Vk structs.
+ VkSubpassDescription subpassDesc;
+ memset(&subpassDesc, 0, sizeof(VkSubpassDescription));
+ subpassDesc.flags = 0;
+ subpassDesc.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ subpassDesc.inputAttachmentCount = 0;
+ subpassDesc.pInputAttachments = nullptr;
+ subpassDesc.pResolveAttachments = nullptr;
+
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ // set up color attachment
+ fAttachmentsDescriptor.fColor.fLoadStoreOps = colorOp;
+ setup_vk_attachment_description(&attachments[currentAttachment],
+ fAttachmentsDescriptor.fColor,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+ // setup subpass use of attachment
+ colorRef.attachment = currentAttachment++;
+ colorRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ subpassDesc.colorAttachmentCount = 1;
+ } else {
+ // I don't think there should ever be a time where we don't have a color attachment
+ SkASSERT(false);
+ colorRef.attachment = VK_ATTACHMENT_UNUSED;
+ colorRef.layout = VK_IMAGE_LAYOUT_UNDEFINED;
+ subpassDesc.colorAttachmentCount = 0;
+ }
+ subpassDesc.pColorAttachments = &colorRef;
+
+ if (fAttachmentFlags & kStencil_AttachmentFlag) {
+ // set up stencil attachment
+ fAttachmentsDescriptor.fStencil.fLoadStoreOps = stencilOp;
+ setup_vk_attachment_description(&attachments[currentAttachment],
+ fAttachmentsDescriptor.fStencil,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
+ // setup subpass use of attachment
+ stencilRef.attachment = currentAttachment++;
+ stencilRef.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ } else {
+ stencilRef.attachment = VK_ATTACHMENT_UNUSED;
+ stencilRef.layout = VK_IMAGE_LAYOUT_UNDEFINED;
+ }
+ subpassDesc.pDepthStencilAttachment = &stencilRef;
+
+ subpassDesc.preserveAttachmentCount = 0;
+ subpassDesc.pPreserveAttachments = nullptr;
+
+ SkASSERT(numAttachments == currentAttachment);
+
+ // Create the VkRenderPass compatible with the attachment descriptions above
+ VkRenderPassCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkRenderPassCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.attachmentCount = numAttachments;
+ createInfo.pAttachments = attachments.begin();
+ createInfo.subpassCount = 1;
+ createInfo.pSubpasses = &subpassDesc;
+ createInfo.dependencyCount = 0;
+ createInfo.pDependencies = nullptr;
+
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateRenderPass(gpu->device(),
+ &createInfo,
+ nullptr,
+ &fRenderPass));
+
+ // Get granularity for this render pass
+ GR_VK_CALL(gpu->vkInterface(), GetRenderAreaGranularity(gpu->device(),
+ fRenderPass,
+ &fGranularity));
+}
+
+void GrVkRenderPass::init(const GrVkGpu* gpu,
+ const GrVkRenderPass& compatibleRenderPass,
+ const LoadStoreOps& colorOp,
+ const LoadStoreOps& stencilOp) {
+ fAttachmentFlags = compatibleRenderPass.fAttachmentFlags;
+ fAttachmentsDescriptor = compatibleRenderPass.fAttachmentsDescriptor;
+ this->init(gpu, colorOp, stencilOp);
+}
+
+void GrVkRenderPass::init(const GrVkGpu* gpu,
+ const GrVkRenderTarget& target,
+ const LoadStoreOps& colorOp,
+ const LoadStoreOps& stencilOp) {
+ // Get attachment information from render target. This includes which attachments the render
+ // target has (color, stencil) and the attachments format and sample count.
+ target.getAttachmentsDescriptor(&fAttachmentsDescriptor, &fAttachmentFlags);
+ this->init(gpu, colorOp, stencilOp);
+}
+
+void GrVkRenderPass::freeGPUData(const GrVkGpu* gpu) const {
+ GR_VK_CALL(gpu->vkInterface(), DestroyRenderPass(gpu->device(), fRenderPass, nullptr));
+}
+
+// Works under the assumption that the color attachment, if present, will always be the first
+// attachment in our attachment array.
+bool GrVkRenderPass::colorAttachmentIndex(uint32_t* index) const {
+ *index = 0;
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ return true;
+ }
+ return false;
+}
+
+// Works under the assumption that the stencil attachment will always come after the color and
+// resolve attachments.
+bool GrVkRenderPass::stencilAttachmentIndex(uint32_t* index) const {
+ *index = 0;
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ ++(*index);
+ }
+ if (fAttachmentFlags & kStencil_AttachmentFlag) {
+ return true;
+ }
+ return false;
+}
+
+void GrVkRenderPass::getBeginInfo(const GrVkRenderTarget& target,
+ VkRenderPassBeginInfo* beginInfo,
+ VkSubpassContents* contents) const {
+ SkASSERT(this->isCompatible(target));
+
+ VkRect2D renderArea;
+ renderArea.offset = { 0, 0 };
+ renderArea.extent = { (uint32_t)target.width(), (uint32_t)target.height() };
+
+ memset(beginInfo, 0, sizeof(VkRenderPassBeginInfo));
+ beginInfo->sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ beginInfo->pNext = nullptr;
+ beginInfo->renderPass = fRenderPass;
+ beginInfo->framebuffer = target.framebuffer()->framebuffer();
+ beginInfo->renderArea = renderArea;
+ beginInfo->clearValueCount = 0;
+ beginInfo->pClearValues = nullptr;
+
+ // Currently just assuming no secondary cmd buffers. This value will need to be updated if we
+ // have them.
+ *contents = VK_SUBPASS_CONTENTS_INLINE;
+}
+
+bool GrVkRenderPass::isCompatible(const AttachmentsDescriptor& desc,
+ const AttachmentFlags& flags) const {
+ if (flags != fAttachmentFlags) {
+ return false;
+ }
+
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ if (!fAttachmentsDescriptor.fColor.isCompatible(desc.fColor)) {
+ return false;
+ }
+ }
+ if (fAttachmentFlags & kStencil_AttachmentFlag) {
+ if (!fAttachmentsDescriptor.fStencil.isCompatible(desc.fStencil)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool GrVkRenderPass::isCompatible(const GrVkRenderTarget& target) const {
+ AttachmentsDescriptor desc;
+ AttachmentFlags flags;
+ target.getAttachmentsDescriptor(&desc, &flags);
+
+ return this->isCompatible(desc, flags);
+}
+
+bool GrVkRenderPass::isCompatible(const GrVkRenderPass& renderPass) const {
+ return this->isCompatible(renderPass.fAttachmentsDescriptor, renderPass.fAttachmentFlags);
+}
+
+bool GrVkRenderPass::equalLoadStoreOps(const LoadStoreOps& colorOps,
+ const LoadStoreOps& stencilOps) const {
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ if (fAttachmentsDescriptor.fColor.fLoadStoreOps != colorOps) {
+ return false;
+ }
+ }
+ if (fAttachmentFlags & kStencil_AttachmentFlag) {
+ if (fAttachmentsDescriptor.fStencil.fLoadStoreOps != stencilOps) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void GrVkRenderPass::genKey(GrProcessorKeyBuilder* b) const {
+ b->add32(fAttachmentFlags);
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ b->add32(fAttachmentsDescriptor.fColor.fFormat);
+ b->add32(fAttachmentsDescriptor.fColor.fSamples);
+ }
+ if (fAttachmentFlags & kStencil_AttachmentFlag) {
+ b->add32(fAttachmentsDescriptor.fStencil.fFormat);
+ b->add32(fAttachmentsDescriptor.fStencil.fSamples);
+ }
+}
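
Note that isCompatible() above intentionally ignores load/store ops: two render passes count as compatible when they have the same set of attachments with matching formats and sample counts, which is the property framebuffer creation depends on. Below is a compact standalone restatement of that rule, using plain structs rather than the Skia types.

    #include <cstdint>
    #include <cstdio>

    struct AttachmentKey {
        uint32_t fFormat;   // stand-in for VkFormat
        int      fSamples;
        bool matches(const AttachmentKey& o) const {
            return fFormat == o.fFormat && fSamples == o.fSamples;
        }
    };

    struct PassDesc {
        bool          fHasColor;
        bool          fHasStencil;
        AttachmentKey fColor;
        AttachmentKey fStencil;
    };

    // Compatibility compares attachment sets, formats, and sample counts only;
    // load/store ops are deliberately left out of the comparison.
    bool compatible(const PassDesc& a, const PassDesc& b) {
        if (a.fHasColor != b.fHasColor || a.fHasStencil != b.fHasStencil) {
            return false;
        }
        if (a.fHasColor && !a.fColor.matches(b.fColor)) {
            return false;
        }
        if (a.fHasStencil && !a.fStencil.matches(b.fStencil)) {
            return false;
        }
        return true;
    }

    int main() {
        PassDesc loadPass  = {true, false, {37 /* some color format id */, 1}, {0, 0}};
        PassDesc clearPass = loadPass;   // same attachments; only the load op would differ
        std::printf("compatible: %s\n", compatible(loadPass, clearPass) ? "yes" : "no");
        return 0;
    }
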
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkRenderPass.h b/gfx/skia/skia/src/gpu/vk/GrVkRenderPass.h
new file mode 100644
index 000000000..d59b5fa1d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkRenderPass.h
@@ -0,0 +1,146 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkRenderPass_DEFINED
+#define GrVkRenderPass_DEFINED
+
+#include "GrTypes.h"
+
+#include "GrVkResource.h"
+
+#include "vk/GrVkDefines.h"
+
+class GrProcessorKeyBuilder;
+class GrVkGpu;
+class GrVkRenderTarget;
+
+class GrVkRenderPass : public GrVkResource {
+public:
+ GrVkRenderPass() : INHERITED(), fRenderPass(VK_NULL_HANDLE) {}
+
+ struct LoadStoreOps {
+ VkAttachmentLoadOp fLoadOp;
+ VkAttachmentStoreOp fStoreOp;
+
+ LoadStoreOps(VkAttachmentLoadOp loadOp, VkAttachmentStoreOp storeOp)
+ : fLoadOp(loadOp)
+ , fStoreOp(storeOp) {}
+
+ bool operator==(const LoadStoreOps& right) const {
+ return fLoadOp == right.fLoadOp && fStoreOp == right.fStoreOp;
+ }
+
+ bool operator!=(const LoadStoreOps& right) const {
+ return !(*this == right);
+ }
+ };
+
+ void initSimple(const GrVkGpu* gpu, const GrVkRenderTarget& target);
+ void init(const GrVkGpu* gpu,
+ const GrVkRenderTarget& target,
+ const LoadStoreOps& colorOp,
+ const LoadStoreOps& stencilOp);
+
+ void init(const GrVkGpu* gpu,
+ const GrVkRenderPass& compatibleRenderPass,
+ const LoadStoreOps& colorOp,
+ const LoadStoreOps& stencilOp);
+
+ struct AttachmentsDescriptor {
+ struct AttachmentDesc {
+ VkFormat fFormat;
+ int fSamples;
+ LoadStoreOps fLoadStoreOps;
+
+ AttachmentDesc()
+ : fFormat(VK_FORMAT_UNDEFINED)
+ , fSamples(0)
+ , fLoadStoreOps(VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE) {}
+ bool operator==(const AttachmentDesc& right) const {
+ return (fFormat == right.fFormat &&
+ fSamples == right.fSamples &&
+ fLoadStoreOps == right.fLoadStoreOps);
+ }
+ bool operator!=(const AttachmentDesc& right) const {
+ return !(*this == right);
+ }
+ bool isCompatible(const AttachmentDesc& desc) const {
+ return (fFormat == desc.fFormat && fSamples == desc.fSamples);
+ }
+ };
+ AttachmentDesc fColor;
+ AttachmentDesc fStencil;
+ uint32_t fAttachmentCount;
+ };
+
+ enum AttachmentFlags {
+ kColor_AttachmentFlag = 0x1,
+ kStencil_AttachmentFlag = 0x2,
+ };
+ GR_DECL_BITFIELD_OPS_FRIENDS(AttachmentFlags);
+
+ // The following return the index of the render pass attachment array for the given attachment.
+ // If the render pass does not have the given attachment it will return false and not set the
+ // index value.
+ bool colorAttachmentIndex(uint32_t* index) const;
+ bool stencilAttachmentIndex(uint32_t* index) const;
+
+ // Sets the VkRenderPassBeginInfo and VkSubpassContents needed to begin a render pass.
+ // TODO: In the future I expect this function will also take an optional render area instead of
+ // defaulting to the entire render target.
+ // TODO: Figure out if load clear values should be passed into this function or should be stored
+ // on the GrVkRenderPass at create time since we'll know at that point if we want to do a load
+ // clear.
+ void getBeginInfo(const GrVkRenderTarget& target,
+ VkRenderPassBeginInfo* beginInfo,
+ VkSubpassContents* contents) const;
+
+ // Returns whether or not the structure of a RenderTarget matches that of the VkRenderPass in
+ // this object. Specifically, this checks that the number of attachments, attachment formats,
+ // and sample counts are all the same. This function is used in the creation of
+ // basic RenderPasses that can be used when creating a VkFrameBuffer object.
+ bool isCompatible(const GrVkRenderTarget& target) const;
+
+ bool isCompatible(const GrVkRenderPass& renderPass) const;
+
+ bool equalLoadStoreOps(const LoadStoreOps& colorOps,
+ const LoadStoreOps& stencilOps) const;
+
+ VkRenderPass vkRenderPass() const { return fRenderPass; }
+
+ const VkExtent2D& granularity() const { return fGranularity; }
+
+ void genKey(GrProcessorKeyBuilder* b) const;
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkRenderPass: %d (%d refs)\n", fRenderPass, this->getRefCnt());
+ }
+#endif
+
+private:
+ GrVkRenderPass(const GrVkRenderPass&);
+
+ void init(const GrVkGpu* gpu,
+ const LoadStoreOps& colorOps,
+ const LoadStoreOps& stencilOps);
+
+ bool isCompatible(const AttachmentsDescriptor&, const AttachmentFlags&) const;
+
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ VkRenderPass fRenderPass;
+ AttachmentFlags fAttachmentFlags;
+ AttachmentsDescriptor fAttachmentsDescriptor;
+ VkExtent2D fGranularity;
+
+ typedef GrVkResource INHERITED;
+};
+
+GR_MAKE_BITFIELD_OPS(GrVkRenderPass::AttachmentFlags);
+
+#endif
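
The attachment-index helpers declared above rely on a fixed ordering: the color attachment, when present, always sits at index 0 and the stencil attachment follows it. Below is a standalone sketch of that indexing convention; the flag values mirror the enum above, everything else is illustrative.

    #include <cstdint>
    #include <cstdio>

    enum AttachmentFlags : uint32_t {
        kColorFlag   = 0x1,
        kStencilFlag = 0x2,
    };

    // Returns true and writes the array index if the pass has a stencil attachment;
    // the index is 0 or 1 depending on whether a color attachment precedes it.
    bool stencilAttachmentIndex(uint32_t flags, uint32_t* index) {
        uint32_t i = 0;
        if (flags & kColorFlag) {
            ++i;
        }
        if (flags & kStencilFlag) {
            *index = i;
            return true;
        }
        return false;
    }

    int main() {
        uint32_t idx = 0;
        if (stencilAttachmentIndex(kColorFlag | kStencilFlag, &idx)) {
            std::printf("stencil attachment index: %u\n", (unsigned)idx);   // prints 1
        }
        if (!stencilAttachmentIndex(kColorFlag, &idx)) {
            std::printf("no stencil attachment\n");
        }
        return 0;
    }
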
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkRenderTarget.cpp b/gfx/skia/skia/src/gpu/vk/GrVkRenderTarget.cpp
new file mode 100644
index 000000000..d6895d25f
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkRenderTarget.cpp
@@ -0,0 +1,386 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkRenderTarget.h"
+
+#include "GrRenderTargetPriv.h"
+#include "GrVkCommandBuffer.h"
+#include "GrVkFramebuffer.h"
+#include "GrVkGpu.h"
+#include "GrVkImageView.h"
+#include "GrVkResourceProvider.h"
+#include "GrVkUtil.h"
+
+#include "vk/GrVkTypes.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+// We're virtually derived from GrSurface (via GrRenderTarget) so its
+// constructor must be explicitly called.
+GrVkRenderTarget::GrVkRenderTarget(GrVkGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ const GrVkImageInfo& msaaInfo,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView,
+ GrVkImage::Wrapped wrapped)
+ : GrSurface(gpu, desc)
+ , GrVkImage(info, wrapped)
+ // for the moment we only support 1:1 color to stencil
+ , GrRenderTarget(gpu, desc)
+ , fColorAttachmentView(colorAttachmentView)
+ , fMSAAImage(new GrVkImage(msaaInfo, GrVkImage::kNot_Wrapped))
+ , fResolveAttachmentView(resolveAttachmentView)
+ , fFramebuffer(nullptr)
+ , fCachedSimpleRenderPass(nullptr) {
+ SkASSERT(desc.fSampleCnt);
+ // The plus 1 is to account for the resolve texture.
+ fColorValuesPerPixel = desc.fSampleCnt + 1; // TODO: is this still correct?
+ this->createFramebuffer(gpu);
+ this->registerWithCache(budgeted);
+}
+
+// We're virtually derived from GrSurface (via GrRenderTarget) so its
+// constructor must be explicitly called.
+GrVkRenderTarget::GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ const GrVkImageInfo& msaaInfo,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView,
+ GrVkImage::Wrapped wrapped)
+ : GrSurface(gpu, desc)
+ , GrVkImage(info, wrapped)
+ // for the moment we only support 1:1 color to stencil
+ , GrRenderTarget(gpu, desc)
+ , fColorAttachmentView(colorAttachmentView)
+ , fMSAAImage(new GrVkImage(msaaInfo, GrVkImage::kNot_Wrapped))
+ , fResolveAttachmentView(resolveAttachmentView)
+ , fFramebuffer(nullptr)
+ , fCachedSimpleRenderPass(nullptr) {
+ SkASSERT(desc.fSampleCnt);
+ // The plus 1 is to account for the resolve texture.
+ fColorValuesPerPixel = desc.fSampleCnt + 1; // TODO: is this still correct?
+ this->createFramebuffer(gpu);
+}
+
+// We're virtually derived from GrSurface (via GrRenderTarget) so its
+// constructor must be explicitly called.
+GrVkRenderTarget::GrVkRenderTarget(GrVkGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ const GrVkImageView* colorAttachmentView,
+ GrVkImage::Wrapped wrapped)
+ : GrSurface(gpu, desc)
+ , GrVkImage(info, wrapped)
+ , GrRenderTarget(gpu, desc)
+ , fColorAttachmentView(colorAttachmentView)
+ , fMSAAImage(nullptr)
+ , fResolveAttachmentView(nullptr)
+ , fFramebuffer(nullptr)
+ , fCachedSimpleRenderPass(nullptr) {
+ SkASSERT(!desc.fSampleCnt);
+ fColorValuesPerPixel = 1;
+ this->createFramebuffer(gpu);
+ this->registerWithCache(budgeted);
+}
+
+// We're virtually derived from GrSurface (via GrRenderTarget) so its
+// constructor must be explicitly called.
+GrVkRenderTarget::GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ const GrVkImageView* colorAttachmentView,
+ GrVkImage::Wrapped wrapped)
+ : GrSurface(gpu, desc)
+ , GrVkImage(info, wrapped)
+ , GrRenderTarget(gpu, desc)
+ , fColorAttachmentView(colorAttachmentView)
+ , fMSAAImage(nullptr)
+ , fResolveAttachmentView(nullptr)
+ , fFramebuffer(nullptr)
+ , fCachedSimpleRenderPass(nullptr) {
+ SkASSERT(!desc.fSampleCnt);
+ fColorValuesPerPixel = 1;
+ this->createFramebuffer(gpu);
+}
+
+GrVkRenderTarget*
+GrVkRenderTarget::Create(GrVkGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ GrVkImage::Wrapped wrapped) {
+ SkASSERT(1 == info.fLevelCount);
+ VkFormat pixelFormat;
+ GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat);
+
+ VkImage colorImage;
+
+ // create msaa surface if necessary
+ GrVkImageInfo msInfo;
+ const GrVkImageView* resolveAttachmentView = nullptr;
+ if (desc.fSampleCnt) {
+ GrVkImage::ImageDesc msImageDesc;
+ msImageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ msImageDesc.fFormat = pixelFormat;
+ msImageDesc.fWidth = desc.fWidth;
+ msImageDesc.fHeight = desc.fHeight;
+ msImageDesc.fLevels = 1;
+ msImageDesc.fSamples = desc.fSampleCnt;
+ msImageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ msImageDesc.fUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT |
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+ msImageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ if (!GrVkImage::InitImageInfo(gpu, msImageDesc, &msInfo)) {
+ return nullptr;
+ }
+
+ // Set color attachment image
+ colorImage = msInfo.fImage;
+
+ // Create Resolve attachment view
+ resolveAttachmentView = GrVkImageView::Create(gpu, info.fImage, pixelFormat,
+ GrVkImageView::kColor_Type, 1);
+ if (!resolveAttachmentView) {
+ GrVkImage::DestroyImageInfo(gpu, &msInfo);
+ return nullptr;
+ }
+ } else {
+ // Set color attachment image
+ colorImage = info.fImage;
+ }
+
+ // Get color attachment view
+ const GrVkImageView* colorAttachmentView = GrVkImageView::Create(gpu, colorImage, pixelFormat,
+ GrVkImageView::kColor_Type, 1);
+ if (!colorAttachmentView) {
+ if (desc.fSampleCnt) {
+ resolveAttachmentView->unref(gpu);
+ GrVkImage::DestroyImageInfo(gpu, &msInfo);
+ }
+ return nullptr;
+ }
+
+ GrVkRenderTarget* texRT;
+ if (desc.fSampleCnt) {
+ texRT = new GrVkRenderTarget(gpu, budgeted, desc, info, msInfo,
+ colorAttachmentView, resolveAttachmentView, wrapped);
+ } else {
+ texRT = new GrVkRenderTarget(gpu, budgeted, desc, info, colorAttachmentView, wrapped);
+ }
+
+ return texRT;
+}
+
+GrVkRenderTarget*
+GrVkRenderTarget::CreateNewRenderTarget(GrVkGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ const GrVkImage::ImageDesc& imageDesc) {
+ SkASSERT(imageDesc.fUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
+
+ GrVkImageInfo info;
+ if (!GrVkImage::InitImageInfo(gpu, imageDesc, &info)) {
+ return nullptr;
+ }
+
+ GrVkRenderTarget* rt = GrVkRenderTarget::Create(gpu, budgeted, desc, info,
+ GrVkImage::kNot_Wrapped);
+ if (!rt) {
+ GrVkImage::DestroyImageInfo(gpu, &info);
+ }
+ return rt;
+}
+
+GrVkRenderTarget*
+GrVkRenderTarget::CreateWrappedRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrWrapOwnership ownership,
+ const GrVkImageInfo* info) {
+ SkASSERT(info);
+ // We can wrap a rendertarget without its allocation, as long as we don't take ownership
+ SkASSERT(VK_NULL_HANDLE != info->fImage);
+ SkASSERT(VK_NULL_HANDLE != info->fAlloc.fMemory || kAdopt_GrWrapOwnership != ownership);
+
+ GrVkImage::Wrapped wrapped = kBorrow_GrWrapOwnership == ownership ? GrVkImage::kBorrowed_Wrapped
+ : GrVkImage::kAdopted_Wrapped;
+
+ GrVkRenderTarget* rt = GrVkRenderTarget::Create(gpu, SkBudgeted::kNo, desc, *info, wrapped);
+
+ return rt;
+}
+
+bool GrVkRenderTarget::completeStencilAttachment() {
+ this->createFramebuffer(this->getVkGpu());
+ return true;
+}
+
+void GrVkRenderTarget::createFramebuffer(GrVkGpu* gpu) {
+ if (fFramebuffer) {
+ fFramebuffer->unref(gpu);
+ }
+ if (fCachedSimpleRenderPass) {
+ fCachedSimpleRenderPass->unref(gpu);
+ }
+
+ // Vulkan requires us to create a compatible renderpass before we can create our framebuffer,
+ // so we use this to get a (cached) basic renderpass, only for creation.
+ fCachedSimpleRenderPass =
+ gpu->resourceProvider().findCompatibleRenderPass(*this, &fCompatibleRPHandle);
+
+ // Stencil attachment view is stored in the base RT stencil attachment
+ const GrVkImageView* stencilView = this->stencilAttachmentView();
+ fFramebuffer = GrVkFramebuffer::Create(gpu, this->width(), this->height(),
+ fCachedSimpleRenderPass, fColorAttachmentView,
+ stencilView);
+ SkASSERT(fFramebuffer);
+}
+
+void GrVkRenderTarget::getAttachmentsDescriptor(
+ GrVkRenderPass::AttachmentsDescriptor* desc,
+ GrVkRenderPass::AttachmentFlags* attachmentFlags) const {
+ int colorSamples = this->numColorSamples();
+ VkFormat colorFormat;
+ GrPixelConfigToVkFormat(this->config(), &colorFormat);
+ desc->fColor.fFormat = colorFormat;
+ desc->fColor.fSamples = colorSamples ? colorSamples : 1;
+ *attachmentFlags = GrVkRenderPass::kColor_AttachmentFlag;
+ uint32_t attachmentCount = 1;
+
+ const GrStencilAttachment* stencil = this->renderTargetPriv().getStencilAttachment();
+ if (stencil) {
+ const GrVkStencilAttachment* vkStencil = static_cast<const GrVkStencilAttachment*>(stencil);
+ desc->fStencil.fFormat = vkStencil->vkFormat();
+ desc->fStencil.fSamples = vkStencil->numSamples() ? vkStencil->numSamples() : 1;
+ // Currently in Vulkan, stencil and color attachments must all have the same number of samples
+ SkASSERT(desc->fColor.fSamples == desc->fStencil.fSamples);
+ *attachmentFlags |= GrVkRenderPass::kStencil_AttachmentFlag;
+ ++attachmentCount;
+ }
+ desc->fAttachmentCount = attachmentCount;
+}
+
+GrVkRenderTarget::~GrVkRenderTarget() {
+ // either release or abandon should have been called by the owner of this object.
+ SkASSERT(!fMSAAImage);
+ SkASSERT(!fResolveAttachmentView);
+ SkASSERT(!fColorAttachmentView);
+ SkASSERT(!fFramebuffer);
+ SkASSERT(!fCachedSimpleRenderPass);
+}
+
+void GrVkRenderTarget::addResources(GrVkCommandBuffer& commandBuffer) const {
+ commandBuffer.addResource(this->framebuffer());
+ commandBuffer.addResource(this->colorAttachmentView());
+ commandBuffer.addResource(this->msaaImageResource() ? this->msaaImageResource()
+ : this->resource());
+ if (this->stencilImageResource()) {
+ commandBuffer.addResource(this->stencilImageResource());
+ commandBuffer.addResource(this->stencilAttachmentView());
+ }
+}
+
+void GrVkRenderTarget::releaseInternalObjects() {
+ GrVkGpu* gpu = this->getVkGpu();
+
+ if (fMSAAImage) {
+ fMSAAImage->releaseImage(gpu);
+ fMSAAImage = nullptr;
+ }
+
+ if (fResolveAttachmentView) {
+ fResolveAttachmentView->unref(gpu);
+ fResolveAttachmentView = nullptr;
+ }
+ if (fColorAttachmentView) {
+ fColorAttachmentView->unref(gpu);
+ fColorAttachmentView = nullptr;
+ }
+ if (fFramebuffer) {
+ fFramebuffer->unref(gpu);
+ fFramebuffer = nullptr;
+ }
+ if (fCachedSimpleRenderPass) {
+ fCachedSimpleRenderPass->unref(gpu);
+ fCachedSimpleRenderPass = nullptr;
+ }
+}
+
+void GrVkRenderTarget::abandonInternalObjects() {
+ if (fMSAAImage) {
+ fMSAAImage->abandonImage();
+ fMSAAImage = nullptr;
+ }
+
+ if (fResolveAttachmentView) {
+ fResolveAttachmentView->unrefAndAbandon();
+ fResolveAttachmentView = nullptr;
+ }
+ if (fColorAttachmentView) {
+ fColorAttachmentView->unrefAndAbandon();
+ fColorAttachmentView = nullptr;
+ }
+ if (fFramebuffer) {
+ fFramebuffer->unrefAndAbandon();
+ fFramebuffer = nullptr;
+ }
+ if (fCachedSimpleRenderPass) {
+ fCachedSimpleRenderPass->unrefAndAbandon();
+ fCachedSimpleRenderPass = nullptr;
+ }
+}
+
+void GrVkRenderTarget::onRelease() {
+ this->releaseInternalObjects();
+ this->releaseImage(this->getVkGpu());
+ GrRenderTarget::onRelease();
+}
+
+void GrVkRenderTarget::onAbandon() {
+ this->abandonInternalObjects();
+ this->abandonImage();
+ GrRenderTarget::onAbandon();
+}
+
+
+GrBackendObject GrVkRenderTarget::getRenderTargetHandle() const {
+ // If the render target is multisampled, we currently return the ImageInfo for the resolved
+ // image. If we only wrap the msaa target (currently not implemented) we should return a handle
+ // to that instead.
+ return (GrBackendObject)&fInfo;
+}
+
+const GrVkResource* GrVkRenderTarget::stencilImageResource() const {
+ const GrStencilAttachment* stencil = this->renderTargetPriv().getStencilAttachment();
+ if (stencil) {
+ const GrVkStencilAttachment* vkStencil = static_cast<const GrVkStencilAttachment*>(stencil);
+ return vkStencil->imageResource();
+ }
+
+ return nullptr;
+}
+
+const GrVkImageView* GrVkRenderTarget::stencilAttachmentView() const {
+ const GrStencilAttachment* stencil = this->renderTargetPriv().getStencilAttachment();
+ if (stencil) {
+ const GrVkStencilAttachment* vkStencil = static_cast<const GrVkStencilAttachment*>(stencil);
+ return vkStencil->stencilView();
+ }
+
+ return nullptr;
+}
+
+
+GrVkGpu* GrVkRenderTarget::getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+}
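
A note on the construction above: when desc.fSampleCnt is non-zero, Create() allocates an extra multisampled image and that image becomes the color attachment, while the original single-sample image is demoted to the resolve attachment; without MSAA the original image is rendered into directly. Below is a small standalone sketch of that selection, with strings standing in for the two images.

    #include <cstdio>
    #include <string>

    struct TargetImages {
        std::string colorAttachment;     // what the render pass draws into
        std::string resolveAttachment;   // only populated when MSAA is enabled
    };

    TargetImages pickAttachments(int sampleCnt) {
        if (sampleCnt > 0) {
            // MSAA: draw into a dedicated multisampled image and resolve into
            // the original single-sample image afterwards.
            return {"msaa image", "original image"};
        }
        // No MSAA: draw straight into the original image; no resolve step.
        return {"original image", ""};
    }

    int main() {
        TargetImages msaa   = pickAttachments(4);
        TargetImages single = pickAttachments(0);
        std::printf("4x MSAA: color=%s, resolve=%s\n",
                    msaa.colorAttachment.c_str(), msaa.resolveAttachment.c_str());
        std::printf("no MSAA: color=%s, resolve=%s\n",
                    single.colorAttachment.c_str(),
                    single.resolveAttachment.empty() ? "(none)"
                                                     : single.resolveAttachment.c_str());
        return 0;
    }
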
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkRenderTarget.h b/gfx/skia/skia/src/gpu/vk/GrVkRenderTarget.h
new file mode 100644
index 000000000..e3ebefba5
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkRenderTarget.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrVkRenderTarget_DEFINED
+#define GrVkRenderTarget_DEFINED
+
+#include "GrVkImage.h"
+#include "GrRenderTarget.h"
+
+#include "GrVkRenderPass.h"
+#include "GrVkResourceProvider.h"
+
+class GrVkCommandBuffer;
+class GrVkFramebuffer;
+class GrVkGpu;
+class GrVkImageView;
+class GrVkStencilAttachment;
+
+struct GrVkImageInfo;
+
+#ifdef SK_BUILD_FOR_WIN
+// Windows gives bogus warnings about inheriting asTexture/asRenderTarget via dominance.
+#pragma warning(push)
+#pragma warning(disable: 4250)
+#endif
+
+class GrVkRenderTarget: public GrRenderTarget, public virtual GrVkImage {
+public:
+ static GrVkRenderTarget* CreateNewRenderTarget(GrVkGpu*, SkBudgeted, const GrSurfaceDesc&,
+ const GrVkImage::ImageDesc&);
+
+ static GrVkRenderTarget* CreateWrappedRenderTarget(GrVkGpu*, const GrSurfaceDesc&,
+ GrWrapOwnership,
+ const GrVkImageInfo*);
+
+ ~GrVkRenderTarget() override;
+
+ const GrVkFramebuffer* framebuffer() const { return fFramebuffer; }
+ const GrVkImageView* colorAttachmentView() const { return fColorAttachmentView; }
+ const GrVkResource* msaaImageResource() const {
+ if (fMSAAImage) {
+ return fMSAAImage->fResource;
+ }
+ return nullptr;
+ }
+ GrVkImage* msaaImage() { return fMSAAImage; }
+ const GrVkImageView* resolveAttachmentView() const { return fResolveAttachmentView; }
+ const GrVkResource* stencilImageResource() const;
+ const GrVkImageView* stencilAttachmentView() const;
+
+ const GrVkRenderPass* simpleRenderPass() const { return fCachedSimpleRenderPass; }
+ GrVkResourceProvider::CompatibleRPHandle compatibleRenderPassHandle() const {
+ return fCompatibleRPHandle;
+ }
+
+ // override of GrRenderTarget
+ ResolveType getResolveType() const override {
+ if (this->numColorSamples() > 1) {
+ return kCanResolve_ResolveType;
+ }
+ return kAutoResolves_ResolveType;
+ }
+
+ bool canAttemptStencilAttachment() const override {
+ return true;
+ }
+
+ GrBackendObject getRenderTargetHandle() const override;
+
+ void getAttachmentsDescriptor(GrVkRenderPass::AttachmentsDescriptor* desc,
+ GrVkRenderPass::AttachmentFlags* flags) const;
+
+ void addResources(GrVkCommandBuffer& commandBuffer) const;
+
+protected:
+ GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ const GrVkImageInfo& msaaInfo,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView,
+ GrVkImage::Wrapped wrapped);
+
+ GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ const GrVkImageView* colorAttachmentView,
+ GrVkImage::Wrapped wrapped);
+
+ GrVkGpu* getVkGpu() const;
+
+ void onAbandon() override;
+ void onRelease() override;
+
+ // This accounts for the texture's memory and any MSAA renderbuffer's memory.
+ size_t onGpuMemorySize() const override {
+ SkASSERT(kUnknown_GrPixelConfig != fDesc.fConfig);
+ SkASSERT(!GrPixelConfigIsCompressed(fDesc.fConfig));
+ size_t colorBytes = GrBytesPerPixel(fDesc.fConfig);
+ SkASSERT(colorBytes > 0);
+ return fColorValuesPerPixel * fDesc.fWidth * fDesc.fHeight * colorBytes;
+ }
+
+ void createFramebuffer(GrVkGpu* gpu);
+
+ const GrVkImageView* fColorAttachmentView;
+ GrVkImage* fMSAAImage;
+ const GrVkImageView* fResolveAttachmentView;
+
+private:
+ GrVkRenderTarget(GrVkGpu* gpu,
+ SkBudgeted,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ const GrVkImageInfo& msaaInfo,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView,
+ GrVkImage::Wrapped wrapped);
+
+ GrVkRenderTarget(GrVkGpu* gpu,
+ SkBudgeted,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ const GrVkImageView* colorAttachmentView,
+ GrVkImage::Wrapped wrapped);
+
+ static GrVkRenderTarget* Create(GrVkGpu*, SkBudgeted, const GrSurfaceDesc&,
+ const GrVkImageInfo&, GrVkImage::Wrapped wrapped);
+
+ bool completeStencilAttachment() override;
+
+ void releaseInternalObjects();
+ void abandonInternalObjects();
+
+ const GrVkFramebuffer* fFramebuffer;
+ int fColorValuesPerPixel;
+
+ // This is a cached pointer to a simple render pass. The render target should unref it
+ // once it is done with it.
+ const GrVkRenderPass* fCachedSimpleRenderPass;
+ // This is a handle to be used to quickly get compatible GrVkRenderPasses for this render target
+ GrVkResourceProvider::CompatibleRPHandle fCompatibleRPHandle;
+};
+
+#endif
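
The onGpuMemorySize() override above charges the target for every color value it keeps per pixel: sampleCnt MSAA samples plus one resolve value when multisampled, or a single value otherwise, multiplied by the pixel count and bytes per pixel. Below is a worked example of that arithmetic; the helper name is illustrative.

    #include <cstddef>
    #include <cstdio>

    // Mirrors the accounting in onGpuMemorySize(): colorValuesPerPixel is
    // sampleCnt + 1 for MSAA targets (samples plus the resolve image), else 1.
    size_t renderTargetBytes(int width, int height, int sampleCnt, size_t bytesPerPixel) {
        const int colorValuesPerPixel = sampleCnt ? sampleCnt + 1 : 1;
        return static_cast<size_t>(colorValuesPerPixel) * width * height * bytesPerPixel;
    }

    int main() {
        // 1920x1080, 4 bytes per pixel (e.g. an 8888 config), 4x MSAA:
        // (4 + 1) * 1920 * 1080 * 4 = 41,472,000 bytes, roughly 39.6 MiB.
        std::printf("%zu bytes\n", renderTargetBytes(1920, 1080, 4, 4));
        return 0;
    }
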
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkResource.h b/gfx/skia/skia/src/gpu/vk/GrVkResource.h
new file mode 100644
index 000000000..9d7212ed8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkResource.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkResource_DEFINED
+#define GrVkResource_DEFINED
+
+#include "SkAtomics.h"
+#include "SkRandom.h"
+#include "SkTHash.h"
+
+class GrVkGpu;
+
+// uncomment to enable tracing of resource refs
+#ifdef SK_DEBUG
+#define SK_TRACE_VK_RESOURCES
+#endif
+
+/** \class GrVkResource
+
+ GrVkResource is the base class for Vulkan resources that may be shared by multiple
+ objects. When an existing owner wants to share a reference, it calls ref().
+ When an owner wants to release its reference, it calls unref(). When the
+ shared object's reference count goes to zero as the result of an unref()
+ call, its (virtual) destructor is called. It is an error for the
+ destructor to be called explicitly (or via the object going out of scope on
+ the stack or calling delete) if getRefCnt() > 1.
+
+ This is nearly identical to SkRefCntBase. The exceptions are that unref()
+ takes a GrVkGpu, and any derived classes must implement freeGPUData() and
+ possibly abandonSubResources().
+*/
+
+class GrVkResource : SkNoncopyable {
+public:
+ // Simple refCount tracing, to ensure that everything ref'ed is unref'ed.
+#ifdef SK_TRACE_VK_RESOURCES
+ struct Hash {
+ uint32_t operator()(const GrVkResource* const& r) const {
+ SkASSERT(r);
+ return r->fKey;
+ }
+ };
+
+ class Trace {
+ public:
+ ~Trace() {
+ fHashSet.foreach([](const GrVkResource* r) {
+ r->dumpInfo();
+ });
+ SkASSERT(0 == fHashSet.count());
+ }
+ void add(const GrVkResource* r) { fHashSet.add(r); }
+ void remove(const GrVkResource* r) { fHashSet.remove(r); }
+
+ private:
+ SkTHashSet<const GrVkResource*, GrVkResource::Hash> fHashSet;
+ };
+ static Trace fTrace;
+
+ static uint32_t fKeyCounter;
+#endif
+
+ /** Default construct, initializing the reference count to 1.
+ */
+ GrVkResource() : fRefCnt(1) {
+#ifdef SK_TRACE_VK_RESOURCES
+ fKey = sk_atomic_fetch_add(&fKeyCounter, 1u, sk_memory_order_relaxed);
+ fTrace.add(this);
+#endif
+ }
+
+ /** Destruct, asserting that the reference count is 1.
+ */
+ virtual ~GrVkResource() {
+#ifdef SK_DEBUG
+ SkASSERTF(fRefCnt == 1, "fRefCnt was %d", fRefCnt);
+ fRefCnt = 0; // illegal value, to catch us if we reuse after delete
+#endif
+ }
+
+#ifdef SK_DEBUG
+ /** Return the reference count. Use only for debugging. */
+ int32_t getRefCnt() const { return fRefCnt; }
+#endif
+
+ /** May return true if the caller is the only owner.
+ * Ensures that all previous owner's actions are complete.
+ */
+ bool unique() const {
+ if (1 == sk_atomic_load(&fRefCnt, sk_memory_order_acquire)) {
+ // The acquire barrier is only really needed if we return true. It
+ // prevents code conditioned on the result of unique() from running
+ // until previous owners are all totally done calling unref().
+ return true;
+ }
+ return false;
+ }
+
+ /** Increment the reference count.
+ Must be balanced by a call to unref() or unrefAndAbandon().
+ */
+ void ref() const {
+ SkASSERT(fRefCnt > 0);
+ (void)sk_atomic_fetch_add(&fRefCnt, +1, sk_memory_order_relaxed); // No barrier required.
+ }
+
+ /** Decrement the reference count. If the reference count is 1 before the
+ decrement, then delete the object. Note that if this is the case, then
+ the object needs to have been allocated via new, and not on the stack.
+ Any GPU data associated with this resource will be freed before it's deleted.
+ */
+ void unref(const GrVkGpu* gpu) const {
+ SkASSERT(fRefCnt > 0);
+ SkASSERT(gpu);
+ // A release here acts in place of all releases we "should" have been doing in ref().
+ if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) {
+ // Like unique(), the acquire is only needed on success, to make sure
+ // code in internal_dispose() doesn't happen before the decrement.
+ this->internal_dispose(gpu);
+ }
+ }
+
+ /** Unref without freeing GPU data. Used only when we're abandoning the resource */
+ void unrefAndAbandon() const {
+ SkASSERT(fRefCnt > 0);
+ // A release here acts in place of all releases we "should" have been doing in ref().
+ if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) {
+ // Like unique(), the acquire is only needed on success, to make sure
+ // code in internal_dispose() doesn't happen before the decrement.
+ this->internal_dispose();
+ }
+ }
+
+#ifdef SK_DEBUG
+ void validate() const {
+ SkASSERT(fRefCnt > 0);
+ }
+#endif
+
+#ifdef SK_TRACE_VK_RESOURCES
+ /** Output a human-readable dump of this resource's information
+ */
+ virtual void dumpInfo() const = 0;
+#endif
+
+private:
+ /** Must be implemented by any subclasses.
+ * Deletes any Vk data associated with this resource
+ */
+ virtual void freeGPUData(const GrVkGpu* gpu) const = 0;
+
+ /** Must be overridden by subclasses that themselves store GrVkResources.
+ * Will unrefAndAbandon those resources without deleting the underlying Vk data
+ */
+ virtual void abandonSubResources() const {}
+
+ /**
+ * Called when the ref count goes to 0. Will free Vk resources.
+ */
+ void internal_dispose(const GrVkGpu* gpu) const {
+ this->freeGPUData(gpu);
+#ifdef SK_TRACE_VK_RESOURCES
+ fTrace.remove(this);
+#endif
+ SkASSERT(0 == fRefCnt);
+ fRefCnt = 1;
+ delete this;
+ }
+
+ /**
+ * Internal_dispose without freeing Vk resources. Used when we've lost context.
+ */
+ void internal_dispose() const {
+ this->abandonSubResources();
+#ifdef SK_TRACE_VK_RESOURCES
+ fTrace.remove(this);
+#endif
+ SkASSERT(0 == fRefCnt);
+ fRefCnt = 1;
+ delete this;
+ }
+
+ mutable int32_t fRefCnt;
+#ifdef SK_TRACE_VK_RESOURCES
+ uint32_t fKey;
+#endif
+
+ typedef SkNoncopyable INHERITED;
+};
+
+// This subclass allows for recycling
+class GrVkRecycledResource : public GrVkResource {
+public:
+ // When recycle is called and there is only one ref left on the resource, we will signal that
+ // the resource can be recycled for reuse. If the subclass (or whoever is managing this resource)
+ // decides not to recycle the object, it is their responsibility to call unref on the object.
+ void recycle(GrVkGpu* gpu) const {
+ if (this->unique()) {
+ this->onRecycle(gpu);
+ } else {
+ this->unref(gpu);
+ }
+ }
+
+private:
+ virtual void onRecycle(GrVkGpu* gpu) const = 0;
+};
+
+#endif
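
GrVkResource above is an intrusive reference count whose final unref has two endings: unref(gpu) frees the underlying Vulkan objects, while unrefAndAbandon() skips that step when the device is already gone. Below is a minimal standalone sketch of that shape, with a dummy device type and illustrative names in place of the Skia ones.

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    struct FakeDevice {};   // stand-in for the GPU/device handle

    class RefCountedGpuResource {
    public:
        RefCountedGpuResource() : fRefCnt(1) {}
        virtual ~RefCountedGpuResource() = default;

        void ref() const { fRefCnt.fetch_add(1, std::memory_order_relaxed); }

        // Last unref with a live device: free GPU data, then delete.
        void unref(FakeDevice* device) const {
            if (1 == fRefCnt.fetch_sub(1, std::memory_order_acq_rel)) {
                this->freeGPUData(device);
                delete this;
            }
        }

        // Last unref after device loss: just drop the CPU-side object.
        void unrefAndAbandon() const {
            if (1 == fRefCnt.fetch_sub(1, std::memory_order_acq_rel)) {
                delete this;
            }
        }

    protected:
        virtual void freeGPUData(FakeDevice*) const = 0;

    private:
        mutable std::atomic<int32_t> fRefCnt;
    };

    class DemoResource : public RefCountedGpuResource {
        void freeGPUData(FakeDevice*) const override {
            std::printf("freeing device handles\n");
        }
    };

    int main() {
        FakeDevice device;
        const RefCountedGpuResource* res = new DemoResource;
        res->ref();             // a second owner appears
        res->unref(&device);    // one owner left: nothing freed yet
        res->unref(&device);    // last owner: frees device handles and deletes
        return 0;
    }
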
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkResourceProvider.cpp b/gfx/skia/skia/src/gpu/vk/GrVkResourceProvider.cpp
new file mode 100644
index 000000000..3027c2d34
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkResourceProvider.cpp
@@ -0,0 +1,481 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkResourceProvider.h"
+
+#include "GrTextureParams.h"
+#include "GrVkCommandBuffer.h"
+#include "GrVkCopyPipeline.h"
+#include "GrVkGLSLSampler.h"
+#include "GrVkPipeline.h"
+#include "GrVkRenderTarget.h"
+#include "GrVkSampler.h"
+#include "GrVkUniformBuffer.h"
+#include "GrVkUtil.h"
+
+#ifdef SK_TRACE_VK_RESOURCES
+GrVkResource::Trace GrVkResource::fTrace;
+uint32_t GrVkResource::fKeyCounter = 0;
+#endif
+
+GrVkResourceProvider::GrVkResourceProvider(GrVkGpu* gpu)
+ : fGpu(gpu)
+ , fPipelineCache(VK_NULL_HANDLE) {
+ fPipelineStateCache = new PipelineStateCache(gpu);
+}
+
+GrVkResourceProvider::~GrVkResourceProvider() {
+ SkASSERT(0 == fRenderPassArray.count());
+ SkASSERT(VK_NULL_HANDLE == fPipelineCache);
+ delete fPipelineStateCache;
+}
+
+void GrVkResourceProvider::init() {
+ VkPipelineCacheCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkPipelineCacheCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.initialDataSize = 0;
+ createInfo.pInitialData = nullptr;
+ VkResult result = GR_VK_CALL(fGpu->vkInterface(),
+ CreatePipelineCache(fGpu->device(), &createInfo, nullptr,
+ &fPipelineCache));
+ SkASSERT(VK_SUCCESS == result);
+ if (VK_SUCCESS != result) {
+ fPipelineCache = VK_NULL_HANDLE;
+ }
+
+ // Init uniform descriptor objects
+ fDescriptorSetManagers.emplace_back(fGpu, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
+ SkASSERT(1 == fDescriptorSetManagers.count());
+ fUniformDSHandle = GrVkDescriptorSetManager::Handle(0);
+}
+
+GrVkPipeline* GrVkResourceProvider::createPipeline(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ VkPipelineShaderStageCreateInfo* shaderStageInfo,
+ int shaderStageCount,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass,
+ VkPipelineLayout layout) {
+
+ return GrVkPipeline::Create(fGpu, pipeline, primProc, shaderStageInfo, shaderStageCount,
+ primitiveType, renderPass, layout, fPipelineCache);
+}
+
+GrVkCopyPipeline* GrVkResourceProvider::findOrCreateCopyPipeline(
+ const GrVkRenderTarget* dst,
+ VkPipelineShaderStageCreateInfo* shaderStageInfo,
+ VkPipelineLayout pipelineLayout) {
+ // Find or Create a compatible pipeline
+ GrVkCopyPipeline* pipeline = nullptr;
+ for (int i = 0; i < fCopyPipelines.count() && !pipeline; ++i) {
+ if (fCopyPipelines[i]->isCompatible(*dst->simpleRenderPass())) {
+ pipeline = fCopyPipelines[i];
+ }
+ }
+ if (!pipeline) {
+ pipeline = GrVkCopyPipeline::Create(fGpu, shaderStageInfo,
+ pipelineLayout,
+ dst->numColorSamples(),
+ *dst->simpleRenderPass(),
+ fPipelineCache);
+ fCopyPipelines.push_back(pipeline);
+ }
+ SkASSERT(pipeline);
+ pipeline->ref();
+ return pipeline;
+}
+
+// To create framebuffers, we first need to create a simple RenderPass that is
+// only used for framebuffer creation. When we actually render we will create
+// RenderPasses as needed that are compatible with the framebuffer.
+const GrVkRenderPass*
+GrVkResourceProvider::findCompatibleRenderPass(const GrVkRenderTarget& target,
+ CompatibleRPHandle* compatibleHandle) {
+ for (int i = 0; i < fRenderPassArray.count(); ++i) {
+ if (fRenderPassArray[i].isCompatible(target)) {
+ const GrVkRenderPass* renderPass = fRenderPassArray[i].getCompatibleRenderPass();
+ renderPass->ref();
+ if (compatibleHandle) {
+ *compatibleHandle = CompatibleRPHandle(i);
+ }
+ return renderPass;
+ }
+ }
+
+ const GrVkRenderPass* renderPass =
+ fRenderPassArray.emplace_back(fGpu, target).getCompatibleRenderPass();
+ renderPass->ref();
+
+ if (compatibleHandle) {
+ *compatibleHandle = CompatibleRPHandle(fRenderPassArray.count() - 1);
+ }
+ return renderPass;
+}
+
+const GrVkRenderPass*
+GrVkResourceProvider::findCompatibleRenderPass(const CompatibleRPHandle& compatibleHandle) {
+ SkASSERT(compatibleHandle.isValid() && compatibleHandle.toIndex() < fRenderPassArray.count());
+ int index = compatibleHandle.toIndex();
+ const GrVkRenderPass* renderPass = fRenderPassArray[index].getCompatibleRenderPass();
+ renderPass->ref();
+ return renderPass;
+}
+
+const GrVkRenderPass* GrVkResourceProvider::findRenderPass(
+ const GrVkRenderTarget& target,
+ const GrVkRenderPass::LoadStoreOps& colorOps,
+ const GrVkRenderPass::LoadStoreOps& stencilOps,
+ CompatibleRPHandle* compatibleHandle) {
+ GrVkResourceProvider::CompatibleRPHandle tempRPHandle;
+ GrVkResourceProvider::CompatibleRPHandle* pRPHandle = compatibleHandle ? compatibleHandle
+ : &tempRPHandle;
+ *pRPHandle = target.compatibleRenderPassHandle();
+
+ // This will get us the handle to (and possibly create) the compatible set for the specific
+ // GrVkRenderPass we are looking for.
+ this->findCompatibleRenderPass(target, compatibleHandle);
+ return this->findRenderPass(*pRPHandle, colorOps, stencilOps);
+}
+
+const GrVkRenderPass*
+GrVkResourceProvider::findRenderPass(const CompatibleRPHandle& compatibleHandle,
+ const GrVkRenderPass::LoadStoreOps& colorOps,
+ const GrVkRenderPass::LoadStoreOps& stencilOps) {
+ SkASSERT(compatibleHandle.isValid() && compatibleHandle.toIndex() < fRenderPassArray.count());
+ CompatibleRenderPassSet& compatibleSet = fRenderPassArray[compatibleHandle.toIndex()];
+ const GrVkRenderPass* renderPass = compatibleSet.getRenderPass(fGpu,
+ colorOps,
+ stencilOps);
+ renderPass->ref();
+ return renderPass;
+}
+
+GrVkDescriptorPool* GrVkResourceProvider::findOrCreateCompatibleDescriptorPool(
+ VkDescriptorType type, uint32_t count) {
+ return new GrVkDescriptorPool(fGpu, type, count);
+}
+
+GrVkSampler* GrVkResourceProvider::findOrCreateCompatibleSampler(const GrTextureParams& params,
+ uint32_t mipLevels) {
+ GrVkSampler* sampler = fSamplers.find(GrVkSampler::GenerateKey(params, mipLevels));
+ if (!sampler) {
+ sampler = GrVkSampler::Create(fGpu, params, mipLevels);
+ fSamplers.add(sampler);
+ }
+ SkASSERT(sampler);
+ sampler->ref();
+ return sampler;
+}
+
+sk_sp<GrVkPipelineState> GrVkResourceProvider::findOrCreateCompatiblePipelineState(
+ const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& proc,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass) {
+ return fPipelineStateCache->refPipelineState(pipeline, proc, primitiveType, renderPass);
+}
+
+void GrVkResourceProvider::getSamplerDescriptorSetHandle(const GrVkUniformHandler& uniformHandler,
+ GrVkDescriptorSetManager::Handle* handle) {
+ SkASSERT(handle);
+ for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
+ if (fDescriptorSetManagers[i].isCompatible(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+ &uniformHandler)) {
+ *handle = GrVkDescriptorSetManager::Handle(i);
+ return;
+ }
+ }
+
+ fDescriptorSetManagers.emplace_back(fGpu, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+ &uniformHandler);
+ *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.count() - 1);
+}
+
+void GrVkResourceProvider::getSamplerDescriptorSetHandle(const SkTArray<uint32_t>& visibilities,
+ GrVkDescriptorSetManager::Handle* handle) {
+ SkASSERT(handle);
+ for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
+ if (fDescriptorSetManagers[i].isCompatible(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+ visibilities)) {
+ *handle = GrVkDescriptorSetManager::Handle(i);
+ return;
+ }
+ }
+
+ fDescriptorSetManagers.emplace_back(fGpu, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+ visibilities);
+ *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.count() - 1);
+}
+
+VkDescriptorSetLayout GrVkResourceProvider::getUniformDSLayout() const {
+ SkASSERT(fUniformDSHandle.isValid());
+ return fDescriptorSetManagers[fUniformDSHandle.toIndex()].layout();
+}
+
+VkDescriptorSetLayout GrVkResourceProvider::getSamplerDSLayout(
+ const GrVkDescriptorSetManager::Handle& handle) const {
+ SkASSERT(handle.isValid());
+ return fDescriptorSetManagers[handle.toIndex()].layout();
+}
+
+const GrVkDescriptorSet* GrVkResourceProvider::getUniformDescriptorSet() {
+ SkASSERT(fUniformDSHandle.isValid());
+ return fDescriptorSetManagers[fUniformDSHandle.toIndex()].getDescriptorSet(fGpu,
+ fUniformDSHandle);
+}
+
+const GrVkDescriptorSet* GrVkResourceProvider::getSamplerDescriptorSet(
+ const GrVkDescriptorSetManager::Handle& handle) {
+ SkASSERT(handle.isValid());
+ return fDescriptorSetManagers[handle.toIndex()].getDescriptorSet(fGpu, handle);
+}
+
+void GrVkResourceProvider::recycleDescriptorSet(const GrVkDescriptorSet* descSet,
+ const GrVkDescriptorSetManager::Handle& handle) {
+ SkASSERT(descSet);
+ SkASSERT(handle.isValid());
+ int managerIdx = handle.toIndex();
+ SkASSERT(managerIdx < fDescriptorSetManagers.count());
+ fDescriptorSetManagers[managerIdx].recycleDescriptorSet(descSet);
+}
+
+GrVkPrimaryCommandBuffer* GrVkResourceProvider::findOrCreatePrimaryCommandBuffer() {
+ GrVkPrimaryCommandBuffer* cmdBuffer = nullptr;
+ int count = fAvailableCommandBuffers.count();
+ if (count > 0) {
+ cmdBuffer = fAvailableCommandBuffers[count - 1];
+ SkASSERT(cmdBuffer->finished(fGpu));
+ fAvailableCommandBuffers.removeShuffle(count - 1);
+ } else {
+ cmdBuffer = GrVkPrimaryCommandBuffer::Create(fGpu, fGpu->cmdPool());
+ }
+ fActiveCommandBuffers.push_back(cmdBuffer);
+ cmdBuffer->ref();
+ return cmdBuffer;
+}
+
+void GrVkResourceProvider::checkCommandBuffers() {
+ for (int i = fActiveCommandBuffers.count()-1; i >= 0; --i) {
+ if (fActiveCommandBuffers[i]->finished(fGpu)) {
+ GrVkPrimaryCommandBuffer* cmdBuffer = fActiveCommandBuffers[i];
+ cmdBuffer->reset(fGpu);
+ fAvailableCommandBuffers.push_back(cmdBuffer);
+ fActiveCommandBuffers.removeShuffle(i);
+ }
+ }
+}
+
+GrVkSecondaryCommandBuffer* GrVkResourceProvider::findOrCreateSecondaryCommandBuffer() {
+ GrVkSecondaryCommandBuffer* cmdBuffer = nullptr;
+ int count = fAvailableSecondaryCommandBuffers.count();
+ if (count > 0) {
+ cmdBuffer = fAvailableSecondaryCommandBuffers[count-1];
+ fAvailableSecondaryCommandBuffers.removeShuffle(count - 1);
+ } else {
+ cmdBuffer = GrVkSecondaryCommandBuffer::Create(fGpu, fGpu->cmdPool());
+ }
+ return cmdBuffer;
+}
+
+void GrVkResourceProvider::recycleSecondaryCommandBuffer(GrVkSecondaryCommandBuffer* cb) {
+ cb->reset(fGpu);
+ fAvailableSecondaryCommandBuffers.push_back(cb);
+}
+
+const GrVkResource* GrVkResourceProvider::findOrCreateStandardUniformBufferResource() {
+ const GrVkResource* resource = nullptr;
+ int count = fAvailableUniformBufferResources.count();
+ if (count > 0) {
+ resource = fAvailableUniformBufferResources[count - 1];
+ fAvailableUniformBufferResources.removeShuffle(count - 1);
+ } else {
+ resource = GrVkUniformBuffer::CreateResource(fGpu, GrVkUniformBuffer::kStandardSize);
+ }
+ return resource;
+}
+
+void GrVkResourceProvider::recycleStandardUniformBufferResource(const GrVkResource* resource) {
+ fAvailableUniformBufferResources.push_back(resource);
+}
+
+void GrVkResourceProvider::destroyResources() {
+ // release our active command buffers
+ for (int i = 0; i < fActiveCommandBuffers.count(); ++i) {
+ SkASSERT(fActiveCommandBuffers[i]->finished(fGpu));
+ SkASSERT(fActiveCommandBuffers[i]->unique());
+ fActiveCommandBuffers[i]->reset(fGpu);
+ fActiveCommandBuffers[i]->unref(fGpu);
+ }
+ fActiveCommandBuffers.reset();
+ // release our available command buffers
+ for (int i = 0; i < fAvailableCommandBuffers.count(); ++i) {
+ SkASSERT(fAvailableCommandBuffers[i]->finished(fGpu));
+ SkASSERT(fAvailableCommandBuffers[i]->unique());
+ fAvailableCommandBuffers[i]->unref(fGpu);
+ }
+ fAvailableCommandBuffers.reset();
+
+ // release our available secondary command buffers
+ for (int i = 0; i < fAvailableSecondaryCommandBuffers.count(); ++i) {
+ SkASSERT(fAvailableSecondaryCommandBuffers[i]->unique());
+ fAvailableSecondaryCommandBuffers[i]->unref(fGpu);
+ }
+ fAvailableSecondaryCommandBuffers.reset();
+
+ // Release all copy pipelines
+ for (int i = 0; i < fCopyPipelines.count(); ++i) {
+ fCopyPipelines[i]->unref(fGpu);
+ }
+
+ // loop over all render pass sets to make sure we destroy all the internal VkRenderPasses
+ for (int i = 0; i < fRenderPassArray.count(); ++i) {
+ fRenderPassArray[i].releaseResources(fGpu);
+ }
+ fRenderPassArray.reset();
+
+ // Iterate through all stored GrVkSamplers and unref them before resetting the hash.
+ SkTDynamicHash<GrVkSampler, uint16_t>::Iter iter(&fSamplers);
+ for (; !iter.done(); ++iter) {
+ (*iter).unref(fGpu);
+ }
+ fSamplers.reset();
+
+ fPipelineStateCache->release();
+
+ GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineCache(fGpu->device(), fPipelineCache, nullptr));
+ fPipelineCache = VK_NULL_HANDLE;
+
+ // We must release/destroy all command buffers and pipeline states before releasing the
+ // GrVkDescriptorSetManagers
+ for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
+ fDescriptorSetManagers[i].release(fGpu);
+ }
+ fDescriptorSetManagers.reset();
+
+ // release our uniform buffers
+ for (int i = 0; i < fAvailableUniformBufferResources.count(); ++i) {
+ SkASSERT(fAvailableUniformBufferResources[i]->unique());
+ fAvailableUniformBufferResources[i]->unref(fGpu);
+ }
+ fAvailableUniformBufferResources.reset();
+}
+
+void GrVkResourceProvider::abandonResources() {
+ // release our active command buffers
+ for (int i = 0; i < fActiveCommandBuffers.count(); ++i) {
+ SkASSERT(fActiveCommandBuffers[i]->finished(fGpu));
+ SkASSERT(fActiveCommandBuffers[i]->unique());
+ fActiveCommandBuffers[i]->unrefAndAbandon();
+ }
+ fActiveCommandBuffers.reset();
+ // release our available command buffers
+ for (int i = 0; i < fAvailableCommandBuffers.count(); ++i) {
+ SkASSERT(fAvailableCommandBuffers[i]->finished(fGpu));
+ SkASSERT(fAvailableCommandBuffers[i]->unique());
+ fAvailableCommandBuffers[i]->unrefAndAbandon();
+ }
+ fAvailableCommandBuffers.reset();
+
+ // release our available secondary command buffers
+ for (int i = 0; i < fAvailableSecondaryCommandBuffers.count(); ++i) {
+ SkASSERT(fAvailableSecondaryCommandBuffers[i]->unique());
+ fAvailableSecondaryCommandBuffers[i]->unrefAndAbandon();
+ }
+ fAvailableSecondaryCommandBuffers.reset();
+
+ // Abandon all copy pipelines
+ for (int i = 0; i < fCopyPipelines.count(); ++i) {
+ fCopyPipelines[i]->unrefAndAbandon();
+ }
+
+ // loop over all render pass sets to make sure we destroy all the internal VkRenderPasses
+ for (int i = 0; i < fRenderPassArray.count(); ++i) {
+ fRenderPassArray[i].abandonResources();
+ }
+ fRenderPassArray.reset();
+
+ // Iterate through all stored GrVkSamplers and unrefAndAbandon them before resetting the hash.
+ SkTDynamicHash<GrVkSampler, uint16_t>::Iter iter(&fSamplers);
+ for (; !iter.done(); ++iter) {
+ (*iter).unrefAndAbandon();
+ }
+ fSamplers.reset();
+
+ fPipelineStateCache->abandon();
+
+ fPipelineCache = VK_NULL_HANDLE;
+
+ // We must abandon all command buffers and pipeline states before abandoning the
+ // GrVkDescriptorSetManagers
+ for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
+ fDescriptorSetManagers[i].abandon();
+ }
+ fDescriptorSetManagers.reset();
+
+ // release our uniform buffers
+ for (int i = 0; i < fAvailableUniformBufferResources.count(); ++i) {
+ SkASSERT(fAvailableUniformBufferResources[i]->unique());
+ fAvailableUniformBufferResources[i]->unrefAndAbandon();
+ }
+ fAvailableUniformBufferResources.reset();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrVkResourceProvider::CompatibleRenderPassSet::CompatibleRenderPassSet(
+ const GrVkGpu* gpu,
+ const GrVkRenderTarget& target)
+ : fLastReturnedIndex(0) {
+ fRenderPasses.emplace_back(new GrVkRenderPass());
+ fRenderPasses[0]->initSimple(gpu, target);
+}
+
+bool GrVkResourceProvider::CompatibleRenderPassSet::isCompatible(
+ const GrVkRenderTarget& target) const {
+ // The first GrVkRenderPass should always exist since we create the basic load/store
+ // render pass on creation.
+ SkASSERT(fRenderPasses[0]);
+ return fRenderPasses[0]->isCompatible(target);
+}
+
+GrVkRenderPass* GrVkResourceProvider::CompatibleRenderPassSet::getRenderPass(
+ const GrVkGpu* gpu,
+ const GrVkRenderPass::LoadStoreOps& colorOps,
+ const GrVkRenderPass::LoadStoreOps& stencilOps) {
+ for (int i = 0; i < fRenderPasses.count(); ++i) {
+ int idx = (i + fLastReturnedIndex) % fRenderPasses.count();
+ if (fRenderPasses[idx]->equalLoadStoreOps(colorOps, stencilOps)) {
+ fLastReturnedIndex = idx;
+ return fRenderPasses[idx];
+ }
+ }
+ GrVkRenderPass* renderPass = fRenderPasses.emplace_back(new GrVkRenderPass());
+ renderPass->init(gpu, *this->getCompatibleRenderPass(), colorOps, stencilOps);
+ fLastReturnedIndex = fRenderPasses.count() - 1;
+ return renderPass;
+}
+
+void GrVkResourceProvider::CompatibleRenderPassSet::releaseResources(const GrVkGpu* gpu) {
+ for (int i = 0; i < fRenderPasses.count(); ++i) {
+ if (fRenderPasses[i]) {
+ fRenderPasses[i]->unref(gpu);
+ fRenderPasses[i] = nullptr;
+ }
+ }
+}
+
+void GrVkResourceProvider::CompatibleRenderPassSet::abandonResources() {
+ for (int i = 0; i < fRenderPasses.count(); ++i) {
+ if (fRenderPasses[i]) {
+ fRenderPasses[i]->unrefAndAbandon();
+ fRenderPasses[i] = nullptr;
+ }
+ }
+}
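
One detail worth calling out in CompatibleRenderPassSet::getRenderPass() above: the search starts at fLastReturnedIndex, so a stretch of draws that keep requesting the same load/store combination hits on the first probe. Below is a standalone sketch of that most-recently-used rotation over a small linear cache; the names are illustrative.

    #include <cstdio>
    #include <vector>

    // Linear cache probed starting at the last hit; appends on miss.
    class MruIndexCache {
    public:
        int findOrAdd(int key) {
            const int n = static_cast<int>(fKeys.size());
            for (int i = 0; i < n; ++i) {
                const int idx = (i + fLastReturnedIndex) % n;
                if (fKeys[idx] == key) {
                    fLastReturnedIndex = idx;   // next search starts here
                    return idx;
                }
            }
            fKeys.push_back(key);
            fLastReturnedIndex = static_cast<int>(fKeys.size()) - 1;
            return fLastReturnedIndex;
        }

    private:
        std::vector<int> fKeys;
        int fLastReturnedIndex = 0;
    };

    int main() {
        MruIndexCache cache;
        std::printf("%d\n", cache.findOrAdd(7));   // miss -> index 0
        std::printf("%d\n", cache.findOrAdd(9));   // miss -> index 1
        std::printf("%d\n", cache.findOrAdd(9));   // hit on the first probe -> 1
        std::printf("%d\n", cache.findOrAdd(7));   // found at index 0
        return 0;
    }
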
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkResourceProvider.h b/gfx/skia/skia/src/gpu/vk/GrVkResourceProvider.h
new file mode 100644
index 000000000..8200123e2
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkResourceProvider.h
@@ -0,0 +1,258 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkResourceProvider_DEFINED
+#define GrVkResourceProvider_DEFINED
+
+#include "GrGpu.h"
+#include "GrResourceHandle.h"
+#include "GrVkDescriptorPool.h"
+#include "GrVkDescriptorSetManager.h"
+#include "GrVkPipelineState.h"
+#include "GrVkRenderPass.h"
+#include "GrVkResource.h"
+#include "GrVkUtil.h"
+#include "SkTArray.h"
+#include "SkTDynamicHash.h"
+#include "SkTHash.h"
+#include "SkTInternalLList.h"
+
+#include "vk/GrVkDefines.h"
+
+class GrPipeline;
+class GrPrimitiveProcessor;
+class GrTextureParams;
+class GrVkCopyPipeline;
+class GrVkGpu;
+class GrVkPipeline;
+class GrVkPrimaryCommandBuffer;
+class GrVkRenderTarget;
+class GrVkSampler;
+class GrVkSecondaryCommandBuffer;
+class GrVkUniformHandler;
+
+class GrVkResourceProvider {
+public:
+ GrVkResourceProvider(GrVkGpu* gpu);
+ ~GrVkResourceProvider();
+
+ // Set up any initial vk objects
+ void init();
+
+ GrVkPipeline* createPipeline(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ VkPipelineShaderStageCreateInfo* shaderStageInfo,
+ int shaderStageCount,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass,
+ VkPipelineLayout layout);
+
+ GrVkCopyPipeline* findOrCreateCopyPipeline(const GrVkRenderTarget* dst,
+ VkPipelineShaderStageCreateInfo*,
+ VkPipelineLayout);
+
+ GR_DEFINE_RESOURCE_HANDLE_CLASS(CompatibleRPHandle);
+
+    // Finds or creates a simple render pass that matches the target, increments the refcount,
+    // and returns. The caller can optionally pass in a pointer to a CompatibleRPHandle. If this
+    // is non-null, it will be set to a handle that can be used in the future to quickly return a
+    // compatible GrVkRenderPass without needing to inspect a GrVkRenderTarget.
+ const GrVkRenderPass* findCompatibleRenderPass(const GrVkRenderTarget& target,
+ CompatibleRPHandle* compatibleHandle = nullptr);
+ // The CompatibleRPHandle must be a valid handle previously set by a call to
+ // findCompatibleRenderPass(GrVkRenderTarget&, CompatibleRPHandle*).
+ const GrVkRenderPass* findCompatibleRenderPass(const CompatibleRPHandle& compatibleHandle);
+
+    // Finds or creates a render pass that matches the target and LoadStoreOps, increments the
+    // refcount, and returns. The caller can optionally pass in a pointer to a CompatibleRPHandle.
+    // If this is non-null, it will be set to a handle that can be used in the future to quickly
+    // return a GrVkRenderPass without needing to inspect a GrVkRenderTarget.
+ const GrVkRenderPass* findRenderPass(const GrVkRenderTarget& target,
+ const GrVkRenderPass::LoadStoreOps& colorOps,
+ const GrVkRenderPass::LoadStoreOps& stencilOps,
+ CompatibleRPHandle* compatibleHandle = nullptr);
+
+ // The CompatibleRPHandle must be a valid handle previously set by a call to findRenderPass or
+ // findCompatibleRenderPass.
+ const GrVkRenderPass* findRenderPass(const CompatibleRPHandle& compatibleHandle,
+ const GrVkRenderPass::LoadStoreOps& colorOps,
+ const GrVkRenderPass::LoadStoreOps& stencilOps);
+
+ GrVkPrimaryCommandBuffer* findOrCreatePrimaryCommandBuffer();
+ void checkCommandBuffers();
+
+ GrVkSecondaryCommandBuffer* findOrCreateSecondaryCommandBuffer();
+ void recycleSecondaryCommandBuffer(GrVkSecondaryCommandBuffer* cb);
+
+ // Finds or creates a compatible GrVkDescriptorPool for the requested type and count.
+ // The refcount is incremented and a pointer returned.
+ // TODO: Currently this will just create a descriptor pool without holding onto a ref itself
+    //       so we currently do not reuse them. Requires knowing if another draw is currently using
+ // the GrVkDescriptorPool, the ability to reset pools, and the ability to purge pools out
+ // of our cache of GrVkDescriptorPools.
+ GrVkDescriptorPool* findOrCreateCompatibleDescriptorPool(VkDescriptorType type, uint32_t count);
+
+ // Finds or creates a compatible GrVkSampler based on the GrTextureParams.
+ // The refcount is incremented and a pointer returned.
+ GrVkSampler* findOrCreateCompatibleSampler(const GrTextureParams&, uint32_t mipLevels);
+
+ sk_sp<GrVkPipelineState> findOrCreateCompatiblePipelineState(const GrPipeline&,
+ const GrPrimitiveProcessor&,
+ GrPrimitiveType,
+ const GrVkRenderPass& renderPass);
+
+ void getSamplerDescriptorSetHandle(const GrVkUniformHandler&,
+ GrVkDescriptorSetManager::Handle* handle);
+ void getSamplerDescriptorSetHandle(const SkTArray<uint32_t>& visibilities,
+ GrVkDescriptorSetManager::Handle* handle);
+
+ // Returns the compatible VkDescriptorSetLayout to use for uniform buffers. The caller does not
+ // own the VkDescriptorSetLayout and thus should not delete it. This function should be used
+ // when the caller needs the layout to create a VkPipelineLayout.
+ VkDescriptorSetLayout getUniformDSLayout() const;
+
+ // Returns the compatible VkDescriptorSetLayout to use for a specific sampler handle. The caller
+ // does not own the VkDescriptorSetLayout and thus should not delete it. This function should be
+ // used when the caller needs the layout to create a VkPipelineLayout.
+ VkDescriptorSetLayout getSamplerDSLayout(const GrVkDescriptorSetManager::Handle&) const;
+
+ // Returns a GrVkDescriptorSet that can be used for uniform buffers. The GrVkDescriptorSet
+ // is already reffed for the caller.
+ const GrVkDescriptorSet* getUniformDescriptorSet();
+
+ // Returns a GrVkDescriptorSet that can be used for sampler descriptors that are compatible with
+ // the GrVkDescriptorSetManager::Handle passed in. The GrVkDescriptorSet is already reffed for
+ // the caller.
+ const GrVkDescriptorSet* getSamplerDescriptorSet(const GrVkDescriptorSetManager::Handle&);
+
+
+    // Signals that the given descriptor set, which is compatible with the passed-in handle,
+ // can be reused by the next allocation request.
+ void recycleDescriptorSet(const GrVkDescriptorSet* descSet,
+ const GrVkDescriptorSetManager::Handle&);
+
+ // Creates or finds free uniform buffer resources of size GrVkUniformBuffer::kStandardSize.
+ // Anything larger will need to be created and released by the client.
+ const GrVkResource* findOrCreateStandardUniformBufferResource();
+
+ // Signals that the resource passed to it (which should be a uniform buffer resource)
+ // can be reused by the next uniform buffer resource request.
+ void recycleStandardUniformBufferResource(const GrVkResource*);
+
+ // Destroy any cached resources. To be called before destroying the VkDevice.
+ // The assumption is that all queues are idle and all command buffers are finished.
+ // For resource tracing to work properly, this should be called after unrefing all other
+ // resource usages.
+ void destroyResources();
+
+ // Abandon any cached resources. To be used when the context/VkDevice is lost.
+ // For resource tracing to work properly, this should be called after unrefing all other
+ // resource usages.
+ void abandonResources();
+
+private:
+#ifdef SK_DEBUG
+#define GR_PIPELINE_STATE_CACHE_STATS
+#endif
+
+ class PipelineStateCache : public ::SkNoncopyable {
+ public:
+ PipelineStateCache(GrVkGpu* gpu);
+ ~PipelineStateCache();
+
+ void abandon();
+ void release();
+ sk_sp<GrVkPipelineState> refPipelineState(const GrPipeline&,
+ const GrPrimitiveProcessor&,
+ GrPrimitiveType,
+ const GrVkRenderPass& renderPass);
+
+ private:
+ enum {
+            // We may actually have kMaxEntries+1 PipelineStates in existence because we create a
+            // new PipelineState before evicting from the cache.
+ kMaxEntries = 128,
+ };
+
+ struct Entry;
+
+ void reset();
+
+ int fCount;
+ SkTHashTable<Entry*, const GrVkPipelineState::Desc&, Entry> fHashTable;
+ SkTInternalLList<Entry> fLRUList;
+
+ GrVkGpu* fGpu;
+
+#ifdef GR_PIPELINE_STATE_CACHE_STATS
+ int fTotalRequests;
+ int fCacheMisses;
+#endif
+ };
+
+ class CompatibleRenderPassSet {
+ public:
+ // This will always construct the basic load store render pass (all attachments load and
+ // store their data) so that there is at least one compatible VkRenderPass that can be used
+ // with this set.
+ CompatibleRenderPassSet(const GrVkGpu* gpu, const GrVkRenderTarget& target);
+
+ bool isCompatible(const GrVkRenderTarget& target) const;
+
+ GrVkRenderPass* getCompatibleRenderPass() const {
+            // The first GrVkRenderPass should always exist since we create the basic load/store
+            // render pass on creation.
+ SkASSERT(fRenderPasses[0]);
+ return fRenderPasses[0];
+ }
+
+ GrVkRenderPass* getRenderPass(const GrVkGpu* gpu,
+ const GrVkRenderPass::LoadStoreOps& colorOps,
+ const GrVkRenderPass::LoadStoreOps& stencilOps);
+
+ void releaseResources(const GrVkGpu* gpu);
+ void abandonResources();
+
+ private:
+ SkSTArray<4, GrVkRenderPass*> fRenderPasses;
+ int fLastReturnedIndex;
+ };
+
+ GrVkGpu* fGpu;
+
+ // Central cache for creating pipelines
+ VkPipelineCache fPipelineCache;
+
+ // Cache of previously created copy pipelines
+ SkTArray<GrVkCopyPipeline*> fCopyPipelines;
+
+ SkSTArray<4, CompatibleRenderPassSet> fRenderPassArray;
+
+ // Array of PrimaryCommandBuffers that are currently in flight
+ SkSTArray<4, GrVkPrimaryCommandBuffer*, true> fActiveCommandBuffers;
+ // Array of available primary command buffers that are not in flight
+ SkSTArray<4, GrVkPrimaryCommandBuffer*, true> fAvailableCommandBuffers;
+
+ // Array of available secondary command buffers
+ SkSTArray<16, GrVkSecondaryCommandBuffer*, true> fAvailableSecondaryCommandBuffers;
+
+ // Array of available uniform buffer resources
+ SkSTArray<16, const GrVkResource*, true> fAvailableUniformBufferResources;
+
+ // Stores GrVkSampler objects that we've already created so we can reuse them across multiple
+ // GrVkPipelineStates
+ SkTDynamicHash<GrVkSampler, uint16_t> fSamplers;
+
+ // Cache of GrVkPipelineStates
+ PipelineStateCache* fPipelineStateCache;
+
+ SkSTArray<4, GrVkDescriptorSetManager, true> fDescriptorSetManagers;
+
+ GrVkDescriptorSetManager::Handle fUniformDSHandle;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkSampler.cpp b/gfx/skia/skia/src/gpu/vk/GrVkSampler.cpp
new file mode 100644
index 000000000..1d4e7066a
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkSampler.cpp
@@ -0,0 +1,97 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkSampler.h"
+
+#include "GrTextureAccess.h"
+#include "GrVkGpu.h"
+
+static inline VkSamplerAddressMode tile_to_vk_sampler_address(SkShader::TileMode tm) {
+ static const VkSamplerAddressMode gWrapModes[] = {
+ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
+ VK_SAMPLER_ADDRESS_MODE_REPEAT,
+ VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT
+ };
+ GR_STATIC_ASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gWrapModes));
+ GR_STATIC_ASSERT(0 == SkShader::kClamp_TileMode);
+ GR_STATIC_ASSERT(1 == SkShader::kRepeat_TileMode);
+ GR_STATIC_ASSERT(2 == SkShader::kMirror_TileMode);
+ return gWrapModes[tm];
+}
+
+GrVkSampler* GrVkSampler::Create(const GrVkGpu* gpu, const GrTextureParams& params,
+ uint32_t mipLevels) {
+ static VkFilter vkMinFilterModes[] = {
+ VK_FILTER_NEAREST,
+ VK_FILTER_LINEAR,
+ VK_FILTER_LINEAR
+ };
+ static VkFilter vkMagFilterModes[] = {
+ VK_FILTER_NEAREST,
+ VK_FILTER_LINEAR,
+ VK_FILTER_LINEAR
+ };
+
+ VkSamplerCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkSamplerCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+ createInfo.pNext = 0;
+ createInfo.flags = 0;
+ createInfo.magFilter = vkMagFilterModes[params.filterMode()];
+ createInfo.minFilter = vkMinFilterModes[params.filterMode()];
+ createInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
+ createInfo.addressModeU = tile_to_vk_sampler_address(params.getTileModeX());
+ createInfo.addressModeV = tile_to_vk_sampler_address(params.getTileModeY());
+ createInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; // Shouldn't matter
+ createInfo.mipLodBias = 0.0f;
+ createInfo.anisotropyEnable = VK_FALSE;
+ createInfo.maxAnisotropy = 1.0f;
+ createInfo.compareEnable = VK_FALSE;
+ createInfo.compareOp = VK_COMPARE_OP_NEVER;
+ // Vulkan doesn't have a direct mapping of GL's nearest or linear filters for minFilter since
+ // there is always a mipmapMode. To get the same effect as GL we can set minLod = maxLod = 0.0.
+ // This works since our min and mag filters are the same (this forces us to use mag on the 0
+ // level mip). If the filters weren't the same we could set min = 0 and max = 0.25 to force
+ // the minFilter on mip level 0.
+ createInfo.minLod = 0.0f;
+ bool useMipMaps = GrTextureParams::kMipMap_FilterMode == params.filterMode() && mipLevels > 1;
+ createInfo.maxLod = !useMipMaps ? 0.0f : (float)(mipLevels);
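+    // Illustrative values: with kMipMap_FilterMode and mipLevels = 5 this yields minLod = 0.0 and
+    // maxLod = 5.0; any other filter mode (or a single mip level) yields minLod = maxLod = 0.0.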
+ createInfo.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
+ createInfo.unnormalizedCoordinates = VK_FALSE;
+
+ VkSampler sampler;
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateSampler(gpu->device(),
+ &createInfo,
+ nullptr,
+ &sampler));
+
+ return new GrVkSampler(sampler, GenerateKey(params, mipLevels));
+}
+
+void GrVkSampler::freeGPUData(const GrVkGpu* gpu) const {
+ SkASSERT(fSampler);
+ GR_VK_CALL(gpu->vkInterface(), DestroySampler(gpu->device(), fSampler, nullptr));
+}
+
+uint16_t GrVkSampler::GenerateKey(const GrTextureParams& params, uint32_t mipLevels) {
+ const int kTileModeXShift = 2;
+ const int kTileModeYShift = 4;
+ const int kMipLevelShift = 6;
+
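+    // Key layout (low bits to high): filter mode in bits 0-1, tile mode X in bits 2-3,
+    // tile mode Y in bits 4-5, mip level count from bit 6 up. For example, assuming
+    // kBilerp_FilterMode == 1, a bilerp/repeat-X/clamp-Y sampler with 5 mip levels packs to
+    // 1 | (1 << 2) | (0 << 4) | (5 << 6) = 0x145.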
+ uint16_t key = params.filterMode();
+
+ SkASSERT(params.filterMode() <= 3);
+ key |= (params.getTileModeX() << kTileModeXShift);
+
+ GR_STATIC_ASSERT(SkShader::kTileModeCount <= 4);
+ key |= (params.getTileModeY() << kTileModeYShift);
+
+ SkASSERT(mipLevels < 1024);
+ key |= (mipLevels << kMipLevelShift);
+
+ return key;
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkSampler.h b/gfx/skia/skia/src/gpu/vk/GrVkSampler.h
new file mode 100644
index 000000000..c0f60e421
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkSampler.h
@@ -0,0 +1,49 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkSampler_DEFINED
+#define GrVkSampler_DEFINED
+
+#include "GrVkResource.h"
+
+#include "vk/GrVkDefines.h"
+
+class GrTextureAccess;
+class GrTextureParams;
+class GrVkGpu;
+
+
+class GrVkSampler : public GrVkResource {
+public:
+ static GrVkSampler* Create(const GrVkGpu* gpu, const GrTextureParams&, uint32_t mipLevels);
+
+ VkSampler sampler() const { return fSampler; }
+
+ // Helpers for hashing GrVkSampler
+ static uint16_t GenerateKey(const GrTextureParams&, uint32_t mipLevels);
+
+ static const uint16_t& GetKey(const GrVkSampler& sampler) { return sampler.fKey; }
+ static uint32_t Hash(const uint16_t& key) { return key; }
+
+#ifdef SK_TRACE_VK_RESOURCES
+ void dumpInfo() const override {
+ SkDebugf("GrVkSampler: %d (%d refs)\n", fSampler, this->getRefCnt());
+ }
+#endif
+
+private:
+ GrVkSampler(VkSampler sampler, uint16_t key) : INHERITED(), fSampler(sampler), fKey(key) {}
+
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ VkSampler fSampler;
+ uint16_t fKey;
+
+ typedef GrVkResource INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkStencilAttachment.cpp b/gfx/skia/skia/src/gpu/vk/GrVkStencilAttachment.cpp
new file mode 100644
index 000000000..8e5940df9
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkStencilAttachment.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkStencilAttachment.h"
+#include "GrVkGpu.h"
+#include "GrVkImage.h"
+#include "GrVkImageView.h"
+#include "GrVkUtil.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+GrVkStencilAttachment::GrVkStencilAttachment(GrVkGpu* gpu,
+ const Format& format,
+ const GrVkImage::ImageDesc& desc,
+ const GrVkImageInfo& info,
+ const GrVkImageView* stencilView)
+ : GrStencilAttachment(gpu, desc.fWidth, desc.fHeight, format.fStencilBits, desc.fSamples)
+ , GrVkImage(info, GrVkImage::kNot_Wrapped)
+ , fFormat(format)
+ , fStencilView(stencilView) {
+ this->registerWithCache(SkBudgeted::kYes);
+ stencilView->ref();
+}
+
+GrVkStencilAttachment* GrVkStencilAttachment::Create(GrVkGpu* gpu,
+ int width,
+ int height,
+ int sampleCnt,
+ const Format& format) {
+ GrVkImage::ImageDesc imageDesc;
+ imageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ imageDesc.fFormat = format.fInternalFormat;
+ imageDesc.fWidth = width;
+ imageDesc.fHeight = height;
+ imageDesc.fLevels = 1;
+ imageDesc.fSamples = sampleCnt;
+ imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ imageDesc.fUsageFlags = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ GrVkImageInfo info;
+ if (!GrVkImage::InitImageInfo(gpu, imageDesc, &info)) {
+ return nullptr;
+ }
+
+ const GrVkImageView* imageView = GrVkImageView::Create(gpu, info.fImage,
+ format.fInternalFormat,
+ GrVkImageView::kStencil_Type, 1);
+ if (!imageView) {
+ GrVkImage::DestroyImageInfo(gpu, &info);
+ return nullptr;
+ }
+
+ GrVkStencilAttachment* stencil = new GrVkStencilAttachment(gpu, format, imageDesc,
+ info, imageView);
+ imageView->unref(gpu);
+
+ return stencil;
+}
+
+GrVkStencilAttachment::~GrVkStencilAttachment() {
+ // should have been released or abandoned first
+ SkASSERT(!fStencilView);
+}
+
+size_t GrVkStencilAttachment::onGpuMemorySize() const {
+ uint64_t size = this->width();
+ size *= this->height();
+ size *= fFormat.fTotalBits;
+    size *= SkTMax(1, this->numSamples());
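+    // For example, a 1024x1024 stencil attachment with fTotalBits == 32 and 4 samples reports
+    // 1024 * 1024 * 32 * 4 / 8 = 16 MiB.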
+ return static_cast<size_t>(size / 8);
+}
+
+void GrVkStencilAttachment::onRelease() {
+ GrVkGpu* gpu = this->getVkGpu();
+
+ this->releaseImage(gpu);
+
+ fStencilView->unref(gpu);
+ fStencilView = nullptr;
+ GrStencilAttachment::onRelease();
+}
+
+void GrVkStencilAttachment::onAbandon() {
+ this->abandonImage();
+ fStencilView->unrefAndAbandon();
+ fStencilView = nullptr;
+ GrStencilAttachment::onAbandon();
+}
+
+GrVkGpu* GrVkStencilAttachment::getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkStencilAttachment.h b/gfx/skia/skia/src/gpu/vk/GrVkStencilAttachment.h
new file mode 100644
index 000000000..f6bf19af4
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkStencilAttachment.h
@@ -0,0 +1,57 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkStencil_DEFINED
+#define GrVkStencil_DEFINED
+
+#include "GrStencilAttachment.h"
+#include "GrVkImage.h"
+#include "vk/GrVkDefines.h"
+
+class GrVkImageView;
+class GrVkGpu;
+
+class GrVkStencilAttachment : public GrStencilAttachment, public GrVkImage {
+public:
+ struct Format {
+ VkFormat fInternalFormat;
+ int fStencilBits;
+ int fTotalBits;
+ bool fPacked;
+ };
+
+ static GrVkStencilAttachment* Create(GrVkGpu* gpu, int width, int height,
+ int sampleCnt, const Format& format);
+
+ ~GrVkStencilAttachment() override;
+
+ const GrVkResource* imageResource() const { return this->resource(); }
+ const GrVkImageView* stencilView() const { return fStencilView; }
+
+ VkFormat vkFormat() const { return fFormat.fInternalFormat; }
+
+protected:
+ void onRelease() override;
+ void onAbandon() override;
+
+private:
+ size_t onGpuMemorySize() const override;
+
+ GrVkStencilAttachment(GrVkGpu* gpu,
+ const Format& format,
+ const GrVkImage::ImageDesc&,
+ const GrVkImageInfo&,
+ const GrVkImageView* stencilView);
+
+ GrVkGpu* getVkGpu() const;
+
+ Format fFormat;
+
+ const GrVkImageView* fStencilView;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkTexture.cpp b/gfx/skia/skia/src/gpu/vk/GrVkTexture.cpp
new file mode 100644
index 000000000..bf399a835
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkTexture.cpp
@@ -0,0 +1,233 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkTexture.h"
+#include "GrVkGpu.h"
+#include "GrVkImageView.h"
+#include "GrTexturePriv.h"
+#include "GrVkTextureRenderTarget.h"
+#include "GrVkUtil.h"
+
+#include "vk/GrVkTypes.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+// Because this class is virtually derived from GrSurface we must explicitly call its constructor.
+GrVkTexture::GrVkTexture(GrVkGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ const GrVkImageView* view)
+ : GrSurface(gpu, desc)
+ , GrVkImage(info, GrVkImage::kNot_Wrapped)
+ , INHERITED(gpu, desc, kTexture2DSampler_GrSLType, desc.fIsMipMapped)
+ , fTextureView(view)
+ , fLinearTextureView(nullptr) {
+ this->registerWithCache(budgeted);
+}
+
+GrVkTexture::GrVkTexture(GrVkGpu* gpu,
+ Wrapped,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ const GrVkImageView* view,
+ GrVkImage::Wrapped wrapped)
+ : GrSurface(gpu, desc)
+ , GrVkImage(info, wrapped)
+ , INHERITED(gpu, desc, kTexture2DSampler_GrSLType, desc.fIsMipMapped)
+ , fTextureView(view)
+ , fLinearTextureView(nullptr) {
+ this->registerWithCacheWrapped();
+}
+
+// Because this class is virtually derived from GrSurface we must explicitly call its constructor.
+GrVkTexture::GrVkTexture(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ const GrVkImageView* view,
+ GrVkImage::Wrapped wrapped)
+ : GrSurface(gpu, desc)
+ , GrVkImage(info, wrapped)
+ , INHERITED(gpu, desc, kTexture2DSampler_GrSLType, desc.fIsMipMapped)
+ , fTextureView(view)
+ , fLinearTextureView(nullptr) {
+}
+
+GrVkTexture* GrVkTexture::CreateNewTexture(GrVkGpu* gpu, SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ const GrVkImage::ImageDesc& imageDesc) {
+ SkASSERT(imageDesc.fUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT);
+
+ GrVkImageInfo info;
+ if (!GrVkImage::InitImageInfo(gpu, imageDesc, &info)) {
+ return nullptr;
+ }
+
+ const GrVkImageView* imageView = GrVkImageView::Create(gpu, info.fImage, info.fFormat,
+ GrVkImageView::kColor_Type,
+ info.fLevelCount);
+ if (!imageView) {
+ GrVkImage::DestroyImageInfo(gpu, &info);
+ return nullptr;
+ }
+
+ return new GrVkTexture(gpu, budgeted, desc, info, imageView);
+}
+
+GrVkTexture* GrVkTexture::CreateWrappedTexture(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrWrapOwnership ownership,
+ const GrVkImageInfo* info) {
+ SkASSERT(info);
+ // Wrapped textures require both image and allocation (because they can be mapped)
+ SkASSERT(VK_NULL_HANDLE != info->fImage && VK_NULL_HANDLE != info->fAlloc.fMemory);
+
+ const GrVkImageView* imageView = GrVkImageView::Create(gpu, info->fImage, info->fFormat,
+ GrVkImageView::kColor_Type,
+ info->fLevelCount);
+ if (!imageView) {
+ return nullptr;
+ }
+
+ GrVkImage::Wrapped wrapped = kBorrow_GrWrapOwnership == ownership ? GrVkImage::kBorrowed_Wrapped
+ : GrVkImage::kAdopted_Wrapped;
+
+ return new GrVkTexture(gpu, kWrapped, desc, *info, imageView, wrapped);
+}
+
+GrVkTexture::~GrVkTexture() {
+ // either release or abandon should have been called by the owner of this object.
+ SkASSERT(!fTextureView);
+ SkASSERT(!fLinearTextureView);
+}
+
+void GrVkTexture::onRelease() {
+ // we create this and don't hand it off, so we should always destroy it
+ if (fTextureView) {
+ fTextureView->unref(this->getVkGpu());
+ fTextureView = nullptr;
+ }
+
+ if (fLinearTextureView) {
+ fLinearTextureView->unref(this->getVkGpu());
+ fLinearTextureView = nullptr;
+ }
+
+ this->releaseImage(this->getVkGpu());
+
+ INHERITED::onRelease();
+}
+
+void GrVkTexture::onAbandon() {
+ if (fTextureView) {
+ fTextureView->unrefAndAbandon();
+ fTextureView = nullptr;
+ }
+
+ if (fLinearTextureView) {
+ fLinearTextureView->unrefAndAbandon();
+ fLinearTextureView = nullptr;
+ }
+
+ this->abandonImage();
+ INHERITED::onAbandon();
+}
+
+GrBackendObject GrVkTexture::getTextureHandle() const {
+ return (GrBackendObject)&fInfo;
+}
+
+GrVkGpu* GrVkTexture::getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+}
+
+const GrVkImageView* GrVkTexture::textureView(bool allowSRGB) {
+ VkFormat linearFormat;
+ if (allowSRGB || !GrVkFormatIsSRGB(fInfo.fFormat, &linearFormat)) {
+ return fTextureView;
+ }
+
+ if (!fLinearTextureView) {
+ fLinearTextureView = GrVkImageView::Create(this->getVkGpu(), fInfo.fImage,
+ linearFormat, GrVkImageView::kColor_Type,
+ fInfo.fLevelCount);
+ SkASSERT(fLinearTextureView);
+ }
+
+ return fLinearTextureView;
+}
+
+bool GrVkTexture::reallocForMipmap(GrVkGpu* gpu, uint32_t mipLevels) {
+ if (mipLevels == 1) {
+        // no reallocation is needed when only a single mip level is requested
+ return false;
+ }
+
+ const GrVkResource* oldResource = this->resource();
+
+ // We shouldn't realloc something that doesn't belong to us
+ if (fIsBorrowed) {
+ return false;
+ }
+
+ bool renderTarget = SkToBool(fDesc.fFlags & kRenderTarget_GrSurfaceFlag);
+
+ VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
+ if (renderTarget) {
+ usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ }
+ usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+
+ GrVkImage::ImageDesc imageDesc;
+ imageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ imageDesc.fFormat = fInfo.fFormat;
+ imageDesc.fWidth = fDesc.fWidth;
+ imageDesc.fHeight = fDesc.fHeight;
+ imageDesc.fLevels = mipLevels;
+ imageDesc.fSamples = 1;
+ imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ imageDesc.fUsageFlags = usageFlags;
+ imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ GrVkImageInfo info;
+ if (!GrVkImage::InitImageInfo(gpu, imageDesc, &info)) {
+ return false;
+ }
+
+ // have to create a new image view for new resource
+ const GrVkImageView* oldView = fTextureView;
+ VkImage image = info.fImage;
+ const GrVkImageView* textureView = GrVkImageView::Create(gpu, image, info.fFormat,
+ GrVkImageView::kColor_Type, mipLevels);
+ if (!textureView) {
+ GrVkImage::DestroyImageInfo(gpu, &info);
+ return false;
+ }
+
+ if (renderTarget) {
+ GrVkTextureRenderTarget* texRT = static_cast<GrVkTextureRenderTarget*>(this);
+ if (!texRT->updateForMipmap(gpu, info)) {
+ GrVkImage::DestroyImageInfo(gpu, &info);
+ return false;
+ }
+ }
+
+ oldResource->unref(gpu);
+ oldView->unref(gpu);
+ if (fLinearTextureView) {
+ fLinearTextureView->unref(gpu);
+ fLinearTextureView = nullptr;
+ }
+
+ this->setNewResource(info.fImage, info.fAlloc, info.fImageTiling);
+ fTextureView = textureView;
+ fInfo = info;
+ this->texturePriv().setMaxMipMapLevel(mipLevels);
+
+ return true;
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkTexture.h b/gfx/skia/skia/src/gpu/vk/GrVkTexture.h
new file mode 100644
index 000000000..06d147584
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkTexture.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkTexture_DEFINED
+#define GrVkTexture_DEFINED
+
+#include "GrGpu.h"
+#include "GrTexture.h"
+#include "GrVkImage.h"
+
+class GrVkGpu;
+class GrVkImageView;
+struct GrVkImageInfo;
+
+class GrVkTexture : public GrTexture, public virtual GrVkImage {
+public:
+ static GrVkTexture* CreateNewTexture(GrVkGpu*, SkBudgeted budgeted, const GrSurfaceDesc&,
+ const GrVkImage::ImageDesc&);
+
+ static GrVkTexture* CreateWrappedTexture(GrVkGpu*, const GrSurfaceDesc&,
+ GrWrapOwnership, const GrVkImageInfo*);
+
+ ~GrVkTexture() override;
+
+ GrBackendObject getTextureHandle() const override;
+
+ void textureParamsModified() override {}
+
+ const GrVkImageView* textureView(bool allowSRGB);
+
+ bool reallocForMipmap(GrVkGpu* gpu, uint32_t mipLevels);
+
+protected:
+ GrVkTexture(GrVkGpu*, const GrSurfaceDesc&, const GrVkImageInfo&, const GrVkImageView*,
+ GrVkImage::Wrapped wrapped);
+
+ GrVkGpu* getVkGpu() const;
+
+ void onAbandon() override;
+ void onRelease() override;
+
+private:
+ enum Wrapped { kWrapped };
+ GrVkTexture(GrVkGpu*, SkBudgeted, const GrSurfaceDesc&,
+ const GrVkImageInfo&, const GrVkImageView* imageView);
+ GrVkTexture(GrVkGpu*, Wrapped, const GrSurfaceDesc&,
+ const GrVkImageInfo&, const GrVkImageView* imageView, GrVkImage::Wrapped wrapped);
+
+ const GrVkImageView* fTextureView;
+ const GrVkImageView* fLinearTextureView;
+
+ typedef GrTexture INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkTextureRenderTarget.cpp b/gfx/skia/skia/src/gpu/vk/GrVkTextureRenderTarget.cpp
new file mode 100644
index 000000000..37b68af82
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkTextureRenderTarget.cpp
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkTextureRenderTarget.h"
+
+#include "GrRenderTargetPriv.h"
+#include "GrVkGpu.h"
+#include "GrVkImageView.h"
+#include "GrVkUtil.h"
+
+#include "SkMipMap.h"
+
+#include "vk/GrVkTypes.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+GrVkTextureRenderTarget* GrVkTextureRenderTarget::Create(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ SkBudgeted budgeted,
+ GrVkImage::Wrapped wrapped) {
+ VkImage image = info.fImage;
+ // Create the texture ImageView
+ const GrVkImageView* imageView = GrVkImageView::Create(gpu, image, info.fFormat,
+ GrVkImageView::kColor_Type,
+ info.fLevelCount);
+ if (!imageView) {
+ return nullptr;
+ }
+
+ VkFormat pixelFormat;
+ GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat);
+
+ VkImage colorImage;
+
+ // create msaa surface if necessary
+ GrVkImageInfo msInfo;
+ const GrVkImageView* resolveAttachmentView = nullptr;
+ if (desc.fSampleCnt) {
+ GrVkImage::ImageDesc msImageDesc;
+ msImageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ msImageDesc.fFormat = pixelFormat;
+ msImageDesc.fWidth = desc.fWidth;
+ msImageDesc.fHeight = desc.fHeight;
+ msImageDesc.fLevels = 1;
+ msImageDesc.fSamples = desc.fSampleCnt;
+ msImageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ msImageDesc.fUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT |
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+ msImageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ if (!GrVkImage::InitImageInfo(gpu, msImageDesc, &msInfo)) {
+ imageView->unref(gpu);
+ return nullptr;
+ }
+
+ // Set color attachment image
+ colorImage = msInfo.fImage;
+
+ // Create resolve attachment view.
+ resolveAttachmentView = GrVkImageView::Create(gpu, image, pixelFormat,
+ GrVkImageView::kColor_Type,
+ info.fLevelCount);
+ if (!resolveAttachmentView) {
+ GrVkImage::DestroyImageInfo(gpu, &msInfo);
+ imageView->unref(gpu);
+ return nullptr;
+ }
+ } else {
+ // Set color attachment image
+ colorImage = info.fImage;
+ }
+
+ const GrVkImageView* colorAttachmentView = GrVkImageView::Create(gpu, colorImage, pixelFormat,
+ GrVkImageView::kColor_Type, 1);
+ if (!colorAttachmentView) {
+ if (desc.fSampleCnt) {
+ resolveAttachmentView->unref(gpu);
+ GrVkImage::DestroyImageInfo(gpu, &msInfo);
+ }
+ imageView->unref(gpu);
+ return nullptr;
+ }
+
+ GrVkTextureRenderTarget* texRT;
+ if (desc.fSampleCnt) {
+ if (GrVkImage::kNot_Wrapped == wrapped) {
+ texRT = new GrVkTextureRenderTarget(gpu, budgeted, desc,
+ info, imageView, msInfo,
+ colorAttachmentView,
+ resolveAttachmentView);
+ } else {
+ texRT = new GrVkTextureRenderTarget(gpu, desc,
+ info, imageView, msInfo,
+ colorAttachmentView,
+ resolveAttachmentView, wrapped);
+ }
+ } else {
+ if (GrVkImage::kNot_Wrapped == wrapped) {
+ texRT = new GrVkTextureRenderTarget(gpu, budgeted, desc,
+ info, imageView,
+ colorAttachmentView);
+ } else {
+ texRT = new GrVkTextureRenderTarget(gpu, desc,
+ info, imageView,
+ colorAttachmentView, wrapped);
+ }
+ }
+ return texRT;
+}
+
+GrVkTextureRenderTarget*
+GrVkTextureRenderTarget::CreateNewTextureRenderTarget(GrVkGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ const GrVkImage::ImageDesc& imageDesc) {
+ SkASSERT(imageDesc.fUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
+ SkASSERT(imageDesc.fUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT);
+
+ GrVkImageInfo info;
+ if (!GrVkImage::InitImageInfo(gpu, imageDesc, &info)) {
+ return nullptr;
+ }
+
+ GrVkTextureRenderTarget* trt = Create(gpu, desc, info, budgeted, GrVkImage::kNot_Wrapped);
+ if (!trt) {
+ GrVkImage::DestroyImageInfo(gpu, &info);
+ }
+
+ return trt;
+}
+
+GrVkTextureRenderTarget*
+GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrWrapOwnership ownership,
+ const GrVkImageInfo* info) {
+ SkASSERT(info);
+ // Wrapped textures require both image and allocation (because they can be mapped)
+ SkASSERT(VK_NULL_HANDLE != info->fImage && VK_NULL_HANDLE != info->fAlloc.fMemory);
+
+ GrVkImage::Wrapped wrapped = kBorrow_GrWrapOwnership == ownership ? GrVkImage::kBorrowed_Wrapped
+ : GrVkImage::kAdopted_Wrapped;
+
+ GrVkTextureRenderTarget* trt = Create(gpu, desc, *info, SkBudgeted::kNo, wrapped);
+
+ return trt;
+}
+
+bool GrVkTextureRenderTarget::updateForMipmap(GrVkGpu* gpu, const GrVkImageInfo& newInfo) {
+ VkFormat pixelFormat;
+ GrPixelConfigToVkFormat(fDesc.fConfig, &pixelFormat);
+ if (fDesc.fSampleCnt) {
+ const GrVkImageView* resolveAttachmentView =
+ GrVkImageView::Create(gpu,
+ newInfo.fImage,
+ pixelFormat,
+ GrVkImageView::kColor_Type,
+ newInfo.fLevelCount);
+ if (!resolveAttachmentView) {
+ return false;
+ }
+ fResolveAttachmentView->unref(gpu);
+ fResolveAttachmentView = resolveAttachmentView;
+ } else {
+ const GrVkImageView* colorAttachmentView = GrVkImageView::Create(gpu,
+ newInfo.fImage,
+ pixelFormat,
+ GrVkImageView::kColor_Type,
+ 1);
+ if (!colorAttachmentView) {
+ return false;
+ }
+ fColorAttachmentView->unref(gpu);
+ fColorAttachmentView = colorAttachmentView;
+ }
+
+ this->createFramebuffer(gpu);
+ return true;
+}
+
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkTextureRenderTarget.h b/gfx/skia/skia/src/gpu/vk/GrVkTextureRenderTarget.h
new file mode 100644
index 000000000..225951106
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkTextureRenderTarget.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrVkTextureRenderTarget_DEFINED
+#define GrVkTextureRenderTarget_DEFINED
+
+#include "GrVkTexture.h"
+#include "GrVkRenderTarget.h"
+#include "GrVkGpu.h"
+
+#ifdef SK_BUILD_FOR_WIN
+// Windows gives bogus warnings about inheriting asTexture/asRenderTarget via dominance.
+#pragma warning(push)
+#pragma warning(disable: 4250)
+#endif
+
+class GrVkImageView;
+struct GrVkImageInfo;
+
+class GrVkTextureRenderTarget: public GrVkTexture, public GrVkRenderTarget {
+public:
+ static GrVkTextureRenderTarget* CreateNewTextureRenderTarget(GrVkGpu*, SkBudgeted,
+ const GrSurfaceDesc&,
+ const GrVkImage::ImageDesc&);
+
+ static GrVkTextureRenderTarget* CreateWrappedTextureRenderTarget(GrVkGpu*,
+ const GrSurfaceDesc&,
+ GrWrapOwnership,
+ const GrVkImageInfo*);
+
+ bool updateForMipmap(GrVkGpu* gpu, const GrVkImageInfo& newInfo);
+
+protected:
+ void onAbandon() override {
+ GrVkRenderTarget::onAbandon();
+ GrVkTexture::onAbandon();
+ }
+
+ void onRelease() override {
+ GrVkRenderTarget::onRelease();
+ GrVkTexture::onRelease();
+ }
+
+private:
+ GrVkTextureRenderTarget(GrVkGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ const GrVkImageView* texView,
+ const GrVkImageInfo& msaaInfo,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView)
+ : GrSurface(gpu, desc)
+ , GrVkImage(info, GrVkImage::kNot_Wrapped)
+ , GrVkTexture(gpu, desc, info, texView, GrVkImage::kNot_Wrapped)
+ , GrVkRenderTarget(gpu, desc, info, msaaInfo, colorAttachmentView,
+ resolveAttachmentView, GrVkImage::kNot_Wrapped) {
+ this->registerWithCache(budgeted);
+ }
+
+ GrVkTextureRenderTarget(GrVkGpu* gpu,
+ SkBudgeted budgeted,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ const GrVkImageView* texView,
+ const GrVkImageView* colorAttachmentView)
+ : GrSurface(gpu, desc)
+ , GrVkImage(info, GrVkImage::kNot_Wrapped)
+ , GrVkTexture(gpu, desc, info, texView, GrVkImage::kNot_Wrapped)
+ , GrVkRenderTarget(gpu, desc, info, colorAttachmentView, GrVkImage::kNot_Wrapped) {
+ this->registerWithCache(budgeted);
+ }
+ GrVkTextureRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ const GrVkImageView* texView,
+ const GrVkImageInfo& msaaInfo,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView,
+ GrVkImage::Wrapped wrapped)
+ : GrSurface(gpu, desc)
+ , GrVkImage(info, wrapped)
+ , GrVkTexture(gpu, desc, info, texView, wrapped)
+ , GrVkRenderTarget(gpu, desc, info, msaaInfo, colorAttachmentView,
+ resolveAttachmentView, wrapped) {
+ this->registerWithCacheWrapped();
+ }
+
+ GrVkTextureRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ const GrVkImageInfo& info,
+ const GrVkImageView* texView,
+ const GrVkImageView* colorAttachmentView,
+ GrVkImage::Wrapped wrapped)
+ : GrSurface(gpu, desc)
+ , GrVkImage(info, wrapped)
+ , GrVkTexture(gpu, desc, info, texView, wrapped)
+ , GrVkRenderTarget(gpu, desc, info, colorAttachmentView, wrapped) {
+ this->registerWithCacheWrapped();
+ }
+
+ static GrVkTextureRenderTarget* Create(GrVkGpu*,
+ const GrSurfaceDesc&,
+ const GrVkImageInfo&,
+ SkBudgeted budgeted,
+ GrVkImage::Wrapped wrapped);
+
+    // GrVkRenderTarget accounts for the texture's memory and any MSAA renderbuffer's memory.
+ size_t onGpuMemorySize() const override {
+ return GrVkRenderTarget::onGpuMemorySize();
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkTransferBuffer.cpp b/gfx/skia/skia/src/gpu/vk/GrVkTransferBuffer.cpp
new file mode 100644
index 000000000..9adfeaf75
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkTransferBuffer.cpp
@@ -0,0 +1,61 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkTransferBuffer.h"
+#include "GrVkGpu.h"
+#include "SkTraceMemoryDump.h"
+
+
+GrVkTransferBuffer* GrVkTransferBuffer::Create(GrVkGpu* gpu, size_t size, GrVkBuffer::Type type) {
+ GrVkBuffer::Desc desc;
+ desc.fDynamic = true;
+ SkASSERT(GrVkBuffer::kCopyRead_Type == type || GrVkBuffer::kCopyWrite_Type == type);
+ desc.fType = type;
+ desc.fSizeInBytes = size;
+
+ const GrVkBuffer::Resource* bufferResource = GrVkBuffer::Create(gpu, desc);
+ if (!bufferResource) {
+ return nullptr;
+ }
+
+ GrVkTransferBuffer* buffer = new GrVkTransferBuffer(gpu, desc, bufferResource);
+ if (!buffer) {
+ bufferResource->unref(gpu);
+ }
+ return buffer;
+}
+
+GrVkTransferBuffer::GrVkTransferBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* bufferResource)
+ : INHERITED(gpu, desc.fSizeInBytes,
+ kCopyRead_Type == desc.fType ?
+ kXferCpuToGpu_GrBufferType : kXferGpuToCpu_GrBufferType,
+ kStream_GrAccessPattern)
+ , GrVkBuffer(desc, bufferResource) {
+ this->registerWithCache(SkBudgeted::kYes);
+}
+
+void GrVkTransferBuffer::onRelease() {
+ if (!this->wasDestroyed()) {
+ this->vkRelease(this->getVkGpu());
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrVkTransferBuffer::onAbandon() {
+ this->vkAbandon();
+ INHERITED::onAbandon();
+}
+
+void GrVkTransferBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& dumpName) const {
+ SkString buffer_id;
+ buffer_id.appendU64((uint64_t)this->buffer());
+ traceMemoryDump->setMemoryBacking(dumpName.c_str(), "vk_buffer",
+ buffer_id.c_str());
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkTransferBuffer.h b/gfx/skia/skia/src/gpu/vk/GrVkTransferBuffer.h
new file mode 100644
index 000000000..a9756cbae
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkTransferBuffer.h
@@ -0,0 +1,56 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkTransferBuffer_DEFINED
+#define GrVkTransferBuffer_DEFINED
+
+#include "GrBuffer.h"
+#include "GrVkBuffer.h"
+
+class GrVkGpu;
+
+class GrVkTransferBuffer : public GrBuffer, public GrVkBuffer {
+
+public:
+ static GrVkTransferBuffer* Create(GrVkGpu* gpu, size_t size, GrVkBuffer::Type type);
+
+protected:
+ void onAbandon() override;
+ void onRelease() override;
+
+private:
+ GrVkTransferBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* resource);
+ void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& dumpName) const override;
+
+ void onMap() override {
+ if (!this->wasDestroyed()) {
+ this->GrBuffer::fMapPtr = this->vkMap(this->getVkGpu());
+ }
+ }
+
+ void onUnmap() override {
+ if (!this->wasDestroyed()) {
+ this->vkUnmap(this->getVkGpu());
+ }
+ }
+
+ bool onUpdateData(const void* src, size_t srcSizeInBytes) override {
+ SkFAIL("Not implemented for transfer buffers.");
+ return false;
+ }
+
+ GrVkGpu* getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return reinterpret_cast<GrVkGpu*>(this->getGpu());
+ }
+
+ typedef GrBuffer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkUniformBuffer.cpp b/gfx/skia/skia/src/gpu/vk/GrVkUniformBuffer.cpp
new file mode 100644
index 000000000..ac013f7df
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkUniformBuffer.cpp
@@ -0,0 +1,103 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkUniformBuffer.h"
+#include "GrVkGpu.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+GrVkUniformBuffer* GrVkUniformBuffer::Create(GrVkGpu* gpu, size_t size) {
+ if (0 == size) {
+ return nullptr;
+ }
+ const GrVkResource* resource = nullptr;
+ if (size <= GrVkUniformBuffer::kStandardSize) {
+ resource = gpu->resourceProvider().findOrCreateStandardUniformBufferResource();
+ } else {
+ resource = CreateResource(gpu, size);
+ }
+ if (!resource) {
+ return nullptr;
+ }
+
+ GrVkBuffer::Desc desc;
+ desc.fDynamic = true;
+ desc.fType = GrVkBuffer::kUniform_Type;
+ desc.fSizeInBytes = size;
+ GrVkUniformBuffer* buffer = new GrVkUniformBuffer(gpu, desc,
+ (const GrVkUniformBuffer::Resource*) resource);
+ if (!buffer) {
+        // this may destroy a resource we just got from the resource provider,
+        // but it avoids a conditional
+ resource->unref(gpu);
+ }
+ return buffer;
+}
+
+// We implement our own creation function for this special buffer resource type
+const GrVkResource* GrVkUniformBuffer::CreateResource(GrVkGpu* gpu, size_t size) {
+ if (0 == size) {
+ return nullptr;
+ }
+
+ VkBuffer buffer;
+ GrVkAlloc alloc;
+
+ // create the buffer object
+ VkBufferCreateInfo bufInfo;
+ memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
+ bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ bufInfo.flags = 0;
+ bufInfo.size = size;
+ bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+ bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ bufInfo.queueFamilyIndexCount = 0;
+ bufInfo.pQueueFamilyIndices = nullptr;
+
+ VkResult err;
+ err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
+ if (err) {
+ return nullptr;
+ }
+
+ if (!GrVkMemory::AllocAndBindBufferMemory(gpu,
+ buffer,
+ kUniform_Type,
+ true, // dynamic
+ &alloc)) {
+ return nullptr;
+ }
+
+ const GrVkResource* resource = new GrVkUniformBuffer::Resource(buffer, alloc);
+ if (!resource) {
+ VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
+ GrVkMemory::FreeBufferMemory(gpu, kUniform_Type, alloc);
+ return nullptr;
+ }
+
+ return resource;
+}
+
+const GrVkBuffer::Resource* GrVkUniformBuffer::createResource(GrVkGpu* gpu,
+ const GrVkBuffer::Desc& descriptor) {
+ const GrVkResource* vkResource;
+ if (descriptor.fSizeInBytes <= GrVkUniformBuffer::kStandardSize) {
+ GrVkResourceProvider& provider = gpu->resourceProvider();
+ vkResource = provider.findOrCreateStandardUniformBufferResource();
+ } else {
+ vkResource = CreateResource(gpu, descriptor.fSizeInBytes);
+ }
+ return (const GrVkBuffer::Resource*) vkResource;
+}
+
+void GrVkUniformBuffer::Resource::onRecycle(GrVkGpu* gpu) const {
+ if (fAlloc.fSize <= GrVkUniformBuffer::kStandardSize) {
+ gpu->resourceProvider().recycleStandardUniformBufferResource(this);
+ } else {
+ this->unref(gpu);
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkUniformBuffer.h b/gfx/skia/skia/src/gpu/vk/GrVkUniformBuffer.h
new file mode 100644
index 000000000..2535e0c60
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkUniformBuffer.h
@@ -0,0 +1,58 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkUniformBuffer_DEFINED
+#define GrVkUniformBuffer_DEFINED
+
+#include "GrVkBuffer.h"
+
+class GrVkGpu;
+
+class GrVkUniformBuffer : public GrVkBuffer {
+
+public:
+ static GrVkUniformBuffer* Create(GrVkGpu* gpu, size_t size);
+ static const GrVkResource* CreateResource(GrVkGpu* gpu, size_t size);
+ static const size_t kStandardSize = 256;
+
+ void* map(GrVkGpu* gpu) {
+ return this->vkMap(gpu);
+ }
+ void unmap(GrVkGpu* gpu) {
+ this->vkUnmap(gpu);
+ }
+ // The output variable createdNewBuffer must be set to true if a new VkBuffer is created in
+ // order to upload the data
+ bool updateData(GrVkGpu* gpu, const void* src, size_t srcSizeInBytes,
+ bool* createdNewBuffer) {
+ return this->vkUpdateData(gpu, src, srcSizeInBytes, createdNewBuffer);
+ }
+ void release(const GrVkGpu* gpu) { this->vkRelease(gpu); }
+ void abandon() { this->vkAbandon(); }
+
+private:
+ class Resource : public GrVkBuffer::Resource {
+ public:
+ Resource(VkBuffer buf, const GrVkAlloc& alloc)
+ : INHERITED(buf, alloc, kUniform_Type) {}
+
+ void onRecycle(GrVkGpu* gpu) const override;
+
+ typedef GrVkBuffer::Resource INHERITED;
+ };
+
+ const GrVkBuffer::Resource* createResource(GrVkGpu* gpu,
+ const GrVkBuffer::Desc& descriptor) override;
+
+ GrVkUniformBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkUniformBuffer::Resource* resource)
+ : INHERITED(desc, resource) {}
+
+ typedef GrVkBuffer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkUniformHandler.cpp b/gfx/skia/skia/src/gpu/vk/GrVkUniformHandler.cpp
new file mode 100644
index 000000000..4a6e977a8
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkUniformHandler.cpp
@@ -0,0 +1,228 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkUniformHandler.h"
+#include "glsl/GrGLSLProgramBuilder.h"
+
+// To determine whether a current offset is aligned, we can just 'and' the lowest bits with the
+// alignment mask. A value of 0 means aligned, any other value is how many bytes past alignment we
+// are. This works since all alignments are powers of 2. The mask is always (alignment - 1).
+// This alignment mask will give correct alignments for using the std430 block layout. If you want
+// the std140 alignment, you can use this, but then make sure if you have an array type it is
+// aligned to 16 bytes (i.e. has mask of 0xF).
+uint32_t grsltype_to_alignment_mask(GrSLType type) {
+ SkASSERT(GrSLTypeIsFloatType(type));
+ static const uint32_t kAlignmentMask[] = {
+ 0x0, // kVoid_GrSLType, should never return this
+ 0x3, // kFloat_GrSLType
+ 0x7, // kVec2f_GrSLType
+ 0xF, // kVec3f_GrSLType
+ 0xF, // kVec4f_GrSLType
+ 0x7, // kMat22f_GrSLType
+ 0xF, // kMat33f_GrSLType
+ 0xF, // kMat44f_GrSLType
+ 0x0, // Sampler2D_GrSLType, should never return this
+ 0x0, // SamplerExternal_GrSLType, should never return this
+ 0x0, // Sampler2DRect_GrSLType, should never return this
+ 0x0, // SamplerBuffer_GrSLType, should never return this
+ 0x0, // kBool_GrSLType
+ 0x7, // kInt_GrSLType
+ 0x7, // kUint_GrSLType
+ 0x0, // Texture2D_GrSLType, should never return this
+ 0x0, // Sampler_GrSLType, should never return this
+ };
+ GR_STATIC_ASSERT(0 == kVoid_GrSLType);
+ GR_STATIC_ASSERT(1 == kFloat_GrSLType);
+ GR_STATIC_ASSERT(2 == kVec2f_GrSLType);
+ GR_STATIC_ASSERT(3 == kVec3f_GrSLType);
+ GR_STATIC_ASSERT(4 == kVec4f_GrSLType);
+ GR_STATIC_ASSERT(5 == kMat22f_GrSLType);
+ GR_STATIC_ASSERT(6 == kMat33f_GrSLType);
+ GR_STATIC_ASSERT(7 == kMat44f_GrSLType);
+ GR_STATIC_ASSERT(8 == kTexture2DSampler_GrSLType);
+ GR_STATIC_ASSERT(9 == kTextureExternalSampler_GrSLType);
+ GR_STATIC_ASSERT(10 == kTexture2DRectSampler_GrSLType);
+ GR_STATIC_ASSERT(11 == kTextureBufferSampler_GrSLType);
+ GR_STATIC_ASSERT(12 == kBool_GrSLType);
+ GR_STATIC_ASSERT(13 == kInt_GrSLType);
+ GR_STATIC_ASSERT(14 == kUint_GrSLType);
+ GR_STATIC_ASSERT(15 == kTexture2D_GrSLType);
+ GR_STATIC_ASSERT(16 == kSampler_GrSLType);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kAlignmentMask) == kGrSLTypeCount);
+ return kAlignmentMask[type];
+}
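+// For instance, testing an offset of 20 against the kVec4f_GrSLType mask 0xF gives
+// 20 & 0xF == 4, i.e. the offset is 4 bytes past the previous 16-byte boundary.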
+
+/** Returns the size in bytes taken up in Vulkan buffers for floating point GrSLTypes.
+    For non-floating-point types it returns 0. Currently this reflects the std140 alignment,
+    so a mat22 takes up 8 floats. */
+static inline uint32_t grsltype_to_vk_size(GrSLType type) {
+ SkASSERT(GrSLTypeIsFloatType(type));
+ static const uint32_t kSizes[] = {
+ 0, // kVoid_GrSLType
+ sizeof(float), // kFloat_GrSLType
+ 2 * sizeof(float), // kVec2f_GrSLType
+ 3 * sizeof(float), // kVec3f_GrSLType
+ 4 * sizeof(float), // kVec4f_GrSLType
+ 8 * sizeof(float), // kMat22f_GrSLType. TODO: this will be 4 * szof(float) on std430.
+ 12 * sizeof(float), // kMat33f_GrSLType
+ 16 * sizeof(float), // kMat44f_GrSLType
+ 0, // kTexture2DSampler_GrSLType
+ 0, // kTextureExternalSampler_GrSLType
+ 0, // kTexture2DRectSampler_GrSLType
+ 0, // kTextureBufferSampler_GrSLType
+ 1, // kBool_GrSLType
+ 4, // kInt_GrSLType
+ 4, // kUint_GrSLType
+ 0, // kTexture2D_GrSLType
+ 0, // kSampler_GrSLType
+ };
+ return kSizes[type];
+
+ GR_STATIC_ASSERT(0 == kVoid_GrSLType);
+ GR_STATIC_ASSERT(1 == kFloat_GrSLType);
+ GR_STATIC_ASSERT(2 == kVec2f_GrSLType);
+ GR_STATIC_ASSERT(3 == kVec3f_GrSLType);
+ GR_STATIC_ASSERT(4 == kVec4f_GrSLType);
+ GR_STATIC_ASSERT(5 == kMat22f_GrSLType);
+ GR_STATIC_ASSERT(6 == kMat33f_GrSLType);
+ GR_STATIC_ASSERT(7 == kMat44f_GrSLType);
+ GR_STATIC_ASSERT(8 == kTexture2DSampler_GrSLType);
+ GR_STATIC_ASSERT(9 == kTextureExternalSampler_GrSLType);
+ GR_STATIC_ASSERT(10 == kTexture2DRectSampler_GrSLType);
+ GR_STATIC_ASSERT(11 == kTextureBufferSampler_GrSLType);
+ GR_STATIC_ASSERT(12 == kBool_GrSLType);
+ GR_STATIC_ASSERT(13 == kInt_GrSLType);
+ GR_STATIC_ASSERT(14 == kUint_GrSLType);
+ GR_STATIC_ASSERT(15 == kTexture2D_GrSLType);
+ GR_STATIC_ASSERT(16 == kSampler_GrSLType);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kSizes) == kGrSLTypeCount);
+}
+
+
+// Given the current offset into the ubo, calculate the offset for the uniform we're trying to add
+// taking into consideration all alignment requirements. The uniformOffset is set to the offset for
+// the new uniform, and currentOffset is updated to be the offset to the end of the new uniform.
+void get_ubo_aligned_offset(uint32_t* uniformOffset,
+ uint32_t* currentOffset,
+ GrSLType type,
+ int arrayCount) {
+ uint32_t alignmentMask = grsltype_to_alignment_mask(type);
+ // We want to use the std140 layout here, so we must make arrays align to 16 bytes.
+ if (arrayCount || type == kMat22f_GrSLType) {
+ alignmentMask = 0xF;
+ }
+ uint32_t offsetDiff = *currentOffset & alignmentMask;
+ if (offsetDiff != 0) {
+ offsetDiff = alignmentMask - offsetDiff + 1;
+ }
+ *uniformOffset = *currentOffset + offsetDiff;
+ SkASSERT(sizeof(float) == 4);
+ if (arrayCount) {
+ uint32_t elementSize = SkTMax<uint32_t>(16, grsltype_to_vk_size(type));
+ SkASSERT(0 == (elementSize & 0xF));
+ *currentOffset = *uniformOffset + elementSize * arrayCount;
+ } else {
+ *currentOffset = *uniformOffset + grsltype_to_vk_size(type);
+ }
+}
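+// Worked example of get_ubo_aligned_offset(): starting at *currentOffset == 4 (one float already
+// placed) and adding a non-array kVec3f_GrSLType (mask 0xF, size 12), offsetDiff = 4, so the
+// uniform lands at offset 16 and *currentOffset advances to 28.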
+
+GrGLSLUniformHandler::UniformHandle GrVkUniformHandler::internalAddUniformArray(
+ uint32_t visibility,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name,
+ bool mangleName,
+ int arrayCount,
+ const char** outName) {
+ SkASSERT(name && strlen(name));
+ SkDEBUGCODE(static const uint32_t kVisibilityMask = kVertex_GrShaderFlag|kFragment_GrShaderFlag);
+ SkASSERT(0 == (~kVisibilityMask & visibility));
+ SkASSERT(0 != visibility);
+ SkASSERT(kDefault_GrSLPrecision == precision || GrSLTypeIsFloatType(type));
+ GrSLTypeIsFloatType(type);
+
+ UniformInfo& uni = fUniforms.push_back();
+ uni.fVariable.setType(type);
+    // TODO: this is a bit hacky; let's think of a better way. Basically we need to be able to
+    // use the uniform view matrix name in the GP, and the GP is immutable, so it has to tell the
+    // PB exactly what name it wants to use for the uniform view matrix. If we prefix anything,
+    // then the names will mismatch. I think the correct solution is to have all GPs that need
+    // the uniform view matrix upload it in their setData along with their regular uniforms.
+ char prefix = 'u';
+ if ('u' == name[0]) {
+ prefix = '\0';
+ }
+ fProgramBuilder->nameVariable(uni.fVariable.accessName(), prefix, name, mangleName);
+ uni.fVariable.setArrayCount(arrayCount);
+    // For now, assert that the visibility is either vertex-only or fragment-only
+ SkASSERT(kVertex_GrShaderFlag == visibility || kFragment_GrShaderFlag == visibility);
+ uni.fVisibility = visibility;
+ uni.fVariable.setPrecision(precision);
+    // When outputting the GLSL, only the outer uniform block will get the Uniform modifier. Thus
+ // we set the modifier to none for all uniforms declared inside the block.
+ uni.fVariable.setTypeModifier(GrGLSLShaderVar::kNone_TypeModifier);
+
+ uint32_t* currentOffset = kVertex_GrShaderFlag == visibility ? &fCurrentVertexUBOOffset
+ : &fCurrentFragmentUBOOffset;
+ get_ubo_aligned_offset(&uni.fUBOffset, currentOffset, type, arrayCount);
+
+ if (outName) {
+ *outName = uni.fVariable.c_str();
+ }
+
+ return GrGLSLUniformHandler::UniformHandle(fUniforms.count() - 1);
+}
+
+GrGLSLUniformHandler::SamplerHandle GrVkUniformHandler::internalAddSampler(uint32_t visibility,
+ GrPixelConfig config,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name) {
+ SkASSERT(name && strlen(name));
+ SkDEBUGCODE(static const uint32_t kVisMask = kVertex_GrShaderFlag | kFragment_GrShaderFlag);
+ SkASSERT(0 == (~kVisMask & visibility));
+ SkASSERT(0 != visibility);
+ SkString mangleName;
+ char prefix = 'u';
+ fProgramBuilder->nameVariable(&mangleName, prefix, name, true);
+ fSamplers.emplace_back(visibility, config, type, precision, mangleName.c_str(),
+ (uint32_t)fSamplers.count(), kSamplerDescSet);
+ return GrGLSLUniformHandler::SamplerHandle(fSamplers.count() - 1);
+}
+
+void GrVkUniformHandler::appendUniformDecls(GrShaderFlags visibility, SkString* out) const {
+ SkASSERT(kVertex_GrShaderFlag == visibility || kFragment_GrShaderFlag == visibility);
+
+ for (int i = 0; i < fSamplers.count(); ++i) {
+ const GrVkGLSLSampler& sampler = fSamplers[i];
+ SkASSERT(sampler.type() == kTexture2DSampler_GrSLType);
+ if (visibility == sampler.visibility()) {
+ sampler.fShaderVar.appendDecl(fProgramBuilder->glslCaps(), out);
+ out->append(";\n");
+ }
+ }
+
+ SkString uniformsString;
+ for (int i = 0; i < fUniforms.count(); ++i) {
+ const UniformInfo& localUniform = fUniforms[i];
+ if (visibility == localUniform.fVisibility) {
+ if (GrSLTypeIsFloatType(localUniform.fVariable.getType())) {
+ localUniform.fVariable.appendDecl(fProgramBuilder->glslCaps(), &uniformsString);
+ uniformsString.append(";\n");
+ }
+ }
+ }
+ if (!uniformsString.isEmpty()) {
+ uint32_t uniformBinding = (visibility == kVertex_GrShaderFlag) ? kVertexBinding
+ : kFragBinding;
+ const char* stage = (visibility == kVertex_GrShaderFlag) ? "vertex" : "fragment";
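+        // e.g. fragment-stage uniforms produce a block of the form:
+        //     layout (set=0, binding=1) uniform fragmentUniformBuffer { ... };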
+ out->appendf("layout (set=%d, binding=%d) uniform %sUniformBuffer\n{\n",
+ kUniformBufferDescSet, uniformBinding, stage);
+ out->appendf("%s\n};\n", uniformsString.c_str());
+ }
+}
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkUniformHandler.h b/gfx/skia/skia/src/gpu/vk/GrVkUniformHandler.h
new file mode 100644
index 000000000..a6ea93673
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkUniformHandler.h
@@ -0,0 +1,98 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkUniformHandler_DEFINED
+#define GrVkUniformHandler_DEFINED
+
+#include "glsl/GrGLSLUniformHandler.h"
+
+#include "GrAllocator.h"
+#include "GrVkGLSLSampler.h"
+#include "glsl/GrGLSLShaderVar.h"
+
+class GrVkUniformHandler : public GrGLSLUniformHandler {
+public:
+ static const int kUniformsPerBlock = 8;
+
+ enum {
+ kUniformBufferDescSet = 0,
+ kSamplerDescSet = 1,
+ };
+ enum {
+ kVertexBinding = 0,
+ kFragBinding = 1,
+ };
+
+ // fUBOffset is only valid if the GrSLType of the fVariable is not a sampler
+ struct UniformInfo {
+ GrGLSLShaderVar fVariable;
+ uint32_t fVisibility;
+ uint32_t fUBOffset;
+ };
+ typedef GrTAllocator<UniformInfo> UniformInfoArray;
+
+ const GrGLSLShaderVar& getUniformVariable(UniformHandle u) const override {
+ return fUniforms[u.toIndex()].fVariable;
+ }
+
+ const char* getUniformCStr(UniformHandle u) const override {
+ return this->getUniformVariable(u).c_str();
+ }
+
+private:
+ explicit GrVkUniformHandler(GrGLSLProgramBuilder* program)
+ : INHERITED(program)
+ , fUniforms(kUniformsPerBlock)
+ , fCurrentVertexUBOOffset(0)
+ , fCurrentFragmentUBOOffset(0)
+ , fCurrentSamplerBinding(0) {
+ }
+
+ UniformHandle internalAddUniformArray(uint32_t visibility,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name,
+ bool mangleName,
+ int arrayCount,
+ const char** outName) override;
+
+ SamplerHandle internalAddSampler(uint32_t visibility,
+ GrPixelConfig config,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name) override;
+
+ int numSamplers() const override { return fSamplers.count(); }
+ const GrGLSLSampler& getSampler(SamplerHandle handle) const override {
+ return fSamplers[handle.toIndex()];
+ }
+
+ void appendUniformDecls(GrShaderFlags, SkString*) const override;
+
+ bool hasVertexUniforms() const { return fCurrentVertexUBOOffset > 0; }
+ bool hasFragmentUniforms() const { return fCurrentFragmentUBOOffset > 0; }
+
+
+ const UniformInfo& getUniformInfo(UniformHandle u) const {
+ return fUniforms[u.toIndex()];
+ }
+
+
+ UniformInfoArray fUniforms;
+ SkTArray<GrVkGLSLSampler> fSamplers;
+
+ uint32_t fCurrentVertexUBOOffset;
+ uint32_t fCurrentFragmentUBOOffset;
+ uint32_t fCurrentSamplerBinding;
+
+ friend class GrVkPipelineStateBuilder;
+ friend class GrVkDescriptorSetManager;
+
+ typedef GrGLSLUniformHandler INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkUtil.cpp b/gfx/skia/skia/src/gpu/vk/GrVkUtil.cpp
new file mode 100644
index 000000000..a14f827dc
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkUtil.cpp
@@ -0,0 +1,359 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkUtil.h"
+
+#include "vk/GrVkGpu.h"
+#if USE_SKSL
+#include "SkSLCompiler.h"
+#endif
+
+bool GrPixelConfigToVkFormat(GrPixelConfig config, VkFormat* format) {
+ VkFormat dontCare;
+ if (!format) {
+ format = &dontCare;
+ }
+
+ switch (config) {
+ case kRGBA_8888_GrPixelConfig:
+ *format = VK_FORMAT_R8G8B8A8_UNORM;
+ break;
+ case kBGRA_8888_GrPixelConfig:
+ *format = VK_FORMAT_B8G8R8A8_UNORM;
+ break;
+ case kSRGBA_8888_GrPixelConfig:
+ *format = VK_FORMAT_R8G8B8A8_SRGB;
+ break;
+ case kSBGRA_8888_GrPixelConfig:
+ *format = VK_FORMAT_B8G8R8A8_SRGB;
+ break;
+ case kRGB_565_GrPixelConfig:
+ *format = VK_FORMAT_R5G6B5_UNORM_PACK16;
+ break;
+ case kRGBA_4444_GrPixelConfig:
+            // R4G4B4A4 is not required to be supported, so we actually
+            // store the data as if it were B4G4R4A4 and swizzle in shaders
+ *format = VK_FORMAT_B4G4R4A4_UNORM_PACK16;
+ break;
+ case kIndex_8_GrPixelConfig:
+ // No current vulkan support for this config
+ return false;
+ case kAlpha_8_GrPixelConfig:
+ *format = VK_FORMAT_R8_UNORM;
+ break;
+ case kETC1_GrPixelConfig:
+ // converting to ETC2 which is a superset of ETC1
+ *format = VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
+ break;
+ case kLATC_GrPixelConfig:
+ // No current vulkan support for this config
+ return false;
+ case kR11_EAC_GrPixelConfig:
+ *format = VK_FORMAT_EAC_R11_UNORM_BLOCK;
+ break;
+ case kASTC_12x12_GrPixelConfig:
+ *format = VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
+ break;
+ case kRGBA_float_GrPixelConfig:
+ *format = VK_FORMAT_R32G32B32A32_SFLOAT;
+ break;
+ case kRGBA_half_GrPixelConfig:
+ *format = VK_FORMAT_R16G16B16A16_SFLOAT;
+ break;
+ case kAlpha_half_GrPixelConfig:
+ *format = VK_FORMAT_R16_SFLOAT;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+bool GrVkFormatToPixelConfig(VkFormat format, GrPixelConfig* config) {
+ GrPixelConfig dontCare;
+ if (!config) {
+ config = &dontCare;
+ }
+
+ switch (format) {
+ case VK_FORMAT_R8G8B8A8_UNORM:
+ *config = kRGBA_8888_GrPixelConfig;
+ break;
+ case VK_FORMAT_B8G8R8A8_UNORM:
+ *config = kBGRA_8888_GrPixelConfig;
+ break;
+ case VK_FORMAT_R8G8B8A8_SRGB:
+ *config = kSRGBA_8888_GrPixelConfig;
+ break;
+ case VK_FORMAT_B8G8R8A8_SRGB:
+ *config = kSBGRA_8888_GrPixelConfig;
+ break;
+ case VK_FORMAT_R5G6B5_UNORM_PACK16:
+ *config = kRGB_565_GrPixelConfig;
+ break;
+ case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+ // R4G4B4A4 is not required to be supported so we actually
+ // store RGBA_4444 data as B4G4R4A4.
+ *config = kRGBA_4444_GrPixelConfig;
+ break;
+ case VK_FORMAT_R8_UNORM:
+ *config = kAlpha_8_GrPixelConfig;
+ break;
+ case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+ *config = kETC1_GrPixelConfig;
+ break;
+ case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+ *config = kR11_EAC_GrPixelConfig;
+ break;
+ case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+ *config = kASTC_12x12_GrPixelConfig;
+ break;
+ case VK_FORMAT_R32G32B32A32_SFLOAT:
+ *config = kRGBA_float_GrPixelConfig;
+ break;
+ case VK_FORMAT_R16G16B16A16_SFLOAT:
+ *config = kRGBA_half_GrPixelConfig;
+ break;
+ case VK_FORMAT_R16_SFLOAT:
+ *config = kAlpha_half_GrPixelConfig;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+bool GrVkFormatIsSRGB(VkFormat format, VkFormat* linearFormat) {
+ VkFormat linearFmt = format;
+ switch (format) {
+ case VK_FORMAT_R8_SRGB:
+ linearFmt = VK_FORMAT_R8_UNORM;
+ break;
+ case VK_FORMAT_R8G8_SRGB:
+ linearFmt = VK_FORMAT_R8G8_UNORM;
+ break;
+ case VK_FORMAT_R8G8B8_SRGB:
+ linearFmt = VK_FORMAT_R8G8B8_UNORM;
+ break;
+ case VK_FORMAT_B8G8R8_SRGB:
+ linearFmt = VK_FORMAT_B8G8R8_UNORM;
+ break;
+ case VK_FORMAT_R8G8B8A8_SRGB:
+ linearFmt = VK_FORMAT_R8G8B8A8_UNORM;
+ break;
+ case VK_FORMAT_B8G8R8A8_SRGB:
+ linearFmt = VK_FORMAT_B8G8R8A8_UNORM;
+ break;
+ case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+ linearFmt = VK_FORMAT_A8B8G8R8_UNORM_PACK32;
+ break;
+ case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_BC1_RGB_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_BC2_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_BC2_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_BC3_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_BC3_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_BC7_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_BC7_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
+ break;
+ case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+ linearFmt = VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
+ break;
+ default:
+ break;
+ }
+ if (linearFormat) {
+ *linearFormat = linearFmt;
+ }
+ return (linearFmt != format);
+}
+
+bool GrSampleCountToVkSampleCount(uint32_t samples, VkSampleCountFlagBits* vkSamples) {
+ switch (samples) {
+ case 0: // fall through
+ case 1:
+ *vkSamples = VK_SAMPLE_COUNT_1_BIT;
+ return true;
+ case 2:
+ *vkSamples = VK_SAMPLE_COUNT_2_BIT;
+ return true;
+ case 4:
+ *vkSamples = VK_SAMPLE_COUNT_4_BIT;
+ return true;
+ case 8:
+ *vkSamples = VK_SAMPLE_COUNT_8_BIT;
+ return true;
+ case 16:
+ *vkSamples = VK_SAMPLE_COUNT_16_BIT;
+ return true;
+ case 32:
+ *vkSamples = VK_SAMPLE_COUNT_32_BIT;
+ return true;
+ case 64:
+ *vkSamples = VK_SAMPLE_COUNT_64_BIT;
+ return true;
+ default:
+ return false;
+ }
+}
+
+#if USE_SKSL
+SkSL::Program::Kind vk_shader_stage_to_skiasl_kind(VkShaderStageFlagBits stage) {
+ if (VK_SHADER_STAGE_VERTEX_BIT == stage) {
+ return SkSL::Program::kVertex_Kind;
+ }
+ SkASSERT(VK_SHADER_STAGE_FRAGMENT_BIT == stage);
+ return SkSL::Program::kFragment_Kind;
+}
+#else
+shaderc_shader_kind vk_shader_stage_to_shaderc_kind(VkShaderStageFlagBits stage) {
+ if (VK_SHADER_STAGE_VERTEX_BIT == stage) {
+ return shaderc_glsl_vertex_shader;
+ }
+ SkASSERT(VK_SHADER_STAGE_FRAGMENT_BIT == stage);
+ return shaderc_glsl_fragment_shader;
+}
+#endif
+
+bool GrCompileVkShaderModule(const GrVkGpu* gpu,
+ const char* shaderString,
+ VkShaderStageFlagBits stage,
+ VkShaderModule* shaderModule,
+ VkPipelineShaderStageCreateInfo* stageInfo) {
+ VkShaderModuleCreateInfo moduleCreateInfo;
+ memset(&moduleCreateInfo, 0, sizeof(VkShaderModuleCreateInfo));
+ moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+ moduleCreateInfo.pNext = nullptr;
+ moduleCreateInfo.flags = 0;
+
+#if USE_SKSL
+ std::string code;
+#else
+ shaderc_compilation_result_t result = nullptr;
+#endif
+
+ if (gpu->vkCaps().canUseGLSLForShaderModule()) {
+ moduleCreateInfo.codeSize = strlen(shaderString);
+ moduleCreateInfo.pCode = (const uint32_t*)shaderString;
+ } else {
+
+#if USE_SKSL
+ bool result = gpu->shaderCompiler()->toSPIRV(vk_shader_stage_to_skiasl_kind(stage),
+ std::string(shaderString),
+ &code);
+ if (!result) {
+ SkDebugf("%s\n", gpu->shaderCompiler()->errorText().c_str());
+ return false;
+ }
+ moduleCreateInfo.codeSize = code.size();
+ moduleCreateInfo.pCode = (const uint32_t*)code.c_str();
+#else
+ shaderc_compiler_t compiler = gpu->shadercCompiler();
+
+ shaderc_compile_options_t options = shaderc_compile_options_initialize();
+
+ shaderc_shader_kind shadercStage = vk_shader_stage_to_shaderc_kind(stage);
+ result = shaderc_compile_into_spv(compiler,
+ shaderString,
+ strlen(shaderString),
+ shadercStage,
+ "shader",
+ "main",
+ options);
+ shaderc_compile_options_release(options);
+#ifdef SK_DEBUG
+ if (shaderc_result_get_num_errors(result)) {
+ SkDebugf("%s\n", shaderString);
+ SkDebugf("%s\n", shaderc_result_get_error_message(result));
+ return false;
+ }
+#endif // SK_DEBUG
+
+ moduleCreateInfo.codeSize = shaderc_result_get_length(result);
+ moduleCreateInfo.pCode = (const uint32_t*)shaderc_result_get_bytes(result);
+#endif // USE_SKSL
+ }
+
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateShaderModule(gpu->device(),
+ &moduleCreateInfo,
+ nullptr,
+ shaderModule));
+
+ if (!gpu->vkCaps().canUseGLSLForShaderModule()) {
+#if !USE_SKSL
+ shaderc_result_release(result);
+#endif
+ }
+ if (err) {
+ return false;
+ }
+
+ memset(stageInfo, 0, sizeof(VkPipelineShaderStageCreateInfo));
+ stageInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ stageInfo->pNext = nullptr;
+ stageInfo->flags = 0;
+ stageInfo->stage = stage;
+ stageInfo->module = *shaderModule;
+ stageInfo->pName = "main";
+ stageInfo->pSpecializationInfo = nullptr;
+
+ return true;
+}
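For orientation, a self-contained sketch (not part of the imported patch) of how the conversion helpers in this file compose; the wrapper function name is an assumption.

static bool describe_config(GrPixelConfig config) {
    VkFormat format;
    if (!GrPixelConfigToVkFormat(config, &format)) {
        return false;  // e.g. kIndex_8 and kLATC have no Vulkan equivalent
    }

    // The mapping is invertible for every config that maps to a format at all.
    GrPixelConfig roundTripped;
    SkASSERT(GrVkFormatToPixelConfig(format, &roundTripped) && roundTripped == config);

    // sRGB formats also report their linear (UNORM) counterpart.
    VkFormat linearFormat;
    bool isSRGB = GrVkFormatIsSRGB(format, &linearFormat);
    (void)isSRGB;  // e.g. VK_FORMAT_R8G8B8A8_SRGB -> VK_FORMAT_R8G8B8A8_UNORM

    // Sample counts are validated the same way before creating MSAA render targets.
    VkSampleCountFlagBits samples;
    return GrSampleCountToVkSampleCount(4, &samples);
}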
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkUtil.h b/gfx/skia/skia/src/gpu/vk/GrVkUtil.h
new file mode 100644
index 000000000..fae3c200d
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkUtil.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkUtil_DEFINED
+#define GrVkUtil_DEFINED
+
+#include "GrColor.h"
+#include "GrTypes.h"
+#include "vk/GrVkDefines.h"
+#include "vk/GrVkInterface.h"
+
+class GrVkGpu;
+
+// makes a Vk call on the interface
+#define GR_VK_CALL(IFACE, X) (IFACE)->fFunctions.f##X;
+// same as GR_VK_CALL but checks for success
+#ifdef SK_DEBUG
+#define GR_VK_CALL_ERRCHECK(IFACE, X) \
+ VkResult SK_MACRO_APPEND_LINE(ret) = GR_VK_CALL(IFACE, X); \
+ SkASSERT(VK_SUCCESS == SK_MACRO_APPEND_LINE(ret));
+#else
+#define GR_VK_CALL_ERRCHECK(IFACE, X) (void) GR_VK_CALL(IFACE, X);
+#endif
+
+/**
+ * Returns the vulkan texture format for the given GrPixelConfig
+ */
+bool GrPixelConfigToVkFormat(GrPixelConfig config, VkFormat* format);
+
+/**
+* Returns the GrPixelConfig for the given vulkan texture format
+*/
+bool GrVkFormatToPixelConfig(VkFormat format, GrPixelConfig* config);
+
+/**
+ * Returns true if the given vulkan texture format is sRGB encoded.
+ * Also provides the non-sRGB version, if there is one.
+ */
+bool GrVkFormatIsSRGB(VkFormat format, VkFormat* linearFormat);
+
+bool GrSampleCountToVkSampleCount(uint32_t samples, VkSampleCountFlagBits* vkSamples);
+
+bool GrCompileVkShaderModule(const GrVkGpu* gpu,
+ const char* shaderString,
+ VkShaderStageFlagBits stage,
+ VkShaderModule* shaderModule,
+ VkPipelineShaderStageCreateInfo* stageInfo);
+
+#endif
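A usage sketch for the two macros above (not part of the imported patch; 'gpu', 'buffer', and 'fence' are assumed to be in scope):

// GR_VK_CALL forwards through the GrVkInterface function table; this expands to
// gpu->vkInterface()->fFunctions.fDestroyBuffer(gpu->device(), buffer, nullptr);
GR_VK_CALL(gpu->vkInterface(), DestroyBuffer(gpu->device(), buffer, nullptr));

// For entry points returning a VkResult, the ERRCHECK variant additionally asserts
// VK_SUCCESS in SK_DEBUG builds and simply discards the result otherwise.
GR_VK_CALL_ERRCHECK(gpu->vkInterface(), ResetFences(gpu->device(), 1, &fence));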
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkVaryingHandler.cpp b/gfx/skia/skia/src/gpu/vk/GrVkVaryingHandler.cpp
new file mode 100644
index 000000000..f6fed2195
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkVaryingHandler.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkVaryingHandler.h"
+
+/** Returns the number of locations taken up by a given GrSLType. We assume that all
+ scalar values are 32 bits. */
+static inline int grsltype_to_location_size(GrSLType type) {
+ static const uint32_t kSizes[] = {
+ 0, // kVoid_GrSLType
+ 1, // kFloat_GrSLType
+ 1, // kVec2f_GrSLType
+ 1, // kVec3f_GrSLType
+ 1, // kVec4f_GrSLType
+ 2, // kMat22f_GrSLType
+ 3, // kMat33f_GrSLType
+ 4, // kMat44f_GrSLType
+ 0, // kTexture2DSampler_GrSLType
+ 0, // kTextureExternalSampler_GrSLType
+ 0, // kTexture2DRectSampler_GrSLType
+ 0, // kTextureBufferSampler_GrSLType
+ 1, // kBool_GrSLType
+ 1, // kInt_GrSLType
+ 1, // kUint_GrSLType
+ 0, // kTexture2D_GrSLType
+ 0, // kSampler_GrSLType
+ };
+ return kSizes[type];
+
+ GR_STATIC_ASSERT(0 == kVoid_GrSLType);
+ GR_STATIC_ASSERT(1 == kFloat_GrSLType);
+ GR_STATIC_ASSERT(2 == kVec2f_GrSLType);
+ GR_STATIC_ASSERT(3 == kVec3f_GrSLType);
+ GR_STATIC_ASSERT(4 == kVec4f_GrSLType);
+ GR_STATIC_ASSERT(5 == kMat22f_GrSLType);
+ GR_STATIC_ASSERT(6 == kMat33f_GrSLType);
+ GR_STATIC_ASSERT(7 == kMat44f_GrSLType);
+ GR_STATIC_ASSERT(8 == kTexture2DSampler_GrSLType);
+ GR_STATIC_ASSERT(9 == kTextureExternalSampler_GrSLType);
+ GR_STATIC_ASSERT(10 == kTexture2DRectSampler_GrSLType);
+ GR_STATIC_ASSERT(11 == kTextureBufferSampler_GrSLType);
+ GR_STATIC_ASSERT(12 == kBool_GrSLType);
+ GR_STATIC_ASSERT(13 == kInt_GrSLType);
+ GR_STATIC_ASSERT(14 == kUint_GrSLType);
+ GR_STATIC_ASSERT(15 == kTexture2D_GrSLType);
+ GR_STATIC_ASSERT(16 == kSampler_GrSLType);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kSizes) == kGrSLTypeCount);
+}
+
+void finalize_helper(GrVkVaryingHandler::VarArray& vars) {
+ int locationIndex = 0;
+ for (int i = 0; i < vars.count(); ++i) {
+ GrGLSLShaderVar& var = vars[i];
+ SkString location;
+ location.appendf("location = %d", locationIndex);
+ var.setLayoutQualifier(location.c_str());
+
+ int elementSize = grsltype_to_location_size(var.getType());
+ SkASSERT(elementSize);
+ int numElements = 1;
+ if (var.isArray()) {
+ numElements = var.getArrayCount();
+ }
+ locationIndex += elementSize * numElements;
+ }
+ // Vulkan requires at least 64 locations to be supported for both vertex output and fragment
+ // input. If we ever hit this assert, then we'll need to add a cap to actually check the
+ // supported input and output values and adjust our supported shaders based on those values.
+ SkASSERT(locationIndex <= 64);
+}
+
+void GrVkVaryingHandler::onFinalize() {
+ finalize_helper(fVertexInputs);
+ finalize_helper(fVertexOutputs);
+ finalize_helper(fGeomInputs);
+ finalize_helper(fGeomOutputs);
+ finalize_helper(fFragInputs);
+ finalize_helper(fFragOutputs);
+}
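To make the location bookkeeping concrete, a small worked sketch (illustrative only) that mirrors the arithmetic in finalize_helper():

int next = 0;
next += grsltype_to_location_size(kVec4f_GrSLType);      // vec4     -> 1 location,  next == 1
next += grsltype_to_location_size(kMat33f_GrSLType);     // mat3     -> 3 locations, next == 4
next += 4 * grsltype_to_location_size(kFloat_GrSLType);  // float[4] -> 4 locations, next == 8
SkASSERT(next <= 64);  // the minimum guaranteed by Vulkan, as noted in the comment above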
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkVaryingHandler.h b/gfx/skia/skia/src/gpu/vk/GrVkVaryingHandler.h
new file mode 100644
index 000000000..cebf45591
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkVaryingHandler.h
@@ -0,0 +1,27 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkVaryingHandler_DEFINED
+#define GrVkVaryingHandler_DEFINED
+
+#include "glsl/GrGLSLVarying.h"
+
+class GrVkVaryingHandler : public GrGLSLVaryingHandler {
+public:
+ GrVkVaryingHandler(GrGLSLProgramBuilder* program) : INHERITED(program) {}
+
+ typedef GrGLSLVaryingHandler::VarArray VarArray;
+
+private:
+ void onFinalize() override;
+
+ friend class GrVkPipelineStateBuilder;
+
+ typedef GrGLSLVaryingHandler INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkVertexBuffer.cpp b/gfx/skia/skia/src/gpu/vk/GrVkVertexBuffer.cpp
new file mode 100644
index 000000000..de7f3c220
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkVertexBuffer.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkVertexBuffer.h"
+#include "GrVkGpu.h"
+
+GrVkVertexBuffer::GrVkVertexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* bufferResource)
+ : INHERITED(gpu, desc.fSizeInBytes, kVertex_GrBufferType,
+ desc.fDynamic ? kDynamic_GrAccessPattern : kStatic_GrAccessPattern)
+ , GrVkBuffer(desc, bufferResource) {
+ this->registerWithCache(SkBudgeted::kYes);
+}
+
+GrVkVertexBuffer* GrVkVertexBuffer::Create(GrVkGpu* gpu, size_t size, bool dynamic) {
+ GrVkBuffer::Desc desc;
+ desc.fDynamic = dynamic;
+ desc.fType = GrVkBuffer::kVertex_Type;
+ desc.fSizeInBytes = size;
+
+ const GrVkBuffer::Resource* bufferResource = GrVkBuffer::Create(gpu, desc);
+ if (!bufferResource) {
+ return nullptr;
+ }
+
+ GrVkVertexBuffer* buffer = new GrVkVertexBuffer(gpu, desc, bufferResource);
+ if (!buffer) {
+ bufferResource->unref(gpu);
+ }
+ return buffer;
+}
+
+void GrVkVertexBuffer::onRelease() {
+ if (!this->wasDestroyed()) {
+ this->vkRelease(this->getVkGpu());
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrVkVertexBuffer::onAbandon() {
+ this->vkAbandon();
+ INHERITED::onAbandon();
+}
+
+void GrVkVertexBuffer::onMap() {
+ if (!this->wasDestroyed()) {
+ this->GrBuffer::fMapPtr = this->vkMap(this->getVkGpu());
+ }
+}
+
+void GrVkVertexBuffer::onUnmap() {
+ if (!this->wasDestroyed()) {
+ this->vkUnmap(this->getVkGpu());
+ }
+}
+
+bool GrVkVertexBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
+ if (!this->wasDestroyed()) {
+ return this->vkUpdateData(this->getVkGpu(), src, srcSizeInBytes);
+ } else {
+ return false;
+ }
+}
+
+GrVkGpu* GrVkVertexBuffer::getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+}
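A usage sketch (not part of the imported patch) of the expected flow through the GrBuffer interface this class implements; 'gpu' and 'verts' are assumed locals, and updateData()/unref() are taken to be the public GrBuffer/GrGpuResource wrappers around the virtuals above:

GrVkVertexBuffer* buffer = GrVkVertexBuffer::Create(gpu, sizeof(verts), /*dynamic=*/true);
if (buffer) {
    buffer->updateData(verts, sizeof(verts));  // ends up in onUpdateData()
    buffer->unref();                           // vertex buffers are ref-counted GPU resources
}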
diff --git a/gfx/skia/skia/src/gpu/vk/GrVkVertexBuffer.h b/gfx/skia/skia/src/gpu/vk/GrVkVertexBuffer.h
new file mode 100644
index 000000000..cae781e31
--- /dev/null
+++ b/gfx/skia/skia/src/gpu/vk/GrVkVertexBuffer.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkVertexBuffer_DEFINED
+#define GrVkVertexBuffer_DEFINED
+
+#include "GrBuffer.h"
+#include "GrVkBuffer.h"
+
+class GrVkGpu;
+
+class GrVkVertexBuffer : public GrBuffer, public GrVkBuffer {
+public:
+ static GrVkVertexBuffer* Create(GrVkGpu* gpu, size_t size, bool dynamic);
+
+protected:
+ void onAbandon() override;
+ void onRelease() override;
+
+private:
+ GrVkVertexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* resource);
+
+ void onMap() override;
+ void onUnmap() override;
+ bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
+
+ GrVkGpu* getVkGpu() const;
+
+ typedef GrBuffer INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkImage.cpp b/gfx/skia/skia/src/image/SkImage.cpp
new file mode 100644
index 000000000..67779ba14
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage.cpp
@@ -0,0 +1,524 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmap.h"
+#include "SkBitmapCache.h"
+#include "SkCanvas.h"
+#include "SkData.h"
+#include "SkImageEncoder.h"
+#include "SkImageFilter.h"
+#include "SkImageFilterCache.h"
+#include "SkImageGenerator.h"
+#include "SkImagePriv.h"
+#include "SkImageShader.h"
+#include "SkImage_Base.h"
+#include "SkNextID.h"
+#include "SkPicture.h"
+#include "SkPixelRef.h"
+#include "SkPixelSerializer.h"
+#include "SkReadPixelsRec.h"
+#include "SkSpecialImage.h"
+#include "SkString.h"
+#include "SkSurface.h"
+
+#if SK_SUPPORT_GPU
+#include "GrTexture.h"
+#include "GrContext.h"
+#include "SkImage_Gpu.h"
+#endif
+
+SkImage::SkImage(int width, int height, uint32_t uniqueID)
+ : fWidth(width)
+ , fHeight(height)
+ , fUniqueID(kNeedNewImageUniqueID == uniqueID ? SkNextID::ImageID() : uniqueID)
+{
+ SkASSERT(width > 0);
+ SkASSERT(height > 0);
+}
+
+bool SkImage::peekPixels(SkPixmap* pm) const {
+ SkPixmap tmp;
+ if (!pm) {
+ pm = &tmp;
+ }
+ return as_IB(this)->onPeekPixels(pm);
+}
+
+#ifdef SK_SUPPORT_LEGACY_PEEKPIXELS_PARMS
+const void* SkImage::peekPixels(SkImageInfo* info, size_t* rowBytes) const {
+ SkPixmap pm;
+ if (this->peekPixels(&pm)) {
+ if (info) {
+ *info = pm.info();
+ }
+ if (rowBytes) {
+ *rowBytes = pm.rowBytes();
+ }
+ return pm.addr();
+ }
+ return nullptr;
+}
+#endif
+
+bool SkImage::readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY, CachingHint chint) const {
+ SkReadPixelsRec rec(dstInfo, dstPixels, dstRowBytes, srcX, srcY);
+ if (!rec.trim(this->width(), this->height())) {
+ return false;
+ }
+ return as_IB(this)->onReadPixels(rec.fInfo, rec.fPixels, rec.fRowBytes, rec.fX, rec.fY, chint);
+}
+
+bool SkImage::scalePixels(const SkPixmap& dst, SkFilterQuality quality, CachingHint chint) const {
+ if (this->width() == dst.width() && this->height() == dst.height()) {
+ return this->readPixels(dst, 0, 0, chint);
+ }
+
+ // Idea: If/when SkImageGenerator supports a native-scaling API (where the generator itself
+ // can scale more efficiently) we should take advantage of it here.
+ //
+ SkBitmap bm;
+ if (as_IB(this)->getROPixels(&bm, chint)) {
+ bm.lockPixels();
+ SkPixmap pmap;
+ // Note: By calling the pixmap scaler, we never cache the final result, so the chint
+ // is (currently) only being applied to the getROPixels. If we get a request to
+ // also attempt to cache the final (scaled) result, we would add that logic here.
+ //
+ return bm.peekPixels(&pmap) && pmap.scalePixels(dst, quality);
+ }
+ return false;
+}
+
+void SkImage::preroll(GrContext* ctx) const {
+ // For now, and to maintain parity w/ previous pixelref behavior, we just force the image
+ // to produce a cached raster-bitmap form, so that drawing to a raster canvas should be fast.
+ //
+ SkBitmap bm;
+ if (as_IB(this)->getROPixels(&bm)) {
+ bm.lockPixels();
+ bm.unlockPixels();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkAlphaType SkImage::alphaType() const {
+ return as_IB(this)->onAlphaType();
+}
+
+sk_sp<SkShader> SkImage::makeShader(SkShader::TileMode tileX, SkShader::TileMode tileY,
+ const SkMatrix* localMatrix) const {
+ return SkImageShader::Make(sk_ref_sp(const_cast<SkImage*>(this)), tileX, tileY, localMatrix);
+}
+
+#ifdef SK_SUPPORT_LEGACY_CREATESHADER_PTR
+SkShader* SkImage::newShader(SkShader::TileMode tileX, SkShader::TileMode tileY,
+ const SkMatrix* localMatrix) const {
+ return this->makeShader(tileX, tileY, localMatrix).release();
+}
+#endif
+
+SkData* SkImage::encode(SkImageEncoder::Type type, int quality) const {
+ SkBitmap bm;
+ if (as_IB(this)->getROPixels(&bm)) {
+ return SkImageEncoder::EncodeData(bm, type, quality);
+ }
+ return nullptr;
+}
+
+SkData* SkImage::encode(SkPixelSerializer* serializer) const {
+ SkAutoTUnref<SkPixelSerializer> defaultSerializer;
+ SkPixelSerializer* effectiveSerializer = serializer;
+ if (!effectiveSerializer) {
+ defaultSerializer.reset(SkImageEncoder::CreatePixelSerializer());
+ SkASSERT(defaultSerializer.get());
+ effectiveSerializer = defaultSerializer.get();
+ }
+ sk_sp<SkData> encoded(this->refEncoded());
+ if (encoded && effectiveSerializer->useEncodedData(encoded->data(), encoded->size())) {
+ return encoded.release();
+ }
+
+ SkBitmap bm;
+ SkAutoPixmapUnlock apu;
+ if (as_IB(this)->getROPixels(&bm) && bm.requestLock(&apu)) {
+ return effectiveSerializer->encode(apu.pixmap());
+ }
+
+ return nullptr;
+}
+
+SkData* SkImage::refEncoded() const {
+ GrContext* ctx = nullptr; // should we allow the caller to pass in a ctx?
+ return as_IB(this)->onRefEncoded(ctx);
+}
+
+sk_sp<SkImage> SkImage::MakeFromEncoded(sk_sp<SkData> encoded, const SkIRect* subset) {
+ if (nullptr == encoded || 0 == encoded->size()) {
+ return nullptr;
+ }
+ SkImageGenerator* generator = SkImageGenerator::NewFromEncoded(encoded.get());
+ return SkImage::MakeFromGenerator(generator, subset);
+}
+
+const char* SkImage::toString(SkString* str) const {
+ str->appendf("image: (id:%d (%d, %d) %s)", this->uniqueID(), this->width(), this->height(),
+ this->isOpaque() ? "opaque" : "");
+ return str->c_str();
+}
+
+sk_sp<SkImage> SkImage::makeSubset(const SkIRect& subset) const {
+ if (subset.isEmpty()) {
+ return nullptr;
+ }
+
+ const SkIRect bounds = SkIRect::MakeWH(this->width(), this->height());
+ if (!bounds.contains(subset)) {
+ return nullptr;
+ }
+
+    // optimization: return self if the subset == our bounds
+ if (bounds == subset) {
+ return sk_ref_sp(const_cast<SkImage*>(this));
+ }
+ return as_IB(this)->onMakeSubset(subset);
+}
+
+#if SK_SUPPORT_GPU
+
+GrTexture* SkImage::getTexture() const {
+ return as_IB(this)->peekTexture();
+}
+
+bool SkImage::isTextureBacked() const { return SkToBool(as_IB(this)->peekTexture()); }
+
+GrBackendObject SkImage::getTextureHandle(bool flushPendingGrContextIO) const {
+ GrTexture* texture = as_IB(this)->peekTexture();
+ if (texture) {
+ GrContext* context = texture->getContext();
+ if (context) {
+ if (flushPendingGrContextIO) {
+ context->prepareSurfaceForExternalIO(texture);
+ }
+ }
+ return texture->getTextureHandle();
+ }
+ return 0;
+}
+
+#else
+
+GrTexture* SkImage::getTexture() const { return nullptr; }
+
+bool SkImage::isTextureBacked() const { return false; }
+
+GrBackendObject SkImage::getTextureHandle(bool) const { return 0; }
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool raster_canvas_supports(const SkImageInfo& info) {
+ switch (info.colorType()) {
+ case kN32_SkColorType:
+ return kUnpremul_SkAlphaType != info.alphaType();
+ case kRGB_565_SkColorType:
+ return true;
+ case kAlpha_8_SkColorType:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+SkImage_Base::SkImage_Base(int width, int height, uint32_t uniqueID)
+ : INHERITED(width, height, uniqueID)
+ , fAddedToCache(false)
+{}
+
+SkImage_Base::~SkImage_Base() {
+ if (fAddedToCache.load()) {
+ SkNotifyBitmapGenIDIsStale(this->uniqueID());
+ }
+}
+
+bool SkImage_Base::onReadPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY, CachingHint) const {
+ if (!raster_canvas_supports(dstInfo)) {
+ return false;
+ }
+
+ SkBitmap bm;
+ bm.installPixels(dstInfo, dstPixels, dstRowBytes);
+ SkCanvas canvas(bm);
+
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+ canvas.drawImage(this, -SkIntToScalar(srcX), -SkIntToScalar(srcY), &paint);
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkImage::readPixels(const SkPixmap& pmap, int srcX, int srcY, CachingHint chint) const {
+ return this->readPixels(pmap.info(), pmap.writable_addr(), pmap.rowBytes(), srcX, srcY, chint);
+}
+
+#if SK_SUPPORT_GPU
+#include "GrTextureToYUVPlanes.h"
+#endif
+
+#include "SkRGBAToYUV.h"
+
+bool SkImage::readYUV8Planes(const SkISize sizes[3], void* const planes[3],
+ const size_t rowBytes[3], SkYUVColorSpace colorSpace) const {
+#if SK_SUPPORT_GPU
+ if (GrTexture* texture = as_IB(this)->peekTexture()) {
+ if (GrTextureToYUVPlanes(texture, sizes, planes, rowBytes, colorSpace)) {
+ return true;
+ }
+ }
+#endif
+ return SkRGBAToYUV(this, sizes, planes, rowBytes, colorSpace);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImage> SkImage::MakeFromBitmap(const SkBitmap& bm) {
+ SkPixelRef* pr = bm.pixelRef();
+ if (nullptr == pr) {
+ return nullptr;
+ }
+
+ return SkMakeImageFromRasterBitmap(bm, kIfMutable_SkCopyPixelsMode);
+}
+
+bool SkImage::asLegacyBitmap(SkBitmap* bitmap, LegacyBitmapMode mode) const {
+ return as_IB(this)->onAsLegacyBitmap(bitmap, mode);
+}
+
+bool SkImage_Base::onAsLegacyBitmap(SkBitmap* bitmap, LegacyBitmapMode mode) const {
+ // As the base-class, all we can do is make a copy (regardless of mode).
+ // Subclasses that want to be more optimal should override.
+ SkImageInfo info = this->onImageInfo().makeColorType(kN32_SkColorType)
+ .makeAlphaType(this->alphaType());
+ if (!bitmap->tryAllocPixels(info)) {
+ return false;
+ }
+ if (!this->readPixels(bitmap->info(), bitmap->getPixels(), bitmap->rowBytes(), 0, 0)) {
+ bitmap->reset();
+ return false;
+ }
+
+ if (kRO_LegacyBitmapMode == mode) {
+ bitmap->setImmutable();
+ }
+ return true;
+}
+
+sk_sp<SkImage> SkImage::MakeFromPicture(sk_sp<SkPicture> picture, const SkISize& dimensions,
+ const SkMatrix* matrix, const SkPaint* paint) {
+ if (!picture) {
+ return nullptr;
+ }
+ return MakeFromGenerator(SkImageGenerator::NewFromPicture(dimensions, picture.get(),
+ matrix, paint));
+}
+
+sk_sp<SkImage> SkImage::makeWithFilter(const SkImageFilter* filter, const SkIRect& subset,
+ const SkIRect& clipBounds, SkIRect* outSubset,
+ SkIPoint* offset) const {
+ if (!filter || !outSubset || !offset || !this->bounds().contains(subset)) {
+ return nullptr;
+ }
+ sk_sp<SkSpecialImage> srcSpecialImage = SkSpecialImage::MakeFromImage(
+ subset, sk_ref_sp(const_cast<SkImage*>(this)));
+ if (!srcSpecialImage) {
+ return nullptr;
+ }
+
+ SkAutoTUnref<SkImageFilterCache> cache(
+ SkImageFilterCache::Create(SkImageFilterCache::kDefaultTransientSize));
+ SkImageFilter::OutputProperties outputProperties(as_IB(this)->onImageInfo().colorSpace());
+ SkImageFilter::Context context(SkMatrix::I(), clipBounds, cache.get(), outputProperties);
+
+ sk_sp<SkSpecialImage> result =
+ filter->filterImage(srcSpecialImage.get(), context, offset);
+
+ if (!result) {
+ return nullptr;
+ }
+
+ SkIRect fullSize = SkIRect::MakeWH(result->width(), result->height());
+#if SK_SUPPORT_GPU
+ if (result->isTextureBacked()) {
+ GrContext* context = result->getContext();
+ sk_sp<GrTexture> texture = result->asTextureRef(context);
+ fullSize = SkIRect::MakeWH(texture->width(), texture->height());
+ }
+#endif
+ *outSubset = SkIRect::MakeWH(result->width(), result->height());
+ if (!outSubset->intersect(clipBounds.makeOffset(-offset->x(), -offset->y()))) {
+ return nullptr;
+ }
+ offset->fX += outSubset->x();
+ offset->fY += outSubset->y();
+ // This isn't really a "tight" subset, but includes any texture padding.
+ return result->makeTightSubset(fullSize);
+}
+
+bool SkImage::isLazyGenerated() const {
+ return as_IB(this)->onIsLazyGenerated();
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+
+#if !SK_SUPPORT_GPU
+
+sk_sp<SkImage> SkImage::MakeTextureFromPixmap(GrContext*, const SkPixmap&, SkBudgeted budgeted) {
+ return nullptr;
+}
+
+sk_sp<SkImage> MakeTextureFromMipMap(GrContext*, const SkImageInfo&, const GrMipLevel* texels,
+ int mipLevelCount, SkBudgeted, SkSourceGammaTreatment) {
+ return nullptr;
+}
+
+sk_sp<SkImage> SkImage::MakeFromTexture(GrContext*, const GrBackendTextureDesc&, SkAlphaType,
+ sk_sp<SkColorSpace>, TextureReleaseProc, ReleaseContext) {
+ return nullptr;
+}
+
+size_t SkImage::getDeferredTextureImageData(const GrContextThreadSafeProxy&,
+ const DeferredTextureImageUsageParams[],
+ int paramCnt, void* buffer,
+ SkSourceGammaTreatment treatment) const {
+ return 0;
+}
+
+sk_sp<SkImage> SkImage::MakeFromDeferredTextureImageData(GrContext* context, const void*,
+ SkBudgeted) {
+ return nullptr;
+}
+
+sk_sp<SkImage> SkImage::MakeFromAdoptedTexture(GrContext*, const GrBackendTextureDesc&,
+ SkAlphaType, sk_sp<SkColorSpace>) {
+ return nullptr;
+}
+
+sk_sp<SkImage> SkImage::MakeFromYUVTexturesCopy(GrContext* ctx, SkYUVColorSpace space,
+ const GrBackendObject yuvTextureHandles[3],
+ const SkISize yuvSizes[3],
+ GrSurfaceOrigin origin,
+ sk_sp<SkColorSpace> imageColorSpace) {
+ return nullptr;
+}
+
+sk_sp<SkImage> SkImage::makeTextureImage(GrContext*) const {
+ return nullptr;
+}
+
+sk_sp<SkImage> SkImage::makeNonTextureImage() const {
+ return sk_ref_sp(const_cast<SkImage*>(this));
+}
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_SUPPORT_LEGACY_IMAGEFACTORY
+SkImage* SkImage::NewRasterCopy(const Info& info, const void* pixels, size_t rowBytes,
+ SkColorTable* ctable) {
+ return MakeRasterCopy(SkPixmap(info, pixels, rowBytes, ctable)).release();
+}
+
+SkImage* SkImage::NewRasterData(const Info& info, SkData* pixels, size_t rowBytes) {
+ return MakeRasterData(info, sk_ref_sp(pixels), rowBytes).release();
+}
+
+SkImage* SkImage::NewFromRaster(const Info& info, const void* pixels, size_t rowBytes,
+ RasterReleaseProc proc, ReleaseContext releasectx) {
+ return MakeFromRaster(SkPixmap(info, pixels, rowBytes), proc, releasectx).release();
+}
+
+SkImage* SkImage::NewFromBitmap(const SkBitmap& bm) {
+ return MakeFromBitmap(bm).release();
+}
+
+SkImage* SkImage::NewFromGenerator(SkImageGenerator* gen, const SkIRect* subset) {
+ return MakeFromGenerator(gen, subset).release();
+}
+
+SkImage* SkImage::NewFromEncoded(SkData* encoded, const SkIRect* subset) {
+ return MakeFromEncoded(sk_ref_sp(encoded), subset).release();
+}
+
+SkImage* SkImage::NewFromTexture(GrContext* ctx, const GrBackendTextureDesc& desc, SkAlphaType at,
+ TextureReleaseProc proc, ReleaseContext releasectx) {
+ return MakeFromTexture(ctx, desc, at, proc, releasectx).release();
+}
+
+SkImage* SkImage::NewFromAdoptedTexture(GrContext* ctx, const GrBackendTextureDesc& desc,
+ SkAlphaType at) {
+ return MakeFromAdoptedTexture(ctx, desc, at).release();
+}
+
+SkImage* SkImage::NewFromYUVTexturesCopy(GrContext* ctx, SkYUVColorSpace space,
+ const GrBackendObject yuvTextureHandles[3],
+ const SkISize yuvSizes[3],
+ GrSurfaceOrigin origin) {
+ return MakeFromYUVTexturesCopy(ctx, space, yuvTextureHandles, yuvSizes, origin).release();
+}
+
+SkImage* SkImage::NewFromPicture(const SkPicture* picture, const SkISize& dimensions,
+ const SkMatrix* matrix, const SkPaint* paint) {
+ return MakeFromPicture(sk_ref_sp(const_cast<SkPicture*>(picture)), dimensions,
+ matrix, paint).release();
+}
+
+SkImage* SkImage::NewTextureFromPixmap(GrContext* ctx, const SkPixmap& pmap, SkBudgeted budgeted) {
+ return MakeTextureFromPixmap(ctx, pmap, budgeted).release();
+}
+
+SkImage* SkImage::NewFromDeferredTextureImageData(GrContext* ctx, const void* data,
+ SkBudgeted budgeted) {
+ return MakeFromDeferredTextureImageData(ctx, data, budgeted).release();
+}
+#endif
+
+sk_sp<SkImage> MakeTextureFromMipMap(GrContext*, const SkImageInfo&, const GrMipLevel* texels,
+ int mipLevelCount, SkBudgeted) {
+ return nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#include "SkImageDeserializer.h"
+
+sk_sp<SkImage> SkImageDeserializer::makeFromData(SkData* data, const SkIRect* subset) {
+ return SkImage::MakeFromEncoded(sk_ref_sp(data), subset);
+}
+sk_sp<SkImage> SkImageDeserializer::makeFromMemory(const void* data, size_t length,
+ const SkIRect* subset) {
+ return SkImage::MakeFromEncoded(SkData::MakeWithCopy(data, length), subset);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkImage_pinAsTexture(const SkImage* image, GrContext* ctx) {
+ SkASSERT(image);
+ SkASSERT(ctx);
+ as_IB(image)->onPinAsTexture(ctx);
+}
+
+void SkImage_unpinAsTexture(const SkImage* image, GrContext* ctx) {
+ SkASSERT(image);
+ SkASSERT(ctx);
+ as_IB(image)->onUnpinAsTexture(ctx);
+}
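A minimal end-to-end sketch of the public API defined in this file (illustrative only; the encoded-buffer parameters are placeholders):

static void sketch_decode_and_read(const void* encodedBytes, size_t encodedSize) {
    sk_sp<SkData> data = SkData::MakeWithCopy(encodedBytes, encodedSize);
    sk_sp<SkImage> image = SkImage::MakeFromEncoded(data);  // decoded lazily, on demand
    if (!image) {
        return;
    }

    // Force a decode into caller-owned memory.
    SkBitmap bitmap;
    bitmap.allocN32Pixels(image->width(), image->height());
    image->readPixels(bitmap.info(), bitmap.getPixels(), bitmap.rowBytes(), 0, 0);

    // Crop; makeSubset() returns the image itself when the subset equals its bounds.
    sk_sp<SkImage> topLeft = image->makeSubset(SkIRect::MakeWH(16, 16));
}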
diff --git a/gfx/skia/skia/src/image/SkImageShader.cpp b/gfx/skia/skia/src/image/SkImageShader.cpp
new file mode 100644
index 000000000..8407f1008
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImageShader.cpp
@@ -0,0 +1,266 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapProcShader.h"
+#include "SkBitmapProvider.h"
+#include "SkColorShader.h"
+#include "SkColorTable.h"
+#include "SkEmptyShader.h"
+#include "SkImage_Base.h"
+#include "SkImageShader.h"
+#include "SkReadBuffer.h"
+#include "SkWriteBuffer.h"
+
+SkImageShader::SkImageShader(sk_sp<SkImage> img, TileMode tmx, TileMode tmy, const SkMatrix* matrix)
+ : INHERITED(matrix)
+ , fImage(std::move(img))
+ , fTileModeX(tmx)
+ , fTileModeY(tmy)
+{}
+
+sk_sp<SkFlattenable> SkImageShader::CreateProc(SkReadBuffer& buffer) {
+ const TileMode tx = (TileMode)buffer.readUInt();
+ const TileMode ty = (TileMode)buffer.readUInt();
+ SkMatrix matrix;
+ buffer.readMatrix(&matrix);
+ sk_sp<SkImage> img = buffer.readImage();
+ if (!img) {
+ return nullptr;
+ }
+ return SkImageShader::Make(std::move(img), tx, ty, &matrix);
+}
+
+void SkImageShader::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeUInt(fTileModeX);
+ buffer.writeUInt(fTileModeY);
+ buffer.writeMatrix(this->getLocalMatrix());
+ buffer.writeImage(fImage.get());
+}
+
+bool SkImageShader::isOpaque() const {
+ return fImage->isOpaque();
+}
+
+size_t SkImageShader::onContextSize(const ContextRec& rec) const {
+ return SkBitmapProcLegacyShader::ContextSize(rec, SkBitmapProvider(fImage.get()).info());
+}
+
+SkShader::Context* SkImageShader::onCreateContext(const ContextRec& rec, void* storage) const {
+ return SkBitmapProcLegacyShader::MakeContext(*this, fTileModeX, fTileModeY,
+ SkBitmapProvider(fImage.get()), rec, storage);
+}
+
+SkImage* SkImageShader::onIsAImage(SkMatrix* texM, TileMode xy[]) const {
+ if (texM) {
+ *texM = this->getLocalMatrix();
+ }
+ if (xy) {
+ xy[0] = (TileMode)fTileModeX;
+ xy[1] = (TileMode)fTileModeY;
+ }
+ return const_cast<SkImage*>(fImage.get());
+}
+
+#ifdef SK_SUPPORT_LEGACY_SHADER_ISABITMAP
+bool SkImageShader::onIsABitmap(SkBitmap* texture, SkMatrix* texM, TileMode xy[]) const {
+ const SkBitmap* bm = as_IB(fImage)->onPeekBitmap();
+ if (!bm) {
+ return false;
+ }
+
+ if (texture) {
+ *texture = *bm;
+ }
+ if (texM) {
+ *texM = this->getLocalMatrix();
+ }
+ if (xy) {
+ xy[0] = (TileMode)fTileModeX;
+ xy[1] = (TileMode)fTileModeY;
+ }
+ return true;
+}
+#endif
+
+static bool bitmap_is_too_big(int w, int h) {
+ // SkBitmapProcShader stores bitmap coordinates in a 16bit buffer, as it
+ // communicates between its matrix-proc and its sampler-proc. Until we can
+ // widen that, we have to reject bitmaps that are larger.
+ //
+ static const int kMaxSize = 65535;
+
+ return w > kMaxSize || h > kMaxSize;
+}
+
+// Returns true and sets *color if the image can be drawn as a single color
+// (for efficiency).
+static bool can_use_color_shader(const SkImage* image, SkColor* color) {
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ // HWUI does not support color shaders (see b/22390304)
+ return false;
+#endif
+
+ if (1 != image->width() || 1 != image->height()) {
+ return false;
+ }
+
+ SkPixmap pmap;
+ if (!image->peekPixels(&pmap)) {
+ return false;
+ }
+
+ switch (pmap.colorType()) {
+ case kN32_SkColorType:
+ *color = SkUnPreMultiply::PMColorToColor(*pmap.addr32(0, 0));
+ return true;
+ case kRGB_565_SkColorType:
+ *color = SkPixel16ToColor(*pmap.addr16(0, 0));
+ return true;
+ case kIndex_8_SkColorType: {
+ const SkColorTable& ctable = *pmap.ctable();
+ *color = SkUnPreMultiply::PMColorToColor(ctable[*pmap.addr8(0, 0)]);
+ return true;
+ }
+ default: // just skip the other configs for now
+ break;
+ }
+ return false;
+}
+
+sk_sp<SkShader> SkImageShader::Make(sk_sp<SkImage> image, TileMode tx, TileMode ty,
+ const SkMatrix* localMatrix,
+ SkTBlitterAllocator* allocator) {
+ SkShader* shader;
+ SkColor color;
+ if (!image || bitmap_is_too_big(image->width(), image->height())) {
+ if (nullptr == allocator) {
+ shader = new SkEmptyShader;
+ } else {
+ shader = allocator->createT<SkEmptyShader>();
+ }
+ } else if (can_use_color_shader(image.get(), &color)) {
+ if (nullptr == allocator) {
+ shader = new SkColorShader(color);
+ } else {
+ shader = allocator->createT<SkColorShader>(color);
+ }
+ } else {
+ if (nullptr == allocator) {
+ shader = new SkImageShader(image, tx, ty, localMatrix);
+ } else {
+ shader = allocator->createT<SkImageShader>(image, tx, ty, localMatrix);
+ }
+ }
+ return sk_sp<SkShader>(shader);
+}
+
+#ifndef SK_IGNORE_TO_STRING
+void SkImageShader::toString(SkString* str) const {
+ const char* gTileModeName[SkShader::kTileModeCount] = {
+ "clamp", "repeat", "mirror"
+ };
+
+ str->appendf("ImageShader: ((%s %s) ", gTileModeName[fTileModeX], gTileModeName[fTileModeY]);
+ fImage->toString(str);
+ this->INHERITED::toString(str);
+ str->append(")");
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+
+#include "GrTextureAccess.h"
+#include "SkGr.h"
+#include "SkGrPriv.h"
+#include "effects/GrSimpleTextureEffect.h"
+#include "effects/GrBicubicEffect.h"
+#include "effects/GrSimpleTextureEffect.h"
+
+sk_sp<GrFragmentProcessor> SkImageShader::asFragmentProcessor(const AsFPArgs& args) const {
+ SkMatrix matrix;
+ matrix.setIDiv(fImage->width(), fImage->height());
+
+ SkMatrix lmInverse;
+ if (!this->getLocalMatrix().invert(&lmInverse)) {
+ return nullptr;
+ }
+ if (args.fLocalMatrix) {
+ SkMatrix inv;
+ if (!args.fLocalMatrix->invert(&inv)) {
+ return nullptr;
+ }
+ lmInverse.postConcat(inv);
+ }
+ matrix.preConcat(lmInverse);
+
+ SkShader::TileMode tm[] = { fTileModeX, fTileModeY };
+
+ // Must set wrap and filter on the sampler before requesting a texture. In two places below
+ // we check the matrix scale factors to determine how to interpret the filter quality setting.
+ // This completely ignores the complexity of the drawVertices case where explicit local coords
+ // are provided by the caller.
+ bool doBicubic;
+ GrTextureParams::FilterMode textureFilterMode =
+ GrSkFilterQualityToGrFilterMode(args.fFilterQuality, *args.fViewMatrix, this->getLocalMatrix(),
+ &doBicubic);
+ GrTextureParams params(tm, textureFilterMode);
+ SkAutoTUnref<GrTexture> texture(as_IB(fImage)->asTextureRef(args.fContext, params,
+ args.fGammaTreatment));
+ if (!texture) {
+ return nullptr;
+ }
+
+ SkImageInfo info = as_IB(fImage)->onImageInfo();
+ sk_sp<GrColorSpaceXform> colorSpaceXform = GrColorSpaceXform::Make(info.colorSpace(),
+ args.fDstColorSpace);
+ sk_sp<GrFragmentProcessor> inner;
+ if (doBicubic) {
+ inner = GrBicubicEffect::Make(texture, std::move(colorSpaceXform), matrix, tm);
+ } else {
+ inner = GrSimpleTextureEffect::Make(texture, std::move(colorSpaceXform), matrix, params);
+ }
+
+ if (GrPixelConfigIsAlphaOnly(texture->config())) {
+ return inner;
+ }
+ return sk_sp<GrFragmentProcessor>(GrFragmentProcessor::MulOutputByInputAlpha(std::move(inner)));
+}
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#include "SkImagePriv.h"
+
+sk_sp<SkShader> SkMakeBitmapShader(const SkBitmap& src, SkShader::TileMode tmx,
+ SkShader::TileMode tmy, const SkMatrix* localMatrix,
+ SkCopyPixelsMode cpm, SkTBlitterAllocator* allocator) {
+ // Until we learn otherwise, it seems that any caller that is passing an allocator must be
+ // assuming that the returned shader will have a stack-frame lifetime, so we assert that
+ // they are also asking for kNever_SkCopyPixelsMode. If that proves otherwise, we can remove
+ // or modify this assert.
+ SkASSERT(!allocator || (kNever_SkCopyPixelsMode == cpm));
+
+ return SkImageShader::Make(SkMakeImageFromRasterBitmap(src, cpm, allocator),
+ tmx, tmy, localMatrix, allocator);
+}
+
+static sk_sp<SkFlattenable> SkBitmapProcShader_CreateProc(SkReadBuffer& buffer) {
+ SkMatrix lm;
+ buffer.readMatrix(&lm);
+ sk_sp<SkImage> image = buffer.readBitmapAsImage();
+ SkShader::TileMode mx = (SkShader::TileMode)buffer.readUInt();
+ SkShader::TileMode my = (SkShader::TileMode)buffer.readUInt();
+ return image ? image->makeShader(mx, my, &lm) : nullptr;
+}
+
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkShader)
+SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkImageShader)
+SkFlattenable::Register("SkBitmapProcShader", SkBitmapProcShader_CreateProc, kSkShader_Type);
+SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END
+
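As a usage sketch (not part of the imported patch): tiling an image through the Make() factory above normally goes via SkImage::makeShader(); 'canvas' and 'image' are assumed to exist.

SkPaint paint;
paint.setShader(image->makeShader(SkShader::kRepeat_TileMode,
                                  SkShader::kMirror_TileMode,
                                  nullptr /* local matrix */));
canvas->drawRect(SkRect::MakeWH(512, 512), paint);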
diff --git a/gfx/skia/skia/src/image/SkImageShader.h b/gfx/skia/skia/src/image/SkImageShader.h
new file mode 100644
index 000000000..8905881a9
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImageShader.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageShader_DEFINED
+#define SkImageShader_DEFINED
+
+#include "SkImage.h"
+#include "SkShader.h"
+#include "SkBitmapProcShader.h"
+
+class SkImageShader : public SkShader {
+public:
+ static sk_sp<SkShader> Make(sk_sp<SkImage>, TileMode tx, TileMode ty,
+ const SkMatrix* localMatrix, SkTBlitterAllocator* = nullptr);
+
+ bool isOpaque() const override;
+
+ SK_TO_STRING_OVERRIDE()
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkImageShader)
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrFragmentProcessor> asFragmentProcessor(const AsFPArgs&) const override;
+#endif
+
+ SkImageShader(sk_sp<SkImage>, TileMode tx, TileMode ty, const SkMatrix* localMatrix);
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ size_t onContextSize(const ContextRec&) const override;
+ Context* onCreateContext(const ContextRec&, void* storage) const override;
+#ifdef SK_SUPPORT_LEGACY_SHADER_ISABITMAP
+ bool onIsABitmap(SkBitmap*, SkMatrix*, TileMode*) const override;
+#endif
+ SkImage* onIsAImage(SkMatrix*, TileMode*) const override;
+
+ sk_sp<SkImage> fImage;
+ const TileMode fTileModeX;
+ const TileMode fTileModeY;
+
+private:
+ friend class SkShader;
+
+ typedef SkShader INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkImage_Base.h b/gfx/skia/skia/src/image/SkImage_Base.h
new file mode 100644
index 000000000..b314cc6c2
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_Base.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImage_Base_DEFINED
+#define SkImage_Base_DEFINED
+
+#include "SkAtomics.h"
+#include "SkImage.h"
+#include "SkSurface.h"
+
+#if SK_SUPPORT_GPU
+ #include "GrTexture.h"
+#endif
+
+#include <new>
+
+class GrTextureParams;
+class SkImageCacherator;
+
+enum {
+ kNeedNewImageUniqueID = 0
+};
+
+class SkImage_Base : public SkImage {
+public:
+ SkImage_Base(int width, int height, uint32_t uniqueID);
+ virtual ~SkImage_Base();
+
+ // User: returns image info for this SkImage.
+    // Implementors: if you cannot return the value, return an invalid ImageInfo with w=0 & h=0
+ // & unknown color space.
+ virtual SkImageInfo onImageInfo() const = 0;
+ virtual SkAlphaType onAlphaType() const = 0;
+
+ virtual bool onPeekPixels(SkPixmap*) const { return false; }
+
+ virtual const SkBitmap* onPeekBitmap() const { return nullptr; }
+
+ // Default impl calls onDraw
+ virtual bool onReadPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY, CachingHint) const;
+
+ virtual GrTexture* peekTexture() const { return nullptr; }
+#if SK_SUPPORT_GPU
+ virtual sk_sp<GrTexture> refPinnedTexture(uint32_t* uniqueID) const { return nullptr; }
+#endif
+ virtual SkImageCacherator* peekCacherator() const { return nullptr; }
+
+ // return a read-only copy of the pixels. We promise to not modify them,
+ // but only inspect them (or encode them).
+ virtual bool getROPixels(SkBitmap*, CachingHint = kAllow_CachingHint) const = 0;
+
+ // Caller must call unref when they are done.
+ virtual GrTexture* asTextureRef(GrContext*, const GrTextureParams&,
+ SkSourceGammaTreatment) const = 0;
+
+ virtual sk_sp<SkImage> onMakeSubset(const SkIRect&) const = 0;
+
+ // If a ctx is specified, then only gpu-specific formats are requested.
+ virtual SkData* onRefEncoded(GrContext*) const { return nullptr; }
+
+ virtual bool onAsLegacyBitmap(SkBitmap*, LegacyBitmapMode) const;
+
+ virtual bool onIsLazyGenerated() const { return false; }
+
+    // Call when this image is part of the key to a resource cache entry. This allows the cache
+    // to know automatically that those entries can be purged when this SkImage is deleted.
+ void notifyAddedToCache() const {
+ fAddedToCache.store(true);
+ }
+
+ virtual void onPinAsTexture(GrContext*) const {}
+ virtual void onUnpinAsTexture(GrContext*) const {}
+
+private:
+ // Set true by caches when they cache content that's derived from the current pixels.
+ mutable SkAtomic<bool> fAddedToCache;
+
+ typedef SkImage INHERITED;
+};
+
+static inline SkImage_Base* as_IB(SkImage* image) {
+ return static_cast<SkImage_Base*>(image);
+}
+
+static inline SkImage_Base* as_IB(const sk_sp<SkImage>& image) {
+ return static_cast<SkImage_Base*>(image.get());
+}
+
+static inline const SkImage_Base* as_IB(const SkImage* image) {
+ return static_cast<const SkImage_Base*>(image);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkImage_Generator.cpp b/gfx/skia/skia/src/image/SkImage_Generator.cpp
new file mode 100644
index 000000000..412f573ba
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_Generator.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkImage_Base.h"
+#include "SkBitmap.h"
+#include "SkCanvas.h"
+#include "SkData.h"
+#include "SkImageCacherator.h"
+#include "SkImagePriv.h"
+#include "SkPixelRef.h"
+#include "SkSurface.h"
+
+class SkImage_Generator : public SkImage_Base {
+public:
+ SkImage_Generator(SkImageCacherator* cache)
+ : INHERITED(cache->info().width(), cache->info().height(), cache->uniqueID())
+ , fCache(cache) // take ownership
+ {}
+
+ virtual SkImageInfo onImageInfo() const override {
+ return fCache->info();
+ }
+ SkAlphaType onAlphaType() const override {
+ return fCache->info().alphaType();
+ }
+
+ bool onReadPixels(const SkImageInfo&, void*, size_t, int srcX, int srcY, CachingHint) const override;
+ SkImageCacherator* peekCacherator() const override { return fCache; }
+ SkData* onRefEncoded(GrContext*) const override;
+ sk_sp<SkImage> onMakeSubset(const SkIRect&) const override;
+ bool getROPixels(SkBitmap*, CachingHint) const override;
+ GrTexture* asTextureRef(GrContext*, const GrTextureParams&,
+ SkSourceGammaTreatment) const override;
+ bool onIsLazyGenerated() const override { return true; }
+
+private:
+ SkAutoTDelete<SkImageCacherator> fCache;
+
+ typedef SkImage_Base INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkImage_Generator::onReadPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRB,
+ int srcX, int srcY, CachingHint chint) const {
+ SkBitmap bm;
+ if (kDisallow_CachingHint == chint) {
+ if (fCache->lockAsBitmapOnlyIfAlreadyCached(&bm)) {
+ return bm.readPixels(dstInfo, dstPixels, dstRB, srcX, srcY);
+ } else {
+ // Try passing the caller's buffer directly down to the generator. If this fails we
+ // may still succeed in the general case, as the generator may prefer some other
+ // config, which we could then convert via SkBitmap::readPixels.
+ if (fCache->directGeneratePixels(dstInfo, dstPixels, dstRB, srcX, srcY)) {
+ return true;
+ }
+ // else fall through
+ }
+ }
+
+ if (this->getROPixels(&bm, chint)) {
+ return bm.readPixels(dstInfo, dstPixels, dstRB, srcX, srcY);
+ }
+ return false;
+}
+
+SkData* SkImage_Generator::onRefEncoded(GrContext* ctx) const {
+ return fCache->refEncoded(ctx);
+}
+
+bool SkImage_Generator::getROPixels(SkBitmap* bitmap, CachingHint chint) const {
+ return fCache->lockAsBitmap(bitmap, this, chint);
+}
+
+GrTexture* SkImage_Generator::asTextureRef(GrContext* ctx, const GrTextureParams& params,
+ SkSourceGammaTreatment gammaTreatment) const {
+ return fCache->lockAsTexture(ctx, params, gammaTreatment, this);
+}
+
+sk_sp<SkImage> SkImage_Generator::onMakeSubset(const SkIRect& subset) const {
+ // TODO: make this lazy, by wrapping the subset inside a new generator or something
+    // For now, we do effectively what we did before: make it a raster image.
+
+ const SkImageInfo info = SkImageInfo::MakeN32(subset.width(), subset.height(),
+ this->alphaType());
+ auto surface(SkSurface::MakeRaster(info));
+ if (!surface) {
+ return nullptr;
+ }
+ surface->getCanvas()->clear(0);
+ surface->getCanvas()->drawImage(this, SkIntToScalar(-subset.x()), SkIntToScalar(-subset.y()),
+ nullptr);
+ return surface->makeImageSnapshot();
+}
+
+sk_sp<SkImage> SkImage::MakeFromGenerator(SkImageGenerator* generator, const SkIRect* subset) {
+ if (!generator) {
+ return nullptr;
+ }
+ SkImageCacherator* cache = SkImageCacherator::NewFromGenerator(generator, subset);
+ if (!cache) {
+ return nullptr;
+ }
+ return sk_make_sp<SkImage_Generator>(cache);
+}
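To illustrate the caching-hint handling above (sketch only, not part of the imported patch): passing kDisallow_CachingHint lets a generator-backed image decode straight into the caller's buffer instead of populating the bitmap cache first; 'image' is an assumed lazily generated SkImage.

SkBitmap dst;
dst.allocN32Pixels(image->width(), image->height());
image->readPixels(dst.info(), dst.getPixels(), dst.rowBytes(), 0, 0,
                  SkImage::kDisallow_CachingHint);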
diff --git a/gfx/skia/skia/src/image/SkImage_Gpu.cpp b/gfx/skia/skia/src/image/SkImage_Gpu.cpp
new file mode 100644
index 000000000..da7a5a718
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_Gpu.cpp
@@ -0,0 +1,705 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAutoPixmapStorage.h"
+#include "GrCaps.h"
+#include "GrContext.h"
+#include "GrDrawContext.h"
+#include "GrImageIDTextureAdjuster.h"
+#include "GrTexturePriv.h"
+#include "effects/GrYUVEffect.h"
+#include "SkCanvas.h"
+#include "SkBitmapCache.h"
+#include "SkGrPriv.h"
+#include "SkImage_Gpu.h"
+#include "SkMipMap.h"
+#include "SkPixelRef.h"
+
+SkImage_Gpu::SkImage_Gpu(int w, int h, uint32_t uniqueID, SkAlphaType at, GrTexture* tex,
+ sk_sp<SkColorSpace> colorSpace, SkBudgeted budgeted)
+ : INHERITED(w, h, uniqueID)
+ , fTexture(SkRef(tex))
+ , fAlphaType(at)
+ , fBudgeted(budgeted)
+ , fColorSpace(std::move(colorSpace))
+ , fAddedRasterVersionToCache(false)
+{
+ SkASSERT(tex->width() == w);
+ SkASSERT(tex->height() == h);
+}
+
+SkImage_Gpu::~SkImage_Gpu() {
+ if (fAddedRasterVersionToCache.load()) {
+ SkNotifyBitmapGenIDIsStale(this->uniqueID());
+ }
+}
+
+extern void SkTextureImageApplyBudgetedDecision(SkImage* image) {
+ if (image->isTextureBacked()) {
+ ((SkImage_Gpu*)image)->applyBudgetDecision();
+ }
+}
+
+SkImageInfo SkImage_Gpu::onImageInfo() const {
+ SkColorType ct;
+ if (!GrPixelConfigToColorType(fTexture->config(), &ct)) {
+ ct = kUnknown_SkColorType;
+ }
+ return SkImageInfo::Make(fTexture->width(), fTexture->height(), ct, fAlphaType, fColorSpace);
+}
+
+static SkImageInfo make_info(int w, int h, SkAlphaType at, sk_sp<SkColorSpace> colorSpace) {
+ return SkImageInfo::MakeN32(w, h, at, std::move(colorSpace));
+}
+
+bool SkImage_Gpu::getROPixels(SkBitmap* dst, CachingHint chint) const {
+ if (SkBitmapCache::Find(this->uniqueID(), dst)) {
+ SkASSERT(dst->getGenerationID() == this->uniqueID());
+ SkASSERT(dst->isImmutable());
+ SkASSERT(dst->getPixels());
+ return true;
+ }
+
+ if (!dst->tryAllocPixels(make_info(this->width(), this->height(), this->alphaType(),
+ this->fColorSpace))) {
+ return false;
+ }
+ if (!fTexture->readPixels(0, 0, dst->width(), dst->height(), kSkia8888_GrPixelConfig,
+ dst->getPixels(), dst->rowBytes())) {
+ return false;
+ }
+
+ dst->pixelRef()->setImmutableWithID(this->uniqueID());
+ if (kAllow_CachingHint == chint) {
+ SkBitmapCache::Add(this->uniqueID(), *dst);
+ fAddedRasterVersionToCache.store(true);
+ }
+ return true;
+}
+
+GrTexture* SkImage_Gpu::asTextureRef(GrContext* ctx, const GrTextureParams& params,
+ SkSourceGammaTreatment gammaTreatment) const {
+ GrTextureAdjuster adjuster(this->peekTexture(), this->alphaType(), this->bounds(), this->uniqueID(),
+ this->onImageInfo().colorSpace());
+ return adjuster.refTextureSafeForParams(params, gammaTreatment, nullptr);
+}
+
+static void apply_premul(const SkImageInfo& info, void* pixels, size_t rowBytes) {
+ switch (info.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ break;
+ default:
+ return; // nothing to do
+ }
+
+ // SkColor is not necessarily RGBA or BGRA, but it is one of them on little-endian,
+ // and in either case, the alpha-byte is always in the same place, so we can safely call
+ // SkPreMultiplyColor()
+ //
+ SkColor* row = (SkColor*)pixels;
+ for (int y = 0; y < info.height(); ++y) {
+ for (int x = 0; x < info.width(); ++x) {
+ row[x] = SkPreMultiplyColor(row[x]);
+ }
+ // advance to the next row; otherwise only the first row would be premultiplied
+ row = (SkColor*)((char*)row + rowBytes);
+ }
+}
+
+bool SkImage_Gpu::onReadPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ int srcX, int srcY, CachingHint) const {
+ GrPixelConfig config = SkImageInfo2GrPixelConfig(info, *fTexture->getContext()->caps());
+ uint32_t flags = 0;
+ if (kUnpremul_SkAlphaType == info.alphaType() && kPremul_SkAlphaType == fAlphaType) {
+ // let the GPU perform this transformation for us
+ flags = GrContext::kUnpremul_PixelOpsFlag;
+ }
+ if (!fTexture->readPixels(srcX, srcY, info.width(), info.height(), config,
+ pixels, rowBytes, flags)) {
+ return false;
+ }
+ // Do we have to manually fix up the alpha channel?
+ //      src         dst
+ //      unpremul    premul      fix manually
+ //      premul      unpremul    handled by kUnpremul_PixelOpsFlag
+ // All other combinations need no fix-up.
+ //
+ // Should this be handled by Ganesh instead? TODO
+ //
+ if (kPremul_SkAlphaType == info.alphaType() && kUnpremul_SkAlphaType == fAlphaType) {
+ apply_premul(info, pixels, rowBytes);
+ }
+ return true;
+}
+
+sk_sp<SkImage> SkImage_Gpu::onMakeSubset(const SkIRect& subset) const {
+ GrContext* ctx = fTexture->getContext();
+ GrSurfaceDesc desc = fTexture->desc();
+ desc.fWidth = subset.width();
+ desc.fHeight = subset.height();
+
+ sk_sp<GrTexture> subTx(ctx->textureProvider()->createTexture(desc, fBudgeted));
+ if (!subTx) {
+ return nullptr;
+ }
+ ctx->copySurface(subTx.get(), fTexture, subset, SkIPoint::Make(0, 0));
+ return sk_make_sp<SkImage_Gpu>(desc.fWidth, desc.fHeight, kNeedNewImageUniqueID,
+ fAlphaType, subTx.get(), fColorSpace, fBudgeted);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static sk_sp<SkImage> new_wrapped_texture_common(GrContext* ctx, const GrBackendTextureDesc& desc,
+ SkAlphaType at, sk_sp<SkColorSpace> colorSpace,
+ GrWrapOwnership ownership,
+ SkImage::TextureReleaseProc releaseProc,
+ SkImage::ReleaseContext releaseCtx) {
+ if (desc.fWidth <= 0 || desc.fHeight <= 0) {
+ return nullptr;
+ }
+ SkAutoTUnref<GrTexture> tex(ctx->textureProvider()->wrapBackendTexture(desc, ownership));
+ if (!tex) {
+ return nullptr;
+ }
+ if (releaseProc) {
+ tex->setRelease(releaseProc, releaseCtx);
+ }
+
+ const SkBudgeted budgeted = SkBudgeted::kNo;
+ return sk_make_sp<SkImage_Gpu>(desc.fWidth, desc.fHeight, kNeedNewImageUniqueID,
+ at, tex, colorSpace, budgeted);
+}
+
+sk_sp<SkImage> SkImage::MakeFromTexture(GrContext* ctx, const GrBackendTextureDesc& desc,
+ SkAlphaType at, sk_sp<SkColorSpace> cs,
+ TextureReleaseProc releaseP, ReleaseContext releaseC) {
+ return new_wrapped_texture_common(ctx, desc, at, std::move(cs), kBorrow_GrWrapOwnership,
+ releaseP, releaseC);
+}
+
+sk_sp<SkImage> SkImage::MakeFromAdoptedTexture(GrContext* ctx, const GrBackendTextureDesc& desc,
+ SkAlphaType at, sk_sp<SkColorSpace> cs) {
+ return new_wrapped_texture_common(ctx, desc, at, std::move(cs), kAdopt_GrWrapOwnership,
+ nullptr, nullptr);
+}
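
For reference, wrapping an externally created texture goes through GrBackendTextureDesc just as the YUV path below does. A hedged sketch; the backend-specific texture handle is a placeholder supplied by the caller, and kNone_GrBackendTextureFlag is assumed from GrTypes.h:

    #include "GrContext.h"
    #include "SkImage.h"

    // Borrow (do not adopt) an already-created backend texture as an SkImage.
    sk_sp<SkImage> wrap_backend_texture(GrContext* ctx, GrBackendObject textureHandle,
                                        int width, int height) {
        GrBackendTextureDesc desc;
        desc.fFlags = kNone_GrBackendTextureFlag;
        desc.fConfig = kRGBA_8888_GrPixelConfig;
        desc.fOrigin = kTopLeft_GrSurfaceOrigin;
        desc.fSampleCnt = 0;
        desc.fWidth = width;
        desc.fHeight = height;
        desc.fTextureHandle = textureHandle;   // placeholder: backend-specific value
        // MakeFromTexture borrows the texture; use MakeFromAdoptedTexture (above) to
        // hand its lifetime over to Skia instead.
        return SkImage::MakeFromTexture(ctx, desc, kPremul_SkAlphaType, nullptr,
                                        nullptr, nullptr);
    }
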
+
+static sk_sp<SkImage> make_from_yuv_textures_copy(GrContext* ctx, SkYUVColorSpace colorSpace,
+ bool nv12,
+ const GrBackendObject yuvTextureHandles[],
+ const SkISize yuvSizes[],
+ GrSurfaceOrigin origin,
+ sk_sp<SkColorSpace> imageColorSpace) {
+ const SkBudgeted budgeted = SkBudgeted::kYes;
+
+ if (yuvSizes[0].fWidth <= 0 || yuvSizes[0].fHeight <= 0 || yuvSizes[1].fWidth <= 0 ||
+ yuvSizes[1].fHeight <= 0) {
+ return nullptr;
+ }
+ if (!nv12 && (yuvSizes[2].fWidth <= 0 || yuvSizes[2].fHeight <= 0)) {
+ return nullptr;
+ }
+
+ const GrPixelConfig kConfig = nv12 ? kRGBA_8888_GrPixelConfig : kAlpha_8_GrPixelConfig;
+
+ GrBackendTextureDesc yDesc;
+ yDesc.fConfig = kConfig;
+ yDesc.fOrigin = origin;
+ yDesc.fSampleCnt = 0;
+ yDesc.fTextureHandle = yuvTextureHandles[0];
+ yDesc.fWidth = yuvSizes[0].fWidth;
+ yDesc.fHeight = yuvSizes[0].fHeight;
+
+ GrBackendTextureDesc uDesc;
+ uDesc.fConfig = kConfig;
+ uDesc.fOrigin = origin;
+ uDesc.fSampleCnt = 0;
+ uDesc.fTextureHandle = yuvTextureHandles[1];
+ uDesc.fWidth = yuvSizes[1].fWidth;
+ uDesc.fHeight = yuvSizes[1].fHeight;
+
+ sk_sp<GrTexture> yTex(
+ ctx->textureProvider()->wrapBackendTexture(yDesc, kBorrow_GrWrapOwnership));
+ sk_sp<GrTexture> uTex(
+ ctx->textureProvider()->wrapBackendTexture(uDesc, kBorrow_GrWrapOwnership));
+ sk_sp<GrTexture> vTex;
+ if (nv12) {
+ vTex = uTex;
+ } else {
+ GrBackendTextureDesc vDesc;
+ vDesc.fConfig = kConfig;
+ vDesc.fOrigin = origin;
+ vDesc.fSampleCnt = 0;
+ vDesc.fTextureHandle = yuvTextureHandles[2];
+ vDesc.fWidth = yuvSizes[2].fWidth;
+ vDesc.fHeight = yuvSizes[2].fHeight;
+
+ vTex = sk_sp<GrTexture>(
+ ctx->textureProvider()->wrapBackendTexture(vDesc, kBorrow_GrWrapOwnership));
+ }
+ if (!yTex || !uTex || !vTex) {
+ return nullptr;
+ }
+
+ const int width = yuvSizes[0].fWidth;
+ const int height = yuvSizes[0].fHeight;
+
+ // Needs to be a render target in order to draw to it for the yuv->rgb conversion.
+ sk_sp<GrDrawContext> drawContext(ctx->makeDrawContext(SkBackingFit::kExact,
+ width, height,
+ kRGBA_8888_GrPixelConfig,
+ std::move(imageColorSpace),
+ 0,
+ origin));
+ if (!drawContext) {
+ return nullptr;
+ }
+
+ GrPaint paint;
+ paint.setPorterDuffXPFactory(SkXfermode::kSrc_Mode);
+ paint.addColorFragmentProcessor(
+ GrYUVEffect::MakeYUVToRGB(yTex.get(), uTex.get(), vTex.get(), yuvSizes, colorSpace, nv12));
+
+ const SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
+
+ drawContext->drawRect(GrNoClip(), paint, SkMatrix::I(), rect);
+ ctx->flushSurfaceWrites(drawContext->accessRenderTarget());
+ return sk_make_sp<SkImage_Gpu>(width, height, kNeedNewImageUniqueID,
+ kOpaque_SkAlphaType, drawContext->asTexture().get(),
+ sk_ref_sp(drawContext->getColorSpace()), budgeted);
+}
+
+sk_sp<SkImage> SkImage::MakeFromYUVTexturesCopy(GrContext* ctx, SkYUVColorSpace colorSpace,
+ const GrBackendObject yuvTextureHandles[3],
+ const SkISize yuvSizes[3], GrSurfaceOrigin origin,
+ sk_sp<SkColorSpace> imageColorSpace) {
+ return make_from_yuv_textures_copy(ctx, colorSpace, false, yuvTextureHandles, yuvSizes, origin,
+ std::move(imageColorSpace));
+}
+
+sk_sp<SkImage> SkImage::MakeFromNV12TexturesCopy(GrContext* ctx, SkYUVColorSpace colorSpace,
+ const GrBackendObject yuvTextureHandles[2],
+ const SkISize yuvSizes[2],
+ GrSurfaceOrigin origin,
+ sk_sp<SkColorSpace> imageColorSpace) {
+ return make_from_yuv_textures_copy(ctx, colorSpace, true, yuvTextureHandles, yuvSizes, origin,
+ std::move(imageColorSpace));
+}
+
+static sk_sp<SkImage> create_image_from_maker(GrTextureMaker* maker, SkAlphaType at, uint32_t id) {
+ SkAutoTUnref<GrTexture> texture(maker->refTextureForParams(GrTextureParams::ClampNoFilter(),
+ SkSourceGammaTreatment::kRespect));
+ if (!texture) {
+ return nullptr;
+ }
+ return sk_make_sp<SkImage_Gpu>(texture->width(), texture->height(), id, at, texture,
+ sk_ref_sp(maker->getColorSpace()), SkBudgeted::kNo);
+}
+
+sk_sp<SkImage> SkImage::makeTextureImage(GrContext *context) const {
+ if (!context) {
+ return nullptr;
+ }
+ if (GrTexture* peek = as_IB(this)->peekTexture()) {
+ return peek->getContext() == context ? sk_ref_sp(const_cast<SkImage*>(this)) : nullptr;
+ }
+
+ if (SkImageCacherator* cacher = as_IB(this)->peekCacherator()) {
+ GrImageTextureMaker maker(context, cacher, this, kDisallow_CachingHint);
+ return create_image_from_maker(&maker, this->alphaType(), this->uniqueID());
+ }
+
+ if (const SkBitmap* bmp = as_IB(this)->onPeekBitmap()) {
+ GrBitmapTextureMaker maker(context, *bmp);
+ return create_image_from_maker(&maker, this->alphaType(), this->uniqueID());
+ }
+ return nullptr;
+}
+
+sk_sp<SkImage> SkImage::makeNonTextureImage() const {
+ if (!this->isTextureBacked()) {
+ return sk_ref_sp(const_cast<SkImage*>(this));
+ }
+ SkImageInfo info = as_IB(this)->onImageInfo();
+ size_t rowBytes = info.minRowBytes();
+ size_t size = info.getSafeSize(rowBytes);
+ auto data = SkData::MakeUninitialized(size);
+ if (!data) {
+ return nullptr;
+ }
+ SkPixmap pm(info, data->writable_data(), rowBytes);
+ if (!this->readPixels(pm, 0, 0, kDisallow_CachingHint)) {
+ return nullptr;
+ }
+ return MakeRasterData(info, data, rowBytes);
+}
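
makeTextureImage() and makeNonTextureImage() are symmetric. A small sketch of pushing an image onto the GPU and pulling its pixels back into a raster image (ctx is an already-created GrContext):

    // Upload 'img' to 'ctx' (a no-op if it is already a texture for that context),
    // then read the pixels back into a raster-backed image.
    sk_sp<SkImage> gpu_round_trip(const sk_sp<SkImage>& img, GrContext* ctx) {
        sk_sp<SkImage> gpuImage = img->makeTextureImage(ctx);
        if (!gpuImage) {
            return nullptr;   // e.g. the image is bound to a different GrContext
        }
        return gpuImage->makeNonTextureImage();   // readback via MakeRasterData
    }
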
+
+sk_sp<SkImage> SkImage::MakeTextureFromPixmap(GrContext* ctx, const SkPixmap& pixmap,
+ SkBudgeted budgeted) {
+ if (!ctx) {
+ return nullptr;
+ }
+ SkAutoTUnref<GrTexture> texture(GrUploadPixmapToTexture(ctx, pixmap, budgeted));
+ if (!texture) {
+ return nullptr;
+ }
+ return sk_make_sp<SkImage_Gpu>(texture->width(), texture->height(), kNeedNewImageUniqueID,
+ pixmap.alphaType(), texture,
+ sk_ref_sp(pixmap.info().colorSpace()), budgeted);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+struct MipMapLevelData {
+ void* fPixelData;
+ size_t fRowBytes;
+};
+
+struct DeferredTextureImage {
+ uint32_t fContextUniqueID;
+ // Right now, the gamma treatment is only considered when generating mipmaps
+ SkSourceGammaTreatment fGammaTreatment;
+ // We don't store a SkImageInfo because it contains a ref-counted SkColorSpace.
+ int fWidth;
+ int fHeight;
+ SkColorType fColorType;
+ SkAlphaType fAlphaType;
+ void* fColorSpace;
+ size_t fColorSpaceSize;
+ int fColorTableCnt;
+ uint32_t* fColorTableData;
+ int fMipMapLevelCount;
+ // The fMipMapLevelData array may contain more than 1 element.
+ // It contains fMipMapLevelCount elements.
+ // That means this struct's size is not known at compile-time.
+ MipMapLevelData fMipMapLevelData[1];
+};
+} // anonymous namespace
+
+static bool should_use_mip_maps(const SkImage::DeferredTextureImageUsageParams & param) {
+ bool shouldUseMipMaps = false;
+
+ // Use mipmaps if either
+ // 1.) it is a perspective matrix, or
+ // 2.) the quality is med/high and the scale is < 1
+ if (param.fMatrix.hasPerspective()) {
+ shouldUseMipMaps = true;
+ }
+ if (param.fQuality == kMedium_SkFilterQuality ||
+ param.fQuality == kHigh_SkFilterQuality) {
+ SkScalar minAxisScale = param.fMatrix.getMinScale();
+ if (minAxisScale != -1.f && minAxisScale < 1.f) {
+ shouldUseMipMaps = true;
+ }
+ }
+
+ return shouldUseMipMaps;
+}
+
+namespace {
+
+class DTIBufferFiller
+{
+public:
+ explicit DTIBufferFiller(char* bufferAsCharPtr)
+ : bufferAsCharPtr_(bufferAsCharPtr) {}
+
+ void fillMember(const void* source, size_t memberOffset, size_t size) {
+ memcpy(bufferAsCharPtr_ + memberOffset, source, size);
+ }
+
+private:
+
+ char* bufferAsCharPtr_;
+};
+}
+
+#define FILL_MEMBER(bufferFiller, member, source) \
+ bufferFiller.fillMember(source, \
+ offsetof(DeferredTextureImage, member), \
+ sizeof(DeferredTextureImage::member));
+
+size_t SkImage::getDeferredTextureImageData(const GrContextThreadSafeProxy& proxy,
+ const DeferredTextureImageUsageParams params[],
+ int paramCnt, void* buffer,
+ SkSourceGammaTreatment gammaTreatment) const {
+ // Extract relevant min/max values from the params array.
+ int lowestPreScaleMipLevel = params[0].fPreScaleMipLevel;
+ SkFilterQuality highestFilterQuality = params[0].fQuality;
+ bool useMipMaps = should_use_mip_maps(params[0]);
+ for (int i = 1; i < paramCnt; ++i) {
+ if (lowestPreScaleMipLevel > params[i].fPreScaleMipLevel)
+ lowestPreScaleMipLevel = params[i].fPreScaleMipLevel;
+ if (highestFilterQuality < params[i].fQuality)
+ highestFilterQuality = params[i].fQuality;
+ useMipMaps |= should_use_mip_maps(params[i]);
+ }
+
+ const bool fillMode = SkToBool(buffer);
+ if (fillMode && !SkIsAlign8(reinterpret_cast<intptr_t>(buffer))) {
+ return 0;
+ }
+
+ // Calculate scaling parameters.
+ bool isScaled = lowestPreScaleMipLevel != 0;
+
+ SkISize scaledSize;
+ if (isScaled) {
+ // SkMipMap::ComputeLevelSize takes an index into an SkMipMap. SkMipMaps don't contain the
+ // base level, so to get an SkMipMap index we must subtract one from the GL MipMap level.
+ scaledSize = SkMipMap::ComputeLevelSize(this->width(), this->height(),
+ lowestPreScaleMipLevel - 1);
+ } else {
+ scaledSize = SkISize::Make(this->width(), this->height());
+ }
+
+ // We never want to scale at higher than SW medium quality, as SW medium matches GPU high.
+ SkFilterQuality scaleFilterQuality = highestFilterQuality;
+ if (scaleFilterQuality > kMedium_SkFilterQuality) {
+ scaleFilterQuality = kMedium_SkFilterQuality;
+ }
+
+ const int maxTextureSize = proxy.fCaps->maxTextureSize();
+ if (scaledSize.width() > maxTextureSize || scaledSize.height() > maxTextureSize) {
+ return 0;
+ }
+
+ SkAutoPixmapStorage pixmap;
+ SkImageInfo info;
+ size_t pixelSize = 0;
+ size_t ctSize = 0;
+ int ctCount = 0;
+ if (!isScaled && this->peekPixels(&pixmap)) {
+ info = pixmap.info();
+ pixelSize = SkAlign8(pixmap.getSafeSize());
+ if (pixmap.ctable()) {
+ ctCount = pixmap.ctable()->count();
+ ctSize = SkAlign8(pixmap.ctable()->count() * 4);
+ }
+ } else {
+ // Here we're just using the presence of encoded data to know whether there is a codec
+ // behind the image. In the future we will access the cacherator and get exactly the
+ // data that we want to upload (e.g. YUV planes).
+ sk_sp<SkData> data(this->refEncoded());
+ if (!data && !this->peekPixels(nullptr)) {
+ return 0;
+ }
+ info = SkImageInfo::MakeN32(scaledSize.width(), scaledSize.height(), this->alphaType());
+ pixelSize = SkAlign8(SkAutoPixmapStorage::AllocSize(info, nullptr));
+ if (fillMode) {
+ pixmap.alloc(info);
+ if (isScaled) {
+ if (!this->scalePixels(pixmap, scaleFilterQuality,
+ SkImage::kDisallow_CachingHint)) {
+ return 0;
+ }
+ } else {
+ if (!this->readPixels(pixmap, 0, 0, SkImage::kDisallow_CachingHint)) {
+ return 0;
+ }
+ }
+ SkASSERT(!pixmap.ctable());
+ }
+ }
+ SkAlphaType at = this->isOpaque() ? kOpaque_SkAlphaType : kPremul_SkAlphaType;
+ int mipMapLevelCount = 1;
+ if (useMipMaps) {
+ // SkMipMap only deals with the mipmap levels it generates, which does
+ // not include the base level.
+ // That means it generates and holds levels 1-x instead of 0-x.
+ // So the total mipmap level count is 1 more than what
+ // SkMipMap::ComputeLevelCount returns.
+ mipMapLevelCount = SkMipMap::ComputeLevelCount(scaledSize.width(), scaledSize.height()) + 1;
+
+ // We already initialized pixelSize to the size of the base level.
+ // SkMipMap will generate the extra mipmap levels. Their sizes need to
+ // be added to the total.
+ // Index 0 here does not refer to the base mipmap level -- it is
+ // SkMipMap's first generated mipmap level (level 1).
+ for (int currentMipMapLevelIndex = mipMapLevelCount - 2; currentMipMapLevelIndex >= 0;
+ currentMipMapLevelIndex--) {
+ SkISize mipSize = SkMipMap::ComputeLevelSize(scaledSize.width(), scaledSize.height(),
+ currentMipMapLevelIndex);
+ SkImageInfo mipInfo = SkImageInfo::MakeN32(mipSize.fWidth, mipSize.fHeight, at);
+ pixelSize += SkAlign8(SkAutoPixmapStorage::AllocSize(mipInfo, nullptr));
+ }
+ }
+ size_t size = 0;
+ size_t dtiSize = SkAlign8(sizeof(DeferredTextureImage));
+ size += dtiSize;
+ size += (mipMapLevelCount - 1) * sizeof(MipMapLevelData);
+ // We subtract 1 because DeferredTextureImage already includes the base
+ // level in its size
+ size_t pixelOffset = size;
+ size += pixelSize;
+ size_t ctOffset = size;
+ size += ctSize;
+ size_t colorSpaceOffset = 0;
+ size_t colorSpaceSize = 0;
+ if (info.colorSpace()) {
+ colorSpaceOffset = size;
+ colorSpaceSize = info.colorSpace()->writeToMemory(nullptr);
+ size += colorSpaceSize;
+ }
+ if (!fillMode) {
+ return size;
+ }
+ char* bufferAsCharPtr = reinterpret_cast<char*>(buffer);
+ char* pixelsAsCharPtr = bufferAsCharPtr + pixelOffset;
+ void* pixels = pixelsAsCharPtr;
+ void* ct = nullptr;
+ if (ctSize) {
+ ct = bufferAsCharPtr + ctOffset;
+ }
+
+ memcpy(reinterpret_cast<void*>(SkAlign8(reinterpret_cast<uintptr_t>(pixelsAsCharPtr))),
+ pixmap.addr(), pixmap.getSafeSize());
+ if (ctSize) {
+ memcpy(ct, pixmap.ctable()->readColors(), ctSize);
+ }
+
+ SkASSERT(info == pixmap.info());
+ size_t rowBytes = pixmap.rowBytes();
+ static_assert(std::is_standard_layout<DeferredTextureImage>::value,
+ "offsetof, which we use below, requires the type have standard layout");
+ auto dtiBufferFiller = DTIBufferFiller{bufferAsCharPtr};
+ FILL_MEMBER(dtiBufferFiller, fGammaTreatment, &gammaTreatment);
+ FILL_MEMBER(dtiBufferFiller, fContextUniqueID, &proxy.fContextUniqueID);
+ int width = info.width();
+ FILL_MEMBER(dtiBufferFiller, fWidth, &width);
+ int height = info.height();
+ FILL_MEMBER(dtiBufferFiller, fHeight, &height);
+ SkColorType colorType = info.colorType();
+ FILL_MEMBER(dtiBufferFiller, fColorType, &colorType);
+ SkAlphaType alphaType = info.alphaType();
+ FILL_MEMBER(dtiBufferFiller, fAlphaType, &alphaType);
+ FILL_MEMBER(dtiBufferFiller, fColorTableCnt, &ctCount);
+ FILL_MEMBER(dtiBufferFiller, fColorTableData, &ct);
+ FILL_MEMBER(dtiBufferFiller, fMipMapLevelCount, &mipMapLevelCount);
+ memcpy(bufferAsCharPtr + offsetof(DeferredTextureImage, fMipMapLevelData[0].fPixelData),
+ &pixels, sizeof(pixels));
+ memcpy(bufferAsCharPtr + offsetof(DeferredTextureImage, fMipMapLevelData[0].fRowBytes),
+ &rowBytes, sizeof(rowBytes));
+ if (colorSpaceSize) {
+ void* colorSpace = bufferAsCharPtr + colorSpaceOffset;
+ FILL_MEMBER(dtiBufferFiller, fColorSpace, &colorSpace);
+ FILL_MEMBER(dtiBufferFiller, fColorSpaceSize, &colorSpaceSize);
+ info.colorSpace()->writeToMemory(bufferAsCharPtr + colorSpaceOffset);
+ } else {
+ memset(bufferAsCharPtr + offsetof(DeferredTextureImage, fColorSpace),
+ 0, sizeof(DeferredTextureImage::fColorSpace));
+ memset(bufferAsCharPtr + offsetof(DeferredTextureImage, fColorSpaceSize),
+ 0, sizeof(DeferredTextureImage::fColorSpaceSize));
+ }
+
+ // Fill in the mipmap levels if they exist
+ char* mipLevelPtr = pixelsAsCharPtr + SkAlign8(pixmap.getSafeSize());
+
+ if (useMipMaps) {
+ static_assert(std::is_standard_layout<MipMapLevelData>::value,
+ "offsetof, which we use below, requires the type have a standard layout");
+
+ SkAutoTDelete<SkMipMap> mipmaps(SkMipMap::Build(pixmap, gammaTreatment, nullptr));
+ // SkMipMap holds only the mipmap levels it generates.
+ // A programmer can use the data they provided to SkMipMap::Build as level 0.
+ // So the SkMipMap provides levels 1-x but it stores them in its own
+ // range 0-(x-1).
+ for (int generatedMipLevelIndex = 0; generatedMipLevelIndex < mipMapLevelCount - 1;
+ generatedMipLevelIndex++) {
+ SkISize mipSize = SkMipMap::ComputeLevelSize(scaledSize.width(), scaledSize.height(),
+ generatedMipLevelIndex);
+
+ SkImageInfo mipInfo = SkImageInfo::MakeN32(mipSize.fWidth, mipSize.fHeight, at);
+ SkMipMap::Level mipLevel;
+ mipmaps->getLevel(generatedMipLevelIndex, &mipLevel);
+
+ // Make sure the mipmap data is after the start of the buffer
+ SkASSERT(mipLevelPtr > bufferAsCharPtr);
+ // Make sure the mipmap data starts before the end of the buffer
+ SkASSERT(mipLevelPtr < bufferAsCharPtr + pixelOffset + pixelSize);
+ // Make sure the mipmap data ends before the end of the buffer
+ SkASSERT(mipLevelPtr + mipLevel.fPixmap.getSafeSize() <=
+ bufferAsCharPtr + pixelOffset + pixelSize);
+
+ // getSafeSize includes row-byte padding for every row except the last.
+
+ memcpy(mipLevelPtr, mipLevel.fPixmap.addr(), mipLevel.fPixmap.getSafeSize());
+
+ memcpy(bufferAsCharPtr + offsetof(DeferredTextureImage, fMipMapLevelData) +
+ sizeof(MipMapLevelData) * (generatedMipLevelIndex + 1) +
+ offsetof(MipMapLevelData, fPixelData), &mipLevelPtr, sizeof(void*));
+ size_t rowBytes = mipLevel.fPixmap.rowBytes();
+ memcpy(bufferAsCharPtr + offsetof(DeferredTextureImage, fMipMapLevelData) +
+ sizeof(MipMapLevelData) * (generatedMipLevelIndex + 1) +
+ offsetof(MipMapLevelData, fRowBytes), &rowBytes, sizeof(rowBytes));
+
+ mipLevelPtr += SkAlign8(mipLevel.fPixmap.getSafeSize());
+ }
+ }
+ return size;
+}
+
+sk_sp<SkImage> SkImage::MakeFromDeferredTextureImageData(GrContext* context, const void* data,
+ SkBudgeted budgeted) {
+ if (!data) {
+ return nullptr;
+ }
+ const DeferredTextureImage* dti = reinterpret_cast<const DeferredTextureImage*>(data);
+
+ if (!context || context->uniqueID() != dti->fContextUniqueID) {
+ return nullptr;
+ }
+ SkAutoTUnref<SkColorTable> colorTable;
+ if (dti->fColorTableCnt) {
+ SkASSERT(dti->fColorTableData);
+ colorTable.reset(new SkColorTable(dti->fColorTableData, dti->fColorTableCnt));
+ }
+ int mipLevelCount = dti->fMipMapLevelCount;
+ SkASSERT(mipLevelCount >= 1);
+ sk_sp<SkColorSpace> colorSpace;
+ if (dti->fColorSpaceSize) {
+ colorSpace = SkColorSpace::Deserialize(dti->fColorSpace, dti->fColorSpaceSize);
+ }
+ SkImageInfo info = SkImageInfo::Make(dti->fWidth, dti->fHeight,
+ dti->fColorType, dti->fAlphaType, colorSpace);
+ if (mipLevelCount == 1) {
+ SkPixmap pixmap;
+ pixmap.reset(info, dti->fMipMapLevelData[0].fPixelData,
+ dti->fMipMapLevelData[0].fRowBytes, colorTable.get());
+ return SkImage::MakeTextureFromPixmap(context, pixmap, budgeted);
+ } else {
+ SkAutoTDeleteArray<GrMipLevel> texels(new GrMipLevel[mipLevelCount]);
+ for (int i = 0; i < mipLevelCount; i++) {
+ texels[i].fPixels = dti->fMipMapLevelData[i].fPixelData;
+ texels[i].fRowBytes = dti->fMipMapLevelData[i].fRowBytes;
+ }
+
+ return SkImage::MakeTextureFromMipMap(context, info, texels.get(),
+ mipLevelCount, SkBudgeted::kYes,
+ dti->fGammaTreatment);
+ }
+}
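
getDeferredTextureImageData() and MakeFromDeferredTextureImageData() form a two-phase protocol: call once with a null buffer to size the blob (typically on a worker thread), fill it, then rebuild the texture-backed image on the GPU thread. A hedged sketch; GrContext::threadSafeProxy() and the field layout of DeferredTextureImageUsageParams are assumed from headers outside this hunk:

    #include <memory>
    #include "GrContext.h"
    #include "SkImage.h"
    #include "SkMatrix.h"

    sk_sp<SkImage> upload_deferred(GrContext* ctx, const sk_sp<SkImage>& img) {
        sk_sp<GrContextThreadSafeProxy> proxy = ctx->threadSafeProxy();  // assumed accessor
        SkImage::DeferredTextureImageUsageParams params;                 // assumed plain struct
        params.fMatrix = SkMatrix::I();
        params.fQuality = kMedium_SkFilterQuality;
        params.fPreScaleMipLevel = 0;

        // Phase 1: measure. A return of 0 means the image cannot be deferred.
        size_t size = img->getDeferredTextureImageData(*proxy, &params, 1, nullptr,
                                                       SkSourceGammaTreatment::kIgnore);
        if (0 == size) {
            return nullptr;
        }
        // new[] storage satisfies the 8-byte alignment checked by the fill pass.
        std::unique_ptr<char[]> blob(new char[size]);
        img->getDeferredTextureImageData(*proxy, &params, 1, blob.get(),
                                         SkSourceGammaTreatment::kIgnore);

        // Phase 2 (GPU thread): the pixels are uploaded here, so the blob may be
        // released once this returns.
        return SkImage::MakeFromDeferredTextureImageData(ctx, blob.get(), SkBudgeted::kYes);
    }
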
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImage> SkImage::MakeTextureFromMipMap(GrContext* ctx, const SkImageInfo& info,
+ const GrMipLevel* texels, int mipLevelCount,
+ SkBudgeted budgeted,
+ SkSourceGammaTreatment gammaTreatment) {
+ if (!ctx) {
+ return nullptr;
+ }
+ SkAutoTUnref<GrTexture> texture(GrUploadMipMapToTexture(ctx, info, texels, mipLevelCount));
+ if (!texture) {
+ return nullptr;
+ }
+ texture->texturePriv().setGammaTreatment(gammaTreatment);
+ return sk_make_sp<SkImage_Gpu>(texture->width(), texture->height(), kNeedNewImageUniqueID,
+ info.alphaType(), texture, sk_ref_sp(info.colorSpace()),
+ budgeted);
+}
diff --git a/gfx/skia/skia/src/image/SkImage_Gpu.h b/gfx/skia/skia/src/image/SkImage_Gpu.h
new file mode 100644
index 000000000..02260bcca
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_Gpu.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImage_Gpu_DEFINED
+#define SkImage_Gpu_DEFINED
+
+#include "SkAtomics.h"
+#include "GrTexture.h"
+#include "GrGpuResourcePriv.h"
+#include "SkBitmap.h"
+#include "SkGr.h"
+#include "SkImage_Base.h"
+#include "SkImagePriv.h"
+#include "SkSurface.h"
+
+class SkImage_Gpu : public SkImage_Base {
+public:
+ /**
+ * An "image" can be a subset/window into a larger texture, so we explicitly take the
+ * width and height.
+ */
+ SkImage_Gpu(int w, int h, uint32_t uniqueID, SkAlphaType, GrTexture*, sk_sp<SkColorSpace>,
+ SkBudgeted);
+ ~SkImage_Gpu() override;
+
+ SkImageInfo onImageInfo() const override;
+ SkAlphaType onAlphaType() const override { return fAlphaType; }
+
+ void applyBudgetDecision() const {
+ if (SkBudgeted::kYes == fBudgeted) {
+ fTexture->resourcePriv().makeBudgeted();
+ } else {
+ fTexture->resourcePriv().makeUnbudgeted();
+ }
+ }
+
+ bool getROPixels(SkBitmap*, CachingHint) const override;
+ GrTexture* asTextureRef(GrContext* ctx, const GrTextureParams& params,
+ SkSourceGammaTreatment) const override;
+ sk_sp<SkImage> onMakeSubset(const SkIRect&) const override;
+
+ GrTexture* peekTexture() const override { return fTexture; }
+ sk_sp<GrTexture> refPinnedTexture(uint32_t* uniqueID) const override {
+ *uniqueID = this->uniqueID();
+ return sk_ref_sp(fTexture.get());
+ }
+ bool onReadPixels(const SkImageInfo&, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY, CachingHint) const override;
+
+private:
+ SkAutoTUnref<GrTexture> fTexture;
+ const SkAlphaType fAlphaType;
+ const SkBudgeted fBudgeted;
+ sk_sp<SkColorSpace> fColorSpace;
+ mutable SkAtomic<bool> fAddedRasterVersionToCache;
+
+ typedef SkImage_Base INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkImage_Raster.cpp b/gfx/skia/skia/src/image/SkImage_Raster.cpp
new file mode 100644
index 000000000..5799f0b3e
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_Raster.cpp
@@ -0,0 +1,377 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkImage_Base.h"
+#include "SkBitmap.h"
+#include "SkBitmapProcShader.h"
+#include "SkCanvas.h"
+#include "SkColorTable.h"
+#include "SkData.h"
+#include "SkImagePriv.h"
+#include "SkPixelRef.h"
+#include "SkSurface.h"
+
+#if SK_SUPPORT_GPU
+#include "GrContext.h"
+#include "GrImageIDTextureAdjuster.h"
+#include "SkGr.h"
+#include "SkGrPriv.h"
+#endif
+
+// fixes https://bug.skia.org/5096
+static bool is_not_subset(const SkBitmap& bm) {
+ SkASSERT(bm.pixelRef());
+ SkISize dim = bm.pixelRef()->info().dimensions();
+ SkASSERT(dim != bm.dimensions() || bm.pixelRefOrigin().isZero());
+ return dim == bm.dimensions();
+}
+
+class SkImage_Raster : public SkImage_Base {
+public:
+ static bool ValidArgs(const Info& info, size_t rowBytes, bool hasColorTable,
+ size_t* minSize) {
+ const int maxDimension = SK_MaxS32 >> 2;
+
+ if (info.width() <= 0 || info.height() <= 0) {
+ return false;
+ }
+ if (info.width() > maxDimension || info.height() > maxDimension) {
+ return false;
+ }
+ if ((unsigned)info.colorType() > (unsigned)kLastEnum_SkColorType) {
+ return false;
+ }
+ if ((unsigned)info.alphaType() > (unsigned)kLastEnum_SkAlphaType) {
+ return false;
+ }
+
+ if (kUnknown_SkColorType == info.colorType()) {
+ return false;
+ }
+
+ const bool needsCT = kIndex_8_SkColorType == info.colorType();
+ if (needsCT != hasColorTable) {
+ return false;
+ }
+
+ if (rowBytes < info.minRowBytes()) {
+ return false;
+ }
+
+ size_t size = info.getSafeSize(rowBytes);
+ if (0 == size) {
+ return false;
+ }
+
+ if (minSize) {
+ *minSize = size;
+ }
+ return true;
+ }
+
+ SkImage_Raster(const SkImageInfo&, sk_sp<SkData>, size_t rb, SkColorTable*);
+ virtual ~SkImage_Raster();
+
+ SkImageInfo onImageInfo() const override {
+ return fBitmap.info();
+ }
+ SkAlphaType onAlphaType() const override {
+ return fBitmap.alphaType();
+ }
+
+ bool onReadPixels(const SkImageInfo&, void*, size_t, int srcX, int srcY, CachingHint) const override;
+ bool onPeekPixels(SkPixmap*) const override;
+ const SkBitmap* onPeekBitmap() const override { return &fBitmap; }
+
+ SkData* onRefEncoded(GrContext*) const override;
+ bool getROPixels(SkBitmap*, CachingHint) const override;
+ GrTexture* asTextureRef(GrContext*, const GrTextureParams&,
+ SkSourceGammaTreatment) const override;
+ sk_sp<SkImage> onMakeSubset(const SkIRect&) const override;
+
+ // exposed for SkSurface_Raster via SkMakeImageFromPixelRef
+ SkImage_Raster(const SkImageInfo&, SkPixelRef*, const SkIPoint& origin, size_t rowBytes);
+
+ SkPixelRef* getPixelRef() const { return fBitmap.pixelRef(); }
+
+ bool onAsLegacyBitmap(SkBitmap*, LegacyBitmapMode) const override;
+
+ SkImage_Raster(const SkBitmap& bm, bool bitmapMayBeMutable = false)
+ : INHERITED(bm.width(), bm.height(),
+ is_not_subset(bm) ? bm.getGenerationID()
+ : (uint32_t)kNeedNewImageUniqueID)
+ , fBitmap(bm)
+ {
+ if (bm.pixelRef()->isPreLocked()) {
+ // We only preemptively lock if there is no chance of triggering something expensive,
+ // like a lazy decode or an image generator. PreLocked means the pixels are already flat.
+ fBitmap.lockPixels();
+ }
+ SkASSERT(bitmapMayBeMutable || fBitmap.isImmutable());
+ }
+
+ bool onIsLazyGenerated() const override {
+ return fBitmap.pixelRef() && fBitmap.pixelRef()->isLazyGenerated();
+ }
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrTexture> refPinnedTexture(uint32_t* uniqueID) const override;
+ void onPinAsTexture(GrContext*) const override;
+ void onUnpinAsTexture(GrContext*) const override;
+#endif
+
+private:
+ SkBitmap fBitmap;
+
+#if SK_SUPPORT_GPU
+ mutable sk_sp<GrTexture> fPinnedTexture;
+ mutable int32_t fPinnedCount = 0;
+ mutable uint32_t fPinnedUniqueID = 0;
+#endif
+
+ typedef SkImage_Base INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void release_data(void* addr, void* context) {
+ SkData* data = static_cast<SkData*>(context);
+ data->unref();
+}
+
+SkImage_Raster::SkImage_Raster(const Info& info, sk_sp<SkData> data, size_t rowBytes,
+ SkColorTable* ctable)
+ : INHERITED(info.width(), info.height(), kNeedNewImageUniqueID)
+{
+ void* addr = const_cast<void*>(data->data());
+
+ fBitmap.installPixels(info, addr, rowBytes, ctable, release_data, data.release());
+ fBitmap.setImmutable();
+ fBitmap.lockPixels();
+}
+
+SkImage_Raster::SkImage_Raster(const Info& info, SkPixelRef* pr, const SkIPoint& pixelRefOrigin,
+ size_t rowBytes)
+ : INHERITED(info.width(), info.height(), pr->getGenerationID())
+{
+ fBitmap.setInfo(info, rowBytes);
+ fBitmap.setPixelRef(pr, pixelRefOrigin);
+ fBitmap.lockPixels();
+ SkASSERT(fBitmap.isImmutable());
+}
+
+SkImage_Raster::~SkImage_Raster() {
+#if SK_SUPPORT_GPU
+ SkASSERT(nullptr == fPinnedTexture.get()); // want the caller to have manually unpinned
+#endif
+}
+
+bool SkImage_Raster::onReadPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY, CachingHint) const {
+ SkBitmap shallowCopy(fBitmap);
+ return shallowCopy.readPixels(dstInfo, dstPixels, dstRowBytes, srcX, srcY);
+}
+
+bool SkImage_Raster::onPeekPixels(SkPixmap* pm) const {
+ return fBitmap.peekPixels(pm);
+}
+
+SkData* SkImage_Raster::onRefEncoded(GrContext*) const {
+ SkPixelRef* pr = fBitmap.pixelRef();
+ const SkImageInfo prInfo = pr->info();
+ const SkImageInfo bmInfo = fBitmap.info();
+
+ // we only try if we (the image) cover the entire area of the pixelRef
+ if (prInfo.width() == bmInfo.width() && prInfo.height() == bmInfo.height()) {
+ return pr->refEncodedData();
+ }
+ return nullptr;
+}
+
+bool SkImage_Raster::getROPixels(SkBitmap* dst, CachingHint) const {
+ *dst = fBitmap;
+ return true;
+}
+
+GrTexture* SkImage_Raster::asTextureRef(GrContext* ctx, const GrTextureParams& params,
+ SkSourceGammaTreatment gammaTreatment) const {
+#if SK_SUPPORT_GPU
+ if (!ctx) {
+ return nullptr;
+ }
+
+ uint32_t uniqueID;
+ sk_sp<GrTexture> tex = this->refPinnedTexture(&uniqueID);
+ if (tex) {
+ GrTextureAdjuster adjuster(fPinnedTexture.get(), fBitmap.alphaType(), fBitmap.bounds(),
+ fPinnedUniqueID, fBitmap.colorSpace());
+ return adjuster.refTextureSafeForParams(params, gammaTreatment, nullptr);
+ }
+
+ return GrRefCachedBitmapTexture(ctx, fBitmap, params, gammaTreatment);
+#endif
+
+ return nullptr;
+}
+
+#if SK_SUPPORT_GPU
+
+sk_sp<GrTexture> SkImage_Raster::refPinnedTexture(uint32_t* uniqueID) const {
+ if (fPinnedTexture) {
+ SkASSERT(fPinnedCount > 0);
+ SkASSERT(fPinnedUniqueID != 0);
+ *uniqueID = fPinnedUniqueID;
+ return fPinnedTexture;
+ }
+ return nullptr;
+}
+
+void SkImage_Raster::onPinAsTexture(GrContext* ctx) const {
+ if (fPinnedTexture) {
+ SkASSERT(fPinnedCount > 0);
+ SkASSERT(fPinnedUniqueID != 0);
+ SkASSERT(fPinnedTexture->getContext() == ctx);
+ } else {
+ SkASSERT(fPinnedCount == 0);
+ SkASSERT(fPinnedUniqueID == 0);
+ fPinnedTexture.reset(GrRefCachedBitmapTexture(ctx, fBitmap,
+ GrTextureParams::ClampNoFilter(),
+ SkSourceGammaTreatment::kRespect));
+ fPinnedUniqueID = fBitmap.getGenerationID();
+ }
+ // Note: we always increment, even if we failed to create the pinned texture
+ ++fPinnedCount;
+}
+
+void SkImage_Raster::onUnpinAsTexture(GrContext* ctx) const {
+ // Note: we always decrement, even if fPinnedTexture is null
+ SkASSERT(fPinnedCount > 0);
+ SkASSERT(fPinnedUniqueID != 0);
+ if (fPinnedTexture) {
+ SkASSERT(fPinnedTexture->getContext() == ctx);
+ }
+
+ if (0 == --fPinnedCount) {
+ fPinnedTexture.reset(nullptr);
+ fPinnedUniqueID = 0;
+ }
+}
+#endif
+
+sk_sp<SkImage> SkImage_Raster::onMakeSubset(const SkIRect& subset) const {
+ // TODO: could consider a heuristic of sharing pixels when the subset covers most of the image
+
+ SkImageInfo info = SkImageInfo::MakeN32(subset.width(), subset.height(), fBitmap.alphaType());
+ auto surface(SkSurface::MakeRaster(info));
+ if (!surface) {
+ return nullptr;
+ }
+ surface->getCanvas()->clear(0);
+ surface->getCanvas()->drawImage(this, SkIntToScalar(-subset.x()), SkIntToScalar(-subset.y()),
+ nullptr);
+ return surface->makeImageSnapshot();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImage> SkImage::MakeRasterCopy(const SkPixmap& pmap) {
+ size_t size;
+ if (!SkImage_Raster::ValidArgs(pmap.info(), pmap.rowBytes(),
+ pmap.ctable() != nullptr, &size) || !pmap.addr()) {
+ return nullptr;
+ }
+
+ // Here we actually make a copy of the caller's pixel data
+ sk_sp<SkData> data(SkData::MakeWithCopy(pmap.addr(), size));
+ return sk_make_sp<SkImage_Raster>(pmap.info(), std::move(data), pmap.rowBytes(), pmap.ctable());
+}
+
+
+sk_sp<SkImage> SkImage::MakeRasterData(const SkImageInfo& info, sk_sp<SkData> data,
+ size_t rowBytes) {
+ size_t size;
+ if (!SkImage_Raster::ValidArgs(info, rowBytes, false, &size) || !data) {
+ return nullptr;
+ }
+
+ // did they give us enough data?
+ if (data->size() < size) {
+ return nullptr;
+ }
+
+ SkColorTable* ctable = nullptr;
+ return sk_make_sp<SkImage_Raster>(info, std::move(data), rowBytes, ctable);
+}
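
The raster factories above differ mainly in ownership of the pixel memory. A small sketch of the zero-copy path; the release proc is a placeholder that assumes the pixels were allocated with sk_malloc:

    #include "SkImage.h"
    #include "SkPixmap.h"

    static void release_pixels(const void* pixels, void* /*context*/) {
        // Called once the last reference to the wrapping image is gone.
        sk_free(const_cast<void*>(pixels));
    }

    // Zero-copy wrap: the image borrows 'pixels' for its lifetime. Compare
    // SkImage::MakeRasterCopy(pmap), which copies up front and leaves ownership
    // with the caller.
    sk_sp<SkImage> wrap_pixels(const SkImageInfo& info, void* pixels, size_t rowBytes) {
        SkPixmap pmap(info, pixels, rowBytes);
        return SkImage::MakeFromRaster(pmap, release_pixels, nullptr);
    }
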
+
+sk_sp<SkImage> SkImage::MakeFromRaster(const SkPixmap& pmap, RasterReleaseProc proc,
+ ReleaseContext ctx) {
+ size_t size;
+ if (!SkImage_Raster::ValidArgs(pmap.info(), pmap.rowBytes(), false, &size) || !pmap.addr()) {
+ return nullptr;
+ }
+
+ sk_sp<SkData> data(SkData::MakeWithProc(pmap.addr(), size, proc, ctx));
+ return sk_make_sp<SkImage_Raster>(pmap.info(), std::move(data), pmap.rowBytes(), pmap.ctable());
+}
+
+sk_sp<SkImage> SkMakeImageFromPixelRef(const SkImageInfo& info, SkPixelRef* pr,
+ const SkIPoint& pixelRefOrigin, size_t rowBytes) {
+ if (!SkImage_Raster::ValidArgs(info, rowBytes, false, nullptr)) {
+ return nullptr;
+ }
+ return sk_make_sp<SkImage_Raster>(info, pr, pixelRefOrigin, rowBytes);
+}
+
+sk_sp<SkImage> SkMakeImageFromRasterBitmap(const SkBitmap& bm, SkCopyPixelsMode cpm,
+ SkTBlitterAllocator* allocator) {
+ bool hasColorTable = false;
+ if (kIndex_8_SkColorType == bm.colorType()) {
+ SkAutoLockPixels autoLockPixels(bm);
+ hasColorTable = bm.getColorTable() != nullptr;
+ }
+
+ if (!SkImage_Raster::ValidArgs(bm.info(), bm.rowBytes(), hasColorTable, nullptr)) {
+ return nullptr;
+ }
+
+ sk_sp<SkImage> image;
+ if (kAlways_SkCopyPixelsMode == cpm || (!bm.isImmutable() && kNever_SkCopyPixelsMode != cpm)) {
+ SkBitmap tmp(bm);
+ tmp.lockPixels();
+ SkPixmap pmap;
+ if (tmp.getPixels() && tmp.peekPixels(&pmap)) {
+ image = SkImage::MakeRasterCopy(pmap);
+ }
+ } else {
+ if (allocator) {
+ image.reset(allocator->createT<SkImage_Raster>(bm, kNever_SkCopyPixelsMode == cpm));
+ image.get()->ref(); // account for the allocator being an owner
+ } else {
+ image = sk_make_sp<SkImage_Raster>(bm, kNever_SkCopyPixelsMode == cpm);
+ }
+ }
+ return image;
+}
+
+const SkPixelRef* SkBitmapImageGetPixelRef(const SkImage* image) {
+ return ((const SkImage_Raster*)image)->getPixelRef();
+}
+
+bool SkImage_Raster::onAsLegacyBitmap(SkBitmap* bitmap, LegacyBitmapMode mode) const {
+ if (kRO_LegacyBitmapMode == mode) {
+ // When we're a snapshot from a surface, our bitmap may not be marked immutable
+ // even though we are logically immutable. In that case we can't physically share our
+ // pixelRef, since the caller might call setImmutable() on it themselves
+ // (thus changing our state).
+ if (fBitmap.isImmutable()) {
+ bitmap->setInfo(fBitmap.info(), fBitmap.rowBytes());
+ bitmap->setPixelRef(fBitmap.pixelRef(), fBitmap.pixelRefOrigin());
+ return true;
+ }
+ }
+ return this->INHERITED::onAsLegacyBitmap(bitmap, mode);
+}
diff --git a/gfx/skia/skia/src/image/SkReadPixelsRec.h b/gfx/skia/skia/src/image/SkReadPixelsRec.h
new file mode 100644
index 000000000..ed93b74b0
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkReadPixelsRec.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkReadPixelsRec_DEFINED
+#define SkReadPixelsRec_DEFINED
+
+#include "SkImageInfo.h"
+
+/**
+ * Helper class to package and trim the parameters passed to readPixels()
+ */
+struct SkReadPixelsRec {
+ SkReadPixelsRec(const SkImageInfo& info, void* pixels, size_t rowBytes, int x, int y)
+ : fPixels(pixels)
+ , fRowBytes(rowBytes)
+ , fInfo(info)
+ , fX(x)
+ , fY(y)
+ {}
+
+ void* fPixels;
+ size_t fRowBytes;
+ SkImageInfo fInfo;
+ int fX;
+ int fY;
+
+ /*
+ * If this returns true, the fields (except fRowBytes) may have been modified to make
+ * the rec a legal subset of the specified src width/height.
+ *
+ * If this returns false, the rec is left unchanged, indicating that it does not
+ * overlap the src, or is not valid (e.g. bad fInfo) for readPixels().
+ */
+ bool trim(int srcWidth, int srcHeight);
+};
+
+#endif
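
A sketch of how a readPixels() implementation is expected to drive this helper; srcWidth and srcHeight describe the source being read from:

    #include "SkReadPixelsRec.h"

    // Clamp a caller-supplied readPixels() request against a source of the given size.
    bool trimmed_read(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
                      int x, int y, int srcWidth, int srcHeight) {
        SkReadPixelsRec rec(dstInfo, dstPixels, dstRowBytes, x, y);
        if (!rec.trim(srcWidth, srcHeight)) {
            return false;   // no overlap with the source, or invalid parameters
        }
        // rec.fInfo / rec.fPixels / rec.fX / rec.fY now describe a legal subset;
        // hand them to the actual pixel-copy routine here.
        return true;
    }
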
diff --git a/gfx/skia/skia/src/image/SkSurface.cpp b/gfx/skia/skia/src/image/SkSurface.cpp
new file mode 100644
index 000000000..a0b9059e7
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkSurface.cpp
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAtomics.h"
+#include "SkSurface_Base.h"
+#include "SkImagePriv.h"
+#include "SkCanvas.h"
+
+#include "SkFontLCDConfig.h"
+static SkPixelGeometry compute_default_geometry() {
+ SkFontLCDConfig::LCDOrder order = SkFontLCDConfig::GetSubpixelOrder();
+ if (SkFontLCDConfig::kNONE_LCDOrder == order) {
+ return kUnknown_SkPixelGeometry;
+ } else {
+ // Bit0 is RGB(0), BGR(1)
+ // Bit1 is H(0), V(1)
+ const SkPixelGeometry gGeo[] = {
+ kRGB_H_SkPixelGeometry,
+ kBGR_H_SkPixelGeometry,
+ kRGB_V_SkPixelGeometry,
+ kBGR_V_SkPixelGeometry,
+ };
+ int index = 0;
+ if (SkFontLCDConfig::kBGR_LCDOrder == order) {
+ index |= 1;
+ }
+ if (SkFontLCDConfig::kVertical_LCDOrientation == SkFontLCDConfig::GetSubpixelOrientation()){
+ index |= 2;
+ }
+ return gGeo[index];
+ }
+}
+
+SkSurfaceProps::SkSurfaceProps() : fFlags(0), fPixelGeometry(kUnknown_SkPixelGeometry) {}
+
+SkSurfaceProps::SkSurfaceProps(InitType) : fFlags(0), fPixelGeometry(compute_default_geometry()) {}
+
+SkSurfaceProps::SkSurfaceProps(uint32_t flags, InitType)
+ : fFlags(flags)
+ , fPixelGeometry(compute_default_geometry())
+{}
+
+SkSurfaceProps::SkSurfaceProps(uint32_t flags, SkPixelGeometry pg)
+ : fFlags(flags), fPixelGeometry(pg)
+{}
+
+SkSurfaceProps::SkSurfaceProps(const SkSurfaceProps& other)
+ : fFlags(other.fFlags)
+ , fPixelGeometry(other.fPixelGeometry)
+{}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkSurface_Base::SkSurface_Base(int width, int height, const SkSurfaceProps* props)
+ : INHERITED(width, height, props)
+{
+ fCachedCanvas = nullptr;
+ fCachedImage = nullptr;
+}
+
+SkSurface_Base::SkSurface_Base(const SkImageInfo& info, const SkSurfaceProps* props)
+ : INHERITED(info, props)
+{
+ fCachedCanvas = nullptr;
+ fCachedImage = nullptr;
+}
+
+SkSurface_Base::~SkSurface_Base() {
+ // in case the canvas outlives us, we null the callback
+ if (fCachedCanvas) {
+ fCachedCanvas->setSurfaceBase(nullptr);
+ }
+
+ SkSafeUnref(fCachedImage);
+ SkSafeUnref(fCachedCanvas);
+}
+
+void SkSurface_Base::onDraw(SkCanvas* canvas, SkScalar x, SkScalar y, const SkPaint* paint) {
+ auto image = this->makeImageSnapshot(SkBudgeted::kYes);
+ if (image) {
+ canvas->drawImage(image, x, y, paint);
+ }
+}
+
+bool SkSurface_Base::outstandingImageSnapshot() const {
+ return fCachedImage && !fCachedImage->unique();
+}
+
+void SkSurface_Base::aboutToDraw(ContentChangeMode mode) {
+ this->dirtyGenerationID();
+
+ SkASSERT(!fCachedCanvas || fCachedCanvas->getSurfaceBase() == this);
+
+ if (fCachedImage) {
+ // The surface may need to fork its backend if it's sharing it with
+ // the cached image. Note: we only do this if there is an outstanding owner
+ // on the image (besides us).
+ bool unique = fCachedImage->unique();
+ if (!unique) {
+ this->onCopyOnWrite(mode);
+ }
+
+ // regardless of copy-on-write, we must drop our cached image now, so
+ // that the next request will get our new contents.
+ fCachedImage->unref();
+ fCachedImage = nullptr;
+
+ if (unique) {
+ // Our content isn't held by any image now, so we can consider that content mutable.
+ // Raster surfaces need to be told it's safe to consider its pixels mutable again.
+ // We make this call after the ->unref() so the subclass can assert there are no images.
+ this->onRestoreBackingMutability();
+ }
+ } else if (kDiscard_ContentChangeMode == mode) {
+ this->onDiscard();
+ }
+}
+
+uint32_t SkSurface_Base::newGenerationID() {
+ SkASSERT(!fCachedCanvas || fCachedCanvas->getSurfaceBase() == this);
+ static int32_t gID;
+ return sk_atomic_inc(&gID) + 1;
+}
+
+static SkSurface_Base* asSB(SkSurface* surface) {
+ return static_cast<SkSurface_Base*>(surface);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkSurface::SkSurface(int width, int height, const SkSurfaceProps* props)
+ : fProps(SkSurfacePropsCopyOrDefault(props)), fWidth(width), fHeight(height)
+{
+ SkASSERT(fWidth > 0);
+ SkASSERT(fHeight > 0);
+ fGenerationID = 0;
+}
+
+SkSurface::SkSurface(const SkImageInfo& info, const SkSurfaceProps* props)
+ : fProps(SkSurfacePropsCopyOrDefault(props)), fWidth(info.width()), fHeight(info.height())
+{
+ SkASSERT(fWidth > 0);
+ SkASSERT(fHeight > 0);
+ fGenerationID = 0;
+}
+
+uint32_t SkSurface::generationID() {
+ if (0 == fGenerationID) {
+ fGenerationID = asSB(this)->newGenerationID();
+ }
+ return fGenerationID;
+}
+
+void SkSurface::notifyContentWillChange(ContentChangeMode mode) {
+ asSB(this)->aboutToDraw(mode);
+}
+
+SkCanvas* SkSurface::getCanvas() {
+ return asSB(this)->getCachedCanvas();
+}
+
+sk_sp<SkImage> SkSurface::makeImageSnapshot(SkBudgeted budgeted) {
+ // the caller will call unref() to balance this
+ return asSB(this)->refCachedImage(budgeted, kNo_ForceUnique);
+}
+
+sk_sp<SkImage> SkSurface::makeImageSnapshot(SkBudgeted budgeted, ForceUnique unique) {
+ // the caller will call unref() to balance this
+ return asSB(this)->refCachedImage(budgeted, unique);
+}
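
Snapshots interact with later drawing through the copy-on-write machinery above (aboutToDraw/onCopyOnWrite): an outstanding snapshot keeps its contents even if the surface is drawn to again. A minimal raster sketch of the observable behaviour:

    #include "SkCanvas.h"
    #include "SkSurface.h"

    void snapshot_is_stable() {
        sk_sp<SkSurface> surface = SkSurface::MakeRaster(SkImageInfo::MakeN32Premul(64, 64));
        surface->getCanvas()->clear(SK_ColorRED);

        sk_sp<SkImage> before = surface->makeImageSnapshot(SkBudgeted::kYes);

        // This draw triggers aboutToDraw() -> onCopyOnWrite(); 'before' still sees red.
        surface->getCanvas()->clear(SK_ColorBLUE);

        sk_sp<SkImage> after = surface->makeImageSnapshot(SkBudgeted::kYes);
        SkASSERT(before->uniqueID() != after->uniqueID());
    }
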
+
+sk_sp<SkSurface> SkSurface::makeSurface(const SkImageInfo& info) {
+ return asSB(this)->onNewSurface(info);
+}
+
+void SkSurface::draw(SkCanvas* canvas, SkScalar x, SkScalar y,
+ const SkPaint* paint) {
+ return asSB(this)->onDraw(canvas, x, y, paint);
+}
+
+bool SkSurface::peekPixels(SkPixmap* pmap) {
+ return this->getCanvas()->peekPixels(pmap);
+}
+
+#ifdef SK_SUPPORT_LEGACY_PEEKPIXELS_PARMS
+const void* SkSurface::peekPixels(SkImageInfo* info, size_t* rowBytes) {
+ SkPixmap pm;
+ if (this->peekPixels(&pm)) {
+ if (info) {
+ *info = pm.info();
+ }
+ if (rowBytes) {
+ *rowBytes = pm.rowBytes();
+ }
+ return pm.addr();
+ }
+ return nullptr;
+}
+#endif
+
+bool SkSurface::readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY) {
+ return this->getCanvas()->readPixels(dstInfo, dstPixels, dstRowBytes, srcX, srcY);
+}
+
+GrBackendObject SkSurface::getTextureHandle(BackendHandleAccess access) {
+ return asSB(this)->onGetTextureHandle(access);
+}
+
+bool SkSurface::getRenderTargetHandle(GrBackendObject* obj, BackendHandleAccess access) {
+ return asSB(this)->onGetRenderTargetHandle(obj, access);
+}
+
+void SkSurface::prepareForExternalIO() {
+ asSB(this)->onPrepareForExternalIO();
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+
+#if !SK_SUPPORT_GPU
+
+sk_sp<SkSurface> SkSurface::MakeRenderTarget(GrContext*, SkBudgeted, const SkImageInfo&,
+ int, GrSurfaceOrigin, const SkSurfaceProps*) {
+ return nullptr;
+}
+
+sk_sp<SkSurface> SkSurface::MakeFromBackendTexture(GrContext*, const GrBackendTextureDesc&,
+ sk_sp<SkColorSpace>, const SkSurfaceProps*) {
+ return nullptr;
+}
+
+sk_sp<SkSurface> SkSurface::MakeFromBackendRenderTarget(GrContext*,
+ const GrBackendRenderTargetDesc&,
+ sk_sp<SkColorSpace>,
+ const SkSurfaceProps*) {
+ return nullptr;
+}
+
+sk_sp<SkSurface> SkSurface::MakeFromBackendTextureAsRenderTarget(GrContext*, const GrBackendTextureDesc&,
+ sk_sp<SkColorSpace>, const SkSurfaceProps*) {
+ return nullptr;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkSurface_Base.h b/gfx/skia/skia/src/image/SkSurface_Base.h
new file mode 100644
index 000000000..8351bb87e
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkSurface_Base.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSurface_Base_DEFINED
+#define SkSurface_Base_DEFINED
+
+#include "SkCanvas.h"
+#include "SkImagePriv.h"
+#include "SkSurface.h"
+#include "SkSurfacePriv.h"
+
+class SkSurface_Base : public SkSurface {
+public:
+ SkSurface_Base(int width, int height, const SkSurfaceProps*);
+ SkSurface_Base(const SkImageInfo&, const SkSurfaceProps*);
+ virtual ~SkSurface_Base();
+
+ virtual GrBackendObject onGetTextureHandle(BackendHandleAccess) {
+ return 0;
+ }
+
+ virtual bool onGetRenderTargetHandle(GrBackendObject*, BackendHandleAccess) {
+ return false;
+ }
+
+ /**
+ * Allocate a canvas that will draw into this surface. We will cache this
+ * canvas, to return the same object to the caller multiple times. We
+ * take ownership, and will call unref() on the canvas when we go out of
+ * scope.
+ */
+ virtual SkCanvas* onNewCanvas() = 0;
+
+ virtual sk_sp<SkSurface> onNewSurface(const SkImageInfo&) = 0;
+
+ /**
+ * Allocate an SkImage that represents the current contents of the surface.
+ * This needs to be able to outlive the surface itself (if need be), and
+ * must faithfully represent the current contents, even if the surface
+ * is changed after this is called (e.g. it is drawn to via its canvas).
+ */
+ virtual sk_sp<SkImage> onNewImageSnapshot(SkBudgeted, SkCopyPixelsMode) = 0;
+
+ /**
+ * Default implementation:
+ *
+ * image = this->makeImageSnapshot();
+ * if (image) {
+ * canvas->drawImage(image, x, y, paint);
+ * }
+ */
+ virtual void onDraw(SkCanvas*, SkScalar x, SkScalar y, const SkPaint*);
+
+ /**
+ * Called as a performance hint when the Surface is allowed to make its contents
+ * undefined.
+ */
+ virtual void onDiscard() {}
+
+ /**
+ * If the surface is about to change, we call this so that our subclass
+ * can optionally fork its backend (copy-on-write) in case it was
+ * being shared with the cachedImage.
+ */
+ virtual void onCopyOnWrite(ContentChangeMode) = 0;
+
+ /**
+ * Signal the surface to remind its backing store that it's mutable again.
+ * Called only when we _didn't_ copy-on-write; we assume the copies start mutable.
+ */
+ virtual void onRestoreBackingMutability() {}
+
+ /**
+ * Issue any pending surface IO to the current backend 3D API and resolve any surface MSAA.
+ */
+ virtual void onPrepareForExternalIO() {}
+
+ inline SkCanvas* getCachedCanvas();
+ inline sk_sp<SkImage> refCachedImage(SkBudgeted, ForceUnique);
+
+ bool hasCachedImage() const { return fCachedImage != nullptr; }
+
+ // called by SkSurface to compute a new genID
+ uint32_t newGenerationID();
+
+private:
+ SkCanvas* fCachedCanvas;
+ SkImage* fCachedImage;
+
+ void aboutToDraw(ContentChangeMode mode);
+
+ // Returns true if there is an outstanding image-snapshot, indicating that a call to aboutToDraw
+ // would trigger a copy-on-write.
+ bool outstandingImageSnapshot() const;
+
+ friend class SkCanvas;
+ friend class SkSurface;
+
+ typedef SkSurface INHERITED;
+};
+
+SkCanvas* SkSurface_Base::getCachedCanvas() {
+ if (nullptr == fCachedCanvas) {
+ fCachedCanvas = this->onNewCanvas();
+ if (fCachedCanvas) {
+ fCachedCanvas->setSurfaceBase(this);
+ }
+ }
+ return fCachedCanvas;
+}
+
+sk_sp<SkImage> SkSurface_Base::refCachedImage(SkBudgeted budgeted, ForceUnique unique) {
+ SkImage* snap = fCachedImage;
+ if (kYes_ForceUnique == unique && snap && !snap->unique()) {
+ snap = nullptr;
+ }
+ if (snap) {
+ return sk_ref_sp(snap);
+ }
+ SkCopyPixelsMode cpm = (kYes_ForceUnique == unique) ? kAlways_SkCopyPixelsMode :
+ kIfMutable_SkCopyPixelsMode;
+ snap = this->onNewImageSnapshot(budgeted, cpm).release();
+ if (kNo_ForceUnique == unique) {
+ SkASSERT(!fCachedImage);
+ fCachedImage = SkSafeRef(snap);
+ }
+ SkASSERT(!fCachedCanvas || fCachedCanvas->getSurfaceBase() == this);
+ return sk_sp<SkImage>(snap);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkSurface_Gpu.cpp b/gfx/skia/skia/src/image/SkSurface_Gpu.cpp
new file mode 100644
index 000000000..300731311
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkSurface_Gpu.cpp
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkSurface_Gpu.h"
+
+#include "GrContextPriv.h"
+#include "GrResourceProvider.h"
+#include "SkCanvas.h"
+#include "SkGpuDevice.h"
+#include "SkImage_Base.h"
+#include "SkImage_Gpu.h"
+#include "SkImagePriv.h"
+#include "SkSurface_Base.h"
+
+#if SK_SUPPORT_GPU
+
+SkSurface_Gpu::SkSurface_Gpu(sk_sp<SkGpuDevice> device)
+ : INHERITED(device->width(), device->height(), &device->surfaceProps())
+ , fDevice(std::move(device)) {
+}
+
+SkSurface_Gpu::~SkSurface_Gpu() {
+}
+
+static GrRenderTarget* prepare_rt_for_external_access(SkSurface_Gpu* surface,
+ SkSurface::BackendHandleAccess access) {
+ switch (access) {
+ case SkSurface::kFlushRead_BackendHandleAccess:
+ break;
+ case SkSurface::kFlushWrite_BackendHandleAccess:
+ case SkSurface::kDiscardWrite_BackendHandleAccess:
+ // for now we don't special-case on Discard, but we may in the future.
+ surface->notifyContentWillChange(SkSurface::kRetain_ContentChangeMode);
+ break;
+ }
+
+ // Grab the render target *after* firing notifications, as it may get switched if CoW kicks in.
+ surface->getDevice()->flush();
+ GrDrawContext* dc = surface->getDevice()->accessDrawContext();
+ return dc->accessRenderTarget();
+}
+
+GrBackendObject SkSurface_Gpu::onGetTextureHandle(BackendHandleAccess access) {
+ GrRenderTarget* rt = prepare_rt_for_external_access(this, access);
+ GrTexture* texture = rt->asTexture();
+ if (texture) {
+ return texture->getTextureHandle();
+ }
+ return 0;
+}
+
+bool SkSurface_Gpu::onGetRenderTargetHandle(GrBackendObject* obj, BackendHandleAccess access) {
+ GrRenderTarget* rt = prepare_rt_for_external_access(this, access);
+ *obj = rt->getRenderTargetHandle();
+ return true;
+}
+
+SkCanvas* SkSurface_Gpu::onNewCanvas() {
+ SkCanvas::InitFlags flags = SkCanvas::kDefault_InitFlags;
+ flags = static_cast<SkCanvas::InitFlags>(flags | SkCanvas::kConservativeRasterClip_InitFlag);
+
+ return new SkCanvas(fDevice.get(), flags);
+}
+
+sk_sp<SkSurface> SkSurface_Gpu::onNewSurface(const SkImageInfo& info) {
+ int sampleCount = fDevice->accessDrawContext()->numColorSamples();
+ GrSurfaceOrigin origin = fDevice->accessDrawContext()->origin();
+ // TODO: Make caller specify this (change virtual signature of onNewSurface).
+ static const SkBudgeted kBudgeted = SkBudgeted::kNo;
+ return SkSurface::MakeRenderTarget(fDevice->context(), kBudgeted, info, sampleCount,
+ origin, &this->props());
+}
+
+sk_sp<SkImage> SkSurface_Gpu::onNewImageSnapshot(SkBudgeted budgeted, SkCopyPixelsMode cpm) {
+ GrRenderTarget* rt = fDevice->accessDrawContext()->accessRenderTarget();
+ SkASSERT(rt);
+ GrTexture* tex = rt->asTexture();
+ SkAutoTUnref<GrTexture> copy;
+ // If the original render target is a buffer originally created by the client, then we don't
+ // want to ever retarget the SkSurface at another buffer we create. Force a copy now to avoid
+ // copy-on-write.
+ if (kAlways_SkCopyPixelsMode == cpm || !tex || rt->resourcePriv().refsWrappedObjects()) {
+ GrSurfaceDesc desc = fDevice->accessDrawContext()->desc();
+ GrContext* ctx = fDevice->context();
+ desc.fFlags = desc.fFlags & ~kRenderTarget_GrSurfaceFlag;
+ copy.reset(ctx->textureProvider()->createTexture(desc, budgeted));
+ if (!copy) {
+ return nullptr;
+ }
+ if (!ctx->copySurface(copy, rt)) {
+ return nullptr;
+ }
+ tex = copy;
+ }
+ const SkImageInfo info = fDevice->imageInfo();
+ sk_sp<SkImage> image;
+ if (tex) {
+ image = sk_make_sp<SkImage_Gpu>(info.width(), info.height(), kNeedNewImageUniqueID,
+ info.alphaType(), tex, sk_ref_sp(info.colorSpace()),
+ budgeted);
+ }
+ return image;
+}
+
+// Create a new render target and, if necessary, copy the contents of the old
+// render target into it. Note that this flushes the SkGpuDevice but
+// doesn't force an OpenGL flush.
+void SkSurface_Gpu::onCopyOnWrite(ContentChangeMode mode) {
+ GrRenderTarget* rt = fDevice->accessDrawContext()->accessRenderTarget();
+ // are we sharing our render target with the image? Note this call should never create a new
+ // image because onCopyOnWrite is only called when there is a cached image.
+ sk_sp<SkImage> image(this->refCachedImage(SkBudgeted::kNo, kNo_ForceUnique));
+ SkASSERT(image);
+ if (rt->asTexture() == as_IB(image)->peekTexture()) {
+ this->fDevice->replaceDrawContext(SkSurface::kRetain_ContentChangeMode == mode);
+ SkTextureImageApplyBudgetedDecision(image.get());
+ } else if (kDiscard_ContentChangeMode == mode) {
+ this->SkSurface_Gpu::onDiscard();
+ }
+}
+
+void SkSurface_Gpu::onDiscard() {
+ fDevice->accessDrawContext()->discard();
+}
+
+void SkSurface_Gpu::onPrepareForExternalIO() {
+ fDevice->flush();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkSurface_Gpu::Valid(const SkImageInfo& info) {
+ switch (info.colorType()) {
+ case kRGBA_F16_SkColorType:
+ return info.colorSpace() && info.colorSpace()->gammaIsLinear();
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ return !info.colorSpace() || info.colorSpace()->gammaCloseToSRGB();
+ default:
+ return !info.colorSpace();
+ }
+}
+
+bool SkSurface_Gpu::Valid(GrContext* context, GrPixelConfig config, SkColorSpace* colorSpace) {
+ switch (config) {
+ case kRGBA_half_GrPixelConfig:
+ return colorSpace && colorSpace->gammaIsLinear();
+ case kSRGBA_8888_GrPixelConfig:
+ case kSBGRA_8888_GrPixelConfig:
+ return context->caps()->srgbSupport() && colorSpace && colorSpace->gammaCloseToSRGB();
+ case kRGBA_8888_GrPixelConfig:
+ case kBGRA_8888_GrPixelConfig:
+ // If we don't have sRGB support, we may get here with a color space. It still needs
+ // to be sRGB-like (so that the application will work correctly on sRGB devices.)
+ return !colorSpace ||
+ (!context->caps()->srgbSupport() && colorSpace->gammaCloseToSRGB());
+ default:
+ return !colorSpace;
+ }
+}
+
+sk_sp<SkSurface> SkSurface::MakeRenderTarget(GrContext* ctx, SkBudgeted budgeted,
+ const SkImageInfo& info, int sampleCount,
+ GrSurfaceOrigin origin, const SkSurfaceProps* props) {
+ if (!SkSurface_Gpu::Valid(info)) {
+ return nullptr;
+ }
+
+ sk_sp<SkGpuDevice> device(SkGpuDevice::Make(
+ ctx, budgeted, info, sampleCount, origin, props, SkGpuDevice::kClear_InitContents));
+ if (!device) {
+ return nullptr;
+ }
+ return sk_make_sp<SkSurface_Gpu>(std::move(device));
+}
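
For completeness, a sketch of creating a GPU-backed surface with the factory above and snapshotting it into a texture-backed image; ctx is an already-initialized GrContext, and the sample count and origin are illustrative defaults:

    #include "GrContext.h"
    #include "SkCanvas.h"
    #include "SkSurface.h"

    sk_sp<SkImage> draw_to_gpu_surface(GrContext* ctx) {
        const SkImageInfo info = SkImageInfo::MakeN32Premul(256, 256);
        sk_sp<SkSurface> surface = SkSurface::MakeRenderTarget(ctx, SkBudgeted::kYes, info,
                                                               0 /* sampleCount */,
                                                               kBottomLeft_GrSurfaceOrigin,
                                                               nullptr /* props */);
        if (!surface) {
            return nullptr;   // SkSurface_Gpu::Valid() rejected the color type/space combo
        }
        surface->getCanvas()->clear(SK_ColorGREEN);
        // The snapshot is an SkImage_Gpu that initially shares the render target;
        // further drawing to the surface triggers copy-on-write.
        return surface->makeImageSnapshot(SkBudgeted::kYes);
    }
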
+
+sk_sp<SkSurface> SkSurface::MakeFromBackendTexture(GrContext* context,
+ const GrBackendTextureDesc& desc,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* props) {
+ if (!context) {
+ return nullptr;
+ }
+ if (!SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag)) {
+ return nullptr;
+ }
+ if (!SkSurface_Gpu::Valid(context, desc.fConfig, colorSpace.get())) {
+ return nullptr;
+ }
+
+ sk_sp<GrDrawContext> dc(context->contextPriv().makeBackendTextureDrawContext(
+ desc,
+ std::move(colorSpace),
+ props,
+ kBorrow_GrWrapOwnership));
+ if (!dc) {
+ return nullptr;
+ }
+
+ sk_sp<SkGpuDevice> device(SkGpuDevice::Make(std::move(dc), desc.fWidth, desc.fHeight,
+ SkGpuDevice::kUninit_InitContents));
+ if (!device) {
+ return nullptr;
+ }
+ return sk_make_sp<SkSurface_Gpu>(std::move(device));
+}
+
+sk_sp<SkSurface> SkSurface::MakeFromBackendRenderTarget(GrContext* context,
+ const GrBackendRenderTargetDesc& desc,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* props) {
+ if (!context) {
+ return nullptr;
+ }
+ if (!SkSurface_Gpu::Valid(context, desc.fConfig, colorSpace.get())) {
+ return nullptr;
+ }
+
+ sk_sp<GrDrawContext> dc(context->contextPriv().makeBackendRenderTargetDrawContext(
+ desc,
+ std::move(colorSpace),
+ props));
+ if (!dc) {
+ return nullptr;
+ }
+
+ sk_sp<SkGpuDevice> device(SkGpuDevice::Make(std::move(dc), desc.fWidth, desc.fHeight,
+ SkGpuDevice::kUninit_InitContents));
+ if (!device) {
+ return nullptr;
+ }
+
+ return sk_make_sp<SkSurface_Gpu>(std::move(device));
+}
+
+sk_sp<SkSurface> SkSurface::MakeFromBackendTextureAsRenderTarget(GrContext* context,
+ const GrBackendTextureDesc& desc,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* props) {
+ if (!context) {
+ return nullptr;
+ }
+ if (!SkSurface_Gpu::Valid(context, desc.fConfig, colorSpace.get())) {
+ return nullptr;
+ }
+
+ sk_sp<GrDrawContext> dc(context->contextPriv().makeBackendTextureAsRenderTargetDrawContext(
+ desc,
+ std::move(colorSpace),
+ props));
+ if (!dc) {
+ return nullptr;
+ }
+
+ sk_sp<SkGpuDevice> device(SkGpuDevice::Make(std::move(dc), desc.fWidth, desc.fHeight,
+ SkGpuDevice::kUninit_InitContents));
+ if (!device) {
+ return nullptr;
+ }
+ return sk_make_sp<SkSurface_Gpu>(std::move(device));
+}
+
+#endif
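
For context, a minimal sketch of how the GPU surface factory above is typically driven from client code; the GrContext named grContext is assumed to exist already and is not part of this patch:

    SkImageInfo info = SkImageInfo::MakeN32Premul(256, 256);
    sk_sp<SkSurface> surface = SkSurface::MakeRenderTarget(
            grContext, SkBudgeted::kYes, info, /*sampleCount=*/0,
            kBottomLeft_GrSurfaceOrigin, /*props=*/nullptr);
    if (surface) {
        surface->getCanvas()->clear(SK_ColorWHITE);               // draws through the SkGpuDevice
        sk_sp<SkImage> snapshot = surface->makeImageSnapshot();   // exercises onNewImageSnapshot()
    }
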
diff --git a/gfx/skia/skia/src/image/SkSurface_Gpu.h b/gfx/skia/skia/src/image/SkSurface_Gpu.h
new file mode 100644
index 000000000..b7088ea6f
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkSurface_Gpu.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSurface_Gpu_DEFINED
+#define SkSurface_Gpu_DEFINED
+
+#include "SkSurface_Base.h"
+
+#if SK_SUPPORT_GPU
+
+class SkGpuDevice;
+
+class SkSurface_Gpu : public SkSurface_Base {
+public:
+ SkSurface_Gpu(sk_sp<SkGpuDevice>);
+ virtual ~SkSurface_Gpu();
+
+ GrBackendObject onGetTextureHandle(BackendHandleAccess) override;
+ bool onGetRenderTargetHandle(GrBackendObject*, BackendHandleAccess) override;
+ SkCanvas* onNewCanvas() override;
+ sk_sp<SkSurface> onNewSurface(const SkImageInfo&) override;
+ sk_sp<SkImage> onNewImageSnapshot(SkBudgeted, SkCopyPixelsMode) override;
+ void onCopyOnWrite(ContentChangeMode) override;
+ void onDiscard() override;
+ void onPrepareForExternalIO() override;
+
+ SkGpuDevice* getDevice() { return fDevice.get(); }
+
+ static bool Valid(const SkImageInfo&);
+ static bool Valid(GrContext*, GrPixelConfig, SkColorSpace*);
+
+private:
+ sk_sp<SkGpuDevice> fDevice;
+
+ typedef SkSurface_Base INHERITED;
+};
+
+#endif // SK_SUPPORT_GPU
+
+#endif // SkSurface_Gpu_DEFINED
diff --git a/gfx/skia/skia/src/image/SkSurface_Raster.cpp b/gfx/skia/skia/src/image/SkSurface_Raster.cpp
new file mode 100644
index 000000000..2b2bf6452
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkSurface_Raster.cpp
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkSurface_Base.h"
+#include "SkImagePriv.h"
+#include "SkCanvas.h"
+#include "SkDevice.h"
+#include "SkMallocPixelRef.h"
+
+static const size_t kIgnoreRowBytesValue = (size_t)~0;
+
+class SkSurface_Raster : public SkSurface_Base {
+public:
+ static bool Valid(const SkImageInfo&, size_t rb = kIgnoreRowBytesValue);
+
+ SkSurface_Raster(const SkImageInfo&, void*, size_t rb,
+ void (*releaseProc)(void* pixels, void* context), void* context,
+ const SkSurfaceProps*);
+ SkSurface_Raster(SkPixelRef*, const SkSurfaceProps*);
+
+ SkCanvas* onNewCanvas() override;
+ sk_sp<SkSurface> onNewSurface(const SkImageInfo&) override;
+ sk_sp<SkImage> onNewImageSnapshot(SkBudgeted, SkCopyPixelsMode) override;
+ void onDraw(SkCanvas*, SkScalar x, SkScalar y, const SkPaint*) override;
+ void onCopyOnWrite(ContentChangeMode) override;
+ void onRestoreBackingMutability() override;
+
+private:
+ SkBitmap fBitmap;
+ size_t fRowBytes;
+ bool fWeOwnThePixels;
+
+ typedef SkSurface_Base INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkSurface_Raster::Valid(const SkImageInfo& info, size_t rowBytes) {
+ if (info.isEmpty()) {
+ return false;
+ }
+
+ static const size_t kMaxTotalSize = SK_MaxS32;
+
+ int shift = 0;
+ switch (info.colorType()) {
+ case kAlpha_8_SkColorType:
+ if (info.colorSpace()) {
+ return false;
+ }
+ shift = 0;
+ break;
+ case kRGB_565_SkColorType:
+ if (info.colorSpace()) {
+ return false;
+ }
+ shift = 1;
+ break;
+ case kN32_SkColorType:
+ if (info.colorSpace() && !info.colorSpace()->gammaCloseToSRGB()) {
+ return false;
+ }
+ shift = 2;
+ break;
+ case kRGBA_F16_SkColorType:
+ if (!info.colorSpace() || !info.colorSpace()->gammaIsLinear()) {
+ return false;
+ }
+ shift = 3;
+ break;
+ default:
+ return false;
+ }
+
+ if (kIgnoreRowBytesValue == rowBytes) {
+ return true;
+ }
+
+ uint64_t minRB = (uint64_t)info.width() << shift;
+ if (minRB > rowBytes) {
+ return false;
+ }
+
+ size_t alignedRowBytes = rowBytes >> shift << shift;
+ if (alignedRowBytes != rowBytes) {
+ return false;
+ }
+
+ uint64_t size = sk_64_mul(info.height(), rowBytes);
+ if (size > kMaxTotalSize) {
+ return false;
+ }
+
+ return true;
+}
+
+SkSurface_Raster::SkSurface_Raster(const SkImageInfo& info, void* pixels, size_t rb,
+ void (*releaseProc)(void* pixels, void* context), void* context,
+ const SkSurfaceProps* props)
+ : INHERITED(info, props)
+{
+ fBitmap.installPixels(info, pixels, rb, nullptr, releaseProc, context);
+ fRowBytes = 0; // don't need to track the rowbytes
+ fWeOwnThePixels = false; // We are "Direct"
+}
+
+SkSurface_Raster::SkSurface_Raster(SkPixelRef* pr, const SkSurfaceProps* props)
+ : INHERITED(pr->info().width(), pr->info().height(), props)
+{
+ const SkImageInfo& info = pr->info();
+
+ fBitmap.setInfo(info, pr->rowBytes());
+ fBitmap.setPixelRef(pr);
+ fRowBytes = pr->rowBytes(); // we track this, so that subsequent re-allocs will match
+ fWeOwnThePixels = true;
+}
+
+SkCanvas* SkSurface_Raster::onNewCanvas() { return new SkCanvas(fBitmap, this->props()); }
+
+sk_sp<SkSurface> SkSurface_Raster::onNewSurface(const SkImageInfo& info) {
+ return SkSurface::MakeRaster(info, &this->props());
+}
+
+void SkSurface_Raster::onDraw(SkCanvas* canvas, SkScalar x, SkScalar y,
+ const SkPaint* paint) {
+ canvas->drawBitmap(fBitmap, x, y, paint);
+}
+
+sk_sp<SkImage> SkSurface_Raster::onNewImageSnapshot(SkBudgeted, SkCopyPixelsMode cpm) {
+ if (fWeOwnThePixels) {
+ // SkImage_raster requires these pixels are immutable for its full lifetime.
+ // We'll undo this via onRestoreBackingMutability() if we can avoid the COW.
+ if (SkPixelRef* pr = fBitmap.pixelRef()) {
+ pr->setTemporarilyImmutable();
+ }
+ } else {
+ cpm = kAlways_SkCopyPixelsMode;
+ }
+
+ // Our pixels are in memory, so read access on the snapshot SkImage could be cheap.
+ // Lock the shared pixel ref to ensure peekPixels() is usable.
+ return SkMakeImageFromRasterBitmap(fBitmap, cpm);
+}
+
+void SkSurface_Raster::onRestoreBackingMutability() {
+ SkASSERT(!this->hasCachedImage()); // Shouldn't be any snapshots out there.
+ if (SkPixelRef* pr = fBitmap.pixelRef()) {
+ pr->restoreMutability();
+ }
+}
+
+void SkSurface_Raster::onCopyOnWrite(ContentChangeMode mode) {
+ // are we sharing pixelrefs with the image?
+ sk_sp<SkImage> cached(this->refCachedImage(SkBudgeted::kNo, kNo_ForceUnique));
+ SkASSERT(cached);
+ if (SkBitmapImageGetPixelRef(cached.get()) == fBitmap.pixelRef()) {
+ SkASSERT(fWeOwnThePixels);
+ if (kDiscard_ContentChangeMode == mode) {
+ fBitmap.allocPixels();
+ } else {
+ SkBitmap prev(fBitmap);
+ fBitmap.allocPixels();
+ prev.lockPixels();
+ SkASSERT(prev.info() == fBitmap.info());
+ SkASSERT(prev.rowBytes() == fBitmap.rowBytes());
+ memcpy(fBitmap.getPixels(), prev.getPixels(), fBitmap.getSafeSize());
+ }
+ SkASSERT(fBitmap.rowBytes() == fRowBytes); // be sure we always use the same value
+
+        // Now fBitmap is a deep copy of itself (and therefore different from
+        // what is being used by the image). Next we update the canvas to use
+        // this as its backend, so drawing into the surface can no longer touch
+        // the image's pixels.
+ SkASSERT(this->getCachedCanvas());
+ this->getCachedCanvas()->getDevice()->replaceBitmapBackendForRasterSurface(fBitmap);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkSurface> SkSurface::MakeRasterDirectReleaseProc(const SkImageInfo& info, void* pixels,
+ size_t rb, void (*releaseProc)(void* pixels, void* context), void* context,
+ const SkSurfaceProps* props) {
+ if (nullptr == releaseProc) {
+ context = nullptr;
+ }
+ if (!SkSurface_Raster::Valid(info, rb)) {
+ return nullptr;
+ }
+ if (nullptr == pixels) {
+ return nullptr;
+ }
+
+ return sk_make_sp<SkSurface_Raster>(info, pixels, rb, releaseProc, context, props);
+}
+
+sk_sp<SkSurface> SkSurface::MakeRasterDirect(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const SkSurfaceProps* props) {
+ return MakeRasterDirectReleaseProc(info, pixels, rowBytes, nullptr, nullptr, props);
+}
+
+sk_sp<SkSurface> SkSurface::MakeRaster(const SkImageInfo& info, size_t rowBytes,
+ const SkSurfaceProps* props) {
+ if (!SkSurface_Raster::Valid(info)) {
+ return nullptr;
+ }
+
+ // If the requested alpha type is opaque, then leave the pixels uninitialized.
+    // Alpha formats can be safely initialized to zero.
+ SkAutoTUnref<SkPixelRef> pr(info.isOpaque()
+ ? SkMallocPixelRef::NewAllocate(info, rowBytes, nullptr)
+ : SkMallocPixelRef::NewZeroed(info, rowBytes, nullptr));
+ if (nullptr == pr.get()) {
+ return nullptr;
+ }
+ if (rowBytes) {
+ SkASSERT(pr->rowBytes() == rowBytes);
+ }
+ return sk_make_sp<SkSurface_Raster>(pr, props);
+}
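
Similarly, a rough usage sketch for the raster factories above; the caller owns the pixel storage, and the std::vector buffer here is purely illustrative:

    SkImageInfo info = SkImageInfo::MakeN32Premul(640, 480);
    std::vector<uint32_t> pixels(info.width() * info.height());   // assumes <vector> is included
    sk_sp<SkSurface> surface = SkSurface::MakeRasterDirect(
            info, pixels.data(), info.width() * sizeof(uint32_t), /*props=*/nullptr);
    if (surface) {
        surface->getCanvas()->clear(SK_ColorTRANSPARENT);
        // pixels[] now holds the rendered, premultiplied N32 data.
    }
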
diff --git a/gfx/skia/skia/src/images/SkForceLinking.cpp b/gfx/skia/skia/src/images/SkForceLinking.cpp
new file mode 100644
index 000000000..81d485c88
--- /dev/null
+++ b/gfx/skia/skia/src/images/SkForceLinking.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkImageEncoder.h"
+#include "SkForceLinking.h"
+
+// This method is required to fool the linker into not discarding the pre-main
+// initialization and registration of the encoder classes. Passing true will
+// cause memory leaks.
+int SkForceLinking(bool doNotPassTrue) {
+ if (doNotPassTrue) {
+ SkASSERT(false);
+#if defined(SK_HAS_JPEG_LIBRARY) && !defined(SK_USE_CG_ENCODER) && !defined(SK_USE_WIC_ENCODER)
+ CreateJPEGImageEncoder();
+#endif
+#if defined(SK_HAS_WEBP_LIBRARY) && !defined(SK_USE_CG_ENCODER) && !defined(SK_USE_WIC_ENCODER)
+ CreateWEBPImageEncoder();
+#endif
+#if defined(SK_HAS_PNG_LIBRARY) && !defined(SK_USE_CG_ENCODER) && !defined(SK_USE_WIC_ENCODER)
+ CreatePNGImageEncoder();
+#endif
+
+ // Only link hardware texture codecs on platforms that build them. See images.gyp
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ CreateKTXImageEncoder();
+#endif
+
+#if defined (SK_USE_CG_ENCODER)
+ CreateImageEncoder_CG(SkImageEncoder::kPNG_Type);
+#endif
+#if defined (SK_USE_WIC_ENCODER)
+ CreateImageEncoder_WIC(SkImageEncoder::kPNG_Type);
+#endif
+ return -1;
+ }
+ return 0;
+}
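
A typical way for an application to consume this hook is to reference it once during startup so the registered encoders survive dead-code stripping. A minimal sketch (the static-initializer idiom is an assumption, not something this patch mandates):

    #include "SkForceLinking.h"

    // Calling with 'false' pulls in this translation unit (and the encoder
    // registrations it drags in) without actually instantiating any encoder.
    static const int gForceImageEncoderLinking = SkForceLinking(false);
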
diff --git a/gfx/skia/skia/src/images/SkGIFMovie.cpp b/gfx/skia/skia/src/images/SkGIFMovie.cpp
new file mode 100644
index 000000000..00df53d23
--- /dev/null
+++ b/gfx/skia/skia/src/images/SkGIFMovie.cpp
@@ -0,0 +1,451 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkMovie.h"
+#include "SkColor.h"
+#include "SkColorPriv.h"
+#include "SkStream.h"
+#include "SkTemplates.h"
+#include "SkUtils.h"
+
+#include "gif_lib.h"
+
+#if GIFLIB_MAJOR < 5 || (GIFLIB_MAJOR == 5 && GIFLIB_MINOR == 0)
+#define DGifCloseFile(a, b) DGifCloseFile(a)
+#endif
+
+class SkGIFMovie : public SkMovie {
+public:
+ SkGIFMovie(SkStream* stream);
+ virtual ~SkGIFMovie();
+
+protected:
+ virtual bool onGetInfo(Info*);
+ virtual bool onSetTime(SkMSec);
+ virtual bool onGetBitmap(SkBitmap*);
+
+private:
+ GifFileType* fGIF;
+ int fCurrIndex;
+ int fLastDrawIndex;
+ SkBitmap fBackup;
+ SkColor fPaintingColor;
+};
+
+static int Decode(GifFileType* fileType, GifByteType* out, int size) {
+ SkStream* stream = (SkStream*) fileType->UserData;
+ return (int) stream->read(out, size);
+}
+
+SkGIFMovie::SkGIFMovie(SkStream* stream)
+{
+#if GIFLIB_MAJOR < 5
+ fGIF = DGifOpen( stream, Decode );
+#else
+ fGIF = DGifOpen( stream, Decode, nullptr );
+#endif
+ if (nullptr == fGIF)
+ return;
+
+ if (DGifSlurp(fGIF) != GIF_OK)
+ {
+ DGifCloseFile(fGIF, nullptr);
+ fGIF = nullptr;
+ }
+ fCurrIndex = -1;
+ fLastDrawIndex = -1;
+ fPaintingColor = SkPackARGB32(0, 0, 0, 0);
+}
+
+SkGIFMovie::~SkGIFMovie()
+{
+ if (fGIF)
+ DGifCloseFile(fGIF, nullptr);
+}
+
+static SkMSec savedimage_duration(const SavedImage* image)
+{
+ for (int j = 0; j < image->ExtensionBlockCount; j++)
+ {
+ if (image->ExtensionBlocks[j].Function == GRAPHICS_EXT_FUNC_CODE)
+ {
+ SkASSERT(image->ExtensionBlocks[j].ByteCount >= 4);
+ const uint8_t* b = (const uint8_t*)image->ExtensionBlocks[j].Bytes;
+ return ((b[2] << 8) | b[1]) * 10;
+ }
+ }
+ return 0;
+}
+
+bool SkGIFMovie::onGetInfo(Info* info)
+{
+ if (nullptr == fGIF)
+ return false;
+
+ SkMSec dur = 0;
+ for (int i = 0; i < fGIF->ImageCount; i++)
+ dur += savedimage_duration(&fGIF->SavedImages[i]);
+
+ info->fDuration = dur;
+ info->fWidth = fGIF->SWidth;
+ info->fHeight = fGIF->SHeight;
+ info->fIsOpaque = false; // how to compute?
+ return true;
+}
+
+bool SkGIFMovie::onSetTime(SkMSec time)
+{
+ if (nullptr == fGIF)
+ return false;
+
+ SkMSec dur = 0;
+ for (int i = 0; i < fGIF->ImageCount; i++)
+ {
+ dur += savedimage_duration(&fGIF->SavedImages[i]);
+ if (dur >= time)
+ {
+ fCurrIndex = i;
+ return fLastDrawIndex != fCurrIndex;
+ }
+ }
+ fCurrIndex = fGIF->ImageCount - 1;
+ return true;
+}
+
+static void copyLine(uint32_t* dst, const unsigned char* src, const ColorMapObject* cmap,
+ int transparent, int width)
+{
+ for (; width > 0; width--, src++, dst++) {
+ if (*src != transparent) {
+ const GifColorType& col = cmap->Colors[*src];
+ *dst = SkPackARGB32(0xFF, col.Red, col.Green, col.Blue);
+ }
+ }
+}
+
+#if GIFLIB_MAJOR < 5
+static void copyInterlaceGroup(SkBitmap* bm, const unsigned char*& src,
+ const ColorMapObject* cmap, int transparent, int copyWidth,
+ int copyHeight, const GifImageDesc& imageDesc, int rowStep,
+ int startRow)
+{
+ int row;
+ // every 'rowStep'th row, starting with row 'startRow'
+ for (row = startRow; row < copyHeight; row += rowStep) {
+ uint32_t* dst = bm->getAddr32(imageDesc.Left, imageDesc.Top + row);
+ copyLine(dst, src, cmap, transparent, copyWidth);
+ src += imageDesc.Width;
+ }
+
+    // advance src past the rows of this interlace group that were not copied
+ src += imageDesc.Width * ((imageDesc.Height - row + rowStep - 1) / rowStep);
+}
+
+static void blitInterlace(SkBitmap* bm, const SavedImage* frame, const ColorMapObject* cmap,
+ int transparent)
+{
+ int width = bm->width();
+ int height = bm->height();
+ GifWord copyWidth = frame->ImageDesc.Width;
+ if (frame->ImageDesc.Left + copyWidth > width) {
+ copyWidth = width - frame->ImageDesc.Left;
+ }
+
+ GifWord copyHeight = frame->ImageDesc.Height;
+ if (frame->ImageDesc.Top + copyHeight > height) {
+ copyHeight = height - frame->ImageDesc.Top;
+ }
+
+ // deinterlace
+ const unsigned char* src = (unsigned char*)frame->RasterBits;
+
+ // group 1 - every 8th row, starting with row 0
+ copyInterlaceGroup(bm, src, cmap, transparent, copyWidth, copyHeight, frame->ImageDesc, 8, 0);
+
+ // group 2 - every 8th row, starting with row 4
+ copyInterlaceGroup(bm, src, cmap, transparent, copyWidth, copyHeight, frame->ImageDesc, 8, 4);
+
+ // group 3 - every 4th row, starting with row 2
+ copyInterlaceGroup(bm, src, cmap, transparent, copyWidth, copyHeight, frame->ImageDesc, 4, 2);
+
+ copyInterlaceGroup(bm, src, cmap, transparent, copyWidth, copyHeight, frame->ImageDesc, 2, 1);
+}
+#endif
+
+static void blitNormal(SkBitmap* bm, const SavedImage* frame, const ColorMapObject* cmap,
+ int transparent)
+{
+ int width = bm->width();
+ int height = bm->height();
+ const unsigned char* src = (unsigned char*)frame->RasterBits;
+ uint32_t* dst = bm->getAddr32(frame->ImageDesc.Left, frame->ImageDesc.Top);
+ GifWord copyWidth = frame->ImageDesc.Width;
+ if (frame->ImageDesc.Left + copyWidth > width) {
+ copyWidth = width - frame->ImageDesc.Left;
+ }
+
+ GifWord copyHeight = frame->ImageDesc.Height;
+ if (frame->ImageDesc.Top + copyHeight > height) {
+ copyHeight = height - frame->ImageDesc.Top;
+ }
+
+ for (; copyHeight > 0; copyHeight--) {
+ copyLine(dst, src, cmap, transparent, copyWidth);
+ src += frame->ImageDesc.Width;
+ dst += width;
+ }
+}
+
+static void fillRect(SkBitmap* bm, GifWord left, GifWord top, GifWord width, GifWord height,
+ uint32_t col)
+{
+ int bmWidth = bm->width();
+ int bmHeight = bm->height();
+ uint32_t* dst = bm->getAddr32(left, top);
+ GifWord copyWidth = width;
+ if (left + copyWidth > bmWidth) {
+ copyWidth = bmWidth - left;
+ }
+
+ GifWord copyHeight = height;
+ if (top + copyHeight > bmHeight) {
+ copyHeight = bmHeight - top;
+ }
+
+ for (; copyHeight > 0; copyHeight--) {
+ sk_memset32(dst, col, copyWidth);
+ dst += bmWidth;
+ }
+}
+
+static void drawFrame(SkBitmap* bm, const SavedImage* frame, const ColorMapObject* cmap)
+{
+ int transparent = -1;
+
+ for (int i = 0; i < frame->ExtensionBlockCount; ++i) {
+ ExtensionBlock* eb = frame->ExtensionBlocks + i;
+ if (eb->Function == GRAPHICS_EXT_FUNC_CODE &&
+ eb->ByteCount == 4) {
+ bool has_transparency = ((eb->Bytes[0] & 1) == 1);
+ if (has_transparency) {
+ transparent = (unsigned char)eb->Bytes[3];
+ }
+ }
+ }
+
+ if (frame->ImageDesc.ColorMap != nullptr) {
+ // use local color table
+ cmap = frame->ImageDesc.ColorMap;
+ }
+
+ if (cmap == nullptr || cmap->ColorCount != (1 << cmap->BitsPerPixel)) {
+ SkDEBUGFAIL("bad colortable setup");
+ return;
+ }
+
+#if GIFLIB_MAJOR < 5
+    // before GIFLIB 5, de-interlacing wasn't done by the library at load time
+ if (frame->ImageDesc.Interlace) {
+ blitInterlace(bm, frame, cmap, transparent);
+ return;
+ }
+#endif
+
+ blitNormal(bm, frame, cmap, transparent);
+}
+
+static bool checkIfWillBeCleared(const SavedImage* frame)
+{
+ for (int i = 0; i < frame->ExtensionBlockCount; ++i) {
+ ExtensionBlock* eb = frame->ExtensionBlocks + i;
+ if (eb->Function == GRAPHICS_EXT_FUNC_CODE &&
+ eb->ByteCount == 4) {
+ // check disposal method
+ int disposal = ((eb->Bytes[0] >> 2) & 7);
+ if (disposal == 2 || disposal == 3) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+static void getTransparencyAndDisposalMethod(const SavedImage* frame, bool* trans, int* disposal)
+{
+ *trans = false;
+ *disposal = 0;
+ for (int i = 0; i < frame->ExtensionBlockCount; ++i) {
+ ExtensionBlock* eb = frame->ExtensionBlocks + i;
+ if (eb->Function == GRAPHICS_EXT_FUNC_CODE &&
+ eb->ByteCount == 4) {
+ *trans = ((eb->Bytes[0] & 1) == 1);
+ *disposal = ((eb->Bytes[0] >> 2) & 7);
+ }
+ }
+}
+
+// return true if the area of 'target' completely covers the area of 'covered'
+static bool checkIfCover(const SavedImage* target, const SavedImage* covered)
+{
+ if (target->ImageDesc.Left <= covered->ImageDesc.Left
+ && covered->ImageDesc.Left + covered->ImageDesc.Width <=
+ target->ImageDesc.Left + target->ImageDesc.Width
+ && target->ImageDesc.Top <= covered->ImageDesc.Top
+ && covered->ImageDesc.Top + covered->ImageDesc.Height <=
+ target->ImageDesc.Top + target->ImageDesc.Height) {
+ return true;
+ }
+ return false;
+}
+
+static void disposeFrameIfNeeded(SkBitmap* bm, const SavedImage* cur, const SavedImage* next,
+ SkBitmap* backup, SkColor color)
+{
+    // We can skip the disposal step if the next frame is not transparent
+    // and completely covers the current area.
+ bool curTrans;
+ int curDisposal;
+ getTransparencyAndDisposalMethod(cur, &curTrans, &curDisposal);
+ bool nextTrans;
+ int nextDisposal;
+ getTransparencyAndDisposalMethod(next, &nextTrans, &nextDisposal);
+ if ((curDisposal == 2 || curDisposal == 3)
+ && (nextTrans || !checkIfCover(next, cur))) {
+ switch (curDisposal) {
+ // restore to background color
+ // -> 'background' means background under this image.
+ case 2:
+ fillRect(bm, cur->ImageDesc.Left, cur->ImageDesc.Top,
+ cur->ImageDesc.Width, cur->ImageDesc.Height,
+ color);
+ break;
+
+ // restore to previous
+ case 3:
+ bm->swap(*backup);
+ break;
+ }
+ }
+
+ // Save current image if next frame's disposal method == 3
+ if (nextDisposal == 3) {
+ const uint32_t* src = bm->getAddr32(0, 0);
+ uint32_t* dst = backup->getAddr32(0, 0);
+ int cnt = bm->width() * bm->height();
+ memcpy(dst, src, cnt*sizeof(uint32_t));
+ }
+}
+
+bool SkGIFMovie::onGetBitmap(SkBitmap* bm)
+{
+ const GifFileType* gif = fGIF;
+ if (nullptr == gif)
+ return false;
+
+ if (gif->ImageCount < 1) {
+ return false;
+ }
+
+ const int width = gif->SWidth;
+ const int height = gif->SHeight;
+ if (width <= 0 || height <= 0) {
+ return false;
+ }
+
+ // no need to draw
+ if (fLastDrawIndex >= 0 && fLastDrawIndex == fCurrIndex) {
+ return true;
+ }
+
+ int startIndex = fLastDrawIndex + 1;
+ if (fLastDrawIndex < 0 || !bm->readyToDraw()) {
+ // first time
+
+ startIndex = 0;
+
+ // create bitmap
+ if (!bm->tryAllocN32Pixels(width, height)) {
+ return false;
+ }
+ // create bitmap for backup
+ if (!fBackup.tryAllocN32Pixels(width, height)) {
+ return false;
+ }
+ } else if (startIndex > fCurrIndex) {
+ // rewind to 1st frame for repeat
+ startIndex = 0;
+ }
+
+ int lastIndex = fCurrIndex;
+ if (lastIndex < 0) {
+ // first time
+ lastIndex = 0;
+ } else if (lastIndex > fGIF->ImageCount - 1) {
+        // this block should never be reached.
+ lastIndex = fGIF->ImageCount - 1;
+ }
+
+ SkColor bgColor = SkPackARGB32(0, 0, 0, 0);
+ if (gif->SColorMap != nullptr) {
+ const GifColorType& col = gif->SColorMap->Colors[fGIF->SBackGroundColor];
+ bgColor = SkColorSetARGB(0xFF, col.Red, col.Green, col.Blue);
+ }
+
+    // draw each frame - not the most intelligent way
+ for (int i = startIndex; i <= lastIndex; i++) {
+ const SavedImage* cur = &fGIF->SavedImages[i];
+ if (i == 0) {
+ bool trans;
+ int disposal;
+ getTransparencyAndDisposalMethod(cur, &trans, &disposal);
+ if (!trans && gif->SColorMap != nullptr) {
+ fPaintingColor = bgColor;
+ } else {
+ fPaintingColor = SkColorSetARGB(0, 0, 0, 0);
+ }
+
+ bm->eraseColor(fPaintingColor);
+ fBackup.eraseColor(fPaintingColor);
+ } else {
+            // Dispose of the previous frame before moving to the next frame.
+ const SavedImage* prev = &fGIF->SavedImages[i-1];
+ disposeFrameIfNeeded(bm, prev, cur, &fBackup, fPaintingColor);
+ }
+
+ // Draw frame
+        // We can skip drawing this frame if it is not the last one and its
+        // disposal method is 2 or 3.
+ if (i == lastIndex || !checkIfWillBeCleared(cur)) {
+ drawFrame(bm, cur, gif->SColorMap);
+ }
+ }
+
+ // save index
+ fLastDrawIndex = lastIndex;
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkTRegistry.h"
+
+SkMovie* Factory(SkStreamRewindable* stream) {
+ char buf[GIF_STAMP_LEN];
+ if (stream->read(buf, GIF_STAMP_LEN) == GIF_STAMP_LEN) {
+ if (memcmp(GIF_STAMP, buf, GIF_STAMP_LEN) == 0 ||
+ memcmp(GIF87_STAMP, buf, GIF_STAMP_LEN) == 0 ||
+ memcmp(GIF89_STAMP, buf, GIF_STAMP_LEN) == 0) {
+            // must rewind here, since our constructor wants to re-read the data
+ stream->rewind();
+ return new SkGIFMovie(stream);
+ }
+ }
+ return nullptr;
+}
+
+static SkTRegistry<SkMovie*(*)(SkStreamRewindable*)> gReg(Factory);
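
For orientation, a hedged sketch of how the SkMovie front end (added later in this patch, in SkMovie.cpp) drives this GIF backend; the file name and frame stepping are illustrative only:

    SkMovie* movie = SkMovie::DecodeFile("animation.gif");   // dispatches to the GIF factory above
    if (movie) {
        for (SkMSec t = 0; t < movie->duration(); t += 40) {
            movie->setTime(t);                                // onSetTime() selects the frame index
            const SkBitmap& frame = movie->bitmap();          // onGetBitmap() composites the frame
            (void)frame;                                      // draw or inspect the frame here
        }
        movie->unref();   // SkMovie derives from SkRefCnt elsewhere in the tree (assumed here)
    }
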
diff --git a/gfx/skia/skia/src/images/SkImageEncoder.cpp b/gfx/skia/skia/src/images/SkImageEncoder.cpp
new file mode 100644
index 000000000..023885f45
--- /dev/null
+++ b/gfx/skia/skia/src/images/SkImageEncoder.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkImageEncoder.h"
+#include "SkBitmap.h"
+#include "SkPixelSerializer.h"
+#include "SkPixmap.h"
+#include "SkStream.h"
+#include "SkTemplates.h"
+
+SkImageEncoder::~SkImageEncoder() {}
+
+bool SkImageEncoder::encodeStream(SkWStream* stream, const SkBitmap& bm,
+ int quality) {
+ quality = SkMin32(100, SkMax32(0, quality));
+ return this->onEncode(stream, bm, quality);
+}
+
+bool SkImageEncoder::encodeFile(const char file[], const SkBitmap& bm,
+ int quality) {
+ quality = SkMin32(100, SkMax32(0, quality));
+ SkFILEWStream stream(file);
+ return this->onEncode(&stream, bm, quality);
+}
+
+SkData* SkImageEncoder::encodeData(const SkBitmap& bm, int quality) {
+ SkDynamicMemoryWStream stream;
+ quality = SkMin32(100, SkMax32(0, quality));
+ if (this->onEncode(&stream, bm, quality)) {
+ return stream.detachAsData().release();
+ }
+ return nullptr;
+}
+
+bool SkImageEncoder::EncodeFile(const char file[], const SkBitmap& bm, Type t,
+ int quality) {
+ SkAutoTDelete<SkImageEncoder> enc(SkImageEncoder::Create(t));
+ return enc.get() && enc.get()->encodeFile(file, bm, quality);
+}
+
+bool SkImageEncoder::EncodeStream(SkWStream* stream, const SkBitmap& bm, Type t,
+ int quality) {
+ SkAutoTDelete<SkImageEncoder> enc(SkImageEncoder::Create(t));
+ return enc.get() && enc.get()->encodeStream(stream, bm, quality);
+}
+
+SkData* SkImageEncoder::EncodeData(const SkBitmap& bm, Type t, int quality) {
+ SkAutoTDelete<SkImageEncoder> enc(SkImageEncoder::Create(t));
+ return enc.get() ? enc.get()->encodeData(bm, quality) : nullptr;
+}
+
+SkData* SkImageEncoder::EncodeData(const SkImageInfo& info, const void* pixels, size_t rowBytes,
+ Type t, int quality) {
+ SkBitmap bm;
+ if (!bm.installPixels(info, const_cast<void*>(pixels), rowBytes)) {
+ return nullptr;
+ }
+ bm.setImmutable();
+ return SkImageEncoder::EncodeData(bm, t, quality);
+}
+
+SkData* SkImageEncoder::EncodeData(const SkPixmap& pixmap,
+ Type t, int quality) {
+ SkBitmap bm;
+ if (!bm.installPixels(pixmap)) {
+ return nullptr;
+ }
+ bm.setImmutable();
+ return SkImageEncoder::EncodeData(bm, t, quality);
+}
+
+namespace {
+class ImageEncoderPixelSerializer final : public SkPixelSerializer {
+protected:
+ bool onUseEncodedData(const void*, size_t) override { return true; }
+ SkData* onEncode(const SkPixmap& pmap) override {
+ return SkImageEncoder::EncodeData(pmap, SkImageEncoder::kPNG_Type, 100);
+ }
+};
+} // namespace
+
+SkPixelSerializer* SkImageEncoder::CreatePixelSerializer() {
+ return new ImageEncoderPixelSerializer;
+}
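
A short sketch of the static convenience entry points above; the bitmap is assumed to be populated elsewhere, and the output path and quality values are illustrative:

    SkBitmap bitmap;   // assumed to be allocated and drawn into already
    bool ok = SkImageEncoder::EncodeFile("out.png", bitmap, SkImageEncoder::kPNG_Type, 100);
    sk_sp<SkData> encoded(SkImageEncoder::EncodeData(bitmap, SkImageEncoder::kPNG_Type, 80));
    if (!ok || !encoded) {
        // Fails if no encoder of the requested type is registered or the bitmap is unsupported.
    }
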
diff --git a/gfx/skia/skia/src/images/SkImageEncoder_Factory.cpp b/gfx/skia/skia/src/images/SkImageEncoder_Factory.cpp
new file mode 100644
index 000000000..887ce55dd
--- /dev/null
+++ b/gfx/skia/skia/src/images/SkImageEncoder_Factory.cpp
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkImageEncoder.h"
+
+template SkImageEncoder_EncodeReg* SkImageEncoder_EncodeReg::gHead;
+
+SkImageEncoder* SkImageEncoder::Create(Type t) {
+ SkImageEncoder* codec = nullptr;
+ const SkImageEncoder_EncodeReg* curr = SkImageEncoder_EncodeReg::Head();
+ while (curr) {
+ if ((codec = curr->factory()(t)) != nullptr) {
+ return codec;
+ }
+ curr = curr->next();
+ }
+ return nullptr;
+}
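
The registry walk above is fed by the static SkImageEncoder_EncodeReg objects in the individual encoder files (for example gEReg in SkJPEGImageEncoder.cpp later in this patch). A minimal sketch of requesting an encoder through it; the stream, bitmap, and quality are assumed inputs:

    SkAutoTDelete<SkImageEncoder> encoder(SkImageEncoder::Create(SkImageEncoder::kJPEG_Type));
    if (encoder) {
        encoder->encodeStream(&wstream, bitmap, /*quality=*/90);   // wstream and bitmap are assumed
    }
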
diff --git a/gfx/skia/skia/src/images/SkJPEGImageEncoder.cpp b/gfx/skia/skia/src/images/SkJPEGImageEncoder.cpp
new file mode 100644
index 000000000..66b2440c2
--- /dev/null
+++ b/gfx/skia/skia/src/images/SkJPEGImageEncoder.cpp
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkImageEncoder.h"
+#include "SkColorPriv.h"
+#include "SkDither.h"
+#include "SkStream.h"
+#include "SkTemplates.h"
+#include "SkTime.h"
+#include "SkUtils.h"
+#include "SkRect.h"
+#include "SkCanvas.h"
+
+
+#include <stdio.h>
+#include "SkJPEGWriteUtility.h"
+extern "C" {
+ #include "jpeglib.h"
+ #include "jerror.h"
+}
+
+// This enables timing code that reports milliseconds for an encoding
+//#define TIME_ENCODE
+
+typedef void (*WriteScanline)(uint8_t* SK_RESTRICT dst,
+ const void* SK_RESTRICT src, int width,
+ const SkPMColor* SK_RESTRICT ctable);
+
+static void Write_32_RGB(uint8_t* SK_RESTRICT dst,
+ const void* SK_RESTRICT srcRow, int width,
+ const SkPMColor*) {
+ const uint32_t* SK_RESTRICT src = (const uint32_t*)srcRow;
+ while (--width >= 0) {
+ uint32_t c = *src++;
+ dst[0] = SkGetPackedR32(c);
+ dst[1] = SkGetPackedG32(c);
+ dst[2] = SkGetPackedB32(c);
+ dst += 3;
+ }
+}
+
+static void Write_4444_RGB(uint8_t* SK_RESTRICT dst,
+ const void* SK_RESTRICT srcRow, int width,
+ const SkPMColor*) {
+ const SkPMColor16* SK_RESTRICT src = (const SkPMColor16*)srcRow;
+ while (--width >= 0) {
+ SkPMColor16 c = *src++;
+ dst[0] = SkPacked4444ToR32(c);
+ dst[1] = SkPacked4444ToG32(c);
+ dst[2] = SkPacked4444ToB32(c);
+ dst += 3;
+ }
+}
+
+static void Write_16_RGB(uint8_t* SK_RESTRICT dst,
+ const void* SK_RESTRICT srcRow, int width,
+ const SkPMColor*) {
+ const uint16_t* SK_RESTRICT src = (const uint16_t*)srcRow;
+ while (--width >= 0) {
+ uint16_t c = *src++;
+ dst[0] = SkPacked16ToR32(c);
+ dst[1] = SkPacked16ToG32(c);
+ dst[2] = SkPacked16ToB32(c);
+ dst += 3;
+ }
+}
+
+static void Write_Index_RGB(uint8_t* SK_RESTRICT dst,
+ const void* SK_RESTRICT srcRow, int width,
+ const SkPMColor* SK_RESTRICT ctable) {
+ const uint8_t* SK_RESTRICT src = (const uint8_t*)srcRow;
+ while (--width >= 0) {
+ uint32_t c = ctable[*src++];
+ dst[0] = SkGetPackedR32(c);
+ dst[1] = SkGetPackedG32(c);
+ dst[2] = SkGetPackedB32(c);
+ dst += 3;
+ }
+}
+
+static WriteScanline ChooseWriter(const SkBitmap& bm) {
+ switch (bm.colorType()) {
+ case kN32_SkColorType:
+ return Write_32_RGB;
+ case kRGB_565_SkColorType:
+ return Write_16_RGB;
+ case kARGB_4444_SkColorType:
+ return Write_4444_RGB;
+ case kIndex_8_SkColorType:
+ return Write_Index_RGB;
+ default:
+ return nullptr;
+ }
+}
+
+class SkJPEGImageEncoder : public SkImageEncoder {
+protected:
+ virtual bool onEncode(SkWStream* stream, const SkBitmap& bm, int quality) {
+#ifdef TIME_ENCODE
+ SkAutoTime atm("JPEG Encode");
+#endif
+
+ SkAutoLockPixels alp(bm);
+ if (nullptr == bm.getPixels()) {
+ return false;
+ }
+
+ jpeg_compress_struct cinfo;
+ skjpeg_error_mgr sk_err;
+ skjpeg_destination_mgr sk_wstream(stream);
+
+        // allocate these before the call to setjmp
+ SkAutoTMalloc<uint8_t> oneRow;
+
+ cinfo.err = jpeg_std_error(&sk_err);
+ sk_err.error_exit = skjpeg_error_exit;
+ if (setjmp(sk_err.fJmpBuf)) {
+ return false;
+ }
+
+ // Keep after setjmp or mark volatile.
+ const WriteScanline writer = ChooseWriter(bm);
+ if (nullptr == writer) {
+ return false;
+ }
+
+ jpeg_create_compress(&cinfo);
+ cinfo.dest = &sk_wstream;
+ cinfo.image_width = bm.width();
+ cinfo.image_height = bm.height();
+ cinfo.input_components = 3;
+
+ // FIXME: Can we take advantage of other in_color_spaces in libjpeg-turbo?
+ cinfo.in_color_space = JCS_RGB;
+
+ // The gamma value is ignored by libjpeg-turbo.
+ cinfo.input_gamma = 1;
+
+ jpeg_set_defaults(&cinfo);
+
+ // Tells libjpeg-turbo to compute optimal Huffman coding tables
+ // for the image. This improves compression at the cost of
+ // slower encode performance.
+ cinfo.optimize_coding = TRUE;
+ jpeg_set_quality(&cinfo, quality, TRUE /* limit to baseline-JPEG values */);
+
+ jpeg_start_compress(&cinfo, TRUE);
+
+ const int width = bm.width();
+ uint8_t* oneRowP = oneRow.reset(width * 3);
+
+ const SkPMColor* colors = bm.getColorTable() ? bm.getColorTable()->readColors() : nullptr;
+ const void* srcRow = bm.getPixels();
+
+ while (cinfo.next_scanline < cinfo.image_height) {
+ JSAMPROW row_pointer[1]; /* pointer to JSAMPLE row[s] */
+
+ writer(oneRowP, srcRow, width, colors);
+ row_pointer[0] = oneRowP;
+ (void) jpeg_write_scanlines(&cinfo, row_pointer, 1);
+ srcRow = (const void*)((const char*)srcRow + bm.rowBytes());
+ }
+
+ jpeg_finish_compress(&cinfo);
+ jpeg_destroy_compress(&cinfo);
+
+ return true;
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+DEFINE_ENCODER_CREATOR(JPEGImageEncoder);
+///////////////////////////////////////////////////////////////////////////////
+
+static SkImageEncoder* sk_libjpeg_efactory(SkImageEncoder::Type t) {
+ return (SkImageEncoder::kJPEG_Type == t) ? new SkJPEGImageEncoder : nullptr;
+}
+
+static SkImageEncoder_EncodeReg gEReg(sk_libjpeg_efactory);
diff --git a/gfx/skia/skia/src/images/SkJPEGWriteUtility.cpp b/gfx/skia/skia/src/images/SkJPEGWriteUtility.cpp
new file mode 100644
index 000000000..aa0d3ec4d
--- /dev/null
+++ b/gfx/skia/skia/src/images/SkJPEGWriteUtility.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkJPEGWriteUtility.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void sk_init_destination(j_compress_ptr cinfo) {
+ skjpeg_destination_mgr* dest = (skjpeg_destination_mgr*)cinfo->dest;
+
+ dest->next_output_byte = dest->fBuffer;
+ dest->free_in_buffer = skjpeg_destination_mgr::kBufferSize;
+}
+
+static boolean sk_empty_output_buffer(j_compress_ptr cinfo) {
+ skjpeg_destination_mgr* dest = (skjpeg_destination_mgr*)cinfo->dest;
+
+// if (!dest->fStream->write(dest->fBuffer, skjpeg_destination_mgr::kBufferSize - dest->free_in_buffer))
+ if (!dest->fStream->write(dest->fBuffer,
+ skjpeg_destination_mgr::kBufferSize)) {
+ ERREXIT(cinfo, JERR_FILE_WRITE);
+ return false;
+ }
+
+ dest->next_output_byte = dest->fBuffer;
+ dest->free_in_buffer = skjpeg_destination_mgr::kBufferSize;
+ return TRUE;
+}
+
+static void sk_term_destination (j_compress_ptr cinfo) {
+ skjpeg_destination_mgr* dest = (skjpeg_destination_mgr*)cinfo->dest;
+
+ size_t size = skjpeg_destination_mgr::kBufferSize - dest->free_in_buffer;
+ if (size > 0) {
+ if (!dest->fStream->write(dest->fBuffer, size)) {
+ ERREXIT(cinfo, JERR_FILE_WRITE);
+ return;
+ }
+ }
+ dest->fStream->flush();
+}
+
+skjpeg_destination_mgr::skjpeg_destination_mgr(SkWStream* stream)
+ : fStream(stream) {
+ this->init_destination = sk_init_destination;
+ this->empty_output_buffer = sk_empty_output_buffer;
+ this->term_destination = sk_term_destination;
+}
+
+void skjpeg_error_exit(j_common_ptr cinfo) {
+ skjpeg_error_mgr* error = (skjpeg_error_mgr*)cinfo->err;
+
+ (*error->output_message) (cinfo);
+
+ /* Let the memory manager delete any temp files before we die */
+ jpeg_destroy(cinfo);
+
+ longjmp(error->fJmpBuf, -1);
+}
diff --git a/gfx/skia/skia/src/images/SkJPEGWriteUtility.h b/gfx/skia/skia/src/images/SkJPEGWriteUtility.h
new file mode 100644
index 000000000..91d07a361
--- /dev/null
+++ b/gfx/skia/skia/src/images/SkJPEGWriteUtility.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkJpegUtility_DEFINED
+#define SkJpegUtility_DEFINED
+
+#include "SkStream.h"
+
+extern "C" {
+ #include "jpeglib.h"
+ #include "jerror.h"
+}
+
+#include <setjmp.h>
+
+/* Our error-handling struct. */
+struct skjpeg_error_mgr : jpeg_error_mgr {
+ jmp_buf fJmpBuf;
+};
+
+
+void skjpeg_error_exit(j_common_ptr cinfo);
+
+/////////////////////////////////////////////////////////////////////////////
+/* Our destination struct for directing compressed JPEG output to our stream
+ * object.
+ */
+struct skjpeg_destination_mgr : jpeg_destination_mgr {
+ skjpeg_destination_mgr(SkWStream* stream);
+
+ SkWStream* fStream;
+
+ enum {
+ kBufferSize = 1024
+ };
+ uint8_t fBuffer[kBufferSize];
+};
+
+#endif
diff --git a/gfx/skia/skia/src/images/SkKTXImageEncoder.cpp b/gfx/skia/skia/src/images/SkKTXImageEncoder.cpp
new file mode 100644
index 000000000..078cec6b4
--- /dev/null
+++ b/gfx/skia/skia/src/images/SkKTXImageEncoder.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkColorPriv.h"
+#include "SkImageEncoder.h"
+#include "SkImageGenerator.h"
+#include "SkPixelRef.h"
+#include "SkStream.h"
+#include "SkStreamPriv.h"
+#include "SkTypes.h"
+
+#include "ktx.h"
+#include "etc1.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+// KTX Image Encoder
+//
+// KTX is a general texture data storage file format ratified by the Khronos Group. As an
+// overview, a KTX file contains all of the appropriate values needed to fully specify a
+// texture in an OpenGL application, including the use of compressed data.
+//
+// This encoder takes a best guess at how to encode the bitmap passed to it. If
+// there is an installed discardable pixel ref with existing PKM data, then we
+// will repurpose the existing ETC1 data into a KTX file. If the data contains
+// KTX data, then we simply return a copy of the same data. For all other files,
+// the underlying KTX library tries to do its best to encode the appropriate
+// data specified by the bitmap based on the config (e.g. kAlpha8_Config will
+// be represented as a full-resolution 8-bit image dump with the appropriate
+// OpenGL defines in the header).
+
+class SkKTXImageEncoder : public SkImageEncoder {
+protected:
+ bool onEncode(SkWStream* stream, const SkBitmap& bm, int quality) override;
+
+private:
+ virtual bool encodePKM(SkWStream* stream, const SkData *data);
+ typedef SkImageEncoder INHERITED;
+};
+
+bool SkKTXImageEncoder::onEncode(SkWStream* stream, const SkBitmap& bitmap, int) {
+ if (!bitmap.pixelRef()) {
+ return false;
+ }
+ sk_sp<SkData> data(bitmap.pixelRef()->refEncodedData());
+
+ // Is this even encoded data?
+ if (data) {
+ const uint8_t *bytes = data->bytes();
+ if (etc1_pkm_is_valid(bytes)) {
+ return this->encodePKM(stream, data.get());
+ }
+
+ // Is it a KTX file??
+ if (SkKTXFile::is_ktx(bytes, data->size())) {
+ return stream->write(bytes, data->size());
+ }
+
+ // If it's neither a KTX nor a PKM, then we need to
+ // get at the actual pixels, so fall through and decompress...
+ }
+
+ return SkKTXFile::WriteBitmapToKTX(stream, bitmap);
+}
+
+bool SkKTXImageEncoder::encodePKM(SkWStream* stream, const SkData *data) {
+ const uint8_t* bytes = data->bytes();
+ SkASSERT(etc1_pkm_is_valid(bytes));
+
+ etc1_uint32 width = etc1_pkm_get_width(bytes);
+ etc1_uint32 height = etc1_pkm_get_height(bytes);
+
+ // ETC1 Data is stored as compressed 4x4 pixel blocks, so we must make sure
+ // that our dimensions are valid.
+ if (width == 0 || (width & 3) != 0 || height == 0 || (height & 3) != 0) {
+ return false;
+ }
+
+ // Advance pointer to etc1 data.
+ bytes += ETC_PKM_HEADER_SIZE;
+
+ return SkKTXFile::WriteETC1ToKTX(stream, bytes, width, height);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////
+DEFINE_ENCODER_CREATOR(KTXImageEncoder);
+/////////////////////////////////////////////////////////////////////////////////////////
+
+SkImageEncoder* sk_libktx_efactory(SkImageEncoder::Type t) {
+ return (SkImageEncoder::kKTX_Type == t) ? new SkKTXImageEncoder : nullptr;
+}
+
+static SkImageEncoder_EncodeReg gEReg(sk_libktx_efactory);
diff --git a/gfx/skia/skia/src/images/SkMovie.cpp b/gfx/skia/skia/src/images/SkMovie.cpp
new file mode 100644
index 000000000..a0a37dcff
--- /dev/null
+++ b/gfx/skia/skia/src/images/SkMovie.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkMovie.h"
+#include "SkCanvas.h"
+#include "SkPaint.h"
+
+// We should never see this in normal operation since our time values are
+// 0-based. So we use it as a sentinel.
+#define UNINITIALIZED_MSEC ((SkMSec)-1)
+
+SkMovie::SkMovie()
+{
+ fInfo.fDuration = UNINITIALIZED_MSEC; // uninitialized
+ fCurrTime = UNINITIALIZED_MSEC; // uninitialized
+ fNeedBitmap = true;
+}
+
+void SkMovie::ensureInfo()
+{
+ if (fInfo.fDuration == UNINITIALIZED_MSEC && !this->onGetInfo(&fInfo))
+ memset(&fInfo, 0, sizeof(fInfo)); // failure
+}
+
+SkMSec SkMovie::duration()
+{
+ this->ensureInfo();
+ return fInfo.fDuration;
+}
+
+int SkMovie::width()
+{
+ this->ensureInfo();
+ return fInfo.fWidth;
+}
+
+int SkMovie::height()
+{
+ this->ensureInfo();
+ return fInfo.fHeight;
+}
+
+int SkMovie::isOpaque()
+{
+ this->ensureInfo();
+ return fInfo.fIsOpaque;
+}
+
+bool SkMovie::setTime(SkMSec time)
+{
+ SkMSec dur = this->duration();
+ if (time > dur)
+ time = dur;
+
+ bool changed = false;
+ if (time != fCurrTime)
+ {
+ fCurrTime = time;
+ changed = this->onSetTime(time);
+ fNeedBitmap |= changed;
+ }
+ return changed;
+}
+
+const SkBitmap& SkMovie::bitmap()
+{
+ if (fCurrTime == UNINITIALIZED_MSEC) // uninitialized
+ this->setTime(0);
+
+ if (fNeedBitmap)
+ {
+ if (!this->onGetBitmap(&fBitmap)) // failure
+ fBitmap.reset();
+ fNeedBitmap = false;
+ }
+ return fBitmap;
+}
+
+////////////////////////////////////////////////////////////////////
+
+#include "SkStream.h"
+
+SkMovie* SkMovie::DecodeMemory(const void* data, size_t length) {
+ SkMemoryStream stream(data, length, false);
+ return SkMovie::DecodeStream(&stream);
+}
+
+SkMovie* SkMovie::DecodeFile(const char path[]) {
+ std::unique_ptr<SkStreamRewindable> stream = SkStream::MakeFromFile(path);
+ return stream ? SkMovie::DecodeStream(stream.get()) : nullptr;
+}
diff --git a/gfx/skia/skia/src/images/SkMovie_FactoryDefault.cpp b/gfx/skia/skia/src/images/SkMovie_FactoryDefault.cpp
new file mode 100644
index 000000000..84ae78a88
--- /dev/null
+++ b/gfx/skia/skia/src/images/SkMovie_FactoryDefault.cpp
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMovie.h"
+#include "SkStream.h"
+
+typedef SkTRegistry<SkMovie*(*)(SkStreamRewindable*)> MovieReg;
+
+SkMovie* SkMovie::DecodeStream(SkStreamRewindable* stream) {
+ const MovieReg* curr = MovieReg::Head();
+ while (curr) {
+ SkMovie* movie = curr->factory()(stream);
+ if (movie) {
+ return movie;
+ }
+        // we must rewind only if we got nullptr, since we gave the stream to the
+        // movie, which may have already started reading from it
+ stream->rewind();
+ curr = curr->next();
+ }
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/images/SkPNGImageEncoder.cpp b/gfx/skia/skia/src/images/SkPNGImageEncoder.cpp
new file mode 100644
index 000000000..69a53fb2a
--- /dev/null
+++ b/gfx/skia/skia/src/images/SkPNGImageEncoder.cpp
@@ -0,0 +1,362 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkImageEncoder.h"
+#include "SkColor.h"
+#include "SkColorPriv.h"
+#include "SkDither.h"
+#include "SkMath.h"
+#include "SkStream.h"
+#include "SkTemplates.h"
+#include "SkUtils.h"
+#include "transform_scanline.h"
+
+#include "png.h"
+
+/* These were dropped in libpng >= 1.4 */
+#ifndef png_infopp_NULL
+#define png_infopp_NULL nullptr
+#endif
+
+#ifndef png_bytepp_NULL
+#define png_bytepp_NULL nullptr
+#endif
+
+#ifndef int_p_NULL
+#define int_p_NULL nullptr
+#endif
+
+#ifndef png_flush_ptr_NULL
+#define png_flush_ptr_NULL nullptr
+#endif
+
+#define DEFAULT_FOR_SUPPRESS_PNG_IMAGE_DECODER_WARNINGS true
+// Suppress most PNG warnings when calling image decode functions.
+static const bool c_suppressPNGImageDecoderWarnings{
+ DEFAULT_FOR_SUPPRESS_PNG_IMAGE_DECODER_WARNINGS};
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkColorPriv.h"
+#include "SkUnPreMultiply.h"
+
+static void sk_error_fn(png_structp png_ptr, png_const_charp msg) {
+ if (!c_suppressPNGImageDecoderWarnings) {
+ SkDEBUGF(("------ png error %s\n", msg));
+ }
+ longjmp(png_jmpbuf(png_ptr), 1);
+}
+
+static void sk_write_fn(png_structp png_ptr, png_bytep data, png_size_t len) {
+ SkWStream* sk_stream = (SkWStream*)png_get_io_ptr(png_ptr);
+ if (!sk_stream->write(data, len)) {
+ png_error(png_ptr, "sk_write_fn Error!");
+ }
+}
+
+static transform_scanline_proc choose_proc(SkColorType ct, SkAlphaType alphaType) {
+ static const struct {
+ SkColorType fColorType;
+ SkAlphaType fAlphaType;
+ transform_scanline_proc fProc;
+ } gMap[] = {
+ { kRGB_565_SkColorType, kOpaque_SkAlphaType, transform_scanline_565 },
+ { kRGBA_8888_SkColorType, kOpaque_SkAlphaType, transform_scanline_RGBX },
+ { kBGRA_8888_SkColorType, kOpaque_SkAlphaType, transform_scanline_BGRX },
+ { kRGBA_8888_SkColorType, kPremul_SkAlphaType, transform_scanline_rgbA },
+ { kBGRA_8888_SkColorType, kPremul_SkAlphaType, transform_scanline_bgrA },
+ { kRGBA_8888_SkColorType, kUnpremul_SkAlphaType, transform_scanline_memcpy },
+ { kBGRA_8888_SkColorType, kUnpremul_SkAlphaType, transform_scanline_BGRA },
+ { kARGB_4444_SkColorType, kOpaque_SkAlphaType, transform_scanline_444 },
+ { kARGB_4444_SkColorType, kPremul_SkAlphaType, transform_scanline_4444 },
+ { kIndex_8_SkColorType, kOpaque_SkAlphaType, transform_scanline_memcpy },
+ { kIndex_8_SkColorType, kPremul_SkAlphaType, transform_scanline_memcpy },
+ { kIndex_8_SkColorType, kUnpremul_SkAlphaType, transform_scanline_memcpy },
+ { kGray_8_SkColorType, kOpaque_SkAlphaType, transform_scanline_memcpy },
+ };
+
+ for (auto entry : gMap) {
+ if (entry.fColorType == ct && entry.fAlphaType == alphaType) {
+ return entry.fProc;
+ }
+ }
+ sk_throw();
+ return nullptr;
+}
+
+// return the minimum legal bit depth (by PNG standards) for this many colortable
+// entries. SkBitmap always stores 8 bits per pixel, but for colorCount <= 16,
+// we can use fewer bits per pixel in PNG.
+static int computeBitDepth(int colorCount) {
+#if 0
+ int bits = SkNextLog2(colorCount);
+ SkASSERT(bits >= 1 && bits <= 8);
+ // now we need bits itself to be a power of 2 (e.g. 1, 2, 4, 8)
+ return SkNextPow2(bits);
+#else
+ // for the moment, we don't know how to pack bitdepth < 8
+ return 8;
+#endif
+}
+
+/* Pack palette[] with the corresponding colors, and if the image has alpha, also
+   pack alphas[] and return the number of alphas[] entries written. If the image is
+   opaque, the return value will always be 0.
+*/
+static inline int pack_palette(SkColorTable* ctable, png_color* SK_RESTRICT palette,
+ png_byte* SK_RESTRICT alphas, SkAlphaType alphaType) {
+ const SkPMColor* SK_RESTRICT colors = ctable->readColors();
+ const int count = ctable->count();
+ int numWithAlpha = 0;
+ if (kOpaque_SkAlphaType != alphaType) {
+ auto getUnpremulColor = [alphaType](uint8_t color, uint8_t alpha) {
+ if (kPremul_SkAlphaType == alphaType) {
+ const SkUnPreMultiply::Scale* table = SkUnPreMultiply::GetScaleTable();
+ const SkUnPreMultiply::Scale scale = table[alpha];
+ return (uint8_t) SkUnPreMultiply::ApplyScale(scale, color);
+ } else {
+ return color;
+ }
+ };
+
+ // PNG requires that all non-opaque colors come first in the palette. Write these first.
+ for (int i = 0; i < count; i++) {
+ uint8_t alpha = SkGetPackedA32(colors[i]);
+ if (0xFF != alpha) {
+ alphas[numWithAlpha] = alpha;
+ palette[numWithAlpha].red = getUnpremulColor(SkGetPackedR32(colors[i]), alpha);
+ palette[numWithAlpha].green = getUnpremulColor(SkGetPackedG32(colors[i]), alpha);
+ palette[numWithAlpha].blue = getUnpremulColor(SkGetPackedB32(colors[i]), alpha);
+ numWithAlpha++;
+ }
+ }
+
+ }
+
+ if (0 == numWithAlpha) {
+ // All of the entries are opaque.
+ for (int i = 0; i < count; i++) {
+ SkPMColor c = *colors++;
+ palette[i].red = SkGetPackedR32(c);
+ palette[i].green = SkGetPackedG32(c);
+ palette[i].blue = SkGetPackedB32(c);
+ }
+ } else {
+ // We have already written the non-opaque colors. Now just write the opaque colors.
+ int currIndex = numWithAlpha;
+ int i = 0;
+ while (currIndex != count) {
+ uint8_t alpha = SkGetPackedA32(colors[i]);
+ if (0xFF == alpha) {
+ palette[currIndex].red = SkGetPackedR32(colors[i]);
+ palette[currIndex].green = SkGetPackedG32(colors[i]);
+ palette[currIndex].blue = SkGetPackedB32(colors[i]);
+ currIndex++;
+ }
+
+ i++;
+ }
+ }
+
+ return numWithAlpha;
+}
+
+class SkPNGImageEncoder : public SkImageEncoder {
+protected:
+ bool onEncode(SkWStream* stream, const SkBitmap& bm, int quality) override;
+private:
+ bool doEncode(SkWStream* stream, const SkBitmap& bm,
+ SkAlphaType alphaType, int colorType,
+ int bitDepth, SkColorType ct,
+ png_color_8& sig_bit);
+
+ typedef SkImageEncoder INHERITED;
+};
+
+bool SkPNGImageEncoder::onEncode(SkWStream* stream,
+ const SkBitmap& bitmap,
+ int /*quality*/) {
+ const SkColorType ct = bitmap.colorType();
+ switch (ct) {
+ case kIndex_8_SkColorType:
+ case kGray_8_SkColorType:
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ case kARGB_4444_SkColorType:
+ case kRGB_565_SkColorType:
+ break;
+ default:
+ return false;
+ }
+
+ const SkAlphaType alphaType = bitmap.alphaType();
+ switch (alphaType) {
+ case kUnpremul_SkAlphaType:
+ if (kARGB_4444_SkColorType == ct) {
+ return false;
+ }
+
+ break;
+ case kOpaque_SkAlphaType:
+ case kPremul_SkAlphaType:
+ break;
+ default:
+ return false;
+ }
+
+ const bool isOpaque = (kOpaque_SkAlphaType == alphaType);
+ int bitDepth = 8; // default for color
+ png_color_8 sig_bit;
+ sk_bzero(&sig_bit, sizeof(png_color_8));
+
+ int colorType;
+ switch (ct) {
+ case kIndex_8_SkColorType:
+ sig_bit.red = 8;
+ sig_bit.green = 8;
+ sig_bit.blue = 8;
+ sig_bit.alpha = 8;
+ colorType = PNG_COLOR_TYPE_PALETTE;
+ break;
+ case kGray_8_SkColorType:
+ sig_bit.gray = 8;
+ colorType = PNG_COLOR_TYPE_GRAY;
+ SkASSERT(isOpaque);
+ break;
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ sig_bit.red = 8;
+ sig_bit.green = 8;
+ sig_bit.blue = 8;
+ sig_bit.alpha = 8;
+ colorType = isOpaque ? PNG_COLOR_TYPE_RGB : PNG_COLOR_TYPE_RGB_ALPHA;
+ break;
+ case kARGB_4444_SkColorType:
+ sig_bit.red = 4;
+ sig_bit.green = 4;
+ sig_bit.blue = 4;
+ sig_bit.alpha = 4;
+ colorType = isOpaque ? PNG_COLOR_TYPE_RGB : PNG_COLOR_TYPE_RGB_ALPHA;
+ break;
+ case kRGB_565_SkColorType:
+ sig_bit.red = 5;
+ sig_bit.green = 6;
+ sig_bit.blue = 5;
+ colorType = PNG_COLOR_TYPE_RGB;
+ SkASSERT(isOpaque);
+ break;
+ default:
+ return false;
+ }
+
+ SkAutoLockPixels alp(bitmap);
+ // readyToDraw checks for pixels (and colortable if that is required)
+ if (!bitmap.readyToDraw()) {
+ return false;
+ }
+
+ // we must do this after we have locked the pixels
+ SkColorTable* ctable = bitmap.getColorTable();
+ if (ctable) {
+ if (ctable->count() == 0) {
+ return false;
+ }
+ // check if we can store in fewer than 8 bits
+ bitDepth = computeBitDepth(ctable->count());
+ }
+
+ return doEncode(stream, bitmap, alphaType, colorType, bitDepth, ct, sig_bit);
+}
+
+bool SkPNGImageEncoder::doEncode(SkWStream* stream, const SkBitmap& bitmap,
+ SkAlphaType alphaType, int colorType,
+ int bitDepth, SkColorType ct,
+ png_color_8& sig_bit) {
+
+ png_structp png_ptr;
+ png_infop info_ptr;
+
+ png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, nullptr, sk_error_fn,
+ nullptr);
+ if (nullptr == png_ptr) {
+ return false;
+ }
+
+ info_ptr = png_create_info_struct(png_ptr);
+ if (nullptr == info_ptr) {
+ png_destroy_write_struct(&png_ptr, png_infopp_NULL);
+ return false;
+ }
+
+ /* Set error handling. REQUIRED if you aren't supplying your own
+ * error handling functions in the png_create_write_struct() call.
+ */
+ if (setjmp(png_jmpbuf(png_ptr))) {
+ png_destroy_write_struct(&png_ptr, &info_ptr);
+ return false;
+ }
+
+ png_set_write_fn(png_ptr, (void*)stream, sk_write_fn, png_flush_ptr_NULL);
+
+ /* Set the image information here. Width and height are up to 2^31,
+ * bit_depth is one of 1, 2, 4, 8, or 16, but valid values also depend on
+ * the color_type selected. color_type is one of PNG_COLOR_TYPE_GRAY,
+ * PNG_COLOR_TYPE_GRAY_ALPHA, PNG_COLOR_TYPE_PALETTE, PNG_COLOR_TYPE_RGB,
+ * or PNG_COLOR_TYPE_RGB_ALPHA. interlace is either PNG_INTERLACE_NONE or
+ * PNG_INTERLACE_ADAM7, and the compression_type and filter_type MUST
+ * currently be PNG_COMPRESSION_TYPE_BASE and PNG_FILTER_TYPE_BASE. REQUIRED
+ */
+
+ png_set_IHDR(png_ptr, info_ptr, bitmap.width(), bitmap.height(),
+ bitDepth, colorType,
+ PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_BASE,
+ PNG_FILTER_TYPE_BASE);
+
+ // set our colortable/trans arrays if needed
+ png_color paletteColors[256];
+ png_byte trans[256];
+ if (kIndex_8_SkColorType == ct) {
+ SkColorTable* colorTable = bitmap.getColorTable();
+ SkASSERT(colorTable);
+ int numTrans = pack_palette(colorTable, paletteColors, trans, alphaType);
+ png_set_PLTE(png_ptr, info_ptr, paletteColors, colorTable->count());
+ if (numTrans > 0) {
+ png_set_tRNS(png_ptr, info_ptr, trans, numTrans, nullptr);
+ }
+ }
+
+ png_set_sBIT(png_ptr, info_ptr, &sig_bit);
+ png_write_info(png_ptr, info_ptr);
+
+ const char* srcImage = (const char*)bitmap.getPixels();
+ SkAutoSTMalloc<1024, char> rowStorage(bitmap.width() << 2);
+ char* storage = rowStorage.get();
+ transform_scanline_proc proc = choose_proc(ct, alphaType);
+
+ for (int y = 0; y < bitmap.height(); y++) {
+ png_bytep row_ptr = (png_bytep)storage;
+ proc(storage, srcImage, bitmap.width(), SkColorTypeBytesPerPixel(ct));
+ png_write_rows(png_ptr, &row_ptr, 1);
+ srcImage += bitmap.rowBytes();
+ }
+
+ png_write_end(png_ptr, info_ptr);
+
+ /* clean up after the write, and free any memory allocated */
+ png_destroy_write_struct(&png_ptr, &info_ptr);
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+DEFINE_ENCODER_CREATOR(PNGImageEncoder);
+///////////////////////////////////////////////////////////////////////////////
+
+SkImageEncoder* sk_libpng_efactory(SkImageEncoder::Type t) {
+ return (SkImageEncoder::kPNG_Type == t) ? new SkPNGImageEncoder : nullptr;
+}
+
+static SkImageEncoder_EncodeReg gEReg(sk_libpng_efactory);
diff --git a/gfx/skia/skia/src/images/SkWEBPImageEncoder.cpp b/gfx/skia/skia/src/images/SkWEBPImageEncoder.cpp
new file mode 100644
index 000000000..116608a25
--- /dev/null
+++ b/gfx/skia/skia/src/images/SkWEBPImageEncoder.cpp
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2010, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SkBitmap.h"
+#include "SkImageEncoder.h"
+#include "SkColorPriv.h"
+#include "SkStream.h"
+#include "SkTemplates.h"
+#include "SkUtils.h"
+
+// A WebP encoder only, on top of (a subset of) libwebp
+// For more information on WebP image format, and libwebp library, see:
+// http://code.google.com/speed/webp/
+// http://www.webmproject.org/code/#libwebp_webp_image_decoder_library
+// http://review.webmproject.org/gitweb?p=libwebp.git
+
+#include <stdio.h>
+extern "C" {
+// If libwebp is moved out of the skia source tree, the path to the webp headers
+// must be updated accordingly. Here, we enforce using the local copy in the webp
+// sub-directory.
+#include "webp/encode.h"
+}
+
+#include "SkUnPreMultiply.h"
+
+typedef void (*ScanlineImporter)(const uint8_t* in, uint8_t* out, int width,
+ const SkPMColor* SK_RESTRICT ctable);
+
+static void ARGB_8888_To_RGB(const uint8_t* in, uint8_t* rgb, int width,
+ const SkPMColor*) {
+ const uint32_t* SK_RESTRICT src = (const uint32_t*)in;
+ for (int i = 0; i < width; ++i) {
+ const uint32_t c = *src++;
+ rgb[0] = SkGetPackedR32(c);
+ rgb[1] = SkGetPackedG32(c);
+ rgb[2] = SkGetPackedB32(c);
+ rgb += 3;
+ }
+}
+
+static void ARGB_8888_To_RGBA(const uint8_t* in, uint8_t* rgb, int width,
+ const SkPMColor*) {
+ const uint32_t* SK_RESTRICT src = (const uint32_t*)in;
+ const SkUnPreMultiply::Scale* SK_RESTRICT table =
+ SkUnPreMultiply::GetScaleTable();
+ for (int i = 0; i < width; ++i) {
+ const uint32_t c = *src++;
+ uint8_t a = SkGetPackedA32(c);
+ uint8_t r = SkGetPackedR32(c);
+ uint8_t g = SkGetPackedG32(c);
+ uint8_t b = SkGetPackedB32(c);
+ if (0 != a && 255 != a) {
+ SkUnPreMultiply::Scale scale = table[a];
+ r = SkUnPreMultiply::ApplyScale(scale, r);
+ g = SkUnPreMultiply::ApplyScale(scale, g);
+ b = SkUnPreMultiply::ApplyScale(scale, b);
+ }
+ rgb[0] = r;
+ rgb[1] = g;
+ rgb[2] = b;
+ rgb[3] = a;
+ rgb += 4;
+ }
+}
+
+static void RGB_565_To_RGB(const uint8_t* in, uint8_t* rgb, int width,
+ const SkPMColor*) {
+ const uint16_t* SK_RESTRICT src = (const uint16_t*)in;
+ for (int i = 0; i < width; ++i) {
+ const uint16_t c = *src++;
+ rgb[0] = SkPacked16ToR32(c);
+ rgb[1] = SkPacked16ToG32(c);
+ rgb[2] = SkPacked16ToB32(c);
+ rgb += 3;
+ }
+}
+
+static void ARGB_4444_To_RGB(const uint8_t* in, uint8_t* rgb, int width,
+ const SkPMColor*) {
+ const SkPMColor16* SK_RESTRICT src = (const SkPMColor16*)in;
+ for (int i = 0; i < width; ++i) {
+ const SkPMColor16 c = *src++;
+ rgb[0] = SkPacked4444ToR32(c);
+ rgb[1] = SkPacked4444ToG32(c);
+ rgb[2] = SkPacked4444ToB32(c);
+ rgb += 3;
+ }
+}
+
+static void ARGB_4444_To_RGBA(const uint8_t* in, uint8_t* rgb, int width,
+ const SkPMColor*) {
+ const SkPMColor16* SK_RESTRICT src = (const SkPMColor16*)in;
+ const SkUnPreMultiply::Scale* SK_RESTRICT table =
+ SkUnPreMultiply::GetScaleTable();
+ for (int i = 0; i < width; ++i) {
+ const SkPMColor16 c = *src++;
+ uint8_t a = SkPacked4444ToA32(c);
+ uint8_t r = SkPacked4444ToR32(c);
+ uint8_t g = SkPacked4444ToG32(c);
+ uint8_t b = SkPacked4444ToB32(c);
+ if (0 != a && 255 != a) {
+ SkUnPreMultiply::Scale scale = table[a];
+ r = SkUnPreMultiply::ApplyScale(scale, r);
+ g = SkUnPreMultiply::ApplyScale(scale, g);
+ b = SkUnPreMultiply::ApplyScale(scale, b);
+ }
+ rgb[0] = r;
+ rgb[1] = g;
+ rgb[2] = b;
+ rgb[3] = a;
+ rgb += 4;
+ }
+}
+
+static void Index8_To_RGB(const uint8_t* in, uint8_t* rgb, int width,
+ const SkPMColor* SK_RESTRICT ctable) {
+ const uint8_t* SK_RESTRICT src = (const uint8_t*)in;
+ for (int i = 0; i < width; ++i) {
+ const uint32_t c = ctable[*src++];
+ rgb[0] = SkGetPackedR32(c);
+ rgb[1] = SkGetPackedG32(c);
+ rgb[2] = SkGetPackedB32(c);
+ rgb += 3;
+ }
+}
+
+static ScanlineImporter ChooseImporter(SkColorType ct, bool hasAlpha, int* bpp) {
+ switch (ct) {
+ case kN32_SkColorType:
+ if (hasAlpha) {
+ *bpp = 4;
+ return ARGB_8888_To_RGBA;
+ } else {
+ *bpp = 3;
+ return ARGB_8888_To_RGB;
+ }
+ case kARGB_4444_SkColorType:
+ if (hasAlpha) {
+ *bpp = 4;
+ return ARGB_4444_To_RGBA;
+ } else {
+ *bpp = 3;
+ return ARGB_4444_To_RGB;
+ }
+ case kRGB_565_SkColorType:
+ *bpp = 3;
+ return RGB_565_To_RGB;
+ case kIndex_8_SkColorType:
+ *bpp = 3;
+ return Index8_To_RGB;
+ default:
+ return nullptr;
+ }
+}
+
+static int stream_writer(const uint8_t* data, size_t data_size,
+ const WebPPicture* const picture) {
+ SkWStream* const stream = (SkWStream*)picture->custom_ptr;
+ return stream->write(data, data_size) ? 1 : 0;
+}
+
+class SkWEBPImageEncoder : public SkImageEncoder {
+protected:
+ bool onEncode(SkWStream* stream, const SkBitmap& bm, int quality) override;
+
+private:
+ typedef SkImageEncoder INHERITED;
+};
+
+bool SkWEBPImageEncoder::onEncode(SkWStream* stream, const SkBitmap& bm,
+ int quality) {
+ const bool hasAlpha = !bm.isOpaque();
+ int bpp = -1;
+ const ScanlineImporter scanline_import = ChooseImporter(bm.colorType(), hasAlpha, &bpp);
+ if (nullptr == scanline_import) {
+ return false;
+ }
+ if (-1 == bpp) {
+ return false;
+ }
+
+ SkAutoLockPixels alp(bm);
+ if (nullptr == bm.getPixels()) {
+ return false;
+ }
+
+ WebPConfig webp_config;
+ if (!WebPConfigPreset(&webp_config, WEBP_PRESET_DEFAULT, (float) quality)) {
+ return false;
+ }
+
+ WebPPicture pic;
+ WebPPictureInit(&pic);
+ pic.width = bm.width();
+ pic.height = bm.height();
+ pic.writer = stream_writer;
+ pic.custom_ptr = (void*)stream;
+
+ const SkPMColor* colors = bm.getColorTable() ? bm.getColorTable()->readColors() : nullptr;
+ const uint8_t* src = (uint8_t*)bm.getPixels();
+ const int rgbStride = pic.width * bpp;
+
+    // Convert each scanline of the bitmap from its source color type to the
+    // RGB(A) layout that libwebp expects.
+ uint8_t* rgb = new uint8_t[rgbStride * pic.height];
+ for (int y = 0; y < pic.height; ++y) {
+ scanline_import(src + y * bm.rowBytes(), rgb + y * rgbStride,
+ pic.width, colors);
+ }
+
+ bool ok;
+ if (bpp == 3) {
+ ok = SkToBool(WebPPictureImportRGB(&pic, rgb, rgbStride));
+ } else {
+ ok = SkToBool(WebPPictureImportRGBA(&pic, rgb, rgbStride));
+ }
+ delete[] rgb;
+
+ ok = ok && WebPEncode(&webp_config, &pic);
+ WebPPictureFree(&pic);
+
+ return ok;
+}
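+
+// How this encoder is reached: the SkImageEncoder_EncodeReg entry at the bottom
+// of this file registers sk_libwebp_efactory for SkImageEncoder::kWEBP_Type.
+// A rough caller-side sketch (assuming this revision still exposes the static
+// SkImageEncoder::EncodeStream() helper) would be:
+//
+//     SkFILEWStream out("image.webp");
+//     bool ok = SkImageEncoder::EncodeStream(&out, bitmap,
+//                                            SkImageEncoder::kWEBP_Type, 80);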
+
+
+///////////////////////////////////////////////////////////////////////////////
+DEFINE_ENCODER_CREATOR(WEBPImageEncoder);
+///////////////////////////////////////////////////////////////////////////////
+
+static SkImageEncoder* sk_libwebp_efactory(SkImageEncoder::Type t) {
+ return (SkImageEncoder::kWEBP_Type == t) ? new SkWEBPImageEncoder : nullptr;
+}
+
+static SkImageEncoder_EncodeReg gEReg(sk_libwebp_efactory);
diff --git a/gfx/skia/skia/src/images/transform_scanline.h b/gfx/skia/skia/src/images/transform_scanline.h
new file mode 100644
index 000000000..a02e98ab0
--- /dev/null
+++ b/gfx/skia/skia/src/images/transform_scanline.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**
+ * Functions to transform scanlines between packed-pixel formats.
+ */
+
+#include "SkBitmap.h"
+#include "SkColor.h"
+#include "SkColorPriv.h"
+#include "SkPreConfig.h"
+#include "SkUnPreMultiply.h"
+
+/**
+ * Function signature for a scanline transformation.
+ * Transforms 'width' pixels from the 'src' buffer into the 'dst' buffer,
+ * repacking color channel data as appropriate for the given transformation.
+ * 'bpp' is the number of bytes per pixel in the 'src' buffer.
+ */
+typedef void (*transform_scanline_proc)(char* SK_RESTRICT dst, const char* SK_RESTRICT src,
+ int width, int bpp);
+
+/**
+ * Identity transformation: just copy bytes from src to dst.
+ */
+static void transform_scanline_memcpy(char* SK_RESTRICT dst, const char* SK_RESTRICT src,
+ int width, int bpp) {
+ memcpy(dst, src, width * bpp);
+}
+
+/**
+ * Transform from kRGB_565_Config to 3-bytes-per-pixel RGB.
+ * Alpha channel data is not present in kRGB_565_Config format, so there is no
+ * alpha channel data to preserve.
+ */
+static void transform_scanline_565(char* SK_RESTRICT dst, const char* SK_RESTRICT src,
+ int width, int) {
+ const uint16_t* srcP = (const uint16_t*)src;
+ for (int i = 0; i < width; i++) {
+ unsigned c = *srcP++;
+ *dst++ = SkPacked16ToR32(c);
+ *dst++ = SkPacked16ToG32(c);
+ *dst++ = SkPacked16ToB32(c);
+ }
+}
+
+/**
+ * Transform from kRGBA_8888_SkColorType to 3-bytes-per-pixel RGB.
+ * Alpha channel data is abandoned.
+ */
+static void transform_scanline_RGBX(char* SK_RESTRICT dst, const char* SK_RESTRICT src,
+ int width, int) {
+ const uint32_t* srcP = (const SkPMColor*)src;
+ for (int i = 0; i < width; i++) {
+ uint32_t c = *srcP++;
+ *dst++ = (c >> 0) & 0xFF;
+ *dst++ = (c >> 8) & 0xFF;
+ *dst++ = (c >> 16) & 0xFF;
+ }
+}
+
+/**
+ * Transform from kBGRA_8888_SkColorType to 3-bytes-per-pixel RGB.
+ * Alpha channel data is abandoned.
+ */
+static void transform_scanline_BGRX(char* SK_RESTRICT dst, const char* SK_RESTRICT src,
+ int width, int) {
+ const uint32_t* srcP = (const SkPMColor*)src;
+ for (int i = 0; i < width; i++) {
+ uint32_t c = *srcP++;
+ *dst++ = (c >> 16) & 0xFF;
+ *dst++ = (c >> 8) & 0xFF;
+ *dst++ = (c >> 0) & 0xFF;
+ }
+}
+
+/**
+ * Transform from kARGB_4444_Config to 3-bytes-per-pixel RGB.
+ * Alpha channel data, if any, is abandoned.
+ */
+static void transform_scanline_444(char* SK_RESTRICT dst, const char* SK_RESTRICT src,
+ int width, int) {
+ const SkPMColor16* srcP = (const SkPMColor16*)src;
+ for (int i = 0; i < width; i++) {
+ SkPMColor16 c = *srcP++;
+ *dst++ = SkPacked4444ToR32(c);
+ *dst++ = SkPacked4444ToG32(c);
+ *dst++ = SkPacked4444ToB32(c);
+ }
+}
+
+template <bool kIsRGBA>
+static inline void transform_scanline_unpremultiply(char* SK_RESTRICT dst,
+ const char* SK_RESTRICT src, int width, int) {
+ const uint32_t* srcP = (const SkPMColor*)src;
+ const SkUnPreMultiply::Scale* table = SkUnPreMultiply::GetScaleTable();
+
+ for (int i = 0; i < width; i++) {
+ uint32_t c = *srcP++;
+ unsigned r, g, b, a;
+ if (kIsRGBA) {
+ r = (c >> 0) & 0xFF;
+ g = (c >> 8) & 0xFF;
+ b = (c >> 16) & 0xFF;
+ a = (c >> 24) & 0xFF;
+ } else {
+ r = (c >> 16) & 0xFF;
+ g = (c >> 8) & 0xFF;
+ b = (c >> 0) & 0xFF;
+ a = (c >> 24) & 0xFF;
+ }
+
+ if (0 != a && 255 != a) {
+ SkUnPreMultiply::Scale scale = table[a];
+ r = SkUnPreMultiply::ApplyScale(scale, r);
+ g = SkUnPreMultiply::ApplyScale(scale, g);
+ b = SkUnPreMultiply::ApplyScale(scale, b);
+ }
+ *dst++ = r;
+ *dst++ = g;
+ *dst++ = b;
+ *dst++ = a;
+ }
+}
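+
+// For reference, both branches read a packed 32-bit pixel as loaded on a
+// little-endian host: for kRGBA_8888 the R,G,B,A bytes sit in bits 0-7, 8-15,
+// 16-23 and 24-31, while kBGRA_8888 swaps the R and B positions. For example
+// the premultiplied BGRA pixel 0x80402010 (A=0x80, R=0x40, G=0x20, B=0x10) is
+// emitted as the unpremultiplied bytes R,G,B,A of roughly 0x80, 0x40, 0x20, 0x80.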
+
+/**
+ * Transform from kPremul, kRGBA_8888_SkColorType to 4-bytes-per-pixel unpremultiplied RGBA.
+ */
+static void transform_scanline_rgbA(char* SK_RESTRICT dst, const char* SK_RESTRICT src,
+ int width, int bpp) {
+ transform_scanline_unpremultiply<true>(dst, src, width, bpp);
+}
+
+/**
+ * Transform from kPremul, kBGRA_8888_SkColorType to 4-bytes-per-pixel unpremultiplied RGBA.
+ */
+static void transform_scanline_bgrA(char* SK_RESTRICT dst, const char* SK_RESTRICT src,
+ int width, int bpp) {
+ transform_scanline_unpremultiply<false>(dst, src, width, bpp);
+}
+
+/**
+ * Transform from kUnpremul, kBGRA_8888_SkColorType to 4-bytes-per-pixel unpremultiplied RGBA.
+ */
+static void transform_scanline_BGRA(char* SK_RESTRICT dst, const char* SK_RESTRICT src,
+ int width, int) {
+ const uint32_t* srcP = (const SkPMColor*)src;
+ for (int i = 0; i < width; i++) {
+ uint32_t c = *srcP++;
+ *dst++ = (c >> 16) & 0xFF;
+ *dst++ = (c >> 8) & 0xFF;
+ *dst++ = (c >> 0) & 0xFF;
+ *dst++ = (c >> 24) & 0xFF;
+ }
+}
+
+/**
+ * Transform from kARGB_4444_Config to 4-bytes-per-pixel unpremultiplied RGBA,
+ * with the RGB channels scaled (un-premultiplied) based on the alpha channel.
+ */
+static void transform_scanline_4444(char* SK_RESTRICT dst, const char* SK_RESTRICT src,
+ int width, int) {
+ const SkPMColor16* srcP = (const SkPMColor16*)src;
+ const SkUnPreMultiply::Scale* table = SkUnPreMultiply::GetScaleTable();
+
+ for (int i = 0; i < width; i++) {
+ SkPMColor16 c = *srcP++;
+ unsigned a = SkPacked4444ToA32(c);
+ unsigned r = SkPacked4444ToR32(c);
+ unsigned g = SkPacked4444ToG32(c);
+ unsigned b = SkPacked4444ToB32(c);
+
+ if (0 != a && 255 != a) {
+ SkUnPreMultiply::Scale scale = table[a];
+ r = SkUnPreMultiply::ApplyScale(scale, r);
+ g = SkUnPreMultiply::ApplyScale(scale, g);
+ b = SkUnPreMultiply::ApplyScale(scale, b);
+ }
+ *dst++ = r;
+ *dst++ = g;
+ *dst++ = b;
+ *dst++ = a;
+ }
+}
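+
+// Note on the 4444 cases: SkPacked4444To{R,G,B,A}32 widen each 4-bit channel by
+// replicating the nibble into both halves of the byte (0x0 -> 0x00, 0x8 -> 0x88,
+// 0xF -> 0xFF), so full 4-bit intensity maps exactly to 255 before any
+// unpremultiply scaling is applied.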
diff --git a/gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.cpp b/gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.cpp
new file mode 100644
index 000000000..6ced5bfc0
--- /dev/null
+++ b/gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.cpp
@@ -0,0 +1,258 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkDiscardableMemory.h"
+#include "SkDiscardableMemoryPool.h"
+#include "SkImageGenerator.h"
+#include "SkMutex.h"
+#include "SkOnce.h"
+#include "SkTInternalLList.h"
+
+// Note:
+// A PoolDiscardableMemory is memory that is counted in a pool.
+// A DiscardableMemoryPool is a pool of PoolDiscardableMemorys.
+
+namespace {
+
+class PoolDiscardableMemory;
+
+/**
+ * This non-global pool can be used for unit tests to verify that the
+ * pool works.
+ */
+class DiscardableMemoryPool : public SkDiscardableMemoryPool {
+public:
+ /**
+     * Without a mutex, this will not be thread safe.
+ */
+ DiscardableMemoryPool(size_t budget, SkBaseMutex* mutex = nullptr);
+ virtual ~DiscardableMemoryPool();
+
+ SkDiscardableMemory* create(size_t bytes) override;
+
+ size_t getRAMUsed() override;
+ void setRAMBudget(size_t budget) override;
+ size_t getRAMBudget() override { return fBudget; }
+
+ /** purges all unlocked DMs */
+ void dumpPool() override;
+
+ #if SK_LAZY_CACHE_STATS // Defined in SkDiscardableMemoryPool.h
+ int getCacheHits() override { return fCacheHits; }
+ int getCacheMisses() override { return fCacheMisses; }
+ void resetCacheHitsAndMisses() override {
+ fCacheHits = fCacheMisses = 0;
+ }
+ int fCacheHits;
+ int fCacheMisses;
+ #endif // SK_LAZY_CACHE_STATS
+
+private:
+ SkBaseMutex* fMutex;
+ size_t fBudget;
+ size_t fUsed;
+ SkTInternalLList<PoolDiscardableMemory> fList;
+
+ /** Function called to free memory if needed */
+ void dumpDownTo(size_t budget);
+    /** called by PoolDiscardableMemory upon destruction */
+    void free(PoolDiscardableMemory* dm);
+    /** called by PoolDiscardableMemory::lock() */
+    bool lock(PoolDiscardableMemory* dm);
+    /** called by PoolDiscardableMemory::unlock() */
+ void unlock(PoolDiscardableMemory* dm);
+
+ friend class PoolDiscardableMemory;
+
+ typedef SkDiscardableMemory::Factory INHERITED;
+};
+
+/**
+ * A PoolDiscardableMemory is a SkDiscardableMemory that relies on
+ * a DiscardableMemoryPool object to manage the memory.
+ */
+class PoolDiscardableMemory : public SkDiscardableMemory {
+public:
+ PoolDiscardableMemory(DiscardableMemoryPool* pool,
+ void* pointer, size_t bytes);
+ virtual ~PoolDiscardableMemory();
+ bool lock() override;
+ void* data() override;
+ void unlock() override;
+ friend class DiscardableMemoryPool;
+private:
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(PoolDiscardableMemory);
+ DiscardableMemoryPool* const fPool;
+ bool fLocked;
+ void* fPointer;
+ const size_t fBytes;
+};
+
+PoolDiscardableMemory::PoolDiscardableMemory(DiscardableMemoryPool* pool,
+ void* pointer,
+ size_t bytes)
+ : fPool(pool)
+ , fLocked(true)
+ , fPointer(pointer)
+ , fBytes(bytes) {
+ SkASSERT(fPool != nullptr);
+ SkASSERT(fPointer != nullptr);
+ SkASSERT(fBytes > 0);
+ fPool->ref();
+}
+
+PoolDiscardableMemory::~PoolDiscardableMemory() {
+ SkASSERT(!fLocked); // contract for SkDiscardableMemory
+ fPool->free(this);
+ fPool->unref();
+}
+
+bool PoolDiscardableMemory::lock() {
+ SkASSERT(!fLocked); // contract for SkDiscardableMemory
+ return fPool->lock(this);
+}
+
+void* PoolDiscardableMemory::data() {
+ SkASSERT(fLocked); // contract for SkDiscardableMemory
+ return fPointer;
+}
+
+void PoolDiscardableMemory::unlock() {
+ SkASSERT(fLocked); // contract for SkDiscardableMemory
+ fPool->unlock(this);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+DiscardableMemoryPool::DiscardableMemoryPool(size_t budget,
+ SkBaseMutex* mutex)
+ : fMutex(mutex)
+ , fBudget(budget)
+ , fUsed(0) {
+ #if SK_LAZY_CACHE_STATS
+ fCacheHits = 0;
+ fCacheMisses = 0;
+ #endif // SK_LAZY_CACHE_STATS
+}
+DiscardableMemoryPool::~DiscardableMemoryPool() {
+ // PoolDiscardableMemory objects that belong to this pool are
+ // always deleted before deleting this pool since each one has a
+ // ref to the pool.
+ SkASSERT(fList.isEmpty());
+}
+
+void DiscardableMemoryPool::dumpDownTo(size_t budget) {
+ if (fMutex != nullptr) {
+ fMutex->assertHeld();
+ }
+ if (fUsed <= budget) {
+ return;
+ }
+ typedef SkTInternalLList<PoolDiscardableMemory>::Iter Iter;
+ Iter iter;
+ PoolDiscardableMemory* cur = iter.init(fList, Iter::kTail_IterStart);
+ while ((fUsed > budget) && (cur)) {
+ if (!cur->fLocked) {
+ PoolDiscardableMemory* dm = cur;
+ SkASSERT(dm->fPointer != nullptr);
+ sk_free(dm->fPointer);
+ dm->fPointer = nullptr;
+ SkASSERT(fUsed >= dm->fBytes);
+ fUsed -= dm->fBytes;
+ cur = iter.prev();
+            // Purged DMs are taken out of the list. This saves time
+            // looking them up. Purged DMs are NOT deleted.
+ fList.remove(dm);
+ } else {
+ cur = iter.prev();
+ }
+ }
+}
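+
+// Eviction order: lock() moves a block to the head of fList, so the tail holds
+// the least-recently-locked blocks. dumpDownTo() therefore walks from the tail
+// toward the head, purging unlocked blocks in least-recently-locked order until
+// fUsed is at or below |budget| or only locked blocks remain.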
+
+SkDiscardableMemory* DiscardableMemoryPool::create(size_t bytes) {
+ void* addr = sk_malloc_flags(bytes, 0);
+ if (nullptr == addr) {
+ return nullptr;
+ }
+ PoolDiscardableMemory* dm = new PoolDiscardableMemory(this, addr, bytes);
+ SkAutoMutexAcquire autoMutexAcquire(fMutex);
+ fList.addToHead(dm);
+ fUsed += bytes;
+ this->dumpDownTo(fBudget);
+ return dm;
+}
+
+void DiscardableMemoryPool::free(PoolDiscardableMemory* dm) {
+ SkAutoMutexAcquire autoMutexAcquire(fMutex);
+ // This is called by dm's destructor.
+ if (dm->fPointer != nullptr) {
+ sk_free(dm->fPointer);
+ dm->fPointer = nullptr;
+ SkASSERT(fUsed >= dm->fBytes);
+ fUsed -= dm->fBytes;
+ fList.remove(dm);
+ } else {
+ SkASSERT(!fList.isInList(dm));
+ }
+}
+
+bool DiscardableMemoryPool::lock(PoolDiscardableMemory* dm) {
+ SkASSERT(dm != nullptr);
+ SkAutoMutexAcquire autoMutexAcquire(fMutex);
+ if (nullptr == dm->fPointer) {
+ // May have been purged while waiting for lock.
+ #if SK_LAZY_CACHE_STATS
+ ++fCacheMisses;
+ #endif // SK_LAZY_CACHE_STATS
+ return false;
+ }
+ dm->fLocked = true;
+ fList.remove(dm);
+ fList.addToHead(dm);
+ #if SK_LAZY_CACHE_STATS
+ ++fCacheHits;
+ #endif // SK_LAZY_CACHE_STATS
+ return true;
+}
+
+void DiscardableMemoryPool::unlock(PoolDiscardableMemory* dm) {
+ SkASSERT(dm != nullptr);
+ SkAutoMutexAcquire autoMutexAcquire(fMutex);
+ dm->fLocked = false;
+ this->dumpDownTo(fBudget);
+}
+
+size_t DiscardableMemoryPool::getRAMUsed() {
+ return fUsed;
+}
+void DiscardableMemoryPool::setRAMBudget(size_t budget) {
+ SkAutoMutexAcquire autoMutexAcquire(fMutex);
+ fBudget = budget;
+ this->dumpDownTo(fBudget);
+}
+void DiscardableMemoryPool::dumpPool() {
+ SkAutoMutexAcquire autoMutexAcquire(fMutex);
+ this->dumpDownTo(0);
+}
+
+} // namespace
+
+SkDiscardableMemoryPool* SkDiscardableMemoryPool::Create(size_t size, SkBaseMutex* mutex) {
+ return new DiscardableMemoryPool(size, mutex);
+}
+
+SK_DECLARE_STATIC_MUTEX(gMutex);
+
+SkDiscardableMemoryPool* SkGetGlobalDiscardableMemoryPool() {
+ static SkOnce once;
+ static SkDiscardableMemoryPool* global;
+ once([]{
+ global = SkDiscardableMemoryPool::Create(SK_DEFAULT_GLOBAL_DISCARDABLE_MEMORY_POOL_SIZE,
+ &gMutex);
+ });
+ return global;
+}
diff --git a/gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.h b/gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.h
new file mode 100644
index 000000000..92ba48bcb
--- /dev/null
+++ b/gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDiscardableMemoryPool_DEFINED
+#define SkDiscardableMemoryPool_DEFINED
+
+#include "SkDiscardableMemory.h"
+#include "SkMutex.h"
+
+#ifndef SK_LAZY_CACHE_STATS
+ #ifdef SK_DEBUG
+ #define SK_LAZY_CACHE_STATS 1
+ #else
+ #define SK_LAZY_CACHE_STATS 0
+ #endif
+#endif
+
+/**
+ * An implementation of Discardable Memory that manages a fixed-size
+ * budget of memory. When the allocated memory exceeds this size,
+ * unlocked blocks of memory are purged. If all memory is locked, it
+ * can exceed the memory-use budget.
+ */
+class SkDiscardableMemoryPool : public SkDiscardableMemory::Factory {
+public:
+ virtual ~SkDiscardableMemoryPool() { }
+
+ virtual size_t getRAMUsed() = 0;
+ virtual void setRAMBudget(size_t budget) = 0;
+ virtual size_t getRAMBudget() = 0;
+
+ /** purges all unlocked DMs */
+ virtual void dumpPool() = 0;
+
+ #if SK_LAZY_CACHE_STATS
+ /**
+ * These two values are a count of the number of successful and
+ * failed calls to SkDiscardableMemory::lock() for all DMs managed
+ * by this pool.
+ */
+ virtual int getCacheHits() = 0;
+ virtual int getCacheMisses() = 0;
+ virtual void resetCacheHitsAndMisses() = 0;
+ #endif
+
+ /**
+ * This non-global pool can be used for unit tests to verify that
+ * the pool works.
+     * Without a mutex, it will not be thread safe.
+ */
+ static SkDiscardableMemoryPool* Create(
+ size_t size, SkBaseMutex* mutex = nullptr);
+};
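+
+/**
+ *  A rough usage sketch for such a test pool (assuming only the inherited
+ *  SkDiscardableMemory::Factory::create() and the SkDiscardableMemory
+ *  lock()/data()/unlock() contract in addition to the methods above):
+ *
+ *      SkDiscardableMemoryPool* pool = SkDiscardableMemoryPool::Create(1024);
+ *      SkDiscardableMemory* dm = pool->create(256);   // returned in the locked state
+ *      memset(dm->data(), 0, 256);
+ *      dm->unlock();                                  // now eligible for purging
+ *      pool->dumpPool();                              // frees it; getRAMUsed() becomes 0
+ *      delete dm;
+ *      pool->unref();
+ */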
+
+/**
+ * Returns (and creates if needed) a threadsafe global
+ * SkDiscardableMemoryPool.
+ */
+SkDiscardableMemoryPool* SkGetGlobalDiscardableMemoryPool();
+
+#if !defined(SK_DEFAULT_GLOBAL_DISCARDABLE_MEMORY_POOL_SIZE)
+#define SK_DEFAULT_GLOBAL_DISCARDABLE_MEMORY_POOL_SIZE (128 * 1024 * 1024)
+#endif
+
+#endif // SkDiscardableMemoryPool_DEFINED
diff --git a/gfx/skia/skia/src/lazy/SkDiscardablePixelRef.cpp b/gfx/skia/skia/src/lazy/SkDiscardablePixelRef.cpp
new file mode 100644
index 000000000..ba4a50ff8
--- /dev/null
+++ b/gfx/skia/skia/src/lazy/SkDiscardablePixelRef.cpp
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkDiscardablePixelRef.h"
+#include "SkDiscardableMemory.h"
+#include "SkImageGenerator.h"
+
+SkDiscardablePixelRef::SkDiscardablePixelRef(const SkImageInfo& info,
+ SkImageGenerator* generator,
+ size_t rowBytes,
+ SkDiscardableMemory::Factory* fact)
+ : INHERITED(info)
+ , fGenerator(generator)
+ , fDMFactory(fact)
+ , fRowBytes(rowBytes)
+ , fDiscardableMemory(nullptr)
+ , fDiscardableMemoryIsLocked(false)
+{
+ SkASSERT(fGenerator != nullptr);
+ SkASSERT(fRowBytes > 0);
+ // The SkImageGenerator contract requires fGenerator to always
+ // decode the same image on each call to getPixels().
+ this->setImmutable();
+ SkSafeRef(fDMFactory);
+}
+
+SkDiscardablePixelRef::~SkDiscardablePixelRef() {
+ if (fDiscardableMemoryIsLocked) {
+ fDiscardableMemory->unlock();
+ fDiscardableMemoryIsLocked = false;
+ }
+ delete fDiscardableMemory;
+ SkSafeUnref(fDMFactory);
+ delete fGenerator;
+}
+
+bool SkDiscardablePixelRef::onNewLockPixels(LockRec* rec) {
+ if (fDiscardableMemory != nullptr) {
+ if (fDiscardableMemory->lock()) {
+ fDiscardableMemoryIsLocked = true;
+ rec->fPixels = fDiscardableMemory->data();
+ rec->fColorTable = fCTable.get();
+ rec->fRowBytes = fRowBytes;
+ return true;
+ }
+ delete fDiscardableMemory;
+ fDiscardableMemory = nullptr;
+ fDiscardableMemoryIsLocked = false;
+ }
+
+ const size_t size = this->info().getSafeSize(fRowBytes);
+
+ if (fDMFactory != nullptr) {
+ fDiscardableMemory = fDMFactory->create(size);
+ fDiscardableMemoryIsLocked = true;
+ } else {
+ fDiscardableMemory = SkDiscardableMemory::Create(size);
+ fDiscardableMemoryIsLocked = true;
+ }
+ if (nullptr == fDiscardableMemory) {
+ fDiscardableMemoryIsLocked = false;
+ return false; // Memory allocation failed.
+ }
+
+ void* pixels = fDiscardableMemory->data();
+ const SkImageInfo& info = this->info();
+ SkPMColor colors[256];
+ int colorCount = 0;
+
+ if (!fGenerator->getPixels(info, pixels, fRowBytes, colors, &colorCount)) {
+ fDiscardableMemory->unlock();
+ fDiscardableMemoryIsLocked = false;
+ delete fDiscardableMemory;
+ fDiscardableMemory = nullptr;
+ return false;
+ }
+
+    // Note: our ctable is not purgeable, as it is not stored in the discardable-memory block.
+    // This is because SkColorTable is reference-counted, and therefore our caller could hold
+    // onto it beyond the scope of a lock/unlock. If we change the API/lifecycle for
+    // SkColorTable, we could move it into the block, but then again perhaps it is small enough
+    // that this doesn't really matter.
+ if (colorCount > 0) {
+ fCTable.reset(new SkColorTable(colors, colorCount));
+ } else {
+ fCTable.reset(nullptr);
+ }
+
+ rec->fPixels = pixels;
+ rec->fColorTable = fCTable.get();
+ rec->fRowBytes = fRowBytes;
+ return true;
+}
+
+void SkDiscardablePixelRef::onUnlockPixels() {
+ fDiscardableMemory->unlock();
+ fDiscardableMemoryIsLocked = false;
+}
+
+bool SkDEPRECATED_InstallDiscardablePixelRef(SkImageGenerator* generator, const SkIRect* subset,
+ SkBitmap* dst, SkDiscardableMemory::Factory* factory) {
+ SkAutoTDelete<SkImageGenerator> autoGenerator(generator);
+ if (nullptr == autoGenerator.get()) {
+ return false;
+ }
+
+ SkImageInfo prInfo = autoGenerator->getInfo();
+ if (prInfo.isEmpty()) {
+ return false;
+ }
+
+ SkIPoint origin = SkIPoint::Make(0, 0);
+ SkImageInfo bmInfo = prInfo;
+ if (subset) {
+ const SkIRect prBounds = SkIRect::MakeWH(prInfo.width(), prInfo.height());
+ if (subset->isEmpty() || !prBounds.contains(*subset)) {
+ return false;
+ }
+ bmInfo = prInfo.makeWH(subset->width(), subset->height());
+ origin.set(subset->x(), subset->y());
+ }
+
+ // must compute our desired rowBytes w.r.t. the pixelRef's dimensions, not ours, which may be
+ // smaller.
+ if (!dst->setInfo(bmInfo, prInfo.minRowBytes())) {
+ return false;
+ }
+
+    // Since dst->setInfo() may have changed/fixed-up the info, we check the bitmap's info.
+ SkASSERT(dst->info().colorType() != kUnknown_SkColorType);
+
+ if (dst->empty()) { // Use a normal pixelref.
+ return dst->tryAllocPixels();
+ }
+ SkAutoTUnref<SkDiscardablePixelRef> ref(
+ new SkDiscardablePixelRef(prInfo, autoGenerator.release(), dst->rowBytes(), factory));
+ dst->setPixelRef(ref, origin.x(), origin.y());
+ return true;
+}
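+
+// Lifecycle note: the installed pixel ref decodes lazily. Each lock either
+// re-locks the existing discardable block (a cache hit) or, if the block was
+// purged, allocates a new one and asks the generator to decode again (a cache
+// miss). Unlocked blocks may be discarded by the factory at any time.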
+
+// These functions are the public API.
+
+bool SkDEPRECATED_InstallDiscardablePixelRef(SkImageGenerator* generator, SkBitmap* dst) {
+ return SkDEPRECATED_InstallDiscardablePixelRef(generator, nullptr, dst, nullptr);
+}
+
+bool SkDEPRECATED_InstallDiscardablePixelRef(SkData* encoded, SkBitmap* dst) {
+ SkImageGenerator* generator = SkImageGenerator::NewFromEncoded(encoded);
+ return generator ?
+ SkDEPRECATED_InstallDiscardablePixelRef(generator, nullptr, dst, nullptr) : false;
+}
diff --git a/gfx/skia/skia/src/lazy/SkDiscardablePixelRef.h b/gfx/skia/skia/src/lazy/SkDiscardablePixelRef.h
new file mode 100644
index 000000000..73a2b0825
--- /dev/null
+++ b/gfx/skia/skia/src/lazy/SkDiscardablePixelRef.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDiscardablePixelRef_DEFINED
+#define SkDiscardablePixelRef_DEFINED
+
+#include "SkDiscardableMemory.h"
+#include "SkImageGeneratorPriv.h"
+#include "SkImageInfo.h"
+#include "SkPixelRef.h"
+
+/**
+ * A PixelRef backed by SkDiscardableMemory, with the ability to
+ * re-generate the pixels (via a SkImageGenerator) if the DM is
+ * purged.
+ */
+class SkDiscardablePixelRef : public SkPixelRef {
+public:
+
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override {
+ return fDiscardableMemory;
+ }
+
+protected:
+ ~SkDiscardablePixelRef();
+
+ bool onNewLockPixels(LockRec*) override;
+ void onUnlockPixels() override;
+ bool onLockPixelsAreWritable() const override { return false; }
+
+ SkData* onRefEncodedData() override {
+ return fGenerator->refEncodedData();
+ }
+
+ bool onIsLazyGenerated() const override { return true; }
+
+private:
+ SkImageGenerator* const fGenerator;
+ SkDiscardableMemory::Factory* const fDMFactory;
+ const size_t fRowBytes;
+ // These const members should not change over the life of the
+ // PixelRef, since the SkBitmap doesn't expect them to change.
+
+ SkDiscardableMemory* fDiscardableMemory;
+ bool fDiscardableMemoryIsLocked;
+ SkAutoTUnref<SkColorTable> fCTable;
+
+ /* Takes ownership of SkImageGenerator. */
+ SkDiscardablePixelRef(const SkImageInfo&, SkImageGenerator*,
+ size_t rowBytes,
+ SkDiscardableMemory::Factory* factory);
+
+ bool onQueryYUV8(SkYUVSizeInfo* sizeInfo, SkYUVColorSpace* colorSpace) const override {
+ // If the image was already decoded with lockPixels(), favor not
+ // re-decoding to YUV8 planes.
+ if (fDiscardableMemory) {
+ return false;
+ }
+ return fGenerator->queryYUV8(sizeInfo, colorSpace);
+ }
+
+ bool onGetYUV8Planes(const SkYUVSizeInfo& sizeInfo, void* planes[3]) override {
+ // If the image was already decoded with lockPixels(), favor not
+ // re-decoding to YUV8 planes.
+ if (fDiscardableMemory) {
+ return false;
+ }
+ return fGenerator->getYUV8Planes(sizeInfo, planes);
+ }
+
+ friend bool SkDEPRECATED_InstallDiscardablePixelRef(SkImageGenerator*, const SkIRect*, SkBitmap*,
+ SkDiscardableMemory::Factory*);
+
+ typedef SkPixelRef INHERITED;
+};
+
+#endif // SkDiscardablePixelRef_DEFINED
diff --git a/gfx/skia/skia/src/opts/Sk4px_NEON.h b/gfx/skia/skia/src/opts/Sk4px_NEON.h
new file mode 100644
index 000000000..62f1deb4a
--- /dev/null
+++ b/gfx/skia/skia/src/opts/Sk4px_NEON.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+namespace { // See Sk4px.h
+
+inline Sk4px Sk4px::DupPMColor(SkPMColor px) { return Sk16b((uint8x16_t)vdupq_n_u32(px)); }
+
+inline Sk4px Sk4px::Load4(const SkPMColor px[4]) {
+ return Sk16b((uint8x16_t)vld1q_u32(px));
+}
+inline Sk4px Sk4px::Load2(const SkPMColor px[2]) {
+ uint32x2_t px2 = vld1_u32(px);
+ return Sk16b((uint8x16_t)vcombine_u32(px2, px2));
+}
+inline Sk4px Sk4px::Load1(const SkPMColor px[1]) {
+ return Sk16b((uint8x16_t)vdupq_n_u32(*px));
+}
+
+inline void Sk4px::store4(SkPMColor px[4]) const {
+ vst1q_u32(px, (uint32x4_t)this->fVec);
+}
+inline void Sk4px::store2(SkPMColor px[2]) const {
+ vst1_u32(px, (uint32x2_t)vget_low_u8(this->fVec));
+}
+inline void Sk4px::store1(SkPMColor px[1]) const {
+ vst1q_lane_u32(px, (uint32x4_t)this->fVec, 0);
+}
+
+inline Sk4px::Wide Sk4px::widenLo() const {
+ return Sk16h(vmovl_u8(vget_low_u8 (this->fVec)),
+ vmovl_u8(vget_high_u8(this->fVec)));
+}
+
+inline Sk4px::Wide Sk4px::widenHi() const {
+ return Sk16h(vshll_n_u8(vget_low_u8 (this->fVec), 8),
+ vshll_n_u8(vget_high_u8(this->fVec), 8));
+}
+
+inline Sk4px::Wide Sk4px::widenLoHi() const {
+ auto zipped = vzipq_u8(this->fVec, this->fVec);
+ return Sk16h((uint16x8_t)zipped.val[0],
+ (uint16x8_t)zipped.val[1]);
+}
+
+inline Sk4px::Wide Sk4px::mulWiden(const Sk16b& other) const {
+ return Sk16h(vmull_u8(vget_low_u8 (this->fVec), vget_low_u8 (other.fVec)),
+ vmull_u8(vget_high_u8(this->fVec), vget_high_u8(other.fVec)));
+}
+
+inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
+ const Sk4px::Wide o(other); // Should be no code, but allows us to access fLo, fHi.
+ return Sk16b(vcombine_u8(vaddhn_u16(this->fLo.fVec, o.fLo.fVec),
+ vaddhn_u16(this->fHi.fVec, o.fHi.fVec)));
+}
+
+inline Sk4px Sk4px::Wide::div255() const {
+    // Calculated as (x + ((x+128)>>8) + 128) >> 8. The 'r' in each instruction provides each +128.
+ return Sk16b(vcombine_u8(vraddhn_u16(this->fLo.fVec, vrshrq_n_u16(this->fLo.fVec, 8)),
+ vraddhn_u16(this->fHi.fVec, vrshrq_n_u16(this->fHi.fVec, 8))));
+}
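+
+// Sanity check of the rounding above: for x = 255*255 = 65025 the expression
+// (x + ((x+128)>>8) + 128) >> 8 evaluates to (65025 + 254 + 128) >> 8 = 255,
+// and for x = 0 it evaluates to 0, so products of two 8-bit values narrow back
+// to the full 0..255 range.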
+
+inline Sk4px Sk4px::alphas() const {
+ auto as = vshrq_n_u32((uint32x4_t)fVec, SK_A32_SHIFT); // ___3 ___2 ___1 ___0
+ return Sk16b((uint8x16_t)vmulq_n_u32(as, 0x01010101)); // 3333 2222 1111 0000
+}
+
+inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
+ uint8x16_t a8 = vdupq_n_u8(0); // ____ ____ ____ ____
+ a8 = vld1q_lane_u8(a+0, a8, 0); // ____ ____ ____ ___0
+ a8 = vld1q_lane_u8(a+1, a8, 4); // ____ ____ ___1 ___0
+ a8 = vld1q_lane_u8(a+2, a8, 8); // ____ ___2 ___1 ___0
+ a8 = vld1q_lane_u8(a+3, a8, 12); // ___3 ___2 ___1 ___0
+ auto a32 = (uint32x4_t)a8; //
+ return Sk16b((uint8x16_t)vmulq_n_u32(a32, 0x01010101)); // 3333 2222 1111 0000
+}
+
+inline Sk4px Sk4px::Load2Alphas(const SkAlpha a[2]) {
+ uint8x16_t a8 = vdupq_n_u8(0); // ____ ____ ____ ____
+ a8 = vld1q_lane_u8(a+0, a8, 0); // ____ ____ ____ ___0
+ a8 = vld1q_lane_u8(a+1, a8, 4); // ____ ____ ___1 ___0
+ auto a32 = (uint32x4_t)a8; //
+ return Sk16b((uint8x16_t)vmulq_n_u32(a32, 0x01010101)); // ____ ____ 1111 0000
+}
+
+inline Sk4px Sk4px::zeroColors() const {
+ return Sk16b(vandq_u8(this->fVec, (uint8x16_t)vdupq_n_u32(0xFF << SK_A32_SHIFT)));
+}
+
+inline Sk4px Sk4px::zeroAlphas() const {
+ // vbic(a,b) == a & ~b
+ return Sk16b(vbicq_u8(this->fVec, (uint8x16_t)vdupq_n_u32(0xFF << SK_A32_SHIFT)));
+}
+
+} // namespace
+
diff --git a/gfx/skia/skia/src/opts/Sk4px_SSE2.h b/gfx/skia/skia/src/opts/Sk4px_SSE2.h
new file mode 100644
index 000000000..dc0c8ace7
--- /dev/null
+++ b/gfx/skia/skia/src/opts/Sk4px_SSE2.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+namespace { // See Sk4px.h
+
+inline Sk4px Sk4px::DupPMColor(SkPMColor px) { return Sk16b(_mm_set1_epi32(px)); }
+
+inline Sk4px Sk4px::Load4(const SkPMColor px[4]) {
+ return Sk16b(_mm_loadu_si128((const __m128i*)px));
+}
+inline Sk4px Sk4px::Load2(const SkPMColor px[2]) {
+ return Sk16b(_mm_loadl_epi64((const __m128i*)px));
+}
+inline Sk4px Sk4px::Load1(const SkPMColor px[1]) { return Sk16b(_mm_cvtsi32_si128(*px)); }
+
+inline void Sk4px::store4(SkPMColor px[4]) const { _mm_storeu_si128((__m128i*)px, this->fVec); }
+inline void Sk4px::store2(SkPMColor px[2]) const { _mm_storel_epi64((__m128i*)px, this->fVec); }
+inline void Sk4px::store1(SkPMColor px[1]) const { *px = _mm_cvtsi128_si32(this->fVec); }
+
+inline Sk4px::Wide Sk4px::widenLo() const {
+ return Sk16h(_mm_unpacklo_epi8(this->fVec, _mm_setzero_si128()),
+ _mm_unpackhi_epi8(this->fVec, _mm_setzero_si128()));
+}
+
+inline Sk4px::Wide Sk4px::widenHi() const {
+ return Sk16h(_mm_unpacklo_epi8(_mm_setzero_si128(), this->fVec),
+ _mm_unpackhi_epi8(_mm_setzero_si128(), this->fVec));
+}
+
+inline Sk4px::Wide Sk4px::widenLoHi() const {
+ return Sk16h(_mm_unpacklo_epi8(this->fVec, this->fVec),
+ _mm_unpackhi_epi8(this->fVec, this->fVec));
+}
+
+inline Sk4px::Wide Sk4px::mulWiden(const Sk16b& other) const {
+ return this->widenLo() * Sk4px(other).widenLo();
+}
+
+inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
+ Sk4px::Wide r = (*this + other) >> 8;
+ return Sk4px(_mm_packus_epi16(r.fLo.fVec, r.fHi.fVec));
+}
+
+inline Sk4px Sk4px::Wide::div255() const {
+ // (x + 127) / 255 == ((x+128) * 257)>>16,
+ // and _mm_mulhi_epu16 makes the (_ * 257)>>16 part very convenient.
+ const __m128i _128 = _mm_set1_epi16(128),
+ _257 = _mm_set1_epi16(257);
+ return Sk4px(_mm_packus_epi16(_mm_mulhi_epu16(_mm_add_epi16(fLo.fVec, _128), _257),
+ _mm_mulhi_epu16(_mm_add_epi16(fHi.fVec, _128), _257)));
+}
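+
+// For reference, the identity above covers the whole product range: x = 255*255
+// gives ((65025+128) * 257) >> 16 = 16744321 >> 16 = 255, and x = 254 gives
+// ((254+128) * 257) >> 16 = 98174 >> 16 = 1, both matching (x + 127) / 255.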
+
+// Load4Alphas and Load2Alphas use possibly-unaligned loads (SkAlpha[] -> uint16_t or uint32_t).
+// These are safe on x86, often with no speed penalty.
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ inline Sk4px Sk4px::alphas() const {
+ static_assert(SK_A32_SHIFT == 24, "Intel's always little-endian.");
+ __m128i splat = _mm_set_epi8(15,15,15,15, 11,11,11,11, 7,7,7,7, 3,3,3,3);
+ return Sk16b(_mm_shuffle_epi8(this->fVec, splat));
+ }
+
+ inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
+ uint32_t as = *(const uint32_t*)a;
+ __m128i splat = _mm_set_epi8(3,3,3,3, 2,2,2,2, 1,1,1,1, 0,0,0,0);
+ return Sk16b(_mm_shuffle_epi8(_mm_cvtsi32_si128(as), splat));
+ }
+#else
+ inline Sk4px Sk4px::alphas() const {
+ static_assert(SK_A32_SHIFT == 24, "Intel's always little-endian.");
+ // We exploit that A >= rgb for any premul pixel.
+ __m128i as = fVec; // 3xxx 2xxx 1xxx 0xxx
+ as = _mm_max_epu8(as, _mm_srli_epi32(as, 8)); // 33xx 22xx 11xx 00xx
+ as = _mm_max_epu8(as, _mm_srli_epi32(as, 16)); // 3333 2222 1111 0000
+ return Sk16b(as);
+ }
+
+ inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
+ __m128i as = _mm_cvtsi32_si128(*(const uint32_t*)a); // ____ ____ ____ 3210
+ as = _mm_unpacklo_epi8 (as, as); // ____ ____ 3322 1100
+ as = _mm_unpacklo_epi16(as, as); // 3333 2222 1111 0000
+ return Sk16b(as);
+ }
+#endif
+
+inline Sk4px Sk4px::Load2Alphas(const SkAlpha a[2]) {
+ uint32_t as = *(const uint16_t*)a; // Aa -> Aa00
+ return Load4Alphas((const SkAlpha*)&as);
+}
+
+inline Sk4px Sk4px::zeroColors() const {
+ return Sk16b(_mm_and_si128(_mm_set1_epi32(0xFF << SK_A32_SHIFT), this->fVec));
+}
+
+inline Sk4px Sk4px::zeroAlphas() const {
+ // andnot(a,b) == ~a & b
+ return Sk16b(_mm_andnot_si128(_mm_set1_epi32(0xFF << SK_A32_SHIFT), this->fVec));
+}
+
+} // namespace
diff --git a/gfx/skia/skia/src/opts/Sk4px_none.h b/gfx/skia/skia/src/opts/Sk4px_none.h
new file mode 100644
index 000000000..10c3dedd0
--- /dev/null
+++ b/gfx/skia/skia/src/opts/Sk4px_none.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkUtils.h"
+
+namespace { // See Sk4px.h
+
+static_assert(sizeof(Sk4px) == 16, "This file uses memcpy / sk_memset32, so exact size matters.");
+
+inline Sk4px Sk4px::DupPMColor(SkPMColor px) {
+ Sk4px px4 = Sk16b();
+ sk_memset32((uint32_t*)&px4, px, 4);
+ return px4;
+}
+
+inline Sk4px Sk4px::Load4(const SkPMColor px[4]) {
+ Sk4px px4 = Sk16b();
+ memcpy(&px4, px, 16);
+ return px4;
+}
+
+inline Sk4px Sk4px::Load2(const SkPMColor px[2]) {
+ Sk4px px2 = Sk16b();
+ memcpy(&px2, px, 8);
+ return px2;
+}
+
+inline Sk4px Sk4px::Load1(const SkPMColor px[1]) {
+ Sk4px px1 = Sk16b();
+ memcpy(&px1, px, 4);
+ return px1;
+}
+
+inline void Sk4px::store4(SkPMColor px[4]) const { memcpy(px, this, 16); }
+inline void Sk4px::store2(SkPMColor px[2]) const { memcpy(px, this, 8); }
+inline void Sk4px::store1(SkPMColor px[1]) const { memcpy(px, this, 4); }
+
+inline Sk4px::Wide Sk4px::widenLo() const {
+ return Sk16h((*this)[ 0], (*this)[ 1], (*this)[ 2], (*this)[ 3],
+ (*this)[ 4], (*this)[ 5], (*this)[ 6], (*this)[ 7],
+ (*this)[ 8], (*this)[ 9], (*this)[10], (*this)[11],
+ (*this)[12], (*this)[13], (*this)[14], (*this)[15]);
+}
+
+inline Sk4px::Wide Sk4px::widenHi() const { return this->widenLo() << 8; }
+
+inline Sk4px::Wide Sk4px::widenLoHi() const { return this->widenLo() + this->widenHi(); }
+
+inline Sk4px::Wide Sk4px::mulWiden(const Sk16b& other) const {
+ return this->widenLo() * Sk4px(other).widenLo();
+}
+
+inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
+ Sk4px::Wide r = (*this + other) >> 8;
+ return Sk16b(r[ 0], r[ 1], r[ 2], r[ 3],
+ r[ 4], r[ 5], r[ 6], r[ 7],
+ r[ 8], r[ 9], r[10], r[11],
+ r[12], r[13], r[14], r[15]);
+}
+
+inline Sk4px Sk4px::Wide::div255() const {
+ // Calculated as ((x+128) + ((x+128)>>8)) >> 8.
+ auto v = *this + Sk16h(128);
+ return v.addNarrowHi(v>>8);
+}
+
+inline Sk4px Sk4px::alphas() const {
+ static_assert(SK_A32_SHIFT == 24, "This method assumes little-endian.");
+ return Sk16b((*this)[ 3], (*this)[ 3], (*this)[ 3], (*this)[ 3],
+ (*this)[ 7], (*this)[ 7], (*this)[ 7], (*this)[ 7],
+ (*this)[11], (*this)[11], (*this)[11], (*this)[11],
+ (*this)[15], (*this)[15], (*this)[15], (*this)[15]);
+}
+
+inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
+ return Sk16b(a[0], a[0], a[0], a[0],
+ a[1], a[1], a[1], a[1],
+ a[2], a[2], a[2], a[2],
+ a[3], a[3], a[3], a[3]);
+}
+
+inline Sk4px Sk4px::Load2Alphas(const SkAlpha a[2]) {
+ return Sk16b(a[0], a[0], a[0], a[0],
+ a[1], a[1], a[1], a[1],
+ 0,0,0,0,
+ 0,0,0,0);
+}
+
+inline Sk4px Sk4px::zeroAlphas() const {
+ static_assert(SK_A32_SHIFT == 24, "This method assumes little-endian.");
+ return Sk16b((*this)[ 0], (*this)[ 1], (*this)[ 2], 0,
+ (*this)[ 4], (*this)[ 5], (*this)[ 6], 0,
+ (*this)[ 8], (*this)[ 9], (*this)[10], 0,
+ (*this)[12], (*this)[13], (*this)[14], 0);
+}
+
+inline Sk4px Sk4px::zeroColors() const {
+ static_assert(SK_A32_SHIFT == 24, "This method assumes little-endian.");
+ return Sk16b(0,0,0, (*this)[ 3],
+ 0,0,0, (*this)[ 7],
+ 0,0,0, (*this)[11],
+ 0,0,0, (*this)[15]);
+}
+
+} // namespace
diff --git a/gfx/skia/skia/src/opts/SkBitmapFilter_opts_SSE2.cpp b/gfx/skia/skia/src/opts/SkBitmapFilter_opts_SSE2.cpp
new file mode 100644
index 000000000..ecaad23d7
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBitmapFilter_opts_SSE2.cpp
@@ -0,0 +1,500 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <emmintrin.h>
+#include "SkBitmap.h"
+#include "SkBitmapFilter_opts_SSE2.h"
+#include "SkBitmapProcState.h"
+#include "SkColor.h"
+#include "SkColorPriv.h"
+#include "SkConvolver.h"
+#include "SkShader.h"
+#include "SkUnPreMultiply.h"
+
+#if 0
+static inline void print128i(__m128i value) {
+ int *v = (int*) &value;
+ printf("% .11d % .11d % .11d % .11d\n", v[0], v[1], v[2], v[3]);
+}
+
+static inline void print128i_16(__m128i value) {
+ short *v = (short*) &value;
+ printf("% .5d % .5d % .5d % .5d % .5d % .5d % .5d % .5d\n", v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);
+}
+
+static inline void print128i_8(__m128i value) {
+ unsigned char *v = (unsigned char*) &value;
+ printf("%.3u %.3u %.3u %.3u %.3u %.3u %.3u %.3u %.3u %.3u %.3u %.3u %.3u %.3u %.3u %.3u\n",
+ v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7],
+ v[8], v[9], v[10], v[11], v[12], v[13], v[14], v[15]
+ );
+}
+
+static inline void print128f(__m128 value) {
+ float *f = (float*) &value;
+ printf("%3.4f %3.4f %3.4f %3.4f\n", f[0], f[1], f[2], f[3]);
+}
+#endif
+
+// Convolves horizontally along a single row. The row data is given in
+// |src_data| and continues for the num_values() of the filter.
+void convolveHorizontally_SSE2(const unsigned char* src_data,
+ const SkConvolutionFilter1D& filter,
+ unsigned char* out_row,
+ bool /*has_alpha*/) {
+ int num_values = filter.numValues();
+
+ int filter_offset, filter_length;
+ __m128i zero = _mm_setzero_si128();
+ __m128i mask[4];
+ // |mask| will be used to decimate all extra filter coefficients that are
+ // loaded by SIMD when |filter_length| is not divisible by 4.
+    // mask[0] is not used in the following algorithm.
+ mask[1] = _mm_set_epi16(0, 0, 0, 0, 0, 0, 0, -1);
+ mask[2] = _mm_set_epi16(0, 0, 0, 0, 0, 0, -1, -1);
+ mask[3] = _mm_set_epi16(0, 0, 0, 0, 0, -1, -1, -1);
+
+ // Output one pixel each iteration, calculating all channels (RGBA) together.
+ for (int out_x = 0; out_x < num_values; out_x++) {
+ const SkConvolutionFilter1D::ConvolutionFixed* filter_values =
+ filter.FilterForValue(out_x, &filter_offset, &filter_length);
+
+ __m128i accum = _mm_setzero_si128();
+
+ // Compute the first pixel in this row that the filter affects. It will
+ // touch |filter_length| pixels (4 bytes each) after this.
+ const __m128i* row_to_filter =
+ reinterpret_cast<const __m128i*>(&src_data[filter_offset << 2]);
+
+ // We will load and accumulate with four coefficients per iteration.
+ for (int filter_x = 0; filter_x < filter_length >> 2; filter_x++) {
+
+ // Load 4 coefficients => duplicate 1st and 2nd of them for all channels.
+ __m128i coeff, coeff16;
+ // [16] xx xx xx xx c3 c2 c1 c0
+ coeff = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(filter_values));
+ // [16] xx xx xx xx c1 c1 c0 c0
+ coeff16 = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(1, 1, 0, 0));
+ // [16] c1 c1 c1 c1 c0 c0 c0 c0
+ coeff16 = _mm_unpacklo_epi16(coeff16, coeff16);
+
+ // Load four pixels => unpack the first two pixels to 16 bits =>
+ // multiply with coefficients => accumulate the convolution result.
+ // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
+ __m128i src8 = _mm_loadu_si128(row_to_filter);
+ // [16] a1 b1 g1 r1 a0 b0 g0 r0
+ __m128i src16 = _mm_unpacklo_epi8(src8, zero);
+ __m128i mul_hi = _mm_mulhi_epi16(src16, coeff16);
+ __m128i mul_lo = _mm_mullo_epi16(src16, coeff16);
+ // [32] a0*c0 b0*c0 g0*c0 r0*c0
+ __m128i t = _mm_unpacklo_epi16(mul_lo, mul_hi);
+ accum = _mm_add_epi32(accum, t);
+ // [32] a1*c1 b1*c1 g1*c1 r1*c1
+ t = _mm_unpackhi_epi16(mul_lo, mul_hi);
+ accum = _mm_add_epi32(accum, t);
+
+ // Duplicate 3rd and 4th coefficients for all channels =>
+ // unpack the 3rd and 4th pixels to 16 bits => multiply with coefficients
+ // => accumulate the convolution results.
+ // [16] xx xx xx xx c3 c3 c2 c2
+ coeff16 = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(3, 3, 2, 2));
+ // [16] c3 c3 c3 c3 c2 c2 c2 c2
+ coeff16 = _mm_unpacklo_epi16(coeff16, coeff16);
+ // [16] a3 g3 b3 r3 a2 g2 b2 r2
+ src16 = _mm_unpackhi_epi8(src8, zero);
+ mul_hi = _mm_mulhi_epi16(src16, coeff16);
+ mul_lo = _mm_mullo_epi16(src16, coeff16);
+ // [32] a2*c2 b2*c2 g2*c2 r2*c2
+ t = _mm_unpacklo_epi16(mul_lo, mul_hi);
+ accum = _mm_add_epi32(accum, t);
+ // [32] a3*c3 b3*c3 g3*c3 r3*c3
+ t = _mm_unpackhi_epi16(mul_lo, mul_hi);
+ accum = _mm_add_epi32(accum, t);
+
+ // Advance the pixel and coefficients pointers.
+ row_to_filter += 1;
+ filter_values += 4;
+ }
+
+        // When |filter_length| is not divisible by 4, we need to zero out the
+        // extra filter coefficients that the SIMD load picked up. Other than
+        // that, the algorithm is the same as above, except that the 4th pixel
+        // will always be absent.
+ int r = filter_length&3;
+ if (r) {
+ // Note: filter_values must be padded to align_up(filter_offset, 8).
+ __m128i coeff, coeff16;
+ coeff = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(filter_values));
+ // Mask out extra filter taps.
+ coeff = _mm_and_si128(coeff, mask[r]);
+ coeff16 = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(1, 1, 0, 0));
+ coeff16 = _mm_unpacklo_epi16(coeff16, coeff16);
+
+ // Note: line buffer must be padded to align_up(filter_offset, 16).
+            // We resolve this by using the C version for the last horizontal line.
+ __m128i src8 = _mm_loadu_si128(row_to_filter);
+ __m128i src16 = _mm_unpacklo_epi8(src8, zero);
+ __m128i mul_hi = _mm_mulhi_epi16(src16, coeff16);
+ __m128i mul_lo = _mm_mullo_epi16(src16, coeff16);
+ __m128i t = _mm_unpacklo_epi16(mul_lo, mul_hi);
+ accum = _mm_add_epi32(accum, t);
+ t = _mm_unpackhi_epi16(mul_lo, mul_hi);
+ accum = _mm_add_epi32(accum, t);
+
+ src16 = _mm_unpackhi_epi8(src8, zero);
+ coeff16 = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(3, 3, 2, 2));
+ coeff16 = _mm_unpacklo_epi16(coeff16, coeff16);
+ mul_hi = _mm_mulhi_epi16(src16, coeff16);
+ mul_lo = _mm_mullo_epi16(src16, coeff16);
+ t = _mm_unpacklo_epi16(mul_lo, mul_hi);
+ accum = _mm_add_epi32(accum, t);
+ }
+
+ // Shift right for fixed point implementation.
+ accum = _mm_srai_epi32(accum, SkConvolutionFilter1D::kShiftBits);
+
+ // Packing 32 bits |accum| to 16 bits per channel (signed saturation).
+ accum = _mm_packs_epi32(accum, zero);
+ // Packing 16 bits |accum| to 8 bits per channel (unsigned saturation).
+ accum = _mm_packus_epi16(accum, zero);
+
+ // Store the pixel value of 32 bits.
+ *(reinterpret_cast<int*>(out_row)) = _mm_cvtsi128_si32(accum);
+ out_row += 4;
+ }
+}
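+
+// Numeric format note: ConvolutionFixed coefficients are signed fixed-point
+// values with SkConvolutionFilter1D::kShiftBits fractional bits. Each 16x16
+// multiply above is reassembled into a 32-bit product from its high/low halves,
+// the products are summed per channel, and the final >> kShiftBits followed by
+// the two saturating packs clamps every channel back to the 8-bit 0..255 range.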
+
+// Convolves horizontally along four rows. The row data is given in
+// |src_data| and continues for the num_values() of the filter.
+// The algorithm is almost the same as |convolveHorizontally_SSE2|. Please
+// refer to that function for detailed comments.
+void convolve4RowsHorizontally_SSE2(const unsigned char* src_data[4],
+ const SkConvolutionFilter1D& filter,
+ unsigned char* out_row[4],
+ size_t outRowBytes) {
+ SkDEBUGCODE(const unsigned char* out_row_0_start = out_row[0];)
+
+ int num_values = filter.numValues();
+
+ int filter_offset, filter_length;
+ __m128i zero = _mm_setzero_si128();
+ __m128i mask[4];
+ // |mask| will be used to decimate all extra filter coefficients that are
+ // loaded by SIMD when |filter_length| is not divisible by 4.
+    // mask[0] is not used in the following algorithm.
+ mask[1] = _mm_set_epi16(0, 0, 0, 0, 0, 0, 0, -1);
+ mask[2] = _mm_set_epi16(0, 0, 0, 0, 0, 0, -1, -1);
+ mask[3] = _mm_set_epi16(0, 0, 0, 0, 0, -1, -1, -1);
+
+ // Output one pixel each iteration, calculating all channels (RGBA) together.
+ for (int out_x = 0; out_x < num_values; out_x++) {
+ const SkConvolutionFilter1D::ConvolutionFixed* filter_values =
+ filter.FilterForValue(out_x, &filter_offset, &filter_length);
+
+ // four pixels in a column per iteration.
+ __m128i accum0 = _mm_setzero_si128();
+ __m128i accum1 = _mm_setzero_si128();
+ __m128i accum2 = _mm_setzero_si128();
+ __m128i accum3 = _mm_setzero_si128();
+ int start = (filter_offset<<2);
+ // We will load and accumulate with four coefficients per iteration.
+ for (int filter_x = 0; filter_x < (filter_length >> 2); filter_x++) {
+ __m128i coeff, coeff16lo, coeff16hi;
+ // [16] xx xx xx xx c3 c2 c1 c0
+ coeff = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(filter_values));
+ // [16] xx xx xx xx c1 c1 c0 c0
+ coeff16lo = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(1, 1, 0, 0));
+ // [16] c1 c1 c1 c1 c0 c0 c0 c0
+ coeff16lo = _mm_unpacklo_epi16(coeff16lo, coeff16lo);
+ // [16] xx xx xx xx c3 c3 c2 c2
+ coeff16hi = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(3, 3, 2, 2));
+ // [16] c3 c3 c3 c3 c2 c2 c2 c2
+ coeff16hi = _mm_unpacklo_epi16(coeff16hi, coeff16hi);
+
+ __m128i src8, src16, mul_hi, mul_lo, t;
+
+#define ITERATION(src, accum) \
+ src8 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src)); \
+ src16 = _mm_unpacklo_epi8(src8, zero); \
+ mul_hi = _mm_mulhi_epi16(src16, coeff16lo); \
+ mul_lo = _mm_mullo_epi16(src16, coeff16lo); \
+ t = _mm_unpacklo_epi16(mul_lo, mul_hi); \
+ accum = _mm_add_epi32(accum, t); \
+ t = _mm_unpackhi_epi16(mul_lo, mul_hi); \
+ accum = _mm_add_epi32(accum, t); \
+ src16 = _mm_unpackhi_epi8(src8, zero); \
+ mul_hi = _mm_mulhi_epi16(src16, coeff16hi); \
+ mul_lo = _mm_mullo_epi16(src16, coeff16hi); \
+ t = _mm_unpacklo_epi16(mul_lo, mul_hi); \
+ accum = _mm_add_epi32(accum, t); \
+ t = _mm_unpackhi_epi16(mul_lo, mul_hi); \
+ accum = _mm_add_epi32(accum, t)
+
+ ITERATION(src_data[0] + start, accum0);
+ ITERATION(src_data[1] + start, accum1);
+ ITERATION(src_data[2] + start, accum2);
+ ITERATION(src_data[3] + start, accum3);
+
+ start += 16;
+ filter_values += 4;
+ }
+
+ int r = filter_length & 3;
+ if (r) {
+ // Note: filter_values must be padded to align_up(filter_offset, 8);
+ __m128i coeff;
+ coeff = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(filter_values));
+ // Mask out extra filter taps.
+ coeff = _mm_and_si128(coeff, mask[r]);
+
+ __m128i coeff16lo = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(1, 1, 0, 0));
+            // [16] c1 c1 c1 c1 c0 c0 c0 c0
+ coeff16lo = _mm_unpacklo_epi16(coeff16lo, coeff16lo);
+ __m128i coeff16hi = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(3, 3, 2, 2));
+ coeff16hi = _mm_unpacklo_epi16(coeff16hi, coeff16hi);
+
+ __m128i src8, src16, mul_hi, mul_lo, t;
+
+ ITERATION(src_data[0] + start, accum0);
+ ITERATION(src_data[1] + start, accum1);
+ ITERATION(src_data[2] + start, accum2);
+ ITERATION(src_data[3] + start, accum3);
+ }
+
+ accum0 = _mm_srai_epi32(accum0, SkConvolutionFilter1D::kShiftBits);
+ accum0 = _mm_packs_epi32(accum0, zero);
+ accum0 = _mm_packus_epi16(accum0, zero);
+ accum1 = _mm_srai_epi32(accum1, SkConvolutionFilter1D::kShiftBits);
+ accum1 = _mm_packs_epi32(accum1, zero);
+ accum1 = _mm_packus_epi16(accum1, zero);
+ accum2 = _mm_srai_epi32(accum2, SkConvolutionFilter1D::kShiftBits);
+ accum2 = _mm_packs_epi32(accum2, zero);
+ accum2 = _mm_packus_epi16(accum2, zero);
+ accum3 = _mm_srai_epi32(accum3, SkConvolutionFilter1D::kShiftBits);
+ accum3 = _mm_packs_epi32(accum3, zero);
+ accum3 = _mm_packus_epi16(accum3, zero);
+
+ // We seem to be running off the edge here (chromium:491660).
+ SkASSERT(((size_t)out_row[0] - (size_t)out_row_0_start) < outRowBytes);
+
+ *(reinterpret_cast<int*>(out_row[0])) = _mm_cvtsi128_si32(accum0);
+ *(reinterpret_cast<int*>(out_row[1])) = _mm_cvtsi128_si32(accum1);
+ *(reinterpret_cast<int*>(out_row[2])) = _mm_cvtsi128_si32(accum2);
+ *(reinterpret_cast<int*>(out_row[3])) = _mm_cvtsi128_si32(accum3);
+
+ out_row[0] += 4;
+ out_row[1] += 4;
+ out_row[2] += 4;
+ out_row[3] += 4;
+ }
+}
+
+// Does vertical convolution to produce one output row. The filter values and
+// length are given in the first two parameters. These are applied to each
+// of the rows pointed to in the |source_data_rows| array, with each row
+// being |pixel_width| wide.
+//
+// The output must have room for |pixel_width * 4| bytes.
+template<bool has_alpha>
+void convolveVertically_SSE2(const SkConvolutionFilter1D::ConvolutionFixed* filter_values,
+ int filter_length,
+ unsigned char* const* source_data_rows,
+ int pixel_width,
+ unsigned char* out_row) {
+ int width = pixel_width & ~3;
+
+ __m128i zero = _mm_setzero_si128();
+ __m128i accum0, accum1, accum2, accum3, coeff16;
+ const __m128i* src;
+ // Output four pixels per iteration (16 bytes).
+ for (int out_x = 0; out_x < width; out_x += 4) {
+
+ // Accumulated result for each pixel. 32 bits per RGBA channel.
+ accum0 = _mm_setzero_si128();
+ accum1 = _mm_setzero_si128();
+ accum2 = _mm_setzero_si128();
+ accum3 = _mm_setzero_si128();
+
+ // Convolve with one filter coefficient per iteration.
+ for (int filter_y = 0; filter_y < filter_length; filter_y++) {
+
+ // Duplicate the filter coefficient 8 times.
+ // [16] cj cj cj cj cj cj cj cj
+ coeff16 = _mm_set1_epi16(filter_values[filter_y]);
+
+ // Load four pixels (16 bytes) together.
+ // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
+ src = reinterpret_cast<const __m128i*>(
+ &source_data_rows[filter_y][out_x << 2]);
+ __m128i src8 = _mm_loadu_si128(src);
+
+            // Unpack 1st and 2nd pixels from 8 bits to 16 bits for each channel =>
+ // multiply with current coefficient => accumulate the result.
+ // [16] a1 b1 g1 r1 a0 b0 g0 r0
+ __m128i src16 = _mm_unpacklo_epi8(src8, zero);
+ __m128i mul_hi = _mm_mulhi_epi16(src16, coeff16);
+ __m128i mul_lo = _mm_mullo_epi16(src16, coeff16);
+ // [32] a0 b0 g0 r0
+ __m128i t = _mm_unpacklo_epi16(mul_lo, mul_hi);
+ accum0 = _mm_add_epi32(accum0, t);
+ // [32] a1 b1 g1 r1
+ t = _mm_unpackhi_epi16(mul_lo, mul_hi);
+ accum1 = _mm_add_epi32(accum1, t);
+
+            // Unpack 3rd and 4th pixels from 8 bits to 16 bits for each channel =>
+ // multiply with current coefficient => accumulate the result.
+ // [16] a3 b3 g3 r3 a2 b2 g2 r2
+ src16 = _mm_unpackhi_epi8(src8, zero);
+ mul_hi = _mm_mulhi_epi16(src16, coeff16);
+ mul_lo = _mm_mullo_epi16(src16, coeff16);
+ // [32] a2 b2 g2 r2
+ t = _mm_unpacklo_epi16(mul_lo, mul_hi);
+ accum2 = _mm_add_epi32(accum2, t);
+ // [32] a3 b3 g3 r3
+ t = _mm_unpackhi_epi16(mul_lo, mul_hi);
+ accum3 = _mm_add_epi32(accum3, t);
+ }
+
+ // Shift right for fixed point implementation.
+ accum0 = _mm_srai_epi32(accum0, SkConvolutionFilter1D::kShiftBits);
+ accum1 = _mm_srai_epi32(accum1, SkConvolutionFilter1D::kShiftBits);
+ accum2 = _mm_srai_epi32(accum2, SkConvolutionFilter1D::kShiftBits);
+ accum3 = _mm_srai_epi32(accum3, SkConvolutionFilter1D::kShiftBits);
+
+ // Packing 32 bits |accum| to 16 bits per channel (signed saturation).
+ // [16] a1 b1 g1 r1 a0 b0 g0 r0
+ accum0 = _mm_packs_epi32(accum0, accum1);
+ // [16] a3 b3 g3 r3 a2 b2 g2 r2
+ accum2 = _mm_packs_epi32(accum2, accum3);
+
+ // Packing 16 bits |accum| to 8 bits per channel (unsigned saturation).
+ // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
+ accum0 = _mm_packus_epi16(accum0, accum2);
+
+ if (has_alpha) {
+ // Compute the max(ri, gi, bi) for each pixel.
+ // [8] xx a3 b3 g3 xx a2 b2 g2 xx a1 b1 g1 xx a0 b0 g0
+ __m128i a = _mm_srli_epi32(accum0, 8);
+ // [8] xx xx xx max3 xx xx xx max2 xx xx xx max1 xx xx xx max0
+ __m128i b = _mm_max_epu8(a, accum0); // Max of r and g.
+ // [8] xx xx a3 b3 xx xx a2 b2 xx xx a1 b1 xx xx a0 b0
+ a = _mm_srli_epi32(accum0, 16);
+ // [8] xx xx xx max3 xx xx xx max2 xx xx xx max1 xx xx xx max0
+ b = _mm_max_epu8(a, b); // Max of r and g and b.
+ // [8] max3 00 00 00 max2 00 00 00 max1 00 00 00 max0 00 00 00
+ b = _mm_slli_epi32(b, 24);
+
+ // Make sure the value of alpha channel is always larger than maximum
+ // value of color channels.
+ accum0 = _mm_max_epu8(b, accum0);
+ } else {
+ // Set value of alpha channels to 0xFF.
+ __m128i mask = _mm_set1_epi32(0xff000000);
+ accum0 = _mm_or_si128(accum0, mask);
+ }
+
+ // Store the convolution result (16 bytes) and advance the pixel pointers.
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(out_row), accum0);
+ out_row += 16;
+ }
+
+    // When the width of the output is not divisible by 4, we need to store one
+    // pixel (4 bytes) at a time, and the fourth pixel is always absent.
+ if (pixel_width & 3) {
+ accum0 = _mm_setzero_si128();
+ accum1 = _mm_setzero_si128();
+ accum2 = _mm_setzero_si128();
+ for (int filter_y = 0; filter_y < filter_length; ++filter_y) {
+ coeff16 = _mm_set1_epi16(filter_values[filter_y]);
+ // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
+ src = reinterpret_cast<const __m128i*>(
+ &source_data_rows[filter_y][width<<2]);
+ __m128i src8 = _mm_loadu_si128(src);
+ // [16] a1 b1 g1 r1 a0 b0 g0 r0
+ __m128i src16 = _mm_unpacklo_epi8(src8, zero);
+ __m128i mul_hi = _mm_mulhi_epi16(src16, coeff16);
+ __m128i mul_lo = _mm_mullo_epi16(src16, coeff16);
+ // [32] a0 b0 g0 r0
+ __m128i t = _mm_unpacklo_epi16(mul_lo, mul_hi);
+ accum0 = _mm_add_epi32(accum0, t);
+ // [32] a1 b1 g1 r1
+ t = _mm_unpackhi_epi16(mul_lo, mul_hi);
+ accum1 = _mm_add_epi32(accum1, t);
+ // [16] a3 b3 g3 r3 a2 b2 g2 r2
+ src16 = _mm_unpackhi_epi8(src8, zero);
+ mul_hi = _mm_mulhi_epi16(src16, coeff16);
+ mul_lo = _mm_mullo_epi16(src16, coeff16);
+ // [32] a2 b2 g2 r2
+ t = _mm_unpacklo_epi16(mul_lo, mul_hi);
+ accum2 = _mm_add_epi32(accum2, t);
+ }
+
+ accum0 = _mm_srai_epi32(accum0, SkConvolutionFilter1D::kShiftBits);
+ accum1 = _mm_srai_epi32(accum1, SkConvolutionFilter1D::kShiftBits);
+ accum2 = _mm_srai_epi32(accum2, SkConvolutionFilter1D::kShiftBits);
+ // [16] a1 b1 g1 r1 a0 b0 g0 r0
+ accum0 = _mm_packs_epi32(accum0, accum1);
+ // [16] a3 b3 g3 r3 a2 b2 g2 r2
+ accum2 = _mm_packs_epi32(accum2, zero);
+ // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
+ accum0 = _mm_packus_epi16(accum0, accum2);
+ if (has_alpha) {
+ // [8] xx a3 b3 g3 xx a2 b2 g2 xx a1 b1 g1 xx a0 b0 g0
+ __m128i a = _mm_srli_epi32(accum0, 8);
+ // [8] xx xx xx max3 xx xx xx max2 xx xx xx max1 xx xx xx max0
+ __m128i b = _mm_max_epu8(a, accum0); // Max of r and g.
+ // [8] xx xx a3 b3 xx xx a2 b2 xx xx a1 b1 xx xx a0 b0
+ a = _mm_srli_epi32(accum0, 16);
+ // [8] xx xx xx max3 xx xx xx max2 xx xx xx max1 xx xx xx max0
+ b = _mm_max_epu8(a, b); // Max of r and g and b.
+ // [8] max3 00 00 00 max2 00 00 00 max1 00 00 00 max0 00 00 00
+ b = _mm_slli_epi32(b, 24);
+ accum0 = _mm_max_epu8(b, accum0);
+ } else {
+ __m128i mask = _mm_set1_epi32(0xff000000);
+ accum0 = _mm_or_si128(accum0, mask);
+ }
+
+ for (int out_x = width; out_x < pixel_width; out_x++) {
+ *(reinterpret_cast<int*>(out_row)) = _mm_cvtsi128_si32(accum0);
+ accum0 = _mm_srli_si128(accum0, 4);
+ out_row += 4;
+ }
+ }
+}
+
+void convolveVertically_SSE2(const SkConvolutionFilter1D::ConvolutionFixed* filter_values,
+ int filter_length,
+ unsigned char* const* source_data_rows,
+ int pixel_width,
+ unsigned char* out_row,
+ bool has_alpha) {
+ if (has_alpha) {
+ convolveVertically_SSE2<true>(filter_values,
+ filter_length,
+ source_data_rows,
+ pixel_width,
+ out_row);
+ } else {
+ convolveVertically_SSE2<false>(filter_values,
+ filter_length,
+ source_data_rows,
+ pixel_width,
+ out_row);
+ }
+}
+
+void applySIMDPadding_SSE2(SkConvolutionFilter1D *filter) {
+  // Pad with extra dummy coefficients after the coefficients of the last
+  // filter so that SIMD instructions, which load 8 or 16 bytes at a time,
+  // never read past the end of the buffer. We are not trying to align the
+  // coefficients right now due to the opaqueness of the <vector>
+  // implementation. This has to be done after all |addFilter| calls.
+ for (int i = 0; i < 8; ++i) {
+ filter->addFilterValue(static_cast<SkConvolutionFilter1D::ConvolutionFixed>(0));
+ }
+}
diff --git a/gfx/skia/skia/src/opts/SkBitmapFilter_opts_SSE2.h b/gfx/skia/skia/src/opts/SkBitmapFilter_opts_SSE2.h
new file mode 100644
index 000000000..46ab5c8ae
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBitmapFilter_opts_SSE2.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapFilter_opts_sse2_DEFINED
+#define SkBitmapFilter_opts_sse2_DEFINED
+
+#include "SkBitmapProcState.h"
+#include "SkConvolver.h"
+
+void convolveVertically_SSE2(const SkConvolutionFilter1D::ConvolutionFixed* filter_values,
+ int filter_length,
+ unsigned char* const* source_data_rows,
+ int pixel_width,
+ unsigned char* out_row,
+ bool has_alpha);
+void convolve4RowsHorizontally_SSE2(const unsigned char* src_data[4],
+ const SkConvolutionFilter1D& filter,
+ unsigned char* out_row[4],
+ size_t outRowBytes);
+void convolveHorizontally_SSE2(const unsigned char* src_data,
+ const SkConvolutionFilter1D& filter,
+ unsigned char* out_row,
+ bool has_alpha);
+void applySIMDPadding_SSE2(SkConvolutionFilter1D* filter);
+
+#endif
diff --git a/gfx/skia/skia/src/opts/SkBitmapProcState_arm_neon.cpp b/gfx/skia/skia/src/opts/SkBitmapProcState_arm_neon.cpp
new file mode 100644
index 000000000..ce2656da6
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBitmapProcState_arm_neon.cpp
@@ -0,0 +1,499 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapProcState.h"
+#include "SkBitmapProcState_filter.h"
+#include "SkColorPriv.h"
+#include "SkFilterProc.h"
+#include "SkPaint.h"
+#include "SkShader.h" // for tilemodes
+#include "SkUtilsArm.h"
+
+// Required to ensure the table is part of the final binary.
+extern const SkBitmapProcState::SampleProc32 gSkBitmapProcStateSample32_neon[];
+
+#define NAME_WRAP(x) x ## _neon
+#include "SkBitmapProcState_filter_neon.h"
+#include "SkBitmapProcState_procs.h"
+
+const SkBitmapProcState::SampleProc32 gSkBitmapProcStateSample32_neon[] = {
+ S32_opaque_D32_nofilter_DXDY_neon,
+ S32_alpha_D32_nofilter_DXDY_neon,
+ S32_opaque_D32_nofilter_DX_neon,
+ S32_alpha_D32_nofilter_DX_neon,
+ S32_opaque_D32_filter_DXDY_neon,
+ S32_alpha_D32_filter_DXDY_neon,
+ S32_opaque_D32_filter_DX_neon,
+ S32_alpha_D32_filter_DX_neon,
+
+ S16_opaque_D32_nofilter_DXDY_neon,
+ S16_alpha_D32_nofilter_DXDY_neon,
+ S16_opaque_D32_nofilter_DX_neon,
+ S16_alpha_D32_nofilter_DX_neon,
+ S16_opaque_D32_filter_DXDY_neon,
+ S16_alpha_D32_filter_DXDY_neon,
+ S16_opaque_D32_filter_DX_neon,
+ S16_alpha_D32_filter_DX_neon,
+
+ SI8_opaque_D32_nofilter_DXDY_neon,
+ SI8_alpha_D32_nofilter_DXDY_neon,
+ SI8_opaque_D32_nofilter_DX_neon,
+ SI8_alpha_D32_nofilter_DX_neon,
+ SI8_opaque_D32_filter_DXDY_neon,
+ SI8_alpha_D32_filter_DXDY_neon,
+ SI8_opaque_D32_filter_DX_neon,
+ SI8_alpha_D32_filter_DX_neon,
+
+ S4444_opaque_D32_nofilter_DXDY_neon,
+ S4444_alpha_D32_nofilter_DXDY_neon,
+ S4444_opaque_D32_nofilter_DX_neon,
+ S4444_alpha_D32_nofilter_DX_neon,
+ S4444_opaque_D32_filter_DXDY_neon,
+ S4444_alpha_D32_filter_DXDY_neon,
+ S4444_opaque_D32_filter_DX_neon,
+ S4444_alpha_D32_filter_DX_neon,
+
+    // A8 treats alpha/opaque the same (equally efficient)
+ SA8_alpha_D32_nofilter_DXDY_neon,
+ SA8_alpha_D32_nofilter_DXDY_neon,
+ SA8_alpha_D32_nofilter_DX_neon,
+ SA8_alpha_D32_nofilter_DX_neon,
+ SA8_alpha_D32_filter_DXDY_neon,
+ SA8_alpha_D32_filter_DXDY_neon,
+ SA8_alpha_D32_filter_DX_neon,
+ SA8_alpha_D32_filter_DX_neon,
+
+ // todo: possibly specialize on opaqueness
+ SG8_alpha_D32_nofilter_DXDY_neon,
+ SG8_alpha_D32_nofilter_DXDY_neon,
+ SG8_alpha_D32_nofilter_DX_neon,
+ SG8_alpha_D32_nofilter_DX_neon,
+ SG8_alpha_D32_filter_DXDY_neon,
+ SG8_alpha_D32_filter_DXDY_neon,
+ SG8_alpha_D32_filter_DX_neon,
+ SG8_alpha_D32_filter_DX_neon,
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include <arm_neon.h>
+#include "SkConvolver.h"
+
+// Convolves horizontally along a single row. The row data is given in
+// |srcData| and continues for the numValues() of the filter.
+void convolveHorizontally_neon(const unsigned char* srcData,
+ const SkConvolutionFilter1D& filter,
+ unsigned char* outRow,
+ bool hasAlpha) {
+ // Loop over each pixel on this row in the output image.
+ int numValues = filter.numValues();
+ for (int outX = 0; outX < numValues; outX++) {
+ uint8x8_t coeff_mask0 = vcreate_u8(0x0100010001000100);
+ uint8x8_t coeff_mask1 = vcreate_u8(0x0302030203020302);
+ uint8x8_t coeff_mask2 = vcreate_u8(0x0504050405040504);
+ uint8x8_t coeff_mask3 = vcreate_u8(0x0706070607060706);
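+    // The masks above, used with vtbl1_u8 below, each replicate one 16-bit
+    // filter coefficient across all four lanes of a 64-bit vector: coeff_mask0
+    // picks bytes {0,1} four times (coefficient 0), coeff_mask1 picks bytes
+    // {2,3} (coefficient 1), and so on.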
+ // Get the filter that determines the current output pixel.
+ int filterOffset, filterLength;
+ const SkConvolutionFilter1D::ConvolutionFixed* filterValues =
+ filter.FilterForValue(outX, &filterOffset, &filterLength);
+
+ // Compute the first pixel in this row that the filter affects. It will
+ // touch |filterLength| pixels (4 bytes each) after this.
+ const unsigned char* rowToFilter = &srcData[filterOffset * 4];
+
+ // Apply the filter to the row to get the destination pixel in |accum|.
+ int32x4_t accum = vdupq_n_s32(0);
+ for (int filterX = 0; filterX < filterLength >> 2; filterX++) {
+ // Load 4 coefficients
+ int16x4_t coeffs, coeff0, coeff1, coeff2, coeff3;
+ coeffs = vld1_s16(filterValues);
+ coeff0 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_s16(coeffs), coeff_mask0));
+ coeff1 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_s16(coeffs), coeff_mask1));
+ coeff2 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_s16(coeffs), coeff_mask2));
+ coeff3 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_s16(coeffs), coeff_mask3));
+
+ // Load pixels and calc
+ uint8x16_t pixels = vld1q_u8(rowToFilter);
+ int16x8_t p01_16 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pixels)));
+ int16x8_t p23_16 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pixels)));
+
+ int16x4_t p0_src = vget_low_s16(p01_16);
+ int16x4_t p1_src = vget_high_s16(p01_16);
+ int16x4_t p2_src = vget_low_s16(p23_16);
+ int16x4_t p3_src = vget_high_s16(p23_16);
+
+ int32x4_t p0 = vmull_s16(p0_src, coeff0);
+ int32x4_t p1 = vmull_s16(p1_src, coeff1);
+ int32x4_t p2 = vmull_s16(p2_src, coeff2);
+ int32x4_t p3 = vmull_s16(p3_src, coeff3);
+
+ accum += p0;
+ accum += p1;
+ accum += p2;
+ accum += p3;
+
+ // Advance the pointers
+ rowToFilter += 16;
+ filterValues += 4;
+ }
+ int r = filterLength & 3;
+ if (r) {
+ const uint16_t mask[4][4] = {
+ {0, 0, 0, 0},
+ {0xFFFF, 0, 0, 0},
+ {0xFFFF, 0xFFFF, 0, 0},
+ {0xFFFF, 0xFFFF, 0xFFFF, 0}
+ };
+ uint16x4_t coeffs;
+ int16x4_t coeff0, coeff1, coeff2;
+ coeffs = vld1_u16(reinterpret_cast<const uint16_t*>(filterValues));
+ coeffs &= vld1_u16(&mask[r][0]);
+ coeff0 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_u16(coeffs), coeff_mask0));
+ coeff1 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_u16(coeffs), coeff_mask1));
+ coeff2 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_u16(coeffs), coeff_mask2));
+
+ // Load pixels and calc
+ uint8x16_t pixels = vld1q_u8(rowToFilter);
+ int16x8_t p01_16 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pixels)));
+ int16x8_t p23_16 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pixels)));
+ int32x4_t p0 = vmull_s16(vget_low_s16(p01_16), coeff0);
+ int32x4_t p1 = vmull_s16(vget_high_s16(p01_16), coeff1);
+ int32x4_t p2 = vmull_s16(vget_low_s16(p23_16), coeff2);
+
+ accum += p0;
+ accum += p1;
+ accum += p2;
+ }
+
+ // Bring this value back in range. All of the filter scaling factors
+ // are in fixed point with kShiftBits bits of fractional part.
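+    // For example, with kShiftBits == 14 a filter weight of 0.25 is stored as
+    // 0.25 * (1 << 14) == 4096, and shifting the 32-bit accumulator right by
+    // kShiftBits converts the sum back to the 8-bit pixel range.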
+ accum = vshrq_n_s32(accum, SkConvolutionFilter1D::kShiftBits);
+
+ // Pack and store the new pixel.
+ int16x4_t accum16 = vqmovn_s32(accum);
+ uint8x8_t accum8 = vqmovun_s16(vcombine_s16(accum16, accum16));
+ vst1_lane_u32(reinterpret_cast<uint32_t*>(outRow), vreinterpret_u32_u8(accum8), 0);
+ outRow += 4;
+ }
+}
+
+// Does vertical convolution to produce one output row. The filter values and
+// length are given in the first two parameters. These are applied to each
+// of the rows pointed to in the |sourceDataRows| array, with each row
+// being |pixelWidth| wide.
+//
+// The output must have room for |pixelWidth * 4| bytes.
+template<bool hasAlpha>
+void convolveVertically_neon(const SkConvolutionFilter1D::ConvolutionFixed* filterValues,
+ int filterLength,
+ unsigned char* const* sourceDataRows,
+ int pixelWidth,
+ unsigned char* outRow) {
+ int width = pixelWidth & ~3;
+
+ int32x4_t accum0, accum1, accum2, accum3;
+ int16x4_t coeff16;
+
+ // Output four pixels per iteration (16 bytes).
+ for (int outX = 0; outX < width; outX += 4) {
+
+ // Accumulated result for each pixel. 32 bits per RGBA channel.
+ accum0 = accum1 = accum2 = accum3 = vdupq_n_s32(0);
+
+ // Convolve with one filter coefficient per iteration.
+ for (int filterY = 0; filterY < filterLength; filterY++) {
+
+ // Duplicate the filter coefficient 4 times.
+ // [16] cj cj cj cj
+ coeff16 = vdup_n_s16(filterValues[filterY]);
+
+ // Load four pixels (16 bytes) together.
+ // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
+ uint8x16_t src8 = vld1q_u8(&sourceDataRows[filterY][outX << 2]);
+
+ int16x8_t src16_01 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(src8)));
+ int16x8_t src16_23 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(src8)));
+ int16x4_t src16_0 = vget_low_s16(src16_01);
+ int16x4_t src16_1 = vget_high_s16(src16_01);
+ int16x4_t src16_2 = vget_low_s16(src16_23);
+ int16x4_t src16_3 = vget_high_s16(src16_23);
+
+ accum0 += vmull_s16(src16_0, coeff16);
+ accum1 += vmull_s16(src16_1, coeff16);
+ accum2 += vmull_s16(src16_2, coeff16);
+ accum3 += vmull_s16(src16_3, coeff16);
+ }
+
+ // Shift right for fixed point implementation.
+ accum0 = vshrq_n_s32(accum0, SkConvolutionFilter1D::kShiftBits);
+ accum1 = vshrq_n_s32(accum1, SkConvolutionFilter1D::kShiftBits);
+ accum2 = vshrq_n_s32(accum2, SkConvolutionFilter1D::kShiftBits);
+ accum3 = vshrq_n_s32(accum3, SkConvolutionFilter1D::kShiftBits);
+
+ // Packing 32 bits |accum| to 16 bits per channel (signed saturation).
+ // [16] a1 b1 g1 r1 a0 b0 g0 r0
+ int16x8_t accum16_0 = vcombine_s16(vqmovn_s32(accum0), vqmovn_s32(accum1));
+ // [16] a3 b3 g3 r3 a2 b2 g2 r2
+ int16x8_t accum16_1 = vcombine_s16(vqmovn_s32(accum2), vqmovn_s32(accum3));
+
+ // Packing 16 bits |accum| to 8 bits per channel (unsigned saturation).
+ // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
+ uint8x16_t accum8 = vcombine_u8(vqmovun_s16(accum16_0), vqmovun_s16(accum16_1));
+
+ if (hasAlpha) {
+ // Compute the max(ri, gi, bi) for each pixel.
+ // [8] xx a3 b3 g3 xx a2 b2 g2 xx a1 b1 g1 xx a0 b0 g0
+ uint8x16_t a = vreinterpretq_u8_u32(vshrq_n_u32(vreinterpretq_u32_u8(accum8), 8));
+ // [8] xx xx xx max3 xx xx xx max2 xx xx xx max1 xx xx xx max0
+ uint8x16_t b = vmaxq_u8(a, accum8); // Max of r and g
+ // [8] xx xx a3 b3 xx xx a2 b2 xx xx a1 b1 xx xx a0 b0
+ a = vreinterpretq_u8_u32(vshrq_n_u32(vreinterpretq_u32_u8(accum8), 16));
+ // [8] xx xx xx max3 xx xx xx max2 xx xx xx max1 xx xx xx max0
+ b = vmaxq_u8(a, b); // Max of r and g and b.
+ // [8] max3 00 00 00 max2 00 00 00 max1 00 00 00 max0 00 00 00
+ b = vreinterpretq_u8_u32(vshlq_n_u32(vreinterpretq_u32_u8(b), 24));
+
+ // Make sure the value of alpha channel is always larger than maximum
+ // value of color channels.
+ accum8 = vmaxq_u8(b, accum8);
+ } else {
+ // Set value of alpha channels to 0xFF.
+ accum8 = vreinterpretq_u8_u32(vreinterpretq_u32_u8(accum8) | vdupq_n_u32(0xFF000000));
+ }
+
+ // Store the convolution result (16 bytes) and advance the pixel pointers.
+ vst1q_u8(outRow, accum8);
+ outRow += 16;
+ }
+
+ // Process the leftovers when the width of the output is not divisible
+ // by 4, that is at most 3 pixels.
+ int r = pixelWidth & 3;
+ if (r) {
+
+ accum0 = accum1 = accum2 = vdupq_n_s32(0);
+
+ for (int filterY = 0; filterY < filterLength; ++filterY) {
+ coeff16 = vdup_n_s16(filterValues[filterY]);
+
+ // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
+ uint8x16_t src8 = vld1q_u8(&sourceDataRows[filterY][width << 2]);
+
+ int16x8_t src16_01 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(src8)));
+ int16x8_t src16_23 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(src8)));
+ int16x4_t src16_0 = vget_low_s16(src16_01);
+ int16x4_t src16_1 = vget_high_s16(src16_01);
+ int16x4_t src16_2 = vget_low_s16(src16_23);
+
+ accum0 += vmull_s16(src16_0, coeff16);
+ accum1 += vmull_s16(src16_1, coeff16);
+ accum2 += vmull_s16(src16_2, coeff16);
+ }
+
+ accum0 = vshrq_n_s32(accum0, SkConvolutionFilter1D::kShiftBits);
+ accum1 = vshrq_n_s32(accum1, SkConvolutionFilter1D::kShiftBits);
+ accum2 = vshrq_n_s32(accum2, SkConvolutionFilter1D::kShiftBits);
+
+ int16x8_t accum16_0 = vcombine_s16(vqmovn_s32(accum0), vqmovn_s32(accum1));
+ int16x8_t accum16_1 = vcombine_s16(vqmovn_s32(accum2), vqmovn_s32(accum2));
+
+ uint8x16_t accum8 = vcombine_u8(vqmovun_s16(accum16_0), vqmovun_s16(accum16_1));
+
+ if (hasAlpha) {
+ // Compute the max(ri, gi, bi) for each pixel.
+ // [8] xx a3 b3 g3 xx a2 b2 g2 xx a1 b1 g1 xx a0 b0 g0
+ uint8x16_t a = vreinterpretq_u8_u32(vshrq_n_u32(vreinterpretq_u32_u8(accum8), 8));
+ // [8] xx xx xx max3 xx xx xx max2 xx xx xx max1 xx xx xx max0
+ uint8x16_t b = vmaxq_u8(a, accum8); // Max of r and g
+ // [8] xx xx a3 b3 xx xx a2 b2 xx xx a1 b1 xx xx a0 b0
+ a = vreinterpretq_u8_u32(vshrq_n_u32(vreinterpretq_u32_u8(accum8), 16));
+ // [8] xx xx xx max3 xx xx xx max2 xx xx xx max1 xx xx xx max0
+ b = vmaxq_u8(a, b); // Max of r and g and b.
+ // [8] max3 00 00 00 max2 00 00 00 max1 00 00 00 max0 00 00 00
+ b = vreinterpretq_u8_u32(vshlq_n_u32(vreinterpretq_u32_u8(b), 24));
+
+ // Make sure the value of alpha channel is always larger than maximum
+ // value of color channels.
+ accum8 = vmaxq_u8(b, accum8);
+ } else {
+ // Set value of alpha channels to 0xFF.
+ accum8 = vreinterpretq_u8_u32(vreinterpretq_u32_u8(accum8) | vdupq_n_u32(0xFF000000));
+ }
+
+ switch(r) {
+ case 1:
+ vst1q_lane_u32(reinterpret_cast<uint32_t*>(outRow), vreinterpretq_u32_u8(accum8), 0);
+ break;
+ case 2:
+ vst1_u32(reinterpret_cast<uint32_t*>(outRow),
+ vreinterpret_u32_u8(vget_low_u8(accum8)));
+ break;
+ case 3:
+ vst1_u32(reinterpret_cast<uint32_t*>(outRow),
+ vreinterpret_u32_u8(vget_low_u8(accum8)));
+ vst1q_lane_u32(reinterpret_cast<uint32_t*>(outRow+8), vreinterpretq_u32_u8(accum8), 2);
+ break;
+ }
+ }
+}
+
+void convolveVertically_neon(const SkConvolutionFilter1D::ConvolutionFixed* filterValues,
+ int filterLength,
+ unsigned char* const* sourceDataRows,
+ int pixelWidth,
+ unsigned char* outRow,
+ bool sourceHasAlpha) {
+ if (sourceHasAlpha) {
+ convolveVertically_neon<true>(filterValues, filterLength,
+ sourceDataRows, pixelWidth,
+ outRow);
+ } else {
+ convolveVertically_neon<false>(filterValues, filterLength,
+ sourceDataRows, pixelWidth,
+ outRow);
+ }
+}
+
+// Convolves horizontally along four rows. The row data is given in
+// |srcData| and continues for the numValues() of the filter.
+// The algorithm is almost the same as |convolveHorizontally_SSE2|; please
+// refer to that function for detailed comments.
+void convolve4RowsHorizontally_neon(const unsigned char* srcData[4],
+ const SkConvolutionFilter1D& filter,
+ unsigned char* outRow[4],
+ size_t outRowBytes) {
+
+ uint8x8_t coeff_mask0 = vcreate_u8(0x0100010001000100);
+ uint8x8_t coeff_mask1 = vcreate_u8(0x0302030203020302);
+ uint8x8_t coeff_mask2 = vcreate_u8(0x0504050405040504);
+ uint8x8_t coeff_mask3 = vcreate_u8(0x0706070607060706);
+ int num_values = filter.numValues();
+
+ int filterOffset, filterLength;
+    // |mask| will be used to zero out any extra filter coefficients that are
+    // loaded by SIMD when |filterLength| is not divisible by 4.
+    // mask[0] is not used in the following algorithm.
+ const uint16_t mask[4][4] = {
+ {0, 0, 0, 0},
+ {0xFFFF, 0, 0, 0},
+ {0xFFFF, 0xFFFF, 0, 0},
+ {0xFFFF, 0xFFFF, 0xFFFF, 0}
+ };
+
+ // Output one pixel each iteration, calculating all channels (RGBA) together.
+ for (int outX = 0; outX < num_values; outX++) {
+
+ const SkConvolutionFilter1D::ConvolutionFixed* filterValues =
+ filter.FilterForValue(outX, &filterOffset, &filterLength);
+
+ // four pixels in a column per iteration.
+ int32x4_t accum0 = vdupq_n_s32(0);
+ int32x4_t accum1 = vdupq_n_s32(0);
+ int32x4_t accum2 = vdupq_n_s32(0);
+ int32x4_t accum3 = vdupq_n_s32(0);
+
+ int start = (filterOffset<<2);
+
+ // We will load and accumulate with four coefficients per iteration.
+ for (int filter_x = 0; filter_x < (filterLength >> 2); filter_x++) {
+ int16x4_t coeffs, coeff0, coeff1, coeff2, coeff3;
+
+ coeffs = vld1_s16(filterValues);
+ coeff0 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_s16(coeffs), coeff_mask0));
+ coeff1 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_s16(coeffs), coeff_mask1));
+ coeff2 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_s16(coeffs), coeff_mask2));
+ coeff3 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_s16(coeffs), coeff_mask3));
+
+ uint8x16_t pixels;
+ int16x8_t p01_16, p23_16;
+ int32x4_t p0, p1, p2, p3;
+
+
+#define ITERATION(src, accum) \
+ pixels = vld1q_u8(src); \
+ p01_16 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pixels))); \
+ p23_16 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pixels))); \
+ p0 = vmull_s16(vget_low_s16(p01_16), coeff0); \
+ p1 = vmull_s16(vget_high_s16(p01_16), coeff1); \
+ p2 = vmull_s16(vget_low_s16(p23_16), coeff2); \
+ p3 = vmull_s16(vget_high_s16(p23_16), coeff3); \
+ accum += p0; \
+ accum += p1; \
+ accum += p2; \
+ accum += p3
+
+ ITERATION(srcData[0] + start, accum0);
+ ITERATION(srcData[1] + start, accum1);
+ ITERATION(srcData[2] + start, accum2);
+ ITERATION(srcData[3] + start, accum3);
+
+ start += 16;
+ filterValues += 4;
+ }
+
+ int r = filterLength & 3;
+ if (r) {
+ int16x4_t coeffs, coeff0, coeff1, coeff2, coeff3;
+ coeffs = vld1_s16(filterValues);
+ coeffs &= vreinterpret_s16_u16(vld1_u16(&mask[r][0]));
+ coeff0 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_s16(coeffs), coeff_mask0));
+ coeff1 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_s16(coeffs), coeff_mask1));
+ coeff2 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_s16(coeffs), coeff_mask2));
+ coeff3 = vreinterpret_s16_u8(vtbl1_u8(vreinterpret_u8_s16(coeffs), coeff_mask3));
+
+ uint8x16_t pixels;
+ int16x8_t p01_16, p23_16;
+ int32x4_t p0, p1, p2, p3;
+
+ ITERATION(srcData[0] + start, accum0);
+ ITERATION(srcData[1] + start, accum1);
+ ITERATION(srcData[2] + start, accum2);
+ ITERATION(srcData[3] + start, accum3);
+ }
+
+ int16x4_t accum16;
+ uint8x8_t res0, res1, res2, res3;
+
+#define PACK_RESULT(accum, res) \
+ accum = vshrq_n_s32(accum, SkConvolutionFilter1D::kShiftBits); \
+ accum16 = vqmovn_s32(accum); \
+ res = vqmovun_s16(vcombine_s16(accum16, accum16));
+
+ PACK_RESULT(accum0, res0);
+ PACK_RESULT(accum1, res1);
+ PACK_RESULT(accum2, res2);
+ PACK_RESULT(accum3, res3);
+
+ vst1_lane_u32(reinterpret_cast<uint32_t*>(outRow[0]), vreinterpret_u32_u8(res0), 0);
+ vst1_lane_u32(reinterpret_cast<uint32_t*>(outRow[1]), vreinterpret_u32_u8(res1), 0);
+ vst1_lane_u32(reinterpret_cast<uint32_t*>(outRow[2]), vreinterpret_u32_u8(res2), 0);
+ vst1_lane_u32(reinterpret_cast<uint32_t*>(outRow[3]), vreinterpret_u32_u8(res3), 0);
+ outRow[0] += 4;
+ outRow[1] += 4;
+ outRow[2] += 4;
+ outRow[3] += 4;
+ }
+}
+
+void applySIMDPadding_neon(SkConvolutionFilter1D *filter) {
+    // Pad with extra dummy coefficients after the coefficients of the last
+    // filter so that SIMD instructions, which load 8 or 16 bytes at a time,
+    // never read past the end of the buffer. We are not trying to align the
+    // coefficients right now due to the opaqueness of the <vector>
+    // implementation. This has to be done after all |addFilter| calls.
+ for (int i = 0; i < 8; ++i) {
+ filter->addFilterValue(static_cast<SkConvolutionFilter1D::ConvolutionFixed>(0));
+ }
+}
+
+void platformConvolutionProcs_arm_neon(SkConvolutionProcs* procs) {
+ procs->fExtraHorizontalReads = 3;
+ procs->fConvolveVertically = &convolveVertically_neon;
+ procs->fConvolve4RowsHorizontally = &convolve4RowsHorizontally_neon;
+ procs->fConvolveHorizontally = &convolveHorizontally_neon;
+ procs->fApplySIMDPadding = &applySIMDPadding_neon;
+}
diff --git a/gfx/skia/skia/src/opts/SkBitmapProcState_filter_neon.h b/gfx/skia/skia/src/opts/SkBitmapProcState_filter_neon.h
new file mode 100644
index 000000000..5cf0ac462
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBitmapProcState_filter_neon.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <arm_neon.h>
+#include "SkColorPriv.h"
+
+/*
+ * Filter_32_opaque
+ *
+ * There is no hard-n-fast rule that the filtering must produce
+ * exact results for the color components, but if the 4 incoming colors are
+ * all opaque, then the output color must also be opaque. Subsequent parts of
+ * the drawing pipeline may rely on this (e.g. which blitrow proc to use).
+ *
+ */
+// Chrome on Android uses -Os so we need to force these inline. Otherwise
+// calling the function in the inner loops will cause significant overhead on
+// some platforms.
+static SK_ALWAYS_INLINE void Filter_32_opaque_neon(unsigned x, unsigned y,
+ SkPMColor a00, SkPMColor a01,
+ SkPMColor a10, SkPMColor a11,
+ SkPMColor *dst) {
+ uint8x8_t vy, vconst16_8, v16_y, vres;
+ uint16x4_t vx, vconst16_16, v16_x, tmp;
+ uint32x2_t va0, va1;
+ uint16x8_t tmp1, tmp2;
+
+ vy = vdup_n_u8(y); // duplicate y into vy
+ vconst16_8 = vmov_n_u8(16); // set up constant in vconst16_8
+ v16_y = vsub_u8(vconst16_8, vy); // v16_y = 16-y
+
+ va0 = vdup_n_u32(a00); // duplicate a00
+ va1 = vdup_n_u32(a10); // duplicate a10
+ va0 = vset_lane_u32(a01, va0, 1); // set top to a01
+ va1 = vset_lane_u32(a11, va1, 1); // set top to a11
+
+ tmp1 = vmull_u8(vreinterpret_u8_u32(va0), v16_y); // tmp1 = [a01|a00] * (16-y)
+ tmp2 = vmull_u8(vreinterpret_u8_u32(va1), vy); // tmp2 = [a11|a10] * y
+
+ vx = vdup_n_u16(x); // duplicate x into vx
+ vconst16_16 = vmov_n_u16(16); // set up constant in vconst16_16
+ v16_x = vsub_u16(vconst16_16, vx); // v16_x = 16-x
+
+ tmp = vmul_u16(vget_high_u16(tmp1), vx); // tmp = a01 * x
+ tmp = vmla_u16(tmp, vget_high_u16(tmp2), vx); // tmp += a11 * x
+ tmp = vmla_u16(tmp, vget_low_u16(tmp1), v16_x); // tmp += a00 * (16-x)
+ tmp = vmla_u16(tmp, vget_low_u16(tmp2), v16_x); // tmp += a10 * (16-x)
+
+ vres = vshrn_n_u16(vcombine_u16(tmp, vcreate_u16(0)), 8); // shift down result by 8
+ vst1_lane_u32(dst, vreinterpret_u32_u8(vres), 0); // store result
+}
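+// Per channel, the code above computes
+//   (a00*(16-x)*(16-y) + a01*x*(16-y) + a10*(16-x)*y + a11*x*y) >> 8,
+// where x and y are 4-bit sub-pixel weights. The four weights always sum to
+// 256, so the final >> 8 renormalizes; with x == y == 8 the result is a plain
+// average of the four source pixels.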
+
+static SK_ALWAYS_INLINE void Filter_32_alpha_neon(unsigned x, unsigned y,
+ SkPMColor a00, SkPMColor a01,
+ SkPMColor a10, SkPMColor a11,
+ SkPMColor *dst,
+ uint16_t scale) {
+ uint8x8_t vy, vconst16_8, v16_y, vres;
+ uint16x4_t vx, vconst16_16, v16_x, tmp, vscale;
+ uint32x2_t va0, va1;
+ uint16x8_t tmp1, tmp2;
+
+ vy = vdup_n_u8(y); // duplicate y into vy
+ vconst16_8 = vmov_n_u8(16); // set up constant in vconst16_8
+ v16_y = vsub_u8(vconst16_8, vy); // v16_y = 16-y
+
+ va0 = vdup_n_u32(a00); // duplicate a00
+ va1 = vdup_n_u32(a10); // duplicate a10
+ va0 = vset_lane_u32(a01, va0, 1); // set top to a01
+ va1 = vset_lane_u32(a11, va1, 1); // set top to a11
+
+ tmp1 = vmull_u8(vreinterpret_u8_u32(va0), v16_y); // tmp1 = [a01|a00] * (16-y)
+ tmp2 = vmull_u8(vreinterpret_u8_u32(va1), vy); // tmp2 = [a11|a10] * y
+
+ vx = vdup_n_u16(x); // duplicate x into vx
+ vconst16_16 = vmov_n_u16(16); // set up constant in vconst16_16
+ v16_x = vsub_u16(vconst16_16, vx); // v16_x = 16-x
+
+ tmp = vmul_u16(vget_high_u16(tmp1), vx); // tmp = a01 * x
+ tmp = vmla_u16(tmp, vget_high_u16(tmp2), vx); // tmp += a11 * x
+ tmp = vmla_u16(tmp, vget_low_u16(tmp1), v16_x); // tmp += a00 * (16-x)
+ tmp = vmla_u16(tmp, vget_low_u16(tmp2), v16_x); // tmp += a10 * (16-x)
+
+ vscale = vdup_n_u16(scale); // duplicate scale
+ tmp = vshr_n_u16(tmp, 8); // shift down result by 8
+ tmp = vmul_u16(tmp, vscale); // multiply result by scale
+
+ vres = vshrn_n_u16(vcombine_u16(tmp, vcreate_u16(0)), 8); // shift down result by 8
+ vst1_lane_u32(dst, vreinterpret_u32_u8(vres), 0); // store result
+}
diff --git a/gfx/skia/skia/src/opts/SkBitmapProcState_matrixProcs_neon.cpp b/gfx/skia/skia/src/opts/SkBitmapProcState_matrixProcs_neon.cpp
new file mode 100644
index 000000000..7789031c0
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBitmapProcState_matrixProcs_neon.cpp
@@ -0,0 +1,234 @@
+/* NEON optimized code (C) COPYRIGHT 2009 Motorola
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapProcState.h"
+#include "SkPerspIter.h"
+#include "SkShader.h"
+#include "SkUtilsArm.h"
+#include "SkBitmapProcState_utils.h"
+
+#include <arm_neon.h>
+
+extern const SkBitmapProcState::MatrixProc ClampX_ClampY_Procs_neon[];
+extern const SkBitmapProcState::MatrixProc RepeatX_RepeatY_Procs_neon[];
+
+static void decal_nofilter_scale_neon(uint32_t dst[], SkFixed fx, SkFixed dx, int count);
+static void decal_filter_scale_neon(uint32_t dst[], SkFixed fx, SkFixed dx, int count);
+
+// TILEX_PROCF(fx, max) SkClampMax((fx) >> 16, max)
+static inline int16x8_t sbpsm_clamp_tile8(int32x4_t low, int32x4_t high, unsigned max) {
+ int16x8_t res;
+
+ // get the hi 16s of all those 32s
+ res = vuzpq_s16(vreinterpretq_s16_s32(low), vreinterpretq_s16_s32(high)).val[1];
+
+ // clamp
+ res = vmaxq_s16(res, vdupq_n_s16(0));
+ res = vminq_s16(res, vdupq_n_s16(max));
+
+ return res;
+}
+
+// TILEX_PROCF(fx, max) SkClampMax((fx) >> 16, max)
+static inline int32x4_t sbpsm_clamp_tile4(int32x4_t f, unsigned max) {
+ int32x4_t res;
+
+ // get the hi 16s of all those 32s
+ res = vshrq_n_s32(f, 16);
+
+ // clamp
+ res = vmaxq_s32(res, vdupq_n_s32(0));
+ res = vminq_s32(res, vdupq_n_s32(max));
+
+ return res;
+}
+
+// TILEY_LOW_BITS(fy, max) (((fy) >> 12) & 0xF)
+static inline int32x4_t sbpsm_clamp_tile4_low_bits(int32x4_t fx) {
+ int32x4_t ret;
+
+ ret = vshrq_n_s32(fx, 12);
+
+ /* We don't need the mask below because the caller will
+ * overwrite the non-masked bits
+ */
+ //ret = vandq_s32(ret, vdupq_n_s32(0xF));
+
+ return ret;
+}
+
+// TILEX_PROCF(fx, max) (((fx)&0xFFFF)*((max)+1)>> 16)
+static inline int16x8_t sbpsm_repeat_tile8(int32x4_t low, int32x4_t high, unsigned max) {
+ uint16x8_t res;
+ uint32x4_t tmpl, tmph;
+
+ // get the lower 16 bits
+ res = vuzpq_u16(vreinterpretq_u16_s32(low), vreinterpretq_u16_s32(high)).val[0];
+
+ // bare multiplication, not SkFixedMul
+ tmpl = vmull_u16(vget_low_u16(res), vdup_n_u16(max+1));
+ tmph = vmull_u16(vget_high_u16(res), vdup_n_u16(max+1));
+
+ // extraction of the 16 upper bits
+ res = vuzpq_u16(vreinterpretq_u16_u32(tmpl), vreinterpretq_u16_u32(tmph)).val[1];
+
+ return vreinterpretq_s16_u16(res);
+}
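+// Example: with max == 99 and a fractional coordinate of 0x8000 (0.5 in 16.16
+// fixed point), (0x8000 * 100) >> 16 == 50, i.e. the fractional part is mapped
+// linearly onto [0, max].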
+
+// TILEX_PROCF(fx, max) (((fx)&0xFFFF)*((max)+1)>> 16)
+static inline int32x4_t sbpsm_repeat_tile4(int32x4_t f, unsigned max) {
+ uint16x4_t res;
+ uint32x4_t tmp;
+
+ // get the lower 16 bits
+ res = vmovn_u32(vreinterpretq_u32_s32(f));
+
+ // bare multiplication, not SkFixedMul
+ tmp = vmull_u16(res, vdup_n_u16(max+1));
+
+ // extraction of the 16 upper bits
+ tmp = vshrq_n_u32(tmp, 16);
+
+ return vreinterpretq_s32_u32(tmp);
+}
+
+// TILEX_LOW_BITS(fx, max) ((((fx) & 0xFFFF) * ((max) + 1) >> 12) & 0xF)
+static inline int32x4_t sbpsm_repeat_tile4_low_bits(int32x4_t fx, unsigned max) {
+ uint16x4_t res;
+ uint32x4_t tmp;
+ int32x4_t ret;
+
+ // get the lower 16 bits
+ res = vmovn_u32(vreinterpretq_u32_s32(fx));
+
+ // bare multiplication, not SkFixedMul
+ tmp = vmull_u16(res, vdup_n_u16(max + 1));
+
+ // shift and mask
+ ret = vshrq_n_s32(vreinterpretq_s32_u32(tmp), 12);
+
+ /* We don't need the mask below because the caller will
+ * overwrite the non-masked bits
+ */
+ //ret = vandq_s32(ret, vdupq_n_s32(0xF));
+
+ return ret;
+}
+
+#define MAKENAME(suffix) ClampX_ClampY ## suffix ## _neon
+#define TILEX_PROCF(fx, max) SkClampMax((fx) >> 16, max)
+#define TILEY_PROCF(fy, max) SkClampMax((fy) >> 16, max)
+#define TILEX_PROCF_NEON8(l, h, max) sbpsm_clamp_tile8(l, h, max)
+#define TILEY_PROCF_NEON8(l, h, max) sbpsm_clamp_tile8(l, h, max)
+#define TILEX_PROCF_NEON4(fx, max) sbpsm_clamp_tile4(fx, max)
+#define TILEY_PROCF_NEON4(fy, max) sbpsm_clamp_tile4(fy, max)
+#define TILEX_LOW_BITS(fx, max) (((fx) >> 12) & 0xF)
+#define TILEY_LOW_BITS(fy, max) (((fy) >> 12) & 0xF)
+#define TILEX_LOW_BITS_NEON4(fx, max) sbpsm_clamp_tile4_low_bits(fx)
+#define TILEY_LOW_BITS_NEON4(fy, max) sbpsm_clamp_tile4_low_bits(fy)
+#define CHECK_FOR_DECAL
+#include "SkBitmapProcState_matrix_neon.h"
+
+#define MAKENAME(suffix) RepeatX_RepeatY ## suffix ## _neon
+#define TILEX_PROCF(fx, max) SK_USHIFT16(((fx) & 0xFFFF) * ((max) + 1))
+#define TILEY_PROCF(fy, max) SK_USHIFT16(((fy) & 0xFFFF) * ((max) + 1))
+#define TILEX_PROCF_NEON8(l, h, max) sbpsm_repeat_tile8(l, h, max)
+#define TILEY_PROCF_NEON8(l, h, max) sbpsm_repeat_tile8(l, h, max)
+#define TILEX_PROCF_NEON4(fx, max) sbpsm_repeat_tile4(fx, max)
+#define TILEY_PROCF_NEON4(fy, max) sbpsm_repeat_tile4(fy, max)
+#define TILEX_LOW_BITS(fx, max) ((((fx) & 0xFFFF) * ((max) + 1) >> 12) & 0xF)
+#define TILEY_LOW_BITS(fy, max) ((((fy) & 0xFFFF) * ((max) + 1) >> 12) & 0xF)
+#define TILEX_LOW_BITS_NEON4(fx, max) sbpsm_repeat_tile4_low_bits(fx, max)
+#define TILEY_LOW_BITS_NEON4(fy, max) sbpsm_repeat_tile4_low_bits(fy, max)
+#include "SkBitmapProcState_matrix_neon.h"
+
+
+
+void decal_nofilter_scale_neon(uint32_t dst[], SkFixed fx, SkFixed dx, int count) {
+ if (count >= 8) {
+ // SkFixed is 16.16 fixed point
+ SkFixed dx8 = dx * 8;
+ int32x4_t vdx8 = vdupq_n_s32(dx8);
+
+ // setup lbase and hbase
+ int32x4_t lbase, hbase;
+ lbase = vdupq_n_s32(fx);
+ lbase = vsetq_lane_s32(fx + dx, lbase, 1);
+ lbase = vsetq_lane_s32(fx + dx + dx, lbase, 2);
+ lbase = vsetq_lane_s32(fx + dx + dx + dx, lbase, 3);
+ hbase = lbase + vdupq_n_s32(4 * dx);
+
+ do {
+ // store the upper 16 bits
+ vst1q_u32(dst, vreinterpretq_u32_s16(
+ vuzpq_s16(vreinterpretq_s16_s32(lbase), vreinterpretq_s16_s32(hbase)).val[1]
+ ));
+
+ // on to the next group of 8
+ lbase += vdx8;
+ hbase += vdx8;
+            dst += 4; // we processed 8 elements, but each result is only 16 bits wide
+ count -= 8;
+ fx += dx8;
+ } while (count >= 8);
+ }
+
+ uint16_t* xx = (uint16_t*)dst;
+ for (int i = count; i > 0; --i) {
+ *xx++ = SkToU16(fx >> 16); fx += dx;
+ }
+}
+
+void decal_filter_scale_neon(uint32_t dst[], SkFixed fx, SkFixed dx, int count) {
+ if (count >= 8) {
+ SkFixed dx8 = dx * 8;
+ int32x4_t vdx8 = vdupq_n_s32(dx8);
+
+ int32x4_t wide_fx, wide_fx2;
+ wide_fx = vdupq_n_s32(fx);
+ wide_fx = vsetq_lane_s32(fx + dx, wide_fx, 1);
+ wide_fx = vsetq_lane_s32(fx + dx + dx, wide_fx, 2);
+ wide_fx = vsetq_lane_s32(fx + dx + dx + dx, wide_fx, 3);
+
+ wide_fx2 = vaddq_s32(wide_fx, vdupq_n_s32(4 * dx));
+
+ while (count >= 8) {
+ int32x4_t wide_out;
+ int32x4_t wide_out2;
+
+ wide_out = vshlq_n_s32(vshrq_n_s32(wide_fx, 12), 14);
+ wide_out = wide_out | (vshrq_n_s32(wide_fx,16) + vdupq_n_s32(1));
+
+ wide_out2 = vshlq_n_s32(vshrq_n_s32(wide_fx2, 12), 14);
+ wide_out2 = wide_out2 | (vshrq_n_s32(wide_fx2,16) + vdupq_n_s32(1));
+
+ vst1q_u32(dst, vreinterpretq_u32_s32(wide_out));
+ vst1q_u32(dst+4, vreinterpretq_u32_s32(wide_out2));
+
+ dst += 8;
+ fx += dx8;
+ wide_fx += vdx8;
+ wide_fx2 += vdx8;
+ count -= 8;
+ }
+ }
+
+ if (count & 1)
+ {
+ SkASSERT((fx >> (16 + 14)) == 0);
+ *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
+ fx += dx;
+ }
+ while ((count -= 2) >= 0)
+ {
+ SkASSERT((fx >> (16 + 14)) == 0);
+ *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
+ fx += dx;
+
+ *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
+ fx += dx;
+ }
+}
diff --git a/gfx/skia/skia/src/opts/SkBitmapProcState_matrix_neon.h b/gfx/skia/skia/src/opts/SkBitmapProcState_matrix_neon.h
new file mode 100644
index 000000000..45691b90c
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBitmapProcState_matrix_neon.h
@@ -0,0 +1,500 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <arm_neon.h>
+
+#define SCALE_NOFILTER_NAME MAKENAME(_nofilter_scale)
+#define SCALE_FILTER_NAME MAKENAME(_filter_scale)
+#define AFFINE_NOFILTER_NAME MAKENAME(_nofilter_affine)
+#define AFFINE_FILTER_NAME MAKENAME(_filter_affine)
+#define PERSP_NOFILTER_NAME MAKENAME(_nofilter_persp)
+#define PERSP_FILTER_NAME MAKENAME(_filter_persp)
+
+#define PACK_FILTER_X_NAME MAKENAME(_pack_filter_x)
+#define PACK_FILTER_Y_NAME MAKENAME(_pack_filter_y)
+#define PACK_FILTER_X4_NAME MAKENAME(_pack_filter_x4)
+#define PACK_FILTER_Y4_NAME MAKENAME(_pack_filter_y4)
+
+#ifndef PREAMBLE
+ #define PREAMBLE(state)
+ #define PREAMBLE_PARAM_X
+ #define PREAMBLE_PARAM_Y
+ #define PREAMBLE_ARG_X
+ #define PREAMBLE_ARG_Y
+#endif
+
+static void SCALE_NOFILTER_NAME(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y) {
+ SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
+ SkMatrix::kScale_Mask)) == 0);
+
+ PREAMBLE(s);
+
+ // we store y, x, x, x, x, x
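+    // i.e. the first 32-bit entry holds the tiled Y index and the remaining
+    // entries are 16-bit X indices packed two per uint32_t, which is why the
+    // vector loop below writes through an int16_t pointer.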
+ const unsigned maxX = s.fPixmap.width() - 1;
+ SkFractionalInt fx;
+ {
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ const unsigned maxY = s.fPixmap.height() - 1;
+ *xy++ = TILEY_PROCF(mapper.fixedY(), maxY);
+ fx = mapper.fractionalIntX();
+ }
+
+ if (0 == maxX) {
+ // all of the following X values must be 0
+ memset(xy, 0, count * sizeof(uint16_t));
+ return;
+ }
+
+ const SkFractionalInt dx = s.fInvSxFractionalInt;
+
+#ifdef CHECK_FOR_DECAL
+ // test if we don't need to apply the tile proc
+ if (can_truncate_to_fixed_for_decal(fx, dx, count, maxX)) {
+ decal_nofilter_scale_neon(xy, SkFractionalIntToFixed(fx),
+ SkFractionalIntToFixed(dx), count);
+ return;
+ }
+#endif
+
+ if (count >= 8) {
+ SkFractionalInt dx2 = dx+dx;
+ SkFractionalInt dx4 = dx2+dx2;
+ SkFractionalInt dx8 = dx4+dx4;
+
+ // now build fx/fx+dx/fx+2dx/fx+3dx
+ SkFractionalInt fx1, fx2, fx3;
+ int32x4_t lbase, hbase;
+ int16_t *dst16 = (int16_t *)xy;
+
+ fx1 = fx+dx;
+ fx2 = fx1+dx;
+ fx3 = fx2+dx;
+
+ lbase = vdupq_n_s32(SkFractionalIntToFixed(fx));
+ lbase = vsetq_lane_s32(SkFractionalIntToFixed(fx1), lbase, 1);
+ lbase = vsetq_lane_s32(SkFractionalIntToFixed(fx2), lbase, 2);
+ lbase = vsetq_lane_s32(SkFractionalIntToFixed(fx3), lbase, 3);
+ hbase = vaddq_s32(lbase, vdupq_n_s32(SkFractionalIntToFixed(dx4)));
+
+ // store & bump
+ while (count >= 8) {
+
+ int16x8_t fx8;
+
+ fx8 = TILEX_PROCF_NEON8(lbase, hbase, maxX);
+
+ vst1q_s16(dst16, fx8);
+
+ // but preserving base & on to the next
+ lbase = vaddq_s32 (lbase, vdupq_n_s32(SkFractionalIntToFixed(dx8)));
+ hbase = vaddq_s32 (hbase, vdupq_n_s32(SkFractionalIntToFixed(dx8)));
+ dst16 += 8;
+ count -= 8;
+ fx += dx8;
+ };
+ xy = (uint32_t *) dst16;
+ }
+
+ uint16_t* xx = (uint16_t*)xy;
+ for (int i = count; i > 0; --i) {
+ *xx++ = TILEX_PROCF(SkFractionalIntToFixed(fx), maxX);
+ fx += dx;
+ }
+}
+
+static void AFFINE_NOFILTER_NAME(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y) {
+ SkASSERT(s.fInvType & SkMatrix::kAffine_Mask);
+ SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
+ SkMatrix::kScale_Mask |
+ SkMatrix::kAffine_Mask)) == 0);
+
+ PREAMBLE(s);
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+
+ SkFractionalInt fx = mapper.fractionalIntX();
+ SkFractionalInt fy = mapper.fractionalIntY();
+ SkFractionalInt dx = s.fInvSxFractionalInt;
+ SkFractionalInt dy = s.fInvKyFractionalInt;
+ int maxX = s.fPixmap.width() - 1;
+ int maxY = s.fPixmap.height() - 1;
+
+ if (count >= 8) {
+ SkFractionalInt dx4 = dx * 4;
+ SkFractionalInt dy4 = dy * 4;
+ SkFractionalInt dx8 = dx * 8;
+ SkFractionalInt dy8 = dy * 8;
+
+ int32x4_t xbase, ybase;
+ int32x4_t x2base, y2base;
+ int16_t *dst16 = (int16_t *) xy;
+
+ // now build fx, fx+dx, fx+2dx, fx+3dx
+ xbase = vdupq_n_s32(SkFractionalIntToFixed(fx));
+ xbase = vsetq_lane_s32(SkFractionalIntToFixed(fx+dx), xbase, 1);
+ xbase = vsetq_lane_s32(SkFractionalIntToFixed(fx+dx+dx), xbase, 2);
+ xbase = vsetq_lane_s32(SkFractionalIntToFixed(fx+dx+dx+dx), xbase, 3);
+
+ // same for fy
+ ybase = vdupq_n_s32(SkFractionalIntToFixed(fy));
+ ybase = vsetq_lane_s32(SkFractionalIntToFixed(fy+dy), ybase, 1);
+ ybase = vsetq_lane_s32(SkFractionalIntToFixed(fy+dy+dy), ybase, 2);
+ ybase = vsetq_lane_s32(SkFractionalIntToFixed(fy+dy+dy+dy), ybase, 3);
+
+ x2base = vaddq_s32(xbase, vdupq_n_s32(SkFractionalIntToFixed(dx4)));
+ y2base = vaddq_s32(ybase, vdupq_n_s32(SkFractionalIntToFixed(dy4)));
+
+ // store & bump
+ do {
+ int16x8x2_t hi16;
+
+ hi16.val[0] = TILEX_PROCF_NEON8(xbase, x2base, maxX);
+ hi16.val[1] = TILEY_PROCF_NEON8(ybase, y2base, maxY);
+
+ vst2q_s16(dst16, hi16);
+
+ // moving base and on to the next
+ xbase = vaddq_s32(xbase, vdupq_n_s32(SkFractionalIntToFixed(dx8)));
+ ybase = vaddq_s32(ybase, vdupq_n_s32(SkFractionalIntToFixed(dy8)));
+ x2base = vaddq_s32(x2base, vdupq_n_s32(SkFractionalIntToFixed(dx8)));
+ y2base = vaddq_s32(y2base, vdupq_n_s32(SkFractionalIntToFixed(dy8)));
+
+ dst16 += 16; // 8x32 aka 16x16
+ count -= 8;
+ fx += dx8;
+ fy += dy8;
+ } while (count >= 8);
+ xy = (uint32_t *) dst16;
+ }
+
+ for (int i = count; i > 0; --i) {
+ *xy++ = (TILEY_PROCF(SkFractionalIntToFixed(fy), maxY) << 16) |
+ TILEX_PROCF(SkFractionalIntToFixed(fx), maxX);
+ fx += dx; fy += dy;
+ }
+}
+
+static void PERSP_NOFILTER_NAME(const SkBitmapProcState& s,
+ uint32_t* SK_RESTRICT xy,
+ int count, int x, int y) {
+ SkASSERT(s.fInvType & SkMatrix::kPerspective_Mask);
+
+ PREAMBLE(s);
+ // max{X,Y} are int here, but later shown/assumed to fit in 16 bits
+ int maxX = s.fPixmap.width() - 1;
+ int maxY = s.fPixmap.height() - 1;
+
+ SkPerspIter iter(s.fInvMatrix,
+ SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, count);
+
+ while ((count = iter.next()) != 0) {
+ const SkFixed* SK_RESTRICT srcXY = iter.getXY();
+
+ if (count >= 8) {
+ int32_t *mysrc = (int32_t *) srcXY;
+ int16_t *mydst = (int16_t *) xy;
+ do {
+ int16x8x2_t hi16;
+ int32x4x2_t xy1, xy2;
+
+ xy1 = vld2q_s32(mysrc);
+ xy2 = vld2q_s32(mysrc+8);
+
+ hi16.val[0] = TILEX_PROCF_NEON8(xy1.val[0], xy2.val[0], maxX);
+ hi16.val[1] = TILEY_PROCF_NEON8(xy1.val[1], xy2.val[1], maxY);
+
+ vst2q_s16(mydst, hi16);
+
+ count -= 8; // 8 iterations
+ mysrc += 16; // 16 longs
+ mydst += 16; // 16 shorts, aka 8 longs
+ } while (count >= 8);
+ // get xy and srcXY fixed up
+ srcXY = (const SkFixed *) mysrc;
+ xy = (uint32_t *) mydst;
+ }
+
+ while (--count >= 0) {
+ *xy++ = (TILEY_PROCF(srcXY[1], maxY) << 16) |
+ TILEX_PROCF(srcXY[0], maxX);
+ srcXY += 2;
+ }
+ }
+}
+
+static inline uint32_t PACK_FILTER_Y_NAME(SkFixed f, unsigned max,
+ SkFixed one PREAMBLE_PARAM_Y) {
+ unsigned i = TILEY_PROCF(f, max);
+ i = (i << 4) | TILEY_LOW_BITS(f, max);
+ return (i << 14) | (TILEY_PROCF((f + one), max));
+}
+
+static inline uint32_t PACK_FILTER_X_NAME(SkFixed f, unsigned max,
+ SkFixed one PREAMBLE_PARAM_X) {
+ unsigned i = TILEX_PROCF(f, max);
+ i = (i << 4) | TILEX_LOW_BITS(f, max);
+ return (i << 14) | (TILEX_PROCF((f + one), max));
+}
+
+static inline int32x4_t PACK_FILTER_X4_NAME(int32x4_t f, unsigned max,
+ SkFixed one PREAMBLE_PARAM_X) {
+ int32x4_t ret, res, wide_one;
+
+ // Prepare constants
+ wide_one = vdupq_n_s32(one);
+
+ // Step 1
+ res = TILEX_PROCF_NEON4(f, max);
+
+ // Step 2
+ ret = TILEX_LOW_BITS_NEON4(f, max);
+ ret = vsliq_n_s32(ret, res, 4);
+
+ // Step 3
+ res = TILEX_PROCF_NEON4(f + wide_one, max);
+ ret = vorrq_s32(vshlq_n_s32(ret, 14), res);
+
+ return ret;
+}
+
+static inline int32x4_t PACK_FILTER_Y4_NAME(int32x4_t f, unsigned max,
+ SkFixed one PREAMBLE_PARAM_X) {
+ int32x4_t ret, res, wide_one;
+
+ // Prepare constants
+ wide_one = vdupq_n_s32(one);
+
+ // Step 1
+ res = TILEY_PROCF_NEON4(f, max);
+
+ // Step 2
+ ret = TILEY_LOW_BITS_NEON4(f, max);
+ ret = vsliq_n_s32(ret, res, 4);
+
+ // Step 3
+ res = TILEY_PROCF_NEON4(f + wide_one, max);
+ ret = vorrq_s32(vshlq_n_s32(ret, 14), res);
+
+ return ret;
+}
+
+static void SCALE_FILTER_NAME(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y) {
+ SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
+ SkMatrix::kScale_Mask)) == 0);
+ SkASSERT(s.fInvKy == 0);
+
+ PREAMBLE(s);
+
+ const unsigned maxX = s.fPixmap.width() - 1;
+ const SkFixed one = s.fFilterOneX;
+ const SkFractionalInt dx = s.fInvSxFractionalInt;
+ SkFractionalInt fx;
+
+ {
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ const SkFixed fy = mapper.fixedY();
+ const unsigned maxY = s.fPixmap.height() - 1;
+ // compute our two Y values up front
+ *xy++ = PACK_FILTER_Y_NAME(fy, maxY, s.fFilterOneY PREAMBLE_ARG_Y);
+ // now initialize fx
+ fx = mapper.fractionalIntX();
+ }
+
+#ifdef CHECK_FOR_DECAL
+ // test if we don't need to apply the tile proc
+ if (can_truncate_to_fixed_for_decal(fx, dx, count, maxX)) {
+ decal_filter_scale_neon(xy, SkFractionalIntToFixed(fx),
+ SkFractionalIntToFixed(dx), count);
+ return;
+ }
+#endif
+ {
+
+ if (count >= 4) {
+ int32x4_t wide_fx;
+
+ wide_fx = vdupq_n_s32(SkFractionalIntToFixed(fx));
+ wide_fx = vsetq_lane_s32(SkFractionalIntToFixed(fx+dx), wide_fx, 1);
+ wide_fx = vsetq_lane_s32(SkFractionalIntToFixed(fx+dx+dx), wide_fx, 2);
+ wide_fx = vsetq_lane_s32(SkFractionalIntToFixed(fx+dx+dx+dx), wide_fx, 3);
+
+ while (count >= 4) {
+ int32x4_t res;
+
+ res = PACK_FILTER_X4_NAME(wide_fx, maxX, one PREAMBLE_ARG_X);
+
+ vst1q_u32(xy, vreinterpretq_u32_s32(res));
+
+ wide_fx += vdupq_n_s32(SkFractionalIntToFixed(dx+dx+dx+dx));
+ fx += dx+dx+dx+dx;
+ xy += 4;
+ count -= 4;
+ }
+ }
+
+ while (--count >= 0) {
+ *xy++ = PACK_FILTER_X_NAME(SkFractionalIntToFixed(fx), maxX, one PREAMBLE_ARG_X);
+ fx += dx;
+ }
+
+ }
+}
+
+static void AFFINE_FILTER_NAME(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y) {
+ SkASSERT(s.fInvType & SkMatrix::kAffine_Mask);
+ SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
+ SkMatrix::kScale_Mask |
+ SkMatrix::kAffine_Mask)) == 0);
+
+ PREAMBLE(s);
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+
+ SkFixed oneX = s.fFilterOneX;
+ SkFixed oneY = s.fFilterOneY;
+ SkFixed fx = mapper.fixedX();
+ SkFixed fy = mapper.fixedY();
+ SkFixed dx = s.fInvSx;
+ SkFixed dy = s.fInvKy;
+ unsigned maxX = s.fPixmap.width() - 1;
+ unsigned maxY = s.fPixmap.height() - 1;
+
+ if (count >= 4) {
+ int32x4_t wide_fy, wide_fx;
+
+ wide_fx = vdupq_n_s32(fx);
+ wide_fx = vsetq_lane_s32(fx+dx, wide_fx, 1);
+ wide_fx = vsetq_lane_s32(fx+dx+dx, wide_fx, 2);
+ wide_fx = vsetq_lane_s32(fx+dx+dx+dx, wide_fx, 3);
+
+ wide_fy = vdupq_n_s32(fy);
+ wide_fy = vsetq_lane_s32(fy+dy, wide_fy, 1);
+ wide_fy = vsetq_lane_s32(fy+dy+dy, wide_fy, 2);
+ wide_fy = vsetq_lane_s32(fy+dy+dy+dy, wide_fy, 3);
+
+ while (count >= 4) {
+ int32x4x2_t vxy;
+
+ // do the X side, then the Y side, then interleave them
+ vxy.val[0] = PACK_FILTER_Y4_NAME(wide_fy, maxY, oneY PREAMBLE_ARG_Y);
+ vxy.val[1] = PACK_FILTER_X4_NAME(wide_fx, maxX, oneX PREAMBLE_ARG_X);
+
+ // interleave as YXYXYXYX as part of the storing
+ vst2q_s32((int32_t*)xy, vxy);
+
+ // prepare next iteration
+ wide_fx += vdupq_n_s32(dx+dx+dx+dx);
+ fx += dx + dx + dx + dx;
+ wide_fy += vdupq_n_s32(dy+dy+dy+dy);
+ fy += dy+dy+dy+dy;
+ xy += 8; // 4 x's, 4 y's
+ count -= 4;
+ }
+ }
+
+ while (--count >= 0) {
+ // NB: writing Y/X
+ *xy++ = PACK_FILTER_Y_NAME(fy, maxY, oneY PREAMBLE_ARG_Y);
+ fy += dy;
+ *xy++ = PACK_FILTER_X_NAME(fx, maxX, oneX PREAMBLE_ARG_X);
+ fx += dx;
+ }
+}
+
+static void PERSP_FILTER_NAME(const SkBitmapProcState& s,
+ uint32_t* SK_RESTRICT xy, int count,
+ int x, int y) {
+ SkASSERT(s.fInvType & SkMatrix::kPerspective_Mask);
+
+ PREAMBLE(s);
+ unsigned maxX = s.fPixmap.width() - 1;
+ unsigned maxY = s.fPixmap.height() - 1;
+ SkFixed oneX = s.fFilterOneX;
+ SkFixed oneY = s.fFilterOneY;
+
+ SkPerspIter iter(s.fInvMatrix,
+ SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, count);
+
+ while ((count = iter.next()) != 0) {
+ const SkFixed* SK_RESTRICT srcXY = iter.getXY();
+
+ while (count >= 4) {
+ int32x4_t wide_x, wide_y;
+ int32x4x2_t vxy, vresyx;
+
+ // load src: x-y-x-y-x-y-x-y
+ vxy = vld2q_s32(srcXY);
+
+ // do the X side, then the Y side, then interleave them
+ wide_x = vsubq_s32(vxy.val[0], vdupq_n_s32(oneX>>1));
+ wide_y = vsubq_s32(vxy.val[1], vdupq_n_s32(oneY>>1));
+
+ vresyx.val[0] = PACK_FILTER_Y4_NAME(wide_y, maxY, oneY PREAMBLE_ARG_Y);
+ vresyx.val[1] = PACK_FILTER_X4_NAME(wide_x, maxX, oneX PREAMBLE_ARG_X);
+
+ // store interleaved as y-x-y-x-y-x-y-x (NB != read order)
+ vst2q_s32((int32_t*)xy, vresyx);
+
+ // on to the next iteration
+ srcXY += 2*4;
+ count -= 4;
+ xy += 2*4;
+ }
+
+ while (--count >= 0) {
+ // NB: we read x/y, we write y/x
+ *xy++ = PACK_FILTER_Y_NAME(srcXY[1] - (oneY >> 1), maxY,
+ oneY PREAMBLE_ARG_Y);
+ *xy++ = PACK_FILTER_X_NAME(srcXY[0] - (oneX >> 1), maxX,
+ oneX PREAMBLE_ARG_X);
+ srcXY += 2;
+ }
+ }
+}
+
+const SkBitmapProcState::MatrixProc MAKENAME(_Procs)[] = {
+ SCALE_NOFILTER_NAME,
+ SCALE_FILTER_NAME,
+ AFFINE_NOFILTER_NAME,
+ AFFINE_FILTER_NAME,
+ PERSP_NOFILTER_NAME,
+ PERSP_FILTER_NAME
+};
+
+#undef TILEX_PROCF_NEON8
+#undef TILEY_PROCF_NEON8
+#undef TILEX_PROCF_NEON4
+#undef TILEY_PROCF_NEON4
+#undef TILEX_LOW_BITS_NEON4
+#undef TILEY_LOW_BITS_NEON4
+
+#undef MAKENAME
+#undef TILEX_PROCF
+#undef TILEY_PROCF
+#ifdef CHECK_FOR_DECAL
+ #undef CHECK_FOR_DECAL
+#endif
+
+#undef SCALE_NOFILTER_NAME
+#undef SCALE_FILTER_NAME
+#undef AFFINE_NOFILTER_NAME
+#undef AFFINE_FILTER_NAME
+#undef PERSP_NOFILTER_NAME
+#undef PERSP_FILTER_NAME
+
+#undef PREAMBLE
+#undef PREAMBLE_PARAM_X
+#undef PREAMBLE_PARAM_Y
+#undef PREAMBLE_ARG_X
+#undef PREAMBLE_ARG_Y
+
+#undef TILEX_LOW_BITS
+#undef TILEY_LOW_BITS
diff --git a/gfx/skia/skia/src/opts/SkBitmapProcState_opts_SSE2.cpp b/gfx/skia/skia/src/opts/SkBitmapProcState_opts_SSE2.cpp
new file mode 100644
index 000000000..7a3bef0dd
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBitmapProcState_opts_SSE2.cpp
@@ -0,0 +1,621 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <emmintrin.h>
+#include "SkBitmapProcState_opts_SSE2.h"
+#include "SkColorPriv.h"
+#include "SkPaint.h"
+#include "SkUtils.h"
+
+void S32_opaque_D32_filter_DX_SSE2(const SkBitmapProcState& s,
+ const uint32_t* xy,
+ int count, uint32_t* colors) {
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(s.fFilterQuality != kNone_SkFilterQuality);
+ SkASSERT(kN32_SkColorType == s.fPixmap.colorType());
+ SkASSERT(s.fAlphaScale == 256);
+
+ const char* srcAddr = static_cast<const char*>(s.fPixmap.addr());
+ size_t rb = s.fPixmap.rowBytes();
+ uint32_t XY = *xy++;
+ unsigned y0 = XY >> 14;
+ const uint32_t* row0 = reinterpret_cast<const uint32_t*>(srcAddr + (y0 >> 4) * rb);
+ const uint32_t* row1 = reinterpret_cast<const uint32_t*>(srcAddr + (XY & 0x3FFF) * rb);
+ unsigned subY = y0 & 0xF;
+
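+    // The packed Y coordinate has the layout [y0:14 | subY:4 | y1:14]:
+    // bits 31..18 select row0, bits 17..14 hold the 4-bit vertical filter
+    // weight, and bits 13..0 select row1.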
+ // ( 0, 0, 0, 0, 0, 0, 0, 16)
+ __m128i sixteen = _mm_cvtsi32_si128(16);
+
+ // ( 0, 0, 0, 0, 16, 16, 16, 16)
+ sixteen = _mm_shufflelo_epi16(sixteen, 0);
+
+ // ( 0, 0, 0, 0, 0, 0, 0, y)
+ __m128i allY = _mm_cvtsi32_si128(subY);
+
+ // ( 0, 0, 0, 0, y, y, y, y)
+ allY = _mm_shufflelo_epi16(allY, 0);
+
+ // ( 0, 0, 0, 0, 16-y, 16-y, 16-y, 16-y)
+ __m128i negY = _mm_sub_epi16(sixteen, allY);
+
+ // (16-y, 16-y, 16-y, 16-y, y, y, y, y)
+ allY = _mm_unpacklo_epi64(allY, negY);
+
+ // (16, 16, 16, 16, 16, 16, 16, 16 )
+ sixteen = _mm_shuffle_epi32(sixteen, 0);
+
+ // ( 0, 0, 0, 0, 0, 0, 0, 0)
+ __m128i zero = _mm_setzero_si128();
+ do {
+ uint32_t XX = *xy++; // x0:14 | 4 | x1:14
+ unsigned x0 = XX >> 18;
+ unsigned x1 = XX & 0x3FFF;
+
+ // (0, 0, 0, 0, 0, 0, 0, x)
+ __m128i allX = _mm_cvtsi32_si128((XX >> 14) & 0x0F);
+
+ // (0, 0, 0, 0, x, x, x, x)
+ allX = _mm_shufflelo_epi16(allX, 0);
+
+ // (x, x, x, x, x, x, x, x)
+ allX = _mm_shuffle_epi32(allX, 0);
+
+        // (16-x, 16-x, 16-x, 16-x, 16-x, 16-x, 16-x, 16-x)
+ __m128i negX = _mm_sub_epi16(sixteen, allX);
+
+ // Load 4 samples (pixels).
+ __m128i a00 = _mm_cvtsi32_si128(row0[x0]);
+ __m128i a01 = _mm_cvtsi32_si128(row0[x1]);
+ __m128i a10 = _mm_cvtsi32_si128(row1[x0]);
+ __m128i a11 = _mm_cvtsi32_si128(row1[x1]);
+
+ // (0, 0, a00, a10)
+ __m128i a00a10 = _mm_unpacklo_epi32(a10, a00);
+
+ // Expand to 16 bits per component.
+ a00a10 = _mm_unpacklo_epi8(a00a10, zero);
+
+ // ((a00 * (16-y)), (a10 * y)).
+ a00a10 = _mm_mullo_epi16(a00a10, allY);
+
+ // (a00 * (16-y) * (16-x), a10 * y * (16-x)).
+ a00a10 = _mm_mullo_epi16(a00a10, negX);
+
+        // (0, 0, a01, a11)
+ __m128i a01a11 = _mm_unpacklo_epi32(a11, a01);
+
+ // Expand to 16 bits per component.
+ a01a11 = _mm_unpacklo_epi8(a01a11, zero);
+
+ // (a01 * (16-y)), (a11 * y)
+ a01a11 = _mm_mullo_epi16(a01a11, allY);
+
+ // (a01 * (16-y) * x), (a11 * y * x)
+ a01a11 = _mm_mullo_epi16(a01a11, allX);
+
+ // (a00*w00 + a01*w01, a10*w10 + a11*w11)
+ __m128i sum = _mm_add_epi16(a00a10, a01a11);
+
+ // (DC, a00*w00 + a01*w01)
+ __m128i shifted = _mm_shuffle_epi32(sum, 0xEE);
+
+ // (DC, a00*w00 + a01*w01 + a10*w10 + a11*w11)
+ sum = _mm_add_epi16(sum, shifted);
+
+ // Divide each 16 bit component by 256.
+ sum = _mm_srli_epi16(sum, 8);
+
+ // Pack lower 4 16 bit values of sum into lower 4 bytes.
+ sum = _mm_packus_epi16(sum, zero);
+
+ // Extract low int and store.
+ *colors++ = _mm_cvtsi128_si32(sum);
+ } while (--count > 0);
+}
+
+void S32_alpha_D32_filter_DX_SSE2(const SkBitmapProcState& s,
+ const uint32_t* xy,
+ int count, uint32_t* colors) {
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(s.fFilterQuality != kNone_SkFilterQuality);
+ SkASSERT(kN32_SkColorType == s.fPixmap.colorType());
+ SkASSERT(s.fAlphaScale < 256);
+
+ const char* srcAddr = static_cast<const char*>(s.fPixmap.addr());
+ size_t rb = s.fPixmap.rowBytes();
+ uint32_t XY = *xy++;
+ unsigned y0 = XY >> 14;
+ const uint32_t* row0 = reinterpret_cast<const uint32_t*>(srcAddr + (y0 >> 4) * rb);
+ const uint32_t* row1 = reinterpret_cast<const uint32_t*>(srcAddr + (XY & 0x3FFF) * rb);
+ unsigned subY = y0 & 0xF;
+
+ // ( 0, 0, 0, 0, 0, 0, 0, 16)
+ __m128i sixteen = _mm_cvtsi32_si128(16);
+
+ // ( 0, 0, 0, 0, 16, 16, 16, 16)
+ sixteen = _mm_shufflelo_epi16(sixteen, 0);
+
+ // ( 0, 0, 0, 0, 0, 0, 0, y)
+ __m128i allY = _mm_cvtsi32_si128(subY);
+
+ // ( 0, 0, 0, 0, y, y, y, y)
+ allY = _mm_shufflelo_epi16(allY, 0);
+
+ // ( 0, 0, 0, 0, 16-y, 16-y, 16-y, 16-y)
+ __m128i negY = _mm_sub_epi16(sixteen, allY);
+
+ // (16-y, 16-y, 16-y, 16-y, y, y, y, y)
+ allY = _mm_unpacklo_epi64(allY, negY);
+
+ // (16, 16, 16, 16, 16, 16, 16, 16 )
+ sixteen = _mm_shuffle_epi32(sixteen, 0);
+
+ // ( 0, 0, 0, 0, 0, 0, 0, 0)
+ __m128i zero = _mm_setzero_si128();
+
+ // ( alpha, alpha, alpha, alpha, alpha, alpha, alpha, alpha )
+ __m128i alpha = _mm_set1_epi16(s.fAlphaScale);
+
+ do {
+ uint32_t XX = *xy++; // x0:14 | 4 | x1:14
+ unsigned x0 = XX >> 18;
+ unsigned x1 = XX & 0x3FFF;
+
+ // (0, 0, 0, 0, 0, 0, 0, x)
+ __m128i allX = _mm_cvtsi32_si128((XX >> 14) & 0x0F);
+
+ // (0, 0, 0, 0, x, x, x, x)
+ allX = _mm_shufflelo_epi16(allX, 0);
+
+ // (x, x, x, x, x, x, x, x)
+ allX = _mm_shuffle_epi32(allX, 0);
+
+        // (16-x, 16-x, 16-x, 16-x, 16-x, 16-x, 16-x, 16-x)
+ __m128i negX = _mm_sub_epi16(sixteen, allX);
+
+ // Load 4 samples (pixels).
+ __m128i a00 = _mm_cvtsi32_si128(row0[x0]);
+ __m128i a01 = _mm_cvtsi32_si128(row0[x1]);
+ __m128i a10 = _mm_cvtsi32_si128(row1[x0]);
+ __m128i a11 = _mm_cvtsi32_si128(row1[x1]);
+
+ // (0, 0, a00, a10)
+ __m128i a00a10 = _mm_unpacklo_epi32(a10, a00);
+
+ // Expand to 16 bits per component.
+ a00a10 = _mm_unpacklo_epi8(a00a10, zero);
+
+ // ((a00 * (16-y)), (a10 * y)).
+ a00a10 = _mm_mullo_epi16(a00a10, allY);
+
+ // (a00 * (16-y) * (16-x), a10 * y * (16-x)).
+ a00a10 = _mm_mullo_epi16(a00a10, negX);
+
+        // (0, 0, a01, a11)
+ __m128i a01a11 = _mm_unpacklo_epi32(a11, a01);
+
+ // Expand to 16 bits per component.
+ a01a11 = _mm_unpacklo_epi8(a01a11, zero);
+
+ // (a01 * (16-y)), (a11 * y)
+ a01a11 = _mm_mullo_epi16(a01a11, allY);
+
+ // (a01 * (16-y) * x), (a11 * y * x)
+ a01a11 = _mm_mullo_epi16(a01a11, allX);
+
+ // (a00*w00 + a01*w01, a10*w10 + a11*w11)
+ __m128i sum = _mm_add_epi16(a00a10, a01a11);
+
+ // (DC, a00*w00 + a01*w01)
+ __m128i shifted = _mm_shuffle_epi32(sum, 0xEE);
+
+ // (DC, a00*w00 + a01*w01 + a10*w10 + a11*w11)
+ sum = _mm_add_epi16(sum, shifted);
+
+ // Divide each 16 bit component by 256.
+ sum = _mm_srli_epi16(sum, 8);
+
+ // Multiply by alpha.
+ sum = _mm_mullo_epi16(sum, alpha);
+
+ // Divide each 16 bit component by 256.
+ sum = _mm_srli_epi16(sum, 8);
+
+ // Pack lower 4 16 bit values of sum into lower 4 bytes.
+ sum = _mm_packus_epi16(sum, zero);
+
+ // Extract low int and store.
+ *colors++ = _mm_cvtsi128_si32(sum);
+ } while (--count > 0);
+}
+
+static inline uint32_t ClampX_ClampY_pack_filter(SkFixed f, unsigned max,
+ SkFixed one) {
+ unsigned i = SkClampMax(f >> 16, max);
+ i = (i << 4) | ((f >> 12) & 0xF);
+ return (i << 14) | SkClampMax((f + one) >> 16, max);
+}
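+// The packed word has the layout [i0:14 | lowBits:4 | i1:14]: the clamped
+// index of the first sample, a 4-bit sub-pixel weight, and the clamped index
+// of the second sample. For example, f == 0x00058000 (5.5 in 16.16 fixed
+// point) with one == SK_Fixed1 and max >= 6 packs i0 == 5, lowBits == 8 and
+// i1 == 6.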
+
+/* SSE version of ClampX_ClampY_filter_scale()
+ * portable version is in core/SkBitmapProcState_matrix.h
+ */
+void ClampX_ClampY_filter_scale_SSE2(const SkBitmapProcState& s, uint32_t xy[],
+ int count, int x, int y) {
+ SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
+ SkMatrix::kScale_Mask)) == 0);
+ SkASSERT(s.fInvKy == 0);
+
+ const unsigned maxX = s.fPixmap.width() - 1;
+ const SkFixed one = s.fFilterOneX;
+ const SkFixed dx = s.fInvSx;
+
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ const SkFixed fy = mapper.fixedY();
+ const unsigned maxY = s.fPixmap.height() - 1;
+ // compute our two Y values up front
+ *xy++ = ClampX_ClampY_pack_filter(fy, maxY, s.fFilterOneY);
+ // now initialize fx
+ SkFixed fx = mapper.fixedX();
+
+ // test if we don't need to apply the tile proc
+ if (dx > 0 && (unsigned)(fx >> 16) <= maxX &&
+ (unsigned)((fx + dx * (count - 1)) >> 16) < maxX) {
+ if (count >= 4) {
+ // SSE version of decal_filter_scale
+ while ((size_t(xy) & 0x0F) != 0) {
+ SkASSERT((fx >> (16 + 14)) == 0);
+ *xy++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
+ fx += dx;
+ count--;
+ }
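+            // xy is now 16-byte aligned, so the vector loop below can safely
+            // use the aligned _mm_store_si128.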
+
+ __m128i wide_1 = _mm_set1_epi32(1);
+ __m128i wide_dx4 = _mm_set1_epi32(dx * 4);
+ __m128i wide_fx = _mm_set_epi32(fx + dx * 3, fx + dx * 2,
+ fx + dx, fx);
+
+ while (count >= 4) {
+ __m128i wide_out;
+
+ wide_out = _mm_slli_epi32(_mm_srai_epi32(wide_fx, 12), 14);
+ wide_out = _mm_or_si128(wide_out, _mm_add_epi32(
+ _mm_srai_epi32(wide_fx, 16), wide_1));
+
+ _mm_store_si128(reinterpret_cast<__m128i*>(xy), wide_out);
+
+ xy += 4;
+ fx += dx * 4;
+ wide_fx = _mm_add_epi32(wide_fx, wide_dx4);
+ count -= 4;
+ } // while count >= 4
+ } // if count >= 4
+
+ while (count-- > 0) {
+ SkASSERT((fx >> (16 + 14)) == 0);
+ *xy++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
+ fx += dx;
+ }
+ } else {
+        // SSE2 only supports 16-bit integer max & min, so only vectorize the
+        // case where maxX fits in 16 bits. maxX is the bitmap's width minus
+        // one, and bitmaps wider than 65535 pixels are rare in practice.
+ if ((count >= 4) && (maxX <= 0xFFFF)) {
+ while (((size_t)xy & 0x0F) != 0) {
+ *xy++ = ClampX_ClampY_pack_filter(fx, maxX, one);
+ fx += dx;
+ count--;
+ }
+
+ __m128i wide_fx = _mm_set_epi32(fx + dx * 3, fx + dx * 2,
+ fx + dx, fx);
+ __m128i wide_dx4 = _mm_set1_epi32(dx * 4);
+ __m128i wide_one = _mm_set1_epi32(one);
+ __m128i wide_maxX = _mm_set1_epi32(maxX);
+ __m128i wide_mask = _mm_set1_epi32(0xF);
+
+ while (count >= 4) {
+ __m128i wide_i;
+ __m128i wide_lo;
+ __m128i wide_fx1;
+
+ // i = SkClampMax(f>>16,maxX)
+ wide_i = _mm_max_epi16(_mm_srli_epi32(wide_fx, 16),
+ _mm_setzero_si128());
+ wide_i = _mm_min_epi16(wide_i, wide_maxX);
+
+ // i<<4 | TILEX_LOW_BITS(fx)
+ wide_lo = _mm_srli_epi32(wide_fx, 12);
+ wide_lo = _mm_and_si128(wide_lo, wide_mask);
+ wide_i = _mm_slli_epi32(wide_i, 4);
+ wide_i = _mm_or_si128(wide_i, wide_lo);
+
+ // i<<14
+ wide_i = _mm_slli_epi32(wide_i, 14);
+
+ // SkClampMax(((f+one))>>16,max)
+ wide_fx1 = _mm_add_epi32(wide_fx, wide_one);
+ wide_fx1 = _mm_max_epi16(_mm_srli_epi32(wide_fx1, 16),
+ _mm_setzero_si128());
+ wide_fx1 = _mm_min_epi16(wide_fx1, wide_maxX);
+
+ // final combination
+ wide_i = _mm_or_si128(wide_i, wide_fx1);
+ _mm_store_si128(reinterpret_cast<__m128i*>(xy), wide_i);
+
+ wide_fx = _mm_add_epi32(wide_fx, wide_dx4);
+ fx += dx * 4;
+ xy += 4;
+ count -= 4;
+ } // while count >= 4
+ } // if count >= 4
+
+ while (count-- > 0) {
+ *xy++ = ClampX_ClampY_pack_filter(fx, maxX, one);
+ fx += dx;
+ }
+ }
+}
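The comments above lean on the fact that SSE2 only has signed 16-bit vector max/min. Per lane, the `_mm_max_epi16`/`_mm_min_epi16` pair stands in for an ordinary clamp to [0, maxX]; a scalar sketch under the same `maxX <= 0xFFFF` guard, with an invented helper name:

#include <algorithm>
#include <cstdint>

// Scalar equivalent of the clamp performed by the max/min pair above,
// valid when the value and maxV fit the range the vector guard allows.
static inline uint32_t clamp_max_scalar(int32_t value, int32_t maxV) {
    return static_cast<uint32_t>(std::min(std::max(value, 0), maxV));
}

Used as clamp_max_scalar(fx >> 16, maxX), it mirrors the SkClampMax call in the scalar tail loop.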
+
+/* SSE version of ClampX_ClampY_nofilter_scale()
+ * portable version is in core/SkBitmapProcState_matrix.h
+ */
+void ClampX_ClampY_nofilter_scale_SSE2(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y) {
+ SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
+ SkMatrix::kScale_Mask)) == 0);
+
+ // we store y, x, x, x, x, x
+ const unsigned maxX = s.fPixmap.width() - 1;
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ const unsigned maxY = s.fPixmap.height() - 1;
+ *xy++ = SkClampMax(mapper.intY(), maxY);
+ SkFixed fx = mapper.fixedX();
+
+ if (0 == maxX) {
+ // all of the following X values must be 0
+ memset(xy, 0, count * sizeof(uint16_t));
+ return;
+ }
+
+ const SkFixed dx = s.fInvSx;
+
+ // test if we don't need to apply the tile proc
+ if ((unsigned)(fx >> 16) <= maxX &&
+ (unsigned)((fx + dx * (count - 1)) >> 16) <= maxX) {
+ // SSE version of decal_nofilter_scale
+ if (count >= 8) {
+ while (((size_t)xy & 0x0F) != 0) {
+ *xy++ = pack_two_shorts(fx >> 16, (fx + dx) >> 16);
+ fx += 2 * dx;
+ count -= 2;
+ }
+
+ __m128i wide_dx4 = _mm_set1_epi32(dx * 4);
+ __m128i wide_dx8 = _mm_add_epi32(wide_dx4, wide_dx4);
+
+ __m128i wide_low = _mm_set_epi32(fx + dx * 3, fx + dx * 2,
+ fx + dx, fx);
+ __m128i wide_high = _mm_add_epi32(wide_low, wide_dx4);
+
+ while (count >= 8) {
+ __m128i wide_out_low = _mm_srli_epi32(wide_low, 16);
+ __m128i wide_out_high = _mm_srli_epi32(wide_high, 16);
+
+ __m128i wide_result = _mm_packs_epi32(wide_out_low,
+ wide_out_high);
+ _mm_store_si128(reinterpret_cast<__m128i*>(xy), wide_result);
+
+ wide_low = _mm_add_epi32(wide_low, wide_dx8);
+ wide_high = _mm_add_epi32(wide_high, wide_dx8);
+
+ xy += 4;
+ fx += dx * 8;
+ count -= 8;
+ }
+ } // if count >= 8
+
+ uint16_t* xx = reinterpret_cast<uint16_t*>(xy);
+ while (count-- > 0) {
+ *xx++ = SkToU16(fx >> 16);
+ fx += dx;
+ }
+ } else {
+        // SSE2 only supports 16-bit integer max & min, so only take the
+        // vector path when maxX fits in a 16-bit integer. maxX is the
+        // bitmap's width minus one, and bitmaps wider than a 16-bit integer
+        // should be rare in practice.
+ if ((count >= 8) && (maxX <= 0xFFFF)) {
+ while (((size_t)xy & 0x0F) != 0) {
+ *xy++ = pack_two_shorts(SkClampMax((fx + dx) >> 16, maxX),
+ SkClampMax(fx >> 16, maxX));
+ fx += 2 * dx;
+ count -= 2;
+ }
+
+ __m128i wide_dx4 = _mm_set1_epi32(dx * 4);
+ __m128i wide_dx8 = _mm_add_epi32(wide_dx4, wide_dx4);
+
+ __m128i wide_low = _mm_set_epi32(fx + dx * 3, fx + dx * 2,
+ fx + dx, fx);
+ __m128i wide_high = _mm_add_epi32(wide_low, wide_dx4);
+ __m128i wide_maxX = _mm_set1_epi32(maxX);
+
+ while (count >= 8) {
+ __m128i wide_out_low = _mm_srli_epi32(wide_low, 16);
+ __m128i wide_out_high = _mm_srli_epi32(wide_high, 16);
+
+ wide_out_low = _mm_max_epi16(wide_out_low,
+ _mm_setzero_si128());
+ wide_out_low = _mm_min_epi16(wide_out_low, wide_maxX);
+ wide_out_high = _mm_max_epi16(wide_out_high,
+ _mm_setzero_si128());
+ wide_out_high = _mm_min_epi16(wide_out_high, wide_maxX);
+
+ __m128i wide_result = _mm_packs_epi32(wide_out_low,
+ wide_out_high);
+ _mm_store_si128(reinterpret_cast<__m128i*>(xy), wide_result);
+
+ wide_low = _mm_add_epi32(wide_low, wide_dx8);
+ wide_high = _mm_add_epi32(wide_high, wide_dx8);
+
+ xy += 4;
+ fx += dx * 8;
+ count -= 8;
+ }
+ } // if count >= 8
+
+ uint16_t* xx = reinterpret_cast<uint16_t*>(xy);
+ while (count-- > 0) {
+ *xx++ = SkClampMax(fx >> 16, maxX);
+ fx += dx;
+ }
+ }
+}
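The vector code above only speeds up how the indices are produced; the output layout is the one the scalar tail loop writes. A compact scalar model of that layout, with invented names, for illustration only: one clamped y index in the first 32-bit slot, then `count` clamped 16-bit x indices packed two per slot.

#include <algorithm>
#include <cstdint>

static void clamp_nofilter_scale_model(uint32_t xy[], int count,
                                       int32_t fx, int32_t dx,
                                       int32_t intY, int maxX, int maxY) {
    // y index first, clamped to the bitmap height.
    *xy++ = static_cast<uint32_t>(std::min(std::max(intY, 0), maxY));
    // Then count x indices, each clamped to the bitmap width, stored as uint16.
    uint16_t* xx = reinterpret_cast<uint16_t*>(xy);
    for (int i = 0; i < count; ++i) {
        xx[i] = static_cast<uint16_t>(std::min(std::max(fx >> 16, 0), maxX));
        fx += dx;
    }
}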
+
+/* SSE version of ClampX_ClampY_filter_affine()
+ * portable version is in core/SkBitmapProcState_matrix.h
+ */
+void ClampX_ClampY_filter_affine_SSE2(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y) {
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+
+ SkFixed oneX = s.fFilterOneX;
+ SkFixed oneY = s.fFilterOneY;
+ SkFixed fx = mapper.fixedX();
+ SkFixed fy = mapper.fixedY();
+ SkFixed dx = s.fInvSx;
+ SkFixed dy = s.fInvKy;
+ unsigned maxX = s.fPixmap.width() - 1;
+ unsigned maxY = s.fPixmap.height() - 1;
+
+ if (count >= 2 && (maxX <= 0xFFFF)) {
+ SkFixed dx2 = dx + dx;
+ SkFixed dy2 = dy + dy;
+
+ __m128i wide_f = _mm_set_epi32(fx + dx, fy + dy, fx, fy);
+ __m128i wide_d2 = _mm_set_epi32(dx2, dy2, dx2, dy2);
+ __m128i wide_one = _mm_set_epi32(oneX, oneY, oneX, oneY);
+ __m128i wide_max = _mm_set_epi32(maxX, maxY, maxX, maxY);
+ __m128i wide_mask = _mm_set1_epi32(0xF);
+
+ while (count >= 2) {
+ // i = SkClampMax(f>>16,maxX)
+ __m128i wide_i = _mm_max_epi16(_mm_srli_epi32(wide_f, 16),
+ _mm_setzero_si128());
+ wide_i = _mm_min_epi16(wide_i, wide_max);
+
+ // i<<4 | TILEX_LOW_BITS(f)
+ __m128i wide_lo = _mm_srli_epi32(wide_f, 12);
+ wide_lo = _mm_and_si128(wide_lo, wide_mask);
+ wide_i = _mm_slli_epi32(wide_i, 4);
+ wide_i = _mm_or_si128(wide_i, wide_lo);
+
+ // i<<14
+ wide_i = _mm_slli_epi32(wide_i, 14);
+
+ // SkClampMax(((f+one))>>16,max)
+ __m128i wide_f1 = _mm_add_epi32(wide_f, wide_one);
+ wide_f1 = _mm_max_epi16(_mm_srli_epi32(wide_f1, 16),
+ _mm_setzero_si128());
+ wide_f1 = _mm_min_epi16(wide_f1, wide_max);
+
+ // final combination
+ wide_i = _mm_or_si128(wide_i, wide_f1);
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(xy), wide_i);
+
+ wide_f = _mm_add_epi32(wide_f, wide_d2);
+
+ fx += dx2;
+ fy += dy2;
+ xy += 4;
+ count -= 2;
+ } // while count >= 2
+ } // if count >= 2
+
+ while (count-- > 0) {
+ *xy++ = ClampX_ClampY_pack_filter(fy, maxY, oneY);
+ fy += dy;
+ *xy++ = ClampX_ClampY_pack_filter(fx, maxX, oneX);
+ fx += dx;
+ }
+}
+
+/* SSE version of ClampX_ClampY_nofilter_affine()
+ * portable version is in core/SkBitmapProcState_matrix.h
+ */
+void ClampX_ClampY_nofilter_affine_SSE2(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y) {
+ SkASSERT(s.fInvType & SkMatrix::kAffine_Mask);
+ SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
+ SkMatrix::kScale_Mask |
+ SkMatrix::kAffine_Mask)) == 0);
+
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+
+ SkFixed fx = mapper.fixedX();
+ SkFixed fy = mapper.fixedY();
+ SkFixed dx = s.fInvSx;
+ SkFixed dy = s.fInvKy;
+ int maxX = s.fPixmap.width() - 1;
+ int maxY = s.fPixmap.height() - 1;
+
+ if (count >= 4 && (maxX <= 0xFFFF)) {
+ while (((size_t)xy & 0x0F) != 0) {
+ *xy++ = (SkClampMax(fy >> 16, maxY) << 16) |
+ SkClampMax(fx >> 16, maxX);
+ fx += dx;
+ fy += dy;
+ count--;
+ }
+
+ SkFixed dx4 = dx * 4;
+ SkFixed dy4 = dy * 4;
+
+ __m128i wide_fx = _mm_set_epi32(fx + dx * 3, fx + dx * 2,
+ fx + dx, fx);
+ __m128i wide_fy = _mm_set_epi32(fy + dy * 3, fy + dy * 2,
+ fy + dy, fy);
+ __m128i wide_dx4 = _mm_set1_epi32(dx4);
+ __m128i wide_dy4 = _mm_set1_epi32(dy4);
+
+ __m128i wide_maxX = _mm_set1_epi32(maxX);
+ __m128i wide_maxY = _mm_set1_epi32(maxY);
+
+ while (count >= 4) {
+ // SkClampMax(fx>>16,maxX)
+ __m128i wide_lo = _mm_max_epi16(_mm_srli_epi32(wide_fx, 16),
+ _mm_setzero_si128());
+ wide_lo = _mm_min_epi16(wide_lo, wide_maxX);
+
+ // SkClampMax(fy>>16,maxY)
+ __m128i wide_hi = _mm_max_epi16(_mm_srli_epi32(wide_fy, 16),
+ _mm_setzero_si128());
+ wide_hi = _mm_min_epi16(wide_hi, wide_maxY);
+
+ // final combination
+ __m128i wide_i = _mm_or_si128(_mm_slli_epi32(wide_hi, 16),
+ wide_lo);
+ _mm_store_si128(reinterpret_cast<__m128i*>(xy), wide_i);
+
+ wide_fx = _mm_add_epi32(wide_fx, wide_dx4);
+ wide_fy = _mm_add_epi32(wide_fy, wide_dy4);
+
+ fx += dx4;
+ fy += dy4;
+ xy += 4;
+ count -= 4;
+ } // while count >= 4
+ } // if count >= 4
+
+ while (count-- > 0) {
+ *xy++ = (SkClampMax(fy >> 16, maxY) << 16) |
+ SkClampMax(fx >> 16, maxX);
+ fx += dx;
+ fy += dy;
+ }
+}
diff --git a/gfx/skia/skia/src/opts/SkBitmapProcState_opts_SSE2.h b/gfx/skia/skia/src/opts/SkBitmapProcState_opts_SSE2.h
new file mode 100644
index 000000000..d14f282de
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBitmapProcState_opts_SSE2.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapProcState_opts_SSE2_DEFINED
+#define SkBitmapProcState_opts_SSE2_DEFINED
+
+#include "SkBitmapProcState.h"
+
+void S32_opaque_D32_filter_DX_SSE2(const SkBitmapProcState& s,
+ const uint32_t* xy,
+ int count, uint32_t* colors);
+void S32_alpha_D32_filter_DX_SSE2(const SkBitmapProcState& s,
+ const uint32_t* xy,
+ int count, uint32_t* colors);
+void ClampX_ClampY_filter_scale_SSE2(const SkBitmapProcState& s, uint32_t xy[],
+ int count, int x, int y);
+void ClampX_ClampY_nofilter_scale_SSE2(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y);
+void ClampX_ClampY_filter_affine_SSE2(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y);
+void ClampX_ClampY_nofilter_affine_SSE2(const SkBitmapProcState& s,
+ uint32_t xy[], int count, int x, int y);
+
+#endif
diff --git a/gfx/skia/skia/src/opts/SkBitmapProcState_opts_SSSE3.cpp b/gfx/skia/skia/src/opts/SkBitmapProcState_opts_SSSE3.cpp
new file mode 100644
index 000000000..17d7da931
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBitmapProcState_opts_SSSE3.cpp
@@ -0,0 +1,761 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapProcState_opts_SSSE3.h"
+#include "SkColorPriv.h"
+#include "SkPaint.h"
+#include "SkUtils.h"
+
+/* We always build the SSSE3 functions and let the caller determine SSSE3
+ * support at runtime. For compilers that do not support SSSE3 we provide a
+ * stub implementation instead.
+ */
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+
+#include <tmmintrin.h> // SSSE3
+
+// Wrapping these in an anonymous namespace seemed to force gcc to inline the
+// template instantiations directly, instead of emitting
+// S32_generic_D32_filter_DX_SSSE3<true> and
+// S32_generic_D32_filter_DX_SSSE3<false> as separate functions that were then
+// called by the external wrappers.
+namespace {
+// In this file, the alpha and non-alpha variations are implemented with a
+// template, as it makes the code more compact and a bit easier to maintain,
+// while letting the compiler generate exactly the same code as two functions
+// that differ by only a few lines.
+
+
+// Prepare all necessary constants for a round of processing for two pixel
+// pairs.
+// @param xy is the location where the xy parameters for four pixels should be
+//        read from. It is identical in concept to argument two of the
+//        S32_{opaque}_D32_filter_DX methods.
+// @param mask_3FFF vector of 32 bit constants containing 3FFF,
+//        suitable for masking the bottom 14 bits of an XY value.
+// @param mask_000F vector of 32 bit constants containing 000F,
+//        suitable for masking the bottom 4 bits of an XY value.
+// @param sixteen_8bit vector of 8 bit components containing the value 16.
+// @param mask_dist_select vector of 8 bit components containing the shuffling
+// parameters to reorder x[0-3] parameters.
+// @param all_x_result vector of 8 bit components that will contain the
+// (4x(x3), 4x(x2), 4x(x1), 4x(x0)) upon return.
+// @param sixteen_minus_x vector of 8 bit components, containing
+// (4x(16 - x3), 4x(16 - x2), 4x(16 - x1), 4x(16 - x0))
+inline void PrepareConstantsTwoPixelPairs(const uint32_t* xy,
+ const __m128i& mask_3FFF,
+ const __m128i& mask_000F,
+ const __m128i& sixteen_8bit,
+ const __m128i& mask_dist_select,
+ __m128i* all_x_result,
+ __m128i* sixteen_minus_x,
+ int* x0,
+ int* x1) {
+ const __m128i xx = _mm_loadu_si128(reinterpret_cast<const __m128i *>(xy));
+
+ // 4 delta X
+ // (x03, x02, x01, x00)
+ const __m128i x0_wide = _mm_srli_epi32(xx, 18);
+ // (x13, x12, x11, x10)
+ const __m128i x1_wide = _mm_and_si128(xx, mask_3FFF);
+
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(x0), x0_wide);
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(x1), x1_wide);
+
+ __m128i all_x = _mm_and_si128(_mm_srli_epi32(xx, 14), mask_000F);
+
+ // (4x(x3), 4x(x2), 4x(x1), 4x(x0))
+ all_x = _mm_shuffle_epi8(all_x, mask_dist_select);
+
+ *all_x_result = all_x;
+ // (4x(16-x3), 4x(16-x2), 4x(16-x1), 4x(16-x0))
+ *sixteen_minus_x = _mm_sub_epi8(sixteen_8bit, all_x);
+}
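The `mask_dist_select` shuffle is the key step here: it replicates the low byte of each 32-bit lane across that whole lane, turning (x3, x2, x1, x0) into (4x(x3), 4x(x2), 4x(x1), 4x(x0)). A standalone demonstration of that effect, illustrative only (compile with SSSE3 enabled):

#include <tmmintrin.h>   // SSSE3
#include <cstdint>
#include <cstdio>

int main() {
    const __m128i mask_dist_select = _mm_set_epi8(12, 12, 12, 12,
                                                  8, 8, 8, 8,
                                                  4, 4, 4, 4,
                                                  0, 0, 0, 0);
    const __m128i x = _mm_set_epi32(7, 5, 3, 1);  // four small sub-pixel values
    const __m128i spread = _mm_shuffle_epi8(x, mask_dist_select);

    uint8_t bytes[16];
    _mm_storeu_si128(reinterpret_cast<__m128i*>(bytes), spread);
    for (int i = 0; i < 16; ++i) {
        printf("%d ", bytes[i]);  // prints: 1 1 1 1 3 3 3 3 5 5 5 5 7 7 7 7
    }
    printf("\n");
    return 0;
}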
+
+// Prepare all necessary constants for a round of processing for two pixel
+// pairs.
+// @param xy is the location where the xy parameters for four pixels should be
+//        read from. It is identical in concept to argument two of the
+//        S32_{opaque}_D32_filter_DXDY methods.
+// @param mask_3FFF vector of 32 bit constants containing 3FFF,
+//        suitable for masking the bottom 14 bits of an XY value.
+// @param mask_000F vector of 32 bit constants containing 000F,
+//        suitable for masking the bottom 4 bits of an XY value.
+// @param sixteen_8bit vector of 8 bit components containing the value 16.
+// @param mask_dist_select vector of 8 bit components containing the shuffling
+// parameters to reorder x[0-3] parameters.
+// @param all_xy_result vector of 8 bit components that will contain the
+// (4x(y1), 4x(y0), 4x(x1), 4x(x0)) upon return.
+// @param sixteen_minus_xy vector of 8 bit components, containing
+// (4x(16-y1), 4x(16-y0), 4x(16-x1), 4x(16-x0)).
+inline void PrepareConstantsTwoPixelPairsDXDY(const uint32_t* xy,
+ const __m128i& mask_3FFF,
+ const __m128i& mask_000F,
+ const __m128i& sixteen_8bit,
+ const __m128i& mask_dist_select,
+ __m128i* all_xy_result,
+ __m128i* sixteen_minus_xy,
+ int* xy0, int* xy1) {
+ const __m128i xy_wide =
+ _mm_loadu_si128(reinterpret_cast<const __m128i *>(xy));
+
+ // (x10, y10, x00, y00)
+ __m128i xy0_wide = _mm_srli_epi32(xy_wide, 18);
+ // (y10, y00, x10, x00)
+ xy0_wide = _mm_shuffle_epi32(xy0_wide, _MM_SHUFFLE(2, 0, 3, 1));
+ // (x11, y11, x01, y01)
+ __m128i xy1_wide = _mm_and_si128(xy_wide, mask_3FFF);
+ // (y11, y01, x11, x01)
+ xy1_wide = _mm_shuffle_epi32(xy1_wide, _MM_SHUFFLE(2, 0, 3, 1));
+
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(xy0), xy0_wide);
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(xy1), xy1_wide);
+
+ // (x1, y1, x0, y0)
+ __m128i all_xy = _mm_and_si128(_mm_srli_epi32(xy_wide, 14), mask_000F);
+ // (y1, y0, x1, x0)
+ all_xy = _mm_shuffle_epi32(all_xy, _MM_SHUFFLE(2, 0, 3, 1));
+ // (4x(y1), 4x(y0), 4x(x1), 4x(x0))
+ all_xy = _mm_shuffle_epi8(all_xy, mask_dist_select);
+
+ *all_xy_result = all_xy;
+ // (4x(16-y1), 4x(16-y0), 4x(16-x1), 4x(16-x0))
+ *sixteen_minus_xy = _mm_sub_epi8(sixteen_8bit, all_xy);
+}
+
+// Helper function used when processing one pixel pair.
+// @param pixel0..3 are the four input pixels
+// @param scale_x vector of 8 bit components to multiply the pixel[0:3]. This
+// will contain (4x(x1, 16-x1), 4x(x0, 16-x0))
+// or (4x(x3, 16-x3), 4x(x2, 16-x2))
+// @return a vector of 16 bit components containing:
+// (Aa2 * (16 - x1) + Aa3 * x1, ... , Ra0 * (16 - x0) + Ra1 * x0)
+inline __m128i ProcessPixelPairHelper(uint32_t pixel0,
+ uint32_t pixel1,
+ uint32_t pixel2,
+ uint32_t pixel3,
+ const __m128i& scale_x) {
+ __m128i a0, a1, a2, a3;
+ // Load 2 pairs of pixels
+ a0 = _mm_cvtsi32_si128(pixel0);
+ a1 = _mm_cvtsi32_si128(pixel1);
+
+ // Interleave pixels.
+ // (0, 0, 0, 0, 0, 0, 0, 0, Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
+ a0 = _mm_unpacklo_epi8(a0, a1);
+
+ a2 = _mm_cvtsi32_si128(pixel2);
+ a3 = _mm_cvtsi32_si128(pixel3);
+ // (0, 0, 0, 0, 0, 0, 0, 0, Aa3, Aa2, Ba3, Ba2, Ga3, Ga2, Ra3, Ra2)
+ a2 = _mm_unpacklo_epi8(a2, a3);
+
+ // two pairs of pixel pairs, interleaved.
+ // (Aa3, Aa2, Ba3, Ba2, Ga3, Ga2, Ra3, Ra2,
+ // Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
+ a0 = _mm_unpacklo_epi64(a0, a2);
+
+ // multiply and sum to 16 bit components.
+ // (Aa2 * (16 - x1) + Aa3 * x1, ... , Ra0 * (16 - x0) + Ra1 * x0)
+    // At that point, we use a bit less than 12 bits of each 16 bit
+    // component:
+    // all components are at most 255, so
+    // C0 * (16 - x) + C1 * x <= 255 * (16 - x) + 255 * x = 255 * 16 = 4080.
+ return _mm_maddubs_epi16(a0, scale_x);
+}
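Per pixel pair, `_mm_maddubs_epi16` computes exactly the weighted sum described above, channel by channel. A scalar model of one channel, with the headroom argument spelled out (names invented for illustration):

#include <cstdint>

// One channel of one pixel pair: c0 weighted by (16 - x), c1 weighted by x.
// With c0, c1 <= 255 and x in [0, 16], the result is at most 255 * 16 = 4080,
// which comfortably fits the 16 bit lanes _mm_maddubs_epi16 produces.
static inline uint16_t weigh_pair(uint8_t c0, uint8_t c1, unsigned x) {
    return static_cast<uint16_t>(c0 * (16 - x) + c1 * x);
}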
+
+// Scale back the results after multiplications to the [0:255] range, and scale
+// by alpha when has_alpha is true.
+// Depending on whether one set or two sets of multiplications had been applied,
+// the results have to be shifted by four places (dividing by 16), or shifted
+// by eight places (dividing by 256), since each multiplication is by a quantity
+// in the range [0:16].
+template<bool has_alpha, int scale>
+inline __m128i ScaleFourPixels(__m128i* pixels,
+ const __m128i& alpha) {
+ // Divide each 16 bit component by 16 (or 256 depending on scale).
+ *pixels = _mm_srli_epi16(*pixels, scale);
+
+ if (has_alpha) {
+ // Multiply by alpha.
+ *pixels = _mm_mullo_epi16(*pixels, alpha);
+
+ // Divide each 16 bit component by 256.
+ *pixels = _mm_srli_epi16(*pixels, 8);
+ }
+ return *pixels;
+}
+
+// Wrapper to calculate two output pixels from four input pixels. The
+// arguments are the same as ProcessPixelPairHelper. Technically, there are
+// eight input pixels, but since sub_y == 0, the factors applied to half of
+// the pixels are zero (sub_y), so those pixels are omitted here to save some
+// processing.
+// @param alpha when has_alpha is true, scale all resulting components by this
+// value.
+// @return a vector of 16 bit components containing:
+// ((Aa2 * (16 - x1) + Aa3 * x1) * alpha, ...,
+// (Ra0 * (16 - x0) + Ra1 * x0) * alpha) (when has_alpha is true)
+// otherwise
+// (Aa2 * (16 - x1) + Aa3 * x1, ... , Ra0 * (16 - x0) + Ra1 * x0)
+// In both cases, the results are renormalized (divided by 16) to match the
+// expected formats when storing back the results into memory.
+template<bool has_alpha>
+inline __m128i ProcessPixelPairZeroSubY(uint32_t pixel0,
+ uint32_t pixel1,
+ uint32_t pixel2,
+ uint32_t pixel3,
+ const __m128i& scale_x,
+ const __m128i& alpha) {
+ __m128i sum = ProcessPixelPairHelper(pixel0, pixel1, pixel2, pixel3,
+ scale_x);
+ return ScaleFourPixels<has_alpha, 4>(&sum, alpha);
+}
+
+// Same as ProcessPixelPairZeroSubY, except that it processes one output pixel at a
+// time instead of two. As in the above function, only two pixels are needed
+// to generate a single pixel since sub_y == 0.
+// @return same as ProcessPixelPairZeroSubY, except that only the bottom 4
+// 16 bit components are set.
+template<bool has_alpha>
+inline __m128i ProcessOnePixelZeroSubY(uint32_t pixel0,
+ uint32_t pixel1,
+ __m128i scale_x,
+ __m128i alpha) {
+ __m128i a0 = _mm_cvtsi32_si128(pixel0);
+ __m128i a1 = _mm_cvtsi32_si128(pixel1);
+
+ // Interleave
+ a0 = _mm_unpacklo_epi8(a0, a1);
+
+ // (a0 * (16-x) + a1 * x)
+ __m128i sum = _mm_maddubs_epi16(a0, scale_x);
+
+ return ScaleFourPixels<has_alpha, 4>(&sum, alpha);
+}
+
+// Methods when sub_y != 0
+
+
+// Same as ProcessPixelPairHelper, except that the values are scaled by y.
+// @param y vector of 16 bit components containing 'y' values. There are two
+// cases in practice, where y will contain the sub_y constant, or will
+// contain the 16 - sub_y constant.
+// @return vector of 16 bit components containing:
+// (y * (Aa2 * (16 - x1) + Aa3 * x1), ... , y * (Ra0 * (16 - x0) + Ra1 * x0))
+inline __m128i ProcessPixelPair(uint32_t pixel0,
+ uint32_t pixel1,
+ uint32_t pixel2,
+ uint32_t pixel3,
+ const __m128i& scale_x,
+ const __m128i& y) {
+ __m128i sum = ProcessPixelPairHelper(pixel0, pixel1, pixel2, pixel3,
+ scale_x);
+
+ // first row times 16-y or y depending on whether 'y' represents one or
+ // the other.
+ // Values will be up to 255 * 16 * 16 = 65280.
+ // (y * (Aa2 * (16 - x1) + Aa3 * x1), ... ,
+ // y * (Ra0 * (16 - x0) + Ra1 * x0))
+ sum = _mm_mullo_epi16(sum, y);
+
+ return sum;
+}
+
+// Process two pixel pairs out of eight input pixels.
+// In other methods, the distinct pixels are passed one by one, but in this
+// case the rows and the index offsets into them are passed instead, and the
+// 8 pixels are generated from those.
+// @param row0..1 top and bottom row where to find input pixels.
+// @param x0..1 offsets into the row for all eight input pixels.
+// @param all_y vector of 16 bit components containing the constant sub_y
+// @param neg_y vector of 16 bit components containing the constant 16 - sub_y
+// @param alpha vector of 16 bit components containing the alpha value to scale
+// the results by, when has_alpha is true.
+// @return
+// (alpha * ((16-y) * (Aa2 * (16-x1) + Aa3 * x1) +
+// y * (Aa2' * (16-x1) + Aa3' * x1)),
+// ...
+// alpha * ((16-y) * (Ra0 * (16-x0) + Ra1 * x0) +
+// y * (Ra0' * (16-x0) + Ra1' * x0))
+// With the factor alpha removed when has_alpha is false.
+// The values are scaled back to 16 bit components, but with only the bottom
+// 8 bits being set.
+template<bool has_alpha>
+inline __m128i ProcessTwoPixelPairs(const uint32_t* row0,
+ const uint32_t* row1,
+ const int* x0,
+ const int* x1,
+ const __m128i& scale_x,
+ const __m128i& all_y,
+ const __m128i& neg_y,
+ const __m128i& alpha) {
+ __m128i sum0 = ProcessPixelPair(
+ row0[x0[0]], row0[x1[0]], row0[x0[1]], row0[x1[1]],
+ scale_x, neg_y);
+ __m128i sum1 = ProcessPixelPair(
+ row1[x0[0]], row1[x1[0]], row1[x0[1]], row1[x1[1]],
+ scale_x, all_y);
+
+ // 2 samples fully summed.
+ // ((16-y) * (Aa2 * (16-x1) + Aa3 * x1) +
+ // y * (Aa2' * (16-x1) + Aa3' * x1),
+ // ...
+ // (16-y) * (Ra0 * (16 - x0) + Ra1 * x0)) +
+ // y * (Ra0' * (16-x0) + Ra1' * x0))
+ // Each component, again can be at most 256 * 255 = 65280, so no overflow.
+ sum0 = _mm_add_epi16(sum0, sum1);
+
+ return ScaleFourPixels<has_alpha, 8>(&sum0, alpha);
+}
+
+// Similar to ProcessTwoPixelPairs, except for how the pixel indexes are supplied.
+template<bool has_alpha>
+inline __m128i ProcessTwoPixelPairsDXDY(const uint32_t* row00,
+ const uint32_t* row01,
+ const uint32_t* row10,
+ const uint32_t* row11,
+ const int* xy0,
+ const int* xy1,
+ const __m128i& scale_x,
+ const __m128i& all_y,
+ const __m128i& neg_y,
+ const __m128i& alpha) {
+ // first row
+ __m128i sum0 = ProcessPixelPair(
+ row00[xy0[0]], row00[xy1[0]], row10[xy0[1]], row10[xy1[1]],
+ scale_x, neg_y);
+ // second row
+ __m128i sum1 = ProcessPixelPair(
+ row01[xy0[0]], row01[xy1[0]], row11[xy0[1]], row11[xy1[1]],
+ scale_x, all_y);
+
+ // 2 samples fully summed.
+ // ((16-y1) * (Aa2 * (16-x1) + Aa3 * x1) +
+ // y0 * (Aa2' * (16-x1) + Aa3' * x1),
+ // ...
+ // (16-y0) * (Ra0 * (16 - x0) + Ra1 * x0)) +
+ // y0 * (Ra0' * (16-x0) + Ra1' * x0))
+ // Each component, again can be at most 256 * 255 = 65280, so no overflow.
+ sum0 = _mm_add_epi16(sum0, sum1);
+
+ return ScaleFourPixels<has_alpha, 8>(&sum0, alpha);
+}
+
+
+// Same as ProcessPixelPair, except that it performs the math one output pixel
+// at a time. This means that only the bottom four 16 bit components are set.
+inline __m128i ProcessOnePixel(uint32_t pixel0, uint32_t pixel1,
+ const __m128i& scale_x, const __m128i& y) {
+ __m128i a0 = _mm_cvtsi32_si128(pixel0);
+ __m128i a1 = _mm_cvtsi32_si128(pixel1);
+
+ // Interleave
+ // (0, 0, 0, 0, 0, 0, 0, 0, Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
+ a0 = _mm_unpacklo_epi8(a0, a1);
+
+ // (a0 * (16-x) + a1 * x)
+ a0 = _mm_maddubs_epi16(a0, scale_x);
+
+ // scale row by y
+ return _mm_mullo_epi16(a0, y);
+}
+
+// Notes about the various tricks that are used in this implementation:
+// - specialization for sub_y == 0.
+// Statistically, 1/16th of the samples will have sub_y == 0. When this
+// happens, the math goes from:
+// (16 - x)*(16 - y)*a00 + x*(16 - y)*a01 + (16 - x)*y*a10 + x*y*a11
+// to:
+//     16*((16 - x)*a00 + x*a01)
+//     which is much simpler (a scalar sketch of this weighting follows the
+//     function below). The simplification makes for an easy boost in performance.
+// - calculating 4 output pixels at a time.
+// This allows loading the coefficients x0 and x1 and shuffling them to the
+// optimum location only once per loop, instead of twice per loop.
+// This also allows us to store the four pixels with a single store.
+// - Use of 2 special SSSE3 instructions (compared to the SSE2 version):
+//   _mm_shuffle_epi8 : this allows us to spread the coefficients x[0-3], loaded
+//   as 32 bit values, into 8 bit values repeated four times.
+//   _mm_maddubs_epi16 : this allows us to perform multiplications and additions
+//   in one pass over 8 bit values, storing the results in 16 bit values. This
+//   instruction is crucial for the speed of the implementation since, as one
+//   can see in the SSE2 implementation, all inputs have to be widened to
+//   16 bits because the results are 16 bits. This lets us process twice as
+//   many pixel components per iteration.
+//
+// As a result, this method is faster than the SSE2 version. The actual boost
+// varies greatly with the underlying architecture.
+template<bool has_alpha>
+void S32_generic_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
+ const uint32_t* xy,
+ int count, uint32_t* colors) {
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(s.fFilterQuality != kNone_SkFilterQuality);
+ SkASSERT(kN32_SkColorType == s.fPixmap.colorType());
+ if (has_alpha) {
+ SkASSERT(s.fAlphaScale < 256);
+ } else {
+ SkASSERT(s.fAlphaScale == 256);
+ }
+
+ const uint8_t* src_addr =
+ static_cast<const uint8_t*>(s.fPixmap.addr());
+ const size_t rb = s.fPixmap.rowBytes();
+ const uint32_t XY = *xy++;
+ const unsigned y0 = XY >> 14;
+ const uint32_t* row0 =
+ reinterpret_cast<const uint32_t*>(src_addr + (y0 >> 4) * rb);
+ const uint32_t* row1 =
+ reinterpret_cast<const uint32_t*>(src_addr + (XY & 0x3FFF) * rb);
+ const unsigned sub_y = y0 & 0xF;
+
+ // vector constants
+ const __m128i mask_dist_select = _mm_set_epi8(12, 12, 12, 12,
+ 8, 8, 8, 8,
+ 4, 4, 4, 4,
+ 0, 0, 0, 0);
+ const __m128i mask_3FFF = _mm_set1_epi32(0x3FFF);
+ const __m128i mask_000F = _mm_set1_epi32(0x000F);
+ const __m128i sixteen_8bit = _mm_set1_epi8(16);
+ // (0, 0, 0, 0, 0, 0, 0, 0)
+ const __m128i zero = _mm_setzero_si128();
+
+ __m128i alpha = _mm_setzero_si128();
+ if (has_alpha) {
+ // 8x(alpha)
+ alpha = _mm_set1_epi16(s.fAlphaScale);
+ }
+
+ if (sub_y == 0) {
+ // Unroll 4x, interleave bytes, use pmaddubsw (all_x is small)
+ while (count > 3) {
+ count -= 4;
+
+ int x0[4];
+ int x1[4];
+ __m128i all_x, sixteen_minus_x;
+ PrepareConstantsTwoPixelPairs(xy, mask_3FFF, mask_000F,
+ sixteen_8bit, mask_dist_select,
+ &all_x, &sixteen_minus_x, x0, x1);
+ xy += 4;
+
+ // First pair of pixel pairs.
+ // (4x(x1, 16-x1), 4x(x0, 16-x0))
+ __m128i scale_x;
+ scale_x = _mm_unpacklo_epi8(sixteen_minus_x, all_x);
+
+ __m128i sum0 = ProcessPixelPairZeroSubY<has_alpha>(
+ row0[x0[0]], row0[x1[0]], row0[x0[1]], row0[x1[1]],
+ scale_x, alpha);
+
+ // second pair of pixel pairs
+            // (4x (x3, 16-x3), 4x (x2, 16-x2))
+ scale_x = _mm_unpackhi_epi8(sixteen_minus_x, all_x);
+
+ __m128i sum1 = ProcessPixelPairZeroSubY<has_alpha>(
+ row0[x0[2]], row0[x1[2]], row0[x0[3]], row0[x1[3]],
+ scale_x, alpha);
+
+            // Pack the 16 bit components of both sums into 8 bit values,
+            // giving four output pixels.
+            sum0 = _mm_packus_epi16(sum0, sum1);
+
+            // Store all four pixels at once.
+            _mm_storeu_si128(reinterpret_cast<__m128i *>(colors), sum0);
+
+ colors += 4;
+ }
+
+ // handle remainder
+ while (count-- > 0) {
+ uint32_t xx = *xy++; // x0:14 | 4 | x1:14
+ unsigned x0 = xx >> 18;
+ unsigned x1 = xx & 0x3FFF;
+
+ // 16x(x)
+ const __m128i all_x = _mm_set1_epi8((xx >> 14) & 0x0F);
+
+ // (16x(16-x))
+ __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);
+
+ scale_x = _mm_unpacklo_epi8(scale_x, all_x);
+
+ __m128i sum = ProcessOnePixelZeroSubY<has_alpha>(
+ row0[x0], row0[x1],
+ scale_x, alpha);
+
+ // Pack lower 4 16 bit values of sum into lower 4 bytes.
+ sum = _mm_packus_epi16(sum, zero);
+
+ // Extract low int and store.
+ *colors++ = _mm_cvtsi128_si32(sum);
+ }
+ } else { // more general case, y != 0
+ // 8x(16)
+ const __m128i sixteen_16bit = _mm_set1_epi16(16);
+
+ // 8x (y)
+ const __m128i all_y = _mm_set1_epi16(sub_y);
+
+ // 8x (16-y)
+ const __m128i neg_y = _mm_sub_epi16(sixteen_16bit, all_y);
+
+ // Unroll 4x, interleave bytes, use pmaddubsw (all_x is small)
+ while (count > 3) {
+ count -= 4;
+
+ int x0[4];
+ int x1[4];
+ __m128i all_x, sixteen_minus_x;
+ PrepareConstantsTwoPixelPairs(xy, mask_3FFF, mask_000F,
+ sixteen_8bit, mask_dist_select,
+ &all_x, &sixteen_minus_x, x0, x1);
+ xy += 4;
+
+ // First pair of pixel pairs
+ // (4x(x1, 16-x1), 4x(x0, 16-x0))
+ __m128i scale_x;
+ scale_x = _mm_unpacklo_epi8(sixteen_minus_x, all_x);
+
+ __m128i sum0 = ProcessTwoPixelPairs<has_alpha>(
+ row0, row1, x0, x1,
+ scale_x, all_y, neg_y, alpha);
+
+ // second pair of pixel pairs
+            // (4x (x3, 16-x3), 4x (x2, 16-x2))
+ scale_x = _mm_unpackhi_epi8(sixteen_minus_x, all_x);
+
+ __m128i sum1 = ProcessTwoPixelPairs<has_alpha>(
+ row0, row1, x0 + 2, x1 + 2,
+ scale_x, all_y, neg_y, alpha);
+
+ // Do the final packing of the two results
+
+            // Pack the 16 bit components of both sums into 8 bit values,
+            // giving four output pixels.
+            sum0 = _mm_packus_epi16(sum0, sum1);
+
+            // Store all four pixels at once.
+            _mm_storeu_si128(reinterpret_cast<__m128i *>(colors), sum0);
+
+ colors += 4;
+ }
+
+ // Left over.
+ while (count-- > 0) {
+ const uint32_t xx = *xy++; // x0:14 | 4 | x1:14
+ const unsigned x0 = xx >> 18;
+ const unsigned x1 = xx & 0x3FFF;
+
+ // 16x(x)
+ const __m128i all_x = _mm_set1_epi8((xx >> 14) & 0x0F);
+
+ // 16x (16-x)
+ __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);
+
+ // (8x (x, 16-x))
+ scale_x = _mm_unpacklo_epi8(scale_x, all_x);
+
+ // first row.
+ __m128i sum0 = ProcessOnePixel(row0[x0], row0[x1], scale_x, neg_y);
+ // second row.
+ __m128i sum1 = ProcessOnePixel(row1[x0], row1[x1], scale_x, all_y);
+
+ // Add both rows for full sample
+ sum0 = _mm_add_epi16(sum0, sum1);
+
+ sum0 = ScaleFourPixels<has_alpha, 8>(&sum0, alpha);
+
+ // Pack lower 4 16 bit values of sum into lower 4 bytes.
+ sum0 = _mm_packus_epi16(sum0, zero);
+
+ // Extract low int and store.
+ *colors++ = _mm_cvtsi128_si32(sum0);
+ }
+ }
+}
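As referenced in the notes above, here is a scalar reference of the bilinear weighting that the vector code implements per channel, together with the sub_y == 0 collapse. It is an illustration only, with invented names, not a drop-in replacement:

#include <cstdint>

static inline uint8_t bilinear_channel(uint8_t a00, uint8_t a01,
                                       uint8_t a10, uint8_t a11,
                                       unsigned x, unsigned y) {  // x, y in [0, 16]
    unsigned sum = (16 - x) * (16 - y) * a00 + x * (16 - y) * a01 +
                   (16 - x) * y * a10 + x * y * a11;
    return static_cast<uint8_t>(sum >> 8);  // divide by 16 * 16
}

// When y == 0 this collapses to ((16 - x) * a00 + x * a01) >> 4, which is the
// specialization taken by the sub_y == 0 branch above (divide by 16 only).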
+
+/*
+ * Similar in structure to S32_generic_D32_filter_DX_SSSE3, but there is no
+ * need to special-case sub_y == 0 here, since sub_y changes on every
+ * iteration.
+ */
+template<bool has_alpha>
+void S32_generic_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
+ const uint32_t* xy,
+ int count, uint32_t* colors) {
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(s.fFilterQuality != kNone_SkFilterQuality);
+ SkASSERT(kN32_SkColorType == s.fPixmap.colorType());
+ if (has_alpha) {
+ SkASSERT(s.fAlphaScale < 256);
+ } else {
+ SkASSERT(s.fAlphaScale == 256);
+ }
+
+ const uint8_t* src_addr =
+ static_cast<const uint8_t*>(s.fPixmap.addr());
+ const size_t rb = s.fPixmap.rowBytes();
+
+ // vector constants
+ const __m128i mask_dist_select = _mm_set_epi8(12, 12, 12, 12,
+ 8, 8, 8, 8,
+ 4, 4, 4, 4,
+ 0, 0, 0, 0);
+ const __m128i mask_3FFF = _mm_set1_epi32(0x3FFF);
+ const __m128i mask_000F = _mm_set1_epi32(0x000F);
+ const __m128i sixteen_8bit = _mm_set1_epi8(16);
+
+    // Zero-initialize so the non-alpha instantiation never passes an
+    // uninitialized value around (it is only read when has_alpha is true).
+    __m128i alpha = _mm_setzero_si128();
+ if (has_alpha) {
+ // 8x(alpha)
+ alpha = _mm_set1_epi16(s.fAlphaScale);
+ }
+
+ // Unroll 2x, interleave bytes, use pmaddubsw (all_x is small)
+ while (count >= 2) {
+ int xy0[4];
+ int xy1[4];
+ __m128i all_xy, sixteen_minus_xy;
+ PrepareConstantsTwoPixelPairsDXDY(xy, mask_3FFF, mask_000F,
+ sixteen_8bit, mask_dist_select,
+ &all_xy, &sixteen_minus_xy, xy0, xy1);
+
+ // (4x(x1, 16-x1), 4x(x0, 16-x0))
+ __m128i scale_x = _mm_unpacklo_epi8(sixteen_minus_xy, all_xy);
+ // (4x(0, y1), 4x(0, y0))
+ __m128i all_y = _mm_unpackhi_epi8(all_xy, _mm_setzero_si128());
+ __m128i neg_y = _mm_sub_epi16(_mm_set1_epi16(16), all_y);
+
+ const uint32_t* row00 =
+ reinterpret_cast<const uint32_t*>(src_addr + xy0[2] * rb);
+ const uint32_t* row01 =
+ reinterpret_cast<const uint32_t*>(src_addr + xy1[2] * rb);
+ const uint32_t* row10 =
+ reinterpret_cast<const uint32_t*>(src_addr + xy0[3] * rb);
+ const uint32_t* row11 =
+ reinterpret_cast<const uint32_t*>(src_addr + xy1[3] * rb);
+
+ __m128i sum0 = ProcessTwoPixelPairsDXDY<has_alpha>(
+ row00, row01, row10, row11, xy0, xy1,
+ scale_x, all_y, neg_y, alpha);
+
+        // Pack the 16 bit components of the sum into 8 bit values; the two
+        // output pixels end up in the low 64 bits.
+        sum0 = _mm_packus_epi16(sum0, _mm_setzero_si128());
+
+        // Store the low 64 bits (two pixels).
+        _mm_storel_epi64(reinterpret_cast<__m128i *>(colors), sum0);
+
+ xy += 4;
+ colors += 2;
+ count -= 2;
+ }
+
+ // Handle the remainder
+ while (count-- > 0) {
+ uint32_t data = *xy++;
+ unsigned y0 = data >> 14;
+ unsigned y1 = data & 0x3FFF;
+ unsigned subY = y0 & 0xF;
+ y0 >>= 4;
+
+ data = *xy++;
+ unsigned x0 = data >> 14;
+ unsigned x1 = data & 0x3FFF;
+ unsigned subX = x0 & 0xF;
+ x0 >>= 4;
+
+ const uint32_t* row0 =
+ reinterpret_cast<const uint32_t*>(src_addr + y0 * rb);
+ const uint32_t* row1 =
+ reinterpret_cast<const uint32_t*>(src_addr + y1 * rb);
+
+ // 16x(x)
+ const __m128i all_x = _mm_set1_epi8(subX);
+
+ // 16x (16-x)
+ __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);
+
+ // (8x (x, 16-x))
+ scale_x = _mm_unpacklo_epi8(scale_x, all_x);
+
+ // 8x(16)
+ const __m128i sixteen_16bit = _mm_set1_epi16(16);
+
+ // 8x (y)
+ const __m128i all_y = _mm_set1_epi16(subY);
+
+ // 8x (16-y)
+ const __m128i neg_y = _mm_sub_epi16(sixteen_16bit, all_y);
+
+ // first row.
+ __m128i sum0 = ProcessOnePixel(row0[x0], row0[x1], scale_x, neg_y);
+ // second row.
+ __m128i sum1 = ProcessOnePixel(row1[x0], row1[x1], scale_x, all_y);
+
+ // Add both rows for full sample
+ sum0 = _mm_add_epi16(sum0, sum1);
+
+ sum0 = ScaleFourPixels<has_alpha, 8>(&sum0, alpha);
+
+ // Pack lower 4 16 bit values of sum into lower 4 bytes.
+ sum0 = _mm_packus_epi16(sum0, _mm_setzero_si128());
+
+ // Extract low int and store.
+ *colors++ = _mm_cvtsi128_si32(sum0);
+ }
+}
+} // namespace
+
+void S32_opaque_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
+ const uint32_t* xy,
+ int count, uint32_t* colors) {
+ S32_generic_D32_filter_DX_SSSE3<false>(s, xy, count, colors);
+}
+
+void S32_alpha_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
+ const uint32_t* xy,
+ int count, uint32_t* colors) {
+ S32_generic_D32_filter_DX_SSSE3<true>(s, xy, count, colors);
+}
+
+void S32_opaque_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
+ const uint32_t* xy,
+ int count, uint32_t* colors) {
+ S32_generic_D32_filter_DXDY_SSSE3<false>(s, xy, count, colors);
+}
+
+void S32_alpha_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
+ const uint32_t* xy,
+ int count, uint32_t* colors) {
+ S32_generic_D32_filter_DXDY_SSSE3<true>(s, xy, count, colors);
+}
+
+#else // SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+
+void S32_opaque_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
+ const uint32_t* xy,
+ int count, uint32_t* colors) {
+ sk_throw();
+}
+
+void S32_alpha_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
+ const uint32_t* xy,
+ int count, uint32_t* colors) {
+ sk_throw();
+}
+
+void S32_opaque_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
+ const uint32_t* xy,
+ int count, uint32_t* colors) {
+ sk_throw();
+}
+
+void S32_alpha_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
+ const uint32_t* xy,
+ int count, uint32_t* colors) {
+ sk_throw();
+}
+
+#endif
diff --git a/gfx/skia/skia/src/opts/SkBitmapProcState_opts_SSSE3.h b/gfx/skia/skia/src/opts/SkBitmapProcState_opts_SSSE3.h
new file mode 100644
index 000000000..e7799fa2c
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBitmapProcState_opts_SSSE3.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapProcState_opts_SSSE3_DEFINED
+#define SkBitmapProcState_opts_SSSE3_DEFINED
+
+#include "SkBitmapProcState.h"
+
+void S32_opaque_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
+ const uint32_t* xy,
+ int count, uint32_t* colors);
+void S32_alpha_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
+ const uint32_t* xy,
+ int count, uint32_t* colors);
+void S32_opaque_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
+ const uint32_t* xy,
+ int count, uint32_t* colors);
+void S32_alpha_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
+ const uint32_t* xy,
+ int count, uint32_t* colors);
+#endif
diff --git a/gfx/skia/skia/src/opts/SkBitmapProcState_opts_arm.cpp b/gfx/skia/skia/src/opts/SkBitmapProcState_opts_arm.cpp
new file mode 100644
index 000000000..e3726e727
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBitmapProcState_opts_arm.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkBitmapScaler.h"
+#include "SkBitmapProcState.h"
+#include "SkColorPriv.h"
+#include "SkPaint.h"
+#include "SkTypes.h"
+#include "SkUtils.h"
+#include "SkUtilsArm.h"
+
+#include "SkConvolver.h"
+
+void SkBitmapProcState::platformProcs() { }
+
+///////////////////////////////////////////////////////////////////////////////
+
+extern void platformConvolutionProcs_arm_neon(SkConvolutionProcs* procs);
+
+void platformConvolutionProcs_arm(SkConvolutionProcs* procs) {
+}
+
+void SkBitmapScaler::PlatformConvolutionProcs(SkConvolutionProcs* procs) {
+ SK_ARM_NEON_WRAP(platformConvolutionProcs_arm)(procs);
+}
diff --git a/gfx/skia/skia/src/opts/SkBitmapProcState_opts_mips_dsp.cpp b/gfx/skia/skia/src/opts/SkBitmapProcState_opts_mips_dsp.cpp
new file mode 100644
index 000000000..10f80c2a4
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBitmapProcState_opts_mips_dsp.cpp
@@ -0,0 +1,261 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkBitmapProcState.h"
+#include "SkBitmapScaler.h"
+#include "SkColorPriv.h"
+#include "SkPaint.h"
+#include "SkUtils.h"
+
+static void SI8_opaque_D32_nofilter_DX_mips_dsp(const SkBitmapProcState& s,
+ const uint32_t* SK_RESTRICT xy,
+ int count, SkPMColor* SK_RESTRICT colors) {
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(s.fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask));
+ SkASSERT(kNone_SkFilterQuality == s.fFilterQuality);
+ const SkPMColor* SK_RESTRICT table = s.fPixmap.ctable()->readColors();
+ const uint8_t* SK_RESTRICT srcAddr = (const uint8_t*)s.fPixmap.addr();
+ srcAddr = (const uint8_t*)((const char*)srcAddr + xy[0] * s.fPixmap.rowBytes());
+
+ if (1 == s.fPixmap.width()) {
+ uint8_t src = srcAddr[0];
+ SkPMColor dstValue = table[src];
+ sk_memset32(colors, dstValue, count);
+ } else {
+ const uint16_t* xx = (const uint16_t*)(xy + 1);
+ int s0, s1, s2, s3, s4, s5, s6, s7;
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ ".set noat \n\t"
+ "srl $t8, %[count], 4 \n\t"
+ "beqz $t8, 3f \n\t"
+ " nop \n\t"
+ "1: \n\t"
+ "addiu $t8, $t8, -1 \n\t"
+ "beqz $t8, 2f \n\t"
+ " addiu %[count], %[count], -16 \n\t"
+ "pref 0, 32(%[xx]) \n\t"
+ "lhu $t0, 0(%[xx]) \n\t"
+ "lhu $t1, 2(%[xx]) \n\t"
+ "lhu $t2, 4(%[xx]) \n\t"
+ "lhu $t3, 6(%[xx]) \n\t"
+ "lhu $t4, 8(%[xx]) \n\t"
+ "lhu $t5, 10(%[xx]) \n\t"
+ "lhu $t6, 12(%[xx]) \n\t"
+ "lhu $t7, 14(%[xx]) \n\t"
+ "lhu %[s0], 16(%[xx]) \n\t"
+ "lhu %[s1], 18(%[xx]) \n\t"
+ "lhu %[s2], 20(%[xx]) \n\t"
+ "lhu %[s3], 22(%[xx]) \n\t"
+ "lhu %[s4], 24(%[xx]) \n\t"
+ "lhu %[s5], 26(%[xx]) \n\t"
+ "lhu %[s6], 28(%[xx]) \n\t"
+ "lhu %[s7], 30(%[xx]) \n\t"
+ "lbux $t0, $t0(%[srcAddr]) \n\t"
+ "lbux $t1, $t1(%[srcAddr]) \n\t"
+ "lbux $t2, $t2(%[srcAddr]) \n\t"
+ "lbux $t3, $t3(%[srcAddr]) \n\t"
+ "lbux $t4, $t4(%[srcAddr]) \n\t"
+ "lbux $t5, $t5(%[srcAddr]) \n\t"
+ "lbux $t6, $t6(%[srcAddr]) \n\t"
+ "lbux $t7, $t7(%[srcAddr]) \n\t"
+ "lbux %[s0], %[s0](%[srcAddr]) \n\t"
+ "lbux %[s1], %[s1](%[srcAddr]) \n\t"
+ "lbux %[s2], %[s2](%[srcAddr]) \n\t"
+ "lbux %[s3], %[s3](%[srcAddr]) \n\t"
+ "lbux %[s4], %[s4](%[srcAddr]) \n\t"
+ "lbux %[s5], %[s5](%[srcAddr]) \n\t"
+ "lbux %[s6], %[s6](%[srcAddr]) \n\t"
+ "lbux %[s7], %[s7](%[srcAddr]) \n\t"
+ "sll $t0, $t0, 2 \n\t"
+ "sll $t1, $t1, 2 \n\t"
+ "sll $t2, $t2, 2 \n\t"
+ "sll $t3, $t3, 2 \n\t"
+ "sll $t4, $t4, 2 \n\t"
+ "sll $t5, $t5, 2 \n\t"
+ "sll $t6, $t6, 2 \n\t"
+ "sll $t7, $t7, 2 \n\t"
+ "sll %[s0], %[s0], 2 \n\t"
+ "sll %[s1], %[s1], 2 \n\t"
+ "sll %[s2], %[s2], 2 \n\t"
+ "sll %[s3], %[s3], 2 \n\t"
+ "sll %[s4], %[s4], 2 \n\t"
+ "sll %[s5], %[s5], 2 \n\t"
+ "sll %[s6], %[s6], 2 \n\t"
+ "sll %[s7], %[s7], 2 \n\t"
+ "pref 0, 64(%[table]) \n\t"
+ "lwx $t0, $t0(%[table]) \n\t"
+ "lwx $t1, $t1(%[table]) \n\t"
+ "lwx $t2, $t2(%[table]) \n\t"
+ "lwx $t3, $t3(%[table]) \n\t"
+ "lwx $t4, $t4(%[table]) \n\t"
+ "lwx $t5, $t5(%[table]) \n\t"
+ "lwx $t6, $t6(%[table]) \n\t"
+ "lwx $t7, $t7(%[table]) \n\t"
+ "lwx %[s0], %[s0](%[table]) \n\t"
+ "lwx %[s1], %[s1](%[table]) \n\t"
+ "lwx %[s2], %[s2](%[table]) \n\t"
+ "lwx %[s3], %[s3](%[table]) \n\t"
+ "lwx %[s4], %[s4](%[table]) \n\t"
+ "lwx %[s5], %[s5](%[table]) \n\t"
+ "lwx %[s6], %[s6](%[table]) \n\t"
+ "lwx %[s7], %[s7](%[table]) \n\t"
+ "pref 30, 64(%[colors]) \n\t"
+ "sw $t0, 0(%[colors]) \n\t"
+ "sw $t1, 4(%[colors]) \n\t"
+ "sw $t2, 8(%[colors]) \n\t"
+ "sw $t3, 12(%[colors]) \n\t"
+ "sw $t4, 16(%[colors]) \n\t"
+ "sw $t5, 20(%[colors]) \n\t"
+ "sw $t6, 24(%[colors]) \n\t"
+ "sw $t7, 28(%[colors]) \n\t"
+ "sw %[s0], 32(%[colors]) \n\t"
+ "sw %[s1], 36(%[colors]) \n\t"
+ "sw %[s2], 40(%[colors]) \n\t"
+ "sw %[s3], 44(%[colors]) \n\t"
+ "sw %[s4], 48(%[colors]) \n\t"
+ "sw %[s5], 52(%[colors]) \n\t"
+ "sw %[s6], 56(%[colors]) \n\t"
+ "sw %[s7], 60(%[colors]) \n\t"
+ "addiu %[xx], %[xx], 32 \n\t"
+ "b 1b \n\t"
+ " addiu %[colors], %[colors], 64 \n\t"
+ "2: \n\t"
+ "lhu $t0, 0(%[xx]) \n\t"
+ "lhu $t1, 2(%[xx]) \n\t"
+ "lhu $t2, 4(%[xx]) \n\t"
+ "lhu $t3, 6(%[xx]) \n\t"
+ "lhu $t4, 8(%[xx]) \n\t"
+ "lhu $t5, 10(%[xx]) \n\t"
+ "lhu $t6, 12(%[xx]) \n\t"
+ "lhu $t7, 14(%[xx]) \n\t"
+ "lhu %[s0], 16(%[xx]) \n\t"
+ "lhu %[s1], 18(%[xx]) \n\t"
+ "lhu %[s2], 20(%[xx]) \n\t"
+ "lhu %[s3], 22(%[xx]) \n\t"
+ "lhu %[s4], 24(%[xx]) \n\t"
+ "lhu %[s5], 26(%[xx]) \n\t"
+ "lhu %[s6], 28(%[xx]) \n\t"
+ "lhu %[s7], 30(%[xx]) \n\t"
+ "lbux $t0, $t0(%[srcAddr]) \n\t"
+ "lbux $t1, $t1(%[srcAddr]) \n\t"
+ "lbux $t2, $t2(%[srcAddr]) \n\t"
+ "lbux $t3, $t3(%[srcAddr]) \n\t"
+ "lbux $t4, $t4(%[srcAddr]) \n\t"
+ "lbux $t5, $t5(%[srcAddr]) \n\t"
+ "lbux $t6, $t6(%[srcAddr]) \n\t"
+ "lbux $t7, $t7(%[srcAddr]) \n\t"
+ "lbux %[s0], %[s0](%[srcAddr]) \n\t"
+ "lbux %[s1], %[s1](%[srcAddr]) \n\t"
+ "lbux %[s2], %[s2](%[srcAddr]) \n\t"
+ "lbux %[s3], %[s3](%[srcAddr]) \n\t"
+ "lbux %[s4], %[s4](%[srcAddr]) \n\t"
+ "lbux %[s5], %[s5](%[srcAddr]) \n\t"
+ "lbux %[s6], %[s6](%[srcAddr]) \n\t"
+ "lbux %[s7], %[s7](%[srcAddr]) \n\t"
+ "sll $t0, $t0, 2 \n\t"
+ "sll $t1, $t1, 2 \n\t"
+ "sll $t2, $t2, 2 \n\t"
+ "sll $t3, $t3, 2 \n\t"
+ "sll $t4, $t4, 2 \n\t"
+ "sll $t5, $t5, 2 \n\t"
+ "sll $t6, $t6, 2 \n\t"
+ "sll $t7, $t7, 2 \n\t"
+ "sll %[s0], %[s0], 2 \n\t"
+ "sll %[s1], %[s1], 2 \n\t"
+ "sll %[s2], %[s2], 2 \n\t"
+ "sll %[s3], %[s3], 2 \n\t"
+ "sll %[s4], %[s4], 2 \n\t"
+ "sll %[s5], %[s5], 2 \n\t"
+ "sll %[s6], %[s6], 2 \n\t"
+ "sll %[s7], %[s7], 2 \n\t"
+ "lwx $t0, $t0(%[table]) \n\t"
+ "lwx $t1, $t1(%[table]) \n\t"
+ "lwx $t2, $t2(%[table]) \n\t"
+ "lwx $t3, $t3(%[table]) \n\t"
+ "lwx $t4, $t4(%[table]) \n\t"
+ "lwx $t5, $t5(%[table]) \n\t"
+ "lwx $t6, $t6(%[table]) \n\t"
+ "lwx $t7, $t7(%[table]) \n\t"
+ "lwx %[s0], %[s0](%[table]) \n\t"
+ "lwx %[s1], %[s1](%[table]) \n\t"
+ "lwx %[s2], %[s2](%[table]) \n\t"
+ "lwx %[s3], %[s3](%[table]) \n\t"
+ "lwx %[s4], %[s4](%[table]) \n\t"
+ "lwx %[s5], %[s5](%[table]) \n\t"
+ "lwx %[s6], %[s6](%[table]) \n\t"
+ "lwx %[s7], %[s7](%[table]) \n\t"
+ "sw $t0, 0(%[colors]) \n\t"
+ "sw $t1, 4(%[colors]) \n\t"
+ "sw $t2, 8(%[colors]) \n\t"
+ "sw $t3, 12(%[colors]) \n\t"
+ "sw $t4, 16(%[colors]) \n\t"
+ "sw $t5, 20(%[colors]) \n\t"
+ "sw $t6, 24(%[colors]) \n\t"
+ "sw $t7, 28(%[colors]) \n\t"
+ "sw %[s0], 32(%[colors]) \n\t"
+ "sw %[s1], 36(%[colors]) \n\t"
+ "sw %[s2], 40(%[colors]) \n\t"
+ "sw %[s3], 44(%[colors]) \n\t"
+ "sw %[s4], 48(%[colors]) \n\t"
+ "sw %[s5], 52(%[colors]) \n\t"
+ "sw %[s6], 56(%[colors]) \n\t"
+ "sw %[s7], 60(%[colors]) \n\t"
+ "addiu %[xx], %[xx], 32 \n\t"
+ "beqz %[count], 4f \n\t"
+ " addiu %[colors], %[colors], 64 \n\t"
+ "3: \n\t"
+ "addiu %[count], %[count], -1 \n\t"
+ "lhu $t0, 0(%[xx]) \n\t"
+ "lbux $t1, $t0(%[srcAddr]) \n\t"
+ "sll $t1, $t1, 2 \n\t"
+ "lwx $t2, $t1(%[table]) \n\t"
+ "sw $t2, 0(%[colors]) \n\t"
+ "addiu %[xx], %[xx], 2 \n\t"
+ "bnez %[count], 3b \n\t"
+ " addiu %[colors], %[colors], 4 \n\t"
+ "4: \n\t"
+ ".set pop \n\t"
+ : [xx]"+r"(xx), [count]"+r"(count), [colors]"+r"(colors),
+ [s0]"=&r"(s0), [s1]"=&r"(s1), [s2]"=&r"(s2), [s3]"=&r"(s3),
+ [s4]"=&r"(s4), [s5]"=&r"(s5), [s6]"=&r"(s6), [s7]"=&r"(s7)
+ : [table]"r"(table), [srcAddr]"r"(srcAddr)
+ : "memory", "t0", "t1", "t2", "t3",
+ "t4", "t5", "t6", "t7", "t8"
+ );
+ }
+}
+
+/* If we replace a sampleproc, then we null-out the associated shaderproc,
+ otherwise the shader won't even look at the matrix/sampler
+ */
+
+void SkBitmapProcState::platformProcs() {
+ bool isOpaque = 256 == fAlphaScale;
+ bool justDx = false;
+
+ if (fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask)) {
+ justDx = true;
+ }
+
+ switch (fPixmap.colorType()) {
+ case kIndex_8_SkColorType:
+ if (justDx && kNone_SkFilterQuality == fFilterQuality) {
+ if (isOpaque) {
+ fSampleProc32 = SI8_opaque_D32_nofilter_DX_mips_dsp;
+ fShaderProc32 = nullptr;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+void SkBitmapScaler::PlatformConvolutionProcs(SkConvolutionProcs*) {}
diff --git a/gfx/skia/skia/src/opts/SkBitmapProcState_opts_none.cpp b/gfx/skia/skia/src/opts/SkBitmapProcState_opts_none.cpp
new file mode 100644
index 000000000..f2217f350
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBitmapProcState_opts_none.cpp
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapScaler.h"
+#include "SkBitmapProcState.h"
+
+/* A platform may optionally overwrite any of these with accelerated
+ versions. On input, these will already have valid function pointers,
+ so a platform need only overwrite the ones it chooses, based on the
+ current state (e.g. fBitmap, fInvMatrix, etc.)
+
+ fShaderProc32
+ fShaderProc16
+ fMatrixProc
+   fSampleProc32
+ */
+
+// empty implementation just uses default supplied function pointers
+void SkBitmapProcState::platformProcs() {}
+
+// empty implementation just uses default supplied function pointers
+void SkBitmapScaler::PlatformConvolutionProcs(SkConvolutionProcs*) {}
diff --git a/gfx/skia/skia/src/opts/SkBlend_opts.h b/gfx/skia/skia/src/opts/SkBlend_opts.h
new file mode 100644
index 000000000..1da4c4fb0
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBlend_opts.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/*
+ninja -C out/Release dm nanobench ; and ./out/Release/dm --match Blend_opts ; and ./out/Release/nanobench --samples 300 --nompd --match LinearSrcOver -q
+ */
+
+#ifndef SkBlend_opts_DEFINED
+#define SkBlend_opts_DEFINED
+
+#include "SkNx.h"
+#include "SkPM4fPriv.h"
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include <immintrin.h>
+#endif
+
+namespace SK_OPTS_NS {
+
+static inline void srcover_srgb_srgb_1(uint32_t* dst, uint32_t src) {
+ if (src >= 0xFF000000) {
+ *dst = src;
+ return;
+ }
+ auto d = Sk4f_fromS32(*dst),
+ s = Sk4f_fromS32( src);
+ *dst = Sk4f_toS32(s + d * (1.0f - s[3]));
+}
+
+static inline void srcover_srgb_srgb_4(uint32_t* dst, const uint32_t* src) {
+ srcover_srgb_srgb_1(dst++, *src++);
+ srcover_srgb_srgb_1(dst++, *src++);
+ srcover_srgb_srgb_1(dst++, *src++);
+ srcover_srgb_srgb_1(dst , *src );
+}
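srcover_srgb_srgb_1 converts both pixels to linear, premultiplied floats and applies the standard source-over equation, out = s + d * (1 - sa). A minimal sketch of that equation on its own, with the sRGB conversion left out and invented struct and function names:

struct Linear4 { float r, g, b, a; };  // premultiplied, linear-space

static inline Linear4 srcover(const Linear4& s, const Linear4& d) {
    const float inv_sa = 1.0f - s.a;
    return { s.r + d.r * inv_sa,
             s.g + d.g * inv_sa,
             s.b + d.b * inv_sa,
             s.a + d.a * inv_sa };
}
// e.g. half-covered red over opaque green:
//   s = {0.5f, 0.0f, 0.0f, 0.5f}, d = {0.0f, 1.0f, 0.0f, 1.0f}
//   srcover(s, d) == {0.5f, 0.5f, 0.0f, 1.0f}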
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+
+ static inline __m128i load(const uint32_t* p) {
+ return _mm_loadu_si128(reinterpret_cast<const __m128i*>(p));
+ }
+
+ static inline void store(uint32_t* p, __m128i v) {
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(p), v);
+ }
+
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+
+ static void srcover_srgb_srgb(
+ uint32_t* dst, const uint32_t* const srcStart, int ndst, const int nsrc) {
+ const __m128i alphaMask = _mm_set1_epi32(0xFF000000);
+ while (ndst > 0) {
+ int count = SkTMin(ndst, nsrc);
+ ndst -= count;
+ const uint32_t* src = srcStart;
+ const uint32_t* end = dst + (count & ~3);
+ ptrdiff_t delta = src - dst;
+
+ while (dst < end) {
+ __m128i pixels = load(src);
+ if (_mm_testc_si128(pixels, alphaMask)) {
+ uint32_t* start = dst;
+ do {
+ store(dst, pixels);
+ dst += 4;
+ } while (dst < end
+ && _mm_testc_si128(pixels = load(dst + delta), alphaMask));
+ src += dst - start;
+ } else if (_mm_testz_si128(pixels, alphaMask)) {
+ do {
+ dst += 4;
+ src += 4;
+ } while (dst < end
+ && _mm_testz_si128(pixels = load(src), alphaMask));
+ } else {
+ uint32_t* start = dst;
+ do {
+ srcover_srgb_srgb_4(dst, dst + delta);
+ dst += 4;
+ } while (dst < end
+ && _mm_testnzc_si128(pixels = load(dst + delta), alphaMask));
+ src += dst - start;
+ }
+ }
+
+ count = count & 3;
+ while (count-- > 0) {
+ srcover_srgb_srgb_1(dst++, *src++);
+ }
+ }
+ }
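The two SSE4.1 tests above amount to a three-way classification of each group of four pixels by their alpha bytes, which is what drives the opaque/transparent/mixed run loops. A small sketch of that classification (requires SSE4.1; the enum and function names are invented for illustration):

#include <smmintrin.h>   // SSE4.1

enum class AlphaClass { kAllOpaque, kAllTransparent, kMixed };

static inline AlphaClass classify_four(__m128i pixels) {
    const __m128i alphaMask = _mm_set1_epi32(0xFF000000);
    if (_mm_testc_si128(pixels, alphaMask)) {
        return AlphaClass::kAllOpaque;       // every alpha byte is 0xFF
    }
    if (_mm_testz_si128(pixels, alphaMask)) {
        return AlphaClass::kAllTransparent;  // every alpha byte is 0x00
    }
    return AlphaClass::kMixed;               // take the general blend path
}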
+ #else
+ // SSE2 versions
+
+    // Note: in the next three comparisons a group of 4 pixels is converted to a group of
+    // "signed" pixels because SSE2 does not have an unsigned comparison.
+    // This lets us use the signed comparison operators: biasing 0x00xxxxxx to
+    // 0x80xxxxxx makes those the smallest values, and biasing 0xffxxxxxx to
+    // 0x7fxxxxxx makes those the largest values.
+ static inline bool check_opaque_alphas(__m128i pixels) {
+ __m128i signedPixels = _mm_xor_si128(pixels, _mm_set1_epi32(0x80000000));
+ int mask =
+ _mm_movemask_epi8(
+ _mm_cmplt_epi32(signedPixels, _mm_set1_epi32(0x7F000000)));
+ return mask == 0;
+ }
+
+ static inline bool check_transparent_alphas(__m128i pixels) {
+ __m128i signedPixels = _mm_xor_si128(pixels, _mm_set1_epi32(0x80000000));
+ int mask =
+ _mm_movemask_epi8(
+ _mm_cmpgt_epi32(signedPixels, _mm_set1_epi32(0x80FFFFFF)));
+ return mask == 0;
+ }
+
+ static inline bool check_partial_alphas(__m128i pixels) {
+ __m128i signedPixels = _mm_xor_si128(pixels, _mm_set1_epi32(0x80000000));
+ __m128i opaque = _mm_cmplt_epi32(signedPixels, _mm_set1_epi32(0x7F000000));
+ __m128i transparent = _mm_cmpgt_epi32(signedPixels, _mm_set1_epi32(0x80FFFFFF));
+ int mask = _mm_movemask_epi8(_mm_xor_si128(opaque, transparent));
+ return mask == 0;
+ }
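A scalar model of the bias trick used by the three helpers above, for a single pixel with alpha in the top byte (function names invented; illustrative only):

#include <cstdint>

static inline bool pixel_is_opaque(uint32_t pixel) {
    // XOR with 0x80000000 maps unsigned order onto signed order, so a signed
    // compare against the biased threshold tests "pixel >= 0xFF000000",
    // i.e. alpha == 0xFF.
    int32_t biased = static_cast<int32_t>(pixel ^ 0x80000000u);
    return !(biased < 0x7F000000);
}

static inline bool pixel_is_transparent(uint32_t pixel) {
    // Same bias; this tests "pixel <= 0x00FFFFFF", i.e. alpha == 0x00.
    int32_t biased = static_cast<int32_t>(pixel ^ 0x80000000u);
    return !(biased > static_cast<int32_t>(0x80FFFFFFu));
}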
+
+ static void srcover_srgb_srgb(
+ uint32_t* dst, const uint32_t* const srcStart, int ndst, const int nsrc) {
+ while (ndst > 0) {
+ int count = SkTMin(ndst, nsrc);
+ ndst -= count;
+ const uint32_t* src = srcStart;
+ const uint32_t* end = dst + (count & ~3);
+ const ptrdiff_t delta = src - dst;
+
+ __m128i pixels = load(src);
+ do {
+ if (check_opaque_alphas(pixels)) {
+ uint32_t* start = dst;
+ do {
+ store(dst, pixels);
+ dst += 4;
+ } while (dst < end && check_opaque_alphas((pixels = load(dst + delta))));
+ src += dst - start;
+ } else if (check_transparent_alphas(pixels)) {
+ const uint32_t* start = dst;
+ do {
+ dst += 4;
+ } while (dst < end && check_transparent_alphas(pixels = load(dst + delta)));
+ src += dst - start;
+ } else {
+ const uint32_t* start = dst;
+ do {
+ srcover_srgb_srgb_4(dst, dst + delta);
+ dst += 4;
+ } while (dst < end && check_partial_alphas(pixels = load(dst + delta)));
+ src += dst - start;
+ }
+ } while (dst < end);
+
+ count = count & 3;
+ while (count-- > 0) {
+ srcover_srgb_srgb_1(dst++, *src++);
+ }
+ }
+ }
+ #endif
+#else
+
+ static void srcover_srgb_srgb(
+ uint32_t* dst, const uint32_t* const src, int ndst, const int nsrc) {
+ while (ndst > 0) {
+ int n = SkTMin(ndst, nsrc);
+
+ for (int i = 0; i < n; i++) {
+ srcover_srgb_srgb_1(dst++, src[i]);
+ }
+ ndst -= n;
+ }
+ }
+
+#endif
+
+} // namespace SK_OPTS_NS
+
+#endif//SkBlend_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkBlitMask_opts.h b/gfx/skia/skia/src/opts/SkBlitMask_opts.h
new file mode 100644
index 000000000..2f4fe6ffb
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBlitMask_opts.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlitMask_opts_DEFINED
+#define SkBlitMask_opts_DEFINED
+
+#include "Sk4px.h"
+
+namespace SK_OPTS_NS {
+
+#if defined(SK_ARM_HAS_NEON)
+    // The Sk4px versions below will work fine with NEON, but we have had many indications
+    // that they don't perform as well as this NEON-specific code. TODO(mtklein): why?
+ #include "SkColor_opts_neon.h"
+
+ template <bool isColor>
+ static void D32_A8_Opaque_Color_neon(void* SK_RESTRICT dst, size_t dstRB,
+ const void* SK_RESTRICT maskPtr, size_t maskRB,
+ SkColor color, int width, int height) {
+ SkPMColor pmc = SkPreMultiplyColor(color);
+ SkPMColor* SK_RESTRICT device = (SkPMColor*)dst;
+ const uint8_t* SK_RESTRICT mask = (const uint8_t*)maskPtr;
+ uint8x8x4_t vpmc;
+
+ maskRB -= width;
+ dstRB -= (width << 2);
+
+ if (width >= 8) {
+ vpmc.val[NEON_A] = vdup_n_u8(SkGetPackedA32(pmc));
+ vpmc.val[NEON_R] = vdup_n_u8(SkGetPackedR32(pmc));
+ vpmc.val[NEON_G] = vdup_n_u8(SkGetPackedG32(pmc));
+ vpmc.val[NEON_B] = vdup_n_u8(SkGetPackedB32(pmc));
+ }
+ do {
+ int w = width;
+ while (w >= 8) {
+ uint8x8_t vmask = vld1_u8(mask);
+ uint16x8_t vscale, vmask256 = SkAlpha255To256_neon8(vmask);
+ if (isColor) {
+ vscale = vsubw_u8(vdupq_n_u16(256),
+ SkAlphaMul_neon8(vpmc.val[NEON_A], vmask256));
+ } else {
+ vscale = vsubw_u8(vdupq_n_u16(256), vmask);
+ }
+ uint8x8x4_t vdev = vld4_u8((uint8_t*)device);
+
+ vdev.val[NEON_A] = SkAlphaMul_neon8(vpmc.val[NEON_A], vmask256)
+ + SkAlphaMul_neon8(vdev.val[NEON_A], vscale);
+ vdev.val[NEON_R] = SkAlphaMul_neon8(vpmc.val[NEON_R], vmask256)
+ + SkAlphaMul_neon8(vdev.val[NEON_R], vscale);
+ vdev.val[NEON_G] = SkAlphaMul_neon8(vpmc.val[NEON_G], vmask256)
+ + SkAlphaMul_neon8(vdev.val[NEON_G], vscale);
+ vdev.val[NEON_B] = SkAlphaMul_neon8(vpmc.val[NEON_B], vmask256)
+ + SkAlphaMul_neon8(vdev.val[NEON_B], vscale);
+
+ vst4_u8((uint8_t*)device, vdev);
+
+ mask += 8;
+ device += 8;
+ w -= 8;
+ }
+
+ while (w--) {
+ unsigned aa = *mask++;
+ if (isColor) {
+ *device = SkBlendARGB32(pmc, *device, aa);
+ } else {
+ *device = SkAlphaMulQ(pmc, SkAlpha255To256(aa))
+ + SkAlphaMulQ(*device, SkAlpha255To256(255 - aa));
+ }
+ device += 1;
+ };
+
+ device = (uint32_t*)((char*)device + dstRB);
+ mask += maskRB;
+
+ } while (--height != 0);
+ }
+
+ static void blit_mask_d32_a8_general(SkPMColor* dst, size_t dstRB,
+ const SkAlpha* mask, size_t maskRB,
+ SkColor color, int w, int h) {
+ D32_A8_Opaque_Color_neon<true>(dst, dstRB, mask, maskRB, color, w, h);
+ }
+
+ // As above, but made slightly simpler by requiring that color is opaque.
+ static void blit_mask_d32_a8_opaque(SkPMColor* dst, size_t dstRB,
+ const SkAlpha* mask, size_t maskRB,
+ SkColor color, int w, int h) {
+ D32_A8_Opaque_Color_neon<false>(dst, dstRB, mask, maskRB, color, w, h);
+ }
+
+ // Same as _opaque, but assumes color == SK_ColorBLACK, a very common and even simpler case.
+ static void blit_mask_d32_a8_black(SkPMColor* dst, size_t dstRB,
+ const SkAlpha* maskPtr, size_t maskRB,
+ int width, int height) {
+ SkPMColor* SK_RESTRICT device = (SkPMColor*)dst;
+ const uint8_t* SK_RESTRICT mask = (const uint8_t*)maskPtr;
+
+ maskRB -= width;
+ dstRB -= (width << 2);
+ do {
+ int w = width;
+ while (w >= 8) {
+ uint8x8_t vmask = vld1_u8(mask);
+ uint16x8_t vscale = vsubw_u8(vdupq_n_u16(256), vmask);
+ uint8x8x4_t vdevice = vld4_u8((uint8_t*)device);
+
+ vdevice = SkAlphaMulQ_neon8(vdevice, vscale);
+ vdevice.val[NEON_A] += vmask;
+
+ vst4_u8((uint8_t*)device, vdevice);
+
+ mask += 8;
+ device += 8;
+ w -= 8;
+ }
+ while (w-- > 0) {
+ unsigned aa = *mask++;
+ *device = (aa << SK_A32_SHIFT)
+ + SkAlphaMulQ(*device, SkAlpha255To256(255 - aa));
+ device += 1;
+ };
+ device = (uint32_t*)((char*)device + dstRB);
+ mask += maskRB;
+ } while (--height != 0);
+ }
+
+#else
+ static void blit_mask_d32_a8_general(SkPMColor* dst, size_t dstRB,
+ const SkAlpha* mask, size_t maskRB,
+ SkColor color, int w, int h) {
+ auto s = Sk4px::DupPMColor(SkPreMultiplyColor(color));
+ auto fn = [&](const Sk4px& d, const Sk4px& aa) {
+ // = (s + d(1-sa))aa + d(1-aa)
+ // = s*aa + d(1-sa*aa)
+ auto left = s.approxMulDiv255(aa),
+ right = d.approxMulDiv255(left.alphas().inv());
+ return left + right; // This does not overflow (exhaustively checked).
+ };
+ while (h --> 0) {
+ Sk4px::MapDstAlpha(w, dst, mask, fn);
+ dst += dstRB / sizeof(*dst);
+ mask += maskRB / sizeof(*mask);
+ }
+ }
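The algebra in the lambda's comment is the whole story: blending "src over dst" against the mask coverage aa factors into a single multiply per term. A float scalar sketch of one channel, with values normalized to [0, 1] and an invented name:

// (s + d * (1 - sa)) * aa + d * (1 - aa)  ==  s * aa + d * (1 - sa * aa)
static inline float blend_mask_channel(float s, float d, float sa, float aa) {
    return s * aa + d * (1.0f - sa * aa);
}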
+
+ // As above, but made slightly simpler by requiring that color is opaque.
+ static void blit_mask_d32_a8_opaque(SkPMColor* dst, size_t dstRB,
+ const SkAlpha* mask, size_t maskRB,
+ SkColor color, int w, int h) {
+ SkASSERT(SkColorGetA(color) == 0xFF);
+ auto s = Sk4px::DupPMColor(SkPreMultiplyColor(color));
+ auto fn = [&](const Sk4px& d, const Sk4px& aa) {
+ // = (s + d(1-sa))aa + d(1-aa)
+ // = s*aa + d(1-sa*aa)
+ // ~~~>
+ // = s*aa + d(1-aa)
+ return s.approxMulDiv255(aa) + d.approxMulDiv255(aa.inv());
+ };
+ while (h --> 0) {
+ Sk4px::MapDstAlpha(w, dst, mask, fn);
+ dst += dstRB / sizeof(*dst);
+ mask += maskRB / sizeof(*mask);
+ }
+ }
+
+ // Same as _opaque, but assumes color == SK_ColorBLACK, a very common and even simpler case.
+ static void blit_mask_d32_a8_black(SkPMColor* dst, size_t dstRB,
+ const SkAlpha* mask, size_t maskRB,
+ int w, int h) {
+ auto fn = [](const Sk4px& d, const Sk4px& aa) {
+ // = (s + d(1-sa))aa + d(1-aa)
+ // = s*aa + d(1-sa*aa)
+ // ~~~>
+ // a = 1*aa + d(1-1*aa) = aa + d(1-aa)
+ // c = 0*aa + d(1-1*aa) = d(1-aa)
+ return aa.zeroColors() + d.approxMulDiv255(aa.inv());
+ };
+ while (h --> 0) {
+ Sk4px::MapDstAlpha(w, dst, mask, fn);
+ dst += dstRB / sizeof(*dst);
+ mask += maskRB / sizeof(*mask);
+ }
+ }
+#endif
+
+static void blit_mask_d32_a8(SkPMColor* dst, size_t dstRB,
+ const SkAlpha* mask, size_t maskRB,
+ SkColor color, int w, int h) {
+ if (color == SK_ColorBLACK) {
+ blit_mask_d32_a8_black(dst, dstRB, mask, maskRB, w, h);
+ } else if (SkColorGetA(color) == 0xFF) {
+ blit_mask_d32_a8_opaque(dst, dstRB, mask, maskRB, color, w, h);
+ } else {
+ blit_mask_d32_a8_general(dst, dstRB, mask, maskRB, color, w, h);
+ }
+}
+
+} // SK_OPTS_NS
+
+#endif//SkBlitMask_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkBlitMask_opts_arm.cpp b/gfx/skia/skia/src/opts/SkBlitMask_opts_arm.cpp
new file mode 100644
index 000000000..c5a9ea4f3
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBlitMask_opts_arm.cpp
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkColor.h"
+#include "SkColorPriv.h"
+#include "SkBlitMask.h"
+#include "SkUtilsArm.h"
+#include "SkBlitMask_opts_arm_neon.h"
+
+SkBlitMask::BlitLCD16RowProc SkBlitMask::PlatformBlitRowProcs16(bool isOpaque) {
+ if (isOpaque) {
+ return SK_ARM_NEON_WRAP(SkBlitLCD16OpaqueRow);
+ } else {
+ return SK_ARM_NEON_WRAP(SkBlitLCD16Row);
+ }
+}
+
+SkBlitMask::RowProc SkBlitMask::PlatformRowProcs(SkColorType dstCT,
+ SkMask::Format maskFormat,
+ RowFlags flags) {
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/opts/SkBlitMask_opts_arm_neon.cpp b/gfx/skia/skia/src/opts/SkBlitMask_opts_arm_neon.cpp
new file mode 100644
index 000000000..ad12369db
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBlitMask_opts_arm_neon.cpp
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBlitMask.h"
+#include "SkColor_opts_neon.h"
+
+void SkBlitLCD16OpaqueRow_neon(SkPMColor dst[], const uint16_t src[],
+ SkColor color, int width,
+ SkPMColor opaqueDst) {
+ int colR = SkColorGetR(color);
+ int colG = SkColorGetG(color);
+ int colB = SkColorGetB(color);
+
+ uint8x8_t vcolR, vcolG, vcolB;
+ uint8x8_t vopqDstA, vopqDstR, vopqDstG, vopqDstB;
+
+ if (width >= 8) {
+ vcolR = vdup_n_u8(colR);
+ vcolG = vdup_n_u8(colG);
+ vcolB = vdup_n_u8(colB);
+ vopqDstA = vdup_n_u8(SkGetPackedA32(opaqueDst));
+ vopqDstR = vdup_n_u8(SkGetPackedR32(opaqueDst));
+ vopqDstG = vdup_n_u8(SkGetPackedG32(opaqueDst));
+ vopqDstB = vdup_n_u8(SkGetPackedB32(opaqueDst));
+ }
+
+ while (width >= 8) {
+ uint8x8x4_t vdst;
+ uint16x8_t vmask;
+ uint16x8_t vmaskR, vmaskG, vmaskB;
+ uint8x8_t vsel_trans, vsel_opq;
+
+ vdst = vld4_u8((uint8_t*)dst);
+ vmask = vld1q_u16(src);
+
+ // Prepare compare masks
+ vsel_trans = vmovn_u16(vceqq_u16(vmask, vdupq_n_u16(0)));
+ vsel_opq = vmovn_u16(vceqq_u16(vmask, vdupq_n_u16(0xFFFF)));
+
+        // Extract the R, G and B channels of each 16-bit mask pixel as 5-bit values
+ vmaskR = vshrq_n_u16(vmask, SK_R16_SHIFT);
+ vmaskG = vshrq_n_u16(vshlq_n_u16(vmask, SK_R16_BITS),
+ SK_B16_BITS + SK_R16_BITS + 1);
+ vmaskB = vmask & vdupq_n_u16(SK_B16_MASK);
+
+ // Upscale to 0..32
+ vmaskR = vmaskR + vshrq_n_u16(vmaskR, 4);
+ vmaskG = vmaskG + vshrq_n_u16(vmaskG, 4);
+ vmaskB = vmaskB + vshrq_n_u16(vmaskB, 4);
+
+ vdst.val[NEON_A] = vbsl_u8(vsel_trans, vdst.val[NEON_A], vdup_n_u8(0xFF));
+ vdst.val[NEON_A] = vbsl_u8(vsel_opq, vopqDstA, vdst.val[NEON_A]);
+
+ vdst.val[NEON_R] = SkBlend32_neon8(vcolR, vdst.val[NEON_R], vmaskR);
+ vdst.val[NEON_G] = SkBlend32_neon8(vcolG, vdst.val[NEON_G], vmaskG);
+ vdst.val[NEON_B] = SkBlend32_neon8(vcolB, vdst.val[NEON_B], vmaskB);
+
+ vdst.val[NEON_R] = vbsl_u8(vsel_opq, vopqDstR, vdst.val[NEON_R]);
+ vdst.val[NEON_G] = vbsl_u8(vsel_opq, vopqDstG, vdst.val[NEON_G]);
+ vdst.val[NEON_B] = vbsl_u8(vsel_opq, vopqDstB, vdst.val[NEON_B]);
+
+ vst4_u8((uint8_t*)dst, vdst);
+
+ dst += 8;
+ src += 8;
+ width -= 8;
+ }
+
+ // Leftovers
+ for (int i = 0; i < width; i++) {
+ dst[i] = SkBlendLCD16Opaque(colR, colG, colB, dst[i], src[i],
+ opaqueDst);
+ }
+}
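+
+// A scalar sketch of what each SkBlend32_neon8() lane above computes
+// (illustration only; the same math is spelled out in the SSE2 comments in
+// SkBlitRow_opts_SSE2.cpp):
+//     result = dst + ((src - dst) * coverage >> 5)
+// where coverage is the subpixel mask value upscaled to the 0..32 range
+// (and, in the non-opaque row below, additionally scaled by the source alpha).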
+
+void SkBlitLCD16Row_neon(SkPMColor dst[], const uint16_t src[],
+ SkColor color, int width, SkPMColor) {
+ int colA = SkColorGetA(color);
+ int colR = SkColorGetR(color);
+ int colG = SkColorGetG(color);
+ int colB = SkColorGetB(color);
+
+ colA = SkAlpha255To256(colA);
+
+ uint8x8_t vcolR, vcolG, vcolB;
+ uint16x8_t vcolA;
+
+ if (width >= 8) {
+ vcolA = vdupq_n_u16(colA);
+ vcolR = vdup_n_u8(colR);
+ vcolG = vdup_n_u8(colG);
+ vcolB = vdup_n_u8(colB);
+ }
+
+ while (width >= 8) {
+ uint8x8x4_t vdst;
+ uint16x8_t vmask;
+ uint16x8_t vmaskR, vmaskG, vmaskB;
+
+ vdst = vld4_u8((uint8_t*)dst);
+ vmask = vld1q_u16(src);
+
+        // Extract the R, G and B channels of each 16-bit mask pixel as 5-bit values
+ vmaskR = vshrq_n_u16(vmask, SK_R16_SHIFT);
+ vmaskG = vshrq_n_u16(vshlq_n_u16(vmask, SK_R16_BITS),
+ SK_B16_BITS + SK_R16_BITS + 1);
+ vmaskB = vmask & vdupq_n_u16(SK_B16_MASK);
+
+ // Upscale to 0..32
+ vmaskR = vmaskR + vshrq_n_u16(vmaskR, 4);
+ vmaskG = vmaskG + vshrq_n_u16(vmaskG, 4);
+ vmaskB = vmaskB + vshrq_n_u16(vmaskB, 4);
+
+ vmaskR = vshrq_n_u16(vmaskR * vcolA, 8);
+ vmaskG = vshrq_n_u16(vmaskG * vcolA, 8);
+ vmaskB = vshrq_n_u16(vmaskB * vcolA, 8);
+
+ vdst.val[NEON_A] = vdup_n_u8(0xFF);
+ vdst.val[NEON_R] = SkBlend32_neon8(vcolR, vdst.val[NEON_R], vmaskR);
+ vdst.val[NEON_G] = SkBlend32_neon8(vcolG, vdst.val[NEON_G], vmaskG);
+ vdst.val[NEON_B] = SkBlend32_neon8(vcolB, vdst.val[NEON_B], vmaskB);
+
+ vst4_u8((uint8_t*)dst, vdst);
+
+ dst += 8;
+ src += 8;
+ width -= 8;
+ }
+
+ for (int i = 0; i < width; i++) {
+ dst[i] = SkBlendLCD16(colA, colR, colG, colB, dst[i], src[i]);
+ }
+}
+
+#define LOAD_LANE_16(reg, n) \
+ reg = vld1q_lane_u16(device, reg, n); \
+ device = (uint16_t*)((char*)device + deviceRB);
+
+#define STORE_LANE_16(reg, n) \
+ vst1_lane_u16(dst, reg, n); \
+ dst = (uint16_t*)((char*)dst + deviceRB);
+
+void SkRGB16BlitterBlitV_neon(uint16_t* device,
+ int height,
+ size_t deviceRB,
+ unsigned scale,
+ uint32_t src32) {
+ if (height >= 8)
+ {
+ uint16_t* dst = device;
+
+ // prepare constants
+ uint16x8_t vdev = vdupq_n_u16(0);
+ uint16x8_t vmaskq_g16 = vdupq_n_u16(SK_G16_MASK_IN_PLACE);
+ uint16x8_t vmaskq_ng16 = vdupq_n_u16(~SK_G16_MASK_IN_PLACE);
+ uint32x4_t vsrc32 = vdupq_n_u32(src32);
+ uint32x4_t vscale5 = vdupq_n_u32((uint32_t)scale);
+
+ while (height >= 8){
+ LOAD_LANE_16(vdev, 0)
+ LOAD_LANE_16(vdev, 1)
+ LOAD_LANE_16(vdev, 2)
+ LOAD_LANE_16(vdev, 3)
+ LOAD_LANE_16(vdev, 4)
+ LOAD_LANE_16(vdev, 5)
+ LOAD_LANE_16(vdev, 6)
+ LOAD_LANE_16(vdev, 7)
+
+ // Expand_rgb_16
+ uint16x8x2_t vdst = vzipq_u16((vdev & vmaskq_ng16), (vdev & vmaskq_g16));
+ uint32x4_t vdst32_lo = vmulq_u32(vreinterpretq_u32_u16(vdst.val[0]), vscale5);
+ uint32x4_t vdst32_hi = vmulq_u32(vreinterpretq_u32_u16(vdst.val[1]), vscale5);
+
+ // Compact_rgb_16
+ vdst32_lo = vaddq_u32(vdst32_lo, vsrc32);
+ vdst32_hi = vaddq_u32(vdst32_hi, vsrc32);
+ vdst32_lo = vshrq_n_u32(vdst32_lo, 5);
+ vdst32_hi = vshrq_n_u32(vdst32_hi, 5);
+
+ uint16x4_t vtmp_lo = vmovn_u32(vdst32_lo) & vget_low_u16(vmaskq_ng16);
+ uint16x4_t vtmp_hi = vshrn_n_u32(vdst32_lo, 16) & vget_low_u16(vmaskq_g16);
+ uint16x4_t vdst16_lo = vorr_u16(vtmp_lo, vtmp_hi);
+ vtmp_lo = vmovn_u32(vdst32_hi) & vget_low_u16(vmaskq_ng16);
+ vtmp_hi = vshrn_n_u32(vdst32_hi, 16) & vget_low_u16(vmaskq_g16);
+ uint16x4_t vdst16_hi = vorr_u16(vtmp_lo, vtmp_hi);
+
+ STORE_LANE_16(vdst16_lo, 0)
+ STORE_LANE_16(vdst16_lo, 1)
+ STORE_LANE_16(vdst16_lo, 2)
+ STORE_LANE_16(vdst16_lo, 3)
+ STORE_LANE_16(vdst16_hi, 0)
+ STORE_LANE_16(vdst16_hi, 1)
+ STORE_LANE_16(vdst16_hi, 2)
+ STORE_LANE_16(vdst16_hi, 3)
+ height -= 8;
+ }
+ }
+ while (height != 0){
+ uint32_t dst32 = SkExpand_rgb_16(*device) * scale;
+ *device = SkCompact_rgb_16((src32 + dst32) >> 5);
+ device = (uint16_t*)((char*)device + deviceRB);
+ height--;
+ }
+}
+
+#undef LOAD_LANE_16
+#undef STORE_LANE_16
diff --git a/gfx/skia/skia/src/opts/SkBlitMask_opts_arm_neon.h b/gfx/skia/skia/src/opts/SkBlitMask_opts_arm_neon.h
new file mode 100644
index 000000000..86366a461
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBlitMask_opts_arm_neon.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlitMask_opts_arm_neon_DEFINED
+#define SkBlitMask_opts_arm_neon_DEFINED
+
+#include "SkColor.h"
+#include "SkBlitMask.h"
+
+extern void SkBlitLCD16OpaqueRow_neon(SkPMColor dst[], const uint16_t src[],
+ SkColor color, int width,
+ SkPMColor opaqueDst);
+
+extern void SkBlitLCD16Row_neon(SkPMColor dst[], const uint16_t src[],
+ SkColor color, int width, SkPMColor);
+
+#endif // #ifndef SkBlitMask_opts_arm_neon_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkBlitMask_opts_none.cpp b/gfx/skia/skia/src/opts/SkBlitMask_opts_none.cpp
new file mode 100644
index 000000000..f26ee783b
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBlitMask_opts_none.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBlitMask.h"
+
+SkBlitMask::BlitLCD16RowProc SkBlitMask::PlatformBlitRowProcs16(bool isOpaque) {
+ return nullptr;
+}
+
+SkBlitMask::RowProc SkBlitMask::PlatformRowProcs(SkColorType dstCT,
+ SkMask::Format maskFormat,
+ RowFlags flags) {
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/opts/SkBlitRow_opts.h b/gfx/skia/skia/src/opts/SkBlitRow_opts.h
new file mode 100644
index 000000000..8119eb311
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBlitRow_opts.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlitRow_opts_DEFINED
+#define SkBlitRow_opts_DEFINED
+
+#include "Sk4px.h"
+#include "SkColorPriv.h"
+#include "SkMSAN.h"
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include "SkColor_opts_SSE2.h"
+#endif
+
+namespace SK_OPTS_NS {
+
+// Color32 uses the blend_256_round_alt algorithm from tests/BlendTest.cpp.
+// It's not quite perfect, but it's never wrong in the interesting edge cases,
+// and it's quite a bit faster than blend_perfect.
+//
+// blend_256_round_alt is our currently blessed algorithm. Please use it or an analogous one.
+static inline
+void blit_row_color32(SkPMColor* dst, const SkPMColor* src, int count, SkPMColor color) {
+ unsigned invA = 255 - SkGetPackedA32(color);
+ invA += invA >> 7;
+    SkASSERT(invA < 256); // We should have already handled alpha == 0 externally.
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ __m128i colorHighAndRound = _mm_add_epi16(_mm_unpacklo_epi8(_mm_setzero_si128(), _mm_set1_epi32(color)), _mm_set1_epi16(128));
+ __m128i invA_16x = _mm_set1_epi16(invA);
+ #define BLIT_ROW_COLOR32_FN(px, lohi) \
+ _mm_srli_epi16(_mm_add_epi16(_mm_mullo_epi16(_mm_unpack ## lohi ## _epi8(px, _mm_setzero_si128()), invA_16x), colorHighAndRound), 8)
+ while (count >= 4) {
+ __m128i px = _mm_loadu_si128((const __m128i*)src);
+ _mm_storeu_si128((__m128i*)dst,
+ _mm_packus_epi16(BLIT_ROW_COLOR32_FN(px, lo), BLIT_ROW_COLOR32_FN(px, hi)));
+ src += 4;
+ dst += 4;
+ count -= 4;
+ }
+ if (count >= 2) {
+ __m128i px = _mm_loadl_epi64((const __m128i*)src);
+ _mm_storel_epi64((__m128i*)dst,
+ _mm_packus_epi16(BLIT_ROW_COLOR32_FN(px, lo), _mm_setzero_si128()));
+ src += 2;
+ dst += 2;
+ count -= 2;
+ }
+ if (count >= 1) {
+ __m128i px = _mm_cvtsi32_si128(*src);
+ *dst = _mm_cvtsi128_si32(
+ _mm_packus_epi16(BLIT_ROW_COLOR32_FN(px, lo), _mm_setzero_si128()));
+ }
+#else
+ Sk16h colorHighAndRound = Sk4px::DupPMColor(color).widenHi() + Sk16h(128);
+ Sk16b invA_16x(invA);
+
+ Sk4px::MapSrc(count, dst, src, [&](const Sk4px& src4) -> Sk4px {
+ return (src4 * invA_16x).addNarrowHi(colorHighAndRound);
+ });
+#endif
+}
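+
+// Scalar sketch of the per-channel math vectorized above (illustration only,
+// not used by the blitters): with invA in 0..256 and a premultiplied color
+// channel c, every destination channel d becomes
+//     (d*invA + c*256 + 128) >> 8
+// which is the blend_256_round_alt rounding mentioned in the comment above.
+static inline uint8_t blit_row_color32_channel_sketch(uint8_t d, uint8_t c, unsigned invA) {
+    return (uint8_t)((d * invA + c * 256u + 128u) >> 8);
+}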
+
+static inline
+void blit_row_s32a_opaque(SkPMColor* dst, const SkPMColor* src, int len, U8CPU alpha) {
+ SkASSERT(alpha == 0xFF);
+ sk_msan_assert_initialized(src, src+len);
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ while (len >= 16) {
+ // Load 16 source pixels.
+ auto s0 = _mm_loadu_si128((const __m128i*)(src) + 0),
+ s1 = _mm_loadu_si128((const __m128i*)(src) + 1),
+ s2 = _mm_loadu_si128((const __m128i*)(src) + 2),
+ s3 = _mm_loadu_si128((const __m128i*)(src) + 3);
+
+ const auto alphaMask = _mm_set1_epi32(0xFF000000);
+
+ auto ORed = _mm_or_si128(s3, _mm_or_si128(s2, _mm_or_si128(s1, s0)));
+ if (_mm_testz_si128(ORed, alphaMask)) {
+ // All 16 source pixels are transparent. Nothing to do.
+ src += 16;
+ dst += 16;
+ len -= 16;
+ continue;
+ }
+
+ auto d0 = (__m128i*)(dst) + 0,
+ d1 = (__m128i*)(dst) + 1,
+ d2 = (__m128i*)(dst) + 2,
+ d3 = (__m128i*)(dst) + 3;
+
+ auto ANDed = _mm_and_si128(s3, _mm_and_si128(s2, _mm_and_si128(s1, s0)));
+ if (_mm_testc_si128(ANDed, alphaMask)) {
+ // All 16 source pixels are opaque. SrcOver becomes Src.
+ _mm_storeu_si128(d0, s0);
+ _mm_storeu_si128(d1, s1);
+ _mm_storeu_si128(d2, s2);
+ _mm_storeu_si128(d3, s3);
+ src += 16;
+ dst += 16;
+ len -= 16;
+ continue;
+ }
+
+ // TODO: This math is wrong.
+ // Do SrcOver.
+ _mm_storeu_si128(d0, SkPMSrcOver_SSE2(s0, _mm_loadu_si128(d0)));
+ _mm_storeu_si128(d1, SkPMSrcOver_SSE2(s1, _mm_loadu_si128(d1)));
+ _mm_storeu_si128(d2, SkPMSrcOver_SSE2(s2, _mm_loadu_si128(d2)));
+ _mm_storeu_si128(d3, SkPMSrcOver_SSE2(s3, _mm_loadu_si128(d3)));
+ src += 16;
+ dst += 16;
+ len -= 16;
+ }
+
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ while (len >= 16) {
+ // Load 16 source pixels.
+ auto s0 = _mm_loadu_si128((const __m128i*)(src) + 0),
+ s1 = _mm_loadu_si128((const __m128i*)(src) + 1),
+ s2 = _mm_loadu_si128((const __m128i*)(src) + 2),
+ s3 = _mm_loadu_si128((const __m128i*)(src) + 3);
+
+ const auto alphaMask = _mm_set1_epi32(0xFF000000);
+
+ auto ORed = _mm_or_si128(s3, _mm_or_si128(s2, _mm_or_si128(s1, s0)));
+ if (0xffff == _mm_movemask_epi8(_mm_cmpeq_epi8(_mm_and_si128(ORed, alphaMask),
+ _mm_setzero_si128()))) {
+ // All 16 source pixels are transparent. Nothing to do.
+ src += 16;
+ dst += 16;
+ len -= 16;
+ continue;
+ }
+
+ auto d0 = (__m128i*)(dst) + 0,
+ d1 = (__m128i*)(dst) + 1,
+ d2 = (__m128i*)(dst) + 2,
+ d3 = (__m128i*)(dst) + 3;
+
+ auto ANDed = _mm_and_si128(s3, _mm_and_si128(s2, _mm_and_si128(s1, s0)));
+ if (0xffff == _mm_movemask_epi8(_mm_cmpeq_epi8(_mm_and_si128(ANDed, alphaMask),
+ alphaMask))) {
+ // All 16 source pixels are opaque. SrcOver becomes Src.
+ _mm_storeu_si128(d0, s0);
+ _mm_storeu_si128(d1, s1);
+ _mm_storeu_si128(d2, s2);
+ _mm_storeu_si128(d3, s3);
+ src += 16;
+ dst += 16;
+ len -= 16;
+ continue;
+ }
+
+ // TODO: This math is wrong.
+ // Do SrcOver.
+ _mm_storeu_si128(d0, SkPMSrcOver_SSE2(s0, _mm_loadu_si128(d0)));
+ _mm_storeu_si128(d1, SkPMSrcOver_SSE2(s1, _mm_loadu_si128(d1)));
+ _mm_storeu_si128(d2, SkPMSrcOver_SSE2(s2, _mm_loadu_si128(d2)));
+ _mm_storeu_si128(d3, SkPMSrcOver_SSE2(s3, _mm_loadu_si128(d3)));
+
+ src += 16;
+ dst += 16;
+ len -= 16;
+ }
+
+#elif defined(SK_ARM_HAS_NEON)
+ while (len >= 4) {
+ if ((src[0] | src[1] | src[2] | src[3]) == 0x00000000) {
+            // All 4 source pixels are transparent. Nothing to do.
+ src += 4;
+ dst += 4;
+ len -= 4;
+ continue;
+ }
+
+ if ((src[0] & src[1] & src[2] & src[3]) >= 0xFF000000) {
+            // All 4 source pixels are opaque. SrcOver becomes Src.
+ dst[0] = src[0];
+ dst[1] = src[1];
+ dst[2] = src[2];
+ dst[3] = src[3];
+ src += 4;
+ dst += 4;
+ len -= 4;
+ continue;
+ }
+
+ // Load 4 source and destination pixels.
+ auto src0 = vreinterpret_u8_u32(vld1_u32(src+0)),
+ src2 = vreinterpret_u8_u32(vld1_u32(src+2)),
+ dst0 = vreinterpret_u8_u32(vld1_u32(dst+0)),
+ dst2 = vreinterpret_u8_u32(vld1_u32(dst+2));
+
+ // TODO: This math is wrong.
+ const uint8x8_t alphas = vcreate_u8(0x0707070703030303);
+ auto invSA0_w = vsubw_u8(vdupq_n_u16(256), vtbl1_u8(src0, alphas)),
+ invSA2_w = vsubw_u8(vdupq_n_u16(256), vtbl1_u8(src2, alphas));
+
+ auto dstInvSA0 = vmulq_u16(invSA0_w, vmovl_u8(dst0)),
+ dstInvSA2 = vmulq_u16(invSA2_w, vmovl_u8(dst2));
+
+ dst0 = vadd_u8(src0, vshrn_n_u16(dstInvSA0, 8));
+ dst2 = vadd_u8(src2, vshrn_n_u16(dstInvSA2, 8));
+
+ vst1_u32(dst+0, vreinterpret_u32_u8(dst0));
+ vst1_u32(dst+2, vreinterpret_u32_u8(dst2));
+
+ src += 4;
+ dst += 4;
+ len -= 4;
+ }
+#endif
+
+ while (len-- > 0) {
+ // This 0xFF000000 is not semantically necessary, but for compatibility
+ // with chromium:611002 we need to keep it until we figure out where
+ // the non-premultiplied src values (like 0x00FFFFFF) are coming from.
+ // TODO(mtklein): sort this out and assert *src is premul here.
+ if (*src & 0xFF000000) {
+ *dst = (*src >= 0xFF000000) ? *src : SkPMSrcOver(*src, *dst);
+ }
+ src++;
+ dst++;
+ }
+}
+
+} // SK_OPTS_NS
+
+#endif//SkBlitRow_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkBlitRow_opts_SSE2.cpp b/gfx/skia/skia/src/opts/SkBlitRow_opts_SSE2.cpp
new file mode 100644
index 000000000..7ce1fc9a8
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBlitRow_opts_SSE2.cpp
@@ -0,0 +1,990 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <emmintrin.h>
+#include "SkBitmapProcState_opts_SSE2.h"
+#include "SkBlitRow_opts_SSE2.h"
+#include "SkColorPriv.h"
+#include "SkColor_opts_SSE2.h"
+#include "SkDither.h"
+#include "SkMSAN.h"
+#include "SkUtils.h"
+
+/* SSE2 version of S32_Blend_BlitRow32()
+ * portable version is in core/SkBlitRow_D32.cpp
+ */
+void S32_Blend_BlitRow32_SSE2(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha) {
+ SkASSERT(alpha <= 255);
+ if (count <= 0) {
+ return;
+ }
+
+ uint32_t src_scale = SkAlpha255To256(alpha);
+
+ if (count >= 4) {
+ SkASSERT(((size_t)dst & 0x03) == 0);
+ while (((size_t)dst & 0x0F) != 0) {
+ *dst = SkPMLerp(*src, *dst, src_scale);
+ src++;
+ dst++;
+ count--;
+ }
+
+ const __m128i *s = reinterpret_cast<const __m128i*>(src);
+ __m128i *d = reinterpret_cast<__m128i*>(dst);
+
+ while (count >= 4) {
+ // Load 4 pixels each of src and dest.
+ __m128i src_pixel = _mm_loadu_si128(s);
+ __m128i dst_pixel = _mm_load_si128(d);
+
+ __m128i result = SkPMLerp_SSE2(src_pixel, dst_pixel, src_scale);
+ _mm_store_si128(d, result);
+ s++;
+ d++;
+ count -= 4;
+ }
+ src = reinterpret_cast<const SkPMColor*>(s);
+ dst = reinterpret_cast<SkPMColor*>(d);
+ }
+
+ while (count > 0) {
+ *dst = SkPMLerp(*src, *dst, src_scale);
+ src++;
+ dst++;
+ count--;
+ }
+}
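+
+// Rough scalar picture of the SkPMLerp() used above (sketch only): every
+// premultiplied channel is interpolated between dst and src by src_scale/256,
+// roughly
+//     result ~= (src*src_scale + dst*(256 - src_scale)) >> 8   per channel.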
+
+void S32A_Blend_BlitRow32_SSE2(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha) {
+ SkASSERT(alpha <= 255);
+ if (count <= 0) {
+ return;
+ }
+
+ if (count >= 4) {
+ while (((size_t)dst & 0x0F) != 0) {
+ *dst = SkBlendARGB32(*src, *dst, alpha);
+ src++;
+ dst++;
+ count--;
+ }
+
+ const __m128i *s = reinterpret_cast<const __m128i*>(src);
+ __m128i *d = reinterpret_cast<__m128i*>(dst);
+ while (count >= 4) {
+ // Load 4 pixels each of src and dest.
+ __m128i src_pixel = _mm_loadu_si128(s);
+ __m128i dst_pixel = _mm_load_si128(d);
+
+ __m128i result = SkBlendARGB32_SSE2(src_pixel, dst_pixel, alpha);
+ _mm_store_si128(d, result);
+ s++;
+ d++;
+ count -= 4;
+ }
+ src = reinterpret_cast<const SkPMColor*>(s);
+ dst = reinterpret_cast<SkPMColor*>(d);
+ }
+
+ while (count > 0) {
+ *dst = SkBlendARGB32(*src, *dst, alpha);
+ src++;
+ dst++;
+ count--;
+ }
+}
+
+void Color32A_D565_SSE2(uint16_t dst[], SkPMColor src, int count, int x, int y) {
+ SkASSERT(count > 0);
+
+ uint32_t src_expand = (SkGetPackedG32(src) << 24) |
+ (SkGetPackedR32(src) << 13) |
+ (SkGetPackedB32(src) << 2);
+ unsigned scale = SkAlpha255To256(0xFF - SkGetPackedA32(src)) >> 3;
+
+ // Check if we have enough pixels to run SIMD
+ if (count >= (int)(8 + (((16 - (size_t)dst) & 0x0F) >> 1))) {
+ __m128i* dst_wide;
+ const __m128i src_R_wide = _mm_set1_epi16(SkGetPackedR32(src) << 2);
+ const __m128i src_G_wide = _mm_set1_epi16(SkGetPackedG32(src) << 3);
+ const __m128i src_B_wide = _mm_set1_epi16(SkGetPackedB32(src) << 2);
+ const __m128i scale_wide = _mm_set1_epi16(scale);
+ const __m128i mask_blue = _mm_set1_epi16(SK_B16_MASK);
+ const __m128i mask_green = _mm_set1_epi16(SK_G16_MASK << SK_G16_SHIFT);
+
+ // Align dst to an even 16 byte address (0-7 pixels)
+ while (((((size_t)dst) & 0x0F) != 0) && (count > 0)) {
+ *dst = SkBlend32_RGB16(src_expand, *dst, scale);
+ dst += 1;
+ count--;
+ }
+
+ dst_wide = reinterpret_cast<__m128i*>(dst);
+ do {
+ // Load eight RGB565 pixels
+ __m128i pixels = _mm_load_si128(dst_wide);
+
+ // Mask out sub-pixels
+ __m128i pixel_R = _mm_srli_epi16(pixels, SK_R16_SHIFT);
+ __m128i pixel_G = _mm_slli_epi16(pixels, SK_R16_BITS);
+ pixel_G = _mm_srli_epi16(pixel_G, SK_R16_BITS + SK_B16_BITS);
+ __m128i pixel_B = _mm_and_si128(pixels, mask_blue);
+
+ // Scale with alpha
+ pixel_R = _mm_mullo_epi16(pixel_R, scale_wide);
+ pixel_G = _mm_mullo_epi16(pixel_G, scale_wide);
+ pixel_B = _mm_mullo_epi16(pixel_B, scale_wide);
+
+ // Add src_X_wide and shift down again
+ pixel_R = _mm_add_epi16(pixel_R, src_R_wide);
+ pixel_R = _mm_srli_epi16(pixel_R, 5);
+ pixel_G = _mm_add_epi16(pixel_G, src_G_wide);
+ pixel_B = _mm_add_epi16(pixel_B, src_B_wide);
+ pixel_B = _mm_srli_epi16(pixel_B, 5);
+
+ // Combine into RGB565 and store
+ pixel_R = _mm_slli_epi16(pixel_R, SK_R16_SHIFT);
+ pixel_G = _mm_and_si128(pixel_G, mask_green);
+ pixels = _mm_or_si128(pixel_R, pixel_G);
+ pixels = _mm_or_si128(pixels, pixel_B);
+ _mm_store_si128(dst_wide, pixels);
+ count -= 8;
+ dst_wide++;
+ } while (count >= 8);
+
+ dst = reinterpret_cast<uint16_t*>(dst_wide);
+ }
+
+ // Small loop to handle remaining pixels.
+ while (count > 0) {
+ *dst = SkBlend32_RGB16(src_expand, *dst, scale);
+ dst += 1;
+ count--;
+ }
+}
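+
+// Scalar sketch of the blend above (illustration only, not used): the 565
+// pixel is expanded so every channel has spare high bits, which lets a single
+// 32-bit multiply scale R, G and B at once before compacting back to 565.
+// scale5 is the inverse source alpha reduced to the 0..32 range ('scale' above).
+static inline uint16_t color32a_d565_1px_sketch(uint32_t src_expand, unsigned scale5, uint16_t dst) {
+    uint32_t dst_expand = SkExpand_rgb_16(dst) * scale5;
+    return SkCompact_rgb_16((src_expand + dst_expand) >> 5);
+}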
+
+// The following (left) shifts cause the top 5 bits of the mask components to
+// line up with the corresponding components in an SkPMColor.
+// Note that the mask's RGB16 order may differ from the SkPMColor order.
+#define SK_R16x5_R32x5_SHIFT (SK_R32_SHIFT - SK_R16_SHIFT - SK_R16_BITS + 5)
+#define SK_G16x5_G32x5_SHIFT (SK_G32_SHIFT - SK_G16_SHIFT - SK_G16_BITS + 5)
+#define SK_B16x5_B32x5_SHIFT (SK_B32_SHIFT - SK_B16_SHIFT - SK_B16_BITS + 5)
+
+#if SK_R16x5_R32x5_SHIFT == 0
+ #define SkPackedR16x5ToUnmaskedR32x5_SSE2(x) (x)
+#elif SK_R16x5_R32x5_SHIFT > 0
+ #define SkPackedR16x5ToUnmaskedR32x5_SSE2(x) (_mm_slli_epi32(x, SK_R16x5_R32x5_SHIFT))
+#else
+ #define SkPackedR16x5ToUnmaskedR32x5_SSE2(x) (_mm_srli_epi32(x, -SK_R16x5_R32x5_SHIFT))
+#endif
+
+#if SK_G16x5_G32x5_SHIFT == 0
+ #define SkPackedG16x5ToUnmaskedG32x5_SSE2(x) (x)
+#elif SK_G16x5_G32x5_SHIFT > 0
+ #define SkPackedG16x5ToUnmaskedG32x5_SSE2(x) (_mm_slli_epi32(x, SK_G16x5_G32x5_SHIFT))
+#else
+ #define SkPackedG16x5ToUnmaskedG32x5_SSE2(x) (_mm_srli_epi32(x, -SK_G16x5_G32x5_SHIFT))
+#endif
+
+#if SK_B16x5_B32x5_SHIFT == 0
+ #define SkPackedB16x5ToUnmaskedB32x5_SSE2(x) (x)
+#elif SK_B16x5_B32x5_SHIFT > 0
+ #define SkPackedB16x5ToUnmaskedB32x5_SSE2(x) (_mm_slli_epi32(x, SK_B16x5_B32x5_SHIFT))
+#else
+ #define SkPackedB16x5ToUnmaskedB32x5_SSE2(x) (_mm_srli_epi32(x, -SK_B16x5_B32x5_SHIFT))
+#endif
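+
+// Worked example (assuming SK_R32_SHIFT == 16 and the usual 565 layout with
+// SK_R16_SHIFT == 11, SK_R16_BITS == 5):
+//     SK_R16x5_R32x5_SHIFT = 16 - 11 - 5 + 5 = 5
+// so the 5-bit red field moves from bits 11..15 of the mask word to bits
+// 16..20, where the (0x1F << SK_R32_SHIFT) masks below pick it out.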
+
+static __m128i SkBlendLCD16_SSE2(__m128i &src, __m128i &dst,
+ __m128i &mask, __m128i &srcA) {
+ // In the following comments, the components of src, dst and mask are
+ // abbreviated as (s)rc, (d)st, and (m)ask. Color components are marked
+ // by an R, G, B, or A suffix. Components of one of the four pixels that
+ // are processed in parallel are marked with 0, 1, 2, and 3. "d1B", for
+    // example, is the blue channel of the second destination pixel. Memory
+ // layout is shown for an ARGB byte order in a color value.
+
+ // src and srcA store 8-bit values interleaved with zeros.
+ // src = (0xFF, 0, sR, 0, sG, 0, sB, 0, 0xFF, 0, sR, 0, sG, 0, sB, 0)
+ // srcA = (srcA, 0, srcA, 0, srcA, 0, srcA, 0,
+ // srcA, 0, srcA, 0, srcA, 0, srcA, 0)
+    // mask stores 16-bit values (the three compressed channels) interleaved with zeros.
+ // Lo and Hi denote the low and high bytes of a 16-bit value, respectively.
+ // mask = (m0RGBLo, m0RGBHi, 0, 0, m1RGBLo, m1RGBHi, 0, 0,
+ // m2RGBLo, m2RGBHi, 0, 0, m3RGBLo, m3RGBHi, 0, 0)
+
+    // Get the R, G, B of each 16-bit mask pixel; we want all of them in 5 bits.
+ // r = (0, m0R, 0, 0, 0, m1R, 0, 0, 0, m2R, 0, 0, 0, m3R, 0, 0)
+ __m128i r = _mm_and_si128(SkPackedR16x5ToUnmaskedR32x5_SSE2(mask),
+ _mm_set1_epi32(0x1F << SK_R32_SHIFT));
+
+ // g = (0, 0, m0G, 0, 0, 0, m1G, 0, 0, 0, m2G, 0, 0, 0, m3G, 0)
+ __m128i g = _mm_and_si128(SkPackedG16x5ToUnmaskedG32x5_SSE2(mask),
+ _mm_set1_epi32(0x1F << SK_G32_SHIFT));
+
+ // b = (0, 0, 0, m0B, 0, 0, 0, m1B, 0, 0, 0, m2B, 0, 0, 0, m3B)
+ __m128i b = _mm_and_si128(SkPackedB16x5ToUnmaskedB32x5_SSE2(mask),
+ _mm_set1_epi32(0x1F << SK_B32_SHIFT));
+
+ // Pack the 4 16bit mask pixels into 4 32bit pixels, (p0, p1, p2, p3)
+ // Each component (m0R, m0G, etc.) is then a 5-bit value aligned to an
+ // 8-bit position
+ // mask = (0, m0R, m0G, m0B, 0, m1R, m1G, m1B,
+ // 0, m2R, m2G, m2B, 0, m3R, m3G, m3B)
+ mask = _mm_or_si128(_mm_or_si128(r, g), b);
+
+ // Interleave R,G,B into the lower byte of word.
+ // i.e. split the sixteen 8-bit values from mask into two sets of eight
+ // 16-bit values, padded by zero.
+ __m128i maskLo, maskHi;
+ // maskLo = (0, 0, m0R, 0, m0G, 0, m0B, 0, 0, 0, m1R, 0, m1G, 0, m1B, 0)
+ maskLo = _mm_unpacklo_epi8(mask, _mm_setzero_si128());
+ // maskHi = (0, 0, m2R, 0, m2G, 0, m2B, 0, 0, 0, m3R, 0, m3G, 0, m3B, 0)
+ maskHi = _mm_unpackhi_epi8(mask, _mm_setzero_si128());
+
+ // Upscale from 0..31 to 0..32
+    // (allows us to replace a division with a shift further down)
+ // Left-shift each component by 4 and add the result back to that component,
+ // mapping numbers in the range 0..15 to 0..15, and 16..31 to 17..32
+ maskLo = _mm_add_epi16(maskLo, _mm_srli_epi16(maskLo, 4));
+ maskHi = _mm_add_epi16(maskHi, _mm_srli_epi16(maskHi, 4));
+
+ // Multiply each component of maskLo and maskHi by srcA
+ maskLo = _mm_mullo_epi16(maskLo, srcA);
+ maskHi = _mm_mullo_epi16(maskHi, srcA);
+
+ // Left shift mask components by 8 (divide by 256)
+ maskLo = _mm_srli_epi16(maskLo, 8);
+ maskHi = _mm_srli_epi16(maskHi, 8);
+
+ // Interleave R,G,B into the lower byte of the word
+ // dstLo = (0, 0, d0R, 0, d0G, 0, d0B, 0, 0, 0, d1R, 0, d1G, 0, d1B, 0)
+ __m128i dstLo = _mm_unpacklo_epi8(dst, _mm_setzero_si128());
+    // dstHi = (0, 0, d2R, 0, d2G, 0, d2B, 0, 0, 0, d3R, 0, d3G, 0, d3B, 0)
+ __m128i dstHi = _mm_unpackhi_epi8(dst, _mm_setzero_si128());
+
+ // mask = (src - dst) * mask
+ maskLo = _mm_mullo_epi16(maskLo, _mm_sub_epi16(src, dstLo));
+ maskHi = _mm_mullo_epi16(maskHi, _mm_sub_epi16(src, dstHi));
+
+ // mask = (src - dst) * mask >> 5
+ maskLo = _mm_srai_epi16(maskLo, 5);
+ maskHi = _mm_srai_epi16(maskHi, 5);
+
+ // Add two pixels into result.
+ // result = dst + ((src - dst) * mask >> 5)
+ __m128i resultLo = _mm_add_epi16(dstLo, maskLo);
+ __m128i resultHi = _mm_add_epi16(dstHi, maskHi);
+
+ // Pack into 4 32bit dst pixels.
+ // resultLo and resultHi contain eight 16-bit components (two pixels) each.
+    // Merge into one SSE register with sixteen 8-bit values (four pixels),
+ // clamping to 255 if necessary.
+ return _mm_packus_epi16(resultLo, resultHi);
+}
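+
+// Scalar sketch of the per-channel math above (illustration only, not used):
+// m32 is the subpixel coverage upscaled to 0..32 and srcA is in 0..256.
+static inline int blend_lcd16_channel_sketch(int src, int dst, int m32, int srcA) {
+    int coverage = (m32 * srcA) >> 8;            // scale coverage by source alpha
+    return dst + ((src - dst) * coverage >> 5);  // dst + ((src - dst) * mask >> 5)
+}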
+
+static __m128i SkBlendLCD16Opaque_SSE2(__m128i &src, __m128i &dst,
+ __m128i &mask) {
+ // In the following comments, the components of src, dst and mask are
+ // abbreviated as (s)rc, (d)st, and (m)ask. Color components are marked
+ // by an R, G, B, or A suffix. Components of one of the four pixels that
+ // are processed in parallel are marked with 0, 1, 2, and 3. "d1B", for
+    // example, is the blue channel of the second destination pixel. Memory
+ // layout is shown for an ARGB byte order in a color value.
+
+ // src and srcA store 8-bit values interleaved with zeros.
+ // src = (0xFF, 0, sR, 0, sG, 0, sB, 0, 0xFF, 0, sR, 0, sG, 0, sB, 0)
+ // mask stores 16-bit values (shown as high and low bytes) interleaved with
+ // zeros
+ // mask = (m0RGBLo, m0RGBHi, 0, 0, m1RGBLo, m1RGBHi, 0, 0,
+ // m2RGBLo, m2RGBHi, 0, 0, m3RGBLo, m3RGBHi, 0, 0)
+
+    // Get the R, G, B of each 16-bit mask pixel; we want all of them in 5 bits.
+ // r = (0, m0R, 0, 0, 0, m1R, 0, 0, 0, m2R, 0, 0, 0, m3R, 0, 0)
+ __m128i r = _mm_and_si128(SkPackedR16x5ToUnmaskedR32x5_SSE2(mask),
+ _mm_set1_epi32(0x1F << SK_R32_SHIFT));
+
+ // g = (0, 0, m0G, 0, 0, 0, m1G, 0, 0, 0, m2G, 0, 0, 0, m3G, 0)
+ __m128i g = _mm_and_si128(SkPackedG16x5ToUnmaskedG32x5_SSE2(mask),
+ _mm_set1_epi32(0x1F << SK_G32_SHIFT));
+
+ // b = (0, 0, 0, m0B, 0, 0, 0, m1B, 0, 0, 0, m2B, 0, 0, 0, m3B)
+ __m128i b = _mm_and_si128(SkPackedB16x5ToUnmaskedB32x5_SSE2(mask),
+ _mm_set1_epi32(0x1F << SK_B32_SHIFT));
+
+ // Pack the 4 16bit mask pixels into 4 32bit pixels, (p0, p1, p2, p3)
+ // Each component (m0R, m0G, etc.) is then a 5-bit value aligned to an
+ // 8-bit position
+ // mask = (0, m0R, m0G, m0B, 0, m1R, m1G, m1B,
+ // 0, m2R, m2G, m2B, 0, m3R, m3G, m3B)
+ mask = _mm_or_si128(_mm_or_si128(r, g), b);
+
+ // Interleave R,G,B into the lower byte of word.
+ // i.e. split the sixteen 8-bit values from mask into two sets of eight
+ // 16-bit values, padded by zero.
+ __m128i maskLo, maskHi;
+ // maskLo = (0, 0, m0R, 0, m0G, 0, m0B, 0, 0, 0, m1R, 0, m1G, 0, m1B, 0)
+ maskLo = _mm_unpacklo_epi8(mask, _mm_setzero_si128());
+ // maskHi = (0, 0, m2R, 0, m2G, 0, m2B, 0, 0, 0, m3R, 0, m3G, 0, m3B, 0)
+ maskHi = _mm_unpackhi_epi8(mask, _mm_setzero_si128());
+
+ // Upscale from 0..31 to 0..32
+    // (allows us to replace a division with a shift further down)
+ // Left-shift each component by 4 and add the result back to that component,
+ // mapping numbers in the range 0..15 to 0..15, and 16..31 to 17..32
+ maskLo = _mm_add_epi16(maskLo, _mm_srli_epi16(maskLo, 4));
+ maskHi = _mm_add_epi16(maskHi, _mm_srli_epi16(maskHi, 4));
+
+ // Interleave R,G,B into the lower byte of the word
+ // dstLo = (0, 0, d0R, 0, d0G, 0, d0B, 0, 0, 0, d1R, 0, d1G, 0, d1B, 0)
+ __m128i dstLo = _mm_unpacklo_epi8(dst, _mm_setzero_si128());
+    // dstHi = (0, 0, d2R, 0, d2G, 0, d2B, 0, 0, 0, d3R, 0, d3G, 0, d3B, 0)
+ __m128i dstHi = _mm_unpackhi_epi8(dst, _mm_setzero_si128());
+
+ // mask = (src - dst) * mask
+ maskLo = _mm_mullo_epi16(maskLo, _mm_sub_epi16(src, dstLo));
+ maskHi = _mm_mullo_epi16(maskHi, _mm_sub_epi16(src, dstHi));
+
+ // mask = (src - dst) * mask >> 5
+ maskLo = _mm_srai_epi16(maskLo, 5);
+ maskHi = _mm_srai_epi16(maskHi, 5);
+
+ // Add two pixels into result.
+ // result = dst + ((src - dst) * mask >> 5)
+ __m128i resultLo = _mm_add_epi16(dstLo, maskLo);
+ __m128i resultHi = _mm_add_epi16(dstHi, maskHi);
+
+ // Pack into 4 32bit dst pixels and force opaque.
+ // resultLo and resultHi contain eight 16-bit components (two pixels) each.
+    // Merge into one SSE register with sixteen 8-bit values (four pixels),
+ // clamping to 255 if necessary. Set alpha components to 0xFF.
+ return _mm_or_si128(_mm_packus_epi16(resultLo, resultHi),
+ _mm_set1_epi32(SK_A32_MASK << SK_A32_SHIFT));
+}
+
+void SkBlitLCD16Row_SSE2(SkPMColor dst[], const uint16_t mask[],
+ SkColor src, int width, SkPMColor) {
+ if (width <= 0) {
+ return;
+ }
+
+ int srcA = SkColorGetA(src);
+ int srcR = SkColorGetR(src);
+ int srcG = SkColorGetG(src);
+ int srcB = SkColorGetB(src);
+
+ srcA = SkAlpha255To256(srcA);
+
+ if (width >= 4) {
+ SkASSERT(((size_t)dst & 0x03) == 0);
+ while (((size_t)dst & 0x0F) != 0) {
+ *dst = SkBlendLCD16(srcA, srcR, srcG, srcB, *dst, *mask);
+ mask++;
+ dst++;
+ width--;
+ }
+
+ __m128i *d = reinterpret_cast<__m128i*>(dst);
+ // Set alpha to 0xFF and replicate source four times in SSE register.
+ __m128i src_sse = _mm_set1_epi32(SkPackARGB32(0xFF, srcR, srcG, srcB));
+ // Interleave with zeros to get two sets of four 16-bit values.
+ src_sse = _mm_unpacklo_epi8(src_sse, _mm_setzero_si128());
+        // src_sse = (0xFF, 0, sR, 0, sG, 0, sB, 0, 0xFF, 0, sR, 0, sG, 0, sB, 0)
+        // Set srcA_sse to contain eight 16-bit copies of srcA.
+ __m128i srcA_sse = _mm_set1_epi16(srcA);
+ while (width >= 4) {
+ // Load four destination pixels into dst_sse.
+ __m128i dst_sse = _mm_load_si128(d);
+ // Load four 16-bit masks into lower half of mask_sse.
+ __m128i mask_sse = _mm_loadl_epi64(
+ reinterpret_cast<const __m128i*>(mask));
+
+            // Check whether the masks are equal to 0 and gather the highest
+            // bit of each byte of the result; if all masks are zero,
+            // pack_cmp will be 0xFFFF.
+ int pack_cmp = _mm_movemask_epi8(_mm_cmpeq_epi16(mask_sse,
+ _mm_setzero_si128()));
+
+ // if mask pixels are not all zero, we will blend the dst pixels
+ if (pack_cmp != 0xFFFF) {
+ // Unpack 4 16bit mask pixels to
+ // mask_sse = (m0RGBLo, m0RGBHi, 0, 0, m1RGBLo, m1RGBHi, 0, 0,
+ // m2RGBLo, m2RGBHi, 0, 0, m3RGBLo, m3RGBHi, 0, 0)
+ mask_sse = _mm_unpacklo_epi16(mask_sse,
+ _mm_setzero_si128());
+
+ // Process 4 32bit dst pixels
+ __m128i result = SkBlendLCD16_SSE2(src_sse, dst_sse,
+ mask_sse, srcA_sse);
+ _mm_store_si128(d, result);
+ }
+
+ d++;
+ mask += 4;
+ width -= 4;
+ }
+
+ dst = reinterpret_cast<SkPMColor*>(d);
+ }
+
+ while (width > 0) {
+ *dst = SkBlendLCD16(srcA, srcR, srcG, srcB, *dst, *mask);
+ mask++;
+ dst++;
+ width--;
+ }
+}
+
+void SkBlitLCD16OpaqueRow_SSE2(SkPMColor dst[], const uint16_t mask[],
+ SkColor src, int width, SkPMColor opaqueDst) {
+ if (width <= 0) {
+ return;
+ }
+
+ int srcR = SkColorGetR(src);
+ int srcG = SkColorGetG(src);
+ int srcB = SkColorGetB(src);
+
+ if (width >= 4) {
+ SkASSERT(((size_t)dst & 0x03) == 0);
+ while (((size_t)dst & 0x0F) != 0) {
+ *dst = SkBlendLCD16Opaque(srcR, srcG, srcB, *dst, *mask, opaqueDst);
+ mask++;
+ dst++;
+ width--;
+ }
+
+ __m128i *d = reinterpret_cast<__m128i*>(dst);
+ // Set alpha to 0xFF and replicate source four times in SSE register.
+ __m128i src_sse = _mm_set1_epi32(SkPackARGB32(0xFF, srcR, srcG, srcB));
+        // Interleave with zeros to get two sets of four 16-bit values.
+ // src_sse=(0xFF, 0, sR, 0, sG, 0, sB, 0, 0xFF, 0, sR, 0, sG, 0, sB, 0)
+ src_sse = _mm_unpacklo_epi8(src_sse, _mm_setzero_si128());
+ while (width >= 4) {
+ // Load four destination pixels into dst_sse.
+ __m128i dst_sse = _mm_load_si128(d);
+ // Load four 16-bit masks into lower half of mask_sse.
+ __m128i mask_sse = _mm_loadl_epi64(
+ reinterpret_cast<const __m128i*>(mask));
+
+            // Check whether the masks are equal to 0 and gather the highest
+            // bit of each byte of the result; if all masks are zero,
+            // pack_cmp will be 0xFFFF.
+ int pack_cmp = _mm_movemask_epi8(_mm_cmpeq_epi16(mask_sse,
+ _mm_setzero_si128()));
+
+ // if mask pixels are not all zero, we will blend the dst pixels
+ if (pack_cmp != 0xFFFF) {
+ // Unpack 4 16bit mask pixels to
+ // mask_sse = (m0RGBLo, m0RGBHi, 0, 0, m1RGBLo, m1RGBHi, 0, 0,
+ // m2RGBLo, m2RGBHi, 0, 0, m3RGBLo, m3RGBHi, 0, 0)
+ mask_sse = _mm_unpacklo_epi16(mask_sse,
+ _mm_setzero_si128());
+
+ // Process 4 32bit dst pixels
+ __m128i result = SkBlendLCD16Opaque_SSE2(src_sse, dst_sse,
+ mask_sse);
+ _mm_store_si128(d, result);
+ }
+
+ d++;
+ mask += 4;
+ width -= 4;
+ }
+
+ dst = reinterpret_cast<SkPMColor*>(d);
+ }
+
+ while (width > 0) {
+ *dst = SkBlendLCD16Opaque(srcR, srcG, srcB, *dst, *mask, opaqueDst);
+ mask++;
+ dst++;
+ width--;
+ }
+}
+
+/* SSE2 version of S32_D565_Opaque()
+ * portable version is in core/SkBlitRow_D16.cpp
+ */
+void S32_D565_Opaque_SSE2(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src, int count,
+ U8CPU alpha, int /*x*/, int /*y*/) {
+ SkASSERT(255 == alpha);
+
+ if (count <= 0) {
+ return;
+ }
+
+ if (count >= 8) {
+ while (((size_t)dst & 0x0F) != 0) {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+
+ *dst++ = SkPixel32ToPixel16_ToU16(c);
+ count--;
+ }
+
+ const __m128i* s = reinterpret_cast<const __m128i*>(src);
+ __m128i* d = reinterpret_cast<__m128i*>(dst);
+
+ while (count >= 8) {
+ // Load 8 pixels of src.
+ __m128i src_pixel1 = _mm_loadu_si128(s++);
+ __m128i src_pixel2 = _mm_loadu_si128(s++);
+
+ __m128i d_pixel = SkPixel32ToPixel16_ToU16_SSE2(src_pixel1, src_pixel2);
+ _mm_store_si128(d++, d_pixel);
+ count -= 8;
+ }
+ src = reinterpret_cast<const SkPMColor*>(s);
+ dst = reinterpret_cast<uint16_t*>(d);
+ }
+
+ if (count > 0) {
+ do {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+ *dst++ = SkPixel32ToPixel16_ToU16(c);
+ } while (--count != 0);
+ }
+}
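+
+// What the conversion above does per pixel (sketch only, for the standard
+// 565 layout): keep the top 5/6/5 bits of R/G/B and drop alpha, i.e.
+//     d565 = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3)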
+
+/* SSE2 version of S32A_D565_Opaque()
+ * portable version is in core/SkBlitRow_D16.cpp
+ */
+void S32A_D565_Opaque_SSE2(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha, int /*x*/, int /*y*/) {
+ SkASSERT(255 == alpha);
+
+ if (count <= 0) {
+ return;
+ }
+
+ if (count >= 8) {
+        // Align dst to a 16-byte boundary.
+ while (((size_t)dst & 0x0F) != 0) {
+ SkPMColor c = *src++;
+ if (c) {
+ *dst = SkSrcOver32To16(c, *dst);
+ }
+ dst += 1;
+ count--;
+ }
+
+ const __m128i* s = reinterpret_cast<const __m128i*>(src);
+ __m128i* d = reinterpret_cast<__m128i*>(dst);
+ __m128i var255 = _mm_set1_epi16(255);
+ __m128i r16_mask = _mm_set1_epi16(SK_R16_MASK);
+ __m128i g16_mask = _mm_set1_epi16(SK_G16_MASK);
+ __m128i b16_mask = _mm_set1_epi16(SK_B16_MASK);
+
+ while (count >= 8) {
+ // Load 8 pixels of src.
+ __m128i src_pixel1 = _mm_loadu_si128(s++);
+ __m128i src_pixel2 = _mm_loadu_si128(s++);
+
+            // Check whether the src pixels are equal to 0 and gather the
+            // highest bit of each byte of the result; if all src pixels are
+            // zero, src_cmp1 and src_cmp2 will be 0xFFFF.
+ int src_cmp1 = _mm_movemask_epi8(_mm_cmpeq_epi16(src_pixel1,
+ _mm_setzero_si128()));
+ int src_cmp2 = _mm_movemask_epi8(_mm_cmpeq_epi16(src_pixel2,
+ _mm_setzero_si128()));
+ if (src_cmp1 == 0xFFFF && src_cmp2 == 0xFFFF) {
+ d++;
+ count -= 8;
+ continue;
+ }
+
+ // Load 8 pixels of dst.
+ __m128i dst_pixel = _mm_load_si128(d);
+
+ // Extract A from src.
+ __m128i sa1 = _mm_slli_epi32(src_pixel1, (24 - SK_A32_SHIFT));
+ sa1 = _mm_srli_epi32(sa1, 24);
+ __m128i sa2 = _mm_slli_epi32(src_pixel2, (24 - SK_A32_SHIFT));
+ sa2 = _mm_srli_epi32(sa2, 24);
+ __m128i sa = _mm_packs_epi32(sa1, sa2);
+
+ // Extract R from src.
+ __m128i sr1 = _mm_slli_epi32(src_pixel1, (24 - SK_R32_SHIFT));
+ sr1 = _mm_srli_epi32(sr1, 24);
+ __m128i sr2 = _mm_slli_epi32(src_pixel2, (24 - SK_R32_SHIFT));
+ sr2 = _mm_srli_epi32(sr2, 24);
+ __m128i sr = _mm_packs_epi32(sr1, sr2);
+
+ // Extract G from src.
+ __m128i sg1 = _mm_slli_epi32(src_pixel1, (24 - SK_G32_SHIFT));
+ sg1 = _mm_srli_epi32(sg1, 24);
+ __m128i sg2 = _mm_slli_epi32(src_pixel2, (24 - SK_G32_SHIFT));
+ sg2 = _mm_srli_epi32(sg2, 24);
+ __m128i sg = _mm_packs_epi32(sg1, sg2);
+
+ // Extract B from src.
+ __m128i sb1 = _mm_slli_epi32(src_pixel1, (24 - SK_B32_SHIFT));
+ sb1 = _mm_srli_epi32(sb1, 24);
+ __m128i sb2 = _mm_slli_epi32(src_pixel2, (24 - SK_B32_SHIFT));
+ sb2 = _mm_srli_epi32(sb2, 24);
+ __m128i sb = _mm_packs_epi32(sb1, sb2);
+
+ // Extract R G B from dst.
+ __m128i dr = _mm_srli_epi16(dst_pixel, SK_R16_SHIFT);
+ dr = _mm_and_si128(dr, r16_mask);
+ __m128i dg = _mm_srli_epi16(dst_pixel, SK_G16_SHIFT);
+ dg = _mm_and_si128(dg, g16_mask);
+ __m128i db = _mm_srli_epi16(dst_pixel, SK_B16_SHIFT);
+ db = _mm_and_si128(db, b16_mask);
+
+            __m128i isa = _mm_sub_epi16(var255, sa); // 255 - sa
+
+ // Calculate R G B of result.
+ // Original algorithm is in SkSrcOver32To16().
+ dr = _mm_add_epi16(sr, SkMul16ShiftRound_SSE2(dr, isa, SK_R16_BITS));
+ dr = _mm_srli_epi16(dr, 8 - SK_R16_BITS);
+ dg = _mm_add_epi16(sg, SkMul16ShiftRound_SSE2(dg, isa, SK_G16_BITS));
+ dg = _mm_srli_epi16(dg, 8 - SK_G16_BITS);
+ db = _mm_add_epi16(sb, SkMul16ShiftRound_SSE2(db, isa, SK_B16_BITS));
+ db = _mm_srli_epi16(db, 8 - SK_B16_BITS);
+
+ // Pack R G B into 16-bit color.
+ __m128i d_pixel = SkPackRGB16_SSE2(dr, dg, db);
+
+ // Store 8 16-bit colors in dst.
+ _mm_store_si128(d++, d_pixel);
+ count -= 8;
+ }
+
+ src = reinterpret_cast<const SkPMColor*>(s);
+ dst = reinterpret_cast<uint16_t*>(d);
+ }
+
+ if (count > 0) {
+ do {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+ if (c) {
+ *dst = SkSrcOver32To16(c, *dst);
+ }
+ dst += 1;
+ } while (--count != 0);
+ }
+}
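+
+// Per-channel sketch of the SrcOver step above (illustration only): for an
+// 8-bit source channel s, a 5- or 6-bit dst channel d and isa = 255 - srcA,
+//     d16 = (s + SkMul16ShiftRound_SSE2(d, isa, bits)) >> (8 - bits)
+// i.e. the dst channel is scaled by the inverse source alpha, rounded back to
+// 'bits' bits, added to the 8-bit source channel, then narrowed to 565.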
+
+void S32_D565_Opaque_Dither_SSE2(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha, int x, int y) {
+ SkASSERT(255 == alpha);
+
+ if (count <= 0) {
+ return;
+ }
+
+ if (count >= 8) {
+ while (((size_t)dst & 0x0F) != 0) {
+ DITHER_565_SCAN(y);
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+
+ unsigned dither = DITHER_VALUE(x);
+ *dst++ = SkDitherRGB32To565(c, dither);
+ DITHER_INC_X(x);
+ count--;
+ }
+
+ unsigned short dither_value[8];
+ __m128i dither;
+#ifdef ENABLE_DITHER_MATRIX_4X4
+ const uint8_t* dither_scan = gDitherMatrix_3Bit_4X4[(y) & 3];
+ dither_value[0] = dither_value[4] = dither_scan[(x) & 3];
+ dither_value[1] = dither_value[5] = dither_scan[(x + 1) & 3];
+ dither_value[2] = dither_value[6] = dither_scan[(x + 2) & 3];
+ dither_value[3] = dither_value[7] = dither_scan[(x + 3) & 3];
+#else
+ const uint16_t dither_scan = gDitherMatrix_3Bit_16[(y) & 3];
+ dither_value[0] = dither_value[4] = (dither_scan
+ >> (((x) & 3) << 2)) & 0xF;
+ dither_value[1] = dither_value[5] = (dither_scan
+ >> (((x + 1) & 3) << 2)) & 0xF;
+ dither_value[2] = dither_value[6] = (dither_scan
+ >> (((x + 2) & 3) << 2)) & 0xF;
+ dither_value[3] = dither_value[7] = (dither_scan
+ >> (((x + 3) & 3) << 2)) & 0xF;
+#endif
+ dither = _mm_loadu_si128((__m128i*) dither_value);
+
+ const __m128i* s = reinterpret_cast<const __m128i*>(src);
+ __m128i* d = reinterpret_cast<__m128i*>(dst);
+
+ while (count >= 8) {
+ // Load 8 pixels of src.
+ __m128i src_pixel1 = _mm_loadu_si128(s++);
+ __m128i src_pixel2 = _mm_loadu_si128(s++);
+
+ // Extract R from src.
+ __m128i sr1 = _mm_slli_epi32(src_pixel1, (24 - SK_R32_SHIFT));
+ sr1 = _mm_srli_epi32(sr1, 24);
+ __m128i sr2 = _mm_slli_epi32(src_pixel2, (24 - SK_R32_SHIFT));
+ sr2 = _mm_srli_epi32(sr2, 24);
+ __m128i sr = _mm_packs_epi32(sr1, sr2);
+
+ // SkDITHER_R32To565(sr, dither)
+ __m128i sr_offset = _mm_srli_epi16(sr, 5);
+ sr = _mm_add_epi16(sr, dither);
+ sr = _mm_sub_epi16(sr, sr_offset);
+ sr = _mm_srli_epi16(sr, SK_R32_BITS - SK_R16_BITS);
+
+ // Extract G from src.
+ __m128i sg1 = _mm_slli_epi32(src_pixel1, (24 - SK_G32_SHIFT));
+ sg1 = _mm_srli_epi32(sg1, 24);
+ __m128i sg2 = _mm_slli_epi32(src_pixel2, (24 - SK_G32_SHIFT));
+ sg2 = _mm_srli_epi32(sg2, 24);
+ __m128i sg = _mm_packs_epi32(sg1, sg2);
+
+            // SkDITHER_G32To565(sg, dither)
+ __m128i sg_offset = _mm_srli_epi16(sg, 6);
+ sg = _mm_add_epi16(sg, _mm_srli_epi16(dither, 1));
+ sg = _mm_sub_epi16(sg, sg_offset);
+ sg = _mm_srli_epi16(sg, SK_G32_BITS - SK_G16_BITS);
+
+ // Extract B from src.
+ __m128i sb1 = _mm_slli_epi32(src_pixel1, (24 - SK_B32_SHIFT));
+ sb1 = _mm_srli_epi32(sb1, 24);
+ __m128i sb2 = _mm_slli_epi32(src_pixel2, (24 - SK_B32_SHIFT));
+ sb2 = _mm_srli_epi32(sb2, 24);
+ __m128i sb = _mm_packs_epi32(sb1, sb2);
+
+            // SkDITHER_B32To565(sb, dither)
+ __m128i sb_offset = _mm_srli_epi16(sb, 5);
+ sb = _mm_add_epi16(sb, dither);
+ sb = _mm_sub_epi16(sb, sb_offset);
+ sb = _mm_srli_epi16(sb, SK_B32_BITS - SK_B16_BITS);
+
+ // Pack and store 16-bit dst pixel.
+ __m128i d_pixel = SkPackRGB16_SSE2(sr, sg, sb);
+ _mm_store_si128(d++, d_pixel);
+
+ count -= 8;
+ x += 8;
+ }
+
+ src = reinterpret_cast<const SkPMColor*>(s);
+ dst = reinterpret_cast<uint16_t*>(d);
+ }
+
+ if (count > 0) {
+ DITHER_565_SCAN(y);
+ do {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+
+ unsigned dither = DITHER_VALUE(x);
+ *dst++ = SkDitherRGB32To565(c, dither);
+ DITHER_INC_X(x);
+ } while (--count != 0);
+ }
+}
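+
+// Scalar sketch of the dither-and-pack step above (illustration only), with
+// d being the 4-bit dither value for the pixel:
+//     r565 = (r + d - (r >> 5)) >> 3
+//     g565 = (g + (d >> 1) - (g >> 6)) >> 2
+//     b565 = (b + d - (b >> 5)) >> 3
+// matching the SkDITHER_*32To565() macros referenced in the comments above.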
+
+/* SSE2 version of S32A_D565_Opaque_Dither()
+ * portable version is in core/SkBlitRow_D16.cpp
+ */
+void S32A_D565_Opaque_Dither_SSE2(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha, int x, int y) {
+ SkASSERT(255 == alpha);
+
+ if (count <= 0) {
+ return;
+ }
+
+ if (count >= 8) {
+ while (((size_t)dst & 0x0F) != 0) {
+ DITHER_565_SCAN(y);
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+ if (c) {
+ unsigned a = SkGetPackedA32(c);
+
+ int d = SkAlphaMul(DITHER_VALUE(x), SkAlpha255To256(a));
+
+ unsigned sr = SkGetPackedR32(c);
+ unsigned sg = SkGetPackedG32(c);
+ unsigned sb = SkGetPackedB32(c);
+ sr = SkDITHER_R32_FOR_565(sr, d);
+ sg = SkDITHER_G32_FOR_565(sg, d);
+ sb = SkDITHER_B32_FOR_565(sb, d);
+
+ uint32_t src_expanded = (sg << 24) | (sr << 13) | (sb << 2);
+ uint32_t dst_expanded = SkExpand_rgb_16(*dst);
+ dst_expanded = dst_expanded * (SkAlpha255To256(255 - a) >> 3);
+ // now src and dst expanded are in g:11 r:10 x:1 b:10
+ *dst = SkCompact_rgb_16((src_expanded + dst_expanded) >> 5);
+ }
+ dst += 1;
+ DITHER_INC_X(x);
+ count--;
+ }
+
+ unsigned short dither_value[8];
+ __m128i dither, dither_cur;
+#ifdef ENABLE_DITHER_MATRIX_4X4
+ const uint8_t* dither_scan = gDitherMatrix_3Bit_4X4[(y) & 3];
+ dither_value[0] = dither_value[4] = dither_scan[(x) & 3];
+ dither_value[1] = dither_value[5] = dither_scan[(x + 1) & 3];
+ dither_value[2] = dither_value[6] = dither_scan[(x + 2) & 3];
+ dither_value[3] = dither_value[7] = dither_scan[(x + 3) & 3];
+#else
+ const uint16_t dither_scan = gDitherMatrix_3Bit_16[(y) & 3];
+ dither_value[0] = dither_value[4] = (dither_scan
+ >> (((x) & 3) << 2)) & 0xF;
+ dither_value[1] = dither_value[5] = (dither_scan
+ >> (((x + 1) & 3) << 2)) & 0xF;
+ dither_value[2] = dither_value[6] = (dither_scan
+ >> (((x + 2) & 3) << 2)) & 0xF;
+ dither_value[3] = dither_value[7] = (dither_scan
+ >> (((x + 3) & 3) << 2)) & 0xF;
+#endif
+ dither = _mm_loadu_si128((__m128i*) dither_value);
+
+ const __m128i* s = reinterpret_cast<const __m128i*>(src);
+ __m128i* d = reinterpret_cast<__m128i*>(dst);
+ __m128i var256 = _mm_set1_epi16(256);
+ __m128i r16_mask = _mm_set1_epi16(SK_R16_MASK);
+ __m128i g16_mask = _mm_set1_epi16(SK_G16_MASK);
+ __m128i b16_mask = _mm_set1_epi16(SK_B16_MASK);
+
+ while (count >= 8) {
+ // Load 8 pixels of src and dst.
+ __m128i src_pixel1 = _mm_loadu_si128(s++);
+ __m128i src_pixel2 = _mm_loadu_si128(s++);
+ __m128i dst_pixel = _mm_load_si128(d);
+
+ // Extract A from src.
+ __m128i sa1 = _mm_slli_epi32(src_pixel1, (24 - SK_A32_SHIFT));
+ sa1 = _mm_srli_epi32(sa1, 24);
+ __m128i sa2 = _mm_slli_epi32(src_pixel2, (24 - SK_A32_SHIFT));
+ sa2 = _mm_srli_epi32(sa2, 24);
+ __m128i sa = _mm_packs_epi32(sa1, sa2);
+
+ // Calculate current dither value.
+ dither_cur = _mm_mullo_epi16(dither,
+ _mm_add_epi16(sa, _mm_set1_epi16(1)));
+ dither_cur = _mm_srli_epi16(dither_cur, 8);
+
+ // Extract R from src.
+ __m128i sr1 = _mm_slli_epi32(src_pixel1, (24 - SK_R32_SHIFT));
+ sr1 = _mm_srli_epi32(sr1, 24);
+ __m128i sr2 = _mm_slli_epi32(src_pixel2, (24 - SK_R32_SHIFT));
+ sr2 = _mm_srli_epi32(sr2, 24);
+ __m128i sr = _mm_packs_epi32(sr1, sr2);
+
+ // SkDITHER_R32_FOR_565(sr, d)
+ __m128i sr_offset = _mm_srli_epi16(sr, 5);
+ sr = _mm_add_epi16(sr, dither_cur);
+ sr = _mm_sub_epi16(sr, sr_offset);
+
+ // Expand sr.
+ sr = _mm_slli_epi16(sr, 2);
+
+ // Extract G from src.
+ __m128i sg1 = _mm_slli_epi32(src_pixel1, (24 - SK_G32_SHIFT));
+ sg1 = _mm_srli_epi32(sg1, 24);
+ __m128i sg2 = _mm_slli_epi32(src_pixel2, (24 - SK_G32_SHIFT));
+ sg2 = _mm_srli_epi32(sg2, 24);
+ __m128i sg = _mm_packs_epi32(sg1, sg2);
+
+ // sg = SkDITHER_G32_FOR_565(sg, d).
+ __m128i sg_offset = _mm_srli_epi16(sg, 6);
+ sg = _mm_add_epi16(sg, _mm_srli_epi16(dither_cur, 1));
+ sg = _mm_sub_epi16(sg, sg_offset);
+
+ // Expand sg.
+ sg = _mm_slli_epi16(sg, 3);
+
+ // Extract B from src.
+ __m128i sb1 = _mm_slli_epi32(src_pixel1, (24 - SK_B32_SHIFT));
+ sb1 = _mm_srli_epi32(sb1, 24);
+ __m128i sb2 = _mm_slli_epi32(src_pixel2, (24 - SK_B32_SHIFT));
+ sb2 = _mm_srli_epi32(sb2, 24);
+ __m128i sb = _mm_packs_epi32(sb1, sb2);
+
+ // sb = SkDITHER_B32_FOR_565(sb, d).
+ __m128i sb_offset = _mm_srli_epi16(sb, 5);
+ sb = _mm_add_epi16(sb, dither_cur);
+ sb = _mm_sub_epi16(sb, sb_offset);
+
+ // Expand sb.
+ sb = _mm_slli_epi16(sb, 2);
+
+ // Extract R G B from dst.
+ __m128i dr = _mm_srli_epi16(dst_pixel, SK_R16_SHIFT);
+ dr = _mm_and_si128(dr, r16_mask);
+ __m128i dg = _mm_srli_epi16(dst_pixel, SK_G16_SHIFT);
+ dg = _mm_and_si128(dg, g16_mask);
+ __m128i db = _mm_srli_epi16(dst_pixel, SK_B16_SHIFT);
+ db = _mm_and_si128(db, b16_mask);
+
+ // SkAlpha255To256(255 - a) >> 3
+ __m128i isa = _mm_sub_epi16(var256, sa);
+ isa = _mm_srli_epi16(isa, 3);
+
+ dr = _mm_mullo_epi16(dr, isa);
+ dr = _mm_add_epi16(dr, sr);
+ dr = _mm_srli_epi16(dr, 5);
+
+ dg = _mm_mullo_epi16(dg, isa);
+ dg = _mm_add_epi16(dg, sg);
+ dg = _mm_srli_epi16(dg, 5);
+
+ db = _mm_mullo_epi16(db, isa);
+ db = _mm_add_epi16(db, sb);
+ db = _mm_srli_epi16(db, 5);
+
+ // Package and store dst pixel.
+ __m128i d_pixel = SkPackRGB16_SSE2(dr, dg, db);
+ _mm_store_si128(d++, d_pixel);
+
+ count -= 8;
+ x += 8;
+ }
+
+ src = reinterpret_cast<const SkPMColor*>(s);
+ dst = reinterpret_cast<uint16_t*>(d);
+ }
+
+ if (count > 0) {
+ DITHER_565_SCAN(y);
+ do {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+ if (c) {
+ unsigned a = SkGetPackedA32(c);
+
+ int d = SkAlphaMul(DITHER_VALUE(x), SkAlpha255To256(a));
+
+ unsigned sr = SkGetPackedR32(c);
+ unsigned sg = SkGetPackedG32(c);
+ unsigned sb = SkGetPackedB32(c);
+ sr = SkDITHER_R32_FOR_565(sr, d);
+ sg = SkDITHER_G32_FOR_565(sg, d);
+ sb = SkDITHER_B32_FOR_565(sb, d);
+
+ uint32_t src_expanded = (sg << 24) | (sr << 13) | (sb << 2);
+ uint32_t dst_expanded = SkExpand_rgb_16(*dst);
+ dst_expanded = dst_expanded * (SkAlpha255To256(255 - a) >> 3);
+ // now src and dst expanded are in g:11 r:10 x:1 b:10
+ *dst = SkCompact_rgb_16((src_expanded + dst_expanded) >> 5);
+ }
+ dst += 1;
+ DITHER_INC_X(x);
+ } while (--count != 0);
+ }
+}
diff --git a/gfx/skia/skia/src/opts/SkBlitRow_opts_SSE2.h b/gfx/skia/skia/src/opts/SkBlitRow_opts_SSE2.h
new file mode 100644
index 000000000..652ff6ee0
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBlitRow_opts_SSE2.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlitRow_opts_SSE2_DEFINED
+#define SkBlitRow_opts_SSE2_DEFINED
+
+#include "SkBlitRow.h"
+
+void S32_Blend_BlitRow32_SSE2(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha);
+
+void S32A_Blend_BlitRow32_SSE2(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha);
+
+void Color32A_D565_SSE2(uint16_t dst[], SkPMColor src, int count, int x,
+ int y);
+
+void SkBlitLCD16Row_SSE2(SkPMColor dst[], const uint16_t src[],
+ SkColor color, int width, SkPMColor);
+void SkBlitLCD16OpaqueRow_SSE2(SkPMColor dst[], const uint16_t src[],
+ SkColor color, int width, SkPMColor opaqueDst);
+
+void S32_D565_Opaque_SSE2(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src, int count,
+ U8CPU alpha, int /*x*/, int /*y*/);
+void S32A_D565_Opaque_SSE2(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha, int /*x*/, int /*y*/);
+void S32_D565_Opaque_Dither_SSE2(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha, int x, int y);
+void S32A_D565_Opaque_Dither_SSE2(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha, int x, int y);
+#endif
diff --git a/gfx/skia/skia/src/opts/SkBlitRow_opts_arm.cpp b/gfx/skia/skia/src/opts/SkBlitRow_opts_arm.cpp
new file mode 100644
index 000000000..d4b1d0dd8
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBlitRow_opts_arm.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBlitRow.h"
+#include "SkUtilsArm.h"
+
+#include "SkBlitRow_opts_arm_neon.h"
+
+extern const SkBlitRow::Proc16 sk_blitrow_platform_565_procs_arm[] = {
+ nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
+};
+
+extern const SkBlitRow::ColorProc16 sk_blitrow_platform_565_colorprocs_arm[] = {
+ nullptr, nullptr,
+};
+
+extern const SkBlitRow::Proc32 sk_blitrow_platform_32_procs_arm[] = {
+ nullptr, nullptr, nullptr, nullptr,
+};
+
+SkBlitRow::Proc16 SkBlitRow::PlatformFactory565(unsigned flags) {
+ return SK_ARM_NEON_WRAP(sk_blitrow_platform_565_procs_arm)[flags];
+}
+
+SkBlitRow::ColorProc16 SkBlitRow::PlatformColorFactory565(unsigned flags) {
+ return SK_ARM_NEON_WRAP(sk_blitrow_platform_565_colorprocs_arm)[flags];
+}
+
+SkBlitRow::Proc32 SkBlitRow::PlatformProcs32(unsigned flags) {
+ return SK_ARM_NEON_WRAP(sk_blitrow_platform_32_procs_arm)[flags];
+}
diff --git a/gfx/skia/skia/src/opts/SkBlitRow_opts_arm_neon.cpp b/gfx/skia/skia/src/opts/SkBlitRow_opts_arm_neon.cpp
new file mode 100644
index 000000000..7998a8951
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBlitRow_opts_arm_neon.cpp
@@ -0,0 +1,1323 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBlitRow_opts_arm_neon.h"
+
+#include "SkBlitMask.h"
+#include "SkBlitRow.h"
+#include "SkColorPriv.h"
+#include "SkDither.h"
+#include "SkMathPriv.h"
+#include "SkUtils.h"
+
+#include "SkColor_opts_neon.h"
+#include <arm_neon.h>
+
+#ifdef SK_CPU_ARM64
+static inline uint8x8x4_t sk_vld4_u8_arm64_3(const SkPMColor* SK_RESTRICT & src) {
+ uint8x8x4_t vsrc;
+ uint8x8_t vsrc_0, vsrc_1, vsrc_2;
+
+ asm (
+ "ld4 {v0.8b - v3.8b}, [%[src]], #32 \t\n"
+ "mov %[vsrc0].8b, v0.8b \t\n"
+ "mov %[vsrc1].8b, v1.8b \t\n"
+ "mov %[vsrc2].8b, v2.8b \t\n"
+ : [vsrc0] "=w" (vsrc_0), [vsrc1] "=w" (vsrc_1),
+ [vsrc2] "=w" (vsrc_2), [src] "+&r" (src)
+ : : "v0", "v1", "v2", "v3"
+ );
+
+ vsrc.val[0] = vsrc_0;
+ vsrc.val[1] = vsrc_1;
+ vsrc.val[2] = vsrc_2;
+
+ return vsrc;
+}
+
+static inline uint8x8x4_t sk_vld4_u8_arm64_4(const SkPMColor* SK_RESTRICT & src) {
+ uint8x8x4_t vsrc;
+ uint8x8_t vsrc_0, vsrc_1, vsrc_2, vsrc_3;
+
+ asm (
+ "ld4 {v0.8b - v3.8b}, [%[src]], #32 \t\n"
+ "mov %[vsrc0].8b, v0.8b \t\n"
+ "mov %[vsrc1].8b, v1.8b \t\n"
+ "mov %[vsrc2].8b, v2.8b \t\n"
+ "mov %[vsrc3].8b, v3.8b \t\n"
+ : [vsrc0] "=w" (vsrc_0), [vsrc1] "=w" (vsrc_1),
+ [vsrc2] "=w" (vsrc_2), [vsrc3] "=w" (vsrc_3),
+ [src] "+&r" (src)
+ : : "v0", "v1", "v2", "v3"
+ );
+
+ vsrc.val[0] = vsrc_0;
+ vsrc.val[1] = vsrc_1;
+ vsrc.val[2] = vsrc_2;
+ vsrc.val[3] = vsrc_3;
+
+ return vsrc;
+}
+#endif
+
+void S32_D565_Opaque_neon(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src, int count,
+ U8CPU alpha, int /*x*/, int /*y*/) {
+ SkASSERT(255 == alpha);
+
+ while (count >= 8) {
+ uint8x8x4_t vsrc;
+ uint16x8_t vdst;
+
+ // Load
+#ifdef SK_CPU_ARM64
+ vsrc = sk_vld4_u8_arm64_3(src);
+#else
+ vsrc = vld4_u8((uint8_t*)src);
+ src += 8;
+#endif
+
+ // Convert src to 565
+ vdst = SkPixel32ToPixel16_neon8(vsrc);
+
+ // Store
+ vst1q_u16(dst, vdst);
+
+ // Prepare next iteration
+ dst += 8;
+ count -= 8;
+ };
+
+ // Leftovers
+ while (count > 0) {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+ *dst = SkPixel32ToPixel16_ToU16(c);
+ dst++;
+ count--;
+ };
+}
+
+void S32_D565_Blend_neon(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src, int count,
+ U8CPU alpha, int /*x*/, int /*y*/) {
+ SkASSERT(255 > alpha);
+
+ uint16x8_t vmask_blue, vscale;
+
+ // prepare constants
+ vscale = vdupq_n_u16(SkAlpha255To256(alpha));
+ vmask_blue = vmovq_n_u16(0x1F);
+
+ while (count >= 8) {
+ uint8x8x4_t vsrc;
+ uint16x8_t vdst, vdst_r, vdst_g, vdst_b;
+ uint16x8_t vres_r, vres_g, vres_b;
+
+ // Load src
+#ifdef SK_CPU_ARM64
+ vsrc = sk_vld4_u8_arm64_3(src);
+#else
+ {
+ register uint8x8_t d0 asm("d0");
+ register uint8x8_t d1 asm("d1");
+ register uint8x8_t d2 asm("d2");
+ register uint8x8_t d3 asm("d3");
+
+ asm (
+ "vld4.8 {d0-d3},[%[src]]!"
+ : "=w" (d0), "=w" (d1), "=w" (d2), "=w" (d3), [src] "+&r" (src)
+ :
+ );
+ vsrc.val[0] = d0;
+ vsrc.val[1] = d1;
+ vsrc.val[2] = d2;
+ }
+#endif
+
+ // Load and unpack dst
+ vdst = vld1q_u16(dst);
+ vdst_g = vshlq_n_u16(vdst, 5); // shift green to top of lanes
+ vdst_b = vandq_u16(vdst, vmask_blue); // extract blue
+ vdst_r = vshrq_n_u16(vdst, 6+5); // extract red
+ vdst_g = vshrq_n_u16(vdst_g, 5+5); // extract green
+
+ // Shift src to 565 range
+ vsrc.val[NEON_R] = vshr_n_u8(vsrc.val[NEON_R], 3);
+ vsrc.val[NEON_G] = vshr_n_u8(vsrc.val[NEON_G], 2);
+ vsrc.val[NEON_B] = vshr_n_u8(vsrc.val[NEON_B], 3);
+
+ // Scale src - dst
+ vres_r = vmovl_u8(vsrc.val[NEON_R]) - vdst_r;
+ vres_g = vmovl_u8(vsrc.val[NEON_G]) - vdst_g;
+ vres_b = vmovl_u8(vsrc.val[NEON_B]) - vdst_b;
+
+ vres_r = vshrq_n_u16(vres_r * vscale, 8);
+ vres_g = vshrq_n_u16(vres_g * vscale, 8);
+ vres_b = vshrq_n_u16(vres_b * vscale, 8);
+
+ vres_r += vdst_r;
+ vres_g += vdst_g;
+ vres_b += vdst_b;
+
+ // Combine
+ vres_b = vsliq_n_u16(vres_b, vres_g, 5); // insert green into blue
+ vres_b = vsliq_n_u16(vres_b, vres_r, 6+5); // insert red into green/blue
+
+ // Store
+ vst1q_u16(dst, vres_b);
+ dst += 8;
+ count -= 8;
+ }
+ if (count > 0) {
+ int scale = SkAlpha255To256(alpha);
+ do {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+ uint16_t d = *dst;
+ *dst++ = SkPackRGB16(
+ SkAlphaBlend(SkPacked32ToR16(c), SkGetPackedR16(d), scale),
+ SkAlphaBlend(SkPacked32ToG16(c), SkGetPackedG16(d), scale),
+ SkAlphaBlend(SkPacked32ToB16(c), SkGetPackedB16(d), scale));
+ } while (--count != 0);
+ }
+}
+
+#ifdef SK_CPU_ARM32
+void S32A_D565_Opaque_neon(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src, int count,
+ U8CPU alpha, int /*x*/, int /*y*/) {
+ SkASSERT(255 == alpha);
+
+ if (count >= 8) {
+ uint16_t* SK_RESTRICT keep_dst = 0;
+
+ asm volatile (
+ "ands ip, %[count], #7 \n\t"
+ "vmov.u8 d31, #1<<7 \n\t"
+ "vld1.16 {q12}, [%[dst]] \n\t"
+ "vld4.8 {d0-d3}, [%[src]] \n\t"
+ // Thumb does not support the standard ARM conditional
+ // instructions but instead requires the 'it' instruction
+ // to signal conditional execution
+ "it eq \n\t"
+ "moveq ip, #8 \n\t"
+ "mov %[keep_dst], %[dst] \n\t"
+
+ "add %[src], %[src], ip, LSL#2 \n\t"
+ "add %[dst], %[dst], ip, LSL#1 \n\t"
+ "subs %[count], %[count], ip \n\t"
+ "b 9f \n\t"
+ // LOOP
+ "2: \n\t"
+
+ "vld1.16 {q12}, [%[dst]]! \n\t"
+ "vld4.8 {d0-d3}, [%[src]]! \n\t"
+ "vst1.16 {q10}, [%[keep_dst]] \n\t"
+ "sub %[keep_dst], %[dst], #8*2 \n\t"
+ "subs %[count], %[count], #8 \n\t"
+ "9: \n\t"
+ "pld [%[dst],#32] \n\t"
+ // expand 0565 q12 to 8888 {d4-d7}
+ "vmovn.u16 d4, q12 \n\t"
+ "vshr.u16 q11, q12, #5 \n\t"
+ "vshr.u16 q10, q12, #6+5 \n\t"
+ "vmovn.u16 d5, q11 \n\t"
+ "vmovn.u16 d6, q10 \n\t"
+ "vshl.u8 d4, d4, #3 \n\t"
+ "vshl.u8 d5, d5, #2 \n\t"
+ "vshl.u8 d6, d6, #3 \n\t"
+
+ "vmovl.u8 q14, d31 \n\t"
+ "vmovl.u8 q13, d31 \n\t"
+ "vmovl.u8 q12, d31 \n\t"
+
+                  // this block is duplicated in the 4/2/1-pixel and 8-pixel versions
+ "vmvn.8 d30, d3 \n\t"
+ "vmlal.u8 q14, d30, d6 \n\t"
+ "vmlal.u8 q13, d30, d5 \n\t"
+ "vmlal.u8 q12, d30, d4 \n\t"
+ "vshr.u16 q8, q14, #5 \n\t"
+ "vshr.u16 q9, q13, #6 \n\t"
+ "vaddhn.u16 d6, q14, q8 \n\t"
+ "vshr.u16 q8, q12, #5 \n\t"
+ "vaddhn.u16 d5, q13, q9 \n\t"
+ "vaddhn.u16 d4, q12, q8 \n\t"
+ // intentionally don't calculate alpha
+ // result in d4-d6
+
+ #ifdef SK_PMCOLOR_IS_RGBA
+ "vqadd.u8 d6, d6, d0 \n\t"
+ "vqadd.u8 d5, d5, d1 \n\t"
+ "vqadd.u8 d4, d4, d2 \n\t"
+ #else
+ "vqadd.u8 d6, d6, d2 \n\t"
+ "vqadd.u8 d5, d5, d1 \n\t"
+ "vqadd.u8 d4, d4, d0 \n\t"
+ #endif
+
+ // pack 8888 {d4-d6} to 0565 q10
+ "vshll.u8 q10, d6, #8 \n\t"
+ "vshll.u8 q3, d5, #8 \n\t"
+ "vshll.u8 q2, d4, #8 \n\t"
+ "vsri.u16 q10, q3, #5 \n\t"
+ "vsri.u16 q10, q2, #11 \n\t"
+
+ "bne 2b \n\t"
+
+ "1: \n\t"
+ "vst1.16 {q10}, [%[keep_dst]] \n\t"
+ : [count] "+r" (count)
+ : [dst] "r" (dst), [keep_dst] "r" (keep_dst), [src] "r" (src)
+ : "ip", "cc", "memory", "d0","d1","d2","d3","d4","d5","d6","d7",
+ "d16","d17","d18","d19","d20","d21","d22","d23","d24","d25","d26","d27","d28","d29",
+ "d30","d31"
+ );
+ }
+ else
+ { // handle count < 8
+ uint16_t* SK_RESTRICT keep_dst = 0;
+
+ asm volatile (
+ "vmov.u8 d31, #1<<7 \n\t"
+ "mov %[keep_dst], %[dst] \n\t"
+
+ "tst %[count], #4 \n\t"
+ "beq 14f \n\t"
+ "vld1.16 {d25}, [%[dst]]! \n\t"
+ "vld1.32 {q1}, [%[src]]! \n\t"
+
+ "14: \n\t"
+ "tst %[count], #2 \n\t"
+ "beq 12f \n\t"
+ "vld1.32 {d24[1]}, [%[dst]]! \n\t"
+ "vld1.32 {d1}, [%[src]]! \n\t"
+
+ "12: \n\t"
+ "tst %[count], #1 \n\t"
+ "beq 11f \n\t"
+ "vld1.16 {d24[1]}, [%[dst]]! \n\t"
+ "vld1.32 {d0[1]}, [%[src]]! \n\t"
+
+ "11: \n\t"
+ // unzips achieve the same as a vld4 operation
+ "vuzp.u16 q0, q1 \n\t"
+ "vuzp.u8 d0, d1 \n\t"
+ "vuzp.u8 d2, d3 \n\t"
+ // expand 0565 q12 to 8888 {d4-d7}
+ "vmovn.u16 d4, q12 \n\t"
+ "vshr.u16 q11, q12, #5 \n\t"
+ "vshr.u16 q10, q12, #6+5 \n\t"
+ "vmovn.u16 d5, q11 \n\t"
+ "vmovn.u16 d6, q10 \n\t"
+ "vshl.u8 d4, d4, #3 \n\t"
+ "vshl.u8 d5, d5, #2 \n\t"
+ "vshl.u8 d6, d6, #3 \n\t"
+
+ "vmovl.u8 q14, d31 \n\t"
+ "vmovl.u8 q13, d31 \n\t"
+ "vmovl.u8 q12, d31 \n\t"
+
+                  // this block is duplicated in the 4/2/1-pixel and 8-pixel versions
+ "vmvn.8 d30, d3 \n\t"
+ "vmlal.u8 q14, d30, d6 \n\t"
+ "vmlal.u8 q13, d30, d5 \n\t"
+ "vmlal.u8 q12, d30, d4 \n\t"
+ "vshr.u16 q8, q14, #5 \n\t"
+ "vshr.u16 q9, q13, #6 \n\t"
+ "vaddhn.u16 d6, q14, q8 \n\t"
+ "vshr.u16 q8, q12, #5 \n\t"
+ "vaddhn.u16 d5, q13, q9 \n\t"
+ "vaddhn.u16 d4, q12, q8 \n\t"
+ // intentionally don't calculate alpha
+ // result in d4-d6
+
+ #ifdef SK_PMCOLOR_IS_RGBA
+ "vqadd.u8 d6, d6, d0 \n\t"
+ "vqadd.u8 d5, d5, d1 \n\t"
+ "vqadd.u8 d4, d4, d2 \n\t"
+ #else
+ "vqadd.u8 d6, d6, d2 \n\t"
+ "vqadd.u8 d5, d5, d1 \n\t"
+ "vqadd.u8 d4, d4, d0 \n\t"
+ #endif
+
+ // pack 8888 {d4-d6} to 0565 q10
+ "vshll.u8 q10, d6, #8 \n\t"
+ "vshll.u8 q3, d5, #8 \n\t"
+ "vshll.u8 q2, d4, #8 \n\t"
+ "vsri.u16 q10, q3, #5 \n\t"
+ "vsri.u16 q10, q2, #11 \n\t"
+
+ // store
+ "tst %[count], #4 \n\t"
+ "beq 24f \n\t"
+ "vst1.16 {d21}, [%[keep_dst]]! \n\t"
+
+ "24: \n\t"
+ "tst %[count], #2 \n\t"
+ "beq 22f \n\t"
+ "vst1.32 {d20[1]}, [%[keep_dst]]! \n\t"
+
+ "22: \n\t"
+ "tst %[count], #1 \n\t"
+ "beq 21f \n\t"
+ "vst1.16 {d20[1]}, [%[keep_dst]]! \n\t"
+
+ "21: \n\t"
+ : [count] "+r" (count)
+ : [dst] "r" (dst), [keep_dst] "r" (keep_dst), [src] "r" (src)
+ : "ip", "cc", "memory", "d0","d1","d2","d3","d4","d5","d6","d7",
+ "d16","d17","d18","d19","d20","d21","d22","d23","d24","d25","d26","d27","d28","d29",
+ "d30","d31"
+ );
+ }
+}
+
+#else // #ifdef SK_CPU_ARM32
+
+void S32A_D565_Opaque_neon(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src, int count,
+ U8CPU alpha, int /*x*/, int /*y*/) {
+ SkASSERT(255 == alpha);
+
+ if (count >= 16) {
+ asm (
+ "movi v4.8h, #0x80 \t\n"
+
+ "1: \t\n"
+ "sub %w[count], %w[count], #16 \t\n"
+ "ld1 {v16.8h-v17.8h}, [%[dst]] \t\n"
+ "ld4 {v0.16b-v3.16b}, [%[src]], #64 \t\n"
+ "prfm pldl1keep, [%[src],#512] \t\n"
+ "prfm pldl1keep, [%[dst],#256] \t\n"
+ "ushr v20.8h, v17.8h, #5 \t\n"
+ "ushr v31.8h, v16.8h, #5 \t\n"
+ "xtn v6.8b, v31.8h \t\n"
+ "xtn2 v6.16b, v20.8h \t\n"
+ "ushr v20.8h, v17.8h, #11 \t\n"
+ "shl v19.16b, v6.16b, #2 \t\n"
+ "ushr v31.8h, v16.8h, #11 \t\n"
+ "xtn v22.8b, v31.8h \t\n"
+ "xtn2 v22.16b, v20.8h \t\n"
+ "shl v18.16b, v22.16b, #3 \t\n"
+ "mvn v3.16b, v3.16b \t\n"
+ "xtn v16.8b, v16.8h \t\n"
+ "mov v7.16b, v4.16b \t\n"
+ "xtn2 v16.16b, v17.8h \t\n"
+ "umlal v7.8h, v3.8b, v19.8b \t\n"
+ "shl v16.16b, v16.16b, #3 \t\n"
+ "mov v22.16b, v4.16b \t\n"
+ "ushr v24.8h, v7.8h, #6 \t\n"
+ "umlal v22.8h, v3.8b, v18.8b \t\n"
+ "ushr v20.8h, v22.8h, #5 \t\n"
+ "addhn v20.8b, v22.8h, v20.8h \t\n"
+ "cmp %w[count], #16 \t\n"
+ "mov v6.16b, v4.16b \t\n"
+ "mov v5.16b, v4.16b \t\n"
+ "umlal v6.8h, v3.8b, v16.8b \t\n"
+ "umlal2 v5.8h, v3.16b, v19.16b \t\n"
+ "mov v17.16b, v4.16b \t\n"
+ "ushr v19.8h, v6.8h, #5 \t\n"
+ "umlal2 v17.8h, v3.16b, v18.16b \t\n"
+ "addhn v7.8b, v7.8h, v24.8h \t\n"
+ "ushr v18.8h, v5.8h, #6 \t\n"
+ "ushr v21.8h, v17.8h, #5 \t\n"
+ "addhn2 v7.16b, v5.8h, v18.8h \t\n"
+ "addhn2 v20.16b, v17.8h, v21.8h \t\n"
+ "mov v22.16b, v4.16b \t\n"
+ "addhn v6.8b, v6.8h, v19.8h \t\n"
+ "umlal2 v22.8h, v3.16b, v16.16b \t\n"
+ "ushr v5.8h, v22.8h, #5 \t\n"
+ "addhn2 v6.16b, v22.8h, v5.8h \t\n"
+ "uqadd v7.16b, v1.16b, v7.16b \t\n"
+#if SK_PMCOLOR_BYTE_ORDER(B,G,R,A)
+ "uqadd v20.16b, v2.16b, v20.16b \t\n"
+ "uqadd v6.16b, v0.16b, v6.16b \t\n"
+#elif SK_PMCOLOR_BYTE_ORDER(R,G,B,A)
+ "uqadd v20.16b, v0.16b, v20.16b \t\n"
+ "uqadd v6.16b, v2.16b, v6.16b \t\n"
+#else
+#error "This function only supports BGRA and RGBA."
+#endif
+ "shll v22.8h, v20.8b, #8 \t\n"
+ "shll v5.8h, v7.8b, #8 \t\n"
+ "sri v22.8h, v5.8h, #5 \t\n"
+ "shll v17.8h, v6.8b, #8 \t\n"
+ "shll2 v23.8h, v20.16b, #8 \t\n"
+ "shll2 v7.8h, v7.16b, #8 \t\n"
+ "sri v22.8h, v17.8h, #11 \t\n"
+ "sri v23.8h, v7.8h, #5 \t\n"
+ "shll2 v6.8h, v6.16b, #8 \t\n"
+ "st1 {v22.8h}, [%[dst]], #16 \t\n"
+ "sri v23.8h, v6.8h, #11 \t\n"
+ "st1 {v23.8h}, [%[dst]], #16 \t\n"
+ "b.ge 1b \t\n"
+ : [dst] "+&r" (dst), [src] "+&r" (src), [count] "+&r" (count)
+ :: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+ "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24",
+ "v31"
+ );
+ }
+ // Leftovers
+ if (count > 0) {
+ do {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+ if (c) {
+ *dst = SkSrcOver32To16(c, *dst);
+ }
+ dst += 1;
+ } while (--count != 0);
+ }
+}
+#endif // #ifdef SK_CPU_ARM32
+
+static uint32_t pmcolor_to_expand16(SkPMColor c) {
+ unsigned r = SkGetPackedR32(c);
+ unsigned g = SkGetPackedG32(c);
+ unsigned b = SkGetPackedB32(c);
+ return (g << 24) | (r << 13) | (b << 2);
+}
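+
+/* The expanded layout used by Color32A_D565_neon below keeps each channel in
+ * its own wide field: green in bits 21..31, red in bits 11..20, blue in
+ * bits 0..9, with bit 10 unused (the "g:11 r:10 x:1 b:10" layout mentioned
+ * later in this file). pmcolor_to_expand16() pre-positions the 8-bit source
+ * channels so that (src_expand + SkExpand_rgb_16(dst) * scale) >> 5 lands
+ * each channel exactly where SkCompact_rgb_16() expects it.
+ */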
+
+void Color32A_D565_neon(uint16_t dst[], SkPMColor src, int count, int x, int y) {
+ uint32_t src_expand;
+ unsigned scale;
+ uint16x8_t vmask_blue;
+
+ if (count <= 0) return;
+ SkASSERT(((size_t)dst & 0x01) == 0);
+
+    /*
+     * This preamble aligns dst to 8 bytes so that the multi-byte NEON
+     * reads and writes that follow are aligned.
+     */
+ src_expand = pmcolor_to_expand16(src);
+ scale = SkAlpha255To256(0xFF - SkGetPackedA32(src)) >> 3;
+
+#define DST_ALIGN 8
+
+    /*
+     * preamble_size is in bytes; the preamble loop below advances dst by
+     * 2 bytes (one 565 pixel) per iteration.
+     */
+ int preamble_size = (DST_ALIGN - (size_t)dst) & (DST_ALIGN - 1);
+
+ for (int i = 0; i < preamble_size; i+=2, dst++) {
+ uint32_t dst_expand = SkExpand_rgb_16(*dst) * scale;
+ *dst = SkCompact_rgb_16((src_expand + dst_expand) >> 5);
+ if (--count == 0)
+ break;
+ }
+
+ int count16 = 0;
+ count16 = count >> 4;
+ vmask_blue = vmovq_n_u16(SK_B16_MASK);
+
+ if (count16) {
+ uint16x8_t wide_sr;
+ uint16x8_t wide_sg;
+ uint16x8_t wide_sb;
+ uint16x8_t wide_256_sa;
+
+ unsigned sr = SkGetPackedR32(src);
+ unsigned sg = SkGetPackedG32(src);
+ unsigned sb = SkGetPackedB32(src);
+ unsigned sa = SkGetPackedA32(src);
+
+        // Operation: dst_rgb = src_rgb + ((256 - src_a) >> 3) * dst_rgb
+        // sr is 8-bit; dr is 5-bit and is multiplied by the 5-bit scale ((256-sa)>>3),
+        // so shift sr left by 2 bits to line up the MSBs: 8 + 2 = 5 + 5.
+        wide_sr = vshlq_n_u16(vmovl_u8(vdup_n_u8(sr)), 2); // widen and shift src red
+
+        // sg is 8-bit; dg is 6-bit and is multiplied by the same 5-bit scale,
+        // so shift sg left by 3 bits: 8 + 3 = 6 + 5.
+        wide_sg = vshlq_n_u16(vmovl_u8(vdup_n_u8(sg)), 3); // widen and shift src green
+
+        // sb is 8-bit; db is 5-bit and is multiplied by the same 5-bit scale,
+        // so shift sb left by 2 bits: 8 + 2 = 5 + 5.
+        wide_sb = vshlq_n_u16(vmovl_u8(vdup_n_u8(sb)), 2); // widen and shift src blue
+
+ wide_256_sa =
+ vshrq_n_u16(vsubw_u8(vdupq_n_u16(256), vdup_n_u8(sa)), 3); // (256 - sa) >> 3
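+
+        // Putting it together, each lane of the loop below computes
+        //     ((src_channel << shift) + dst_channel * ((256 - sa) >> 3)) >> 5
+        // which is approximately src_channel_565 + dst_channel * (256 - sa) / 256;
+        // because src is premultiplied (every channel <= sa), the result stays
+        // within the destination channel's 5- or 6-bit range.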
+
+ while (count16-- > 0) {
+ uint16x8_t vdst1, vdst1_r, vdst1_g, vdst1_b;
+ uint16x8_t vdst2, vdst2_r, vdst2_g, vdst2_b;
+ vdst1 = vld1q_u16(dst);
+ dst += 8;
+ vdst2 = vld1q_u16(dst);
+            dst -= 8; // rewind so the blended results are stored back to these pixels.
+
+ vdst1_g = vshlq_n_u16(vdst1, SK_R16_BITS); // shift green to top of lanes
+ vdst1_b = vdst1 & vmask_blue; // extract blue
+ vdst1_r = vshrq_n_u16(vdst1, SK_R16_SHIFT); // extract red
+ vdst1_g = vshrq_n_u16(vdst1_g, SK_R16_BITS + SK_B16_BITS); // extract green
+
+ vdst2_g = vshlq_n_u16(vdst2, SK_R16_BITS); // shift green to top of lanes
+ vdst2_b = vdst2 & vmask_blue; // extract blue
+ vdst2_r = vshrq_n_u16(vdst2, SK_R16_SHIFT); // extract red
+ vdst2_g = vshrq_n_u16(vdst2_g, SK_R16_BITS + SK_B16_BITS); // extract green
+
+ vdst1_r = vmlaq_u16(wide_sr, wide_256_sa, vdst1_r); // sr + (256-sa) x dr1
+ vdst1_g = vmlaq_u16(wide_sg, wide_256_sa, vdst1_g); // sg + (256-sa) x dg1
+ vdst1_b = vmlaq_u16(wide_sb, wide_256_sa, vdst1_b); // sb + (256-sa) x db1
+
+ vdst2_r = vmlaq_u16(wide_sr, wide_256_sa, vdst2_r); // sr + (256-sa) x dr2
+ vdst2_g = vmlaq_u16(wide_sg, wide_256_sa, vdst2_g); // sg + (256-sa) x dg2
+ vdst2_b = vmlaq_u16(wide_sb, wide_256_sa, vdst2_b); // sb + (256-sa) x db2
+
+ vdst1_r = vshrq_n_u16(vdst1_r, 5); // 5-bit right shift for 5-bit red
+ vdst1_g = vshrq_n_u16(vdst1_g, 5); // 5-bit right shift for 6-bit green
+ vdst1_b = vshrq_n_u16(vdst1_b, 5); // 5-bit right shift for 5-bit blue
+
+ vdst1 = vsliq_n_u16(vdst1_b, vdst1_g, SK_G16_SHIFT); // insert green into blue
+ vdst1 = vsliq_n_u16(vdst1, vdst1_r, SK_R16_SHIFT); // insert red into green/blue
+
+ vdst2_r = vshrq_n_u16(vdst2_r, 5); // 5-bit right shift for 5-bit red
+ vdst2_g = vshrq_n_u16(vdst2_g, 5); // 5-bit right shift for 6-bit green
+ vdst2_b = vshrq_n_u16(vdst2_b, 5); // 5-bit right shift for 5-bit blue
+
+ vdst2 = vsliq_n_u16(vdst2_b, vdst2_g, SK_G16_SHIFT); // insert green into blue
+ vdst2 = vsliq_n_u16(vdst2, vdst2_r, SK_R16_SHIFT); // insert red into green/blue
+
+ vst1q_u16(dst, vdst1);
+ dst += 8;
+ vst1q_u16(dst, vdst2);
+ dst += 8;
+ }
+ }
+
+ count &= 0xF;
+ if (count > 0) {
+ do {
+ uint32_t dst_expand = SkExpand_rgb_16(*dst) * scale;
+ *dst = SkCompact_rgb_16((src_expand + dst_expand) >> 5);
+ dst += 1;
+ } while (--count != 0);
+ }
+}
+
+static inline uint16x8_t SkDiv255Round_neon8(uint16x8_t prod) {
+ prod += vdupq_n_u16(128);
+ prod += vshrq_n_u16(prod, 8);
+ return vshrq_n_u16(prod, 8);
+}
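+
+// SkDiv255Round_neon8 is the usual rounded divide-by-255 applied to each
+// 16-bit lane:  ((x + 128) + ((x + 128) >> 8)) >> 8, which equals x/255
+// rounded to nearest for any x up to 255*255. For example, x = 255*255:
+// 65025 + 128 = 65153, 65153 >> 8 = 254, (65153 + 254) >> 8 = 255.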
+
+void S32A_D565_Blend_neon(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src, int count,
+ U8CPU alpha, int /*x*/, int /*y*/) {
+ SkASSERT(255 > alpha);
+
+    /* This is a Neon version of S32A_D565_Blend. The results can differ from
+     * the portable implementation by at most 1.
+     */
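+
+    /* Per pixel this is the same computation as the scalar tail below:
+     *     dst_scale = 255 - (src_alpha * alpha) / 255    (rounded)
+     *     result    = (src_565 * alpha + dst_565 * dst_scale) / 255
+     * with the final /255 approximated by a rounding shift right by 8 unless
+     * S32A_D565_BLEND_EXACT is defined.
+     */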
+
+ if (count >= 8) {
+ uint16x8_t valpha_max, vmask_blue;
+ uint8x8_t valpha;
+
+ // prepare constants
+ valpha_max = vmovq_n_u16(255);
+ valpha = vdup_n_u8(alpha);
+ vmask_blue = vmovq_n_u16(SK_B16_MASK);
+
+ do {
+ uint16x8_t vdst, vdst_r, vdst_g, vdst_b;
+ uint16x8_t vres_a, vres_r, vres_g, vres_b;
+ uint8x8x4_t vsrc;
+
+ // load pixels
+ vdst = vld1q_u16(dst);
+#ifdef SK_CPU_ARM64
+ vsrc = sk_vld4_u8_arm64_4(src);
+#elif (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 6))
+ asm (
+ "vld4.u8 %h[vsrc], [%[src]]!"
+ : [vsrc] "=w" (vsrc), [src] "+&r" (src)
+ : :
+ );
+#else
+ register uint8x8_t d0 asm("d0");
+ register uint8x8_t d1 asm("d1");
+ register uint8x8_t d2 asm("d2");
+ register uint8x8_t d3 asm("d3");
+
+ asm volatile (
+ "vld4.u8 {d0-d3},[%[src]]!;"
+ : "=w" (d0), "=w" (d1), "=w" (d2), "=w" (d3),
+ [src] "+&r" (src)
+ : :
+ );
+ vsrc.val[0] = d0;
+ vsrc.val[1] = d1;
+ vsrc.val[2] = d2;
+ vsrc.val[3] = d3;
+#endif
+
+ // deinterleave dst
+ vdst_g = vshlq_n_u16(vdst, SK_R16_BITS); // shift green to top of lanes
+ vdst_b = vdst & vmask_blue; // extract blue
+ vdst_r = vshrq_n_u16(vdst, SK_R16_SHIFT); // extract red
+ vdst_g = vshrq_n_u16(vdst_g, SK_R16_BITS + SK_B16_BITS); // extract green
+
+ // shift src to 565
+ vsrc.val[NEON_R] = vshr_n_u8(vsrc.val[NEON_R], 8 - SK_R16_BITS);
+ vsrc.val[NEON_G] = vshr_n_u8(vsrc.val[NEON_G], 8 - SK_G16_BITS);
+ vsrc.val[NEON_B] = vshr_n_u8(vsrc.val[NEON_B], 8 - SK_B16_BITS);
+
+ // calc src * src_scale
+ vres_a = vmull_u8(vsrc.val[NEON_A], valpha);
+ vres_r = vmull_u8(vsrc.val[NEON_R], valpha);
+ vres_g = vmull_u8(vsrc.val[NEON_G], valpha);
+ vres_b = vmull_u8(vsrc.val[NEON_B], valpha);
+
+ // prepare dst_scale
+ vres_a = SkDiv255Round_neon8(vres_a);
+ vres_a = valpha_max - vres_a; // 255 - (sa * src_scale) / 255
+
+ // add dst * dst_scale to previous result
+ vres_r = vmlaq_u16(vres_r, vdst_r, vres_a);
+ vres_g = vmlaq_u16(vres_g, vdst_g, vres_a);
+ vres_b = vmlaq_u16(vres_b, vdst_b, vres_a);
+
+#ifdef S32A_D565_BLEND_EXACT
+ // It is possible to get exact results with this but it is slow,
+ // even slower than C code in some cases
+ vres_r = SkDiv255Round_neon8(vres_r);
+ vres_g = SkDiv255Round_neon8(vres_g);
+ vres_b = SkDiv255Round_neon8(vres_b);
+#else
+ vres_r = vrshrq_n_u16(vres_r, 8);
+ vres_g = vrshrq_n_u16(vres_g, 8);
+ vres_b = vrshrq_n_u16(vres_b, 8);
+#endif
+ // pack result
+ vres_b = vsliq_n_u16(vres_b, vres_g, SK_G16_SHIFT); // insert green into blue
+ vres_b = vsliq_n_u16(vres_b, vres_r, SK_R16_SHIFT); // insert red into green/blue
+
+ // store
+ vst1q_u16(dst, vres_b);
+ dst += 8;
+ count -= 8;
+ } while (count >= 8);
+ }
+
+ // leftovers
+ while (count-- > 0) {
+ SkPMColor sc = *src++;
+ if (sc) {
+ uint16_t dc = *dst;
+ unsigned dst_scale = 255 - SkMulDiv255Round(SkGetPackedA32(sc), alpha);
+ unsigned dr = (SkPacked32ToR16(sc) * alpha) + (SkGetPackedR16(dc) * dst_scale);
+ unsigned dg = (SkPacked32ToG16(sc) * alpha) + (SkGetPackedG16(dc) * dst_scale);
+ unsigned db = (SkPacked32ToB16(sc) * alpha) + (SkGetPackedB16(dc) * dst_scale);
+ *dst = SkPackRGB16(SkDiv255Round(dr), SkDiv255Round(dg), SkDiv255Round(db));
+ }
+ dst += 1;
+ }
+}
+
+/* dither matrix for Neon, derived from gDitherMatrix_3Bit_16.
+ * each dither value is spaced out into byte lanes, and repeated
+ * to allow an 8-byte load from offsets 0, 1, 2 or 3 from the
+ * start of each row.
+ */
+static const uint8_t gDitherMatrix_Neon[48] = {
+ 0, 4, 1, 5, 0, 4, 1, 5, 0, 4, 1, 5,
+ 6, 2, 7, 3, 6, 2, 7, 3, 6, 2, 7, 3,
+ 1, 5, 0, 4, 1, 5, 0, 4, 1, 5, 0, 4,
+    7, 3, 6, 2, 7, 3, 6, 2, 7, 3, 6, 2,
+};
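+
+/* Each row above is a 4-entry dither pattern written out three times, so
+ * &gDitherMatrix_Neon[(y&3)*12 + (x&3)] always has eight valid bytes after it:
+ * e.g. for y&3 == 0 and x&3 == 2 the 8-byte load picks up
+ * { 1, 5, 0, 4, 1, 5, 0, 4 }.
+ */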
+
+void S32_D565_Blend_Dither_neon(uint16_t *dst, const SkPMColor *src,
+ int count, U8CPU alpha, int x, int y)
+{
+
+ SkASSERT(255 > alpha);
+
+ // rescale alpha to range 1 - 256
+ int scale = SkAlpha255To256(alpha);
+
+ if (count >= 8) {
+ /* select row and offset for dither array */
+ const uint8_t *dstart = &gDitherMatrix_Neon[(y&3)*12 + (x&3)];
+
+ uint8x8_t vdither = vld1_u8(dstart); // load dither values
+ uint8x8_t vdither_g = vshr_n_u8(vdither, 1); // calc. green dither values
+
+ int16x8_t vscale = vdupq_n_s16(scale); // duplicate scale into neon reg
+ uint16x8_t vmask_b = vdupq_n_u16(0x1F); // set up blue mask
+
+ do {
+
+ uint8x8x4_t vsrc;
+ uint8x8_t vsrc_r, vsrc_g, vsrc_b;
+ uint8x8_t vsrc565_r, vsrc565_g, vsrc565_b;
+ uint16x8_t vsrc_dit_r, vsrc_dit_g, vsrc_dit_b;
+ uint16x8_t vsrc_res_r, vsrc_res_g, vsrc_res_b;
+ uint16x8_t vdst;
+ uint16x8_t vdst_r, vdst_g, vdst_b;
+ int16x8_t vres_r, vres_g, vres_b;
+ int8x8_t vres8_r, vres8_g, vres8_b;
+
+ // Load source and add dither
+#ifdef SK_CPU_ARM64
+ vsrc = sk_vld4_u8_arm64_3(src);
+#else
+ {
+ register uint8x8_t d0 asm("d0");
+ register uint8x8_t d1 asm("d1");
+ register uint8x8_t d2 asm("d2");
+ register uint8x8_t d3 asm("d3");
+
+ asm (
+ "vld4.8 {d0-d3},[%[src]]! "
+ : "=w" (d0), "=w" (d1), "=w" (d2), "=w" (d3), [src] "+&r" (src)
+ :
+ );
+ vsrc.val[0] = d0;
+ vsrc.val[1] = d1;
+ vsrc.val[2] = d2;
+ }
+#endif
+ vsrc_r = vsrc.val[NEON_R];
+ vsrc_g = vsrc.val[NEON_G];
+ vsrc_b = vsrc.val[NEON_B];
+
+ vsrc565_g = vshr_n_u8(vsrc_g, 6); // calc. green >> 6
+ vsrc565_r = vshr_n_u8(vsrc_r, 5); // calc. red >> 5
+ vsrc565_b = vshr_n_u8(vsrc_b, 5); // calc. blue >> 5
+
+ vsrc_dit_g = vaddl_u8(vsrc_g, vdither_g); // add in dither to green and widen
+ vsrc_dit_r = vaddl_u8(vsrc_r, vdither); // add in dither to red and widen
+ vsrc_dit_b = vaddl_u8(vsrc_b, vdither); // add in dither to blue and widen
+
+ vsrc_dit_r = vsubw_u8(vsrc_dit_r, vsrc565_r); // sub shifted red from result
+ vsrc_dit_g = vsubw_u8(vsrc_dit_g, vsrc565_g); // sub shifted green from result
+ vsrc_dit_b = vsubw_u8(vsrc_dit_b, vsrc565_b); // sub shifted blue from result
+
+ vsrc_res_r = vshrq_n_u16(vsrc_dit_r, 3);
+ vsrc_res_g = vshrq_n_u16(vsrc_dit_g, 2);
+ vsrc_res_b = vshrq_n_u16(vsrc_dit_b, 3);
+
+ // Load dst and unpack
+ vdst = vld1q_u16(dst);
+ vdst_g = vshrq_n_u16(vdst, 5); // shift down to get green
+ vdst_r = vshrq_n_u16(vshlq_n_u16(vdst, 5), 5+5); // double shift to extract red
+ vdst_b = vandq_u16(vdst, vmask_b); // mask to get blue
+
+ // subtract dst from src and widen
+ vres_r = vsubq_s16(vreinterpretq_s16_u16(vsrc_res_r), vreinterpretq_s16_u16(vdst_r));
+ vres_g = vsubq_s16(vreinterpretq_s16_u16(vsrc_res_g), vreinterpretq_s16_u16(vdst_g));
+ vres_b = vsubq_s16(vreinterpretq_s16_u16(vsrc_res_b), vreinterpretq_s16_u16(vdst_b));
+
+ // multiply diffs by scale and shift
+ vres_r = vmulq_s16(vres_r, vscale);
+ vres_g = vmulq_s16(vres_g, vscale);
+ vres_b = vmulq_s16(vres_b, vscale);
+
+ vres8_r = vshrn_n_s16(vres_r, 8);
+ vres8_g = vshrn_n_s16(vres_g, 8);
+ vres8_b = vshrn_n_s16(vres_b, 8);
+
+ // add dst to result
+ vres_r = vaddw_s8(vreinterpretq_s16_u16(vdst_r), vres8_r);
+ vres_g = vaddw_s8(vreinterpretq_s16_u16(vdst_g), vres8_g);
+ vres_b = vaddw_s8(vreinterpretq_s16_u16(vdst_b), vres8_b);
+
+ // put result into 565 format
+ vres_b = vsliq_n_s16(vres_b, vres_g, 5); // shift up green and insert into blue
+ vres_b = vsliq_n_s16(vres_b, vres_r, 6+5); // shift up red and insert into blue
+
+ // Store result
+ vst1q_u16(dst, vreinterpretq_u16_s16(vres_b));
+
+ // Next iteration
+ dst += 8;
+ count -= 8;
+
+ } while (count >= 8);
+ }
+
+ // Leftovers
+ if (count > 0) {
+ int scale = SkAlpha255To256(alpha);
+ DITHER_565_SCAN(y);
+ do {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+
+ int dither = DITHER_VALUE(x);
+ int sr = SkGetPackedR32(c);
+ int sg = SkGetPackedG32(c);
+ int sb = SkGetPackedB32(c);
+ sr = SkDITHER_R32To565(sr, dither);
+ sg = SkDITHER_G32To565(sg, dither);
+ sb = SkDITHER_B32To565(sb, dither);
+
+ uint16_t d = *dst;
+ *dst++ = SkPackRGB16(SkAlphaBlend(sr, SkGetPackedR16(d), scale),
+ SkAlphaBlend(sg, SkGetPackedG16(d), scale),
+ SkAlphaBlend(sb, SkGetPackedB16(d), scale));
+ DITHER_INC_X(x);
+ } while (--count != 0);
+ }
+}
+
+/* Neon version of S32_Blend_BlitRow32()
+ * portable version is in src/core/SkBlitRow_D32.cpp
+ */
+void S32_Blend_BlitRow32_neon(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha) {
+ SkASSERT(alpha <= 255);
+
+ if (count <= 0) {
+ return;
+ }
+
+ uint16_t src_scale = SkAlpha255To256(alpha);
+ uint16_t dst_scale = 256 - src_scale;
+
+ while (count >= 2) {
+ uint8x8_t vsrc, vdst, vres;
+ uint16x8_t vsrc_wide, vdst_wide;
+
+ /* These commented prefetches are a big win for count
+ * values > 64 on an A9 (Pandaboard) but hurt by 10% for count = 4.
+ * They also hurt a little (<5%) on an A15
+ */
+ //__builtin_prefetch(src+32);
+ //__builtin_prefetch(dst+32);
+
+ // Load
+ vsrc = vreinterpret_u8_u32(vld1_u32(src));
+ vdst = vreinterpret_u8_u32(vld1_u32(dst));
+
+ // Process src
+ vsrc_wide = vmovl_u8(vsrc);
+ vsrc_wide = vmulq_u16(vsrc_wide, vdupq_n_u16(src_scale));
+
+ // Process dst
+ vdst_wide = vmull_u8(vdst, vdup_n_u8(dst_scale));
+
+ // Combine
+#ifdef SK_SUPPORT_LEGACY_BROKEN_LERP
+ vres = vshrn_n_u16(vdst_wide, 8) + vshrn_n_u16(vsrc_wide, 8);
+#else
+ vdst_wide += vsrc_wide;
+ vres = vshrn_n_u16(vdst_wide, 8);
+#endif
+
+ // Store
+ vst1_u32(dst, vreinterpret_u32_u8(vres));
+
+ src += 2;
+ dst += 2;
+ count -= 2;
+ }
+
+ if (count == 1) {
+ uint8x8_t vsrc = vdup_n_u8(0), vdst = vdup_n_u8(0), vres;
+ uint16x8_t vsrc_wide, vdst_wide;
+
+ // Load
+ vsrc = vreinterpret_u8_u32(vld1_lane_u32(src, vreinterpret_u32_u8(vsrc), 0));
+ vdst = vreinterpret_u8_u32(vld1_lane_u32(dst, vreinterpret_u32_u8(vdst), 0));
+
+ // Process
+ vsrc_wide = vmovl_u8(vsrc);
+ vsrc_wide = vmulq_u16(vsrc_wide, vdupq_n_u16(src_scale));
+ vdst_wide = vmull_u8(vdst, vdup_n_u8(dst_scale));
+#ifdef SK_SUPPORT_LEGACY_BROKEN_LERP
+ vres = vshrn_n_u16(vdst_wide, 8) + vshrn_n_u16(vsrc_wide, 8);
+#else
+ vdst_wide += vsrc_wide;
+ vres = vshrn_n_u16(vdst_wide, 8);
+#endif
+
+ // Store
+ vst1_lane_u32(dst, vreinterpret_u32_u8(vres), 0);
+ }
+}
+
+#ifdef SK_CPU_ARM32
+void S32A_Blend_BlitRow32_neon(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha) {
+
+ SkASSERT(255 > alpha);
+
+ if (count <= 0) {
+ return;
+ }
+
+ unsigned alpha256 = SkAlpha255To256(alpha);
+
+ // First deal with odd counts
+ if (count & 1) {
+ uint8x8_t vsrc = vdup_n_u8(0), vdst = vdup_n_u8(0), vres;
+ uint16x8_t vdst_wide, vsrc_wide;
+ unsigned dst_scale;
+
+ // Load
+ vsrc = vreinterpret_u8_u32(vld1_lane_u32(src, vreinterpret_u32_u8(vsrc), 0));
+ vdst = vreinterpret_u8_u32(vld1_lane_u32(dst, vreinterpret_u32_u8(vdst), 0));
+
+ // Calc dst_scale
+ dst_scale = vget_lane_u8(vsrc, 3);
+ dst_scale = SkAlphaMulInv256(dst_scale, alpha256);
+
+ // Process src
+ vsrc_wide = vmovl_u8(vsrc);
+ vsrc_wide = vmulq_n_u16(vsrc_wide, alpha256);
+
+ // Process dst
+ vdst_wide = vmovl_u8(vdst);
+ vdst_wide = vmulq_n_u16(vdst_wide, dst_scale);
+
+ // Combine
+#ifdef SK_SUPPORT_LEGACY_BROKEN_LERP
+ vres = vshrn_n_u16(vdst_wide, 8) + vshrn_n_u16(vsrc_wide, 8);
+#else
+ vdst_wide += vsrc_wide;
+ vres = vshrn_n_u16(vdst_wide, 8);
+#endif
+
+ vst1_lane_u32(dst, vreinterpret_u32_u8(vres), 0);
+ dst++;
+ src++;
+ count--;
+ }
+
+ if (count) {
+ uint8x8_t alpha_mask;
+ static const uint8_t alpha_mask_setup[] = {3,3,3,3,7,7,7,7};
+ alpha_mask = vld1_u8(alpha_mask_setup);
+
+ do {
+
+ uint8x8_t vsrc, vdst, vres, vsrc_alphas;
+ uint16x8_t vdst_wide, vsrc_wide, vsrc_scale, vdst_scale;
+
+ __builtin_prefetch(src+32);
+ __builtin_prefetch(dst+32);
+
+ // Load
+ vsrc = vreinterpret_u8_u32(vld1_u32(src));
+ vdst = vreinterpret_u8_u32(vld1_u32(dst));
+
+ // Prepare src_scale
+ vsrc_scale = vdupq_n_u16(alpha256);
+
+ // Calc dst_scale
+ vsrc_alphas = vtbl1_u8(vsrc, alpha_mask);
+ vdst_scale = vmovl_u8(vsrc_alphas);
+#ifdef SK_SUPPORT_LEGACY_BROKEN_LERP
+ vdst_scale *= vsrc_scale;
+ vdst_scale = vshrq_n_u16(vdst_scale, 8);
+ vdst_scale = vsubq_u16(vdupq_n_u16(256), vdst_scale);
+#else
+ // Calculate SkAlphaMulInv256(vdst_scale, vsrc_scale).
+ // A 16-bit lane would overflow if we used 0xFFFF here,
+ // so use an approximation with 0xFF00 that is off by 1,
+ // and add back 1 after to get the correct value.
+ // This is valid if alpha256 <= 255.
+ vdst_scale = vmlsq_u16(vdupq_n_u16(0xFF00), vdst_scale, vsrc_scale);
+ vdst_scale = vsraq_n_u16(vdst_scale, vdst_scale, 8);
+ vdst_scale = vsraq_n_u16(vdupq_n_u16(1), vdst_scale, 8);
+#endif
+
+ // Process src
+ vsrc_wide = vmovl_u8(vsrc);
+ vsrc_wide *= vsrc_scale;
+
+ // Process dst
+ vdst_wide = vmovl_u8(vdst);
+ vdst_wide *= vdst_scale;
+
+ // Combine
+#ifdef SK_SUPPORT_LEGACY_BROKEN_LERP
+ vres = vshrn_n_u16(vdst_wide, 8) + vshrn_n_u16(vsrc_wide, 8);
+#else
+ vdst_wide += vsrc_wide;
+ vres = vshrn_n_u16(vdst_wide, 8);
+#endif
+
+ vst1_u32(dst, vreinterpret_u32_u8(vres));
+
+ src += 2;
+ dst += 2;
+ count -= 2;
+ } while(count);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#endif // #ifdef SK_CPU_ARM32
+
+void S32A_D565_Opaque_Dither_neon (uint16_t * SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha, int x, int y) {
+ SkASSERT(255 == alpha);
+
+#define UNROLL 8
+
+ if (count >= UNROLL) {
+
+ uint8x8_t dbase;
+ const uint8_t *dstart = &gDitherMatrix_Neon[(y&3)*12 + (x&3)];
+ dbase = vld1_u8(dstart);
+
+ do {
+ uint8x8x4_t vsrc;
+ uint8x8_t sr, sg, sb, sa, d;
+ uint16x8_t dst8, scale8, alpha8;
+ uint16x8_t dst_r, dst_g, dst_b;
+
+#ifdef SK_CPU_ARM64
+ vsrc = sk_vld4_u8_arm64_4(src);
+#else
+ {
+ register uint8x8_t d0 asm("d0");
+ register uint8x8_t d1 asm("d1");
+ register uint8x8_t d2 asm("d2");
+ register uint8x8_t d3 asm("d3");
+
+ asm ("vld4.8 {d0-d3},[%[src]]! "
+ : "=w" (d0), "=w" (d1), "=w" (d2), "=w" (d3), [src] "+r" (src)
+ :
+ );
+ vsrc.val[0] = d0;
+ vsrc.val[1] = d1;
+ vsrc.val[2] = d2;
+ vsrc.val[3] = d3;
+ }
+#endif
+ sa = vsrc.val[NEON_A];
+ sr = vsrc.val[NEON_R];
+ sg = vsrc.val[NEON_G];
+ sb = vsrc.val[NEON_B];
+
+ /* calculate 'd', which will be 0..7
+ * dbase[] is 0..7; alpha is 0..256; 16 bits suffice
+ */
+ alpha8 = vmovl_u8(dbase);
+ alpha8 = vmlal_u8(alpha8, sa, dbase);
+ d = vshrn_n_u16(alpha8, 8); // narrowing too
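+
+            /* The three lines above amount to d = (dbase * (sa + 1)) >> 8,
+             * i.e. SkAlphaMul(dither, SkAlpha255To256(a)), matching the scalar
+             * leftover path at the end of this function.
+             */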
+
+ // sr = sr - (sr>>5) + d
+            /* watch for 8-bit overflow: d is 0..7; the risky range of sr
+             * is >248, but there (sr>>5) is 7, which offsets 'd', so this
+             * is safe as long as we compute (sr - (sr>>5)) + d.
+             */
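+            /* Numerically: with sr = 255 and d = 7, 255 - (255>>5) + 7 = 255
+             * stays in range, whereas adding d first (255 + 7) would wrap an
+             * 8-bit lane.
+             */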
+ sr = vsub_u8(sr, vshr_n_u8(sr, 5));
+ sr = vadd_u8(sr, d);
+
+ // sb = sb - (sb>>5) + d
+ sb = vsub_u8(sb, vshr_n_u8(sb, 5));
+ sb = vadd_u8(sb, d);
+
+            // sg = sg - (sg>>6) + (d>>1); similar overflow logic applies
+ sg = vsub_u8(sg, vshr_n_u8(sg, 6));
+ sg = vadd_u8(sg, vshr_n_u8(d,1));
+
+ // need to pick up 8 dst's -- at 16 bits each, 128 bits
+ dst8 = vld1q_u16(dst);
+ dst_b = vandq_u16(dst8, vdupq_n_u16(SK_B16_MASK));
+ dst_g = vshrq_n_u16(vshlq_n_u16(dst8, SK_R16_BITS), SK_R16_BITS + SK_B16_BITS);
+ dst_r = vshrq_n_u16(dst8, SK_R16_SHIFT); // clearing hi bits
+
+ // blend
+ scale8 = vsubw_u8(vdupq_n_u16(256), sa);
+
+ // combine the addq and mul, save 3 insns
+ scale8 = vshrq_n_u16(scale8, 3);
+ dst_b = vmlaq_u16(vshll_n_u8(sb,2), dst_b, scale8);
+ dst_g = vmlaq_u16(vshll_n_u8(sg,3), dst_g, scale8);
+ dst_r = vmlaq_u16(vshll_n_u8(sr,2), dst_r, scale8);
+
+ // repack to store
+ dst8 = vshrq_n_u16(dst_b, 5);
+ dst8 = vsliq_n_u16(dst8, vshrq_n_u16(dst_g, 5), 5);
+ dst8 = vsliq_n_u16(dst8, vshrq_n_u16(dst_r,5), 11);
+
+ vst1q_u16(dst, dst8);
+
+ dst += UNROLL;
+ count -= UNROLL;
+ // skip x += UNROLL, since it's unchanged mod-4
+ } while (count >= UNROLL);
+ }
+#undef UNROLL
+
+ // residuals
+ if (count > 0) {
+ DITHER_565_SCAN(y);
+ do {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+ if (c) {
+ unsigned a = SkGetPackedA32(c);
+
+                // dither and alpha are just temporary variables used to work
+                // around an internal compiler error (ICE) in debug builds.
+ unsigned dither = DITHER_VALUE(x);
+ unsigned alpha = SkAlpha255To256(a);
+ int d = SkAlphaMul(dither, alpha);
+
+ unsigned sr = SkGetPackedR32(c);
+ unsigned sg = SkGetPackedG32(c);
+ unsigned sb = SkGetPackedB32(c);
+ sr = SkDITHER_R32_FOR_565(sr, d);
+ sg = SkDITHER_G32_FOR_565(sg, d);
+ sb = SkDITHER_B32_FOR_565(sb, d);
+
+ uint32_t src_expanded = (sg << 24) | (sr << 13) | (sb << 2);
+ uint32_t dst_expanded = SkExpand_rgb_16(*dst);
+ dst_expanded = dst_expanded * (SkAlpha255To256(255 - a) >> 3);
+ // now src and dst expanded are in g:11 r:10 x:1 b:10
+ *dst = SkCompact_rgb_16((src_expanded + dst_expanded) >> 5);
+ }
+ dst += 1;
+ DITHER_INC_X(x);
+ } while (--count != 0);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void S32_D565_Opaque_Dither_neon(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha, int x, int y) {
+ SkASSERT(255 == alpha);
+
+#define UNROLL 8
+ if (count >= UNROLL) {
+ uint8x8_t d;
+ const uint8_t *dstart = &gDitherMatrix_Neon[(y&3)*12 + (x&3)];
+ d = vld1_u8(dstart);
+
+ while (count >= UNROLL) {
+ uint8x8_t sr, sg, sb;
+ uint16x8_t dr, dg, db;
+ uint16x8_t dst8;
+ uint8x8x4_t vsrc;
+
+#ifdef SK_CPU_ARM64
+ vsrc = sk_vld4_u8_arm64_3(src);
+#else
+ {
+ register uint8x8_t d0 asm("d0");
+ register uint8x8_t d1 asm("d1");
+ register uint8x8_t d2 asm("d2");
+ register uint8x8_t d3 asm("d3");
+
+ asm (
+ "vld4.8 {d0-d3},[%[src]]! "
+ : "=w" (d0), "=w" (d1), "=w" (d2), "=w" (d3), [src] "+&r" (src)
+ :
+ );
+ vsrc.val[0] = d0;
+ vsrc.val[1] = d1;
+ vsrc.val[2] = d2;
+ }
+#endif
+ sr = vsrc.val[NEON_R];
+ sg = vsrc.val[NEON_G];
+ sb = vsrc.val[NEON_B];
+
+            /* XXX: if we want to prefetch, hide it in the above asm():
+             * with gcc's __builtin_prefetch() the prefetch falls to the
+             * bottom of the loop instead of staying at the top of the
+             * loop, just after the vld4.
+             */
+
+ // sr = sr - (sr>>5) + d
+ sr = vsub_u8(sr, vshr_n_u8(sr, 5));
+ dr = vaddl_u8(sr, d);
+
+ // sb = sb - (sb>>5) + d
+ sb = vsub_u8(sb, vshr_n_u8(sb, 5));
+ db = vaddl_u8(sb, d);
+
+            // sg = sg - (sg>>6) + (d>>1); similar overflow logic applies
+ sg = vsub_u8(sg, vshr_n_u8(sg, 6));
+ dg = vaddl_u8(sg, vshr_n_u8(d, 1));
+
+ // pack high bits of each into 565 format (rgb, b is lsb)
+ dst8 = vshrq_n_u16(db, 3);
+ dst8 = vsliq_n_u16(dst8, vshrq_n_u16(dg, 2), 5);
+ dst8 = vsliq_n_u16(dst8, vshrq_n_u16(dr, 3), 11);
+
+ // store it
+ vst1q_u16(dst, dst8);
+
+ dst += UNROLL;
+ // we don't need to increment src as the asm above has already done it
+ count -= UNROLL;
+ x += UNROLL; // probably superfluous
+ }
+ }
+#undef UNROLL
+
+ // residuals
+ if (count > 0) {
+ DITHER_565_SCAN(y);
+ do {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+ SkASSERT(SkGetPackedA32(c) == 255);
+
+ unsigned dither = DITHER_VALUE(x);
+ *dst++ = SkDitherRGB32To565(c, dither);
+ DITHER_INC_X(x);
+ } while (--count != 0);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+const SkBlitRow::Proc16 sk_blitrow_platform_565_procs_arm_neon[] = {
+ // no dither
+ S32_D565_Opaque_neon,
+ S32_D565_Blend_neon,
+ S32A_D565_Opaque_neon,
+#if 0
+ S32A_D565_Blend_neon,
+#else
+ nullptr, // https://code.google.com/p/skia/issues/detail?id=2797
+#endif
+
+ // dither
+ S32_D565_Opaque_Dither_neon,
+ S32_D565_Blend_Dither_neon,
+ S32A_D565_Opaque_Dither_neon,
+ nullptr, // S32A_D565_Blend_Dither
+};
+
+const SkBlitRow::ColorProc16 sk_blitrow_platform_565_colorprocs_arm_neon[] = {
+ Color32A_D565_neon, // Color32_D565,
+ Color32A_D565_neon, // Color32A_D565,
+ Color32A_D565_neon, // Color32_D565_Dither,
+ Color32A_D565_neon, // Color32A_D565_Dither
+};
+
+const SkBlitRow::Proc32 sk_blitrow_platform_32_procs_arm_neon[] = {
+ nullptr, // S32_Opaque,
+ S32_Blend_BlitRow32_neon, // S32_Blend,
+ nullptr, // Ported to SkOpts
+#ifdef SK_CPU_ARM32
+ S32A_Blend_BlitRow32_neon // S32A_Blend
+#else
+ nullptr
+#endif
+};
diff --git a/gfx/skia/skia/src/opts/SkBlitRow_opts_arm_neon.h b/gfx/skia/skia/src/opts/SkBlitRow_opts_arm_neon.h
new file mode 100644
index 000000000..159a466c9
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBlitRow_opts_arm_neon.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkBlitRow_opts_arm_neon_DEFINED
+#define SkBlitRow_opts_arm_neon_DEFINED
+
+#include "SkBlitRow.h"
+
+extern const SkBlitRow::Proc16 sk_blitrow_platform_565_procs_arm_neon[];
+extern const SkBlitRow::ColorProc16 sk_blitrow_platform_565_colorprocs_arm_neon[];
+extern const SkBlitRow::Proc32 sk_blitrow_platform_32_procs_arm_neon[];
+
+#endif
diff --git a/gfx/skia/skia/src/opts/SkBlitRow_opts_mips_dsp.cpp b/gfx/skia/skia/src/opts/SkBlitRow_opts_mips_dsp.cpp
new file mode 100644
index 000000000..38922b2ea
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBlitRow_opts_mips_dsp.cpp
@@ -0,0 +1,958 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBlitRow.h"
+#include "SkBlitMask.h"
+#include "SkColorPriv.h"
+#include "SkDither.h"
+#include "SkMathPriv.h"
+
+static void S32_D565_Blend_mips_dsp(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src, int count,
+ U8CPU alpha, int /*x*/, int /*y*/) {
+ uint32_t t0, t1, t2, t3, t4, t5, t6;
+ uint32_t s0, s1, s2, s4, s5, s6;
+
+ alpha += 1;
+ if (count >= 2) {
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "sll %[s4], %[alpha], 8 \n\t"
+ "or %[s4], %[s4], %[alpha] \n\t"
+ "repl.ph %[s5], 0x1f \n\t"
+ "repl.ph %[s6], 0x3f \n\t"
+ "1: \n\t"
+ "lw %[s2], 0(%[src]) \n\t"
+ "lw %[s1], 4(%[src]) \n\t"
+ "lwr %[s0], 0(%[dst]) \n\t"
+ "lwl %[s0], 3(%[dst]) \n\t"
+ "and %[t1], %[s0], %[s5] \n\t"
+ "shra.ph %[t0], %[s0], 5 \n\t"
+ "and %[t2], %[t0], %[s6] \n\t"
+#ifdef __mips_dspr2
+ "shrl.ph %[t3], %[s0], 11 \n\t"
+#else
+ "shra.ph %[t0], %[s0], 11 \n\t"
+ "and %[t3], %[t0], %[s5] \n\t"
+#endif
+ "precrq.ph.w %[t0], %[s1], %[s2] \n\t"
+ "shrl.qb %[t5], %[t0], 3 \n\t"
+ "and %[t4], %[t5], %[s5] \n\t"
+ "ins %[s2], %[s1], 16, 16 \n\t"
+ "preceu.ph.qbra %[t0], %[s2] \n\t"
+ "shrl.qb %[t6], %[t0], 3 \n\t"
+#ifdef __mips_dspr2
+ "shrl.ph %[t5], %[s2], 10 \n\t"
+#else
+ "shra.ph %[t0], %[s2], 10 \n\t"
+ "and %[t5], %[t0], %[s6] \n\t"
+#endif
+ "subu.qb %[t4], %[t4], %[t1] \n\t"
+ "subu.qb %[t5], %[t5], %[t2] \n\t"
+ "subu.qb %[t6], %[t6], %[t3] \n\t"
+ "muleu_s.ph.qbr %[t4], %[s4], %[t4] \n\t"
+ "muleu_s.ph.qbr %[t5], %[s4], %[t5] \n\t"
+ "muleu_s.ph.qbr %[t6], %[s4], %[t6] \n\t"
+ "addiu %[count], %[count], -2 \n\t"
+ "addiu %[src], %[src], 8 \n\t"
+ "shra.ph %[t4], %[t4], 8 \n\t"
+ "shra.ph %[t5], %[t5], 8 \n\t"
+ "shra.ph %[t6], %[t6], 8 \n\t"
+ "addu.qb %[t4], %[t4], %[t1] \n\t"
+ "addu.qb %[t5], %[t5], %[t2] \n\t"
+ "addu.qb %[t6], %[t6], %[t3] \n\t"
+ "andi %[s0], %[t4], 0xffff \n\t"
+ "andi %[t0], %[t5], 0xffff \n\t"
+ "sll %[t0], %[t0], 0x5 \n\t"
+ "or %[s0], %[s0], %[t0] \n\t"
+ "sll %[t0], %[t6], 0xb \n\t"
+ "or %[t0], %[t0], %[s0] \n\t"
+ "sh %[t0], 0(%[dst]) \n\t"
+ "srl %[s1], %[t4], 16 \n\t"
+ "srl %[t0], %[t5], 16 \n\t"
+ "sll %[t5], %[t0], 5 \n\t"
+ "or %[t0], %[t5], %[s1] \n\t"
+ "srl %[s0], %[t6], 16 \n\t"
+ "sll %[s2], %[s0], 0xb \n\t"
+ "or %[s1], %[s2], %[t0] \n\t"
+ "sh %[s1], 2(%[dst]) \n\t"
+ "bge %[count], 2, 1b \n\t"
+ " addiu %[dst], %[dst], 4 \n\t"
+ ".set pop \n\t"
+ : [t0]"=&r"(t0), [t1]"=&r"(t1), [t2]"=&r"(t2), [t3]"=&r"(t3),
+ [t4]"=&r"(t4), [t5]"=&r"(t5), [t6]"=&r"(t6), [s0]"=&r"(s0),
+ [s1]"=&r"(s1), [s2]"=&r"(s2), [s4]"=&r"(s4), [s5]"=&r"(s5),
+ [s6]"=&r"(s6), [count]"+r"(count), [dst]"+r"(dst),
+ [src]"+r"(src)
+ : [alpha]"r"(alpha)
+ : "memory", "hi", "lo"
+ );
+ }
+
+ if (count == 1) {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+ SkASSERT(SkGetPackedA32(c) == 255);
+ uint16_t d = *dst;
+ *dst++ = SkPackRGB16(SkAlphaBlend(SkPacked32ToR16(c), SkGetPackedR16(d), alpha),
+ SkAlphaBlend(SkPacked32ToG16(c), SkGetPackedG16(d), alpha),
+ SkAlphaBlend(SkPacked32ToB16(c), SkGetPackedB16(d), alpha));
+ }
+}
+
+static void S32A_D565_Opaque_Dither_mips_dsp(uint16_t* __restrict__ dst,
+ const SkPMColor* __restrict__ src,
+ int count, U8CPU alpha, int x, int y) {
+ __asm__ volatile (
+ "pref 0, 0(%[src]) \n\t"
+ "pref 1, 0(%[dst]) \n\t"
+ "pref 0, 32(%[src]) \n\t"
+ "pref 1, 32(%[dst]) \n\t"
+ :
+ : [src]"r"(src), [dst]"r"(dst)
+ : "memory"
+ );
+
+ int32_t t0, t1, t2, t3, t4, t5, t6;
+ int32_t t7, t8, t9, s0, s1, s2, s3;
+ const uint16_t dither_scan = gDitherMatrix_3Bit_16[(y) & 3];
+
+ if (count >= 2) {
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "li %[s1], 0x01010101 \n\t"
+ "li %[s2], -2017 \n\t"
+ "1: \n\t"
+ "bnez %[s3], 4f \n\t"
+ " li %[s3], 2 \n\t"
+ "pref 0, 64(%[src]) \n\t"
+ "pref 1, 64(%[dst]) \n\t"
+ "4: \n\t"
+ "addiu %[s3], %[s3], -1 \n\t"
+ "lw %[t1], 0(%[src]) \n\t"
+ "andi %[t3], %[x], 0x3 \n\t"
+ "addiu %[x], %[x], 1 \n\t"
+ "sll %[t4], %[t3], 2 \n\t"
+ "srav %[t5], %[dither_scan], %[t4] \n\t"
+ "andi %[t3], %[t5], 0xf \n\t"
+ "lw %[t2], 4(%[src]) \n\t"
+ "andi %[t4], %[x], 0x3 \n\t"
+ "sll %[t5], %[t4], 2 \n\t"
+ "srav %[t6], %[dither_scan], %[t5] \n\t"
+ "addiu %[x], %[x], 1 \n\t"
+ "ins %[t3], %[t6], 8, 4 \n\t"
+ "srl %[t4], %[t1], 24 \n\t"
+ "addiu %[t0], %[t4], 1 \n\t"
+ "srl %[t4], %[t2], 24 \n\t"
+ "addiu %[t5], %[t4], 1 \n\t"
+ "ins %[t0], %[t5], 16, 16 \n\t"
+ "muleu_s.ph.qbr %[t4], %[t3], %[t0] \n\t"
+ "preceu.ph.qbla %[t3], %[t4] \n\t"
+ "andi %[t4], %[t1], 0xff \n\t"
+ "ins %[t4], %[t2], 16, 8 \n\t"
+ "shrl.qb %[t5], %[t4], 5 \n\t"
+ "subu.qb %[t6], %[t3], %[t5] \n\t"
+ "addq.ph %[t5], %[t6], %[t4] \n\t"
+ "ext %[t4], %[t1], 8, 8 \n\t"
+ "srl %[t6], %[t2], 8 \n\t"
+ "ins %[t4], %[t6], 16, 8 \n\t"
+ "shrl.qb %[t6], %[t4], 6 \n\t"
+ "shrl.qb %[t7], %[t3], 1 \n\t"
+ "subu.qb %[t8], %[t7], %[t6] \n\t"
+ "addq.ph %[t6], %[t8], %[t4] \n\t"
+ "ext %[t4], %[t1], 16, 8 \n\t"
+ "srl %[t7], %[t2], 16 \n\t"
+ "ins %[t4], %[t7], 16, 8 \n\t"
+ "shrl.qb %[t7], %[t4], 5 \n\t"
+ "subu.qb %[t8], %[t3], %[t7] \n\t"
+ "addq.ph %[t7], %[t8], %[t4] \n\t"
+ "shll.ph %[t4], %[t7], 2 \n\t"
+ "andi %[t9], %[t4], 0xffff \n\t"
+ "srl %[s0], %[t4], 16 \n\t"
+ "andi %[t3], %[t6], 0xffff \n\t"
+ "srl %[t4], %[t6], 16 \n\t"
+ "andi %[t6], %[t5], 0xffff \n\t"
+ "srl %[t7], %[t5], 16 \n\t"
+ "subq.ph %[t5], %[s1], %[t0] \n\t"
+ "srl %[t0], %[t5], 3 \n\t"
+ "beqz %[t1], 3f \n\t"
+ " lhu %[t5], 0(%[dst]) \n\t"
+ "sll %[t1], %[t6], 13 \n\t"
+ "or %[t8], %[t9], %[t1] \n\t"
+ "sll %[t1], %[t3], 24 \n\t"
+ "or %[t9], %[t1], %[t8] \n\t"
+ "andi %[t3], %[t5], 0x7e0 \n\t"
+ "sll %[t6], %[t3], 0x10 \n\t"
+ "and %[t8], %[s2], %[t5] \n\t"
+ "or %[t5], %[t6], %[t8] \n\t"
+ "andi %[t6], %[t0], 0xff \n\t"
+ "mul %[t1], %[t6], %[t5] \n\t"
+ "addu %[t5], %[t1], %[t9] \n\t"
+ "srl %[t6], %[t5], 5 \n\t"
+ "and %[t5], %[s2], %[t6] \n\t"
+ "srl %[t8], %[t6], 16 \n\t"
+ "andi %[t6], %[t8], 0x7e0 \n\t"
+ "or %[t1], %[t5], %[t6] \n\t"
+ "sh %[t1], 0(%[dst]) \n\t"
+ "3: \n\t"
+ "beqz %[t2], 2f \n\t"
+ " lhu %[t5], 2(%[dst]) \n\t"
+ "sll %[t1], %[t7], 13 \n\t"
+ "or %[t8], %[s0], %[t1] \n\t"
+ "sll %[t1], %[t4], 24 \n\t"
+ "or %[t9], %[t1], %[t8] \n\t"
+ "andi %[t3], %[t5], 0x7e0 \n\t"
+ "sll %[t6], %[t3], 0x10 \n\t"
+ "and %[t8], %[s2], %[t5] \n\t"
+ "or %[t5], %[t6], %[t8] \n\t"
+ "srl %[t6], %[t0], 16 \n\t"
+ "mul %[t1], %[t6], %[t5] \n\t"
+ "addu %[t5], %[t1], %[t9] \n\t"
+ "srl %[t6], %[t5], 5 \n\t"
+ "and %[t5], %[s2], %[t6] \n\t"
+ "srl %[t8], %[t6], 16 \n\t"
+ "andi %[t6], %[t8], 0x7e0 \n\t"
+ "or %[t1], %[t5], %[t6] \n\t"
+ "sh %[t1], 2(%[dst]) \n\t"
+ "2: \n\t"
+ "addiu %[count], %[count], -2 \n\t"
+ "addiu %[src], %[src], 8 \n\t"
+ "addiu %[t1], %[count], -1 \n\t"
+ "bgtz %[t1], 1b \n\t"
+ " addiu %[dst], %[dst], 4 \n\t"
+ ".set pop \n\t"
+ : [src]"+r"(src), [count]"+r"(count), [dst]"+r"(dst), [x]"+r"(x),
+ [t0]"=&r"(t0), [t1]"=&r"(t1), [t2]"=&r"(t2), [t3]"=&r"(t3),
+ [t4]"=&r"(t4), [t5]"=&r"(t5), [t6]"=&r"(t6), [t7]"=&r"(t7),
+ [t8]"=&r"(t8), [t9]"=&r"(t9), [s0]"=&r"(s0), [s1]"=&r"(s1),
+ [s2]"=&r"(s2), [s3]"=&r"(s3)
+ : [dither_scan]"r"(dither_scan)
+ : "memory", "hi", "lo"
+ );
+ }
+
+ if (count == 1) {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+ if (c) {
+ unsigned a = SkGetPackedA32(c);
+ int d = SkAlphaMul(DITHER_VALUE(x), SkAlpha255To256(a));
+
+ unsigned sr = SkGetPackedR32(c);
+ unsigned sg = SkGetPackedG32(c);
+ unsigned sb = SkGetPackedB32(c);
+ sr = SkDITHER_R32_FOR_565(sr, d);
+ sg = SkDITHER_G32_FOR_565(sg, d);
+ sb = SkDITHER_B32_FOR_565(sb, d);
+
+ uint32_t src_expanded = (sg << 24) | (sr << 13) | (sb << 2);
+ uint32_t dst_expanded = SkExpand_rgb_16(*dst);
+ dst_expanded = dst_expanded * (SkAlpha255To256(255 - a) >> 3);
+ // now src and dst expanded are in g:11 r:10 x:1 b:10
+ *dst = SkCompact_rgb_16((src_expanded + dst_expanded) >> 5);
+ }
+ dst += 1;
+ DITHER_INC_X(x);
+ }
+}
+
+static void S32_D565_Opaque_Dither_mips_dsp(uint16_t* __restrict__ dst,
+ const SkPMColor* __restrict__ src,
+ int count, U8CPU alpha, int x, int y) {
+ uint16_t dither_scan = gDitherMatrix_3Bit_16[(y) & 3];
+ uint32_t t0, t1, t2, t3, t4, t5;
+ uint32_t t6, t7, t8, t9, s0;
+ int dither[4];
+ int i;
+
+ for (i = 0; i < 4; i++, x++) {
+ dither[i] = (dither_scan >> ((x & 3) << 2)) & 0xF;
+ }
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "li %[s0], 1 \n\t"
+ "2: \n\t"
+ "beqz %[count], 1f \n\t"
+ " nop \n\t"
+ "addiu %[t0], %[count], -1 \n\t"
+ "beqz %[t0], 1f \n\t"
+ " nop \n\t"
+ "beqz %[s0], 3f \n\t"
+ " nop \n\t"
+ "lw %[t0], 0(%[dither]) \n\t"
+ "lw %[t1], 4(%[dither]) \n\t"
+ "li %[s0], 0 \n\t"
+ "b 4f \n\t"
+ " nop \n\t"
+ "3: \n\t"
+ "lw %[t0], 8(%[dither]) \n\t"
+ "lw %[t1], 12(%[dither]) \n\t"
+ "li %[s0], 1 \n\t"
+ "4: \n\t"
+ "sll %[t2], %[t0], 16 \n\t"
+ "or %[t1], %[t2], %[t1] \n\t"
+ "lw %[t0], 0(%[src]) \n\t"
+ "lw %[t2], 4(%[src]) \n\t"
+ "precrq.ph.w %[t3], %[t0], %[t2] \n\t"
+ "preceu.ph.qbra %[t9], %[t3] \n\t"
+#ifdef __mips_dspr2
+ "append %[t0], %[t2], 16 \n\t"
+ "preceu.ph.qbra %[t4], %[t0] \n\t"
+ "preceu.ph.qbla %[t5], %[t0] \n\t"
+#else
+ "sll %[t6], %[t0], 16 \n\t"
+ "sll %[t7], %[t2], 16 \n\t"
+ "precrq.ph.w %[t8], %[t6], %[t7] \n\t"
+ "preceu.ph.qbra %[t4], %[t8] \n\t"
+ "preceu.ph.qbla %[t5], %[t8] \n\t"
+#endif
+ "addu.qb %[t0], %[t4], %[t1] \n\t"
+ "shra.ph %[t2], %[t4], 5 \n\t"
+ "subu.qb %[t3], %[t0], %[t2] \n\t"
+ "shra.ph %[t6], %[t3], 3 \n\t"
+ "addu.qb %[t0], %[t9], %[t1] \n\t"
+ "shra.ph %[t2], %[t9], 5 \n\t"
+ "subu.qb %[t3], %[t0], %[t2] \n\t"
+ "shra.ph %[t7], %[t3], 3 \n\t"
+ "shra.ph %[t0], %[t1], 1 \n\t"
+ "shra.ph %[t2], %[t5], 6 \n\t"
+ "addu.qb %[t3], %[t5], %[t0] \n\t"
+ "subu.qb %[t4], %[t3], %[t2] \n\t"
+ "shra.ph %[t8], %[t4], 2 \n\t"
+ "precrq.ph.w %[t0], %[t6], %[t7] \n\t"
+#ifdef __mips_dspr2
+ "append %[t6], %[t7], 16 \n\t"
+#else
+ "sll %[t6], %[t6], 16 \n\t"
+ "sll %[t2], %[t7], 16 \n\t"
+ "precrq.ph.w %[t6], %[t6], %[t2] \n\t"
+#endif
+ "sra %[t4], %[t8], 16 \n\t"
+ "andi %[t5], %[t8], 0xFF \n\t"
+ "sll %[t7], %[t4], 5 \n\t"
+ "sra %[t8], %[t0], 5 \n\t"
+ "or %[t9], %[t7], %[t8] \n\t"
+ "or %[t3], %[t9], %[t0] \n\t"
+ "andi %[t4], %[t3], 0xFFFF \n\t"
+ "sll %[t7], %[t5], 5 \n\t"
+ "sra %[t8], %[t6], 5 \n\t"
+ "or %[t9], %[t7], %[t8] \n\t"
+ "or %[t3], %[t9], %[t6] \n\t"
+ "and %[t7], %[t3], 0xFFFF \n\t"
+ "sh %[t4], 0(%[dst]) \n\t"
+ "sh %[t7], 2(%[dst]) \n\t"
+ "addiu %[count], %[count], -2 \n\t"
+ "addiu %[src], %[src], 8 \n\t"
+ "b 2b \n\t"
+ " addiu %[dst], %[dst], 4 \n\t"
+ "1: \n\t"
+ ".set pop \n\t"
+ : [dst]"+r"(dst), [src]"+r"(src), [count]"+r"(count),
+ [x]"+r"(x), [t0]"=&r"(t0), [t1]"=&r"(t1), [t2]"=&r"(t2),
+ [t3]"=&r"(t3), [t4]"=&r"(t4), [t5]"=&r"(t5), [t6]"=&r"(t6),
+ [t7]"=&r"(t7), [t8]"=&r"(t8), [t9]"=&r"(t9), [s0]"=&r"(s0)
+ : [dither] "r" (dither)
+ : "memory"
+ );
+
+ if (count == 1) {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c); // only if DEBUG is turned on
+ SkASSERT(SkGetPackedA32(c) == 255);
+ unsigned dither = DITHER_VALUE(x);
+ *dst++ = SkDitherRGB32To565(c, dither);
+ }
+}
+
+static void S32_D565_Blend_Dither_mips_dsp(uint16_t* dst,
+ const SkPMColor* src,
+ int count, U8CPU alpha, int x, int y) {
+ int32_t t0, t1, t2, t3, t4, t5, t6;
+ int32_t s0, s1, s2, s3;
+ int x1 = 0;
+ uint32_t sc_mul;
+ uint32_t sc_add;
+#ifdef ENABLE_DITHER_MATRIX_4X4
+ const uint8_t* dither_scan = gDitherMatrix_3Bit_4X4[(y) & 3];
+#else // ENABLE_DITHER_MATRIX_4X4
+ const uint16_t dither_scan = gDitherMatrix_3Bit_16[(y) & 3];
+#endif // ENABLE_DITHER_MATRIX_4X4
+ int dither[4];
+
+ for (int i = 0; i < 4; i++) {
+ dither[i] = (dither_scan >> ((x & 3) << 2)) & 0xF;
+ x += 1;
+ }
+ alpha += 1;
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "li %[t0], 0x100 \n\t"
+ "subu %[t0], %[t0], %[alpha] \n\t"
+ "replv.ph %[sc_mul], %[alpha] \n\t"
+ "beqz %[alpha], 1f \n\t"
+ " nop \n\t"
+ "replv.qb %[sc_add], %[t0] \n\t"
+ "b 2f \n\t"
+ " nop \n\t"
+ "1: \n\t"
+ "replv.qb %[sc_add], %[alpha] \n\t"
+ "2: \n\t"
+ "addiu %[t2], %[count], -1 \n\t"
+ "blez %[t2], 3f \n\t"
+ " nop \n\t"
+ "lw %[s0], 0(%[src]) \n\t"
+ "lw %[s1], 4(%[src]) \n\t"
+ "bnez %[x1], 4f \n\t"
+ " nop \n\t"
+ "lw %[t0], 0(%[dither]) \n\t"
+ "lw %[t1], 4(%[dither]) \n\t"
+ "li %[x1], 1 \n\t"
+ "b 5f \n\t"
+ " nop \n\t"
+ "4: \n\t"
+ "lw %[t0], 8(%[dither]) \n\t"
+ "lw %[t1], 12(%[dither]) \n\t"
+ "li %[x1], 0 \n\t"
+ "5: \n\t"
+ "sll %[t3], %[t0], 7 \n\t"
+ "sll %[t4], %[t1], 7 \n\t"
+#ifdef __mips_dspr2
+ "append %[t0], %[t1], 16 \n\t"
+#else
+ "sll %[t0], %[t0], 8 \n\t"
+ "sll %[t2], %[t1], 8 \n\t"
+ "precrq.qb.ph %[t0], %[t0], %[t2] \n\t"
+#endif
+ "precrq.qb.ph %[t1], %[t3], %[t4] \n\t"
+ "sll %[t5], %[s0], 8 \n\t"
+ "sll %[t6], %[s1], 8 \n\t"
+ "precrq.qb.ph %[t4], %[t5], %[t6] \n\t"
+ "precrq.qb.ph %[t6], %[s0], %[s1] \n\t"
+ "preceu.ph.qbla %[t5], %[t4] \n\t"
+ "preceu.ph.qbra %[t4], %[t4] \n\t"
+ "preceu.ph.qbra %[t6], %[t6] \n\t"
+ "lh %[t2], 0(%[dst]) \n\t"
+ "lh %[s1], 2(%[dst]) \n\t"
+#ifdef __mips_dspr2
+ "append %[t2], %[s1], 16 \n\t"
+#else
+ "sll %[s1], %[s1], 16 \n\t"
+ "packrl.ph %[t2], %[t2], %[s1] \n\t"
+#endif
+ "shra.ph %[s1], %[t2], 11 \n\t"
+ "and %[s1], %[s1], 0x1F001F \n\t"
+ "shra.ph %[s2], %[t2], 5 \n\t"
+ "and %[s2], %[s2], 0x3F003F \n\t"
+ "and %[s3], %[t2], 0x1F001F \n\t"
+ "shrl.qb %[t3], %[t4], 5 \n\t"
+ "addu.qb %[t4], %[t4], %[t0] \n\t"
+ "subu.qb %[t4], %[t4], %[t3] \n\t"
+ "shrl.qb %[t4], %[t4], 3 \n\t"
+ "shrl.qb %[t3], %[t5], 5 \n\t"
+ "addu.qb %[t5], %[t5], %[t0] \n\t"
+ "subu.qb %[t5], %[t5], %[t3] \n\t"
+ "shrl.qb %[t5], %[t5], 3 \n\t"
+ "shrl.qb %[t3], %[t6], 6 \n\t"
+ "addu.qb %[t6], %[t6], %[t1] \n\t"
+ "subu.qb %[t6], %[t6], %[t3] \n\t"
+ "shrl.qb %[t6], %[t6], 2 \n\t"
+ "cmpu.lt.qb %[t4], %[s1] \n\t"
+ "pick.qb %[s0], %[sc_add], $0 \n\t"
+ "addu.qb %[s0], %[s0], %[s1] \n\t"
+ "subu.qb %[t4], %[t4], %[s1] \n\t"
+ "muleu_s.ph.qbl %[t0], %[t4], %[sc_mul] \n\t"
+ "muleu_s.ph.qbr %[t1], %[t4], %[sc_mul] \n\t"
+ "precrq.qb.ph %[t4], %[t0], %[t1] \n\t"
+ "addu.qb %[t4], %[t4], %[s0] \n\t"
+ "cmpu.lt.qb %[t5], %[s3] \n\t"
+ "pick.qb %[s0], %[sc_add], $0 \n\t"
+ "addu.qb %[s0], %[s0], %[s3] \n\t"
+ "subu.qb %[t5], %[t5], %[s3] \n\t"
+ "muleu_s.ph.qbl %[t0], %[t5], %[sc_mul] \n\t"
+ "muleu_s.ph.qbr %[t1], %[t5], %[sc_mul] \n\t"
+ "precrq.qb.ph %[t5], %[t0], %[t1] \n\t"
+ "addu.qb %[t5], %[t5], %[s0] \n\t"
+ "cmpu.lt.qb %[t6], %[s2] \n\t"
+ "pick.qb %[s0], %[sc_add], $0 \n\t"
+ "addu.qb %[s0], %[s0], %[s2] \n\t"
+ "subu.qb %[t6], %[t6], %[s2] \n\t"
+ "muleu_s.ph.qbl %[t0], %[t6], %[sc_mul] \n\t"
+ "muleu_s.ph.qbr %[t1], %[t6], %[sc_mul] \n\t"
+ "precrq.qb.ph %[t6], %[t0], %[t1] \n\t"
+ "addu.qb %[t6], %[t6], %[s0] \n\t"
+ "shll.ph %[s1], %[t4], 11 \n\t"
+ "shll.ph %[t0], %[t6], 5 \n\t"
+ "or %[s0], %[s1], %[t0] \n\t"
+ "or %[s1], %[s0], %[t5] \n\t"
+ "srl %[t2], %[s1], 16 \n\t"
+ "and %[t3], %[s1], 0xFFFF \n\t"
+ "sh %[t2], 0(%[dst]) \n\t"
+ "sh %[t3], 2(%[dst]) \n\t"
+ "addiu %[src], %[src], 8 \n\t"
+ "addi %[count], %[count], -2 \n\t"
+ "b 2b \n\t"
+ " addu %[dst], %[dst], 4 \n\t"
+ "3: \n\t"
+ ".set pop \n\t"
+ : [src]"+r"(src), [dst]"+r"(dst), [count]"+r"(count),
+ [x1]"+r"(x1), [sc_mul]"=&r"(sc_mul), [sc_add]"=&r"(sc_add),
+ [t0]"=&r"(t0), [t1]"=&r"(t1), [t2]"=&r"(t2), [t3]"=&r"(t3),
+ [t4]"=&r"(t4), [t5]"=&r"(t5), [t6]"=&r"(t6), [s0]"=&r"(s0),
+ [s1]"=&r"(s1), [s2]"=&r"(s2), [s3]"=&r"(s3)
+ : [dither]"r"(dither), [alpha]"r"(alpha)
+ : "memory", "hi", "lo"
+ );
+
+    if (count == 1) {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+ SkASSERT(SkGetPackedA32(c) == 255);
+ DITHER_565_SCAN(y);
+ int dither = DITHER_VALUE(x);
+ int sr = SkGetPackedR32(c);
+ int sg = SkGetPackedG32(c);
+ int sb = SkGetPackedB32(c);
+ sr = SkDITHER_R32To565(sr, dither);
+ sg = SkDITHER_G32To565(sg, dither);
+ sb = SkDITHER_B32To565(sb, dither);
+
+ uint16_t d = *dst;
+ *dst++ = SkPackRGB16(SkAlphaBlend(sr, SkGetPackedR16(d), alpha),
+ SkAlphaBlend(sg, SkGetPackedG16(d), alpha),
+ SkAlphaBlend(sb, SkGetPackedB16(d), alpha));
+ DITHER_INC_X(x);
+ }
+}
+
+static void S32A_D565_Opaque_mips_dsp(uint16_t* __restrict__ dst,
+ const SkPMColor* __restrict__ src,
+ int count, U8CPU alpha, int x, int y) {
+
+ __asm__ volatile (
+ "pref 0, 0(%[src]) \n\t"
+ "pref 1, 0(%[dst]) \n\t"
+ "pref 0, 32(%[src]) \n\t"
+ "pref 1, 32(%[dst]) \n\t"
+ :
+ : [src]"r"(src), [dst]"r"(dst)
+ : "memory"
+ );
+
+ uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8;
+ uint32_t t16;
+ uint32_t add_x10 = 0x100010;
+ uint32_t add_x20 = 0x200020;
+ uint32_t sa = 0xff00ff;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "blez %[count], 1f \n\t"
+ " nop \n\t"
+ "2: \n\t"
+ "beqz %[count], 1f \n\t"
+ " nop \n\t"
+ "addiu %[t0], %[count], -1 \n\t"
+ "beqz %[t0], 1f \n\t"
+ " nop \n\t"
+ "bnez %[t16], 3f \n\t"
+ " nop \n\t"
+ "li %[t16], 2 \n\t"
+ "pref 0, 64(%[src]) \n\t"
+ "pref 1, 64(%[dst]) \n\t"
+ "3: \n\t"
+ "addiu %[t16], %[t16], -1 \n\t"
+ "lw %[t0], 0(%[src]) \n\t"
+ "lw %[t1], 4(%[src]) \n\t"
+ "precrq.ph.w %[t2], %[t0], %[t1] \n\t"
+ "preceu.ph.qbra %[t8], %[t2] \n\t"
+#ifdef __mips_dspr2
+ "append %[t0], %[t1], 16 \n\t"
+#else
+ "sll %[t0], %[t0], 16 \n\t"
+ "sll %[t6], %[t1], 16 \n\t"
+ "precrq.ph.w %[t0], %[t0], %[t6] \n\t"
+#endif
+ "preceu.ph.qbra %[t3], %[t0] \n\t"
+ "preceu.ph.qbla %[t4], %[t0] \n\t"
+ "preceu.ph.qbla %[t0], %[t2] \n\t"
+ "subq.ph %[t1], %[sa], %[t0] \n\t"
+ "sra %[t2], %[t1], 8 \n\t"
+ "or %[t5], %[t2], %[t1] \n\t"
+ "replv.ph %[t2], %[t5] \n\t"
+ "lh %[t0], 0(%[dst]) \n\t"
+ "lh %[t1], 2(%[dst]) \n\t"
+ "and %[t1], %[t1], 0xffff \n\t"
+#ifdef __mips_dspr2
+ "append %[t0], %[t1], 16 \n\t"
+#else
+ "sll %[t5], %[t0], 16 \n\t"
+ "or %[t0], %[t5], %[t1] \n\t"
+#endif
+ "and %[t1], %[t0], 0x1f001f \n\t"
+ "shra.ph %[t6], %[t0], 11 \n\t"
+ "and %[t6], %[t6], 0x1f001f \n\t"
+ "and %[t7], %[t0], 0x7e007e0 \n\t"
+ "shra.ph %[t5], %[t7], 5 \n\t"
+ "muleu_s.ph.qbl %[t0], %[t2], %[t6] \n\t"
+ "addq.ph %[t7], %[t0], %[add_x10] \n\t"
+ "shra.ph %[t6], %[t7], 5 \n\t"
+ "addq.ph %[t6], %[t7], %[t6] \n\t"
+ "shra.ph %[t0], %[t6], 5 \n\t"
+ "addq.ph %[t7], %[t0], %[t3] \n\t"
+ "shra.ph %[t6], %[t7], 3 \n\t"
+ "muleu_s.ph.qbl %[t0], %[t2], %[t1] \n\t"
+ "addq.ph %[t7], %[t0], %[add_x10] \n\t"
+ "shra.ph %[t0], %[t7], 5 \n\t"
+ "addq.ph %[t7], %[t7], %[t0] \n\t"
+ "shra.ph %[t0], %[t7], 5 \n\t"
+ "addq.ph %[t7], %[t0], %[t8] \n\t"
+ "shra.ph %[t3], %[t7], 3 \n\t"
+ "muleu_s.ph.qbl %[t0], %[t2], %[t5] \n\t"
+ "addq.ph %[t7], %[t0], %[add_x20] \n\t"
+ "shra.ph %[t0], %[t7], 6 \n\t"
+ "addq.ph %[t8], %[t7], %[t0] \n\t"
+ "shra.ph %[t0], %[t8], 6 \n\t"
+ "addq.ph %[t7], %[t0], %[t4] \n\t"
+ "shra.ph %[t8], %[t7], 2 \n\t"
+ "shll.ph %[t0], %[t8], 5 \n\t"
+ "shll.ph %[t1], %[t6], 11 \n\t"
+ "or %[t2], %[t0], %[t1] \n\t"
+ "or %[t3], %[t2], %[t3] \n\t"
+ "sra %[t4], %[t3], 16 \n\t"
+ "sh %[t4], 0(%[dst]) \n\t"
+ "sh %[t3], 2(%[dst]) \n\t"
+ "addiu %[count], %[count], -2 \n\t"
+ "addiu %[src], %[src], 8 \n\t"
+ "b 2b \n\t"
+ " addiu %[dst], %[dst], 4 \n\t"
+ "1: \n\t"
+ ".set pop \n\t"
+ : [dst]"+r"(dst), [src]"+r"(src), [count]"+r"(count),
+ [t16]"=&r"(t16), [t0]"=&r"(t0), [t1]"=&r"(t1), [t2]"=&r"(t2),
+ [t3]"=&r"(t3), [t4]"=&r"(t4), [t5]"=&r"(t5), [t6]"=&r"(t6),
+ [t7]"=&r"(t7), [t8]"=&r"(t8)
+ : [add_x10]"r"(add_x10), [add_x20]"r"(add_x20), [sa]"r"(sa)
+ : "memory", "hi", "lo"
+ );
+
+ if (count == 1) {
+ SkPMColor c = *src++;
+ SkPMColorAssert(c);
+ if (c) {
+ *dst = SkSrcOver32To16(c, *dst);
+ }
+ dst += 1;
+ }
+}
+
+static void S32A_D565_Blend_mips_dsp(uint16_t* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src, int count,
+ U8CPU alpha, int /*x*/, int /*y*/) {
+ uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9;
+ uint32_t s0, s1, s2, s3;
+ unsigned dst_scale = 0;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "replv.qb %[t0], %[alpha] \n\t"
+ "repl.ph %[t6], 0x80 \n\t"
+ "repl.ph %[t7], 0xFF \n\t"
+ "1: \n\t"
+ "addiu %[t8], %[count], -1 \n\t"
+ "blez %[t8], 2f \n\t"
+ " nop \n\t"
+ "lw %[t8], 0(%[src]) \n\t"
+ "lw %[t9], 4(%[src]) \n\t"
+ "lh %[t4], 0(%[dst]) \n\t"
+ "lh %[t5], 2(%[dst]) \n\t"
+ "sll %[t5], %[t5], 16 \n\t"
+ "sll %[t2], %[t8], 8 \n\t"
+ "sll %[t3], %[t9], 8 \n\t"
+ "precrq.qb.ph %[t1], %[t2], %[t3] \n\t"
+ "precrq.qb.ph %[t3], %[t8], %[t9] \n\t"
+ "preceu.ph.qbla %[t8], %[t3] \n\t"
+ "muleu_s.ph.qbr %[s3], %[t0], %[t8] \n\t"
+ "preceu.ph.qbla %[t2], %[t1] \n\t"
+ "preceu.ph.qbra %[t1], %[t1] \n\t"
+ "preceu.ph.qbra %[t3], %[t3] \n\t"
+ "packrl.ph %[t9], %[t4], %[t5] \n\t"
+ "shra.ph %[s0], %[t9], 11 \n\t"
+ "and %[s0], %[s0], 0x1F001F \n\t"
+ "shra.ph %[s1], %[t9], 5 \n\t"
+ "and %[s1], %[s1], 0x3F003F \n\t"
+ "and %[s2], %[t9], 0x1F001F \n\t"
+ "addq.ph %[s3], %[s3], %[t6] \n\t"
+ "shra.ph %[t5], %[s3], 8 \n\t"
+ "and %[t5], %[t5], 0xFF00FF \n\t"
+ "addq.ph %[dst_scale], %[s3], %[t5] \n\t"
+ "shra.ph %[dst_scale], %[dst_scale], 8 \n\t"
+ "subq_s.ph %[dst_scale], %[t7], %[dst_scale] \n\t"
+ "sll %[dst_scale], %[dst_scale], 8 \n\t"
+ "precrq.qb.ph %[dst_scale], %[dst_scale], %[dst_scale] \n\t"
+ "shrl.qb %[t1], %[t1], 3 \n\t"
+ "shrl.qb %[t2], %[t2], 3 \n\t"
+ "shrl.qb %[t3], %[t3], 2 \n\t"
+ "muleu_s.ph.qbl %[t1], %[t0], %[t1] \n\t"
+ "muleu_s.ph.qbl %[t2], %[t0], %[t2] \n\t"
+ "muleu_s.ph.qbl %[t3], %[t0], %[t3] \n\t"
+ "muleu_s.ph.qbl %[t8], %[dst_scale], %[s0] \n\t"
+ "muleu_s.ph.qbl %[t9], %[dst_scale], %[s2] \n\t"
+ "muleu_s.ph.qbl %[t4], %[dst_scale], %[s1] \n\t"
+ "addq.ph %[t1], %[t1], %[t8] \n\t"
+ "addq.ph %[t2], %[t2], %[t9] \n\t"
+ "addq.ph %[t3], %[t3], %[t4] \n\t"
+ "addq.ph %[t8], %[t1], %[t6] \n\t"
+ "addq.ph %[t9], %[t2], %[t6] \n\t"
+ "addq.ph %[t4], %[t3], %[t6] \n\t"
+ "shra.ph %[t1], %[t8], 8 \n\t"
+ "addq.ph %[t1], %[t1], %[t8] \n\t"
+ "preceu.ph.qbla %[t1], %[t1] \n\t"
+ "shra.ph %[t2], %[t9], 8 \n\t"
+ "addq.ph %[t2], %[t2], %[t9] \n\t"
+ "preceu.ph.qbla %[t2], %[t2] \n\t"
+ "shra.ph %[t3], %[t4], 8 \n\t"
+ "addq.ph %[t3], %[t3], %[t4] \n\t"
+ "preceu.ph.qbla %[t3], %[t3] \n\t"
+ "shll.ph %[t8], %[t1], 11 \n\t"
+ "shll.ph %[t9], %[t3], 5 \n\t"
+ "or %[t8], %[t8], %[t9] \n\t"
+ "or %[s0], %[t8], %[t2] \n\t"
+ "srl %[t8], %[s0], 16 \n\t"
+ "and %[t9], %[s0], 0xFFFF \n\t"
+ "sh %[t8], 0(%[dst]) \n\t"
+ "sh %[t9], 2(%[dst]) \n\t"
+ "addiu %[src], %[src], 8 \n\t"
+ "addiu %[count], %[count], -2 \n\t"
+ "b 1b \n\t"
+ " addiu %[dst], %[dst], 4 \n\t"
+ "2: \n\t"
+ ".set pop \n\t"
+ : [src]"+r"(src), [dst]"+r"(dst), [count]"+r"(count),
+ [dst_scale]"+r"(dst_scale), [s0]"=&r"(s0), [s1]"=&r"(s1),
+ [s2]"=&r"(s2), [s3]"=&r"(s3), [t0]"=&r"(t0), [t1]"=&r"(t1),
+ [t2]"=&r"(t2), [t3]"=&r"(t3), [t4]"=&r"(t4), [t5]"=&r"(t5),
+ [t6]"=&r"(t6), [t7]"=&r"(t7), [t8]"=&r"(t8), [t9]"=&r"(t9)
+ : [alpha]"r"(alpha)
+ : "memory", "hi", "lo"
+ );
+
+ if (count == 1) {
+ SkPMColor sc = *src++;
+ SkPMColorAssert(sc);
+ if (sc) {
+ uint16_t dc = *dst;
+ unsigned dst_scale = 255 - SkMulDiv255Round(SkGetPackedA32(sc), alpha);
+ unsigned dr = (SkPacked32ToR16(sc) * alpha) + (SkGetPackedR16(dc) * dst_scale);
+ unsigned dg = (SkPacked32ToG16(sc) * alpha) + (SkGetPackedG16(dc) * dst_scale);
+ unsigned db = (SkPacked32ToB16(sc) * alpha) + (SkGetPackedB16(dc) * dst_scale);
+ *dst = SkPackRGB16(SkDiv255Round(dr), SkDiv255Round(dg), SkDiv255Round(db));
+ }
+ dst += 1;
+ }
+}
+
+static void S32_Blend_BlitRow32_mips_dsp(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha) {
+ int32_t t0, t1, t2, t3, t4, t5, t6, t7;
+
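+    // Straight 256-level lerp: source bytes are weighted by (alpha + 1) and destination bytes
+    // by 256 - (alpha + 1); the high byte of each 16-bit sum of products is the blended result.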
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "li %[t2], 0x100 \n\t"
+ "addiu %[t0], %[alpha], 1 \n\t"
+ "subu %[t1], %[t2], %[t0] \n\t"
+ "replv.qb %[t7], %[t0] \n\t"
+ "replv.qb %[t6], %[t1] \n\t"
+ "1: \n\t"
+ "blez %[count], 2f \n\t"
+ "lw %[t0], 0(%[src]) \n\t"
+ "lw %[t1], 0(%[dst]) \n\t"
+ "preceu.ph.qbr %[t2], %[t0] \n\t"
+ "preceu.ph.qbl %[t3], %[t0] \n\t"
+ "preceu.ph.qbr %[t4], %[t1] \n\t"
+ "preceu.ph.qbl %[t5], %[t1] \n\t"
+ "muleu_s.ph.qbr %[t2], %[t7], %[t2] \n\t"
+ "muleu_s.ph.qbr %[t3], %[t7], %[t3] \n\t"
+ "muleu_s.ph.qbr %[t4], %[t6], %[t4] \n\t"
+ "muleu_s.ph.qbr %[t5], %[t6], %[t5] \n\t"
+ "addiu %[src], %[src], 4 \n\t"
+ "addiu %[count], %[count], -1 \n\t"
+#ifdef SK_SUPPORT_LEGACY_BROKEN_LERP
+ "precrq.qb.ph %[t0], %[t3], %[t2] \n\t"
+ "precrq.qb.ph %[t2], %[t5], %[t4] \n\t"
+ "addu %[t1], %[t0], %[t2] \n\t"
+#else
+ "addu %[t0], %[t3], %[t5] \n\t"
+ "addu %[t2], %[t2], %[t4] \n\t"
+ "precrq.qb.ph %[t1], %[t0], %[t2] \n\t"
+#endif
+ "sw %[t1], 0(%[dst]) \n\t"
+ "b 1b \n\t"
+ " addi %[dst], %[dst], 4 \n\t"
+ "2: \n\t"
+ ".set pop \n\t"
+ : [src]"+r"(src), [dst]"+r"(dst), [count]"+r"(count),
+ [t0]"=&r"(t0), [t1]"=&r"(t1), [t2]"=&r"(t2), [t3]"=&r"(t3),
+ [t4]"=&r"(t4), [t5]"=&r"(t5), [t6]"=&r"(t6), [t7]"=&r"(t7)
+ : [alpha]"r"(alpha)
+ : "memory", "hi", "lo"
+ );
+}
+
+void blitmask_d565_opaque_mips(int width, int height, uint16_t* device,
+ unsigned deviceRB, const uint8_t* alpha,
+ uint32_t expanded32, unsigned maskRB) {
+ uint32_t s0, s1, s2, s3;
+
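+    // Each 565 destination pixel is widened through the 0x07E0F81F mask so R, G and B can be
+    // lerped toward 'expanded32' with a single multiply, using 5-bit coverage ((alpha + 1) >> 3).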
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ ".set noat \n\t"
+ "li $t9, 0x7E0F81F \n\t"
+ "1: \n\t"
+ "move $t8, %[width] \n\t"
+ "addiu %[height], %[height], -1 \n\t"
+ "2: \n\t"
+ "beqz $t8, 4f \n\t"
+ " addiu $t0, $t8, -4 \n\t"
+ "bltz $t0, 3f \n\t"
+ " nop \n\t"
+ "addiu $t8, $t8, -4 \n\t"
+ "lhu $t0, 0(%[device]) \n\t"
+ "lhu $t1, 2(%[device]) \n\t"
+ "lhu $t2, 4(%[device]) \n\t"
+ "lhu $t3, 6(%[device]) \n\t"
+ "lbu $t4, 0(%[alpha]) \n\t"
+ "lbu $t5, 1(%[alpha]) \n\t"
+ "lbu $t6, 2(%[alpha]) \n\t"
+ "lbu $t7, 3(%[alpha]) \n\t"
+ "replv.ph $t0, $t0 \n\t"
+ "replv.ph $t1, $t1 \n\t"
+ "replv.ph $t2, $t2 \n\t"
+ "replv.ph $t3, $t3 \n\t"
+ "addiu %[s0], $t4, 1 \n\t"
+ "addiu %[s1], $t5, 1 \n\t"
+ "addiu %[s2], $t6, 1 \n\t"
+ "addiu %[s3], $t7, 1 \n\t"
+ "srl %[s0], %[s0], 3 \n\t"
+ "srl %[s1], %[s1], 3 \n\t"
+ "srl %[s2], %[s2], 3 \n\t"
+ "srl %[s3], %[s3], 3 \n\t"
+ "and $t0, $t0, $t9 \n\t"
+ "and $t1, $t1, $t9 \n\t"
+ "and $t2, $t2, $t9 \n\t"
+ "and $t3, $t3, $t9 \n\t"
+ "subu $t4, %[expanded32], $t0 \n\t"
+ "subu $t5, %[expanded32], $t1 \n\t"
+ "subu $t6, %[expanded32], $t2 \n\t"
+ "subu $t7, %[expanded32], $t3 \n\t"
+ "mul $t4, $t4, %[s0] \n\t"
+ "mul $t5, $t5, %[s1] \n\t"
+ "mul $t6, $t6, %[s2] \n\t"
+ "mul $t7, $t7, %[s3] \n\t"
+ "addiu %[alpha], %[alpha], 4 \n\t"
+ "srl $t4, $t4, 5 \n\t"
+ "srl $t5, $t5, 5 \n\t"
+ "srl $t6, $t6, 5 \n\t"
+ "srl $t7, $t7, 5 \n\t"
+ "addu $t4, $t0, $t4 \n\t"
+ "addu $t5, $t1, $t5 \n\t"
+ "addu $t6, $t2, $t6 \n\t"
+ "addu $t7, $t3, $t7 \n\t"
+ "and $t4, $t4, $t9 \n\t"
+ "and $t5, $t5, $t9 \n\t"
+ "and $t6, $t6, $t9 \n\t"
+ "and $t7, $t7, $t9 \n\t"
+ "srl $t0, $t4, 16 \n\t"
+ "srl $t1, $t5, 16 \n\t"
+ "srl $t2, $t6, 16 \n\t"
+ "srl $t3, $t7, 16 \n\t"
+ "or %[s0], $t0, $t4 \n\t"
+ "or %[s1], $t1, $t5 \n\t"
+ "or %[s2], $t2, $t6 \n\t"
+ "or %[s3], $t3, $t7 \n\t"
+ "sh %[s0], 0(%[device]) \n\t"
+ "sh %[s1], 2(%[device]) \n\t"
+ "sh %[s2], 4(%[device]) \n\t"
+ "sh %[s3], 6(%[device]) \n\t"
+ "b 2b \n\t"
+ " addiu %[device], %[device], 8 \n\t"
+ "3: \n\t"
+ "lhu $t0, 0(%[device]) \n\t"
+ "lbu $t1, 0(%[alpha]) \n\t"
+ "addiu $t8, $t8, -1 \n\t"
+ "replv.ph $t2, $t0 \n\t"
+ "and $t2, $t2, $t9 \n\t"
+ "addiu $t0, $t1, 1 \n\t"
+ "srl $t0, $t0, 3 \n\t"
+ "subu $t3, %[expanded32], $t2 \n\t"
+ "mul $t3, $t3, $t0 \n\t"
+ "addiu %[alpha], %[alpha], 1 \n\t"
+ "srl $t3, $t3, 5 \n\t"
+ "addu $t3, $t2, $t3 \n\t"
+ "and $t3, $t3, $t9 \n\t"
+ "srl $t4, $t3, 16 \n\t"
+ "or %[s0], $t4, $t3 \n\t"
+ "sh %[s0], 0(%[device]) \n\t"
+ "bnez $t8, 3b \n\t"
+ "addiu %[device], %[device], 2 \n\t"
+ "4: \n\t"
+ "addu %[device], %[device], %[deviceRB] \n\t"
+ "bgtz %[height], 1b \n\t"
+ " addu %[alpha], %[alpha], %[maskRB] \n\t"
+ ".set pop \n\t"
+ : [height]"+r"(height), [alpha]"+r"(alpha), [device]"+r"(device),
+ [deviceRB]"+r"(deviceRB), [maskRB]"+r"(maskRB), [s0]"=&r"(s0),
+ [s1]"=&r"(s1), [s2]"=&r"(s2), [s3]"=&r"(s3)
+ : [expanded32] "r" (expanded32), [width] "r" (width)
+ : "memory", "hi", "lo", "t0", "t1", "t2", "t3",
+ "t4", "t5", "t6", "t7", "t8", "t9"
+ );
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+const SkBlitRow::Proc16 platform_565_procs_mips_dsp[] = {
+ // no dither
+ nullptr,
+ S32_D565_Blend_mips_dsp,
+ S32A_D565_Opaque_mips_dsp,
+ S32A_D565_Blend_mips_dsp,
+
+ // dither
+ S32_D565_Opaque_Dither_mips_dsp,
+ S32_D565_Blend_Dither_mips_dsp,
+ S32A_D565_Opaque_Dither_mips_dsp,
+ nullptr,
+};
+
+static const SkBlitRow::Proc32 platform_32_procs_mips_dsp[] = {
+ nullptr, // S32_Opaque,
+ S32_Blend_BlitRow32_mips_dsp, // S32_Blend,
+ nullptr, // S32A_Opaque,
+ nullptr, // S32A_Blend,
+};
+
+SkBlitRow::Proc16 SkBlitRow::PlatformFactory565(unsigned flags) {
+ return platform_565_procs_mips_dsp[flags];
+}
+
+SkBlitRow::ColorProc16 SkBlitRow::PlatformColorFactory565(unsigned flags) {
+ return nullptr;
+}
+
+SkBlitRow::Proc32 SkBlitRow::PlatformProcs32(unsigned flags) {
+ return platform_32_procs_mips_dsp[flags];
+}
diff --git a/gfx/skia/skia/src/opts/SkBlitRow_opts_none.cpp b/gfx/skia/skia/src/opts/SkBlitRow_opts_none.cpp
new file mode 100644
index 000000000..a9abe0658
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBlitRow_opts_none.cpp
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBlitRow.h"
+
+// Platform implementation with no overrides: returning nullptr selects the portable procs.
+
+SkBlitRow::Proc16 SkBlitRow::PlatformFactory565(unsigned flags) {
+ return nullptr;
+}
+
+SkBlitRow::ColorProc16 SkBlitRow::PlatformColorFactory565(unsigned flags) {
+ return nullptr;
+}
+
+SkBlitRow::Proc32 SkBlitRow::PlatformProcs32(unsigned flags) {
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/opts/SkBlurImageFilter_opts.h b/gfx/skia/skia/src/opts/SkBlurImageFilter_opts.h
new file mode 100644
index 000000000..497bcde33
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBlurImageFilter_opts.h
@@ -0,0 +1,323 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlurImageFilter_opts_DEFINED
+#define SkBlurImageFilter_opts_DEFINED
+
+#include "SkColorPriv.h"
+#include "SkTypes.h"
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include <immintrin.h>
+#endif
+
+namespace SK_OPTS_NS {
+
+enum class BlurDirection { kX, kY };
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+// ARGB -> 000A 000R 000G 000B
+static inline __m128i expand(SkPMColor p) {
+ return _mm_cvtepu8_epi32(_mm_cvtsi32_si128(p));
+};
+// Axxx Rxxx Gxxx Bxxx -> ARGB
+static inline SkPMColor repack(__m128i p) {
+ const char _ = ~0; // Don't care what ends up in these bytes. This zeros them.
+ p = _mm_shuffle_epi8(p, _mm_set_epi8(_,_,_,_, _,_,_,_, _,_,_,_, 15,11,7,3));
+ return _mm_cvtsi128_si32(p);
+};
+#define mullo_epi32 _mm_mullo_epi32
+
+#else
+// ARGB -> 000A 000R 000G 000B
+static inline __m128i expand(int p) {
+ auto result = _mm_cvtsi32_si128(p);
+ result = _mm_unpacklo_epi8(result, _mm_setzero_si128());
+ result = _mm_unpacklo_epi16(result, _mm_setzero_si128());
+ return result;
+};
+// Axxx Rxxx Gxxx Bxxx -> ARGB
+static inline SkPMColor repack(__m128i p) {
+ p = _mm_srli_epi32(p, 24); // 000A 000R 000G 000B
+ p = _mm_packs_epi32(p, p); // xxxx xxxx 0A0R 0G0B
+ p = _mm_packus_epi16(p, p); // xxxx xxxx xxxx ARGB
+ return _mm_cvtsi128_si32(p);
+};
+
+// _mm_mullo_epi32 is not available, so use the standard trick to emulate it.
+static inline __m128i mullo_epi32(__m128i a, __m128i b) {
+ __m128i p02 = _mm_mul_epu32(a, b),
+ p13 = _mm_mul_epu32(_mm_srli_si128(a, 4),
+ _mm_srli_si128(b, 4));
+ return _mm_unpacklo_epi32(_mm_shuffle_epi32(p02, _MM_SHUFFLE(0,0,2,0)),
+ _mm_shuffle_epi32(p13, _MM_SHUFFLE(0,0,2,0)));
+};
+#endif
+#define INIT_SCALE const __m128i scale = _mm_set1_epi32((1 << 24) / kernelSize);
+#define INIT_HALF const __m128i half = _mm_set1_epi32(1 << 23);
+#define INIT_SUMS __m128i sum = _mm_setzero_si128();
+#define INCREMENT_SUMS(c) sum = _mm_add_epi32(sum, expand(c))
+#define DECREMENT_SUMS(c) sum = _mm_sub_epi32(sum, expand(c))
+#define STORE_SUMS \
+ auto result = mullo_epi32(sum, scale); \
+ result = _mm_add_epi32(result, half); \
+ *dptr = repack(result);
+#define DOUBLE_ROW_OPTIMIZATION
+
+#elif defined(SK_ARM_HAS_NEON)
+
+// val = (sum * scale * 2 + 0x8000) >> 16
+#define STORE_SUMS_DOUBLE \
+ uint16x8_t resultPixels = vreinterpretq_u16_s16(vqrdmulhq_s16( \
+ vreinterpretq_s16_u16(sum), vreinterpretq_s16_u16(scale))); \
+ if (dstDirection == BlurDirection::kX) { \
+ uint32x2_t px2 = vreinterpret_u32_u8(vmovn_u16(resultPixels)); \
+ vst1_lane_u32(dptr + 0, px2, 0); \
+ vst1_lane_u32(dptr + width, px2, 1); \
+ } else { \
+ vst1_u8((uint8_t*)dptr, vmovn_u16(resultPixels)); \
+ }
+
+#define INCREMENT_SUMS_DOUBLE(p) sum = vaddw_u8(sum, load_2_pixels(p))
+#define DECREMENT_SUMS_DOUBLE(p) sum = vsubw_u8(sum, load_2_pixels(p))
+
+// Fast path for kernel sizes between 2 and 127, working on two rows at a time.
+template<BlurDirection srcDirection, BlurDirection dstDirection>
+static int box_blur_double(const SkPMColor** src, int srcStride, const SkIRect& srcBounds,
+ SkPMColor** dst, int kernelSize,
+ int leftOffset, int rightOffset, int width, int height) {
+ // Load 2 pixels from adjacent rows.
+ auto load_2_pixels = [&](const SkPMColor* s) {
+ if (srcDirection == BlurDirection::kX) {
+ // 10% faster by adding these 2 prefetches
+ SK_PREFETCH(s + 16);
+ SK_PREFETCH(s + 16 + srcStride);
+ auto one = vld1_lane_u32(s + 0, vdup_n_u32(0), 0),
+ two = vld1_lane_u32(s + srcStride, one, 1);
+ return vreinterpret_u8_u32(two);
+ } else {
+ return vld1_u8((uint8_t*)s);
+ }
+ };
+ int left = srcBounds.left();
+ int right = srcBounds.right();
+ int top = srcBounds.top();
+ int bottom = srcBounds.bottom();
+ int incrementStart = SkMax32(left - rightOffset - 1, left - right);
+ int incrementEnd = SkMax32(right - rightOffset - 1, 0);
+ int decrementStart = SkMin32(left + leftOffset, width);
+ int decrementEnd = SkMin32(right + leftOffset, width);
+ const int srcStrideX = srcDirection == BlurDirection::kX ? 1 : srcStride;
+ const int dstStrideX = dstDirection == BlurDirection::kX ? 1 : height;
+ const int srcStrideY = srcDirection == BlurDirection::kX ? srcStride : 1;
+ const int dstStrideY = dstDirection == BlurDirection::kX ? width : 1;
+ const uint16x8_t scale = vdupq_n_u16((1 << 15) / kernelSize);
+
+ for (; bottom - top >= 2; top += 2) {
+ uint16x8_t sum = vdupq_n_u16(0);
+ const SkPMColor* lptr = *src;
+ const SkPMColor* rptr = *src;
+ SkPMColor* dptr = *dst;
+ int x;
+ for (x = incrementStart; x < 0; ++x) {
+ INCREMENT_SUMS_DOUBLE(rptr);
+ rptr += srcStrideX;
+ }
+        // Clear to zero when sampling to the left of our domain. "sum" is zero here because we
+        // initialized it above, and the preceding loop has no effect in this case.
+ for (x = 0; x < incrementStart; ++x) {
+ STORE_SUMS_DOUBLE
+ dptr += dstStrideX;
+ }
+ for (; x < decrementStart && x < incrementEnd; ++x) {
+ STORE_SUMS_DOUBLE
+ dptr += dstStrideX;
+ INCREMENT_SUMS_DOUBLE(rptr);
+ rptr += srcStrideX;
+ }
+ for (x = decrementStart; x < incrementEnd; ++x) {
+ STORE_SUMS_DOUBLE
+ dptr += dstStrideX;
+ INCREMENT_SUMS_DOUBLE(rptr);
+ rptr += srcStrideX;
+ DECREMENT_SUMS_DOUBLE(lptr);
+ lptr += srcStrideX;
+ }
+ for (x = incrementEnd; x < decrementStart; ++x) {
+ STORE_SUMS_DOUBLE
+ dptr += dstStrideX;
+ }
+ for (; x < decrementEnd; ++x) {
+ STORE_SUMS_DOUBLE
+ dptr += dstStrideX;
+ DECREMENT_SUMS_DOUBLE(lptr);
+ lptr += srcStrideX;
+ }
+ // Clear to zero when sampling to the right of our domain. "sum" is zero here because we
+ // added on then subtracted off all of the pixels, leaving zero.
+ for (; x < width; ++x) {
+ STORE_SUMS_DOUBLE
+ dptr += dstStrideX;
+ }
+ *src += srcStrideY * 2;
+ *dst += dstStrideY * 2;
+ }
+ return top;
+}
+
+// ARGB -> 0A0R 0G0B
+static inline uint16x4_t expand(SkPMColor p) {
+ return vget_low_u16(vmovl_u8(vreinterpret_u8_u32(vdup_n_u32(p))));
+};
+
+#define INIT_SCALE const uint32x4_t scale = vdupq_n_u32((1 << 24) / kernelSize);
+#define INIT_HALF const uint32x4_t half = vdupq_n_u32(1 << 23);
+#define INIT_SUMS uint32x4_t sum = vdupq_n_u32(0);
+#define INCREMENT_SUMS(c) sum = vaddw_u16(sum, expand(c));
+#define DECREMENT_SUMS(c) sum = vsubw_u16(sum, expand(c));
+
+#define STORE_SUMS \
+ uint32x4_t result = vmlaq_u32(half, sum, scale); \
+ uint16x4_t result16 = vqshrn_n_u32(result, 16); \
+ uint8x8_t result8 = vqshrn_n_u16(vcombine_u16(result16, result16), 8); \
+ vst1_lane_u32(dptr, vreinterpret_u32_u8(result8), 0);
+
+#define DOUBLE_ROW_OPTIMIZATION \
+ if (1 < kernelSize && kernelSize < 128) { \
+ top = box_blur_double<srcDirection, dstDirection>(&src, srcStride, srcBounds, &dst, \
+ kernelSize, leftOffset, rightOffset, \
+ width, height); \
+ }
+
+#else // Neither NEON nor >=SSE2.
+
+#define INIT_SCALE uint32_t scale = (1 << 24) / kernelSize;
+#define INIT_HALF uint32_t half = 1 << 23;
+#define INIT_SUMS int sumA = 0, sumR = 0, sumG = 0, sumB = 0;
+#define INCREMENT_SUMS(c) \
+ sumA += SkGetPackedA32(c); \
+ sumR += SkGetPackedR32(c); \
+ sumG += SkGetPackedG32(c); \
+ sumB += SkGetPackedB32(c)
+#define DECREMENT_SUMS(c) \
+ sumA -= SkGetPackedA32(c); \
+ sumR -= SkGetPackedR32(c); \
+ sumG -= SkGetPackedG32(c); \
+ sumB -= SkGetPackedB32(c)
+#define STORE_SUMS \
+ *dptr = SkPackARGB32((sumA * scale + half) >> 24, \
+ (sumR * scale + half) >> 24, \
+ (sumG * scale + half) >> 24, \
+ (sumB * scale + half) >> 24);
+#define DOUBLE_ROW_OPTIMIZATION
+
+#endif
+
+#define PREFETCH_RPTR \
+ if (srcDirection == BlurDirection::kY) { \
+ SK_PREFETCH(rptr); \
+ }
+
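+// Generic box blur: keep a running per-channel sum of the kernelSize pixels between lptr and
+// rptr, emit each output as sum * ((1 << 24) / kernelSize) with rounding, then slide the window
+// one pixel at a time. The platform macros above supply the SIMD (or scalar) inner steps.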
+template<BlurDirection srcDirection, BlurDirection dstDirection>
+static void box_blur(const SkPMColor* src, int srcStride, const SkIRect& srcBounds, SkPMColor* dst,
+ int kernelSize, int leftOffset, int rightOffset, int width, int height) {
+ int left = srcBounds.left();
+ int right = srcBounds.right();
+ int top = srcBounds.top();
+ int bottom = srcBounds.bottom();
+ int incrementStart = SkMax32(left - rightOffset - 1, left - right);
+ int incrementEnd = SkMax32(right - rightOffset - 1, 0);
+ int decrementStart = SkMin32(left + leftOffset, width);
+ int decrementEnd = SkMin32(right + leftOffset, width);
+ int srcStrideX = srcDirection == BlurDirection::kX ? 1 : srcStride;
+ int dstStrideX = dstDirection == BlurDirection::kX ? 1 : height;
+ int srcStrideY = srcDirection == BlurDirection::kX ? srcStride : 1;
+ int dstStrideY = dstDirection == BlurDirection::kX ? width : 1;
+ INIT_SCALE
+ INIT_HALF
+
+ // Clear to zero when sampling above our domain.
+ for (int y = 0; y < top; y++) {
+ SkColor* dptr = dst;
+ for (int x = 0; x < width; ++x) {
+ *dptr = 0;
+ dptr += dstStrideX;
+ }
+ dst += dstStrideY;
+ }
+
+ DOUBLE_ROW_OPTIMIZATION
+
+ for (int y = top; y < bottom; ++y) {
+ INIT_SUMS
+ const SkPMColor* lptr = src;
+ const SkPMColor* rptr = src;
+ SkColor* dptr = dst;
+ int x;
+ for (x = incrementStart; x < 0; ++x) {
+ INCREMENT_SUMS(*rptr);
+ rptr += srcStrideX;
+ PREFETCH_RPTR
+ }
+ // Clear to zero when sampling to the left of our domain.
+ for (x = 0; x < incrementStart; ++x) {
+ *dptr = 0;
+ dptr += dstStrideX;
+ }
+ for (; x < decrementStart && x < incrementEnd; ++x) {
+ STORE_SUMS
+ dptr += dstStrideX;
+ INCREMENT_SUMS(*rptr);
+ rptr += srcStrideX;
+ PREFETCH_RPTR
+ }
+ for (x = decrementStart; x < incrementEnd; ++x) {
+ STORE_SUMS
+ dptr += dstStrideX;
+ INCREMENT_SUMS(*rptr);
+ rptr += srcStrideX;
+ PREFETCH_RPTR
+ DECREMENT_SUMS(*lptr);
+ lptr += srcStrideX;
+ }
+ for (x = incrementEnd; x < decrementStart; ++x) {
+ STORE_SUMS
+ dptr += dstStrideX;
+ }
+ for (; x < decrementEnd; ++x) {
+ STORE_SUMS
+ dptr += dstStrideX;
+ DECREMENT_SUMS(*lptr);
+ lptr += srcStrideX;
+ }
+ // Clear to zero when sampling to the right of our domain.
+ for (; x < width; ++x) {
+ *dptr = 0;
+ dptr += dstStrideX;
+ }
+ src += srcStrideY;
+ dst += dstStrideY;
+ }
+ // Clear to zero when sampling below our domain.
+ for (int y = bottom; y < height; ++y) {
+ SkColor* dptr = dst;
+ for (int x = 0; x < width; ++x) {
+ *dptr = 0;
+ dptr += dstStrideX;
+ }
+ dst += dstStrideY;
+ }
+}
+
+static auto box_blur_xx = &box_blur<BlurDirection::kX, BlurDirection::kX>,
+ box_blur_xy = &box_blur<BlurDirection::kX, BlurDirection::kY>,
+ box_blur_yx = &box_blur<BlurDirection::kY, BlurDirection::kX>;
+
+} // namespace SK_OPTS_NS
+
+#endif
diff --git a/gfx/skia/skia/src/opts/SkChecksum_opts.h b/gfx/skia/skia/src/opts/SkChecksum_opts.h
new file mode 100644
index 000000000..3e1acf08d
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkChecksum_opts.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkChecksum_opts_DEFINED
+#define SkChecksum_opts_DEFINED
+
+#include "SkChecksum.h"
+#include "SkTypes.h"
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE42
+ #include <immintrin.h>
+#elif defined(SK_CPU_ARM64) && defined(SK_ARM_HAS_CRC32)
+ #include <arm_acle.h>
+#endif
+
+namespace SK_OPTS_NS {
+
+template <typename T>
+static inline T unaligned_load(const uint8_t* src) {
+ T val;
+ memcpy(&val, src, sizeof(val));
+ return val;
+}
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE42 && (defined(__x86_64__) || defined(_M_X64))
+ // This is not a CRC32. It's Just A Hash that uses those instructions because they're fast.
+ static uint32_t hash_fn(const void* vdata, size_t bytes, uint32_t seed) {
+ auto data = (const uint8_t*)vdata;
+
+ // _mm_crc32_u64() operates on 64-bit registers, so we use uint64_t for a while.
+ uint64_t hash = seed;
+ if (bytes >= 24) {
+ // We'll create 3 independent hashes, each using _mm_crc32_u64()
+ // to hash 8 bytes per step. Both 3 and independent are important:
+ // we can execute 3 of these instructions in parallel on a single core.
+ uint64_t a = hash,
+ b = hash,
+ c = hash;
+ size_t steps = bytes/24;
+ while (steps --> 0) {
+ a = _mm_crc32_u64(a, unaligned_load<uint64_t>(data+ 0));
+ b = _mm_crc32_u64(b, unaligned_load<uint64_t>(data+ 8));
+ c = _mm_crc32_u64(c, unaligned_load<uint64_t>(data+16));
+ data += 24;
+ }
+ bytes %= 24;
+ hash = a^b^c;
+ }
+
+ SkASSERT(bytes < 24);
+ if (bytes >= 16) {
+ hash = _mm_crc32_u64(hash, unaligned_load<uint64_t>(data));
+ bytes -= 8;
+ data += 8;
+ }
+
+ SkASSERT(bytes < 16);
+ if (bytes & 8) {
+ hash = _mm_crc32_u64(hash, unaligned_load<uint64_t>(data));
+ data += 8;
+ }
+
+        // The remaining _mm_crc32_u*() calls operate on a 32-bit register.
+        // We don't lose anything here: only the bottom 32 bits were populated.
+ auto hash32 = (uint32_t)hash;
+
+ if (bytes & 4) {
+ hash32 = _mm_crc32_u32(hash32, unaligned_load<uint32_t>(data));
+ data += 4;
+ }
+ if (bytes & 2) {
+ hash32 = _mm_crc32_u16(hash32, unaligned_load<uint16_t>(data));
+ data += 2;
+ }
+ if (bytes & 1) {
+ hash32 = _mm_crc32_u8(hash32, unaligned_load<uint8_t>(data));
+ }
+ return hash32;
+ }
+
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE42
+ // 32-bit version of above, using _mm_crc32_u32() but not _mm_crc32_u64().
+ static uint32_t hash_fn(const void* vdata, size_t bytes, uint32_t hash) {
+ auto data = (const uint8_t*)vdata;
+
+ if (bytes >= 12) {
+ // We'll create 3 independent hashes, each using _mm_crc32_u32()
+ // to hash 4 bytes per step. Both 3 and independent are important:
+ // we can execute 3 of these instructions in parallel on a single core.
+ uint32_t a = hash,
+ b = hash,
+ c = hash;
+ size_t steps = bytes/12;
+ while (steps --> 0) {
+ a = _mm_crc32_u32(a, unaligned_load<uint32_t>(data+0));
+ b = _mm_crc32_u32(b, unaligned_load<uint32_t>(data+4));
+ c = _mm_crc32_u32(c, unaligned_load<uint32_t>(data+8));
+ data += 12;
+ }
+ bytes %= 12;
+ hash = a^b^c;
+ }
+
+ SkASSERT(bytes < 12);
+ if (bytes >= 8) {
+ hash = _mm_crc32_u32(hash, unaligned_load<uint32_t>(data));
+ bytes -= 4;
+ data += 4;
+ }
+
+ SkASSERT(bytes < 8);
+ if (bytes & 4) {
+ hash = _mm_crc32_u32(hash, unaligned_load<uint32_t>(data));
+ data += 4;
+ }
+ if (bytes & 2) {
+ hash = _mm_crc32_u16(hash, unaligned_load<uint16_t>(data));
+ data += 2;
+ }
+ if (bytes & 1) {
+ hash = _mm_crc32_u8(hash, unaligned_load<uint8_t>(data));
+ }
+ return hash;
+ }
+
+#elif defined(SK_CPU_ARM64) && defined(SK_ARM_HAS_CRC32)
+ static uint32_t hash_fn(const void* vdata, size_t bytes, uint32_t hash) {
+ auto data = (const uint8_t*)vdata;
+ if (bytes >= 24) {
+ uint32_t a = hash,
+ b = hash,
+ c = hash;
+ size_t steps = bytes/24;
+ while (steps --> 0) {
+ a = __crc32d(a, unaligned_load<uint64_t>(data+ 0));
+ b = __crc32d(b, unaligned_load<uint64_t>(data+ 8));
+ c = __crc32d(c, unaligned_load<uint64_t>(data+16));
+ data += 24;
+ }
+ bytes %= 24;
+ hash = a^b^c;
+ }
+
+ SkASSERT(bytes < 24);
+ if (bytes >= 16) {
+ hash = __crc32d(hash, unaligned_load<uint64_t>(data));
+ bytes -= 8;
+ data += 8;
+ }
+
+ SkASSERT(bytes < 16);
+ if (bytes & 8) {
+ hash = __crc32d(hash, unaligned_load<uint64_t>(data));
+ data += 8;
+ }
+ if (bytes & 4) {
+ hash = __crc32w(hash, unaligned_load<uint32_t>(data));
+ data += 4;
+ }
+ if (bytes & 2) {
+ hash = __crc32h(hash, unaligned_load<uint16_t>(data));
+ data += 2;
+ }
+ if (bytes & 1) {
+ hash = __crc32b(hash, unaligned_load<uint8_t>(data));
+ }
+ return hash;
+ }
+
+#else
+ // This is Murmur3.
+ static uint32_t hash_fn(const void* vdata, size_t bytes, uint32_t hash) {
+ auto data = (const uint8_t*)vdata;
+
+ size_t original_bytes = bytes;
+
+ // Handle 4 bytes at a time while possible.
+ while (bytes >= 4) {
+ uint32_t k = unaligned_load<uint32_t>(data);
+ k *= 0xcc9e2d51;
+ k = (k << 15) | (k >> 17);
+ k *= 0x1b873593;
+
+ hash ^= k;
+ hash = (hash << 13) | (hash >> 19);
+ hash *= 5;
+ hash += 0xe6546b64;
+
+ bytes -= 4;
+ data += 4;
+ }
+
+ // Handle last 0-3 bytes.
+ uint32_t k = 0;
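+        // The cases below intentionally fall through: 3 trailing bytes also mix bytes 1 and 0.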
+ switch (bytes & 3) {
+ case 3: k ^= data[2] << 16;
+ case 2: k ^= data[1] << 8;
+ case 1: k ^= data[0] << 0;
+ k *= 0xcc9e2d51;
+ k = (k << 15) | (k >> 17);
+ k *= 0x1b873593;
+ hash ^= k;
+ }
+
+ hash ^= original_bytes;
+ return SkChecksum::Mix(hash);
+ }
+#endif
+
+} // namespace SK_OPTS_NS
+
+#endif//SkChecksum_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkColorCubeFilter_opts.h b/gfx/skia/skia/src/opts/SkColorCubeFilter_opts.h
new file mode 100644
index 000000000..12acd7803
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkColorCubeFilter_opts.h
@@ -0,0 +1,84 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SkColorCubeFilter_opts_DEFINED
+#define SkColorCubeFilter_opts_DEFINED
+
+#include "SkColor.h"
+#include "SkNx.h"
+#include "SkUnPreMultiply.h"
+
+namespace SK_OPTS_NS {
+
+static void color_cube_filter_span(const SkPMColor src[],
+ int count,
+ SkPMColor dst[],
+ const int* colorToIndex[2],
+ const SkScalar* colorToFactors[2],
+ int dim,
+ const SkColor* colorCube) {
+ uint8_t r, g, b, a;
+
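+    // For each pixel: un-premultiply if alpha < 255, trilinearly interpolate the 3D LUT from the
+    // 8 lattice cells chosen by colorToIndex/colorToFactors, then re-apply alpha and repack.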
+ for (int i = 0; i < count; ++i) {
+ const SkPMColor input = src[i];
+ a = input >> SK_A32_SHIFT;
+
+ if (a != 255) {
+ const SkColor source = SkUnPreMultiply::PMColorToColor(input);
+ r = SkColorGetR(source);
+ g = SkColorGetG(source);
+ b = SkColorGetB(source);
+ } else {
+ r = SkGetPackedR32(input);
+ g = SkGetPackedG32(input);
+ b = SkGetPackedB32(input);
+ }
+
+ const SkScalar g0 = colorToFactors[0][g],
+ g1 = colorToFactors[1][g],
+ b0 = colorToFactors[0][b],
+ b1 = colorToFactors[1][b];
+
+ const Sk4f g0b0(g0*b0),
+ g0b1(g0*b1),
+ g1b0(g1*b0),
+ g1b1(g1*b1);
+
+ const int i00 = (colorToIndex[0][g] + colorToIndex[0][b] * dim) * dim;
+ const int i01 = (colorToIndex[0][g] + colorToIndex[1][b] * dim) * dim;
+ const int i10 = (colorToIndex[1][g] + colorToIndex[0][b] * dim) * dim;
+ const int i11 = (colorToIndex[1][g] + colorToIndex[1][b] * dim) * dim;
+
+ Sk4f color(0.5f); // Starting from 0.5f gets us rounding for free.
+ for (int x = 0; x < 2; ++x) {
+ const int ix = colorToIndex[x][r];
+
+ const SkColor lutColor00 = colorCube[ix + i00];
+ const SkColor lutColor01 = colorCube[ix + i01];
+ const SkColor lutColor10 = colorCube[ix + i10];
+ const SkColor lutColor11 = colorCube[ix + i11];
+
+ Sk4f sum = SkNx_cast<float>(Sk4b::Load(&lutColor00)) * g0b0;
+ sum = sum + SkNx_cast<float>(Sk4b::Load(&lutColor01)) * g0b1;
+ sum = sum + SkNx_cast<float>(Sk4b::Load(&lutColor10)) * g1b0;
+ sum = sum + SkNx_cast<float>(Sk4b::Load(&lutColor11)) * g1b1;
+ color = color + sum * Sk4f((float)colorToFactors[x][r]);
+ }
+ if (a != 255) {
+ color = color * Sk4f(a * (1.0f/255));
+ }
+
+ // color is BGRA (SkColor order), dst is SkPMColor order, so may need to swap R+B.
+ #if defined(SK_PMCOLOR_IS_RGBA)
+ color = SkNx_shuffle<2,1,0,3>(color);
+ #endif
+ uint8_t* dstBytes = (uint8_t*)(dst+i);
+ SkNx_cast<uint8_t>(color).store(dstBytes);
+ dstBytes[SK_A32_SHIFT/8] = a;
+ }
+}
+
+} // namespace SK_OPTS_NS
+
+#endif // SkColorCubeFilter_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkColor_opts_SSE2.h b/gfx/skia/skia/src/opts/SkColor_opts_SSE2.h
new file mode 100644
index 000000000..a3db88059
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkColor_opts_SSE2.h
@@ -0,0 +1,305 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColor_opts_SSE2_DEFINED
+#define SkColor_opts_SSE2_DEFINED
+
+#include <emmintrin.h>
+
+#define ASSERT_EQ(a,b) SkASSERT(0xffff == _mm_movemask_epi8(_mm_cmpeq_epi8((a), (b))))
+
+// Because SSE2 has no _mm_mullo_epi32(), we emulate it here.
+// Multiplies 4 32-bit integers from a by 4 32-bit integers from b.
+// The 4 multiplication results must be representable within 32-bit
+// integers, otherwise they would overflow.
+static inline __m128i Multiply32_SSE2(const __m128i& a, const __m128i& b) {
+ // Calculate results of a0 * b0 and a2 * b2.
+ __m128i r1 = _mm_mul_epu32(a, b);
+ // Calculate results of a1 * b1 and a3 * b3.
+ __m128i r2 = _mm_mul_epu32(_mm_srli_si128(a, 4), _mm_srli_si128(b, 4));
+ // Shuffle results to [63..0] and interleave the results.
+ __m128i r = _mm_unpacklo_epi32(_mm_shuffle_epi32(r1, _MM_SHUFFLE(0,0,2,0)),
+ _mm_shuffle_epi32(r2, _MM_SHUFFLE(0,0,2,0)));
+ return r;
+}
+
+static inline __m128i SkAlpha255To256_SSE2(const __m128i& alpha) {
+ return _mm_add_epi32(alpha, _mm_set1_epi32(1));
+}
+
+// See #define SkAlphaMulAlpha(a, b) SkMulDiv255Round(a, b) in SkXfermode.cpp.
+static inline __m128i SkAlphaMulAlpha_SSE2(const __m128i& a,
+ const __m128i& b) {
+ __m128i prod = _mm_mullo_epi16(a, b);
+ prod = _mm_add_epi32(prod, _mm_set1_epi32(128));
+ prod = _mm_add_epi32(prod, _mm_srli_epi32(prod, 8));
+ prod = _mm_srli_epi32(prod, 8);
+
+ return prod;
+}
+
+// Portable version SkAlphaMulQ is in SkColorPriv.h.
+static inline __m128i SkAlphaMulQ_SSE2(const __m128i& c, const __m128i& scale) {
+ const __m128i mask = _mm_set1_epi32(0xFF00FF);
+ __m128i s = _mm_or_si128(_mm_slli_epi32(scale, 16), scale);
+
+ // uint32_t rb = ((c & mask) * scale) >> 8
+ __m128i rb = _mm_and_si128(mask, c);
+ rb = _mm_mullo_epi16(rb, s);
+ rb = _mm_srli_epi16(rb, 8);
+
+ // uint32_t ag = ((c >> 8) & mask) * scale
+ __m128i ag = _mm_srli_epi16(c, 8);
+ ASSERT_EQ(ag, _mm_and_si128(mask, ag)); // ag = _mm_srli_epi16(c, 8) did this for us.
+ ag = _mm_mullo_epi16(ag, s);
+
+ // (rb & mask) | (ag & ~mask)
+ ASSERT_EQ(rb, _mm_and_si128(mask, rb)); // rb = _mm_srli_epi16(rb, 8) did this for us.
+ ag = _mm_andnot_si128(mask, ag);
+ return _mm_or_si128(rb, ag);
+}
+
+// Fast path for SkAlphaMulQ_SSE2 with a constant scale factor.
+static inline __m128i SkAlphaMulQ_SSE2(const __m128i& c, const unsigned scale) {
+ const __m128i mask = _mm_set1_epi32(0xFF00FF);
+ __m128i s = _mm_set1_epi16(scale << 8); // Move scale factor to upper byte of word.
+
+ // With mulhi, red and blue values are already in the right place and
+ // don't need to be divided by 256.
+ __m128i rb = _mm_and_si128(mask, c);
+ rb = _mm_mulhi_epu16(rb, s);
+
+ __m128i ag = _mm_andnot_si128(mask, c);
+ ag = _mm_mulhi_epu16(ag, s); // Alpha and green values are in the higher byte of each word.
+ ag = _mm_andnot_si128(mask, ag);
+
+ return _mm_or_si128(rb, ag);
+}
+
+// Portable version SkFastFourByteInterp256 is in SkColorPriv.h.
+static inline __m128i SkFastFourByteInterp256_SSE2(const __m128i& src, const __m128i& dst, const unsigned src_scale) {
+ // Computes dst + (((src - dst)*src_scale)>>8)
+ const __m128i mask = _mm_set1_epi32(0x00FF00FF);
+
+ // Unpack the 16x8-bit source into 2 8x16-bit splayed halves.
+ __m128i src_rb = _mm_and_si128(mask, src);
+ __m128i src_ag = _mm_srli_epi16(src, 8);
+ __m128i dst_rb = _mm_and_si128(mask, dst);
+ __m128i dst_ag = _mm_srli_epi16(dst, 8);
+
+ // Compute scaled differences.
+ __m128i diff_rb = _mm_sub_epi16(src_rb, dst_rb);
+ __m128i diff_ag = _mm_sub_epi16(src_ag, dst_ag);
+ __m128i s = _mm_set1_epi16(src_scale);
+ diff_rb = _mm_mullo_epi16(diff_rb, s);
+ diff_ag = _mm_mullo_epi16(diff_ag, s);
+
+ // Pack the differences back together.
+ diff_rb = _mm_srli_epi16(diff_rb, 8);
+ diff_ag = _mm_andnot_si128(mask, diff_ag);
+ __m128i diff = _mm_or_si128(diff_rb, diff_ag);
+
+ // Add difference to destination.
+ return _mm_add_epi8(dst, diff);
+}
+
+// Portable version SkPMLerp is in SkColorPriv.h
+static inline __m128i SkPMLerp_SSE2(const __m128i& src, const __m128i& dst, const unsigned scale) {
+#ifdef SK_SUPPORT_LEGACY_BROKEN_LERP
+ return _mm_add_epi8(SkAlphaMulQ_SSE2(src, scale), SkAlphaMulQ_SSE2(dst, 256 - scale));
+#else
+ return SkFastFourByteInterp256_SSE2(src, dst, scale);
+#endif
+}
+
+static inline __m128i SkGetPackedA32_SSE2(const __m128i& src) {
+#if SK_A32_SHIFT == 24 // It's very common (universal?) that alpha is the top byte.
+ return _mm_srli_epi32(src, 24); // You'd hope the compiler would remove the left shift then,
+#else // but I've seen Clang just do a dumb left shift of zero. :(
+ __m128i a = _mm_slli_epi32(src, (24 - SK_A32_SHIFT));
+ return _mm_srli_epi32(a, 24);
+#endif
+}
+
+static inline __m128i SkGetPackedR32_SSE2(const __m128i& src) {
+ __m128i r = _mm_slli_epi32(src, (24 - SK_R32_SHIFT));
+ return _mm_srli_epi32(r, 24);
+}
+
+static inline __m128i SkGetPackedG32_SSE2(const __m128i& src) {
+ __m128i g = _mm_slli_epi32(src, (24 - SK_G32_SHIFT));
+ return _mm_srli_epi32(g, 24);
+}
+
+static inline __m128i SkGetPackedB32_SSE2(const __m128i& src) {
+ __m128i b = _mm_slli_epi32(src, (24 - SK_B32_SHIFT));
+ return _mm_srli_epi32(b, 24);
+}
+
+static inline __m128i SkMul16ShiftRound_SSE2(const __m128i& a,
+ const __m128i& b, int shift) {
+ __m128i prod = _mm_mullo_epi16(a, b);
+ prod = _mm_add_epi16(prod, _mm_set1_epi16(1 << (shift - 1)));
+ prod = _mm_add_epi16(prod, _mm_srli_epi16(prod, shift));
+ prod = _mm_srli_epi16(prod, shift);
+
+ return prod;
+}
+
+static inline __m128i SkPackRGB16_SSE2(const __m128i& r,
+ const __m128i& g, const __m128i& b) {
+ __m128i dr = _mm_slli_epi16(r, SK_R16_SHIFT);
+ __m128i dg = _mm_slli_epi16(g, SK_G16_SHIFT);
+ __m128i db = _mm_slli_epi16(b, SK_B16_SHIFT);
+
+ __m128i c = _mm_or_si128(dr, dg);
+ return _mm_or_si128(c, db);
+}
+
+static inline __m128i SkPackARGB32_SSE2(const __m128i& a, const __m128i& r,
+ const __m128i& g, const __m128i& b) {
+ __m128i da = _mm_slli_epi32(a, SK_A32_SHIFT);
+ __m128i dr = _mm_slli_epi32(r, SK_R32_SHIFT);
+ __m128i dg = _mm_slli_epi32(g, SK_G32_SHIFT);
+ __m128i db = _mm_slli_epi32(b, SK_B32_SHIFT);
+
+ __m128i c = _mm_or_si128(da, dr);
+ c = _mm_or_si128(c, dg);
+ return _mm_or_si128(c, db);
+}
+
+static inline __m128i SkPacked16ToR32_SSE2(const __m128i& src) {
+ __m128i r = _mm_srli_epi32(src, SK_R16_SHIFT);
+ r = _mm_and_si128(r, _mm_set1_epi32(SK_R16_MASK));
+ r = _mm_or_si128(_mm_slli_epi32(r, (8 - SK_R16_BITS)),
+ _mm_srli_epi32(r, (2 * SK_R16_BITS - 8)));
+
+ return r;
+}
+
+static inline __m128i SkPacked16ToG32_SSE2(const __m128i& src) {
+ __m128i g = _mm_srli_epi32(src, SK_G16_SHIFT);
+ g = _mm_and_si128(g, _mm_set1_epi32(SK_G16_MASK));
+ g = _mm_or_si128(_mm_slli_epi32(g, (8 - SK_G16_BITS)),
+ _mm_srli_epi32(g, (2 * SK_G16_BITS - 8)));
+
+ return g;
+}
+
+static inline __m128i SkPacked16ToB32_SSE2(const __m128i& src) {
+ __m128i b = _mm_srli_epi32(src, SK_B16_SHIFT);
+ b = _mm_and_si128(b, _mm_set1_epi32(SK_B16_MASK));
+ b = _mm_or_si128(_mm_slli_epi32(b, (8 - SK_B16_BITS)),
+ _mm_srli_epi32(b, (2 * SK_B16_BITS - 8)));
+
+ return b;
+}
+
+static inline __m128i SkPixel16ToPixel32_SSE2(const __m128i& src) {
+ __m128i r = SkPacked16ToR32_SSE2(src);
+ __m128i g = SkPacked16ToG32_SSE2(src);
+ __m128i b = SkPacked16ToB32_SSE2(src);
+
+ return SkPackARGB32_SSE2(_mm_set1_epi32(0xFF), r, g, b);
+}
+
+static inline __m128i SkPixel32ToPixel16_ToU16_SSE2(const __m128i& src_pixel1,
+ const __m128i& src_pixel2) {
+ // Calculate result r.
+ __m128i r1 = _mm_srli_epi32(src_pixel1,
+ SK_R32_SHIFT + (8 - SK_R16_BITS));
+ r1 = _mm_and_si128(r1, _mm_set1_epi32(SK_R16_MASK));
+ __m128i r2 = _mm_srli_epi32(src_pixel2,
+ SK_R32_SHIFT + (8 - SK_R16_BITS));
+ r2 = _mm_and_si128(r2, _mm_set1_epi32(SK_R16_MASK));
+ __m128i r = _mm_packs_epi32(r1, r2);
+
+ // Calculate result g.
+ __m128i g1 = _mm_srli_epi32(src_pixel1,
+ SK_G32_SHIFT + (8 - SK_G16_BITS));
+ g1 = _mm_and_si128(g1, _mm_set1_epi32(SK_G16_MASK));
+ __m128i g2 = _mm_srli_epi32(src_pixel2,
+ SK_G32_SHIFT + (8 - SK_G16_BITS));
+ g2 = _mm_and_si128(g2, _mm_set1_epi32(SK_G16_MASK));
+ __m128i g = _mm_packs_epi32(g1, g2);
+
+ // Calculate result b.
+ __m128i b1 = _mm_srli_epi32(src_pixel1,
+ SK_B32_SHIFT + (8 - SK_B16_BITS));
+ b1 = _mm_and_si128(b1, _mm_set1_epi32(SK_B16_MASK));
+ __m128i b2 = _mm_srli_epi32(src_pixel2,
+ SK_B32_SHIFT + (8 - SK_B16_BITS));
+ b2 = _mm_and_si128(b2, _mm_set1_epi32(SK_B16_MASK));
+ __m128i b = _mm_packs_epi32(b1, b2);
+
+ // Store 8 16-bit colors in dst.
+ __m128i d_pixel = SkPackRGB16_SSE2(r, g, b);
+
+ return d_pixel;
+}
+
+// Portable version is SkPMSrcOver in SkColorPriv.h.
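+// Computes src + ((dst * (256 - srcA)) >> 8) per channel, i.e. premultiplied src-over.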
+static inline __m128i SkPMSrcOver_SSE2(const __m128i& src, const __m128i& dst) {
+ return _mm_add_epi32(src,
+ SkAlphaMulQ_SSE2(dst, _mm_sub_epi32(_mm_set1_epi32(256),
+ SkGetPackedA32_SSE2(src))));
+}
+
+// Fast path for SkBlendARGB32_SSE2 with a constant alpha factor.
+static inline __m128i SkBlendARGB32_SSE2(const __m128i& src, const __m128i& dst,
+ const unsigned aa) {
+ unsigned alpha = SkAlpha255To256(aa);
+#ifdef SK_SUPPORT_LEGACY_BROKEN_LERP
+ __m128i src_scale = _mm_set1_epi32(alpha);
+ // SkAlpha255To256(255 - SkAlphaMul(SkGetPackedA32(src), src_scale))
+ __m128i dst_scale = SkGetPackedA32_SSE2(src);
+ dst_scale = _mm_mullo_epi16(dst_scale, src_scale);
+ dst_scale = _mm_srli_epi16(dst_scale, 8);
+ dst_scale = _mm_sub_epi32(_mm_set1_epi32(256), dst_scale);
+
+ __m128i result = SkAlphaMulQ_SSE2(src, alpha);
+ return _mm_add_epi8(result, SkAlphaMulQ_SSE2(dst, dst_scale));
+#else
+ __m128i src_scale = _mm_set1_epi16(alpha);
+ // SkAlphaMulInv256(SkGetPackedA32(src), src_scale)
+ __m128i dst_scale = SkGetPackedA32_SSE2(src);
+ // High words in dst_scale are 0, so it's safe to multiply with 16-bit src_scale.
+ dst_scale = _mm_mullo_epi16(dst_scale, src_scale);
+ dst_scale = _mm_sub_epi32(_mm_set1_epi32(0xFFFF), dst_scale);
+ dst_scale = _mm_add_epi32(dst_scale, _mm_srli_epi32(dst_scale, 8));
+ dst_scale = _mm_srli_epi32(dst_scale, 8);
+ // Duplicate scales into 2x16-bit pattern per pixel.
+ dst_scale = _mm_shufflelo_epi16(dst_scale, _MM_SHUFFLE(2, 2, 0, 0));
+ dst_scale = _mm_shufflehi_epi16(dst_scale, _MM_SHUFFLE(2, 2, 0, 0));
+
+ const __m128i mask = _mm_set1_epi32(0x00FF00FF);
+
+ // Unpack the 16x8-bit source/destination into 2 8x16-bit splayed halves.
+ __m128i src_rb = _mm_and_si128(mask, src);
+ __m128i src_ag = _mm_srli_epi16(src, 8);
+ __m128i dst_rb = _mm_and_si128(mask, dst);
+ __m128i dst_ag = _mm_srli_epi16(dst, 8);
+
+ // Scale them.
+ src_rb = _mm_mullo_epi16(src_rb, src_scale);
+ src_ag = _mm_mullo_epi16(src_ag, src_scale);
+ dst_rb = _mm_mullo_epi16(dst_rb, dst_scale);
+ dst_ag = _mm_mullo_epi16(dst_ag, dst_scale);
+
+ // Add the scaled source and destination.
+ dst_rb = _mm_add_epi16(src_rb, dst_rb);
+ dst_ag = _mm_add_epi16(src_ag, dst_ag);
+
+ // Unsplay the halves back together.
+ dst_rb = _mm_srli_epi16(dst_rb, 8);
+ dst_ag = _mm_andnot_si128(mask, dst_ag);
+ return _mm_or_si128(dst_rb, dst_ag);
+#endif
+}
+
+#undef ASSERT_EQ
+#endif // SkColor_opts_SSE2_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkColor_opts_neon.h b/gfx/skia/skia/src/opts/SkColor_opts_neon.h
new file mode 100644
index 000000000..c316ab403
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkColor_opts_neon.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColor_opts_neon_DEFINED
+#define SkColor_opts_neon_DEFINED
+
+#include "SkTypes.h"
+#include "SkColorPriv.h"
+
+#include <arm_neon.h>
+
+#define NEON_A (SK_A32_SHIFT / 8)
+#define NEON_R (SK_R32_SHIFT / 8)
+#define NEON_G (SK_G32_SHIFT / 8)
+#define NEON_B (SK_B32_SHIFT / 8)
+
+static inline uint16x8_t SkAlpha255To256_neon8(uint8x8_t alpha) {
+ return vaddw_u8(vdupq_n_u16(1), alpha);
+}
+
+static inline uint8x8_t SkAlphaMul_neon8(uint8x8_t color, uint16x8_t scale) {
+ return vshrn_n_u16(vmovl_u8(color) * scale, 8);
+}
+
+static inline uint8x8x4_t SkAlphaMulQ_neon8(uint8x8x4_t color, uint16x8_t scale) {
+ uint8x8x4_t ret;
+
+ ret.val[NEON_A] = SkAlphaMul_neon8(color.val[NEON_A], scale);
+ ret.val[NEON_R] = SkAlphaMul_neon8(color.val[NEON_R], scale);
+ ret.val[NEON_G] = SkAlphaMul_neon8(color.val[NEON_G], scale);
+ ret.val[NEON_B] = SkAlphaMul_neon8(color.val[NEON_B], scale);
+
+ return ret;
+}
+
+/* This function expands 8 pixels from RGB565 (R, G, B from high to low) to
+ * SkPMColor (all possible configurations supported) in the exact same way as
+ * SkPixel16ToPixel32.
+ */
+static inline uint8x8x4_t SkPixel16ToPixel32_neon8(uint16x8_t vsrc) {
+
+ uint8x8x4_t ret;
+ uint8x8_t vr, vg, vb;
+
+ vr = vmovn_u16(vshrq_n_u16(vsrc, SK_R16_SHIFT));
+ vg = vmovn_u16(vshrq_n_u16(vshlq_n_u16(vsrc, SK_R16_BITS), SK_R16_BITS + SK_B16_BITS));
+ vb = vmovn_u16(vsrc & vdupq_n_u16(SK_B16_MASK));
+
+ ret.val[NEON_A] = vdup_n_u8(0xFF);
+ ret.val[NEON_R] = vshl_n_u8(vr, 8 - SK_R16_BITS) | vshr_n_u8(vr, 2 * SK_R16_BITS - 8);
+ ret.val[NEON_G] = vshl_n_u8(vg, 8 - SK_G16_BITS) | vshr_n_u8(vg, 2 * SK_G16_BITS - 8);
+ ret.val[NEON_B] = vshl_n_u8(vb, 8 - SK_B16_BITS) | vshr_n_u8(vb, 2 * SK_B16_BITS - 8);
+
+ return ret;
+}
+
+/* This function packs 8 pixels from SkPMColor (all possible configurations
+ * supported) to RGB565 (R, G, B from high to low) in the exact same way as
+ * SkPixel32ToPixel16.
+ */
+static inline uint16x8_t SkPixel32ToPixel16_neon8(uint8x8x4_t vsrc) {
+
+ uint16x8_t ret;
+
+ ret = vshll_n_u8(vsrc.val[NEON_R], 8);
+ ret = vsriq_n_u16(ret, vshll_n_u8(vsrc.val[NEON_G], 8), SK_R16_BITS);
+ ret = vsriq_n_u16(ret, vshll_n_u8(vsrc.val[NEON_B], 8), SK_R16_BITS + SK_G16_BITS);
+
+ return ret;
+}
+
+/* This function blends 8 pixels of the same channel in the exact same way as
+ * SkBlend32.
+ */
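+// Note: the weighted difference is shifted right by 5, i.e. 'scale' is treated as a 0..32 factor.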
+static inline uint8x8_t SkBlend32_neon8(uint8x8_t src, uint8x8_t dst, uint16x8_t scale) {
+ int16x8_t src_wide, dst_wide;
+
+ src_wide = vreinterpretq_s16_u16(vmovl_u8(src));
+ dst_wide = vreinterpretq_s16_u16(vmovl_u8(dst));
+
+ src_wide = (src_wide - dst_wide) * vreinterpretq_s16_u16(scale);
+
+ dst_wide += vshrq_n_s16(src_wide, 5);
+
+ return vmovn_u16(vreinterpretq_u16_s16(dst_wide));
+}
+
+static inline SkPMColor SkFourByteInterp256_neon(SkPMColor src, SkPMColor dst,
+ unsigned srcScale) {
+ SkASSERT(srcScale <= 256);
+ int16x8_t vscale = vdupq_n_s16(srcScale);
+ int16x8_t vsrc_wide, vdst_wide, vdiff;
+ uint8x8_t res;
+
+ vsrc_wide = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(vdup_n_u32(src))));
+ vdst_wide = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(vdup_n_u32(dst))));
+
+ vdiff = vsrc_wide - vdst_wide;
+ vdiff *= vscale;
+
+ vdiff = vshrq_n_s16(vdiff, 8);
+
+ vdst_wide += vdiff;
+
+ res = vmovn_u16(vreinterpretq_u16_s16(vdst_wide));
+
+ return vget_lane_u32(vreinterpret_u32_u8(res), 0);
+}
+
+static inline SkPMColor SkFourByteInterp_neon(SkPMColor src, SkPMColor dst,
+ U8CPU srcWeight) {
+ SkASSERT(srcWeight <= 255);
+ unsigned scale = SkAlpha255To256(srcWeight);
+ return SkFourByteInterp256_neon(src, dst, scale);
+}
+
+#endif /* #ifndef SkColor_opts_neon_DEFINED */
diff --git a/gfx/skia/skia/src/opts/SkMorphologyImageFilter_opts.h b/gfx/skia/skia/src/opts/SkMorphologyImageFilter_opts.h
new file mode 100644
index 000000000..e30a9e497
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkMorphologyImageFilter_opts.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMorphologyImageFilter_opts_DEFINED
+#define SkMorphologyImageFilter_opts_DEFINED
+
+namespace SK_OPTS_NS {
+
+enum MorphType { kDilate, kErode };
+enum class MorphDirection { kX, kY };
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
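+// For each output pixel, take the per-channel max (dilate) or min (erode) of the radius-wide
+// window of source pixels; 'direction' selects whether the window runs along x or along y.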
+template<MorphType type, MorphDirection direction>
+static void morph(const SkPMColor* src, SkPMColor* dst,
+ int radius, int width, int height, int srcStride, int dstStride) {
+ const int srcStrideX = direction == MorphDirection::kX ? 1 : srcStride;
+ const int dstStrideX = direction == MorphDirection::kX ? 1 : dstStride;
+ const int srcStrideY = direction == MorphDirection::kX ? srcStride : 1;
+ const int dstStrideY = direction == MorphDirection::kX ? dstStride : 1;
+ radius = SkMin32(radius, width - 1);
+ const SkPMColor* upperSrc = src + radius * srcStrideX;
+ for (int x = 0; x < width; ++x) {
+ const SkPMColor* lp = src;
+ const SkPMColor* up = upperSrc;
+ SkPMColor* dptr = dst;
+ for (int y = 0; y < height; ++y) {
+ __m128i extreme = (type == kDilate) ? _mm_setzero_si128()
+ : _mm_set1_epi32(0xFFFFFFFF);
+ for (const SkPMColor* p = lp; p <= up; p += srcStrideX) {
+ __m128i src_pixel = _mm_cvtsi32_si128(*p);
+ extreme = (type == kDilate) ? _mm_max_epu8(src_pixel, extreme)
+ : _mm_min_epu8(src_pixel, extreme);
+ }
+ *dptr = _mm_cvtsi128_si32(extreme);
+ dptr += dstStrideY;
+ lp += srcStrideY;
+ up += srcStrideY;
+ }
+ if (x >= radius) { src += srcStrideX; }
+ if (x + radius < width - 1) { upperSrc += srcStrideX; }
+ dst += dstStrideX;
+ }
+}
+
+#elif defined(SK_ARM_HAS_NEON)
+template<MorphType type, MorphDirection direction>
+static void morph(const SkPMColor* src, SkPMColor* dst,
+ int radius, int width, int height, int srcStride, int dstStride) {
+ const int srcStrideX = direction == MorphDirection::kX ? 1 : srcStride;
+ const int dstStrideX = direction == MorphDirection::kX ? 1 : dstStride;
+ const int srcStrideY = direction == MorphDirection::kX ? srcStride : 1;
+ const int dstStrideY = direction == MorphDirection::kX ? dstStride : 1;
+ radius = SkMin32(radius, width - 1);
+ const SkPMColor* upperSrc = src + radius * srcStrideX;
+ for (int x = 0; x < width; ++x) {
+ const SkPMColor* lp = src;
+ const SkPMColor* up = upperSrc;
+ SkPMColor* dptr = dst;
+ for (int y = 0; y < height; ++y) {
+ uint8x8_t extreme = vdup_n_u8(type == kDilate ? 0 : 255);
+ for (const SkPMColor* p = lp; p <= up; p += srcStrideX) {
+ uint8x8_t src_pixel = vreinterpret_u8_u32(vdup_n_u32(*p));
+ extreme = (type == kDilate) ? vmax_u8(src_pixel, extreme)
+ : vmin_u8(src_pixel, extreme);
+ }
+ *dptr = vget_lane_u32(vreinterpret_u32_u8(extreme), 0);
+ dptr += dstStrideY;
+ lp += srcStrideY;
+ up += srcStrideY;
+ }
+ if (x >= radius) src += srcStrideX;
+ if (x + radius < width - 1) upperSrc += srcStrideX;
+ dst += dstStrideX;
+ }
+}
+
+#else
+template<MorphType type, MorphDirection direction>
+static void morph(const SkPMColor* src, SkPMColor* dst,
+ int radius, int width, int height, int srcStride, int dstStride) {
+ const int srcStrideX = direction == MorphDirection::kX ? 1 : srcStride;
+ const int dstStrideX = direction == MorphDirection::kX ? 1 : dstStride;
+ const int srcStrideY = direction == MorphDirection::kX ? srcStride : 1;
+ const int dstStrideY = direction == MorphDirection::kX ? dstStride : 1;
+ radius = SkMin32(radius, width - 1);
+ const SkPMColor* upperSrc = src + radius * srcStrideX;
+ for (int x = 0; x < width; ++x) {
+ const SkPMColor* lp = src;
+ const SkPMColor* up = upperSrc;
+ SkPMColor* dptr = dst;
+ for (int y = 0; y < height; ++y) {
+            // If we're taking the max (dilate), start from 0; if taking the min (erode), start from 255.
+ const int start = (type == kDilate) ? 0 : 255;
+ int B = start, G = start, R = start, A = start;
+ for (const SkPMColor* p = lp; p <= up; p += srcStrideX) {
+ int b = SkGetPackedB32(*p),
+ g = SkGetPackedG32(*p),
+ r = SkGetPackedR32(*p),
+ a = SkGetPackedA32(*p);
+ if (type == kDilate) {
+ B = SkTMax(b, B);
+ G = SkTMax(g, G);
+ R = SkTMax(r, R);
+ A = SkTMax(a, A);
+ } else {
+ B = SkTMin(b, B);
+ G = SkTMin(g, G);
+ R = SkTMin(r, R);
+ A = SkTMin(a, A);
+ }
+ }
+ *dptr = SkPackARGB32(A, R, G, B);
+ dptr += dstStrideY;
+ lp += srcStrideY;
+ up += srcStrideY;
+ }
+ if (x >= radius) { src += srcStrideX; }
+ if (x + radius < width - 1) { upperSrc += srcStrideX; }
+ dst += dstStrideX;
+ }
+}
+
+#endif
+
+static auto dilate_x = &morph<kDilate, MorphDirection::kX>,
+ dilate_y = &morph<kDilate, MorphDirection::kY>,
+ erode_x = &morph<kErode, MorphDirection::kX>,
+ erode_y = &morph<kErode, MorphDirection::kY>;
+
+} // namespace SK_OPTS_NS
+
+#endif//SkMorphologyImageFilter_opts_DEFINED
+
diff --git a/gfx/skia/skia/src/opts/SkNx_neon.h b/gfx/skia/skia/src/opts/SkNx_neon.h
new file mode 100644
index 000000000..f5a0b0978
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkNx_neon.h
@@ -0,0 +1,556 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNx_neon_DEFINED
+#define SkNx_neon_DEFINED
+
+#include <arm_neon.h>
+
+#define SKNX_IS_FAST
+
+// ARMv8 has vrndmq_f32 to floor 4 floats. Here we emulate it:
+// - roundtrip through integers via truncation
+// - subtract 1 if that's too big (possible for negative values).
+// This restricts the domain of our inputs to a maximum somewhere around 2^31. Seems plenty big.
+static inline float32x4_t armv7_vrndmq_f32(float32x4_t v) {
+ auto roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));
+ auto too_big = vcgtq_f32(roundtrip, v);
+ return vsubq_f32(roundtrip, (float32x4_t)vandq_u32(too_big, (uint32x4_t)vdupq_n_f32(1)));
+}
+
+template <>
+class SkNx<2, float> {
+public:
+ SkNx(float32x2_t vec) : fVec(vec) {}
+
+ SkNx() {}
+ SkNx(float val) : fVec(vdup_n_f32(val)) {}
+ static SkNx Load(const void* ptr) { return vld1_f32((const float*)ptr); }
+ SkNx(float a, float b) { fVec = (float32x2_t) { a, b }; }
+
+ void store(void* ptr) const { vst1_f32((float*)ptr, fVec); }
+
+ SkNx invert() const {
+ float32x2_t est0 = vrecpe_f32(fVec),
+ est1 = vmul_f32(vrecps_f32(est0, fVec), est0);
+ return est1;
+ }
+
+ SkNx operator + (const SkNx& o) const { return vadd_f32(fVec, o.fVec); }
+ SkNx operator - (const SkNx& o) const { return vsub_f32(fVec, o.fVec); }
+ SkNx operator * (const SkNx& o) const { return vmul_f32(fVec, o.fVec); }
+ SkNx operator / (const SkNx& o) const {
+ #if defined(SK_CPU_ARM64)
+ return vdiv_f32(fVec, o.fVec);
+ #else
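+        // No NEON divide on ARMv7: refine the reciprocal estimate with two Newton-Raphson steps.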
+ float32x2_t est0 = vrecpe_f32(o.fVec),
+ est1 = vmul_f32(vrecps_f32(est0, o.fVec), est0),
+ est2 = vmul_f32(vrecps_f32(est1, o.fVec), est1);
+ return vmul_f32(fVec, est2);
+ #endif
+ }
+
+ SkNx operator == (const SkNx& o) const { return vreinterpret_f32_u32(vceq_f32(fVec, o.fVec)); }
+ SkNx operator < (const SkNx& o) const { return vreinterpret_f32_u32(vclt_f32(fVec, o.fVec)); }
+ SkNx operator > (const SkNx& o) const { return vreinterpret_f32_u32(vcgt_f32(fVec, o.fVec)); }
+ SkNx operator <= (const SkNx& o) const { return vreinterpret_f32_u32(vcle_f32(fVec, o.fVec)); }
+ SkNx operator >= (const SkNx& o) const { return vreinterpret_f32_u32(vcge_f32(fVec, o.fVec)); }
+ SkNx operator != (const SkNx& o) const {
+ return vreinterpret_f32_u32(vmvn_u32(vceq_f32(fVec, o.fVec)));
+ }
+
+ static SkNx Min(const SkNx& l, const SkNx& r) { return vmin_f32(l.fVec, r.fVec); }
+ static SkNx Max(const SkNx& l, const SkNx& r) { return vmax_f32(l.fVec, r.fVec); }
+
+ SkNx rsqrt() const {
+ float32x2_t est0 = vrsqrte_f32(fVec);
+ return vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0);
+ }
+
+ SkNx sqrt() const {
+ #if defined(SK_CPU_ARM64)
+ return vsqrt_f32(fVec);
+ #else
+ float32x2_t est0 = vrsqrte_f32(fVec),
+ est1 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0),
+ est2 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1);
+ return vmul_f32(fVec, est2);
+ #endif
+ }
+
+ float operator[](int k) const {
+ SkASSERT(0 <= k && k < 2);
+ union { float32x2_t v; float fs[2]; } pun = {fVec};
+ return pun.fs[k&1];
+ }
+
+ bool allTrue() const {
+ auto v = vreinterpret_u32_f32(fVec);
+ return vget_lane_u32(v,0) && vget_lane_u32(v,1);
+ }
+ bool anyTrue() const {
+ auto v = vreinterpret_u32_f32(fVec);
+ return vget_lane_u32(v,0) || vget_lane_u32(v,1);
+ }
+
+ float32x2_t fVec;
+};
+
+template <>
+class SkNx<4, float> {
+public:
+ SkNx(float32x4_t vec) : fVec(vec) {}
+
+ SkNx() {}
+ SkNx(float val) : fVec(vdupq_n_f32(val)) {}
+ static SkNx Load(const void* ptr) { return vld1q_f32((const float*)ptr); }
+ SkNx(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; }
+
+ void store(void* ptr) const { vst1q_f32((float*)ptr, fVec); }
+ SkNx invert() const {
+ float32x4_t est0 = vrecpeq_f32(fVec),
+ est1 = vmulq_f32(vrecpsq_f32(est0, fVec), est0);
+ return est1;
+ }
+
+ SkNx operator + (const SkNx& o) const { return vaddq_f32(fVec, o.fVec); }
+ SkNx operator - (const SkNx& o) const { return vsubq_f32(fVec, o.fVec); }
+ SkNx operator * (const SkNx& o) const { return vmulq_f32(fVec, o.fVec); }
+ SkNx operator / (const SkNx& o) const {
+ #if defined(SK_CPU_ARM64)
+ return vdivq_f32(fVec, o.fVec);
+ #else
+ float32x4_t est0 = vrecpeq_f32(o.fVec),
+ est1 = vmulq_f32(vrecpsq_f32(est0, o.fVec), est0),
+ est2 = vmulq_f32(vrecpsq_f32(est1, o.fVec), est1);
+ return vmulq_f32(fVec, est2);
+ #endif
+ }
+
+ SkNx operator==(const SkNx& o) const { return vreinterpretq_f32_u32(vceqq_f32(fVec, o.fVec)); }
+ SkNx operator <(const SkNx& o) const { return vreinterpretq_f32_u32(vcltq_f32(fVec, o.fVec)); }
+ SkNx operator >(const SkNx& o) const { return vreinterpretq_f32_u32(vcgtq_f32(fVec, o.fVec)); }
+ SkNx operator<=(const SkNx& o) const { return vreinterpretq_f32_u32(vcleq_f32(fVec, o.fVec)); }
+ SkNx operator>=(const SkNx& o) const { return vreinterpretq_f32_u32(vcgeq_f32(fVec, o.fVec)); }
+ SkNx operator!=(const SkNx& o) const {
+ return vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec)));
+ }
+
+ static SkNx Min(const SkNx& l, const SkNx& r) { return vminq_f32(l.fVec, r.fVec); }
+ static SkNx Max(const SkNx& l, const SkNx& r) { return vmaxq_f32(l.fVec, r.fVec); }
+
+ SkNx abs() const { return vabsq_f32(fVec); }
+ SkNx floor() const {
+ #if defined(SK_CPU_ARM64)
+ return vrndmq_f32(fVec);
+ #else
+ return armv7_vrndmq_f32(fVec);
+ #endif
+ }
+
+
+ SkNx rsqrt() const {
+ float32x4_t est0 = vrsqrteq_f32(fVec);
+ return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0);
+ }
+
+ SkNx sqrt() const {
+ #if defined(SK_CPU_ARM64)
+ return vsqrtq_f32(fVec);
+ #else
+ float32x4_t est0 = vrsqrteq_f32(fVec),
+ est1 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0),
+ est2 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1);
+ return vmulq_f32(fVec, est2);
+ #endif
+ }
+
+ float operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { float32x4_t v; float fs[4]; } pun = {fVec};
+ return pun.fs[k&3];
+ }
+
+ bool allTrue() const {
+ auto v = vreinterpretq_u32_f32(fVec);
+ return vgetq_lane_u32(v,0) && vgetq_lane_u32(v,1)
+ && vgetq_lane_u32(v,2) && vgetq_lane_u32(v,3);
+ }
+ bool anyTrue() const {
+ auto v = vreinterpretq_u32_f32(fVec);
+ return vgetq_lane_u32(v,0) || vgetq_lane_u32(v,1)
+ || vgetq_lane_u32(v,2) || vgetq_lane_u32(v,3);
+ }
+
+ SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return vbslq_f32(vreinterpretq_u32_f32(fVec), t.fVec, e.fVec);
+ }
+
+ float32x4_t fVec;
+};
+
+// It's possible that for our current use cases, representing this as
+// half a uint16x8_t might be better than representing it as a uint16x4_t.
+// It'd make conversion to Sk4b one step simpler.
+template <>
+class SkNx<4, uint16_t> {
+public:
+ SkNx(const uint16x4_t& vec) : fVec(vec) {}
+
+ SkNx() {}
+ SkNx(uint16_t val) : fVec(vdup_n_u16(val)) {}
+ static SkNx Load(const void* ptr) { return vld1_u16((const uint16_t*)ptr); }
+
+ SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {
+ fVec = (uint16x4_t) { a,b,c,d };
+ }
+
+ void store(void* ptr) const { vst1_u16((uint16_t*)ptr, fVec); }
+
+ SkNx operator + (const SkNx& o) const { return vadd_u16(fVec, o.fVec); }
+ SkNx operator - (const SkNx& o) const { return vsub_u16(fVec, o.fVec); }
+ SkNx operator * (const SkNx& o) const { return vmul_u16(fVec, o.fVec); }
+
+ SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
+ SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
+
+ static SkNx Min(const SkNx& a, const SkNx& b) { return vmin_u16(a.fVec, b.fVec); }
+
+ uint16_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { uint16x4_t v; uint16_t us[4]; } pun = {fVec};
+ return pun.us[k&3];
+ }
+
+ SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return vbsl_u16(fVec, t.fVec, e.fVec);
+ }
+
+ uint16x4_t fVec;
+};
+
+template <>
+class SkNx<8, uint16_t> {
+public:
+ SkNx(const uint16x8_t& vec) : fVec(vec) {}
+
+ SkNx() {}
+ SkNx(uint16_t val) : fVec(vdupq_n_u16(val)) {}
+ static SkNx Load(const void* ptr) { return vld1q_u16((const uint16_t*)ptr); }
+
+ SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
+ uint16_t e, uint16_t f, uint16_t g, uint16_t h) {
+ fVec = (uint16x8_t) { a,b,c,d, e,f,g,h };
+ }
+
+ void store(void* ptr) const { vst1q_u16((uint16_t*)ptr, fVec); }
+
+ SkNx operator + (const SkNx& o) const { return vaddq_u16(fVec, o.fVec); }
+ SkNx operator - (const SkNx& o) const { return vsubq_u16(fVec, o.fVec); }
+ SkNx operator * (const SkNx& o) const { return vmulq_u16(fVec, o.fVec); }
+
+ SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
+ SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
+
+ static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u16(a.fVec, b.fVec); }
+
+ uint16_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 8);
+ union { uint16x8_t v; uint16_t us[8]; } pun = {fVec};
+ return pun.us[k&7];
+ }
+
+ SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return vbslq_u16(fVec, t.fVec, e.fVec);
+ }
+
+ uint16x8_t fVec;
+};
+
+template <>
+class SkNx<4, uint8_t> {
+public:
+ typedef uint32_t __attribute__((aligned(1))) unaligned_uint32_t;
+
+ SkNx(const uint8x8_t& vec) : fVec(vec) {}
+
+ SkNx() {}
+ SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d) {
+ fVec = (uint8x8_t){a,b,c,d, 0,0,0,0};
+ }
+ static SkNx Load(const void* ptr) {
+ return (uint8x8_t)vld1_dup_u32((const unaligned_uint32_t*)ptr);
+ }
+ void store(void* ptr) const {
+ return vst1_lane_u32((unaligned_uint32_t*)ptr, (uint32x2_t)fVec, 0);
+ }
+ uint8_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { uint8x8_t v; uint8_t us[8]; } pun = {fVec};
+ return pun.us[k&3];
+ }
+
+ // TODO as needed
+
+ uint8x8_t fVec;
+};
+
+template <>
+class SkNx<16, uint8_t> {
+public:
+ SkNx(const uint8x16_t& vec) : fVec(vec) {}
+
+ SkNx() {}
+ SkNx(uint8_t val) : fVec(vdupq_n_u8(val)) {}
+ static SkNx Load(const void* ptr) { return vld1q_u8((const uint8_t*)ptr); }
+
+ SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
+ uint8_t e, uint8_t f, uint8_t g, uint8_t h,
+ uint8_t i, uint8_t j, uint8_t k, uint8_t l,
+ uint8_t m, uint8_t n, uint8_t o, uint8_t p) {
+ fVec = (uint8x16_t) { a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p };
+ }
+
+ void store(void* ptr) const { vst1q_u8((uint8_t*)ptr, fVec); }
+
+ SkNx saturatedAdd(const SkNx& o) const { return vqaddq_u8(fVec, o.fVec); }
+
+ SkNx operator + (const SkNx& o) const { return vaddq_u8(fVec, o.fVec); }
+ SkNx operator - (const SkNx& o) const { return vsubq_u8(fVec, o.fVec); }
+
+ static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u8(a.fVec, b.fVec); }
+ SkNx operator < (const SkNx& o) const { return vcltq_u8(fVec, o.fVec); }
+
+ uint8_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 16);
+ union { uint8x16_t v; uint8_t us[16]; } pun = {fVec};
+ return pun.us[k&15];
+ }
+
+ SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return vbslq_u8(fVec, t.fVec, e.fVec);
+ }
+
+ uint8x16_t fVec;
+};
+
+template <>
+class SkNx<4, int32_t> {
+public:
+ SkNx(const int32x4_t& vec) : fVec(vec) {}
+
+ SkNx() {}
+ SkNx(int32_t v) {
+ fVec = vdupq_n_s32(v);
+ }
+ SkNx(int32_t a, int32_t b, int32_t c, int32_t d) {
+ fVec = (int32x4_t){a,b,c,d};
+ }
+ static SkNx Load(const void* ptr) {
+ return vld1q_s32((const int32_t*)ptr);
+ }
+ void store(void* ptr) const {
+ return vst1q_s32((int32_t*)ptr, fVec);
+ }
+ int32_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { int32x4_t v; int32_t is[4]; } pun = {fVec};
+ return pun.is[k&3];
+ }
+
+ SkNx operator + (const SkNx& o) const { return vaddq_s32(fVec, o.fVec); }
+ SkNx operator - (const SkNx& o) const { return vsubq_s32(fVec, o.fVec); }
+ SkNx operator * (const SkNx& o) const { return vmulq_s32(fVec, o.fVec); }
+
+ SkNx operator & (const SkNx& o) const { return vandq_s32(fVec, o.fVec); }
+ SkNx operator | (const SkNx& o) const { return vorrq_s32(fVec, o.fVec); }
+ SkNx operator ^ (const SkNx& o) const { return veorq_s32(fVec, o.fVec); }
+
+ SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
+ SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
+
+ SkNx operator == (const SkNx& o) const {
+ return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec));
+ }
+ SkNx operator < (const SkNx& o) const {
+ return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec));
+ }
+ SkNx operator > (const SkNx& o) const {
+ return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec));
+ }
+
+ static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); }
+ // TODO as needed
+
+ SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return vbslq_s32(vreinterpretq_u32_s32(fVec), t.fVec, e.fVec);
+ }
+
+ int32x4_t fVec;
+};
+
+template <>
+class SkNx<4, uint32_t> {
+public:
+ SkNx(const uint32x4_t& vec) : fVec(vec) {}
+
+ SkNx() {}
+ SkNx(uint32_t v) {
+ fVec = vdupq_n_u32(v);
+ }
+ SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
+ fVec = (uint32x4_t){a,b,c,d};
+ }
+ static SkNx Load(const void* ptr) {
+ return vld1q_u32((const uint32_t*)ptr);
+ }
+ void store(void* ptr) const {
+ return vst1q_u32((uint32_t*)ptr, fVec);
+ }
+ uint32_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { uint32x4_t v; uint32_t us[4]; } pun = {fVec};
+ return pun.us[k&3];
+ }
+
+ SkNx operator + (const SkNx& o) const { return vaddq_u32(fVec, o.fVec); }
+ SkNx operator - (const SkNx& o) const { return vsubq_u32(fVec, o.fVec); }
+ SkNx operator * (const SkNx& o) const { return vmulq_u32(fVec, o.fVec); }
+
+ SkNx operator & (const SkNx& o) const { return vandq_u32(fVec, o.fVec); }
+ SkNx operator | (const SkNx& o) const { return vorrq_u32(fVec, o.fVec); }
+ SkNx operator ^ (const SkNx& o) const { return veorq_u32(fVec, o.fVec); }
+
+ SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
+ SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
+
+ SkNx operator == (const SkNx& o) const { return vceqq_u32(fVec, o.fVec); }
+ SkNx operator < (const SkNx& o) const { return vcltq_u32(fVec, o.fVec); }
+ SkNx operator > (const SkNx& o) const { return vcgtq_u32(fVec, o.fVec); }
+
+ static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u32(a.fVec, b.fVec); }
+ // TODO as needed
+
+ SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return vbslq_u32(fVec, t.fVec, e.fVec);
+ }
+
+ uint32x4_t fVec;
+};
+
+template<> inline Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) {
+    return vcvtq_s32_f32(src.fVec);
+}
+template<> inline Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
+ return vcvtq_f32_s32(src.fVec);
+}
+template<> inline Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) {
+ return SkNx_cast<float>(Sk4i::Load(&src));
+}
+
+template<> inline Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
+ return vqmovn_u32(vcvtq_u32_f32(src.fVec));
+}
+
+template<> inline Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
+ return vcvtq_f32_u32(vmovl_u16(src.fVec));
+}
+
+template<> inline Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
+ uint32x4_t _32 = vcvtq_u32_f32(src.fVec);
+ uint16x4_t _16 = vqmovn_u32(_32);
+ return vqmovn_u16(vcombine_u16(_16, _16));
+}
+
+template<> inline Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
+ uint16x8_t _16 = vmovl_u8 (src.fVec) ;
+ uint32x4_t _32 = vmovl_u16(vget_low_u16(_16));
+ return vcvtq_f32_u32(_32);
+}
+
+template<> inline Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
+ Sk8f ab, cd;
+ SkNx_split(src, &ab, &cd);
+
+ Sk4f a,b,c,d;
+ SkNx_split(ab, &a, &b);
+ SkNx_split(cd, &c, &d);
+ return vuzpq_u8(vuzpq_u8((uint8x16_t)vcvtq_u32_f32(a.fVec),
+ (uint8x16_t)vcvtq_u32_f32(b.fVec)).val[0],
+ vuzpq_u8((uint8x16_t)vcvtq_u32_f32(c.fVec),
+ (uint8x16_t)vcvtq_u32_f32(d.fVec)).val[0]).val[0];
+}
+
+template<> inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
+ return vget_low_u16(vmovl_u8(src.fVec));
+}
+
+template<> inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
+ return vmovn_u16(vcombine_u16(src.fVec, src.fVec));
+}
+
+template<> inline Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
+ uint16x4_t _16 = vqmovun_s32(src.fVec);
+ return vqmovn_u16(vcombine_u16(_16, _16));
+}
+
+template<> inline Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) {
+ return vreinterpretq_s32_u32(vmovl_u16(src.fVec));
+}
+
+template<> inline Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) {
+ return vmovn_u32(vreinterpretq_u32_s32(src.fVec));
+}
+
+template<> /*static*/ inline Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) {
+ return vreinterpretq_s32_u32(src.fVec);
+}
+
+static inline Sk4i Sk4f_round(const Sk4f& x) {
+ return vcvtq_s32_f32((x + 0.5f).fVec);
+}
+
+static inline void Sk4h_load4(const void* ptr, Sk4h* r, Sk4h* g, Sk4h* b, Sk4h* a) {
+ uint16x4x4_t rgba = vld4_u16((const uint16_t*)ptr);
+ *r = rgba.val[0];
+ *g = rgba.val[1];
+ *b = rgba.val[2];
+ *a = rgba.val[3];
+}
+
+static inline void Sk4h_store4(void* dst, const Sk4h& r, const Sk4h& g, const Sk4h& b,
+ const Sk4h& a) {
+ uint16x4x4_t rgba = {{
+ r.fVec,
+ g.fVec,
+ b.fVec,
+ a.fVec,
+ }};
+ vst4_u16((uint16_t*) dst, rgba);
+}
+
+static inline void Sk4f_load4(const void* ptr, Sk4f* r, Sk4f* g, Sk4f* b, Sk4f* a) {
+ float32x4x4_t rgba = vld4q_f32((const float*) ptr);
+ *r = rgba.val[0];
+ *g = rgba.val[1];
+ *b = rgba.val[2];
+ *a = rgba.val[3];
+}
+
+static inline void Sk4f_store4(void* dst, const Sk4f& r, const Sk4f& g, const Sk4f& b,
+ const Sk4f& a) {
+ float32x4x4_t rgba = {{
+ r.fVec,
+ g.fVec,
+ b.fVec,
+ a.fVec,
+ }};
+ vst4q_f32((float*) dst, rgba);
+}
+
+#endif//SkNx_neon_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkNx_sse.h b/gfx/skia/skia/src/opts/SkNx_sse.h
new file mode 100644
index 000000000..25a5cd8f8
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkNx_sse.h
@@ -0,0 +1,506 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNx_sse_DEFINED
+#define SkNx_sse_DEFINED
+
+#include <immintrin.h>
+
+// This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent.
+// If you do, make sure this is in a static inline function... anywhere else risks violating ODR.
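+// (Sk4f::floor() and Sk4f::thenElse() below are examples of that pattern: their SSE4.1
+// paths are guarded by SK_CPU_SSE_LEVEL checks inside inline methods.)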
+
+#define SKNX_IS_FAST
+
+template <>
+class SkNx<2, float> {
+public:
+ SkNx(const __m128& vec) : fVec(vec) {}
+
+ SkNx() {}
+ SkNx(float val) : fVec(_mm_set1_ps(val)) {}
+ static SkNx Load(const void* ptr) {
+ return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)ptr));
+ }
+ SkNx(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}
+
+ void store(void* ptr) const { _mm_storel_pi((__m64*)ptr, fVec); }
+
+ SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
+ SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
+ SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
+ SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }
+
+ SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
+ SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
+ SkNx operator < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
+ SkNx operator > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
+ SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
+ SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }
+
+ static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
+ static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }
+
+ SkNx sqrt() const { return _mm_sqrt_ps (fVec); }
+ SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
+ SkNx invert() const { return _mm_rcp_ps(fVec); }
+
+ float operator[](int k) const {
+ SkASSERT(0 <= k && k < 2);
+ union { __m128 v; float fs[4]; } pun = {fVec};
+ return pun.fs[k&1];
+ }
+
+ bool allTrue() const { return 0xff == (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
+ bool anyTrue() const { return 0x00 != (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
+
+ __m128 fVec;
+};
+
+template <>
+class SkNx<4, float> {
+public:
+ SkNx(const __m128& vec) : fVec(vec) {}
+
+ SkNx() {}
+ SkNx(float val) : fVec( _mm_set1_ps(val) ) {}
+ static SkNx Load(const void* ptr) { return _mm_loadu_ps((const float*)ptr); }
+
+ SkNx(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {}
+
+ void store(void* ptr) const { _mm_storeu_ps((float*)ptr, fVec); }
+
+ SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
+ SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
+ SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
+ SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }
+
+ SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
+ SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
+ SkNx operator < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
+ SkNx operator > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
+ SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
+ SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }
+
+ static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
+ static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }
+
+ SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
+ SkNx floor() const {
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ return _mm_floor_ps(fVec);
+ #else
+ // Emulate _mm_floor_ps() with SSE2:
+ // - roundtrip through integers via truncation
+ // - subtract 1 if that's too big (possible for negative values).
+        // This restricts the domain of our inputs to a maximum somewhere around 2^31.
+ // Seems plenty big.
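+        // For example, floor(-1.5f): truncation gives -1.0f, which is greater than -1.5f,
+        // so we subtract 1.0f and get -2.0f. For +1.5f, truncation already yields 1.0f and
+        // no correction is applied.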
+ __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(fVec));
+ __m128 too_big = _mm_cmpgt_ps(roundtrip, fVec);
+ return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
+ #endif
+ }
+
+ SkNx sqrt() const { return _mm_sqrt_ps (fVec); }
+ SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
+ SkNx invert() const { return _mm_rcp_ps(fVec); }
+
+ float operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { __m128 v; float fs[4]; } pun = {fVec};
+ return pun.fs[k&3];
+ }
+
+ bool allTrue() const { return 0xffff == _mm_movemask_epi8(_mm_castps_si128(fVec)); }
+ bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(_mm_castps_si128(fVec)); }
+
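+    // thenElse treats this vector as a per-lane mask, e.g. the result of a comparison above:
+    // (x < y).thenElse(x, y) picks x wherever x < y and y elsewhere.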
+ SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ return _mm_blendv_ps(e.fVec, t.fVec, fVec);
+ #else
+ return _mm_or_ps(_mm_and_ps (fVec, t.fVec),
+ _mm_andnot_ps(fVec, e.fVec));
+ #endif
+ }
+
+ __m128 fVec;
+};
+
+template <>
+class SkNx<4, int32_t> {
+public:
+ SkNx(const __m128i& vec) : fVec(vec) {}
+
+ SkNx() {}
+ SkNx(int32_t val) : fVec(_mm_set1_epi32(val)) {}
+ static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
+ SkNx(int32_t a, int32_t b, int32_t c, int32_t d) : fVec(_mm_setr_epi32(a,b,c,d)) {}
+
+ void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
+
+ SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
+ SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
+ SkNx operator * (const SkNx& o) const {
+ __m128i mul20 = _mm_mul_epu32(fVec, o.fVec),
+ mul31 = _mm_mul_epu32(_mm_srli_si128(fVec, 4), _mm_srli_si128(o.fVec, 4));
+ return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul20, _MM_SHUFFLE(0,0,2,0)),
+ _mm_shuffle_epi32(mul31, _MM_SHUFFLE(0,0,2,0)));
+ }
+
+ SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
+ SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
+ SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }
+
+ SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
+ SkNx operator >> (int bits) const { return _mm_srai_epi32(fVec, bits); }
+
+ SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
+ SkNx operator < (const SkNx& o) const { return _mm_cmplt_epi32 (fVec, o.fVec); }
+ SkNx operator > (const SkNx& o) const { return _mm_cmpgt_epi32 (fVec, o.fVec); }
+
+ int32_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { __m128i v; int32_t is[4]; } pun = {fVec};
+ return pun.is[k&3];
+ }
+
+ SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ return _mm_blendv_epi8(e.fVec, t.fVec, fVec);
+ #else
+ return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
+ _mm_andnot_si128(fVec, e.fVec));
+ #endif
+ }
+
+ __m128i fVec;
+};
+
+template <>
+class SkNx<4, uint32_t> {
+public:
+ SkNx(const __m128i& vec) : fVec(vec) {}
+
+ SkNx() {}
+ SkNx(uint32_t val) : fVec(_mm_set1_epi32(val)) {}
+ static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
+ SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) : fVec(_mm_setr_epi32(a,b,c,d)) {}
+
+ void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
+
+ SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
+ SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
+    // Not quite sure how best to do operator * in SSE2. We probably don't use it.
+
+ SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
+ SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
+ SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }
+
+ SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
+ SkNx operator >> (int bits) const { return _mm_srli_epi32(fVec, bits); }
+
+ SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
+    // operator < and > take a little extra fiddling to make them work for unsigned ints.
+
+ uint32_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { __m128i v; uint32_t us[4]; } pun = {fVec};
+ return pun.us[k&3];
+ }
+
+ SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ return _mm_blendv_epi8(e.fVec, t.fVec, fVec);
+ #else
+ return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
+ _mm_andnot_si128(fVec, e.fVec));
+ #endif
+ }
+
+ __m128i fVec;
+};
+
+
+template <>
+class SkNx<4, uint16_t> {
+public:
+ SkNx(const __m128i& vec) : fVec(vec) {}
+
+ SkNx() {}
+ SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
+ static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
+ SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) : fVec(_mm_setr_epi16(a,b,c,d,0,0,0,0)) {}
+
+ void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }
+
+ SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
+ SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
+ SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }
+
+ SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
+ SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }
+
+ uint16_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { __m128i v; uint16_t us[8]; } pun = {fVec};
+ return pun.us[k&3];
+ }
+
+ __m128i fVec;
+};
+
+template <>
+class SkNx<8, uint16_t> {
+public:
+ SkNx(const __m128i& vec) : fVec(vec) {}
+
+ SkNx() {}
+ SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
+ static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
+ SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
+ uint16_t e, uint16_t f, uint16_t g, uint16_t h) : fVec(_mm_setr_epi16(a,b,c,d,e,f,g,h)) {}
+
+ void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
+
+ SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
+ SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
+ SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }
+
+ SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
+ SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }
+
+ static SkNx Min(const SkNx& a, const SkNx& b) {
+ // No unsigned _mm_min_epu16, so we'll shift into a space where we can use the
+ // signed version, _mm_min_epi16, then shift back.
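+        // (Adding or subtracting 0x8000 only flips the top bit of each 16-bit lane, since
+        // the low byte of the constant is zero, so the byte-wise _mm_add_epi8/_mm_sub_epi8
+        // below behave the same as their 16-bit counterparts would here.)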
+ const uint16_t top = 0x8000; // Keep this separate from _mm_set1_epi16 or MSVC will whine.
+ const __m128i top_8x = _mm_set1_epi16(top);
+ return _mm_add_epi8(top_8x, _mm_min_epi16(_mm_sub_epi8(a.fVec, top_8x),
+ _mm_sub_epi8(b.fVec, top_8x)));
+ }
+
+ SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
+ _mm_andnot_si128(fVec, e.fVec));
+ }
+
+ uint16_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 8);
+ union { __m128i v; uint16_t us[8]; } pun = {fVec};
+ return pun.us[k&7];
+ }
+
+ __m128i fVec;
+};
+
+template <>
+class SkNx<4, uint8_t> {
+public:
+ SkNx() {}
+ SkNx(const __m128i& vec) : fVec(vec) {}
+ SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
+ : fVec(_mm_setr_epi8(a,b,c,d, 0,0,0,0, 0,0,0,0, 0,0,0,0)) {}
+
+
+ static SkNx Load(const void* ptr) { return _mm_cvtsi32_si128(*(const int*)ptr); }
+ void store(void* ptr) const { *(int*)ptr = _mm_cvtsi128_si32(fVec); }
+
+ uint8_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { __m128i v; uint8_t us[16]; } pun = {fVec};
+ return pun.us[k&3];
+ }
+
+ // TODO as needed
+
+ __m128i fVec;
+};
+
+template <>
+class SkNx<16, uint8_t> {
+public:
+ SkNx(const __m128i& vec) : fVec(vec) {}
+
+ SkNx() {}
+ SkNx(uint8_t val) : fVec(_mm_set1_epi8(val)) {}
+ static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
+ SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
+ uint8_t e, uint8_t f, uint8_t g, uint8_t h,
+ uint8_t i, uint8_t j, uint8_t k, uint8_t l,
+ uint8_t m, uint8_t n, uint8_t o, uint8_t p)
+ : fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p)) {}
+
+ void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
+
+ SkNx saturatedAdd(const SkNx& o) const { return _mm_adds_epu8(fVec, o.fVec); }
+
+ SkNx operator + (const SkNx& o) const { return _mm_add_epi8(fVec, o.fVec); }
+ SkNx operator - (const SkNx& o) const { return _mm_sub_epi8(fVec, o.fVec); }
+
+ static SkNx Min(const SkNx& a, const SkNx& b) { return _mm_min_epu8(a.fVec, b.fVec); }
+ SkNx operator < (const SkNx& o) const {
+ // There's no unsigned _mm_cmplt_epu8, so we flip the sign bits then use a signed compare.
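+        // e.g. 0x01 < 0xFE: after the flip they become 0x81 (-127) and 0x7E (+126),
+        // and -127 < +126 as signed bytes, which matches the unsigned ordering.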
+ auto flip = _mm_set1_epi8(char(0x80));
+ return _mm_cmplt_epi8(_mm_xor_si128(flip, fVec), _mm_xor_si128(flip, o.fVec));
+ }
+
+ uint8_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 16);
+ union { __m128i v; uint8_t us[16]; } pun = {fVec};
+ return pun.us[k&15];
+ }
+
+ SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
+ _mm_andnot_si128(fVec, e.fVec));
+ }
+
+ __m128i fVec;
+};
+
+template<> /*static*/ inline Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
+ return _mm_cvtepi32_ps(src.fVec);
+}
+template<> /*static*/ inline Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) {
+ return SkNx_cast<float>(Sk4i::Load(&src));
+}
+
+template <> /*static*/ inline Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) {
+ return _mm_cvttps_epi32(src.fVec);
+}
+
+template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) {
+#if 0 && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ // TODO: This seems to be causing code generation problems. Investigate?
+ return _mm_packus_epi32(src.fVec);
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ // With SSSE3, we can just shuffle the low 2 bytes from each lane right into place.
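+    // (The '_' indices have their high bit set, so _mm_shuffle_epi8 zeroes those output bytes.)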
+ const int _ = ~0;
+ return _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,1, 4,5, 8,9, 12,13, _,_,_,_,_,_,_,_));
+#else
+ // With SSE2, we have to sign extend our input, making _mm_packs_epi32 do the pack we want.
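+    // (Shifting left then arithmetic-right by 16 sign-extends the low 16 bits, so a value
+    // like 0xFFFF becomes -1 and packs back to 0xFFFF instead of saturating to 0x7FFF.)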
+ __m128i x = _mm_srai_epi32(_mm_slli_epi32(src.fVec, 16), 16);
+ return _mm_packs_epi32(x,x);
+#endif
+}
+
+template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
+ return SkNx_cast<uint16_t>(SkNx_cast<int32_t>(src));
+}
+
+template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
+ auto _32 = _mm_cvttps_epi32(src.fVec);
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ const int _ = ~0;
+ return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_));
+#else
+ auto _16 = _mm_packus_epi16(_32, _32);
+ return _mm_packus_epi16(_16, _16);
+#endif
+}
+
+template<> /*static*/ inline Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ const int _ = ~0;
+ auto _32 = _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
+#else
+ auto _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()),
+ _32 = _mm_unpacklo_epi16(_16, _mm_setzero_si128());
+#endif
+ return _mm_cvtepi32_ps(_32);
+}
+
+template<> /*static*/ inline Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
+ auto _32 = _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
+ return _mm_cvtepi32_ps(_32);
+}
+
+template<> /*static*/ inline Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
+ Sk8f ab, cd;
+ SkNx_split(src, &ab, &cd);
+
+ Sk4f a,b,c,d;
+ SkNx_split(ab, &a, &b);
+ SkNx_split(cd, &c, &d);
+
+ return _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
+ _mm_cvttps_epi32(b.fVec)),
+ _mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
+ _mm_cvttps_epi32(d.fVec)));
+}
+
+template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
+ return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
+}
+
+template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
+ return _mm_packus_epi16(src.fVec, src.fVec);
+}
+
+template<> /*static*/ inline Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) {
+ return _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
+}
+
+template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
+ return _mm_packus_epi16(_mm_packus_epi16(src.fVec, src.fVec), src.fVec);
+}
+
+template<> /*static*/ inline Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) {
+ return src.fVec;
+}
+
+static inline Sk4i Sk4f_round(const Sk4f& x) {
+ return _mm_cvtps_epi32(x.fVec);
+}
+
+static inline void Sk4h_load4(const void* ptr, Sk4h* r, Sk4h* g, Sk4h* b, Sk4h* a) {
+ __m128i lo = _mm_loadu_si128(((__m128i*)ptr) + 0),
+ hi = _mm_loadu_si128(((__m128i*)ptr) + 1);
+ __m128i even = _mm_unpacklo_epi16(lo, hi), // r0 r2 g0 g2 b0 b2 a0 a2
+ odd = _mm_unpackhi_epi16(lo, hi); // r1 r3 ...
+ __m128i rg = _mm_unpacklo_epi16(even, odd), // r0 r1 r2 r3 g0 g1 g2 g3
+ ba = _mm_unpackhi_epi16(even, odd); // b0 b1 ... a0 a1 ...
+ *r = rg;
+ *g = _mm_srli_si128(rg, 8);
+ *b = ba;
+ *a = _mm_srli_si128(ba, 8);
+}
+
+static inline void Sk4h_store4(void* dst, const Sk4h& r, const Sk4h& g, const Sk4h& b,
+ const Sk4h& a) {
+ __m128i rg = _mm_unpacklo_epi16(r.fVec, g.fVec);
+ __m128i ba = _mm_unpacklo_epi16(b.fVec, a.fVec);
+ __m128i lo = _mm_unpacklo_epi32(rg, ba);
+ __m128i hi = _mm_unpackhi_epi32(rg, ba);
+ _mm_storeu_si128(((__m128i*) dst) + 0, lo);
+ _mm_storeu_si128(((__m128i*) dst) + 1, hi);
+}
+
+static inline void Sk4f_load4(const void* ptr, Sk4f* r, Sk4f* g, Sk4f* b, Sk4f* a) {
+ __m128 v0 = _mm_loadu_ps(((float*)ptr) + 0),
+ v1 = _mm_loadu_ps(((float*)ptr) + 4),
+ v2 = _mm_loadu_ps(((float*)ptr) + 8),
+ v3 = _mm_loadu_ps(((float*)ptr) + 12);
+ _MM_TRANSPOSE4_PS(v0, v1, v2, v3);
+ *r = v0;
+ *g = v1;
+ *b = v2;
+ *a = v3;
+}
+
+static inline void Sk4f_store4(void* dst, const Sk4f& r, const Sk4f& g, const Sk4f& b,
+ const Sk4f& a) {
+ __m128 v0 = r.fVec,
+ v1 = g.fVec,
+ v2 = b.fVec,
+ v3 = a.fVec;
+ _MM_TRANSPOSE4_PS(v0, v1, v2, v3);
+ _mm_storeu_ps(((float*) dst) + 0, v0);
+ _mm_storeu_ps(((float*) dst) + 4, v1);
+ _mm_storeu_ps(((float*) dst) + 8, v2);
+ _mm_storeu_ps(((float*) dst) + 12, v3);
+}
+
+#endif//SkNx_sse_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkOpts_avx.cpp b/gfx/skia/skia/src/opts/SkOpts_avx.cpp
new file mode 100644
index 000000000..b5df2b69f
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkOpts_avx.cpp
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkOpts.h"
+
+#define SK_OPTS_NS avx
+
+namespace SkOpts {
+ void Init_avx() { }
+}
diff --git a/gfx/skia/skia/src/opts/SkOpts_crc32.cpp b/gfx/skia/skia/src/opts/SkOpts_crc32.cpp
new file mode 100644
index 000000000..8fc88aa7f
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkOpts_crc32.cpp
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkOpts.h"
+
+#define SK_OPTS_NS crc32
+#include "SkChecksum_opts.h"
+
+namespace SkOpts {
+ void Init_crc32() {
+ hash_fn = crc32::hash_fn;
+ }
+}
diff --git a/gfx/skia/skia/src/opts/SkOpts_hsw.cpp b/gfx/skia/skia/src/opts/SkOpts_hsw.cpp
new file mode 100644
index 000000000..53e2e5acd
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkOpts_hsw.cpp
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkOpts.h"
+
+#define SK_OPTS_NS hsw
+
+namespace SkOpts {
+ void Init_hsw() { }
+}
+
diff --git a/gfx/skia/skia/src/opts/SkOpts_neon.cpp b/gfx/skia/skia/src/opts/SkOpts_neon.cpp
new file mode 100644
index 000000000..751bea251
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkOpts_neon.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkOpts.h"
+
+#define SK_OPTS_NS sk_neon
+#include "SkBlitMask_opts.h"
+#include "SkBlitRow_opts.h"
+#include "SkBlurImageFilter_opts.h"
+#include "SkColorCubeFilter_opts.h"
+#include "SkMorphologyImageFilter_opts.h"
+#include "SkSwizzler_opts.h"
+#include "SkTextureCompressor_opts.h"
+#include "SkXfermode_opts.h"
+
+namespace SkOpts {
+ void Init_neon() {
+ create_xfermode = sk_neon::create_xfermode;
+
+ box_blur_xx = sk_neon::box_blur_xx;
+ box_blur_xy = sk_neon::box_blur_xy;
+ box_blur_yx = sk_neon::box_blur_yx;
+
+ dilate_x = sk_neon::dilate_x;
+ dilate_y = sk_neon::dilate_y;
+ erode_x = sk_neon::erode_x;
+ erode_y = sk_neon::erode_y;
+
+ texture_compressor = sk_neon::texture_compressor;
+ fill_block_dimensions = sk_neon::fill_block_dimensions;
+
+ blit_mask_d32_a8 = sk_neon::blit_mask_d32_a8;
+
+ blit_row_color32 = sk_neon::blit_row_color32;
+ blit_row_s32a_opaque = sk_neon::blit_row_s32a_opaque;
+
+ color_cube_filter_span = sk_neon::color_cube_filter_span;
+
+ RGBA_to_BGRA = sk_neon::RGBA_to_BGRA;
+ RGBA_to_rgbA = sk_neon::RGBA_to_rgbA;
+ RGBA_to_bgrA = sk_neon::RGBA_to_bgrA;
+ RGB_to_RGB1 = sk_neon::RGB_to_RGB1;
+ RGB_to_BGR1 = sk_neon::RGB_to_BGR1;
+ gray_to_RGB1 = sk_neon::gray_to_RGB1;
+ grayA_to_RGBA = sk_neon::grayA_to_RGBA;
+ grayA_to_rgbA = sk_neon::grayA_to_rgbA;
+ inverted_CMYK_to_RGB1 = sk_neon::inverted_CMYK_to_RGB1;
+ inverted_CMYK_to_BGR1 = sk_neon::inverted_CMYK_to_BGR1;
+ }
+}
diff --git a/gfx/skia/skia/src/opts/SkOpts_sse41.cpp b/gfx/skia/skia/src/opts/SkOpts_sse41.cpp
new file mode 100644
index 000000000..3a37834c7
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkOpts_sse41.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkOpts.h"
+
+#define SK_OPTS_NS sse41
+#include "SkBlurImageFilter_opts.h"
+#include "SkBlitRow_opts.h"
+#include "SkBlend_opts.h"
+#include "SkRasterPipeline_opts.h"
+
+namespace SkOpts {
+ void Init_sse41() {
+ box_blur_xx = sse41::box_blur_xx;
+ box_blur_xy = sse41::box_blur_xy;
+ box_blur_yx = sse41::box_blur_yx;
+ srcover_srgb_srgb = sse41::srcover_srgb_srgb;
+ blit_row_s32a_opaque = sse41::blit_row_s32a_opaque;
+
+ #define STAGE(stage, kCallNext) \
+ stages_4 [SkRasterPipeline::stage] = stage_4 <SK_OPTS_NS::stage, kCallNext>; \
+ stages_1_3[SkRasterPipeline::stage] = stage_1_3<SK_OPTS_NS::stage, kCallNext>
+
+ STAGE(store_565 , false);
+ STAGE(store_srgb, false);
+ STAGE(store_f16 , false);
+
+ STAGE(load_s_565 , true);
+ STAGE(load_s_srgb, true);
+ STAGE(load_s_f16 , true);
+
+ STAGE(load_d_565 , true);
+ STAGE(load_d_srgb, true);
+ STAGE(load_d_f16 , true);
+
+ STAGE(scale_u8, true);
+
+ STAGE(lerp_u8 , true);
+ STAGE(lerp_565 , true);
+ STAGE(lerp_constant_float, true);
+
+ STAGE(constant_color, true);
+
+ #undef STAGE
+
+ #define STAGE(stage) \
+ stages_4 [SkRasterPipeline::stage] = SK_OPTS_NS::stage; \
+ stages_1_3[SkRasterPipeline::stage] = SK_OPTS_NS::stage
+
+ STAGE(dst);
+ STAGE(dstatop);
+ STAGE(dstin);
+ STAGE(dstout);
+ STAGE(dstover);
+ STAGE(srcatop);
+ STAGE(srcin);
+ STAGE(srcout);
+ STAGE(srcover);
+ STAGE(clear);
+ STAGE(modulate);
+ STAGE(multiply);
+ STAGE(plus_);
+ STAGE(screen);
+ STAGE(xor_);
+ STAGE(colorburn);
+ STAGE(colordodge);
+ STAGE(darken);
+ STAGE(difference);
+ STAGE(exclusion);
+ STAGE(hardlight);
+ STAGE(lighten);
+ STAGE(overlay);
+ STAGE(softlight);
+ #undef STAGE
+
+ }
+}
diff --git a/gfx/skia/skia/src/opts/SkOpts_sse42.cpp b/gfx/skia/skia/src/opts/SkOpts_sse42.cpp
new file mode 100644
index 000000000..188318219
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkOpts_sse42.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkOpts.h"
+
+#define SK_OPTS_NS sse42
+#include "SkChecksum_opts.h"
+
+namespace SkOpts {
+ void Init_sse42() {
+ hash_fn = sse42::hash_fn;
+ }
+}
+
diff --git a/gfx/skia/skia/src/opts/SkOpts_ssse3.cpp b/gfx/skia/skia/src/opts/SkOpts_ssse3.cpp
new file mode 100644
index 000000000..ec968339a
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkOpts_ssse3.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkOpts.h"
+#define SK_OPTS_NS ssse3
+#include "SkBlitMask_opts.h"
+#include "SkColorCubeFilter_opts.h"
+#include "SkSwizzler_opts.h"
+#include "SkXfermode_opts.h"
+
+namespace SkOpts {
+ void Init_ssse3() {
+ create_xfermode = ssse3::create_xfermode;
+ blit_mask_d32_a8 = ssse3::blit_mask_d32_a8;
+ color_cube_filter_span = ssse3::color_cube_filter_span;
+
+ RGBA_to_BGRA = ssse3::RGBA_to_BGRA;
+ RGBA_to_rgbA = ssse3::RGBA_to_rgbA;
+ RGBA_to_bgrA = ssse3::RGBA_to_bgrA;
+ RGB_to_RGB1 = ssse3::RGB_to_RGB1;
+ RGB_to_BGR1 = ssse3::RGB_to_BGR1;
+ gray_to_RGB1 = ssse3::gray_to_RGB1;
+ grayA_to_RGBA = ssse3::grayA_to_RGBA;
+ grayA_to_rgbA = ssse3::grayA_to_rgbA;
+ inverted_CMYK_to_RGB1 = ssse3::inverted_CMYK_to_RGB1;
+ inverted_CMYK_to_BGR1 = ssse3::inverted_CMYK_to_BGR1;
+ }
+}
diff --git a/gfx/skia/skia/src/opts/SkRasterPipeline_opts.h b/gfx/skia/skia/src/opts/SkRasterPipeline_opts.h
new file mode 100644
index 000000000..fdb15b421
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkRasterPipeline_opts.h
@@ -0,0 +1,358 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRasterPipeline_opts_DEFINED
+#define SkRasterPipeline_opts_DEFINED
+
+#include "SkHalf.h"
+#include "SkPM4f.h"
+#include "SkRasterPipeline.h"
+#include "SkSRGB.h"
+
+using Kernel_Sk4f = void(void*, size_t, size_t, Sk4f&, Sk4f&, Sk4f&, Sk4f&,
+ Sk4f&, Sk4f&, Sk4f&, Sk4f&);
+
+// These are always static, and we _really_ want them to inline.
+// If you find yourself wanting a non-inline stage, write a SkRasterPipeline::Fn directly.
+#define KERNEL_Sk4f(name) \
+ static SK_ALWAYS_INLINE void name(void* ctx, size_t x, size_t tail, \
+ Sk4f& r, Sk4f& g, Sk4f& b, Sk4f& a, \
+ Sk4f& dr, Sk4f& dg, Sk4f& db, Sk4f& da)
+
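+// (Of the kernels defined with this macro below, constant_color is the simplest: it ignores
+// x and tail and just loads the paint color out of ctx.)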
+
+template <Kernel_Sk4f kernel, bool kCallNext>
+static inline void SK_VECTORCALL stage_4(SkRasterPipeline::Stage* st, size_t x, size_t tail,
+ Sk4f r, Sk4f g, Sk4f b, Sk4f a,
+ Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
+ // Passing 0 lets the optimizer completely drop any "if (tail) {...}" code in kernel.
+ kernel(st->ctx<void*>(), x,0, r,g,b,a, dr,dg,db,da);
+ if (kCallNext) {
+        st->next(x,tail, r,g,b,a, dr,dg,db,da); // It's faster to pass tail here than 0.
+ }
+}
+
+template <Kernel_Sk4f kernel, bool kCallNext>
+static inline void SK_VECTORCALL stage_1_3(SkRasterPipeline::Stage* st, size_t x, size_t tail,
+ Sk4f r, Sk4f g, Sk4f b, Sk4f a,
+ Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
+#if defined(__clang__)
+ __builtin_assume(tail > 0); // This flourish lets Clang compile away any tail==0 code.
+#endif
+ kernel(st->ctx<void*>(), x,tail, r,g,b,a, dr,dg,db,da);
+ if (kCallNext) {
+ st->next(x,tail, r,g,b,a, dr,dg,db,da);
+ }
+}
+
+// Many xfermodes apply the same logic to each channel.
+#define RGBA_XFERMODE_Sk4f(name) \
+ static SK_ALWAYS_INLINE Sk4f name##_kernel(const Sk4f& s, const Sk4f& sa, \
+ const Sk4f& d, const Sk4f& da); \
+ static void SK_VECTORCALL name(SkRasterPipeline::Stage* st, size_t x, size_t tail, \
+ Sk4f r, Sk4f g, Sk4f b, Sk4f a, \
+ Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) { \
+ r = name##_kernel(r,a,dr,da); \
+ g = name##_kernel(g,a,dg,da); \
+ b = name##_kernel(b,a,db,da); \
+ a = name##_kernel(a,a,da,da); \
+ st->next(x,tail, r,g,b,a, dr,dg,db,da); \
+ } \
+ static SK_ALWAYS_INLINE Sk4f name##_kernel(const Sk4f& s, const Sk4f& sa, \
+ const Sk4f& d, const Sk4f& da)
+
+// Most of the rest apply the same logic to color channels and use srcover's alpha logic.
+#define RGB_XFERMODE_Sk4f(name) \
+ static SK_ALWAYS_INLINE Sk4f name##_kernel(const Sk4f& s, const Sk4f& sa, \
+ const Sk4f& d, const Sk4f& da); \
+ static void SK_VECTORCALL name(SkRasterPipeline::Stage* st, size_t x, size_t tail, \
+ Sk4f r, Sk4f g, Sk4f b, Sk4f a, \
+ Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) { \
+ r = name##_kernel(r,a,dr,da); \
+ g = name##_kernel(g,a,dg,da); \
+ b = name##_kernel(b,a,db,da); \
+ a = a + (da * (1.0f-a)); \
+ st->next(x,tail, r,g,b,a, dr,dg,db,da); \
+ } \
+ static SK_ALWAYS_INLINE Sk4f name##_kernel(const Sk4f& s, const Sk4f& sa, \
+ const Sk4f& d, const Sk4f& da)
+
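+// For example, RGBA_XFERMODE_Sk4f(srcover) below defines a stage whose kernel returns
+// s + inv(sa)*d, applied to r, g, b, and a alike before calling the next stage.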
+namespace SK_OPTS_NS {
+
+ // Clamp colors into [0,1] premul (e.g. just before storing back to memory).
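+    // (In premultiplied color every channel must satisfy 0 <= channel <= alpha, which is why
+    // r, g, and b are clamped to [0,a] rather than [0,1].)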
+ static void clamp_01_premul(Sk4f& r, Sk4f& g, Sk4f& b, Sk4f& a) {
+ a = Sk4f::Max(a, 0.0f);
+ r = Sk4f::Max(r, 0.0f);
+ g = Sk4f::Max(g, 0.0f);
+ b = Sk4f::Max(b, 0.0f);
+
+ a = Sk4f::Min(a, 1.0f);
+ r = Sk4f::Min(r, a);
+ g = Sk4f::Min(g, a);
+ b = Sk4f::Min(b, a);
+ }
+
+ static Sk4f inv(const Sk4f& x) { return 1.0f - x; }
+
+ static Sk4f lerp(const Sk4f& from, const Sk4f& to, const Sk4f& cov) {
+ return from + (to-from)*cov;
+ }
+
+ template <typename T>
+ static SkNx<4,T> load_tail(size_t tail, const T* src) {
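+        // e.g. load_tail(2, src) yields { src[0], src[1], 0, 0 }; tail == 0 means "load all four".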
+ if (tail) {
+ return SkNx<4,T>(src[0], (tail>1 ? src[1] : 0), (tail>2 ? src[2] : 0), 0);
+ }
+ return SkNx<4,T>::Load(src);
+ }
+
+ template <typename T>
+ static void store_tail(size_t tail, const SkNx<4,T>& v, T* dst) {
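+        // The cases deliberately fall through: tail == 3 stores lanes 2, 1, and 0, and so on.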
+ switch(tail) {
+ case 0: return v.store(dst);
+ case 3: dst[2] = v[2];
+ case 2: dst[1] = v[1];
+ case 1: dst[0] = v[0];
+ }
+ }
+
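+    // RGB565 layout: r in bits [15:11], g in [10:5], b in [4:0]; the SK_*16_SHIFT and
+    // SK_*16_MASK_IN_PLACE constants encode this layout.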
+ static void from_565(const Sk4h& _565, Sk4f* r, Sk4f* g, Sk4f* b) {
+ Sk4i _32_bit = SkNx_cast<int>(_565);
+
+ *r = SkNx_cast<float>(_32_bit & SK_R16_MASK_IN_PLACE) * (1.0f / SK_R16_MASK_IN_PLACE);
+ *g = SkNx_cast<float>(_32_bit & SK_G16_MASK_IN_PLACE) * (1.0f / SK_G16_MASK_IN_PLACE);
+ *b = SkNx_cast<float>(_32_bit & SK_B16_MASK_IN_PLACE) * (1.0f / SK_B16_MASK_IN_PLACE);
+ }
+
+ static Sk4h to_565(const Sk4f& r, const Sk4f& g, const Sk4f& b) {
+ return SkNx_cast<uint16_t>( Sk4f_round(r * SK_R16_MASK) << SK_R16_SHIFT
+ | Sk4f_round(g * SK_G16_MASK) << SK_G16_SHIFT
+ | Sk4f_round(b * SK_B16_MASK) << SK_B16_SHIFT);
+ }
+
+
+ // The default shader produces a constant color (from the SkPaint).
+ KERNEL_Sk4f(constant_color) {
+ auto color = (const SkPM4f*)ctx;
+ r = color->r();
+ g = color->g();
+ b = color->b();
+ a = color->a();
+ }
+
+ // s' = d(1-c) + sc, for a constant c.
+ KERNEL_Sk4f(lerp_constant_float) {
+ Sk4f c = *(const float*)ctx;
+
+ r = lerp(dr, r, c);
+ g = lerp(dg, g, c);
+ b = lerp(db, b, c);
+ a = lerp(da, a, c);
+ }
+
+ // s' = sc for 8-bit c.
+ KERNEL_Sk4f(scale_u8) {
+ auto ptr = (const uint8_t*)ctx + x;
+
+ Sk4f c = SkNx_cast<float>(load_tail(tail, ptr)) * (1/255.0f);
+ r = r*c;
+ g = g*c;
+ b = b*c;
+ a = a*c;
+ }
+
+ // s' = d(1-c) + sc for 8-bit c.
+ KERNEL_Sk4f(lerp_u8) {
+ auto ptr = (const uint8_t*)ctx + x;
+
+ Sk4f c = SkNx_cast<float>(load_tail(tail, ptr)) * (1/255.0f);
+ r = lerp(dr, r, c);
+ g = lerp(dg, g, c);
+ b = lerp(db, b, c);
+ a = lerp(da, a, c);
+ }
+
+ // s' = d(1-c) + sc for 565 c.
+ KERNEL_Sk4f(lerp_565) {
+ auto ptr = (const uint16_t*)ctx + x;
+ Sk4f cr, cg, cb;
+ from_565(load_tail(tail, ptr), &cr, &cg, &cb);
+
+ r = lerp(dr, r, cr);
+ g = lerp(dg, g, cg);
+ b = lerp(db, b, cb);
+ a = 1.0f;
+ }
+
+ KERNEL_Sk4f(load_d_565) {
+ auto ptr = (const uint16_t*)ctx + x;
+ from_565(load_tail(tail, ptr), &dr,&dg,&db);
+ da = 1.0f;
+ }
+
+ KERNEL_Sk4f(load_s_565) {
+ auto ptr = (const uint16_t*)ctx + x;
+ from_565(load_tail(tail, ptr), &r,&g,&b);
+ a = 1.0f;
+ }
+
+ KERNEL_Sk4f(store_565) {
+ clamp_01_premul(r,g,b,a);
+ auto ptr = (uint16_t*)ctx + x;
+ store_tail(tail, to_565(r,g,b), ptr);
+ }
+
+ KERNEL_Sk4f(load_d_f16) {
+ auto ptr = (const uint64_t*)ctx + x;
+
+ if (tail) {
+ auto p0 = SkHalfToFloat_finite_ftz(ptr[0]) ,
+ p1 = tail>1 ? SkHalfToFloat_finite_ftz(ptr[1]) : Sk4f{0},
+ p2 = tail>2 ? SkHalfToFloat_finite_ftz(ptr[2]) : Sk4f{0};
+ dr = { p0[0],p1[0],p2[0],0 };
+ dg = { p0[1],p1[1],p2[1],0 };
+ db = { p0[2],p1[2],p2[2],0 };
+ da = { p0[3],p1[3],p2[3],0 };
+ return;
+ }
+
+ Sk4h rh, gh, bh, ah;
+ Sk4h_load4(ptr, &rh, &gh, &bh, &ah);
+ dr = SkHalfToFloat_finite_ftz(rh);
+ dg = SkHalfToFloat_finite_ftz(gh);
+ db = SkHalfToFloat_finite_ftz(bh);
+ da = SkHalfToFloat_finite_ftz(ah);
+ }
+
+ KERNEL_Sk4f(load_s_f16) {
+ auto ptr = (const uint64_t*)ctx + x;
+
+ if (tail) {
+ auto p0 = SkHalfToFloat_finite_ftz(ptr[0]) ,
+ p1 = tail>1 ? SkHalfToFloat_finite_ftz(ptr[1]) : Sk4f{0},
+ p2 = tail>2 ? SkHalfToFloat_finite_ftz(ptr[2]) : Sk4f{0};
+ r = { p0[0],p1[0],p2[0],0 };
+ g = { p0[1],p1[1],p2[1],0 };
+ b = { p0[2],p1[2],p2[2],0 };
+ a = { p0[3],p1[3],p2[3],0 };
+ return;
+ }
+
+ Sk4h rh, gh, bh, ah;
+ Sk4h_load4(ptr, &rh, &gh, &bh, &ah);
+ r = SkHalfToFloat_finite_ftz(rh);
+ g = SkHalfToFloat_finite_ftz(gh);
+ b = SkHalfToFloat_finite_ftz(bh);
+ a = SkHalfToFloat_finite_ftz(ah);
+ }
+
+ KERNEL_Sk4f(store_f16) {
+ clamp_01_premul(r,g,b,a);
+ auto ptr = (uint64_t*)ctx + x;
+
+ switch (tail) {
+ case 0: return Sk4h_store4(ptr, SkFloatToHalf_finite_ftz(r),
+ SkFloatToHalf_finite_ftz(g),
+ SkFloatToHalf_finite_ftz(b),
+ SkFloatToHalf_finite_ftz(a));
+
+ case 3: SkFloatToHalf_finite_ftz({r[2], g[2], b[2], a[2]}).store(ptr+2);
+ case 2: SkFloatToHalf_finite_ftz({r[1], g[1], b[1], a[1]}).store(ptr+1);
+ case 1: SkFloatToHalf_finite_ftz({r[0], g[0], b[0], a[0]}).store(ptr+0);
+ }
+ }
+
+
+ // Load 8-bit SkPMColor-order sRGB.
+ KERNEL_Sk4f(load_d_srgb) {
+ auto ptr = (const uint32_t*)ctx + x;
+
+ auto px = load_tail(tail, (const int*)ptr);
+ dr = sk_linear_from_srgb_math((px >> SK_R32_SHIFT) & 0xff);
+ dg = sk_linear_from_srgb_math((px >> SK_G32_SHIFT) & 0xff);
+ db = sk_linear_from_srgb_math((px >> SK_B32_SHIFT) & 0xff);
+ da = (1/255.0f)*SkNx_cast<float>((px >> SK_A32_SHIFT) & 0xff);
+ }
+
+ KERNEL_Sk4f(load_s_srgb) {
+ auto ptr = (const uint32_t*)ctx + x;
+
+ auto px = load_tail(tail, (const int*)ptr);
+ r = sk_linear_from_srgb_math((px >> SK_R32_SHIFT) & 0xff);
+ g = sk_linear_from_srgb_math((px >> SK_G32_SHIFT) & 0xff);
+ b = sk_linear_from_srgb_math((px >> SK_B32_SHIFT) & 0xff);
+ a = (1/255.0f)*SkNx_cast<float>((px >> SK_A32_SHIFT) & 0xff);
+ }
+
+ KERNEL_Sk4f(store_srgb) {
+ clamp_01_premul(r,g,b,a);
+ auto ptr = (uint32_t*)ctx + x;
+ store_tail(tail, ( sk_linear_to_srgb_noclamp(r) << SK_R32_SHIFT
+ | sk_linear_to_srgb_noclamp(g) << SK_G32_SHIFT
+ | sk_linear_to_srgb_noclamp(b) << SK_B32_SHIFT
+ | Sk4f_round(255.0f * a) << SK_A32_SHIFT), (int*)ptr);
+ }
+
+ RGBA_XFERMODE_Sk4f(clear) { return 0.0f; }
+ //RGBA_XFERMODE_Sk4f(src) { return s; } // This would be a no-op stage, so we just omit it.
+ RGBA_XFERMODE_Sk4f(dst) { return d; }
+
+ RGBA_XFERMODE_Sk4f(srcatop) { return s*da + d*inv(sa); }
+ RGBA_XFERMODE_Sk4f(srcin) { return s * da; }
+ RGBA_XFERMODE_Sk4f(srcout) { return s * inv(da); }
+ RGBA_XFERMODE_Sk4f(srcover) { return s + inv(sa)*d; }
+ RGBA_XFERMODE_Sk4f(dstatop) { return srcatop_kernel(d,da,s,sa); }
+ RGBA_XFERMODE_Sk4f(dstin) { return srcin_kernel (d,da,s,sa); }
+ RGBA_XFERMODE_Sk4f(dstout) { return srcout_kernel (d,da,s,sa); }
+ RGBA_XFERMODE_Sk4f(dstover) { return srcover_kernel(d,da,s,sa); }
+
+ RGBA_XFERMODE_Sk4f(modulate) { return s*d; }
+ RGBA_XFERMODE_Sk4f(multiply) { return s*inv(da) + d*inv(sa) + s*d; }
+ RGBA_XFERMODE_Sk4f(plus_) { return s + d; }
+ RGBA_XFERMODE_Sk4f(screen) { return s + d - s*d; }
+ RGBA_XFERMODE_Sk4f(xor_) { return s*inv(da) + d*inv(sa); }
+
+ RGB_XFERMODE_Sk4f(colorburn) {
+ return (d == da ).thenElse(d + s*inv(da),
+ (s == 0.0f).thenElse(s + d*inv(sa),
+ sa*(da - Sk4f::Min(da, (da-d)*sa/s)) + s*inv(da) + d*inv(sa)));
+ }
+ RGB_XFERMODE_Sk4f(colordodge) {
+ return (d == 0.0f).thenElse(d + s*inv(da),
+ (s == sa ).thenElse(s + d*inv(sa),
+ sa*Sk4f::Min(da, (d*sa)/(sa - s)) + s*inv(da) + d*inv(sa)));
+ }
+ RGB_XFERMODE_Sk4f(darken) { return s + d - Sk4f::Max(s*da, d*sa); }
+ RGB_XFERMODE_Sk4f(difference) { return s + d - 2.0f*Sk4f::Min(s*da,d*sa); }
+ RGB_XFERMODE_Sk4f(exclusion) { return s + d - 2.0f*s*d; }
+ RGB_XFERMODE_Sk4f(hardlight) {
+ return s*inv(da) + d*inv(sa)
+ + (2.0f*s <= sa).thenElse(2.0f*s*d, sa*da - 2.0f*(da-d)*(sa-s));
+ }
+ RGB_XFERMODE_Sk4f(lighten) { return s + d - Sk4f::Min(s*da, d*sa); }
+ RGB_XFERMODE_Sk4f(overlay) { return hardlight_kernel(d,da,s,sa); }
+ RGB_XFERMODE_Sk4f(softlight) {
+ Sk4f m = (da > 0.0f).thenElse(d / da, 0.0f),
+ s2 = 2.0f*s,
+ m4 = 4.0f*m;
+
+ // The logic forks three ways:
+ // 1. dark src?
+ // 2. light src, dark dst?
+ // 3. light src, light dst?
+ Sk4f darkSrc = d*(sa + (s2 - sa)*(1.0f - m)), // Used in case 1.
+ darkDst = (m4*m4 + m4)*(m - 1.0f) + 7.0f*m, // Used in case 2.
+ liteDst = m.rsqrt().invert() - m, // Used in case 3.
+ liteSrc = d*sa + da*(s2 - sa) * (4.0f*d <= da).thenElse(darkDst, liteDst); // 2 or 3?
+ return s*inv(da) + d*inv(sa) + (s2 <= sa).thenElse(darkSrc, liteSrc); // 1 or (2 or 3)?
+ }
+}
+
+#undef KERNEL_Sk4f
+#undef RGB_XFERMODE_Sk4f
+#undef RGBA_XFERMODE_Sk4f
+
+#endif//SkRasterPipeline_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkSwizzler_opts.h b/gfx/skia/skia/src/opts/SkSwizzler_opts.h
new file mode 100644
index 000000000..a22e14502
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkSwizzler_opts.h
@@ -0,0 +1,846 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSwizzler_opts_DEFINED
+#define SkSwizzler_opts_DEFINED
+
+#include "SkColorPriv.h"
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ #include <immintrin.h>
+#elif defined(SK_ARM_HAS_NEON)
+ #include <arm_neon.h>
+#endif
+
+namespace SK_OPTS_NS {
+
+static void RGBA_to_rgbA_portable(uint32_t* dst, const void* vsrc, int count) {
+ auto src = (const uint32_t*)vsrc;
+ for (int i = 0; i < count; i++) {
+ uint8_t a = src[i] >> 24,
+ b = src[i] >> 16,
+ g = src[i] >> 8,
+ r = src[i] >> 0;
+ b = (b*a+127)/255;
+ g = (g*a+127)/255;
+ r = (r*a+127)/255;
+ dst[i] = (uint32_t)a << 24
+ | (uint32_t)b << 16
+ | (uint32_t)g << 8
+ | (uint32_t)r << 0;
+ }
+}
+
+static void RGBA_to_bgrA_portable(uint32_t* dst, const void* vsrc, int count) {
+ auto src = (const uint32_t*)vsrc;
+ for (int i = 0; i < count; i++) {
+ uint8_t a = src[i] >> 24,
+ b = src[i] >> 16,
+ g = src[i] >> 8,
+ r = src[i] >> 0;
+ b = (b*a+127)/255;
+ g = (g*a+127)/255;
+ r = (r*a+127)/255;
+ dst[i] = (uint32_t)a << 24
+ | (uint32_t)r << 16
+ | (uint32_t)g << 8
+ | (uint32_t)b << 0;
+ }
+}
+
+static void RGBA_to_BGRA_portable(uint32_t* dst, const void* vsrc, int count) {
+ auto src = (const uint32_t*)vsrc;
+ for (int i = 0; i < count; i++) {
+ uint8_t a = src[i] >> 24,
+ b = src[i] >> 16,
+ g = src[i] >> 8,
+ r = src[i] >> 0;
+ dst[i] = (uint32_t)a << 24
+ | (uint32_t)r << 16
+ | (uint32_t)g << 8
+ | (uint32_t)b << 0;
+ }
+}
+
+static void RGB_to_RGB1_portable(uint32_t dst[], const void* vsrc, int count) {
+ const uint8_t* src = (const uint8_t*)vsrc;
+ for (int i = 0; i < count; i++) {
+ uint8_t r = src[0],
+ g = src[1],
+ b = src[2];
+ src += 3;
+ dst[i] = (uint32_t)0xFF << 24
+ | (uint32_t)b << 16
+ | (uint32_t)g << 8
+ | (uint32_t)r << 0;
+ }
+}
+
+static void RGB_to_BGR1_portable(uint32_t dst[], const void* vsrc, int count) {
+ const uint8_t* src = (const uint8_t*)vsrc;
+ for (int i = 0; i < count; i++) {
+ uint8_t r = src[0],
+ g = src[1],
+ b = src[2];
+ src += 3;
+ dst[i] = (uint32_t)0xFF << 24
+ | (uint32_t)r << 16
+ | (uint32_t)g << 8
+ | (uint32_t)b << 0;
+ }
+}
+
+static void gray_to_RGB1_portable(uint32_t dst[], const void* vsrc, int count) {
+ const uint8_t* src = (const uint8_t*)vsrc;
+ for (int i = 0; i < count; i++) {
+ dst[i] = (uint32_t)0xFF << 24
+ | (uint32_t)src[i] << 16
+ | (uint32_t)src[i] << 8
+ | (uint32_t)src[i] << 0;
+ }
+}
+
+static void grayA_to_RGBA_portable(uint32_t dst[], const void* vsrc, int count) {
+ const uint8_t* src = (const uint8_t*)vsrc;
+ for (int i = 0; i < count; i++) {
+ uint8_t g = src[0],
+ a = src[1];
+ src += 2;
+ dst[i] = (uint32_t)a << 24
+ | (uint32_t)g << 16
+ | (uint32_t)g << 8
+ | (uint32_t)g << 0;
+ }
+}
+
+static void grayA_to_rgbA_portable(uint32_t dst[], const void* vsrc, int count) {
+ const uint8_t* src = (const uint8_t*)vsrc;
+ for (int i = 0; i < count; i++) {
+ uint8_t g = src[0],
+ a = src[1];
+ src += 2;
+ g = (g*a+127)/255;
+ dst[i] = (uint32_t)a << 24
+ | (uint32_t)g << 16
+ | (uint32_t)g << 8
+ | (uint32_t)g << 0;
+ }
+}
+
+static void inverted_CMYK_to_RGB1_portable(uint32_t* dst, const void* vsrc, int count) {
+ const uint32_t* src = (const uint32_t*)vsrc;
+ for (int i = 0; i < count; i++) {
+ uint8_t k = src[i] >> 24,
+ y = src[i] >> 16,
+ m = src[i] >> 8,
+ c = src[i] >> 0;
+ // See comments in SkSwizzler.cpp for details on the conversion formula.
+ uint8_t b = (y*k+127)/255,
+ g = (m*k+127)/255,
+ r = (c*k+127)/255;
+ dst[i] = (uint32_t)0xFF << 24
+ | (uint32_t) b << 16
+ | (uint32_t) g << 8
+ | (uint32_t) r << 0;
+ }
+}
+
+static void inverted_CMYK_to_BGR1_portable(uint32_t* dst, const void* vsrc, int count) {
+ const uint32_t* src = (const uint32_t*)vsrc;
+ for (int i = 0; i < count; i++) {
+ uint8_t k = src[i] >> 24,
+ y = src[i] >> 16,
+ m = src[i] >> 8,
+ c = src[i] >> 0;
+ uint8_t b = (y*k+127)/255,
+ g = (m*k+127)/255,
+ r = (c*k+127)/255;
+ dst[i] = (uint32_t)0xFF << 24
+ | (uint32_t) r << 16
+ | (uint32_t) g << 8
+ | (uint32_t) b << 0;
+ }
+}
+
+#if defined(SK_ARM_HAS_NEON)
+
+// Rounded divide by 255, (x + 127) / 255
+static uint8x8_t div255_round(uint16x8_t x) {
+ // result = (x + 127) / 255
+ // result = (x + 127) / 256 + error1
+ //
+ // error1 = (x + 127) / (255 * 256)
+ // error1 = (x + 127) / (256 * 256) + error2
+ //
+ // error2 = (x + 127) / (255 * 256 * 256)
+ //
+ // The maximum value of error2 is too small to matter. Thus:
+ // result = (x + 127) / 256 + (x + 127) / (256 * 256)
+ // result = ((x + 127) / 256 + x + 127) / 256
+ // result = ((x + 127) >> 8 + x + 127) >> 8
+ //
+ // Use >>> to represent "rounded right shift" which, conveniently,
+ // NEON supports in one instruction.
+ // result = ((x >>> 8) + x) >>> 8
+ //
+ // Note that the second right shift is actually performed as an
+ // "add, round, and narrow back to 8-bits" instruction.
+ return vraddhn_u16(x, vrshrq_n_u16(x, 8));
+}
+
+// Scale a byte by another, (x * y + 127) / 255
+static uint8x8_t scale(uint8x8_t x, uint8x8_t y) {
+ return div255_round(vmull_u8(x, y));
+}
+
+template <bool kSwapRB>
+static void premul_should_swapRB(uint32_t* dst, const void* vsrc, int count) {
+ auto src = (const uint32_t*)vsrc;
+ while (count >= 8) {
+ // Load 8 pixels.
+ uint8x8x4_t rgba = vld4_u8((const uint8_t*) src);
+
+ uint8x8_t a = rgba.val[3],
+ b = rgba.val[2],
+ g = rgba.val[1],
+ r = rgba.val[0];
+
+ // Premultiply.
+ b = scale(b, a);
+ g = scale(g, a);
+ r = scale(r, a);
+
+ // Store 8 premultiplied pixels.
+ if (kSwapRB) {
+ rgba.val[2] = r;
+ rgba.val[1] = g;
+ rgba.val[0] = b;
+ } else {
+ rgba.val[2] = b;
+ rgba.val[1] = g;
+ rgba.val[0] = r;
+ }
+ vst4_u8((uint8_t*) dst, rgba);
+ src += 8;
+ dst += 8;
+ count -= 8;
+ }
+
+ // Call portable code to finish up the tail of [0,8) pixels.
+ auto proc = kSwapRB ? RGBA_to_bgrA_portable : RGBA_to_rgbA_portable;
+ proc(dst, src, count);
+}
+
+static void RGBA_to_rgbA(uint32_t* dst, const void* src, int count) {
+ premul_should_swapRB<false>(dst, src, count);
+}
+
+static void RGBA_to_bgrA(uint32_t* dst, const void* src, int count) {
+ premul_should_swapRB<true>(dst, src, count);
+}
+
+static void RGBA_to_BGRA(uint32_t* dst, const void* vsrc, int count) {
+ auto src = (const uint32_t*)vsrc;
+ while (count >= 16) {
+ // Load 16 pixels.
+ uint8x16x4_t rgba = vld4q_u8((const uint8_t*) src);
+
+ // Swap r and b.
+ SkTSwap(rgba.val[0], rgba.val[2]);
+
+ // Store 16 pixels.
+ vst4q_u8((uint8_t*) dst, rgba);
+ src += 16;
+ dst += 16;
+ count -= 16;
+ }
+
+ if (count >= 8) {
+ // Load 8 pixels.
+ uint8x8x4_t rgba = vld4_u8((const uint8_t*) src);
+
+ // Swap r and b.
+ SkTSwap(rgba.val[0], rgba.val[2]);
+
+ // Store 8 pixels.
+ vst4_u8((uint8_t*) dst, rgba);
+ src += 8;
+ dst += 8;
+ count -= 8;
+ }
+
+ RGBA_to_BGRA_portable(dst, src, count);
+}
+
+template <bool kSwapRB>
+static void insert_alpha_should_swaprb(uint32_t dst[], const void* vsrc, int count) {
+ const uint8_t* src = (const uint8_t*) vsrc;
+ while (count >= 16) {
+ // Load 16 pixels.
+ uint8x16x3_t rgb = vld3q_u8(src);
+
+ // Insert an opaque alpha channel and swap if needed.
+ uint8x16x4_t rgba;
+ if (kSwapRB) {
+ rgba.val[0] = rgb.val[2];
+ rgba.val[2] = rgb.val[0];
+ } else {
+ rgba.val[0] = rgb.val[0];
+ rgba.val[2] = rgb.val[2];
+ }
+ rgba.val[1] = rgb.val[1];
+ rgba.val[3] = vdupq_n_u8(0xFF);
+
+ // Store 16 pixels.
+ vst4q_u8((uint8_t*) dst, rgba);
+ src += 16*3;
+ dst += 16;
+ count -= 16;
+ }
+
+ if (count >= 8) {
+ // Load 8 pixels.
+ uint8x8x3_t rgb = vld3_u8(src);
+
+ // Insert an opaque alpha channel and swap if needed.
+ uint8x8x4_t rgba;
+ if (kSwapRB) {
+ rgba.val[0] = rgb.val[2];
+ rgba.val[2] = rgb.val[0];
+ } else {
+ rgba.val[0] = rgb.val[0];
+ rgba.val[2] = rgb.val[2];
+ }
+ rgba.val[1] = rgb.val[1];
+ rgba.val[3] = vdup_n_u8(0xFF);
+
+ // Store 8 pixels.
+ vst4_u8((uint8_t*) dst, rgba);
+ src += 8*3;
+ dst += 8;
+ count -= 8;
+ }
+
+ // Call portable code to finish up the tail of [0,8) pixels.
+ auto proc = kSwapRB ? RGB_to_BGR1_portable : RGB_to_RGB1_portable;
+ proc(dst, src, count);
+}
+
+static void RGB_to_RGB1(uint32_t dst[], const void* src, int count) {
+ insert_alpha_should_swaprb<false>(dst, src, count);
+}
+
+static void RGB_to_BGR1(uint32_t dst[], const void* src, int count) {
+ insert_alpha_should_swaprb<true>(dst, src, count);
+}
+
+static void gray_to_RGB1(uint32_t dst[], const void* vsrc, int count) {
+ const uint8_t* src = (const uint8_t*) vsrc;
+ while (count >= 16) {
+ // Load 16 pixels.
+ uint8x16_t gray = vld1q_u8(src);
+
+ // Set each of the color channels.
+ uint8x16x4_t rgba;
+ rgba.val[0] = gray;
+ rgba.val[1] = gray;
+ rgba.val[2] = gray;
+ rgba.val[3] = vdupq_n_u8(0xFF);
+
+ // Store 16 pixels.
+ vst4q_u8((uint8_t*) dst, rgba);
+ src += 16;
+ dst += 16;
+ count -= 16;
+ }
+
+ if (count >= 8) {
+ // Load 8 pixels.
+ uint8x8_t gray = vld1_u8(src);
+
+ // Set each of the color channels.
+ uint8x8x4_t rgba;
+ rgba.val[0] = gray;
+ rgba.val[1] = gray;
+ rgba.val[2] = gray;
+ rgba.val[3] = vdup_n_u8(0xFF);
+
+ // Store 8 pixels.
+ vst4_u8((uint8_t*) dst, rgba);
+ src += 8;
+ dst += 8;
+ count -= 8;
+ }
+
+ gray_to_RGB1_portable(dst, src, count);
+}
+
+template <bool kPremul>
+static void expand_grayA(uint32_t dst[], const void* vsrc, int count) {
+ const uint8_t* src = (const uint8_t*) vsrc;
+ while (count >= 16) {
+ // Load 16 pixels.
+ uint8x16x2_t ga = vld2q_u8(src);
+
+ // Premultiply if requested.
+ if (kPremul) {
+ ga.val[0] = vcombine_u8(
+ scale(vget_low_u8(ga.val[0]), vget_low_u8(ga.val[1])),
+ scale(vget_high_u8(ga.val[0]), vget_high_u8(ga.val[1])));
+ }
+
+ // Set each of the color channels.
+ uint8x16x4_t rgba;
+ rgba.val[0] = ga.val[0];
+ rgba.val[1] = ga.val[0];
+ rgba.val[2] = ga.val[0];
+ rgba.val[3] = ga.val[1];
+
+ // Store 16 pixels.
+ vst4q_u8((uint8_t*) dst, rgba);
+ src += 16*2;
+ dst += 16;
+ count -= 16;
+ }
+
+ if (count >= 8) {
+ // Load 8 pixels.
+ uint8x8x2_t ga = vld2_u8(src);
+
+ // Premultiply if requested.
+ if (kPremul) {
+ ga.val[0] = scale(ga.val[0], ga.val[1]);
+ }
+
+ // Set each of the color channels.
+ uint8x8x4_t rgba;
+ rgba.val[0] = ga.val[0];
+ rgba.val[1] = ga.val[0];
+ rgba.val[2] = ga.val[0];
+ rgba.val[3] = ga.val[1];
+
+ // Store 8 pixels.
+ vst4_u8((uint8_t*) dst, rgba);
+ src += 8*2;
+ dst += 8;
+ count -= 8;
+ }
+
+ auto proc = kPremul ? grayA_to_rgbA_portable : grayA_to_RGBA_portable;
+ proc(dst, src, count);
+}
+
+static void grayA_to_RGBA(uint32_t dst[], const void* src, int count) {
+ expand_grayA<false>(dst, src, count);
+}
+
+static void grayA_to_rgbA(uint32_t dst[], const void* src, int count) {
+ expand_grayA<true>(dst, src, count);
+}
+
+enum Format { kRGB1, kBGR1 };
+template <Format format>
+static void inverted_cmyk_to(uint32_t* dst, const void* vsrc, int count) {
+ auto src = (const uint32_t*)vsrc;
+ while (count >= 8) {
+ // Load 8 cmyk pixels.
+ uint8x8x4_t pixels = vld4_u8((const uint8_t*) src);
+
+ uint8x8_t k = pixels.val[3],
+ y = pixels.val[2],
+ m = pixels.val[1],
+ c = pixels.val[0];
+
+ // Scale to r, g, b.
+ uint8x8_t b = scale(y, k);
+ uint8x8_t g = scale(m, k);
+ uint8x8_t r = scale(c, k);
+
+ // Store 8 rgba pixels.
+ if (kBGR1 == format) {
+ pixels.val[3] = vdup_n_u8(0xFF);
+ pixels.val[2] = r;
+ pixels.val[1] = g;
+ pixels.val[0] = b;
+ } else {
+ pixels.val[3] = vdup_n_u8(0xFF);
+ pixels.val[2] = b;
+ pixels.val[1] = g;
+ pixels.val[0] = r;
+ }
+ vst4_u8((uint8_t*) dst, pixels);
+ src += 8;
+ dst += 8;
+ count -= 8;
+ }
+
+ auto proc = (kBGR1 == format) ? inverted_CMYK_to_BGR1_portable : inverted_CMYK_to_RGB1_portable;
+ proc(dst, src, count);
+}
+
+static void inverted_CMYK_to_RGB1(uint32_t dst[], const void* src, int count) {
+ inverted_cmyk_to<kRGB1>(dst, src, count);
+}
+
+static void inverted_CMYK_to_BGR1(uint32_t dst[], const void* src, int count) {
+ inverted_cmyk_to<kBGR1>(dst, src, count);
+}
+
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+
+// Scale a byte by another.
+// Inputs are stored in 16-bit lanes, but are not larger than 8 bits.
+static __m128i scale(__m128i x, __m128i y) {
+ const __m128i _128 = _mm_set1_epi16(128);
+ const __m128i _257 = _mm_set1_epi16(257);
+
+ // (x+127)/255 == ((x+128)*257)>>16 for 0 <= x <= 255*255.
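+    // For example, x = 255*255 = 65025 gives (65025+127)/255 = 255 and ((65025+128)*257)>>16 = 255;
+    // x = 254 gives (254+127)/255 = 1 and ((254+128)*257)>>16 = 1.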
+ return _mm_mulhi_epu16(_mm_add_epi16(_mm_mullo_epi16(x, y), _128), _257);
+}
+
+template <bool kSwapRB>
+static void premul_should_swapRB(uint32_t* dst, const void* vsrc, int count) {
+ auto src = (const uint32_t*)vsrc;
+
+ auto premul8 = [](__m128i* lo, __m128i* hi) {
+ const __m128i zeros = _mm_setzero_si128();
+ __m128i planar;
+ if (kSwapRB) {
+ planar = _mm_setr_epi8(2,6,10,14, 1,5,9,13, 0,4,8,12, 3,7,11,15);
+ } else {
+ planar = _mm_setr_epi8(0,4,8,12, 1,5,9,13, 2,6,10,14, 3,7,11,15);
+ }
+
+ // Swizzle the pixels to 8-bit planar.
+ *lo = _mm_shuffle_epi8(*lo, planar); // rrrrgggg bbbbaaaa
+ *hi = _mm_shuffle_epi8(*hi, planar); // RRRRGGGG BBBBAAAA
+ __m128i rg = _mm_unpacklo_epi32(*lo, *hi), // rrrrRRRR ggggGGGG
+ ba = _mm_unpackhi_epi32(*lo, *hi); // bbbbBBBB aaaaAAAA
+
+ // Unpack to 16-bit planar.
+ __m128i r = _mm_unpacklo_epi8(rg, zeros), // r_r_r_r_ R_R_R_R_
+ g = _mm_unpackhi_epi8(rg, zeros), // g_g_g_g_ G_G_G_G_
+ b = _mm_unpacklo_epi8(ba, zeros), // b_b_b_b_ B_B_B_B_
+ a = _mm_unpackhi_epi8(ba, zeros); // a_a_a_a_ A_A_A_A_
+
+ // Premultiply!
+ r = scale(r, a);
+ g = scale(g, a);
+ b = scale(b, a);
+
+ // Repack into interlaced pixels.
+ rg = _mm_or_si128(r, _mm_slli_epi16(g, 8)); // rgrgrgrg RGRGRGRG
+ ba = _mm_or_si128(b, _mm_slli_epi16(a, 8)); // babababa BABABABA
+ *lo = _mm_unpacklo_epi16(rg, ba); // rgbargba rgbargba
+ *hi = _mm_unpackhi_epi16(rg, ba); // RGBARGBA RGBARGBA
+ };
+
+ while (count >= 8) {
+ __m128i lo = _mm_loadu_si128((const __m128i*) (src + 0)),
+ hi = _mm_loadu_si128((const __m128i*) (src + 4));
+
+ premul8(&lo, &hi);
+
+ _mm_storeu_si128((__m128i*) (dst + 0), lo);
+ _mm_storeu_si128((__m128i*) (dst + 4), hi);
+
+ src += 8;
+ dst += 8;
+ count -= 8;
+ }
+
+ if (count >= 4) {
+ __m128i lo = _mm_loadu_si128((const __m128i*) src),
+ hi = _mm_setzero_si128();
+
+ premul8(&lo, &hi);
+
+ _mm_storeu_si128((__m128i*) dst, lo);
+
+ src += 4;
+ dst += 4;
+ count -= 4;
+ }
+
+ // Call portable code to finish up the tail of [0,4) pixels.
+ auto proc = kSwapRB ? RGBA_to_bgrA_portable : RGBA_to_rgbA_portable;
+ proc(dst, src, count);
+}
+
+static void RGBA_to_rgbA(uint32_t* dst, const void* src, int count) {
+ premul_should_swapRB<false>(dst, src, count);
+}
+
+static void RGBA_to_bgrA(uint32_t* dst, const void* src, int count) {
+ premul_should_swapRB<true>(dst, src, count);
+}
+
+static void RGBA_to_BGRA(uint32_t* dst, const void* vsrc, int count) {
+ auto src = (const uint32_t*)vsrc;
+ const __m128i swapRB = _mm_setr_epi8(2,1,0,3, 6,5,4,7, 10,9,8,11, 14,13,12,15);
+
+ while (count >= 4) {
+ __m128i rgba = _mm_loadu_si128((const __m128i*) src);
+ __m128i bgra = _mm_shuffle_epi8(rgba, swapRB);
+ _mm_storeu_si128((__m128i*) dst, bgra);
+
+ src += 4;
+ dst += 4;
+ count -= 4;
+ }
+
+ RGBA_to_BGRA_portable(dst, src, count);
+}
+
+template <bool kSwapRB>
+static void insert_alpha_should_swaprb(uint32_t dst[], const void* vsrc, int count) {
+ const uint8_t* src = (const uint8_t*) vsrc;
+
+ const __m128i alphaMask = _mm_set1_epi32(0xFF000000);
+ __m128i expand;
+    const uint8_t X = 0xFF; // Used as a placeholder; the value of X is irrelevant.
+ if (kSwapRB) {
+ expand = _mm_setr_epi8(2,1,0,X, 5,4,3,X, 8,7,6,X, 11,10,9,X);
+ } else {
+ expand = _mm_setr_epi8(0,1,2,X, 3,4,5,X, 6,7,8,X, 9,10,11,X);
+ }
+
+ while (count >= 6) {
+ // Load a vector. While this actually contains 5 pixels plus an
+ // extra component, we will discard all but the first four pixels on
+ // this iteration.
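+        // (count >= 6 guarantees at least 18 bytes of source remain, so this 16-byte
+        // load cannot read past the end of the buffer.)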
+ __m128i rgb = _mm_loadu_si128((const __m128i*) src);
+
+ // Expand the first four pixels to RGBX and then mask to RGB(FF).
+ __m128i rgba = _mm_or_si128(_mm_shuffle_epi8(rgb, expand), alphaMask);
+
+ // Store 4 pixels.
+ _mm_storeu_si128((__m128i*) dst, rgba);
+
+ src += 4*3;
+ dst += 4;
+ count -= 4;
+ }
+
+    // Call portable code to finish up the tail of [0,6) pixels left over by the count >= 6 loop above.
+ auto proc = kSwapRB ? RGB_to_BGR1_portable : RGB_to_RGB1_portable;
+ proc(dst, src, count);
+}
+
+static void RGB_to_RGB1(uint32_t dst[], const void* src, int count) {
+ insert_alpha_should_swaprb<false>(dst, src, count);
+}
+
+static void RGB_to_BGR1(uint32_t dst[], const void* src, int count) {
+ insert_alpha_should_swaprb<true>(dst, src, count);
+}
+
+static void gray_to_RGB1(uint32_t dst[], const void* vsrc, int count) {
+ const uint8_t* src = (const uint8_t*) vsrc;
+
+ const __m128i alphas = _mm_set1_epi8((uint8_t) 0xFF);
+ while (count >= 16) {
+ __m128i grays = _mm_loadu_si128((const __m128i*) src);
+
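+        // Interleave gray with itself (gg) and with 0xFF (ga), then interleave those
+        // 16-bit pairs so that each output pixel reads G,G,G,FF.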
+ __m128i gg_lo = _mm_unpacklo_epi8(grays, grays);
+ __m128i gg_hi = _mm_unpackhi_epi8(grays, grays);
+ __m128i ga_lo = _mm_unpacklo_epi8(grays, alphas);
+ __m128i ga_hi = _mm_unpackhi_epi8(grays, alphas);
+
+ __m128i ggga0 = _mm_unpacklo_epi16(gg_lo, ga_lo);
+ __m128i ggga1 = _mm_unpackhi_epi16(gg_lo, ga_lo);
+ __m128i ggga2 = _mm_unpacklo_epi16(gg_hi, ga_hi);
+ __m128i ggga3 = _mm_unpackhi_epi16(gg_hi, ga_hi);
+
+ _mm_storeu_si128((__m128i*) (dst + 0), ggga0);
+ _mm_storeu_si128((__m128i*) (dst + 4), ggga1);
+ _mm_storeu_si128((__m128i*) (dst + 8), ggga2);
+ _mm_storeu_si128((__m128i*) (dst + 12), ggga3);
+
+ src += 16;
+ dst += 16;
+ count -= 16;
+ }
+
+ gray_to_RGB1_portable(dst, src, count);
+}
+
+static void grayA_to_RGBA(uint32_t dst[], const void* vsrc, int count) {
+ const uint8_t* src = (const uint8_t*) vsrc;
+ while (count >= 8) {
+ __m128i ga = _mm_loadu_si128((const __m128i*) src);
+
+ __m128i gg = _mm_or_si128(_mm_and_si128(ga, _mm_set1_epi16(0x00FF)),
+ _mm_slli_epi16(ga, 8));
+
+ __m128i ggga_lo = _mm_unpacklo_epi16(gg, ga);
+ __m128i ggga_hi = _mm_unpackhi_epi16(gg, ga);
+
+ _mm_storeu_si128((__m128i*) (dst + 0), ggga_lo);
+ _mm_storeu_si128((__m128i*) (dst + 4), ggga_hi);
+
+ src += 8*2;
+ dst += 8;
+ count -= 8;
+ }
+
+ grayA_to_RGBA_portable(dst, src, count);
+}
+
+static void grayA_to_rgbA(uint32_t dst[], const void* vsrc, int count) {
+ const uint8_t* src = (const uint8_t*) vsrc;
+ while (count >= 8) {
+ __m128i grayA = _mm_loadu_si128((const __m128i*) src);
+
+ __m128i g0 = _mm_and_si128(grayA, _mm_set1_epi16(0x00FF));
+ __m128i a0 = _mm_srli_epi16(grayA, 8);
+
+ // Premultiply
+ g0 = scale(g0, a0);
+
+ __m128i gg = _mm_or_si128(g0, _mm_slli_epi16(g0, 8));
+ __m128i ga = _mm_or_si128(g0, _mm_slli_epi16(a0, 8));
+
+ __m128i ggga_lo = _mm_unpacklo_epi16(gg, ga);
+ __m128i ggga_hi = _mm_unpackhi_epi16(gg, ga);
+
+ _mm_storeu_si128((__m128i*) (dst + 0), ggga_lo);
+ _mm_storeu_si128((__m128i*) (dst + 4), ggga_hi);
+
+ src += 8*2;
+ dst += 8;
+ count -= 8;
+ }
+
+ grayA_to_rgbA_portable(dst, src, count);
+}
+
+enum Format { kRGB1, kBGR1 };
+template <Format format>
+static void inverted_cmyk_to(uint32_t* dst, const void* vsrc, int count) {
+ auto src = (const uint32_t*)vsrc;
+
+ auto convert8 = [](__m128i* lo, __m128i* hi) {
+ const __m128i zeros = _mm_setzero_si128();
+ __m128i planar;
+ if (kBGR1 == format) {
+ planar = _mm_setr_epi8(2,6,10,14, 1,5,9,13, 0,4,8,12, 3,7,11,15);
+ } else {
+ planar = _mm_setr_epi8(0,4,8,12, 1,5,9,13, 2,6,10,14, 3,7,11,15);
+ }
+
+ // Swizzle the pixels to 8-bit planar.
+ *lo = _mm_shuffle_epi8(*lo, planar); // ccccmmmm yyyykkkk
+ *hi = _mm_shuffle_epi8(*hi, planar); // CCCCMMMM YYYYKKKK
+ __m128i cm = _mm_unpacklo_epi32(*lo, *hi), // ccccCCCC mmmmMMMM
+ yk = _mm_unpackhi_epi32(*lo, *hi); // yyyyYYYY kkkkKKKK
+
+ // Unpack to 16-bit planar.
+ __m128i c = _mm_unpacklo_epi8(cm, zeros), // c_c_c_c_ C_C_C_C_
+ m = _mm_unpackhi_epi8(cm, zeros), // m_m_m_m_ M_M_M_M_
+ y = _mm_unpacklo_epi8(yk, zeros), // y_y_y_y_ Y_Y_Y_Y_
+ k = _mm_unpackhi_epi8(yk, zeros); // k_k_k_k_ K_K_K_K_
+
+ // Scale to r, g, b.
+ __m128i r = scale(c, k),
+ g = scale(m, k),
+ b = scale(y, k);
+
+ // Repack into interlaced pixels.
+ __m128i rg = _mm_or_si128(r, _mm_slli_epi16(g, 8)), // rgrgrgrg RGRGRGRG
+ ba = _mm_or_si128(b, _mm_set1_epi16((uint16_t) 0xFF00)); // b1b1b1b1 B1B1B1B1
+        *lo = _mm_unpacklo_epi16(rg, ba);                                   // rgb1rgb1 rgb1rgb1
+ *hi = _mm_unpackhi_epi16(rg, ba); // RGB1RGB1 RGB1RGB1
+ };
+
+ while (count >= 8) {
+ __m128i lo = _mm_loadu_si128((const __m128i*) (src + 0)),
+ hi = _mm_loadu_si128((const __m128i*) (src + 4));
+
+ convert8(&lo, &hi);
+
+ _mm_storeu_si128((__m128i*) (dst + 0), lo);
+ _mm_storeu_si128((__m128i*) (dst + 4), hi);
+
+ src += 8;
+ dst += 8;
+ count -= 8;
+ }
+
+ if (count >= 4) {
+ __m128i lo = _mm_loadu_si128((const __m128i*) src),
+ hi = _mm_setzero_si128();
+
+ convert8(&lo, &hi);
+
+ _mm_storeu_si128((__m128i*) dst, lo);
+
+ src += 4;
+ dst += 4;
+ count -= 4;
+ }
+
+ auto proc = (kBGR1 == format) ? inverted_CMYK_to_BGR1_portable : inverted_CMYK_to_RGB1_portable;
+ proc(dst, src, count);
+}
+
+static void inverted_CMYK_to_RGB1(uint32_t dst[], const void* src, int count) {
+ inverted_cmyk_to<kRGB1>(dst, src, count);
+}
+
+static void inverted_CMYK_to_BGR1(uint32_t dst[], const void* src, int count) {
+ inverted_cmyk_to<kBGR1>(dst, src, count);
+}
+
+#else
+
+static void RGBA_to_rgbA(uint32_t* dst, const void* src, int count) {
+ RGBA_to_rgbA_portable(dst, src, count);
+}
+
+static void RGBA_to_bgrA(uint32_t* dst, const void* src, int count) {
+ RGBA_to_bgrA_portable(dst, src, count);
+}
+
+static void RGBA_to_BGRA(uint32_t* dst, const void* src, int count) {
+ RGBA_to_BGRA_portable(dst, src, count);
+}
+
+static void RGB_to_RGB1(uint32_t dst[], const void* src, int count) {
+ RGB_to_RGB1_portable(dst, src, count);
+}
+
+static void RGB_to_BGR1(uint32_t dst[], const void* src, int count) {
+ RGB_to_BGR1_portable(dst, src, count);
+}
+
+static void gray_to_RGB1(uint32_t dst[], const void* src, int count) {
+ gray_to_RGB1_portable(dst, src, count);
+}
+
+static void grayA_to_RGBA(uint32_t dst[], const void* src, int count) {
+ grayA_to_RGBA_portable(dst, src, count);
+}
+
+static void grayA_to_rgbA(uint32_t dst[], const void* src, int count) {
+ grayA_to_rgbA_portable(dst, src, count);
+}
+
+static void inverted_CMYK_to_RGB1(uint32_t dst[], const void* src, int count) {
+ inverted_CMYK_to_RGB1_portable(dst, src, count);
+}
+
+static void inverted_CMYK_to_BGR1(uint32_t dst[], const void* src, int count) {
+ inverted_CMYK_to_BGR1_portable(dst, src, count);
+}
+
+#endif
+
+}
+
+#endif // SkSwizzler_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkTextureCompressor_opts.h b/gfx/skia/skia/src/opts/SkTextureCompressor_opts.h
new file mode 100644
index 000000000..06ced38bd
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkTextureCompressor_opts.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTextureCompressor_opts_DEFINED
+#define SkTextureCompressor_opts_DEFINED
+
+#include "SkOpts.h"
+
+namespace SK_OPTS_NS {
+
+#if defined(SK_ARM_HAS_NEON)
+    // Converts the 3-bit index stored in each byte of the register from
+ // 0, 1, 2, 3, 4, 5, 6, 7
+ // to
+ // 3, 2, 1, 0, 4, 5, 6, 7
+ //
+ // A more detailed explanation can be found in SkTextureCompressor::convert_indices
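+    //
+    // Sketch of the arithmetic below: sx = 3 - x; indices that go negative (x > 3) are
+    // restored by abs(sx) + 3 = x, so 0..3 map to 3..0 while 4..7 map to themselves.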
+ static inline uint8x16_t convert_indices(const uint8x16_t &x) {
+ static const int8x16_t kThree = {
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ };
+
+ static const int8x16_t kZero = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ };
+
+ // Take top three bits
+ int8x16_t sx = vreinterpretq_s8_u8(x);
+
+ // Negate ...
+ sx = vnegq_s8(sx);
+
+ // Add three...
+ sx = vaddq_s8(sx, kThree);
+
+ // Generate negatives mask
+ const int8x16_t mask = vreinterpretq_s8_u8(vcltq_s8(sx, kZero));
+
+ // Absolute value
+ sx = vabsq_s8(sx);
+
+ // Add three to the values that were negative...
+ return vreinterpretq_u8_s8(vaddq_s8(sx, vandq_s8(mask, kThree)));
+ }
+
+ template<unsigned shift>
+ static inline uint64x2_t shift_swap(const uint64x2_t &x, const uint64x2_t &mask) {
+ uint64x2_t t = vandq_u64(mask, veorq_u64(x, vshrq_n_u64(x, shift)));
+ return veorq_u64(x, veorq_u64(t, vshlq_n_u64(t, shift)));
+ }
+
+ static inline uint64x2_t pack_indices(const uint64x2_t &x) {
+ // x: 00 a e 00 b f 00 c g 00 d h 00 i m 00 j n 00 k o 00 l p
+
+ static const uint64x2_t kMask1 = { 0x3FC0003FC00000ULL, 0x3FC0003FC00000ULL };
+ uint64x2_t ret = shift_swap<10>(x, kMask1);
+
+ // x: b f 00 00 00 a e c g i m 00 00 00 d h j n 00 k o 00 l p
+ static const uint64x2_t kMask2 = { (0x3FULL << 52), (0x3FULL << 52) };
+ static const uint64x2_t kMask3 = { (0x3FULL << 28), (0x3FULL << 28) };
+ const uint64x2_t x1 = vandq_u64(vshlq_n_u64(ret, 52), kMask2);
+ const uint64x2_t x2 = vandq_u64(vshlq_n_u64(ret, 20), kMask3);
+ ret = vshrq_n_u64(vorrq_u64(ret, vorrq_u64(x1, x2)), 16);
+
+ // x: 00 00 00 00 00 00 00 00 b f l p a e c g i m k o d h j n
+
+ static const uint64x2_t kMask4 = { 0xFC0000ULL, 0xFC0000ULL };
+ ret = shift_swap<6>(ret, kMask4);
+
+ #if defined (SK_CPU_BENDIAN)
+ // x: 00 00 00 00 00 00 00 00 b f l p a e i m c g k o d h j n
+
+ static const uint64x2_t kMask5 = { 0x3FULL, 0x3FULL };
+ ret = shift_swap<36>(ret, kMask5);
+
+ // x: 00 00 00 00 00 00 00 00 b f j n a e i m c g k o d h l p
+
+ static const uint64x2_t kMask6 = { 0xFFF000000ULL, 0xFFF000000ULL };
+ ret = shift_swap<12>(ret, kMask6);
+ #else
+ // x: 00 00 00 00 00 00 00 00 c g i m d h l p b f j n a e k o
+
+ static const uint64x2_t kMask5 = { 0xFC0ULL, 0xFC0ULL };
+ ret = shift_swap<36>(ret, kMask5);
+
+ // x: 00 00 00 00 00 00 00 00 a e i m d h l p b f j n c g k o
+
+ static const uint64x2_t kMask6 = { (0xFFFULL << 36), (0xFFFULL << 36) };
+ static const uint64x2_t kMask7 = { 0xFFFFFFULL, 0xFFFFFFULL };
+ static const uint64x2_t kMask8 = { 0xFFFULL, 0xFFFULL };
+ const uint64x2_t y1 = vandq_u64(ret, kMask6);
+ const uint64x2_t y2 = vshlq_n_u64(vandq_u64(ret, kMask7), 12);
+ const uint64x2_t y3 = vandq_u64(vshrq_n_u64(ret, 24), kMask8);
+ ret = vorrq_u64(y1, vorrq_u64(y2, y3));
+ #endif
+
+ // x: 00 00 00 00 00 00 00 00 a e i m b f j n c g k o d h l p
+
+ // Set the header
+ static const uint64x2_t kHeader = { 0x8490000000000000ULL, 0x8490000000000000ULL };
+ return vorrq_u64(kHeader, ret);
+ }
+
+ // Takes a row of alpha values and places the most significant three bits of each byte into
+ // the least significant bits of the same byte
+ static inline uint8x16_t make_index_row(const uint8x16_t &x) {
+ static const uint8x16_t kTopThreeMask = {
+ 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0,
+ 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0,
+ };
+ return vshrq_n_u8(vandq_u8(x, kTopThreeMask), 5);
+ }
+
+ // Returns true if all of the bits in x are 0.
+ static inline bool is_zero(uint8x16_t x) {
+        // Early experiments suggest this is much slower than simply examining the lanes,
+        // but it may warrant further investigation.
+ #if 0
+ // This code path tests the system register for overflow. We trigger
+ // overflow by adding x to a register with all of its bits set. The
+ // first instruction sets the bits.
+ int reg;
+ asm ("VTST.8 %%q0, %q1, %q1\n"
+ "VQADD.u8 %q1, %%q0\n"
+ "VMRS %0, FPSCR\n"
+ : "=r"(reg) : "w"(vreinterpretq_f32_u8(x)) : "q0", "q1");
+
+ // Bit 21 corresponds to the overflow flag.
+ return reg & (0x1 << 21);
+ #else
+ const uint64x2_t cvt = vreinterpretq_u64_u8(x);
+ const uint64_t l1 = vgetq_lane_u64(cvt, 0);
+ return (l1 == 0) && (l1 == vgetq_lane_u64(cvt, 1));
+ #endif
+ }
+
+ #if defined (SK_CPU_BENDIAN)
+ static inline uint64x2_t fix_endianness(uint64x2_t x) {
+ return x;
+ }
+ #else
+ static inline uint64x2_t fix_endianness(uint64x2_t x) {
+ return vreinterpretq_u64_u8(vrev64q_u8(vreinterpretq_u8_u64(x)));
+ }
+ #endif
+
+ static void compress_r11eac_blocks(uint8_t* dst, const uint8_t* src, size_t rowBytes) {
+ // Try to avoid switching between vector and non-vector ops...
+ const uint8_t *const src1 = src;
+ const uint8_t *const src2 = src + rowBytes;
+ const uint8_t *const src3 = src + 2*rowBytes;
+ const uint8_t *const src4 = src + 3*rowBytes;
+ uint8_t *const dst1 = dst;
+ uint8_t *const dst2 = dst + 16;
+
+ const uint8x16_t alphaRow1 = vld1q_u8(src1);
+ const uint8x16_t alphaRow2 = vld1q_u8(src2);
+ const uint8x16_t alphaRow3 = vld1q_u8(src3);
+ const uint8x16_t alphaRow4 = vld1q_u8(src4);
+
+ const uint8x16_t cmp12 = vceqq_u8(alphaRow1, alphaRow2);
+ const uint8x16_t cmp34 = vceqq_u8(alphaRow3, alphaRow4);
+ const uint8x16_t cmp13 = vceqq_u8(alphaRow1, alphaRow3);
+
+ const uint8x16_t cmp = vandq_u8(vandq_u8(cmp12, cmp34), cmp13);
+ const uint8x16_t ncmp = vmvnq_u8(cmp);
+ const uint8x16_t nAlphaRow1 = vmvnq_u8(alphaRow1);
+ if (is_zero(ncmp)) {
+ if (is_zero(alphaRow1)) {
+ static const uint64x2_t kTransparent = { 0x0020000000002000ULL,
+ 0x0020000000002000ULL };
+ vst1q_u8(dst1, vreinterpretq_u8_u64(kTransparent));
+ vst1q_u8(dst2, vreinterpretq_u8_u64(kTransparent));
+ return;
+ } else if (is_zero(nAlphaRow1)) {
+ vst1q_u8(dst1, cmp);
+ vst1q_u8(dst2, cmp);
+ return;
+ }
+ }
+
+ const uint8x16_t indexRow1 = convert_indices(make_index_row(alphaRow1));
+ const uint8x16_t indexRow2 = convert_indices(make_index_row(alphaRow2));
+ const uint8x16_t indexRow3 = convert_indices(make_index_row(alphaRow3));
+ const uint8x16_t indexRow4 = convert_indices(make_index_row(alphaRow4));
+
+ const uint64x2_t indexRow12 = vreinterpretq_u64_u8(
+ vorrq_u8(vshlq_n_u8(indexRow1, 3), indexRow2));
+ const uint64x2_t indexRow34 = vreinterpretq_u64_u8(
+ vorrq_u8(vshlq_n_u8(indexRow3, 3), indexRow4));
+
+ const uint32x4x2_t blockIndices = vtrnq_u32(vreinterpretq_u32_u64(indexRow12),
+ vreinterpretq_u32_u64(indexRow34));
+ const uint64x2_t blockIndicesLeft = vreinterpretq_u64_u32(vrev64q_u32(blockIndices.val[0]));
+ const uint64x2_t blockIndicesRight = vreinterpretq_u64_u32(vrev64q_u32(blockIndices.val[1]));
+
+ const uint64x2_t indicesLeft = fix_endianness(pack_indices(blockIndicesLeft));
+ const uint64x2_t indicesRight = fix_endianness(pack_indices(blockIndicesRight));
+
+ const uint64x2_t d1 = vcombine_u64(vget_low_u64(indicesLeft), vget_low_u64(indicesRight));
+ const uint64x2_t d2 = vcombine_u64(vget_high_u64(indicesLeft), vget_high_u64(indicesRight));
+ vst1q_u8(dst1, vreinterpretq_u8_u64(d1));
+ vst1q_u8(dst2, vreinterpretq_u8_u64(d2));
+ }
+
+ static bool compress_a8_r11eac(uint8_t* dst, const uint8_t* src,
+ int width, int height, size_t rowBytes) {
+
+ // Since we're going to operate on 4 blocks at a time, the src width
+ // must be a multiple of 16. However, the height only needs to be a
+        // multiple of 4.
+ if (0 == width || 0 == height || (width % 16) != 0 || (height % 4) != 0) {
+ return false;
+ }
+
+ const int blocksX = width >> 2;
+ const int blocksY = height >> 2;
+
+ SkASSERT((blocksX % 4) == 0);
+
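+        // Each call to compress_r11eac_blocks() consumes a 16-pixel-wide, 4-row strip of
+        // alpha (four 4x4 blocks) and emits 4 blocks * 8 bytes = 32 bytes of output.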
+ for (int y = 0; y < blocksY; ++y) {
+ for (int x = 0; x < blocksX; x+=4) {
+ // Compress it
+ compress_r11eac_blocks(dst, src + 4*x, rowBytes);
+ dst += 32;
+ }
+ src += 4 * rowBytes;
+ }
+ return true;
+ }
+
+ static SkOpts::TextureCompressor texture_compressor(SkColorType ct,
+ SkTextureCompressor::Format fmt) {
+ if (ct == kAlpha_8_SkColorType && fmt == SkTextureCompressor::kR11_EAC_Format) {
+ return compress_a8_r11eac;
+ }
+ return nullptr;
+ }
+ static bool fill_block_dimensions(SkTextureCompressor::Format fmt, int* x, int* y) {
+ if (fmt == SkTextureCompressor::kR11_EAC_Format) {
+ *x = 16;
+ *y = 4;
+ return true;
+ }
+ return false;
+ }
+
+#else
+ static SkOpts::TextureCompressor texture_compressor(SkColorType, SkTextureCompressor::Format) {
+ return nullptr;
+ }
+ static bool fill_block_dimensions(SkTextureCompressor::Format, int*, int*) {
+ return false;
+ }
+
+#endif
+
+} // namespace SK_OPTS_NS
+
+#endif//SkTextureCompressor_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkXfermode_opts.h b/gfx/skia/skia/src/opts/SkXfermode_opts.h
new file mode 100644
index 000000000..54f906e4f
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkXfermode_opts.h
@@ -0,0 +1,360 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk4pxXfermode_DEFINED
+#define Sk4pxXfermode_DEFINED
+
+#include "Sk4px.h"
+#include "SkMSAN.h"
+#include "SkNx.h"
+#include "SkXfermode_proccoeff.h"
+
+namespace {
+
+// Most xfermodes can be done most efficiently 4 pixels at a time in 8 or 16-bit fixed point.
+#define XFERMODE(Xfermode) \
+ struct Xfermode { Sk4px operator()(const Sk4px&, const Sk4px&) const; }; \
+ inline Sk4px Xfermode::operator()(const Sk4px& d, const Sk4px& s) const
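+// Note the argument order: each functor is invoked as Xfermode()(d, s), dst first and src
+// second. That is why the Dst* modes below can simply call their Src* counterparts with the
+// arguments swapped (e.g. DstIn is SrcIn()(s, d)).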
+
+XFERMODE(Clear) { return Sk4px::DupPMColor(0); }
+XFERMODE(Src) { return s; }
+XFERMODE(Dst) { return d; }
+XFERMODE(SrcIn) { return s.approxMulDiv255(d.alphas() ); }
+XFERMODE(SrcOut) { return s.approxMulDiv255(d.alphas().inv()); }
+XFERMODE(SrcOver) { return s + d.approxMulDiv255(s.alphas().inv()); }
+XFERMODE(DstIn) { return SrcIn ()(s,d); }
+XFERMODE(DstOut) { return SrcOut ()(s,d); }
+XFERMODE(DstOver) { return SrcOver()(s,d); }
+
+// [ S * Da + (1 - Sa) * D]
+XFERMODE(SrcATop) { return (s * d.alphas() + d * s.alphas().inv()).div255(); }
+XFERMODE(DstATop) { return SrcATop()(s,d); }
+//[ S * (1 - Da) + (1 - Sa) * D ]
+XFERMODE(Xor) { return (s * d.alphas().inv() + d * s.alphas().inv()).div255(); }
+// [S + D ]
+XFERMODE(Plus) { return s.saturatedAdd(d); }
+// [S * D ]
+XFERMODE(Modulate) { return s.approxMulDiv255(d); }
+// [S + D - S * D]
+XFERMODE(Screen) {
+ // Doing the math as S + (1-S)*D or S + (D - S*D) means the add and subtract can be done
+ // in 8-bit space without overflow. S + (1-S)*D is a touch faster because inv() is cheap.
+ return s + d.approxMulDiv255(s.inv());
+}
+XFERMODE(Multiply) { return (s * d.alphas().inv() + d * s.alphas().inv() + s*d).div255(); }
+// [ Sa + Da - Sa*Da, Sc + Dc - 2*min(Sc*Da, Dc*Sa) ] (And notice Sa*Da == min(Sa*Da, Da*Sa).)
+XFERMODE(Difference) {
+ auto m = Sk4px::Wide::Min(s * d.alphas(), d * s.alphas()).div255();
+ // There's no chance of underflow, and if we subtract m before adding s+d, no overflow.
+ return (s - m) + (d - m.zeroAlphas());
+}
+// [ Sa + Da - Sa*Da, Sc + Dc - 2*Sc*Dc ]
+XFERMODE(Exclusion) {
+ auto p = s.approxMulDiv255(d);
+ // There's no chance of underflow, and if we subtract p before adding src+dst, no overflow.
+ return (s - p) + (d - p.zeroAlphas());
+}
+
+// We take care to use exact math for these next few modes where alphas
+// and colors are calculated using significantly different math. We need
+// to preserve premul invariants, and exact math makes this easier.
+//
+// TODO: Some of these implementations could probably be sped up a bit
+// while maintaining exact math; let's follow up on that later.
+
+XFERMODE(HardLight) {
+ auto sa = s.alphas(),
+ da = d.alphas();
+
+ auto srcover = s + (d * sa.inv()).div255();
+
+ auto isLite = ((sa-s) < s).widenLoHi();
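+    // isLite: per channel, Sa - Sc < Sc, i.e. 2*Sc > Sa (the "light source" branch of
+    // hard-light); widenLoHi() widens the 8-bit mask to 16-bit lanes to match lite/dark/both.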
+
+ auto lite = sa*da - ((da-d)*(sa-s) << 1),
+ dark = s*d << 1,
+ both = s*da.inv() + d*sa.inv();
+
+ auto alphas = srcover;
+ auto colors = (both + isLite.thenElse(lite, dark)).div255();
+ return alphas.zeroColors() + colors.zeroAlphas();
+}
+XFERMODE(Overlay) { return HardLight()(s,d); }
+
+XFERMODE(Darken) {
+ auto sa = s.alphas(),
+ da = d.alphas();
+
+ auto sda = (s*da).div255(),
+ dsa = (d*sa).div255();
+
+ auto srcover = s + (d * sa.inv()).div255(),
+ dstover = d + (s * da.inv()).div255();
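+    // Per channel, Darken = Sc*(1-Da) + Dc*(1-Sa) + min(Sc*Da, Dc*Sa). When Sc*Da < Dc*Sa
+    // that is exactly src-over, otherwise dst-over, hence the select below. Lighten is the
+    // same with the comparison flipped.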
+ auto alphas = srcover,
+ colors = (sda < dsa).thenElse(srcover, dstover);
+ return alphas.zeroColors() + colors.zeroAlphas();
+}
+XFERMODE(Lighten) {
+ auto sa = s.alphas(),
+ da = d.alphas();
+
+ auto sda = (s*da).div255(),
+ dsa = (d*sa).div255();
+
+ auto srcover = s + (d * sa.inv()).div255(),
+ dstover = d + (s * da.inv()).div255();
+ auto alphas = srcover,
+ colors = (dsa < sda).thenElse(srcover, dstover);
+ return alphas.zeroColors() + colors.zeroAlphas();
+}
+#undef XFERMODE
+
+// Some xfermodes use math like divide or sqrt that's best done in floats 1 pixel at a time.
+#define XFERMODE(Xfermode) \
+ struct Xfermode { Sk4f operator()(const Sk4f&, const Sk4f&) const; }; \
+ inline Sk4f Xfermode::operator()(const Sk4f& d, const Sk4f& s) const
+
+static inline Sk4f a_rgb(const Sk4f& a, const Sk4f& rgb) {
+ static_assert(SK_A32_SHIFT == 24, "");
+ return a * Sk4f(0,0,0,1) + rgb * Sk4f(1,1,1,0);
+}
+static inline Sk4f alphas(const Sk4f& f) {
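+    // f[3] is the alpha lane (SK_A32_SHIFT == 24 is asserted above); returning it as a
+    // scalar broadcasts it to all four lanes via Sk4f's scalar constructor.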
+ return f[SK_A32_SHIFT/8];
+}
+
+XFERMODE(ColorDodge) {
+ auto sa = alphas(s),
+ da = alphas(d),
+ isa = Sk4f(1)-sa,
+ ida = Sk4f(1)-da;
+
+ auto srcover = s + d*isa,
+ dstover = d + s*ida,
+ otherwise = sa * Sk4f::Min(da, (d*sa)*(sa-s).invert()) + s*ida + d*isa;
+
+ // Order matters here, preferring d==0 over s==sa.
+ auto colors = (d == Sk4f(0)).thenElse(dstover,
+ (s == sa).thenElse(srcover,
+ otherwise));
+ return a_rgb(srcover, colors);
+}
+XFERMODE(ColorBurn) {
+ auto sa = alphas(s),
+ da = alphas(d),
+ isa = Sk4f(1)-sa,
+ ida = Sk4f(1)-da;
+
+ auto srcover = s + d*isa,
+ dstover = d + s*ida,
+ otherwise = sa*(da-Sk4f::Min(da, (da-d)*sa*s.invert())) + s*ida + d*isa;
+
+ // Order matters here, preferring d==da over s==0.
+ auto colors = (d == da).thenElse(dstover,
+ (s == Sk4f(0)).thenElse(srcover,
+ otherwise));
+ return a_rgb(srcover, colors);
+}
+XFERMODE(SoftLight) {
+ auto sa = alphas(s),
+ da = alphas(d),
+ isa = Sk4f(1)-sa,
+ ida = Sk4f(1)-da;
+
+ // Some common terms.
+ auto m = (da > Sk4f(0)).thenElse(d / da, Sk4f(0)),
+ s2 = Sk4f(2)*s,
+ m4 = Sk4f(4)*m;
+
+ // The logic forks three ways:
+ // 1. dark src?
+ // 2. light src, dark dst?
+ // 3. light src, light dst?
+ auto darkSrc = d*(sa + (s2 - sa)*(Sk4f(1) - m)), // Used in case 1.
+ darkDst = (m4*m4 + m4)*(m - Sk4f(1)) + Sk4f(7)*m, // Used in case 2.
+ liteDst = m.sqrt() - m, // Used in case 3.
+ liteSrc = d*sa + da*(s2-sa)*(Sk4f(4)*d <= da).thenElse(darkDst, liteDst); // Case 2 or 3?
+
+ auto alpha = s + d*isa;
+ auto colors = s*ida + d*isa + (s2 <= sa).thenElse(darkSrc, liteSrc); // Case 1 or 2/3?
+
+ return a_rgb(alpha, colors);
+}
+#undef XFERMODE
+
+// A reasonable fallback for AA is to simply apply the transfermode first, then linearly
+// interpolate between dst and the blended result using the AA coverage.
+template <typename Xfermode>
+static Sk4px xfer_aa(const Sk4px& d, const Sk4px& s, const Sk4px& aa) {
+ Sk4px bw = Xfermode()(d, s);
+ return (bw * aa + d * aa.inv()).div255();
+}
+
+// For some transfermodes we specialize AA, either for correctness or performance.
+#define XFERMODE_AA(Xfermode) \
+ template <> Sk4px xfer_aa<Xfermode>(const Sk4px& d, const Sk4px& s, const Sk4px& aa)
+
+// Plus' clamp needs to happen after AA. skia:3852
+XFERMODE_AA(Plus) { // [ clamp( (1-AA)D + (AA)(S+D) ) == clamp(D + AA*S) ]
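+    // (1-AA)*D + AA*(S+D) = D - AA*D + AA*S + AA*D = D + AA*S, so the clamp can be deferred
+    // until after the lerp (see skia:3852 above).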
+ return d.saturatedAdd(s.approxMulDiv255(aa));
+}
+
+#undef XFERMODE_AA
+
+// Src and Clear modes are safe to use with uninitialized dst buffers,
+// even if the implementation branches based on bytes from dst (e.g. asserts in Debug mode).
+// For those modes, just lie to MSAN that dst is always initialized.
+template <typename Xfermode> static void mark_dst_initialized_if_safe(void*, void*) {}
+template <> void mark_dst_initialized_if_safe<Src>(void* dst, void* end) {
+ sk_msan_mark_initialized(dst, end, "Src doesn't read dst.");
+}
+template <> void mark_dst_initialized_if_safe<Clear>(void* dst, void* end) {
+ sk_msan_mark_initialized(dst, end, "Clear doesn't read dst.");
+}
+
+template <typename Xfermode>
+class Sk4pxXfermode : public SkProcCoeffXfermode {
+public:
+ Sk4pxXfermode(const ProcCoeff& rec, SkXfermode::Mode mode)
+ : INHERITED(rec, mode) {}
+
+ void xfer32(SkPMColor dst[], const SkPMColor src[], int n, const SkAlpha aa[]) const override {
+ mark_dst_initialized_if_safe<Xfermode>(dst, dst+n);
+ if (nullptr == aa) {
+ Sk4px::MapDstSrc(n, dst, src, Xfermode());
+ } else {
+ Sk4px::MapDstSrcAlpha(n, dst, src, aa, xfer_aa<Xfermode>);
+ }
+ }
+
+ void xfer16(uint16_t dst[], const SkPMColor src[], int n, const SkAlpha aa[]) const override {
+ mark_dst_initialized_if_safe<Xfermode>(dst, dst+n);
+ SkPMColor dst32[4];
+ while (n >= 4) {
+ dst32[0] = SkPixel16ToPixel32(dst[0]);
+ dst32[1] = SkPixel16ToPixel32(dst[1]);
+ dst32[2] = SkPixel16ToPixel32(dst[2]);
+ dst32[3] = SkPixel16ToPixel32(dst[3]);
+
+ this->xfer32(dst32, src, 4, aa);
+
+ dst[0] = SkPixel32ToPixel16(dst32[0]);
+ dst[1] = SkPixel32ToPixel16(dst32[1]);
+ dst[2] = SkPixel32ToPixel16(dst32[2]);
+ dst[3] = SkPixel32ToPixel16(dst32[3]);
+
+ dst += 4;
+ src += 4;
+ aa += aa ? 4 : 0;
+ n -= 4;
+ }
+ while (n) {
+ SkPMColor dst32 = SkPixel16ToPixel32(*dst);
+ this->xfer32(&dst32, src, 1, aa);
+ *dst = SkPixel32ToPixel16(dst32);
+
+ dst += 1;
+ src += 1;
+ aa += aa ? 1 : 0;
+ n -= 1;
+ }
+ }
+
+private:
+ typedef SkProcCoeffXfermode INHERITED;
+};
+
+template <typename Xfermode>
+class Sk4fXfermode : public SkProcCoeffXfermode {
+public:
+ Sk4fXfermode(const ProcCoeff& rec, SkXfermode::Mode mode)
+ : INHERITED(rec, mode) {}
+
+ void xfer32(SkPMColor dst[], const SkPMColor src[], int n, const SkAlpha aa[]) const override {
+ for (int i = 0; i < n; i++) {
+ dst[i] = Xfer32_1(dst[i], src[i], aa ? aa+i : nullptr);
+ }
+ }
+
+ void xfer16(uint16_t dst[], const SkPMColor src[], int n, const SkAlpha aa[]) const override {
+ for (int i = 0; i < n; i++) {
+ SkPMColor dst32 = SkPixel16ToPixel32(dst[i]);
+ dst32 = Xfer32_1(dst32, src[i], aa ? aa+i : nullptr);
+ dst[i] = SkPixel32ToPixel16(dst32);
+ }
+ }
+
+private:
+ static SkPMColor Xfer32_1(SkPMColor dst, const SkPMColor src, const SkAlpha* aa) {
+ Sk4f d = Load(dst),
+ s = Load(src),
+ b = Xfermode()(d, s);
+ if (aa) {
+ Sk4f a = Sk4f(*aa) * Sk4f(1.0f/255);
+ b = b*a + d*(Sk4f(1)-a);
+ }
+ return Round(b);
+ }
+
+ static Sk4f Load(SkPMColor c) {
+ return SkNx_cast<float>(Sk4b::Load(&c)) * Sk4f(1.0f/255);
+ }
+
+ static SkPMColor Round(const Sk4f& f) {
+ SkPMColor c;
+ SkNx_cast<uint8_t>(f * Sk4f(255) + Sk4f(0.5f)).store(&c);
+ return c;
+ }
+
+ typedef SkProcCoeffXfermode INHERITED;
+};
+
+} // namespace
+
+namespace SK_OPTS_NS {
+
+static SkXfermode* create_xfermode(const ProcCoeff& rec, SkXfermode::Mode mode) {
+ switch (mode) {
+#define CASE(Xfermode) \
+ case SkXfermode::k##Xfermode##_Mode: return new Sk4pxXfermode<Xfermode>(rec, mode)
+ CASE(Clear);
+ CASE(Src);
+ CASE(Dst);
+ CASE(SrcOver);
+ CASE(DstOver);
+ CASE(SrcIn);
+ CASE(DstIn);
+ CASE(SrcOut);
+ CASE(DstOut);
+ CASE(SrcATop);
+ CASE(DstATop);
+ CASE(Xor);
+ CASE(Plus);
+ CASE(Modulate);
+ CASE(Screen);
+ CASE(Multiply);
+ CASE(Difference);
+ CASE(Exclusion);
+ CASE(HardLight);
+ CASE(Overlay);
+ CASE(Darken);
+ CASE(Lighten);
+ #undef CASE
+
+#define CASE(Xfermode) \
+ case SkXfermode::k##Xfermode##_Mode: return new Sk4fXfermode<Xfermode>(rec, mode)
+ CASE(ColorDodge);
+ CASE(ColorBurn);
+ CASE(SoftLight);
+ #undef CASE
+
+ default: break;
+ }
+ return nullptr;
+}
+
+} // namespace SK_OPTS_NS
+
+#endif//Sk4pxXfermode_DEFINED
diff --git a/gfx/skia/skia/src/opts/opts_check_x86.cpp b/gfx/skia/skia/src/opts/opts_check_x86.cpp
new file mode 100644
index 000000000..a8003a3b0
--- /dev/null
+++ b/gfx/skia/skia/src/opts/opts_check_x86.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapFilter_opts_SSE2.h"
+#include "SkBitmapProcState_opts_SSE2.h"
+#include "SkBitmapProcState_opts_SSSE3.h"
+#include "SkBitmapScaler.h"
+#include "SkBlitMask.h"
+#include "SkBlitRow.h"
+#include "SkBlitRow_opts_SSE2.h"
+#include "SkCpu.h"
+
+
+/*
+ *****************************************
+ *********This file is deprecated*********
+ *****************************************
+ * New CPU-specific work should be done in
+ * the SkOpts framework. Run-time detection
+ * of available instruction set extensions
+ * is implemented in src/core/SkOpts.cpp.
+ *****************************************
+ */
+
+
+/* This file must *not* be compiled with -msse or any other optional SIMD
+   extension; otherwise gcc may emit SIMD instructions even for scalar ops
+   (and the code below could then raise an invalid-instruction fault on a
+   Pentium 3). For example, only files named *_SSE2.cpp in this directory
+   should be compiled with -msse2 or higher. */
+
+////////////////////////////////////////////////////////////////////////////////
+
+void SkBitmapScaler::PlatformConvolutionProcs(SkConvolutionProcs* procs) {
+ if (SkCpu::Supports(SkCpu::SSE2)) {
+ procs->fExtraHorizontalReads = 3;
+ procs->fConvolveVertically = &convolveVertically_SSE2;
+ procs->fConvolve4RowsHorizontally = &convolve4RowsHorizontally_SSE2;
+ procs->fConvolveHorizontally = &convolveHorizontally_SSE2;
+ procs->fApplySIMDPadding = &applySIMDPadding_SSE2;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void SkBitmapProcState::platformProcs() {
+ /* Every optimization in the function requires at least SSE2 */
+ if (!SkCpu::Supports(SkCpu::SSE2)) {
+ return;
+ }
+ const bool ssse3 = SkCpu::Supports(SkCpu::SSSE3);
+
+ /* Check fSampleProc32 */
+ if (fSampleProc32 == S32_opaque_D32_filter_DX) {
+ if (ssse3) {
+ fSampleProc32 = S32_opaque_D32_filter_DX_SSSE3;
+ } else {
+ fSampleProc32 = S32_opaque_D32_filter_DX_SSE2;
+ }
+ } else if (fSampleProc32 == S32_opaque_D32_filter_DXDY) {
+ if (ssse3) {
+ fSampleProc32 = S32_opaque_D32_filter_DXDY_SSSE3;
+ }
+ } else if (fSampleProc32 == S32_alpha_D32_filter_DX) {
+ if (ssse3) {
+ fSampleProc32 = S32_alpha_D32_filter_DX_SSSE3;
+ } else {
+ fSampleProc32 = S32_alpha_D32_filter_DX_SSE2;
+ }
+ } else if (fSampleProc32 == S32_alpha_D32_filter_DXDY) {
+ if (ssse3) {
+ fSampleProc32 = S32_alpha_D32_filter_DXDY_SSSE3;
+ }
+ }
+
+ /* Check fMatrixProc */
+ if (fMatrixProc == ClampX_ClampY_filter_scale) {
+ fMatrixProc = ClampX_ClampY_filter_scale_SSE2;
+ } else if (fMatrixProc == ClampX_ClampY_nofilter_scale) {
+ fMatrixProc = ClampX_ClampY_nofilter_scale_SSE2;
+ } else if (fMatrixProc == ClampX_ClampY_filter_affine) {
+ fMatrixProc = ClampX_ClampY_filter_affine_SSE2;
+ } else if (fMatrixProc == ClampX_ClampY_nofilter_affine) {
+ fMatrixProc = ClampX_ClampY_nofilter_affine_SSE2;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+static const SkBlitRow::Proc16 platform_16_procs[] = {
+ S32_D565_Opaque_SSE2, // S32_D565_Opaque
+ nullptr, // S32_D565_Blend
+ S32A_D565_Opaque_SSE2, // S32A_D565_Opaque
+ nullptr, // S32A_D565_Blend
+ S32_D565_Opaque_Dither_SSE2, // S32_D565_Opaque_Dither
+ nullptr, // S32_D565_Blend_Dither
+ S32A_D565_Opaque_Dither_SSE2, // S32A_D565_Opaque_Dither
+ nullptr, // S32A_D565_Blend_Dither
+};
+
+SkBlitRow::Proc16 SkBlitRow::PlatformFactory565(unsigned flags) {
+ if (SkCpu::Supports(SkCpu::SSE2)) {
+ return platform_16_procs[flags];
+ } else {
+ return nullptr;
+ }
+}
+
+static const SkBlitRow::ColorProc16 platform_565_colorprocs_SSE2[] = {
+ Color32A_D565_SSE2, // Color32A_D565,
+ nullptr, // Color32A_D565_Dither
+};
+
+SkBlitRow::ColorProc16 SkBlitRow::PlatformColorFactory565(unsigned flags) {
+/* If you're thinking about writing an SSE4 version of this, do check it's
+ * actually faster on Atom. Our original SSE4 version was slower than this
+ * SSE2 version on Silvermont, and only marginally faster on a Core i7,
+ * mainly due to the MULLD timings.
+ */
+ if (SkCpu::Supports(SkCpu::SSE2)) {
+ return platform_565_colorprocs_SSE2[flags];
+ } else {
+ return nullptr;
+ }
+}
+
+static const SkBlitRow::Proc32 platform_32_procs_SSE2[] = {
+ nullptr, // S32_Opaque,
+ S32_Blend_BlitRow32_SSE2, // S32_Blend,
+ nullptr, // Ported to SkOpts
+ S32A_Blend_BlitRow32_SSE2, // S32A_Blend,
+};
+
+SkBlitRow::Proc32 SkBlitRow::PlatformProcs32(unsigned flags) {
+ if (SkCpu::Supports(SkCpu::SSE2)) {
+ return platform_32_procs_SSE2[flags];
+ } else {
+ return nullptr;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+SkBlitMask::BlitLCD16RowProc SkBlitMask::PlatformBlitRowProcs16(bool isOpaque) {
+ if (SkCpu::Supports(SkCpu::SSE2)) {
+ if (isOpaque) {
+ return SkBlitLCD16OpaqueRow_SSE2;
+ } else {
+ return SkBlitLCD16Row_SSE2;
+ }
+ } else {
+ return nullptr;
+ }
+
+}
+
+SkBlitMask::RowProc SkBlitMask::PlatformRowProcs(SkColorType, SkMask::Format, RowFlags) {
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/pathops/SkAddIntersections.cpp b/gfx/skia/skia/src/pathops/SkAddIntersections.cpp
new file mode 100644
index 000000000..b3a82cdec
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkAddIntersections.cpp
@@ -0,0 +1,564 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkAddIntersections.h"
+#include "SkOpCoincidence.h"
+#include "SkPathOpsBounds.h"
+
+#if DEBUG_ADD_INTERSECTING_TS
+
+static void debugShowLineIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " LINE_DEBUG_STR " " LINE_DEBUG_STR "\n",
+ __FUNCTION__, LINE_DEBUG_DATA(wt.pts()), LINE_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " LINE_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], LINE_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ if (pts == 2) {
+ SkDebugf(" " T_DEBUG_STR(wtTs, 1) " " PT_DEBUG_STR, i[0][1], PT_DEBUG_DATA(i, 1));
+ }
+ SkDebugf(" wnTs[0]=%g " LINE_DEBUG_STR, i[1][0], LINE_DEBUG_DATA(wn.pts()));
+ if (pts == 2) {
+ SkDebugf(" " T_DEBUG_STR(wnTs, 1), i[1][1]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowQuadLineIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn,
+ const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " QUAD_DEBUG_STR " " LINE_DEBUG_STR "\n",
+ __FUNCTION__, QUAD_DEBUG_DATA(wt.pts()), LINE_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " QUAD_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], QUAD_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " LINE_DEBUG_STR, i[1][0], LINE_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowQuadIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " QUAD_DEBUG_STR " " QUAD_DEBUG_STR "\n",
+ __FUNCTION__, QUAD_DEBUG_DATA(wt.pts()), QUAD_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " QUAD_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], QUAD_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " QUAD_DEBUG_STR, i[1][0], QUAD_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowConicLineIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CONIC_DEBUG_STR " " LINE_DEBUG_STR "\n",
+ __FUNCTION__, CONIC_DEBUG_DATA(wt.pts(), wt.weight()), LINE_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CONIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CONIC_DEBUG_DATA(wt.pts(), wt.weight()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " LINE_DEBUG_STR, i[1][0], LINE_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowConicQuadIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CONIC_DEBUG_STR " " QUAD_DEBUG_STR "\n",
+ __FUNCTION__, CONIC_DEBUG_DATA(wt.pts(), wt.weight()), QUAD_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CONIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CONIC_DEBUG_DATA(wt.pts(), wt.weight()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " QUAD_DEBUG_STR, i[1][0], QUAD_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowConicIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CONIC_DEBUG_STR " " CONIC_DEBUG_STR "\n",
+ __FUNCTION__, CONIC_DEBUG_DATA(wt.pts(), wt.weight()),
+ CONIC_DEBUG_DATA(wn.pts(), wn.weight()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CONIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CONIC_DEBUG_DATA(wt.pts(), wt.weight()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " CONIC_DEBUG_STR, i[1][0], CONIC_DEBUG_DATA(wn.pts(), wn.weight()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowCubicLineIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CUBIC_DEBUG_STR " " LINE_DEBUG_STR "\n",
+ __FUNCTION__, CUBIC_DEBUG_DATA(wt.pts()), LINE_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CUBIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CUBIC_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " LINE_DEBUG_STR, i[1][0], LINE_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowCubicQuadIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CUBIC_DEBUG_STR " " QUAD_DEBUG_STR "\n",
+ __FUNCTION__, CUBIC_DEBUG_DATA(wt.pts()), QUAD_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CUBIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CUBIC_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " QUAD_DEBUG_STR, i[1][0], QUAD_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowCubicConicIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CUBIC_DEBUG_STR " " CONIC_DEBUG_STR "\n",
+ __FUNCTION__, CUBIC_DEBUG_DATA(wt.pts()), CONIC_DEBUG_DATA(wn.pts(), wn.weight()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CUBIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CUBIC_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " CONIC_DEBUG_STR, i[1][0], CONIC_DEBUG_DATA(wn.pts(), wn.weight()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowCubicIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CUBIC_DEBUG_STR " " CUBIC_DEBUG_STR "\n",
+ __FUNCTION__, CUBIC_DEBUG_DATA(wt.pts()), CUBIC_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CUBIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CUBIC_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " CUBIC_DEBUG_STR, i[1][0], CUBIC_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+#else
+static void debugShowLineIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowQuadLineIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowQuadIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowConicLineIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowConicQuadIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowConicIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowCubicLineIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowCubicQuadIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowCubicConicIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowCubicIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+#endif
+
+bool AddIntersectTs(SkOpContour* test, SkOpContour* next, SkOpCoincidence* coincidence) {
+ if (test != next) {
+ if (AlmostLessUlps(test->bounds().fBottom, next->bounds().fTop)) {
+ return false;
+ }
+ // OPTIMIZATION: outset contour bounds a smidgen instead?
+ if (!SkPathOpsBounds::Intersects(test->bounds(), next->bounds())) {
+ return true;
+ }
+ }
+ SkIntersectionHelper wt;
+ wt.init(test);
+ do {
+ SkIntersectionHelper wn;
+ wn.init(next);
+ test->debugValidate();
+ next->debugValidate();
+ if (test == next && !wn.startAfter(wt)) {
+ continue;
+ }
+ do {
+ if (!SkPathOpsBounds::Intersects(wt.bounds(), wn.bounds())) {
+ continue;
+ }
+ int pts = 0;
+ SkIntersections ts { SkDEBUGCODE(test->globalState()) };
+ bool swap = false;
+ SkDQuad quad1, quad2;
+ SkDConic conic1, conic2;
+ SkDCubic cubic1, cubic2;
+ switch (wt.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ swap = true;
+ switch (wn.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ case SkIntersectionHelper::kLine_Segment:
+ pts = ts.lineHorizontal(wn.pts(), wt.left(),
+ wt.right(), wt.y(), wt.xFlipped());
+ debugShowLineIntersection(pts, wn, wt, ts);
+ break;
+ case SkIntersectionHelper::kQuad_Segment:
+ pts = ts.quadHorizontal(wn.pts(), wt.left(),
+ wt.right(), wt.y(), wt.xFlipped());
+ debugShowQuadLineIntersection(pts, wn, wt, ts);
+ break;
+ case SkIntersectionHelper::kConic_Segment:
+ pts = ts.conicHorizontal(wn.pts(), wn.weight(), wt.left(),
+ wt.right(), wt.y(), wt.xFlipped());
+ debugShowConicLineIntersection(pts, wn, wt, ts);
+ break;
+ case SkIntersectionHelper::kCubic_Segment:
+ pts = ts.cubicHorizontal(wn.pts(), wt.left(),
+ wt.right(), wt.y(), wt.xFlipped());
+ debugShowCubicLineIntersection(pts, wn, wt, ts);
+ break;
+ default:
+ SkASSERT(0);
+ }
+ break;
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ swap = true;
+ switch (wn.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ case SkIntersectionHelper::kLine_Segment: {
+ pts = ts.lineVertical(wn.pts(), wt.top(),
+ wt.bottom(), wt.x(), wt.yFlipped());
+ debugShowLineIntersection(pts, wn, wt, ts);
+ break;
+ }
+ case SkIntersectionHelper::kQuad_Segment: {
+ pts = ts.quadVertical(wn.pts(), wt.top(),
+ wt.bottom(), wt.x(), wt.yFlipped());
+ debugShowQuadLineIntersection(pts, wn, wt, ts);
+ break;
+ }
+ case SkIntersectionHelper::kConic_Segment: {
+ pts = ts.conicVertical(wn.pts(), wn.weight(), wt.top(),
+ wt.bottom(), wt.x(), wt.yFlipped());
+ debugShowConicLineIntersection(pts, wn, wt, ts);
+ break;
+ }
+ case SkIntersectionHelper::kCubic_Segment: {
+ pts = ts.cubicVertical(wn.pts(), wt.top(),
+ wt.bottom(), wt.x(), wt.yFlipped());
+ debugShowCubicLineIntersection(pts, wn, wt, ts);
+ break;
+ }
+ default:
+ SkASSERT(0);
+ }
+ break;
+ case SkIntersectionHelper::kLine_Segment:
+ switch (wn.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ pts = ts.lineHorizontal(wt.pts(), wn.left(),
+ wn.right(), wn.y(), wn.xFlipped());
+ debugShowLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ pts = ts.lineVertical(wt.pts(), wn.top(),
+ wn.bottom(), wn.x(), wn.yFlipped());
+ debugShowLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kLine_Segment:
+ pts = ts.lineLine(wt.pts(), wn.pts());
+ debugShowLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kQuad_Segment:
+ swap = true;
+ pts = ts.quadLine(wn.pts(), wt.pts());
+ debugShowQuadLineIntersection(pts, wn, wt, ts);
+ break;
+ case SkIntersectionHelper::kConic_Segment:
+ swap = true;
+ pts = ts.conicLine(wn.pts(), wn.weight(), wt.pts());
+ debugShowConicLineIntersection(pts, wn, wt, ts);
+ break;
+ case SkIntersectionHelper::kCubic_Segment:
+ swap = true;
+ pts = ts.cubicLine(wn.pts(), wt.pts());
+ debugShowCubicLineIntersection(pts, wn, wt, ts);
+ break;
+ default:
+ SkASSERT(0);
+ }
+ break;
+ case SkIntersectionHelper::kQuad_Segment:
+ switch (wn.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ pts = ts.quadHorizontal(wt.pts(), wn.left(),
+ wn.right(), wn.y(), wn.xFlipped());
+ debugShowQuadLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ pts = ts.quadVertical(wt.pts(), wn.top(),
+ wn.bottom(), wn.x(), wn.yFlipped());
+ debugShowQuadLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kLine_Segment:
+ pts = ts.quadLine(wt.pts(), wn.pts());
+ debugShowQuadLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kQuad_Segment: {
+ pts = ts.intersect(quad1.set(wt.pts()), quad2.set(wn.pts()));
+ debugShowQuadIntersection(pts, wt, wn, ts);
+ break;
+ }
+ case SkIntersectionHelper::kConic_Segment: {
+ swap = true;
+ pts = ts.intersect(conic2.set(wn.pts(), wn.weight()),
+ quad1.set(wt.pts()));
+ debugShowConicQuadIntersection(pts, wn, wt, ts);
+ break;
+ }
+ case SkIntersectionHelper::kCubic_Segment: {
+ swap = true;
+ pts = ts.intersect(cubic2.set(wn.pts()), quad1.set(wt.pts()));
+ debugShowCubicQuadIntersection(pts, wn, wt, ts);
+ break;
+ }
+ default:
+ SkASSERT(0);
+ }
+ break;
+ case SkIntersectionHelper::kConic_Segment:
+ switch (wn.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ pts = ts.conicHorizontal(wt.pts(), wt.weight(), wn.left(),
+ wn.right(), wn.y(), wn.xFlipped());
+ debugShowConicLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ pts = ts.conicVertical(wt.pts(), wt.weight(), wn.top(),
+ wn.bottom(), wn.x(), wn.yFlipped());
+ debugShowConicLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kLine_Segment:
+ pts = ts.conicLine(wt.pts(), wt.weight(), wn.pts());
+ debugShowConicLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kQuad_Segment: {
+ pts = ts.intersect(conic1.set(wt.pts(), wt.weight()),
+ quad2.set(wn.pts()));
+ debugShowConicQuadIntersection(pts, wt, wn, ts);
+ break;
+ }
+ case SkIntersectionHelper::kConic_Segment: {
+ pts = ts.intersect(conic1.set(wt.pts(), wt.weight()),
+ conic2.set(wn.pts(), wn.weight()));
+ debugShowConicIntersection(pts, wt, wn, ts);
+ break;
+ }
+ case SkIntersectionHelper::kCubic_Segment: {
+ swap = true;
+ pts = ts.intersect(cubic2.set(wn.pts()),
+ conic1.set(wt.pts(), wt.weight()));
+ debugShowCubicConicIntersection(pts, wn, wt, ts);
+ break;
+ }
+ }
+ break;
+ case SkIntersectionHelper::kCubic_Segment:
+ switch (wn.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ pts = ts.cubicHorizontal(wt.pts(), wn.left(),
+ wn.right(), wn.y(), wn.xFlipped());
+ debugShowCubicLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ pts = ts.cubicVertical(wt.pts(), wn.top(),
+ wn.bottom(), wn.x(), wn.yFlipped());
+ debugShowCubicLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kLine_Segment:
+ pts = ts.cubicLine(wt.pts(), wn.pts());
+ debugShowCubicLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kQuad_Segment: {
+ pts = ts.intersect(cubic1.set(wt.pts()), quad2.set(wn.pts()));
+ debugShowCubicQuadIntersection(pts, wt, wn, ts);
+ break;
+ }
+ case SkIntersectionHelper::kConic_Segment: {
+ pts = ts.intersect(cubic1.set(wt.pts()),
+ conic2.set(wn.pts(), wn.weight()));
+ debugShowCubicConicIntersection(pts, wt, wn, ts);
+ break;
+ }
+ case SkIntersectionHelper::kCubic_Segment: {
+ pts = ts.intersect(cubic1.set(wt.pts()), cubic2.set(wn.pts()));
+ debugShowCubicIntersection(pts, wt, wn, ts);
+ break;
+ }
+ default:
+ SkASSERT(0);
+ }
+ break;
+ default:
+ SkASSERT(0);
+ }
+#if DEBUG_T_SECT_LOOP_COUNT
+ test->globalState()->debugAddLoopCount(&ts, wt, wn);
+#endif
+ int coinIndex = -1;
+ SkOpPtT* coinPtT[2];
+ for (int pt = 0; pt < pts; ++pt) {
+ SkASSERT(ts[0][pt] >= 0 && ts[0][pt] <= 1);
+ SkASSERT(ts[1][pt] >= 0 && ts[1][pt] <= 1);
+ wt.segment()->debugValidate();
+ SkOpPtT* testTAt = wt.segment()->addT(ts[swap][pt]);
+ wn.segment()->debugValidate();
+ SkOpPtT* nextTAt = wn.segment()->addT(ts[!swap][pt]);
+ if (!testTAt->contains(nextTAt)) {
+ SkOpPtT* oppPrev = testTAt->oppPrev(nextTAt); // Returns nullptr if pair
+ if (oppPrev) { // already share a pt-t loop.
+ testTAt->span()->mergeMatches(nextTAt->span());
+ testTAt->addOpp(nextTAt, oppPrev);
+ }
+ if (testTAt->fPt != nextTAt->fPt) {
+ testTAt->span()->unaligned();
+ nextTAt->span()->unaligned();
+ }
+ wt.segment()->debugValidate();
+ wn.segment()->debugValidate();
+ }
+ if (!ts.isCoincident(pt)) {
+ continue;
+ }
+ if (coinIndex < 0) {
+ coinPtT[0] = testTAt;
+ coinPtT[1] = nextTAt;
+ coinIndex = pt;
+ continue;
+ }
+ if (coinPtT[0]->span() == testTAt->span()) {
+ coinIndex = -1;
+ continue;
+ }
+ if (coinPtT[1]->span() == nextTAt->span()) {
+ coinIndex = -1; // coincidence span collapsed
+ continue;
+ }
+ if (swap) {
+ SkTSwap(coinPtT[0], coinPtT[1]);
+ SkTSwap(testTAt, nextTAt);
+ }
+ SkASSERT(coincidence->globalState()->debugSkipAssert()
+ || coinPtT[0]->span()->t() < testTAt->span()->t());
+ if (coinPtT[0]->span()->deleted()) {
+ coinIndex = -1;
+ continue;
+ }
+ if (testTAt->span()->deleted()) {
+ coinIndex = -1;
+ continue;
+ }
+ coincidence->add(coinPtT[0], testTAt, coinPtT[1], nextTAt);
+ wt.segment()->debugValidate();
+ wn.segment()->debugValidate();
+ coinIndex = -1;
+ }
+ SkASSERT(coinIndex < 0); // expect coincidence to be paired
+ } while (wn.advance());
+ } while (wt.advance());
+ return true;
+}
diff --git a/gfx/skia/skia/src/pathops/SkAddIntersections.h b/gfx/skia/skia/src/pathops/SkAddIntersections.h
new file mode 100644
index 000000000..ca409800c
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkAddIntersections.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkAddIntersections_DEFINED
+#define SkAddIntersections_DEFINED
+
+#include "SkIntersectionHelper.h"
+#include "SkIntersections.h"
+
+class SkOpCoincidence;
+
+bool AddIntersectTs(SkOpContour* test, SkOpContour* next, SkOpCoincidence* coincidence);
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkDConicLineIntersection.cpp b/gfx/skia/skia/src/pathops/SkDConicLineIntersection.cpp
new file mode 100644
index 000000000..eb32068d0
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkDConicLineIntersection.cpp
@@ -0,0 +1,384 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkIntersections.h"
+#include "SkPathOpsConic.h"
+#include "SkPathOpsCurve.h"
+#include "SkPathOpsLine.h"
+
+class LineConicIntersections {
+public:
+ enum PinTPoint {
+ kPointUninitialized,
+ kPointInitialized
+ };
+
+ LineConicIntersections(const SkDConic& c, const SkDLine& l, SkIntersections* i)
+ : fConic(c)
+ , fLine(&l)
+ , fIntersections(i)
+ , fAllowNear(true) {
+ i->setMax(4); // allow short partial coincidence plus discrete intersection
+ }
+
+ LineConicIntersections(const SkDConic& c)
+ : fConic(c)
+ SkDEBUGPARAMS(fLine(nullptr))
+ SkDEBUGPARAMS(fIntersections(nullptr))
+ SkDEBUGPARAMS(fAllowNear(false)) {
+ }
+
+ void allowNear(bool allow) {
+ fAllowNear = allow;
+ }
+
+ void checkCoincident() {
+ int last = fIntersections->used() - 1;
+ for (int index = 0; index < last; ) {
+ double conicMidT = ((*fIntersections)[0][index] + (*fIntersections)[0][index + 1]) / 2;
+ SkDPoint conicMidPt = fConic.ptAtT(conicMidT);
+ double t = fLine->nearPoint(conicMidPt, nullptr);
+ if (t < 0) {
+ ++index;
+ continue;
+ }
+ if (fIntersections->isCoincident(index)) {
+ fIntersections->removeOne(index);
+ --last;
+ } else if (fIntersections->isCoincident(index + 1)) {
+ fIntersections->removeOne(index + 1);
+ --last;
+ } else {
+ fIntersections->setCoincident(index++);
+ }
+ fIntersections->setCoincident(index);
+ }
+ }
+
+#ifdef SK_DEBUG
+ static bool close_to(double a, double b, const double c[3]) {
+ double max = SkTMax(-SkTMin(SkTMin(c[0], c[1]), c[2]), SkTMax(SkTMax(c[0], c[1]), c[2]));
+ return approximately_zero_when_compared_to(a - b, max);
+ }
+#endif
+ int horizontalIntersect(double axisIntercept, double roots[2]) {
+ double conicVals[] = { fConic[0].fY, fConic[1].fY, fConic[2].fY };
+ return this->validT(conicVals, axisIntercept, roots);
+ }
+
+ int horizontalIntersect(double axisIntercept, double left, double right, bool flipped) {
+ this->addExactHorizontalEndPoints(left, right, axisIntercept);
+ if (fAllowNear) {
+ this->addNearHorizontalEndPoints(left, right, axisIntercept);
+ }
+ double roots[2];
+ int count = this->horizontalIntersect(axisIntercept, roots);
+ for (int index = 0; index < count; ++index) {
+ double conicT = roots[index];
+ SkDPoint pt = fConic.ptAtT(conicT);
+ SkDEBUGCODE(double conicVals[] = { fConic[0].fY, fConic[1].fY, fConic[2].fY });
+ SkOPOBJASSERT(fIntersections, close_to(pt.fY, axisIntercept, conicVals));
+ double lineT = (pt.fX - left) / (right - left);
+ if (this->pinTs(&conicT, &lineT, &pt, kPointInitialized)
+ && this->uniqueAnswer(conicT, pt)) {
+ fIntersections->insert(conicT, lineT, pt);
+ }
+ }
+ if (flipped) {
+ fIntersections->flip();
+ }
+ this->checkCoincident();
+ return fIntersections->used();
+ }
+
+ int intersect() {
+ this->addExactEndPoints();
+ if (fAllowNear) {
+ this->addNearEndPoints();
+ }
+ double rootVals[2];
+ int roots = this->intersectRay(rootVals);
+ for (int index = 0; index < roots; ++index) {
+ double conicT = rootVals[index];
+ double lineT = this->findLineT(conicT);
+#ifdef SK_DEBUG
+ if (!fIntersections->debugGlobalState()
+ || !fIntersections->debugGlobalState()->debugSkipAssert()) {
+ SkDEBUGCODE(SkDPoint conicPt = fConic.ptAtT(conicT));
+ SkDEBUGCODE(SkDPoint linePt = fLine->ptAtT(lineT));
+ SkASSERT(conicPt.approximatelyDEqual(linePt));
+ }
+#endif
+ SkDPoint pt;
+ if (this->pinTs(&conicT, &lineT, &pt, kPointUninitialized)
+ && this->uniqueAnswer(conicT, pt)) {
+ fIntersections->insert(conicT, lineT, pt);
+ }
+ }
+ this->checkCoincident();
+ return fIntersections->used();
+ }
+
+ int intersectRay(double roots[2]) {
+ double adj = (*fLine)[1].fX - (*fLine)[0].fX;
+ double opp = (*fLine)[1].fY - (*fLine)[0].fY;
+ double r[3];
+ for (int n = 0; n < 3; ++n) {
+ r[n] = (fConic[n].fY - (*fLine)[0].fY) * adj - (fConic[n].fX - (*fLine)[0].fX) * opp;
+ }
+ return this->validT(r, 0, roots);
+ }
+
+ int validT(double r[3], double axisIntercept, double roots[2]) {
+ double A = r[2];
+ double B = r[1] * fConic.fWeight - axisIntercept * fConic.fWeight + axisIntercept;
+ double C = r[0];
+ A += C - 2 * B; // A = a + c - 2*(b*w - xCept*w + xCept)
+ B -= C; // B = b*w - w * xCept + xCept - a
+ C -= axisIntercept;
+ return SkDQuad::RootsValidT(A, 2 * B, C, roots);
+ }
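+
+ // For reference, a short derivation of validT's coefficients (an editorial
+ // sketch, not from the upstream sources): the axis component of the conic
+ // (its y values for horizontalIntersect, x for verticalIntersect) is the
+ // rational quadratic
+ //   v(t) = (a(1-t)^2 + 2wb t(1-t) + c t^2) / ((1-t)^2 + 2w t(1-t) + t^2)
+ // with a = r[0], b = r[1], c = r[2] and weight w. Setting v(t) equal to the
+ // intercept k and multiplying through by the shared denominator gives
+ //   (a-k)(1-t)^2 + 2w(b-k) t(1-t) + (c-k) t^2 == 0
+ // whose power-basis coefficients are exactly the values passed above:
+ //   t^2: (a-k) - 2w(b-k) + (c-k)   t^1: 2*(w(b-k) - (a-k))   t^0: (a-k)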
+
+ int verticalIntersect(double axisIntercept, double roots[2]) {
+ double conicVals[] = { fConic[0].fX, fConic[1].fX, fConic[2].fX };
+ return this->validT(conicVals, axisIntercept, roots);
+ }
+
+ int verticalIntersect(double axisIntercept, double top, double bottom, bool flipped) {
+ this->addExactVerticalEndPoints(top, bottom, axisIntercept);
+ if (fAllowNear) {
+ this->addNearVerticalEndPoints(top, bottom, axisIntercept);
+ }
+ double roots[2];
+ int count = this->verticalIntersect(axisIntercept, roots);
+ for (int index = 0; index < count; ++index) {
+ double conicT = roots[index];
+ SkDPoint pt = fConic.ptAtT(conicT);
+ SkDEBUGCODE(double conicVals[] = { fConic[0].fX, fConic[1].fX, fConic[2].fX });
+ SkOPOBJASSERT(fIntersections, close_to(pt.fX, axisIntercept, conicVals));
+ double lineT = (pt.fY - top) / (bottom - top);
+ if (this->pinTs(&conicT, &lineT, &pt, kPointInitialized)
+ && this->uniqueAnswer(conicT, pt)) {
+ fIntersections->insert(conicT, lineT, pt);
+ }
+ }
+ if (flipped) {
+ fIntersections->flip();
+ }
+ this->checkCoincident();
+ return fIntersections->used();
+ }
+
+protected:
+// OPTIMIZE: the add...Points functions below are identical to the corresponding quad and cubic routines.
+ // add endpoints first to get zero and one t values exactly
+ void addExactEndPoints() {
+ for (int cIndex = 0; cIndex < SkDConic::kPointCount; cIndex += SkDConic::kPointLast) {
+ double lineT = fLine->exactPoint(fConic[cIndex]);
+ if (lineT < 0) {
+ continue;
+ }
+ double conicT = (double) (cIndex >> 1);
+ fIntersections->insert(conicT, lineT, fConic[cIndex]);
+ }
+ }
+
+ void addNearEndPoints() {
+ for (int cIndex = 0; cIndex < SkDConic::kPointCount; cIndex += SkDConic::kPointLast) {
+ double conicT = (double) (cIndex >> 1);
+ if (fIntersections->hasT(conicT)) {
+ continue;
+ }
+ double lineT = fLine->nearPoint(fConic[cIndex], nullptr);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(conicT, lineT, fConic[cIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ void addLineNearEndPoints() {
+ for (int lIndex = 0; lIndex < 2; ++lIndex) {
+ double lineT = (double) lIndex;
+ if (fIntersections->hasOppT(lineT)) {
+ continue;
+ }
+ double conicT = ((SkDCurve*) &fConic)->nearPoint(SkPath::kConic_Verb,
+ (*fLine)[lIndex], (*fLine)[!lIndex]);
+ if (conicT < 0) {
+ continue;
+ }
+ fIntersections->insert(conicT, lineT, (*fLine)[lIndex]);
+ }
+ }
+
+ void addExactHorizontalEndPoints(double left, double right, double y) {
+ for (int cIndex = 0; cIndex < SkDConic::kPointCount; cIndex += SkDConic::kPointLast) {
+ double lineT = SkDLine::ExactPointH(fConic[cIndex], left, right, y);
+ if (lineT < 0) {
+ continue;
+ }
+ double conicT = (double) (cIndex >> 1);
+ fIntersections->insert(conicT, lineT, fConic[cIndex]);
+ }
+ }
+
+ void addNearHorizontalEndPoints(double left, double right, double y) {
+ for (int cIndex = 0; cIndex < SkDConic::kPointCount; cIndex += SkDConic::kPointLast) {
+ double conicT = (double) (cIndex >> 1);
+ if (fIntersections->hasT(conicT)) {
+ continue;
+ }
+ double lineT = SkDLine::NearPointH(fConic[cIndex], left, right, y);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(conicT, lineT, fConic[cIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ void addExactVerticalEndPoints(double top, double bottom, double x) {
+ for (int cIndex = 0; cIndex < SkDConic::kPointCount; cIndex += SkDConic::kPointLast) {
+ double lineT = SkDLine::ExactPointV(fConic[cIndex], top, bottom, x);
+ if (lineT < 0) {
+ continue;
+ }
+ double conicT = (double) (cIndex >> 1);
+ fIntersections->insert(conicT, lineT, fConic[cIndex]);
+ }
+ }
+
+ void addNearVerticalEndPoints(double top, double bottom, double x) {
+ for (int cIndex = 0; cIndex < SkDConic::kPointCount; cIndex += SkDConic::kPointLast) {
+ double conicT = (double) (cIndex >> 1);
+ if (fIntersections->hasT(conicT)) {
+ continue;
+ }
+ double lineT = SkDLine::NearPointV(fConic[cIndex], top, bottom, x);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(conicT, lineT, fConic[cIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ double findLineT(double t) {
+ SkDPoint xy = fConic.ptAtT(t);
+ double dx = (*fLine)[1].fX - (*fLine)[0].fX;
+ double dy = (*fLine)[1].fY - (*fLine)[0].fY;
+ if (fabs(dx) > fabs(dy)) {
+ return (xy.fX - (*fLine)[0].fX) / dx;
+ }
+ return (xy.fY - (*fLine)[0].fY) / dy;
+ }
+
+ bool pinTs(double* conicT, double* lineT, SkDPoint* pt, PinTPoint ptSet) {
+ if (!approximately_one_or_less_double(*lineT)) {
+ return false;
+ }
+ if (!approximately_zero_or_more_double(*lineT)) {
+ return false;
+ }
+ double qT = *conicT = SkPinT(*conicT);
+ double lT = *lineT = SkPinT(*lineT);
+ if (lT == 0 || lT == 1 || (ptSet == kPointUninitialized && qT != 0 && qT != 1)) {
+ *pt = (*fLine).ptAtT(lT);
+ } else if (ptSet == kPointUninitialized) {
+ *pt = fConic.ptAtT(qT);
+ }
+ SkPoint gridPt = pt->asSkPoint();
+ if (SkDPoint::ApproximatelyEqual(gridPt, (*fLine)[0].asSkPoint())) {
+ *pt = (*fLine)[0];
+ *lineT = 0;
+ } else if (SkDPoint::ApproximatelyEqual(gridPt, (*fLine)[1].asSkPoint())) {
+ *pt = (*fLine)[1];
+ *lineT = 1;
+ }
+ if (fIntersections->used() > 0 && approximately_equal((*fIntersections)[1][0], *lineT)) {
+ return false;
+ }
+ if (gridPt == fConic[0].asSkPoint()) {
+ *pt = fConic[0];
+ *conicT = 0;
+ } else if (gridPt == fConic[2].asSkPoint()) {
+ *pt = fConic[2];
+ *conicT = 1;
+ }
+ return true;
+ }
+
+ bool uniqueAnswer(double conicT, const SkDPoint& pt) {
+ for (int inner = 0; inner < fIntersections->used(); ++inner) {
+ if (fIntersections->pt(inner) != pt) {
+ continue;
+ }
+ double existingConicT = (*fIntersections)[0][inner];
+ if (conicT == existingConicT) {
+ return false;
+ }
+ // check if midway on conic is also same point. If so, discard this intersection.
+ double conicMidT = (existingConicT + conicT) / 2;
+ SkDPoint conicMidPt = fConic.ptAtT(conicMidT);
+ if (conicMidPt.approximatelyEqual(pt)) {
+ return false;
+ }
+ }
+#if ONE_OFF_DEBUG
+ SkDPoint qPt = fConic.ptAtT(conicT);
+ SkDebugf("%s pt=(%1.9g,%1.9g) cPt=(%1.9g,%1.9g)\n", __FUNCTION__, pt.fX, pt.fY,
+ qPt.fX, qPt.fY);
+#endif
+ return true;
+ }
+
+private:
+ const SkDConic& fConic;
+ const SkDLine* fLine;
+ SkIntersections* fIntersections;
+ bool fAllowNear;
+};
+
+int SkIntersections::horizontal(const SkDConic& conic, double left, double right, double y,
+ bool flipped) {
+ SkDLine line = {{{ left, y }, { right, y }}};
+ LineConicIntersections c(conic, line, this);
+ return c.horizontalIntersect(y, left, right, flipped);
+}
+
+int SkIntersections::vertical(const SkDConic& conic, double top, double bottom, double x,
+ bool flipped) {
+ SkDLine line = {{{ x, top }, { x, bottom }}};
+ LineConicIntersections c(conic, line, this);
+ return c.verticalIntersect(x, top, bottom, flipped);
+}
+
+int SkIntersections::intersect(const SkDConic& conic, const SkDLine& line) {
+ LineConicIntersections c(conic, line, this);
+ c.allowNear(fAllowNear);
+ return c.intersect();
+}
+
+int SkIntersections::intersectRay(const SkDConic& conic, const SkDLine& line) {
+ LineConicIntersections c(conic, line, this);
+ fUsed = c.intersectRay(fT[0]);
+ for (int index = 0; index < fUsed; ++index) {
+ fPt[index] = conic.ptAtT(fT[0][index]);
+ }
+ return fUsed;
+}
+
+int SkIntersections::HorizontalIntercept(const SkDConic& conic, SkScalar y, double* roots) {
+ LineConicIntersections c(conic);
+ return c.horizontalIntersect(y, roots);
+}
+
+int SkIntersections::VerticalIntercept(const SkDConic& conic, SkScalar x, double* roots) {
+ LineConicIntersections c(conic);
+ return c.verticalIntersect(x, roots);
+}
diff --git a/gfx/skia/skia/src/pathops/SkDCubicLineIntersection.cpp b/gfx/skia/skia/src/pathops/SkDCubicLineIntersection.cpp
new file mode 100644
index 000000000..fd060de64
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkDCubicLineIntersection.cpp
@@ -0,0 +1,454 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkIntersections.h"
+#include "SkPathOpsCubic.h"
+#include "SkPathOpsCurve.h"
+#include "SkPathOpsLine.h"
+
+/*
+Find the intersection of a line and cubic by solving for valid t values.
+
+Analogous to line-quadratic intersection, solve line-cubic intersection by
+representing the cubic as:
+ x = a(1-t)^3 + 3b(1-t)^2t + 3c(1-t)t^2 + dt^3
+ y = e(1-t)^3 + 3f(1-t)^2t + 3g(1-t)t^2 + ht^3
+and the line as:
+ y = i*x + j (if the line is more horizontal)
+or:
+ x = i*y + j (if the line is more vertical)
+
+Then using Mathematica, solve for the values of t where the cubic intersects the
+line:
+
+ (in) Resultant[
+ a*(1 - t)^3 + 3*b*(1 - t)^2*t + 3*c*(1 - t)*t^2 + d*t^3 - x,
+ e*(1 - t)^3 + 3*f*(1 - t)^2*t + 3*g*(1 - t)*t^2 + h*t^3 - i*x - j, x]
+ (out) -e + j +
+ 3 e t - 3 f t -
+ 3 e t^2 + 6 f t^2 - 3 g t^2 +
+ e t^3 - 3 f t^3 + 3 g t^3 - h t^3 +
+ i ( a -
+ 3 a t + 3 b t +
+ 3 a t^2 - 6 b t^2 + 3 c t^2 -
+ a t^3 + 3 b t^3 - 3 c t^3 + d t^3 )
+
+If i goes to infinity, we can instead write the line as x in terms of y. Mathematica:
+
+ (in) Resultant[
+ a*(1 - t)^3 + 3*b*(1 - t)^2*t + 3*c*(1 - t)*t^2 + d*t^3 - i*y - j,
+ e*(1 - t)^3 + 3*f*(1 - t)^2*t + 3*g*(1 - t)*t^2 + h*t^3 - y, y]
+ (out) a - j -
+ 3 a t + 3 b t +
+ 3 a t^2 - 6 b t^2 + 3 c t^2 -
+ a t^3 + 3 b t^3 - 3 c t^3 + d t^3 -
+ i ( e -
+ 3 e t + 3 f t +
+ 3 e t^2 - 6 f t^2 + 3 g t^2 -
+ e t^3 + 3 f t^3 - 3 g t^3 + h t^3 )
+
+Solving this directly with Mathematica produces an expression with hundreds of terms;
+instead, solve the cubic numerically (the code below uses SkDCubic::RootsValidT).
+
+The near-horizontal case, in terms of: At^3 + Bt^2 + Ct + D == 0
+ A = (-(-e + 3*f - 3*g + h) + i*(-a + 3*b - 3*c + d) )
+ B = 3*(-( e - 2*f + g ) + i*( a - 2*b + c ) )
+ C = 3*(-(-e + f ) + i*(-a + b ) )
+ D = (-( e ) + i*( a ) + j )
+
+The near-vertical case, in terms of: At^3 + Bt^2 + Ct + D == 0
+ A = ( (-a + 3*b - 3*c + d) - i*(-e + 3*f - 3*g + h) )
+ B = 3*( ( a - 2*b + c ) - i*( e - 2*f + g ) )
+ C = 3*( (-a + b ) - i*(-e + f ) )
+ D = ( ( a ) - i*( e ) - j )
+
+For horizontal lines:
+(in) Resultant[
+ a*(1 - t)^3 + 3*b*(1 - t)^2*t + 3*c*(1 - t)*t^2 + d*t^3 - j,
+ e*(1 - t)^3 + 3*f*(1 - t)^2*t + 3*g*(1 - t)*t^2 + h*t^3 - y, y]
+(out) e - j -
+ 3 e t + 3 f t +
+ 3 e t^2 - 6 f t^2 + 3 g t^2 -
+ e t^3 + 3 f t^3 - 3 g t^3 + h t^3
+ */
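+
+/* A worked instance of the horizontal-line resultant above (an editorial
+   example, not part of the upstream sources): take cubic y control values
+   e = 0, f = 4, g = 4, h = 0, so y(t) = 12t(1-t), and the line y = j = 3.
+   The resultant's coefficients become
+     t^3: -e + 3f - 3g + h = 0      t^2: 3e - 6f + 3g = -12
+     t^1: -3e + 3f = 12             t^0: e - j = -3
+   i.e. -12t^2 + 12t - 3 = -3(2t - 1)^2, giving the expected double root
+   t = 1/2 where the line grazes the top of the arch.
+ */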
+
+class LineCubicIntersections {
+public:
+ enum PinTPoint {
+ kPointUninitialized,
+ kPointInitialized
+ };
+
+ LineCubicIntersections(const SkDCubic& c, const SkDLine& l, SkIntersections* i)
+ : fCubic(c)
+ , fLine(l)
+ , fIntersections(i)
+ , fAllowNear(true) {
+ i->setMax(4);
+ }
+
+ void allowNear(bool allow) {
+ fAllowNear = allow;
+ }
+
+ void checkCoincident() {
+ int last = fIntersections->used() - 1;
+ for (int index = 0; index < last; ) {
+ double cubicMidT = ((*fIntersections)[0][index] + (*fIntersections)[0][index + 1]) / 2;
+ SkDPoint cubicMidPt = fCubic.ptAtT(cubicMidT);
+ double t = fLine.nearPoint(cubicMidPt, nullptr);
+ if (t < 0) {
+ ++index;
+ continue;
+ }
+ if (fIntersections->isCoincident(index)) {
+ fIntersections->removeOne(index);
+ --last;
+ } else if (fIntersections->isCoincident(index + 1)) {
+ fIntersections->removeOne(index + 1);
+ --last;
+ } else {
+ fIntersections->setCoincident(index++);
+ }
+ fIntersections->setCoincident(index);
+ }
+ }
+
+ // see parallel routine in line quadratic intersections
+ int intersectRay(double roots[3]) {
+ double adj = fLine[1].fX - fLine[0].fX;
+ double opp = fLine[1].fY - fLine[0].fY;
+ SkDCubic c;
+ for (int n = 0; n < 4; ++n) {
+ c[n].fX = (fCubic[n].fY - fLine[0].fY) * adj - (fCubic[n].fX - fLine[0].fX) * opp;
+ }
+ double A, B, C, D;
+ SkDCubic::Coefficients(&c[0].fX, &A, &B, &C, &D);
+ int count = SkDCubic::RootsValidT(A, B, C, D, roots);
+ for (int index = 0; index < count; ++index) {
+ SkDPoint calcPt = c.ptAtT(roots[index]);
+ if (!approximately_zero(calcPt.fX)) {
+ for (int n = 0; n < 4; ++n) {
+ c[n].fY = (fCubic[n].fY - fLine[0].fY) * opp
+ + (fCubic[n].fX - fLine[0].fX) * adj;
+ }
+ double extremeTs[6];
+ int extrema = SkDCubic::FindExtrema(&c[0].fX, extremeTs);
+ count = c.searchRoots(extremeTs, extrema, 0, SkDCubic::kXAxis, roots);
+ break;
+ }
+ }
+ return count;
+ }
+
+ int intersect() {
+ addExactEndPoints();
+ if (fAllowNear) {
+ addNearEndPoints();
+ }
+ double rootVals[3];
+ int roots = intersectRay(rootVals);
+ for (int index = 0; index < roots; ++index) {
+ double cubicT = rootVals[index];
+ double lineT = findLineT(cubicT);
+ SkDPoint pt;
+ if (pinTs(&cubicT, &lineT, &pt, kPointUninitialized) && uniqueAnswer(cubicT, pt)) {
+ fIntersections->insert(cubicT, lineT, pt);
+ }
+ }
+ checkCoincident();
+ return fIntersections->used();
+ }
+
+ static int HorizontalIntersect(const SkDCubic& c, double axisIntercept, double roots[3]) {
+ double A, B, C, D;
+ SkDCubic::Coefficients(&c[0].fY, &A, &B, &C, &D);
+ D -= axisIntercept;
+ int count = SkDCubic::RootsValidT(A, B, C, D, roots);
+ for (int index = 0; index < count; ++index) {
+ SkDPoint calcPt = c.ptAtT(roots[index]);
+ if (!approximately_equal(calcPt.fY, axisIntercept)) {
+ double extremeTs[6];
+ int extrema = SkDCubic::FindExtrema(&c[0].fY, extremeTs);
+ count = c.searchRoots(extremeTs, extrema, axisIntercept, SkDCubic::kYAxis, roots);
+ break;
+ }
+ }
+ return count;
+ }
+
+ int horizontalIntersect(double axisIntercept, double left, double right, bool flipped) {
+ addExactHorizontalEndPoints(left, right, axisIntercept);
+ if (fAllowNear) {
+ addNearHorizontalEndPoints(left, right, axisIntercept);
+ }
+ double roots[3];
+ int count = HorizontalIntersect(fCubic, axisIntercept, roots);
+ for (int index = 0; index < count; ++index) {
+ double cubicT = roots[index];
+ SkDPoint pt = { fCubic.ptAtT(cubicT).fX, axisIntercept };
+ double lineT = (pt.fX - left) / (right - left);
+ if (pinTs(&cubicT, &lineT, &pt, kPointInitialized) && uniqueAnswer(cubicT, pt)) {
+ fIntersections->insert(cubicT, lineT, pt);
+ }
+ }
+ if (flipped) {
+ fIntersections->flip();
+ }
+ checkCoincident();
+ return fIntersections->used();
+ }
+
+ bool uniqueAnswer(double cubicT, const SkDPoint& pt) {
+ for (int inner = 0; inner < fIntersections->used(); ++inner) {
+ if (fIntersections->pt(inner) != pt) {
+ continue;
+ }
+ double existingCubicT = (*fIntersections)[0][inner];
+ if (cubicT == existingCubicT) {
+ return false;
+ }
+ // check if midway on cubic is also same point. If so, discard this intersection.
+ double cubicMidT = (existingCubicT + cubicT) / 2;
+ SkDPoint cubicMidPt = fCubic.ptAtT(cubicMidT);
+ if (cubicMidPt.approximatelyEqual(pt)) {
+ return false;
+ }
+ }
+#if ONE_OFF_DEBUG
+ SkDPoint cPt = fCubic.ptAtT(cubicT);
+ SkDebugf("%s pt=(%1.9g,%1.9g) cPt=(%1.9g,%1.9g)\n", __FUNCTION__, pt.fX, pt.fY,
+ cPt.fX, cPt.fY);
+#endif
+ return true;
+ }
+
+ static int VerticalIntersect(const SkDCubic& c, double axisIntercept, double roots[3]) {
+ double A, B, C, D;
+ SkDCubic::Coefficients(&c[0].fX, &A, &B, &C, &D);
+ D -= axisIntercept;
+ int count = SkDCubic::RootsValidT(A, B, C, D, roots);
+ for (int index = 0; index < count; ++index) {
+ SkDPoint calcPt = c.ptAtT(roots[index]);
+ if (!approximately_equal(calcPt.fX, axisIntercept)) {
+ double extremeTs[6];
+ int extrema = SkDCubic::FindExtrema(&c[0].fX, extremeTs);
+ count = c.searchRoots(extremeTs, extrema, axisIntercept, SkDCubic::kXAxis, roots);
+ break;
+ }
+ }
+ return count;
+ }
+
+ int verticalIntersect(double axisIntercept, double top, double bottom, bool flipped) {
+ addExactVerticalEndPoints(top, bottom, axisIntercept);
+ if (fAllowNear) {
+ addNearVerticalEndPoints(top, bottom, axisIntercept);
+ }
+ double roots[3];
+ int count = VerticalIntersect(fCubic, axisIntercept, roots);
+ for (int index = 0; index < count; ++index) {
+ double cubicT = roots[index];
+ SkDPoint pt = { axisIntercept, fCubic.ptAtT(cubicT).fY };
+ double lineT = (pt.fY - top) / (bottom - top);
+ if (pinTs(&cubicT, &lineT, &pt, kPointInitialized) && uniqueAnswer(cubicT, pt)) {
+ fIntersections->insert(cubicT, lineT, pt);
+ }
+ }
+ if (flipped) {
+ fIntersections->flip();
+ }
+ checkCoincident();
+ return fIntersections->used();
+ }
+
+ protected:
+
+ void addExactEndPoints() {
+ for (int cIndex = 0; cIndex < 4; cIndex += 3) {
+ double lineT = fLine.exactPoint(fCubic[cIndex]);
+ if (lineT < 0) {
+ continue;
+ }
+ double cubicT = (double) (cIndex >> 1);
+ fIntersections->insert(cubicT, lineT, fCubic[cIndex]);
+ }
+ }
+
+ /* Note that this does not look for endpoints of the line that are near the cubic.
+ These points are found later, when the check-ends pass looks for missing points. */
+ void addNearEndPoints() {
+ for (int cIndex = 0; cIndex < 4; cIndex += 3) {
+ double cubicT = (double) (cIndex >> 1);
+ if (fIntersections->hasT(cubicT)) {
+ continue;
+ }
+ double lineT = fLine.nearPoint(fCubic[cIndex], nullptr);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(cubicT, lineT, fCubic[cIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ void addLineNearEndPoints() {
+ for (int lIndex = 0; lIndex < 2; ++lIndex) {
+ double lineT = (double) lIndex;
+ if (fIntersections->hasOppT(lineT)) {
+ continue;
+ }
+ double cubicT = ((SkDCurve*) &fCubic)->nearPoint(SkPath::kCubic_Verb,
+ fLine[lIndex], fLine[!lIndex]);
+ if (cubicT < 0) {
+ continue;
+ }
+ fIntersections->insert(cubicT, lineT, fLine[lIndex]);
+ }
+ }
+
+ void addExactHorizontalEndPoints(double left, double right, double y) {
+ for (int cIndex = 0; cIndex < 4; cIndex += 3) {
+ double lineT = SkDLine::ExactPointH(fCubic[cIndex], left, right, y);
+ if (lineT < 0) {
+ continue;
+ }
+ double cubicT = (double) (cIndex >> 1);
+ fIntersections->insert(cubicT, lineT, fCubic[cIndex]);
+ }
+ }
+
+ void addNearHorizontalEndPoints(double left, double right, double y) {
+ for (int cIndex = 0; cIndex < 4; cIndex += 3) {
+ double cubicT = (double) (cIndex >> 1);
+ if (fIntersections->hasT(cubicT)) {
+ continue;
+ }
+ double lineT = SkDLine::NearPointH(fCubic[cIndex], left, right, y);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(cubicT, lineT, fCubic[cIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ void addExactVerticalEndPoints(double top, double bottom, double x) {
+ for (int cIndex = 0; cIndex < 4; cIndex += 3) {
+ double lineT = SkDLine::ExactPointV(fCubic[cIndex], top, bottom, x);
+ if (lineT < 0) {
+ continue;
+ }
+ double cubicT = (double) (cIndex >> 1);
+ fIntersections->insert(cubicT, lineT, fCubic[cIndex]);
+ }
+ }
+
+ void addNearVerticalEndPoints(double top, double bottom, double x) {
+ for (int cIndex = 0; cIndex < 4; cIndex += 3) {
+ double cubicT = (double) (cIndex >> 1);
+ if (fIntersections->hasT(cubicT)) {
+ continue;
+ }
+ double lineT = SkDLine::NearPointV(fCubic[cIndex], top, bottom, x);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(cubicT, lineT, fCubic[cIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ double findLineT(double t) {
+ SkDPoint xy = fCubic.ptAtT(t);
+ double dx = fLine[1].fX - fLine[0].fX;
+ double dy = fLine[1].fY - fLine[0].fY;
+ if (fabs(dx) > fabs(dy)) {
+ return (xy.fX - fLine[0].fX) / dx;
+ }
+ return (xy.fY - fLine[0].fY) / dy;
+ }
+
+ bool pinTs(double* cubicT, double* lineT, SkDPoint* pt, PinTPoint ptSet) {
+ if (!approximately_one_or_less(*lineT)) {
+ return false;
+ }
+ if (!approximately_zero_or_more(*lineT)) {
+ return false;
+ }
+ double cT = *cubicT = SkPinT(*cubicT);
+ double lT = *lineT = SkPinT(*lineT);
+ SkDPoint lPt = fLine.ptAtT(lT);
+ SkDPoint cPt = fCubic.ptAtT(cT);
+ if (!lPt.roughlyEqual(cPt)) {
+ return false;
+ }
+ // FIXME: if points are roughly equal but not approximately equal, need to do
+ // a binary search like quad/quad intersection to find more precise t values
+ if (lT == 0 || lT == 1 || (ptSet == kPointUninitialized && cT != 0 && cT != 1)) {
+ *pt = lPt;
+ } else if (ptSet == kPointUninitialized) {
+ *pt = cPt;
+ }
+ SkPoint gridPt = pt->asSkPoint();
+ if (gridPt == fLine[0].asSkPoint()) {
+ *lineT = 0;
+ } else if (gridPt == fLine[1].asSkPoint()) {
+ *lineT = 1;
+ }
+ if (gridPt == fCubic[0].asSkPoint() && approximately_equal(*cubicT, 0)) {
+ *cubicT = 0;
+ } else if (gridPt == fCubic[3].asSkPoint() && approximately_equal(*cubicT, 1)) {
+ *cubicT = 1;
+ }
+ return true;
+ }
+
+private:
+ const SkDCubic& fCubic;
+ const SkDLine& fLine;
+ SkIntersections* fIntersections;
+ bool fAllowNear;
+};
+
+int SkIntersections::horizontal(const SkDCubic& cubic, double left, double right, double y,
+ bool flipped) {
+ SkDLine line = {{{ left, y }, { right, y }}};
+ LineCubicIntersections c(cubic, line, this);
+ return c.horizontalIntersect(y, left, right, flipped);
+}
+
+int SkIntersections::vertical(const SkDCubic& cubic, double top, double bottom, double x,
+ bool flipped) {
+ SkDLine line = {{{ x, top }, { x, bottom }}};
+ LineCubicIntersections c(cubic, line, this);
+ return c.verticalIntersect(x, top, bottom, flipped);
+}
+
+int SkIntersections::intersect(const SkDCubic& cubic, const SkDLine& line) {
+ LineCubicIntersections c(cubic, line, this);
+ c.allowNear(fAllowNear);
+ return c.intersect();
+}
+
+int SkIntersections::intersectRay(const SkDCubic& cubic, const SkDLine& line) {
+ LineCubicIntersections c(cubic, line, this);
+ fUsed = c.intersectRay(fT[0]);
+ for (int index = 0; index < fUsed; ++index) {
+ fPt[index] = cubic.ptAtT(fT[0][index]);
+ }
+ return fUsed;
+}
+
+// SkDCubic accessors to Intersection utilities
+
+int SkDCubic::horizontalIntersect(double yIntercept, double roots[3]) const {
+ return LineCubicIntersections::HorizontalIntersect(*this, yIntercept, roots);
+}
+
+int SkDCubic::verticalIntersect(double xIntercept, double roots[3]) const {
+ return LineCubicIntersections::VerticalIntersect(*this, xIntercept, roots);
+}
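+
+// Editorial usage sketch (not part of the upstream sources): driving the public
+// entry points above. The aggregate initializers mirror the pathops test
+// conventions and are an assumption about SkDCubic's layout, not a documented API.
+#if 0
+static void example_cubic_line_intersection() {
+    SkDCubic cubic = {{{0, 0}, {1, 4}, {2, 4}, {3, 0}}};   // arch peaking at y = 3
+    SkDLine line = {{{0, 3}, {3, 3}}};                     // horizontal chord y = 3
+    SkIntersections i;
+    int count = i.intersect(cubic, line);                  // expect one tangential hit
+    for (int n = 0; n < count; ++n) {
+        SkDebugf("cubicT=%g lineT=%g pt=(%g,%g)\n",
+                 i[0][n], i[1][n], i.pt(n).fX, i.pt(n).fY);
+    }
+}
+#endif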
diff --git a/gfx/skia/skia/src/pathops/SkDCubicToQuads.cpp b/gfx/skia/skia/src/pathops/SkDCubicToQuads.cpp
new file mode 100644
index 000000000..272b997d6
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkDCubicToQuads.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/*
+http://stackoverflow.com/questions/2009160/how-do-i-convert-the-2-control-points-of-a-cubic-curve-to-the-single-control-poi
+*/
+
+/*
+Let's call the control points of the cubic Q0..Q3 and the control points of the quadratic P0..P2.
+Then for degree elevation, the equations are:
+
+Q0 = P0
+Q1 = 1/3 P0 + 2/3 P1
+Q2 = 2/3 P1 + 1/3 P2
+Q3 = P2
+In your case you have Q0..Q3 and you're solving for P0..P2. There are two ways to compute P1 from
+ the equations above:
+
+P1 = 3/2 Q1 - 1/2 Q0
+P1 = 3/2 Q2 - 1/2 Q3
+If this is a degree-elevated cubic, then both equations will give the same answer for P1. Since
+ it's likely not, your best bet is to average them. So,
+
+P1 = -1/4 Q0 + 3/4 Q1 + 3/4 Q2 - 1/4 Q3
+*/
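+
+/*
+A quick numeric check of the formulas above (an editorial example, not from the
+quoted answer): degree-elevating the quadratic P0 = (0,0), P1 = (3,6), P2 = (6,0)
+gives Q0 = (0,0), Q1 = 1/3 P0 + 2/3 P1 = (2,4), Q2 = 2/3 P1 + 1/3 P2 = (4,4),
+Q3 = (6,0). The averaged formula then recovers
+P1 = -1/4 (0,0) + 3/4 (2,4) + 3/4 (4,4) - 1/4 (6,0) = (3,6),
+which is also what toQuad() below computes via the two half-estimates
+(3 Q1 - Q0)/2 and (3 Q2 - Q3)/2.
+*/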
+
+#include "SkPathOpsCubic.h"
+#include "SkPathOpsQuad.h"
+
+// used for testing only
+SkDQuad SkDCubic::toQuad() const {
+ SkDQuad quad;
+ quad[0] = fPts[0];
+ const SkDPoint fromC1 = {(3 * fPts[1].fX - fPts[0].fX) / 2, (3 * fPts[1].fY - fPts[0].fY) / 2};
+ const SkDPoint fromC2 = {(3 * fPts[2].fX - fPts[3].fX) / 2, (3 * fPts[2].fY - fPts[3].fY) / 2};
+ quad[1].fX = (fromC1.fX + fromC2.fX) / 2;
+ quad[1].fY = (fromC1.fY + fromC2.fY) / 2;
+ quad[2] = fPts[3];
+ return quad;
+}
diff --git a/gfx/skia/skia/src/pathops/SkDLineIntersection.cpp b/gfx/skia/skia/src/pathops/SkDLineIntersection.cpp
new file mode 100644
index 000000000..71e2a064d
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkDLineIntersection.cpp
@@ -0,0 +1,333 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkIntersections.h"
+#include "SkPathOpsLine.h"
+
+void SkIntersections::cleanUpParallelLines(bool parallel) {
+ while (fUsed > 2) {
+ removeOne(1);
+ }
+ if (fUsed == 2 && !parallel) {
+ bool startMatch = fT[0][0] == 0 || zero_or_one(fT[1][0]);
+ bool endMatch = fT[0][1] == 1 || zero_or_one(fT[1][1]);
+ if ((!startMatch && !endMatch) || approximately_equal(fT[0][0], fT[0][1])) {
+ SkASSERT(startMatch || endMatch);
+ if (startMatch && endMatch && (fT[0][0] != 0 || !zero_or_one(fT[1][0]))
+ && fT[0][1] == 1 && zero_or_one(fT[1][1])) {
+ removeOne(0);
+ } else {
+ removeOne(endMatch);
+ }
+ }
+ }
+ if (fUsed == 2) {
+ fIsCoincident[0] = fIsCoincident[1] = 0x03;
+ }
+}
+
+void SkIntersections::computePoints(const SkDLine& line, int used) {
+ fPt[0] = line.ptAtT(fT[0][0]);
+ if ((fUsed = used) == 2) {
+ fPt[1] = line.ptAtT(fT[0][1]);
+ }
+}
+
+int SkIntersections::intersectRay(const SkDLine& a, const SkDLine& b) {
+ fMax = 2;
+ SkDVector aLen = a[1] - a[0];
+ SkDVector bLen = b[1] - b[0];
+ /* Slopes match when denom goes to zero:
+ axLen / ayLen == bxLen / byLen
+ (ayLen * byLen) * axLen / ayLen == (ayLen * byLen) * bxLen / byLen
+ byLen * axLen == ayLen * bxLen
+ byLen * axLen - ayLen * bxLen == 0 ( == denom )
+ */
+ double denom = bLen.fY * aLen.fX - aLen.fY * bLen.fX;
+ SkDVector ab0 = a[0] - b[0];
+ double numerA = ab0.fY * bLen.fX - bLen.fY * ab0.fX;
+ double numerB = ab0.fY * aLen.fX - aLen.fY * ab0.fX;
+ numerA /= denom;
+ numerB /= denom;
+ int used;
+ if (!approximately_zero(denom)) {
+ fT[0][0] = numerA;
+ fT[1][0] = numerB;
+ used = 1;
+ } else {
+ /* See if the axis intercepts match:
+ ay - ax * ayLen / axLen == by - bx * ayLen / axLen
+ axLen * (ay - ax * ayLen / axLen) == axLen * (by - bx * ayLen / axLen)
+ axLen * ay - ax * ayLen == axLen * by - bx * ayLen
+ */
+ if (!AlmostEqualUlps(aLen.fX * a[0].fY - aLen.fY * a[0].fX,
+ aLen.fX * b[0].fY - aLen.fY * b[0].fX)) {
+ return fUsed = 0;
+ }
+ // there's no great answer for intersection points for coincident rays, but return something
+ fT[0][0] = fT[1][0] = 0;
+ fT[0][1] = fT[1][1] = 1;
+ used = 2;
+ }
+ computePoints(a, used);
+ return fUsed;
+}
+
+// note that this only works if neither line is horizontal or vertical
+int SkIntersections::intersect(const SkDLine& a, const SkDLine& b) {
+ fMax = 3; // note that we clean up afterwards so that there are no more than two in the end
+ // see if end points intersect the opposite line
+ double t;
+ for (int iA = 0; iA < 2; ++iA) {
+ if ((t = b.exactPoint(a[iA])) >= 0) {
+ insert(iA, t, a[iA]);
+ }
+ }
+ for (int iB = 0; iB < 2; ++iB) {
+ if ((t = a.exactPoint(b[iB])) >= 0) {
+ insert(t, iB, b[iB]);
+ }
+ }
+ /* Determine the intersection point of two line segments
+ Return FALSE if the lines don't intersect
+ from: http://paulbourke.net/geometry/lineline2d/ */
+ double axLen = a[1].fX - a[0].fX;
+ double ayLen = a[1].fY - a[0].fY;
+ double bxLen = b[1].fX - b[0].fX;
+ double byLen = b[1].fY - b[0].fY;
+ /* Slopes match when denom goes to zero:
+ axLen / ayLen == bxLen / byLen
+ (ayLen * byLen) * axLen / ayLen == (ayLen * byLen) * bxLen / byLen
+ byLen * axLen == ayLen * bxLen
+ byLen * axLen - ayLen * bxLen == 0 ( == denom )
+ */
+ double axByLen = axLen * byLen;
+ double ayBxLen = ayLen * bxLen;
+ // detect parallel lines the same way here and in SkOpAngle operator <
+ // so that non-parallel means they are also sortable
+ bool unparallel = fAllowNear ? NotAlmostEqualUlps_Pin(axByLen, ayBxLen)
+ : NotAlmostDequalUlps(axByLen, ayBxLen);
+ if (unparallel && fUsed == 0) {
+ double ab0y = a[0].fY - b[0].fY;
+ double ab0x = a[0].fX - b[0].fX;
+ double numerA = ab0y * bxLen - byLen * ab0x;
+ double numerB = ab0y * axLen - ayLen * ab0x;
+ double denom = axByLen - ayBxLen;
+ if (between(0, numerA, denom) && between(0, numerB, denom)) {
+ fT[0][0] = numerA / denom;
+ fT[1][0] = numerB / denom;
+ computePoints(a, 1);
+ }
+ }
+/* Allow tracking that both sets of end points are near each other -- the lines are entirely
+ coincident -- even when the end points are not exactly the same.
+ Mark this as a 'wild card' for the end points, so that either point is considered totally
+ coincident. Then, avoid folding the lines over each other, but allow either end to mate
+ to the next set of lines.
+ */
+ if (fAllowNear || !unparallel) {
+ double aNearB[2];
+ double bNearA[2];
+ bool aNotB[2] = {false, false};
+ bool bNotA[2] = {false, false};
+ int nearCount = 0;
+ for (int index = 0; index < 2; ++index) {
+ aNearB[index] = t = b.nearPoint(a[index], &aNotB[index]);
+ nearCount += t >= 0;
+ bNearA[index] = t = a.nearPoint(b[index], &bNotA[index]);
+ nearCount += t >= 0;
+ }
+ if (nearCount > 0) {
+ // Skip if each segment contributes to one end point.
+ if (nearCount != 2 || aNotB[0] == aNotB[1]) {
+ for (int iA = 0; iA < 2; ++iA) {
+ if (!aNotB[iA]) {
+ continue;
+ }
+ int nearer = aNearB[iA] > 0.5;
+ if (!bNotA[nearer]) {
+ continue;
+ }
+ SkASSERT(a[iA] != b[nearer]);
+ SkASSERT(iA == (bNearA[nearer] > 0.5));
+ insertNear(iA, nearer, a[iA], b[nearer]);
+ aNearB[iA] = -1;
+ bNearA[nearer] = -1;
+ nearCount -= 2;
+ }
+ }
+ if (nearCount > 0) {
+ for (int iA = 0; iA < 2; ++iA) {
+ if (aNearB[iA] >= 0) {
+ insert(iA, aNearB[iA], a[iA]);
+ }
+ }
+ for (int iB = 0; iB < 2; ++iB) {
+ if (bNearA[iB] >= 0) {
+ insert(bNearA[iB], iB, b[iB]);
+ }
+ }
+ }
+ }
+ }
+ cleanUpParallelLines(!unparallel);
+ SkASSERT(fUsed <= 2);
+ return fUsed;
+}
+
+static int horizontal_coincident(const SkDLine& line, double y) {
+ double min = line[0].fY;
+ double max = line[1].fY;
+ if (min > max) {
+ SkTSwap(min, max);
+ }
+ if (min > y || max < y) {
+ return 0;
+ }
+ if (AlmostEqualUlps(min, max) && max - min < fabs(line[0].fX - line[1].fX)) {
+ return 2;
+ }
+ return 1;
+}
+
+double SkIntersections::HorizontalIntercept(const SkDLine& line, double y) {
+ return SkPinT((y - line[0].fY) / (line[1].fY - line[0].fY));
+}
+
+int SkIntersections::horizontal(const SkDLine& line, double left, double right,
+ double y, bool flipped) {
+ fMax = 3; // the parallel-line cleanup at the end will limit the result to at most 2
+ // see if end points intersect the opposite line
+ double t;
+ const SkDPoint leftPt = { left, y };
+ if ((t = line.exactPoint(leftPt)) >= 0) {
+ insert(t, (double) flipped, leftPt);
+ }
+ if (left != right) {
+ const SkDPoint rightPt = { right, y };
+ if ((t = line.exactPoint(rightPt)) >= 0) {
+ insert(t, (double) !flipped, rightPt);
+ }
+ for (int index = 0; index < 2; ++index) {
+ if ((t = SkDLine::ExactPointH(line[index], left, right, y)) >= 0) {
+ insert((double) index, flipped ? 1 - t : t, line[index]);
+ }
+ }
+ }
+ int result = horizontal_coincident(line, y);
+ if (result == 1 && fUsed == 0) {
+ fT[0][0] = HorizontalIntercept(line, y);
+ double xIntercept = line[0].fX + fT[0][0] * (line[1].fX - line[0].fX);
+ if (between(left, xIntercept, right)) {
+ fT[1][0] = (xIntercept - left) / (right - left);
+ if (flipped) {
+ // OPTIMIZATION: instead of swapping, pass original line, use [1].fX - [0].fX
+ for (int index = 0; index < result; ++index) {
+ fT[1][index] = 1 - fT[1][index];
+ }
+ }
+ fPt[0].fX = xIntercept;
+ fPt[0].fY = y;
+ fUsed = 1;
+ }
+ }
+ if (fAllowNear || result == 2) {
+ if ((t = line.nearPoint(leftPt, nullptr)) >= 0) {
+ insert(t, (double) flipped, leftPt);
+ }
+ if (left != right) {
+ const SkDPoint rightPt = { right, y };
+ if ((t = line.nearPoint(rightPt, nullptr)) >= 0) {
+ insert(t, (double) !flipped, rightPt);
+ }
+ for (int index = 0; index < 2; ++index) {
+ if ((t = SkDLine::NearPointH(line[index], left, right, y)) >= 0) {
+ insert((double) index, flipped ? 1 - t : t, line[index]);
+ }
+ }
+ }
+ }
+ cleanUpParallelLines(result == 2);
+ return fUsed;
+}
+
+static int vertical_coincident(const SkDLine& line, double x) {
+ double min = line[0].fX;
+ double max = line[1].fX;
+ if (min > max) {
+ SkTSwap(min, max);
+ }
+ if (!precisely_between(min, x, max)) {
+ return 0;
+ }
+ if (AlmostEqualUlps(min, max)) {
+ return 2;
+ }
+ return 1;
+}
+
+double SkIntersections::VerticalIntercept(const SkDLine& line, double x) {
+ return SkPinT((x - line[0].fX) / (line[1].fX - line[0].fX));
+}
+
+int SkIntersections::vertical(const SkDLine& line, double top, double bottom,
+ double x, bool flipped) {
+ fMax = 3; // the parallel-line cleanup at the end will limit the result to at most 2
+ // see if end points intersect the opposite line
+ double t;
+ SkDPoint topPt = { x, top };
+ if ((t = line.exactPoint(topPt)) >= 0) {
+ insert(t, (double) flipped, topPt);
+ }
+ if (top != bottom) {
+ SkDPoint bottomPt = { x, bottom };
+ if ((t = line.exactPoint(bottomPt)) >= 0) {
+ insert(t, (double) !flipped, bottomPt);
+ }
+ for (int index = 0; index < 2; ++index) {
+ if ((t = SkDLine::ExactPointV(line[index], top, bottom, x)) >= 0) {
+ insert((double) index, flipped ? 1 - t : t, line[index]);
+ }
+ }
+ }
+ int result = vertical_coincident(line, x);
+ if (result == 1 && fUsed == 0) {
+ fT[0][0] = VerticalIntercept(line, x);
+ double yIntercept = line[0].fY + fT[0][0] * (line[1].fY - line[0].fY);
+ if (between(top, yIntercept, bottom)) {
+ fT[1][0] = (yIntercept - top) / (bottom - top);
+ if (flipped) {
+ // OPTIMIZATION: instead of swapping, pass original line, use [1].fY - [0].fY
+ for (int index = 0; index < result; ++index) {
+ fT[1][index] = 1 - fT[1][index];
+ }
+ }
+ fPt[0].fX = x;
+ fPt[0].fY = yIntercept;
+ fUsed = 1;
+ }
+ }
+ if (fAllowNear || result == 2) {
+ if ((t = line.nearPoint(topPt, nullptr)) >= 0) {
+ insert(t, (double) flipped, topPt);
+ }
+ if (top != bottom) {
+ SkDPoint bottomPt = { x, bottom };
+ if ((t = line.nearPoint(bottomPt, nullptr)) >= 0) {
+ insert(t, (double) !flipped, bottomPt);
+ }
+ for (int index = 0; index < 2; ++index) {
+ if ((t = SkDLine::NearPointV(line[index], top, bottom, x)) >= 0) {
+ insert((double) index, flipped ? 1 - t : t, line[index]);
+ }
+ }
+ }
+ }
+ cleanUpParallelLines(result == 2);
+ SkASSERT(fUsed <= 2);
+ return fUsed;
+}
+
diff --git a/gfx/skia/skia/src/pathops/SkDQuadLineIntersection.cpp b/gfx/skia/skia/src/pathops/SkDQuadLineIntersection.cpp
new file mode 100644
index 000000000..8d5baf694
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkDQuadLineIntersection.cpp
@@ -0,0 +1,470 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkIntersections.h"
+#include "SkPathOpsCurve.h"
+#include "SkPathOpsLine.h"
+#include "SkPathOpsQuad.h"
+
+/*
+Find the intersection of a line and quadratic by solving for valid t values.
+
+From http://stackoverflow.com/questions/1853637/how-to-find-the-mathematical-function-defining-a-bezier-curve
+
+"A Bezier curve is a parametric function. A quadratic Bezier curve (i.e. three
+control points) can be expressed as: F(t) = A(1 - t)^2 + B(1 - t)t + Ct^2 where
+A, B and C are points and t goes from zero to one.
+
+This will give you two equations:
+
+ x = a(1 - t)^2 + b(1 - t)t + ct^2
+ y = d(1 - t)^2 + e(1 - t)t + ft^2
+
+If you add for instance the line equation (y = kx + m) to that, you'll end up
+with three equations and three unknowns (x, y and t)."
+
+Similar to above, the quadratic is represented as
+ x = a(1-t)^2 + 2b(1-t)t + ct^2
+ y = d(1-t)^2 + 2e(1-t)t + ft^2
+and the line as
+ y = g*x + h
+
+Using Mathematica, solve for the values of t where the quadratic intersects the
+line:
+
+ (in) t1 = Resultant[a*(1 - t)^2 + 2*b*(1 - t)*t + c*t^2 - x,
+ d*(1 - t)^2 + 2*e*(1 - t)*t + f*t^2 - g*x - h, x]
+ (out) -d + h + 2 d t - 2 e t - d t^2 + 2 e t^2 - f t^2 +
+ g (a - 2 a t + 2 b t + a t^2 - 2 b t^2 + c t^2)
+ (in) Solve[t1 == 0, t]
+ (out) {
+ {t -> (-2 d + 2 e + 2 a g - 2 b g -
+ Sqrt[(2 d - 2 e - 2 a g + 2 b g)^2 -
+ 4 (-d + 2 e - f + a g - 2 b g + c g) (-d + a g + h)]) /
+ (2 (-d + 2 e - f + a g - 2 b g + c g))
+ },
+ {t -> (-2 d + 2 e + 2 a g - 2 b g +
+ Sqrt[(2 d - 2 e - 2 a g + 2 b g)^2 -
+ 4 (-d + 2 e - f + a g - 2 b g + c g) (-d + a g + h)]) /
+ (2 (-d + 2 e - f + a g - 2 b g + c g))
+ }
+ }
+
+Using the results above (when the line tends towards horizontal)
+ A = (-(d - 2*e + f) + g*(a - 2*b + c) )
+ B = 2*( (d - e ) - g*(a - b ) )
+ C = (-(d ) + g*(a ) + h )
+
+If g goes to infinity, we can instead write the line as x in terms of y.
+ x = g'*y + h'
+
+And solve accordingly in Mathematica:
+
+ (in) t2 = Resultant[a*(1 - t)^2 + 2*b*(1 - t)*t + c*t^2 - g'*y - h',
+ d*(1 - t)^2 + 2*e*(1 - t)*t + f*t^2 - y, y]
+ (out) a - h' - 2 a t + 2 b t + a t^2 - 2 b t^2 + c t^2 -
+ g' (d - 2 d t + 2 e t + d t^2 - 2 e t^2 + f t^2)
+ (in) Solve[t2 == 0, t]
+ (out) {
+ {t -> (2 a - 2 b - 2 d g' + 2 e g' -
+ Sqrt[(-2 a + 2 b + 2 d g' - 2 e g')^2 -
+ 4 (a - 2 b + c - d g' + 2 e g' - f g') (a - d g' - h')]) /
+ (2 (a - 2 b + c - d g' + 2 e g' - f g'))
+ },
+ {t -> (2 a - 2 b - 2 d g' + 2 e g' +
+ Sqrt[(-2 a + 2 b + 2 d g' - 2 e g')^2 -
+ 4 (a - 2 b + c - d g' + 2 e g' - f g') (a - d g' - h')])/
+ (2 (a - 2 b + c - d g' + 2 e g' - f g'))
+ }
+ }
+
+Thus, if the slope of the line tends towards vertical, we use:
+ A = ( (a - 2*b + c) - g'*(d - 2*e + f) )
+ B = 2*(-(a - b ) + g'*(d - e ) )
+ C = ( (a ) - g'*(d ) - h' )
+ */
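+
+/* A worked instance of the coefficients above (an editorial example, not part
+   of the upstream sources): for a horizontal line y = h the slope g is zero,
+   so A = -(d - 2*e + f), B = 2*(d - e), C = h - d. With quad y control values
+   d = 0, e = 2, f = 0 (y(t) = 4t(1-t)) and h = 1, this gives A = 4, B = -4,
+   C = 1, and A*t^2 + B*t + C = (2t - 1)^2: the single tangential root t = 1/2.
+ */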
+
+class LineQuadraticIntersections {
+public:
+ enum PinTPoint {
+ kPointUninitialized,
+ kPointInitialized
+ };
+
+ LineQuadraticIntersections(const SkDQuad& q, const SkDLine& l, SkIntersections* i)
+ : fQuad(q)
+ , fLine(&l)
+ , fIntersections(i)
+ , fAllowNear(true) {
+ i->setMax(5); // allow short partial coincidence plus discrete intersections
+ }
+
+ LineQuadraticIntersections(const SkDQuad& q)
+ : fQuad(q)
+ SkDEBUGPARAMS(fLine(nullptr))
+ SkDEBUGPARAMS(fIntersections(nullptr))
+ SkDEBUGPARAMS(fAllowNear(false)) {
+ }
+
+ void allowNear(bool allow) {
+ fAllowNear = allow;
+ }
+
+ void checkCoincident() {
+ int last = fIntersections->used() - 1;
+ for (int index = 0; index < last; ) {
+ double quadMidT = ((*fIntersections)[0][index] + (*fIntersections)[0][index + 1]) / 2;
+ SkDPoint quadMidPt = fQuad.ptAtT(quadMidT);
+ double t = fLine->nearPoint(quadMidPt, nullptr);
+ if (t < 0) {
+ ++index;
+ continue;
+ }
+ if (fIntersections->isCoincident(index)) {
+ fIntersections->removeOne(index);
+ --last;
+ } else if (fIntersections->isCoincident(index + 1)) {
+ fIntersections->removeOne(index + 1);
+ --last;
+ } else {
+ fIntersections->setCoincident(index++);
+ }
+ fIntersections->setCoincident(index);
+ }
+ }
+
+ int intersectRay(double roots[2]) {
+ /*
+ solve by rotating line+quad so line is horizontal, then finding the roots
+ set up matrix to rotate quad to x-axis
+ |cos(a) -sin(a)|
+ |sin(a) cos(a)|
+ note that cos(a) = A(djacent) / Hypotenuse
+ sin(a) = O(pposite) / Hypotenuse
+ since we are computing Ts, we can ignore the hypotenuse, the scale factor:
+ | A -O |
+ | O A |
+ A = line[1].fX - line[0].fX (adjacent side of the right triangle)
+ O = line[1].fY - line[0].fY (opposite side of the right triangle)
+ for each of the three points (e.g. n = 0 to 2)
+ quad[n].fY' = (quad[n].fY - line[0].fY) * A - (quad[n].fX - line[0].fX) * O
+ */
+ double adj = (*fLine)[1].fX - (*fLine)[0].fX;
+ double opp = (*fLine)[1].fY - (*fLine)[0].fY;
+ double r[3];
+ for (int n = 0; n < 3; ++n) {
+ r[n] = (fQuad[n].fY - (*fLine)[0].fY) * adj - (fQuad[n].fX - (*fLine)[0].fX) * opp;
+ }
+ double A = r[2];
+ double B = r[1];
+ double C = r[0];
+ A += C - 2 * B; // A = a - 2*b + c
+ B -= C; // B = -(a - b)
+ return SkDQuad::RootsValidT(A, 2 * B, C, roots);
+ }
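+
+ // A quick check of the rotation above (editorial note, not from the upstream
+ // sources): for the line (0,0) -> (1,1), adj = opp = 1 and a point (x, y)
+ // maps to y - x, which is zero exactly on the line and grows with the
+ // (scaled) signed distance from it; the roots of the rotated control values'
+ // quadratic are therefore the ts where the quad crosses the original line.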
+
+ int intersect() {
+ addExactEndPoints();
+ if (fAllowNear) {
+ addNearEndPoints();
+ }
+ double rootVals[2];
+ int roots = intersectRay(rootVals);
+ for (int index = 0; index < roots; ++index) {
+ double quadT = rootVals[index];
+ double lineT = findLineT(quadT);
+ SkDPoint pt;
+ if (pinTs(&quadT, &lineT, &pt, kPointUninitialized) && uniqueAnswer(quadT, pt)) {
+ fIntersections->insert(quadT, lineT, pt);
+ }
+ }
+ checkCoincident();
+ return fIntersections->used();
+ }
+
+ int horizontalIntersect(double axisIntercept, double roots[2]) {
+ double D = fQuad[2].fY; // f
+ double E = fQuad[1].fY; // e
+ double F = fQuad[0].fY; // d
+ D += F - 2 * E; // D = d - 2*e + f
+ E -= F; // E = -(d - e)
+ F -= axisIntercept;
+ return SkDQuad::RootsValidT(D, 2 * E, F, roots);
+ }
+
+ int horizontalIntersect(double axisIntercept, double left, double right, bool flipped) {
+ addExactHorizontalEndPoints(left, right, axisIntercept);
+ if (fAllowNear) {
+ addNearHorizontalEndPoints(left, right, axisIntercept);
+ }
+ double rootVals[2];
+ int roots = horizontalIntersect(axisIntercept, rootVals);
+ for (int index = 0; index < roots; ++index) {
+ double quadT = rootVals[index];
+ SkDPoint pt = fQuad.ptAtT(quadT);
+ double lineT = (pt.fX - left) / (right - left);
+ if (pinTs(&quadT, &lineT, &pt, kPointInitialized) && uniqueAnswer(quadT, pt)) {
+ fIntersections->insert(quadT, lineT, pt);
+ }
+ }
+ if (flipped) {
+ fIntersections->flip();
+ }
+ checkCoincident();
+ return fIntersections->used();
+ }
+
+ bool uniqueAnswer(double quadT, const SkDPoint& pt) {
+ for (int inner = 0; inner < fIntersections->used(); ++inner) {
+ if (fIntersections->pt(inner) != pt) {
+ continue;
+ }
+ double existingQuadT = (*fIntersections)[0][inner];
+ if (quadT == existingQuadT) {
+ return false;
+ }
+ // check if midway on quad is also same point. If so, discard this intersection.
+ double quadMidT = (existingQuadT + quadT) / 2;
+ SkDPoint quadMidPt = fQuad.ptAtT(quadMidT);
+ if (quadMidPt.approximatelyEqual(pt)) {
+ return false;
+ }
+ }
+#if ONE_OFF_DEBUG
+ SkDPoint qPt = fQuad.ptAtT(quadT);
+ SkDebugf("%s pt=(%1.9g,%1.9g) cPt=(%1.9g,%1.9g)\n", __FUNCTION__, pt.fX, pt.fY,
+ qPt.fX, qPt.fY);
+#endif
+ return true;
+ }
+
+ int verticalIntersect(double axisIntercept, double roots[2]) {
+ double D = fQuad[2].fX; // f
+ double E = fQuad[1].fX; // e
+ double F = fQuad[0].fX; // d
+ D += F - 2 * E; // D = d - 2*e + f
+ E -= F; // E = -(d - e)
+ F -= axisIntercept;
+ return SkDQuad::RootsValidT(D, 2 * E, F, roots);
+ }
+
+ int verticalIntersect(double axisIntercept, double top, double bottom, bool flipped) {
+ addExactVerticalEndPoints(top, bottom, axisIntercept);
+ if (fAllowNear) {
+ addNearVerticalEndPoints(top, bottom, axisIntercept);
+ }
+ double rootVals[2];
+ int roots = verticalIntersect(axisIntercept, rootVals);
+ for (int index = 0; index < roots; ++index) {
+ double quadT = rootVals[index];
+ SkDPoint pt = fQuad.ptAtT(quadT);
+ double lineT = (pt.fY - top) / (bottom - top);
+ if (pinTs(&quadT, &lineT, &pt, kPointInitialized) && uniqueAnswer(quadT, pt)) {
+ fIntersections->insert(quadT, lineT, pt);
+ }
+ }
+ if (flipped) {
+ fIntersections->flip();
+ }
+ checkCoincident();
+ return fIntersections->used();
+ }
+
+protected:
+ // add endpoints first to get zero and one t values exactly
+ void addExactEndPoints() {
+ for (int qIndex = 0; qIndex < 3; qIndex += 2) {
+ double lineT = fLine->exactPoint(fQuad[qIndex]);
+ if (lineT < 0) {
+ continue;
+ }
+ double quadT = (double) (qIndex >> 1);
+ fIntersections->insert(quadT, lineT, fQuad[qIndex]);
+ }
+ }
+
+ void addNearEndPoints() {
+ for (int qIndex = 0; qIndex < 3; qIndex += 2) {
+ double quadT = (double) (qIndex >> 1);
+ if (fIntersections->hasT(quadT)) {
+ continue;
+ }
+ double lineT = fLine->nearPoint(fQuad[qIndex], nullptr);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(quadT, lineT, fQuad[qIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ void addLineNearEndPoints() {
+ for (int lIndex = 0; lIndex < 2; ++lIndex) {
+ double lineT = (double) lIndex;
+ if (fIntersections->hasOppT(lineT)) {
+ continue;
+ }
+ double quadT = ((SkDCurve*) &fQuad)->nearPoint(SkPath::kQuad_Verb,
+ (*fLine)[lIndex], (*fLine)[!lIndex]);
+ if (quadT < 0) {
+ continue;
+ }
+ fIntersections->insert(quadT, lineT, (*fLine)[lIndex]);
+ }
+ }
+
+ void addExactHorizontalEndPoints(double left, double right, double y) {
+ for (int qIndex = 0; qIndex < 3; qIndex += 2) {
+ double lineT = SkDLine::ExactPointH(fQuad[qIndex], left, right, y);
+ if (lineT < 0) {
+ continue;
+ }
+ double quadT = (double) (qIndex >> 1);
+ fIntersections->insert(quadT, lineT, fQuad[qIndex]);
+ }
+ }
+
+ void addNearHorizontalEndPoints(double left, double right, double y) {
+ for (int qIndex = 0; qIndex < 3; qIndex += 2) {
+ double quadT = (double) (qIndex >> 1);
+ if (fIntersections->hasT(quadT)) {
+ continue;
+ }
+ double lineT = SkDLine::NearPointH(fQuad[qIndex], left, right, y);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(quadT, lineT, fQuad[qIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ void addExactVerticalEndPoints(double top, double bottom, double x) {
+ for (int qIndex = 0; qIndex < 3; qIndex += 2) {
+ double lineT = SkDLine::ExactPointV(fQuad[qIndex], top, bottom, x);
+ if (lineT < 0) {
+ continue;
+ }
+ double quadT = (double) (qIndex >> 1);
+ fIntersections->insert(quadT, lineT, fQuad[qIndex]);
+ }
+ }
+
+ void addNearVerticalEndPoints(double top, double bottom, double x) {
+ for (int qIndex = 0; qIndex < 3; qIndex += 2) {
+ double quadT = (double) (qIndex >> 1);
+ if (fIntersections->hasT(quadT)) {
+ continue;
+ }
+ double lineT = SkDLine::NearPointV(fQuad[qIndex], top, bottom, x);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(quadT, lineT, fQuad[qIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ double findLineT(double t) {
+ SkDPoint xy = fQuad.ptAtT(t);
+ double dx = (*fLine)[1].fX - (*fLine)[0].fX;
+ double dy = (*fLine)[1].fY - (*fLine)[0].fY;
+ if (fabs(dx) > fabs(dy)) {
+ return (xy.fX - (*fLine)[0].fX) / dx;
+ }
+ return (xy.fY - (*fLine)[0].fY) / dy;
+ }
+
+ bool pinTs(double* quadT, double* lineT, SkDPoint* pt, PinTPoint ptSet) {
+ if (!approximately_one_or_less_double(*lineT)) {
+ return false;
+ }
+ if (!approximately_zero_or_more_double(*lineT)) {
+ return false;
+ }
+ double qT = *quadT = SkPinT(*quadT);
+ double lT = *lineT = SkPinT(*lineT);
+ if (lT == 0 || lT == 1 || (ptSet == kPointUninitialized && qT != 0 && qT != 1)) {
+ *pt = (*fLine).ptAtT(lT);
+ } else if (ptSet == kPointUninitialized) {
+ *pt = fQuad.ptAtT(qT);
+ }
+ SkPoint gridPt = pt->asSkPoint();
+ if (SkDPoint::ApproximatelyEqual(gridPt, (*fLine)[0].asSkPoint())) {
+ *pt = (*fLine)[0];
+ *lineT = 0;
+ } else if (SkDPoint::ApproximatelyEqual(gridPt, (*fLine)[1].asSkPoint())) {
+ *pt = (*fLine)[1];
+ *lineT = 1;
+ }
+ if (fIntersections->used() > 0 && approximately_equal((*fIntersections)[1][0], *lineT)) {
+ return false;
+ }
+ if (gridPt == fQuad[0].asSkPoint()) {
+ *pt = fQuad[0];
+ *quadT = 0;
+ } else if (gridPt == fQuad[2].asSkPoint()) {
+ *pt = fQuad[2];
+ *quadT = 1;
+ }
+ return true;
+ }
+
+private:
+ const SkDQuad& fQuad;
+ const SkDLine* fLine;
+ SkIntersections* fIntersections;
+ bool fAllowNear;
+};
+
+int SkIntersections::horizontal(const SkDQuad& quad, double left, double right, double y,
+ bool flipped) {
+ SkDLine line = {{{ left, y }, { right, y }}};
+ LineQuadraticIntersections q(quad, line, this);
+ return q.horizontalIntersect(y, left, right, flipped);
+}
+
+int SkIntersections::vertical(const SkDQuad& quad, double top, double bottom, double x,
+ bool flipped) {
+ SkDLine line = {{{ x, top }, { x, bottom }}};
+ LineQuadraticIntersections q(quad, line, this);
+ return q.verticalIntersect(x, top, bottom, flipped);
+}
+
+int SkIntersections::intersect(const SkDQuad& quad, const SkDLine& line) {
+ LineQuadraticIntersections q(quad, line, this);
+ q.allowNear(fAllowNear);
+ return q.intersect();
+}
+
+int SkIntersections::intersectRay(const SkDQuad& quad, const SkDLine& line) {
+ LineQuadraticIntersections q(quad, line, this);
+ fUsed = q.intersectRay(fT[0]);
+ for (int index = 0; index < fUsed; ++index) {
+ fPt[index] = quad.ptAtT(fT[0][index]);
+ }
+ return fUsed;
+}
+
+int SkIntersections::HorizontalIntercept(const SkDQuad& quad, SkScalar y, double* roots) {
+ LineQuadraticIntersections q(quad);
+ return q.horizontalIntersect(y, roots);
+}
+
+int SkIntersections::VerticalIntercept(const SkDQuad& quad, SkScalar x, double* roots) {
+ LineQuadraticIntersections q(quad);
+ return q.verticalIntersect(x, roots);
+}
+
+// SkDQuad accessors to Intersection utilities
+
+int SkDQuad::horizontalIntersect(double yIntercept, double roots[2]) const {
+ return SkIntersections::HorizontalIntercept(*this, yIntercept, roots);
+}
+
+int SkDQuad::verticalIntersect(double xIntercept, double roots[2]) const {
+ return SkIntersections::VerticalIntercept(*this, xIntercept, roots);
+}
diff --git a/gfx/skia/skia/src/pathops/SkIntersectionHelper.h b/gfx/skia/skia/src/pathops/SkIntersectionHelper.h
new file mode 100644
index 000000000..9a8a582af
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkIntersectionHelper.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkIntersectionHelper_DEFINED
+#define SkIntersectionHelper_DEFINED
+
+#include "SkOpContour.h"
+#include "SkOpSegment.h"
+#include "SkPath.h"
+
+#ifdef SK_DEBUG
+#include "SkPathOpsPoint.h"
+#endif
+
+class SkIntersectionHelper {
+public:
+ enum SegmentType {
+ kHorizontalLine_Segment = -1,
+ kVerticalLine_Segment = 0,
+ kLine_Segment = SkPath::kLine_Verb,
+ kQuad_Segment = SkPath::kQuad_Verb,
+ kConic_Segment = SkPath::kConic_Verb,
+ kCubic_Segment = SkPath::kCubic_Verb,
+ };
+
+ bool advance() {
+ fSegment = fSegment->next();
+ return fSegment != nullptr;
+ }
+
+ SkScalar bottom() const {
+ return bounds().fBottom;
+ }
+
+ const SkPathOpsBounds& bounds() const {
+ return fSegment->bounds();
+ }
+
+ SkOpContour* contour() const {
+ return fSegment->contour();
+ }
+
+ void init(SkOpContour* contour) {
+ fSegment = contour->first();
+ }
+
+ SkScalar left() const {
+ return bounds().fLeft;
+ }
+
+ const SkPoint* pts() const {
+ return fSegment->pts();
+ }
+
+ SkScalar right() const {
+ return bounds().fRight;
+ }
+
+ SkOpSegment* segment() const {
+ return fSegment;
+ }
+
+ SegmentType segmentType() const {
+ SegmentType type = (SegmentType) fSegment->verb();
+ if (type != kLine_Segment) {
+ return type;
+ }
+ if (fSegment->isHorizontal()) {
+ return kHorizontalLine_Segment;
+ }
+ if (fSegment->isVertical()) {
+ return kVerticalLine_Segment;
+ }
+ return kLine_Segment;
+ }
+
+ bool startAfter(const SkIntersectionHelper& after) {
+ fSegment = after.fSegment->next();
+ return fSegment != nullptr;
+ }
+
+ SkScalar top() const {
+ return bounds().fTop;
+ }
+
+ SkScalar weight() const {
+ return fSegment->weight();
+ }
+
+ SkScalar x() const {
+ return bounds().fLeft;
+ }
+
+ bool xFlipped() const {
+ return x() != pts()[0].fX;
+ }
+
+ SkScalar y() const {
+ return bounds().fTop;
+ }
+
+ bool yFlipped() const {
+ return y() != pts()[0].fY;
+ }
+
+private:
+ SkOpSegment* fSegment;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkIntersections.cpp b/gfx/skia/skia/src/pathops/SkIntersections.cpp
new file mode 100644
index 000000000..9683796a5
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkIntersections.cpp
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkIntersections.h"
+
+int SkIntersections::closestTo(double rangeStart, double rangeEnd, const SkDPoint& testPt,
+ double* closestDist) const {
+ int closest = -1;
+ *closestDist = SK_ScalarMax;
+ for (int index = 0; index < fUsed; ++index) {
+ if (!between(rangeStart, fT[0][index], rangeEnd)) {
+ continue;
+ }
+ const SkDPoint& iPt = fPt[index];
+ double dist = testPt.distanceSquared(iPt);
+ if (*closestDist > dist) {
+ *closestDist = dist;
+ closest = index;
+ }
+ }
+ return closest;
+}
+
+void SkIntersections::flip() {
+ for (int index = 0; index < fUsed; ++index) {
+ fT[1][index] = 1 - fT[1][index];
+ }
+}
+
+int SkIntersections::insert(double one, double two, const SkDPoint& pt) {
+ if (fIsCoincident[0] == 3 && between(fT[0][0], one, fT[0][1])) {
+ // For now, don't allow a mix of coincident and non-coincident intersections
+ return -1;
+ }
+ SkASSERT(fUsed <= 1 || fT[0][0] <= fT[0][1]);
+ int index;
+ for (index = 0; index < fUsed; ++index) {
+ double oldOne = fT[0][index];
+ double oldTwo = fT[1][index];
+ if (one == oldOne && two == oldTwo) {
+ return -1;
+ }
+ if (more_roughly_equal(oldOne, one) && more_roughly_equal(oldTwo, two)) {
+ if ((precisely_zero(one) && !precisely_zero(oldOne))
+ || (precisely_equal(one, 1) && !precisely_equal(oldOne, 1))
+ || (precisely_zero(two) && !precisely_zero(oldTwo))
+ || (precisely_equal(two, 1) && !precisely_equal(oldTwo, 1))) {
+ SkASSERT(one >= 0 && one <= 1);
+ SkASSERT(two >= 0 && two <= 1);
+ fT[0][index] = one;
+ fT[1][index] = two;
+ fPt[index] = pt;
+ }
+ return -1;
+ }
+ #if ONE_OFF_DEBUG
+ if (pt.roughlyEqual(fPt[index])) {
+ SkDebugf("%s t=%1.9g pts roughly equal\n", __FUNCTION__, one);
+ }
+ #endif
+ if (fT[0][index] > one) {
+ break;
+ }
+ }
+ if (fUsed >= fMax) {
+ SkASSERT(0); // FIXME : this error, if it is to be handled at runtime in release, must
+ // be propagated all the way back down to the caller, and return failure.
+ fUsed = 0;
+ return 0;
+ }
+ int remaining = fUsed - index;
+ if (remaining > 0) {
+ memmove(&fPt[index + 1], &fPt[index], sizeof(fPt[0]) * remaining);
+ memmove(&fT[0][index + 1], &fT[0][index], sizeof(fT[0][0]) * remaining);
+ memmove(&fT[1][index + 1], &fT[1][index], sizeof(fT[1][0]) * remaining);
+ int clearMask = ~((1 << index) - 1);
+ fIsCoincident[0] += fIsCoincident[0] & clearMask;
+ fIsCoincident[1] += fIsCoincident[1] & clearMask;
+ }
+ fPt[index] = pt;
+ SkASSERT(one >= 0 && one <= 1);
+ SkASSERT(two >= 0 && two <= 1);
+ fT[0][index] = one;
+ fT[1][index] = two;
+ ++fUsed;
+ SkASSERT(fUsed <= SK_ARRAY_COUNT(fPt));
+ return index;
+}
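
The fIsCoincident update in the insert() above relies on a small bit trick: adding the value masked by clearMask back onto itself doubles exactly the bits at or above the insertion index, which shifts those coincident flags up by one slot so they stay aligned with the freshly moved fPt/fT entries. A standalone sketch of just that trick; the helper name here is invented for illustration and is not part of the patch:

    #include <cassert>
    #include <cstdint>

    // Shift the coincident bits at positions >= index up by one, mirroring
    // the fIsCoincident adjustment in SkIntersections::insert().
    static uint16_t shiftCoincidentBits(uint16_t bits, int index) {
        uint16_t clearMask = ~((1 << index) - 1);  // bits at or above the insertion index
        return bits + (bits & clearMask);          // adding the masked part doubles it: bit k -> bit k + 1
    }

    int main() {
        // coincident marks at slots 1 and 3; a new intersection is inserted at slot 2
        assert(shiftCoincidentBits(0x0A, 2) == 0x12);  // 0b1010 -> 0b10010
        return 0;
    }
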
+
+void SkIntersections::insertNear(double one, double two, const SkDPoint& pt1, const SkDPoint& pt2) {
+ SkASSERT(one == 0 || one == 1);
+ SkASSERT(two == 0 || two == 1);
+ SkASSERT(pt1 != pt2);
+ fNearlySame[one ? 1 : 0] = true;
+ (void) insert(one, two, pt1);
+ fPt2[one ? 1 : 0] = pt2;
+}
+
+int SkIntersections::insertCoincident(double one, double two, const SkDPoint& pt) {
+ int index = insertSwap(one, two, pt);
+ if (index >= 0) {
+ setCoincident(index);
+ }
+ return index;
+}
+
+void SkIntersections::setCoincident(int index) {
+ SkASSERT(index >= 0);
+ int bit = 1 << index;
+ fIsCoincident[0] |= bit;
+ fIsCoincident[1] |= bit;
+}
+
+void SkIntersections::merge(const SkIntersections& a, int aIndex, const SkIntersections& b,
+ int bIndex) {
+ this->reset();
+ fT[0][0] = a.fT[0][aIndex];
+ fT[1][0] = b.fT[0][bIndex];
+ fPt[0] = a.fPt[aIndex];
+ fPt2[0] = b.fPt[bIndex];
+ fUsed = 1;
+}
+
+int SkIntersections::mostOutside(double rangeStart, double rangeEnd, const SkDPoint& origin) const {
+ int result = -1;
+ for (int index = 0; index < fUsed; ++index) {
+ if (!between(rangeStart, fT[0][index], rangeEnd)) {
+ continue;
+ }
+ if (result < 0) {
+ result = index;
+ continue;
+ }
+ SkDVector best = fPt[result] - origin;
+ SkDVector test = fPt[index] - origin;
+ if (test.crossCheck(best) < 0) {
+ result = index;
+ }
+ }
+ return result;
+}
+
+void SkIntersections::removeOne(int index) {
+ int remaining = --fUsed - index;
+ if (remaining <= 0) {
+ return;
+ }
+ memmove(&fPt[index], &fPt[index + 1], sizeof(fPt[0]) * remaining);
+ memmove(&fT[0][index], &fT[0][index + 1], sizeof(fT[0][0]) * remaining);
+ memmove(&fT[1][index], &fT[1][index + 1], sizeof(fT[1][0]) * remaining);
+// SkASSERT(fIsCoincident[0] == 0);
+ int coBit = fIsCoincident[0] & (1 << index);
+ fIsCoincident[0] -= ((fIsCoincident[0] >> 1) & ~((1 << index) - 1)) + coBit;
+ SkASSERT(!(coBit ^ (fIsCoincident[1] & (1 << index))));
+ fIsCoincident[1] -= ((fIsCoincident[1] >> 1) & ~((1 << index) - 1)) + coBit;
+}
diff --git a/gfx/skia/skia/src/pathops/SkIntersections.h b/gfx/skia/skia/src/pathops/SkIntersections.h
new file mode 100644
index 000000000..abc10e19d
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkIntersections.h
@@ -0,0 +1,329 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkIntersections_DEFINE
+#define SkIntersections_DEFINE
+
+#include "SkPathOpsConic.h"
+#include "SkPathOpsCubic.h"
+#include "SkPathOpsLine.h"
+#include "SkPathOpsPoint.h"
+#include "SkPathOpsQuad.h"
+
+class SkIntersections {
+public:
+ SkIntersections(SkDEBUGCODE(SkOpGlobalState* globalState = nullptr))
+ : fSwap(0)
+#ifdef SK_DEBUG
+ SkDEBUGPARAMS(fDebugGlobalState(globalState))
+ , fDepth(0)
+#endif
+ {
+ sk_bzero(fPt, sizeof(fPt));
+ sk_bzero(fPt2, sizeof(fPt2));
+ sk_bzero(fT, sizeof(fT));
+ sk_bzero(fNearlySame, sizeof(fNearlySame));
+#if DEBUG_T_SECT_LOOP_COUNT
+ sk_bzero(fDebugLoopCount, sizeof(fDebugLoopCount));
+#endif
+ reset();
+ fMax = 0; // require that the caller set the max
+ }
+
+ class TArray {
+ public:
+ explicit TArray(const double ts[10]) : fTArray(ts) {}
+ double operator[](int n) const {
+ return fTArray[n];
+ }
+ const double* fTArray;
+ };
+ TArray operator[](int n) const { return TArray(fT[n]); }
+
+ void allowNear(bool nearAllowed) {
+ fAllowNear = nearAllowed;
+ }
+
+ void clearCoincidence(int index) {
+ SkASSERT(index >= 0);
+ int bit = 1 << index;
+ fIsCoincident[0] &= ~bit;
+ fIsCoincident[1] &= ~bit;
+ }
+
+ int conicHorizontal(const SkPoint a[3], SkScalar weight, SkScalar left, SkScalar right,
+ SkScalar y, bool flipped) {
+ SkDConic conic;
+ conic.set(a, weight);
+ fMax = 2;
+ return horizontal(conic, left, right, y, flipped);
+ }
+
+ int conicVertical(const SkPoint a[3], SkScalar weight, SkScalar top, SkScalar bottom,
+ SkScalar x, bool flipped) {
+ SkDConic conic;
+ conic.set(a, weight);
+ fMax = 2;
+ return vertical(conic, top, bottom, x, flipped);
+ }
+
+ int conicLine(const SkPoint a[3], SkScalar weight, const SkPoint b[2]) {
+ SkDConic conic;
+ conic.set(a, weight);
+ SkDLine line;
+ line.set(b);
+ fMax = 3; // 2; permit small coincident segment + non-coincident intersection
+ return intersect(conic, line);
+ }
+
+ int cubicHorizontal(const SkPoint a[4], SkScalar left, SkScalar right, SkScalar y,
+ bool flipped) {
+ SkDCubic cubic;
+ cubic.set(a);
+ fMax = 3;
+ return horizontal(cubic, left, right, y, flipped);
+ }
+
+ int cubicVertical(const SkPoint a[4], SkScalar top, SkScalar bottom, SkScalar x, bool flipped) {
+ SkDCubic cubic;
+ cubic.set(a);
+ fMax = 3;
+ return vertical(cubic, top, bottom, x, flipped);
+ }
+
+ int cubicLine(const SkPoint a[4], const SkPoint b[2]) {
+ SkDCubic cubic;
+ cubic.set(a);
+ SkDLine line;
+ line.set(b);
+ fMax = 3;
+ return intersect(cubic, line);
+ }
+
+#ifdef SK_DEBUG
+ SkOpGlobalState* debugGlobalState() { return fDebugGlobalState; }
+#endif
+
+ bool hasT(double t) const {
+ SkASSERT(t == 0 || t == 1);
+ return fUsed > 0 && (t == 0 ? fT[0][0] == 0 : fT[0][fUsed - 1] == 1);
+ }
+
+ bool hasOppT(double t) const {
+ SkASSERT(t == 0 || t == 1);
+ return fUsed > 0 && (fT[1][0] == t || fT[1][fUsed - 1] == t);
+ }
+
+ int insertSwap(double one, double two, const SkDPoint& pt) {
+ if (fSwap) {
+ return insert(two, one, pt);
+ } else {
+ return insert(one, two, pt);
+ }
+ }
+
+ bool isCoincident(int index) {
+ return (fIsCoincident[0] & 1 << index) != 0;
+ }
+
+ int lineHorizontal(const SkPoint a[2], SkScalar left, SkScalar right, SkScalar y,
+ bool flipped) {
+ SkDLine line;
+ line.set(a);
+ fMax = 2;
+ return horizontal(line, left, right, y, flipped);
+ }
+
+ int lineVertical(const SkPoint a[2], SkScalar top, SkScalar bottom, SkScalar x, bool flipped) {
+ SkDLine line;
+ line.set(a);
+ fMax = 2;
+ return vertical(line, top, bottom, x, flipped);
+ }
+
+ int lineLine(const SkPoint a[2], const SkPoint b[2]) {
+ SkDLine aLine, bLine;
+ aLine.set(a);
+ bLine.set(b);
+ fMax = 2;
+ return intersect(aLine, bLine);
+ }
+
+ bool nearlySame(int index) const {
+ SkASSERT(index == 0 || index == 1);
+ return fNearlySame[index];
+ }
+
+ const SkDPoint& pt(int index) const {
+ return fPt[index];
+ }
+
+ const SkDPoint& pt2(int index) const {
+ return fPt2[index];
+ }
+
+ int quadHorizontal(const SkPoint a[3], SkScalar left, SkScalar right, SkScalar y,
+ bool flipped) {
+ SkDQuad quad;
+ quad.set(a);
+ fMax = 2;
+ return horizontal(quad, left, right, y, flipped);
+ }
+
+ int quadVertical(const SkPoint a[3], SkScalar top, SkScalar bottom, SkScalar x, bool flipped) {
+ SkDQuad quad;
+ quad.set(a);
+ fMax = 2;
+ return vertical(quad, top, bottom, x, flipped);
+ }
+
+ int quadLine(const SkPoint a[3], const SkPoint b[2]) {
+ SkDQuad quad;
+ quad.set(a);
+ SkDLine line;
+ line.set(b);
+ return intersect(quad, line);
+ }
+
+ // leaves swap, max alone
+ void reset() {
+ fAllowNear = true;
+ fUsed = 0;
+ sk_bzero(fIsCoincident, sizeof(fIsCoincident));
+ }
+
+ void set(bool swap, int tIndex, double t) {
+ fT[(int) swap][tIndex] = t;
+ }
+
+ void setMax(int max) {
+ SkASSERT(max <= (int) SK_ARRAY_COUNT(fPt));
+ fMax = max;
+ }
+
+ void swap() {
+ fSwap ^= true;
+ }
+
+ bool swapped() const {
+ return fSwap;
+ }
+
+ int used() const {
+ return fUsed;
+ }
+
+ void downDepth() {
+ SkASSERT(--fDepth >= 0);
+ }
+
+ bool unBumpT(int index) {
+ SkASSERT(fUsed == 1);
+ fT[0][index] = fT[0][index] * (1 + BUMP_EPSILON * 2) - BUMP_EPSILON;
+ if (!between(0, fT[0][index], 1)) {
+ fUsed = 0;
+ return false;
+ }
+ return true;
+ }
+
+ void upDepth() {
+ SkASSERT(++fDepth < 16);
+ }
+
+ void alignQuadPts(const SkPoint a[3], const SkPoint b[3]);
+ int cleanUpCoincidence();
+ int closestTo(double rangeStart, double rangeEnd, const SkDPoint& testPt, double* dist) const;
+ void cubicInsert(double one, double two, const SkDPoint& pt, const SkDCubic& c1,
+ const SkDCubic& c2);
+ void flip();
+ int horizontal(const SkDLine&, double left, double right, double y, bool flipped);
+ int horizontal(const SkDQuad&, double left, double right, double y, bool flipped);
+ int horizontal(const SkDQuad&, double left, double right, double y, double tRange[2]);
+ int horizontal(const SkDCubic&, double y, double tRange[3]);
+ int horizontal(const SkDConic&, double left, double right, double y, bool flipped);
+ int horizontal(const SkDCubic&, double left, double right, double y, bool flipped);
+ int horizontal(const SkDCubic&, double left, double right, double y, double tRange[3]);
+ static double HorizontalIntercept(const SkDLine& line, double y);
+ static int HorizontalIntercept(const SkDQuad& quad, SkScalar y, double* roots);
+ static int HorizontalIntercept(const SkDConic& conic, SkScalar y, double* roots);
+ // FIXME : does not respect swap
+ int insert(double one, double two, const SkDPoint& pt);
+ void insertNear(double one, double two, const SkDPoint& pt1, const SkDPoint& pt2);
+ // start if index == 0 : end if index == 1
+ int insertCoincident(double one, double two, const SkDPoint& pt);
+ int intersect(const SkDLine&, const SkDLine&);
+ int intersect(const SkDQuad&, const SkDLine&);
+ int intersect(const SkDQuad&, const SkDQuad&);
+ int intersect(const SkDConic&, const SkDLine&);
+ int intersect(const SkDConic&, const SkDQuad&);
+ int intersect(const SkDConic&, const SkDConic&);
+ int intersect(const SkDCubic&, const SkDLine&);
+ int intersect(const SkDCubic&, const SkDQuad&);
+ int intersect(const SkDCubic&, const SkDConic&);
+ int intersect(const SkDCubic&, const SkDCubic&);
+ int intersectRay(const SkDLine&, const SkDLine&);
+ int intersectRay(const SkDQuad&, const SkDLine&);
+ int intersectRay(const SkDConic&, const SkDLine&);
+ int intersectRay(const SkDCubic&, const SkDLine&);
+ void merge(const SkIntersections& , int , const SkIntersections& , int );
+ int mostOutside(double rangeStart, double rangeEnd, const SkDPoint& origin) const;
+ void removeOne(int index);
+ void setCoincident(int index);
+ int vertical(const SkDLine&, double top, double bottom, double x, bool flipped);
+ int vertical(const SkDQuad&, double top, double bottom, double x, bool flipped);
+ int vertical(const SkDConic&, double top, double bottom, double x, bool flipped);
+ int vertical(const SkDCubic&, double top, double bottom, double x, bool flipped);
+ static double VerticalIntercept(const SkDLine& line, double x);
+ static int VerticalIntercept(const SkDQuad& quad, SkScalar x, double* roots);
+ static int VerticalIntercept(const SkDConic& conic, SkScalar x, double* roots);
+
+ int depth() const {
+#ifdef SK_DEBUG
+ return fDepth;
+#else
+ return 0;
+#endif
+ }
+
+ enum DebugLoop {
+ kIterations_DebugLoop,
+ kCoinCheck_DebugLoop,
+ kComputePerp_DebugLoop,
+ };
+
+ void debugBumpLoopCount(DebugLoop );
+ int debugCoincidentUsed() const;
+ int debugLoopCount(DebugLoop ) const;
+ void debugResetLoopCount();
+ void dump() const; // implemented for testing only
+
+private:
+ bool cubicCheckCoincidence(const SkDCubic& c1, const SkDCubic& c2);
+ bool cubicExactEnd(const SkDCubic& cubic1, bool start, const SkDCubic& cubic2);
+ void cubicNearEnd(const SkDCubic& cubic1, bool start, const SkDCubic& cubic2, const SkDRect& );
+ void cleanUpParallelLines(bool parallel);
+ void computePoints(const SkDLine& line, int used);
+
+ SkDPoint fPt[12]; // FIXME: since scans store points as SkPoint, this should also
+ SkDPoint fPt2[2]; // used by nearly same to store alternate intersection point
+ double fT[2][12];
+ uint16_t fIsCoincident[2]; // bit set for each curve's coincident T
+ bool fNearlySame[2]; // true if end points nearly match
+ unsigned char fUsed;
+ unsigned char fMax;
+ bool fAllowNear;
+ bool fSwap;
+#ifdef SK_DEBUG
+ SkOpGlobalState* fDebugGlobalState;
+ int fDepth;
+#endif
+#if DEBUG_T_SECT_LOOP_COUNT
+ int fDebugLoopCount[3];
+#endif
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkLineParameters.h b/gfx/skia/skia/src/pathops/SkLineParameters.h
new file mode 100644
index 000000000..073d03602
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkLineParameters.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLineParameters_DEFINED
+#define SkLineParameters_DEFINED
+
+#include "SkPathOpsCubic.h"
+#include "SkPathOpsLine.h"
+#include "SkPathOpsQuad.h"
+
+// Sources
+// computer-aided design - volume 22 number 9 november 1990 pp 538 - 549
+// online at http://cagd.cs.byu.edu/~tom/papers/bezclip.pdf
+
+// This turns a line segment into a parameterized line, of the form
+// ax + by + c = 0
+// When a^2 + b^2 == 1, the line is normalized.
+// The distance to the line for (x, y) is d(x,y) = ax + by + c
+//
+// Note that the distances below are not necessarily normalized. To get the true
+// distance, it's necessary to either call normalize() after xxxEndPoints(), or
+// divide the result of xxxDistance() by sqrt(normalSquared())
+
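
A tiny self-contained example of the implicit form described above, using the line through (0,0) and (3,4); it mirrors what lineEndPoints(), normalSquared() and pointDistance() below compute, and is for illustration only (not part of the patch):

    #include <cassert>
    #include <cmath>

    int main() {
        // ax + by + c = 0 for the segment (0,0) -> (3,4)
        double a = 0 - 4;          // y0 - y1
        double b = 3 - 0;          // x1 - x0
        double c = 0 * 4 - 3 * 0;  // x0*y1 - x1*y0
        // signed, unnormalized distance of the point (3, 0) from the line
        double raw = a * 3 + b * 0 + c;                        // -12
        double normalized = raw / std::sqrt(a * a + b * b);    // divide by sqrt(a^2 + b^2): -2.4
        assert(std::fabs(normalized + 12.0 / 5) < 1e-12);      // true perpendicular distance is 12/5
        return 0;
    }
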
+class SkLineParameters {
+public:
+
+ bool cubicEndPoints(const SkDCubic& pts) {
+ int endIndex = 1;
+ cubicEndPoints(pts, 0, endIndex);
+ if (dy() != 0) {
+ return true;
+ }
+ if (dx() == 0) {
+ cubicEndPoints(pts, 0, ++endIndex);
+ SkASSERT(endIndex == 2);
+ if (dy() != 0) {
+ return true;
+ }
+ if (dx() == 0) {
+ cubicEndPoints(pts, 0, ++endIndex); // line
+ SkASSERT(endIndex == 3);
+ return false;
+ }
+ }
+ // FIXME: after switching to round sort, remove bumping fA
+ if (dx() < 0) { // only worry about y bias when breaking cw/ccw tie
+ return true;
+ }
+ // if cubic tangent is on x axis, look at next control point to break tie
+ // control point may be approximate, so it must move significantly to account for error
+ if (NotAlmostEqualUlps(pts[0].fY, pts[++endIndex].fY)) {
+ if (pts[0].fY > pts[endIndex].fY) {
+ fA = DBL_EPSILON; // push it from 0 to slightly negative (y() returns -a)
+ }
+ return true;
+ }
+ if (endIndex == 3) {
+ return true;
+ }
+ SkASSERT(endIndex == 2);
+ if (pts[0].fY > pts[3].fY) {
+ fA = DBL_EPSILON; // push it from 0 to slightly negative (y() returns -a)
+ }
+ return true;
+ }
+
+ void cubicEndPoints(const SkDCubic& pts, int s, int e) {
+ fA = pts[s].fY - pts[e].fY;
+ fB = pts[e].fX - pts[s].fX;
+ fC = pts[s].fX * pts[e].fY - pts[e].fX * pts[s].fY;
+ }
+
+ double cubicPart(const SkDCubic& part) {
+ cubicEndPoints(part);
+ if (part[0] == part[1] || ((const SkDLine& ) part[0]).nearRay(part[2])) {
+ return pointDistance(part[3]);
+ }
+ return pointDistance(part[2]);
+ }
+
+ void lineEndPoints(const SkDLine& pts) {
+ fA = pts[0].fY - pts[1].fY;
+ fB = pts[1].fX - pts[0].fX;
+ fC = pts[0].fX * pts[1].fY - pts[1].fX * pts[0].fY;
+ }
+
+ bool quadEndPoints(const SkDQuad& pts) {
+ quadEndPoints(pts, 0, 1);
+ if (dy() != 0) {
+ return true;
+ }
+ if (dx() == 0) {
+ quadEndPoints(pts, 0, 2);
+ return false;
+ }
+ if (dx() < 0) { // only worry about y bias when breaking cw/ccw tie
+ return true;
+ }
+ // FIXME: after switching to round sort, remove this
+ if (pts[0].fY > pts[2].fY) {
+ fA = DBL_EPSILON;
+ }
+ return true;
+ }
+
+ void quadEndPoints(const SkDQuad& pts, int s, int e) {
+ fA = pts[s].fY - pts[e].fY;
+ fB = pts[e].fX - pts[s].fX;
+ fC = pts[s].fX * pts[e].fY - pts[e].fX * pts[s].fY;
+ }
+
+ double quadPart(const SkDQuad& part) {
+ quadEndPoints(part);
+ return pointDistance(part[2]);
+ }
+
+ double normalSquared() const {
+ return fA * fA + fB * fB;
+ }
+
+ bool normalize() {
+ double normal = sqrt(normalSquared());
+ if (approximately_zero(normal)) {
+ fA = fB = fC = 0;
+ return false;
+ }
+ double reciprocal = 1 / normal;
+ fA *= reciprocal;
+ fB *= reciprocal;
+ fC *= reciprocal;
+ return true;
+ }
+
+ void cubicDistanceY(const SkDCubic& pts, SkDCubic& distance) const {
+ double oneThird = 1 / 3.0;
+ for (int index = 0; index < 4; ++index) {
+ distance[index].fX = index * oneThird;
+ distance[index].fY = fA * pts[index].fX + fB * pts[index].fY + fC;
+ }
+ }
+
+ void quadDistanceY(const SkDQuad& pts, SkDQuad& distance) const {
+ double oneHalf = 1 / 2.0;
+ for (int index = 0; index < 3; ++index) {
+ distance[index].fX = index * oneHalf;
+ distance[index].fY = fA * pts[index].fX + fB * pts[index].fY + fC;
+ }
+ }
+
+ double controlPtDistance(const SkDCubic& pts, int index) const {
+ SkASSERT(index == 1 || index == 2);
+ return fA * pts[index].fX + fB * pts[index].fY + fC;
+ }
+
+ double controlPtDistance(const SkDQuad& pts) const {
+ return fA * pts[1].fX + fB * pts[1].fY + fC;
+ }
+
+ double pointDistance(const SkDPoint& pt) const {
+ return fA * pt.fX + fB * pt.fY + fC;
+ }
+
+ double dx() const {
+ return fB;
+ }
+
+ double dy() const {
+ return -fA;
+ }
+
+private:
+ double fA;
+ double fB;
+ double fC;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkOpAngle.cpp b/gfx/skia/skia/src/pathops/SkOpAngle.cpp
new file mode 100644
index 000000000..820b5dcee
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpAngle.cpp
@@ -0,0 +1,995 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkOpAngle.h"
+#include "SkOpSegment.h"
+#include "SkPathOpsCurve.h"
+#include "SkTSort.h"
+
+/* Angles are sorted counterclockwise. The smallest angle has a positive x and the smallest
+ positive y. The largest angle has a positive x and a zero y. */
+
+#if DEBUG_ANGLE
+ static bool CompareResult(const char* func, SkString* bugOut, SkString* bugPart, int append,
+ bool compare) {
+ SkDebugf("%s %c %d\n", bugOut->c_str(), compare ? 'T' : 'F', append);
+ SkDebugf("%sPart %s\n", func, bugPart[0].c_str());
+ SkDebugf("%sPart %s\n", func, bugPart[1].c_str());
+ SkDebugf("%sPart %s\n", func, bugPart[2].c_str());
+ return compare;
+ }
+
+ #define COMPARE_RESULT(append, compare) CompareResult(__FUNCTION__, &bugOut, bugPart, append, \
+ compare)
+#else
+ #define COMPARE_RESULT(append, compare) compare
+#endif
+
+/* quarter angle values for sector
+
+31 x > 0, y == 0 horizontal line (to the right)
+0 x > 0, y == epsilon quad/cubic horizontal tangent eventually going +y
+1 x > 0, y > 0, x > y nearer horizontal angle
+2 x + e == y quad/cubic 45 going horiz
+3 x > 0, y > 0, x == y 45 angle
+4 x == y + e quad/cubic 45 going vert
+5 x > 0, y > 0, x < y nearer vertical angle
+6 x == epsilon, y > 0 quad/cubic vertical tangent eventually going +x
+7 x == 0, y > 0 vertical line (to the top)
+
+ 8 7 6
+ 9 | 5
+ 10 | 4
+ 11 | 3
+ 12 \ | / 2
+ 13 | 1
+ 14 | 0
+ 15 --------------+------------- 31
+ 16 | 30
+ 17 | 29
+ 18 / | \ 28
+ 19 | 27
+ 20 | 26
+ 21 | 25
+ 22 23 24
+*/
+
+// return true if lh < this < rh
+bool SkOpAngle::after(SkOpAngle* test) {
+ SkOpAngle* lh = test;
+ SkOpAngle* rh = lh->fNext;
+ SkASSERT(lh != rh);
+ fPart.fCurve = fOriginalCurvePart;
+ lh->fPart.fCurve = lh->fOriginalCurvePart;
+ lh->fPart.fCurve.offset(lh->segment()->verb(), fPart.fCurve[0] - lh->fPart.fCurve[0]);
+ rh->fPart.fCurve = rh->fOriginalCurvePart;
+ rh->fPart.fCurve.offset(rh->segment()->verb(), fPart.fCurve[0] - rh->fPart.fCurve[0]);
+
+#if DEBUG_ANGLE
+ SkString bugOut;
+ bugOut.printf("%s [%d/%d] %d/%d tStart=%1.9g tEnd=%1.9g"
+ " < [%d/%d] %d/%d tStart=%1.9g tEnd=%1.9g"
+ " < [%d/%d] %d/%d tStart=%1.9g tEnd=%1.9g ", __FUNCTION__,
+ lh->segment()->debugID(), lh->debugID(), lh->fSectorStart, lh->fSectorEnd,
+ lh->fStart->t(), lh->fEnd->t(),
+ segment()->debugID(), debugID(), fSectorStart, fSectorEnd, fStart->t(), fEnd->t(),
+ rh->segment()->debugID(), rh->debugID(), rh->fSectorStart, rh->fSectorEnd,
+ rh->fStart->t(), rh->fEnd->t());
+ SkString bugPart[3] = { lh->debugPart(), this->debugPart(), rh->debugPart() };
+#endif
+ if (lh->fComputeSector && !lh->computeSector()) {
+ return COMPARE_RESULT(1, true);
+ }
+ if (fComputeSector && !this->computeSector()) {
+ return COMPARE_RESULT(2, true);
+ }
+ if (rh->fComputeSector && !rh->computeSector()) {
+ return COMPARE_RESULT(3, true);
+ }
+#if DEBUG_ANGLE // reset bugOut with computed sectors
+ bugOut.printf("%s [%d/%d] %d/%d tStart=%1.9g tEnd=%1.9g"
+ " < [%d/%d] %d/%d tStart=%1.9g tEnd=%1.9g"
+ " < [%d/%d] %d/%d tStart=%1.9g tEnd=%1.9g ", __FUNCTION__,
+ lh->segment()->debugID(), lh->debugID(), lh->fSectorStart, lh->fSectorEnd,
+ lh->fStart->t(), lh->fEnd->t(),
+ segment()->debugID(), debugID(), fSectorStart, fSectorEnd, fStart->t(), fEnd->t(),
+ rh->segment()->debugID(), rh->debugID(), rh->fSectorStart, rh->fSectorEnd,
+ rh->fStart->t(), rh->fEnd->t());
+#endif
+ bool ltrOverlap = (lh->fSectorMask | rh->fSectorMask) & fSectorMask;
+ bool lrOverlap = lh->fSectorMask & rh->fSectorMask;
+ int lrOrder; // set to -1 if either order works
+ if (!lrOverlap) { // no lh/rh sector overlap
+ if (!ltrOverlap) { // no lh/this/rh sector overlap
+ return COMPARE_RESULT(4, (lh->fSectorEnd > rh->fSectorStart)
+ ^ (fSectorStart > lh->fSectorEnd) ^ (fSectorStart > rh->fSectorStart));
+ }
+ int lrGap = (rh->fSectorStart - lh->fSectorStart + 32) & 0x1f;
+ /* A tiny change can move the start +/- 4. The order can only be determined if
+ lr gap is not 12 to 20 or -12 to -20.
+ -31 ..-21 1
+ -20 ..-12 -1
+ -11 .. -1 0
+ 0 shouldn't get here
+ 11 .. 1 1
+ 12 .. 20 -1
+ 21 .. 31 0
+ */
+ lrOrder = lrGap > 20 ? 0 : lrGap > 11 ? -1 : 1;
+ } else {
+ lrOrder = (int) lh->orderable(rh);
+ if (!ltrOverlap) {
+ return COMPARE_RESULT(5, !lrOrder);
+ }
+ }
+ int ltOrder;
+ SkASSERT((lh->fSectorMask & fSectorMask) || (rh->fSectorMask & fSectorMask));
+ if (lh->fSectorMask & fSectorMask) {
+ ltOrder = (int) lh->orderable(this);
+ } else {
+ int ltGap = (fSectorStart - lh->fSectorStart + 32) & 0x1f;
+ ltOrder = ltGap > 20 ? 0 : ltGap > 11 ? -1 : 1;
+ }
+ int trOrder;
+ if (rh->fSectorMask & fSectorMask) {
+ trOrder = (int) orderable(rh);
+ } else {
+ int trGap = (rh->fSectorStart - fSectorStart + 32) & 0x1f;
+ trOrder = trGap > 20 ? 0 : trGap > 11 ? -1 : 1;
+ }
+ if (lrOrder >= 0 && ltOrder >= 0 && trOrder >= 0) {
+ return COMPARE_RESULT(7, lrOrder ? (ltOrder & trOrder) : (ltOrder | trOrder));
+ }
+ SkASSERT(lrOrder >= 0 || ltOrder >= 0 || trOrder >= 0);
+// There's not enough information to sort. Get the pairs of angles in opposite planes.
+// If an order is < 0, the pair is already in an opposite plane. Check the remaining pairs.
+ // FIXME : once all variants are understood, rewrite this more simply
+ if (ltOrder == 0 && lrOrder == 0) {
+ SkASSERT(trOrder < 0);
+ // FIXME : once this is verified to work, remove one opposite angle call
+ SkDEBUGCODE(bool lrOpposite = lh->oppositePlanes(rh));
+ bool ltOpposite = lh->oppositePlanes(this);
+ SkASSERT(lrOpposite != ltOpposite);
+ return COMPARE_RESULT(8, ltOpposite);
+ } else if (ltOrder == 1 && trOrder == 0) {
+ SkASSERT(lrOrder < 0);
+ bool trOpposite = oppositePlanes(rh);
+ return COMPARE_RESULT(9, trOpposite);
+ } else if (lrOrder == 1 && trOrder == 1) {
+ SkASSERT(ltOrder < 0);
+ SkDEBUGCODE(bool trOpposite = oppositePlanes(rh));
+ bool lrOpposite = lh->oppositePlanes(rh);
+ SkASSERT(lrOpposite != trOpposite);
+ return COMPARE_RESULT(10, lrOpposite);
+ }
+ if (lrOrder < 0) {
+ if (ltOrder < 0) {
+ return COMPARE_RESULT(11, trOrder);
+ }
+ return COMPARE_RESULT(12, ltOrder);
+ }
+ return COMPARE_RESULT(13, !lrOrder);
+}
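
The gap table in the comment inside after() can be spot-checked on its own: the sector difference is wrapped into the range 0..31, gaps of 1..11 and 21..31 give a definite order, and gaps of 12..20 (a near half-turn) leave the order undecided. A rough standalone check; the gap/order lambdas are invented names for illustration only:

    #include <cassert>

    int main() {
        // wrap the sector difference into [0, 31], then classify it the way after() does
        auto gap = [](int lhStart, int rhStart) { return (rhStart - lhStart + 32) & 0x1f; };
        auto order = [](int g) { return g > 20 ? 0 : g > 11 ? -1 : 1; };
        assert(order(gap(30, 5)) == 1);    // gap 7: order determined one way
        assert(order(gap(5, 30)) == 0);    // gap 25: order determined the other way
        assert(order(gap(0, 16)) == -1);   // gap 12..20: either order works
        return 0;
    }
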
+
+// given a line, see if the opposite curve's convex hull is all on one side
+// returns -1=not on one side 0=this CW of test 1=this CCW of test
+int SkOpAngle::allOnOneSide(const SkOpAngle* test) {
+ SkASSERT(!fPart.isCurve());
+ SkASSERT(test->fPart.isCurve());
+ SkDPoint origin = fPart.fCurve[0];
+ SkDVector line = fPart.fCurve[1] - origin;
+ double crosses[3];
+ SkPath::Verb testVerb = test->segment()->verb();
+ int iMax = SkPathOpsVerbToPoints(testVerb);
+// SkASSERT(origin == test.fCurveHalf[0]);
+ const SkDCurve& testCurve = test->fPart.fCurve;
+ for (int index = 1; index <= iMax; ++index) {
+ double xy1 = line.fX * (testCurve[index].fY - origin.fY);
+ double xy2 = line.fY * (testCurve[index].fX - origin.fX);
+ crosses[index - 1] = AlmostBequalUlps(xy1, xy2) ? 0 : xy1 - xy2;
+ }
+ if (crosses[0] * crosses[1] < 0) {
+ return -1;
+ }
+ if (SkPath::kCubic_Verb == testVerb) {
+ if (crosses[0] * crosses[2] < 0 || crosses[1] * crosses[2] < 0) {
+ return -1;
+ }
+ }
+ if (crosses[0]) {
+ return crosses[0] < 0;
+ }
+ if (crosses[1]) {
+ return crosses[1] < 0;
+ }
+ if (SkPath::kCubic_Verb == testVerb && crosses[2]) {
+ return crosses[2] < 0;
+ }
+ fUnorderable = true;
+ return -1;
+}
+
+bool SkOpAngle::checkCrossesZero() const {
+ int start = SkTMin(fSectorStart, fSectorEnd);
+ int end = SkTMax(fSectorStart, fSectorEnd);
+ bool crossesZero = end - start > 16;
+ return crossesZero;
+}
+
+bool SkOpAngle::checkParallel(SkOpAngle* rh) {
+ SkDVector scratch[2];
+ const SkDVector* sweep, * tweep;
+ if (this->fPart.isOrdered()) {
+ sweep = this->fPart.fSweep;
+ } else {
+ scratch[0] = this->fPart.fCurve[1] - this->fPart.fCurve[0];
+ sweep = &scratch[0];
+ }
+ if (rh->fPart.isOrdered()) {
+ tweep = rh->fPart.fSweep;
+ } else {
+ scratch[1] = rh->fPart.fCurve[1] - rh->fPart.fCurve[0];
+ tweep = &scratch[1];
+ }
+ double s0xt0 = sweep->crossCheck(*tweep);
+ if (tangentsDiverge(rh, s0xt0)) {
+ return s0xt0 < 0;
+ }
+    // compute the perpendicular to the endpoints and see where it intersects the opposite curve
+    // if the intersections are within the t range, do a cross check on those
+ bool inside;
+ if (!fEnd->contains(rh->fEnd)) {
+ if (this->endToSide(rh, &inside)) {
+ return inside;
+ }
+ if (rh->endToSide(this, &inside)) {
+ return !inside;
+ }
+ }
+ if (this->midToSide(rh, &inside)) {
+ return inside;
+ }
+ if (rh->midToSide(this, &inside)) {
+ return !inside;
+ }
+ // compute the cross check from the mid T values (last resort)
+ SkDVector m0 = segment()->dPtAtT(this->midT()) - this->fPart.fCurve[0];
+ SkDVector m1 = rh->segment()->dPtAtT(rh->midT()) - rh->fPart.fCurve[0];
+ double m0xm1 = m0.crossCheck(m1);
+ if (m0xm1 == 0) {
+ this->fUnorderable = true;
+ rh->fUnorderable = true;
+ return true;
+ }
+ return m0xm1 < 0;
+}
+
+// the original angle is too short to get meaningful sector information
+// lengthen it until it is long enough to be meaningful or leave it unset if lengthening it
+// would cause it to intersect one of the adjacent angles
+bool SkOpAngle::computeSector() {
+ if (fComputedSector) {
+ return !fUnorderable;
+ }
+ fComputedSector = true;
+ bool stepUp = fStart->t() < fEnd->t();
+ SkOpSpanBase* checkEnd = fEnd;
+ if (checkEnd->final() && stepUp) {
+ fUnorderable = true;
+ return false;
+ }
+ do {
+// advance end
+ const SkOpSegment* other = checkEnd->segment();
+ const SkOpSpanBase* oSpan = other->head();
+ do {
+ if (oSpan->segment() != segment()) {
+ continue;
+ }
+ if (oSpan == checkEnd) {
+ continue;
+ }
+ if (!approximately_equal(oSpan->t(), checkEnd->t())) {
+ continue;
+ }
+ goto recomputeSector;
+ } while (!oSpan->final() && (oSpan = oSpan->upCast()->next()));
+ checkEnd = stepUp ? !checkEnd->final()
+ ? checkEnd->upCast()->next() : nullptr
+ : checkEnd->prev();
+ } while (checkEnd);
+recomputeSector:
+ SkOpSpanBase* computedEnd = stepUp ? checkEnd ? checkEnd->prev() : fEnd->segment()->head()
+ : checkEnd ? checkEnd->upCast()->next() : fEnd->segment()->tail();
+ if (checkEnd == fEnd || computedEnd == fEnd || computedEnd == fStart) {
+ fUnorderable = true;
+ return false;
+ }
+ if (stepUp != (fStart->t() < computedEnd->t())) {
+ fUnorderable = true;
+ return false;
+ }
+ SkOpSpanBase* saveEnd = fEnd;
+ fComputedEnd = fEnd = computedEnd;
+ setSpans();
+ setSector();
+ fEnd = saveEnd;
+ return !fUnorderable;
+}
+
+int SkOpAngle::convexHullOverlaps(const SkOpAngle* rh) const {
+ const SkDVector* sweep = this->fPart.fSweep;
+ const SkDVector* tweep = rh->fPart.fSweep;
+ double s0xs1 = sweep[0].crossCheck(sweep[1]);
+ double s0xt0 = sweep[0].crossCheck(tweep[0]);
+ double s1xt0 = sweep[1].crossCheck(tweep[0]);
+ bool tBetweenS = s0xs1 > 0 ? s0xt0 > 0 && s1xt0 < 0 : s0xt0 < 0 && s1xt0 > 0;
+ double s0xt1 = sweep[0].crossCheck(tweep[1]);
+ double s1xt1 = sweep[1].crossCheck(tweep[1]);
+ tBetweenS |= s0xs1 > 0 ? s0xt1 > 0 && s1xt1 < 0 : s0xt1 < 0 && s1xt1 > 0;
+ double t0xt1 = tweep[0].crossCheck(tweep[1]);
+ if (tBetweenS) {
+ return -1;
+ }
+ if ((s0xt0 == 0 && s1xt1 == 0) || (s1xt0 == 0 && s0xt1 == 0)) { // s0 to s1 equals t0 to t1
+ return -1;
+ }
+ bool sBetweenT = t0xt1 > 0 ? s0xt0 < 0 && s0xt1 > 0 : s0xt0 > 0 && s0xt1 < 0;
+ sBetweenT |= t0xt1 > 0 ? s1xt0 < 0 && s1xt1 > 0 : s1xt0 > 0 && s1xt1 < 0;
+ if (sBetweenT) {
+ return -1;
+ }
+ // if all of the sweeps are in the same half plane, then the order of any pair is enough
+ if (s0xt0 >= 0 && s0xt1 >= 0 && s1xt0 >= 0 && s1xt1 >= 0) {
+ return 0;
+ }
+ if (s0xt0 <= 0 && s0xt1 <= 0 && s1xt0 <= 0 && s1xt1 <= 0) {
+ return 1;
+ }
+    // if the outside sweeps are greater than 180 degrees:
+    // first assume the initial tangents are the ordering
+    // if the midpoint direction matches the initial order, that is enough

+ SkDVector m0 = this->segment()->dPtAtT(this->midT()) - this->fPart.fCurve[0];
+ SkDVector m1 = rh->segment()->dPtAtT(rh->midT()) - rh->fPart.fCurve[0];
+ double m0xm1 = m0.crossCheck(m1);
+ if (s0xt0 > 0 && m0xm1 > 0) {
+ return 0;
+ }
+ if (s0xt0 < 0 && m0xm1 < 0) {
+ return 1;
+ }
+ if (tangentsDiverge(rh, s0xt0)) {
+ return s0xt0 < 0;
+ }
+ return m0xm1 < 0;
+}
+
+// OPTIMIZATION: longest can all be either lazily computed here or precomputed in setup
+double SkOpAngle::distEndRatio(double dist) const {
+ double longest = 0;
+ const SkOpSegment& segment = *this->segment();
+ int ptCount = SkPathOpsVerbToPoints(segment.verb());
+ const SkPoint* pts = segment.pts();
+ for (int idx1 = 0; idx1 <= ptCount - 1; ++idx1) {
+ for (int idx2 = idx1 + 1; idx2 <= ptCount; ++idx2) {
+ if (idx1 == idx2) {
+ continue;
+ }
+ SkDVector v;
+ v.set(pts[idx2] - pts[idx1]);
+ double lenSq = v.lengthSquared();
+ longest = SkTMax(longest, lenSq);
+ }
+ }
+ return sqrt(longest) / dist;
+}
+
+bool SkOpAngle::endsIntersect(SkOpAngle* rh) {
+ SkPath::Verb lVerb = this->segment()->verb();
+ SkPath::Verb rVerb = rh->segment()->verb();
+ int lPts = SkPathOpsVerbToPoints(lVerb);
+ int rPts = SkPathOpsVerbToPoints(rVerb);
+ SkDLine rays[] = {{{this->fPart.fCurve[0], rh->fPart.fCurve[rPts]}},
+ {{this->fPart.fCurve[0], this->fPart.fCurve[lPts]}}};
+ if (this->fEnd->contains(rh->fEnd)) {
+ return checkParallel(rh);
+ }
+ double smallTs[2] = {-1, -1};
+ bool limited[2] = {false, false};
+ for (int index = 0; index < 2; ++index) {
+ SkPath::Verb cVerb = index ? rVerb : lVerb;
+ // if the curve is a line, then the line and the ray intersect only at their crossing
+ if (cVerb == SkPath::kLine_Verb) {
+ continue;
+ }
+ const SkOpSegment& segment = index ? *rh->segment() : *this->segment();
+ SkIntersections i;
+ (*CurveIntersectRay[cVerb])(segment.pts(), segment.weight(), rays[index], &i);
+ double tStart = index ? rh->fStart->t() : this->fStart->t();
+ double tEnd = index ? rh->fComputedEnd->t() : this->fComputedEnd->t();
+ bool testAscends = tStart < (index ? rh->fComputedEnd->t() : this->fComputedEnd->t());
+ double t = testAscends ? 0 : 1;
+ for (int idx2 = 0; idx2 < i.used(); ++idx2) {
+ double testT = i[0][idx2];
+ if (!approximately_between_orderable(tStart, testT, tEnd)) {
+ continue;
+ }
+ if (approximately_equal_orderable(tStart, testT)) {
+ continue;
+ }
+ smallTs[index] = t = testAscends ? SkTMax(t, testT) : SkTMin(t, testT);
+ limited[index] = approximately_equal_orderable(t, tEnd);
+ }
+ }
+ bool sRayLonger = false;
+ SkDVector sCept = {0, 0};
+ double sCeptT = -1;
+ int sIndex = -1;
+ bool useIntersect = false;
+ for (int index = 0; index < 2; ++index) {
+ if (smallTs[index] < 0) {
+ continue;
+ }
+ const SkOpSegment& segment = index ? *rh->segment() : *this->segment();
+ const SkDPoint& dPt = segment.dPtAtT(smallTs[index]);
+ SkDVector cept = dPt - rays[index][0];
+ // If this point is on the curve, it should have been detected earlier by ordinary
+ // curve intersection. This may be hard to determine in general, but for lines,
+ // the point could be close to or equal to its end, but shouldn't be near the start.
+ if ((index ? lPts : rPts) == 1) {
+ SkDVector total = rays[index][1] - rays[index][0];
+ if (cept.lengthSquared() * 2 < total.lengthSquared()) {
+ continue;
+ }
+ }
+ SkDVector end = rays[index][1] - rays[index][0];
+ if (cept.fX * end.fX < 0 || cept.fY * end.fY < 0) {
+ continue;
+ }
+ double rayDist = cept.length();
+ double endDist = end.length();
+ bool rayLonger = rayDist > endDist;
+ if (limited[0] && limited[1] && rayLonger) {
+ useIntersect = true;
+ sRayLonger = rayLonger;
+ sCept = cept;
+ sCeptT = smallTs[index];
+ sIndex = index;
+ break;
+ }
+ double delta = fabs(rayDist - endDist);
+ double minX, minY, maxX, maxY;
+ minX = minY = SK_ScalarInfinity;
+ maxX = maxY = -SK_ScalarInfinity;
+ const SkDCurve& curve = index ? rh->fPart.fCurve : this->fPart.fCurve;
+ int ptCount = index ? rPts : lPts;
+ for (int idx2 = 0; idx2 <= ptCount; ++idx2) {
+ minX = SkTMin(minX, curve[idx2].fX);
+ minY = SkTMin(minY, curve[idx2].fY);
+ maxX = SkTMax(maxX, curve[idx2].fX);
+ maxY = SkTMax(maxY, curve[idx2].fY);
+ }
+ double maxWidth = SkTMax(maxX - minX, maxY - minY);
+ delta /= maxWidth;
+ if (delta > 1e-3 && (useIntersect ^= true)) { // FIXME: move this magic number
+ sRayLonger = rayLonger;
+ sCept = cept;
+ sCeptT = smallTs[index];
+ sIndex = index;
+ }
+ }
+ if (useIntersect) {
+ const SkDCurve& curve = sIndex ? rh->fPart.fCurve : this->fPart.fCurve;
+ const SkOpSegment& segment = sIndex ? *rh->segment() : *this->segment();
+ double tStart = sIndex ? rh->fStart->t() : fStart->t();
+ SkDVector mid = segment.dPtAtT(tStart + (sCeptT - tStart) / 2) - curve[0];
+ double septDir = mid.crossCheck(sCept);
+ if (!septDir) {
+ return checkParallel(rh);
+ }
+ return sRayLonger ^ (sIndex == 0) ^ (septDir < 0);
+ } else {
+ return checkParallel(rh);
+ }
+}
+
+bool SkOpAngle::endToSide(const SkOpAngle* rh, bool* inside) const {
+ const SkOpSegment* segment = this->segment();
+ SkPath::Verb verb = segment->verb();
+ SkDLine rayEnd;
+ rayEnd[0].set(this->fEnd->pt());
+ rayEnd[1] = rayEnd[0];
+ SkDVector slopeAtEnd = (*CurveDSlopeAtT[verb])(segment->pts(), segment->weight(),
+ this->fEnd->t());
+ rayEnd[1].fX += slopeAtEnd.fY;
+ rayEnd[1].fY -= slopeAtEnd.fX;
+ SkIntersections iEnd;
+ const SkOpSegment* oppSegment = rh->segment();
+ SkPath::Verb oppVerb = oppSegment->verb();
+ (*CurveIntersectRay[oppVerb])(oppSegment->pts(), oppSegment->weight(), rayEnd, &iEnd);
+ double endDist;
+ int closestEnd = iEnd.closestTo(rh->fStart->t(), rh->fEnd->t(), rayEnd[0], &endDist);
+ if (closestEnd < 0) {
+ return false;
+ }
+ if (!endDist) {
+ return false;
+ }
+ SkDPoint start;
+ start.set(this->fStart->pt());
+ // OPTIMIZATION: multiple times in the code we find the max scalar
+ double minX, minY, maxX, maxY;
+ minX = minY = SK_ScalarInfinity;
+ maxX = maxY = -SK_ScalarInfinity;
+ const SkDCurve& curve = rh->fPart.fCurve;
+ int oppPts = SkPathOpsVerbToPoints(oppVerb);
+ for (int idx2 = 0; idx2 <= oppPts; ++idx2) {
+ minX = SkTMin(minX, curve[idx2].fX);
+ minY = SkTMin(minY, curve[idx2].fY);
+ maxX = SkTMax(maxX, curve[idx2].fX);
+ maxY = SkTMax(maxY, curve[idx2].fY);
+ }
+ double maxWidth = SkTMax(maxX - minX, maxY - minY);
+ endDist /= maxWidth;
+ if (endDist < 5e-12) { // empirically found
+ return false;
+ }
+ const SkDPoint* endPt = &rayEnd[0];
+ SkDPoint oppPt = iEnd.pt(closestEnd);
+ SkDVector vLeft = *endPt - start;
+ SkDVector vRight = oppPt - start;
+ double dir = vLeft.crossNoNormalCheck(vRight);
+ if (!dir) {
+ return false;
+ }
+ *inside = dir < 0;
+ return true;
+}
+
+/* y<0 y==0 y>0 x<0 x==0 x>0 xy<0 xy==0 xy>0
+ 0 x x x
+ 1 x x x
+ 2 x x x
+ 3 x x x
+ 4 x x x
+ 5 x x x
+ 6 x x x
+ 7 x x x
+ 8 x x x
+ 9 x x x
+ 10 x x x
+ 11 x x x
+ 12 x x x
+ 13 x x x
+ 14 x x x
+ 15 x x x
+*/
+int SkOpAngle::findSector(SkPath::Verb verb, double x, double y) const {
+ double absX = fabs(x);
+ double absY = fabs(y);
+ double xy = SkPath::kLine_Verb == verb || !AlmostEqualUlps(absX, absY) ? absX - absY : 0;
+ // If there are four quadrants and eight octants, and since the Latin for sixteen is sedecim,
+ // one could coin the term sedecimant for a space divided into 16 sections.
+ // http://english.stackexchange.com/questions/133688/word-for-something-partitioned-into-16-parts
+ static const int sedecimant[3][3][3] = {
+ // y<0 y==0 y>0
+ // x<0 x==0 x>0 x<0 x==0 x>0 x<0 x==0 x>0
+ {{ 4, 3, 2}, { 7, -1, 15}, {10, 11, 12}}, // abs(x) < abs(y)
+ {{ 5, -1, 1}, {-1, -1, -1}, { 9, -1, 13}}, // abs(x) == abs(y)
+ {{ 6, 3, 0}, { 7, -1, 15}, { 8, 11, 14}}, // abs(x) > abs(y)
+ };
+ int sector = sedecimant[(xy >= 0) + (xy > 0)][(y >= 0) + (y > 0)][(x >= 0) + (x > 0)] * 2 + 1;
+// SkASSERT(SkPath::kLine_Verb == verb || sector >= 0);
+ return sector;
+}
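
The sedecimant lookup above can be spot-checked in isolation. Note that the sector numbers follow Skia's y-grows-down device space, so (0, 1) points straight down in the diagram near the top of this file. A standalone sketch, for illustration only, using exact equality rather than the AlmostEqualUlps test applied to curves; the sector() helper name is invented here:

    #include <cassert>
    #include <cmath>

    static int sector(double x, double y) {
        double xy = std::fabs(x) - std::fabs(y);  // the real code snaps nearly-equal values to 0
        static const int sedecimant[3][3][3] = {
            {{ 4, 3, 2}, { 7, -1, 15}, {10, 11, 12}},   // abs(x) < abs(y)
            {{ 5, -1, 1}, {-1, -1, -1}, { 9, -1, 13}},  // abs(x) == abs(y)
            {{ 6, 3, 0}, { 7, -1, 15}, { 8, 11, 14}},   // abs(x) > abs(y)
        };
        return sedecimant[(xy >= 0) + (xy > 0)][(y >= 0) + (y > 0)][(x >= 0) + (x > 0)] * 2 + 1;
    }

    int main() {
        assert(sector(1, 0) == 31);  // +x axis
        assert(sector(0, 1) == 23);  // +y, which is straight down on screen
        assert(sector(1, 1) == 27);  // exact 45 degrees between them
        assert(sector(2, 1) == 29);  // closer to horizontal
        return 0;
    }
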
+
+SkOpGlobalState* SkOpAngle::globalState() const {
+ return this->segment()->globalState();
+}
+
+
+// OPTIMIZE: if this loops to only one other angle, after first compare fails, insert on other side
+// OPTIMIZE: return where insertion succeeded. Then, start next insertion on opposite side
+void SkOpAngle::insert(SkOpAngle* angle) {
+ if (angle->fNext) {
+ if (loopCount() >= angle->loopCount()) {
+ if (!merge(angle)) {
+ return;
+ }
+ } else if (fNext) {
+ if (!angle->merge(this)) {
+ return;
+ }
+ } else {
+ angle->insert(this);
+ }
+ return;
+ }
+ bool singleton = nullptr == fNext;
+ if (singleton) {
+ fNext = this;
+ }
+ SkOpAngle* next = fNext;
+ if (next->fNext == this) {
+ if (singleton || angle->after(this)) {
+ this->fNext = angle;
+ angle->fNext = next;
+ } else {
+ next->fNext = angle;
+ angle->fNext = this;
+ }
+ debugValidateNext();
+ return;
+ }
+ SkOpAngle* last = this;
+ do {
+ SkASSERT(last->fNext == next);
+ if (angle->after(last)) {
+ last->fNext = angle;
+ angle->fNext = next;
+ debugValidateNext();
+ return;
+ }
+ last = next;
+ next = next->fNext;
+ } while (true);
+}
+
+SkOpSpanBase* SkOpAngle::lastMarked() const {
+ if (fLastMarked) {
+ if (fLastMarked->chased()) {
+ return nullptr;
+ }
+ fLastMarked->setChased(true);
+ }
+ return fLastMarked;
+}
+
+bool SkOpAngle::loopContains(const SkOpAngle* angle) const {
+ if (!fNext) {
+ return false;
+ }
+ const SkOpAngle* first = this;
+ const SkOpAngle* loop = this;
+ const SkOpSegment* tSegment = angle->fStart->segment();
+ double tStart = angle->fStart->t();
+ double tEnd = angle->fEnd->t();
+ do {
+ const SkOpSegment* lSegment = loop->fStart->segment();
+ if (lSegment != tSegment) {
+ continue;
+ }
+ double lStart = loop->fStart->t();
+ if (lStart != tEnd) {
+ continue;
+ }
+ double lEnd = loop->fEnd->t();
+ if (lEnd == tStart) {
+ return true;
+ }
+ } while ((loop = loop->fNext) != first);
+ return false;
+}
+
+int SkOpAngle::loopCount() const {
+ int count = 0;
+ const SkOpAngle* first = this;
+ const SkOpAngle* next = this;
+ do {
+ next = next->fNext;
+ ++count;
+ } while (next && next != first);
+ return count;
+}
+
+bool SkOpAngle::merge(SkOpAngle* angle) {
+ SkASSERT(fNext);
+ SkASSERT(angle->fNext);
+ SkOpAngle* working = angle;
+ do {
+ if (this == working) {
+ return false;
+ }
+ working = working->fNext;
+ } while (working != angle);
+ do {
+ SkOpAngle* next = working->fNext;
+ working->fNext = nullptr;
+ insert(working);
+ working = next;
+ } while (working != angle);
+ // it's likely that a pair of the angles are unorderable
+ debugValidateNext();
+ return true;
+}
+
+double SkOpAngle::midT() const {
+ return (fStart->t() + fEnd->t()) / 2;
+}
+
+bool SkOpAngle::midToSide(const SkOpAngle* rh, bool* inside) const {
+ const SkOpSegment* segment = this->segment();
+ SkPath::Verb verb = segment->verb();
+ const SkPoint& startPt = this->fStart->pt();
+ const SkPoint& endPt = this->fEnd->pt();
+ SkDPoint dStartPt;
+ dStartPt.set(startPt);
+ SkDLine rayMid;
+ rayMid[0].fX = (startPt.fX + endPt.fX) / 2;
+ rayMid[0].fY = (startPt.fY + endPt.fY) / 2;
+ rayMid[1].fX = rayMid[0].fX + (endPt.fY - startPt.fY);
+ rayMid[1].fY = rayMid[0].fY - (endPt.fX - startPt.fX);
+ SkIntersections iMid;
+ (*CurveIntersectRay[verb])(segment->pts(), segment->weight(), rayMid, &iMid);
+ int iOutside = iMid.mostOutside(this->fStart->t(), this->fEnd->t(), dStartPt);
+ if (iOutside < 0) {
+ return false;
+ }
+ const SkOpSegment* oppSegment = rh->segment();
+ SkPath::Verb oppVerb = oppSegment->verb();
+ SkIntersections oppMid;
+ (*CurveIntersectRay[oppVerb])(oppSegment->pts(), oppSegment->weight(), rayMid, &oppMid);
+ int oppOutside = oppMid.mostOutside(rh->fStart->t(), rh->fEnd->t(), dStartPt);
+ if (oppOutside < 0) {
+ return false;
+ }
+ SkDVector iSide = iMid.pt(iOutside) - dStartPt;
+ SkDVector oppSide = oppMid.pt(oppOutside) - dStartPt;
+ double dir = iSide.crossCheck(oppSide);
+ if (!dir) {
+ return false;
+ }
+ *inside = dir < 0;
+ return true;
+}
+
+bool SkOpAngle::oppositePlanes(const SkOpAngle* rh) const {
+ int startSpan = SkTAbs(rh->fSectorStart - fSectorStart);
+ return startSpan >= 8;
+}
+
+bool SkOpAngle::orderable(SkOpAngle* rh) {
+ int result;
+ if (!fPart.isCurve()) {
+ if (!rh->fPart.isCurve()) {
+ double leftX = fTangentHalf.dx();
+ double leftY = fTangentHalf.dy();
+ double rightX = rh->fTangentHalf.dx();
+ double rightY = rh->fTangentHalf.dy();
+ double x_ry = leftX * rightY;
+ double rx_y = rightX * leftY;
+ if (x_ry == rx_y) {
+ if (leftX * rightX < 0 || leftY * rightY < 0) {
+ return true; // exactly 180 degrees apart
+ }
+ goto unorderable;
+ }
+ SkASSERT(x_ry != rx_y); // indicates an undetected coincidence -- worth finding earlier
+ return x_ry < rx_y;
+ }
+ if ((result = this->allOnOneSide(rh)) >= 0) {
+ return result;
+ }
+ if (fUnorderable || approximately_zero(rh->fSide)) {
+ goto unorderable;
+ }
+ } else if (!rh->fPart.isCurve()) {
+ if ((result = rh->allOnOneSide(this)) >= 0) {
+ return !result;
+ }
+ if (rh->fUnorderable || approximately_zero(fSide)) {
+ goto unorderable;
+ }
+ } else if ((result = this->convexHullOverlaps(rh)) >= 0) {
+ return result;
+ }
+ return this->endsIntersect(rh);
+unorderable:
+ fUnorderable = true;
+ rh->fUnorderable = true;
+ return true;
+}
+
+// OPTIMIZE: if this shows up in a profile, add a previous pointer
+// as is, this should be rarely called
+SkOpAngle* SkOpAngle::previous() const {
+ SkOpAngle* last = fNext;
+ do {
+ SkOpAngle* next = last->fNext;
+ if (next == this) {
+ return last;
+ }
+ last = next;
+ } while (true);
+}
+
+SkOpSegment* SkOpAngle::segment() const {
+ return fStart->segment();
+}
+
+void SkOpAngle::set(SkOpSpanBase* start, SkOpSpanBase* end) {
+ fStart = start;
+ fComputedEnd = fEnd = end;
+ SkASSERT(start != end);
+ fNext = nullptr;
+ fComputeSector = fComputedSector = fCheckCoincidence = false;
+ setSpans();
+ setSector();
+ SkDEBUGCODE(fID = start ? start->globalState()->nextAngleID() : -1);
+}
+
+void SkOpAngle::setSpans() {
+ fUnorderable = false;
+ fLastMarked = nullptr;
+ if (!fStart) {
+ fUnorderable = true;
+ return;
+ }
+ const SkOpSegment* segment = fStart->segment();
+ const SkPoint* pts = segment->pts();
+ SkDEBUGCODE(fPart.fCurve.fVerb = SkPath::kCubic_Verb); // required for SkDCurve debug check
+ SkDEBUGCODE(fPart.fCurve[2].fX = fPart.fCurve[2].fY = fPart.fCurve[3].fX = fPart.fCurve[3].fY
+ = SK_ScalarNaN); // make the non-line part uninitialized
+ SkDEBUGCODE(fPart.fCurve.fVerb = segment->verb()); // set the curve type for real
+ segment->subDivide(fStart, fEnd, &fPart.fCurve); // set at least the line part if not more
+ fOriginalCurvePart = fPart.fCurve;
+ const SkPath::Verb verb = segment->verb();
+ fPart.setCurveHullSweep(verb);
+ if (SkPath::kLine_Verb != verb && !fPart.isCurve()) {
+ SkDLine lineHalf;
+ fPart.fCurve[1] = fPart.fCurve[SkPathOpsVerbToPoints(verb)];
+ fOriginalCurvePart[1] = fPart.fCurve[1];
+ lineHalf[0].set(fPart.fCurve[0].asSkPoint());
+ lineHalf[1].set(fPart.fCurve[1].asSkPoint());
+ fTangentHalf.lineEndPoints(lineHalf);
+ fSide = 0;
+ }
+ switch (verb) {
+ case SkPath::kLine_Verb: {
+ SkASSERT(fStart != fEnd);
+ const SkPoint& cP1 = pts[fStart->t() < fEnd->t()];
+ SkDLine lineHalf;
+ lineHalf[0].set(fStart->pt());
+ lineHalf[1].set(cP1);
+ fTangentHalf.lineEndPoints(lineHalf);
+ fSide = 0;
+ } return;
+ case SkPath::kQuad_Verb:
+ case SkPath::kConic_Verb: {
+ SkLineParameters tangentPart;
+ (void) tangentPart.quadEndPoints(fPart.fCurve.fQuad);
+ fSide = -tangentPart.pointDistance(fPart.fCurve[2]); // not normalized -- compare sign only
+ } break;
+ case SkPath::kCubic_Verb: {
+ SkLineParameters tangentPart;
+ (void) tangentPart.cubicPart(fPart.fCurve.fCubic);
+ fSide = -tangentPart.pointDistance(fPart.fCurve[3]);
+ double testTs[4];
+ // OPTIMIZATION: keep inflections precomputed with cubic segment?
+ int testCount = SkDCubic::FindInflections(pts, testTs);
+ double startT = fStart->t();
+ double endT = fEnd->t();
+ double limitT = endT;
+ int index;
+ for (index = 0; index < testCount; ++index) {
+ if (!::between(startT, testTs[index], limitT)) {
+ testTs[index] = -1;
+ }
+ }
+ testTs[testCount++] = startT;
+ testTs[testCount++] = endT;
+ SkTQSort<double>(testTs, &testTs[testCount - 1]);
+ double bestSide = 0;
+ int testCases = (testCount << 1) - 1;
+ index = 0;
+ while (testTs[index] < 0) {
+ ++index;
+ }
+ index <<= 1;
+ for (; index < testCases; ++index) {
+ int testIndex = index >> 1;
+ double testT = testTs[testIndex];
+ if (index & 1) {
+ testT = (testT + testTs[testIndex + 1]) / 2;
+ }
+ // OPTIMIZE: could avoid call for t == startT, endT
+ SkDPoint pt = dcubic_xy_at_t(pts, segment->weight(), testT);
+ SkLineParameters tangentPart;
+ tangentPart.cubicEndPoints(fPart.fCurve.fCubic);
+ double testSide = tangentPart.pointDistance(pt);
+ if (fabs(bestSide) < fabs(testSide)) {
+ bestSide = testSide;
+ }
+ }
+ fSide = -bestSide; // compare sign only
+ } break;
+ default:
+ SkASSERT(0);
+ }
+}
+
+void SkOpAngle::setSector() {
+ if (!fStart) {
+ fUnorderable = true;
+ return;
+ }
+ const SkOpSegment* segment = fStart->segment();
+ SkPath::Verb verb = segment->verb();
+ fSectorStart = this->findSector(verb, fPart.fSweep[0].fX, fPart.fSweep[0].fY);
+ if (fSectorStart < 0) {
+ goto deferTilLater;
+ }
+ if (!fPart.isCurve()) { // if it's a line or line-like, note that both sectors are the same
+ SkASSERT(fSectorStart >= 0);
+ fSectorEnd = fSectorStart;
+ fSectorMask = 1 << fSectorStart;
+ return;
+ }
+ SkASSERT(SkPath::kLine_Verb != verb);
+ fSectorEnd = this->findSector(verb, fPart.fSweep[1].fX, fPart.fSweep[1].fY);
+ if (fSectorEnd < 0) {
+deferTilLater:
+ fSectorStart = fSectorEnd = -1;
+ fSectorMask = 0;
+ fComputeSector = true; // can't determine sector until segment length can be found
+ return;
+ }
+ if (fSectorEnd == fSectorStart
+ && (fSectorStart & 3) != 3) { // if the sector has no span, it can't be an exact angle
+ fSectorMask = 1 << fSectorStart;
+ return;
+ }
+ bool crossesZero = this->checkCrossesZero();
+ int start = SkTMin(fSectorStart, fSectorEnd);
+ bool curveBendsCCW = (fSectorStart == start) ^ crossesZero;
+ // bump the start and end of the sector span if they are on exact compass points
+ if ((fSectorStart & 3) == 3) {
+ fSectorStart = (fSectorStart + (curveBendsCCW ? 1 : 31)) & 0x1f;
+ }
+ if ((fSectorEnd & 3) == 3) {
+ fSectorEnd = (fSectorEnd + (curveBendsCCW ? 31 : 1)) & 0x1f;
+ }
+ crossesZero = this->checkCrossesZero();
+ start = SkTMin(fSectorStart, fSectorEnd);
+ int end = SkTMax(fSectorStart, fSectorEnd);
+ if (!crossesZero) {
+ fSectorMask = (unsigned) -1 >> (31 - end + start) << start;
+ } else {
+ fSectorMask = (unsigned) -1 >> (31 - start) | ((unsigned) -1 << end);
+ }
+}
+
+SkOpSpan* SkOpAngle::starter() {
+ return fStart->starter(fEnd);
+}
+
+bool SkOpAngle::tangentsDiverge(const SkOpAngle* rh, double s0xt0) const {
+ if (s0xt0 == 0) {
+ return false;
+ }
+ // if the ctrl tangents are not nearly parallel, use them
+ // solve for opposite direction displacement scale factor == m
+ // initial dir = v1.cross(v2) == v2.x * v1.y - v2.y * v1.x
+ // displacement of q1[1] : dq1 = { -m * v1.y, m * v1.x } + q1[1]
+ // straight angle when : v2.x * (dq1.y - q1[0].y) == v2.y * (dq1.x - q1[0].x)
+ // v2.x * (m * v1.x + v1.y) == v2.y * (-m * v1.y + v1.x)
+ // - m * (v2.x * v1.x + v2.y * v1.y) == v2.x * v1.y - v2.y * v1.x
+ // m = (v2.y * v1.x - v2.x * v1.y) / (v2.x * v1.x + v2.y * v1.y)
+ // m = v1.cross(v2) / v1.dot(v2)
+ const SkDVector* sweep = fPart.fSweep;
+ const SkDVector* tweep = rh->fPart.fSweep;
+ double s0dt0 = sweep[0].dot(tweep[0]);
+ if (!s0dt0) {
+ return true;
+ }
+ SkASSERT(s0dt0 != 0);
+ double m = s0xt0 / s0dt0;
+ double sDist = sweep[0].length() * m;
+ double tDist = tweep[0].length() * m;
+ bool useS = fabs(sDist) < fabs(tDist);
+ double mFactor = fabs(useS ? this->distEndRatio(sDist) : rh->distEndRatio(tDist));
+ return mFactor < 50; // empirically found limit
+}
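
The displacement factor m derived in the comment block of tangentsDiverge() is simply cross over dot, i.e. the tangent of the angle between the two sweep vectors, which the code then scales by segment length (distEndRatio) before comparing against an empirical limit. A minimal numeric check, for illustration only:

    #include <cassert>
    #include <cmath>

    int main() {
        // two tangent directions 45 degrees apart
        double v1x = 1, v1y = 0;
        double v2x = 1, v2y = 1;
        double cross = v1x * v2y - v1y * v2x;  // v1.cross(v2)
        double dot   = v1x * v2x + v1y * v2y;  // v1.dot(v2)
        double m = cross / dot;                // the derivation's displacement scale factor
        assert(std::fabs(m - 1.0) < 1e-12);    // tan(45 degrees) == 1
        return 0;
    }
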
diff --git a/gfx/skia/skia/src/pathops/SkOpAngle.h b/gfx/skia/skia/src/pathops/SkOpAngle.h
new file mode 100644
index 000000000..cbdadf103
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpAngle.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOpAngle_DEFINED
+#define SkOpAngle_DEFINED
+
+#include "SkLineParameters.h"
+#include "SkPathOpsCurve.h"
+#if DEBUG_ANGLE
+#include "SkString.h"
+#endif
+
+class SkOpContour;
+class SkOpPtT;
+class SkOpSegment;
+class SkOpSpanBase;
+class SkOpSpan;
+
+class SkOpAngle {
+public:
+ enum IncludeType {
+ kUnaryWinding,
+ kUnaryXor,
+ kBinarySingle,
+ kBinaryOpp,
+ };
+
+ const SkOpAngle* debugAngle(int id) const;
+ const SkOpCoincidence* debugCoincidence() const;
+ SkOpContour* debugContour(int id) const;
+
+ int debugID() const {
+ return SkDEBUGRELEASE(fID, -1);
+ }
+
+#if DEBUG_SORT
+ void debugLoop() const;
+#endif
+
+#if DEBUG_ANGLE
+ bool debugCheckCoincidence() const { return fCheckCoincidence; }
+ void debugCheckNearCoincidence() const;
+ SkString debugPart() const;
+#endif
+ const SkOpPtT* debugPtT(int id) const;
+ const SkOpSegment* debugSegment(int id) const;
+ int debugSign() const;
+ const SkOpSpanBase* debugSpan(int id) const;
+ void debugValidate() const;
+ void debugValidateNext() const; // in debug builds, verify that angle loop is uncorrupted
+ double distEndRatio(double dist) const;
+ // available to testing only
+ void dump() const;
+ void dumpCurves() const;
+ void dumpLoop() const;
+ void dumpOne(bool functionHeader) const;
+ void dumpTo(const SkOpSegment* fromSeg, const SkOpAngle* ) const;
+ void dumpTest() const;
+
+ SkOpSpanBase* end() const {
+ return fEnd;
+ }
+
+ void insert(SkOpAngle* );
+ SkOpSpanBase* lastMarked() const;
+ bool loopContains(const SkOpAngle* ) const;
+ int loopCount() const;
+
+ SkOpAngle* next() const {
+ return fNext;
+ }
+
+ SkOpAngle* previous() const;
+ SkOpSegment* segment() const;
+ void set(SkOpSpanBase* start, SkOpSpanBase* end);
+
+ void setLastMarked(SkOpSpanBase* marked) {
+ fLastMarked = marked;
+ }
+
+ SkOpSpanBase* start() const {
+ return fStart;
+ }
+
+ SkOpSpan* starter();
+
+ bool unorderable() const {
+ return fUnorderable;
+ }
+
+private:
+ bool after(SkOpAngle* test);
+ int allOnOneSide(const SkOpAngle* test);
+ bool checkCrossesZero() const;
+ bool checkParallel(SkOpAngle* );
+ bool computeSector();
+ int convexHullOverlaps(const SkOpAngle* ) const;
+ bool endToSide(const SkOpAngle* rh, bool* inside) const;
+ bool endsIntersect(SkOpAngle* );
+ int findSector(SkPath::Verb verb, double x, double y) const;
+ SkOpGlobalState* globalState() const;
+ bool merge(SkOpAngle* );
+ double midT() const;
+ bool midToSide(const SkOpAngle* rh, bool* inside) const;
+ bool oppositePlanes(const SkOpAngle* rh) const;
+ bool orderable(SkOpAngle* rh); // false == this < rh ; true == this > rh
+ void setSector();
+ void setSpans();
+ bool tangentsDiverge(const SkOpAngle* rh, double s0xt0) const;
+
+ SkDCurve fOriginalCurvePart; // the curve from start to end
+ SkDCurveSweep fPart; // the curve from start to end offset as needed
+ double fSide;
+ SkLineParameters fTangentHalf; // used only to sort a pair of lines or line-like sections
+ SkOpAngle* fNext;
+ SkOpSpanBase* fLastMarked;
+ SkOpSpanBase* fStart;
+ SkOpSpanBase* fEnd;
+ SkOpSpanBase* fComputedEnd;
+ int fSectorMask;
+ int8_t fSectorStart; // in 32nds of a circle
+ int8_t fSectorEnd;
+ bool fUnorderable;
+ bool fComputeSector;
+ bool fComputedSector;
+ bool fCheckCoincidence;
+ SkDEBUGCODE(int fID);
+
+ friend class PathOpsAngleTester;
+};
+
+
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkOpBuilder.cpp b/gfx/skia/skia/src/pathops/SkOpBuilder.cpp
new file mode 100644
index 000000000..011d6a6ab
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpBuilder.cpp
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMatrix.h"
+#include "SkOpEdgeBuilder.h"
+#include "SkPathPriv.h"
+#include "SkPathOps.h"
+#include "SkPathOpsCommon.h"
+
+static bool one_contour(const SkPath& path) {
+ SkChunkAlloc allocator(256);
+ int verbCount = path.countVerbs();
+ uint8_t* verbs = (uint8_t*) allocator.alloc(sizeof(uint8_t) * verbCount,
+ SkChunkAlloc::kThrow_AllocFailType);
+ (void) path.getVerbs(verbs, verbCount);
+ for (int index = 1; index < verbCount; ++index) {
+ if (verbs[index] == SkPath::kMove_Verb) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool FixWinding(SkPath* path) {
+ SkPath::FillType fillType = path->getFillType();
+ if (fillType == SkPath::kInverseEvenOdd_FillType) {
+ fillType = SkPath::kInverseWinding_FillType;
+ } else if (fillType == SkPath::kEvenOdd_FillType) {
+ fillType = SkPath::kWinding_FillType;
+ }
+ SkPathPriv::FirstDirection dir;
+ if (one_contour(*path) && SkPathPriv::CheapComputeFirstDirection(*path, &dir)) {
+ if (dir != SkPathPriv::kCCW_FirstDirection) {
+ SkPath temp;
+ temp.reverseAddPath(*path);
+ *path = temp;
+ }
+ path->setFillType(fillType);
+ return true;
+ }
+ SkChunkAlloc allocator(4096);
+ SkOpContourHead contourHead;
+ SkOpGlobalState globalState(&contourHead, &allocator SkDEBUGPARAMS(false)
+ SkDEBUGPARAMS(nullptr));
+ SkOpEdgeBuilder builder(*path, &contourHead, &globalState);
+ if (builder.unparseable() || !builder.finish()) {
+ return false;
+ }
+ if (!contourHead.count()) {
+ return true;
+ }
+ if (!contourHead.next()) {
+ return false;
+ }
+ contourHead.joinAllSegments();
+ contourHead.resetReverse();
+ bool writePath = false;
+ SkOpSpan* topSpan;
+ globalState.setPhase(SkOpPhase::kFixWinding);
+ while ((topSpan = FindSortableTop(&contourHead))) {
+ SkOpSegment* topSegment = topSpan->segment();
+ SkOpContour* topContour = topSegment->contour();
+ SkASSERT(topContour->isCcw() >= 0);
+#if DEBUG_WINDING
+ SkDebugf("%s id=%d nested=%d ccw=%d\n", __FUNCTION__,
+ topSegment->debugID(), globalState.nested(), topContour->isCcw());
+#endif
+ if ((globalState.nested() & 1) != SkToBool(topContour->isCcw())) {
+ topContour->setReverse();
+ writePath = true;
+ }
+ topContour->markAllDone();
+ globalState.clearNested();
+ }
+ if (!writePath) {
+ path->setFillType(fillType);
+ return true;
+ }
+ SkPath empty;
+ SkPathWriter woundPath(empty);
+ SkOpContour* test = &contourHead;
+ do {
+ if (test->reversed()) {
+ test->toReversePath(&woundPath);
+ } else {
+ test->toPath(&woundPath);
+ }
+ } while ((test = test->next()));
+ *path = *woundPath.nativePath();
+ path->setFillType(fillType);
+ return true;
+}
+
+void SkOpBuilder::add(const SkPath& path, SkPathOp op) {
+ if (0 == fOps.count() && op != kUnion_SkPathOp) {
+ fPathRefs.push_back() = SkPath();
+ *fOps.append() = kUnion_SkPathOp;
+ }
+ fPathRefs.push_back() = path;
+ *fOps.append() = op;
+}
+
+void SkOpBuilder::reset() {
+ fPathRefs.reset();
+ fOps.reset();
+}
+
+/* OPTIMIZATION: Union doesn't need to be all-or-nothing. A run of three or more convex
+ paths with union ops could be locally resolved and still improve over doing the
+ ops one at a time. */
+bool SkOpBuilder::resolve(SkPath* result) {
+ SkPath original = *result;
+ int count = fOps.count();
+ bool allUnion = true;
+ SkPathPriv::FirstDirection firstDir = SkPathPriv::kUnknown_FirstDirection;
+ for (int index = 0; index < count; ++index) {
+ SkPath* test = &fPathRefs[index];
+ if (kUnion_SkPathOp != fOps[index] || test->isInverseFillType()) {
+ allUnion = false;
+ break;
+ }
+ // If all paths are convex, track direction, reversing as needed.
+ if (test->isConvex()) {
+ SkPathPriv::FirstDirection dir;
+ if (!SkPathPriv::CheapComputeFirstDirection(*test, &dir)) {
+ allUnion = false;
+ break;
+ }
+ if (firstDir == SkPathPriv::kUnknown_FirstDirection) {
+ firstDir = dir;
+ } else if (firstDir != dir) {
+ SkPath temp;
+ temp.reverseAddPath(*test);
+ *test = temp;
+ }
+ continue;
+ }
+ // If the path is not convex but its bounds do not intersect the others, simplify is enough.
+ const SkRect& testBounds = test->getBounds();
+ for (int inner = 0; inner < index; ++inner) {
+ // OPTIMIZE: check to see if the contour bounds do not intersect other contour bounds?
+ if (SkRect::Intersects(fPathRefs[inner].getBounds(), testBounds)) {
+ allUnion = false;
+ break;
+ }
+ }
+ }
+ if (!allUnion) {
+ *result = fPathRefs[0];
+ for (int index = 1; index < count; ++index) {
+ if (!Op(*result, fPathRefs[index], fOps[index], result)) {
+ reset();
+ *result = original;
+ return false;
+ }
+ }
+ reset();
+ return true;
+ }
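+ // all-union fast path: simplify each path, restore winding, accumulate, then simplify the sum once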
+ SkPath sum;
+ for (int index = 0; index < count; ++index) {
+ if (!Simplify(fPathRefs[index], &fPathRefs[index])) {
+ reset();
+ *result = original;
+ return false;
+ }
+ if (!fPathRefs[index].isEmpty()) {
+ // convert the even-odd result back to winding form before accumulating it
+ if (!FixWinding(&fPathRefs[index])) {
+ *result = original;
+ return false;
+ }
+ sum.addPath(fPathRefs[index]);
+ }
+ }
+ reset();
+ bool success = Simplify(sum, result);
+ if (!success) {
+ *result = original;
+ }
+ return success;
+}
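+/* Minimal usage sketch (illustrative only; pathA and pathB are placeholder SkPath objects,
+   not part of this source):
+
+       SkOpBuilder builder;
+       builder.add(pathA, kUnion_SkPathOp);        // the first op added is treated as a union
+       builder.add(pathB, kDifference_SkPathOp);
+       SkPath result;
+       bool ok = builder.resolve(&result);         // on failure, result keeps its incoming value
+*/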
diff --git a/gfx/skia/skia/src/pathops/SkOpCoincidence.cpp b/gfx/skia/skia/src/pathops/SkOpCoincidence.cpp
new file mode 100755
index 000000000..f0481ab24
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpCoincidence.cpp
@@ -0,0 +1,1363 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkOpCoincidence.h"
+#include "SkOpSegment.h"
+#include "SkPathOpsTSect.h"
+
+// returns true if coincident span's start and end are the same
+bool SkCoincidentSpans::collapsed(const SkOpPtT* test) const {
+ return (fCoinPtTStart == test && fCoinPtTEnd->contains(test))
+ || (fCoinPtTEnd == test && fCoinPtTStart->contains(test))
+ || (fOppPtTStart == test && fOppPtTEnd->contains(test))
+ || (fOppPtTEnd == test && fOppPtTStart->contains(test));
+}
+
+// sets the span's end to the ptT reached through the span's previous/next links
+void SkCoincidentSpans::correctOneEnd(
+ const SkOpPtT* (SkCoincidentSpans::* getEnd)() const,
+ void (SkCoincidentSpans::*setEnd)(const SkOpPtT* ptT) ) {
+ const SkOpPtT* origPtT = (this->*getEnd)();
+ const SkOpSpanBase* origSpan = origPtT->span();
+ const SkOpSpan* prev = origSpan->prev();
+ const SkOpPtT* testPtT = prev ? prev->next()->ptT()
+ : origSpan->upCast()->next()->prev()->ptT();
+ if (origPtT != testPtT) {
+ (this->*setEnd)(testPtT);
+ }
+}
+
+/* Please keep this in sync with debugCorrectEnds */
+// FIXME: member pointers have fallen out of favor and can be replaced with
+// an alternative approach.
+// makes all span ends agree with the segment's spans that define them
+void SkCoincidentSpans::correctEnds() {
+ this->correctOneEnd(&SkCoincidentSpans::coinPtTStart, &SkCoincidentSpans::setCoinPtTStart);
+ this->correctOneEnd(&SkCoincidentSpans::coinPtTEnd, &SkCoincidentSpans::setCoinPtTEnd);
+ this->correctOneEnd(&SkCoincidentSpans::oppPtTStart, &SkCoincidentSpans::setOppPtTStart);
+ this->correctOneEnd(&SkCoincidentSpans::oppPtTEnd, &SkCoincidentSpans::setOppPtTEnd);
+}
+
+/* Please keep this in sync with debugExpand */
+// expand the range by checking adjacent spans for coincidence
+bool SkCoincidentSpans::expand() {
+ bool expanded = false;
+ const SkOpSegment* segment = coinPtTStart()->segment();
+ const SkOpSegment* oppSegment = oppPtTStart()->segment();
+ do {
+ const SkOpSpan* start = coinPtTStart()->span()->upCast();
+ const SkOpSpan* prev = start->prev();
+ const SkOpPtT* oppPtT;
+ if (!prev || !(oppPtT = prev->contains(oppSegment))) {
+ break;
+ }
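+ // probe the midpoint of the candidate extension; only extend while the segments remain close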
+ double midT = (prev->t() + start->t()) / 2;
+ if (!segment->isClose(midT, oppSegment)) {
+ break;
+ }
+ setStarts(prev->ptT(), oppPtT);
+ expanded = true;
+ } while (true);
+ do {
+ const SkOpSpanBase* end = coinPtTEnd()->span();
+ SkOpSpanBase* next = end->final() ? nullptr : end->upCast()->next();
+ if (next && next->deleted()) {
+ break;
+ }
+ const SkOpPtT* oppPtT;
+ if (!next || !(oppPtT = next->contains(oppSegment))) {
+ break;
+ }
+ double midT = (end->t() + next->t()) / 2;
+ if (!segment->isClose(midT, oppSegment)) {
+ break;
+ }
+ setEnds(next->ptT(), oppPtT);
+ expanded = true;
+ } while (true);
+ return expanded;
+}
+
+// increase the range of this span
+bool SkCoincidentSpans::extend(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd,
+ const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd) {
+ bool result = false;
+ if (fCoinPtTStart->fT > coinPtTStart->fT || (this->flipped()
+ ? fOppPtTStart->fT < oppPtTStart->fT : fOppPtTStart->fT > oppPtTStart->fT)) {
+ this->setStarts(coinPtTStart, oppPtTStart);
+ result = true;
+ }
+ if (fCoinPtTEnd->fT < coinPtTEnd->fT || (this->flipped()
+ ? fOppPtTEnd->fT > oppPtTEnd->fT : fOppPtTEnd->fT < oppPtTEnd->fT)) {
+ this->setEnds(coinPtTEnd, oppPtTEnd);
+ result = true;
+ }
+ return result;
+}
+
+// set the range of this span
+void SkCoincidentSpans::set(SkCoincidentSpans* next, const SkOpPtT* coinPtTStart,
+ const SkOpPtT* coinPtTEnd, const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd) {
+ SkASSERT(SkOpCoincidence::Ordered(coinPtTStart, oppPtTStart));
+ fNext = next;
+ this->setStarts(coinPtTStart, oppPtTStart);
+ this->setEnds(coinPtTEnd, oppPtTEnd);
+}
+
+// returns true if both points are inside this
+bool SkCoincidentSpans::contains(const SkOpPtT* s, const SkOpPtT* e) const {
+ if (s->fT > e->fT) {
+ SkTSwap(s, e);
+ }
+ if (s->segment() == fCoinPtTStart->segment()) {
+ return fCoinPtTStart->fT <= s->fT && e->fT <= fCoinPtTEnd->fT;
+ } else {
+ SkASSERT(s->segment() == fOppPtTStart->segment());
+ double oppTs = fOppPtTStart->fT;
+ double oppTe = fOppPtTEnd->fT;
+ if (oppTs > oppTe) {
+ SkTSwap(oppTs, oppTe);
+ }
+ return oppTs <= s->fT && e->fT <= oppTe;
+ }
+}
+
+// A coincident span is unordered if the pairs of points in the main and opposite curves'
+// t values do not ascend or descend. For instance, if a tightly arced quadratic is
+// coincident with another curve, it may intersect it out of order.
+bool SkCoincidentSpans::ordered() const {
+ const SkOpSpanBase* start = this->coinPtTStart()->span();
+ const SkOpSpanBase* end = this->coinPtTEnd()->span();
+ const SkOpSpanBase* next = start->upCast()->next();
+ if (next == end) {
+ return true;
+ }
+ bool flipped = this->flipped();
+ const SkOpSegment* oppSeg = this->oppPtTStart()->segment();
+ double oppLastT = fOppPtTStart->fT;
+ do {
+ const SkOpPtT* opp = next->contains(oppSeg);
+ if (!opp) {
+ SkASSERT(0); // may assert if coincident span isn't fully processed
+ continue;
+ }
+ if ((oppLastT > opp->fT) != flipped) {
+ return false;
+ }
+ oppLastT = opp->fT;
+ if (next == end) {
+ break;
+ }
+ if (!next->upCastable()) {
+ return false;
+ }
+ next = next->upCast()->next();
+ } while (true);
+ return true;
+}
+
+// if there is an existing pair that overlaps the addition, extend it
+bool SkOpCoincidence::extend(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd,
+ const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd) {
+ SkCoincidentSpans* test = fHead;
+ if (!test) {
+ return false;
+ }
+ const SkOpSegment* coinSeg = coinPtTStart->segment();
+ const SkOpSegment* oppSeg = oppPtTStart->segment();
+ if (!Ordered(coinPtTStart, oppPtTStart)) {
+ SkTSwap(coinSeg, oppSeg);
+ SkTSwap(coinPtTStart, oppPtTStart);
+ SkTSwap(coinPtTEnd, oppPtTEnd);
+ if (coinPtTStart->fT > coinPtTEnd->fT) {
+ SkTSwap(coinPtTStart, coinPtTEnd);
+ SkTSwap(oppPtTStart, oppPtTEnd);
+ }
+ }
+ double oppMinT = SkTMin(oppPtTStart->fT, oppPtTEnd->fT);
+ SkDEBUGCODE(double oppMaxT = SkTMax(oppPtTStart->fT, oppPtTEnd->fT));
+ do {
+ if (coinSeg != test->coinPtTStart()->segment()) {
+ continue;
+ }
+ if (oppSeg != test->oppPtTStart()->segment()) {
+ continue;
+ }
+ double oTestMinT = SkTMin(test->oppPtTStart()->fT, test->oppPtTEnd()->fT);
+ double oTestMaxT = SkTMax(test->oppPtTStart()->fT, test->oppPtTEnd()->fT);
+ // if debug check triggers, caller failed to check if extended already exists
+ SkASSERT(test->coinPtTStart()->fT > coinPtTStart->fT
+ || coinPtTEnd->fT > test->coinPtTEnd()->fT
+ || oTestMinT > oppMinT || oppMaxT > oTestMaxT);
+ if ((test->coinPtTStart()->fT <= coinPtTEnd->fT
+ && coinPtTStart->fT <= test->coinPtTEnd()->fT)
+ || (oTestMinT <= oTestMaxT && oppMinT <= oTestMaxT)) {
+ test->extend(coinPtTStart, coinPtTEnd, oppPtTStart, oppPtTEnd);
+ return true;
+ }
+ } while ((test = test->next()));
+ return false;
+}
+
+// verifies that the coincidence hasn't already been added
+static void DebugCheckAdd(const SkCoincidentSpans* check, const SkOpPtT* coinPtTStart,
+ const SkOpPtT* coinPtTEnd, const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd) {
+#if DEBUG_COINCIDENCE
+ while (check) {
+ SkASSERT(check->coinPtTStart() != coinPtTStart || check->coinPtTEnd() != coinPtTEnd
+ || check->oppPtTStart() != oppPtTStart || check->oppPtTEnd() != oppPtTEnd);
+ SkASSERT(check->coinPtTStart() != oppPtTStart || check->coinPtTEnd() != oppPtTEnd
+ || check->oppPtTStart() != coinPtTStart || check->oppPtTEnd() != coinPtTEnd);
+ check = check->next();
+ }
+#endif
+}
+
+// adds a new coincident pair
+void SkOpCoincidence::add(SkOpPtT* coinPtTStart, SkOpPtT* coinPtTEnd, SkOpPtT* oppPtTStart,
+ SkOpPtT* oppPtTEnd) {
+ // OPTIMIZE: caller should have already sorted
+ if (!Ordered(coinPtTStart, oppPtTStart)) {
+ if (oppPtTStart->fT < oppPtTEnd->fT) {
+ this->add(oppPtTStart, oppPtTEnd, coinPtTStart, coinPtTEnd);
+ } else {
+ this->add(oppPtTEnd, oppPtTStart, coinPtTEnd, coinPtTStart);
+ }
+ return;
+ }
+ SkASSERT(Ordered(coinPtTStart, oppPtTStart));
+ // choose the ptT at the front of the list to track
+ coinPtTStart = coinPtTStart->span()->ptT();
+ coinPtTEnd = coinPtTEnd->span()->ptT();
+ oppPtTStart = oppPtTStart->span()->ptT();
+ oppPtTEnd = oppPtTEnd->span()->ptT();
+ SkASSERT(coinPtTStart->fT < coinPtTEnd->fT);
+ SkASSERT(oppPtTStart->fT != oppPtTEnd->fT);
+ SkOPASSERT(!coinPtTStart->deleted());
+ SkOPASSERT(!coinPtTEnd->deleted());
+ SkOPASSERT(!oppPtTStart->deleted());
+ SkOPASSERT(!oppPtTEnd->deleted());
+ DebugCheckAdd(fHead, coinPtTStart, coinPtTEnd, oppPtTStart, oppPtTEnd);
+ DebugCheckAdd(fTop, coinPtTStart, coinPtTEnd, oppPtTStart, oppPtTEnd);
+ SkCoincidentSpans* coinRec = SkOpTAllocator<SkCoincidentSpans>::Allocate(
+ this->globalState()->allocator());
+ coinRec->init(SkDEBUGCODE(fGlobalState));
+ coinRec->set(this->fHead, coinPtTStart, coinPtTEnd, oppPtTStart, oppPtTEnd);
+ fHead = coinRec;
+}
+
+// description below
+bool SkOpCoincidence::addEndMovedSpans(const SkOpSpan* base, const SkOpSpanBase* testSpan) {
+ const SkOpPtT* testPtT = testSpan->ptT();
+ const SkOpPtT* stopPtT = testPtT;
+ const SkOpSegment* baseSeg = base->segment();
+ while ((testPtT = testPtT->next()) != stopPtT) {
+ const SkOpSegment* testSeg = testPtT->segment();
+ if (testPtT->deleted()) {
+ continue;
+ }
+ if (testSeg == baseSeg) {
+ continue;
+ }
+ if (testPtT->span()->ptT() != testPtT) {
+ continue;
+ }
+ if (this->contains(baseSeg, testSeg, testPtT->fT)) {
+ continue;
+ }
+ // intersect the perpendicular at base->ptT() with testPtT->segment()
+ SkDVector dxdy = baseSeg->dSlopeAtT(base->t());
+ const SkPoint& pt = base->pt();
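+ // the ray direction (dy, -dx) is perpendicular to the curve's tangent at base->t()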
+ SkDLine ray = {{{pt.fX, pt.fY}, {pt.fX + dxdy.fY, pt.fY - dxdy.fX}}};
+ SkIntersections i;
+ (*CurveIntersectRay[testSeg->verb()])(testSeg->pts(), testSeg->weight(), ray, &i);
+ for (int index = 0; index < i.used(); ++index) {
+ double t = i[0][index];
+ if (!between(0, t, 1)) {
+ continue;
+ }
+ SkDPoint oppPt = i.pt(index);
+ if (!oppPt.approximatelyEqual(pt)) {
+ continue;
+ }
+ SkOpSegment* writableSeg = const_cast<SkOpSegment*>(testSeg);
+ SkOpPtT* oppStart = writableSeg->addT(t);
+ if (oppStart == testPtT) {
+ continue;
+ }
+ SkOpSpan* writableBase = const_cast<SkOpSpan*>(base);
+ oppStart->span()->addOpp(writableBase);
+ if (oppStart->deleted()) {
+ continue;
+ }
+ SkOpSegment* coinSeg = base->segment();
+ SkOpSegment* oppSeg = oppStart->segment();
+ double coinTs, coinTe, oppTs, oppTe;
+ if (Ordered(coinSeg, oppSeg)) {
+ coinTs = base->t();
+ coinTe = testSpan->t();
+ oppTs = oppStart->fT;
+ oppTe = testPtT->fT;
+ } else {
+ SkTSwap(coinSeg, oppSeg);
+ coinTs = oppStart->fT;
+ coinTe = testPtT->fT;
+ oppTs = base->t();
+ oppTe = testSpan->t();
+ }
+ if (coinTs > coinTe) {
+ SkTSwap(coinTs, coinTe);
+ SkTSwap(oppTs, oppTe);
+ }
+ bool added;
+ if (!this->addOrOverlap(coinSeg, oppSeg, coinTs, coinTe, oppTs, oppTe, &added)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+// description below
+bool SkOpCoincidence::addEndMovedSpans(const SkOpPtT* ptT) {
+ FAIL_IF(!ptT->span()->upCastable());
+ const SkOpSpan* base = ptT->span()->upCast();
+ const SkOpSpan* prev = base->prev();
+ FAIL_IF(!prev);
+ if (!prev->isCanceled()) {
+ if (!this->addEndMovedSpans(base, base->prev())) {
+ return false;
+ }
+ }
+ if (!base->isCanceled()) {
+ if (!this->addEndMovedSpans(base, base->next())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/* If A is coincident with B and B includes an endpoint, and A's matching point
+ is not the endpoint (i.e., there's an implied line connecting B-end and A)
+ then assume that the same implied line may intersect another curve close to B.
+ Since we only care about coincidence that was undetected, look at the
+ ptT list on B-segment adjacent to the B-end/A ptT loop (not in the loop, but
+ next door) and see if the A matching point is close enough to form another
+ coincident pair. If so, check for a new coincident span between B-end/A ptT loop
+ and the adjacent ptT loop.
+*/
+bool SkOpCoincidence::addEndMovedSpans(DEBUG_COIN_DECLARE_ONLY_PARAMS()) {
+ DEBUG_SET_PHASE();
+ SkCoincidentSpans* span = fHead;
+ if (!span) {
+ return true;
+ }
+ fTop = span;
+ fHead = nullptr;
+ do {
+ if (span->coinPtTStart()->fPt != span->oppPtTStart()->fPt) {
+ FAIL_IF(1 == span->coinPtTStart()->fT);
+ bool onEnd = span->coinPtTStart()->fT == 0;
+ bool oOnEnd = zero_or_one(span->oppPtTStart()->fT);
+ if (onEnd) {
+ if (!oOnEnd) { // if both are on end, any nearby intersect was already found
+ if (!this->addEndMovedSpans(span->oppPtTStart())) {
+ return false;
+ }
+ }
+ } else if (oOnEnd) {
+ if (!this->addEndMovedSpans(span->coinPtTStart())) {
+ return false;
+ }
+ }
+ }
+ if (span->coinPtTEnd()->fPt != span->oppPtTEnd()->fPt) {
+ bool onEnd = span->coinPtTEnd()->fT == 1;
+ bool oOnEnd = zero_or_one(span->oppPtTEnd()->fT);
+ if (onEnd) {
+ if (!oOnEnd) {
+ if (!this->addEndMovedSpans(span->oppPtTEnd())) {
+ return false;
+ }
+ }
+ } else if (oOnEnd) {
+ if (!this->addEndMovedSpans(span->coinPtTEnd())) {
+ return false;
+ }
+ }
+ }
+ } while ((span = span->next()));
+ this->restoreHead();
+ return true;
+}
+
+/* Please keep this in sync with debugAddExpanded */
+// for each coincident pair, match the spans
+// if the spans don't match, add the missing pt to the segment and loop it in the opposite span
+bool SkOpCoincidence::addExpanded(DEBUG_COIN_DECLARE_ONLY_PARAMS()) {
+ DEBUG_SET_PHASE();
+ SkCoincidentSpans* coin = this->fHead;
+ if (!coin) {
+ return true;
+ }
+ do {
+ const SkOpPtT* startPtT = coin->coinPtTStart();
+ const SkOpPtT* oStartPtT = coin->oppPtTStart();
+ double priorT = startPtT->fT;
+ double oPriorT = oStartPtT->fT;
+ FAIL_IF(!startPtT->contains(oStartPtT));
+ SkOPASSERT(coin->coinPtTEnd()->contains(coin->oppPtTEnd()));
+ const SkOpSpanBase* start = startPtT->span();
+ const SkOpSpanBase* oStart = oStartPtT->span();
+ const SkOpSpanBase* end = coin->coinPtTEnd()->span();
+ const SkOpSpanBase* oEnd = coin->oppPtTEnd()->span();
+ FAIL_IF(oEnd->deleted());
+ FAIL_IF(!start->upCastable());
+ const SkOpSpanBase* test = start->upCast()->next();
+ FAIL_IF(!coin->flipped() && !oStart->upCastable());
+ const SkOpSpanBase* oTest = coin->flipped() ? oStart->prev() : oStart->upCast()->next();
+ FAIL_IF(!oTest);
+ SkOpSegment* seg = start->segment();
+ SkOpSegment* oSeg = oStart->segment();
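+ // walk both span lists in lockstep; wherever one side lacks the matching ptT, it is added below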
+ while (test != end || oTest != oEnd) {
+ const SkOpPtT* containedOpp = test->ptT()->contains(oSeg);
+ const SkOpPtT* containedThis = oTest->ptT()->contains(seg);
+ if (!containedOpp || !containedThis) {
+ // choose the ends, or the first common pt-t list shared by both
+ double nextT, oNextT;
+ if (containedOpp) {
+ nextT = test->t();
+ oNextT = containedOpp->fT;
+ } else if (containedThis) {
+ nextT = containedThis->fT;
+ oNextT = oTest->t();
+ } else {
+ // iterate through until a pt-t list is found that contains the other
+ const SkOpSpanBase* walk = test;
+ const SkOpPtT* walkOpp;
+ do {
+ FAIL_IF(!walk->upCastable());
+ walk = walk->upCast()->next();
+ } while (!(walkOpp = walk->ptT()->contains(oSeg))
+ && walk != coin->coinPtTEnd()->span());
+ FAIL_IF(!walkOpp);
+ nextT = walk->t();
+ oNextT = walkOpp->fT;
+ }
+ // use t ranges to guess which one is missing
+ double startRange = nextT - priorT;
+ FAIL_IF(!startRange);
+ double startPart = (test->t() - priorT) / startRange;
+ double oStartRange = oNextT - oPriorT;
+ FAIL_IF(!oStartRange);
+ double oStartPart = (oTest->t() - oPriorT) / oStartRange;
+ FAIL_IF(startPart == oStartPart);
+ bool addToOpp = !containedOpp && !containedThis ? startPart < oStartPart
+ : !!containedThis;
+ bool startOver = false;
+ bool success = addToOpp ? oSeg->addExpanded(
+ oPriorT + oStartRange * startPart, test, &startOver)
+ : seg->addExpanded(
+ priorT + startRange * oStartPart, oTest, &startOver);
+ FAIL_IF(!success);
+ if (startOver) {
+ test = start;
+ oTest = oStart;
+ }
+ end = coin->coinPtTEnd()->span();
+ oEnd = coin->oppPtTEnd()->span();
+ }
+ if (test != end) {
+ FAIL_IF(!test->upCastable());
+ priorT = test->t();
+ test = test->upCast()->next();
+ }
+ if (oTest != oEnd) {
+ oPriorT = oTest->t();
+ oTest = coin->flipped() ? oTest->prev() : oTest->upCast()->next();
+ FAIL_IF(!oTest);
+ }
+
+ }
+ } while ((coin = coin->next()));
+ return true;
+}
+
+// given a t span, map the same range on the coincident span
+/*
+The curves may not scale linearly, so interpolation may only happen within known points:
+remap over1s, over1e, coinPtTStart, coinPtTEnd to the smallest range that captures over1s,
+then repeat to capture over1e.
+*/
+double SkOpCoincidence::TRange(const SkOpPtT* overS, double t,
+ const SkOpSegment* coinSeg SkDEBUGPARAMS(const SkOpPtT* overE)) {
+ const SkOpSpanBase* work = overS->span();
+ const SkOpPtT* foundStart = nullptr;
+ const SkOpPtT* foundEnd = nullptr;
+ const SkOpPtT* coinStart = nullptr;
+ const SkOpPtT* coinEnd = nullptr;
+ do {
+ const SkOpPtT* contained = work->contains(coinSeg);
+ if (!contained) {
+ if (work->final()) {
+ break;
+ }
+ continue;
+ }
+ if (work->t() <= t) {
+ coinStart = contained;
+ foundStart = work->ptT();
+ }
+ if (work->t() >= t) {
+ coinEnd = contained;
+ foundEnd = work->ptT();
+ break;
+ }
+ SkASSERT(work->ptT() != overE);
+ } while ((work = work->upCast()->next()));
+ if (!coinStart || !coinEnd) {
+ return 1;
+ }
+ // interpolate t between the bracketing spans, then map the ratio onto the coincident segment's t range
+ double denom = foundEnd->fT - foundStart->fT;
+ double sRatio = denom ? (t - foundStart->fT) / denom : 1;
+ return coinStart->fT + (coinEnd->fT - coinStart->fT) * sRatio;
+}
+
+// return true if span overlaps existing and needs to adjust the coincident list
+bool SkOpCoincidence::checkOverlap(SkCoincidentSpans* check,
+ const SkOpSegment* coinSeg, const SkOpSegment* oppSeg,
+ double coinTs, double coinTe, double oppTs, double oppTe,
+ SkTDArray<SkCoincidentSpans*>* overlaps) const {
+ if (!Ordered(coinSeg, oppSeg)) {
+ if (oppTs < oppTe) {
+ return this->checkOverlap(check, oppSeg, coinSeg, oppTs, oppTe, coinTs, coinTe,
+ overlaps);
+ }
+ return this->checkOverlap(check, oppSeg, coinSeg, oppTe, oppTs, coinTe, coinTs, overlaps);
+ }
+ bool swapOpp = oppTs > oppTe;
+ if (swapOpp) {
+ SkTSwap(oppTs, oppTe);
+ }
+ do {
+ if (check->coinPtTStart()->segment() != coinSeg) {
+ continue;
+ }
+ if (check->oppPtTStart()->segment() != oppSeg) {
+ continue;
+ }
+ double checkTs = check->coinPtTStart()->fT;
+ double checkTe = check->coinPtTEnd()->fT;
+ bool coinOutside = coinTe < checkTs || coinTs > checkTe;
+ double oCheckTs = check->oppPtTStart()->fT;
+ double oCheckTe = check->oppPtTEnd()->fT;
+ if (swapOpp) {
+ if (oCheckTs <= oCheckTe) {
+ return false;
+ }
+ SkTSwap(oCheckTs, oCheckTe);
+ }
+ bool oppOutside = oppTe < oCheckTs || oppTs > oCheckTe;
+ if (coinOutside && oppOutside) {
+ continue;
+ }
+ bool coinInside = coinTe <= checkTe && coinTs >= checkTs;
+ bool oppInside = oppTe <= oCheckTe && oppTs >= oCheckTs;
+ if (coinInside && oppInside) { // already included, do nothing
+ return false;
+ }
+ *overlaps->append() = check; // partial overlap, extend existing entry
+ } while ((check = check->next()));
+ return true;
+}
+
+/* Please keep this in sync with debugAddIfMissing() */
+// note that over1s, over1e, over2s, over2e are ordered
+bool SkOpCoincidence::addIfMissing(const SkOpPtT* over1s, const SkOpPtT* over2s,
+ double tStart, double tEnd, SkOpSegment* coinSeg, SkOpSegment* oppSeg, bool* added
+ SkDEBUGPARAMS(const SkOpPtT* over1e) SkDEBUGPARAMS(const SkOpPtT* over2e)) {
+ SkASSERT(tStart < tEnd);
+ SkASSERT(over1s->fT < over1e->fT);
+ SkASSERT(between(over1s->fT, tStart, over1e->fT));
+ SkASSERT(between(over1s->fT, tEnd, over1e->fT));
+ SkASSERT(over2s->fT < over2e->fT);
+ SkASSERT(between(over2s->fT, tStart, over2e->fT));
+ SkASSERT(between(over2s->fT, tEnd, over2e->fT));
+ SkASSERT(over1s->segment() == over1e->segment());
+ SkASSERT(over2s->segment() == over2e->segment());
+ SkASSERT(over1s->segment() == over2s->segment());
+ SkASSERT(over1s->segment() != coinSeg);
+ SkASSERT(over1s->segment() != oppSeg);
+ SkASSERT(coinSeg != oppSeg);
+ double coinTs, coinTe, oppTs, oppTe;
+ coinTs = TRange(over1s, tStart, coinSeg SkDEBUGPARAMS(over1e));
+ coinTe = TRange(over1s, tEnd, coinSeg SkDEBUGPARAMS(over1e));
+ if (coinSeg->collapsed(coinTs, coinTe)) {
+ return true;
+ }
+ oppTs = TRange(over2s, tStart, oppSeg SkDEBUGPARAMS(over2e));
+ oppTe = TRange(over2s, tEnd, oppSeg SkDEBUGPARAMS(over2e));
+ if (oppSeg->collapsed(oppTs, oppTe)) {
+ return true;
+ }
+ if (coinTs > coinTe) {
+ SkTSwap(coinTs, coinTe);
+ SkTSwap(oppTs, oppTe);
+ }
+ return this->addOrOverlap(coinSeg, oppSeg, coinTs, coinTe, oppTs, oppTe, added);
+}
+
+/* Please keep this in sync with debugAddOrOverlap() */
+// If this is called by addEndMovedSpans(), a returned false propagates out to an abort.
+// If this is called by addIfMissing(), a returned false indicates there was nothing to add.
+bool SkOpCoincidence::addOrOverlap(SkOpSegment* coinSeg, SkOpSegment* oppSeg,
+ double coinTs, double coinTe, double oppTs, double oppTe, bool* added) {
+ SkTDArray<SkCoincidentSpans*> overlaps;
+ FAIL_IF(!fTop);
+ if (!this->checkOverlap(fTop, coinSeg, oppSeg, coinTs, coinTe, oppTs, oppTe, &overlaps)) {
+ return true;
+ }
+ if (fHead && !this->checkOverlap(fHead, coinSeg, oppSeg, coinTs,
+ coinTe, oppTs, oppTe, &overlaps)) {
+ return true;
+ }
+ SkCoincidentSpans* overlap = overlaps.count() ? overlaps[0] : nullptr;
+ for (int index = 1; index < overlaps.count(); ++index) { // combine overlaps before continuing
+ SkCoincidentSpans* test = overlaps[index];
+ if (overlap->coinPtTStart()->fT > test->coinPtTStart()->fT) {
+ overlap->setCoinPtTStart(test->coinPtTStart());
+ }
+ if (overlap->coinPtTEnd()->fT < test->coinPtTEnd()->fT) {
+ overlap->setCoinPtTEnd(test->coinPtTEnd());
+ }
+ if (overlap->flipped()
+ ? overlap->oppPtTStart()->fT < test->oppPtTStart()->fT
+ : overlap->oppPtTStart()->fT > test->oppPtTStart()->fT) {
+ overlap->setOppPtTStart(test->oppPtTStart());
+ }
+ if (overlap->flipped()
+ ? overlap->oppPtTEnd()->fT > test->oppPtTEnd()->fT
+ : overlap->oppPtTEnd()->fT < test->oppPtTEnd()->fT) {
+ overlap->setOppPtTEnd(test->oppPtTEnd());
+ }
+ if (!fHead || !this->release(fHead, test)) {
+ SkAssertResult(this->release(fTop, test));
+ }
+ }
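+ // look up the existing ptT records at the computed end points on both segments (created below if missing)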
+ const SkOpPtT* cs = coinSeg->existing(coinTs, oppSeg);
+ const SkOpPtT* ce = coinSeg->existing(coinTe, oppSeg);
+ if (overlap && cs && ce && overlap->contains(cs, ce)) {
+ return true;
+ }
+ FAIL_IF(cs == ce && cs);
+ const SkOpPtT* os = oppSeg->existing(oppTs, coinSeg);
+ const SkOpPtT* oe = oppSeg->existing(oppTe, coinSeg);
+ if (overlap && os && oe && overlap->contains(os, oe)) {
+ return true;
+ }
+ SkASSERT(!cs || !cs->deleted());
+ SkASSERT(!os || !os->deleted());
+ SkASSERT(!ce || !ce->deleted());
+ SkASSERT(!oe || !oe->deleted());
+ const SkOpPtT* csExisting = !cs ? coinSeg->existing(coinTs, nullptr) : nullptr;
+ const SkOpPtT* ceExisting = !ce ? coinSeg->existing(coinTe, nullptr) : nullptr;
+ FAIL_IF(csExisting && csExisting == ceExisting);
+// FAIL_IF(csExisting && (csExisting == ce ||
+// csExisting->contains(ceExisting ? ceExisting : ce)));
+ FAIL_IF(ceExisting && (ceExisting == cs ||
+ ceExisting->contains(csExisting ? csExisting : cs)));
+ const SkOpPtT* osExisting = !os ? oppSeg->existing(oppTs, nullptr) : nullptr;
+ const SkOpPtT* oeExisting = !oe ? oppSeg->existing(oppTe, nullptr) : nullptr;
+ FAIL_IF(osExisting && osExisting == oeExisting);
+ FAIL_IF(osExisting && (osExisting == oe ||
+ osExisting->contains(oeExisting ? oeExisting : oe)));
+ FAIL_IF(oeExisting && (oeExisting == os ||
+ oeExisting->contains(osExisting ? osExisting : os)));
+ // extra line in debug code
+ this->debugValidate();
+ if (!cs || !os) {
+ SkOpPtT* csWritable = cs ? const_cast<SkOpPtT*>(cs)
+ : coinSeg->addT(coinTs);
+ if (csWritable == ce) {
+ return true;
+ }
+ SkOpPtT* osWritable = os ? const_cast<SkOpPtT*>(os)
+ : oppSeg->addT(oppTs);
+ FAIL_IF(!csWritable || !osWritable);
+ csWritable->span()->addOpp(osWritable->span());
+ cs = csWritable;
+ os = osWritable->active();
+ FAIL_IF((ce && ce->deleted()) || (oe && oe->deleted()));
+ }
+ if (!ce || !oe) {
+ SkOpPtT* ceWritable = ce ? const_cast<SkOpPtT*>(ce)
+ : coinSeg->addT(coinTe);
+ SkOpPtT* oeWritable = oe ? const_cast<SkOpPtT*>(oe)
+ : oppSeg->addT(oppTe);
+ ceWritable->span()->addOpp(oeWritable->span());
+ ce = ceWritable;
+ oe = oeWritable;
+ }
+ this->debugValidate();
+ FAIL_IF(cs->deleted());
+ FAIL_IF(os->deleted());
+ FAIL_IF(ce->deleted());
+ FAIL_IF(oe->deleted());
+ FAIL_IF(cs->contains(ce) || os->contains(oe));
+ bool result = true;
+ if (overlap) {
+ if (overlap->coinPtTStart()->segment() == coinSeg) {
+ result = overlap->extend(cs, ce, os, oe);
+ } else {
+ if (os->fT > oe->fT) {
+ SkTSwap(cs, ce);
+ SkTSwap(os, oe);
+ }
+ result = overlap->extend(os, oe, cs, ce);
+ }
+#if DEBUG_COINCIDENCE_VERBOSE
+ if (result) {
+ overlaps[0]->debugShow();
+ }
+#endif
+ } else {
+ this->add(cs, ce, os, oe);
+#if DEBUG_COINCIDENCE_VERBOSE
+ fHead->debugShow();
+#endif
+ }
+ this->debugValidate();
+ if (result) {
+ *added = true;
+ }
+ return true;
+}
+
+// Please keep this in sync with debugAddMissing()
+/* detects overlaps of different coincident runs on the same segment */
+/* does not detect overlaps for pairs without any segments in common */
+// returns true if caller should loop again
+bool SkOpCoincidence::addMissing(bool* added DEBUG_COIN_DECLARE_PARAMS()) {
+ SkCoincidentSpans* outer = fHead;
+ *added = false;
+ if (!outer) {
+ return true;
+ }
+ fTop = outer;
+ fHead = nullptr;
+ do {
+ // addIfMissing() can modify the list this loop is walking,
+ // so save the head so the walker can iterate over the old data unperturbed;
+ // addIfMissing() adds to the head freely, and the saved head is restored at the end
+ const SkOpPtT* ocs = outer->coinPtTStart();
+ SkASSERT(!ocs->deleted());
+ const SkOpSegment* outerCoin = ocs->segment();
+ SkASSERT(!outerCoin->done()); // if it's done, should have already been removed from list
+ const SkOpPtT* oos = outer->oppPtTStart();
+ if (oos->deleted()) {
+ return true;
+ }
+ const SkOpSegment* outerOpp = oos->segment();
+ SkASSERT(!outerOpp->done());
+ SkOpSegment* outerCoinWritable = const_cast<SkOpSegment*>(outerCoin);
+ SkOpSegment* outerOppWritable = const_cast<SkOpSegment*>(outerOpp);
+ SkCoincidentSpans* inner = outer;
+ while ((inner = inner->next())) {
+ this->debugValidate();
+ double overS, overE;
+ const SkOpPtT* ics = inner->coinPtTStart();
+ SkASSERT(!ics->deleted());
+ const SkOpSegment* innerCoin = ics->segment();
+ SkASSERT(!innerCoin->done());
+ const SkOpPtT* ios = inner->oppPtTStart();
+ SkASSERT(!ios->deleted());
+ const SkOpSegment* innerOpp = ios->segment();
+ SkASSERT(!innerOpp->done());
+ SkOpSegment* innerCoinWritable = const_cast<SkOpSegment*>(innerCoin);
+ SkOpSegment* innerOppWritable = const_cast<SkOpSegment*>(innerOpp);
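+ // the outer and inner pairs may share their coin or opp segment in any of four combinations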
+ if (outerCoin == innerCoin) {
+ const SkOpPtT* oce = outer->coinPtTEnd();
+ if (oce->deleted()) {
+ return true;
+ }
+ const SkOpPtT* ice = inner->coinPtTEnd();
+ SkASSERT(!ice->deleted());
+ if (outerOpp != innerOpp && this->overlap(ocs, oce, ics, ice, &overS, &overE)) {
+ (void) this->addIfMissing(ocs->starter(oce), ics->starter(ice),
+ overS, overE, outerOppWritable, innerOppWritable, added
+ SkDEBUGPARAMS(ocs->debugEnder(oce))
+ SkDEBUGPARAMS(ics->debugEnder(ice)));
+ }
+ } else if (outerCoin == innerOpp) {
+ const SkOpPtT* oce = outer->coinPtTEnd();
+ SkASSERT(!oce->deleted());
+ const SkOpPtT* ioe = inner->oppPtTEnd();
+ SkASSERT(!ioe->deleted());
+ if (outerOpp != innerCoin && this->overlap(ocs, oce, ios, ioe, &overS, &overE)) {
+ (void) this->addIfMissing(ocs->starter(oce), ios->starter(ioe),
+ overS, overE, outerOppWritable, innerCoinWritable, added
+ SkDEBUGPARAMS(ocs->debugEnder(oce))
+ SkDEBUGPARAMS(ios->debugEnder(ioe)));
+ }
+ } else if (outerOpp == innerCoin) {
+ const SkOpPtT* ooe = outer->oppPtTEnd();
+ SkASSERT(!ooe->deleted());
+ const SkOpPtT* ice = inner->coinPtTEnd();
+ SkASSERT(!ice->deleted());
+ SkASSERT(outerCoin != innerOpp);
+ if (this->overlap(oos, ooe, ics, ice, &overS, &overE)) {
+ (void) this->addIfMissing(oos->starter(ooe), ics->starter(ice),
+ overS, overE, outerCoinWritable, innerOppWritable, added
+ SkDEBUGPARAMS(oos->debugEnder(ooe))
+ SkDEBUGPARAMS(ics->debugEnder(ice)));
+ }
+ } else if (outerOpp == innerOpp) {
+ const SkOpPtT* ooe = outer->oppPtTEnd();
+ SkASSERT(!ooe->deleted());
+ const SkOpPtT* ioe = inner->oppPtTEnd();
+ if (ioe->deleted()) {
+ return true;
+ }
+ SkASSERT(outerCoin != innerCoin);
+ if (this->overlap(oos, ooe, ios, ioe, &overS, &overE)) {
+ (void) this->addIfMissing(oos->starter(ooe), ios->starter(ioe),
+ overS, overE, outerCoinWritable, innerCoinWritable, added
+ SkDEBUGPARAMS(oos->debugEnder(ooe))
+ SkDEBUGPARAMS(ios->debugEnder(ioe)));
+ }
+ }
+ this->debugValidate();
+ }
+ } while ((outer = outer->next()));
+ this->restoreHead();
+ return true;
+}
+
+bool SkOpCoincidence::addOverlap(const SkOpSegment* seg1, const SkOpSegment* seg1o,
+ const SkOpSegment* seg2, const SkOpSegment* seg2o,
+ const SkOpPtT* overS, const SkOpPtT* overE) {
+ const SkOpPtT* s1 = overS->find(seg1);
+ const SkOpPtT* e1 = overE->find(seg1);
+ if (!s1->starter(e1)->span()->upCast()->windValue()) {
+ s1 = overS->find(seg1o);
+ e1 = overE->find(seg1o);
+ if (!s1->starter(e1)->span()->upCast()->windValue()) {
+ return true;
+ }
+ }
+ const SkOpPtT* s2 = overS->find(seg2);
+ const SkOpPtT* e2 = overE->find(seg2);
+ if (!s2->starter(e2)->span()->upCast()->windValue()) {
+ s2 = overS->find(seg2o);
+ e2 = overE->find(seg2o);
+ if (!s2->starter(e2)->span()->upCast()->windValue()) {
+ return true;
+ }
+ }
+ if (s1->segment() == s2->segment()) {
+ return true;
+ }
+ if (s1->fT > e1->fT) {
+ SkTSwap(s1, e1);
+ SkTSwap(s2, e2);
+ }
+ this->add(s1, e1, s2, e2);
+ return true;
+}
+
+bool SkOpCoincidence::contains(const SkOpSegment* seg, const SkOpSegment* opp, double oppT) const {
+ if (this->contains(fHead, seg, opp, oppT)) {
+ return true;
+ }
+ if (this->contains(fTop, seg, opp, oppT)) {
+ return true;
+ }
+ return false;
+}
+
+bool SkOpCoincidence::contains(const SkCoincidentSpans* coin, const SkOpSegment* seg,
+ const SkOpSegment* opp, double oppT) const {
+ if (!coin) {
+ return false;
+ }
+ do {
+ if (coin->coinPtTStart()->segment() == seg && coin->oppPtTStart()->segment() == opp
+ && between(coin->oppPtTStart()->fT, oppT, coin->oppPtTEnd()->fT)) {
+ return true;
+ }
+ if (coin->oppPtTStart()->segment() == seg && coin->coinPtTStart()->segment() == opp
+ && between(coin->coinPtTStart()->fT, oppT, coin->coinPtTEnd()->fT)) {
+ return true;
+ }
+ } while ((coin = coin->next()));
+ return false;
+}
+
+bool SkOpCoincidence::contains(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd,
+ const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd) const {
+ const SkCoincidentSpans* test = fHead;
+ if (!test) {
+ return false;
+ }
+ const SkOpSegment* coinSeg = coinPtTStart->segment();
+ const SkOpSegment* oppSeg = oppPtTStart->segment();
+ if (!Ordered(coinPtTStart, oppPtTStart)) {
+ SkTSwap(coinSeg, oppSeg);
+ SkTSwap(coinPtTStart, oppPtTStart);
+ SkTSwap(coinPtTEnd, oppPtTEnd);
+ if (coinPtTStart->fT > coinPtTEnd->fT) {
+ SkTSwap(coinPtTStart, coinPtTEnd);
+ SkTSwap(oppPtTStart, oppPtTEnd);
+ }
+ }
+ double oppMinT = SkTMin(oppPtTStart->fT, oppPtTEnd->fT);
+ double oppMaxT = SkTMax(oppPtTStart->fT, oppPtTEnd->fT);
+ do {
+ if (coinSeg != test->coinPtTStart()->segment()) {
+ continue;
+ }
+ if (coinPtTStart->fT < test->coinPtTStart()->fT) {
+ continue;
+ }
+ if (coinPtTEnd->fT > test->coinPtTEnd()->fT) {
+ continue;
+ }
+ if (oppSeg != test->oppPtTStart()->segment()) {
+ continue;
+ }
+ if (oppMinT < SkTMin(test->oppPtTStart()->fT, test->oppPtTEnd()->fT)) {
+ continue;
+ }
+ if (oppMaxT > SkTMax(test->oppPtTStart()->fT, test->oppPtTEnd()->fT)) {
+ continue;
+ }
+ return true;
+ } while ((test = test->next()));
+ return false;
+}
+
+void SkOpCoincidence::correctEnds(DEBUG_COIN_DECLARE_ONLY_PARAMS()) {
+ DEBUG_SET_PHASE();
+ SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return;
+ }
+ do {
+ coin->correctEnds();
+ } while ((coin = coin->next()));
+}
+
+// walk span sets in parallel, moving winding from one to the other
+void SkOpCoincidence::apply(DEBUG_COIN_DECLARE_ONLY_PARAMS()) {
+ DEBUG_SET_PHASE();
+ SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return;
+ }
+ do {
+ SkOpSpan* start = coin->coinPtTStartWritable()->span()->upCast();
+ if (start->deleted()) {
+ continue;
+ }
+ const SkOpSpanBase* end = coin->coinPtTEnd()->span();
+ SkASSERT(start == start->starter(end));
+ bool flipped = coin->flipped();
+ SkOpSpan* oStart = (flipped ? coin->oppPtTEndWritable()
+ : coin->oppPtTStartWritable())->span()->upCast();
+ if (oStart->deleted()) {
+ continue;
+ }
+ const SkOpSpanBase* oEnd = (flipped ? coin->oppPtTStart() : coin->oppPtTEnd())->span();
+ SkASSERT(oStart == oStart->starter(oEnd));
+ SkOpSegment* segment = start->segment();
+ SkOpSegment* oSegment = oStart->segment();
+ bool operandSwap = segment->operand() != oSegment->operand();
+ if (flipped) {
+ if (oEnd->deleted()) {
+ continue;
+ }
+ do {
+ SkOpSpanBase* oNext = oStart->next();
+ if (oNext == oEnd) {
+ break;
+ }
+ oStart = oNext->upCast();
+ } while (true);
+ }
+ do {
+ int windValue = start->windValue();
+ int oppValue = start->oppValue();
+ int oWindValue = oStart->windValue();
+ int oOppValue = oStart->oppValue();
+ // winding values are added or subtracted depending on direction and wind type
+ // same or opposite values are summed depending on the operand value
+ int windDiff = operandSwap ? oOppValue : oWindValue;
+ int oWindDiff = operandSwap ? oppValue : windValue;
+ if (!flipped) {
+ windDiff = -windDiff;
+ oWindDiff = -oWindDiff;
+ }
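+ // decide which span receives the combined winding; the other span is zeroed and may be marked done below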
+ bool addToStart = windValue && (windValue > windDiff || (windValue == windDiff
+ && oWindValue <= oWindDiff));
+ if (addToStart ? start->done() : oStart->done()) {
+ addToStart ^= true;
+ }
+ if (addToStart) {
+ if (operandSwap) {
+ SkTSwap(oWindValue, oOppValue);
+ }
+ if (flipped) {
+ windValue -= oWindValue;
+ oppValue -= oOppValue;
+ } else {
+ windValue += oWindValue;
+ oppValue += oOppValue;
+ }
+ if (segment->isXor()) {
+ windValue &= 1;
+ }
+ if (segment->oppXor()) {
+ oppValue &= 1;
+ }
+ oWindValue = oOppValue = 0;
+ } else {
+ if (operandSwap) {
+ SkTSwap(windValue, oppValue);
+ }
+ if (flipped) {
+ oWindValue -= windValue;
+ oOppValue -= oppValue;
+ } else {
+ oWindValue += windValue;
+ oOppValue += oppValue;
+ }
+ if (oSegment->isXor()) {
+ oWindValue &= 1;
+ }
+ if (oSegment->oppXor()) {
+ oOppValue &= 1;
+ }
+ windValue = oppValue = 0;
+ }
+#if 0 && DEBUG_COINCIDENCE
+ SkDebugf("seg=%d span=%d windValue=%d oppValue=%d\n", segment->debugID(),
+ start->debugID(), windValue, oppValue);
+ SkDebugf("seg=%d span=%d windValue=%d oppValue=%d\n", oSegment->debugID(),
+ oStart->debugID(), oWindValue, oOppValue);
+#endif
+ start->setWindValue(windValue);
+ start->setOppValue(oppValue);
+ oStart->setWindValue(oWindValue);
+ oStart->setOppValue(oOppValue);
+ if (!windValue && !oppValue) {
+ segment->markDone(start);
+ }
+ if (!oWindValue && !oOppValue) {
+ oSegment->markDone(oStart);
+ }
+ SkOpSpanBase* next = start->next();
+ SkOpSpanBase* oNext = flipped ? oStart->prev() : oStart->next();
+ if (next == end) {
+ break;
+ }
+ start = next->upCast();
+ // if the opposite ran out too soon, just reuse the last span
+ if (!oNext || !oNext->upCastable()) {
+ oNext = oStart;
+ }
+ oStart = oNext->upCast();
+ } while (true);
+ } while ((coin = coin->next()));
+}
+
+// Please keep this in sync with debugRelease()
+bool SkOpCoincidence::release(SkCoincidentSpans* coin, SkCoincidentSpans* remove) {
+ SkCoincidentSpans* head = coin;
+ SkCoincidentSpans* prev = nullptr;
+ SkCoincidentSpans* next;
+ do {
+ next = coin->next();
+ if (coin == remove) {
+ if (prev) {
+ prev->setNext(next);
+ } else if (head == fHead) {
+ fHead = next;
+ } else {
+ fTop = next;
+ }
+ break;
+ }
+ prev = coin;
+ } while ((coin = next));
+ return coin != nullptr;
+}
+
+void SkOpCoincidence::releaseDeleted(SkCoincidentSpans* coin) {
+ if (!coin) {
+ return;
+ }
+ SkCoincidentSpans* head = coin;
+ SkCoincidentSpans* prev = nullptr;
+ SkCoincidentSpans* next;
+ do {
+ next = coin->next();
+ if (coin->coinPtTStart()->deleted()) {
+ SkOPASSERT(coin->flipped() ? coin->oppPtTEnd()->deleted() :
+ coin->oppPtTStart()->deleted());
+ if (prev) {
+ prev->setNext(next);
+ } else if (head == fHead) {
+ fHead = next;
+ } else {
+ fTop = next;
+ }
+ } else {
+ SkOPASSERT(coin->flipped() ? !coin->oppPtTEnd()->deleted() :
+ !coin->oppPtTStart()->deleted());
+ prev = coin;
+ }
+ } while ((coin = next));
+}
+
+void SkOpCoincidence::releaseDeleted() {
+ this->releaseDeleted(fHead);
+ this->releaseDeleted(fTop);
+}
+
+void SkOpCoincidence::restoreHead() {
+ SkCoincidentSpans** headPtr = &fHead;
+ while (*headPtr) {
+ headPtr = (*headPtr)->nextPtr();
+ }
+ *headPtr = fTop;
+ fTop = nullptr;
+ // segments may have collapsed in the meantime; remove empty referenced segments
+ headPtr = &fHead;
+ while (*headPtr) {
+ SkCoincidentSpans* test = *headPtr;
+ if (test->coinPtTStart()->segment()->done() || test->oppPtTStart()->segment()->done()) {
+ *headPtr = test->next();
+ continue;
+ }
+ headPtr = (*headPtr)->nextPtr();
+ }
+}
+
+// Please keep this in sync with debugExpand()
+// expand the range by checking adjacent spans for coincidence
+bool SkOpCoincidence::expand(DEBUG_COIN_DECLARE_ONLY_PARAMS()) {
+ DEBUG_SET_PHASE();
+ SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return false;
+ }
+ bool expanded = false;
+ do {
+ if (coin->expand()) {
+ // check to see if multiple spans expanded so they are now identical
+ SkCoincidentSpans* test = fHead;
+ do {
+ if (coin == test) {
+ continue;
+ }
+ if (coin->coinPtTStart() == test->coinPtTStart()
+ && coin->oppPtTStart() == test->oppPtTStart()) {
+ this->release(fHead, test);
+ break;
+ }
+ } while ((test = test->next()));
+ expanded = true;
+ }
+ } while ((coin = coin->next()));
+ return expanded;
+}
+
+void SkOpCoincidence::findOverlaps(SkOpCoincidence* overlaps DEBUG_COIN_DECLARE_PARAMS()) const {
+ DEBUG_SET_PHASE();
+ overlaps->fHead = overlaps->fTop = nullptr;
+ SkCoincidentSpans* outer = fHead;
+ while (outer) {
+ const SkOpSegment* outerCoin = outer->coinPtTStart()->segment();
+ const SkOpSegment* outerOpp = outer->oppPtTStart()->segment();
+ SkCoincidentSpans* inner = outer;
+ while ((inner = inner->next())) {
+ const SkOpSegment* innerCoin = inner->coinPtTStart()->segment();
+ if (outerCoin == innerCoin) {
+ continue; // both winners are the same segment, so there's no additional overlap
+ }
+ const SkOpSegment* innerOpp = inner->oppPtTStart()->segment();
+ const SkOpPtT* overlapS;
+ const SkOpPtT* overlapE;
+ if ((outerOpp == innerCoin && SkOpPtT::Overlaps(outer->oppPtTStart(),
+ outer->oppPtTEnd(),inner->coinPtTStart(), inner->coinPtTEnd(), &overlapS,
+ &overlapE))
+ || (outerCoin == innerOpp && SkOpPtT::Overlaps(outer->coinPtTStart(),
+ outer->coinPtTEnd(), inner->oppPtTStart(), inner->oppPtTEnd(),
+ &overlapS, &overlapE))
+ || (outerOpp == innerOpp && SkOpPtT::Overlaps(outer->oppPtTStart(),
+ outer->oppPtTEnd(), inner->oppPtTStart(), inner->oppPtTEnd(),
+ &overlapS, &overlapE))) {
+ SkAssertResult(overlaps->addOverlap(outerCoin, outerOpp, innerCoin, innerOpp,
+ overlapS, overlapE));
+ }
+ }
+ outer = outer->next();
+ }
+}
+
+void SkOpCoincidence::fixUp(SkOpPtT* deleted, const SkOpPtT* kept) {
+ SkOPASSERT(deleted != kept);
+ if (fHead) {
+ this->fixUp(fHead, deleted, kept);
+ }
+ if (fTop) {
+ this->fixUp(fTop, deleted, kept);
+ }
+}
+
+void SkOpCoincidence::fixUp(SkCoincidentSpans* coin, SkOpPtT* deleted, const SkOpPtT* kept) {
+ SkCoincidentSpans* head = coin;
+ do {
+ if (coin->coinPtTStart() == deleted) {
+ if (coin->coinPtTEnd()->span() == kept->span()) {
+ this->release(head, coin);
+ continue;
+ }
+ coin->setCoinPtTStart(kept);
+ }
+ if (coin->coinPtTEnd() == deleted) {
+ if (coin->coinPtTStart()->span() == kept->span()) {
+ this->release(head, coin);
+ continue;
+ }
+ coin->setCoinPtTEnd(kept);
+ }
+ if (coin->oppPtTStart() == deleted) {
+ if (coin->oppPtTEnd()->span() == kept->span()) {
+ this->release(head, coin);
+ continue;
+ }
+ coin->setOppPtTStart(kept);
+ }
+ if (coin->oppPtTEnd() == deleted) {
+ if (coin->oppPtTStart()->span() == kept->span()) {
+ this->release(head, coin);
+ continue;
+ }
+ coin->setOppPtTEnd(kept);
+ }
+ } while ((coin = coin->next()));
+}
+
+// Please keep this in sync with debugMark()
+/* this sets up the coincidence links in the segments when the coincidence crosses multiple spans */
+void SkOpCoincidence::mark(DEBUG_COIN_DECLARE_ONLY_PARAMS()) {
+ DEBUG_SET_PHASE();
+ SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return;
+ }
+ do {
+ SkOpSpan* start = coin->coinPtTStartWritable()->span()->upCast();
+ SkASSERT(!start->deleted());
+ SkOpSpanBase* end = coin->coinPtTEndWritable()->span();
+ SkASSERT(!end->deleted());
+ SkOpSpanBase* oStart = coin->oppPtTStartWritable()->span();
+ SkASSERT(!oStart->deleted());
+ SkOpSpanBase* oEnd = coin->oppPtTEndWritable()->span();
+ SkASSERT(!oEnd->deleted());
+ bool flipped = coin->flipped();
+ if (flipped) {
+ SkTSwap(oStart, oEnd);
+ }
+ /* coin and opp spans may not match up. Mark the ends, and then let the interior
+ get marked as many times as the spans allow */
+ start->insertCoincidence(oStart->upCast());
+ end->insertCoinEnd(oEnd);
+ const SkOpSegment* segment = start->segment();
+ const SkOpSegment* oSegment = oStart->segment();
+ SkOpSpanBase* next = start;
+ SkOpSpanBase* oNext = oStart;
+ bool ordered = coin->ordered();
+ while ((next = next->upCast()->next()) != end) {
+ SkAssertResult(next->upCast()->insertCoincidence(oSegment, flipped, ordered));
+ }
+ while ((oNext = oNext->upCast()->next()) != oEnd) {
+ SkAssertResult(oNext->upCast()->insertCoincidence(segment, flipped, ordered));
+ }
+ } while ((coin = coin->next()));
+}
+
+// Please keep in sync with debugMarkCollapsed()
+void SkOpCoincidence::markCollapsed(SkCoincidentSpans* coin, SkOpPtT* test) {
+ SkCoincidentSpans* head = coin;
+ while (coin) {
+ if (coin->collapsed(test)) {
+ if (zero_or_one(coin->coinPtTStart()->fT) && zero_or_one(coin->coinPtTEnd()->fT)) {
+ coin->coinPtTStartWritable()->segment()->markAllDone();
+ }
+ if (zero_or_one(coin->oppPtTStart()->fT) && zero_or_one(coin->oppPtTEnd()->fT)) {
+ coin->oppPtTStartWritable()->segment()->markAllDone();
+ }
+ this->release(head, coin);
+ }
+ coin = coin->next();
+ }
+}
+
+// Please keep in sync with debugMarkCollapsed()
+void SkOpCoincidence::markCollapsed(SkOpPtT* test) {
+ markCollapsed(fHead, test);
+ markCollapsed(fTop, test);
+}
+
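+// imposes a canonical order on a segment pair: lower verb first, then lexicographic comparison of points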
+bool SkOpCoincidence::Ordered(const SkOpSegment* coinSeg, const SkOpSegment* oppSeg) {
+ if (coinSeg->verb() < oppSeg->verb()) {
+ return true;
+ }
+ if (coinSeg->verb() > oppSeg->verb()) {
+ return false;
+ }
+ int count = (SkPathOpsVerbToPoints(coinSeg->verb()) + 1) * 2;
+ const SkScalar* cPt = &coinSeg->pts()[0].fX;
+ const SkScalar* oPt = &oppSeg->pts()[0].fX;
+ for (int index = 0; index < count; ++index) {
+ if (*cPt < *oPt) {
+ return true;
+ }
+ if (*cPt > *oPt) {
+ return false;
+ }
+ ++cPt;
+ ++oPt;
+ }
+ return true;
+}
+
+bool SkOpCoincidence::overlap(const SkOpPtT* coin1s, const SkOpPtT* coin1e,
+ const SkOpPtT* coin2s, const SkOpPtT* coin2e, double* overS, double* overE) const {
+ SkASSERT(coin1s->segment() == coin2s->segment());
+ *overS = SkTMax(SkTMin(coin1s->fT, coin1e->fT), SkTMin(coin2s->fT, coin2e->fT));
+ *overE = SkTMin(SkTMax(coin1s->fT, coin1e->fT), SkTMax(coin2s->fT, coin2e->fT));
+ return *overS < *overE;
+}
+
+// Please keep this in sync with debugRelease()
+void SkOpCoincidence::release(const SkOpSegment* deleted) {
+ SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return;
+ }
+ do {
+ if (coin->coinPtTStart()->segment() == deleted
+ || coin->coinPtTEnd()->segment() == deleted
+ || coin->oppPtTStart()->segment() == deleted
+ || coin->oppPtTEnd()->segment() == deleted) {
+ this->release(fHead, coin);
+ }
+ } while ((coin = coin->next()));
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpCoincidence.h b/gfx/skia/skia/src/pathops/SkOpCoincidence.h
new file mode 100644
index 000000000..244035323
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpCoincidence.h
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOpCoincidence_DEFINED
+#define SkOpCoincidence_DEFINED
+
+#include "SkTDArray.h"
+#include "SkOpTAllocator.h"
+#include "SkOpSpan.h"
+#include "SkPathOpsTypes.h"
+
+class SkOpPtT;
+class SkOpSpanBase;
+
+class SkCoincidentSpans {
+public:
+ const SkOpPtT* coinPtTEnd() const { return fCoinPtTEnd; }
+ const SkOpPtT* coinPtTStart() const { return fCoinPtTStart; }
+
+ // These return non-const pointers so that, as copies, they can be added
+ // to a new span pair
+ SkOpPtT* coinPtTEndWritable() const { return const_cast<SkOpPtT*>(fCoinPtTEnd); }
+ SkOpPtT* coinPtTStartWritable() const { return const_cast<SkOpPtT*>(fCoinPtTStart); }
+
+ bool collapsed(const SkOpPtT* ) const;
+ bool contains(const SkOpPtT* s, const SkOpPtT* e) const;
+ void correctEnds();
+ void correctOneEnd(const SkOpPtT* (SkCoincidentSpans::* getEnd)() const,
+ void (SkCoincidentSpans::* setEnd)(const SkOpPtT* ptT) );
+
+#if DEBUG_COIN
+ void debugCorrectEnds(SkPathOpsDebug::GlitchLog* log) const;
+ void debugCorrectOneEnd(SkPathOpsDebug::GlitchLog* log,
+ const SkOpPtT* (SkCoincidentSpans::* getEnd)() const,
+ void (SkCoincidentSpans::* setEnd)(const SkOpPtT* ptT) const) const;
+ bool debugExpand(SkPathOpsDebug::GlitchLog* log) const;
+#endif
+
+ const char* debugID() const {
+#if DEBUG_COIN
+ return fGlobalState->debugCoinDictEntry().fFunctionName;
+#else
+ return nullptr;
+#endif
+ }
+
+ void debugShow() const;
+#ifdef SK_DEBUG
+ void debugStartCheck(const SkOpSpanBase* outer, const SkOpSpanBase* over,
+ const SkOpGlobalState* debugState) const;
+#endif
+ void dump() const;
+ bool expand();
+ bool extend(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd,
+ const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd);
+ bool flipped() const { return fOppPtTStart->fT > fOppPtTEnd->fT; }
+ SkDEBUGCODE(SkOpGlobalState* globalState() { return fGlobalState; })
+
+ void init(SkDEBUGCODE(SkOpGlobalState* globalState)) {
+ sk_bzero(this, sizeof(*this));
+ SkDEBUGCODE(fGlobalState = globalState);
+ }
+
+ SkCoincidentSpans* next() { return fNext; }
+ const SkCoincidentSpans* next() const { return fNext; }
+ SkCoincidentSpans** nextPtr() { return &fNext; }
+ const SkOpPtT* oppPtTStart() const { return fOppPtTStart; }
+ const SkOpPtT* oppPtTEnd() const { return fOppPtTEnd; }
+ // These return non-const pointers so that, as copies, they can be added
+ // to a new span pair
+ SkOpPtT* oppPtTStartWritable() const { return const_cast<SkOpPtT*>(fOppPtTStart); }
+ SkOpPtT* oppPtTEndWritable() const { return const_cast<SkOpPtT*>(fOppPtTEnd); }
+ bool ordered() const;
+
+ void set(SkCoincidentSpans* next, const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd,
+ const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd);
+
+ void setCoinPtTEnd(const SkOpPtT* ptT) {
+ SkOPASSERT(ptT == ptT->span()->ptT());
+ SkASSERT(!fCoinPtTStart || ptT->fT != fCoinPtTStart->fT);
+ SkASSERT(!fCoinPtTStart || fCoinPtTStart->segment() == ptT->segment());
+ fCoinPtTEnd = ptT;
+ ptT->setCoincident();
+ }
+
+ void setCoinPtTStart(const SkOpPtT* ptT) {
+ SkASSERT(ptT == ptT->span()->ptT());
+ SkOPASSERT(!fCoinPtTEnd || ptT->fT != fCoinPtTEnd->fT);
+ SkASSERT(!fCoinPtTEnd || fCoinPtTEnd->segment() == ptT->segment());
+ fCoinPtTStart = ptT;
+ ptT->setCoincident();
+ }
+
+ void setEnds(const SkOpPtT* coinPtTEnd, const SkOpPtT* oppPtTEnd) {
+ this->setCoinPtTEnd(coinPtTEnd);
+ this->setOppPtTEnd(oppPtTEnd);
+ }
+
+ void setOppPtTEnd(const SkOpPtT* ptT) {
+ SkOPASSERT(ptT == ptT->span()->ptT());
+ SkOPASSERT(!fOppPtTStart || ptT->fT != fOppPtTStart->fT);
+ SkASSERT(!fOppPtTStart || fOppPtTStart->segment() == ptT->segment());
+ fOppPtTEnd = ptT;
+ ptT->setCoincident();
+ }
+
+ void setOppPtTStart(const SkOpPtT* ptT) {
+ SkASSERT(ptT == ptT->span()->ptT());
+ SkASSERT(!fOppPtTEnd || ptT->fT != fOppPtTEnd->fT);
+ SkASSERT(!fOppPtTEnd || fOppPtTEnd->segment() == ptT->segment());
+ fOppPtTStart = ptT;
+ ptT->setCoincident();
+ }
+
+ void setStarts(const SkOpPtT* coinPtTStart, const SkOpPtT* oppPtTStart) {
+ this->setCoinPtTStart(coinPtTStart);
+ this->setOppPtTStart(oppPtTStart);
+ }
+
+ void setNext(SkCoincidentSpans* next) { fNext = next; }
+
+private:
+ SkCoincidentSpans* fNext;
+ const SkOpPtT* fCoinPtTStart;
+ const SkOpPtT* fCoinPtTEnd;
+ const SkOpPtT* fOppPtTStart;
+ const SkOpPtT* fOppPtTEnd;
+ SkDEBUGCODE(SkOpGlobalState* fGlobalState);
+};
+
+class SkOpCoincidence {
+public:
+ SkOpCoincidence(SkOpGlobalState* globalState)
+ : fHead(nullptr)
+ , fTop(nullptr)
+ , fGlobalState(globalState)
+ , fContinue(false)
+ , fSpanDeleted(false)
+ , fPtAllocated(false)
+ , fCoinExtended(false)
+ , fSpanMerged(false) {
+ globalState->setCoincidence(this);
+ }
+
+ void add(SkOpPtT* coinPtTStart, SkOpPtT* coinPtTEnd, SkOpPtT* oppPtTStart,
+ SkOpPtT* oppPtTEnd);
+ bool addEndMovedSpans(DEBUG_COIN_DECLARE_ONLY_PARAMS());
+ bool addExpanded(DEBUG_COIN_DECLARE_ONLY_PARAMS());
+ bool addMissing(bool* added DEBUG_COIN_DECLARE_PARAMS());
+ void apply(DEBUG_COIN_DECLARE_ONLY_PARAMS());
+ bool contains(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd,
+ const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd) const;
+ void correctEnds(DEBUG_COIN_DECLARE_ONLY_PARAMS());
+
+#if DEBUG_COIN
+ void debugAddEndMovedSpans(SkPathOpsDebug::GlitchLog* log) const;
+ void debugAddExpanded(SkPathOpsDebug::GlitchLog* ) const;
+ void debugAddMissing(SkPathOpsDebug::GlitchLog* , bool* added) const;
+ void debugAddOrOverlap(SkPathOpsDebug::GlitchLog* log,
+ const SkOpSegment* coinSeg, const SkOpSegment* oppSeg,
+ double coinTs, double coinTe, double oppTs, double oppTe,
+ bool* added) const;
+#endif
+
+ const SkOpAngle* debugAngle(int id) const {
+ return SkDEBUGRELEASE(fGlobalState->debugAngle(id), nullptr);
+ }
+
+ void debugCheckBetween() const;
+
+#if DEBUG_COIN
+ void debugCheckValid(SkPathOpsDebug::GlitchLog* log) const;
+#endif
+
+ SkOpContour* debugContour(int id) const {
+ return SkDEBUGRELEASE(fGlobalState->debugContour(id), nullptr);
+ }
+
+#if DEBUG_COIN
+ void debugCorrectEnds(SkPathOpsDebug::GlitchLog* log) const;
+ bool debugExpand(SkPathOpsDebug::GlitchLog* ) const;
+ void debugMark(SkPathOpsDebug::GlitchLog* ) const;
+ void debugMarkCollapsed(SkPathOpsDebug::GlitchLog* ,
+ const SkCoincidentSpans* coin, const SkOpPtT* test) const;
+ void debugMarkCollapsed(SkPathOpsDebug::GlitchLog* , const SkOpPtT* test) const;
+#endif
+
+ const SkOpPtT* debugPtT(int id) const {
+ return SkDEBUGRELEASE(fGlobalState->debugPtT(id), nullptr);
+ }
+
+ const SkOpSegment* debugSegment(int id) const {
+ return SkDEBUGRELEASE(fGlobalState->debugSegment(id), nullptr);
+ }
+
+#if DEBUG_COIN
+ void debugRelease(SkPathOpsDebug::GlitchLog* , const SkCoincidentSpans* ,
+ const SkCoincidentSpans* ) const;
+ void debugRelease(SkPathOpsDebug::GlitchLog* , const SkOpSegment* ) const;
+#endif
+ void debugShowCoincidence() const;
+
+ const SkOpSpanBase* debugSpan(int id) const {
+ return SkDEBUGRELEASE(fGlobalState->debugSpan(id), nullptr);
+ }
+
+ void debugValidate() const;
+ void dump() const;
+ bool expand(DEBUG_COIN_DECLARE_ONLY_PARAMS());
+ bool extend(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd, const SkOpPtT* oppPtTStart,
+ const SkOpPtT* oppPtTEnd);
+ void findOverlaps(SkOpCoincidence* DEBUG_COIN_DECLARE_PARAMS()) const;
+ void fixUp(SkOpPtT* deleted, const SkOpPtT* kept);
+
+ SkOpGlobalState* globalState() {
+ return fGlobalState;
+ }
+
+ const SkOpGlobalState* globalState() const {
+ return fGlobalState;
+ }
+
+ bool isEmpty() const {
+ return !fHead && !fTop;
+ }
+
+ void mark(DEBUG_COIN_DECLARE_ONLY_PARAMS());
+ void markCollapsed(SkOpPtT* );
+
+ static bool Ordered(const SkOpPtT* coinPtTStart, const SkOpPtT* oppPtTStart) {
+ return Ordered(coinPtTStart->segment(), oppPtTStart->segment());
+ }
+
+ static bool Ordered(const SkOpSegment* coin, const SkOpSegment* opp);
+ void release(const SkOpSegment* );
+ void releaseDeleted();
+
+private:
+ void add(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd, const SkOpPtT* oppPtTStart,
+ const SkOpPtT* oppPtTEnd) {
+ this->add(const_cast<SkOpPtT*>(coinPtTStart), const_cast<SkOpPtT*>(coinPtTEnd),
+ const_cast<SkOpPtT*>(oppPtTStart), const_cast<SkOpPtT*>(oppPtTEnd));
+ }
+
+ bool addEndMovedSpans(const SkOpSpan* base, const SkOpSpanBase* testSpan);
+ bool addEndMovedSpans(const SkOpPtT* ptT);
+
+ bool addIfMissing(const SkOpPtT* over1s, const SkOpPtT* over2s,
+ double tStart, double tEnd, SkOpSegment* coinSeg, SkOpSegment* oppSeg,
+ bool* added
+ SkDEBUGPARAMS(const SkOpPtT* over1e) SkDEBUGPARAMS(const SkOpPtT* over2e));
+ bool addOrOverlap(SkOpSegment* coinSeg, SkOpSegment* oppSeg,
+ double coinTs, double coinTe, double oppTs, double oppTe, bool* added);
+ bool addOverlap(const SkOpSegment* seg1, const SkOpSegment* seg1o,
+ const SkOpSegment* seg2, const SkOpSegment* seg2o,
+ const SkOpPtT* overS, const SkOpPtT* overE);
+ bool checkOverlap(SkCoincidentSpans* check,
+ const SkOpSegment* coinSeg, const SkOpSegment* oppSeg,
+ double coinTs, double coinTe, double oppTs, double oppTe,
+ SkTDArray<SkCoincidentSpans*>* overlaps) const;
+ bool contains(const SkOpSegment* seg, const SkOpSegment* opp, double oppT) const;
+ bool contains(const SkCoincidentSpans* coin, const SkOpSegment* seg,
+ const SkOpSegment* opp, double oppT) const;
+#if DEBUG_COIN
+ void debugAddIfMissing(SkPathOpsDebug::GlitchLog* ,
+ const SkCoincidentSpans* outer, const SkOpPtT* over1s,
+ const SkOpPtT* over1e) const;
+ void debugAddIfMissing(SkPathOpsDebug::GlitchLog* ,
+ const SkOpPtT* over1s, const SkOpPtT* over2s,
+ double tStart, double tEnd,
+ const SkOpSegment* coinSeg, const SkOpSegment* oppSeg, bool* added,
+ const SkOpPtT* over1e, const SkOpPtT* over2e) const;
+ void debugAddEndMovedSpans(SkPathOpsDebug::GlitchLog* ,
+ const SkOpSpan* base, const SkOpSpanBase* testSpan) const;
+ void debugAddEndMovedSpans(SkPathOpsDebug::GlitchLog* ,
+ const SkOpPtT* ptT) const;
+#endif
+ void fixUp(SkCoincidentSpans* coin, SkOpPtT* deleted, const SkOpPtT* kept);
+ void markCollapsed(SkCoincidentSpans* head, SkOpPtT* test);
+ bool overlap(const SkOpPtT* coinStart1, const SkOpPtT* coinEnd1,
+ const SkOpPtT* coinStart2, const SkOpPtT* coinEnd2,
+ double* overS, double* overE) const;
+ bool release(SkCoincidentSpans* coin, SkCoincidentSpans* );
+ void releaseDeleted(SkCoincidentSpans* );
+ void restoreHead();
+ // return coinPtT->segment()->t mapped from overS->fT <= t <= overE->fT
+ static double TRange(const SkOpPtT* overS, double t, const SkOpSegment* coinPtT
+ SkDEBUGPARAMS(const SkOpPtT* overE));
+
+ SkCoincidentSpans* fHead;
+ SkCoincidentSpans* fTop;
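+ // fTop temporarily holds entries saved while addMissing()/addEndMovedSpans() walk the old list;
+ // restoreHead() splices it back onto fHead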
+ SkOpGlobalState* fGlobalState;
+ bool fContinue;
+ bool fSpanDeleted;
+ bool fPtAllocated;
+ bool fCoinExtended;
+ bool fSpanMerged;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkOpContour.cpp b/gfx/skia/skia/src/pathops/SkOpContour.cpp
new file mode 100644
index 000000000..981bd2957
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpContour.cpp
@@ -0,0 +1,70 @@
+/*
+* Copyright 2013 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+#include "SkOpContour.h"
+#include "SkOpTAllocator.h"
+#include "SkPathWriter.h"
+#include "SkReduceOrder.h"
+#include "SkTSort.h"
+
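+// addCurve() copies the caller's points into allocator-owned storage because the
+// new SkOpSegment keeps a pointer to that storage rather than copying the points again.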
+SkOpSegment* SkOpContour::addCurve(SkPath::Verb verb, const SkPoint pts[4], SkScalar weight) {
+ SkChunkAlloc* allocator = this->globalState()->allocator();
+ switch (verb) {
+ case SkPath::kLine_Verb: {
+ SkPoint* ptStorage = SkOpTAllocator<SkPoint>::AllocateArray(allocator, 2);
+ memcpy(ptStorage, pts, sizeof(SkPoint) * 2);
+ return appendSegment().addLine(ptStorage, this);
+ } break;
+ case SkPath::kQuad_Verb: {
+ SkPoint* ptStorage = SkOpTAllocator<SkPoint>::AllocateArray(allocator, 3);
+ memcpy(ptStorage, pts, sizeof(SkPoint) * 3);
+ return appendSegment().addQuad(ptStorage, this);
+ } break;
+ case SkPath::kConic_Verb: {
+ SkPoint* ptStorage = SkOpTAllocator<SkPoint>::AllocateArray(allocator, 3);
+ memcpy(ptStorage, pts, sizeof(SkPoint) * 3);
+ return appendSegment().addConic(ptStorage, weight, this);
+ } break;
+ case SkPath::kCubic_Verb: {
+ SkPoint* ptStorage = SkOpTAllocator<SkPoint>::AllocateArray(allocator, 4);
+ memcpy(ptStorage, pts, sizeof(SkPoint) * 4);
+ return appendSegment().addCubic(ptStorage, this);
+ } break;
+ default:
+ SkASSERT(0);
+ }
+ return nullptr;
+}
+
+void SkOpContour::toPath(SkPathWriter* path) const {
+ const SkOpSegment* segment = &fHead;
+ do {
+ SkAssertResult(segment->addCurveTo(segment->head(), segment->tail(), path));
+ } while ((segment = segment->next()));
+ path->finishContour();
+ path->assemble();
+}
+
+void SkOpContour::toReversePath(SkPathWriter* path) const {
+ const SkOpSegment* segment = fTail;
+ do {
+ SkAssertResult(segment->addCurveTo(segment->tail(), segment->head(), path));
+ } while ((segment = segment->prev()));
+ path->finishContour();
+ path->assemble();
+}
+
+SkOpSegment* SkOpContour::undoneSegment(SkOpSpanBase** startPtr, SkOpSpanBase** endPtr) {
+ SkOpSegment* segment = &fHead;
+ do {
+ if (segment->done()) {
+ continue;
+ }
+ segment->undoneSpan(startPtr, endPtr);
+ return segment;
+ } while ((segment = segment->next()));
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpContour.h b/gfx/skia/skia/src/pathops/SkOpContour.h
new file mode 100644
index 000000000..dc07c5304
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpContour.h
@@ -0,0 +1,431 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOpContour_DEFINED
+#define SkOpContour_DEFINED
+
+#include "SkOpSegment.h"
+#include "SkTDArray.h"
+#include "SkTSort.h"
+
+enum class SkOpRayDir;
+struct SkOpRayHit;
+class SkPathWriter;
+
+class SkOpContour {
+public:
+ SkOpContour() {
+ reset();
+ }
+
+ ~SkOpContour() {
+ if (fNext) {
+ fNext->~SkOpContour();
+ }
+ }
+
+ bool operator<(const SkOpContour& rh) const {
+ return fBounds.fTop == rh.fBounds.fTop
+ ? fBounds.fLeft < rh.fBounds.fLeft
+ : fBounds.fTop < rh.fBounds.fTop;
+ }
+
+ void addConic(SkPoint pts[3], SkScalar weight) {
+ appendSegment().addConic(pts, weight, this);
+ }
+
+ void addCubic(SkPoint pts[4]) {
+ appendSegment().addCubic(pts, this);
+ }
+
+ SkOpSegment* addCurve(SkPath::Verb verb, const SkPoint pts[4], SkScalar weight = 1);
+
+ SkOpSegment* addLine(SkPoint pts[2]) {
+ SkASSERT(pts[0] != pts[1]);
+ return appendSegment().addLine(pts, this);
+ }
+
+ void addQuad(SkPoint pts[3]) {
+ appendSegment().addQuad(pts, this);
+ }
+
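+    // The first segment is stored inline in fHead; any further segments are drawn
+    // from the global allocator and linked through their prev/next pointers.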
+ SkOpSegment& appendSegment() {
+ SkOpSegment* result = fCount++
+ ? SkOpTAllocator<SkOpSegment>::Allocate(this->globalState()->allocator()) : &fHead;
+ result->setPrev(fTail);
+ if (fTail) {
+ fTail->setNext(result);
+ }
+ fTail = result;
+ return *result;
+ }
+
+ const SkPathOpsBounds& bounds() const {
+ return fBounds;
+ }
+
+ void calcAngles() {
+ SkASSERT(fCount > 0);
+ SkOpSegment* segment = &fHead;
+ do {
+ segment->calcAngles();
+ } while ((segment = segment->next()));
+ }
+
+ void complete() {
+ setBounds();
+ }
+
+ int count() const {
+ return fCount;
+ }
+
+ int debugID() const {
+ return SkDEBUGRELEASE(fID, -1);
+ }
+
+ int debugIndent() const {
+ return SkDEBUGRELEASE(fDebugIndent, 0);
+ }
+
+
+ const SkOpAngle* debugAngle(int id) const {
+ return SkDEBUGRELEASE(this->globalState()->debugAngle(id), nullptr);
+ }
+
+ const SkOpCoincidence* debugCoincidence() const {
+ return this->globalState()->coincidence();
+ }
+
+#if DEBUG_COIN
+ void debugCheckHealth(SkPathOpsDebug::GlitchLog* ) const;
+#endif
+
+ SkOpContour* debugContour(int id) const {
+ return SkDEBUGRELEASE(this->globalState()->debugContour(id), nullptr);
+ }
+
+#if DEBUG_COIN
+ void debugMissingCoincidence(SkPathOpsDebug::GlitchLog* log) const;
+ void debugMoveMultiples(SkPathOpsDebug::GlitchLog* ) const;
+ void debugMoveNearby(SkPathOpsDebug::GlitchLog* log) const;
+#endif
+
+ const SkOpPtT* debugPtT(int id) const {
+ return SkDEBUGRELEASE(this->globalState()->debugPtT(id), nullptr);
+ }
+
+ const SkOpSegment* debugSegment(int id) const {
+ return SkDEBUGRELEASE(this->globalState()->debugSegment(id), nullptr);
+ }
+
+#if DEBUG_ACTIVE_SPANS
+ void debugShowActiveSpans() {
+ SkOpSegment* segment = &fHead;
+ do {
+ segment->debugShowActiveSpans();
+ } while ((segment = segment->next()));
+ }
+#endif
+
+ const SkOpSpanBase* debugSpan(int id) const {
+ return SkDEBUGRELEASE(this->globalState()->debugSpan(id), nullptr);
+ }
+
+ SkOpGlobalState* globalState() const {
+ return fState;
+ }
+
+ void debugValidate() const {
+#if DEBUG_VALIDATE
+ const SkOpSegment* segment = &fHead;
+ const SkOpSegment* prior = nullptr;
+ do {
+ segment->debugValidate();
+ SkASSERT(segment->prev() == prior);
+ prior = segment;
+ } while ((segment = segment->next()));
+ SkASSERT(prior == fTail);
+#endif
+ }
+
+ bool done() const {
+ return fDone;
+ }
+
+ void dump() const;
+ void dumpAll() const;
+ void dumpAngles() const;
+ void dumpContours() const;
+ void dumpContoursAll() const;
+ void dumpContoursAngles() const;
+ void dumpContoursPts() const;
+ void dumpContoursPt(int segmentID) const;
+ void dumpContoursSegment(int segmentID) const;
+ void dumpContoursSpan(int segmentID) const;
+ void dumpContoursSpans() const;
+ void dumpPt(int ) const;
+ void dumpPts(const char* prefix = "seg") const;
+ void dumpPtsX(const char* prefix) const;
+ void dumpSegment(int ) const;
+ void dumpSegments(const char* prefix = "seg", SkPathOp op = (SkPathOp) -1) const;
+ void dumpSpan(int ) const;
+ void dumpSpans() const;
+
+ const SkPoint& end() const {
+ return fTail->pts()[SkPathOpsVerbToPoints(fTail->verb())];
+ }
+
+ SkOpSpan* findSortableTop(SkOpContour* );
+
+ SkOpSegment* first() {
+ SkASSERT(fCount > 0);
+ return &fHead;
+ }
+
+ const SkOpSegment* first() const {
+ SkASSERT(fCount > 0);
+ return &fHead;
+ }
+
+ void indentDump() const {
+ SkDEBUGCODE(fDebugIndent += 2);
+ }
+
+ void init(SkOpGlobalState* globalState, bool operand, bool isXor) {
+ fState = globalState;
+ fOperand = operand;
+ fXor = isXor;
+ SkDEBUGCODE(fID = globalState->nextContourID());
+ }
+
+ int isCcw() const {
+ return fCcw;
+ }
+
+ bool isXor() const {
+ return fXor;
+ }
+
+ void joinSegments() {
+ SkOpSegment* segment = &fHead;
+ SkOpSegment* next;
+ do {
+ next = segment->next();
+ segment->joinEnds(next ? next : &fHead);
+ } while ((segment = next));
+ }
+
+ void markAllDone() {
+ SkOpSegment* segment = &fHead;
+ do {
+ segment->markAllDone();
+ } while ((segment = segment->next()));
+ }
+
+ // Please keep this aligned with debugMissingCoincidence()
+ bool missingCoincidence() {
+ SkASSERT(fCount > 0);
+ SkOpSegment* segment = &fHead;
+ bool result = false;
+ do {
+ if (segment->missingCoincidence()) {
+ result = true;
+ }
+ segment = segment->next();
+ } while (segment);
+ return result;
+ }
+
+ bool moveMultiples() {
+ SkASSERT(fCount > 0);
+ SkOpSegment* segment = &fHead;
+ do {
+ if (!segment->moveMultiples()) {
+ return false;
+ }
+ } while ((segment = segment->next()));
+ return true;
+ }
+
+ void moveNearby() {
+ SkASSERT(fCount > 0);
+ SkOpSegment* segment = &fHead;
+ do {
+ segment->moveNearby();
+ } while ((segment = segment->next()));
+ }
+
+ SkOpContour* next() {
+ return fNext;
+ }
+
+ const SkOpContour* next() const {
+ return fNext;
+ }
+
+ bool operand() const {
+ return fOperand;
+ }
+
+ bool oppXor() const {
+ return fOppXor;
+ }
+
+ void outdentDump() const {
+ SkDEBUGCODE(fDebugIndent -= 2);
+ }
+
+ void rayCheck(const SkOpRayHit& base, SkOpRayDir dir, SkOpRayHit** hits, SkChunkAlloc* );
+
+ void reset() {
+ fTail = nullptr;
+ fNext = nullptr;
+ fCount = 0;
+ fDone = false;
+ SkDEBUGCODE(fBounds.set(SK_ScalarMax, SK_ScalarMax, SK_ScalarMin, SK_ScalarMin));
+ SkDEBUGCODE(fFirstSorted = -1);
+ SkDEBUGCODE(fDebugIndent = 0);
+ }
+
+ void resetReverse() {
+ SkOpContour* next = this;
+ do {
+ next->fCcw = -1;
+ next->fReverse = false;
+ } while ((next = next->next()));
+ }
+
+ bool reversed() const {
+ return fReverse;
+ }
+
+ void setBounds() {
+ SkASSERT(fCount > 0);
+ const SkOpSegment* segment = &fHead;
+ fBounds = segment->bounds();
+ while ((segment = segment->next())) {
+ fBounds.add(segment->bounds());
+ }
+ }
+
+ void setCcw(int ccw) {
+ fCcw = ccw;
+ }
+
+ void setGlobalState(SkOpGlobalState* state) {
+ fState = state;
+ }
+
+ void setNext(SkOpContour* contour) {
+// SkASSERT(!fNext == !!contour);
+ fNext = contour;
+ }
+
+ void setOperand(bool isOp) {
+ fOperand = isOp;
+ }
+
+ void setOppXor(bool isOppXor) {
+ fOppXor = isOppXor;
+ }
+
+ void setReverse() {
+ fReverse = true;
+ }
+
+ void setXor(bool isXor) {
+ fXor = isXor;
+ }
+
+ void sortAngles() {
+ SkASSERT(fCount > 0);
+ SkOpSegment* segment = &fHead;
+ do {
+ segment->sortAngles();
+ } while ((segment = segment->next()));
+ }
+
+ const SkPoint& start() const {
+ return fHead.pts()[0];
+ }
+
+ void toPartialBackward(SkPathWriter* path) const {
+ const SkOpSegment* segment = fTail;
+ do {
+ SkAssertResult(segment->addCurveTo(segment->tail(), segment->head(), path));
+ } while ((segment = segment->prev()));
+ }
+
+ void toPartialForward(SkPathWriter* path) const {
+ const SkOpSegment* segment = &fHead;
+ do {
+ SkAssertResult(segment->addCurveTo(segment->head(), segment->tail(), path));
+ } while ((segment = segment->next()));
+ }
+
+ void toReversePath(SkPathWriter* path) const;
+ void toPath(SkPathWriter* path) const;
+ SkOpSegment* undoneSegment(SkOpSpanBase** startPtr, SkOpSpanBase** endPtr);
+
+private:
+ SkOpGlobalState* fState;
+ SkOpSegment fHead;
+ SkOpSegment* fTail;
+ SkOpContour* fNext;
+ SkPathOpsBounds fBounds;
+ int fCcw;
+ int fCount;
+ int fFirstSorted;
+ bool fDone; // set by find top segment
+ bool fOperand; // true for the second argument to a binary operator
+    bool fReverse; // true if the contour should be written to the path in reverse (used only by fix winding)
+ bool fXor; // set if original path had even-odd fill
+ bool fOppXor; // set if opposite path had even-odd fill
+ SkDEBUGCODE(int fID);
+ SkDEBUGCODE(mutable int fDebugIndent);
+};
+
+class SkOpContourHead : public SkOpContour {
+public:
+ SkOpContour* appendContour() {
+ SkOpContour* contour = SkOpTAllocator<SkOpContour>::New(this->globalState()->allocator());
+ contour->setNext(nullptr);
+ SkOpContour* prev = this;
+ SkOpContour* next;
+ while ((next = prev->next())) {
+ prev = next;
+ }
+ prev->setNext(contour);
+ return contour;
+ }
+
+ void joinAllSegments() {
+ SkOpContour* next = this;
+ do {
+ next->joinSegments();
+ } while ((next = next->next()));
+ }
+
+ void remove(SkOpContour* contour) {
+ if (contour == this) {
+ SkASSERT(this->count() == 0);
+ return;
+ }
+ SkASSERT(contour->next() == nullptr);
+ SkOpContour* prev = this;
+ SkOpContour* next;
+ while ((next = prev->next()) != contour) {
+ SkASSERT(next);
+ prev = next;
+ }
+ SkASSERT(prev);
+ prev->setNext(nullptr);
+ }
+
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkOpCubicHull.cpp b/gfx/skia/skia/src/pathops/SkOpCubicHull.cpp
new file mode 100644
index 000000000..6b17608e8
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpCubicHull.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkPathOpsCubic.h"
+
+static bool rotate(const SkDCubic& cubic, int zero, int index, SkDCubic& rotPath) {
+ double dy = cubic[index].fY - cubic[zero].fY;
+ double dx = cubic[index].fX - cubic[zero].fX;
+ if (approximately_zero(dy)) {
+ if (approximately_zero(dx)) {
+ return false;
+ }
+ rotPath = cubic;
+ if (dy) {
+ rotPath[index].fY = cubic[zero].fY;
+ int mask = other_two(index, zero);
+ int side1 = index ^ mask;
+ int side2 = zero ^ mask;
+ if (approximately_equal(cubic[side1].fY, cubic[zero].fY)) {
+ rotPath[side1].fY = cubic[zero].fY;
+ }
+ if (approximately_equal(cubic[side2].fY, cubic[zero].fY)) {
+ rotPath[side2].fY = cubic[zero].fY;
+ }
+ }
+ return true;
+ }
+ for (int index = 0; index < 4; ++index) {
+ rotPath[index].fX = cubic[index].fX * dx + cubic[index].fY * dy;
+ rotPath[index].fY = cubic[index].fY * dx - cubic[index].fX * dy;
+ }
+ return true;
+}
+
+
+// Returns 0 if negative, 1 if zero, 2 if positive
+static int side(double x) {
+ return (x > 0) + (x >= 0);
+}
+
+/* Given a cubic, find the convex hull described by the end and control points.
+ The hull may have 3 or 4 points. Cubics that degenerate into a point or line
+ are not considered.
+
+ The hull is computed by assuming that three points, if unique and non-linear,
+ form a triangle. The fourth point may replace one of the first three, may be
+ discarded if in the triangle or on an edge, or may be inserted between any of
+   the three to form a convex quadrilateral.
+
+ The indices returned in order describe the convex hull.
+*/
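+/* Illustrative usage sketch (the coordinate values below are arbitrary):
+
+       SkDCubic cubic = {{{0, 0}, {1, 2}, {3, 2}, {4, 0}}};
+       char order[4];
+       int hullCount = cubic.convexHull(order);  // 3 for a triangle, 4 for a quadrilateral
+       // order[0] .. order[hullCount - 1] index cubic's points in convex hull order
+*/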
+int SkDCubic::convexHull(char order[4]) const {
+ size_t index;
+ // find top point
+ size_t yMin = 0;
+ for (index = 1; index < 4; ++index) {
+ if (fPts[yMin].fY > fPts[index].fY || (fPts[yMin].fY == fPts[index].fY
+ && fPts[yMin].fX > fPts[index].fX)) {
+ yMin = index;
+ }
+ }
+ order[0] = yMin;
+ int midX = -1;
+ int backupYMin = -1;
+ for (int pass = 0; pass < 2; ++pass) {
+ for (index = 0; index < 4; ++index) {
+ if (index == yMin) {
+ continue;
+ }
+ // rotate line from (yMin, index) to axis
+ // see if remaining two points are both above or below
+ // use this to find mid
+ int mask = other_two(yMin, index);
+ int side1 = yMin ^ mask;
+ int side2 = index ^ mask;
+ SkDCubic rotPath;
+            if (!rotate(*this, yMin, index, rotPath)) { // rotate returns false if cubic[yMin] == cubic[index]
+ order[1] = side1;
+ order[2] = side2;
+ return 3;
+ }
+ int sides = side(rotPath[side1].fY - rotPath[yMin].fY);
+ sides ^= side(rotPath[side2].fY - rotPath[yMin].fY);
+ if (sides == 2) { // '2' means one remaining point <0, one >0
+ if (midX >= 0) {
+ // one of the control points is equal to an end point
+ order[0] = 0;
+ order[1] = 3;
+ if (fPts[1] == fPts[0] || fPts[1] == fPts[3]) {
+ order[2] = 2;
+ return 3;
+ }
+ if (fPts[2] == fPts[0] || fPts[2] == fPts[3]) {
+ order[2] = 1;
+ return 3;
+ }
+                    // one of the control points may be very nearly, but not exactly, equal to an end point --
+ double dist1_0 = fPts[1].distanceSquared(fPts[0]);
+ double dist1_3 = fPts[1].distanceSquared(fPts[3]);
+ double dist2_0 = fPts[2].distanceSquared(fPts[0]);
+ double dist2_3 = fPts[2].distanceSquared(fPts[3]);
+ double smallest1distSq = SkTMin(dist1_0, dist1_3);
+ double smallest2distSq = SkTMin(dist2_0, dist2_3);
+ if (approximately_zero(SkTMin(smallest1distSq, smallest2distSq))) {
+ order[2] = smallest1distSq < smallest2distSq ? 2 : 1;
+ return 3;
+ }
+ }
+ midX = index;
+ } else if (sides == 0) { // '0' means both to one side or the other
+ backupYMin = index;
+ }
+ }
+ if (midX >= 0) {
+ break;
+ }
+ if (backupYMin < 0) {
+ break;
+ }
+ yMin = backupYMin;
+ backupYMin = -1;
+ }
+ if (midX < 0) {
+ midX = yMin ^ 3; // choose any other point
+ }
+ int mask = other_two(yMin, midX);
+ int least = yMin ^ mask;
+ int most = midX ^ mask;
+ order[0] = yMin;
+ order[1] = least;
+
+ // see if mid value is on same side of line (least, most) as yMin
+ SkDCubic midPath;
+    if (!rotate(*this, least, most, midPath)) { // rotate returns false if cubic[least] == cubic[most]
+ order[2] = midX;
+ return 3;
+ }
+ int midSides = side(midPath[yMin].fY - midPath[least].fY);
+ midSides ^= side(midPath[midX].fY - midPath[least].fY);
+ if (midSides != 2) { // if mid point is not between
+ order[2] = most;
+ return 3; // result is a triangle
+ }
+ order[2] = midX;
+ order[3] = most;
+    return 4; // result is a quadrilateral
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpEdgeBuilder.cpp b/gfx/skia/skia/src/pathops/SkOpEdgeBuilder.cpp
new file mode 100644
index 000000000..ab2aca0a7
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpEdgeBuilder.cpp
@@ -0,0 +1,305 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkGeometry.h"
+#include "SkOpEdgeBuilder.h"
+#include "SkReduceOrder.h"
+
+void SkOpEdgeBuilder::init() {
+ fCurrentContour = fContoursHead;
+ fOperand = false;
+ fXorMask[0] = fXorMask[1] = (fPath->getFillType() & 1) ? kEvenOdd_PathOpsMask
+ : kWinding_PathOpsMask;
+ fUnparseable = false;
+ fSecondHalf = preFetch();
+}
+
+// very tiny points cause numerical instability: don't allow them
+static void force_small_to_zero(SkPoint* pt) {
+ if (SkScalarAbs(pt->fX) < FLT_EPSILON_ORDERABLE_ERR) {
+ pt->fX = 0;
+ }
+ if (SkScalarAbs(pt->fY) < FLT_EPSILON_ORDERABLE_ERR) {
+ pt->fY = 0;
+ }
+}
+
+static bool can_add_curve(SkPath::Verb verb, SkPoint* curve) {
+ if (SkPath::kMove_Verb == verb) {
+ return false;
+ }
+ for (int index = 0; index <= SkPathOpsVerbToPoints(verb); ++index) {
+ force_small_to_zero(&curve[index]);
+ }
+ return SkPath::kLine_Verb != verb || !SkDPoint::ApproximatelyEqual(curve[0], curve[1]);
+}
+
+void SkOpEdgeBuilder::addOperand(const SkPath& path) {
+ SkASSERT(fPathVerbs.count() > 0 && fPathVerbs.end()[-1] == SkPath::kDone_Verb);
+ fPathVerbs.pop();
+ fPath = &path;
+ fXorMask[1] = (fPath->getFillType() & 1) ? kEvenOdd_PathOpsMask
+ : kWinding_PathOpsMask;
+ preFetch();
+}
+
+bool SkOpEdgeBuilder::finish() {
+ fOperand = false;
+ if (fUnparseable || !walk()) {
+ return false;
+ }
+ complete();
+ if (fCurrentContour && !fCurrentContour->count()) {
+ fContoursHead->remove(fCurrentContour);
+ }
+ return true;
+}
+
+void SkOpEdgeBuilder::closeContour(const SkPoint& curveEnd, const SkPoint& curveStart) {
+ if (!SkDPoint::ApproximatelyEqual(curveEnd, curveStart)) {
+ *fPathVerbs.append() = SkPath::kLine_Verb;
+ *fPathPts.append() = curveStart;
+ } else {
+ int verbCount = fPathVerbs.count();
+ int ptsCount = fPathPts.count();
+ if (SkPath::kLine_Verb == fPathVerbs[verbCount - 1]
+ && fPathPts[ptsCount - 2] == curveStart) {
+ fPathVerbs.pop();
+ fPathPts.pop();
+ } else {
+ fPathPts[ptsCount - 1] = curveStart;
+ }
+ }
+ *fPathVerbs.append() = SkPath::kClose_Verb;
+}
+
+int SkOpEdgeBuilder::preFetch() {
+ if (!fPath->isFinite()) {
+ fUnparseable = true;
+ return 0;
+ }
+ SkPath::RawIter iter(*fPath);
+ SkPoint curveStart;
+ SkPoint curve[4];
+ SkPoint pts[4];
+ SkPath::Verb verb;
+ bool lastCurve = false;
+ do {
+ verb = iter.next(pts);
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ if (!fAllowOpenContours && lastCurve) {
+ closeContour(curve[0], curveStart);
+ }
+ *fPathVerbs.append() = verb;
+ force_small_to_zero(&pts[0]);
+ *fPathPts.append() = pts[0];
+ curveStart = curve[0] = pts[0];
+ lastCurve = false;
+ continue;
+ case SkPath::kLine_Verb:
+ force_small_to_zero(&pts[1]);
+ if (SkDPoint::ApproximatelyEqual(curve[0], pts[1])) {
+ uint8_t lastVerb = fPathVerbs.top();
+ if (lastVerb != SkPath::kLine_Verb && lastVerb != SkPath::kMove_Verb) {
+ fPathPts.top() = pts[1];
+ }
+ continue; // skip degenerate points
+ }
+ break;
+ case SkPath::kQuad_Verb:
+ force_small_to_zero(&pts[1]);
+ force_small_to_zero(&pts[2]);
+ curve[1] = pts[1];
+ curve[2] = pts[2];
+ verb = SkReduceOrder::Quad(curve, pts);
+ if (verb == SkPath::kMove_Verb) {
+ continue; // skip degenerate points
+ }
+ break;
+ case SkPath::kConic_Verb:
+ force_small_to_zero(&pts[1]);
+ force_small_to_zero(&pts[2]);
+ curve[1] = pts[1];
+ curve[2] = pts[2];
+ verb = SkReduceOrder::Quad(curve, pts);
+ if (SkPath::kQuad_Verb == verb && 1 != iter.conicWeight()) {
+ verb = SkPath::kConic_Verb;
+ } else if (verb == SkPath::kMove_Verb) {
+ continue; // skip degenerate points
+ }
+ break;
+ case SkPath::kCubic_Verb:
+ force_small_to_zero(&pts[1]);
+ force_small_to_zero(&pts[2]);
+ force_small_to_zero(&pts[3]);
+ curve[1] = pts[1];
+ curve[2] = pts[2];
+ curve[3] = pts[3];
+ verb = SkReduceOrder::Cubic(curve, pts);
+ if (verb == SkPath::kMove_Verb) {
+ continue; // skip degenerate points
+ }
+ break;
+ case SkPath::kClose_Verb:
+ closeContour(curve[0], curveStart);
+ lastCurve = false;
+ continue;
+ case SkPath::kDone_Verb:
+ continue;
+ }
+ *fPathVerbs.append() = verb;
+ int ptCount = SkPathOpsVerbToPoints(verb);
+ fPathPts.append(ptCount, &pts[1]);
+ if (verb == SkPath::kConic_Verb) {
+ *fWeights.append() = iter.conicWeight();
+ }
+ curve[0] = pts[ptCount];
+ lastCurve = true;
+ } while (verb != SkPath::kDone_Verb);
+ if (!fAllowOpenContours && lastCurve) {
+ closeContour(curve[0], curveStart);
+ }
+ *fPathVerbs.append() = SkPath::kDone_Verb;
+ return fPathVerbs.count() - 1;
+}
+
+bool SkOpEdgeBuilder::close() {
+ complete();
+ return true;
+}
+
+bool SkOpEdgeBuilder::walk() {
+ uint8_t* verbPtr = fPathVerbs.begin();
+ uint8_t* endOfFirstHalf = &verbPtr[fSecondHalf];
+ SkPoint* pointsPtr = fPathPts.begin() - 1;
+ SkScalar* weightPtr = fWeights.begin();
+ SkPath::Verb verb;
+ while ((verb = (SkPath::Verb) *verbPtr) != SkPath::kDone_Verb) {
+ if (verbPtr == endOfFirstHalf) {
+ fOperand = true;
+ }
+ verbPtr++;
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ if (fCurrentContour && fCurrentContour->count()) {
+ if (fAllowOpenContours) {
+ complete();
+ } else if (!close()) {
+ return false;
+ }
+ }
+ if (!fCurrentContour) {
+ fCurrentContour = fContoursHead->appendContour();
+ }
+ fCurrentContour->init(fGlobalState, fOperand,
+ fXorMask[fOperand] == kEvenOdd_PathOpsMask);
+ pointsPtr += 1;
+ continue;
+ case SkPath::kLine_Verb:
+ fCurrentContour->addLine(pointsPtr);
+ break;
+ case SkPath::kQuad_Verb:
+ {
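+                    // If the control polygon doubles back on itself (the two control
+                    // legs point in opposing directions), chop the quad at its
+                    // maximum-curvature point and add the halves separately,
+                    // mirroring the complex-cubic handling below.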
+ SkVector v1 = pointsPtr[1] - pointsPtr[0];
+ SkVector v2 = pointsPtr[2] - pointsPtr[1];
+ if (v1.dot(v2) < 0) {
+ SkPoint pair[5];
+ if (SkChopQuadAtMaxCurvature(pointsPtr, pair) == 1) {
+ goto addOneQuad;
+ }
+ if (!SkScalarsAreFinite(&pair[0].fX, SK_ARRAY_COUNT(pair) * 2)) {
+ return false;
+ }
+ SkPoint cStorage[2][2];
+ SkPath::Verb v1 = SkReduceOrder::Quad(&pair[0], cStorage[0]);
+ SkPath::Verb v2 = SkReduceOrder::Quad(&pair[2], cStorage[1]);
+ SkPoint* curve1 = v1 != SkPath::kLine_Verb ? &pair[0] : cStorage[0];
+ SkPoint* curve2 = v2 != SkPath::kLine_Verb ? &pair[2] : cStorage[1];
+ if (can_add_curve(v1, curve1) && can_add_curve(v2, curve2)) {
+ fCurrentContour->addCurve(v1, curve1);
+ fCurrentContour->addCurve(v2, curve2);
+ break;
+ }
+ }
+ }
+ addOneQuad:
+ fCurrentContour->addQuad(pointsPtr);
+ break;
+ case SkPath::kConic_Verb: {
+ SkVector v1 = pointsPtr[1] - pointsPtr[0];
+ SkVector v2 = pointsPtr[2] - pointsPtr[1];
+ SkScalar weight = *weightPtr++;
+ if (v1.dot(v2) < 0) {
+ // FIXME: max curvature for conics hasn't been implemented; use placeholder
+ SkScalar maxCurvature = SkFindQuadMaxCurvature(pointsPtr);
+ if (maxCurvature > 0) {
+ SkConic conic(pointsPtr, weight);
+ SkConic pair[2];
+ if (!conic.chopAt(maxCurvature, pair)) {
+ // if result can't be computed, use original
+ fCurrentContour->addConic(pointsPtr, weight);
+ break;
+ }
+ SkPoint cStorage[2][3];
+ SkPath::Verb v1 = SkReduceOrder::Conic(pair[0], cStorage[0]);
+ SkPath::Verb v2 = SkReduceOrder::Conic(pair[1], cStorage[1]);
+ SkPoint* curve1 = v1 != SkPath::kLine_Verb ? pair[0].fPts : cStorage[0];
+ SkPoint* curve2 = v2 != SkPath::kLine_Verb ? pair[1].fPts : cStorage[1];
+ if (can_add_curve(v1, curve1) && can_add_curve(v2, curve2)) {
+ fCurrentContour->addCurve(v1, curve1, pair[0].fW);
+ fCurrentContour->addCurve(v2, curve2, pair[1].fW);
+ break;
+ }
+ }
+ }
+ fCurrentContour->addConic(pointsPtr, weight);
+ } break;
+ case SkPath::kCubic_Verb:
+ {
+ // Split complex cubics (such as self-intersecting curves or
+ // ones with difficult curvature) in two before proceeding.
+ // This can be required for intersection to succeed.
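+                    // (SkChopCubicAt writes seven points: pair[0..3] and pair[3..6]
+                    // are the two halves, which share pair[3] at the split point.)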
+ SkScalar splitT;
+ if (SkDCubic::ComplexBreak(pointsPtr, &splitT)) {
+ SkPoint pair[7];
+ SkChopCubicAt(pointsPtr, pair, splitT);
+ if (!SkScalarsAreFinite(&pair[0].fX, SK_ARRAY_COUNT(pair) * 2)) {
+ return false;
+ }
+ SkPoint cStorage[2][4];
+ SkPath::Verb v1 = SkReduceOrder::Cubic(&pair[0], cStorage[0]);
+ SkPath::Verb v2 = SkReduceOrder::Cubic(&pair[3], cStorage[1]);
+ SkPoint* curve1 = v1 == SkPath::kCubic_Verb ? &pair[0] : cStorage[0];
+ SkPoint* curve2 = v2 == SkPath::kCubic_Verb ? &pair[3] : cStorage[1];
+ if (can_add_curve(v1, curve1) && can_add_curve(v2, curve2)) {
+ fCurrentContour->addCurve(v1, curve1);
+ fCurrentContour->addCurve(v2, curve2);
+ break;
+ }
+ }
+ }
+ fCurrentContour->addCubic(pointsPtr);
+ break;
+ case SkPath::kClose_Verb:
+ SkASSERT(fCurrentContour);
+ if (!close()) {
+ return false;
+ }
+ continue;
+ default:
+ SkDEBUGFAIL("bad verb");
+ return false;
+ }
+ SkASSERT(fCurrentContour);
+ fCurrentContour->debugValidate();
+ pointsPtr += SkPathOpsVerbToPoints(verb);
+ }
+    if (fCurrentContour && fCurrentContour->count() && !fAllowOpenContours && !close()) {
+ return false;
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpEdgeBuilder.h b/gfx/skia/skia/src/pathops/SkOpEdgeBuilder.h
new file mode 100644
index 000000000..c6fc7dcb0
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpEdgeBuilder.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOpEdgeBuilder_DEFINED
+#define SkOpEdgeBuilder_DEFINED
+
+#include "SkOpContour.h"
+#include "SkPathWriter.h"
+
+class SkOpEdgeBuilder {
+public:
+ SkOpEdgeBuilder(const SkPathWriter& path, SkOpContourHead* contours2,
+ SkOpGlobalState* globalState)
+ : fGlobalState(globalState)
+ , fPath(path.nativePath())
+ , fContoursHead(contours2)
+ , fAllowOpenContours(true) {
+ init();
+ }
+
+ SkOpEdgeBuilder(const SkPath& path, SkOpContourHead* contours2, SkOpGlobalState* globalState)
+ : fGlobalState(globalState)
+ , fPath(&path)
+ , fContoursHead(contours2)
+ , fAllowOpenContours(false) {
+ init();
+ }
+
+ void addOperand(const SkPath& path);
+
+ void complete() {
+ if (fCurrentContour && fCurrentContour->count()) {
+ fCurrentContour->complete();
+ fCurrentContour = nullptr;
+ }
+ }
+
+ bool finish();
+
+ const SkOpContour* head() const {
+ return fContoursHead;
+ }
+
+ void init();
+ bool unparseable() const { return fUnparseable; }
+ SkPathOpsMask xorMask() const { return fXorMask[fOperand]; }
+
+private:
+ void closeContour(const SkPoint& curveEnd, const SkPoint& curveStart);
+ bool close();
+ int preFetch();
+ bool walk();
+
+ SkOpGlobalState* fGlobalState;
+ const SkPath* fPath;
+ SkTDArray<SkPoint> fPathPts;
+ SkTDArray<SkScalar> fWeights;
+ SkTDArray<uint8_t> fPathVerbs;
+ SkOpContour* fCurrentContour;
+ SkOpContourHead* fContoursHead;
+ SkPathOpsMask fXorMask[2];
+ int fSecondHalf;
+ bool fOperand;
+ bool fAllowOpenContours;
+ bool fUnparseable;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkOpSegment.cpp b/gfx/skia/skia/src/pathops/SkOpSegment.cpp
new file mode 100644
index 000000000..2246f36ff
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpSegment.cpp
@@ -0,0 +1,1695 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkOpCoincidence.h"
+#include "SkOpContour.h"
+#include "SkOpSegment.h"
+#include "SkPathWriter.h"
+
+/*
+After computing raw intersections, post process all segments to:
+- find small collections of points that can be collapsed to a single point
+- find missing intersections to resolve differences caused by different algorithms
+
+Consider segments containing tiny or small intervals. Consider coincident segments
+because coincidence finds intersections through distance measurement that non-coincident
+intersection tests cannot.
+ */
+
+#define F (false) // discard the edge
+#define T (true) // keep the edge
+
+static const bool gUnaryActiveEdge[2][2] = {
+// from=0 from=1
+// to=0,1 to=0,1
+ {F, T}, {T, F},
+};
+
+static const bool gActiveEdge[kXOR_SkPathOp + 1][2][2][2][2] = {
+// miFrom=0 miFrom=1
+// miTo=0 miTo=1 miTo=0 miTo=1
+// suFrom=0 1 suFrom=0 1 suFrom=0 1 suFrom=0 1
+// suTo=0,1 suTo=0,1 suTo=0,1 suTo=0,1 suTo=0,1 suTo=0,1 suTo=0,1 suTo=0,1
+ {{{{F, F}, {F, F}}, {{T, F}, {T, F}}}, {{{T, T}, {F, F}}, {{F, T}, {T, F}}}}, // mi - su
+ {{{{F, F}, {F, F}}, {{F, T}, {F, T}}}, {{{F, F}, {T, T}}, {{F, T}, {T, F}}}}, // mi & su
+ {{{{F, T}, {T, F}}, {{T, T}, {F, F}}}, {{{T, F}, {T, F}}, {{F, F}, {F, F}}}}, // mi | su
+ {{{{F, T}, {T, F}}, {{T, F}, {F, T}}}, {{{T, F}, {F, T}}, {{F, T}, {T, F}}}}, // mi ^ su
+};
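+// Example of reading the table above: for the difference op (the "mi - su" row),
+// gActiveEdge[kDifference_SkPathOp][0][1][0][0] is T -- an edge that takes the
+// minuend from outside to inside while the subtrahend stays outside contributes
+// to the result.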
+
+#undef F
+#undef T
+
+SkOpAngle* SkOpSegment::activeAngle(SkOpSpanBase* start, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr, bool* done) {
+ if (SkOpAngle* result = activeAngleInner(start, startPtr, endPtr, done)) {
+ return result;
+ }
+ if (SkOpAngle* result = activeAngleOther(start, startPtr, endPtr, done)) {
+ return result;
+ }
+ return nullptr;
+}
+
+SkOpAngle* SkOpSegment::activeAngleInner(SkOpSpanBase* start, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr, bool* done) {
+ SkOpSpan* upSpan = start->upCastable();
+ if (upSpan) {
+ if (upSpan->windValue() || upSpan->oppValue()) {
+ SkOpSpanBase* next = upSpan->next();
+ if (!*endPtr) {
+ *startPtr = start;
+ *endPtr = next;
+ }
+ if (!upSpan->done()) {
+ if (upSpan->windSum() != SK_MinS32) {
+ return spanToAngle(start, next);
+ }
+ *done = false;
+ }
+ } else {
+ SkASSERT(upSpan->done());
+ }
+ }
+ SkOpSpan* downSpan = start->prev();
+ // edge leading into junction
+ if (downSpan) {
+ if (downSpan->windValue() || downSpan->oppValue()) {
+ if (!*endPtr) {
+ *startPtr = start;
+ *endPtr = downSpan;
+ }
+ if (!downSpan->done()) {
+ if (downSpan->windSum() != SK_MinS32) {
+ return spanToAngle(start, downSpan);
+ }
+ *done = false;
+ }
+ } else {
+ SkASSERT(downSpan->done());
+ }
+ }
+ return nullptr;
+}
+
+SkOpAngle* SkOpSegment::activeAngleOther(SkOpSpanBase* start, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr, bool* done) {
+ SkOpPtT* oPtT = start->ptT()->next();
+ SkOpSegment* other = oPtT->segment();
+ SkOpSpanBase* oSpan = oPtT->span();
+ return other->activeAngleInner(oSpan, startPtr, endPtr, done);
+}
+
+bool SkOpSegment::activeOp(SkOpSpanBase* start, SkOpSpanBase* end, int xorMiMask, int xorSuMask,
+ SkPathOp op) {
+ int sumMiWinding = this->updateWinding(end, start);
+ int sumSuWinding = this->updateOppWinding(end, start);
+#if DEBUG_LIMIT_WIND_SUM
+ SkASSERT(abs(sumMiWinding) <= DEBUG_LIMIT_WIND_SUM);
+ SkASSERT(abs(sumSuWinding) <= DEBUG_LIMIT_WIND_SUM);
+#endif
+ if (this->operand()) {
+ SkTSwap<int>(sumMiWinding, sumSuWinding);
+ }
+ return this->activeOp(xorMiMask, xorSuMask, start, end, op, &sumMiWinding, &sumSuWinding);
+}
+
+bool SkOpSegment::activeOp(int xorMiMask, int xorSuMask, SkOpSpanBase* start, SkOpSpanBase* end,
+ SkPathOp op, int* sumMiWinding, int* sumSuWinding) {
+ int maxWinding, sumWinding, oppMaxWinding, oppSumWinding;
+ this->setUpWindings(start, end, sumMiWinding, sumSuWinding,
+ &maxWinding, &sumWinding, &oppMaxWinding, &oppSumWinding);
+ bool miFrom;
+ bool miTo;
+ bool suFrom;
+ bool suTo;
+ if (operand()) {
+ miFrom = (oppMaxWinding & xorMiMask) != 0;
+ miTo = (oppSumWinding & xorMiMask) != 0;
+ suFrom = (maxWinding & xorSuMask) != 0;
+ suTo = (sumWinding & xorSuMask) != 0;
+ } else {
+ miFrom = (maxWinding & xorMiMask) != 0;
+ miTo = (sumWinding & xorMiMask) != 0;
+ suFrom = (oppMaxWinding & xorSuMask) != 0;
+ suTo = (oppSumWinding & xorSuMask) != 0;
+ }
+ bool result = gActiveEdge[op][miFrom][miTo][suFrom][suTo];
+#if DEBUG_ACTIVE_OP
+ SkDebugf("%s id=%d t=%1.9g tEnd=%1.9g op=%s miFrom=%d miTo=%d suFrom=%d suTo=%d result=%d\n",
+ __FUNCTION__, debugID(), start->t(), end->t(),
+ SkPathOpsDebug::kPathOpStr[op], miFrom, miTo, suFrom, suTo, result);
+#endif
+ return result;
+}
+
+bool SkOpSegment::activeWinding(SkOpSpanBase* start, SkOpSpanBase* end) {
+ int sumWinding = updateWinding(end, start);
+ return activeWinding(start, end, &sumWinding);
+}
+
+bool SkOpSegment::activeWinding(SkOpSpanBase* start, SkOpSpanBase* end, int* sumWinding) {
+ int maxWinding;
+ setUpWinding(start, end, &maxWinding, sumWinding);
+ bool from = maxWinding != 0;
+ bool to = *sumWinding != 0;
+ bool result = gUnaryActiveEdge[from][to];
+ return result;
+}
+
+bool SkOpSegment::addCurveTo(const SkOpSpanBase* start, const SkOpSpanBase* end,
+ SkPathWriter* path) const {
+ FAIL_IF(start->starter(end)->alreadyAdded());
+ SkDCurveSweep curvePart;
+ start->segment()->subDivide(start, end, &curvePart.fCurve);
+ curvePart.setCurveHullSweep(fVerb);
+ SkPath::Verb verb = curvePart.isCurve() ? fVerb : SkPath::kLine_Verb;
+ path->deferredMove(start->ptT());
+ switch (verb) {
+ case SkPath::kLine_Verb:
+ path->deferredLine(end->ptT());
+ break;
+ case SkPath::kQuad_Verb:
+ path->quadTo(curvePart.fCurve.fQuad.fPts[1].asSkPoint(), end->ptT());
+ break;
+ case SkPath::kConic_Verb:
+ path->conicTo(curvePart.fCurve.fConic.fPts[1].asSkPoint(), end->ptT(),
+ curvePart.fCurve.fConic.fWeight);
+ break;
+ case SkPath::kCubic_Verb:
+ path->cubicTo(curvePart.fCurve.fCubic.fPts[1].asSkPoint(),
+ curvePart.fCurve.fCubic.fPts[2].asSkPoint(), end->ptT());
+ break;
+ default:
+ SkASSERT(0);
+ }
+ return true;
+}
+
+const SkOpPtT* SkOpSegment::existing(double t, const SkOpSegment* opp) const {
+ const SkOpSpanBase* test = &fHead;
+ const SkOpPtT* testPtT;
+ SkPoint pt = this->ptAtT(t);
+ do {
+ testPtT = test->ptT();
+ if (testPtT->fT == t) {
+ break;
+ }
+ if (!this->match(testPtT, this, t, pt)) {
+ if (t < testPtT->fT) {
+ return nullptr;
+ }
+ continue;
+ }
+ if (!opp) {
+ return testPtT;
+ }
+ const SkOpPtT* loop = testPtT->next();
+ while (loop != testPtT) {
+ if (loop->segment() == this && loop->fT == t && loop->fPt == pt) {
+ goto foundMatch;
+ }
+ loop = loop->next();
+ }
+ return nullptr;
+ } while ((test = test->upCast()->next()));
+foundMatch:
+ return opp && !test->contains(opp) ? nullptr : testPtT;
+}
+
+// break the span so that the coincident part does not change the angle of the remainder
+bool SkOpSegment::addExpanded(double newT, const SkOpSpanBase* test, bool* startOver) {
+ if (this->contains(newT)) {
+ return true;
+ }
+ this->globalState()->resetAllocatedOpSpan();
+ SkOpPtT* newPtT = this->addT(newT);
+ *startOver |= this->globalState()->allocatedOpSpan();
+ if (!newPtT) {
+ return false;
+ }
+ newPtT->fPt = this->ptAtT(newT);
+ SkOpPtT* oppPrev = test->ptT()->oppPrev(newPtT);
+ if (oppPrev) {
+        // const cast away to change the linked list; the pt/t values stay unchanged
+ SkOpSpanBase* writableTest = const_cast<SkOpSpanBase*>(test);
+ writableTest->mergeMatches(newPtT->span());
+ writableTest->ptT()->addOpp(newPtT, oppPrev);
+ writableTest->checkForCollapsedCoincidence();
+ }
+ return true;
+}
+
+// Please keep this in sync with debugAddT()
+SkOpPtT* SkOpSegment::addT(double t) {
+ debugValidate();
+ SkPoint pt = this->ptAtT(t);
+ SkOpSpanBase* spanBase = &fHead;
+ do {
+ SkOpPtT* result = spanBase->ptT();
+ if (t == result->fT || (!zero_or_one(t) && this->match(result, this, t, pt))) {
+ spanBase->bumpSpanAdds();
+ return result;
+ }
+ if (t < result->fT) {
+ SkOpSpan* prev = result->span()->prev();
+ FAIL_WITH_NULL_IF(!prev);
+ // marks in global state that new op span has been allocated
+ SkOpSpan* span = this->insert(prev);
+ span->init(this, prev, t, pt);
+ this->debugValidate();
+#if DEBUG_ADD_T
+ SkDebugf("%s insert t=%1.9g segID=%d spanID=%d\n", __FUNCTION__, t,
+ span->segment()->debugID(), span->debugID());
+#endif
+ span->bumpSpanAdds();
+ return span->ptT();
+ }
+ FAIL_WITH_NULL_IF(spanBase == &fTail);
+ } while ((spanBase = spanBase->upCast()->next()));
+ SkASSERT(0);
+ return nullptr; // we never get here, but need this to satisfy compiler
+}
+
+void SkOpSegment::calcAngles() {
+ bool activePrior = !fHead.isCanceled();
+ if (activePrior && !fHead.simple()) {
+ addStartSpan();
+ }
+ SkOpSpan* prior = &fHead;
+ SkOpSpanBase* spanBase = fHead.next();
+ while (spanBase != &fTail) {
+ if (activePrior) {
+ SkOpAngle* priorAngle = SkOpTAllocator<SkOpAngle>::Allocate(
+ this->globalState()->allocator());
+ priorAngle->set(spanBase, prior);
+ spanBase->setFromAngle(priorAngle);
+ }
+ SkOpSpan* span = spanBase->upCast();
+ bool active = !span->isCanceled();
+ SkOpSpanBase* next = span->next();
+ if (active) {
+ SkOpAngle* angle = SkOpTAllocator<SkOpAngle>::Allocate(
+ this->globalState()->allocator());
+ angle->set(span, next);
+ span->setToAngle(angle);
+ }
+ activePrior = active;
+ prior = span;
+ spanBase = next;
+ }
+ if (activePrior && !fTail.simple()) {
+ addEndSpan();
+ }
+}
+
+// Please keep this in sync with debugClearAll()
+void SkOpSegment::clearAll() {
+ SkOpSpan* span = &fHead;
+ do {
+ this->clearOne(span);
+ } while ((span = span->next()->upCastable()));
+ this->globalState()->coincidence()->release(this);
+}
+
+// Please keep this in sync with debugClearOne()
+void SkOpSegment::clearOne(SkOpSpan* span) {
+ span->setWindValue(0);
+ span->setOppValue(0);
+ this->markDone(span);
+}
+
+bool SkOpSegment::collapsed(double s, double e) const {
+ const SkOpSpanBase* span = &fHead;
+ do {
+ if (span->collapsed(s, e)) {
+ return true;
+ }
+ } while (span->upCastable() && (span = span->upCast()->next()));
+ return false;
+}
+
+void SkOpSegment::ComputeOneSum(const SkOpAngle* baseAngle, SkOpAngle* nextAngle,
+ SkOpAngle::IncludeType includeType) {
+ SkOpSegment* baseSegment = baseAngle->segment();
+ int sumMiWinding = baseSegment->updateWindingReverse(baseAngle);
+ int sumSuWinding;
+ bool binary = includeType >= SkOpAngle::kBinarySingle;
+ if (binary) {
+ sumSuWinding = baseSegment->updateOppWindingReverse(baseAngle);
+ if (baseSegment->operand()) {
+ SkTSwap<int>(sumMiWinding, sumSuWinding);
+ }
+ }
+ SkOpSegment* nextSegment = nextAngle->segment();
+ int maxWinding, sumWinding;
+ SkOpSpanBase* last;
+ if (binary) {
+ int oppMaxWinding, oppSumWinding;
+ nextSegment->setUpWindings(nextAngle->start(), nextAngle->end(), &sumMiWinding,
+ &sumSuWinding, &maxWinding, &sumWinding, &oppMaxWinding, &oppSumWinding);
+ last = nextSegment->markAngle(maxWinding, sumWinding, oppMaxWinding, oppSumWinding,
+ nextAngle);
+ } else {
+ nextSegment->setUpWindings(nextAngle->start(), nextAngle->end(), &sumMiWinding,
+ &maxWinding, &sumWinding);
+ last = nextSegment->markAngle(maxWinding, sumWinding, nextAngle);
+ }
+ nextAngle->setLastMarked(last);
+}
+
+void SkOpSegment::ComputeOneSumReverse(SkOpAngle* baseAngle, SkOpAngle* nextAngle,
+ SkOpAngle::IncludeType includeType) {
+ SkOpSegment* baseSegment = baseAngle->segment();
+ int sumMiWinding = baseSegment->updateWinding(baseAngle);
+ int sumSuWinding;
+ bool binary = includeType >= SkOpAngle::kBinarySingle;
+ if (binary) {
+ sumSuWinding = baseSegment->updateOppWinding(baseAngle);
+ if (baseSegment->operand()) {
+ SkTSwap<int>(sumMiWinding, sumSuWinding);
+ }
+ }
+ SkOpSegment* nextSegment = nextAngle->segment();
+ int maxWinding, sumWinding;
+ SkOpSpanBase* last;
+ if (binary) {
+ int oppMaxWinding, oppSumWinding;
+ nextSegment->setUpWindings(nextAngle->end(), nextAngle->start(), &sumMiWinding,
+ &sumSuWinding, &maxWinding, &sumWinding, &oppMaxWinding, &oppSumWinding);
+ last = nextSegment->markAngle(maxWinding, sumWinding, oppMaxWinding, oppSumWinding,
+ nextAngle);
+ } else {
+ nextSegment->setUpWindings(nextAngle->end(), nextAngle->start(), &sumMiWinding,
+ &maxWinding, &sumWinding);
+ last = nextSegment->markAngle(maxWinding, sumWinding, nextAngle);
+ }
+ nextAngle->setLastMarked(last);
+}
+
+// at this point, the span is already ordered, or unorderable
+int SkOpSegment::computeSum(SkOpSpanBase* start, SkOpSpanBase* end,
+ SkOpAngle::IncludeType includeType) {
+ SkASSERT(includeType != SkOpAngle::kUnaryXor);
+ SkOpAngle* firstAngle = this->spanToAngle(end, start);
+ if (nullptr == firstAngle || nullptr == firstAngle->next()) {
+ return SK_NaN32;
+ }
+ // if all angles have a computed winding,
+ // or if no adjacent angles are orderable,
+ // or if adjacent orderable angles have no computed winding,
+ // there's nothing to do
+ // if two orderable angles are adjacent, and both are next to orderable angles,
+ // and one has winding computed, transfer to the other
+ SkOpAngle* baseAngle = nullptr;
+ bool tryReverse = false;
+ // look for counterclockwise transfers
+ SkOpAngle* angle = firstAngle->previous();
+ SkOpAngle* next = angle->next();
+ firstAngle = next;
+ do {
+ SkOpAngle* prior = angle;
+ angle = next;
+ next = angle->next();
+ SkASSERT(prior->next() == angle);
+ SkASSERT(angle->next() == next);
+ if (prior->unorderable() || angle->unorderable() || next->unorderable()) {
+ baseAngle = nullptr;
+ continue;
+ }
+ int testWinding = angle->starter()->windSum();
+ if (SK_MinS32 != testWinding) {
+ baseAngle = angle;
+ tryReverse = true;
+ continue;
+ }
+ if (baseAngle) {
+ ComputeOneSum(baseAngle, angle, includeType);
+ baseAngle = SK_MinS32 != angle->starter()->windSum() ? angle : nullptr;
+ }
+ } while (next != firstAngle);
+ if (baseAngle && SK_MinS32 == firstAngle->starter()->windSum()) {
+ firstAngle = baseAngle;
+ tryReverse = true;
+ }
+ if (tryReverse) {
+ baseAngle = nullptr;
+ SkOpAngle* prior = firstAngle;
+ do {
+ angle = prior;
+ prior = angle->previous();
+ SkASSERT(prior->next() == angle);
+ next = angle->next();
+ if (prior->unorderable() || angle->unorderable() || next->unorderable()) {
+ baseAngle = nullptr;
+ continue;
+ }
+ int testWinding = angle->starter()->windSum();
+ if (SK_MinS32 != testWinding) {
+ baseAngle = angle;
+ continue;
+ }
+ if (baseAngle) {
+ ComputeOneSumReverse(baseAngle, angle, includeType);
+ baseAngle = SK_MinS32 != angle->starter()->windSum() ? angle : nullptr;
+ }
+ } while (prior != firstAngle);
+ }
+ return start->starter(end)->windSum();
+}
+
+bool SkOpSegment::contains(double newT) const {
+ const SkOpSpanBase* spanBase = &fHead;
+ do {
+ if (spanBase->ptT()->contains(this, newT)) {
+ return true;
+ }
+ if (spanBase == &fTail) {
+ break;
+ }
+ spanBase = spanBase->upCast()->next();
+ } while (true);
+ return false;
+}
+
+void SkOpSegment::release(const SkOpSpan* span) {
+ if (span->done()) {
+ --fDoneCount;
+ }
+ --fCount;
+ SkOPASSERT(fCount >= fDoneCount);
+}
+
+#if DEBUG_ANGLE
+// called only by debugCheckNearCoincidence
+double SkOpSegment::distSq(double t, const SkOpAngle* oppAngle) const {
+ SkDPoint testPt = this->dPtAtT(t);
+ SkDLine testPerp = {{ testPt, testPt }};
+ SkDVector slope = this->dSlopeAtT(t);
+ testPerp[1].fX += slope.fY;
+ testPerp[1].fY -= slope.fX;
+ SkIntersections i;
+ const SkOpSegment* oppSegment = oppAngle->segment();
+ (*CurveIntersectRay[oppSegment->verb()])(oppSegment->pts(), oppSegment->weight(), testPerp, &i);
+ double closestDistSq = SK_ScalarInfinity;
+ for (int index = 0; index < i.used(); ++index) {
+ if (!between(oppAngle->start()->t(), i[0][index], oppAngle->end()->t())) {
+ continue;
+ }
+ double testDistSq = testPt.distanceSquared(i.pt(index));
+ if (closestDistSq > testDistSq) {
+ closestDistSq = testDistSq;
+ }
+ }
+ return closestDistSq;
+}
+#endif
+
+/*
+ The M and S variable name parts stand for the operators.
+ Mi stands for Minuend (see wiki subtraction, analogous to difference)
+ Su stands for Subtrahend
+ The Opp variable name part designates that the value is for the Opposite operator.
+ Opposite values result from combining coincident spans.
+ */
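+// For example, in a difference op the first path is the minuend and the second
+// (the op path) is the subtrahend: result = minuend - subtrahend.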
+SkOpSegment* SkOpSegment::findNextOp(SkTDArray<SkOpSpanBase*>* chase, SkOpSpanBase** nextStart,
+ SkOpSpanBase** nextEnd, bool* unsortable, SkPathOp op, int xorMiMask, int xorSuMask) {
+ SkOpSpanBase* start = *nextStart;
+ SkOpSpanBase* end = *nextEnd;
+ SkASSERT(start != end);
+ int step = start->step(end);
+ SkOpSegment* other = this->isSimple(nextStart, &step); // advances nextStart
+ if (other) {
+ // mark the smaller of startIndex, endIndex done, and all adjacent
+ // spans with the same T value (but not 'other' spans)
+#if DEBUG_WINDING
+ SkDebugf("%s simple\n", __FUNCTION__);
+#endif
+ SkOpSpan* startSpan = start->starter(end);
+ if (startSpan->done()) {
+ return nullptr;
+ }
+ markDone(startSpan);
+ *nextEnd = step > 0 ? (*nextStart)->upCast()->next() : (*nextStart)->prev();
+ return other;
+ }
+ SkOpSpanBase* endNear = step > 0 ? (*nextStart)->upCast()->next() : (*nextStart)->prev();
+ SkASSERT(endNear == end); // is this ever not end?
+ SkASSERT(endNear);
+ SkASSERT(start != endNear);
+ SkASSERT((start->t() < endNear->t()) ^ (step < 0));
+ // more than one viable candidate -- measure angles to find best
+ int calcWinding = computeSum(start, endNear, SkOpAngle::kBinaryOpp);
+ bool sortable = calcWinding != SK_NaN32;
+ if (!sortable) {
+ *unsortable = true;
+ markDone(start->starter(end));
+ return nullptr;
+ }
+ SkOpAngle* angle = this->spanToAngle(end, start);
+ if (angle->unorderable()) {
+ *unsortable = true;
+ markDone(start->starter(end));
+ return nullptr;
+ }
+#if DEBUG_SORT
+ SkDebugf("%s\n", __FUNCTION__);
+ angle->debugLoop();
+#endif
+ int sumMiWinding = updateWinding(end, start);
+ if (sumMiWinding == SK_MinS32) {
+ *unsortable = true;
+ markDone(start->starter(end));
+ return nullptr;
+ }
+ int sumSuWinding = updateOppWinding(end, start);
+ if (operand()) {
+ SkTSwap<int>(sumMiWinding, sumSuWinding);
+ }
+ SkOpAngle* nextAngle = angle->next();
+ const SkOpAngle* foundAngle = nullptr;
+ bool foundDone = false;
+ // iterate through the angle, and compute everyone's winding
+ SkOpSegment* nextSegment;
+ int activeCount = 0;
+ do {
+ nextSegment = nextAngle->segment();
+ bool activeAngle = nextSegment->activeOp(xorMiMask, xorSuMask, nextAngle->start(),
+ nextAngle->end(), op, &sumMiWinding, &sumSuWinding);
+ if (activeAngle) {
+ ++activeCount;
+ if (!foundAngle || (foundDone && activeCount & 1)) {
+ foundAngle = nextAngle;
+ foundDone = nextSegment->done(nextAngle);
+ }
+ }
+ if (nextSegment->done()) {
+ continue;
+ }
+ if (!activeAngle) {
+ (void) nextSegment->markAndChaseDone(nextAngle->start(), nextAngle->end());
+ }
+ SkOpSpanBase* last = nextAngle->lastMarked();
+ if (last) {
+ SkASSERT(!SkPathOpsDebug::ChaseContains(*chase, last));
+ *chase->append() = last;
+#if DEBUG_WINDING
+ SkDebugf("%s chase.append segment=%d span=%d", __FUNCTION__,
+ last->segment()->debugID(), last->debugID());
+ if (!last->final()) {
+ SkDebugf(" windSum=%d", last->upCast()->windSum());
+ }
+ SkDebugf("\n");
+#endif
+ }
+ } while ((nextAngle = nextAngle->next()) != angle);
+ start->segment()->markDone(start->starter(end));
+ if (!foundAngle) {
+ return nullptr;
+ }
+ *nextStart = foundAngle->start();
+ *nextEnd = foundAngle->end();
+ nextSegment = foundAngle->segment();
+#if DEBUG_WINDING
+ SkDebugf("%s from:[%d] to:[%d] start=%d end=%d\n",
+ __FUNCTION__, debugID(), nextSegment->debugID(), *nextStart, *nextEnd);
+ #endif
+ return nextSegment;
+}
+
+SkOpSegment* SkOpSegment::findNextWinding(SkTDArray<SkOpSpanBase*>* chase,
+ SkOpSpanBase** nextStart, SkOpSpanBase** nextEnd, bool* unsortable) {
+ SkOpSpanBase* start = *nextStart;
+ SkOpSpanBase* end = *nextEnd;
+ SkASSERT(start != end);
+ int step = start->step(end);
+ SkOpSegment* other = this->isSimple(nextStart, &step); // advances nextStart
+ if (other) {
+ // mark the smaller of startIndex, endIndex done, and all adjacent
+ // spans with the same T value (but not 'other' spans)
+#if DEBUG_WINDING
+ SkDebugf("%s simple\n", __FUNCTION__);
+#endif
+ SkOpSpan* startSpan = start->starter(end);
+ if (startSpan->done()) {
+ return nullptr;
+ }
+ markDone(startSpan);
+ *nextEnd = step > 0 ? (*nextStart)->upCast()->next() : (*nextStart)->prev();
+ return other;
+ }
+ SkOpSpanBase* endNear = step > 0 ? (*nextStart)->upCast()->next() : (*nextStart)->prev();
+ SkASSERT(endNear == end); // is this ever not end?
+ SkASSERT(endNear);
+ SkASSERT(start != endNear);
+ SkASSERT((start->t() < endNear->t()) ^ (step < 0));
+ // more than one viable candidate -- measure angles to find best
+ int calcWinding = computeSum(start, endNear, SkOpAngle::kUnaryWinding);
+ bool sortable = calcWinding != SK_NaN32;
+ if (!sortable) {
+ *unsortable = true;
+ markDone(start->starter(end));
+ return nullptr;
+ }
+ SkOpAngle* angle = this->spanToAngle(end, start);
+ if (angle->unorderable()) {
+ *unsortable = true;
+ markDone(start->starter(end));
+ return nullptr;
+ }
+#if DEBUG_SORT
+ SkDebugf("%s\n", __FUNCTION__);
+ angle->debugLoop();
+#endif
+ int sumWinding = updateWinding(end, start);
+ SkOpAngle* nextAngle = angle->next();
+ const SkOpAngle* foundAngle = nullptr;
+ bool foundDone = false;
+ // iterate through the angle, and compute everyone's winding
+ SkOpSegment* nextSegment;
+ int activeCount = 0;
+ do {
+ nextSegment = nextAngle->segment();
+ bool activeAngle = nextSegment->activeWinding(nextAngle->start(), nextAngle->end(),
+ &sumWinding);
+ if (activeAngle) {
+ ++activeCount;
+ if (!foundAngle || (foundDone && activeCount & 1)) {
+ foundAngle = nextAngle;
+ foundDone = nextSegment->done(nextAngle);
+ }
+ }
+ if (nextSegment->done()) {
+ continue;
+ }
+ if (!activeAngle) {
+ (void) nextSegment->markAndChaseDone(nextAngle->start(), nextAngle->end());
+ }
+ SkOpSpanBase* last = nextAngle->lastMarked();
+ if (last) {
+ SkASSERT(!SkPathOpsDebug::ChaseContains(*chase, last));
+ *chase->append() = last;
+#if DEBUG_WINDING
+ SkDebugf("%s chase.append segment=%d span=%d", __FUNCTION__,
+ last->segment()->debugID(), last->debugID());
+ if (!last->final()) {
+ SkDebugf(" windSum=%d", last->upCast()->windSum());
+ }
+ SkDebugf("\n");
+#endif
+ }
+ } while ((nextAngle = nextAngle->next()) != angle);
+ start->segment()->markDone(start->starter(end));
+ if (!foundAngle) {
+ return nullptr;
+ }
+ *nextStart = foundAngle->start();
+ *nextEnd = foundAngle->end();
+ nextSegment = foundAngle->segment();
+#if DEBUG_WINDING
+ SkDebugf("%s from:[%d] to:[%d] start=%d end=%d\n",
+ __FUNCTION__, debugID(), nextSegment->debugID(), *nextStart, *nextEnd);
+ #endif
+ return nextSegment;
+}
+
+SkOpSegment* SkOpSegment::findNextXor(SkOpSpanBase** nextStart, SkOpSpanBase** nextEnd,
+ bool* unsortable) {
+ SkOpSpanBase* start = *nextStart;
+ SkOpSpanBase* end = *nextEnd;
+ SkASSERT(start != end);
+ int step = start->step(end);
+ SkOpSegment* other = this->isSimple(nextStart, &step); // advances nextStart
+ if (other) {
+ // mark the smaller of startIndex, endIndex done, and all adjacent
+ // spans with the same T value (but not 'other' spans)
+#if DEBUG_WINDING
+ SkDebugf("%s simple\n", __FUNCTION__);
+#endif
+ SkOpSpan* startSpan = start->starter(end);
+ if (startSpan->done()) {
+ return nullptr;
+ }
+ markDone(startSpan);
+ *nextEnd = step > 0 ? (*nextStart)->upCast()->next() : (*nextStart)->prev();
+ return other;
+ }
+ SkDEBUGCODE(SkOpSpanBase* endNear = step > 0 ? (*nextStart)->upCast()->next() \
+ : (*nextStart)->prev());
+ SkASSERT(endNear == end); // is this ever not end?
+ SkASSERT(endNear);
+ SkASSERT(start != endNear);
+ SkASSERT((start->t() < endNear->t()) ^ (step < 0));
+ SkOpAngle* angle = this->spanToAngle(end, start);
+ if (!angle || angle->unorderable()) {
+ *unsortable = true;
+ markDone(start->starter(end));
+ return nullptr;
+ }
+#if DEBUG_SORT
+ SkDebugf("%s\n", __FUNCTION__);
+ angle->debugLoop();
+#endif
+ SkOpAngle* nextAngle = angle->next();
+ const SkOpAngle* foundAngle = nullptr;
+ bool foundDone = false;
+ // iterate through the angle, and compute everyone's winding
+ SkOpSegment* nextSegment;
+ int activeCount = 0;
+ do {
+ nextSegment = nextAngle->segment();
+ ++activeCount;
+ if (!foundAngle || (foundDone && activeCount & 1)) {
+ foundAngle = nextAngle;
+ if (!(foundDone = nextSegment->done(nextAngle))) {
+ break;
+ }
+ }
+ nextAngle = nextAngle->next();
+ } while (nextAngle != angle);
+ start->segment()->markDone(start->starter(end));
+ if (!foundAngle) {
+ return nullptr;
+ }
+ *nextStart = foundAngle->start();
+ *nextEnd = foundAngle->end();
+ nextSegment = foundAngle->segment();
+#if DEBUG_WINDING
+ SkDebugf("%s from:[%d] to:[%d] start=%d end=%d\n",
+ __FUNCTION__, debugID(), nextSegment->debugID(), *nextStart, *nextEnd);
+ #endif
+ return nextSegment;
+}
+
+SkOpGlobalState* SkOpSegment::globalState() const {
+ return contour()->globalState();
+}
+
+void SkOpSegment::init(SkPoint pts[], SkScalar weight, SkOpContour* contour, SkPath::Verb verb) {
+ fContour = contour;
+ fNext = nullptr;
+ fPts = pts;
+ fWeight = weight;
+ fVerb = verb;
+ fCount = 0;
+ fDoneCount = 0;
+ fVisited = false;
+ SkOpSpan* zeroSpan = &fHead;
+ zeroSpan->init(this, nullptr, 0, fPts[0]);
+ SkOpSpanBase* oneSpan = &fTail;
+ zeroSpan->setNext(oneSpan);
+ oneSpan->initBase(this, zeroSpan, 1, fPts[SkPathOpsVerbToPoints(fVerb)]);
+ SkDEBUGCODE(fID = globalState()->nextSegmentID());
+}
+
+bool SkOpSegment::isClose(double t, const SkOpSegment* opp) const {
+ SkDPoint cPt = this->dPtAtT(t);
+ SkDVector dxdy = (*CurveDSlopeAtT[this->verb()])(this->pts(), this->weight(), t);
+ SkDLine perp = {{ cPt, {cPt.fX + dxdy.fY, cPt.fY - dxdy.fX} }};
+ SkIntersections i;
+ (*CurveIntersectRay[opp->verb()])(opp->pts(), opp->weight(), perp, &i);
+ int used = i.used();
+ for (int index = 0; index < used; ++index) {
+ if (cPt.roughlyEqual(i.pt(index))) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool SkOpSegment::isXor() const {
+ return fContour->isXor();
+}
+
+void SkOpSegment::markAllDone() {
+ SkOpSpan* span = this->head();
+ do {
+ this->markDone(span);
+ } while ((span = span->next()->upCastable()));
+}
+
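+// Mark the span between start and end done, then follow connected segments via
+// nextChase(), marking each continuation done as well; returns the last span the
+// chase recorded, or nullptr.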
+SkOpSpanBase* SkOpSegment::markAndChaseDone(SkOpSpanBase* start, SkOpSpanBase* end) {
+ int step = start->step(end);
+ SkOpSpan* minSpan = start->starter(end);
+ markDone(minSpan);
+ SkOpSpanBase* last = nullptr;
+ SkOpSegment* other = this;
+ while ((other = other->nextChase(&start, &step, &minSpan, &last))) {
+ if (other->done()) {
+ SkASSERT(!last);
+ break;
+ }
+ other->markDone(minSpan);
+ }
+ return last;
+}
+
+bool SkOpSegment::markAndChaseWinding(SkOpSpanBase* start, SkOpSpanBase* end, int winding,
+ SkOpSpanBase** lastPtr) {
+ SkOpSpan* spanStart = start->starter(end);
+ int step = start->step(end);
+ bool success = markWinding(spanStart, winding);
+ SkOpSpanBase* last = nullptr;
+ SkOpSegment* other = this;
+ while ((other = other->nextChase(&start, &step, &spanStart, &last))) {
+ if (spanStart->windSum() != SK_MinS32) {
+ SkASSERT(spanStart->windSum() == winding);
+ SkASSERT(!last);
+ break;
+ }
+ (void) other->markWinding(spanStart, winding);
+ }
+ if (lastPtr) {
+ *lastPtr = last;
+ }
+ return success;
+}
+
+bool SkOpSegment::markAndChaseWinding(SkOpSpanBase* start, SkOpSpanBase* end,
+ int winding, int oppWinding, SkOpSpanBase** lastPtr) {
+ SkOpSpan* spanStart = start->starter(end);
+ int step = start->step(end);
+ bool success = markWinding(spanStart, winding, oppWinding);
+ SkOpSpanBase* last = nullptr;
+ SkOpSegment* other = this;
+ while ((other = other->nextChase(&start, &step, &spanStart, &last))) {
+ if (spanStart->windSum() != SK_MinS32) {
+ if (this->operand() == other->operand()) {
+ if (spanStart->windSum() != winding || spanStart->oppSum() != oppWinding) {
+ this->globalState()->setWindingFailed();
+ return false;
+ }
+ } else {
+ SkASSERT(spanStart->windSum() == oppWinding);
+ SkASSERT(spanStart->oppSum() == winding);
+ }
+ SkASSERT(!last);
+ break;
+ }
+ if (this->operand() == other->operand()) {
+ (void) other->markWinding(spanStart, winding, oppWinding);
+ } else {
+ (void) other->markWinding(spanStart, oppWinding, winding);
+ }
+ }
+ if (lastPtr) {
+ *lastPtr = last;
+ }
+ return success;
+}
+
+SkOpSpanBase* SkOpSegment::markAngle(int maxWinding, int sumWinding, const SkOpAngle* angle) {
+ SkASSERT(angle->segment() == this);
+ if (UseInnerWinding(maxWinding, sumWinding)) {
+ maxWinding = sumWinding;
+ }
+ SkOpSpanBase* last;
+ (void) markAndChaseWinding(angle->start(), angle->end(), maxWinding, &last);
+#if DEBUG_WINDING
+ if (last) {
+ SkDebugf("%s last seg=%d span=%d", __FUNCTION__,
+ last->segment()->debugID(), last->debugID());
+ if (!last->final()) {
+ SkDebugf(" windSum=");
+ SkPathOpsDebug::WindingPrintf(last->upCast()->windSum());
+ }
+ SkDebugf("\n");
+ }
+#endif
+ return last;
+}
+
+SkOpSpanBase* SkOpSegment::markAngle(int maxWinding, int sumWinding, int oppMaxWinding,
+ int oppSumWinding, const SkOpAngle* angle) {
+ SkASSERT(angle->segment() == this);
+ if (UseInnerWinding(maxWinding, sumWinding)) {
+ maxWinding = sumWinding;
+ }
+ if (oppMaxWinding != oppSumWinding && UseInnerWinding(oppMaxWinding, oppSumWinding)) {
+ oppMaxWinding = oppSumWinding;
+ }
+ SkOpSpanBase* last = nullptr;
+ // caller doesn't require that this marks anything
+ (void) markAndChaseWinding(angle->start(), angle->end(), maxWinding, oppMaxWinding, &last);
+#if DEBUG_WINDING
+ if (last) {
+ SkDebugf("%s last segment=%d span=%d", __FUNCTION__,
+ last->segment()->debugID(), last->debugID());
+ if (!last->final()) {
+ SkDebugf(" windSum=");
+ SkPathOpsDebug::WindingPrintf(last->upCast()->windSum());
+ }
+        SkDebugf("\n");
+ }
+#endif
+ return last;
+}
+
+void SkOpSegment::markDone(SkOpSpan* span) {
+ SkASSERT(this == span->segment());
+ if (span->done()) {
+ return;
+ }
+#if DEBUG_MARK_DONE
+ debugShowNewWinding(__FUNCTION__, span, span->windSum(), span->oppSum());
+#endif
+ span->setDone(true);
+ ++fDoneCount;
+ debugValidate();
+}
+
+bool SkOpSegment::markWinding(SkOpSpan* span, int winding) {
+ SkASSERT(this == span->segment());
+ SkASSERT(winding);
+ if (span->done()) {
+ return false;
+ }
+#if DEBUG_MARK_DONE
+ debugShowNewWinding(__FUNCTION__, span, winding);
+#endif
+ span->setWindSum(winding);
+ debugValidate();
+ return true;
+}
+
+bool SkOpSegment::markWinding(SkOpSpan* span, int winding, int oppWinding) {
+ SkASSERT(this == span->segment());
+ SkASSERT(winding || oppWinding);
+ if (span->done()) {
+ return false;
+ }
+#if DEBUG_MARK_DONE
+ debugShowNewWinding(__FUNCTION__, span, winding, oppWinding);
+#endif
+ span->setWindSum(winding);
+ span->setOppSum(oppWinding);
+ debugValidate();
+ return true;
+}
+
+bool SkOpSegment::match(const SkOpPtT* base, const SkOpSegment* testParent, double testT,
+ const SkPoint& testPt) const {
+ SkASSERT(this == base->segment());
+ if (this == testParent) {
+ if (precisely_equal(base->fT, testT)) {
+ return true;
+ }
+ }
+ if (!SkDPoint::ApproximatelyEqual(testPt, base->fPt)) {
+ return false;
+ }
+ return this != testParent || !this->ptsDisjoint(base->fT, base->fPt, testT, testPt);
+}
+
+static SkOpSegment* set_last(SkOpSpanBase** last, SkOpSpanBase* endSpan) {
+ if (last) {
+ *last = endSpan;
+ }
+ return nullptr;
+}
+
+SkOpSegment* SkOpSegment::nextChase(SkOpSpanBase** startPtr, int* stepPtr, SkOpSpan** minPtr,
+ SkOpSpanBase** last) const {
+ SkOpSpanBase* origStart = *startPtr;
+ int step = *stepPtr;
+ SkOpSpanBase* endSpan = step > 0 ? origStart->upCast()->next() : origStart->prev();
+ SkASSERT(endSpan);
+ SkOpAngle* angle = step > 0 ? endSpan->fromAngle() : endSpan->upCast()->toAngle();
+ SkOpSpanBase* foundSpan;
+ SkOpSpanBase* otherEnd;
+ SkOpSegment* other;
+ if (angle == nullptr) {
+ if (endSpan->t() != 0 && endSpan->t() != 1) {
+ return nullptr;
+ }
+ SkOpPtT* otherPtT = endSpan->ptT()->next();
+ other = otherPtT->segment();
+ foundSpan = otherPtT->span();
+ otherEnd = step > 0
+ ? foundSpan->upCastable() ? foundSpan->upCast()->next() : nullptr
+ : foundSpan->prev();
+ } else {
+ int loopCount = angle->loopCount();
+ if (loopCount > 2) {
+ return set_last(last, endSpan);
+ }
+ const SkOpAngle* next = angle->next();
+ if (nullptr == next) {
+ return nullptr;
+ }
+#if DEBUG_WINDING
+ if (angle->debugSign() != next->debugSign() && !angle->segment()->contour()->isXor()
+ && !next->segment()->contour()->isXor()) {
+ SkDebugf("%s mismatched signs\n", __FUNCTION__);
+ }
+#endif
+ other = next->segment();
+ foundSpan = endSpan = next->start();
+ otherEnd = next->end();
+ }
+ if (!otherEnd) {
+ return nullptr;
+ }
+ int foundStep = foundSpan->step(otherEnd);
+ if (*stepPtr != foundStep) {
+ return set_last(last, endSpan);
+ }
+ SkASSERT(*startPtr);
+ if (!otherEnd) {
+ return nullptr;
+ }
+// SkASSERT(otherEnd >= 0);
+ SkOpSpan* origMin = step < 0 ? origStart->prev() : origStart->upCast();
+ SkOpSpan* foundMin = foundSpan->starter(otherEnd);
+ if (foundMin->windValue() != origMin->windValue()
+ || foundMin->oppValue() != origMin->oppValue()) {
+ return set_last(last, endSpan);
+ }
+ *startPtr = foundSpan;
+ *stepPtr = foundStep;
+ if (minPtr) {
+ *minPtr = foundMin;
+ }
+ return other;
+}
+
+// Please keep this in sync with DebugClearVisited()
+void SkOpSegment::ClearVisited(SkOpSpanBase* span) {
+ // reset visited flag back to false
+ do {
+ SkOpPtT* ptT = span->ptT(), * stopPtT = ptT;
+ while ((ptT = ptT->next()) != stopPtT) {
+ SkOpSegment* opp = ptT->segment();
+ opp->resetVisited();
+ }
+ } while (!span->final() && (span = span->upCast()->next()));
+}
+
+// Please keep this in sync with debugMissingCoincidence()
+// look for pairs of undetected coincident curves
+// assumes that segments going in have their visited flags clear
+// Even though pairs of curves correctly detect coincident runs, a run may be missed
+// if the coincidence is a product of multiple intersections. For instance, given
+// curves A, B, and C:
+// A-B intersect at a point 1; A-C and B-C intersect at point 2, so near
+// the end of C that the intersection is replaced with the end of C.
+// Even though A-B correctly do not detect an intersection at point 2,
+// the resulting run from point 1 to point 2 is coincident on A and B.
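+// For a concrete, hypothetical picture (coordinates invented for illustration):
+// let A be the line (0,0)-(10,0), B the line (0,0)-(10,0.000001) lying nearly
+// on top of A, and C a curve crossing both just shy of (10,0). Point 1 is then
+// (0,0) and point 2 collapses onto C's endpoint near (10,0); the coincident
+// run from point 1 to point 2 spans essentially all of A and B.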
+bool SkOpSegment::missingCoincidence() {
+ if (this->done()) {
+ return false;
+ }
+ SkOpSpan* prior = nullptr;
+ SkOpSpanBase* spanBase = &fHead;
+ bool result = false;
+ do {
+ SkOpPtT* ptT = spanBase->ptT(), * spanStopPtT = ptT;
+ SkOPASSERT(ptT->span() == spanBase);
+ while ((ptT = ptT->next()) != spanStopPtT) {
+ if (ptT->deleted()) {
+ continue;
+ }
+ SkOpSegment* opp = ptT->span()->segment();
+ if (opp->done()) {
+ continue;
+ }
+            // when opp is first encountered, continue; on the 2nd encounter, look for coincidence
+ if (!opp->visited()) {
+ continue;
+ }
+ if (spanBase == &fHead) {
+ continue;
+ }
+ if (ptT->segment() == this) {
+ continue;
+ }
+ SkOpSpan* span = spanBase->upCastable();
+ // FIXME?: this assumes that if the opposite segment is coincident then no more
+ // coincidence needs to be detected. This may not be true.
+ if (span && span->containsCoincidence(opp)) {
+ continue;
+ }
+ if (spanBase->containsCoinEnd(opp)) {
+ continue;
+ }
+ SkOpPtT* priorPtT = nullptr, * priorStopPtT;
+ // find prior span containing opp segment
+ SkOpSegment* priorOpp = nullptr;
+ SkOpSpan* priorTest = spanBase->prev();
+ while (!priorOpp && priorTest) {
+ priorStopPtT = priorPtT = priorTest->ptT();
+ while ((priorPtT = priorPtT->next()) != priorStopPtT) {
+ if (priorPtT->deleted()) {
+ continue;
+ }
+ SkOpSegment* segment = priorPtT->span()->segment();
+ if (segment == opp) {
+ prior = priorTest;
+ priorOpp = opp;
+ break;
+ }
+ }
+ priorTest = priorTest->prev();
+ }
+ if (!priorOpp) {
+ continue;
+ }
+ if (priorPtT == ptT) {
+ continue;
+ }
+ SkOpPtT* oppStart = prior->ptT();
+ SkOpPtT* oppEnd = spanBase->ptT();
+ bool swapped = priorPtT->fT > ptT->fT;
+ if (swapped) {
+ SkTSwap(priorPtT, ptT);
+ SkTSwap(oppStart, oppEnd);
+ }
+ SkOpCoincidence* coincidences = this->globalState()->coincidence();
+ SkOpPtT* rootPriorPtT = priorPtT->span()->ptT();
+ SkOpPtT* rootPtT = ptT->span()->ptT();
+ SkOpPtT* rootOppStart = oppStart->span()->ptT();
+ SkOpPtT* rootOppEnd = oppEnd->span()->ptT();
+ if (coincidences->contains(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd)) {
+ goto swapBack;
+ }
+ if (this->testForCoincidence(rootPriorPtT, rootPtT, prior, spanBase, opp)) {
+ // mark coincidence
+#if DEBUG_COINCIDENCE_VERBOSE
+ SkDebugf("%s coinSpan=%d endSpan=%d oppSpan=%d oppEndSpan=%d\n", __FUNCTION__,
+ rootPriorPtT->debugID(), rootPtT->debugID(), rootOppStart->debugID(),
+ rootOppEnd->debugID());
+#endif
+ if (!coincidences->extend(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd)) {
+ coincidences->add(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd);
+ }
+#if DEBUG_COINCIDENCE
+ SkASSERT(coincidences->contains(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd));
+#endif
+ result = true;
+ }
+ swapBack:
+ if (swapped) {
+ SkTSwap(priorPtT, ptT);
+ }
+ }
+ } while ((spanBase = spanBase->final() ? nullptr : spanBase->upCast()->next()));
+ ClearVisited(&fHead);
+ return result;
+}
+
+// please keep this in sync with debugMoveMultiples()
+// if a span has more than one intersection, merge the other segments' span as needed
+bool SkOpSegment::moveMultiples() {
+ debugValidate();
+ SkOpSpanBase* test = &fHead;
+ do {
+ int addCount = test->spanAddsCount();
+ FAIL_IF(addCount < 1);
+ if (addCount == 1) {
+ continue;
+ }
+ SkOpPtT* startPtT = test->ptT();
+ SkOpPtT* testPtT = startPtT;
+ do { // iterate through all spans associated with start
+ SkOpSpanBase* oppSpan = testPtT->span();
+ if (oppSpan->spanAddsCount() == addCount) {
+ continue;
+ }
+ if (oppSpan->deleted()) {
+ continue;
+ }
+ SkOpSegment* oppSegment = oppSpan->segment();
+ if (oppSegment == this) {
+ continue;
+ }
+ // find range of spans to consider merging
+ SkOpSpanBase* oppPrev = oppSpan;
+ SkOpSpanBase* oppFirst = oppSpan;
+ while ((oppPrev = oppPrev->prev())) {
+ if (!roughly_equal(oppPrev->t(), oppSpan->t())) {
+ break;
+ }
+ if (oppPrev->spanAddsCount() == addCount) {
+ continue;
+ }
+ if (oppPrev->deleted()) {
+ continue;
+ }
+ oppFirst = oppPrev;
+ }
+ SkOpSpanBase* oppNext = oppSpan;
+ SkOpSpanBase* oppLast = oppSpan;
+ while ((oppNext = oppNext->final() ? nullptr : oppNext->upCast()->next())) {
+ if (!roughly_equal(oppNext->t(), oppSpan->t())) {
+ break;
+ }
+ if (oppNext->spanAddsCount() == addCount) {
+ continue;
+ }
+ if (oppNext->deleted()) {
+ continue;
+ }
+ oppLast = oppNext;
+ }
+ if (oppFirst == oppLast) {
+ continue;
+ }
+ SkOpSpanBase* oppTest = oppFirst;
+ do {
+ if (oppTest == oppSpan) {
+ continue;
+ }
+ // check to see if the candidate meets specific criteria:
+ // it contains spans of segments in test's loop but not including 'this'
+ SkOpPtT* oppStartPtT = oppTest->ptT();
+ SkOpPtT* oppPtT = oppStartPtT;
+ while ((oppPtT = oppPtT->next()) != oppStartPtT) {
+ SkOpSegment* oppPtTSegment = oppPtT->segment();
+ if (oppPtTSegment == this) {
+ goto tryNextSpan;
+ }
+ SkOpPtT* matchPtT = startPtT;
+ do {
+ if (matchPtT->segment() == oppPtTSegment) {
+ goto foundMatch;
+ }
+ } while ((matchPtT = matchPtT->next()) != startPtT);
+ goto tryNextSpan;
+ foundMatch: // merge oppTest and oppSpan
+ oppSegment->debugValidate();
+ oppTest->mergeMatches(oppSpan);
+ oppTest->addOpp(oppSpan);
+ oppSegment->debugValidate();
+ goto checkNextSpan;
+ }
+ tryNextSpan:
+ ;
+ } while (oppTest != oppLast && (oppTest = oppTest->upCast()->next()));
+ } while ((testPtT = testPtT->next()) != startPtT);
+checkNextSpan:
+ ;
+ } while ((test = test->final() ? nullptr : test->upCast()->next()));
+ debugValidate();
+ return true;
+}
+
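+// A hypothetical sketch of the merge above: a span on this segment carries pt-ts
+// for segments {this, P, Q}, while a neighboring span on P at nearly the same t
+// carries a pt-t for Q but none for this segment; the two spans on P are combined
+// so that every segment ends up referencing a single span at that intersection.
+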
+// adjacent spans may have points close by
+bool SkOpSegment::spansNearby(const SkOpSpanBase* refSpan, const SkOpSpanBase* checkSpan) const {
+ const SkOpPtT* refHead = refSpan->ptT();
+ const SkOpPtT* checkHead = checkSpan->ptT();
+// if the first pt pair from adjacent spans is far apart, assume that all are far enough apart
+ if (!SkDPoint::WayRoughlyEqual(refHead->fPt, checkHead->fPt)) {
+#if DEBUG_COINCIDENCE
+        // verify that no combination of points is close
+ const SkOpPtT* dBugRef = refHead;
+ do {
+ const SkOpPtT* dBugCheck = checkHead;
+ do {
+ SkOPASSERT(!SkDPoint::ApproximatelyEqual(dBugRef->fPt, dBugCheck->fPt));
+ dBugCheck = dBugCheck->next();
+ } while (dBugCheck != checkHead);
+ dBugRef = dBugRef->next();
+ } while (dBugRef != refHead);
+#endif
+ return false;
+ }
+ // check only unique points
+ SkScalar distSqBest = SK_ScalarMax;
+ const SkOpPtT* refBest = nullptr;
+ const SkOpPtT* checkBest = nullptr;
+ const SkOpPtT* ref = refHead;
+ do {
+ if (ref->deleted()) {
+ continue;
+ }
+ while (ref->ptAlreadySeen(refHead)) {
+ ref = ref->next();
+ if (ref == refHead) {
+ goto doneCheckingDistance;
+ }
+ }
+ const SkOpPtT* check = checkHead;
+ const SkOpSegment* refSeg = ref->segment();
+ do {
+ if (check->deleted()) {
+ continue;
+ }
+ while (check->ptAlreadySeen(checkHead)) {
+ check = check->next();
+ if (check == checkHead) {
+ goto nextRef;
+ }
+ }
+ SkScalar distSq = ref->fPt.distanceToSqd(check->fPt);
+ if (distSqBest > distSq && (refSeg != check->segment()
+ || !refSeg->ptsDisjoint(*ref, *check))) {
+ distSqBest = distSq;
+ refBest = ref;
+ checkBest = check;
+ }
+ } while ((check = check->next()) != checkHead);
+nextRef:
+ ;
+ } while ((ref = ref->next()) != refHead);
+doneCheckingDistance:
+ return checkBest && refBest->segment()->match(refBest, checkBest->segment(), checkBest->fT,
+ checkBest->fPt);
+}
+
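+// A hypothetical illustration of the early-out in spansNearby() above: if
+// refHead->fPt is (0,0) and checkHead->fPt is (100,50), WayRoughlyEqual fails
+// and the function returns false without running the pairwise scan; only when
+// the two head points are roughly equal is the search for the closest pair of
+// unique points performed.
+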
+// Please keep this function in sync with debugMoveNearby()
+// Move nearby t values and pts so they all hang off the same span. Alignment happens later.
+void SkOpSegment::moveNearby() {
+ debugValidate();
+ // release undeleted spans pointing to this seg that are linked to the primary span
+ SkOpSpanBase* spanBase = &fHead;
+ do {
+ SkOpPtT* ptT = spanBase->ptT();
+ const SkOpPtT* headPtT = ptT;
+ while ((ptT = ptT->next()) != headPtT) {
+ SkOpSpanBase* test = ptT->span();
+ if (ptT->segment() == this && !ptT->deleted() && test != spanBase
+ && test->ptT() == ptT) {
+ if (test->final()) {
+ if (spanBase == &fHead) {
+ this->clearAll();
+ return;
+ }
+ spanBase->upCast()->release(ptT);
+ } else if (test->prev()) {
+ test->upCast()->release(headPtT);
+ }
+ break;
+ }
+ }
+ spanBase = spanBase->upCast()->next();
+ } while (!spanBase->final());
+
+    // This loop looks for adjacent spans that are nearby
+ spanBase = &fHead;
+ do { // iterate through all spans associated with start
+ SkOpSpanBase* test = spanBase->upCast()->next();
+ if (this->spansNearby(spanBase, test)) {
+ if (test->final()) {
+ if (spanBase->prev()) {
+ test->merge(spanBase->upCast());
+ } else {
+ this->clearAll();
+ return;
+ }
+ } else {
+ spanBase->merge(test->upCast());
+ }
+ }
+ spanBase = test;
+ } while (!spanBase->final());
+ debugValidate();
+}
+
+bool SkOpSegment::operand() const {
+ return fContour->operand();
+}
+
+bool SkOpSegment::oppXor() const {
+ return fContour->oppXor();
+}
+
+bool SkOpSegment::ptsDisjoint(double t1, const SkPoint& pt1, double t2, const SkPoint& pt2) const {
+ if (fVerb == SkPath::kLine_Verb) {
+ return false;
+ }
+ // quads (and cubics) can loop back to nearly a line so that an opposite curve
+ // hits in two places with very different t values.
+    // OPTIMIZATION: curves could be preflighted so that, for example, something like
+    // 'controls contained by ends' could avoid this check for common curves.
+    // 'ends are extremes in x or y' is cheaper to compute and common in real-world paths;
+    // on the other hand, the check below is relatively inexpensive.
+ double midT = (t1 + t2) / 2;
+ SkPoint midPt = this->ptAtT(midT);
+ double seDistSq = SkTMax(pt1.distanceToSqd(pt2) * 2, FLT_EPSILON * 2);
+ return midPt.distanceToSqd(pt1) > seDistSq || midPt.distanceToSqd(pt2) > seDistSq;
+}
+
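+#if 0  // Illustrative sketch only, not part of the build: how the midpoint test
+       // above separates a looping curve from two genuinely nearby t values.
+       // 'seg' and the t values are hypothetical, chosen for illustration.
+static bool ExamplePtsDisjoint(const SkOpSegment* seg) {
+    double t1 = 0.1, t2 = 0.9;          // two t values whose points nearly touch
+    SkPoint pt1 = seg->ptAtT(t1);
+    SkPoint pt2 = seg->ptAtT(t2);
+    // On a looping curve the point at (t1 + t2) / 2 lies far from both ends, so
+    // its squared distance to either end exceeds twice the end-to-end squared
+    // distance and the pair is reported as disjoint; on a near-line it is not.
+    return seg->ptsDisjoint(t1, pt1, t2, pt2);
+}
+#endif
+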
+void SkOpSegment::setUpWindings(SkOpSpanBase* start, SkOpSpanBase* end, int* sumMiWinding,
+ int* maxWinding, int* sumWinding) {
+ int deltaSum = SpanSign(start, end);
+ *maxWinding = *sumMiWinding;
+ *sumWinding = *sumMiWinding -= deltaSum;
+ SkASSERT(!DEBUG_LIMIT_WIND_SUM || SkTAbs(*sumWinding) <= DEBUG_LIMIT_WIND_SUM);
+}
+
+void SkOpSegment::setUpWindings(SkOpSpanBase* start, SkOpSpanBase* end, int* sumMiWinding,
+ int* sumSuWinding, int* maxWinding, int* sumWinding, int* oppMaxWinding,
+ int* oppSumWinding) {
+ int deltaSum = SpanSign(start, end);
+ int oppDeltaSum = OppSign(start, end);
+ if (operand()) {
+ *maxWinding = *sumSuWinding;
+ *sumWinding = *sumSuWinding -= deltaSum;
+ *oppMaxWinding = *sumMiWinding;
+ *oppSumWinding = *sumMiWinding -= oppDeltaSum;
+ } else {
+ *maxWinding = *sumMiWinding;
+ *sumWinding = *sumMiWinding -= deltaSum;
+ *oppMaxWinding = *sumSuWinding;
+ *oppSumWinding = *sumSuWinding -= oppDeltaSum;
+ }
+ SkASSERT(!DEBUG_LIMIT_WIND_SUM || SkTAbs(*sumWinding) <= DEBUG_LIMIT_WIND_SUM);
+ SkASSERT(!DEBUG_LIMIT_WIND_SUM || SkTAbs(*oppSumWinding) <= DEBUG_LIMIT_WIND_SUM);
+}
+
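+// A worked, hypothetical example of the bookkeeping above: entering a span with
+// *sumMiWinding == 1 whose SpanSign is 1 yields *maxWinding == 1 and
+// *sumWinding == 0; the two values describe the winding on either side of the span.
+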
+void SkOpSegment::sortAngles() {
+ SkOpSpanBase* span = &this->fHead;
+ do {
+ SkOpAngle* fromAngle = span->fromAngle();
+ SkOpAngle* toAngle = span->final() ? nullptr : span->upCast()->toAngle();
+ if (!fromAngle && !toAngle) {
+ continue;
+ }
+#if DEBUG_ANGLE
+ bool wroteAfterHeader = false;
+#endif
+ SkOpAngle* baseAngle = fromAngle;
+ if (fromAngle && toAngle) {
+#if DEBUG_ANGLE
+ SkDebugf("%s [%d] tStart=%1.9g [%d]\n", __FUNCTION__, debugID(), span->t(),
+ span->debugID());
+ wroteAfterHeader = true;
+#endif
+ fromAngle->insert(toAngle);
+ } else if (!fromAngle) {
+ baseAngle = toAngle;
+ }
+ SkOpPtT* ptT = span->ptT(), * stopPtT = ptT;
+ do {
+ SkOpSpanBase* oSpan = ptT->span();
+ if (oSpan == span) {
+ continue;
+ }
+ SkOpAngle* oAngle = oSpan->fromAngle();
+ if (oAngle) {
+#if DEBUG_ANGLE
+ if (!wroteAfterHeader) {
+ SkDebugf("%s [%d] tStart=%1.9g [%d]\n", __FUNCTION__, debugID(),
+ span->t(), span->debugID());
+ wroteAfterHeader = true;
+ }
+#endif
+ if (!oAngle->loopContains(baseAngle)) {
+ baseAngle->insert(oAngle);
+ }
+ }
+ if (!oSpan->final()) {
+ oAngle = oSpan->upCast()->toAngle();
+ if (oAngle) {
+#if DEBUG_ANGLE
+ if (!wroteAfterHeader) {
+ SkDebugf("%s [%d] tStart=%1.9g [%d]\n", __FUNCTION__, debugID(),
+ span->t(), span->debugID());
+ wroteAfterHeader = true;
+ }
+#endif
+ if (!oAngle->loopContains(baseAngle)) {
+ baseAngle->insert(oAngle);
+ }
+ }
+ }
+ } while ((ptT = ptT->next()) != stopPtT);
+ if (baseAngle->loopCount() == 1) {
+ span->setFromAngle(nullptr);
+ if (toAngle) {
+ span->upCast()->setToAngle(nullptr);
+ }
+ baseAngle = nullptr;
+ }
+#if DEBUG_SORT
+ SkASSERT(!baseAngle || baseAngle->loopCount() > 1);
+#endif
+ } while (!span->final() && (span = span->upCast()->next()));
+}
+
+bool SkOpSegment::subDivide(const SkOpSpanBase* start, const SkOpSpanBase* end,
+ SkDCurve* edge) const {
+ SkASSERT(start != end);
+ const SkOpPtT& startPtT = *start->ptT();
+ const SkOpPtT& endPtT = *end->ptT();
+ SkDEBUGCODE(edge->fVerb = fVerb);
+ edge->fCubic[0].set(startPtT.fPt);
+ int points = SkPathOpsVerbToPoints(fVerb);
+ edge->fCubic[points].set(endPtT.fPt);
+ if (fVerb == SkPath::kLine_Verb) {
+ return false;
+ }
+ double startT = startPtT.fT;
+ double endT = endPtT.fT;
+ if ((startT == 0 || endT == 0) && (startT == 1 || endT == 1)) {
+ // don't compute midpoints if we already have them
+ if (fVerb == SkPath::kQuad_Verb) {
+ edge->fLine[1].set(fPts[1]);
+ return false;
+ }
+ if (fVerb == SkPath::kConic_Verb) {
+ edge->fConic[1].set(fPts[1]);
+ edge->fConic.fWeight = fWeight;
+ return false;
+ }
+ SkASSERT(fVerb == SkPath::kCubic_Verb);
+ if (startT == 0) {
+ edge->fCubic[1].set(fPts[1]);
+ edge->fCubic[2].set(fPts[2]);
+ return false;
+ }
+ edge->fCubic[1].set(fPts[2]);
+ edge->fCubic[2].set(fPts[1]);
+ return false;
+ }
+ if (fVerb == SkPath::kQuad_Verb) {
+ edge->fQuad[1] = SkDQuad::SubDivide(fPts, edge->fQuad[0], edge->fQuad[2], startT, endT);
+ } else if (fVerb == SkPath::kConic_Verb) {
+ edge->fConic[1] = SkDConic::SubDivide(fPts, fWeight, edge->fQuad[0], edge->fQuad[2],
+ startT, endT, &edge->fConic.fWeight);
+ } else {
+ SkASSERT(fVerb == SkPath::kCubic_Verb);
+ SkDCubic::SubDivide(fPts, edge->fCubic[0], edge->fCubic[3], startT, endT, &edge->fCubic[1]);
+ }
+ return true;
+}
+
+bool SkOpSegment::testForCoincidence(const SkOpPtT* priorPtT, const SkOpPtT* ptT,
+ const SkOpSpanBase* prior, const SkOpSpanBase* spanBase, const SkOpSegment* opp) const {
+ // average t, find mid pt
+ double midT = (prior->t() + spanBase->t()) / 2;
+ SkPoint midPt = this->ptAtT(midT);
+ bool coincident = true;
+ // if the mid pt is not near either end pt, project perpendicular through opp seg
+ if (!SkDPoint::ApproximatelyEqual(priorPtT->fPt, midPt)
+ && !SkDPoint::ApproximatelyEqual(ptT->fPt, midPt)) {
+ if (priorPtT->span() == ptT->span()) {
+ return false;
+ }
+ coincident = false;
+ SkIntersections i;
+ SkDCurve curvePart;
+ this->subDivide(prior, spanBase, &curvePart);
+ SkDVector dxdy = (*CurveDDSlopeAtT[fVerb])(curvePart, 0.5f);
+ SkDPoint partMidPt = (*CurveDDPointAtT[fVerb])(curvePart, 0.5f);
+ SkDLine ray = {{{midPt.fX, midPt.fY}, {partMidPt.fX + dxdy.fY, partMidPt.fY - dxdy.fX}}};
+ SkDCurve oppPart;
+ opp->subDivide(priorPtT->span(), ptT->span(), &oppPart);
+ (*CurveDIntersectRay[opp->verb()])(oppPart, ray, &i);
+ // measure distance and see if it's small enough to denote coincidence
+ for (int index = 0; index < i.used(); ++index) {
+ if (!between(0, i[0][index], 1)) {
+ continue;
+ }
+ SkDPoint oppPt = i.pt(index);
+ if (oppPt.approximatelyDEqual(midPt)) {
+ // the coincidence can occur at almost any angle
+ coincident = true;
+ }
+ }
+ }
+ return coincident;
+}
+
+void SkOpSegment::undoneSpan(SkOpSpanBase** start, SkOpSpanBase** end) {
+ SkOpSpan* span = this->head();
+ do {
+ if (!span->done()) {
+ break;
+ }
+ } while ((span = span->next()->upCastable()));
+ SkASSERT(span);
+ *start = span;
+ *end = span->next();
+}
+
+int SkOpSegment::updateOppWinding(const SkOpSpanBase* start, const SkOpSpanBase* end) const {
+ const SkOpSpan* lesser = start->starter(end);
+ int oppWinding = lesser->oppSum();
+ int oppSpanWinding = SkOpSegment::OppSign(start, end);
+ if (oppSpanWinding && UseInnerWinding(oppWinding - oppSpanWinding, oppWinding)
+ && oppWinding != SK_MaxS32) {
+ oppWinding -= oppSpanWinding;
+ }
+ return oppWinding;
+}
+
+int SkOpSegment::updateOppWinding(const SkOpAngle* angle) const {
+ const SkOpSpanBase* startSpan = angle->start();
+ const SkOpSpanBase* endSpan = angle->end();
+ return updateOppWinding(endSpan, startSpan);
+}
+
+int SkOpSegment::updateOppWindingReverse(const SkOpAngle* angle) const {
+ const SkOpSpanBase* startSpan = angle->start();
+ const SkOpSpanBase* endSpan = angle->end();
+ return updateOppWinding(startSpan, endSpan);
+}
+
+int SkOpSegment::updateWinding(SkOpSpanBase* start, SkOpSpanBase* end) {
+ SkOpSpan* lesser = start->starter(end);
+ int winding = lesser->windSum();
+ if (winding == SK_MinS32) {
+ winding = lesser->computeWindSum();
+ }
+ if (winding == SK_MinS32) {
+ return winding;
+ }
+ int spanWinding = SkOpSegment::SpanSign(start, end);
+ if (winding && UseInnerWinding(winding - spanWinding, winding)
+ && winding != SK_MaxS32) {
+ winding -= spanWinding;
+ }
+ return winding;
+}
+
+int SkOpSegment::updateWinding(SkOpAngle* angle) {
+ SkOpSpanBase* startSpan = angle->start();
+ SkOpSpanBase* endSpan = angle->end();
+ return updateWinding(endSpan, startSpan);
+}
+
+int SkOpSegment::updateWindingReverse(const SkOpAngle* angle) {
+ SkOpSpanBase* startSpan = angle->start();
+ SkOpSpanBase* endSpan = angle->end();
+ return updateWinding(startSpan, endSpan);
+}
+
+// OPTIMIZATION: does the following also work, and is it any faster?
+// return outerWinding * innerWinding > 0
+// || ((outerWinding + innerWinding < 0) ^ ((outerWinding - innerWinding) < 0))
+bool SkOpSegment::UseInnerWinding(int outerWinding, int innerWinding) {
+ SkASSERT(outerWinding != SK_MaxS32);
+ SkASSERT(innerWinding != SK_MaxS32);
+ int absOut = SkTAbs(outerWinding);
+ int absIn = SkTAbs(innerWinding);
+ bool result = absOut == absIn ? outerWinding < 0 : absOut < absIn;
+ return result;
+}
+
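+#if 0  // Illustrative sketch only, not part of the build: one way to probe the
+       // OPTIMIZATION question above is to brute-force both forms over a small
+       // range and report any disagreement; this does not claim equivalence.
+static void CompareUseInnerWindingForms() {
+    for (int outer = -8; outer <= 8; ++outer) {
+        for (int inner = -8; inner <= 8; ++inner) {
+            bool current = SkOpSegment::UseInnerWinding(outer, inner);
+            bool proposed = outer * inner > 0
+                    || (((outer + inner) < 0) ^ ((outer - inner) < 0));
+            if (current != proposed) {
+                SkDebugf("forms differ at outer=%d inner=%d\n", outer, inner);
+            }
+        }
+    }
+}
+#endif
+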
+int SkOpSegment::windSum(const SkOpAngle* angle) const {
+ const SkOpSpan* minSpan = angle->start()->starter(angle->end());
+ return minSpan->windSum();
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpSegment.h b/gfx/skia/skia/src/pathops/SkOpSegment.h
new file mode 100644
index 000000000..b6e771401
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpSegment.h
@@ -0,0 +1,458 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOpSegment_DEFINE
+#define SkOpSegment_DEFINE
+
+#include "SkOpAngle.h"
+#include "SkOpSpan.h"
+#include "SkOpTAllocator.h"
+#include "SkPathOpsBounds.h"
+#include "SkPathOpsCubic.h"
+#include "SkPathOpsCurve.h"
+
+struct SkDCurve;
+class SkOpCoincidence;
+class SkOpContour;
+enum class SkOpRayDir;
+struct SkOpRayHit;
+class SkPathWriter;
+
+class SkOpSegment {
+public:
+ bool operator<(const SkOpSegment& rh) const {
+ return fBounds.fTop < rh.fBounds.fTop;
+ }
+
+ SkOpAngle* activeAngle(SkOpSpanBase* start, SkOpSpanBase** startPtr, SkOpSpanBase** endPtr,
+ bool* done);
+ SkOpAngle* activeAngleInner(SkOpSpanBase* start, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr, bool* done);
+ SkOpAngle* activeAngleOther(SkOpSpanBase* start, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr, bool* done);
+ bool activeOp(SkOpSpanBase* start, SkOpSpanBase* end, int xorMiMask, int xorSuMask,
+ SkPathOp op);
+ bool activeOp(int xorMiMask, int xorSuMask, SkOpSpanBase* start, SkOpSpanBase* end, SkPathOp op,
+ int* sumMiWinding, int* sumSuWinding);
+
+ bool activeWinding(SkOpSpanBase* start, SkOpSpanBase* end);
+ bool activeWinding(SkOpSpanBase* start, SkOpSpanBase* end, int* sumWinding);
+
+ SkOpSegment* addConic(SkPoint pts[3], SkScalar weight, SkOpContour* parent) {
+ init(pts, weight, parent, SkPath::kConic_Verb);
+ SkDCurve curve;
+ curve.fConic.set(pts, weight);
+ curve.setConicBounds(pts, weight, 0, 1, &fBounds);
+ return this;
+ }
+
+ SkOpSegment* addCubic(SkPoint pts[4], SkOpContour* parent) {
+ init(pts, 1, parent, SkPath::kCubic_Verb);
+ SkDCurve curve;
+ curve.fCubic.set(pts);
+ curve.setCubicBounds(pts, 1, 0, 1, &fBounds);
+ return this;
+ }
+
+ bool addCurveTo(const SkOpSpanBase* start, const SkOpSpanBase* end, SkPathWriter* path) const;
+
+ SkOpAngle* addEndSpan() {
+ SkOpAngle* angle = SkOpTAllocator<SkOpAngle>::Allocate(this->globalState()->allocator());
+ angle->set(&fTail, fTail.prev());
+ fTail.setFromAngle(angle);
+ return angle;
+ }
+
+ bool addExpanded(double newT, const SkOpSpanBase* test, bool* startOver);
+
+ SkOpSegment* addLine(SkPoint pts[2], SkOpContour* parent) {
+ SkASSERT(pts[0] != pts[1]);
+ init(pts, 1, parent, SkPath::kLine_Verb);
+ fBounds.set(pts, 2);
+ return this;
+ }
+
+ SkOpPtT* addMissing(double t, SkOpSegment* opp, bool* allExist);
+
+ SkOpAngle* addStartSpan() {
+ SkOpAngle* angle = SkOpTAllocator<SkOpAngle>::Allocate(this->globalState()->allocator());
+ angle->set(&fHead, fHead.next());
+ fHead.setToAngle(angle);
+ return angle;
+ }
+
+ SkOpSegment* addQuad(SkPoint pts[3], SkOpContour* parent) {
+ init(pts, 1, parent, SkPath::kQuad_Verb);
+ SkDCurve curve;
+ curve.fQuad.set(pts);
+ curve.setQuadBounds(pts, 1, 0, 1, &fBounds);
+ return this;
+ }
+
+ SkOpPtT* addT(double t);
+
+ template<typename T> T* allocateArray(int count) {
+ return SkOpTAllocator<T>::AllocateArray(this->globalState()->allocator(), count);
+ }
+
+ const SkPathOpsBounds& bounds() const {
+ return fBounds;
+ }
+
+ void bumpCount() {
+ ++fCount;
+ }
+
+ void calcAngles();
+ bool collapsed(double startT, double endT) const;
+ static void ComputeOneSum(const SkOpAngle* baseAngle, SkOpAngle* nextAngle,
+ SkOpAngle::IncludeType );
+ static void ComputeOneSumReverse(SkOpAngle* baseAngle, SkOpAngle* nextAngle,
+ SkOpAngle::IncludeType );
+ int computeSum(SkOpSpanBase* start, SkOpSpanBase* end, SkOpAngle::IncludeType includeType);
+
+ void clearAll();
+ void clearOne(SkOpSpan* span);
+ static void ClearVisited(SkOpSpanBase* span);
+ bool contains(double t) const;
+
+ SkOpContour* contour() const {
+ return fContour;
+ }
+
+ int count() const {
+ return fCount;
+ }
+
+ void debugAddAngle(double startT, double endT);
+#if DEBUG_COIN
+ const SkOpPtT* debugAddT(double t, SkPathOpsDebug::GlitchLog* ) const;
+#endif
+ const SkOpAngle* debugAngle(int id) const;
+#if DEBUG_ANGLE
+ void debugCheckAngleCoin() const;
+#endif
+#if DEBUG_COIN
+ void debugCheckHealth(SkPathOpsDebug::GlitchLog* ) const;
+ void debugClearAll(SkPathOpsDebug::GlitchLog* glitches) const;
+ void debugClearOne(const SkOpSpan* span, SkPathOpsDebug::GlitchLog* glitches) const;
+#endif
+ const SkOpCoincidence* debugCoincidence() const;
+ SkOpContour* debugContour(int id) const;
+
+ int debugID() const {
+ return SkDEBUGRELEASE(fID, -1);
+ }
+
+ SkOpAngle* debugLastAngle();
+#if DEBUG_COIN
+ void debugMissingCoincidence(SkPathOpsDebug::GlitchLog* glitches) const;
+ void debugMoveMultiples(SkPathOpsDebug::GlitchLog* glitches) const;
+ void debugMoveNearby(SkPathOpsDebug::GlitchLog* glitches) const;
+#endif
+ const SkOpPtT* debugPtT(int id) const;
+ void debugReset();
+ const SkOpSegment* debugSegment(int id) const;
+
+#if DEBUG_ACTIVE_SPANS
+ void debugShowActiveSpans() const;
+#endif
+#if DEBUG_MARK_DONE
+ void debugShowNewWinding(const char* fun, const SkOpSpan* span, int winding);
+ void debugShowNewWinding(const char* fun, const SkOpSpan* span, int winding, int oppWinding);
+#endif
+
+ const SkOpSpanBase* debugSpan(int id) const;
+ void debugValidate() const;
+
+#if DEBUG_COINCIDENCE_ORDER
+ void debugResetCoinT() const;
+ void debugSetCoinT(int, SkScalar ) const;
+#endif
+
+#if DEBUG_COIN
+ static void DebugClearVisited(const SkOpSpanBase* span);
+
+ bool debugVisited() const {
+ if (!fDebugVisited) {
+ fDebugVisited = true;
+ return false;
+ }
+ return true;
+ }
+#endif
+
+#if DEBUG_ANGLE
+ double distSq(double t, const SkOpAngle* opp) const;
+#endif
+
+ bool done() const {
+ SkOPASSERT(fDoneCount <= fCount);
+ return fDoneCount == fCount;
+ }
+
+ bool done(const SkOpAngle* angle) const {
+ return angle->start()->starter(angle->end())->done();
+ }
+
+ SkDPoint dPtAtT(double mid) const {
+ return (*CurveDPointAtT[fVerb])(fPts, fWeight, mid);
+ }
+
+ SkDVector dSlopeAtT(double mid) const {
+ return (*CurveDSlopeAtT[fVerb])(fPts, fWeight, mid);
+ }
+
+ void dump() const;
+ void dumpAll() const;
+ void dumpAngles() const;
+ void dumpCoin() const;
+ void dumpPts(const char* prefix = "seg") const;
+ void dumpPtsInner(const char* prefix = "seg") const;
+
+ const SkOpPtT* existing(double t, const SkOpSegment* opp) const;
+ SkOpSegment* findNextOp(SkTDArray<SkOpSpanBase*>* chase, SkOpSpanBase** nextStart,
+ SkOpSpanBase** nextEnd, bool* unsortable, SkPathOp op,
+ int xorMiMask, int xorSuMask);
+ SkOpSegment* findNextWinding(SkTDArray<SkOpSpanBase*>* chase, SkOpSpanBase** nextStart,
+ SkOpSpanBase** nextEnd, bool* unsortable);
+ SkOpSegment* findNextXor(SkOpSpanBase** nextStart, SkOpSpanBase** nextEnd, bool* unsortable);
+ SkOpSpan* findSortableTop(SkOpContour* );
+ SkOpGlobalState* globalState() const;
+
+ const SkOpSpan* head() const {
+ return &fHead;
+ }
+
+ SkOpSpan* head() {
+ return &fHead;
+ }
+
+ void init(SkPoint pts[], SkScalar weight, SkOpContour* parent, SkPath::Verb verb);
+
+ SkOpSpan* insert(SkOpSpan* prev) {
+ SkOpGlobalState* globalState = this->globalState();
+ globalState->setAllocatedOpSpan();
+ SkOpSpan* result = SkOpTAllocator<SkOpSpan>::Allocate(globalState->allocator());
+ SkOpSpanBase* next = prev->next();
+ result->setPrev(prev);
+ prev->setNext(result);
+ SkDEBUGCODE(result->ptT()->fT = 0);
+ result->setNext(next);
+ if (next) {
+ next->setPrev(result);
+ }
+ return result;
+ }
+
+ bool isClose(double t, const SkOpSegment* opp) const;
+
+ bool isHorizontal() const {
+ return fBounds.fTop == fBounds.fBottom;
+ }
+
+ SkOpSegment* isSimple(SkOpSpanBase** end, int* step) {
+ return nextChase(end, step, nullptr, nullptr);
+ }
+
+ bool isVertical() const {
+ return fBounds.fLeft == fBounds.fRight;
+ }
+
+ bool isVertical(SkOpSpanBase* start, SkOpSpanBase* end) const {
+ return (*CurveIsVertical[fVerb])(fPts, fWeight, start->t(), end->t());
+ }
+
+ bool isXor() const;
+
+ void joinEnds(SkOpSegment* start) {
+ fTail.ptT()->addOpp(start->fHead.ptT(), start->fHead.ptT());
+ }
+
+ const SkPoint& lastPt() const {
+ return fPts[SkPathOpsVerbToPoints(fVerb)];
+ }
+
+ void markAllDone();
+ SkOpSpanBase* markAndChaseDone(SkOpSpanBase* start, SkOpSpanBase* end);
+ bool markAndChaseWinding(SkOpSpanBase* start, SkOpSpanBase* end, int winding,
+ SkOpSpanBase** lastPtr);
+ bool markAndChaseWinding(SkOpSpanBase* start, SkOpSpanBase* end, int winding,
+ int oppWinding, SkOpSpanBase** lastPtr);
+ SkOpSpanBase* markAngle(int maxWinding, int sumWinding, const SkOpAngle* angle);
+ SkOpSpanBase* markAngle(int maxWinding, int sumWinding, int oppMaxWinding, int oppSumWinding,
+ const SkOpAngle* angle);
+ void markDone(SkOpSpan* );
+ bool markWinding(SkOpSpan* , int winding);
+ bool markWinding(SkOpSpan* , int winding, int oppWinding);
+ bool match(const SkOpPtT* span, const SkOpSegment* parent, double t, const SkPoint& pt) const;
+ bool missingCoincidence();
+ bool moveMultiples();
+ void moveNearby();
+
+ SkOpSegment* next() const {
+ return fNext;
+ }
+
+ SkOpSegment* nextChase(SkOpSpanBase** , int* step, SkOpSpan** , SkOpSpanBase** last) const;
+ bool operand() const;
+
+ static int OppSign(const SkOpSpanBase* start, const SkOpSpanBase* end) {
+ int result = start->t() < end->t() ? -start->upCast()->oppValue()
+ : end->upCast()->oppValue();
+ return result;
+ }
+
+ bool oppXor() const;
+
+ const SkOpSegment* prev() const {
+ return fPrev;
+ }
+
+ SkPoint ptAtT(double mid) const {
+ return (*CurvePointAtT[fVerb])(fPts, fWeight, mid);
+ }
+
+ const SkPoint* pts() const {
+ return fPts;
+ }
+
+ bool ptsDisjoint(const SkOpPtT& span, const SkOpPtT& test) const {
+ SkASSERT(this == span.segment());
+ SkASSERT(this == test.segment());
+ return ptsDisjoint(span.fT, span.fPt, test.fT, test.fPt);
+ }
+
+ bool ptsDisjoint(const SkOpPtT& span, double t, const SkPoint& pt) const {
+ SkASSERT(this == span.segment());
+ return ptsDisjoint(span.fT, span.fPt, t, pt);
+ }
+
+ bool ptsDisjoint(double t1, const SkPoint& pt1, double t2, const SkPoint& pt2) const;
+
+ void rayCheck(const SkOpRayHit& base, SkOpRayDir dir, SkOpRayHit** hits, SkChunkAlloc*);
+ void release(const SkOpSpan* );
+
+#if DEBUG_COIN
+ void resetDebugVisited() const {
+ fDebugVisited = false;
+ }
+#endif
+
+ void resetVisited() {
+ fVisited = false;
+ }
+
+ void setContour(SkOpContour* contour) {
+ fContour = contour;
+ }
+
+ void setNext(SkOpSegment* next) {
+ fNext = next;
+ }
+
+ void setPrev(SkOpSegment* prev) {
+ fPrev = prev;
+ }
+
+ void setUpWinding(SkOpSpanBase* start, SkOpSpanBase* end, int* maxWinding, int* sumWinding) {
+ int deltaSum = SpanSign(start, end);
+ *maxWinding = *sumWinding;
+ if (*sumWinding == SK_MinS32) {
+ return;
+ }
+ *sumWinding -= deltaSum;
+ }
+
+ void setUpWindings(SkOpSpanBase* start, SkOpSpanBase* end, int* sumMiWinding,
+ int* maxWinding, int* sumWinding);
+ void setUpWindings(SkOpSpanBase* start, SkOpSpanBase* end, int* sumMiWinding, int* sumSuWinding,
+ int* maxWinding, int* sumWinding, int* oppMaxWinding, int* oppSumWinding);
+ void sortAngles();
+ bool spansNearby(const SkOpSpanBase* ref, const SkOpSpanBase* check) const;
+
+ static int SpanSign(const SkOpSpanBase* start, const SkOpSpanBase* end) {
+ int result = start->t() < end->t() ? -start->upCast()->windValue()
+ : end->upCast()->windValue();
+ return result;
+ }
+
+ SkOpAngle* spanToAngle(SkOpSpanBase* start, SkOpSpanBase* end) {
+ SkASSERT(start != end);
+ return start->t() < end->t() ? start->upCast()->toAngle() : start->fromAngle();
+ }
+
+ bool subDivide(const SkOpSpanBase* start, const SkOpSpanBase* end, SkDCurve* result) const;
+
+ const SkOpSpanBase* tail() const {
+ return &fTail;
+ }
+
+ SkOpSpanBase* tail() {
+ return &fTail;
+ }
+
+ bool testForCoincidence(const SkOpPtT* priorPtT, const SkOpPtT* ptT, const SkOpSpanBase* prior,
+ const SkOpSpanBase* spanBase, const SkOpSegment* opp) const;
+
+ void undoneSpan(SkOpSpanBase** start, SkOpSpanBase** end);
+ int updateOppWinding(const SkOpSpanBase* start, const SkOpSpanBase* end) const;
+ int updateOppWinding(const SkOpAngle* angle) const;
+ int updateOppWindingReverse(const SkOpAngle* angle) const;
+ int updateWinding(SkOpSpanBase* start, SkOpSpanBase* end);
+ int updateWinding(SkOpAngle* angle);
+ int updateWindingReverse(const SkOpAngle* angle);
+
+ static bool UseInnerWinding(int outerWinding, int innerWinding);
+
+ SkPath::Verb verb() const {
+ return fVerb;
+ }
+
+ // look for two different spans that point to the same opposite segment
+ bool visited() {
+ if (!fVisited) {
+ fVisited = true;
+ return false;
+ }
+ return true;
+ }
+
+ SkScalar weight() const {
+ return fWeight;
+ }
+
+ SkOpSpan* windingSpanAtT(double tHit);
+ int windSum(const SkOpAngle* angle) const;
+
+private:
+ SkOpSpan fHead; // the head span always has its t set to zero
+ SkOpSpanBase fTail; // the tail span always has its t set to one
+ SkOpContour* fContour;
+ SkOpSegment* fNext; // forward-only linked list used by contour to walk the segments
+ const SkOpSegment* fPrev;
+ SkPoint* fPts; // pointer into array of points owned by edge builder that may be tweaked
+ SkPathOpsBounds fBounds; // tight bounds
+ SkScalar fWeight;
+ int fCount; // number of spans (one for a non-intersecting segment)
+ int fDoneCount; // number of processed spans (zero initially)
+ SkPath::Verb fVerb;
+ bool fVisited; // used by missing coincidence check
+#if DEBUG_COIN
+ mutable bool fDebugVisited; // used by debug missing coincidence check
+#endif
+#if DEBUG_COINCIDENCE_ORDER
+ mutable int fDebugBaseIndex;
+ mutable SkScalar fDebugBaseMin; // if > 0, the 1st t value in this seg vis-a-vis the ref seg
+ mutable SkScalar fDebugBaseMax;
+ mutable int fDebugLastIndex;
+ mutable SkScalar fDebugLastMin; // if > 0, the last t -- next t val - base has same sign
+ mutable SkScalar fDebugLastMax;
+#endif
+ SkDEBUGCODE(int fID);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkOpSpan.cpp b/gfx/skia/skia/src/pathops/SkOpSpan.cpp
new file mode 100755
index 000000000..2abc44e24
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpSpan.cpp
@@ -0,0 +1,475 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkOpCoincidence.h"
+#include "SkOpContour.h"
+#include "SkOpSegment.h"
+#include "SkPathWriter.h"
+
+bool SkOpPtT::alias() const {
+ return this->span()->ptT() != this;
+}
+
+const SkOpPtT* SkOpPtT::active() const {
+ if (!fDeleted) {
+ return this;
+ }
+ const SkOpPtT* ptT = this;
+ const SkOpPtT* stopPtT = ptT;
+ while ((ptT = ptT->next()) != stopPtT) {
+ if (ptT->fSpan == fSpan && !ptT->fDeleted) {
+ return ptT;
+ }
+ }
+ SkASSERT(0); // should never return deleted
+ return this;
+}
+
+bool SkOpPtT::contains(const SkOpPtT* check) const {
+ SkOPASSERT(this != check);
+ const SkOpPtT* ptT = this;
+ const SkOpPtT* stopPtT = ptT;
+ while ((ptT = ptT->next()) != stopPtT) {
+ if (ptT == check) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool SkOpPtT::contains(const SkOpSegment* segment, const SkPoint& pt) const {
+ SkASSERT(this->segment() != segment);
+ const SkOpPtT* ptT = this;
+ const SkOpPtT* stopPtT = ptT;
+ while ((ptT = ptT->next()) != stopPtT) {
+ if (ptT->fPt == pt && ptT->segment() == segment) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool SkOpPtT::contains(const SkOpSegment* segment, double t) const {
+ const SkOpPtT* ptT = this;
+ const SkOpPtT* stopPtT = ptT;
+ while ((ptT = ptT->next()) != stopPtT) {
+ if (ptT->fT == t && ptT->segment() == segment) {
+ return true;
+ }
+ }
+ return false;
+}
+
+const SkOpPtT* SkOpPtT::contains(const SkOpSegment* check) const {
+ SkASSERT(this->segment() != check);
+ const SkOpPtT* ptT = this;
+ const SkOpPtT* stopPtT = ptT;
+ while ((ptT = ptT->next()) != stopPtT) {
+ if (ptT->segment() == check && !ptT->deleted()) {
+ return ptT;
+ }
+ }
+ return nullptr;
+}
+
+SkOpContour* SkOpPtT::contour() const {
+ return segment()->contour();
+}
+
+const SkOpPtT* SkOpPtT::find(const SkOpSegment* segment) const {
+ const SkOpPtT* ptT = this;
+ const SkOpPtT* stopPtT = ptT;
+ do {
+ if (ptT->segment() == segment && !ptT->deleted()) {
+ return ptT;
+ }
+ ptT = ptT->fNext;
+ } while (stopPtT != ptT);
+// SkASSERT(0);
+ return nullptr;
+}
+
+SkOpGlobalState* SkOpPtT::globalState() const {
+ return contour()->globalState();
+}
+
+void SkOpPtT::init(SkOpSpanBase* span, double t, const SkPoint& pt, bool duplicate) {
+ fT = t;
+ fPt = pt;
+ fSpan = span;
+ fNext = this;
+ fDuplicatePt = duplicate;
+ fDeleted = false;
+ fCoincident = false;
+ SkDEBUGCODE(fID = span->globalState()->nextPtTID());
+}
+
+bool SkOpPtT::onEnd() const {
+ const SkOpSpanBase* span = this->span();
+ if (span->ptT() != this) {
+ return false;
+ }
+ const SkOpSegment* segment = this->segment();
+ return span == segment->head() || span == segment->tail();
+}
+
+bool SkOpPtT::ptAlreadySeen(const SkOpPtT* check) const {
+ while (this != check) {
+ if (this->fPt == check->fPt) {
+ return true;
+ }
+ check = check->fNext;
+ }
+ return false;
+}
+
+SkOpPtT* SkOpPtT::prev() {
+ SkOpPtT* result = this;
+ SkOpPtT* next = this;
+ while ((next = next->fNext) != this) {
+ result = next;
+ }
+ SkASSERT(result->fNext == this);
+ return result;
+}
+
+const SkOpSegment* SkOpPtT::segment() const {
+ return span()->segment();
+}
+
+SkOpSegment* SkOpPtT::segment() {
+ return span()->segment();
+}
+
+void SkOpPtT::setDeleted() {
+ SkASSERT(this->span()->debugDeleted() || this->span()->ptT() != this);
+ SkOPASSERT(!fDeleted);
+ fDeleted = true;
+}
+
+void SkOpSpanBase::addOpp(SkOpSpanBase* opp) {
+ SkOpPtT* oppPrev = this->ptT()->oppPrev(opp->ptT());
+ if (!oppPrev) {
+ return;
+ }
+ this->mergeMatches(opp);
+ this->ptT()->addOpp(opp->ptT(), oppPrev);
+ this->checkForCollapsedCoincidence();
+}
+
+bool SkOpSpanBase::collapsed(double s, double e) const {
+ const SkOpPtT* start = &fPtT;
+ const SkOpPtT* walk = start;
+ double min = walk->fT;
+ double max = min;
+ const SkOpSegment* segment = this->segment();
+ while ((walk = walk->next()) != start) {
+ if (walk->segment() != segment) {
+ continue;
+ }
+ min = SkTMin(min, walk->fT);
+ max = SkTMax(max, walk->fT);
+ if (between(min, s, max) && between(min, e, max)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool SkOpSpanBase::contains(const SkOpSpanBase* span) const {
+ const SkOpPtT* start = &fPtT;
+ const SkOpPtT* check = &span->fPtT;
+ SkOPASSERT(start != check);
+ const SkOpPtT* walk = start;
+ while ((walk = walk->next()) != start) {
+ if (walk == check) {
+ return true;
+ }
+ }
+ return false;
+}
+
+const SkOpPtT* SkOpSpanBase::contains(const SkOpSegment* segment) const {
+ const SkOpPtT* start = &fPtT;
+ const SkOpPtT* walk = start;
+ while ((walk = walk->next()) != start) {
+ if (walk->deleted()) {
+ continue;
+ }
+ if (walk->segment() == segment && walk->span()->ptT() == walk) {
+ return walk;
+ }
+ }
+ return nullptr;
+}
+
+bool SkOpSpanBase::containsCoinEnd(const SkOpSegment* segment) const {
+ SkASSERT(this->segment() != segment);
+ const SkOpSpanBase* next = this;
+ while ((next = next->fCoinEnd) != this) {
+ if (next->segment() == segment) {
+ return true;
+ }
+ }
+ return false;
+}
+
+SkOpContour* SkOpSpanBase::contour() const {
+ return segment()->contour();
+}
+
+SkOpGlobalState* SkOpSpanBase::globalState() const {
+ return contour()->globalState();
+}
+
+void SkOpSpanBase::initBase(SkOpSegment* segment, SkOpSpan* prev, double t, const SkPoint& pt) {
+ fSegment = segment;
+ fPtT.init(this, t, pt, false);
+ fCoinEnd = this;
+ fFromAngle = nullptr;
+ fPrev = prev;
+ fSpanAdds = 0;
+ fAligned = true;
+ fChased = false;
+ SkDEBUGCODE(fCount = 1);
+ SkDEBUGCODE(fID = globalState()->nextSpanID());
+ SkDEBUGCODE(fDebugDeleted = false);
+}
+
+// this pair of spans share a common t value or point; merge them and eliminate duplicates
+// this does not compute the best t or pt value; this merely moves all data into a single list
+void SkOpSpanBase::merge(SkOpSpan* span) {
+ SkOpPtT* spanPtT = span->ptT();
+ SkASSERT(this->t() != spanPtT->fT);
+ SkASSERT(!zero_or_one(spanPtT->fT));
+ span->release(this->ptT());
+ if (this->contains(span)) {
+ SkOPASSERT(0); // check to see if this ever happens -- should have been found earlier
+ return; // merge is already in the ptT loop
+ }
+ SkOpPtT* remainder = spanPtT->next();
+ this->ptT()->insert(spanPtT);
+ while (remainder != spanPtT) {
+ SkOpPtT* next = remainder->next();
+ SkOpPtT* compare = spanPtT->next();
+ while (compare != spanPtT) {
+ SkOpPtT* nextC = compare->next();
+ if (nextC->span() == remainder->span() && nextC->fT == remainder->fT) {
+ goto tryNextRemainder;
+ }
+ compare = nextC;
+ }
+ spanPtT->insert(remainder);
+tryNextRemainder:
+ remainder = next;
+ }
+ fSpanAdds += span->fSpanAdds;
+}
+
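+// A hypothetical before/after of the splice above (labels are illustrative):
+//   this span's loop:  a -> b -> a        (a == this->ptT())
+//   merged span loop:  c -> d -> c        (c == span->ptT())
+// After this->ptT()->insert(spanPtT) the loop reads a -> c -> b -> a, and each
+// remaining member of the old loop (d, ...) is inserted after c unless a pt-t
+// with the same span and t is already present.
+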
+SkOpSpanBase* SkOpSpanBase::active() {
+ SkOpSpanBase* result = fPrev ? fPrev->next() : upCast()->next()->prev();
+ SkASSERT(this == result || fDebugDeleted);
+ return result;
+}
+
+// please keep in sync with debugCheckForCollapsedCoincidence()
+void SkOpSpanBase::checkForCollapsedCoincidence() {
+ SkOpCoincidence* coins = this->globalState()->coincidence();
+ if (coins->isEmpty()) {
+ return;
+ }
+// the insert above may have put both ends of a coincident run in the same span
+// for each coincident ptT in the loop, see if its opposite is also in the loop
+// this implementation is the motivation for marking that a ptT is referenced by a coincident span
+ SkOpPtT* head = this->ptT();
+ SkOpPtT* test = head;
+ do {
+ if (!test->coincident()) {
+ continue;
+ }
+ coins->markCollapsed(test);
+ } while ((test = test->next()) != head);
+ coins->releaseDeleted();
+}
+
+// please keep in sync with debugMergeMatches()
+// Look to see if the pt-t linked list contains the same segment more than once
+// if so, and if each pt-t is directly pointed to by spans in that segment,
+// merge them
+// keep the points, but remove spans so that the segment doesn't have 2 or more
+// spans pointing to the same pt-t loop at different loop elements
+void SkOpSpanBase::mergeMatches(SkOpSpanBase* opp) {
+ SkOpPtT* test = &fPtT;
+ SkOpPtT* testNext;
+ const SkOpPtT* stop = test;
+ do {
+ testNext = test->next();
+ if (test->deleted()) {
+ continue;
+ }
+ SkOpSpanBase* testBase = test->span();
+ SkASSERT(testBase->ptT() == test);
+ SkOpSegment* segment = test->segment();
+ if (segment->done()) {
+ continue;
+ }
+ SkOpPtT* inner = opp->ptT();
+ const SkOpPtT* innerStop = inner;
+ do {
+ if (inner->segment() != segment) {
+ continue;
+ }
+ if (inner->deleted()) {
+ continue;
+ }
+ SkOpSpanBase* innerBase = inner->span();
+ SkASSERT(innerBase->ptT() == inner);
+            // when the intersection is first detected, the span base is marked if there is
+            // more than one point in the intersection.
+ if (!zero_or_one(inner->fT)) {
+ innerBase->upCast()->release(test);
+ } else {
+ SkOPASSERT(inner->fT != test->fT);
+ if (!zero_or_one(test->fT)) {
+ testBase->upCast()->release(inner);
+ } else {
+ segment->markAllDone(); // mark segment as collapsed
+ SkDEBUGCODE(testBase->debugSetDeleted());
+ test->setDeleted();
+ SkDEBUGCODE(innerBase->debugSetDeleted());
+ inner->setDeleted();
+ }
+ }
+#ifdef SK_DEBUG // assert if another undeleted entry points to segment
+ const SkOpPtT* debugInner = inner;
+ while ((debugInner = debugInner->next()) != innerStop) {
+ if (debugInner->segment() != segment) {
+ continue;
+ }
+ if (debugInner->deleted()) {
+ continue;
+ }
+ SkOPASSERT(0);
+ }
+#endif
+ break;
+ } while ((inner = inner->next()) != innerStop);
+ } while ((test = testNext) != stop);
+ this->checkForCollapsedCoincidence();
+}
+
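+// A hypothetical instance of the cleanup above: this span's loop and opp's loop
+// each hold the head pt-t of a span on the same segment S (call them A and B).
+// Before the two loops are joined, whichever of the two pt-ts does not sit at
+// t == 0 or t == 1 has its span released, so S keeps a single span at the shared
+// point; if both sit at S's endpoints, S has collapsed and is marked done.
+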
+int SkOpSpan::computeWindSum() {
+ SkOpGlobalState* globals = this->globalState();
+ SkOpContour* contourHead = globals->contourHead();
+ int windTry = 0;
+ while (!this->sortableTop(contourHead) && ++windTry < SkOpGlobalState::kMaxWindingTries) {
+ ;
+ }
+ return this->windSum();
+}
+
+bool SkOpSpan::containsCoincidence(const SkOpSegment* segment) const {
+ SkASSERT(this->segment() != segment);
+ const SkOpSpan* next = fCoincident;
+ do {
+ if (next->segment() == segment) {
+ return true;
+ }
+ } while ((next = next->fCoincident) != this);
+ return false;
+}
+
+void SkOpSpan::init(SkOpSegment* segment, SkOpSpan* prev, double t, const SkPoint& pt) {
+ SkASSERT(t != 1);
+ initBase(segment, prev, t, pt);
+ fCoincident = this;
+ fToAngle = nullptr;
+ fWindSum = fOppSum = SK_MinS32;
+ fWindValue = 1;
+ fOppValue = 0;
+ fTopTTry = 0;
+ fChased = fDone = false;
+ segment->bumpCount();
+ fAlreadyAdded = false;
+}
+
+// Please keep this in sync with debugInsertCoincidence()
+bool SkOpSpan::insertCoincidence(const SkOpSegment* segment, bool flipped, bool ordered) {
+ if (this->containsCoincidence(segment)) {
+ return true;
+ }
+ SkOpPtT* next = &fPtT;
+ while ((next = next->next()) != &fPtT) {
+ if (next->segment() == segment) {
+ SkOpSpan* span;
+ SkOpSpanBase* base = next->span();
+ if (!ordered) {
+ const SkOpSpanBase* spanEnd = fNext->contains(segment)->span();
+ const SkOpPtT* start = base->ptT()->starter(spanEnd->ptT());
+ FAIL_IF(!start->span()->upCastable());
+ span = const_cast<SkOpSpan*>(start->span()->upCast());
+ } else if (flipped) {
+ span = base->prev();
+ FAIL_IF(!span);
+ } else {
+ FAIL_IF(!base->upCastable());
+ span = base->upCast();
+ }
+ this->insertCoincidence(span);
+ return true;
+ }
+ }
+#if DEBUG_COINCIDENCE
+ SkASSERT(0); // FIXME? if we get here, the span is missing its opposite segment...
+#endif
+ return true;
+}
+
+void SkOpSpan::release(const SkOpPtT* kept) {
+ SkDEBUGCODE(fDebugDeleted = true);
+ SkOPASSERT(kept->span() != this);
+ SkASSERT(!final());
+ SkOpSpan* prev = this->prev();
+ SkASSERT(prev);
+ SkOpSpanBase* next = this->next();
+ SkASSERT(next);
+ prev->setNext(next);
+ next->setPrev(prev);
+ this->segment()->release(this);
+ SkOpCoincidence* coincidence = this->globalState()->coincidence();
+ if (coincidence) {
+ coincidence->fixUp(this->ptT(), kept);
+ }
+ this->ptT()->setDeleted();
+ SkOpPtT* stopPtT = this->ptT();
+ SkOpPtT* testPtT = stopPtT;
+ const SkOpSpanBase* keptSpan = kept->span();
+ do {
+ if (this == testPtT->span()) {
+ testPtT->setSpan(keptSpan);
+ }
+ } while ((testPtT = testPtT->next()) != stopPtT);
+}
+
+void SkOpSpan::setOppSum(int oppSum) {
+ SkASSERT(!final());
+ if (fOppSum != SK_MinS32 && fOppSum != oppSum) {
+ this->globalState()->setWindingFailed();
+ return;
+ }
+ SkASSERT(!DEBUG_LIMIT_WIND_SUM || SkTAbs(oppSum) <= DEBUG_LIMIT_WIND_SUM);
+ fOppSum = oppSum;
+}
+
+void SkOpSpan::setWindSum(int windSum) {
+ SkASSERT(!final());
+ if (fWindSum != SK_MinS32 && fWindSum != windSum) {
+ this->globalState()->setWindingFailed();
+ return;
+ }
+ SkASSERT(!DEBUG_LIMIT_WIND_SUM || SkTAbs(windSum) <= DEBUG_LIMIT_WIND_SUM);
+ fWindSum = windSum;
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpSpan.h b/gfx/skia/skia/src/pathops/SkOpSpan.h
new file mode 100644
index 000000000..023e7acfb
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpSpan.h
@@ -0,0 +1,570 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOpSpan_DEFINED
+#define SkOpSpan_DEFINED
+
+#include "SkPathOpsDebug.h"
+#include "SkPathOpsTypes.h"
+#include "SkPoint.h"
+
+class SkChunkAlloc;
+class SkOpAngle;
+class SkOpContour;
+class SkOpGlobalState;
+class SkOpSegment;
+class SkOpSpanBase;
+class SkOpSpan;
+struct SkPathOpsBounds;
+
+// subset of op span used by terminal span (when t is equal to one)
+class SkOpPtT {
+public:
+ enum {
+ kIsAlias = 1,
+ kIsDuplicate = 1
+ };
+
+ const SkOpPtT* active() const;
+
+ // please keep in sync with debugAddOpp()
+ void addOpp(SkOpPtT* opp, SkOpPtT* oppPrev) {
+ SkOpPtT* oldNext = this->fNext;
+ SkASSERT(this != opp);
+ this->fNext = opp;
+ SkASSERT(oppPrev != oldNext);
+ oppPrev->fNext = oldNext;
+ }
+
+ bool alias() const;
+ bool coincident() const { return fCoincident; }
+ bool contains(const SkOpPtT* ) const;
+ bool contains(const SkOpSegment*, const SkPoint& ) const;
+ bool contains(const SkOpSegment*, double t) const;
+ const SkOpPtT* contains(const SkOpSegment* ) const;
+ SkOpContour* contour() const;
+
+ int debugID() const {
+ return SkDEBUGRELEASE(fID, -1);
+ }
+
+ void debugAddOpp(const SkOpPtT* opp, const SkOpPtT* oppPrev) const;
+ const SkOpAngle* debugAngle(int id) const;
+ const SkOpCoincidence* debugCoincidence() const;
+ bool debugContains(const SkOpPtT* ) const;
+ const SkOpPtT* debugContains(const SkOpSegment* check) const;
+ SkOpContour* debugContour(int id) const;
+ const SkOpPtT* debugEnder(const SkOpPtT* end) const;
+ int debugLoopLimit(bool report) const;
+ bool debugMatchID(int id) const;
+ const SkOpPtT* debugOppPrev(const SkOpPtT* opp) const;
+ const SkOpPtT* debugPtT(int id) const;
+ void debugResetCoinT() const;
+ const SkOpSegment* debugSegment(int id) const;
+ void debugSetCoinT(int ) const;
+ const SkOpSpanBase* debugSpan(int id) const;
+ void debugValidate() const;
+
+ bool deleted() const {
+ return fDeleted;
+ }
+
+ bool duplicate() const {
+ return fDuplicatePt;
+ }
+
+ void dump() const; // available to testing only
+ void dumpAll() const;
+ void dumpBase() const;
+
+ const SkOpPtT* find(const SkOpSegment* ) const;
+ SkOpGlobalState* globalState() const;
+ void init(SkOpSpanBase* , double t, const SkPoint& , bool dup);
+
+ void insert(SkOpPtT* span) {
+ SkASSERT(span != this);
+ span->fNext = fNext;
+ fNext = span;
+ }
+
+ const SkOpPtT* next() const {
+ return fNext;
+ }
+
+ SkOpPtT* next() {
+ return fNext;
+ }
+
+ bool onEnd() const;
+
+ // returns nullptr if this is already in the opp ptT loop
+ SkOpPtT* oppPrev(const SkOpPtT* opp) const {
+ // find the fOpp ptr to opp
+ SkOpPtT* oppPrev = opp->fNext;
+ if (oppPrev == this) {
+ return nullptr;
+ }
+ while (oppPrev->fNext != opp) {
+ oppPrev = oppPrev->fNext;
+ if (oppPrev == this) {
+ return nullptr;
+ }
+ }
+ return oppPrev;
+ }
+
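+    // A worked, hypothetical example of oppPrev(): with separate loops
+    // a -> b -> a and c -> d -> c, a->oppPrev(c) returns d (the pt-t whose
+    // fNext is c), giving addOpp() its splice point; if 'a' is already in
+    // c's loop, nullptr is returned and no splice happens.
+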
+ static bool Overlaps(const SkOpPtT* s1, const SkOpPtT* e1, const SkOpPtT* s2,
+ const SkOpPtT* e2, const SkOpPtT** sOut, const SkOpPtT** eOut) {
+ const SkOpPtT* start1 = s1->fT < e1->fT ? s1 : e1;
+ const SkOpPtT* start2 = s2->fT < e2->fT ? s2 : e2;
+ *sOut = between(s1->fT, start2->fT, e1->fT) ? start2
+ : between(s2->fT, start1->fT, e2->fT) ? start1 : nullptr;
+ const SkOpPtT* end1 = s1->fT < e1->fT ? e1 : s1;
+ const SkOpPtT* end2 = s2->fT < e2->fT ? e2 : s2;
+ *eOut = between(s1->fT, end2->fT, e1->fT) ? end2
+ : between(s2->fT, end1->fT, e2->fT) ? end1 : nullptr;
+ if (*sOut == *eOut) {
+ SkASSERT(start1->fT >= end2->fT || start2->fT >= end1->fT);
+ return false;
+ }
+ SkASSERT(!*sOut || *sOut != *eOut);
+ return *sOut && *eOut;
+ }
+
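+    // A worked, hypothetical example of Overlaps(): for t ranges [0.2, 0.8]
+    // and [0.5, 1.0], *sOut is the pt-t at t == 0.5, *eOut the pt-t at t == 0.8,
+    // and the call returns true; for disjoint ranges such as [0.0, 0.3] and
+    // [0.6, 1.0], both outputs stay nullptr and it returns false.
+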
+ bool ptAlreadySeen(const SkOpPtT* head) const;
+ SkOpPtT* prev();
+
+ const SkOpSegment* segment() const;
+ SkOpSegment* segment();
+
+ void setCoincident() const {
+ SkOPASSERT(!fDeleted);
+ fCoincident = true;
+ }
+
+ void setDeleted();
+
+ void setSpan(const SkOpSpanBase* span) {
+ fSpan = const_cast<SkOpSpanBase*>(span);
+ }
+
+ const SkOpSpanBase* span() const {
+ return fSpan;
+ }
+
+ SkOpSpanBase* span() {
+ return fSpan;
+ }
+
+ const SkOpPtT* starter(const SkOpPtT* end) const {
+ return fT < end->fT ? this : end;
+ }
+
+ double fT;
+ SkPoint fPt; // cache of point value at this t
+protected:
+ SkOpSpanBase* fSpan; // contains winding data
+ SkOpPtT* fNext; // intersection on opposite curve or alias on this curve
+ bool fDeleted; // set if removed from span list
+ bool fDuplicatePt; // set if identical pt is somewhere in the next loop
+ // below mutable since referrer is otherwise always const
+ mutable bool fCoincident; // set if at some point a coincident span pointed here
+ SkDEBUGCODE(int fID);
+};
+
+class SkOpSpanBase {
+public:
+ SkOpSpanBase* active();
+ void addOpp(SkOpSpanBase* opp);
+
+ void bumpSpanAdds() {
+ ++fSpanAdds;
+ }
+
+ bool chased() const {
+ return fChased;
+ }
+
+ void checkForCollapsedCoincidence();
+
+ const SkOpSpanBase* coinEnd() const {
+ return fCoinEnd;
+ }
+
+ bool collapsed(double s, double e) const;
+ bool contains(const SkOpSpanBase* ) const;
+ const SkOpPtT* contains(const SkOpSegment* ) const;
+
+ bool containsCoinEnd(const SkOpSpanBase* coin) const {
+ SkASSERT(this != coin);
+ const SkOpSpanBase* next = this;
+ while ((next = next->fCoinEnd) != this) {
+ if (next == coin) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool containsCoinEnd(const SkOpSegment* ) const;
+ SkOpContour* contour() const;
+
+ int debugBumpCount() {
+ return SkDEBUGRELEASE(++fCount, -1);
+ }
+
+ int debugID() const {
+ return SkDEBUGRELEASE(fID, -1);
+ }
+
+#if DEBUG_COIN
+ void debugAddOpp(SkPathOpsDebug::GlitchLog* , const SkOpSpanBase* opp) const;
+#endif
+ bool debugAlignedEnd(double t, const SkPoint& pt) const;
+ bool debugAlignedInner() const;
+ const SkOpAngle* debugAngle(int id) const;
+#if DEBUG_COIN
+ void debugCheckForCollapsedCoincidence(SkPathOpsDebug::GlitchLog* ) const;
+#endif
+ const SkOpCoincidence* debugCoincidence() const;
+ bool debugCoinEndLoopCheck() const;
+ SkOpContour* debugContour(int id) const;
+#ifdef SK_DEBUG
+ bool debugDeleted() const { return fDebugDeleted; }
+#endif
+#if DEBUG_COIN
+ void debugInsertCoinEnd(SkPathOpsDebug::GlitchLog* ,
+ const SkOpSpanBase* ) const;
+ void debugMergeMatches(SkPathOpsDebug::GlitchLog* log,
+ const SkOpSpanBase* opp) const;
+#endif
+ const SkOpPtT* debugPtT(int id) const;
+ void debugResetCoinT() const;
+ const SkOpSegment* debugSegment(int id) const;
+ void debugSetCoinT(int ) const;
+#ifdef SK_DEBUG
+ void debugSetDeleted() { fDebugDeleted = true; }
+#endif
+ const SkOpSpanBase* debugSpan(int id) const;
+ const SkOpSpan* debugStarter(SkOpSpanBase const** endPtr) const;
+ SkOpGlobalState* globalState() const;
+ void debugValidate() const;
+
+ bool deleted() const {
+ return fPtT.deleted();
+ }
+
+ void dump() const; // available to testing only
+ void dumpCoin() const;
+ void dumpAll() const;
+ void dumpBase() const;
+ void dumpHead() const;
+
+ bool final() const {
+ return fPtT.fT == 1;
+ }
+
+ SkOpAngle* fromAngle() const {
+ return fFromAngle;
+ }
+
+ void initBase(SkOpSegment* parent, SkOpSpan* prev, double t, const SkPoint& pt);
+
+ // Please keep this in sync with debugInsertCoinEnd()
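+    // The fCoinEnd pointers form a circular singly linked list (a lone span
+    // points back at itself); swapping the two next pointers below splices
+    // this ring and coin's ring into a single ring.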
+ void insertCoinEnd(SkOpSpanBase* coin) {
+ if (containsCoinEnd(coin)) {
+ SkASSERT(coin->containsCoinEnd(this));
+ return;
+ }
+ debugValidate();
+ SkASSERT(this != coin);
+ SkOpSpanBase* coinNext = coin->fCoinEnd;
+ coin->fCoinEnd = this->fCoinEnd;
+ this->fCoinEnd = coinNext;
+ debugValidate();
+ }
+
+ void merge(SkOpSpan* span);
+ void mergeMatches(SkOpSpanBase* opp);
+
+ const SkOpSpan* prev() const {
+ return fPrev;
+ }
+
+ SkOpSpan* prev() {
+ return fPrev;
+ }
+
+ const SkPoint& pt() const {
+ return fPtT.fPt;
+ }
+
+ const SkOpPtT* ptT() const {
+ return &fPtT;
+ }
+
+ SkOpPtT* ptT() {
+ return &fPtT;
+ }
+
+ SkOpSegment* segment() const {
+ return fSegment;
+ }
+
+ void setAligned() {
+ fAligned = true;
+ }
+
+ void setChased(bool chased) {
+ fChased = chased;
+ }
+
+ void setFromAngle(SkOpAngle* angle) {
+ fFromAngle = angle;
+ }
+
+ void setPrev(SkOpSpan* prev) {
+ fPrev = prev;
+ }
+
+ bool simple() const {
+ fPtT.debugValidate();
+ return fPtT.next()->next() == &fPtT;
+ }
+
+ int spanAddsCount() const {
+ return fSpanAdds;
+ }
+
+ const SkOpSpan* starter(const SkOpSpanBase* end) const {
+ const SkOpSpanBase* result = t() < end->t() ? this : end;
+ return result->upCast();
+ }
+
+ SkOpSpan* starter(SkOpSpanBase* end) {
+ SkASSERT(this->segment() == end->segment());
+ SkOpSpanBase* result = t() < end->t() ? this : end;
+ return result->upCast();
+ }
+
+ SkOpSpan* starter(SkOpSpanBase** endPtr) {
+ SkOpSpanBase* end = *endPtr;
+ SkASSERT(this->segment() == end->segment());
+ SkOpSpanBase* result;
+ if (t() < end->t()) {
+ result = this;
+ } else {
+ result = end;
+ *endPtr = this;
+ }
+ return result->upCast();
+ }
+
+ int step(const SkOpSpanBase* end) const {
+ return t() < end->t() ? 1 : -1;
+ }
+
+ double t() const {
+ return fPtT.fT;
+ }
+
+ void unaligned() {
+ fAligned = false;
+ }
+
+ SkOpSpan* upCast() {
+ SkASSERT(!final());
+ return (SkOpSpan*) this;
+ }
+
+ const SkOpSpan* upCast() const {
+ SkOPASSERT(!final());
+ return (const SkOpSpan*) this;
+ }
+
+ SkOpSpan* upCastable() {
+ return final() ? nullptr : upCast();
+ }
+
+ const SkOpSpan* upCastable() const {
+ return final() ? nullptr : upCast();
+ }
+
+private:
+ void alignInner();
+
+protected: // no direct access to internals to avoid treating a span base as a span
+ SkOpPtT fPtT; // list of points and t values associated with the start of this span
+ SkOpSegment* fSegment; // segment that contains this span
+ SkOpSpanBase* fCoinEnd; // linked list of coincident spans that end here (may point to itself)
+ SkOpAngle* fFromAngle; // points to next angle from span start to end
+ SkOpSpan* fPrev; // previous intersection point
+ int fSpanAdds; // number of times intersections have been added to span
+ bool fAligned;
+ bool fChased; // set after span has been added to chase array
+ SkDEBUGCODE(int fCount); // number of pt/t pairs added
+ SkDEBUGCODE(int fID);
+ SkDEBUGCODE(bool fDebugDeleted); // set when span was merged with another span
+};
+
+class SkOpSpan : public SkOpSpanBase {
+public:
+ bool alreadyAdded() const {
+ if (fAlreadyAdded) {
+ return true;
+ }
+ fAlreadyAdded = true;
+ return false;
+ }
+
+ bool clearCoincident() {
+ SkASSERT(!final());
+ if (fCoincident == this) {
+ return false;
+ }
+ fCoincident = this;
+ return true;
+ }
+
+ int computeWindSum();
+ bool containsCoincidence(const SkOpSegment* ) const;
+
+ bool containsCoincidence(const SkOpSpan* coin) const {
+ SkASSERT(this != coin);
+ const SkOpSpan* next = this;
+ while ((next = next->fCoincident) != this) {
+ if (next == coin) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool debugCoinLoopCheck() const;
+#if DEBUG_COIN
+ void debugInsertCoincidence(SkPathOpsDebug::GlitchLog* , const SkOpSpan* ) const;
+ void debugInsertCoincidence(SkPathOpsDebug::GlitchLog* ,
+ const SkOpSegment* , bool flipped, bool ordered) const;
+#endif
+ void dumpCoin() const;
+ bool dumpSpan() const;
+
+ bool done() const {
+ SkASSERT(!final());
+ return fDone;
+ }
+
+ void init(SkOpSegment* parent, SkOpSpan* prev, double t, const SkPoint& pt);
+ bool insertCoincidence(const SkOpSegment* , bool flipped, bool ordered);
+
+ // Please keep this in sync with debugInsertCoincidence()
+ void insertCoincidence(SkOpSpan* coin) {
+ if (containsCoincidence(coin)) {
+ SkASSERT(coin->containsCoincidence(this));
+ return;
+ }
+ debugValidate();
+ SkASSERT(this != coin);
+ SkOpSpan* coinNext = coin->fCoincident;
+ coin->fCoincident = this->fCoincident;
+ this->fCoincident = coinNext;
+ debugValidate();
+ }
+
+ bool isCanceled() const {
+ SkASSERT(!final());
+ return fWindValue == 0 && fOppValue == 0;
+ }
+
+ bool isCoincident() const {
+ SkASSERT(!final());
+ return fCoincident != this;
+ }
+
+ SkOpSpanBase* next() const {
+ SkASSERT(!final());
+ return fNext;
+ }
+
+ int oppSum() const {
+ SkASSERT(!final());
+ return fOppSum;
+ }
+
+ int oppValue() const {
+ SkASSERT(!final());
+ return fOppValue;
+ }
+
+ void release(const SkOpPtT* );
+
+ SkOpPtT* setCoinStart(SkOpSpan* oldCoinStart, SkOpSegment* oppSegment);
+
+ void setDone(bool done) {
+ SkASSERT(!final());
+ fDone = done;
+ }
+
+ void setNext(SkOpSpanBase* nextT) {
+ SkASSERT(!final());
+ fNext = nextT;
+ }
+
+ void setOppSum(int oppSum);
+
+ void setOppValue(int oppValue) {
+ SkASSERT(!final());
+ SkASSERT(fOppSum == SK_MinS32);
+ SkASSERT(!oppValue || !fDone);
+ fOppValue = oppValue;
+ }
+
+ void setToAngle(SkOpAngle* angle) {
+ SkASSERT(!final());
+ fToAngle = angle;
+ }
+
+ void setWindSum(int windSum);
+
+ void setWindValue(int windValue) {
+ SkASSERT(!final());
+ SkASSERT(windValue >= 0);
+ SkASSERT(fWindSum == SK_MinS32);
+ SkOPASSERT(!windValue || !fDone);
+ fWindValue = windValue;
+ }
+
+ bool sortableTop(SkOpContour* );
+
+ SkOpAngle* toAngle() const {
+ SkASSERT(!final());
+ return fToAngle;
+ }
+
+ int windSum() const {
+ SkASSERT(!final());
+ return fWindSum;
+ }
+
+ int windValue() const {
+ SkOPASSERT(!final());
+ return fWindValue;
+ }
+
+private: // no direct access to internals to avoid treating a span base as a span
+ SkOpSpan* fCoincident; // linked list of spans coincident with this one (may point to itself)
+ SkOpAngle* fToAngle; // points to next angle from span start to end
+ SkOpSpanBase* fNext; // next intersection point
+ int fWindSum; // accumulated from contours surrounding this one.
+ int fOppSum; // for binary operators: the opposite winding sum
+ int fWindValue; // 0 == canceled; 1 == normal; >1 == coincident
+ int fOppValue; // normally 0 -- when binary coincident edges combine, opp value goes here
+ int fTopTTry; // specifies direction and t value to try next
+ bool fDone; // if set, this span to next higher T has been processed
+ mutable bool fAlreadyAdded;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkOpTAllocator.h b/gfx/skia/skia/src/pathops/SkOpTAllocator.h
new file mode 100644
index 000000000..e8835f02e
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpTAllocator.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOpTAllocator_DEFINED
+#define SkOpTAllocator_DEFINED
+
+#include "SkChunkAlloc.h"
+
+// T is a path ops record type such as SkOpAngle, SkOpSpan, or SkOpSegment
+template<typename T>
+class SkOpTAllocator {
+public:
+ static T* Allocate(SkChunkAlloc* allocator) {
+ void* ptr = allocator->allocThrow(sizeof(T));
+ T* record = (T*) ptr;
+ return record;
+ }
+
+ static T* AllocateArray(SkChunkAlloc* allocator, int count) {
+ void* ptr = allocator->allocThrow(sizeof(T) * count);
+ T* record = (T*) ptr;
+ return record;
+ }
+
+ static T* New(SkChunkAlloc* allocator) {
+ return new (Allocate(allocator)) T();
+ }
+};
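+
+// Illustrative usage sketch (not part of the source above; SkOpAngle is only an
+// example of a type T this allocator might be instantiated with). New()
+// placement-constructs one T, while AllocateArray() hands back raw,
+// unconstructed storage. All memory is released when the arena is destroyed.
+//
+//   SkChunkAlloc allocator(4096);   // arena seeded with a 4K minimum block
+//   SkOpAngle* one = SkOpTAllocator<SkOpAngle>::New(&allocator);
+//   SkOpAngle* raw = SkOpTAllocator<SkOpAngle>::AllocateArray(&allocator, 8);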
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsBounds.h b/gfx/skia/skia/src/pathops/SkPathOpsBounds.h
new file mode 100644
index 000000000..610d7233a
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsBounds.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpBounds_DEFINED
+#define SkPathOpBounds_DEFINED
+
+#include "SkPathOpsRect.h"
+#include "SkRect.h"
+
+// SkPathOpsBounds, unlike SkRect, does not consider a line to be empty.
+struct SkPathOpsBounds : public SkRect {
+ static bool Intersects(const SkPathOpsBounds& a, const SkPathOpsBounds& b) {
+ return AlmostLessOrEqualUlps(a.fLeft, b.fRight)
+ && AlmostLessOrEqualUlps(b.fLeft, a.fRight)
+ && AlmostLessOrEqualUlps(a.fTop, b.fBottom)
+ && AlmostLessOrEqualUlps(b.fTop, a.fBottom);
+ }
+
+    // Note that add(), unlike SkRect::join() or SkRect::growToInclude(),
+    // does not treat the bounds of horizontal and vertical lines as
+    // empty rectangles.
+ void add(SkScalar left, SkScalar top, SkScalar right, SkScalar bottom) {
+ if (left < fLeft) fLeft = left;
+ if (top < fTop) fTop = top;
+ if (right > fRight) fRight = right;
+ if (bottom > fBottom) fBottom = bottom;
+ }
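+
+    // Informal example: accumulating the horizontal segment (1,2)..(5,2) gives
+    // bounds {1,2,5,2}. SkRect::isEmpty() would call that rect empty, but
+    // Intersects() above still reports overlap with any bounds touching the
+    // line, which is the behavior path ops relies on.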
+
+ void add(const SkPathOpsBounds& toAdd) {
+ add(toAdd.fLeft, toAdd.fTop, toAdd.fRight, toAdd.fBottom);
+ }
+
+ void add(const SkPoint& pt) {
+ if (pt.fX < fLeft) fLeft = pt.fX;
+ if (pt.fY < fTop) fTop = pt.fY;
+ if (pt.fX > fRight) fRight = pt.fX;
+ if (pt.fY > fBottom) fBottom = pt.fY;
+ }
+
+ void add(const SkDPoint& pt) {
+ if (pt.fX < fLeft) fLeft = SkDoubleToScalar(pt.fX);
+ if (pt.fY < fTop) fTop = SkDoubleToScalar(pt.fY);
+ if (pt.fX > fRight) fRight = SkDoubleToScalar(pt.fX);
+ if (pt.fY > fBottom) fBottom = SkDoubleToScalar(pt.fY);
+ }
+
+ bool almostContains(const SkPoint& pt) const {
+ return AlmostLessOrEqualUlps(fLeft, pt.fX)
+ && AlmostLessOrEqualUlps(pt.fX, fRight)
+ && AlmostLessOrEqualUlps(fTop, pt.fY)
+ && AlmostLessOrEqualUlps(pt.fY, fBottom);
+ }
+
+ bool contains(const SkPoint& pt) const {
+ return fLeft <= pt.fX && fTop <= pt.fY &&
+ fRight >= pt.fX && fBottom >= pt.fY;
+ }
+
+ typedef SkRect INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsCommon.cpp b/gfx/skia/skia/src/pathops/SkPathOpsCommon.cpp
new file mode 100644
index 000000000..3d6ba4dda
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsCommon.cpp
@@ -0,0 +1,335 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkAddIntersections.h"
+#include "SkOpCoincidence.h"
+#include "SkOpEdgeBuilder.h"
+#include "SkPathOpsCommon.h"
+#include "SkPathWriter.h"
+#include "SkTSort.h"
+
+SkScalar ScaleFactor(const SkPath& path) {
+ static const SkScalar twoTo10 = 1024.f;
+ SkScalar largest = 0;
+ const SkScalar* oneBounds = &path.getBounds().fLeft;
+ for (int index = 0; index < 4; ++index) {
+ largest = SkTMax(largest, SkScalarAbs(oneBounds[index]));
+ }
+ SkScalar scale = twoTo10;
+ SkScalar next;
+ while ((next = scale * twoTo10) < largest) {
+ scale = next;
+ }
+ return scale == twoTo10 ? SK_Scalar1 : scale;
+}
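+
+// Worked example: ScaleFactor() multiplies scale by 2^10 while scale * 1024 is
+// still below the largest bound coordinate. A path whose bounds stay within
+// +/-2^20 (about one million) returns SK_Scalar1; a path with a bound
+// coordinate of, say, 3,000,000 returns 1,048,576 (2^20).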
+
+void ScalePath(const SkPath& path, SkScalar scale, SkPath* scaled) {
+ SkMatrix matrix;
+ matrix.setScale(scale, scale);
+ *scaled = path;
+ scaled->transform(matrix);
+}
+
+const SkOpAngle* AngleWinding(SkOpSpanBase* start, SkOpSpanBase* end, int* windingPtr,
+ bool* sortablePtr) {
+ // find first angle, initialize winding to computed fWindSum
+ SkOpSegment* segment = start->segment();
+ const SkOpAngle* angle = segment->spanToAngle(start, end);
+ if (!angle) {
+ *windingPtr = SK_MinS32;
+ return nullptr;
+ }
+ bool computeWinding = false;
+ const SkOpAngle* firstAngle = angle;
+ bool loop = false;
+ bool unorderable = false;
+ int winding = SK_MinS32;
+ do {
+ angle = angle->next();
+ if (!angle) {
+ return nullptr;
+ }
+ unorderable |= angle->unorderable();
+ if ((computeWinding = unorderable || (angle == firstAngle && loop))) {
+ break; // if we get here, there's no winding, loop is unorderable
+ }
+ loop |= angle == firstAngle;
+ segment = angle->segment();
+ winding = segment->windSum(angle);
+ } while (winding == SK_MinS32);
+    // if the angle loop contains an unorderable span, the angle order may be useless;
+    // in that case, compute the winding directly for each span
+ if (computeWinding) {
+ firstAngle = angle;
+ winding = SK_MinS32;
+ do {
+ SkOpSpanBase* startSpan = angle->start();
+ SkOpSpanBase* endSpan = angle->end();
+ SkOpSpan* lesser = startSpan->starter(endSpan);
+ int testWinding = lesser->windSum();
+ if (testWinding == SK_MinS32) {
+ testWinding = lesser->computeWindSum();
+ }
+ if (testWinding != SK_MinS32) {
+ segment = angle->segment();
+ winding = testWinding;
+ }
+ angle = angle->next();
+ } while (angle != firstAngle);
+ }
+ *sortablePtr = !unorderable;
+ *windingPtr = winding;
+ return angle;
+}
+
+SkOpSegment* FindUndone(SkOpContourHead* contourList, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr) {
+ SkOpSegment* result;
+ SkOpContour* contour = contourList;
+ do {
+ result = contour->undoneSegment(startPtr, endPtr);
+ if (result) {
+ return result;
+ }
+ } while ((contour = contour->next()));
+ return nullptr;
+}
+
+SkOpSegment* FindChase(SkTDArray<SkOpSpanBase*>* chase, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr) {
+ while (chase->count()) {
+ SkOpSpanBase* span;
+ chase->pop(&span);
+ SkOpSegment* segment = span->segment();
+ *startPtr = span->ptT()->next()->span();
+ bool done = true;
+ *endPtr = nullptr;
+ if (SkOpAngle* last = segment->activeAngle(*startPtr, startPtr, endPtr, &done)) {
+ *startPtr = last->start();
+ *endPtr = last->end();
+ #if TRY_ROTATE
+ *chase->insert(0) = span;
+ #else
+ *chase->append() = span;
+ #endif
+ return last->segment();
+ }
+ if (done) {
+ continue;
+ }
+ // find first angle, initialize winding to computed wind sum
+ int winding;
+ bool sortable;
+ const SkOpAngle* angle = AngleWinding(*startPtr, *endPtr, &winding, &sortable);
+ if (!angle) {
+ return nullptr;
+ }
+ if (winding == SK_MinS32) {
+ continue;
+ }
+ int sumWinding SK_INIT_TO_AVOID_WARNING;
+ if (sortable) {
+ segment = angle->segment();
+ sumWinding = segment->updateWindingReverse(angle);
+ }
+ SkOpSegment* first = nullptr;
+ const SkOpAngle* firstAngle = angle;
+ while ((angle = angle->next()) != firstAngle) {
+ segment = angle->segment();
+ SkOpSpanBase* start = angle->start();
+ SkOpSpanBase* end = angle->end();
+ int maxWinding SK_INIT_TO_AVOID_WARNING;
+ if (sortable) {
+ segment->setUpWinding(start, end, &maxWinding, &sumWinding);
+ }
+ if (!segment->done(angle)) {
+ if (!first && (sortable || start->starter(end)->windSum() != SK_MinS32)) {
+ first = segment;
+ *startPtr = start;
+ *endPtr = end;
+ }
+ // OPTIMIZATION: should this also add to the chase?
+ if (sortable) {
+ (void) segment->markAngle(maxWinding, sumWinding, angle);
+ }
+ }
+ }
+ if (first) {
+ #if TRY_ROTATE
+ *chase->insert(0) = span;
+ #else
+ *chase->append() = span;
+ #endif
+ return first;
+ }
+ }
+ return nullptr;
+}
+
+bool SortContourList(SkOpContourHead** contourList, bool evenOdd, bool oppEvenOdd) {
+ SkTDArray<SkOpContour* > list;
+ SkOpContour* contour = *contourList;
+ do {
+ if (contour->count()) {
+ contour->setOppXor(contour->operand() ? evenOdd : oppEvenOdd);
+ *list.append() = contour;
+ }
+ } while ((contour = contour->next()));
+ int count = list.count();
+ if (!count) {
+ return false;
+ }
+ if (count > 1) {
+ SkTQSort<SkOpContour>(list.begin(), list.end() - 1);
+ }
+ contour = list[0];
+ SkOpContourHead* contourHead = static_cast<SkOpContourHead*>(contour);
+ contour->globalState()->setContourHead(contourHead);
+ *contourList = contourHead;
+ for (int index = 1; index < count; ++index) {
+ SkOpContour* next = list[index];
+ contour->setNext(next);
+ contour = next;
+ }
+ contour->setNext(nullptr);
+ return true;
+}
+
+static void calc_angles(SkOpContourHead* contourList DEBUG_COIN_DECLARE_PARAMS()) {
+ DEBUG_STATIC_SET_PHASE(contourList);
+ SkOpContour* contour = contourList;
+ do {
+ contour->calcAngles();
+ } while ((contour = contour->next()));
+}
+
+static bool missing_coincidence(SkOpContourHead* contourList DEBUG_COIN_DECLARE_PARAMS()) {
+ DEBUG_STATIC_SET_PHASE(contourList);
+ SkOpContour* contour = contourList;
+ bool result = false;
+ do {
+ result |= contour->missingCoincidence();
+ } while ((contour = contour->next()));
+ return result;
+}
+
+static bool move_multiples(SkOpContourHead* contourList DEBUG_COIN_DECLARE_PARAMS()) {
+ DEBUG_STATIC_SET_PHASE(contourList);
+ SkOpContour* contour = contourList;
+ do {
+ if (!contour->moveMultiples()) {
+ return false;
+ }
+ } while ((contour = contour->next()));
+ return true;
+}
+
+static void move_nearby(SkOpContourHead* contourList DEBUG_COIN_DECLARE_PARAMS()) {
+ DEBUG_STATIC_SET_PHASE(contourList);
+ SkOpContour* contour = contourList;
+ do {
+ contour->moveNearby();
+ } while ((contour = contour->next()));
+}
+
+static void sort_angles(SkOpContourHead* contourList) {
+ SkOpContour* contour = contourList;
+ do {
+ contour->sortAngles();
+ } while ((contour = contour->next()));
+}
+
+bool HandleCoincidence(SkOpContourHead* contourList, SkOpCoincidence* coincidence) {
+ SkOpGlobalState* globalState = contourList->globalState();
+ // match up points within the coincident runs
+ if (!coincidence->addExpanded(DEBUG_PHASE_ONLY_PARAMS(kIntersecting))) {
+ return false;
+ }
+ // combine t values when multiple intersections occur on some segments but not others
+ if (!move_multiples(contourList DEBUG_PHASE_PARAMS(kWalking))) {
+ return false;
+ }
+ // move t values and points together to eliminate small/tiny gaps
+ move_nearby(contourList DEBUG_COIN_PARAMS());
+ // add coincidence formed by pairing on curve points and endpoints
+ coincidence->correctEnds(DEBUG_PHASE_ONLY_PARAMS(kIntersecting));
+ if (!coincidence->addEndMovedSpans(DEBUG_COIN_ONLY_PARAMS())) {
+ return false;
+ }
+ const int SAFETY_COUNT = 3;
+ int safetyHatch = SAFETY_COUNT;
+ // look for coincidence present in A-B and A-C but missing in B-C
+ do {
+ bool added;
+ if (!coincidence->addMissing(&added DEBUG_ITER_PARAMS(SAFETY_COUNT - safetyHatch))) {
+ return false;
+ }
+ if (!added) {
+ break;
+ }
+ if (!--safetyHatch) {
+ SkASSERT(globalState->debugSkipAssert());
+ return false;
+ }
+ move_nearby(contourList DEBUG_ITER_PARAMS(SAFETY_COUNT - safetyHatch - 1));
+ } while (true);
+ // check to see if, loosely, coincident ranges may be expanded
+ if (coincidence->expand(DEBUG_COIN_ONLY_PARAMS())) {
+ bool added;
+ if (!coincidence->addMissing(&added DEBUG_COIN_PARAMS())) {
+ return false;
+ }
+ if (!coincidence->addExpanded(DEBUG_COIN_ONLY_PARAMS())) {
+ return false;
+ }
+ if (!move_multiples(contourList DEBUG_COIN_PARAMS())) {
+ return false;
+ }
+ move_nearby(contourList DEBUG_COIN_PARAMS());
+ }
+ // the expanded ranges may not align -- add the missing spans
+ if (!coincidence->addExpanded(DEBUG_PHASE_ONLY_PARAMS(kWalking))) {
+ return false;
+ }
+ // mark spans of coincident segments as coincident
+ coincidence->mark(DEBUG_COIN_ONLY_PARAMS());
+ // look for coincidence lines and curves undetected by intersection
+ if (missing_coincidence(contourList DEBUG_COIN_PARAMS())) {
+ (void) coincidence->expand(DEBUG_PHASE_ONLY_PARAMS(kIntersecting));
+ if (!coincidence->addExpanded(DEBUG_COIN_ONLY_PARAMS())) {
+ return false;
+ }
+ coincidence->mark(DEBUG_PHASE_ONLY_PARAMS(kWalking));
+ } else {
+ (void) coincidence->expand(DEBUG_COIN_ONLY_PARAMS());
+ }
+ (void) coincidence->expand(DEBUG_COIN_ONLY_PARAMS());
+
+ SkOpCoincidence overlaps(globalState);
+ safetyHatch = SAFETY_COUNT;
+ do {
+ SkOpCoincidence* pairs = overlaps.isEmpty() ? coincidence : &overlaps;
+ // adjust the winding value to account for coincident edges
+ pairs->apply(DEBUG_ITER_ONLY_PARAMS(SAFETY_COUNT - safetyHatch));
+ // For each coincident pair that overlaps another, when the receivers (the 1st of the pair)
+ // are different, construct a new pair to resolve their mutual span
+ pairs->findOverlaps(&overlaps DEBUG_ITER_PARAMS(SAFETY_COUNT - safetyHatch));
+ if (!--safetyHatch) {
+ SkASSERT(globalState->debugSkipAssert());
+ return false;
+ }
+ } while (!overlaps.isEmpty());
+ calc_angles(contourList DEBUG_COIN_PARAMS());
+ sort_angles(contourList);
+#if DEBUG_COINCIDENCE_VERBOSE
+ coincidence->debugShowCoincidence();
+#endif
+#if DEBUG_COINCIDENCE
+ coincidence->debugValidate();
+#endif
+ SkPathOpsDebug::ShowActiveSpans(contourList);
+ return true;
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsCommon.h b/gfx/skia/skia/src/pathops/SkPathOpsCommon.h
new file mode 100644
index 000000000..beffb8522
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsCommon.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsCommon_DEFINED
+#define SkPathOpsCommon_DEFINED
+
+#include "SkOpAngle.h"
+#include "SkTDArray.h"
+
+class SkOpCoincidence;
+class SkOpContour;
+class SkPathWriter;
+
+const SkOpAngle* AngleWinding(SkOpSpanBase* start, SkOpSpanBase* end, int* windingPtr,
+ bool* sortable);
+SkOpSegment* FindChase(SkTDArray<SkOpSpanBase*>* chase, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr);
+SkOpSpan* FindSortableTop(SkOpContourHead* );
+SkOpSegment* FindUndone(SkOpContourHead* , SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr);
+bool FixWinding(SkPath* path);
+bool SortContourList(SkOpContourHead** , bool evenOdd, bool oppEvenOdd);
+bool HandleCoincidence(SkOpContourHead* , SkOpCoincidence* );
+bool OpDebug(const SkPath& one, const SkPath& two, SkPathOp op, SkPath* result
+ SkDEBUGPARAMS(bool skipAssert)
+ SkDEBUGPARAMS(const char* testName));
+SkScalar ScaleFactor(const SkPath& path);
+void ScalePath(const SkPath& path, SkScalar scale, SkPath* scaled);
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsConic.cpp b/gfx/skia/skia/src/pathops/SkPathOpsConic.cpp
new file mode 100644
index 000000000..dd523211d
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsConic.cpp
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkIntersections.h"
+#include "SkLineParameters.h"
+#include "SkPathOpsConic.h"
+#include "SkPathOpsCubic.h"
+#include "SkPathOpsQuad.h"
+
+// cribbed from the float version in SkGeometry.cpp
+static void conic_deriv_coeff(const double src[],
+ SkScalar w,
+ double coeff[3]) {
+ const double P20 = src[4] - src[0];
+ const double P10 = src[2] - src[0];
+ const double wP10 = w * P10;
+ coeff[0] = w * P20 - P20;
+ coeff[1] = P20 - 2 * wP10;
+ coeff[2] = wP10;
+}
+
+static double conic_eval_tan(const double coord[], SkScalar w, double t) {
+ double coeff[3];
+ conic_deriv_coeff(coord, w, coeff);
+ return t * (t * coeff[0] + coeff[1]) + coeff[2];
+}
+
+int SkDConic::FindExtrema(const double src[], SkScalar w, double t[1]) {
+ double coeff[3];
+ conic_deriv_coeff(src, w, coeff);
+
+ double tValues[2];
+ int roots = SkDQuad::RootsValidT(coeff[0], coeff[1], coeff[2], tValues);
+ // In extreme cases, the number of roots returned can be 2. Pathops
+ // will fail later on, so there's no advantage to plumbing in an error
+ // return here.
+ // SkASSERT(0 == roots || 1 == roots);
+
+ if (1 == roots) {
+ t[0] = tValues[0];
+ return 1;
+ }
+ return 0;
+}
+
+SkDVector SkDConic::dxdyAtT(double t) const {
+ SkDVector result = {
+ conic_eval_tan(&fPts[0].fX, fWeight, t),
+ conic_eval_tan(&fPts[0].fY, fWeight, t)
+ };
+ if (result.fX == 0 && result.fY == 0) {
+ if (zero_or_one(t)) {
+ result = fPts[2] - fPts[0];
+ } else {
+ // incomplete
+ SkDebugf("!k");
+ }
+ }
+ return result;
+}
+
+static double conic_eval_numerator(const double src[], SkScalar w, double t) {
+ SkASSERT(src);
+ SkASSERT(t >= 0 && t <= 1);
+ double src2w = src[2] * w;
+ double C = src[0];
+ double A = src[4] - 2 * src2w + C;
+ double B = 2 * (src2w - C);
+ return (A * t + B) * t + C;
+}
+
+
+static double conic_eval_denominator(SkScalar w, double t) {
+ double B = 2 * (w - 1);
+ double C = 1;
+ double A = -B;
+ return (A * t + B) * t + C;
+}
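+
+// Note: a rational quadratic (conic) is
+//   C(t) = [(1-t)^2 p0 + 2w t(1-t) p1 + t^2 p2] / [(1-t)^2 + 2w t(1-t) + t^2]
+// The two helpers above are that numerator and denominator rewritten in
+// monomial form (A t^2 + B t + C), evaluated per coordinate.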
+
+bool SkDConic::hullIntersects(const SkDCubic& cubic, bool* isLinear) const {
+ return cubic.hullIntersects(*this, isLinear);
+}
+
+SkDPoint SkDConic::ptAtT(double t) const {
+ if (t == 0) {
+ return fPts[0];
+ }
+ if (t == 1) {
+ return fPts[2];
+ }
+ double denominator = conic_eval_denominator(fWeight, t);
+ SkDPoint result = {
+ conic_eval_numerator(&fPts[0].fX, fWeight, t) / denominator,
+ conic_eval_numerator(&fPts[0].fY, fWeight, t) / denominator
+ };
+ return result;
+}
+
+/* see quad subdivide for point rationale */
+/* w rationale : the mid point between t1 and t2 could be determined from the computed a/b/c
+ values if the computed w was known. Since we know the mid point at (t1+t2)/2, we'll assume
+ that it is the same as the point on the new curve t==(0+1)/2.
+
+ d / dz == conic_poly(dst, unknownW, .5) / conic_weight(unknownW, .5);
+
+ conic_poly(dst, unknownW, .5)
+ = a / 4 + (b * unknownW) / 2 + c / 4
+              = (a + c) / 4 + (b * unknownW) / 2
+
+ conic_weight(unknownW, .5)
+ = unknownW / 2 + 1 / 2
+
+ d / dz == ((a + c) / 2 + b * unknownW) / (unknownW + 1)
+ d / dz * (unknownW + 1) == (a + c) / 2 + b * unknownW
+ unknownW = ((a + c) / 2 - d / dz) / (d / dz - b)
+
+ Thus, w is the ratio of the distance from the mid of end points to the on-curve point, and the
+ distance of the on-curve point to the control point.
+ */
+SkDConic SkDConic::subDivide(double t1, double t2) const {
+ double ax, ay, az;
+ if (t1 == 0) {
+ ax = fPts[0].fX;
+ ay = fPts[0].fY;
+ az = 1;
+ } else if (t1 != 1) {
+ ax = conic_eval_numerator(&fPts[0].fX, fWeight, t1);
+ ay = conic_eval_numerator(&fPts[0].fY, fWeight, t1);
+ az = conic_eval_denominator(fWeight, t1);
+ } else {
+ ax = fPts[2].fX;
+ ay = fPts[2].fY;
+ az = 1;
+ }
+ double midT = (t1 + t2) / 2;
+ double dx = conic_eval_numerator(&fPts[0].fX, fWeight, midT);
+ double dy = conic_eval_numerator(&fPts[0].fY, fWeight, midT);
+ double dz = conic_eval_denominator(fWeight, midT);
+ double cx, cy, cz;
+ if (t2 == 1) {
+ cx = fPts[2].fX;
+ cy = fPts[2].fY;
+ cz = 1;
+ } else if (t2 != 0) {
+ cx = conic_eval_numerator(&fPts[0].fX, fWeight, t2);
+ cy = conic_eval_numerator(&fPts[0].fY, fWeight, t2);
+ cz = conic_eval_denominator(fWeight, t2);
+ } else {
+ cx = fPts[0].fX;
+ cy = fPts[0].fY;
+ cz = 1;
+ }
+ double bx = 2 * dx - (ax + cx) / 2;
+ double by = 2 * dy - (ay + cy) / 2;
+ double bz = 2 * dz - (az + cz) / 2;
+ SkDConic dst = {{{{ax / az, ay / az}, {bx / bz, by / bz}, {cx / cz, cy / cz}}},
+ SkDoubleToScalar(bz / sqrt(az * cz)) };
+ return dst;
+}
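+
+// Quick sanity check: for t1 == 0, t2 == 1 the formulas above give
+// (bx, by) == w * p1 and bz == w, so the stored middle point bx/bz, by/bz
+// reduces to p1 and the returned weight bz / sqrt(az * cz) reduces to w --
+// subdividing over the whole range reproduces the original conic.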
+
+SkDPoint SkDConic::subDivide(const SkDPoint& a, const SkDPoint& c, double t1, double t2,
+ SkScalar* weight) const {
+ SkDConic chopped = this->subDivide(t1, t2);
+ *weight = chopped.fWeight;
+ return chopped[1];
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsConic.h b/gfx/skia/skia/src/pathops/SkPathOpsConic.h
new file mode 100644
index 000000000..4cbe147b4
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsConic.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathOpsConic_DEFINED
+#define SkPathOpsConic_DEFINED
+
+#include "SkPathOpsPoint.h"
+#include "SkPathOpsQuad.h"
+
+struct SkDConic {
+ static const int kPointCount = 3;
+ static const int kPointLast = kPointCount - 1;
+ static const int kMaxIntersections = 4;
+
+ SkDQuad fPts;
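+    // rational weight on the middle control point; 1 behaves exactly like a
+    // plain quad, values below 1 are elliptical, values above 1 hyperbolic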
+ SkScalar fWeight;
+
+ bool collapsed() const {
+ return fPts.collapsed();
+ }
+
+ bool controlsInside() const {
+ return fPts.controlsInside();
+ }
+
+ void debugInit() {
+ fPts.debugInit();
+ }
+
+ SkDConic flip() const {
+ SkDConic result = {{{fPts[2], fPts[1], fPts[0]}}, fWeight};
+ return result;
+ }
+
+ static bool IsConic() { return true; }
+
+ const SkDConic& set(const SkPoint pts[kPointCount], SkScalar weight) {
+ fPts.set(pts);
+ fWeight = weight;
+ return *this;
+ }
+
+ const SkDPoint& operator[](int n) const { return fPts[n]; }
+ SkDPoint& operator[](int n) { return fPts[n]; }
+
+ static int AddValidTs(double s[], int realRoots, double* t) {
+ return SkDQuad::AddValidTs(s, realRoots, t);
+ }
+
+ void align(int endIndex, SkDPoint* dstPt) const {
+ fPts.align(endIndex, dstPt);
+ }
+
+ SkDVector dxdyAtT(double t) const;
+ static int FindExtrema(const double src[], SkScalar weight, double tValue[1]);
+
+ bool hullIntersects(const SkDQuad& quad, bool* isLinear) const {
+ return fPts.hullIntersects(quad, isLinear);
+ }
+
+ bool hullIntersects(const SkDConic& conic, bool* isLinear) const {
+ return fPts.hullIntersects(conic.fPts, isLinear);
+ }
+
+ bool hullIntersects(const SkDCubic& cubic, bool* isLinear) const;
+
+ bool isLinear(int startIndex, int endIndex) const {
+ return fPts.isLinear(startIndex, endIndex);
+ }
+
+ bool monotonicInX() const {
+ return fPts.monotonicInX();
+ }
+
+ bool monotonicInY() const {
+ return fPts.monotonicInY();
+ }
+
+ void otherPts(int oddMan, const SkDPoint* endPt[2]) const {
+ fPts.otherPts(oddMan, endPt);
+ }
+
+ SkDPoint ptAtT(double t) const;
+
+ static int RootsReal(double A, double B, double C, double t[2]) {
+ return SkDQuad::RootsReal(A, B, C, t);
+ }
+
+ static int RootsValidT(const double A, const double B, const double C, double s[2]) {
+ return SkDQuad::RootsValidT(A, B, C, s);
+ }
+
+ SkDConic subDivide(double t1, double t2) const;
+
+ static SkDConic SubDivide(const SkPoint a[kPointCount], SkScalar weight, double t1, double t2) {
+ SkDConic conic;
+ conic.set(a, weight);
+ return conic.subDivide(t1, t2);
+ }
+
+ SkDPoint subDivide(const SkDPoint& a, const SkDPoint& c, double t1, double t2,
+ SkScalar* weight) const;
+
+ static SkDPoint SubDivide(const SkPoint pts[kPointCount], SkScalar weight,
+ const SkDPoint& a, const SkDPoint& c,
+ double t1, double t2, SkScalar* newWeight) {
+ SkDConic conic;
+ conic.set(pts, weight);
+ return conic.subDivide(a, c, t1, t2, newWeight);
+ }
+
+ // utilities callable by the user from the debugger when the implementation code is linked in
+ void dump() const;
+ void dumpID(int id) const;
+ void dumpInner() const;
+};
+
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsCubic.cpp b/gfx/skia/skia/src/pathops/SkPathOpsCubic.cpp
new file mode 100644
index 000000000..bdae492de
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsCubic.cpp
@@ -0,0 +1,706 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkGeometry.h"
+#include "SkLineParameters.h"
+#include "SkPathOpsConic.h"
+#include "SkPathOpsCubic.h"
+#include "SkPathOpsCurve.h"
+#include "SkPathOpsLine.h"
+#include "SkPathOpsQuad.h"
+#include "SkPathOpsRect.h"
+#include "SkTSort.h"
+
+const int SkDCubic::gPrecisionUnit = 256; // FIXME: test different values in test framework
+
+void SkDCubic::align(int endIndex, int ctrlIndex, SkDPoint* dstPt) const {
+ if (fPts[endIndex].fX == fPts[ctrlIndex].fX) {
+ dstPt->fX = fPts[endIndex].fX;
+ }
+ if (fPts[endIndex].fY == fPts[ctrlIndex].fY) {
+ dstPt->fY = fPts[endIndex].fY;
+ }
+}
+
+// give up when changing t no longer moves point
+// also, copy point rather than recompute it when it does change
+double SkDCubic::binarySearch(double min, double max, double axisIntercept,
+ SearchAxis xAxis) const {
+ double t = (min + max) / 2;
+ double step = (t - min) / 2;
+ SkDPoint cubicAtT = ptAtT(t);
+ double calcPos = (&cubicAtT.fX)[xAxis];
+ double calcDist = calcPos - axisIntercept;
+ do {
+ double priorT = t - step;
+ SkASSERT(priorT >= min);
+ SkDPoint lessPt = ptAtT(priorT);
+ if (approximately_equal_half(lessPt.fX, cubicAtT.fX)
+ && approximately_equal_half(lessPt.fY, cubicAtT.fY)) {
+ return -1; // binary search found no point at this axis intercept
+ }
+ double lessDist = (&lessPt.fX)[xAxis] - axisIntercept;
+#if DEBUG_CUBIC_BINARY_SEARCH
+ SkDebugf("t=%1.9g calc=%1.9g dist=%1.9g step=%1.9g less=%1.9g\n", t, calcPos, calcDist,
+ step, lessDist);
+#endif
+ double lastStep = step;
+ step /= 2;
+ if (calcDist > 0 ? calcDist > lessDist : calcDist < lessDist) {
+ t = priorT;
+ } else {
+ double nextT = t + lastStep;
+ if (nextT > max) {
+ return -1;
+ }
+ SkDPoint morePt = ptAtT(nextT);
+ if (approximately_equal_half(morePt.fX, cubicAtT.fX)
+ && approximately_equal_half(morePt.fY, cubicAtT.fY)) {
+ return -1; // binary search found no point at this axis intercept
+ }
+ double moreDist = (&morePt.fX)[xAxis] - axisIntercept;
+ if (calcDist > 0 ? calcDist <= moreDist : calcDist >= moreDist) {
+ continue;
+ }
+ t = nextT;
+ }
+ SkDPoint testAtT = ptAtT(t);
+ cubicAtT = testAtT;
+ calcPos = (&cubicAtT.fX)[xAxis];
+ calcDist = calcPos - axisIntercept;
+ } while (!approximately_equal(calcPos, axisIntercept));
+ return t;
+}
+
+// FIXME: cache / keep the bounds and/or precision with the caller?
+double SkDCubic::calcPrecision() const {
+ SkDRect dRect;
+ dRect.setBounds(*this); // OPTIMIZATION: just use setRawBounds ?
+ double width = dRect.fRight - dRect.fLeft;
+ double height = dRect.fBottom - dRect.fTop;
+ return (width > height ? width : height) / gPrecisionUnit;
+}
+
+
+/* classic one t subdivision */
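+/* note: the x and y coordinates of the SkDPoint array are interleaved, so the
+   caller passes &fPts[0].fX or &fPts[0].fY and the stride-2 indices below walk
+   one coordinate across the four control points (and the seven output points) */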
+static void interp_cubic_coords(const double* src, double* dst, double t) {
+ double ab = SkDInterp(src[0], src[2], t);
+ double bc = SkDInterp(src[2], src[4], t);
+ double cd = SkDInterp(src[4], src[6], t);
+ double abc = SkDInterp(ab, bc, t);
+ double bcd = SkDInterp(bc, cd, t);
+ double abcd = SkDInterp(abc, bcd, t);
+
+ dst[0] = src[0];
+ dst[2] = ab;
+ dst[4] = abc;
+ dst[6] = abcd;
+ dst[8] = bcd;
+ dst[10] = cd;
+ dst[12] = src[6];
+}
+
+SkDCubicPair SkDCubic::chopAt(double t) const {
+ SkDCubicPair dst;
+ if (t == 0.5) {
+ dst.pts[0] = fPts[0];
+ dst.pts[1].fX = (fPts[0].fX + fPts[1].fX) / 2;
+ dst.pts[1].fY = (fPts[0].fY + fPts[1].fY) / 2;
+ dst.pts[2].fX = (fPts[0].fX + 2 * fPts[1].fX + fPts[2].fX) / 4;
+ dst.pts[2].fY = (fPts[0].fY + 2 * fPts[1].fY + fPts[2].fY) / 4;
+ dst.pts[3].fX = (fPts[0].fX + 3 * (fPts[1].fX + fPts[2].fX) + fPts[3].fX) / 8;
+ dst.pts[3].fY = (fPts[0].fY + 3 * (fPts[1].fY + fPts[2].fY) + fPts[3].fY) / 8;
+ dst.pts[4].fX = (fPts[1].fX + 2 * fPts[2].fX + fPts[3].fX) / 4;
+ dst.pts[4].fY = (fPts[1].fY + 2 * fPts[2].fY + fPts[3].fY) / 4;
+ dst.pts[5].fX = (fPts[2].fX + fPts[3].fX) / 2;
+ dst.pts[5].fY = (fPts[2].fY + fPts[3].fY) / 2;
+ dst.pts[6] = fPts[3];
+ return dst;
+ }
+ interp_cubic_coords(&fPts[0].fX, &dst.pts[0].fX, t);
+ interp_cubic_coords(&fPts[0].fY, &dst.pts[0].fY, t);
+ return dst;
+}
+
+void SkDCubic::Coefficients(const double* src, double* A, double* B, double* C, double* D) {
+ *A = src[6]; // d
+ *B = src[4] * 3; // 3*c
+ *C = src[2] * 3; // 3*b
+ *D = src[0]; // a
+ *A -= *D - *C + *B; // A = -a + 3*b - 3*c + d
+ *B += 3 * *D - 2 * *C; // B = 3*a - 6*b + 3*c
+ *C -= 3 * *D; // C = -3*a + 3*b
+}
+
+bool SkDCubic::endsAreExtremaInXOrY() const {
+ return (between(fPts[0].fX, fPts[1].fX, fPts[3].fX)
+ && between(fPts[0].fX, fPts[2].fX, fPts[3].fX))
+ || (between(fPts[0].fY, fPts[1].fY, fPts[3].fY)
+ && between(fPts[0].fY, fPts[2].fY, fPts[3].fY));
+}
+
+// Do a quick reject by rotating all points relative to a line formed by
+// a pair of one cubic's points. If the 2nd cubic's points
+// are on the line or on the opposite side from the 1st cubic's 'odd man', the
+// curves at most intersect at the endpoints.
+/* if returning true, check contains true if cubic's hull collapsed, making the cubic linear
+   if returning false, check contains true if the cubic pair has only the end point in common
+*/
+bool SkDCubic::hullIntersects(const SkDPoint* pts, int ptCount, bool* isLinear) const {
+ bool linear = true;
+ char hullOrder[4];
+ int hullCount = convexHull(hullOrder);
+ int end1 = hullOrder[0];
+ int hullIndex = 0;
+ const SkDPoint* endPt[2];
+ endPt[0] = &fPts[end1];
+ do {
+ hullIndex = (hullIndex + 1) % hullCount;
+ int end2 = hullOrder[hullIndex];
+ endPt[1] = &fPts[end2];
+ double origX = endPt[0]->fX;
+ double origY = endPt[0]->fY;
+ double adj = endPt[1]->fX - origX;
+ double opp = endPt[1]->fY - origY;
+ int oddManMask = other_two(end1, end2);
+ int oddMan = end1 ^ oddManMask;
+ double sign = (fPts[oddMan].fY - origY) * adj - (fPts[oddMan].fX - origX) * opp;
+ int oddMan2 = end2 ^ oddManMask;
+ double sign2 = (fPts[oddMan2].fY - origY) * adj - (fPts[oddMan2].fX - origX) * opp;
+ if (sign * sign2 < 0) {
+ continue;
+ }
+ if (approximately_zero(sign)) {
+ sign = sign2;
+ if (approximately_zero(sign)) {
+ continue;
+ }
+ }
+ linear = false;
+ bool foundOutlier = false;
+ for (int n = 0; n < ptCount; ++n) {
+ double test = (pts[n].fY - origY) * adj - (pts[n].fX - origX) * opp;
+ if (test * sign > 0 && !precisely_zero(test)) {
+ foundOutlier = true;
+ break;
+ }
+ }
+ if (!foundOutlier) {
+ return false;
+ }
+ endPt[0] = endPt[1];
+ end1 = end2;
+ } while (hullIndex);
+ *isLinear = linear;
+ return true;
+}
+
+bool SkDCubic::hullIntersects(const SkDCubic& c2, bool* isLinear) const {
+ return hullIntersects(c2.fPts, c2.kPointCount, isLinear);
+}
+
+bool SkDCubic::hullIntersects(const SkDQuad& quad, bool* isLinear) const {
+ return hullIntersects(quad.fPts, quad.kPointCount, isLinear);
+}
+
+bool SkDCubic::hullIntersects(const SkDConic& conic, bool* isLinear) const {
+
+ return hullIntersects(conic.fPts, isLinear);
+}
+
+bool SkDCubic::isLinear(int startIndex, int endIndex) const {
+ if (fPts[0].approximatelyDEqual(fPts[3])) {
+ return ((const SkDQuad *) this)->isLinear(0, 2);
+ }
+ SkLineParameters lineParameters;
+ lineParameters.cubicEndPoints(*this, startIndex, endIndex);
+ // FIXME: maybe it's possible to avoid this and compare non-normalized
+ lineParameters.normalize();
+ double tiniest = SkTMin(SkTMin(SkTMin(SkTMin(SkTMin(SkTMin(SkTMin(fPts[0].fX, fPts[0].fY),
+ fPts[1].fX), fPts[1].fY), fPts[2].fX), fPts[2].fY), fPts[3].fX), fPts[3].fY);
+ double largest = SkTMax(SkTMax(SkTMax(SkTMax(SkTMax(SkTMax(SkTMax(fPts[0].fX, fPts[0].fY),
+ fPts[1].fX), fPts[1].fY), fPts[2].fX), fPts[2].fY), fPts[3].fX), fPts[3].fY);
+ largest = SkTMax(largest, -tiniest);
+ double distance = lineParameters.controlPtDistance(*this, 1);
+ if (!approximately_zero_when_compared_to(distance, largest)) {
+ return false;
+ }
+ distance = lineParameters.controlPtDistance(*this, 2);
+ return approximately_zero_when_compared_to(distance, largest);
+}
+
+bool SkDCubic::ComplexBreak(const SkPoint pointsPtr[4], SkScalar* t) {
+ SkScalar d[3];
+ SkCubicType cubicType = SkClassifyCubic(pointsPtr, d);
+ if (cubicType == kLoop_SkCubicType) {
+        // crib code from gpu path utils that finds the t values where the loop self-intersects;
+        // use them to find the mid of the t values, which should be a friendly place to chop
+ SkScalar tempSqrt = SkScalarSqrt(4.f * d[0] * d[2] - 3.f * d[1] * d[1]);
+ SkScalar ls = d[1] - tempSqrt;
+ SkScalar lt = 2.f * d[0];
+ SkScalar ms = d[1] + tempSqrt;
+ SkScalar mt = 2.f * d[0];
+ if (roughly_between(0, ls, lt) && roughly_between(0, ms, mt)) {
+ ls = ls / lt;
+ ms = ms / mt;
+ SkASSERT(roughly_between(0, ls, 1) && roughly_between(0, ms, 1));
+ *t = (ls + ms) / 2;
+ SkASSERT(roughly_between(0, *t, 1));
+ return *t > 0 && *t < 1;
+ }
+ } else if (kSerpentine_SkCubicType == cubicType || kCusp_SkCubicType == cubicType) {
+ SkDCubic cubic;
+ cubic.set(pointsPtr);
+ double inflectionTs[2];
+ int infTCount = cubic.findInflections(inflectionTs);
+ if (infTCount == 2) {
+ double maxCurvature[3];
+ int roots = cubic.findMaxCurvature(maxCurvature);
+#if DEBUG_CUBIC_SPLIT
+ SkDebugf("%s\n", __FUNCTION__);
+ cubic.dump();
+ for (int index = 0; index < infTCount; ++index) {
+ SkDebugf("inflectionsTs[%d]=%1.9g ", index, inflectionTs[index]);
+ SkDPoint pt = cubic.ptAtT(inflectionTs[index]);
+ SkDVector dPt = cubic.dxdyAtT(inflectionTs[index]);
+ SkDLine perp = {{pt - dPt, pt + dPt}};
+ perp.dump();
+ }
+ for (int index = 0; index < roots; ++index) {
+ SkDebugf("maxCurvature[%d]=%1.9g ", index, maxCurvature[index]);
+ SkDPoint pt = cubic.ptAtT(maxCurvature[index]);
+ SkDVector dPt = cubic.dxdyAtT(maxCurvature[index]);
+ SkDLine perp = {{pt - dPt, pt + dPt}};
+ perp.dump();
+ }
+#endif
+ for (int index = 0; index < roots; ++index) {
+ if (between(inflectionTs[0], maxCurvature[index], inflectionTs[1])) {
+ *t = maxCurvature[index];
+ return *t > 0 && *t < 1;
+ }
+ }
+ } else if (infTCount == 1) {
+ *t = inflectionTs[0];
+ return *t > 0 && *t < 1;
+ }
+ }
+ return false;
+}
+
+bool SkDCubic::monotonicInX() const {
+ return precisely_between(fPts[0].fX, fPts[1].fX, fPts[3].fX)
+ && precisely_between(fPts[0].fX, fPts[2].fX, fPts[3].fX);
+}
+
+bool SkDCubic::monotonicInY() const {
+ return precisely_between(fPts[0].fY, fPts[1].fY, fPts[3].fY)
+ && precisely_between(fPts[0].fY, fPts[2].fY, fPts[3].fY);
+}
+
+void SkDCubic::otherPts(int index, const SkDPoint* o1Pts[kPointCount - 1]) const {
+ int offset = (int) !SkToBool(index);
+ o1Pts[0] = &fPts[offset];
+ o1Pts[1] = &fPts[++offset];
+ o1Pts[2] = &fPts[++offset];
+}
+
+int SkDCubic::searchRoots(double extremeTs[6], int extrema, double axisIntercept,
+ SearchAxis xAxis, double* validRoots) const {
+ extrema += findInflections(&extremeTs[extrema]);
+ extremeTs[extrema++] = 0;
+ extremeTs[extrema] = 1;
+ SkASSERT(extrema < 6);
+ SkTQSort(extremeTs, extremeTs + extrema);
+ int validCount = 0;
+ for (int index = 0; index < extrema; ) {
+ double min = extremeTs[index];
+ double max = extremeTs[++index];
+ if (min == max) {
+ continue;
+ }
+ double newT = binarySearch(min, max, axisIntercept, xAxis);
+ if (newT >= 0) {
+ if (validCount >= 3) {
+ return 0;
+ }
+ validRoots[validCount++] = newT;
+ }
+ }
+ return validCount;
+}
+
+// cubic roots
+
+static const double PI = 3.141592653589793;
+
+// from SkGeometry.cpp (and Numeric Solutions, 5.6)
+int SkDCubic::RootsValidT(double A, double B, double C, double D, double t[3]) {
+ double s[3];
+ int realRoots = RootsReal(A, B, C, D, s);
+ int foundRoots = SkDQuad::AddValidTs(s, realRoots, t);
+ for (int index = 0; index < realRoots; ++index) {
+ double tValue = s[index];
+ if (!approximately_one_or_less(tValue) && between(1, tValue, 1.00005)) {
+ for (int idx2 = 0; idx2 < foundRoots; ++idx2) {
+ if (approximately_equal(t[idx2], 1)) {
+ goto nextRoot;
+ }
+ }
+ SkASSERT(foundRoots < 3);
+ t[foundRoots++] = 1;
+ } else if (!approximately_zero_or_more(tValue) && between(-0.00005, tValue, 0)) {
+ for (int idx2 = 0; idx2 < foundRoots; ++idx2) {
+ if (approximately_equal(t[idx2], 0)) {
+ goto nextRoot;
+ }
+ }
+ SkASSERT(foundRoots < 3);
+ t[foundRoots++] = 0;
+ }
+nextRoot:
+ ;
+ }
+ return foundRoots;
+}
+
+int SkDCubic::RootsReal(double A, double B, double C, double D, double s[3]) {
+#ifdef SK_DEBUG
+ // create a string mathematica understands
+ // GDB set print repe 15 # if repeated digits is a bother
+ // set print elements 400 # if line doesn't fit
+ char str[1024];
+ sk_bzero(str, sizeof(str));
+ SK_SNPRINTF(str, sizeof(str), "Solve[%1.19g x^3 + %1.19g x^2 + %1.19g x + %1.19g == 0, x]",
+ A, B, C, D);
+ SkPathOpsDebug::MathematicaIze(str, sizeof(str));
+#if ONE_OFF_DEBUG && ONE_OFF_DEBUG_MATHEMATICA
+ SkDebugf("%s\n", str);
+#endif
+#endif
+ if (approximately_zero(A)
+ && approximately_zero_when_compared_to(A, B)
+ && approximately_zero_when_compared_to(A, C)
+ && approximately_zero_when_compared_to(A, D)) { // we're just a quadratic
+ return SkDQuad::RootsReal(B, C, D, s);
+ }
+ if (approximately_zero_when_compared_to(D, A)
+ && approximately_zero_when_compared_to(D, B)
+ && approximately_zero_when_compared_to(D, C)) { // 0 is one root
+ int num = SkDQuad::RootsReal(A, B, C, s);
+ for (int i = 0; i < num; ++i) {
+ if (approximately_zero(s[i])) {
+ return num;
+ }
+ }
+ s[num++] = 0;
+ return num;
+ }
+ if (approximately_zero(A + B + C + D)) { // 1 is one root
+ int num = SkDQuad::RootsReal(A, A + B, -D, s);
+ for (int i = 0; i < num; ++i) {
+ if (AlmostDequalUlps(s[i], 1)) {
+ return num;
+ }
+ }
+ s[num++] = 1;
+ return num;
+ }
+ double a, b, c;
+ {
+ double invA = 1 / A;
+ a = B * invA;
+ b = C * invA;
+ c = D * invA;
+ }
+ double a2 = a * a;
+ double Q = (a2 - b * 3) / 9;
+ double R = (2 * a2 * a - 9 * a * b + 27 * c) / 54;
+ double R2 = R * R;
+ double Q3 = Q * Q * Q;
+ double R2MinusQ3 = R2 - Q3;
+ double adiv3 = a / 3;
+ double r;
+ double* roots = s;
+ if (R2MinusQ3 < 0) { // we have 3 real roots
+ // the divide/root can, due to finite precisions, be slightly outside of -1...1
+ double theta = acos(SkTPin(R / sqrt(Q3), -1., 1.));
+ double neg2RootQ = -2 * sqrt(Q);
+
+ r = neg2RootQ * cos(theta / 3) - adiv3;
+ *roots++ = r;
+
+ r = neg2RootQ * cos((theta + 2 * PI) / 3) - adiv3;
+ if (!AlmostDequalUlps(s[0], r)) {
+ *roots++ = r;
+ }
+ r = neg2RootQ * cos((theta - 2 * PI) / 3) - adiv3;
+ if (!AlmostDequalUlps(s[0], r) && (roots - s == 1 || !AlmostDequalUlps(s[1], r))) {
+ *roots++ = r;
+ }
+ } else { // we have 1 real root
+ double sqrtR2MinusQ3 = sqrt(R2MinusQ3);
+ double A = fabs(R) + sqrtR2MinusQ3;
+ A = SkDCubeRoot(A);
+ if (R > 0) {
+ A = -A;
+ }
+ if (A != 0) {
+ A += Q / A;
+ }
+ r = A - adiv3;
+ *roots++ = r;
+ if (AlmostDequalUlps((double) R2, (double) Q3)) {
+ r = -A / 2 - adiv3;
+ if (!AlmostDequalUlps(s[0], r)) {
+ *roots++ = r;
+ }
+ }
+ }
+ return static_cast<int>(roots - s);
+}
+
+// from http://www.cs.sunysb.edu/~qin/courses/geometry/4.pdf
+// c(t) = a(1-t)^3 + 3bt(1-t)^2 + 3c(1-t)t^2 + dt^3
+// c'(t) = -3a(1-t)^2 + 3b((1-t)^2 - 2t(1-t)) + 3c(2t(1-t) - t^2) + 3dt^2
+// = 3(b-a)(1-t)^2 + 6(c-b)t(1-t) + 3(d-c)t^2
+static double derivative_at_t(const double* src, double t) {
+ double one_t = 1 - t;
+ double a = src[0];
+ double b = src[2];
+ double c = src[4];
+ double d = src[6];
+ return 3 * ((b - a) * one_t * one_t + 2 * (c - b) * t * one_t + (d - c) * t * t);
+}
+
+// OPTIMIZE? compute t^2, t(1-t), and (1-t)^2 and pass them to another version of derivative at t?
+SkDVector SkDCubic::dxdyAtT(double t) const {
+ SkDVector result = { derivative_at_t(&fPts[0].fX, t), derivative_at_t(&fPts[0].fY, t) };
+ if (result.fX == 0 && result.fY == 0) {
+ if (t == 0) {
+ result = fPts[2] - fPts[0];
+ } else if (t == 1) {
+ result = fPts[3] - fPts[1];
+ } else {
+ // incomplete
+ SkDebugf("!c");
+ }
+ if (result.fX == 0 && result.fY == 0 && zero_or_one(t)) {
+ result = fPts[3] - fPts[0];
+ }
+ }
+ return result;
+}
+
+// OPTIMIZE? share code with formulate_F1DotF2
+int SkDCubic::findInflections(double tValues[]) const {
+ double Ax = fPts[1].fX - fPts[0].fX;
+ double Ay = fPts[1].fY - fPts[0].fY;
+ double Bx = fPts[2].fX - 2 * fPts[1].fX + fPts[0].fX;
+ double By = fPts[2].fY - 2 * fPts[1].fY + fPts[0].fY;
+ double Cx = fPts[3].fX + 3 * (fPts[1].fX - fPts[2].fX) - fPts[0].fX;
+ double Cy = fPts[3].fY + 3 * (fPts[1].fY - fPts[2].fY) - fPts[0].fY;
+ return SkDQuad::RootsValidT(Bx * Cy - By * Cx, Ax * Cy - Ay * Cx, Ax * By - Ay * Bx, tValues);
+}
+
+static void formulate_F1DotF2(const double src[], double coeff[4]) {
+ double a = src[2] - src[0];
+ double b = src[4] - 2 * src[2] + src[0];
+ double c = src[6] + 3 * (src[2] - src[4]) - src[0];
+ coeff[0] = c * c;
+ coeff[1] = 3 * b * c;
+ coeff[2] = 2 * b * b + c * a;
+ coeff[3] = a * b;
+}
+
+/** SkDCubic'(t) = At^2 + Bt + C, where
+ A = 3(-a + 3(b - c) + d)
+ B = 6(a - 2b + c)
+ C = 3(b - a)
+ Solve for t, keeping only those that fit between 0 < t < 1
+*/
+int SkDCubic::FindExtrema(const double src[], double tValues[2]) {
+ // we divide A,B,C by 3 to simplify
+ double a = src[0];
+ double b = src[2];
+ double c = src[4];
+ double d = src[6];
+ double A = d - a + 3 * (b - c);
+ double B = 2 * (a - b - b + c);
+ double C = b - a;
+
+ return SkDQuad::RootsValidT(A, B, C, tValues);
+}
+
+/* from SkGeometry.cpp
+ Looking for F' dot F'' == 0
+
+ A = b - a
+ B = c - 2b + a
+ C = d - 3c + 3b - a
+
+ F' = 3Ct^2 + 6Bt + 3A
+ F'' = 6Ct + 6B
+
+ F' dot F'' -> CCt^3 + 3BCt^2 + (2BB + CA)t + AB
+*/
+int SkDCubic::findMaxCurvature(double tValues[]) const {
+ double coeffX[4], coeffY[4];
+ int i;
+ formulate_F1DotF2(&fPts[0].fX, coeffX);
+ formulate_F1DotF2(&fPts[0].fY, coeffY);
+ for (i = 0; i < 4; i++) {
+ coeffX[i] = coeffX[i] + coeffY[i];
+ }
+ return RootsValidT(coeffX[0], coeffX[1], coeffX[2], coeffX[3], tValues);
+}
+
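+// evaluate the cubic in Bernstein form:
+//   c(t) = (1-t)^3 p0 + 3(1-t)^2 t p1 + 3(1-t) t^2 p2 + t^3 p3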
+SkDPoint SkDCubic::ptAtT(double t) const {
+ if (0 == t) {
+ return fPts[0];
+ }
+ if (1 == t) {
+ return fPts[3];
+ }
+ double one_t = 1 - t;
+ double one_t2 = one_t * one_t;
+ double a = one_t2 * one_t;
+ double b = 3 * one_t2 * t;
+ double t2 = t * t;
+ double c = 3 * one_t * t2;
+ double d = t2 * t;
+ SkDPoint result = {a * fPts[0].fX + b * fPts[1].fX + c * fPts[2].fX + d * fPts[3].fX,
+ a * fPts[0].fY + b * fPts[1].fY + c * fPts[2].fY + d * fPts[3].fY};
+ return result;
+}
+
+/*
+ Given a cubic c, t1, and t2, find a small cubic segment.
+
+ The new cubic is defined as points A, B, C, and D, where
+ s1 = 1 - t1
+ s2 = 1 - t2
+ A = c[0]*s1*s1*s1 + 3*c[1]*s1*s1*t1 + 3*c[2]*s1*t1*t1 + c[3]*t1*t1*t1
+ D = c[0]*s2*s2*s2 + 3*c[1]*s2*s2*t2 + 3*c[2]*s2*t2*t2 + c[3]*t2*t2*t2
+
+ We don't have B or C, so we define two equations to isolate them.
+ First, compute two reference T values 1/3 and 2/3 from t1 to t2:
+
+ c(at (2*t1 + t2)/3) == E
+ c(at (t1 + 2*t2)/3) == F
+
+ Next, compute where those values must be if we know the values of B and C:
+
+ _12 = A*2/3 + B*1/3
+ 12_ = A*1/3 + B*2/3
+ _23 = B*2/3 + C*1/3
+ 23_ = B*1/3 + C*2/3
+ _34 = C*2/3 + D*1/3
+ 34_ = C*1/3 + D*2/3
+ _123 = (A*2/3 + B*1/3)*2/3 + (B*2/3 + C*1/3)*1/3 = A*4/9 + B*4/9 + C*1/9
+ 123_ = (A*1/3 + B*2/3)*1/3 + (B*1/3 + C*2/3)*2/3 = A*1/9 + B*4/9 + C*4/9
+ _234 = (B*2/3 + C*1/3)*2/3 + (C*2/3 + D*1/3)*1/3 = B*4/9 + C*4/9 + D*1/9
+ 234_ = (B*1/3 + C*2/3)*1/3 + (C*1/3 + D*2/3)*2/3 = B*1/9 + C*4/9 + D*4/9
+ _1234 = (A*4/9 + B*4/9 + C*1/9)*2/3 + (B*4/9 + C*4/9 + D*1/9)*1/3
+ = A*8/27 + B*12/27 + C*6/27 + D*1/27
+ = E
+ 1234_ = (A*1/9 + B*4/9 + C*4/9)*1/3 + (B*1/9 + C*4/9 + D*4/9)*2/3
+ = A*1/27 + B*6/27 + C*12/27 + D*8/27
+ = F
+ E*27 = A*8 + B*12 + C*6 + D
+ F*27 = A + B*6 + C*12 + D*8
+
+Group the known values on one side:
+
+ M = E*27 - A*8 - D = B*12 + C* 6
+ N = F*27 - A - D*8 = B* 6 + C*12
+ M*2 - N = B*18
+ N*2 - M = C*18
+ B = (M*2 - N)/18
+ C = (N*2 - M)/18
+ */
+
+static double interp_cubic_coords(const double* src, double t) {
+ double ab = SkDInterp(src[0], src[2], t);
+ double bc = SkDInterp(src[2], src[4], t);
+ double cd = SkDInterp(src[4], src[6], t);
+ double abc = SkDInterp(ab, bc, t);
+ double bcd = SkDInterp(bc, cd, t);
+ double abcd = SkDInterp(abc, bcd, t);
+ return abcd;
+}
+
+SkDCubic SkDCubic::subDivide(double t1, double t2) const {
+ if (t1 == 0 || t2 == 1) {
+ if (t1 == 0 && t2 == 1) {
+ return *this;
+ }
+ SkDCubicPair pair = chopAt(t1 == 0 ? t2 : t1);
+ SkDCubic dst = t1 == 0 ? pair.first() : pair.second();
+ return dst;
+ }
+ SkDCubic dst;
+ double ax = dst[0].fX = interp_cubic_coords(&fPts[0].fX, t1);
+ double ay = dst[0].fY = interp_cubic_coords(&fPts[0].fY, t1);
+ double ex = interp_cubic_coords(&fPts[0].fX, (t1*2+t2)/3);
+ double ey = interp_cubic_coords(&fPts[0].fY, (t1*2+t2)/3);
+ double fx = interp_cubic_coords(&fPts[0].fX, (t1+t2*2)/3);
+ double fy = interp_cubic_coords(&fPts[0].fY, (t1+t2*2)/3);
+ double dx = dst[3].fX = interp_cubic_coords(&fPts[0].fX, t2);
+ double dy = dst[3].fY = interp_cubic_coords(&fPts[0].fY, t2);
+ double mx = ex * 27 - ax * 8 - dx;
+ double my = ey * 27 - ay * 8 - dy;
+ double nx = fx * 27 - ax - dx * 8;
+ double ny = fy * 27 - ay - dy * 8;
+ /* bx = */ dst[1].fX = (mx * 2 - nx) / 18;
+ /* by = */ dst[1].fY = (my * 2 - ny) / 18;
+ /* cx = */ dst[2].fX = (nx * 2 - mx) / 18;
+ /* cy = */ dst[2].fY = (ny * 2 - my) / 18;
+ // FIXME: call align() ?
+ return dst;
+}
+
+void SkDCubic::subDivide(const SkDPoint& a, const SkDPoint& d,
+ double t1, double t2, SkDPoint dst[2]) const {
+ SkASSERT(t1 != t2);
+ // this approach assumes that the control points computed directly are accurate enough
+ SkDCubic sub = subDivide(t1, t2);
+ dst[0] = sub[1] + (a - sub[0]);
+ dst[1] = sub[2] + (d - sub[3]);
+ if (t1 == 0 || t2 == 0) {
+ align(0, 1, t1 == 0 ? &dst[0] : &dst[1]);
+ }
+ if (t1 == 1 || t2 == 1) {
+ align(3, 2, t1 == 1 ? &dst[0] : &dst[1]);
+ }
+ if (AlmostBequalUlps(dst[0].fX, a.fX)) {
+ dst[0].fX = a.fX;
+ }
+ if (AlmostBequalUlps(dst[0].fY, a.fY)) {
+ dst[0].fY = a.fY;
+ }
+ if (AlmostBequalUlps(dst[1].fX, d.fX)) {
+ dst[1].fX = d.fX;
+ }
+ if (AlmostBequalUlps(dst[1].fY, d.fY)) {
+ dst[1].fY = d.fY;
+ }
+}
+
+double SkDCubic::top(const SkDCubic& dCurve, double startT, double endT, SkDPoint* topPt) const {
+ double extremeTs[2];
+ double topT = -1;
+ int roots = SkDCubic::FindExtrema(&fPts[0].fY, extremeTs);
+ for (int index = 0; index < roots; ++index) {
+ double t = startT + (endT - startT) * extremeTs[index];
+ SkDPoint mid = dCurve.ptAtT(t);
+ if (topPt->fY > mid.fY || (topPt->fY == mid.fY && topPt->fX > mid.fX)) {
+ topT = t;
+ *topPt = mid;
+ }
+ }
+ return topT;
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsCubic.h b/gfx/skia/skia/src/pathops/SkPathOpsCubic.h
new file mode 100644
index 000000000..16bca7953
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsCubic.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathOpsCubic_DEFINED
+#define SkPathOpsCubic_DEFINED
+
+#include "SkPath.h"
+#include "SkPathOpsPoint.h"
+
+struct SkDCubicPair {
+ const SkDCubic& first() const { return (const SkDCubic&) pts[0]; }
+ const SkDCubic& second() const { return (const SkDCubic&) pts[3]; }
+ SkDPoint pts[7];
+};
+
+struct SkDCubic {
+ static const int kPointCount = 4;
+ static const int kPointLast = kPointCount - 1;
+ static const int kMaxIntersections = 9;
+
+ enum SearchAxis {
+ kXAxis,
+ kYAxis
+ };
+
+ bool collapsed() const {
+ return fPts[0].approximatelyEqual(fPts[1]) && fPts[0].approximatelyEqual(fPts[2])
+ && fPts[0].approximatelyEqual(fPts[3]);
+ }
+
+ bool controlsInside() const {
+ SkDVector v01 = fPts[0] - fPts[1];
+ SkDVector v02 = fPts[0] - fPts[2];
+ SkDVector v03 = fPts[0] - fPts[3];
+ SkDVector v13 = fPts[1] - fPts[3];
+ SkDVector v23 = fPts[2] - fPts[3];
+ return v03.dot(v01) > 0 && v03.dot(v02) > 0 && v03.dot(v13) > 0 && v03.dot(v23) > 0;
+ }
+
+ static bool IsConic() { return false; }
+
+ const SkDPoint& operator[](int n) const { SkASSERT(n >= 0 && n < kPointCount); return fPts[n]; }
+ SkDPoint& operator[](int n) { SkASSERT(n >= 0 && n < kPointCount); return fPts[n]; }
+
+ void align(int endIndex, int ctrlIndex, SkDPoint* dstPt) const;
+ double binarySearch(double min, double max, double axisIntercept, SearchAxis xAxis) const;
+ double calcPrecision() const;
+ SkDCubicPair chopAt(double t) const;
+ static void Coefficients(const double* cubic, double* A, double* B, double* C, double* D);
+ static bool ComplexBreak(const SkPoint pts[4], SkScalar* t);
+ int convexHull(char order[kPointCount]) const;
+
+ void debugInit() {
+ sk_bzero(fPts, sizeof(fPts));
+ }
+
+ void dump() const; // callable from the debugger when the implementation code is linked in
+ void dumpID(int id) const;
+ void dumpInner() const;
+ SkDVector dxdyAtT(double t) const;
+ bool endsAreExtremaInXOrY() const;
+ static int FindExtrema(const double src[], double tValue[2]);
+ int findInflections(double tValues[2]) const;
+
+ static int FindInflections(const SkPoint a[kPointCount], double tValues[2]) {
+ SkDCubic cubic;
+ return cubic.set(a).findInflections(tValues);
+ }
+
+ int findMaxCurvature(double tValues[]) const;
+ bool hullIntersects(const SkDCubic& c2, bool* isLinear) const;
+ bool hullIntersects(const SkDConic& c, bool* isLinear) const;
+ bool hullIntersects(const SkDQuad& c2, bool* isLinear) const;
+ bool hullIntersects(const SkDPoint* pts, int ptCount, bool* isLinear) const;
+ bool isLinear(int startIndex, int endIndex) const;
+ bool monotonicInX() const;
+ bool monotonicInY() const;
+ void otherPts(int index, const SkDPoint* o1Pts[kPointCount - 1]) const;
+ SkDPoint ptAtT(double t) const;
+ static int RootsReal(double A, double B, double C, double D, double t[3]);
+ static int RootsValidT(const double A, const double B, const double C, double D, double s[3]);
+
+ int searchRoots(double extremes[6], int extrema, double axisIntercept,
+ SearchAxis xAxis, double* validRoots) const;
+
+ /**
+ * Return the number of valid roots (0 < root < 1) for this cubic intersecting the
+ * specified horizontal line.
+ */
+ int horizontalIntersect(double yIntercept, double roots[3]) const;
+ /**
+ * Return the number of valid roots (0 < root < 1) for this cubic intersecting the
+ * specified vertical line.
+ */
+ int verticalIntersect(double xIntercept, double roots[3]) const;
+
+ const SkDCubic& set(const SkPoint pts[kPointCount]) {
+ fPts[0] = pts[0];
+ fPts[1] = pts[1];
+ fPts[2] = pts[2];
+ fPts[3] = pts[3];
+ return *this;
+ }
+
+ SkDCubic subDivide(double t1, double t2) const;
+
+ static SkDCubic SubDivide(const SkPoint a[kPointCount], double t1, double t2) {
+ SkDCubic cubic;
+ return cubic.set(a).subDivide(t1, t2);
+ }
+
+ void subDivide(const SkDPoint& a, const SkDPoint& d, double t1, double t2, SkDPoint p[2]) const;
+
+ static void SubDivide(const SkPoint pts[kPointCount], const SkDPoint& a, const SkDPoint& d, double t1,
+ double t2, SkDPoint p[2]) {
+ SkDCubic cubic;
+ cubic.set(pts).subDivide(a, d, t1, t2, p);
+ }
+
+ double top(const SkDCubic& dCurve, double startT, double endT, SkDPoint* topPt) const;
+ SkDQuad toQuad() const;
+
+ static const int gPrecisionUnit;
+
+ SkDPoint fPts[kPointCount];
+};
+
+/* Given the set [0, 1, 2, 3], and two of the four members, compute an XOR mask
+ that computes the other two. Note that:
+
+ one ^ two == 3 for (0, 3), (1, 2)
+ one ^ two < 3 for (0, 1), (0, 2), (1, 3), (2, 3)
+ 3 - (one ^ two) is either 0, 1, or 2
+ 1 >> (3 - (one ^ two)) is either 0 or 1
+thus:
+ returned == 2 for (0, 3), (1, 2)
+ returned == 3 for (0, 1), (0, 2), (1, 3), (2, 3)
+given that:
+ (0, 3) ^ 2 -> (2, 1) (1, 2) ^ 2 -> (3, 0)
+ (0, 1) ^ 3 -> (3, 2) (0, 2) ^ 3 -> (3, 1) (1, 3) ^ 3 -> (2, 0) (2, 3) ^ 3 -> (1, 0)
+*/
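+// for example: other_two(0, 3) == other_two(1, 2) == 2, and 0 ^ 2 == 2, 3 ^ 2 == 1,
+// 1 ^ 2 == 3, 2 ^ 2 == 0; other_two(0, 1) == 3, and 0 ^ 3 == 3, 1 ^ 3 == 2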
+inline int other_two(int one, int two) {
+ return 1 >> (3 - (one ^ two)) ^ 3;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsCurve.cpp b/gfx/skia/skia/src/pathops/SkPathOpsCurve.cpp
new file mode 100644
index 000000000..503c140aa
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsCurve.cpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkPathOpsBounds.h"
+#include "SkPathOpsRect.h"
+#include "SkPathOpsCurve.h"
+
+ // this cheats and assumes that the perpendicular to the point is the closest ray to the curve
+ // this case (where the line and the curve are nearly coincident) may be the only case that counts
+double SkDCurve::nearPoint(SkPath::Verb verb, const SkDPoint& xy, const SkDPoint& opp) const {
+ int count = SkPathOpsVerbToPoints(verb);
+ double minX = fCubic.fPts[0].fX;
+ double maxX = minX;
+ for (int index = 1; index <= count; ++index) {
+ minX = SkTMin(minX, fCubic.fPts[index].fX);
+ maxX = SkTMax(maxX, fCubic.fPts[index].fX);
+ }
+ if (!AlmostBetweenUlps(minX, xy.fX, maxX)) {
+ return -1;
+ }
+ double minY = fCubic.fPts[0].fY;
+ double maxY = minY;
+ for (int index = 1; index <= count; ++index) {
+ minY = SkTMin(minY, fCubic.fPts[index].fY);
+ maxY = SkTMax(maxY, fCubic.fPts[index].fY);
+ }
+ if (!AlmostBetweenUlps(minY, xy.fY, maxY)) {
+ return -1;
+ }
+ SkIntersections i;
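+ // the ray's second point is xy plus the vector from xy to opp rotated 90 degrees,
+ // so the ray through xy is perpendicular to the chord toward opp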
+ SkDLine perp = {{ xy, { xy.fX + opp.fY - xy.fY, xy.fY + xy.fX - opp.fX }}};
+ (*CurveDIntersectRay[verb])(*this, perp, &i);
+ int minIndex = -1;
+ double minDist = FLT_MAX;
+ for (int index = 0; index < i.used(); ++index) {
+ double dist = xy.distance(i.pt(index));
+ if (minDist > dist) {
+ minDist = dist;
+ minIndex = index;
+ }
+ }
+ if (minIndex < 0) {
+ return -1;
+ }
+ double largest = SkTMax(SkTMax(maxX, maxY), -SkTMin(minX, minY));
+ if (!AlmostEqualUlps_Pin(largest, largest + minDist)) { // is distance within ULPS tolerance?
+ return -1;
+ }
+ return SkPinT(i[0][minIndex]);
+}
+
+void SkDCurve::offset(SkPath::Verb verb, const SkDVector& off) {
+ int count = SkPathOpsVerbToPoints(verb);
+ for (int index = 0; index <= count; ++index) {
+ fCubic.fPts[index] += off;
+ }
+}
+
+void SkDCurve::setConicBounds(const SkPoint curve[3], SkScalar curveWeight,
+ double tStart, double tEnd, SkPathOpsBounds* bounds) {
+ SkDConic dCurve;
+ dCurve.set(curve, curveWeight);
+ SkDRect dRect;
+ dRect.setBounds(dCurve, fConic, tStart, tEnd);
+ bounds->set(SkDoubleToScalar(dRect.fLeft), SkDoubleToScalar(dRect.fTop),
+ SkDoubleToScalar(dRect.fRight), SkDoubleToScalar(dRect.fBottom));
+}
+
+void SkDCurve::setCubicBounds(const SkPoint curve[4], SkScalar ,
+ double tStart, double tEnd, SkPathOpsBounds* bounds) {
+ SkDCubic dCurve;
+ dCurve.set(curve);
+ SkDRect dRect;
+ dRect.setBounds(dCurve, fCubic, tStart, tEnd);
+ bounds->set(SkDoubleToScalar(dRect.fLeft), SkDoubleToScalar(dRect.fTop),
+ SkDoubleToScalar(dRect.fRight), SkDoubleToScalar(dRect.fBottom));
+}
+
+void SkDCurve::setQuadBounds(const SkPoint curve[3], SkScalar ,
+ double tStart, double tEnd, SkPathOpsBounds* bounds) {
+ SkDQuad dCurve;
+ dCurve.set(curve);
+ SkDRect dRect;
+ dRect.setBounds(dCurve, fQuad, tStart, tEnd);
+ bounds->set(SkDoubleToScalar(dRect.fLeft), SkDoubleToScalar(dRect.fTop),
+ SkDoubleToScalar(dRect.fRight), SkDoubleToScalar(dRect.fBottom));
+}
+
+void SkDCurveSweep::setCurveHullSweep(SkPath::Verb verb) {
+ fOrdered = true;
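+ // the sweep vectors run from the curve's start point toward its later control/end points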
+ fSweep[0] = fCurve[1] - fCurve[0];
+ if (SkPath::kLine_Verb == verb) {
+ fSweep[1] = fSweep[0];
+ fIsCurve = false;
+ return;
+ }
+ fSweep[1] = fCurve[2] - fCurve[0];
+ // OPTIMIZE: I do the following float check a lot -- probably need a
+ // central place for this val-is-small-compared-to-curve check
+ double maxVal = 0;
+ for (int index = 0; index <= SkPathOpsVerbToPoints(verb); ++index) {
+ maxVal = SkTMax(maxVal, SkTMax(SkTAbs(fCurve[index].fX),
+ SkTAbs(fCurve[index].fY)));
+ }
+ {
+ if (SkPath::kCubic_Verb != verb) {
+ if (roughly_zero_when_compared_to(fSweep[0].fX, maxVal)
+ && roughly_zero_when_compared_to(fSweep[0].fY, maxVal)) {
+ fSweep[0] = fSweep[1];
+ }
+ goto setIsCurve;
+ }
+ SkDVector thirdSweep = fCurve[3] - fCurve[0];
+ if (fSweep[0].fX == 0 && fSweep[0].fY == 0) {
+ fSweep[0] = fSweep[1];
+ fSweep[1] = thirdSweep;
+ if (roughly_zero_when_compared_to(fSweep[0].fX, maxVal)
+ && roughly_zero_when_compared_to(fSweep[0].fY, maxVal)) {
+ fSweep[0] = fSweep[1];
+ fCurve[1] = fCurve[3];
+ }
+ goto setIsCurve;
+ }
+ double s1x3 = fSweep[0].crossCheck(thirdSweep);
+ double s3x2 = thirdSweep.crossCheck(fSweep[1]);
+ if (s1x3 * s3x2 >= 0) { // if third vector is on or between first two vectors
+ goto setIsCurve;
+ }
+ double s2x1 = fSweep[1].crossCheck(fSweep[0]);
+ // FIXME: If the sweep of the cubic is greater than 180 degrees, we're in trouble
+ // probably such wide sweeps should be artificially subdivided earlier so that never happens
+ SkASSERT(s1x3 * s2x1 < 0 || s1x3 * s3x2 < 0);
+ if (s3x2 * s2x1 < 0) {
+ SkASSERT(s2x1 * s1x3 > 0);
+ fSweep[0] = fSweep[1];
+ fOrdered = false;
+ }
+ fSweep[1] = thirdSweep;
+ }
+setIsCurve:
+ fIsCurve = fSweep[0].crossCheck(fSweep[1]) != 0;
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsCurve.h b/gfx/skia/skia/src/pathops/SkPathOpsCurve.h
new file mode 100644
index 000000000..2b50864e5
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsCurve.h
@@ -0,0 +1,415 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsCurve_DEFINED
+#define SkPathOpsCurve_DEFINED
+
+#include "SkIntersections.h"
+
+#ifndef SK_RELEASE
+#include "SkPath.h"
+#endif
+
+struct SkPathOpsBounds;
+
+struct SkOpCurve {
+ SkPoint fPts[4];
+ SkScalar fWeight;
+ SkDEBUGCODE(SkPath::Verb fVerb);
+
+ const SkPoint& operator[](int n) const {
+ SkASSERT(n >= 0 && n <= SkPathOpsVerbToPoints(fVerb));
+ return fPts[n];
+ }
+
+ void dump() const;
+
+ void set(const SkDQuad& quad) {
+ for (int index = 0; index < SkDQuad::kPointCount; ++index) {
+ fPts[index] = quad[index].asSkPoint();
+ }
+ SkDEBUGCODE(fWeight = 1);
+ SkDEBUGCODE(fVerb = SkPath::kQuad_Verb);
+ }
+
+ void set(const SkDCubic& cubic) {
+ for (int index = 0; index < SkDCubic::kPointCount; ++index) {
+ fPts[index] = cubic[index].asSkPoint();
+ }
+ SkDEBUGCODE(fWeight = 1);
+ SkDEBUGCODE(fVerb = SkPath::kCubic_Verb);
+ }
+
+};
+
+struct SkDCurve {
+ union {
+ SkDLine fLine;
+ SkDQuad fQuad;
+ SkDConic fConic;
+ SkDCubic fCubic;
+ };
+ SkDEBUGCODE(SkPath::Verb fVerb);
+
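+ // every union member begins with its SkDPoint array, so indexing through fCubic
+ // reaches the points of whichever curve type is active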
+ const SkDPoint& operator[](int n) const {
+ SkASSERT(n >= 0 && n <= SkPathOpsVerbToPoints(fVerb));
+ return fCubic[n];
+ }
+
+ SkDPoint& operator[](int n) {
+ SkASSERT(n >= 0 && n <= SkPathOpsVerbToPoints(fVerb));
+ return fCubic[n];
+ }
+
+ SkDPoint conicTop(const SkPoint curve[3], SkScalar curveWeight,
+ double s, double e, double* topT);
+ SkDPoint cubicTop(const SkPoint curve[4], SkScalar , double s, double e, double* topT);
+ void dump() const;
+ void dumpID(int ) const;
+ SkDPoint lineTop(const SkPoint[2], SkScalar , double , double , double* topT);
+ double nearPoint(SkPath::Verb verb, const SkDPoint& xy, const SkDPoint& opp) const;
+ void offset(SkPath::Verb verb, const SkDVector& );
+ SkDPoint quadTop(const SkPoint curve[3], SkScalar , double s, double e, double* topT);
+
+ void setConicBounds(const SkPoint curve[3], SkScalar curveWeight,
+ double s, double e, SkPathOpsBounds* );
+ void setCubicBounds(const SkPoint curve[4], SkScalar ,
+ double s, double e, SkPathOpsBounds* );
+ void setQuadBounds(const SkPoint curve[3], SkScalar ,
+ double s, double e, SkPathOpsBounds*);
+};
+
+class SkDCurveSweep {
+public:
+ bool isCurve() const { return fIsCurve; }
+ bool isOrdered() const { return fOrdered; }
+ void setCurveHullSweep(SkPath::Verb verb);
+
+ SkDCurve fCurve;
+ SkDVector fSweep[2];
+private:
+ bool fIsCurve;
+ bool fOrdered; // cleared when a cubic's control point isn't between the sweep vectors
+
+};
+
+extern SkDPoint (SkDCurve::* const Top[])(const SkPoint curve[], SkScalar cWeight,
+ double tStart, double tEnd, double* topT);
+
+static SkDPoint dline_xy_at_t(const SkPoint a[2], SkScalar , double t) {
+ SkDLine line;
+ line.set(a);
+ return line.ptAtT(t);
+}
+
+static SkDPoint dquad_xy_at_t(const SkPoint a[3], SkScalar , double t) {
+ SkDQuad quad;
+ quad.set(a);
+ return quad.ptAtT(t);
+}
+
+static SkDPoint dconic_xy_at_t(const SkPoint a[3], SkScalar weight, double t) {
+ SkDConic conic;
+ conic.set(a, weight);
+ return conic.ptAtT(t);
+}
+
+static SkDPoint dcubic_xy_at_t(const SkPoint a[4], SkScalar , double t) {
+ SkDCubic cubic;
+ cubic.set(a);
+ return cubic.ptAtT(t);
+}
+
+static SkDPoint (* const CurveDPointAtT[])(const SkPoint[], SkScalar , double ) = {
+ nullptr,
+ dline_xy_at_t,
+ dquad_xy_at_t,
+ dconic_xy_at_t,
+ dcubic_xy_at_t
+};
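+// these curve tables are indexed by SkPath::Verb; the leading nullptr entry
+// corresponds to kMove_Verb, which has no curve geometry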
+
+static SkDPoint ddline_xy_at_t(const SkDCurve& c, double t) {
+ return c.fLine.ptAtT(t);
+}
+
+static SkDPoint ddquad_xy_at_t(const SkDCurve& c, double t) {
+ return c.fQuad.ptAtT(t);
+}
+
+static SkDPoint ddconic_xy_at_t(const SkDCurve& c, double t) {
+ return c.fConic.ptAtT(t);
+}
+
+static SkDPoint ddcubic_xy_at_t(const SkDCurve& c, double t) {
+ return c.fCubic.ptAtT(t);
+}
+
+static SkDPoint (* const CurveDDPointAtT[])(const SkDCurve& , double ) = {
+ nullptr,
+ ddline_xy_at_t,
+ ddquad_xy_at_t,
+ ddconic_xy_at_t,
+ ddcubic_xy_at_t
+};
+
+static SkPoint fline_xy_at_t(const SkPoint a[2], SkScalar weight, double t) {
+ return dline_xy_at_t(a, weight, t).asSkPoint();
+}
+
+static SkPoint fquad_xy_at_t(const SkPoint a[3], SkScalar weight, double t) {
+ return dquad_xy_at_t(a, weight, t).asSkPoint();
+}
+
+static SkPoint fconic_xy_at_t(const SkPoint a[3], SkScalar weight, double t) {
+ return dconic_xy_at_t(a, weight, t).asSkPoint();
+}
+
+static SkPoint fcubic_xy_at_t(const SkPoint a[4], SkScalar weight, double t) {
+ return dcubic_xy_at_t(a, weight, t).asSkPoint();
+}
+
+static SkPoint (* const CurvePointAtT[])(const SkPoint[], SkScalar , double ) = {
+ nullptr,
+ fline_xy_at_t,
+ fquad_xy_at_t,
+ fconic_xy_at_t,
+ fcubic_xy_at_t
+};
+
+static SkDVector dline_dxdy_at_t(const SkPoint a[2], SkScalar , double ) {
+ SkDLine line;
+ line.set(a);
+ return line[1] - line[0];
+}
+
+static SkDVector dquad_dxdy_at_t(const SkPoint a[3], SkScalar , double t) {
+ SkDQuad quad;
+ quad.set(a);
+ return quad.dxdyAtT(t);
+}
+
+static SkDVector dconic_dxdy_at_t(const SkPoint a[3], SkScalar weight, double t) {
+ SkDConic conic;
+ conic.set(a, weight);
+ return conic.dxdyAtT(t);
+}
+
+static SkDVector dcubic_dxdy_at_t(const SkPoint a[4], SkScalar , double t) {
+ SkDCubic cubic;
+ cubic.set(a);
+ return cubic.dxdyAtT(t);
+}
+
+static SkDVector (* const CurveDSlopeAtT[])(const SkPoint[], SkScalar , double ) = {
+ nullptr,
+ dline_dxdy_at_t,
+ dquad_dxdy_at_t,
+ dconic_dxdy_at_t,
+ dcubic_dxdy_at_t
+};
+
+static SkDVector ddline_dxdy_at_t(const SkDCurve& c, double ) {
+ return c.fLine.fPts[1] - c.fLine.fPts[0];
+}
+
+static SkDVector ddquad_dxdy_at_t(const SkDCurve& c, double t) {
+ return c.fQuad.dxdyAtT(t);
+}
+
+static SkDVector ddconic_dxdy_at_t(const SkDCurve& c, double t) {
+ return c.fConic.dxdyAtT(t);
+}
+
+static SkDVector ddcubic_dxdy_at_t(const SkDCurve& c, double t) {
+ return c.fCubic.dxdyAtT(t);
+}
+
+static SkDVector (* const CurveDDSlopeAtT[])(const SkDCurve& , double ) = {
+ nullptr,
+ ddline_dxdy_at_t,
+ ddquad_dxdy_at_t,
+ ddconic_dxdy_at_t,
+ ddcubic_dxdy_at_t
+};
+
+static SkVector fline_dxdy_at_t(const SkPoint a[2], SkScalar , double ) {
+ return a[1] - a[0];
+}
+
+static SkVector fquad_dxdy_at_t(const SkPoint a[3], SkScalar weight, double t) {
+ return dquad_dxdy_at_t(a, weight, t).asSkVector();
+}
+
+static SkVector fconic_dxdy_at_t(const SkPoint a[3], SkScalar weight, double t) {
+ return dconic_dxdy_at_t(a, weight, t).asSkVector();
+}
+
+static SkVector fcubic_dxdy_at_t(const SkPoint a[4], SkScalar weight, double t) {
+ return dcubic_dxdy_at_t(a, weight, t).asSkVector();
+}
+
+static SkVector (* const CurveSlopeAtT[])(const SkPoint[], SkScalar , double ) = {
+ nullptr,
+ fline_dxdy_at_t,
+ fquad_dxdy_at_t,
+ fconic_dxdy_at_t,
+ fcubic_dxdy_at_t
+};
+
+static bool line_is_vertical(const SkPoint a[2], SkScalar , double startT, double endT) {
+ SkDLine line;
+ line.set(a);
+ SkDPoint dst[2] = { line.ptAtT(startT), line.ptAtT(endT) };
+ return AlmostEqualUlps(dst[0].fX, dst[1].fX);
+}
+
+static bool quad_is_vertical(const SkPoint a[3], SkScalar , double startT, double endT) {
+ SkDQuad quad;
+ quad.set(a);
+ SkDQuad dst = quad.subDivide(startT, endT);
+ return AlmostEqualUlps(dst[0].fX, dst[1].fX) && AlmostEqualUlps(dst[1].fX, dst[2].fX);
+}
+
+static bool conic_is_vertical(const SkPoint a[3], SkScalar weight, double startT, double endT) {
+ SkDConic conic;
+ conic.set(a, weight);
+ SkDConic dst = conic.subDivide(startT, endT);
+ return AlmostEqualUlps(dst[0].fX, dst[1].fX) && AlmostEqualUlps(dst[1].fX, dst[2].fX);
+}
+
+static bool cubic_is_vertical(const SkPoint a[4], SkScalar , double startT, double endT) {
+ SkDCubic cubic;
+ cubic.set(a);
+ SkDCubic dst = cubic.subDivide(startT, endT);
+ return AlmostEqualUlps(dst[0].fX, dst[1].fX) && AlmostEqualUlps(dst[1].fX, dst[2].fX)
+ && AlmostEqualUlps(dst[2].fX, dst[3].fX);
+}
+
+static bool (* const CurveIsVertical[])(const SkPoint[], SkScalar , double , double) = {
+ nullptr,
+ line_is_vertical,
+ quad_is_vertical,
+ conic_is_vertical,
+ cubic_is_vertical
+};
+
+static void line_intersect_ray(const SkPoint a[2], SkScalar , const SkDLine& ray,
+ SkIntersections* i) {
+ SkDLine line;
+ line.set(a);
+ i->intersectRay(line, ray);
+}
+
+static void quad_intersect_ray(const SkPoint a[3], SkScalar , const SkDLine& ray,
+ SkIntersections* i) {
+ SkDQuad quad;
+ quad.set(a);
+ i->intersectRay(quad, ray);
+}
+
+static void conic_intersect_ray(const SkPoint a[3], SkScalar weight, const SkDLine& ray,
+ SkIntersections* i) {
+ SkDConic conic;
+ conic.set(a, weight);
+ i->intersectRay(conic, ray);
+}
+
+static void cubic_intersect_ray(const SkPoint a[4], SkScalar , const SkDLine& ray,
+ SkIntersections* i) {
+ SkDCubic cubic;
+ cubic.set(a);
+ i->intersectRay(cubic, ray);
+}
+
+static void (* const CurveIntersectRay[])(const SkPoint[] , SkScalar , const SkDLine& ,
+ SkIntersections* ) = {
+ nullptr,
+ line_intersect_ray,
+ quad_intersect_ray,
+ conic_intersect_ray,
+ cubic_intersect_ray
+};
+
+static void dline_intersect_ray(const SkDCurve& c, const SkDLine& ray, SkIntersections* i) {
+ i->intersectRay(c.fLine, ray);
+}
+
+static void dquad_intersect_ray(const SkDCurve& c, const SkDLine& ray, SkIntersections* i) {
+ i->intersectRay(c.fQuad, ray);
+}
+
+static void dconic_intersect_ray(const SkDCurve& c, const SkDLine& ray, SkIntersections* i) {
+ i->intersectRay(c.fConic, ray);
+}
+
+static void dcubic_intersect_ray(const SkDCurve& c, const SkDLine& ray, SkIntersections* i) {
+ i->intersectRay(c.fCubic, ray);
+}
+
+static void (* const CurveDIntersectRay[])(const SkDCurve& , const SkDLine& , SkIntersections* ) = {
+ nullptr,
+ dline_intersect_ray,
+ dquad_intersect_ray,
+ dconic_intersect_ray,
+ dcubic_intersect_ray
+};
+
+static int line_intercept_h(const SkPoint a[2], SkScalar , SkScalar y, double* roots) {
+ SkDLine line;
+ roots[0] = SkIntersections::HorizontalIntercept(line.set(a), y);
+ return between(0, roots[0], 1);
+}
+
+static int line_intercept_v(const SkPoint a[2], SkScalar , SkScalar x, double* roots) {
+ SkDLine line;
+ roots[0] = SkIntersections::VerticalIntercept(line.set(a), x);
+ return between(0, roots[0], 1);
+}
+
+static int quad_intercept_h(const SkPoint a[2], SkScalar , SkScalar y, double* roots) {
+ SkDQuad quad;
+ return SkIntersections::HorizontalIntercept(quad.set(a), y, roots);
+}
+
+static int quad_intercept_v(const SkPoint a[2], SkScalar , SkScalar x, double* roots) {
+ SkDQuad quad;
+ return SkIntersections::VerticalIntercept(quad.set(a), x, roots);
+}
+
+static int conic_intercept_h(const SkPoint a[2], SkScalar w, SkScalar y, double* roots) {
+ SkDConic conic;
+ return SkIntersections::HorizontalIntercept(conic.set(a, w), y, roots);
+}
+
+static int conic_intercept_v(const SkPoint a[2], SkScalar w, SkScalar x, double* roots) {
+ SkDConic conic;
+ return SkIntersections::VerticalIntercept(conic.set(a, w), x, roots);
+}
+
+static int cubic_intercept_h(const SkPoint a[3], SkScalar , SkScalar y, double* roots) {
+ SkDCubic cubic;
+ return cubic.set(a).horizontalIntersect(y, roots);
+}
+
+static int cubic_intercept_v(const SkPoint a[3], SkScalar , SkScalar x, double* roots) {
+ SkDCubic cubic;
+ return cubic.set(a).verticalIntersect(x, roots);
+}
+
+static int (* const CurveIntercept[])(const SkPoint[] , SkScalar , SkScalar , double* ) = {
+ nullptr,
+ nullptr,
+ line_intercept_h,
+ line_intercept_v,
+ quad_intercept_h,
+ quad_intercept_v,
+ conic_intercept_h,
+ conic_intercept_v,
+ cubic_intercept_h,
+ cubic_intercept_v,
+};
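+// unlike the tables above, CurveIntercept stores a horizontal/vertical pair per verb,
+// so slots 2 * verb and 2 * verb + 1 hold the intercepts; both kMove_Verb slots are nullptr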
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsDebug.cpp b/gfx/skia/skia/src/pathops/SkPathOpsDebug.cpp
new file mode 100644
index 000000000..e744c7565
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsDebug.cpp
@@ -0,0 +1,2913 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMutex.h"
+#include "SkOpCoincidence.h"
+#include "SkOpContour.h"
+#include "SkPath.h"
+#include "SkPathOpsDebug.h"
+#include "SkString.h"
+
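+// within this file the FAIL_* and RETURN_FALSE_IF macros only record a glitch in the
+// log; they deliberately do not bail out of the calling function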
+#undef FAIL_IF
+#define FAIL_IF(cond, coin) \
+ do { if (cond) log->record(SkPathOpsDebug::kFail_Glitch, coin); } while (false)
+
+#undef FAIL_WITH_NULL_IF
+#define FAIL_WITH_NULL_IF(cond, span) \
+ do { if (cond) log->record(SkPathOpsDebug::kFail_Glitch, span); } while (false)
+
+#undef RETURN_FALSE_IF
+#define RETURN_FALSE_IF(cond, span) \
+ do { if (cond) log->record(SkPathOpsDebug::kReturnFalse_Glitch, span); \
+ } while (false)
+
+class SkCoincidentSpans;
+
+#if DEBUG_VALIDATE
+extern bool FLAGS_runFail;
+#endif
+
+#if DEBUG_SORT
+int SkPathOpsDebug::gSortCountDefault = SK_MaxS32;
+int SkPathOpsDebug::gSortCount;
+#endif
+
+#if DEBUG_ACTIVE_OP
+const char* SkPathOpsDebug::kPathOpStr[] = {"diff", "sect", "union", "xor", "rdiff"};
+#endif
+
+#if defined SK_DEBUG || !FORCE_RELEASE
+
+const char* SkPathOpsDebug::kLVerbStr[] = {"", "line", "quad", "cubic"};
+
+int SkPathOpsDebug::gContourID = 0;
+int SkPathOpsDebug::gSegmentID = 0;
+
+bool SkPathOpsDebug::ChaseContains(const SkTDArray<SkOpSpanBase* >& chaseArray,
+ const SkOpSpanBase* span) {
+ for (int index = 0; index < chaseArray.count(); ++index) {
+ const SkOpSpanBase* entry = chaseArray[index];
+ if (entry == span) {
+ return true;
+ }
+ }
+ return false;
+}
+#endif
+
+#if DEBUG_COIN
+
+SkPathOpsDebug::CoinDict SkPathOpsDebug::gCoinSumChangedDict;
+SkPathOpsDebug::CoinDict SkPathOpsDebug::gCoinSumVisitedDict;
+
+static const int kGlitchType_Count = SkPathOpsDebug::kUnalignedTail_Glitch + 1;
+
+struct SpanGlitch {
+ const SkOpSpanBase* fBase;
+ const SkOpSpanBase* fSuspect;
+ const SkOpSegment* fSegment;
+ const SkOpSegment* fOppSegment;
+ const SkOpPtT* fCoinSpan;
+ const SkOpPtT* fEndSpan;
+ const SkOpPtT* fOppSpan;
+ const SkOpPtT* fOppEndSpan;
+ double fStartT;
+ double fEndT;
+ double fOppStartT;
+ double fOppEndT;
+ SkPoint fPt;
+ SkPathOpsDebug::GlitchType fType;
+
+ void dumpType() const;
+};
+
+struct SkPathOpsDebug::GlitchLog {
+ void init(const SkOpGlobalState* state) {
+ fGlobalState = state;
+ }
+
+ SpanGlitch* recordCommon(GlitchType type) {
+ SpanGlitch* glitch = fGlitches.push();
+ glitch->fBase = nullptr;
+ glitch->fSuspect = nullptr;
+ glitch->fSegment = nullptr;
+ glitch->fOppSegment = nullptr;
+ glitch->fCoinSpan = nullptr;
+ glitch->fEndSpan = nullptr;
+ glitch->fOppSpan = nullptr;
+ glitch->fOppEndSpan = nullptr;
+ glitch->fStartT = SK_ScalarNaN;
+ glitch->fEndT = SK_ScalarNaN;
+ glitch->fOppStartT = SK_ScalarNaN;
+ glitch->fOppEndT = SK_ScalarNaN;
+ glitch->fPt = { SK_ScalarNaN, SK_ScalarNaN };
+ glitch->fType = type;
+ return glitch;
+ }
+
+ void record(GlitchType type, const SkOpSpanBase* base,
+ const SkOpSpanBase* suspect = NULL) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fBase = base;
+ glitch->fSuspect = suspect;
+ }
+
+ void record(GlitchType type, const SkOpSpanBase* base,
+ const SkOpPtT* ptT) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fBase = base;
+ glitch->fCoinSpan = ptT;
+ }
+
+ void record(GlitchType type, const SkCoincidentSpans* coin,
+ const SkCoincidentSpans* opp = NULL) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fCoinSpan = coin->coinPtTStart();
+ glitch->fEndSpan = coin->coinPtTEnd();
+ if (opp) {
+ glitch->fOppSpan = opp->coinPtTStart();
+ glitch->fOppEndSpan = opp->coinPtTEnd();
+ }
+ }
+
+ void record(GlitchType type, const SkOpSpanBase* base,
+ const SkOpSegment* seg, double t, SkPoint pt) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fBase = base;
+ glitch->fSegment = seg;
+ glitch->fStartT = t;
+ glitch->fPt = pt;
+ }
+
+ void record(GlitchType type, const SkOpSpanBase* base, double t,
+ SkPoint pt) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fBase = base;
+ glitch->fStartT = t;
+ glitch->fPt = pt;
+ }
+
+ void record(GlitchType type, const SkCoincidentSpans* coin,
+ const SkOpPtT* coinSpan, const SkOpPtT* endSpan) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fCoinSpan = coin->coinPtTStart();
+ glitch->fEndSpan = endSpan;
+ glitch->fOppSpan = coinSpan;
+ glitch->fOppEndSpan = endSpan;
+ }
+
+ void record(GlitchType type, const SkCoincidentSpans* coin,
+ const SkOpSpanBase* base) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fBase = base;
+ glitch->fCoinSpan = coin->coinPtTStart();
+ glitch->fEndSpan = coin->coinPtTEnd();
+ }
+
+ void record(GlitchType type, const SkOpPtT* ptTS, const SkOpPtT* ptTE,
+ const SkOpPtT* oPtTS, const SkOpPtT* oPtTE) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fCoinSpan = ptTS;
+ glitch->fEndSpan = ptTE;
+ glitch->fOppSpan = oPtTS;
+ glitch->fOppEndSpan = oPtTE;
+ }
+
+ void record(GlitchType type, const SkOpSegment* seg, double startT,
+ double endT, const SkOpSegment* oppSeg, double oppStartT, double oppEndT) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fSegment = seg;
+ glitch->fStartT = startT;
+ glitch->fEndT = endT;
+ glitch->fOppSegment = oppSeg;
+ glitch->fOppStartT = oppStartT;
+ glitch->fOppEndT = oppEndT;
+ }
+
+ void record(GlitchType type, const SkOpSegment* seg,
+ const SkOpSpan* span) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fSegment = seg;
+ glitch->fBase = span;
+ }
+
+ void record(GlitchType type, double t, const SkOpSpanBase* span) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fStartT = t;
+ glitch->fBase = span;
+ }
+
+ void record(GlitchType type, const SkOpSegment* seg) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fSegment = seg;
+ }
+
+ void record(GlitchType type, const SkCoincidentSpans* coin,
+ const SkOpPtT* ptT) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fCoinSpan = coin->coinPtTStart();
+ glitch->fEndSpan = ptT;
+ }
+
+ SkTDArray<SpanGlitch> fGlitches;
+ const SkOpGlobalState* fGlobalState;
+};
+
+
+void SkPathOpsDebug::CoinDict::add(const SkPathOpsDebug::CoinDict& dict) {
+ int count = dict.fDict.count();
+ for (int index = 0; index < count; ++index) {
+ this->add(dict.fDict[index]);
+ }
+}
+
+void SkPathOpsDebug::CoinDict::add(const CoinDictEntry& key) {
+ int count = fDict.count();
+ for (int index = 0; index < count; ++index) {
+ CoinDictEntry* entry = &fDict[index];
+ if (entry->fIteration == key.fIteration && entry->fLineNumber == key.fLineNumber) {
+ SkASSERT(!strcmp(entry->fFunctionName, key.fFunctionName));
+ if (entry->fGlitchType == kUninitialized_Glitch) {
+ entry->fGlitchType = key.fGlitchType;
+ }
+ return;
+ }
+ }
+ *fDict.append() = key;
+}
+
+#endif
+
+#if DEBUG_COIN
+static void missing_coincidence(SkPathOpsDebug::GlitchLog* glitches, const SkOpContourHead* contourList) {
+ const SkOpContour* contour = contourList;
+ // bool result = false;
+ do {
+ /* result |= */ contour->debugMissingCoincidence(glitches);
+ } while ((contour = contour->next()));
+ return;
+}
+
+static void move_multiples(SkPathOpsDebug::GlitchLog* glitches, const SkOpContourHead* contourList) {
+ const SkOpContour* contour = contourList;
+ do {
+ if (contour->debugMoveMultiples(glitches), false) {
+ return;
+ }
+ } while ((contour = contour->next()));
+ return;
+}
+
+static void move_nearby(SkPathOpsDebug::GlitchLog* glitches, const SkOpContourHead* contourList) {
+ const SkOpContour* contour = contourList;
+ do {
+ contour->debugMoveNearby(glitches);
+ } while ((contour = contour->next()));
+}
+
+
+#endif
+
+#if DEBUG_COIN
+void SkOpGlobalState::debugAddToCoinChangedDict() {
+
+#if DEBUG_COINCIDENCE
+ CheckHealth(contourList);
+#endif
+ // see if next coincident operation makes a change; if so, record it
+ SkPathOpsDebug::GlitchLog glitches;
+ const char* funcName = fCoinDictEntry.fFunctionName;
+ if (!strcmp("calc_angles", funcName)) {
+ ;
+ } else if (!strcmp("missing_coincidence", funcName)) {
+ missing_coincidence(&glitches, fContourHead);
+ } else if (!strcmp("move_multiples", funcName)) {
+ move_multiples(&glitches, fContourHead);
+ } else if (!strcmp("move_nearby", funcName)) {
+ move_nearby(&glitches, fContourHead);
+ } else if (!strcmp("addExpanded", funcName)) {
+ fCoincidence->debugAddExpanded(&glitches);
+ } else if (!strcmp("addMissing", funcName)) {
+ bool added;
+ fCoincidence->debugAddMissing(&glitches, &added);
+ } else if (!strcmp("addEndMovedSpans", funcName)) {
+ fCoincidence->debugAddEndMovedSpans(&glitches);
+ } else if (!strcmp("correctEnds", funcName)) {
+ fCoincidence->debugCorrectEnds(&glitches);
+ } else if (!strcmp("expand", funcName)) {
+ fCoincidence->debugExpand(&glitches);
+ } else if (!strcmp("findOverlaps", funcName)) {
+ ;
+ } else if (!strcmp("mark", funcName)) {
+ fCoincidence->debugMark(&glitches);
+ } else if (!strcmp("apply", funcName)) {
+ ;
+ } else {
+ SkASSERT(0); // add missing case
+ }
+ if (glitches.fGlitches.count()) {
+ fCoinDictEntry.fGlitchType = glitches.fGlitches[0].fType;
+ }
+ fCoinChangedDict.add(fCoinDictEntry);
+}
+#endif
+
+void SkPathOpsDebug::ShowActiveSpans(SkOpContourHead* contourList) {
+#if DEBUG_ACTIVE_SPANS
+ SkOpContour* contour = contourList;
+ do {
+ contour->debugShowActiveSpans();
+ } while ((contour = contour->next()));
+#endif
+}
+
+#if DEBUG_COINCIDENCE || DEBUG_COIN
+void SkPathOpsDebug::CheckHealth(SkOpContourHead* contourList) {
+#if DEBUG_COINCIDENCE
+ contourList->globalState()->debugSetCheckHealth(true);
+#endif
+#if DEBUG_COIN
+ GlitchLog glitches;
+ const SkOpContour* contour = contourList;
+ const SkOpCoincidence* coincidence = contour->globalState()->coincidence();
+ coincidence->debugCheckValid(&glitches); // don't call validate; spans may be inconsistent
+ do {
+ contour->debugCheckHealth(&glitches);
+ contour->debugMissingCoincidence(&glitches);
+ } while ((contour = contour->next()));
+ bool added;
+ coincidence->debugAddMissing(&glitches, &added);
+ coincidence->debugExpand(&glitches);
+ coincidence->debugAddExpanded(&glitches);
+ coincidence->debugMark(&glitches);
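+ // summarize which glitch types were seen: one 'x' or '-' column per glitch type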
+ unsigned mask = 0;
+ for (int index = 0; index < glitches.fGlitches.count(); ++index) {
+ const SpanGlitch& glitch = glitches.fGlitches[index];
+ mask |= 1 << glitch.fType;
+ }
+ for (int index = 0; index < kGlitchType_Count; ++index) {
+ SkDebugf(mask & (1 << index) ? "x" : "-");
+ }
+ for (int index = 0; index < glitches.fGlitches.count(); ++index) {
+ const SpanGlitch& glitch = glitches.fGlitches[index];
+ SkDebugf("%02d: ", index);
+ if (glitch.fBase) {
+ SkDebugf(" seg/base=%d/%d", glitch.fBase->segment()->debugID(),
+ glitch.fBase->debugID());
+ }
+ if (glitch.fSuspect) {
+ SkDebugf(" seg/base=%d/%d", glitch.fSuspect->segment()->debugID(),
+ glitch.fSuspect->debugID());
+ }
+ if (glitch.fSegment) {
+ SkDebugf(" segment=%d", glitch.fSegment->debugID());
+ }
+ if (glitch.fCoinSpan) {
+ SkDebugf(" coinSeg/Span/PtT=%d/%d/%d", glitch.fCoinSpan->segment()->debugID(),
+ glitch.fCoinSpan->span()->debugID(), glitch.fCoinSpan->debugID());
+ }
+ if (glitch.fEndSpan) {
+ SkDebugf(" endSpan=%d", glitch.fEndSpan->debugID());
+ }
+ if (glitch.fOppSpan) {
+ SkDebugf(" oppSeg/Span/PtT=%d/%d/%d", glitch.fOppSpan->segment()->debugID(),
+ glitch.fOppSpan->span()->debugID(), glitch.fOppSpan->debugID());
+ }
+ if (glitch.fOppEndSpan) {
+ SkDebugf(" oppEndSpan=%d", glitch.fOppEndSpan->debugID());
+ }
+ if (!SkScalarIsNaN(glitch.fStartT)) {
+ SkDebugf(" startT=%g", glitch.fStartT);
+ }
+ if (!SkScalarIsNaN(glitch.fEndT)) {
+ SkDebugf(" endT=%g", glitch.fEndT);
+ }
+ if (glitch.fOppSegment) {
+ SkDebugf(" segment=%d", glitch.fOppSegment->debugID());
+ }
+ if (!SkScalarIsNaN(glitch.fOppStartT)) {
+ SkDebugf(" oppStartT=%g", glitch.fOppStartT);
+ }
+ if (!SkScalarIsNaN(glitch.fOppEndT)) {
+ SkDebugf(" oppEndT=%g", glitch.fOppEndT);
+ }
+ if (!SkScalarIsNaN(glitch.fPt.fX) || !SkScalarIsNaN(glitch.fPt.fY)) {
+ SkDebugf(" pt=%g,%g", glitch.fPt.fX, glitch.fPt.fY);
+ }
+ DumpGlitchType(glitch.fType);
+ SkDebugf("\n");
+ }
+#if DEBUG_COINCIDENCE
+ contourList->globalState()->debugSetCheckHealth(false);
+#endif
+#if 01 && DEBUG_ACTIVE_SPANS
+// SkDebugf("active after %s:\n", id);
+ ShowActiveSpans(contourList);
+#endif
+#endif
+}
+#endif
+
+#if DEBUG_COIN
+void SkPathOpsDebug::DumpGlitchType(GlitchType glitchType) {
+ switch (glitchType) {
+ case kAddCorruptCoin_Glitch: SkDebugf(" AddCorruptCoin"); break;
+ case kAddExpandedCoin_Glitch: SkDebugf(" AddExpandedCoin"); break;
+ case kAddExpandedFail_Glitch: SkDebugf(" AddExpandedFail"); break;
+ case kAddIfCollapsed_Glitch: SkDebugf(" AddIfCollapsed"); break;
+ case kAddIfMissingCoin_Glitch: SkDebugf(" AddIfMissingCoin"); break;
+ case kAddMissingCoin_Glitch: SkDebugf(" AddMissingCoin"); break;
+ case kAddMissingExtend_Glitch: SkDebugf(" AddMissingExtend"); break;
+ case kAddOrOverlap_Glitch: SkDebugf(" AddOrOverlap"); break;
+ case kCollapsedCoin_Glitch: SkDebugf(" CollapsedCoin"); break;
+ case kCollapsedDone_Glitch: SkDebugf(" CollapsedDone"); break;
+ case kCollapsedOppValue_Glitch: SkDebugf(" CollapsedOppValue"); break;
+ case kCollapsedSpan_Glitch: SkDebugf(" CollapsedSpan"); break;
+ case kCollapsedWindValue_Glitch: SkDebugf(" CollapsedWindValue"); break;
+ case kCorrectEnd_Glitch: SkDebugf(" CorrectEnd"); break;
+ case kDeletedCoin_Glitch: SkDebugf(" DeletedCoin"); break;
+ case kExpandCoin_Glitch: SkDebugf(" ExpandCoin"); break;
+ case kFail_Glitch: SkDebugf(" Fail"); break;
+ case kMarkCoinEnd_Glitch: SkDebugf(" MarkCoinEnd"); break;
+ case kMarkCoinInsert_Glitch: SkDebugf(" MarkCoinInsert"); break;
+ case kMarkCoinMissing_Glitch: SkDebugf(" MarkCoinMissing"); break;
+ case kMarkCoinStart_Glitch: SkDebugf(" MarkCoinStart"); break;
+ case kMergeMatches_Glitch: SkDebugf(" MergeMatches"); break;
+ case kMissingCoin_Glitch: SkDebugf(" MissingCoin"); break;
+ case kMissingDone_Glitch: SkDebugf(" MissingDone"); break;
+ case kMissingIntersection_Glitch: SkDebugf(" MissingIntersection"); break;
+ case kMoveMultiple_Glitch: SkDebugf(" MoveMultiple"); break;
+ case kMoveNearbyClearAll_Glitch: SkDebugf(" MoveNearbyClearAll"); break;
+ case kMoveNearbyClearAll2_Glitch: SkDebugf(" MoveNearbyClearAll2"); break;
+ case kMoveNearbyMerge_Glitch: SkDebugf(" MoveNearbyMerge"); break;
+ case kMoveNearbyMergeFinal_Glitch: SkDebugf(" MoveNearbyMergeFinal"); break;
+ case kMoveNearbyRelease_Glitch: SkDebugf(" MoveNearbyRelease"); break;
+ case kMoveNearbyReleaseFinal_Glitch: SkDebugf(" MoveNearbyReleaseFinal"); break;
+ case kReleasedSpan_Glitch: SkDebugf(" ReleasedSpan"); break;
+ case kReturnFalse_Glitch: SkDebugf(" ReturnFalse"); break;
+ case kUnaligned_Glitch: SkDebugf(" Unaligned"); break;
+ case kUnalignedHead_Glitch: SkDebugf(" UnalignedHead"); break;
+ case kUnalignedTail_Glitch: SkDebugf(" UnalignedTail"); break;
+ case kUninitialized_Glitch: break;
+ default: SkASSERT(0);
+ }
+}
+#endif
+
+#if defined SK_DEBUG || !FORCE_RELEASE
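+// rewrite scientific notation in place so the value pastes into Mathematica,
+// e.g. "1e-05" becomes "1*^-05"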
+void SkPathOpsDebug::MathematicaIze(char* str, size_t bufferLen) {
+ size_t len = strlen(str);
+ bool num = false;
+ for (size_t idx = 0; idx < len; ++idx) {
+ if (num && str[idx] == 'e') {
+ if (len + 2 >= bufferLen) {
+ return;
+ }
+ memmove(&str[idx + 2], &str[idx + 1], len - idx);
+ str[idx] = '*';
+ str[idx + 1] = '^';
+ ++len;
+ }
+ num = str[idx] >= '0' && str[idx] <= '9';
+ }
+}
+
+#if DEBUG_VALIDATE
+void SkPathOpsDebug::SetPhase(SkOpContourHead* contourList, CoinID next,
+ int lineNumber, SkOpPhase phase) {
+ AddedCoin(contourList, next, 0, lineNumber);
+ contourList->globalState()->setPhase(phase);
+}
+#endif
+
+bool SkPathOpsDebug::ValidWind(int wind) {
+ return wind > SK_MinS32 + 0xFFFF && wind < SK_MaxS32 - 0xFFFF;
+}
+
+void SkPathOpsDebug::WindingPrintf(int wind) {
+ if (wind == SK_MinS32) {
+ SkDebugf("?");
+ } else {
+ SkDebugf("%d", wind);
+ }
+}
+#endif // defined SK_DEBUG || !FORCE_RELEASE
+
+
+#if DEBUG_SHOW_TEST_NAME
+void* SkPathOpsDebug::CreateNameStr() { return new char[DEBUG_FILENAME_STRING_LENGTH]; }
+
+void SkPathOpsDebug::DeleteNameStr(void* v) { delete[] reinterpret_cast<char*>(v); }
+
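+// bump the trailing decimal in a test name, e.g. "quad7" becomes "quad8";
+// names without a numeric suffix are left unchanged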
+void SkPathOpsDebug::BumpTestName(char* test) {
+ char* num = test + strlen(test);
+ while (num[-1] >= '0' && num[-1] <= '9') {
+ --num;
+ }
+ if (num[0] == '\0') {
+ return;
+ }
+ int dec = atoi(num);
+ if (dec == 0) {
+ return;
+ }
+ ++dec;
+ SK_SNPRINTF(num, DEBUG_FILENAME_STRING_LENGTH - (num - test), "%d", dec);
+}
+#endif
+
+static void show_function_header(const char* functionName) {
+ SkDebugf("\nstatic void %s(skiatest::Reporter* reporter, const char* filename) {\n", functionName);
+ if (strcmp("skphealth_com76", functionName) == 0) {
+ SkDebugf("found it\n");
+ }
+}
+
+static const char* gOpStrs[] = {
+ "kDifference_SkPathOp",
+ "kIntersect_SkPathOp",
+ "kUnion_SkPathOp",
+ "kXOR_PathOp",
+ "kReverseDifference_SkPathOp",
+};
+
+const char* SkPathOpsDebug::OpStr(SkPathOp op) {
+ return gOpStrs[op];
+}
+
+static void show_op(SkPathOp op, const char* pathOne, const char* pathTwo) {
+ SkDebugf(" testPathOp(reporter, %s, %s, %s, filename);\n", pathOne, pathTwo, gOpStrs[op]);
+ SkDebugf("}\n");
+}
+
+SK_DECLARE_STATIC_MUTEX(gTestMutex);
+
+void SkPathOpsDebug::ShowPath(const SkPath& a, const SkPath& b, SkPathOp shapeOp,
+ const char* testName) {
+ SkAutoMutexAcquire ac(gTestMutex);
+ show_function_header(testName);
+ ShowOnePath(a, "path", true);
+ ShowOnePath(b, "pathB", true);
+ show_op(shapeOp, "path", "pathB");
+}
+
+#include "SkPathOpsTypes.h"
+#include "SkIntersectionHelper.h"
+#include "SkIntersections.h"
+
+#if DEBUG_COIN
+
+SK_DECLARE_STATIC_MUTEX(gCoinDictMutex);
+
+void SkOpGlobalState::debugAddToGlobalCoinDicts() {
+ SkAutoMutexAcquire ac(&gCoinDictMutex);
+ SkPathOpsDebug::gCoinSumChangedDict.add(fCoinChangedDict);
+ SkPathOpsDebug::gCoinSumVisitedDict.add(fCoinVisitedDict);
+}
+
+#endif
+
+#if DEBUG_T_SECT_LOOP_COUNT
+void SkOpGlobalState::debugAddLoopCount(SkIntersections* i, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn) {
+ for (int index = 0; index < (int) SK_ARRAY_COUNT(fDebugLoopCount); ++index) {
+ SkIntersections::DebugLoop looper = (SkIntersections::DebugLoop) index;
+ if (fDebugLoopCount[index] >= i->debugLoopCount(looper)) {
+ continue;
+ }
+ fDebugLoopCount[index] = i->debugLoopCount(looper);
+ fDebugWorstVerb[index * 2] = wt.segment()->verb();
+ fDebugWorstVerb[index * 2 + 1] = wn.segment()->verb();
+ sk_bzero(&fDebugWorstPts[index * 8], sizeof(SkPoint) * 8);
+ memcpy(&fDebugWorstPts[index * 2 * 4], wt.pts(),
+ (SkPathOpsVerbToPoints(wt.segment()->verb()) + 1) * sizeof(SkPoint));
+ memcpy(&fDebugWorstPts[(index * 2 + 1) * 4], wn.pts(),
+ (SkPathOpsVerbToPoints(wn.segment()->verb()) + 1) * sizeof(SkPoint));
+ fDebugWorstWeight[index * 2] = wt.weight();
+ fDebugWorstWeight[index * 2 + 1] = wn.weight();
+ }
+ i->debugResetLoopCount();
+}
+
+void SkOpGlobalState::debugDoYourWorst(SkOpGlobalState* local) {
+ for (int index = 0; index < (int) SK_ARRAY_COUNT(fDebugLoopCount); ++index) {
+ if (fDebugLoopCount[index] >= local->fDebugLoopCount[index]) {
+ continue;
+ }
+ fDebugLoopCount[index] = local->fDebugLoopCount[index];
+ fDebugWorstVerb[index * 2] = local->fDebugWorstVerb[index * 2];
+ fDebugWorstVerb[index * 2 + 1] = local->fDebugWorstVerb[index * 2 + 1];
+ memcpy(&fDebugWorstPts[index * 2 * 4], &local->fDebugWorstPts[index * 2 * 4],
+ sizeof(SkPoint) * 8);
+ fDebugWorstWeight[index * 2] = local->fDebugWorstWeight[index * 2];
+ fDebugWorstWeight[index * 2 + 1] = local->fDebugWorstWeight[index * 2 + 1];
+ }
+ local->debugResetLoopCounts();
+}
+
+static void dump_curve(SkPath::Verb verb, const SkPoint& pts, float weight) {
+ if (!verb) {
+ return;
+ }
+ const char* verbs[] = { "", "line", "quad", "conic", "cubic" };
+ SkDebugf("%s: {{", verbs[verb]);
+ int ptCount = SkPathOpsVerbToPoints(verb);
+ for (int index = 0; index <= ptCount; ++index) {
+ SkDPoint::Dump((&pts)[index]);
+ if (index < ptCount) {
+ SkDebugf(", ");
+ }
+ }
+ SkDebugf("}");
+ if (weight != 1) {
+ SkDebugf(", ");
+ if (weight == floorf(weight)) {
+ SkDebugf("%.0f", weight);
+ } else {
+ SkDebugf("%1.9gf", weight);
+ }
+ }
+ SkDebugf("}\n");
+}
+
+void SkOpGlobalState::debugLoopReport() {
+ const char* loops[] = { "iterations", "coinChecks", "perpCalcs" };
+ SkDebugf("\n");
+ for (int index = 0; index < (int) SK_ARRAY_COUNT(fDebugLoopCount); ++index) {
+ SkDebugf("%s: %d\n", loops[index], fDebugLoopCount[index]);
+ dump_curve(fDebugWorstVerb[index * 2], fDebugWorstPts[index * 2 * 4],
+ fDebugWorstWeight[index * 2]);
+ dump_curve(fDebugWorstVerb[index * 2 + 1], fDebugWorstPts[(index * 2 + 1) * 4],
+ fDebugWorstWeight[index * 2 + 1]);
+ }
+}
+
+void SkOpGlobalState::debugResetLoopCounts() {
+ sk_bzero(fDebugLoopCount, sizeof(fDebugLoopCount));
+ sk_bzero(fDebugWorstVerb, sizeof(fDebugWorstVerb));
+ sk_bzero(fDebugWorstPts, sizeof(fDebugWorstPts));
+ sk_bzero(fDebugWorstWeight, sizeof(fDebugWorstWeight));
+}
+#endif
+
+#ifdef SK_DEBUG
+bool SkOpGlobalState::debugRunFail() const {
+#if DEBUG_VALIDATE
+ return FLAGS_runFail;
+#else
+ return false;
+#endif
+}
+#endif
+
+// this is const so it can be called by const methods that otherwise don't alter state
+#if DEBUG_VALIDATE || DEBUG_COIN
+void SkOpGlobalState::debugSetPhase(const char* funcName DEBUG_COIN_DECLARE_PARAMS()) const {
+ auto writable = const_cast<SkOpGlobalState*>(this);
+#if DEBUG_VALIDATE
+ writable->setPhase(phase);
+#endif
+#if DEBUG_COIN
+ SkPathOpsDebug::CoinDictEntry* entry = &writable->fCoinDictEntry;
+ writable->fPreviousFuncName = entry->fFunctionName;
+ entry->fIteration = iteration;
+ entry->fLineNumber = lineNo;
+ entry->fGlitchType = SkPathOpsDebug::kUninitialized_Glitch;
+ entry->fFunctionName = funcName;
+ writable->fCoinVisitedDict.add(*entry);
+ writable->debugAddToCoinChangedDict();
+#endif
+}
+#endif
+
+#if DEBUG_T_SECT_LOOP_COUNT
+void SkIntersections::debugBumpLoopCount(DebugLoop index) {
+ fDebugLoopCount[index]++;
+}
+
+int SkIntersections::debugLoopCount(DebugLoop index) const {
+ return fDebugLoopCount[index];
+}
+
+void SkIntersections::debugResetLoopCount() {
+ sk_bzero(fDebugLoopCount, sizeof(fDebugLoopCount));
+}
+#endif
+
+#include "SkPathOpsCubic.h"
+#include "SkPathOpsQuad.h"
+
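+// degree-elevate the quad: the cubic's inner control points are (q0 + 2 * q1) / 3 and
+// (q2 + 2 * q1) / 3, so both curves trace the same parametric path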
+SkDCubic SkDQuad::debugToCubic() const {
+ SkDCubic cubic;
+ cubic[0] = fPts[0];
+ cubic[2] = fPts[1];
+ cubic[3] = fPts[2];
+ cubic[1].fX = (cubic[0].fX + cubic[2].fX * 2) / 3;
+ cubic[1].fY = (cubic[0].fY + cubic[2].fY * 2) / 3;
+ cubic[2].fX = (cubic[3].fX + cubic[2].fX * 2) / 3;
+ cubic[2].fY = (cubic[3].fY + cubic[2].fY * 2) / 3;
+ return cubic;
+}
+
+void SkDRect::debugInit() {
+ fLeft = fTop = fRight = fBottom = SK_ScalarNaN;
+}
+
+#include "SkOpAngle.h"
+#include "SkOpSegment.h"
+
+#if DEBUG_COIN
+// commented-out lines keep this in sync with addT()
+ const SkOpPtT* SkOpSegment::debugAddT(double t, SkPathOpsDebug::GlitchLog* log) const {
+ debugValidate();
+ SkPoint pt = this->ptAtT(t);
+ const SkOpSpanBase* span = &fHead;
+ do {
+ const SkOpPtT* result = span->ptT();
+ if (t == result->fT || this->match(result, this, t, pt)) {
+// span->bumpSpanAdds();
+ return result;
+ }
+ if (t < result->fT) {
+ const SkOpSpan* prev = result->span()->prev();
+ FAIL_WITH_NULL_IF(!prev, span);
+ // marks in global state that new op span has been allocated
+ this->globalState()->setAllocatedOpSpan();
+// span->init(this, prev, t, pt);
+ this->debugValidate();
+// #if DEBUG_ADD_T
+// SkDebugf("%s insert t=%1.9g segID=%d spanID=%d\n", __FUNCTION__, t,
+// span->segment()->debugID(), span->debugID());
+// #endif
+// span->bumpSpanAdds();
+ return nullptr;
+ }
+ FAIL_WITH_NULL_IF(span != &fTail, span);
+ } while ((span = span->upCast()->next()));
+ SkASSERT(0);
+ return nullptr; // we never get here, but need this to satisfy compiler
+}
+#endif
+
+#if DEBUG_ANGLE
+void SkOpSegment::debugCheckAngleCoin() const {
+ const SkOpSpanBase* base = &fHead;
+ const SkOpSpan* span;
+ do {
+ const SkOpAngle* angle = base->fromAngle();
+ if (angle && angle->debugCheckCoincidence()) {
+ angle->debugCheckNearCoincidence();
+ }
+ if (base->final()) {
+ break;
+ }
+ span = base->upCast();
+ angle = span->toAngle();
+ if (angle && angle->debugCheckCoincidence()) {
+ angle->debugCheckNearCoincidence();
+ }
+ } while ((base = span->next()));
+}
+#endif
+
+#if DEBUG_COIN
+// this mimics the order of the checks in handle coincidence
+void SkOpSegment::debugCheckHealth(SkPathOpsDebug::GlitchLog* glitches) const {
+ debugMoveMultiples(glitches);
+ debugMoveNearby(glitches);
+ debugMissingCoincidence(glitches);
+}
+
+// commented-out lines keep this in sync with clearAll()
+void SkOpSegment::debugClearAll(SkPathOpsDebug::GlitchLog* glitches) const {
+ const SkOpSpan* span = &fHead;
+ do {
+ this->debugClearOne(span, glitches);
+ } while ((span = span->next()->upCastable()));
+ this->globalState()->coincidence()->debugRelease(glitches, this);
+}
+
+// commented-out lines keep this in sync with clearOne()
+void SkOpSegment::debugClearOne(const SkOpSpan* span, SkPathOpsDebug::GlitchLog* glitches) const {
+ if (span->windValue()) glitches->record(SkPathOpsDebug::kCollapsedWindValue_Glitch, span);
+ if (span->oppValue()) glitches->record(SkPathOpsDebug::kCollapsedOppValue_Glitch, span);
+ if (!span->done()) glitches->record(SkPathOpsDebug::kCollapsedDone_Glitch, span);
+}
+#endif
+
+SkOpAngle* SkOpSegment::debugLastAngle() {
+ SkOpAngle* result = nullptr;
+ SkOpSpan* span = this->head();
+ do {
+ if (span->toAngle()) {
+ SkASSERT(!result);
+ result = span->toAngle();
+ }
+ } while ((span = span->next()->upCastable()));
+ SkASSERT(result);
+ return result;
+}
+
+#if DEBUG_COIN
+// commented-out lines keep this in sync with ClearVisited
+void SkOpSegment::DebugClearVisited(const SkOpSpanBase* span) {
+ // reset visited flag back to false
+ do {
+ const SkOpPtT* ptT = span->ptT(), * stopPtT = ptT;
+ while ((ptT = ptT->next()) != stopPtT) {
+ const SkOpSegment* opp = ptT->segment();
+ opp->resetDebugVisited();
+ }
+ } while (!span->final() && (span = span->upCast()->next()));
+}
+#endif
+
+#if DEBUG_COIN
+// commented-out lines keep this in sync with missingCoincidence()
+// look for pairs of undetected coincident curves
+// assumes that segments going in have visited flag clear
+// Even though pairs of curves correctly detect coincident runs, a run may be missed
+// if the coincidence is a product of multiple intersections. For instance, given
+// curves A, B, and C:
+// A-B intersect at point 1; A-C and B-C intersect at point 2, which lies so near
+// the end of C that the intersection is replaced with the end of C.
+// Even though A-B correctly do not detect an intersection at point 2,
+// the resulting run from point 1 to point 2 is coincident on A and B.
+void SkOpSegment::debugMissingCoincidence(SkPathOpsDebug::GlitchLog* log) const {
+ if (this->done()) {
+ return;
+ }
+ const SkOpSpan* prior = nullptr;
+ const SkOpSpanBase* spanBase = &fHead;
+// bool result = false;
+ do {
+ const SkOpPtT* ptT = spanBase->ptT(), * spanStopPtT = ptT;
+ SkASSERT(ptT->span() == spanBase);
+ while ((ptT = ptT->next()) != spanStopPtT) {
+ if (ptT->deleted()) {
+ continue;
+ }
+ const SkOpSegment* opp = ptT->span()->segment();
+ if (opp->done()) {
+ continue;
+ }
+ // when opp is encountered the 1st time, continue; on 2nd encounter, look for coincidence
+ if (!opp->debugVisited()) {
+ continue;
+ }
+ if (spanBase == &fHead) {
+ continue;
+ }
+ if (ptT->segment() == this) {
+ continue;
+ }
+ const SkOpSpan* span = spanBase->upCastable();
+ // FIXME?: this assumes that if the opposite segment is coincident then no more
+ // coincidence needs to be detected. This may not be true.
+ if (span && span->segment() != opp && span->containsCoincidence(opp)) { // debug has additional condition since it may be called before inner duplicate points have been deleted
+ continue;
+ }
+ if (spanBase->segment() != opp && spanBase->containsCoinEnd(opp)) { // debug has additional condition since it may be called before inner duplicate points have been deleted
+ continue;
+ }
+ const SkOpPtT* priorPtT = nullptr, * priorStopPtT;
+ // find prior span containing opp segment
+ const SkOpSegment* priorOpp = nullptr;
+ const SkOpSpan* priorTest = spanBase->prev();
+ while (!priorOpp && priorTest) {
+ priorStopPtT = priorPtT = priorTest->ptT();
+ while ((priorPtT = priorPtT->next()) != priorStopPtT) {
+ if (priorPtT->deleted()) {
+ continue;
+ }
+ const SkOpSegment* segment = priorPtT->span()->segment();
+ if (segment == opp) {
+ prior = priorTest;
+ priorOpp = opp;
+ break;
+ }
+ }
+ priorTest = priorTest->prev();
+ }
+ if (!priorOpp) {
+ continue;
+ }
+ if (priorPtT == ptT) {
+ continue;
+ }
+ const SkOpPtT* oppStart = prior->ptT();
+ const SkOpPtT* oppEnd = spanBase->ptT();
+ bool swapped = priorPtT->fT > ptT->fT;
+ if (swapped) {
+ SkTSwap(priorPtT, ptT);
+ SkTSwap(oppStart, oppEnd);
+ }
+ const SkOpCoincidence* coincidence = this->globalState()->coincidence();
+ const SkOpPtT* rootPriorPtT = priorPtT->span()->ptT();
+ const SkOpPtT* rootPtT = ptT->span()->ptT();
+ const SkOpPtT* rootOppStart = oppStart->span()->ptT();
+ const SkOpPtT* rootOppEnd = oppEnd->span()->ptT();
+ if (coincidence->contains(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd)) {
+ goto swapBack;
+ }
+ if (testForCoincidence(rootPriorPtT, rootPtT, prior, spanBase, opp)) {
+ // mark coincidence
+#if DEBUG_COINCIDENCE_VERBOSE
+// SkDebugf("%s coinSpan=%d endSpan=%d oppSpan=%d oppEndSpan=%d\n", __FUNCTION__,
+// rootPriorPtT->debugID(), rootPtT->debugID(), rootOppStart->debugID(),
+// rootOppEnd->debugID());
+#endif
+ log->record(SkPathOpsDebug::kMissingCoin_Glitch, priorPtT, ptT, oppStart, oppEnd);
+ // coincidences->add(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd);
+ // }
+#if DEBUG_COINCIDENCE
+// SkASSERT(coincidences->contains(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd);
+#endif
+ // result = true;
+ }
+ swapBack:
+ if (swapped) {
+ SkTSwap(priorPtT, ptT);
+ }
+ }
+ } while ((spanBase = spanBase->final() ? nullptr : spanBase->upCast()->next()));
+ DebugClearVisited(&fHead);
+ return;
+}
+
+// commented-out lines keep this in sync with moveMultiples()
+// if a span has more than one intersection, merge the other segments' span as needed
+void SkOpSegment::debugMoveMultiples(SkPathOpsDebug::GlitchLog* glitches) const {
+ debugValidate();
+ const SkOpSpanBase* test = &fHead;
+ do {
+ int addCount = test->spanAddsCount();
+ SkASSERT(addCount >= 1);
+ if (addCount == 1) {
+ continue;
+ }
+ const SkOpPtT* startPtT = test->ptT();
+ const SkOpPtT* testPtT = startPtT;
+ do { // iterate through all spans associated with start
+ const SkOpSpanBase* oppSpan = testPtT->span();
+ if (oppSpan->spanAddsCount() == addCount) {
+ continue;
+ }
+ if (oppSpan->deleted()) {
+ continue;
+ }
+ const SkOpSegment* oppSegment = oppSpan->segment();
+ if (oppSegment == this) {
+ continue;
+ }
+ // find range of spans to consider merging
+ const SkOpSpanBase* oppPrev = oppSpan;
+ const SkOpSpanBase* oppFirst = oppSpan;
+ while ((oppPrev = oppPrev->prev())) {
+ if (!roughly_equal(oppPrev->t(), oppSpan->t())) {
+ break;
+ }
+ if (oppPrev->spanAddsCount() == addCount) {
+ continue;
+ }
+ if (oppPrev->deleted()) {
+ continue;
+ }
+ oppFirst = oppPrev;
+ }
+ const SkOpSpanBase* oppNext = oppSpan;
+ const SkOpSpanBase* oppLast = oppSpan;
+ while ((oppNext = oppNext->final() ? nullptr : oppNext->upCast()->next())) {
+ if (!roughly_equal(oppNext->t(), oppSpan->t())) {
+ break;
+ }
+ if (oppNext->spanAddsCount() == addCount) {
+ continue;
+ }
+ if (oppNext->deleted()) {
+ continue;
+ }
+ oppLast = oppNext;
+ }
+ if (oppFirst == oppLast) {
+ continue;
+ }
+ const SkOpSpanBase* oppTest = oppFirst;
+ do {
+ if (oppTest == oppSpan) {
+ continue;
+ }
+ // check to see if the candidate meets specific criteria:
+ // it contains spans of segments in test's loop but not including 'this'
+ const SkOpPtT* oppStartPtT = oppTest->ptT();
+ const SkOpPtT* oppPtT = oppStartPtT;
+ while ((oppPtT = oppPtT->next()) != oppStartPtT) {
+ const SkOpSegment* oppPtTSegment = oppPtT->segment();
+ if (oppPtTSegment == this) {
+ goto tryNextSpan;
+ }
+ const SkOpPtT* matchPtT = startPtT;
+ do {
+ if (matchPtT->segment() == oppPtTSegment) {
+ goto foundMatch;
+ }
+ } while ((matchPtT = matchPtT->next()) != startPtT);
+ goto tryNextSpan;
+ foundMatch: // merge oppTest and oppSpan
+ oppSegment->debugValidate();
+ oppTest->debugMergeMatches(glitches, oppSpan);
+ oppTest->debugAddOpp(glitches, oppSpan);
+ oppSegment->debugValidate();
+ goto checkNextSpan;
+ }
+ tryNextSpan:
+ ;
+ } while (oppTest != oppLast && (oppTest = oppTest->upCast()->next()));
+ } while ((testPtT = testPtT->next()) != startPtT);
+checkNextSpan:
+ ;
+ } while ((test = test->final() ? nullptr : test->upCast()->next()));
+ debugValidate();
+ return;
+}
+
+// commented-out lines keep this in sync with moveNearby()
+// Move nearby t values and pts so they all hang off the same span. Alignment happens later.
+void SkOpSegment::debugMoveNearby(SkPathOpsDebug::GlitchLog* glitches) const {
+ debugValidate();
+ // release undeleted spans pointing to this seg that are linked to the primary span
+ const SkOpSpanBase* spanBase = &fHead;
+ do {
+ const SkOpPtT* ptT = spanBase->ptT();
+ const SkOpPtT* headPtT = ptT;
+ while ((ptT = ptT->next()) != headPtT) {
+ const SkOpSpanBase* test = ptT->span();
+ if (ptT->segment() == this && !ptT->deleted() && test != spanBase
+ && test->ptT() == ptT) {
+ if (test->final()) {
+ if (spanBase == &fHead) {
+ glitches->record(SkPathOpsDebug::kMoveNearbyClearAll_Glitch, this);
+// return;
+ }
+ glitches->record(SkPathOpsDebug::kMoveNearbyReleaseFinal_Glitch, spanBase, ptT);
+ } else if (test->prev()) {
+ glitches->record(SkPathOpsDebug::kMoveNearbyRelease_Glitch, test, headPtT);
+ }
+// break;
+ }
+ }
+ spanBase = spanBase->upCast()->next();
+ } while (!spanBase->final());
+
+ // This loop looks for adjacent spans which are near by
+ spanBase = &fHead;
+ do { // iterate through all spans associated with start
+ const SkOpSpanBase* test = spanBase->upCast()->next();
+ if (this->spansNearby(spanBase, test)) {
+ if (test->final()) {
+ if (spanBase->prev()) {
+ glitches->record(SkPathOpsDebug::kMoveNearbyMergeFinal_Glitch, test);
+ } else {
+ glitches->record(SkPathOpsDebug::kMoveNearbyClearAll2_Glitch, this);
+ // return
+ }
+ } else {
+ glitches->record(SkPathOpsDebug::kMoveNearbyMerge_Glitch, spanBase);
+ }
+ }
+ spanBase = test;
+ } while (!spanBase->final());
+ debugValidate();
+}
+#endif
+
+void SkOpSegment::debugReset() {
+ this->init(this->fPts, this->fWeight, this->contour(), this->verb());
+}
+
+#if DEBUG_COINCIDENCE_ORDER
+void SkOpSegment::debugSetCoinT(int index, SkScalar t) const {
+ if (fDebugBaseMax < 0 || fDebugBaseIndex == index) {
+ fDebugBaseIndex = index;
+ fDebugBaseMin = SkTMin(t, fDebugBaseMin);
+ fDebugBaseMax = SkTMax(t, fDebugBaseMax);
+ return;
+ }
+ SkASSERT(fDebugBaseMin >= t || t >= fDebugBaseMax);
+ if (fDebugLastMax < 0 || fDebugLastIndex == index) {
+ fDebugLastIndex = index;
+ fDebugLastMin = SkTMin(t, fDebugLastMin);
+ fDebugLastMax = SkTMax(t, fDebugLastMax);
+ return;
+ }
+ SkASSERT(fDebugLastMin >= t || t >= fDebugLastMax);
+ SkASSERT((t - fDebugBaseMin > 0) == (fDebugLastMin - fDebugBaseMin > 0));
+}
+#endif
+
+#if DEBUG_ACTIVE_SPANS
+void SkOpSegment::debugShowActiveSpans() const {
+ debugValidate();
+ if (done()) {
+ return;
+ }
+ int lastId = -1;
+ double lastT = -1;
+ const SkOpSpan* span = &fHead;
+ do {
+ if (span->done()) {
+ continue;
+ }
+ if (lastId == this->debugID() && lastT == span->t()) {
+ continue;
+ }
+ lastId = this->debugID();
+ lastT = span->t();
+ SkDebugf("%s id=%d", __FUNCTION__, this->debugID());
+ // since endpoints may have been adjusted, show the actual computed curves
+ SkDCurve curvePart;
+ this->subDivide(span, span->next(), &curvePart);
+ const SkDPoint* pts = curvePart.fCubic.fPts;
+ SkDebugf(" (%1.9g,%1.9g", pts[0].fX, pts[0].fY);
+ for (int vIndex = 1; vIndex <= SkPathOpsVerbToPoints(fVerb); ++vIndex) {
+ SkDebugf(" %1.9g,%1.9g", pts[vIndex].fX, pts[vIndex].fY);
+ }
+ if (SkPath::kConic_Verb == fVerb) {
+ SkDebugf(" %1.9gf", curvePart.fConic.fWeight);
+ }
+ SkDebugf(") t=%1.9g tEnd=%1.9g", span->t(), span->next()->t());
+ if (span->windSum() == SK_MinS32) {
+ SkDebugf(" windSum=?");
+ } else {
+ SkDebugf(" windSum=%d", span->windSum());
+ }
+ if (span->oppValue() && span->oppSum() == SK_MinS32) {
+ SkDebugf(" oppSum=?");
+ } else if (span->oppValue() || span->oppSum() != SK_MinS32) {
+ SkDebugf(" oppSum=%d", span->oppSum());
+ }
+ SkDebugf(" windValue=%d", span->windValue());
+ if (span->oppValue() || span->oppSum() != SK_MinS32) {
+ SkDebugf(" oppValue=%d", span->oppValue());
+ }
+ SkDebugf("\n");
+ } while ((span = span->next()->upCastable()));
+}
+#endif
+
+#if DEBUG_MARK_DONE
+void SkOpSegment::debugShowNewWinding(const char* fun, const SkOpSpan* span, int winding) {
+ const SkPoint& pt = span->ptT()->fPt;
+ SkDebugf("%s id=%d", fun, this->debugID());
+ SkDebugf(" (%1.9g,%1.9g", fPts[0].fX, fPts[0].fY);
+ for (int vIndex = 1; vIndex <= SkPathOpsVerbToPoints(fVerb); ++vIndex) {
+ SkDebugf(" %1.9g,%1.9g", fPts[vIndex].fX, fPts[vIndex].fY);
+ }
+ SkDebugf(") t=%1.9g [%d] (%1.9g,%1.9g) tEnd=%1.9g newWindSum=",
+ span->t(), span->debugID(), pt.fX, pt.fY, span->next()->t());
+ if (winding == SK_MinS32) {
+ SkDebugf("?");
+ } else {
+ SkDebugf("%d", winding);
+ }
+ SkDebugf(" windSum=");
+ if (span->windSum() == SK_MinS32) {
+ SkDebugf("?");
+ } else {
+ SkDebugf("%d", span->windSum());
+ }
+ SkDebugf(" windValue=%d\n", span->windValue());
+}
+
+void SkOpSegment::debugShowNewWinding(const char* fun, const SkOpSpan* span, int winding,
+ int oppWinding) {
+ const SkPoint& pt = span->ptT()->fPt;
+ SkDebugf("%s id=%d", fun, this->debugID());
+ SkDebugf(" (%1.9g,%1.9g", fPts[0].fX, fPts[0].fY);
+ for (int vIndex = 1; vIndex <= SkPathOpsVerbToPoints(fVerb); ++vIndex) {
+ SkDebugf(" %1.9g,%1.9g", fPts[vIndex].fX, fPts[vIndex].fY);
+ }
+ SkDebugf(") t=%1.9g [%d] (%1.9g,%1.9g) tEnd=%1.9g newWindSum=",
+ span->t(), span->debugID(), pt.fX, pt.fY, span->next()->t());
+ if (winding == SK_MinS32) {
+ SkDebugf("?");
+ } else {
+ SkDebugf("%d", winding);
+ }
+ SkDebugf(" newOppSum=");
+ if (oppWinding == SK_MinS32) {
+ SkDebugf("?");
+ } else {
+ SkDebugf("%d", oppWinding);
+ }
+ SkDebugf(" oppSum=");
+ if (span->oppSum() == SK_MinS32) {
+ SkDebugf("?");
+ } else {
+ SkDebugf("%d", span->oppSum());
+ }
+ SkDebugf(" windSum=");
+ if (span->windSum() == SK_MinS32) {
+ SkDebugf("?");
+ } else {
+ SkDebugf("%d", span->windSum());
+ }
+ SkDebugf(" windValue=%d oppValue=%d\n", span->windValue(), span->oppValue());
+}
+
+#endif
+
+// loop looking for a pair of angle parts that are too close to be sorted
+/* This is called after other more simple intersection and angle sorting tests have been exhausted.
+ This should be rarely called -- the test below is thorough and time consuming.
+ This checks the distance between start points; the distance between
+*/
+#if DEBUG_ANGLE
+void SkOpAngle::debugCheckNearCoincidence() const {
+ const SkOpAngle* test = this;
+ do {
+ const SkOpSegment* testSegment = test->segment();
+ double testStartT = test->start()->t();
+ SkDPoint testStartPt = testSegment->dPtAtT(testStartT);
+ double testEndT = test->end()->t();
+ SkDPoint testEndPt = testSegment->dPtAtT(testEndT);
+ double testLenSq = testStartPt.distanceSquared(testEndPt);
+ SkDebugf("%s testLenSq=%1.9g id=%d\n", __FUNCTION__, testLenSq, testSegment->debugID());
+ double testMidT = (testStartT + testEndT) / 2;
+ const SkOpAngle* next = test;
+ while ((next = next->fNext) != this) {
+ SkOpSegment* nextSegment = next->segment();
+ double testMidDistSq = testSegment->distSq(testMidT, next);
+ double testEndDistSq = testSegment->distSq(testEndT, next);
+ double nextStartT = next->start()->t();
+ SkDPoint nextStartPt = nextSegment->dPtAtT(nextStartT);
+ double distSq = testStartPt.distanceSquared(nextStartPt);
+ double nextEndT = next->end()->t();
+ double nextMidT = (nextStartT + nextEndT) / 2;
+ double nextMidDistSq = nextSegment->distSq(nextMidT, test);
+ double nextEndDistSq = nextSegment->distSq(nextEndT, test);
+ SkDebugf("%s distSq=%1.9g testId=%d nextId=%d\n", __FUNCTION__, distSq,
+ testSegment->debugID(), nextSegment->debugID());
+ SkDebugf("%s testMidDistSq=%1.9g\n", __FUNCTION__, testMidDistSq);
+ SkDebugf("%s testEndDistSq=%1.9g\n", __FUNCTION__, testEndDistSq);
+ SkDebugf("%s nextMidDistSq=%1.9g\n", __FUNCTION__, nextMidDistSq);
+ SkDebugf("%s nextEndDistSq=%1.9g\n", __FUNCTION__, nextEndDistSq);
+ SkDPoint nextEndPt = nextSegment->dPtAtT(nextEndT);
+ double nextLenSq = nextStartPt.distanceSquared(nextEndPt);
+ SkDebugf("%s nextLenSq=%1.9g\n", __FUNCTION__, nextLenSq);
+ SkDebugf("\n");
+ }
+ test = test->fNext;
+ } while (test->fNext != this);
+}
+#endif
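+
+// Illustration (not part of the original Skia sources): the squared distances above are only
+// logged, never acted on. In the output, a pair of angle parts is suspiciously close to
+// coincident when distSq and the mid/end distances are tiny relative to the part lengths,
+// e.g. distSq on the order of 1e-12 while testLenSq and nextLenSq are on the order of 1.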
+
+#if DEBUG_ANGLE
+SkString SkOpAngle::debugPart() const {
+ SkString result;
+ switch (this->segment()->verb()) {
+ case SkPath::kLine_Verb:
+ result.printf(LINE_DEBUG_STR " id=%d", LINE_DEBUG_DATA(fPart.fCurve),
+ this->segment()->debugID());
+ break;
+ case SkPath::kQuad_Verb:
+ result.printf(QUAD_DEBUG_STR " id=%d", QUAD_DEBUG_DATA(fPart.fCurve),
+ this->segment()->debugID());
+ break;
+ case SkPath::kConic_Verb:
+ result.printf(CONIC_DEBUG_STR " id=%d",
+ CONIC_DEBUG_DATA(fPart.fCurve, fPart.fCurve.fConic.fWeight),
+ this->segment()->debugID());
+ break;
+ case SkPath::kCubic_Verb:
+ result.printf(CUBIC_DEBUG_STR " id=%d", CUBIC_DEBUG_DATA(fPart.fCurve),
+ this->segment()->debugID());
+ break;
+ default:
+ SkASSERT(0);
+ }
+ return result;
+}
+#endif
+
+#if DEBUG_SORT
+void SkOpAngle::debugLoop() const {
+ const SkOpAngle* first = this;
+ const SkOpAngle* next = this;
+ do {
+ next->dumpOne(true);
+ SkDebugf("\n");
+ next = next->fNext;
+ } while (next && next != first);
+ next = first;
+ do {
+ next->debugValidate();
+ next = next->fNext;
+ } while (next && next != first);
+}
+#endif
+
+void SkOpAngle::debugValidate() const {
+#if DEBUG_COINCIDENCE
+ if (this->globalState()->debugCheckHealth()) {
+ return;
+ }
+#endif
+#if DEBUG_VALIDATE
+ const SkOpAngle* first = this;
+ const SkOpAngle* next = this;
+ int wind = 0;
+ int opp = 0;
+ int lastXor = -1;
+ int lastOppXor = -1;
+ do {
+ if (next->unorderable()) {
+ return;
+ }
+ const SkOpSpan* minSpan = next->start()->starter(next->end());
+ if (minSpan->windValue() == SK_MinS32) {
+ return;
+ }
+ bool op = next->segment()->operand();
+ bool isXor = next->segment()->isXor();
+ bool oppXor = next->segment()->oppXor();
+ SkASSERT(!DEBUG_LIMIT_WIND_SUM || between(0, minSpan->windValue(), DEBUG_LIMIT_WIND_SUM));
+ SkASSERT(!DEBUG_LIMIT_WIND_SUM
+ || between(-DEBUG_LIMIT_WIND_SUM, minSpan->oppValue(), DEBUG_LIMIT_WIND_SUM));
+ bool useXor = op ? oppXor : isXor;
+ SkASSERT(lastXor == -1 || lastXor == (int) useXor);
+ lastXor = (int) useXor;
+ wind += next->debugSign() * (op ? minSpan->oppValue() : minSpan->windValue());
+ if (useXor) {
+ wind &= 1;
+ }
+ useXor = op ? isXor : oppXor;
+ SkASSERT(lastOppXor == -1 || lastOppXor == (int) useXor);
+ lastOppXor = (int) useXor;
+ opp += next->debugSign() * (op ? minSpan->windValue() : minSpan->oppValue());
+ if (useXor) {
+ opp &= 1;
+ }
+ next = next->fNext;
+ } while (next && next != first);
+ SkASSERT(wind == 0 || !FLAGS_runFail);
+ SkASSERT(opp == 0 || !FLAGS_runFail);
+#endif
+}
+
+void SkOpAngle::debugValidateNext() const {
+#if !FORCE_RELEASE
+ const SkOpAngle* first = this;
+ const SkOpAngle* next = first;
+ SkTDArray<const SkOpAngle*> angles;
+ do {
+// SkASSERT_RELEASE(next->fSegment->debugContains(next));
+ angles.push(next);
+ next = next->next();
+ if (next == first) {
+ break;
+ }
+ SkASSERT_RELEASE(!angles.contains(next));
+ if (!next) {
+ return;
+ }
+ } while (true);
+#endif
+}
+
+#ifdef SK_DEBUG
+void SkCoincidentSpans::debugStartCheck(const SkOpSpanBase* outer, const SkOpSpanBase* over,
+ const SkOpGlobalState* debugState) const {
+ SkASSERT(coinPtTEnd()->span() == over || !debugState->debugRunFail());
+ SkASSERT(oppPtTEnd()->span() == outer || !debugState->debugRunFail());
+}
+#endif
+
+#if DEBUG_COIN
+// sets the span's end to the ptT referenced by the previous-next
+void SkCoincidentSpans::debugCorrectOneEnd(SkPathOpsDebug::GlitchLog* log,
+ const SkOpPtT* (SkCoincidentSpans::* getEnd)() const,
+ void (SkCoincidentSpans::*setEnd)(const SkOpPtT* ptT) const ) const {
+ const SkOpPtT* origPtT = (this->*getEnd)();
+ const SkOpSpanBase* origSpan = origPtT->span();
+ const SkOpSpan* prev = origSpan->prev();
+ const SkOpPtT* testPtT = prev ? prev->next()->ptT()
+ : origSpan->upCast()->next()->prev()->ptT();
+ if (origPtT != testPtT) {
+ log->record(SkPathOpsDebug::kCorrectEnd_Glitch, this, origPtT, testPtT);
+ }
+}
+
+
+/* Commented-out lines keep this in sync with correctEnds */
+// FIXME: member pointers have fallen out of favor and can be replaced with
+// an alternative approach.
+// makes all span ends agree with the segment's spans that define them
+void SkCoincidentSpans::debugCorrectEnds(SkPathOpsDebug::GlitchLog* log) const {
+ this->debugCorrectOneEnd(log, &SkCoincidentSpans::coinPtTStart, nullptr);
+ this->debugCorrectOneEnd(log, &SkCoincidentSpans::coinPtTEnd, nullptr);
+ this->debugCorrectOneEnd(log, &SkCoincidentSpans::oppPtTStart, nullptr);
+ this->debugCorrectOneEnd(log, &SkCoincidentSpans::oppPtTEnd, nullptr);
+}
+
+/* Commented-out lines keep this in sync with expand */
+// expand the range by checking adjacent spans for coincidence
+bool SkCoincidentSpans::debugExpand(SkPathOpsDebug::GlitchLog* log) const {
+ bool expanded = false;
+ const SkOpSegment* segment = coinPtTStart()->segment();
+ const SkOpSegment* oppSegment = oppPtTStart()->segment();
+ do {
+ const SkOpSpan* start = coinPtTStart()->span()->upCast();
+ const SkOpSpan* prev = start->prev();
+ const SkOpPtT* oppPtT;
+ if (!prev || !(oppPtT = prev->contains(oppSegment))) {
+ break;
+ }
+ double midT = (prev->t() + start->t()) / 2;
+ if (!segment->isClose(midT, oppSegment)) {
+ break;
+ }
+ if (log) log->record(SkPathOpsDebug::kExpandCoin_Glitch, this, prev->ptT(), oppPtT);
+ expanded = true;
+ } while (false); // the non-debug expand() continues while expansion is possible
+ do {
+ const SkOpSpanBase* end = coinPtTEnd()->span();
+ SkOpSpanBase* next = end->final() ? nullptr : end->upCast()->next();
+ if (next && next->deleted()) {
+ break;
+ }
+ const SkOpPtT* oppPtT;
+ if (!next || !(oppPtT = next->contains(oppSegment))) {
+ break;
+ }
+ double midT = (end->t() + next->t()) / 2;
+ if (!segment->isClose(midT, oppSegment)) {
+ break;
+ }
+ if (log) log->record(SkPathOpsDebug::kExpandCoin_Glitch, this, next->ptT(), oppPtT);
+ expanded = true;
+ } while (false); // the non-debug expand() continues while expansion is possible
+ return expanded;
+}
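+
+// Illustration (not part of the original Skia sources): each do { ... } while (false) block
+// above runs once and records a glitch where the non-debug expand() would actually grow the
+// coincident run -- moving its start back to prev->ptT() (or its end forward to next->ptT())
+// and looping until the neighboring span no longer contains the opposite segment or the
+// midpoint test fails.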
+
+// description below
+void SkOpCoincidence::debugAddEndMovedSpans(SkPathOpsDebug::GlitchLog* log, const SkOpSpan* base, const SkOpSpanBase* testSpan) const {
+ const SkOpPtT* testPtT = testSpan->ptT();
+ const SkOpPtT* stopPtT = testPtT;
+ const SkOpSegment* baseSeg = base->segment();
+ while ((testPtT = testPtT->next()) != stopPtT) {
+ const SkOpSegment* testSeg = testPtT->segment();
+ if (testPtT->deleted()) {
+ continue;
+ }
+ if (testSeg == baseSeg) {
+ continue;
+ }
+ if (testPtT->span()->ptT() != testPtT) {
+ continue;
+ }
+ if (this->contains(baseSeg, testSeg, testPtT->fT)) {
+ continue;
+ }
+ // intersect perp with base->ptT() with testPtT->segment()
+ SkDVector dxdy = baseSeg->dSlopeAtT(base->t());
+ const SkPoint& pt = base->pt();
+ SkDLine ray = {{{pt.fX, pt.fY}, {pt.fX + dxdy.fY, pt.fY - dxdy.fX}}};
+ SkIntersections i;
+ (*CurveIntersectRay[testSeg->verb()])(testSeg->pts(), testSeg->weight(), ray, &i);
+ for (int index = 0; index < i.used(); ++index) {
+ double t = i[0][index];
+ if (!between(0, t, 1)) {
+ continue;
+ }
+ SkDPoint oppPt = i.pt(index);
+ if (!oppPt.approximatelyEqual(pt)) {
+ continue;
+ }
+ SkOpSegment* writableSeg = const_cast<SkOpSegment*>(testSeg);
+ SkOpPtT* oppStart = writableSeg->addT(t);
+ if (oppStart == testPtT) {
+ continue;
+ }
+ SkOpSpan* writableBase = const_cast<SkOpSpan*>(base);
+ oppStart->span()->addOpp(writableBase);
+ if (oppStart->deleted()) {
+ continue;
+ }
+ SkOpSegment* coinSeg = base->segment();
+ SkOpSegment* oppSeg = oppStart->segment();
+ double coinTs, coinTe, oppTs, oppTe;
+ if (Ordered(coinSeg, oppSeg)) {
+ coinTs = base->t();
+ coinTe = testSpan->t();
+ oppTs = oppStart->fT;
+ oppTe = testPtT->fT;
+ } else {
+ SkTSwap(coinSeg, oppSeg);
+ coinTs = oppStart->fT;
+ coinTe = testPtT->fT;
+ oppTs = base->t();
+ oppTe = testSpan->t();
+ }
+ if (coinTs > coinTe) {
+ SkTSwap(coinTs, coinTe);
+ SkTSwap(oppTs, oppTe);
+ }
+ bool added;
+ if (this->debugAddOrOverlap(log, coinSeg, oppSeg, coinTs, coinTe, oppTs, oppTe, &added), false) {
+ return;
+ }
+ }
+ }
+ return;
+}
+
+// description below
+void SkOpCoincidence::debugAddEndMovedSpans(SkPathOpsDebug::GlitchLog* log, const SkOpPtT* ptT) const {
+ FAIL_IF(!ptT->span()->upCastable(), ptT->span());
+ const SkOpSpan* base = ptT->span()->upCast();
+ const SkOpSpan* prev = base->prev();
+ FAIL_IF(!prev, ptT->span());
+ if (!prev->isCanceled()) {
+ if (this->debugAddEndMovedSpans(log, base, base->prev()), false) {
+ return;
+ }
+ }
+ if (!base->isCanceled()) {
+ if (this->debugAddEndMovedSpans(log, base, base->next()), false) {
+ return;
+ }
+ }
+ return;
+}
+
+/* If A is coincident with B and B includes an endpoint, and A's matching point
+ is not the endpoint (i.e., there's an implied line connecting B-end and A)
+ then assume that the same implied line may intersect another curve close to B.
+ Since we only care about coincidence that was undetected, look at the
+ ptT list on B-segment adjacent to the B-end/A ptT loop (not in the loop, but
+ next door) and see if the A matching point is close enough to form another
+ coincident pair. If so, check for a new coincident span between B-end/A ptT loop
+ and the adjacent ptT loop.
+*/
+void SkOpCoincidence::debugAddEndMovedSpans(SkPathOpsDebug::GlitchLog* log) const {
+ const SkCoincidentSpans* span = fHead;
+ if (!span) {
+ return;
+ }
+// fTop = span;
+// fHead = nullptr;
+ do {
+ if (span->coinPtTStart()->fPt != span->oppPtTStart()->fPt) {
+ FAIL_IF(1 == span->coinPtTStart()->fT, span);
+ bool onEnd = span->coinPtTStart()->fT == 0;
+ bool oOnEnd = zero_or_one(span->oppPtTStart()->fT);
+ if (onEnd) {
+ if (!oOnEnd) { // if both are on end, any nearby intersect was already found
+ if (this->debugAddEndMovedSpans(log, span->oppPtTStart()), false) {
+ return;
+ }
+ }
+ } else if (oOnEnd) {
+ if (this->debugAddEndMovedSpans(log, span->coinPtTStart()), false) {
+ return;
+ }
+ }
+ }
+ if (span->coinPtTEnd()->fPt != span->oppPtTEnd()->fPt) {
+ bool onEnd = span->coinPtTEnd()->fT == 1;
+ bool oOnEnd = zero_or_one(span->oppPtTEnd()->fT);
+ if (onEnd) {
+ if (!oOnEnd) {
+ if (this->debugAddEndMovedSpans(log, span->oppPtTEnd()), false) {
+ return;
+ }
+ }
+ } else if (oOnEnd) {
+ if (this->debugAddEndMovedSpans(log, span->coinPtTEnd()), false) {
+ return;
+ }
+ }
+ }
+ } while ((span = span->next()));
+// this->restoreHead();
+ return;
+}
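+
+// Illustration (not part of the original Skia sources): the ray built in the first overload
+// above is perpendicular to the base segment at base->t(). With tangent (dx, dy) at point pt,
+// the ray runs from pt toward (pt.x + dy, pt.y - dx); for a horizontal tangent (1, 0) at the
+// origin this is the vertical line through (0, 0), so candidate curves are probed directly
+// across the base point rather than along it.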
+
+/* Commented-out lines keep this in sync with addExpanded */
+// for each coincident pair, match the spans
+// if the spans don't match, add the missing pt to the segment and loop it in the opposite span
+void SkOpCoincidence::debugAddExpanded(SkPathOpsDebug::GlitchLog* log) const {
+ const SkCoincidentSpans* coin = this->fHead;
+ if (!coin) {
+ return;
+ }
+ do {
+ const SkOpPtT* startPtT = coin->coinPtTStart();
+ const SkOpPtT* oStartPtT = coin->oppPtTStart();
+ double priorT = startPtT->fT;
+ double oPriorT = oStartPtT->fT;
+ FAIL_IF(startPtT->contains(oStartPtT), coin);
+ SkOPASSERT(coin->coinPtTEnd()->contains(coin->oppPtTEnd()));
+ const SkOpSpanBase* start = startPtT->span();
+ const SkOpSpanBase* oStart = oStartPtT->span();
+ const SkOpSpanBase* end = coin->coinPtTEnd()->span();
+ const SkOpSpanBase* oEnd = coin->oppPtTEnd()->span();
+ FAIL_IF(oEnd->deleted(), coin);
+ FAIL_IF(!start->upCastable(), coin);
+ const SkOpSpanBase* test = start->upCast()->next();
+ FAIL_IF(!coin->flipped() && !oStart->upCastable(), coin);
+ const SkOpSpanBase* oTest = coin->flipped() ? oStart->prev() : oStart->upCast()->next();
+ FAIL_IF(!oTest, coin);
+ const SkOpSegment* seg = start->segment();
+ const SkOpSegment* oSeg = oStart->segment();
+ while (test != end || oTest != oEnd) {
+ const SkOpPtT* containedOpp = test->ptT()->contains(oSeg);
+ const SkOpPtT* containedThis = oTest->ptT()->contains(seg);
+ if (!containedOpp || !containedThis) {
+ // choose the ends, or the first common pt-t list shared by both
+ double nextT, oNextT;
+ if (containedOpp) {
+ nextT = test->t();
+ oNextT = containedOpp->fT;
+ } else if (containedThis) {
+ nextT = containedThis->fT;
+ oNextT = oTest->t();
+ } else {
+ // iterate through until a pt-t list found that contains the other
+ const SkOpSpanBase* walk = test;
+ const SkOpPtT* walkOpp;
+ do {
+ FAIL_IF(!walk->upCastable(), coin);
+ walk = walk->upCast()->next();
+ } while (!(walkOpp = walk->ptT()->contains(oSeg))
+ && walk != coin->coinPtTEnd()->span());
+ nextT = walk->t();
+ oNextT = walkOpp->fT;
+ }
+ // use t ranges to guess which one is missing
+ double startRange = coin->coinPtTEnd()->fT - startPtT->fT;
+ FAIL_IF(!startRange, coin);
+ double startPart = (test->t() - startPtT->fT) / startRange;
+ double oStartRange = coin->oppPtTEnd()->fT - oStartPtT->fT;
+ FAIL_IF(!oStartRange, coin);
+ double oStartPart = (oTest->t() - oStartPtT->fT) / oStartRange;
+ FAIL_IF(startPart == oStartPart, coin);
+ bool addToOpp = !containedOpp && !containedThis ? startPart < oStartPart
+ : !!containedThis;
+ bool startOver = false;
+ addToOpp ? log->record(SkPathOpsDebug::kAddExpandedCoin_Glitch,
+ oPriorT + oStartRange * startPart, test)
+ : log->record(SkPathOpsDebug::kAddExpandedCoin_Glitch,
+ priorT + startRange * oStartPart, oTest);
+ // FAIL_IF(!success, coin);
+ if (startOver) {
+ test = start;
+ oTest = oStart;
+ }
+ end = coin->coinPtTEnd()->span();
+ oEnd = coin->oppPtTEnd()->span();
+ }
+ if (test != end) {
+ FAIL_IF(!test->upCastable(), coin);
+ priorT = test->t();
+ test = test->upCast()->next();
+ }
+ if (oTest != oEnd) {
+ oPriorT = oTest->t();
+ oTest = coin->flipped() ? oTest->prev() : oTest->upCast()->next();
+ FAIL_IF(!oTest, coin);
+ }
+ }
+ } while ((coin = coin->next()));
+ return;
+}
+
+/* Commented-out lines keep this in sync with addIfMissing() */
+void SkOpCoincidence::debugAddIfMissing(SkPathOpsDebug::GlitchLog* log, const SkCoincidentSpans* outer, const SkOpPtT* over1s,
+ const SkOpPtT* over1e) const {
+// SkASSERT(fTop);
+ if (fTop && alreadyAdded(fTop, outer, over1s, over1e)) { // in debug, fTop may be null
+ return;
+ }
+ if (fHead && alreadyAdded(fHead, outer, over1s, over1e)) {
+ return;
+ }
+ log->record(SkPathOpsDebug::kAddIfMissingCoin_Glitch, outer->coinPtTStart(), outer->coinPtTEnd(), over1s, over1e);
+ this->debugValidate();
+ return;
+}
+
+/* Commented-out lines keep this in sync with addIfMissing() */
+// note that over1s, over1e, over2s, over2e are ordered
+void SkOpCoincidence::debugAddIfMissing(SkPathOpsDebug::GlitchLog* log, const SkOpPtT* over1s, const SkOpPtT* over2s,
+ double tStart, double tEnd, const SkOpSegment* coinSeg, const SkOpSegment* oppSeg, bool* added,
+ const SkOpPtT* over1e, const SkOpPtT* over2e) const {
+ SkASSERT(tStart < tEnd);
+ SkASSERT(over1s->fT < over1e->fT);
+ SkASSERT(between(over1s->fT, tStart, over1e->fT));
+ SkASSERT(between(over1s->fT, tEnd, over1e->fT));
+ SkASSERT(over2s->fT < over2e->fT);
+ SkASSERT(between(over2s->fT, tStart, over2e->fT));
+ SkASSERT(between(over2s->fT, tEnd, over2e->fT));
+ SkASSERT(over1s->segment() == over1e->segment());
+ SkASSERT(over2s->segment() == over2e->segment());
+ SkASSERT(over1s->segment() == over2s->segment());
+ SkASSERT(over1s->segment() != coinSeg);
+ SkASSERT(over1s->segment() != oppSeg);
+ SkASSERT(coinSeg != oppSeg);
+ double coinTs, coinTe, oppTs, oppTe;
+ coinTs = TRange(over1s, tStart, coinSeg SkDEBUGPARAMS(over1e));
+ coinTe = TRange(over1s, tEnd, coinSeg SkDEBUGPARAMS(over1e));
+ if (coinSeg->collapsed(coinTs, coinTe)) {
+ return log->record(SkPathOpsDebug::kAddIfCollapsed_Glitch, coinSeg);
+ }
+ oppTs = TRange(over2s, tStart, oppSeg SkDEBUGPARAMS(over2e));
+ oppTe = TRange(over2s, tEnd, oppSeg SkDEBUGPARAMS(over2e));
+ if (oppSeg->collapsed(oppTs, oppTe)) {
+ return log->record(SkPathOpsDebug::kAddIfCollapsed_Glitch, oppSeg);
+ }
+ if (coinTs > coinTe) {
+ SkTSwap(coinTs, coinTe);
+ SkTSwap(oppTs, oppTe);
+ }
+ return this->debugAddOrOverlap(log, coinSeg, oppSeg, coinTs, coinTe, oppTs, oppTe, added
+ );
+}
+
+/* Commented-out lines keep this in sync with addOrOverlap() */
+// If this is called by addEndMovedSpans(), a returned false propagates out to an abort.
+// If this is called by addIfMissing(), a returned false indicates there was nothing to add.
+void SkOpCoincidence::debugAddOrOverlap(SkPathOpsDebug::GlitchLog* log,
+ const SkOpSegment* coinSeg, const SkOpSegment* oppSeg,
+ double coinTs, double coinTe, double oppTs, double oppTe, bool* added) const {
+ SkTDArray<SkCoincidentSpans*> overlaps;
+ SkOPASSERT(!fTop); // this is (correctly) reversed in addIfMissing()
+ if (fTop && !this->checkOverlap(fTop, coinSeg, oppSeg, coinTs, coinTe, oppTs, oppTe,
+ &overlaps)) {
+ return;
+ }
+ if (fHead && !this->checkOverlap(fHead, coinSeg, oppSeg, coinTs,
+ coinTe, oppTs, oppTe, &overlaps)) {
+ return;
+ }
+ const SkCoincidentSpans* overlap = overlaps.count() ? overlaps[0] : nullptr;
+ for (int index = 1; index < overlaps.count(); ++index) { // combine overlaps before continuing
+ const SkCoincidentSpans* test = overlaps[index];
+ if (overlap->coinPtTStart()->fT > test->coinPtTStart()->fT) {
+ log->record(SkPathOpsDebug::kAddOrOverlap_Glitch, overlap, test->coinPtTStart());
+ }
+ if (overlap->coinPtTEnd()->fT < test->coinPtTEnd()->fT) {
+ log->record(SkPathOpsDebug::kAddOrOverlap_Glitch, overlap, test->coinPtTEnd());
+ }
+ if (overlap->flipped()
+ ? overlap->oppPtTStart()->fT < test->oppPtTStart()->fT
+ : overlap->oppPtTStart()->fT > test->oppPtTStart()->fT) {
+ log->record(SkPathOpsDebug::kAddOrOverlap_Glitch, overlap, test->oppPtTStart());
+ }
+ if (overlap->flipped()
+ ? overlap->oppPtTEnd()->fT > test->oppPtTEnd()->fT
+ : overlap->oppPtTEnd()->fT < test->oppPtTEnd()->fT) {
+ log->record(SkPathOpsDebug::kAddOrOverlap_Glitch, overlap, test->oppPtTEnd());
+ }
+ if (!fHead) { this->debugRelease(log, fHead, test);
+ this->debugRelease(log, fTop, test);
+ }
+ }
+ const SkOpPtT* cs = coinSeg->existing(coinTs, oppSeg);
+ const SkOpPtT* ce = coinSeg->existing(coinTe, oppSeg);
+ RETURN_FALSE_IF(overlap && cs && ce && overlap->contains(cs, ce), coinSeg);
+ RETURN_FALSE_IF(cs != ce || !cs, coinSeg);
+ const SkOpPtT* os = oppSeg->existing(oppTs, coinSeg);
+ const SkOpPtT* oe = oppSeg->existing(oppTe, coinSeg);
+ RETURN_FALSE_IF(overlap && os && oe && overlap->contains(os, oe), oppSeg);
+ SkASSERT(true || !cs || !cs->deleted());
+ SkASSERT(true || !os || !os->deleted());
+ SkASSERT(true || !ce || !ce->deleted());
+ SkASSERT(true || !oe || !oe->deleted());
+ const SkOpPtT* csExisting = !cs ? coinSeg->existing(coinTs, nullptr) : nullptr;
+ const SkOpPtT* ceExisting = !ce ? coinSeg->existing(coinTe, nullptr) : nullptr;
+ RETURN_FALSE_IF(csExisting && csExisting == ceExisting, coinSeg);
+ RETURN_FALSE_IF(csExisting && (csExisting == ce ||
+ csExisting->contains(ceExisting ? ceExisting : ce)), coinSeg);
+ RETURN_FALSE_IF(ceExisting && (ceExisting == cs ||
+ ceExisting->contains(csExisting ? csExisting : cs)), coinSeg);
+ const SkOpPtT* osExisting = !os ? oppSeg->existing(oppTs, nullptr) : nullptr;
+ const SkOpPtT* oeExisting = !oe ? oppSeg->existing(oppTe, nullptr) : nullptr;
+ RETURN_FALSE_IF(osExisting && osExisting == oeExisting, oppSeg);
+ RETURN_FALSE_IF(osExisting && (osExisting == oe ||
+ osExisting->contains(oeExisting ? oeExisting : oe)), oppSeg);
+ RETURN_FALSE_IF(oeExisting && (oeExisting == os ||
+ oeExisting->contains(osExisting ? osExisting : os)), oppSeg);
+ bool csDeleted = false, osDeleted = false, ceDeleted = false, oeDeleted = false;
+ this->debugValidate();
+ if (!cs || !os) {
+ if (!cs)
+ cs = coinSeg->debugAddT(coinTs, log);
+ if (!os)
+ os = oppSeg->debugAddT(oppTs, log);
+// RETURN_FALSE_IF(callerAborts, !csWritable || !osWritable);
+ if (cs && os) cs->span()->debugAddOpp(log, os->span());
+// cs = csWritable;
+// os = osWritable->active();
+ RETURN_FALSE_IF((ce && ce->deleted()) || (oe && oe->deleted()), coinSeg);
+ }
+ if (!ce || !oe) {
+ if (!ce)
+ ce = coinSeg->debugAddT(coinTe, log);
+ if (!oe)
+ oe = oppSeg->debugAddT(oppTe, log);
+ if (ce && oe) ce->span()->debugAddOpp(log, oe->span());
+// ce = ceWritable;
+// oe = oeWritable;
+ }
+ this->debugValidate();
+ RETURN_FALSE_IF(csDeleted, coinSeg);
+ RETURN_FALSE_IF(osDeleted, oppSeg);
+ RETURN_FALSE_IF(ceDeleted, coinSeg);
+ RETURN_FALSE_IF(oeDeleted, oppSeg);
+ RETURN_FALSE_IF(!cs || !ce || cs == ce || cs->contains(ce) || !os || !oe || os == oe || os->contains(oe), coinSeg);
+ bool result = true;
+ if (overlap) {
+ if (overlap->coinPtTStart()->segment() == coinSeg) {
+ log->record(SkPathOpsDebug::kAddMissingExtend_Glitch, coinSeg, coinTs, coinTe, oppSeg, oppTs, oppTe);
+ } else {
+ if (oppTs > oppTe) {
+ SkTSwap(coinTs, coinTe);
+ SkTSwap(oppTs, oppTe);
+ }
+ log->record(SkPathOpsDebug::kAddMissingExtend_Glitch, oppSeg, oppTs, oppTe, coinSeg, coinTs, coinTe);
+ }
+#if 0 && DEBUG_COINCIDENCE_VERBOSE
+ if (result) {
+ overlap->debugShow();
+ }
+#endif
+ } else {
+ log->record(SkPathOpsDebug::kAddMissingCoin_Glitch, coinSeg, coinTs, coinTe, oppSeg, oppTs, oppTe);
+#if 0 && DEBUG_COINCIDENCE_VERBOSE
+ fHead->debugShow();
+#endif
+ }
+ this->debugValidate();
+ return (void) result;
+}
+
+// Extra commented-out lines keep this in sync with addMissing()
+/* detects overlaps of different coincident runs on same segment */
+/* does not detect overlaps for pairs without any segments in common */
+// returns true if caller should loop again
+void SkOpCoincidence::debugAddMissing(SkPathOpsDebug::GlitchLog* log, bool* added) const {
+ const SkCoincidentSpans* outer = fHead;
+ *added = false;
+ if (!outer) {
+ return;
+ }
+ // fTop = outer;
+ // fHead = nullptr;
+ do {
+ // addIfMissing can modify the list that this is walking
+ // save the head so that the walker can iterate over the old data unperturbed
+ // addIfMissing adds to the head freely, then the saved head is appended at the end
+ const SkOpPtT* ocs = outer->coinPtTStart();
+ SkASSERT(!ocs->deleted());
+ const SkOpSegment* outerCoin = ocs->segment();
+ SkASSERT(!outerCoin->done()); // if it's done, should have already been removed from list
+ const SkOpPtT* oos = outer->oppPtTStart();
+ if (oos->deleted()) {
+ return;
+ }
+ const SkOpSegment* outerOpp = oos->segment();
+ SkASSERT(!outerOpp->done());
+// SkOpSegment* outerCoinWritable = const_cast<SkOpSegment*>(outerCoin);
+// SkOpSegment* outerOppWritable = const_cast<SkOpSegment*>(outerOpp);
+ const SkCoincidentSpans* inner = outer;
+ while ((inner = inner->next())) {
+ this->debugValidate();
+ double overS, overE;
+ const SkOpPtT* ics = inner->coinPtTStart();
+ SkASSERT(!ics->deleted());
+ const SkOpSegment* innerCoin = ics->segment();
+ SkASSERT(!innerCoin->done());
+ const SkOpPtT* ios = inner->oppPtTStart();
+ SkASSERT(!ios->deleted());
+ const SkOpSegment* innerOpp = ios->segment();
+ SkASSERT(!innerOpp->done());
+// SkOpSegment* innerCoinWritable = const_cast<SkOpSegment*>(innerCoin);
+// SkOpSegment* innerOppWritable = const_cast<SkOpSegment*>(innerOpp);
+ if (outerCoin == innerCoin) {
+ const SkOpPtT* oce = outer->coinPtTEnd();
+ if (oce->deleted()) {
+ return;
+ }
+ const SkOpPtT* ice = inner->coinPtTEnd();
+ SkASSERT(!ice->deleted());
+ if (outerOpp != innerOpp && this->overlap(ocs, oce, ics, ice, &overS, &overE)) {
+ this->debugAddIfMissing(log, ocs->starter(oce), ics->starter(ice),
+ overS, overE, outerOpp, innerOpp, added,
+ ocs->debugEnder(oce),
+ ics->debugEnder(ice));
+ }
+ } else if (outerCoin == innerOpp) {
+ const SkOpPtT* oce = outer->coinPtTEnd();
+ SkASSERT(!oce->deleted());
+ const SkOpPtT* ioe = inner->oppPtTEnd();
+ SkASSERT(!ioe->deleted());
+ if (outerOpp != innerCoin && this->overlap(ocs, oce, ios, ioe, &overS, &overE)) {
+ this->debugAddIfMissing(log, ocs->starter(oce), ios->starter(ioe),
+ overS, overE, outerOpp, innerCoin, added,
+ ocs->debugEnder(oce),
+ ios->debugEnder(ioe));
+ }
+ } else if (outerOpp == innerCoin) {
+ const SkOpPtT* ooe = outer->oppPtTEnd();
+ SkASSERT(!ooe->deleted());
+ const SkOpPtT* ice = inner->coinPtTEnd();
+ SkASSERT(!ice->deleted());
+ SkASSERT(outerCoin != innerOpp);
+ if (this->overlap(oos, ooe, ics, ice, &overS, &overE)) {
+ this->debugAddIfMissing(log, oos->starter(ooe), ics->starter(ice),
+ overS, overE, outerCoin, innerOpp, added,
+ oos->debugEnder(ooe),
+ ics->debugEnder(ice));
+ }
+ } else if (outerOpp == innerOpp) {
+ const SkOpPtT* ooe = outer->oppPtTEnd();
+ SkASSERT(!ooe->deleted());
+ const SkOpPtT* ioe = inner->oppPtTEnd();
+ if (ioe->deleted()) {
+ return;
+ }
+ SkASSERT(outerCoin != innerCoin);
+ if (this->overlap(oos, ooe, ios, ioe, &overS, &overE)) {
+ this->debugAddIfMissing(log, oos->starter(ooe), ios->starter(ioe),
+ overS, overE, outerCoin, innerCoin, added,
+ oos->debugEnder(ooe),
+ ios->debugEnder(ioe));
+ }
+ }
+ this->debugValidate();
+ }
+ } while ((outer = outer->next()));
+ // this->restoreHead();
+ return;
+}
+
+// Commented-out lines keep this in sync with release()
+void SkOpCoincidence::debugRelease(SkPathOpsDebug::GlitchLog* log, const SkCoincidentSpans* coin, const SkCoincidentSpans* remove) const {
+ const SkCoincidentSpans* head = coin;
+ const SkCoincidentSpans* prev = nullptr;
+ const SkCoincidentSpans* next;
+ do {
+ next = coin->next();
+ if (coin == remove) {
+ if (prev) {
+// prev->setNext(next);
+ } else if (head == fHead) {
+// fHead = next;
+ } else {
+// fTop = next;
+ }
+ log->record(SkPathOpsDebug::kReleasedSpan_Glitch, coin);
+ }
+ prev = coin;
+ } while ((coin = next));
+ return;
+}
+
+void SkOpCoincidence::debugRelease(SkPathOpsDebug::GlitchLog* log, const SkOpSegment* deleted) const {
+ const SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return;
+ }
+ do {
+ if (coin->coinPtTStart()->segment() == deleted
+ || coin->coinPtTEnd()->segment() == deleted
+ || coin->oppPtTStart()->segment() == deleted
+ || coin->oppPtTEnd()->segment() == deleted) {
+ log->record(SkPathOpsDebug::kReleasedSpan_Glitch, coin);
+ }
+ } while ((coin = coin->next()));
+}
+
+// Commented-out lines keep this in sync with expand()
+// expand the range by checking adjacent spans for coincidence
+bool SkOpCoincidence::debugExpand(SkPathOpsDebug::GlitchLog* log) const {
+ const SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return false;
+ }
+ bool expanded = false;
+ do {
+ if (coin->debugExpand(log)) {
+ // check to see if multiple spans expanded so they are now identical
+ const SkCoincidentSpans* test = fHead;
+ do {
+ if (coin == test) {
+ continue;
+ }
+ if (coin->coinPtTStart() == test->coinPtTStart()
+ && coin->oppPtTStart() == test->oppPtTStart()) {
+ if (log) log->record(SkPathOpsDebug::kExpandCoin_Glitch, fHead, test->coinPtTStart());
+ break;
+ }
+ } while ((test = test->next()));
+ expanded = true;
+ }
+ } while ((coin = coin->next()));
+ return expanded;
+}
+
+// Commented-out lines keep this in sync with mark()
+/* this sets up the coincidence links in the segments when the coincidence crosses multiple spans */
+void SkOpCoincidence::debugMark(SkPathOpsDebug::GlitchLog* log) const {
+ const SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return;
+ }
+ do {
+ FAIL_IF(!coin->coinPtTStartWritable()->span()->upCastable(), coin);
+ const SkOpSpan* start = coin->coinPtTStartWritable()->span()->upCast();
+// SkASSERT(start->deleted());
+ const SkOpSpanBase* end = coin->coinPtTEndWritable()->span();
+// SkASSERT(end->deleted());
+ const SkOpSpanBase* oStart = coin->oppPtTStartWritable()->span();
+// SkASSERT(oStart->deleted());
+ const SkOpSpanBase* oEnd = coin->oppPtTEndWritable()->span();
+// SkASSERT(oEnd->deleted());
+ bool flipped = coin->flipped();
+ if (flipped) {
+ SkTSwap(oStart, oEnd);
+ }
+ /* coin and opp spans may not match up. Mark the ends, and then let the interior
+ get marked as many times as the spans allow */
+ start->debugInsertCoincidence(log, oStart->upCast());
+ end->debugInsertCoinEnd(log, oEnd);
+ const SkOpSegment* segment = start->segment();
+ const SkOpSegment* oSegment = oStart->segment();
+ const SkOpSpanBase* next = start;
+ const SkOpSpanBase* oNext = oStart;
+ bool ordered = coin->ordered();
+ while ((next = next->upCast()->next()) != end) {
+ FAIL_IF(!next->upCastable(), coin);
+ if (next->upCast()->debugInsertCoincidence(log, oSegment, flipped, ordered), false) {
+ return;
+ }
+ }
+ while ((oNext = oNext->upCast()->next()) != oEnd) {
+ FAIL_IF(!oNext->upCastable(), coin);
+ if (oNext->upCast()->debugInsertCoincidence(log, segment, flipped, ordered), false) {
+ return;
+ }
+ }
+ } while ((coin = coin->next()));
+ return;
+}
+#endif
+
+#if DEBUG_COIN
+// Commented-out lines keep this in sync with markCollapsed()
+void SkOpCoincidence::debugMarkCollapsed(SkPathOpsDebug::GlitchLog* log, const SkCoincidentSpans* coin, const SkOpPtT* test) const {
+ const SkCoincidentSpans* head = coin;
+ while (coin) {
+ if (coin->collapsed(test)) {
+ if (zero_or_one(coin->coinPtTStart()->fT) && zero_or_one(coin->coinPtTEnd()->fT)) {
+ log->record(SkPathOpsDebug::kCollapsedCoin_Glitch, coin);
+ }
+ if (zero_or_one(coin->oppPtTStart()->fT) && zero_or_one(coin->oppPtTEnd()->fT)) {
+ log->record(SkPathOpsDebug::kCollapsedCoin_Glitch, coin);
+ }
+ this->debugRelease(log, head, coin);
+ }
+ coin = coin->next();
+ }
+}
+
+// Commented-out lines keep this in sync with markCollapsed()
+void SkOpCoincidence::debugMarkCollapsed(SkPathOpsDebug::GlitchLog* log, const SkOpPtT* test) const {
+ this->debugMarkCollapsed(log, fHead, test);
+ this->debugMarkCollapsed(log, fTop, test);
+}
+#endif
+
+void SkCoincidentSpans::debugShow() const {
+ SkDebugf("coinSpan - id=%d t=%1.9g tEnd=%1.9g\n", coinPtTStart()->segment()->debugID(),
+ coinPtTStart()->fT, coinPtTEnd()->fT);
+ SkDebugf("coinSpan + id=%d t=%1.9g tEnd=%1.9g\n", oppPtTStart()->segment()->debugID(),
+ oppPtTStart()->fT, oppPtTEnd()->fT);
+}
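+
+// Illustration (not part of the original Skia sources): a coincident pair prints as two lines,
+// roughly
+//   coinSpan - id=2 t=0.25 tEnd=0.75
+//   coinSpan + id=5 t=0.6 tEnd=0.1
+// The '-' line is the run on the coincident segment, the '+' line the matching run on the
+// opposite segment; a flipped pair shows t > tEnd on the '+' line, as here.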
+
+void SkOpCoincidence::debugShowCoincidence() const {
+#if DEBUG_COINCIDENCE
+ const SkCoincidentSpans* span = fHead;
+ while (span) {
+ span->debugShow();
+ span = span->next();
+ }
+#endif
+}
+
+#if DEBUG_COIN
+static void DebugCheckBetween(const SkOpSpanBase* next, const SkOpSpanBase* end,
+ double oStart, double oEnd, const SkOpSegment* oSegment,
+ SkPathOpsDebug::GlitchLog* log) {
+ SkASSERT(next != end);
+ SkASSERT(!next->contains(end) || log);
+ if (next->t() > end->t()) {
+ SkTSwap(next, end);
+ }
+ do {
+ const SkOpPtT* ptT = next->ptT();
+ int index = 0;
+ bool somethingBetween = false;
+ do {
+ ++index;
+ ptT = ptT->next();
+ const SkOpPtT* checkPtT = next->ptT();
+ if (ptT == checkPtT) {
+ break;
+ }
+ bool looped = false;
+ for (int check = 0; check < index; ++check) {
+ if ((looped = checkPtT == ptT)) {
+ break;
+ }
+ checkPtT = checkPtT->next();
+ }
+ if (looped) {
+ SkASSERT(0);
+ break;
+ }
+ if (ptT->deleted()) {
+ continue;
+ }
+ if (ptT->segment() != oSegment) {
+ continue;
+ }
+ somethingBetween |= between(oStart, ptT->fT, oEnd);
+ } while (true);
+ SkASSERT(somethingBetween);
+ } while (next != end && (next = next->upCast()->next()));
+}
+
+static void DebugCheckOverlap(const SkCoincidentSpans* test, const SkCoincidentSpans* list,
+ SkPathOpsDebug::GlitchLog* log) {
+ if (!list) {
+ return;
+ }
+ const SkOpSegment* coinSeg = test->coinPtTStart()->segment();
+ SkASSERT(coinSeg == test->coinPtTEnd()->segment());
+ const SkOpSegment* oppSeg = test->oppPtTStart()->segment();
+ SkASSERT(oppSeg == test->oppPtTEnd()->segment());
+ SkASSERT(coinSeg != test->oppPtTStart()->segment());
+ SkDEBUGCODE(double tcs = test->coinPtTStart()->fT);
+ SkASSERT(between(0, tcs, 1));
+ SkDEBUGCODE(double tce = test->coinPtTEnd()->fT);
+ SkASSERT(between(0, tce, 1));
+ SkASSERT(tcs < tce);
+ double tos = test->oppPtTStart()->fT;
+ SkASSERT(between(0, tos, 1));
+ double toe = test->oppPtTEnd()->fT;
+ SkASSERT(between(0, toe, 1));
+ SkASSERT(tos != toe);
+ if (tos > toe) {
+ SkTSwap(tos, toe);
+ }
+ do {
+ double lcs, lce, los, loe;
+ if (coinSeg == list->coinPtTStart()->segment()) {
+ if (oppSeg != list->oppPtTStart()->segment()) {
+ continue;
+ }
+ lcs = list->coinPtTStart()->fT;
+ lce = list->coinPtTEnd()->fT;
+ los = list->oppPtTStart()->fT;
+ loe = list->oppPtTEnd()->fT;
+ if (los > loe) {
+ SkTSwap(los, loe);
+ }
+ } else if (coinSeg == list->oppPtTStart()->segment()) {
+ if (oppSeg != list->coinPtTStart()->segment()) {
+ continue;
+ }
+ lcs = list->oppPtTStart()->fT;
+ lce = list->oppPtTEnd()->fT;
+ if (lcs > lce) {
+ SkTSwap(lcs, lce);
+ }
+ los = list->coinPtTStart()->fT;
+ loe = list->coinPtTEnd()->fT;
+ } else {
+ continue;
+ }
+ SkASSERT(tce < lcs || lce < tcs);
+ SkASSERT(toe < los || loe < tos);
+ } while ((list = list->next()));
+}
+
+
+static void DebugCheckOverlapTop(const SkCoincidentSpans* head, const SkCoincidentSpans* opt,
+ SkPathOpsDebug::GlitchLog* log) {
+ // check for overlapping coincident spans
+ const SkCoincidentSpans* test = head;
+ while (test) {
+ const SkCoincidentSpans* next = test->next();
+ DebugCheckOverlap(test, next, log);
+ DebugCheckOverlap(test, opt, log);
+ test = next;
+ }
+}
+
+static void DebugValidate(const SkCoincidentSpans* head, const SkCoincidentSpans* opt,
+ SkPathOpsDebug::GlitchLog* log) {
+ // look for pts inside coincident spans that are not inside the opposite spans
+ const SkCoincidentSpans* coin = head;
+ while (coin) {
+ SkASSERT(SkOpCoincidence::Ordered(coin->coinPtTStart()->segment(),
+ coin->oppPtTStart()->segment()));
+ SkASSERT(coin->coinPtTStart()->span()->ptT() == coin->coinPtTStart());
+ SkASSERT(coin->coinPtTEnd()->span()->ptT() == coin->coinPtTEnd());
+ SkASSERT(coin->oppPtTStart()->span()->ptT() == coin->oppPtTStart());
+ SkASSERT(coin->oppPtTEnd()->span()->ptT() == coin->oppPtTEnd());
+ coin = coin->next();
+ }
+ DebugCheckOverlapTop(head, opt, log);
+}
+#endif
+
+void SkOpCoincidence::debugValidate() const {
+#if DEBUG_COINCIDENCE
+ DebugValidate(fHead, fTop, nullptr);
+ DebugValidate(fTop, nullptr, nullptr);
+#endif
+}
+
+#if DEBUG_COIN
+static void DebugCheckBetween(const SkCoincidentSpans* head, const SkCoincidentSpans* opt,
+ SkPathOpsDebug::GlitchLog* log) {
+ // look for pts inside coincident spans that are not inside the opposite spans
+ const SkCoincidentSpans* coin = head;
+ while (coin) {
+ DebugCheckBetween(coin->coinPtTStart()->span(), coin->coinPtTEnd()->span(),
+ coin->oppPtTStart()->fT, coin->oppPtTEnd()->fT, coin->oppPtTStart()->segment(),
+ log);
+ DebugCheckBetween(coin->oppPtTStart()->span(), coin->oppPtTEnd()->span(),
+ coin->coinPtTStart()->fT, coin->coinPtTEnd()->fT, coin->coinPtTStart()->segment(),
+ log);
+ coin = coin->next();
+ }
+ DebugCheckOverlapTop(head, opt, log);
+}
+#endif
+
+void SkOpCoincidence::debugCheckBetween() const {
+#if DEBUG_COINCIDENCE
+ if (fGlobalState->debugCheckHealth()) {
+ return;
+ }
+ DebugCheckBetween(fHead, fTop, nullptr);
+ DebugCheckBetween(fTop, nullptr, nullptr);
+#endif
+}
+
+#if DEBUG_COIN
+void SkOpContour::debugCheckHealth(SkPathOpsDebug::GlitchLog* log) const {
+ const SkOpSegment* segment = &fHead;
+ do {
+ segment->debugCheckHealth(log);
+ } while ((segment = segment->next()));
+}
+
+void SkOpCoincidence::debugCheckValid(SkPathOpsDebug::GlitchLog* log) const {
+#if DEBUG_VALIDATE
+ DebugValidate(fHead, fTop, log);
+ DebugValidate(fTop, nullptr, log);
+#endif
+}
+
+void SkOpCoincidence::debugCorrectEnds(SkPathOpsDebug::GlitchLog* log) const {
+ const SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return;
+ }
+ do {
+ coin->debugCorrectEnds(log);
+ } while ((coin = coin->next()));
+}
+
+// Commented-out lines keep this aligned with missingCoincidence()
+void SkOpContour::debugMissingCoincidence(SkPathOpsDebug::GlitchLog* log) const {
+// SkASSERT(fCount > 0);
+ const SkOpSegment* segment = &fHead;
+// bool result = false;
+ do {
+ if (segment->debugMissingCoincidence(log), false) {
+// result = true;
+ }
+ segment = segment->next();
+ } while (segment);
+ return;
+}
+
+void SkOpContour::debugMoveMultiples(SkPathOpsDebug::GlitchLog* log) const {
+ SkASSERT(fCount > 0);
+ const SkOpSegment* segment = &fHead;
+ do {
+ if (segment->debugMoveMultiples(log), false) {
+ return;
+ }
+ } while ((segment = segment->next()));
+ return;
+}
+
+void SkOpContour::debugMoveNearby(SkPathOpsDebug::GlitchLog* log) const {
+ SkASSERT(fCount > 0);
+ const SkOpSegment* segment = &fHead;
+ do {
+ segment->debugMoveNearby(log);
+ } while ((segment = segment->next()));
+}
+#endif
+
+#if DEBUG_COINCIDENCE_ORDER
+void SkOpSegment::debugResetCoinT() const {
+ fDebugBaseIndex = -1;
+ fDebugBaseMin = 1;
+ fDebugBaseMax = -1;
+ fDebugLastIndex = -1;
+ fDebugLastMin = 1;
+ fDebugLastMax = -1;
+}
+#endif
+
+void SkOpSegment::debugValidate() const {
+#if DEBUG_COINCIDENCE_ORDER
+ {
+ const SkOpSpanBase* span = &fHead;
+ do {
+ span->debugResetCoinT();
+ } while (!span->final() && (span = span->upCast()->next()));
+ span = &fHead;
+ int index = 0;
+ do {
+ span->debugSetCoinT(index++);
+ } while (!span->final() && (span = span->upCast()->next()));
+ }
+#endif
+#if DEBUG_COINCIDENCE
+ if (this->globalState()->debugCheckHealth()) {
+ return;
+ }
+#endif
+#if DEBUG_VALIDATE
+ const SkOpSpanBase* span = &fHead;
+ double lastT = -1;
+ const SkOpSpanBase* prev = nullptr;
+ int count = 0;
+ int done = 0;
+ do {
+ if (!span->final()) {
+ ++count;
+ done += span->upCast()->done() ? 1 : 0;
+ }
+ SkASSERT(span->segment() == this);
+ SkASSERT(!prev || prev->upCast()->next() == span);
+ SkASSERT(!prev || prev == span->prev());
+ prev = span;
+ double t = span->ptT()->fT;
+ SkASSERT(lastT < t);
+ lastT = t;
+ span->debugValidate();
+ } while (!span->final() && (span = span->upCast()->next()));
+ SkASSERT(count == fCount);
+ SkASSERT(done == fDoneCount);
+ SkASSERT(count >= fDoneCount);
+ SkASSERT(span->final());
+ span->debugValidate();
+#endif
+}
+
+#if DEBUG_COIN
+
+// Commented-out lines keep this in sync with addOpp()
+void SkOpSpanBase::debugAddOpp(SkPathOpsDebug::GlitchLog* log, const SkOpSpanBase* opp) const {
+ const SkOpPtT* oppPrev = this->ptT()->oppPrev(opp->ptT());
+ if (!oppPrev) {
+ return;
+ }
+ this->debugMergeMatches(log, opp);
+ this->ptT()->debugAddOpp(opp->ptT(), oppPrev);
+ this->debugCheckForCollapsedCoincidence(log);
+}
+
+// Commented-out lines keep this in sync with checkForCollapsedCoincidence()
+void SkOpSpanBase::debugCheckForCollapsedCoincidence(SkPathOpsDebug::GlitchLog* log) const {
+ const SkOpCoincidence* coins = this->globalState()->coincidence();
+ if (coins->isEmpty()) {
+ return;
+ }
+// the insert above may have put both ends of a coincident run in the same span
+// for each coincident ptT in the loop, see if its opposite is also in the loop
+// this implementation is the motivation for marking that a ptT is referenced by a coincident span
+ const SkOpPtT* head = this->ptT();
+ const SkOpPtT* test = head;
+ do {
+ if (!test->coincident()) {
+ continue;
+ }
+ coins->debugMarkCollapsed(log, test);
+ } while ((test = test->next()) != head);
+}
+#endif
+
+bool SkOpSpanBase::debugCoinEndLoopCheck() const {
+ int loop = 0;
+ const SkOpSpanBase* next = this;
+ SkOpSpanBase* nextCoin;
+ do {
+ nextCoin = next->fCoinEnd;
+ SkASSERT(nextCoin == this || nextCoin->fCoinEnd != nextCoin);
+ for (int check = 1; check < loop - 1; ++check) {
+ const SkOpSpanBase* checkCoin = this->fCoinEnd;
+ const SkOpSpanBase* innerCoin = checkCoin;
+ for (int inner = check + 1; inner < loop; ++inner) {
+ innerCoin = innerCoin->fCoinEnd;
+ if (checkCoin == innerCoin) {
+ SkDebugf("*** bad coincident end loop ***\n");
+ return false;
+ }
+ }
+ }
+ ++loop;
+ } while ((next = nextCoin) && next != this);
+ return true;
+}
+
+#if DEBUG_COIN
+// Commented-out lines keep this in sync with insertCoinEnd()
+void SkOpSpanBase::debugInsertCoinEnd(SkPathOpsDebug::GlitchLog* log, const SkOpSpanBase* coin) const {
+ if (containsCoinEnd(coin)) {
+// SkASSERT(coin->containsCoinEnd(this));
+ return;
+ }
+ debugValidate();
+// SkASSERT(this != coin);
+ log->record(SkPathOpsDebug::kMarkCoinEnd_Glitch, this, coin);
+// coin->fCoinEnd = this->fCoinEnd;
+// this->fCoinEnd = coinNext;
+ debugValidate();
+}
+
+// Commented-out lines keep this in sync with mergeMatches()
+// Look to see if the pt-t linked list contains the same segment more than once;
+// if so, and if each pt-t is directly pointed to by spans in that segment,
+// merge them.
+// Keep the points, but remove spans so that the segment doesn't have two or more
+// spans pointing to the same pt-t loop at different loop elements.
+void SkOpSpanBase::debugMergeMatches(SkPathOpsDebug::GlitchLog* log, const SkOpSpanBase* opp) const {
+ const SkOpPtT* test = &fPtT;
+ const SkOpPtT* testNext;
+ const SkOpPtT* stop = test;
+ do {
+ testNext = test->next();
+ if (test->deleted()) {
+ continue;
+ }
+ const SkOpSpanBase* testBase = test->span();
+ SkASSERT(testBase->ptT() == test);
+ const SkOpSegment* segment = test->segment();
+ if (segment->done()) {
+ continue;
+ }
+ const SkOpPtT* inner = opp->ptT();
+ const SkOpPtT* innerStop = inner;
+ do {
+ if (inner->segment() != segment) {
+ continue;
+ }
+ if (inner->deleted()) {
+ continue;
+ }
+ const SkOpSpanBase* innerBase = inner->span();
+ SkASSERT(innerBase->ptT() == inner);
+ // when the intersection is first detected, the span base is marked if there is
+ // more than one point in the intersection.
+// if (!innerBase->hasMultipleHint() && !testBase->hasMultipleHint()) {
+ if (!zero_or_one(inner->fT)) {
+ log->record(SkPathOpsDebug::kMergeMatches_Glitch, innerBase, test);
+ } else {
+ SkASSERT(inner->fT != test->fT);
+ if (!zero_or_one(test->fT)) {
+ log->record(SkPathOpsDebug::kMergeMatches_Glitch, testBase, inner);
+ } else {
+ log->record(SkPathOpsDebug::kMergeMatches_Glitch, segment);
+// SkDEBUGCODE(testBase->debugSetDeleted());
+// test->setDeleted();
+// SkDEBUGCODE(innerBase->debugSetDeleted());
+// inner->setDeleted();
+ }
+ }
+#ifdef SK_DEBUG // assert if another undeleted entry points to segment
+ const SkOpPtT* debugInner = inner;
+ while ((debugInner = debugInner->next()) != innerStop) {
+ if (debugInner->segment() != segment) {
+ continue;
+ }
+ if (debugInner->deleted()) {
+ continue;
+ }
+ SkOPASSERT(0);
+ }
+#endif
+ break;
+// }
+ break;
+ } while ((inner = inner->next()) != innerStop);
+ } while ((test = testNext) != stop);
+ this->debugCheckForCollapsedCoincidence(log);
+}
+
+#endif
+
+void SkOpSpanBase::debugResetCoinT() const {
+#if DEBUG_COINCIDENCE_ORDER
+ const SkOpPtT* ptT = &fPtT;
+ do {
+ ptT->debugResetCoinT();
+ ptT = ptT->next();
+ } while (ptT != &fPtT);
+#endif
+}
+
+void SkOpSpanBase::debugSetCoinT(int index) const {
+#if DEBUG_COINCIDENCE_ORDER
+ const SkOpPtT* ptT = &fPtT;
+ do {
+ if (!ptT->deleted()) {
+ ptT->debugSetCoinT(index);
+ }
+ ptT = ptT->next();
+ } while (ptT != &fPtT);
+#endif
+}
+
+const SkOpSpan* SkOpSpanBase::debugStarter(SkOpSpanBase const** endPtr) const {
+ const SkOpSpanBase* end = *endPtr;
+ SkASSERT(this->segment() == end->segment());
+ const SkOpSpanBase* result;
+ if (t() < end->t()) {
+ result = this;
+ } else {
+ result = end;
+ *endPtr = this;
+ }
+ return result->upCast();
+}
+
+void SkOpSpanBase::debugValidate() const {
+#if DEBUG_COINCIDENCE
+ if (this->globalState()->debugCheckHealth()) {
+ return;
+ }
+#endif
+#if DEBUG_VALIDATE
+ const SkOpPtT* ptT = &fPtT;
+ SkASSERT(ptT->span() == this);
+ do {
+// SkASSERT(SkDPoint::RoughlyEqual(fPtT.fPt, ptT->fPt));
+ ptT->debugValidate();
+ ptT = ptT->next();
+ } while (ptT != &fPtT);
+ SkASSERT(this->debugCoinEndLoopCheck());
+ if (!this->final()) {
+ SkASSERT(this->upCast()->debugCoinLoopCheck());
+ }
+ if (fFromAngle) {
+ fFromAngle->debugValidate();
+ }
+ if (!this->final() && this->upCast()->toAngle()) {
+ this->upCast()->toAngle()->debugValidate();
+ }
+#endif
+}
+
+bool SkOpSpan::debugCoinLoopCheck() const {
+ int loop = 0;
+ const SkOpSpan* next = this;
+ SkOpSpan* nextCoin;
+ do {
+ nextCoin = next->fCoincident;
+ SkASSERT(nextCoin == this || nextCoin->fCoincident != nextCoin);
+ for (int check = 1; check < loop - 1; ++check) {
+ const SkOpSpan* checkCoin = this->fCoincident;
+ const SkOpSpan* innerCoin = checkCoin;
+ for (int inner = check + 1; inner < loop; ++inner) {
+ innerCoin = innerCoin->fCoincident;
+ if (checkCoin == innerCoin) {
+ SkDebugf("*** bad coincident loop ***\n");
+ return false;
+ }
+ }
+ }
+ ++loop;
+ } while ((next = nextCoin) && next != this);
+ return true;
+}
+
+#if DEBUG_COIN
+// Commented-out lines keep this in sync with insertCoincidence() in header
+void SkOpSpan::debugInsertCoincidence(SkPathOpsDebug::GlitchLog* log, const SkOpSpan* coin) const {
+ if (containsCoincidence(coin)) {
+// SkASSERT(coin->containsCoincidence(this));
+ return;
+ }
+ debugValidate();
+// SkASSERT(this != coin);
+ log->record(SkPathOpsDebug::kMarkCoinStart_Glitch, this, coin);
+// coin->fCoincident = this->fCoincident;
+// this->fCoincident = coinNext;
+ debugValidate();
+}
+
+// Commented-out lines keep this in sync with insertCoincidence()
+void SkOpSpan::debugInsertCoincidence(SkPathOpsDebug::GlitchLog* log, const SkOpSegment* segment, bool flipped, bool ordered) const {
+ if (this->containsCoincidence(segment)) {
+ return;
+ }
+ const SkOpPtT* next = &fPtT;
+ while ((next = next->next()) != &fPtT) {
+ if (next->segment() == segment) {
+ const SkOpSpan* span;
+ const SkOpSpanBase* base = next->span();
+ if (!ordered) {
+ const SkOpSpanBase* spanEnd = fNext->contains(segment)->span();
+ const SkOpPtT* start = base->ptT()->starter(spanEnd->ptT());
+ FAIL_IF(!start->span()->upCastable(), this);
+ span = const_cast<SkOpSpan*>(start->span()->upCast());
+ }
+ else if (flipped) {
+ span = base->prev();
+ FAIL_IF(!span, this);
+ }
+ else {
+ FAIL_IF(!base->upCastable(), this);
+ span = base->upCast();
+ }
+ log->record(SkPathOpsDebug::kMarkCoinInsert_Glitch, span);
+ return;
+ }
+ }
+#if DEBUG_COIN
+ log->record(SkPathOpsDebug::kMarkCoinMissing_Glitch, segment, this);
+#endif
+ return;
+}
+#endif
+
+// called only by test code
+int SkIntersections::debugCoincidentUsed() const {
+ if (!fIsCoincident[0]) {
+ SkASSERT(!fIsCoincident[1]);
+ return 0;
+ }
+ int count = 0;
+ SkDEBUGCODE(int count2 = 0;)
+ for (int index = 0; index < fUsed; ++index) {
+ if (fIsCoincident[0] & (1 << index)) {
+ ++count;
+ }
+#ifdef SK_DEBUG
+ if (fIsCoincident[1] & (1 << index)) {
+ ++count2;
+ }
+#endif
+ }
+ SkASSERT(count == count2);
+ return count;
+}
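+
+// Illustration (not part of the original Skia sources): fIsCoincident[0] and [1] are per-curve
+// bitmasks over the intersection slots. With fUsed == 3 and fIsCoincident[0] == 0x5 (slots 0
+// and 2), the loop above returns 2 and asserts that the second curve's mask marks the same
+// number of slots.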
+
+#include "SkOpContour.h"
+
+// Commented-out lines keep this in sync with addOpp()
+void SkOpPtT::debugAddOpp(const SkOpPtT* opp, const SkOpPtT* oppPrev) const {
+ SkDEBUGCODE(const SkOpPtT* oldNext = this->fNext);
+ SkASSERT(this != opp);
+// this->fNext = opp;
+ SkASSERT(oppPrev != oldNext);
+// oppPrev->fNext = oldNext;
+}
+
+bool SkOpPtT::debugContains(const SkOpPtT* check) const {
+ SkASSERT(this != check);
+ const SkOpPtT* ptT = this;
+ int links = 0;
+ do {
+ ptT = ptT->next();
+ if (ptT == check) {
+ return true;
+ }
+ ++links;
+ const SkOpPtT* test = this;
+ for (int index = 0; index < links; ++index) {
+ if (ptT == test) {
+ return false;
+ }
+ test = test->next();
+ }
+ } while (true);
+}
+
+const SkOpPtT* SkOpPtT::debugContains(const SkOpSegment* check) const {
+ SkASSERT(this->segment() != check);
+ const SkOpPtT* ptT = this;
+ int links = 0;
+ do {
+ ptT = ptT->next();
+ if (ptT->segment() == check) {
+ return ptT;
+ }
+ ++links;
+ const SkOpPtT* test = this;
+ for (int index = 0; index < links; ++index) {
+ if (ptT == test) {
+ return nullptr;
+ }
+ test = test->next();
+ }
+ } while (true);
+}
+
+const SkOpPtT* SkOpPtT::debugEnder(const SkOpPtT* end) const {
+ return fT < end->fT ? end : this;
+}
+
+int SkOpPtT::debugLoopLimit(bool report) const {
+ int loop = 0;
+ const SkOpPtT* next = this;
+ do {
+ for (int check = 1; check < loop - 1; ++check) {
+ const SkOpPtT* checkPtT = this->fNext;
+ const SkOpPtT* innerPtT = checkPtT;
+ for (int inner = check + 1; inner < loop; ++inner) {
+ innerPtT = innerPtT->fNext;
+ if (checkPtT == innerPtT) {
+ if (report) {
+ SkDebugf("*** bad ptT loop ***\n");
+ }
+ return loop;
+ }
+ }
+ }
+ // there's nothing wrong with extremely large loop counts -- but this may appear to hang
+ // by taking a very long time to figure out that no loop entry is a duplicate
+ // -- and it's likely that a large loop count is indicative of a bug somewhere
+ if (++loop > 1000) {
+ SkDebugf("*** loop count exceeds 1000 ***\n");
+ return 1000;
+ }
+ } while ((next = next->fNext) && next != this);
+ return 0;
+}
+
+const SkOpPtT* SkOpPtT::debugOppPrev(const SkOpPtT* opp) const {
+ return this->oppPrev(const_cast<SkOpPtT*>(opp));
+}
+
+void SkOpPtT::debugResetCoinT() const {
+#if DEBUG_COINCIDENCE_ORDER
+ this->segment()->debugResetCoinT();
+#endif
+}
+
+void SkOpPtT::debugSetCoinT(int index) const {
+#if DEBUG_COINCIDENCE_ORDER
+ this->segment()->debugSetCoinT(index, fT);
+#endif
+}
+
+void SkOpPtT::debugValidate() const {
+#if DEBUG_COINCIDENCE
+ if (this->globalState()->debugCheckHealth()) {
+ return;
+ }
+#endif
+#if DEBUG_VALIDATE
+ SkOpPhase phase = contour()->globalState()->phase();
+ if (phase == SkOpPhase::kIntersecting || phase == SkOpPhase::kFixWinding) {
+ return;
+ }
+ SkASSERT(fNext);
+ SkASSERT(fNext != this);
+ SkASSERT(fNext->fNext);
+ SkASSERT(debugLoopLimit(false) == 0);
+#endif
+}
+
+static void output_scalar(SkScalar num) {
+ if (num == (int) num) {
+ SkDebugf("%d", (int) num);
+ } else {
+ SkString str;
+ str.printf("%1.9g", num);
+ int width = (int) str.size();
+ const char* cStr = str.c_str();
+ while (cStr[width - 1] == '0') {
+ --width;
+ }
+ str.resize(width);
+ SkDebugf("%sf", str.c_str());
+ }
+}
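+
+// Illustration (not part of the original Skia sources): output_scalar(3.0f) prints "3" while
+// output_scalar(2.5f) prints "2.5f" -- integral values are emitted as plain ints and everything
+// else as a trimmed float literal, so the generated test code compiles as SkScalar expressions.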
+
+static void output_points(const SkPoint* pts, int count) {
+ for (int index = 0; index < count; ++index) {
+ output_scalar(pts[index].fX);
+ SkDebugf(", ");
+ output_scalar(pts[index].fY);
+ if (index + 1 < count) {
+ SkDebugf(", ");
+ }
+ }
+}
+
+static void showPathContours(SkPath::RawIter& iter, const char* pathName) {
+ uint8_t verb;
+ SkPoint pts[4];
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ SkDebugf(" %s.moveTo(", pathName);
+ output_points(&pts[0], 1);
+ SkDebugf(");\n");
+ continue;
+ case SkPath::kLine_Verb:
+ SkDebugf(" %s.lineTo(", pathName);
+ output_points(&pts[1], 1);
+ SkDebugf(");\n");
+ break;
+ case SkPath::kQuad_Verb:
+ SkDebugf(" %s.quadTo(", pathName);
+ output_points(&pts[1], 2);
+ SkDebugf(");\n");
+ break;
+ case SkPath::kConic_Verb:
+ SkDebugf(" %s.conicTo(", pathName);
+ output_points(&pts[1], 2);
+ SkDebugf(", %1.9gf);\n", iter.conicWeight());
+ break;
+ case SkPath::kCubic_Verb:
+ SkDebugf(" %s.cubicTo(", pathName);
+ output_points(&pts[1], 3);
+ SkDebugf(");\n");
+ break;
+ case SkPath::kClose_Verb:
+ SkDebugf(" %s.close();\n", pathName);
+ break;
+ default:
+ SkDEBUGFAIL("bad verb");
+ return;
+ }
+ }
+}
+
+static const char* gFillTypeStr[] = {
+ "kWinding_FillType",
+ "kEvenOdd_FillType",
+ "kInverseWinding_FillType",
+ "kInverseEvenOdd_FillType"
+};
+
+void SkPathOpsDebug::ShowOnePath(const SkPath& path, const char* name, bool includeDeclaration) {
+ SkPath::RawIter iter(path);
+#define SUPPORT_RECT_CONTOUR_DETECTION 0
+#if SUPPORT_RECT_CONTOUR_DETECTION
+ int rectCount = path.isRectContours() ? path.rectContours(nullptr, nullptr) : 0;
+ if (rectCount > 0) {
+ SkTDArray<SkRect> rects;
+ SkTDArray<SkPath::Direction> directions;
+ rects.setCount(rectCount);
+ directions.setCount(rectCount);
+ path.rectContours(rects.begin(), directions.begin());
+ for (int contour = 0; contour < rectCount; ++contour) {
+ const SkRect& rect = rects[contour];
+ SkDebugf("path.addRect(%1.9g, %1.9g, %1.9g, %1.9g, %s);\n", rect.fLeft, rect.fTop,
+ rect.fRight, rect.fBottom, directions[contour] == SkPath::kCCW_Direction
+ ? "SkPath::kCCW_Direction" : "SkPath::kCW_Direction");
+ }
+ return;
+ }
+#endif
+ SkPath::FillType fillType = path.getFillType();
+ SkASSERT(fillType >= SkPath::kWinding_FillType && fillType <= SkPath::kInverseEvenOdd_FillType);
+ if (includeDeclaration) {
+ SkDebugf(" SkPath %s;\n", name);
+ }
+ SkDebugf(" %s.setFillType(SkPath::%s);\n", name, gFillTypeStr[fillType]);
+ iter.setPath(path);
+ showPathContours(iter, name);
+}
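+
+// Illustration (not part of the original Skia sources): for a simple triangle, ShowOnePath
+// emits compilable construction code along the lines of
+//     SkPath path;
+//     path.setFillType(SkPath::kWinding_FillType);
+//     path.moveTo(0, 0);
+//     path.lineTo(10, 0);
+//     path.lineTo(0, 10);
+//     path.close();
+// which turns a failing runtime path back into a reproducible test case.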
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsDebug.h b/gfx/skia/skia/src/pathops/SkPathOpsDebug.h
new file mode 100644
index 000000000..f07d7d052
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsDebug.h
@@ -0,0 +1,382 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsDebug_DEFINED
+#define SkPathOpsDebug_DEFINED
+
+#include "SkPathOps.h"
+#include "SkTypes.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+
+enum class SkOpPhase : char;
+class SkOpContourHead;
+
+#ifdef SK_RELEASE
+#define FORCE_RELEASE 1
+#else
+#define FORCE_RELEASE 1 // set force release to 1 for multi-threaded runs -- no debugging
+#endif
+
+#define DEBUG_UNDER_DEVELOPMENT 1
+
+#define ONE_OFF_DEBUG 0
+#define ONE_OFF_DEBUG_MATHEMATICA 0
+
+#if defined(SK_BUILD_FOR_WIN) || defined(SK_BUILD_FOR_ANDROID)
+ #define SK_RAND(seed) rand()
+#else
+ #define SK_RAND(seed) rand_r(&seed)
+#endif
+#ifdef SK_BUILD_FOR_WIN
+ #define SK_SNPRINTF _snprintf
+#else
+ #define SK_SNPRINTF snprintf
+#endif
+
+#define WIND_AS_STRING(x) char x##Str[12]; \
+ if (!SkPathOpsDebug::ValidWind(x)) strcpy(x##Str, "?"); \
+ else SK_SNPRINTF(x##Str, sizeof(x##Str), "%d", x)
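+
+// Illustrative usage (not part of the original Skia sources): the macro declares and fills a
+// local C string named after its argument, e.g.
+//   WIND_AS_STRING(windSum);               // declares char windSumStr[12]
+//   SkDebugf("windSum=%s\n", windSumStr);  // prints "?" when windSum is not a valid wind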
+
+#if FORCE_RELEASE
+
+#define DEBUG_ACTIVE_OP 0
+#define DEBUG_ACTIVE_SPANS 0
+#define DEBUG_ADD_INTERSECTING_TS 0
+#define DEBUG_ADD_T 0
+#define DEBUG_ALIGNMENT 0
+#define DEBUG_ANGLE 0
+#define DEBUG_ASSEMBLE 0
+#define DEBUG_COINCIDENCE 0 // sanity checking
+#define DEBUG_COINCIDENCE_DUMP 0 // accumulate and dump which algorithms fired
+#define DEBUG_COINCIDENCE_ORDER 0 // for well behaved curves, check if pairs match up in t-order
+#define DEBUG_COINCIDENCE_VERBOSE 0 // usually whether the next function generates coincidence
+#define DEBUG_CUBIC_BINARY_SEARCH 0
+#define DEBUG_CUBIC_SPLIT 0
+#define DEBUG_DUMP_SEGMENTS 0
+#define DEBUG_FLOW 0
+#define DEBUG_LIMIT_WIND_SUM 0
+#define DEBUG_MARK_DONE 0
+#define DEBUG_PATH_CONSTRUCTION 0
+#define DEBUG_PERP 0
+#define DEBUG_SHOW_TEST_NAME 0
+#define DEBUG_SORT 0
+#define DEBUG_T_SECT 0
+#define DEBUG_T_SECT_DUMP 0
+#define DEBUG_T_SECT_LOOP_COUNT 0
+#define DEBUG_VALIDATE 0
+#define DEBUG_WINDING 0
+#define DEBUG_WINDING_AT_T 0
+
+#else
+
+#define DEBUG_ACTIVE_OP 1
+#define DEBUG_ACTIVE_SPANS 1
+#define DEBUG_ADD_INTERSECTING_TS 1
+#define DEBUG_ADD_T 1
+#define DEBUG_ALIGNMENT 0
+#define DEBUG_ANGLE 1
+#define DEBUG_ASSEMBLE 1
+#define DEBUG_COINCIDENCE 1
+#define DEBUG_COINCIDENCE_DUMP 0
+#define DEBUG_COINCIDENCE_ORDER 0 // tight arc quads may generate out-of-order coincidence spans
+#define DEBUG_COINCIDENCE_VERBOSE 1
+#define DEBUG_CUBIC_BINARY_SEARCH 0
+#define DEBUG_CUBIC_SPLIT 1
+#define DEBUG_DUMP_SEGMENTS 1
+#define DEBUG_FLOW 1
+#define DEBUG_LIMIT_WIND_SUM 15
+#define DEBUG_MARK_DONE 1
+#define DEBUG_PATH_CONSTRUCTION 1
+#define DEBUG_PERP 1
+#define DEBUG_SHOW_TEST_NAME 1
+#define DEBUG_SORT 1
+#define DEBUG_T_SECT 0
+#define DEBUG_T_SECT_DUMP 0 // Use 1 normally. Use 2 to number segments, 3 for script output
+#define DEBUG_T_SECT_LOOP_COUNT 0
+#define DEBUG_VALIDATE 1
+#define DEBUG_WINDING 1
+#define DEBUG_WINDING_AT_T 1
+
+#endif
+
+#ifdef SK_RELEASE
+ #define SkDEBUGRELEASE(a, b) b
+ #define SkDEBUGPARAMS(...)
+#else
+ #define SkDEBUGRELEASE(a, b) a
+ #define SkDEBUGPARAMS(...) , __VA_ARGS__
+#endif
+
+#if DEBUG_VALIDATE == 0
+ #define PATH_OPS_DEBUG_VALIDATE_PARAMS(...)
+#else
+ #define PATH_OPS_DEBUG_VALIDATE_PARAMS(...) , __VA_ARGS__
+#endif
+
+#if DEBUG_T_SECT == 0
+ #define PATH_OPS_DEBUG_T_SECT_RELEASE(a, b) b
+ #define PATH_OPS_DEBUG_T_SECT_PARAMS(...)
+ #define PATH_OPS_DEBUG_T_SECT_CODE(...)
+#else
+ #define PATH_OPS_DEBUG_T_SECT_RELEASE(a, b) a
+ #define PATH_OPS_DEBUG_T_SECT_PARAMS(...) , __VA_ARGS__
+ #define PATH_OPS_DEBUG_T_SECT_CODE(...) __VA_ARGS__
+#endif
+
+#if DEBUG_T_SECT_DUMP > 1
+ extern int gDumpTSectNum;
+#endif
+
+#if DEBUG_COINCIDENCE || DEBUG_COINCIDENCE_DUMP
+ #define DEBUG_COIN 1
+#else
+ #define DEBUG_COIN 0
+#endif
+
+#if DEBUG_COIN
+ #define DEBUG_COIN_DECLARE_ONLY_PARAMS() \
+ int lineNo, SkOpPhase phase, int iteration
+ #define DEBUG_COIN_DECLARE_PARAMS() \
+ , DEBUG_COIN_DECLARE_ONLY_PARAMS()
+ #define DEBUG_COIN_ONLY_PARAMS() \
+ __LINE__, SkOpPhase::kNoChange, 0
+ #define DEBUG_COIN_PARAMS() \
+ , DEBUG_COIN_ONLY_PARAMS()
+ #define DEBUG_ITER_ONLY_PARAMS(iteration) \
+ __LINE__, SkOpPhase::kNoChange, iteration
+ #define DEBUG_ITER_PARAMS(iteration) \
+ , DEBUG_ITER_ONLY_PARAMS(iteration)
+ #define DEBUG_PHASE_ONLY_PARAMS(phase) \
+ __LINE__, SkOpPhase::phase, 0
+ #define DEBUG_PHASE_PARAMS(phase) \
+ , DEBUG_PHASE_ONLY_PARAMS(phase)
+ #define DEBUG_SET_PHASE() \
+ this->globalState()->debugSetPhase(__func__, lineNo, phase, iteration)
+ #define DEBUG_STATIC_SET_PHASE(obj) \
+ obj->globalState()->debugSetPhase(__func__, lineNo, phase, iteration)
+#elif DEBUG_VALIDATE
+ #define DEBUG_COIN_DECLARE_ONLY_PARAMS() \
+ SkOpPhase phase
+ #define DEBUG_COIN_DECLARE_PARAMS() \
+ , DEBUG_COIN_DECLARE_ONLY_PARAMS()
+ #define DEBUG_COIN_ONLY_PARAMS() \
+ SkOpPhase::kNoChange
+ #define DEBUG_COIN_PARAMS() \
+ , DEBUG_COIN_ONLY_PARAMS()
+ #define DEBUG_ITER_ONLY_PARAMS(iteration) \
+ SkOpPhase::kNoChange
+ #define DEBUG_ITER_PARAMS(iteration) \
+ , DEBUG_ITER_ONLY_PARAMS(iteration)
+ #define DEBUG_PHASE_ONLY_PARAMS(phase) \
+ SkOpPhase::phase
+ #define DEBUG_PHASE_PARAMS(phase) \
+ , DEBUG_PHASE_ONLY_PARAMS(phase)
+ #define DEBUG_SET_PHASE() \
+ this->globalState()->debugSetPhase(phase)
+ #define DEBUG_STATIC_SET_PHASE(obj) \
+ obj->globalState()->debugSetPhase(phase)
+#else
+ #define DEBUG_COIN_DECLARE_ONLY_PARAMS()
+ #define DEBUG_COIN_DECLARE_PARAMS()
+ #define DEBUG_COIN_ONLY_PARAMS()
+ #define DEBUG_COIN_PARAMS()
+ #define DEBUG_ITER_ONLY_PARAMS(iteration)
+ #define DEBUG_ITER_PARAMS(iteration)
+ #define DEBUG_PHASE_ONLY_PARAMS(phase)
+ #define DEBUG_PHASE_PARAMS(phase)
+ #define DEBUG_SET_PHASE()
+ #define DEBUG_STATIC_SET_PHASE(obj)
+#endif
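For reference, a minimal sketch of how the parameter plumbing above is used (hypothetical function name; the real callers live in the SkOpCoincidence/SkOpSegment sources):

// In a DEBUG_COIN build the declaration picks up lineNo/phase/iteration and the
// call site supplies __LINE__ plus the current phase; in the other builds the
// same source expands to a one- or zero-argument function.
bool debugCheckSomething(DEBUG_COIN_DECLARE_ONLY_PARAMS());      // declaration
// call site:  debugCheckSomething(DEBUG_COIN_ONLY_PARAMS());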
+
+#define CUBIC_DEBUG_STR "{{{%1.9g,%1.9g}, {%1.9g,%1.9g}, {%1.9g,%1.9g}, {%1.9g,%1.9g}}}"
+#define CONIC_DEBUG_STR "{{{{%1.9g,%1.9g}, {%1.9g,%1.9g}, {%1.9g,%1.9g}}}, %1.9g}"
+#define QUAD_DEBUG_STR "{{{%1.9g,%1.9g}, {%1.9g,%1.9g}, {%1.9g,%1.9g}}}"
+#define LINE_DEBUG_STR "{{{%1.9g,%1.9g}, {%1.9g,%1.9g}}}"
+#define PT_DEBUG_STR "{{%1.9g,%1.9g}}"
+
+#define T_DEBUG_STR(t, n) #t "[" #n "]=%1.9g"
+#define TX_DEBUG_STR(t) #t "[%d]=%1.9g"
+#define CUBIC_DEBUG_DATA(c) c[0].fX, c[0].fY, c[1].fX, c[1].fY, c[2].fX, c[2].fY, c[3].fX, c[3].fY
+#define CONIC_DEBUG_DATA(c, w) c[0].fX, c[0].fY, c[1].fX, c[1].fY, c[2].fX, c[2].fY, w
+#define QUAD_DEBUG_DATA(q) q[0].fX, q[0].fY, q[1].fX, q[1].fY, q[2].fX, q[2].fY
+#define LINE_DEBUG_DATA(l) l[0].fX, l[0].fY, l[1].fX, l[1].fY
+#define PT_DEBUG_DATA(i, n) i.pt(n).asSkPoint().fX, i.pt(n).asSkPoint().fY
+
+#ifndef DEBUG_TEST
+#define DEBUG_TEST 0
+#endif
+
+#if DEBUG_SHOW_TEST_NAME
+#include "SkTLS.h"
+#endif
+
+// Tests with extreme numbers may fail, but all other tests should never fail.
+#define FAIL_IF(cond) \
+ do { bool fail = (cond); SkOPASSERT(!fail); if (fail) return false; } while (false)
+
+#define FAIL_WITH_NULL_IF(cond) \
+ do { bool fail = (cond); SkOPASSERT(!fail); if (fail) return nullptr; } while (false)
+
+// Some functions serve two masters: one allows the function to fail, the other expects success
+// always. If abort is true, tests with normal numbers must not fail; if one does, it asserts.
+// If abort is false, both normal and extreme numbers may return false without asserting.
+#define RETURN_FALSE_IF(abort, cond) \
+ do { bool fail = (cond); SkOPASSERT(!(abort) || !fail); if (fail) return false; \
+ } while (false)
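A standalone sketch of the "two masters" contract above (hypothetical helper; SkOPASSERT approximated with assert()):

#include <cassert>

#define SKETCH_RETURN_FALSE_IF(abort, cond) \
    do { bool fail = (cond); assert(!(abort) || !fail); if (fail) return false; } while (false)

// Callers that tolerate failure pass abort == false; callers that expect
// success pass abort == true and trip the assert (in debug builds) if the
// guard fires anyway.
static bool computeReciprocal(double denom, bool abort, double* out) {
    SKETCH_RETURN_FALSE_IF(abort, denom == 0);
    *out = 1 / denom;
    return true;
}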
+
+class SkPathOpsDebug {
+public:
+ static const char* kLVerbStr[];
+
+#if DEBUG_COIN
+ struct GlitchLog;
+
+ enum GlitchType {
+ kUninitialized_Glitch,
+ kAddCorruptCoin_Glitch,
+ kAddExpandedCoin_Glitch,
+ kAddExpandedFail_Glitch,
+ kAddIfCollapsed_Glitch,
+ kAddIfMissingCoin_Glitch,
+ kAddMissingCoin_Glitch,
+ kAddMissingExtend_Glitch,
+ kAddOrOverlap_Glitch,
+ kCollapsedCoin_Glitch,
+ kCollapsedDone_Glitch,
+ kCollapsedOppValue_Glitch,
+ kCollapsedSpan_Glitch,
+ kCollapsedWindValue_Glitch,
+ kCorrectEnd_Glitch,
+ kDeletedCoin_Glitch,
+ kExpandCoin_Glitch,
+ kFail_Glitch,
+ kMarkCoinEnd_Glitch,
+ kMarkCoinInsert_Glitch,
+ kMarkCoinMissing_Glitch,
+ kMarkCoinStart_Glitch,
+ kMergeMatches_Glitch,
+ kMissingCoin_Glitch,
+ kMissingDone_Glitch,
+ kMissingIntersection_Glitch,
+ kMoveMultiple_Glitch,
+ kMoveNearbyClearAll_Glitch,
+ kMoveNearbyClearAll2_Glitch,
+ kMoveNearbyMerge_Glitch,
+ kMoveNearbyMergeFinal_Glitch,
+ kMoveNearbyRelease_Glitch,
+ kMoveNearbyReleaseFinal_Glitch,
+ kReleasedSpan_Glitch,
+ kReturnFalse_Glitch,
+ kUnaligned_Glitch,
+ kUnalignedHead_Glitch,
+ kUnalignedTail_Glitch,
+ };
+
+ struct CoinDictEntry {
+ int fIteration;
+ int fLineNumber;
+ GlitchType fGlitchType;
+ const char* fFunctionName;
+ };
+
+ struct CoinDict {
+ void add(const CoinDictEntry& key);
+ void add(const CoinDict& dict);
+ void dump(const char* str, bool visitCheck) const;
+ SkTDArray<CoinDictEntry> fDict;
+ };
+
+ static CoinDict gCoinSumChangedDict;
+ static CoinDict gCoinSumVisitedDict;
+ static CoinDict gCoinVistedDict;
+#endif
+
+#if defined(SK_DEBUG) || !FORCE_RELEASE
+ static int gContourID;
+ static int gSegmentID;
+#endif
+
+#if DEBUG_SORT
+ static int gSortCountDefault;
+ static int gSortCount;
+#endif
+
+#if DEBUG_ACTIVE_OP
+ static const char* kPathOpStr[];
+#endif
+
+ static void MathematicaIze(char* str, size_t bufferSize);
+ static bool ValidWind(int winding);
+ static void WindingPrintf(int winding);
+
+#if DEBUG_SHOW_TEST_NAME
+ static void* CreateNameStr();
+ static void DeleteNameStr(void* v);
+#define DEBUG_FILENAME_STRING_LENGTH 64
+#define DEBUG_FILENAME_STRING (reinterpret_cast<char* >(SkTLS::Get(SkPathOpsDebug::CreateNameStr, \
+ SkPathOpsDebug::DeleteNameStr)))
+ static void BumpTestName(char* );
+#endif
+ static const char* OpStr(SkPathOp );
+ static void ShowActiveSpans(SkOpContourHead* contourList);
+ static void ShowOnePath(const SkPath& path, const char* name, bool includeDeclaration);
+ static void ShowPath(const SkPath& one, const SkPath& two, SkPathOp op, const char* name);
+
+ static bool ChaseContains(const SkTDArray<class SkOpSpanBase*>& , const class SkOpSpanBase* );
+
+ static void CheckHealth(class SkOpContourHead* contourList);
+
+ static const class SkOpAngle* DebugAngleAngle(const class SkOpAngle*, int id);
+ static class SkOpContour* DebugAngleContour(class SkOpAngle*, int id);
+ static const class SkOpPtT* DebugAnglePtT(const class SkOpAngle*, int id);
+ static const class SkOpSegment* DebugAngleSegment(const class SkOpAngle*, int id);
+ static const class SkOpSpanBase* DebugAngleSpan(const class SkOpAngle*, int id);
+
+ static const class SkOpAngle* DebugContourAngle(class SkOpContour*, int id);
+ static class SkOpContour* DebugContourContour(class SkOpContour*, int id);
+ static const class SkOpPtT* DebugContourPtT(class SkOpContour*, int id);
+ static const class SkOpSegment* DebugContourSegment(class SkOpContour*, int id);
+ static const class SkOpSpanBase* DebugContourSpan(class SkOpContour*, int id);
+
+ static const class SkOpAngle* DebugCoincidenceAngle(class SkOpCoincidence*, int id);
+ static class SkOpContour* DebugCoincidenceContour(class SkOpCoincidence*, int id);
+ static const class SkOpPtT* DebugCoincidencePtT(class SkOpCoincidence*, int id);
+ static const class SkOpSegment* DebugCoincidenceSegment(class SkOpCoincidence*, int id);
+ static const class SkOpSpanBase* DebugCoincidenceSpan(class SkOpCoincidence*, int id);
+
+ static const class SkOpAngle* DebugPtTAngle(const class SkOpPtT*, int id);
+ static class SkOpContour* DebugPtTContour(class SkOpPtT*, int id);
+ static const class SkOpPtT* DebugPtTPtT(const class SkOpPtT*, int id);
+ static const class SkOpSegment* DebugPtTSegment(const class SkOpPtT*, int id);
+ static const class SkOpSpanBase* DebugPtTSpan(const class SkOpPtT*, int id);
+
+ static const class SkOpAngle* DebugSegmentAngle(const class SkOpSegment*, int id);
+ static class SkOpContour* DebugSegmentContour(class SkOpSegment*, int id);
+ static const class SkOpPtT* DebugSegmentPtT(const class SkOpSegment*, int id);
+ static const class SkOpSegment* DebugSegmentSegment(const class SkOpSegment*, int id);
+ static const class SkOpSpanBase* DebugSegmentSpan(const class SkOpSegment*, int id);
+
+ static const class SkOpAngle* DebugSpanAngle(const class SkOpSpanBase*, int id);
+ static class SkOpContour* DebugSpanContour(class SkOpSpanBase*, int id);
+ static const class SkOpPtT* DebugSpanPtT(const class SkOpSpanBase*, int id);
+ static const class SkOpSegment* DebugSpanSegment(const class SkOpSpanBase*, int id);
+ static const class SkOpSpanBase* DebugSpanSpan(const class SkOpSpanBase*, int id);
+
+#if DEBUG_COIN
+ static void DumpCoinDict();
+ static void DumpGlitchType(GlitchType );
+#endif
+};
+
+struct SkDQuad;
+
+// generates tools/path_sorter.htm and path_visualizer.htm compatible data
+void DumpQ(const SkDQuad& quad1, const SkDQuad& quad2, int testNo);
+void DumpT(const SkDQuad& quad, double t);
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsLine.cpp b/gfx/skia/skia/src/pathops/SkPathOpsLine.cpp
new file mode 100644
index 000000000..6fa091db8
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsLine.cpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkPathOpsLine.h"
+
+SkDPoint SkDLine::ptAtT(double t) const {
+ if (0 == t) {
+ return fPts[0];
+ }
+ if (1 == t) {
+ return fPts[1];
+ }
+ double one_t = 1 - t;
+ SkDPoint result = { one_t * fPts[0].fX + t * fPts[1].fX, one_t * fPts[0].fY + t * fPts[1].fY };
+ return result;
+}
+
+double SkDLine::exactPoint(const SkDPoint& xy) const {
+ if (xy == fPts[0]) { // do cheapest test first
+ return 0;
+ }
+ if (xy == fPts[1]) {
+ return 1;
+ }
+ return -1;
+}
+
+double SkDLine::nearPoint(const SkDPoint& xy, bool* unequal) const {
+ if (!AlmostBetweenUlps(fPts[0].fX, xy.fX, fPts[1].fX)
+ || !AlmostBetweenUlps(fPts[0].fY, xy.fY, fPts[1].fY)) {
+ return -1;
+ }
+ // project a perpendicular ray from the point to the line; find the T on the line
+ SkDVector len = fPts[1] - fPts[0]; // the x/y magnitudes of the line
+ double denom = len.fX * len.fX + len.fY * len.fY; // see DLine intersectRay
+ SkDVector ab0 = xy - fPts[0];
+ double numer = len.fX * ab0.fX + ab0.fY * len.fY;
+ if (!between(0, numer, denom)) {
+ return -1;
+ }
+ if (!denom) {
+ return 0;
+ }
+ double t = numer / denom;
+ SkDPoint realPt = ptAtT(t);
+ double dist = realPt.distance(xy); // OPTIMIZATION: can we compare against distSq instead ?
+ // find the ordinal in the original line with the largest unsigned exponent
+ double tiniest = SkTMin(SkTMin(SkTMin(fPts[0].fX, fPts[0].fY), fPts[1].fX), fPts[1].fY);
+ double largest = SkTMax(SkTMax(SkTMax(fPts[0].fX, fPts[0].fY), fPts[1].fX), fPts[1].fY);
+ largest = SkTMax(largest, -tiniest);
+ if (!AlmostEqualUlps_Pin(largest, largest + dist)) { // is the dist within ULPS tolerance?
+ return -1;
+ }
+ if (unequal) {
+ *unequal = (float) largest != (float) (largest + dist);
+ }
+ t = SkPinT(t); // a looser pin breaks skpwww_lptemp_com_3
+ SkASSERT(between(0, t, 1));
+ return t;
+}
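The projection above reduces to a dot product divided by a squared length; a standalone sketch with plain doubles (not the Skia SkDLine/SkDPoint types), clamping t to [0, 1]:

#include <algorithm>

struct Pt { double x, y; };

static double projectT(Pt p0, Pt p1, Pt xy) {
    double lx = p1.x - p0.x, ly = p1.y - p0.y;               // segment direction
    double denom = lx * lx + ly * ly;                        // squared length
    if (denom == 0) {                                        // degenerate segment
        return 0;
    }
    double numer = lx * (xy.x - p0.x) + ly * (xy.y - p0.y);  // dot product
    return std::min(1.0, std::max(0.0, numer / denom));
}
// e.g. projectT({0,0}, {10,0}, {4,3}) == 0.4 -- the foot of the perpendicular.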
+
+bool SkDLine::nearRay(const SkDPoint& xy) const {
+ // project a perpendicular ray from the point to the line; find the T on the line
+ SkDVector len = fPts[1] - fPts[0]; // the x/y magnitudes of the line
+ double denom = len.fX * len.fX + len.fY * len.fY; // see DLine intersectRay
+ SkDVector ab0 = xy - fPts[0];
+ double numer = len.fX * ab0.fX + ab0.fY * len.fY;
+ double t = numer / denom;
+ SkDPoint realPt = ptAtT(t);
+ double dist = realPt.distance(xy); // OPTIMIZATION: can we compare against distSq instead ?
+ // find the ordinal in the original line with the largest unsigned exponent
+ double tiniest = SkTMin(SkTMin(SkTMin(fPts[0].fX, fPts[0].fY), fPts[1].fX), fPts[1].fY);
+ double largest = SkTMax(SkTMax(SkTMax(fPts[0].fX, fPts[0].fY), fPts[1].fX), fPts[1].fY);
+ largest = SkTMax(largest, -tiniest);
+ return RoughlyEqualUlps(largest, largest + dist); // is the dist within ULPS tolerance?
+}
+
+double SkDLine::ExactPointH(const SkDPoint& xy, double left, double right, double y) {
+ if (xy.fY == y) {
+ if (xy.fX == left) {
+ return 0;
+ }
+ if (xy.fX == right) {
+ return 1;
+ }
+ }
+ return -1;
+}
+
+double SkDLine::NearPointH(const SkDPoint& xy, double left, double right, double y) {
+ if (!AlmostBequalUlps(xy.fY, y)) {
+ return -1;
+ }
+ if (!AlmostBetweenUlps(left, xy.fX, right)) {
+ return -1;
+ }
+ double t = (xy.fX - left) / (right - left);
+ t = SkPinT(t);
+ SkASSERT(between(0, t, 1));
+ double realPtX = (1 - t) * left + t * right;
+ SkDVector distU = {xy.fY - y, xy.fX - realPtX};
+ double distSq = distU.fX * distU.fX + distU.fY * distU.fY;
+ double dist = sqrt(distSq); // OPTIMIZATION: can we compare against distSq instead ?
+ double tiniest = SkTMin(SkTMin(y, left), right);
+ double largest = SkTMax(SkTMax(y, left), right);
+ largest = SkTMax(largest, -tiniest);
+ if (!AlmostEqualUlps(largest, largest + dist)) { // is the dist within ULPS tolerance?
+ return -1;
+ }
+ return t;
+}
+
+double SkDLine::ExactPointV(const SkDPoint& xy, double top, double bottom, double x) {
+ if (xy.fX == x) {
+ if (xy.fY == top) {
+ return 0;
+ }
+ if (xy.fY == bottom) {
+ return 1;
+ }
+ }
+ return -1;
+}
+
+double SkDLine::NearPointV(const SkDPoint& xy, double top, double bottom, double x) {
+ if (!AlmostBequalUlps(xy.fX, x)) {
+ return -1;
+ }
+ if (!AlmostBetweenUlps(top, xy.fY, bottom)) {
+ return -1;
+ }
+ double t = (xy.fY - top) / (bottom - top);
+ t = SkPinT(t);
+ SkASSERT(between(0, t, 1));
+ double realPtY = (1 - t) * top + t * bottom;
+ SkDVector distU = {xy.fX - x, xy.fY - realPtY};
+ double distSq = distU.fX * distU.fX + distU.fY * distU.fY;
+ double dist = sqrt(distSq); // OPTIMIZATION: can we compare against distSq instead ?
+ double tiniest = SkTMin(SkTMin(x, top), bottom);
+ double largest = SkTMax(SkTMax(x, top), bottom);
+ largest = SkTMax(largest, -tiniest);
+ if (!AlmostEqualUlps(largest, largest + dist)) { // is the dist within ULPS tolerance?
+ return -1;
+ }
+ return t;
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsLine.h b/gfx/skia/skia/src/pathops/SkPathOpsLine.h
new file mode 100644
index 000000000..882dadc1f
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsLine.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsLine_DEFINED
+#define SkPathOpsLine_DEFINED
+
+#include "SkPathOpsPoint.h"
+
+struct SkDLine {
+ SkDPoint fPts[2];
+
+ const SkDPoint& operator[](int n) const { SkASSERT(n >= 0 && n < 2); return fPts[n]; }
+ SkDPoint& operator[](int n) { SkASSERT(n >= 0 && n < 2); return fPts[n]; }
+
+ const SkDLine& set(const SkPoint pts[2]) {
+ fPts[0] = pts[0];
+ fPts[1] = pts[1];
+ return *this;
+ }
+
+ double exactPoint(const SkDPoint& xy) const;
+ static double ExactPointH(const SkDPoint& xy, double left, double right, double y);
+ static double ExactPointV(const SkDPoint& xy, double top, double bottom, double x);
+
+ double nearPoint(const SkDPoint& xy, bool* unequal) const;
+ bool nearRay(const SkDPoint& xy) const;
+ static double NearPointH(const SkDPoint& xy, double left, double right, double y);
+ static double NearPointV(const SkDPoint& xy, double top, double bottom, double x);
+ SkDPoint ptAtT(double t) const;
+
+ void dump() const;
+ void dumpID(int ) const;
+ void dumpInner() const;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsOp.cpp b/gfx/skia/skia/src/pathops/SkPathOpsOp.cpp
new file mode 100644
index 000000000..e622451a9
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsOp.cpp
@@ -0,0 +1,472 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkAddIntersections.h"
+#include "SkOpCoincidence.h"
+#include "SkOpEdgeBuilder.h"
+#include "SkPathOpsCommon.h"
+#include "SkPathWriter.h"
+
+static SkOpSegment* findChaseOp(SkTDArray<SkOpSpanBase*>& chase, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr) {
+ while (chase.count()) {
+ SkOpSpanBase* span;
+ chase.pop(&span);
+ // OPTIMIZE: prev makes this compatible with old code -- but is it necessary?
+ *startPtr = span->ptT()->prev()->span();
+ SkOpSegment* segment = (*startPtr)->segment();
+ bool done = true;
+ *endPtr = nullptr;
+ if (SkOpAngle* last = segment->activeAngle(*startPtr, startPtr, endPtr, &done)) {
+ *startPtr = last->start();
+ *endPtr = last->end();
+ #if TRY_ROTATE
+ *chase.insert(0) = span;
+ #else
+ *chase.append() = span;
+ #endif
+ return last->segment();
+ }
+ if (done) {
+ continue;
+ }
+ int winding;
+ bool sortable;
+ const SkOpAngle* angle = AngleWinding(*startPtr, *endPtr, &winding, &sortable);
+ if (!angle) {
+ return nullptr;
+ }
+ if (winding == SK_MinS32) {
+ continue;
+ }
+ int sumMiWinding, sumSuWinding;
+ if (sortable) {
+ segment = angle->segment();
+ sumMiWinding = segment->updateWindingReverse(angle);
+ if (sumMiWinding == SK_MinS32) {
+ SkASSERT(segment->globalState()->debugSkipAssert());
+ return nullptr;
+ }
+ sumSuWinding = segment->updateOppWindingReverse(angle);
+ if (sumSuWinding == SK_MinS32) {
+ SkASSERT(segment->globalState()->debugSkipAssert());
+ return nullptr;
+ }
+ if (segment->operand()) {
+ SkTSwap<int>(sumMiWinding, sumSuWinding);
+ }
+ }
+ SkOpSegment* first = nullptr;
+ const SkOpAngle* firstAngle = angle;
+ while ((angle = angle->next()) != firstAngle) {
+ segment = angle->segment();
+ SkOpSpanBase* start = angle->start();
+ SkOpSpanBase* end = angle->end();
+ int maxWinding, sumWinding, oppMaxWinding, oppSumWinding;
+ if (sortable) {
+ segment->setUpWindings(start, end, &sumMiWinding, &sumSuWinding,
+ &maxWinding, &sumWinding, &oppMaxWinding, &oppSumWinding);
+ }
+ if (!segment->done(angle)) {
+ if (!first && (sortable || start->starter(end)->windSum() != SK_MinS32)) {
+ first = segment;
+ *startPtr = start;
+ *endPtr = end;
+ }
+ // OPTIMIZATION: should this also add to the chase?
+ if (sortable) {
+ (void) segment->markAngle(maxWinding, sumWinding, oppMaxWinding,
+ oppSumWinding, angle);
+ }
+ }
+ }
+ if (first) {
+ #if TRY_ROTATE
+ *chase.insert(0) = span;
+ #else
+ *chase.append() = span;
+ #endif
+ return first;
+ }
+ }
+ return nullptr;
+}
+
+static bool bridgeOp(SkOpContourHead* contourList, const SkPathOp op,
+ const int xorMask, const int xorOpMask, SkPathWriter* simple) {
+ bool unsortable = false;
+ do {
+ SkOpSpan* span = FindSortableTop(contourList);
+ if (!span) {
+ break;
+ }
+ SkOpSegment* current = span->segment();
+ SkOpSpanBase* start = span->next();
+ SkOpSpanBase* end = span;
+ SkTDArray<SkOpSpanBase*> chase;
+ do {
+ if (current->activeOp(start, end, xorMask, xorOpMask, op)) {
+ do {
+ if (!unsortable && current->done()) {
+ break;
+ }
+ SkASSERT(unsortable || !current->done());
+ SkOpSpanBase* nextStart = start;
+ SkOpSpanBase* nextEnd = end;
+ SkOpSegment* next = current->findNextOp(&chase, &nextStart, &nextEnd,
+ &unsortable, op, xorMask, xorOpMask);
+ if (!next) {
+ if (!unsortable && simple->hasMove()
+ && current->verb() != SkPath::kLine_Verb
+ && !simple->isClosed()) {
+ if (!current->addCurveTo(start, end, simple)) {
+ return false;
+ }
+ if (!simple->isClosed()) {
+ SkPathOpsDebug::ShowActiveSpans(contourList);
+ }
+ }
+ break;
+ }
+ #if DEBUG_FLOW
+ SkDebugf("%s current id=%d from=(%1.9g,%1.9g) to=(%1.9g,%1.9g)\n", __FUNCTION__,
+ current->debugID(), start->pt().fX, start->pt().fY,
+ end->pt().fX, end->pt().fY);
+ #endif
+ if (!current->addCurveTo(start, end, simple)) {
+ return false;
+ }
+ current = next;
+ start = nextStart;
+ end = nextEnd;
+ } while (!simple->isClosed() && (!unsortable || !start->starter(end)->done()));
+ if (current->activeWinding(start, end) && !simple->isClosed()) {
+ SkOpSpan* spanStart = start->starter(end);
+ if (!spanStart->done()) {
+ if (!current->addCurveTo(start, end, simple)) {
+ return false;
+ }
+ current->markDone(spanStart);
+ }
+ }
+ simple->finishContour();
+ } else {
+ SkOpSpanBase* last = current->markAndChaseDone(start, end);
+ if (last && !last->chased()) {
+ last->setChased(true);
+ SkASSERT(!SkPathOpsDebug::ChaseContains(chase, last));
+ *chase.append() = last;
+#if DEBUG_WINDING
+ SkDebugf("%s chase.append id=%d", __FUNCTION__, last->segment()->debugID());
+ if (!last->final()) {
+ SkDebugf(" windSum=%d", last->upCast()->windSum());
+ }
+ SkDebugf("\n");
+#endif
+ }
+ }
+ current = findChaseOp(chase, &start, &end);
+ SkPathOpsDebug::ShowActiveSpans(contourList);
+ if (!current) {
+ break;
+ }
+ } while (true);
+ } while (true);
+ return true;
+}
+
+// pretty picture:
+// https://docs.google.com/a/google.com/drawings/d/1sPV8rPfpEFXymBp3iSbDRWAycp1b-7vD9JP2V-kn9Ss/edit?usp=sharing
+static const SkPathOp gOpInverse[kReverseDifference_SkPathOp + 1][2][2] = {
+// inside minuend outside minuend
+// inside subtrahend outside subtrahend inside subtrahend outside subtrahend
+{{ kDifference_SkPathOp, kIntersect_SkPathOp }, { kUnion_SkPathOp, kReverseDifference_SkPathOp }},
+{{ kIntersect_SkPathOp, kDifference_SkPathOp }, { kReverseDifference_SkPathOp, kUnion_SkPathOp }},
+{{ kUnion_SkPathOp, kReverseDifference_SkPathOp }, { kDifference_SkPathOp, kIntersect_SkPathOp }},
+{{ kXOR_SkPathOp, kXOR_SkPathOp }, { kXOR_SkPathOp, kXOR_SkPathOp }},
+{{ kReverseDifference_SkPathOp, kUnion_SkPathOp }, { kIntersect_SkPathOp, kDifference_SkPathOp }},
+};
+
+static const bool gOutInverse[kReverseDifference_SkPathOp + 1][2][2] = {
+ {{ false, false }, { true, false }}, // diff
+ {{ false, false }, { false, true }}, // sect
+ {{ false, true }, { true, true }}, // union
+ {{ false, true }, { true, false }}, // xor
+ {{ false, true }, { false, false }}, // rev diff
+};
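The tables above fold inverse fill types back into one of the five ops applied to the underlying (non-inverse) paths. A standalone check of one entry, using a stand-in enum rather than the real SkPathOp:

// Stand-in enum and table (illustrative only, mirroring gOpInverse's layout:
// [op][minuend is inverse][subtrahend is inverse]).
enum MiniOp { kDiff, kSect, kUni, kXor, kRevDiff };

static const MiniOp kRemap[5][2][2] = {
    {{ kDiff,    kSect    }, { kUni,     kRevDiff }},
    {{ kSect,    kDiff    }, { kRevDiff, kUni     }},
    {{ kUni,     kRevDiff }, { kDiff,    kSect    }},
    {{ kXor,     kXor     }, { kXor,     kXor     }},
    {{ kRevDiff, kUni     }, { kSect,    kDiff    }},
};

// kRemap[kDiff][0][1] == kSect: subtracting the complement of B from A leaves
// the intersection of A and B, and gOutInverse[kDiff][0][1] == false because
// that region is bounded, so the result keeps a non-inverse fill.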
+
+#define DEBUGGING_PATHOPS_FROM_HOST 0 // enable to debug svg in chrome -- note path hardcoded below
+#if DEBUGGING_PATHOPS_FROM_HOST
+#include "SkData.h"
+#include "SkStream.h"
+
+static void dump_path(FILE* file, const SkPath& path, bool force, bool dumpAsHex) {
+ SkDynamicMemoryWStream wStream;
+ path.dump(&wStream, force, dumpAsHex);
+ sk_sp<SkData> data(wStream.detachAsData());
+ fprintf(file, "%.*s\n", (int) data->size(), (char*) data->data());
+}
+
+static int dumpID = 0;
+
+static void dump_op(const SkPath& one, const SkPath& two, SkPathOp op) {
+#if SK_BUILD_FOR_MAC
+ FILE* file = fopen("/Users/caryclark/Documents/svgop.txt", "w");
+#else
+ FILE* file = fopen("/usr/local/google/home/caryclark/Documents/svgop.txt", "w");
+#endif
+ fprintf(file,
+ "\nstatic void fuzz763_%d(skiatest::Reporter* reporter, const char* filename) {\n",
+ ++dumpID);
+ fprintf(file, " SkPath path;\n");
+ fprintf(file, " path.setFillType((SkPath::FillType) %d);\n", one.getFillType());
+ dump_path(file, one, false, true);
+ fprintf(file, " SkPath path1(path);\n");
+ fprintf(file, " path.reset();\n");
+ fprintf(file, " path.setFillType((SkPath::FillType) %d);\n", two.getFillType());
+ dump_path(file, two, false, true);
+ fprintf(file, " SkPath path2(path);\n");
+ fprintf(file, " testPathOp(reporter, path1, path2, (SkPathOp) %d, filename);\n", op);
+ fprintf(file, "}\n");
+ fclose(file);
+}
+#endif
+
+
+#if DEBUG_T_SECT_LOOP_COUNT
+
+#include "SkMutex.h"
+
+SK_DECLARE_STATIC_MUTEX(debugWorstLoop);
+
+SkOpGlobalState debugWorstState(nullptr, nullptr SkDEBUGPARAMS(false) SkDEBUGPARAMS(nullptr)
+ SkDEBUGPARAMS(nullptr));
+
+void ReportPathOpsDebugging() {
+ debugWorstState.debugLoopReport();
+}
+
+extern void (*gVerboseFinalize)();
+
+#endif
+
+bool OpDebug(const SkPath& one, const SkPath& two, SkPathOp op, SkPath* result
+ SkDEBUGPARAMS(bool skipAssert) SkDEBUGPARAMS(const char* testName)) {
+ SkChunkAlloc allocator(4096); // FIXME: add a constant expression here, tune
+ SkOpContour contour;
+ SkOpContourHead* contourList = static_cast<SkOpContourHead*>(&contour);
+ SkOpGlobalState globalState(contourList, &allocator
+ SkDEBUGPARAMS(skipAssert) SkDEBUGPARAMS(testName));
+ SkOpCoincidence coincidence(&globalState);
+#if DEBUGGING_PATHOPS_FROM_HOST
+ dump_op(one, two, op);
+#endif
+ op = gOpInverse[op][one.isInverseFillType()][two.isInverseFillType()];
+ SkPath::FillType fillType = gOutInverse[op][one.isInverseFillType()][two.isInverseFillType()]
+ ? SkPath::kInverseEvenOdd_FillType : SkPath::kEvenOdd_FillType;
+ SkScalar scaleFactor = SkTMax(ScaleFactor(one), ScaleFactor(two));
+ SkPath scaledOne, scaledTwo;
+ const SkPath* minuend, * subtrahend;
+ if (scaleFactor > SK_Scalar1) {
+ ScalePath(one, 1.f / scaleFactor, &scaledOne);
+ minuend = &scaledOne;
+ ScalePath(two, 1.f / scaleFactor, &scaledTwo);
+ subtrahend = &scaledTwo;
+ } else {
+ minuend = &one;
+ subtrahend = &two;
+ }
+ if (op == kReverseDifference_SkPathOp) {
+ SkTSwap(minuend, subtrahend);
+ op = kDifference_SkPathOp;
+ }
+#if DEBUG_SORT
+ SkPathOpsDebug::gSortCount = SkPathOpsDebug::gSortCountDefault;
+#endif
+ // turn path into list of segments
+ SkOpEdgeBuilder builder(*minuend, contourList, &globalState);
+ if (builder.unparseable()) {
+ return false;
+ }
+ const int xorMask = builder.xorMask();
+ builder.addOperand(*subtrahend);
+ if (!builder.finish()) {
+ return false;
+ }
+#if DEBUG_DUMP_SEGMENTS
+ contourList->dumpSegments("seg", op);
+#endif
+
+ const int xorOpMask = builder.xorMask();
+ if (!SortContourList(&contourList, xorMask == kEvenOdd_PathOpsMask,
+ xorOpMask == kEvenOdd_PathOpsMask)) {
+ result->reset();
+ result->setFillType(fillType);
+ return true;
+ }
+ // find all intersections between segments
+ SkOpContour* current = contourList;
+ do {
+ SkOpContour* next = current;
+ while (AddIntersectTs(current, next, &coincidence)
+ && (next = next->next()))
+ ;
+ } while ((current = current->next()));
+#if DEBUG_VALIDATE
+ globalState.setPhase(SkOpPhase::kWalking);
+#endif
+ bool success = HandleCoincidence(contourList, &coincidence);
+#if DEBUG_COIN
+ globalState.debugAddToGlobalCoinDicts();
+#endif
+ if (!success) {
+ return false;
+ }
+#if DEBUG_ALIGNMENT
+ contourList->dumpSegments("aligned");
+#endif
+ // construct closed contours
+ result->reset();
+ result->setFillType(fillType);
+ SkPathWriter wrapper(*result);
+ if (!bridgeOp(contourList, op, xorMask, xorOpMask, &wrapper)) {
+ return false;
+ }
+ wrapper.assemble(); // if some edges could not be resolved, assemble remaining
+#if DEBUG_T_SECT_LOOP_COUNT
+ {
+ SkAutoMutexAcquire autoM(debugWorstLoop);
+ if (!gVerboseFinalize) {
+ gVerboseFinalize = &ReportPathOpsDebugging;
+ }
+ debugWorstState.debugDoYourWorst(&globalState);
+ }
+#endif
+ if (scaleFactor > 1) {
+ ScalePath(*result, scaleFactor, result);
+ }
+ return true;
+}
+
+#define DEBUG_VERIFY 0
+
+#if DEBUG_VERIFY
+#include "SkBitmap.h"
+#include "SkCanvas.h"
+#include "SkPaint.h"
+
+const int bitWidth = 64;
+const int bitHeight = 64;
+
+static void debug_scale_matrix(const SkPath& one, const SkPath& two, SkMatrix& scale) {
+ SkRect larger = one.getBounds();
+ larger.join(two.getBounds());
+ SkScalar largerWidth = larger.width();
+ if (largerWidth < 4) {
+ largerWidth = 4;
+ }
+ SkScalar largerHeight = larger.height();
+ if (largerHeight < 4) {
+ largerHeight = 4;
+ }
+ SkScalar hScale = (bitWidth - 2) / largerWidth;
+ SkScalar vScale = (bitHeight - 2) / largerHeight;
+ scale.reset();
+ scale.preScale(hScale, vScale);
+ larger.fLeft *= hScale;
+ larger.fRight *= hScale;
+ larger.fTop *= vScale;
+ larger.fBottom *= vScale;
+ SkScalar dx = -16000 > larger.fLeft ? -16000 - larger.fLeft
+ : 16000 < larger.fRight ? 16000 - larger.fRight : 0;
+ SkScalar dy = -16000 > larger.fTop ? -16000 - larger.fTop
+ : 16000 < larger.fBottom ? 16000 - larger.fBottom : 0;
+ scale.preTranslate(dx, dy);
+}
+
+static int debug_paths_draw_the_same(const SkPath& one, const SkPath& two, SkBitmap& bits) {
+ if (bits.width() == 0) {
+ bits.allocN32Pixels(bitWidth * 2, bitHeight);
+ }
+ SkCanvas canvas(bits);
+ canvas.drawColor(SK_ColorWHITE);
+ SkPaint paint;
+ canvas.save();
+ const SkRect& bounds1 = one.getBounds();
+ canvas.translate(-bounds1.fLeft + 1, -bounds1.fTop + 1);
+ canvas.drawPath(one, paint);
+ canvas.restore();
+ canvas.save();
+ canvas.translate(-bounds1.fLeft + 1 + bitWidth, -bounds1.fTop + 1);
+ canvas.drawPath(two, paint);
+ canvas.restore();
+ int errors = 0;
+ for (int y = 0; y < bitHeight - 1; ++y) {
+ uint32_t* addr1 = bits.getAddr32(0, y);
+ uint32_t* addr2 = bits.getAddr32(0, y + 1);
+ uint32_t* addr3 = bits.getAddr32(bitWidth, y);
+ uint32_t* addr4 = bits.getAddr32(bitWidth, y + 1);
+ for (int x = 0; x < bitWidth - 1; ++x) {
+ // count 2x2 blocks
+ bool err = addr1[x] != addr3[x];
+ if (err) {
+ errors += addr1[x + 1] != addr3[x + 1]
+ && addr2[x] != addr4[x] && addr2[x + 1] != addr4[x + 1];
+ }
+ }
+ }
+ return errors;
+}
+
+#endif
+
+bool Op(const SkPath& one, const SkPath& two, SkPathOp op, SkPath* result) {
+#if DEBUG_VERIFY
+ if (!OpDebug(one, two, op, result SkDEBUGPARAMS(nullptr))) {
+ SkDebugf("%s did not expect failure\none: fill=%d\n", __FUNCTION__, one.getFillType());
+ one.dumpHex();
+ SkDebugf("two: fill=%d\n", two.getFillType());
+ two.dumpHex();
+ SkASSERT(0);
+ return false;
+ }
+ SkPath pathOut, scaledPathOut;
+ SkRegion rgnA, rgnB, openClip, rgnOut;
+ openClip.setRect(-16000, -16000, 16000, 16000);
+ rgnA.setPath(one, openClip);
+ rgnB.setPath(two, openClip);
+ rgnOut.op(rgnA, rgnB, (SkRegion::Op) op);
+ rgnOut.getBoundaryPath(&pathOut);
+ SkMatrix scale;
+ debug_scale_matrix(one, two, scale);
+ SkRegion scaledRgnA, scaledRgnB, scaledRgnOut;
+ SkPath scaledA, scaledB;
+ scaledA.addPath(one, scale);
+ scaledA.setFillType(one.getFillType());
+ scaledB.addPath(two, scale);
+ scaledB.setFillType(two.getFillType());
+ scaledRgnA.setPath(scaledA, openClip);
+ scaledRgnB.setPath(scaledB, openClip);
+ scaledRgnOut.op(scaledRgnA, scaledRgnB, (SkRegion::Op) op);
+ scaledRgnOut.getBoundaryPath(&scaledPathOut);
+ SkBitmap bitmap;
+ SkPath scaledOut;
+ scaledOut.addPath(*result, scale);
+ scaledOut.setFillType(result->getFillType());
+ int errors = debug_paths_draw_the_same(scaledPathOut, scaledOut, bitmap);
+ const int MAX_ERRORS = 9;
+ if (errors > MAX_ERRORS) {
+ SkDebugf("%s did not expect failure\none: fill=%d\n", __FUNCTION__, one.getFillType());
+ one.dumpHex();
+ SkDebugf("two: fill=%d\n", two.getFillType());
+ two.dumpHex();
+ SkASSERT(0);
+ }
+ return true;
+#else
+ return OpDebug(one, two, op, result SkDEBUGPARAMS(true) SkDEBUGPARAMS(nullptr));
+#endif
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsPoint.cpp b/gfx/skia/skia/src/pathops/SkPathOpsPoint.cpp
new file mode 100644
index 000000000..e0f175dac
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsPoint.cpp
@@ -0,0 +1,12 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkPathOpsPoint.h"
+
+SkDVector operator-(const SkDPoint& a, const SkDPoint& b) {
+ SkDVector v = {a.fX - b.fX, a.fY - b.fY};
+ return v;
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsPoint.h b/gfx/skia/skia/src/pathops/SkPathOpsPoint.h
new file mode 100644
index 000000000..f314f69d0
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsPoint.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsPoint_DEFINED
+#define SkPathOpsPoint_DEFINED
+
+#include "SkPathOpsTypes.h"
+#include "SkPoint.h"
+
+inline bool AlmostEqualUlps(const SkPoint& pt1, const SkPoint& pt2) {
+ return AlmostEqualUlps(pt1.fX, pt2.fX) && AlmostEqualUlps(pt1.fY, pt2.fY);
+}
+
+struct SkDVector {
+ double fX;
+ double fY;
+
+ void set(const SkVector& pt) {
+ fX = pt.fX;
+ fY = pt.fY;
+ }
+
+ // only used by testing
+ void operator+=(const SkDVector& v) {
+ fX += v.fX;
+ fY += v.fY;
+ }
+
+ // only called by nearestT, which is currently only used by testing
+ void operator-=(const SkDVector& v) {
+ fX -= v.fX;
+ fY -= v.fY;
+ }
+
+ // only used by testing
+ void operator/=(const double s) {
+ fX /= s;
+ fY /= s;
+ }
+
+ // only used by testing
+ void operator*=(const double s) {
+ fX *= s;
+ fY *= s;
+ }
+
+ SkVector asSkVector() const {
+ SkVector v = {SkDoubleToScalar(fX), SkDoubleToScalar(fY)};
+ return v;
+ }
+
+ // only used by testing
+ double cross(const SkDVector& a) const {
+ return fX * a.fY - fY * a.fX;
+ }
+
+ // similar to cross, this bastardization considers nearly coincident to be zero
+ // uses ulps epsilon == 16
+ double crossCheck(const SkDVector& a) const {
+ double xy = fX * a.fY;
+ double yx = fY * a.fX;
+ return AlmostEqualUlps(xy, yx) ? 0 : xy - yx;
+ }
+
+ // allow tinier numbers
+ double crossNoNormalCheck(const SkDVector& a) const {
+ double xy = fX * a.fY;
+ double yx = fY * a.fX;
+ return AlmostEqualUlpsNoNormalCheck(xy, yx) ? 0 : xy - yx;
+ }
+
+ double dot(const SkDVector& a) const {
+ return fX * a.fX + fY * a.fY;
+ }
+
+ double length() const {
+ return sqrt(lengthSquared());
+ }
+
+ double lengthSquared() const {
+ return fX * fX + fY * fY;
+ }
+
+ void normalize() {
+ double inverseLength = 1 / this->length();
+ fX *= inverseLength;
+ fY *= inverseLength;
+ }
+};
+
+struct SkDPoint {
+ double fX;
+ double fY;
+
+ void set(const SkPoint& pt) {
+ fX = pt.fX;
+ fY = pt.fY;
+ }
+
+ friend SkDVector operator-(const SkDPoint& a, const SkDPoint& b);
+
+ friend bool operator==(const SkDPoint& a, const SkDPoint& b) {
+ return a.fX == b.fX && a.fY == b.fY;
+ }
+
+ friend bool operator!=(const SkDPoint& a, const SkDPoint& b) {
+ return a.fX != b.fX || a.fY != b.fY;
+ }
+
+ void operator=(const SkPoint& pt) {
+ fX = pt.fX;
+ fY = pt.fY;
+ }
+
+ // only used by testing
+ void operator+=(const SkDVector& v) {
+ fX += v.fX;
+ fY += v.fY;
+ }
+
+ // only used by testing
+ void operator-=(const SkDVector& v) {
+ fX -= v.fX;
+ fY -= v.fY;
+ }
+
+ // only used by testing
+ SkDPoint operator+(const SkDVector& v) {
+ SkDPoint result = *this;
+ result += v;
+ return result;
+ }
+
+ // only used by testing
+ SkDPoint operator-(const SkDVector& v) {
+ SkDPoint result = *this;
+ result -= v;
+ return result;
+ }
+
+ // note: this can not be implemented with
+ // return approximately_equal(a.fY, fY) && approximately_equal(a.fX, fX);
+ // because that will not take the magnitude of the values into account
+ bool approximatelyDEqual(const SkDPoint& a) const {
+ if (approximately_equal(fX, a.fX) && approximately_equal(fY, a.fY)) {
+ return true;
+ }
+ if (!RoughlyEqualUlps(fX, a.fX) || !RoughlyEqualUlps(fY, a.fY)) {
+ return false;
+ }
+ double dist = distance(a); // OPTIMIZATION: can we compare against distSq instead ?
+ double tiniest = SkTMin(SkTMin(SkTMin(fX, a.fX), fY), a.fY);
+ double largest = SkTMax(SkTMax(SkTMax(fX, a.fX), fY), a.fY);
+ largest = SkTMax(largest, -tiniest);
+ return AlmostDequalUlps(largest, largest + dist); // is the dist within ULPS tolerance?
+ }
+
+ bool approximatelyDEqual(const SkPoint& a) const {
+ SkDPoint dA;
+ dA.set(a);
+ return approximatelyDEqual(dA);
+ }
+
+ bool approximatelyEqual(const SkDPoint& a) const {
+ if (approximately_equal(fX, a.fX) && approximately_equal(fY, a.fY)) {
+ return true;
+ }
+ if (!RoughlyEqualUlps(fX, a.fX) || !RoughlyEqualUlps(fY, a.fY)) {
+ return false;
+ }
+ double dist = distance(a); // OPTIMIZATION: can we compare against distSq instead ?
+ double tiniest = SkTMin(SkTMin(SkTMin(fX, a.fX), fY), a.fY);
+ double largest = SkTMax(SkTMax(SkTMax(fX, a.fX), fY), a.fY);
+ largest = SkTMax(largest, -tiniest);
+ return AlmostPequalUlps(largest, largest + dist); // is the dist within ULPS tolerance?
+ }
+
+ bool approximatelyEqual(const SkPoint& a) const {
+ SkDPoint dA;
+ dA.set(a);
+ return approximatelyEqual(dA);
+ }
+
+ static bool ApproximatelyEqual(const SkPoint& a, const SkPoint& b) {
+ if (approximately_equal(a.fX, b.fX) && approximately_equal(a.fY, b.fY)) {
+ return true;
+ }
+ if (!RoughlyEqualUlps(a.fX, b.fX) || !RoughlyEqualUlps(a.fY, b.fY)) {
+ return false;
+ }
+ SkDPoint dA, dB;
+ dA.set(a);
+ dB.set(b);
+ double dist = dA.distance(dB); // OPTIMIZATION: can we compare against distSq instead ?
+ float tiniest = SkTMin(SkTMin(SkTMin(a.fX, b.fX), a.fY), b.fY);
+ float largest = SkTMax(SkTMax(SkTMax(a.fX, b.fX), a.fY), b.fY);
+ largest = SkTMax(largest, -tiniest);
+ return AlmostDequalUlps((double) largest, largest + dist); // is dist within ULPS tolerance?
+ }
+
+ // only used by testing
+ bool approximatelyZero() const {
+ return approximately_zero(fX) && approximately_zero(fY);
+ }
+
+ SkPoint asSkPoint() const {
+ SkPoint pt = {SkDoubleToScalar(fX), SkDoubleToScalar(fY)};
+ return pt;
+ }
+
+ double distance(const SkDPoint& a) const {
+ SkDVector temp = *this - a;
+ return temp.length();
+ }
+
+ double distanceSquared(const SkDPoint& a) const {
+ SkDVector temp = *this - a;
+ return temp.lengthSquared();
+ }
+
+ static SkDPoint Mid(const SkDPoint& a, const SkDPoint& b) {
+ SkDPoint result;
+ result.fX = (a.fX + b.fX) / 2;
+ result.fY = (a.fY + b.fY) / 2;
+ return result;
+ }
+
+ bool roughlyEqual(const SkDPoint& a) const {
+ if (roughly_equal(fX, a.fX) && roughly_equal(fY, a.fY)) {
+ return true;
+ }
+ double dist = distance(a); // OPTIMIZATION: can we compare against distSq instead ?
+ double tiniest = SkTMin(SkTMin(SkTMin(fX, a.fX), fY), a.fY);
+ double largest = SkTMax(SkTMax(SkTMax(fX, a.fX), fY), a.fY);
+ largest = SkTMax(largest, -tiniest);
+ return RoughlyEqualUlps(largest, largest + dist); // is the dist within ULPS tolerance?
+ }
+
+ static bool RoughlyEqual(const SkPoint& a, const SkPoint& b) {
+ if (!RoughlyEqualUlps(a.fX, b.fX) && !RoughlyEqualUlps(a.fY, b.fY)) {
+ return false;
+ }
+ SkDPoint dA, dB;
+ dA.set(a);
+ dB.set(b);
+ double dist = dA.distance(dB); // OPTIMIZATION: can we compare against distSq instead ?
+ float tiniest = SkTMin(SkTMin(SkTMin(a.fX, b.fX), a.fY), b.fY);
+ float largest = SkTMax(SkTMax(SkTMax(a.fX, b.fX), a.fY), b.fY);
+ largest = SkTMax(largest, -tiniest);
+ return RoughlyEqualUlps((double) largest, largest + dist); // is dist within ULPS tolerance?
+ }
+
+ // very light weight check, should only be used for inequality check
+ static bool WayRoughlyEqual(const SkPoint& a, const SkPoint& b) {
+ float largestNumber = SkTMax(SkTAbs(a.fX), SkTMax(SkTAbs(a.fY),
+ SkTMax(SkTAbs(b.fX), SkTAbs(b.fY))));
+ SkVector diffs = a - b;
+ float largestDiff = SkTMax(diffs.fX, diffs.fY);
+ return roughly_zero_when_compared_to(largestDiff, largestNumber);
+ }
+
+ // utilities callable by the user from the debugger when the implementation code is linked in
+ void dump() const;
+ static void Dump(const SkPoint& pt);
+ static void DumpHex(const SkPoint& pt);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsQuad.cpp b/gfx/skia/skia/src/pathops/SkPathOpsQuad.cpp
new file mode 100644
index 000000000..ab1ba05c5
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsQuad.cpp
@@ -0,0 +1,390 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkIntersections.h"
+#include "SkLineParameters.h"
+#include "SkPathOpsCubic.h"
+#include "SkPathOpsCurve.h"
+#include "SkPathOpsQuad.h"
+
+// from blackpawn.com/texts/pointinpoly
+static bool pointInTriangle(const SkDPoint fPts[3], const SkDPoint& test) {
+ SkDVector v0 = fPts[2] - fPts[0];
+ SkDVector v1 = fPts[1] - fPts[0];
+ SkDVector v2 = test - fPts[0];
+ double dot00 = v0.dot(v0);
+ double dot01 = v0.dot(v1);
+ double dot02 = v0.dot(v2);
+ double dot11 = v1.dot(v1);
+ double dot12 = v1.dot(v2);
+ // Compute barycentric coordinates
+ double invDenom = 1 / (dot00 * dot11 - dot01 * dot01);
+ double u = (dot11 * dot02 - dot01 * dot12) * invDenom;
+ double v = (dot00 * dot12 - dot01 * dot02) * invDenom;
+ // Check if point is in triangle
+ return u >= 0 && v >= 0 && u + v < 1;
+}
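A standalone restatement of the barycentric test above (plain doubles, hypothetical helper names):

struct P2 { double x, y; };
static double dot2(P2 a, P2 b) { return a.x * b.x + a.y * b.y; }

// Express 'test' as u*(C-A) + v*(B-A) relative to vertex A; the point is
// strictly inside when u >= 0, v >= 0 and u + v < 1.
static bool inTriangle(P2 A, P2 B, P2 C, P2 test) {
    P2 v0 = { C.x - A.x, C.y - A.y };
    P2 v1 = { B.x - A.x, B.y - A.y };
    P2 v2 = { test.x - A.x, test.y - A.y };
    double d00 = dot2(v0, v0), d01 = dot2(v0, v1), d02 = dot2(v0, v2);
    double d11 = dot2(v1, v1), d12 = dot2(v1, v2);
    double inv = 1 / (d00 * d11 - d01 * d01);   // assumes a non-degenerate triangle
    double u = (d11 * d02 - d01 * d12) * inv;
    double v = (d00 * d12 - d01 * d02) * inv;
    return u >= 0 && v >= 0 && u + v < 1;
}
// e.g. inTriangle({0,0}, {1,0}, {0,1}, {0.25,0.25}) is true; {1,1} is outside.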
+
+static bool matchesEnd(const SkDPoint fPts[3], const SkDPoint& test) {
+ return fPts[0] == test || fPts[2] == test;
+}
+
+/* started with at_most_end_pts_in_common from SkDQuadIntersection.cpp */
+// Do a quick reject by rotating all points relative to a line formed by
+// a pair of one quad's points. If the 2nd quad's points
+// are on the line or on the opposite side from the 1st quad's 'odd man', the
+// curves at most intersect at the endpoints.
+/* if returning true, check contains true if the quad's hull collapsed, making the quad linear
+ if returning false, check contains true if the quad pair has only the end point in common
+*/
+bool SkDQuad::hullIntersects(const SkDQuad& q2, bool* isLinear) const {
+ bool linear = true;
+ for (int oddMan = 0; oddMan < kPointCount; ++oddMan) {
+ const SkDPoint* endPt[2];
+ this->otherPts(oddMan, endPt);
+ double origX = endPt[0]->fX;
+ double origY = endPt[0]->fY;
+ double adj = endPt[1]->fX - origX;
+ double opp = endPt[1]->fY - origY;
+ double sign = (fPts[oddMan].fY - origY) * adj - (fPts[oddMan].fX - origX) * opp;
+ if (approximately_zero(sign)) {
+ continue;
+ }
+ linear = false;
+ bool foundOutlier = false;
+ for (int n = 0; n < kPointCount; ++n) {
+ double test = (q2[n].fY - origY) * adj - (q2[n].fX - origX) * opp;
+ if (test * sign > 0 && !precisely_zero(test)) {
+ foundOutlier = true;
+ break;
+ }
+ }
+ if (!foundOutlier) {
+ return false;
+ }
+ }
+ if (linear && !matchesEnd(fPts, q2.fPts[0]) && !matchesEnd(fPts, q2.fPts[2])) {
+ // if the end point of the opposite quad is inside the hull that is nearly a line,
+ // then representing the quad as a line may cause the intersection to be missed.
+ // Check to see if the endpoint is in the triangle.
+ if (pointInTriangle(fPts, q2.fPts[0]) || pointInTriangle(fPts, q2.fPts[2])) {
+ linear = false;
+ }
+ }
+ *isLinear = linear;
+ return true;
+}
+
+bool SkDQuad::hullIntersects(const SkDConic& conic, bool* isLinear) const {
+ return conic.hullIntersects(*this, isLinear);
+}
+
+bool SkDQuad::hullIntersects(const SkDCubic& cubic, bool* isLinear) const {
+ return cubic.hullIntersects(*this, isLinear);
+}
+
+/* bit twiddling for finding the off curve index (x&~m is the pair in [0,1,2] excluding oddMan)
+oddMan opp x=oddMan^opp x=x-oddMan m=x>>2 x&~m
+ 0 1 1 1 0 1
+ 2 2 2 0 2
+ 1 1 0 -1 -1 0
+ 2 3 2 0 2
+ 2 1 3 1 0 1
+ 2 0 -2 -1 0
+*/
+void SkDQuad::otherPts(int oddMan, const SkDPoint* endPt[2]) const {
+ for (int opp = 1; opp < kPointCount; ++opp) {
+ int end = (oddMan ^ opp) - oddMan; // choose a value not equal to oddMan
+ end &= ~(end >> 2); // if the value went negative, set it to zero
+ endPt[opp - 1] = &fPts[end];
+ }
+}
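A quick standalone check of the index trick in otherPts (assumes arithmetic right shift of negative ints, as the table above does):

#include <cstdio>

int main() {
    for (int oddMan = 0; oddMan < 3; ++oddMan) {
        for (int opp = 1; opp < 3; ++opp) {
            int end = (oddMan ^ opp) - oddMan;   // may go negative
            end &= ~(end >> 2);                  // clamp a negative result to 0
            printf("oddMan=%d opp=%d -> index %d\n", oddMan, opp, end);
        }
    }
    // prints index pairs {1,2}, {0,2}, {1,0} -- always the two points != oddMan
    return 0;
}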
+
+int SkDQuad::AddValidTs(double s[], int realRoots, double* t) {
+ int foundRoots = 0;
+ for (int index = 0; index < realRoots; ++index) {
+ double tValue = s[index];
+ if (approximately_zero_or_more(tValue) && approximately_one_or_less(tValue)) {
+ if (approximately_less_than_zero(tValue)) {
+ tValue = 0;
+ } else if (approximately_greater_than_one(tValue)) {
+ tValue = 1;
+ }
+ for (int idx2 = 0; idx2 < foundRoots; ++idx2) {
+ if (approximately_equal(t[idx2], tValue)) {
+ goto nextRoot;
+ }
+ }
+ t[foundRoots++] = tValue;
+ }
+nextRoot:
+ {}
+ }
+ return foundRoots;
+}
+
+// note: caller expects multiple results to be sorted smaller first
+// note: http://en.wikipedia.org/wiki/Loss_of_significance has an interesting
+// analysis of the quadratic equation, suggesting why the following looks at
+// the sign of B -- and further suggesting that the greatest loss of precision
+// is in b squared less two a c
+int SkDQuad::RootsValidT(double A, double B, double C, double t[2]) {
+ double s[2];
+ int realRoots = RootsReal(A, B, C, s);
+ int foundRoots = AddValidTs(s, realRoots, t);
+ return foundRoots;
+}
+
+/*
+Numeric Solutions (5.6) suggests to solve the quadratic by computing
+ Q = -1/2(B + sgn(B)Sqrt(B^2 - 4 A C))
+and using the roots
+ t1 = Q / A
+ t2 = C / Q
+*/
+// this does not discard real roots <= 0 or >= 1
+int SkDQuad::RootsReal(const double A, const double B, const double C, double s[2]) {
+ const double p = B / (2 * A);
+ const double q = C / A;
+ if (!A || (approximately_zero(A) && (approximately_zero_inverse(p)
+ || approximately_zero_inverse(q)))) {
+ if (approximately_zero(B)) {
+ s[0] = 0;
+ return C == 0;
+ }
+ s[0] = -C / B;
+ return 1;
+ }
+ /* normal form: x^2 + px + q = 0 */
+ const double p2 = p * p;
+ if (!AlmostDequalUlps(p2, q) && p2 < q) {
+ return 0;
+ }
+ double sqrt_D = 0;
+ if (p2 > q) {
+ sqrt_D = sqrt(p2 - q);
+ }
+ s[0] = sqrt_D - p;
+ s[1] = -sqrt_D - p;
+ return 1 + !AlmostDequalUlps(s[0], s[1]);
+}
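A worked example of the normal-form branch above, assuming well-conditioned inputs (A not near zero):

#include <cmath>
#include <cstdio>

int main() {
    // t*t - 3*t + 2 has roots 1 and 2 (this helper does not discard t >= 1).
    double A = 1, B = -3, C = 2;
    double p = B / (2 * A), q = C / A;       // p = -1.5, q = 2
    double sqrt_D = std::sqrt(p * p - q);    // sqrt(0.25) = 0.5
    printf("s[0]=%g s[1]=%g\n", sqrt_D - p, -sqrt_D - p);   // 2 and 1
    return 0;
}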
+
+bool SkDQuad::isLinear(int startIndex, int endIndex) const {
+ SkLineParameters lineParameters;
+ lineParameters.quadEndPoints(*this, startIndex, endIndex);
+ // FIXME: maybe it's possible to avoid this and compare non-normalized
+ lineParameters.normalize();
+ double distance = lineParameters.controlPtDistance(*this);
+ double tiniest = SkTMin(SkTMin(SkTMin(SkTMin(SkTMin(fPts[0].fX, fPts[0].fY),
+ fPts[1].fX), fPts[1].fY), fPts[2].fX), fPts[2].fY);
+ double largest = SkTMax(SkTMax(SkTMax(SkTMax(SkTMax(fPts[0].fX, fPts[0].fY),
+ fPts[1].fX), fPts[1].fY), fPts[2].fX), fPts[2].fY);
+ largest = SkTMax(largest, -tiniest);
+ return approximately_zero_when_compared_to(distance, largest);
+}
+
+SkDVector SkDQuad::dxdyAtT(double t) const {
+ double a = t - 1;
+ double b = 1 - 2 * t;
+ double c = t;
+ SkDVector result = { a * fPts[0].fX + b * fPts[1].fX + c * fPts[2].fX,
+ a * fPts[0].fY + b * fPts[1].fY + c * fPts[2].fY };
+ if (result.fX == 0 && result.fY == 0) {
+ if (zero_or_one(t)) {
+ result = fPts[2] - fPts[0];
+ } else {
+ // incomplete
+ SkDebugf("!q");
+ }
+ }
+ return result;
+}
+
+// OPTIMIZE: assert if caller passes in t == 0 / t == 1 ?
+SkDPoint SkDQuad::ptAtT(double t) const {
+ if (0 == t) {
+ return fPts[0];
+ }
+ if (1 == t) {
+ return fPts[2];
+ }
+ double one_t = 1 - t;
+ double a = one_t * one_t;
+ double b = 2 * one_t * t;
+ double c = t * t;
+ SkDPoint result = { a * fPts[0].fX + b * fPts[1].fX + c * fPts[2].fX,
+ a * fPts[0].fY + b * fPts[1].fY + c * fPts[2].fY };
+ return result;
+}
+
+static double interp_quad_coords(const double* src, double t) {
+ if (0 == t) {
+ return src[0];
+ }
+ if (1 == t) {
+ return src[4];
+ }
+ double ab = SkDInterp(src[0], src[2], t);
+ double bc = SkDInterp(src[2], src[4], t);
+ double abc = SkDInterp(ab, bc, t);
+ return abc;
+}
+
+bool SkDQuad::monotonicInX() const {
+ return between(fPts[0].fX, fPts[1].fX, fPts[2].fX);
+}
+
+bool SkDQuad::monotonicInY() const {
+ return between(fPts[0].fY, fPts[1].fY, fPts[2].fY);
+}
+
+/*
+Given a quadratic q, t1, and t2, find a small quadratic segment.
+
+The new quadratic is defined by A, B, and C, where
+ A = c[0]*(1 - t1)*(1 - t1) + 2*c[1]*t1*(1 - t1) + c[2]*t1*t1
+ C = c[3]*(1 - t1)*(1 - t1) + 2*c[2]*t1*(1 - t1) + c[1]*t1*t1
+
+To find B, compute the point halfway between t1 and t2:
+
+q(at (t1 + t2)/2) == D
+
+Next, compute where D must be if we know the value of B:
+
+_12 = A/2 + B/2
+12_ = B/2 + C/2
+123 = A/4 + B/2 + C/4
+ = D
+
+Group the known values on one side:
+
+B = D*2 - A/2 - C/2
+*/
+
+// OPTIMIZE? : special case t1 = 1 && t2 = 0
+SkDQuad SkDQuad::subDivide(double t1, double t2) const {
+ if (0 == t1 && 1 == t2) {
+ return *this;
+ }
+ SkDQuad dst;
+ double ax = dst[0].fX = interp_quad_coords(&fPts[0].fX, t1);
+ double ay = dst[0].fY = interp_quad_coords(&fPts[0].fY, t1);
+ double dx = interp_quad_coords(&fPts[0].fX, (t1 + t2) / 2);
+ double dy = interp_quad_coords(&fPts[0].fY, (t1 + t2) / 2);
+ double cx = dst[2].fX = interp_quad_coords(&fPts[0].fX, t2);
+ double cy = dst[2].fY = interp_quad_coords(&fPts[0].fY, t2);
+ /* bx = */ dst[1].fX = 2 * dx - (ax + cx) / 2;
+ /* by = */ dst[1].fY = 2 * dy - (ay + cy) / 2;
+ return dst;
+}
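A worked example of B = 2*D - A/2 - C/2 with plain doubles: sub-dividing the quad (0,0) (1,2) (2,0) over t in [0, 0.5] should reproduce the left half of a de Casteljau chop at 0.5, i.e. controls (0,0) (0.5,1) (1,1):

#include <cstdio>

static double quadAt(double a, double b, double c, double t) {
    double s = 1 - t;
    return s * s * a + 2 * s * t * b + t * t * c;   // Bernstein form, as in ptAtT
}

int main() {
    double x[3] = {0, 1, 2}, y[3] = {0, 2, 0};
    double t1 = 0, t2 = 0.5, tm = (t1 + t2) / 2;
    double Ax = quadAt(x[0], x[1], x[2], t1), Ay = quadAt(y[0], y[1], y[2], t1);
    double Cx = quadAt(x[0], x[1], x[2], t2), Cy = quadAt(y[0], y[1], y[2], t2);
    double Dx = quadAt(x[0], x[1], x[2], tm), Dy = quadAt(y[0], y[1], y[2], tm);
    printf("B = (%g, %g)\n", 2 * Dx - (Ax + Cx) / 2, 2 * Dy - (Ay + Cy) / 2);  // (0.5, 1)
    return 0;
}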
+
+void SkDQuad::align(int endIndex, SkDPoint* dstPt) const {
+ if (fPts[endIndex].fX == fPts[1].fX) {
+ dstPt->fX = fPts[endIndex].fX;
+ }
+ if (fPts[endIndex].fY == fPts[1].fY) {
+ dstPt->fY = fPts[endIndex].fY;
+ }
+}
+
+SkDPoint SkDQuad::subDivide(const SkDPoint& a, const SkDPoint& c, double t1, double t2) const {
+ SkASSERT(t1 != t2);
+ SkDPoint b;
+ SkDQuad sub = subDivide(t1, t2);
+ SkDLine b0 = {{a, sub[1] + (a - sub[0])}};
+ SkDLine b1 = {{c, sub[1] + (c - sub[2])}};
+ SkIntersections i;
+ i.intersectRay(b0, b1);
+ if (i.used() == 1 && i[0][0] >= 0 && i[1][0] >= 0) {
+ b = i.pt(0);
+ } else {
+ SkASSERT(i.used() <= 2);
+ return SkDPoint::Mid(b0[1], b1[1]);
+ }
+ if (t1 == 0 || t2 == 0) {
+ align(0, &b);
+ }
+ if (t1 == 1 || t2 == 1) {
+ align(2, &b);
+ }
+ if (AlmostBequalUlps(b.fX, a.fX)) {
+ b.fX = a.fX;
+ } else if (AlmostBequalUlps(b.fX, c.fX)) {
+ b.fX = c.fX;
+ }
+ if (AlmostBequalUlps(b.fY, a.fY)) {
+ b.fY = a.fY;
+ } else if (AlmostBequalUlps(b.fY, c.fY)) {
+ b.fY = c.fY;
+ }
+ return b;
+}
+
+/* classic one t subdivision */
+static void interp_quad_coords(const double* src, double* dst, double t) {
+ double ab = SkDInterp(src[0], src[2], t);
+ double bc = SkDInterp(src[2], src[4], t);
+ dst[0] = src[0];
+ dst[2] = ab;
+ dst[4] = SkDInterp(ab, bc, t);
+ dst[6] = bc;
+ dst[8] = src[4];
+}
+
+SkDQuadPair SkDQuad::chopAt(double t) const
+{
+ SkDQuadPair dst;
+ interp_quad_coords(&fPts[0].fX, &dst.pts[0].fX, t);
+ interp_quad_coords(&fPts[0].fY, &dst.pts[0].fY, t);
+ return dst;
+}
+
+static int valid_unit_divide(double numer, double denom, double* ratio)
+{
+ if (numer < 0) {
+ numer = -numer;
+ denom = -denom;
+ }
+ if (denom == 0 || numer == 0 || numer >= denom) {
+ return 0;
+ }
+ double r = numer / denom;
+ if (r == 0) { // catch underflow if numer <<<< denom
+ return 0;
+ }
+ *ratio = r;
+ return 1;
+}
+
+/** Quad'(t) = At + B, where
+ A = 2(a - 2b + c)
+ B = 2(b - a)
+ Solve for t, only if it fits between 0 < t < 1
+*/
+int SkDQuad::FindExtrema(const double src[], double tValue[1]) {
+ /* At + B == 0
+ t = -B / A
+ */
+ double a = src[0];
+ double b = src[2];
+ double c = src[4];
+ return valid_unit_divide(a - b, a - b - b + c, tValue);
+}
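Worked example: with Quad'(t) = At + B, A = 2(a - 2b + c) and B = 2(b - a), the extremum sits at t = (a - b) / (a - 2b + c), which is exactly the numer/denom handed to valid_unit_divide. For y-coordinates a = 0, b = 2, c = 0 this gives t = -2 / -4 = 0.5, and the curve's y there is 0.25*0 + 0.5*2 + 0.25*0 = 1.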
+
+/* Parameterization form, given A*t*t + 2*B*t*(1-t) + C*(1-t)*(1-t)
+ *
+ * a = A - 2*B + C
+ * b = 2*B - 2*C
+ * c = C
+ */
+void SkDQuad::SetABC(const double* quad, double* a, double* b, double* c) {
+ *a = quad[0]; // a = A
+ *b = 2 * quad[2]; // b = 2*B
+ *c = quad[4]; // c = C
+ *b -= *c; // b = 2*B - C
+ *a -= *b; // a = A - 2*B + C
+ *b -= *c; // b = 2*B - 2*C
+}
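Worked example: for A = quad[0] = 0, B = quad[2] = 1, C = quad[4] = 2 the steps above give a = 0 - 2 + 2 = 0, b = 2 - 4 = -2, c = 2, so a*t*t + b*t + c = 2 - 2t, which matches expanding 0*t*t + 2*1*t*(1-t) + 2*(1-t)*(1-t) directly.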
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsQuad.h b/gfx/skia/skia/src/pathops/SkPathOpsQuad.h
new file mode 100644
index 000000000..32cfe58ec
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsQuad.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathOpsQuad_DEFINED
+#define SkPathOpsQuad_DEFINED
+
+#include "SkPathOpsPoint.h"
+
+struct SkOpCurve;
+
+struct SkDQuadPair {
+ const SkDQuad& first() const { return (const SkDQuad&) pts[0]; }
+ const SkDQuad& second() const { return (const SkDQuad&) pts[2]; }
+ SkDPoint pts[5];
+};
+
+struct SkDQuad {
+ static const int kPointCount = 3;
+ static const int kPointLast = kPointCount - 1;
+ static const int kMaxIntersections = 4;
+
+ SkDPoint fPts[kPointCount];
+
+ bool collapsed() const {
+ return fPts[0].approximatelyEqual(fPts[1]) && fPts[0].approximatelyEqual(fPts[2]);
+ }
+
+ bool controlsInside() const {
+ SkDVector v01 = fPts[0] - fPts[1];
+ SkDVector v02 = fPts[0] - fPts[2];
+ SkDVector v12 = fPts[1] - fPts[2];
+ return v02.dot(v01) > 0 && v02.dot(v12) > 0;
+ }
+
+ void debugInit() {
+ sk_bzero(fPts, sizeof(fPts));
+ }
+
+ SkDQuad flip() const {
+ SkDQuad result = {{fPts[2], fPts[1], fPts[0]}};
+ return result;
+ }
+
+ static bool IsConic() { return false; }
+
+ const SkDQuad& set(const SkPoint pts[kPointCount]) {
+ fPts[0] = pts[0];
+ fPts[1] = pts[1];
+ fPts[2] = pts[2];
+ return *this;
+ }
+
+ const SkDPoint& operator[](int n) const { SkASSERT(n >= 0 && n < kPointCount); return fPts[n]; }
+ SkDPoint& operator[](int n) { SkASSERT(n >= 0 && n < kPointCount); return fPts[n]; }
+
+ static int AddValidTs(double s[], int realRoots, double* t);
+ void align(int endIndex, SkDPoint* dstPt) const;
+ SkDQuadPair chopAt(double t) const;
+ SkDVector dxdyAtT(double t) const;
+ static int FindExtrema(const double src[], double tValue[1]);
+
+ /**
+ * Return the number of valid roots (0 < root < 1) for this quad intersecting the
+ * specified horizontal line.
+ */
+ int horizontalIntersect(double yIntercept, double roots[2]) const;
+
+ bool hullIntersects(const SkDQuad& , bool* isLinear) const;
+ bool hullIntersects(const SkDConic& , bool* isLinear) const;
+ bool hullIntersects(const SkDCubic& , bool* isLinear) const;
+ bool isLinear(int startIndex, int endIndex) const;
+ bool monotonicInX() const;
+ bool monotonicInY() const;
+ void otherPts(int oddMan, const SkDPoint* endPt[2]) const;
+ SkDPoint ptAtT(double t) const;
+ static int RootsReal(double A, double B, double C, double t[2]);
+ static int RootsValidT(const double A, const double B, const double C, double s[2]);
+ static void SetABC(const double* quad, double* a, double* b, double* c);
+ SkDQuad subDivide(double t1, double t2) const;
+ static SkDQuad SubDivide(const SkPoint a[kPointCount], double t1, double t2) {
+ SkDQuad quad;
+ quad.set(a);
+ return quad.subDivide(t1, t2);
+ }
+ SkDPoint subDivide(const SkDPoint& a, const SkDPoint& c, double t1, double t2) const;
+ static SkDPoint SubDivide(const SkPoint pts[kPointCount], const SkDPoint& a, const SkDPoint& c,
+ double t1, double t2) {
+ SkDQuad quad;
+ quad.set(pts);
+ return quad.subDivide(a, c, t1, t2);
+ }
+
+ /**
+ * Return the number of valid roots (0 < root < 1) for this quad intersecting the
+ * specified vertical line.
+ */
+ int verticalIntersect(double xIntercept, double roots[2]) const;
+
+ SkDCubic debugToCubic() const;
+ // utilities callable by the user from the debugger when the implementation code is linked in
+ void dump() const;
+ void dumpID(int id) const;
+ void dumpInner() const;
+
+private:
+// static double Tangent(const double* quadratic, double t); // uncalled
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsRect.cpp b/gfx/skia/skia/src/pathops/SkPathOpsRect.cpp
new file mode 100644
index 000000000..8c0115353
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsRect.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkPathOpsConic.h"
+#include "SkPathOpsCubic.h"
+#include "SkPathOpsLine.h"
+#include "SkPathOpsQuad.h"
+#include "SkPathOpsRect.h"
+
+void SkDRect::setBounds(const SkDQuad& curve, const SkDQuad& sub, double startT, double endT) {
+ set(sub[0]);
+ add(sub[2]);
+ double tValues[2];
+ int roots = 0;
+ if (!sub.monotonicInX()) {
+ roots = SkDQuad::FindExtrema(&sub[0].fX, tValues);
+ }
+ if (!sub.monotonicInY()) {
+ roots += SkDQuad::FindExtrema(&sub[0].fY, &tValues[roots]);
+ }
+ for (int index = 0; index < roots; ++index) {
+ double t = startT + (endT - startT) * tValues[index];
+ add(curve.ptAtT(t));
+ }
+}
+
+void SkDRect::setBounds(const SkDConic& curve, const SkDConic& sub, double startT, double endT) {
+ set(sub[0]);
+ add(sub[2]);
+ double tValues[2];
+ int roots = 0;
+ if (!sub.monotonicInX()) {
+ roots = SkDConic::FindExtrema(&sub[0].fX, sub.fWeight, tValues);
+ }
+ if (!sub.monotonicInY()) {
+ roots += SkDConic::FindExtrema(&sub[0].fY, sub.fWeight, &tValues[roots]);
+ }
+ for (int index = 0; index < roots; ++index) {
+ double t = startT + (endT - startT) * tValues[index];
+ add(curve.ptAtT(t));
+ }
+}
+
+void SkDRect::setBounds(const SkDCubic& curve, const SkDCubic& sub, double startT, double endT) {
+ set(sub[0]);
+ add(sub[3]);
+ double tValues[4];
+ int roots = 0;
+ if (!sub.monotonicInX()) {
+ roots = SkDCubic::FindExtrema(&sub[0].fX, tValues);
+ }
+ if (!sub.monotonicInY()) {
+ roots += SkDCubic::FindExtrema(&sub[0].fY, &tValues[roots]);
+ }
+ for (int index = 0; index < roots; ++index) {
+ double t = startT + (endT - startT) * tValues[index];
+ add(curve.ptAtT(t));
+ }
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsRect.h b/gfx/skia/skia/src/pathops/SkPathOpsRect.h
new file mode 100644
index 000000000..d4e5f5489
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsRect.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsRect_DEFINED
+#define SkPathOpsRect_DEFINED
+
+#include "SkPathOpsPoint.h"
+
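+// Axis-aligned bounds in double precision, used by path ops to bound pieces of curves during
+// intersection; the four-argument setBounds() overloads are implemented in SkPathOpsRect.cpp.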
+struct SkDRect {
+ double fLeft, fTop, fRight, fBottom;
+
+ void add(const SkDPoint& pt) {
+ fLeft = SkTMin(fLeft, pt.fX);
+ fTop = SkTMin(fTop, pt.fY);
+ fRight = SkTMax(fRight, pt.fX);
+ fBottom = SkTMax(fBottom, pt.fY);
+ }
+
+ bool contains(const SkDPoint& pt) const {
+ return approximately_between(fLeft, pt.fX, fRight)
+ && approximately_between(fTop, pt.fY, fBottom);
+ }
+
+ void debugInit();
+
+ bool intersects(const SkDRect& r) const {
+ SkASSERT(fLeft <= fRight);
+ SkASSERT(fTop <= fBottom);
+ SkASSERT(r.fLeft <= r.fRight);
+ SkASSERT(r.fTop <= r.fBottom);
+ return r.fLeft <= fRight && fLeft <= r.fRight && r.fTop <= fBottom && fTop <= r.fBottom;
+ }
+
+ void set(const SkDPoint& pt) {
+ fLeft = fRight = pt.fX;
+ fTop = fBottom = pt.fY;
+ }
+
+ double width() const {
+ return fRight - fLeft;
+ }
+
+ double height() const {
+ return fBottom - fTop;
+ }
+
+ void setBounds(const SkDConic& curve) {
+ setBounds(curve, curve, 0, 1);
+ }
+
+ void setBounds(const SkDConic& curve, const SkDConic& sub, double tStart, double tEnd);
+
+ void setBounds(const SkDCubic& curve) {
+ setBounds(curve, curve, 0, 1);
+ }
+
+ void setBounds(const SkDCubic& curve, const SkDCubic& sub, double tStart, double tEnd);
+
+ void setBounds(const SkDQuad& curve) {
+ setBounds(curve, curve, 0, 1);
+ }
+
+ void setBounds(const SkDQuad& curve, const SkDQuad& sub, double tStart, double tEnd);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsSimplify.cpp b/gfx/skia/skia/src/pathops/SkPathOpsSimplify.cpp
new file mode 100644
index 000000000..f9f8f5c71
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsSimplify.cpp
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkAddIntersections.h"
+#include "SkOpCoincidence.h"
+#include "SkOpEdgeBuilder.h"
+#include "SkPathOpsCommon.h"
+#include "SkPathWriter.h"
+
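+// Walk the contour list from a sortable top span, following segments with active winding to
+// emit closed output contours; returns false only if a curve could not be added to the writer.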
+static bool bridgeWinding(SkOpContourHead* contourList, SkPathWriter* simple) {
+ bool unsortable = false;
+ do {
+ SkOpSpan* span = FindSortableTop(contourList);
+ if (!span) {
+ break;
+ }
+ SkOpSegment* current = span->segment();
+ SkOpSpanBase* start = span->next();
+ SkOpSpanBase* end = span;
+ SkTDArray<SkOpSpanBase*> chase;
+ do {
+ if (current->activeWinding(start, end)) {
+ do {
+ if (!unsortable && current->done()) {
+ break;
+ }
+ SkASSERT(unsortable || !current->done());
+ SkOpSpanBase* nextStart = start;
+ SkOpSpanBase* nextEnd = end;
+ SkOpSegment* next = current->findNextWinding(&chase, &nextStart, &nextEnd,
+ &unsortable);
+ if (!next) {
+ break;
+ }
+ #if DEBUG_FLOW
+ SkDebugf("%s current id=%d from=(%1.9g,%1.9g) to=(%1.9g,%1.9g)\n", __FUNCTION__,
+ current->debugID(), start->pt().fX, start->pt().fY,
+ end->pt().fX, end->pt().fY);
+ #endif
+ if (!current->addCurveTo(start, end, simple)) {
+ return false;
+ }
+ current = next;
+ start = nextStart;
+ end = nextEnd;
+ } while (!simple->isClosed() && (!unsortable || !start->starter(end)->done()));
+ if (current->activeWinding(start, end) && !simple->isClosed()) {
+ SkOpSpan* spanStart = start->starter(end);
+ if (!spanStart->done()) {
+ if (!current->addCurveTo(start, end, simple)) {
+ return false;
+ }
+ current->markDone(spanStart);
+ }
+ }
+ simple->finishContour();
+ } else {
+ SkOpSpanBase* last = current->markAndChaseDone(start, end);
+ if (last && !last->chased()) {
+ last->setChased(true);
+ SkASSERT(!SkPathOpsDebug::ChaseContains(chase, last));
+ *chase.append() = last;
+#if DEBUG_WINDING
+ SkDebugf("%s chase.append id=%d", __FUNCTION__, last->segment()->debugID());
+ if (!last->final()) {
+ SkDebugf(" windSum=%d", last->upCast()->windSum());
+ }
+ SkDebugf("\n");
+#endif
+ }
+ }
+ current = FindChase(&chase, &start, &end);
+ SkPathOpsDebug::ShowActiveSpans(contourList);
+ if (!current) {
+ break;
+ }
+ } while (true);
+ } while (true);
+ return true;
+}
+
+// returns true if all edges were processed
+static bool bridgeXor(SkOpContourHead* contourList, SkPathWriter* simple) {
+ SkOpSegment* current;
+ SkOpSpanBase* start;
+ SkOpSpanBase* end;
+ bool unsortable = false;
+ while ((current = FindUndone(contourList, &start, &end))) {
+ do {
+ if (!unsortable && current->done()) {
+ SkPathOpsDebug::ShowActiveSpans(contourList);
+ }
+ SkASSERT(unsortable || !current->done());
+ SkOpSpanBase* nextStart = start;
+ SkOpSpanBase* nextEnd = end;
+ SkOpSegment* next = current->findNextXor(&nextStart, &nextEnd, &unsortable);
+ if (!next) {
+ if (!unsortable && simple->hasMove()
+ && current->verb() != SkPath::kLine_Verb
+ && !simple->isClosed()) {
+ if (!current->addCurveTo(start, end, simple)) {
+ return false;
+ }
+ if (!simple->isClosed()) {
+ SkPathOpsDebug::ShowActiveSpans(contourList);
+ }
+ }
+ break;
+ }
+ #if DEBUG_FLOW
+ SkDebugf("%s current id=%d from=(%1.9g,%1.9g) to=(%1.9g,%1.9g)\n", __FUNCTION__,
+ current->debugID(), start->pt().fX, start->pt().fY,
+ end->pt().fX, end->pt().fY);
+ #endif
+ if (!current->addCurveTo(start, end, simple)) {
+ return false;
+ }
+ current = next;
+ start = nextStart;
+ end = nextEnd;
+ } while (!simple->isClosed() && (!unsortable || !start->starter(end)->done()));
+ if (!simple->isClosed()) {
+ SkASSERT(unsortable);
+ SkOpSpan* spanStart = start->starter(end);
+ if (!spanStart->done()) {
+ if (!current->addCurveTo(start, end, simple)) {
+ return false;
+ }
+ current->markDone(spanStart);
+ }
+ }
+ simple->finishContour();
+ SkPathOpsDebug::ShowActiveSpans(contourList);
+ }
+ return true;
+}
+
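+// Simplify pipeline: turn the path into segments, sort the contour list, find all intersections
+// between segments, resolve coincidence, then walk the result with bridgeWinding or bridgeXor to
+// rebuild closed contours.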
+// FIXME : add this as a member of SkPath
+bool SimplifyDebug(const SkPath& path, SkPath* result
+ SkDEBUGPARAMS(bool skipAssert) SkDEBUGPARAMS(const char* testName)) {
+ // returns 1 for evenodd, -1 for winding, regardless of inverse-ness
+ SkPath::FillType fillType = path.isInverseFillType() ? SkPath::kInverseEvenOdd_FillType
+ : SkPath::kEvenOdd_FillType;
+ if (path.isConvex()) {
+ if (result != &path) {
+ *result = path;
+ }
+ result->setFillType(fillType);
+ return true;
+ }
+ // turn path into list of segments
+ SkChunkAlloc allocator(4096); // FIXME: constant-ize, tune
+ SkOpContour contour;
+ SkOpContourHead* contourList = static_cast<SkOpContourHead*>(&contour);
+ SkOpGlobalState globalState(contourList, &allocator
+ SkDEBUGPARAMS(skipAssert) SkDEBUGPARAMS(testName));
+ SkOpCoincidence coincidence(&globalState);
+ SkScalar scaleFactor = ScaleFactor(path);
+ SkPath scaledPath;
+ const SkPath* workingPath;
+ if (scaleFactor > SK_Scalar1) {
+ ScalePath(path, 1.f / scaleFactor, &scaledPath);
+ workingPath = &scaledPath;
+ } else {
+ workingPath = &path;
+ }
+#if DEBUG_SORT
+ SkPathOpsDebug::gSortCount = SkPathOpsDebug::gSortCountDefault;
+#endif
+ SkOpEdgeBuilder builder(*workingPath, contourList, &globalState);
+ if (!builder.finish()) {
+ return false;
+ }
+#if DEBUG_DUMP_SEGMENTS
+ contour.dumpSegments();
+#endif
+ if (!SortContourList(&contourList, false, false)) {
+ result->reset();
+ result->setFillType(fillType);
+ return true;
+ }
+ // find all intersections between segments
+ SkOpContour* current = contourList;
+ do {
+ SkOpContour* next = current;
+ while (AddIntersectTs(current, next, &coincidence)
+ && (next = next->next()));
+ } while ((current = current->next()));
+#if DEBUG_VALIDATE
+ globalState.setPhase(SkOpPhase::kWalking);
+#endif
+ bool success = HandleCoincidence(contourList, &coincidence);
+#if DEBUG_COIN
+ globalState.debugAddToGlobalCoinDicts();
+#endif
+ if (!success) {
+ return false;
+ }
+#if DEBUG_DUMP_ALIGNMENT
+ contour.dumpSegments("aligned");
+#endif
+ // construct closed contours
+ result->reset();
+ result->setFillType(fillType);
+ SkPathWriter wrapper(*result);
+ if (builder.xorMask() == kWinding_PathOpsMask ? !bridgeWinding(contourList, &wrapper)
+ : !bridgeXor(contourList, &wrapper)) {
+ return false;
+ }
+ wrapper.assemble(); // if some edges could not be resolved, assemble remaining
+ if (scaleFactor > 1) {
+ ScalePath(*result, scaleFactor, result);
+ }
+ return true;
+}
+
+bool Simplify(const SkPath& path, SkPath* result) {
+ return SimplifyDebug(path, result SkDEBUGPARAMS(true) SkDEBUGPARAMS(nullptr));
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsTSect.cpp b/gfx/skia/skia/src/pathops/SkPathOpsTSect.cpp
new file mode 100644
index 000000000..3e7817ca9
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsTSect.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPathOpsTSect.h"
+
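+// Each curve pair is intersected by building one SkTSect per curve and letting BinarySearch()
+// subdivide the two span lists against each other until the intersections are resolved.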
+int SkIntersections::intersect(const SkDQuad& quad1, const SkDQuad& quad2) {
+ SkTSect<SkDQuad, SkDQuad> sect1(quad1
+ SkDEBUGPARAMS(debugGlobalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(1));
+ SkTSect<SkDQuad, SkDQuad> sect2(quad2
+ SkDEBUGPARAMS(debugGlobalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(2));
+ SkTSect<SkDQuad, SkDQuad>::BinarySearch(&sect1, &sect2, this);
+ return used();
+}
+
+int SkIntersections::intersect(const SkDConic& conic, const SkDQuad& quad) {
+ SkTSect<SkDConic, SkDQuad> sect1(conic
+ SkDEBUGPARAMS(debugGlobalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(1));
+ SkTSect<SkDQuad, SkDConic> sect2(quad
+ SkDEBUGPARAMS(debugGlobalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(2));
+ SkTSect<SkDConic, SkDQuad>::BinarySearch(&sect1, &sect2, this);
+ return used();
+}
+
+int SkIntersections::intersect(const SkDConic& conic1, const SkDConic& conic2) {
+ SkTSect<SkDConic, SkDConic> sect1(conic1
+ SkDEBUGPARAMS(debugGlobalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(1));
+ SkTSect<SkDConic, SkDConic> sect2(conic2
+ SkDEBUGPARAMS(debugGlobalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(2));
+ SkTSect<SkDConic, SkDConic>::BinarySearch(&sect1, &sect2, this);
+ return used();
+}
+
+int SkIntersections::intersect(const SkDCubic& cubic, const SkDQuad& quad) {
+ SkTSect<SkDCubic, SkDQuad> sect1(cubic
+ SkDEBUGPARAMS(debugGlobalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(1));
+ SkTSect<SkDQuad, SkDCubic> sect2(quad
+ SkDEBUGPARAMS(debugGlobalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(2));
+ SkTSect<SkDCubic, SkDQuad>::BinarySearch(&sect1, &sect2, this);
+ return used();
+}
+
+int SkIntersections::intersect(const SkDCubic& cubic, const SkDConic& conic) {
+ SkTSect<SkDCubic, SkDConic> sect1(cubic
+ SkDEBUGPARAMS(debugGlobalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(1));
+ SkTSect<SkDConic, SkDCubic> sect2(conic
+ SkDEBUGPARAMS(debugGlobalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(2));
+ SkTSect<SkDCubic, SkDConic>::BinarySearch(&sect1, &sect2, this);
+ return used();
+}
+
+int SkIntersections::intersect(const SkDCubic& cubic1, const SkDCubic& cubic2) {
+ SkTSect<SkDCubic, SkDCubic> sect1(cubic1
+ SkDEBUGPARAMS(debugGlobalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(1));
+ SkTSect<SkDCubic, SkDCubic> sect2(cubic2
+ SkDEBUGPARAMS(debugGlobalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(2));
+ SkTSect<SkDCubic, SkDCubic>::BinarySearch(&sect1, &sect2, this);
+ return used();
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsTSect.h b/gfx/skia/skia/src/pathops/SkPathOpsTSect.h
new file mode 100644
index 000000000..a04a4e442
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsTSect.h
@@ -0,0 +1,2365 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsTSect_DEFINED
+#define SkPathOpsTSect_DEFINED
+
+#include "SkChunkAlloc.h"
+#include "SkPathOpsBounds.h"
+#include "SkPathOpsRect.h"
+#include "SkIntersections.h"
+#include "SkTSort.h"
+
+#ifdef SK_DEBUG
+typedef uint8_t SkOpDebugBool;
+#else
+typedef bool SkOpDebugBool;
+#endif
+
+/* TCurve and OppCurve are one of { SkDQuad, SkDConic, SkDCubic } */
+template<typename TCurve, typename OppCurve>
+class SkTCoincident {
+public:
+ SkTCoincident() {
+ this->init();
+ }
+
+ void debugInit() {
+#ifdef SK_DEBUG
+ this->fPerpPt.fX = this->fPerpPt.fY = SK_ScalarNaN;
+ this->fPerpT = SK_ScalarNaN;
+ this->fMatch = 0xFF;
+#endif
+ }
+
+ char dumpIsCoincidentStr() const;
+ void dump() const;
+
+ bool isMatch() const {
+ SkASSERT(!!fMatch == fMatch);
+ return SkToBool(fMatch);
+ }
+
+ void init() {
+ fPerpT = -1;
+ fMatch = false;
+ fPerpPt.fX = fPerpPt.fY = SK_ScalarNaN;
+ }
+
+ void markCoincident() {
+ if (!fMatch) {
+ fPerpT = -1;
+ }
+ fMatch = true;
+ }
+
+ const SkDPoint& perpPt() const {
+ return fPerpPt;
+ }
+
+ double perpT() const {
+ return fPerpT;
+ }
+
+ void setPerp(const TCurve& c1, double t, const SkDPoint& cPt, const OppCurve& );
+
+private:
+ SkDPoint fPerpPt;
+ double fPerpT; // perpendicular intersection on opposite curve
+ SkOpDebugBool fMatch;
+};
+
+template<typename TCurve, typename OppCurve> class SkTSect;
+template<typename TCurve, typename OppCurve> class SkTSpan;
+
+template<typename TCurve, typename OppCurve>
+struct SkTSpanBounded {
+ SkTSpan<TCurve, OppCurve>* fBounded;
+ SkTSpanBounded* fNext;
+};
+
+/* Curve is either TCurve or SkDCubic */
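+// An SkTSpan is a [fStartT, fEndT] slice of TCurve, kept in a linked list and tracking, via
+// fBounded, the spans of the opposite curve that may still intersect it.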
+template<typename TCurve, typename OppCurve>
+class SkTSpan {
+public:
+ void addBounded(SkTSpan<OppCurve, TCurve>* , SkChunkAlloc* );
+ double closestBoundedT(const SkDPoint& pt) const;
+ bool contains(double t) const;
+
+ void debugInit() {
+ TCurve dummy;
+ dummy.debugInit();
+ init(dummy);
+ initBounds(dummy);
+ fCoinStart.init();
+ fCoinEnd.init();
+ }
+
+ const SkTSect<OppCurve, TCurve>* debugOpp() const;
+
+#ifdef SK_DEBUG
+ void debugSetGlobalState(SkOpGlobalState* state) {
+ fDebugGlobalState = state;
+ }
+#endif
+
+ const SkTSpan* debugSpan(int ) const;
+ const SkTSpan* debugT(double t) const;
+#ifdef SK_DEBUG
+ bool debugIsBefore(const SkTSpan* span) const;
+#endif
+ void dump() const;
+ void dumpAll() const;
+ void dumpBounded(int id) const;
+ void dumpBounds() const;
+ void dumpCoin() const;
+
+ double endT() const {
+ return fEndT;
+ }
+
+ SkTSpan<OppCurve, TCurve>* findOppSpan(const SkTSpan<OppCurve, TCurve>* opp) const;
+
+ SkTSpan<OppCurve, TCurve>* findOppT(double t) const {
+ SkTSpan<OppCurve, TCurve>* result = oppT(t);
+ SkOPASSERT(result);
+ return result;
+ }
+
+ SkDEBUGCODE(SkOpGlobalState* globalState() const { return fDebugGlobalState; })
+
+ bool hasOppT(double t) const {
+ return SkToBool(oppT(t));
+ }
+
+ int hullsIntersect(SkTSpan<OppCurve, TCurve>* span, bool* start, bool* oppStart);
+ void init(const TCurve& );
+ void initBounds(const TCurve& );
+
+ bool isBounded() const {
+ return fBounded != nullptr;
+ }
+
+ bool linearsIntersect(SkTSpan<OppCurve, TCurve>* span);
+ double linearT(const SkDPoint& ) const;
+
+ void markCoincident() {
+ fCoinStart.markCoincident();
+ fCoinEnd.markCoincident();
+ }
+
+ const SkTSpan* next() const {
+ return fNext;
+ }
+
+ bool onlyEndPointsInCommon(const SkTSpan<OppCurve, TCurve>* opp, bool* start,
+ bool* oppStart, bool* ptsInCommon);
+
+ const TCurve& part() const {
+ return fPart;
+ }
+
+ bool removeAllBounded();
+ bool removeBounded(const SkTSpan<OppCurve, TCurve>* opp);
+
+ void reset() {
+ fBounded = nullptr;
+ }
+
+ void resetBounds(const TCurve& curve) {
+ fIsLinear = fIsLine = false;
+ initBounds(curve);
+ }
+
+ bool split(SkTSpan* work, SkChunkAlloc* heap) {
+ return splitAt(work, (work->fStartT + work->fEndT) * 0.5, heap);
+ }
+
+ bool splitAt(SkTSpan* work, double t, SkChunkAlloc* heap);
+
+ double startT() const {
+ return fStartT;
+ }
+
+private:
+
+ // implementation is for testing only
+ int debugID() const {
+ return PATH_OPS_DEBUG_T_SECT_RELEASE(fID, -1);
+ }
+
+ void dumpID() const;
+
+ int hullCheck(const SkTSpan<OppCurve, TCurve>* opp, bool* start, bool* oppStart);
+ int linearIntersects(const OppCurve& ) const;
+ SkTSpan<OppCurve, TCurve>* oppT(double t) const;
+
+ void validate() const;
+ void validateBounded() const;
+ void validatePerpT(double oppT) const;
+ void validatePerpPt(double t, const SkDPoint& ) const;
+
+ TCurve fPart;
+ SkTCoincident<TCurve, OppCurve> fCoinStart;
+ SkTCoincident<TCurve, OppCurve> fCoinEnd;
+ SkTSpanBounded<OppCurve, TCurve>* fBounded;
+ SkTSpan* fPrev;
+ SkTSpan* fNext;
+ SkDRect fBounds;
+ double fStartT;
+ double fEndT;
+ double fBoundsMax;
+ SkOpDebugBool fCollapsed;
+ SkOpDebugBool fHasPerp;
+ SkOpDebugBool fIsLinear;
+ SkOpDebugBool fIsLine;
+ SkOpDebugBool fDeleted;
+ SkDEBUGCODE(SkOpGlobalState* fDebugGlobalState);
+ SkDEBUGCODE(SkTSect<TCurve, OppCurve>* fDebugSect);
+ PATH_OPS_DEBUG_T_SECT_CODE(int fID);
+ friend class SkTSect<TCurve, OppCurve>;
+ friend class SkTSect<OppCurve, TCurve>;
+ friend class SkTSpan<OppCurve, TCurve>;
+};
+
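+// An SkTSect owns the span list for one curve of an intersection pair; the static BinarySearch()
+// drives the mutual subdivision of two sects to locate their intersections.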
+template<typename TCurve, typename OppCurve>
+class SkTSect {
+public:
+ SkTSect(const TCurve& c SkDEBUGPARAMS(SkOpGlobalState* ) PATH_OPS_DEBUG_T_SECT_PARAMS(int id));
+ static void BinarySearch(SkTSect* sect1, SkTSect<OppCurve, TCurve>* sect2,
+ SkIntersections* intersections);
+
+ SkDEBUGCODE(SkOpGlobalState* globalState() { return fDebugGlobalState; })
+ // for testing only
+ bool debugHasBounded(const SkTSpan<OppCurve, TCurve>* ) const;
+
+ const SkTSect<OppCurve, TCurve>* debugOpp() const {
+ return SkDEBUGRELEASE(fOppSect, nullptr);
+ }
+
+ const SkTSpan<TCurve, OppCurve>* debugSpan(int id) const;
+ const SkTSpan<TCurve, OppCurve>* debugT(double t) const;
+ void dump() const;
+ void dumpBoth(SkTSect<OppCurve, TCurve>* ) const;
+ void dumpBounded(int id) const;
+ void dumpBounds() const;
+ void dumpCoin() const;
+ void dumpCoinCurves() const;
+ void dumpCurves() const;
+
+private:
+ enum {
+ kZeroS1Set = 1,
+ kOneS1Set = 2,
+ kZeroS2Set = 4,
+ kOneS2Set = 8
+ };
+
+ SkTSpan<TCurve, OppCurve>* addFollowing(SkTSpan<TCurve, OppCurve>* prior);
+ void addForPerp(SkTSpan<OppCurve, TCurve>* span, double t);
+ SkTSpan<TCurve, OppCurve>* addOne();
+
+ SkTSpan<TCurve, OppCurve>* addSplitAt(SkTSpan<TCurve, OppCurve>* span, double t) {
+ SkTSpan<TCurve, OppCurve>* result = this->addOne();
+ SkDEBUGCODE(result->debugSetGlobalState(this->globalState()));
+ result->splitAt(span, t, &fHeap);
+ result->initBounds(fCurve);
+ span->initBounds(fCurve);
+ return result;
+ }
+
+ bool binarySearchCoin(SkTSect<OppCurve, TCurve>* , double tStart, double tStep, double* t,
+ double* oppT);
+ SkTSpan<TCurve, OppCurve>* boundsMax() const;
+ bool coincidentCheck(SkTSect<OppCurve, TCurve>* sect2);
+ void coincidentForce(SkTSect<OppCurve, TCurve>* sect2, double start1s, double start1e);
+ bool coincidentHasT(double t);
+ int collapsed() const;
+ void computePerpendiculars(SkTSect<OppCurve, TCurve>* sect2, SkTSpan<TCurve, OppCurve>* first,
+ SkTSpan<TCurve, OppCurve>* last);
+ int countConsecutiveSpans(SkTSpan<TCurve, OppCurve>* first,
+ SkTSpan<TCurve, OppCurve>** last) const;
+
+ int debugID() const {
+ return PATH_OPS_DEBUG_T_SECT_RELEASE(fID, -1);
+ }
+
+ bool deleteEmptySpans();
+ void dumpCommon(const SkTSpan<TCurve, OppCurve>* ) const;
+ void dumpCommonCurves(const SkTSpan<TCurve, OppCurve>* ) const;
+ static int EndsEqual(const SkTSect* sect1, const SkTSect<OppCurve, TCurve>* sect2,
+ SkIntersections* );
+ bool extractCoincident(SkTSect<OppCurve, TCurve>* sect2, SkTSpan<TCurve, OppCurve>* first,
+ SkTSpan<TCurve, OppCurve>* last, SkTSpan<TCurve, OppCurve>** result);
+ SkTSpan<TCurve, OppCurve>* findCoincidentRun(SkTSpan<TCurve, OppCurve>* first,
+ SkTSpan<TCurve, OppCurve>** lastPtr);
+ int intersects(SkTSpan<TCurve, OppCurve>* span, SkTSect<OppCurve, TCurve>* opp,
+ SkTSpan<OppCurve, TCurve>* oppSpan, int* oppResult);
+ bool isParallel(const SkDLine& thisLine, const SkTSect<OppCurve, TCurve>* opp) const;
+ int linesIntersect(SkTSpan<TCurve, OppCurve>* span, SkTSect<OppCurve, TCurve>* opp,
+ SkTSpan<OppCurve, TCurve>* oppSpan, SkIntersections* );
+ bool markSpanGone(SkTSpan<TCurve, OppCurve>* span);
+ bool matchedDirection(double t, const SkTSect<OppCurve, TCurve>* sect2, double t2) const;
+ void matchedDirCheck(double t, const SkTSect<OppCurve, TCurve>* sect2, double t2,
+ bool* calcMatched, bool* oppMatched) const;
+ void mergeCoincidence(SkTSect<OppCurve, TCurve>* sect2);
+ SkTSpan<TCurve, OppCurve>* prev(SkTSpan<TCurve, OppCurve>* ) const;
+ void removeByPerpendicular(SkTSect<OppCurve, TCurve>* opp);
+ void recoverCollapsed();
+ void removeCoincident(SkTSpan<TCurve, OppCurve>* span, bool isBetween);
+ void removeAllBut(const SkTSpan<OppCurve, TCurve>* keep, SkTSpan<TCurve, OppCurve>* span,
+ SkTSect<OppCurve, TCurve>* opp);
+ bool removeSpan(SkTSpan<TCurve, OppCurve>* span);
+ void removeSpanRange(SkTSpan<TCurve, OppCurve>* first, SkTSpan<TCurve, OppCurve>* last);
+ void removeSpans(SkTSpan<TCurve, OppCurve>* span, SkTSect<OppCurve, TCurve>* opp);
+ SkTSpan<TCurve, OppCurve>* spanAtT(double t, SkTSpan<TCurve, OppCurve>** priorSpan);
+ SkTSpan<TCurve, OppCurve>* tail();
+ void trim(SkTSpan<TCurve, OppCurve>* span, SkTSect<OppCurve, TCurve>* opp);
+ void unlinkSpan(SkTSpan<TCurve, OppCurve>* span);
+ bool updateBounded(SkTSpan<TCurve, OppCurve>* first, SkTSpan<TCurve, OppCurve>* last,
+ SkTSpan<OppCurve, TCurve>* oppFirst);
+ void validate() const;
+ void validateBounded() const;
+
+ const TCurve& fCurve;
+ SkChunkAlloc fHeap;
+ SkTSpan<TCurve, OppCurve>* fHead;
+ SkTSpan<TCurve, OppCurve>* fCoincident;
+ SkTSpan<TCurve, OppCurve>* fDeleted;
+ int fActiveCount;
+ bool fRemovedStartT;
+ bool fRemovedEndT;
+ SkDEBUGCODE(SkOpGlobalState* fDebugGlobalState);
+ SkDEBUGCODE(SkTSect<OppCurve, TCurve>* fOppSect);
+ PATH_OPS_DEBUG_T_SECT_CODE(int fID);
+ PATH_OPS_DEBUG_T_SECT_CODE(int fDebugCount);
+#if DEBUG_T_SECT
+ int fDebugAllocatedCount;
+#endif
+ friend class SkTSpan<TCurve, OppCurve>;
+ friend class SkTSpan<OppCurve, TCurve>;
+ friend class SkTSect<OppCurve, TCurve>;
+};
+
+#define COINCIDENT_SPAN_COUNT 9
+
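+// Intersect c2 with the ray through cPt perpendicular to c1 at t, keep the closest hit, and set
+// fMatch when that hit lands back on cPt, marking the two curves as locally coincident there.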
+template<typename TCurve, typename OppCurve>
+void SkTCoincident<TCurve, OppCurve>::setPerp(const TCurve& c1, double t,
+ const SkDPoint& cPt, const OppCurve& c2) {
+ SkDVector dxdy = c1.dxdyAtT(t);
+ SkDLine perp = {{ cPt, {cPt.fX + dxdy.fY, cPt.fY - dxdy.fX} }};
+ SkIntersections i;
+ int used = i.intersectRay(c2, perp);
+ // only keep closest
+ if (used == 0 || used == 3) {
+ this->init();
+ return;
+ }
+ fPerpT = i[0][0];
+ fPerpPt = i.pt(0);
+ SkASSERT(used <= 2);
+ if (used == 2) {
+ double distSq = (fPerpPt - cPt).lengthSquared();
+ double dist2Sq = (i.pt(1) - cPt).lengthSquared();
+ if (dist2Sq < distSq) {
+ fPerpT = i[0][1];
+ fPerpPt = i.pt(1);
+ }
+ }
+#if DEBUG_T_SECT
+ SkDebugf("setPerp t=%1.9g cPt=(%1.9g,%1.9g) %s oppT=%1.9g fPerpPt=(%1.9g,%1.9g)\n",
+ t, cPt.fX, cPt.fY,
+ cPt.approximatelyEqual(fPerpPt) ? "==" : "!=", fPerpT, fPerpPt.fX, fPerpPt.fY);
+#endif
+ fMatch = cPt.approximatelyEqual(fPerpPt);
+#if DEBUG_T_SECT
+ if (fMatch) {
+ SkDebugf(""); // allow setting breakpoint
+ }
+#endif
+}
+
+template<typename TCurve, typename OppCurve>
+void SkTSpan<TCurve, OppCurve>::addBounded(SkTSpan<OppCurve, TCurve>* span, SkChunkAlloc* heap) {
+ SkTSpanBounded<OppCurve, TCurve>* bounded = new (heap->allocThrow(
+ sizeof(SkTSpanBounded<OppCurve, TCurve>)))(SkTSpanBounded<OppCurve, TCurve>);
+ bounded->fBounded = span;
+ bounded->fNext = fBounded;
+ fBounded = bounded;
+}
+
+template<typename TCurve, typename OppCurve>
+SkTSpan<TCurve, OppCurve>* SkTSect<TCurve, OppCurve>::addFollowing(
+ SkTSpan<TCurve, OppCurve>* prior) {
+ SkTSpan<TCurve, OppCurve>* result = this->addOne();
+ SkDEBUGCODE(result->debugSetGlobalState(this->globalState()));
+ result->fStartT = prior ? prior->fEndT : 0;
+ SkTSpan<TCurve, OppCurve>* next = prior ? prior->fNext : fHead;
+ result->fEndT = next ? next->fStartT : 1;
+ result->fPrev = prior;
+ result->fNext = next;
+ if (prior) {
+ prior->fNext = result;
+ } else {
+ fHead = result;
+ }
+ if (next) {
+ next->fPrev = result;
+ }
+ result->resetBounds(fCurve);
+ result->validate();
+ return result;
+}
+
+template<typename TCurve, typename OppCurve>
+void SkTSect<TCurve, OppCurve>::addForPerp(SkTSpan<OppCurve, TCurve>* span, double t) {
+ if (!span->hasOppT(t)) {
+ SkTSpan<TCurve, OppCurve>* priorSpan;
+ SkTSpan<TCurve, OppCurve>* opp = this->spanAtT(t, &priorSpan);
+ if (!opp) {
+ opp = this->addFollowing(priorSpan);
+#if DEBUG_PERP
+ SkDebugf("%s priorSpan=%d t=%1.9g opp=%d\n", __FUNCTION__, priorSpan ?
+ priorSpan->debugID() : -1, t, opp->debugID());
+#endif
+ }
+#if DEBUG_PERP
+ opp->dump(); SkDebugf("\n");
+ SkDebugf("%s addBounded span=%d opp=%d\n", __FUNCTION__, priorSpan ?
+ priorSpan->debugID() : -1, opp->debugID());
+#endif
+ opp->addBounded(span, &fHeap);
+ span->addBounded(opp, &fHeap);
+ }
+ this->validate();
+#if DEBUG_T_SECT
+ span->validatePerpT(t);
+#endif
+}
+
+template<typename TCurve, typename OppCurve>
+double SkTSpan<TCurve, OppCurve>::closestBoundedT(const SkDPoint& pt) const {
+ double result = -1;
+ double closest = DBL_MAX;
+ const SkTSpanBounded<OppCurve, TCurve>* testBounded = fBounded;
+ while (testBounded) {
+ const SkTSpan<OppCurve, TCurve>* test = testBounded->fBounded;
+ double startDist = test->fPart[0].distanceSquared(pt);
+ if (closest > startDist) {
+ closest = startDist;
+ result = test->fStartT;
+ }
+ double endDist = test->fPart[OppCurve::kPointLast].distanceSquared(pt);
+ if (closest > endDist) {
+ closest = endDist;
+ result = test->fEndT;
+ }
+ testBounded = testBounded->fNext;
+ }
+ SkASSERT(between(0, result, 1));
+ return result;
+}
+
+#ifdef SK_DEBUG
+template<typename TCurve, typename OppCurve>
+bool SkTSpan<TCurve, OppCurve>::debugIsBefore(const SkTSpan* span) const {
+ const SkTSpan* work = this;
+ do {
+ if (span == work) {
+ return true;
+ }
+ } while ((work = work->fNext));
+ return false;
+}
+#endif
+
+template<typename TCurve, typename OppCurve>
+bool SkTSpan<TCurve, OppCurve>::contains(double t) const {
+ const SkTSpan* work = this;
+ do {
+ if (between(work->fStartT, t, work->fEndT)) {
+ return true;
+ }
+ } while ((work = work->fNext));
+ return false;
+}
+
+template<typename TCurve, typename OppCurve>
+const SkTSect<OppCurve, TCurve>* SkTSpan<TCurve, OppCurve>::debugOpp() const {
+ return SkDEBUGRELEASE(fDebugSect->debugOpp(), nullptr);
+}
+
+template<typename TCurve, typename OppCurve>
+SkTSpan<OppCurve, TCurve>* SkTSpan<TCurve, OppCurve>::findOppSpan(
+ const SkTSpan<OppCurve, TCurve>* opp) const {
+ SkTSpanBounded<OppCurve, TCurve>* bounded = fBounded;
+ while (bounded) {
+ SkTSpan<OppCurve, TCurve>* test = bounded->fBounded;
+ if (opp == test) {
+ return test;
+ }
+ bounded = bounded->fNext;
+ }
+ return nullptr;
+}
+
+// returns 0 if no hull intersection
+// 1 if hulls intersect
+// 2 if hulls only share a common endpoint
+// -1 if linear and further checking is required
+template<typename TCurve, typename OppCurve>
+int SkTSpan<TCurve, OppCurve>::hullCheck(const SkTSpan<OppCurve, TCurve>* opp,
+ bool* start, bool* oppStart) {
+ if (fIsLinear) {
+ return -1;
+ }
+ bool ptsInCommon;
+ if (onlyEndPointsInCommon(opp, start, oppStart, &ptsInCommon)) {
+ SkASSERT(ptsInCommon);
+ return 2;
+ }
+ bool linear;
+ if (fPart.hullIntersects(opp->fPart, &linear)) {
+ if (!linear) { // check set true if linear
+ return 1;
+ }
+ fIsLinear = true;
+ fIsLine = fPart.controlsInside();
+ return ptsInCommon ? 1 : -1;
+ } else { // hull is not linear; check set true if intersected at the end points
+ return ((int) ptsInCommon) << 1; // 0 or 2
+ }
+ return 0;
+}
+
+// OPTIMIZE ? If at_most_end_pts_in_common detects that one quad is near linear,
+// use line intersection to guess a better split than 0.5
+// OPTIMIZE Once at_most_end_pts_in_common detects linear, mark span so all future splits are linear
+template<typename TCurve, typename OppCurve>
+int SkTSpan<TCurve, OppCurve>::hullsIntersect(SkTSpan<OppCurve, TCurve>* opp,
+ bool* start, bool* oppStart) {
+ if (!fBounds.intersects(opp->fBounds)) {
+ return 0;
+ }
+ int hullSect = this->hullCheck(opp, start, oppStart);
+ if (hullSect >= 0) {
+ return hullSect;
+ }
+ hullSect = opp->hullCheck(this, oppStart, start);
+ if (hullSect >= 0) {
+ return hullSect;
+ }
+ return -1;
+}
+
+template<typename TCurve, typename OppCurve>
+void SkTSpan<TCurve, OppCurve>::init(const TCurve& c) {
+ fPrev = fNext = nullptr;
+ fStartT = 0;
+ fEndT = 1;
+ fBounded = nullptr;
+ resetBounds(c);
+}
+
+template<typename TCurve, typename OppCurve>
+void SkTSpan<TCurve, OppCurve>::initBounds(const TCurve& c) {
+ fPart = c.subDivide(fStartT, fEndT);
+ fBounds.setBounds(fPart);
+ fCoinStart.init();
+ fCoinEnd.init();
+ fBoundsMax = SkTMax(fBounds.width(), fBounds.height());
+ fCollapsed = fPart.collapsed();
+ fHasPerp = false;
+ fDeleted = false;
+#if DEBUG_T_SECT
+ if (fCollapsed) {
+ SkDebugf(""); // for convenient breakpoints
+ }
+#endif
+}
+
+template<typename TCurve, typename OppCurve>
+bool SkTSpan<TCurve, OppCurve>::linearsIntersect(SkTSpan<OppCurve, TCurve>* span) {
+ int result = this->linearIntersects(span->fPart);
+ if (result <= 1) {
+ return SkToBool(result);
+ }
+ SkASSERT(span->fIsLinear);
+ result = span->linearIntersects(this->fPart);
+// SkASSERT(result <= 1);
+ return SkToBool(result);
+}
+
+template<typename TCurve, typename OppCurve>
+double SkTSpan<TCurve, OppCurve>::linearT(const SkDPoint& pt) const {
+ SkDVector len = fPart[TCurve::kPointLast] - fPart[0];
+ return fabs(len.fX) > fabs(len.fY)
+ ? (pt.fX - fPart[0].fX) / len.fX
+ : (pt.fY - fPart[0].fY) / len.fY;
+}
+
+template<typename TCurve, typename OppCurve>
+int SkTSpan<TCurve, OppCurve>::linearIntersects(const OppCurve& q2) const {
+ // looks like q1 is near-linear
+ int start = 0, end = TCurve::kPointLast; // the outside points are usually the extremes
+ if (!fPart.controlsInside()) {
+ double dist = 0; // if there's any question, compute distance to find best outsiders
+ for (int outer = 0; outer < TCurve::kPointCount - 1; ++outer) {
+ for (int inner = outer + 1; inner < TCurve::kPointCount; ++inner) {
+ double test = (fPart[outer] - fPart[inner]).lengthSquared();
+ if (dist > test) {
+ continue;
+ }
+ dist = test;
+ start = outer;
+ end = inner;
+ }
+ }
+ }
+ // see if q2 is on one side of the line formed by the extreme points
+ double origX = fPart[start].fX;
+ double origY = fPart[start].fY;
+ double adj = fPart[end].fX - origX;
+ double opp = fPart[end].fY - origY;
+ double maxPart = SkTMax(fabs(adj), fabs(opp));
+ double sign = 0; // initialization to shut up warning in release build
+ for (int n = 0; n < OppCurve::kPointCount; ++n) {
+ double dx = q2[n].fY - origY;
+ double dy = q2[n].fX - origX;
+ double maxVal = SkTMax(maxPart, SkTMax(fabs(dx), fabs(dy)));
+ double test = (q2[n].fY - origY) * adj - (q2[n].fX - origX) * opp;
+ if (precisely_zero_when_compared_to(test, maxVal)) {
+ return 1;
+ }
+ if (approximately_zero_when_compared_to(test, maxVal)) {
+ return 3;
+ }
+ if (n == 0) {
+ sign = test;
+ continue;
+ }
+ if (test * sign < 0) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+template<typename TCurve, typename OppCurve>
+bool SkTSpan<TCurve, OppCurve>::onlyEndPointsInCommon(const SkTSpan<OppCurve, TCurve>* opp,
+ bool* start, bool* oppStart, bool* ptsInCommon) {
+ if (opp->fPart[0] == fPart[0]) {
+ *start = *oppStart = true;
+ } else if (opp->fPart[0] == fPart[TCurve::kPointLast]) {
+ *start = false;
+ *oppStart = true;
+ } else if (opp->fPart[OppCurve::kPointLast] == fPart[0]) {
+ *start = true;
+ *oppStart = false;
+ } else if (opp->fPart[OppCurve::kPointLast] == fPart[TCurve::kPointLast]) {
+ *start = *oppStart = false;
+ } else {
+ *ptsInCommon = false;
+ return false;
+ }
+ *ptsInCommon = true;
+ const SkDPoint* otherPts[TCurve::kPointCount - 1], * oppOtherPts[OppCurve::kPointCount - 1];
+ int baseIndex = *start ? 0 : TCurve::kPointLast;
+ fPart.otherPts(baseIndex, otherPts);
+ opp->fPart.otherPts(*oppStart ? 0 : OppCurve::kPointLast, oppOtherPts);
+ const SkDPoint& base = fPart[baseIndex];
+ for (int o1 = 0; o1 < (int) SK_ARRAY_COUNT(otherPts); ++o1) {
+ SkDVector v1 = *otherPts[o1] - base;
+ for (int o2 = 0; o2 < (int) SK_ARRAY_COUNT(oppOtherPts); ++o2) {
+ SkDVector v2 = *oppOtherPts[o2] - base;
+ if (v2.dot(v1) >= 0) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+template<typename TCurve, typename OppCurve>
+SkTSpan<OppCurve, TCurve>* SkTSpan<TCurve, OppCurve>::oppT(double t) const {
+ SkTSpanBounded<OppCurve, TCurve>* bounded = fBounded;
+ while (bounded) {
+ SkTSpan<OppCurve, TCurve>* test = bounded->fBounded;
+ if (between(test->fStartT, t, test->fEndT)) {
+ return test;
+ }
+ bounded = bounded->fNext;
+ }
+ return nullptr;
+}
+
+template<typename TCurve, typename OppCurve>
+bool SkTSpan<TCurve, OppCurve>::removeAllBounded() {
+ bool deleteSpan = false;
+ SkTSpanBounded<OppCurve, TCurve>* bounded = fBounded;
+ while (bounded) {
+ SkTSpan<OppCurve, TCurve>* opp = bounded->fBounded;
+ deleteSpan |= opp->removeBounded(this);
+ bounded = bounded->fNext;
+ }
+ return deleteSpan;
+}
+
+template<typename TCurve, typename OppCurve>
+bool SkTSpan<TCurve, OppCurve>::removeBounded(const SkTSpan<OppCurve, TCurve>* opp) {
+ if (fHasPerp) {
+ bool foundStart = false;
+ bool foundEnd = false;
+ SkTSpanBounded<OppCurve, TCurve>* bounded = fBounded;
+ while (bounded) {
+ SkTSpan<OppCurve, TCurve>* test = bounded->fBounded;
+ if (opp != test) {
+ foundStart |= between(test->fStartT, fCoinStart.perpT(), test->fEndT);
+ foundEnd |= between(test->fStartT, fCoinEnd.perpT(), test->fEndT);
+ }
+ bounded = bounded->fNext;
+ }
+ if (!foundStart || !foundEnd) {
+ fHasPerp = false;
+ fCoinStart.init();
+ fCoinEnd.init();
+ }
+ }
+ SkTSpanBounded<OppCurve, TCurve>* bounded = fBounded;
+ SkTSpanBounded<OppCurve, TCurve>* prev = nullptr;
+ while (bounded) {
+ SkTSpanBounded<OppCurve, TCurve>* boundedNext = bounded->fNext;
+ if (opp == bounded->fBounded) {
+ if (prev) {
+ prev->fNext = boundedNext;
+ return false;
+ } else {
+ fBounded = boundedNext;
+ return fBounded == nullptr;
+ }
+ }
+ prev = bounded;
+ bounded = boundedNext;
+ }
+ SkOPASSERT(0);
+ return false;
+}
+
+template<typename TCurve, typename OppCurve>
+bool SkTSpan<TCurve, OppCurve>::splitAt(SkTSpan* work, double t, SkChunkAlloc* heap) {
+ fStartT = t;
+ fEndT = work->fEndT;
+ if (fStartT == fEndT) {
+ fCollapsed = true;
+ return false;
+ }
+ work->fEndT = t;
+ if (work->fStartT == work->fEndT) {
+ work->fCollapsed = true;
+ return false;
+ }
+ fPrev = work;
+ fNext = work->fNext;
+ fIsLinear = work->fIsLinear;
+ fIsLine = work->fIsLine;
+
+ work->fNext = this;
+ if (fNext) {
+ fNext->fPrev = this;
+ }
+ this->validate();
+ SkTSpanBounded<OppCurve, TCurve>* bounded = work->fBounded;
+ fBounded = nullptr;
+ while (bounded) {
+ this->addBounded(bounded->fBounded, heap);
+ bounded = bounded->fNext;
+ }
+ bounded = fBounded;
+ while (bounded) {
+ bounded->fBounded->addBounded(this, heap);
+ bounded = bounded->fNext;
+ }
+ return true;
+}
+
+template<typename TCurve, typename OppCurve>
+void SkTSpan<TCurve, OppCurve>::validate() const {
+#if DEBUG_VALIDATE
+ SkASSERT(this != fPrev);
+ SkASSERT(this != fNext);
+ SkASSERT(fNext == nullptr || fNext != fPrev);
+ SkASSERT(fNext == nullptr || this == fNext->fPrev);
+ SkASSERT(fPrev == nullptr || this == fPrev->fNext);
+ this->validateBounded();
+#endif
+#if DEBUG_T_SECT
+ SkASSERT(fBounds.width() || fBounds.height() || fCollapsed);
+ SkASSERT(fBoundsMax == SkTMax(fBounds.width(), fBounds.height()) || fCollapsed == 0xFF);
+ SkASSERT(0 <= fStartT);
+ SkASSERT(fEndT <= 1);
+ SkASSERT(fStartT <= fEndT);
+ SkASSERT(fBounded || fCollapsed == 0xFF);
+ if (fHasPerp) {
+ if (fCoinStart.isMatch()) {
+ validatePerpT(fCoinStart.perpT());
+ validatePerpPt(fCoinStart.perpT(), fCoinStart.perpPt());
+ }
+ if (fCoinEnd.isMatch()) {
+ validatePerpT(fCoinEnd.perpT());
+ validatePerpPt(fCoinEnd.perpT(), fCoinEnd.perpPt());
+ }
+ }
+#endif
+}
+
+template<typename TCurve, typename OppCurve>
+void SkTSpan<TCurve, OppCurve>::validateBounded() const {
+#if DEBUG_VALIDATE
+ const SkTSpanBounded<OppCurve, TCurve>* testBounded = fBounded;
+ while (testBounded) {
+ SkDEBUGCODE(const SkTSpan<OppCurve, TCurve>* overlap = testBounded->fBounded);
+ SkASSERT(!overlap->fDeleted);
+#if DEBUG_T_SECT
+ SkASSERT(((this->debugID() ^ overlap->debugID()) & 1) == 1);
+ SkASSERT(overlap->findOppSpan(this));
+#endif
+ testBounded = testBounded->fNext;
+ }
+#endif
+}
+
+template<typename TCurve, typename OppCurve>
+void SkTSpan<TCurve, OppCurve>::validatePerpT(double oppT) const {
+ const SkTSpanBounded<OppCurve, TCurve>* testBounded = fBounded;
+ while (testBounded) {
+ const SkTSpan<OppCurve, TCurve>* overlap = testBounded->fBounded;
+ if (precisely_between(overlap->fStartT, oppT, overlap->fEndT)) {
+ return;
+ }
+ testBounded = testBounded->fNext;
+ }
+ SkASSERT(0);
+}
+
+template<typename TCurve, typename OppCurve>
+void SkTSpan<TCurve, OppCurve>::validatePerpPt(double t, const SkDPoint& pt) const {
+ SkASSERT(fDebugSect->fOppSect->fCurve.ptAtT(t) == pt);
+}
+
+
+template<typename TCurve, typename OppCurve>
+SkTSect<TCurve, OppCurve>::SkTSect(const TCurve& c
+ SkDEBUGPARAMS(SkOpGlobalState* debugGlobalState)
+ PATH_OPS_DEBUG_T_SECT_PARAMS(int id))
+ : fCurve(c)
+ , fHeap(sizeof(SkTSpan<TCurve, OppCurve>) * 4)
+ , fCoincident(nullptr)
+ , fDeleted(nullptr)
+ , fActiveCount(0)
+ SkDEBUGPARAMS(fDebugGlobalState(debugGlobalState))
+ PATH_OPS_DEBUG_T_SECT_PARAMS(fID(id))
+ PATH_OPS_DEBUG_T_SECT_PARAMS(fDebugCount(0))
+ PATH_OPS_DEBUG_T_SECT_PARAMS(fDebugAllocatedCount(0))
+{
+ fHead = addOne();
+ SkDEBUGCODE(fHead->debugSetGlobalState(debugGlobalState));
+ fHead->init(c);
+}
+
+template<typename TCurve, typename OppCurve>
+SkTSpan<TCurve, OppCurve>* SkTSect<TCurve, OppCurve>::addOne() {
+ SkTSpan<TCurve, OppCurve>* result;
+ if (fDeleted) {
+ result = fDeleted;
+ fDeleted = result->fNext;
+ } else {
+ result = new (fHeap.allocThrow(sizeof(SkTSpan<TCurve, OppCurve>)))(
+ SkTSpan<TCurve, OppCurve>);
+#if DEBUG_T_SECT
+ ++fDebugAllocatedCount;
+#endif
+ }
+ result->reset();
+ result->fHasPerp = false;
+ result->fDeleted = false;
+ ++fActiveCount;
+ PATH_OPS_DEBUG_T_SECT_CODE(result->fID = fDebugCount++ * 2 + fID);
+ SkDEBUGCODE(result->fDebugSect = this);
+#ifdef SK_DEBUG
+ result->fPart.debugInit();
+ result->fCoinStart.debugInit();
+ result->fCoinEnd.debugInit();
+ result->fPrev = result->fNext = nullptr;
+ result->fBounds.debugInit();
+ result->fStartT = result->fEndT = result->fBoundsMax = SK_ScalarNaN;
+ result->fCollapsed = result->fIsLinear = result->fIsLine = 0xFF;
+#endif
+ return result;
+}
+
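+// Bisect from tStart in the direction of tStep, halving the step each pass and reversing it
+// after a miss, to find how far coincidence extends; on success writes the limit to resultT and
+// the matching opposite-curve t to oppT.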
+template<typename TCurve, typename OppCurve>
+bool SkTSect<TCurve, OppCurve>::binarySearchCoin(SkTSect<OppCurve, TCurve>* sect2, double tStart,
+ double tStep, double* resultT, double* oppT) {
+ SkTSpan<TCurve, OppCurve> work;
+ double result = work.fStartT = work.fEndT = tStart;
+ SkDEBUGCODE(work.fDebugSect = this);
+ SkDPoint last = fCurve.ptAtT(tStart);
+ SkDPoint oppPt;
+ bool flip = false;
+ bool contained = false;
+ SkDEBUGCODE(bool down = tStep < 0);
+ const OppCurve& opp = sect2->fCurve;
+ do {
+ tStep *= 0.5;
+ work.fStartT += tStep;
+ if (flip) {
+ tStep = -tStep;
+ flip = false;
+ }
+ work.initBounds(fCurve);
+ if (work.fCollapsed) {
+ return false;
+ }
+ if (last.approximatelyEqual(work.fPart[0])) {
+ break;
+ }
+ last = work.fPart[0];
+ work.fCoinStart.setPerp(fCurve, work.fStartT, last, opp);
+ if (work.fCoinStart.isMatch()) {
+#if DEBUG_T_SECT
+ work.validatePerpPt(work.fCoinStart.perpT(), work.fCoinStart.perpPt());
+#endif
+ double oppTTest = work.fCoinStart.perpT();
+ if (sect2->fHead->contains(oppTTest)) {
+ *oppT = oppTTest;
+ oppPt = work.fCoinStart.perpPt();
+ contained = true;
+ SkASSERT(down ? result > work.fStartT : result < work.fStartT);
+ result = work.fStartT;
+ continue;
+ }
+ }
+ tStep = -tStep;
+ flip = true;
+ } while (true);
+ if (!contained) {
+ return false;
+ }
+ if (last.approximatelyEqual(fCurve[0])) {
+ result = 0;
+ } else if (last.approximatelyEqual(fCurve[TCurve::kPointLast])) {
+ result = 1;
+ }
+ if (oppPt.approximatelyEqual(opp[0])) {
+ *oppT = 0;
+ } else if (oppPt.approximatelyEqual(opp[OppCurve::kPointLast])) {
+ *oppT = 1;
+ }
+ *resultT = result;
+ return true;
+}
+
+// OPTIMIZE ? keep a sorted list of sizes in the form of a doubly-linked list in quad span
+// so that each quad sect has a pointer to the largest, and can update it as spans
+// are split
+template<typename TCurve, typename OppCurve>
+SkTSpan<TCurve, OppCurve>* SkTSect<TCurve, OppCurve>::boundsMax() const {
+ SkTSpan<TCurve, OppCurve>* test = fHead;
+ SkTSpan<TCurve, OppCurve>* largest = fHead;
+ bool lCollapsed = largest->fCollapsed;
+ while ((test = test->fNext)) {
+ bool tCollapsed = test->fCollapsed;
+ if ((lCollapsed && !tCollapsed) || (lCollapsed == tCollapsed &&
+ largest->fBoundsMax < test->fBoundsMax)) {
+ largest = test;
+ lCollapsed = test->fCollapsed;
+ }
+ }
+ return largest;
+}
+
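+// Look for runs of at least COINCIDENT_SPAN_COUNT consecutive spans, compute perpendiculars onto
+// the opposite curve for the run, and let extractCoincident collapse any coincident stretch into
+// a single entry.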
+template<typename TCurve, typename OppCurve>
+bool SkTSect<TCurve, OppCurve>::coincidentCheck(SkTSect<OppCurve, TCurve>* sect2) {
+ SkTSpan<TCurve, OppCurve>* first = fHead;
+ SkTSpan<TCurve, OppCurve>* last, * next;
+ do {
+ int consecutive = this->countConsecutiveSpans(first, &last);
+ next = last->fNext;
+ if (consecutive < COINCIDENT_SPAN_COUNT) {
+ continue;
+ }
+ this->validate();
+ sect2->validate();
+ this->computePerpendiculars(sect2, first, last);
+ this->validate();
+ sect2->validate();
+ // check to see if a range of points is on the curve
+ SkTSpan<TCurve, OppCurve>* coinStart = first;
+ do {
+ bool success = this->extractCoincident(sect2, coinStart, last, &coinStart);
+ if (!success) {
+ return false;
+ }
+ } while (coinStart && !last->fDeleted);
+ if (!fHead || !sect2->fHead) {
+ break;
+ }
+ if (!next || next->fDeleted) {
+ break;
+ }
+ } while ((first = next));
+ return true;
+}
+
+template<typename TCurve, typename OppCurve>
+void SkTSect<TCurve, OppCurve>::coincidentForce(SkTSect<OppCurve, TCurve>* sect2,
+ double start1s, double start1e) {
+ SkTSpan<TCurve, OppCurve>* first = fHead;
+ SkTSpan<TCurve, OppCurve>* last = this->tail();
+ SkTSpan<OppCurve, TCurve>* oppFirst = sect2->fHead;
+ SkTSpan<OppCurve, TCurve>* oppLast = sect2->tail();
+ bool deleteEmptySpans = this->updateBounded(first, last, oppFirst);
+ deleteEmptySpans |= sect2->updateBounded(oppFirst, oppLast, first);
+ this->removeSpanRange(first, last);
+ sect2->removeSpanRange(oppFirst, oppLast);
+ first->fStartT = start1s;
+ first->fEndT = start1e;
+ first->resetBounds(fCurve);
+ first->fCoinStart.setPerp(fCurve, start1s, fCurve[0], sect2->fCurve);
+ first->fCoinEnd.setPerp(fCurve, start1e, fCurve[TCurve::kPointLast], sect2->fCurve);
+ bool oppMatched = first->fCoinStart.perpT() < first->fCoinEnd.perpT();
+ double oppStartT = first->fCoinStart.perpT() == -1 ? 0 : SkTMax(0., first->fCoinStart.perpT());
+ double oppEndT = first->fCoinEnd.perpT() == -1 ? 1 : SkTMin(1., first->fCoinEnd.perpT());
+ if (!oppMatched) {
+ SkTSwap(oppStartT, oppEndT);
+ }
+ oppFirst->fStartT = oppStartT;
+ oppFirst->fEndT = oppEndT;
+ oppFirst->resetBounds(sect2->fCurve);
+ this->removeCoincident(first, false);
+ sect2->removeCoincident(oppFirst, true);
+ if (deleteEmptySpans) {
+ this->deleteEmptySpans();
+ sect2->deleteEmptySpans();
+ }
+}
+
+template<typename TCurve, typename OppCurve>
+bool SkTSect<TCurve, OppCurve>::coincidentHasT(double t) {
+ SkTSpan<TCurve, OppCurve>* test = fCoincident;
+ while (test) {
+ if (between(test->fStartT, t, test->fEndT)) {
+ return true;
+ }
+ test = test->fNext;
+ }
+ return false;
+}
+
+template<typename TCurve, typename OppCurve>
+int SkTSect<TCurve, OppCurve>::collapsed() const {
+ int result = 0;
+ const SkTSpan<TCurve, OppCurve>* test = fHead;
+ while (test) {
+ if (test->fCollapsed) {
+ ++result;
+ }
+ test = test->next();
+ }
+ return result;
+}
+
+template<typename TCurve, typename OppCurve>
+void SkTSect<TCurve, OppCurve>::computePerpendiculars(SkTSect<OppCurve, TCurve>* sect2,
+ SkTSpan<TCurve, OppCurve>* first, SkTSpan<TCurve, OppCurve>* last) {
+ const OppCurve& opp = sect2->fCurve;
+ SkTSpan<TCurve, OppCurve>* work = first;
+ SkTSpan<TCurve, OppCurve>* prior = nullptr;
+ do {
+ if (!work->fHasPerp && !work->fCollapsed) {
+ if (prior) {
+ work->fCoinStart = prior->fCoinEnd;
+ } else {
+ work->fCoinStart.setPerp(fCurve, work->fStartT, work->fPart[0], opp);
+ }
+ if (work->fCoinStart.isMatch()) {
+ double perpT = work->fCoinStart.perpT();
+ if (sect2->coincidentHasT(perpT)) {
+ work->fCoinStart.init();
+ } else {
+ sect2->addForPerp(work, perpT);
+ }
+ }
+ work->fCoinEnd.setPerp(fCurve, work->fEndT, work->fPart[TCurve::kPointLast], opp);
+ if (work->fCoinEnd.isMatch()) {
+ double perpT = work->fCoinEnd.perpT();
+ if (sect2->coincidentHasT(perpT)) {
+ work->fCoinEnd.init();
+ } else {
+ sect2->addForPerp(work, perpT);
+ }
+ }
+ work->fHasPerp = true;
+ }
+ if (work == last) {
+ break;
+ }
+ prior = work;
+ work = work->fNext;
+ SkASSERT(work);
+ } while (true);
+}
+
+template<typename TCurve, typename OppCurve>
+int SkTSect<TCurve, OppCurve>::countConsecutiveSpans(SkTSpan<TCurve, OppCurve>* first,
+ SkTSpan<TCurve, OppCurve>** lastPtr) const {
+ int consecutive = 1;
+ SkTSpan<TCurve, OppCurve>* last = first;
+ do {
+ SkTSpan<TCurve, OppCurve>* next = last->fNext;
+ if (!next) {
+ break;
+ }
+ if (next->fStartT > last->fEndT) {
+ break;
+ }
+ ++consecutive;
+ last = next;
+ } while (true);
+ *lastPtr = last;
+ return consecutive;
+}
+
+template<typename TCurve, typename OppCurve>
+bool SkTSect<TCurve, OppCurve>::debugHasBounded(const SkTSpan<OppCurve, TCurve>* span) const {
+ const SkTSpan<TCurve, OppCurve>* test = fHead;
+ if (!test) {
+ return false;
+ }
+ do {
+ if (test->findOppSpan(span)) {
+ return true;
+ }
+ } while ((test = test->next()));
+ return false;
+}
+
+template<typename TCurve, typename OppCurve>
+bool SkTSect<TCurve, OppCurve>::deleteEmptySpans() {
+ SkTSpan<TCurve, OppCurve>* test;
+ SkTSpan<TCurve, OppCurve>* next = fHead;
+ while ((test = next)) {
+ next = test->fNext;
+ if (!test->fBounded) {
+ if (!this->removeSpan(test)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+template<typename TCurve, typename OppCurve>
+bool SkTSect<TCurve, OppCurve>::extractCoincident(
+ SkTSect<OppCurve, TCurve>* sect2,
+ SkTSpan<TCurve, OppCurve>* first, SkTSpan<TCurve, OppCurve>* last,
+ SkTSpan<TCurve, OppCurve>** result) {
+ first = findCoincidentRun(first, &last);
+ if (!first || !last) {
+ *result = nullptr;
+ return true;
+ }
+ // march outwards to find limit of coincidence from here to previous and next spans
+ double startT = first->fStartT;
+ double oppStartT SK_INIT_TO_AVOID_WARNING;
+ double oppEndT SK_INIT_TO_AVOID_WARNING;
+ SkTSpan<TCurve, OppCurve>* prev = first->fPrev;
+ SkASSERT(first->fCoinStart.isMatch());
+ SkTSpan<OppCurve, TCurve>* oppFirst = first->findOppT(first->fCoinStart.perpT());
+ SkOPASSERT(last->fCoinEnd.isMatch());
+ bool oppMatched = first->fCoinStart.perpT() < first->fCoinEnd.perpT();
+ double coinStart;
+ SkDEBUGCODE(double coinEnd);
+ SkTSpan<OppCurve, TCurve>* cutFirst;
+ if (prev && prev->fEndT == startT
+ && this->binarySearchCoin(sect2, startT, prev->fStartT - startT, &coinStart,
+ &oppStartT)
+ && prev->fStartT < coinStart && coinStart < startT
+ && (cutFirst = prev->oppT(oppStartT))) {
+ oppFirst = cutFirst;
+ first = this->addSplitAt(prev, coinStart);
+ first->markCoincident();
+ prev->fCoinEnd.markCoincident();
+ if (oppFirst->fStartT < oppStartT && oppStartT < oppFirst->fEndT) {
+ SkTSpan<OppCurve, TCurve>* oppHalf = sect2->addSplitAt(oppFirst, oppStartT);
+ if (oppMatched) {
+ oppFirst->fCoinEnd.markCoincident();
+ oppHalf->markCoincident();
+ oppFirst = oppHalf;
+ } else {
+ oppFirst->markCoincident();
+ oppHalf->fCoinStart.markCoincident();
+ }
+ }
+ } else {
+ SkDEBUGCODE(coinStart = first->fStartT);
+ SkDEBUGCODE(oppStartT = oppMatched ? oppFirst->fStartT : oppFirst->fEndT);
+ }
+ // FIXME: incomplete : if we're not at the end, find end of coin
+ SkTSpan<OppCurve, TCurve>* oppLast;
+ SkOPASSERT(last->fCoinEnd.isMatch());
+ oppLast = last->findOppT(last->fCoinEnd.perpT());
+ SkDEBUGCODE(coinEnd = last->fEndT);
+#ifdef SK_DEBUG
+ if (!this->globalState() || !this->globalState()->debugSkipAssert()) {
+ oppEndT = oppMatched ? oppLast->fEndT : oppLast->fStartT;
+ }
+#endif
+ if (!oppMatched) {
+ SkTSwap(oppFirst, oppLast);
+ SkTSwap(oppStartT, oppEndT);
+ }
+ SkOPASSERT(oppStartT < oppEndT);
+ SkASSERT(coinStart == first->fStartT);
+ SkASSERT(coinEnd == last->fEndT);
+ SkOPASSERT(oppStartT == oppFirst->fStartT);
+ SkOPASSERT(oppEndT == oppLast->fEndT);
+ if (!oppFirst) {
+ *result = nullptr;
+ return true;
+ }
+ if (!oppLast) {
+ *result = nullptr;
+ return true;
+ }
+ // reduce coincident runs to single entries
+ this->validate();
+ sect2->validate();
+ bool deleteEmptySpans = this->updateBounded(first, last, oppFirst);
+ deleteEmptySpans |= sect2->updateBounded(oppFirst, oppLast, first);
+ this->removeSpanRange(first, last);
+ sect2->removeSpanRange(oppFirst, oppLast);
+ first->fEndT = last->fEndT;
+ first->resetBounds(this->fCurve);
+ first->fCoinStart.setPerp(fCurve, first->fStartT, first->fPart[0], sect2->fCurve);
+ first->fCoinEnd.setPerp(fCurve, first->fEndT, first->fPart[TCurve::kPointLast], sect2->fCurve);
+ oppStartT = first->fCoinStart.perpT();
+ oppEndT = first->fCoinEnd.perpT();
+ if (between(0, oppStartT, 1) && between(0, oppEndT, 1)) {
+ if (!oppMatched) {
+ SkTSwap(oppStartT, oppEndT);
+ }
+ oppFirst->fStartT = oppStartT;
+ oppFirst->fEndT = oppEndT;
+ oppFirst->resetBounds(sect2->fCurve);
+ }
+ this->validateBounded();
+ sect2->validateBounded();
+ last = first->fNext;
+ this->removeCoincident(first, false);
+ sect2->removeCoincident(oppFirst, true);
+ if (deleteEmptySpans) {
+ if (!this->deleteEmptySpans() || !sect2->deleteEmptySpans()) {
+ *result = nullptr;
+ return false;
+ }
+ }
+ this->validate();
+ sect2->validate();
+ *result = last && !last->fDeleted && fHead && sect2->fHead ? last : nullptr;
+ return true;
+}
+
+template<typename TCurve, typename OppCurve>
+SkTSpan<TCurve, OppCurve>* SkTSect<TCurve, OppCurve>::findCoincidentRun(
+ SkTSpan<TCurve, OppCurve>* first, SkTSpan<TCurve, OppCurve>** lastPtr) {
+ SkTSpan<TCurve, OppCurve>* work = first;
+ SkTSpan<TCurve, OppCurve>* lastCandidate = nullptr;
+ first = nullptr;
+ // find the first fully coincident span
+ do {
+ if (work->fCoinStart.isMatch()) {
+#if DEBUG_T_SECT
+ work->validatePerpT(work->fCoinStart.perpT());
+ work->validatePerpPt(work->fCoinStart.perpT(), work->fCoinStart.perpPt());
+#endif
+ SkASSERT(work->hasOppT(work->fCoinStart.perpT()));
+ if (!work->fCoinEnd.isMatch()) {
+ break;
+ }
+ lastCandidate = work;
+ if (!first) {
+ first = work;
+ }
+ } else if (first && work->fCollapsed) {
+ *lastPtr = lastCandidate;
+ return first;
+ } else {
+ lastCandidate = nullptr;
+ SkOPASSERT(!first);
+ }
+ if (work == *lastPtr) {
+ return first;
+ }
+ work = work->fNext;
+ if (!work) {
+ return nullptr;
+ }
+ } while (true);
+ if (lastCandidate) {
+ *lastPtr = lastCandidate;
+ }
+ return first;
+}
+
+template<typename TCurve, typename OppCurve>
+int SkTSect<TCurve, OppCurve>::intersects(SkTSpan<TCurve, OppCurve>* span,
+ SkTSect<OppCurve, TCurve>* opp,
+ SkTSpan<OppCurve, TCurve>* oppSpan, int* oppResult) {
+ bool spanStart, oppStart;
+ int hullResult = span->hullsIntersect(oppSpan, &spanStart, &oppStart);
+ if (hullResult >= 0) {
+ if (hullResult == 2) { // hulls have one point in common
+ if (!span->fBounded || !span->fBounded->fNext) {
+ SkASSERT(!span->fBounded || span->fBounded->fBounded == oppSpan);
+ if (spanStart) {
+ span->fEndT = span->fStartT;
+ } else {
+ span->fStartT = span->fEndT;
+ }
+ } else {
+ hullResult = 1;
+ }
+ if (!oppSpan->fBounded || !oppSpan->fBounded->fNext) {
+ SkASSERT(!oppSpan->fBounded || oppSpan->fBounded->fBounded == span);
+ if (oppStart) {
+ oppSpan->fEndT = oppSpan->fStartT;
+ } else {
+ oppSpan->fStartT = oppSpan->fEndT;
+ }
+ *oppResult = 2;
+ } else {
+ *oppResult = 1;
+ }
+ } else {
+ *oppResult = 1;
+ }
+ return hullResult;
+ }
+ if (span->fIsLine && oppSpan->fIsLine) {
+ SkIntersections i;
+ int sects = this->linesIntersect(span, opp, oppSpan, &i);
+ if (sects == 2) {
+ return *oppResult = 1;
+ }
+ if (!sects) {
+ return -1;
+ }
+ span->fStartT = span->fEndT = i[0][0];
+ oppSpan->fStartT = oppSpan->fEndT = i[1][0];
+ return *oppResult = 2;
+ }
+ if (span->fIsLinear || oppSpan->fIsLinear) {
+ return *oppResult = (int) span->linearsIntersect(oppSpan);
+ }
+ return *oppResult = 1;
+}
+
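+// Only attempted when the opposite curve is a conic (note the IsConic() guard): the line is
+// considered parallel to the curve when perpendiculars dropped from both of its end points
+// intersect the curve at approximately those same end points.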
+template<typename TCurve>
+static bool is_parallel(const SkDLine& thisLine, const TCurve& opp) {
+ if (!opp.IsConic()) {
+ return false; // FIXME : breaks a lot of stuff now
+ }
+ int finds = 0;
+ SkDLine thisPerp;
+ thisPerp.fPts[0].fX = thisLine.fPts[1].fX + (thisLine.fPts[1].fY - thisLine.fPts[0].fY);
+ thisPerp.fPts[0].fY = thisLine.fPts[1].fY + (thisLine.fPts[0].fX - thisLine.fPts[1].fX);
+ thisPerp.fPts[1] = thisLine.fPts[1];
+ SkIntersections perpRayI;
+ perpRayI.intersectRay(opp, thisPerp);
+ for (int pIndex = 0; pIndex < perpRayI.used(); ++pIndex) {
+ finds += perpRayI.pt(pIndex).approximatelyEqual(thisPerp.fPts[1]);
+ }
+ thisPerp.fPts[1].fX = thisLine.fPts[0].fX + (thisLine.fPts[1].fY - thisLine.fPts[0].fY);
+ thisPerp.fPts[1].fY = thisLine.fPts[0].fY + (thisLine.fPts[0].fX - thisLine.fPts[1].fX);
+ thisPerp.fPts[0] = thisLine.fPts[0];
+ perpRayI.intersectRay(opp, thisPerp);
+ for (int pIndex = 0; pIndex < perpRayI.used(); ++pIndex) {
+ finds += perpRayI.pt(pIndex).approximatelyEqual(thisPerp.fPts[0]);
+ }
+ return finds >= 2;
+}
+
+// while the intersection points are sufficiently far apart:
+// construct the tangent lines from the intersections
+// find the point where the tangent line intersects the opposite curve
+template<typename TCurve, typename OppCurve>
+int SkTSect<TCurve, OppCurve>::linesIntersect(SkTSpan<TCurve, OppCurve>* span,
+ SkTSect<OppCurve, TCurve>* opp,
+ SkTSpan<OppCurve, TCurve>* oppSpan, SkIntersections* i) {
+ SkIntersections thisRayI, oppRayI;
+ SkDLine thisLine = {{ span->fPart[0], span->fPart[TCurve::kPointLast] }};
+ SkDLine oppLine = {{ oppSpan->fPart[0], oppSpan->fPart[OppCurve::kPointLast] }};
+ int loopCount = 0;
+ double bestDistSq = DBL_MAX;
+ if (!thisRayI.intersectRay(opp->fCurve, thisLine)) {
+ return 0;
+ }
+ if (!oppRayI.intersectRay(this->fCurve, oppLine)) {
+ return 0;
+ }
+ // if the ends of each line intersect the opposite curve, the lines are coincident
+ if (thisRayI.used() > 1) {
+ int ptMatches = 0;
+ for (int tIndex = 0; tIndex < thisRayI.used(); ++tIndex) {
+ for (int lIndex = 0; lIndex < (int) SK_ARRAY_COUNT(thisLine.fPts); ++lIndex) {
+ ptMatches += thisRayI.pt(tIndex).approximatelyEqual(thisLine.fPts[lIndex]);
+ }
+ }
+ if (ptMatches == 2 || is_parallel(thisLine, opp->fCurve)) {
+ return 2;
+ }
+ }
+ if (oppRayI.used() > 1) {
+ int ptMatches = 0;
+ for (int oIndex = 0; oIndex < oppRayI.used(); ++oIndex) {
+ for (int lIndex = 0; lIndex < (int) SK_ARRAY_COUNT(thisLine.fPts); ++lIndex) {
+ ptMatches += oppRayI.pt(oIndex).approximatelyEqual(oppLine.fPts[lIndex]);
+ }
+ }
+ if (ptMatches == 2 || is_parallel(oppLine, this->fCurve)) {
+ return 2;
+ }
+ }
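+ // Iteratively pick the closest pair of ray/curve hits, re-aim each tangent line at the new
+ // t values, and loop until the two points coincide, the distance stops shrinking, or the
+ // pass limit is hit.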
+ do {
+ // pick the closest pair of points
+ double closest = DBL_MAX;
+ int closeIndex SK_INIT_TO_AVOID_WARNING;
+ int oppCloseIndex SK_INIT_TO_AVOID_WARNING;
+ for (int index = 0; index < oppRayI.used(); ++index) {
+ if (!roughly_between(span->fStartT, oppRayI[0][index], span->fEndT)) {
+ continue;
+ }
+ for (int oIndex = 0; oIndex < thisRayI.used(); ++oIndex) {
+ if (!roughly_between(oppSpan->fStartT, thisRayI[0][oIndex], oppSpan->fEndT)) {
+ continue;
+ }
+ double distSq = thisRayI.pt(index).distanceSquared(oppRayI.pt(oIndex));
+ if (closest > distSq) {
+ closest = distSq;
+ closeIndex = index;
+ oppCloseIndex = oIndex;
+ }
+ }
+ }
+ if (closest == DBL_MAX) {
+ break;
+ }
+ const SkDPoint& oppIPt = thisRayI.pt(oppCloseIndex);
+ const SkDPoint& iPt = oppRayI.pt(closeIndex);
+ if (between(span->fStartT, oppRayI[0][closeIndex], span->fEndT)
+ && between(oppSpan->fStartT, thisRayI[0][oppCloseIndex], oppSpan->fEndT)
+ && oppIPt.approximatelyEqual(iPt)) {
+ i->merge(oppRayI, closeIndex, thisRayI, oppCloseIndex);
+ return i->used();
+ }
+ double distSq = oppIPt.distanceSquared(iPt);
+ if (bestDistSq < distSq || ++loopCount > 5) {
+ return 0;
+ }
+ bestDistSq = distSq;
+ double oppStart = oppRayI[0][closeIndex];
+ thisLine[0] = fCurve.ptAtT(oppStart);
+ thisLine[1] = thisLine[0] + fCurve.dxdyAtT(oppStart);
+ if (!thisRayI.intersectRay(opp->fCurve, thisLine)) {
+ break;
+ }
+ double start = thisRayI[0][oppCloseIndex];
+ oppLine[0] = opp->fCurve.ptAtT(start);
+ oppLine[1] = oppLine[0] + opp->fCurve.dxdyAtT(start);
+ if (!oppRayI.intersectRay(this->fCurve, oppLine)) {
+ break;
+ }
+ } while (true);
+ // convergence may fail if the curves are nearly coincident
+ SkTCoincident<OppCurve, TCurve> oCoinS, oCoinE;
+ oCoinS.setPerp(opp->fCurve, oppSpan->fStartT, oppSpan->fPart[0], fCurve);
+ oCoinE.setPerp(opp->fCurve, oppSpan->fEndT, oppSpan->fPart[OppCurve::kPointLast], fCurve);
+ double tStart = oCoinS.perpT();
+ double tEnd = oCoinE.perpT();
+ bool swap = tStart > tEnd;
+ if (swap) {
+ SkTSwap(tStart, tEnd);
+ }
+ tStart = SkTMax(tStart, span->fStartT);
+ tEnd = SkTMin(tEnd, span->fEndT);
+ if (tStart > tEnd) {
+ return 0;
+ }
+ SkDVector perpS, perpE;
+ if (tStart == span->fStartT) {
+ SkTCoincident<TCurve, OppCurve> coinS;
+ coinS.setPerp(fCurve, span->fStartT, span->fPart[0], opp->fCurve);
+ perpS = span->fPart[0] - coinS.perpPt();
+ } else if (swap) {
+ perpS = oCoinE.perpPt() - oppSpan->fPart[OppCurve::kPointLast];
+ } else {
+ perpS = oCoinS.perpPt() - oppSpan->fPart[0];
+ }
+ if (tEnd == span->fEndT) {
+ SkTCoincident<TCurve, OppCurve> coinE;
+ coinE.setPerp(fCurve, span->fEndT, span->fPart[TCurve::kPointLast], opp->fCurve);
+ perpE = span->fPart[TCurve::kPointLast] - coinE.perpPt();
+ } else if (swap) {
+ perpE = oCoinS.perpPt() - oppSpan->fPart[0];
+ } else {
+ perpE = oCoinE.perpPt() - oppSpan->fPart[OppCurve::kPointLast];
+ }
+ if (perpS.dot(perpE) >= 0) {
+ return 0;
+ }
+ SkTCoincident<TCurve, OppCurve> coinW;
+ double workT = tStart;
+ double tStep = tEnd - tStart;
+ SkDPoint workPt;
+ do {
+ tStep *= 0.5;
+ if (precisely_zero(tStep)) {
+ return 0;
+ }
+ workT += tStep;
+ workPt = fCurve.ptAtT(workT);
+ coinW.setPerp(fCurve, workT, workPt, opp->fCurve);
+ double perpT = coinW.perpT();
+ if (coinW.isMatch() ? !between(oppSpan->fStartT, perpT, oppSpan->fEndT) : perpT < 0) {
+ continue;
+ }
+ SkDVector perpW = workPt - coinW.perpPt();
+ if ((perpS.dot(perpW) >= 0) == (tStep < 0)) {
+ tStep = -tStep;
+ }
+ if (workPt.approximatelyEqual(coinW.perpPt())) {
+ break;
+ }
+ } while (true);
+ double oppTTest = coinW.perpT();
+ if (!opp->fHead->contains(oppTTest)) {
+ return 0;
+ }
+ i->setMax(1);
+ i->insert(workT, oppTTest, workPt);
+ return 1;
+}
+
+
+template<typename TCurve, typename OppCurve>
+bool SkTSect<TCurve, OppCurve>::markSpanGone(SkTSpan<TCurve, OppCurve>* span) {
+ if (--fActiveCount < 0) {
+ return false;
+ }
+ span->fNext = fDeleted;
+ fDeleted = span;
+ SkOPASSERT(!span->fDeleted);
+ span->fDeleted = true;
+ return true;
+}
+
+template<typename TCurve, typename OppCurve>
+bool SkTSect<TCurve, OppCurve>::matchedDirection(double t, const SkTSect<OppCurve, TCurve>* sect2,
+ double t2) const {
+ SkDVector dxdy = this->fCurve.dxdyAtT(t);
+ SkDVector dxdy2 = sect2->fCurve.dxdyAtT(t2);
+ return dxdy.dot(dxdy2) >= 0;
+}
+
+template<typename TCurve, typename OppCurve>
+void SkTSect<TCurve, OppCurve>::matchedDirCheck(double t, const SkTSect<OppCurve, TCurve>* sect2,
+ double t2, bool* calcMatched, bool* oppMatched) const {
+ if (*calcMatched) {
+ SkASSERT(*oppMatched == this->matchedDirection(t, sect2, t2));
+ } else {
+ *oppMatched = this->matchedDirection(t, sect2, t2);
+ *calcMatched = true;
+ }
+}
+
+template<typename TCurve, typename OppCurve>
+void SkTSect<TCurve, OppCurve>::mergeCoincidence(SkTSect<OppCurve, TCurve>* sect2) {
+ double smallLimit = 0;
+ do {
+ // find the smallest unprocessed span
+ SkTSpan<TCurve, OppCurve>* smaller = nullptr;
+ SkTSpan<TCurve, OppCurve>* test = fCoincident;
+ do {
+ if (test->fStartT < smallLimit) {
+ continue;
+ }
+ if (smaller && smaller->fEndT < test->fStartT) {
+ continue;
+ }
+ smaller = test;
+ } while ((test = test->fNext));
+ if (!smaller) {
+ return;
+ }
+ smallLimit = smaller->fEndT;
+ // find next larger span
+ SkTSpan<TCurve, OppCurve>* prior = nullptr;
+ SkTSpan<TCurve, OppCurve>* larger = nullptr;
+ SkTSpan<TCurve, OppCurve>* largerPrior = nullptr;
+ test = fCoincident;
+ do {
+ if (test->fStartT < smaller->fEndT) {
+ continue;
+ }
+ SkASSERT(test->fStartT != smaller->fEndT);
+ if (larger && larger->fStartT < test->fStartT) {
+ continue;
+ }
+ largerPrior = prior;
+ larger = test;
+ } while ((prior = test), (test = test->fNext));
+ if (!larger) {
+ continue;
+ }
+ // check middle t value to see if it is coincident as well
+ double midT = (smaller->fEndT + larger->fStartT) / 2;
+ SkDPoint midPt = fCurve.ptAtT(midT);
+ SkTCoincident<TCurve, OppCurve> coin;
+ coin.setPerp(fCurve, midT, midPt, sect2->fCurve);
+ if (coin.isMatch()) {
+ smaller->fEndT = larger->fEndT;
+ smaller->fCoinEnd = larger->fCoinEnd;
+ if (largerPrior) {
+ largerPrior->fNext = larger->fNext;
+ largerPrior->validate();
+ } else {
+ fCoincident = larger->fNext;
+ }
+ }
+ } while (true);
+}
+
+template<typename TCurve, typename OppCurve>
+SkTSpan<TCurve, OppCurve>* SkTSect<TCurve, OppCurve>::prev(
+ SkTSpan<TCurve, OppCurve>* span) const {
+ SkTSpan<TCurve, OppCurve>* result = nullptr;
+ SkTSpan<TCurve, OppCurve>* test = fHead;
+ while (span != test) {
+ result = test;
+ test = test->fNext;
+ SkASSERT(test);
+ }
+ return result;
+}
+
+template<typename TCurve, typename OppCurve>
+void SkTSect<TCurve, OppCurve>::recoverCollapsed() {
+ SkTSpan<TCurve, OppCurve>* deleted = fDeleted;
+ while (deleted) {
+ SkTSpan<TCurve, OppCurve>* delNext = deleted->fNext;
+ if (deleted->fCollapsed) {
+ SkTSpan<TCurve, OppCurve>** spanPtr = &fHead;
+ while (*spanPtr && (*spanPtr)->fEndT <= deleted->fStartT) {
+ spanPtr = &(*spanPtr)->fNext;
+ }
+ deleted->fNext = *spanPtr;
+ *spanPtr = deleted;
+ }
+ deleted = delNext;
+ }
+}
+
+template<typename TCurve, typename OppCurve>
+void SkTSect<TCurve, OppCurve>::removeAllBut(const SkTSpan<OppCurve, TCurve>* keep,
+ SkTSpan<TCurve, OppCurve>* span, SkTSect<OppCurve, TCurve>* opp) {
+ const SkTSpanBounded<OppCurve, TCurve>* testBounded = span->fBounded;
+ while (testBounded) {
+ SkTSpan<OppCurve, TCurve>* bounded = testBounded->fBounded;
+ const SkTSpanBounded<OppCurve, TCurve>* next = testBounded->fNext;
+ // may have been deleted when opp did 'remove all but'
+ if (bounded != keep && !bounded->fDeleted) {
+ SkAssertResult(SkDEBUGCODE(!) span->removeBounded(bounded));
+ if (bounded->removeBounded(span)) {
+ opp->removeSpan(bounded);
+ }
+ }
+ testBounded = next;
+ }
+ SkASSERT(!span->fDeleted);
+ SkASSERT(span->findOppSpan(keep));
+ SkASSERT(keep->findOppSpan(span));
+}
+
+template<typename TCurve, typename OppCurve>
+void SkTSect<TCurve, OppCurve>::removeByPerpendicular(SkTSect<OppCurve, TCurve>* opp) {
+ SkTSpan<TCurve, OppCurve>* test = fHead;
+ SkTSpan<TCurve, OppCurve>* next;
+ do {
+ next = test->fNext;
+ if (test->fCoinStart.perpT() < 0 || test->fCoinEnd.perpT() < 0) {
+ continue;
+ }
+ SkDVector startV = test->fCoinStart.perpPt() - test->fPart[0];
+ SkDVector endV = test->fCoinEnd.perpPt() - test->fPart[TCurve::kPointLast];
+#if DEBUG_T_SECT
+ SkDebugf("%s startV=(%1.9g,%1.9g) endV=(%1.9g,%1.9g) dot=%1.9g\n", __FUNCTION__,
+ startV.fX, startV.fY, endV.fX, endV.fY, startV.dot(endV));
+#endif
+ if (startV.dot(endV) <= 0) {
+ continue;
+ }
+ this->removeSpans(test, opp);
+ } while ((test = next));
+}
+
+template<typename TCurve, typename OppCurve>
+void SkTSect<TCurve, OppCurve>::removeCoincident(SkTSpan<TCurve, OppCurve>* span, bool isBetween) {
+ this->unlinkSpan(span);
+ if (isBetween || between(0, span->fCoinStart.perpT(), 1)) {
+ --fActiveCount;
+ span->fNext = fCoincident;
+ fCoincident = span;
+ } else {
+ this->markSpanGone(span);
+ }
+}
+
+template<typename TCurve, typename OppCurve>
+bool SkTSect<TCurve, OppCurve>::removeSpan(SkTSpan<TCurve, OppCurve>* span) {
+ if (!span->fStartT) {
+ fRemovedStartT = true;
+ }
+ if (1 == span->fEndT) {
+ fRemovedEndT = true;
+ }
+ this->unlinkSpan(span);
+ return this->markSpanGone(span);
+}
+
+template<typename TCurve, typename OppCurve>
+void SkTSect<TCurve, OppCurve>::removeSpanRange(SkTSpan<TCurve, OppCurve>* first,
+ SkTSpan<TCurve, OppCurve>* last) {
+ if (first == last) {
+ return;
+ }
+ SkTSpan<TCurve, OppCurve>* span = first;
+ SkASSERT(span);
+ SkTSpan<TCurve, OppCurve>* final = last->fNext;
+ SkTSpan<TCurve, OppCurve>* next = span->fNext;
+ while ((span = next) && span != final) {
+ next = span->fNext;
+ this->markSpanGone(span);
+ }
+ if (final) {
+ final->fPrev = first;
+ }
+ first->fNext = final;
+ first->validate();
+}
+
+template<typename TCurve, typename OppCurve>
+void SkTSect<TCurve, OppCurve>::removeSpans(SkTSpan<TCurve, OppCurve>* span,
+ SkTSect<OppCurve, TCurve>* opp) {
+ SkTSpanBounded<OppCurve, TCurve>* bounded = span->fBounded;
+ while (bounded) {
+ SkTSpan<OppCurve, TCurve>* spanBounded = bounded->fBounded;
+ SkTSpanBounded<OppCurve, TCurve>* next = bounded->fNext;
+ if (span->removeBounded(spanBounded)) { // shuffles last into position 0
+ this->removeSpan(span);
+ }
+ if (spanBounded->removeBounded(span)) {
+ opp->removeSpan(spanBounded);
+ }
+ SkASSERT(!span->fDeleted || !opp->debugHasBounded(span));
+ bounded = next;
+ }
+}
+
+template<typename TCurve, typename OppCurve>
+SkTSpan<TCurve, OppCurve>* SkTSect<TCurve, OppCurve>::spanAtT(double t,
+ SkTSpan<TCurve, OppCurve>** priorSpan) {
+ SkTSpan<TCurve, OppCurve>* test = fHead;
+ SkTSpan<TCurve, OppCurve>* prev = nullptr;
+ while (test && test->fEndT < t) {
+ prev = test;
+ test = test->fNext;
+ }
+ *priorSpan = prev;
+ return test && test->fStartT <= t ? test : nullptr;
+}
+
+template<typename TCurve, typename OppCurve>
+SkTSpan<TCurve, OppCurve>* SkTSect<TCurve, OppCurve>::tail() {
+ SkTSpan<TCurve, OppCurve>* result = fHead;
+ SkTSpan<TCurve, OppCurve>* next = fHead;
+ while ((next = next->fNext)) {
+ if (next->fEndT > result->fEndT) {
+ result = next;
+ }
+ }
+ return result;
+}
+
+/* Each span has a range of opposite spans it intersects. After the span is split in two,
+ adjust the range to its new size */
+template<typename TCurve, typename OppCurve>
+void SkTSect<TCurve, OppCurve>::trim(SkTSpan<TCurve, OppCurve>* span,
+ SkTSect<OppCurve, TCurve>* opp) {
+ span->initBounds(fCurve);
+ const SkTSpanBounded<OppCurve, TCurve>* testBounded = span->fBounded;
+ while (testBounded) {
+ SkTSpan<OppCurve, TCurve>* test = testBounded->fBounded;
+ const SkTSpanBounded<OppCurve, TCurve>* next = testBounded->fNext;
+ int oppSects, sects = this->intersects(span, opp, test, &oppSects);
+ if (sects >= 1) {
+ if (oppSects == 2) {
+ test->initBounds(opp->fCurve);
+ opp->removeAllBut(span, test, this);
+ }
+ if (sects == 2) {
+ span->initBounds(fCurve);
+ this->removeAllBut(test, span, opp);
+ return;
+ }
+ } else {
+ if (span->removeBounded(test)) {
+ this->removeSpan(span);
+ }
+ if (test->removeBounded(span)) {
+ opp->removeSpan(test);
+ }
+ }
+ testBounded = next;
+ }
+}
+
+template<typename TCurve, typename OppCurve>
+void SkTSect<TCurve, OppCurve>::unlinkSpan(SkTSpan<TCurve, OppCurve>* span) {
+ SkTSpan<TCurve, OppCurve>* prev = span->fPrev;
+ SkTSpan<TCurve, OppCurve>* next = span->fNext;
+ if (prev) {
+ prev->fNext = next;
+ if (next) {
+ next->fPrev = prev;
+ next->validate();
+ }
+ } else {
+ fHead = next;
+ if (next) {
+ next->fPrev = nullptr;
+ }
+ }
+}
+
+template<typename TCurve, typename OppCurve>
+bool SkTSect<TCurve, OppCurve>::updateBounded(SkTSpan<TCurve, OppCurve>* first,
+ SkTSpan<TCurve, OppCurve>* last, SkTSpan<OppCurve, TCurve>* oppFirst) {
+ SkTSpan<TCurve, OppCurve>* test = first;
+ const SkTSpan<TCurve, OppCurve>* final = last->next();
+ bool deleteSpan = false;
+ do {
+ deleteSpan |= test->removeAllBounded();
+ } while ((test = test->fNext) != final && test);
+ first->fBounded = nullptr;
+ first->addBounded(oppFirst, &fHeap);
+ // cannot call validate until remove span range is called
+ return deleteSpan;
+}
+
+
+template<typename TCurve, typename OppCurve>
+void SkTSect<TCurve, OppCurve>::validate() const {
+#if DEBUG_VALIDATE
+ int count = 0;
+ double last = 0;
+ if (fHead) {
+ const SkTSpan<TCurve, OppCurve>* span = fHead;
+ SkASSERT(!span->fPrev);
+ const SkTSpan<TCurve, OppCurve>* next;
+ do {
+ span->validate();
+ SkASSERT(span->fStartT >= last);
+ last = span->fEndT;
+ ++count;
+ next = span->fNext;
+ SkASSERT(next != span);
+ } while ((span = next) != nullptr);
+ }
+ SkASSERT(count == fActiveCount);
+#endif
+#if DEBUG_T_SECT
+ SkASSERT(fActiveCount <= fDebugAllocatedCount);
+ int deletedCount = 0;
+ const SkTSpan<TCurve, OppCurve>* deleted = fDeleted;
+ while (deleted) {
+ ++deletedCount;
+ deleted = deleted->fNext;
+ }
+ const SkTSpan<TCurve, OppCurve>* coincident = fCoincident;
+ while (coincident) {
+ ++deletedCount;
+ coincident = coincident->fNext;
+ }
+ SkASSERT(fActiveCount + deletedCount == fDebugAllocatedCount);
+#endif
+}
+
+template<typename TCurve, typename OppCurve>
+void SkTSect<TCurve, OppCurve>::validateBounded() const {
+#if DEBUG_VALIDATE
+ if (!fHead) {
+ return;
+ }
+ const SkTSpan<TCurve, OppCurve>* span = fHead;
+ do {
+ span->validateBounded();
+ } while ((span = span->fNext) != nullptr);
+#endif
+}
+
+template<typename TCurve, typename OppCurve>
+int SkTSect<TCurve, OppCurve>::EndsEqual(const SkTSect<TCurve, OppCurve>* sect1,
+ const SkTSect<OppCurve, TCurve>* sect2, SkIntersections* intersections) {
+ int zeroOneSet = 0;
+ if (sect1->fCurve[0] == sect2->fCurve[0]) {
+ zeroOneSet |= kZeroS1Set | kZeroS2Set;
+ intersections->insert(0, 0, sect1->fCurve[0]);
+ }
+ if (sect1->fCurve[0] == sect2->fCurve[OppCurve::kPointLast]) {
+ zeroOneSet |= kZeroS1Set | kOneS2Set;
+ intersections->insert(0, 1, sect1->fCurve[0]);
+ }
+ if (sect1->fCurve[TCurve::kPointLast] == sect2->fCurve[0]) {
+ zeroOneSet |= kOneS1Set | kZeroS2Set;
+ intersections->insert(1, 0, sect1->fCurve[TCurve::kPointLast]);
+ }
+ if (sect1->fCurve[TCurve::kPointLast] == sect2->fCurve[OppCurve::kPointLast]) {
+ zeroOneSet |= kOneS1Set | kOneS2Set;
+ intersections->insert(1, 1, sect1->fCurve[TCurve::kPointLast]);
+ }
+ // check for zero
+ if (!(zeroOneSet & (kZeroS1Set | kZeroS2Set))
+ && sect1->fCurve[0].approximatelyEqual(sect2->fCurve[0])) {
+ zeroOneSet |= kZeroS1Set | kZeroS2Set;
+ intersections->insertNear(0, 0, sect1->fCurve[0], sect2->fCurve[0]);
+ }
+ if (!(zeroOneSet & (kZeroS1Set | kOneS2Set))
+ && sect1->fCurve[0].approximatelyEqual(sect2->fCurve[OppCurve::kPointLast])) {
+ zeroOneSet |= kZeroS1Set | kOneS2Set;
+ intersections->insertNear(0, 1, sect1->fCurve[0], sect2->fCurve[OppCurve::kPointLast]);
+ }
+ // check for one
+ if (!(zeroOneSet & (kOneS1Set | kZeroS2Set))
+ && sect1->fCurve[TCurve::kPointLast].approximatelyEqual(sect2->fCurve[0])) {
+ zeroOneSet |= kOneS1Set | kZeroS2Set;
+ intersections->insertNear(1, 0, sect1->fCurve[TCurve::kPointLast], sect2->fCurve[0]);
+ }
+ if (!(zeroOneSet & (kOneS1Set | kOneS2Set))
+ && sect1->fCurve[TCurve::kPointLast].approximatelyEqual(sect2->fCurve[
+ OppCurve::kPointLast])) {
+ zeroOneSet |= kOneS1Set | kOneS2Set;
+ intersections->insertNear(1, 1, sect1->fCurve[TCurve::kPointLast],
+ sect2->fCurve[OppCurve::kPointLast]);
+ }
+ return zeroOneSet;
+}
+
+template<typename TCurve, typename OppCurve>
+struct SkClosestRecord {
+ bool operator<(const SkClosestRecord& rh) const {
+ return fClosest < rh.fClosest;
+ }
+
+ void addIntersection(SkIntersections* intersections) const {
+ double r1t = fC1Index ? fC1Span->endT() : fC1Span->startT();
+ double r2t = fC2Index ? fC2Span->endT() : fC2Span->startT();
+ intersections->insert(r1t, r2t, fC1Span->part()[fC1Index]);
+ }
+
+ void findEnd(const SkTSpan<TCurve, OppCurve>* span1, const SkTSpan<OppCurve, TCurve>* span2,
+ int c1Index, int c2Index) {
+ const TCurve& c1 = span1->part();
+ const OppCurve& c2 = span2->part();
+ if (!c1[c1Index].approximatelyEqual(c2[c2Index])) {
+ return;
+ }
+ double dist = c1[c1Index].distanceSquared(c2[c2Index]);
+ if (fClosest < dist) {
+ return;
+ }
+ fC1Span = span1;
+ fC2Span = span2;
+ fC1StartT = span1->startT();
+ fC1EndT = span1->endT();
+ fC2StartT = span2->startT();
+ fC2EndT = span2->endT();
+ fC1Index = c1Index;
+ fC2Index = c2Index;
+ fClosest = dist;
+ }
+
+ bool matesWith(const SkClosestRecord& mate SkDEBUGPARAMS(SkIntersections* i)) const {
+ SkASSERT(fC1Span == mate.fC1Span || fC1Span->endT() <= mate.fC1Span->startT()
+ || mate.fC1Span->endT() <= fC1Span->startT());
+ SkOPOBJASSERT(i, fC2Span == mate.fC2Span || fC2Span->endT() <= mate.fC2Span->startT()
+ || mate.fC2Span->endT() <= fC2Span->startT());
+ return fC1Span == mate.fC1Span || fC1Span->endT() == mate.fC1Span->startT()
+ || fC1Span->startT() == mate.fC1Span->endT()
+ || fC2Span == mate.fC2Span
+ || fC2Span->endT() == mate.fC2Span->startT()
+ || fC2Span->startT() == mate.fC2Span->endT();
+ }
+
+ void merge(const SkClosestRecord& mate) {
+ fC1Span = mate.fC1Span;
+ fC2Span = mate.fC2Span;
+ fClosest = mate.fClosest;
+ fC1Index = mate.fC1Index;
+ fC2Index = mate.fC2Index;
+ }
+
+ void reset() {
+ fClosest = FLT_MAX;
+ SkDEBUGCODE(fC1Span = nullptr);
+ SkDEBUGCODE(fC2Span = nullptr);
+ SkDEBUGCODE(fC1Index = fC2Index = -1);
+ }
+
+ void update(const SkClosestRecord& mate) {
+ fC1StartT = SkTMin(fC1StartT, mate.fC1StartT);
+ fC1EndT = SkTMax(fC1EndT, mate.fC1EndT);
+ fC2StartT = SkTMin(fC2StartT, mate.fC2StartT);
+ fC2EndT = SkTMax(fC2EndT, mate.fC2EndT);
+ }
+
+ const SkTSpan<TCurve, OppCurve>* fC1Span;
+ const SkTSpan<OppCurve, TCurve>* fC2Span;
+ double fC1StartT;
+ double fC1EndT;
+ double fC2StartT;
+ double fC2EndT;
+ double fClosest;
+ int fC1Index;
+ int fC2Index;
+};
+
+template<typename TCurve, typename OppCurve>
+struct SkClosestSect {
+ SkClosestSect()
+ : fUsed(0) {
+ fClosest.push_back().reset();
+ }
+
+ bool find(const SkTSpan<TCurve, OppCurve>* span1, const SkTSpan<OppCurve, TCurve>* span2
+ SkDEBUGPARAMS(SkIntersections* i)) {
+ SkClosestRecord<TCurve, OppCurve>* record = &fClosest[fUsed];
+ record->findEnd(span1, span2, 0, 0);
+ record->findEnd(span1, span2, 0, OppCurve::kPointLast);
+ record->findEnd(span1, span2, TCurve::kPointLast, 0);
+ record->findEnd(span1, span2, TCurve::kPointLast, OppCurve::kPointLast);
+ if (record->fClosest == FLT_MAX) {
+ return false;
+ }
+ for (int index = 0; index < fUsed; ++index) {
+ SkClosestRecord<TCurve, OppCurve>* test = &fClosest[index];
+ if (test->matesWith(*record SkDEBUGPARAMS(i))) {
+ if (test->fClosest > record->fClosest) {
+ test->merge(*record);
+ }
+ test->update(*record);
+ record->reset();
+ return false;
+ }
+ }
+ ++fUsed;
+ fClosest.push_back().reset();
+ return true;
+ }
+
+ void finish(SkIntersections* intersections) const {
+ SkSTArray<TCurve::kMaxIntersections * 3,
+ const SkClosestRecord<TCurve, OppCurve>*, true> closestPtrs;
+ for (int index = 0; index < fUsed; ++index) {
+ closestPtrs.push_back(&fClosest[index]);
+ }
+ SkTQSort<const SkClosestRecord<TCurve, OppCurve> >(closestPtrs.begin(), closestPtrs.end()
+ - 1);
+ for (int index = 0; index < fUsed; ++index) {
+ const SkClosestRecord<TCurve, OppCurve>* test = closestPtrs[index];
+ test->addIntersection(intersections);
+ }
+ }
+
+ // this is oversized so that extra records can merge into the final one
+ SkSTArray<TCurve::kMaxIntersections * 2, SkClosestRecord<TCurve, OppCurve>, true> fClosest;
+ int fUsed;
+};
+
+// repeatedly split the spans with the largest bounds until the candidate intersections converge
+template<typename TCurve, typename OppCurve>
+void SkTSect<TCurve, OppCurve>::BinarySearch(SkTSect<TCurve, OppCurve>* sect1,
+ SkTSect<OppCurve, TCurve>* sect2, SkIntersections* intersections) {
+#if DEBUG_T_SECT_DUMP > 1
+ gDumpTSectNum = 0;
+#endif
+ SkDEBUGCODE(sect1->fOppSect = sect2);
+ SkDEBUGCODE(sect2->fOppSect = sect1);
+ intersections->reset();
+ intersections->setMax(TCurve::kMaxIntersections + 3); // give extra for slop
+ SkTSpan<TCurve, OppCurve>* span1 = sect1->fHead;
+ SkTSpan<OppCurve, TCurve>* span2 = sect2->fHead;
+ int oppSect, sect = sect1->intersects(span1, sect2, span2, &oppSect);
+// SkASSERT(between(0, sect, 2));
+ if (!sect) {
+ return;
+ }
+ if (sect == 2 && oppSect == 2) {
+ (void) EndsEqual(sect1, sect2, intersections);
+ return;
+ }
+ span1->addBounded(span2, &sect1->fHeap);
+ span2->addBounded(span1, &sect2->fHeap);
+ const int kMaxCoinLoopCount = 8;
+ int coinLoopCount = kMaxCoinLoopCount;
+ double start1s SK_INIT_TO_AVOID_WARNING;
+ double start1e SK_INIT_TO_AVOID_WARNING;
+ do {
+ // find the largest bounds
+ SkTSpan<TCurve, OppCurve>* largest1 = sect1->boundsMax();
+ if (!largest1) {
+ break;
+ }
+ SkTSpan<OppCurve, TCurve>* largest2 = sect2->boundsMax();
+ sect1->fRemovedStartT = sect1->fRemovedEndT = false;
+ sect2->fRemovedStartT = sect2->fRemovedEndT = false;
+ // split it
+ if (!largest2 || (largest1 && (largest1->fBoundsMax > largest2->fBoundsMax
+ || (!largest1->fCollapsed && largest2->fCollapsed)))) {
+ if (largest1->fCollapsed) {
+ break;
+ }
+ // trim parts that don't intersect the opposite
+ SkTSpan<TCurve, OppCurve>* half1 = sect1->addOne();
+ SkDEBUGCODE(half1->debugSetGlobalState(sect1->globalState()));
+ if (!half1->split(largest1, &sect1->fHeap)) {
+ break;
+ }
+ sect1->trim(largest1, sect2);
+ sect1->trim(half1, sect2);
+ } else {
+ if (largest2->fCollapsed) {
+ break;
+ }
+ // trim parts that don't intersect the opposite
+ SkTSpan<OppCurve, TCurve>* half2 = sect2->addOne();
+ SkDEBUGCODE(half2->debugSetGlobalState(sect2->globalState()));
+ if (!half2->split(largest2, &sect2->fHeap)) {
+ break;
+ }
+ sect2->trim(largest2, sect1);
+ sect2->trim(half2, sect1);
+ }
+ sect1->validate();
+ sect2->validate();
+#if DEBUG_T_SECT_LOOP_COUNT
+ intersections->debugBumpLoopCount(SkIntersections::kIterations_DebugLoop);
+#endif
+ // if there are 9 or more continuous spans on both sects, suspect coincidence
+ if (sect1->fActiveCount >= COINCIDENT_SPAN_COUNT
+ && sect2->fActiveCount >= COINCIDENT_SPAN_COUNT) {
+ if (coinLoopCount == kMaxCoinLoopCount) {
+ start1s = sect1->fHead->fStartT;
+ start1e = sect1->tail()->fEndT;
+ }
+ if (!sect1->coincidentCheck(sect2)) {
+ return;
+ }
+ sect1->validate();
+ sect2->validate();
+#if DEBUG_T_SECT_LOOP_COUNT
+ intersections->debugBumpLoopCount(SkIntersections::kCoinCheck_DebugLoop);
+#endif
+ if (!--coinLoopCount && sect1->fHead && sect2->fHead) {
+ /* All known working cases resolve in two tries. Sadly, cubicConicTests[0]
+ gets stuck in a loop. It adds an extension to allow a coincident end
+ perpendicular to track its intersection in the opposite curve. However,
+ the bounding box of the extension does not intersect the original curve,
+ so the extension is discarded, only to be added again the next time around. */
+ sect1->coincidentForce(sect2, start1s, start1e);
+ sect1->validate();
+ sect2->validate();
+ }
+ }
+ if (sect1->fActiveCount >= COINCIDENT_SPAN_COUNT
+ && sect2->fActiveCount >= COINCIDENT_SPAN_COUNT) {
+ sect1->computePerpendiculars(sect2, sect1->fHead, sect1->tail());
+ sect2->computePerpendiculars(sect1, sect2->fHead, sect2->tail());
+ sect1->removeByPerpendicular(sect2);
+ sect1->validate();
+ sect2->validate();
+#if DEBUG_T_SECT_LOOP_COUNT
+ intersections->debugBumpLoopCount(SkIntersections::kComputePerp_DebugLoop);
+#endif
+ if (sect1->collapsed() > TCurve::kMaxIntersections) {
+ break;
+ }
+ }
+#if DEBUG_T_SECT_DUMP
+ sect1->dumpBoth(sect2);
+#endif
+ if (!sect1->fHead || !sect2->fHead) {
+ break;
+ }
+ } while (true);
+ SkTSpan<TCurve, OppCurve>* coincident = sect1->fCoincident;
+ if (coincident) {
+ // if there is more than one coincident span, check loosely to see if they should be joined
+ if (coincident->fNext) {
+ sect1->mergeCoincidence(sect2);
+ coincident = sect1->fCoincident;
+ }
+ SkASSERT(sect2->fCoincident); // courtesy check : coincidence only looks at sect 1
+ do {
+ if (!coincident->fCoinStart.isMatch()) {
+ continue;
+ }
+ if (!coincident->fCoinEnd.isMatch()) {
+ continue;
+ }
+ int index = intersections->insertCoincident(coincident->fStartT,
+ coincident->fCoinStart.perpT(), coincident->fPart[0]);
+ if ((intersections->insertCoincident(coincident->fEndT,
+ coincident->fCoinEnd.perpT(),
+ coincident->fPart[TCurve::kPointLast]) < 0) && index >= 0) {
+ intersections->clearCoincidence(index);
+ }
+ } while ((coincident = coincident->fNext));
+ }
+ int zeroOneSet = EndsEqual(sect1, sect2, intersections);
+ if (!sect1->fHead || !sect2->fHead) {
+ // if the final iteration removed an end (t of 0 or 1), check whether a perpendicular from that end hits the opposite curve
+ if (sect1->fRemovedStartT && !(zeroOneSet & kZeroS1Set)) {
+ SkTCoincident<TCurve, OppCurve> perp; // intersect perpendicular with opposite curve
+ perp.setPerp(sect1->fCurve, 0, sect1->fCurve.fPts[0], sect2->fCurve);
+ if (perp.isMatch()) {
+ intersections->insert(0, perp.perpT(), perp.perpPt());
+ }
+ }
+ if (sect1->fRemovedEndT && !(zeroOneSet & kOneS1Set)) {
+ SkTCoincident<TCurve, OppCurve> perp;
+ perp.setPerp(sect1->fCurve, 1, sect1->fCurve.fPts[TCurve::kPointLast], sect2->fCurve);
+ if (perp.isMatch()) {
+ intersections->insert(1, perp.perpT(), perp.perpPt());
+ }
+ }
+ if (sect2->fRemovedStartT && !(zeroOneSet & kZeroS2Set)) {
+ SkTCoincident<OppCurve, TCurve> perp;
+ perp.setPerp(sect2->fCurve, 0, sect2->fCurve.fPts[0], sect1->fCurve);
+ if (perp.isMatch()) {
+ intersections->insert(perp.perpT(), 0, perp.perpPt());
+ }
+ }
+ if (sect2->fRemovedEndT && !(zeroOneSet & kOneS2Set)) {
+ SkTCoincident<OppCurve, TCurve> perp;
+ perp.setPerp(sect2->fCurve, 1, sect2->fCurve.fPts[OppCurve::kPointLast], sect1->fCurve);
+ if (perp.isMatch()) {
+ intersections->insert(perp.perpT(), 1, perp.perpPt());
+ }
+ }
+ return;
+ }
+ sect1->recoverCollapsed();
+ sect2->recoverCollapsed();
+ SkTSpan<TCurve, OppCurve>* result1 = sect1->fHead;
+ // check heads and tails for zero and one and insert them if we haven't already done so
+ const SkTSpan<TCurve, OppCurve>* head1 = result1;
+ if (!(zeroOneSet & kZeroS1Set) && approximately_less_than_zero(head1->fStartT)) {
+ const SkDPoint& start1 = sect1->fCurve[0];
+ if (head1->isBounded()) {
+ double t = head1->closestBoundedT(start1);
+ if (sect2->fCurve.ptAtT(t).approximatelyEqual(start1)) {
+ intersections->insert(0, t, start1);
+ }
+ }
+ }
+ const SkTSpan<OppCurve, TCurve>* head2 = sect2->fHead;
+ if (!(zeroOneSet & kZeroS2Set) && approximately_less_than_zero(head2->fStartT)) {
+ const SkDPoint& start2 = sect2->fCurve[0];
+ if (head2->isBounded()) {
+ double t = head2->closestBoundedT(start2);
+ if (sect1->fCurve.ptAtT(t).approximatelyEqual(start2)) {
+ intersections->insert(t, 0, start2);
+ }
+ }
+ }
+ const SkTSpan<TCurve, OppCurve>* tail1 = sect1->tail();
+ if (!(zeroOneSet & kOneS1Set) && approximately_greater_than_one(tail1->fEndT)) {
+ const SkDPoint& end1 = sect1->fCurve[TCurve::kPointLast];
+ if (tail1->isBounded()) {
+ double t = tail1->closestBoundedT(end1);
+ if (sect2->fCurve.ptAtT(t).approximatelyEqual(end1)) {
+ intersections->insert(1, t, end1);
+ }
+ }
+ }
+ const SkTSpan<OppCurve, TCurve>* tail2 = sect2->tail();
+ if (!(zeroOneSet & kOneS2Set) && approximately_greater_than_one(tail2->fEndT)) {
+ const SkDPoint& end2 = sect2->fCurve[OppCurve::kPointLast];
+ if (tail2->isBounded()) {
+ double t = tail2->closestBoundedT(end2);
+ if (sect1->fCurve.ptAtT(t).approximatelyEqual(end2)) {
+ intersections->insert(t, 1, end2);
+ }
+ }
+ }
+ SkClosestSect<TCurve, OppCurve> closest;
+ do {
+ while (result1 && result1->fCoinStart.isMatch() && result1->fCoinEnd.isMatch()) {
+ result1 = result1->fNext;
+ }
+ if (!result1) {
+ break;
+ }
+ SkTSpan<OppCurve, TCurve>* result2 = sect2->fHead;
+ bool found = false;
+ while (result2) {
+ found |= closest.find(result1, result2 SkDEBUGPARAMS(intersections));
+ result2 = result2->fNext;
+ }
+ } while ((result1 = result1->fNext));
+ closest.finish(intersections);
+ // if there is more than one intersection and adjacent pairs aren't already coincident, check whether the midpoint between them is also coincident
+ int last = intersections->used() - 1;
+ for (int index = 0; index < last; ) {
+ if (intersections->isCoincident(index) && intersections->isCoincident(index + 1)) {
+ ++index;
+ continue;
+ }
+ double midT = ((*intersections)[0][index] + (*intersections)[0][index + 1]) / 2;
+ SkDPoint midPt = sect1->fCurve.ptAtT(midT);
+ // intersect perpendicular with opposite curve
+ SkTCoincident<TCurve, OppCurve> perp;
+ perp.setPerp(sect1->fCurve, midT, midPt, sect2->fCurve);
+ if (!perp.isMatch()) {
+ ++index;
+ continue;
+ }
+ if (intersections->isCoincident(index)) {
+ intersections->removeOne(index);
+ --last;
+ } else if (intersections->isCoincident(index + 1)) {
+ intersections->removeOne(index + 1);
+ --last;
+ } else {
+ intersections->setCoincident(index++);
+ }
+ intersections->setCoincident(index);
+ }
+ SkASSERT(intersections->used() <= TCurve::kMaxIntersections);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsTightBounds.cpp b/gfx/skia/skia/src/pathops/SkPathOpsTightBounds.cpp
new file mode 100644
index 000000000..d748ff538
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsTightBounds.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkOpEdgeBuilder.h"
+#include "SkPathOpsCommon.h"
+
+bool TightBounds(const SkPath& path, SkRect* result) {
+ SkPath::RawIter iter(path);
+ SkRect moveBounds = { SK_ScalarMax, SK_ScalarMax, SK_ScalarMin, SK_ScalarMin };
+ bool wellBehaved = true;
+ SkPath::Verb verb;
+ do {
+ SkPoint pts[4];
+ verb = iter.next(pts);
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ moveBounds.fLeft = SkTMin(moveBounds.fLeft, pts[0].fX);
+ moveBounds.fTop = SkTMin(moveBounds.fTop, pts[0].fY);
+ moveBounds.fRight = SkTMax(moveBounds.fRight, pts[0].fX);
+ moveBounds.fBottom = SkTMax(moveBounds.fBottom, pts[0].fY);
+ break;
+ case SkPath::kQuad_Verb:
+ case SkPath::kConic_Verb:
+ if (!wellBehaved) {
+ break;
+ }
+ wellBehaved &= between(pts[0].fX, pts[1].fX, pts[2].fX);
+ wellBehaved &= between(pts[0].fY, pts[1].fY, pts[2].fY);
+ break;
+ case SkPath::kCubic_Verb:
+ if (!wellBehaved) {
+ break;
+ }
+ wellBehaved &= between(pts[0].fX, pts[1].fX, pts[3].fX);
+ wellBehaved &= between(pts[0].fY, pts[1].fY, pts[3].fY);
+ wellBehaved &= between(pts[0].fX, pts[2].fX, pts[3].fX);
+ wellBehaved &= between(pts[0].fY, pts[2].fY, pts[3].fY);
+ break;
+ default:
+ break;
+ }
+ } while (verb != SkPath::kDone_Verb);
+ if (wellBehaved) {
+ *result = path.getBounds();
+ return true;
+ }
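+ // editorial note (not upstream): when every control point lies between its segment's
+ // endpoints in both x and y, the control-point bounds returned by getBounds() above are
+ // already tight, so the contour walk below is only needed for the general case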
+ SkChunkAlloc allocator(4096); // FIXME: constant-ize, tune
+ SkOpContour contour;
+ SkOpContourHead* contourList = static_cast<SkOpContourHead*>(&contour);
+ SkOpGlobalState globalState(contourList, &allocator SkDEBUGPARAMS(false)
+ SkDEBUGPARAMS(nullptr));
+ // turn path into list of segments
+ SkScalar scaleFactor = ScaleFactor(path);
+ SkPath scaledPath;
+ const SkPath* workingPath;
+ if (scaleFactor > SK_Scalar1) {
+ ScalePath(path, 1.f / scaleFactor, &scaledPath);
+ workingPath = &scaledPath;
+ } else {
+ workingPath = &path;
+ }
+ SkOpEdgeBuilder builder(*workingPath, contourList, &globalState);
+ if (!builder.finish()) {
+ return false;
+ }
+ if (!SortContourList(&contourList, false, false)) {
+ *result = moveBounds;
+ return true;
+ }
+ SkOpContour* current = contourList;
+ SkPathOpsBounds bounds = current->bounds();
+ while ((current = current->next())) {
+ bounds.add(current->bounds());
+ }
+ *result = bounds;
+ if (!moveBounds.isEmpty()) {
+ result->join(moveBounds);
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsTypes.cpp b/gfx/skia/skia/src/pathops/SkPathOpsTypes.cpp
new file mode 100644
index 000000000..5f87076c2
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsTypes.cpp
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkFloatBits.h"
+#include "SkOpCoincidence.h"
+#include "SkPathOpsTypes.h"
+
+static bool arguments_denormalized(float a, float b, int epsilon) {
+ float denormalizedCheck = FLT_EPSILON * epsilon / 2;
+ return fabsf(a) <= denormalizedCheck && fabsf(b) <= denormalizedCheck;
+}
+
+// from http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
+// FIXME: move to SkFloatBits.h
+static bool equal_ulps(float a, float b, int epsilon, int depsilon) {
+ if (arguments_denormalized(a, b, depsilon)) {
+ return true;
+ }
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits < bBits + epsilon && bBits < aBits + epsilon;
+}
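+
+// editorial note (not upstream): SkFloatAs2sCompliment remaps the sign-magnitude float bits
+// onto a monotonic two's-complement scale, so the integer difference above counts units in
+// the last place; the denormalized check falls back to an absolute tolerance where ULP
+// spacing collapses toward zero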
+
+static bool equal_ulps_no_normal_check(float a, float b, int epsilon, int depsilon) {
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits < bBits + epsilon && bBits < aBits + epsilon;
+}
+
+static bool equal_ulps_pin(float a, float b, int epsilon, int depsilon) {
+ if (!SkScalarIsFinite(a) || !SkScalarIsFinite(b)) {
+ return false;
+ }
+ if (arguments_denormalized(a, b, depsilon)) {
+ return true;
+ }
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits < bBits + epsilon && bBits < aBits + epsilon;
+}
+
+static bool d_equal_ulps(float a, float b, int epsilon) {
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits < bBits + epsilon && bBits < aBits + epsilon;
+}
+
+static bool not_equal_ulps(float a, float b, int epsilon) {
+ if (arguments_denormalized(a, b, epsilon)) {
+ return false;
+ }
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits >= bBits + epsilon || bBits >= aBits + epsilon;
+}
+
+static bool not_equal_ulps_pin(float a, float b, int epsilon) {
+ if (!SkScalarIsFinite(a) || !SkScalarIsFinite(b)) {
+ return false;
+ }
+ if (arguments_denormalized(a, b, epsilon)) {
+ return false;
+ }
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits >= bBits + epsilon || bBits >= aBits + epsilon;
+}
+
+static bool d_not_equal_ulps(float a, float b, int epsilon) {
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits >= bBits + epsilon || bBits >= aBits + epsilon;
+}
+
+static bool less_ulps(float a, float b, int epsilon) {
+ if (arguments_denormalized(a, b, epsilon)) {
+ return a <= b - FLT_EPSILON * epsilon;
+ }
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits <= bBits - epsilon;
+}
+
+static bool less_or_equal_ulps(float a, float b, int epsilon) {
+ if (arguments_denormalized(a, b, epsilon)) {
+ return a < b + FLT_EPSILON * epsilon;
+ }
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits < bBits + epsilon;
+}
+
+// equality using the same error term as between
+bool AlmostBequalUlps(float a, float b) {
+ const int UlpsEpsilon = 2;
+ return equal_ulps(a, b, UlpsEpsilon, UlpsEpsilon);
+}
+
+bool AlmostPequalUlps(float a, float b) {
+ const int UlpsEpsilon = 8;
+ return equal_ulps(a, b, UlpsEpsilon, UlpsEpsilon);
+}
+
+bool AlmostDequalUlps(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return d_equal_ulps(a, b, UlpsEpsilon);
+}
+
+bool AlmostDequalUlps(double a, double b) {
+ return AlmostDequalUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool AlmostEqualUlps(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return equal_ulps(a, b, UlpsEpsilon, UlpsEpsilon);
+}
+
+bool AlmostEqualUlpsNoNormalCheck(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return equal_ulps_no_normal_check(a, b, UlpsEpsilon, UlpsEpsilon);
+}
+
+bool AlmostEqualUlps_Pin(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return equal_ulps_pin(a, b, UlpsEpsilon, UlpsEpsilon);
+}
+
+bool NotAlmostEqualUlps(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return not_equal_ulps(a, b, UlpsEpsilon);
+}
+
+bool NotAlmostEqualUlps_Pin(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return not_equal_ulps_pin(a, b, UlpsEpsilon);
+}
+
+bool NotAlmostDequalUlps(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return d_not_equal_ulps(a, b, UlpsEpsilon);
+}
+
+bool RoughlyEqualUlps(float a, float b) {
+ const int UlpsEpsilon = 256;
+ const int DUlpsEpsilon = 1024;
+ return equal_ulps(a, b, UlpsEpsilon, DUlpsEpsilon);
+}
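+
+// editorial note (not upstream): the variants above differ only in tolerance -- Bequal allows
+// 2 ulps, Pequal 8, Dequal and Equal 16, and Roughly 256 -- so callers choose how forgiving a
+// comparison they need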
+
+bool AlmostBetweenUlps(float a, float b, float c) {
+ const int UlpsEpsilon = 2;
+ return a <= c ? less_or_equal_ulps(a, b, UlpsEpsilon) && less_or_equal_ulps(b, c, UlpsEpsilon)
+ : less_or_equal_ulps(b, a, UlpsEpsilon) && less_or_equal_ulps(c, b, UlpsEpsilon);
+}
+
+bool AlmostLessUlps(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return less_ulps(a, b, UlpsEpsilon);
+}
+
+bool AlmostLessOrEqualUlps(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return less_or_equal_ulps(a, b, UlpsEpsilon);
+}
+
+int UlpsDistance(float a, float b) {
+ SkFloatIntUnion floatIntA, floatIntB;
+ floatIntA.fFloat = a;
+ floatIntB.fFloat = b;
+ // Different signs mean they do not match.
+ if ((floatIntA.fSignBitInt < 0) != (floatIntB.fSignBitInt < 0)) {
+ // Check for equality to make sure +0 == -0
+ return a == b ? 0 : SK_MaxS32;
+ }
+ // Find the difference in ULPs.
+ return SkTAbs(floatIntA.fSignBitInt - floatIntB.fSignBitInt);
+}
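+
+// editorial illustration (not upstream): adjacent representable floats of the same sign differ
+// by exactly one ulp, so for example UlpsDistance(1.0f, nextafterf(1.0f, 2.0f)) == 1, while
+// operands of opposite sign report SK_MaxS32 unless both are zero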
+
+// cube root approximation using bit hack for 64-bit float
+// adapted from Kahan's cbrt
+static double cbrt_5d(double d) {
+ const unsigned int B1 = 715094163;
+ double t = 0.0;
+ unsigned int* pt = (unsigned int*) &t;
+ unsigned int* px = (unsigned int*) &d;
+ pt[1] = px[1] / 3 + B1;
+ return t;
+}
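+
+// editorial note (not upstream): the bit hack above assumes little-endian doubles, since
+// pt[1]/px[1] must address the high word that holds the sign and exponent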
+
+// iterative cube root approximation using Halley's method (double)
+static double cbrta_halleyd(const double a, const double R) {
+ const double a3 = a * a * a;
+ const double b = a * (a3 + R + R) / (a3 + a3 + R);
+ return b;
+}
+
+// cube root approximation using 3 iterations of Halley's method (double)
+static double halley_cbrt3d(double d) {
+ double a = cbrt_5d(d);
+ a = cbrta_halleyd(a, d);
+ a = cbrta_halleyd(a, d);
+ return cbrta_halleyd(a, d);
+}
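+
+// editorial note (not upstream): the seed is only good to a handful of bits (the _5d suffix
+// suggests about five), and Halley's method converges cubically, so the three iterations
+// above are enough to reach full double precision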
+
+double SkDCubeRoot(double x) {
+ if (approximately_zero_cubed(x)) {
+ return 0;
+ }
+ double result = halley_cbrt3d(fabs(x));
+ if (x < 0) {
+ result = -result;
+ }
+ return result;
+}
+
+SkOpGlobalState::SkOpGlobalState(SkOpContourHead* head,
+ SkChunkAlloc* allocator
+ SkDEBUGPARAMS(bool debugSkipAssert)
+ SkDEBUGPARAMS(const char* testName))
+ : fAllocator(allocator)
+ , fCoincidence(nullptr)
+ , fContourHead(head)
+ , fNested(0)
+ , fWindingFailed(false)
+ , fPhase(SkOpPhase::kIntersecting)
+ SkDEBUGPARAMS(fDebugTestName(testName))
+ SkDEBUGPARAMS(fAngleID(0))
+ SkDEBUGPARAMS(fCoinID(0))
+ SkDEBUGPARAMS(fContourID(0))
+ SkDEBUGPARAMS(fPtTID(0))
+ SkDEBUGPARAMS(fSegmentID(0))
+ SkDEBUGPARAMS(fSpanID(0))
+ SkDEBUGPARAMS(fDebugSkipAssert(debugSkipAssert)) {
+#if DEBUG_T_SECT_LOOP_COUNT
+ debugResetLoopCounts();
+#endif
+#if DEBUG_COIN
+ fPreviousFuncName = nullptr;
+#endif
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsTypes.h b/gfx/skia/skia/src/pathops/SkPathOpsTypes.h
new file mode 100644
index 000000000..786eb2288
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsTypes.h
@@ -0,0 +1,618 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsTypes_DEFINED
+#define SkPathOpsTypes_DEFINED
+
+#include <float.h> // for FLT_EPSILON
+#include <math.h> // for fabs, sqrt
+
+#include "SkFloatingPoint.h"
+#include "SkPath.h"
+#include "SkPathOps.h"
+#include "SkPathOpsDebug.h"
+#include "SkScalar.h"
+
+enum SkPathOpsMask {
+ kWinding_PathOpsMask = -1,
+ kNo_PathOpsMask = 0,
+ kEvenOdd_PathOpsMask = 1
+};
+
+class SkChunkAlloc;
+class SkOpCoincidence;
+class SkOpContour;
+class SkOpContourHead;
+class SkIntersections;
+class SkIntersectionHelper;
+
+enum class SkOpPhase : char {
+ kNoChange,
+ kIntersecting,
+ kWalking,
+ kFixWinding,
+};
+
+class SkOpGlobalState {
+public:
+ SkOpGlobalState(SkOpContourHead* head,
+ SkChunkAlloc* allocator SkDEBUGPARAMS(bool debugSkipAssert)
+ SkDEBUGPARAMS(const char* testName));
+
+ enum {
+ kMaxWindingTries = 10
+ };
+
+ bool allocatedOpSpan() const {
+ return fAllocatedOpSpan;
+ }
+
+ SkChunkAlloc* allocator() {
+ return fAllocator;
+ }
+
+ void bumpNested() {
+ ++fNested;
+ }
+
+ void clearNested() {
+ fNested = 0;
+ }
+
+ SkOpCoincidence* coincidence() {
+ return fCoincidence;
+ }
+
+ SkOpContourHead* contourHead() {
+ return fContourHead;
+ }
+
+#ifdef SK_DEBUG
+ const class SkOpAngle* debugAngle(int id) const;
+ const SkOpCoincidence* debugCoincidence() const;
+ SkOpContour* debugContour(int id) const;
+ const class SkOpPtT* debugPtT(int id) const;
+ bool debugRunFail() const;
+ const class SkOpSegment* debugSegment(int id) const;
+ bool debugSkipAssert() const { return fDebugSkipAssert; }
+ const class SkOpSpanBase* debugSpan(int id) const;
+ const char* debugTestName() const { return fDebugTestName; }
+#endif
+
+#if DEBUG_T_SECT_LOOP_COUNT
+ void debugAddLoopCount(SkIntersections* , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& );
+ void debugDoYourWorst(SkOpGlobalState* );
+ void debugLoopReport();
+ void debugResetLoopCounts();
+#endif
+
+#if DEBUG_COINCIDENCE
+ void debugSetCheckHealth(bool check) { fDebugCheckHealth = check; }
+ bool debugCheckHealth() const { return fDebugCheckHealth; }
+#endif
+
+#if DEBUG_VALIDATE || DEBUG_COIN
+ void debugSetPhase(const char* funcName DEBUG_COIN_DECLARE_PARAMS()) const;
+#endif
+
+#if DEBUG_COIN
+ void debugAddToCoinChangedDict();
+ void debugAddToGlobalCoinDicts();
+ SkPathOpsDebug::CoinDict* debugCoinChangedDict() { return &fCoinChangedDict; }
+ const SkPathOpsDebug::CoinDictEntry& debugCoinDictEntry() const { return fCoinDictEntry; }
+
+ static void DumpCoinDict();
+#endif
+
+
+ int nested() const {
+ return fNested;
+ }
+
+#ifdef SK_DEBUG
+ int nextAngleID() {
+ return ++fAngleID;
+ }
+
+ int nextCoinID() {
+ return ++fCoinID;
+ }
+
+ int nextContourID() {
+ return ++fContourID;
+ }
+
+ int nextPtTID() {
+ return ++fPtTID;
+ }
+
+ int nextSegmentID() {
+ return ++fSegmentID;
+ }
+
+ int nextSpanID() {
+ return ++fSpanID;
+ }
+#endif
+
+ SkOpPhase phase() const {
+ return fPhase;
+ }
+
+ void resetAllocatedOpSpan() {
+ fAllocatedOpSpan = false;
+ }
+
+ void setAllocatedOpSpan() {
+ fAllocatedOpSpan = true;
+ }
+
+ void setCoincidence(SkOpCoincidence* coincidence) {
+ fCoincidence = coincidence;
+ }
+
+ void setContourHead(SkOpContourHead* contourHead) {
+ fContourHead = contourHead;
+ }
+
+ void setPhase(SkOpPhase phase) {
+ if (SkOpPhase::kNoChange == phase) {
+ return;
+ }
+ SkASSERT(fPhase != phase);
+ fPhase = phase;
+ }
+
+ // called in very rare cases where angles are sorted incorrectly -- signifies the op will fail
+ void setWindingFailed() {
+ fWindingFailed = true;
+ }
+
+ bool windingFailed() const {
+ return fWindingFailed;
+ }
+
+private:
+ SkChunkAlloc* fAllocator;
+ SkOpCoincidence* fCoincidence;
+ SkOpContourHead* fContourHead;
+ int fNested;
+ bool fAllocatedOpSpan;
+ bool fWindingFailed;
+ SkOpPhase fPhase;
+#ifdef SK_DEBUG
+ const char* fDebugTestName;
+ void* fDebugReporter;
+ int fAngleID;
+ int fCoinID;
+ int fContourID;
+ int fPtTID;
+ int fSegmentID;
+ int fSpanID;
+ bool fDebugSkipAssert;
+#endif
+#if DEBUG_T_SECT_LOOP_COUNT
+ int fDebugLoopCount[3];
+ SkPath::Verb fDebugWorstVerb[6];
+ SkPoint fDebugWorstPts[24];
+ float fDebugWorstWeight[6];
+#endif
+#if DEBUG_COIN
+ SkPathOpsDebug::CoinDict fCoinChangedDict;
+ SkPathOpsDebug::CoinDict fCoinVisitedDict;
+ SkPathOpsDebug::CoinDictEntry fCoinDictEntry;
+ const char* fPreviousFuncName;
+#endif
+#if DEBUG_COINCIDENCE
+ bool fDebugCheckHealth;
+#endif
+};
+
+#ifdef SK_DEBUG
+#if DEBUG_COINCIDENCE
+#define SkOPASSERT(cond) SkASSERT((this->globalState() && \
+ (this->globalState()->debugCheckHealth() || \
+ this->globalState()->debugSkipAssert())) || (cond))
+#else
+#define SkOPASSERT(cond) SkASSERT((this->globalState() && \
+ this->globalState()->debugSkipAssert()) || (cond))
+#endif
+#define SkOPOBJASSERT(obj, cond) SkASSERT((obj->debugGlobalState() && \
+ obj->debugGlobalState()->debugSkipAssert()) || (cond))
+#else
+#define SkOPASSERT(cond)
+#define SkOPOBJASSERT(obj, cond)
+#endif
+
+// Use Almost Equal when comparing coordinates. Use epsilon to compare T values.
+bool AlmostEqualUlps(float a, float b);
+inline bool AlmostEqualUlps(double a, double b) {
+ return AlmostEqualUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool AlmostEqualUlpsNoNormalCheck(float a, float b);
+inline bool AlmostEqualUlpsNoNormalCheck(double a, double b) {
+ return AlmostEqualUlpsNoNormalCheck(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool AlmostEqualUlps_Pin(float a, float b);
+inline bool AlmostEqualUlps_Pin(double a, double b) {
+ return AlmostEqualUlps_Pin(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+// Use Almost Dequal when the comparison should not special-case denormalized values.
+bool AlmostDequalUlps(float a, float b);
+bool AlmostDequalUlps(double a, double b);
+
+bool NotAlmostEqualUlps(float a, float b);
+inline bool NotAlmostEqualUlps(double a, double b) {
+ return NotAlmostEqualUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool NotAlmostEqualUlps_Pin(float a, float b);
+inline bool NotAlmostEqualUlps_Pin(double a, double b) {
+ return NotAlmostEqualUlps_Pin(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool NotAlmostDequalUlps(float a, float b);
+inline bool NotAlmostDequalUlps(double a, double b) {
+ return NotAlmostDequalUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+// Use Almost Bequal when comparing coordinates in conjunction with between.
+bool AlmostBequalUlps(float a, float b);
+inline bool AlmostBequalUlps(double a, double b) {
+ return AlmostBequalUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool AlmostPequalUlps(float a, float b);
+inline bool AlmostPequalUlps(double a, double b) {
+ return AlmostPequalUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool RoughlyEqualUlps(float a, float b);
+inline bool RoughlyEqualUlps(double a, double b) {
+ return RoughlyEqualUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool AlmostLessUlps(float a, float b);
+inline bool AlmostLessUlps(double a, double b) {
+ return AlmostLessUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool AlmostLessOrEqualUlps(float a, float b);
+inline bool AlmostLessOrEqualUlps(double a, double b) {
+ return AlmostLessOrEqualUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool AlmostBetweenUlps(float a, float b, float c);
+inline bool AlmostBetweenUlps(double a, double b, double c) {
+ return AlmostBetweenUlps(SkDoubleToScalar(a), SkDoubleToScalar(b), SkDoubleToScalar(c));
+}
+
+int UlpsDistance(float a, float b);
+inline int UlpsDistance(double a, double b) {
+ return UlpsDistance(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+// FLT_EPSILON == 1.19209290E-07 == 1 / (2 ^ 23)
+// DBL_EPSILON == 2.22045e-16
+const double FLT_EPSILON_CUBED = FLT_EPSILON * FLT_EPSILON * FLT_EPSILON;
+const double FLT_EPSILON_HALF = FLT_EPSILON / 2;
+const double FLT_EPSILON_DOUBLE = FLT_EPSILON * 2;
+const double FLT_EPSILON_ORDERABLE_ERR = FLT_EPSILON * 16;
+const double FLT_EPSILON_SQUARED = FLT_EPSILON * FLT_EPSILON;
+const double FLT_EPSILON_SQRT = sqrt(FLT_EPSILON);
+const double FLT_EPSILON_INVERSE = 1 / FLT_EPSILON;
+const double DBL_EPSILON_ERR = DBL_EPSILON * 4; // FIXME: tune -- allow a few bits of error
+const double DBL_EPSILON_SUBDIVIDE_ERR = DBL_EPSILON * 16;
+const double ROUGH_EPSILON = FLT_EPSILON * 64;
+const double MORE_ROUGH_EPSILON = FLT_EPSILON * 256;
+const double WAY_ROUGH_EPSILON = FLT_EPSILON * 2048;
+const double BUMP_EPSILON = FLT_EPSILON * 4096;
+
+const SkScalar INVERSE_NUMBER_RANGE = FLT_EPSILON_ORDERABLE_ERR;
+
+inline bool zero_or_one(double x) {
+ return x == 0 || x == 1;
+}
+
+inline bool approximately_zero(double x) {
+ return fabs(x) < FLT_EPSILON;
+}
+
+inline bool precisely_zero(double x) {
+ return fabs(x) < DBL_EPSILON_ERR;
+}
+
+inline bool precisely_subdivide_zero(double x) {
+ return fabs(x) < DBL_EPSILON_SUBDIVIDE_ERR;
+}
+
+inline bool approximately_zero(float x) {
+ return fabs(x) < FLT_EPSILON;
+}
+
+inline bool approximately_zero_cubed(double x) {
+ return fabs(x) < FLT_EPSILON_CUBED;
+}
+
+inline bool approximately_zero_half(double x) {
+ return fabs(x) < FLT_EPSILON_HALF;
+}
+
+inline bool approximately_zero_double(double x) {
+ return fabs(x) < FLT_EPSILON_DOUBLE;
+}
+
+inline bool approximately_zero_orderable(double x) {
+ return fabs(x) < FLT_EPSILON_ORDERABLE_ERR;
+}
+
+inline bool approximately_zero_squared(double x) {
+ return fabs(x) < FLT_EPSILON_SQUARED;
+}
+
+inline bool approximately_zero_sqrt(double x) {
+ return fabs(x) < FLT_EPSILON_SQRT;
+}
+
+inline bool roughly_zero(double x) {
+ return fabs(x) < ROUGH_EPSILON;
+}
+
+inline bool approximately_zero_inverse(double x) {
+ return fabs(x) > FLT_EPSILON_INVERSE;
+}
+
+inline bool approximately_zero_when_compared_to(double x, double y) {
+ return x == 0 || fabs(x) < fabs(y * FLT_EPSILON);
+}
+
+inline bool precisely_zero_when_compared_to(double x, double y) {
+ return x == 0 || fabs(x) < fabs(y * DBL_EPSILON);
+}
+
+inline bool roughly_zero_when_compared_to(double x, double y) {
+ return x == 0 || fabs(x) < fabs(y * ROUGH_EPSILON);
+}
+
+// Use this for comparing Ts in the range of 0 to 1. For general numbers (larger and smaller) use
+// AlmostEqualUlps instead.
+inline bool approximately_equal(double x, double y) {
+ return approximately_zero(x - y);
+}
+
+inline bool precisely_equal(double x, double y) {
+ return precisely_zero(x - y);
+}
+
+inline bool precisely_subdivide_equal(double x, double y) {
+ return precisely_subdivide_zero(x - y);
+}
+
+inline bool approximately_equal_half(double x, double y) {
+ return approximately_zero_half(x - y);
+}
+
+inline bool approximately_equal_double(double x, double y) {
+ return approximately_zero_double(x - y);
+}
+
+inline bool approximately_equal_orderable(double x, double y) {
+ return approximately_zero_orderable(x - y);
+}
+
+inline bool approximately_equal_squared(double x, double y) {
+ return approximately_equal(x, y);
+}
+
+inline bool approximately_greater(double x, double y) {
+ return x - FLT_EPSILON >= y;
+}
+
+inline bool approximately_greater_double(double x, double y) {
+ return x - FLT_EPSILON_DOUBLE >= y;
+}
+
+inline bool approximately_greater_orderable(double x, double y) {
+ return x - FLT_EPSILON_ORDERABLE_ERR >= y;
+}
+
+inline bool approximately_greater_or_equal(double x, double y) {
+ return x + FLT_EPSILON > y;
+}
+
+inline bool approximately_greater_or_equal_double(double x, double y) {
+ return x + FLT_EPSILON_DOUBLE > y;
+}
+
+inline bool approximately_greater_or_equal_orderable(double x, double y) {
+ return x + FLT_EPSILON_ORDERABLE_ERR > y;
+}
+
+inline bool approximately_lesser(double x, double y) {
+ return x + FLT_EPSILON <= y;
+}
+
+inline bool approximately_lesser_double(double x, double y) {
+ return x + FLT_EPSILON_DOUBLE <= y;
+}
+
+inline bool approximately_lesser_orderable(double x, double y) {
+ return x + FLT_EPSILON_ORDERABLE_ERR <= y;
+}
+
+inline bool approximately_lesser_or_equal(double x, double y) {
+ return x - FLT_EPSILON < y;
+}
+
+inline bool approximately_lesser_or_equal_double(double x, double y) {
+ return x - FLT_EPSILON_DOUBLE < y;
+}
+
+inline bool approximately_lesser_or_equal_orderable(double x, double y) {
+ return x - FLT_EPSILON_ORDERABLE_ERR < y;
+}
+
+inline bool approximately_greater_than_one(double x) {
+ return x > 1 - FLT_EPSILON;
+}
+
+inline bool precisely_greater_than_one(double x) {
+ return x > 1 - DBL_EPSILON_ERR;
+}
+
+inline bool approximately_less_than_zero(double x) {
+ return x < FLT_EPSILON;
+}
+
+inline bool precisely_less_than_zero(double x) {
+ return x < DBL_EPSILON_ERR;
+}
+
+inline bool approximately_negative(double x) {
+ return x < FLT_EPSILON;
+}
+
+inline bool approximately_negative_orderable(double x) {
+ return x < FLT_EPSILON_ORDERABLE_ERR;
+}
+
+inline bool precisely_negative(double x) {
+ return x < DBL_EPSILON_ERR;
+}
+
+inline bool approximately_one_or_less(double x) {
+ return x < 1 + FLT_EPSILON;
+}
+
+inline bool approximately_one_or_less_double(double x) {
+ return x < 1 + FLT_EPSILON_DOUBLE;
+}
+
+inline bool approximately_positive(double x) {
+ return x > -FLT_EPSILON;
+}
+
+inline bool approximately_positive_squared(double x) {
+ return x > -(FLT_EPSILON_SQUARED);
+}
+
+inline bool approximately_zero_or_more(double x) {
+ return x > -FLT_EPSILON;
+}
+
+inline bool approximately_zero_or_more_double(double x) {
+ return x > -FLT_EPSILON_DOUBLE;
+}
+
+inline bool approximately_between_orderable(double a, double b, double c) {
+ return a <= c
+ ? approximately_negative_orderable(a - b) && approximately_negative_orderable(b - c)
+ : approximately_negative_orderable(b - a) && approximately_negative_orderable(c - b);
+}
+
+inline bool approximately_between(double a, double b, double c) {
+ return a <= c ? approximately_negative(a - b) && approximately_negative(b - c)
+ : approximately_negative(b - a) && approximately_negative(c - b);
+}
+
+inline bool precisely_between(double a, double b, double c) {
+ return a <= c ? precisely_negative(a - b) && precisely_negative(b - c)
+ : precisely_negative(b - a) && precisely_negative(c - b);
+}
+
+// returns true if (a <= b <= c) || (a >= b >= c)
+inline bool between(double a, double b, double c) {
+ SkASSERT(((a <= b && b <= c) || (a >= b && b >= c)) == ((a - b) * (c - b) <= 0)
+ || (precisely_zero(a) && precisely_zero(b) && precisely_zero(c)));
+ return (a - b) * (c - b) <= 0;
+}
+
+inline bool roughly_equal(double x, double y) {
+ return fabs(x - y) < ROUGH_EPSILON;
+}
+
+inline bool roughly_negative(double x) {
+ return x < ROUGH_EPSILON;
+}
+
+inline bool roughly_between(double a, double b, double c) {
+ return a <= c ? roughly_negative(a - b) && roughly_negative(b - c)
+ : roughly_negative(b - a) && roughly_negative(c - b);
+}
+
+inline bool more_roughly_equal(double x, double y) {
+ return fabs(x - y) < MORE_ROUGH_EPSILON;
+}
+
+struct SkDPoint;
+struct SkDVector;
+struct SkDLine;
+struct SkDQuad;
+struct SkDConic;
+struct SkDCubic;
+struct SkDRect;
+
+inline SkPath::Verb SkPathOpsPointsToVerb(int points) {
+ int verb = (1 << points) >> 1;
+#ifdef SK_DEBUG
+ switch (points) {
+ case 0: SkASSERT(SkPath::kMove_Verb == verb); break;
+ case 1: SkASSERT(SkPath::kLine_Verb == verb); break;
+ case 2: SkASSERT(SkPath::kQuad_Verb == verb); break;
+ case 3: SkASSERT(SkPath::kCubic_Verb == verb); break;
+ default: SkDEBUGFAIL("should not be here");
+ }
+#endif
+ return (SkPath::Verb)verb;
+}
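+
+// editorial note (not upstream): (1 << points) >> 1 maps 0, 1, 2, 3 points to verbs 0, 1, 2, 4
+// (move, line, quad, cubic); kConic_Verb (3) is skipped because conics share the two-point
+// case with quads and carry their extra weight separately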
+
+inline int SkPathOpsVerbToPoints(SkPath::Verb verb) {
+ int points = (int) verb - (((int) verb + 1) >> 2);
+#ifdef SK_DEBUG
+ switch (verb) {
+ case SkPath::kLine_Verb: SkASSERT(1 == points); break;
+ case SkPath::kQuad_Verb: SkASSERT(2 == points); break;
+ case SkPath::kConic_Verb: SkASSERT(2 == points); break;
+ case SkPath::kCubic_Verb: SkASSERT(3 == points); break;
+ default: SkDEBUGFAIL("should not get here");
+ }
+#endif
+ return points;
+}
+
+inline double SkDInterp(double A, double B, double t) {
+ return A + (B - A) * t;
+}
+
+double SkDCubeRoot(double x);
+
+/* Returns -1 if negative, 0 if zero, 1 if positive
+*/
+inline int SkDSign(double x) {
+ return (x > 0) - (x < 0);
+}
+
+/* Returns 0 if negative, 1 if zero, 2 if positive
+*/
+inline int SKDSide(double x) {
+ return (x > 0) + (x >= 0);
+}
+
+/* Returns 1 if negative, 2 if zero, 4 if positive
+*/
+inline int SkDSideBit(double x) {
+ return 1 << SKDSide(x);
+}
+
+inline double SkPinT(double t) {
+ return precisely_less_than_zero(t) ? 0 : precisely_greater_than_one(t) ? 1 : t;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsWinding.cpp b/gfx/skia/skia/src/pathops/SkPathOpsWinding.cpp
new file mode 100644
index 000000000..35cabcf62
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsWinding.cpp
@@ -0,0 +1,416 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// given a prospective edge, compute its initial winding by projecting a ray
+// if the ray hits another edge
+ // if the edge doesn't have a winding yet, hop up to that edge and start over
+ // concern : check for hops forming a loop
+ // if the edge is unsortable, or
+ // the intersection is nearly at the ends, or
+ // the tangent at the intersection is nearly coincident to the ray,
+ // choose a different ray and try again
+ // concern : if it is unable to succeed after N tries, try another edge? direction?
+// if no edge is hit, compute the winding directly
+
+// given the top span, project the most perpendicular ray and look for intersections
+ // let's try up and then down. What the hey
+
+// bestXY is initialized by caller with basePt
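+// (editorial note, not upstream text: this is the usual ray-casting winding test, except the
+// ray direction is chosen per span -- SkOpRayHit::makeTestBase below fires a horizontal ray
+// when the local tangent is mostly vertical and a vertical ray otherwise, i.e. the most
+// perpendicular choice mentioned above)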
+
+#include "SkOpContour.h"
+#include "SkOpSegment.h"
+#include "SkPathOpsCurve.h"
+
+enum class SkOpRayDir {
+ kLeft,
+ kTop,
+ kRight,
+ kBottom,
+};
+
+#if DEBUG_WINDING
+const char* gDebugRayDirName[] = {
+ "kLeft",
+ "kTop",
+ "kRight",
+ "kBottom"
+};
+#endif
+
+static int xy_index(SkOpRayDir dir) {
+ return static_cast<int>(dir) & 1;
+}
+
+static SkScalar pt_xy(const SkPoint& pt, SkOpRayDir dir) {
+ return (&pt.fX)[xy_index(dir)];
+}
+
+static SkScalar pt_yx(const SkPoint& pt, SkOpRayDir dir) {
+ return (&pt.fX)[!xy_index(dir)];
+}
+
+static double pt_dxdy(const SkDVector& v, SkOpRayDir dir) {
+ return (&v.fX)[xy_index(dir)];
+}
+
+static double pt_dydx(const SkDVector& v, SkOpRayDir dir) {
+ return (&v.fX)[!xy_index(dir)];
+}
+
+static SkScalar rect_side(const SkRect& r, SkOpRayDir dir) {
+ return (&r.fLeft)[static_cast<int>(dir)];
+}
+
+static bool sideways_overlap(const SkRect& rect, const SkPoint& pt, SkOpRayDir dir) {
+ int i = !xy_index(dir);
+ return approximately_between((&rect.fLeft)[i], (&pt.fX)[i], (&rect.fRight)[i]);
+}
+
+static bool less_than(SkOpRayDir dir) {
+ return static_cast<bool>((static_cast<int>(dir) & 2) == 0);
+}
+
+static bool ccw_dxdy(const SkDVector& v, SkOpRayDir dir) {
+ bool vPartPos = pt_dydx(v, dir) > 0;
+ bool leftBottom = ((static_cast<int>(dir) + 1) & 2) != 0;
+ return vPartPos == leftBottom;
+}
+
+struct SkOpRayHit {
+ SkOpRayDir makeTestBase(SkOpSpan* span, double t) {
+ fNext = nullptr;
+ fSpan = span;
+ fT = span->t() * (1 - t) + span->next()->t() * t;
+ SkOpSegment* segment = span->segment();
+ fSlope = segment->dSlopeAtT(fT);
+ fPt = segment->ptAtT(fT);
+ fValid = true;
+ return fabs(fSlope.fX) < fabs(fSlope.fY) ? SkOpRayDir::kLeft : SkOpRayDir::kTop;
+ }
+
+ SkOpRayHit* fNext;
+ SkOpSpan* fSpan;
+ SkPoint fPt;
+ double fT;
+ SkDVector fSlope;
+ bool fValid;
+};
+
+void SkOpContour::rayCheck(const SkOpRayHit& base, SkOpRayDir dir, SkOpRayHit** hits,
+ SkChunkAlloc* allocator) {
+ // if the bounds extreme is outside the best, we're done
+ SkScalar baseXY = pt_xy(base.fPt, dir);
+ SkScalar boundsXY = rect_side(fBounds, dir);
+ bool checkLessThan = less_than(dir);
+ if (!approximately_equal(baseXY, boundsXY) && (baseXY < boundsXY) == checkLessThan) {
+ return;
+ }
+ SkOpSegment* testSegment = &fHead;
+ do {
+ testSegment->rayCheck(base, dir, hits, allocator);
+ } while ((testSegment = testSegment->next()));
+}
+
+void SkOpSegment::rayCheck(const SkOpRayHit& base, SkOpRayDir dir, SkOpRayHit** hits,
+ SkChunkAlloc* allocator) {
+ if (!sideways_overlap(fBounds, base.fPt, dir)) {
+ return;
+ }
+ SkScalar baseXY = pt_xy(base.fPt, dir);
+ SkScalar boundsXY = rect_side(fBounds, dir);
+ bool checkLessThan = less_than(dir);
+ if (!approximately_equal(baseXY, boundsXY) && (baseXY < boundsXY) == checkLessThan) {
+ return;
+ }
+ double tVals[3];
+ SkScalar baseYX = pt_yx(base.fPt, dir);
+ int roots = (*CurveIntercept[fVerb * 2 + xy_index(dir)])(fPts, fWeight, baseYX, tVals);
+ for (int index = 0; index < roots; ++index) {
+ double t = tVals[index];
+ if (base.fSpan->segment() == this && approximately_equal(base.fT, t)) {
+ continue;
+ }
+ SkDVector slope;
+ SkPoint pt;
+ SkDEBUGCODE(sk_bzero(&slope, sizeof(slope)));
+ bool valid = false;
+ if (approximately_zero(t)) {
+ pt = fPts[0];
+ } else if (approximately_equal(t, 1)) {
+ pt = fPts[SkPathOpsVerbToPoints(fVerb)];
+ } else {
+ SkASSERT(between(0, t, 1));
+ pt = this->ptAtT(t);
+ if (SkDPoint::ApproximatelyEqual(pt, base.fPt)) {
+ if (base.fSpan->segment() == this) {
+ continue;
+ }
+ } else {
+ SkScalar ptXY = pt_xy(pt, dir);
+ if (!approximately_equal(baseXY, ptXY) && (baseXY < ptXY) == checkLessThan) {
+ continue;
+ }
+ slope = this->dSlopeAtT(t);
+ if (fVerb == SkPath::kCubic_Verb && base.fSpan->segment() == this
+ && roughly_equal(base.fT, t)
+ && SkDPoint::RoughlyEqual(pt, base.fPt)) {
+ #if DEBUG_WINDING
+ SkDebugf("%s (rarely expect this)\n", __FUNCTION__);
+ #endif
+ continue;
+ }
+ if (fabs(pt_dydx(slope, dir) * 10000) > fabs(pt_dxdy(slope, dir))) {
+ valid = true;
+ }
+ }
+ }
+ SkOpSpan* span = this->windingSpanAtT(t);
+ if (!span) {
+ valid = false;
+ } else if (!span->windValue() && !span->oppValue()) {
+ continue;
+ }
+ SkOpRayHit* newHit = SkOpTAllocator<SkOpRayHit>::Allocate(allocator);
+ newHit->fNext = *hits;
+ newHit->fPt = pt;
+ newHit->fSlope = slope;
+ newHit->fSpan = span;
+ newHit->fT = t;
+ newHit->fValid = valid;
+ *hits = newHit;
+ }
+}
+
+SkOpSpan* SkOpSegment::windingSpanAtT(double tHit) {
+ SkOpSpan* span = &fHead;
+ SkOpSpanBase* next;
+ do {
+ next = span->next();
+ if (approximately_equal(tHit, next->t())) {
+ return nullptr;
+ }
+ if (tHit < next->t()) {
+ return span;
+ }
+ } while (!next->final() && (span = next->upCast()));
+ return nullptr;
+}
+
+static bool hit_compare_x(const SkOpRayHit* a, const SkOpRayHit* b) {
+ return a->fPt.fX < b->fPt.fX;
+}
+
+static bool reverse_hit_compare_x(const SkOpRayHit* a, const SkOpRayHit* b) {
+ return b->fPt.fX < a->fPt.fX;
+}
+
+static bool hit_compare_y(const SkOpRayHit* a, const SkOpRayHit* b) {
+ return a->fPt.fY < b->fPt.fY;
+}
+
+static bool reverse_hit_compare_y(const SkOpRayHit* a, const SkOpRayHit* b) {
+ return b->fPt.fY < a->fPt.fY;
+}
+
+static double get_t_guess(int tTry, int* dirOffset) {
+ double t = 0.5;
+ *dirOffset = tTry & 1;
+ int tBase = tTry >> 1;
+ int tBits = 0;
+ while (tTry >>= 1) {
+ t /= 2;
+ ++tBits;
+ }
+ if (tBits) {
+ int tIndex = (tBase - 1) & ((1 << tBits) - 1);
+ t += t * 2 * tIndex;
+ }
+ return t;
+}
+
+bool SkOpSpan::sortableTop(SkOpContour* contourHead) {
+ SkChunkAlloc allocator(1024);
+ int dirOffset;
+ double t = get_t_guess(fTopTTry++, &dirOffset);
+ SkOpRayHit hitBase;
+ SkOpRayDir dir = hitBase.makeTestBase(this, t);
+ if (hitBase.fSlope.fX == 0 && hitBase.fSlope.fY == 0) {
+ return false;
+ }
+ SkOpRayHit* hitHead = &hitBase;
+ dir = static_cast<SkOpRayDir>(static_cast<int>(dir) + dirOffset);
+ if (hitBase.fSpan && hitBase.fSpan->segment()->verb() > SkPath::kLine_Verb
+ && !pt_yx(hitBase.fSlope.asSkVector(), dir)) {
+ return false;
+ }
+ SkOpContour* contour = contourHead;
+ do {
+ contour->rayCheck(hitBase, dir, &hitHead, &allocator);
+ } while ((contour = contour->next()));
+ // sort hits
+ SkSTArray<1, SkOpRayHit*> sorted;
+ SkOpRayHit* hit = hitHead;
+ while (hit) {
+ sorted.push_back(hit);
+ hit = hit->fNext;
+ }
+ int count = sorted.count();
+ SkTQSort(sorted.begin(), sorted.end() - 1, xy_index(dir)
+ ? less_than(dir) ? hit_compare_y : reverse_hit_compare_y
+ : less_than(dir) ? hit_compare_x : reverse_hit_compare_x);
+ // verify windings
+#if DEBUG_WINDING
+ SkDebugf("%s dir=%s seg=%d t=%1.9g pt=(%1.9g,%1.9g)\n", __FUNCTION__,
+ gDebugRayDirName[static_cast<int>(dir)], hitBase.fSpan->segment()->debugID(),
+ hitBase.fT, hitBase.fPt.fX, hitBase.fPt.fY);
+ for (int index = 0; index < count; ++index) {
+ hit = sorted[index];
+ SkOpSpan* span = hit->fSpan;
+ SkOpSegment* hitSegment = span ? span->segment() : nullptr;
+ bool operand = span ? hitSegment->operand() : false;
+ bool ccw = ccw_dxdy(hit->fSlope, dir);
+ SkDebugf("%s [%d] valid=%d operand=%d span=%d ccw=%d ", __FUNCTION__, index,
+ hit->fValid, operand, span ? span->debugID() : -1, ccw);
+ if (span) {
+ hitSegment->dumpPtsInner();
+ }
+ SkDebugf(" t=%1.9g pt=(%1.9g,%1.9g) slope=(%1.9g,%1.9g)\n", hit->fT,
+ hit->fPt.fX, hit->fPt.fY, hit->fSlope.fX, hit->fSlope.fY);
+ }
+#endif
+ const SkPoint* last = nullptr;
+ int wind = 0;
+ int oppWind = 0;
+ for (int index = 0; index < count; ++index) {
+ hit = sorted[index];
+ if (!hit->fValid) {
+ return false;
+ }
+ bool ccw = ccw_dxdy(hit->fSlope, dir);
+// SkASSERT(!approximately_zero(hit->fT) || !hit->fValid);
+ SkOpSpan* span = hit->fSpan;
+ if (!span) {
+ return false;
+ }
+ SkOpSegment* hitSegment = span->segment();
+ if (span->windValue() == 0 && span->oppValue() == 0) {
+ continue;
+ }
+ if (last && SkDPoint::ApproximatelyEqual(*last, hit->fPt)) {
+ return false;
+ }
+ if (index < count - 1) {
+ const SkPoint& next = sorted[index + 1]->fPt;
+ if (SkDPoint::ApproximatelyEqual(next, hit->fPt)) {
+ return false;
+ }
+ }
+ bool operand = hitSegment->operand();
+ if (operand) {
+ SkTSwap(wind, oppWind);
+ }
+ int lastWind = wind;
+ int lastOpp = oppWind;
+ int windValue = ccw ? -span->windValue() : span->windValue();
+ int oppValue = ccw ? -span->oppValue() : span->oppValue();
+ wind += windValue;
+ oppWind += oppValue;
+ bool sumSet = false;
+ int spanSum = span->windSum();
+ int windSum = SkOpSegment::UseInnerWinding(lastWind, wind) ? wind : lastWind;
+ if (spanSum == SK_MinS32) {
+ span->setWindSum(windSum);
+ sumSet = true;
+ } else {
+ // the need for this condition suggests that UseInnerWinding is flawed
+ // happened when last = 1 wind = -1
+#if 0
+ SkASSERT((hitSegment->isXor() ? (windSum & 1) == (spanSum & 1) : windSum == spanSum)
+ || (abs(wind) == abs(lastWind)
+ && (windSum ^ wind ^ lastWind) == spanSum));
+#endif
+ }
+ int oSpanSum = span->oppSum();
+ int oppSum = SkOpSegment::UseInnerWinding(lastOpp, oppWind) ? oppWind : lastOpp;
+ if (oSpanSum == SK_MinS32) {
+ span->setOppSum(oppSum);
+ } else {
+#if 0
+ SkASSERT(hitSegment->oppXor() ? (oppSum & 1) == (oSpanSum & 1) : oppSum == oSpanSum
+ || (abs(oppWind) == abs(lastOpp)
+ && (oppSum ^ oppWind ^ lastOpp) == oSpanSum));
+#endif
+ }
+ if (sumSet) {
+ if (this->globalState()->phase() == SkOpPhase::kFixWinding) {
+ hitSegment->contour()->setCcw(ccw);
+ } else {
+ (void) hitSegment->markAndChaseWinding(span, span->next(), windSum, oppSum, nullptr);
+ (void) hitSegment->markAndChaseWinding(span->next(), span, windSum, oppSum, nullptr);
+ }
+ }
+ if (operand) {
+ SkTSwap(wind, oppWind);
+ }
+ last = &hit->fPt;
+ this->globalState()->bumpNested();
+ }
+ return true;
+}
+
+SkOpSpan* SkOpSegment::findSortableTop(SkOpContour* contourHead) {
+ SkOpSpan* span = &fHead;
+ SkOpSpanBase* next;
+ do {
+ next = span->next();
+ if (span->done()) {
+ continue;
+ }
+ if (span->windSum() != SK_MinS32) {
+ return span;
+ }
+ if (span->sortableTop(contourHead)) {
+ return span;
+ }
+ } while (!next->final() && (span = next->upCast()));
+ return nullptr;
+}
+
+SkOpSpan* SkOpContour::findSortableTop(SkOpContour* contourHead) {
+ SkOpSegment* testSegment = &fHead;
+ bool allDone = true;
+ do {
+ if (testSegment->done()) {
+ continue;
+ }
+ allDone = false;
+ SkOpSpan* result = testSegment->findSortableTop(contourHead);
+ if (result) {
+ return result;
+ }
+ } while ((testSegment = testSegment->next()));
+ if (allDone) {
+ fDone = true;
+ }
+ return nullptr;
+}
+
+SkOpSpan* FindSortableTop(SkOpContourHead* contourHead) {
+ for (int index = 0; index < SkOpGlobalState::kMaxWindingTries; ++index) {
+ SkOpContour* contour = contourHead;
+ do {
+ if (contour->done()) {
+ continue;
+ }
+ SkOpSpan* result = contour->findSortableTop(contourHead);
+ if (result) {
+ return result;
+ }
+ } while ((contour = contour->next()));
+ }
+ return nullptr;
+}
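
The comment block at the top of SkPathOpsWinding.cpp describes computing an edge's initial winding by casting a ray and summing signed crossings. The sketch below is a deliberately simplified, standalone version of that idea for a polygon of straight edges only; it is not Skia's implementation, which also handles curves, coincident hits, unsortable edges, and the retry logic described above.

    #include <cstdio>
    #include <vector>

    struct Pt { double x, y; };

    // Signed winding of 'p' with respect to a closed polygon, found by casting a
    // horizontal ray toward +x and summing signed edge crossings.
    static int windingNumber(const std::vector<Pt>& poly, Pt p) {
        int wind = 0;
        for (size_t i = 0; i < poly.size(); ++i) {
            const Pt& a = poly[i];
            const Pt& b = poly[(i + 1) % poly.size()];
            // cross > 0 means p lies to the left of the directed edge a->b.
            double cross = (b.x - a.x) * (p.y - a.y) - (p.x - a.x) * (b.y - a.y);
            if (a.y <= p.y) {
                if (b.y > p.y && cross > 0) {
                    ++wind;  // upward crossing with p left of the edge
                }
            } else if (b.y <= p.y && cross < 0) {
                --wind;      // downward crossing with p right of the edge
            }
        }
        return wind;
    }

    int main() {
        std::vector<Pt> square = {{0, 0}, {4, 0}, {4, 4}, {0, 4}};  // counterclockwise
        std::printf("inside: %d  outside: %d\n",
                    windingNumber(square, {2, 2}),   // prints 1
                    windingNumber(square, {5, 2}));  // prints 0
        return 0;
    }
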
diff --git a/gfx/skia/skia/src/pathops/SkPathWriter.cpp b/gfx/skia/skia/src/pathops/SkPathWriter.cpp
new file mode 100644
index 000000000..1f6dddd13
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathWriter.cpp
@@ -0,0 +1,362 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkOpSpan.h"
+#include "SkPathOpsPoint.h"
+#include "SkPathWriter.h"
+#include "SkTSort.h"
+
+// wrap path to keep track of whether the contour is initialized and non-empty
+SkPathWriter::SkPathWriter(SkPath& path)
+ : fPathPtr(&path)
+{
+ init();
+}
+
+void SkPathWriter::close() {
+ if (fCurrent.isEmpty()) {
+ return;
+ }
+ SkASSERT(this->isClosed());
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("path.close();\n");
+#endif
+ fCurrent.close();
+ fPathPtr->addPath(fCurrent);
+ fCurrent.reset();
+ init();
+}
+
+void SkPathWriter::conicTo(const SkPoint& pt1, const SkOpPtT* pt2, SkScalar weight) {
+ this->update(pt2);
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("path.conicTo(%1.9g,%1.9g, %1.9g,%1.9g, %1.9g);\n",
+ pt1.fX, pt1.fY, pt2->fPt.fX, pt2->fPt.fY, weight);
+#endif
+ fCurrent.conicTo(pt1, pt2->fPt, weight);
+}
+
+void SkPathWriter::cubicTo(const SkPoint& pt1, const SkPoint& pt2, const SkOpPtT* pt3) {
+ this->update(pt3);
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("path.cubicTo(%1.9g,%1.9g, %1.9g,%1.9g, %1.9g,%1.9g);\n",
+ pt1.fX, pt1.fY, pt2.fX, pt2.fY, pt3->fPt.fX, pt3->fPt.fY);
+#endif
+ fCurrent.cubicTo(pt1, pt2, pt3->fPt);
+}
+
+void SkPathWriter::deferredLine(const SkOpPtT* pt) {
+ SkASSERT(fFirstPtT);
+ SkASSERT(fDefer[0]);
+ if (fDefer[0] == pt) {
+ // FIXME: why are we adding a degenerate line? Caller should have preflighted this.
+ return;
+ }
+ if (pt->contains(fDefer[0])) {
+ // FIXME: why are we adding a degenerate line?
+ return;
+ }
+ SkASSERT(!this->matchedLast(pt));
+ if (fDefer[1] && this->changedSlopes(pt)) {
+ this->lineTo();
+ fDefer[0] = fDefer[1];
+ }
+ fDefer[1] = pt;
+}
+
+void SkPathWriter::deferredMove(const SkOpPtT* pt) {
+ if (!fDefer[1]) {
+ fFirstPtT = fDefer[0] = pt;
+ return;
+ }
+ SkASSERT(fDefer[0]);
+ if (!this->matchedLast(pt)) {
+ this->finishContour();
+ fFirstPtT = fDefer[0] = pt;
+ }
+}
+
+void SkPathWriter::finishContour() {
+ if (!this->matchedLast(fDefer[0])) {
+ if (!fDefer[1]) {
+ return;
+ }
+ this->lineTo();
+ }
+ if (fCurrent.isEmpty()) {
+ return;
+ }
+ if (this->isClosed()) {
+ this->close();
+ } else {
+ SkASSERT(fDefer[1]);
+ fEndPtTs.push(fFirstPtT);
+ fEndPtTs.push(fDefer[1]);
+ fPartials.push_back(fCurrent);
+ this->init();
+ }
+}
+
+void SkPathWriter::init() {
+ fCurrent.reset();
+ fFirstPtT = fDefer[0] = fDefer[1] = nullptr;
+}
+
+bool SkPathWriter::isClosed() const {
+ return this->matchedLast(fFirstPtT);
+}
+
+void SkPathWriter::lineTo() {
+ if (fCurrent.isEmpty()) {
+ this->moveTo();
+ }
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("path.lineTo(%1.9g,%1.9g);\n", fDefer[1]->fPt.fX, fDefer[1]->fPt.fY);
+#endif
+ fCurrent.lineTo(fDefer[1]->fPt);
+}
+
+bool SkPathWriter::matchedLast(const SkOpPtT* test) const {
+ if (test == fDefer[1]) {
+ return true;
+ }
+ if (!test) {
+ return false;
+ }
+ if (!fDefer[1]) {
+ return false;
+ }
+ return test->contains(fDefer[1]);
+}
+
+void SkPathWriter::moveTo() {
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("path.moveTo(%1.9g,%1.9g);\n", fFirstPtT->fPt.fX, fFirstPtT->fPt.fY);
+#endif
+ fCurrent.moveTo(fFirstPtT->fPt);
+}
+
+void SkPathWriter::quadTo(const SkPoint& pt1, const SkOpPtT* pt2) {
+ this->update(pt2);
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("path.quadTo(%1.9g,%1.9g, %1.9g,%1.9g);\n",
+ pt1.fX, pt1.fY, pt2->fPt.fX, pt2->fPt.fY);
+#endif
+ fCurrent.quadTo(pt1, pt2->fPt);
+}
+
+void SkPathWriter::update(const SkOpPtT* pt) {
+ if (!fDefer[1]) {
+ this->moveTo();
+ } else if (!this->matchedLast(fDefer[0])) {
+ this->lineTo();
+ }
+ fDefer[0] = fDefer[1] = pt; // set both to know that there is not a pending deferred line
+}
+
+bool SkPathWriter::someAssemblyRequired() {
+ this->finishContour();
+ return fEndPtTs.count() > 0;
+}
+
+bool SkPathWriter::changedSlopes(const SkOpPtT* ptT) const {
+ if (matchedLast(fDefer[0])) {
+ return false;
+ }
+ SkVector deferDxdy = fDefer[1]->fPt - fDefer[0]->fPt;
+ SkVector lineDxdy = ptT->fPt - fDefer[1]->fPt;
+ return deferDxdy.fX * lineDxdy.fY != deferDxdy.fY * lineDxdy.fX;
+}
+
+class DistanceLessThan {
+public:
+ DistanceLessThan(double* distances) : fDistances(distances) { }
+ double* fDistances;
+ bool operator()(const int one, const int two) {
+ return fDistances[one] < fDistances[two];
+ }
+};
+
+ /*
+ check start and end of each contour
+ if not the same, record them
+ match them up
+ connect closest
+ reassemble contour pieces into new path
+ */
+void SkPathWriter::assemble() {
+#if DEBUG_SHOW_TEST_NAME
+ SkDebugf("</div>\n");
+#endif
+ if (!this->someAssemblyRequired()) {
+ return;
+ }
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("%s\n", __FUNCTION__);
+#endif
+ SkOpPtT const* const* runs = fEndPtTs.begin(); // starts, ends of partial contours
+ int endCount = fEndPtTs.count(); // all starts and ends
+ SkASSERT(endCount > 0);
+ SkASSERT(endCount == fPartials.count() * 2);
+#if DEBUG_ASSEMBLE
+ for (int index = 0; index < endCount; index += 2) {
+ const SkOpPtT* eStart = runs[index];
+ const SkOpPtT* eEnd = runs[index + 1];
+ SkASSERT(eStart != eEnd);
+ SkASSERT(!eStart->contains(eEnd));
+ SkDebugf("%s contour start=(%1.9g,%1.9g) end=(%1.9g,%1.9g)\n", __FUNCTION__,
+ eStart->fPt.fX, eStart->fPt.fY, eEnd->fPt.fX, eEnd->fPt.fY);
+ }
+#endif
+ SkTDArray<int> sLink, eLink;
+ int linkCount = endCount / 2; // number of partial contours
+ sLink.append(linkCount);
+ eLink.append(linkCount);
+ int rIndex, iIndex;
+ for (rIndex = 0; rIndex < linkCount; ++rIndex) {
+ sLink[rIndex] = eLink[rIndex] = SK_MaxS32;
+ }
+ const int entries = endCount * (endCount - 1) / 2; // folded triangle
+ SkSTArray<8, double, true> distances(entries);
+ SkSTArray<8, int, true> sortedDist(entries);
+ SkSTArray<8, int, true> distLookup(entries);
+ int rRow = 0;
+ int dIndex = 0;
+ for (rIndex = 0; rIndex < endCount - 1; ++rIndex) {
+ const SkOpPtT* oPtT = runs[rIndex];
+ for (iIndex = rIndex + 1; iIndex < endCount; ++iIndex) {
+ const SkOpPtT* iPtT = runs[iIndex];
+ double dx = iPtT->fPt.fX - oPtT->fPt.fX;
+ double dy = iPtT->fPt.fY - oPtT->fPt.fY;
+ double dist = dx * dx + dy * dy;
+ distLookup.push_back(rRow + iIndex);
+ distances.push_back(dist); // oStart distance from iStart
+ sortedDist.push_back(dIndex++);
+ }
+ rRow += endCount;
+ }
+ SkASSERT(dIndex == entries);
+ SkTQSort<int>(sortedDist.begin(), sortedDist.end() - 1, DistanceLessThan(distances.begin()));
+ int remaining = linkCount; // number of start/end pairs
+ for (rIndex = 0; rIndex < entries; ++rIndex) {
+ int pair = sortedDist[rIndex];
+ pair = distLookup[pair];
+ int row = pair / endCount;
+ int col = pair - row * endCount;
+ int ndxOne = row >> 1;
+ bool endOne = row & 1;
+ int* linkOne = endOne ? eLink.begin() : sLink.begin();
+ if (linkOne[ndxOne] != SK_MaxS32) {
+ continue;
+ }
+ int ndxTwo = col >> 1;
+ bool endTwo = col & 1;
+ int* linkTwo = endTwo ? eLink.begin() : sLink.begin();
+ if (linkTwo[ndxTwo] != SK_MaxS32) {
+ continue;
+ }
+ SkASSERT(&linkOne[ndxOne] != &linkTwo[ndxTwo]);
+ bool flip = endOne == endTwo;
+ linkOne[ndxOne] = flip ? ~ndxTwo : ndxTwo;
+ linkTwo[ndxTwo] = flip ? ~ndxOne : ndxOne;
+ if (!--remaining) {
+ break;
+ }
+ }
+ SkASSERT(!remaining);
+#if DEBUG_ASSEMBLE
+ for (rIndex = 0; rIndex < linkCount; ++rIndex) {
+ int s = sLink[rIndex];
+ int e = eLink[rIndex];
+ SkDebugf("%s %c%d <- s%d - e%d -> %c%d\n", __FUNCTION__, s < 0 ? 's' : 'e',
+ s < 0 ? ~s : s, rIndex, rIndex, e < 0 ? 'e' : 's', e < 0 ? ~e : e);
+ }
+#endif
+ rIndex = 0;
+ do {
+ bool forward = true;
+ bool first = true;
+ int sIndex = sLink[rIndex];
+ SkASSERT(sIndex != SK_MaxS32);
+ sLink[rIndex] = SK_MaxS32;
+ int eIndex;
+ if (sIndex < 0) {
+ eIndex = sLink[~sIndex];
+ sLink[~sIndex] = SK_MaxS32;
+ } else {
+ eIndex = eLink[sIndex];
+ eLink[sIndex] = SK_MaxS32;
+ }
+ SkASSERT(eIndex != SK_MaxS32);
+#if DEBUG_ASSEMBLE
+ SkDebugf("%s sIndex=%c%d eIndex=%c%d\n", __FUNCTION__, sIndex < 0 ? 's' : 'e',
+ sIndex < 0 ? ~sIndex : sIndex, eIndex < 0 ? 's' : 'e',
+ eIndex < 0 ? ~eIndex : eIndex);
+#endif
+ do {
+ const SkPath& contour = fPartials[rIndex];
+ if (forward) {
+ fPathPtr->addPath(contour,
+ first ? SkPath::kAppend_AddPathMode : SkPath::kExtend_AddPathMode);
+ } else {
+ SkASSERT(!first);
+ fPathPtr->reverseAddPath(contour);
+ }
+ if (first) {
+ first = false;
+ }
+#if DEBUG_ASSEMBLE
+ SkDebugf("%s rIndex=%d eIndex=%s%d close=%d\n", __FUNCTION__, rIndex,
+ eIndex < 0 ? "~" : "", eIndex < 0 ? ~eIndex : eIndex,
+ sIndex == ((rIndex != eIndex) ^ forward ? eIndex : ~eIndex));
+#endif
+ if (sIndex == ((rIndex != eIndex) ^ forward ? eIndex : ~eIndex)) {
+ fPathPtr->close();
+ break;
+ }
+ if (forward) {
+ eIndex = eLink[rIndex];
+ SkASSERT(eIndex != SK_MaxS32);
+ eLink[rIndex] = SK_MaxS32;
+ if (eIndex >= 0) {
+ SkASSERT(sLink[eIndex] == rIndex);
+ sLink[eIndex] = SK_MaxS32;
+ } else {
+ SkASSERT(eLink[~eIndex] == ~rIndex);
+ eLink[~eIndex] = SK_MaxS32;
+ }
+ } else {
+ eIndex = sLink[rIndex];
+ SkASSERT(eIndex != SK_MaxS32);
+ sLink[rIndex] = SK_MaxS32;
+ if (eIndex >= 0) {
+ SkASSERT(eLink[eIndex] == rIndex);
+ eLink[eIndex] = SK_MaxS32;
+ } else {
+ SkASSERT(sLink[~eIndex] == ~rIndex);
+ sLink[~eIndex] = SK_MaxS32;
+ }
+ }
+ rIndex = eIndex;
+ if (rIndex < 0) {
+ forward ^= 1;
+ rIndex = ~rIndex;
+ }
+ } while (true);
+ for (rIndex = 0; rIndex < linkCount; ++rIndex) {
+ if (sLink[rIndex] != SK_MaxS32) {
+ break;
+ }
+ }
+ } while (rIndex < linkCount);
+#if DEBUG_ASSEMBLE
+ for (rIndex = 0; rIndex < linkCount; ++rIndex) {
+ SkASSERT(sLink[rIndex] == SK_MaxS32);
+ SkASSERT(eLink[rIndex] == SK_MaxS32);
+ }
+#endif
+ return;
+}
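
The assemble() routine above records the loose start and end points of each partial contour, sorts all endpoint pairs by distance, and links the closest unused pairs. Below is a self-contained sketch of just that greedy pairing step, using our own toy types rather than Skia's; the real code additionally tracks whether each endpoint is a start or an end and reverses contours when needed.

    #include <algorithm>
    #include <cstdio>
    #include <utility>
    #include <vector>

    struct End { double x, y; };  // a loose start or end point of a partial contour

    // Greedily pair endpoints by squared distance: the "match them up / connect
    // closest" step from the comment above assemble().
    static std::vector<std::pair<int, int>> pairClosest(const std::vector<End>& ends) {
        struct Cand { double dist; int a, b; };
        std::vector<Cand> cands;
        for (int i = 0; i < (int)ends.size(); ++i) {
            for (int j = i + 1; j < (int)ends.size(); ++j) {
                double dx = ends[i].x - ends[j].x, dy = ends[i].y - ends[j].y;
                cands.push_back({dx * dx + dy * dy, i, j});
            }
        }
        std::sort(cands.begin(), cands.end(),
                  [](const Cand& l, const Cand& r) { return l.dist < r.dist; });
        std::vector<bool> used(ends.size(), false);
        std::vector<std::pair<int, int>> links;
        for (const Cand& c : cands) {
            if (used[c.a] || used[c.b]) {
                continue;  // each endpoint joins exactly one other endpoint
            }
            used[c.a] = used[c.b] = true;
            links.push_back(std::make_pair(c.a, c.b));
        }
        return links;
    }

    int main() {
        // Two partial contours: A runs (0,0)->(1,0) and B runs (1.1,0)->(2,0).
        std::vector<End> ends = {{0, 0}, {1, 0}, {1.1, 0}, {2, 0}};
        for (const auto& link : pairClosest(ends)) {
            std::printf("join endpoint %d to endpoint %d\n", link.first, link.second);
        }
        return 0;  // pairs (1,2) first, then (0,3), closing the loop
    }
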
diff --git a/gfx/skia/skia/src/pathops/SkPathWriter.h b/gfx/skia/skia/src/pathops/SkPathWriter.h
new file mode 100644
index 000000000..bd13c718a
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathWriter.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathWriter_DEFINED
+#define SkPathWriter_DEFINED
+
+#include "SkPath.h"
+#include "SkTArray.h"
+#include "SkTDArray.h"
+
+class SkOpPtT;
+
+// Construct the path one contour at a time.
+// If the contour is closed, copy it to the final output.
+// Otherwise, keep the partial contour for later assembly.
+
+class SkPathWriter {
+public:
+ SkPathWriter(SkPath& path);
+ void assemble();
+ void conicTo(const SkPoint& pt1, const SkOpPtT* pt2, SkScalar weight);
+ void cubicTo(const SkPoint& pt1, const SkPoint& pt2, const SkOpPtT* pt3);
+ void deferredLine(const SkOpPtT* pt);
+ void deferredMove(const SkOpPtT* pt);
+ void finishContour();
+ bool hasMove() const { return !fFirstPtT; }
+ void init();
+ bool isClosed() const;
+ const SkPath* nativePath() const { return fPathPtr; }
+ void quadTo(const SkPoint& pt1, const SkOpPtT* pt2);
+
+private:
+ bool changedSlopes(const SkOpPtT* pt) const;
+ void close();
+ const SkTDArray<const SkOpPtT*>& endPtTs() const { return fEndPtTs; }
+ void lineTo();
+ bool matchedLast(const SkOpPtT*) const;
+ void moveTo();
+ const SkTArray<SkPath>& partials() const { return fPartials; }
+ bool someAssemblyRequired();
+ void update(const SkOpPtT* pt);
+
+ SkPath fCurrent; // contour under construction
+ SkTArray<SkPath> fPartials; // contours with mismatched starts and ends
+ SkTDArray<const SkOpPtT*> fEndPtTs; // possible pt values for partial starts and ends
+ SkPath* fPathPtr; // closed contours are written here
+ const SkOpPtT* fDefer[2]; // [0] deferred move, [1] deferred line
+ const SkOpPtT* fFirstPtT; // first in current contour
+};
+
+#endif  // SkPathWriter_DEFINED
diff --git a/gfx/skia/skia/src/pathops/SkReduceOrder.cpp b/gfx/skia/skia/src/pathops/SkReduceOrder.cpp
new file mode 100644
index 000000000..7f7ea11d3
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkReduceOrder.cpp
@@ -0,0 +1,283 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkGeometry.h"
+#include "SkReduceOrder.h"
+
+int SkReduceOrder::reduce(const SkDLine& line) {
+ fLine[0] = line[0];
+ int different = line[0] != line[1];
+ fLine[1] = line[different];
+ return 1 + different;
+}
+
+static int coincident_line(const SkDQuad& quad, SkDQuad& reduction) {
+ reduction[0] = reduction[1] = quad[0];
+ return 1;
+}
+
+static int reductionLineCount(const SkDQuad& reduction) {
+ return 1 + !reduction[0].approximatelyEqual(reduction[1]);
+}
+
+static int vertical_line(const SkDQuad& quad, SkDQuad& reduction) {
+ reduction[0] = quad[0];
+ reduction[1] = quad[2];
+ return reductionLineCount(reduction);
+}
+
+static int horizontal_line(const SkDQuad& quad, SkDQuad& reduction) {
+ reduction[0] = quad[0];
+ reduction[1] = quad[2];
+ return reductionLineCount(reduction);
+}
+
+static int check_linear(const SkDQuad& quad,
+ int minX, int maxX, int minY, int maxY, SkDQuad& reduction) {
+ if (!quad.isLinear(0, 2)) {
+ return 0;
+ }
+ // all three points are collinear: return the line formed by the outside points
+ reduction[0] = quad[0];
+ reduction[1] = quad[2];
+ return reductionLineCount(reduction);
+}
+
+// reduce to a quadratic or smaller
+// look for identical points
+// look for all four points in a line
+ // note that three points in a line doesn't simplify a cubic
+// look for approximation with single quadratic
+ // save approximation with multiple quadratics for later
+int SkReduceOrder::reduce(const SkDQuad& quad) {
+ int index, minX, maxX, minY, maxY;
+ int minXSet, minYSet;
+ minX = maxX = minY = maxY = 0;
+ minXSet = minYSet = 0;
+ for (index = 1; index < 3; ++index) {
+ if (quad[minX].fX > quad[index].fX) {
+ minX = index;
+ }
+ if (quad[minY].fY > quad[index].fY) {
+ minY = index;
+ }
+ if (quad[maxX].fX < quad[index].fX) {
+ maxX = index;
+ }
+ if (quad[maxY].fY < quad[index].fY) {
+ maxY = index;
+ }
+ }
+ for (index = 0; index < 3; ++index) {
+ if (AlmostEqualUlps(quad[index].fX, quad[minX].fX)) {
+ minXSet |= 1 << index;
+ }
+ if (AlmostEqualUlps(quad[index].fY, quad[minY].fY)) {
+ minYSet |= 1 << index;
+ }
+ }
+ if ((minXSet & 0x05) == 0x5 && (minYSet & 0x05) == 0x5) { // test for degenerate
+ // this quad starts and ends at the same place, so never contributes
+ // to the fill
+ return coincident_line(quad, fQuad);
+ }
+ if (minXSet == 0x7) { // test for vertical line
+ return vertical_line(quad, fQuad);
+ }
+ if (minYSet == 0x7) { // test for horizontal line
+ return horizontal_line(quad, fQuad);
+ }
+ int result = check_linear(quad, minX, maxX, minY, maxY, fQuad);
+ if (result) {
+ return result;
+ }
+ fQuad = quad;
+ return 3;
+}
+
+////////////////////////////////////////////////////////////////////////////////////
+
+static int coincident_line(const SkDCubic& cubic, SkDCubic& reduction) {
+ reduction[0] = reduction[1] = cubic[0];
+ return 1;
+}
+
+static int reductionLineCount(const SkDCubic& reduction) {
+ return 1 + !reduction[0].approximatelyEqual(reduction[1]);
+}
+
+static int vertical_line(const SkDCubic& cubic, SkDCubic& reduction) {
+ reduction[0] = cubic[0];
+ reduction[1] = cubic[3];
+ return reductionLineCount(reduction);
+}
+
+static int horizontal_line(const SkDCubic& cubic, SkDCubic& reduction) {
+ reduction[0] = cubic[0];
+ reduction[1] = cubic[3];
+ return reductionLineCount(reduction);
+}
+
+// check to see if it is a quadratic or a line
+static int check_quadratic(const SkDCubic& cubic, SkDCubic& reduction) {
+ double dx10 = cubic[1].fX - cubic[0].fX;
+ double dx23 = cubic[2].fX - cubic[3].fX;
+ double midX = cubic[0].fX + dx10 * 3 / 2;
+ double sideAx = midX - cubic[3].fX;
+ double sideBx = dx23 * 3 / 2;
+ if (approximately_zero(sideAx) ? !approximately_equal(sideAx, sideBx)
+ : !AlmostEqualUlps_Pin(sideAx, sideBx)) {
+ return 0;
+ }
+ double dy10 = cubic[1].fY - cubic[0].fY;
+ double dy23 = cubic[2].fY - cubic[3].fY;
+ double midY = cubic[0].fY + dy10 * 3 / 2;
+ double sideAy = midY - cubic[3].fY;
+ double sideBy = dy23 * 3 / 2;
+ if (approximately_zero(sideAy) ? !approximately_equal(sideAy, sideBy)
+ : !AlmostEqualUlps_Pin(sideAy, sideBy)) {
+ return 0;
+ }
+ reduction[0] = cubic[0];
+ reduction[1].fX = midX;
+ reduction[1].fY = midY;
+ reduction[2] = cubic[3];
+ return 3;
+}
+
+static int check_linear(const SkDCubic& cubic,
+ int minX, int maxX, int minY, int maxY, SkDCubic& reduction) {
+ if (!cubic.isLinear(0, 3)) {
+ return 0;
+ }
+ // all four points are collinear: return the line formed by the outside points
+ reduction[0] = cubic[0];
+ reduction[1] = cubic[3];
+ return reductionLineCount(reduction);
+}
+
+/* food for thought:
+http://objectmix.com/graphics/132906-fast-precision-driven-cubic-quadratic-piecewise-degree-reduction-algos-2-a.html
+
+Given points c1, c2, c3 and c4 of a cubic Bezier, the points of the
+corresponding quadratic Bezier are (given in convex combinations of
+points):
+
+q1 = (11/13)c1 + (3/13)c2 -(3/13)c3 + (2/13)c4
+q2 = -c1 + (3/2)c2 + (3/2)c3 - c4
+q3 = (2/13)c1 - (3/13)c2 + (3/13)c3 + (11/13)c4
+
+Of course, this curve does not interpolate the end-points, but it would
+be interesting to see the behaviour of such a curve in an applet.
+
+--
+Kalle Rutanen
+http://kaba.hilvi.org
+
+*/
+
+// reduce to a quadratic or smaller
+// look for identical points
+// look for all four points in a line
+ // note that three points in a line doesn't simplify a cubic
+// look for approximation with single quadratic
+ // save approximation with multiple quadratics for later
+int SkReduceOrder::reduce(const SkDCubic& cubic, Quadratics allowQuadratics) {
+ int index, minX, maxX, minY, maxY;
+ int minXSet, minYSet;
+ minX = maxX = minY = maxY = 0;
+ minXSet = minYSet = 0;
+ for (index = 1; index < 4; ++index) {
+ if (cubic[minX].fX > cubic[index].fX) {
+ minX = index;
+ }
+ if (cubic[minY].fY > cubic[index].fY) {
+ minY = index;
+ }
+ if (cubic[maxX].fX < cubic[index].fX) {
+ maxX = index;
+ }
+ if (cubic[maxY].fY < cubic[index].fY) {
+ maxY = index;
+ }
+ }
+ for (index = 0; index < 4; ++index) {
+ double cx = cubic[index].fX;
+ double cy = cubic[index].fY;
+ double denom = SkTMax(fabs(cx), SkTMax(fabs(cy),
+ SkTMax(fabs(cubic[minX].fX), fabs(cubic[minY].fY))));
+ if (denom == 0) {
+ minXSet |= 1 << index;
+ minYSet |= 1 << index;
+ continue;
+ }
+ double inv = 1 / denom;
+ if (approximately_equal_half(cx * inv, cubic[minX].fX * inv)) {
+ minXSet |= 1 << index;
+ }
+ if (approximately_equal_half(cy * inv, cubic[minY].fY * inv)) {
+ minYSet |= 1 << index;
+ }
+ }
+ if (minXSet == 0xF) { // test for vertical line
+ if (minYSet == 0xF) { // return 1 if all four are coincident
+ return coincident_line(cubic, fCubic);
+ }
+ return vertical_line(cubic, fCubic);
+ }
+ if (minYSet == 0xF) { // test for horizontal line
+ return horizontal_line(cubic, fCubic);
+ }
+ int result = check_linear(cubic, minX, maxX, minY, maxY, fCubic);
+ if (result) {
+ return result;
+ }
+ if (allowQuadratics == SkReduceOrder::kAllow_Quadratics
+ && (result = check_quadratic(cubic, fCubic))) {
+ return result;
+ }
+ fCubic = cubic;
+ return 4;
+}
+
+SkPath::Verb SkReduceOrder::Quad(const SkPoint a[3], SkPoint* reducePts) {
+ SkDQuad quad;
+ quad.set(a);
+ SkReduceOrder reducer;
+ int order = reducer.reduce(quad);
+ if (order == 2) { // quad became line
+ for (int index = 0; index < order; ++index) {
+ *reducePts++ = reducer.fLine[index].asSkPoint();
+ }
+ }
+ return SkPathOpsPointsToVerb(order - 1);
+}
+
+SkPath::Verb SkReduceOrder::Conic(const SkConic& c, SkPoint* reducePts) {
+ SkPath::Verb verb = SkReduceOrder::Quad(c.fPts, reducePts);
+ if (verb > SkPath::kLine_Verb && c.fW == 1) {
+ return SkPath::kQuad_Verb;
+ }
+ return verb == SkPath::kQuad_Verb ? SkPath::kConic_Verb : verb;
+}
+
+SkPath::Verb SkReduceOrder::Cubic(const SkPoint a[4], SkPoint* reducePts) {
+ if (SkDPoint::ApproximatelyEqual(a[0], a[1]) && SkDPoint::ApproximatelyEqual(a[0], a[2])
+ && SkDPoint::ApproximatelyEqual(a[0], a[3])) {
+ reducePts[0] = a[0];
+ return SkPath::kMove_Verb;
+ }
+ SkDCubic cubic;
+ cubic.set(a);
+ SkReduceOrder reducer;
+ int order = reducer.reduce(cubic, kAllow_Quadratics);
+ if (order == 2 || order == 3) { // cubic became line or quad
+ for (int index = 0; index < order; ++index) {
+ *reducePts++ = reducer.fQuad[index].asSkPoint();
+ }
+ }
+ return SkPathOpsPointsToVerb(order - 1);
+}
diff --git a/gfx/skia/skia/src/pathops/SkReduceOrder.h b/gfx/skia/skia/src/pathops/SkReduceOrder.h
new file mode 100644
index 000000000..7efb71d4f
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkReduceOrder.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkReduceOrder_DEFINED
+#define SkReduceOrder_DEFINED
+
+#include "SkPathOpsCubic.h"
+#include "SkPathOpsLine.h"
+#include "SkPathOpsQuad.h"
+
+struct SkConic;
+
+union SkReduceOrder {
+ enum Quadratics {
+ kNo_Quadratics,
+ kAllow_Quadratics
+ };
+
+ int reduce(const SkDCubic& cubic, Quadratics);
+ int reduce(const SkDLine& line);
+ int reduce(const SkDQuad& quad);
+
+ static SkPath::Verb Conic(const SkConic& conic, SkPoint* reducePts);
+ static SkPath::Verb Cubic(const SkPoint pts[4], SkPoint* reducePts);
+ static SkPath::Verb Quad(const SkPoint pts[3], SkPoint* reducePts);
+
+ SkDLine fLine;
+ SkDQuad fQuad;
+ SkDCubic fCubic;
+};
+
+#endif
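
A minimal usage sketch of the reducer, assuming the pathops headers in this diff are on the include path; the helper name and the sample points are ours:

    #include "SkReduceOrder.h"

    static SkPath::Verb reduceQuadExample() {
        // Collinear control points, so the quad degrades to a line.
        SkPoint pts[3] = {{0, 0}, {5, 5}, {10, 10}};
        SkPoint reduced[3];
        SkPath::Verb verb = SkReduceOrder::Quad(pts, reduced);
        // Expect SkPath::kLine_Verb here; reduced[0] and reduced[1] then hold the
        // endpoints of the simplified segment (reducePts is only filled for lines).
        return verb;
    }
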
diff --git a/gfx/skia/skia/src/pdf/SkBitmapKey.h b/gfx/skia/skia/src/pdf/SkBitmapKey.h
new file mode 100644
index 000000000..a640468d6
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkBitmapKey.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkBitmapKey_DEFINED
+#define SkBitmapKey_DEFINED
+
+#include "SkBitmap.h"
+#include "SkImage.h"
+#include "SkCanvas.h"
+
+struct SkBitmapKey {
+ SkIRect fSubset;
+ uint32_t fID;
+ bool operator==(const SkBitmapKey& rhs) const {
+ return fID == rhs.fID && fSubset == rhs.fSubset;
+ }
+ bool operator!=(const SkBitmapKey& rhs) const { return !(*this == rhs); }
+};
+
+/**
+ Wraps an SkImage and a subset rectangle so the pair can be drawn, keyed for
+ caching, or turned back into an SkImage, combining the useful parts of
+ SkBitmap and SkImage.
+ */
+class SkImageSubset {
+public:
+ SkImageSubset(sk_sp<SkImage> i, SkIRect subset = {0, 0, 0, 0})
+ : fImage(std::move(i)) {
+ if (!fImage) {
+ fSubset = {0, 0, 0, 0};
+ fID = 0;
+ return;
+ }
+ fID = fImage->uniqueID();
+ if (subset.isEmpty()) {
+ fSubset = fImage->bounds();
+ // An SkImage always has non-zero dimensions.
+ SkASSERT(!fSubset.isEmpty());
+ } else {
+ fSubset = subset;
+ if (!fSubset.intersect(fImage->bounds())) {
+ fImage = nullptr;
+ fSubset = {0, 0, 0, 0};
+ fID = 0;
+ }
+ }
+ }
+
+ void setID(uint32_t id) { fID = id; }
+
+ bool isValid() const { return fImage != nullptr; }
+
+ SkIRect bounds() const { return SkIRect::MakeSize(this->dimensions()); }
+
+ SkISize dimensions() const { return fSubset.size(); }
+
+ sk_sp<SkImage> makeImage() const {
+ return fSubset == fImage->bounds() ? fImage : fImage->makeSubset(fSubset);
+ }
+
+ SkBitmapKey getKey() const { return SkBitmapKey{fSubset, fID}; }
+
+ void draw(SkCanvas* canvas, SkPaint* paint) const {
+ SkASSERT(this->isValid());
+ SkRect src = SkRect::Make(fSubset),
+ dst = SkRect::Make(this->bounds());
+ canvas->drawImageRect(fImage.get(), src, dst, paint);
+ }
+
+private:
+ SkIRect fSubset;
+ sk_sp<SkImage> fImage;
+ uint32_t fID;
+};
+
+#endif // SkBitmapKey_DEFINED
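
A small usage sketch of the wrapper and its key, assuming the image has already been decoded elsewhere and is handed in by the caller (the helper name is ours):

    #include "SkBitmapKey.h"
    #include <utility>

    // Returns true if (image, subset) refers to the same pixels a previous draw used,
    // so a cached PDF XObject could be reused instead of re-serializing the image.
    static bool sameDrawSource(sk_sp<SkImage> image, const SkIRect& subset,
                               const SkBitmapKey& previousKey) {
        SkImageSubset wrapped(std::move(image), subset);
        if (!wrapped.isValid()) {
            return false;  // null image, or subset entirely outside the image bounds
        }
        return wrapped.getKey() == previousKey;
    }
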
diff --git a/gfx/skia/skia/src/pdf/SkDeflate.cpp b/gfx/skia/skia/src/pdf/SkDeflate.cpp
new file mode 100644
index 000000000..c2b85fccb
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkDeflate.cpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkData.h"
+#include "SkDeflate.h"
+#include "SkMakeUnique.h"
+
+#include "zlib.h"
+
+namespace {
+
+// Different zlib implementations use different T.
+// We've seen size_t and unsigned.
+template <typename T> void* skia_alloc_func(void*, T items, T size) {
+ return sk_calloc_throw(SkToSizeT(items) * SkToSizeT(size));
+}
+
+void skia_free_func(void*, void* address) { sk_free(address); }
+
+} // namespace
+
+#define SKDEFLATEWSTREAM_INPUT_BUFFER_SIZE 4096
+#define SKDEFLATEWSTREAM_OUTPUT_BUFFER_SIZE 4224 // 4096 + 128, usually big
+ // enough to always do a
+ // single loop.
+
+// called by both write() and finalize()
+static void do_deflate(int flush,
+ z_stream* zStream,
+ SkWStream* out,
+ unsigned char* inBuffer,
+ size_t inBufferSize) {
+ zStream->next_in = inBuffer;
+ zStream->avail_in = SkToInt(inBufferSize);
+ unsigned char outBuffer[SKDEFLATEWSTREAM_OUTPUT_BUFFER_SIZE];
+ SkDEBUGCODE(int returnValue;)
+ do {
+ zStream->next_out = outBuffer;
+ zStream->avail_out = sizeof(outBuffer);
+ SkDEBUGCODE(returnValue =) deflate(zStream, flush);
+ SkASSERT(!zStream->msg);
+
+ out->write(outBuffer, sizeof(outBuffer) - zStream->avail_out);
+ } while (zStream->avail_in || !zStream->avail_out);
+ SkASSERT(flush == Z_FINISH
+ ? returnValue == Z_STREAM_END
+ : returnValue == Z_OK);
+}
+
+// Hide all zlib impl details.
+struct SkDeflateWStream::Impl {
+ SkWStream* fOut;
+ unsigned char fInBuffer[SKDEFLATEWSTREAM_INPUT_BUFFER_SIZE];
+ size_t fInBufferIndex;
+ z_stream fZStream;
+};
+
+SkDeflateWStream::SkDeflateWStream(SkWStream* out,
+ int compressionLevel,
+ bool gzip)
+ : fImpl(skstd::make_unique<SkDeflateWStream::Impl>()) {
+ fImpl->fOut = out;
+ fImpl->fInBufferIndex = 0;
+ if (!fImpl->fOut) {
+ return;
+ }
+ fImpl->fZStream.next_in = nullptr;
+ fImpl->fZStream.zalloc = &skia_alloc_func;
+ fImpl->fZStream.zfree = &skia_free_func;
+ fImpl->fZStream.opaque = nullptr;
+ SkASSERT(compressionLevel <= 9 && compressionLevel >= -1);
+ SkDEBUGCODE(int r =) deflateInit2(&fImpl->fZStream, compressionLevel,
+ Z_DEFLATED, gzip ? 0x1F : 0x0F,
+ 8, Z_DEFAULT_STRATEGY);
+ SkASSERT(Z_OK == r);
+}
+
+SkDeflateWStream::~SkDeflateWStream() { this->finalize(); }
+
+void SkDeflateWStream::finalize() {
+ if (!fImpl->fOut) {
+ return;
+ }
+ do_deflate(Z_FINISH, &fImpl->fZStream, fImpl->fOut, fImpl->fInBuffer,
+ fImpl->fInBufferIndex);
+ (void)deflateEnd(&fImpl->fZStream);
+ fImpl->fOut = nullptr;
+}
+
+bool SkDeflateWStream::write(const void* void_buffer, size_t len) {
+ if (!fImpl->fOut) {
+ return false;
+ }
+ const char* buffer = (const char*)void_buffer;
+ while (len > 0) {
+ size_t tocopy =
+ SkTMin(len, sizeof(fImpl->fInBuffer) - fImpl->fInBufferIndex);
+ memcpy(fImpl->fInBuffer + fImpl->fInBufferIndex, buffer, tocopy);
+ len -= tocopy;
+ buffer += tocopy;
+ fImpl->fInBufferIndex += tocopy;
+ SkASSERT(fImpl->fInBufferIndex <= sizeof(fImpl->fInBuffer));
+
+ // if the buffer isn't filled, don't call into zlib yet.
+ if (sizeof(fImpl->fInBuffer) == fImpl->fInBufferIndex) {
+ do_deflate(Z_NO_FLUSH, &fImpl->fZStream, fImpl->fOut,
+ fImpl->fInBuffer, fImpl->fInBufferIndex);
+ fImpl->fInBufferIndex = 0;
+ }
+ }
+ return true;
+}
+
+size_t SkDeflateWStream::bytesWritten() const {
+ return fImpl->fZStream.total_in + fImpl->fInBufferIndex;
+}
diff --git a/gfx/skia/skia/src/pdf/SkDeflate.h b/gfx/skia/skia/src/pdf/SkDeflate.h
new file mode 100644
index 000000000..387de40a1
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkDeflate.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkFlate_DEFINED
+#define SkFlate_DEFINED
+
+#include "SkStream.h"
+
+/**
+ * Wrap a stream in this class to compress the information written to
+ * this stream using the Deflate algorithm.
+ *
+ * See http://en.wikipedia.org/wiki/DEFLATE
+ */
+class SkDeflateWStream final : public SkWStream {
+public:
+ /** Does not take ownership of the stream.
+
+ @param compressionLevel - 0 is no compression; 1 is best
+ speed; 9 is best compression. The default, -1, is to use
+ zlib's Z_DEFAULT_COMPRESSION level.
+
+ @param gzip iff true, output a gzip file. "The gzip format is
+ a wrapper, documented in RFC 1952, around a deflate stream."
+ gzip adds a header with a magic number to the beginning of the
+ stream, allowing a client to identify a gzip file.
+ */
+ SkDeflateWStream(SkWStream*,
+ int compressionLevel = -1,
+ bool gzip = false);
+
+ /** The destructor calls finalize(). */
+ ~SkDeflateWStream();
+
+ /** Write the end of the compressed stream. All subsequent calls to
+ write() will fail. Subsequent calls to finalize() do nothing. */
+ void finalize();
+
+ // The SkWStream interface:
+ bool write(const void*, size_t) override;
+ size_t bytesWritten() const override;
+
+private:
+ struct Impl;
+ std::unique_ptr<Impl> fImpl;
+};
+
+#endif // SkFlate_DEFINED
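
A minimal usage sketch of the stream wrapper, mirroring the buffer-then-detach pattern that emit_image_xobject() uses later in this diff; it assumes SkDynamicMemoryWStream from SkStream.h as the sink, and the helper name is ours:

    #include "SkDeflate.h"
    #include "SkStream.h"
    #include <memory>

    // Deflate-compress a buffer and hand the encoded bytes back as a stream.
    static std::unique_ptr<SkStreamAsset> deflateBytes(const void* bytes, size_t len) {
        SkDynamicMemoryWStream compressed;
        SkDeflateWStream deflater(&compressed);  // default level, raw deflate (gzip = false)
        deflater.write(bytes, len);
        deflater.finalize();                     // flush; the destructor would also call this
        return std::unique_ptr<SkStreamAsset>(compressed.detachAsStream());
    }
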
diff --git a/gfx/skia/skia/src/pdf/SkDocument_PDF_None.cpp b/gfx/skia/skia/src/pdf/SkDocument_PDF_None.cpp
new file mode 100644
index 000000000..0e3ad8a01
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkDocument_PDF_None.cpp
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkDocument.h"
+sk_sp<SkDocument> SkDocument::MakePDF(SkWStream*,
+ SkScalar,
+ const SkDocument::PDFMetadata&,
+ sk_sp<SkPixelSerializer>,
+ bool) {
+ return nullptr;
+}
+sk_sp<SkDocument> SkDocument::MakePDF(const char path[], SkScalar) {
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/pdf/SkJpegInfo.cpp b/gfx/skia/skia/src/pdf/SkJpegInfo.cpp
new file mode 100644
index 000000000..5e5ec792e
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkJpegInfo.cpp
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkData.h"
+#include "SkJpegInfo.h"
+
+namespace {
+class JpegSegment {
+public:
+ JpegSegment(const SkData* skdata)
+ : fData(static_cast<const char*>(skdata->data()))
+ , fSize(skdata->size())
+ , fOffset(0)
+ , fLength(0) {}
+ bool read() {
+ if (!this->readBigendianUint16(&fMarker)) {
+ return false;
+ }
+ if (JpegSegment::StandAloneMarker(fMarker)) {
+ fLength = 0;
+ fBuffer = nullptr;
+ return true;
+ }
+ if (!this->readBigendianUint16(&fLength) || fLength < 2) {
+ return false;
+ }
+ fLength -= 2; // The length field counts its own two bytes.
+ if (fOffset + fLength > fSize) {
+ return false; // Segment too long.
+ }
+ fBuffer = &fData[fOffset];
+ fOffset += fLength;
+ return true;
+ }
+
+ bool isSOF() {
+ return (fMarker & 0xFFF0) == 0xFFC0 && fMarker != 0xFFC4 &&
+ fMarker != 0xFFC8 && fMarker != 0xFFCC;
+ }
+ uint16_t marker() { return fMarker; }
+ uint16_t length() { return fLength; }
+ const char* data() { return fBuffer; }
+
+ static uint16_t GetBigendianUint16(const char* ptr) {
+ // "the most significant byte shall come first"
+ return (static_cast<uint8_t>(ptr[0]) << 8) |
+ static_cast<uint8_t>(ptr[1]);
+ }
+
+private:
+ const char* const fData;
+ const size_t fSize;
+ size_t fOffset;
+ const char* fBuffer;
+ uint16_t fMarker;
+ uint16_t fLength;
+
+ bool readBigendianUint16(uint16_t* value) {
+ if (fOffset + 2 > fSize) {
+ return false;
+ }
+ *value = JpegSegment::GetBigendianUint16(&fData[fOffset]);
+ fOffset += 2;
+ return true;
+ }
+ static bool StandAloneMarker(uint16_t marker) {
+ // RST[m] markers or SOI, EOI, TEM
+ return (marker & 0xFFF8) == 0xFFD0 || marker == 0xFFD8 ||
+ marker == 0xFFD9 || marker == 0xFF01;
+ }
+};
+} // namespace
+
+bool SkIsJFIF(const SkData* skdata, SkJFIFInfo* info) {
+ static const uint16_t kSOI = 0xFFD8;
+ static const uint16_t kAPP0 = 0xFFE0;
+ JpegSegment segment(skdata);
+ if (!segment.read() || segment.marker() != kSOI) {
+ return false; // not a JPEG
+ }
+ if (!segment.read() || segment.marker() != kAPP0) {
+ return false; // not an APP0 segment
+ }
+ static const char kJfif[] = {'J', 'F', 'I', 'F', '\0'};
+ SkASSERT(segment.data());
+ if (SkToSizeT(segment.length()) < sizeof(kJfif) ||
+ 0 != memcmp(segment.data(), kJfif, sizeof(kJfif))) {
+ return false; // Not JFIF JPEG
+ }
+ do {
+ if (!segment.read()) {
+ return false; // malformed JPEG
+ }
+ } while (!segment.isSOF());
+ if (segment.length() < 6) {
+ return false; // SOF segment is short
+ }
+ if (8 != segment.data()[0]) {
+ return false; // Only support 8-bit precision
+ }
+ int numberOfComponents = segment.data()[5];
+ if (numberOfComponents != 1 && numberOfComponents != 3) {
+ return false; // Invalid JFIF
+ }
+ if (info) {
+ info->fSize.set(JpegSegment::GetBigendianUint16(&segment.data()[3]),
+ JpegSegment::GetBigendianUint16(&segment.data()[1]));
+ if (numberOfComponents == 3) {
+ info->fType = SkJFIFInfo::kYCbCr;
+ } else {
+ info->fType = SkJFIFInfo::kGrayscale;
+ }
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/pdf/SkJpegInfo.h b/gfx/skia/skia/src/pdf/SkJpegInfo.h
new file mode 100644
index 000000000..39de99455
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkJpegInfo.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkJpegInfo_DEFINED
+#define SkJpegInfo_DEFINED
+
+#include "SkSize.h"
+
+class SkData;
+
+struct SkJFIFInfo {
+ SkISize fSize;
+ enum Type {
+ kGrayscale,
+ kYCbCr,
+ } fType;
+};
+
+/** Returns true iff the data seems to be a valid JFIF JPEG image.
+ If so and if info is not nullptr, populate info.
+
+ JPEG/JFIF References:
+ http://www.w3.org/Graphics/JPEG/itu-t81.pdf
+ http://www.w3.org/Graphics/JPEG/jfif3.pdf
+*/
+bool SkIsJFIF(const SkData* skdata, SkJFIFInfo* info);
+
+#endif // SkJpegInfo_DEFINED
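
A short usage sketch, assuming the JPEG bytes were loaded into an SkData elsewhere (the helper name is ours):

    #include "SkData.h"
    #include "SkJpegInfo.h"
    #include <cstdio>

    // Prints the dimensions of an already-loaded JPEG if it is plain 8-bit JFIF data.
    static bool printJpegSize(const SkData* encoded) {
        SkJFIFInfo info;
        if (!SkIsJFIF(encoded, &info)) {
            return false;  // not an 8-bit YCbCr or grayscale JFIF stream
        }
        std::printf("%d x %d (%s)\n", info.fSize.width(), info.fSize.height(),
                    info.fType == SkJFIFInfo::kYCbCr ? "YCbCr" : "grayscale");
        return true;
    }
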
diff --git a/gfx/skia/skia/src/pdf/SkPDFBitmap.cpp b/gfx/skia/skia/src/pdf/SkPDFBitmap.cpp
new file mode 100644
index 000000000..2d789d04d
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFBitmap.cpp
@@ -0,0 +1,524 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkColorPriv.h"
+#include "SkData.h"
+#include "SkDeflate.h"
+#include "SkImage_Base.h"
+#include "SkJpegInfo.h"
+#include "SkPDFBitmap.h"
+#include "SkPDFCanon.h"
+#include "SkPDFTypes.h"
+#include "SkStream.h"
+#include "SkUnPreMultiply.h"
+
+void image_get_ro_pixels(const SkImage* image, SkBitmap* dst) {
+ if(as_IB(image)->getROPixels(dst)
+ && dst->dimensions() == image->dimensions()) {
+ if (dst->colorType() != kIndex_8_SkColorType) {
+ return;
+ }
+ // We must check to see if the bitmap has a color table.
+ SkAutoLockPixels autoLockPixels(*dst);
+ if (!dst->getColorTable()) {
+ // We can't use an indexed bitmap with no colortable.
+ dst->reset();
+ } else {
+ return;
+ }
+ }
+ // no pixels or wrong size: fill with zeros.
+ dst->setInfo(SkImageInfo::MakeN32(image->width(), image->height(), image->alphaType()));
+}
+
+bool image_compute_is_opaque(const SkImage* image) {
+ if (image->isOpaque()) {
+ return true;
+ }
+ // keep the output PDF small at the cost of possible extra resource use.
+ SkBitmap bm;
+ image_get_ro_pixels(image, &bm);
+ return SkBitmap::ComputeIsOpaque(bm);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+static void pdf_stream_begin(SkWStream* stream) {
+ static const char streamBegin[] = " stream\n";
+ stream->write(streamBegin, strlen(streamBegin));
+}
+
+static void pdf_stream_end(SkWStream* stream) {
+ static const char streamEnd[] = "\nendstream";
+ stream->write(streamEnd, strlen(streamEnd));
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// write a single byte to a stream n times.
+static void fill_stream(SkWStream* out, char value, size_t n) {
+ char buffer[4096];
+ memset(buffer, value, sizeof(buffer));
+ for (size_t i = 0; i < n / sizeof(buffer); ++i) {
+ out->write(buffer, sizeof(buffer));
+ }
+ out->write(buffer, n % sizeof(buffer));
+}
+
+// TODO(reed@): Decide if these five functions belong in SkColorPriv.h
+static bool SkIsBGRA(SkColorType ct) {
+ SkASSERT(kBGRA_8888_SkColorType == ct || kRGBA_8888_SkColorType == ct);
+ return kBGRA_8888_SkColorType == ct;
+}
+
+// Interpret value as the given 4-byte SkColorType (BGRA_8888 or
+// RGBA_8888) and return the appropriate component. Each component
+// should be interpreted according to the associated SkAlphaType and
+// SkColorProfileType.
+static U8CPU SkGetA32Component(uint32_t value, SkColorType ct) {
+ return (value >> (SkIsBGRA(ct) ? SK_BGRA_A32_SHIFT : SK_RGBA_A32_SHIFT)) & 0xFF;
+}
+static U8CPU SkGetR32Component(uint32_t value, SkColorType ct) {
+ return (value >> (SkIsBGRA(ct) ? SK_BGRA_R32_SHIFT : SK_RGBA_R32_SHIFT)) & 0xFF;
+}
+static U8CPU SkGetG32Component(uint32_t value, SkColorType ct) {
+ return (value >> (SkIsBGRA(ct) ? SK_BGRA_G32_SHIFT : SK_RGBA_G32_SHIFT)) & 0xFF;
+}
+static U8CPU SkGetB32Component(uint32_t value, SkColorType ct) {
+ return (value >> (SkIsBGRA(ct) ? SK_BGRA_B32_SHIFT : SK_RGBA_B32_SHIFT)) & 0xFF;
+}
+
+// unpremultiply and extract R, G, B components.
+static void pmcolor_to_rgb24(uint32_t color, uint8_t* rgb, SkColorType ct) {
+ uint32_t s = SkUnPreMultiply::GetScale(SkGetA32Component(color, ct));
+ rgb[0] = SkUnPreMultiply::ApplyScale(s, SkGetR32Component(color, ct));
+ rgb[1] = SkUnPreMultiply::ApplyScale(s, SkGetG32Component(color, ct));
+ rgb[2] = SkUnPreMultiply::ApplyScale(s, SkGetB32Component(color, ct));
+}
+
+/* It is necessary to average the color component of transparent
+ pixels with their surrounding neighbors since the PDF renderer may
+ separately re-sample the alpha and color channels when the image is
+ not displayed at its native resolution. Since an alpha of zero
+ gives no information about the color component, the pathological
+ case is a white image with sharp transparency bounds - the color
+ channel goes to black, and the should-be-transparent pixels are
+ rendered as grey because of the separate soft mask and color
+ resizing. e.g.: gm/bitmappremul.cpp */
+static void get_neighbor_avg_color(const SkBitmap& bm,
+ int xOrig,
+ int yOrig,
+ uint8_t rgb[3],
+ SkColorType ct) {
+ unsigned a = 0, r = 0, g = 0, b = 0;
+ // Clamp the range to the edge of the bitmap.
+ int ymin = SkTMax(0, yOrig - 1);
+ int ymax = SkTMin(yOrig + 1, bm.height() - 1);
+ int xmin = SkTMax(0, xOrig - 1);
+ int xmax = SkTMin(xOrig + 1, bm.width() - 1);
+ for (int y = ymin; y <= ymax; ++y) {
+ uint32_t* scanline = bm.getAddr32(0, y);
+ for (int x = xmin; x <= xmax; ++x) {
+ uint32_t color = scanline[x];
+ a += SkGetA32Component(color, ct);
+ r += SkGetR32Component(color, ct);
+ g += SkGetG32Component(color, ct);
+ b += SkGetB32Component(color, ct);
+ }
+ }
+ if (a > 0) {
+ rgb[0] = SkToU8(255 * r / a);
+ rgb[1] = SkToU8(255 * g / a);
+ rgb[2] = SkToU8(255 * b / a);
+ } else {
+ rgb[0] = rgb[1] = rgb[2] = 0;
+ }
+}
+
+static size_t pixel_count(const SkBitmap& bm) {
+ return SkToSizeT(bm.width()) * SkToSizeT(bm.height());
+}
+
+static const SkBitmap& not4444(const SkBitmap& input, SkBitmap* copy) {
+ if (input.colorType() != kARGB_4444_SkColorType) {
+ return input;
+ }
+ // ARGB_4444 is rarely used, so we can do a wasteful tmp copy.
+ SkAssertResult(input.copyTo(copy, kN32_SkColorType));
+ copy->setImmutable();
+ return *copy;
+}
+
+static size_t pdf_color_component_count(SkColorType ct) {
+ switch (ct) {
+ case kRGB_565_SkColorType:
+ case kARGB_4444_SkColorType:
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ return 3;
+ case kAlpha_8_SkColorType:
+ case kIndex_8_SkColorType:
+ case kGray_8_SkColorType:
+ return 1;
+ case kUnknown_SkColorType:
+ default:
+ SkDEBUGFAIL("unexpected color type");
+ return 0;
+ }
+}
+
+static void bitmap_to_pdf_pixels(const SkBitmap& bitmap, SkWStream* out) {
+ if (!bitmap.getPixels()) {
+ size_t size = pixel_count(bitmap) *
+ pdf_color_component_count(bitmap.colorType());
+ fill_stream(out, '\x00', size);
+ return;
+ }
+ SkBitmap copy;
+ const SkBitmap& bm = not4444(bitmap, &copy);
+ SkAutoLockPixels autoLockPixels(bm);
+ SkColorType colorType = bm.colorType();
+ SkAlphaType alphaType = bm.alphaType();
+ switch (colorType) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType: {
+ SkASSERT(3 == pdf_color_component_count(colorType));
+ SkAutoTMalloc<uint8_t> scanline(3 * bm.width());
+ for (int y = 0; y < bm.height(); ++y) {
+ const uint32_t* src = bm.getAddr32(0, y);
+ uint8_t* dst = scanline.get();
+ for (int x = 0; x < bm.width(); ++x) {
+ if (alphaType == kPremul_SkAlphaType) {
+ uint32_t color = *src++;
+ U8CPU alpha = SkGetA32Component(color, colorType);
+ if (alpha != SK_AlphaTRANSPARENT) {
+ pmcolor_to_rgb24(color, dst, colorType);
+ } else {
+ get_neighbor_avg_color(bm, x, y, dst, colorType);
+ }
+ dst += 3;
+ } else {
+ uint32_t color = *src++;
+ *dst++ = SkGetR32Component(color, colorType);
+ *dst++ = SkGetG32Component(color, colorType);
+ *dst++ = SkGetB32Component(color, colorType);
+ }
+ }
+ out->write(scanline.get(), 3 * bm.width());
+ }
+ return;
+ }
+ case kRGB_565_SkColorType: {
+ SkASSERT(3 == pdf_color_component_count(colorType));
+ SkAutoTMalloc<uint8_t> scanline(3 * bm.width());
+ for (int y = 0; y < bm.height(); ++y) {
+ const uint16_t* src = bm.getAddr16(0, y);
+ uint8_t* dst = scanline.get();
+ for (int x = 0; x < bm.width(); ++x) {
+ U16CPU color565 = *src++;
+ *dst++ = SkPacked16ToR32(color565);
+ *dst++ = SkPacked16ToG32(color565);
+ *dst++ = SkPacked16ToB32(color565);
+ }
+ out->write(scanline.get(), 3 * bm.width());
+ }
+ return;
+ }
+ case kAlpha_8_SkColorType:
+ SkASSERT(1 == pdf_color_component_count(colorType));
+ fill_stream(out, '\x00', pixel_count(bm));
+ return;
+ case kGray_8_SkColorType:
+ case kIndex_8_SkColorType:
+ SkASSERT(1 == pdf_color_component_count(colorType));
+ // these two formats need no transformation to serialize.
+ for (int y = 0; y < bm.height(); ++y) {
+ out->write(bm.getAddr8(0, y), bm.width());
+ }
+ return;
+ case kUnknown_SkColorType:
+ case kARGB_4444_SkColorType:
+ default:
+ SkDEBUGFAIL("unexpected color type");
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+static void bitmap_alpha_to_a8(const SkBitmap& bitmap, SkWStream* out) {
+ if (!bitmap.getPixels()) {
+ fill_stream(out, '\xFF', pixel_count(bitmap));
+ return;
+ }
+ SkBitmap copy;
+ const SkBitmap& bm = not4444(bitmap, &copy);
+ SkAutoLockPixels autoLockPixels(bm);
+ SkColorType colorType = bm.colorType();
+ switch (colorType) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType: {
+ SkAutoTMalloc<uint8_t> scanline(bm.width());
+ for (int y = 0; y < bm.height(); ++y) {
+ uint8_t* dst = scanline.get();
+ const SkPMColor* src = bm.getAddr32(0, y);
+ for (int x = 0; x < bm.width(); ++x) {
+ *dst++ = SkGetA32Component(*src++, colorType);
+ }
+ out->write(scanline.get(), bm.width());
+ }
+ return;
+ }
+ case kAlpha_8_SkColorType:
+ for (int y = 0; y < bm.height(); ++y) {
+ out->write(bm.getAddr8(0, y), bm.width());
+ }
+ return;
+ case kIndex_8_SkColorType: {
+ SkColorTable* ct = bm.getColorTable();
+ SkASSERT(ct);
+ SkAutoTMalloc<uint8_t> scanline(bm.width());
+ for (int y = 0; y < bm.height(); ++y) {
+ uint8_t* dst = scanline.get();
+ const uint8_t* src = bm.getAddr8(0, y);
+ for (int x = 0; x < bm.width(); ++x) {
+ *dst++ = SkGetPackedA32((*ct)[*src++]);
+ }
+ out->write(scanline.get(), bm.width());
+ }
+ return;
+ }
+ case kRGB_565_SkColorType:
+ case kGray_8_SkColorType:
+ SkDEBUGFAIL("color type has no alpha");
+ return;
+ case kARGB_4444_SkColorType:
+ SkDEBUGFAIL("4444 color type should have been converted to N32");
+ return;
+ case kUnknown_SkColorType:
+ default:
+ SkDEBUGFAIL("unexpected color type");
+ }
+}
+
+static sk_sp<SkPDFArray> make_indexed_color_space(
+ const SkColorTable* table,
+ SkAlphaType alphaType) {
+ auto result = sk_make_sp<SkPDFArray>();
+ result->reserve(4);
+ result->appendName("Indexed");
+ result->appendName("DeviceRGB");
+ SkASSERT(table);
+ if (table->count() < 1) {
+ result->appendInt(0);
+ char shortTableArray[3] = {0, 0, 0};
+ SkString tableString(shortTableArray, SK_ARRAY_COUNT(shortTableArray));
+ result->appendString(tableString);
+ return result;
+ }
+ result->appendInt(table->count() - 1); // maximum color index.
+
+ // Potentially, this could be represented in fewer bytes with a stream.
+ // Max size as a string is 1.5k.
+ char tableArray[256 * 3];
+ SkASSERT(3u * table->count() <= SK_ARRAY_COUNT(tableArray));
+ uint8_t* tablePtr = reinterpret_cast<uint8_t*>(tableArray);
+ const SkPMColor* colors = table->readColors();
+ for (int i = 0; i < table->count(); i++) {
+ if (alphaType == kPremul_SkAlphaType) {
+ pmcolor_to_rgb24(colors[i], tablePtr, kN32_SkColorType);
+ tablePtr += 3;
+ } else {
+ *tablePtr++ = SkGetR32Component(colors[i], kN32_SkColorType);
+ *tablePtr++ = SkGetG32Component(colors[i], kN32_SkColorType);
+ *tablePtr++ = SkGetB32Component(colors[i], kN32_SkColorType);
+ }
+ }
+ SkString tableString(tableArray, 3 * table->count());
+ result->appendString(tableString);
+ return result;
+}
+
+static void emit_image_xobject(SkWStream* stream,
+ const SkImage* image,
+ bool alpha,
+ const sk_sp<SkPDFObject>& smask,
+ const SkPDFObjNumMap& objNumMap) {
+ SkBitmap bitmap;
+ image_get_ro_pixels(image, &bitmap); // TODO(halcanary): test
+ SkAutoLockPixels autoLockPixels(bitmap); // with malformed images.
+
+ // Write to a temporary buffer to get the compressed length.
+ SkDynamicMemoryWStream buffer;
+ SkDeflateWStream deflateWStream(&buffer);
+ if (alpha) {
+ bitmap_alpha_to_a8(bitmap, &deflateWStream);
+ } else {
+ bitmap_to_pdf_pixels(bitmap, &deflateWStream);
+ }
+ deflateWStream.finalize(); // call before detachAsStream().
+ std::unique_ptr<SkStreamAsset> asset(buffer.detachAsStream());
+
+ SkPDFDict pdfDict("XObject");
+ pdfDict.insertName("Subtype", "Image");
+ pdfDict.insertInt("Width", bitmap.width());
+ pdfDict.insertInt("Height", bitmap.height());
+ if (alpha) {
+ pdfDict.insertName("ColorSpace", "DeviceGray");
+ } else if (bitmap.colorType() == kIndex_8_SkColorType) {
+ SkASSERT(1 == pdf_color_component_count(bitmap.colorType()));
+ pdfDict.insertObject("ColorSpace",
+ make_indexed_color_space(bitmap.getColorTable(),
+ bitmap.alphaType()));
+ } else if (1 == pdf_color_component_count(bitmap.colorType())) {
+ pdfDict.insertName("ColorSpace", "DeviceGray");
+ } else {
+ pdfDict.insertName("ColorSpace", "DeviceRGB");
+ }
+ if (smask) {
+ pdfDict.insertObjRef("SMask", smask);
+ }
+ pdfDict.insertInt("BitsPerComponent", 8);
+ pdfDict.insertName("Filter", "FlateDecode");
+ pdfDict.insertInt("Length", asset->getLength());
+ pdfDict.emitObject(stream, objNumMap);
+
+ pdf_stream_begin(stream);
+ stream->writeStream(asset.get(), asset->getLength());
+ pdf_stream_end(stream);
+}
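// For orientation (an illustrative sketch, not emitted verbatim by the code
// above): emit_image_xobject() writes a PDF image XObject dictionary typed
// "XObject" whose keys all come from the pdfDict.insert*() calls, roughly
//
//   << /Subtype /Image /Width 64 /Height 64
//      /ColorSpace /DeviceRGB /BitsPerComponent 8
//      /Filter /FlateDecode /Length 1234 >>
//   stream
//   ...flate-compressed RGB24 (or A8, for the alpha pass) scanlines...
//   endstream
//
// When an smask object is passed in, an /SMask indirect reference is added so
// that the A8 image produced by bitmap_alpha_to_a8() masks this one.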
+
+////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+// This SkPDFObject only outputs the alpha layer of the given bitmap.
+class PDFAlphaBitmap final : public SkPDFObject {
+public:
+ PDFAlphaBitmap(sk_sp<SkImage> image) : fImage(std::move(image)) { SkASSERT(fImage); }
+ void emitObject(SkWStream* stream,
+ const SkPDFObjNumMap& objNumMap) const override {
+ SkASSERT(fImage);
+ emit_image_xobject(stream, fImage.get(), true, nullptr, objNumMap);
+ }
+ void drop() override { fImage = nullptr; }
+
+private:
+ sk_sp<SkImage> fImage;
+};
+
+} // namespace
+
+////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+class PDFDefaultBitmap final : public SkPDFObject {
+public:
+ void emitObject(SkWStream* stream,
+ const SkPDFObjNumMap& objNumMap) const override {
+ SkASSERT(fImage);
+ emit_image_xobject(stream, fImage.get(), false, fSMask, objNumMap);
+ }
+ void addResources(SkPDFObjNumMap* catalog) const override {
+ catalog->addObjectRecursively(fSMask.get());
+ }
+ void drop() override { fImage = nullptr; fSMask = nullptr; }
+ PDFDefaultBitmap(sk_sp<SkImage> image, sk_sp<SkPDFObject> smask)
+ : fImage(std::move(image)), fSMask(std::move(smask)) { SkASSERT(fImage); }
+
+private:
+ sk_sp<SkImage> fImage;
+ sk_sp<SkPDFObject> fSMask;
+};
+} // namespace
+
+////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+/**
+ * This PDFObject assumes that its constructor was handed YUV or
+ * grayscale JFIF JPEG-encoded data that can be directly embedded
+ * into a PDF.
+ */
+class PDFJpegBitmap final : public SkPDFObject {
+public:
+ SkISize fSize;
+ sk_sp<SkData> fData;
+ bool fIsYUV;
+ PDFJpegBitmap(SkISize size, SkData* data, bool isYUV)
+ : fSize(size), fData(SkRef(data)), fIsYUV(isYUV) { SkASSERT(data); }
+ void emitObject(SkWStream*, const SkPDFObjNumMap&) const override;
+ void drop() override { fData = nullptr; }
+};
+
+void PDFJpegBitmap::emitObject(SkWStream* stream,
+ const SkPDFObjNumMap& objNumMap) const {
+ SkASSERT(fData);
+ SkPDFDict pdfDict("XObject");
+ pdfDict.insertName("Subtype", "Image");
+ pdfDict.insertInt("Width", fSize.width());
+ pdfDict.insertInt("Height", fSize.height());
+ if (fIsYUV) {
+ pdfDict.insertName("ColorSpace", "DeviceRGB");
+ } else {
+ pdfDict.insertName("ColorSpace", "DeviceGray");
+ }
+ pdfDict.insertInt("BitsPerComponent", 8);
+ pdfDict.insertName("Filter", "DCTDecode");
+ pdfDict.insertInt("ColorTransform", 0);
+ pdfDict.insertInt("Length", SkToInt(fData->size()));
+ pdfDict.emitObject(stream, objNumMap);
+ pdf_stream_begin(stream);
+ stream->write(fData->data(), fData->size());
+ pdf_stream_end(stream);
+}
+} // namespace
+
+////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkPDFObject> SkPDFCreateBitmapObject(sk_sp<SkImage> image,
+ SkPixelSerializer* pixelSerializer) {
+ SkASSERT(image);
+ sk_sp<SkData> data(image->refEncoded());
+ SkJFIFInfo info;
+ if (data && SkIsJFIF(data.get(), &info) &&
+ (!pixelSerializer ||
+ pixelSerializer->useEncodedData(data->data(), data->size()))) {
+ // If there is a SkPixelSerializer, give it a chance to
+ // re-encode the JPEG with more compression by returning false
+ // from useEncodedData.
+ bool yuv = info.fType == SkJFIFInfo::kYCbCr;
+ if (info.fSize == image->dimensions()) { // Sanity check.
+ // hold on to data, not image.
+ #ifdef SK_PDF_IMAGE_STATS
+ gJpegImageObjects.fetch_add(1);
+ #endif
+ return sk_make_sp<PDFJpegBitmap>(info.fSize, data.get(), yuv);
+ }
+ }
+
+ if (pixelSerializer) {
+ SkBitmap bm;
+ SkAutoPixmapUnlock apu;
+ if (as_IB(image.get())->getROPixels(&bm) && bm.requestLock(&apu)) {
+ data.reset(pixelSerializer->encode(apu.pixmap()));
+ if (data && SkIsJFIF(data.get(), &info)) {
+ bool yuv = info.fType == SkJFIFInfo::kYCbCr;
+ if (info.fSize == image->dimensions()) { // Sanity check.
+ return sk_make_sp<PDFJpegBitmap>(info.fSize, data.get(), yuv);
+ }
+ }
+ }
+ }
+
+ sk_sp<SkPDFObject> smask;
+ if (!image_compute_is_opaque(image.get())) {
+ smask = sk_make_sp<PDFAlphaBitmap>(image);
+ }
+ #ifdef SK_PDF_IMAGE_STATS
+ gRegularImageObjects.fetch_add(1);
+ #endif
+ return sk_make_sp<PDFDefaultBitmap>(std::move(image), std::move(smask));
+}
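A minimal usage sketch for the factory above (assuming only what this patch
shows; callers in the PDF backend normally go through SkPDFCanon so that
identical images are serialized once). Passing a null SkPixelSerializer means
any JFIF/JPEG data already attached to the image is embedded as-is via
/DCTDecode; everything else takes the flate-compressed PDFDefaultBitmap path:

    #include "SkImage.h"
    #include "SkPDFBitmap.h"

    // Sketch only: wrap a single image as a PDF image XObject.
    sk_sp<SkPDFObject> make_pdf_image(sk_sp<SkImage> image) {
        return image ? SkPDFCreateBitmapObject(std::move(image), /*pixelSerializer=*/nullptr)
                     : nullptr;
    }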
diff --git a/gfx/skia/skia/src/pdf/SkPDFBitmap.h b/gfx/skia/skia/src/pdf/SkPDFBitmap.h
new file mode 100644
index 000000000..8de796c70
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFBitmap.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPDFBitmap_DEFINED
+#define SkPDFBitmap_DEFINED
+
+#include "SkRefCnt.h"
+
+class SkImage;
+class SkPixelSerializer;
+class SkPDFObject;
+
+/**
+ * SkPDFBitmap wraps a SkImage and serializes it as an image XObject.
+ * It is designed to use a minimal amount of memory, aside from ref'ing
+ * the image, and its emitObject() does not cache any data.
+ */
+sk_sp<SkPDFObject> SkPDFCreateBitmapObject(sk_sp<SkImage>,
+ SkPixelSerializer*);
+
+#endif // SkPDFBitmap_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFCanon.cpp b/gfx/skia/skia/src/pdf/SkPDFCanon.cpp
new file mode 100644
index 000000000..a804d6b47
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFCanon.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkImage.h"
+#include "SkPDFBitmap.h"
+#include "SkPDFCanon.h"
+#include "SkPDFFont.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+template <typename K, typename V> struct UnrefValue {
+ void operator()(K, V** v) { SkSafeUnref(*v); }
+};
+}
+
+SkPDFCanon::~SkPDFCanon() {
+ // TODO(halcanary): make SkTHashSet work nicely with sk_sp<>,
+ // or use std::unordered_set<>
+ fGraphicStateRecords.foreach ([](WrapGS w) { w.fPtr->unref(); });
+ fPDFBitmapMap.foreach(UnrefValue<SkBitmapKey, SkPDFObject>());
+ fTypefaceMetrics.foreach(UnrefValue<uint32_t, SkAdvancedTypefaceMetrics>());
+ fFontDescriptors.foreach(UnrefValue<uint32_t, SkPDFDict>());
+ fFontMap.foreach(UnrefValue<uint64_t, SkPDFFont>());
+}
+
+void SkPDFCanon::reset() {
+ this->~SkPDFCanon();
+    new (this) SkPDFCanon;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+template <typename T>
+sk_sp<SkPDFObject> find_shader(const SkTArray<T>& records,
+ const SkPDFShader::State& state) {
+ for (const T& record : records) {
+ if (record.fShaderState == state) {
+ return record.fShaderObject;
+ }
+ }
+ return nullptr;
+}
+
+sk_sp<SkPDFObject> SkPDFCanon::findFunctionShader(
+ const SkPDFShader::State& state) const {
+ return find_shader(fFunctionShaderRecords, state);
+}
+void SkPDFCanon::addFunctionShader(sk_sp<SkPDFObject> pdfShader,
+ SkPDFShader::State state) {
+ fFunctionShaderRecords.emplace_back(std::move(state), std::move(pdfShader));
+}
+
+sk_sp<SkPDFObject> SkPDFCanon::findAlphaShader(
+ const SkPDFShader::State& state) const {
+ return find_shader(fAlphaShaderRecords, state);
+}
+void SkPDFCanon::addAlphaShader(sk_sp<SkPDFObject> pdfShader,
+ SkPDFShader::State state) {
+ fAlphaShaderRecords.emplace_back(std::move(state), std::move(pdfShader));
+}
+
+sk_sp<SkPDFObject> SkPDFCanon::findImageShader(
+ const SkPDFShader::State& state) const {
+ return find_shader(fImageShaderRecords, state);
+}
+
+void SkPDFCanon::addImageShader(sk_sp<SkPDFObject> pdfShader,
+ SkPDFShader::State state) {
+ fImageShaderRecords.emplace_back(std::move(state), std::move(pdfShader));
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+const SkPDFGraphicState* SkPDFCanon::findGraphicState(
+ const SkPDFGraphicState& key) const {
+ const WrapGS* ptr = fGraphicStateRecords.find(WrapGS(&key));
+ return ptr ? ptr->fPtr : nullptr;
+}
+
+void SkPDFCanon::addGraphicState(const SkPDFGraphicState* state) {
+ SkASSERT(state);
+ WrapGS w(SkRef(state));
+ SkASSERT(!fGraphicStateRecords.contains(w));
+ fGraphicStateRecords.add(w);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkPDFObject> SkPDFCanon::findPDFBitmap(SkBitmapKey key) const {
+ SkPDFObject** ptr = fPDFBitmapMap.find(key);
+ return ptr ? sk_ref_sp(*ptr) : sk_sp<SkPDFObject>();
+}
+
+void SkPDFCanon::addPDFBitmap(SkBitmapKey key, sk_sp<SkPDFObject> pdfBitmap) {
+ fPDFBitmapMap.set(key, pdfBitmap.release());
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkPDFStream> SkPDFCanon::makeInvertFunction() {
+ if (fInvertFunction) {
+ return fInvertFunction;
+ }
+ fInvertFunction = SkPDFGraphicState::MakeInvertFunction();
+ return fInvertFunction;
+}
+sk_sp<SkPDFDict> SkPDFCanon::makeNoSmaskGraphicState() {
+ if (fNoSmaskGraphicState) {
+ return fNoSmaskGraphicState;
+ }
+ fNoSmaskGraphicState = SkPDFGraphicState::MakeNoSmaskGraphicState();
+ return fNoSmaskGraphicState;
+}
+sk_sp<SkPDFArray> SkPDFCanon::makeRangeObject() {
+ if (fRangeObject) {
+ return fRangeObject;
+ }
+ fRangeObject = SkPDFShader::MakeRangeObject();
+ return fRangeObject;
+}
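The bitmap map above is meant to be used with a look-up-then-insert pattern; a
sketch (construction of the SkBitmapKey is elided, the miss path uses
SkPDFCreateBitmapObject() from SkPDFBitmap.cpp earlier in this patch, and the
interface lives in SkPDFCanon.h just below):

    #include "SkImage.h"
    #include "SkPDFBitmap.h"
    #include "SkPDFCanon.h"

    sk_sp<SkPDFObject> find_or_add_bitmap(SkPDFCanon* canon,
                                          SkBitmapKey key,
                                          sk_sp<SkImage> image) {
        sk_sp<SkPDFObject> object = canon->findPDFBitmap(key);
        if (!object) {
            object = SkPDFCreateBitmapObject(std::move(image),
                                             canon->getPixelSerializer());
            if (object) {
                // The canon keeps a bare ref until reset() or destruction.
                canon->addPDFBitmap(key, object);
            }
        }
        return object;
    }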
diff --git a/gfx/skia/skia/src/pdf/SkPDFCanon.h b/gfx/skia/skia/src/pdf/SkPDFCanon.h
new file mode 100644
index 000000000..2da9e52f7
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFCanon.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPDFCanon_DEFINED
+#define SkPDFCanon_DEFINED
+
+#include "SkPDFGraphicState.h"
+#include "SkPDFShader.h"
+#include "SkPixelSerializer.h"
+#include "SkTDArray.h"
+#include "SkTHash.h"
+#include "SkBitmapKey.h"
+
+class SkAdvancedTypefaceMetrics;
+class SkPDFFont;
+
+/**
+ * The SkPDFCanon canonicalizes objects across PDF pages
+ * (SkPDFDevices) and across draw calls.
+ *
+ * The PDF backend works correctly if:
+ * - There is no more than one SkPDFCanon for each thread.
+ * - Every SkPDFDevice is given a pointer to a SkPDFCanon on creation.
+ * - All SkPDFDevices in a document share the same SkPDFCanon.
+ * The SkPDFDocument class makes this happen by owning a single
+ * SkPDFCanon.
+ *
+ * The addFoo() methods will ref the Foo; the canon's destructor will
+ * call foo->unref() on all of these objects.
+ *
+ * The findFoo() methods do not change the ref count of the Foo
+ * objects.
+ */
+class SkPDFCanon : SkNoncopyable {
+public:
+ ~SkPDFCanon();
+
+ // reset to original setting, unrefs all objects.
+ void reset();
+
+ sk_sp<SkPDFObject> findFunctionShader(const SkPDFShader::State&) const;
+ void addFunctionShader(sk_sp<SkPDFObject>, SkPDFShader::State);
+
+ sk_sp<SkPDFObject> findAlphaShader(const SkPDFShader::State&) const;
+ void addAlphaShader(sk_sp<SkPDFObject>, SkPDFShader::State);
+
+ sk_sp<SkPDFObject> findImageShader(const SkPDFShader::State&) const;
+ void addImageShader(sk_sp<SkPDFObject>, SkPDFShader::State);
+
+ const SkPDFGraphicState* findGraphicState(const SkPDFGraphicState&) const;
+ void addGraphicState(const SkPDFGraphicState*);
+
+ sk_sp<SkPDFObject> findPDFBitmap(SkBitmapKey key) const;
+ void addPDFBitmap(SkBitmapKey key, sk_sp<SkPDFObject>);
+
+ SkTHashMap<uint32_t, SkAdvancedTypefaceMetrics*> fTypefaceMetrics;
+ SkTHashMap<uint32_t, SkPDFDict*> fFontDescriptors;
+ SkTHashMap<uint64_t, SkPDFFont*> fFontMap;
+
+ SkPixelSerializer* getPixelSerializer() const { return fPixelSerializer.get(); }
+ void setPixelSerializer(sk_sp<SkPixelSerializer> ps) {
+ fPixelSerializer = std::move(ps);
+ }
+
+ sk_sp<SkPDFStream> makeInvertFunction();
+ sk_sp<SkPDFDict> makeNoSmaskGraphicState();
+ sk_sp<SkPDFArray> makeRangeObject();
+
+private:
+ struct ShaderRec {
+ SkPDFShader::State fShaderState;
+ sk_sp<SkPDFObject> fShaderObject;
+ ShaderRec(SkPDFShader::State s, sk_sp<SkPDFObject> o)
+ : fShaderState(std::move(s)), fShaderObject(std::move(o)) {}
+ };
+ SkTArray<ShaderRec> fFunctionShaderRecords;
+ SkTArray<ShaderRec> fAlphaShaderRecords;
+ SkTArray<ShaderRec> fImageShaderRecords;
+
+ struct WrapGS {
+ explicit WrapGS(const SkPDFGraphicState* ptr = nullptr) : fPtr(ptr) {}
+ const SkPDFGraphicState* fPtr;
+ bool operator==(const WrapGS& rhs) const {
+ SkASSERT(fPtr);
+ SkASSERT(rhs.fPtr);
+ return *fPtr == *rhs.fPtr;
+ }
+ struct Hash {
+ uint32_t operator()(const WrapGS& w) const {
+ SkASSERT(w.fPtr);
+ return w.fPtr->hash();
+ }
+ };
+ };
+ SkTHashSet<WrapGS, WrapGS::Hash> fGraphicStateRecords;
+
+ // TODO(halcanary): make SkTHashMap<K, sk_sp<V>> work correctly.
+ SkTHashMap<SkBitmapKey, SkPDFObject*> fPDFBitmapMap;
+
+ sk_sp<SkPixelSerializer> fPixelSerializer;
+ sk_sp<SkPDFStream> fInvertFunction;
+ sk_sp<SkPDFDict> fNoSmaskGraphicState;
+ sk_sp<SkPDFArray> fRangeObject;
+};
+#endif // SkPDFCanon_DEFINED
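The shader caches follow the same find-then-add protocol; a sketch of the
image-shader case (the SkPDFShader::State value and the
make_image_shader_object() factory are placeholders for logic that lives in
SkPDFShader.cpp, outside this excerpt):

    sk_sp<SkPDFObject> canonicalize_image_shader(SkPDFCanon* canon,
                                                 SkPDFShader::State state) {
        if (sk_sp<SkPDFObject> cached = canon->findImageShader(state)) {
            return cached;  // linear search over previously added records
        }
        sk_sp<SkPDFObject> shader = make_image_shader_object(state);  // hypothetical factory
        if (shader) {
            canon->addImageShader(shader, std::move(state));
        }
        return shader;
    }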
diff --git a/gfx/skia/skia/src/pdf/SkPDFCanvas.cpp b/gfx/skia/skia/src/pdf/SkPDFCanvas.cpp
new file mode 100644
index 000000000..c7e39259c
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFCanvas.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkLatticeIter.h"
+#include "SkPDFCanvas.h"
+#include "SkPDFDevice.h"
+
+SkPDFCanvas::SkPDFCanvas(const sk_sp<SkPDFDevice>& dev)
+ : SkCanvas(dev.get()) {}
+
+SkPDFCanvas::~SkPDFCanvas() {}
+
+/*
+ * PDF's impl sometimes wants to access the raster clip as a SkRegion. To keep this valid,
+ * we intercept all clip calls to ensure that the clip stays BW (i.e. never antialiased), since
+ * an antialiased clip won't build a SkRegion (it builds SkAAClip).
+ */
+void SkPDFCanvas::onClipRect(const SkRect& rect, ClipOp op, ClipEdgeStyle edgeStyle) {
+ this->INHERITED::onClipRect(rect, op, kHard_ClipEdgeStyle);
+}
+
+void SkPDFCanvas::onClipRRect(const SkRRect& rrect, ClipOp op, ClipEdgeStyle edgeStyle) {
+ this->INHERITED::onClipRRect(rrect, op, kHard_ClipEdgeStyle);
+}
+
+void SkPDFCanvas::onClipPath(const SkPath& path, ClipOp op, ClipEdgeStyle edgeStyle) {
+ this->INHERITED::onClipPath(path, op, kHard_ClipEdgeStyle);
+}
+
+void SkPDFCanvas::onDrawBitmapNine(const SkBitmap& bitmap,
+ const SkIRect& center,
+ const SkRect& dst,
+ const SkPaint* paint) {
+ SkLatticeIter iter(bitmap.width(), bitmap.height(), center, dst);
+ SkRect srcR, dstR;
+ while (iter.next(&srcR, &dstR)) {
+ this->drawBitmapRect(bitmap, srcR, dstR, paint);
+ }
+}
+
+void SkPDFCanvas::onDrawImageNine(const SkImage* image,
+ const SkIRect& center,
+ const SkRect& dst,
+ const SkPaint* paint) {
+ SkLatticeIter iter(image->width(), image->height(), center, dst);
+ SkRect srcR, dstR;
+ while (iter.next(&srcR, &dstR)) {
+ this->drawImageRect(image, srcR, dstR, paint);
+ }
+}
+
+void SkPDFCanvas::onDrawImageRect(const SkImage* image,
+ const SkRect* src,
+ const SkRect& dst,
+ const SkPaint* paint,
+ SkCanvas::SrcRectConstraint constraint) {
+ SkAutoCanvasRestore autoCanvasRestore(this, true);
+ this->clipRect(dst);
+ this->SkCanvas::onDrawImageRect(image, src, dst, paint, constraint);
+}
+
+void SkPDFCanvas::onDrawBitmapRect(const SkBitmap& bitmap,
+ const SkRect* src,
+ const SkRect& dst,
+ const SkPaint* paint,
+ SkCanvas::SrcRectConstraint constraint) {
+ SkAutoCanvasRestore autoCanvasRestore(this, true);
+ this->clipRect(dst);
+ this->SkCanvas::onDrawBitmapRect(bitmap, src, dst, paint, constraint);
+}
+
+void SkPDFCanvas::onDrawImageLattice(const SkImage* image,
+ const Lattice& lattice,
+ const SkRect& dst,
+ const SkPaint* paint) {
+ SkLatticeIter iter(lattice, dst);
+ SkRect srcR, dstR;
+ while (iter.next(&srcR, &dstR)) {
+ this->drawImageRect(image, srcR, dstR, paint);
+ }
+}
+
+void SkPDFCanvas::onDrawBitmapLattice(const SkBitmap& bitmap,
+ const Lattice& lattice,
+ const SkRect& dst,
+ const SkPaint* paint) {
+ SkLatticeIter iter(lattice, dst);
+ SkRect srcR, dstR;
+ while (iter.next(&srcR, &dstR)) {
+ this->drawBitmapRect(bitmap, srcR, dstR, paint);
+ }
+}
+
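A short illustration of what the clip overrides above guarantee (sketch only;
the canvas is assumed to be one constructed over an SkPDFDevice):

    #include "SkCanvas.h"
    #include "SkRRect.h"

    void clip_example(SkCanvas* pdfCanvas, const SkRRect& roundRect) {
        // The caller may ask for an antialiased clip...
        pdfCanvas->clipRRect(roundRect, SkCanvas::kIntersect_Op, /*doAntiAlias=*/true);
        // ...but SkPDFCanvas::onClipRRect() forwards it with kHard_ClipEdgeStyle,
        // so the device's raster clip remains a black-and-white SkRegion that
        // SkPDFDevice can read back via fRC->bwRgn() (see SkPDFDevice.cpp below).
    }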
diff --git a/gfx/skia/skia/src/pdf/SkPDFCanvas.h b/gfx/skia/skia/src/pdf/SkPDFCanvas.h
new file mode 100644
index 000000000..ead13c34a
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFCanvas.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPDFCanvas_DEFINED
+#define SkPDFCanvas_DEFINED
+
+#include "SkCanvas.h"
+
+class SkPDFDevice;
+
+class SkPDFCanvas : public SkCanvas {
+public:
+ SkPDFCanvas(const sk_sp<SkPDFDevice>&);
+ ~SkPDFCanvas();
+
+protected:
+ void onClipRect(const SkRect&, ClipOp, ClipEdgeStyle) override;
+ void onClipRRect(const SkRRect&, ClipOp, ClipEdgeStyle) override;
+ void onClipPath(const SkPath&, ClipOp, ClipEdgeStyle) override;
+
+ void onDrawBitmapNine(const SkBitmap&, const SkIRect&, const SkRect&,
+ const SkPaint*) override;
+
+ void onDrawImageNine(const SkImage*, const SkIRect&, const SkRect&,
+ const SkPaint*) override;
+
+ void onDrawImageRect(const SkImage*,
+ const SkRect*,
+ const SkRect&,
+ const SkPaint*,
+ SkCanvas::SrcRectConstraint) override;
+
+ void onDrawBitmapRect(const SkBitmap&,
+ const SkRect*,
+ const SkRect&,
+ const SkPaint*,
+ SkCanvas::SrcRectConstraint) override;
+
+ void onDrawImageLattice(const SkImage*,
+ const Lattice&,
+ const SkRect&,
+ const SkPaint*) override;
+
+ void onDrawBitmapLattice(const SkBitmap&,
+ const Lattice&,
+ const SkRect&,
+ const SkPaint*) override;
+
+private:
+ typedef SkCanvas INHERITED;
+};
+
+#endif // SkPDFCanvas_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFConvertType1FontStream.cpp b/gfx/skia/skia/src/pdf/SkPDFConvertType1FontStream.cpp
new file mode 100644
index 000000000..d75da5c78
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFConvertType1FontStream.cpp
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPDFConvertType1FontStream.h"
+
+#include <ctype.h>
+
+static bool parsePFBSection(const uint8_t** src, size_t* len, int sectionType,
+ size_t* size) {
+    // PFB sections have a two- or six-byte header: 0x80 and a one-byte
+    // section type, followed by a four-byte little-endian section length.
+    // Type one is an ASCII section (includes a length), type two is a
+    // binary section (includes a length), and type three is an EOF marker
+    // with no length.
+ const uint8_t* buf = *src;
+ if (*len < 2 || buf[0] != 0x80 || buf[1] != sectionType) {
+ return false;
+ } else if (buf[1] == 3) {
+ return true;
+ } else if (*len < 6) {
+ return false;
+ }
+
+ *size = (size_t)buf[2] | ((size_t)buf[3] << 8) | ((size_t)buf[4] << 16) |
+ ((size_t)buf[5] << 24);
+ size_t consumed = *size + 6;
+ if (consumed > *len) {
+ return false;
+ }
+ *src = *src + consumed;
+ *len = *len - consumed;
+ return true;
+}
+
+static bool parsePFB(const uint8_t* src, size_t size, size_t* headerLen,
+ size_t* dataLen, size_t* trailerLen) {
+ const uint8_t* srcPtr = src;
+ size_t remaining = size;
+
+ return parsePFBSection(&srcPtr, &remaining, 1, headerLen) &&
+ parsePFBSection(&srcPtr, &remaining, 2, dataLen) &&
+ parsePFBSection(&srcPtr, &remaining, 1, trailerLen) &&
+ parsePFBSection(&srcPtr, &remaining, 3, nullptr);
+}
+
+/* The sections of a PFA file are implicitly defined. The body starts
+ * after the line containing "eexec", and the trailer starts with 512
+ * literal 0's followed by "cleartomark" (plus arbitrary white space).
+ *
+ * This function assumes that src is NUL-terminated, but the NUL
+ * termination is not included in size.
+ */
+static bool parsePFA(const char* src, size_t size, size_t* headerLen,
+ size_t* hexDataLen, size_t* dataLen, size_t* trailerLen) {
+ const char* end = src + size;
+
+ const char* dataPos = strstr(src, "eexec");
+ if (!dataPos) {
+ return false;
+ }
+ dataPos += strlen("eexec");
+ while ((*dataPos == '\n' || *dataPos == '\r' || *dataPos == ' ') &&
+ dataPos < end) {
+ dataPos++;
+ }
+ *headerLen = dataPos - src;
+
+ const char* trailerPos = strstr(dataPos, "cleartomark");
+ if (!trailerPos) {
+ return false;
+ }
+ int zeroCount = 0;
+ for (trailerPos--; trailerPos > dataPos && zeroCount < 512; trailerPos--) {
+ if (*trailerPos == '\n' || *trailerPos == '\r' || *trailerPos == ' ') {
+ continue;
+ } else if (*trailerPos == '0') {
+ zeroCount++;
+ } else {
+ return false;
+ }
+ }
+ if (zeroCount != 512) {
+ return false;
+ }
+
+ *hexDataLen = trailerPos - src - *headerLen;
+ *trailerLen = size - *headerLen - *hexDataLen;
+
+ // Verify that the data section is hex encoded and count the bytes.
+ int nibbles = 0;
+ for (; dataPos < trailerPos; dataPos++) {
+ if (isspace(*dataPos)) {
+ continue;
+ }
+ if (!isxdigit(*dataPos)) {
+ return false;
+ }
+ nibbles++;
+ }
+ *dataLen = (nibbles + 1) / 2;
+
+ return true;
+}
+
+static int8_t hexToBin(uint8_t c) {
+ if (!isxdigit(c)) {
+ return -1;
+ } else if (c <= '9') {
+ return c - '0';
+ } else if (c <= 'F') {
+ return c - 'A' + 10;
+ } else if (c <= 'f') {
+ return c - 'a' + 10;
+ }
+ return -1;
+}
+
+sk_sp<SkData> SkPDFConvertType1FontStream(
+ std::unique_ptr<SkStreamAsset> srcStream, size_t* headerLen,
+ size_t* dataLen, size_t* trailerLen) {
+ size_t srcLen = srcStream ? srcStream->getLength() : 0;
+ SkASSERT(srcLen);
+ if (!srcLen) {
+ return nullptr;
+ }
+ // Flatten and Nul-terminate the source stream so that we can use
+ // strstr() to search it.
+ SkAutoTMalloc<uint8_t> sourceBuffer(SkToInt(srcLen + 1));
+ (void)srcStream->read(sourceBuffer.get(), srcLen);
+ sourceBuffer[SkToInt(srcLen)] = 0;
+ const uint8_t* src = sourceBuffer.get();
+
+ if (parsePFB(src, srcLen, headerLen, dataLen, trailerLen)) {
+ static const int kPFBSectionHeaderLength = 6;
+ const size_t length = *headerLen + *dataLen + *trailerLen;
+ SkASSERT(length > 0);
+ SkASSERT(length + (2 * kPFBSectionHeaderLength) <= srcLen);
+
+ sk_sp<SkData> data(SkData::MakeUninitialized(length));
+
+ const uint8_t* const srcHeader = src + kPFBSectionHeaderLength;
+        // Each of the three sections is preceded by a six-byte PFB section
+        // header that we are not going to copy into the output.
+        const uint8_t* const srcData = srcHeader + *headerLen + kPFBSectionHeaderLength;
+        const uint8_t* const srcTrailer = srcData + *dataLen + kPFBSectionHeaderLength;
+
+ uint8_t* const resultHeader = (uint8_t*)data->writable_data();
+ uint8_t* const resultData = resultHeader + *headerLen;
+ uint8_t* const resultTrailer = resultData + *dataLen;
+
+ SkASSERT(resultTrailer + *trailerLen == resultHeader + length);
+
+ memcpy(resultHeader, srcHeader, *headerLen);
+ memcpy(resultData, srcData, *dataLen);
+ memcpy(resultTrailer, srcTrailer, *trailerLen);
+
+ return data;
+ }
+
+ // A PFA has to be converted for PDF.
+ size_t hexDataLen;
+ if (!parsePFA((const char*)src, srcLen, headerLen, &hexDataLen, dataLen,
+ trailerLen)) {
+ return nullptr;
+ }
+ const size_t length = *headerLen + *dataLen + *trailerLen;
+ SkASSERT(length > 0);
+ auto data = SkData::MakeUninitialized(length);
+ uint8_t* buffer = (uint8_t*)data->writable_data();
+
+ memcpy(buffer, src, *headerLen);
+ uint8_t* const resultData = &(buffer[*headerLen]);
+
+ const uint8_t* hexData = src + *headerLen;
+ const uint8_t* trailer = hexData + hexDataLen;
+ size_t outputOffset = 0;
+ uint8_t dataByte = 0; // To hush compiler.
+ bool highNibble = true;
+ for (; hexData < trailer; hexData++) {
+ int8_t curNibble = hexToBin(*hexData);
+ if (curNibble < 0) {
+ continue;
+ }
+ if (highNibble) {
+ dataByte = curNibble << 4;
+ highNibble = false;
+ } else {
+ dataByte |= curNibble;
+ highNibble = true;
+ resultData[outputOffset++] = dataByte;
+ }
+ }
+ if (!highNibble) {
+ resultData[outputOffset++] = dataByte;
+ }
+ SkASSERT(outputOffset == *dataLen);
+
+ uint8_t* const resultTrailer = &(buffer[SkToInt(*headerLen + outputOffset)]);
+ memcpy(resultTrailer, src + *headerLen + hexDataLen, *trailerLen);
+ return data;
+}
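A usage sketch for the converter above (only the signature declared in
SkPDFConvertType1FontStream.h below is assumed; faceStream stands for whatever
Type 1 stream the caller obtained from the typeface). The three returned
lengths are exactly the /Length1, /Length2 and /Length3 values a Type 1
/FontFile stream dictionary needs:

    #include "SkPDFConvertType1FontStream.h"

    sk_sp<SkData> embed_type1(std::unique_ptr<SkStreamAsset> faceStream,
                              size_t* length1, size_t* length2, size_t* length3) {
        // Accepts PFB or PFA input; returns header + binary data + trailer with
        // PFB section headers stripped and PFA hex data decoded to binary, or
        // nullptr if the stream cannot be parsed.
        return SkPDFConvertType1FontStream(std::move(faceStream),
                                           length1, length2, length3);
    }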
diff --git a/gfx/skia/skia/src/pdf/SkPDFConvertType1FontStream.h b/gfx/skia/skia/src/pdf/SkPDFConvertType1FontStream.h
new file mode 100644
index 000000000..ffd2da309
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFConvertType1FontStream.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPDFConvertType1FontStream_DEFINED
+#define SkPDFConvertType1FontStream_DEFINED
+
+#include "SkData.h"
+#include "SkStream.h"
+
+/*
+ "A standard Type 1 font program, as described in the Adobe Type 1
+ Font Format specification, consists of three parts: a clear-text
+ portion (written using PostScript syntax), an encrypted portion, and
+ a fixed-content portion. The fixed-content portion contains 512
+ ASCII zeros followed by a cleartomark operator, and perhaps followed
+ by additional data. Although the encrypted portion of a standard
+ Type 1 font may be in binary or ASCII hexadecimal format, PDF
+ supports only the binary format."
+*/
+sk_sp<SkData> SkPDFConvertType1FontStream(
+ std::unique_ptr<SkStreamAsset> srcStream, size_t* headerLen,
+ size_t* dataLen, size_t* trailerLen);
+
+#endif // SkPDFConvertType1FontStream_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFDevice.cpp b/gfx/skia/skia/src/pdf/SkPDFDevice.cpp
new file mode 100644
index 000000000..217dd3f2b
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFDevice.cpp
@@ -0,0 +1,2325 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPDFDevice.h"
+
+#include "SkAdvancedTypefaceMetrics.h"
+#include "SkAnnotationKeys.h"
+#include "SkBitmapDevice.h"
+#include "SkBitmapKey.h"
+#include "SkColor.h"
+#include "SkColorFilter.h"
+#include "SkDraw.h"
+#include "SkDrawFilter.h"
+#include "SkGlyphCache.h"
+#include "SkImageFilterCache.h"
+#include "SkMakeUnique.h"
+#include "SkPath.h"
+#include "SkPathEffect.h"
+#include "SkPathOps.h"
+#include "SkPDFBitmap.h"
+#include "SkPDFCanon.h"
+#include "SkPDFDocument.h"
+#include "SkPDFFont.h"
+#include "SkPDFFormXObject.h"
+#include "SkPDFGraphicState.h"
+#include "SkPDFResourceDict.h"
+#include "SkPDFShader.h"
+#include "SkPDFTypes.h"
+#include "SkPDFUtils.h"
+#include "SkPixelRef.h"
+#include "SkRasterClip.h"
+#include "SkRRect.h"
+#include "SkScopeExit.h"
+#include "SkString.h"
+#include "SkSurface.h"
+#include "SkTemplates.h"
+#include "SkTextBlobRunIterator.h"
+#include "SkTextFormatParams.h"
+#include "SkUtils.h"
+#include "SkXfermodeInterpretation.h"
+
+#define DPI_FOR_RASTER_SCALE_ONE 72
+
+// Utility functions
+
+// If the paint will definitely draw opaquely, replace kSrc with
+// kSrcOver. http://crbug.com/473572
+static void replace_srcmode_on_opaque_paint(SkPaint* paint) {
+ if (kSrcOver_SkXfermodeInterpretation == SkInterpretXfermode(*paint, false)) {
+ paint->setBlendMode(SkBlendMode::kSrcOver);
+ }
+}
+
+static void emit_pdf_color(SkColor color, SkWStream* result) {
+ SkASSERT(SkColorGetA(color) == 0xFF); // We handle alpha elsewhere.
+ SkPDFUtils::AppendColorComponent(SkColorGetR(color), result);
+ result->writeText(" ");
+ SkPDFUtils::AppendColorComponent(SkColorGetG(color), result);
+ result->writeText(" ");
+ SkPDFUtils::AppendColorComponent(SkColorGetB(color), result);
+ result->writeText(" ");
+}
+
+static SkPaint calculate_text_paint(const SkPaint& paint) {
+ SkPaint result = paint;
+ if (result.isFakeBoldText()) {
+ SkScalar fakeBoldScale = SkScalarInterpFunc(result.getTextSize(),
+ kStdFakeBoldInterpKeys,
+ kStdFakeBoldInterpValues,
+ kStdFakeBoldInterpLength);
+ SkScalar width = SkScalarMul(result.getTextSize(), fakeBoldScale);
+ if (result.getStyle() == SkPaint::kFill_Style) {
+ result.setStyle(SkPaint::kStrokeAndFill_Style);
+ } else {
+ width += result.getStrokeWidth();
+ }
+ result.setStrokeWidth(width);
+ }
+ return result;
+}
+
+static SkImageSubset make_image_subset(const SkBitmap& bitmap) {
+ SkASSERT(!bitmap.drawsNothing());
+ SkIRect subset = bitmap.getSubset();
+ SkAutoLockPixels autoLockPixels(bitmap);
+ SkASSERT(bitmap.pixelRef());
+ SkBitmap tmp;
+ tmp.setInfo(bitmap.pixelRef()->info(), bitmap.rowBytes());
+ tmp.setPixelRef(bitmap.pixelRef());
+ tmp.lockPixels();
+ auto img = SkImage::MakeFromBitmap(tmp);
+ if (img) {
+ SkASSERT(!bitmap.isImmutable() || img->uniqueID() == bitmap.getGenerationID());
+ SkASSERT(img->bounds().contains(subset));
+ }
+ SkImageSubset imageSubset(std::move(img), subset);
+ // SkImage::MakeFromBitmap only preserves genID for immutable
+ // bitmaps. Use the bitmap's original ID for de-duping.
+ imageSubset.setID(bitmap.getGenerationID());
+ return imageSubset;
+}
+
+SkPDFDevice::GraphicStateEntry::GraphicStateEntry()
+ : fColor(SK_ColorBLACK)
+ , fTextScaleX(SK_Scalar1)
+ , fTextFill(SkPaint::kFill_Style)
+ , fShaderIndex(-1)
+ , fGraphicStateIndex(-1) {
+ fMatrix.reset();
+}
+
+bool SkPDFDevice::GraphicStateEntry::compareInitialState(
+ const GraphicStateEntry& cur) {
+ return fColor == cur.fColor &&
+ fShaderIndex == cur.fShaderIndex &&
+ fGraphicStateIndex == cur.fGraphicStateIndex &&
+ fMatrix == cur.fMatrix &&
+ fClipStack == cur.fClipStack &&
+ (fTextScaleX == 0 ||
+ (fTextScaleX == cur.fTextScaleX && fTextFill == cur.fTextFill));
+}
+
+class GraphicStackState {
+public:
+ GraphicStackState(const SkClipStack& existingClipStack,
+ const SkRegion& existingClipRegion,
+ SkWStream* contentStream)
+ : fStackDepth(0),
+ fContentStream(contentStream) {
+ fEntries[0].fClipStack = existingClipStack;
+ fEntries[0].fClipRegion = existingClipRegion;
+ }
+
+ void updateClip(const SkClipStack& clipStack, const SkRegion& clipRegion,
+ const SkPoint& translation);
+ void updateMatrix(const SkMatrix& matrix);
+ void updateDrawingState(const SkPDFDevice::GraphicStateEntry& state);
+
+ void drainStack();
+
+private:
+ void push();
+ void pop();
+ SkPDFDevice::GraphicStateEntry* currentEntry() { return &fEntries[fStackDepth]; }
+
+ // Conservative limit on save depth, see impl. notes in PDF 1.4 spec.
+ static const int kMaxStackDepth = 12;
+ SkPDFDevice::GraphicStateEntry fEntries[kMaxStackDepth + 1];
+ int fStackDepth;
+ SkWStream* fContentStream;
+};
+
+void GraphicStackState::drainStack() {
+ while (fStackDepth) {
+ pop();
+ }
+}
+
+void GraphicStackState::push() {
+ SkASSERT(fStackDepth < kMaxStackDepth);
+ fContentStream->writeText("q\n");
+ fStackDepth++;
+ fEntries[fStackDepth] = fEntries[fStackDepth - 1];
+}
+
+void GraphicStackState::pop() {
+ SkASSERT(fStackDepth > 0);
+ fContentStream->writeText("Q\n");
+ fStackDepth--;
+}
+
+/* Calculate an inverted path's equivalent non-inverted path, given the
+ * canvas bounds.
+ * outPath may alias with invPath (since this is supported by PathOps).
+ */
+static bool calculate_inverse_path(const SkRect& bounds, const SkPath& invPath,
+ SkPath* outPath) {
+ SkASSERT(invPath.isInverseFillType());
+
+ SkPath clipPath;
+ clipPath.addRect(bounds);
+
+ return Op(clipPath, invPath, kIntersect_SkPathOp, outPath);
+}
+
+// Sanity check the numerical values of the SkRegion ops and PathOps ops
+// enums so region_op_to_pathops_op can do a straight passthrough cast.
+// If these are failing, it may be necessary to make region_op_to_pathops_op
+// do more.
+static_assert(SkRegion::kDifference_Op == (int)kDifference_SkPathOp, "region_pathop_mismatch");
+static_assert(SkRegion::kIntersect_Op == (int)kIntersect_SkPathOp, "region_pathop_mismatch");
+static_assert(SkRegion::kUnion_Op == (int)kUnion_SkPathOp, "region_pathop_mismatch");
+static_assert(SkRegion::kXOR_Op == (int)kXOR_SkPathOp, "region_pathop_mismatch");
+static_assert(SkRegion::kReverseDifference_Op == (int)kReverseDifference_SkPathOp,
+ "region_pathop_mismatch");
+
+static SkPathOp region_op_to_pathops_op(SkCanvas::ClipOp op) {
+ SkASSERT(op >= 0);
+ SkASSERT(op <= SkCanvas::kReverseDifference_Op);
+ return (SkPathOp)op;
+}
+
+/* Uses Path Ops to calculate a vector SkPath clip from a clip stack.
+ * Returns true if successful, or false if not successful.
+ * If successful, the resulting clip is stored in outClipPath.
+ * If not successful, outClipPath is undefined, and a fallback method
+ * should be used.
+ */
+static bool get_clip_stack_path(const SkMatrix& transform,
+ const SkClipStack& clipStack,
+ const SkRegion& clipRegion,
+ SkPath* outClipPath) {
+ outClipPath->reset();
+ outClipPath->setFillType(SkPath::kInverseWinding_FillType);
+
+ const SkClipStack::Element* clipEntry;
+ SkClipStack::Iter iter;
+ iter.reset(clipStack, SkClipStack::Iter::kBottom_IterStart);
+ for (clipEntry = iter.next(); clipEntry; clipEntry = iter.next()) {
+ SkPath entryPath;
+ if (SkClipStack::Element::kEmpty_Type == clipEntry->getType()) {
+ outClipPath->reset();
+ outClipPath->setFillType(SkPath::kInverseWinding_FillType);
+ continue;
+ } else {
+ clipEntry->asPath(&entryPath);
+ }
+ entryPath.transform(transform);
+
+ if (SkCanvas::kReplace_Op == clipEntry->getOp()) {
+ *outClipPath = entryPath;
+ } else {
+ SkPathOp op = region_op_to_pathops_op(clipEntry->getOp());
+ if (!Op(*outClipPath, entryPath, op, outClipPath)) {
+ return false;
+ }
+ }
+ }
+
+ if (outClipPath->isInverseFillType()) {
+ // The bounds are slightly outset to ensure this is correct in the
+ // face of floating-point accuracy and possible SkRegion bitmap
+ // approximations.
+ SkRect clipBounds = SkRect::Make(clipRegion.getBounds());
+ clipBounds.outset(SK_Scalar1, SK_Scalar1);
+ if (!calculate_inverse_path(clipBounds, *outClipPath, outClipPath)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// TODO(vandebo): Take advantage of SkClipStack::getSaveCount(), the PDF
+// graphic state stack, and the fact that we can know all the clips used
+// on the page to optimize this.
+void GraphicStackState::updateClip(const SkClipStack& clipStack,
+ const SkRegion& clipRegion,
+ const SkPoint& translation) {
+ if (clipStack == currentEntry()->fClipStack) {
+ return;
+ }
+
+ while (fStackDepth > 0) {
+ pop();
+ if (clipStack == currentEntry()->fClipStack) {
+ return;
+ }
+ }
+ push();
+
+ currentEntry()->fClipStack = clipStack;
+ currentEntry()->fClipRegion = clipRegion;
+
+ SkMatrix transform;
+ transform.setTranslate(translation.fX, translation.fY);
+
+ SkPath clipPath;
+ if (get_clip_stack_path(transform, clipStack, clipRegion, &clipPath)) {
+ SkPDFUtils::EmitPath(clipPath, SkPaint::kFill_Style, fContentStream);
+ SkPath::FillType clipFill = clipPath.getFillType();
+ NOT_IMPLEMENTED(clipFill == SkPath::kInverseEvenOdd_FillType, false);
+ NOT_IMPLEMENTED(clipFill == SkPath::kInverseWinding_FillType, false);
+ if (clipFill == SkPath::kEvenOdd_FillType) {
+ fContentStream->writeText("W* n\n");
+ } else {
+ fContentStream->writeText("W n\n");
+ }
+ }
+ // If Op() fails (pathological case; e.g. input values are
+ // extremely large or NaN), emit no clip at all.
+}
+
+void GraphicStackState::updateMatrix(const SkMatrix& matrix) {
+ if (matrix == currentEntry()->fMatrix) {
+ return;
+ }
+
+ if (currentEntry()->fMatrix.getType() != SkMatrix::kIdentity_Mask) {
+ SkASSERT(fStackDepth > 0);
+ SkASSERT(fEntries[fStackDepth].fClipStack ==
+ fEntries[fStackDepth -1].fClipStack);
+ pop();
+
+ SkASSERT(currentEntry()->fMatrix.getType() == SkMatrix::kIdentity_Mask);
+ }
+ if (matrix.getType() == SkMatrix::kIdentity_Mask) {
+ return;
+ }
+
+ push();
+ SkPDFUtils::AppendTransform(matrix, fContentStream);
+ currentEntry()->fMatrix = matrix;
+}
+
+void GraphicStackState::updateDrawingState(const SkPDFDevice::GraphicStateEntry& state) {
+ // PDF treats a shader as a color, so we only set one or the other.
+ if (state.fShaderIndex >= 0) {
+ if (state.fShaderIndex != currentEntry()->fShaderIndex) {
+ SkPDFUtils::ApplyPattern(state.fShaderIndex, fContentStream);
+ currentEntry()->fShaderIndex = state.fShaderIndex;
+ }
+ } else {
+ if (state.fColor != currentEntry()->fColor ||
+ currentEntry()->fShaderIndex >= 0) {
+ emit_pdf_color(state.fColor, fContentStream);
+ fContentStream->writeText("RG ");
+ emit_pdf_color(state.fColor, fContentStream);
+ fContentStream->writeText("rg\n");
+ currentEntry()->fColor = state.fColor;
+ currentEntry()->fShaderIndex = -1;
+ }
+ }
+
+ if (state.fGraphicStateIndex != currentEntry()->fGraphicStateIndex) {
+ SkPDFUtils::ApplyGraphicState(state.fGraphicStateIndex, fContentStream);
+ currentEntry()->fGraphicStateIndex = state.fGraphicStateIndex;
+ }
+
+ if (state.fTextScaleX) {
+ if (state.fTextScaleX != currentEntry()->fTextScaleX) {
+ SkScalar pdfScale = SkScalarMul(state.fTextScaleX,
+ SkIntToScalar(100));
+ SkPDFUtils::AppendScalar(pdfScale, fContentStream);
+ fContentStream->writeText(" Tz\n");
+ currentEntry()->fTextScaleX = state.fTextScaleX;
+ }
+ if (state.fTextFill != currentEntry()->fTextFill) {
+ static_assert(SkPaint::kFill_Style == 0, "enum_must_match_value");
+ static_assert(SkPaint::kStroke_Style == 1, "enum_must_match_value");
+ static_assert(SkPaint::kStrokeAndFill_Style == 2, "enum_must_match_value");
+ fContentStream->writeDecAsText(state.fTextFill);
+ fContentStream->writeText(" Tr\n");
+ currentEntry()->fTextFill = state.fTextFill;
+ }
+ }
+}
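// For orientation, the update*() methods above emit ordinary PDF content-stream
// operators. An illustrative fragment (values are made up):
//
//   q                      % push()               -> save graphics state
//   ... W n                % updateClip()         -> emitted clip path, then clip
//   1 0 0 -1 0 792 cm      % updateMatrix()       -> concatenate transform
//   1 0 0 RG 1 0 0 rg      % updateDrawingState() -> stroke and fill color
//   Q                      % pop()                -> restore graphics state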
+
+static bool not_supported_for_layers(const SkPaint& layerPaint) {
+ // PDF does not support image filters, so render them on CPU.
+ // Note that this rendering is done at "screen" resolution (100dpi), not
+ // printer resolution.
+ // TODO: It may be possible to express some filters natively using PDF
+ // to improve quality and file size (https://bug.skia.org/3043)
+
+ // TODO: should we return true if there is a colorfilter?
+ return layerPaint.getImageFilter() != nullptr;
+}
+
+SkBaseDevice* SkPDFDevice::onCreateDevice(const CreateInfo& cinfo, const SkPaint* layerPaint) {
+ if (layerPaint && not_supported_for_layers(*layerPaint)) {
+ // need to return a raster device, which we will detect in drawDevice()
+ return SkBitmapDevice::Create(cinfo.fInfo, SkSurfaceProps(0, kUnknown_SkPixelGeometry));
+ }
+ SkISize size = SkISize::Make(cinfo.fInfo.width(), cinfo.fInfo.height());
+ return SkPDFDevice::Create(size, fRasterDpi, fDocument);
+}
+
+SkPDFCanon* SkPDFDevice::getCanon() const { return fDocument->canon(); }
+
+
+
+// A helper class to automatically finish a ContentEntry at the end of a
+// drawing method and maintain the state needed between set up and finish.
+class ScopedContentEntry {
+public:
+ ScopedContentEntry(SkPDFDevice* device, const SkDraw& draw,
+ const SkPaint& paint, bool hasText = false)
+ : fDevice(device),
+ fContentEntry(nullptr),
+ fBlendMode(SkBlendMode::kSrcOver),
+ fDstFormXObject(nullptr) {
+ init(draw.fClipStack, draw.fRC->bwRgn(), *draw.fMatrix, paint, hasText);
+ }
+ ScopedContentEntry(SkPDFDevice* device, const SkClipStack* clipStack,
+ const SkRegion& clipRegion, const SkMatrix& matrix,
+ const SkPaint& paint, bool hasText = false)
+ : fDevice(device),
+ fContentEntry(nullptr),
+ fBlendMode(SkBlendMode::kSrcOver),
+ fDstFormXObject(nullptr) {
+ init(clipStack, clipRegion, matrix, paint, hasText);
+ }
+
+ ~ScopedContentEntry() {
+ if (fContentEntry) {
+ SkPath* shape = &fShape;
+ if (shape->isEmpty()) {
+ shape = nullptr;
+ }
+ fDevice->finishContentEntry(fBlendMode, std::move(fDstFormXObject), shape);
+ }
+ }
+
+ SkPDFDevice::ContentEntry* entry() { return fContentEntry; }
+
+ /* Returns true when we explicitly need the shape of the drawing. */
+ bool needShape() {
+ switch (fBlendMode) {
+ case SkBlendMode::kClear:
+ case SkBlendMode::kSrc:
+ case SkBlendMode::kSrcIn:
+ case SkBlendMode::kSrcOut:
+ case SkBlendMode::kDstIn:
+ case SkBlendMode::kDstOut:
+ case SkBlendMode::kSrcATop:
+ case SkBlendMode::kDstATop:
+ case SkBlendMode::kModulate:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /* Returns true unless we only need the shape of the drawing. */
+ bool needSource() {
+ if (fBlendMode == SkBlendMode::kClear) {
+ return false;
+ }
+ return true;
+ }
+
+ /* If the shape is different than the alpha component of the content, then
+ * setShape should be called with the shape. In particular, images and
+ * devices have rectangular shape.
+ */
+ void setShape(const SkPath& shape) {
+ fShape = shape;
+ }
+
+private:
+ SkPDFDevice* fDevice;
+ SkPDFDevice::ContentEntry* fContentEntry;
+ SkBlendMode fBlendMode;
+ sk_sp<SkPDFObject> fDstFormXObject;
+ SkPath fShape;
+
+ void init(const SkClipStack* clipStack, const SkRegion& clipRegion,
+ const SkMatrix& matrix, const SkPaint& paint, bool hasText) {
+        // Shape has to be flattened before we get here.
+ if (matrix.hasPerspective()) {
+ NOT_IMPLEMENTED(!matrix.hasPerspective(), false);
+ return;
+ }
+ fBlendMode = paint.getBlendMode();
+ fContentEntry = fDevice->setUpContentEntry(clipStack, clipRegion,
+ matrix, paint, hasText,
+ &fDstFormXObject);
+ }
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+SkPDFDevice::SkPDFDevice(SkISize pageSize, SkScalar rasterDpi, SkPDFDocument* doc, bool flip)
+ : INHERITED(SkImageInfo::MakeUnknown(pageSize.width(), pageSize.height()),
+ SkSurfaceProps(0, kUnknown_SkPixelGeometry))
+ , fPageSize(pageSize)
+ , fExistingClipRegion(SkIRect::MakeSize(pageSize))
+ , fRasterDpi(rasterDpi)
+ , fDocument(doc) {
+ SkASSERT(pageSize.width() > 0);
+ SkASSERT(pageSize.height() > 0);
+
+ if (flip) {
+ // Skia generally uses the top left as the origin but PDF
+ // natively has the origin at the bottom left. This matrix
+        // corrects for that, but it only needs to be done once; we
+        // don't do it when layering.
+ fInitialTransform.setTranslate(0, SkIntToScalar(pageSize.fHeight));
+ fInitialTransform.preScale(SK_Scalar1, -SK_Scalar1);
+ } else {
+ fInitialTransform.setIdentity();
+ }
+}
+
+SkPDFDevice::~SkPDFDevice() {
+ this->cleanUp();
+}
+
+void SkPDFDevice::init() {
+ fContentEntries.reset();
+}
+
+void SkPDFDevice::cleanUp() {
+ fGraphicStateResources.unrefAll();
+ fXObjectResources.unrefAll();
+ fFontResources.unrefAll();
+ fShaderResources.unrefAll();
+}
+
+void SkPDFDevice::drawAnnotation(const SkDraw& d, const SkRect& rect, const char key[],
+ SkData* value) {
+ if (0 == rect.width() && 0 == rect.height()) {
+ handlePointAnnotation({ rect.x(), rect.y() }, *d.fMatrix, key, value);
+ } else {
+ SkPath path;
+ path.addRect(rect);
+ handlePathAnnotation(path, d, key, value);
+ }
+}
+
+void SkPDFDevice::drawPaint(const SkDraw& d, const SkPaint& paint) {
+ SkPaint newPaint = paint;
+ replace_srcmode_on_opaque_paint(&newPaint);
+
+ newPaint.setStyle(SkPaint::kFill_Style);
+ ScopedContentEntry content(this, d, newPaint);
+ internalDrawPaint(newPaint, content.entry());
+}
+
+void SkPDFDevice::internalDrawPaint(const SkPaint& paint,
+ SkPDFDevice::ContentEntry* contentEntry) {
+ if (!contentEntry) {
+ return;
+ }
+ SkRect bbox = SkRect::MakeWH(SkIntToScalar(this->width()),
+ SkIntToScalar(this->height()));
+ SkMatrix inverse;
+ if (!contentEntry->fState.fMatrix.invert(&inverse)) {
+ return;
+ }
+ inverse.mapRect(&bbox);
+
+ SkPDFUtils::AppendRectangle(bbox, &contentEntry->fContent);
+ SkPDFUtils::PaintPath(paint.getStyle(), SkPath::kWinding_FillType,
+ &contentEntry->fContent);
+}
+
+void SkPDFDevice::drawPoints(const SkDraw& d,
+ SkCanvas::PointMode mode,
+ size_t count,
+ const SkPoint* points,
+ const SkPaint& srcPaint) {
+ SkPaint passedPaint = srcPaint;
+ replace_srcmode_on_opaque_paint(&passedPaint);
+
+ if (count == 0) {
+ return;
+ }
+
+ // SkDraw::drawPoints converts to multiple calls to fDevice->drawPath.
+    // We only take that route when there is a path effect, because of the
+    // overhead of the multiple setUpContentEntry() calls it causes.
+ if (passedPaint.getPathEffect()) {
+ if (d.fRC->isEmpty()) {
+ return;
+ }
+ SkDraw pointDraw(d);
+ pointDraw.fDevice = this;
+ pointDraw.drawPoints(mode, count, points, passedPaint, true);
+ return;
+ }
+
+ const SkPaint* paint = &passedPaint;
+ SkPaint modifiedPaint;
+
+ if (mode == SkCanvas::kPoints_PointMode &&
+ paint->getStrokeCap() != SkPaint::kRound_Cap) {
+ modifiedPaint = *paint;
+ paint = &modifiedPaint;
+ if (paint->getStrokeWidth()) {
+ // PDF won't draw a single point with square/butt caps because the
+ // orientation is ambiguous. Draw a rectangle instead.
+ modifiedPaint.setStyle(SkPaint::kFill_Style);
+ SkScalar strokeWidth = paint->getStrokeWidth();
+ SkScalar halfStroke = SkScalarHalf(strokeWidth);
+ for (size_t i = 0; i < count; i++) {
+ SkRect r = SkRect::MakeXYWH(points[i].fX, points[i].fY, 0, 0);
+ r.inset(-halfStroke, -halfStroke);
+ drawRect(d, r, modifiedPaint);
+ }
+ return;
+ } else {
+ modifiedPaint.setStrokeCap(SkPaint::kRound_Cap);
+ }
+ }
+
+ ScopedContentEntry content(this, d, *paint);
+ if (!content.entry()) {
+ return;
+ }
+
+ switch (mode) {
+ case SkCanvas::kPolygon_PointMode:
+ SkPDFUtils::MoveTo(points[0].fX, points[0].fY,
+ &content.entry()->fContent);
+ for (size_t i = 1; i < count; i++) {
+ SkPDFUtils::AppendLine(points[i].fX, points[i].fY,
+ &content.entry()->fContent);
+ }
+ SkPDFUtils::StrokePath(&content.entry()->fContent);
+ break;
+ case SkCanvas::kLines_PointMode:
+ for (size_t i = 0; i < count/2; i++) {
+ SkPDFUtils::MoveTo(points[i * 2].fX, points[i * 2].fY,
+ &content.entry()->fContent);
+ SkPDFUtils::AppendLine(points[i * 2 + 1].fX,
+ points[i * 2 + 1].fY,
+ &content.entry()->fContent);
+ SkPDFUtils::StrokePath(&content.entry()->fContent);
+ }
+ break;
+ case SkCanvas::kPoints_PointMode:
+ SkASSERT(paint->getStrokeCap() == SkPaint::kRound_Cap);
+ for (size_t i = 0; i < count; i++) {
+ SkPDFUtils::MoveTo(points[i].fX, points[i].fY,
+ &content.entry()->fContent);
+ SkPDFUtils::ClosePath(&content.entry()->fContent);
+ SkPDFUtils::StrokePath(&content.entry()->fContent);
+ }
+ break;
+ default:
+ SkASSERT(false);
+ }
+}
+
+static sk_sp<SkPDFDict> create_link_annotation(const SkRect& translatedRect) {
+ auto annotation = sk_make_sp<SkPDFDict>("Annot");
+ annotation->insertName("Subtype", "Link");
+ annotation->insertInt("F", 4); // required by ISO 19005
+
+ auto border = sk_make_sp<SkPDFArray>();
+ border->reserve(3);
+ border->appendInt(0); // Horizontal corner radius.
+ border->appendInt(0); // Vertical corner radius.
+ border->appendInt(0); // Width, 0 = no border.
+ annotation->insertObject("Border", std::move(border));
+
+ auto rect = sk_make_sp<SkPDFArray>();
+ rect->reserve(4);
+ rect->appendScalar(translatedRect.fLeft);
+ rect->appendScalar(translatedRect.fTop);
+ rect->appendScalar(translatedRect.fRight);
+ rect->appendScalar(translatedRect.fBottom);
+ annotation->insertObject("Rect", std::move(rect));
+
+ return annotation;
+}
+
+static sk_sp<SkPDFDict> create_link_to_url(const SkData* urlData, const SkRect& r) {
+ sk_sp<SkPDFDict> annotation = create_link_annotation(r);
+ SkString url(static_cast<const char *>(urlData->data()),
+ urlData->size() - 1);
+ auto action = sk_make_sp<SkPDFDict>("Action");
+ action->insertName("S", "URI");
+ action->insertString("URI", url);
+ annotation->insertObject("A", std::move(action));
+ return annotation;
+}
+
+static sk_sp<SkPDFDict> create_link_named_dest(const SkData* nameData,
+ const SkRect& r) {
+ sk_sp<SkPDFDict> annotation = create_link_annotation(r);
+ SkString name(static_cast<const char *>(nameData->data()),
+ nameData->size() - 1);
+ annotation->insertName("Dest", name);
+ return annotation;
+}
+
+void SkPDFDevice::drawRect(const SkDraw& d,
+ const SkRect& rect,
+ const SkPaint& srcPaint) {
+ SkPaint paint = srcPaint;
+ replace_srcmode_on_opaque_paint(&paint);
+ SkRect r = rect;
+ r.sort();
+
+ if (paint.getPathEffect()) {
+ if (d.fRC->isEmpty()) {
+ return;
+ }
+ SkPath path;
+ path.addRect(r);
+ drawPath(d, path, paint, nullptr, true);
+ return;
+ }
+
+ ScopedContentEntry content(this, d, paint);
+ if (!content.entry()) {
+ return;
+ }
+ SkPDFUtils::AppendRectangle(r, &content.entry()->fContent);
+ SkPDFUtils::PaintPath(paint.getStyle(), SkPath::kWinding_FillType,
+ &content.entry()->fContent);
+}
+
+void SkPDFDevice::drawRRect(const SkDraw& draw,
+ const SkRRect& rrect,
+ const SkPaint& srcPaint) {
+ SkPaint paint = srcPaint;
+ replace_srcmode_on_opaque_paint(&paint);
+ SkPath path;
+ path.addRRect(rrect);
+ this->drawPath(draw, path, paint, nullptr, true);
+}
+
+void SkPDFDevice::drawOval(const SkDraw& draw,
+ const SkRect& oval,
+ const SkPaint& srcPaint) {
+ SkPaint paint = srcPaint;
+ replace_srcmode_on_opaque_paint(&paint);
+ SkPath path;
+ path.addOval(oval);
+ this->drawPath(draw, path, paint, nullptr, true);
+}
+
+void SkPDFDevice::drawPath(const SkDraw& d,
+ const SkPath& origPath,
+ const SkPaint& srcPaint,
+ const SkMatrix* prePathMatrix,
+ bool pathIsMutable) {
+ SkPaint paint = srcPaint;
+ replace_srcmode_on_opaque_paint(&paint);
+ SkPath modifiedPath;
+ SkPath* pathPtr = const_cast<SkPath*>(&origPath);
+
+ SkMatrix matrix = *d.fMatrix;
+ if (prePathMatrix) {
+ if (paint.getPathEffect() || paint.getStyle() != SkPaint::kFill_Style) {
+ if (!pathIsMutable) {
+ pathPtr = &modifiedPath;
+ pathIsMutable = true;
+ }
+ origPath.transform(*prePathMatrix, pathPtr);
+ } else {
+ matrix.preConcat(*prePathMatrix);
+ }
+ }
+
+ if (paint.getPathEffect()) {
+ if (d.fRC->isEmpty()) {
+ return;
+ }
+ if (!pathIsMutable) {
+ pathPtr = &modifiedPath;
+ pathIsMutable = true;
+ }
+ bool fill = paint.getFillPath(origPath, pathPtr);
+
+ SkPaint noEffectPaint(paint);
+ noEffectPaint.setPathEffect(nullptr);
+ if (fill) {
+ noEffectPaint.setStyle(SkPaint::kFill_Style);
+ } else {
+ noEffectPaint.setStyle(SkPaint::kStroke_Style);
+ noEffectPaint.setStrokeWidth(0);
+ }
+ drawPath(d, *pathPtr, noEffectPaint, nullptr, true);
+ return;
+ }
+
+ if (handleInversePath(d, origPath, paint, pathIsMutable, prePathMatrix)) {
+ return;
+ }
+
+ ScopedContentEntry content(this, d.fClipStack, d.fRC->bwRgn(), matrix, paint);
+ if (!content.entry()) {
+ return;
+ }
+    bool consumeDegeneratePathSegments =
+        paint.getStyle() == SkPaint::kFill_Style ||
+        (paint.getStrokeCap() != SkPaint::kRound_Cap &&
+         paint.getStrokeCap() != SkPaint::kSquare_Cap);
+    SkPDFUtils::EmitPath(*pathPtr, paint.getStyle(),
+                         consumeDegeneratePathSegments,
+ &content.entry()->fContent);
+ SkPDFUtils::PaintPath(paint.getStyle(), pathPtr->getFillType(),
+ &content.entry()->fContent);
+}
+
+
+void SkPDFDevice::drawImageRect(const SkDraw& d,
+ const SkImage* image,
+ const SkRect* src,
+ const SkRect& dst,
+ const SkPaint& srcPaint,
+ SkCanvas::SrcRectConstraint) {
+ if (!image) {
+ return;
+ }
+ SkIRect bounds = image->bounds();
+ SkPaint paint = srcPaint;
+ if (image->isOpaque()) {
+ replace_srcmode_on_opaque_paint(&paint);
+ }
+ SkRect srcRect = src ? *src : SkRect::Make(bounds);
+ SkMatrix transform;
+ transform.setRectToRect(srcRect, dst, SkMatrix::kFill_ScaleToFit);
+ if (src) {
+ if (!srcRect.intersect(SkRect::Make(bounds))) {
+ return;
+ }
+ srcRect.roundOut(&bounds);
+ transform.preTranslate(SkIntToScalar(bounds.x()),
+ SkIntToScalar(bounds.y()));
+ }
+ SkImageSubset imageSubset(sk_ref_sp(const_cast<SkImage*>(image)), bounds);
+ if (!imageSubset.isValid()) {
+ return;
+ }
+ transform.postConcat(*d.fMatrix);
+ this->internalDrawImage(transform, d.fClipStack, d.fRC->bwRgn(),
+ std::move(imageSubset), paint);
+}
+
+void SkPDFDevice::drawBitmapRect(const SkDraw& d,
+ const SkBitmap& bitmap,
+ const SkRect* src,
+ const SkRect& dst,
+ const SkPaint& srcPaint,
+ SkCanvas::SrcRectConstraint) {
+ if (bitmap.drawsNothing()) {
+ return;
+ }
+ SkIRect bounds = bitmap.bounds();
+ SkPaint paint = srcPaint;
+ if (bitmap.isOpaque()) {
+ replace_srcmode_on_opaque_paint(&paint);
+ }
+ SkRect srcRect = src ? *src : SkRect::Make(bounds);
+ SkMatrix transform;
+ transform.setRectToRect(srcRect, dst, SkMatrix::kFill_ScaleToFit);
+ if (src) {
+ if (!srcRect.intersect(SkRect::Make(bounds))) {
+ return;
+ }
+ srcRect.roundOut(&bounds);
+ transform.preTranslate(SkIntToScalar(bounds.x()),
+ SkIntToScalar(bounds.y()));
+ }
+ SkBitmap bitmapSubset;
+ if (!bitmap.extractSubset(&bitmapSubset, bounds)) {
+ return;
+ }
+ SkImageSubset imageSubset = make_image_subset(bitmapSubset);
+ if (!imageSubset.isValid()) {
+ return;
+ }
+ transform.postConcat(*d.fMatrix);
+ this->internalDrawImage(transform, d.fClipStack, d.fRC->bwRgn(),
+ std::move(imageSubset), paint);
+}
+
+void SkPDFDevice::drawBitmap(const SkDraw& d,
+ const SkBitmap& bitmap,
+ const SkMatrix& matrix,
+ const SkPaint& srcPaint) {
+ if (bitmap.drawsNothing() || d.fRC->isEmpty()) {
+ return;
+ }
+ SkPaint paint = srcPaint;
+ if (bitmap.isOpaque()) {
+ replace_srcmode_on_opaque_paint(&paint);
+ }
+ SkImageSubset imageSubset = make_image_subset(bitmap);
+ if (!imageSubset.isValid()) {
+ return;
+ }
+ SkMatrix transform = matrix;
+ transform.postConcat(*d.fMatrix);
+ this->internalDrawImage(
+ transform, d.fClipStack, d.fRC->bwRgn(), std::move(imageSubset), paint);
+}
+
+void SkPDFDevice::drawSprite(const SkDraw& d,
+ const SkBitmap& bitmap,
+ int x,
+ int y,
+ const SkPaint& srcPaint) {
+ if (bitmap.drawsNothing() || d.fRC->isEmpty()) {
+ return;
+ }
+ SkPaint paint = srcPaint;
+ if (bitmap.isOpaque()) {
+ replace_srcmode_on_opaque_paint(&paint);
+ }
+ SkImageSubset imageSubset = make_image_subset(bitmap);
+ if (!imageSubset.isValid()) {
+ return;
+ }
+ SkMatrix transform = SkMatrix::MakeTrans(SkIntToScalar(x), SkIntToScalar(y));
+ this->internalDrawImage(
+ transform, d.fClipStack, d.fRC->bwRgn(), std::move(imageSubset), paint);
+}
+
+void SkPDFDevice::drawImage(const SkDraw& draw,
+ const SkImage* image,
+ SkScalar x,
+ SkScalar y,
+ const SkPaint& srcPaint) {
+ SkPaint paint = srcPaint;
+ if (!image) {
+ return;
+ }
+ if (image->isOpaque()) {
+ replace_srcmode_on_opaque_paint(&paint);
+ }
+ if (draw.fRC->isEmpty()) {
+ return;
+ }
+ SkImageSubset imageSubset(sk_ref_sp(const_cast<SkImage*>(image)));
+ if (!imageSubset.isValid()) {
+ return;
+ }
+ SkMatrix transform = SkMatrix::MakeTrans(x, y);
+ transform.postConcat(*draw.fMatrix);
+ this->internalDrawImage(
+ transform, draw.fClipStack, draw.fRC->bwRgn(), std::move(imageSubset), paint);
+}
+
+namespace {
+class GlyphPositioner {
+public:
+ GlyphPositioner(SkDynamicMemoryWStream* content,
+ SkScalar textSkewX,
+ bool wideChars,
+ bool defaultPositioning,
+ SkPoint origin)
+ : fContent(content)
+ , fCurrentMatrixOrigin(origin)
+ , fTextSkewX(textSkewX)
+ , fWideChars(wideChars)
+ , fDefaultPositioning(defaultPositioning) {
+ }
+ ~GlyphPositioner() { this->flush(); }
+ void flush() {
+ if (fInText) {
+ fContent->writeText("> Tj\n");
+ fInText = false;
+ }
+ }
+ void writeGlyph(SkPoint xy,
+ SkScalar advanceWidth,
+ uint16_t glyph) {
+ if (!fInitialized) {
+ // Flip the text about the x-axis to account for origin swap and include
+ // the passed parameters.
+ fContent->writeText("1 0 ");
+ SkPDFUtils::AppendScalar(-fTextSkewX, fContent);
+ fContent->writeText(" -1 ");
+ SkPDFUtils::AppendScalar(fCurrentMatrixOrigin.x(), fContent);
+ fContent->writeText(" ");
+ SkPDFUtils::AppendScalar(fCurrentMatrixOrigin.y(), fContent);
+ fContent->writeText(" Tm\n");
+ fCurrentMatrixOrigin.set(0.0f, 0.0f);
+ fInitialized = true;
+ }
+ if (!fDefaultPositioning) {
+ SkPoint position = xy - fCurrentMatrixOrigin;
+ if (position != SkPoint{fXAdvance, 0}) {
+ this->flush();
+ SkPDFUtils::AppendScalar(position.x(), fContent);
+ fContent->writeText(" ");
+ SkPDFUtils::AppendScalar(-position.y(), fContent);
+ fContent->writeText(" Td ");
+ fCurrentMatrixOrigin = xy;
+ fXAdvance = 0;
+ }
+ fXAdvance += advanceWidth;
+ }
+ if (!fInText) {
+ fContent->writeText("<");
+ fInText = true;
+ }
+ if (fWideChars) {
+ SkPDFUtils::WriteUInt16BE(fContent, glyph);
+ } else {
+ SkASSERT(0 == glyph >> 8);
+ SkPDFUtils::WriteUInt8(fContent, static_cast<uint8_t>(glyph));
+ }
+ }
+
+private:
+ SkDynamicMemoryWStream* fContent;
+ SkPoint fCurrentMatrixOrigin;
+ SkScalar fXAdvance = 0.0f;
+ SkScalar fTextSkewX;
+ bool fWideChars;
+ bool fInText = false;
+ bool fInitialized = false;
+ const bool fDefaultPositioning;
+};
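// The operator stream produced by GlyphPositioner::writeGlyph() looks roughly
// like this (illustrative values, wide-char mode):
//
//   1 0 0 -1 20 100 Tm        % first glyph: flipped text matrix at the origin
//   <004500660069> Tj         % default positioning: one run, default advances
//   5 -2 Td <0042> Tj         % per-glyph positioning: a Td is emitted whenever
//                             % a glyph is not where the accumulated x-advance
//                             % predicted it would be
//
// flush() closes any open "<...> Tj" run, and the destructor calls flush(), so
// a positioner can simply be left to go out of scope.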
+
+/** Given the m-to-n glyph-to-character mapping data (as returned by
+    HarfBuzz), iterate over the clusters. */
+class Clusterator {
+public:
+ Clusterator() : fClusters(nullptr), fUtf8Text(nullptr), fGlyphCount(0), fTextByteLength(0) {}
+ explicit Clusterator(uint32_t glyphCount)
+ : fClusters(nullptr)
+ , fUtf8Text(nullptr)
+ , fGlyphCount(glyphCount)
+ , fTextByteLength(0) {}
+ // The clusters[] array is an array of offsets into utf8Text[],
+ // one offset for each glyph. See SkTextBlobBuilder for more info.
+ Clusterator(const uint32_t* clusters,
+ const char* utf8Text,
+ uint32_t glyphCount,
+ uint32_t textByteLength)
+ : fClusters(clusters)
+ , fUtf8Text(utf8Text)
+ , fGlyphCount(glyphCount)
+ , fTextByteLength(textByteLength) {
+ // This is a cheap heuristic for /ReversedChars, which seems to
+ // work for clusters produced by HarfBuzz, whose offsets either
+ // increase from zero (LTR) or decrease to zero (RTL).
+ // "ReversedChars" is how PDF deals with RTL text.
+ fReversedChars =
+ fUtf8Text && fClusters && fGlyphCount && fClusters[0] != 0;
+ }
+ struct Cluster {
+ const char* fUtf8Text;
+ uint32_t fTextByteLength;
+ uint32_t fGlyphIndex;
+ uint32_t fGlyphCount;
+ explicit operator bool() const { return fGlyphCount != 0; }
+ };
+ // True if this looks like right-to-left text.
+ bool reversedChars() const { return fReversedChars; }
+ Cluster next() {
+ if ((!fUtf8Text || !fClusters) && fGlyphCount) {
+ // These glyphs have no text. Treat as one "cluster".
+ uint32_t glyphCount = fGlyphCount;
+ fGlyphCount = 0;
+ return Cluster{nullptr, 0, 0, glyphCount};
+ }
+ if (fGlyphCount == 0 || fTextByteLength == 0) {
+ return Cluster{nullptr, 0, 0, 0}; // empty
+ }
+ SkASSERT(fUtf8Text);
+ SkASSERT(fClusters);
+ uint32_t cluster = fClusters[0];
+ if (cluster >= fTextByteLength) {
+ return Cluster{nullptr, 0, 0, 0}; // bad input.
+ }
+ uint32_t glyphsInCluster = 1;
+ while (glyphsInCluster < fGlyphCount &&
+ fClusters[glyphsInCluster] == cluster) {
+ ++glyphsInCluster;
+ }
+ SkASSERT(glyphsInCluster <= fGlyphCount);
+ uint32_t textLength = 0;
+ if (glyphsInCluster == fGlyphCount) {
+ // consumes rest of glyphs and rest of text
+ if (kInvalidCluster == fPreviousCluster) { // LTR text or single cluster
+ textLength = fTextByteLength - cluster;
+ } else { // RTL text; last cluster.
+ SkASSERT(fPreviousCluster < fTextByteLength);
+ if (fPreviousCluster <= cluster) { // bad input.
+ return Cluster{nullptr, 0, 0, 0};
+ }
+ textLength = fPreviousCluster - cluster;
+ }
+ fGlyphCount = 0;
+ return Cluster{fUtf8Text + cluster,
+ textLength,
+ fGlyphIndex,
+ glyphsInCluster};
+ }
+ SkASSERT(glyphsInCluster < fGlyphCount);
+ uint32_t nextCluster = fClusters[glyphsInCluster];
+ if (nextCluster >= fTextByteLength) {
+ return Cluster{nullptr, 0, 0, 0}; // bad input.
+ }
+ if (nextCluster > cluster) { // LTR text
+ if (kInvalidCluster != fPreviousCluster) {
+ return Cluster{nullptr, 0, 0, 0}; // bad input.
+ }
+ textLength = nextCluster - cluster;
+ } else { // RTL text
+ SkASSERT(nextCluster < cluster);
+ if (kInvalidCluster == fPreviousCluster) { // first cluster
+ textLength = fTextByteLength - cluster;
+ } else { // later cluster
+ if (fPreviousCluster <= cluster) {
+ return Cluster{nullptr, 0, 0, 0}; // bad input.
+ }
+ textLength = fPreviousCluster - cluster;
+ }
+ fPreviousCluster = cluster;
+ }
+ uint32_t glyphIndex = fGlyphIndex;
+ fGlyphCount -= glyphsInCluster;
+ fGlyphIndex += glyphsInCluster;
+ fClusters += glyphsInCluster;
+ return Cluster{fUtf8Text + cluster,
+ textLength,
+ glyphIndex,
+ glyphsInCluster};
+ }
+
+private:
+ static constexpr uint32_t kInvalidCluster = 0xFFFFFFFF;
+ const uint32_t* fClusters;
+ const char* fUtf8Text;
+ uint32_t fGlyphCount;
+ uint32_t fTextByteLength;
+ uint32_t fGlyphIndex = 0;
+ uint32_t fPreviousCluster = kInvalidCluster;
+ bool fReversedChars = false;
+};
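+
+// A minimal usage sketch: for a hypothetical "fi" ligature over the UTF-8 text
+// "fit", one glyph covers bytes 0-1 and the next covers byte 2.
+//
+//   const uint32_t clusters[] = {0, 2};
+//   Clusterator iter(clusters, "fit", 2, 3);
+//   Clusterator::Cluster c = iter.next();  // {"fi", 2 bytes, glyph 0, 1 glyph}
+//   c = iter.next();                       // {"t",  1 byte,  glyph 1, 1 glyph}
+//   c = iter.next();                       // empty: iteration is done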
+
+struct TextStorage {
+ SkAutoTMalloc<char> fUtf8textStorage;
+ SkAutoTMalloc<uint32_t> fClusterStorage;
+ SkAutoTMalloc<SkGlyphID> fGlyphStorage;
+};
+} // namespace
+
+/** Given some Unicode text (as passed to drawText()), convert it to
+ glyphs (via primitive shaping), while preserving
+ glyph-to-character mapping information. */
+static Clusterator make_clusterator(
+ const void* sourceText,
+ size_t sourceByteCount,
+ const SkPaint& paint,
+ TextStorage* storage,
+ int glyphCount) {
+ SkASSERT(SkPaint::kGlyphID_TextEncoding != paint.getTextEncoding());
+ SkASSERT(glyphCount == paint.textToGlyphs(sourceText, sourceByteCount, nullptr));
+ SkASSERT(glyphCount > 0);
+ storage->fGlyphStorage.reset(SkToSizeT(glyphCount));
+ (void)paint.textToGlyphs(sourceText, sourceByteCount, storage->fGlyphStorage.get());
+ storage->fClusterStorage.reset(SkToSizeT(glyphCount));
+ uint32_t* clusters = storage->fClusterStorage.get();
+ uint32_t utf8ByteCount = 0;
+ const char* utf8Text = nullptr;
+ switch (paint.getTextEncoding()) {
+ case SkPaint::kUTF8_TextEncoding: {
+ const char* txtPtr = (const char*)sourceText;
+ for (int i = 0; i < glyphCount; ++i) {
+ clusters[i] = SkToU32(txtPtr - (const char*)sourceText);
+ txtPtr += SkUTF8_LeadByteToCount(*(const unsigned char*)txtPtr);
+ SkASSERT(txtPtr <= (const char*)sourceText + sourceByteCount);
+ }
+ SkASSERT(txtPtr == (const char*)sourceText + sourceByteCount);
+ utf8ByteCount = SkToU32(sourceByteCount);
+ utf8Text = (const char*)sourceText;
+ break;
+ }
+ case SkPaint::kUTF16_TextEncoding: {
+ const uint16_t* utf16ptr = (const uint16_t*)sourceText;
+ int utf16count = SkToInt(sourceByteCount / sizeof(uint16_t));
+ utf8ByteCount = SkToU32(SkUTF16_ToUTF8(utf16ptr, utf16count));
+ storage->fUtf8textStorage.reset(utf8ByteCount);
+ char* txtPtr = storage->fUtf8textStorage.get();
+ utf8Text = txtPtr;
+ int clusterIndex = 0;
+ while (utf16ptr < (const uint16_t*)sourceText + utf16count) {
+ clusters[clusterIndex++] = SkToU32(txtPtr - utf8Text);
+ SkUnichar uni = SkUTF16_NextUnichar(&utf16ptr);
+ txtPtr += SkUTF8_FromUnichar(uni, txtPtr);
+ }
+ SkASSERT(clusterIndex == glyphCount);
+ SkASSERT(txtPtr == storage->fUtf8textStorage.get() + utf8ByteCount);
+ SkASSERT(utf16ptr == (const uint16_t*)sourceText + utf16count);
+ break;
+ }
+ case SkPaint::kUTF32_TextEncoding: {
+ const SkUnichar* utf32 = (const SkUnichar*)sourceText;
+ int utf32count = SkToInt(sourceByteCount / sizeof(SkUnichar));
+ SkASSERT(glyphCount == utf32count);
+ for (int i = 0; i < utf32count; ++i) {
+ utf8ByteCount += SkToU32(SkUTF8_FromUnichar(utf32[i]));
+ }
+ storage->fUtf8textStorage.reset(SkToSizeT(utf8ByteCount));
+ char* txtPtr = storage->fUtf8textStorage.get();
+ utf8Text = txtPtr;
+ for (int i = 0; i < utf32count; ++i) {
+ clusters[i] = SkToU32(txtPtr - utf8Text);
+ txtPtr += SkUTF8_FromUnichar(utf32[i], txtPtr);
+ }
+ break;
+ }
+ default:
+ SkDEBUGFAIL("");
+ break;
+ }
+ return Clusterator(clusters, utf8Text, SkToU32(glyphCount), utf8ByteCount);
+}
+
+static SkUnichar map_glyph(const SkTDArray<SkUnichar>& glyphToUnicode, SkGlyphID glyph) {
+ return SkToInt(glyph) < glyphToUnicode.count() ? glyphToUnicode[SkToInt(glyph)] : -1;
+}
+
+static void update_font(SkWStream* wStream, int fontIndex, SkScalar textSize) {
+ wStream->writeText("/");
+ char prefix = SkPDFResourceDict::GetResourceTypePrefix(SkPDFResourceDict::kFont_ResourceType);
+ wStream->write(&prefix, 1);
+ wStream->writeDecAsText(fontIndex);
+ wStream->writeText(" ");
+ SkPDFUtils::AppendScalar(textSize, wStream);
+ wStream->writeText(" Tf\n");
+}
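+
+// For example, update_font(out, 3, 12) writes "/F3 12 Tf" (assuming the usual
+// 'F' prefix for font resources), selecting font resource F3 at text size 12.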
+
+void SkPDFDevice::internalDrawText(
+ const SkDraw& d, const void* sourceText, size_t sourceByteCount,
+ const SkScalar pos[], SkTextBlob::GlyphPositioning positioning,
+ SkPoint offset, const SkPaint& srcPaint, const uint32_t* clusters,
+ uint32_t textByteLength, const char* utf8Text) {
+ NOT_IMPLEMENTED(srcPaint.getMaskFilter() != nullptr, false);
+ if (srcPaint.getMaskFilter() != nullptr) {
+ // Don't pretend we support drawing MaskFilters; it produces artifacts
+ // that make text unreadable (e.g. the same text twice when using CSS shadows).
+ return;
+ }
+ NOT_IMPLEMENTED(srcPaint.isVerticalText(), false);
+ if (srcPaint.isVerticalText()) {
+ // Don't pretend we support drawing vertical text. It is not
+ // clear to me how to switch to "vertical writing" mode in PDF.
+ // Currently neither Chromium nor Android sets this flag.
+ // https://bug.skia.org/5665
+ return;
+ }
+ if (0 == sourceByteCount || !sourceText) {
+ return;
+ }
+ SkPaint paint = calculate_text_paint(srcPaint);
+ replace_srcmode_on_opaque_paint(&paint);
+ if (!paint.getTypeface()) {
+ paint.setTypeface(SkTypeface::MakeDefault());
+ }
+ SkTypeface* typeface = paint.getTypeface();
+ if (!typeface) {
+ SkDebugf("SkPDF: SkTypeface::MakeDefault() returned nullptr.\n");
+ return;
+ }
+
+ const SkAdvancedTypefaceMetrics* metrics =
+ SkPDFFont::GetMetrics(typeface, fDocument->canon());
+ if (!metrics) {
+ return;
+ }
+ int glyphCount = paint.textToGlyphs(sourceText, sourceByteCount, nullptr);
+ if (glyphCount <= 0) {
+ return;
+ }
+
+ // These three heap buffers are only used in the case where no glyphs
+ // are passed to drawText() (most clients pass glyphs or a textblob).
+ TextStorage storage;
+ const SkGlyphID* glyphs = nullptr;
+ Clusterator clusterator;
+ if (textByteLength > 0) {
+ SkASSERT(glyphCount == SkToInt(sourceByteCount / sizeof(SkGlyphID)));
+ glyphs = (const SkGlyphID*)sourceText;
+ clusterator = Clusterator(clusters, utf8Text, SkToU32(glyphCount), textByteLength);
+ SkASSERT(clusters);
+ SkASSERT(utf8Text);
+ SkASSERT(srcPaint.getTextEncoding() == SkPaint::kGlyphID_TextEncoding);
+ SkASSERT(glyphCount == paint.textToGlyphs(sourceText, sourceByteCount, nullptr));
+ } else if (SkPaint::kGlyphID_TextEncoding == srcPaint.getTextEncoding()) {
+ SkASSERT(glyphCount == SkToInt(sourceByteCount / sizeof(SkGlyphID)));
+ glyphs = (const SkGlyphID*)sourceText;
+ clusterator = Clusterator(SkToU32(glyphCount));
+ SkASSERT(glyphCount == paint.textToGlyphs(sourceText, sourceByteCount, nullptr));
+ SkASSERT(nullptr == clusters);
+ SkASSERT(nullptr == utf8Text);
+ } else {
+ SkASSERT(nullptr == clusters);
+ SkASSERT(nullptr == utf8Text);
+ clusterator = make_clusterator(sourceText, sourceByteCount, srcPaint,
+ &storage, glyphCount);
+ glyphs = storage.fGlyphStorage;
+ }
+ bool defaultPositioning = (positioning == SkTextBlob::kDefault_Positioning);
+ paint.setHinting(SkPaint::kNo_Hinting);
+ SkAutoGlyphCache glyphCache(paint, nullptr, nullptr);
+
+ SkPaint::Align alignment = paint.getTextAlign();
+ float alignmentFactor = SkPaint::kLeft_Align == alignment ? 0.0f :
+ SkPaint::kCenter_Align == alignment ? -0.5f :
+ /* SkPaint::kRight_Align */ -1.0f;
+ if (defaultPositioning && alignment != SkPaint::kLeft_Align) {
+ SkScalar advance = 0;
+ for (int i = 0; i < glyphCount; ++i) {
+ advance += glyphCache->getGlyphIDAdvance(glyphs[i]).fAdvanceX;
+ }
+ offset.offset(alignmentFactor * advance, 0);
+ }
+ ScopedContentEntry content(this, d, paint, true);
+ if (!content.entry()) {
+ return;
+ }
+ SkDynamicMemoryWStream* out = &content.entry()->fContent;
+ SkScalar textSize = paint.getTextSize();
+ const SkTDArray<SkUnichar>& glyphToUnicode = metrics->fGlyphToUnicode;
+
+ out->writeText("BT\n");
+ SK_AT_SCOPE_EXIT(out->writeText("ET\n"));
+
+ const SkGlyphID maxGlyphID = metrics->fLastGlyphID;
+ bool multiByteGlyphs = SkPDFFont::IsMultiByte(SkPDFFont::FontType(*metrics));
+ if (clusterator.reversedChars()) {
+ out->writeText("/ReversedChars BMC\n");
+ }
+ SK_AT_SCOPE_EXIT(if (clusterator.reversedChars()) { out->writeText("EMC\n"); } );
+ GlyphPositioner glyphPositioner(out,
+ paint.getTextSkewX(),
+ multiByteGlyphs,
+ defaultPositioning,
+ offset);
+ SkPDFFont* font = nullptr;
+
+ while (Clusterator::Cluster c = clusterator.next()) {
+ int index = c.fGlyphIndex;
+ int glyphLimit = index + c.fGlyphCount;
+
+ bool actualText = false;
+ SK_AT_SCOPE_EXIT(if (actualText) { glyphPositioner.flush(); out->writeText("EMC\n"); } );
+ if (c.fUtf8Text) { // real cluster
+ // Check if `/ActualText` is needed.
+ const char* textPtr = c.fUtf8Text;
+ // TODO(halcanary): validate utf8 input.
+ SkUnichar unichar = SkUTF8_NextUnichar(&textPtr);
+ const char* textEnd = c.fUtf8Text + c.fTextByteLength;
+ if (textPtr < textEnd || // more characters left
+ glyphLimit > index + 1 || // toUnicode wouldn't work
+ unichar != map_glyph(glyphToUnicode, glyphs[index])) // test single Unichar map
+ {
+ glyphPositioner.flush();
+ out->writeText("/Span<</ActualText <");
+ SkPDFUtils::WriteUTF16beHex(out, 0xFEFF); // U+FEFF = BYTE ORDER MARK
+ // the BOM marks this text as UTF-16BE, not PDFDocEncoding.
+ SkPDFUtils::WriteUTF16beHex(out, unichar); // first char
+ while (textPtr < textEnd) {
+ unichar = SkUTF8_NextUnichar(&textPtr);
+ SkPDFUtils::WriteUTF16beHex(out, unichar);
+ }
+ out->writeText("> >> BDC\n"); // begin marked-content sequence
+ // with an associated property list.
+ actualText = true;
+ }
+ }
+ for (; index < glyphLimit; ++index) {
+ SkGlyphID gid = glyphs[index];
+ if (gid > maxGlyphID) {
+ continue;
+ }
+ if (!font || !font->hasGlyph(gid)) {
+ // Font not yet specified, or we need to switch fonts.
+ int fontIndex = this->getFontResourceIndex(typeface, gid);
+ // All preconditions for SkPDFFont::GetFontResource are met.
+ SkASSERT(fontIndex >= 0);
+ if (fontIndex < 0) {
+ return;
+ }
+ glyphPositioner.flush();
+ update_font(out, fontIndex, textSize);
+ font = fFontResources[fontIndex];
+ SkASSERT(font); // All preconditions for SkPDFFont::GetFontResource are met.
+ if (!font) {
+ return;
+ }
+ SkASSERT(font->multiByteGlyphs() == multiByteGlyphs);
+ }
+ SkPoint xy{0, 0};
+ SkScalar advance{0};
+ if (!defaultPositioning) {
+ advance = glyphCache->getGlyphIDAdvance(gid).fAdvanceX;
+ xy = SkTextBlob::kFull_Positioning == positioning
+ ? SkPoint{pos[2 * index], pos[2 * index + 1]}
+ : SkPoint{pos[index], 0};
+ if (alignment != SkPaint::kLeft_Align) {
+ xy.offset(alignmentFactor * advance, 0);
+ }
+ }
+ font->noteGlyphUsage(gid);
+ SkGlyphID encodedGlyph = multiByteGlyphs ? gid : font->glyphToPDFFontEncoding(gid);
+ glyphPositioner.writeGlyph(xy, advance, encodedGlyph);
+ }
+ }
+}
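+
+// Taken together, the text object emitted above has roughly this shape for a
+// simple run (one font switch, per-glyph positioning; values illustrative):
+//
+//   BT
+//   /F0 12 Tf
+//   1 0 0 -1 0 0 Tm
+//   10 0 Td <0024> Tj
+//   ...
+//   ET
+//
+// with "/ReversedChars BMC ... EMC" wrapped around RTL runs, and
+// "/Span<</ActualText <FEFF...> >> BDC ... EMC" wrapped around clusters whose
+// text cannot be recovered from the font's ToUnicode mapping alone.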
+
+void SkPDFDevice::drawText(const SkDraw& d, const void* text, size_t len,
+ SkScalar x, SkScalar y, const SkPaint& paint) {
+ this->internalDrawText(d, text, len, nullptr, SkTextBlob::kDefault_Positioning,
+ SkPoint{x, y}, paint, nullptr, 0, nullptr);
+}
+
+void SkPDFDevice::drawPosText(const SkDraw& d, const void* text, size_t len,
+ const SkScalar pos[], int scalarsPerPos,
+ const SkPoint& offset, const SkPaint& paint) {
+ this->internalDrawText(d, text, len, pos, (SkTextBlob::GlyphPositioning)scalarsPerPos,
+ offset, paint, nullptr, 0, nullptr);
+}
+
+void SkPDFDevice::drawTextBlob(const SkDraw& draw, const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint &paint, SkDrawFilter* drawFilter) {
+ for (SkTextBlobRunIterator it(blob); !it.done(); it.next()) {
+ SkPaint runPaint(paint);
+ it.applyFontToPaint(&runPaint);
+ if (drawFilter && !drawFilter->filter(&runPaint, SkDrawFilter::kText_Type)) {
+ continue;
+ }
+ runPaint.setFlags(this->filterTextFlags(runPaint));
+ SkPoint offset = it.offset() + SkPoint{x, y};
+ this->internalDrawText(draw, it.glyphs(), sizeof(SkGlyphID) * it.glyphCount(),
+ it.pos(), it.positioning(), offset, runPaint,
+ it.clusters(), it.textSize(), it.text());
+ }
+}
+
+void SkPDFDevice::drawVertices(const SkDraw& d, SkCanvas::VertexMode,
+ int vertexCount, const SkPoint verts[],
+ const SkPoint texs[], const SkColor colors[],
+ SkXfermode* xmode, const uint16_t indices[],
+ int indexCount, const SkPaint& paint) {
+ if (d.fRC->isEmpty()) {
+ return;
+ }
+ // TODO: implement drawVertices
+}
+
+void SkPDFDevice::drawDevice(const SkDraw& d, SkBaseDevice* device,
+ int x, int y, const SkPaint& paint) {
+ SkASSERT(!paint.getImageFilter());
+
+ // Check if the source device is really a bitmap device (because that's what we
+ // returned from createDevice, likely due to an image filter).
+ SkPixmap pmap;
+ if (device->peekPixels(&pmap)) {
+ SkBitmap bitmap;
+ bitmap.installPixels(pmap);
+ this->drawSprite(d, bitmap, x, y, paint);
+ return;
+ }
+
+ // our onCreateCompatibleDevice() always creates SkPDFDevice subclasses.
+ SkPDFDevice* pdfDevice = static_cast<SkPDFDevice*>(device);
+
+ SkScalar scalarX = SkIntToScalar(x);
+ SkScalar scalarY = SkIntToScalar(y);
+ for (const RectWithData& l : pdfDevice->fLinkToURLs) {
+ SkRect r = l.rect.makeOffset(scalarX, scalarY);
+ fLinkToURLs.emplace_back(r, l.data.get());
+ }
+ for (const RectWithData& l : pdfDevice->fLinkToDestinations) {
+ SkRect r = l.rect.makeOffset(scalarX, scalarY);
+ fLinkToDestinations.emplace_back(r, l.data.get());
+ }
+ for (const NamedDestination& d : pdfDevice->fNamedDestinations) {
+ SkPoint p = d.point + SkPoint::Make(scalarX, scalarY);
+ fNamedDestinations.emplace_back(d.nameData.get(), p);
+ }
+
+ if (pdfDevice->isContentEmpty()) {
+ return;
+ }
+
+ SkMatrix matrix;
+ matrix.setTranslate(SkIntToScalar(x), SkIntToScalar(y));
+ ScopedContentEntry content(this, d.fClipStack, d.fRC->bwRgn(), matrix, paint);
+ if (!content.entry()) {
+ return;
+ }
+ if (content.needShape()) {
+ SkPath shape;
+ shape.addRect(SkRect::MakeXYWH(SkIntToScalar(x), SkIntToScalar(y),
+ SkIntToScalar(device->width()),
+ SkIntToScalar(device->height())));
+ content.setShape(shape);
+ }
+ if (!content.needSource()) {
+ return;
+ }
+
+ sk_sp<SkPDFObject> xObject = pdfDevice->makeFormXObjectFromDevice();
+ SkPDFUtils::DrawFormXObject(this->addXObjectResource(xObject.get()),
+ &content.entry()->fContent);
+}
+
+sk_sp<SkSurface> SkPDFDevice::makeSurface(const SkImageInfo& info, const SkSurfaceProps& props) {
+ return SkSurface::MakeRaster(info, &props);
+}
+
+
+sk_sp<SkPDFDict> SkPDFDevice::makeResourceDict() const {
+ SkTDArray<SkPDFObject*> fonts;
+ fonts.setReserve(fFontResources.count());
+ for (SkPDFFont* font : fFontResources) {
+ fonts.push(font);
+ }
+ return SkPDFResourceDict::Make(
+ &fGraphicStateResources,
+ &fShaderResources,
+ &fXObjectResources,
+ &fonts);
+}
+
+sk_sp<SkPDFArray> SkPDFDevice::copyMediaBox() const {
+ auto mediaBox = sk_make_sp<SkPDFArray>();
+ mediaBox->reserve(4);
+ mediaBox->appendInt(0);
+ mediaBox->appendInt(0);
+ mediaBox->appendInt(fPageSize.width());
+ mediaBox->appendInt(fPageSize.height());
+ return mediaBox;
+}
+
+std::unique_ptr<SkStreamAsset> SkPDFDevice::content() const {
+ SkDynamicMemoryWStream buffer;
+ if (fInitialTransform.getType() != SkMatrix::kIdentity_Mask) {
+ SkPDFUtils::AppendTransform(fInitialTransform, &buffer);
+ }
+
+ GraphicStackState gsState(fExistingClipStack, fExistingClipRegion, &buffer);
+ for (const auto& entry : fContentEntries) {
+ SkPoint translation;
+ translation.iset(this->getOrigin());
+ translation.negate();
+ gsState.updateClip(entry.fState.fClipStack, entry.fState.fClipRegion,
+ translation);
+ gsState.updateMatrix(entry.fState.fMatrix);
+ gsState.updateDrawingState(entry.fState);
+
+ entry.fContent.writeToStream(&buffer);
+ }
+ gsState.drainStack();
+ if (buffer.bytesWritten() > 0) {
+ return std::unique_ptr<SkStreamAsset>(buffer.detachAsStream());
+ } else {
+ return skstd::make_unique<SkMemoryStream>();
+ }
+}
+
+/* Draws an inverse filled path by using Path Ops to compute the positive
+ * inverse using the current clip as the inverse bounds.
+ * Returns true if this was an inverse path that was properly handled;
+ * otherwise returns false and the normal drawing routine should continue,
+ * either as an (incorrect) fallback or because the path was not inverse
+ * in the first place.
+ */
+bool SkPDFDevice::handleInversePath(const SkDraw& d, const SkPath& origPath,
+ const SkPaint& paint, bool pathIsMutable,
+ const SkMatrix* prePathMatrix) {
+ if (!origPath.isInverseFillType()) {
+ return false;
+ }
+
+ if (d.fRC->isEmpty()) {
+ return false;
+ }
+
+ SkPath modifiedPath;
+ SkPath* pathPtr = const_cast<SkPath*>(&origPath);
+ SkPaint noInversePaint(paint);
+
+ // Merge stroking operations into final path.
+ if (SkPaint::kStroke_Style == paint.getStyle() ||
+ SkPaint::kStrokeAndFill_Style == paint.getStyle()) {
+ bool doFillPath = paint.getFillPath(origPath, &modifiedPath);
+ if (doFillPath) {
+ noInversePaint.setStyle(SkPaint::kFill_Style);
+ noInversePaint.setStrokeWidth(0);
+ pathPtr = &modifiedPath;
+ } else {
+ // To be consistent with the raster output, hairline strokes
+ // are rendered as non-inverted.
+ modifiedPath.toggleInverseFillType();
+ drawPath(d, modifiedPath, paint, nullptr, true);
+ return true;
+ }
+ }
+
+ // Get bounds of clip in current transform space
+ // (clip bounds are given in device space).
+ SkRect bounds;
+ SkMatrix transformInverse;
+ SkMatrix totalMatrix = *d.fMatrix;
+ if (prePathMatrix) {
+ totalMatrix.preConcat(*prePathMatrix);
+ }
+ if (!totalMatrix.invert(&transformInverse)) {
+ return false;
+ }
+ bounds.set(d.fRC->getBounds());
+ transformInverse.mapRect(&bounds);
+
+ // Extend the bounds by the line width (plus some padding)
+ // so the edge doesn't cause a visible stroke.
+ bounds.outset(paint.getStrokeWidth() + SK_Scalar1,
+ paint.getStrokeWidth() + SK_Scalar1);
+
+ if (!calculate_inverse_path(bounds, *pathPtr, &modifiedPath)) {
+ return false;
+ }
+
+ drawPath(d, modifiedPath, noInversePaint, prePathMatrix, true);
+ return true;
+}
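+
+// One way to compute such a positive inverse with Path Ops (this is the idea
+// behind the calculate_inverse_path helper used above; names are illustrative):
+//
+//   #include "SkPathOps.h"
+//   SkPath boundsPath, result;
+//   boundsPath.addRect(bounds);   // the outset clip bounds from above
+//   // Intersecting the bounds rect with the inverse-filled path yields an
+//   // ordinary (non-inverse) path covering the same visible area.
+//   bool ok = Op(boundsPath, *pathPtr, kIntersect_SkPathOp, &result);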
+
+void SkPDFDevice::handlePointAnnotation(const SkPoint& point,
+ const SkMatrix& matrix,
+ const char key[], SkData* value) {
+ if (!value) {
+ return;
+ }
+
+ if (!strcmp(SkAnnotationKeys::Define_Named_Dest_Key(), key)) {
+ SkPoint transformedPoint;
+ matrix.mapXY(point.x(), point.y(), &transformedPoint);
+ fNamedDestinations.emplace_back(value, transformedPoint);
+ }
+}
+
+void SkPDFDevice::handlePathAnnotation(const SkPath& path,
+ const SkDraw& d,
+ const char key[], SkData* value) {
+ if (!value) {
+ return;
+ }
+
+ SkRasterClip clip = *d.fRC;
+ clip.op(path, *d.fMatrix, SkIRect::MakeWH(width(), height()),
+ SkRegion::kIntersect_Op,
+ false);
+ SkRect transformedRect = SkRect::Make(clip.getBounds());
+
+ if (!strcmp(SkAnnotationKeys::URL_Key(), key)) {
+ if (!transformedRect.isEmpty()) {
+ fLinkToURLs.emplace_back(transformedRect, value);
+ }
+ } else if (!strcmp(SkAnnotationKeys::Link_Named_Dest_Key(), key)) {
+ if (!transformedRect.isEmpty()) {
+ fLinkToDestinations.emplace_back(transformedRect, value);
+ }
+ }
+}
+
+void SkPDFDevice::appendAnnotations(SkPDFArray* array) const {
+ array->reserve(fLinkToURLs.count() + fLinkToDestinations.count());
+ for (const RectWithData& rectWithURL : fLinkToURLs) {
+ SkRect r;
+ fInitialTransform.mapRect(&r, rectWithURL.rect);
+ array->appendObject(create_link_to_url(rectWithURL.data.get(), r));
+ }
+ for (const RectWithData& linkToDestination : fLinkToDestinations) {
+ SkRect r;
+ fInitialTransform.mapRect(&r, linkToDestination.rect);
+ array->appendObject(
+ create_link_named_dest(linkToDestination.data.get(), r));
+ }
+}
+
+void SkPDFDevice::appendDestinations(SkPDFDict* dict, SkPDFObject* page) const {
+ for (const NamedDestination& dest : fNamedDestinations) {
+ auto pdfDest = sk_make_sp<SkPDFArray>();
+ pdfDest->reserve(5);
+ pdfDest->appendObjRef(sk_ref_sp(page));
+ pdfDest->appendName("XYZ");
+ SkPoint p = fInitialTransform.mapXY(dest.point.x(), dest.point.y());
+ pdfDest->appendScalar(p.x());
+ pdfDest->appendScalar(p.y());
+ pdfDest->appendInt(0); // Leave zoom unchanged
+ SkString name(static_cast<const char*>(dest.nameData->data()));
+ dict->insertObject(name, std::move(pdfDest));
+ }
+}
+
+sk_sp<SkPDFObject> SkPDFDevice::makeFormXObjectFromDevice() {
+ SkMatrix inverseTransform = SkMatrix::I();
+ if (!fInitialTransform.isIdentity()) {
+ if (!fInitialTransform.invert(&inverseTransform)) {
+ SkDEBUGFAIL("Layer initial transform should be invertible.");
+ inverseTransform.reset();
+ }
+ }
+ sk_sp<SkPDFObject> xobject =
+ SkPDFMakeFormXObject(this->content(), this->copyMediaBox(),
+ this->makeResourceDict(), inverseTransform, nullptr);
+ // We always draw the form xobjects that we create back into the device, so
+ // we simply preserve the font usage instead of pulling it out and merging
+ // it back in later.
+ this->cleanUp(); // Reset this device to have no content.
+ this->init();
+ return xobject;
+}
+
+void SkPDFDevice::drawFormXObjectWithMask(int xObjectIndex,
+ sk_sp<SkPDFObject> mask,
+ const SkClipStack* clipStack,
+ const SkRegion& clipRegion,
+ SkBlendMode mode,
+ bool invertClip) {
+ if (clipRegion.isEmpty() && !invertClip) {
+ return;
+ }
+
+ sk_sp<SkPDFDict> sMaskGS = SkPDFGraphicState::GetSMaskGraphicState(
+ std::move(mask), invertClip,
+ SkPDFGraphicState::kAlpha_SMaskMode, fDocument->canon());
+
+ SkMatrix identity;
+ identity.reset();
+ SkPaint paint;
+ paint.setBlendMode(mode);
+ ScopedContentEntry content(this, clipStack, clipRegion, identity, paint);
+ if (!content.entry()) {
+ return;
+ }
+ SkPDFUtils::ApplyGraphicState(addGraphicStateResource(sMaskGS.get()),
+ &content.entry()->fContent);
+ SkPDFUtils::DrawFormXObject(xObjectIndex, &content.entry()->fContent);
+
+ // Call makeNoSmaskGraphicState() instead of
+ // SkPDFGraphicState::MakeNoSmaskGraphicState so that the canon
+ // can deduplicate.
+ sMaskGS = fDocument->canon()->makeNoSmaskGraphicState();
+ SkPDFUtils::ApplyGraphicState(addGraphicStateResource(sMaskGS.get()),
+ &content.entry()->fContent);
+}
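+
+// The resulting content is roughly (assuming the usual 'G'/'X' resource
+// prefixes for graphic states and form xobjects):
+//
+//   /G0 gs      <- graphic state carrying the soft mask
+//   /X1 Do      <- draw the masked form xobject
+//   /G2 gs      <- restore a graphic state with no soft mask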
+
+SkPDFDevice::ContentEntry* SkPDFDevice::setUpContentEntry(const SkClipStack* clipStack,
+ const SkRegion& clipRegion,
+ const SkMatrix& matrix,
+ const SkPaint& paint,
+ bool hasText,
+ sk_sp<SkPDFObject>* dst) {
+ *dst = nullptr;
+ if (clipRegion.isEmpty()) {
+ return nullptr;
+ }
+
+ // The clip stack can come from an SkDraw where it is technically optional.
+ SkClipStack synthesizedClipStack;
+ if (clipStack == nullptr) {
+ if (clipRegion == fExistingClipRegion) {
+ clipStack = &fExistingClipStack;
+ } else {
+ // GraphicStackState::updateClip expects the clip stack to have
+ // fExistingClip as a prefix, so start there, then set the clip
+ // to the passed region.
+ synthesizedClipStack = fExistingClipStack;
+ SkPath clipPath;
+ clipRegion.getBoundaryPath(&clipPath);
+ synthesizedClipStack.clipPath(clipPath, SkMatrix::I(), SkCanvas::kReplace_Op, false);
+ clipStack = &synthesizedClipStack;
+ }
+ }
+
+ SkBlendMode blendMode = paint.getBlendMode();
+
+ // For the following modes, we want to handle source and destination
+ // separately, so make an object of what's already there.
+ if (blendMode == SkBlendMode::kClear ||
+ blendMode == SkBlendMode::kSrc ||
+ blendMode == SkBlendMode::kSrcIn ||
+ blendMode == SkBlendMode::kDstIn ||
+ blendMode == SkBlendMode::kSrcOut ||
+ blendMode == SkBlendMode::kDstOut ||
+ blendMode == SkBlendMode::kSrcATop ||
+ blendMode == SkBlendMode::kDstATop ||
+ blendMode == SkBlendMode::kModulate) {
+ if (!isContentEmpty()) {
+ *dst = this->makeFormXObjectFromDevice();
+ SkASSERT(isContentEmpty());
+ } else if (blendMode != SkBlendMode::kSrc &&
+ blendMode != SkBlendMode::kSrcOut) {
+ // Except for Src and SrcOut, if there isn't anything already there,
+ // then we're done.
+ return nullptr;
+ }
+ }
+ // TODO(vandebo): Figure out how/if we can handle the following modes:
+ // Xor, Plus.
+
+ // Dst xfer mode doesn't draw source at all.
+ if (blendMode == SkBlendMode::kDst) {
+ return nullptr;
+ }
+
+ SkPDFDevice::ContentEntry* entry;
+ if (fContentEntries.back() && fContentEntries.back()->fContent.getOffset() == 0) {
+ entry = fContentEntries.back();
+ } else if (blendMode != SkBlendMode::kDstOver) {
+ entry = fContentEntries.emplace_back();
+ } else {
+ entry = fContentEntries.emplace_front();
+ }
+ populateGraphicStateEntryFromPaint(matrix, *clipStack, clipRegion, paint,
+ hasText, &entry->fState);
+ return entry;
+}
+
+void SkPDFDevice::finishContentEntry(SkBlendMode blendMode,
+ sk_sp<SkPDFObject> dst,
+ SkPath* shape) {
+ if (blendMode != SkBlendMode::kClear &&
+ blendMode != SkBlendMode::kSrc &&
+ blendMode != SkBlendMode::kDstOver &&
+ blendMode != SkBlendMode::kSrcIn &&
+ blendMode != SkBlendMode::kDstIn &&
+ blendMode != SkBlendMode::kSrcOut &&
+ blendMode != SkBlendMode::kDstOut &&
+ blendMode != SkBlendMode::kSrcATop &&
+ blendMode != SkBlendMode::kDstATop &&
+ blendMode != SkBlendMode::kModulate) {
+ SkASSERT(!dst);
+ return;
+ }
+ if (blendMode == SkBlendMode::kDstOver) {
+ SkASSERT(!dst);
+ if (fContentEntries.front()->fContent.getOffset() == 0) {
+ // For DstOver, an empty content entry was inserted before the rest
+ // of the content entries. If nothing was drawn, it needs to be
+ // removed.
+ fContentEntries.pop_front();
+ }
+ return;
+ }
+ if (!dst) {
+ SkASSERT(blendMode == SkBlendMode::kSrc ||
+ blendMode == SkBlendMode::kSrcOut);
+ return;
+ }
+
+ SkASSERT(dst);
+ SkASSERT(fContentEntries.count() == 1);
+ // Changing the current content into a form-xobject will destroy the clip
+ // objects, which is fine since the xobject will already be clipped. However,
+ // if the source has a shape, we need to clip it too, so a copy of the clip
+ // is saved.
+
+ SkClipStack clipStack = fContentEntries.front()->fState.fClipStack;
+ SkRegion clipRegion = fContentEntries.front()->fState.fClipRegion;
+
+ SkMatrix identity;
+ identity.reset();
+ SkPaint stockPaint;
+
+ sk_sp<SkPDFObject> srcFormXObject;
+ if (isContentEmpty()) {
+ // If nothing was drawn and there's no shape, then the draw was a
+ // no-op, but dst needs to be restored for that to be true.
+ // If there is a shape, then an empty source with Src, SrcIn, SrcOut,
+ // DstIn, DstATop or Modulate reduces to Clear, and DstOut or SrcATop
+ // reduces to Dst.
+ if (shape == nullptr || blendMode == SkBlendMode::kDstOut ||
+ blendMode == SkBlendMode::kSrcATop) {
+ ScopedContentEntry content(this, &fExistingClipStack,
+ fExistingClipRegion, identity,
+ stockPaint);
+ // TODO: make addXObjectResource() take a sk_sp
+ SkPDFUtils::DrawFormXObject(this->addXObjectResource(dst.get()),
+ &content.entry()->fContent);
+ return;
+ } else {
+ blendMode = SkBlendMode::kClear;
+ }
+ } else {
+ SkASSERT(fContentEntries.count() == 1);
+ srcFormXObject = this->makeFormXObjectFromDevice();
+ }
+
+ // TODO(vandebo) srcFormXObject may contain alpha, but here we want it
+ // without alpha.
+ if (blendMode == SkBlendMode::kSrcATop) {
+ // TODO(vandebo): In order to properly support SrcATop we have to track
+ // the shape of what's been drawn at all times. It's the intersection of
+ // the non-transparent parts of the device and the outlines (shape) of
+ // all images and devices drawn.
+ drawFormXObjectWithMask(addXObjectResource(srcFormXObject.get()), dst,
+ &fExistingClipStack, fExistingClipRegion,
+ SkBlendMode::kSrcOver, true);
+ } else {
+ if (shape != nullptr) {
+ // Draw shape into a form-xobject.
+ SkRasterClip rc(clipRegion);
+ SkDraw d;
+ d.fMatrix = &identity;
+ d.fRC = &rc;
+ d.fClipStack = &clipStack;
+ SkPaint filledPaint;
+ filledPaint.setColor(SK_ColorBLACK);
+ filledPaint.setStyle(SkPaint::kFill_Style);
+ this->drawPath(d, *shape, filledPaint, nullptr, true);
+ drawFormXObjectWithMask(addXObjectResource(dst.get()),
+ this->makeFormXObjectFromDevice(),
+ &fExistingClipStack, fExistingClipRegion,
+ SkBlendMode::kSrcOver, true);
+
+ } else {
+ drawFormXObjectWithMask(addXObjectResource(dst.get()), srcFormXObject,
+ &fExistingClipStack, fExistingClipRegion,
+ SkBlendMode::kSrcOver, true);
+ }
+ }
+
+ if (blendMode == SkBlendMode::kClear) {
+ return;
+ } else if (blendMode == SkBlendMode::kSrc ||
+ blendMode == SkBlendMode::kDstATop) {
+ ScopedContentEntry content(this, &fExistingClipStack,
+ fExistingClipRegion, identity, stockPaint);
+ if (content.entry()) {
+ SkPDFUtils::DrawFormXObject(
+ this->addXObjectResource(srcFormXObject.get()),
+ &content.entry()->fContent);
+ }
+ if (blendMode == SkBlendMode::kSrc) {
+ return;
+ }
+ } else if (blendMode == SkBlendMode::kSrcATop) {
+ ScopedContentEntry content(this, &fExistingClipStack,
+ fExistingClipRegion, identity, stockPaint);
+ if (content.entry()) {
+ SkPDFUtils::DrawFormXObject(this->addXObjectResource(dst.get()),
+ &content.entry()->fContent);
+ }
+ }
+
+ SkASSERT(blendMode == SkBlendMode::kSrcIn ||
+ blendMode == SkBlendMode::kDstIn ||
+ blendMode == SkBlendMode::kSrcOut ||
+ blendMode == SkBlendMode::kDstOut ||
+ blendMode == SkBlendMode::kSrcATop ||
+ blendMode == SkBlendMode::kDstATop ||
+ blendMode == SkBlendMode::kModulate);
+
+ if (blendMode == SkBlendMode::kSrcIn ||
+ blendMode == SkBlendMode::kSrcOut ||
+ blendMode == SkBlendMode::kSrcATop) {
+ drawFormXObjectWithMask(addXObjectResource(srcFormXObject.get()),
+ std::move(dst),
+ &fExistingClipStack, fExistingClipRegion,
+ SkBlendMode::kSrcOver,
+ blendMode == SkBlendMode::kSrcOut);
+ return;
+ } else {
+ SkBlendMode mode = SkBlendMode::kSrcOver;
+ int resourceID = addXObjectResource(dst.get());
+ if (blendMode == SkBlendMode::kModulate) {
+ drawFormXObjectWithMask(addXObjectResource(srcFormXObject.get()),
+ std::move(dst), &fExistingClipStack,
+ fExistingClipRegion,
+ SkBlendMode::kSrcOver, false);
+ mode = SkBlendMode::kMultiply;
+ }
+ drawFormXObjectWithMask(resourceID, std::move(srcFormXObject),
+ &fExistingClipStack, fExistingClipRegion, mode,
+ blendMode == SkBlendMode::kDstOut);
+ return;
+ }
+}
+
+bool SkPDFDevice::isContentEmpty() {
+ if (!fContentEntries.front() || fContentEntries.front()->fContent.getOffset() == 0) {
+ SkASSERT(fContentEntries.count() <= 1);
+ return true;
+ }
+ return false;
+}
+
+void SkPDFDevice::populateGraphicStateEntryFromPaint(
+ const SkMatrix& matrix,
+ const SkClipStack& clipStack,
+ const SkRegion& clipRegion,
+ const SkPaint& paint,
+ bool hasText,
+ SkPDFDevice::GraphicStateEntry* entry) {
+ NOT_IMPLEMENTED(paint.getPathEffect() != nullptr, false);
+ NOT_IMPLEMENTED(paint.getMaskFilter() != nullptr, false);
+ NOT_IMPLEMENTED(paint.getColorFilter() != nullptr, false);
+
+ entry->fMatrix = matrix;
+ entry->fClipStack = clipStack;
+ entry->fClipRegion = clipRegion;
+ entry->fColor = SkColorSetA(paint.getColor(), 0xFF);
+ entry->fShaderIndex = -1;
+
+ // PDF treats a shader as a color, so we only set one or the other.
+ sk_sp<SkPDFObject> pdfShader;
+ SkShader* shader = paint.getShader();
+ SkColor color = paint.getColor();
+ if (shader) {
+ // PDF positions patterns relative to the initial transform, so
+ // we need to apply the current transform to the shader parameters.
+ SkMatrix transform = matrix;
+ transform.postConcat(fInitialTransform);
+
+ // PDF doesn't support kClamp_TileMode, so we simulate it by making
+ // a pattern the size of the current clip.
+ SkIRect bounds = clipRegion.getBounds();
+
+ // We need to apply the initial transform to bounds in order to get
+ // bounds in a consistent coordinate system.
+ SkRect boundsTemp;
+ boundsTemp.set(bounds);
+ fInitialTransform.mapRect(&boundsTemp);
+ boundsTemp.roundOut(&bounds);
+
+ SkScalar rasterScale =
+ SkIntToScalar(fRasterDpi) / DPI_FOR_RASTER_SCALE_ONE;
+ pdfShader = SkPDFShader::GetPDFShader(
+ fDocument, fRasterDpi, shader, transform, bounds, rasterScale);
+
+ if (pdfShader.get()) {
+ // pdfShader has been canonicalized so we can directly compare
+ // pointers.
+ int resourceIndex = fShaderResources.find(pdfShader.get());
+ if (resourceIndex < 0) {
+ resourceIndex = fShaderResources.count();
+ fShaderResources.push(pdfShader.get());
+ pdfShader.get()->ref();
+ }
+ entry->fShaderIndex = resourceIndex;
+ } else {
+ // A color shader is treated as an invalid shader so we don't have
+ // to set a shader just for a color.
+ SkShader::GradientInfo gradientInfo;
+ SkColor gradientColor;
+ gradientInfo.fColors = &gradientColor;
+ gradientInfo.fColorOffsets = nullptr;
+ gradientInfo.fColorCount = 1;
+ if (shader->asAGradient(&gradientInfo) ==
+ SkShader::kColor_GradientType) {
+ entry->fColor = SkColorSetA(gradientColor, 0xFF);
+ color = gradientColor;
+ }
+ }
+ }
+
+ sk_sp<SkPDFGraphicState> newGraphicState;
+ if (color == paint.getColor()) {
+ newGraphicState.reset(
+ SkPDFGraphicState::GetGraphicStateForPaint(fDocument->canon(), paint));
+ } else {
+ SkPaint newPaint = paint;
+ newPaint.setColor(color);
+ newGraphicState.reset(
+ SkPDFGraphicState::GetGraphicStateForPaint(fDocument->canon(), newPaint));
+ }
+ int resourceIndex = addGraphicStateResource(newGraphicState.get());
+ entry->fGraphicStateIndex = resourceIndex;
+
+ if (hasText) {
+ entry->fTextScaleX = paint.getTextScaleX();
+ entry->fTextFill = paint.getStyle();
+ } else {
+ entry->fTextScaleX = 0;
+ }
+}
+
+int SkPDFDevice::addGraphicStateResource(SkPDFObject* gs) {
+ // Assumes that gs has been canonicalized (so we can directly compare
+ // pointers).
+ int result = fGraphicStateResources.find(gs);
+ if (result < 0) {
+ result = fGraphicStateResources.count();
+ fGraphicStateResources.push(gs);
+ gs->ref();
+ }
+ return result;
+}
+
+int SkPDFDevice::addXObjectResource(SkPDFObject* xObject) {
+ // TODO(halcanary): make this take a sk_sp<SkPDFObject>
+ // Assumes that xobject has been canonicalized (so we can directly compare
+ // pointers).
+ int result = fXObjectResources.find(xObject);
+ if (result < 0) {
+ result = fXObjectResources.count();
+ fXObjectResources.push(SkRef(xObject));
+ }
+ return result;
+}
+
+int SkPDFDevice::getFontResourceIndex(SkTypeface* typeface, uint16_t glyphID) {
+ sk_sp<SkPDFFont> newFont(
+ SkPDFFont::GetFontResource(fDocument->canon(), typeface, glyphID));
+ if (!newFont) {
+ return -1;
+ }
+ int resourceIndex = fFontResources.find(newFont.get());
+ if (resourceIndex < 0) {
+ fDocument->registerFont(newFont.get());
+ resourceIndex = fFontResources.count();
+ fFontResources.push(newFont.release());
+ }
+ return resourceIndex;
+}
+
+static SkSize rect_to_size(const SkRect& r) {
+ return SkSize::Make(r.width(), r.height());
+}
+
+static sk_sp<SkImage> color_filter(const SkImageSubset& imageSubset,
+ SkColorFilter* colorFilter) {
+ auto surface =
+ SkSurface::MakeRaster(SkImageInfo::MakeN32Premul(imageSubset.dimensions()));
+ SkASSERT(surface);
+ SkCanvas* canvas = surface->getCanvas();
+ canvas->clear(SK_ColorTRANSPARENT);
+ SkPaint paint;
+ paint.setColorFilter(sk_ref_sp(colorFilter));
+ imageSubset.draw(canvas, &paint);
+ canvas->flush();
+ return surface->makeImageSnapshot();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+void SkPDFDevice::internalDrawImage(const SkMatrix& origMatrix,
+ const SkClipStack* clipStack,
+ const SkRegion& origClipRegion,
+ SkImageSubset imageSubset,
+ const SkPaint& paint) {
+ if (imageSubset.dimensions().isZero()) {
+ return;
+ }
+ #ifdef SK_PDF_IMAGE_STATS
+ gDrawImageCalls.fetch_add(1);
+ #endif
+ SkMatrix matrix = origMatrix;
+ SkRegion perspectiveBounds;
+ const SkRegion* clipRegion = &origClipRegion;
+
+ // Rasterize the bitmap using perspective in a new bitmap.
+ if (origMatrix.hasPerspective()) {
+ if (fRasterDpi == 0) {
+ return;
+ }
+ // Transform the bitmap in the new space, without taking into
+ // account the initial transform.
+ SkPath perspectiveOutline;
+ SkRect imageBounds = SkRect::Make(imageSubset.bounds());
+ perspectiveOutline.addRect(imageBounds);
+ perspectiveOutline.transform(origMatrix);
+
+ // TODO(edisonn): perf - use current clip too.
+ // Retrieve the bounds of the new shape.
+ SkRect bounds = perspectiveOutline.getBounds();
+
+ // Transform the bitmap in the new space, taking into
+ // account the initial transform.
+ SkMatrix total = origMatrix;
+ total.postConcat(fInitialTransform);
+ SkScalar dpiScale = SkIntToScalar(fRasterDpi) /
+ SkIntToScalar(DPI_FOR_RASTER_SCALE_ONE);
+ total.postScale(dpiScale, dpiScale);
+
+ SkPath physicalPerspectiveOutline;
+ physicalPerspectiveOutline.addRect(imageBounds);
+ physicalPerspectiveOutline.transform(total);
+
+ SkRect physicalPerspectiveBounds =
+ physicalPerspectiveOutline.getBounds();
+ SkScalar scaleX = physicalPerspectiveBounds.width() / bounds.width();
+ SkScalar scaleY = physicalPerspectiveBounds.height() / bounds.height();
+
+ // TODO(edisonn): A better approach would be to use a bitmap shader
+ // (in clamp mode) and draw a rect over the entire bounding box. Then
+ // intersect perspectiveOutline to the clip. That will avoid introducing
+ // alpha to the image while still giving good behavior at the edge of
+ // the image. Avoiding alpha will also reduce the PDF size and
+ // generation CPU time somewhat.
+
+ SkISize wh = rect_to_size(physicalPerspectiveBounds).toCeil();
+
+ auto surface = SkSurface::MakeRaster(SkImageInfo::MakeN32Premul(wh));
+ if (!surface) {
+ return;
+ }
+ SkCanvas* canvas = surface->getCanvas();
+ canvas->clear(SK_ColorTRANSPARENT);
+
+ SkScalar deltaX = bounds.left();
+ SkScalar deltaY = bounds.top();
+
+ SkMatrix offsetMatrix = origMatrix;
+ offsetMatrix.postTranslate(-deltaX, -deltaY);
+ offsetMatrix.postScale(scaleX, scaleY);
+
+ // Translate the draw in the new canvas, so we perfectly fit the
+ // shape in the bitmap.
+ canvas->setMatrix(offsetMatrix);
+ imageSubset.draw(canvas, nullptr);
+ // Make sure the final bits are in the bitmap.
+ canvas->flush();
+
+ // In the new space, we use the identity matrix translated
+ // and scaled to reflect DPI.
+ matrix.setScale(1 / scaleX, 1 / scaleY);
+ matrix.postTranslate(deltaX, deltaY);
+
+ perspectiveBounds.setRect(bounds.roundOut());
+ clipRegion = &perspectiveBounds;
+
+ imageSubset = SkImageSubset(surface->makeImageSnapshot());
+ }
+
+ SkMatrix scaled;
+ // Adjust for origin flip.
+ scaled.setScale(SK_Scalar1, -SK_Scalar1);
+ scaled.postTranslate(0, SK_Scalar1);
+ // Scale the image up from 1x1 to WxH.
+ SkIRect subset = imageSubset.bounds();
+ scaled.postScale(SkIntToScalar(imageSubset.dimensions().width()),
+ SkIntToScalar(imageSubset.dimensions().height()));
+ scaled.postConcat(matrix);
+ ScopedContentEntry content(this, clipStack, *clipRegion, scaled, paint);
+ if (!content.entry()) {
+ return;
+ }
+ if (content.needShape()) {
+ SkPath shape;
+ shape.addRect(SkRect::Make(subset));
+ shape.transform(matrix);
+ content.setShape(shape);
+ }
+ if (!content.needSource()) {
+ return;
+ }
+
+ if (SkColorFilter* colorFilter = paint.getColorFilter()) {
+ // TODO(https://bug.skia.org/4378): implement colorfilter on other
+ // draw calls. This code here works for all
+ // drawBitmap*()/drawImage*() calls and ImageFilters (which
+ // rasterize a layer on this backend). Fortunately, this seems
+ // to be how Chromium implements most color filters.
+ sk_sp<SkImage> img = color_filter(imageSubset, colorFilter);
+ imageSubset = SkImageSubset(std::move(img));
+ // TODO(halcanary): de-dupe this by caching filtered images.
+ // (maybe in the resource cache?)
+ }
+
+ SkBitmapKey key = imageSubset.getKey();
+ sk_sp<SkPDFObject> pdfimage = fDocument->canon()->findPDFBitmap(key);
+ if (!pdfimage) {
+ sk_sp<SkImage> img = imageSubset.makeImage();
+ if (!img) {
+ return;
+ }
+ pdfimage = SkPDFCreateBitmapObject(
+ std::move(img), fDocument->canon()->getPixelSerializer());
+ if (!pdfimage) {
+ return;
+ }
+ fDocument->serialize(pdfimage); // serialize images early.
+ fDocument->canon()->addPDFBitmap(key, pdfimage);
+ }
+ // TODO(halcanary): addXObjectResource() should take a sk_sp<SkPDFObject>
+ SkPDFUtils::DrawFormXObject(this->addXObjectResource(pdfimage.get()),
+ &content.entry()->fContent);
+}
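+
+// The net effect of the matrix setup above: PDF paints an image XObject into
+// the unit square, so e.g. a 100x50 image drawn at the identity ends up under
+// a CTM of roughly "100 0 0 -50 0 50 cm" followed by "/X0 Do" (the resource
+// name assuming the usual 'X' prefix).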
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "SkSpecialImage.h"
+#include "SkImageFilter.h"
+
+void SkPDFDevice::drawSpecial(const SkDraw& draw, SkSpecialImage* srcImg, int x, int y,
+ const SkPaint& paint) {
+ SkASSERT(!srcImg->isTextureBacked());
+
+ SkBitmap resultBM;
+
+ SkImageFilter* filter = paint.getImageFilter();
+ if (filter) {
+ SkIPoint offset = SkIPoint::Make(0, 0);
+ SkMatrix matrix = *draw.fMatrix;
+ matrix.postTranslate(SkIntToScalar(-x), SkIntToScalar(-y));
+ const SkIRect clipBounds = draw.fRC->getBounds().makeOffset(-x, -y);
+ SkAutoTUnref<SkImageFilterCache> cache(this->getImageFilterCache());
+ // TODO: Should PDF be operating in a specified color space? For now, run the filter
+ // in the same color space as the source (this is different from all other backends).
+ SkImageFilter::OutputProperties outputProperties(srcImg->getColorSpace());
+ SkImageFilter::Context ctx(matrix, clipBounds, cache.get(), outputProperties);
+
+ sk_sp<SkSpecialImage> resultImg(filter->filterImage(srcImg, ctx, &offset));
+ if (resultImg) {
+ SkPaint tmpUnfiltered(paint);
+ tmpUnfiltered.setImageFilter(nullptr);
+ if (resultImg->getROPixels(&resultBM)) {
+ this->drawSprite(draw, resultBM, x + offset.x(), y + offset.y(), tmpUnfiltered);
+ }
+ }
+ } else {
+ if (srcImg->getROPixels(&resultBM)) {
+ this->drawSprite(draw, resultBM, x, y, paint);
+ }
+ }
+}
+
+sk_sp<SkSpecialImage> SkPDFDevice::makeSpecial(const SkBitmap& bitmap) {
+ return SkSpecialImage::MakeFromRaster(bitmap.bounds(), bitmap);
+}
+
+sk_sp<SkSpecialImage> SkPDFDevice::makeSpecial(const SkImage* image) {
+ return SkSpecialImage::MakeFromImage(SkIRect::MakeWH(image->width(), image->height()),
+ image->makeNonTextureImage());
+}
+
+sk_sp<SkSpecialImage> SkPDFDevice::snapSpecial() {
+ return nullptr;
+}
+
+SkImageFilterCache* SkPDFDevice::getImageFilterCache() {
+ // We always return a transient cache, so it is freed after each
+ // filter traversal.
+ return SkImageFilterCache::Create(SkImageFilterCache::kDefaultTransientSize);
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFDevice.h b/gfx/skia/skia/src/pdf/SkPDFDevice.h
new file mode 100644
index 000000000..7d207e7cf
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFDevice.h
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPDFDevice_DEFINED
+#define SkPDFDevice_DEFINED
+
+#include "SkBitmap.h"
+#include "SkCanvas.h"
+#include "SkClipStack.h"
+#include "SkData.h"
+#include "SkDevice.h"
+#include "SkPaint.h"
+#include "SkRect.h"
+#include "SkRefCnt.h"
+#include "SkSinglyLinkedList.h"
+#include "SkStream.h"
+#include "SkTDArray.h"
+#include "SkTextBlob.h"
+
+class SkImageSubset;
+class SkPath;
+class SkPDFArray;
+class SkPDFCanon;
+class SkPDFDevice;
+class SkPDFDocument;
+class SkPDFDict;
+class SkPDFFont;
+class SkPDFObject;
+class SkPDFStream;
+class SkRRect;
+
+/** \class SkPDFDevice
+
+ The drawing context for the PDF backend.
+*/
+class SkPDFDevice final : public SkBaseDevice {
+public:
+ /** Create a PDF drawing context. SkPDFDevice applies a
+ * scale-and-translate transform to move the origin from the
+ * bottom left (PDF default) to the top left (Skia default).
+ * @param pageSize Page size in point units.
+ * 1 point == 127/360 mm == 1/72 inch
+ * @param rasterDpi the DPI at which features without native PDF
+ * support will be rasterized (e.g. draw image with
+ * perspective, draw text with perspective, ...). A
+ * larger DPI would create a PDF that reflects the
+ * original intent with better fidelity, but it can also
+ * make for larger PDF files, which use more memory while
+ * rendering and are slower to process, send online,
+ * or print. A good choice is
+ * SK_ScalarDefaultRasterDPI(72.0f).
+ * @param doc A non-null pointer back to the
+ * document. The document is responsible for
+ * de-duplicating across pages (via the SkPDFCanon) and
+ * for early serializing of large immutable objects, such
+ * as images (via SkPDFDocument::serialize()).
+ */
+ static SkPDFDevice* Create(SkISize pageSize,
+ SkScalar rasterDpi,
+ SkPDFDocument* doc) {
+ return new SkPDFDevice(pageSize, rasterDpi, doc, true);
+ }
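+
+ // A minimal sketch (US Letter at the default raster DPI; `doc` is assumed
+ // to be a valid, non-null SkPDFDocument*):
+ //
+ //   SkPDFDevice* page = SkPDFDevice::Create(SkISize::Make(612, 792),
+ //                                           SK_ScalarDefaultRasterDPI, doc);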
+
+ /** Create a PDF drawing context without flipping the y-axis. */
+ static SkPDFDevice* CreateUnflipped(SkISize pageSize,
+ SkScalar rasterDpi,
+ SkPDFDocument* doc) {
+ return new SkPDFDevice(pageSize, rasterDpi, doc, false);
+ }
+
+ virtual ~SkPDFDevice();
+
+ /** These are called inside the per-device-layer loop for each draw call.
+ When these are called, we have already applied any saveLayer operations,
+ and are handling any looping from the paint, and any effects from the
+ DrawFilter.
+ */
+ void drawPaint(const SkDraw&, const SkPaint& paint) override;
+ void drawPoints(const SkDraw&, SkCanvas::PointMode mode,
+ size_t count, const SkPoint[],
+ const SkPaint& paint) override;
+ void drawRect(const SkDraw&, const SkRect& r, const SkPaint& paint) override;
+ void drawOval(const SkDraw&, const SkRect& oval, const SkPaint& paint) override;
+ void drawRRect(const SkDraw&, const SkRRect& rr, const SkPaint& paint) override;
+ void drawPath(const SkDraw&, const SkPath& origpath,
+ const SkPaint& paint, const SkMatrix* prePathMatrix,
+ bool pathIsMutable) override;
+ void drawBitmapRect(const SkDraw& draw, const SkBitmap& bitmap, const SkRect* src,
+ const SkRect& dst, const SkPaint&, SkCanvas::SrcRectConstraint) override;
+ void drawBitmap(const SkDraw&, const SkBitmap& bitmap,
+ const SkMatrix& matrix, const SkPaint&) override;
+ void drawSprite(const SkDraw&, const SkBitmap& bitmap, int x, int y,
+ const SkPaint& paint) override;
+ void drawImage(const SkDraw&,
+ const SkImage*,
+ SkScalar x,
+ SkScalar y,
+ const SkPaint&) override;
+ void drawImageRect(const SkDraw&,
+ const SkImage*,
+ const SkRect* src,
+ const SkRect& dst,
+ const SkPaint&,
+ SkCanvas::SrcRectConstraint) override;
+ void drawText(const SkDraw&, const void* text, size_t len,
+ SkScalar x, SkScalar y, const SkPaint&) override;
+ void drawPosText(const SkDraw&, const void* text, size_t len,
+ const SkScalar pos[], int scalarsPerPos,
+ const SkPoint& offset, const SkPaint&) override;
+ void drawTextBlob(const SkDraw&, const SkTextBlob*, SkScalar x, SkScalar y,
+ const SkPaint &, SkDrawFilter*) override;
+ void drawVertices(const SkDraw&, SkCanvas::VertexMode,
+ int vertexCount, const SkPoint verts[],
+ const SkPoint texs[], const SkColor colors[],
+ SkXfermode* xmode, const uint16_t indices[],
+ int indexCount, const SkPaint& paint) override;
+ void drawDevice(const SkDraw&, SkBaseDevice*, int x, int y,
+ const SkPaint&) override;
+
+ // PDF specific methods.
+
+ /** Create the resource dictionary for this device. */
+ sk_sp<SkPDFDict> makeResourceDict() const;
+
+ /** Add our annotations (links to URLs and destinations) to the supplied
+ * array.
+ * @param array Array to add annotations to.
+ */
+ void appendAnnotations(SkPDFArray* array) const;
+
+ /** Add our named destinations to the supplied dictionary.
+ * @param dict Dictionary to add destinations to.
+ * @param page The PDF object representing the page for this device.
+ */
+ void appendDestinations(SkPDFDict* dict, SkPDFObject* page) const;
+
+ /** Returns a copy of the media box for this device. */
+ sk_sp<SkPDFArray> copyMediaBox() const;
+
+ /** Returns a SkStream with the page contents.
+ */
+ std::unique_ptr<SkStreamAsset> content() const;
+
+ SkPDFCanon* getCanon() const;
+
+ // It is important not to confuse GraphicStateEntry with SkPDFGraphicState, the
+ // latter being our representation of an object in the PDF file.
+ struct GraphicStateEntry {
+ GraphicStateEntry();
+
+ // Compare the fields we care about when setting up a new content entry.
+ bool compareInitialState(const GraphicStateEntry& b);
+
+ SkMatrix fMatrix;
+ // We can't do set operations on Paths, though PDF natively supports
+ // intersect. If the clip stack does anything other than intersect,
+ // we have to fall back to the region. Treat fClipStack as authoritative.
+ // See https://bugs.skia.org/221
+ SkClipStack fClipStack;
+ SkRegion fClipRegion;
+
+ // When emitting the content entry, we will ensure the graphic state
+ // is set to these values first.
+ SkColor fColor;
+ SkScalar fTextScaleX; // Zero means we don't care what the value is.
+ SkPaint::Style fTextFill; // Only if TextScaleX is non-zero.
+ int fShaderIndex;
+ int fGraphicStateIndex;
+ };
+
+protected:
+ sk_sp<SkSurface> makeSurface(const SkImageInfo&, const SkSurfaceProps&) override;
+
+ void drawAnnotation(const SkDraw&, const SkRect&, const char key[], SkData* value) override;
+
+ void drawSpecial(const SkDraw&, SkSpecialImage*, int x, int y, const SkPaint&) override;
+ sk_sp<SkSpecialImage> makeSpecial(const SkBitmap&) override;
+ sk_sp<SkSpecialImage> makeSpecial(const SkImage*) override;
+ sk_sp<SkSpecialImage> snapSpecial() override;
+ SkImageFilterCache* getImageFilterCache() override;
+
+private:
+ struct RectWithData {
+ SkRect rect;
+ sk_sp<SkData> data;
+ RectWithData(const SkRect& rect, SkData* data)
+ : rect(rect), data(SkRef(data)) {}
+ RectWithData(RectWithData&&) = default;
+ RectWithData& operator=(RectWithData&& other) = default;
+ };
+
+ struct NamedDestination {
+ sk_sp<SkData> nameData;
+ SkPoint point;
+ NamedDestination(SkData* nameData, const SkPoint& point)
+ : nameData(SkRef(nameData)), point(point) {}
+ NamedDestination(NamedDestination&&) = default;
+ NamedDestination& operator=(NamedDestination&&) = default;
+ };
+
+ // TODO(vandebo): push most of SkPDFDevice's state into a core object in
+ // order to get the right access levels without using friend.
+ friend class ScopedContentEntry;
+
+ SkISize fPageSize;
+ SkMatrix fInitialTransform;
+ SkClipStack fExistingClipStack;
+ SkRegion fExistingClipRegion;
+
+ SkTArray<RectWithData> fLinkToURLs;
+ SkTArray<RectWithData> fLinkToDestinations;
+ SkTArray<NamedDestination> fNamedDestinations;
+
+ SkTDArray<SkPDFObject*> fGraphicStateResources;
+ SkTDArray<SkPDFObject*> fXObjectResources;
+ SkTDArray<SkPDFFont*> fFontResources;
+ SkTDArray<SkPDFObject*> fShaderResources;
+
+ struct ContentEntry {
+ GraphicStateEntry fState;
+ SkDynamicMemoryWStream fContent;
+ };
+ SkSinglyLinkedList<ContentEntry> fContentEntries;
+
+ SkScalar fRasterDpi;
+
+ SkPDFDocument* fDocument;
+ ////////////////////////////////////////////////////////////////////////////
+
+ SkPDFDevice(SkISize pageSize,
+ SkScalar rasterDpi,
+ SkPDFDocument* doc,
+ bool flip);
+
+ SkBaseDevice* onCreateDevice(const CreateInfo&, const SkPaint*) override;
+
+ void init();
+ void cleanUp();
+ sk_sp<SkPDFObject> makeFormXObjectFromDevice();
+
+ void drawFormXObjectWithMask(int xObjectIndex,
+ sk_sp<SkPDFObject> mask,
+ const SkClipStack* clipStack,
+ const SkRegion& clipRegion,
+ SkBlendMode,
+ bool invertClip);
+
+ // If the paint or clip is such that we shouldn't draw anything, this
+ // returns nullptr and does not create a content entry.
+ // setUpContentEntry and finishContentEntry can be used directly, but
+ // the preferred method is to use the ScopedContentEntry helper class.
+ ContentEntry* setUpContentEntry(const SkClipStack* clipStack,
+ const SkRegion& clipRegion,
+ const SkMatrix& matrix,
+ const SkPaint& paint,
+ bool hasText,
+ sk_sp<SkPDFObject>* dst);
+ void finishContentEntry(SkBlendMode, sk_sp<SkPDFObject> dst, SkPath* shape);
+ bool isContentEmpty();
+
+ void populateGraphicStateEntryFromPaint(const SkMatrix& matrix,
+ const SkClipStack& clipStack,
+ const SkRegion& clipRegion,
+ const SkPaint& paint,
+ bool hasText,
+ GraphicStateEntry* entry);
+ int addGraphicStateResource(SkPDFObject* gs);
+ int addXObjectResource(SkPDFObject* xObject);
+
+ int getFontResourceIndex(SkTypeface* typeface, uint16_t glyphID);
+
+
+ void internalDrawText(const SkDraw&, const void*, size_t, const SkScalar pos[],
+ SkTextBlob::GlyphPositioning, SkPoint, const SkPaint&,
+ const uint32_t*, uint32_t, const char*);
+
+ void internalDrawPaint(const SkPaint& paint, ContentEntry* contentEntry);
+
+ void internalDrawImage(const SkMatrix& origMatrix,
+ const SkClipStack* clipStack,
+ const SkRegion& origClipRegion,
+ SkImageSubset imageSubset,
+ const SkPaint& paint);
+
+ bool handleInversePath(const SkDraw& d, const SkPath& origPath,
+ const SkPaint& paint, bool pathIsMutable,
+ const SkMatrix* prePathMatrix = nullptr);
+ void handlePointAnnotation(const SkPoint&, const SkMatrix&, const char key[], SkData* value);
+ void handlePathAnnotation(const SkPath&, const SkDraw& d, const char key[], SkData* value);
+
+ typedef SkBaseDevice INHERITED;
+
+ // TODO(edisonn): Only SkDocument_PDF and SkPDFImageShader should be able to create
+ // an SkPDFDevice
+ //friend class SkDocument_PDF;
+ //friend class SkPDFImageShader;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFDocument.cpp b/gfx/skia/skia/src/pdf/SkPDFDocument.cpp
new file mode 100644
index 000000000..ab5f46597
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFDocument.cpp
@@ -0,0 +1,463 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMakeUnique.h"
+#include "SkPDFCanon.h"
+#include "SkPDFCanvas.h"
+#include "SkPDFDevice.h"
+#include "SkPDFDocument.h"
+#include "SkPDFUtils.h"
+#include "SkStream.h"
+
+SkPDFObjectSerializer::SkPDFObjectSerializer() : fBaseOffset(0), fNextToBeSerialized(0) {}
+
+template <class T> static void renew(T* t) { t->~T(); new (t) T; }
+
+SkPDFObjectSerializer::~SkPDFObjectSerializer() {
+ for (int i = 0; i < fObjNumMap.objects().count(); ++i) {
+ fObjNumMap.objects()[i]->drop();
+ }
+}
+
+void SkPDFObjectSerializer::addObjectRecursively(const sk_sp<SkPDFObject>& object) {
+ fObjNumMap.addObjectRecursively(object.get());
+}
+
+#define SKPDF_MAGIC "\xD3\xEB\xE9\xE1"
+#ifndef SK_BUILD_FOR_WIN32
+static_assert((SKPDF_MAGIC[0] & 0x7F) == "Skia"[0], "");
+static_assert((SKPDF_MAGIC[1] & 0x7F) == "Skia"[1], "");
+static_assert((SKPDF_MAGIC[2] & 0x7F) == "Skia"[2], "");
+static_assert((SKPDF_MAGIC[3] & 0x7F) == "Skia"[3], "");
+#endif
+void SkPDFObjectSerializer::serializeHeader(SkWStream* wStream,
+ const SkDocument::PDFMetadata& md) {
+ fBaseOffset = wStream->bytesWritten();
+ static const char kHeader[] = "%PDF-1.4\n%" SKPDF_MAGIC "\n";
+ wStream->write(kHeader, strlen(kHeader));
+ // The PDF spec recommends including a comment with four
+ // bytes, all with their high bits set. "\xD3\xEB\xE9\xE1" is
+ // "Skia" with the high bits set.
+ fInfoDict = SkPDFMetadata::MakeDocumentInformationDict(md);
+ this->addObjectRecursively(fInfoDict);
+ this->serializeObjects(wStream);
+}
+#undef SKPDF_MAGIC
+
+// Serialize all objects in the fObjNumMap that have not yet been serialized.
+void SkPDFObjectSerializer::serializeObjects(SkWStream* wStream) {
+ const SkTArray<sk_sp<SkPDFObject>>& objects = fObjNumMap.objects();
+ while (fNextToBeSerialized < objects.count()) {
+ SkPDFObject* object = objects[fNextToBeSerialized].get();
+ int32_t index = fNextToBeSerialized + 1; // Skip object 0.
+ // "The first entry in the [XREF] table (object number 0) is
+ // always free and has a generation number of 65,535; it is
+ // the head of the linked list of free objects."
+ SkASSERT(fOffsets.count() == fNextToBeSerialized);
+ fOffsets.push(this->offset(wStream));
+ wStream->writeDecAsText(index);
+ wStream->writeText(" 0 obj\n"); // Generation number is always 0.
+ object->emitObject(wStream, fObjNumMap);
+ wStream->writeText("\nendobj\n");
+ object->drop();
+ ++fNextToBeSerialized;
+ }
+}
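+
+// For illustration, each object emitted by the loop above becomes an
+// indirect object of the form
+//   5 0 obj
+//   <</Type /Page ...>>
+//   endobj
+// where 5 is the 1-based index assigned by fObjNumMap and 0 is the
+// generation number.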
+
+// Xref table and footer
+void SkPDFObjectSerializer::serializeFooter(SkWStream* wStream,
+ const sk_sp<SkPDFObject> docCatalog,
+ sk_sp<SkPDFObject> id) {
+ this->serializeObjects(wStream);
+ int32_t xRefFileOffset = this->offset(wStream);
+ // Include the special zeroth object in the count.
+ int32_t objCount = SkToS32(fOffsets.count() + 1);
+ wStream->writeText("xref\n0 ");
+ wStream->writeDecAsText(objCount);
+ wStream->writeText("\n0000000000 65535 f \n");
+ for (int i = 0; i < fOffsets.count(); i++) {
+ wStream->writeBigDecAsText(fOffsets[i], 10);
+ wStream->writeText(" 00000 n \n");
+ }
+ SkPDFDict trailerDict;
+ trailerDict.insertInt("Size", objCount);
+ SkASSERT(docCatalog);
+ trailerDict.insertObjRef("Root", docCatalog);
+ SkASSERT(fInfoDict);
+ trailerDict.insertObjRef("Info", std::move(fInfoDict));
+ if (id) {
+ trailerDict.insertObject("ID", std::move(id));
+ }
+ wStream->writeText("trailer\n");
+ trailerDict.emitObject(wStream, fObjNumMap);
+ wStream->writeText("\nstartxref\n");
+ wStream->writeBigDecAsText(xRefFileOffset);
+ wStream->writeText("\n%%EOF");
+}
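+
+// For illustration, the footer written above has the standard PDF shape:
+//   xref
+//   0 <objCount>
+//   0000000000 65535 f
+//   0000000015 00000 n
+//   ...
+//   trailer
+//   <</Size <objCount> /Root <ref> /Info <ref>>>
+//   startxref
+//   <xRefFileOffset>
+//   %%EOF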
+
+int32_t SkPDFObjectSerializer::offset(SkWStream* wStream) {
+ size_t offset = wStream->bytesWritten();
+ SkASSERT(offset > fBaseOffset);
+ return SkToS32(offset - fBaseOffset);
+}
+
+
+// return root node.
+static sk_sp<SkPDFDict> generate_page_tree(SkTArray<sk_sp<SkPDFDict>>* pages) {
+ // PDF wants a tree describing all the pages in the document. We arbitrarily
+ // choose 8 (kNodeSize) as the number of allowed children. The internal
+ // nodes have type "Pages" with an array of children, a parent pointer, and
+ // the number of leaves below the node as "Count." The leaves are passed
+ // into the method, have type "Page" and need a parent pointer. This method
+ // builds the tree bottom up, skipping internal nodes that would have only
+ // one child.
+ static const int kNodeSize = 8;
+
+ // curNodes takes a reference to its items, which it passes to pageTree.
+ int totalPageCount = pages->count();
+ SkTArray<sk_sp<SkPDFDict>> curNodes;
+ curNodes.swap(pages);
+
+ // nextRoundNodes passes its references to nodes on to curNodes.
+ int treeCapacity = kNodeSize;
+ do {
+ SkTArray<sk_sp<SkPDFDict>> nextRoundNodes;
+ for (int i = 0; i < curNodes.count(); ) {
+ if (i > 0 && i + 1 == curNodes.count()) {
+ SkASSERT(curNodes[i]);
+ nextRoundNodes.emplace_back(std::move(curNodes[i]));
+ break;
+ }
+
+ auto newNode = sk_make_sp<SkPDFDict>("Pages");
+ auto kids = sk_make_sp<SkPDFArray>();
+ kids->reserve(kNodeSize);
+
+ int count = 0;
+ for (; i < curNodes.count() && count < kNodeSize; i++, count++) {
+ SkASSERT(curNodes[i]);
+ curNodes[i]->insertObjRef("Parent", newNode);
+ kids->appendObjRef(std::move(curNodes[i]));
+ }
+
+ // treeCapacity is the number of leaf nodes possible for the
+ // current set of subtrees being generated. (i.e. 8, 64, 512, ...).
+ // It is hard to count the number of leaf nodes in the current
+ // subtree. However, by construction, we know that unless it's the
+ // last subtree for the current depth, the leaf count will be
+ // treeCapacity, otherwise it's whatever is left over after
+ // consuming treeCapacity chunks.
+ int pageCount = treeCapacity;
+ if (i == curNodes.count()) {
+ pageCount = ((totalPageCount - 1) % treeCapacity) + 1;
+ }
+ newNode->insertInt("Count", pageCount);
+ newNode->insertObject("Kids", std::move(kids));
+ nextRoundNodes.emplace_back(std::move(newNode));
+ }
+ SkDEBUGCODE( for (const auto& n : curNodes) { SkASSERT(!n); } );
+
+ curNodes.swap(&nextRoundNodes);
+ nextRoundNodes.reset();
+ treeCapacity *= kNodeSize;
+ } while (curNodes.count() > 1);
+ return std::move(curNodes[0]);
+}
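+
+// For example, a 20-page document produces a root /Pages node with Count 20
+// and three /Pages children holding 8, 8, and 4 /Page leaves; a document of
+// 8 or fewer pages produces a single /Pages node whose Kids are the pages
+// themselves.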
+
+////////////////////////////////////////////////////////////////////////////////
+
+SkPDFDocument::SkPDFDocument(SkWStream* stream,
+ void (*doneProc)(SkWStream*, bool),
+ SkScalar rasterDpi,
+ const SkDocument::PDFMetadata& metadata,
+ sk_sp<SkPixelSerializer> jpegEncoder,
+ bool pdfa)
+ : SkDocument(stream, doneProc)
+ , fRasterDpi(rasterDpi)
+ , fMetadata(metadata)
+ , fPDFA(pdfa) {
+ fCanon.setPixelSerializer(std::move(jpegEncoder));
+}
+
+SkPDFDocument::~SkPDFDocument() {
+ // subclasses of SkDocument must call close() in their destructors.
+ this->close();
+}
+
+void SkPDFDocument::serialize(const sk_sp<SkPDFObject>& object) {
+ fObjectSerializer.addObjectRecursively(object);
+ fObjectSerializer.serializeObjects(this->getStream());
+}
+
+SkCanvas* SkPDFDocument::onBeginPage(SkScalar width, SkScalar height,
+ const SkRect& trimBox) {
+ SkASSERT(!fCanvas.get()); // endPage() was called before this.
+ if (fPages.empty()) {
+ // This is the first page of the document.
+ fObjectSerializer.serializeHeader(this->getStream(), fMetadata);
+ fDests = sk_make_sp<SkPDFDict>();
+ if (fPDFA) {
+ SkPDFMetadata::UUID uuid = SkPDFMetadata::CreateUUID(fMetadata);
+ // We use the same UUID for Document ID and Instance ID since this
+ // is the first revision of this document (and Skia does not
+ // support revising existing PDF documents).
+ // If we are not in PDF/A mode, don't use a UUID since testing
+ // works best with reproducible outputs.
+ fID = SkPDFMetadata::MakePdfId(uuid, uuid);
+ fXMP = SkPDFMetadata::MakeXMPObject(fMetadata, uuid, uuid);
+ fObjectSerializer.addObjectRecursively(fXMP);
+ fObjectSerializer.serializeObjects(this->getStream());
+ }
+ }
+ SkISize pageSize = SkISize::Make(
+ SkScalarRoundToInt(width), SkScalarRoundToInt(height));
+ fPageDevice.reset(
+ SkPDFDevice::Create(pageSize, fRasterDpi, this));
+ fCanvas = sk_make_sp<SkPDFCanvas>(fPageDevice);
+ fCanvas->clipRect(trimBox);
+ fCanvas->translate(trimBox.x(), trimBox.y());
+ return fCanvas.get();
+}
+
+void SkPDFDocument::onEndPage() {
+ SkASSERT(fCanvas.get());
+ fCanvas->flush();
+ fCanvas.reset(nullptr);
+ SkASSERT(fPageDevice);
+ auto page = sk_make_sp<SkPDFDict>("Page");
+ page->insertObject("Resources", fPageDevice->makeResourceDict());
+ page->insertObject("MediaBox", fPageDevice->copyMediaBox());
+ auto annotations = sk_make_sp<SkPDFArray>();
+ fPageDevice->appendAnnotations(annotations.get());
+ if (annotations->size() > 0) {
+ page->insertObject("Annots", std::move(annotations));
+ }
+ auto contentObject = sk_make_sp<SkPDFStream>(fPageDevice->content());
+ this->serialize(contentObject);
+ page->insertObjRef("Contents", std::move(contentObject));
+ fPageDevice->appendDestinations(fDests.get(), page.get());
+ fPages.emplace_back(std::move(page));
+ fPageDevice.reset(nullptr);
+}
+
+void SkPDFDocument::onAbort() {
+ this->reset();
+}
+
+void SkPDFDocument::reset() {
+ fCanvas.reset(nullptr);
+ fPages.reset();
+ fCanon.reset();
+ renew(&fObjectSerializer);
+ fFonts.reset();
+}
+
+static sk_sp<SkData> SkSrgbIcm() {
+ // Source: http://www.argyllcms.com/icclibsrc.html
+ static const char kProfile[] =
+ "\0\0\14\214argl\2 \0\0mntrRGB XYZ \7\336\0\1\0\6\0\26\0\17\0:acspM"
+ "SFT\0\0\0\0IEC sRGB\0\0\0\0\0\0\0\0\0\0\0\0\0\0\366\326\0\1\0\0\0\0"
+ "\323-argl\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\21desc\0\0\1P\0\0\0\231cprt\0"
+ "\0\1\354\0\0\0gdmnd\0\0\2T\0\0\0pdmdd\0\0\2\304\0\0\0\210tech\0\0\3"
+ "L\0\0\0\14vued\0\0\3X\0\0\0gview\0\0\3\300\0\0\0$lumi\0\0\3\344\0\0"
+ "\0\24meas\0\0\3\370\0\0\0$wtpt\0\0\4\34\0\0\0\24bkpt\0\0\0040\0\0\0"
+ "\24rXYZ\0\0\4D\0\0\0\24gXYZ\0\0\4X\0\0\0\24bXYZ\0\0\4l\0\0\0\24rTR"
+ "C\0\0\4\200\0\0\10\14gTRC\0\0\4\200\0\0\10\14bTRC\0\0\4\200\0\0\10"
+ "\14desc\0\0\0\0\0\0\0?sRGB IEC61966-2.1 (Equivalent to www.srgb.co"
+ "m 1998 HP profile)\0\0\0\0\0\0\0\0\0\0\0?sRGB IEC61966-2.1 (Equiva"
+ "lent to www.srgb.com 1998 HP profile)\0\0\0\0\0\0\0\0text\0\0\0\0C"
+ "reated by Graeme W. Gill. Released into the public domain. No Warr"
+ "anty, Use at your own risk.\0\0desc\0\0\0\0\0\0\0\26IEC http://www"
+ ".iec.ch\0\0\0\0\0\0\0\0\0\0\0\26IEC http://www.iec.ch\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0desc\0\0\0\0\0\0\0.IEC 61966-2.1 Default RGB colour sp"
+ "ace - sRGB\0\0\0\0\0\0\0\0\0\0\0.IEC 61966-2.1 Default RGB colour "
+ "space - sRGB\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0sig \0\0\0"
+ "\0CRT desc\0\0\0\0\0\0\0\rIEC61966-2.1\0\0\0\0\0\0\0\0\0\0\0\rIEC6"
+ "1966-2.1\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0view\0\0\0\0"
+ "\0\23\244|\0\24_0\0\20\316\2\0\3\355\262\0\4\23\n\0\3\\g\0\0\0\1XY"
+ "Z \0\0\0\0\0L\n=\0P\0\0\0W\36\270meas\0\0\0\0\0\0\0\1\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0\0\0\0\0\0\2\217\0\0\0\2XYZ \0\0\0\0\0\0\363Q\0\1\0\0\0"
+ "\1\26\314XYZ \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0XYZ \0\0\0\0\0\0o\240"
+ "\0\0008\365\0\0\3\220XYZ \0\0\0\0\0\0b\227\0\0\267\207\0\0\30\331X"
+ "YZ \0\0\0\0\0\0$\237\0\0\17\204\0\0\266\304curv\0\0\0\0\0\0\4\0\0\0"
+ "\0\5\0\n\0\17\0\24\0\31\0\36\0#\0(\0-\0002\0007\0;\0@\0E\0J\0O\0T\0"
+ "Y\0^\0c\0h\0m\0r\0w\0|\0\201\0\206\0\213\0\220\0\225\0\232\0\237\0"
+ "\244\0\251\0\256\0\262\0\267\0\274\0\301\0\306\0\313\0\320\0\325\0"
+ "\333\0\340\0\345\0\353\0\360\0\366\0\373\1\1\1\7\1\r\1\23\1\31\1\37"
+ "\1%\1+\0012\0018\1>\1E\1L\1R\1Y\1`\1g\1n\1u\1|\1\203\1\213\1\222\1"
+ "\232\1\241\1\251\1\261\1\271\1\301\1\311\1\321\1\331\1\341\1\351\1"
+ "\362\1\372\2\3\2\14\2\24\2\35\2&\2/\0028\2A\2K\2T\2]\2g\2q\2z\2\204"
+ "\2\216\2\230\2\242\2\254\2\266\2\301\2\313\2\325\2\340\2\353\2\365"
+ "\3\0\3\13\3\26\3!\3-\0038\3C\3O\3Z\3f\3r\3~\3\212\3\226\3\242\3\256"
+ "\3\272\3\307\3\323\3\340\3\354\3\371\4\6\4\23\4 \4-\4;\4H\4U\4c\4q"
+ "\4~\4\214\4\232\4\250\4\266\4\304\4\323\4\341\4\360\4\376\5\r\5\34"
+ "\5+\5:\5I\5X\5g\5w\5\206\5\226\5\246\5\265\5\305\5\325\5\345\5\366"
+ "\6\6\6\26\6'\0067\6H\6Y\6j\6{\6\214\6\235\6\257\6\300\6\321\6\343\6"
+ "\365\7\7\7\31\7+\7=\7O\7a\7t\7\206\7\231\7\254\7\277\7\322\7\345\7"
+ "\370\10\13\10\37\0102\10F\10Z\10n\10\202\10\226\10\252\10\276\10\322"
+ "\10\347\10\373\t\20\t%\t:\tO\td\ty\t\217\t\244\t\272\t\317\t\345\t"
+ "\373\n\21\n'\n=\nT\nj\n\201\n\230\n\256\n\305\n\334\n\363\13\13\13"
+ "\"\0139\13Q\13i\13\200\13\230\13\260\13\310\13\341\13\371\14\22\14"
+ "*\14C\14\\\14u\14\216\14\247\14\300\14\331\14\363\r\r\r&\r@\rZ\rt\r"
+ "\216\r\251\r\303\r\336\r\370\16\23\16.\16I\16d\16\177\16\233\16\266"
+ "\16\322\16\356\17\t\17%\17A\17^\17z\17\226\17\263\17\317\17\354\20"
+ "\t\20&\20C\20a\20~\20\233\20\271\20\327\20\365\21\23\0211\21O\21m\21"
+ "\214\21\252\21\311\21\350\22\7\22&\22E\22d\22\204\22\243\22\303\22"
+ "\343\23\3\23#\23C\23c\23\203\23\244\23\305\23\345\24\6\24'\24I\24j"
+ "\24\213\24\255\24\316\24\360\25\22\0254\25V\25x\25\233\25\275\25\340"
+ "\26\3\26&\26I\26l\26\217\26\262\26\326\26\372\27\35\27A\27e\27\211"
+ "\27\256\27\322\27\367\30\33\30@\30e\30\212\30\257\30\325\30\372\31"
+ " \31E\31k\31\221\31\267\31\335\32\4\32*\32Q\32w\32\236\32\305\32\354"
+ "\33\24\33;\33c\33\212\33\262\33\332\34\2\34*\34R\34{\34\243\34\314"
+ "\34\365\35\36\35G\35p\35\231\35\303\35\354\36\26\36@\36j\36\224\36"
+ "\276\36\351\37\23\37>\37i\37\224\37\277\37\352 \25 A l \230 \304 \360"
+ "!\34!H!u!\241!\316!\373\"'\"U\"\202\"\257\"\335#\n#8#f#\224#\302#\360"
+ "$\37$M$|$\253$\332%\t%8%h%\227%\307%\367&'&W&\207&\267&\350'\30'I'"
+ "z'\253'\334(\r(?(q(\242(\324)\6)8)k)\235)\320*\2*5*h*\233*\317+\2+"
+ "6+i+\235+\321,\5,9,n,\242,\327-\14-A-v-\253-\341.\26.L.\202.\267.\356"
+ "/$/Z/\221/\307/\376050l0\2440\3331\0221J1\2021\2721\3622*2c2\2332\324"
+ "3\r3F3\1773\2703\3614+4e4\2364\3305\0235M5\2075\3025\375676r6\2566"
+ "\3517$7`7\2347\3278\0248P8\2148\3109\0059B9\1779\2749\371:6:t:\262"
+ ":\357;-;k;\252;\350<'<e<\244<\343=\"=a=\241=\340> >`>\240>\340?!?a"
+ "?\242?\342@#@d@\246@\347A)AjA\254A\356B0BrB\265B\367C:C}C\300D\3DG"
+ "D\212D\316E\22EUE\232E\336F\"FgF\253F\360G5G{G\300H\5HKH\221H\327I"
+ "\35IcI\251I\360J7J}J\304K\14KSK\232K\342L*LrL\272M\2MJM\223M\334N%"
+ "NnN\267O\0OIO\223O\335P'PqP\273Q\6QPQ\233Q\346R1R|R\307S\23S_S\252"
+ "S\366TBT\217T\333U(UuU\302V\17V\\V\251V\367WDW\222W\340X/X}X\313Y\32"
+ "YiY\270Z\7ZVZ\246Z\365[E[\225[\345\\5\\\206\\\326]']x]\311^\32^l^\275"
+ "_\17_a_\263`\5`W`\252`\374aOa\242a\365bIb\234b\360cCc\227c\353d@d\224"
+ "d\351e=e\222e\347f=f\222f\350g=g\223g\351h?h\226h\354iCi\232i\361j"
+ "Hj\237j\367kOk\247k\377lWl\257m\10m`m\271n\22nkn\304o\36oxo\321p+p"
+ "\206p\340q:q\225q\360rKr\246s\1s]s\270t\24tpt\314u(u\205u\341v>v\233"
+ "v\370wVw\263x\21xnx\314y*y\211y\347zFz\245{\4{c{\302|!|\201|\341}A"
+ "}\241~\1~b~\302\177#\177\204\177\345\200G\200\250\201\n\201k\201\315"
+ "\2020\202\222\202\364\203W\203\272\204\35\204\200\204\343\205G\205"
+ "\253\206\16\206r\206\327\207;\207\237\210\4\210i\210\316\2113\211\231"
+ "\211\376\212d\212\312\2130\213\226\213\374\214c\214\312\2151\215\230"
+ "\215\377\216f\216\316\2176\217\236\220\6\220n\220\326\221?\221\250"
+ "\222\21\222z\222\343\223M\223\266\224 \224\212\224\364\225_\225\311"
+ "\2264\226\237\227\n\227u\227\340\230L\230\270\231$\231\220\231\374"
+ "\232h\232\325\233B\233\257\234\34\234\211\234\367\235d\235\322\236"
+ "@\236\256\237\35\237\213\237\372\240i\240\330\241G\241\266\242&\242"
+ "\226\243\6\243v\243\346\244V\244\307\2458\245\251\246\32\246\213\246"
+ "\375\247n\247\340\250R\250\304\2517\251\251\252\34\252\217\253\2\253"
+ "u\253\351\254\\\254\320\255D\255\270\256-\256\241\257\26\257\213\260"
+ "\0\260u\260\352\261`\261\326\262K\262\302\2638\263\256\264%\264\234"
+ "\265\23\265\212\266\1\266y\266\360\267h\267\340\270Y\270\321\271J\271"
+ "\302\272;\272\265\273.\273\247\274!\274\233\275\25\275\217\276\n\276"
+ "\204\276\377\277z\277\365\300p\300\354\301g\301\343\302_\302\333\303"
+ "X\303\324\304Q\304\316\305K\305\310\306F\306\303\307A\307\277\310="
+ "\310\274\311:\311\271\3128\312\267\3136\313\266\3145\314\265\3155\315"
+ "\265\3166\316\266\3177\317\270\3209\320\272\321<\321\276\322?\322\301"
+ "\323D\323\306\324I\324\313\325N\325\321\326U\326\330\327\\\327\340"
+ "\330d\330\350\331l\331\361\332v\332\373\333\200\334\5\334\212\335\20"
+ "\335\226\336\34\336\242\337)\337\257\3406\340\275\341D\341\314\342"
+ "S\342\333\343c\343\353\344s\344\374\345\204\346\r\346\226\347\37\347"
+ "\251\3502\350\274\351F\351\320\352[\352\345\353p\353\373\354\206\355"
+ "\21\355\234\356(\356\264\357@\357\314\360X\360\345\361r\361\377\362"
+ "\214\363\31\363\247\3644\364\302\365P\365\336\366m\366\373\367\212"
+ "\370\31\370\250\3718\371\307\372W\372\347\373w\374\7\374\230\375)\375"
+ "\272\376K\376\334\377m\377\377";
+ const size_t kProfileLength = 3212;
+ static_assert(kProfileLength == sizeof(kProfile) - 1, "");
+ return SkData::MakeWithoutCopy(kProfile, kProfileLength);
+}
+
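+// Wrap the sRGB ICC profile above in a PDF stream: "N" gives the number of
+// colour components (3 for RGB) and "Range" lists a [min max] pair for each
+// component.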
+static sk_sp<SkPDFStream> make_srgb_color_profile() {
+ sk_sp<SkPDFStream> stream = sk_make_sp<SkPDFStream>(SkSrgbIcm());
+ stream->dict()->insertInt("N", 3);
+ sk_sp<SkPDFArray> array = sk_make_sp<SkPDFArray>();
+ array->appendScalar(0.0f);
+ array->appendScalar(1.0f);
+ array->appendScalar(0.0f);
+ array->appendScalar(1.0f);
+ array->appendScalar(0.0f);
+ array->appendScalar(1.0f);
+ stream->dict()->insertObject("Range", std::move(array));
+ return stream;
+}
+
+static sk_sp<SkPDFArray> make_srgb_output_intents() {
+ // sRGB is specified by HTML, CSS, and SVG.
+ auto outputIntent = sk_make_sp<SkPDFDict>("OutputIntent");
+ outputIntent->insertName("S", "GTS_PDFA1");
+ outputIntent->insertString("RegistryName", "http://www.color.org");
+ outputIntent->insertString("OutputConditionIdentifier",
+ "Custom");
+ outputIntent->insertString("Info","sRGB IEC61966-2.1");
+ outputIntent->insertObjRef("DestOutputProfile",
+ make_srgb_color_profile());
+ auto intentArray = sk_make_sp<SkPDFArray>();
+ intentArray->appendObject(std::move(outputIntent));
+ return intentArray;
+}
+
+void SkPDFDocument::onClose(SkWStream* stream) {
+ SkASSERT(!fCanvas.get());
+ if (fPages.empty()) {
+ this->reset();
+ return;
+ }
+ auto docCatalog = sk_make_sp<SkPDFDict>("Catalog");
+ if (fPDFA) {
+ SkASSERT(fXMP);
+ docCatalog->insertObjRef("Metadata", fXMP);
+ // Don't specify OutputIntents if we are not in PDF/A mode since
+ // no one has ever asked for this feature.
+ docCatalog->insertObject("OutputIntents", make_srgb_output_intents());
+ }
+ SkASSERT(!fPages.empty());
+ docCatalog->insertObjRef("Pages", generate_page_tree(&fPages));
+ SkASSERT(fPages.empty());
+
+ if (fDests->size() > 0) {
+ docCatalog->insertObjRef("Dests", std::move(fDests));
+ }
+
+ // Build font subsetting info before calling addObjectRecursively().
+ SkPDFCanon* canon = &fCanon;
+ fFonts.foreach([canon](SkPDFFont* p){ p->getFontSubset(canon); });
+ fObjectSerializer.addObjectRecursively(docCatalog);
+ fObjectSerializer.serializeObjects(this->getStream());
+ fObjectSerializer.serializeFooter(this->getStream(), docCatalog, fID);
+ this->reset();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkDocument> SkPDFMakeDocument(SkWStream* stream,
+ void (*proc)(SkWStream*, bool),
+ SkScalar dpi,
+ const SkDocument::PDFMetadata& metadata,
+ sk_sp<SkPixelSerializer> jpeg,
+ bool pdfa) {
+ return stream ? sk_make_sp<SkPDFDocument>(stream, proc, dpi, metadata,
+ std::move(jpeg), pdfa)
+ : nullptr;
+}
+
+sk_sp<SkDocument> SkDocument::MakePDF(const char path[], SkScalar dpi) {
+ auto delete_wstream = [](SkWStream* stream, bool) { delete stream; };
+ auto stream = skstd::make_unique<SkFILEWStream>(path);
+ return stream->isValid()
+ ? SkPDFMakeDocument(stream.release(), delete_wstream, dpi,
+ SkDocument::PDFMetadata(), nullptr,
+ false)
+ : nullptr;
+}
+
+sk_sp<SkDocument> SkDocument::MakePDF(SkWStream* stream,
+ SkScalar dpi,
+ const SkDocument::PDFMetadata& metadata,
+ sk_sp<SkPixelSerializer> jpegEncoder,
+ bool pdfa) {
+ return SkPDFMakeDocument(stream, nullptr, dpi, metadata,
+ std::move(jpegEncoder), pdfa);
+}
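+
+// Typical use of the factories above (illustrative sketch only; the path and
+// page size are placeholders):
+//   sk_sp<SkDocument> doc = SkDocument::MakePDF("output.pdf", 72.0f);
+//   SkCanvas* page = doc->beginPage(612, 792);  // US Letter in points
+//   // ... draw into |page| ...
+//   doc->endPage();
+//   doc->close();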
diff --git a/gfx/skia/skia/src/pdf/SkPDFDocument.h b/gfx/skia/skia/src/pdf/SkPDFDocument.h
new file mode 100644
index 000000000..b62a7a59a
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFDocument.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPDFDocument_DEFINED
+#define SkPDFDocument_DEFINED
+
+#include "SkDocument.h"
+#include "SkPDFCanon.h"
+#include "SkPDFMetadata.h"
+#include "SkPDFFont.h"
+
+class SkPDFDevice;
+
+sk_sp<SkDocument> SkPDFMakeDocument(SkWStream* stream,
+ void (*doneProc)(SkWStream*, bool),
+ SkScalar rasterDpi,
+ const SkDocument::PDFMetadata&,
+ sk_sp<SkPixelSerializer>,
+ bool pdfa);
+
+// Logically part of SkPDFDocument (like SkPDFCanon), but split into its own
+// struct to keep related functionality together.
+struct SkPDFObjectSerializer : SkNoncopyable {
+ SkPDFObjNumMap fObjNumMap;
+ SkTDArray<int32_t> fOffsets;
+ sk_sp<SkPDFObject> fInfoDict;
+ size_t fBaseOffset;
+ int32_t fNextToBeSerialized; // index in fObjNumMap
+
+ SkPDFObjectSerializer();
+ ~SkPDFObjectSerializer();
+ void addObjectRecursively(const sk_sp<SkPDFObject>&);
+ void serializeHeader(SkWStream*, const SkDocument::PDFMetadata&);
+ void serializeObjects(SkWStream*);
+ void serializeFooter(SkWStream*, const sk_sp<SkPDFObject>, sk_sp<SkPDFObject>);
+ int32_t offset(SkWStream*);
+};
+
+/** Concrete implementation of SkDocument that creates PDF files. This
+ class does not produce linearized or optimized PDFs; instead it
+ attempts to use a minimum amount of RAM. */
+class SkPDFDocument : public SkDocument {
+public:
+ SkPDFDocument(SkWStream*,
+ void (*)(SkWStream*, bool),
+ SkScalar,
+ const SkDocument::PDFMetadata&,
+ sk_sp<SkPixelSerializer>,
+ bool);
+ virtual ~SkPDFDocument();
+ SkCanvas* onBeginPage(SkScalar, SkScalar, const SkRect&) override;
+ void onEndPage() override;
+ void onClose(SkWStream*) override;
+ void onAbort() override;
+
+ /**
+ Serialize the object, as well as any other objects it
+ indirectly refers to. If any other objects have been added
+ to the SkPDFObjNumMap without serializing them, they will be
+ serialized as well.
+
+ Objects should not be changed after calling serialize(), since
+ those changes will be too late to appear in the output.
+ */
+ void serialize(const sk_sp<SkPDFObject>&);
+ SkPDFCanon* canon() { return &fCanon; }
+ void registerFont(SkPDFFont* f) { fFonts.add(f); }
+
+private:
+ SkPDFObjectSerializer fObjectSerializer;
+ SkPDFCanon fCanon;
+ SkTArray<sk_sp<SkPDFDict>> fPages;
+ SkTHashSet<SkPDFFont*> fFonts;
+ sk_sp<SkPDFDict> fDests;
+ sk_sp<SkPDFDevice> fPageDevice;
+ sk_sp<SkCanvas> fCanvas;
+ sk_sp<SkPDFObject> fID;
+ sk_sp<SkPDFObject> fXMP;
+ SkScalar fRasterDpi;
+ SkDocument::PDFMetadata fMetadata;
+ bool fPDFA;
+
+ void reset();
+};
+
+#endif // SkPDFDocument_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFFont.cpp b/gfx/skia/skia/src/pdf/SkPDFFont.cpp
new file mode 100644
index 000000000..284e1bfcd
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFFont.cpp
@@ -0,0 +1,736 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkData.h"
+#include "SkGlyphCache.h"
+#include "SkPaint.h"
+#include "SkPDFCanon.h"
+#include "SkPDFConvertType1FontStream.h"
+#include "SkPDFDevice.h"
+#include "SkPDFMakeCIDGlyphWidthsArray.h"
+#include "SkPDFMakeToUnicodeCmap.h"
+#include "SkPDFFont.h"
+#include "SkPDFUtils.h"
+#include "SkRefCnt.h"
+#include "SkScalar.h"
+#include "SkStream.h"
+#include "SkTypes.h"
+#include "SkUtils.h"
+
+#ifdef SK_PDF_USE_SFNTLY
+ #include "sample/chromium/font_subsetter.h"
+#endif
+
+namespace {
+// PDF's notion of symbolic vs non-symbolic is about the character set, not
+// symbols vs. characters. A font rarely uses exactly the character set that
+// would let it be called non-symbolic, so always call it symbolic.
+// (PDF 1.4 spec, section 5.7.1)
+static const int32_t kPdfSymbolic = 4;
+
+struct SkPDFType0Font final : public SkPDFFont {
+ SkPDFType0Font(SkPDFFont::Info, const SkAdvancedTypefaceMetrics&);
+ virtual ~SkPDFType0Font();
+ void getFontSubset(SkPDFCanon*) override;
+#ifdef SK_DEBUG
+ void emitObject(SkWStream*, const SkPDFObjNumMap&) const override;
+ bool fPopulated;
+#endif
+ typedef SkPDFDict INHERITED;
+};
+
+struct SkPDFType1Font final : public SkPDFFont {
+ SkPDFType1Font(SkPDFFont::Info, const SkAdvancedTypefaceMetrics&, SkPDFCanon*);
+ virtual ~SkPDFType1Font() {}
+ void getFontSubset(SkPDFCanon*) override {} // TODO(halcanary): implement
+};
+
+struct SkPDFType3Font final : public SkPDFFont {
+ SkPDFType3Font(SkPDFFont::Info, const SkAdvancedTypefaceMetrics&);
+ virtual ~SkPDFType3Font() {}
+ void getFontSubset(SkPDFCanon*) override;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// File-Local Functions
+///////////////////////////////////////////////////////////////////////////////
+
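+// Build a glyph cache with hinting disabled; unless an explicit size is
+// given, the text size is set to the typeface's units-per-em so that glyph
+// outlines and advances come back in font units.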
+static SkAutoGlyphCache vector_cache(SkTypeface* face, SkScalar size = 0) {
+ SkPaint tmpPaint;
+ tmpPaint.setHinting(SkPaint::kNo_Hinting);
+ tmpPaint.setTypeface(sk_ref_sp(face));
+ if (0 == size) {
+ SkASSERT(face);
+ tmpPaint.setTextSize((SkScalar)face->getUnitsPerEm());
+ } else {
+ tmpPaint.setTextSize(size);
+ }
+ const SkSurfaceProps props(0, kUnknown_SkPixelGeometry);
+ SkAutoGlyphCache glyphCache(tmpPaint, &props, nullptr);
+ SkASSERT(glyphCache.get());
+ return glyphCache;
+}
+
+// scale from em-units to base-1000, returning as a SkScalar
+SkScalar from_font_units(SkScalar scaled, uint16_t emSize) {
+ if (emSize == 1000) {
+ return scaled;
+ } else {
+ return SkScalarMulDiv(scaled, 1000, emSize);
+ }
+}
+
+SkScalar scaleFromFontUnits(int16_t val, uint16_t emSize) {
+ return from_font_units(SkIntToScalar(val), emSize);
+}
+
+
+void setGlyphWidthAndBoundingBox(SkScalar width, SkIRect box,
+ SkDynamicMemoryWStream* content) {
+ // Specify width and bounding box for the glyph.
+ SkPDFUtils::AppendScalar(width, content);
+ content->writeText(" 0 ");
+ content->writeDecAsText(box.fLeft);
+ content->writeText(" ");
+ content->writeDecAsText(box.fTop);
+ content->writeText(" ");
+ content->writeDecAsText(box.fRight);
+ content->writeText(" ");
+ content->writeDecAsText(box.fBottom);
+ content->writeText(" d1\n");
+}
+
+static sk_sp<SkPDFArray> makeFontBBox(SkIRect glyphBBox, uint16_t emSize) {
+ auto bbox = sk_make_sp<SkPDFArray>();
+ bbox->reserve(4);
+ bbox->appendScalar(scaleFromFontUnits(glyphBBox.fLeft, emSize));
+ bbox->appendScalar(scaleFromFontUnits(glyphBBox.fBottom, emSize));
+ bbox->appendScalar(scaleFromFontUnits(glyphBBox.fRight, emSize));
+ bbox->appendScalar(scaleFromFontUnits(glyphBBox.fTop, emSize));
+ return bbox;
+}
+} // namespace
+
+///////////////////////////////////////////////////////////////////////////////
+// class SkPDFFont
+///////////////////////////////////////////////////////////////////////////////
+
+/* Font subset design: It would be nice to be able to subset fonts
+ * (particularly type 3 fonts), but it's a lot of work and not a priority.
+ *
+ * Resources are canonicalized and uniqueified by pointer so there has to be
+ * some additional state indicating which subset of the font is used. It
+ * must be maintained at the page granularity and then combined at the document
+ * granularity. a) change SkPDFFont to fill in its state on demand, kind of
+ * like SkPDFGraphicState. b) maintain a per font glyph usage class in each
+ * page/pdf device. c) in the document, retrieve the per font glyph usage
+ * from each page and combine it and ask for a resource with that subset.
+ */
+
+SkPDFFont::~SkPDFFont() {}
+
+static bool can_embed(const SkAdvancedTypefaceMetrics& metrics) {
+ return !SkToBool(metrics.fFlags & SkAdvancedTypefaceMetrics::kNotEmbeddable_FontFlag);
+}
+
+const SkAdvancedTypefaceMetrics* SkPDFFont::GetMetrics(SkTypeface* typeface,
+ SkPDFCanon* canon) {
+ SkASSERT(typeface);
+ SkFontID id = typeface->uniqueID();
+ if (SkAdvancedTypefaceMetrics** ptr = canon->fTypefaceMetrics.find(id)) {
+ return *ptr;
+ }
+ int count = typeface->countGlyphs();
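+ // Glyph IDs are 16-bit, so a usable typeface has between 1 and 65536
+ // glyphs; anything else is treated as bad and cached as nullptr below.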
+ if (count <= 0 || count > 1 + SK_MaxU16) {
+ // Cache nullptr to skip this check. Use SkSafeUnref().
+ canon->fTypefaceMetrics.set(id, nullptr);
+ return nullptr;
+ }
+ sk_sp<SkAdvancedTypefaceMetrics> metrics(
+ typeface->getAdvancedTypefaceMetrics(
+ SkTypeface::kGlyphNames_PerGlyphInfo | SkTypeface::kToUnicode_PerGlyphInfo,
+ nullptr, 0));
+ if (!metrics) {
+ metrics = sk_make_sp<SkAdvancedTypefaceMetrics>();
+ metrics->fLastGlyphID = SkToU16(count - 1);
+ }
+ SkASSERT(metrics->fLastGlyphID == SkToU16(count - 1));
+ return *canon->fTypefaceMetrics.set(id, metrics.release());
+}
+
+SkAdvancedTypefaceMetrics::FontType SkPDFFont::FontType(const SkAdvancedTypefaceMetrics& metrics) {
+ if (SkToBool(metrics.fFlags & SkAdvancedTypefaceMetrics::kMultiMaster_FontFlag) ||
+ SkToBool(metrics.fFlags & SkAdvancedTypefaceMetrics::kNotEmbeddable_FontFlag)) {
+ // force Type3 fallback.
+ return SkAdvancedTypefaceMetrics::kOther_Font;
+ }
+ return metrics.fType;
+}
+
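+// A single-byte-encoded font addresses glyph 0 plus at most 255 other
+// glyphs, so glyph IDs are grouped into buckets [1..255], [256..510], ...;
+// this returns the first glyph ID of the bucket containing gid.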
+static SkGlyphID first_nonzero_glyph_for_single_byte_encoding(SkGlyphID gid) {
+ return gid != 0 ? gid - (gid - 1) % 255 : 1;
+}
+
+SkPDFFont* SkPDFFont::GetFontResource(SkPDFCanon* canon,
+ SkTypeface* face,
+ SkGlyphID glyphID) {
+ SkASSERT(canon);
+ SkASSERT(face); // All SkPDFDevice::internalDrawText ensures this.
+ const SkAdvancedTypefaceMetrics* fontMetrics = SkPDFFont::GetMetrics(face, canon);
+ SkASSERT(fontMetrics); // SkPDFDevice::internalDrawText ensures the typeface is good.
+ // GetMetrics only returns null to signify a bad typeface.
+ const SkAdvancedTypefaceMetrics& metrics = *fontMetrics;
+ SkAdvancedTypefaceMetrics::FontType type = SkPDFFont::FontType(metrics);
+ bool multibyte = SkPDFFont::IsMultiByte(type);
+ SkGlyphID subsetCode = multibyte ? 0 : first_nonzero_glyph_for_single_byte_encoding(glyphID);
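+ // Key the canon's font map on both the typeface and the 256-glyph subset:
+ // the typeface's unique ID occupies the high bits, the subset's first
+ // glyph ID (0 for multi-byte fonts) the low 16 bits.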
+ uint64_t fontID = (SkTypeface::UniqueID(face) << 16) | subsetCode;
+
+ if (SkPDFFont** found = canon->fFontMap.find(fontID)) {
+ SkPDFFont* foundFont = *found;
+ SkASSERT(foundFont && multibyte == foundFont->multiByteGlyphs());
+ return SkRef(foundFont);
+ }
+
+ sk_sp<SkTypeface> typeface(sk_ref_sp(face));
+ SkASSERT(typeface);
+
+ SkGlyphID lastGlyph = metrics.fLastGlyphID;
+ SkASSERT(typeface->countGlyphs() == SkToInt(1 + metrics.fLastGlyphID));
+
+ // should be caught by SkPDFDevice::internalDrawText
+ SkASSERT(glyphID <= lastGlyph);
+
+ SkGlyphID firstNonZeroGlyph;
+ if (multibyte) {
+ firstNonZeroGlyph = 1;
+ } else {
+ firstNonZeroGlyph = subsetCode;
+ lastGlyph = SkToU16(SkTMin<int>((int)lastGlyph, 254 + (int)subsetCode));
+ }
+ SkPDFFont::Info info = {std::move(typeface), firstNonZeroGlyph, lastGlyph, type};
+ sk_sp<SkPDFFont> font;
+ switch (type) {
+ case SkAdvancedTypefaceMetrics::kType1CID_Font:
+ case SkAdvancedTypefaceMetrics::kTrueType_Font:
+ SkASSERT(multibyte);
+ font = sk_make_sp<SkPDFType0Font>(std::move(info), metrics);
+ break;
+ case SkAdvancedTypefaceMetrics::kType1_Font:
+ SkASSERT(!multibyte);
+ font = sk_make_sp<SkPDFType1Font>(std::move(info), metrics, canon);
+ break;
+ default:
+ SkASSERT(!multibyte);
+ // Type3 is our fallback font.
+ font = sk_make_sp<SkPDFType3Font>(std::move(info), metrics);
+ break;
+ }
+ canon->fFontMap.set(fontID, SkRef(font.get()));
+ return font.release(); // TODO(halcanary) return sk_sp<SkPDFFont>.
+}
+
+SkPDFFont::SkPDFFont(SkPDFFont::Info info)
+ : SkPDFDict("Font")
+ , fTypeface(std::move(info.fTypeface))
+ , fGlyphUsage(info.fLastGlyphID + 1) // TODO(halcanary): Adjust mapping?
+ , fFirstGlyphID(info.fFirstGlyphID)
+ , fLastGlyphID(info.fLastGlyphID)
+ , fFontType(info.fFontType) {
+ SkASSERT(fTypeface);
+}
+
+static void add_common_font_descriptor_entries(SkPDFDict* descriptor,
+ const SkAdvancedTypefaceMetrics& metrics,
+ int16_t defaultWidth) {
+ const uint16_t emSize = metrics.fEmSize;
+ descriptor->insertName("FontName", metrics.fFontName);
+ descriptor->insertInt("Flags", (size_t)(metrics.fStyle | kPdfSymbolic));
+ descriptor->insertScalar("Ascent",
+ scaleFromFontUnits(metrics.fAscent, emSize));
+ descriptor->insertScalar("Descent",
+ scaleFromFontUnits(metrics.fDescent, emSize));
+ descriptor->insertScalar("StemV",
+ scaleFromFontUnits(metrics.fStemV, emSize));
+ descriptor->insertScalar("CapHeight",
+ scaleFromFontUnits(metrics.fCapHeight, emSize));
+ descriptor->insertInt("ItalicAngle", metrics.fItalicAngle);
+ descriptor->insertObject(
+ "FontBBox", makeFontBBox(metrics.fBBox, metrics.fEmSize));
+ if (defaultWidth > 0) {
+ descriptor->insertScalar("MissingWidth",
+ scaleFromFontUnits(defaultWidth, emSize));
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// class SkPDFType0Font
+///////////////////////////////////////////////////////////////////////////////
+
+SkPDFType0Font::SkPDFType0Font(
+ SkPDFFont::Info info,
+ const SkAdvancedTypefaceMetrics& metrics)
+ : SkPDFFont(std::move(info)) {
+ SkDEBUGCODE(fPopulated = false);
+}
+
+SkPDFType0Font::~SkPDFType0Font() {}
+
+
+#ifdef SK_DEBUG
+void SkPDFType0Font::emitObject(SkWStream* stream,
+ const SkPDFObjNumMap& objNumMap) const {
+ SkASSERT(fPopulated);
+ return INHERITED::emitObject(stream, objNumMap);
+}
+#endif
+
+#ifdef SK_PDF_USE_SFNTLY
+// if possible, make no copy.
+static sk_sp<SkData> stream_to_data(std::unique_ptr<SkStreamAsset> stream) {
+ SkASSERT(stream);
+ (void)stream->rewind();
+ SkASSERT(stream->hasLength());
+ size_t size = stream->getLength();
+ if (const void* base = stream->getMemoryBase()) {
+ SkData::ReleaseProc proc =
+ [](const void*, void* ctx) { delete (SkStreamAsset*)ctx; };
+ return SkData::MakeWithProc(base, size, proc, stream.release());
+ }
+ return SkData::MakeFromStream(stream.get(), size);
+}
+
+static sk_sp<SkPDFStream> get_subset_font_stream(
+ std::unique_ptr<SkStreamAsset> fontAsset,
+ const SkBitSet& glyphUsage,
+ const char* fontName,
+ int ttcIndex) {
+ // Generate glyph id array in format needed by sfntly.
+ // TODO(halcanary): sfntly should take a more compact format.
+ SkTDArray<unsigned> subset;
+ if (!glyphUsage.has(0)) {
+ subset.push(0); // Always include glyph 0.
+ }
+ glyphUsage.exportTo(&subset);
+
+ unsigned char* subsetFont{nullptr};
+ sk_sp<SkData> fontData(stream_to_data(std::move(fontAsset)));
+#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) || defined(GOOGLE3)
+ // TODO(halcanary): update Android Framework to newest version of Sfntly.
+ (void)ttcIndex;
+ int subsetFontSize = SfntlyWrapper::SubsetFont(fontName,
+ fontData->bytes(),
+ fontData->size(),
+ subset.begin(),
+ subset.count(),
+ &subsetFont);
+#else
+ (void)fontName;
+ int subsetFontSize = SfntlyWrapper::SubsetFont(ttcIndex,
+ fontData->bytes(),
+ fontData->size(),
+ subset.begin(),
+ subset.count(),
+ &subsetFont);
+#endif
+ fontData.reset();
+ subset.reset();
+ SkASSERT(subsetFontSize > 0 || subsetFont == nullptr);
+ if (subsetFontSize < 1) {
+ return nullptr;
+ }
+ SkASSERT(subsetFont != nullptr);
+ auto subsetStream = sk_make_sp<SkPDFStream>(
+ SkData::MakeWithProc(
+ subsetFont, subsetFontSize,
+ [](const void* p, void*) { delete[] (unsigned char*)p; },
+ nullptr));
+ subsetStream->dict()->insertInt("Length1", subsetFontSize);
+ return subsetStream;
+}
+#endif // SK_PDF_USE_SFNTLY
+
+void SkPDFType0Font::getFontSubset(SkPDFCanon* canon) {
+ const SkAdvancedTypefaceMetrics* metricsPtr =
+ SkPDFFont::GetMetrics(this->typeface(), canon);
+ SkASSERT(metricsPtr);
+ if (!metricsPtr) { return; }
+ const SkAdvancedTypefaceMetrics& metrics = *metricsPtr;
+ SkASSERT(can_embed(metrics));
+ SkAdvancedTypefaceMetrics::FontType type = this->getType();
+ SkTypeface* face = this->typeface();
+ SkASSERT(face);
+
+ auto descriptor = sk_make_sp<SkPDFDict>("FontDescriptor");
+ add_common_font_descriptor_entries(descriptor.get(), metrics, 0);
+
+ int ttcIndex;
+ std::unique_ptr<SkStreamAsset> fontAsset(face->openStream(&ttcIndex));
+ size_t fontSize = fontAsset ? fontAsset->getLength() : 0;
+ if (0 == fontSize) {
+ SkDebugf("Error: (SkTypeface)(%p)::openStream() returned "
+ "empty stream (%p) when identified as kType1CID_Font "
+ "or kTrueType_Font.\n", face, fontAsset.get());
+ } else {
+ switch (type) {
+ case SkAdvancedTypefaceMetrics::kTrueType_Font: {
+ #ifdef SK_PDF_USE_SFNTLY
+ if (!SkToBool(metrics.fFlags &
+ SkAdvancedTypefaceMetrics::kNotSubsettable_FontFlag)) {
+ sk_sp<SkPDFStream> subsetStream = get_subset_font_stream(
+ std::move(fontAsset), this->glyphUsage(),
+ metrics.fFontName.c_str(), ttcIndex);
+ if (subsetStream) {
+ descriptor->insertObjRef("FontFile2", std::move(subsetStream));
+ break;
+ }
+ // If subsetting fails, fall back to original font data.
+ fontAsset.reset(face->openStream(&ttcIndex));
+ SkASSERT(fontAsset);
+ SkASSERT(fontAsset->getLength() == fontSize);
+ if (!fontAsset || fontAsset->getLength() == 0) { break; }
+ }
+ #endif // SK_PDF_USE_SFNTLY
+ auto fontStream = sk_make_sp<SkPDFSharedStream>(std::move(fontAsset));
+ fontStream->dict()->insertInt("Length1", fontSize);
+ descriptor->insertObjRef("FontFile2", std::move(fontStream));
+ break;
+ }
+ case SkAdvancedTypefaceMetrics::kType1CID_Font: {
+ auto fontStream = sk_make_sp<SkPDFSharedStream>(std::move(fontAsset));
+ fontStream->dict()->insertName("Subtype", "CIDFontType0C");
+ descriptor->insertObjRef("FontFile3", std::move(fontStream));
+ break;
+ }
+ default:
+ SkASSERT(false);
+ }
+ }
+
+ auto newCIDFont = sk_make_sp<SkPDFDict>("Font");
+ newCIDFont->insertObjRef("FontDescriptor", std::move(descriptor));
+ newCIDFont->insertName("BaseFont", metrics.fFontName);
+
+ switch (type) {
+ case SkAdvancedTypefaceMetrics::kType1CID_Font:
+ newCIDFont->insertName("Subtype", "CIDFontType0");
+ break;
+ case SkAdvancedTypefaceMetrics::kTrueType_Font:
+ newCIDFont->insertName("Subtype", "CIDFontType2");
+ newCIDFont->insertName("CIDToGIDMap", "Identity");
+ break;
+ default:
+ SkASSERT(false);
+ }
+ auto sysInfo = sk_make_sp<SkPDFDict>();
+ sysInfo->insertString("Registry", "Adobe");
+ sysInfo->insertString("Ordering", "Identity");
+ sysInfo->insertInt("Supplement", 0);
+ newCIDFont->insertObject("CIDSystemInfo", std::move(sysInfo));
+
+ uint16_t emSize = metrics.fEmSize;
+ int16_t defaultWidth = 0;
+ {
+ SkAutoGlyphCache glyphCache = vector_cache(face);
+ sk_sp<SkPDFArray> widths = SkPDFMakeCIDGlyphWidthsArray(
+ glyphCache.get(), &this->glyphUsage(), emSize, &defaultWidth);
+ if (widths && widths->size() > 0) {
+ newCIDFont->insertObject("W", std::move(widths));
+ }
+ newCIDFont->insertScalar(
+ "DW", scaleFromFontUnits(defaultWidth, emSize));
+ }
+
+ ////////////////////////////////////////////////////////////////////////////
+
+ this->insertName("Subtype", "Type0");
+ this->insertName("BaseFont", metrics.fFontName);
+ this->insertName("Encoding", "Identity-H");
+ auto descendantFonts = sk_make_sp<SkPDFArray>();
+ descendantFonts->appendObjRef(std::move(newCIDFont));
+ this->insertObject("DescendantFonts", std::move(descendantFonts));
+
+ if (metrics.fGlyphToUnicode.count() > 0) {
+ this->insertObjRef("ToUnicode",
+ SkPDFMakeToUnicodeCmap(metrics.fGlyphToUnicode,
+ &this->glyphUsage(),
+ multiByteGlyphs(),
+ firstGlyphID(),
+ lastGlyphID()));
+ }
+ SkDEBUGCODE(fPopulated = true);
+ return;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// class SkPDFType1Font
+///////////////////////////////////////////////////////////////////////////////
+
+static sk_sp<SkPDFDict> make_type1_font_descriptor(
+ SkTypeface* typeface,
+ const SkAdvancedTypefaceMetrics& info) {
+ auto descriptor = sk_make_sp<SkPDFDict>("FontDescriptor");
+ add_common_font_descriptor_entries(descriptor.get(), info, 0);
+ if (!can_embed(info)) {
+ return descriptor;
+ }
+ int ttcIndex;
+ size_t header SK_INIT_TO_AVOID_WARNING;
+ size_t data SK_INIT_TO_AVOID_WARNING;
+ size_t trailer SK_INIT_TO_AVOID_WARNING;
+ std::unique_ptr<SkStreamAsset> rawFontData(typeface->openStream(&ttcIndex));
+ sk_sp<SkData> fontData = SkPDFConvertType1FontStream(std::move(rawFontData),
+ &header, &data, &trailer);
+ if (fontData) {
+ auto fontStream = sk_make_sp<SkPDFStream>(std::move(fontData));
+ fontStream->dict()->insertInt("Length1", header);
+ fontStream->dict()->insertInt("Length2", data);
+ fontStream->dict()->insertInt("Length3", trailer);
+ descriptor->insertObjRef("FontFile", std::move(fontStream));
+ }
+ return descriptor;
+}
+
+static void populate_type_1_font(SkPDFDict* font,
+ const SkAdvancedTypefaceMetrics& info,
+ SkTypeface* typeface,
+ SkGlyphID firstGlyphID,
+ SkGlyphID lastGlyphID) {
+ font->insertName("Subtype", "Type1");
+ font->insertName("BaseFont", info.fFontName);
+
+ // glyphCount not including glyph 0
+ unsigned glyphCount = 1 + lastGlyphID - firstGlyphID;
+ SkASSERT(glyphCount > 0 && glyphCount <= 255);
+ font->insertInt("FirstChar", (size_t)0);
+ font->insertInt("LastChar", (size_t)glyphCount);
+ {
+ SkAutoGlyphCache glyphCache = vector_cache(typeface);
+ auto widths = sk_make_sp<SkPDFArray>();
+ SkScalar advance = glyphCache->getGlyphIDAdvance(0).fAdvanceX;
+ const uint16_t emSize = info.fEmSize;
+ widths->appendScalar(from_font_units(advance, emSize));
+ for (unsigned gID = firstGlyphID; gID <= lastGlyphID; gID++) {
+ advance = glyphCache->getGlyphIDAdvance(gID).fAdvanceX;
+ widths->appendScalar(from_font_units(advance, emSize));
+ }
+ font->insertObject("Widths", std::move(widths));
+ }
+ auto encDiffs = sk_make_sp<SkPDFArray>();
+ encDiffs->reserve(lastGlyphID - firstGlyphID + 3);
+ encDiffs->appendInt(0);
+ const SkTArray<SkString>& glyphNames = info.fGlyphNames;
+ SkASSERT(glyphNames.count() > lastGlyphID);
+ encDiffs->appendName(glyphNames[0].c_str());
+ const SkString unknown("UNKNOWN");
+ for (int gID = firstGlyphID; gID <= lastGlyphID; gID++) {
+ const bool valid = gID < glyphNames.count() && !glyphNames[gID].isEmpty();
+ const SkString& name = valid ? glyphNames[gID] : unknown;
+ encDiffs->appendName(name);
+ }
+
+ auto encoding = sk_make_sp<SkPDFDict>("Encoding");
+ encoding->insertObject("Differences", std::move(encDiffs));
+ font->insertObject("Encoding", std::move(encoding));
+}
+
+SkPDFType1Font::SkPDFType1Font(SkPDFFont::Info info,
+ const SkAdvancedTypefaceMetrics& metrics,
+ SkPDFCanon* canon)
+ : SkPDFFont(std::move(info))
+{
+ SkFontID fontID = this->typeface()->uniqueID();
+ sk_sp<SkPDFDict> fontDescriptor;
+ if (SkPDFDict** ptr = canon->fFontDescriptors.find(fontID)) {
+ fontDescriptor = sk_ref_sp(*ptr);
+ } else {
+ fontDescriptor = make_type1_font_descriptor(this->typeface(), metrics);
+ canon->fFontDescriptors.set(fontID, SkRef(fontDescriptor.get()));
+ }
+ this->insertObjRef("FontDescriptor", std::move(fontDescriptor));
+ // TODO(halcanary): subset this (advances and names).
+ populate_type_1_font(this, metrics, this->typeface(),
+ this->firstGlyphID(), this->lastGlyphID());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// class SkPDFType3Font
+///////////////////////////////////////////////////////////////////////////////
+
+namespace {
+// returns [0, first, first+1, ... last-1, last]
+struct SingleByteGlyphIdIterator {
+ SingleByteGlyphIdIterator(SkGlyphID first, SkGlyphID last)
+ : fFirst(first), fLast(last) {
+ SkASSERT(fFirst > 0);
+ SkASSERT(fLast >= first);
+ }
+ struct Iter {
+ void operator++() {
+ fCurrent = (0 == fCurrent) ? fFirst : fCurrent + 1;
+ }
+ // This is an input_iterator
+ SkGlyphID operator*() const { return (SkGlyphID)fCurrent; }
+ bool operator!=(const Iter& rhs) const {
+ return fCurrent != rhs.fCurrent;
+ }
+ Iter(SkGlyphID f, int c) : fFirst(f), fCurrent(c) {}
+ private:
+ const SkGlyphID fFirst;
+ int fCurrent; // must be int to make fLast+1 to fit
+ };
+ Iter begin() const { return Iter(fFirst, 0); }
+ Iter end() const { return Iter(fFirst, (int)fLast + 1); }
+private:
+ const SkGlyphID fFirst;
+ const SkGlyphID fLast;
+};
+}
+
+static void add_type3_font_info(SkPDFCanon* canon,
+ SkPDFDict* font,
+ SkTypeface* typeface,
+ SkScalar emSize,
+ const SkBitSet& subset,
+ SkGlyphID firstGlyphID,
+ SkGlyphID lastGlyphID) {
+ const SkAdvancedTypefaceMetrics* metrics = SkPDFFont::GetMetrics(typeface, canon);
+ SkASSERT(lastGlyphID >= firstGlyphID);
+ // Remove unused glyphs at the end of the range.
+ // Keep the lastGlyphID >= firstGlyphID invariant true.
+ while (lastGlyphID > firstGlyphID && !subset.has(lastGlyphID)) {
+ --lastGlyphID;
+ }
+ SkASSERT(emSize > 0.0f);
+ SkAutoGlyphCache cache = vector_cache(typeface, emSize);
+ font->insertName("Subtype", "Type3");
+ // Flip about the x-axis and scale by 1/emSize.
+ SkMatrix fontMatrix;
+ fontMatrix.setScale(SkScalarInvert(emSize), -SkScalarInvert(emSize));
+ font->insertObject("FontMatrix", SkPDFUtils::MatrixToArray(fontMatrix));
+
+ auto charProcs = sk_make_sp<SkPDFDict>();
+ auto encoding = sk_make_sp<SkPDFDict>("Encoding");
+
+ auto encDiffs = sk_make_sp<SkPDFArray>();
+ // length(firstGlyphID .. lastGlyphID) == lastGlyphID - firstGlyphID + 1
+ // plus 1 for glyph 0;
+ SkASSERT(firstGlyphID > 0);
+ SkASSERT(lastGlyphID >= firstGlyphID);
+ int glyphCount = lastGlyphID - firstGlyphID + 2;
+ // one other entry for the index of first glyph.
+ encDiffs->reserve(glyphCount + 1);
+ encDiffs->appendInt(0); // index of first glyph
+
+ auto widthArray = sk_make_sp<SkPDFArray>();
+ widthArray->reserve(glyphCount);
+
+ SkIRect bbox = SkIRect::MakeEmpty();
+
+ sk_sp<SkPDFStream> emptyStream;
+ for (SkGlyphID gID : SingleByteGlyphIdIterator(firstGlyphID, lastGlyphID)) {
+ bool skipGlyph = gID != 0 && !subset.has(gID);
+ SkString characterName;
+ SkScalar advance = 0.0f;
+ SkIRect glyphBBox;
+ if (skipGlyph) {
+ characterName.set("g0");
+ } else {
+ characterName.printf("g%X", gID);
+ const SkGlyph& glyph = cache->getGlyphIDMetrics(gID);
+ advance = SkFloatToScalar(glyph.fAdvanceX);
+ glyphBBox = SkIRect::MakeXYWH(glyph.fLeft, glyph.fTop,
+ glyph.fWidth, glyph.fHeight);
+ bbox.join(glyphBBox);
+ const SkPath* path = cache->findPath(glyph);
+ if (path && !path->isEmpty()) {
+ SkDynamicMemoryWStream content;
+ setGlyphWidthAndBoundingBox(SkFloatToScalar(glyph.fAdvanceX), glyphBBox,
+ &content);
+ SkPDFUtils::EmitPath(*path, SkPaint::kFill_Style, &content);
+ SkPDFUtils::PaintPath(SkPaint::kFill_Style, path->getFillType(),
+ &content);
+ charProcs->insertObjRef(
+ characterName, sk_make_sp<SkPDFStream>(
+ std::unique_ptr<SkStreamAsset>(content.detachAsStream())));
+ } else {
+ if (!emptyStream) {
+ emptyStream = sk_make_sp<SkPDFStream>(
+ std::unique_ptr<SkStreamAsset>(
+ new SkMemoryStream((size_t)0)));
+ }
+ charProcs->insertObjRef(characterName, emptyStream);
+ }
+ }
+ encDiffs->appendName(characterName.c_str());
+ widthArray->appendScalar(advance);
+ }
+
+ encoding->insertObject("Differences", std::move(encDiffs));
+ font->insertInt("FirstChar", 0);
+ font->insertInt("LastChar", lastGlyphID - firstGlyphID + 1);
+ /* FontBBox: "A rectangle expressed in the glyph coordinate
+ system, specifying the font bounding box. This is the smallest
+ rectangle enclosing the shape that would result if all of the
+ glyphs of the font were placed with their origins coincident and
+ then filled." */
+ auto fontBBox = sk_make_sp<SkPDFArray>();
+ fontBBox->reserve(4);
+ fontBBox->appendInt(bbox.left());
+ fontBBox->appendInt(bbox.bottom());
+ fontBBox->appendInt(bbox.right());
+ fontBBox->appendInt(bbox.top());
+ font->insertObject("FontBBox", std::move(fontBBox));
+ font->insertName("CIDToGIDMap", "Identity");
+ if (metrics && metrics->fGlyphToUnicode.count() > 0) {
+ font->insertObjRef("ToUnicode",
+ SkPDFMakeToUnicodeCmap(metrics->fGlyphToUnicode,
+ &subset,
+ false,
+ firstGlyphID,
+ lastGlyphID));
+ }
+ auto descriptor = sk_make_sp<SkPDFDict>("FontDescriptor");
+ int32_t fontDescriptorFlags = kPdfSymbolic;
+ if (metrics) {
+ // Type3 FontDescriptor does not require all the same fields.
+ descriptor->insertName("FontName", metrics->fFontName);
+ descriptor->insertInt("ItalicAngle", metrics->fItalicAngle);
+ fontDescriptorFlags |= (int32_t)metrics->fStyle;
+ }
+ descriptor->insertInt("Flags", fontDescriptorFlags);
+ font->insertObjRef("FontDescriptor", std::move(descriptor));
+ font->insertObject("Widths", std::move(widthArray));
+ font->insertObject("Encoding", std::move(encoding));
+ font->insertObject("CharProcs", std::move(charProcs));
+}
+
+SkPDFType3Font::SkPDFType3Font(SkPDFFont::Info info,
+ const SkAdvancedTypefaceMetrics& metrics)
+ : SkPDFFont(std::move(info)) {}
+
+void SkPDFType3Font::getFontSubset(SkPDFCanon* canon) {
+ const SkAdvancedTypefaceMetrics* info =
+ SkPDFFont::GetMetrics(this->typeface(), canon);
+ SkASSERT(info);
+ uint16_t emSize = info->fEmSize > 0 ? info->fEmSize : 1000;
+ add_type3_font_info(canon, this, this->typeface(), (SkScalar)emSize,
+ this->glyphUsage(),
+ this->firstGlyphID(), this->lastGlyphID());
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool SkPDFFont::CanEmbedTypeface(SkTypeface* typeface, SkPDFCanon* canon) {
+ const SkAdvancedTypefaceMetrics* metrics = SkPDFFont::GetMetrics(typeface, canon);
+ return metrics && can_embed(*metrics);
+}
+
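+// Release what this font holds once it has been serialized: clear the
+// typeface reference, swap the glyph-usage bit set for an empty one via
+// destroy-and-placement-new, and let SkPDFDict drop its own entries.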
+void SkPDFFont::drop() {
+ fTypeface = nullptr;
+ fGlyphUsage.~SkBitSet();
+ new (&fGlyphUsage) SkBitSet(0);
+ this->SkPDFDict::drop();
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFFont.h b/gfx/skia/skia/src/pdf/SkPDFFont.h
new file mode 100644
index 000000000..a14ae6357
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFFont.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPDFFont_DEFINED
+#define SkPDFFont_DEFINED
+
+#include "SkAdvancedTypefaceMetrics.h"
+#include "SkBitSet.h"
+#include "SkPDFTypes.h"
+#include "SkTDArray.h"
+#include "SkTypeface.h"
+
+class SkPDFCanon;
+class SkPDFFont;
+
+/** \class SkPDFFont
+ A PDF Object class representing a font. The font may have resources
+ attached to it in order to embed the font. SkPDFFonts are canonicalized
+ so that resource deduplication will only include one copy of a font.
+ This class uses the same pattern as SkPDFGraphicState, a static weak
+ reference to each instantiated class.
+*/
+class SkPDFFont : public SkPDFDict {
+
+public:
+ virtual ~SkPDFFont();
+
+ /** Returns the typeface represented by this class. Returns nullptr for the
+ * default typeface.
+ */
+ SkTypeface* typeface() const { return fTypeface.get(); }
+
+ /** Returns the font type represented in this font. For Type0 fonts,
+ * returns the type of the descendant font.
+ */
+ SkAdvancedTypefaceMetrics::FontType getType() const { return fFontType; }
+
+ static SkAdvancedTypefaceMetrics::FontType FontType(const SkAdvancedTypefaceMetrics&);
+
+ static bool IsMultiByte(SkAdvancedTypefaceMetrics::FontType type) {
+ return type == SkAdvancedTypefaceMetrics::kType1CID_Font ||
+ type == SkAdvancedTypefaceMetrics::kTrueType_Font;
+ }
+
+ /** Returns true if this font encoding supports glyph IDs above 255.
+ */
+ bool multiByteGlyphs() const { return SkPDFFont::IsMultiByte(this->getType()); }
+
+ /** Return true if this font has an encoding for the passed glyph id.
+ */
+ bool hasGlyph(SkGlyphID gid) {
+ return (gid >= fFirstGlyphID && gid <= fLastGlyphID) || gid == 0;
+ }
+
+ /** Convert the input glyph ID into the font encoding. */
+ SkGlyphID glyphToPDFFontEncoding(SkGlyphID gid) const {
+ if (this->multiByteGlyphs() || gid == 0) {
+ return gid;
+ }
+ SkASSERT(gid >= fFirstGlyphID && gid <= fLastGlyphID);
+ SkASSERT(fFirstGlyphID > 0);
+ return gid - fFirstGlyphID + 1;
+ }
+
+ void noteGlyphUsage(SkGlyphID glyph) {
+ SkASSERT(this->hasGlyph(glyph));
+ fGlyphUsage.set(glyph);
+ }
+
+ /** Get the font resource for the passed typeface and glyphID. The
+ * reference count of the object is incremented and it is the caller's
+ * responsibility to unreference it when done. This is needed to
+ * accommodate the weak reference pattern used when the returned object
+ * is new and has no other references.
+ * @param typeface The typeface to find, not nullptr.
+ * @param glyphID Specify which section of a large font is of interest.
+ */
+ static SkPDFFont* GetFontResource(SkPDFCanon* canon,
+ SkTypeface* typeface,
+ SkGlyphID glyphID);
+
+ /** Uses (kGlyphNames_PerGlyphInfo | kToUnicode_PerGlyphInfo) to get
+ * SkAdvancedTypefaceMetrics, and caches the result.
+ * @param typeface can not be nullptr.
+ * @return nullptr only when typeface is bad.
+ */
+ static const SkAdvancedTypefaceMetrics* GetMetrics(SkTypeface* typeface,
+ SkPDFCanon* canon);
+
+ /** Subset the font based on current usage.
+ * Must be called before emitObject().
+ */
+ virtual void getFontSubset(SkPDFCanon*) = 0;
+
+ /**
+ * Return false iff the typeface has its NotEmbeddable flag set.
+ * typeface is not nullptr
+ */
+ static bool CanEmbedTypeface(SkTypeface*, SkPDFCanon*);
+
+protected:
+ // Common constructor to handle common members.
+ struct Info {
+ sk_sp<SkTypeface> fTypeface;
+ SkGlyphID fFirstGlyphID;
+ SkGlyphID fLastGlyphID;
+ SkAdvancedTypefaceMetrics::FontType fFontType;
+ };
+ SkPDFFont(Info);
+
+ SkGlyphID firstGlyphID() const { return fFirstGlyphID; }
+ SkGlyphID lastGlyphID() const { return fLastGlyphID; }
+ const SkBitSet& glyphUsage() const { return fGlyphUsage; }
+ sk_sp<SkTypeface> refTypeface() const { return fTypeface; }
+
+ void drop() override;
+
+private:
+ sk_sp<SkTypeface> fTypeface;
+ SkBitSet fGlyphUsage;
+
+ // The glyph IDs accessible with this font. For Type1 (non CID) fonts,
+ // this will be a subset if the font has more than 255 glyphs.
+ const SkGlyphID fFirstGlyphID;
+ const SkGlyphID fLastGlyphID;
+ const SkAdvancedTypefaceMetrics::FontType fFontType;
+
+ typedef SkPDFDict INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFFormXObject.cpp b/gfx/skia/skia/src/pdf/SkPDFFormXObject.cpp
new file mode 100644
index 000000000..d0c7fe76c
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFFormXObject.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkPDFFormXObject.h"
+#include "SkPDFUtils.h"
+
+sk_sp<SkPDFObject> SkPDFMakeFormXObject(std::unique_ptr<SkStreamAsset> content,
+ sk_sp<SkPDFArray> mediaBox,
+ sk_sp<SkPDFDict> resourceDict,
+ const SkMatrix& inverseTransform,
+ const char* colorSpace) {
+ auto form = sk_make_sp<SkPDFStream>(std::move(content));
+ form->dict()->insertName("Type", "XObject");
+ form->dict()->insertName("Subtype", "Form");
+ if (!inverseTransform.isIdentity()) {
+ sk_sp<SkPDFObject> mat(SkPDFUtils::MatrixToArray(inverseTransform));
+ form->dict()->insertObject("Matrix", std::move(mat));
+ }
+ form->dict()->insertObject("Resources", std::move(resourceDict));
+ form->dict()->insertObject("BBox", std::move(mediaBox));
+
+ // Right now FormXObject is only used for saveLayer, which implies
+ // isolated blending. Do this conditionally if that changes.
+ // TODO(halcanary): Is this comment obsolete, since we use it for
+ // alpha masks?
+ auto group = sk_make_sp<SkPDFDict>("Group");
+ group->insertName("S", "Transparency");
+ if (colorSpace != nullptr) {
+ group->insertName("CS", colorSpace);
+ }
+ group->insertBool("I", true); // Isolated.
+ form->dict()->insertObject("Group", std::move(group));
+ return form;
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFFormXObject.h b/gfx/skia/skia/src/pdf/SkPDFFormXObject.h
new file mode 100644
index 000000000..e62b69cd4
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFFormXObject.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPDFFormXObject_DEFINED
+#define SkPDFFormXObject_DEFINED
+
+#include "SkPDFDevice.h"
+#include "SkPDFTypes.h"
+
+/** A form XObject is a self-contained description of a graphics
+ object. A form XObject is a page object with slightly different
+ syntax that can be drawn into a page content stream, just like a
+ bitmap XObject can be drawn into a page content stream.
+*/
+sk_sp<SkPDFObject> SkPDFMakeFormXObject(std::unique_ptr<SkStreamAsset> content,
+ sk_sp<SkPDFArray> mediaBox,
+ sk_sp<SkPDFDict> resourceDict,
+ const SkMatrix& inverseTransform,
+ const char* colorSpace);
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFGraphicState.cpp b/gfx/skia/skia/src/pdf/SkPDFGraphicState.cpp
new file mode 100644
index 000000000..d60526c11
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFGraphicState.cpp
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkData.h"
+#include "SkPaint.h"
+#include "SkPDFCanon.h"
+#include "SkPDFFormXObject.h"
+#include "SkPDFGraphicState.h"
+#include "SkPDFUtils.h"
+
+static const char* as_blend_mode(SkBlendMode mode) {
+ switch (mode) {
+ case SkBlendMode::kSrcOver:
+ return "Normal";
+ case SkBlendMode::kMultiply:
+ return "Multiply";
+ case SkBlendMode::kScreen:
+ return "Screen";
+ case SkBlendMode::kOverlay:
+ return "Overlay";
+ case SkBlendMode::kDarken:
+ return "Darken";
+ case SkBlendMode::kLighten:
+ return "Lighten";
+ case SkBlendMode::kColorDodge:
+ return "ColorDodge";
+ case SkBlendMode::kColorBurn:
+ return "ColorBurn";
+ case SkBlendMode::kHardLight:
+ return "HardLight";
+ case SkBlendMode::kSoftLight:
+ return "SoftLight";
+ case SkBlendMode::kDifference:
+ return "Difference";
+ case SkBlendMode::kExclusion:
+ return "Exclusion";
+ case SkBlendMode::kHue:
+ return "Hue";
+ case SkBlendMode::kSaturation:
+ return "Saturation";
+ case SkBlendMode::kColor:
+ return "Color";
+ case SkBlendMode::kLuminosity:
+ return "Luminosity";
+
+ // These are handled in SkPDFDevice::setUpContentEntry.
+ case SkBlendMode::kClear:
+ case SkBlendMode::kSrc:
+ case SkBlendMode::kDst:
+ case SkBlendMode::kDstOver:
+ case SkBlendMode::kSrcIn:
+ case SkBlendMode::kDstIn:
+ case SkBlendMode::kSrcOut:
+ case SkBlendMode::kDstOut:
+ case SkBlendMode::kSrcATop:
+ case SkBlendMode::kDstATop:
+ case SkBlendMode::kModulate:
+ return "Normal";
+
+ // TODO(vandebo): Figure out if we can support more of these modes.
+ case SkBlendMode::kXor:
+ case SkBlendMode::kPlus:
+ return nullptr;
+ }
+ return nullptr;
+}
+
+// If an SkBlendMode is unsupported in PDF, this function returns
+// kSrcOver; otherwise it returns the mode unchanged.
+static SkBlendMode mode_for_pdf(SkBlendMode mode) {
+ switch (mode) {
+ case SkBlendMode::kSrcOver:
+ case SkBlendMode::kMultiply:
+ case SkBlendMode::kScreen:
+ case SkBlendMode::kOverlay:
+ case SkBlendMode::kDarken:
+ case SkBlendMode::kLighten:
+ case SkBlendMode::kColorDodge:
+ case SkBlendMode::kColorBurn:
+ case SkBlendMode::kHardLight:
+ case SkBlendMode::kSoftLight:
+ case SkBlendMode::kDifference:
+ case SkBlendMode::kExclusion:
+ case SkBlendMode::kHue:
+ case SkBlendMode::kSaturation:
+ case SkBlendMode::kColor:
+ case SkBlendMode::kLuminosity:
+            // Mode is supported and handled by the PDF graphics state.
+ return mode;
+ default:
+ return SkBlendMode::kSrcOver; // Default mode.
+ }
+}
+
+SkPDFGraphicState::SkPDFGraphicState(const SkPaint& p)
+ : fStrokeWidth(p.getStrokeWidth())
+ , fStrokeMiter(p.getStrokeMiter())
+ , fAlpha(p.getAlpha())
+ , fStrokeCap(SkToU8(p.getStrokeCap()))
+ , fStrokeJoin(SkToU8(p.getStrokeJoin()))
+ , fMode(SkToU8((unsigned)mode_for_pdf(p.getBlendMode()))) {}
+
+// static
+SkPDFGraphicState* SkPDFGraphicState::GetGraphicStateForPaint(
+ SkPDFCanon* canon, const SkPaint& paint) {
+ SkASSERT(canon);
+ SkPDFGraphicState key(paint);
+ if (const SkPDFGraphicState* canonGS = canon->findGraphicState(key)) {
+ // The returned SkPDFGraphicState must be made non-const,
+        // since the emitObject() interface is non-const. But we
+ // promise that there is no way to mutate this object from
+ // here on out.
+ return SkRef(const_cast<SkPDFGraphicState*>(canonGS));
+ }
+ SkPDFGraphicState* pdfGraphicState = new SkPDFGraphicState(paint);
+ canon->addGraphicState(pdfGraphicState);
+ return pdfGraphicState;
+}
+
+sk_sp<SkPDFStream> SkPDFGraphicState::MakeInvertFunction() {
+ // Acrobat crashes if we use a type 0 function, kpdf crashes if we use
+ // a type 2 function, so we use a type 4 function.
+ auto domainAndRange = sk_make_sp<SkPDFArray>();
+ domainAndRange->reserve(2);
+ domainAndRange->appendInt(0);
+ domainAndRange->appendInt(1);
+
+ static const char psInvert[] = "{1 exch sub}";
+ // Do not copy the trailing '\0' into the SkData.
+ auto invertFunction = sk_make_sp<SkPDFStream>(
+ SkData::MakeWithoutCopy(psInvert, strlen(psInvert)));
+ invertFunction->dict()->insertInt("FunctionType", 4);
+ invertFunction->dict()->insertObject("Domain", domainAndRange);
+ invertFunction->dict()->insertObject("Range", std::move(domainAndRange));
+ return invertFunction;
+}
+
+sk_sp<SkPDFDict> SkPDFGraphicState::GetSMaskGraphicState(
+ sk_sp<SkPDFObject> sMask,
+ bool invert,
+ SkPDFSMaskMode sMaskMode,
+ SkPDFCanon* canon) {
+    // The practical chances of using the same mask more than once are low
+    // enough that it's not worth canonicalizing.
+ auto sMaskDict = sk_make_sp<SkPDFDict>("Mask");
+ if (sMaskMode == kAlpha_SMaskMode) {
+ sMaskDict->insertName("S", "Alpha");
+ } else if (sMaskMode == kLuminosity_SMaskMode) {
+ sMaskDict->insertName("S", "Luminosity");
+ }
+ sMaskDict->insertObjRef("G", std::move(sMask));
+ if (invert) {
+ // Instead of calling SkPDFGraphicState::MakeInvertFunction,
+ // let the canon deduplicate this object.
+ sMaskDict->insertObjRef("TR", canon->makeInvertFunction());
+ }
+
+ auto result = sk_make_sp<SkPDFDict>("ExtGState");
+ result->insertObject("SMask", std::move(sMaskDict));
+ return result;
+}
+
+sk_sp<SkPDFDict> SkPDFGraphicState::MakeNoSmaskGraphicState() {
+ auto noSMaskGS = sk_make_sp<SkPDFDict>("ExtGState");
+ noSMaskGS->insertName("SMask", "None");
+ return noSMaskGS;
+}
+
+void SkPDFGraphicState::emitObject(
+ SkWStream* stream,
+ const SkPDFObjNumMap& objNumMap) const {
+ auto dict = sk_make_sp<SkPDFDict>("ExtGState");
+ dict->insertName("Type", "ExtGState");
+
+ SkScalar alpha = SkIntToScalar(fAlpha) / 0xFF;
+ dict->insertScalar("CA", alpha);
+ dict->insertScalar("ca", alpha);
+
+ SkPaint::Cap strokeCap = (SkPaint::Cap)fStrokeCap;
+ SkPaint::Join strokeJoin = (SkPaint::Join)fStrokeJoin;
+
+ static_assert(SkPaint::kButt_Cap == 0, "paint_cap_mismatch");
+ static_assert(SkPaint::kRound_Cap == 1, "paint_cap_mismatch");
+ static_assert(SkPaint::kSquare_Cap == 2, "paint_cap_mismatch");
+ static_assert(SkPaint::kCapCount == 3, "paint_cap_mismatch");
+ SkASSERT(strokeCap >= 0 && strokeCap <= 2);
+ dict->insertInt("LC", strokeCap);
+
+ static_assert(SkPaint::kMiter_Join == 0, "paint_join_mismatch");
+ static_assert(SkPaint::kRound_Join == 1, "paint_join_mismatch");
+ static_assert(SkPaint::kBevel_Join == 2, "paint_join_mismatch");
+ static_assert(SkPaint::kJoinCount == 3, "paint_join_mismatch");
+ SkASSERT(strokeJoin >= 0 && strokeJoin <= 2);
+ dict->insertInt("LJ", strokeJoin);
+
+ dict->insertScalar("LW", fStrokeWidth);
+ dict->insertScalar("ML", fStrokeMiter);
+ dict->insertBool("SA", true); // SA = Auto stroke adjustment.
+ dict->insertName("BM", as_blend_mode((SkBlendMode)fMode));
+ dict->emitObject(stream, objNumMap);
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFGraphicState.h b/gfx/skia/skia/src/pdf/SkPDFGraphicState.h
new file mode 100644
index 000000000..8ee6728f5
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFGraphicState.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPDFGraphicState_DEFINED
+#define SkPDFGraphicState_DEFINED
+
+#include "SkPDFTypes.h"
+#include "SkOpts.h"
+
+class SkPaint;
+class SkPDFCanon;
+
+/** \class SkPDFGraphicState
+ SkPaint objects roughly correspond to graphic state dictionaries that can
+ be installed. So that a given dictionary is only output to the pdf file
+ once, we want to canonicalize them.
+*/
+class SkPDFGraphicState final : public SkPDFObject {
+
+public:
+ enum SkPDFSMaskMode {
+ kAlpha_SMaskMode,
+ kLuminosity_SMaskMode
+ };
+
+ // Override emitObject so that we can populate the dictionary on
+ // demand.
+ void emitObject(SkWStream* stream,
+ const SkPDFObjNumMap& objNumMap) const override;
+
+ /** Get the graphic state for the passed SkPaint. The reference count of
+ * the object is incremented and it is the caller's responsibility to
+ * unreference it when done. This is needed to accommodate the weak
+ * reference pattern used when the returned object is new and has no
+ * other references.
+ * @param paint The SkPaint to emulate.
+ */
+ static SkPDFGraphicState* GetGraphicStateForPaint(SkPDFCanon* canon,
+ const SkPaint& paint);
+
+ /** Make a graphic state that only sets the passed soft mask.
+ * @param sMask The form xobject to use as a soft mask.
+ * @param invert Indicates if the alpha of the sMask should be inverted.
+ * @param sMaskMode Whether to use alpha or luminosity for the sMask.
+ *
+ * These are not de-duped.
+ */
+ static sk_sp<SkPDFDict> GetSMaskGraphicState(sk_sp<SkPDFObject> sMask,
+ bool invert,
+ SkPDFSMaskMode sMaskMode,
+ SkPDFCanon* canon);
+
+ /** Make a graphic state that only unsets the soft mask. */
+ static sk_sp<SkPDFDict> MakeNoSmaskGraphicState();
+ static sk_sp<SkPDFStream> MakeInvertFunction();
+
+ bool operator==(const SkPDFGraphicState& rhs) const {
+ return 0 == memcmp(&fStrokeWidth, &rhs.fStrokeWidth, 12);
+ }
+ uint32_t hash() const { return SkOpts::hash(&fStrokeWidth, 12); }
+
+private:
+ const SkScalar fStrokeWidth;
+ const SkScalar fStrokeMiter;
+ const uint8_t fAlpha;
+ const uint8_t fStrokeCap; // SkPaint::Cap
+ const uint8_t fStrokeJoin; // SkPaint::Join
+ const uint8_t fMode; // SkBlendMode
+
+ SkPDFGraphicState(const SkPaint&);
+
+ typedef SkPDFDict INHERITED;
+};
+
+#endif
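A minimal sketch (not part of this patch) of how these factories are meant to be used together, assuming an SkPDFCanon* canon, an SkPaint paint, and an sMaskXObject produced by SkPDFMakeFormXObject are in scope:

    // GetGraphicStateForPaint returns an already-ref'ed pointer; adopt it.
    sk_sp<SkPDFGraphicState> gs(
            SkPDFGraphicState::GetGraphicStateForPaint(canon, paint));

    // ExtGState that installs a luminosity soft mask; the canon deduplicates
    // the invert transfer function when invert is true.
    sk_sp<SkPDFDict> sMaskGS = SkPDFGraphicState::GetSMaskGraphicState(
            sMaskXObject, /*invert=*/false,
            SkPDFGraphicState::kLuminosity_SMaskMode, canon);

    // ExtGState that turns the soft mask off again.
    sk_sp<SkPDFDict> noSMaskGS = SkPDFGraphicState::MakeNoSmaskGraphicState();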
diff --git a/gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.cpp b/gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.cpp
new file mode 100644
index 000000000..988961e64
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.cpp
@@ -0,0 +1,262 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitSet.h"
+#include "SkPDFMakeCIDGlyphWidthsArray.h"
+#include "SkPaint.h"
+#include "SkGlyphCache.h"
+
+// TODO(halcanary): Write unit tests for SkPDFMakeCIDGlyphWidthsArray().
+
+// TODO(halcanary): The logic in this file originated in several
+// disparate places. I feel sure that someone could simplify this
+// down to a single easy-to-read function.
+
+namespace {
+
+struct AdvanceMetric {
+ enum MetricType {
+ kDefault, // Default advance: fAdvance.count = 1
+ kRange, // Advances for a range: fAdvance.count = fEndID-fStartID
+ kRun // fStartID-fEndID have same advance: fAdvance.count = 1
+ };
+ MetricType fType;
+ uint16_t fStartId;
+ uint16_t fEndId;
+ SkTDArray<int16_t> fAdvance;
+ AdvanceMetric(uint16_t startId) : fStartId(startId) {}
+ AdvanceMetric(AdvanceMetric&&) = default;
+ AdvanceMetric& operator=(AdvanceMetric&& other) = default;
+ AdvanceMetric(const AdvanceMetric&) = delete;
+ AdvanceMetric& operator=(const AdvanceMetric&) = delete;
+};
+const int16_t kInvalidAdvance = SK_MinS16;
+const int16_t kDontCareAdvance = SK_MinS16 + 1;
+} // namespace
+
+// scale from em-units to base-1000, returning as a SkScalar
+static SkScalar from_font_units(SkScalar scaled, uint16_t emSize) {
+ if (emSize == 1000) {
+ return scaled;
+ } else {
+ return SkScalarMulDiv(scaled, 1000, emSize);
+ }
+}
+
+static SkScalar scale_from_font_units(int16_t val, uint16_t emSize) {
+ return from_font_units(SkIntToScalar(val), emSize);
+}
+
+static void strip_uninteresting_trailing_advances_from_range(
+ AdvanceMetric* range) {
+ SkASSERT(range);
+
+ int expectedAdvanceCount = range->fEndId - range->fStartId + 1;
+ if (range->fAdvance.count() < expectedAdvanceCount) {
+ return;
+ }
+
+ for (int i = expectedAdvanceCount - 1; i >= 0; --i) {
+ if (range->fAdvance[i] != kDontCareAdvance &&
+ range->fAdvance[i] != kInvalidAdvance &&
+ range->fAdvance[i] != 0) {
+ range->fEndId = range->fStartId + i;
+ break;
+ }
+ }
+}
+
+static void zero_wildcards_in_range(AdvanceMetric* range) {
+ SkASSERT(range);
+ if (range->fType != AdvanceMetric::kRange) {
+ return;
+ }
+ SkASSERT(range->fAdvance.count() == range->fEndId - range->fStartId + 1);
+
+ // Zero out wildcards.
+ for (int i = 0; i < range->fAdvance.count(); ++i) {
+ if (range->fAdvance[i] == kDontCareAdvance) {
+ range->fAdvance[i] = 0;
+ }
+ }
+}
+
+static void finish_range(
+ AdvanceMetric* range,
+ int endId,
+ AdvanceMetric::MetricType type) {
+ range->fEndId = endId;
+ range->fType = type;
+ strip_uninteresting_trailing_advances_from_range(range);
+ int newLength;
+ if (type == AdvanceMetric::kRange) {
+ newLength = range->fEndId - range->fStartId + 1;
+ } else {
+ if (range->fEndId == range->fStartId) {
+ range->fType = AdvanceMetric::kRange;
+ }
+ newLength = 1;
+ }
+ SkASSERT(range->fAdvance.count() >= newLength);
+ range->fAdvance.setCount(newLength);
+ zero_wildcards_in_range(range);
+}
+
+static void compose_advance_data(const AdvanceMetric& range,
+ uint16_t emSize,
+ int16_t* defaultAdvance,
+ SkPDFArray* result) {
+ switch (range.fType) {
+ case AdvanceMetric::kDefault: {
+ SkASSERT(range.fAdvance.count() == 1);
+ *defaultAdvance = range.fAdvance[0];
+ break;
+ }
+ case AdvanceMetric::kRange: {
+ auto advanceArray = sk_make_sp<SkPDFArray>();
+ for (int j = 0; j < range.fAdvance.count(); j++)
+ advanceArray->appendScalar(
+ scale_from_font_units(range.fAdvance[j], emSize));
+ result->appendInt(range.fStartId);
+ result->appendObject(std::move(advanceArray));
+ break;
+ }
+ case AdvanceMetric::kRun: {
+ SkASSERT(range.fAdvance.count() == 1);
+ result->appendInt(range.fStartId);
+ result->appendInt(range.fEndId);
+ result->appendScalar(
+ scale_from_font_units(range.fAdvance[0], emSize));
+ break;
+ }
+ }
+}
+
+/** Retrieve advance data for glyphs. Used by the PDF backend. */
+// TODO(halcanary): this function is complex enough to need its logic
+// tested with unit tests.
+sk_sp<SkPDFArray> SkPDFMakeCIDGlyphWidthsArray(SkGlyphCache* cache,
+ const SkBitSet* subset,
+ uint16_t emSize,
+ int16_t* defaultAdvance) {
+ // Assuming that on average, the ASCII representation of an advance plus
+ // a space is 8 characters and the ASCII representation of a glyph id is 3
+    // characters, then the following cutoffs for using different range types
+ // apply:
+    //  The cost of stopping and starting the range is 7 characters
+ // a. Removing 4 0's or don't care's is a win
+ // The cost of stopping and starting the range plus a run is 22
+ // characters
+ // b. Removing 3 repeating advances is a win
+ // c. Removing 2 repeating advances and 3 don't cares is a win
+ // When not currently in a range the cost of a run over a range is 16
+    //  characters, so:
+ // d. Removing a leading 0/don't cares is a win because it is omitted
+ // e. Removing 2 repeating advances is a win
+
+ auto result = sk_make_sp<SkPDFArray>();
+ int num_glyphs = SkToInt(cache->getGlyphCount());
+
+ bool prevRange = false;
+
+ int16_t lastAdvance = kInvalidAdvance;
+ int repeatedAdvances = 0;
+ int wildCardsInRun = 0;
+ int trailingWildCards = 0;
+
+ // Limit the loop count to glyph id ranges provided.
+ int lastIndex = num_glyphs;
+ if (subset) {
+ while (!subset->has(lastIndex - 1) && lastIndex > 0) {
+ --lastIndex;
+ }
+ }
+ AdvanceMetric curRange(0);
+
+ for (int gId = 0; gId <= lastIndex; gId++) {
+ int16_t advance = kInvalidAdvance;
+ if (gId < lastIndex) {
+ if (!subset || 0 == gId || subset->has(gId)) {
+ advance = (int16_t)cache->getGlyphIDAdvance(gId).fAdvanceX;
+ } else {
+ advance = kDontCareAdvance;
+ }
+ }
+ if (advance == lastAdvance) {
+ repeatedAdvances++;
+ trailingWildCards = 0;
+ } else if (advance == kDontCareAdvance) {
+ wildCardsInRun++;
+ trailingWildCards++;
+ } else if (curRange.fAdvance.count() ==
+ repeatedAdvances + 1 + wildCardsInRun) { // All in run.
+ if (lastAdvance == 0) {
+ curRange.fStartId = gId; // reset
+ curRange.fAdvance.setCount(0);
+ trailingWildCards = 0;
+ } else if (repeatedAdvances + 1 >= 2 || trailingWildCards >= 4) {
+ finish_range(&curRange, gId - 1, AdvanceMetric::kRun);
+ compose_advance_data(curRange, emSize, defaultAdvance, result.get());
+ prevRange = true;
+ curRange = AdvanceMetric(gId);
+ trailingWildCards = 0;
+ }
+ repeatedAdvances = 0;
+ wildCardsInRun = trailingWildCards;
+ trailingWildCards = 0;
+ } else {
+ if (lastAdvance == 0 &&
+ repeatedAdvances + 1 + wildCardsInRun >= 4) {
+ finish_range(&curRange,
+ gId - repeatedAdvances - wildCardsInRun - 2,
+ AdvanceMetric::kRange);
+ compose_advance_data(curRange, emSize, defaultAdvance, result.get());
+ prevRange = true;
+ curRange = AdvanceMetric(gId);
+ trailingWildCards = 0;
+ } else if (trailingWildCards >= 4 && repeatedAdvances + 1 < 2) {
+ finish_range(&curRange, gId - trailingWildCards - 1,
+ AdvanceMetric::kRange);
+ compose_advance_data(curRange, emSize, defaultAdvance, result.get());
+ prevRange = true;
+ curRange = AdvanceMetric(gId);
+ trailingWildCards = 0;
+ } else if (lastAdvance != 0 &&
+ (repeatedAdvances + 1 >= 3 ||
+ (repeatedAdvances + 1 >= 2 && wildCardsInRun >= 3))) {
+ finish_range(&curRange,
+ gId - repeatedAdvances - wildCardsInRun - 2,
+ AdvanceMetric::kRange);
+ compose_advance_data(curRange, emSize, defaultAdvance, result.get());
+ curRange =
+ AdvanceMetric(gId - repeatedAdvances - wildCardsInRun - 1);
+ curRange.fAdvance.append(1, &lastAdvance);
+ finish_range(&curRange, gId - 1, AdvanceMetric::kRun);
+ compose_advance_data(curRange, emSize, defaultAdvance, result.get());
+ prevRange = true;
+ curRange = AdvanceMetric(gId);
+ trailingWildCards = 0;
+ }
+ repeatedAdvances = 0;
+ wildCardsInRun = trailingWildCards;
+ trailingWildCards = 0;
+ }
+ curRange.fAdvance.append(1, &advance);
+ if (advance != kDontCareAdvance) {
+ lastAdvance = advance;
+ }
+ }
+ if (curRange.fStartId == lastIndex) {
+ if (!prevRange) {
+ return nullptr; // https://crbug.com/567031
+ }
+ } else {
+ finish_range(&curRange, lastIndex - 1, AdvanceMetric::kRange);
+ compose_advance_data(curRange, emSize, defaultAdvance, result.get());
+ }
+ return result;
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.h b/gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.h
new file mode 100644
index 000000000..d7a53a9b1
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPDFMakeCIDGlyphWidthsArray_DEFINED
+#define SkPDFMakeCIDGlyphWidthsArray_DEFINED
+
+#include "SkPDFTypes.h"
+
+class SkBitSet;
+class SkGlyphCache;
+
+/* PDF 32000-1:2008, page 270: "The array’s elements have a variable
+ format that can specify individual widths for consecutive CIDs or
+ one width for a range of CIDs". */
+sk_sp<SkPDFArray> SkPDFMakeCIDGlyphWidthsArray(SkGlyphCache* cache,
+ const SkBitSet* subset,
+ uint16_t emSize,
+ int16_t* defaultWidth);
+
+#endif // SkPDFMakeCIDGlyphWidthsArray_DEFINED
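For orientation (not part of this patch): the array built here becomes the CIDFont /W entry described in the header comment above. A kRange entry such as "5 [510 612 498]" assigns individual widths to CIDs 5 through 7, a kRun entry such as "120 250 600" gives every CID from 120 to 250 the width 600, and a kDefault entry is never appended to the array at all; it is folded into the font's default width through the defaultWidth out-parameter.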
diff --git a/gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.cpp b/gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.cpp
new file mode 100644
index 000000000..afe773207
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.cpp
@@ -0,0 +1,225 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPDFMakeToUnicodeCmap.h"
+#include "SkPDFUtils.h"
+#include "SkUtils.h"
+
+static void append_tounicode_header(SkDynamicMemoryWStream* cmap,
+ bool multibyte) {
+    // 12 dict begin: 12 is an Adobe-suggested value. Do not change it.
+    // It's there to prevent old versions of Adobe Reader from malfunctioning.
+ const char* kHeader =
+ "/CIDInit /ProcSet findresource begin\n"
+ "12 dict begin\n"
+ "begincmap\n";
+ cmap->writeText(kHeader);
+
+    // The /CIDSystemInfo must be consistent with the one in
+    // SkPDFFont::populateCIDFont().
+    // We cannot reuse the system info object here because the format is
+    // different; this is not a reference object.
+ const char* kSysInfo =
+ "/CIDSystemInfo\n"
+ "<< /Registry (Adobe)\n"
+ "/Ordering (UCS)\n"
+ "/Supplement 0\n"
+ ">> def\n";
+ cmap->writeText(kSysInfo);
+
+    // The CMapName must be consistent with the /CIDSystemInfo above.
+ // /CMapType 2 means ToUnicode.
+ // Codespace range just tells the PDF processor the valid range.
+ const char* kTypeInfoHeader =
+ "/CMapName /Adobe-Identity-UCS def\n"
+ "/CMapType 2 def\n"
+ "1 begincodespacerange\n";
+ cmap->writeText(kTypeInfoHeader);
+ if (multibyte) {
+ cmap->writeText("<0000> <FFFF>\n");
+ } else {
+ cmap->writeText("<00> <FF>\n");
+ }
+ cmap->writeText("endcodespacerange\n");
+}
+
+static void append_cmap_footer(SkDynamicMemoryWStream* cmap) {
+ const char kFooter[] =
+ "endcmap\n"
+ "CMapName currentdict /CMap defineresource pop\n"
+ "end\n"
+ "end";
+ cmap->writeText(kFooter);
+}
+
+namespace {
+struct BFChar {
+ SkGlyphID fGlyphId;
+ SkUnichar fUnicode;
+};
+
+struct BFRange {
+ SkGlyphID fStart;
+ SkGlyphID fEnd;
+ SkUnichar fUnicode;
+};
+} // namespace
+
+static void write_glyph(SkDynamicMemoryWStream* cmap,
+ bool multiByte,
+ SkGlyphID gid) {
+ if (multiByte) {
+ SkPDFUtils::WriteUInt16BE(cmap, gid);
+ } else {
+ SkPDFUtils::WriteUInt8(cmap, SkToU8(gid));
+ }
+}
+
+static void append_bfchar_section(const SkTDArray<BFChar>& bfchar,
+ bool multiByte,
+ SkDynamicMemoryWStream* cmap) {
+ // PDF spec defines that every bf* list can have at most 100 entries.
+ for (int i = 0; i < bfchar.count(); i += 100) {
+ int count = bfchar.count() - i;
+ count = SkMin32(count, 100);
+ cmap->writeDecAsText(count);
+ cmap->writeText(" beginbfchar\n");
+ for (int j = 0; j < count; ++j) {
+ cmap->writeText("<");
+ write_glyph(cmap, multiByte, bfchar[i + j].fGlyphId);
+ cmap->writeText("> <");
+ SkPDFUtils::WriteUTF16beHex(cmap, bfchar[i + j].fUnicode);
+ cmap->writeText(">\n");
+ }
+ cmap->writeText("endbfchar\n");
+ }
+}
+
+static void append_bfrange_section(const SkTDArray<BFRange>& bfrange,
+ bool multiByte,
+ SkDynamicMemoryWStream* cmap) {
+ // PDF spec defines that every bf* list can have at most 100 entries.
+ for (int i = 0; i < bfrange.count(); i += 100) {
+ int count = bfrange.count() - i;
+ count = SkMin32(count, 100);
+ cmap->writeDecAsText(count);
+ cmap->writeText(" beginbfrange\n");
+ for (int j = 0; j < count; ++j) {
+ cmap->writeText("<");
+ write_glyph(cmap, multiByte, bfrange[i + j].fStart);
+ cmap->writeText("> <");
+ write_glyph(cmap, multiByte, bfrange[i + j].fEnd);
+ cmap->writeText("> <");
+ SkPDFUtils::WriteUTF16beHex(cmap, bfrange[i + j].fUnicode);
+ cmap->writeText(">\n");
+ }
+ cmap->writeText("endbfrange\n");
+ }
+}
+
+// Generate <bfchar> and <bfrange> table according to PDF spec 1.4 and Adobe
+// Technote 5014.
+// The function is not static so we can test it in unit tests.
+//
+// Current implementation guarantees bfchar and bfrange entries do not overlap.
+//
+// The current implementation does not attempt aggressive optimizations for
+// the following case because the specification is not clear.
+//
+// 4 beginbfchar 1 beginbfchar
+// <0003> <0013> <0020> <0014>
+// <0005> <0015> to endbfchar
+// <0007> <0017> 1 beginbfrange
+// <0020> <0014> <0003> <0007> <0013>
+// endbfchar endbfrange
+//
+// Adobe Technote 5014 said: "Code mappings (unlike codespace ranges) may
+// overlap, but succeeding maps supersede preceding maps."
+//
+// When searching text in a PDF, bfrange has higher precedence, so typing
+// char id 0x0014 in the search box will match glyph id 0x0004 first. However,
+// the spec does not say how this kind of conflict should be resolved.
+//
+// In the worst case (65536 consecutive Unicode code points with every other
+// one used), the possible savings from aggressive optimization is 416KB
+// pre-compressed, which does not provide enough motivation to implement it.
+void SkPDFAppendCmapSections(const SkTDArray<SkUnichar>& glyphToUnicode,
+ const SkBitSet* subset,
+ SkDynamicMemoryWStream* cmap,
+ bool multiByteGlyphs,
+ SkGlyphID firstGlyphID,
+ SkGlyphID lastGlyphID) {
+ if (glyphToUnicode.isEmpty()) {
+ return;
+ }
+ int glyphOffset = 0;
+ if (!multiByteGlyphs) {
+ glyphOffset = firstGlyphID - 1;
+ }
+
+ SkTDArray<BFChar> bfcharEntries;
+ SkTDArray<BFRange> bfrangeEntries;
+
+ BFRange currentRangeEntry = {0, 0, 0};
+ bool rangeEmpty = true;
+ const int limit =
+ SkMin32(lastGlyphID + 1, glyphToUnicode.count()) - glyphOffset;
+
+ for (int i = firstGlyphID - glyphOffset; i < limit + 1; ++i) {
+ bool inSubset = i < limit &&
+ (subset == nullptr || subset->has(i + glyphOffset));
+ if (!rangeEmpty) {
+            // The PDF spec requires that a bfrange not change the high byte,
+ // e.g. <1035> <10FF> <2222> is ok, but
+ // <1035> <1100> <2222> is no good
+ bool inRange =
+ i == currentRangeEntry.fEnd + 1 &&
+ i >> 8 == currentRangeEntry.fStart >> 8 &&
+ i < limit &&
+ glyphToUnicode[i + glyphOffset] ==
+ currentRangeEntry.fUnicode + i - currentRangeEntry.fStart;
+ if (!inSubset || !inRange) {
+ if (currentRangeEntry.fEnd > currentRangeEntry.fStart) {
+ bfrangeEntries.push(currentRangeEntry);
+ } else {
+ BFChar* entry = bfcharEntries.append();
+ entry->fGlyphId = currentRangeEntry.fStart;
+ entry->fUnicode = currentRangeEntry.fUnicode;
+ }
+ rangeEmpty = true;
+ }
+ }
+ if (inSubset) {
+ currentRangeEntry.fEnd = i;
+ if (rangeEmpty) {
+ currentRangeEntry.fStart = i;
+ currentRangeEntry.fUnicode = glyphToUnicode[i + glyphOffset];
+ rangeEmpty = false;
+ }
+ }
+ }
+
+    // The spec requires that all bfchar entries for a font come before
+    // bfrange entries.
+ append_bfchar_section(bfcharEntries, multiByteGlyphs, cmap);
+ append_bfrange_section(bfrangeEntries, multiByteGlyphs, cmap);
+}
+
+sk_sp<SkPDFStream> SkPDFMakeToUnicodeCmap(
+ const SkTDArray<SkUnichar>& glyphToUnicode,
+ const SkBitSet* subset,
+ bool multiByteGlyphs,
+ SkGlyphID firstGlyphID,
+ SkGlyphID lastGlyphID) {
+ SkDynamicMemoryWStream cmap;
+ append_tounicode_header(&cmap, multiByteGlyphs);
+ SkPDFAppendCmapSections(glyphToUnicode, subset, &cmap, multiByteGlyphs,
+ firstGlyphID, lastGlyphID);
+ append_cmap_footer(&cmap);
+ return sk_make_sp<SkPDFStream>(
+ std::unique_ptr<SkStreamAsset>(cmap.detachAsStream()));
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.h b/gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.h
new file mode 100644
index 000000000..0c4d1c37d
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPDFMakeToUnicodeCmap_DEFINED
+#define SkPDFMakeToUnicodeCmap_DEFINED
+
+#include "SkTDArray.h"
+#include "SkPDFFont.h"
+#include "SkStream.h"
+
+sk_sp<SkPDFStream> SkPDFMakeToUnicodeCmap(
+ const SkTDArray<SkUnichar>& glyphToUnicode,
+ const SkBitSet* subset,
+ bool multiByteGlyphs,
+ SkGlyphID firstGlyphID,
+ SkGlyphID lastGlyphID);
+
+// Exposed for unit testing.
+void SkPDFAppendCmapSections(const SkTDArray<SkUnichar>& glyphToUnicode,
+ const SkBitSet* subset,
+ SkDynamicMemoryWStream* cmap,
+ bool multiByteGlyphs,
+ SkGlyphID firstGlyphID,
+ SkGlyphID lastGlyphID);
+
+#endif // SkPDFMakeToUnicodeCmap_DEFINED
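A minimal test-style sketch (not part of this patch) exercising SkPDFAppendCmapSections directly, as the "exposed for unit testing" comment suggests; glyph ids 1 through 3 are assumed to map to 'A' through 'C':

    SkTDArray<SkUnichar> glyphToUnicode;
    SkUnichar notdef = 0;
    glyphToUnicode.append(1, &notdef);   // glyph 0 has no mapping
    for (SkUnichar u = 'A'; u <= 'C'; ++u) {
        glyphToUnicode.append(1, &u);    // glyphs 1..3 -> U+0041..U+0043
    }
    SkDynamicMemoryWStream cmap;
    SkPDFAppendCmapSections(glyphToUnicode, /*subset=*/nullptr, &cmap,
                            /*multiByteGlyphs=*/true,
                            /*firstGlyphID=*/1, /*lastGlyphID=*/3);
    // cmap now contains a single range: 1 beginbfrange <0001> <0003> <0041>.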
diff --git a/gfx/skia/skia/src/pdf/SkPDFMetadata.cpp b/gfx/skia/skia/src/pdf/SkPDFMetadata.cpp
new file mode 100644
index 000000000..42068843b
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFMetadata.cpp
@@ -0,0 +1,329 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMD5.h"
+#include "SkMilestone.h"
+#include "SkPDFMetadata.h"
+#include "SkPDFTypes.h"
+#include <utility>
+
+#define SKPDF_STRING(X) SKPDF_STRING_IMPL(X)
+#define SKPDF_STRING_IMPL(X) #X
+#define SKPDF_PRODUCER "Skia/PDF m" SKPDF_STRING(SK_MILESTONE)
+#define SKPDF_CUSTOM_PRODUCER_KEY "ProductionLibrary"
+
+static SkString pdf_date(const SkTime::DateTime& dt) {
+ int timeZoneMinutes = SkToInt(dt.fTimeZoneMinutes);
+ char timezoneSign = timeZoneMinutes >= 0 ? '+' : '-';
+ int timeZoneHours = SkTAbs(timeZoneMinutes) / 60;
+ timeZoneMinutes = SkTAbs(timeZoneMinutes) % 60;
+ return SkStringPrintf(
+ "D:%04u%02u%02u%02u%02u%02u%c%02d'%02d'",
+ static_cast<unsigned>(dt.fYear), static_cast<unsigned>(dt.fMonth),
+ static_cast<unsigned>(dt.fDay), static_cast<unsigned>(dt.fHour),
+ static_cast<unsigned>(dt.fMinute),
+ static_cast<unsigned>(dt.fSecond), timezoneSign, timeZoneHours,
+ timeZoneMinutes);
+}
+
+namespace {
+static const struct {
+ const char* const key;
+ SkString SkDocument::PDFMetadata::*const valuePtr;
+} gMetadataKeys[] = {
+ {"Title", &SkDocument::PDFMetadata::fTitle},
+ {"Author", &SkDocument::PDFMetadata::fAuthor},
+ {"Subject", &SkDocument::PDFMetadata::fSubject},
+ {"Keywords", &SkDocument::PDFMetadata::fKeywords},
+ {"Creator", &SkDocument::PDFMetadata::fCreator},
+};
+} // namespace
+
+sk_sp<SkPDFObject> SkPDFMetadata::MakeDocumentInformationDict(
+ const SkDocument::PDFMetadata& metadata) {
+ auto dict = sk_make_sp<SkPDFDict>();
+ for (const auto keyValuePtr : gMetadataKeys) {
+ const SkString& value = metadata.*(keyValuePtr.valuePtr);
+ if (value.size() > 0) {
+ dict->insertString(keyValuePtr.key, value);
+ }
+ }
+ if (metadata.fProducer.isEmpty()) {
+ dict->insertString("Producer", SKPDF_PRODUCER);
+ } else {
+ dict->insertString("Producer", metadata.fProducer);
+ dict->insertString(SKPDF_CUSTOM_PRODUCER_KEY, SKPDF_PRODUCER);
+ }
+ if (metadata.fCreation.fEnabled) {
+ dict->insertString("CreationDate",
+ pdf_date(metadata.fCreation.fDateTime));
+ }
+ if (metadata.fModified.fEnabled) {
+ dict->insertString("ModDate", pdf_date(metadata.fModified.fDateTime));
+ }
+ return dict;
+}
+
+SkPDFMetadata::UUID SkPDFMetadata::CreateUUID(
+ const SkDocument::PDFMetadata& metadata) {
+ // The main requirement is for the UUID to be unique; the exact
+ // format of the data that will be hashed is not important.
+ SkMD5 md5;
+ const char uuidNamespace[] = "org.skia.pdf\n";
+ md5.write(uuidNamespace, strlen(uuidNamespace));
+ double msec = SkTime::GetMSecs();
+ md5.write(&msec, sizeof(msec));
+ SkTime::DateTime dateTime;
+ SkTime::GetDateTime(&dateTime);
+ md5.write(&dateTime, sizeof(dateTime));
+ if (metadata.fCreation.fEnabled) {
+ md5.write(&metadata.fCreation.fDateTime,
+ sizeof(metadata.fCreation.fDateTime));
+ }
+ if (metadata.fModified.fEnabled) {
+ md5.write(&metadata.fModified.fDateTime,
+ sizeof(metadata.fModified.fDateTime));
+ }
+
+ for (const auto keyValuePtr : gMetadataKeys) {
+ md5.write(keyValuePtr.key, strlen(keyValuePtr.key));
+ md5.write("\037", 1);
+ const SkString& value = metadata.*(keyValuePtr.valuePtr);
+ md5.write(value.c_str(), value.size());
+ md5.write("\036", 1);
+ }
+ SkMD5::Digest digest;
+ md5.finish(digest);
+ // See RFC 4122, page 6-7.
+ digest.data[6] = (digest.data[6] & 0x0F) | 0x30;
+    digest.data[8] = (digest.data[8] & 0x3F) | 0x80;
+ static_assert(sizeof(digest) == sizeof(UUID), "uuid_size");
+ SkPDFMetadata::UUID uuid;
+ memcpy(&uuid, &digest, sizeof(digest));
+ return uuid;
+}
+
+sk_sp<SkPDFObject> SkPDFMetadata::MakePdfId(const UUID& doc,
+ const UUID& instance) {
+ // /ID [ <81b14aafa313db63dbd6f981e49f94f4>
+ // <81b14aafa313db63dbd6f981e49f94f4> ]
+ auto array = sk_make_sp<SkPDFArray>();
+ static_assert(sizeof(SkPDFMetadata::UUID) == 16, "uuid_size");
+ array->appendString(
+ SkString(reinterpret_cast<const char*>(&doc), sizeof(UUID)));
+ array->appendString(
+ SkString(reinterpret_cast<const char*>(&instance), sizeof(UUID)));
+ return array;
+}
+
+#define HEXIFY(INPUT_PTR, OUTPUT_PTR, HEX_STRING, BYTE_COUNT) \
+ do { \
+ for (int i = 0; i < (BYTE_COUNT); ++i) { \
+ uint8_t value = *(INPUT_PTR)++; \
+ *(OUTPUT_PTR)++ = (HEX_STRING)[value >> 4]; \
+ *(OUTPUT_PTR)++ = (HEX_STRING)[value & 0xF]; \
+ } \
+ } while (false)
+static SkString uuid_to_string(const SkPDFMetadata::UUID& uuid) {
+ // 8-4-4-4-12
+ char buffer[36]; // [32 + 4]
+ static const char gHex[] = "0123456789abcdef";
+ SkASSERT(strlen(gHex) == 16);
+ char* ptr = buffer;
+ const uint8_t* data = uuid.fData;
+ HEXIFY(data, ptr, gHex, 4);
+ *ptr++ = '-';
+ HEXIFY(data, ptr, gHex, 2);
+ *ptr++ = '-';
+ HEXIFY(data, ptr, gHex, 2);
+ *ptr++ = '-';
+ HEXIFY(data, ptr, gHex, 2);
+ *ptr++ = '-';
+ HEXIFY(data, ptr, gHex, 6);
+ SkASSERT(ptr == buffer + 36);
+ SkASSERT(data == uuid.fData + 16);
+ return SkString(buffer, 36);
+}
+#undef HEXIFY
+
+namespace {
+class PDFXMLObject final : public SkPDFObject {
+public:
+ PDFXMLObject(SkString xml) : fXML(std::move(xml)) {}
+ void emitObject(SkWStream* stream,
+ const SkPDFObjNumMap& omap) const override {
+ SkPDFDict dict("Metadata");
+ dict.insertName("Subtype", "XML");
+ dict.insertInt("Length", fXML.size());
+ dict.emitObject(stream, omap);
+ static const char streamBegin[] = " stream\n";
+ stream->write(streamBegin, strlen(streamBegin));
+ // Do not compress this. The standard requires that a
+ // program that does not understand PDF can grep for
+ // "<?xpacket" and extracť the entire XML.
+ stream->write(fXML.c_str(), fXML.size());
+ static const char streamEnd[] = "\nendstream";
+ stream->write(streamEnd, strlen(streamEnd));
+ }
+
+private:
+ const SkString fXML;
+};
+} // namespace
+
+static int count_xml_escape_size(const SkString& input) {
+ int extra = 0;
+ for (size_t i = 0; i < input.size(); ++i) {
+ if (input[i] == '&') {
+ extra += 4; // strlen("&amp;") - strlen("&")
+ } else if (input[i] == '<') {
+ extra += 3; // strlen("&lt;") - strlen("<")
+ }
+ }
+ return extra;
+}
+
+const SkString escape_xml(const SkString& input,
+ const char* before = nullptr,
+ const char* after = nullptr) {
+ if (input.size() == 0) {
+ return input;
+ }
+ // "&" --> "&amp;" and "<" --> "&lt;"
+    // Text is assumed to be in UTF-8.
+    // All strings are XML content, not attribute values.
+ size_t beforeLen = before ? strlen(before) : 0;
+ size_t afterLen = after ? strlen(after) : 0;
+ int extra = count_xml_escape_size(input);
+ SkString output(input.size() + extra + beforeLen + afterLen);
+ char* out = output.writable_str();
+ if (before) {
+ strncpy(out, before, beforeLen);
+ out += beforeLen;
+ }
+ static const char kAmp[] = "&amp;";
+ static const char kLt[] = "&lt;";
+ for (size_t i = 0; i < input.size(); ++i) {
+ if (input[i] == '&') {
+ strncpy(out, kAmp, strlen(kAmp));
+ out += strlen(kAmp);
+ } else if (input[i] == '<') {
+ strncpy(out, kLt, strlen(kLt));
+ out += strlen(kLt);
+ } else {
+ *out++ = input[i];
+ }
+ }
+ if (after) {
+ strncpy(out, after, afterLen);
+ out += afterLen;
+ }
+ // Validate that we haven't written outside of our string.
+ SkASSERT(out == &output.writable_str()[output.size()]);
+ *out = '\0';
+ return output;
+}
+
+sk_sp<SkPDFObject> SkPDFMetadata::MakeXMPObject(
+ const SkDocument::PDFMetadata& metadata,
+ const UUID& doc,
+ const UUID& instance) {
+ static const char templateString[] =
+ "<?xpacket begin=\"\" id=\"W5M0MpCehiHzreSzNTczkc9d\"?>\n"
+ "<x:xmpmeta xmlns:x=\"adobe:ns:meta/\"\n"
+ " x:xmptk=\"Adobe XMP Core 5.4-c005 78.147326, "
+ "2012/08/23-13:03:03\">\n"
+ "<rdf:RDF "
+ "xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\">\n"
+ "<rdf:Description rdf:about=\"\"\n"
+ " xmlns:xmp=\"http://ns.adobe.com/xap/1.0/\"\n"
+ " xmlns:dc=\"http://purl.org/dc/elements/1.1/\"\n"
+ " xmlns:xmpMM=\"http://ns.adobe.com/xap/1.0/mm/\"\n"
+ " xmlns:pdf=\"http://ns.adobe.com/pdf/1.3/\"\n"
+ " xmlns:pdfaid=\"http://www.aiim.org/pdfa/ns/id/\">\n"
+ "<pdfaid:part>2</pdfaid:part>\n"
+ "<pdfaid:conformance>B</pdfaid:conformance>\n"
+ "%s" // ModifyDate
+ "%s" // CreateDate
+ "%s" // xmp:CreatorTool
+ "<dc:format>application/pdf</dc:format>\n"
+ "%s" // dc:title
+ "%s" // dc:description
+ "%s" // author
+ "%s" // keywords
+ "<xmpMM:DocumentID>uuid:%s</xmpMM:DocumentID>\n"
+ "<xmpMM:InstanceID>uuid:%s</xmpMM:InstanceID>\n"
+ "%s" // pdf:Producer
+ "%s" // pdf:Keywords
+ "</rdf:Description>\n"
+ "</rdf:RDF>\n"
+ "</x:xmpmeta>\n" // Note: the standard suggests 4k of padding.
+ "<?xpacket end=\"w\"?>\n";
+
+ SkString creationDate;
+ SkString modificationDate;
+ if (metadata.fCreation.fEnabled) {
+ SkString tmp;
+ metadata.fCreation.fDateTime.toISO8601(&tmp);
+ SkASSERT(0 == count_xml_escape_size(tmp));
+ // YYYY-mm-ddTHH:MM:SS[+|-]ZZ:ZZ; no need to escape
+ creationDate = SkStringPrintf("<xmp:CreateDate>%s</xmp:CreateDate>\n",
+ tmp.c_str());
+ }
+ if (metadata.fModified.fEnabled) {
+ SkString tmp;
+ metadata.fModified.fDateTime.toISO8601(&tmp);
+ SkASSERT(0 == count_xml_escape_size(tmp));
+ modificationDate = SkStringPrintf(
+ "<xmp:ModifyDate>%s</xmp:ModifyDate>\n", tmp.c_str());
+ }
+ SkString title =
+ escape_xml(metadata.fTitle,
+ "<dc:title><rdf:Alt><rdf:li xml:lang=\"x-default\">",
+ "</rdf:li></rdf:Alt></dc:title>\n");
+ SkString author =
+ escape_xml(metadata.fAuthor, "<dc:creator><rdf:Bag><rdf:li>",
+ "</rdf:li></rdf:Bag></dc:creator>\n");
+ // TODO: in theory, XMP can support multiple authors. Split on a delimiter?
+ SkString subject = escape_xml(
+ metadata.fSubject,
+ "<dc:description><rdf:Alt><rdf:li xml:lang=\"x-default\">",
+ "</rdf:li></rdf:Alt></dc:description>\n");
+ SkString keywords1 =
+ escape_xml(metadata.fKeywords, "<dc:subject><rdf:Bag><rdf:li>",
+ "</rdf:li></rdf:Bag></dc:subject>\n");
+ SkString keywords2 = escape_xml(metadata.fKeywords, "<pdf:Keywords>",
+ "</pdf:Keywords>\n");
+ // TODO: in theory, keywords can be a list too.
+
+ SkString producer("<pdf:Producer>" SKPDF_PRODUCER "</pdf:Producer>\n");
+ if (!metadata.fProducer.isEmpty()) {
+ // TODO: register a developer prefix to make
+ // <skia:SKPDF_CUSTOM_PRODUCER_KEY> a real XML tag.
+ producer = escape_xml(
+ metadata.fProducer, "<pdf:Producer>",
+ "</pdf:Producer>\n<!-- <skia:" SKPDF_CUSTOM_PRODUCER_KEY ">"
+ SKPDF_PRODUCER "</skia:" SKPDF_CUSTOM_PRODUCER_KEY "> -->\n");
+ }
+
+ SkString creator = escape_xml(metadata.fCreator, "<xmp:CreatorTool>",
+ "</xmp:CreatorTool>\n");
+ SkString documentID = uuid_to_string(doc); // no need to escape
+ SkASSERT(0 == count_xml_escape_size(documentID));
+ SkString instanceID = uuid_to_string(instance);
+ SkASSERT(0 == count_xml_escape_size(instanceID));
+ return sk_make_sp<PDFXMLObject>(SkStringPrintf(
+ templateString, modificationDate.c_str(), creationDate.c_str(),
+ creator.c_str(), title.c_str(), subject.c_str(), author.c_str(),
+ keywords1.c_str(), documentID.c_str(), instanceID.c_str(),
+ producer.c_str(), keywords2.c_str()));
+}
+
+#undef SKPDF_CUSTOM_PRODUCER_KEY
+#undef SKPDF_PRODUCER
+#undef SKPDF_STRING
+#undef SKPDF_STRING_IMPL
diff --git a/gfx/skia/skia/src/pdf/SkPDFMetadata.h b/gfx/skia/skia/src/pdf/SkPDFMetadata.h
new file mode 100644
index 000000000..3091be4d7
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFMetadata.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPDFMetadata_DEFINED
+#define SkPDFMetadata_DEFINED
+
+#include "SkDocument.h"
+
+class SkPDFObject;
+
+namespace SkPDFMetadata {
+sk_sp<SkPDFObject> MakeDocumentInformationDict(const SkDocument::PDFMetadata&);
+
+struct UUID {
+ uint8_t fData[16];
+};
+
+UUID CreateUUID(const SkDocument::PDFMetadata&);
+
+sk_sp<SkPDFObject> MakePdfId(const UUID& doc, const UUID& instance);
+
+sk_sp<SkPDFObject> MakeXMPObject(const SkDocument::PDFMetadata&,
+ const UUID& doc,
+ const UUID& instance);
+}
+#endif // SkPDFMetadata_DEFINED
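A minimal sketch (not part of this patch) of how a document is expected to drive this namespace, assuming the SkDocument::PDFMetadata fields referenced in SkPDFMetadata.cpp:

    SkDocument::PDFMetadata metadata;
    metadata.fTitle.set("Example document");
    metadata.fCreator.set("ExampleApp");   // hypothetical creator tool

    sk_sp<SkPDFObject> info =
            SkPDFMetadata::MakeDocumentInformationDict(metadata);
    SkPDFMetadata::UUID uuid = SkPDFMetadata::CreateUUID(metadata);
    // A freshly created document may use the same UUID for both halves of
    // the /ID pair, as in the example shown in MakePdfId.
    sk_sp<SkPDFObject> id  = SkPDFMetadata::MakePdfId(uuid, uuid);
    sk_sp<SkPDFObject> xmp = SkPDFMetadata::MakeXMPObject(metadata, uuid, uuid);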
diff --git a/gfx/skia/skia/src/pdf/SkPDFResourceDict.cpp b/gfx/skia/skia/src/pdf/SkPDFResourceDict.cpp
new file mode 100644
index 000000000..67e81b6c7
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFResourceDict.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPDFResourceDict.h"
+#include "SkPDFTypes.h"
+#include "SkPostConfig.h"
+
+// Sanity check that the values of enum SkPDFResourceType correspond to the
+// expected values as defined in the arrays below.
+// If these assertions fail, you may need to update the resource_type_prefixes
+// and resource_type_names arrays below.
+static_assert(SkPDFResourceDict::kExtGState_ResourceType == 0, "resource_type_mismatch");
+static_assert(SkPDFResourceDict::kPattern_ResourceType == 1, "resource_type_mismatch");
+static_assert(SkPDFResourceDict::kXObject_ResourceType == 2, "resource_type_mismatch");
+static_assert(SkPDFResourceDict::kFont_ResourceType == 3, "resource_type_mismatch");
+
+static const char resource_type_prefixes[] = {
+ 'G',
+ 'P',
+ 'X',
+ 'F'
+};
+
+static const char* resource_type_names[] = {
+ "ExtGState",
+ "Pattern",
+ "XObject",
+ "Font"
+};
+
+char SkPDFResourceDict::GetResourceTypePrefix(
+ SkPDFResourceDict::SkPDFResourceType type) {
+ SkASSERT(type >= 0);
+ SkASSERT(type < SkPDFResourceDict::kResourceTypeCount);
+
+ return resource_type_prefixes[type];
+}
+
+static const char* get_resource_type_name(
+ SkPDFResourceDict::SkPDFResourceType type) {
+ SkASSERT(type >= 0);
+ SkASSERT(type < SK_ARRAY_COUNT(resource_type_names));
+
+ return resource_type_names[type];
+}
+
+SkString SkPDFResourceDict::getResourceName(
+ SkPDFResourceDict::SkPDFResourceType type, int key) {
+ return SkStringPrintf("%c%d", SkPDFResourceDict::GetResourceTypePrefix(type), key);
+}
+
+static void add_subdict(
+ const SkTDArray<SkPDFObject*>& resourceList,
+ SkPDFResourceDict::SkPDFResourceType type,
+ SkPDFDict* dst) {
+ if (0 == resourceList.count()) {
+ return;
+ }
+ auto resources = sk_make_sp<SkPDFDict>();
+ for (int i = 0; i < resourceList.count(); i++) {
+ resources->insertObjRef(SkPDFResourceDict::getResourceName(type, i),
+ sk_ref_sp(resourceList[i]));
+ }
+ dst->insertObject(get_resource_type_name(type), std::move(resources));
+}
+
+sk_sp<SkPDFDict> SkPDFResourceDict::Make(
+ const SkTDArray<SkPDFObject*>* gStateResources,
+ const SkTDArray<SkPDFObject*>* patternResources,
+ const SkTDArray<SkPDFObject*>* xObjectResources,
+ const SkTDArray<SkPDFObject*>* fontResources) {
+ auto dict = sk_make_sp<SkPDFDict>();
+ static const char kProcs[][7] = {
+ "PDF", "Text", "ImageB", "ImageC", "ImageI"};
+ auto procSets = sk_make_sp<SkPDFArray>();
+
+ procSets->reserve(SK_ARRAY_COUNT(kProcs));
+ for (size_t i = 0; i < SK_ARRAY_COUNT(kProcs); i++) {
+ procSets->appendName(kProcs[i]);
+ }
+ dict->insertObject("ProcSets", std::move(procSets));
+
+ if (gStateResources) {
+ add_subdict(*gStateResources, kExtGState_ResourceType, dict.get());
+ }
+ if (patternResources) {
+ add_subdict(*patternResources, kPattern_ResourceType, dict.get());
+ }
+ if (xObjectResources) {
+ add_subdict(*xObjectResources, kXObject_ResourceType, dict.get());
+ }
+ if (fontResources) {
+ add_subdict(*fontResources, kFont_ResourceType, dict.get());
+ }
+ return dict;
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFResourceDict.h b/gfx/skia/skia/src/pdf/SkPDFResourceDict.h
new file mode 100644
index 000000000..a9618b242
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFResourceDict.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPDFResourceDict_DEFINED
+#define SkPDFResourceDict_DEFINED
+
+#include "SkRefCnt.h"
+#include "SkTDArray.h"
+
+class SkPDFDict;
+class SkPDFObject;
+
+/** \class SkPDFResourceDict
+
+ A resource dictionary, which maintains the relevant sub-dicts and
+ allows generation of a list of referenced SkPDFObjects inserted with
+ insertResourceAsRef.
+*/
+class SkPDFResourceDict {
+public:
+ enum SkPDFResourceType {
+ kExtGState_ResourceType,
+ kPattern_ResourceType,
+ kXObject_ResourceType,
+ kFont_ResourceType,
+ // These additional types are defined by the spec, but not
+ // currently used by Skia: ColorSpace, Shading, Properties
+ kResourceTypeCount
+ };
+
+ static char GetResourceTypePrefix(SkPDFResourceDict::SkPDFResourceType type);
+
+ /** Create a PDF resource dictionary.
+ * The full set of ProcSet entries is automatically created for backwards
+ * compatibility, as recommended by the PDF spec.
+ *
+     * Any argument may be nullptr.
+ */
+ static sk_sp<SkPDFDict> Make(
+ const SkTDArray<SkPDFObject*>* gStateResources,
+ const SkTDArray<SkPDFObject*>* patternResources,
+ const SkTDArray<SkPDFObject*>* xObjectResources,
+ const SkTDArray<SkPDFObject*>* fontResources);
+
+ /**
+ * Returns the name for the resource that will be generated by the resource
+ * dict.
+ *
+ * @param type The type of resource being entered, like
+ * kPattern_ResourceType or kExtGState_ResourceType.
+ * @param key The resource key, should be unique within its type.
+ */
+ static SkString getResourceName(SkPDFResourceType type, int key);
+};
+
+#endif
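A minimal sketch (not part of this patch) tying the resource dictionary to the objects above; graphicState and formXObject are assumed to be sk_sp<SkPDFObject> values created earlier (e.g. by SkPDFGraphicState and SkPDFMakeFormXObject):

    SkTDArray<SkPDFObject*> gStates;
    gStates.push(graphicState.get());
    SkTDArray<SkPDFObject*> xObjects;
    xObjects.push(formXObject.get());

    sk_sp<SkPDFDict> resources = SkPDFResourceDict::Make(
            &gStates, /*patternResources=*/nullptr,
            &xObjects, /*fontResources=*/nullptr);
    // A content stream then refers to these as /G0 and /X0, the names
    // produced by SkPDFResourceDict::getResourceName(type, index).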
diff --git a/gfx/skia/skia/src/pdf/SkPDFShader.cpp b/gfx/skia/skia/src/pdf/SkPDFShader.cpp
new file mode 100644
index 000000000..82b5b3475
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFShader.cpp
@@ -0,0 +1,1362 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkPDFShader.h"
+
+#include "SkData.h"
+#include "SkPDFCanon.h"
+#include "SkPDFDevice.h"
+#include "SkPDFDocument.h"
+#include "SkPDFFormXObject.h"
+#include "SkPDFGraphicState.h"
+#include "SkPDFResourceDict.h"
+#include "SkPDFUtils.h"
+#include "SkScalar.h"
+#include "SkStream.h"
+#include "SkTemplates.h"
+
+static bool inverse_transform_bbox(const SkMatrix& matrix, SkRect* bbox) {
+ SkMatrix inverse;
+ if (!matrix.invert(&inverse)) {
+ return false;
+ }
+ inverse.mapRect(bbox);
+ return true;
+}
+
+static void unitToPointsMatrix(const SkPoint pts[2], SkMatrix* matrix) {
+ SkVector vec = pts[1] - pts[0];
+ SkScalar mag = vec.length();
+ SkScalar inv = mag ? SkScalarInvert(mag) : 0;
+
+ vec.scale(inv);
+ matrix->setSinCos(vec.fY, vec.fX);
+ matrix->preScale(mag, mag);
+ matrix->postTranslate(pts[0].fX, pts[0].fY);
+}
+
+static const int kColorComponents = 3;
+typedef uint8_t ColorTuple[kColorComponents];
+
+/* Assumes t + startOffset is on the stack and does a linear interpolation on t
+ between startOffset and endOffset from prevColor to curColor (for each color
+ component), leaving the result in component order on the stack. It assumes
+ there are always 3 components per color.
+ @param range endOffset - startOffset
+ @param curColor[components] The current color components.
+ @param prevColor[components] The previous color components.
+ @param result The result ps function.
+ */
+static void interpolateColorCode(SkScalar range, const ColorTuple& curColor,
+ const ColorTuple& prevColor,
+ SkDynamicMemoryWStream* result) {
+ SkASSERT(range != SkIntToScalar(0));
+
+ // Figure out how to scale each color component.
+ SkScalar multiplier[kColorComponents];
+ for (int i = 0; i < kColorComponents; i++) {
+ static const SkScalar kColorScale = SkScalarInvert(255);
+ multiplier[i] = kColorScale * (curColor[i] - prevColor[i]) / range;
+ }
+
+ // Calculate when we no longer need to keep a copy of the input parameter t.
+ // If the last component to use t is i, then dupInput[0..i - 1] = true
+ // and dupInput[i .. components] = false.
+ bool dupInput[kColorComponents];
+ dupInput[kColorComponents - 1] = false;
+ for (int i = kColorComponents - 2; i >= 0; i--) {
+ dupInput[i] = dupInput[i + 1] || multiplier[i + 1] != 0;
+ }
+
+ if (!dupInput[0] && multiplier[0] == 0) {
+ result->writeText("pop ");
+ }
+
+ for (int i = 0; i < kColorComponents; i++) {
+        // If the next component needs t and this component will consume a
+ // copy, make another copy.
+ if (dupInput[i] && multiplier[i] != 0) {
+ result->writeText("dup ");
+ }
+
+ if (multiplier[i] == 0) {
+ SkPDFUtils::AppendColorComponent(prevColor[i], result);
+ result->writeText(" ");
+ } else {
+ if (multiplier[i] != 1) {
+ SkPDFUtils::AppendScalar(multiplier[i], result);
+ result->writeText(" mul ");
+ }
+ if (prevColor[i] != 0) {
+ SkPDFUtils::AppendColorComponent(prevColor[i], result);
+ result->writeText(" add ");
+ }
+ }
+
+ if (dupInput[i]) {
+ result->writeText("exch\n");
+ }
+ }
+}
+
+/* Generate Type 4 function code to map t=[0,1) to the passed gradient,
+ clamping at the edges of the range. The generated code will be of the form:
+ if (t < 0) {
+ return colorData[0][r,g,b];
+ } else {
+ if (t < info.fColorOffsets[1]) {
+ return linearinterpolation(colorData[0][r,g,b],
+ colorData[1][r,g,b]);
+ } else {
+ if (t < info.fColorOffsets[2]) {
+ return linearinterpolation(colorData[1][r,g,b],
+ colorData[2][r,g,b]);
+ } else {
+
+ ... } else {
+ return colorData[info.fColorCount - 1][r,g,b];
+ }
+ ...
+ }
+ }
+ */
+static void gradientFunctionCode(const SkShader::GradientInfo& info,
+ SkDynamicMemoryWStream* result) {
+ /* We want to linearly interpolate from the previous color to the next.
+ Scale the colors from 0..255 to 0..1 and determine the multipliers
+ for interpolation.
+ C{r,g,b}(t, section) = t - offset_(section-1) + t * Multiplier{r,g,b}.
+ */
+
+ SkAutoSTMalloc<4, ColorTuple> colorDataAlloc(info.fColorCount);
+ ColorTuple *colorData = colorDataAlloc.get();
+ for (int i = 0; i < info.fColorCount; i++) {
+ colorData[i][0] = SkColorGetR(info.fColors[i]);
+ colorData[i][1] = SkColorGetG(info.fColors[i]);
+ colorData[i][2] = SkColorGetB(info.fColors[i]);
+ }
+
+ // Clamp the initial color.
+ result->writeText("dup 0 le {pop ");
+ SkPDFUtils::AppendColorComponent(colorData[0][0], result);
+ result->writeText(" ");
+ SkPDFUtils::AppendColorComponent(colorData[0][1], result);
+ result->writeText(" ");
+ SkPDFUtils::AppendColorComponent(colorData[0][2], result);
+ result->writeText(" }\n");
+
+ // The gradient colors.
+ int gradients = 0;
+ for (int i = 1 ; i < info.fColorCount; i++) {
+ if (info.fColorOffsets[i] == info.fColorOffsets[i - 1]) {
+ continue;
+ }
+ gradients++;
+
+ result->writeText("{dup ");
+ SkPDFUtils::AppendScalar(info.fColorOffsets[i], result);
+ result->writeText(" le {");
+ if (info.fColorOffsets[i - 1] != 0) {
+ SkPDFUtils::AppendScalar(info.fColorOffsets[i - 1], result);
+ result->writeText(" sub\n");
+ }
+
+ interpolateColorCode(info.fColorOffsets[i] - info.fColorOffsets[i - 1],
+ colorData[i], colorData[i - 1], result);
+ result->writeText("}\n");
+ }
+
+ // Clamp the final color.
+ result->writeText("{pop ");
+ SkPDFUtils::AppendColorComponent(colorData[info.fColorCount - 1][0], result);
+ result->writeText(" ");
+ SkPDFUtils::AppendColorComponent(colorData[info.fColorCount - 1][1], result);
+ result->writeText(" ");
+ SkPDFUtils::AppendColorComponent(colorData[info.fColorCount - 1][2], result);
+
+ for (int i = 0 ; i < gradients + 1; i++) {
+ result->writeText("} ifelse\n");
+ }
+}
+
+static sk_sp<SkPDFDict> createInterpolationFunction(const ColorTuple& color1,
+ const ColorTuple& color2) {
+ auto retval = sk_make_sp<SkPDFDict>();
+
+ auto c0 = sk_make_sp<SkPDFArray>();
+ c0->appendColorComponent(color1[0]);
+ c0->appendColorComponent(color1[1]);
+ c0->appendColorComponent(color1[2]);
+ retval->insertObject("C0", std::move(c0));
+
+ auto c1 = sk_make_sp<SkPDFArray>();
+ c1->appendColorComponent(color2[0]);
+ c1->appendColorComponent(color2[1]);
+ c1->appendColorComponent(color2[2]);
+ retval->insertObject("C1", std::move(c1));
+
+ auto domain = sk_make_sp<SkPDFArray>();
+ domain->appendScalar(0);
+ domain->appendScalar(1.0f);
+ retval->insertObject("Domain", std::move(domain));
+
+ retval->insertInt("FunctionType", 2);
+ retval->insertScalar("N", 1.0f);
+
+ return retval;
+}
+
+static sk_sp<SkPDFDict> gradientStitchCode(const SkShader::GradientInfo& info) {
+ auto retval = sk_make_sp<SkPDFDict>();
+
+ // normalize color stops
+ int colorCount = info.fColorCount;
+ SkTDArray<SkColor> colors(info.fColors, colorCount);
+ SkTDArray<SkScalar> colorOffsets(info.fColorOffsets, colorCount);
+
+ int i = 1;
+ while (i < colorCount - 1) {
+ // ensure stops are in order
+ if (colorOffsets[i - 1] > colorOffsets[i]) {
+ colorOffsets[i] = colorOffsets[i - 1];
+ }
+
+ // remove points that are between 2 coincident points
+ if ((colorOffsets[i - 1] == colorOffsets[i]) && (colorOffsets[i] == colorOffsets[i + 1])) {
+ colorCount -= 1;
+ colors.remove(i);
+ colorOffsets.remove(i);
+ } else {
+ i++;
+ }
+ }
+ // find coincident points and slightly move them over
+ for (i = 1; i < colorCount - 1; i++) {
+ if (colorOffsets[i - 1] == colorOffsets[i]) {
+ colorOffsets[i] += 0.00001f;
+ }
+ }
+ // check if last 2 stops coincide
+ if (colorOffsets[i - 1] == colorOffsets[i]) {
+ colorOffsets[i - 1] -= 0.00001f;
+ }
+
+ SkAutoSTMalloc<4, ColorTuple> colorDataAlloc(colorCount);
+ ColorTuple *colorData = colorDataAlloc.get();
+ for (int i = 0; i < colorCount; i++) {
+ colorData[i][0] = SkColorGetR(colors[i]);
+ colorData[i][1] = SkColorGetG(colors[i]);
+ colorData[i][2] = SkColorGetB(colors[i]);
+ }
+
+ // no need for a stitch function if there are only 2 stops.
+ if (colorCount == 2)
+ return createInterpolationFunction(colorData[0], colorData[1]);
+
+ auto encode = sk_make_sp<SkPDFArray>();
+ auto bounds = sk_make_sp<SkPDFArray>();
+ auto functions = sk_make_sp<SkPDFArray>();
+
+ auto domain = sk_make_sp<SkPDFArray>();
+ domain->appendScalar(0);
+ domain->appendScalar(1.0f);
+ retval->insertObject("Domain", std::move(domain));
+ retval->insertInt("FunctionType", 3);
+
+ for (int i = 1; i < colorCount; i++) {
+ if (i > 1) {
+ bounds->appendScalar(colorOffsets[i-1]);
+ }
+
+ encode->appendScalar(0);
+ encode->appendScalar(1.0f);
+
+ functions->appendObject(createInterpolationFunction(colorData[i-1], colorData[i]));
+ }
+
+ retval->insertObject("Encode", std::move(encode));
+ retval->insertObject("Bounds", std::move(bounds));
+ retval->insertObject("Functions", std::move(functions));
+
+ return retval;
+}
+
+/* Map a value of t on the stack into [0, 1) for Repeat or Mirror tile mode. */
+static void tileModeCode(SkShader::TileMode mode,
+ SkDynamicMemoryWStream* result) {
+ if (mode == SkShader::kRepeat_TileMode) {
+ result->writeText("dup truncate sub\n"); // Get the fractional part.
+ result->writeText("dup 0 le {1 add} if\n"); // Map (-1,0) => (0,1)
+ return;
+ }
+
+ if (mode == SkShader::kMirror_TileMode) {
+ // Map t mod 2 into [0, 1, 1, 0].
+ // Code Stack
+ result->writeText("abs " // Map negative to positive.
+ "dup " // t.s t.s
+ "truncate " // t.s t
+ "dup " // t.s t t
+ "cvi " // t.s t T
+ "2 mod " // t.s t (i mod 2)
+ "1 eq " // t.s t true|false
+ "3 1 roll " // true|false t.s t
+ "sub " // true|false 0.s
+ "exch " // 0.s true|false
+ "{1 exch sub} if\n"); // 1 - 0.s|0.s
+ }
+}
+
+/**
+ * Returns PS function code that applies inverse perspective
+ * to an x, y point.
+ * The function assumes that the stack has at least two elements,
+ * and that the top 2 elements are numeric values.
+ * After executing this code on a PS stack, the last 2 elements are updated
+ * while the rest of the stack is preserved intact.
+ * inversePerspectiveMatrix is the inverse perspective matrix.
+ */
+static void apply_perspective_to_coordinates(
+ const SkMatrix& inversePerspectiveMatrix,
+ SkDynamicMemoryWStream* code) {
+ if (!inversePerspectiveMatrix.hasPerspective()) {
+ return;
+ }
+
+ // Perspective matrix should be:
+ // 1 0 0
+ // 0 1 0
+ // p0 p1 p2
+
+ const SkScalar p0 = inversePerspectiveMatrix[SkMatrix::kMPersp0];
+ const SkScalar p1 = inversePerspectiveMatrix[SkMatrix::kMPersp1];
+ const SkScalar p2 = inversePerspectiveMatrix[SkMatrix::kMPersp2];
+
+ // y = y / (p2 + p0 x + p1 y)
+ // x = x / (p2 + p0 x + p1 y)
+
+ // Input on stack: x y
+ code->writeText(" dup "); // x y y
+ SkPDFUtils::AppendScalar(p1, code); // x y y p1
+ code->writeText(" mul " // x y y*p1
+ " 2 index "); // x y y*p1 x
+ SkPDFUtils::AppendScalar(p0, code); // x y y p1 x p0
+ code->writeText(" mul "); // x y y*p1 x*p0
+ SkPDFUtils::AppendScalar(p2, code); // x y y p1 x*p0 p2
+ code->writeText(" add " // x y y*p1 x*p0+p2
+ "add " // x y y*p1+x*p0+p2
+ "3 1 roll " // y*p1+x*p0+p2 x y
+ "2 index " // z x y y*p1+x*p0+p2
+ "div " // y*p1+x*p0+p2 x y/(y*p1+x*p0+p2)
+ "3 1 roll " // y/(y*p1+x*p0+p2) y*p1+x*p0+p2 x
+ "exch " // y/(y*p1+x*p0+p2) x y*p1+x*p0+p2
+ "div " // y/(y*p1+x*p0+p2) x/(y*p1+x*p0+p2)
+ "exch\n"); // x/(y*p1+x*p0+p2) y/(y*p1+x*p0+p2)
+}
+
+static void linearCode(const SkShader::GradientInfo& info,
+ const SkMatrix& perspectiveRemover,
+ SkDynamicMemoryWStream* function) {
+ function->writeText("{");
+
+ apply_perspective_to_coordinates(perspectiveRemover, function);
+
+ function->writeText("pop\n"); // Just ditch the y value.
+ tileModeCode(info.fTileMode, function);
+ gradientFunctionCode(info, function);
+ function->writeText("}");
+}
+
+static void radialCode(const SkShader::GradientInfo& info,
+ const SkMatrix& perspectiveRemover,
+ SkDynamicMemoryWStream* function) {
+ function->writeText("{");
+
+ apply_perspective_to_coordinates(perspectiveRemover, function);
+
+ // Find the distance from the origin.
+ function->writeText("dup " // x y y
+ "mul " // x y^2
+ "exch " // y^2 x
+ "dup " // y^2 x x
+ "mul " // y^2 x^2
+ "add " // y^2+x^2
+ "sqrt\n"); // sqrt(y^2+x^2)
+
+ tileModeCode(info.fTileMode, function);
+ gradientFunctionCode(info, function);
+ function->writeText("}");
+}
+
+/* Conical gradient shader, based on the Canvas spec for radial gradients
+ See: http://www.w3.org/TR/2dcontext/#dom-context-2d-createradialgradient
+ */
+static void twoPointConicalCode(const SkShader::GradientInfo& info,
+ const SkMatrix& perspectiveRemover,
+ SkDynamicMemoryWStream* function) {
+ SkScalar dx = info.fPoint[1].fX - info.fPoint[0].fX;
+ SkScalar dy = info.fPoint[1].fY - info.fPoint[0].fY;
+ SkScalar r0 = info.fRadius[0];
+ SkScalar dr = info.fRadius[1] - info.fRadius[0];
+ SkScalar a = SkScalarMul(dx, dx) + SkScalarMul(dy, dy) -
+ SkScalarMul(dr, dr);
+
+    // First compute t. If the pixel falls outside the cone, then we'll end
+    // with 'false' on the stack; otherwise we'll push 'true' with t below it.
+
+ // We start with a stack of (x y), copy it and then consume one copy in
+ // order to calculate b and the other to calculate c.
+ function->writeText("{");
+
+ apply_perspective_to_coordinates(perspectiveRemover, function);
+
+ function->writeText("2 copy ");
+
+ // Calculate b and b^2; b = -2 * (y * dy + x * dx + r0 * dr).
+ SkPDFUtils::AppendScalar(dy, function);
+ function->writeText(" mul exch ");
+ SkPDFUtils::AppendScalar(dx, function);
+ function->writeText(" mul add ");
+ SkPDFUtils::AppendScalar(SkScalarMul(r0, dr), function);
+ function->writeText(" add -2 mul dup dup mul\n");
+
+    // c = x^2 + y^2 - r0^2
+ function->writeText("4 2 roll dup mul exch dup mul add ");
+ SkPDFUtils::AppendScalar(SkScalarMul(r0, r0), function);
+ function->writeText(" sub dup 4 1 roll\n");
+
+ // Contents of the stack at this point: c, b, b^2, c
+
+ // if a = 0, then we collapse to a simpler linear case
+ if (a == 0) {
+
+ // t = -c/b
+ function->writeText("pop pop div neg dup ");
+
+ // compute radius(t)
+ SkPDFUtils::AppendScalar(dr, function);
+ function->writeText(" mul ");
+ SkPDFUtils::AppendScalar(r0, function);
+ function->writeText(" add\n");
+
+ // if r(t) < 0, then it's outside the cone
+ function->writeText("0 lt {pop false} {true} ifelse\n");
+
+ } else {
+
+ // quadratic case: the Canvas spec wants the largest
+ // root t for which radius(t) > 0
+
+ // compute the discriminant (b^2 - 4ac)
+ SkPDFUtils::AppendScalar(SkScalarMul(SkIntToScalar(4), a), function);
+ function->writeText(" mul sub dup\n");
+
+ // if d >= 0, proceed
+ function->writeText("0 ge {\n");
+
+ // an intermediate value we'll use to compute the roots:
+ // q = -0.5 * (b +/- sqrt(d))
+ function->writeText("sqrt exch dup 0 lt {exch -1 mul} if");
+ function->writeText(" add -0.5 mul dup\n");
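+        // For reference: q = -0.5 * (b + sign(b)*sqrt(d)) is the numerically
+        // stable form of the quadratic formula; the two roots then follow as
+        // q/a and c/q, which avoids cancellation when b^2 >> 4*a*c.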
+
+ // first root = q / a
+ SkPDFUtils::AppendScalar(a, function);
+ function->writeText(" div\n");
+
+ // second root = c / q
+ function->writeText("3 1 roll div\n");
+
+ // put the larger root on top of the stack
+ function->writeText("2 copy gt {exch} if\n");
+
+ // compute radius(t) for larger root
+ function->writeText("dup ");
+ SkPDFUtils::AppendScalar(dr, function);
+ function->writeText(" mul ");
+ SkPDFUtils::AppendScalar(r0, function);
+ function->writeText(" add\n");
+
+ // if r(t) > 0, we have our t, pop off the smaller root and we're done
+ function->writeText(" 0 gt {exch pop true}\n");
+
+ // otherwise, throw out the larger one and try the smaller root
+ function->writeText("{pop dup\n");
+ SkPDFUtils::AppendScalar(dr, function);
+ function->writeText(" mul ");
+ SkPDFUtils::AppendScalar(r0, function);
+ function->writeText(" add\n");
+
+ // if r(t) < 0, push false, otherwise the smaller root is our t
+ function->writeText("0 le {pop false} {true} ifelse\n");
+ function->writeText("} ifelse\n");
+
+ // d < 0, clear the stack and push false
+ function->writeText("} {pop pop pop false} ifelse\n");
+ }
+
+ // if the pixel is in the cone, proceed to compute a color
+ function->writeText("{");
+ tileModeCode(info.fTileMode, function);
+ gradientFunctionCode(info, function);
+
+ // otherwise, just write black
+ function->writeText("} {0 0 0} ifelse }");
+}
+
+static void sweepCode(const SkShader::GradientInfo& info,
+ const SkMatrix& perspectiveRemover,
+ SkDynamicMemoryWStream* function) {
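+    // The stack holds the (x, y) coordinate. "exch atan" computes the angle
+    // of the point in degrees (PostScript atan returns a value in [0, 360)),
+    // and "360 div" normalizes that angle to [0, 1) as the gradient t value.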
+ function->writeText("{exch atan 360 div\n");
+ tileModeCode(info.fTileMode, function);
+ gradientFunctionCode(info, function);
+ function->writeText("}");
+}
+
+static void drawBitmapMatrix(SkCanvas* canvas, const SkBitmap& bm, const SkMatrix& matrix) {
+ SkAutoCanvasRestore acr(canvas, true);
+ canvas->concat(matrix);
+ canvas->drawBitmap(bm, 0, 0);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+static sk_sp<SkPDFStream> make_alpha_function_shader(SkPDFDocument* doc,
+ SkScalar dpi,
+ const SkPDFShader::State& state);
+static sk_sp<SkPDFDict> make_function_shader(SkPDFCanon* canon,
+ const SkPDFShader::State& state);
+
+static sk_sp<SkPDFStream> make_image_shader(SkPDFDocument* doc,
+ SkScalar dpi,
+ const SkPDFShader::State& state,
+ SkBitmap image);
+
+static sk_sp<SkPDFObject> get_pdf_shader_by_state(
+ SkPDFDocument* doc,
+ SkScalar dpi,
+ SkPDFShader::State state,
+ SkBitmap image) {
+ SkPDFCanon* canon = doc->canon();
+ if (state.fType == SkShader::kNone_GradientType && image.isNull()) {
+        // TODO(vandebo) This drops SkComposeShader on the floor. We could
+ // handle compose shader by pulling things up to a layer, drawing with
+ // the first shader, applying the xfer mode and drawing again with the
+ // second shader, then applying the layer to the original drawing.
+ return nullptr;
+ } else if (state.fType == SkShader::kNone_GradientType) {
+ sk_sp<SkPDFObject> shader = canon->findImageShader(state);
+ if (!shader) {
+ shader = make_image_shader(doc, dpi, state, std::move(image));
+ canon->addImageShader(shader, std::move(state));
+ }
+ return shader;
+ } else if (state.GradientHasAlpha()) {
+ sk_sp<SkPDFObject> shader = canon->findAlphaShader(state);
+ if (!shader) {
+ shader = make_alpha_function_shader(doc, dpi, state);
+ canon->addAlphaShader(shader, std::move(state));
+ }
+ return shader;
+ } else {
+ sk_sp<SkPDFObject> shader = canon->findFunctionShader(state);
+ if (!shader) {
+ shader = make_function_shader(canon, state);
+ canon->addFunctionShader(shader, std::move(state));
+ }
+ return shader;
+ }
+}
+
+sk_sp<SkPDFObject> SkPDFShader::GetPDFShader(SkPDFDocument* doc,
+ SkScalar dpi,
+ SkShader* shader,
+ const SkMatrix& matrix,
+ const SkIRect& surfaceBBox,
+ SkScalar rasterScale) {
+ SkBitmap image;
+ State state(shader, matrix, surfaceBBox, rasterScale, &image);
+ return get_pdf_shader_by_state(
+ doc, dpi, std::move(state), std::move(image));
+}
+
+static sk_sp<SkPDFDict> get_gradient_resource_dict(
+ SkPDFObject* functionShader,
+ SkPDFObject* gState) {
+ SkTDArray<SkPDFObject*> patterns;
+ if (functionShader) {
+ patterns.push(functionShader);
+ }
+ SkTDArray<SkPDFObject*> graphicStates;
+ if (gState) {
+ graphicStates.push(gState);
+ }
+ return SkPDFResourceDict::Make(&graphicStates, &patterns, nullptr, nullptr);
+}
+
+static void populate_tiling_pattern_dict(SkPDFDict* pattern,
+ SkRect& bbox,
+ sk_sp<SkPDFDict> resources,
+ const SkMatrix& matrix) {
+ const int kTiling_PatternType = 1;
+ const int kColoredTilingPattern_PaintType = 1;
+ const int kConstantSpacing_TilingType = 1;
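+    // The resulting pattern dictionary looks roughly like
+    //   << /Type /Pattern /PatternType 1 /PaintType 1 /TilingType 1
+    //      /BBox [...] /XStep w /YStep h /Resources << ... >> /Matrix [...] >>
+    // (the Matrix entry is omitted when the matrix is the identity).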
+
+ pattern->insertName("Type", "Pattern");
+ pattern->insertInt("PatternType", kTiling_PatternType);
+ pattern->insertInt("PaintType", kColoredTilingPattern_PaintType);
+ pattern->insertInt("TilingType", kConstantSpacing_TilingType);
+ pattern->insertObject("BBox", SkPDFUtils::RectToArray(bbox));
+ pattern->insertScalar("XStep", bbox.width());
+ pattern->insertScalar("YStep", bbox.height());
+ pattern->insertObject("Resources", std::move(resources));
+ if (!matrix.isIdentity()) {
+ pattern->insertObject("Matrix", SkPDFUtils::MatrixToArray(matrix));
+ }
+}
+
+/**
+ * Creates a content stream which fills the pattern P0 across the bounds.
+ * @param gsIndex A graphics state resource index to apply, or <0 if there is
+ *                no graphics state to apply.
+ */
+static std::unique_ptr<SkStreamAsset> create_pattern_fill_content(
+ int gsIndex, SkRect& bounds) {
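+    // Roughly: select graphics state G<gsIndex> (when gsIndex >= 0), select
+    // pattern P0 as the fill, append the bounds rectangle, and fill it with
+    // the even-odd rule; the exact operator text comes from SkPDFUtils.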
+ SkDynamicMemoryWStream content;
+ if (gsIndex >= 0) {
+ SkPDFUtils::ApplyGraphicState(gsIndex, &content);
+ }
+ SkPDFUtils::ApplyPattern(0, &content);
+ SkPDFUtils::AppendRectangle(bounds, &content);
+ SkPDFUtils::PaintPath(SkPaint::kFill_Style, SkPath::kEvenOdd_FillType,
+ &content);
+
+ return std::unique_ptr<SkStreamAsset>(content.detachAsStream());
+}
+
+/**
+ * Creates an ExtGState with the SMask set to the luminosityShader in
+ * luminosity mode. The shader pattern extends to the bbox.
+ */
+static sk_sp<SkPDFObject> create_smask_graphic_state(
+ SkPDFDocument* doc, SkScalar dpi, const SkPDFShader::State& state) {
+ SkRect bbox;
+ bbox.set(state.fBBox);
+
+ sk_sp<SkPDFObject> luminosityShader(
+ get_pdf_shader_by_state(doc, dpi, state.MakeAlphaToLuminosityState(),
+ SkBitmap()));
+
+ std::unique_ptr<SkStreamAsset> alphaStream(create_pattern_fill_content(-1, bbox));
+
+ sk_sp<SkPDFDict> resources =
+ get_gradient_resource_dict(luminosityShader.get(), nullptr);
+
+ sk_sp<SkPDFObject> alphaMask =
+ SkPDFMakeFormXObject(std::move(alphaStream),
+ SkPDFUtils::RectToArray(bbox),
+ std::move(resources),
+ SkMatrix::I(),
+ "DeviceRGB");
+ return SkPDFGraphicState::GetSMaskGraphicState(
+ std::move(alphaMask), false,
+ SkPDFGraphicState::kLuminosity_SMaskMode, doc->canon());
+}
+
+static sk_sp<SkPDFStream> make_alpha_function_shader(SkPDFDocument* doc,
+ SkScalar dpi,
+ const SkPDFShader::State& state) {
+ SkRect bbox;
+ bbox.set(state.fBBox);
+
+ SkPDFShader::State opaqueState(state.MakeOpaqueState());
+
+ sk_sp<SkPDFObject> colorShader(
+ get_pdf_shader_by_state(doc, dpi, std::move(opaqueState), SkBitmap()));
+ if (!colorShader) {
+ return nullptr;
+ }
+
+ // Create resource dict with alpha graphics state as G0 and
+ // pattern shader as P0, then write content stream.
+ sk_sp<SkPDFObject> alphaGs = create_smask_graphic_state(doc, dpi, state);
+
+ sk_sp<SkPDFDict> resourceDict =
+ get_gradient_resource_dict(colorShader.get(), alphaGs.get());
+
+ std::unique_ptr<SkStreamAsset> colorStream(
+ create_pattern_fill_content(0, bbox));
+ auto alphaFunctionShader = sk_make_sp<SkPDFStream>(std::move(colorStream));
+
+ populate_tiling_pattern_dict(alphaFunctionShader->dict(), bbox,
+ std::move(resourceDict), SkMatrix::I());
+ return alphaFunctionShader;
+}
+
+// Finds affine and persp such that in = affine * persp,
+// but returns the inverse of the perspective matrix rather than persp itself.
+static bool split_perspective(const SkMatrix in, SkMatrix* affine,
+ SkMatrix* perspectiveInverse) {
+ const SkScalar p2 = in[SkMatrix::kMPersp2];
+
+ if (SkScalarNearlyZero(p2)) {
+ return false;
+ }
+
+ const SkScalar zero = SkIntToScalar(0);
+ const SkScalar one = SkIntToScalar(1);
+
+ const SkScalar sx = in[SkMatrix::kMScaleX];
+ const SkScalar kx = in[SkMatrix::kMSkewX];
+ const SkScalar tx = in[SkMatrix::kMTransX];
+ const SkScalar ky = in[SkMatrix::kMSkewY];
+ const SkScalar sy = in[SkMatrix::kMScaleY];
+ const SkScalar ty = in[SkMatrix::kMTransY];
+ const SkScalar p0 = in[SkMatrix::kMPersp0];
+ const SkScalar p1 = in[SkMatrix::kMPersp1];
+
+ // Perspective matrix would be:
+ // 1 0 0
+ // 0 1 0
+ // p0 p1 p2
+ // But we need the inverse of persp.
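+    // (Sanity check: multiplying the affine matrix below by persp reproduces
+    // every entry of 'in', e.g. (sx - p0*tx/p2) + (tx/p2)*p0 = sx, and
+    // persp * perspectiveInverse = I, so the split is exact.)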
+ perspectiveInverse->setAll(one, zero, zero,
+ zero, one, zero,
+ -p0/p2, -p1/p2, 1/p2);
+
+ affine->setAll(sx - p0 * tx / p2, kx - p1 * tx / p2, tx / p2,
+ ky - p0 * ty / p2, sy - p1 * ty / p2, ty / p2,
+ zero, zero, one);
+
+ return true;
+}
+
+sk_sp<SkPDFArray> SkPDFShader::MakeRangeObject() {
+ auto range = sk_make_sp<SkPDFArray>();
+ range->reserve(6);
+ range->appendInt(0);
+ range->appendInt(1);
+ range->appendInt(0);
+ range->appendInt(1);
+ range->appendInt(0);
+ range->appendInt(1);
+ return range;
+}
+
+static sk_sp<SkPDFStream> make_ps_function(
+ std::unique_ptr<SkStreamAsset> psCode,
+ sk_sp<SkPDFArray> domain,
+ sk_sp<SkPDFObject> range) {
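+    // This builds a PDF Type 4 ("PostScript calculator") function; the final
+    // dictionary looks roughly like
+    //   << /FunctionType 4 /Domain [x0 x1 y0 y1] /Range [0 1 0 1 0 1]
+    //      /Length ... >>
+    // with psCode as the stream body.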
+ auto result = sk_make_sp<SkPDFStream>(std::move(psCode));
+ result->dict()->insertInt("FunctionType", 4);
+ result->dict()->insertObject("Domain", std::move(domain));
+ result->dict()->insertObject("Range", std::move(range));
+ return result;
+}
+
+// Catch cases where the inner circle just touches the outer circle and nudge
+// the inner circle to sit just inside the outer one, matching the raster
+// backend's behavior.
+static void FixUpRadius(const SkPoint& p1, SkScalar& r1, const SkPoint& p2, SkScalar& r2) {
+ // detect touching circles
+ SkScalar distance = SkPoint::Distance(p1, p2);
+ SkScalar subtractRadii = fabs(r1 - r2);
+ if (fabs(distance - subtractRadii) < 0.002f) {
+ if (r1 > r2) {
+ r1 += 0.002f;
+ } else {
+ r2 += 0.002f;
+ }
+ }
+}
+
+static sk_sp<SkPDFDict> make_function_shader(SkPDFCanon* canon,
+ const SkPDFShader::State& state) {
+ void (*codeFunction)(const SkShader::GradientInfo& info,
+ const SkMatrix& perspectiveRemover,
+ SkDynamicMemoryWStream* function) = nullptr;
+ SkPoint transformPoints[2];
+ const SkShader::GradientInfo* info = &state.fInfo;
+ SkMatrix finalMatrix = state.fCanvasTransform;
+ finalMatrix.preConcat(state.fShaderTransform);
+
+ bool doStitchFunctions = (state.fType == SkShader::kLinear_GradientType ||
+ state.fType == SkShader::kRadial_GradientType ||
+ state.fType == SkShader::kConical_GradientType) &&
+ info->fTileMode == SkShader::kClamp_TileMode &&
+ !finalMatrix.hasPerspective();
+
+ auto domain = sk_make_sp<SkPDFArray>();
+
+ int32_t shadingType = 1;
+ auto pdfShader = sk_make_sp<SkPDFDict>();
+    // The two-point radial gradient further references state.fInfo when
+    // translating from (x, y) coordinates to the t parameter, so we have to
+    // transform the points and radii according to the calculated matrix.
+ if (doStitchFunctions) {
+ pdfShader->insertObject("Function", gradientStitchCode(*info));
+ shadingType = (state.fType == SkShader::kLinear_GradientType) ? 2 : 3;
+
+ auto extend = sk_make_sp<SkPDFArray>();
+ extend->reserve(2);
+ extend->appendBool(true);
+ extend->appendBool(true);
+ pdfShader->insertObject("Extend", std::move(extend));
+
+ auto coords = sk_make_sp<SkPDFArray>();
+ if (state.fType == SkShader::kConical_GradientType) {
+ coords->reserve(6);
+ SkScalar r1 = info->fRadius[0];
+ SkScalar r2 = info->fRadius[1];
+ SkPoint pt1 = info->fPoint[0];
+ SkPoint pt2 = info->fPoint[1];
+ FixUpRadius(pt1, r1, pt2, r2);
+
+ coords->appendScalar(pt1.fX);
+ coords->appendScalar(pt1.fY);
+ coords->appendScalar(r1);
+
+ coords->appendScalar(pt2.fX);
+ coords->appendScalar(pt2.fY);
+ coords->appendScalar(r2);
+ } else if (state.fType == SkShader::kRadial_GradientType) {
+ coords->reserve(6);
+ const SkPoint& pt1 = info->fPoint[0];
+
+ coords->appendScalar(pt1.fX);
+ coords->appendScalar(pt1.fY);
+ coords->appendScalar(0);
+
+ coords->appendScalar(pt1.fX);
+ coords->appendScalar(pt1.fY);
+ coords->appendScalar(info->fRadius[0]);
+ } else {
+ coords->reserve(4);
+ const SkPoint& pt1 = info->fPoint[0];
+ const SkPoint& pt2 = info->fPoint[1];
+
+ coords->appendScalar(pt1.fX);
+ coords->appendScalar(pt1.fY);
+
+ coords->appendScalar(pt2.fX);
+ coords->appendScalar(pt2.fY);
+ }
+
+ pdfShader->insertObject("Coords", std::move(coords));
+ } else {
+ // Depending on the type of the gradient, we want to transform the
+ // coordinate space in different ways.
+ transformPoints[0] = info->fPoint[0];
+ transformPoints[1] = info->fPoint[1];
+ switch (state.fType) {
+ case SkShader::kLinear_GradientType:
+ codeFunction = &linearCode;
+ break;
+ case SkShader::kRadial_GradientType:
+ transformPoints[1] = transformPoints[0];
+ transformPoints[1].fX += info->fRadius[0];
+ codeFunction = &radialCode;
+ break;
+ case SkShader::kConical_GradientType: {
+ transformPoints[1] = transformPoints[0];
+ transformPoints[1].fX += SK_Scalar1;
+ codeFunction = &twoPointConicalCode;
+ break;
+ }
+ case SkShader::kSweep_GradientType:
+ transformPoints[1] = transformPoints[0];
+ transformPoints[1].fX += SK_Scalar1;
+ codeFunction = &sweepCode;
+ break;
+ case SkShader::kColor_GradientType:
+ case SkShader::kNone_GradientType:
+ default:
+ return nullptr;
+ }
+
+        // Move any scaling (assuming a unit gradient) or translation
+        // (and rotation for the linear gradient) of the final gradient from
+        // info->fPoints to the matrix (updating bbox appropriately). Now
+        // the gradient can be drawn on the unit segment.
+ SkMatrix mapperMatrix;
+ unitToPointsMatrix(transformPoints, &mapperMatrix);
+
+ finalMatrix.preConcat(mapperMatrix);
+
+        // Preserve as much as possible in the final matrix and only remove
+        // the perspective. The inverse of the perspective is stored in the
+        // perspectiveInverseOnly matrix and has 3 useful numbers
+        // (p0, p1, p2), while everything else is either 0 or 1.
+        // In this way the shader handles it efficiently, with minimal code.
+ SkMatrix perspectiveInverseOnly = SkMatrix::I();
+ if (finalMatrix.hasPerspective()) {
+ if (!split_perspective(finalMatrix,
+ &finalMatrix, &perspectiveInverseOnly)) {
+ return nullptr;
+ }
+ }
+
+ SkRect bbox;
+ bbox.set(state.fBBox);
+ if (!inverse_transform_bbox(finalMatrix, &bbox)) {
+ return nullptr;
+ }
+ domain->reserve(4);
+ domain->appendScalar(bbox.fLeft);
+ domain->appendScalar(bbox.fRight);
+ domain->appendScalar(bbox.fTop);
+ domain->appendScalar(bbox.fBottom);
+
+ SkDynamicMemoryWStream functionCode;
+
+ if (state.fType == SkShader::kConical_GradientType) {
+ SkShader::GradientInfo twoPointRadialInfo = *info;
+ SkMatrix inverseMapperMatrix;
+ if (!mapperMatrix.invert(&inverseMapperMatrix)) {
+ return nullptr;
+ }
+ inverseMapperMatrix.mapPoints(twoPointRadialInfo.fPoint, 2);
+ twoPointRadialInfo.fRadius[0] =
+ inverseMapperMatrix.mapRadius(info->fRadius[0]);
+ twoPointRadialInfo.fRadius[1] =
+ inverseMapperMatrix.mapRadius(info->fRadius[1]);
+ codeFunction(twoPointRadialInfo, perspectiveInverseOnly, &functionCode);
+ } else {
+ codeFunction(*info, perspectiveInverseOnly, &functionCode);
+ }
+
+ pdfShader->insertObject("Domain", domain);
+
+ // Call canon->makeRangeObject() instead of
+ // SkPDFShader::MakeRangeObject() so that the canon can
+ // deduplicate.
+ std::unique_ptr<SkStreamAsset> functionStream(
+ functionCode.detachAsStream());
+ sk_sp<SkPDFStream> function = make_ps_function(std::move(functionStream),
+ std::move(domain),
+ canon->makeRangeObject());
+ pdfShader->insertObjRef("Function", std::move(function));
+ }
+
+ pdfShader->insertInt("ShadingType", shadingType);
+ pdfShader->insertName("ColorSpace", "DeviceRGB");
+
+ auto pdfFunctionShader = sk_make_sp<SkPDFDict>("Pattern");
+ pdfFunctionShader->insertInt("PatternType", 2);
+ pdfFunctionShader->insertObject("Matrix",
+ SkPDFUtils::MatrixToArray(finalMatrix));
+ pdfFunctionShader->insertObject("Shading", std::move(pdfShader));
+
+ return pdfFunctionShader;
+}
+
+static sk_sp<SkPDFStream> make_image_shader(SkPDFDocument* doc,
+ SkScalar dpi,
+ const SkPDFShader::State& state,
+ SkBitmap image) {
+ SkASSERT(state.fBitmapKey ==
+ (SkBitmapKey{image.getSubset(), image.getGenerationID()}));
+    SkAutoLockPixels autoLockPixels(image);
+
+ // The image shader pattern cell will be drawn into a separate device
+ // in pattern cell space (no scaling on the bitmap, though there may be
+ // translations so that all content is in the device, coordinates > 0).
+
+ // Map clip bounds to shader space to ensure the device is large enough
+ // to handle fake clamping.
+ SkMatrix finalMatrix = state.fCanvasTransform;
+ finalMatrix.preConcat(state.fShaderTransform);
+ SkRect deviceBounds;
+ deviceBounds.set(state.fBBox);
+ if (!inverse_transform_bbox(finalMatrix, &deviceBounds)) {
+ return nullptr;
+ }
+
+ SkRect bitmapBounds;
+ image.getBounds(&bitmapBounds);
+
+ // For tiling modes, the bounds should be extended to include the bitmap,
+ // otherwise the bitmap gets clipped out and the shader is empty and awful.
+ // For clamp modes, we're only interested in the clip region, whether
+ // or not the main bitmap is in it.
+ SkShader::TileMode tileModes[2];
+ tileModes[0] = state.fImageTileModes[0];
+ tileModes[1] = state.fImageTileModes[1];
+ if (tileModes[0] != SkShader::kClamp_TileMode ||
+ tileModes[1] != SkShader::kClamp_TileMode) {
+ deviceBounds.join(bitmapBounds);
+ }
+
+ SkISize size = SkISize::Make(SkScalarRoundToInt(deviceBounds.width()),
+ SkScalarRoundToInt(deviceBounds.height()));
+ sk_sp<SkPDFDevice> patternDevice(
+ SkPDFDevice::CreateUnflipped(size, dpi, doc));
+ SkCanvas canvas(patternDevice.get());
+
+ SkRect patternBBox;
+ image.getBounds(&patternBBox);
+
+ // Translate the canvas so that the bitmap origin is at (0, 0).
+ canvas.translate(-deviceBounds.left(), -deviceBounds.top());
+ patternBBox.offset(-deviceBounds.left(), -deviceBounds.top());
+ // Undo the translation in the final matrix
+ finalMatrix.preTranslate(deviceBounds.left(), deviceBounds.top());
+
+ // If the bitmap is out of bounds (i.e. clamp mode where we only see the
+ // stretched sides), canvas will clip this out and the extraneous data
+ // won't be saved to the PDF.
+ canvas.drawBitmap(image, 0, 0);
+
+ SkScalar width = SkIntToScalar(image.width());
+ SkScalar height = SkIntToScalar(image.height());
+
+ // Tiling is implied. First we handle mirroring.
+ if (tileModes[0] == SkShader::kMirror_TileMode) {
+ SkMatrix xMirror;
+ xMirror.setScale(-1, 1);
+ xMirror.postTranslate(2 * width, 0);
+ drawBitmapMatrix(&canvas, image, xMirror);
+ patternBBox.fRight += width;
+ }
+ if (tileModes[1] == SkShader::kMirror_TileMode) {
+ SkMatrix yMirror;
+ yMirror.setScale(SK_Scalar1, -SK_Scalar1);
+ yMirror.postTranslate(0, 2 * height);
+ drawBitmapMatrix(&canvas, image, yMirror);
+ patternBBox.fBottom += height;
+ }
+ if (tileModes[0] == SkShader::kMirror_TileMode &&
+ tileModes[1] == SkShader::kMirror_TileMode) {
+ SkMatrix mirror;
+ mirror.setScale(-1, -1);
+ mirror.postTranslate(2 * width, 2 * height);
+ drawBitmapMatrix(&canvas, image, mirror);
+ }
+
+    // Then handle clamping, which requires expanding the pattern canvas to
+    // cover the entire surfaceBBox.
+
+    // If both x and y are in clamp mode, we start by filling in the corners,
+    // which are just rectangles of the corner colors.
+ if (tileModes[0] == SkShader::kClamp_TileMode &&
+ tileModes[1] == SkShader::kClamp_TileMode) {
+ SkPaint paint;
+ SkRect rect;
+ rect = SkRect::MakeLTRB(deviceBounds.left(), deviceBounds.top(), 0, 0);
+ if (!rect.isEmpty()) {
+ paint.setColor(image.getColor(0, 0));
+ canvas.drawRect(rect, paint);
+ }
+
+ rect = SkRect::MakeLTRB(width, deviceBounds.top(),
+ deviceBounds.right(), 0);
+ if (!rect.isEmpty()) {
+ paint.setColor(image.getColor(image.width() - 1, 0));
+ canvas.drawRect(rect, paint);
+ }
+
+ rect = SkRect::MakeLTRB(width, height,
+ deviceBounds.right(), deviceBounds.bottom());
+ if (!rect.isEmpty()) {
+ paint.setColor(image.getColor(image.width() - 1,
+ image.height() - 1));
+ canvas.drawRect(rect, paint);
+ }
+
+ rect = SkRect::MakeLTRB(deviceBounds.left(), height,
+ 0, deviceBounds.bottom());
+ if (!rect.isEmpty()) {
+ paint.setColor(image.getColor(0, image.height() - 1));
+ canvas.drawRect(rect, paint);
+ }
+ }
+
+ // Then expand the left, right, top, then bottom.
+ if (tileModes[0] == SkShader::kClamp_TileMode) {
+ SkIRect subset = SkIRect::MakeXYWH(0, 0, 1, image.height());
+ if (deviceBounds.left() < 0) {
+ SkBitmap left;
+ SkAssertResult(image.extractSubset(&left, subset));
+
+ SkMatrix leftMatrix;
+ leftMatrix.setScale(-deviceBounds.left(), 1);
+ leftMatrix.postTranslate(deviceBounds.left(), 0);
+ drawBitmapMatrix(&canvas, left, leftMatrix);
+
+ if (tileModes[1] == SkShader::kMirror_TileMode) {
+ leftMatrix.postScale(SK_Scalar1, -SK_Scalar1);
+ leftMatrix.postTranslate(0, 2 * height);
+ drawBitmapMatrix(&canvas, left, leftMatrix);
+ }
+ patternBBox.fLeft = 0;
+ }
+
+ if (deviceBounds.right() > width) {
+ SkBitmap right;
+ subset.offset(image.width() - 1, 0);
+ SkAssertResult(image.extractSubset(&right, subset));
+
+ SkMatrix rightMatrix;
+ rightMatrix.setScale(deviceBounds.right() - width, 1);
+ rightMatrix.postTranslate(width, 0);
+ drawBitmapMatrix(&canvas, right, rightMatrix);
+
+ if (tileModes[1] == SkShader::kMirror_TileMode) {
+ rightMatrix.postScale(SK_Scalar1, -SK_Scalar1);
+ rightMatrix.postTranslate(0, 2 * height);
+ drawBitmapMatrix(&canvas, right, rightMatrix);
+ }
+ patternBBox.fRight = deviceBounds.width();
+ }
+ }
+
+ if (tileModes[1] == SkShader::kClamp_TileMode) {
+ SkIRect subset = SkIRect::MakeXYWH(0, 0, image.width(), 1);
+ if (deviceBounds.top() < 0) {
+ SkBitmap top;
+ SkAssertResult(image.extractSubset(&top, subset));
+
+ SkMatrix topMatrix;
+ topMatrix.setScale(SK_Scalar1, -deviceBounds.top());
+ topMatrix.postTranslate(0, deviceBounds.top());
+ drawBitmapMatrix(&canvas, top, topMatrix);
+
+ if (tileModes[0] == SkShader::kMirror_TileMode) {
+ topMatrix.postScale(-1, 1);
+ topMatrix.postTranslate(2 * width, 0);
+ drawBitmapMatrix(&canvas, top, topMatrix);
+ }
+ patternBBox.fTop = 0;
+ }
+
+ if (deviceBounds.bottom() > height) {
+ SkBitmap bottom;
+ subset.offset(0, image.height() - 1);
+ SkAssertResult(image.extractSubset(&bottom, subset));
+
+ SkMatrix bottomMatrix;
+ bottomMatrix.setScale(SK_Scalar1, deviceBounds.bottom() - height);
+ bottomMatrix.postTranslate(0, height);
+ drawBitmapMatrix(&canvas, bottom, bottomMatrix);
+
+ if (tileModes[0] == SkShader::kMirror_TileMode) {
+ bottomMatrix.postScale(-1, 1);
+ bottomMatrix.postTranslate(2 * width, 0);
+ drawBitmapMatrix(&canvas, bottom, bottomMatrix);
+ }
+ patternBBox.fBottom = deviceBounds.height();
+ }
+ }
+
+ auto imageShader = sk_make_sp<SkPDFStream>(patternDevice->content());
+ populate_tiling_pattern_dict(imageShader->dict(), patternBBox,
+ patternDevice->makeResourceDict(), finalMatrix);
+ return imageShader;
+}
+
+bool SkPDFShader::State::operator==(const SkPDFShader::State& b) const {
+ if (fType != b.fType ||
+ fCanvasTransform != b.fCanvasTransform ||
+ fShaderTransform != b.fShaderTransform ||
+ fBBox != b.fBBox) {
+ return false;
+ }
+
+ if (fType == SkShader::kNone_GradientType) {
+ if (fBitmapKey != b.fBitmapKey ||
+ fBitmapKey.fID == 0 ||
+ fImageTileModes[0] != b.fImageTileModes[0] ||
+ fImageTileModes[1] != b.fImageTileModes[1]) {
+ return false;
+ }
+ } else {
+ if (fInfo.fColorCount != b.fInfo.fColorCount ||
+ memcmp(fInfo.fColors, b.fInfo.fColors,
+ sizeof(SkColor) * fInfo.fColorCount) != 0 ||
+ memcmp(fInfo.fColorOffsets, b.fInfo.fColorOffsets,
+ sizeof(SkScalar) * fInfo.fColorCount) != 0 ||
+ fInfo.fPoint[0] != b.fInfo.fPoint[0] ||
+ fInfo.fTileMode != b.fInfo.fTileMode) {
+ return false;
+ }
+
+ switch (fType) {
+ case SkShader::kLinear_GradientType:
+ if (fInfo.fPoint[1] != b.fInfo.fPoint[1]) {
+ return false;
+ }
+ break;
+ case SkShader::kRadial_GradientType:
+ if (fInfo.fRadius[0] != b.fInfo.fRadius[0]) {
+ return false;
+ }
+ break;
+ case SkShader::kConical_GradientType:
+ if (fInfo.fPoint[1] != b.fInfo.fPoint[1] ||
+ fInfo.fRadius[0] != b.fInfo.fRadius[0] ||
+ fInfo.fRadius[1] != b.fInfo.fRadius[1]) {
+ return false;
+ }
+ break;
+ case SkShader::kSweep_GradientType:
+ case SkShader::kNone_GradientType:
+ case SkShader::kColor_GradientType:
+ break;
+ }
+ }
+ return true;
+}
+
+SkPDFShader::State::State(SkShader* shader, const SkMatrix& canvasTransform,
+ const SkIRect& bbox, SkScalar rasterScale,
+ SkBitmap* imageDst)
+ : fCanvasTransform(canvasTransform),
+ fBBox(bbox) {
+ SkASSERT(imageDst);
+ fInfo.fColorCount = 0;
+ fInfo.fColors = nullptr;
+ fInfo.fColorOffsets = nullptr;
+ fImageTileModes[0] = fImageTileModes[1] = SkShader::kClamp_TileMode;
+ fType = shader->asAGradient(&fInfo);
+
+ if (fType != SkShader::kNone_GradientType) {
+ fBitmapKey = SkBitmapKey{{0, 0, 0, 0}, 0};
+ fShaderTransform = shader->getLocalMatrix();
+ this->allocateGradientInfoStorage();
+ shader->asAGradient(&fInfo);
+ return;
+ }
+ if (SkImage* skimg = shader->isAImage(&fShaderTransform, fImageTileModes)) {
+ // TODO(halcanary): delay converting to bitmap.
+ if (skimg->asLegacyBitmap(imageDst, SkImage::kRO_LegacyBitmapMode)) {
+ fBitmapKey = SkBitmapKey{imageDst->getSubset(), imageDst->getGenerationID()};
+ return;
+ }
+ }
+ fShaderTransform = shader->getLocalMatrix();
+ // Generic fallback for unsupported shaders:
+ // * allocate a bbox-sized bitmap
+ // * shade the whole area
+ // * use the result as a bitmap shader
+
+ // bbox is in device space. While that's exactly what we
+ // want for sizing our bitmap, we need to map it into
+ // shader space for adjustments (to match
+ // MakeImageShader's behavior).
+ SkRect shaderRect = SkRect::Make(bbox);
+ if (!inverse_transform_bbox(canvasTransform, &shaderRect)) {
+ imageDst->reset();
+ return;
+ }
+
+ // Clamp the bitmap size to about 1M pixels
+ static const SkScalar kMaxBitmapArea = 1024 * 1024;
+ SkScalar bitmapArea = rasterScale * bbox.width() * rasterScale * bbox.height();
+ if (bitmapArea > kMaxBitmapArea) {
+ rasterScale *= SkScalarSqrt(kMaxBitmapArea / bitmapArea);
+ }
+
+ SkISize size = SkISize::Make(SkScalarRoundToInt(rasterScale * bbox.width()),
+ SkScalarRoundToInt(rasterScale * bbox.height()));
+ SkSize scale = SkSize::Make(SkIntToScalar(size.width()) / shaderRect.width(),
+ SkIntToScalar(size.height()) / shaderRect.height());
+
+ imageDst->allocN32Pixels(size.width(), size.height());
+ imageDst->eraseColor(SK_ColorTRANSPARENT);
+
+ SkPaint p;
+ p.setShader(sk_ref_sp(shader));
+
+ SkCanvas canvas(*imageDst);
+ canvas.scale(scale.width(), scale.height());
+ canvas.translate(-shaderRect.x(), -shaderRect.y());
+ canvas.drawPaint(p);
+
+ fShaderTransform.setTranslate(shaderRect.x(), shaderRect.y());
+ fShaderTransform.preScale(1 / scale.width(), 1 / scale.height());
+ fBitmapKey = SkBitmapKey{imageDst->getSubset(), imageDst->getGenerationID()};
+}
+
+SkPDFShader::State::State(const SkPDFShader::State& other)
+ : fType(other.fType),
+ fCanvasTransform(other.fCanvasTransform),
+ fShaderTransform(other.fShaderTransform),
+ fBBox(other.fBBox)
+{
+ // Only gradients supported for now, since that is all that is used.
+ // If needed, image state copy constructor can be added here later.
+ SkASSERT(fType != SkShader::kNone_GradientType);
+
+ if (fType != SkShader::kNone_GradientType) {
+ fInfo = other.fInfo;
+
+ this->allocateGradientInfoStorage();
+ for (int i = 0; i < fInfo.fColorCount; i++) {
+ fInfo.fColors[i] = other.fInfo.fColors[i];
+ fInfo.fColorOffsets[i] = other.fInfo.fColorOffsets[i];
+ }
+ }
+}
+
+/**
+ * Create a copy of this gradient state with alpha assigned to RGB luminosity.
+ * Only valid for gradient states.
+ */
+SkPDFShader::State SkPDFShader::State::MakeAlphaToLuminosityState() const {
+ SkASSERT(fBitmapKey == (SkBitmapKey{{0, 0, 0, 0}, 0}));
+ SkASSERT(fType != SkShader::kNone_GradientType);
+
+ SkPDFShader::State newState(*this);
+
+ for (int i = 0; i < fInfo.fColorCount; i++) {
+ SkAlpha alpha = SkColorGetA(fInfo.fColors[i]);
+ newState.fInfo.fColors[i] = SkColorSetARGB(255, alpha, alpha, alpha);
+ }
+
+ return newState;
+}
+
+/**
+ * Create a copy of this gradient state with alpha set to fully opaque.
+ * Only valid for gradient states.
+ */
+SkPDFShader::State SkPDFShader::State::MakeOpaqueState() const {
+ SkASSERT(fBitmapKey == (SkBitmapKey{{0, 0, 0, 0}, 0}));
+ SkASSERT(fType != SkShader::kNone_GradientType);
+
+ SkPDFShader::State newState(*this);
+ for (int i = 0; i < fInfo.fColorCount; i++) {
+ newState.fInfo.fColors[i] = SkColorSetA(fInfo.fColors[i],
+ SK_AlphaOPAQUE);
+ }
+
+ return newState;
+}
+
+/**
+ * Returns true if state is a gradient and the gradient has alpha.
+ */
+bool SkPDFShader::State::GradientHasAlpha() const {
+ if (fType == SkShader::kNone_GradientType) {
+ return false;
+ }
+
+ for (int i = 0; i < fInfo.fColorCount; i++) {
+ SkAlpha alpha = SkColorGetA(fInfo.fColors[i]);
+ if (alpha != SK_AlphaOPAQUE) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void SkPDFShader::State::allocateGradientInfoStorage() {
+ fColors.reset(new SkColor[fInfo.fColorCount]);
+ fStops.reset(new SkScalar[fInfo.fColorCount]);
+ fInfo.fColors = fColors.get();
+ fInfo.fColorOffsets = fStops.get();
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFShader.h b/gfx/skia/skia/src/pdf/SkPDFShader.h
new file mode 100644
index 000000000..db13cd50b
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFShader.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPDFShader_DEFINED
+#define SkPDFShader_DEFINED
+
+#include "SkBitmapKey.h"
+#include "SkPDFTypes.h"
+#include "SkShader.h"
+
+class SkPDFCanon;
+class SkPDFDocument;
+class SkMatrix;
+struct SkIRect;
+
+/** \class SkPDFShader
+
+ In PDF parlance, this is a pattern, used in place of a color when the
+ pattern color space is selected.
+*/
+
+class SkPDFShader {
+public:
+ /** Get the PDF shader for the passed SkShader. If the SkShader is
+     *  invalid in some way, returns nullptr. The returned object shares
+     *  ownership with the document's canon, which may cache it so that
+     *  identical shaders are emitted only once.
+ * @param shader The SkShader to emulate.
+ * @param matrix The current transform. (PDF shaders are absolutely
+ * positioned, relative to where the page is drawn.)
+     *  @param surfaceBBox  The bounding box of the drawing surface (with matrix
+ * already applied).
+ * @param rasterScale Additional scale to be applied for early
+ * rasterization.
+ */
+ static sk_sp<SkPDFObject> GetPDFShader(SkPDFDocument* doc,
+ SkScalar dpi,
+ SkShader* shader,
+ const SkMatrix& matrix,
+ const SkIRect& surfaceBBox,
+ SkScalar rasterScale);
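+    // A minimal usage sketch (variable names here are illustrative only):
+    //   sk_sp<SkPDFObject> pattern = SkPDFShader::GetPDFShader(
+    //           doc, 72.0f, paintShader, canvasMatrix, surfaceBounds, 1.0f);
+    //   if (pattern) { /* reference it from the page's resource dict */ }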
+
+ static sk_sp<SkPDFArray> MakeRangeObject();
+
+ class State {
+ public:
+ SkShader::GradientType fType;
+ SkShader::GradientInfo fInfo;
+ std::unique_ptr<SkColor[]> fColors;
+ std::unique_ptr<SkScalar[]> fStops;
+ SkMatrix fCanvasTransform;
+ SkMatrix fShaderTransform;
+ SkIRect fBBox;
+
+ SkBitmapKey fBitmapKey;
+ SkShader::TileMode fImageTileModes[2];
+
+ State(SkShader* shader, const SkMatrix& canvasTransform,
+ const SkIRect& bbox, SkScalar rasterScale,
+ SkBitmap* dstImage);
+
+ bool operator==(const State& b) const;
+
+ State MakeAlphaToLuminosityState() const;
+ State MakeOpaqueState() const;
+
+ bool GradientHasAlpha() const;
+
+ State(State&&) = default;
+ State& operator=(State&&) = default;
+
+ private:
+ State(const State& other);
+ State& operator=(const State& rhs);
+ void allocateGradientInfoStorage();
+ };
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFTypes.cpp b/gfx/skia/skia/src/pdf/SkPDFTypes.cpp
new file mode 100644
index 000000000..7a1e0a48f
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFTypes.cpp
@@ -0,0 +1,613 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkData.h"
+#include "SkDeflate.h"
+#include "SkMakeUnique.h"
+#include "SkPDFTypes.h"
+#include "SkPDFUtils.h"
+#include "SkStream.h"
+#include "SkStreamPriv.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+static SkString* pun(char* x) { return reinterpret_cast<SkString*>(x); }
+static const SkString* pun(const char* x) {
+    return reinterpret_cast<const SkString*>(x);
+}
+
+SkPDFUnion::SkPDFUnion(Type t) : fType(t) {}
+
+SkPDFUnion::~SkPDFUnion() {
+ switch (fType) {
+ case Type::kNameSkS:
+ case Type::kStringSkS:
+ pun(fSkString)->~SkString();
+ return;
+ case Type::kObjRef:
+ case Type::kObject:
+ SkASSERT(fObject);
+ fObject->unref();
+ return;
+ default:
+ return;
+ }
+}
+
+SkPDFUnion& SkPDFUnion::operator=(SkPDFUnion&& other) {
+ if (this != &other) {
+ this->~SkPDFUnion();
+ new (this) SkPDFUnion(std::move(other));
+ }
+ return *this;
+}
+
+SkPDFUnion::SkPDFUnion(SkPDFUnion&& other) {
+ SkASSERT(this != &other);
+ memcpy(this, &other, sizeof(*this));
+ other.fType = Type::kDestroyed;
+}
+
+#if 0
+SkPDFUnion SkPDFUnion::copy() const {
+ SkPDFUnion u(fType);
+ memcpy(&u, this, sizeof(u));
+ switch (fType) {
+ case Type::kNameSkS:
+ case Type::kStringSkS:
+ new (pun(u.fSkString)) SkString(*pun(fSkString));
+ return u;
+ case Type::kObjRef:
+ case Type::kObject:
+ SkRef(u.fObject);
+ return u;
+ default:
+ return u;
+ }
+}
+SkPDFUnion& SkPDFUnion::operator=(const SkPDFUnion& other) {
+ return *this = other.copy();
+}
+SkPDFUnion::SkPDFUnion(const SkPDFUnion& other) {
+ *this = other.copy();
+}
+#endif
+
+bool SkPDFUnion::isName() const {
+ return Type::kName == fType || Type::kNameSkS == fType;
+}
+
+#ifdef SK_DEBUG
+// Most names need no escaping. Such names are handled as static
+// const strings.
+bool is_valid_name(const char* n) {
+ static const char kControlChars[] = "/%()<>[]{}";
+ while (*n) {
+ if (*n < '!' || *n > '~' || strchr(kControlChars, *n)) {
+ return false;
+ }
+ ++n;
+ }
+ return true;
+}
+#endif // SK_DEBUG
+
+// Given an arbitrary string, write it as a valid name (not including
+// leading slash).
+static void write_name_escaped(SkWStream* o, const char* name) {
+ static const char kToEscape[] = "#/%()<>[]{}";
+ static const char kHex[] = "0123456789ABCDEF";
+ for (const uint8_t* n = reinterpret_cast<const uint8_t*>(name); *n; ++n) {
+ if (*n < '!' || *n > '~' || strchr(kToEscape, *n)) {
+ char buffer[3] = {'#', '\0', '\0'};
+ buffer[1] = kHex[(*n >> 4) & 0xF];
+ buffer[2] = kHex[*n & 0xF];
+ o->write(buffer, sizeof(buffer));
+ } else {
+ o->write(n, 1);
+ }
+ }
+}
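+// For example, a name such as "My Font" is written as "My#20Font": the space
+// (0x20) falls outside the ['!', '~'] range and is hex-escaped, as is any of
+// the delimiter characters in kToEscape (e.g. '/' becomes "#2F").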
+
+void SkPDFUnion::emitObject(SkWStream* stream,
+ const SkPDFObjNumMap& objNumMap) const {
+ switch (fType) {
+ case Type::kInt:
+ stream->writeDecAsText(fIntValue);
+ return;
+ case Type::kColorComponent:
+ SkPDFUtils::AppendColorComponent(SkToU8(fIntValue), stream);
+ return;
+ case Type::kBool:
+ stream->writeText(fBoolValue ? "true" : "false");
+ return;
+ case Type::kScalar:
+ SkPDFUtils::AppendScalar(fScalarValue, stream);
+ return;
+ case Type::kName:
+ stream->writeText("/");
+ SkASSERT(is_valid_name(fStaticString));
+ stream->writeText(fStaticString);
+ return;
+ case Type::kString:
+ SkASSERT(fStaticString);
+ SkPDFUtils::WriteString(stream, fStaticString,
+ strlen(fStaticString));
+ return;
+ case Type::kNameSkS:
+ stream->writeText("/");
+ write_name_escaped(stream, pun(fSkString)->c_str());
+ return;
+ case Type::kStringSkS:
+ SkPDFUtils::WriteString(stream, pun(fSkString)->c_str(),
+ pun(fSkString)->size());
+ return;
+ case Type::kObjRef:
+ stream->writeDecAsText(objNumMap.getObjectNumber(fObject));
+ stream->writeText(" 0 R"); // Generation number is always 0.
+ return;
+ case Type::kObject:
+ fObject->emitObject(stream, objNumMap);
+ return;
+ default:
+ SkDEBUGFAIL("SkPDFUnion::emitObject with bad type");
+ }
+}
+
+void SkPDFUnion::addResources(SkPDFObjNumMap* objNumMap) const {
+ switch (fType) {
+ case Type::kInt:
+ case Type::kColorComponent:
+ case Type::kBool:
+ case Type::kScalar:
+ case Type::kName:
+ case Type::kString:
+ case Type::kNameSkS:
+ case Type::kStringSkS:
+ return; // These have no resources.
+ case Type::kObjRef:
+ objNumMap->addObjectRecursively(fObject);
+ return;
+ case Type::kObject:
+ fObject->addResources(objNumMap);
+ return;
+ default:
+ SkDEBUGFAIL("SkPDFUnion::addResources with bad type");
+ }
+}
+
+SkPDFUnion SkPDFUnion::Int(int32_t value) {
+ SkPDFUnion u(Type::kInt);
+ u.fIntValue = value;
+ return u;
+}
+
+SkPDFUnion SkPDFUnion::ColorComponent(uint8_t value) {
+ SkPDFUnion u(Type::kColorComponent);
+ u.fIntValue = value;
+ return u;
+}
+
+SkPDFUnion SkPDFUnion::Bool(bool value) {
+ SkPDFUnion u(Type::kBool);
+ u.fBoolValue = value;
+ return u;
+}
+
+SkPDFUnion SkPDFUnion::Scalar(SkScalar value) {
+ SkPDFUnion u(Type::kScalar);
+ u.fScalarValue = value;
+ return u;
+}
+
+SkPDFUnion SkPDFUnion::Name(const char* value) {
+ SkPDFUnion u(Type::kName);
+ SkASSERT(value);
+ SkASSERT(is_valid_name(value));
+ u.fStaticString = value;
+ return u;
+}
+
+SkPDFUnion SkPDFUnion::String(const char* value) {
+ SkPDFUnion u(Type::kString);
+ SkASSERT(value);
+ u.fStaticString = value;
+ return u;
+}
+
+SkPDFUnion SkPDFUnion::Name(const SkString& s) {
+ SkPDFUnion u(Type::kNameSkS);
+ new (pun(u.fSkString)) SkString(s);
+ return u;
+}
+
+SkPDFUnion SkPDFUnion::String(const SkString& s) {
+ SkPDFUnion u(Type::kStringSkS);
+ new (pun(u.fSkString)) SkString(s);
+ return u;
+}
+
+SkPDFUnion SkPDFUnion::ObjRef(sk_sp<SkPDFObject> objSp) {
+ SkPDFUnion u(Type::kObjRef);
+ SkASSERT(objSp.get());
+ u.fObject = objSp.release(); // take ownership into union{}
+ return u;
+}
+
+SkPDFUnion SkPDFUnion::Object(sk_sp<SkPDFObject> objSp) {
+ SkPDFUnion u(Type::kObject);
+ SkASSERT(objSp.get());
+ u.fObject = objSp.release(); // take ownership into union{}
+ return u;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+#if 0 // Enable if needed.
+void SkPDFAtom::emitObject(SkWStream* stream,
+ const SkPDFObjNumMap& objNumMap) const {
+ fValue.emitObject(stream, objNumMap);
+}
+void SkPDFAtom::addResources(SkPDFObjNumMap* map) const {
+ fValue.addResources(map);
+}
+#endif // 0
+
+////////////////////////////////////////////////////////////////////////////////
+
+SkPDFArray::SkPDFArray() { SkDEBUGCODE(fDumped = false;) }
+
+SkPDFArray::~SkPDFArray() { this->drop(); }
+
+void SkPDFArray::drop() {
+ fValues.reset();
+ SkDEBUGCODE(fDumped = true;)
+}
+
+int SkPDFArray::size() const { return fValues.count(); }
+
+void SkPDFArray::reserve(int length) {
+ // TODO(halcanary): implement SkTArray<T>::reserve() or change the
+    // constructor of SkPDFArray to take a reserve size.
+}
+
+void SkPDFArray::emitObject(SkWStream* stream,
+ const SkPDFObjNumMap& objNumMap) const {
+ SkASSERT(!fDumped);
+ stream->writeText("[");
+ for (int i = 0; i < fValues.count(); i++) {
+ fValues[i].emitObject(stream, objNumMap);
+ if (i + 1 < fValues.count()) {
+ stream->writeText(" ");
+ }
+ }
+ stream->writeText("]");
+}
+
+void SkPDFArray::addResources(SkPDFObjNumMap* catalog) const {
+ SkASSERT(!fDumped);
+ for (const SkPDFUnion& value : fValues) {
+ value.addResources(catalog);
+ }
+}
+
+void SkPDFArray::append(SkPDFUnion&& value) {
+ fValues.emplace_back(std::move(value));
+}
+
+void SkPDFArray::appendInt(int32_t value) {
+ this->append(SkPDFUnion::Int(value));
+}
+
+void SkPDFArray::appendColorComponent(uint8_t value) {
+ this->append(SkPDFUnion::ColorComponent(value));
+}
+
+void SkPDFArray::appendBool(bool value) {
+ this->append(SkPDFUnion::Bool(value));
+}
+
+void SkPDFArray::appendScalar(SkScalar value) {
+ this->append(SkPDFUnion::Scalar(value));
+}
+
+void SkPDFArray::appendName(const char name[]) {
+ this->append(SkPDFUnion::Name(SkString(name)));
+}
+
+void SkPDFArray::appendName(const SkString& name) {
+ this->append(SkPDFUnion::Name(name));
+}
+
+void SkPDFArray::appendString(const SkString& value) {
+ this->append(SkPDFUnion::String(value));
+}
+
+void SkPDFArray::appendString(const char value[]) {
+ this->append(SkPDFUnion::String(value));
+}
+
+void SkPDFArray::appendObject(sk_sp<SkPDFObject> objSp) {
+ this->append(SkPDFUnion::Object(std::move(objSp)));
+}
+
+void SkPDFArray::appendObjRef(sk_sp<SkPDFObject> objSp) {
+ this->append(SkPDFUnion::ObjRef(std::move(objSp)));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPDFDict::~SkPDFDict() { this->drop(); }
+
+void SkPDFDict::drop() {
+ fRecords.reset();
+ SkDEBUGCODE(fDumped = true;)
+}
+
+SkPDFDict::SkPDFDict(const char type[]) {
+ SkDEBUGCODE(fDumped = false;)
+ if (type) {
+ this->insertName("Type", type);
+ }
+}
+
+void SkPDFDict::emitObject(SkWStream* stream,
+ const SkPDFObjNumMap& objNumMap) const {
+ stream->writeText("<<");
+ this->emitAll(stream, objNumMap);
+ stream->writeText(">>");
+}
+
+void SkPDFDict::emitAll(SkWStream* stream,
+ const SkPDFObjNumMap& objNumMap) const {
+ SkASSERT(!fDumped);
+ for (int i = 0; i < fRecords.count(); i++) {
+ fRecords[i].fKey.emitObject(stream, objNumMap);
+ stream->writeText(" ");
+ fRecords[i].fValue.emitObject(stream, objNumMap);
+ if (i + 1 < fRecords.count()) {
+ stream->writeText("\n");
+ }
+ }
+}
+
+void SkPDFDict::addResources(SkPDFObjNumMap* catalog) const {
+ SkASSERT(!fDumped);
+ for (int i = 0; i < fRecords.count(); i++) {
+ fRecords[i].fKey.addResources(catalog);
+ fRecords[i].fValue.addResources(catalog);
+ }
+}
+
+SkPDFDict::Record::Record(SkPDFUnion&& k, SkPDFUnion&& v)
+ : fKey(std::move(k)), fValue(std::move(v)) {}
+
+int SkPDFDict::size() const { return fRecords.count(); }
+
+void SkPDFDict::insertObjRef(const char key[], sk_sp<SkPDFObject> objSp) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::ObjRef(std::move(objSp)));
+}
+
+void SkPDFDict::insertObjRef(const SkString& key, sk_sp<SkPDFObject> objSp) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::ObjRef(std::move(objSp)));
+}
+
+void SkPDFDict::insertObject(const char key[], sk_sp<SkPDFObject> objSp) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Object(std::move(objSp)));
+}
+void SkPDFDict::insertObject(const SkString& key, sk_sp<SkPDFObject> objSp) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Object(std::move(objSp)));
+}
+
+void SkPDFDict::insertBool(const char key[], bool value) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Bool(value));
+}
+
+void SkPDFDict::insertInt(const char key[], int32_t value) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Int(value));
+}
+
+void SkPDFDict::insertInt(const char key[], size_t value) {
+ this->insertInt(key, SkToS32(value));
+}
+
+void SkPDFDict::insertScalar(const char key[], SkScalar value) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Scalar(value));
+}
+
+void SkPDFDict::insertName(const char key[], const char name[]) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Name(name));
+}
+
+void SkPDFDict::insertName(const char key[], const SkString& name) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Name(name));
+}
+
+void SkPDFDict::insertString(const char key[], const char value[]) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::String(value));
+}
+
+void SkPDFDict::insertString(const char key[], const SkString& value) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::String(value));
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+SkPDFSharedStream::SkPDFSharedStream(std::unique_ptr<SkStreamAsset> data)
+ : fAsset(std::move(data)) {
+ SkASSERT(fAsset);
+}
+
+SkPDFSharedStream::~SkPDFSharedStream() { this->drop(); }
+
+void SkPDFSharedStream::drop() {
+    fAsset = nullptr;
+ fDict.drop();
+}
+
+#ifdef SK_PDF_LESS_COMPRESSION
+void SkPDFSharedStream::emitObject(
+ SkWStream* stream,
+ const SkPDFObjNumMap& objNumMap) const {
+ SkASSERT(fAsset);
+ std::unique_ptr<SkStreamAsset> dup(fAsset->duplicate());
+ SkASSERT(dup && dup->hasLength());
+ size_t length = dup->getLength();
+ stream->writeText("<<");
+ fDict.emitAll(stream, objNumMap);
+ stream->writeText("\n");
+ SkPDFUnion::Name("Length").emitObject(stream, objNumMap);
+ stream->writeText(" ");
+ SkPDFUnion::Int(length).emitObject(stream, objNumMap);
+ stream->writeText("\n>>stream\n");
+ SkStreamCopy(stream, dup.get());
+ stream->writeText("\nendstream");
+}
+#else
+void SkPDFSharedStream::emitObject(
+ SkWStream* stream,
+ const SkPDFObjNumMap& objNumMap) const {
+ SkASSERT(fAsset);
+ SkDynamicMemoryWStream buffer;
+ SkDeflateWStream deflateWStream(&buffer);
+ // Since emitObject is const, this function doesn't change the dictionary.
+ std::unique_ptr<SkStreamAsset> dup(fAsset->duplicate()); // Cheap copy
+ SkASSERT(dup);
+ SkStreamCopy(&deflateWStream, dup.get());
+ deflateWStream.finalize();
+ size_t length = buffer.bytesWritten();
+ stream->writeText("<<");
+ fDict.emitAll(stream, objNumMap);
+ stream->writeText("\n");
+ SkPDFUnion::Name("Length").emitObject(stream, objNumMap);
+ stream->writeText(" ");
+ SkPDFUnion::Int(length).emitObject(stream, objNumMap);
+ stream->writeText("\n");
+ SkPDFUnion::Name("Filter").emitObject(stream, objNumMap);
+ stream->writeText(" ");
+ SkPDFUnion::Name("FlateDecode").emitObject(stream, objNumMap);
+ stream->writeText(">>");
+ stream->writeText(" stream\n");
+ buffer.writeToStream(stream);
+ stream->writeText("\nendstream");
+}
+#endif
+
+void SkPDFSharedStream::addResources(
+ SkPDFObjNumMap* catalog) const {
+ SkASSERT(fAsset);
+ fDict.addResources(catalog);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+
+SkPDFStream::SkPDFStream(sk_sp<SkData> data) {
+ this->setData(skstd::make_unique<SkMemoryStream>(std::move(data)));
+}
+
+SkPDFStream::SkPDFStream(std::unique_ptr<SkStreamAsset> stream) {
+ this->setData(std::move(stream));
+}
+
+SkPDFStream::SkPDFStream() {}
+
+SkPDFStream::~SkPDFStream() {}
+
+void SkPDFStream::addResources(SkPDFObjNumMap* catalog) const {
+ SkASSERT(fCompressedData);
+ fDict.addResources(catalog);
+}
+
+void SkPDFStream::drop() {
+ fCompressedData.reset(nullptr);
+ fDict.drop();
+}
+
+void SkPDFStream::emitObject(SkWStream* stream,
+ const SkPDFObjNumMap& objNumMap) const {
+ SkASSERT(fCompressedData);
+ fDict.emitObject(stream, objNumMap);
+ // duplicate (a cheap operation) preserves const on fCompressedData.
+ std::unique_ptr<SkStreamAsset> dup(fCompressedData->duplicate());
+ SkASSERT(dup);
+ SkASSERT(dup->hasLength());
+ stream->writeText(" stream\n");
+ stream->writeStream(dup.get(), dup->getLength());
+ stream->writeText("\nendstream");
+}
+
+void SkPDFStream::setData(std::unique_ptr<SkStreamAsset> stream) {
+ SkASSERT(!fCompressedData); // Only call this function once.
+ SkASSERT(stream);
+ // Code assumes that the stream starts at the beginning.
+
+ #ifdef SK_PDF_LESS_COMPRESSION
+ fCompressedData = std::move(stream);
+ SkASSERT(fCompressedData && fCompressedData->hasLength());
+ fDict.insertInt("Length", fCompressedData->getLength());
+ #else
+
+ SkASSERT(stream->hasLength());
+ SkDynamicMemoryWStream compressedData;
+ SkDeflateWStream deflateWStream(&compressedData);
+ if (stream->getLength() > 0) {
+ SkStreamCopy(&deflateWStream, stream.get());
+ }
+ deflateWStream.finalize();
+ size_t compressedLength = compressedData.bytesWritten();
+ size_t originalLength = stream->getLength();
+
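+    // Keep the original stream when deflating would not save at least the
+    // extra "/Filter /FlateDecode " dictionary overhead (about 21 bytes).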
+ if (originalLength <= compressedLength + strlen("/Filter_/FlateDecode_")) {
+ SkAssertResult(stream->rewind());
+ fCompressedData = std::move(stream);
+ fDict.insertInt("Length", originalLength);
+ return;
+ }
+ fCompressedData.reset(compressedData.detachAsStream());
+ fDict.insertName("Filter", "FlateDecode");
+ fDict.insertInt("Length", compressedLength);
+ #endif
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool SkPDFObjNumMap::addObject(SkPDFObject* obj) {
+ if (fObjectNumbers.find(obj)) {
+ return false;
+ }
+ fObjectNumbers.set(obj, fObjectNumbers.count() + 1);
+ fObjects.emplace_back(sk_ref_sp(obj));
+ return true;
+}
+
+void SkPDFObjNumMap::addObjectRecursively(SkPDFObject* obj) {
+ if (obj && this->addObject(obj)) {
+ obj->addResources(this);
+ }
+}
+
+int32_t SkPDFObjNumMap::getObjectNumber(SkPDFObject* obj) const {
+ int32_t* objectNumberFound = fObjectNumbers.find(obj);
+ SkASSERT(objectNumberFound);
+ return *objectNumberFound;
+}
+
+#ifdef SK_PDF_IMAGE_STATS
+SkAtomic<int> gDrawImageCalls(0);
+SkAtomic<int> gJpegImageObjects(0);
+SkAtomic<int> gRegularImageObjects(0);
+
+void SkPDFImageDumpStats() {
+ SkDebugf("\ntotal PDF drawImage/drawBitmap calls: %d\n"
+ "total PDF jpeg images: %d\n"
+ "total PDF regular images: %d\n",
+ gDrawImageCalls.load(),
+ gJpegImageObjects.load(),
+ gRegularImageObjects.load());
+}
+#endif // SK_PDF_IMAGE_STATS
diff --git a/gfx/skia/skia/src/pdf/SkPDFTypes.h b/gfx/skia/skia/src/pdf/SkPDFTypes.h
new file mode 100644
index 000000000..201d62b2e
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFTypes.h
@@ -0,0 +1,396 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPDFTypes_DEFINED
+#define SkPDFTypes_DEFINED
+
+#include "SkRefCnt.h"
+#include "SkScalar.h"
+#include "SkTHash.h"
+#include "SkTypes.h"
+
+class SkData;
+class SkPDFObjNumMap;
+class SkPDFObject;
+class SkStreamAsset;
+class SkString;
+class SkWStream;
+
+#ifdef SK_PDF_IMAGE_STATS
+#include "SkAtomics.h"
+#endif
+
+/** \class SkPDFObject
+
+ A PDF Object is the base class for primitive elements in a PDF file. A
+ common subtype is used to ease the use of indirect object references,
+ which are common in the PDF format.
+
+*/
+class SkPDFObject : public SkRefCnt {
+public:
+ /** Subclasses must implement this method to print the object to the
+ * PDF file.
+     *  @param stream     The writable output stream to send the output to.
+     *  @param objNumMap  The mapping from objects to object numbers, used
+     *                    when emitting indirect object references.
+ */
+ virtual void emitObject(SkWStream* stream,
+ const SkPDFObjNumMap& objNumMap) const = 0;
+
+ /**
+ * Adds all transitive dependencies of this object to the
+ * catalog. Implementations should respect the catalog's object
+ * substitution map.
+ */
+ virtual void addResources(SkPDFObjNumMap* catalog) const {}
+
+ /**
+ * Release all resources associated with this SkPDFObject. It is
+ * an error to call emitObject() or addResources() after calling
+ * drop().
+ */
+ virtual void drop() {}
+
+ virtual ~SkPDFObject() {}
+private:
+ typedef SkRefCnt INHERITED;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ A SkPDFUnion is a non-virtualized implementation of the
+ non-compound, non-specialized PDF Object types: Name, String,
+ Number, Boolean.
+ */
+class SkPDFUnion {
+public:
+    // The move constructor and move-assignment operator destroy the argument
+    // and steal its references (if needed).
+ SkPDFUnion(SkPDFUnion&& other);
+ SkPDFUnion& operator=(SkPDFUnion&& other);
+
+ ~SkPDFUnion();
+
+    /** The following factory functions are the standard way of creating
+        SkPDFUnion objects. */
+
+ static SkPDFUnion Int(int32_t);
+
+ static SkPDFUnion Int(size_t v) { return SkPDFUnion::Int(SkToS32(v)); }
+
+ static SkPDFUnion Bool(bool);
+
+ static SkPDFUnion Scalar(SkScalar);
+
+ static SkPDFUnion ColorComponent(uint8_t);
+
+ /** These two functions do NOT take ownership of char*, and do NOT
+ copy the string. Suitable for passing in static const
+ strings. For example:
+ SkPDFUnion n = SkPDFUnion::Name("Length");
+ SkPDFUnion u = SkPDFUnion::String("Identity"); */
+
+ /** SkPDFUnion::Name(const char*) assumes that the passed string
+ is already a valid name (that is: it has no control or
+ whitespace characters). This will not copy the name. */
+ static SkPDFUnion Name(const char*);
+
+ /** SkPDFUnion::String will encode the passed string. This will
+        not copy the string. */
+ static SkPDFUnion String(const char*);
+
+ /** SkPDFUnion::Name(const SkString&) does not assume that the
+ passed string is already a valid name and it will escape the
+ string. */
+ static SkPDFUnion Name(const SkString&);
+
+ /** SkPDFUnion::String will encode the passed string. */
+ static SkPDFUnion String(const SkString&);
+
+ static SkPDFUnion Object(sk_sp<SkPDFObject>);
+ static SkPDFUnion ObjRef(sk_sp<SkPDFObject>);
+
+ /** These two non-virtual methods mirror SkPDFObject's
+ corresponding virtuals. */
+ void emitObject(SkWStream*, const SkPDFObjNumMap&) const;
+ void addResources(SkPDFObjNumMap*) const;
+
+ bool isName() const;
+
+private:
+ union {
+ int32_t fIntValue;
+ bool fBoolValue;
+ SkScalar fScalarValue;
+ const char* fStaticString;
+ char fSkString[sizeof(SkString)];
+ SkPDFObject* fObject;
+ };
+ enum class Type : char {
+ /** It is an error to call emitObject() or addResources() on an
+ kDestroyed object. */
+ kDestroyed = 0,
+ kInt,
+ kColorComponent,
+ kBool,
+ kScalar,
+ kName,
+ kString,
+ kNameSkS,
+ kStringSkS,
+ kObjRef,
+ kObject,
+ };
+ Type fType;
+
+ SkPDFUnion(Type);
+    // We do not currently need the copy constructor or copy assignment, so
+    // they are disabled.
+ SkPDFUnion& operator=(const SkPDFUnion&) = delete;
+ SkPDFUnion(const SkPDFUnion&) = delete;
+};
+static_assert(sizeof(SkString) == sizeof(void*), "SkString_size");
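+// The fSkString buffer in SkPDFUnion holds an SkString in place; this assert
+// guards the assumption that doing so keeps the union pointer-sized.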
+
+////////////////////////////////////////////////////////////////////////////////
+
+#if 0 // Enable if needed.
+/** This class is a SkPDFUnion with SkPDFObject virtuals attached.
+ The only use case of this is when a non-compound PDF object is
+ referenced indirectly. */
+class SkPDFAtom final : public SkPDFObject {
+public:
+ void emitObject(SkWStream* stream,
+                    const SkPDFObjNumMap& objNumMap) const final;
+    void addResources(SkPDFObjNumMap* map) const final;
+    SkPDFAtom(SkPDFUnion&& v) : fValue(std::move(v)) {}
+
+private:
+ const SkPDFUnion fValue;
+ typedef SkPDFObject INHERITED;
+};
+#endif // 0
+
+////////////////////////////////////////////////////////////////////////////////
+
+/** \class SkPDFArray
+
+ An array object in a PDF.
+*/
+class SkPDFArray final : public SkPDFObject {
+public:
+ /** Create a PDF array. Maximum length is 8191.
+ */
+ SkPDFArray();
+ virtual ~SkPDFArray();
+
+ // The SkPDFObject interface.
+ void emitObject(SkWStream* stream,
+ const SkPDFObjNumMap& objNumMap) const override;
+ void addResources(SkPDFObjNumMap*) const override;
+ void drop() override;
+
+ /** The size of the array.
+ */
+ int size() const;
+
+ /** Preallocate space for the given number of entries.
+ * @param length The number of array slots to preallocate.
+ */
+ void reserve(int length);
+
+ /** Appends a value to the end of the array.
+ * @param value The value to add to the array.
+ */
+ void appendInt(int32_t);
+ void appendColorComponent(uint8_t);
+ void appendBool(bool);
+ void appendScalar(SkScalar);
+ void appendName(const char[]);
+ void appendName(const SkString&);
+ void appendString(const char[]);
+ void appendString(const SkString&);
+ void appendObject(sk_sp<SkPDFObject>);
+ void appendObjRef(sk_sp<SkPDFObject>);
+
+private:
+ SkTArray<SkPDFUnion> fValues;
+ void append(SkPDFUnion&& value);
+ SkDEBUGCODE(bool fDumped;)
+};
+
+/** \class SkPDFDict
+
+ A dictionary object in a PDF.
+*/
+class SkPDFDict : public SkPDFObject {
+public:
+ /** Create a PDF dictionary.
+ * @param type The value of the Type entry, nullptr for no type.
+ */
+ explicit SkPDFDict(const char type[] = nullptr);
+
+ virtual ~SkPDFDict();
+
+ // The SkPDFObject interface.
+ void emitObject(SkWStream* stream,
+ const SkPDFObjNumMap& objNumMap) const override;
+ void addResources(SkPDFObjNumMap*) const override;
+ void drop() override;
+
+ /** The size of the dictionary.
+ */
+ int size() const;
+
+ /** Add the value to the dictionary with the given key.
+ * @param key The text of the key for this dictionary entry.
+ * @param value The value for this dictionary entry.
+ */
+ void insertObject(const char key[], sk_sp<SkPDFObject>);
+ void insertObject(const SkString& key, sk_sp<SkPDFObject>);
+ void insertObjRef(const char key[], sk_sp<SkPDFObject>);
+ void insertObjRef(const SkString& key, sk_sp<SkPDFObject>);
+
+ /** Add the value to the dictionary with the given key.
+ * @param key The text of the key for this dictionary entry.
+ * @param value The value for this dictionary entry.
+ */
+ void insertBool(const char key[], bool value);
+ void insertInt(const char key[], int32_t value);
+ void insertInt(const char key[], size_t value);
+ void insertScalar(const char key[], SkScalar value);
+ void insertName(const char key[], const char nameValue[]);
+ void insertName(const char key[], const SkString& nameValue);
+ void insertString(const char key[], const char value[]);
+ void insertString(const char key[], const SkString& value);
+
+ /** Emit the dictionary, without the "<<" and ">>".
+ */
+ void emitAll(SkWStream* stream,
+ const SkPDFObjNumMap& objNumMap) const;
+
+private:
+ struct Record {
+ SkPDFUnion fKey;
+ SkPDFUnion fValue;
+ Record(SkPDFUnion&&, SkPDFUnion&&);
+ Record(Record&&) = default;
+ Record& operator=(Record&&) = default;
+ Record(const Record&) = delete;
+ Record& operator=(const Record&) = delete;
+ };
+ SkTArray<Record> fRecords;
+ SkDEBUGCODE(bool fDumped;)
+};
+
+/** \class SkPDFSharedStream
+
+ This class takes an asset and assumes that it is backed by
+ long-lived shared data (for example, an open file
+ descriptor). That is: no memory savings can be made by holding on
+ to a compressed version instead.
+ */
+class SkPDFSharedStream final : public SkPDFObject {
+public:
+ SkPDFSharedStream(std::unique_ptr<SkStreamAsset> data);
+ ~SkPDFSharedStream();
+ SkPDFDict* dict() { return &fDict; }
+ void emitObject(SkWStream*,
+ const SkPDFObjNumMap&) const override;
+ void addResources(SkPDFObjNumMap*) const override;
+ void drop() override;
+
+private:
+ std::unique_ptr<SkStreamAsset> fAsset;
+ SkPDFDict fDict;
+ typedef SkPDFObject INHERITED;
+};
+
+/** \class SkPDFStream
+
+ This class takes an asset and assumes that it is the only owner of
+ the asset's data. It immediately compresses the asset to save
+ memory.
+ */
+
+class SkPDFStream final : public SkPDFObject {
+
+public:
+ /** Create a PDF stream. A Length entry is automatically added to the
+ * stream dictionary.
+ * @param data The data part of the stream.
+     *  @param stream A stream asset holding the data part of the stream;
+     *                ownership of the asset is taken. */
+ explicit SkPDFStream(sk_sp<SkData> data);
+ explicit SkPDFStream(std::unique_ptr<SkStreamAsset> stream);
+ virtual ~SkPDFStream();
+
+ SkPDFDict* dict() { return &fDict; }
+
+ // The SkPDFObject interface.
+ void emitObject(SkWStream* stream,
+ const SkPDFObjNumMap& objNumMap) const override;
+ void addResources(SkPDFObjNumMap*) const final;
+ void drop() override;
+
+protected:
+ /* Create a PDF stream with no data. The setData method must be called to
+ * set the data. */
+ SkPDFStream();
+
+ /** Only call this function once. */
+ void setData(std::unique_ptr<SkStreamAsset> stream);
+
+private:
+ std::unique_ptr<SkStreamAsset> fCompressedData;
+ SkPDFDict fDict;
+
+    typedef SkPDFObject INHERITED;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+/** \class SkPDFObjNumMap
+
+ The PDF Object Number Map manages object numbers. It is used to
+ create the PDF cross reference table.
+*/
+class SkPDFObjNumMap : SkNoncopyable {
+public:
+ /** Add the passed object to the catalog.
+ * @param obj The object to add.
+ * @return True iff the object was not already added to the catalog.
+ */
+ bool addObject(SkPDFObject* obj);
+
+ /** Add the passed object to the catalog, as well as all its dependencies.
+ * @param obj The object to add. If nullptr, this is a noop.
+ */
+ void addObjectRecursively(SkPDFObject* obj);
+
+ /** Get the object number for the passed object.
+ * @param obj The object of interest.
+ */
+ int32_t getObjectNumber(SkPDFObject* obj) const;
+
+ const SkTArray<sk_sp<SkPDFObject>>& objects() const { return fObjects; }
+
+private:
+ SkTArray<sk_sp<SkPDFObject>> fObjects;
+ SkTHashMap<SkPDFObject*, int32_t> fObjectNumbers;
+};
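+
+// Typical use, sketched for illustration (the `root` object here is
+// hypothetical): add the top-level object recursively, then look up numbers
+// while emitting the cross-reference table.
+//
+//   SkPDFObjNumMap objNumMap;
+//   objNumMap.addObjectRecursively(root.get());
+//   int32_t rootNumber = objNumMap.getObjectNumber(root.get());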
+
+////////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_PDF_IMAGE_STATS
+extern SkAtomic<int> gDrawImageCalls;
+extern SkAtomic<int> gJpegImageObjects;
+extern SkAtomic<int> gRegularImageObjects;
+extern void SkPDFImageDumpStats();
+#endif // SK_PDF_IMAGE_STATS
+
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFUtils.cpp b/gfx/skia/skia/src/pdf/SkPDFUtils.cpp
new file mode 100644
index 000000000..0fe6fb59a
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFUtils.cpp
@@ -0,0 +1,481 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkData.h"
+#include "SkFixed.h"
+#include "SkGeometry.h"
+#include "SkPDFResourceDict.h"
+#include "SkPDFUtils.h"
+#include "SkStream.h"
+#include "SkString.h"
+#include "SkPDFTypes.h"
+
+#include <cmath>
+
+sk_sp<SkPDFArray> SkPDFUtils::RectToArray(const SkRect& rect) {
+ auto result = sk_make_sp<SkPDFArray>();
+ result->reserve(4);
+ result->appendScalar(rect.fLeft);
+ result->appendScalar(rect.fTop);
+ result->appendScalar(rect.fRight);
+ result->appendScalar(rect.fBottom);
+ return result;
+}
+
+sk_sp<SkPDFArray> SkPDFUtils::MatrixToArray(const SkMatrix& matrix) {
+ SkScalar values[6];
+ if (!matrix.asAffine(values)) {
+ SkMatrix::SetAffineIdentity(values);
+ }
+
+ auto result = sk_make_sp<SkPDFArray>();
+ result->reserve(6);
+ for (size_t i = 0; i < SK_ARRAY_COUNT(values); i++) {
+ result->appendScalar(values[i]);
+ }
+ return result;
+}
+
+// static
+void SkPDFUtils::AppendTransform(const SkMatrix& matrix, SkWStream* content) {
+ SkScalar values[6];
+ if (!matrix.asAffine(values)) {
+ SkMatrix::SetAffineIdentity(values);
+ }
+ for (size_t i = 0; i < SK_ARRAY_COUNT(values); i++) {
+ SkPDFUtils::AppendScalar(values[i], content);
+ content->writeText(" ");
+ }
+ content->writeText("cm\n");
+}
+
+// static
+void SkPDFUtils::MoveTo(SkScalar x, SkScalar y, SkWStream* content) {
+ SkPDFUtils::AppendScalar(x, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(y, content);
+ content->writeText(" m\n");
+}
+
+// static
+void SkPDFUtils::AppendLine(SkScalar x, SkScalar y, SkWStream* content) {
+ SkPDFUtils::AppendScalar(x, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(y, content);
+ content->writeText(" l\n");
+}
+
+// static
+void SkPDFUtils::AppendCubic(SkScalar ctl1X, SkScalar ctl1Y,
+ SkScalar ctl2X, SkScalar ctl2Y,
+ SkScalar dstX, SkScalar dstY, SkWStream* content) {
+ SkString cmd("y\n");
+ SkPDFUtils::AppendScalar(ctl1X, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(ctl1Y, content);
+ content->writeText(" ");
+ if (ctl2X != dstX || ctl2Y != dstY) {
+ cmd.set("c\n");
+ SkPDFUtils::AppendScalar(ctl2X, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(ctl2Y, content);
+ content->writeText(" ");
+ }
+ SkPDFUtils::AppendScalar(dstX, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(dstY, content);
+ content->writeText(" ");
+ content->writeText(cmd.c_str());
+}
+
+static void append_quad(const SkPoint quad[], SkWStream* content) {
+ SkPoint cubic[4];
+ SkConvertQuadToCubic(quad, cubic);
+ SkPDFUtils::AppendCubic(cubic[1].fX, cubic[1].fY, cubic[2].fX, cubic[2].fY,
+ cubic[3].fX, cubic[3].fY, content);
+}
+
+// static
+void SkPDFUtils::AppendRectangle(const SkRect& rect, SkWStream* content) {
+ // Skia has 0,0 at top left, pdf at bottom left. Do the right thing.
+ SkScalar bottom = SkMinScalar(rect.fBottom, rect.fTop);
+
+ SkPDFUtils::AppendScalar(rect.fLeft, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(bottom, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(rect.width(), content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(rect.height(), content);
+ content->writeText(" re\n");
+}
+
+// static
+void SkPDFUtils::EmitPath(const SkPath& path, SkPaint::Style paintStyle,
+ bool doConsumeDegerates, SkWStream* content) {
+ // Filling a path with no area results in a drawing in PDF renderers but
+ // Chrome expects to be able to draw some such entities with no visible
+ // result, so we detect those cases and discard the drawing for them.
+ // Specifically: moveTo(X), lineTo(Y) and moveTo(X), lineTo(X), lineTo(Y).
+ enum SkipFillState {
+ kEmpty_SkipFillState,
+ kSingleLine_SkipFillState,
+ kNonSingleLine_SkipFillState,
+ };
+ SkipFillState fillState = kEmpty_SkipFillState;
+ //if (paintStyle != SkPaint::kFill_Style) {
+ // fillState = kNonSingleLine_SkipFillState;
+ //}
+ SkPoint lastMovePt = SkPoint::Make(0,0);
+ SkDynamicMemoryWStream currentSegment;
+ SkPoint args[4];
+ SkPath::Iter iter(path, false);
+ for (SkPath::Verb verb = iter.next(args, doConsumeDegerates);
+ verb != SkPath::kDone_Verb;
+ verb = iter.next(args, doConsumeDegerates)) {
+ // args gets all the points, even the implicit first point.
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ MoveTo(args[0].fX, args[0].fY, &currentSegment);
+ lastMovePt = args[0];
+ fillState = kEmpty_SkipFillState;
+ break;
+ case SkPath::kLine_Verb:
+ AppendLine(args[1].fX, args[1].fY, &currentSegment);
+ if ((fillState == kEmpty_SkipFillState) && (args[0] != lastMovePt)) {
+ fillState = kSingleLine_SkipFillState;
+ break;
+ }
+ fillState = kNonSingleLine_SkipFillState;
+ break;
+ case SkPath::kQuad_Verb:
+ append_quad(args, &currentSegment);
+ fillState = kNonSingleLine_SkipFillState;
+ break;
+ case SkPath::kConic_Verb: {
+ const SkScalar tol = SK_Scalar1 / 4;
+ SkAutoConicToQuads converter;
+ const SkPoint* quads = converter.computeQuads(args, iter.conicWeight(), tol);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ append_quad(&quads[i * 2], &currentSegment);
+ }
+ fillState = kNonSingleLine_SkipFillState;
+ } break;
+ case SkPath::kCubic_Verb:
+ AppendCubic(args[1].fX, args[1].fY, args[2].fX, args[2].fY,
+ args[3].fX, args[3].fY, &currentSegment);
+ fillState = kNonSingleLine_SkipFillState;
+ break;
+ case SkPath::kClose_Verb:
+
+ ClosePath(&currentSegment);
+
+ currentSegment.writeToStream(content);
+ currentSegment.reset();
+ break;
+ default:
+ SkASSERT(false);
+ break;
+ }
+ }
+ if (currentSegment.bytesWritten() > 0) {
+ currentSegment.writeToStream(content);
+ }
+}
+
+// static
+void SkPDFUtils::ClosePath(SkWStream* content) {
+ content->writeText("h\n");
+}
+
+// static
+void SkPDFUtils::PaintPath(SkPaint::Style style, SkPath::FillType fill,
+ SkWStream* content) {
+ if (style == SkPaint::kFill_Style) {
+ content->writeText("f");
+ } else if (style == SkPaint::kStrokeAndFill_Style) {
+ content->writeText("B");
+ } else if (style == SkPaint::kStroke_Style) {
+ content->writeText("S");
+ }
+
+ if (style != SkPaint::kStroke_Style) {
+ NOT_IMPLEMENTED(fill == SkPath::kInverseEvenOdd_FillType, false);
+ NOT_IMPLEMENTED(fill == SkPath::kInverseWinding_FillType, false);
+ if (fill == SkPath::kEvenOdd_FillType) {
+ content->writeText("*");
+ }
+ }
+ content->writeText("\n");
+}
+
+// static
+void SkPDFUtils::StrokePath(SkWStream* content) {
+ SkPDFUtils::PaintPath(
+ SkPaint::kStroke_Style, SkPath::kWinding_FillType, content);
+}
+
+// static
+void SkPDFUtils::DrawFormXObject(int objectIndex, SkWStream* content) {
+ content->writeText("/");
+ content->writeText(SkPDFResourceDict::getResourceName(
+ SkPDFResourceDict::kXObject_ResourceType,
+ objectIndex).c_str());
+ content->writeText(" Do\n");
+}
+
+// static
+void SkPDFUtils::ApplyGraphicState(int objectIndex, SkWStream* content) {
+ content->writeText("/");
+ content->writeText(SkPDFResourceDict::getResourceName(
+ SkPDFResourceDict::kExtGState_ResourceType,
+ objectIndex).c_str());
+ content->writeText(" gs\n");
+}
+
+// static
+void SkPDFUtils::ApplyPattern(int objectIndex, SkWStream* content) {
+ // Select Pattern color space (CS, cs) and set pattern object as current
+ // color (SCN, scn)
+ SkString resourceName = SkPDFResourceDict::getResourceName(
+ SkPDFResourceDict::kPattern_ResourceType,
+ objectIndex);
+ content->writeText("/Pattern CS/Pattern cs/");
+ content->writeText(resourceName.c_str());
+ content->writeText(" SCN/");
+ content->writeText(resourceName.c_str());
+ content->writeText(" scn\n");
+}
+
+size_t SkPDFUtils::ColorToDecimal(uint8_t value, char result[5]) {
+ if (value == 255 || value == 0) {
+ result[0] = value ? '1' : '0';
+ result[1] = '\0';
+ return 1;
+ }
+ // int x = 0.5 + (1000.0 / 255.0) * value;
+ int x = SkFixedRoundToInt((SK_Fixed1 * 1000 / 255) * value);
+ result[0] = '.';
+ for (int i = 3; i > 0; --i) {
+ result[i] = '0' + x % 10;
+ x /= 10;
+ }
+ int j;
+ for (j = 3; j > 1; --j) {
+ if (result[j] != '0') {
+ break;
+ }
+ }
+ result[j + 1] = '\0';
+ return j + 1;
+}
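+
+// Illustrative values, worked out by hand from the rounding above (not
+// normative):
+//   ColorToDecimal(0)   -> "0"    (returns 1)
+//   ColorToDecimal(255) -> "1"    (returns 1)
+//   ColorToDecimal(51)  -> ".2"   (returns 2)
+//   ColorToDecimal(127) -> ".498" (returns 4)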
+
+void SkPDFUtils::AppendScalar(SkScalar value, SkWStream* stream) {
+ char result[kMaximumFloatDecimalLength];
+ size_t len = SkPDFUtils::FloatToDecimal(SkScalarToFloat(value), result);
+ SkASSERT(len < kMaximumFloatDecimalLength);
+ stream->write(result, len);
+}
+
+// Return pow(10.0, e), optimized for common cases.
+inline double pow10(int e) {
+ switch (e) {
+ case 0: return 1.0; // common cases
+ case 1: return 10.0;
+ case 2: return 100.0;
+ case 3: return 1e+03;
+ case 4: return 1e+04;
+ case 5: return 1e+05;
+ case 6: return 1e+06;
+ case 7: return 1e+07;
+ case 8: return 1e+08;
+ case 9: return 1e+09;
+ case 10: return 1e+10;
+ case 11: return 1e+11;
+ case 12: return 1e+12;
+ case 13: return 1e+13;
+ case 14: return 1e+14;
+ case 15: return 1e+15;
+ default:
+ if (e > 15) {
+ double value = 1e+15;
+ while (e-- > 15) { value *= 10.0; }
+ return value;
+ } else {
+ SkASSERT(e < 0);
+ double value = 1.0;
+ while (e++ < 0) { value /= 10.0; }
+ return value;
+ }
+ }
+}
+
+/** Write a string into result, including a terminating '\0' (for
+    unit testing). Return strlen(result) (for SkWStream::write). The
+    resulting string will be in the form /[-]?([0-9]*\.)?[0-9]+/ and
+ sscanf(result, "%f", &x) will return the original value iff the
+ value is finite. This function accepts all possible input values.
+
+ Motivation: "PDF does not support [numbers] in exponential format
+ (such as 6.02e23)." Otherwise, this function would rely on a
+ sprintf-type function from the standard library. */
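+/*  For illustration, assuming IEEE-754 single-precision floats (values
+    derived from the algorithm below, not normative):
+      FloatToDecimal(0.0f,   buf) writes "0"    and returns 1
+      FloatToDecimal(0.125f, buf) writes ".125" and returns 4
+      FloatToDecimal(-1.5f,  buf) writes "-1.5" and returns 4
+    Non-finite inputs are clamped, so +INF serializes like FLT_MAX. */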
+size_t SkPDFUtils::FloatToDecimal(float value,
+ char result[kMaximumFloatDecimalLength]) {
+ /* The longest result is -FLT_MIN.
+ We serialize it as "-.0000000000000000000000000000000000000117549435"
+ which has 48 characters plus a terminating '\0'. */
+
+ /* section C.1 of the PDF1.4 spec (http://goo.gl/0SCswJ) says that
+ most PDF rasterizers will use fixed-point scalars that lack the
+ dynamic range of floats. Even if this is the case, I want to
+ serialize these (uncommon) very small and very large scalar
+ values with enough precision to allow a floating-point
+ rasterizer to read them in with perfect accuracy.
+ Experimentally, rasterizers such as pdfium do seem to benefit
+ from this. Rasterizers that rely on fixed-point scalars should
+ gracefully ignore these values that they can not parse. */
+ char* output = &result[0];
+ const char* const end = &result[kMaximumFloatDecimalLength - 1];
+ // subtract one to leave space for '\0'.
+
+    /* This function is written to accept any possible input value,
+       including non-finite values such as INF and NAN. In that case,
+       we ignore value-correctness and output a syntactically valid
+       number. */
+ if (value == SK_FloatInfinity) {
+ value = FLT_MAX; // nearest finite float.
+ }
+ if (value == SK_FloatNegativeInfinity) {
+ value = -FLT_MAX; // nearest finite float.
+ }
+ if (!std::isfinite(value) || value == 0.0f) {
+ // NAN is unsupported in PDF. Always output a valid number.
+ // Also catch zero here, as a special case.
+ *output++ = '0';
+ *output = '\0';
+ return output - result;
+ }
+ if (value < 0.0) {
+ *output++ = '-';
+ value = -value;
+ }
+ SkASSERT(value >= 0.0f);
+
+ int binaryExponent;
+ (void)std::frexp(value, &binaryExponent);
+ static const double kLog2 = 0.3010299956639812; // log10(2.0);
+ int decimalExponent = static_cast<int>(std::floor(kLog2 * binaryExponent));
+ int decimalShift = decimalExponent - 8;
+ double power = pow10(-decimalShift);
+ int32_t d = static_cast<int32_t>(value * power + 0.5);
+ // SkASSERT(value == (float)(d * pow(10.0, decimalShift)));
+ SkASSERT(d <= 999999999);
+ if (d > 167772159) { // floor(pow(10,1+log10(1<<24)))
+ // need one fewer decimal digits for 24-bit precision.
+ decimalShift = decimalExponent - 7;
+        // SkASSERT(power * 0.1 == pow10(-decimalShift));
+ // recalculate to get rounding right.
+ d = static_cast<int32_t>(value * (power * 0.1) + 0.5);
+ SkASSERT(d <= 99999999);
+ }
+ while (d % 10 == 0) {
+ d /= 10;
+ ++decimalShift;
+ }
+ SkASSERT(d > 0);
+ // SkASSERT(value == (float)(d * pow(10.0, decimalShift)));
+ uint8_t buffer[9]; // decimal value buffer.
+ int bufferIndex = 0;
+ do {
+ buffer[bufferIndex++] = d % 10;
+ d /= 10;
+ } while (d != 0);
+ SkASSERT(bufferIndex <= (int)sizeof(buffer) && bufferIndex > 0);
+ if (decimalShift >= 0) {
+ do {
+ --bufferIndex;
+ *output++ = '0' + buffer[bufferIndex];
+ } while (bufferIndex);
+ for (int i = 0; i < decimalShift; ++i) {
+ *output++ = '0';
+ }
+ } else {
+ int placesBeforeDecimal = bufferIndex + decimalShift;
+ if (placesBeforeDecimal > 0) {
+ while (placesBeforeDecimal-- > 0) {
+ --bufferIndex;
+ *output++ = '0' + buffer[bufferIndex];
+ }
+ *output++ = '.';
+ } else {
+ *output++ = '.';
+ int placesAfterDecimal = -placesBeforeDecimal;
+ while (placesAfterDecimal-- > 0) {
+ *output++ = '0';
+ }
+ }
+ while (bufferIndex > 0) {
+ --bufferIndex;
+ *output++ = '0' + buffer[bufferIndex];
+ if (output == end) {
+ break; // denormalized: don't need extra precision.
+ // Note: denormalized numbers will not have the same number of
+ // significantDigits, but do not need them to round-trip.
+ }
+ }
+ }
+ SkASSERT(output <= end);
+ *output = '\0';
+ return output - result;
+}
+
+void SkPDFUtils::WriteString(SkWStream* wStream, const char* cin, size_t len) {
+ SkDEBUGCODE(static const size_t kMaxLen = 65535;)
+ SkASSERT(len <= kMaxLen);
+
+ size_t extraCharacterCount = 0;
+ for (size_t i = 0; i < len; i++) {
+ if (cin[i] > '~' || cin[i] < ' ') {
+ extraCharacterCount += 3;
+ }
+ if (cin[i] == '\\' || cin[i] == '(' || cin[i] == ')') {
+ ++extraCharacterCount;
+ }
+ }
+ if (extraCharacterCount <= len) {
+ wStream->writeText("(");
+ for (size_t i = 0; i < len; i++) {
+ if (cin[i] > '~' || cin[i] < ' ') {
+ uint8_t c = static_cast<uint8_t>(cin[i]);
+ uint8_t octal[4];
+ octal[0] = '\\';
+ octal[1] = '0' + ( c >> 6 );
+ octal[2] = '0' + ((c >> 3) & 0x07);
+ octal[3] = '0' + ( c & 0x07);
+ wStream->write(octal, 4);
+ } else {
+ if (cin[i] == '\\' || cin[i] == '(' || cin[i] == ')') {
+ wStream->writeText("\\");
+ }
+ wStream->write(&cin[i], 1);
+ }
+ }
+ wStream->writeText(")");
+ } else {
+ wStream->writeText("<");
+ for (size_t i = 0; i < len; i++) {
+ uint8_t c = static_cast<uint8_t>(cin[i]);
+ static const char gHex[] = "0123456789ABCDEF";
+ char hexValue[2];
+ hexValue[0] = gHex[(c >> 4) & 0xF];
+ hexValue[1] = gHex[ c & 0xF];
+ wStream->write(hexValue, 2);
+ }
+ wStream->writeText(">");
+ }
+}
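+
+// For illustration: WriteString(w, "Hi (Mom)", 8) emits the literal form
+// "(Hi \(Mom\))", while input that is mostly outside printable ASCII falls
+// back to the hex form, e.g. the two bytes 0x01 0x02 become "<0102>".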
diff --git a/gfx/skia/skia/src/pdf/SkPDFUtils.h b/gfx/skia/skia/src/pdf/SkPDFUtils.h
new file mode 100644
index 000000000..964689f4f
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFUtils.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPDFUtils_DEFINED
+#define SkPDFUtils_DEFINED
+
+#include "SkPaint.h"
+#include "SkPath.h"
+#include "SkStream.h"
+#include "SkUtils.h"
+
+class SkMatrix;
+class SkPDFArray;
+struct SkRect;
+
+#if 0
+#define PRINT_NOT_IMPL(str) fprintf(stderr, str)
+#else
+#define PRINT_NOT_IMPL(str)
+#endif
+
+#define NOT_IMPLEMENTED(condition, assert) \
+ do { \
+ if ((bool)(condition)) { \
+ PRINT_NOT_IMPL("NOT_IMPLEMENTED: " #condition "\n"); \
+ SkDEBUGCODE(SkASSERT(!assert);) \
+ } \
+ } while (0)
+
+namespace SkPDFUtils {
+
+sk_sp<SkPDFArray> RectToArray(const SkRect& rect);
+sk_sp<SkPDFArray> MatrixToArray(const SkMatrix& matrix);
+void AppendTransform(const SkMatrix& matrix, SkWStream* content);
+
+void MoveTo(SkScalar x, SkScalar y, SkWStream* content);
+void AppendLine(SkScalar x, SkScalar y, SkWStream* content);
+void AppendCubic(SkScalar ctl1X, SkScalar ctl1Y,
+ SkScalar ctl2X, SkScalar ctl2Y,
+ SkScalar dstX, SkScalar dstY, SkWStream* content);
+void AppendRectangle(const SkRect& rect, SkWStream* content);
+void EmitPath(const SkPath& path, SkPaint::Style paintStyle,
+ bool doConsumeDegerates, SkWStream* content);
+inline void EmitPath(const SkPath& path, SkPaint::Style paintStyle,
+ SkWStream* content) {
+ SkPDFUtils::EmitPath(path, paintStyle, true, content);
+}
+void ClosePath(SkWStream* content);
+void PaintPath(SkPaint::Style style, SkPath::FillType fill,
+ SkWStream* content);
+void StrokePath(SkWStream* content);
+void DrawFormXObject(int objectIndex, SkWStream* content);
+void ApplyGraphicState(int objectIndex, SkWStream* content);
+void ApplyPattern(int objectIndex, SkWStream* content);
+
+// Converts (value / 255.0) with three significant digits of accuracy.
+// Writes value as string into result. Returns strlen() of result.
+size_t ColorToDecimal(uint8_t value, char result[5]);
+inline void AppendColorComponent(uint8_t value, SkWStream* wStream) {
+ char buffer[5];
+ size_t len = SkPDFUtils::ColorToDecimal(value, buffer);
+ wStream->write(buffer, len);
+}
+
+// 3 = '-', '.', and '\0' characters.
+// 9 = number of significant digits
+// abs(FLT_MIN_10_EXP) = number of zeros in FLT_MIN
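+// e.g. 3 + 9 + 37 = 49 when FLT_MIN_10_EXP == -37 (typical IEEE-754 single
+// precision; given here for illustration only).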
+const size_t kMaximumFloatDecimalLength = 3 + 9 - FLT_MIN_10_EXP;
+// FloatToDecimal is exposed for unit tests.
+size_t FloatToDecimal(float value,
+ char output[kMaximumFloatDecimalLength]);
+void AppendScalar(SkScalar value, SkWStream* stream);
+void WriteString(SkWStream* wStream, const char* input, size_t len);
+
+inline void WriteUInt16BE(SkDynamicMemoryWStream* wStream, uint16_t value) {
+ static const char gHex[] = "0123456789ABCDEF";
+ char result[4];
+ result[0] = gHex[ value >> 12 ];
+ result[1] = gHex[0xF & (value >> 8 )];
+ result[2] = gHex[0xF & (value >> 4 )];
+ result[3] = gHex[0xF & (value )];
+ wStream->write(result, 4);
+}
+inline void WriteUInt8(SkDynamicMemoryWStream* wStream, uint8_t value) {
+ static const char gHex[] = "0123456789ABCDEF";
+ char result[2];
+ result[0] = gHex[value >> 4 ];
+ result[1] = gHex[0xF & value];
+ wStream->write(result, 2);
+}
+inline void WriteUTF16beHex(SkDynamicMemoryWStream* wStream, SkUnichar utf32) {
+ uint16_t utf16[2] = {0, 0};
+ size_t len = SkUTF16_FromUnichar(utf32, utf16);
+ SkASSERT(len == 1 || len == 2);
+ SkPDFUtils::WriteUInt16BE(wStream, utf16[0]);
+ if (len == 2) {
+ SkPDFUtils::WriteUInt16BE(wStream, utf16[1]);
+ }
+}
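+// For illustration (values worked out by hand, not normative):
+// WriteUTF16beHex(w, 'A') emits "0041"; a supplementary-plane character such
+// as U+1F600 emits its surrogate pair, "D83DDE00".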
+} // namespace SkPDFUtils
+
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkScopeExit.h b/gfx/skia/skia/src/pdf/SkScopeExit.h
new file mode 100644
index 000000000..5b7bcdc07
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkScopeExit.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkScopeExit_DEFINED
+#define SkScopeExit_DEFINED
+
+#include "SkTypes.h"
+
+/**
+ * SK_AT_SCOPE_EXIT(stmt) evaluates stmt when the current scope ends.
+ *
+ * E.g.
+ * {
+ * int x = 5;
+ * {
+ * SK_AT_SCOPE_EXIT(x--);
+ * SkASSERT(x == 5);
+ * }
+ * SkASSERT(x == 4);
+ * }
+ */
+template <typename Fn>
+class SkScopeExit {
+public:
+ SkScopeExit(Fn f) : fFn(std::move(f)) {}
+ ~SkScopeExit() { fFn(); }
+
+private:
+ Fn fFn;
+
+ SkScopeExit( const SkScopeExit& ) = delete;
+ SkScopeExit& operator=(const SkScopeExit& ) = delete;
+ SkScopeExit( SkScopeExit&&) = delete;
+ SkScopeExit& operator=( SkScopeExit&&) = delete;
+};
+
+template <typename Fn>
+inline SkScopeExit<Fn> SkMakeScopeExit(Fn&& fn) {
+ return {std::move(fn)};
+}
+
+#define SK_AT_SCOPE_EXIT(stmt) \
+ SK_UNUSED auto&& SK_MACRO_APPEND_LINE(at_scope_exit_) = \
+ SkMakeScopeExit([&]() { stmt; });
+
+#endif // SkScopeExit_DEFINED
diff --git a/gfx/skia/skia/src/pipe/SkPipeCanvas.cpp b/gfx/skia/skia/src/pipe/SkPipeCanvas.cpp
new file mode 100644
index 000000000..3b636a23c
--- /dev/null
+++ b/gfx/skia/skia/src/pipe/SkPipeCanvas.cpp
@@ -0,0 +1,1069 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPathEffect.h"
+#include "SkColorFilter.h"
+#include "SkDrawLooper.h"
+#include "SkImageFilter.h"
+#include "SkMaskFilter.h"
+#include "SkPipeCanvas.h"
+#include "SkPipeFormat.h"
+#include "SkRasterizer.h"
+#include "SkRSXform.h"
+#include "SkShader.h"
+#include "SkStream.h"
+#include "SkTextBlob.h"
+#include "SkTypeface.h"
+
+template <typename T> void write_rrect(T* writer, const SkRRect& rrect) {
+ char tmp[SkRRect::kSizeInMemory];
+ rrect.writeToMemory(tmp);
+ writer->write(tmp, SkRRect::kSizeInMemory);
+}
+
+template <typename T> void write_pad(T* writer, const void* buffer, size_t len) {
+ writer->write(buffer, len & ~3);
+ if (len & 3) {
+ const char* src = (const char*)buffer + (len & ~3);
+ len &= 3;
+ uint32_t tmp = 0;
+ memcpy(&tmp, src, len);
+ writer->write(&tmp, 4);
+ }
+}
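+// For example, write_pad(w, buf, 5) emits 8 bytes: the first 4 bytes of buf
+// verbatim, then the remaining byte zero-padded out to a full 4-byte word
+// (illustrating the 4-byte alignment that SkPipeWriter asserts elsewhere).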
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static uint16_t compute_nondef(const SkPaint& paint, PaintUsage usage) {
+ // kRespectsStroke_PaintUsage is only valid if other bits are also set
+ SkASSERT(0 != (usage & ~kRespectsStroke_PaintUsage));
+
+ const SkScalar kTextSize_Default = 12;
+ const SkScalar kTextScaleX_Default = 1;
+ const SkScalar kTextSkewX_Default = 0;
+ const SkScalar kStrokeWidth_Default = 0;
+ const SkScalar kStrokeMiter_Default = 4;
+ const SkColor kColor_Default = SK_ColorBLACK;
+
+ unsigned bits = (paint.getColor() != kColor_Default) ? kColor_NonDef : 0;
+
+ if (usage & kText_PaintUsage) {
+ bits |= (paint.getTextSize() != kTextSize_Default ? kTextSize_NonDef : 0);
+ bits |= (paint.getTextScaleX() != kTextScaleX_Default ? kTextScaleX_NonDef : 0);
+ bits |= (paint.getTextSkewX() != kTextSkewX_Default ? kTextSkewX_NonDef : 0);
+ bits |= (paint.getTypeface() ? kTypeface_NonDef : 0);
+ }
+
+ // TODO: kImage_PaintUsage only needs the shader/maskfilter IF its colortype is kAlpha_8
+
+ if (usage & (kVertices_PaintUsage | kDrawPaint_PaintUsage | kImage_PaintUsage |
+ kText_PaintUsage | kGeometry_PaintUsage | kTextBlob_PaintUsage)) {
+ bits |= (paint.getShader() ? kShader_NonDef : 0);
+ }
+
+ if (usage & (kText_PaintUsage | kGeometry_PaintUsage | kTextBlob_PaintUsage)) {
+ bits |= (paint.getPathEffect() ? kPathEffect_NonDef : 0);
+ bits |= (paint.getRasterizer() ? kRasterizer_NonDef : 0);
+
+ if (paint.getStyle() != SkPaint::kFill_Style || (usage & kRespectsStroke_PaintUsage)) {
+ bits |= (paint.getStrokeWidth() != kStrokeWidth_Default ? kStrokeWidth_NonDef : 0);
+ bits |= (paint.getStrokeMiter() != kStrokeMiter_Default ? kStrokeMiter_NonDef : 0);
+ }
+ }
+
+ if (usage &
+ (kText_PaintUsage | kGeometry_PaintUsage | kImage_PaintUsage | kTextBlob_PaintUsage))
+ {
+ bits |= (paint.getMaskFilter() ? kMaskFilter_NonDef : 0);
+ }
+
+ bits |= (paint.getColorFilter() ? kColorFilter_NonDef : 0);
+ bits |= (paint.getImageFilter() ? kImageFilter_NonDef : 0);
+ bits |= (paint.getDrawLooper() ? kDrawLooper_NonDef : 0);
+
+ return SkToU16(bits);
+}
+
+static uint32_t pack_paint_flags(unsigned flags, unsigned hint, unsigned align,
+ unsigned filter, unsigned style, unsigned caps, unsigned joins,
+ unsigned encoding) {
+ SkASSERT(kFlags_BPF + kHint_BPF + kAlign_BPF + kFilter_BPF <= 32);
+
+ ASSERT_FITS_IN(flags, kFlags_BPF);
+ ASSERT_FITS_IN(filter, kFilter_BPF);
+ ASSERT_FITS_IN(style, kStyle_BPF);
+ ASSERT_FITS_IN(caps, kCaps_BPF);
+ ASSERT_FITS_IN(joins, kJoins_BPF);
+ ASSERT_FITS_IN(hint, kHint_BPF);
+ ASSERT_FITS_IN(align, kAlign_BPF);
+ ASSERT_FITS_IN(encoding, kEncoding_BPF);
+
+    // left-align the fields of "known" size, and right-align the last (flatFlags) so it can easily
+ // add more bits in the future.
+
+ uint32_t packed = 0;
+ int shift = 32;
+
+ shift -= kFlags_BPF; packed |= (flags << shift);
+ shift -= kFilter_BPF; packed |= (filter << shift);
+ shift -= kStyle_BPF; packed |= (style << shift);
+ // these are only needed for stroking (geometry or text)
+ shift -= kCaps_BPF; packed |= (caps << shift);
+ shift -= kJoins_BPF; packed |= (joins << shift);
+ // these are only needed for text
+ shift -= kHint_BPF; packed |= (hint << shift);
+ shift -= kAlign_BPF; packed |= (align << shift);
+ shift -= kEncoding_BPF; packed |= (encoding << shift);
+
+ return packed;
+}
+
+#define CHECK_WRITE_SCALAR(writer, nondef, paint, Field) \
+ do { if (nondef & (k##Field##_NonDef)) { \
+ writer.writeScalar(paint.get##Field()); \
+ }} while (0)
+
+#define CHECK_WRITE_FLATTENABLE(writer, nondef, paint, Field) \
+ do { if (nondef & (k##Field##_NonDef)) { \
+ SkFlattenable* f = paint.get##Field(); \
+ SkASSERT(f != nullptr); \
+ writer.writeFlattenable(f); \
+ } } while (0)
+
+/*
+ * Header:
+ * paint flags : 32
+ * non_def bits : 16
+ * xfermode enum : 8
+ * pad zeros : 8
+ */
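+/*
+ * For example (a sketch, not exhaustive): a paint that differs from the
+ * defaults only in its color sets just kColor_NonDef in compute_nondef(),
+ * so the two header words are followed by a single 32-bit color and
+ * nothing else.
+ */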
+static void write_paint(SkWriteBuffer& writer, const SkPaint& paint, unsigned usage) {
+ uint32_t packedFlags = pack_paint_flags(paint.getFlags(), paint.getHinting(),
+ paint.getTextAlign(), paint.getFilterQuality(),
+ paint.getStyle(), paint.getStrokeCap(),
+ paint.getStrokeJoin(), paint.getTextEncoding());
+ writer.write32(packedFlags);
+
+ unsigned nondef = compute_nondef(paint, (PaintUsage)usage);
+ const uint8_t pad = 0;
+ writer.write32((nondef << 16) | ((unsigned)paint.getBlendMode() << 8) | pad);
+
+ CHECK_WRITE_SCALAR(writer, nondef, paint, TextSize);
+ CHECK_WRITE_SCALAR(writer, nondef, paint, TextScaleX);
+ CHECK_WRITE_SCALAR(writer, nondef, paint, TextSkewX);
+ CHECK_WRITE_SCALAR(writer, nondef, paint, StrokeWidth);
+ CHECK_WRITE_SCALAR(writer, nondef, paint, StrokeMiter);
+
+ if (nondef & kColor_NonDef) {
+ writer.write32(paint.getColor());
+ }
+ if (nondef & kTypeface_NonDef) {
+ // TODO: explore idea of writing bits indicating "use the prev (or prev N) face"
+ // e.g. 1-N bits is an index into a ring buffer of typefaces
+ SkTypeface* tf = paint.getTypeface();
+ SkASSERT(tf);
+ writer.writeTypeface(tf);
+ }
+
+ CHECK_WRITE_FLATTENABLE(writer, nondef, paint, PathEffect);
+ CHECK_WRITE_FLATTENABLE(writer, nondef, paint, Shader);
+ CHECK_WRITE_FLATTENABLE(writer, nondef, paint, MaskFilter);
+ CHECK_WRITE_FLATTENABLE(writer, nondef, paint, ColorFilter);
+ CHECK_WRITE_FLATTENABLE(writer, nondef, paint, Rasterizer);
+ CHECK_WRITE_FLATTENABLE(writer, nondef, paint, ImageFilter);
+ CHECK_WRITE_FLATTENABLE(writer, nondef, paint, DrawLooper);
+}
+
+class SkPipeWriter : public SkBinaryWriteBuffer {
+ enum {
+ N = 1024/4,
+ };
+ uint32_t fStorage[N];
+ SkWStream* fStream;
+
+public:
+ SkPipeWriter(SkWStream* stream, SkDeduper* deduper)
+ : SkBinaryWriteBuffer(fStorage, sizeof(fStorage))
+ , fStream(stream)
+ {
+ this->setDeduper(deduper);
+ }
+
+ SkPipeWriter(SkPipeCanvas* pc) : SkPipeWriter(pc->fStream, pc->fDeduper) {}
+
+ ~SkPipeWriter() override {
+ SkASSERT(SkIsAlign4(fStream->bytesWritten()));
+ this->writeToStream(fStream);
+ }
+
+ void writePaint(const SkPaint& paint) override {
+ write_paint(*this, paint, kUnknown_PaintUsage);
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkPipeCanvas::SkPipeCanvas(const SkRect& cull, SkPipeDeduper* deduper, SkWStream* stream)
+ : INHERITED(SkScalarCeilToInt(cull.width()), SkScalarCeilToInt(cull.height()))
+ , fDeduper(deduper)
+ , fStream(stream)
+{}
+
+SkPipeCanvas::~SkPipeCanvas() {}
+
+void SkPipeCanvas::willSave() {
+ fStream->write32(pack_verb(SkPipeVerb::kSave));
+ this->INHERITED::willSave();
+}
+
+SkCanvas::SaveLayerStrategy SkPipeCanvas::getSaveLayerStrategy(const SaveLayerRec& rec) {
+ SkPipeWriter writer(this);
+ uint32_t extra = rec.fSaveLayerFlags;
+
+ // remap this wacky flag
+ if (extra & (1 << 31)/*SkCanvas::kDontClipToLayer_PrivateSaveLayerFlag*/) {
+ extra &= ~(1 << 31);
+ extra |= kDontClipToLayer_SaveLayerMask;
+ }
+
+ if (rec.fBounds) {
+ extra |= kHasBounds_SaveLayerMask;
+ }
+ if (rec.fPaint) {
+ extra |= kHasPaint_SaveLayerMask;
+ }
+ if (rec.fBackdrop) {
+ extra |= kHasBackdrop_SaveLayerMask;
+ }
+
+ writer.write32(pack_verb(SkPipeVerb::kSaveLayer, extra));
+ if (rec.fBounds) {
+ writer.writeRect(*rec.fBounds);
+ }
+ if (rec.fPaint) {
+ write_paint(writer, *rec.fPaint, kSaveLayer_PaintUsage);
+ }
+ if (rec.fBackdrop) {
+ writer.writeFlattenable(rec.fBackdrop);
+ }
+ return kNoLayer_SaveLayerStrategy;
+}
+
+void SkPipeCanvas::willRestore() {
+ fStream->write32(pack_verb(SkPipeVerb::kRestore));
+ this->INHERITED::willRestore();
+}
+
+template <typename T> void write_sparse_matrix(T* writer, const SkMatrix& matrix) {
+ SkMatrix::TypeMask tm = matrix.getType();
+ SkScalar tmp[9];
+ if (tm & SkMatrix::kPerspective_Mask) {
+ matrix.get9(tmp);
+ writer->write(tmp, 9 * sizeof(SkScalar));
+ } else if (tm & SkMatrix::kAffine_Mask) {
+ tmp[0] = matrix[SkMatrix::kMScaleX];
+ tmp[1] = matrix[SkMatrix::kMSkewX];
+ tmp[2] = matrix[SkMatrix::kMTransX];
+ tmp[3] = matrix[SkMatrix::kMScaleY];
+ tmp[4] = matrix[SkMatrix::kMSkewY];
+ tmp[5] = matrix[SkMatrix::kMTransY];
+ writer->write(tmp, 6 * sizeof(SkScalar));
+ } else if (tm & SkMatrix::kScale_Mask) {
+ tmp[0] = matrix[SkMatrix::kMScaleX];
+ tmp[1] = matrix[SkMatrix::kMTransX];
+ tmp[2] = matrix[SkMatrix::kMScaleY];
+ tmp[3] = matrix[SkMatrix::kMTransY];
+ writer->write(tmp, 4 * sizeof(SkScalar));
+ } else if (tm & SkMatrix::kTranslate_Mask) {
+ tmp[0] = matrix[SkMatrix::kMTransX];
+ tmp[1] = matrix[SkMatrix::kMTransY];
+ writer->write(tmp, 2 * sizeof(SkScalar));
+ }
+ // else write nothing for Identity
+}
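+// For reference (derived from the branches above): a perspective matrix
+// writes 9 scalars, an affine one 6, scale+translate 4, translate-only 2,
+// and identity writes nothing.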
+
+static void do_concat(SkWStream* stream, const SkMatrix& matrix, bool isSetMatrix) {
+ unsigned mtype = matrix.getType();
+ SkASSERT(0 == (mtype & ~kTypeMask_ConcatMask));
+ unsigned extra = mtype;
+ if (isSetMatrix) {
+ extra |= kSetMatrix_ConcatMask;
+ }
+ if (mtype || isSetMatrix) {
+ stream->write32(pack_verb(SkPipeVerb::kConcat, extra));
+ write_sparse_matrix(stream, matrix);
+ }
+}
+
+void SkPipeCanvas::didConcat(const SkMatrix& matrix) {
+ do_concat(fStream, matrix, false);
+ this->INHERITED::didConcat(matrix);
+}
+
+void SkPipeCanvas::didSetMatrix(const SkMatrix& matrix) {
+ do_concat(fStream, matrix, true);
+ this->INHERITED::didSetMatrix(matrix);
+}
+
+void SkPipeCanvas::onClipRect(const SkRect& rect, ClipOp op, ClipEdgeStyle edgeStyle) {
+ fStream->write32(pack_verb(SkPipeVerb::kClipRect, ((unsigned)op << 1) | edgeStyle));
+ fStream->write(&rect, 4 * sizeof(SkScalar));
+
+ this->INHERITED::onClipRect(rect, op, edgeStyle);
+}
+
+void SkPipeCanvas::onClipRRect(const SkRRect& rrect, ClipOp op, ClipEdgeStyle edgeStyle) {
+ fStream->write32(pack_verb(SkPipeVerb::kClipRRect, ((unsigned)op << 1) | edgeStyle));
+ write_rrect(fStream, rrect);
+
+ this->INHERITED::onClipRRect(rrect, op, edgeStyle);
+}
+
+void SkPipeCanvas::onClipPath(const SkPath& path, ClipOp op, ClipEdgeStyle edgeStyle) {
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kClipPath, ((unsigned)op << 1) | edgeStyle));
+ writer.writePath(path);
+
+ this->INHERITED::onClipPath(path, op, edgeStyle);
+}
+
+void SkPipeCanvas::onClipRegion(const SkRegion& deviceRgn, ClipOp op) {
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kClipRegion, (unsigned)op << 1));
+ writer.writeRegion(deviceRgn);
+
+ this->INHERITED::onClipRegion(deviceRgn, op);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkPipeCanvas::onDrawArc(const SkRect& bounds, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) {
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawArc, (int)useCenter));
+ writer.writeRect(bounds);
+ writer.writeScalar(startAngle);
+ writer.writeScalar(sweepAngle);
+ write_paint(writer, paint, kGeometry_PaintUsage);
+}
+
+void SkPipeCanvas::onDrawAtlas(const SkImage* image, const SkRSXform xform[], const SkRect rect[],
+ const SkColor colors[], int count, SkXfermode::Mode mode,
+ const SkRect* cull, const SkPaint* paint) {
+ unsigned extra = (unsigned)mode;
+ SkASSERT(0 == (extra & ~kMode_DrawAtlasMask));
+ if (colors) {
+ extra |= kHasColors_DrawAtlasMask;
+ }
+ if (cull) {
+ extra |= kHasCull_DrawAtlasMask;
+ }
+ if (paint) {
+ extra |= kHasPaint_DrawAtlasMask;
+ }
+
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawAtlas, extra));
+ writer.writeImage(image);
+ writer.write32(count);
+ writer.write(xform, count * sizeof(SkRSXform));
+ writer.write(rect, count * sizeof(SkRect));
+ if (colors) {
+ writer.write(colors, count * sizeof(SkColor));
+ }
+ if (cull) {
+ writer.writeRect(*cull);
+ }
+ if (paint) {
+ write_paint(writer, *paint, kImage_PaintUsage);
+ }
+}
+
+void SkPipeCanvas::onDrawPaint(const SkPaint& paint) {
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawPaint));
+ write_paint(writer, paint, kDrawPaint_PaintUsage);
+}
+
+void SkPipeCanvas::onDrawPoints(PointMode mode, size_t count, const SkPoint pts[],
+ const SkPaint& paint) {
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawPoints, mode));
+ writer.write32(SkToU32(count));
+ writer.write(pts, count * sizeof(SkPoint));
+ write_paint(writer, paint, kGeometry_PaintUsage | kRespectsStroke_PaintUsage);
+}
+
+void SkPipeCanvas::onDrawRect(const SkRect& rect, const SkPaint& paint) {
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawRect));
+ writer.write(&rect, sizeof(SkRect));
+ write_paint(writer, paint, kGeometry_PaintUsage);
+}
+
+void SkPipeCanvas::onDrawOval(const SkRect& rect, const SkPaint& paint) {
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawOval));
+ writer.write(&rect, sizeof(SkRect));
+ write_paint(writer, paint, kGeometry_PaintUsage);
+}
+
+void SkPipeCanvas::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawRRect));
+ write_rrect(&writer, rrect);
+ write_paint(writer, paint, kGeometry_PaintUsage);
+}
+
+void SkPipeCanvas::onDrawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint& paint) {
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawDRRect));
+ write_rrect(&writer, outer);
+ write_rrect(&writer, inner);
+ write_paint(writer, paint, kGeometry_PaintUsage);
+}
+
+void SkPipeCanvas::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawPath));
+ writer.writePath(path);
+ write_paint(writer, paint, kGeometry_PaintUsage);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static sk_sp<SkImage> make_from_bitmap(const SkBitmap& bitmap) {
+    // If we just "make" an image, it will force a CPU copy (if it's mutable), only to have
+ // us then either find it in our cache, or compress and send it.
+ //
+    // It would be better to look it up in our cache first, and only create/compress it if we have to.
+ //
+ // But for now, just do the dumb thing...
+ return SkImage::MakeFromBitmap(bitmap);
+}
+
+void SkPipeCanvas::onDrawBitmap(const SkBitmap& bitmap, SkScalar x, SkScalar y,
+ const SkPaint* paint) {
+ sk_sp<SkImage> image = make_from_bitmap(bitmap);
+ if (image) {
+ this->onDrawImage(image.get(), x, y, paint);
+ }
+}
+
+void SkPipeCanvas::onDrawBitmapRect(const SkBitmap& bitmap, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ sk_sp<SkImage> image = make_from_bitmap(bitmap);
+ if (image) {
+ this->onDrawImageRect(image.get(), src, dst, paint, constraint);
+ }
+}
+
+void SkPipeCanvas::onDrawBitmapNine(const SkBitmap& bitmap, const SkIRect& center,
+ const SkRect& dst, const SkPaint* paint) {
+ sk_sp<SkImage> image = make_from_bitmap(bitmap);
+ if (image) {
+ this->onDrawImageNine(image.get(), center, dst, paint);
+ }
+}
+
+void SkPipeCanvas::onDrawBitmapLattice(const SkBitmap& bitmap, const Lattice& lattice,
+ const SkRect& dst, const SkPaint* paint) {
+ sk_sp<SkImage> image = make_from_bitmap(bitmap);
+ if (image) {
+ this->onDrawImageLattice(image.get(), lattice, dst, paint);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkPipeCanvas::onDrawImage(const SkImage* image, SkScalar left, SkScalar top,
+ const SkPaint* paint) {
+ unsigned extra = 0;
+ if (paint) {
+ extra |= kHasPaint_DrawImageMask;
+ }
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawImage, extra));
+ writer.writeImage(image);
+ writer.writeScalar(left);
+ writer.writeScalar(top);
+ if (paint) {
+ write_paint(writer, *paint, kImage_PaintUsage);
+ }
+}
+
+void SkPipeCanvas::onDrawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ SkASSERT(0 == ((unsigned)constraint & ~1));
+ unsigned extra = (unsigned)constraint;
+ if (paint) {
+ extra |= kHasPaint_DrawImageRectMask;
+ }
+ if (src) {
+ extra |= kHasSrcRect_DrawImageRectMask;
+ }
+
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawImageRect, extra));
+ writer.writeImage(image);
+ if (src) {
+ writer.write(src, sizeof(*src));
+ }
+ writer.write(&dst, sizeof(dst));
+ if (paint) {
+ write_paint(writer, *paint, kImage_PaintUsage);
+ }
+}
+
+void SkPipeCanvas::onDrawImageNine(const SkImage* image, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint) {
+ unsigned extra = 0;
+ if (paint) {
+ extra |= kHasPaint_DrawImageNineMask;
+ }
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawImageNine, extra));
+ writer.writeImage(image);
+ writer.write(&center, sizeof(center));
+ writer.write(&dst, sizeof(dst));
+ if (paint) {
+ write_paint(writer, *paint, kImage_PaintUsage);
+ }
+}
+
+void SkPipeCanvas::onDrawImageLattice(const SkImage* image, const Lattice& lattice,
+ const SkRect& dst, const SkPaint* paint) {
+ unsigned extra = 0;
+ if (paint) {
+ extra |= kHasPaint_DrawImageLatticeMask;
+ }
+ if (lattice.fFlags) {
+ extra |= kHasFlags_DrawImageLatticeMask;
+ }
+ if (lattice.fXCount >= kCount_DrawImageLatticeMask) {
+ extra |= kCount_DrawImageLatticeMask << kXCount_DrawImageLatticeShift;
+ } else {
+ extra |= lattice.fXCount << kXCount_DrawImageLatticeShift;
+ }
+ if (lattice.fYCount >= kCount_DrawImageLatticeMask) {
+ extra |= kCount_DrawImageLatticeMask << kYCount_DrawImageLatticeShift;
+ } else {
+ extra |= lattice.fYCount << kYCount_DrawImageLatticeShift;
+ }
+
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawImageLattice, extra));
+ writer.writeImage(image);
+ if (lattice.fXCount >= kCount_DrawImageLatticeMask) {
+ writer.write32(lattice.fXCount);
+ }
+ if (lattice.fYCount >= kCount_DrawImageLatticeMask) {
+ writer.write32(lattice.fYCount);
+ }
+ // Often these divs will be small (8 or 16 bits). Consider sniffing that and writing a flag
+ // so we can store them smaller.
+ writer.write(lattice.fXDivs, lattice.fXCount * sizeof(int32_t));
+ writer.write(lattice.fYDivs, lattice.fYCount * sizeof(int32_t));
+ if (lattice.fFlags) {
+ int32_t count = (lattice.fXCount + 1) * (lattice.fYCount + 1);
+ SkASSERT(count > 0);
+ write_pad(&writer, lattice.fFlags, count);
+ }
+ SkASSERT(lattice.fBounds);
+    writer.write(lattice.fBounds, sizeof(*lattice.fBounds));
+ writer.write(&dst, sizeof(dst));
+ if (paint) {
+ write_paint(writer, *paint, kImage_PaintUsage);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkPipeCanvas::onDrawText(const void* text, size_t byteLength, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ SkASSERT(byteLength);
+
+ bool compact = fits_in(byteLength, 24);
+
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawText, compact ? (unsigned)byteLength : 0));
+ if (!compact) {
+ writer.write32(SkToU32(byteLength));
+ }
+ write_pad(&writer, text, byteLength);
+ writer.writeScalar(x);
+ writer.writeScalar(y);
+ write_paint(writer, paint, kText_PaintUsage);
+}
+
+void SkPipeCanvas::onDrawPosText(const void* text, size_t byteLength, const SkPoint pos[],
+ const SkPaint& paint) {
+ SkASSERT(byteLength);
+
+ bool compact = fits_in(byteLength, 24);
+
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawPosText, compact ? (unsigned)byteLength : 0));
+ if (!compact) {
+ writer.write32(SkToU32(byteLength));
+ }
+ write_pad(&writer, text, byteLength);
+ writer.writePointArray(pos, paint.countText(text, byteLength));
+ write_paint(writer, paint, kText_PaintUsage);
+}
+
+void SkPipeCanvas::onDrawPosTextH(const void* text, size_t byteLength, const SkScalar xpos[],
+ SkScalar constY, const SkPaint& paint) {
+ SkASSERT(byteLength);
+
+ bool compact = fits_in(byteLength, 24);
+
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawPosTextH, compact ? (unsigned)byteLength : 0));
+ if (!compact) {
+ writer.write32(SkToU32(byteLength));
+ }
+ write_pad(&writer, text, byteLength);
+ writer.writeScalarArray(xpos, paint.countText(text, byteLength));
+ writer.writeScalar(constY);
+ write_paint(writer, paint, kText_PaintUsage);
+}
+
+void SkPipeCanvas::onDrawTextOnPath(const void* text, size_t byteLength, const SkPath& path,
+ const SkMatrix* matrix, const SkPaint& paint) {
+ SkASSERT(byteLength > 0);
+
+ unsigned extra = 0;
+ if (byteLength <= kTextLength_DrawTextOnPathMask) {
+ extra |= byteLength;
+ } // else we will write the length after the packedverb
+ SkMatrix::TypeMask tm = matrix ? matrix->getType() : SkMatrix::kIdentity_Mask;
+ extra |= (unsigned)tm << kMatrixType_DrawTextOnPathShift;
+
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawTextOnPath, extra));
+ if (byteLength > kTextLength_DrawTextOnPathMask) {
+ writer.write32(byteLength);
+ }
+ write_pad(&writer, text, byteLength);
+ writer.writePath(path);
+ if (matrix) {
+ write_sparse_matrix(&writer, *matrix);
+ }
+ write_paint(writer, paint, kText_PaintUsage);
+}
+
+void SkPipeCanvas::onDrawTextRSXform(const void* text, size_t byteLength, const SkRSXform xform[],
+ const SkRect* cull, const SkPaint& paint) {
+ SkASSERT(byteLength);
+
+ bool compact = fits_in(byteLength, 23);
+ unsigned extra = compact ? (byteLength << 1) : 0;
+ if (cull) {
+ extra |= 1;
+ }
+
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawTextRSXform, extra));
+ if (!compact) {
+ writer.write32(SkToU32(byteLength));
+ }
+ write_pad(&writer, text, byteLength);
+
+ int count = paint.countText(text, byteLength);
+ writer.write32(count); // maybe we can/should store this in extra as well?
+ writer.write(xform, count * sizeof(SkRSXform));
+ if (cull) {
+ writer.writeRect(*cull);
+ }
+ write_paint(writer, paint, kText_PaintUsage);
+}
+
+void SkPipeCanvas::onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint &paint) {
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawTextBlob, 0));
+ blob->flatten(writer);
+ writer.writeScalar(x);
+ writer.writeScalar(y);
+ write_paint(writer, paint, kTextBlob_PaintUsage);
+}
+
+void SkPipeCanvas::onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint) {
+ unsigned extra = fDeduper->findOrDefinePicture(const_cast<SkPicture*>(picture));
+ if (matrix) {
+ extra |= kHasMatrix_DrawPictureExtra;
+ }
+ if (paint) {
+ extra |= kHasPaint_DrawPictureExtra;
+ }
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawPicture, extra));
+ if (matrix) {
+ writer.writeMatrix(*matrix);
+ }
+ if (paint) {
+ write_paint(writer, *paint, kSaveLayer_PaintUsage);
+ }
+}
+
+void SkPipeCanvas::onDrawRegion(const SkRegion& region, const SkPaint& paint) {
+ size_t size = region.writeToMemory(nullptr);
+ unsigned extra = 0;
+ if (fits_in(size, 24)) {
+ extra = SkToUInt(size);
+ }
+
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawRegion, extra));
+ if (0 == extra) {
+ writer.write32(size);
+ }
+ SkAutoSMalloc<2048> storage(size);
+ region.writeToMemory(storage.get());
+ write_pad(&writer, storage.get(), size);
+ write_paint(writer, paint, kGeometry_PaintUsage);
+}
+
+void SkPipeCanvas::onDrawVertices(VertexMode vmode, int vertexCount,
+ const SkPoint vertices[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint) {
+ SkASSERT(vertexCount > 0);
+
+ unsigned extra = 0;
+ if (vertexCount <= kVCount_DrawVerticesMask) {
+ extra |= vertexCount;
+ }
+ extra |= (unsigned)vmode << kVMode_DrawVerticesShift;
+
+ SkXfermode::Mode mode = SkXfermode::kModulate_Mode;
+ if (xmode && !SkXfermode::AsMode(xmode, &mode)) {
+ mode = (SkXfermode::Mode)0xFF; // sentinel for read the xfer later
+ }
+ extra |= (unsigned)mode << kXMode_DrawVerticesShift;
+
+ if (texs) {
+ extra |= kHasTex_DrawVerticesMask;
+ }
+ if (colors) {
+ extra |= kHasColors_DrawVerticesMask;
+ }
+ if (indexCount > 0) {
+ extra |= kHasIndices_DrawVerticesMask;
+ }
+
+ SkPipeWriter writer(this);
+ writer.write32(pack_verb(SkPipeVerb::kDrawVertices, extra));
+ if (vertexCount > kVCount_DrawVerticesMask) {
+ writer.write32(vertexCount);
+ }
+ if (mode == (SkXfermode::Mode)0xFF) {
+ writer.writeFlattenable(xmode);
+ }
+ writer.write(vertices, vertexCount * sizeof(SkPoint));
+ if (texs) {
+ writer.write(texs, vertexCount * sizeof(SkPoint));
+ }
+ if (colors) {
+ writer.write(colors, vertexCount * sizeof(SkColor));
+ }
+ if (indexCount > 0) {
+ writer.write32(indexCount);
+ SkASSERT(SkIsAlign2(indexCount));
+ writer.write(indices, indexCount * sizeof(uint16_t));
+ }
+ write_paint(writer, paint, kVertices_PaintUsage);
+}
+
+void SkPipeCanvas::onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkXfermode* xfer,
+ const SkPaint& paint) {
+ SkPipeWriter writer(this);
+ unsigned extra = 0;
+ SkXfermode::Mode mode = SkXfermode::kModulate_Mode;
+ if (xfer && !xfer->asMode(&mode)) {
+ mode = (SkXfermode::Mode)kExplicitXfer_DrawPatchExtraValue;
+ } else {
+ xfer = nullptr; // signal that we're using the mode enum
+ }
+ SkASSERT(0 == (mode & ~kModeEnum_DrawPatchExtraMask));
+ extra = (unsigned)mode;
+ if (colors) {
+ extra |= kHasColors_DrawPatchExtraMask;
+ }
+ if (texCoords) {
+ extra |= kHasTexture_DrawPatchExtraMask;
+ }
+ writer.write32(pack_verb(SkPipeVerb::kDrawPatch, extra));
+ writer.write(cubics, sizeof(SkPoint) * 12);
+ if (colors) {
+ writer.write(colors, sizeof(SkColor) * 4);
+ }
+ if (texCoords) {
+ writer.write(texCoords, sizeof(SkPoint) * 4);
+ }
+ if (xfer) {
+ xfer->flatten(writer);
+ }
+ write_paint(writer, paint, kGeometry_PaintUsage);
+}
+
+void SkPipeCanvas::onDrawAnnotation(const SkRect& rect, const char key[], SkData* data) {
+ const size_t len = strlen(key) + 1; // must write the trailing 0
+ bool compact = fits_in(len, 23);
+ uint32_t extra = compact ? (unsigned)len : 0;
+ extra <<= 1; // make room for has_data_sentinel
+ if (data) {
+ extra |= 1;
+ }
+
+ fStream->write32(pack_verb(SkPipeVerb::kDrawAnnotation, extra));
+ fStream->write(&rect, sizeof(SkRect));
+ if (!compact) {
+ fStream->write32(SkToU32(len));
+ }
+ write_pad(fStream, key, len);
+ if (data) {
+ fStream->write32(SkToU32(data->size()));
+ write_pad(fStream, data->data(), data->size());
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+class A8Serializer : public SkPixelSerializer {
+protected:
+ bool onUseEncodedData(const void* data, size_t len) {
+ return true;
+ }
+
+ SkData* onEncode(const SkPixmap& pmap) {
+ if (kAlpha_8_SkColorType == pmap.colorType()) {
+ SkDynamicMemoryWStream stream;
+ stream.write("skiaimgf", 8);
+ stream.write32(pmap.width());
+ stream.write32(pmap.height());
+ stream.write16(pmap.colorType());
+ stream.write16(pmap.alphaType());
+ stream.write32(0); // no colorspace for now
+ for (int y = 0; y < pmap.height(); ++y) {
+ stream.write(pmap.addr8(0, y), pmap.width());
+ }
+ return stream.detachAsData().release();
+ }
+ return nullptr;
+ }
+};
+
+static bool show_deduper_traffic = false;
+
+int SkPipeDeduper::findOrDefineImage(SkImage* image) {
+ int index = fImages.find(image->uniqueID());
+ SkASSERT(index >= 0);
+ if (index) {
+ if (show_deduper_traffic) {
+ SkDebugf(" reuseImage(%d)\n", index - 1);
+ }
+ return index;
+ }
+
+ A8Serializer serial;
+ sk_sp<SkData> data(image->encode(&serial));
+ if (!data) {
+ data.reset(image->encode());
+ }
+ if (data) {
+ index = fImages.add(image->uniqueID());
+ SkASSERT(index > 0);
+ SkASSERT(fits_in(index, 24));
+ fStream->write32(pack_verb(SkPipeVerb::kDefineImage, index));
+
+ uint32_t len = SkToU32(data->size());
+ fStream->write32(SkAlign4(len));
+ write_pad(fStream, data->data(), len);
+
+ if (show_deduper_traffic) {
+ int size = image->width() * image->height() << 2;
+ SkDebugf(" defineImage(%d) %d -> %d\n", index - 1, size, len);
+ }
+ return index;
+ }
+ SkDebugf("+++ failed to encode image [%d %d]\n", image->width(), image->height());
+ return 0; // failed to encode
+}
+
+int SkPipeDeduper::findOrDefinePicture(SkPicture* picture) {
+ int index = fPictures.find(picture->uniqueID());
+ SkASSERT(index >= 0);
+ if (index) {
+ if (show_deduper_traffic) {
+ SkDebugf(" reusePicture(%d)\n", index - 1);
+ }
+ return index;
+ }
+
+ size_t prevWritten = fStream->bytesWritten();
+ unsigned extra = 0; // 0 means we're defining a new picture, non-zero means undef_index + 1
+ fStream->write32(pack_verb(SkPipeVerb::kDefinePicture, extra));
+ const SkRect cull = picture->cullRect();
+ fStream->write(&cull, sizeof(cull));
+ picture->playback(fPipeCanvas);
+ // call fPictures.add *after* we're written the picture, so that any nested pictures will have
+ // already been defined, and we get the "last" index value.
+ index = fPictures.add(picture->uniqueID());
+ ASSERT_FITS_IN(index, kObjectDefinitionBits);
+ fStream->write32(pack_verb(SkPipeVerb::kEndPicture, index));
+
+    if (show_deduper_traffic) {
+        SkDebugf("  definePicture(%d) %d\n",
+                 index - 1, SkToU32(fStream->bytesWritten() - prevWritten));
+    }
+ return index;
+}
+
+static sk_sp<SkData> encode(SkTypeface* tf) {
+ SkDynamicMemoryWStream stream;
+ tf->serialize(&stream);
+ return sk_sp<SkData>(stream.detachAsData());
+}
+
+int SkPipeDeduper::findOrDefineTypeface(SkTypeface* typeface) {
+ if (!typeface) {
+ return 0; // default
+ }
+
+ int index = fTypefaces.find(typeface->uniqueID());
+ SkASSERT(index >= 0);
+ if (index) {
+ if (show_deduper_traffic) {
+ SkDebugf(" reuseTypeface(%d)\n", index - 1);
+ }
+ return index;
+ }
+
+ sk_sp<SkData> data = fTFSerializer ? fTFSerializer->serialize(typeface) : encode(typeface);
+ if (data) {
+ index = fTypefaces.add(typeface->uniqueID());
+ SkASSERT(index > 0);
+ SkASSERT(fits_in(index, 24));
+ fStream->write32(pack_verb(SkPipeVerb::kDefineTypeface, index));
+
+ uint32_t len = SkToU32(data->size());
+ fStream->write32(SkAlign4(len));
+ write_pad(fStream, data->data(), len);
+
+ if (show_deduper_traffic) {
+ SkDebugf(" defineTypeface(%d) %d\n", index - 1, len);
+ }
+ return index;
+ }
+ SkDebugf("+++ failed to encode typeface %d\n", typeface->uniqueID());
+ return 0; // failed to encode
+}
+
+int SkPipeDeduper::findOrDefineFactory(SkFlattenable* flattenable) {
+ if (!flattenable) {
+ return 0;
+ }
+
+ int index = fFactories.find(flattenable->getFactory());
+ SkASSERT(index >= 0);
+ if (index) {
+ if (show_deduper_traffic) {
+ SkDebugf(" reuseFactory(%d)\n", index - 1);
+ }
+ return index;
+ }
+
+ index = fFactories.add(flattenable->getFactory());
+ ASSERT_FITS_IN(index, kIndex_DefineFactoryExtraBits);
+ const char* name = flattenable->getTypeName();
+ size_t len = strlen(name);
+ ASSERT_FITS_IN(len, kNameLength_DefineFactoryExtraBits);
+ unsigned extra = (index << kNameLength_DefineFactoryExtraBits) | len;
+ size_t prevWritten = fStream->bytesWritten();
+ fStream->write32(pack_verb(SkPipeVerb::kDefineFactory, extra));
+ write_pad(fStream, name, len + 1);
+ if (false) {
+ SkDebugf(" defineFactory(%d) %d %s\n",
+ index - 1, SkToU32(fStream->bytesWritten() - prevWritten), name);
+ }
+ return index;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#include "SkPipe.h"
+
+class SkPipeSerializer::Impl {
+public:
+ SkPipeDeduper fDeduper;
+ std::unique_ptr<SkPipeCanvas> fCanvas;
+};
+
+SkPipeSerializer::SkPipeSerializer() : fImpl(new Impl) {}
+
+SkPipeSerializer::~SkPipeSerializer() {
+ if (fImpl->fCanvas) {
+ this->endWrite();
+ }
+}
+
+void SkPipeSerializer::setTypefaceSerializer(SkTypefaceSerializer* tfs) {
+ fImpl->fDeduper.setTypefaceSerializer(tfs);
+}
+
+void SkPipeSerializer::resetCache() {
+ fImpl->fDeduper.resetCaches();
+}
+
+sk_sp<SkData> SkPipeSerializer::writeImage(SkImage* image) {
+ SkDynamicMemoryWStream stream;
+ this->writeImage(image, &stream);
+ return stream.detachAsData();
+}
+
+sk_sp<SkData> SkPipeSerializer::writePicture(SkPicture* picture) {
+ SkDynamicMemoryWStream stream;
+ this->writePicture(picture, &stream);
+ return stream.detachAsData();
+}
+
+void SkPipeSerializer::writePicture(SkPicture* picture, SkWStream* stream) {
+ int index = fImpl->fDeduper.findPicture(picture);
+ if (0 == index) {
+ // Try to define the picture
+ this->beginWrite(picture->cullRect(), stream);
+ index = fImpl->fDeduper.findOrDefinePicture(picture);
+ this->endWrite();
+ }
+ stream->write32(pack_verb(SkPipeVerb::kWritePicture, index));
+}
+
+void SkPipeSerializer::writeImage(SkImage* image, SkWStream* stream) {
+ int index = fImpl->fDeduper.findImage(image);
+ if (0 == index) {
+ // Try to define the image
+ fImpl->fDeduper.setStream(stream);
+ index = fImpl->fDeduper.findOrDefineImage(image);
+ }
+ stream->write32(pack_verb(SkPipeVerb::kWriteImage, index));
+}
+
+SkCanvas* SkPipeSerializer::beginWrite(const SkRect& cull, SkWStream* stream) {
+ SkASSERT(nullptr == fImpl->fCanvas);
+ fImpl->fCanvas.reset(new SkPipeCanvas(cull, &fImpl->fDeduper, stream));
+ fImpl->fDeduper.setStream(stream);
+ fImpl->fDeduper.setCanvas(fImpl->fCanvas.get());
+ return fImpl->fCanvas.get();
+}
+
+void SkPipeSerializer::endWrite() {
+ fImpl->fCanvas->restoreToCount(1);
+ fImpl->fCanvas.reset(nullptr);
+ fImpl->fDeduper.setCanvas(nullptr);
+}
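
A minimal usage sketch of the writer side above, assuming only the SkPipeSerializer API introduced in this file (writePicture, beginWrite/endWrite) plus SkDynamicMemoryWStream; the helper names pipe_picture and pipe_record are hypothetical and are shown purely for illustration, not as part of the imported sources.

    #include "SkCanvas.h"
    #include "SkData.h"
    #include "SkPaint.h"
    #include "SkPicture.h"
    #include "SkPipe.h"
    #include "SkStream.h"

    // Serialize an existing picture: the serializer defines the picture
    // (kDefinePicture ... kEndPicture) and then emits kWritePicture with its index.
    static sk_sp<SkData> pipe_picture(SkPicture* pic) {
        SkPipeSerializer serializer;
        return serializer.writePicture(pic);
    }

    // Or record drawing calls directly through the pipe canvas.
    static sk_sp<SkData> pipe_record(const SkRect& cull) {
        SkDynamicMemoryWStream stream;
        SkPipeSerializer serializer;
        SkCanvas* canvas = serializer.beginWrite(cull, &stream);
        canvas->drawRect(cull, SkPaint());   // verbs are written straight to the stream
        serializer.endWrite();               // restores to the base save level, releases the canvas
        return stream.detachAsData();
    }
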
diff --git a/gfx/skia/skia/src/pipe/SkPipeCanvas.h b/gfx/skia/skia/src/pipe/SkPipeCanvas.h
new file mode 100644
index 000000000..50d76edfd
--- /dev/null
+++ b/gfx/skia/skia/src/pipe/SkPipeCanvas.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPipeCanvas_DEFINED
+#define SkPipeCanvas_DEFINED
+
+#include "SkCanvas.h"
+#include "SkDeduper.h"
+#include "SkImage.h"
+#include "SkPipe.h"
+#include "SkTypeface.h"
+#include "SkWriteBuffer.h"
+
+class SkPipeCanvas;
+class SkPipeWriter;
+
+template <typename T> class SkTIndexSet {
+public:
+ void reset() { fArray.reset(); }
+
+ // returns the found index or 0
+ int find(const T& key) const {
+ const Rec* stop = fArray.end();
+ for (const Rec* curr = fArray.begin(); curr < stop; ++curr) {
+ if (key == curr->fKey) {
+ return curr->fIndex;
+ }
+ }
+ return 0;
+ }
+
+ // returns the new index
+ int add(const T& key) {
+ Rec* rec = fArray.append();
+ rec->fKey = key;
+ rec->fIndex = fNextIndex++;
+ return rec->fIndex;
+ }
+
+private:
+ struct Rec {
+ T fKey;
+ int fIndex;
+ };
+
+ SkTDArray<Rec> fArray;
+ int fNextIndex = 1;
+};
+
+class SkPipeDeduper : public SkDeduper {
+public:
+ void resetCaches() {
+ fImages.reset();
+ fPictures.reset();
+ fTypefaces.reset();
+ fFactories.reset();
+ }
+
+ void setCanvas(SkPipeCanvas* canvas) { fPipeCanvas = canvas; }
+ void setStream(SkWStream* stream) { fStream = stream; }
+ void setTypefaceSerializer(SkTypefaceSerializer* tfs) { fTFSerializer = tfs; }
+
+ // returns 0 if not found
+ int findImage(SkImage* image) const { return fImages.find(image->uniqueID()); }
+ int findPicture(SkPicture* picture) const { return fPictures.find(picture->uniqueID()); }
+
+ int findOrDefineImage(SkImage*) override;
+ int findOrDefinePicture(SkPicture*) override;
+ int findOrDefineTypeface(SkTypeface*) override;
+ int findOrDefineFactory(SkFlattenable*) override;
+
+private:
+ SkPipeCanvas* fPipeCanvas = nullptr;
+ SkWStream* fStream = nullptr;
+
+ SkTypefaceSerializer* fTFSerializer = nullptr;
+
+ // All our keys (at the moment) are 32bit uniqueIDs
+ SkTIndexSet<uint32_t> fImages;
+ SkTIndexSet<uint32_t> fPictures;
+ SkTIndexSet<uint32_t> fTypefaces;
+ SkTIndexSet<SkFlattenable::Factory> fFactories;
+};
+
+
+class SkPipeCanvas : public SkCanvas {
+public:
+ SkPipeCanvas(const SkRect& cull, SkPipeDeduper*, SkWStream*);
+ ~SkPipeCanvas() override;
+
+protected:
+ void willSave() override;
+ SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec&) override;
+ void willRestore() override;
+
+ void didConcat(const SkMatrix&) override;
+ void didSetMatrix(const SkMatrix&) override;
+
+ void onDrawArc(const SkRect&, SkScalar startAngle, SkScalar sweepAngle, bool useCenter,
+ const SkPaint&) override;
+ void onDrawAtlas(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[],
+ int count, SkXfermode::Mode, const SkRect* cull, const SkPaint*) override;
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override;
+ void onDrawText(const void* text, size_t byteLength, SkScalar x, SkScalar y,
+ const SkPaint&) override;
+ void onDrawPosText(const void* text, size_t byteLength, const SkPoint pos[],
+ const SkPaint&) override;
+ void onDrawPosTextH(const void* text, size_t byteLength, const SkScalar xpos[],
+ SkScalar constY, const SkPaint&) override;
+ void onDrawTextOnPath(const void* text, size_t byteLength, const SkPath&, const SkMatrix*,
+ const SkPaint&) override;
+ void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y, const SkPaint&) override;
+ void onDrawTextRSXform(const void* text, size_t byteLength, const SkRSXform xform[],
+ const SkRect* cull, const SkPaint& paint) override;
+ void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4], const SkPoint texCoords[4],
+ SkXfermode*, const SkPaint&) override;
+
+ void onDrawPaint(const SkPaint&) override;
+ void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&) override;
+ void onDrawRect(const SkRect&, const SkPaint&) override;
+ void onDrawOval(const SkRect&, const SkPaint&) override;
+ void onDrawRegion(const SkRegion&, const SkPaint&) override;
+ void onDrawRRect(const SkRRect&, const SkPaint&) override;
+ void onDrawPath(const SkPath&, const SkPaint&) override;
+
+ void onDrawImage(const SkImage*, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawImageRect(const SkImage*, const SkRect* src, const SkRect& dst,
+ const SkPaint*, SrcRectConstraint) override;
+ void onDrawImageNine(const SkImage*, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawImageLattice(const SkImage*, const Lattice& lattice, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawVertices(VertexMode vmode, int vertexCount,
+ const SkPoint vertices[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint&) override;
+
+ void onClipRect(const SkRect&, ClipOp, ClipEdgeStyle) override;
+ void onClipRRect(const SkRRect&, ClipOp, ClipEdgeStyle) override;
+ void onClipPath(const SkPath&, ClipOp, ClipEdgeStyle) override;
+ void onClipRegion(const SkRegion&, ClipOp) override;
+
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override;
+ void onDrawAnnotation(const SkRect&, const char[], SkData*) override;
+
+ // These we turn into images
+ void onDrawBitmap(const SkBitmap&, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawBitmapRect(const SkBitmap&, const SkRect* src, const SkRect& dst, const SkPaint*,
+ SrcRectConstraint) override;
+ void onDrawBitmapNine(const SkBitmap&, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawBitmapLattice(const SkBitmap&, const Lattice& lattice, const SkRect& dst,
+ const SkPaint*) override;
+
+private:
+ SkPipeDeduper* fDeduper;
+ SkWStream* fStream;
+
+ friend class SkPipeWriter;
+
+ typedef SkCanvas INHERITED;
+};
+
+
+#endif
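
A note on the index convention above: SkTIndexSet reserves 0 to mean "not found", and add() hands out 1-based indices (fNextIndex starts at 1), which is why the deduper logs index - 1. A hypothetical snippet, for illustration only, showing that contract:

    SkTIndexSet<uint32_t> ids;
    SkASSERT(0 == ids.find(42));   // 0 means "not present"
    int index = ids.add(42);       // first add() returns 1
    SkASSERT(1 == index);
    SkASSERT(index == ids.find(42));
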
diff --git a/gfx/skia/skia/src/pipe/SkPipeFormat.h b/gfx/skia/skia/src/pipe/SkPipeFormat.h
new file mode 100644
index 000000000..9a1d30c7b
--- /dev/null
+++ b/gfx/skia/skia/src/pipe/SkPipeFormat.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPipeFormat_DEFINED
+#define SkPipeFormat_DEFINED
+
+#include "SkTypes.h"
+
+#define kDefinePicture_ExtPipeVerb SkSetFourByteTag('s', 'k', 'p', 'i')
+
+enum class SkPipeVerb : uint8_t {
+ kSave, // extra == 0
+ kSaveLayer,
+ kRestore, // extra == 0
+ kConcat, // extra == SkMatrix::MaskType
+
+ kClipRect, // extra == (SkRegion::Op << 1) | isAntiAlias:1
+ kClipRRect, // extra == (SkRegion::Op << 1) | isAntiAlias:1
+ kClipPath, // extra == (SkRegion::Op << 1) | isAntiAlias:1
+ kClipRegion, // extra == (SkRegion::Op << 1)
+
+ kDrawArc, // extra == useCenter
+ kDrawAtlas, // extra == has_colors | has_cull | has_paint | mode
+ kDrawDRRect,
+ kDrawText, // extra == byteLength:24 else next 32
+ kDrawPosText, // extra == byteLength:24 else next 32
+ kDrawPosTextH, // extra == byteLength:24 else next 32
+ kDrawRegion, // extra == size:24 of region, or 0 means next 32
+ kDrawTextOnPath,
+ kDrawTextBlob,
+ kDrawTextRSXform, // extra == (byteLength:23 << 1) else next 32 | has_cull_rect:1
+ kDrawPatch,
+ kDrawPaint, // extra == 0
+ kDrawPoints, // extra == PointMode
+ kDrawRect, // extra == 0
+ kDrawPath, // extra == 0
+ kDrawOval, // extra == 0
+ kDrawRRect, // extra == 0
+
+ kDrawImage, // extra == has_paint:1
+ kDrawImageRect, // extra == constraint | has_src_rect | has_paint
+ kDrawImageNine, // extra == has_paint:1
+ kDrawImageLattice, // extra == x_count:8 | y_count:8 | has_paint:1
+
+ kDrawVertices,
+
+ kDrawPicture, // extra == picture_index
+ kDrawAnnotation, // extra == (key_len_plus_1:23 << 1) else next 32 | has_data:1
+
+ kDefineImage, // extra == image_index
+ kDefineTypeface,
+ kDefineFactory, // extra == factory_index (followed by padded getTypeName string)
+ kDefinePicture, // extra == 0 or forget_index + 1 (0 means we're defining a new picture)
+ kEndPicture, // extra == picture_index
+ kWriteImage, // extra == image_index
+ kWritePicture, // extra == picture_index
+};
+
+enum PaintUsage {
+ kText_PaintUsage = 1 << 0,
+ kTextBlob_PaintUsage = 1 << 1,
+ kGeometry_PaintUsage = 1 << 2,
+ kImage_PaintUsage = 1 << 3,
+ kSaveLayer_PaintUsage = 1 << 4,
+ kDrawPaint_PaintUsage = 1 << 5,
+ kVertices_PaintUsage = 1 << 6,
+ kRespectsStroke_PaintUsage = 1 << 7,
+ kUnknown_PaintUsage = 0xFF,
+};
+
+// must sum to <= 32
+enum BitsPerField {
+ kFlags_BPF = 16,
+ kFilter_BPF = 2,
+ kStyle_BPF = 2,
+ kCaps_BPF = 2,
+ kJoins_BPF = 2,
+ kHint_BPF = 2,
+ kAlign_BPF = 2,
+ kEncoding_BPF = 2,
+};
+
+enum {
+ kTextSize_NonDef = 1 << 0,
+ kTextScaleX_NonDef = 1 << 1,
+ kTextSkewX_NonDef = 1 << 2,
+ kStrokeWidth_NonDef = 1 << 3,
+ kStrokeMiter_NonDef = 1 << 4,
+ kColor_NonDef = 1 << 5,
+ kTypeface_NonDef = 1 << 6,
+ kPathEffect_NonDef = 1 << 7,
+ kShader_NonDef = 1 << 8,
+ kMaskFilter_NonDef = 1 << 9,
+ kColorFilter_NonDef = 1 << 10,
+ kRasterizer_NonDef = 1 << 11,
+ kImageFilter_NonDef = 1 << 12,
+ kDrawLooper_NonDef = 1 << 13,
+};
+
+enum {
+ kFlags_SaveLayerMask = 0xFF,
+ kHasBounds_SaveLayerMask = 1 << 8,
+ kHasPaint_SaveLayerMask = 1 << 9,
+ kHasBackdrop_SaveLayerMask = 1 << 10,
+ kDontClipToLayer_SaveLayerMask = 1 << 11,
+};
+
+enum {
+ kObjectDefinitionBits = 20,
+ kIndex_ObjectDefinitionMask = ((1 << kObjectDefinitionBits) - 1),
+ kUser_ObjectDefinitionMask = 0x7 << kObjectDefinitionBits,
+ kUndef_ObjectDefinitionMask = 1 << 23,
+ // (Undef:1 | User:3 | Index:20) must fit in extra:24
+};
+
+enum {
+ kTypeMask_ConcatMask = 0xF,
+ kSetMatrix_ConcatMask = 1 << 4,
+};
+
+enum {
+ kMode_DrawAtlasMask = 0xFF,
+ kHasColors_DrawAtlasMask = 1 << 8,
+ kHasCull_DrawAtlasMask = 1 << 9,
+ kHasPaint_DrawAtlasMask = 1 << 10,
+};
+
+enum {
+ kHasPaint_DrawImageMask = 1 << 0,
+};
+
+enum {
+ kConstraint_DrawImageRectMask = 1 << 0,
+ kHasPaint_DrawImageRectMask = 1 << 1,
+ kHasSrcRect_DrawImageRectMask = 1 << 2,
+};
+
+enum {
+ kHasPaint_DrawImageNineMask = 1 << 0,
+};
+
+enum {
+ // picture_index takes the first kObjectDefinitionBits bits
+ kHasMatrix_DrawPictureExtra = 1 << 21,
+ kHasPaint_DrawPictureExtra = 1 << 22,
+};
+
+enum {
+ kIndex_DefineFactoryExtraBits = 10,
+ kNameLength_DefineFactoryExtraBits = 14, // includes trailing 0
+ kNameLength_DefineFactoryExtraMask = (1 << kNameLength_DefineFactoryExtraBits) - 1,
+};
+
+enum {
+ kModeEnum_DrawPatchExtraMask = 0xFF,
+ kExplicitXfer_DrawPatchExtraValue = 0xFF,
+ kHasColors_DrawPatchExtraMask = 0x100,
+ kHasTexture_DrawPatchExtraMask = 0x200,
+};
+
+enum {
+ // if we store a zero for VCount, then read an int after the packedverb for the vcount
+ kVCount_DrawVerticesMask = (1 << 11) - 1,
+
+ kVMode_DrawVerticesShift = 11,
+ kVMode_DrawVerticesMask = 3 << kVMode_DrawVerticesShift,
+
+ kXMode_DrawVerticesShift = 13,
+ kXMode_DrawVerticesMask = 0xFF << kXMode_DrawVerticesShift,
+
+ kHasTex_DrawVerticesMask = 1 << 21,
+ kHasColors_DrawVerticesMask = 1 << 22,
+ kHasIndices_DrawVerticesMask = 1 << 23,
+};
+
+enum {
+ kTextLength_DrawTextOnPathMask = (1 << 16) - 1,
+ kMatrixType_DrawTextOnPathShift = 16,
+ kMatrixType_DrawTextOnPathMask = 0xF << kMatrixType_DrawTextOnPathShift,
+};
+
+enum {
+ kHasPaint_DrawImageLatticeMask = 1 << 0,
+ kHasFlags_DrawImageLatticeMask = 1 << 1,
+ kXCount_DrawImageLatticeShift = 2, // bits 2:9 are xcount or FF means 32bits follow
+ kYCount_DrawImageLatticeShift = 10, // bits 10:17 are ycount or FF means 32bits follow
+ kCount_DrawImageLatticeMask = 0xFF, // sentinel for 32bits follow,
+ // thus max inline count is 254
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static inline bool fits_in(int value, int bits) {
+ return value >= 0 && value < (1 << bits);
+}
+
+static inline void ASSERT_FITS_IN(int value, int bits) {
+ SkASSERT(fits_in(value, bits));
+}
+
+static inline uint32_t pack_verb(SkPipeVerb verb, unsigned extra = 0) {
+ //SkDebugf("pack [%d] %d\n", verb, extra);
+ ASSERT_FITS_IN((unsigned)verb, 8);
+ ASSERT_FITS_IN(extra, 24);
+ return ((uint32_t)verb << 24) | extra;
+}
+
+static inline SkPipeVerb unpack_verb(uint32_t data) {
+ return (SkPipeVerb)(data >> 24);
+}
+
+static inline unsigned unpack_verb_extra(uint32_t data) {
+ return data & 0xFFFFFF;
+}
+
+#endif
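
The packing helpers above store the verb in the top 8 bits and the verb-specific "extra" payload in the low 24 bits; pack_verb asserts that both fit. A small hypothetical round trip, for illustration only:

    uint32_t packed = pack_verb(SkPipeVerb::kDefineImage, 5);   // e.g. image index 5
    SkASSERT(SkPipeVerb::kDefineImage == unpack_verb(packed));
    SkASSERT(5 == unpack_verb_extra(packed));
    // Object indices are further limited to kObjectDefinitionBits (20 bits),
    // leaving room for the user and undef bits within the 24-bit extra field.
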
diff --git a/gfx/skia/skia/src/pipe/SkPipeReader.cpp b/gfx/skia/skia/src/pipe/SkPipeReader.cpp
new file mode 100644
index 000000000..47d4072d0
--- /dev/null
+++ b/gfx/skia/skia/src/pipe/SkPipeReader.cpp
@@ -0,0 +1,962 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCanvas.h"
+#include "SkDeduper.h"
+#include "SkPicture.h"
+#include "SkPictureRecorder.h"
+#include "SkPipe.h"
+#include "SkPipeFormat.h"
+#include "SkReadBuffer.h"
+#include "SkRefSet.h"
+#include "SkRSXform.h"
+#include "SkTextBlob.h"
+#include "SkTypeface.h"
+
+class SkPipeReader;
+
+static bool do_playback(SkPipeReader& reader, SkCanvas* canvas, int* endPictureIndex = nullptr);
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+class SkPipeInflator : public SkInflator {
+public:
+ SkPipeInflator(SkRefSet<SkImage>* images, SkRefSet<SkPicture>* pictures,
+ SkRefSet<SkTypeface>* typefaces, SkTDArray<SkFlattenable::Factory>* factories,
+ SkTypefaceDeserializer* tfd)
+ : fImages(images)
+ , fPictures(pictures)
+ , fTypefaces(typefaces)
+ , fFactories(factories)
+ , fTFDeserializer(tfd)
+ {}
+
+ SkImage* getImage(int index) override {
+ return index ? fImages->get(index - 1) : nullptr;
+ }
+ SkPicture* getPicture(int index) override {
+ return index ? fPictures->get(index - 1) : nullptr;
+ }
+ SkTypeface* getTypeface(int index) override {
+ return fTypefaces->get(index - 1);
+ }
+ SkFlattenable::Factory getFactory(int index) override {
+ return index ? fFactories->getAt(index - 1) : nullptr;
+ }
+
+ bool setImage(int index, SkImage* img) {
+ return fImages->set(index - 1, img);
+ }
+ bool setPicture(int index, SkPicture* pic) {
+ return fPictures->set(index - 1, pic);
+ }
+ bool setTypeface(int index, SkTypeface* face) {
+ return fTypefaces->set(index - 1, face);
+ }
+ bool setFactory(int index, SkFlattenable::Factory factory) {
+ SkASSERT(index > 0);
+ SkASSERT(factory);
+ index -= 1;
+ if ((unsigned)index < (unsigned)fFactories->count()) {
+ (*fFactories)[index] = factory;
+ return true;
+ }
+ if (fFactories->count() == index) {
+ *fFactories->append() = factory;
+ return true;
+ }
+ SkDebugf("setFactory: index [%d] out of range %d\n", index, fFactories->count());
+ return false;
+ }
+
+ void setTypefaceDeserializer(SkTypefaceDeserializer* tfd) {
+ fTFDeserializer = tfd;
+ }
+
+ sk_sp<SkTypeface> makeTypeface(const void* data, size_t size);
+
+private:
+ SkRefSet<SkImage>* fImages;
+ SkRefSet<SkPicture>* fPictures;
+ SkRefSet<SkTypeface>* fTypefaces;
+ SkTDArray<SkFlattenable::Factory>* fFactories;
+
+ SkTypefaceDeserializer* fTFDeserializer;
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+template <typename T> const T* skip(SkReadBuffer& reader, int count = 1) {
+ return (const T*)reader.skip(count * sizeof(T));
+}
+
+static SkRRect read_rrect(SkReadBuffer& reader) {
+ SkRRect rrect;
+ rrect.readFromMemory(reader.skip(SkRRect::kSizeInMemory), SkRRect::kSizeInMemory);
+ return rrect;
+}
+
+static SkMatrix read_sparse_matrix(SkReadBuffer& reader, SkMatrix::TypeMask tm) {
+ SkMatrix matrix;
+ matrix.reset();
+
+ if (tm & SkMatrix::kPerspective_Mask) {
+ matrix.set9(skip<SkScalar>(reader, 9));
+ } else if (tm & SkMatrix::kAffine_Mask) {
+ const SkScalar* tmp = skip<SkScalar>(reader, 6);
+ matrix[SkMatrix::kMScaleX] = tmp[0];
+ matrix[SkMatrix::kMSkewX] = tmp[1];
+ matrix[SkMatrix::kMTransX] = tmp[2];
+ matrix[SkMatrix::kMScaleY] = tmp[3];
+ matrix[SkMatrix::kMSkewY] = tmp[4];
+ matrix[SkMatrix::kMTransY] = tmp[5];
+ } else if (tm & SkMatrix::kScale_Mask) {
+ const SkScalar* tmp = skip<SkScalar>(reader, 4);
+ matrix[SkMatrix::kMScaleX] = tmp[0];
+ matrix[SkMatrix::kMTransX] = tmp[1];
+ matrix[SkMatrix::kMScaleY] = tmp[2];
+ matrix[SkMatrix::kMTransY] = tmp[3];
+ } else if (tm & SkMatrix::kTranslate_Mask) {
+ const SkScalar* tmp = skip<SkScalar>(reader, 2);
+ matrix[SkMatrix::kMTransX] = tmp[0];
+ matrix[SkMatrix::kMTransY] = tmp[1];
+ }
+ // else read nothing for Identity
+ return matrix;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#define CHECK_SET_SCALAR(Field) \
+ do { if (nondef & k##Field##_NonDef) { \
+ paint.set##Field(reader.readScalar()); \
+ }} while (0)
+
+#define CHECK_SET_FLATTENABLE(Field) \
+ do { if (nondef & k##Field##_NonDef) { \
+ paint.set##Field(reader.read##Field()); \
+ }} while (0)
+
+/*
+ * Header:
+ * paint flags : 32
+ * non_def bits : 16
+ * xfermode enum : 8
+ * pad zeros : 8
+ */
+static SkPaint read_paint(SkReadBuffer& reader) {
+ SkPaint paint;
+
+ uint32_t packedFlags = reader.read32();
+ uint32_t extra = reader.read32();
+ unsigned nondef = extra >> 16;
+ paint.setBlendMode(SkBlendMode((extra >> 8) & 0xFF));
+ SkASSERT((extra & 0xFF) == 0); // zero pad byte
+
+ packedFlags >>= 2; // currently unused
+ paint.setTextEncoding((SkPaint::TextEncoding)(packedFlags & 3)); packedFlags >>= 2;
+ paint.setTextAlign((SkPaint::Align)(packedFlags & 3)); packedFlags >>= 2;
+ paint.setHinting((SkPaint::Hinting)(packedFlags & 3)); packedFlags >>= 2;
+ paint.setStrokeJoin((SkPaint::Join)(packedFlags & 3)); packedFlags >>= 2;
+ paint.setStrokeCap((SkPaint::Cap)(packedFlags & 3)); packedFlags >>= 2;
+ paint.setStyle((SkPaint::Style)(packedFlags & 3)); packedFlags >>= 2;
+ paint.setFilterQuality((SkFilterQuality)(packedFlags & 3)); packedFlags >>= 2;
+ paint.setFlags(packedFlags);
+
+ CHECK_SET_SCALAR(TextSize);
+ CHECK_SET_SCALAR(TextScaleX);
+ CHECK_SET_SCALAR(TextSkewX);
+ CHECK_SET_SCALAR(StrokeWidth);
+ CHECK_SET_SCALAR(StrokeMiter);
+
+ if (nondef & kColor_NonDef) {
+ paint.setColor(reader.read32());
+ }
+
+ CHECK_SET_FLATTENABLE(Typeface);
+ CHECK_SET_FLATTENABLE(PathEffect);
+ CHECK_SET_FLATTENABLE(Shader);
+ CHECK_SET_FLATTENABLE(MaskFilter);
+ CHECK_SET_FLATTENABLE(ColorFilter);
+ CHECK_SET_FLATTENABLE(Rasterizer);
+ CHECK_SET_FLATTENABLE(ImageFilter);
+ CHECK_SET_FLATTENABLE(DrawLooper);
+
+ return paint;
+}
+
+class SkPipeReader : public SkReadBuffer {
+public:
+ SkPipeReader(SkPipeDeserializer* sink, const void* data, size_t size)
+ : SkReadBuffer(data, size)
+ , fSink(sink)
+ {}
+
+ SkPipeDeserializer* fSink;
+
+ SkFlattenable::Factory findFactory(const char name[]) {
+ SkFlattenable::Factory factory;
+ // Check if a custom Factory has been specified for this flattenable.
+ if (!(factory = this->getCustomFactory(SkString(name)))) {
+ // If there is no custom Factory, check for a default.
+ factory = SkFlattenable::NameToFactory(name);
+ }
+ return factory;
+ }
+
+ void readPaint(SkPaint* paint) override {
+ *paint = read_paint(*this);
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+typedef void (*SkPipeHandler)(SkPipeReader&, uint32_t packedVerb, SkCanvas*);
+
+static void save_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kSave == unpack_verb(packedVerb));
+ canvas->save();
+}
+
+static void saveLayer_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kSaveLayer == unpack_verb(packedVerb));
+ unsigned extra = unpack_verb_extra(packedVerb);
+ const SkRect* bounds = (extra & kHasBounds_SaveLayerMask) ? skip<SkRect>(reader) : nullptr;
+ SkPaint paintStorage, *paint = nullptr;
+ if (extra & kHasPaint_SaveLayerMask) {
+ paintStorage = read_paint(reader);
+ paint = &paintStorage;
+ }
+ sk_sp<SkImageFilter> backdrop;
+ if (extra & kHasBackdrop_SaveLayerMask) {
+ backdrop = reader.readImageFilter();
+ }
+ SkCanvas::SaveLayerFlags flags = (SkCanvas::SaveLayerFlags)(extra & kFlags_SaveLayerMask);
+
+ // unremap this wacky flag
+ if (extra & kDontClipToLayer_SaveLayerMask) {
+        flags |= (1 << 31);  // SkCanvas::kDontClipToLayer_PrivateSaveLayerFlag
+ }
+
+ canvas->saveLayer(SkCanvas::SaveLayerRec(bounds, paint, backdrop.get(), flags));
+}
+
+static void restore_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kRestore == unpack_verb(packedVerb));
+ canvas->restore();
+}
+
+static void concat_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kConcat == unpack_verb(packedVerb));
+ SkMatrix::TypeMask tm = (SkMatrix::TypeMask)(packedVerb & kTypeMask_ConcatMask);
+ const SkMatrix matrix = read_sparse_matrix(reader, tm);
+ if (packedVerb & kSetMatrix_ConcatMask) {
+ canvas->setMatrix(matrix);
+ } else {
+ canvas->concat(matrix);
+ }
+}
+
+static void clipRect_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kClipRect == unpack_verb(packedVerb));
+ SkCanvas::ClipOp op = (SkCanvas::ClipOp)(unpack_verb_extra(packedVerb) >> 1);
+ bool isAA = unpack_verb_extra(packedVerb) & 1;
+ canvas->clipRect(*skip<SkRect>(reader), op, isAA);
+}
+
+static void clipRRect_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kClipRRect == unpack_verb(packedVerb));
+ SkCanvas::ClipOp op = (SkCanvas::ClipOp)(unpack_verb_extra(packedVerb) >> 1);
+ bool isAA = unpack_verb_extra(packedVerb) & 1;
+ canvas->clipRRect(read_rrect(reader), op, isAA);
+}
+
+static void clipPath_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kClipPath == unpack_verb(packedVerb));
+ SkCanvas::ClipOp op = (SkCanvas::ClipOp)(unpack_verb_extra(packedVerb) >> 1);
+ bool isAA = unpack_verb_extra(packedVerb) & 1;
+ SkPath path;
+ reader.readPath(&path);
+ canvas->clipPath(path, op, isAA);
+}
+
+static void clipRegion_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kClipRegion == unpack_verb(packedVerb));
+ SkCanvas::ClipOp op = (SkCanvas::ClipOp)(unpack_verb_extra(packedVerb) >> 1);
+ SkRegion region;
+ reader.readRegion(&region);
+ canvas->clipRegion(region, op);
+}
+
+static void drawArc_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawArc == unpack_verb(packedVerb));
+ const bool useCenter = (bool)(unpack_verb_extra(packedVerb) & 1);
+ const SkScalar* scalars = skip<SkScalar>(reader, 6); // bounds[0..3], start[4], sweep[5]
+ const SkRect* bounds = (const SkRect*)scalars;
+ canvas->drawArc(*bounds, scalars[4], scalars[5], useCenter, read_paint(reader));
+}
+
+static void drawAtlas_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawAtlas == unpack_verb(packedVerb));
+ SkXfermode::Mode mode = (SkXfermode::Mode)(packedVerb & kMode_DrawAtlasMask);
+ sk_sp<SkImage> image(reader.readImage());
+ int count = reader.read32();
+ const SkRSXform* xform = skip<SkRSXform>(reader, count);
+ const SkRect* rect = skip<SkRect>(reader, count);
+ const SkColor* color = nullptr;
+ if (packedVerb & kHasColors_DrawAtlasMask) {
+ color = skip<SkColor>(reader, count);
+ }
+ const SkRect* cull = nullptr;
+ if (packedVerb & kHasCull_DrawAtlasMask) {
+ cull = skip<SkRect>(reader);
+ }
+ SkPaint paintStorage, *paint = nullptr;
+ if (packedVerb & kHasPaint_DrawAtlasMask) {
+ paintStorage = read_paint(reader);
+ paint = &paintStorage;
+ }
+ canvas->drawAtlas(image, xform, rect, color, count, mode, cull, paint);
+}
+
+static void drawDRRect_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawDRRect == unpack_verb(packedVerb));
+ const SkRRect outer = read_rrect(reader);
+ const SkRRect inner = read_rrect(reader);
+ canvas->drawDRRect(outer, inner, read_paint(reader));
+}
+
+static void drawText_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawText == unpack_verb(packedVerb));
+ uint32_t len = unpack_verb_extra(packedVerb);
+ if (0 == len) {
+ len = reader.read32();
+ }
+ const void* text = reader.skip(SkAlign4(len));
+ SkScalar x = reader.readScalar();
+ SkScalar y = reader.readScalar();
+ canvas->drawText(text, len, x, y, read_paint(reader));
+}
+
+static void drawPosText_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawPosText == unpack_verb(packedVerb));
+ uint32_t len = unpack_verb_extra(packedVerb);
+ if (0 == len) {
+ len = reader.read32();
+ }
+ const void* text = reader.skip(SkAlign4(len));
+ int count = reader.read32();
+ const SkPoint* pos = skip<SkPoint>(reader, count);
+ SkPaint paint = read_paint(reader);
+ SkASSERT(paint.countText(text, len) == count);
+ canvas->drawPosText(text, len, pos, paint);
+}
+
+static void drawPosTextH_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawPosTextH == unpack_verb(packedVerb));
+ uint32_t len = unpack_verb_extra(packedVerb);
+ if (0 == len) {
+ len = reader.read32();
+ }
+ const void* text = reader.skip(SkAlign4(len));
+ int count = reader.read32();
+ const SkScalar* xpos = skip<SkScalar>(reader, count);
+ SkScalar constY = reader.readScalar();
+ SkPaint paint = read_paint(reader);
+ SkASSERT(paint.countText(text, len) == count);
+ canvas->drawPosTextH(text, len, xpos, constY, paint);
+}
+
+static void drawTextOnPath_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawTextOnPath == unpack_verb(packedVerb));
+ uint32_t byteLength = packedVerb & kTextLength_DrawTextOnPathMask;
+ SkMatrix::TypeMask tm = (SkMatrix::TypeMask)
+ ((packedVerb & kMatrixType_DrawTextOnPathMask) >> kMatrixType_DrawTextOnPathShift);
+
+ if (0 == byteLength) {
+ byteLength = reader.read32();
+ }
+ const void* text = reader.skip(SkAlign4(byteLength));
+ SkPath path;
+ reader.readPath(&path);
+ const SkMatrix* matrix = nullptr;
+ SkMatrix matrixStorage;
+ if (tm != SkMatrix::kIdentity_Mask) {
+ matrixStorage = read_sparse_matrix(reader, tm);
+ matrix = &matrixStorage;
+ }
+ canvas->drawTextOnPath(text, byteLength, path, matrix, read_paint(reader));
+}
+
+static void drawTextBlob_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ sk_sp<SkTextBlob> tb = SkTextBlob::MakeFromBuffer(reader);
+ SkScalar x = reader.readScalar();
+ SkScalar y = reader.readScalar();
+ canvas->drawTextBlob(tb, x, y, read_paint(reader));
+}
+
+static void drawTextRSXform_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawTextRSXform == unpack_verb(packedVerb));
+ uint32_t len = unpack_verb_extra(packedVerb) >> 1;
+ if (0 == len) {
+ len = reader.read32();
+ }
+ const void* text = reader.skip(SkAlign4(len));
+ int count = reader.read32();
+ const SkRSXform* xform = skip<SkRSXform>(reader, count);
+ const SkRect* cull = (packedVerb & 1) ? skip<SkRect>(reader) : nullptr;
+ SkPaint paint = read_paint(reader);
+ SkASSERT(paint.countText(text, len) == count);
+ canvas->drawTextRSXform(text, len, xform, cull, paint);
+}
+
+static void drawPatch_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawPatch == unpack_verb(packedVerb));
+ const SkColor* colors = nullptr;
+ const SkPoint* tex = nullptr;
+ const SkPoint* cubics = skip<SkPoint>(reader, 12);
+ if (packedVerb & kHasColors_DrawPatchExtraMask) {
+ colors = skip<SkColor>(reader, 4);
+ }
+ if (packedVerb & kHasTexture_DrawPatchExtraMask) {
+ tex = skip<SkPoint>(reader, 4);
+ }
+ sk_sp<SkXfermode> xfer;
+ unsigned mode = packedVerb & kModeEnum_DrawPatchExtraMask;
+ if (kExplicitXfer_DrawPatchExtraValue == mode) {
+ xfer = reader.readXfermode();
+ } else {
+ if (mode != SkXfermode::kSrcOver_Mode) {
+ xfer = SkXfermode::Make((SkXfermode::Mode)mode);
+ }
+ }
+ canvas->drawPatch(cubics, colors, tex, xfer.get(), read_paint(reader));
+}
+
+static void drawPaint_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawPaint == unpack_verb(packedVerb));
+ canvas->drawPaint(read_paint(reader));
+}
+
+static void drawRect_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawRect == unpack_verb(packedVerb));
+ const SkRect* rect = skip<SkRect>(reader);
+ canvas->drawRect(*rect, read_paint(reader));
+}
+
+static void drawRegion_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawRegion == unpack_verb(packedVerb));
+ size_t size = unpack_verb_extra(packedVerb);
+ if (0 == size) {
+ size = reader.read32();
+ }
+ SkRegion region;
+ region.readFromMemory(skip<char>(reader, SkAlign4(size)), size);
+ canvas->drawRegion(region, read_paint(reader));
+}
+
+static void drawOval_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawOval == unpack_verb(packedVerb));
+ const SkRect* rect = skip<SkRect>(reader);
+ canvas->drawOval(*rect, read_paint(reader));
+}
+
+static void drawRRect_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawRRect == unpack_verb(packedVerb));
+ SkRRect rrect = read_rrect(reader);
+ canvas->drawRRect(rrect, read_paint(reader));
+}
+
+static void drawPath_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawPath == unpack_verb(packedVerb));
+ SkPath path;
+ reader.readPath(&path);
+ canvas->drawPath(path, read_paint(reader));
+}
+
+static void drawPoints_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawPoints == unpack_verb(packedVerb));
+ SkCanvas::PointMode mode = (SkCanvas::PointMode)unpack_verb_extra(packedVerb);
+ int count = reader.read32();
+ const SkPoint* points = skip<SkPoint>(reader, count);
+ canvas->drawPoints(mode, count, points, read_paint(reader));
+}
+
+static void drawImage_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawImage == unpack_verb(packedVerb));
+ sk_sp<SkImage> image(reader.readImage());
+ SkScalar x = reader.readScalar();
+ SkScalar y = reader.readScalar();
+ SkPaint paintStorage, *paint = nullptr;
+ if (packedVerb & kHasPaint_DrawImageMask) {
+ paintStorage = read_paint(reader);
+ paint = &paintStorage;
+ }
+ canvas->drawImage(image, x, y, paint);
+}
+
+static void drawImageRect_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawImageRect == unpack_verb(packedVerb));
+ sk_sp<SkImage> image(reader.readImage());
+ SkCanvas::SrcRectConstraint constraint =
+ (SkCanvas::SrcRectConstraint)(packedVerb & kConstraint_DrawImageRectMask);
+ const SkRect* src = (packedVerb & kHasSrcRect_DrawImageRectMask) ?
+ skip<SkRect>(reader) : nullptr;
+ const SkRect* dst = skip<SkRect>(reader);
+ SkPaint paintStorage, *paint = nullptr;
+ if (packedVerb & kHasPaint_DrawImageRectMask) {
+ paintStorage = read_paint(reader);
+ paint = &paintStorage;
+ }
+ if (src) {
+ canvas->drawImageRect(image, *src, *dst, paint, constraint);
+ } else {
+ canvas->drawImageRect(image, *dst, paint);
+ }
+}
+
+static void drawImageNine_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawImageNine == unpack_verb(packedVerb));
+ sk_sp<SkImage> image(reader.readImage());
+ const SkIRect* center = skip<SkIRect>(reader);
+ const SkRect* dst = skip<SkRect>(reader);
+ SkPaint paintStorage, *paint = nullptr;
+ if (packedVerb & kHasPaint_DrawImageNineMask) {
+ paintStorage = read_paint(reader);
+ paint = &paintStorage;
+ }
+ canvas->drawImageNine(image, *center, *dst, paint);
+}
+
+static void drawImageLattice_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawImageLattice == unpack_verb(packedVerb));
+ sk_sp<SkImage> image(reader.readImage());
+
+ SkCanvas::Lattice lattice;
+ lattice.fXCount = (packedVerb >> kXCount_DrawImageLatticeShift) & kCount_DrawImageLatticeMask;
+ if (lattice.fXCount == kCount_DrawImageLatticeMask) {
+ lattice.fXCount = reader.read32();
+ }
+    lattice.fYCount = (packedVerb >> kYCount_DrawImageLatticeShift) & kCount_DrawImageLatticeMask;
+ if (lattice.fYCount == kCount_DrawImageLatticeMask) {
+ lattice.fYCount = reader.read32();
+ }
+ lattice.fXDivs = skip<int32_t>(reader, lattice.fXCount);
+ lattice.fYDivs = skip<int32_t>(reader, lattice.fYCount);
+ if (packedVerb & kHasFlags_DrawImageLatticeMask) {
+ int32_t count = (lattice.fXCount + 1) * (lattice.fYCount + 1);
+ SkASSERT(count > 0);
+ lattice.fFlags = skip<SkCanvas::Lattice::Flags>(reader, SkAlign4(count));
+ } else {
+ lattice.fFlags = nullptr;
+ }
+ lattice.fBounds = skip<SkIRect>(reader);
+ const SkRect* dst = skip<SkRect>(reader);
+
+ SkPaint paintStorage, *paint = nullptr;
+ if (packedVerb & kHasPaint_DrawImageLatticeMask) {
+ paintStorage = read_paint(reader);
+ paint = &paintStorage;
+ }
+ canvas->drawImageLattice(image.get(), lattice, *dst, paint);
+}
+
+static void drawVertices_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawVertices == unpack_verb(packedVerb));
+ SkCanvas::VertexMode vmode = (SkCanvas::VertexMode)
+ ((packedVerb & kVMode_DrawVerticesMask) >> kVMode_DrawVerticesShift);
+ int vertexCount = packedVerb & kVCount_DrawVerticesMask;
+ if (0 == vertexCount) {
+ vertexCount = reader.read32();
+ }
+ sk_sp<SkXfermode> xfer;
+ unsigned xmode = (packedVerb & kXMode_DrawVerticesMask) >> kXMode_DrawVerticesShift;
+ if (0xFF == xmode) {
+ xfer = reader.readXfermode();
+ } else {
+ xfer = SkXfermode::Make((SkXfermode::Mode)xmode);
+ }
+ const SkPoint* vertices = skip<SkPoint>(reader, vertexCount);
+ const SkPoint* texs = nullptr;
+ if (packedVerb & kHasTex_DrawVerticesMask) {
+ texs = skip<SkPoint>(reader, vertexCount);
+ }
+ const SkColor* colors = nullptr;
+ if (packedVerb & kHasColors_DrawVerticesMask) {
+ colors = skip<SkColor>(reader, vertexCount);
+ }
+ int indexCount = 0;
+ const uint16_t* indices = nullptr;
+ if (packedVerb & kHasIndices_DrawVerticesMask) {
+ indexCount = reader.read32();
+ indices = skip<uint16_t>(reader, indexCount);
+ }
+
+ canvas->drawVertices(vmode, vertexCount, vertices, texs, colors, xfer.get(),
+ indices, indexCount, read_paint(reader));
+}
+
+static void drawPicture_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawPicture == unpack_verb(packedVerb));
+ unsigned extra = unpack_verb_extra(packedVerb);
+ int index = extra & kIndex_ObjectDefinitionMask;
+ SkPicture* pic = reader.getInflator()->getPicture(index);
+ SkMatrix matrixStorage, *matrix = nullptr;
+ SkPaint paintStorage, *paint = nullptr;
+ if (extra & kHasMatrix_DrawPictureExtra) {
+ reader.readMatrix(&matrixStorage);
+ matrix = &matrixStorage;
+ }
+ if (extra & kHasPaint_DrawPictureExtra) {
+ paintStorage = read_paint(reader);
+ paint = &paintStorage;
+ }
+ canvas->drawPicture(pic, matrix, paint);
+}
+
+static void drawAnnotation_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDrawAnnotation == unpack_verb(packedVerb));
+ const SkRect* rect = skip<SkRect>(reader);
+
+ // len includes the key's trailing 0
+ uint32_t len = unpack_verb_extra(packedVerb) >> 1;
+ if (0 == len) {
+ len = reader.read32();
+ }
+ const char* key = skip<char>(reader, len);
+ sk_sp<SkData> data;
+ if (packedVerb & 1) {
+ uint32_t size = reader.read32();
+ data = SkData::MakeWithCopy(reader.skip(SkAlign4(size)), size);
+ }
+ canvas->drawAnnotation(*rect, key, data);
+}
+
+#if 0
+ stream.write("skiacodc", 8);
+ stream.write32(pmap.width());
+ stream.write32(pmap.height());
+ stream.write16(pmap.colorType());
+ stream.write16(pmap.alphaType());
+ stream.write32(0); // no colorspace for now
+ for (int y = 0; y < pmap.height(); ++y) {
+ stream.write(pmap.addr8(0, y), pmap.width());
+ }
+#endif
+
+static sk_sp<SkImage> make_from_skiaimageformat(const void* encoded, size_t encodedSize) {
+ if (encodedSize < 24) {
+ return nullptr;
+ }
+
+ SkMemoryStream stream(encoded, encodedSize);
+ char signature[8];
+ stream.read(signature, 8);
+ if (memcmp(signature, "skiaimgf", 8)) {
+ return nullptr;
+ }
+
+ int width = stream.readU32();
+ int height = stream.readU32();
+ SkColorType ct = (SkColorType)stream.readU16();
+ SkAlphaType at = (SkAlphaType)stream.readU16();
+ SkASSERT(kAlpha_8_SkColorType == ct);
+
+ SkDEBUGCODE(size_t colorSpaceSize =) stream.readU32();
+ SkASSERT(0 == colorSpaceSize);
+
+ SkImageInfo info = SkImageInfo::Make(width, height, ct, at);
+ size_t size = width * height;
+ sk_sp<SkData> pixels = SkData::MakeUninitialized(size);
+ stream.read(pixels->writable_data(), size);
+ SkASSERT(encodedSize == SkAlign4(stream.getPosition()));
+ return SkImage::MakeRasterData(info, pixels, width);
+}
+
+static sk_sp<SkImage> make_from_encoded(const sk_sp<SkData>& data) {
+ sk_sp<SkImage> image = make_from_skiaimageformat(data->data(), data->size());
+ if (!image) {
+ image = SkImage::MakeFromEncoded(data);
+ }
+ return image;
+}
+
+static void defineImage_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas*) {
+ SkASSERT(SkPipeVerb::kDefineImage == unpack_verb(packedVerb));
+ SkPipeInflator* inflator = (SkPipeInflator*)reader.getInflator();
+ uint32_t extra = unpack_verb_extra(packedVerb);
+ int index = extra & kIndex_ObjectDefinitionMask;
+
+ if (extra & kUndef_ObjectDefinitionMask) {
+ // zero-index means we are "forgetting" that cache entry
+ inflator->setImage(index, nullptr);
+ } else {
+ // we are defining a new image
+ sk_sp<SkData> data = reader.readByteArrayAsData();
+ sk_sp<SkImage> image = make_from_encoded(data);
+ if (!image) {
+ SkDebugf("-- failed to decode\n");
+ }
+ inflator->setImage(index, image.get());
+ }
+}
+
+sk_sp<SkTypeface> SkPipeInflator::makeTypeface(const void* data, size_t size) {
+ if (fTFDeserializer) {
+ return fTFDeserializer->deserialize(data, size);
+ }
+ SkMemoryStream stream(data, size, false);
+ return SkTypeface::MakeDeserialize(&stream);
+}
+
+static void defineTypeface_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDefineTypeface == unpack_verb(packedVerb));
+ SkPipeInflator* inflator = (SkPipeInflator*)reader.getInflator();
+ uint32_t extra = unpack_verb_extra(packedVerb);
+ int index = extra & kIndex_ObjectDefinitionMask;
+
+ if (extra & kUndef_ObjectDefinitionMask) {
+ // zero-index means we are "forgetting" that cache entry
+ inflator->setTypeface(index, nullptr);
+ } else {
+        // we are defining a new typeface
+ sk_sp<SkData> data = reader.readByteArrayAsData();
+ // TODO: seems like we could "peek" to see the array, and not need to copy it.
+ sk_sp<SkTypeface> tf = inflator->makeTypeface(data->data(), data->size());
+ inflator->setTypeface(index, tf.get());
+ }
+}
+
+static void defineFactory_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDefineFactory == unpack_verb(packedVerb));
+ SkPipeInflator* inflator = (SkPipeInflator*)reader.getInflator();
+ uint32_t extra = unpack_verb_extra(packedVerb);
+ int index = extra >> kNameLength_DefineFactoryExtraBits;
+ size_t len = extra & kNameLength_DefineFactoryExtraMask;
+ // +1 for the trailing null char
+ const char* name = (const char*)reader.skip(SkAlign4(len + 1));
+ SkFlattenable::Factory factory = reader.findFactory(name);
+ if (factory) {
+ inflator->setFactory(index, factory);
+ }
+}
+
+static void definePicture_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ SkASSERT(SkPipeVerb::kDefinePicture == unpack_verb(packedVerb));
+ int deleteIndex = unpack_verb_extra(packedVerb);
+
+ SkPipeInflator* inflator = (SkPipeInflator*)reader.getInflator();
+
+ if (deleteIndex) {
+ inflator->setPicture(deleteIndex - 1, nullptr);
+ } else {
+ SkPictureRecorder recorder;
+ int pictureIndex = -1; // invalid
+ const SkRect* cull = skip<SkRect>(reader);
+ do_playback(reader, recorder.beginRecording(*cull), &pictureIndex);
+ SkASSERT(pictureIndex > 0);
+ sk_sp<SkPicture> picture = recorder.finishRecordingAsPicture();
+ inflator->setPicture(pictureIndex, picture.get());
+ }
+}
+
+static void endPicture_handler(SkPipeReader& reader, uint32_t packedVerb, SkCanvas* canvas) {
+ sk_throw(); // never call me
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+struct HandlerRec {
+ SkPipeHandler fProc;
+ const char* fName;
+};
+
+#define HANDLER(name) { name##_handler, #name }
+const HandlerRec gPipeHandlers[] = {
+ HANDLER(save),
+ HANDLER(saveLayer),
+ HANDLER(restore),
+ HANDLER(concat),
+
+ HANDLER(clipRect),
+ HANDLER(clipRRect),
+ HANDLER(clipPath),
+ HANDLER(clipRegion),
+
+ HANDLER(drawArc),
+ HANDLER(drawAtlas),
+ HANDLER(drawDRRect),
+ HANDLER(drawText),
+ HANDLER(drawPosText),
+ HANDLER(drawPosTextH),
+ HANDLER(drawRegion),
+ HANDLER(drawTextOnPath),
+ HANDLER(drawTextBlob),
+ HANDLER(drawTextRSXform),
+ HANDLER(drawPatch),
+ HANDLER(drawPaint),
+ HANDLER(drawPoints),
+ HANDLER(drawRect),
+ HANDLER(drawPath),
+ HANDLER(drawOval),
+ HANDLER(drawRRect),
+
+ HANDLER(drawImage),
+ HANDLER(drawImageRect),
+ HANDLER(drawImageNine),
+ HANDLER(drawImageLattice),
+
+ HANDLER(drawVertices),
+
+ HANDLER(drawPicture),
+ HANDLER(drawAnnotation),
+
+ HANDLER(defineImage),
+ HANDLER(defineTypeface),
+ HANDLER(defineFactory),
+ HANDLER(definePicture),
+ HANDLER(endPicture), // handled special -- should never be called
+};
+#undef HANDLER
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+class SkPipeDeserializer::Impl {
+public:
+ SkRefSet<SkImage> fImages;
+ SkRefSet<SkPicture> fPictures;
+ SkRefSet<SkTypeface> fTypefaces;
+ SkTDArray<SkFlattenable::Factory> fFactories;
+
+ SkTypefaceDeserializer* fTFDeserializer = nullptr;
+};
+
+SkPipeDeserializer::SkPipeDeserializer() : fImpl(new Impl) {}
+SkPipeDeserializer::~SkPipeDeserializer() {}
+
+void SkPipeDeserializer::setTypefaceDeserializer(SkTypefaceDeserializer* tfd) {
+ fImpl->fTFDeserializer = tfd;
+}
+
+sk_sp<SkImage> SkPipeDeserializer::readImage(const void* data, size_t size) {
+ if (size < sizeof(uint32_t)) {
+ SkDebugf("-------- data length too short for readImage %d\n", size);
+ return nullptr;
+ }
+
+ const uint32_t* ptr = (const uint32_t*)data;
+ uint32_t packedVerb = *ptr++;
+ size -= 4;
+
+ if (SkPipeVerb::kDefineImage == unpack_verb(packedVerb)) {
+ SkPipeInflator inflator(&fImpl->fImages, &fImpl->fPictures,
+ &fImpl->fTypefaces, &fImpl->fFactories,
+ fImpl->fTFDeserializer);
+ SkPipeReader reader(this, ptr, size);
+ reader.setInflator(&inflator);
+ defineImage_handler(reader, packedVerb, nullptr);
+ packedVerb = reader.read32(); // read the next verb
+ }
+ if (SkPipeVerb::kWriteImage != unpack_verb(packedVerb)) {
+ SkDebugf("-------- unexpected verb for readImage %d\n", unpack_verb(packedVerb));
+ return nullptr;
+ }
+ int index = unpack_verb_extra(packedVerb);
+ if (0 == index) {
+ return nullptr; // writer failed
+ }
+ return sk_ref_sp(fImpl->fImages.get(index - 1));
+}
+
+sk_sp<SkPicture> SkPipeDeserializer::readPicture(const void* data, size_t size) {
+ if (size < sizeof(uint32_t)) {
+ SkDebugf("-------- data length too short for readPicture %d\n", size);
+ return nullptr;
+ }
+
+ const uint32_t* ptr = (const uint32_t*)data;
+ uint32_t packedVerb = *ptr++;
+ size -= 4;
+
+ if (SkPipeVerb::kDefinePicture == unpack_verb(packedVerb)) {
+ SkPipeInflator inflator(&fImpl->fImages, &fImpl->fPictures,
+ &fImpl->fTypefaces, &fImpl->fFactories,
+ fImpl->fTFDeserializer);
+ SkPipeReader reader(this, ptr, size);
+ reader.setInflator(&inflator);
+ definePicture_handler(reader, packedVerb, nullptr);
+ packedVerb = reader.read32(); // read the next verb
+ }
+ if (SkPipeVerb::kWritePicture != unpack_verb(packedVerb)) {
+ SkDebugf("-------- unexpected verb for readPicture %d\n", unpack_verb(packedVerb));
+ return nullptr;
+ }
+ int index = unpack_verb_extra(packedVerb);
+ if (0 == index) {
+ return nullptr; // writer failed
+ }
+ return sk_ref_sp(fImpl->fPictures.get(index - 1));
+}
+
+static bool do_playback(SkPipeReader& reader, SkCanvas* canvas, int* endPictureIndex) {
+ int indent = 0;
+
+ const bool showEachVerb = false;
+ int counter = 0;
+ while (!reader.eof()) {
+ uint32_t prevOffset = reader.offset();
+ uint32_t packedVerb = reader.read32();
+ SkPipeVerb verb = unpack_verb(packedVerb);
+ if ((unsigned)verb >= SK_ARRAY_COUNT(gPipeHandlers)) {
+ SkDebugf("------- bad verb %d\n", verb);
+ return false;
+ }
+ if (SkPipeVerb::kRestore == verb) {
+ indent -= 1;
+ SkASSERT(indent >= 0);
+ }
+
+ if (SkPipeVerb::kEndPicture == verb) {
+ if (endPictureIndex) {
+ *endPictureIndex = unpack_verb_extra(packedVerb);
+ }
+ return true;
+ }
+ HandlerRec rec = gPipeHandlers[(unsigned)verb];
+ rec.fProc(reader, packedVerb, canvas);
+ if (showEachVerb) {
+ for (int i = 0; i < indent; ++i) {
+ SkDebugf(" ");
+ }
+ SkDebugf("%d [%d] %s %d\n", prevOffset, counter++, rec.fName, reader.offset() - prevOffset);
+ }
+ if (!reader.isValid()) {
+ SkDebugf("-------- bad reader\n");
+ return false;
+ }
+
+ switch (verb) {
+ case SkPipeVerb::kSave:
+ case SkPipeVerb::kSaveLayer:
+ indent += 1;
+ break;
+ default:
+ break;
+ }
+ }
+ return true;
+}
+
+bool SkPipeDeserializer::playback(const void* data, size_t size, SkCanvas* canvas) {
+ SkPipeInflator inflator(&fImpl->fImages, &fImpl->fPictures,
+ &fImpl->fTypefaces, &fImpl->fFactories,
+ fImpl->fTFDeserializer);
+ SkPipeReader reader(this, data, size);
+ reader.setInflator(&inflator);
+ return do_playback(reader, canvas);
+}
+
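A matching sketch for the reader side, assuming only the SkPipeDeserializer API defined in this file; the helper name replay_pipe is hypothetical, and a real caller would also install a typeface deserializer if the writer used a custom SkTypefaceSerializer.

    #include "SkCanvas.h"
    #include "SkData.h"
    #include "SkPipe.h"

    // Replay a serialized pipe stream onto a destination canvas.
    static bool replay_pipe(const sk_sp<SkData>& blob, SkCanvas* target) {
        SkPipeDeserializer deserializer;
        // setTypefaceDeserializer(...) could be installed here if the writer used one.
        return deserializer.playback(blob->data(), blob->size(), target);
    }
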
diff --git a/gfx/skia/skia/src/pipe/SkRefSet.h b/gfx/skia/skia/src/pipe/SkRefSet.h
new file mode 100644
index 000000000..5f23ba219
--- /dev/null
+++ b/gfx/skia/skia/src/pipe/SkRefSet.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRefSet_DEFINED
+#define SkRefSet_DEFINED
+
+#include "SkRefCnt.h"
+#include "SkTDArray.h"
+
+template <typename T> class SkRefSet {
+public:
+ ~SkRefSet() { fArray.unrefAll(); }
+
+ T* get(int index) const {
+ SkASSERT((unsigned)index < (unsigned)fArray.count());
+ return fArray[index];
+ }
+
+ bool set(int index, T* value) {
+ if ((unsigned)index < (unsigned)fArray.count()) {
+ SkRefCnt_SafeAssign(fArray[index], value);
+ return true;
+ }
+ if (fArray.count() == index && value) {
+ *fArray.append() = SkRef(value);
+ return true;
+ }
+ SkDebugf("SkRefSet: index [%d] out of range %d\n", index, fArray.count());
+ return false;
+ }
+
+private:
+ SkTDArray<T*> fArray;
+};
+
+#endif
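
SkRefSet only accepts set() at an existing slot or at exactly the next index (a strict append), and get() returns a borrowed pointer without adding a reference. A hypothetical illustration of that contract; SkTypeface::MakeDefault() is assumed only as a convenient ref-counted object to store:

    SkRefSet<SkTypeface> faces;
    sk_sp<SkTypeface> tf = SkTypeface::MakeDefault();
    SkASSERT(faces.set(0, tf.get()));    // appends: index == count()
    SkASSERT(!faces.set(5, tf.get()));   // rejected: out of range, logs and returns false
    SkASSERT(faces.get(0) == tf.get());  // borrowed pointer; the set holds its own ref
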
diff --git a/gfx/skia/skia/src/ports/SkDebug_android.cpp b/gfx/skia/skia/src/ports/SkDebug_android.cpp
new file mode 100644
index 000000000..0a1b59a2c
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkDebug_android.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_ANDROID)
+
+#include <stdio.h>
+
+#define LOG_TAG "skia"
+#include <android/log.h>
+
+// Print debug output to stdout as well. This is useful for command line
+// applications (e.g. skia_launcher). To enable, include android_output as a
+// gyp dependency.
+bool gSkDebugToStdOut = false;
+
+void SkDebugf(const char format[], ...) {
+ va_list args1, args2;
+ va_start(args1, format);
+
+ if (gSkDebugToStdOut) {
+ va_copy(args2, args1);
+ vprintf(format, args2);
+ va_end(args2);
+ }
+
+ __android_log_vprint(ANDROID_LOG_DEBUG, LOG_TAG, format, args1);
+
+ va_end(args1);
+}
+
+#endif//defined(SK_BUILD_FOR_ANDROID)
diff --git a/gfx/skia/skia/src/ports/SkDebug_stdio.cpp b/gfx/skia/skia/src/ports/SkDebug_stdio.cpp
new file mode 100644
index 000000000..230c5f2c5
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkDebug_stdio.cpp
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if !defined(SK_BUILD_FOR_WIN32) && !defined(SK_BUILD_FOR_ANDROID)
+
+#include <stdarg.h>
+#include <stdio.h>
+
+void SkDebugf(const char format[], ...) {
+ va_list args;
+ va_start(args, format);
+ vfprintf(stderr, format, args);
+ va_end(args);
+}
+#endif//!defined(SK_BUILD_FOR_WIN32) && !defined(SK_BUILD_FOR_ANDROID)
diff --git a/gfx/skia/skia/src/ports/SkDebug_win.cpp b/gfx/skia/skia/src/ports/SkDebug_win.cpp
new file mode 100644
index 000000000..da1e2b573
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkDebug_win.cpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+
+#if defined(SK_BUILD_FOR_WIN32)
+
+#include "SkLeanWindows.h"
+
+#include <stdarg.h>
+#include <stdio.h>
+
+static const size_t kBufferSize = 2048;
+
+void SkDebugf(const char format[], ...) {
+ char buffer[kBufferSize + 1];
+ va_list args;
+
+ va_start(args, format);
+ vfprintf(stderr, format, args);
+ va_end(args);
+ fflush(stderr); // stderr seems to be buffered on Windows.
+
+ va_start(args, format);
+ vsnprintf(buffer, kBufferSize, format, args);
+ va_end(args);
+
+ OutputDebugStringA(buffer);
+}
+#endif//defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/ports/SkDiscardableMemory_none.cpp b/gfx/skia/skia/src/ports/SkDiscardableMemory_none.cpp
new file mode 100644
index 000000000..7e5c0aead
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkDiscardableMemory_none.cpp
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkDiscardableMemory.h"
+#include "SkDiscardableMemoryPool.h"
+#include "SkTypes.h"
+
+SkDiscardableMemory* SkDiscardableMemory::Create(size_t bytes) {
+ return SkGetGlobalDiscardableMemoryPool()->create(bytes);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontConfigInterface.cpp b/gfx/skia/skia/src/ports/SkFontConfigInterface.cpp
new file mode 100644
index 000000000..5b8731c3d
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontConfigInterface.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFontConfigInterface.h"
+#include "SkFontMgr.h"
+#include "SkMutex.h"
+#include "SkRefCnt.h"
+
+SK_DECLARE_STATIC_MUTEX(gFontConfigInterfaceMutex);
+static SkFontConfigInterface* gFontConfigInterface;
+
+SkFontConfigInterface* SkFontConfigInterface::RefGlobal() {
+ SkAutoMutexAcquire ac(gFontConfigInterfaceMutex);
+
+ if (gFontConfigInterface) {
+ return SkRef(gFontConfigInterface);
+ }
+ return SkSafeRef(SkFontConfigInterface::GetSingletonDirectInterface());
+}
+
+SkFontConfigInterface* SkFontConfigInterface::SetGlobal(SkFontConfigInterface* fc) {
+ SkAutoMutexAcquire ac(gFontConfigInterfaceMutex);
+
+ SkRefCnt_SafeAssign(gFontConfigInterface, fc);
+ return fc;
+}
diff --git a/gfx/skia/skia/src/ports/SkFontConfigInterface_direct.cpp b/gfx/skia/skia/src/ports/SkFontConfigInterface_direct.cpp
new file mode 100644
index 000000000..df68fae0a
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontConfigInterface_direct.cpp
@@ -0,0 +1,735 @@
+/*
+ * Copyright 2009-2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/* migrated from chrome/src/skia/ext/SkFontHost_fontconfig_direct.cpp */
+
+#include "SkBuffer.h"
+#include "SkDataTable.h"
+#include "SkFixed.h"
+#include "SkFontConfigInterface_direct.h"
+#include "SkFontStyle.h"
+#include "SkMutex.h"
+#include "SkStream.h"
+#include "SkString.h"
+#include "SkTArray.h"
+#include "SkTDArray.h"
+#include "SkTemplates.h"
+#include "SkTypeface.h"
+#include "SkTypes.h"
+
+#include <fontconfig/fontconfig.h>
+#include <unistd.h>
+
+#ifdef SK_DEBUG
+# include "SkTLS.h"
+#endif
+
+namespace {
+
+// Fontconfig is not threadsafe before 2.10.91. Before that, we lock with a global mutex.
+// See https://bug.skia.org/1497 for background.
+SK_DECLARE_STATIC_MUTEX(gFCMutex);
+
+#ifdef SK_DEBUG
+void* CreateThreadFcLocked() { return new bool(false); }
+void DeleteThreadFcLocked(void* v) { delete static_cast<bool*>(v); }
+# define THREAD_FC_LOCKED \
+ static_cast<bool*>(SkTLS::Get(CreateThreadFcLocked, DeleteThreadFcLocked))
+#endif
+
+struct FCLocker {
+ // Assume FcGetVersion() has always been thread safe.
+
+ FCLocker() {
+ if (FcGetVersion() < 21091) {
+ gFCMutex.acquire();
+ } else {
+ SkDEBUGCODE(bool* threadLocked = THREAD_FC_LOCKED);
+ SkASSERT(false == *threadLocked);
+ SkDEBUGCODE(*threadLocked = true);
+ }
+ }
+
+ ~FCLocker() {
+ AssertHeld();
+ if (FcGetVersion() < 21091) {
+ gFCMutex.release();
+ } else {
+ SkDEBUGCODE(*THREAD_FC_LOCKED = false);
+ }
+ }
+
+ static void AssertHeld() { SkDEBUGCODE(
+ if (FcGetVersion() < 21091) {
+ gFCMutex.assertHeld();
+ } else {
+ SkASSERT(true == *THREAD_FC_LOCKED);
+ }
+ ) }
+};
+
+} // namespace
+
+size_t SkFontConfigInterface::FontIdentity::writeToMemory(void* addr) const {
+ size_t size = sizeof(fID) + sizeof(fTTCIndex);
+ size += sizeof(int32_t) + sizeof(int32_t) + sizeof(uint8_t); // weight, width, italic
+ size += sizeof(int32_t) + fString.size(); // store length+data
+ if (addr) {
+ SkWBuffer buffer(addr, size);
+
+ buffer.write32(fID);
+ buffer.write32(fTTCIndex);
+ buffer.write32(fString.size());
+ buffer.write32(fStyle.weight());
+ buffer.write32(fStyle.width());
+ buffer.write8(fStyle.slant());
+ buffer.write(fString.c_str(), fString.size());
+ buffer.padToAlign4();
+
+ SkASSERT(buffer.pos() == size);
+ }
+ return size;
+}
+
+size_t SkFontConfigInterface::FontIdentity::readFromMemory(const void* addr,
+ size_t size) {
+ SkRBuffer buffer(addr, size);
+
+ (void)buffer.readU32(&fID);
+ (void)buffer.readS32(&fTTCIndex);
+ uint32_t strLen, weight, width;
+ (void)buffer.readU32(&strLen);
+ (void)buffer.readU32(&weight);
+ (void)buffer.readU32(&width);
+ uint8_t u8;
+ (void)buffer.readU8(&u8);
+ SkFontStyle::Slant slant = (SkFontStyle::Slant)u8;
+ fStyle = SkFontStyle(weight, width, slant);
+ fString.resize(strLen);
+ (void)buffer.read(fString.writable_str(), strLen);
+ buffer.skipToAlign4();
+
+ return buffer.pos(); // the actual number of bytes read
+}
+
+#ifdef SK_DEBUG
+static void make_iden(SkFontConfigInterface::FontIdentity* iden) {
+ iden->fID = 10;
+ iden->fTTCIndex = 2;
+ iden->fString.set("Hello world");
+ iden->fStyle = SkFontStyle(300, 6, SkFontStyle::kItalic_Slant);
+}
+
+static void test_writeToMemory(const SkFontConfigInterface::FontIdentity& iden0,
+ int initValue) {
+ SkFontConfigInterface::FontIdentity iden1;
+
+ size_t size0 = iden0.writeToMemory(nullptr);
+
+ SkAutoMalloc storage(size0);
+ memset(storage.get(), initValue, size0);
+
+ size_t size1 = iden0.writeToMemory(storage.get());
+ SkASSERT(size0 == size1);
+
+ SkASSERT(iden0 != iden1);
+ size_t size2 = iden1.readFromMemory(storage.get(), size1);
+ SkASSERT(size2 == size1);
+ SkASSERT(iden0 == iden1);
+}
+
+static void fontconfiginterface_unittest() {
+ SkFontConfigInterface::FontIdentity iden0, iden1;
+
+ SkASSERT(iden0 == iden1);
+
+ make_iden(&iden0);
+ SkASSERT(iden0 != iden1);
+
+ make_iden(&iden1);
+ SkASSERT(iden0 == iden1);
+
+ test_writeToMemory(iden0, 0);
+ test_writeToMemory(iden0, 0);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Returns the string from the pattern, or nullptr
+static const char* get_string(FcPattern* pattern, const char field[], int index = 0) {
+ const char* name;
+ if (FcPatternGetString(pattern, field, index, (FcChar8**)&name) != FcResultMatch) {
+ name = nullptr;
+ }
+ return name;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+// Equivalence classes, used to match the Liberation and other fonts
+// with their metric-compatible replacements. See the discussion in
+// GetFontEquivClass().
+enum FontEquivClass
+{
+ OTHER,
+ SANS,
+ SERIF,
+ MONO,
+ SYMBOL,
+ PGOTHIC,
+ GOTHIC,
+ PMINCHO,
+ MINCHO,
+ SIMSUN,
+ NSIMSUN,
+ SIMHEI,
+ PMINGLIU,
+ MINGLIU,
+ PMINGLIUHK,
+ MINGLIUHK,
+ CAMBRIA,
+ CALIBRI,
+};
+
+// Match the font name against a whitelist of fonts, returning the equivalence
+// class.
+FontEquivClass GetFontEquivClass(const char* fontname)
+{
+ // It would be nice for fontconfig to tell us whether a given suggested
+ // replacement is a "strong" match (that is, an equivalent font) or
+ // a "weak" match (that is, fontconfig's next-best attempt at finding a
+ // substitute). However, I played around with the fontconfig API for
+ // a good few hours and could not make it reveal this information.
+ //
+ // So instead, we hardcode. Initially this function emulated
+ // /etc/fonts/conf.d/30-metric-aliases.conf
+ // from my Ubuntu system, but we're better off being very conservative.
+
+ // Arimo, Tinos and Cousine are a set of fonts metric-compatible with
+ // Arial, Times New Roman and Courier New with a character repertoire
+ // much larger than Liberation. Note that Cousine is metrically
+ // compatible with Courier New, but the former is sans-serif while
+ // the latter is serif.
+
+
+ struct FontEquivMap {
+ FontEquivClass clazz;
+ const char name[40];
+ };
+
+ static const FontEquivMap kFontEquivMap[] = {
+ { SANS, "Arial" },
+ { SANS, "Arimo" },
+ { SANS, "Liberation Sans" },
+
+ { SERIF, "Times New Roman" },
+ { SERIF, "Tinos" },
+ { SERIF, "Liberation Serif" },
+
+ { MONO, "Courier New" },
+ { MONO, "Cousine" },
+ { MONO, "Liberation Mono" },
+
+ { SYMBOL, "Symbol" },
+ { SYMBOL, "Symbol Neu" },
+
+ // MS Pゴシック
+ { PGOTHIC, "MS PGothic" },
+ { PGOTHIC, "\xef\xbc\xad\xef\xbc\xb3 \xef\xbc\xb0"
+ "\xe3\x82\xb4\xe3\x82\xb7\xe3\x83\x83\xe3\x82\xaf" },
+ { PGOTHIC, "Noto Sans CJK JP" },
+ { PGOTHIC, "IPAPGothic" },
+ { PGOTHIC, "MotoyaG04Gothic" },
+
+ // MS ゴシック
+ { GOTHIC, "MS Gothic" },
+ { GOTHIC, "\xef\xbc\xad\xef\xbc\xb3 "
+ "\xe3\x82\xb4\xe3\x82\xb7\xe3\x83\x83\xe3\x82\xaf" },
+ { GOTHIC, "Noto Sans Mono CJK JP" },
+ { GOTHIC, "IPAGothic" },
+ { GOTHIC, "MotoyaG04GothicMono" },
+
+ // MS P明朝
+ { PMINCHO, "MS PMincho" },
+ { PMINCHO, "\xef\xbc\xad\xef\xbc\xb3 \xef\xbc\xb0"
+ "\xe6\x98\x8e\xe6\x9c\x9d"},
+ { PMINCHO, "IPAPMincho" },
+ { PMINCHO, "MotoyaG04Mincho" },
+
+ // MS 明朝
+ { MINCHO, "MS Mincho" },
+ { MINCHO, "\xef\xbc\xad\xef\xbc\xb3 \xe6\x98\x8e\xe6\x9c\x9d" },
+ { MINCHO, "IPAMincho" },
+ { MINCHO, "MotoyaG04MinchoMono" },
+
+ // 宋体
+ { SIMSUN, "Simsun" },
+ { SIMSUN, "\xe5\xae\x8b\xe4\xbd\x93" },
+ { SIMSUN, "MSung GB18030" },
+ { SIMSUN, "Song ASC" },
+
+ // 新宋体
+ { NSIMSUN, "NSimsun" },
+ { NSIMSUN, "\xe6\x96\xb0\xe5\xae\x8b\xe4\xbd\x93" },
+ { NSIMSUN, "MSung GB18030" },
+ { NSIMSUN, "N Song ASC" },
+
+ // 黑体
+ { SIMHEI, "Simhei" },
+ { SIMHEI, "\xe9\xbb\x91\xe4\xbd\x93" },
+ { SIMHEI, "Noto Sans CJK SC" },
+ { SIMHEI, "MYingHeiGB18030" },
+ { SIMHEI, "MYingHeiB5HK" },
+
+ // 新細明體
+ { PMINGLIU, "PMingLiU"},
+ { PMINGLIU, "\xe6\x96\xb0\xe7\xb4\xb0\xe6\x98\x8e\xe9\xab\x94" },
+ { PMINGLIU, "MSung B5HK"},
+
+ // 細明體
+ { MINGLIU, "MingLiU"},
+ { MINGLIU, "\xe7\xb4\xb0\xe6\x98\x8e\xe9\xab\x94" },
+ { MINGLIU, "MSung B5HK"},
+
+ // 新細明體
+ { PMINGLIUHK, "PMingLiU_HKSCS"},
+ { PMINGLIUHK, "\xe6\x96\xb0\xe7\xb4\xb0\xe6\x98\x8e\xe9\xab\x94_HKSCS" },
+ { PMINGLIUHK, "MSung B5HK"},
+
+ // 細明體
+ { MINGLIUHK, "MingLiU_HKSCS"},
+ { MINGLIUHK, "\xe7\xb4\xb0\xe6\x98\x8e\xe9\xab\x94_HKSCS" },
+ { MINGLIUHK, "MSung B5HK"},
+
+ // Cambria
+ { CAMBRIA, "Cambria" },
+ { CAMBRIA, "Caladea" },
+
+ // Calibri
+ { CALIBRI, "Calibri" },
+ { CALIBRI, "Carlito" },
+ };
+
+ static const size_t kFontCount =
+ sizeof(kFontEquivMap)/sizeof(kFontEquivMap[0]);
+
+ // TODO(jungshik): If this loop turns out to be hot, turn
+ // the array to a static (hash)map to speed it up.
+ for (size_t i = 0; i < kFontCount; ++i) {
+ if (strcasecmp(kFontEquivMap[i].name, fontname) == 0)
+ return kFontEquivMap[i].clazz;
+ }
+ return OTHER;
+}
+
+
+// Return true if |font_a| and |font_b| are visually and at the metrics
+// level interchangeable.
+bool IsMetricCompatibleReplacement(const char* font_a, const char* font_b)
+{
+ FontEquivClass class_a = GetFontEquivClass(font_a);
+ FontEquivClass class_b = GetFontEquivClass(font_b);
+
+ return class_a != OTHER && class_a == class_b;
+}
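+
+// For example, with the table above:
+//   GetFontEquivClass("Arial")       == SANS
+//   GetFontEquivClass("Arimo")       == SANS
+//   GetFontEquivClass("DejaVu Sans") == OTHER   (not listed)
+// so IsMetricCompatibleReplacement("Arial", "Arimo") is true, while
+// IsMetricCompatibleReplacement("Arial", "DejaVu Sans") is false.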
+
+// Normally we only return exactly the font asked for. In last-resort
+// cases, the request either doesn't specify a font or is one of the
+// basic font names like "Sans", "Serif" or "Monospace". This function
+// tells you whether a given request is for such a fallback.
+bool IsFallbackFontAllowed(const SkString& family) {
+ const char* family_cstr = family.c_str();
+ return family.isEmpty() ||
+ strcasecmp(family_cstr, "sans") == 0 ||
+ strcasecmp(family_cstr, "serif") == 0 ||
+ strcasecmp(family_cstr, "monospace") == 0;
+}
+
+// Retrieves the integer property |object| from |pattern|, returning |missing| if it is not set.
+static int get_int(FcPattern* pattern, const char object[], int missing) {
+ int value;
+ if (FcPatternGetInteger(pattern, object, 0, &value) != FcResultMatch) {
+ return missing;
+ }
+ return value;
+}
+
+static int map_range(SkFixed value,
+ SkFixed old_min, SkFixed old_max,
+ SkFixed new_min, SkFixed new_max)
+{
+ SkASSERT(old_min < old_max);
+ SkASSERT(new_min <= new_max);
+ return new_min + SkMulDiv(value - old_min, new_max - new_min, old_max - old_min);
+}
+
+struct MapRanges {
+ SkFixed old_val;
+ SkFixed new_val;
+};
+
+static SkFixed map_ranges_fixed(SkFixed val, MapRanges const ranges[], int rangesCount) {
+ // -Inf to [0]
+ if (val < ranges[0].old_val) {
+ return ranges[0].new_val;
+ }
+
+ // Linear from [i] to [i+1]
+ for (int i = 0; i < rangesCount - 1; ++i) {
+ if (val < ranges[i+1].old_val) {
+ return map_range(val, ranges[i].old_val, ranges[i+1].old_val,
+ ranges[i].new_val, ranges[i+1].new_val);
+ }
+ }
+
+ // From [n] to +Inf
+ // if (fcweight < Inf)
+ return ranges[rangesCount-1].new_val;
+}
+
+static int map_ranges(int val, MapRanges const ranges[], int rangesCount) {
+ return SkFixedRoundToInt(map_ranges_fixed(SkIntToFixed(val), ranges, rangesCount));
+}
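+
+// Worked example, assuming fontconfig's usual constants FC_WEIGHT_DEMIBOLD == 180
+// and FC_WEIGHT_BOLD == 200, which the tables below pair with SkFontStyle 600 and
+// 700: a pattern weight of 190 sits halfway between those two anchors, so
+// map_ranges(190, weightRanges, ...) comes out near 650. Values below the first
+// anchor or above the last one simply take that anchor's mapped value.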
+
+template<int n> struct SkTFixed {
+ static_assert(-32768 <= n && n <= 32767, "SkTFixed_n_not_in_range");
+ static const SkFixed value = static_cast<SkFixed>(n << 16);
+};
+
+static SkFontStyle skfontstyle_from_fcpattern(FcPattern* pattern) {
+ typedef SkFontStyle SkFS;
+
+ static const MapRanges weightRanges[] = {
+ { SkTFixed<FC_WEIGHT_THIN>::value, SkTFixed<SkFS::kThin_Weight>::value },
+ { SkTFixed<FC_WEIGHT_EXTRALIGHT>::value, SkTFixed<SkFS::kExtraLight_Weight>::value },
+ { SkTFixed<FC_WEIGHT_LIGHT>::value, SkTFixed<SkFS::kLight_Weight>::value },
+ { SkTFixed<FC_WEIGHT_REGULAR>::value, SkTFixed<SkFS::kNormal_Weight>::value },
+ { SkTFixed<FC_WEIGHT_MEDIUM>::value, SkTFixed<SkFS::kMedium_Weight>::value },
+ { SkTFixed<FC_WEIGHT_DEMIBOLD>::value, SkTFixed<SkFS::kSemiBold_Weight>::value },
+ { SkTFixed<FC_WEIGHT_BOLD>::value, SkTFixed<SkFS::kBold_Weight>::value },
+ { SkTFixed<FC_WEIGHT_EXTRABOLD>::value, SkTFixed<SkFS::kExtraBold_Weight>::value },
+ { SkTFixed<FC_WEIGHT_BLACK>::value, SkTFixed<SkFS::kBlack_Weight>::value },
+ { SkTFixed<FC_WEIGHT_EXTRABLACK>::value, SkTFixed<SkFS::kExtraBlack_Weight>::value },
+ };
+ int weight = map_ranges(get_int(pattern, FC_WEIGHT, FC_WEIGHT_REGULAR),
+ weightRanges, SK_ARRAY_COUNT(weightRanges));
+
+ static const MapRanges widthRanges[] = {
+ { SkTFixed<FC_WIDTH_ULTRACONDENSED>::value, SkTFixed<SkFS::kUltraCondensed_Width>::value },
+ { SkTFixed<FC_WIDTH_EXTRACONDENSED>::value, SkTFixed<SkFS::kExtraCondensed_Width>::value },
+ { SkTFixed<FC_WIDTH_CONDENSED>::value, SkTFixed<SkFS::kCondensed_Width>::value },
+ { SkTFixed<FC_WIDTH_SEMICONDENSED>::value, SkTFixed<SkFS::kSemiCondensed_Width>::value },
+ { SkTFixed<FC_WIDTH_NORMAL>::value, SkTFixed<SkFS::kNormal_Width>::value },
+ { SkTFixed<FC_WIDTH_SEMIEXPANDED>::value, SkTFixed<SkFS::kSemiExpanded_Width>::value },
+ { SkTFixed<FC_WIDTH_EXPANDED>::value, SkTFixed<SkFS::kExpanded_Width>::value },
+ { SkTFixed<FC_WIDTH_EXTRAEXPANDED>::value, SkTFixed<SkFS::kExtraExpanded_Width>::value },
+ { SkTFixed<FC_WIDTH_ULTRAEXPANDED>::value, SkTFixed<SkFS::kUltraExpanded_Width>::value },
+ };
+ int width = map_ranges(get_int(pattern, FC_WIDTH, FC_WIDTH_NORMAL),
+ widthRanges, SK_ARRAY_COUNT(widthRanges));
+
+ SkFS::Slant slant = SkFS::kUpright_Slant;
+ switch (get_int(pattern, FC_SLANT, FC_SLANT_ROMAN)) {
+ case FC_SLANT_ROMAN: slant = SkFS::kUpright_Slant; break;
+ case FC_SLANT_ITALIC : slant = SkFS::kItalic_Slant ; break;
+ case FC_SLANT_OBLIQUE: slant = SkFS::kOblique_Slant; break;
+ default: SkASSERT(false); break;
+ }
+
+ return SkFontStyle(weight, width, slant);
+}
+
+static void fcpattern_from_skfontstyle(SkFontStyle style, FcPattern* pattern) {
+ typedef SkFontStyle SkFS;
+
+ static const MapRanges weightRanges[] = {
+ { SkTFixed<SkFS::kThin_Weight>::value, SkTFixed<FC_WEIGHT_THIN>::value },
+ { SkTFixed<SkFS::kExtraLight_Weight>::value, SkTFixed<FC_WEIGHT_EXTRALIGHT>::value },
+ { SkTFixed<SkFS::kLight_Weight>::value, SkTFixed<FC_WEIGHT_LIGHT>::value },
+ { SkTFixed<SkFS::kNormal_Weight>::value, SkTFixed<FC_WEIGHT_REGULAR>::value },
+ { SkTFixed<SkFS::kMedium_Weight>::value, SkTFixed<FC_WEIGHT_MEDIUM>::value },
+ { SkTFixed<SkFS::kSemiBold_Weight>::value, SkTFixed<FC_WEIGHT_DEMIBOLD>::value },
+ { SkTFixed<SkFS::kBold_Weight>::value, SkTFixed<FC_WEIGHT_BOLD>::value },
+ { SkTFixed<SkFS::kExtraBold_Weight>::value, SkTFixed<FC_WEIGHT_EXTRABOLD>::value },
+ { SkTFixed<SkFS::kBlack_Weight>::value, SkTFixed<FC_WEIGHT_BLACK>::value },
+ { SkTFixed<SkFS::kExtraBlack_Weight>::value, SkTFixed<FC_WEIGHT_EXTRABLACK>::value },
+ };
+ int weight = map_ranges(style.weight(), weightRanges, SK_ARRAY_COUNT(weightRanges));
+
+ static const MapRanges widthRanges[] = {
+ { SkTFixed<SkFS::kUltraCondensed_Width>::value, SkTFixed<FC_WIDTH_ULTRACONDENSED>::value },
+ { SkTFixed<SkFS::kExtraCondensed_Width>::value, SkTFixed<FC_WIDTH_EXTRACONDENSED>::value },
+ { SkTFixed<SkFS::kCondensed_Width>::value, SkTFixed<FC_WIDTH_CONDENSED>::value },
+ { SkTFixed<SkFS::kSemiCondensed_Width>::value, SkTFixed<FC_WIDTH_SEMICONDENSED>::value },
+ { SkTFixed<SkFS::kNormal_Width>::value, SkTFixed<FC_WIDTH_NORMAL>::value },
+ { SkTFixed<SkFS::kSemiExpanded_Width>::value, SkTFixed<FC_WIDTH_SEMIEXPANDED>::value },
+ { SkTFixed<SkFS::kExpanded_Width>::value, SkTFixed<FC_WIDTH_EXPANDED>::value },
+ { SkTFixed<SkFS::kExtraExpanded_Width>::value, SkTFixed<FC_WIDTH_EXTRAEXPANDED>::value },
+ { SkTFixed<SkFS::kUltraExpanded_Width>::value, SkTFixed<FC_WIDTH_ULTRAEXPANDED>::value },
+ };
+ int width = map_ranges(style.width(), widthRanges, SK_ARRAY_COUNT(widthRanges));
+
+ int slant = FC_SLANT_ROMAN;
+ switch (style.slant()) {
+ case SkFS::kUpright_Slant: slant = FC_SLANT_ROMAN ; break;
+ case SkFS::kItalic_Slant : slant = FC_SLANT_ITALIC ; break;
+ case SkFS::kOblique_Slant: slant = FC_SLANT_OBLIQUE; break;
+ default: SkASSERT(false); break;
+ }
+
+ FcPatternAddInteger(pattern, FC_WEIGHT, weight);
+ FcPatternAddInteger(pattern, FC_WIDTH , width);
+ FcPatternAddInteger(pattern, FC_SLANT , slant);
+}
+
+} // anonymous namespace
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define kMaxFontFamilyLength 2048
+#ifdef SK_FONT_CONFIG_INTERFACE_ONLY_ALLOW_SFNT_FONTS
+const char* kFontFormatTrueType = "TrueType";
+const char* kFontFormatCFF = "CFF";
+#endif
+
+SkFontConfigInterfaceDirect::SkFontConfigInterfaceDirect() {
+ FCLocker lock;
+
+ FcInit();
+
+ SkDEBUGCODE(fontconfiginterface_unittest();)
+}
+
+SkFontConfigInterfaceDirect::~SkFontConfigInterfaceDirect() {
+}
+
+bool SkFontConfigInterfaceDirect::isAccessible(const char* filename) {
+ if (access(filename, R_OK) != 0) {
+ return false;
+ }
+ return true;
+}
+
+bool SkFontConfigInterfaceDirect::isValidPattern(FcPattern* pattern) {
+#ifdef SK_FONT_CONFIG_INTERFACE_ONLY_ALLOW_SFNT_FONTS
+ const char* font_format = get_string(pattern, FC_FONTFORMAT);
+ if (font_format
+ && strcmp(font_format, kFontFormatTrueType) != 0
+ && strcmp(font_format, kFontFormatCFF) != 0)
+ {
+ return false;
+ }
+#endif
+
+ // fontconfig can also return fonts which are unreadable
+ const char* c_filename = get_string(pattern, FC_FILE);
+ if (!c_filename) {
+ return false;
+ }
+ return this->isAccessible(c_filename);
+}
+
+// Find matching font from |font_set| for the given font family.
+FcPattern* SkFontConfigInterfaceDirect::MatchFont(FcFontSet* font_set,
+ const char* post_config_family,
+ const SkString& family) {
+ // Older versions of fontconfig have a bug where they cannot select
+ // only scalable fonts so we have to manually filter the results.
+ FcPattern* match = nullptr;
+ for (int i = 0; i < font_set->nfont; ++i) {
+ FcPattern* current = font_set->fonts[i];
+ if (this->isValidPattern(current)) {
+ match = current;
+ break;
+ }
+ }
+
+ if (match && !IsFallbackFontAllowed(family)) {
+ bool acceptable_substitute = false;
+ for (int id = 0; id < 255; ++id) {
+ const char* post_match_family = get_string(match, FC_FAMILY, id);
+ if (!post_match_family)
+ break;
+ acceptable_substitute =
+ (strcasecmp(post_config_family, post_match_family) == 0 ||
+ // Workaround for Issue 12530:
+ // requested family: "Bitstream Vera Sans"
+ // post_config_family: "Arial"
+ // post_match_family: "Bitstream Vera Sans"
+ // -> We should treat this case as a good match.
+ strcasecmp(family.c_str(), post_match_family) == 0) ||
+ IsMetricCompatibleReplacement(family.c_str(), post_match_family);
+ if (acceptable_substitute)
+ break;
+ }
+ if (!acceptable_substitute)
+ return nullptr;
+ }
+
+ return match;
+}
+
+bool SkFontConfigInterfaceDirect::matchFamilyName(const char familyName[],
+ SkFontStyle style,
+ FontIdentity* outIdentity,
+ SkString* outFamilyName,
+ SkFontStyle* outStyle) {
+ SkString familyStr(familyName ? familyName : "");
+ if (familyStr.size() > kMaxFontFamilyLength) {
+ return false;
+ }
+
+ FCLocker lock;
+
+ FcPattern* pattern = FcPatternCreate();
+
+ if (familyName) {
+ FcPatternAddString(pattern, FC_FAMILY, (FcChar8*)familyName);
+ }
+ fcpattern_from_skfontstyle(style, pattern);
+
+ FcPatternAddBool(pattern, FC_SCALABLE, FcTrue);
+
+ FcConfigSubstitute(nullptr, pattern, FcMatchPattern);
+ FcDefaultSubstitute(pattern);
+
+ // Font matching:
+ // CSS often specifies a fallback list of families:
+ // font-family: a, b, c, serif;
+ // However, fontconfig will always do its best to find *a* font when asked
+ // for something so we need a way to tell if the match which it has found is
+ // "good enough" for us. Otherwise, we can return nullptr which gets piped up
+ // and lets WebKit know to try the next CSS family name. However, fontconfig
+ // configs allow substitutions (mapping "Arial -> Helvetica" etc) and we
+ // wish to support that.
+ //
+ // Thus, if a specific family is requested we set @family_requested. Then we
+ // record two strings: the family name after config processing and the
+ // family name after resolving. If the two are equal, it's a good match.
+ //
+ // So consider the case where a user has mapped Arial to Helvetica in their
+ // config.
+ // requested family: "Arial"
+ // post_config_family: "Helvetica"
+ // post_match_family: "Helvetica"
+ // -> good match
+ //
+ // and for a missing font:
+ // requested family: "Monaco"
+ // post_config_family: "Monaco"
+ // post_match_family: "Times New Roman"
+ // -> BAD match
+ //
+ // However, we special-case fallback fonts; see IsFallbackFontAllowed().
+
+ const char* post_config_family = get_string(pattern, FC_FAMILY);
+ if (!post_config_family) {
+ // we can just continue with an empty name, e.g. default font
+ post_config_family = "";
+ }
+
+ FcResult result;
+ FcFontSet* font_set = FcFontSort(nullptr, pattern, 0, nullptr, &result);
+ if (!font_set) {
+ FcPatternDestroy(pattern);
+ return false;
+ }
+
+ FcPattern* match = this->MatchFont(font_set, post_config_family, familyStr);
+ if (!match) {
+ FcPatternDestroy(pattern);
+ FcFontSetDestroy(font_set);
+ return false;
+ }
+
+ FcPatternDestroy(pattern);
+
+ // From here out we just extract our results from 'match'
+
+ post_config_family = get_string(match, FC_FAMILY);
+ if (!post_config_family) {
+ FcFontSetDestroy(font_set);
+ return false;
+ }
+
+ const char* c_filename = get_string(match, FC_FILE);
+ if (!c_filename) {
+ FcFontSetDestroy(font_set);
+ return false;
+ }
+
+ int face_index = get_int(match, FC_INDEX, 0);
+
+ FcFontSetDestroy(font_set);
+
+ if (outIdentity) {
+ outIdentity->fTTCIndex = face_index;
+ outIdentity->fString.set(c_filename);
+ }
+ if (outFamilyName) {
+ outFamilyName->set(post_config_family);
+ }
+ if (outStyle) {
+ *outStyle = skfontstyle_from_fcpattern(match);
+ }
+ return true;
+}
+
+SkStreamAsset* SkFontConfigInterfaceDirect::openStream(const FontIdentity& identity) {
+ return SkStream::MakeFromFile(identity.fString.c_str()).release();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool find_name(const SkTDArray<const char*>& list, const char* str) {
+ int count = list.count();
+ for (int i = 0; i < count; ++i) {
+ if (!strcmp(list[i], str)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+sk_sp<SkDataTable> SkFontConfigInterfaceDirect::getFamilyNames() {
+ FCLocker lock;
+
+ FcPattern* pat = FcPatternCreate();
+ SkAutoTCallVProc<FcPattern, FcPatternDestroy> autoDestroyPat(pat);
+ if (nullptr == pat) {
+ return nullptr;
+ }
+
+ FcObjectSet* os = FcObjectSetBuild(FC_FAMILY, (char *)0);
+ SkAutoTCallVProc<FcObjectSet, FcObjectSetDestroy> autoDestroyOs(os);
+ if (nullptr == os) {
+ return nullptr;
+ }
+
+ FcFontSet* fs = FcFontList(nullptr, pat, os);
+ SkAutoTCallVProc<FcFontSet, FcFontSetDestroy> autoDestroyFs(fs);
+ if (nullptr == fs) {
+ return nullptr;
+ }
+
+ SkTDArray<const char*> names;
+ SkTDArray<size_t> sizes;
+ for (int i = 0; i < fs->nfont; ++i) {
+ FcPattern* match = fs->fonts[i];
+ const char* famName = get_string(match, FC_FAMILY);
+ if (famName && !find_name(names, famName)) {
+ *names.append() = famName;
+ *sizes.append() = strlen(famName) + 1;
+ }
+ }
+
+ return SkDataTable::MakeCopyArrays((const void*const*)names.begin(),
+ sizes.begin(), names.count());
+}
diff --git a/gfx/skia/skia/src/ports/SkFontConfigInterface_direct.h b/gfx/skia/skia/src/ports/SkFontConfigInterface_direct.h
new file mode 100644
index 000000000..6cd0a8f9b
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontConfigInterface_direct.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2009-2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/* migrated from chrome/src/skia/ext/SkFontHost_fontconfig_direct.cpp */
+
+#include "SkFontConfigInterface.h"
+
+#include <fontconfig/fontconfig.h>
+
+class SkFontConfigInterfaceDirect : public SkFontConfigInterface {
+public:
+ SkFontConfigInterfaceDirect();
+ ~SkFontConfigInterfaceDirect() override;
+
+ bool matchFamilyName(const char familyName[],
+ SkFontStyle requested,
+ FontIdentity* outFontIdentifier,
+ SkString* outFamilyName,
+ SkFontStyle* outStyle) override;
+
+ SkStreamAsset* openStream(const FontIdentity&) override;
+
+ // new APIs
+ sk_sp<SkDataTable> getFamilyNames() override;
+
+protected:
+ virtual bool isAccessible(const char* filename);
+
+private:
+ bool isValidPattern(FcPattern* pattern);
+ FcPattern* MatchFont(FcFontSet* font_set, const char* post_config_family,
+ const SkString& family);
+ typedef SkFontConfigInterface INHERITED;
+};
diff --git a/gfx/skia/skia/src/ports/SkFontConfigInterface_direct_factory.cpp b/gfx/skia/skia/src/ports/SkFontConfigInterface_direct_factory.cpp
new file mode 100644
index 000000000..116ba8380
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontConfigInterface_direct_factory.cpp
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2009-2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFontConfigInterface_direct.h"
+#include "SkOnce.h"
+
+SkFontConfigInterface* SkFontConfigInterface::GetSingletonDirectInterface() {
+ static SkFontConfigInterface* singleton;
+ static SkOnce once;
+ once([]{ singleton = new SkFontConfigInterfaceDirect(); });
+ return singleton;
+}
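+
+// Typical use, as an illustrative sketch only (the family name is arbitrary):
+//
+//   SkFontConfigInterface* fci = SkFontConfigInterface::GetSingletonDirectInterface();
+//   SkFontConfigInterface::FontIdentity identity;
+//   SkString resolvedFamily;
+//   SkFontStyle resolvedStyle;
+//   if (fci->matchFamilyName("Arial", SkFontStyle(), &identity,
+//                            &resolvedFamily, &resolvedStyle)) {
+//       std::unique_ptr<SkStreamAsset> stream(fci->openStream(identity));
+//   }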
diff --git a/gfx/skia/skia/src/ports/SkFontConfigTypeface.h b/gfx/skia/skia/src/ports/SkFontConfigTypeface.h
new file mode 100644
index 000000000..0da78aed8
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontConfigTypeface.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFontConfigInterface.h"
+#include "SkFontDescriptor.h"
+#include "SkFontHost_FreeType_common.h"
+#include "SkRefCnt.h"
+#include "SkStream.h"
+
+class SkFontDescriptor;
+
+class SkTypeface_FCI : public SkTypeface_FreeType {
+ sk_sp<SkFontConfigInterface> fFCI;
+ SkFontConfigInterface::FontIdentity fIdentity;
+ SkString fFamilyName;
+ std::unique_ptr<SkFontData> fFontData;
+
+public:
+ static SkTypeface_FCI* Create(sk_sp<SkFontConfigInterface> fci,
+ const SkFontConfigInterface::FontIdentity& fi,
+ const SkString& familyName,
+ const SkFontStyle& style)
+ {
+ return new SkTypeface_FCI(std::move(fci), fi, familyName, style);
+ }
+
+ static SkTypeface_FCI* Create(std::unique_ptr<SkFontData> data,
+ SkFontStyle style, bool isFixedPitch)
+ {
+ return new SkTypeface_FCI(std::move(data), style, isFixedPitch);
+ }
+
+ const SkFontConfigInterface::FontIdentity& getIdentity() const {
+ return fIdentity;
+ }
+
+protected:
+ SkTypeface_FCI(sk_sp<SkFontConfigInterface> fci,
+ const SkFontConfigInterface::FontIdentity& fi,
+ const SkString& familyName,
+ const SkFontStyle& style)
+ : INHERITED(style, false)
+ , fFCI(std::move(fci))
+ , fIdentity(fi)
+ , fFamilyName(familyName)
+ , fFontData(nullptr) {}
+
+ SkTypeface_FCI(std::unique_ptr<SkFontData> data, SkFontStyle style, bool isFixedPitch)
+ : INHERITED(style, isFixedPitch)
+ , fFontData(std::move(data))
+ {
+ SkASSERT(fFontData);
+ fIdentity.fTTCIndex = fFontData->getIndex();
+ }
+
+ void onGetFamilyName(SkString* familyName) const override { *familyName = fFamilyName; }
+ void onGetFontDescriptor(SkFontDescriptor*, bool*) const override;
+ SkStreamAsset* onOpenStream(int* ttcIndex) const override;
+ std::unique_ptr<SkFontData> onMakeFontData() const override;
+
+private:
+ typedef SkTypeface_FreeType INHERITED;
+};
diff --git a/gfx/skia/skia/src/ports/SkFontHost_FreeType.cpp b/gfx/skia/skia/src/ports/SkFontHost_FreeType.cpp
new file mode 100644
index 000000000..71ce865f0
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontHost_FreeType.cpp
@@ -0,0 +1,1776 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAdvancedTypefaceMetrics.h"
+#include "SkBitmap.h"
+#include "SkCanvas.h"
+#include "SkColorPriv.h"
+#include "SkDescriptor.h"
+#include "SkFDot6.h"
+#include "SkFontDescriptor.h"
+#include "SkFontHost_FreeType_common.h"
+#include "SkGlyph.h"
+#include "SkMask.h"
+#include "SkMaskGamma.h"
+#include "SkMatrix22.h"
+#include "SkMutex.h"
+#include "SkOTUtils.h"
+#include "SkPath.h"
+#include "SkScalerContext.h"
+#include "SkStream.h"
+#include "SkString.h"
+#include "SkTemplates.h"
+#include "SkTypes.h"
+#include <memory>
+
+#if defined(SK_CAN_USE_DLOPEN)
+#include <dlfcn.h>
+#endif
+#include <ft2build.h>
+#include FT_ADVANCES_H
+#include FT_BITMAP_H
+#include FT_FREETYPE_H
+#include FT_LCD_FILTER_H
+#include FT_MODULE_H
+#include FT_MULTIPLE_MASTERS_H
+#include FT_OUTLINE_H
+#include FT_SIZES_H
+#include FT_SYSTEM_H
+#include FT_TRUETYPE_TABLES_H
+#include FT_TYPE1_TABLES_H
+#include FT_XFREE86_H
+
+// FT_LOAD_COLOR and the corresponding FT_Pixel_Mode::FT_PIXEL_MODE_BGRA
+// were introduced in FreeType 2.5.0.
+// The following may be removed once FreeType 2.5.0 is required to build.
+#ifndef FT_LOAD_COLOR
+# define FT_LOAD_COLOR ( 1L << 20 )
+# define FT_PIXEL_MODE_BGRA 7
+#endif
+
+//#define ENABLE_GLYPH_SPEW // for tracing calls
+//#define DUMP_STRIKE_CREATION
+//#define SK_FONTHOST_FREETYPE_USE_NORMAL_LCD_FILTER
+//#define SK_FONTHOST_FREETYPE_RUNTIME_VERSION
+//#define SK_GAMMA_APPLY_TO_A8
+
+static bool isLCD(const SkScalerContext::Rec& rec) {
+ return SkMask::kLCD16_Format == rec.fMaskFormat;
+}
+
+//////////////////////////////////////////////////////////////////////////
+
+extern "C" {
+ static void* sk_ft_alloc(FT_Memory, long size) {
+ return sk_malloc_throw(size);
+ }
+ static void sk_ft_free(FT_Memory, void* block) {
+ sk_free(block);
+ }
+ static void* sk_ft_realloc(FT_Memory, long cur_size, long new_size, void* block) {
+ return sk_realloc_throw(block, new_size);
+ }
+};
+FT_MemoryRec_ gFTMemory = { nullptr, sk_ft_alloc, sk_ft_free, sk_ft_realloc };
+
+class FreeTypeLibrary : SkNoncopyable {
+public:
+ FreeTypeLibrary() : fLibrary(nullptr), fIsLCDSupported(false), fLCDExtra(0) {
+ if (FT_New_Library(&gFTMemory, &fLibrary)) {
+ return;
+ }
+ FT_Add_Default_Modules(fLibrary);
+
+ // Setup LCD filtering. This reduces color fringes for LCD smoothed glyphs.
+ // Default { 0x10, 0x40, 0x70, 0x40, 0x10 } adds up to 0x110, simulating ink spread.
+ // SetLcdFilter must be called before SetLcdFilterWeights.
+ if (FT_Library_SetLcdFilter(fLibrary, FT_LCD_FILTER_DEFAULT) == 0) {
+ fIsLCDSupported = true;
+ fLCDExtra = 2; //Using a filter adds one full pixel to each side.
+
+#ifdef SK_FONTHOST_FREETYPE_USE_NORMAL_LCD_FILTER
+ // Adds to 0x110 simulating ink spread, but provides better results than default.
+ static unsigned char gGaussianLikeHeavyWeights[] = { 0x1A, 0x43, 0x56, 0x43, 0x1A, };
+
+# if SK_FONTHOST_FREETYPE_RUNTIME_VERSION > 0x020400
+ FT_Library_SetLcdFilterWeights(fLibrary, gGaussianLikeHeavyWeights);
+# elif SK_CAN_USE_DLOPEN == 1
+ //The FreeType library is already loaded, so symbols are available in process.
+ void* self = dlopen(nullptr, RTLD_LAZY);
+ if (self) {
+ FT_Library_SetLcdFilterWeightsProc setLcdFilterWeights;
+ //The following cast is non-standard, but safe for POSIX.
+ *reinterpret_cast<void**>(&setLcdFilterWeights) =
+ dlsym(self, "FT_Library_SetLcdFilterWeights");
+ dlclose(self);
+
+ if (setLcdFilterWeights) {
+ setLcdFilterWeights(fLibrary, gGaussianLikeHeavyWeights);
+ }
+ }
+# endif
+#endif
+ }
+ }
+ ~FreeTypeLibrary() {
+ if (fLibrary) {
+ FT_Done_Library(fLibrary);
+ }
+ }
+
+ FT_Library library() { return fLibrary; }
+ bool isLCDSupported() { return fIsLCDSupported; }
+ int lcdExtra() { return fLCDExtra; }
+
+private:
+ FT_Library fLibrary;
+ bool fIsLCDSupported;
+ int fLCDExtra;
+
+ // FT_Library_SetLcdFilterWeights was introduced in FreeType 2.4.0.
+ // The following platforms provide FreeType of at least 2.4.0.
+ // Ubuntu >= 11.04 (previous deprecated April 2013)
+ // Debian >= 6.0 (good)
+ // OpenSuse >= 11.4 (previous deprecated January 2012 / Nov 2013 for Evergreen 11.2)
+ // Fedora >= 14 (good)
+ // Android >= Gingerbread (good)
+ typedef FT_Error (*FT_Library_SetLcdFilterWeightsProc)(FT_Library, unsigned char*);
+};
+
+struct SkFaceRec;
+
+SK_DECLARE_STATIC_MUTEX(gFTMutex);
+static FreeTypeLibrary* gFTLibrary;
+static SkFaceRec* gFaceRecHead;
+
+// Private to ref_ft_library and unref_ft_library
+static int gFTCount;
+
+// Caller must lock gFTMutex before calling this function.
+static bool ref_ft_library() {
+ gFTMutex.assertHeld();
+ SkASSERT(gFTCount >= 0);
+
+ if (0 == gFTCount) {
+ SkASSERT(nullptr == gFTLibrary);
+ gFTLibrary = new FreeTypeLibrary;
+ }
+ ++gFTCount;
+ return gFTLibrary->library();
+}
+
+// Caller must lock gFTMutex before calling this function.
+static void unref_ft_library() {
+ gFTMutex.assertHeld();
+ SkASSERT(gFTCount > 0);
+
+ --gFTCount;
+ if (0 == gFTCount) {
+ SkASSERT(nullptr == gFaceRecHead);
+ SkASSERT(nullptr != gFTLibrary);
+ delete gFTLibrary;
+ SkDEBUGCODE(gFTLibrary = nullptr;)
+ }
+}
+
+class SkScalerContext_FreeType : public SkScalerContext_FreeType_Base {
+public:
+ SkScalerContext_FreeType(SkTypeface*, const SkScalerContextEffects&, const SkDescriptor* desc);
+ virtual ~SkScalerContext_FreeType();
+
+ bool success() const {
+ return fFTSize != nullptr && fFace != nullptr;
+ }
+
+protected:
+ unsigned generateGlyphCount() override;
+ uint16_t generateCharToGlyph(SkUnichar uni) override;
+ void generateAdvance(SkGlyph* glyph) override;
+ void generateMetrics(SkGlyph* glyph) override;
+ void generateImage(const SkGlyph& glyph) override;
+ void generatePath(const SkGlyph& glyph, SkPath* path) override;
+ void generateFontMetrics(SkPaint::FontMetrics*) override;
+ SkUnichar generateGlyphToChar(uint16_t glyph) override;
+
+private:
+ FT_Face fFace; // Shared face from gFaceRecHead.
+ FT_Size fFTSize; // The size on the fFace for this scaler.
+ FT_Int fStrikeIndex;
+
+ /** The rest of the matrix after FreeType handles the size.
+ * With outline font rasterization this is handled by FreeType with FT_Set_Transform.
+ * With bitmap only fonts this matrix must be applied to scale the bitmap.
+ */
+ SkMatrix fMatrix22Scalar;
+ /** Same as fMatrix22Scalar, but in FreeType units and space. */
+ FT_Matrix fMatrix22;
+ /** The actual size requested. */
+ SkVector fScale;
+
+ uint32_t fLoadGlyphFlags;
+ bool fDoLinearMetrics;
+ bool fLCDIsVert;
+
+ FT_Error setupSize();
+ void getBBoxForCurrentGlyph(SkGlyph* glyph, FT_BBox* bbox,
+ bool snapToPixelBoundary = false);
+ bool getCBoxForLetter(char letter, FT_BBox* bbox);
+ // Caller must lock gFTMutex before calling this function.
+ void updateGlyphIfLCD(SkGlyph* glyph);
+ // Caller must lock gFTMutex before calling this function.
+ // update FreeType2 glyph slot with glyph emboldened
+ void emboldenIfNeeded(FT_Face face, FT_GlyphSlot glyph);
+ bool shouldSubpixelBitmap(const SkGlyph&, const SkMatrix&);
+};
+
+///////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////
+
+struct SkFaceRec {
+ SkFaceRec* fNext;
+ FT_Face fFace;
+ FT_StreamRec fFTStream;
+ std::unique_ptr<SkStreamAsset> fSkStream;
+ uint32_t fRefCnt;
+ uint32_t fFontID;
+
+ SkFaceRec(std::unique_ptr<SkStreamAsset> stream, uint32_t fontID);
+};
+
+extern "C" {
+ static unsigned long sk_ft_stream_io(FT_Stream ftStream,
+ unsigned long offset,
+ unsigned char* buffer,
+ unsigned long count)
+ {
+ SkStreamAsset* stream = static_cast<SkStreamAsset*>(ftStream->descriptor.pointer);
+
+ if (count) {
+ if (!stream->seek(offset)) {
+ return 0;
+ }
+ count = stream->read(buffer, count);
+ }
+ return count;
+ }
+
+ static void sk_ft_stream_close(FT_Stream) {}
+}
+
+SkFaceRec::SkFaceRec(std::unique_ptr<SkStreamAsset> stream, uint32_t fontID)
+ : fNext(nullptr), fSkStream(std::move(stream)), fRefCnt(1), fFontID(fontID)
+{
+ sk_bzero(&fFTStream, sizeof(fFTStream));
+ fFTStream.size = fSkStream->getLength();
+ fFTStream.descriptor.pointer = fSkStream.get();
+ fFTStream.read = sk_ft_stream_io;
+ fFTStream.close = sk_ft_stream_close;
+}
+
+static void ft_face_setup_axes(FT_Face face, const SkFontData& data) {
+ if (!(face->face_flags & FT_FACE_FLAG_MULTIPLE_MASTERS)) {
+ return;
+ }
+
+ SkDEBUGCODE(
+ FT_MM_Var* variations = nullptr;
+ if (FT_Get_MM_Var(face, &variations)) {
+ SkDEBUGF(("INFO: font %s claims variations, but none found.\n", face->family_name));
+ return;
+ }
+ SkAutoFree autoFreeVariations(variations);
+
+ if (static_cast<FT_UInt>(data.getAxisCount()) != variations->num_axis) {
+ SkDEBUGF(("INFO: font %s has %d variations, but %d were specified.\n",
+ face->family_name, variations->num_axis, data.getAxisCount()));
+ return;
+ }
+ )
+
+ SkAutoSTMalloc<4, FT_Fixed> coords(data.getAxisCount());
+ for (int i = 0; i < data.getAxisCount(); ++i) {
+ coords[i] = data.getAxis()[i];
+ }
+ if (FT_Set_Var_Design_Coordinates(face, data.getAxisCount(), coords.get())) {
+ SkDEBUGF(("INFO: font %s has variations, but specified variations could not be set.\n",
+ face->family_name));
+ return;
+ }
+}
+
+// Will return 0 on failure
+// Caller must lock gFTMutex before calling this function.
+static FT_Face ref_ft_face(const SkTypeface* typeface) {
+ gFTMutex.assertHeld();
+
+ const SkFontID fontID = typeface->uniqueID();
+ SkFaceRec* rec = gFaceRecHead;
+ while (rec) {
+ if (rec->fFontID == fontID) {
+ SkASSERT(rec->fFace);
+ rec->fRefCnt += 1;
+ return rec->fFace;
+ }
+ rec = rec->fNext;
+ }
+
+ std::unique_ptr<SkFontData> data = typeface->makeFontData();
+ if (nullptr == data || !data->hasStream()) {
+ return nullptr;
+ }
+
+ rec = new SkFaceRec(data->detachStream(), fontID);
+
+ FT_Open_Args args;
+ memset(&args, 0, sizeof(args));
+ const void* memoryBase = rec->fSkStream->getMemoryBase();
+ if (memoryBase) {
+ args.flags = FT_OPEN_MEMORY;
+ args.memory_base = (const FT_Byte*)memoryBase;
+ args.memory_size = rec->fSkStream->getLength();
+ } else {
+ args.flags = FT_OPEN_STREAM;
+ args.stream = &rec->fFTStream;
+ }
+
+ FT_Error err = FT_Open_Face(gFTLibrary->library(), &args, data->getIndex(), &rec->fFace);
+ if (err) {
+ SkDEBUGF(("ERROR: unable to open font '%x'\n", fontID));
+ delete rec;
+ return nullptr;
+ }
+ SkASSERT(rec->fFace);
+
+ ft_face_setup_axes(rec->fFace, *data);
+
+ // FreeType will set the charmap to the "most unicode" cmap if it exists.
+ // If there are no unicode cmaps, the charmap is set to nullptr.
+ // However, "symbol" cmaps should also be considered "fallback unicode" cmaps
+ // because they are effectively private use area only (even if they aren't).
+ // This is the last on the fallback list at
+ // https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6cmap.html
+ if (!rec->fFace->charmap) {
+ FT_Select_Charmap(rec->fFace, FT_ENCODING_MS_SYMBOL);
+ }
+
+ rec->fNext = gFaceRecHead;
+ gFaceRecHead = rec;
+ return rec->fFace;
+}
+
+// Caller must lock gFTMutex before calling this function.
+extern void unref_ft_face(FT_Face face);
+void unref_ft_face(FT_Face face) {
+ gFTMutex.assertHeld();
+
+ SkFaceRec* rec = gFaceRecHead;
+ SkFaceRec* prev = nullptr;
+ while (rec) {
+ SkFaceRec* next = rec->fNext;
+ if (rec->fFace == face) {
+ if (--rec->fRefCnt == 0) {
+ if (prev) {
+ prev->fNext = next;
+ } else {
+ gFaceRecHead = next;
+ }
+ FT_Done_Face(face);
+ delete rec;
+ }
+ return;
+ }
+ prev = rec;
+ rec = next;
+ }
+ SkDEBUGFAIL("shouldn't get here, face not in list");
+}
+
+class AutoFTAccess {
+public:
+ AutoFTAccess(const SkTypeface* tf) : fFace(nullptr) {
+ gFTMutex.acquire();
+ if (!ref_ft_library()) {
+ sk_throw();
+ }
+ fFace = ref_ft_face(tf);
+ }
+
+ ~AutoFTAccess() {
+ if (fFace) {
+ unref_ft_face(fFace);
+ }
+ unref_ft_library();
+ gFTMutex.release();
+ }
+
+ FT_Face face() { return fFace; }
+
+private:
+ FT_Face fFace;
+};
+
+///////////////////////////////////////////////////////////////////////////
+
+static bool canEmbed(FT_Face face) {
+ FT_UShort fsType = FT_Get_FSType_Flags(face);
+ return (fsType & (FT_FSTYPE_RESTRICTED_LICENSE_EMBEDDING |
+ FT_FSTYPE_BITMAP_EMBEDDING_ONLY)) == 0;
+}
+
+static bool canSubset(FT_Face face) {
+ FT_UShort fsType = FT_Get_FSType_Flags(face);
+ return (fsType & FT_FSTYPE_NO_SUBSETTING) == 0;
+}
+
+static bool GetLetterCBox(FT_Face face, char letter, FT_BBox* bbox) {
+ const FT_UInt glyph_id = FT_Get_Char_Index(face, letter);
+ if (!glyph_id)
+ return false;
+ if (FT_Load_Glyph(face, glyph_id, FT_LOAD_NO_SCALE) != 0)
+ return false;
+ FT_Outline_Get_CBox(&face->glyph->outline, bbox);
+ return true;
+}
+
+static void populate_glyph_to_unicode(FT_Face& face, SkTDArray<SkUnichar>* glyphToUnicode) {
+ FT_Long numGlyphs = face->num_glyphs;
+ glyphToUnicode->setCount(SkToInt(numGlyphs));
+ sk_bzero(glyphToUnicode->begin(), sizeof((*glyphToUnicode)[0]) * numGlyphs);
+
+ FT_UInt glyphIndex;
+ SkUnichar charCode = FT_Get_First_Char(face, &glyphIndex);
+ while (glyphIndex) {
+ SkASSERT(glyphIndex < SkToUInt(numGlyphs));
+ // Use the first character that maps to this glyphID. https://crbug.com/359065
+ if (0 == (*glyphToUnicode)[glyphIndex]) {
+ (*glyphToUnicode)[glyphIndex] = charCode;
+ }
+ charCode = FT_Get_Next_Char(face, charCode, &glyphIndex);
+ }
+}
+
+SkAdvancedTypefaceMetrics* SkTypeface_FreeType::onGetAdvancedTypefaceMetrics(
+ PerGlyphInfo perGlyphInfo,
+ const uint32_t* glyphIDs,
+ uint32_t glyphIDsCount) const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (!face) {
+ return nullptr;
+ }
+
+ SkAdvancedTypefaceMetrics* info = new SkAdvancedTypefaceMetrics;
+ info->fFontName.set(FT_Get_Postscript_Name(face));
+
+ if (FT_HAS_MULTIPLE_MASTERS(face)) {
+ info->fFlags |= SkAdvancedTypefaceMetrics::kMultiMaster_FontFlag;
+ }
+ if (!canEmbed(face)) {
+ info->fFlags |= SkAdvancedTypefaceMetrics::kNotEmbeddable_FontFlag;
+ }
+ if (!canSubset(face)) {
+ info->fFlags |= SkAdvancedTypefaceMetrics::kNotSubsettable_FontFlag;
+ }
+ info->fLastGlyphID = face->num_glyphs - 1;
+ info->fEmSize = 1000;
+
+ const char* fontType = FT_Get_X11_Font_Format(face);
+ if (strcmp(fontType, "Type 1") == 0) {
+ info->fType = SkAdvancedTypefaceMetrics::kType1_Font;
+ } else if (strcmp(fontType, "CID Type 1") == 0) {
+ info->fType = SkAdvancedTypefaceMetrics::kType1CID_Font;
+ } else if (strcmp(fontType, "CFF") == 0) {
+ info->fType = SkAdvancedTypefaceMetrics::kCFF_Font;
+ } else if (strcmp(fontType, "TrueType") == 0) {
+ info->fType = SkAdvancedTypefaceMetrics::kTrueType_Font;
+ TT_Header* ttHeader;
+ if ((ttHeader = (TT_Header*)FT_Get_Sfnt_Table(face, ft_sfnt_head)) != nullptr) {
+ info->fEmSize = ttHeader->Units_Per_EM;
+ }
+ } else {
+ info->fType = SkAdvancedTypefaceMetrics::kOther_Font;
+ }
+
+ info->fStyle = (SkAdvancedTypefaceMetrics::StyleFlags)0;
+ if (FT_IS_FIXED_WIDTH(face)) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kFixedPitch_Style;
+ }
+ if (face->style_flags & FT_STYLE_FLAG_ITALIC) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kItalic_Style;
+ }
+
+ PS_FontInfoRec psFontInfo;
+ TT_Postscript* postTable;
+ if (FT_Get_PS_Font_Info(face, &psFontInfo) == 0) {
+ info->fItalicAngle = psFontInfo.italic_angle;
+ } else if ((postTable = (TT_Postscript*)FT_Get_Sfnt_Table(face, ft_sfnt_post)) != nullptr) {
+ info->fItalicAngle = SkFixedToScalar(postTable->italicAngle);
+ } else {
+ info->fItalicAngle = 0;
+ }
+
+ info->fAscent = face->ascender;
+ info->fDescent = face->descender;
+
+ // Figure out a good guess for StemV - Min width of i, I, !, 1.
+ // This probably isn't very good with an italic font.
+ int16_t min_width = SHRT_MAX;
+ info->fStemV = 0;
+ char stem_chars[] = {'i', 'I', '!', '1'};
+ for (size_t i = 0; i < SK_ARRAY_COUNT(stem_chars); i++) {
+ FT_BBox bbox;
+ if (GetLetterCBox(face, stem_chars[i], &bbox)) {
+ int16_t width = bbox.xMax - bbox.xMin;
+ if (width > 0 && width < min_width) {
+ min_width = width;
+ info->fStemV = min_width;
+ }
+ }
+ }
+
+ TT_PCLT* pcltTable;
+ TT_OS2* os2Table;
+ if ((pcltTable = (TT_PCLT*)FT_Get_Sfnt_Table(face, ft_sfnt_pclt)) != nullptr) {
+ info->fCapHeight = pcltTable->CapHeight;
+ uint8_t serif_style = pcltTable->SerifStyle & 0x3F;
+ if (2 <= serif_style && serif_style <= 6) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kSerif_Style;
+ } else if (9 <= serif_style && serif_style <= 12) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kScript_Style;
+ }
+ } else if (((os2Table = (TT_OS2*)FT_Get_Sfnt_Table(face, ft_sfnt_os2)) != nullptr) &&
+ // sCapHeight is available only when version 2 or later.
+ os2Table->version != 0xFFFF &&
+ os2Table->version >= 2)
+ {
+ info->fCapHeight = os2Table->sCapHeight;
+ } else {
+ // Figure out a good guess for CapHeight: average the height of M and X.
+ FT_BBox m_bbox, x_bbox;
+ bool got_m, got_x;
+ got_m = GetLetterCBox(face, 'M', &m_bbox);
+ got_x = GetLetterCBox(face, 'X', &x_bbox);
+ if (got_m && got_x) {
+ info->fCapHeight = ((m_bbox.yMax - m_bbox.yMin) + (x_bbox.yMax - x_bbox.yMin)) / 2;
+ } else if (got_m && !got_x) {
+ info->fCapHeight = m_bbox.yMax - m_bbox.yMin;
+ } else if (!got_m && got_x) {
+ info->fCapHeight = x_bbox.yMax - x_bbox.yMin;
+ } else {
+ // Last resort, use the ascent.
+ info->fCapHeight = info->fAscent;
+ }
+ }
+
+ info->fBBox = SkIRect::MakeLTRB(face->bbox.xMin, face->bbox.yMax,
+ face->bbox.xMax, face->bbox.yMin);
+
+ if (!FT_IS_SCALABLE(face)) {
+ perGlyphInfo = kNo_PerGlyphInfo;
+ }
+
+ if (perGlyphInfo & kGlyphNames_PerGlyphInfo &&
+ info->fType == SkAdvancedTypefaceMetrics::kType1_Font)
+ {
+ // Postscript fonts may contain more than 255 glyphs, so we end up
+ // using multiple font descriptions with a glyph ordering. Record
+ // the name of each glyph.
+ info->fGlyphNames.reset(face->num_glyphs);
+ for (int gID = 0; gID < face->num_glyphs; gID++) {
+ char glyphName[128]; // PS limit for names is 127 bytes.
+ FT_Get_Glyph_Name(face, gID, glyphName, 128);
+ info->fGlyphNames[gID].set(glyphName);
+ }
+ }
+
+ if (perGlyphInfo & kToUnicode_PerGlyphInfo &&
+ info->fType != SkAdvancedTypefaceMetrics::kType1_Font &&
+ face->num_charmaps)
+ {
+ populate_glyph_to_unicode(face, &(info->fGlyphToUnicode));
+ }
+
+ return info;
+}
+
+///////////////////////////////////////////////////////////////////////////
+
+static bool bothZero(SkScalar a, SkScalar b) {
+ return 0 == a && 0 == b;
+}
+
+// returns false if there is any non-90-rotation or skew
+static bool isAxisAligned(const SkScalerContext::Rec& rec) {
+ return 0 == rec.fPreSkewX &&
+ (bothZero(rec.fPost2x2[0][1], rec.fPost2x2[1][0]) ||
+ bothZero(rec.fPost2x2[0][0], rec.fPost2x2[1][1]));
+}
+
+SkScalerContext* SkTypeface_FreeType::onCreateScalerContext(const SkScalerContextEffects& effects,
+ const SkDescriptor* desc) const {
+ SkScalerContext_FreeType* c =
+ new SkScalerContext_FreeType(const_cast<SkTypeface_FreeType*>(this), effects, desc);
+ if (!c->success()) {
+ delete c;
+ c = nullptr;
+ }
+ return c;
+}
+
+void SkTypeface_FreeType::onFilterRec(SkScalerContextRec* rec) const {
+ //BOGUS: http://code.google.com/p/chromium/issues/detail?id=121119
+ //Cap the requested size as larger sizes give bogus values.
+ //Remove when http://code.google.com/p/skia/issues/detail?id=554 is fixed.
+ //Note that this also currently only protects against large text size requests,
+ //the total matrix is not taken into account here.
+ if (rec->fTextSize > SkIntToScalar(1 << 14)) {
+ rec->fTextSize = SkIntToScalar(1 << 14);
+ }
+
+ if (isLCD(*rec)) {
+ // TODO: re-work so that FreeType is set-up and selected by the SkFontMgr.
+ SkAutoMutexAcquire ama(gFTMutex);
+ ref_ft_library();
+ if (!gFTLibrary->isLCDSupported()) {
+ // If the runtime Freetype library doesn't support LCD, disable it here.
+ rec->fMaskFormat = SkMask::kA8_Format;
+ }
+ unref_ft_library();
+ }
+
+ SkPaint::Hinting h = rec->getHinting();
+ if (SkPaint::kFull_Hinting == h && !isLCD(*rec)) {
+ // collapse full->normal hinting if we're not doing LCD
+ h = SkPaint::kNormal_Hinting;
+ }
+ if ((rec->fFlags & SkScalerContext::kSubpixelPositioning_Flag)) {
+ if (SkPaint::kNo_Hinting != h) {
+ h = SkPaint::kSlight_Hinting;
+ }
+ }
+
+ // rotated text looks bad with hinting, so we disable it as needed
+ if (!isAxisAligned(*rec)) {
+ h = SkPaint::kNo_Hinting;
+ }
+ rec->setHinting(h);
+
+#ifndef SK_GAMMA_APPLY_TO_A8
+ if (!isLCD(*rec)) {
+ // SRGBTODO: Is this correct? Do we want contrast boost?
+ rec->ignorePreBlend();
+ }
+#endif
+}
+
+int SkTypeface_FreeType::onGetUPEM() const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ return face ? face->units_per_EM : 0;
+}
+
+bool SkTypeface_FreeType::onGetKerningPairAdjustments(const uint16_t glyphs[],
+ int count, int32_t adjustments[]) const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (!face || !FT_HAS_KERNING(face)) {
+ return false;
+ }
+
+ for (int i = 0; i < count - 1; ++i) {
+ FT_Vector delta;
+ FT_Error err = FT_Get_Kerning(face, glyphs[i], glyphs[i+1],
+ FT_KERNING_UNSCALED, &delta);
+ if (err) {
+ return false;
+ }
+ adjustments[i] = delta.x;
+ }
+ return true;
+}
+
+/** Returns the bitmap strike equal to or just larger than the requested size. */
+static FT_Int chooseBitmapStrike(FT_Face face, FT_F26Dot6 scaleY) {
+ if (face == nullptr) {
+ SkDEBUGF(("chooseBitmapStrike aborted due to nullptr face.\n"));
+ return -1;
+ }
+
+ FT_Pos requestedPPEM = scaleY; // FT_Bitmap_Size::y_ppem is in 26.6 format.
+ FT_Int chosenStrikeIndex = -1;
+ FT_Pos chosenPPEM = 0;
+ for (FT_Int strikeIndex = 0; strikeIndex < face->num_fixed_sizes; ++strikeIndex) {
+ FT_Pos strikePPEM = face->available_sizes[strikeIndex].y_ppem;
+ if (strikePPEM == requestedPPEM) {
+ // exact match - our search stops here
+ return strikeIndex;
+ } else if (chosenPPEM < requestedPPEM) {
+ // attempt to increase chosenPPEM
+ if (chosenPPEM < strikePPEM) {
+ chosenPPEM = strikePPEM;
+ chosenStrikeIndex = strikeIndex;
+ }
+ } else {
+ // attempt to decrease chosenPPEM, but not below requestedPPEM
+ if (requestedPPEM < strikePPEM && strikePPEM < chosenPPEM) {
+ chosenPPEM = strikePPEM;
+ chosenStrikeIndex = strikeIndex;
+ }
+ }
+ }
+ return chosenStrikeIndex;
+}
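+
+// For example, a face with fixed strikes at 8, 12 and 16 (all in the same 26.6
+// units as scaleY): a request of 12 matches exactly, a request of 13 picks the
+// 16 strike (the smallest one not below the request), and a request of 20 falls
+// back to 16 because no larger strike exists.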
+
+SkScalerContext_FreeType::SkScalerContext_FreeType(SkTypeface* typeface,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : SkScalerContext_FreeType_Base(typeface, effects, desc)
+ , fFace(nullptr)
+ , fFTSize(nullptr)
+ , fStrikeIndex(-1)
+{
+ SkAutoMutexAcquire ac(gFTMutex);
+
+ if (!ref_ft_library()) {
+ sk_throw();
+ }
+
+ // load the font file
+ using UnrefFTFace = SkFunctionWrapper<void, skstd::remove_pointer_t<FT_Face>, unref_ft_face>;
+ std::unique_ptr<skstd::remove_pointer_t<FT_Face>, UnrefFTFace> ftFace(ref_ft_face(typeface));
+ if (nullptr == ftFace) {
+ SkDEBUGF(("Could not create FT_Face.\n"));
+ return;
+ }
+
+ fRec.computeMatrices(SkScalerContextRec::kFull_PreMatrixScale, &fScale, &fMatrix22Scalar);
+
+ FT_F26Dot6 scaleX = SkScalarToFDot6(fScale.fX);
+ FT_F26Dot6 scaleY = SkScalarToFDot6(fScale.fY);
+ fMatrix22.xx = SkScalarToFixed(fMatrix22Scalar.getScaleX());
+ fMatrix22.xy = SkScalarToFixed(-fMatrix22Scalar.getSkewX());
+ fMatrix22.yx = SkScalarToFixed(-fMatrix22Scalar.getSkewY());
+ fMatrix22.yy = SkScalarToFixed(fMatrix22Scalar.getScaleY());
+
+ fLCDIsVert = SkToBool(fRec.fFlags & SkScalerContext::kLCD_Vertical_Flag);
+
+ // compute the flags we send to Load_Glyph
+ bool linearMetrics = SkToBool(fRec.fFlags & SkScalerContext::kSubpixelPositioning_Flag);
+ {
+ FT_Int32 loadFlags = FT_LOAD_DEFAULT;
+
+ if (SkMask::kBW_Format == fRec.fMaskFormat) {
+ // See http://code.google.com/p/chromium/issues/detail?id=43252#c24
+ loadFlags = FT_LOAD_TARGET_MONO;
+ if (fRec.getHinting() == SkPaint::kNo_Hinting) {
+ loadFlags = FT_LOAD_NO_HINTING;
+ linearMetrics = true;
+ }
+ } else {
+ switch (fRec.getHinting()) {
+ case SkPaint::kNo_Hinting:
+ loadFlags = FT_LOAD_NO_HINTING;
+ linearMetrics = true;
+ break;
+ case SkPaint::kSlight_Hinting:
+ loadFlags = FT_LOAD_TARGET_LIGHT; // This implies FORCE_AUTOHINT
+ break;
+ case SkPaint::kNormal_Hinting:
+ if (fRec.fFlags & SkScalerContext::kForceAutohinting_Flag) {
+ loadFlags = FT_LOAD_FORCE_AUTOHINT;
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ } else {
+ loadFlags = FT_LOAD_NO_AUTOHINT;
+#endif
+ }
+ break;
+ case SkPaint::kFull_Hinting:
+ if (fRec.fFlags & SkScalerContext::kForceAutohinting_Flag) {
+ loadFlags = FT_LOAD_FORCE_AUTOHINT;
+ break;
+ }
+ loadFlags = FT_LOAD_TARGET_NORMAL;
+ if (isLCD(fRec)) {
+ if (fLCDIsVert) {
+ loadFlags = FT_LOAD_TARGET_LCD_V;
+ } else {
+ loadFlags = FT_LOAD_TARGET_LCD;
+ }
+ }
+ break;
+ default:
+ SkDebugf("---------- UNKNOWN hinting %d\n", fRec.getHinting());
+ break;
+ }
+ }
+
+ if ((fRec.fFlags & SkScalerContext::kEmbeddedBitmapText_Flag) == 0) {
+ loadFlags |= FT_LOAD_NO_BITMAP;
+ }
+
+ // Always using FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH to get correct
+ // advances, as fontconfig and cairo do.
+ // See http://code.google.com/p/skia/issues/detail?id=222.
+ loadFlags |= FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH;
+
+ // Use vertical layout if requested.
+ if (fRec.fFlags & SkScalerContext::kVertical_Flag) {
+ loadFlags |= FT_LOAD_VERTICAL_LAYOUT;
+ }
+
+ loadFlags |= FT_LOAD_COLOR;
+
+ fLoadGlyphFlags = loadFlags;
+ }
+
+ using DoneFTSize = SkFunctionWrapper<FT_Error, skstd::remove_pointer_t<FT_Size>, FT_Done_Size>;
+ std::unique_ptr<skstd::remove_pointer_t<FT_Size>, DoneFTSize> ftSize([&ftFace]() -> FT_Size {
+ FT_Size size;
+ FT_Error err = FT_New_Size(ftFace.get(), &size);
+ if (err != 0) {
+ SkDEBUGF(("FT_New_Size(%s) returned 0x%x.\n", ftFace->family_name, err));
+ return nullptr;
+ }
+ return size;
+ }());
+ if (nullptr == ftSize) {
+ SkDEBUGF(("Could not create FT_Size.\n"));
+ return;
+ }
+
+ FT_Error err = FT_Activate_Size(ftSize.get());
+ if (err != 0) {
+ SkDEBUGF(("FT_Activate_Size(%s) returned 0x%x.\n", ftFace->family_name, err));
+ return;
+ }
+
+ if (FT_IS_SCALABLE(ftFace)) {
+ err = FT_Set_Char_Size(ftFace.get(), scaleX, scaleY, 72, 72);
+ if (err != 0) {
+ SkDEBUGF(("FT_Set_CharSize(%s, %f, %f) returned 0x%x.\n",
+ ftFace->family_name, fScale.fX, fScale.fY, err));
+ return;
+ }
+ } else if (FT_HAS_FIXED_SIZES(ftFace)) {
+ fStrikeIndex = chooseBitmapStrike(ftFace.get(), scaleY);
+ if (fStrikeIndex == -1) {
+ SkDEBUGF(("No glyphs for font \"%s\" size %f.\n", ftFace->family_name, fScale.fY));
+ return;
+ }
+
+ err = FT_Select_Size(ftFace.get(), fStrikeIndex);
+ if (err != 0) {
+ SkDEBUGF(("FT_Select_Size(%s, %d) returned 0x%x.\n",
+ ftFace->family_name, fStrikeIndex, err));
+ fStrikeIndex = -1;
+ return;
+ }
+
+ // A non-ideal size was picked, so recompute the matrix.
+ // This adjusts for the difference between FT_Set_Char_Size and FT_Select_Size.
+ fMatrix22Scalar.preScale(fScale.x() / ftFace->size->metrics.x_ppem,
+ fScale.y() / ftFace->size->metrics.y_ppem);
+ fMatrix22.xx = SkScalarToFixed(fMatrix22Scalar.getScaleX());
+ fMatrix22.xy = SkScalarToFixed(-fMatrix22Scalar.getSkewX());
+ fMatrix22.yx = SkScalarToFixed(-fMatrix22Scalar.getSkewY());
+ fMatrix22.yy = SkScalarToFixed(fMatrix22Scalar.getScaleY());
+
+ // FreeType does not provide linear metrics for bitmap fonts.
+ linearMetrics = false;
+
+ // FreeType documentation says:
+ // FT_LOAD_NO_BITMAP -- Ignore bitmap strikes when loading.
+ // Bitmap-only fonts ignore this flag.
+ //
+ // However, in FreeType 2.5.1 color bitmap only fonts do not ignore this flag.
+ // Force this flag off for bitmap only fonts.
+ fLoadGlyphFlags &= ~FT_LOAD_NO_BITMAP;
+ } else {
+ SkDEBUGF(("Unknown kind of font \"%s\" size %f.\n", fFace->family_name, fScale.fY));
+ return;
+ }
+
+ fFTSize = ftSize.release();
+ fFace = ftFace.release();
+ fDoLinearMetrics = linearMetrics;
+}
+
+SkScalerContext_FreeType::~SkScalerContext_FreeType() {
+ SkAutoMutexAcquire ac(gFTMutex);
+
+ if (fFTSize != nullptr) {
+ FT_Done_Size(fFTSize);
+ }
+
+ if (fFace != nullptr) {
+ unref_ft_face(fFace);
+ }
+
+ unref_ft_library();
+}
+
+/* We call this before each use of the fFace, since we may be sharing
+   this face with other contexts (at different sizes).
+*/
+FT_Error SkScalerContext_FreeType::setupSize() {
+ gFTMutex.assertHeld();
+ FT_Error err = FT_Activate_Size(fFTSize);
+ if (err != 0) {
+ return err;
+ }
+ FT_Set_Transform(fFace, &fMatrix22, nullptr);
+ return 0;
+}
+
+unsigned SkScalerContext_FreeType::generateGlyphCount() {
+ return fFace->num_glyphs;
+}
+
+uint16_t SkScalerContext_FreeType::generateCharToGlyph(SkUnichar uni) {
+ SkAutoMutexAcquire ac(gFTMutex);
+ return SkToU16(FT_Get_Char_Index( fFace, uni ));
+}
+
+SkUnichar SkScalerContext_FreeType::generateGlyphToChar(uint16_t glyph) {
+ SkAutoMutexAcquire ac(gFTMutex);
+ // iterate through each cmap entry, looking for matching glyph indices
+ FT_UInt glyphIndex;
+ SkUnichar charCode = FT_Get_First_Char( fFace, &glyphIndex );
+
+ while (glyphIndex != 0) {
+ if (glyphIndex == glyph) {
+ return charCode;
+ }
+ charCode = FT_Get_Next_Char( fFace, charCode, &glyphIndex );
+ }
+
+ return 0;
+}
+
+static SkScalar SkFT_FixedToScalar(FT_Fixed x) {
+ return SkFixedToScalar(x);
+}
+
+void SkScalerContext_FreeType::generateAdvance(SkGlyph* glyph) {
+ /* unhinted and light hinted text have linearly scaled advances
+ * which are very cheap to compute with some font formats...
+ */
+ if (fDoLinearMetrics) {
+ SkAutoMutexAcquire ac(gFTMutex);
+
+ if (this->setupSize()) {
+ glyph->zeroMetrics();
+ return;
+ }
+
+ FT_Error error;
+ FT_Fixed advance;
+
+ error = FT_Get_Advance( fFace, glyph->getGlyphID(),
+ fLoadGlyphFlags | FT_ADVANCE_FLAG_FAST_ONLY,
+ &advance );
+ if (0 == error) {
+ glyph->fRsbDelta = 0;
+ glyph->fLsbDelta = 0;
+ const SkScalar advanceScalar = SkFT_FixedToScalar(advance);
+ glyph->fAdvanceX = SkScalarToFloat(fMatrix22Scalar.getScaleX() * advanceScalar);
+ glyph->fAdvanceY = SkScalarToFloat(fMatrix22Scalar.getSkewY() * advanceScalar);
+ return;
+ }
+ }
+
+ /* otherwise, we need to load/hint the glyph, which is slower */
+ this->generateMetrics(glyph);
+ return;
+}
+
+void SkScalerContext_FreeType::getBBoxForCurrentGlyph(SkGlyph* glyph,
+ FT_BBox* bbox,
+ bool snapToPixelBoundary) {
+
+ FT_Outline_Get_CBox(&fFace->glyph->outline, bbox);
+
+ if (fRec.fFlags & SkScalerContext::kSubpixelPositioning_Flag) {
+ int dx = SkFixedToFDot6(glyph->getSubXFixed());
+ int dy = SkFixedToFDot6(glyph->getSubYFixed());
+ // negate dy since freetype-y-goes-up and skia-y-goes-down
+ bbox->xMin += dx;
+ bbox->yMin -= dy;
+ bbox->xMax += dx;
+ bbox->yMax -= dy;
+ }
+
+ // outset the box to integral boundaries
+ if (snapToPixelBoundary) {
+ bbox->xMin &= ~63;
+ bbox->yMin &= ~63;
+ bbox->xMax = (bbox->xMax + 63) & ~63;
+ bbox->yMax = (bbox->yMax + 63) & ~63;
+ }
+
+ // Must come after snapToPixelBoundary so that the width and height are
+ // consistent. Otherwise asserts will fire later on when generating the
+ // glyph image.
+ if (fRec.fFlags & SkScalerContext::kVertical_Flag) {
+ FT_Vector vector;
+ vector.x = fFace->glyph->metrics.vertBearingX - fFace->glyph->metrics.horiBearingX;
+ vector.y = -fFace->glyph->metrics.vertBearingY - fFace->glyph->metrics.horiBearingY;
+ FT_Vector_Transform(&vector, &fMatrix22);
+ bbox->xMin += vector.x;
+ bbox->xMax += vector.x;
+ bbox->yMin += vector.y;
+ bbox->yMax += vector.y;
+ }
+}
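+
+// The snapping above works in 26.6 fixed point (64 units per pixel): masking
+// with ~63 floors to a whole pixel and adding 63 first rounds up. For instance,
+// an xMin of 130 (~2.03px) snaps down to 128 (2.0px) and an xMax of 130 snaps
+// up to 192 (3.0px), so the snapped box never shrinks.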
+
+bool SkScalerContext_FreeType::getCBoxForLetter(char letter, FT_BBox* bbox) {
+ const FT_UInt glyph_id = FT_Get_Char_Index(fFace, letter);
+ if (!glyph_id) {
+ return false;
+ }
+ if (FT_Load_Glyph(fFace, glyph_id, fLoadGlyphFlags) != 0) {
+ return false;
+ }
+ emboldenIfNeeded(fFace, fFace->glyph);
+ FT_Outline_Get_CBox(&fFace->glyph->outline, bbox);
+ return true;
+}
+
+void SkScalerContext_FreeType::updateGlyphIfLCD(SkGlyph* glyph) {
+ if (isLCD(fRec)) {
+ if (fLCDIsVert) {
+ glyph->fHeight += gFTLibrary->lcdExtra();
+ glyph->fTop -= gFTLibrary->lcdExtra() >> 1;
+ } else {
+ glyph->fWidth += gFTLibrary->lcdExtra();
+ glyph->fLeft -= gFTLibrary->lcdExtra() >> 1;
+ }
+ }
+}
+
+bool SkScalerContext_FreeType::shouldSubpixelBitmap(const SkGlyph& glyph, const SkMatrix& matrix) {
+ // If subpixel rendering of a bitmap *can* be done.
+ bool mechanism = fFace->glyph->format == FT_GLYPH_FORMAT_BITMAP &&
+ fRec.fFlags & SkScalerContext::kSubpixelPositioning_Flag &&
+ (glyph.getSubXFixed() || glyph.getSubYFixed());
+
+ // If subpixel rendering of a bitmap *should* be done.
+ // 1. If the face is not scalable then always allow subpixel rendering.
+    //    Otherwise, if the font has an 8ppem strike, a 7ppem request will subpixel render but an 8ppem request won't.
+ // 2. If the matrix is already not identity the bitmap will already be resampled,
+ // so resampling slightly differently shouldn't make much difference.
+ bool policy = !FT_IS_SCALABLE(fFace) || !matrix.isIdentity();
+
+ return mechanism && policy;
+}
+
+void SkScalerContext_FreeType::generateMetrics(SkGlyph* glyph) {
+ SkAutoMutexAcquire ac(gFTMutex);
+
+ glyph->fRsbDelta = 0;
+ glyph->fLsbDelta = 0;
+
+ FT_Error err;
+
+ if (this->setupSize()) {
+ glyph->zeroMetrics();
+ return;
+ }
+
+ err = FT_Load_Glyph( fFace, glyph->getGlyphID(), fLoadGlyphFlags );
+ if (err != 0) {
+ glyph->zeroMetrics();
+ return;
+ }
+ emboldenIfNeeded(fFace, fFace->glyph);
+
+ switch ( fFace->glyph->format ) {
+ case FT_GLYPH_FORMAT_OUTLINE:
+ if (0 == fFace->glyph->outline.n_contours) {
+ glyph->fWidth = 0;
+ glyph->fHeight = 0;
+ glyph->fTop = 0;
+ glyph->fLeft = 0;
+ } else {
+ FT_BBox bbox;
+ getBBoxForCurrentGlyph(glyph, &bbox, true);
+
+ glyph->fWidth = SkToU16(SkFDot6Floor(bbox.xMax - bbox.xMin));
+ glyph->fHeight = SkToU16(SkFDot6Floor(bbox.yMax - bbox.yMin));
+ glyph->fTop = -SkToS16(SkFDot6Floor(bbox.yMax));
+ glyph->fLeft = SkToS16(SkFDot6Floor(bbox.xMin));
+
+ updateGlyphIfLCD(glyph);
+ }
+ break;
+
+ case FT_GLYPH_FORMAT_BITMAP:
+ if (fRec.fFlags & SkScalerContext::kVertical_Flag) {
+ FT_Vector vector;
+ vector.x = fFace->glyph->metrics.vertBearingX - fFace->glyph->metrics.horiBearingX;
+ vector.y = -fFace->glyph->metrics.vertBearingY - fFace->glyph->metrics.horiBearingY;
+ FT_Vector_Transform(&vector, &fMatrix22);
+ fFace->glyph->bitmap_left += SkFDot6Floor(vector.x);
+ fFace->glyph->bitmap_top += SkFDot6Floor(vector.y);
+ }
+
+ if (fFace->glyph->bitmap.pixel_mode == FT_PIXEL_MODE_BGRA) {
+ glyph->fMaskFormat = SkMask::kARGB32_Format;
+ }
+
+ {
+ SkRect rect = SkRect::MakeXYWH(SkIntToScalar(fFace->glyph->bitmap_left),
+ -SkIntToScalar(fFace->glyph->bitmap_top),
+ SkIntToScalar(fFace->glyph->bitmap.width),
+ SkIntToScalar(fFace->glyph->bitmap.rows));
+ fMatrix22Scalar.mapRect(&rect);
+ if (this->shouldSubpixelBitmap(*glyph, fMatrix22Scalar)) {
+ rect.offset(SkFixedToScalar(glyph->getSubXFixed()),
+ SkFixedToScalar(glyph->getSubYFixed()));
+ }
+ SkIRect irect = rect.roundOut();
+ glyph->fWidth = SkToU16(irect.width());
+ glyph->fHeight = SkToU16(irect.height());
+ glyph->fTop = SkToS16(irect.top());
+ glyph->fLeft = SkToS16(irect.left());
+ }
+ break;
+
+ default:
+ SkDEBUGFAIL("unknown glyph format");
+ glyph->zeroMetrics();
+ return;
+ }
+
+ if (fRec.fFlags & SkScalerContext::kVertical_Flag) {
+ if (fDoLinearMetrics) {
+ const SkScalar advanceScalar = SkFT_FixedToScalar(fFace->glyph->linearVertAdvance);
+ glyph->fAdvanceX = SkScalarToFloat(fMatrix22Scalar.getSkewX() * advanceScalar);
+ glyph->fAdvanceY = SkScalarToFloat(fMatrix22Scalar.getScaleY() * advanceScalar);
+ } else {
+ glyph->fAdvanceX = -SkFDot6ToFloat(fFace->glyph->advance.x);
+ glyph->fAdvanceY = SkFDot6ToFloat(fFace->glyph->advance.y);
+ }
+ } else {
+ if (fDoLinearMetrics) {
+ const SkScalar advanceScalar = SkFT_FixedToScalar(fFace->glyph->linearHoriAdvance);
+ glyph->fAdvanceX = SkScalarToFloat(fMatrix22Scalar.getScaleX() * advanceScalar);
+ glyph->fAdvanceY = SkScalarToFloat(fMatrix22Scalar.getSkewY() * advanceScalar);
+ } else {
+ glyph->fAdvanceX = SkFDot6ToFloat(fFace->glyph->advance.x);
+ glyph->fAdvanceY = -SkFDot6ToFloat(fFace->glyph->advance.y);
+
+ if (fRec.fFlags & kDevKernText_Flag) {
+ glyph->fRsbDelta = SkToS8(fFace->glyph->rsb_delta);
+ glyph->fLsbDelta = SkToS8(fFace->glyph->lsb_delta);
+ }
+ }
+ }
+
+#ifdef ENABLE_GLYPH_SPEW
+ SkDEBUGF(("Metrics(glyph:%d flags:0x%x) w:%d\n", glyph->getGlyphID(), fLoadGlyphFlags, glyph->fWidth));
+#endif
+}
+
+static void clear_glyph_image(const SkGlyph& glyph) {
+ sk_bzero(glyph.fImage, glyph.rowBytes() * glyph.fHeight);
+}
+
+void SkScalerContext_FreeType::generateImage(const SkGlyph& glyph) {
+ SkAutoMutexAcquire ac(gFTMutex);
+
+ if (this->setupSize()) {
+ clear_glyph_image(glyph);
+ return;
+ }
+
+ FT_Error err = FT_Load_Glyph(fFace, glyph.getGlyphID(), fLoadGlyphFlags);
+ if (err != 0) {
+ SkDEBUGF(("SkScalerContext_FreeType::generateImage: FT_Load_Glyph(glyph:%d width:%d height:%d rb:%d flags:%d) returned 0x%x\n",
+ glyph.getGlyphID(), glyph.fWidth, glyph.fHeight, glyph.rowBytes(), fLoadGlyphFlags, err));
+ clear_glyph_image(glyph);
+ return;
+ }
+
+ emboldenIfNeeded(fFace, fFace->glyph);
+ SkMatrix* bitmapMatrix = &fMatrix22Scalar;
+ SkMatrix subpixelBitmapMatrix;
+ if (this->shouldSubpixelBitmap(glyph, *bitmapMatrix)) {
+ subpixelBitmapMatrix = fMatrix22Scalar;
+ subpixelBitmapMatrix.postTranslate(SkFixedToScalar(glyph.getSubXFixed()),
+ SkFixedToScalar(glyph.getSubYFixed()));
+ bitmapMatrix = &subpixelBitmapMatrix;
+ }
+ generateGlyphImage(fFace, glyph, *bitmapMatrix);
+}
+
+
+void SkScalerContext_FreeType::generatePath(const SkGlyph& glyph, SkPath* path) {
+ SkAutoMutexAcquire ac(gFTMutex);
+
+ SkASSERT(path);
+
+ if (this->setupSize()) {
+ path->reset();
+ return;
+ }
+
+ uint32_t flags = fLoadGlyphFlags;
+ flags |= FT_LOAD_NO_BITMAP; // ignore embedded bitmaps so we're sure to get the outline
+ flags &= ~FT_LOAD_RENDER; // don't scan convert (we just want the outline)
+
+ FT_Error err = FT_Load_Glyph( fFace, glyph.getGlyphID(), flags);
+
+ if (err != 0) {
+ SkDEBUGF(("SkScalerContext_FreeType::generatePath: FT_Load_Glyph(glyph:%d flags:%d) returned 0x%x\n",
+ glyph.getGlyphID(), flags, err));
+ path->reset();
+ return;
+ }
+ emboldenIfNeeded(fFace, fFace->glyph);
+
+ generateGlyphPath(fFace, path);
+
+ // The path's origin from FreeType is always the horizontal layout origin.
+ // Offset the path so that it is relative to the vertical origin if needed.
+ if (fRec.fFlags & SkScalerContext::kVertical_Flag) {
+ FT_Vector vector;
+ vector.x = fFace->glyph->metrics.vertBearingX - fFace->glyph->metrics.horiBearingX;
+ vector.y = -fFace->glyph->metrics.vertBearingY - fFace->glyph->metrics.horiBearingY;
+ FT_Vector_Transform(&vector, &fMatrix22);
+ path->offset(SkFDot6ToScalar(vector.x), -SkFDot6ToScalar(vector.y));
+ }
+}
+
+void SkScalerContext_FreeType::generateFontMetrics(SkPaint::FontMetrics* metrics) {
+ if (nullptr == metrics) {
+ return;
+ }
+
+ SkAutoMutexAcquire ac(gFTMutex);
+
+ if (this->setupSize()) {
+ sk_bzero(metrics, sizeof(*metrics));
+ return;
+ }
+
+ FT_Face face = fFace;
+
+ // fetch units/EM from "head" table if needed (i.e. for bitmap fonts)
+ SkScalar upem = SkIntToScalar(face->units_per_EM);
+ if (!upem) {
+ TT_Header* ttHeader = (TT_Header*)FT_Get_Sfnt_Table(face, ft_sfnt_head);
+ if (ttHeader) {
+ upem = SkIntToScalar(ttHeader->Units_Per_EM);
+ }
+ }
+
+ // use the os/2 table as a source of reasonable defaults.
+ SkScalar x_height = 0.0f;
+ SkScalar avgCharWidth = 0.0f;
+ SkScalar cap_height = 0.0f;
+ TT_OS2* os2 = (TT_OS2*) FT_Get_Sfnt_Table(face, ft_sfnt_os2);
+ if (os2) {
+ x_height = SkIntToScalar(os2->sxHeight) / upem * fScale.y();
+ avgCharWidth = SkIntToScalar(os2->xAvgCharWidth) / upem;
+ if (os2->version != 0xFFFF && os2->version >= 2) {
+ cap_height = SkIntToScalar(os2->sCapHeight) / upem * fScale.y();
+ }
+ }
+
+ // pull from format-specific metrics as needed
+ SkScalar ascent, descent, leading, xmin, xmax, ymin, ymax;
+ SkScalar underlineThickness, underlinePosition;
+ if (face->face_flags & FT_FACE_FLAG_SCALABLE) { // scalable outline font
+ // FreeType will always use HHEA metrics if they're not zero.
+ // It completely ignores the OS/2 fsSelection::UseTypoMetrics bit.
+ // It also ignores the VDMX tables, which are also of interest here
+ // (and override everything else when they apply).
+ static const int kUseTypoMetricsMask = (1 << 7);
+ if (os2 && os2->version != 0xFFFF && (os2->fsSelection & kUseTypoMetricsMask)) {
+ ascent = -SkIntToScalar(os2->sTypoAscender) / upem;
+ descent = -SkIntToScalar(os2->sTypoDescender) / upem;
+ leading = SkIntToScalar(os2->sTypoLineGap) / upem;
+ } else {
+ ascent = -SkIntToScalar(face->ascender) / upem;
+ descent = -SkIntToScalar(face->descender) / upem;
+ leading = SkIntToScalar(face->height + (face->descender - face->ascender)) / upem;
+ }
+ xmin = SkIntToScalar(face->bbox.xMin) / upem;
+ xmax = SkIntToScalar(face->bbox.xMax) / upem;
+ ymin = -SkIntToScalar(face->bbox.yMin) / upem;
+ ymax = -SkIntToScalar(face->bbox.yMax) / upem;
+ underlineThickness = SkIntToScalar(face->underline_thickness) / upem;
+ underlinePosition = -SkIntToScalar(face->underline_position +
+ face->underline_thickness / 2) / upem;
+
+ metrics->fFlags |= SkPaint::FontMetrics::kUnderlineThinknessIsValid_Flag;
+ metrics->fFlags |= SkPaint::FontMetrics::kUnderlinePositionIsValid_Flag;
+
+ // we may be able to synthesize x_height and cap_height from outline
+ if (!x_height) {
+ FT_BBox bbox;
+ if (getCBoxForLetter('x', &bbox)) {
+ x_height = SkIntToScalar(bbox.yMax) / 64.0f;
+ }
+ }
+ if (!cap_height) {
+ FT_BBox bbox;
+ if (getCBoxForLetter('H', &bbox)) {
+ cap_height = SkIntToScalar(bbox.yMax) / 64.0f;
+ }
+ }
+ } else if (fStrikeIndex != -1) { // bitmap strike metrics
+ SkScalar xppem = SkIntToScalar(face->size->metrics.x_ppem);
+ SkScalar yppem = SkIntToScalar(face->size->metrics.y_ppem);
+ ascent = -SkIntToScalar(face->size->metrics.ascender) / (yppem * 64.0f);
+ descent = -SkIntToScalar(face->size->metrics.descender) / (yppem * 64.0f);
+ leading = (SkIntToScalar(face->size->metrics.height) / (yppem * 64.0f)) + ascent - descent;
+ xmin = 0.0f;
+ xmax = SkIntToScalar(face->available_sizes[fStrikeIndex].width) / xppem;
+ ymin = descent + leading;
+ ymax = ascent - descent;
+ underlineThickness = 0;
+ underlinePosition = 0;
+
+ metrics->fFlags &= ~SkPaint::FontMetrics::kUnderlineThinknessIsValid_Flag;
+ metrics->fFlags &= ~SkPaint::FontMetrics::kUnderlinePositionIsValid_Flag;
+ } else {
+ sk_bzero(metrics, sizeof(*metrics));
+ return;
+ }
+
+ // synthesize elements that were not provided by the os/2 table or format-specific metrics
+ if (!x_height) {
+ x_height = -ascent * fScale.y();
+ }
+ if (!avgCharWidth) {
+ avgCharWidth = xmax - xmin;
+ }
+ if (!cap_height) {
+ cap_height = -ascent * fScale.y();
+ }
+
+ // disallow negative linespacing
+ if (leading < 0.0f) {
+ leading = 0.0f;
+ }
+
+ metrics->fTop = ymax * fScale.y();
+ metrics->fAscent = ascent * fScale.y();
+ metrics->fDescent = descent * fScale.y();
+ metrics->fBottom = ymin * fScale.y();
+ metrics->fLeading = leading * fScale.y();
+ metrics->fAvgCharWidth = avgCharWidth * fScale.y();
+ metrics->fXMin = xmin * fScale.y();
+ metrics->fXMax = xmax * fScale.y();
+ metrics->fXHeight = x_height;
+ metrics->fCapHeight = cap_height;
+ metrics->fUnderlineThickness = underlineThickness * fScale.y();
+ metrics->fUnderlinePosition = underlinePosition * fScale.y();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// hand-tuned value to reduce outline embolden strength
+#ifndef SK_OUTLINE_EMBOLDEN_DIVISOR
+ #ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ #define SK_OUTLINE_EMBOLDEN_DIVISOR 34
+ #else
+ #define SK_OUTLINE_EMBOLDEN_DIVISOR 24
+ #endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkScalerContext_FreeType::emboldenIfNeeded(FT_Face face, FT_GlyphSlot glyph)
+{
+ // check to see if the embolden bit is set
+ if (0 == (fRec.fFlags & SkScalerContext::kEmbolden_Flag)) {
+ return;
+ }
+
+ switch (glyph->format) {
+ case FT_GLYPH_FORMAT_OUTLINE:
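+ // FT_MulFix(units_per_EM, y_scale) is the em size in 26.6 pixels (ppem * 64),
+ // so the strength is roughly ppem / SK_OUTLINE_EMBOLDEN_DIVISOR pixels,
+ // e.g. 12 * 64 / 24 = 32 (half a pixel) for a 12px em with the default divisor.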
+ FT_Pos strength;
+ strength = FT_MulFix(face->units_per_EM, face->size->metrics.y_scale)
+ / SK_OUTLINE_EMBOLDEN_DIVISOR;
+ FT_Outline_Embolden(&glyph->outline, strength);
+ break;
+ case FT_GLYPH_FORMAT_BITMAP:
+ FT_GlyphSlot_Own_Bitmap(glyph);
+ FT_Bitmap_Embolden(glyph->library, &glyph->bitmap, kBitmapEmboldenStrength, 0);
+ break;
+ default:
+ SkDEBUGFAIL("unknown glyph format");
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkUtils.h"
+
+static SkUnichar next_utf8(const void** chars) {
+ return SkUTF8_NextUnichar((const char**)chars);
+}
+
+static SkUnichar next_utf16(const void** chars) {
+ return SkUTF16_NextUnichar((const uint16_t**)chars);
+}
+
+static SkUnichar next_utf32(const void** chars) {
+ const SkUnichar** uniChars = (const SkUnichar**)chars;
+ SkUnichar uni = **uniChars;
+ *uniChars += 1;
+ return uni;
+}
+
+typedef SkUnichar (*EncodingProc)(const void**);
+
+static EncodingProc find_encoding_proc(SkTypeface::Encoding enc) {
+ static const EncodingProc gProcs[] = {
+ next_utf8, next_utf16, next_utf32
+ };
+ SkASSERT((size_t)enc < SK_ARRAY_COUNT(gProcs));
+ return gProcs[enc];
+}
+
+int SkTypeface_FreeType::onCharsToGlyphs(const void* chars, Encoding encoding,
+ uint16_t glyphs[], int glyphCount) const
+{
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (!face) {
+ if (glyphs) {
+ sk_bzero(glyphs, glyphCount * sizeof(glyphs[0]));
+ }
+ return 0;
+ }
+
+ EncodingProc next_uni_proc = find_encoding_proc(encoding);
+
+ if (nullptr == glyphs) {
+ for (int i = 0; i < glyphCount; ++i) {
+ if (0 == FT_Get_Char_Index(face, next_uni_proc(&chars))) {
+ return i;
+ }
+ }
+ return glyphCount;
+ } else {
+ int first = glyphCount;
+ for (int i = 0; i < glyphCount; ++i) {
+ unsigned id = FT_Get_Char_Index(face, next_uni_proc(&chars));
+ glyphs[i] = SkToU16(id);
+ if (0 == id && i < first) {
+ first = i;
+ }
+ }
+ return first;
+ }
+}
+
+int SkTypeface_FreeType::onCountGlyphs() const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ return face ? face->num_glyphs : 0;
+}
+
+SkTypeface::LocalizedStrings* SkTypeface_FreeType::onCreateFamilyNameIterator() const {
+ SkTypeface::LocalizedStrings* nameIter =
+ SkOTUtils::LocalizedStrings_NameTable::CreateForFamilyNames(*this);
+ if (nullptr == nameIter) {
+ SkString familyName;
+ this->getFamilyName(&familyName);
+ SkString language("und"); //undetermined
+ nameIter = new SkOTUtils::LocalizedStrings_SingleName(familyName, language);
+ }
+ return nameIter;
+}
+
+int SkTypeface_FreeType::onGetTableTags(SkFontTableTag tags[]) const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+
+ FT_ULong tableCount = 0;
+ FT_Error error;
+
+ // When 'tag' is nullptr, returns number of tables in 'length'.
+ error = FT_Sfnt_Table_Info(face, 0, nullptr, &tableCount);
+ if (error) {
+ return 0;
+ }
+
+ if (tags) {
+ for (FT_ULong tableIndex = 0; tableIndex < tableCount; ++tableIndex) {
+ FT_ULong tableTag;
+ FT_ULong tablelength;
+ error = FT_Sfnt_Table_Info(face, tableIndex, &tableTag, &tablelength);
+ if (error) {
+ return 0;
+ }
+ tags[tableIndex] = static_cast<SkFontTableTag>(tableTag);
+ }
+ }
+ return tableCount;
+}
+
+size_t SkTypeface_FreeType::onGetTableData(SkFontTableTag tag, size_t offset,
+ size_t length, void* data) const
+{
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+
+ FT_ULong tableLength = 0;
+ FT_Error error;
+
+ // When 'length' is 0 it is overwritten with the full table length; 'offset' is ignored.
+ error = FT_Load_Sfnt_Table(face, tag, 0, nullptr, &tableLength);
+ if (error) {
+ return 0;
+ }
+
+ if (offset > tableLength) {
+ return 0;
+ }
+ FT_ULong size = SkTMin((FT_ULong)length, tableLength - (FT_ULong)offset);
+ if (data) {
+ error = FT_Load_Sfnt_Table(face, tag, offset, reinterpret_cast<FT_Byte*>(data), &size);
+ if (error) {
+ return 0;
+ }
+ }
+
+ return size;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+SkTypeface_FreeType::Scanner::Scanner() : fLibrary(nullptr) {
+ if (FT_New_Library(&gFTMemory, &fLibrary)) {
+ return;
+ }
+ FT_Add_Default_Modules(fLibrary);
+}
+SkTypeface_FreeType::Scanner::~Scanner() {
+ if (fLibrary) {
+ FT_Done_Library(fLibrary);
+ }
+}
+
+FT_Face SkTypeface_FreeType::Scanner::openFace(SkStreamAsset* stream, int ttcIndex,
+ FT_Stream ftStream) const
+{
+ if (fLibrary == nullptr) {
+ return nullptr;
+ }
+
+ FT_Open_Args args;
+ memset(&args, 0, sizeof(args));
+
+ const void* memoryBase = stream->getMemoryBase();
+
+ if (memoryBase) {
+ args.flags = FT_OPEN_MEMORY;
+ args.memory_base = (const FT_Byte*)memoryBase;
+ args.memory_size = stream->getLength();
+ } else {
+ memset(ftStream, 0, sizeof(*ftStream));
+ ftStream->size = stream->getLength();
+ ftStream->descriptor.pointer = stream;
+ ftStream->read = sk_ft_stream_io;
+ ftStream->close = sk_ft_stream_close;
+
+ args.flags = FT_OPEN_STREAM;
+ args.stream = ftStream;
+ }
+
+ FT_Face face;
+ if (FT_Open_Face(fLibrary, &args, ttcIndex, &face)) {
+ return nullptr;
+ }
+ return face;
+}
+
+bool SkTypeface_FreeType::Scanner::recognizedFont(SkStreamAsset* stream, int* numFaces) const {
+ SkAutoMutexAcquire libraryLock(fLibraryMutex);
+
+ FT_StreamRec streamRec;
+ FT_Face face = this->openFace(stream, -1, &streamRec);
+ if (nullptr == face) {
+ return false;
+ }
+
+ *numFaces = face->num_faces;
+
+ FT_Done_Face(face);
+ return true;
+}
+
+#include "SkTSearch.h"
+bool SkTypeface_FreeType::Scanner::scanFont(
+ SkStreamAsset* stream, int ttcIndex,
+ SkString* name, SkFontStyle* style, bool* isFixedPitch, AxisDefinitions* axes) const
+{
+ SkAutoMutexAcquire libraryLock(fLibraryMutex);
+
+ FT_StreamRec streamRec;
+ FT_Face face = this->openFace(stream, ttcIndex, &streamRec);
+ if (nullptr == face) {
+ return false;
+ }
+
+ int weight = SkFontStyle::kNormal_Weight;
+ int width = SkFontStyle::kNormal_Width;
+ SkFontStyle::Slant slant = SkFontStyle::kUpright_Slant;
+ if (face->style_flags & FT_STYLE_FLAG_BOLD) {
+ weight = SkFontStyle::kBold_Weight;
+ }
+ if (face->style_flags & FT_STYLE_FLAG_ITALIC) {
+ slant = SkFontStyle::kItalic_Slant;
+ }
+
+ PS_FontInfoRec psFontInfo;
+ TT_OS2* os2 = static_cast<TT_OS2*>(FT_Get_Sfnt_Table(face, ft_sfnt_os2));
+ if (os2 && os2->version != 0xffff) {
+ weight = os2->usWeightClass;
+ width = os2->usWidthClass;
+
+ // OS/2::fsSelection bit 9 indicates oblique.
+ if (SkToBool(os2->fsSelection & (1u << 9))) {
+ slant = SkFontStyle::kOblique_Slant;
+ }
+ } else if (0 == FT_Get_PS_Font_Info(face, &psFontInfo) && psFontInfo.weight) {
+ static const struct {
+ char const * const name;
+ int const weight;
+ } commonWeights [] = {
+ // There are probably more common names, but these are known to exist.
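+ // Note: SkStrLCSearch below binary-searches this array, so keep it sorted by name.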
+ { "all", SkFontStyle::kNormal_Weight }, // Multiple Masters usually default to normal.
+ { "black", SkFontStyle::kBlack_Weight },
+ { "bold", SkFontStyle::kBold_Weight },
+ { "book", (SkFontStyle::kNormal_Weight + SkFontStyle::kLight_Weight)/2 },
+ { "demi", SkFontStyle::kSemiBold_Weight },
+ { "demibold", SkFontStyle::kSemiBold_Weight },
+ { "extra", SkFontStyle::kExtraBold_Weight },
+ { "extrabold", SkFontStyle::kExtraBold_Weight },
+ { "extralight", SkFontStyle::kExtraLight_Weight },
+ { "hairline", SkFontStyle::kThin_Weight },
+ { "heavy", SkFontStyle::kBlack_Weight },
+ { "light", SkFontStyle::kLight_Weight },
+ { "medium", SkFontStyle::kMedium_Weight },
+ { "normal", SkFontStyle::kNormal_Weight },
+ { "plain", SkFontStyle::kNormal_Weight },
+ { "regular", SkFontStyle::kNormal_Weight },
+ { "roman", SkFontStyle::kNormal_Weight },
+ { "semibold", SkFontStyle::kSemiBold_Weight },
+ { "standard", SkFontStyle::kNormal_Weight },
+ { "thin", SkFontStyle::kThin_Weight },
+ { "ultra", SkFontStyle::kExtraBold_Weight },
+ { "ultrablack", SkFontStyle::kExtraBlack_Weight },
+ { "ultrabold", SkFontStyle::kExtraBold_Weight },
+ { "ultraheavy", SkFontStyle::kExtraBlack_Weight },
+ { "ultralight", SkFontStyle::kExtraLight_Weight },
+ };
+ int const index = SkStrLCSearch(&commonWeights[0].name, SK_ARRAY_COUNT(commonWeights),
+ psFontInfo.weight, sizeof(commonWeights[0]));
+ if (index >= 0) {
+ weight = commonWeights[index].weight;
+ } else {
+ SkDEBUGF(("Do not know weight for: %s (%s) \n", face->family_name, psFontInfo.weight));
+ }
+ }
+
+ if (name) {
+ name->set(face->family_name);
+ }
+ if (style) {
+ *style = SkFontStyle(weight, width, slant);
+ }
+ if (isFixedPitch) {
+ *isFixedPitch = FT_IS_FIXED_WIDTH(face);
+ }
+
+ if (axes && face->face_flags & FT_FACE_FLAG_MULTIPLE_MASTERS) {
+ FT_MM_Var* variations = nullptr;
+ FT_Error err = FT_Get_MM_Var(face, &variations);
+ if (err) {
+ SkDEBUGF(("INFO: font %s claims to have variations, but none found.\n",
+ face->family_name));
+ return false;
+ }
+ SkAutoFree autoFreeVariations(variations);
+
+ axes->reset(variations->num_axis);
+ for (FT_UInt i = 0; i < variations->num_axis; ++i) {
+ const FT_Var_Axis& ftAxis = variations->axis[i];
+ (*axes)[i].fTag = ftAxis.tag;
+ (*axes)[i].fMinimum = ftAxis.minimum;
+ (*axes)[i].fDefault = ftAxis.def;
+ (*axes)[i].fMaximum = ftAxis.maximum;
+ }
+ }
+
+ FT_Done_Face(face);
+ return true;
+}
+
+/*static*/ void SkTypeface_FreeType::Scanner::computeAxisValues(
+ AxisDefinitions axisDefinitions,
+ const SkFontMgr::FontParameters::Axis* requestedAxes, int requestedAxisCount,
+ SkFixed* axisValues,
+ const SkString& name)
+{
+ for (int i = 0; i < axisDefinitions.count(); ++i) {
+ const Scanner::AxisDefinition& axisDefinition = axisDefinitions[i];
+ const SkScalar axisMin = SkFixedToScalar(axisDefinition.fMinimum);
+ const SkScalar axisMax = SkFixedToScalar(axisDefinition.fMaximum);
+ axisValues[i] = axisDefinition.fDefault;
+ for (int j = 0; j < requestedAxisCount; ++j) {
+ const SkFontMgr::FontParameters::Axis& axisSpecified = requestedAxes[j];
+ if (axisDefinition.fTag == axisSpecified.fTag) {
+ const SkScalar axisValue = SkTPin(axisSpecified.fStyleValue, axisMin, axisMax);
+ if (axisSpecified.fStyleValue != axisValue) {
+ SkDEBUGF(("Requested font axis value out of range: "
+ "%s '%c%c%c%c' %f; pinned to %f.\n",
+ name.c_str(),
+ (axisDefinition.fTag >> 24) & 0xFF,
+ (axisDefinition.fTag >> 16) & 0xFF,
+ (axisDefinition.fTag >> 8) & 0xFF,
+ (axisDefinition.fTag ) & 0xFF,
+ SkScalarToDouble(axisSpecified.fStyleValue),
+ SkScalarToDouble(axisValue)));
+ }
+ axisValues[i] = SkScalarToFixed(axisValue);
+ break;
+ }
+ }
+ // TODO: warn on defaulted axis?
+ }
+
+ SkDEBUGCODE(
+ // Check for axis specified, but not matched in font.
+ for (int i = 0; i < requestedAxisCount; ++i) {
+ SkFourByteTag skTag = requestedAxes[i].fTag;
+ bool found = false;
+ for (int j = 0; j < axisDefinitions.count(); ++j) {
+ if (skTag == axisDefinitions[j].fTag) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ SkDEBUGF(("Requested font axis not found: %s '%c%c%c%c'\n",
+ name.c_str(),
+ (skTag >> 24) & 0xFF,
+ (skTag >> 16) & 0xFF,
+ (skTag >> 8) & 0xFF,
+ (skTag) & 0xFF));
+ }
+ }
+ )
+}
diff --git a/gfx/skia/skia/src/ports/SkFontHost_FreeType_common.cpp b/gfx/skia/skia/src/ports/SkFontHost_FreeType_common.cpp
new file mode 100644
index 000000000..71606f26e
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontHost_FreeType_common.cpp
@@ -0,0 +1,635 @@
+/*
+ * Copyright 2006-2012 The Android Open Source Project
+ * Copyright 2012 Mozilla Foundation
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmap.h"
+#include "SkCanvas.h"
+#include "SkColor.h"
+#include "SkColorPriv.h"
+#include "SkFDot6.h"
+#include "SkFontHost_FreeType_common.h"
+#include "SkPath.h"
+
+#include <ft2build.h>
+#include FT_FREETYPE_H
+#include FT_BITMAP_H
+#include FT_IMAGE_H
+#include FT_OUTLINE_H
+// In the past, FT_GlyphSlot_Own_Bitmap was defined in this header file.
+#include FT_SYNTHESIS_H
+
+// FT_LOAD_COLOR and the corresponding FT_Pixel_Mode::FT_PIXEL_MODE_BGRA
+// were introduced in FreeType 2.5.0.
+// The following may be removed once FreeType 2.5.0 is required to build.
+#ifndef FT_LOAD_COLOR
+# define FT_LOAD_COLOR ( 1L << 20 )
+# define FT_PIXEL_MODE_BGRA 7
+#endif
+
+//#define SK_SHOW_TEXT_BLIT_COVERAGE
+
+static FT_Pixel_Mode compute_pixel_mode(SkMask::Format format) {
+ switch (format) {
+ case SkMask::kBW_Format:
+ return FT_PIXEL_MODE_MONO;
+ case SkMask::kA8_Format:
+ default:
+ return FT_PIXEL_MODE_GRAY;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static uint16_t packTriple(U8CPU r, U8CPU g, U8CPU b) {
+#ifdef SK_SHOW_TEXT_BLIT_COVERAGE
+ r = SkTMax(r, (U8CPU)0x40);
+ g = SkTMax(g, (U8CPU)0x40);
+ b = SkTMax(b, (U8CPU)0x40);
+#endif
+ return SkPack888ToRGB16(r, g, b);
+}
+
+static uint16_t grayToRGB16(U8CPU gray) {
+#ifdef SK_SHOW_TEXT_BLIT_COVERAGE
+ gray = SkTMax(gray, (U8CPU)0x40);
+#endif
+ return SkPack888ToRGB16(gray, gray, gray);
+}
+
+static int bittst(const uint8_t data[], int bitOffset) {
+ SkASSERT(bitOffset >= 0);
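+ // Bits are stored MSB-first within each byte (FT_PIXEL_MODE_MONO layout),
+ // so bit 0 is the high bit of data[0].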
+ int lowBit = data[bitOffset >> 3] >> (~bitOffset & 7);
+ return lowBit & 1;
+}
+
+/**
+ * Copies an FT_Bitmap into an LCD16 SkMask with the same dimensions.
+ *
+ * Supported source pixel modes:
+ * FT_PIXEL_MODE_MONO
+ * FT_PIXEL_MODE_GRAY
+ * FT_PIXEL_MODE_LCD
+ * FT_PIXEL_MODE_LCD_V
+ */
+template<bool APPLY_PREBLEND>
+static void copyFT2LCD16(const FT_Bitmap& bitmap, const SkMask& mask, int lcdIsBGR,
+ const uint8_t* tableR, const uint8_t* tableG, const uint8_t* tableB)
+{
+ SkASSERT(SkMask::kLCD16_Format == mask.fFormat);
+ if (FT_PIXEL_MODE_LCD != bitmap.pixel_mode) {
+ SkASSERT(mask.fBounds.width() == static_cast<int>(bitmap.width));
+ }
+ if (FT_PIXEL_MODE_LCD_V != bitmap.pixel_mode) {
+ SkASSERT(mask.fBounds.height() == static_cast<int>(bitmap.rows));
+ }
+
+ const uint8_t* src = bitmap.buffer;
+ uint16_t* dst = reinterpret_cast<uint16_t*>(mask.fImage);
+ const size_t dstRB = mask.fRowBytes;
+
+ const int width = mask.fBounds.width();
+ const int height = mask.fBounds.height();
+
+ switch (bitmap.pixel_mode) {
+ case FT_PIXEL_MODE_MONO:
+ for (int y = height; y --> 0;) {
+ for (int x = 0; x < width; ++x) {
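+ // bittst() yields 0 or 1; negating it in uint16_t arithmetic gives
+ // 0x0000 or 0xFFFF, i.e. fully-off or fully-on RGB565 coverage.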
+ dst[x] = -bittst(src, x);
+ }
+ dst = (uint16_t*)((char*)dst + dstRB);
+ src += bitmap.pitch;
+ }
+ break;
+ case FT_PIXEL_MODE_GRAY:
+ for (int y = height; y --> 0;) {
+ for (int x = 0; x < width; ++x) {
+ dst[x] = grayToRGB16(src[x]);
+ }
+ dst = (uint16_t*)((char*)dst + dstRB);
+ src += bitmap.pitch;
+ }
+ break;
+ case FT_PIXEL_MODE_LCD:
+ SkASSERT(3 * mask.fBounds.width() == static_cast<int>(bitmap.width));
+ for (int y = height; y --> 0;) {
+ const uint8_t* triple = src;
+ if (lcdIsBGR) {
+ for (int x = 0; x < width; x++) {
+ dst[x] = packTriple(sk_apply_lut_if<APPLY_PREBLEND>(triple[2], tableR),
+ sk_apply_lut_if<APPLY_PREBLEND>(triple[1], tableG),
+ sk_apply_lut_if<APPLY_PREBLEND>(triple[0], tableB));
+ triple += 3;
+ }
+ } else {
+ for (int x = 0; x < width; x++) {
+ dst[x] = packTriple(sk_apply_lut_if<APPLY_PREBLEND>(triple[0], tableR),
+ sk_apply_lut_if<APPLY_PREBLEND>(triple[1], tableG),
+ sk_apply_lut_if<APPLY_PREBLEND>(triple[2], tableB));
+ triple += 3;
+ }
+ }
+ src += bitmap.pitch;
+ dst = (uint16_t*)((char*)dst + dstRB);
+ }
+ break;
+ case FT_PIXEL_MODE_LCD_V:
+ SkASSERT(3 * mask.fBounds.height() == static_cast<int>(bitmap.rows));
+ for (int y = height; y --> 0;) {
+ const uint8_t* srcR = src;
+ const uint8_t* srcG = srcR + bitmap.pitch;
+ const uint8_t* srcB = srcG + bitmap.pitch;
+ if (lcdIsBGR) {
+ SkTSwap(srcR, srcB);
+ }
+ for (int x = 0; x < width; x++) {
+ dst[x] = packTriple(sk_apply_lut_if<APPLY_PREBLEND>(*srcR++, tableR),
+ sk_apply_lut_if<APPLY_PREBLEND>(*srcG++, tableG),
+ sk_apply_lut_if<APPLY_PREBLEND>(*srcB++, tableB));
+ }
+ src += 3 * bitmap.pitch;
+ dst = (uint16_t*)((char*)dst + dstRB);
+ }
+ break;
+ default:
+ SkDEBUGF(("FT_Pixel_Mode %d", bitmap.pixel_mode));
+ SkDEBUGFAIL("unsupported FT_Pixel_Mode for LCD16");
+ break;
+ }
+}
+
+/**
+ * Copies an FT_Bitmap into an SkMask with the same dimensions.
+ *
+ * Legend: Y = Yes, N = No, NR = Never Requested, NP = Never Produced
+ *
+ * kBW kA8 k3D kARGB32 kLCD16
+ * FT_PIXEL_MODE_MONO Y Y NR N Y
+ * FT_PIXEL_MODE_GRAY N Y NR N Y
+ * FT_PIXEL_MODE_GRAY2 NP NP NR NP NP
+ * FT_PIXEL_MODE_GRAY4 NP NP NR NP NP
+ * FT_PIXEL_MODE_LCD NP NP NR NP NP
+ * FT_PIXEL_MODE_LCD_V NP NP NR NP NP
+ * FT_PIXEL_MODE_BGRA N N NR Y N
+ *
+ * TODO: All of these N need to be Y or otherwise ruled out.
+ */
+static void copyFTBitmap(const FT_Bitmap& srcFTBitmap, SkMask& dstMask) {
+ SkASSERTF(dstMask.fBounds.width() == static_cast<int>(srcFTBitmap.width),
+ "dstMask.fBounds.width() = %d\n"
+ "static_cast<int>(srcFTBitmap.width) = %d",
+ dstMask.fBounds.width(),
+ static_cast<int>(srcFTBitmap.width)
+ );
+ SkASSERTF(dstMask.fBounds.height() == static_cast<int>(srcFTBitmap.rows),
+ "dstMask.fBounds.height() = %d\n"
+ "static_cast<int>(srcFTBitmap.rows) = %d",
+ dstMask.fBounds.height(),
+ static_cast<int>(srcFTBitmap.rows)
+ );
+
+ const uint8_t* src = reinterpret_cast<const uint8_t*>(srcFTBitmap.buffer);
+ const FT_Pixel_Mode srcFormat = static_cast<FT_Pixel_Mode>(srcFTBitmap.pixel_mode);
+ // FT_Bitmap::pitch is an int and allowed to be negative.
+ const int srcPitch = srcFTBitmap.pitch;
+ const size_t srcRowBytes = SkTAbs(srcPitch);
+
+ uint8_t* dst = dstMask.fImage;
+ const SkMask::Format dstFormat = static_cast<SkMask::Format>(dstMask.fFormat);
+ const size_t dstRowBytes = dstMask.fRowBytes;
+
+ const size_t width = srcFTBitmap.width;
+ const size_t height = srcFTBitmap.rows;
+
+ if (SkMask::kLCD16_Format == dstFormat) {
+ copyFT2LCD16<false>(srcFTBitmap, dstMask, false, nullptr, nullptr, nullptr);
+ return;
+ }
+
+ if ((FT_PIXEL_MODE_MONO == srcFormat && SkMask::kBW_Format == dstFormat) ||
+ (FT_PIXEL_MODE_GRAY == srcFormat && SkMask::kA8_Format == dstFormat))
+ {
+ size_t commonRowBytes = SkTMin(srcRowBytes, dstRowBytes);
+ for (size_t y = height; y --> 0;) {
+ memcpy(dst, src, commonRowBytes);
+ src += srcPitch;
+ dst += dstRowBytes;
+ }
+ } else if (FT_PIXEL_MODE_MONO == srcFormat && SkMask::kA8_Format == dstFormat) {
+ for (size_t y = height; y --> 0;) {
+ uint8_t byte = 0;
+ int bits = 0;
+ const uint8_t* src_row = src;
+ uint8_t* dst_row = dst;
+ for (size_t x = width; x --> 0;) {
+ if (0 == bits) {
+ byte = *src_row++;
+ bits = 8;
+ }
+ *dst_row++ = byte & 0x80 ? 0xff : 0x00;
+ bits--;
+ byte <<= 1;
+ }
+ src += srcPitch;
+ dst += dstRowBytes;
+ }
+ } else if (FT_PIXEL_MODE_BGRA == srcFormat && SkMask::kARGB32_Format == dstFormat) {
+ // FT_PIXEL_MODE_BGRA is pre-multiplied.
+ for (size_t y = height; y --> 0;) {
+ const uint8_t* src_row = src;
+ SkPMColor* dst_row = reinterpret_cast<SkPMColor*>(dst);
+ for (size_t x = 0; x < width; ++x) {
+ uint8_t b = *src_row++;
+ uint8_t g = *src_row++;
+ uint8_t r = *src_row++;
+ uint8_t a = *src_row++;
+ *dst_row++ = SkPackARGB32(a, r, g, b);
+#ifdef SK_SHOW_TEXT_BLIT_COVERAGE
+ *(dst_row-1) = SkFourByteInterp256(*(dst_row-1), SK_ColorWHITE, 0x40);
+#endif
+ }
+ src += srcPitch;
+ dst += dstRowBytes;
+ }
+ } else {
+ SkDEBUGF(("FT_Pixel_Mode %d, SkMask::Format %d\n", srcFormat, dstFormat));
+ SkDEBUGFAIL("unsupported combination of FT_Pixel_Mode and SkMask::Format");
+ }
+}
+
+static inline int convert_8_to_1(unsigned byte) {
+ SkASSERT(byte <= 0xFF);
+ // Arbitrary decision: making the cutoff at 1/4 instead of 1/2 generally looks better.
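+ // With a 1/4 cutoff, any alpha of 0x40 or more sets the bit.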
+ return (byte >> 6) != 0;
+}
+
+static uint8_t pack_8_to_1(const uint8_t alpha[8]) {
+ unsigned bits = 0;
+ for (int i = 0; i < 8; ++i) {
+ bits <<= 1;
+ bits |= convert_8_to_1(alpha[i]);
+ }
+ return SkToU8(bits);
+}
+
+static void packA8ToA1(const SkMask& mask, const uint8_t* src, size_t srcRB) {
+ const int height = mask.fBounds.height();
+ const int width = mask.fBounds.width();
+ const int octs = width >> 3;
+ const int leftOverBits = width & 7;
+
+ uint8_t* dst = mask.fImage;
+ const int dstPad = mask.fRowBytes - SkAlign8(width)/8;
+ SkASSERT(dstPad >= 0);
+
+ const int srcPad = srcRB - width;
+ SkASSERT(srcPad >= 0);
+
+ for (int y = 0; y < height; ++y) {
+ for (int i = 0; i < octs; ++i) {
+ *dst++ = pack_8_to_1(src);
+ src += 8;
+ }
+ if (leftOverBits > 0) {
+ unsigned bits = 0;
+ int shift = 7;
+ for (int i = 0; i < leftOverBits; ++i, --shift) {
+ bits |= convert_8_to_1(*src++) << shift;
+ }
+ *dst++ = bits;
+ }
+ src += srcPad;
+ dst += dstPad;
+ }
+}
+
+inline SkMask::Format SkMaskFormat_for_SkColorType(SkColorType colorType) {
+ switch (colorType) {
+ case kAlpha_8_SkColorType:
+ return SkMask::kA8_Format;
+ case kN32_SkColorType:
+ return SkMask::kARGB32_Format;
+ default:
+ SkDEBUGFAIL("unsupported SkBitmap::Config");
+ return SkMask::kA8_Format;
+ }
+}
+
+inline SkColorType SkColorType_for_FTPixelMode(FT_Pixel_Mode pixel_mode) {
+ switch (pixel_mode) {
+ case FT_PIXEL_MODE_MONO:
+ case FT_PIXEL_MODE_GRAY:
+ return kAlpha_8_SkColorType;
+ case FT_PIXEL_MODE_BGRA:
+ return kN32_SkColorType;
+ default:
+ SkDEBUGFAIL("unsupported FT_PIXEL_MODE");
+ return kAlpha_8_SkColorType;
+ }
+}
+
+inline SkColorType SkColorType_for_SkMaskFormat(SkMask::Format format) {
+ switch (format) {
+ case SkMask::kBW_Format:
+ case SkMask::kA8_Format:
+ case SkMask::kLCD16_Format:
+ return kAlpha_8_SkColorType;
+ case SkMask::kARGB32_Format:
+ return kN32_SkColorType;
+ default:
+ SkDEBUGFAIL("unsupported destination SkBitmap::Config");
+ return kAlpha_8_SkColorType;
+ }
+}
+
+void SkScalerContext_FreeType_Base::generateGlyphImage(
+ FT_Face face,
+ const SkGlyph& glyph,
+ const SkMatrix& bitmapTransform)
+{
+ const bool doBGR = SkToBool(fRec.fFlags & SkScalerContext::kLCD_BGROrder_Flag);
+ const bool doVert = SkToBool(fRec.fFlags & SkScalerContext::kLCD_Vertical_Flag);
+
+ switch ( face->glyph->format ) {
+ case FT_GLYPH_FORMAT_OUTLINE: {
+ FT_Outline* outline = &face->glyph->outline;
+
+ int dx = 0, dy = 0;
+ if (fRec.fFlags & SkScalerContext::kSubpixelPositioning_Flag) {
+ dx = SkFixedToFDot6(glyph.getSubXFixed());
+ dy = SkFixedToFDot6(glyph.getSubYFixed());
+ // negate dy since freetype-y-goes-up and skia-y-goes-down
+ dy = -dy;
+ }
+ memset(glyph.fImage, 0, glyph.rowBytes() * glyph.fHeight);
+
+ if (SkMask::kLCD16_Format == glyph.fMaskFormat) {
+ FT_Outline_Translate(outline, dx, dy);
+ FT_Error err = FT_Render_Glyph(face->glyph, doVert ? FT_RENDER_MODE_LCD_V : FT_RENDER_MODE_LCD);
+ if (err) {
+ return;
+ }
+
+ SkMask mask;
+ glyph.toMask(&mask);
+
+ FT_GlyphSlotRec& ftGlyph = *face->glyph;
+
+ if (!SkIRect::Intersects(mask.fBounds,
+ SkIRect::MakeXYWH( ftGlyph.bitmap_left,
+ -ftGlyph.bitmap_top,
+ ftGlyph.bitmap.width,
+ ftGlyph.bitmap.rows)))
+ {
+ return;
+ }
+
+ // If the FT_Bitmap extent is larger, discard bits of the bitmap outside the mask.
+ // If the SkMask extent is larger, shrink mask to fit bitmap (clearing discarded).
+ unsigned char* origBuffer = ftGlyph.bitmap.buffer;
+ // First align the top left (origin).
+ if (-ftGlyph.bitmap_top < mask.fBounds.fTop) {
+ int32_t topDiff = mask.fBounds.fTop - (-ftGlyph.bitmap_top);
+ ftGlyph.bitmap.buffer += ftGlyph.bitmap.pitch * topDiff;
+ ftGlyph.bitmap.rows -= topDiff;
+ ftGlyph.bitmap_top = -mask.fBounds.fTop;
+ }
+ if (ftGlyph.bitmap_left < mask.fBounds.fLeft) {
+ int32_t leftDiff = mask.fBounds.fLeft - ftGlyph.bitmap_left;
+ ftGlyph.bitmap.buffer += leftDiff;
+ ftGlyph.bitmap.width -= leftDiff;
+ ftGlyph.bitmap_left = mask.fBounds.fLeft;
+ }
+ if (mask.fBounds.fTop < -ftGlyph.bitmap_top) {
+ mask.fImage += mask.fRowBytes * (-ftGlyph.bitmap_top - mask.fBounds.fTop);
+ mask.fBounds.fTop = -ftGlyph.bitmap_top;
+ }
+ if (mask.fBounds.fLeft < ftGlyph.bitmap_left) {
+ mask.fImage += sizeof(uint16_t) * (ftGlyph.bitmap_left - mask.fBounds.fLeft);
+ mask.fBounds.fLeft = ftGlyph.bitmap_left;
+ }
+ // Origins aligned, clean up the width and height.
+ int ftVertScale = (doVert ? 3 : 1);
+ int ftHoriScale = (doVert ? 1 : 3);
+ if (mask.fBounds.height() * ftVertScale < SkToInt(ftGlyph.bitmap.rows)) {
+ ftGlyph.bitmap.rows = mask.fBounds.height() * ftVertScale;
+ }
+ if (mask.fBounds.width() * ftHoriScale < SkToInt(ftGlyph.bitmap.width)) {
+ ftGlyph.bitmap.width = mask.fBounds.width() * ftHoriScale;
+ }
+ if (SkToInt(ftGlyph.bitmap.rows) < mask.fBounds.height() * ftVertScale) {
+ mask.fBounds.fBottom = mask.fBounds.fTop + ftGlyph.bitmap.rows / ftVertScale;
+ }
+ if (SkToInt(ftGlyph.bitmap.width) < mask.fBounds.width() * ftHoriScale) {
+ mask.fBounds.fRight = mask.fBounds.fLeft + ftGlyph.bitmap.width / ftHoriScale;
+ }
+
+ if (fPreBlend.isApplicable()) {
+ copyFT2LCD16<true>(ftGlyph.bitmap, mask, doBGR,
+ fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+ } else {
+ copyFT2LCD16<false>(ftGlyph.bitmap, mask, doBGR,
+ fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+ }
+ // Restore the buffer pointer so FreeType can properly free it.
+ ftGlyph.bitmap.buffer = origBuffer;
+ } else {
+ FT_BBox bbox;
+ FT_Bitmap target;
+ FT_Outline_Get_CBox(outline, &bbox);
+ /*
+ What we really want to do for subpixel is
+ offset(dx, dy)
+ compute_bounds
+ offset(-(bbox & ~63))
+ but that is two calls to offset, so we do the following, which
+ achieves the same thing with only one offset call.
+ */
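+ // For example, with dx = 16 (0.25px) and bbox.xMin = 100, translating by
+ // dx - ((bbox.xMin + dx) & ~63) = 16 - 64 = -48 leaves xMin at 52, the same
+ // result as translating by dx (xMin = 116) and then by -(116 & ~63) = -64.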
+ FT_Outline_Translate(outline, dx - ((bbox.xMin + dx) & ~63),
+ dy - ((bbox.yMin + dy) & ~63));
+
+ target.width = glyph.fWidth;
+ target.rows = glyph.fHeight;
+ target.pitch = glyph.rowBytes();
+ target.buffer = reinterpret_cast<uint8_t*>(glyph.fImage);
+ target.pixel_mode = compute_pixel_mode( (SkMask::Format)fRec.fMaskFormat);
+ target.num_grays = 256;
+
+ FT_Outline_Get_Bitmap(face->glyph->library, outline, &target);
+ }
+ } break;
+
+ case FT_GLYPH_FORMAT_BITMAP: {
+ FT_Pixel_Mode pixel_mode = static_cast<FT_Pixel_Mode>(face->glyph->bitmap.pixel_mode);
+ SkMask::Format maskFormat = static_cast<SkMask::Format>(glyph.fMaskFormat);
+
+ // Assume that the other formats do not exist.
+ SkASSERT(FT_PIXEL_MODE_MONO == pixel_mode ||
+ FT_PIXEL_MODE_GRAY == pixel_mode ||
+ FT_PIXEL_MODE_BGRA == pixel_mode);
+
+ // These are the only formats this ScalerContext should request.
+ SkASSERT(SkMask::kBW_Format == maskFormat ||
+ SkMask::kA8_Format == maskFormat ||
+ SkMask::kARGB32_Format == maskFormat ||
+ SkMask::kLCD16_Format == maskFormat);
+
+ // If no scaling needed, directly copy glyph bitmap.
+ if (bitmapTransform.isIdentity()) {
+ SkMask dstMask;
+ glyph.toMask(&dstMask);
+ copyFTBitmap(face->glyph->bitmap, dstMask);
+ break;
+ }
+
+ // Otherwise, scale the bitmap.
+
+ // Copy the FT_Bitmap into an SkBitmap (either A8 or ARGB)
+ SkBitmap unscaledBitmap;
+ // TODO: mark this as sRGB when the blits will be sRGB.
+ unscaledBitmap.allocPixels(SkImageInfo::Make(face->glyph->bitmap.width,
+ face->glyph->bitmap.rows,
+ SkColorType_for_FTPixelMode(pixel_mode),
+ kPremul_SkAlphaType));
+
+ SkMask unscaledBitmapAlias;
+ unscaledBitmapAlias.fImage = reinterpret_cast<uint8_t*>(unscaledBitmap.getPixels());
+ unscaledBitmapAlias.fBounds.set(0, 0, unscaledBitmap.width(), unscaledBitmap.height());
+ unscaledBitmapAlias.fRowBytes = unscaledBitmap.rowBytes();
+ unscaledBitmapAlias.fFormat = SkMaskFormat_for_SkColorType(unscaledBitmap.colorType());
+ copyFTBitmap(face->glyph->bitmap, unscaledBitmapAlias);
+
+ // Wrap the glyph's mask in a bitmap, unless the glyph's mask is BW or LCD.
+ // BW requires an A8 target for resizing, which can then be downsampled.
+ // LCD should use a 4x A8 target, which would then be downsampled.
+ // For simplicity, LCD uses A8 and the gray result is replicated into each channel.
+ int bitmapRowBytes = 0;
+ if (SkMask::kBW_Format != maskFormat && SkMask::kLCD16_Format != maskFormat) {
+ bitmapRowBytes = glyph.rowBytes();
+ }
+ SkBitmap dstBitmap;
+ // TODO: mark this as sRGB when the blits will be sRGB.
+ dstBitmap.setInfo(SkImageInfo::Make(glyph.fWidth, glyph.fHeight,
+ SkColorType_for_SkMaskFormat(maskFormat),
+ kPremul_SkAlphaType),
+ bitmapRowBytes);
+ if (SkMask::kBW_Format == maskFormat || SkMask::kLCD16_Format == maskFormat) {
+ dstBitmap.allocPixels();
+ } else {
+ dstBitmap.setPixels(glyph.fImage);
+ }
+
+ // Scale unscaledBitmap into dstBitmap.
+ SkCanvas canvas(dstBitmap);
+#ifdef SK_SHOW_TEXT_BLIT_COVERAGE
+ canvas.clear(0x33FF0000);
+#else
+ canvas.clear(SK_ColorTRANSPARENT);
+#endif
+ canvas.translate(-glyph.fLeft, -glyph.fTop);
+ canvas.concat(bitmapTransform);
+ canvas.translate(face->glyph->bitmap_left, -face->glyph->bitmap_top);
+
+ SkPaint paint;
+ paint.setFilterQuality(kMedium_SkFilterQuality);
+ canvas.drawBitmap(unscaledBitmap, 0, 0, &paint);
+
+ // If the destination is BW or LCD, convert from A8.
+ if (SkMask::kBW_Format == maskFormat) {
+ // Copy the A8 dstBitmap into the A1 glyph.fImage.
+ SkMask dstMask;
+ glyph.toMask(&dstMask);
+ packA8ToA1(dstMask, dstBitmap.getAddr8(0, 0), dstBitmap.rowBytes());
+ } else if (SkMask::kLCD16_Format == maskFormat) {
+ // Copy the A8 dstBitmap into the LCD16 glyph.fImage.
+ uint8_t* src = dstBitmap.getAddr8(0, 0);
+ uint16_t* dst = reinterpret_cast<uint16_t*>(glyph.fImage);
+ for (int y = dstBitmap.height(); y --> 0;) {
+ for (int x = 0; x < dstBitmap.width(); ++x) {
+ dst[x] = grayToRGB16(src[x]);
+ }
+ dst = (uint16_t*)((char*)dst + glyph.rowBytes());
+ src += dstBitmap.rowBytes();
+ }
+ }
+
+ } break;
+
+ default:
+ SkDEBUGFAIL("unknown glyph format");
+ memset(glyph.fImage, 0, glyph.rowBytes() * glyph.fHeight);
+ return;
+ }
+
+// We used to always do this pre-USE_COLOR_LUMINANCE, but with colorlum,
+// it is optional
+#if defined(SK_GAMMA_APPLY_TO_A8)
+ if (SkMask::kA8_Format == glyph.fMaskFormat && fPreBlend.isApplicable()) {
+ uint8_t* SK_RESTRICT dst = (uint8_t*)glyph.fImage;
+ unsigned rowBytes = glyph.rowBytes();
+
+ for (int y = glyph.fHeight - 1; y >= 0; --y) {
+ for (int x = glyph.fWidth - 1; x >= 0; --x) {
+ dst[x] = fPreBlend.fG[dst[x]];
+ }
+ dst += rowBytes;
+ }
+ }
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int move_proc(const FT_Vector* pt, void* ctx) {
+ SkPath* path = (SkPath*)ctx;
+ path->close(); // to close the previous contour (if any)
+ path->moveTo(SkFDot6ToScalar(pt->x), -SkFDot6ToScalar(pt->y));
+ return 0;
+}
+
+static int line_proc(const FT_Vector* pt, void* ctx) {
+ SkPath* path = (SkPath*)ctx;
+ path->lineTo(SkFDot6ToScalar(pt->x), -SkFDot6ToScalar(pt->y));
+ return 0;
+}
+
+static int quad_proc(const FT_Vector* pt0, const FT_Vector* pt1,
+ void* ctx) {
+ SkPath* path = (SkPath*)ctx;
+ path->quadTo(SkFDot6ToScalar(pt0->x), -SkFDot6ToScalar(pt0->y),
+ SkFDot6ToScalar(pt1->x), -SkFDot6ToScalar(pt1->y));
+ return 0;
+}
+
+static int cubic_proc(const FT_Vector* pt0, const FT_Vector* pt1,
+ const FT_Vector* pt2, void* ctx) {
+ SkPath* path = (SkPath*)ctx;
+ path->cubicTo(SkFDot6ToScalar(pt0->x), -SkFDot6ToScalar(pt0->y),
+ SkFDot6ToScalar(pt1->x), -SkFDot6ToScalar(pt1->y),
+ SkFDot6ToScalar(pt2->x), -SkFDot6ToScalar(pt2->y));
+ return 0;
+}
+
+void SkScalerContext_FreeType_Base::generateGlyphPath(FT_Face face,
+ SkPath* path)
+{
+ FT_Outline_Funcs funcs;
+
+ funcs.move_to = move_proc;
+ funcs.line_to = line_proc;
+ funcs.conic_to = quad_proc;
+ funcs.cubic_to = cubic_proc;
+ funcs.shift = 0;
+ funcs.delta = 0;
+
+ FT_Error err = FT_Outline_Decompose(&face->glyph->outline, &funcs, path);
+
+ if (err != 0) {
+ path->reset();
+ return;
+ }
+
+ path->close();
+}
diff --git a/gfx/skia/skia/src/ports/SkFontHost_FreeType_common.h b/gfx/skia/skia/src/ports/SkFontHost_FreeType_common.h
new file mode 100644
index 000000000..21e774866
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontHost_FreeType_common.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2006-2012 The Android Open Source Project
+ * Copyright 2012 Mozilla Foundation
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKFONTHOST_FREETYPE_COMMON_H_
+#define SKFONTHOST_FREETYPE_COMMON_H_
+
+#include "SkGlyph.h"
+#include "SkMutex.h"
+#include "SkScalerContext.h"
+#include "SkTypeface.h"
+#include "SkTypes.h"
+
+#include "SkFontMgr.h"
+
+#include <ft2build.h>
+#include FT_FREETYPE_H
+
+class SkScalerContext_FreeType_Base : public SkScalerContext {
+protected:
+ // See http://freetype.sourceforge.net/freetype2/docs/reference/ft2-bitmap_handling.html#FT_Bitmap_Embolden
+ // This value was chosen by eyeballing the result in Firefox and trying to match it.
+ static const FT_Pos kBitmapEmboldenStrength = 1 << 6;
+
+ SkScalerContext_FreeType_Base(SkTypeface* typeface, const SkScalerContextEffects& effects,
+ const SkDescriptor *desc)
+ : INHERITED(typeface, effects, desc)
+ {}
+
+ void generateGlyphImage(FT_Face face, const SkGlyph& glyph, const SkMatrix& bitmapTransform);
+ void generateGlyphPath(FT_Face face, SkPath* path);
+private:
+ typedef SkScalerContext INHERITED;
+};
+
+class SkTypeface_FreeType : public SkTypeface {
+public:
+ /** For SkFontMgrs to make use of our ability to extract
+ * name and style from a stream, using FreeType's API.
+ */
+ class Scanner : ::SkNoncopyable {
+ public:
+ Scanner();
+ ~Scanner();
+ struct AxisDefinition {
+ SkFourByteTag fTag;
+ SkFixed fMinimum;
+ SkFixed fDefault;
+ SkFixed fMaximum;
+ };
+ using AxisDefinitions = SkSTArray<4, AxisDefinition, true>;
+ bool recognizedFont(SkStreamAsset* stream, int* numFonts) const;
+ bool scanFont(SkStreamAsset* stream, int ttcIndex,
+ SkString* name, SkFontStyle* style, bool* isFixedPitch,
+ AxisDefinitions* axes) const;
+ static void computeAxisValues(
+ AxisDefinitions axisDefinitions,
+ const SkFontMgr::FontParameters::Axis* requestedAxis, int requestedAxisCount,
+ SkFixed* axisValues,
+ const SkString& name);
+
+ private:
+ FT_Face openFace(SkStreamAsset* stream, int ttcIndex, FT_Stream ftStream) const;
+ FT_Library fLibrary;
+ mutable SkMutex fLibraryMutex;
+ };
+
+protected:
+ SkTypeface_FreeType(const SkFontStyle& style, bool isFixedPitch)
+ : INHERITED(style, isFixedPitch)
+ {}
+
+ virtual SkScalerContext* onCreateScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor*) const override;
+ void onFilterRec(SkScalerContextRec*) const override;
+ SkAdvancedTypefaceMetrics* onGetAdvancedTypefaceMetrics(
+ PerGlyphInfo, const uint32_t*, uint32_t) const override;
+ int onGetUPEM() const override;
+ virtual bool onGetKerningPairAdjustments(const uint16_t glyphs[], int count,
+ int32_t adjustments[]) const override;
+ virtual int onCharsToGlyphs(const void* chars, Encoding, uint16_t glyphs[],
+ int glyphCount) const override;
+ int onCountGlyphs() const override;
+
+ LocalizedStrings* onCreateFamilyNameIterator() const override;
+
+ int onGetTableTags(SkFontTableTag tags[]) const override;
+ virtual size_t onGetTableData(SkFontTableTag, size_t offset,
+ size_t length, void* data) const override;
+
+private:
+ typedef SkTypeface INHERITED;
+};
+
+#endif // SKFONTHOST_FREETYPE_COMMON_H_
diff --git a/gfx/skia/skia/src/ports/SkFontHost_cairo.cpp b/gfx/skia/skia/src/ports/SkFontHost_cairo.cpp
new file mode 100644
index 000000000..cfde89ffc
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontHost_cairo.cpp
@@ -0,0 +1,846 @@
+
+/*
+ * Copyright 2012 Mozilla Foundation
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "cairo.h"
+#include "cairo-ft.h"
+
+#include "SkFontHost_FreeType_common.h"
+
+#include "SkAdvancedTypefaceMetrics.h"
+#include "SkFDot6.h"
+#include "SkPath.h"
+#include "SkScalerContext.h"
+#include "SkTypefaceCache.h"
+
+#include <cmath>
+
+#include <ft2build.h>
+#include FT_FREETYPE_H
+#include FT_OUTLINE_H
+
+// for FT_GlyphSlot_Embolden
+#ifdef FT_SYNTHESIS_H
+#include FT_SYNTHESIS_H
+#endif
+
+// for FT_Library_SetLcdFilter
+#ifdef FT_LCD_FILTER_H
+#include FT_LCD_FILTER_H
+#else
+typedef enum FT_LcdFilter_
+{
+ FT_LCD_FILTER_NONE = 0,
+ FT_LCD_FILTER_DEFAULT = 1,
+ FT_LCD_FILTER_LIGHT = 2,
+ FT_LCD_FILTER_LEGACY = 16,
+} FT_LcdFilter;
+#endif
+
+// If compiling with FreeType before 2.5.0
+#ifndef FT_LOAD_COLOR
+# define FT_LOAD_COLOR ( 1L << 20 )
+# define FT_PIXEL_MODE_BGRA 7
+#endif
+
+#ifndef SK_CAN_USE_DLOPEN
+#define SK_CAN_USE_DLOPEN 1
+#endif
+#if SK_CAN_USE_DLOPEN
+#include <dlfcn.h>
+#endif
+
+#ifndef SK_FONTHOST_CAIRO_STANDALONE
+#define SK_FONTHOST_CAIRO_STANDALONE 1
+#endif
+
+static cairo_user_data_key_t kSkTypefaceKey;
+
+static bool gFontHintingEnabled = true;
+static FT_Error (*gSetLcdFilter)(FT_Library, FT_LcdFilter) = nullptr;
+static void (*gGlyphSlotEmbolden)(FT_GlyphSlot) = nullptr;
+
+void SkInitCairoFT(bool fontHintingEnabled)
+{
+ gFontHintingEnabled = fontHintingEnabled;
+#if SK_CAN_USE_DLOPEN
+ gSetLcdFilter = (FT_Error (*)(FT_Library, FT_LcdFilter))dlsym(RTLD_DEFAULT, "FT_Library_SetLcdFilter");
+ gGlyphSlotEmbolden = (void (*)(FT_GlyphSlot))dlsym(RTLD_DEFAULT, "FT_GlyphSlot_Embolden");
+#else
+ gSetLcdFilter = &FT_Library_SetLcdFilter;
+ gGlyphSlotEmbolden = &FT_GlyphSlot_Embolden;
+#endif
+ // FT_Library_SetLcdFilter may be provided but have no effect if FreeType
+ // is built without FT_CONFIG_OPTION_SUBPIXEL_RENDERING.
+ if (gSetLcdFilter &&
+ gSetLcdFilter(nullptr, FT_LCD_FILTER_NONE) == FT_Err_Unimplemented_Feature) {
+ gSetLcdFilter = nullptr;
+ }
+}
+
+#ifndef CAIRO_HAS_FC_FONT
+typedef struct _FcPattern FcPattern;
+#endif
+
+template<> struct SkTUnref<FcPattern> {
+ void operator()(FcPattern* pattern) {
+#ifdef CAIRO_HAS_FC_FONT
+ if (pattern) {
+ FcPatternDestroy(pattern);
+ }
+#endif
+ }
+};
+
+class SkScalerContext_CairoFT : public SkScalerContext_FreeType_Base {
+public:
+ SkScalerContext_CairoFT(SkTypeface* typeface, const SkScalerContextEffects& effects, const SkDescriptor* desc,
+ cairo_font_face_t* fontFace, FcPattern* pattern);
+ virtual ~SkScalerContext_CairoFT();
+
+ bool isValid() const {
+ return fScaledFont != nullptr;
+ }
+
+protected:
+ virtual unsigned generateGlyphCount() override;
+ virtual uint16_t generateCharToGlyph(SkUnichar uniChar) override;
+ virtual void generateAdvance(SkGlyph* glyph) override;
+ virtual void generateMetrics(SkGlyph* glyph) override;
+ virtual void generateImage(const SkGlyph& glyph) override;
+ virtual void generatePath(const SkGlyph& glyph, SkPath* path) override;
+ virtual void generateFontMetrics(SkPaint::FontMetrics* metrics) override;
+ virtual SkUnichar generateGlyphToChar(uint16_t glyph) override;
+
+private:
+ bool computeShapeMatrix(const SkMatrix& m);
+ void prepareGlyph(FT_GlyphSlot glyph);
+ void fixVerticalLayoutBearing(FT_GlyphSlot glyph);
+
+#ifdef CAIRO_HAS_FC_FONT
+ void parsePattern(FcPattern* pattern);
+ void resolvePattern(FcPattern* pattern);
+#endif
+
+ cairo_scaled_font_t* fScaledFont;
+ FT_Int32 fLoadGlyphFlags;
+ FT_LcdFilter fLcdFilter;
+ SkScalar fScaleX;
+ SkScalar fScaleY;
+ SkMatrix fShapeMatrix;
+ FT_Matrix fShapeMatrixFT;
+ bool fHaveShape;
+};
+
+class CairoLockedFTFace {
+public:
+ CairoLockedFTFace(cairo_scaled_font_t* scaledFont)
+ : fScaledFont(scaledFont)
+ , fFace(cairo_ft_scaled_font_lock_face(scaledFont))
+ {}
+
+ ~CairoLockedFTFace()
+ {
+ cairo_ft_scaled_font_unlock_face(fScaledFont);
+ }
+
+ FT_Face getFace()
+ {
+ return fFace;
+ }
+
+private:
+ cairo_scaled_font_t* fScaledFont;
+ FT_Face fFace;
+};
+
+template<typename T> static bool isLCD(const T& rec) {
+ return SkMask::kLCD16_Format == rec.fMaskFormat;
+}
+
+static bool bothZero(SkScalar a, SkScalar b) {
+ return 0 == a && 0 == b;
+}
+
+// returns false if there is any non-90-rotation or skew
+static bool isAxisAligned(const SkScalerContext::Rec& rec) {
+ return 0 == rec.fPreSkewX &&
+ (bothZero(rec.fPost2x2[0][1], rec.fPost2x2[1][0]) ||
+ bothZero(rec.fPost2x2[0][0], rec.fPost2x2[1][1]));
+}
+
+class SkCairoFTTypeface : public SkTypeface {
+public:
+ static SkTypeface* CreateTypeface(cairo_font_face_t* fontFace, FT_Face face,
+ FcPattern* pattern = nullptr) {
+ SkASSERT(fontFace != nullptr);
+ SkASSERT(cairo_font_face_get_type(fontFace) == CAIRO_FONT_TYPE_FT);
+ SkASSERT(face != nullptr);
+
+ SkFontStyle style(face->style_flags & FT_STYLE_FLAG_BOLD ?
+ SkFontStyle::kBold_Weight : SkFontStyle::kNormal_Weight,
+ SkFontStyle::kNormal_Width,
+ face->style_flags & FT_STYLE_FLAG_ITALIC ?
+ SkFontStyle::kItalic_Slant : SkFontStyle::kUpright_Slant);
+
+ bool isFixedWidth = face->face_flags & FT_FACE_FLAG_FIXED_WIDTH;
+
+ return new SkCairoFTTypeface(style, isFixedWidth, fontFace, pattern);
+ }
+
+ virtual SkStreamAsset* onOpenStream(int*) const override { return nullptr; }
+
+ virtual SkAdvancedTypefaceMetrics*
+ onGetAdvancedTypefaceMetrics(PerGlyphInfo,
+ const uint32_t*, uint32_t) const override
+ {
+ SkDEBUGCODE(SkDebugf("SkCairoFTTypeface::onGetAdvancedTypefaceMetrics unimplemented\n"));
+ return nullptr;
+ }
+
+ virtual SkScalerContext* onCreateScalerContext(const SkScalerContextEffects& effects, const SkDescriptor* desc) const override
+ {
+ SkScalerContext_CairoFT* ctx =
+ new SkScalerContext_CairoFT(const_cast<SkCairoFTTypeface*>(this), effects, desc,
+ fFontFace, fPattern);
+ if (!ctx->isValid()) {
+ delete ctx;
+ return nullptr;
+ }
+ return ctx;
+ }
+
+ virtual void onFilterRec(SkScalerContextRec* rec) const override
+ {
+ // No subpixel AA unless enabled in Fontconfig.
+ if (!fPattern && isLCD(*rec)) {
+ rec->fMaskFormat = SkMask::kA8_Format;
+ }
+
+ // rotated text looks bad with hinting, so we disable it as needed
+ if (!gFontHintingEnabled || !isAxisAligned(*rec)) {
+ rec->setHinting(SkPaint::kNo_Hinting);
+ }
+
+ // Don't apply any gamma so that we match cairo-ft's results.
+ rec->ignorePreBlend();
+ }
+
+ virtual void onGetFontDescriptor(SkFontDescriptor*, bool*) const override
+ {
+ SkDEBUGCODE(SkDebugf("SkCairoFTTypeface::onGetFontDescriptor unimplemented\n"));
+ }
+
+ virtual int onCharsToGlyphs(void const*, SkTypeface::Encoding, uint16_t*, int) const override
+ {
+ return 0;
+ }
+
+ virtual int onCountGlyphs() const override
+ {
+ return 0;
+ }
+
+ virtual int onGetUPEM() const override
+ {
+ return 0;
+ }
+
+ virtual SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override
+ {
+ return nullptr;
+ }
+
+ virtual void onGetFamilyName(SkString* familyName) const override
+ {
+ familyName->reset();
+ }
+
+ virtual int onGetTableTags(SkFontTableTag*) const override
+ {
+ return 0;
+ }
+
+ virtual size_t onGetTableData(SkFontTableTag, size_t, size_t, void*) const override
+ {
+ return 0;
+ }
+
+private:
+
+ SkCairoFTTypeface(const SkFontStyle& style, bool isFixedWidth,
+ cairo_font_face_t* fontFace, FcPattern* pattern)
+ : SkTypeface(style, isFixedWidth)
+ , fFontFace(fontFace)
+ , fPattern(pattern)
+ {
+ cairo_font_face_set_user_data(fFontFace, &kSkTypefaceKey, this, nullptr);
+ cairo_font_face_reference(fFontFace);
+#ifdef CAIRO_HAS_FC_FONT
+ if (fPattern) {
+ FcPatternReference(fPattern);
+ }
+#endif
+ }
+
+ ~SkCairoFTTypeface()
+ {
+ cairo_font_face_set_user_data(fFontFace, &kSkTypefaceKey, nullptr, nullptr);
+ cairo_font_face_destroy(fFontFace);
+ }
+
+ cairo_font_face_t* fFontFace;
+ SkAutoTUnref<FcPattern> fPattern;
+};
+
+SkTypeface* SkCreateTypefaceFromCairoFTFontWithFontconfig(cairo_scaled_font_t* scaledFont, FcPattern* pattern)
+{
+ cairo_font_face_t* fontFace = cairo_scaled_font_get_font_face(scaledFont);
+ SkASSERT(cairo_font_face_status(fontFace) == CAIRO_STATUS_SUCCESS);
+
+ SkTypeface* typeface = reinterpret_cast<SkTypeface*>(cairo_font_face_get_user_data(fontFace, &kSkTypefaceKey));
+ if (typeface) {
+ typeface->ref();
+ } else {
+ CairoLockedFTFace faceLock(scaledFont);
+ if (FT_Face face = faceLock.getFace()) {
+ typeface = SkCairoFTTypeface::CreateTypeface(fontFace, face, pattern);
+ SkTypefaceCache::Add(typeface);
+ }
+ }
+
+ return typeface;
+}
+
+SkTypeface* SkCreateTypefaceFromCairoFTFont(cairo_scaled_font_t* scaledFont)
+{
+ return SkCreateTypefaceFromCairoFTFontWithFontconfig(scaledFont, nullptr);
+}
+
+SkScalerContext_CairoFT::SkScalerContext_CairoFT(SkTypeface* typeface, const SkScalerContextEffects& effects, const SkDescriptor* desc,
+ cairo_font_face_t* fontFace, FcPattern* pattern)
+ : SkScalerContext_FreeType_Base(typeface, effects, desc)
+ , fLcdFilter(FT_LCD_FILTER_NONE)
+{
+ SkMatrix matrix;
+ fRec.getSingleMatrix(&matrix);
+
+ cairo_matrix_t fontMatrix, ctMatrix;
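+ // cairo_matrix_init takes its 2x2 terms as (xx, yx, xy, yy), so Skia's
+ // scaleX, skewY, skewX and scaleY map to xx, yx, xy and yy respectively.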
+ cairo_matrix_init(&fontMatrix, matrix.getScaleX(), matrix.getSkewY(), matrix.getSkewX(), matrix.getScaleY(), 0.0, 0.0);
+ cairo_matrix_init_identity(&ctMatrix);
+
+ cairo_font_options_t *fontOptions = cairo_font_options_create();
+ fScaledFont = cairo_scaled_font_create(fontFace, &fontMatrix, &ctMatrix, fontOptions);
+ cairo_font_options_destroy(fontOptions);
+
+ computeShapeMatrix(matrix);
+
+ fRec.fFlags |= SkScalerContext::kEmbeddedBitmapText_Flag;
+
+#ifdef CAIRO_HAS_FC_FONT
+ resolvePattern(pattern);
+#endif
+
+ FT_Int32 loadFlags = FT_LOAD_DEFAULT;
+
+ if (SkMask::kBW_Format == fRec.fMaskFormat) {
+ if (fRec.getHinting() == SkPaint::kNo_Hinting) {
+ loadFlags |= FT_LOAD_NO_HINTING;
+ } else {
+ loadFlags = FT_LOAD_TARGET_MONO;
+ }
+ loadFlags |= FT_LOAD_MONOCHROME;
+ } else {
+ switch (fRec.getHinting()) {
+ case SkPaint::kNo_Hinting:
+ loadFlags |= FT_LOAD_NO_HINTING;
+ break;
+ case SkPaint::kSlight_Hinting:
+ loadFlags = FT_LOAD_TARGET_LIGHT; // This implies FORCE_AUTOHINT
+ break;
+ case SkPaint::kNormal_Hinting:
+ if (fRec.fFlags & SkScalerContext::kForceAutohinting_Flag) {
+ loadFlags |= FT_LOAD_FORCE_AUTOHINT;
+ }
+ break;
+ case SkPaint::kFull_Hinting:
+ if (isLCD(fRec)) {
+ if (fRec.fFlags & SkScalerContext::kLCD_Vertical_Flag) {
+ loadFlags = FT_LOAD_TARGET_LCD_V;
+ } else {
+ loadFlags = FT_LOAD_TARGET_LCD;
+ }
+ }
+ if (fRec.fFlags & SkScalerContext::kForceAutohinting_Flag) {
+ loadFlags |= FT_LOAD_FORCE_AUTOHINT;
+ }
+ break;
+ default:
+ SkDebugf("---------- UNKNOWN hinting %d\n", fRec.getHinting());
+ break;
+ }
+ }
+
+ // When hinting is globally disabled, also disable the autohinter so that
+ // even "tricky" fonts are left unhinted.
+ if (!gFontHintingEnabled) {
+ loadFlags |= FT_LOAD_NO_AUTOHINT;
+ }
+
+ if ((fRec.fFlags & SkScalerContext::kEmbeddedBitmapText_Flag) == 0) {
+ loadFlags |= FT_LOAD_NO_BITMAP;
+ }
+
+    // Always use FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH to get correct
+    // advances, as fontconfig and cairo do.
+ // See http://code.google.com/p/skia/issues/detail?id=222.
+ loadFlags |= FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH;
+
+ if (fRec.fFlags & SkScalerContext::kVertical_Flag) {
+ loadFlags |= FT_LOAD_VERTICAL_LAYOUT;
+ }
+
+ loadFlags |= FT_LOAD_COLOR;
+
+ fLoadGlyphFlags = loadFlags;
+}
+
+SkScalerContext_CairoFT::~SkScalerContext_CairoFT()
+{
+ cairo_scaled_font_destroy(fScaledFont);
+}
+
+#ifdef CAIRO_HAS_FC_FONT
+void SkScalerContext_CairoFT::parsePattern(FcPattern* pattern)
+{
+ FcBool antialias, autohint, bitmap, embolden, hinting, vertical;
+
+ if (FcPatternGetBool(pattern, FC_AUTOHINT, 0, &autohint) == FcResultMatch && autohint) {
+ fRec.fFlags |= SkScalerContext::kForceAutohinting_Flag;
+ }
+ if (FcPatternGetBool(pattern, FC_EMBOLDEN, 0, &embolden) == FcResultMatch && embolden) {
+ fRec.fFlags |= SkScalerContext::kEmbolden_Flag;
+ }
+ if (FcPatternGetBool(pattern, FC_VERTICAL_LAYOUT, 0, &vertical) == FcResultMatch && vertical) {
+ fRec.fFlags |= SkScalerContext::kVertical_Flag;
+ }
+
+ // Match cairo-ft's handling of embeddedbitmap:
+ // If AA is explicitly disabled, leave bitmaps enabled.
+ // Otherwise, disable embedded bitmaps unless explicitly enabled.
+ if (FcPatternGetBool(pattern, FC_ANTIALIAS, 0, &antialias) == FcResultMatch && !antialias) {
+ fRec.fMaskFormat = SkMask::kBW_Format;
+ } else if (FcPatternGetBool(pattern, FC_EMBEDDED_BITMAP, 0, &bitmap) != FcResultMatch || !bitmap) {
+ fRec.fFlags &= ~SkScalerContext::kEmbeddedBitmapText_Flag;
+ }
+
+ if (fRec.fMaskFormat != SkMask::kBW_Format) {
+ int rgba;
+ if (!isLCD(fRec) ||
+ FcPatternGetInteger(pattern, FC_RGBA, 0, &rgba) != FcResultMatch) {
+ rgba = FC_RGBA_UNKNOWN;
+ }
+ switch (rgba) {
+ case FC_RGBA_RGB:
+ break;
+ case FC_RGBA_BGR:
+ fRec.fFlags |= SkScalerContext::kLCD_BGROrder_Flag;
+ break;
+ case FC_RGBA_VRGB:
+ fRec.fFlags |= SkScalerContext::kLCD_Vertical_Flag;
+ break;
+ case FC_RGBA_VBGR:
+ fRec.fFlags |= SkScalerContext::kLCD_Vertical_Flag |
+ SkScalerContext::kLCD_BGROrder_Flag;
+ break;
+ default:
+ fRec.fMaskFormat = SkMask::kA8_Format;
+ break;
+ }
+
+ int filter;
+ if (isLCD(fRec)) {
+ if (FcPatternGetInteger(pattern, FC_LCD_FILTER, 0, &filter) != FcResultMatch) {
+ filter = FC_LCD_LEGACY;
+ }
+ switch (filter) {
+ case FC_LCD_NONE:
+ fLcdFilter = FT_LCD_FILTER_NONE;
+ break;
+ case FC_LCD_DEFAULT:
+ fLcdFilter = FT_LCD_FILTER_DEFAULT;
+ break;
+ case FC_LCD_LIGHT:
+ fLcdFilter = FT_LCD_FILTER_LIGHT;
+ break;
+ case FC_LCD_LEGACY:
+ default:
+ fLcdFilter = FT_LCD_FILTER_LEGACY;
+ break;
+ }
+ }
+ }
+
+ if (fRec.getHinting() != SkPaint::kNo_Hinting) {
+ // Hinting was requested, so check if the fontconfig pattern needs to override it.
+ // If hinting is either explicitly enabled by fontconfig or not configured, try to
+ // parse the hint style. Otherwise, ensure hinting is disabled.
+ int hintstyle;
+ if (FcPatternGetBool(pattern, FC_HINTING, 0, &hinting) != FcResultMatch || hinting) {
+ if (FcPatternGetInteger(pattern, FC_HINT_STYLE, 0, &hintstyle) != FcResultMatch) {
+ hintstyle = FC_HINT_FULL;
+ }
+ } else {
+ hintstyle = FC_HINT_NONE;
+ }
+ switch (hintstyle) {
+ case FC_HINT_NONE:
+ fRec.setHinting(SkPaint::kNo_Hinting);
+ break;
+ case FC_HINT_SLIGHT:
+ fRec.setHinting(SkPaint::kSlight_Hinting);
+ break;
+ case FC_HINT_MEDIUM:
+ default:
+ fRec.setHinting(SkPaint::kNormal_Hinting);
+ break;
+ case FC_HINT_FULL:
+ fRec.setHinting(SkPaint::kFull_Hinting);
+ break;
+ }
+ }
+}
+
+void SkScalerContext_CairoFT::resolvePattern(FcPattern* pattern)
+{
+ if (!pattern) {
+ return;
+ }
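+    // If the pattern lacks a pixel size, re-run fontconfig substitution and matching at the
+    // computed scale so that any size-dependent configuration is applied.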
+ FcValue value;
+ if (FcPatternGet(pattern, FC_PIXEL_SIZE, 0, &value) == FcResultNoMatch) {
+ SkAutoTUnref<FcPattern> scalePattern(FcPatternDuplicate(pattern));
+ if (scalePattern &&
+ FcPatternAddDouble(scalePattern, FC_PIXEL_SIZE, fScaleY) &&
+ FcConfigSubstitute(nullptr, scalePattern, FcMatchPattern)) {
+ FcDefaultSubstitute(scalePattern);
+ FcResult result;
+ SkAutoTUnref<FcPattern> resolved(FcFontMatch(nullptr, scalePattern, &result));
+ if (resolved) {
+ parsePattern(resolved);
+ return;
+ }
+ }
+ }
+ parsePattern(pattern);
+}
+#endif
+
+bool SkScalerContext_CairoFT::computeShapeMatrix(const SkMatrix& m)
+{
+ // Compute a shape matrix compatible with Cairo's _compute_transform.
+ // Finds major/minor scales and uses them to normalize the transform.
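+    // For example, a 12px face rotated by 30 degrees gives scaleX = 10.392, skewY = 6,
+    // det = 144, so major = hypot(10.392, 6) = 12 and minor = |det| / major = 12.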
+ double scaleX = m.getScaleX();
+ double skewX = m.getSkewX();
+ double skewY = m.getSkewY();
+ double scaleY = m.getScaleY();
+ double det = scaleX * scaleY - skewY * skewX;
+ if (!std::isfinite(det)) {
+ fScaleX = fRec.fTextSize * fRec.fPreScaleX;
+ fScaleY = fRec.fTextSize;
+ fHaveShape = false;
+ return false;
+ }
+ double major = det != 0.0 ? hypot(scaleX, skewY) : 0.0;
+ double minor = major != 0.0 ? fabs(det) / major : 0.0;
+    // Limit the scales to be at least 1.
+ major = SkTMax(major, 1.0);
+ minor = SkTMax(minor, 1.0);
+
+ // If the font is not scalable, then choose the best available size.
+ CairoLockedFTFace faceLock(fScaledFont);
+ FT_Face face = faceLock.getFace();
+ if (face && !FT_IS_SCALABLE(face)) {
+ double bestDist = DBL_MAX;
+ FT_Int bestSize = -1;
+ for (FT_Int i = 0; i < face->num_fixed_sizes; i++) {
+            // Distance is positive if the strike is larger than the desired size,
+            // or negative if smaller. If a smaller strike was previously chosen
+            // (bestDist < 0), then prefer a larger strike. Otherwise, minimize distance.
+ double dist = face->available_sizes[i].y_ppem / 64.0 - minor;
+ if (bestDist < 0 ? dist >= bestDist : fabs(dist) <= bestDist) {
+ bestDist = dist;
+ bestSize = i;
+ }
+ }
+ if (bestSize < 0) {
+ fScaleX = fRec.fTextSize * fRec.fPreScaleX;
+ fScaleY = fRec.fTextSize;
+ fHaveShape = false;
+ return false;
+ }
+ major = face->available_sizes[bestSize].x_ppem / 64.0;
+ minor = face->available_sizes[bestSize].y_ppem / 64.0;
+ fHaveShape = true;
+ } else {
+ fHaveShape = !m.isScaleTranslate();
+ }
+
+ fScaleX = SkDoubleToScalar(major);
+ fScaleY = SkDoubleToScalar(minor);
+
+ if (fHaveShape) {
+ // Normalize the transform and convert to fixed-point.
+ fShapeMatrix = m;
+ fShapeMatrix.preScale(SkDoubleToScalar(1.0 / major), SkDoubleToScalar(1.0 / minor));
+
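+        // FT_Matrix coefficients are 16.16 fixed point; the skew terms are negated to
+        // convert between Skia's y-down and FreeType's y-up conventions.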
+ fShapeMatrixFT.xx = SkScalarToFixed(fShapeMatrix.getScaleX());
+ fShapeMatrixFT.yx = SkScalarToFixed(-fShapeMatrix.getSkewY());
+ fShapeMatrixFT.xy = SkScalarToFixed(-fShapeMatrix.getSkewX());
+ fShapeMatrixFT.yy = SkScalarToFixed(fShapeMatrix.getScaleY());
+ }
+ return true;
+}
+
+unsigned SkScalerContext_CairoFT::generateGlyphCount()
+{
+ CairoLockedFTFace faceLock(fScaledFont);
+ return faceLock.getFace()->num_glyphs;
+}
+
+uint16_t SkScalerContext_CairoFT::generateCharToGlyph(SkUnichar uniChar)
+{
+ CairoLockedFTFace faceLock(fScaledFont);
+ return SkToU16(FT_Get_Char_Index(faceLock.getFace(), uniChar));
+}
+
+void SkScalerContext_CairoFT::generateAdvance(SkGlyph* glyph)
+{
+ generateMetrics(glyph);
+}
+
+void SkScalerContext_CairoFT::prepareGlyph(FT_GlyphSlot glyph)
+{
+ if (fRec.fFlags & SkScalerContext::kEmbolden_Flag &&
+ gGlyphSlotEmbolden) {
+ gGlyphSlotEmbolden(glyph);
+ }
+ if (fRec.fFlags & SkScalerContext::kVertical_Flag) {
+ fixVerticalLayoutBearing(glyph);
+ }
+}
+
+void SkScalerContext_CairoFT::fixVerticalLayoutBearing(FT_GlyphSlot glyph)
+{
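+    // Shift the glyph from the horizontal origin to the vertical origin using the
+    // difference of its bearings (26.6 fixed-point units).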
+ FT_Vector vector;
+ vector.x = glyph->metrics.vertBearingX - glyph->metrics.horiBearingX;
+ vector.y = -glyph->metrics.vertBearingY - glyph->metrics.horiBearingY;
+ if (glyph->format == FT_GLYPH_FORMAT_OUTLINE) {
+ if (fHaveShape) {
+ FT_Vector_Transform(&vector, &fShapeMatrixFT);
+ }
+ FT_Outline_Translate(&glyph->outline, vector.x, vector.y);
+ } else if (glyph->format == FT_GLYPH_FORMAT_BITMAP) {
+ glyph->bitmap_left += SkFDot6Floor(vector.x);
+ glyph->bitmap_top += SkFDot6Floor(vector.y);
+ }
+}
+
+void SkScalerContext_CairoFT::generateMetrics(SkGlyph* glyph)
+{
+ SkASSERT(fScaledFont != nullptr);
+
+ glyph->zeroMetrics();
+
+ CairoLockedFTFace faceLock(fScaledFont);
+ FT_Face face = faceLock.getFace();
+
+    FT_Error err = FT_Load_Glyph(face, glyph->getGlyphID(), fLoadGlyphFlags);
+ if (err != 0) {
+ return;
+ }
+
+ prepareGlyph(face->glyph);
+
+ switch (face->glyph->format) {
+ case FT_GLYPH_FORMAT_OUTLINE:
+ if (!face->glyph->outline.n_contours) {
+ break;
+ }
+
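+        // The control box is in 26.6 fixed point; round it out to whole-pixel boundaries.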
+ FT_BBox bbox;
+ FT_Outline_Get_CBox(&face->glyph->outline, &bbox);
+ bbox.xMin &= ~63;
+ bbox.yMin &= ~63;
+ bbox.xMax = (bbox.xMax + 63) & ~63;
+ bbox.yMax = (bbox.yMax + 63) & ~63;
+ glyph->fWidth = SkToU16(SkFDot6Floor(bbox.xMax - bbox.xMin));
+ glyph->fHeight = SkToU16(SkFDot6Floor(bbox.yMax - bbox.yMin));
+ glyph->fTop = -SkToS16(SkFDot6Floor(bbox.yMax));
+ glyph->fLeft = SkToS16(SkFDot6Floor(bbox.xMin));
+
+ if (isLCD(fRec)) {
+ // In FreeType < 2.8.1, LCD filtering, if explicitly used, may
+ // add padding to the glyph. When not used, there is no padding.
+ // As of 2.8.1, LCD filtering is now always supported and may
+ // add padding even if an LCD filter is not explicitly set.
+ // Regardless, if no LCD filtering is used, or if LCD filtering
+ // doesn't add padding, it is safe to modify the glyph's bounds
+ // here. generateGlyphImage will detect if the mask is smaller
+ // than the bounds and clip things appropriately.
+ if (fRec.fFlags & kLCD_Vertical_Flag) {
+ glyph->fTop -= 1;
+ glyph->fHeight += 2;
+ } else {
+ glyph->fLeft -= 1;
+ glyph->fWidth += 2;
+ }
+ }
+ break;
+ case FT_GLYPH_FORMAT_BITMAP:
+ if (face->glyph->bitmap.pixel_mode == FT_PIXEL_MODE_BGRA) {
+ glyph->fMaskFormat = SkMask::kARGB32_Format;
+ }
+
+ if (isLCD(fRec)) {
+ fRec.fMaskFormat = SkMask::kA8_Format;
+ }
+
+ if (fHaveShape) {
+ // Ensure filtering is preserved when the bitmap is transformed.
+ // Otherwise, the result will look horrifically aliased.
+ if (fRec.fMaskFormat == SkMask::kBW_Format) {
+ fRec.fMaskFormat = SkMask::kA8_Format;
+ }
+
+ // Apply the shape matrix to the glyph's bounding box.
+ SkRect srcRect = SkRect::MakeXYWH(
+ SkIntToScalar(face->glyph->bitmap_left),
+ -SkIntToScalar(face->glyph->bitmap_top),
+ SkIntToScalar(face->glyph->bitmap.width),
+ SkIntToScalar(face->glyph->bitmap.rows));
+ SkRect destRect;
+ fShapeMatrix.mapRect(&destRect, srcRect);
+ SkIRect glyphRect = destRect.roundOut();
+ glyph->fWidth = SkToU16(glyphRect.width());
+ glyph->fHeight = SkToU16(glyphRect.height());
+ glyph->fTop = SkToS16(SkScalarRoundToInt(destRect.fTop));
+ glyph->fLeft = SkToS16(SkScalarRoundToInt(destRect.fLeft));
+ } else {
+ glyph->fWidth = SkToU16(face->glyph->bitmap.width);
+ glyph->fHeight = SkToU16(face->glyph->bitmap.rows);
+ glyph->fTop = -SkToS16(face->glyph->bitmap_top);
+ glyph->fLeft = SkToS16(face->glyph->bitmap_left);
+ }
+ break;
+ default:
+ SkDEBUGFAIL("unknown glyph format");
+ return;
+ }
+
+ if (fRec.fFlags & SkScalerContext::kVertical_Flag) {
+ glyph->fAdvanceX = -SkFDot6ToFloat(face->glyph->advance.x);
+ glyph->fAdvanceY = SkFDot6ToFloat(face->glyph->advance.y);
+ } else {
+ glyph->fAdvanceX = SkFDot6ToFloat(face->glyph->advance.x);
+ glyph->fAdvanceY = -SkFDot6ToFloat(face->glyph->advance.y);
+ }
+}
+
+void SkScalerContext_CairoFT::generateImage(const SkGlyph& glyph)
+{
+ SkASSERT(fScaledFont != nullptr);
+ CairoLockedFTFace faceLock(fScaledFont);
+ FT_Face face = faceLock.getFace();
+
+ FT_Error err = FT_Load_Glyph(face, glyph.getGlyphID(), fLoadGlyphFlags);
+
+ if (err != 0) {
+ memset(glyph.fImage, 0, glyph.rowBytes() * glyph.fHeight);
+ return;
+ }
+
+ prepareGlyph(face->glyph);
+
+ bool useLcdFilter =
+ face->glyph->format == FT_GLYPH_FORMAT_OUTLINE &&
+ isLCD(glyph) &&
+ gSetLcdFilter;
+ if (useLcdFilter) {
+ gSetLcdFilter(face->glyph->library, fLcdFilter);
+ }
+
+ SkMatrix matrix;
+ if (face->glyph->format == FT_GLYPH_FORMAT_BITMAP &&
+ fHaveShape) {
+ matrix = fShapeMatrix;
+ } else {
+ matrix.setIdentity();
+ }
+ generateGlyphImage(face, glyph, matrix);
+
+ if (useLcdFilter) {
+ gSetLcdFilter(face->glyph->library, FT_LCD_FILTER_NONE);
+ }
+}
+
+void SkScalerContext_CairoFT::generatePath(const SkGlyph& glyph, SkPath* path)
+{
+ SkASSERT(fScaledFont != nullptr);
+ CairoLockedFTFace faceLock(fScaledFont);
+ FT_Face face = faceLock.getFace();
+
+ SkASSERT(path);
+
+ uint32_t flags = fLoadGlyphFlags;
+ flags |= FT_LOAD_NO_BITMAP; // ignore embedded bitmaps so we're sure to get the outline
+ flags &= ~FT_LOAD_RENDER; // don't scan convert (we just want the outline)
+
+ FT_Error err = FT_Load_Glyph(face, glyph.getGlyphID(), flags);
+
+ if (err != 0) {
+ path->reset();
+ return;
+ }
+
+ prepareGlyph(face->glyph);
+
+ generateGlyphPath(face, path);
+}
+
+void SkScalerContext_CairoFT::generateFontMetrics(SkPaint::FontMetrics* metrics)
+{
+ if (metrics) {
+ memset(metrics, 0, sizeof(SkPaint::FontMetrics));
+ }
+}
+
+SkUnichar SkScalerContext_CairoFT::generateGlyphToChar(uint16_t glyph)
+{
+ SkASSERT(fScaledFont != nullptr);
+ CairoLockedFTFace faceLock(fScaledFont);
+ FT_Face face = faceLock.getFace();
+
+ FT_UInt glyphIndex;
+ SkUnichar charCode = FT_Get_First_Char(face, &glyphIndex);
+ while (glyphIndex != 0) {
+ if (glyphIndex == glyph) {
+ return charCode;
+ }
+ charCode = FT_Get_Next_Char(face, charCode, &glyphIndex);
+ }
+
+ return 0;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkFontMgr.h"
+
+SkFontMgr* SkFontMgr::Factory() {
+ // todo
+ return nullptr;
+}
+
diff --git a/gfx/skia/skia/src/ports/SkFontHost_mac.cpp b/gfx/skia/skia/src/ports/SkFontHost_mac.cpp
new file mode 100644
index 000000000..da1399504
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontHost_mac.cpp
@@ -0,0 +1,2621 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h" // Keep this before any #ifdef ...
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#ifdef SK_BUILD_FOR_MAC
+#import <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreText/CoreText.h>
+#include <CoreText/CTFontManager.h>
+#include <CoreGraphics/CoreGraphics.h>
+#include <CoreFoundation/CoreFoundation.h>
+#endif
+
+#include "SkAdvancedTypefaceMetrics.h"
+#include "SkCGUtils.h"
+#include "SkColorPriv.h"
+#include "SkDescriptor.h"
+#include "SkEndian.h"
+#include "SkFloatingPoint.h"
+#include "SkFontDescriptor.h"
+#include "SkFontMgr.h"
+#include "SkGlyph.h"
+#include "SkMakeUnique.h"
+#include "SkMaskGamma.h"
+#include "SkMathPriv.h"
+#include "SkMutex.h"
+#include "SkOTTable_glyf.h"
+#include "SkOTTable_head.h"
+#include "SkOTTable_hhea.h"
+#include "SkOTTable_loca.h"
+#include "SkOTUtils.h"
+#include "SkOnce.h"
+#include "SkPaint.h"
+#include "SkPath.h"
+#include "SkSFNTHeader.h"
+#include "SkStream.h"
+#include "SkString.h"
+#include "SkTemplates.h"
+#include "SkTypefaceCache.h"
+#include "SkTypeface_mac.h"
+#include "SkUtils.h"
+
+#include <dlfcn.h>
+
+// Experimental code to use a global lock whenever we access CG, to see if this reduces
+// crashes in Chrome
+#define USE_GLOBAL_MUTEX_FOR_CG_ACCESS
+
+#ifdef USE_GLOBAL_MUTEX_FOR_CG_ACCESS
+ SK_DECLARE_STATIC_MUTEX(gCGMutex);
+ #define AUTO_CG_LOCK() SkAutoMutexAcquire amc(gCGMutex)
+#else
+ #define AUTO_CG_LOCK()
+#endif
+
+// Set to make glyph bounding boxes visible.
+#define SK_SHOW_TEXT_BLIT_COVERAGE 0
+
+class SkScalerContext_Mac;
+
+// CTFontManagerCopyAvailableFontFamilyNames() is not always available, so we
+// provide a wrapper here that will return an empty array if need be.
+static CFArrayRef SkCTFontManagerCopyAvailableFontFamilyNames() {
+#ifdef SK_BUILD_FOR_IOS
+ return CFArrayCreate(nullptr, nullptr, 0, nullptr);
+#else
+ return CTFontManagerCopyAvailableFontFamilyNames();
+#endif
+}
+
+
+// Being templated and taking const T* prevents calling
+// CFSafeRelease(autoCFRelease) through implicit conversion.
+template <typename T> static void CFSafeRelease(/*CFTypeRef*/const T* cfTypeRef) {
+ if (cfTypeRef) {
+ CFRelease(cfTypeRef);
+ }
+}
+
+// Being templated and taking const T* prevents calling
+// CFSafeRetain(autoCFRelease) through implicit conversion.
+template <typename T> static void CFSafeRetain(/*CFTypeRef*/const T* cfTypeRef) {
+ if (cfTypeRef) {
+ CFRetain(cfTypeRef);
+ }
+}
+
+/** Acts like a CFRef, but calls CFSafeRelease when it goes out of scope. */
+template<typename CFRef> class AutoCFRelease : private SkNoncopyable {
+public:
+ explicit AutoCFRelease(CFRef cfRef = nullptr) : fCFRef(cfRef) { }
+ ~AutoCFRelease() { CFSafeRelease(fCFRef); }
+
+ void reset(CFRef that = nullptr) {
+ if (that != fCFRef) {
+ CFSafeRelease(fCFRef);
+ fCFRef = that;
+ }
+ }
+
+ CFRef release() {
+ CFRef self = fCFRef;
+ fCFRef = nullptr;
+ return self;
+ }
+
+ operator CFRef() const { return fCFRef; }
+ CFRef get() const { return fCFRef; }
+
+ CFRef* operator&() { SkASSERT(fCFRef == nullptr); return &fCFRef; }
+private:
+ CFRef fCFRef;
+};
+
+static CFStringRef make_CFString(const char str[]) {
+ return CFStringCreateWithCString(nullptr, str, kCFStringEncodingUTF8);
+}
+
+template<typename T> class AutoCGTable : SkNoncopyable {
+public:
+ AutoCGTable(CGFontRef font)
+ //Undocumented: the tag parameter in this call is expected in machine order and not BE order.
+ : fCFData(CGFontCopyTableForTag(font, SkSetFourByteTag(T::TAG0, T::TAG1, T::TAG2, T::TAG3)))
+ , fData(fCFData ? reinterpret_cast<const T*>(CFDataGetBytePtr(fCFData)) : nullptr)
+ { }
+
+ const T* operator->() const { return fData; }
+
+private:
+ AutoCFRelease<CFDataRef> fCFData;
+public:
+ const T* fData;
+};
+
+// inline versions of these rect helpers
+
+static bool CGRectIsEmpty_inline(const CGRect& rect) {
+ return rect.size.width <= 0 || rect.size.height <= 0;
+}
+
+static CGFloat CGRectGetMinX_inline(const CGRect& rect) {
+ return rect.origin.x;
+}
+
+static CGFloat CGRectGetMaxX_inline(const CGRect& rect) {
+ return rect.origin.x + rect.size.width;
+}
+
+static CGFloat CGRectGetMinY_inline(const CGRect& rect) {
+ return rect.origin.y;
+}
+
+static CGFloat CGRectGetMaxY_inline(const CGRect& rect) {
+ return rect.origin.y + rect.size.height;
+}
+
+static CGFloat CGRectGetWidth_inline(const CGRect& rect) {
+ return rect.size.width;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void sk_memset_rect32(uint32_t* ptr, uint32_t value,
+ int width, int height, size_t rowBytes) {
+ SkASSERT(width);
+ SkASSERT(width * sizeof(uint32_t) <= rowBytes);
+
+ if (width >= 32) {
+ while (height) {
+ sk_memset32(ptr, value, width);
+ ptr = (uint32_t*)((char*)ptr + rowBytes);
+ height -= 1;
+ }
+ return;
+ }
+
+ rowBytes -= width * sizeof(uint32_t);
+
+ if (width >= 8) {
+ while (height) {
+ int w = width;
+ do {
+ *ptr++ = value; *ptr++ = value;
+ *ptr++ = value; *ptr++ = value;
+ *ptr++ = value; *ptr++ = value;
+ *ptr++ = value; *ptr++ = value;
+ w -= 8;
+ } while (w >= 8);
+ while (--w >= 0) {
+ *ptr++ = value;
+ }
+ ptr = (uint32_t*)((char*)ptr + rowBytes);
+ height -= 1;
+ }
+ } else {
+ while (height) {
+ int w = width;
+ do {
+ *ptr++ = value;
+ } while (--w > 0);
+ ptr = (uint32_t*)((char*)ptr + rowBytes);
+ height -= 1;
+ }
+ }
+}
+
+#include <sys/utsname.h>
+
+typedef uint32_t CGRGBPixel;
+
+static unsigned CGRGBPixel_getAlpha(CGRGBPixel pixel) {
+ return pixel & 0xFF;
+}
+
+static const char FONT_DEFAULT_NAME[] = "Lucida Sans";
+
+// See Source/WebKit/chromium/base/mac/mac_util.mm DarwinMajorVersionInternal for original source.
+static int readVersion() {
+ struct utsname info;
+ if (uname(&info) != 0) {
+ SkDebugf("uname failed\n");
+ return 0;
+ }
+ if (strcmp(info.sysname, "Darwin") != 0) {
+ SkDebugf("unexpected uname sysname %s\n", info.sysname);
+ return 0;
+ }
+ char* dot = strchr(info.release, '.');
+ if (!dot) {
+ SkDebugf("expected dot in uname release %s\n", info.release);
+ return 0;
+ }
+ int version = atoi(info.release);
+ if (version == 0) {
+ SkDebugf("could not parse uname release %s\n", info.release);
+ }
+ return version;
+}
+
+static int darwinVersion() {
+ static int darwin_version = readVersion();
+ return darwin_version;
+}
+
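+// Darwin major version 10 corresponds to OS X 10.6 (Snow Leopard), 11 to 10.7 (Lion),
+// and 12 to 10.8 (Mountain Lion).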
+static bool isSnowLeopard() {
+ return darwinVersion() == 10;
+}
+
+static bool isLion() {
+ return darwinVersion() == 11;
+}
+
+static bool isMountainLion() {
+ return darwinVersion() == 12;
+}
+
+static bool isLCDFormat(unsigned format) {
+ return SkMask::kLCD16_Format == format;
+}
+
+static CGFloat ScalarToCG(SkScalar scalar) {
+ if (sizeof(CGFloat) == sizeof(float)) {
+ return SkScalarToFloat(scalar);
+ } else {
+ SkASSERT(sizeof(CGFloat) == sizeof(double));
+ return (CGFloat) SkScalarToDouble(scalar);
+ }
+}
+
+static SkScalar CGToScalar(CGFloat cgFloat) {
+ if (sizeof(CGFloat) == sizeof(float)) {
+ return SkFloatToScalar(cgFloat);
+ } else {
+ SkASSERT(sizeof(CGFloat) == sizeof(double));
+ return SkDoubleToScalar(cgFloat);
+ }
+}
+
+static float CGToFloat(CGFloat cgFloat) {
+ if (sizeof(CGFloat) == sizeof(float)) {
+ return cgFloat;
+ } else {
+ SkASSERT(sizeof(CGFloat) == sizeof(double));
+ return static_cast<float>(cgFloat);
+ }
+}
+
+static CGAffineTransform MatrixToCGAffineTransform(const SkMatrix& matrix,
+ SkScalar sx = SK_Scalar1,
+ SkScalar sy = SK_Scalar1) {
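+    // Note the negated skew terms: Skia matrices use a y-down coordinate system while
+    // CG text space is y-up.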
+ return CGAffineTransformMake( ScalarToCG(matrix[SkMatrix::kMScaleX] * sx),
+ -ScalarToCG(matrix[SkMatrix::kMSkewY] * sy),
+ -ScalarToCG(matrix[SkMatrix::kMSkewX] * sx),
+ ScalarToCG(matrix[SkMatrix::kMScaleY] * sy),
+ ScalarToCG(matrix[SkMatrix::kMTransX] * sx),
+ ScalarToCG(matrix[SkMatrix::kMTransY] * sy));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define BITMAP_INFO_RGB (kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Host)
+
+/**
+ * There does not appear to be a publicly accessible API for determining if LCD
+ * font smoothing will be applied if we request it. The main issue is that if
+ * smoothing is applied a gamma of 2.0 will be used; if not, a gamma of 1.0.
+ */
+static bool supports_LCD() {
+ static int gSupportsLCD = -1;
+ if (gSupportsLCD >= 0) {
+ return (bool) gSupportsLCD;
+ }
+ uint32_t rgb = 0;
+ AutoCFRelease<CGColorSpaceRef> colorspace(CGColorSpaceCreateDeviceRGB());
+ AutoCFRelease<CGContextRef> cgContext(CGBitmapContextCreate(&rgb, 1, 1, 8, 4,
+ colorspace, BITMAP_INFO_RGB));
+ AutoCFRelease<CTFontRef> ctFont(CTFontCreateWithName(CFSTR("Helvetica"), 16, nullptr));
+ CGContextSetShouldSmoothFonts(cgContext, true);
+ CGContextSetShouldAntialias(cgContext, true);
+ CGContextSetTextDrawingMode(cgContext, kCGTextFill);
+ CGContextSetGrayFillColor(cgContext, 1, 1);
+ CGPoint point = CGPointMake(-1, 0);
+ static const UniChar pipeChar = '|';
+ CGGlyph pipeGlyph;
+ CTFontGetGlyphsForCharacters(ctFont, &pipeChar, &pipeGlyph, 1);
+ CTFontDrawGlyphs(ctFont, &pipeGlyph, &point, 1, cgContext);
+
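+    // If LCD smoothing was applied, the subpixel channels of the rendered pixel will differ.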
+ uint32_t r = (rgb >> 16) & 0xFF;
+ uint32_t g = (rgb >> 8) & 0xFF;
+ uint32_t b = (rgb >> 0) & 0xFF;
+ gSupportsLCD = (r != g || r != b);
+ return (bool) gSupportsLCD;
+}
+
+class Offscreen {
+public:
+ Offscreen()
+ : fRGBSpace(nullptr)
+ , fCG(nullptr)
+ , fDoAA(false)
+ , fDoLCD(false)
+ {
+ fSize.set(0, 0);
+ }
+
+ CGRGBPixel* getCG(const SkScalerContext_Mac& context, const SkGlyph& glyph,
+ CGGlyph glyphID, size_t* rowBytesPtr,
+ bool generateA8FromLCD);
+
+private:
+ enum {
+ kSize = 32 * 32 * sizeof(CGRGBPixel)
+ };
+ SkAutoSMalloc<kSize> fImageStorage;
+ AutoCFRelease<CGColorSpaceRef> fRGBSpace;
+
+ // cached state
+ AutoCFRelease<CGContextRef> fCG;
+ SkISize fSize;
+ bool fDoAA;
+ bool fDoLCD;
+
+ static int RoundSize(int dimension) {
+ return SkNextPow2(dimension);
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool find_dict_CGFloat(CFDictionaryRef dict, CFStringRef name, CGFloat* value) {
+ CFNumberRef num;
+ return CFDictionaryGetValueIfPresent(dict, name, (const void**)&num)
+ && CFNumberIsFloatType(num)
+ && CFNumberGetValue(num, kCFNumberCGFloatType, value);
+}
+
+template <typename S, typename D, typename C> struct LinearInterpolater {
+ struct Mapping {
+ S src_val;
+ D dst_val;
+ };
+ constexpr LinearInterpolater(Mapping const mapping[], int mappingCount)
+ : fMapping(mapping), fMappingCount(mappingCount) {}
+
+ static D map(S value, S src_min, S src_max, D dst_min, D dst_max) {
+ SkASSERT(src_min < src_max);
+ SkASSERT(dst_min <= dst_max);
+ return C()(dst_min + (((value - src_min) * (dst_max - dst_min)) / (src_max - src_min)));
+ }
+
+ int map(S val) const {
+ // -Inf to [0]
+ if (val < fMapping[0].src_val) {
+ return fMapping[0].dst_val;
+ }
+
+ // Linear from [i] to [i+1]
+ for (int i = 0; i < fMappingCount - 1; ++i) {
+ if (val < fMapping[i+1].src_val) {
+ return map(val, fMapping[i].src_val, fMapping[i+1].src_val,
+ fMapping[i].dst_val, fMapping[i+1].dst_val);
+ }
+ }
+
+ // From [n] to +Inf
+ // if (fcweight < Inf)
+ return fMapping[fMappingCount - 1].dst_val;
+ }
+
+ Mapping const * fMapping;
+ int fMappingCount;
+};
+
+struct RoundCGFloatToInt {
+ int operator()(CGFloat s) { return s + 0.5; }
+};
+
+static int ct_weight_to_fontstyle(CGFloat cgWeight) {
+ using Interpolator = LinearInterpolater<CGFloat, int, RoundCGFloatToInt>;
+
+ // Values determined by creating font data with every weight, creating a CTFont,
+ // and asking the CTFont for its weight. See TypefaceStyle test for basics.
+
+    // Note that Mac supports the old OS/2 version A, so values 0 through 10 are treated as if
+    // multiplied by 100. However, on this end we can't tell which convention was used.
+ static constexpr Interpolator::Mapping weightMappings[] = {
+ { -1.00, 0 },
+ { -0.70, 100 },
+ { -0.50, 200 },
+ { -0.23, 300 },
+ { 0.00, 400 },
+ { 0.20, 500 },
+ { 0.30, 600 },
+ { 0.40, 700 },
+ { 0.60, 800 },
+ { 0.80, 900 },
+ { 1.00, 1000 },
+ };
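+    // For example, a CTFont weight of 0.25 falls between { 0.20, 500 } and { 0.30, 600 }
+    // and maps linearly to 550.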
+ static constexpr Interpolator interpolater(weightMappings, SK_ARRAY_COUNT(weightMappings));
+ return interpolater.map(cgWeight);
+}
+
+static int ct_width_to_fontstyle(CGFloat cgWidth) {
+ using Interpolator = LinearInterpolater<CGFloat, int, RoundCGFloatToInt>;
+
+ // Values determined by creating font data with every width, creating a CTFont,
+ // and asking the CTFont for its width. See TypefaceStyle test for basics.
+ static constexpr Interpolator::Mapping widthMappings[] = {
+ { -0.5, 0 },
+ { 0.5, 10 },
+ };
+ static constexpr Interpolator interpolater(widthMappings, SK_ARRAY_COUNT(widthMappings));
+ return interpolater.map(cgWidth);
+}
+
+static SkFontStyle fontstyle_from_descriptor(CTFontDescriptorRef desc) {
+ AutoCFRelease<CFDictionaryRef> dict(
+ (CFDictionaryRef)CTFontDescriptorCopyAttribute(desc, kCTFontTraitsAttribute));
+ if (nullptr == dict.get()) {
+ return SkFontStyle();
+ }
+
+ CGFloat weight, width, slant;
+ if (!find_dict_CGFloat(dict, kCTFontWeightTrait, &weight)) {
+ weight = 0;
+ }
+ if (!find_dict_CGFloat(dict, kCTFontWidthTrait, &width)) {
+ width = 0;
+ }
+ if (!find_dict_CGFloat(dict, kCTFontSlantTrait, &slant)) {
+ slant = 0;
+ }
+
+ return SkFontStyle(ct_weight_to_fontstyle(weight),
+ ct_width_to_fontstyle(width),
+ slant ? SkFontStyle::kItalic_Slant
+ : SkFontStyle::kUpright_Slant);
+}
+
+#define WEIGHT_THRESHOLD ((SkFontStyle::kNormal_Weight + SkFontStyle::kBold_Weight)/2)
+
+// kCTFontColorGlyphsTrait was added in the Mac 10.7 and iPhone 4.3 SDKs.
+// Being an enum value it is not guarded by version macros, but old SDKs must still be supported.
+#if defined(__MAC_10_7) || defined(__IPHONE_4_3)
+static const uint32_t SkCTFontColorGlyphsTrait = kCTFontColorGlyphsTrait;
+#else
+static const uint32_t SkCTFontColorGlyphsTrait = (1 << 13);
+#endif
+
+class SkTypeface_Mac : public SkTypeface {
+public:
+ SkTypeface_Mac(CTFontRef fontRef, CFTypeRef resourceRef,
+ const SkFontStyle& fs, bool isFixedPitch,
+ bool isLocalStream)
+ : SkTypeface(fs, isFixedPitch)
+ , fFontRef(fontRef) // caller has already called CFRetain for us
+ , fOriginatingCFTypeRef(resourceRef) // caller has already called CFRetain for us
+ , fHasColorGlyphs(SkToBool(CTFontGetSymbolicTraits(fFontRef) & SkCTFontColorGlyphsTrait))
+ , fIsLocalStream(isLocalStream)
+ {
+ SkASSERT(fontRef);
+ }
+
+ AutoCFRelease<CTFontRef> fFontRef;
+ AutoCFRelease<CFTypeRef> fOriginatingCFTypeRef;
+ const bool fHasColorGlyphs;
+ bool hasColorGlyphs() const override { return fHasColorGlyphs; }
+
+protected:
+ int onGetUPEM() const override;
+ SkStreamAsset* onOpenStream(int* ttcIndex) const override;
+ std::unique_ptr<SkFontData> onMakeFontData() const override;
+ void onGetFamilyName(SkString* familyName) const override;
+ SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override;
+ int onGetTableTags(SkFontTableTag tags[]) const override;
+ virtual size_t onGetTableData(SkFontTableTag, size_t offset,
+ size_t length, void* data) const override;
+ SkScalerContext* onCreateScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor*) const override;
+ void onFilterRec(SkScalerContextRec*) const override;
+ void onGetFontDescriptor(SkFontDescriptor*, bool*) const override;
+ virtual SkAdvancedTypefaceMetrics* onGetAdvancedTypefaceMetrics(
+ PerGlyphInfo,
+ const uint32_t*, uint32_t) const override;
+ virtual int onCharsToGlyphs(const void* chars, Encoding, uint16_t glyphs[],
+ int glyphCount) const override;
+ int onCountGlyphs() const override;
+
+private:
+ bool fIsLocalStream;
+
+ typedef SkTypeface INHERITED;
+};
+
+static bool find_by_CTFontRef(SkTypeface* cached, void* context) {
+ CTFontRef self = (CTFontRef)context;
+ CTFontRef other = ((SkTypeface_Mac*)cached)->fFontRef;
+
+ return CFEqual(self, other);
+}
+
+/** Creates a typeface, searching the cache if isLocalStream is false.
+ * Takes ownership of the CTFontRef and CFTypeRef.
+ */
+static SkTypeface* create_from_CTFontRef(CTFontRef f, CFTypeRef r, bool isLocalStream) {
+ SkASSERT(f);
+ AutoCFRelease<CTFontRef> font(f);
+ AutoCFRelease<CFTypeRef> resource(r);
+
+ if (!isLocalStream) {
+ SkTypeface* face = SkTypefaceCache::FindByProcAndRef(find_by_CTFontRef, (void*)font.get());
+ if (face) {
+ return face;
+ }
+ }
+
+ AutoCFRelease<CTFontDescriptorRef> desc(CTFontCopyFontDescriptor(font));
+ SkFontStyle style = fontstyle_from_descriptor(desc);
+ CTFontSymbolicTraits traits = CTFontGetSymbolicTraits(font);
+ bool isFixedPitch = SkToBool(traits & kCTFontMonoSpaceTrait);
+
+ SkTypeface* face = new SkTypeface_Mac(font.release(), resource.release(),
+ style, isFixedPitch, isLocalStream);
+ if (!isLocalStream) {
+ SkTypefaceCache::Add(face);
+ }
+ return face;
+}
+
+/** Creates a typeface from a descriptor, searching the cache. */
+static SkTypeface* create_from_desc(CTFontDescriptorRef desc) {
+ AutoCFRelease<CTFontRef> ctFont(CTFontCreateWithFontDescriptor(desc, 0, nullptr));
+ if (!ctFont) {
+ return nullptr;
+ }
+
+ return create_from_CTFontRef(ctFont.release(), nullptr, false);
+}
+
+static CTFontDescriptorRef create_descriptor(const char familyName[], const SkFontStyle& style) {
+ CTFontSymbolicTraits ctFontTraits = 0;
+ if (style.weight() >= SkFontStyle::kBold_Weight) {
+ ctFontTraits |= kCTFontBoldTrait;
+ }
+ if (style.slant() != SkFontStyle::kUpright_Slant) {
+ ctFontTraits |= kCTFontItalicTrait;
+ }
+
+ //TODO: add weight width slant
+
+ // Create the font info
+ AutoCFRelease<CFStringRef> cfFontName(make_CFString(familyName));
+
+ AutoCFRelease<CFNumberRef> cfFontTraits(
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &ctFontTraits));
+
+ AutoCFRelease<CFMutableDictionaryRef> cfAttributes(
+ CFDictionaryCreateMutable(kCFAllocatorDefault, 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
+
+ AutoCFRelease<CFMutableDictionaryRef> cfTraits(
+ CFDictionaryCreateMutable(kCFAllocatorDefault, 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
+
+ if (!cfFontName || !cfFontTraits || !cfAttributes || !cfTraits) {
+ return nullptr;
+ }
+
+ CFDictionaryAddValue(cfTraits, kCTFontSymbolicTrait, cfFontTraits);
+
+ CFDictionaryAddValue(cfAttributes, kCTFontFamilyNameAttribute, cfFontName);
+ CFDictionaryAddValue(cfAttributes, kCTFontTraitsAttribute, cfTraits);
+
+ return CTFontDescriptorCreateWithAttributes(cfAttributes);
+}
+
+/** Creates a typeface from a name, searching the cache. */
+static SkTypeface* create_from_name(const char familyName[], const SkFontStyle& style) {
+ AutoCFRelease<CTFontDescriptorRef> desc(create_descriptor(familyName, style));
+ if (!desc) {
+ return nullptr;
+ }
+ return create_from_desc(desc);
+}
+
+SK_DECLARE_STATIC_MUTEX(gGetDefaultFaceMutex);
+static SkTypeface* GetDefaultFace() {
+ SkAutoMutexAcquire ma(gGetDefaultFaceMutex);
+
+ static SkTypeface* gDefaultFace;
+
+ if (nullptr == gDefaultFace) {
+ gDefaultFace = create_from_name(FONT_DEFAULT_NAME, SkFontStyle());
+ }
+ return gDefaultFace;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+extern CTFontRef SkTypeface_GetCTFontRef(const SkTypeface* face);
+CTFontRef SkTypeface_GetCTFontRef(const SkTypeface* face) {
+ const SkTypeface_Mac* macface = (const SkTypeface_Mac*)face;
+ return macface ? macface->fFontRef.get() : nullptr;
+}
+
+/* This function is visible on the outside. It first searches the cache, and if
+ * not found, returns a new entry (after adding it to the cache).
+ */
+SkTypeface* SkCreateTypefaceFromCTFont(CTFontRef fontRef, CFTypeRef resourceRef) {
+ CFRetain(fontRef);
+ if (resourceRef) {
+ CFRetain(resourceRef);
+ }
+ return create_from_CTFontRef(fontRef, resourceRef, false);
+}
+
+static const char* map_css_names(const char* name) {
+ static const struct {
+ const char* fFrom; // name the caller specified
+ const char* fTo; // "canonical" name we map to
+ } gPairs[] = {
+ { "sans-serif", "Helvetica" },
+ { "serif", "Times" },
+ { "monospace", "Courier" }
+ };
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gPairs); i++) {
+ if (strcmp(name, gPairs[i].fFrom) == 0) {
+ return gPairs[i].fTo;
+ }
+ }
+ return name; // no change
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** GlyphRect is in FUnits (em space, y up). */
+struct GlyphRect {
+ int16_t fMinX;
+ int16_t fMinY;
+ int16_t fMaxX;
+ int16_t fMaxY;
+};
+
+class SkScalerContext_Mac : public SkScalerContext {
+public:
+ SkScalerContext_Mac(SkTypeface_Mac*, const SkScalerContextEffects&, const SkDescriptor*);
+
+protected:
+ unsigned generateGlyphCount(void) override;
+ uint16_t generateCharToGlyph(SkUnichar uni) override;
+ void generateAdvance(SkGlyph* glyph) override;
+ void generateMetrics(SkGlyph* glyph) override;
+ void generateImage(const SkGlyph& glyph) override;
+ void generatePath(const SkGlyph& glyph, SkPath* path) override;
+ void generateFontMetrics(SkPaint::FontMetrics*) override;
+
+private:
+ static void CTPathElement(void *info, const CGPathElement *element);
+
+ /** Returns the offset from the horizontal origin to the vertical origin in SkGlyph units. */
+ void getVerticalOffset(CGGlyph glyphID, SkPoint* offset) const;
+
+ /** Initializes and returns the value of fFBoundingBoxesGlyphOffset.
+ *
+ * For use with (and must be called before) generateBBoxes.
+ */
+ uint16_t getFBoundingBoxesGlyphOffset();
+
+ /** Initializes fFBoundingBoxes and returns true on success.
+ *
+ * On Lion and Mountain Lion, CTFontGetBoundingRectsForGlyphs has a bug which causes it to
+ * return a bad value in bounds.origin.x for SFNT fonts whose hhea::numberOfHMetrics is
+ * less than its maxp::numGlyphs. When this is the case we try to read the bounds from the
+ * font directly.
+ *
+ * This routine initializes fFBoundingBoxes to an array of
+ * fGlyphCount - fFBoundingBoxesGlyphOffset GlyphRects which contain the bounds in FUnits
+ * (em space, y up) of glyphs with ids in the range [fFBoundingBoxesGlyphOffset, fGlyphCount).
+ *
+ * Returns true if fFBoundingBoxes is properly initialized. The table can only be properly
+ * initialized for a TrueType font with 'head', 'loca', and 'glyf' tables.
+ *
+ * TODO: A future optimization will compute fFBoundingBoxes once per fCTFont.
+ */
+ bool generateBBoxes();
+
+ /** Converts from FUnits (em space, y up) to SkGlyph units (pixels, y down).
+ *
+ * Used on Snow Leopard to correct CTFontGetVerticalTranslationsForGlyphs.
+ * Used on Lion to correct CTFontGetBoundingRectsForGlyphs.
+ */
+ SkMatrix fFUnitMatrix;
+
+ Offscreen fOffscreen;
+
+ /** Unrotated variant of fCTFont.
+ *
+ * In 10.10.1 CTFontGetAdvancesForGlyphs applies the font transform to the width of the
+ * advances, but always sets the height to 0. This font is used to get the advances of the
+ * unrotated glyph, and then the rotation is applied separately.
+ *
+ * CT vertical metrics are pre-rotated (in em space, before transform) 90deg clock-wise.
+ * This makes kCTFontDefaultOrientation dangerous, because the metrics from
+ * kCTFontHorizontalOrientation are in a different space from kCTFontVerticalOrientation.
+ * With kCTFontVerticalOrientation the advances must be unrotated.
+ *
+     * Sometimes, creating a copy of a CTFont with the same size but a different transform will select
+ * different underlying font data. As a result, avoid ever creating more than one CTFont per
+ * SkScalerContext to ensure that only one CTFont is used.
+ *
+ * As a result of the above (and other constraints) this font contains the size, but not the
+ * transform. The transform must always be applied separately.
+ */
+ AutoCFRelease<CTFontRef> fCTFont;
+
+ /** The transform without the font size. */
+ CGAffineTransform fTransform;
+ CGAffineTransform fInvTransform;
+
+ AutoCFRelease<CGFontRef> fCGFont;
+ SkAutoTMalloc<GlyphRect> fFBoundingBoxes;
+ uint16_t fFBoundingBoxesGlyphOffset;
+ uint16_t fGlyphCount;
+ bool fGeneratedFBoundingBoxes;
+ const bool fDoSubPosition;
+ const bool fVertical;
+
+ friend class Offscreen;
+
+ typedef SkScalerContext INHERITED;
+};
+
+// CTFontCreateCopyWithAttributes or CTFontCreateCopyWithSymbolicTraits cannot be used on 10.10
+// and later, as they will return different underlying fonts depending on the size requested.
+// It is not possible to use descriptors with CTFontCreateWithFontDescriptor, since that does not
+// work with non-system fonts. As a result, create the strike specific CTFonts from the underlying
+// CGFont.
+static CTFontRef ctfont_create_exact_copy(CTFontRef baseFont, CGFloat textSize,
+ const CGAffineTransform* transform)
+{
+ AutoCFRelease<CGFontRef> baseCGFont(CTFontCopyGraphicsFont(baseFont, nullptr));
+
+ // The last parameter (CTFontDescriptorRef attributes) *must* be nullptr.
+    // If it is non-nullptr then, for fonts with variation axes, the copy will fail in
+    // CGFontVariationFromDictCallback, which assumes kCGFontVariationAxisName is a CFNumberRef
+    // (which it quite obviously is not).
+
+ // Because we cannot setup the CTFont descriptor to match, the same restriction applies here
+ // as other uses of CTFontCreateWithGraphicsFont which is that such CTFonts should not escape
+ // the scaler context, since they aren't 'normal'.
+ return CTFontCreateWithGraphicsFont(baseCGFont, textSize, transform, nullptr);
+}
+
+SkScalerContext_Mac::SkScalerContext_Mac(SkTypeface_Mac* typeface,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : INHERITED(typeface, effects, desc)
+ , fFBoundingBoxes()
+ , fFBoundingBoxesGlyphOffset(0)
+ , fGeneratedFBoundingBoxes(false)
+ , fDoSubPosition(SkToBool(fRec.fFlags & kSubpixelPositioning_Flag))
+ , fVertical(SkToBool(fRec.fFlags & kVertical_Flag))
+
+{
+ AUTO_CG_LOCK();
+
+ CTFontRef ctFont = typeface->fFontRef.get();
+ CFIndex numGlyphs = CTFontGetGlyphCount(ctFont);
+ SkASSERT(numGlyphs >= 1 && numGlyphs <= 0xFFFF);
+ fGlyphCount = SkToU16(numGlyphs);
+
+ // CT on (at least) 10.9 will size color glyphs down from the requested size, but not up.
+ // As a result, it is necessary to know the actual device size and request that.
+ SkVector scale;
+ SkMatrix skTransform;
+ bool invertible = fRec.computeMatrices(SkScalerContextRec::kVertical_PreMatrixScale,
+ &scale, &skTransform, nullptr, nullptr, &fFUnitMatrix);
+ fTransform = MatrixToCGAffineTransform(skTransform);
+ // CGAffineTransformInvert documents that if the transform is non-invertible it will return the
+ // passed transform unchanged. It does so, but then also prints a message to stdout. Avoid this.
+ if (invertible) {
+ fInvTransform = CGAffineTransformInvert(fTransform);
+ } else {
+ fInvTransform = fTransform;
+ }
+
+ // The transform contains everything except the requested text size.
+ // Some properties, like 'trak', are based on the text size (before applying the matrix).
+ CGFloat textSize = ScalarToCG(scale.y());
+ fCTFont.reset(ctfont_create_exact_copy(ctFont, textSize, nullptr));
+ fCGFont.reset(CTFontCopyGraphicsFont(fCTFont, nullptr));
+
+ // The fUnitMatrix includes the text size (and em) as it is used to scale the raw font data.
+ SkScalar emPerFUnit = SkScalarInvert(SkIntToScalar(CGFontGetUnitsPerEm(fCGFont)));
+ fFUnitMatrix.preScale(emPerFUnit, -emPerFUnit);
+}
+
+CGRGBPixel* Offscreen::getCG(const SkScalerContext_Mac& context, const SkGlyph& glyph,
+ CGGlyph glyphID, size_t* rowBytesPtr,
+ bool generateA8FromLCD) {
+ if (!fRGBSpace) {
+ //It doesn't appear to matter what color space is specified.
+ //Regular blends and antialiased text are always (s*a + d*(1-a))
+ //and smoothed text is always g=2.0.
+ fRGBSpace.reset(CGColorSpaceCreateDeviceRGB());
+ }
+
+ // default to kBW_Format
+ bool doAA = false;
+ bool doLCD = false;
+
+ if (SkMask::kBW_Format != glyph.fMaskFormat) {
+ doLCD = true;
+ doAA = true;
+ }
+
+ // FIXME: lcd smoothed un-hinted rasterization unsupported.
+ if (!generateA8FromLCD && SkMask::kA8_Format == glyph.fMaskFormat) {
+ doLCD = false;
+ doAA = true;
+ }
+
+ // If this font might have color glyphs, disable LCD as there's no way to support it.
+ // CoreText doesn't tell us which format it ended up using, so we can't detect it.
+ // A8 will end up black on transparent, but TODO: we can detect gray and set to A8.
+ if (SkMask::kARGB32_Format == glyph.fMaskFormat) {
+ doLCD = false;
+ }
+
+ size_t rowBytes = fSize.fWidth * sizeof(CGRGBPixel);
+ if (!fCG || fSize.fWidth < glyph.fWidth || fSize.fHeight < glyph.fHeight) {
+ if (fSize.fWidth < glyph.fWidth) {
+ fSize.fWidth = RoundSize(glyph.fWidth);
+ }
+ if (fSize.fHeight < glyph.fHeight) {
+ fSize.fHeight = RoundSize(glyph.fHeight);
+ }
+
+ rowBytes = fSize.fWidth * sizeof(CGRGBPixel);
+ void* image = fImageStorage.reset(rowBytes * fSize.fHeight);
+ const CGImageAlphaInfo alpha = (SkMask::kARGB32_Format == glyph.fMaskFormat)
+ ? kCGImageAlphaPremultipliedFirst
+ : kCGImageAlphaNoneSkipFirst;
+ const CGBitmapInfo bitmapInfo = kCGBitmapByteOrder32Host | alpha;
+ fCG.reset(CGBitmapContextCreate(image, fSize.fWidth, fSize.fHeight, 8,
+ rowBytes, fRGBSpace, bitmapInfo));
+
+ // Skia handles quantization and subpixel positioning,
+        // so disable quantization and enable subpixel positioning in CG.
+ CGContextSetAllowsFontSubpixelQuantization(fCG, false);
+ CGContextSetShouldSubpixelQuantizeFonts(fCG, false);
+
+ // Because CG always draws from the horizontal baseline,
+ // if there is a non-integral translation from the horizontal origin to the vertical origin,
+ // then CG cannot draw the glyph in the correct location without subpixel positioning.
+ CGContextSetAllowsFontSubpixelPositioning(fCG, true);
+ CGContextSetShouldSubpixelPositionFonts(fCG, true);
+
+ CGContextSetTextDrawingMode(fCG, kCGTextFill);
+
+ // Draw black on white to create mask. (Special path exists to speed this up in CG.)
+ CGContextSetGrayFillColor(fCG, 0.0f, 1.0f);
+
+ // force our checks below to happen
+ fDoAA = !doAA;
+ fDoLCD = !doLCD;
+
+ CGContextSetTextMatrix(fCG, context.fTransform);
+ }
+
+ if (fDoAA != doAA) {
+ CGContextSetShouldAntialias(fCG, doAA);
+ fDoAA = doAA;
+ }
+ if (fDoLCD != doLCD) {
+ CGContextSetShouldSmoothFonts(fCG, doLCD);
+ fDoLCD = doLCD;
+ }
+
+ CGRGBPixel* image = (CGRGBPixel*)fImageStorage.get();
+ // skip rows based on the glyph's height
+ image += (fSize.fHeight - glyph.fHeight) * fSize.fWidth;
+
+ // Erase to white (or transparent black if it's a color glyph, to not composite against white).
+ uint32_t bgColor = (SkMask::kARGB32_Format != glyph.fMaskFormat) ? 0xFFFFFFFF : 0x00000000;
+ sk_memset_rect32(image, bgColor, glyph.fWidth, glyph.fHeight, rowBytes);
+
+ float subX = 0;
+ float subY = 0;
+ if (context.fDoSubPosition) {
+ subX = SkFixedToFloat(glyph.getSubXFixed());
+ subY = SkFixedToFloat(glyph.getSubYFixed());
+ }
+
+ // CoreText and CoreGraphics always draw using the horizontal baseline origin.
+ if (context.fVertical) {
+ SkPoint offset;
+ context.getVerticalOffset(glyphID, &offset);
+ subX += offset.fX;
+ subY += offset.fY;
+ }
+
+ CGPoint point = CGPointMake(-glyph.fLeft + subX, glyph.fTop + glyph.fHeight - subY);
+ // Prior to 10.10, CTFontDrawGlyphs acted like CGContextShowGlyphsAtPositions and took
+ // 'positions' which are in text space. The glyph location (in device space) must be
+ // mapped into text space, so that CG can convert it back into device space.
+ // In 10.10.1, this is handled directly in CTFontDrawGlyphs.
+ //
+ // However, in 10.10.2 color glyphs no longer rotate based on the font transform.
+ // So always make the font transform identity and place the transform on the context.
+ point = CGPointApplyAffineTransform(point, context.fInvTransform);
+
+ CTFontDrawGlyphs(context.fCTFont, &glyphID, &point, 1, fCG);
+
+ SkASSERT(rowBytesPtr);
+ *rowBytesPtr = rowBytes;
+ return image;
+}
+
+void SkScalerContext_Mac::getVerticalOffset(CGGlyph glyphID, SkPoint* offset) const {
+ // Snow Leopard returns cgVertOffset in completely un-transformed FUnits (em space, y up).
+ // Lion and Leopard return cgVertOffset in CG units (pixels, y up).
+ CGSize cgVertOffset;
+ CTFontGetVerticalTranslationsForGlyphs(fCTFont, &glyphID, &cgVertOffset, 1);
+ if (isSnowLeopard()) {
+ SkPoint skVertOffset = { CGToScalar(cgVertOffset.width), CGToScalar(cgVertOffset.height) };
+ // From FUnits (em space, y up) to SkGlyph units (pixels, y down).
+ fFUnitMatrix.mapPoints(&skVertOffset, 1);
+ *offset = skVertOffset;
+ return;
+ }
+ cgVertOffset = CGSizeApplyAffineTransform(cgVertOffset, fTransform);
+ SkPoint skVertOffset = { CGToScalar(cgVertOffset.width), CGToScalar(cgVertOffset.height) };
+ // From CG units (pixels, y up) to SkGlyph units (pixels, y down).
+ skVertOffset.fY = -skVertOffset.fY;
+ *offset = skVertOffset;
+}
+
+uint16_t SkScalerContext_Mac::getFBoundingBoxesGlyphOffset() {
+ if (fFBoundingBoxesGlyphOffset) {
+ return fFBoundingBoxesGlyphOffset;
+ }
+ fFBoundingBoxesGlyphOffset = fGlyphCount; // fallback for all fonts
+ AutoCGTable<SkOTTableHorizontalHeader> hheaTable(fCGFont);
+ if (hheaTable.fData) {
+ fFBoundingBoxesGlyphOffset = SkEndian_SwapBE16(hheaTable->numberOfHMetrics);
+ }
+ return fFBoundingBoxesGlyphOffset;
+}
+
+bool SkScalerContext_Mac::generateBBoxes() {
+ if (fGeneratedFBoundingBoxes) {
+ return SkToBool(fFBoundingBoxes.get());
+ }
+ fGeneratedFBoundingBoxes = true;
+
+ AutoCGTable<SkOTTableHead> headTable(fCGFont);
+ if (!headTable.fData) {
+ return false;
+ }
+
+ AutoCGTable<SkOTTableIndexToLocation> locaTable(fCGFont);
+ if (!locaTable.fData) {
+ return false;
+ }
+
+ AutoCGTable<SkOTTableGlyph> glyfTable(fCGFont);
+ if (!glyfTable.fData) {
+ return false;
+ }
+
+ uint16_t entries = fGlyphCount - fFBoundingBoxesGlyphOffset;
+ fFBoundingBoxes.reset(entries);
+
+ SkOTTableHead::IndexToLocFormat locaFormat = headTable->indexToLocFormat;
+ SkOTTableGlyph::Iterator glyphDataIter(*glyfTable.fData, *locaTable.fData, locaFormat);
+ glyphDataIter.advance(fFBoundingBoxesGlyphOffset);
+ for (uint16_t boundingBoxesIndex = 0; boundingBoxesIndex < entries; ++boundingBoxesIndex) {
+ const SkOTTableGlyphData* glyphData = glyphDataIter.next();
+ GlyphRect& rect = fFBoundingBoxes[boundingBoxesIndex];
+ rect.fMinX = SkEndian_SwapBE16(glyphData->xMin);
+ rect.fMinY = SkEndian_SwapBE16(glyphData->yMin);
+ rect.fMaxX = SkEndian_SwapBE16(glyphData->xMax);
+ rect.fMaxY = SkEndian_SwapBE16(glyphData->yMax);
+ }
+
+ return true;
+}
+
+unsigned SkScalerContext_Mac::generateGlyphCount(void) {
+ return fGlyphCount;
+}
+
+uint16_t SkScalerContext_Mac::generateCharToGlyph(SkUnichar uni) {
+ AUTO_CG_LOCK();
+
+ CGGlyph cgGlyph[2];
+ UniChar theChar[2]; // UniChar is a UTF-16 16-bit code unit.
+
+ // Get the glyph
+ size_t numUniChar = SkUTF16_FromUnichar(uni, theChar);
+ SkASSERT(sizeof(CGGlyph) <= sizeof(uint16_t));
+
+ // Undocumented behavior of CTFontGetGlyphsForCharacters with non-bmp code points:
+ // When a surrogate pair is detected, the glyph index used is the index of the high surrogate.
+ // It is documented that if a mapping is unavailable, the glyph will be set to 0.
+ CTFontGetGlyphsForCharacters(fCTFont, theChar, cgGlyph, numUniChar);
+ return cgGlyph[0];
+}
+
+void SkScalerContext_Mac::generateAdvance(SkGlyph* glyph) {
+ this->generateMetrics(glyph);
+}
+
+void SkScalerContext_Mac::generateMetrics(SkGlyph* glyph) {
+ AUTO_CG_LOCK();
+
+ const CGGlyph cgGlyph = (CGGlyph) glyph->getGlyphID();
+ glyph->zeroMetrics();
+
+ // The following block produces cgAdvance in CG units (pixels, y up).
+ CGSize cgAdvance;
+ if (fVertical) {
+ CTFontGetAdvancesForGlyphs(fCTFont, kCTFontVerticalOrientation,
+ &cgGlyph, &cgAdvance, 1);
+ // Vertical advances are returned as widths instead of heights.
+ SkTSwap(cgAdvance.height, cgAdvance.width);
+ cgAdvance.height = -cgAdvance.height;
+ } else {
+ CTFontGetAdvancesForGlyphs(fCTFont, kCTFontHorizontalOrientation,
+ &cgGlyph, &cgAdvance, 1);
+ }
+ cgAdvance = CGSizeApplyAffineTransform(cgAdvance, fTransform);
+ glyph->fAdvanceX = CGToFloat(cgAdvance.width);
+ glyph->fAdvanceY = -CGToFloat(cgAdvance.height);
+
+ // The following produces skBounds in SkGlyph units (pixels, y down),
+ // or returns early if skBounds would be empty.
+ SkRect skBounds;
+
+ // On Mountain Lion, CTFontGetBoundingRectsForGlyphs with kCTFontVerticalOrientation and
+ // CTFontGetVerticalTranslationsForGlyphs do not agree when using OTF CFF fonts.
+ // For TTF fonts these two do agree and we can use CTFontGetBoundingRectsForGlyphs to get
+ // the bounding box and CTFontGetVerticalTranslationsForGlyphs to then draw the glyph
+ // inside that bounding box. However, with OTF CFF fonts this does not work. It appears that
+ // CTFontGetBoundingRectsForGlyphs with kCTFontVerticalOrientation on OTF CFF fonts tries
+ // to center the glyph along the vertical baseline and also perform some mysterious shift
+ // along the baseline. CTFontGetVerticalTranslationsForGlyphs does not appear to perform
+ // these steps.
+ //
+ // It is not known which is correct (or if either is correct). However, we must always draw
+ // from the horizontal origin and must use CTFontGetVerticalTranslationsForGlyphs to draw.
+ // As a result, we do not call CTFontGetBoundingRectsForGlyphs for vertical glyphs.
+
+ // On Snow Leopard, CTFontGetBoundingRectsForGlyphs ignores kCTFontVerticalOrientation and
+ // returns horizontal bounds.
+
+ // On Lion and Mountain Lion, CTFontGetBoundingRectsForGlyphs has a bug which causes it to
+ // return a bad value in cgBounds.origin.x for SFNT fonts whose hhea::numberOfHMetrics is
+ // less than its maxp::numGlyphs. When this is the case we try to read the bounds from the
+ // font directly.
+ if ((isLion() || isMountainLion()) &&
+ (cgGlyph < fGlyphCount && cgGlyph >= getFBoundingBoxesGlyphOffset() && generateBBoxes()))
+ {
+ const GlyphRect& gRect = fFBoundingBoxes[cgGlyph - fFBoundingBoxesGlyphOffset];
+ if (gRect.fMinX >= gRect.fMaxX || gRect.fMinY >= gRect.fMaxY) {
+ return;
+ }
+ skBounds = SkRect::MakeLTRB(gRect.fMinX, gRect.fMinY, gRect.fMaxX, gRect.fMaxY);
+ // From FUnits (em space, y up) to SkGlyph units (pixels, y down).
+ fFUnitMatrix.mapRect(&skBounds);
+
+ } else {
+ // CTFontGetBoundingRectsForGlyphs produces cgBounds in CG units (pixels, y up).
+ CGRect cgBounds;
+ CTFontGetBoundingRectsForGlyphs(fCTFont, kCTFontHorizontalOrientation,
+ &cgGlyph, &cgBounds, 1);
+ cgBounds = CGRectApplyAffineTransform(cgBounds, fTransform);
+
+ // BUG?
+ // 0x200B (zero-advance space) seems to return a huge (garbage) bounds, when
+ // it should be empty. So, if we see a zero-advance, we check if it has an
+ // empty path or not, and if so, we jam the bounds to 0. Hopefully a zero-advance
+ // is rare, so we won't incur a big performance cost for this extra check.
+ if (0 == cgAdvance.width && 0 == cgAdvance.height) {
+ AutoCFRelease<CGPathRef> path(CTFontCreatePathForGlyph(fCTFont, cgGlyph, nullptr));
+ if (nullptr == path || CGPathIsEmpty(path)) {
+ return;
+ }
+ }
+
+ if (CGRectIsEmpty_inline(cgBounds)) {
+ return;
+ }
+
+ // Convert cgBounds to SkGlyph units (pixels, y down).
+ skBounds = SkRect::MakeXYWH(cgBounds.origin.x, -cgBounds.origin.y - cgBounds.size.height,
+ cgBounds.size.width, cgBounds.size.height);
+ }
+
+ if (fVertical) {
+ // Due to all of the vertical bounds bugs, skBounds is always the horizontal bounds.
+ // Convert these horizontal bounds into vertical bounds.
+ SkPoint offset;
+ getVerticalOffset(cgGlyph, &offset);
+ skBounds.offset(offset);
+ }
+
+ // Currently the bounds are based on being rendered at (0,0).
+ // The top left must not move, since that is the base from which subpixel positioning is offset.
+ if (fDoSubPosition) {
+ skBounds.fRight += SkFixedToFloat(glyph->getSubXFixed());
+ skBounds.fBottom += SkFixedToFloat(glyph->getSubYFixed());
+ }
+
+ SkIRect skIBounds;
+ skBounds.roundOut(&skIBounds);
+ // Expand the bounds by 1 pixel, to give CG room for anti-aliasing.
+ // Note that this outset is to allow room for LCD smoothed glyphs. However, the correct outset
+ // is not currently known, as CG dilates the outlines by some percentage.
+ // Note that if this context is A8 and not back-forming from LCD, there is no need to outset.
+ skIBounds.outset(1, 1);
+ glyph->fLeft = SkToS16(skIBounds.fLeft);
+ glyph->fTop = SkToS16(skIBounds.fTop);
+ glyph->fWidth = SkToU16(skIBounds.width());
+ glyph->fHeight = SkToU16(skIBounds.height());
+}
+
+#include "SkColorPriv.h"
+
+static void build_power_table(uint8_t table[]) {
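+    // table[i] = round((i / 255)^2 * 255): squaring inverts the 2.0 smoothing gamma that
+    // CoreGraphics applies (see getInverseGammaTableCoreGraphicSmoothing below).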
+ for (int i = 0; i < 256; i++) {
+ float x = i / 255.f;
+ int xx = SkScalarRoundToInt(x * x * 255);
+ table[i] = SkToU8(xx);
+ }
+}
+
+/**
+ * This will invert the gamma applied by CoreGraphics, so we can get linear
+ * values.
+ *
+ * CoreGraphics obscurely defaults to 2.0 as the smoothing gamma value.
+ * The color space used does not appear to affect this choice.
+ */
+static const uint8_t* getInverseGammaTableCoreGraphicSmoothing() {
+ static bool gInited;
+ static uint8_t gTableCoreGraphicsSmoothing[256];
+ if (!gInited) {
+ build_power_table(gTableCoreGraphicsSmoothing);
+ gInited = true;
+ }
+ return gTableCoreGraphicsSmoothing;
+}
+
+static void cgpixels_to_bits(uint8_t dst[], const CGRGBPixel src[], int count) {
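+    // Pack 8 pixels per output byte, MSB first: white (background) pixels become 0 bits
+    // and black (glyph) pixels become 1 bits.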
+ while (count > 0) {
+ uint8_t mask = 0;
+ for (int i = 7; i >= 0; --i) {
+ mask |= ((CGRGBPixel_getAlpha(*src++) >> 7) ^ 0x1) << i;
+ if (0 == --count) {
+ break;
+ }
+ }
+ *dst++ = mask;
+ }
+}
+
+template<bool APPLY_PREBLEND>
+static inline uint8_t rgb_to_a8(CGRGBPixel rgb, const uint8_t* table8) {
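+    // Glyphs are drawn black on white, so invert each channel before computing the
+    // luminance coverage value.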
+ U8CPU r = 0xFF - ((rgb >> 16) & 0xFF);
+ U8CPU g = 0xFF - ((rgb >> 8) & 0xFF);
+ U8CPU b = 0xFF - ((rgb >> 0) & 0xFF);
+ U8CPU lum = sk_apply_lut_if<APPLY_PREBLEND>(SkComputeLuminance(r, g, b), table8);
+#if SK_SHOW_TEXT_BLIT_COVERAGE
+ lum = SkTMax(lum, (U8CPU)0x30);
+#endif
+ return lum;
+}
+template<bool APPLY_PREBLEND>
+static void rgb_to_a8(const CGRGBPixel* SK_RESTRICT cgPixels, size_t cgRowBytes,
+ const SkGlyph& glyph, const uint8_t* table8) {
+ const int width = glyph.fWidth;
+ size_t dstRB = glyph.rowBytes();
+ uint8_t* SK_RESTRICT dst = (uint8_t*)glyph.fImage;
+
+ for (int y = 0; y < glyph.fHeight; y++) {
+ for (int i = 0; i < width; ++i) {
+ dst[i] = rgb_to_a8<APPLY_PREBLEND>(cgPixels[i], table8);
+ }
+ cgPixels = (CGRGBPixel*)((char*)cgPixels + cgRowBytes);
+ dst += dstRB;
+ }
+}
+
+template<bool APPLY_PREBLEND>
+static inline uint16_t rgb_to_lcd16(CGRGBPixel rgb, const uint8_t* tableR,
+ const uint8_t* tableG,
+ const uint8_t* tableB) {
+ U8CPU r = sk_apply_lut_if<APPLY_PREBLEND>(0xFF - ((rgb >> 16) & 0xFF), tableR);
+ U8CPU g = sk_apply_lut_if<APPLY_PREBLEND>(0xFF - ((rgb >> 8) & 0xFF), tableG);
+ U8CPU b = sk_apply_lut_if<APPLY_PREBLEND>(0xFF - ((rgb >> 0) & 0xFF), tableB);
+#if SK_SHOW_TEXT_BLIT_COVERAGE
+ r = SkTMax(r, (U8CPU)0x30);
+ g = SkTMax(g, (U8CPU)0x30);
+ b = SkTMax(b, (U8CPU)0x30);
+#endif
+ return SkPack888ToRGB16(r, g, b);
+}
+template<bool APPLY_PREBLEND>
+static void rgb_to_lcd16(const CGRGBPixel* SK_RESTRICT cgPixels, size_t cgRowBytes, const SkGlyph& glyph,
+ const uint8_t* tableR, const uint8_t* tableG, const uint8_t* tableB) {
+ const int width = glyph.fWidth;
+ size_t dstRB = glyph.rowBytes();
+ uint16_t* SK_RESTRICT dst = (uint16_t*)glyph.fImage;
+
+ for (int y = 0; y < glyph.fHeight; y++) {
+ for (int i = 0; i < width; i++) {
+ dst[i] = rgb_to_lcd16<APPLY_PREBLEND>(cgPixels[i], tableR, tableG, tableB);
+ }
+ cgPixels = (CGRGBPixel*)((char*)cgPixels + cgRowBytes);
+ dst = (uint16_t*)((char*)dst + dstRB);
+ }
+}
+
+static SkPMColor cgpixels_to_pmcolor(CGRGBPixel rgb) {
+ U8CPU a = (rgb >> 24) & 0xFF;
+ U8CPU r = (rgb >> 16) & 0xFF;
+ U8CPU g = (rgb >> 8) & 0xFF;
+ U8CPU b = (rgb >> 0) & 0xFF;
+#if SK_SHOW_TEXT_BLIT_COVERAGE
+ a = SkTMax(a, (U8CPU)0x30);
+#endif
+ return SkPackARGB32(a, r, g, b);
+}
+
+void SkScalerContext_Mac::generateImage(const SkGlyph& glyph) {
+ CGGlyph cgGlyph = (CGGlyph) glyph.getGlyphID();
+
+ // FIXME: lcd smoothed un-hinted rasterization unsupported.
+ bool generateA8FromLCD = fRec.getHinting() != SkPaint::kNo_Hinting;
+
+ // Draw the glyph
+ size_t cgRowBytes;
+ CGRGBPixel* cgPixels = fOffscreen.getCG(*this, glyph, cgGlyph, &cgRowBytes, generateA8FromLCD);
+ if (cgPixels == nullptr) {
+ return;
+ }
+
+ // Fix the glyph
+ const bool isLCD = isLCDFormat(glyph.fMaskFormat);
+ if (isLCD || (glyph.fMaskFormat == SkMask::kA8_Format && supports_LCD() && generateA8FromLCD)) {
+ const uint8_t* table = getInverseGammaTableCoreGraphicSmoothing();
+
+ //Note that the following cannot really be integrated into the
+ //pre-blend, since we may not be applying the pre-blend; when we aren't
+ //applying the pre-blend it means that a filter wants linear anyway.
+ //Other code may also be applying the pre-blend, so we'd need another
+ //one with this and one without.
+ CGRGBPixel* addr = cgPixels;
+ for (int y = 0; y < glyph.fHeight; ++y) {
+ for (int x = 0; x < glyph.fWidth; ++x) {
+ int r = (addr[x] >> 16) & 0xFF;
+ int g = (addr[x] >> 8) & 0xFF;
+ int b = (addr[x] >> 0) & 0xFF;
+ addr[x] = (table[r] << 16) | (table[g] << 8) | table[b];
+ }
+ addr = SkTAddOffset<CGRGBPixel>(addr, cgRowBytes);
+ }
+ }
+
+ // Convert glyph to mask
+ switch (glyph.fMaskFormat) {
+ case SkMask::kLCD16_Format: {
+ if (fPreBlend.isApplicable()) {
+ rgb_to_lcd16<true>(cgPixels, cgRowBytes, glyph,
+ fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+ } else {
+ rgb_to_lcd16<false>(cgPixels, cgRowBytes, glyph,
+ fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+ }
+ } break;
+ case SkMask::kA8_Format: {
+ if (fPreBlend.isApplicable()) {
+ rgb_to_a8<true>(cgPixels, cgRowBytes, glyph, fPreBlend.fG);
+ } else {
+ rgb_to_a8<false>(cgPixels, cgRowBytes, glyph, fPreBlend.fG);
+ }
+ } break;
+ case SkMask::kBW_Format: {
+ const int width = glyph.fWidth;
+ size_t dstRB = glyph.rowBytes();
+ uint8_t* dst = (uint8_t*)glyph.fImage;
+ for (int y = 0; y < glyph.fHeight; y++) {
+ cgpixels_to_bits(dst, cgPixels, width);
+ cgPixels = (CGRGBPixel*)((char*)cgPixels + cgRowBytes);
+ dst += dstRB;
+ }
+ } break;
+ case SkMask::kARGB32_Format: {
+ const int width = glyph.fWidth;
+ size_t dstRB = glyph.rowBytes();
+ SkPMColor* dst = (SkPMColor*)glyph.fImage;
+ for (int y = 0; y < glyph.fHeight; y++) {
+ for (int x = 0; x < width; ++x) {
+ dst[x] = cgpixels_to_pmcolor(cgPixels[x]);
+ }
+ cgPixels = (CGRGBPixel*)((char*)cgPixels + cgRowBytes);
+ dst = (SkPMColor*)((char*)dst + dstRB);
+ }
+ } break;
+ default:
+ SkDEBUGFAIL("unexpected mask format");
+ break;
+ }
+}
+
+/*
+ * Our subpixel resolution is only 2 bits in each direction, so a scale of 4
+ * seems sufficient, and possibly even correct, to allow the hinted outline
+ * to be subpixel positioned.
+ */
+#define kScaleForSubPixelPositionHinting (4.0f)
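+// (At 4x, whole-pixel hint snaps land on quarter-pixel boundaries once the path
+// is scaled back down, which matches the 2-bit subpixel resolution noted above.)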
+
+void SkScalerContext_Mac::generatePath(const SkGlyph& glyph, SkPath* path) {
+ AUTO_CG_LOCK();
+
+ SkScalar scaleX = SK_Scalar1;
+ SkScalar scaleY = SK_Scalar1;
+
+ CGAffineTransform xform = fTransform;
+ /*
+ * For subpixel positioning, we want to return an unhinted outline, so it
+ * can be positioned nicely at fractional offsets. However, we special-case
+ * if the baseline of the (horizontal) text is axis-aligned. In those cases
+ * we want to retain hinting in the direction orthogonal to the baseline.
+ * e.g. for horizontal baseline, we want to retain hinting in Y.
+ * The way we remove hinting is to scale the font by some value (4) in that
+ * direction, ask for the path, and then scale the path back down.
+ */
+ if (fDoSubPosition) {
+ // start out by assuming that we want no hinting in X and Y

+ scaleX = scaleY = kScaleForSubPixelPositionHinting;
+ // now see if we need to restore hinting for axis-aligned baselines
+ switch (this->computeAxisAlignmentForHText()) {
+ case kX_SkAxisAlignment:
+ scaleY = SK_Scalar1; // want hinting in the Y direction
+ break;
+ case kY_SkAxisAlignment:
+ scaleX = SK_Scalar1; // want hinting in the X direction
+ break;
+ default:
+ break;
+ }
+
+ CGAffineTransform scale(CGAffineTransformMakeScale(ScalarToCG(scaleX), ScalarToCG(scaleY)));
+ xform = CGAffineTransformConcat(fTransform, scale);
+ }
+
+ CGGlyph cgGlyph = (CGGlyph)glyph.getGlyphID();
+ AutoCFRelease<CGPathRef> cgPath(CTFontCreatePathForGlyph(fCTFont, cgGlyph, &xform));
+
+ path->reset();
+ if (cgPath != nullptr) {
+ CGPathApply(cgPath, path, SkScalerContext_Mac::CTPathElement);
+ }
+
+ if (fDoSubPosition) {
+ SkMatrix m;
+ m.setScale(SkScalarInvert(scaleX), SkScalarInvert(scaleY));
+ path->transform(m);
+ }
+ if (fVertical) {
+ SkPoint offset;
+ getVerticalOffset(cgGlyph, &offset);
+ path->offset(offset.fX, offset.fY);
+ }
+}
+
+void SkScalerContext_Mac::generateFontMetrics(SkPaint::FontMetrics* metrics) {
+ if (nullptr == metrics) {
+ return;
+ }
+
+ AUTO_CG_LOCK();
+
+ CGRect theBounds = CTFontGetBoundingBox(fCTFont);
+
+ metrics->fTop = CGToScalar(-CGRectGetMaxY_inline(theBounds));
+ metrics->fAscent = CGToScalar(-CTFontGetAscent(fCTFont));
+ metrics->fDescent = CGToScalar( CTFontGetDescent(fCTFont));
+ metrics->fBottom = CGToScalar(-CGRectGetMinY_inline(theBounds));
+ metrics->fLeading = CGToScalar( CTFontGetLeading(fCTFont));
+ metrics->fAvgCharWidth = CGToScalar( CGRectGetWidth_inline(theBounds));
+ metrics->fXMin = CGToScalar( CGRectGetMinX_inline(theBounds));
+ metrics->fXMax = CGToScalar( CGRectGetMaxX_inline(theBounds));
+ metrics->fMaxCharWidth = metrics->fXMax - metrics->fXMin;
+ metrics->fXHeight = CGToScalar( CTFontGetXHeight(fCTFont));
+ metrics->fCapHeight = CGToScalar( CTFontGetCapHeight(fCTFont));
+ metrics->fUnderlineThickness = CGToScalar( CTFontGetUnderlineThickness(fCTFont));
+ metrics->fUnderlinePosition = -CGToScalar( CTFontGetUnderlinePosition(fCTFont));
+
+ metrics->fFlags |= SkPaint::FontMetrics::kUnderlineThinknessIsValid_Flag;
+ metrics->fFlags |= SkPaint::FontMetrics::kUnderlinePositionIsValid_Flag;
+}
+
+void SkScalerContext_Mac::CTPathElement(void *info, const CGPathElement *element) {
+ SkPath* skPath = (SkPath*)info;
+
+ // Process the path element
+ switch (element->type) {
+ case kCGPathElementMoveToPoint:
+ skPath->moveTo(element->points[0].x, -element->points[0].y);
+ break;
+
+ case kCGPathElementAddLineToPoint:
+ skPath->lineTo(element->points[0].x, -element->points[0].y);
+ break;
+
+ case kCGPathElementAddQuadCurveToPoint:
+ skPath->quadTo(element->points[0].x, -element->points[0].y,
+ element->points[1].x, -element->points[1].y);
+ break;
+
+ case kCGPathElementAddCurveToPoint:
+ skPath->cubicTo(element->points[0].x, -element->points[0].y,
+ element->points[1].x, -element->points[1].y,
+ element->points[2].x, -element->points[2].y);
+ break;
+
+ case kCGPathElementCloseSubpath:
+ skPath->close();
+ break;
+
+ default:
+ SkDEBUGFAIL("Unknown path element!");
+ break;
+ }
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Returns nullptr on failure
+// Call must still manage its ownership of provider
+static SkTypeface* create_from_dataProvider(CGDataProviderRef provider) {
+ AutoCFRelease<CGFontRef> cg(CGFontCreateWithDataProvider(provider));
+ if (nullptr == cg) {
+ return nullptr;
+ }
+ CTFontRef ct = CTFontCreateWithGraphicsFont(cg, 0, nullptr, nullptr);
+ return ct ? create_from_CTFontRef(ct, nullptr, true) : nullptr;
+}
+
+// Web fonts added to the CTFont registry do not return their character set.
+// Iterate through the font in this case. The existing caller caches the result,
+// so the performance impact isn't too bad.
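+// The loop below walks every UTF-16 code unit starting at 0x0000 and stops once
+// the requested number of glyphs has been resolved or unichar wraps back to 0.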
+static void populate_glyph_to_unicode_slow(CTFontRef ctFont, CFIndex glyphCount,
+ SkTDArray<SkUnichar>* glyphToUnicode) {
+ glyphToUnicode->setCount(SkToInt(glyphCount));
+ SkUnichar* out = glyphToUnicode->begin();
+ sk_bzero(out, glyphCount * sizeof(SkUnichar));
+ UniChar unichar = 0;
+ while (glyphCount > 0) {
+ CGGlyph glyph;
+ if (CTFontGetGlyphsForCharacters(ctFont, &unichar, &glyph, 1)) {
+ out[glyph] = unichar;
+ --glyphCount;
+ }
+ if (++unichar == 0) {
+ break;
+ }
+ }
+}
+
+// Construct Glyph to Unicode table.
+// Unicode code points that require surrogate pairs in UTF-16 are not
+// supported.
+static void populate_glyph_to_unicode(CTFontRef ctFont, CFIndex glyphCount,
+ SkTDArray<SkUnichar>* glyphToUnicode) {
+ AutoCFRelease<CFCharacterSetRef> charSet(CTFontCopyCharacterSet(ctFont));
+ if (!charSet) {
+ populate_glyph_to_unicode_slow(ctFont, glyphCount, glyphToUnicode);
+ return;
+ }
+
+ AutoCFRelease<CFDataRef> bitmap(CFCharacterSetCreateBitmapRepresentation(kCFAllocatorDefault,
+ charSet));
+ if (!bitmap) {
+ return;
+ }
+ CFIndex length = CFDataGetLength(bitmap);
+ if (!length) {
+ return;
+ }
+ if (length > 8192) {
+ // TODO: Add support for Unicode above 0xFFFF
+ // Consider only the BMP portion of the Unicode character points.
+ // The bitmap may contain other planes, up to plane 16.
+ // See http://developer.apple.com/library/ios/#documentation/CoreFoundation/Reference/CFCharacterSetRef/Reference/reference.html
+ length = 8192;
+ }
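+ // 8192 bytes * 8 bits per byte == 65536 entries, which covers exactly the BMP (U+0000..U+FFFF).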
+ const UInt8* bits = CFDataGetBytePtr(bitmap);
+ glyphToUnicode->setCount(SkToInt(glyphCount));
+ SkUnichar* out = glyphToUnicode->begin();
+ sk_bzero(out, glyphCount * sizeof(SkUnichar));
+ for (int i = 0; i < length; i++) {
+ int mask = bits[i];
+ if (!mask) {
+ continue;
+ }
+ for (int j = 0; j < 8; j++) {
+ CGGlyph glyph;
+ UniChar unichar = static_cast<UniChar>((i << 3) + j);
+ if (mask & (1 << j) && CTFontGetGlyphsForCharacters(ctFont, &unichar, &glyph, 1)) {
+ out[glyph] = unichar;
+ }
+ }
+ }
+}
+
+/** Assumes src and dst are not nullptr. */
+static void CFStringToSkString(CFStringRef src, SkString* dst) {
+ // Reserve enough room for the worst-case string,
+ // plus 1 byte for the trailing null.
+ CFIndex length = CFStringGetMaximumSizeForEncoding(CFStringGetLength(src),
+ kCFStringEncodingUTF8) + 1;
+ dst->resize(length);
+ CFStringGetCString(src, dst->writable_str(), length, kCFStringEncodingUTF8);
+ // Resize to the actual UTF-8 length used, stripping the null character.
+ dst->resize(strlen(dst->c_str()));
+}
+
+SkAdvancedTypefaceMetrics* SkTypeface_Mac::onGetAdvancedTypefaceMetrics(
+ PerGlyphInfo perGlyphInfo,
+ const uint32_t* glyphIDs,
+ uint32_t glyphIDsCount) const {
+
+ AUTO_CG_LOCK();
+
+ CTFontRef originalCTFont = fFontRef.get();
+ AutoCFRelease<CTFontRef> ctFont(ctfont_create_exact_copy(
+ originalCTFont, CTFontGetUnitsPerEm(originalCTFont), nullptr));
+
+ SkAdvancedTypefaceMetrics* info = new SkAdvancedTypefaceMetrics;
+
+ {
+ AutoCFRelease<CFStringRef> fontName(CTFontCopyPostScriptName(ctFont));
+ if (fontName.get()) {
+ CFStringToSkString(fontName, &info->fFontName);
+ }
+ }
+
+ CFIndex glyphCount = CTFontGetGlyphCount(ctFont);
+ info->fLastGlyphID = SkToU16(glyphCount - 1);
+ info->fEmSize = CTFontGetUnitsPerEm(ctFont);
+
+ if (perGlyphInfo & kToUnicode_PerGlyphInfo) {
+ populate_glyph_to_unicode(ctFont, glyphCount, &info->fGlyphToUnicode);
+ }
+
+ // If it's not a TrueType font, mark it as 'other'. Assume that TrueType
+ // fonts always have both glyf and loca tables. At the least, this is what
+ // sfntly needs to subset the font. CTFontCopyAttribute() does not always
+ // succeed in determining this directly.
+ if (!this->getTableSize('glyf') || !this->getTableSize('loca')) {
+ return info;
+ }
+
+ info->fType = SkAdvancedTypefaceMetrics::kTrueType_Font;
+ CTFontSymbolicTraits symbolicTraits = CTFontGetSymbolicTraits(ctFont);
+ if (symbolicTraits & kCTFontMonoSpaceTrait) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kFixedPitch_Style;
+ }
+ if (symbolicTraits & kCTFontItalicTrait) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kItalic_Style;
+ }
+ CTFontStylisticClass stylisticClass = symbolicTraits & kCTFontClassMaskTrait;
+ if (stylisticClass >= kCTFontOldStyleSerifsClass && stylisticClass <= kCTFontSlabSerifsClass) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kSerif_Style;
+ } else if (stylisticClass & kCTFontScriptsClass) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kScript_Style;
+ }
+ info->fItalicAngle = (int16_t) CTFontGetSlantAngle(ctFont);
+ info->fAscent = (int16_t) CTFontGetAscent(ctFont);
+ info->fDescent = (int16_t) CTFontGetDescent(ctFont);
+ info->fCapHeight = (int16_t) CTFontGetCapHeight(ctFont);
+ CGRect bbox = CTFontGetBoundingBox(ctFont);
+
+ SkRect r;
+ r.set( CGToScalar(CGRectGetMinX_inline(bbox)), // Left
+ CGToScalar(CGRectGetMaxY_inline(bbox)), // Top
+ CGToScalar(CGRectGetMaxX_inline(bbox)), // Right
+ CGToScalar(CGRectGetMinY_inline(bbox))); // Bottom
+
+ r.roundOut(&(info->fBBox));
+
+ // Figure out a good guess for StemV - Min width of i, I, !, 1.
+ // This probably isn't very good with an italic font.
+ int16_t min_width = SHRT_MAX;
+ info->fStemV = 0;
+ static const UniChar stem_chars[] = {'i', 'I', '!', '1'};
+ const size_t count = sizeof(stem_chars) / sizeof(stem_chars[0]);
+ CGGlyph glyphs[count];
+ CGRect boundingRects[count];
+ if (CTFontGetGlyphsForCharacters(ctFont, stem_chars, glyphs, count)) {
+ CTFontGetBoundingRectsForGlyphs(ctFont, kCTFontHorizontalOrientation,
+ glyphs, boundingRects, count);
+ for (size_t i = 0; i < count; i++) {
+ int16_t width = (int16_t) boundingRects[i].size.width;
+ if (width > 0 && width < min_width) {
+ min_width = width;
+ info->fStemV = min_width;
+ }
+ }
+ }
+ return info;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static SK_SFNT_ULONG get_font_type_tag(const SkTypeface_Mac* typeface) {
+ CTFontRef ctFont = typeface->fFontRef.get();
+ AutoCFRelease<CFNumberRef> fontFormatRef(
+ static_cast<CFNumberRef>(CTFontCopyAttribute(ctFont, kCTFontFormatAttribute)));
+ if (!fontFormatRef) {
+ return 0;
+ }
+
+ SInt32 fontFormatValue;
+ if (!CFNumberGetValue(fontFormatRef, kCFNumberSInt32Type, &fontFormatValue)) {
+ return 0;
+ }
+
+ switch (fontFormatValue) {
+ case kCTFontFormatOpenTypePostScript:
+ return SkSFNTHeader::fontType_OpenTypeCFF::TAG;
+ case kCTFontFormatOpenTypeTrueType:
+ return SkSFNTHeader::fontType_WindowsTrueType::TAG;
+ case kCTFontFormatTrueType:
+ return SkSFNTHeader::fontType_MacTrueType::TAG;
+ case kCTFontFormatPostScript:
+ return SkSFNTHeader::fontType_PostScript::TAG;
+ case kCTFontFormatBitmap:
+ return SkSFNTHeader::fontType_MacTrueType::TAG;
+ case kCTFontFormatUnrecognized:
+ default:
+ //CT seems to be unreliable in being able to obtain the type,
+ //even if all we want is the first four bytes of the font resource.
+ //Just the presence of the FontForge 'FFTM' table seems to throw it off.
+ return SkSFNTHeader::fontType_WindowsTrueType::TAG;
+ }
+}
+
+SkStreamAsset* SkTypeface_Mac::onOpenStream(int* ttcIndex) const {
+ SK_SFNT_ULONG fontType = get_font_type_tag(this);
+ if (0 == fontType) {
+ return nullptr;
+ }
+
+ // get table tags
+ int numTables = this->countTables();
+ SkTDArray<SkFontTableTag> tableTags;
+ tableTags.setCount(numTables);
+ this->getTableTags(tableTags.begin());
+
+ // calc total size for font, save sizes
+ SkTDArray<size_t> tableSizes;
+ size_t totalSize = sizeof(SkSFNTHeader) + sizeof(SkSFNTHeader::TableDirectoryEntry) * numTables;
+ for (int tableIndex = 0; tableIndex < numTables; ++tableIndex) {
+ size_t tableSize = this->getTableSize(tableTags[tableIndex]);
+ totalSize += (tableSize + 3) & ~3;
+ *tableSizes.append() = tableSize;
+ }
+
+ // reserve memory for stream, and zero it (tables must be zero padded)
+ SkMemoryStream* stream = new SkMemoryStream(totalSize);
+ char* dataStart = (char*)stream->getMemoryBase();
+ sk_bzero(dataStart, totalSize);
+ char* dataPtr = dataStart;
+
+ // compute font header entries
+ uint16_t entrySelector = 0;
+ uint16_t searchRange = 1;
+ while (searchRange < numTables >> 1) {
+ entrySelector++;
+ searchRange <<= 1;
+ }
+ searchRange <<= 4;
+ uint16_t rangeShift = (numTables << 4) - searchRange;
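+ // e.g. with 20 tables the loop yields entrySelector == 4, searchRange == 16 << 4 == 256,
+ // and rangeShift == 20 * 16 - 256 == 64.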
+
+ // write font header
+ SkSFNTHeader* header = (SkSFNTHeader*)dataPtr;
+ header->fontType = fontType;
+ header->numTables = SkEndian_SwapBE16(numTables);
+ header->searchRange = SkEndian_SwapBE16(searchRange);
+ header->entrySelector = SkEndian_SwapBE16(entrySelector);
+ header->rangeShift = SkEndian_SwapBE16(rangeShift);
+ dataPtr += sizeof(SkSFNTHeader);
+
+ // write tables
+ SkSFNTHeader::TableDirectoryEntry* entry = (SkSFNTHeader::TableDirectoryEntry*)dataPtr;
+ dataPtr += sizeof(SkSFNTHeader::TableDirectoryEntry) * numTables;
+ for (int tableIndex = 0; tableIndex < numTables; ++tableIndex) {
+ size_t tableSize = tableSizes[tableIndex];
+ this->getTableData(tableTags[tableIndex], 0, tableSize, dataPtr);
+ entry->tag = SkEndian_SwapBE32(tableTags[tableIndex]);
+ entry->checksum = SkEndian_SwapBE32(SkOTUtils::CalcTableChecksum((SK_OT_ULONG*)dataPtr,
+ tableSize));
+ entry->offset = SkEndian_SwapBE32(SkToU32(dataPtr - dataStart));
+ entry->logicalLength = SkEndian_SwapBE32(SkToU32(tableSize));
+
+ dataPtr += (tableSize + 3) & ~3;
+ ++entry;
+ }
+
+ *ttcIndex = 0;
+ return stream;
+}
+
+struct NonDefaultAxesContext {
+ SkFixed* axisValue;
+ CFArrayRef cgAxes;
+};
+static void set_non_default_axes(CFTypeRef key, CFTypeRef value, void* context) {
+ NonDefaultAxesContext* self = static_cast<NonDefaultAxesContext*>(context);
+
+ if (CFGetTypeID(key) != CFStringGetTypeID() || CFGetTypeID(value) != CFNumberGetTypeID()) {
+ return;
+ }
+
+ // The key is a CFString which is a string from the 'name' table.
+ // Search the cgAxes for an axis with this name, and use its index to store the value.
+ CFIndex keyIndex = -1;
+ CFStringRef keyString = static_cast<CFStringRef>(key);
+ for (CFIndex i = 0; i < CFArrayGetCount(self->cgAxes); ++i) {
+ CFTypeRef cgAxis = CFArrayGetValueAtIndex(self->cgAxes, i);
+ if (CFGetTypeID(cgAxis) != CFDictionaryGetTypeID()) {
+ continue;
+ }
+
+ CFDictionaryRef cgAxisDict = static_cast<CFDictionaryRef>(cgAxis);
+ CFTypeRef cgAxisName = CFDictionaryGetValue(cgAxisDict, kCGFontVariationAxisName);
+ if (!cgAxisName || CFGetTypeID(cgAxisName) != CFStringGetTypeID()) {
+ continue;
+ }
+ CFStringRef cgAxisNameString = static_cast<CFStringRef>(cgAxisName);
+ if (CFStringCompare(keyString, cgAxisNameString, 0) == kCFCompareEqualTo) {
+ keyIndex = i;
+ break;
+ }
+ }
+ if (keyIndex == -1) {
+ return;
+ }
+
+ CFNumberRef valueNumber = static_cast<CFNumberRef>(value);
+ double valueDouble;
+ if (!CFNumberGetValue(valueNumber, kCFNumberDoubleType, &valueDouble) ||
+ valueDouble < SkFixedToDouble(SK_FixedMin) || SkFixedToDouble(SK_FixedMax) < valueDouble)
+ {
+ return;
+ }
+ self->axisValue[keyIndex] = SkDoubleToFixed(valueDouble);
+}
+static bool get_variations(CTFontRef fFontRef, CFIndex* cgAxisCount,
+ SkAutoSTMalloc<4, SkFixed>* axisValues)
+{
+ // CTFontCopyVariationAxes and CTFontCopyVariation do not work when applied to fonts which
+ // started life with CGFontCreateWithDataProvider (they simply always return nullptr).
+ // As a result, we are limited to CGFontCopyVariationAxes and CGFontCopyVariations.
+ AutoCFRelease<CGFontRef> cgFont(CTFontCopyGraphicsFont(fFontRef, nullptr));
+
+ AutoCFRelease<CFDictionaryRef> cgVariations(CGFontCopyVariations(cgFont));
+ // If a font has no variations CGFontCopyVariations returns nullptr (instead of an empty dict).
+ if (!cgVariations.get()) {
+ return false;
+ }
+
+ AutoCFRelease<CFArrayRef> cgAxes(CGFontCopyVariationAxes(cgFont));
+ *cgAxisCount = CFArrayGetCount(cgAxes);
+ axisValues->reset(*cgAxisCount);
+
+ // Set all of the axes to their default values.
+ // Fail if any default value cannot be determined.
+ for (CFIndex i = 0; i < *cgAxisCount; ++i) {
+ CFTypeRef cgAxis = CFArrayGetValueAtIndex(cgAxes, i);
+ if (CFGetTypeID(cgAxis) != CFDictionaryGetTypeID()) {
+ return false;
+ }
+
+ CFDictionaryRef cgAxisDict = static_cast<CFDictionaryRef>(cgAxis);
+ CFTypeRef axisDefaultValue = CFDictionaryGetValue(cgAxisDict,
+ kCGFontVariationAxisDefaultValue);
+ if (!axisDefaultValue || CFGetTypeID(axisDefaultValue) != CFNumberGetTypeID()) {
+ return false;
+ }
+ CFNumberRef axisDefaultValueNumber = static_cast<CFNumberRef>(axisDefaultValue);
+ double axisDefaultValueDouble;
+ if (!CFNumberGetValue(axisDefaultValueNumber, kCFNumberDoubleType, &axisDefaultValueDouble))
+ {
+ return false;
+ }
+ if (axisDefaultValueDouble < SkFixedToDouble(SK_FixedMin) ||
+ SkFixedToDouble(SK_FixedMax) < axisDefaultValueDouble)
+ {
+ return false;
+ }
+ (*axisValues)[(int)i] = SkDoubleToFixed(axisDefaultValueDouble);
+ }
+
+ // Override the default values with the given font's stated axis values.
+ NonDefaultAxesContext c = { axisValues->get(), cgAxes.get() };
+ CFDictionaryApplyFunction(cgVariations, set_non_default_axes, &c);
+
+ return true;
+}
+std::unique_ptr<SkFontData> SkTypeface_Mac::onMakeFontData() const {
+ int index;
+ std::unique_ptr<SkStreamAsset> stream(this->onOpenStream(&index));
+
+ CFIndex cgAxisCount;
+ SkAutoSTMalloc<4, SkFixed> axisValues;
+ if (get_variations(fFontRef, &cgAxisCount, &axisValues)) {
+ return skstd::make_unique<SkFontData>(std::move(stream), index,
+ axisValues.get(), cgAxisCount);
+ }
+ return skstd::make_unique<SkFontData>(std::move(stream), index, nullptr, 0);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+int SkTypeface_Mac::onGetUPEM() const {
+ AutoCFRelease<CGFontRef> cgFont(CTFontCopyGraphicsFont(fFontRef, nullptr));
+ return CGFontGetUnitsPerEm(cgFont);
+}
+
+SkTypeface::LocalizedStrings* SkTypeface_Mac::onCreateFamilyNameIterator() const {
+ SkTypeface::LocalizedStrings* nameIter =
+ SkOTUtils::LocalizedStrings_NameTable::CreateForFamilyNames(*this);
+ if (nullptr == nameIter) {
+ AutoCFRelease<CFStringRef> cfLanguage;
+ AutoCFRelease<CFStringRef> cfFamilyName(
+ CTFontCopyLocalizedName(fFontRef, kCTFontFamilyNameKey, &cfLanguage));
+
+ SkString skLanguage;
+ SkString skFamilyName;
+ if (cfLanguage.get()) {
+ CFStringToSkString(cfLanguage.get(), &skLanguage);
+ } else {
+ skLanguage = "und"; //undetermined
+ }
+ if (cfFamilyName.get()) {
+ CFStringToSkString(cfFamilyName.get(), &skFamilyName);
+ }
+
+ nameIter = new SkOTUtils::LocalizedStrings_SingleName(skFamilyName, skLanguage);
+ }
+ return nameIter;
+}
+
+// If, as is the case with web fonts, the CTFont data isn't available,
+// the CGFont data may work. While the CGFont may always provide the
+// right result, leave the CTFont code path to minimize disruption.
+static CFDataRef copyTableFromFont(CTFontRef ctFont, SkFontTableTag tag) {
+ CFDataRef data = CTFontCopyTable(ctFont, (CTFontTableTag) tag,
+ kCTFontTableOptionNoOptions);
+ if (nullptr == data) {
+ AutoCFRelease<CGFontRef> cgFont(CTFontCopyGraphicsFont(ctFont, nullptr));
+ data = CGFontCopyTableForTag(cgFont, tag);
+ }
+ return data;
+}
+
+int SkTypeface_Mac::onGetTableTags(SkFontTableTag tags[]) const {
+ AutoCFRelease<CFArrayRef> cfArray(CTFontCopyAvailableTables(fFontRef,
+ kCTFontTableOptionNoOptions));
+ if (nullptr == cfArray) {
+ return 0;
+ }
+ int count = SkToInt(CFArrayGetCount(cfArray));
+ if (tags) {
+ for (int i = 0; i < count; ++i) {
+ uintptr_t fontTag = reinterpret_cast<uintptr_t>(CFArrayGetValueAtIndex(cfArray, i));
+ tags[i] = static_cast<SkFontTableTag>(fontTag);
+ }
+ }
+ return count;
+}
+
+size_t SkTypeface_Mac::onGetTableData(SkFontTableTag tag, size_t offset,
+ size_t length, void* dstData) const {
+ AutoCFRelease<CFDataRef> srcData(copyTableFromFont(fFontRef, tag));
+ if (nullptr == srcData) {
+ return 0;
+ }
+
+ size_t srcSize = CFDataGetLength(srcData);
+ if (offset >= srcSize) {
+ return 0;
+ }
+ if (length > srcSize - offset) {
+ length = srcSize - offset;
+ }
+ if (dstData) {
+ memcpy(dstData, CFDataGetBytePtr(srcData) + offset, length);
+ }
+ return length;
+}
+
+SkScalerContext* SkTypeface_Mac::onCreateScalerContext(const SkScalerContextEffects& effects,
+ const SkDescriptor* desc) const {
+ return new SkScalerContext_Mac(const_cast<SkTypeface_Mac*>(this), effects, desc);
+}
+
+void SkTypeface_Mac::onFilterRec(SkScalerContextRec* rec) const {
+ if (rec->fFlags & SkScalerContext::kLCD_BGROrder_Flag ||
+ rec->fFlags & SkScalerContext::kLCD_Vertical_Flag)
+ {
+ rec->fMaskFormat = SkMask::kA8_Format;
+ // Render the glyphs as close as possible to what was requested.
+ // The above turns off subpixel rendering even though the user requested it.
+ // Normal hinting will cause the A8 masks to be generated from CoreGraphics subpixel masks.
+ // See comments below for more details.
+ rec->setHinting(SkPaint::kNormal_Hinting);
+ }
+
+ unsigned flagsWeDontSupport = SkScalerContext::kDevKernText_Flag |
+ SkScalerContext::kForceAutohinting_Flag |
+ SkScalerContext::kLCD_BGROrder_Flag |
+ SkScalerContext::kLCD_Vertical_Flag;
+
+ rec->fFlags &= ~flagsWeDontSupport;
+
+ bool lcdSupport = supports_LCD();
+
+ // Only two levels of hinting are supported.
+ // kNo_Hinting means avoid CoreGraphics outline dilation.
+ // kNormal_Hinting means CoreGraphics outline dilation is allowed.
+ // If there is no lcd support, hinting (dilation) cannot be supported.
+ SkPaint::Hinting hinting = rec->getHinting();
+ if (SkPaint::kSlight_Hinting == hinting || !lcdSupport) {
+ hinting = SkPaint::kNo_Hinting;
+ } else if (SkPaint::kFull_Hinting == hinting) {
+ hinting = SkPaint::kNormal_Hinting;
+ }
+ rec->setHinting(hinting);
+
+ // FIXME: lcd smoothed un-hinted rasterization unsupported.
+ // Tracked by http://code.google.com/p/skia/issues/detail?id=915 .
+ // There is no current means to honor a request for unhinted lcd,
+ // so arbitrarily ignore the hinting request and honor lcd.
+
+ // Hinting and smoothing should be orthogonal, but currently they are not.
+ // CoreGraphics has no API to influence hinting. However, its lcd smoothed
+ // output is drawn from auto-dilated outlines (the amount of which is
+ // determined by AppleFontSmoothing). Its regular anti-aliased output is
+ // drawn from un-dilated outlines.
+
+ // The behavior of Skia is as follows:
+ // [AA][no-hint]: generate AA using CoreGraphic's AA output.
+ // [AA][yes-hint]: use CoreGraphic's LCD output and reduce it to a single
+ // channel. This matches [LCD][yes-hint] in weight.
+ // [LCD][no-hint]: currently unable to honor, and must pick which to respect.
+ // Currently side with LCD, effectively ignoring the hinting setting.
+ // [LCD][yes-hint]: generate LCD using CoreGraphic's LCD output.
+
+ if (isLCDFormat(rec->fMaskFormat)) {
+ if (lcdSupport) {
+ //CoreGraphics creates 555 masks for smoothed text anyway.
+ rec->fMaskFormat = SkMask::kLCD16_Format;
+ rec->setHinting(SkPaint::kNormal_Hinting);
+ } else {
+ rec->fMaskFormat = SkMask::kA8_Format;
+ }
+ }
+
+ // CoreText provides no information as to whether a glyph will be color or not.
+ // Fonts may mix outlines and bitmaps, so information is needed on a glyph by glyph basis.
+ // If a font contains an 'sbix' table, consider it to be a color font, and disable lcd.
+ if (fHasColorGlyphs) {
+ rec->fMaskFormat = SkMask::kARGB32_Format;
+ }
+
+ // Unhinted A8 masks (those not derived from LCD masks) must respect SK_GAMMA_APPLY_TO_A8.
+ // All other masks can use regular gamma.
+ if (SkMask::kA8_Format == rec->fMaskFormat && SkPaint::kNo_Hinting == hinting) {
+#ifndef SK_GAMMA_APPLY_TO_A8
+ // SRGBTODO: Is this correct? Do we want contrast boost?
+ rec->ignorePreBlend();
+#endif
+ } else {
+ //CoreGraphics dilates smoothed text as needed.
+ rec->setContrast(0);
+ }
+}
+
+// we take ownership of the ref
+static const char* get_str(CFStringRef ref, SkString* str) {
+ if (nullptr == ref) {
+ return nullptr;
+ }
+ CFStringToSkString(ref, str);
+ CFSafeRelease(ref);
+ return str->c_str();
+}
+
+void SkTypeface_Mac::onGetFamilyName(SkString* familyName) const {
+ get_str(CTFontCopyFamilyName(fFontRef), familyName);
+}
+
+void SkTypeface_Mac::onGetFontDescriptor(SkFontDescriptor* desc,
+ bool* isLocalStream) const {
+ SkString tmpStr;
+
+ desc->setFamilyName(get_str(CTFontCopyFamilyName(fFontRef), &tmpStr));
+ desc->setFullName(get_str(CTFontCopyFullName(fFontRef), &tmpStr));
+ desc->setPostscriptName(get_str(CTFontCopyPostScriptName(fFontRef), &tmpStr));
+ desc->setStyle(this->fontStyle());
+ *isLocalStream = fIsLocalStream;
+}
+
+int SkTypeface_Mac::onCharsToGlyphs(const void* chars, Encoding encoding,
+ uint16_t glyphs[], int glyphCount) const
+{
+ // Undocumented behavior of CTFontGetGlyphsForCharacters with non-bmp code points:
+ // When a surrogate pair is detected, the glyph index used is the index of the high surrogate.
+ // It is documented that if a mapping is unavailable, the glyph will be set to 0.
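+ // e.g. U+1F600 arrives here as the surrogate pair <D83D, DE00>; the glyph is
+ // reported at the D83D slot and the compaction below drops the DE00 slot.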
+
+ SkAutoSTMalloc<1024, UniChar> charStorage;
+ const UniChar* src; // UniChar is a UTF-16 16-bit code unit.
+ int srcCount;
+ switch (encoding) {
+ case kUTF8_Encoding: {
+ const char* utf8 = reinterpret_cast<const char*>(chars);
+ UniChar* utf16 = charStorage.reset(2 * glyphCount);
+ src = utf16;
+ for (int i = 0; i < glyphCount; ++i) {
+ SkUnichar uni = SkUTF8_NextUnichar(&utf8);
+ utf16 += SkUTF16_FromUnichar(uni, utf16);
+ }
+ srcCount = SkToInt(utf16 - src);
+ break;
+ }
+ case kUTF16_Encoding: {
+ src = reinterpret_cast<const UniChar*>(chars);
+ int extra = 0;
+ for (int i = 0; i < glyphCount; ++i) {
+ if (SkUTF16_IsHighSurrogate(src[i + extra])) {
+ ++extra;
+ }
+ }
+ srcCount = glyphCount + extra;
+ break;
+ }
+ case kUTF32_Encoding: {
+ const SkUnichar* utf32 = reinterpret_cast<const SkUnichar*>(chars);
+ UniChar* utf16 = charStorage.reset(2 * glyphCount);
+ src = utf16;
+ for (int i = 0; i < glyphCount; ++i) {
+ utf16 += SkUTF16_FromUnichar(utf32[i], utf16);
+ }
+ srcCount = SkToInt(utf16 - src);
+ break;
+ }
+ }
+
+ // If glyphs is nullptr, CT still needs glyph storage for finding the first failure.
+ // Also, if there are any non-bmp code points, the provided 'glyphs' storage will be inadequate.
+ SkAutoSTMalloc<1024, uint16_t> glyphStorage;
+ uint16_t* macGlyphs = glyphs;
+ if (nullptr == macGlyphs || srcCount > glyphCount) {
+ macGlyphs = glyphStorage.reset(srcCount);
+ }
+
+ bool allEncoded = CTFontGetGlyphsForCharacters(fFontRef, src, macGlyphs, srcCount);
+
+ // If there were any non-bmp, then copy and compact.
+ // If 'glyphs' is nullptr, then compact glyphStorage in-place.
+ // If all are bmp and 'glyphs' is non-nullptr, 'glyphs' already contains the compact glyphs.
+ // If some are non-bmp and 'glyphs' is non-nullptr, copy and compact into 'glyphs'.
+ uint16_t* compactedGlyphs = glyphs;
+ if (nullptr == compactedGlyphs) {
+ compactedGlyphs = macGlyphs;
+ }
+ if (srcCount > glyphCount) {
+ int extra = 0;
+ for (int i = 0; i < glyphCount; ++i) {
+ compactedGlyphs[i] = macGlyphs[i + extra];
+ if (SkUTF16_IsHighSurrogate(src[i + extra])) {
+ ++extra;
+ }
+ }
+ }
+
+ if (allEncoded) {
+ return glyphCount;
+ }
+
+ // If we got false, then we need to manually look for first failure.
+ for (int i = 0; i < glyphCount; ++i) {
+ if (0 == compactedGlyphs[i]) {
+ return i;
+ }
+ }
+ // Odd to get here, as we expected CT to have returned true up front.
+ return glyphCount;
+}
+
+int SkTypeface_Mac::onCountGlyphs() const {
+ return SkToInt(CTFontGetGlyphCount(fFontRef));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+static bool find_desc_str(CTFontDescriptorRef desc, CFStringRef name, SkString* value) {
+ AutoCFRelease<CFStringRef> ref((CFStringRef)CTFontDescriptorCopyAttribute(desc, name));
+ if (nullptr == ref.get()) {
+ return false;
+ }
+ CFStringToSkString(ref, value);
+ return true;
+}
+
+#include "SkFontMgr.h"
+
+static inline int sqr(int value) {
+ SkASSERT(SkAbs32(value) < 0x7FFF); // check for overflow
+ return value * value;
+}
+
+// We normalize each axis (weight, width, italic) to be base-900
+static int compute_metric(const SkFontStyle& a, const SkFontStyle& b) {
+ return sqr(a.weight() - b.weight()) +
+ sqr((a.width() - b.width()) * 100) +
+ sqr((a.slant() != b.slant()) * 900);
+}
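+// e.g. requesting weight 400 against a face of weight 700 (same width and slant)
+// scores 300^2 == 90000, while any slant mismatch alone scores 900^2 == 810000.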
+
+class SkFontStyleSet_Mac : public SkFontStyleSet {
+public:
+ SkFontStyleSet_Mac(CTFontDescriptorRef desc)
+ : fArray(CTFontDescriptorCreateMatchingFontDescriptors(desc, nullptr))
+ , fCount(0) {
+ if (nullptr == fArray) {
+ fArray = CFArrayCreate(nullptr, nullptr, 0, nullptr);
+ }
+ fCount = SkToInt(CFArrayGetCount(fArray));
+ }
+
+ virtual ~SkFontStyleSet_Mac() {
+ CFRelease(fArray);
+ }
+
+ int count() override {
+ return fCount;
+ }
+
+ void getStyle(int index, SkFontStyle* style, SkString* name) override {
+ SkASSERT((unsigned)index < (unsigned)fCount);
+ CTFontDescriptorRef desc = (CTFontDescriptorRef)CFArrayGetValueAtIndex(fArray, index);
+ if (style) {
+ *style = fontstyle_from_descriptor(desc);
+ }
+ if (name) {
+ if (!find_desc_str(desc, kCTFontStyleNameAttribute, name)) {
+ name->reset();
+ }
+ }
+ }
+
+ SkTypeface* createTypeface(int index) override {
+ SkASSERT((unsigned)index < (unsigned)CFArrayGetCount(fArray));
+ CTFontDescriptorRef desc = (CTFontDescriptorRef)CFArrayGetValueAtIndex(fArray, index);
+
+ return create_from_desc(desc);
+ }
+
+ SkTypeface* matchStyle(const SkFontStyle& pattern) override {
+ if (0 == fCount) {
+ return nullptr;
+ }
+ return create_from_desc(findMatchingDesc(pattern));
+ }
+
+private:
+ CFArrayRef fArray;
+ int fCount;
+
+ CTFontDescriptorRef findMatchingDesc(const SkFontStyle& pattern) const {
+ int bestMetric = SK_MaxS32;
+ CTFontDescriptorRef bestDesc = nullptr;
+
+ for (int i = 0; i < fCount; ++i) {
+ CTFontDescriptorRef desc = (CTFontDescriptorRef)CFArrayGetValueAtIndex(fArray, i);
+ int metric = compute_metric(pattern, fontstyle_from_descriptor(desc));
+ if (0 == metric) {
+ return desc;
+ }
+ if (metric < bestMetric) {
+ bestMetric = metric;
+ bestDesc = desc;
+ }
+ }
+ SkASSERT(bestDesc);
+ return bestDesc;
+ }
+};
+
+class SkFontMgr_Mac : public SkFontMgr {
+ CFArrayRef fNames;
+ int fCount;
+
+ CFStringRef stringAt(int index) const {
+ SkASSERT((unsigned)index < (unsigned)fCount);
+ return (CFStringRef)CFArrayGetValueAtIndex(fNames, index);
+ }
+
+ static SkFontStyleSet* CreateSet(CFStringRef cfFamilyName) {
+ AutoCFRelease<CFMutableDictionaryRef> cfAttr(
+ CFDictionaryCreateMutable(kCFAllocatorDefault, 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
+
+ CFDictionaryAddValue(cfAttr, kCTFontFamilyNameAttribute, cfFamilyName);
+
+ AutoCFRelease<CTFontDescriptorRef> desc(
+ CTFontDescriptorCreateWithAttributes(cfAttr));
+ return new SkFontStyleSet_Mac(desc);
+ }
+
+public:
+ SkFontMgr_Mac()
+ : fNames(SkCTFontManagerCopyAvailableFontFamilyNames())
+ , fCount(fNames ? SkToInt(CFArrayGetCount(fNames)) : 0) {}
+
+ virtual ~SkFontMgr_Mac() {
+ CFSafeRelease(fNames);
+ }
+
+protected:
+ int onCountFamilies() const override {
+ return fCount;
+ }
+
+ void onGetFamilyName(int index, SkString* familyName) const override {
+ if ((unsigned)index < (unsigned)fCount) {
+ CFStringToSkString(this->stringAt(index), familyName);
+ } else {
+ familyName->reset();
+ }
+ }
+
+ SkFontStyleSet* onCreateStyleSet(int index) const override {
+ if ((unsigned)index >= (unsigned)fCount) {
+ return nullptr;
+ }
+ return CreateSet(this->stringAt(index));
+ }
+
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override {
+ AutoCFRelease<CFStringRef> cfName(make_CFString(familyName));
+ return CreateSet(cfName);
+ }
+
+ virtual SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontStyle) const override {
+ SkAutoTUnref<SkFontStyleSet> sset(this->matchFamily(familyName));
+ return sset->matchStyle(fontStyle);
+ }
+
+ virtual SkTypeface* onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle& style,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override {
+ AutoCFRelease<CTFontDescriptorRef> desc(create_descriptor(familyName, style));
+ AutoCFRelease<CTFontRef> currentFont(CTFontCreateWithFontDescriptor(desc, 0, nullptr));
+
+ // kCFStringEncodingUTF32 is BE unless there is a BOM.
+ // Since there is no machine endian option, explicitly state machine endian.
+#ifdef SK_CPU_LENDIAN
+ constexpr CFStringEncoding encoding = kCFStringEncodingUTF32LE;
+#else
+ constexpr CFStringEncoding encoding = kCFStringEncodingUTF32BE;
+#endif
+ AutoCFRelease<CFStringRef> string(CFStringCreateWithBytes(
+ kCFAllocatorDefault, reinterpret_cast<const UInt8 *>(&character), sizeof(character),
+ encoding, false));
+ CFRange range = CFRangeMake(0, CFStringGetLength(string)); // in UniChar units.
+ AutoCFRelease<CTFontRef> fallbackFont(CTFontCreateForString(currentFont, string, range));
+ return create_from_CTFontRef(fallbackFont.release(), nullptr, false);
+ }
+
+ virtual SkTypeface* onMatchFaceStyle(const SkTypeface* familyMember,
+ const SkFontStyle&) const override {
+ return nullptr;
+ }
+
+ SkTypeface* onCreateFromData(SkData* data, int ttcIndex) const override {
+ AutoCFRelease<CGDataProviderRef> pr(SkCreateDataProviderFromData(sk_ref_sp(data)));
+ if (nullptr == pr) {
+ return nullptr;
+ }
+ return create_from_dataProvider(pr);
+ }
+
+ SkTypeface* onCreateFromStream(SkStreamAsset* bareStream, int ttcIndex) const override {
+ std::unique_ptr<SkStreamAsset> stream(bareStream);
+ AutoCFRelease<CGDataProviderRef> pr(SkCreateDataProviderFromStream(std::move(stream)));
+ if (nullptr == pr) {
+ return nullptr;
+ }
+ return create_from_dataProvider(pr);
+ }
+
+ static CFNumberRef get_tag_for_name(CFStringRef name, CFArrayRef ctAxes) {
+ CFIndex ctAxisCount = CFArrayGetCount(ctAxes);
+ for (int i = 0; i < ctAxisCount; ++i) {
+ CFTypeRef ctAxisInfo = CFArrayGetValueAtIndex(ctAxes, i);
+ if (CFDictionaryGetTypeID() != CFGetTypeID(ctAxisInfo)) {
+ return nullptr;
+ }
+ CFDictionaryRef ctAxisInfoDict = static_cast<CFDictionaryRef>(ctAxisInfo);
+
+ CFTypeRef ctAxisName = CFDictionaryGetValue(ctAxisInfoDict,
+ kCTFontVariationAxisNameKey);
+ if (!ctAxisName || CFGetTypeID(ctAxisName) != CFStringGetTypeID()) {
+ return nullptr;
+ }
+
+ if (CFEqual(name, ctAxisName)) {
+ CFTypeRef tag = CFDictionaryGetValue(ctAxisInfoDict,
+ kCTFontVariationAxisIdentifierKey);
+ if (!tag || CFGetTypeID(tag) != CFNumberGetTypeID()) {
+ return nullptr;
+ }
+ return static_cast<CFNumberRef>(tag);
+ }
+ }
+ return nullptr;
+ }
+ static CFDictionaryRef get_axes(CGFontRef cg, const FontParameters& params) {
+ AutoCFRelease<CFArrayRef> cgAxes(CGFontCopyVariationAxes(cg));
+ if (!cgAxes) {
+ return nullptr;
+ }
+ CFIndex axisCount = CFArrayGetCount(cgAxes);
+
+ // The CGFont variation data is keyed by name, and lacks the tag.
+ // The CTFont variation data is keyed by tag, and also has the name.
+ // We would like to work with CTFont variations, but creating a CTFont with a
+ // CTFont variation dictionary runs into bugs. So use the CTFont variation data
+ // to match names to tags to create the appropriate CGFont.
+ AutoCFRelease<CTFontRef> ct(CTFontCreateWithGraphicsFont(cg, 0, nullptr, nullptr));
+ AutoCFRelease<CFArrayRef> ctAxes(CTFontCopyVariationAxes(ct));
+ if (!ctAxes || CFArrayGetCount(ctAxes) != axisCount) {
+ return nullptr;
+ }
+
+ int paramAxisCount;
+ const FontParameters::Axis* paramAxes = params.getAxes(&paramAxisCount);
+
+ CFMutableDictionaryRef dict = CFDictionaryCreateMutable(kCFAllocatorDefault, axisCount,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks);
+ for (int i = 0; i < axisCount; ++i) {
+ CFTypeRef axisInfo = CFArrayGetValueAtIndex(cgAxes, i);
+ if (CFDictionaryGetTypeID() != CFGetTypeID(axisInfo)) {
+ return nullptr;
+ }
+ CFDictionaryRef axisInfoDict = static_cast<CFDictionaryRef>(axisInfo);
+
+ CFTypeRef axisName = CFDictionaryGetValue(axisInfoDict, kCGFontVariationAxisName);
+ if (!axisName || CFGetTypeID(axisName) != CFStringGetTypeID()) {
+ return nullptr;
+ }
+
+ CFNumberRef tagNumber = get_tag_for_name(static_cast<CFStringRef>(axisName), ctAxes);
+ if (!tagNumber) {
+ // Could not find a tag to go with the name of this index.
+ // This would be a bug in CG/CT.
+ continue;
+ }
+ int64_t tagLong;
+ if (!CFNumberGetValue(tagNumber, kCFNumberSInt64Type, &tagLong)) {
+ return nullptr;
+ }
+
+ // The variation axes can be set to any value, but cg will effectively pin them.
+ // Pin them here to normalize.
+ CFTypeRef min = CFDictionaryGetValue(axisInfoDict, kCGFontVariationAxisMinValue);
+ CFTypeRef max = CFDictionaryGetValue(axisInfoDict, kCGFontVariationAxisMaxValue);
+ CFTypeRef def = CFDictionaryGetValue(axisInfoDict, kCGFontVariationAxisDefaultValue);
+ if (!min || CFGetTypeID(min) != CFNumberGetTypeID() ||
+ !max || CFGetTypeID(max) != CFNumberGetTypeID() ||
+ !def || CFGetTypeID(def) != CFNumberGetTypeID())
+ {
+ return nullptr;
+ }
+ CFNumberRef minNumber = static_cast<CFNumberRef>(min);
+ CFNumberRef maxNumber = static_cast<CFNumberRef>(max);
+ CFNumberRef defNumber = static_cast<CFNumberRef>(def);
+ double minDouble;
+ double maxDouble;
+ double defDouble;
+ if (!CFNumberGetValue(minNumber, kCFNumberDoubleType, &minDouble) ||
+ !CFNumberGetValue(maxNumber, kCFNumberDoubleType, &maxDouble) ||
+ !CFNumberGetValue(defNumber, kCFNumberDoubleType, &defDouble))
+ {
+ return nullptr;
+ }
+
+ double value = defDouble;
+ for (int j = 0; j < paramAxisCount; ++j) {
+ if (paramAxes[j].fTag == tagLong) {
+ value = SkTPin(SkScalarToDouble(paramAxes[j].fStyleValue),minDouble,maxDouble);
+ break;
+ }
+ }
+ CFNumberRef valueNumber = CFNumberCreate(kCFAllocatorDefault, kCFNumberDoubleType,
+ &value);
+ CFDictionaryAddValue(dict, axisName, valueNumber);
+ CFRelease(valueNumber);
+ }
+ return dict;
+ }
+ SkTypeface* onCreateFromStream(SkStreamAsset* bs, const FontParameters& params) const override {
+ std::unique_ptr<SkStreamAsset> s(bs);
+ AutoCFRelease<CGDataProviderRef> provider(SkCreateDataProviderFromStream(std::move(s)));
+ if (nullptr == provider) {
+ return nullptr;
+ }
+ AutoCFRelease<CGFontRef> cg(CGFontCreateWithDataProvider(provider));
+ if (nullptr == cg) {
+ return nullptr;
+ }
+
+ AutoCFRelease<CFDictionaryRef> cgVariations(get_axes(cg, params));
+ // The CGFontRef returned by CGFontCreateCopyWithVariations when the passed CGFontRef was
+ // created from a data provider does not appear to have any ownership of the underlying
+ // data. The original CGFontRef must be kept alive until the copy will no longer be used.
+ AutoCFRelease<CGFontRef> cgVariant;
+ if (cgVariations) {
+ cgVariant.reset(CGFontCreateCopyWithVariations(cg, cgVariations));
+ } else {
+ cgVariant.reset(cg.release());
+ }
+
+ AutoCFRelease<CTFontRef> ct(CTFontCreateWithGraphicsFont(cgVariant, 0, nullptr, nullptr));
+ if (!ct) {
+ return nullptr;
+ }
+ return create_from_CTFontRef(ct.release(), cg.release(), true);
+ }
+
+ static CFDictionaryRef get_axes(CGFontRef cg, SkFontData* fontData) {
+ AutoCFRelease<CFArrayRef> cgAxes(CGFontCopyVariationAxes(cg));
+ if (!cgAxes) {
+ return nullptr;
+ }
+
+ CFIndex axisCount = CFArrayGetCount(cgAxes);
+ if (0 == axisCount || axisCount != fontData->getAxisCount()) {
+ return nullptr;
+ }
+
+ CFMutableDictionaryRef dict = CFDictionaryCreateMutable(kCFAllocatorDefault, axisCount,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks);
+ for (int i = 0; i < fontData->getAxisCount(); ++i) {
+ CFTypeRef axisInfo = CFArrayGetValueAtIndex(cgAxes, i);
+ if (CFDictionaryGetTypeID() != CFGetTypeID(axisInfo)) {
+ return nullptr;
+ }
+ CFDictionaryRef axisInfoDict = static_cast<CFDictionaryRef>(axisInfo);
+
+ CFTypeRef axisName = CFDictionaryGetValue(axisInfoDict, kCGFontVariationAxisName);
+ if (!axisName || CFGetTypeID(axisName) != CFStringGetTypeID()) {
+ return nullptr;
+ }
+
+ // The variation axes can be set to any value, but cg will effectively pin them.
+ // Pin them here to normalize.
+ CFTypeRef min = CFDictionaryGetValue(axisInfoDict, kCGFontVariationAxisMinValue);
+ CFTypeRef max = CFDictionaryGetValue(axisInfoDict, kCGFontVariationAxisMaxValue);
+ if (!min || CFGetTypeID(min) != CFNumberGetTypeID() ||
+ !max || CFGetTypeID(max) != CFNumberGetTypeID())
+ {
+ return nullptr;
+ }
+ CFNumberRef minNumber = static_cast<CFNumberRef>(min);
+ CFNumberRef maxNumber = static_cast<CFNumberRef>(max);
+ double minDouble;
+ double maxDouble;
+ if (!CFNumberGetValue(minNumber, kCFNumberDoubleType, &minDouble) ||
+ !CFNumberGetValue(maxNumber, kCFNumberDoubleType, &maxDouble))
+ {
+ return nullptr;
+ }
+ double value = SkTPin(SkFixedToDouble(fontData->getAxis()[i]), minDouble, maxDouble);
+ CFNumberRef valueNumber = CFNumberCreate(kCFAllocatorDefault, kCFNumberDoubleType,
+ &value);
+
+ CFDictionaryAddValue(dict, axisName, valueNumber);
+ CFRelease(valueNumber);
+ }
+ return dict;
+ }
+ SkTypeface* onCreateFromFontData(std::unique_ptr<SkFontData> fontData) const override {
+ AutoCFRelease<CGDataProviderRef> provider(
+ SkCreateDataProviderFromStream(fontData->detachStream()));
+ if (nullptr == provider) {
+ return nullptr;
+ }
+ AutoCFRelease<CGFontRef> cg(CGFontCreateWithDataProvider(provider));
+ if (nullptr == cg) {
+ return nullptr;
+ }
+
+ AutoCFRelease<CFDictionaryRef> cgVariations(get_axes(cg, fontData.get()));
+ // The CGFontRef returned by CGFontCreateCopyWithVariations when the passed CGFontRef was
+ // created from a data provider does not appear to have any ownership of the underlying
+ // data. The original CGFontRef must be kept alive until the copy will no longer be used.
+ AutoCFRelease<CGFontRef> cgVariant;
+ if (cgVariations) {
+ cgVariant.reset(CGFontCreateCopyWithVariations(cg, cgVariations));
+ } else {
+ cgVariant.reset(cg.release());
+ }
+
+ AutoCFRelease<CTFontRef> ct(CTFontCreateWithGraphicsFont(cgVariant, 0, nullptr, nullptr));
+ if (!ct) {
+ return nullptr;
+ }
+ return create_from_CTFontRef(ct.release(), cg.release(), true);
+ }
+
+ SkTypeface* onCreateFromFile(const char path[], int ttcIndex) const override {
+ AutoCFRelease<CGDataProviderRef> pr(CGDataProviderCreateWithFilename(path));
+ if (nullptr == pr) {
+ return nullptr;
+ }
+ return create_from_dataProvider(pr);
+ }
+
+ SkTypeface* onLegacyCreateTypeface(const char familyName[], SkFontStyle style) const override {
+ if (familyName) {
+ familyName = map_css_names(familyName);
+ }
+
+ if (!familyName || !*familyName) {
+ familyName = FONT_DEFAULT_NAME;
+ }
+
+ SkTypeface* face = create_from_name(familyName, style);
+ if (face) {
+ return face;
+ }
+
+ return SkSafeRef(GetDefaultFace());
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkFontMgr* SkFontMgr::Factory() { return new SkFontMgr_Mac; }
+
+#endif//defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
diff --git a/gfx/skia/skia/src/ports/SkFontHost_win.cpp b/gfx/skia/skia/src/ports/SkFontHost_win.cpp
new file mode 100644
index 000000000..c1013d767
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontHost_win.cpp
@@ -0,0 +1,2503 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32)
+
+#include "SkAdvancedTypefaceMetrics.h"
+#include "SkBase64.h"
+#include "SkColorPriv.h"
+#include "SkData.h"
+#include "SkDescriptor.h"
+#include "SkFontDescriptor.h"
+#include "SkGlyph.h"
+#include "SkHRESULT.h"
+#include "SkMaskGamma.h"
+#include "SkMatrix22.h"
+#include "SkOTTable_maxp.h"
+#include "SkOTTable_name.h"
+#include "SkOTUtils.h"
+#include "SkPath.h"
+#include "SkSFNTHeader.h"
+#include "SkStream.h"
+#include "SkString.h"
+#include "SkTemplates.h"
+#include "SkTypeface_win.h"
+#include "SkTypeface_win_dw.h"
+#include "SkTypefaceCache.h"
+#include "SkUtils.h"
+
+#include "SkTypes.h"
+#include <tchar.h>
+#include <usp10.h>
+#include <objbase.h>
+
+static void (*gEnsureLOGFONTAccessibleProc)(const LOGFONT&);
+
+void SkTypeface_SetEnsureLOGFONTAccessibleProc(void (*proc)(const LOGFONT&)) {
+ gEnsureLOGFONTAccessibleProc = proc;
+}
+
+static void call_ensure_accessible(const LOGFONT& lf) {
+ if (gEnsureLOGFONTAccessibleProc) {
+ gEnsureLOGFONTAccessibleProc(lf);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// always packed xxRRGGBB
+typedef uint32_t SkGdiRGB;
+
+// define this in your Makefile or .gyp to enforce AA requests
+// which GDI ignores at small sizes. This flag guarantees AA
+// for rotated text, regardless of GDI's notions.
+//#define SK_ENFORCE_ROTATED_TEXT_AA_ON_WINDOWS
+
+static bool isLCD(const SkScalerContext::Rec& rec) {
+ return SkMask::kLCD16_Format == rec.fMaskFormat;
+}
+
+static bool bothZero(SkScalar a, SkScalar b) {
+ return 0 == a && 0 == b;
+}
+
+// returns false if there is any non-90-rotation or skew
+static bool isAxisAligned(const SkScalerContext::Rec& rec) {
+ return 0 == rec.fPreSkewX &&
+ (bothZero(rec.fPost2x2[0][1], rec.fPost2x2[1][0]) ||
+ bothZero(rec.fPost2x2[0][0], rec.fPost2x2[1][1]));
+}
+
+static bool needToRenderWithSkia(const SkScalerContext::Rec& rec) {
+#ifdef SK_ENFORCE_ROTATED_TEXT_AA_ON_WINDOWS
+ // What we really want to catch is when GDI will ignore the AA request and give
+ // us BW instead. Smallish rotated text is one heuristic, so this code is just
+ // an approximation. We shouldn't need to do this for larger sizes, but at those
+ // sizes, the quality difference gets less and less between our general
+ // scanconverter and GDI's.
+ if (SkMask::kA8_Format == rec.fMaskFormat && !isAxisAligned(rec)) {
+ return true;
+ }
+#endif
+ return rec.getHinting() == SkPaint::kNo_Hinting || rec.getHinting() == SkPaint::kSlight_Hinting;
+}
+
+static void tchar_to_skstring(const TCHAR t[], SkString* s) {
+#ifdef UNICODE
+ size_t sSize = WideCharToMultiByte(CP_UTF8, 0, t, -1, nullptr, 0, nullptr, nullptr);
+ s->resize(sSize);
+ WideCharToMultiByte(CP_UTF8, 0, t, -1, s->writable_str(), sSize, nullptr, nullptr);
+#else
+ s->set(t);
+#endif
+}
+
+static void dcfontname_to_skstring(HDC deviceContext, const LOGFONT& lf, SkString* familyName) {
+ int fontNameLen; //length of fontName in TCHARS.
+ if (0 == (fontNameLen = GetTextFace(deviceContext, 0, nullptr))) {
+ call_ensure_accessible(lf);
+ if (0 == (fontNameLen = GetTextFace(deviceContext, 0, nullptr))) {
+ fontNameLen = 0;
+ }
+ }
+
+ SkAutoSTArray<LF_FULLFACESIZE, TCHAR> fontName(fontNameLen+1);
+ if (0 == GetTextFace(deviceContext, fontNameLen, fontName.get())) {
+ call_ensure_accessible(lf);
+ if (0 == GetTextFace(deviceContext, fontNameLen, fontName.get())) {
+ fontName[0] = 0;
+ }
+ }
+
+ tchar_to_skstring(fontName.get(), familyName);
+}
+
+static void make_canonical(LOGFONT* lf) {
+ lf->lfHeight = -64;
+ lf->lfWidth = 0; // lfWidth is related to lfHeight, not to the OS/2::usWidthClass.
+ lf->lfQuality = CLEARTYPE_QUALITY;//PROOF_QUALITY;
+ lf->lfCharSet = DEFAULT_CHARSET;
+// lf->lfClipPrecision = 64;
+}
+
+static SkFontStyle get_style(const LOGFONT& lf) {
+ return SkFontStyle(lf.lfWeight,
+ SkFontStyle::kNormal_Width,
+ lf.lfItalic ? SkFontStyle::kItalic_Slant : SkFontStyle::kUpright_Slant);
+}
+
+static inline FIXED SkFixedToFIXED(SkFixed x) {
+ return *(FIXED*)(&x);
+}
+static inline SkFixed SkFIXEDToFixed(FIXED x) {
+ return *(SkFixed*)(&x);
+}
+
+static inline FIXED SkScalarToFIXED(SkScalar x) {
+ return SkFixedToFIXED(SkScalarToFixed(x));
+}
+
+static inline SkScalar SkFIXEDToScalar(FIXED x) {
+ return SkFixedToScalar(SkFIXEDToFixed(x));
+}
+
+static unsigned calculateGlyphCount(HDC hdc, const LOGFONT& lf) {
+ TEXTMETRIC textMetric;
+ if (0 == GetTextMetrics(hdc, &textMetric)) {
+ textMetric.tmPitchAndFamily = TMPF_VECTOR;
+ call_ensure_accessible(lf);
+ GetTextMetrics(hdc, &textMetric);
+ }
+
+ if (!(textMetric.tmPitchAndFamily & TMPF_VECTOR)) {
+ return textMetric.tmLastChar;
+ }
+
+ // The 'maxp' table stores the number of glyphs at offset 4, in 2 bytes.
+ uint16_t glyphs;
+ if (GDI_ERROR != GetFontData(hdc, SkOTTableMaximumProfile::TAG, 4, &glyphs, sizeof(glyphs))) {
+ return SkEndian_SwapBE16(glyphs);
+ }
+
+ // Binary search for glyph count.
+ static const MAT2 mat2 = {{0, 1}, {0, 0}, {0, 0}, {0, 1}};
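+ // (FIXED is {fract, value}, so this is the identity matrix.)
+ // The search assumes valid glyph ids form a contiguous range starting at 0 and
+ // that GetGlyphOutlineW fails for ids at or beyond the glyph count, so min
+ // converges on that count.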
+ int32_t max = SK_MaxU16 + 1;
+ int32_t min = 0;
+ GLYPHMETRICS gm;
+ while (min < max) {
+ int32_t mid = min + ((max - min) / 2);
+ if (GetGlyphOutlineW(hdc, mid, GGO_METRICS | GGO_GLYPH_INDEX, &gm, 0,
+ nullptr, &mat2) == GDI_ERROR) {
+ max = mid;
+ } else {
+ min = mid + 1;
+ }
+ }
+ SkASSERT(min == max);
+ return min;
+}
+
+static unsigned calculateUPEM(HDC hdc, const LOGFONT& lf) {
+ TEXTMETRIC textMetric;
+ if (0 == GetTextMetrics(hdc, &textMetric)) {
+ textMetric.tmPitchAndFamily = TMPF_VECTOR;
+ call_ensure_accessible(lf);
+ GetTextMetrics(hdc, &textMetric);
+ }
+
+ if (!(textMetric.tmPitchAndFamily & TMPF_VECTOR)) {
+ return textMetric.tmMaxCharWidth;
+ }
+
+ OUTLINETEXTMETRIC otm;
+ unsigned int otmRet = GetOutlineTextMetrics(hdc, sizeof(otm), &otm);
+ if (0 == otmRet) {
+ call_ensure_accessible(lf);
+ otmRet = GetOutlineTextMetrics(hdc, sizeof(otm), &otm);
+ }
+
+ return (0 == otmRet) ? 0 : otm.otmEMSquare;
+}
+
+class LogFontTypeface : public SkTypeface {
+public:
+ LogFontTypeface(const SkFontStyle& style, const LOGFONT& lf, bool serializeAsStream)
+ : SkTypeface(style, false)
+ , fLogFont(lf)
+ , fSerializeAsStream(serializeAsStream)
+ {
+ HFONT font = CreateFontIndirect(&lf);
+
+ HDC deviceContext = ::CreateCompatibleDC(nullptr);
+ HFONT savefont = (HFONT)SelectObject(deviceContext, font);
+
+ TEXTMETRIC textMetric;
+ if (0 == GetTextMetrics(deviceContext, &textMetric)) {
+ call_ensure_accessible(lf);
+ if (0 == GetTextMetrics(deviceContext, &textMetric)) {
+ textMetric.tmPitchAndFamily = TMPF_TRUETYPE;
+ }
+ }
+ if (deviceContext) {
+ ::SelectObject(deviceContext, savefont);
+ ::DeleteDC(deviceContext);
+ }
+ if (font) {
+ ::DeleteObject(font);
+ }
+
+ // The fixed pitch bit is set if the font is *not* fixed pitch.
+ this->setIsFixedPitch((textMetric.tmPitchAndFamily & TMPF_FIXED_PITCH) == 0);
+ this->setFontStyle(SkFontStyle(textMetric.tmWeight, style.width(), style.slant()));
+
+ // Used a logfont on a memory context, should never get a device font.
+ // Therefore all TMPF_DEVICE will be PostScript (cubic) fonts.
+ // If the font has cubic outlines, it will not be rendered with ClearType.
+ fCanBeLCD = !((textMetric.tmPitchAndFamily & TMPF_VECTOR) &&
+ (textMetric.tmPitchAndFamily & TMPF_DEVICE));
+ }
+
+ LOGFONT fLogFont;
+ bool fSerializeAsStream;
+ bool fCanBeLCD;
+
+ static LogFontTypeface* Create(const LOGFONT& lf) {
+ return new LogFontTypeface(get_style(lf), lf, false);
+ }
+
+ static void EnsureAccessible(const SkTypeface* face) {
+ call_ensure_accessible(static_cast<const LogFontTypeface*>(face)->fLogFont);
+ }
+
+protected:
+ SkStreamAsset* onOpenStream(int* ttcIndex) const override;
+ SkScalerContext* onCreateScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor*) const override;
+ void onFilterRec(SkScalerContextRec*) const override;
+ SkAdvancedTypefaceMetrics* onGetAdvancedTypefaceMetrics(
+ PerGlyphInfo, const uint32_t*, uint32_t) const override;
+ void onGetFontDescriptor(SkFontDescriptor*, bool*) const override;
+ virtual int onCharsToGlyphs(const void* chars, Encoding encoding,
+ uint16_t glyphs[], int glyphCount) const override;
+ int onCountGlyphs() const override;
+ int onGetUPEM() const override;
+ void onGetFamilyName(SkString* familyName) const override;
+ SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override;
+ int onGetTableTags(SkFontTableTag tags[]) const override;
+ virtual size_t onGetTableData(SkFontTableTag, size_t offset,
+ size_t length, void* data) const override;
+};
+
+class FontMemResourceTypeface : public LogFontTypeface {
+public:
+ /**
+ * The created FontMemResourceTypeface takes ownership of fontMemResource.
+ */
+ static FontMemResourceTypeface* Create(const LOGFONT& lf, HANDLE fontMemResource) {
+ return new FontMemResourceTypeface(get_style(lf), lf, fontMemResource);
+ }
+
+protected:
+ void weak_dispose() const override {
+ RemoveFontMemResourceEx(fFontMemResource);
+ //SkTypefaceCache::Remove(this);
+ INHERITED::weak_dispose();
+ }
+
+private:
+ /**
+ * Takes ownership of fontMemResource.
+ */
+ FontMemResourceTypeface(const SkFontStyle& style, const LOGFONT& lf, HANDLE fontMemResource)
+ : LogFontTypeface(style, lf, true), fFontMemResource(fontMemResource)
+ { }
+
+ HANDLE fFontMemResource;
+
+ typedef LogFontTypeface INHERITED;
+};
+
+static const LOGFONT& get_default_font() {
+ static LOGFONT gDefaultFont;
+ return gDefaultFont;
+}
+
+static bool FindByLogFont(SkTypeface* face, void* ctx) {
+ LogFontTypeface* lface = static_cast<LogFontTypeface*>(face);
+ const LOGFONT* lf = reinterpret_cast<const LOGFONT*>(ctx);
+
+ return !memcmp(&lface->fLogFont, lf, sizeof(LOGFONT));
+}
+
+/**
+ * This function is public. It first searches the cache; if no match is found,
+ * it creates a new face.
+ */
+SkTypeface* SkCreateTypefaceFromLOGFONT(const LOGFONT& origLF) {
+ LOGFONT lf = origLF;
+ make_canonical(&lf);
+ SkTypeface* face = SkTypefaceCache::FindByProcAndRef(FindByLogFont, &lf);
+ if (nullptr == face) {
+ face = LogFontTypeface::Create(lf);
+ SkTypefaceCache::Add(face);
+ }
+ return face;
+}
+
+/**
+ * This function is public.
+ */
+
+SkTypeface* SkCreateTypefaceFromDWriteFont(IDWriteFactory* aFactory,
+ IDWriteFontFace* aFontFace,
+ SkFontStyle aStyle,
+ bool aForceGDI)
+{
+ return DWriteFontTypeface::Create(aFactory, aFontFace, aStyle, aForceGDI);
+}
+
+/**
+ * The created SkTypeface takes ownership of fontMemResource.
+ */
+SkTypeface* SkCreateFontMemResourceTypefaceFromLOGFONT(const LOGFONT& origLF, HANDLE fontMemResource) {
+ LOGFONT lf = origLF;
+ make_canonical(&lf);
+ // We'll never get a cache hit, so no point in putting this in SkTypefaceCache.
+ return FontMemResourceTypeface::Create(lf, fontMemResource);
+}
+
+/**
+ * This function is public.
+ */
+void SkLOGFONTFromTypeface(const SkTypeface* face, LOGFONT* lf) {
+ if (nullptr == face) {
+ *lf = get_default_font();
+ } else {
+ *lf = static_cast<const LogFontTypeface*>(face)->fLogFont;
+ }
+}
+
+// Construct Glyph to Unicode table.
+// Unicode code points that require conjugate pairs in utf16 are not
+// supported.
+// TODO(arthurhsu): Add support for conjugate pairs. It looks like that may
+// require parsing the TTF cmap table (platform 4, encoding 12) directly instead
+// of calling GetFontUnicodeRange().
+static void populate_glyph_to_unicode(HDC fontHdc, const unsigned glyphCount,
+ SkTDArray<SkUnichar>* glyphToUnicode) {
+ DWORD glyphSetBufferSize = GetFontUnicodeRanges(fontHdc, nullptr);
+ if (!glyphSetBufferSize) {
+ return;
+ }
+
+ SkAutoTDeleteArray<BYTE> glyphSetBuffer(new BYTE[glyphSetBufferSize]);
+ GLYPHSET* glyphSet =
+ reinterpret_cast<LPGLYPHSET>(glyphSetBuffer.get());
+ if (GetFontUnicodeRanges(fontHdc, glyphSet) != glyphSetBufferSize) {
+ return;
+ }
+
+ glyphToUnicode->setCount(glyphCount);
+ memset(glyphToUnicode->begin(), 0, glyphCount * sizeof(SkUnichar));
+ for (DWORD i = 0; i < glyphSet->cRanges; ++i) {
+        // There is no guarantee that the glyph ids within a Unicode range are
+        // contiguous in the font file. So even though we have ranges, we cannot
+        // just use the first and last entries of a range to compute the result;
+        // we need to enumerate the characters one by one.
+ int count = glyphSet->ranges[i].cGlyphs;
+ SkAutoTArray<WCHAR> chars(count + 1);
+        chars[count] = 0;  // terminate the string
+ SkAutoTArray<WORD> glyph(count);
+ for (USHORT j = 0; j < count; ++j) {
+ chars[j] = glyphSet->ranges[i].wcLow + j;
+ }
+ GetGlyphIndicesW(fontHdc, chars.get(), count, glyph.get(),
+ GGI_MARK_NONEXISTING_GLYPHS);
+        // If the glyph ID is valid and the glyph is not yet mapped, fill the
+        // char id into the vector. If the glyph is already mapped, skip it.
+        // TODO(arthurhsu): Improve this, e.g. get all used char ids from the
+        // font cache and generate this mapping table from there. Collisions are
+        // unlikely since glyph reuse happens mostly across different Unicode
+        // pages.
+ for (USHORT j = 0; j < count; ++j) {
+ if (glyph[j] != 0xffff && glyph[j] < glyphCount &&
+ (*glyphToUnicode)[glyph[j]] == 0) {
+ (*glyphToUnicode)[glyph[j]] = chars[j];
+ }
+ }
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+
+static int alignTo32(int n) {
+ return (n + 31) & ~31;
+}
+
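+// BITMAPINFO only declares bmiColors[1]; reserve room for the second palette entry
+// needed by the 1bpp (black/white) case below.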
+struct MyBitmapInfo : public BITMAPINFO {
+ RGBQUAD fMoreSpaceForColors[1];
+};
+
+class HDCOffscreen {
+public:
+ HDCOffscreen() {
+ fFont = 0;
+ fDC = 0;
+ fBM = 0;
+ fBits = nullptr;
+ fWidth = fHeight = 0;
+ fIsBW = false;
+ }
+
+ ~HDCOffscreen() {
+ if (fDC) {
+ DeleteDC(fDC);
+ }
+ if (fBM) {
+ DeleteObject(fBM);
+ }
+ }
+
+ void init(HFONT font, const XFORM& xform) {
+ fFont = font;
+ fXform = xform;
+ }
+
+ const void* draw(const SkGlyph&, bool isBW, size_t* srcRBPtr);
+
+private:
+ HDC fDC;
+ HBITMAP fBM;
+ HFONT fFont;
+ XFORM fXform;
+ void* fBits; // points into fBM
+ int fWidth;
+ int fHeight;
+ bool fIsBW;
+};
+
+const void* HDCOffscreen::draw(const SkGlyph& glyph, bool isBW,
+ size_t* srcRBPtr) {
+ // Can we share the scalercontext's fDDC, so we don't need to create
+ // a separate fDC here?
+ if (0 == fDC) {
+ fDC = CreateCompatibleDC(0);
+ if (0 == fDC) {
+ return nullptr;
+ }
+ SetGraphicsMode(fDC, GM_ADVANCED);
+ SetBkMode(fDC, TRANSPARENT);
+ SetTextAlign(fDC, TA_LEFT | TA_BASELINE);
+ SelectObject(fDC, fFont);
+
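+        // Draw white text onto the zeroed (black) bitmap so each channel holds coverage.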
+ COLORREF color = 0x00FFFFFF;
+ SkDEBUGCODE(COLORREF prev =) SetTextColor(fDC, color);
+ SkASSERT(prev != CLR_INVALID);
+ }
+
+ if (fBM && (fIsBW != isBW || fWidth < glyph.fWidth || fHeight < glyph.fHeight)) {
+ DeleteObject(fBM);
+ fBM = 0;
+ }
+ fIsBW = isBW;
+
+ fWidth = SkMax32(fWidth, glyph.fWidth);
+ fHeight = SkMax32(fHeight, glyph.fHeight);
+
+ int biWidth = isBW ? alignTo32(fWidth) : fWidth;
+
+ if (0 == fBM) {
+ MyBitmapInfo info;
+ sk_bzero(&info, sizeof(info));
+ if (isBW) {
+ RGBQUAD blackQuad = { 0, 0, 0, 0 };
+ RGBQUAD whiteQuad = { 0xFF, 0xFF, 0xFF, 0 };
+ info.bmiColors[0] = blackQuad;
+ info.bmiColors[1] = whiteQuad;
+ }
+ info.bmiHeader.biSize = sizeof(info.bmiHeader);
+ info.bmiHeader.biWidth = biWidth;
+ info.bmiHeader.biHeight = fHeight;
+ info.bmiHeader.biPlanes = 1;
+ info.bmiHeader.biBitCount = isBW ? 1 : 32;
+ info.bmiHeader.biCompression = BI_RGB;
+ if (isBW) {
+ info.bmiHeader.biClrUsed = 2;
+ }
+ fBM = CreateDIBSection(fDC, &info, DIB_RGB_COLORS, &fBits, 0, 0);
+ if (0 == fBM) {
+ return nullptr;
+ }
+ SelectObject(fDC, fBM);
+ }
+
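+    // Row bytes: 1bpp rows are biWidth/8 bytes; 32bpp rows are fWidth*4 bytes.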
+ // erase
+ size_t srcRB = isBW ? (biWidth >> 3) : (fWidth << 2);
+ size_t size = fHeight * srcRB;
+ memset(fBits, 0, size);
+
+ XFORM xform = fXform;
+ xform.eDx = (float)-glyph.fLeft;
+ xform.eDy = (float)-glyph.fTop;
+ SetWorldTransform(fDC, &xform);
+
+ uint16_t glyphID = glyph.getGlyphID();
+ BOOL ret = ExtTextOutW(fDC, 0, 0, ETO_GLYPH_INDEX, nullptr, reinterpret_cast<LPCWSTR>(&glyphID), 1, nullptr);
+ GdiFlush();
+ if (0 == ret) {
+ return nullptr;
+ }
+ *srcRBPtr = srcRB;
+ // offset to the start of the image
+ return (const char*)fBits + (fHeight - glyph.fHeight) * srcRB;
+}
+
+//////////////////////////////////////////////////////////////////////////////
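+// 8KB scratch buffer for GetGlyphOutlineW; see the sizing notes in generatePath().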
+#define BUFFERSIZE (1 << 13)
+
+class SkScalerContext_GDI : public SkScalerContext {
+public:
+ SkScalerContext_GDI(SkTypeface*, const SkScalerContextEffects&, const SkDescriptor* desc);
+ virtual ~SkScalerContext_GDI();
+
+ // Returns true if the constructor was able to complete all of its
+ // initializations (which may include calling GDI).
+ bool isValid() const;
+
+protected:
+ unsigned generateGlyphCount() override;
+ uint16_t generateCharToGlyph(SkUnichar uni) override;
+ void generateAdvance(SkGlyph* glyph) override;
+ void generateMetrics(SkGlyph* glyph) override;
+ void generateImage(const SkGlyph& glyph) override;
+ void generatePath(const SkGlyph& glyph, SkPath* path) override;
+ void generateFontMetrics(SkPaint::FontMetrics*) override;
+
+private:
+ DWORD getGDIGlyphPath(const SkGlyph& glyph, UINT flags,
+ SkAutoSTMalloc<BUFFERSIZE, uint8_t>* glyphbuf);
+
+ HDCOffscreen fOffscreen;
+ /** fGsA is the non-rotational part of total matrix without the text height scale.
+ * Used to find the magnitude of advances.
+ */
+ MAT2 fGsA;
+ /** The total matrix without the textSize. */
+ MAT2 fMat22;
+ /** Scales font to EM size. */
+ MAT2 fHighResMat22;
+ HDC fDDC;
+ HFONT fSavefont;
+ HFONT fFont;
+ SCRIPT_CACHE fSC;
+ int fGlyphCount;
+
+ /** The total matrix which also removes EM scale. */
+ SkMatrix fHiResMatrix;
+ /** fG_inv is the inverse of the rotational part of the total matrix.
+ * Used to set the direction of advances.
+ */
+ SkMatrix fG_inv;
+ enum Type {
+ kTrueType_Type, kBitmap_Type, kLine_Type
+ } fType;
+ TEXTMETRIC fTM;
+};
+
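+// GDI's FIXED and Skia's SkFixed are both 16.16 fixed point; route conversions
+// through the SkFixed helpers.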
+static FIXED float2FIXED(float x) {
+ return SkFixedToFIXED(SkFloatToFixed(x));
+}
+
+static inline float FIXED2float(FIXED x) {
+ return SkFixedToFloat(SkFIXEDToFixed(x));
+}
+
+static BYTE compute_quality(const SkScalerContext::Rec& rec) {
+ switch (rec.fMaskFormat) {
+ case SkMask::kBW_Format:
+ return NONANTIALIASED_QUALITY;
+ case SkMask::kLCD16_Format:
+ return CLEARTYPE_QUALITY;
+ default:
+ if (rec.fFlags & SkScalerContext::kGenA8FromLCD_Flag) {
+ return CLEARTYPE_QUALITY;
+ } else {
+ return ANTIALIASED_QUALITY;
+ }
+ }
+}
+
+SkScalerContext_GDI::SkScalerContext_GDI(SkTypeface* rawTypeface,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : SkScalerContext(rawTypeface, effects, desc)
+ , fDDC(0)
+ , fSavefont(0)
+ , fFont(0)
+ , fSC(0)
+ , fGlyphCount(-1)
+{
+ LogFontTypeface* typeface = reinterpret_cast<LogFontTypeface*>(rawTypeface);
+
+ fDDC = ::CreateCompatibleDC(nullptr);
+ if (!fDDC) {
+ return;
+ }
+ SetGraphicsMode(fDDC, GM_ADVANCED);
+ SetBkMode(fDDC, TRANSPARENT);
+
+ // When GDI hinting, remove the entire Y scale from sA and GsA. (Prevents 'linear' metrics.)
+ // When not hinting, remove only the integer Y scale from sA and GsA. (Applied by GDI.)
+ SkScalerContextRec::PreMatrixScale scaleConstraints =
+ (fRec.getHinting() == SkPaint::kNo_Hinting || fRec.getHinting() == SkPaint::kSlight_Hinting)
+ ? SkScalerContextRec::kVerticalInteger_PreMatrixScale
+ : SkScalerContextRec::kVertical_PreMatrixScale;
+ SkVector scale;
+ SkMatrix sA;
+ SkMatrix GsA;
+ SkMatrix A;
+ fRec.computeMatrices(scaleConstraints, &scale, &sA, &GsA, &fG_inv, &A);
+
+ fGsA.eM11 = SkScalarToFIXED(GsA.get(SkMatrix::kMScaleX));
+ fGsA.eM12 = SkScalarToFIXED(-GsA.get(SkMatrix::kMSkewY)); // This should be ~0.
+ fGsA.eM21 = SkScalarToFIXED(-GsA.get(SkMatrix::kMSkewX));
+ fGsA.eM22 = SkScalarToFIXED(GsA.get(SkMatrix::kMScaleY));
+
+ // When not hinting, scale was computed with kVerticalInteger, so is already an integer.
+ // The sA and GsA transforms will be used to create 'linear' metrics.
+
+ // When hinting, scale was computed with kVertical, stating that our port can handle
+ // non-integer scales. This is done so that sA and GsA are computed without any 'residual'
+ // scale in them, preventing 'linear' metrics. However, GDI cannot actually handle non-integer
+ // scales so we need to round in this case. This is fine, since all of the scale has been
+ // removed from sA and GsA, so GDI will be handling the scale completely.
+ SkScalar gdiTextSize = SkScalarRoundToScalar(scale.fY);
+
+ // GDI will not accept a size of zero, so round the range [0, 1] to 1.
+ // If the size was non-zero, the scale factors will also be non-zero and 1px tall text is drawn.
+ // If the size actually was zero, the scale factors will also be zero, so GDI will draw nothing.
+ if (gdiTextSize == 0) {
+ gdiTextSize = SK_Scalar1;
+ }
+
+ LOGFONT lf = typeface->fLogFont;
+ lf.lfHeight = -SkScalarTruncToInt(gdiTextSize);
+ lf.lfQuality = compute_quality(fRec);
+ fFont = CreateFontIndirect(&lf);
+ if (!fFont) {
+ return;
+ }
+
+ fSavefont = (HFONT)SelectObject(fDDC, fFont);
+
+ if (0 == GetTextMetrics(fDDC, &fTM)) {
+ call_ensure_accessible(lf);
+ if (0 == GetTextMetrics(fDDC, &fTM)) {
+ fTM.tmPitchAndFamily = TMPF_TRUETYPE;
+ }
+ }
+
+ XFORM xform;
+ if (fTM.tmPitchAndFamily & TMPF_VECTOR) {
+        // We used a LOGFONT on a memory DC, so we should never get a device font.
+        // Therefore any TMPF_DEVICE font here is a PostScript font.
+
+ // If TMPF_VECTOR is set, one of TMPF_TRUETYPE or TMPF_DEVICE means that
+ // we have an outline font. Otherwise we have a vector FON, which is
+ // scalable, but not an outline font.
+ // This was determined by testing with Type1 PFM/PFB and
+ // OpenTypeCFF OTF, as well as looking at Wine bugs and sources.
+ if (fTM.tmPitchAndFamily & (TMPF_TRUETYPE | TMPF_DEVICE)) {
+ // Truetype or PostScript.
+ fType = SkScalerContext_GDI::kTrueType_Type;
+ } else {
+ // Stroked FON.
+ fType = SkScalerContext_GDI::kLine_Type;
+ }
+
+ // fPost2x2 is column-major, left handed (y down).
+ // XFORM 2x2 is row-major, left handed (y down).
+ xform.eM11 = SkScalarToFloat(sA.get(SkMatrix::kMScaleX));
+ xform.eM12 = SkScalarToFloat(sA.get(SkMatrix::kMSkewY));
+ xform.eM21 = SkScalarToFloat(sA.get(SkMatrix::kMSkewX));
+ xform.eM22 = SkScalarToFloat(sA.get(SkMatrix::kMScaleY));
+ xform.eDx = 0;
+ xform.eDy = 0;
+
+ // MAT2 is row major, right handed (y up).
+ fMat22.eM11 = float2FIXED(xform.eM11);
+ fMat22.eM12 = float2FIXED(-xform.eM12);
+ fMat22.eM21 = float2FIXED(-xform.eM21);
+ fMat22.eM22 = float2FIXED(xform.eM22);
+
+ if (needToRenderWithSkia(fRec)) {
+ this->forceGenerateImageFromPath();
+ }
+
+ // Create a hires matrix if we need linear metrics.
+ if (this->isSubpixel()) {
+ OUTLINETEXTMETRIC otm;
+ UINT success = GetOutlineTextMetrics(fDDC, sizeof(otm), &otm);
+ if (0 == success) {
+ call_ensure_accessible(lf);
+ success = GetOutlineTextMetrics(fDDC, sizeof(otm), &otm);
+ }
+ if (0 != success) {
+ SkScalar upem = SkIntToScalar(otm.otmEMSquare);
+
+ SkScalar gdiTextSizeToEMScale = upem / gdiTextSize;
+ fHighResMat22.eM11 = float2FIXED(gdiTextSizeToEMScale);
+ fHighResMat22.eM12 = float2FIXED(0);
+ fHighResMat22.eM21 = float2FIXED(0);
+ fHighResMat22.eM22 = float2FIXED(gdiTextSizeToEMScale);
+
+ SkScalar removeEMScale = SkScalarInvert(upem);
+ fHiResMatrix = A;
+ fHiResMatrix.preScale(removeEMScale, removeEMScale);
+ }
+ }
+
+ } else {
+ // Assume bitmap
+ fType = SkScalerContext_GDI::kBitmap_Type;
+
+ xform.eM11 = 1.0f;
+ xform.eM12 = 0.0f;
+ xform.eM21 = 0.0f;
+ xform.eM22 = 1.0f;
+ xform.eDx = 0.0f;
+ xform.eDy = 0.0f;
+
+ // fPost2x2 is column-major, left handed (y down).
+ // MAT2 is row major, right handed (y up).
+ fMat22.eM11 = SkScalarToFIXED(fRec.fPost2x2[0][0]);
+ fMat22.eM12 = SkScalarToFIXED(-fRec.fPost2x2[1][0]);
+ fMat22.eM21 = SkScalarToFIXED(-fRec.fPost2x2[0][1]);
+ fMat22.eM22 = SkScalarToFIXED(fRec.fPost2x2[1][1]);
+ }
+
+ fOffscreen.init(fFont, xform);
+}
+
+SkScalerContext_GDI::~SkScalerContext_GDI() {
+ if (fDDC) {
+ ::SelectObject(fDDC, fSavefont);
+ ::DeleteDC(fDDC);
+ }
+ if (fFont) {
+ ::DeleteObject(fFont);
+ }
+ if (fSC) {
+ ::ScriptFreeCache(&fSC);
+ }
+}
+
+bool SkScalerContext_GDI::isValid() const {
+ return fDDC && fFont;
+}
+
+unsigned SkScalerContext_GDI::generateGlyphCount() {
+ if (fGlyphCount < 0) {
+ fGlyphCount = calculateGlyphCount(
+ fDDC, static_cast<const LogFontTypeface*>(this->getTypeface())->fLogFont);
+ }
+ return fGlyphCount;
+}
+
+uint16_t SkScalerContext_GDI::generateCharToGlyph(SkUnichar utf32) {
+ uint16_t index = 0;
+ WCHAR utf16[2];
+ // TODO(ctguil): Support characters that generate more than one glyph.
+ if (SkUTF16_FromUnichar(utf32, (uint16_t*)utf16) == 1) {
+        // Type1 fonts fail with the Uniscribe API, so use GetGlyphIndices for plane 0.
+
+        /** Real documentation for GetGlyphIndicesW:
+ *
+ * When GGI_MARK_NONEXISTING_GLYPHS is not specified and a character does not map to a
+ * glyph, then the 'default character's glyph is returned instead. The 'default character'
+ * is available in fTM.tmDefaultChar. FON fonts have a default character, and there exists
+ * a usDefaultChar in the 'OS/2' table, version 2 and later. If there is no
+ * 'default character' specified by the font, then often the first character found is used.
+ *
+ * When GGI_MARK_NONEXISTING_GLYPHS is specified and a character does not map to a glyph,
+ * then the glyph 0xFFFF is used. In Windows XP and earlier, Bitmap/Vector FON usually use
+ * glyph 0x1F instead ('Terminal' appears to be special, returning 0xFFFF).
+ * Type1 PFM/PFB, TT, OT TT, OT CFF all appear to use 0xFFFF, even on XP.
+ */
+ DWORD result = GetGlyphIndicesW(fDDC, utf16, 1, &index, GGI_MARK_NONEXISTING_GLYPHS);
+ if (result == GDI_ERROR
+ || 0xFFFF == index
+ || (0x1F == index &&
+ (fType == SkScalerContext_GDI::kBitmap_Type ||
+ fType == SkScalerContext_GDI::kLine_Type)
+ /*&& winVer < Vista */)
+ )
+ {
+ index = 0;
+ }
+ } else {
+        // Use Uniscribe to determine the glyph index for non-BMP characters.
+ static const int numWCHAR = 2;
+ static const int maxItems = 2;
+ // MSDN states that this can be nullptr, but some things don't work then.
+ SCRIPT_CONTROL sc = { 0 };
+ // Add extra item to SCRIPT_ITEM to work around a bug (now documented).
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=366643
+ SCRIPT_ITEM si[maxItems + 1];
+ int numItems;
+ HRZM(ScriptItemize(utf16, numWCHAR, maxItems, &sc, nullptr, si, &numItems),
+ "Could not itemize character.");
+
+ // Sometimes ScriptShape cannot find a glyph for a non-BMP and returns 2 space glyphs.
+ static const int maxGlyphs = 2;
+ SCRIPT_VISATTR vsa[maxGlyphs];
+ WORD outGlyphs[maxGlyphs];
+ WORD logClust[numWCHAR];
+ int numGlyphs;
+ HRZM(ScriptShape(fDDC, &fSC, utf16, numWCHAR, maxGlyphs, &si[0].a,
+ outGlyphs, logClust, vsa, &numGlyphs),
+ "Could not shape character.");
+ if (1 == numGlyphs) {
+ index = outGlyphs[0];
+ }
+ }
+ return index;
+}
+
+void SkScalerContext_GDI::generateAdvance(SkGlyph* glyph) {
+ this->generateMetrics(glyph);
+}
+
+void SkScalerContext_GDI::generateMetrics(SkGlyph* glyph) {
+ SkASSERT(fDDC);
+
+ if (fType == SkScalerContext_GDI::kBitmap_Type || fType == SkScalerContext_GDI::kLine_Type) {
+ SIZE size;
+ WORD glyphs = glyph->getGlyphID();
+ if (0 == GetTextExtentPointI(fDDC, &glyphs, 1, &size)) {
+ glyph->fWidth = SkToS16(fTM.tmMaxCharWidth);
+ } else {
+ glyph->fWidth = SkToS16(size.cx);
+ }
+ glyph->fHeight = SkToS16(size.cy);
+
+ glyph->fTop = SkToS16(-fTM.tmAscent);
+        // Bitmap FON fonts cannot underhang, but vector FON fonts may.
+        // There appears to be no way to determine the underhang of a vector FON.
+ glyph->fLeft = SkToS16(0);
+ glyph->fAdvanceX = glyph->fWidth;
+ glyph->fAdvanceY = 0;
+
+ // Vector FON will transform nicely, but bitmap FON do not.
+ if (fType == SkScalerContext_GDI::kLine_Type) {
+ SkRect bounds = SkRect::MakeXYWH(glyph->fLeft, glyph->fTop,
+ glyph->fWidth, glyph->fHeight);
+ SkMatrix m;
+ m.setAll(SkFIXEDToScalar(fMat22.eM11), -SkFIXEDToScalar(fMat22.eM21), 0,
+ -SkFIXEDToScalar(fMat22.eM12), SkFIXEDToScalar(fMat22.eM22), 0,
+ 0, 0, 1);
+ m.mapRect(&bounds);
+ bounds.roundOut(&bounds);
+ glyph->fLeft = SkScalarTruncToInt(bounds.fLeft);
+ glyph->fTop = SkScalarTruncToInt(bounds.fTop);
+ glyph->fWidth = SkScalarTruncToInt(bounds.width());
+ glyph->fHeight = SkScalarTruncToInt(bounds.height());
+ }
+
+ // Apply matrix to advance.
+ glyph->fAdvanceY = -FIXED2float(fMat22.eM12) * glyph->fAdvanceX;
+ glyph->fAdvanceX *= FIXED2float(fMat22.eM11);
+
+ return;
+ }
+
+ UINT glyphId = glyph->getGlyphID();
+
+ GLYPHMETRICS gm;
+ sk_bzero(&gm, sizeof(gm));
+
+ DWORD status = GetGlyphOutlineW(fDDC, glyphId, GGO_METRICS | GGO_GLYPH_INDEX, &gm, 0, nullptr, &fMat22);
+ if (GDI_ERROR == status) {
+ LogFontTypeface::EnsureAccessible(this->getTypeface());
+ status = GetGlyphOutlineW(fDDC, glyphId, GGO_METRICS | GGO_GLYPH_INDEX, &gm, 0, nullptr, &fMat22);
+ if (GDI_ERROR == status) {
+ glyph->zeroMetrics();
+ return;
+ }
+ }
+
+ bool empty = false;
+ // The black box is either the embedded bitmap size or the outline extent.
+ // It is 1x1 if nothing is to be drawn, but will also be 1x1 if something very small
+ // is to be drawn, like a '.'. We need to outset '.' but do not wish to outset ' '.
+ if (1 == gm.gmBlackBoxX && 1 == gm.gmBlackBoxY) {
+ // If GetGlyphOutline with GGO_NATIVE returns 0, we know there was no outline.
+ DWORD bufferSize = GetGlyphOutlineW(fDDC, glyphId, GGO_NATIVE | GGO_GLYPH_INDEX, &gm, 0, nullptr, &fMat22);
+ empty = (0 == bufferSize);
+ }
+
+ glyph->fTop = SkToS16(-gm.gmptGlyphOrigin.y);
+ glyph->fLeft = SkToS16(gm.gmptGlyphOrigin.x);
+ if (empty) {
+ glyph->fWidth = 0;
+ glyph->fHeight = 0;
+ } else {
+ // Outset, since the image may bleed out of the black box.
+ // For embedded bitmaps the black box should be exact.
+ // For outlines we need to outset by 1 in all directions for bleed.
+ // For ClearType we need to outset by 2 for bleed.
+ glyph->fWidth = gm.gmBlackBoxX + 4;
+ glyph->fHeight = gm.gmBlackBoxY + 4;
+ glyph->fTop -= 2;
+ glyph->fLeft -= 2;
+ }
+ // TODO(benjaminwagner): What is the type of gm.gmCellInc[XY]?
+ glyph->fAdvanceX = (float)((int)gm.gmCellIncX);
+ glyph->fAdvanceY = (float)((int)gm.gmCellIncY);
+ glyph->fRsbDelta = 0;
+ glyph->fLsbDelta = 0;
+
+ if (this->isSubpixel()) {
+ sk_bzero(&gm, sizeof(gm));
+ status = GetGlyphOutlineW(fDDC, glyphId, GGO_METRICS | GGO_GLYPH_INDEX, &gm, 0, nullptr, &fHighResMat22);
+ if (GDI_ERROR != status) {
+ SkPoint advance;
+ fHiResMatrix.mapXY(SkIntToScalar(gm.gmCellIncX), SkIntToScalar(gm.gmCellIncY), &advance);
+ glyph->fAdvanceX = SkScalarToFloat(advance.fX);
+ glyph->fAdvanceY = SkScalarToFloat(advance.fY);
+ }
+ } else if (!isAxisAligned(this->fRec)) {
+ status = GetGlyphOutlineW(fDDC, glyphId, GGO_METRICS | GGO_GLYPH_INDEX, &gm, 0, nullptr, &fGsA);
+ if (GDI_ERROR != status) {
+ SkPoint advance;
+ fG_inv.mapXY(SkIntToScalar(gm.gmCellIncX), SkIntToScalar(gm.gmCellIncY), &advance);
+ glyph->fAdvanceX = SkScalarToFloat(advance.fX);
+ glyph->fAdvanceY = SkScalarToFloat(advance.fY);
+ }
+ }
+}
+
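+// MAT2 identity: each eM field is a FIXED {fract, value} pair, so {0, 1} means 1.0.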
+static const MAT2 gMat2Identity = {{0, 1}, {0, 0}, {0, 0}, {0, 1}};
+void SkScalerContext_GDI::generateFontMetrics(SkPaint::FontMetrics* metrics) {
+ if (nullptr == metrics) {
+ return;
+ }
+ sk_bzero(metrics, sizeof(*metrics));
+
+ SkASSERT(fDDC);
+
+#ifndef SK_GDI_ALWAYS_USE_TEXTMETRICS_FOR_FONT_METRICS
+ if (fType == SkScalerContext_GDI::kBitmap_Type || fType == SkScalerContext_GDI::kLine_Type) {
+#endif
+ metrics->fTop = SkIntToScalar(-fTM.tmAscent);
+ metrics->fAscent = SkIntToScalar(-fTM.tmAscent);
+ metrics->fDescent = SkIntToScalar(fTM.tmDescent);
+ metrics->fBottom = SkIntToScalar(fTM.tmDescent);
+ metrics->fLeading = SkIntToScalar(fTM.tmExternalLeading);
+ metrics->fAvgCharWidth = SkIntToScalar(fTM.tmAveCharWidth);
+ metrics->fMaxCharWidth = SkIntToScalar(fTM.tmMaxCharWidth);
+ metrics->fXMin = 0;
+ metrics->fXMax = metrics->fMaxCharWidth;
+ //metrics->fXHeight = 0;
+#ifndef SK_GDI_ALWAYS_USE_TEXTMETRICS_FOR_FONT_METRICS
+ return;
+ }
+#endif
+
+ OUTLINETEXTMETRIC otm;
+
+ uint32_t ret = GetOutlineTextMetrics(fDDC, sizeof(otm), &otm);
+ if (0 == ret) {
+ LogFontTypeface::EnsureAccessible(this->getTypeface());
+ ret = GetOutlineTextMetrics(fDDC, sizeof(otm), &otm);
+ }
+ if (0 == ret) {
+ return;
+ }
+
+#ifndef SK_GDI_ALWAYS_USE_TEXTMETRICS_FOR_FONT_METRICS
+ metrics->fTop = SkIntToScalar(-otm.otmrcFontBox.top);
+ metrics->fAscent = SkIntToScalar(-otm.otmAscent);
+ metrics->fDescent = SkIntToScalar(-otm.otmDescent);
+ metrics->fBottom = SkIntToScalar(-otm.otmrcFontBox.bottom);
+ metrics->fLeading = SkIntToScalar(otm.otmLineGap);
+ metrics->fAvgCharWidth = SkIntToScalar(otm.otmTextMetrics.tmAveCharWidth);
+ metrics->fMaxCharWidth = SkIntToScalar(otm.otmTextMetrics.tmMaxCharWidth);
+ metrics->fXMin = SkIntToScalar(otm.otmrcFontBox.left);
+ metrics->fXMax = SkIntToScalar(otm.otmrcFontBox.right);
+#endif
+ metrics->fUnderlineThickness = SkIntToScalar(otm.otmsUnderscoreSize);
+ metrics->fUnderlinePosition = -SkIntToScalar(otm.otmsUnderscorePosition);
+
+ metrics->fFlags |= SkPaint::FontMetrics::kUnderlineThinknessIsValid_Flag;
+ metrics->fFlags |= SkPaint::FontMetrics::kUnderlinePositionIsValid_Flag;
+
+ metrics->fXHeight = SkIntToScalar(otm.otmsXHeight);
+ GLYPHMETRICS gm;
+ sk_bzero(&gm, sizeof(gm));
+ DWORD len = GetGlyphOutlineW(fDDC, 'x', GGO_METRICS, &gm, 0, 0, &gMat2Identity);
+ if (len != GDI_ERROR && gm.gmBlackBoxY > 0) {
+ metrics->fXHeight = SkIntToScalar(gm.gmBlackBoxY);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+
+#define SK_SHOW_TEXT_BLIT_COVERAGE 0
+
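+// Fills table[i] with round(255 * (i/255)^ee), a power-law transfer table.
+// For example, ee = 2.3 maps 128 to roughly 52.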
+static void build_power_table(uint8_t table[], float ee) {
+ for (int i = 0; i < 256; i++) {
+ float x = i / 255.f;
+ x = sk_float_pow(x, ee);
+ int xx = SkScalarRoundToInt(x * 255);
+ table[i] = SkToU8(xx);
+ }
+}
+
+/**
+ * This will invert the gamma applied by GDI (gray-scale antialiased), so we
+ * can get linear values.
+ *
+ * GDI grayscale appears to use a hard-coded gamma of 2.3.
+ *
+ * GDI grayscale appears to draw using the black and white rasterizer at four
+ * times the size and then downsamples to compute the coverage mask. As a
+ * result there are only seventeen total grays. This lack of fidelity means
+ * that shifting into other color spaces is imprecise.
+ */
+static const uint8_t* getInverseGammaTableGDI() {
+ // Since build_power_table is idempotent, many threads can build gTableGdi
+ // simultaneously.
+
+ // Microsoft Specific:
+    // Making gInited volatile provides read-acquire and write-release in VC++.
+ // In VS2012, see compiler option /volatile:(ms|iso).
+ // Replace with C++11 atomics when possible.
+ static volatile bool gInited;
+ static uint8_t gTableGdi[256];
+ if (gInited) {
+ // Need a L/L (read) barrier (full acquire not needed). If gInited is observed
+ // true then gTableGdi is observable, but it must be requested.
+ } else {
+ build_power_table(gTableGdi, 2.3f);
+ // Need a S/S (write) barrier (full release not needed) here so that this
+ // write to gInited becomes observable after gTableGdi.
+ gInited = true;
+ }
+ return gTableGdi;
+}
+
+/**
+ * This will invert the gamma applied by GDI ClearType, so we can get linear
+ * values.
+ *
+ * GDI ClearType uses SPI_GETFONTSMOOTHINGCONTRAST / 1000 as the gamma value.
+ * If this value is not specified, the default is a gamma of 1.4.
+ */
+static const uint8_t* getInverseGammaTableClearType() {
+ // We don't expect SPI_GETFONTSMOOTHINGCONTRAST to ever change, so building
+ // gTableClearType with build_power_table is effectively idempotent.
+
+ // Microsoft Specific:
+    // Making gInited volatile provides read-acquire and write-release in VC++.
+ // In VS2012, see compiler option /volatile:(ms|iso).
+ // Replace with C++11 atomics when possible.
+ static volatile bool gInited;
+ static uint8_t gTableClearType[256];
+ if (gInited) {
+ // Need a L/L (read) barrier (acquire not needed). If gInited is observed
+ // true then gTableClearType is observable, but it must be requested.
+ } else {
+ UINT level = 0;
+ if (!SystemParametersInfo(SPI_GETFONTSMOOTHINGCONTRAST, 0, &level, 0) || !level) {
+ // can't get the data, so use a default
+ level = 1400;
+ }
+ build_power_table(gTableClearType, level / 1000.0f);
+ // Need a S/S (write) barrier (release not needed) here so that this
+ // write to gInited becomes observable after gTableClearType.
+ gInited = true;
+ }
+ return gTableClearType;
+}
+
+#include "SkColorPriv.h"
+
+//Cannot assume that the input rgb is gray due to possible setting of kGenA8FromLCD_Flag.
+template<bool APPLY_PREBLEND>
+static inline uint8_t rgb_to_a8(SkGdiRGB rgb, const uint8_t* table8) {
+ U8CPU r = (rgb >> 16) & 0xFF;
+ U8CPU g = (rgb >> 8) & 0xFF;
+ U8CPU b = (rgb >> 0) & 0xFF;
+ return sk_apply_lut_if<APPLY_PREBLEND>(SkComputeLuminance(r, g, b), table8);
+}
+
+template<bool APPLY_PREBLEND>
+static inline uint16_t rgb_to_lcd16(SkGdiRGB rgb, const uint8_t* tableR,
+ const uint8_t* tableG,
+ const uint8_t* tableB) {
+ U8CPU r = sk_apply_lut_if<APPLY_PREBLEND>((rgb >> 16) & 0xFF, tableR);
+ U8CPU g = sk_apply_lut_if<APPLY_PREBLEND>((rgb >> 8) & 0xFF, tableG);
+ U8CPU b = sk_apply_lut_if<APPLY_PREBLEND>((rgb >> 0) & 0xFF, tableB);
+#if SK_SHOW_TEXT_BLIT_COVERAGE
+ r = SkMax32(r, 10); g = SkMax32(g, 10); b = SkMax32(b, 10);
+#endif
+ return SkPack888ToRGB16(r, g, b);
+}
+
+// Is this GDI color neither black nor white? If so, we have to keep this
+// image as is, rather than smashing it down to a BW mask.
+//
+// returns int instead of bool, since we don't want/have to pay to convert
+// the zero/non-zero value into a bool
+static int is_not_black_or_white(SkGdiRGB c) {
+ // same as (but faster than)
+ // c &= 0x00FFFFFF;
+ // return 0 == c || 0x00FFFFFF == c;
+ return (c + (c & 1)) & 0x00FFFFFF;
+}
+
+static bool is_rgb_really_bw(const SkGdiRGB* src, int width, int height, size_t srcRB) {
+ for (int y = 0; y < height; ++y) {
+ for (int x = 0; x < width; ++x) {
+ if (is_not_black_or_white(src[x])) {
+ return false;
+ }
+ }
+ src = SkTAddOffset<const SkGdiRGB>(src, srcRB);
+ }
+ return true;
+}
+
+// gdi's bitmap is upside-down, so we reverse dst walking in Y
+// whenever we copy it into skia's buffer
+static void rgb_to_bw(const SkGdiRGB* SK_RESTRICT src, size_t srcRB,
+ const SkGlyph& glyph) {
+ const int width = glyph.fWidth;
+ const size_t dstRB = (width + 7) >> 3;
+ uint8_t* SK_RESTRICT dst = (uint8_t*)((char*)glyph.fImage + (glyph.fHeight - 1) * dstRB);
+
+ int byteCount = width >> 3;
+ int bitCount = width & 7;
+
+ // adjust srcRB to skip the values in our byteCount loop,
+ // since we increment src locally there
+ srcRB -= byteCount * 8 * sizeof(SkGdiRGB);
+
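+    // Every src pixel here is either black (0x..000000) or white (0x..FFFFFF), so
+    // masking bit (7 - k) of pixel k's low byte yields the packed mono bit directly.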
+ for (int y = 0; y < glyph.fHeight; ++y) {
+ if (byteCount > 0) {
+ for (int i = 0; i < byteCount; ++i) {
+ unsigned byte = 0;
+ byte |= src[0] & (1 << 7);
+ byte |= src[1] & (1 << 6);
+ byte |= src[2] & (1 << 5);
+ byte |= src[3] & (1 << 4);
+ byte |= src[4] & (1 << 3);
+ byte |= src[5] & (1 << 2);
+ byte |= src[6] & (1 << 1);
+ byte |= src[7] & (1 << 0);
+ dst[i] = byte;
+ src += 8;
+ }
+ }
+ if (bitCount > 0) {
+ unsigned byte = 0;
+ unsigned mask = 0x80;
+ for (int i = 0; i < bitCount; i++) {
+ byte |= src[i] & mask;
+ mask >>= 1;
+ }
+ dst[byteCount] = byte;
+ }
+ src = SkTAddOffset<const SkGdiRGB>(src, srcRB);
+ dst -= dstRB;
+ }
+#if SK_SHOW_TEXT_BLIT_COVERAGE
+ if (glyph.fWidth > 0 && glyph.fHeight > 0) {
+ uint8_t* first = (uint8_t*)glyph.fImage;
+ uint8_t* last = (uint8_t*)((char*)glyph.fImage + glyph.fHeight * dstRB - 1);
+ *first |= 1 << 7;
+ *last |= bitCount == 0 ? 1 : 1 << (8 - bitCount);
+ }
+#endif
+}
+
+template<bool APPLY_PREBLEND>
+static void rgb_to_a8(const SkGdiRGB* SK_RESTRICT src, size_t srcRB,
+ const SkGlyph& glyph, const uint8_t* table8) {
+ const size_t dstRB = glyph.rowBytes();
+ const int width = glyph.fWidth;
+ uint8_t* SK_RESTRICT dst = (uint8_t*)((char*)glyph.fImage + (glyph.fHeight - 1) * dstRB);
+
+ for (int y = 0; y < glyph.fHeight; y++) {
+ for (int i = 0; i < width; i++) {
+ dst[i] = rgb_to_a8<APPLY_PREBLEND>(src[i], table8);
+#if SK_SHOW_TEXT_BLIT_COVERAGE
+ dst[i] = SkMax32(dst[i], 10);
+#endif
+ }
+ src = SkTAddOffset<const SkGdiRGB>(src, srcRB);
+ dst -= dstRB;
+ }
+}
+
+template<bool APPLY_PREBLEND>
+static void rgb_to_lcd16(const SkGdiRGB* SK_RESTRICT src, size_t srcRB, const SkGlyph& glyph,
+ const uint8_t* tableR, const uint8_t* tableG, const uint8_t* tableB) {
+ const size_t dstRB = glyph.rowBytes();
+ const int width = glyph.fWidth;
+ uint16_t* SK_RESTRICT dst = (uint16_t*)((char*)glyph.fImage + (glyph.fHeight - 1) * dstRB);
+
+ for (int y = 0; y < glyph.fHeight; y++) {
+ for (int i = 0; i < width; i++) {
+ dst[i] = rgb_to_lcd16<APPLY_PREBLEND>(src[i], tableR, tableG, tableB);
+ }
+ src = SkTAddOffset<const SkGdiRGB>(src, srcRB);
+ dst = (uint16_t*)((char*)dst - dstRB);
+ }
+}
+
+void SkScalerContext_GDI::generateImage(const SkGlyph& glyph) {
+ SkASSERT(fDDC);
+
+ const bool isBW = SkMask::kBW_Format == fRec.fMaskFormat;
+ const bool isAA = !isLCD(fRec);
+
+ size_t srcRB;
+ const void* bits = fOffscreen.draw(glyph, isBW, &srcRB);
+ if (nullptr == bits) {
+ LogFontTypeface::EnsureAccessible(this->getTypeface());
+ bits = fOffscreen.draw(glyph, isBW, &srcRB);
+ if (nullptr == bits) {
+ sk_bzero(glyph.fImage, glyph.computeImageSize());
+ return;
+ }
+ }
+
+ if (!isBW) {
+ const uint8_t* table;
+ //The offscreen contains a GDI blit if isAA and kGenA8FromLCD_Flag is not set.
+ //Otherwise the offscreen contains a ClearType blit.
+ if (isAA && !(fRec.fFlags & SkScalerContext::kGenA8FromLCD_Flag)) {
+ table = getInverseGammaTableGDI();
+ } else {
+ table = getInverseGammaTableClearType();
+ }
+        //Note that the following cannot really be folded into the pre-blend:
+        //we may not be applying the pre-blend at all (when we are not, a filter
+        //wants linear values anyway), and other code may also apply the
+        //pre-blend, so we would need one table with this correction and one
+        //without.
+ SkGdiRGB* addr = (SkGdiRGB*)bits;
+ for (int y = 0; y < glyph.fHeight; ++y) {
+ for (int x = 0; x < glyph.fWidth; ++x) {
+ int r = (addr[x] >> 16) & 0xFF;
+ int g = (addr[x] >> 8) & 0xFF;
+ int b = (addr[x] >> 0) & 0xFF;
+ addr[x] = (table[r] << 16) | (table[g] << 8) | table[b];
+ }
+ addr = SkTAddOffset<SkGdiRGB>(addr, srcRB);
+ }
+ }
+
+ int width = glyph.fWidth;
+ size_t dstRB = glyph.rowBytes();
+ if (isBW) {
+ const uint8_t* src = (const uint8_t*)bits;
+ uint8_t* dst = (uint8_t*)((char*)glyph.fImage + (glyph.fHeight - 1) * dstRB);
+ for (int y = 0; y < glyph.fHeight; y++) {
+ memcpy(dst, src, dstRB);
+ src += srcRB;
+ dst -= dstRB;
+ }
+#if SK_SHOW_TEXT_BLIT_COVERAGE
+ if (glyph.fWidth > 0 && glyph.fHeight > 0) {
+ int bitCount = width & 7;
+ uint8_t* first = (uint8_t*)glyph.fImage;
+ uint8_t* last = (uint8_t*)((char*)glyph.fImage + glyph.fHeight * dstRB - 1);
+ *first |= 1 << 7;
+ *last |= bitCount == 0 ? 1 : 1 << (8 - bitCount);
+ }
+#endif
+ } else if (isAA) {
+ // since the caller may require A8 for maskfilters, we can't check for BW
+ // ... until we have the caller tell us that explicitly
+ const SkGdiRGB* src = (const SkGdiRGB*)bits;
+ if (fPreBlend.isApplicable()) {
+ rgb_to_a8<true>(src, srcRB, glyph, fPreBlend.fG);
+ } else {
+ rgb_to_a8<false>(src, srcRB, glyph, fPreBlend.fG);
+ }
+ } else { // LCD16
+ const SkGdiRGB* src = (const SkGdiRGB*)bits;
+ if (is_rgb_really_bw(src, width, glyph.fHeight, srcRB)) {
+ rgb_to_bw(src, srcRB, glyph);
+ ((SkGlyph*)&glyph)->fMaskFormat = SkMask::kBW_Format;
+ } else {
+ SkASSERT(SkMask::kLCD16_Format == glyph.fMaskFormat);
+ if (fPreBlend.isApplicable()) {
+ rgb_to_lcd16<true>(src, srcRB, glyph,
+ fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+ } else {
+ rgb_to_lcd16<false>(src, srcRB, glyph,
+ fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+ }
+ }
+ }
+}
+
+class GDIGlyphbufferPointIter {
+public:
+ GDIGlyphbufferPointIter(const uint8_t* glyphbuf, DWORD total_size)
+ : fHeaderIter(glyphbuf, total_size), fCurveIter(), fPointIter()
+ { }
+
+ POINTFX const * next() {
+nextHeader:
+ if (!fCurveIter.isSet()) {
+ const TTPOLYGONHEADER* header = fHeaderIter.next();
+ if (nullptr == header) {
+ return nullptr;
+ }
+ fCurveIter.set(header);
+ const TTPOLYCURVE* curve = fCurveIter.next();
+ if (nullptr == curve) {
+ return nullptr;
+ }
+ fPointIter.set(curve);
+ return &header->pfxStart;
+ }
+
+ const POINTFX* nextPoint = fPointIter.next();
+ if (nullptr == nextPoint) {
+ const TTPOLYCURVE* curve = fCurveIter.next();
+ if (nullptr == curve) {
+ fCurveIter.set();
+ goto nextHeader;
+ } else {
+ fPointIter.set(curve);
+ }
+ nextPoint = fPointIter.next();
+ }
+ return nextPoint;
+ }
+
+ WORD currentCurveType() {
+ return fPointIter.fCurveType;
+ }
+
+private:
+ /** Iterates over all of the polygon headers in a glyphbuf. */
+ class GDIPolygonHeaderIter {
+ public:
+ GDIPolygonHeaderIter(const uint8_t* glyphbuf, DWORD total_size)
+ : fCurPolygon(reinterpret_cast<const TTPOLYGONHEADER*>(glyphbuf))
+ , fEndPolygon(SkTAddOffset<const TTPOLYGONHEADER>(glyphbuf, total_size))
+ { }
+
+ const TTPOLYGONHEADER* next() {
+ if (fCurPolygon >= fEndPolygon) {
+ return nullptr;
+ }
+ const TTPOLYGONHEADER* thisPolygon = fCurPolygon;
+ fCurPolygon = SkTAddOffset<const TTPOLYGONHEADER>(fCurPolygon, fCurPolygon->cb);
+ return thisPolygon;
+ }
+ private:
+ const TTPOLYGONHEADER* fCurPolygon;
+ const TTPOLYGONHEADER* fEndPolygon;
+ };
+
+ /** Iterates over all of the polygon curves in a polygon header. */
+ class GDIPolygonCurveIter {
+ public:
+ GDIPolygonCurveIter() : fCurCurve(nullptr), fEndCurve(nullptr) { }
+
+ GDIPolygonCurveIter(const TTPOLYGONHEADER* curPolygon)
+ : fCurCurve(SkTAddOffset<const TTPOLYCURVE>(curPolygon, sizeof(TTPOLYGONHEADER)))
+ , fEndCurve(SkTAddOffset<const TTPOLYCURVE>(curPolygon, curPolygon->cb))
+ { }
+
+ bool isSet() { return fCurCurve != nullptr; }
+
+ void set(const TTPOLYGONHEADER* curPolygon) {
+ fCurCurve = SkTAddOffset<const TTPOLYCURVE>(curPolygon, sizeof(TTPOLYGONHEADER));
+ fEndCurve = SkTAddOffset<const TTPOLYCURVE>(curPolygon, curPolygon->cb);
+ }
+ void set() {
+ fCurCurve = nullptr;
+ fEndCurve = nullptr;
+ }
+
+ const TTPOLYCURVE* next() {
+ if (fCurCurve >= fEndCurve) {
+ return nullptr;
+ }
+ const TTPOLYCURVE* thisCurve = fCurCurve;
+ fCurCurve = SkTAddOffset<const TTPOLYCURVE>(fCurCurve, size_of_TTPOLYCURVE(*fCurCurve));
+ return thisCurve;
+ }
+ private:
+ size_t size_of_TTPOLYCURVE(const TTPOLYCURVE& curve) {
+ return 2*sizeof(WORD) + curve.cpfx*sizeof(POINTFX);
+ }
+ const TTPOLYCURVE* fCurCurve;
+ const TTPOLYCURVE* fEndCurve;
+ };
+
+ /** Iterates over all of the polygon points in a polygon curve. */
+ class GDIPolygonCurvePointIter {
+ public:
+ GDIPolygonCurvePointIter() : fCurveType(0), fCurPoint(nullptr), fEndPoint(nullptr) { }
+
+ GDIPolygonCurvePointIter(const TTPOLYCURVE* curPolygon)
+ : fCurveType(curPolygon->wType)
+ , fCurPoint(&curPolygon->apfx[0])
+ , fEndPoint(&curPolygon->apfx[curPolygon->cpfx])
+ { }
+
+ bool isSet() { return fCurPoint != nullptr; }
+
+ void set(const TTPOLYCURVE* curPolygon) {
+ fCurveType = curPolygon->wType;
+ fCurPoint = &curPolygon->apfx[0];
+ fEndPoint = &curPolygon->apfx[curPolygon->cpfx];
+ }
+ void set() {
+ fCurPoint = nullptr;
+ fEndPoint = nullptr;
+ }
+
+ const POINTFX* next() {
+ if (fCurPoint >= fEndPoint) {
+ return nullptr;
+ }
+ const POINTFX* thisPoint = fCurPoint;
+ ++fCurPoint;
+ return thisPoint;
+ }
+
+ WORD fCurveType;
+ private:
+ const POINTFX* fCurPoint;
+ const POINTFX* fEndPoint;
+ };
+
+ GDIPolygonHeaderIter fHeaderIter;
+ GDIPolygonCurveIter fCurveIter;
+ GDIPolygonCurvePointIter fPointIter;
+};
+
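+// GGO_NATIVE data is a sequence of TTPOLYGONHEADERs (one per contour), each followed
+// by TTPOLYCURVE records of lines and quadratic splines. GDI's y axis points up, so
+// y values are negated to match Skia's y-down convention.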
+static void sk_path_from_gdi_path(SkPath* path, const uint8_t* glyphbuf, DWORD total_size) {
+ const uint8_t* cur_glyph = glyphbuf;
+ const uint8_t* end_glyph = glyphbuf + total_size;
+
+ while (cur_glyph < end_glyph) {
+ const TTPOLYGONHEADER* th = (TTPOLYGONHEADER*)cur_glyph;
+
+ const uint8_t* end_poly = cur_glyph + th->cb;
+ const uint8_t* cur_poly = cur_glyph + sizeof(TTPOLYGONHEADER);
+
+ path->moveTo(SkFixedToScalar( SkFIXEDToFixed(th->pfxStart.x)),
+ SkFixedToScalar(-SkFIXEDToFixed(th->pfxStart.y)));
+
+ while (cur_poly < end_poly) {
+ const TTPOLYCURVE* pc = (const TTPOLYCURVE*)cur_poly;
+
+ if (pc->wType == TT_PRIM_LINE) {
+ for (uint16_t i = 0; i < pc->cpfx; i++) {
+ path->lineTo(SkFixedToScalar( SkFIXEDToFixed(pc->apfx[i].x)),
+ SkFixedToScalar(-SkFIXEDToFixed(pc->apfx[i].y)));
+ }
+ }
+
+ if (pc->wType == TT_PRIM_QSPLINE) {
+ for (uint16_t u = 0; u < pc->cpfx - 1; u++) { // Walk through points in spline
+ POINTFX pnt_b = pc->apfx[u]; // B is always the current point
+ POINTFX pnt_c = pc->apfx[u+1];
+
+ if (u < pc->cpfx - 2) { // If not on last spline, compute C
+ pnt_c.x = SkFixedToFIXED(SkFixedAve(SkFIXEDToFixed(pnt_b.x),
+ SkFIXEDToFixed(pnt_c.x)));
+ pnt_c.y = SkFixedToFIXED(SkFixedAve(SkFIXEDToFixed(pnt_b.y),
+ SkFIXEDToFixed(pnt_c.y)));
+ }
+
+ path->quadTo(SkFixedToScalar( SkFIXEDToFixed(pnt_b.x)),
+ SkFixedToScalar(-SkFIXEDToFixed(pnt_b.y)),
+ SkFixedToScalar( SkFIXEDToFixed(pnt_c.x)),
+ SkFixedToScalar(-SkFIXEDToFixed(pnt_c.y)));
+ }
+ }
+ // Advance past this TTPOLYCURVE.
+ cur_poly += sizeof(WORD) * 2 + sizeof(POINTFX) * pc->cpfx;
+ }
+ cur_glyph += th->cb;
+ path->close();
+ }
+}
+
+#define move_next_expected_hinted_point(iter, pElem) do {\
+ pElem = iter.next(); \
+ if (nullptr == pElem) return false; \
+} while(0)
+
+// It is possible for the hinted and unhinted versions of the same path to have
+// a different number of points due to GDI's handling of flipped points.
+// If this is detected, this will return false.
+static bool sk_path_from_gdi_paths(SkPath* path, const uint8_t* glyphbuf, DWORD total_size,
+ GDIGlyphbufferPointIter hintedYs) {
+ const uint8_t* cur_glyph = glyphbuf;
+ const uint8_t* end_glyph = glyphbuf + total_size;
+
+ POINTFX const * hintedPoint;
+
+ while (cur_glyph < end_glyph) {
+ const TTPOLYGONHEADER* th = (TTPOLYGONHEADER*)cur_glyph;
+
+ const uint8_t* end_poly = cur_glyph + th->cb;
+ const uint8_t* cur_poly = cur_glyph + sizeof(TTPOLYGONHEADER);
+
+ move_next_expected_hinted_point(hintedYs, hintedPoint);
+ path->moveTo(SkFixedToScalar( SkFIXEDToFixed(th->pfxStart.x)),
+ SkFixedToScalar(-SkFIXEDToFixed(hintedPoint->y)));
+
+ while (cur_poly < end_poly) {
+ const TTPOLYCURVE* pc = (const TTPOLYCURVE*)cur_poly;
+
+ if (pc->wType == TT_PRIM_LINE) {
+ for (uint16_t i = 0; i < pc->cpfx; i++) {
+ move_next_expected_hinted_point(hintedYs, hintedPoint);
+ path->lineTo(SkFixedToScalar( SkFIXEDToFixed(pc->apfx[i].x)),
+ SkFixedToScalar(-SkFIXEDToFixed(hintedPoint->y)));
+ }
+ }
+
+ if (pc->wType == TT_PRIM_QSPLINE) {
+ POINTFX currentPoint = pc->apfx[0];
+ move_next_expected_hinted_point(hintedYs, hintedPoint);
+ // only take the hinted y if it wasn't flipped
+ if (hintedYs.currentCurveType() == TT_PRIM_QSPLINE) {
+ currentPoint.y = hintedPoint->y;
+ }
+ for (uint16_t u = 0; u < pc->cpfx - 1; u++) { // Walk through points in spline
+ POINTFX pnt_b = currentPoint;//pc->apfx[u]; // B is always the current point
+ POINTFX pnt_c = pc->apfx[u+1];
+ move_next_expected_hinted_point(hintedYs, hintedPoint);
+ // only take the hinted y if it wasn't flipped
+ if (hintedYs.currentCurveType() == TT_PRIM_QSPLINE) {
+ pnt_c.y = hintedPoint->y;
+ }
+ currentPoint.x = pnt_c.x;
+ currentPoint.y = pnt_c.y;
+
+ if (u < pc->cpfx - 2) { // If not on last spline, compute C
+ pnt_c.x = SkFixedToFIXED(SkFixedAve(SkFIXEDToFixed(pnt_b.x),
+ SkFIXEDToFixed(pnt_c.x)));
+ pnt_c.y = SkFixedToFIXED(SkFixedAve(SkFIXEDToFixed(pnt_b.y),
+ SkFIXEDToFixed(pnt_c.y)));
+ }
+
+ path->quadTo(SkFixedToScalar( SkFIXEDToFixed(pnt_b.x)),
+ SkFixedToScalar(-SkFIXEDToFixed(pnt_b.y)),
+ SkFixedToScalar( SkFIXEDToFixed(pnt_c.x)),
+ SkFixedToScalar(-SkFIXEDToFixed(pnt_c.y)));
+ }
+ }
+ // Advance past this TTPOLYCURVE.
+ cur_poly += sizeof(WORD) * 2 + sizeof(POINTFX) * pc->cpfx;
+ }
+ cur_glyph += th->cb;
+ path->close();
+ }
+ return true;
+}
+
+DWORD SkScalerContext_GDI::getGDIGlyphPath(const SkGlyph& glyph, UINT flags,
+ SkAutoSTMalloc<BUFFERSIZE, uint8_t>* glyphbuf)
+{
+ GLYPHMETRICS gm;
+
+ DWORD total_size = GetGlyphOutlineW(fDDC, glyph.getGlyphID(), flags, &gm, BUFFERSIZE, glyphbuf->get(), &fMat22);
+ // Sometimes GetGlyphOutlineW returns a number larger than BUFFERSIZE even if BUFFERSIZE > 0.
+ // It has been verified that this does not involve a buffer overrun.
+ if (GDI_ERROR == total_size || total_size > BUFFERSIZE) {
+ // GDI_ERROR because the BUFFERSIZE was too small, or because the data was not accessible.
+        // When the data is not accessible GetGlyphOutlineW fails rather quickly,
+ // so just try to get the size. If that fails then ensure the data is accessible.
+ total_size = GetGlyphOutlineW(fDDC, glyph.getGlyphID(), flags, &gm, 0, nullptr, &fMat22);
+ if (GDI_ERROR == total_size) {
+ LogFontTypeface::EnsureAccessible(this->getTypeface());
+ total_size = GetGlyphOutlineW(fDDC, glyph.getGlyphID(), flags, &gm, 0, nullptr, &fMat22);
+ if (GDI_ERROR == total_size) {
+ // GetGlyphOutlineW is known to fail for some characters, such as spaces.
+ // In these cases, just return that the glyph does not have a shape.
+ return 0;
+ }
+ }
+
+ glyphbuf->reset(total_size);
+
+ DWORD ret = GetGlyphOutlineW(fDDC, glyph.getGlyphID(), flags, &gm, total_size, glyphbuf->get(), &fMat22);
+ if (GDI_ERROR == ret) {
+ LogFontTypeface::EnsureAccessible(this->getTypeface());
+ ret = GetGlyphOutlineW(fDDC, glyph.getGlyphID(), flags, &gm, total_size, glyphbuf->get(), &fMat22);
+ if (GDI_ERROR == ret) {
+ SkASSERT(false);
+ return 0;
+ }
+ }
+ }
+ return total_size;
+}
+
+void SkScalerContext_GDI::generatePath(const SkGlyph& glyph, SkPath* path) {
+ SkASSERT(path);
+ SkASSERT(fDDC);
+
+ path->reset();
+
+ // Out of all the fonts on a typical Windows box,
+ // 25% of glyphs require more than 2KB.
+ // 1% of glyphs require more than 4KB.
+ // 0.01% of glyphs require more than 8KB.
+ // 8KB is less than 1% of the normal 1MB stack on Windows.
+    // Note that some web font glyphs require more than 20KB.
+ //static const DWORD BUFFERSIZE = (1 << 13);
+
+ //GDI only uses hinted outlines when axis aligned.
+ UINT format = GGO_NATIVE | GGO_GLYPH_INDEX;
+ if (fRec.getHinting() == SkPaint::kNo_Hinting || fRec.getHinting() == SkPaint::kSlight_Hinting){
+ format |= GGO_UNHINTED;
+ }
+ SkAutoSTMalloc<BUFFERSIZE, uint8_t> glyphbuf(BUFFERSIZE);
+ DWORD total_size = getGDIGlyphPath(glyph, format, &glyphbuf);
+ if (0 == total_size) {
+ return;
+ }
+
+ if (fRec.getHinting() != SkPaint::kSlight_Hinting) {
+ sk_path_from_gdi_path(path, glyphbuf, total_size);
+ } else {
+ //GDI only uses hinted outlines when axis aligned.
+ UINT format = GGO_NATIVE | GGO_GLYPH_INDEX;
+
+ SkAutoSTMalloc<BUFFERSIZE, uint8_t> hintedGlyphbuf(BUFFERSIZE);
+ DWORD hinted_total_size = getGDIGlyphPath(glyph, format, &hintedGlyphbuf);
+ if (0 == hinted_total_size) {
+ return;
+ }
+
+ if (!sk_path_from_gdi_paths(path, glyphbuf, total_size,
+ GDIGlyphbufferPointIter(hintedGlyphbuf, hinted_total_size)))
+ {
+ path->reset();
+ sk_path_from_gdi_path(path, glyphbuf, total_size);
+ }
+ }
+}
+
+static void logfont_for_name(const char* familyName, LOGFONT* lf) {
+ sk_bzero(lf, sizeof(LOGFONT));
+#ifdef UNICODE
+ // Get the buffer size needed first.
+ size_t str_len = ::MultiByteToWideChar(CP_UTF8, 0, familyName,
+ -1, nullptr, 0);
+ // Allocate a buffer (str_len already has terminating null
+ // accounted for).
+ wchar_t *wideFamilyName = new wchar_t[str_len];
+ // Now actually convert the string.
+ ::MultiByteToWideChar(CP_UTF8, 0, familyName, -1,
+ wideFamilyName, str_len);
+ ::wcsncpy(lf->lfFaceName, wideFamilyName, LF_FACESIZE - 1);
+ delete [] wideFamilyName;
+ lf->lfFaceName[LF_FACESIZE-1] = L'\0';
+#else
+ ::strncpy(lf->lfFaceName, familyName, LF_FACESIZE - 1);
+ lf->lfFaceName[LF_FACESIZE - 1] = '\0';
+#endif
+}
+
+void LogFontTypeface::onGetFamilyName(SkString* familyName) const {
+ // Get the actual name of the typeface. The logfont may not know this.
+ HFONT font = CreateFontIndirect(&fLogFont);
+
+ HDC deviceContext = ::CreateCompatibleDC(nullptr);
+ HFONT savefont = (HFONT)SelectObject(deviceContext, font);
+
+ dcfontname_to_skstring(deviceContext, fLogFont, familyName);
+
+ if (deviceContext) {
+ ::SelectObject(deviceContext, savefont);
+ ::DeleteDC(deviceContext);
+ }
+ if (font) {
+ ::DeleteObject(font);
+ }
+}
+
+void LogFontTypeface::onGetFontDescriptor(SkFontDescriptor* desc,
+ bool* isLocalStream) const {
+ SkString familyName;
+ this->onGetFamilyName(&familyName);
+ desc->setFamilyName(familyName.c_str());
+ desc->setStyle(this->fontStyle());
+ *isLocalStream = this->fSerializeAsStream;
+}
+
+SkAdvancedTypefaceMetrics* LogFontTypeface::onGetAdvancedTypefaceMetrics(
+ PerGlyphInfo perGlyphInfo,
+ const uint32_t* glyphIDs,
+ uint32_t glyphIDsCount) const {
+ LOGFONT lf = fLogFont;
+ SkAdvancedTypefaceMetrics* info = nullptr;
+
+ HDC hdc = CreateCompatibleDC(nullptr);
+ HFONT font = CreateFontIndirect(&lf);
+ HFONT savefont = (HFONT)SelectObject(hdc, font);
+ HFONT designFont = nullptr;
+
+ const char stem_chars[] = {'i', 'I', '!', '1'};
+ int16_t min_width;
+ unsigned glyphCount;
+
+ // To request design units, create a logical font whose height is specified
+ // as unitsPerEm.
+ OUTLINETEXTMETRIC otm;
+ unsigned int otmRet = GetOutlineTextMetrics(hdc, sizeof(otm), &otm);
+ if (0 == otmRet) {
+ call_ensure_accessible(lf);
+ otmRet = GetOutlineTextMetrics(hdc, sizeof(otm), &otm);
+ }
+ if (!otmRet || !GetTextFace(hdc, LF_FACESIZE, lf.lfFaceName)) {
+ goto Error;
+ }
+ lf.lfHeight = -SkToS32(otm.otmEMSquare);
+ designFont = CreateFontIndirect(&lf);
+ SelectObject(hdc, designFont);
+ if (!GetOutlineTextMetrics(hdc, sizeof(otm), &otm)) {
+ goto Error;
+ }
+ glyphCount = calculateGlyphCount(hdc, fLogFont);
+
+ info = new SkAdvancedTypefaceMetrics;
+ info->fEmSize = otm.otmEMSquare;
+ info->fLastGlyphID = SkToU16(glyphCount - 1);
+ tchar_to_skstring(lf.lfFaceName, &info->fFontName);
+ // If bit 1 is set, the font may not be embedded in a document.
+ // If bit 1 is clear, the font can be embedded.
+ // If bit 2 is set, the embedding is read-only.
+ if (otm.otmfsType & 0x1) {
+ info->fFlags |= SkAdvancedTypefaceMetrics::kNotEmbeddable_FontFlag;
+ }
+
+ if (perGlyphInfo & kToUnicode_PerGlyphInfo) {
+ populate_glyph_to_unicode(hdc, glyphCount, &(info->fGlyphToUnicode));
+ }
+
+ if (glyphCount > 0 &&
+ (otm.otmTextMetrics.tmPitchAndFamily & TMPF_TRUETYPE)) {
+ info->fType = SkAdvancedTypefaceMetrics::kTrueType_Font;
+ } else {
+ goto ReturnInfo;
+ }
+
+ // If this bit is clear the font is a fixed pitch font.
+ if (!(otm.otmTextMetrics.tmPitchAndFamily & TMPF_FIXED_PITCH)) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kFixedPitch_Style;
+ }
+ if (otm.otmTextMetrics.tmItalic) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kItalic_Style;
+ }
+ if (otm.otmTextMetrics.tmPitchAndFamily & FF_ROMAN) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kSerif_Style;
+ } else if (otm.otmTextMetrics.tmPitchAndFamily & FF_SCRIPT) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kScript_Style;
+ }
+
+ // The main italic angle of the font, in tenths of a degree counterclockwise
+ // from vertical.
+ info->fItalicAngle = otm.otmItalicAngle / 10;
+ info->fAscent = SkToS16(otm.otmTextMetrics.tmAscent);
+ info->fDescent = SkToS16(-otm.otmTextMetrics.tmDescent);
+ // TODO(ctguil): Use alternate cap height calculation.
+    // MSDN says otmsCapEmHeight is not supported, but it does return a value
+    // on my Win7 box.
+ info->fCapHeight = otm.otmsCapEmHeight;
+ info->fBBox =
+ SkIRect::MakeLTRB(otm.otmrcFontBox.left, otm.otmrcFontBox.top,
+ otm.otmrcFontBox.right, otm.otmrcFontBox.bottom);
+
+ // Figure out a good guess for StemV - Min width of i, I, !, 1.
+ // This probably isn't very good with an italic font.
+ min_width = SHRT_MAX;
+ info->fStemV = 0;
+ for (size_t i = 0; i < SK_ARRAY_COUNT(stem_chars); i++) {
+ ABC abcWidths;
+ if (GetCharABCWidths(hdc, stem_chars[i], stem_chars[i], &abcWidths)) {
+ int16_t width = abcWidths.abcB;
+ if (width > 0 && width < min_width) {
+ min_width = width;
+ info->fStemV = min_width;
+ }
+ }
+ }
+
+Error:
+ReturnInfo:
+ SelectObject(hdc, savefont);
+ DeleteObject(designFont);
+ DeleteObject(font);
+ DeleteDC(hdc);
+
+ return info;
+}
+
+//Dummy representation of a Base64 encoded GUID from create_unique_font_name.
+#define BASE64_GUID_ID "XXXXXXXXXXXXXXXXXXXXXXXX"
+//Length of the Base64 encoded GUID from create_unique_font_name, including the null terminator.
+#define BASE64_GUID_ID_LEN SK_ARRAY_COUNT(BASE64_GUID_ID)
+
+static_assert(BASE64_GUID_ID_LEN < LF_FACESIZE, "GUID_longer_than_facesize");
+
+/**
+ NameID 6 Postscript names cannot have the character '/'.
+ It would be easier to hex encode the GUID, but that is 32 bytes,
+ and many systems have issues with names longer than 28 bytes.
+ The following need not be any standard base64 encoding.
+ The encoded value is never decoded.
+*/
+static const char postscript_safe_base64_encode[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz"
+ "0123456789-_=";
+
+/**
+ Formats a GUID into Base64 and places it into buffer.
+ buffer should have space for at least BASE64_GUID_ID_LEN characters.
+ The string will always be null terminated.
+ XXXXXXXXXXXXXXXXXXXXXXXX0
+ */
+static void format_guid_b64(const GUID& guid, char* buffer, size_t bufferSize) {
+ SkASSERT(bufferSize >= BASE64_GUID_ID_LEN);
+ size_t written = SkBase64::Encode(&guid, sizeof(guid), buffer, postscript_safe_base64_encode);
+ SkASSERT(written < LF_FACESIZE);
+ buffer[written] = '\0';
+}
+
+/**
+ Creates a Base64 encoded GUID and places it into buffer.
+ buffer should have space for at least BASE64_GUID_ID_LEN characters.
+ The string will always be null terminated.
+ XXXXXXXXXXXXXXXXXXXXXXXX0
+ */
+static HRESULT create_unique_font_name(char* buffer, size_t bufferSize) {
+ GUID guid = {};
+ if (FAILED(CoCreateGuid(&guid))) {
+ return E_UNEXPECTED;
+ }
+ format_guid_b64(guid, buffer, bufferSize);
+
+ return S_OK;
+}
+
+/**
+ Introduces a font to GDI. On failure will return nullptr. The returned handle
+ should eventually be passed to RemoveFontMemResourceEx.
+*/
+static HANDLE activate_font(SkData* fontData) {
+ DWORD numFonts = 0;
+ //AddFontMemResourceEx just copies the data, but does not specify const.
+ HANDLE fontHandle = AddFontMemResourceEx(const_cast<void*>(fontData->data()),
+ static_cast<DWORD>(fontData->size()),
+ 0,
+ &numFonts);
+
+ if (fontHandle != nullptr && numFonts < 1) {
+ RemoveFontMemResourceEx(fontHandle);
+ return nullptr;
+ }
+
+ return fontHandle;
+}
+
+// Does not affect ownership of stream.
+static SkTypeface* create_from_stream(SkStreamAsset* stream) {
+ // Create a unique and unpredictable font name.
+ // Avoids collisions and access from CSS.
+ char familyName[BASE64_GUID_ID_LEN];
+ const int familyNameSize = SK_ARRAY_COUNT(familyName);
+ if (FAILED(create_unique_font_name(familyName, familyNameSize))) {
+ return nullptr;
+ }
+
+ // Change the name of the font.
+ sk_sp<SkData> rewrittenFontData(SkOTUtils::RenameFont(stream, familyName, familyNameSize-1));
+ if (nullptr == rewrittenFontData.get()) {
+ return nullptr;
+ }
+
+ // Register the font with GDI.
+ HANDLE fontReference = activate_font(rewrittenFontData.get());
+ if (nullptr == fontReference) {
+ return nullptr;
+ }
+
+ // Create the typeface.
+ LOGFONT lf;
+ logfont_for_name(familyName, &lf);
+
+ return SkCreateFontMemResourceTypefaceFromLOGFONT(lf, fontReference);
+}
+
+SkStreamAsset* LogFontTypeface::onOpenStream(int* ttcIndex) const {
+ *ttcIndex = 0;
+
+ const DWORD kTTCTag =
+ SkEndian_SwapBE32(SkSetFourByteTag('t', 't', 'c', 'f'));
+ LOGFONT lf = fLogFont;
+
+ HDC hdc = ::CreateCompatibleDC(nullptr);
+ HFONT font = CreateFontIndirect(&lf);
+ HFONT savefont = (HFONT)SelectObject(hdc, font);
+
+ SkMemoryStream* stream = nullptr;
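+    // Try the whole TrueType collection ('ttcf') first; a table tag of 0 falls back
+    // to returning the entire font file for non-collection fonts.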
+ DWORD tables[2] = {kTTCTag, 0};
+ for (int i = 0; i < SK_ARRAY_COUNT(tables); i++) {
+ DWORD bufferSize = GetFontData(hdc, tables[i], 0, nullptr, 0);
+ if (bufferSize == GDI_ERROR) {
+ call_ensure_accessible(lf);
+ bufferSize = GetFontData(hdc, tables[i], 0, nullptr, 0);
+ }
+ if (bufferSize != GDI_ERROR) {
+ stream = new SkMemoryStream(bufferSize);
+ if (GetFontData(hdc, tables[i], 0, (void*)stream->getMemoryBase(), bufferSize)) {
+ break;
+ } else {
+ delete stream;
+ stream = nullptr;
+ }
+ }
+ }
+
+ SelectObject(hdc, savefont);
+ DeleteObject(font);
+ DeleteDC(hdc);
+
+ return stream;
+}
+
+static void bmpCharsToGlyphs(HDC hdc, const WCHAR* bmpChars, int count, uint16_t* glyphs,
+ bool Ox1FHack)
+{
+ DWORD result = GetGlyphIndicesW(hdc, bmpChars, count, glyphs, GGI_MARK_NONEXISTING_GLYPHS);
+ if (GDI_ERROR == result) {
+ for (int i = 0; i < count; ++i) {
+ glyphs[i] = 0;
+ }
+ return;
+ }
+
+ if (Ox1FHack) {
+ for (int i = 0; i < count; ++i) {
+ if (0xFFFF == glyphs[i] || 0x1F == glyphs[i]) {
+ glyphs[i] = 0;
+ }
+ }
+ } else {
+ for (int i = 0; i < count; ++i) {
+ if (0xFFFF == glyphs[i]){
+ glyphs[i] = 0;
+ }
+ }
+ }
+}
+
+static uint16_t nonBmpCharToGlyph(HDC hdc, SCRIPT_CACHE* scriptCache, const WCHAR utf16[2]) {
+ uint16_t index = 0;
+    // Use Uniscribe to determine the glyph index for non-BMP characters.
+ static const int numWCHAR = 2;
+ static const int maxItems = 2;
+ // MSDN states that this can be nullptr, but some things don't work then.
+ SCRIPT_CONTROL scriptControl = { 0 };
+ // Add extra item to SCRIPT_ITEM to work around a bug (now documented).
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=366643
+ SCRIPT_ITEM si[maxItems + 1];
+ int numItems;
+ HRZM(ScriptItemize(utf16, numWCHAR, maxItems, &scriptControl, nullptr, si, &numItems),
+ "Could not itemize character.");
+
+ // Sometimes ScriptShape cannot find a glyph for a non-BMP and returns 2 space glyphs.
+ static const int maxGlyphs = 2;
+ SCRIPT_VISATTR vsa[maxGlyphs];
+ WORD outGlyphs[maxGlyphs];
+ WORD logClust[numWCHAR];
+ int numGlyphs;
+ HRZM(ScriptShape(hdc, scriptCache, utf16, numWCHAR, maxGlyphs, &si[0].a,
+ outGlyphs, logClust, vsa, &numGlyphs),
+ "Could not shape character.");
+ if (1 == numGlyphs) {
+ index = outGlyphs[0];
+ }
+ return index;
+}
+
+class SkAutoHDC {
+public:
+ SkAutoHDC(const LOGFONT& lf)
+ : fHdc(::CreateCompatibleDC(nullptr))
+ , fFont(::CreateFontIndirect(&lf))
+ , fSavefont((HFONT)SelectObject(fHdc, fFont))
+ { }
+ ~SkAutoHDC() {
+ SelectObject(fHdc, fSavefont);
+ DeleteObject(fFont);
+ DeleteDC(fHdc);
+ }
+ operator HDC() { return fHdc; }
+private:
+ HDC fHdc;
+ HFONT fFont;
+ HFONT fSavefont;
+};
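+// SK_REQUIRE_LOCAL_VAR forces SkAutoHDC to be bound to a named local variable,
+// preventing an accidental unnamed temporary.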
+#define SkAutoHDC(...) SK_REQUIRE_LOCAL_VAR(SkAutoHDC)
+
+int LogFontTypeface::onCharsToGlyphs(const void* chars, Encoding encoding,
+ uint16_t userGlyphs[], int glyphCount) const
+{
+ SkAutoHDC hdc(fLogFont);
+
+ TEXTMETRIC tm;
+ if (0 == GetTextMetrics(hdc, &tm)) {
+ call_ensure_accessible(fLogFont);
+ if (0 == GetTextMetrics(hdc, &tm)) {
+ tm.tmPitchAndFamily = TMPF_TRUETYPE;
+ }
+ }
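+    // On pre-Vista systems, bitmap/vector FON fonts report missing glyphs as 0x1F
+    // rather than 0xFFFF.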
+ bool Ox1FHack = !(tm.tmPitchAndFamily & TMPF_VECTOR) /*&& winVer < Vista */;
+
+ SkAutoSTMalloc<256, uint16_t> scratchGlyphs;
+ uint16_t* glyphs;
+ if (userGlyphs != nullptr) {
+ glyphs = userGlyphs;
+ } else {
+ glyphs = scratchGlyphs.reset(glyphCount);
+ }
+
+ SCRIPT_CACHE sc = 0;
+ switch (encoding) {
+ case SkTypeface::kUTF8_Encoding: {
+ static const int scratchCount = 256;
+ WCHAR scratch[scratchCount];
+ int glyphIndex = 0;
+ const char* currentUtf8 = reinterpret_cast<const char*>(chars);
+ SkUnichar currentChar;
+ if (glyphCount) {
+ currentChar = SkUTF8_NextUnichar(&currentUtf8);
+ }
+ while (glyphIndex < glyphCount) {
+ // Try a run of bmp.
+ int glyphsLeft = SkTMin(glyphCount - glyphIndex, scratchCount);
+ int runLength = 0;
+ while (runLength < glyphsLeft && currentChar <= 0xFFFF) {
+ scratch[runLength] = static_cast<WCHAR>(currentChar);
+ ++runLength;
+ if (runLength < glyphsLeft) {
+ currentChar = SkUTF8_NextUnichar(&currentUtf8);
+ }
+ }
+ if (runLength) {
+ bmpCharsToGlyphs(hdc, scratch, runLength, &glyphs[glyphIndex], Ox1FHack);
+ glyphIndex += runLength;
+ }
+
+ // Try a run of non-bmp.
+ while (glyphIndex < glyphCount && currentChar > 0xFFFF) {
+ SkUTF16_FromUnichar(currentChar, reinterpret_cast<uint16_t*>(scratch));
+ glyphs[glyphIndex] = nonBmpCharToGlyph(hdc, &sc, scratch);
+ ++glyphIndex;
+ if (glyphIndex < glyphCount) {
+ currentChar = SkUTF8_NextUnichar(&currentUtf8);
+ }
+ }
+ }
+ break;
+ }
+ case SkTypeface::kUTF16_Encoding: {
+ int glyphIndex = 0;
+ const WCHAR* currentUtf16 = reinterpret_cast<const WCHAR*>(chars);
+ while (glyphIndex < glyphCount) {
+ // Try a run of bmp.
+ int glyphsLeft = glyphCount - glyphIndex;
+ int runLength = 0;
+ while (runLength < glyphsLeft && !SkUTF16_IsHighSurrogate(currentUtf16[runLength])) {
+ ++runLength;
+ }
+ if (runLength) {
+ bmpCharsToGlyphs(hdc, currentUtf16, runLength, &glyphs[glyphIndex], Ox1FHack);
+ glyphIndex += runLength;
+ currentUtf16 += runLength;
+ }
+
+ // Try a run of non-bmp.
+ while (glyphIndex < glyphCount && SkUTF16_IsHighSurrogate(*currentUtf16)) {
+ glyphs[glyphIndex] = nonBmpCharToGlyph(hdc, &sc, currentUtf16);
+ ++glyphIndex;
+ currentUtf16 += 2;
+ }
+ }
+ break;
+ }
+ case SkTypeface::kUTF32_Encoding: {
+ static const int scratchCount = 256;
+ WCHAR scratch[scratchCount];
+ int glyphIndex = 0;
+ const uint32_t* utf32 = reinterpret_cast<const uint32_t*>(chars);
+ while (glyphIndex < glyphCount) {
+ // Try a run of bmp.
+ int glyphsLeft = SkTMin(glyphCount - glyphIndex, scratchCount);
+ int runLength = 0;
+ while (runLength < glyphsLeft && utf32[glyphIndex + runLength] <= 0xFFFF) {
+ scratch[runLength] = static_cast<WCHAR>(utf32[glyphIndex + runLength]);
+ ++runLength;
+ }
+ if (runLength) {
+ bmpCharsToGlyphs(hdc, scratch, runLength, &glyphs[glyphIndex], Ox1FHack);
+ glyphIndex += runLength;
+ }
+
+ // Try a run of non-bmp.
+ while (glyphIndex < glyphCount && utf32[glyphIndex] > 0xFFFF) {
+ SkUTF16_FromUnichar(utf32[glyphIndex], reinterpret_cast<uint16_t*>(scratch));
+ glyphs[glyphIndex] = nonBmpCharToGlyph(hdc, &sc, scratch);
+ ++glyphIndex;
+ }
+ }
+ break;
+ }
+ default:
+ SK_ABORT("Invalid Text Encoding");
+ }
+
+ if (sc) {
+ ::ScriptFreeCache(&sc);
+ }
+
+ for (int i = 0; i < glyphCount; ++i) {
+ if (0 == glyphs[i]) {
+ return i;
+ }
+ }
+ return glyphCount;
+}
+
+int LogFontTypeface::onCountGlyphs() const {
+ HDC hdc = ::CreateCompatibleDC(nullptr);
+ HFONT font = CreateFontIndirect(&fLogFont);
+ HFONT savefont = (HFONT)SelectObject(hdc, font);
+
+ unsigned int glyphCount = calculateGlyphCount(hdc, fLogFont);
+
+ SelectObject(hdc, savefont);
+ DeleteObject(font);
+ DeleteDC(hdc);
+
+ return glyphCount;
+}
+
+int LogFontTypeface::onGetUPEM() const {
+ HDC hdc = ::CreateCompatibleDC(nullptr);
+ HFONT font = CreateFontIndirect(&fLogFont);
+ HFONT savefont = (HFONT)SelectObject(hdc, font);
+
+ unsigned int upem = calculateUPEM(hdc, fLogFont);
+
+ SelectObject(hdc, savefont);
+ DeleteObject(font);
+ DeleteDC(hdc);
+
+ return upem;
+}
+
+SkTypeface::LocalizedStrings* LogFontTypeface::onCreateFamilyNameIterator() const {
+ SkTypeface::LocalizedStrings* nameIter =
+ SkOTUtils::LocalizedStrings_NameTable::CreateForFamilyNames(*this);
+ if (nullptr == nameIter) {
+ SkString familyName;
+ this->getFamilyName(&familyName);
+ SkString language("und"); //undetermined
+ nameIter = new SkOTUtils::LocalizedStrings_SingleName(familyName, language);
+ }
+ return nameIter;
+}
+
+int LogFontTypeface::onGetTableTags(SkFontTableTag tags[]) const {
+ SkSFNTHeader header;
+ if (sizeof(header) != this->onGetTableData(0, 0, sizeof(header), &header)) {
+ return 0;
+ }
+
+ int numTables = SkEndian_SwapBE16(header.numTables);
+
+ if (tags) {
+ size_t size = numTables * sizeof(SkSFNTHeader::TableDirectoryEntry);
+ SkAutoSTMalloc<0x20, SkSFNTHeader::TableDirectoryEntry> dir(numTables);
+ if (size != this->onGetTableData(0, sizeof(header), size, dir.get())) {
+ return 0;
+ }
+
+ for (int i = 0; i < numTables; ++i) {
+ tags[i] = SkEndian_SwapBE32(dir[i].tag);
+ }
+ }
+ return numTables;
+}
+
+size_t LogFontTypeface::onGetTableData(SkFontTableTag tag, size_t offset,
+ size_t length, void* data) const
+{
+ LOGFONT lf = fLogFont;
+
+ HDC hdc = ::CreateCompatibleDC(nullptr);
+ HFONT font = CreateFontIndirect(&lf);
+ HFONT savefont = (HFONT)SelectObject(hdc, font);
+
+ tag = SkEndian_SwapBE32(tag);
+ if (nullptr == data) {
+ length = 0;
+ }
+ DWORD bufferSize = GetFontData(hdc, tag, (DWORD) offset, data, (DWORD) length);
+ if (bufferSize == GDI_ERROR) {
+ call_ensure_accessible(lf);
+ bufferSize = GetFontData(hdc, tag, (DWORD) offset, data, (DWORD) length);
+ }
+
+ SelectObject(hdc, savefont);
+ DeleteObject(font);
+ DeleteDC(hdc);
+
+ return bufferSize == GDI_ERROR ? 0 : bufferSize;
+}
+
+SkScalerContext* LogFontTypeface::onCreateScalerContext(const SkScalerContextEffects& effects,
+ const SkDescriptor* desc) const {
+ SkScalerContext_GDI* ctx = new SkScalerContext_GDI(const_cast<LogFontTypeface*>(this),
+ effects, desc);
+ if (!ctx->isValid()) {
+ delete ctx;
+ ctx = nullptr;
+ }
+ return ctx;
+}
+
+void LogFontTypeface::onFilterRec(SkScalerContextRec* rec) const {
+ if (rec->fFlags & SkScalerContext::kLCD_BGROrder_Flag ||
+ rec->fFlags & SkScalerContext::kLCD_Vertical_Flag)
+ {
+ rec->fMaskFormat = SkMask::kA8_Format;
+ rec->fFlags |= SkScalerContext::kGenA8FromLCD_Flag;
+ }
+
+ unsigned flagsWeDontSupport = SkScalerContext::kVertical_Flag |
+ SkScalerContext::kDevKernText_Flag |
+ SkScalerContext::kForceAutohinting_Flag |
+ SkScalerContext::kEmbeddedBitmapText_Flag |
+ SkScalerContext::kEmbolden_Flag |
+ SkScalerContext::kLCD_BGROrder_Flag |
+ SkScalerContext::kLCD_Vertical_Flag;
+ rec->fFlags &= ~flagsWeDontSupport;
+
+ SkPaint::Hinting h = rec->getHinting();
+ switch (h) {
+ case SkPaint::kNo_Hinting:
+ break;
+ case SkPaint::kSlight_Hinting:
+ // Only do slight hinting when axis aligned.
+ // TODO: re-enable slight hinting when FontHostTest can pass.
+ //if (!isAxisAligned(*rec)) {
+ h = SkPaint::kNo_Hinting;
+ //}
+ break;
+ case SkPaint::kNormal_Hinting:
+ case SkPaint::kFull_Hinting:
+ // TODO: need to be able to distinguish subpixel positioned glyphs
+ // and linear metrics.
+ //rec->fFlags &= ~SkScalerContext::kSubpixelPositioning_Flag;
+ h = SkPaint::kNormal_Hinting;
+ break;
+ default:
+ SkDEBUGFAIL("unknown hinting");
+ }
+ //TODO: if this is a bitmap font, squash hinting and subpixel.
+ rec->setHinting(h);
+
+// turn this off since GDI might turn A8 into BW! Need a bigger fix.
+#if 0
+ // Disable LCD when rotated, since GDI's output is ugly
+ if (isLCD(*rec) && !isAxisAligned(*rec)) {
+ rec->fMaskFormat = SkMask::kA8_Format;
+ }
+#endif
+
+ if (!fCanBeLCD && isLCD(*rec)) {
+ rec->fMaskFormat = SkMask::kA8_Format;
+ rec->fFlags &= ~SkScalerContext::kGenA8FromLCD_Flag;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkFontMgr.h"
+#include "SkDataTable.h"
+
+static bool valid_logfont_for_enum(const LOGFONT& lf) {
+ // TODO: Vector FON is unsupported and should not be listed.
+ return
+ // Ignore implicit vertical variants.
+ lf.lfFaceName[0] && lf.lfFaceName[0] != '@'
+
+ // DEFAULT_CHARSET is used to get all fonts, but also implies all
+ // character sets. Filter assuming all fonts support ANSI_CHARSET.
+ && ANSI_CHARSET == lf.lfCharSet
+ ;
+}
+
+/** An EnumFontFamExProc implementation which interprets builderParam as
+ * an SkTDArray<ENUMLOGFONTEX>* and appends logfonts which
+ * pass the valid_logfont_for_enum predicate.
+ */
+static int CALLBACK enum_family_proc(const LOGFONT* lf, const TEXTMETRIC*,
+ DWORD fontType, LPARAM builderParam) {
+ if (valid_logfont_for_enum(*lf)) {
+ SkTDArray<ENUMLOGFONTEX>* array = (SkTDArray<ENUMLOGFONTEX>*)builderParam;
+ *array->append() = *(ENUMLOGFONTEX*)lf;
+ }
+ return 1; // non-zero means continue
+}
+
+class SkFontStyleSetGDI : public SkFontStyleSet {
+public:
+ SkFontStyleSetGDI(const TCHAR familyName[]) {
+ LOGFONT lf;
+ sk_bzero(&lf, sizeof(lf));
+ lf.lfCharSet = DEFAULT_CHARSET;
+ _tcscpy_s(lf.lfFaceName, familyName);
+
+ HDC hdc = ::CreateCompatibleDC(nullptr);
+ ::EnumFontFamiliesEx(hdc, &lf, enum_family_proc, (LPARAM)&fArray, 0);
+ ::DeleteDC(hdc);
+ }
+
+ int count() override {
+ return fArray.count();
+ }
+
+ void getStyle(int index, SkFontStyle* fs, SkString* styleName) override {
+ if (fs) {
+ *fs = get_style(fArray[index].elfLogFont);
+ }
+ if (styleName) {
+ const ENUMLOGFONTEX& ref = fArray[index];
+ // For some reason, ENUMLOGFONTEX and LOGFONT disagree on their type in the
+ // non-unicode version.
+ // ENUMLOGFONTEX uses BYTE
+ // LOGFONT uses CHAR
+ // Here we assert that the style name is logically the same size as
+ // a TCHAR, so we can use the same converter function.
+ SkASSERT(sizeof(TCHAR) == sizeof(ref.elfStyle[0]));
+ tchar_to_skstring((const TCHAR*)ref.elfStyle, styleName);
+ }
+ }
+
+ SkTypeface* createTypeface(int index) override {
+ return SkCreateTypefaceFromLOGFONT(fArray[index].elfLogFont);
+ }
+
+ SkTypeface* matchStyle(const SkFontStyle& pattern) override {
+ return this->matchStyleCSS3(pattern);
+ }
+
+private:
+ SkTDArray<ENUMLOGFONTEX> fArray;
+};
+
+class SkFontMgrGDI : public SkFontMgr {
+public:
+ SkFontMgrGDI() {
+ LOGFONT lf;
+ sk_bzero(&lf, sizeof(lf));
+ lf.lfCharSet = DEFAULT_CHARSET;
+
+ HDC hdc = ::CreateCompatibleDC(nullptr);
+ ::EnumFontFamiliesEx(hdc, &lf, enum_family_proc, (LPARAM)&fLogFontArray, 0);
+ ::DeleteDC(hdc);
+ }
+
+protected:
+ int onCountFamilies() const override {
+ return fLogFontArray.count();
+ }
+
+ void onGetFamilyName(int index, SkString* familyName) const override {
+ SkASSERT((unsigned)index < (unsigned)fLogFontArray.count());
+ tchar_to_skstring(fLogFontArray[index].elfLogFont.lfFaceName, familyName);
+ }
+
+ SkFontStyleSet* onCreateStyleSet(int index) const override {
+ SkASSERT((unsigned)index < (unsigned)fLogFontArray.count());
+ return new SkFontStyleSetGDI(fLogFontArray[index].elfLogFont.lfFaceName);
+ }
+
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override {
+ if (nullptr == familyName) {
+ familyName = ""; // do we need this check???
+ }
+ LOGFONT lf;
+ logfont_for_name(familyName, &lf);
+ return new SkFontStyleSetGDI(lf.lfFaceName);
+ }
+
+ virtual SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontstyle) const override {
+ // could be in base impl
+ SkAutoTUnref<SkFontStyleSet> sset(this->matchFamily(familyName));
+ return sset->matchStyle(fontstyle);
+ }
+
+ virtual SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override {
+ return nullptr;
+ }
+
+ virtual SkTypeface* onMatchFaceStyle(const SkTypeface* familyMember,
+ const SkFontStyle& fontstyle) const override {
+ // could be in base impl
+ SkString familyName;
+ ((LogFontTypeface*)familyMember)->getFamilyName(&familyName);
+ return this->matchFamilyStyle(familyName.c_str(), fontstyle);
+ }
+
+ SkTypeface* onCreateFromStream(SkStreamAsset* bareStream, int ttcIndex) const override {
+ SkAutoTDelete<SkStreamAsset> stream(bareStream);
+ return create_from_stream(stream);
+ }
+
+ SkTypeface* onCreateFromData(SkData* data, int ttcIndex) const override {
+ // could be in base impl
+ return this->createFromStream(new SkMemoryStream(sk_ref_sp(data)));
+ }
+
+ SkTypeface* onCreateFromFile(const char path[], int ttcIndex) const override {
+ // could be in base impl
+ return this->createFromStream(SkStream::MakeFromFile(path).release());
+ }
+
+ SkTypeface* onLegacyCreateTypeface(const char familyName[], SkFontStyle style) const override {
+ LOGFONT lf;
+ if (nullptr == familyName) {
+ lf = get_default_font();
+ } else {
+ logfont_for_name(familyName, &lf);
+ }
+
+ lf.lfWeight = style.weight();
+ lf.lfItalic = style.slant() == SkFontStyle::kUpright_Slant ? FALSE : TRUE;
+ return SkCreateTypefaceFromLOGFONT(lf);
+ }
+
+private:
+ SkTDArray<ENUMLOGFONTEX> fLogFontArray;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkFontMgr* SkFontMgr_New_GDI() { return new SkFontMgrGDI; }
+
+#endif//defined(SK_BUILD_FOR_WIN32)
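Illustrative only, not part of the patch above: the GDI-backed methods in this file repeatedly open a memory DC, select the LOGFONT's HFONT into it, run a query, then restore and free the handles (SkAutoHDC packages the same idiom). Below is a minimal sketch of that pattern against plain Win32 GDI; ScopedFontDC and read_font_table are hypothetical names, not Skia API.

#include <windows.h>
#include <vector>

// Hypothetical RAII helper mirroring SkAutoHDC: select a font into a memory DC
// for the lifetime of the object, then restore and release everything on exit.
class ScopedFontDC {
public:
    explicit ScopedFontDC(const LOGFONT& lf)
        : fHdc(::CreateCompatibleDC(nullptr))
        , fFont(::CreateFontIndirect(&lf))
        , fSaved((HFONT)::SelectObject(fHdc, fFont)) {}
    ~ScopedFontDC() {
        ::SelectObject(fHdc, fSaved);   // put the previously selected font back first
        ::DeleteObject(fFont);
        ::DeleteDC(fHdc);
    }
    operator HDC() const { return fHdc; }
private:
    HDC   fHdc;
    HFONT fFont;
    HFONT fSaved;
};

// The size-then-fetch pattern used by onOpenStream and onGetTableData:
// ask GetFontData for the byte count, then read into a buffer of that size.
// 'tag' must already be in the byte order GetFontData expects (see kTTCTag above).
static bool read_font_table(const LOGFONT& lf, DWORD tag, std::vector<BYTE>* out) {
    ScopedFontDC hdc(lf);
    DWORD size = ::GetFontData(hdc, tag, 0, nullptr, 0);
    if (size == GDI_ERROR) {
        return false;
    }
    out->resize(size);
    return ::GetFontData(hdc, tag, 0, out->data(), size) != GDI_ERROR;
}

Restoring the original font before DeleteObject follows the GDI rule that an object should not be deleted while it is still selected into a device context.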
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface.cpp b/gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface.cpp
new file mode 100644
index 000000000..d4c756900
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface.cpp
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFontConfigInterface.h"
+#include "SkFontConfigTypeface.h"
+#include "SkFontDescriptor.h"
+#include "SkFontMgr.h"
+#include "SkFontMgr_FontConfigInterface.h"
+#include "SkFontStyle.h"
+#include "SkMakeUnique.h"
+#include "SkMutex.h"
+#include "SkString.h"
+#include "SkTypeface.h"
+#include "SkTypefaceCache.h"
+#include "SkResourceCache.h"
+
+SkStreamAsset* SkTypeface_FCI::onOpenStream(int* ttcIndex) const {
+ *ttcIndex = this->getIdentity().fTTCIndex;
+
+ if (fFontData) {
+ SkStreamAsset* stream = fFontData->getStream();
+ if (!stream) {
+ return nullptr;
+ }
+ return stream->duplicate();
+ }
+
+ return fFCI->openStream(this->getIdentity());
+}
+
+std::unique_ptr<SkFontData> SkTypeface_FCI::onMakeFontData() const {
+ if (fFontData) {
+ return skstd::make_unique<SkFontData>(*fFontData);
+ }
+
+ const SkFontConfigInterface::FontIdentity& id = this->getIdentity();
+ return skstd::make_unique<SkFontData>(std::unique_ptr<SkStreamAsset>(fFCI->openStream(id)),
+ id.fTTCIndex, nullptr, 0);
+}
+
+void SkTypeface_FCI::onGetFontDescriptor(SkFontDescriptor* desc, bool* isLocalStream) const {
+ SkString name;
+ this->getFamilyName(&name);
+ desc->setFamilyName(name.c_str());
+ desc->setStyle(this->fontStyle());
+ *isLocalStream = SkToBool(fFontData);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkFontStyleSet_FCI : public SkFontStyleSet {
+public:
+ SkFontStyleSet_FCI() {}
+
+ int count() override { return 0; }
+ void getStyle(int index, SkFontStyle*, SkString* style) override { SkASSERT(false); }
+ SkTypeface* createTypeface(int index) override { SkASSERT(false); return nullptr; }
+ SkTypeface* matchStyle(const SkFontStyle& pattern) override { return nullptr; }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkFontRequestCache {
+public:
+ struct Request : public SkResourceCache::Key {
+ private:
+ Request(const char* name, size_t nameLen, const SkFontStyle& style) : fStyle(style) {
+ /** Pointer to just after the last field of this class. */
+ char* content = const_cast<char*>(SkTAfter<const char>(&this->fStyle));
+
+ // No holes.
+ SkASSERT(SkTAddOffset<char>(this, sizeof(SkResourceCache::Key) + keySize) == content);
+
+ // Has a size divisible by size of uint32_t.
+ SkASSERT((content - reinterpret_cast<char*>(this)) % sizeof(uint32_t) == 0);
+
+ size_t contentLen = SkAlign4(nameLen);
+ sk_careful_memcpy(content, name, nameLen);
+ sk_bzero(content + nameLen, contentLen - nameLen);
+ this->init(nullptr, 0, keySize + contentLen);
+ }
+ const SkFontStyle fStyle;
+ /** The sum of the sizes of the fields of this class. */
+ static const size_t keySize = sizeof(fStyle);
+
+ public:
+ static Request* Create(const char* name, const SkFontStyle& style) {
+ size_t nameLen = name ? strlen(name) : 0;
+ size_t contentLen = SkAlign4(nameLen);
+ char* storage = new char[sizeof(Request) + contentLen];
+ return new (storage) Request(name, nameLen, style);
+ }
+ void operator delete(void* storage) {
+ delete[] reinterpret_cast<char*>(storage);
+ }
+ };
+
+
+private:
+ struct Result : public SkResourceCache::Rec {
+ Result(Request* request, SkTypeface* typeface)
+ : fRequest(request)
+ , fFace(SkSafeRef(typeface)) {}
+ Result(Result&&) = default;
+ Result& operator=(Result&&) = default;
+
+ const Key& getKey() const override { return *fRequest; }
+ size_t bytesUsed() const override { return fRequest->size() + sizeof(fFace); }
+ const char* getCategory() const override { return "request_cache"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override { return nullptr; }
+
+ SkAutoTDelete<Request> fRequest;
+ SkAutoTUnref<SkTypeface> fFace;
+ };
+
+ SkResourceCache fCachedResults;
+
+public:
+ SkFontRequestCache(size_t maxSize) : fCachedResults(maxSize) {}
+
+ /** Takes ownership of request. It will be deleted when no longer needed. */
+ void add(SkTypeface* face, Request* request) {
+ fCachedResults.add(new Result(request, face));
+ }
+ /** Does not take ownership of request. */
+ SkTypeface* findAndRef(Request* request) {
+ SkTypeface* face = nullptr;
+ fCachedResults.find(*request, [](const SkResourceCache::Rec& rec, void* context) -> bool {
+ const Result& result = static_cast<const Result&>(rec);
+ SkTypeface** face = static_cast<SkTypeface**>(context);
+
+ *face = result.fFace;
+ return true;
+ }, &face);
+ return SkSafeRef(face);
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool find_by_FontIdentity(SkTypeface* cachedTypeface, void* ctx) {
+ typedef SkFontConfigInterface::FontIdentity FontIdentity;
+ SkTypeface_FCI* cachedFCTypeface = static_cast<SkTypeface_FCI*>(cachedTypeface);
+ FontIdentity* identity = static_cast<FontIdentity*>(ctx);
+
+ return cachedFCTypeface->getIdentity() == *identity;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkFontMgr_FCI : public SkFontMgr {
+ sk_sp<SkFontConfigInterface> fFCI;
+ sk_sp<SkDataTable> fFamilyNames;
+ SkTypeface_FreeType::Scanner fScanner;
+
+ mutable SkMutex fMutex;
+ mutable SkTypefaceCache fTFCache;
+
+ // The value of maxSize here is a compromise between cache hits and cache size.
+ // See https://crbug.com/424082#63 for the reasoning behind the current size.
+ static const size_t kMaxSize = 1 << 15;
+ mutable SkFontRequestCache fCache;
+
+public:
+ SkFontMgr_FCI(sk_sp<SkFontConfigInterface> fci)
+ : fFCI(std::move(fci))
+ , fFamilyNames(fFCI->getFamilyNames())
+ , fCache(kMaxSize)
+ {}
+
+protected:
+ int onCountFamilies() const override {
+ return fFamilyNames->count();
+ }
+
+ void onGetFamilyName(int index, SkString* familyName) const override {
+ familyName->set(fFamilyNames->atStr(index));
+ }
+
+ SkFontStyleSet* onCreateStyleSet(int index) const override {
+ return this->onMatchFamily(fFamilyNames->atStr(index));
+ }
+
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override {
+ return new SkFontStyleSet_FCI();
+ }
+
+ SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle&) const override { return nullptr; }
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override {
+ return nullptr;
+ }
+ SkTypeface* onMatchFaceStyle(const SkTypeface*,
+ const SkFontStyle&) const override { return nullptr; }
+
+ SkTypeface* onCreateFromData(SkData*, int ttcIndex) const override { return nullptr; }
+
+ SkTypeface* onCreateFromStream(SkStreamAsset* bareStream, int ttcIndex) const override {
+ std::unique_ptr<SkStreamAsset> stream(bareStream);
+ const size_t length = stream->getLength();
+ if (!length) {
+ return nullptr;
+ }
+ if (length >= 1024 * 1024 * 1024) {
+ return nullptr; // don't accept too large fonts (>= 1GB) for safety.
+ }
+
+ // TODO should the caller give us the style or should we get it from freetype?
+ SkFontStyle style;
+ bool isFixedPitch = false;
+ if (!fScanner.scanFont(stream.get(), 0, nullptr, &style, &isFixedPitch, nullptr)) {
+ return nullptr;
+ }
+
+ auto fontData = skstd::make_unique<SkFontData>(std::move(stream), ttcIndex, nullptr, 0);
+ return SkTypeface_FCI::Create(std::move(fontData), style, isFixedPitch);
+ }
+
+ SkTypeface* onCreateFromStream(SkStreamAsset* s, const FontParameters& params) const override {
+ using Scanner = SkTypeface_FreeType::Scanner;
+ std::unique_ptr<SkStreamAsset> stream(s);
+ const size_t length = stream->getLength();
+ if (!length) {
+ return nullptr;
+ }
+ if (length >= 1024 * 1024 * 1024) {
+ return nullptr; // don't accept too large fonts (>= 1GB) for safety.
+ }
+
+ bool isFixedPitch;
+ SkFontStyle style;
+ SkString name;
+ Scanner::AxisDefinitions axisDefinitions;
+ if (!fScanner.scanFont(stream.get(), params.getCollectionIndex(),
+ &name, &style, &isFixedPitch, &axisDefinitions))
+ {
+ return nullptr;
+ }
+
+ int paramAxisCount;
+ const FontParameters::Axis* paramAxes = params.getAxes(&paramAxisCount);
+ SkAutoSTMalloc<4, SkFixed> axisValues(axisDefinitions.count());
+ Scanner::computeAxisValues(axisDefinitions, paramAxes, paramAxisCount, axisValues, name);
+
+ auto fontData = skstd::make_unique<SkFontData>(std::move(stream),
+ params.getCollectionIndex(),
+ axisValues.get(),
+ axisDefinitions.count());
+ return SkTypeface_FCI::Create(std::move(fontData), style, isFixedPitch);
+ }
+
+ SkTypeface* onCreateFromFile(const char path[], int ttcIndex) const override {
+ std::unique_ptr<SkStreamAsset> stream = SkStream::MakeFromFile(path);
+ return stream.get() ? this->createFromStream(stream.release(), ttcIndex) : nullptr;
+ }
+
+ SkTypeface* onLegacyCreateTypeface(const char requestedFamilyName[],
+ SkFontStyle requestedStyle) const override
+ {
+ SkAutoMutexAcquire ama(fMutex);
+
+ // Check if this request is already in the request cache.
+ using Request = SkFontRequestCache::Request;
+ SkAutoTDelete<Request> request(Request::Create(requestedFamilyName, requestedStyle));
+ SkTypeface* face = fCache.findAndRef(request);
+ if (face) {
+ return face;
+ }
+
+ SkFontConfigInterface::FontIdentity identity;
+ SkString outFamilyName;
+ SkFontStyle outStyle;
+ if (!fFCI->matchFamilyName(requestedFamilyName, requestedStyle,
+ &identity, &outFamilyName, &outStyle))
+ {
+ return nullptr;
+ }
+
+ // Check if a typeface with this FontIdentity is already in the FontIdentity cache.
+ face = fTFCache.findByProcAndRef(find_by_FontIdentity, &identity);
+ if (!face) {
+ face = SkTypeface_FCI::Create(fFCI, identity, outFamilyName, outStyle);
+ // Add this FontIdentity to the FontIdentity cache.
+ fTFCache.add(face);
+ }
+ // Add this request to the request cache.
+ fCache.add(face, request.release());
+
+ return face;
+ }
+};
+
+SK_API SkFontMgr* SkFontMgr_New_FCI(sk_sp<SkFontConfigInterface> fci) {
+ SkASSERT(fci);
+ return new SkFontMgr_FCI(std::move(fci));
+}
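Illustrative only, not part of the patch above: onLegacyCreateTypeface consults two caches, a request cache keyed on (family name, style) and a typeface cache keyed on the resolved FontIdentity, so repeated requests avoid both a FontConfig round trip and duplicate typeface objects. Below is a minimal sketch of that two-level find-or-create flow using standard containers; Typeface, match_family_name, and RequestCache are placeholders, not the Skia types used above.

#include <map>
#include <string>
#include <utility>

struct Typeface { int identity; };   // stand-in for SkTypeface_FCI

// Stand-in for fFCI->matchFamilyName(): pretend every request resolves to one face.
static Typeface* match_family_name(const std::string& /*family*/, int /*style*/) {
    static Typeface face{42};
    return &face;
}

class RequestCache {
public:
    Typeface* findOrCreate(const std::string& family, int style) {
        const auto key = std::make_pair(family, style);
        auto hit = fByRequest.find(key);
        if (hit != fByRequest.end()) {
            return hit->second;                    // request-level hit
        }
        Typeface* face = match_family_name(family, style);
        if (!face) {
            return nullptr;                        // nothing matched; cache nothing
        }
        auto known = fByIdentity.find(face->identity);
        if (known != fByIdentity.end()) {
            face = known->second;                  // identity-level hit: reuse the face
        } else {
            fByIdentity[face->identity] = face;    // first time this identity was seen
        }
        fByRequest[key] = face;                    // remember the request itself
        return face;
    }
private:
    std::map<std::pair<std::string, int>, Typeface*> fByRequest;
    std::map<int, Typeface*> fByIdentity;
};

The real code additionally packs the request into an SkResourceCache::Key (the name padded to a 4-byte boundary) and bounds the cache at kMaxSize; the sketch leaves both out.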
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface_factory.cpp
new file mode 100644
index 000000000..b2bb74ee1
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface_factory.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2008 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFontConfigInterface.h"
+#include "SkFontMgr.h"
+#include "SkFontMgr_FontConfigInterface.h"
+
+SkFontMgr* SkFontMgr::Factory() {
+ sk_sp<SkFontConfigInterface> fci(SkFontConfigInterface::RefGlobal());
+ if (!fci) {
+ return nullptr;
+ }
+ return SkFontMgr_New_FCI(std::move(fci));
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_android.cpp b/gfx/skia/skia/src/ports/SkFontMgr_android.cpp
new file mode 100644
index 000000000..3a84ecad8
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_android.cpp
@@ -0,0 +1,550 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+
+#include "SkData.h"
+#include "SkFixed.h"
+#include "SkFontDescriptor.h"
+#include "SkFontHost_FreeType_common.h"
+#include "SkFontMgr.h"
+#include "SkFontMgr_android.h"
+#include "SkFontMgr_android_parser.h"
+#include "SkFontStyle.h"
+#include "SkMakeUnique.h"
+#include "SkOSFile.h"
+#include "SkPaint.h"
+#include "SkRefCnt.h"
+#include "SkString.h"
+#include "SkStream.h"
+#include "SkTArray.h"
+#include "SkTDArray.h"
+#include "SkTSearch.h"
+#include "SkTemplates.h"
+#include "SkTypefaceCache.h"
+
+#include <limits>
+
+class SkData;
+
+class SkTypeface_Android : public SkTypeface_FreeType {
+public:
+ SkTypeface_Android(const SkFontStyle& style,
+ bool isFixedPitch,
+ const SkString& familyName)
+ : INHERITED(style, isFixedPitch)
+ , fFamilyName(familyName)
+ { }
+
+protected:
+ void onGetFamilyName(SkString* familyName) const override {
+ *familyName = fFamilyName;
+ }
+
+ SkString fFamilyName;
+
+private:
+ typedef SkTypeface_FreeType INHERITED;
+};
+
+class SkTypeface_AndroidSystem : public SkTypeface_Android {
+public:
+ SkTypeface_AndroidSystem(const SkString& pathName,
+ const bool cacheFontFiles,
+ int index,
+ const SkFixed* axes, int axesCount,
+ const SkFontStyle& style,
+ bool isFixedPitch,
+ const SkString& familyName,
+ const SkLanguage& lang,
+ FontVariant variantStyle)
+ : INHERITED(style, isFixedPitch, familyName)
+ , fPathName(pathName)
+ , fIndex(index)
+ , fAxes(axes, axesCount)
+ , fLang(lang)
+ , fVariantStyle(variantStyle)
+ , fFile(cacheFontFiles ? sk_fopen(fPathName.c_str(), kRead_SkFILE_Flag) : nullptr) {
+ if (cacheFontFiles) {
+ SkASSERT(fFile);
+ }
+ }
+
+ std::unique_ptr<SkStreamAsset> makeStream() const {
+ if (fFile) {
+ sk_sp<SkData> data(SkData::MakeFromFILE(fFile));
+ return data ? skstd::make_unique<SkMemoryStream>(std::move(data)) : nullptr;
+ }
+ return SkStream::MakeFromFile(fPathName.c_str());
+ }
+
+ virtual void onGetFontDescriptor(SkFontDescriptor* desc, bool* serialize) const override {
+ SkASSERT(desc);
+ SkASSERT(serialize);
+ desc->setFamilyName(fFamilyName.c_str());
+ desc->setStyle(this->fontStyle());
+ *serialize = false;
+ }
+ SkStreamAsset* onOpenStream(int* ttcIndex) const override {
+ *ttcIndex = fIndex;
+ return this->makeStream().release();
+ }
+ std::unique_ptr<SkFontData> onMakeFontData() const override {
+ return skstd::make_unique<SkFontData>(this->makeStream(), fIndex,
+ fAxes.begin(), fAxes.count());
+ }
+
+ const SkString fPathName;
+ int fIndex;
+ const SkSTArray<4, SkFixed, true> fAxes;
+ const SkLanguage fLang;
+ const FontVariant fVariantStyle;
+ SkAutoTCallVProc<FILE, sk_fclose> fFile;
+
+ typedef SkTypeface_Android INHERITED;
+};
+
+class SkTypeface_AndroidStream : public SkTypeface_Android {
+public:
+ SkTypeface_AndroidStream(std::unique_ptr<SkFontData> data,
+ const SkFontStyle& style,
+ bool isFixedPitch,
+ const SkString& familyName)
+ : INHERITED(style, isFixedPitch, familyName)
+ , fData(std::move(data))
+ { }
+
+ virtual void onGetFontDescriptor(SkFontDescriptor* desc,
+ bool* serialize) const override {
+ SkASSERT(desc);
+ SkASSERT(serialize);
+ desc->setFamilyName(fFamilyName.c_str());
+ *serialize = true;
+ }
+
+ SkStreamAsset* onOpenStream(int* ttcIndex) const override {
+ *ttcIndex = fData->getIndex();
+ return fData->getStream()->duplicate();
+ }
+
+ std::unique_ptr<SkFontData> onMakeFontData() const override {
+ return skstd::make_unique<SkFontData>(*fData);
+ }
+
+private:
+ const std::unique_ptr<const SkFontData> fData;
+ typedef SkTypeface_Android INHERITED;
+};
+
+class SkFontStyleSet_Android : public SkFontStyleSet {
+ typedef SkTypeface_FreeType::Scanner Scanner;
+
+public:
+ explicit SkFontStyleSet_Android(const FontFamily& family, const Scanner& scanner,
+ const bool cacheFontFiles) {
+ const SkString* cannonicalFamilyName = nullptr;
+ if (family.fNames.count() > 0) {
+ cannonicalFamilyName = &family.fNames[0];
+ }
+ // TODO? make this lazy
+ for (int i = 0; i < family.fFonts.count(); ++i) {
+ const FontFileInfo& fontFile = family.fFonts[i];
+
+ SkString pathName(family.fBasePath);
+ pathName.append(fontFile.fFileName);
+
+ std::unique_ptr<SkStreamAsset> stream = SkStream::MakeFromFile(pathName.c_str());
+ if (!stream) {
+ SkDEBUGF(("Requested font file %s does not exist or cannot be opened.\n",
+ pathName.c_str()));
+ continue;
+ }
+
+ const int ttcIndex = fontFile.fIndex;
+ SkString familyName;
+ SkFontStyle style;
+ bool isFixedWidth;
+ Scanner::AxisDefinitions axisDefinitions;
+ if (!scanner.scanFont(stream.get(), ttcIndex,
+ &familyName, &style, &isFixedWidth, &axisDefinitions))
+ {
+ SkDEBUGF(("Requested font file %s exists, but is not a valid font.\n",
+ pathName.c_str()));
+ continue;
+ }
+
+ int weight = fontFile.fWeight != 0 ? fontFile.fWeight : style.weight();
+ SkFontStyle::Slant slant = style.slant();
+ switch (fontFile.fStyle) {
+ case FontFileInfo::Style::kAuto: slant = style.slant(); break;
+ case FontFileInfo::Style::kNormal: slant = SkFontStyle::kUpright_Slant; break;
+ case FontFileInfo::Style::kItalic: slant = SkFontStyle::kItalic_Slant; break;
+ default: SkASSERT(false); break;
+ }
+ style = SkFontStyle(weight, style.width(), slant);
+
+ const SkLanguage& lang = family.fLanguage;
+ uint32_t variant = family.fVariant;
+ if (kDefault_FontVariant == variant) {
+ variant = kCompact_FontVariant | kElegant_FontVariant;
+ }
+
+ // The first specified family name overrides the family name found in the font.
+ // TODO: SkTypeface_AndroidSystem::onCreateFamilyNameIterator should return
+ // all of the specified family names in addition to the names found in the font.
+ if (cannonicalFamilyName != nullptr) {
+ familyName = *cannonicalFamilyName;
+ }
+
+ SkAutoSTMalloc<4, SkFixed> axisValues(axisDefinitions.count());
+ Scanner::computeAxisValues(axisDefinitions,
+ fontFile.fAxes.begin(), fontFile.fAxes.count(),
+ axisValues, familyName);
+
+ fStyles.push_back().reset(new SkTypeface_AndroidSystem(
+ pathName, cacheFontFiles, ttcIndex, axisValues.get(), axisDefinitions.count(),
+ style, isFixedWidth, familyName, lang, variant));
+ }
+ }
+
+ int count() override {
+ return fStyles.count();
+ }
+ void getStyle(int index, SkFontStyle* style, SkString* name) override {
+ if (index < 0 || fStyles.count() <= index) {
+ return;
+ }
+ if (style) {
+ *style = fStyles[index]->fontStyle();
+ }
+ if (name) {
+ name->reset();
+ }
+ }
+ SkTypeface_AndroidSystem* createTypeface(int index) override {
+ if (index < 0 || fStyles.count() <= index) {
+ return nullptr;
+ }
+ return SkRef(fStyles[index].get());
+ }
+
+ SkTypeface_AndroidSystem* matchStyle(const SkFontStyle& pattern) override {
+ return static_cast<SkTypeface_AndroidSystem*>(this->matchStyleCSS3(pattern));
+ }
+
+private:
+ SkTArray<SkAutoTUnref<SkTypeface_AndroidSystem>, true> fStyles;
+
+ friend struct NameToFamily;
+ friend class SkFontMgr_Android;
+
+ typedef SkFontStyleSet INHERITED;
+};
+
+/** On Android a single family can have many names, but our API assumes unique names.
+ * Map names to the back end so that all names for a given family refer to the same
+ * (non-replicated) set of typefaces.
+ * SkTDict<> doesn't let us do index-based lookup, so we write our own mapping.
+ */
+struct NameToFamily {
+ SkString name;
+ SkFontStyleSet_Android* styleSet;
+};
+
+class SkFontMgr_Android : public SkFontMgr {
+public:
+ SkFontMgr_Android(const SkFontMgr_Android_CustomFonts* custom) {
+ SkTDArray<FontFamily*> families;
+ if (custom && SkFontMgr_Android_CustomFonts::kPreferSystem != custom->fSystemFontUse) {
+ SkString base(custom->fBasePath);
+ SkFontMgr_Android_Parser::GetCustomFontFamilies(
+ families, base, custom->fFontsXml, custom->fFallbackFontsXml);
+ }
+ if (!custom ||
+ (custom && SkFontMgr_Android_CustomFonts::kOnlyCustom != custom->fSystemFontUse))
+ {
+ SkFontMgr_Android_Parser::GetSystemFontFamilies(families);
+ }
+ if (custom && SkFontMgr_Android_CustomFonts::kPreferSystem == custom->fSystemFontUse) {
+ SkString base(custom->fBasePath);
+ SkFontMgr_Android_Parser::GetCustomFontFamilies(
+ families, base, custom->fFontsXml, custom->fFallbackFontsXml);
+ }
+ this->buildNameToFamilyMap(families, custom ? custom->fIsolated : false);
+ this->findDefaultStyleSet();
+ families.deleteAll();
+ }
+
+protected:
+ /** Returns not how many families we have, but how many unique names
+ * exist among the families.
+ */
+ int onCountFamilies() const override {
+ return fNameToFamilyMap.count();
+ }
+
+ void onGetFamilyName(int index, SkString* familyName) const override {
+ if (index < 0 || fNameToFamilyMap.count() <= index) {
+ familyName->reset();
+ return;
+ }
+ familyName->set(fNameToFamilyMap[index].name);
+ }
+
+ SkFontStyleSet* onCreateStyleSet(int index) const override {
+ if (index < 0 || fNameToFamilyMap.count() <= index) {
+ return nullptr;
+ }
+ return SkRef(fNameToFamilyMap[index].styleSet);
+ }
+
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override {
+ if (!familyName) {
+ return nullptr;
+ }
+ SkAutoAsciiToLC tolc(familyName);
+ for (int i = 0; i < fNameToFamilyMap.count(); ++i) {
+ if (fNameToFamilyMap[i].name.equals(tolc.lc())) {
+ return SkRef(fNameToFamilyMap[i].styleSet);
+ }
+ }
+ // TODO: eventually we should not need to name fallback families.
+ for (int i = 0; i < fFallbackNameToFamilyMap.count(); ++i) {
+ if (fFallbackNameToFamilyMap[i].name.equals(tolc.lc())) {
+ return SkRef(fFallbackNameToFamilyMap[i].styleSet);
+ }
+ }
+ return nullptr;
+ }
+
+ virtual SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& style) const override {
+ SkAutoTUnref<SkFontStyleSet> sset(this->matchFamily(familyName));
+ return sset->matchStyle(style);
+ }
+
+ virtual SkTypeface* onMatchFaceStyle(const SkTypeface* typeface,
+ const SkFontStyle& style) const override {
+ for (int i = 0; i < fStyleSets.count(); ++i) {
+ for (int j = 0; j < fStyleSets[i]->fStyles.count(); ++j) {
+ if (fStyleSets[i]->fStyles[j] == typeface) {
+ return fStyleSets[i]->matchStyle(style);
+ }
+ }
+ }
+ return nullptr;
+ }
+
+ static sk_sp<SkTypeface_AndroidSystem> find_family_style_character(
+ const SkTArray<NameToFamily, true>& fallbackNameToFamilyMap,
+ const SkFontStyle& style, bool elegant,
+ const SkString& langTag, SkUnichar character)
+ {
+ for (int i = 0; i < fallbackNameToFamilyMap.count(); ++i) {
+ SkFontStyleSet_Android* family = fallbackNameToFamilyMap[i].styleSet;
+ sk_sp<SkTypeface_AndroidSystem> face(family->matchStyle(style));
+
+ if (!langTag.isEmpty() && !face->fLang.getTag().startsWith(langTag.c_str())) {
+ continue;
+ }
+
+ if (SkToBool(face->fVariantStyle & kElegant_FontVariant) != elegant) {
+ continue;
+ }
+
+ SkPaint paint;
+ paint.setTypeface(face);
+ paint.setTextEncoding(SkPaint::kUTF32_TextEncoding);
+
+ uint16_t glyphID;
+ paint.textToGlyphs(&character, sizeof(character), &glyphID);
+ if (glyphID != 0) {
+ return face;
+ }
+ }
+ return nullptr;
+ }
+
+ virtual SkTypeface* onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle& style,
+ const char* bcp47[],
+ int bcp47Count,
+ SkUnichar character) const override
+ {
+ // The variant 'elegant' is 'not squashed', 'compact' is 'stays in ascent/descent'.
+ // The variant 'default' means 'compact and elegant'.
+ // As a result, it is not possible to know the variant context from the font alone.
+ // TODO: add 'is_elegant' and 'is_compact' bits to 'style' request.
+
+ // The first time match anything elegant, second time anything not elegant.
+ for (int elegant = 2; elegant --> 0;) {
+ for (int bcp47Index = bcp47Count; bcp47Index --> 0;) {
+ SkLanguage lang(bcp47[bcp47Index]);
+ while (!lang.getTag().isEmpty()) {
+ sk_sp<SkTypeface_AndroidSystem> matchingTypeface =
+ find_family_style_character(fFallbackNameToFamilyMap,
+ style, SkToBool(elegant),
+ lang.getTag(), character);
+ if (matchingTypeface) {
+ return matchingTypeface.release();
+ }
+
+ lang = lang.getParent();
+ }
+ }
+ sk_sp<SkTypeface_AndroidSystem> matchingTypeface =
+ find_family_style_character(fFallbackNameToFamilyMap,
+ style, SkToBool(elegant),
+ SkString(), character);
+ if (matchingTypeface) {
+ return matchingTypeface.release();
+ }
+ }
+ return nullptr;
+ }
+
+ SkTypeface* onCreateFromData(SkData* data, int ttcIndex) const override {
+ return this->createFromStream(new SkMemoryStream(sk_ref_sp(data)), ttcIndex);
+ }
+
+ SkTypeface* onCreateFromFile(const char path[], int ttcIndex) const override {
+ std::unique_ptr<SkStreamAsset> stream = SkStream::MakeFromFile(path);
+ return stream.get() ? this->createFromStream(stream.release(), ttcIndex) : nullptr;
+ }
+
+ SkTypeface* onCreateFromStream(SkStreamAsset* bareStream, int ttcIndex) const override {
+ std::unique_ptr<SkStreamAsset> stream(bareStream);
+ bool isFixedPitch;
+ SkFontStyle style;
+ SkString name;
+ if (!fScanner.scanFont(stream.get(), ttcIndex, &name, &style, &isFixedPitch, nullptr)) {
+ return nullptr;
+ }
+ auto data = skstd::make_unique<SkFontData>(std::move(stream), ttcIndex, nullptr, 0);
+ return new SkTypeface_AndroidStream(std::move(data), style, isFixedPitch, name);
+ }
+
+ SkTypeface* onCreateFromStream(SkStreamAsset* s, const FontParameters& params) const override {
+ using Scanner = SkTypeface_FreeType::Scanner;
+ std::unique_ptr<SkStreamAsset> stream(s);
+ bool isFixedPitch;
+ SkFontStyle style;
+ SkString name;
+ Scanner::AxisDefinitions axisDefinitions;
+ if (!fScanner.scanFont(stream.get(), params.getCollectionIndex(),
+ &name, &style, &isFixedPitch, &axisDefinitions))
+ {
+ return nullptr;
+ }
+
+ int paramAxisCount;
+ const FontParameters::Axis* paramAxes = params.getAxes(&paramAxisCount);
+ SkAutoSTMalloc<4, SkFixed> axisValues(axisDefinitions.count());
+ Scanner::computeAxisValues(axisDefinitions, paramAxes, paramAxisCount, axisValues, name);
+
+ auto data = skstd::make_unique<SkFontData>(std::move(stream), params.getCollectionIndex(),
+ axisValues.get(), axisDefinitions.count());
+ return new SkTypeface_AndroidStream(std::move(data), style, isFixedPitch, name);
+ }
+
+ SkTypeface* onCreateFromFontData(std::unique_ptr<SkFontData> data) const override {
+ SkStreamAsset* stream(data->getStream());
+ bool isFixedPitch;
+ SkFontStyle style;
+ SkString name;
+ if (!fScanner.scanFont(stream, data->getIndex(), &name, &style, &isFixedPitch, nullptr)) {
+ return nullptr;
+ }
+ return new SkTypeface_AndroidStream(std::move(data), style, isFixedPitch, name);
+ }
+
+ SkTypeface* onLegacyCreateTypeface(const char familyName[], SkFontStyle style) const override {
+ if (familyName) {
+ // On Android, we must return nullptr when we can't find the requested
+ // named typeface so that the system/app can provide their own recovery
+ // mechanism. On other platforms we'd provide a typeface from the
+ // default family instead.
+ return this->onMatchFamilyStyle(familyName, style);
+ }
+ return fDefaultStyleSet->matchStyle(style);
+ }
+
+
+private:
+
+ SkTypeface_FreeType::Scanner fScanner;
+
+ SkTArray<sk_sp<SkFontStyleSet_Android>, true> fStyleSets;
+ sk_sp<SkFontStyleSet> fDefaultStyleSet;
+
+ SkTArray<NameToFamily, true> fNameToFamilyMap;
+ SkTArray<NameToFamily, true> fFallbackNameToFamilyMap;
+
+ void buildNameToFamilyMap(SkTDArray<FontFamily*> families, const bool isolated) {
+ for (int i = 0; i < families.count(); i++) {
+ FontFamily& family = *families[i];
+
+ SkTArray<NameToFamily, true>* nameToFamily = &fNameToFamilyMap;
+ if (family.fIsFallbackFont) {
+ nameToFamily = &fFallbackNameToFamilyMap;
+
+ if (0 == family.fNames.count()) {
+ SkString& fallbackName = family.fNames.push_back();
+ fallbackName.printf("%.2x##fallback", i);
+ }
+ }
+
+ sk_sp<SkFontStyleSet_Android> newSet =
+ sk_make_sp<SkFontStyleSet_Android>(family, fScanner, isolated);
+ if (0 == newSet->count()) {
+ continue;
+ }
+
+ for (const SkString& name : family.fNames) {
+ nameToFamily->emplace_back(NameToFamily{name, newSet.get()});
+ }
+ fStyleSets.emplace_back(std::move(newSet));
+ }
+ }
+
+ void findDefaultStyleSet() {
+ SkASSERT(!fStyleSets.empty());
+
+ static const char* defaultNames[] = { "sans-serif" };
+ for (const char* defaultName : defaultNames) {
+ fDefaultStyleSet.reset(this->onMatchFamily(defaultName));
+ if (fDefaultStyleSet) {
+ break;
+ }
+ }
+ if (nullptr == fDefaultStyleSet) {
+ fDefaultStyleSet = fStyleSets[0];
+ }
+ SkASSERT(fDefaultStyleSet);
+ }
+
+ typedef SkFontMgr INHERITED;
+};
+
+#ifdef SK_DEBUG
+static char const * const gSystemFontUseStrings[] = {
+ "OnlyCustom", "PreferCustom", "PreferSystem"
+};
+#endif
+SkFontMgr* SkFontMgr_New_Android(const SkFontMgr_Android_CustomFonts* custom) {
+ if (custom) {
+ SkASSERT(0 <= custom->fSystemFontUse);
+ SkASSERT(custom->fSystemFontUse < SK_ARRAY_COUNT(gSystemFontUseStrings));
+ SkDEBUGF(("SystemFontUse: %s BasePath: %s Fonts: %s FallbackFonts: %s\n",
+ gSystemFontUseStrings[custom->fSystemFontUse],
+ custom->fBasePath,
+ custom->fFontsXml,
+ custom->fFallbackFontsXml));
+ }
+
+ return new SkFontMgr_Android(custom);
+}
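Illustrative only, not part of the patch above: onMatchFamilyStyleCharacter widens its search in two directions. It tries the 'elegant' variant before the non-elegant one, and for each BCP-47 hint it walks from the full tag to successively shorter parent tags before dropping the language constraint entirely. Below is a minimal sketch of the language-widening part, assuming the parent of a tag is obtained by truncating at the last '-' (the effect of lang.getParent() above); find_face is a placeholder for the per-language fallback lookup.

#include <string>
#include <vector>

// Placeholder for the per-language fallback lookup performed by
// find_family_style_character() in the code above.
static const void* find_face(const std::string& /*langTag*/, char32_t /*ch*/) {
    return nullptr;
}

static const void* match_character(const std::vector<std::string>& bcp47, char32_t ch) {
    // The most specific hints come last in the bcp47 list, so walk it backwards.
    for (auto it = bcp47.rbegin(); it != bcp47.rend(); ++it) {
        std::string tag = *it;
        while (!tag.empty()) {
            if (const void* face = find_face(tag, ch)) {
                return face;
            }
            // Widen: "zh-Hans-CN" -> "zh-Hans" -> "zh" -> give up on this hint.
            size_t dash = tag.rfind('-');
            tag = (dash == std::string::npos) ? std::string() : tag.substr(0, dash);
        }
    }
    return find_face(std::string(), ch);   // finally, try with no language at all
}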
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_android_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_android_factory.cpp
new file mode 100644
index 000000000..ce39b2cf9
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_android_factory.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_ANDROID)
+
+#include "SkFontMgr.h"
+#include "SkFontMgr_android.h"
+
+// For test only.
+static const char* gTestFontsXml = nullptr;
+static const char* gTestFallbackFontsXml = nullptr;
+static const char* gTestBasePath = nullptr;
+
+void SkUseTestFontConfigFile(const char* fontsXml, const char* fallbackFontsXml,
+ const char* basePath)
+{
+ gTestFontsXml = fontsXml;
+ gTestFallbackFontsXml = fallbackFontsXml;
+ gTestBasePath = basePath;
+ SkASSERT(gTestFontsXml);
+ SkASSERT(gTestFallbackFontsXml);
+ SkASSERT(gTestBasePath);
+ SkDEBUGF(("Test BasePath: %s Fonts: %s FallbackFonts: %s\n",
+ gTestBasePath, gTestFontsXml, gTestFallbackFontsXml));
+}
+
+SkFontMgr* SkFontMgr::Factory() {
+ // These globals exist so that Chromium can override the environment.
+ // TODO: these globals need to be removed, and Chromium use SkFontMgr_New_Android instead.
+ if ((gTestFontsXml || gTestFallbackFontsXml) && gTestBasePath) {
+ SkFontMgr_Android_CustomFonts custom = {
+ SkFontMgr_Android_CustomFonts::kOnlyCustom,
+ gTestBasePath,
+ gTestFontsXml,
+ gTestFallbackFontsXml,
+ false /* fIsolated */
+ };
+ return SkFontMgr_New_Android(&custom);
+ }
+
+ return SkFontMgr_New_Android(nullptr);
+}
+
+#endif//defined(SK_BUILD_FOR_ANDROID)
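Illustrative only, not part of the patch above: rather than going through the gTest* globals, an embedder can build the Android manager directly by filling in SkFontMgr_Android_CustomFonts and calling SkFontMgr_New_Android, mirroring the aggregate that SkFontMgr::Factory() above constructs. A minimal sketch; the paths are placeholders.

#include "SkFontMgr.h"
#include "SkFontMgr_android.h"

// Build a manager that ignores the system fonts and only reads the given XML.
SkFontMgr* make_custom_android_fontmgr() {
    SkFontMgr_Android_CustomFonts custom = {
        SkFontMgr_Android_CustomFonts::kOnlyCustom,   // fSystemFontUse
        "/data/local/tmp/fonts/",                     // fBasePath (placeholder)
        "fonts.xml",                                  // fFontsXml (placeholder)
        "fallback_fonts.xml",                         // fFallbackFontsXml (placeholder)
        false                                         // fIsolated
    };
    return SkFontMgr_New_Android(&custom);            // ref-counted; caller unrefs when done
}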
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_android_parser.cpp b/gfx/skia/skia/src/ports/SkFontMgr_android_parser.cpp
new file mode 100644
index 000000000..306c6ffdb
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_android_parser.cpp
@@ -0,0 +1,801 @@
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Despite the name and location, this is portable code.
+
+#include "SkFixed.h"
+#include "SkFontMgr.h"
+#include "SkFontMgr_android_parser.h"
+#include "SkOSFile.h"
+#include "SkStream.h"
+#include "SkTDArray.h"
+#include "SkTSearch.h"
+#include "SkTemplates.h"
+#include "SkTLogic.h"
+
+#include <expat.h>
+
+#include <stdlib.h>
+#include <string.h>
+
+#define LMP_SYSTEM_FONTS_FILE "/system/etc/fonts.xml"
+#define OLD_SYSTEM_FONTS_FILE "/system/etc/system_fonts.xml"
+#define FALLBACK_FONTS_FILE "/system/etc/fallback_fonts.xml"
+#define VENDOR_FONTS_FILE "/vendor/etc/fallback_fonts.xml"
+
+#define LOCALE_FALLBACK_FONTS_SYSTEM_DIR "/system/etc"
+#define LOCALE_FALLBACK_FONTS_VENDOR_DIR "/vendor/etc"
+#define LOCALE_FALLBACK_FONTS_PREFIX "fallback_fonts-"
+#define LOCALE_FALLBACK_FONTS_SUFFIX ".xml"
+
+#ifndef SK_FONT_FILE_PREFIX
+# define SK_FONT_FILE_PREFIX "/fonts/"
+#endif
+
+/**
+ * This file contains TWO 'familyset' handlers:
+ * One for JB and earlier which works with
+ * /system/etc/system_fonts.xml
+ * /system/etc/fallback_fonts.xml
+ * /vendor/etc/fallback_fonts.xml
+ * /system/etc/fallback_fonts-XX.xml
+ * /vendor/etc/fallback_fonts-XX.xml
+ * and the other for LMP and later which works with
+ * /system/etc/fonts.xml
+ *
+ * If the 'familyset' 'version' attribute is 21 or higher the LMP parser is used, otherwise the JB.
+ */
+
+struct FamilyData;
+
+struct TagHandler {
+ /** Called at the start tag.
+ * Called immediately after the parent tag returns this handler from a call to 'tag'.
+ * Allows setting up for handling the tag content and processing attributes.
+ * If nullptr, will not be called.
+ */
+ void (*start)(FamilyData* data, const char* tag, const char** attributes);
+
+ /** Called at the end tag.
+ * Allows post-processing of any accumulated information.
+ * This will be the last call made in relation to the current tag.
+ * If nullptr, will not be called.
+ */
+ void (*end)(FamilyData* data, const char* tag);
+
+ /** Called when a nested tag is encountered.
+ * This is responsible for determining how to handle the tag.
+ * If the tag is not recognized, return nullptr to skip the tag.
+ * If nullptr, all nested tags will be skipped.
+ */
+ const TagHandler* (*tag)(FamilyData* data, const char* tag, const char** attributes);
+
+ /** The character handler for this tag.
+ * This is only active for character data contained directly in this tag (not sub-tags).
+ * The first parameter will be castable to a FamilyData*.
+ * If nullptr, any character data in this tag will be ignored.
+ */
+ XML_CharacterDataHandler chars;
+};
+
+/** Represents the current parsing state. */
+struct FamilyData {
+ FamilyData(XML_Parser parser, SkTDArray<FontFamily*>& families,
+ const SkString& basePath, bool isFallback, const char* filename,
+ const TagHandler* topLevelHandler)
+ : fParser(parser)
+ , fFamilies(families)
+ , fCurrentFamily(nullptr)
+ , fCurrentFontInfo(nullptr)
+ , fVersion(0)
+ , fBasePath(basePath)
+ , fIsFallback(isFallback)
+ , fFilename(filename)
+ , fDepth(1)
+ , fSkip(0)
+ , fHandler(&topLevelHandler, 1)
+ { }
+
+ XML_Parser fParser; // The expat parser doing the work, owned by caller
+ SkTDArray<FontFamily*>& fFamilies; // The array to append families, owned by caller
+ SkAutoTDelete<FontFamily> fCurrentFamily; // The family being created, owned by this
+ FontFileInfo* fCurrentFontInfo; // The fontInfo being created, owned by fCurrentFamily
+ int fVersion; // The version of the file parsed.
+ const SkString& fBasePath; // The current base path.
+ const bool fIsFallback; // Indicates the file being parsed is a fallback file
+ const char* fFilename; // The name of the file currently being parsed.
+
+ int fDepth; // The current element depth of the parse.
+ int fSkip; // The depth to stop skipping, 0 if not skipping.
+ SkTDArray<const TagHandler*> fHandler; // The stack of current tag handlers.
+};
+
+static bool memeq(const char* s1, const char* s2, size_t n1, size_t n2) {
+ return n1 == n2 && 0 == memcmp(s1, s2, n1);
+}
+#define MEMEQ(c, s, n) memeq(c, s, sizeof(c) - 1, n)
+
+#define ATTS_NON_NULL(a, i) (a[i] != nullptr && a[i+1] != nullptr)
+
+#define SK_FONTMGR_ANDROID_PARSER_PREFIX "[SkFontMgr Android Parser] "
+
+#define SK_FONTCONFIGPARSER_WARNING(message, ...) SkDebugf( \
+ SK_FONTMGR_ANDROID_PARSER_PREFIX "%s:%d:%d: warning: " message "\n", \
+ self->fFilename, \
+ XML_GetCurrentLineNumber(self->fParser), \
+ XML_GetCurrentColumnNumber(self->fParser), \
+ ##__VA_ARGS__);
+
+static bool is_whitespace(char c) {
+ return c == ' ' || c == '\n'|| c == '\r' || c == '\t';
+}
+
+static void trim_string(SkString* s) {
+ char* str = s->writable_str();
+ const char* start = str; // start is inclusive
+ const char* end = start + s->size(); // end is exclusive
+ while (is_whitespace(*start)) { ++start; }
+ if (start != end) {
+ --end; // make end inclusive
+ while (is_whitespace(*end)) { --end; }
+ ++end; // make end exclusive
+ }
+ size_t len = end - start;
+ memmove(str, start, len);
+ s->resize(len);
+}
+
+namespace lmpParser {
+
+static const TagHandler axisHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ FontFileInfo& file = *self->fCurrentFontInfo;
+ SkFourByteTag axisTag = SkSetFourByteTag('\0','\0','\0','\0');
+ SkFixed axisStyleValue = 0;
+ bool axisTagIsValid = false;
+ bool axisStyleValueIsValid = false;
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* name = attributes[i];
+ const char* value = attributes[i+1];
+ size_t nameLen = strlen(name);
+ if (MEMEQ("tag", name, nameLen)) {
+ size_t valueLen = strlen(value);
+ if (valueLen == 4) {
+ axisTag = SkSetFourByteTag(value[0], value[1], value[2], value[3]);
+ axisTagIsValid = true;
+ for (int j = 0; j < file.fAxes.count() - 1; ++j) {
+ if (file.fAxes[j].fTag == axisTag) {
+ axisTagIsValid = false;
+ SK_FONTCONFIGPARSER_WARNING("'%c%c%c%c' axis specified more than once",
+ (axisTag >> 24) & 0xFF,
+ (axisTag >> 16) & 0xFF,
+ (axisTag >> 8) & 0xFF,
+ (axisTag ) & 0xFF);
+ }
+ }
+ } else {
+ SK_FONTCONFIGPARSER_WARNING("'%s' is an invalid axis tag", value);
+ }
+ } else if (MEMEQ("stylevalue", name, nameLen)) {
+ if (parse_fixed<16>(value, &axisStyleValue)) {
+ axisStyleValueIsValid = true;
+ } else {
+ SK_FONTCONFIGPARSER_WARNING("'%s' is an invalid axis stylevalue", value);
+ }
+ }
+ }
+ if (axisTagIsValid && axisStyleValueIsValid) {
+ SkFontMgr::FontParameters::Axis& axis = file.fAxes.push_back();
+ axis.fTag = axisTag;
+ axis.fStyleValue = SkFixedToScalar(axisStyleValue);
+ }
+ },
+ /*end*/nullptr,
+ /*tag*/nullptr,
+ /*chars*/nullptr,
+};
+
+static const TagHandler fontHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ // 'weight' (non-negative integer) [default 0]
+ // 'style' ("normal", "italic") [default "auto"]
+ // 'index' (non-negative integer) [default 0]
+ // The character data should be a filename.
+ FontFileInfo& file = self->fCurrentFamily->fFonts.push_back();
+ self->fCurrentFontInfo = &file;
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* name = attributes[i];
+ const char* value = attributes[i+1];
+ size_t nameLen = strlen(name);
+ if (MEMEQ("weight", name, nameLen)) {
+ if (!parse_non_negative_integer(value, &file.fWeight)) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' is an invalid weight", value);
+ }
+ } else if (MEMEQ("style", name, nameLen)) {
+ size_t valueLen = strlen(value);
+ if (MEMEQ("normal", value, valueLen)) {
+ file.fStyle = FontFileInfo::Style::kNormal;
+ } else if (MEMEQ("italic", value, valueLen)) {
+ file.fStyle = FontFileInfo::Style::kItalic;
+ }
+ } else if (MEMEQ("index", name, nameLen)) {
+ if (!parse_non_negative_integer(value, &file.fIndex)) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' is an invalid index", value);
+ }
+ }
+ }
+ },
+ /*end*/[](FamilyData* self, const char* tag) {
+ trim_string(&self->fCurrentFontInfo->fFileName);
+ },
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("axis", tag, len)) {
+ return &axisHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/[](void* data, const char* s, int len) {
+ FamilyData* self = static_cast<FamilyData*>(data);
+ self->fCurrentFontInfo->fFileName.append(s, len);
+ }
+};
+
+static const TagHandler familyHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ // 'name' (string) [optional]
+ // 'lang' (string) [default ""]
+ // 'variant' ("elegant", "compact") [default "default"]
+ // If there is no name, this is a fallback only font.
+ FontFamily* family = new FontFamily(self->fBasePath, true);
+ self->fCurrentFamily.reset(family);
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* name = attributes[i];
+ const char* value = attributes[i+1];
+ size_t nameLen = strlen(name);
+ size_t valueLen = strlen(value);
+ if (MEMEQ("name", name, nameLen)) {
+ SkAutoAsciiToLC tolc(value);
+ family->fNames.push_back().set(tolc.lc());
+ family->fIsFallbackFont = false;
+ } else if (MEMEQ("lang", name, nameLen)) {
+ family->fLanguage = SkLanguage(value, valueLen);
+ } else if (MEMEQ("variant", name, nameLen)) {
+ if (MEMEQ("elegant", value, valueLen)) {
+ family->fVariant = kElegant_FontVariant;
+ } else if (MEMEQ("compact", value, valueLen)) {
+ family->fVariant = kCompact_FontVariant;
+ }
+ }
+ }
+ },
+ /*end*/[](FamilyData* self, const char* tag) {
+ *self->fFamilies.append() = self->fCurrentFamily.release();
+ },
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("font", tag, len)) {
+ return &fontHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
+
+static FontFamily* find_family(FamilyData* self, const SkString& familyName) {
+ for (int i = 0; i < self->fFamilies.count(); i++) {
+ FontFamily* candidate = self->fFamilies[i];
+ for (int j = 0; j < candidate->fNames.count(); j++) {
+ if (candidate->fNames[j] == familyName) {
+ return candidate;
+ }
+ }
+ }
+ return nullptr;
+}
+
+static const TagHandler aliasHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ // 'name' (string) introduces a new family name.
+ // 'to' (string) specifies which (previous) family to alias
+ // 'weight' (non-negative integer) [optional]
+ // If it *does not* have a weight, 'name' is an alias for the entire 'to' family.
+ // If it *does* have a weight, 'name' is a new family consisting of
+ // the font(s) with 'weight' from the 'to' family.
+
+ SkString aliasName;
+ SkString to;
+ int weight = 0;
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* name = attributes[i];
+ const char* value = attributes[i+1];
+ size_t nameLen = strlen(name);
+ if (MEMEQ("name", name, nameLen)) {
+ SkAutoAsciiToLC tolc(value);
+ aliasName.set(tolc.lc());
+ } else if (MEMEQ("to", name, nameLen)) {
+ to.set(value);
+ } else if (MEMEQ("weight", name, nameLen)) {
+ if (!parse_non_negative_integer(value, &weight)) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' is an invalid weight", value);
+ }
+ }
+ }
+
+ // Assumes that the named family is already declared
+ FontFamily* targetFamily = find_family(self, to);
+ if (!targetFamily) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' alias target not found", to.c_str());
+ return;
+ }
+
+ if (weight) {
+ FontFamily* family = new FontFamily(targetFamily->fBasePath, self->fIsFallback);
+ family->fNames.push_back().set(aliasName);
+
+ for (int i = 0; i < targetFamily->fFonts.count(); i++) {
+ if (targetFamily->fFonts[i].fWeight == weight) {
+ family->fFonts.push_back(targetFamily->fFonts[i]);
+ }
+ }
+ *self->fFamilies.append() = family;
+ } else {
+ targetFamily->fNames.push_back().set(aliasName);
+ }
+ },
+ /*end*/nullptr,
+ /*tag*/nullptr,
+ /*chars*/nullptr,
+};
+
+static const TagHandler familySetHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) { },
+ /*end*/nullptr,
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("family", tag, len)) {
+ return &familyHandler;
+ } else if (MEMEQ("alias", tag, len)) {
+ return &aliasHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
+
+} // lmpParser
+
+namespace jbParser {
+
+static const TagHandler fileHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ // 'variant' ("elegant", "compact") [default "default"]
+ // 'lang' (string) [default ""]
+ // 'index' (non-negative integer) [default 0]
+ // The character data should be a filename.
+ FontFamily& currentFamily = *self->fCurrentFamily.get();
+ FontFileInfo& newFileInfo = currentFamily.fFonts.push_back();
+ if (attributes) {
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* name = attributes[i];
+ const char* value = attributes[i+1];
+ size_t nameLen = strlen(name);
+ size_t valueLen = strlen(value);
+ if (MEMEQ("variant", name, nameLen)) {
+ const FontVariant prevVariant = currentFamily.fVariant;
+ if (MEMEQ("elegant", value, valueLen)) {
+ currentFamily.fVariant = kElegant_FontVariant;
+ } else if (MEMEQ("compact", value, valueLen)) {
+ currentFamily.fVariant = kCompact_FontVariant;
+ }
+ if (currentFamily.fFonts.count() > 1 && currentFamily.fVariant != prevVariant) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' unexpected variant found\n"
+ "Note: Every font file within a family must have identical variants.",
+ value);
+ }
+
+ } else if (MEMEQ("lang", name, nameLen)) {
+ SkLanguage prevLang = currentFamily.fLanguage;
+ currentFamily.fLanguage = SkLanguage(value, valueLen);
+ if (currentFamily.fFonts.count() > 1 && currentFamily.fLanguage != prevLang) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' unexpected language found\n"
+ "Note: Every font file within a family must have identical languages.",
+ value);
+ }
+
+ } else if (MEMEQ("index", name, nameLen)) {
+ if (!parse_non_negative_integer(value, &newFileInfo.fIndex)) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' is an invalid index", value);
+ }
+ }
+ }
+ }
+ self->fCurrentFontInfo = &newFileInfo;
+ },
+ /*end*/nullptr,
+ /*tag*/nullptr,
+ /*chars*/[](void* data, const char* s, int len) {
+ FamilyData* self = static_cast<FamilyData*>(data);
+ self->fCurrentFontInfo->fFileName.append(s, len);
+ }
+};
+
+static const TagHandler fileSetHandler = {
+ /*start*/nullptr,
+ /*end*/nullptr,
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("file", tag, len)) {
+ return &fileHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
+
+static const TagHandler nameHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ // The character data should be a name for the font.
+ self->fCurrentFamily->fNames.push_back();
+ },
+ /*end*/nullptr,
+ /*tag*/nullptr,
+ /*chars*/[](void* data, const char* s, int len) {
+ FamilyData* self = static_cast<FamilyData*>(data);
+ SkAutoAsciiToLC tolc(s, len);
+ self->fCurrentFamily->fNames.back().append(tolc.lc(), len);
+ }
+};
+
+static const TagHandler nameSetHandler = {
+ /*start*/nullptr,
+ /*end*/nullptr,
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("name", tag, len)) {
+ return &nameHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
+
+static const TagHandler familyHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ self->fCurrentFamily.reset(new FontFamily(self->fBasePath, self->fIsFallback));
+ // 'order' (non-negative integer) [default -1]
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* value = attributes[i+1];
+ parse_non_negative_integer(value, &self->fCurrentFamily->fOrder);
+ }
+ },
+ /*end*/[](FamilyData* self, const char* tag) {
+ *self->fFamilies.append() = self->fCurrentFamily.release();
+ },
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("nameset", tag, len)) {
+ return &nameSetHandler;
+ } else if (MEMEQ("fileset", tag, len)) {
+ return &fileSetHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
+
+static const TagHandler familySetHandler = {
+ /*start*/nullptr,
+ /*end*/nullptr,
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("family", tag, len)) {
+ return &familyHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
+
+} // namespace jbParser
+
+static const TagHandler topLevelHandler = {
+ /*start*/nullptr,
+ /*end*/nullptr,
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("familyset", tag, len)) {
+ // 'version' (non-negative integer) [default 0]
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* name = attributes[i];
+ size_t nameLen = strlen(name);
+ if (MEMEQ("version", name, nameLen)) {
+ const char* value = attributes[i+1];
+ if (parse_non_negative_integer(value, &self->fVersion)) {
+ if (self->fVersion >= 21) {
+ return &lmpParser::familySetHandler;
+ }
+ }
+ }
+ }
+ return &jbParser::familySetHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
+
+static void XMLCALL start_element_handler(void *data, const char *tag, const char **attributes) {
+ FamilyData* self = static_cast<FamilyData*>(data);
+
+ if (!self->fSkip) {
+ const TagHandler* parent = self->fHandler.top();
+ const TagHandler* child = parent->tag ? parent->tag(self, tag, attributes) : nullptr;
+ if (child) {
+ if (child->start) {
+ child->start(self, tag, attributes);
+ }
+ self->fHandler.push(child);
+ XML_SetCharacterDataHandler(self->fParser, child->chars);
+ } else {
+ SK_FONTCONFIGPARSER_WARNING("'%s' tag not recognized, skipping", tag);
+ XML_SetCharacterDataHandler(self->fParser, nullptr);
+ self->fSkip = self->fDepth;
+ }
+ }
+
+ ++self->fDepth;
+}
+
+static void XMLCALL end_element_handler(void* data, const char* tag) {
+ FamilyData* self = static_cast<FamilyData*>(data);
+ --self->fDepth;
+
+ if (!self->fSkip) {
+ const TagHandler* child = self->fHandler.top();
+ if (child->end) {
+ child->end(self, tag);
+ }
+ self->fHandler.pop();
+ const TagHandler* parent = self->fHandler.top();
+ XML_SetCharacterDataHandler(self->fParser, parent->chars);
+ }
+
+ if (self->fSkip == self->fDepth) {
+ self->fSkip = 0;
+ const TagHandler* parent = self->fHandler.top();
+ XML_SetCharacterDataHandler(self->fParser, parent->chars);
+ }
+}
+
+static void XMLCALL xml_entity_decl_handler(void *data,
+ const XML_Char *entityName,
+ int is_parameter_entity,
+ const XML_Char *value,
+ int value_length,
+ const XML_Char *base,
+ const XML_Char *systemId,
+ const XML_Char *publicId,
+ const XML_Char *notationName)
+{
+ FamilyData* self = static_cast<FamilyData*>(data);
+ SK_FONTCONFIGPARSER_WARNING("'%s' entity declaration found, stopping processing", entityName);
+ XML_StopParser(self->fParser, XML_FALSE);
+}
+
+static const XML_Memory_Handling_Suite sk_XML_alloc = {
+ sk_malloc_throw,
+ sk_realloc_throw,
+ sk_free
+};
+
+/**
+ * This function parses the given filename and stores the results in the given
+ * families array. Returns the version of the file, or a negative value if the file does not
+ * exist or cannot be parsed.
+ */
+static int parse_config_file(const char* filename, SkTDArray<FontFamily*>& families,
+ const SkString& basePath, bool isFallback)
+{
+ SkFILEStream file(filename);
+
+ // Some of the files we attempt to parse (in particular, /vendor/etc/fallback_fonts.xml)
+ // are optional - failure here is okay because one of these optional files may not exist.
+ if (!file.isValid()) {
+ SkDebugf(SK_FONTMGR_ANDROID_PARSER_PREFIX "'%s' could not be opened\n", filename);
+ return -1;
+ }
+
+ SkAutoTCallVProc<skstd::remove_pointer_t<XML_Parser>, XML_ParserFree> parser(
+ XML_ParserCreate_MM(nullptr, &sk_XML_alloc, nullptr));
+ if (!parser) {
+ SkDebugf(SK_FONTMGR_ANDROID_PARSER_PREFIX "could not create XML parser\n");
+ return -1;
+ }
+
+ FamilyData self(parser, families, basePath, isFallback, filename, &topLevelHandler);
+ XML_SetUserData(parser, &self);
+
+ // Disable entity processing, to inhibit internal entity expansion. See expat CVE-2013-0340
+ XML_SetEntityDeclHandler(parser, xml_entity_decl_handler);
+
+ // Start parsing oldschool; switch these in flight if we detect a newer version of the file.
+ XML_SetElementHandler(parser, start_element_handler, end_element_handler);
+
+ // One would assume it would be faster to have a buffer on the stack and call XML_Parse.
+ // But XML_Parse will call XML_GetBuffer anyway and memmove the passed buffer into it.
+ // (Unless XML_CONTEXT_BYTES is undefined, but all users define it.)
+ // In debug, buffer a small odd number of bytes to detect slicing in XML_CharacterDataHandler.
+ static const int bufferSize = 512 SkDEBUGCODE( - 507);
+ bool done = false;
+ while (!done) {
+ void* buffer = XML_GetBuffer(parser, bufferSize);
+ if (!buffer) {
+ SkDebugf(SK_FONTMGR_ANDROID_PARSER_PREFIX "could not buffer enough to continue\n");
+ return -1;
+ }
+ size_t len = file.read(buffer, bufferSize);
+ done = file.isAtEnd();
+ XML_Status status = XML_ParseBuffer(parser, len, done);
+ if (XML_STATUS_ERROR == status) {
+ XML_Error error = XML_GetErrorCode(parser);
+ int line = XML_GetCurrentLineNumber(parser);
+ int column = XML_GetCurrentColumnNumber(parser);
+ const XML_LChar* errorString = XML_ErrorString(error);
+ SkDebugf(SK_FONTMGR_ANDROID_PARSER_PREFIX "%s:%d:%d error %d: %s.\n",
+ filename, line, column, error, errorString);
+ return -1;
+ }
+ }
+ return self.fVersion;
+}
+
+/** Returns the version of the system font file actually found, negative if none. */
+static int append_system_font_families(SkTDArray<FontFamily*>& fontFamilies,
+ const SkString& basePath)
+{
+ int initialCount = fontFamilies.count();
+ int version = parse_config_file(LMP_SYSTEM_FONTS_FILE, fontFamilies, basePath, false);
+ if (version < 0 || fontFamilies.count() == initialCount) {
+ version = parse_config_file(OLD_SYSTEM_FONTS_FILE, fontFamilies, basePath, false);
+ }
+ return version;
+}
+
+/**
+ * In some versions of Android prior to Android 4.2 (JellyBean MR1 at API
+ * Level 17) the fallback fonts for certain locales were encoded in their own
+ * XML files with a suffix that identified the locale. We search the provided
+ * directory for those files, add all of their entries to the fallback chain, and
+ * include the locale as part of each entry.
+ */
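+// (Illustrative: with the LOCALE_FALLBACK_FONTS_PREFIX/SUFFIX macros defined earlier in this
+// file, a file named along the lines of "fallback_fonts-ja.xml" would contribute its families
+// tagged with SkLanguage("ja").)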
+static void append_fallback_font_families_for_locale(SkTDArray<FontFamily*>& fallbackFonts,
+ const char* dir,
+ const SkString& basePath)
+{
+ SkOSFile::Iter iter(dir, nullptr);
+ SkString fileName;
+ while (iter.next(&fileName, false)) {
+ // The size of the prefix and suffix.
+ static const size_t fixedLen = sizeof(LOCALE_FALLBACK_FONTS_PREFIX) - 1
+ + sizeof(LOCALE_FALLBACK_FONTS_SUFFIX) - 1;
+
+ // The size of the prefix, suffix, and a minimum valid language code
+ static const size_t minSize = fixedLen + 2;
+
+ if (fileName.size() < minSize ||
+ !fileName.startsWith(LOCALE_FALLBACK_FONTS_PREFIX) ||
+ !fileName.endsWith(LOCALE_FALLBACK_FONTS_SUFFIX))
+ {
+ continue;
+ }
+
+ SkString locale(fileName.c_str() + sizeof(LOCALE_FALLBACK_FONTS_PREFIX) - 1,
+ fileName.size() - fixedLen);
+
+ SkString absoluteFilename;
+ absoluteFilename.printf("%s/%s", dir, fileName.c_str());
+
+ SkTDArray<FontFamily*> langSpecificFonts;
+ parse_config_file(absoluteFilename.c_str(), langSpecificFonts, basePath, true);
+
+ for (int i = 0; i < langSpecificFonts.count(); ++i) {
+ FontFamily* family = langSpecificFonts[i];
+ family->fLanguage = SkLanguage(locale);
+ *fallbackFonts.append() = family;
+ }
+ }
+}
+
+static void append_system_fallback_font_families(SkTDArray<FontFamily*>& fallbackFonts,
+ const SkString& basePath)
+{
+ parse_config_file(FALLBACK_FONTS_FILE, fallbackFonts, basePath, true);
+ append_fallback_font_families_for_locale(fallbackFonts,
+ LOCALE_FALLBACK_FONTS_SYSTEM_DIR,
+ basePath);
+}
+
+static void mixin_vendor_fallback_font_families(SkTDArray<FontFamily*>& fallbackFonts,
+ const SkString& basePath)
+{
+ SkTDArray<FontFamily*> vendorFonts;
+ parse_config_file(VENDOR_FONTS_FILE, vendorFonts, basePath, true);
+ append_fallback_font_families_for_locale(vendorFonts,
+ LOCALE_FALLBACK_FONTS_VENDOR_DIR,
+ basePath);
+
+ // This loop inserts the vendor fallback fonts in the correct order in the
+ // overall fallbacks list.
+ int currentOrder = -1;
+ for (int i = 0; i < vendorFonts.count(); ++i) {
+ FontFamily* family = vendorFonts[i];
+ int order = family->fOrder;
+ if (order < 0) {
+ if (currentOrder < 0) {
+ // Default case - just add it to the end of the fallback list
+ *fallbackFonts.append() = family;
+ } else {
+ // no order specified on this font, but we're incrementing the order
+ // based on an earlier order insertion request
+ *fallbackFonts.insert(currentOrder++) = family;
+ }
+ } else {
+ // Add the font into the fallback list in the specified order. Set
+ // currentOrder for correct placement of other fonts in the vendor list.
+ *fallbackFonts.insert(order) = family;
+ currentOrder = order + 1;
+ }
+ }
+}
+
+void SkFontMgr_Android_Parser::GetSystemFontFamilies(SkTDArray<FontFamily*>& fontFamilies) {
+ // Version 21 of the system font configuration does not need any fallback configuration files.
+ SkString basePath(getenv("ANDROID_ROOT"));
+ basePath.append(SK_FONT_FILE_PREFIX, sizeof(SK_FONT_FILE_PREFIX) - 1);
+
+ if (append_system_font_families(fontFamilies, basePath) >= 21) {
+ return;
+ }
+
+ // Append all the fallback fonts to system fonts
+ SkTDArray<FontFamily*> fallbackFonts;
+ append_system_fallback_font_families(fallbackFonts, basePath);
+ mixin_vendor_fallback_font_families(fallbackFonts, basePath);
+ fontFamilies.append(fallbackFonts.count(), fallbackFonts.begin());
+}
+
+void SkFontMgr_Android_Parser::GetCustomFontFamilies(SkTDArray<FontFamily*>& fontFamilies,
+ const SkString& basePath,
+ const char* fontsXml,
+ const char* fallbackFontsXml,
+ const char* langFallbackFontsDir)
+{
+ if (fontsXml) {
+ parse_config_file(fontsXml, fontFamilies, basePath, false);
+ }
+ if (fallbackFontsXml) {
+ parse_config_file(fallbackFontsXml, fontFamilies, basePath, true);
+ }
+ if (langFallbackFontsDir) {
+ append_fallback_font_families_for_locale(fontFamilies,
+ langFallbackFontsDir,
+ basePath);
+ }
+}
+
+SkLanguage SkLanguage::getParent() const {
+ SkASSERT(!fTag.isEmpty());
+ const char* tag = fTag.c_str();
+
+ // strip off the rightmost "-.*"
+ const char* parentTagEnd = strrchr(tag, '-');
+ if (parentTagEnd == nullptr) {
+ return SkLanguage();
+ }
+ size_t parentTagLen = parentTagEnd - tag;
+ return SkLanguage(tag, parentTagLen);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_android_parser.h b/gfx/skia/skia/src/ports/SkFontMgr_android_parser.h
new file mode 100644
index 000000000..efd8144f1
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_android_parser.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_android_parser_DEFINED
+#define SkFontMgr_android_parser_DEFINED
+
+#include "SkFontMgr.h"
+#include "SkString.h"
+#include "SkTArray.h"
+#include "SkTDArray.h"
+#include "SkTypes.h"
+
+#include <climits>
+#include <limits>
+
+/** \class SkLanguage
+
+ The SkLanguage class represents a human written language, and is used by
+ text draw operations to determine which glyph to draw when drawing
+    characters with variants (i.e. Han-derived characters).
+*/
+class SkLanguage {
+public:
+ SkLanguage() { }
+ SkLanguage(const SkString& tag) : fTag(tag) { }
+ SkLanguage(const char* tag) : fTag(tag) { }
+ SkLanguage(const char* tag, size_t len) : fTag(tag, len) { }
+ SkLanguage(const SkLanguage& b) : fTag(b.fTag) { }
+
+ /** Gets a BCP 47 language identifier for this SkLanguage.
+ @return a BCP 47 language identifier representing this language
+ */
+ const SkString& getTag() const { return fTag; }
+
+ /** Performs BCP 47 fallback to return an SkLanguage one step more general.
+ @return an SkLanguage one step more general
+ */
+ SkLanguage getParent() const;
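+    // Illustrative: the parent of "zh-Hans-CN" is "zh-Hans", whose parent is "zh", whose
+    // parent in turn is the empty SkLanguage.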
+
+ bool operator==(const SkLanguage& b) const {
+ return fTag == b.fTag;
+ }
+ bool operator!=(const SkLanguage& b) const {
+ return fTag != b.fTag;
+ }
+ SkLanguage& operator=(const SkLanguage& b) {
+ fTag = b.fTag;
+ return *this;
+ }
+
+private:
+ //! BCP 47 language identifier
+ SkString fTag;
+};
+
+enum FontVariants {
+ kDefault_FontVariant = 0x01,
+ kCompact_FontVariant = 0x02,
+ kElegant_FontVariant = 0x04,
+ kLast_FontVariant = kElegant_FontVariant,
+};
+typedef uint32_t FontVariant;
+
+// Must remain trivially movable (can be memmoved).
+struct FontFileInfo {
+ FontFileInfo() : fIndex(0), fWeight(0), fStyle(Style::kAuto) { }
+
+ SkString fFileName;
+ int fIndex;
+ int fWeight;
+ enum class Style { kAuto, kNormal, kItalic } fStyle;
+ SkTArray<SkFontMgr::FontParameters::Axis, true> fAxes;
+};
+
+/**
+ * A font family provides one or more names for a collection of fonts, each of
+ * which has a different style (normal, italic) or weight (thin, light, bold,
+ * etc).
+ * Some fonts may occur in compact variants for use in the user interface.
+ * Android distinguishes "fallback" fonts to support non-ASCII character sets.
+ */
+struct FontFamily {
+ FontFamily(const SkString& basePath, bool isFallbackFont)
+ : fVariant(kDefault_FontVariant)
+ , fOrder(-1)
+ , fIsFallbackFont(isFallbackFont)
+ , fBasePath(basePath)
+ { }
+
+ SkTArray<SkString, true> fNames;
+ SkTArray<FontFileInfo, true> fFonts;
+ SkLanguage fLanguage;
+ FontVariant fVariant;
+ int fOrder; // internal to the parser, not useful to users.
+ bool fIsFallbackFont;
+ const SkString fBasePath;
+};
+
+namespace SkFontMgr_Android_Parser {
+
+/** Parses system font configuration files and appends result to fontFamilies. */
+void GetSystemFontFamilies(SkTDArray<FontFamily*>& fontFamilies);
+
+/** Parses font configuration files and appends result to fontFamilies. */
+void GetCustomFontFamilies(SkTDArray<FontFamily*>& fontFamilies,
+ const SkString& basePath,
+ const char* fontsXml,
+ const char* fallbackFontsXml,
+ const char* langFallbackFontsDir = nullptr);
+
+} // SkFontMgr_Android_Parser namespace
+
+
+/** Parses a null terminated string into an integer type, checking for overflow.
+ * http://www.w3.org/TR/html-markup/datatypes.html#common.data.integer.non-negative-def
+ *
+ * If the string cannot be parsed into 'value', returns false and does not change 'value'.
+ */
+template <typename T> static bool parse_non_negative_integer(const char* s, T* value) {
+ static_assert(std::numeric_limits<T>::is_integer, "T_must_be_integer");
+
+ if (*s == '\0') {
+ return false;
+ }
+
+ const T nMax = std::numeric_limits<T>::max() / 10;
+ const T dMax = std::numeric_limits<T>::max() - (nMax * 10);
+ T n = 0;
+ for (; *s; ++s) {
+ // Check if digit
+ if (*s < '0' || '9' < *s) {
+ return false;
+ }
+ T d = *s - '0';
+ // Check for overflow
+ if (n > nMax || (n == nMax && d > dMax)) {
+ return false;
+ }
+ n = (n * 10) + d;
+ }
+ *value = n;
+ return true;
+}
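+// A minimal usage sketch (values are illustrative):
+//   int weight = 0;
+//   parse_non_negative_integer("400", &weight);  // returns true,  weight == 400
+//   parse_non_negative_integer("-1",  &weight);  // returns false, weight unchanged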
+
+/** Parses a null terminated string into a signed fixed point value with bias N.
+ *
+ * Like http://www.w3.org/TR/html-markup/datatypes.html#common.data.float-def ,
+ * but may start with '.' and does not support 'e'. '-?((:digit:+(.:digit:+)?)|(.:digit:+))'
+ *
+ * Checks for overflow.
+ * Low-bit rounding is not defined (the current behavior is to truncate).
+ * The bias (N) must leave room for the sign bit and 4 bits of integer.
+ *
+ * If the string cannot be parsed into 'value', returns false and does not change 'value'.
+ */
+template <int N, typename T> static bool parse_fixed(const char* s, T* value) {
+ static_assert(std::numeric_limits<T>::is_integer, "T_must_be_integer");
+ static_assert(std::numeric_limits<T>::is_signed, "T_must_be_signed");
+ static_assert(sizeof(T) * CHAR_BIT - N >= 5, "N_must_leave_four_bits_plus_sign");
+
+ bool negate = false;
+ if (*s == '-') {
+ ++s;
+ negate = true;
+ }
+ if (*s == '\0') {
+ return false;
+ }
+
+ const T nMax = (std::numeric_limits<T>::max() >> N) / 10;
+ const T dMax = (std::numeric_limits<T>::max() >> N) - (nMax * 10);
+ T n = 0;
+ T frac = 0;
+ for (; *s; ++s) {
+ // Check if digit
+ if (*s < '0' || '9' < *s) {
+ // If it wasn't a digit, check if it is a '.' followed by something.
+ if (*s != '.' || s[1] == '\0') {
+ return false;
+ }
+ // Find the end, verify digits.
+ for (++s; *s; ++s) {
+ if (*s < '0' || '9' < *s) {
+ return false;
+ }
+ }
+ // Read back toward the '.'.
+ for (--s; *s != '.'; --s) {
+ T d = *s - '0';
+ frac = (frac + (d << N)) / 10; // This requires four bits overhead.
+ }
+ break;
+ }
+ T d = *s - '0';
+ // Check for overflow
+ if (n > nMax || (n == nMax && d > dMax)) {
+ return false;
+ }
+ n = (n * 10) + d;
+ }
+ if (negate) {
+ n = -n;
+ frac = -frac;
+ }
+ *value = SkLeftShift(n, N) + frac;
+ return true;
+}
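+// Worked example (illustrative): with N == 8, parse_fixed<8>("1.5", &value) stores
+// (1 << 8) + 128 == 384 into value, i.e. 1.5 with 8 fractional bits, and returns true.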
+
+#endif /* SkFontMgr_android_parser_DEFINED */
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_custom.cpp b/gfx/skia/skia/src/ports/SkFontMgr_custom.cpp
new file mode 100644
index 000000000..9a8aa4946
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_custom.cpp
@@ -0,0 +1,522 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFontDescriptor.h"
+#include "SkFontHost_FreeType_common.h"
+#include "SkFontMgr.h"
+#include "SkFontMgr_custom.h"
+#include "SkFontStyle.h"
+#include "SkMakeUnique.h"
+#include "SkOSFile.h"
+#include "SkRefCnt.h"
+#include "SkStream.h"
+#include "SkString.h"
+#include "SkTArray.h"
+#include "SkTemplates.h"
+#include "SkTypeface.h"
+#include "SkTypefaceCache.h"
+#include "SkTypes.h"
+
+#include <limits>
+#include <memory>
+
+class SkData;
+
+/** The base SkTypeface implementation for the custom font manager. */
+class SkTypeface_Custom : public SkTypeface_FreeType {
+public:
+ SkTypeface_Custom(const SkFontStyle& style, bool isFixedPitch,
+ bool sysFont, const SkString familyName, int index)
+ : INHERITED(style, isFixedPitch)
+ , fIsSysFont(sysFont), fFamilyName(familyName), fIndex(index)
+ { }
+
+ bool isSysFont() const { return fIsSysFont; }
+
+protected:
+ void onGetFamilyName(SkString* familyName) const override {
+ *familyName = fFamilyName;
+ }
+
+ void onGetFontDescriptor(SkFontDescriptor* desc, bool* isLocal) const override {
+ desc->setFamilyName(fFamilyName.c_str());
+ desc->setStyle(this->fontStyle());
+ *isLocal = !this->isSysFont();
+ }
+
+ int getIndex() const { return fIndex; }
+
+private:
+ const bool fIsSysFont;
+ const SkString fFamilyName;
+ const int fIndex;
+
+ typedef SkTypeface_FreeType INHERITED;
+};
+
+/** The empty SkTypeface implementation for the custom font manager.
+ * Used as the last resort fallback typeface.
+ */
+class SkTypeface_Empty : public SkTypeface_Custom {
+public:
+ SkTypeface_Empty() : INHERITED(SkFontStyle(), false, true, SkString(), 0) {}
+
+protected:
+ SkStreamAsset* onOpenStream(int*) const override { return nullptr; }
+
+private:
+ typedef SkTypeface_Custom INHERITED;
+};
+
+/** The stream SkTypeface implementation for the custom font manager. */
+class SkTypeface_Stream : public SkTypeface_Custom {
+public:
+ SkTypeface_Stream(std::unique_ptr<SkFontData> fontData,
+ const SkFontStyle& style, bool isFixedPitch, bool sysFont,
+ const SkString familyName)
+ : INHERITED(style, isFixedPitch, sysFont, familyName, fontData->getIndex())
+ , fData(std::move(fontData))
+ { }
+
+protected:
+ SkStreamAsset* onOpenStream(int* ttcIndex) const override {
+ *ttcIndex = fData->getIndex();
+ return fData->getStream()->duplicate();
+ }
+
+ std::unique_ptr<SkFontData> onMakeFontData() const override {
+ return skstd::make_unique<SkFontData>(*fData);
+ }
+
+private:
+ const std::unique_ptr<const SkFontData> fData;
+
+ typedef SkTypeface_Custom INHERITED;
+};
+
+/** The file SkTypeface implementation for the custom font manager. */
+class SkTypeface_File : public SkTypeface_Custom {
+public:
+ SkTypeface_File(const SkFontStyle& style, bool isFixedPitch, bool sysFont,
+ const SkString familyName, const char path[], int index)
+ : INHERITED(style, isFixedPitch, sysFont, familyName, index)
+ , fPath(path)
+ { }
+
+protected:
+ SkStreamAsset* onOpenStream(int* ttcIndex) const override {
+ *ttcIndex = this->getIndex();
+ return SkStream::MakeFromFile(fPath.c_str()).release();
+ }
+
+private:
+ SkString fPath;
+
+ typedef SkTypeface_Custom INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * SkFontStyleSet_Custom
+ *
+ * This class is used by SkFontMgr_Custom to hold SkTypeface_Custom families.
+ */
+class SkFontStyleSet_Custom : public SkFontStyleSet {
+public:
+ explicit SkFontStyleSet_Custom(const SkString familyName) : fFamilyName(familyName) { }
+
+    /** Should only be called during the initial build phase. */
+ void appendTypeface(sk_sp<SkTypeface_Custom> typeface) {
+ fStyles.emplace_back(std::move(typeface));
+ }
+
+ int count() override {
+ return fStyles.count();
+ }
+
+ void getStyle(int index, SkFontStyle* style, SkString* name) override {
+ SkASSERT(index < fStyles.count());
+ if (style) {
+ *style = fStyles[index]->fontStyle();
+ }
+ if (name) {
+ name->reset();
+ }
+ }
+
+ SkTypeface* createTypeface(int index) override {
+ SkASSERT(index < fStyles.count());
+ return SkRef(fStyles[index].get());
+ }
+
+ SkTypeface* matchStyle(const SkFontStyle& pattern) override {
+ return this->matchStyleCSS3(pattern);
+ }
+
+ SkString getFamilyName() { return fFamilyName; }
+
+private:
+ SkTArray<sk_sp<SkTypeface_Custom>> fStyles;
+ SkString fFamilyName;
+
+ friend class SkFontMgr_Custom;
+};
+
+/**
+ * SkFontMgr_Custom
+ *
+ * This class is essentially a collection of SkFontStyleSet_Custom,
+ * one SkFontStyleSet_Custom for each family. This class may be modified
+ * to load fonts from any source by changing the initialization.
+ */
+class SkFontMgr_Custom : public SkFontMgr {
+public:
+ typedef SkTArray<sk_sp<SkFontStyleSet_Custom>> Families;
+ class SystemFontLoader {
+ public:
+ virtual ~SystemFontLoader() { }
+ virtual void loadSystemFonts(const SkTypeface_FreeType::Scanner&, Families*) const = 0;
+ };
+ explicit SkFontMgr_Custom(const SystemFontLoader& loader) : fDefaultFamily(nullptr) {
+ loader.loadSystemFonts(fScanner, &fFamilies);
+
+ // Try to pick a default font.
+ static const char* defaultNames[] = {
+ "Arial", "Verdana", "Times New Roman", "Droid Sans", nullptr
+ };
+ for (size_t i = 0; i < SK_ARRAY_COUNT(defaultNames); ++i) {
+ sk_sp<SkFontStyleSet_Custom> set(this->onMatchFamily(defaultNames[i]));
+ if (nullptr == set) {
+ continue;
+ }
+
+ sk_sp<SkTypeface> tf(set->matchStyle(SkFontStyle(SkFontStyle::kNormal_Weight,
+ SkFontStyle::kNormal_Width,
+ SkFontStyle::kUpright_Slant)));
+ if (nullptr == tf) {
+ continue;
+ }
+
+ fDefaultFamily = set.get();
+ break;
+ }
+ if (nullptr == fDefaultFamily) {
+ fDefaultFamily = fFamilies[0].get();
+ }
+ }
+
+protected:
+ int onCountFamilies() const override {
+ return fFamilies.count();
+ }
+
+ void onGetFamilyName(int index, SkString* familyName) const override {
+ SkASSERT(index < fFamilies.count());
+ familyName->set(fFamilies[index]->getFamilyName());
+ }
+
+ SkFontStyleSet_Custom* onCreateStyleSet(int index) const override {
+ SkASSERT(index < fFamilies.count());
+ return SkRef(fFamilies[index].get());
+ }
+
+ SkFontStyleSet_Custom* onMatchFamily(const char familyName[]) const override {
+ for (int i = 0; i < fFamilies.count(); ++i) {
+ if (fFamilies[i]->getFamilyName().equals(familyName)) {
+ return SkRef(fFamilies[i].get());
+ }
+ }
+ return nullptr;
+ }
+
+ SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontStyle) const override
+ {
+ SkAutoTUnref<SkFontStyleSet> sset(this->matchFamily(familyName));
+ return sset->matchStyle(fontStyle);
+ }
+
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override
+ {
+ return nullptr;
+ }
+
+ SkTypeface* onMatchFaceStyle(const SkTypeface* familyMember,
+ const SkFontStyle& fontStyle) const override
+ {
+ for (int i = 0; i < fFamilies.count(); ++i) {
+ for (int j = 0; j < fFamilies[i]->fStyles.count(); ++j) {
+ if (fFamilies[i]->fStyles[j].get() == familyMember) {
+ return fFamilies[i]->matchStyle(fontStyle);
+ }
+ }
+ }
+ return nullptr;
+ }
+
+ SkTypeface* onCreateFromData(SkData* data, int ttcIndex) const override {
+ return this->createFromStream(new SkMemoryStream(sk_ref_sp(data)), ttcIndex);
+ }
+
+ SkTypeface* onCreateFromStream(SkStreamAsset* bareStream, int ttcIndex) const override {
+ return this->createFromStream(bareStream, FontParameters().setCollectionIndex(ttcIndex));
+ }
+
+ SkTypeface* onCreateFromStream(SkStreamAsset* s, const FontParameters& params) const override {
+ using Scanner = SkTypeface_FreeType::Scanner;
+ std::unique_ptr<SkStreamAsset> stream(s);
+ bool isFixedPitch;
+ SkFontStyle style;
+ SkString name;
+ Scanner::AxisDefinitions axisDefinitions;
+ if (!fScanner.scanFont(stream.get(), params.getCollectionIndex(),
+ &name, &style, &isFixedPitch, &axisDefinitions))
+ {
+ return nullptr;
+ }
+
+ int paramAxisCount;
+ const FontParameters::Axis* paramAxes = params.getAxes(&paramAxisCount);
+ SkAutoSTMalloc<4, SkFixed> axisValues(axisDefinitions.count());
+ Scanner::computeAxisValues(axisDefinitions, paramAxes, paramAxisCount, axisValues, name);
+
+ auto data = skstd::make_unique<SkFontData>(std::move(stream), params.getCollectionIndex(),
+ axisValues.get(), axisDefinitions.count());
+ return new SkTypeface_Stream(std::move(data), style, isFixedPitch, false, name);
+ }
+
+ SkTypeface* onCreateFromFontData(std::unique_ptr<SkFontData> data) const override {
+ bool isFixedPitch;
+ SkFontStyle style;
+ SkString name;
+ if (!fScanner.scanFont(data->getStream(), data->getIndex(),
+ &name, &style, &isFixedPitch, nullptr))
+ {
+ return nullptr;
+ }
+ return new SkTypeface_Stream(std::move(data), style, isFixedPitch, false, name);
+ }
+
+ SkTypeface* onCreateFromFile(const char path[], int ttcIndex) const override {
+ std::unique_ptr<SkStreamAsset> stream = SkStream::MakeFromFile(path);
+ return stream.get() ? this->createFromStream(stream.release(), ttcIndex) : nullptr;
+ }
+
+ SkTypeface* onLegacyCreateTypeface(const char familyName[], SkFontStyle style) const override {
+ SkTypeface* tf = nullptr;
+
+ if (familyName) {
+ tf = this->onMatchFamilyStyle(familyName, style);
+ }
+
+ if (nullptr == tf) {
+ tf = fDefaultFamily->matchStyle(style);
+ }
+
+ return tf;
+ }
+
+private:
+ Families fFamilies;
+ SkFontStyleSet_Custom* fDefaultFamily;
+ SkTypeface_FreeType::Scanner fScanner;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class DirectorySystemFontLoader : public SkFontMgr_Custom::SystemFontLoader {
+public:
+ DirectorySystemFontLoader(const char* dir) : fBaseDirectory(dir) { }
+
+ void loadSystemFonts(const SkTypeface_FreeType::Scanner& scanner,
+ SkFontMgr_Custom::Families* families) const override
+ {
+ load_directory_fonts(scanner, fBaseDirectory, ".ttf", families);
+ load_directory_fonts(scanner, fBaseDirectory, ".ttc", families);
+ load_directory_fonts(scanner, fBaseDirectory, ".otf", families);
+ load_directory_fonts(scanner, fBaseDirectory, ".pfb", families);
+
+ if (families->empty()) {
+ SkFontStyleSet_Custom* family = new SkFontStyleSet_Custom(SkString());
+ families->push_back().reset(family);
+ family->appendTypeface(sk_make_sp<SkTypeface_Empty>());
+ }
+ }
+
+private:
+ static SkFontStyleSet_Custom* find_family(SkFontMgr_Custom::Families& families,
+ const char familyName[])
+ {
+ for (int i = 0; i < families.count(); ++i) {
+ if (families[i]->getFamilyName().equals(familyName)) {
+ return families[i].get();
+ }
+ }
+ return nullptr;
+ }
+
+ static void load_directory_fonts(const SkTypeface_FreeType::Scanner& scanner,
+ const SkString& directory, const char* suffix,
+ SkFontMgr_Custom::Families* families)
+ {
+ SkOSFile::Iter iter(directory.c_str(), suffix);
+ SkString name;
+
+ while (iter.next(&name, false)) {
+ SkString filename(SkOSPath::Join(directory.c_str(), name.c_str()));
+ std::unique_ptr<SkStreamAsset> stream = SkStream::MakeFromFile(filename.c_str());
+ if (!stream) {
+ SkDebugf("---- failed to open <%s>\n", filename.c_str());
+ continue;
+ }
+
+ int numFaces;
+ if (!scanner.recognizedFont(stream.get(), &numFaces)) {
+ SkDebugf("---- failed to open <%s> as a font\n", filename.c_str());
+ continue;
+ }
+
+ for (int faceIndex = 0; faceIndex < numFaces; ++faceIndex) {
+ bool isFixedPitch;
+ SkString realname;
+ SkFontStyle style = SkFontStyle(); // avoid uninitialized warning
+ if (!scanner.scanFont(stream.get(), faceIndex,
+ &realname, &style, &isFixedPitch, nullptr))
+ {
+ SkDebugf("---- failed to open <%s> <%d> as a font\n",
+ filename.c_str(), faceIndex);
+ continue;
+ }
+
+ SkFontStyleSet_Custom* addTo = find_family(*families, realname.c_str());
+ if (nullptr == addTo) {
+ addTo = new SkFontStyleSet_Custom(realname);
+ families->push_back().reset(addTo);
+ }
+ addTo->appendTypeface(sk_make_sp<SkTypeface_File>(style, isFixedPitch, true,
+ realname, filename.c_str(),
+ faceIndex));
+ }
+ }
+
+ SkOSFile::Iter dirIter(directory.c_str());
+ while (dirIter.next(&name, true)) {
+ if (name.startsWith(".")) {
+ continue;
+ }
+ SkString dirname(SkOSPath::Join(directory.c_str(), name.c_str()));
+ load_directory_fonts(scanner, dirname, suffix, families);
+ }
+ }
+
+ SkString fBaseDirectory;
+};
+
+SK_API SkFontMgr* SkFontMgr_New_Custom_Directory(const char* dir) {
+ return new SkFontMgr_Custom(DirectorySystemFontLoader(dir));
+}
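+// A minimal usage sketch (the directory and family name are illustrative):
+//   sk_sp<SkFontMgr> mgr(SkFontMgr_New_Custom_Directory("/usr/share/fonts/truetype/"));
+//   sk_sp<SkTypeface> face(mgr->matchFamilyStyle("DejaVu Sans", SkFontStyle()));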
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct SkEmbeddedResource { const uint8_t* data; size_t size; };
+struct SkEmbeddedResourceHeader { const SkEmbeddedResource* entries; int count; };
+
+class EmbeddedSystemFontLoader : public SkFontMgr_Custom::SystemFontLoader {
+public:
+ EmbeddedSystemFontLoader(const SkEmbeddedResourceHeader* header) : fHeader(header) { }
+
+ void loadSystemFonts(const SkTypeface_FreeType::Scanner& scanner,
+ SkFontMgr_Custom::Families* families) const override
+ {
+ for (int i = 0; i < fHeader->count; ++i) {
+ const SkEmbeddedResource& fontEntry = fHeader->entries[i];
+ load_embedded_font(scanner, fontEntry.data, fontEntry.size, i, families);
+ }
+
+ if (families->empty()) {
+ SkFontStyleSet_Custom* family = new SkFontStyleSet_Custom(SkString());
+ families->push_back().reset(family);
+ family->appendTypeface(sk_make_sp<SkTypeface_Empty>());
+ }
+ }
+
+private:
+ static SkFontStyleSet_Custom* find_family(SkFontMgr_Custom::Families& families,
+ const char familyName[])
+ {
+ for (int i = 0; i < families.count(); ++i) {
+ if (families[i]->getFamilyName().equals(familyName)) {
+ return families[i].get();
+ }
+ }
+ return nullptr;
+ }
+
+ static void load_embedded_font(const SkTypeface_FreeType::Scanner& scanner,
+ const uint8_t* data, size_t size, int index,
+ SkFontMgr_Custom::Families* families)
+ {
+ auto stream = skstd::make_unique<SkMemoryStream>(data, size, false);
+
+ int numFaces;
+ if (!scanner.recognizedFont(stream.get(), &numFaces)) {
+ SkDebugf("---- failed to open <%d> as a font\n", index);
+ return;
+ }
+
+ for (int faceIndex = 0; faceIndex < numFaces; ++faceIndex) {
+ bool isFixedPitch;
+ SkString realname;
+ SkFontStyle style = SkFontStyle(); // avoid uninitialized warning
+ if (!scanner.scanFont(stream.get(), faceIndex,
+ &realname, &style, &isFixedPitch, nullptr))
+ {
+ SkDebugf("---- failed to open <%d> <%d> as a font\n", index, faceIndex);
+ return;
+ }
+
+ SkFontStyleSet_Custom* addTo = find_family(*families, realname.c_str());
+ if (nullptr == addTo) {
+ addTo = new SkFontStyleSet_Custom(realname);
+ families->push_back().reset(addTo);
+ }
+ auto data = skstd::make_unique<SkFontData>(std::move(stream), faceIndex, nullptr, 0);
+ addTo->appendTypeface(sk_make_sp<SkTypeface_Stream>(std::move(data),
+ style, isFixedPitch,
+ true, realname));
+ }
+ }
+
+ const SkEmbeddedResourceHeader* fHeader;
+};
+
+SkFontMgr* SkFontMgr_New_Custom_Embedded(const SkEmbeddedResourceHeader* header) {
+ return new SkFontMgr_Custom(EmbeddedSystemFontLoader(header));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class EmptyFontLoader : public SkFontMgr_Custom::SystemFontLoader {
+public:
+ EmptyFontLoader() { }
+
+ void loadSystemFonts(const SkTypeface_FreeType::Scanner& scanner,
+ SkFontMgr_Custom::Families* families) const override
+ {
+ SkFontStyleSet_Custom* family = new SkFontStyleSet_Custom(SkString());
+ families->push_back().reset(family);
+ family->appendTypeface(sk_make_sp<SkTypeface_Empty>());
+ }
+
+};
+
+SK_API SkFontMgr* SkFontMgr_New_Custom_Empty() {
+ return new SkFontMgr_Custom(EmptyFontLoader());
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_custom_directory_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_custom_directory_factory.cpp
new file mode 100644
index 000000000..0ca6f4b3b
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_custom_directory_factory.cpp
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFontMgr.h"
+#include "SkFontMgr_custom.h"
+
+#ifndef SK_FONT_FILE_PREFIX
+# define SK_FONT_FILE_PREFIX "/usr/share/fonts/"
+#endif
+
+SkFontMgr* SkFontMgr::Factory() {
+ return SkFontMgr_New_Custom_Directory(SK_FONT_FILE_PREFIX);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_custom_embedded_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_custom_embedded_factory.cpp
new file mode 100644
index 000000000..6ea6a2d2a
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_custom_embedded_factory.cpp
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFontMgr.h"
+
+struct SkEmbeddedResource { const uint8_t* data; size_t size; };
+struct SkEmbeddedResourceHeader { const SkEmbeddedResource* entries; int count; };
+SkFontMgr* SkFontMgr_New_Custom_Embedded(const SkEmbeddedResourceHeader* header);
+
+extern "C" const SkEmbeddedResourceHeader SK_EMBEDDED_FONTS;
+SkFontMgr* SkFontMgr::Factory() {
+ return SkFontMgr_New_Custom_Embedded(&SK_EMBEDDED_FONTS);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_custom_empty_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_custom_empty_factory.cpp
new file mode 100644
index 000000000..c9487cdcc
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_custom_empty_factory.cpp
@@ -0,0 +1,13 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFontMgr.h"
+#include "SkFontMgr_custom.h"
+
+SkFontMgr* SkFontMgr::Factory() {
+ return SkFontMgr_New_Custom_Empty();
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_empty_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_empty_factory.cpp
new file mode 100644
index 000000000..b4232cde1
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_empty_factory.cpp
@@ -0,0 +1,13 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFontMgr.h"
+
+SkFontMgr* SkFontMgr::Factory() {
+    // Always return nullptr; an empty SkFontMgr will be used.
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_fontconfig.cpp b/gfx/skia/skia/src/ports/SkFontMgr_fontconfig.cpp
new file mode 100644
index 000000000..1f0055870
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_fontconfig.cpp
@@ -0,0 +1,958 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAdvancedTypefaceMetrics.h"
+#include "SkDataTable.h"
+#include "SkFixed.h"
+#include "SkFontDescriptor.h"
+#include "SkFontHost_FreeType_common.h"
+#include "SkFontMgr.h"
+#include "SkFontStyle.h"
+#include "SkMakeUnique.h"
+#include "SkMath.h"
+#include "SkMutex.h"
+#include "SkOSFile.h"
+#include "SkRefCnt.h"
+#include "SkStream.h"
+#include "SkString.h"
+#include "SkTDArray.h"
+#include "SkTemplates.h"
+#include "SkTypeface.h"
+#include "SkTypefaceCache.h"
+#include "SkTypes.h"
+
+#include <fontconfig/fontconfig.h>
+#include <string.h>
+
+class SkData;
+
+// FC_POSTSCRIPT_NAME was added with b561ff20 which ended up in 2.10.92
+// Ubuntu 12.04 is on 2.8.0, 13.10 is on 2.10.93
+// Debian 7 is on 2.9.0, 8 is on 2.11
+// OpenSUSE 12.2 is on 2.9.0, 12.3 is on 2.10.2, 13.1 is on 2.11.0
+// Fedora 19 is on 2.10.93
+#ifndef FC_POSTSCRIPT_NAME
+# define FC_POSTSCRIPT_NAME "postscriptname"
+#endif
+
+#ifdef SK_DEBUG
+# include "SkTLS.h"
+#endif
+
+/** Since FontConfig is poorly documented, this gives a high level overview:
+ *
+ * FcConfig is a handle to a FontConfig configuration instance. Each 'configuration' is independent
+ * from any others which may exist. There exists a default global configuration which is created
+ * and destroyed by FcInit and FcFini, but this default should not normally be used.
+ * Instead, one should use FcConfigCreate and FcInit* to have a named local state.
+ *
+ * FcPatterns are {objectName -> [element]} (maps from object names to a list of elements).
+ * Each element is some internal data plus an FcValue which is a variant (a union with a type tag).
+ * Lists of elements are not typed, except by convention. Any collection of FcValues must be
+ * assumed to be heterogeneous by the code, but the code need not do anything particularly
+ * interesting if the values go against convention.
+ *
+ * Somewhat like DirectWrite, FontConfig supports synthetics through FC_EMBOLDEN and FC_MATRIX.
+ * Like all synthetic information, such information must be passed with the font data.
+ */
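+// Illustrative only (hypothetical values): a request pattern might conceptually look like
+//   { "family" -> ["DejaVu Sans", "sans-serif"], "weight" -> [80], "slant" -> [0] },
+// with each value carrying its own FcValue type tag (string, integer, ...).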
+
+namespace {
+
+// Fontconfig is not threadsafe before 2.10.91. Before that, we lock with a global mutex.
+// See https://bug.skia.org/1497 for background.
+SK_DECLARE_STATIC_MUTEX(gFCMutex);
+
+#ifdef SK_DEBUG
+void* CreateThreadFcLocked() { return new bool(false); }
+void DeleteThreadFcLocked(void* v) { delete static_cast<bool*>(v); }
+# define THREAD_FC_LOCKED \
+ static_cast<bool*>(SkTLS::Get(CreateThreadFcLocked, DeleteThreadFcLocked))
+#endif
+
+struct FCLocker {
+ // Assume FcGetVersion() has always been thread safe.
+
+ FCLocker() {
+ if (FcGetVersion() < 21091) {
+ gFCMutex.acquire();
+ } else {
+ SkDEBUGCODE(bool* threadLocked = THREAD_FC_LOCKED);
+ SkASSERT(false == *threadLocked);
+ SkDEBUGCODE(*threadLocked = true);
+ }
+ }
+
+ ~FCLocker() {
+ AssertHeld();
+ if (FcGetVersion() < 21091) {
+ gFCMutex.release();
+ } else {
+ SkDEBUGCODE(*THREAD_FC_LOCKED = false);
+ }
+ }
+
+ static void AssertHeld() { SkDEBUGCODE(
+ if (FcGetVersion() < 21091) {
+ gFCMutex.assertHeld();
+ } else {
+ SkASSERT(true == *THREAD_FC_LOCKED);
+ }
+ ) }
+};
+
+} // namespace
+
+template<typename T, void (*D)(T*)> void FcTDestroy(T* t) {
+ FCLocker::AssertHeld();
+ D(t);
+}
+template <typename T, T* (*C)(), void (*D)(T*)> class SkAutoFc
+ : public SkAutoTCallVProc<T, FcTDestroy<T, D> > {
+public:
+ SkAutoFc() : SkAutoTCallVProc<T, FcTDestroy<T, D> >(C()) {
+ T* obj = this->operator T*();
+ SkASSERT_RELEASE(nullptr != obj);
+ }
+ explicit SkAutoFc(T* obj) : SkAutoTCallVProc<T, FcTDestroy<T, D> >(obj) {}
+};
+
+typedef SkAutoFc<FcCharSet, FcCharSetCreate, FcCharSetDestroy> SkAutoFcCharSet;
+typedef SkAutoFc<FcConfig, FcConfigCreate, FcConfigDestroy> SkAutoFcConfig;
+typedef SkAutoFc<FcFontSet, FcFontSetCreate, FcFontSetDestroy> SkAutoFcFontSet;
+typedef SkAutoFc<FcLangSet, FcLangSetCreate, FcLangSetDestroy> SkAutoFcLangSet;
+typedef SkAutoFc<FcObjectSet, FcObjectSetCreate, FcObjectSetDestroy> SkAutoFcObjectSet;
+typedef SkAutoFc<FcPattern, FcPatternCreate, FcPatternDestroy> SkAutoFcPattern;
+
+static bool get_bool(FcPattern* pattern, const char object[], bool missing = false) {
+ FcBool value;
+ if (FcPatternGetBool(pattern, object, 0, &value) != FcResultMatch) {
+ return missing;
+ }
+ return value;
+}
+
+static int get_int(FcPattern* pattern, const char object[], int missing) {
+ int value;
+ if (FcPatternGetInteger(pattern, object, 0, &value) != FcResultMatch) {
+ return missing;
+ }
+ return value;
+}
+
+static const char* get_string(FcPattern* pattern, const char object[], const char* missing = "") {
+ FcChar8* value;
+ if (FcPatternGetString(pattern, object, 0, &value) != FcResultMatch) {
+ return missing;
+ }
+ return (const char*)value;
+}
+
+static const FcMatrix* get_matrix(FcPattern* pattern, const char object[]) {
+ FcMatrix* matrix;
+ if (FcPatternGetMatrix(pattern, object, 0, &matrix) != FcResultMatch) {
+ return nullptr;
+ }
+ return matrix;
+}
+
+enum SkWeakReturn {
+ kIsWeak_WeakReturn,
+ kIsStrong_WeakReturn,
+ kNoId_WeakReturn
+};
+/** Ideally there would exist a call like
+ * FcResult FcPatternIsWeak(pattern, object, id, FcBool* isWeak);
+ *
+ * However, there is no such call and as of Fc 2.11.0 even FcPatternEquals ignores the weak bit.
+ * Currently, the only reliable way of finding the weak bit is by its effect on matching.
+ * The weak bit only affects the matching of FC_FAMILY and FC_POSTSCRIPT_NAME object values.
+ * An element with the weak bit is scored after FC_LANG; one without the weak bit is scored before.
+ * Note that the weak bit is stored on the element, not on the value it holds.
+ */
+static SkWeakReturn is_weak(FcPattern* pattern, const char object[], int id) {
+ FCLocker::AssertHeld();
+
+ FcResult result;
+
+ // Create a copy of the pattern with only the value 'pattern'['object'['id']] in it.
+    // Internally, FontConfig pattern objects are linked lists, so it is faster to remove
+    // values from the head.
+ SkAutoFcObjectSet requestedObjectOnly(FcObjectSetBuild(object, nullptr));
+ SkAutoFcPattern minimal(FcPatternFilter(pattern, requestedObjectOnly));
+ FcBool hasId = true;
+ for (int i = 0; hasId && i < id; ++i) {
+ hasId = FcPatternRemove(minimal, object, 0);
+ }
+ if (!hasId) {
+ return kNoId_WeakReturn;
+ }
+ FcValue value;
+ result = FcPatternGet(minimal, object, 0, &value);
+ if (result != FcResultMatch) {
+ return kNoId_WeakReturn;
+ }
+ while (hasId) {
+ hasId = FcPatternRemove(minimal, object, 1);
+ }
+
+ // Create a font set with two patterns.
+ // 1. the same 'object' as minimal and a lang object with only 'nomatchlang'.
+ // 2. a different 'object' from minimal and a lang object with only 'matchlang'.
+ SkAutoFcFontSet fontSet;
+
+ SkAutoFcLangSet strongLangSet;
+ FcLangSetAdd(strongLangSet, (const FcChar8*)"nomatchlang");
+ SkAutoFcPattern strong(FcPatternDuplicate(minimal));
+ FcPatternAddLangSet(strong, FC_LANG, strongLangSet);
+
+ SkAutoFcLangSet weakLangSet;
+ FcLangSetAdd(weakLangSet, (const FcChar8*)"matchlang");
+ SkAutoFcPattern weak;
+ FcPatternAddString(weak, object, (const FcChar8*)"nomatchstring");
+ FcPatternAddLangSet(weak, FC_LANG, weakLangSet);
+
+ FcFontSetAdd(fontSet, strong.release());
+ FcFontSetAdd(fontSet, weak.release());
+
+ // Add 'matchlang' to the copy of the pattern.
+ FcPatternAddLangSet(minimal, FC_LANG, weakLangSet);
+
+ // Run a match against the copy of the pattern.
+ // If the 'id' was weak, then we should match the pattern with 'matchlang'.
+ // If the 'id' was strong, then we should match the pattern with 'nomatchlang'.
+
+ // Note that this config is only used for FcFontRenderPrepare, which we don't even want.
+ // However, there appears to be no way to match/sort without it.
+ SkAutoFcConfig config;
+ FcFontSet* fontSets[1] = { fontSet };
+ SkAutoFcPattern match(FcFontSetMatch(config, fontSets, SK_ARRAY_COUNT(fontSets),
+ minimal, &result));
+
+ FcLangSet* matchLangSet;
+ FcPatternGetLangSet(match, FC_LANG, 0, &matchLangSet);
+ return FcLangEqual == FcLangSetHasLang(matchLangSet, (const FcChar8*)"matchlang")
+ ? kIsWeak_WeakReturn : kIsStrong_WeakReturn;
+}
+
+/** Removes weak elements from either FC_FAMILY or FC_POSTSCRIPT_NAME objects in the property.
+ * This can be quite expensive, and should not be used more than once per font lookup.
+ * This removes all of the weak elements after the last strong element.
+ */
+static void remove_weak(FcPattern* pattern, const char object[]) {
+ FCLocker::AssertHeld();
+
+ SkAutoFcObjectSet requestedObjectOnly(FcObjectSetBuild(object, nullptr));
+ SkAutoFcPattern minimal(FcPatternFilter(pattern, requestedObjectOnly));
+
+ int lastStrongId = -1;
+ int numIds;
+ SkWeakReturn result;
+ for (int id = 0; ; ++id) {
+ result = is_weak(minimal, object, 0);
+ if (kNoId_WeakReturn == result) {
+ numIds = id;
+ break;
+ }
+ if (kIsStrong_WeakReturn == result) {
+ lastStrongId = id;
+ }
+ SkAssertResult(FcPatternRemove(minimal, object, 0));
+ }
+
+ // If they were all weak, then leave the pattern alone.
+ if (lastStrongId < 0) {
+ return;
+ }
+
+ // Remove everything after the last strong.
+ for (int id = lastStrongId + 1; id < numIds; ++id) {
+ SkAssertResult(FcPatternRemove(pattern, object, lastStrongId + 1));
+ }
+}
+
+static int map_range(SkFixed value,
+ SkFixed old_min, SkFixed old_max,
+ SkFixed new_min, SkFixed new_max)
+{
+ SkASSERT(old_min < old_max);
+ SkASSERT(new_min <= new_max);
+ return new_min + SkMulDiv(value - old_min, new_max - new_min, old_max - old_min);
+}
+
+struct MapRanges {
+ SkFixed old_val;
+ SkFixed new_val;
+};
+
+static SkFixed map_ranges_fixed(SkFixed val, MapRanges const ranges[], int rangesCount) {
+ // -Inf to [0]
+ if (val < ranges[0].old_val) {
+ return ranges[0].new_val;
+ }
+
+ // Linear from [i] to [i+1]
+ for (int i = 0; i < rangesCount - 1; ++i) {
+ if (val < ranges[i+1].old_val) {
+ return map_range(val, ranges[i].old_val, ranges[i+1].old_val,
+ ranges[i].new_val, ranges[i+1].new_val);
+ }
+ }
+
+ // From [n] to +Inf
+ // if (fcweight < Inf)
+ return ranges[rangesCount-1].new_val;
+}
+
+static int map_ranges(int val, MapRanges const ranges[], int rangesCount) {
+ return SkFixedRoundToInt(map_ranges_fixed(SkIntToFixed(val), ranges, rangesCount));
+}
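+// For example, an FcPattern weight of FC_WEIGHT_BOLD sits exactly on a breakpoint of the
+// weight table below, so it maps to SkFontStyle::kBold_Weight with no interpolation.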
+
+template<int n> struct SkTFixed {
+ static_assert(-32768 <= n && n <= 32767, "SkTFixed_n_not_in_range");
+ static const SkFixed value = static_cast<SkFixed>(n << 16);
+};
+
+static SkFontStyle skfontstyle_from_fcpattern(FcPattern* pattern) {
+ typedef SkFontStyle SkFS;
+
+ static const MapRanges weightRanges[] = {
+ { SkTFixed<FC_WEIGHT_THIN>::value, SkTFixed<SkFS::kThin_Weight>::value },
+ { SkTFixed<FC_WEIGHT_EXTRALIGHT>::value, SkTFixed<SkFS::kExtraLight_Weight>::value },
+ { SkTFixed<FC_WEIGHT_LIGHT>::value, SkTFixed<SkFS::kLight_Weight>::value },
+ { SkTFixed<FC_WEIGHT_REGULAR>::value, SkTFixed<SkFS::kNormal_Weight>::value },
+ { SkTFixed<FC_WEIGHT_MEDIUM>::value, SkTFixed<SkFS::kMedium_Weight>::value },
+ { SkTFixed<FC_WEIGHT_DEMIBOLD>::value, SkTFixed<SkFS::kSemiBold_Weight>::value },
+ { SkTFixed<FC_WEIGHT_BOLD>::value, SkTFixed<SkFS::kBold_Weight>::value },
+ { SkTFixed<FC_WEIGHT_EXTRABOLD>::value, SkTFixed<SkFS::kExtraBold_Weight>::value },
+ { SkTFixed<FC_WEIGHT_BLACK>::value, SkTFixed<SkFS::kBlack_Weight>::value },
+ { SkTFixed<FC_WEIGHT_EXTRABLACK>::value, SkTFixed<SkFS::kExtraBlack_Weight>::value },
+ };
+ int weight = map_ranges(get_int(pattern, FC_WEIGHT, FC_WEIGHT_REGULAR),
+ weightRanges, SK_ARRAY_COUNT(weightRanges));
+
+ static const MapRanges widthRanges[] = {
+ { SkTFixed<FC_WIDTH_ULTRACONDENSED>::value, SkTFixed<SkFS::kUltraCondensed_Width>::value },
+ { SkTFixed<FC_WIDTH_EXTRACONDENSED>::value, SkTFixed<SkFS::kExtraCondensed_Width>::value },
+ { SkTFixed<FC_WIDTH_CONDENSED>::value, SkTFixed<SkFS::kCondensed_Width>::value },
+ { SkTFixed<FC_WIDTH_SEMICONDENSED>::value, SkTFixed<SkFS::kSemiCondensed_Width>::value },
+ { SkTFixed<FC_WIDTH_NORMAL>::value, SkTFixed<SkFS::kNormal_Width>::value },
+ { SkTFixed<FC_WIDTH_SEMIEXPANDED>::value, SkTFixed<SkFS::kSemiExpanded_Width>::value },
+ { SkTFixed<FC_WIDTH_EXPANDED>::value, SkTFixed<SkFS::kExpanded_Width>::value },
+ { SkTFixed<FC_WIDTH_EXTRAEXPANDED>::value, SkTFixed<SkFS::kExtraExpanded_Width>::value },
+ { SkTFixed<FC_WIDTH_ULTRAEXPANDED>::value, SkTFixed<SkFS::kUltraExpanded_Width>::value },
+ };
+ int width = map_ranges(get_int(pattern, FC_WIDTH, FC_WIDTH_NORMAL),
+ widthRanges, SK_ARRAY_COUNT(widthRanges));
+
+ SkFS::Slant slant = SkFS::kUpright_Slant;
+ switch (get_int(pattern, FC_SLANT, FC_SLANT_ROMAN)) {
+ case FC_SLANT_ROMAN: slant = SkFS::kUpright_Slant; break;
+ case FC_SLANT_ITALIC : slant = SkFS::kItalic_Slant ; break;
+ case FC_SLANT_OBLIQUE: slant = SkFS::kOblique_Slant; break;
+ default: SkASSERT(false); break;
+ }
+
+ return SkFontStyle(weight, width, slant);
+}
+
+static void fcpattern_from_skfontstyle(SkFontStyle style, FcPattern* pattern) {
+ FCLocker::AssertHeld();
+
+ typedef SkFontStyle SkFS;
+
+ static const MapRanges weightRanges[] = {
+ { SkTFixed<SkFS::kThin_Weight>::value, SkTFixed<FC_WEIGHT_THIN>::value },
+ { SkTFixed<SkFS::kExtraLight_Weight>::value, SkTFixed<FC_WEIGHT_EXTRALIGHT>::value },
+ { SkTFixed<SkFS::kLight_Weight>::value, SkTFixed<FC_WEIGHT_LIGHT>::value },
+ { SkTFixed<SkFS::kNormal_Weight>::value, SkTFixed<FC_WEIGHT_REGULAR>::value },
+ { SkTFixed<SkFS::kMedium_Weight>::value, SkTFixed<FC_WEIGHT_MEDIUM>::value },
+ { SkTFixed<SkFS::kSemiBold_Weight>::value, SkTFixed<FC_WEIGHT_DEMIBOLD>::value },
+ { SkTFixed<SkFS::kBold_Weight>::value, SkTFixed<FC_WEIGHT_BOLD>::value },
+ { SkTFixed<SkFS::kExtraBold_Weight>::value, SkTFixed<FC_WEIGHT_EXTRABOLD>::value },
+ { SkTFixed<SkFS::kBlack_Weight>::value, SkTFixed<FC_WEIGHT_BLACK>::value },
+ { SkTFixed<SkFS::kExtraBlack_Weight>::value, SkTFixed<FC_WEIGHT_EXTRABLACK>::value },
+ };
+ int weight = map_ranges(style.weight(), weightRanges, SK_ARRAY_COUNT(weightRanges));
+
+ static const MapRanges widthRanges[] = {
+ { SkTFixed<SkFS::kUltraCondensed_Width>::value, SkTFixed<FC_WIDTH_ULTRACONDENSED>::value },
+ { SkTFixed<SkFS::kExtraCondensed_Width>::value, SkTFixed<FC_WIDTH_EXTRACONDENSED>::value },
+ { SkTFixed<SkFS::kCondensed_Width>::value, SkTFixed<FC_WIDTH_CONDENSED>::value },
+ { SkTFixed<SkFS::kSemiCondensed_Width>::value, SkTFixed<FC_WIDTH_SEMICONDENSED>::value },
+ { SkTFixed<SkFS::kNormal_Width>::value, SkTFixed<FC_WIDTH_NORMAL>::value },
+ { SkTFixed<SkFS::kSemiExpanded_Width>::value, SkTFixed<FC_WIDTH_SEMIEXPANDED>::value },
+ { SkTFixed<SkFS::kExpanded_Width>::value, SkTFixed<FC_WIDTH_EXPANDED>::value },
+ { SkTFixed<SkFS::kExtraExpanded_Width>::value, SkTFixed<FC_WIDTH_EXTRAEXPANDED>::value },
+ { SkTFixed<SkFS::kUltraExpanded_Width>::value, SkTFixed<FC_WIDTH_ULTRAEXPANDED>::value },
+ };
+ int width = map_ranges(style.width(), widthRanges, SK_ARRAY_COUNT(widthRanges));
+
+ int slant = FC_SLANT_ROMAN;
+ switch (style.slant()) {
+ case SkFS::kUpright_Slant: slant = FC_SLANT_ROMAN ; break;
+ case SkFS::kItalic_Slant : slant = FC_SLANT_ITALIC ; break;
+ case SkFS::kOblique_Slant: slant = FC_SLANT_OBLIQUE; break;
+ default: SkASSERT(false); break;
+ }
+
+ FcPatternAddInteger(pattern, FC_WEIGHT, weight);
+ FcPatternAddInteger(pattern, FC_WIDTH , width);
+ FcPatternAddInteger(pattern, FC_SLANT , slant);
+}
+
+class SkTypeface_stream : public SkTypeface_FreeType {
+public:
+ /** @param data takes ownership of the font data.*/
+ SkTypeface_stream(std::unique_ptr<SkFontData> data, const SkFontStyle& style, bool fixedWidth)
+ : INHERITED(style, fixedWidth)
+ , fData(std::move(data))
+ { }
+
+ void onGetFamilyName(SkString* familyName) const override {
+ familyName->reset();
+ }
+
+ void onGetFontDescriptor(SkFontDescriptor* desc, bool* serialize) const override {
+ *serialize = true;
+ }
+
+ SkStreamAsset* onOpenStream(int* ttcIndex) const override {
+ *ttcIndex = fData->getIndex();
+ return fData->getStream()->duplicate();
+ }
+
+ std::unique_ptr<SkFontData> onMakeFontData() const override {
+ return skstd::make_unique<SkFontData>(*fData);
+ }
+
+private:
+ const std::unique_ptr<const SkFontData> fData;
+
+ typedef SkTypeface_FreeType INHERITED;
+};
+
+class SkTypeface_fontconfig : public SkTypeface_FreeType {
+public:
+ /** @param pattern takes ownership of the reference. */
+ static SkTypeface_fontconfig* Create(FcPattern* pattern) {
+ return new SkTypeface_fontconfig(pattern);
+ }
+ mutable SkAutoFcPattern fPattern;
+
+ void onGetFamilyName(SkString* familyName) const override {
+ *familyName = get_string(fPattern, FC_FAMILY);
+ }
+
+ void onGetFontDescriptor(SkFontDescriptor* desc, bool* serialize) const override {
+ FCLocker lock;
+ desc->setFamilyName(get_string(fPattern, FC_FAMILY));
+ desc->setFullName(get_string(fPattern, FC_FULLNAME));
+ desc->setPostscriptName(get_string(fPattern, FC_POSTSCRIPT_NAME));
+ desc->setStyle(this->fontStyle());
+ *serialize = false;
+ }
+
+ SkStreamAsset* onOpenStream(int* ttcIndex) const override {
+ FCLocker lock;
+ *ttcIndex = get_int(fPattern, FC_INDEX, 0);
+ return SkStream::MakeFromFile(get_string(fPattern, FC_FILE)).release();
+ }
+
+ void onFilterRec(SkScalerContextRec* rec) const override {
+ const FcMatrix* fcMatrix = get_matrix(fPattern, FC_MATRIX);
+ if (fcMatrix) {
+ // fPost2x2 is column-major, left handed (y down).
+ // FcMatrix is column-major, right handed (y up).
+ SkMatrix fm;
+ fm.setAll(fcMatrix->xx,-fcMatrix->xy, 0,
+ -fcMatrix->yx, fcMatrix->yy, 0,
+ 0 , 0 , 1);
+
+ SkMatrix sm;
+ rec->getMatrixFrom2x2(&sm);
+
+ sm.preConcat(fm);
+ rec->fPost2x2[0][0] = sm.getScaleX();
+ rec->fPost2x2[0][1] = sm.getSkewX();
+ rec->fPost2x2[1][0] = sm.getSkewY();
+ rec->fPost2x2[1][1] = sm.getScaleY();
+ }
+ if (get_bool(fPattern, FC_EMBOLDEN)) {
+ rec->fFlags |= SkScalerContext::kEmbolden_Flag;
+ }
+ this->INHERITED::onFilterRec(rec);
+ }
+
+ SkAdvancedTypefaceMetrics* onGetAdvancedTypefaceMetrics(PerGlyphInfo perGlyphInfo,
+ const uint32_t* glyphIDs,
+ uint32_t glyphIDsCount) const override
+ {
+ SkAdvancedTypefaceMetrics* info =
+ this->INHERITED::onGetAdvancedTypefaceMetrics(perGlyphInfo, glyphIDs, glyphIDsCount);
+
+ // Simulated fonts shouldn't be considered to be of the type of their data.
+ if (get_matrix(fPattern, FC_MATRIX) || get_bool(fPattern, FC_EMBOLDEN)) {
+ info->fType = SkAdvancedTypefaceMetrics::kOther_Font;
+ }
+ return info;
+ }
+
+ virtual ~SkTypeface_fontconfig() {
+ // Hold the lock while unrefing the pattern.
+ FCLocker lock;
+ fPattern.reset();
+ }
+
+private:
+ /** @param pattern takes ownership of the reference. */
+ SkTypeface_fontconfig(FcPattern* pattern)
+ : INHERITED(skfontstyle_from_fcpattern(pattern),
+ FC_PROPORTIONAL != get_int(pattern, FC_SPACING, FC_PROPORTIONAL))
+ , fPattern(pattern)
+ { }
+
+ typedef SkTypeface_FreeType INHERITED;
+};
+
+class SkFontMgr_fontconfig : public SkFontMgr {
+ mutable SkAutoFcConfig fFC;
+ sk_sp<SkDataTable> fFamilyNames;
+ SkTypeface_FreeType::Scanner fScanner;
+
+ class StyleSet : public SkFontStyleSet {
+ public:
+ /** @param parent does not take ownership of the reference.
+ * @param fontSet takes ownership of the reference.
+ */
+ StyleSet(const SkFontMgr_fontconfig* parent, FcFontSet* fontSet)
+ : fFontMgr(SkRef(parent)), fFontSet(fontSet)
+ { }
+
+ virtual ~StyleSet() {
+ // Hold the lock while unrefing the font set.
+ FCLocker lock;
+ fFontSet.reset();
+ }
+
+ int count() override { return fFontSet->nfont; }
+
+ void getStyle(int index, SkFontStyle* style, SkString* styleName) override {
+ if (index < 0 || fFontSet->nfont <= index) {
+ return;
+ }
+
+ FCLocker lock;
+ if (style) {
+ *style = skfontstyle_from_fcpattern(fFontSet->fonts[index]);
+ }
+ if (styleName) {
+ *styleName = get_string(fFontSet->fonts[index], FC_STYLE);
+ }
+ }
+
+ SkTypeface* createTypeface(int index) override {
+ FCLocker lock;
+
+ FcPattern* match = fFontSet->fonts[index];
+ return fFontMgr->createTypefaceFromFcPattern(match);
+ }
+
+ SkTypeface* matchStyle(const SkFontStyle& style) override {
+ FCLocker lock;
+
+ SkAutoFcPattern pattern;
+ fcpattern_from_skfontstyle(style, pattern);
+ FcConfigSubstitute(fFontMgr->fFC, pattern, FcMatchPattern);
+ FcDefaultSubstitute(pattern);
+
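+            // Match only against the fonts in this style set, not the whole config.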
+ FcResult result;
+ FcFontSet* fontSets[1] = { fFontSet };
+ SkAutoFcPattern match(FcFontSetMatch(fFontMgr->fFC,
+ fontSets, SK_ARRAY_COUNT(fontSets),
+ pattern, &result));
+ if (nullptr == match) {
+ return nullptr;
+ }
+
+ return fFontMgr->createTypefaceFromFcPattern(match);
+ }
+
+ private:
+ SkAutoTUnref<const SkFontMgr_fontconfig> fFontMgr;
+ SkAutoFcFontSet fFontSet;
+ };
+
+ static bool FindName(const SkTDArray<const char*>& list, const char* str) {
+ int count = list.count();
+ for (int i = 0; i < count; ++i) {
+ if (!strcmp(list[i], str)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ static sk_sp<SkDataTable> GetFamilyNames(FcConfig* fcconfig) {
+ FCLocker lock;
+
+ SkTDArray<const char*> names;
+ SkTDArray<size_t> sizes;
+
+ static const FcSetName fcNameSet[] = { FcSetSystem, FcSetApplication };
+ for (int setIndex = 0; setIndex < (int)SK_ARRAY_COUNT(fcNameSet); ++setIndex) {
+ // Return value of FcConfigGetFonts must not be destroyed.
+ FcFontSet* allFonts(FcConfigGetFonts(fcconfig, fcNameSet[setIndex]));
+ if (nullptr == allFonts) {
+ continue;
+ }
+
+ for (int fontIndex = 0; fontIndex < allFonts->nfont; ++fontIndex) {
+ FcPattern* current = allFonts->fonts[fontIndex];
+ for (int id = 0; ; ++id) {
+ FcChar8* fcFamilyName;
+ FcResult result = FcPatternGetString(current, FC_FAMILY, id, &fcFamilyName);
+ if (FcResultNoId == result) {
+ break;
+ }
+ if (FcResultMatch != result) {
+ continue;
+ }
+ const char* familyName = reinterpret_cast<const char*>(fcFamilyName);
+ if (familyName && !FindName(names, familyName)) {
+ *names.append() = familyName;
+ *sizes.append() = strlen(familyName) + 1;
+ }
+ }
+ }
+ }
+
+ return SkDataTable::MakeCopyArrays((void const *const *)names.begin(),
+ sizes.begin(), names.count());
+ }
+
+ static bool FindByFcPattern(SkTypeface* cached, void* ctx) {
+ SkTypeface_fontconfig* cshFace = static_cast<SkTypeface_fontconfig*>(cached);
+ FcPattern* ctxPattern = static_cast<FcPattern*>(ctx);
+ return FcTrue == FcPatternEqual(cshFace->fPattern, ctxPattern);
+ }
+
+ mutable SkMutex fTFCacheMutex;
+ mutable SkTypefaceCache fTFCache;
+ /** Creates a typeface using a typeface cache.
+ * @param pattern a complete pattern from FcFontRenderPrepare.
+ */
+ SkTypeface* createTypefaceFromFcPattern(FcPattern* pattern) const {
+ FCLocker::AssertHeld();
+ SkAutoMutexAcquire ama(fTFCacheMutex);
+ SkTypeface* face = fTFCache.findByProcAndRef(FindByFcPattern, pattern);
+ if (nullptr == face) {
+ FcPatternReference(pattern);
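+            // Create() takes ownership of a reference, so take an extra one here
+            // and leave the caller's reference to 'pattern' untouched.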
+ face = SkTypeface_fontconfig::Create(pattern);
+ if (face) {
+ fTFCache.add(face);
+ }
+ }
+ return face;
+ }
+
+public:
+ /** Takes control of the reference to 'config'. */
+ explicit SkFontMgr_fontconfig(FcConfig* config)
+ : fFC(config ? config : FcInitLoadConfigAndFonts())
+ , fFamilyNames(GetFamilyNames(fFC)) { }
+
+ virtual ~SkFontMgr_fontconfig() {
+ // Hold the lock while unrefing the config.
+ FCLocker lock;
+ fFC.reset();
+ }
+
+protected:
+ int onCountFamilies() const override {
+ return fFamilyNames->count();
+ }
+
+ void onGetFamilyName(int index, SkString* familyName) const override {
+ familyName->set(fFamilyNames->atStr(index));
+ }
+
+ SkFontStyleSet* onCreateStyleSet(int index) const override {
+ return this->onMatchFamily(fFamilyNames->atStr(index));
+ }
+
+ /** True if any string object value in the font is the same
+ * as a string object value in the pattern.
+ */
+ static bool AnyMatching(FcPattern* font, FcPattern* pattern, const char* object) {
+ FcChar8* fontString;
+ FcChar8* patternString;
+ FcResult result;
+ // Set an arbitrary limit on the number of pattern object values to consider.
+ // TODO: re-write this to avoid N*M
+ static const int maxId = 16;
+ for (int patternId = 0; patternId < maxId; ++patternId) {
+ result = FcPatternGetString(pattern, object, patternId, &patternString);
+ if (FcResultNoId == result) {
+ break;
+ }
+ if (FcResultMatch != result) {
+ continue;
+ }
+ for (int fontId = 0; fontId < maxId; ++fontId) {
+ result = FcPatternGetString(font, object, fontId, &fontString);
+ if (FcResultNoId == result) {
+ break;
+ }
+ if (FcResultMatch != result) {
+ continue;
+ }
+ if (0 == FcStrCmpIgnoreCase(patternString, fontString)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ static bool FontAccessible(FcPattern* font) {
+ // FontConfig can return fonts which are unreadable.
+ const char* filename = get_string(font, FC_FILE, nullptr);
+ if (nullptr == filename) {
+ return false;
+ }
+ return sk_exists(filename, kRead_SkFILE_Flag);
+ }
+
+ static bool FontFamilyNameMatches(FcPattern* font, FcPattern* pattern) {
+ return AnyMatching(font, pattern, FC_FAMILY);
+ }
+
+ static bool FontContainsCharacter(FcPattern* font, uint32_t character) {
+ FcResult result;
+ FcCharSet* matchCharSet;
+ for (int charSetId = 0; ; ++charSetId) {
+ result = FcPatternGetCharSet(font, FC_CHARSET, charSetId, &matchCharSet);
+ if (FcResultNoId == result) {
+ break;
+ }
+ if (FcResultMatch != result) {
+ continue;
+ }
+ if (FcCharSetHasChar(matchCharSet, character)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override {
+ FCLocker lock;
+
+ SkAutoFcPattern pattern;
+ FcPatternAddString(pattern, FC_FAMILY, (FcChar8*)familyName);
+ FcConfigSubstitute(fFC, pattern, FcMatchPattern);
+ FcDefaultSubstitute(pattern);
+
+ FcPattern* matchPattern;
+ SkAutoFcPattern strongPattern(nullptr);
+ if (familyName) {
+ strongPattern.reset(FcPatternDuplicate(pattern));
+ remove_weak(strongPattern, FC_FAMILY);
+ matchPattern = strongPattern;
+ } else {
+ matchPattern = pattern;
+ }
+
+ SkAutoFcFontSet matches;
+ // TODO: Some families have 'duplicates' due to symbolic links.
+ // The patterns are exactly the same except for the FC_FILE.
+ // It should be possible to collapse these patterns by normalizing.
+ static const FcSetName fcNameSet[] = { FcSetSystem, FcSetApplication };
+ for (int setIndex = 0; setIndex < (int)SK_ARRAY_COUNT(fcNameSet); ++setIndex) {
+ // Return value of FcConfigGetFonts must not be destroyed.
+ FcFontSet* allFonts(FcConfigGetFonts(fFC, fcNameSet[setIndex]));
+ if (nullptr == allFonts) {
+ continue;
+ }
+
+ for (int fontIndex = 0; fontIndex < allFonts->nfont; ++fontIndex) {
+ FcPattern* font = allFonts->fonts[fontIndex];
+ if (FontAccessible(font) && FontFamilyNameMatches(font, matchPattern)) {
+ FcFontSetAdd(matches, FcFontRenderPrepare(fFC, pattern, font));
+ }
+ }
+ }
+
+ return new StyleSet(this, matches.release());
+ }
+
+ virtual SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& style) const override
+ {
+ FCLocker lock;
+
+ SkAutoFcPattern pattern;
+ FcPatternAddString(pattern, FC_FAMILY, (FcChar8*)familyName);
+ fcpattern_from_skfontstyle(style, pattern);
+ FcConfigSubstitute(fFC, pattern, FcMatchPattern);
+ FcDefaultSubstitute(pattern);
+
+        // We really want to match strong (preferred) and same (acceptable) only here.
+        // If a family name was specified, assume that any weak matches after the last strong match
+        // are weak (default) and ignore them.
+        // The reason for this is that after substitution the pattern for 'sans-serif' looks like
+ // "wwwwwwwwwwwwwwswww" where there are many weak but preferred names, followed by defaults.
+ // So it is possible to have weakly matching but preferred names.
+ // In aliases, bindings are weak by default, so this is easy and common.
+ // If no family name was specified, we'll probably only get weak matches, but that's ok.
+ FcPattern* matchPattern;
+ SkAutoFcPattern strongPattern(nullptr);
+ if (familyName) {
+ strongPattern.reset(FcPatternDuplicate(pattern));
+ remove_weak(strongPattern, FC_FAMILY);
+ matchPattern = strongPattern;
+ } else {
+ matchPattern = pattern;
+ }
+
+ FcResult result;
+ SkAutoFcPattern font(FcFontMatch(fFC, pattern, &result));
+ if (nullptr == font || !FontAccessible(font) || !FontFamilyNameMatches(font, matchPattern)) {
+ return nullptr;
+ }
+
+ return createTypefaceFromFcPattern(font);
+ }
+
+ virtual SkTypeface* onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle& style,
+ const char* bcp47[],
+ int bcp47Count,
+ SkUnichar character) const override
+ {
+ FCLocker lock;
+
+ SkAutoFcPattern pattern;
+ if (familyName) {
+ FcValue familyNameValue;
+ familyNameValue.type = FcTypeString;
+ familyNameValue.u.s = reinterpret_cast<const FcChar8*>(familyName);
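+            // Add the requested family only as a weak preference, so that fonts
+            // which actually cover the requested character can still win the match.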
+ FcPatternAddWeak(pattern, FC_FAMILY, familyNameValue, FcFalse);
+ }
+ fcpattern_from_skfontstyle(style, pattern);
+
+ SkAutoFcCharSet charSet;
+ FcCharSetAddChar(charSet, character);
+ FcPatternAddCharSet(pattern, FC_CHARSET, charSet);
+
+ if (bcp47Count > 0) {
+ SkASSERT(bcp47);
+ SkAutoFcLangSet langSet;
+ for (int i = bcp47Count; i --> 0;) {
+ FcLangSetAdd(langSet, (const FcChar8*)bcp47[i]);
+ }
+ FcPatternAddLangSet(pattern, FC_LANG, langSet);
+ }
+
+ FcConfigSubstitute(fFC, pattern, FcMatchPattern);
+ FcDefaultSubstitute(pattern);
+
+ FcResult result;
+ SkAutoFcPattern font(FcFontMatch(fFC, pattern, &result));
+ if (nullptr == font || !FontAccessible(font) || !FontContainsCharacter(font, character)) {
+ return nullptr;
+ }
+
+ return createTypefaceFromFcPattern(font);
+ }
+
+ virtual SkTypeface* onMatchFaceStyle(const SkTypeface* typeface,
+ const SkFontStyle& style) const override
+ {
+ //TODO: should the SkTypeface_fontconfig know its family?
+ const SkTypeface_fontconfig* fcTypeface =
+ static_cast<const SkTypeface_fontconfig*>(typeface);
+ return this->matchFamilyStyle(get_string(fcTypeface->fPattern, FC_FAMILY), style);
+ }
+
+ SkTypeface* onCreateFromStream(SkStreamAsset* bareStream, int ttcIndex) const override {
+ std::unique_ptr<SkStreamAsset> stream(bareStream);
+ const size_t length = stream->getLength();
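+        // Reject empty streams and anything larger than 1GiB as font data.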
+ if (length <= 0 || (1u << 30) < length) {
+ return nullptr;
+ }
+
+ SkFontStyle style;
+ bool isFixedWidth = false;
+ if (!fScanner.scanFont(stream.get(), ttcIndex, nullptr, &style, &isFixedWidth, nullptr)) {
+ return nullptr;
+ }
+
+ auto data = skstd::make_unique<SkFontData>(std::move(stream), ttcIndex, nullptr, 0);
+ return new SkTypeface_stream(std::move(data), style, isFixedWidth);
+ }
+
+ SkTypeface* onCreateFromStream(SkStreamAsset* s, const FontParameters& params) const override {
+ using Scanner = SkTypeface_FreeType::Scanner;
+ std::unique_ptr<SkStreamAsset> stream(s);
+ bool isFixedPitch;
+ SkFontStyle style;
+ SkString name;
+ Scanner::AxisDefinitions axisDefinitions;
+ if (!fScanner.scanFont(stream.get(), params.getCollectionIndex(),
+ &name, &style, &isFixedPitch, &axisDefinitions))
+ {
+ return nullptr;
+ }
+
+ int paramAxisCount;
+ const FontParameters::Axis* paramAxes = params.getAxes(&paramAxisCount);
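+        // Resolve the requested variation axes against the axes the font declares.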
+ SkAutoSTMalloc<4, SkFixed> axisValues(axisDefinitions.count());
+ Scanner::computeAxisValues(axisDefinitions, paramAxes, paramAxisCount, axisValues, name);
+
+ auto data = skstd::make_unique<SkFontData>(std::move(stream), params.getCollectionIndex(),
+ axisValues.get(), axisDefinitions.count());
+ return new SkTypeface_stream(std::move(data), style, isFixedPitch);
+ }
+
+ SkTypeface* onCreateFromData(SkData* data, int ttcIndex) const override {
+ return this->createFromStream(new SkMemoryStream(sk_ref_sp(data)), ttcIndex);
+ }
+
+ SkTypeface* onCreateFromFile(const char path[], int ttcIndex) const override {
+ return this->createFromStream(SkStream::MakeFromFile(path).release(), ttcIndex);
+ }
+
+ SkTypeface* onCreateFromFontData(std::unique_ptr<SkFontData> fontData) const override {
+ SkStreamAsset* stream(fontData->getStream());
+ const size_t length = stream->getLength();
+ if (length <= 0 || (1u << 30) < length) {
+ return nullptr;
+ }
+
+ const int ttcIndex = fontData->getIndex();
+ SkFontStyle style;
+ bool isFixedWidth = false;
+ if (!fScanner.scanFont(stream, ttcIndex, nullptr, &style, &isFixedWidth, nullptr)) {
+ return nullptr;
+ }
+
+ return new SkTypeface_stream(std::move(fontData), style, isFixedWidth);
+ }
+
+ SkTypeface* onLegacyCreateTypeface(const char familyName[], SkFontStyle style) const override {
+ SkAutoTUnref<SkTypeface> typeface(this->matchFamilyStyle(familyName, style));
+ if (typeface.get()) {
+ return typeface.release();
+ }
+
+ return this->matchFamilyStyle(nullptr, style);
+ }
+};
+
+SK_API SkFontMgr* SkFontMgr_New_FontConfig(FcConfig* fc) {
+ return new SkFontMgr_fontconfig(fc);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_fontconfig_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_fontconfig_factory.cpp
new file mode 100644
index 000000000..cdf055608
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_fontconfig_factory.cpp
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFontMgr.h"
+#include "SkFontMgr_fontconfig.h"
+#include "SkTypes.h"
+
+SkFontMgr* SkFontMgr::Factory() {
+ return SkFontMgr_New_FontConfig(nullptr);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_win_dw.cpp b/gfx/skia/skia/src/ports/SkFontMgr_win_dw.cpp
new file mode 100644
index 000000000..7201dc10b
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_win_dw.cpp
@@ -0,0 +1,1095 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32)
+
+#include "SkDWrite.h"
+#include "SkDWriteFontFileStream.h"
+#include "SkFontMgr.h"
+#include "SkHRESULT.h"
+#include "SkMutex.h"
+#include "SkStream.h"
+#include "SkTScopedComPtr.h"
+#include "SkTypeface.h"
+#include "SkTypefaceCache.h"
+#include "SkTypeface_win_dw.h"
+#include "SkTypes.h"
+#include "SkUtils.h"
+
+#include <dwrite.h>
+
+#if SK_HAS_DWRITE_2_H
+#include <dwrite_2.h>
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+
+class StreamFontFileLoader : public IDWriteFontFileLoader {
+public:
+ // IUnknown methods
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, void** ppvObject);
+ virtual ULONG STDMETHODCALLTYPE AddRef();
+ virtual ULONG STDMETHODCALLTYPE Release();
+
+ // IDWriteFontFileLoader methods
+ virtual HRESULT STDMETHODCALLTYPE CreateStreamFromKey(
+ void const* fontFileReferenceKey,
+ UINT32 fontFileReferenceKeySize,
+ IDWriteFontFileStream** fontFileStream);
+
+ // Takes ownership of stream.
+ static HRESULT Create(SkStreamAsset* stream, StreamFontFileLoader** streamFontFileLoader) {
+ *streamFontFileLoader = new StreamFontFileLoader(stream);
+ if (nullptr == *streamFontFileLoader) {
+ return E_OUTOFMEMORY;
+ }
+ return S_OK;
+ }
+
+ SkAutoTDelete<SkStreamAsset> fStream;
+
+private:
+ StreamFontFileLoader(SkStreamAsset* stream) : fStream(stream), fRefCount(1) { }
+ virtual ~StreamFontFileLoader() { }
+
+ ULONG fRefCount;
+};
+
+HRESULT StreamFontFileLoader::QueryInterface(REFIID iid, void** ppvObject) {
+ if (iid == IID_IUnknown || iid == __uuidof(IDWriteFontFileLoader)) {
+ *ppvObject = this;
+ AddRef();
+ return S_OK;
+ } else {
+ *ppvObject = nullptr;
+ return E_NOINTERFACE;
+ }
+}
+
+ULONG StreamFontFileLoader::AddRef() {
+ return InterlockedIncrement(&fRefCount);
+}
+
+ULONG StreamFontFileLoader::Release() {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+}
+
+HRESULT StreamFontFileLoader::CreateStreamFromKey(
+ void const* fontFileReferenceKey,
+ UINT32 fontFileReferenceKeySize,
+ IDWriteFontFileStream** fontFileStream)
+{
+ SkTScopedComPtr<SkDWriteFontFileStreamWrapper> stream;
+ HR(SkDWriteFontFileStreamWrapper::Create(fStream->duplicate(), &stream));
+ *fontFileStream = stream.release();
+ return S_OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+class StreamFontFileEnumerator : public IDWriteFontFileEnumerator {
+public:
+ // IUnknown methods
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, void** ppvObject);
+ virtual ULONG STDMETHODCALLTYPE AddRef();
+ virtual ULONG STDMETHODCALLTYPE Release();
+
+ // IDWriteFontFileEnumerator methods
+ virtual HRESULT STDMETHODCALLTYPE MoveNext(BOOL* hasCurrentFile);
+ virtual HRESULT STDMETHODCALLTYPE GetCurrentFontFile(IDWriteFontFile** fontFile);
+
+ static HRESULT Create(IDWriteFactory* factory, IDWriteFontFileLoader* fontFileLoader,
+ StreamFontFileEnumerator** streamFontFileEnumerator) {
+ *streamFontFileEnumerator = new StreamFontFileEnumerator(factory, fontFileLoader);
+ if (nullptr == *streamFontFileEnumerator) {
+ return E_OUTOFMEMORY;
+ }
+ return S_OK;
+ }
+private:
+ StreamFontFileEnumerator(IDWriteFactory* factory, IDWriteFontFileLoader* fontFileLoader);
+ virtual ~StreamFontFileEnumerator() { }
+
+ ULONG fRefCount;
+
+ SkTScopedComPtr<IDWriteFactory> fFactory;
+ SkTScopedComPtr<IDWriteFontFile> fCurrentFile;
+ SkTScopedComPtr<IDWriteFontFileLoader> fFontFileLoader;
+ bool fHasNext;
+};
+
+StreamFontFileEnumerator::StreamFontFileEnumerator(IDWriteFactory* factory,
+ IDWriteFontFileLoader* fontFileLoader)
+ : fRefCount(1)
+ , fFactory(SkRefComPtr(factory))
+ , fCurrentFile()
+ , fFontFileLoader(SkRefComPtr(fontFileLoader))
+ , fHasNext(true)
+{ }
+
+HRESULT StreamFontFileEnumerator::QueryInterface(REFIID iid, void** ppvObject) {
+ if (iid == IID_IUnknown || iid == __uuidof(IDWriteFontFileEnumerator)) {
+ *ppvObject = this;
+ AddRef();
+ return S_OK;
+ } else {
+ *ppvObject = nullptr;
+ return E_NOINTERFACE;
+ }
+}
+
+ULONG StreamFontFileEnumerator::AddRef() {
+ return InterlockedIncrement(&fRefCount);
+}
+
+ULONG StreamFontFileEnumerator::Release() {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+}
+
+HRESULT StreamFontFileEnumerator::MoveNext(BOOL* hasCurrentFile) {
+ *hasCurrentFile = FALSE;
+
+ if (!fHasNext) {
+ return S_OK;
+ }
+ fHasNext = false;
+
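+    // The reference key is ignored by StreamFontFileLoader, which always wraps
+    // the single stream it was constructed with.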
+ UINT32 dummy = 0;
+ HR(fFactory->CreateCustomFontFileReference(
+ &dummy, //cannot be nullptr
+ sizeof(dummy), //even if this is 0
+ fFontFileLoader.get(),
+ &fCurrentFile));
+
+ *hasCurrentFile = TRUE;
+ return S_OK;
+}
+
+HRESULT StreamFontFileEnumerator::GetCurrentFontFile(IDWriteFontFile** fontFile) {
+ if (fCurrentFile.get() == nullptr) {
+ *fontFile = nullptr;
+ return E_FAIL;
+ }
+
+ *fontFile = SkRefComPtr(fCurrentFile.get());
+ return S_OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+class StreamFontCollectionLoader : public IDWriteFontCollectionLoader {
+public:
+ // IUnknown methods
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, void** ppvObject);
+ virtual ULONG STDMETHODCALLTYPE AddRef();
+ virtual ULONG STDMETHODCALLTYPE Release();
+
+ // IDWriteFontCollectionLoader methods
+ virtual HRESULT STDMETHODCALLTYPE CreateEnumeratorFromKey(
+ IDWriteFactory* factory,
+ void const* collectionKey,
+ UINT32 collectionKeySize,
+ IDWriteFontFileEnumerator** fontFileEnumerator);
+
+ static HRESULT Create(IDWriteFontFileLoader* fontFileLoader,
+ StreamFontCollectionLoader** streamFontCollectionLoader) {
+ *streamFontCollectionLoader = new StreamFontCollectionLoader(fontFileLoader);
+ if (nullptr == *streamFontCollectionLoader) {
+ return E_OUTOFMEMORY;
+ }
+ return S_OK;
+ }
+private:
+ StreamFontCollectionLoader(IDWriteFontFileLoader* fontFileLoader)
+ : fRefCount(1)
+ , fFontFileLoader(SkRefComPtr(fontFileLoader))
+ { }
+ virtual ~StreamFontCollectionLoader() { }
+
+ ULONG fRefCount;
+ SkTScopedComPtr<IDWriteFontFileLoader> fFontFileLoader;
+};
+
+HRESULT StreamFontCollectionLoader::QueryInterface(REFIID iid, void** ppvObject) {
+ if (iid == IID_IUnknown || iid == __uuidof(IDWriteFontCollectionLoader)) {
+ *ppvObject = this;
+ AddRef();
+ return S_OK;
+ } else {
+ *ppvObject = nullptr;
+ return E_NOINTERFACE;
+ }
+}
+
+ULONG StreamFontCollectionLoader::AddRef() {
+ return InterlockedIncrement(&fRefCount);
+}
+
+ULONG StreamFontCollectionLoader::Release() {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+}
+
+HRESULT StreamFontCollectionLoader::CreateEnumeratorFromKey(
+ IDWriteFactory* factory,
+ void const* collectionKey,
+ UINT32 collectionKeySize,
+ IDWriteFontFileEnumerator** fontFileEnumerator)
+{
+ SkTScopedComPtr<StreamFontFileEnumerator> enumerator;
+ HR(StreamFontFileEnumerator::Create(factory, fFontFileLoader.get(), &enumerator));
+ *fontFileEnumerator = enumerator.release();
+ return S_OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+class SkFontMgr_DirectWrite : public SkFontMgr {
+public:
+ /** localeNameLength must include the null terminator. */
+ SkFontMgr_DirectWrite(IDWriteFactory* factory, IDWriteFontCollection* fontCollection,
+ IDWriteFontFallback* fallback, WCHAR* localeName, int localeNameLength)
+ : fFactory(SkRefComPtr(factory))
+#if SK_HAS_DWRITE_2_H
+ , fFontFallback(SkSafeRefComPtr(fallback))
+#endif
+ , fFontCollection(SkRefComPtr(fontCollection))
+ , fLocaleName(localeNameLength)
+ {
+#if SK_HAS_DWRITE_2_H
+ if (!SUCCEEDED(fFactory->QueryInterface(&fFactory2))) {
+ // IUnknown::QueryInterface states that if it fails, punk will be set to nullptr.
+ // http://blogs.msdn.com/b/oldnewthing/archive/2004/03/26/96777.aspx
+ SkASSERT_RELEASE(nullptr == fFactory2.get());
+ }
+ if (fFontFallback.get()) {
+            // factory must be provided if fallback is non-null, else the fallback will not be used.
+ SkASSERT(fFactory2.get());
+ }
+#endif
+ memcpy(fLocaleName.get(), localeName, localeNameLength * sizeof(WCHAR));
+ }
+
+protected:
+ int onCountFamilies() const override;
+ void onGetFamilyName(int index, SkString* familyName) const override;
+ SkFontStyleSet* onCreateStyleSet(int index) const override;
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override;
+ SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontstyle) const override;
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override;
+ SkTypeface* onMatchFaceStyle(const SkTypeface* familyMember,
+ const SkFontStyle& fontstyle) const override;
+ SkTypeface* onCreateFromStream(SkStreamAsset* stream, int ttcIndex) const override;
+ SkTypeface* onCreateFromData(SkData* data, int ttcIndex) const override;
+ SkTypeface* onCreateFromFile(const char path[], int ttcIndex) const override;
+ SkTypeface* onLegacyCreateTypeface(const char familyName[], SkFontStyle) const override;
+
+private:
+ HRESULT getByFamilyName(const WCHAR familyName[], IDWriteFontFamily** fontFamily) const;
+ HRESULT getDefaultFontFamily(IDWriteFontFamily** fontFamily) const;
+
+ /** Creates a typeface using a typeface cache. */
+ SkTypeface* createTypefaceFromDWriteFont(IDWriteFontFace* fontFace,
+ IDWriteFont* font,
+ IDWriteFontFamily* fontFamily) const;
+
+ SkTScopedComPtr<IDWriteFactory> fFactory;
+#if SK_HAS_DWRITE_2_H
+ SkTScopedComPtr<IDWriteFactory2> fFactory2;
+ SkTScopedComPtr<IDWriteFontFallback> fFontFallback;
+#endif
+ SkTScopedComPtr<IDWriteFontCollection> fFontCollection;
+ SkSMallocWCHAR fLocaleName;
+ mutable SkMutex fTFCacheMutex;
+ mutable SkTypefaceCache fTFCache;
+
+ friend class SkFontStyleSet_DirectWrite;
+ friend class FontFallbackRenderer;
+};
+
+class SkFontStyleSet_DirectWrite : public SkFontStyleSet {
+public:
+ SkFontStyleSet_DirectWrite(const SkFontMgr_DirectWrite* fontMgr,
+ IDWriteFontFamily* fontFamily)
+ : fFontMgr(SkRef(fontMgr))
+ , fFontFamily(SkRefComPtr(fontFamily))
+ { }
+
+ int count() override;
+ void getStyle(int index, SkFontStyle* fs, SkString* styleName) override;
+ SkTypeface* createTypeface(int index) override;
+ SkTypeface* matchStyle(const SkFontStyle& pattern) override;
+
+private:
+ SkAutoTUnref<const SkFontMgr_DirectWrite> fFontMgr;
+ SkTScopedComPtr<IDWriteFontFamily> fFontFamily;
+};
+
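+// Per COM identity rules, two pointers refer to the same object exactly when
+// their IUnknown interfaces compare equal.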
+static HRESULT are_same(IUnknown* a, IUnknown* b, bool& same) {
+ SkTScopedComPtr<IUnknown> iunkA;
+ HRM(a->QueryInterface(&iunkA), "Failed to QI<IUnknown> for a.");
+
+ SkTScopedComPtr<IUnknown> iunkB;
+ HRM(b->QueryInterface(&iunkB), "Failed to QI<IUnknown> for b.");
+
+ same = (iunkA.get() == iunkB.get());
+ return S_OK;
+}
+
+struct ProtoDWriteTypeface {
+ IDWriteFontFace* fDWriteFontFace;
+ IDWriteFont* fDWriteFont;
+ IDWriteFontFamily* fDWriteFontFamily;
+};
+
+static bool FindByDWriteFont(SkTypeface* cached, void* ctx) {
+ DWriteFontTypeface* cshFace = reinterpret_cast<DWriteFontTypeface*>(cached);
+ ProtoDWriteTypeface* ctxFace = reinterpret_cast<ProtoDWriteTypeface*>(ctx);
+ bool same;
+
+ //Check to see if the two fonts are identical.
+ HRB(are_same(cshFace->fDWriteFont.get(), ctxFace->fDWriteFont, same));
+ if (same) {
+ return true;
+ }
+
+ HRB(are_same(cshFace->fDWriteFontFace.get(), ctxFace->fDWriteFontFace, same));
+ if (same) {
+ return true;
+ }
+
+ //Check if the two fonts share the same loader and have the same key.
+ UINT32 cshNumFiles;
+ UINT32 ctxNumFiles;
+ HRB(cshFace->fDWriteFontFace->GetFiles(&cshNumFiles, nullptr));
+ HRB(ctxFace->fDWriteFontFace->GetFiles(&ctxNumFiles, nullptr));
+ if (cshNumFiles != ctxNumFiles) {
+ return false;
+ }
+
+ SkTScopedComPtr<IDWriteFontFile> cshFontFile;
+ SkTScopedComPtr<IDWriteFontFile> ctxFontFile;
+ HRB(cshFace->fDWriteFontFace->GetFiles(&cshNumFiles, &cshFontFile));
+ HRB(ctxFace->fDWriteFontFace->GetFiles(&ctxNumFiles, &ctxFontFile));
+
+ //for (each file) { //we currently only admit fonts from one file.
+ SkTScopedComPtr<IDWriteFontFileLoader> cshFontFileLoader;
+ SkTScopedComPtr<IDWriteFontFileLoader> ctxFontFileLoader;
+ HRB(cshFontFile->GetLoader(&cshFontFileLoader));
+ HRB(ctxFontFile->GetLoader(&ctxFontFileLoader));
+ HRB(are_same(cshFontFileLoader.get(), ctxFontFileLoader.get(), same));
+ if (!same) {
+ return false;
+ }
+ //}
+
+ const void* cshRefKey;
+ UINT32 cshRefKeySize;
+ const void* ctxRefKey;
+ UINT32 ctxRefKeySize;
+ HRB(cshFontFile->GetReferenceKey(&cshRefKey, &cshRefKeySize));
+ HRB(ctxFontFile->GetReferenceKey(&ctxRefKey, &ctxRefKeySize));
+ if (cshRefKeySize != ctxRefKeySize) {
+ return false;
+ }
+ if (0 != memcmp(cshRefKey, ctxRefKey, ctxRefKeySize)) {
+ return false;
+ }
+
+ //TODO: better means than comparing name strings?
+ //NOTE: .ttc and fake bold/italic will end up here.
+ SkTScopedComPtr<IDWriteLocalizedStrings> cshFamilyNames;
+ SkTScopedComPtr<IDWriteLocalizedStrings> cshFaceNames;
+ HRB(cshFace->fDWriteFontFamily->GetFamilyNames(&cshFamilyNames));
+ HRB(cshFace->fDWriteFont->GetFaceNames(&cshFaceNames));
+ UINT32 cshFamilyNameLength;
+ UINT32 cshFaceNameLength;
+ HRB(cshFamilyNames->GetStringLength(0, &cshFamilyNameLength));
+ HRB(cshFaceNames->GetStringLength(0, &cshFaceNameLength));
+
+ SkTScopedComPtr<IDWriteLocalizedStrings> ctxFamilyNames;
+ SkTScopedComPtr<IDWriteLocalizedStrings> ctxFaceNames;
+ HRB(ctxFace->fDWriteFontFamily->GetFamilyNames(&ctxFamilyNames));
+ HRB(ctxFace->fDWriteFont->GetFaceNames(&ctxFaceNames));
+ UINT32 ctxFamilyNameLength;
+ UINT32 ctxFaceNameLength;
+ HRB(ctxFamilyNames->GetStringLength(0, &ctxFamilyNameLength));
+ HRB(ctxFaceNames->GetStringLength(0, &ctxFaceNameLength));
+
+ if (cshFamilyNameLength != ctxFamilyNameLength ||
+ cshFaceNameLength != ctxFaceNameLength)
+ {
+ return false;
+ }
+
+ SkSMallocWCHAR cshFamilyName(cshFamilyNameLength+1);
+ SkSMallocWCHAR cshFaceName(cshFaceNameLength+1);
+ HRB(cshFamilyNames->GetString(0, cshFamilyName.get(), cshFamilyNameLength+1));
+ HRB(cshFaceNames->GetString(0, cshFaceName.get(), cshFaceNameLength+1));
+
+ SkSMallocWCHAR ctxFamilyName(ctxFamilyNameLength+1);
+ SkSMallocWCHAR ctxFaceName(ctxFaceNameLength+1);
+ HRB(ctxFamilyNames->GetString(0, ctxFamilyName.get(), ctxFamilyNameLength+1));
+ HRB(ctxFaceNames->GetString(0, ctxFaceName.get(), ctxFaceNameLength+1));
+
+ return wcscmp(cshFamilyName.get(), ctxFamilyName.get()) == 0 &&
+ wcscmp(cshFaceName.get(), ctxFaceName.get()) == 0;
+}
+
+SkTypeface* SkFontMgr_DirectWrite::createTypefaceFromDWriteFont(
+ IDWriteFontFace* fontFace,
+ IDWriteFont* font,
+ IDWriteFontFamily* fontFamily) const {
+ SkAutoMutexAcquire ama(fTFCacheMutex);
+ ProtoDWriteTypeface spec = { fontFace, font, fontFamily };
+ SkTypeface* face = fTFCache.findByProcAndRef(FindByDWriteFont, &spec);
+ if (nullptr == face) {
+ face = DWriteFontTypeface::Create(fFactory.get(), fontFace, font, fontFamily);
+ if (face) {
+ fTFCache.add(face);
+ }
+ }
+ return face;
+}
+
+int SkFontMgr_DirectWrite::onCountFamilies() const {
+ return fFontCollection->GetFontFamilyCount();
+}
+
+void SkFontMgr_DirectWrite::onGetFamilyName(int index, SkString* familyName) const {
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HRVM(fFontCollection->GetFontFamily(index, &fontFamily), "Could not get requested family.");
+
+ SkTScopedComPtr<IDWriteLocalizedStrings> familyNames;
+ HRVM(fontFamily->GetFamilyNames(&familyNames), "Could not get family names.");
+
+ sk_get_locale_string(familyNames.get(), fLocaleName.get(), familyName);
+}
+
+SkFontStyleSet* SkFontMgr_DirectWrite::onCreateStyleSet(int index) const {
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HRNM(fFontCollection->GetFontFamily(index, &fontFamily), "Could not get requested family.");
+
+ return new SkFontStyleSet_DirectWrite(this, fontFamily.get());
+}
+
+SkFontStyleSet* SkFontMgr_DirectWrite::onMatchFamily(const char familyName[]) const {
+ SkSMallocWCHAR dwFamilyName;
+ HRN(sk_cstring_to_wchar(familyName, &dwFamilyName));
+
+ UINT32 index;
+ BOOL exists;
+ HRNM(fFontCollection->FindFamilyName(dwFamilyName.get(), &index, &exists),
+ "Failed while finding family by name.");
+ if (!exists) {
+ return nullptr;
+ }
+
+ return this->onCreateStyleSet(index);
+}
+
+SkTypeface* SkFontMgr_DirectWrite::onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontstyle) const {
+ SkAutoTUnref<SkFontStyleSet> sset(this->matchFamily(familyName));
+ return sset->matchStyle(fontstyle);
+}
+
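+// Draws nothing; DrawGlyphRun only records the typeface DirectWrite resolved for
+// the probe character so the caller can retrieve it via FallbackTypeface().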
+class FontFallbackRenderer : public IDWriteTextRenderer {
+public:
+ FontFallbackRenderer(const SkFontMgr_DirectWrite* outer, UINT32 character)
+ : fRefCount(1), fOuter(SkSafeRef(outer)), fCharacter(character), fResolvedTypeface(nullptr) {
+ }
+
+ virtual ~FontFallbackRenderer() { }
+
+ // IDWriteTextRenderer methods
+ virtual HRESULT STDMETHODCALLTYPE DrawGlyphRun(
+ void* clientDrawingContext,
+ FLOAT baselineOriginX,
+ FLOAT baselineOriginY,
+ DWRITE_MEASURING_MODE measuringMode,
+ DWRITE_GLYPH_RUN const* glyphRun,
+ DWRITE_GLYPH_RUN_DESCRIPTION const* glyphRunDescription,
+ IUnknown* clientDrawingEffect) override
+ {
+ SkTScopedComPtr<IDWriteFont> font;
+ HRM(fOuter->fFontCollection->GetFontFromFontFace(glyphRun->fontFace, &font),
+ "Could not get font from font face.");
+
+ // It is possible that the font passed does not actually have the requested character,
+ // due to no font being found and getting the fallback font.
+ // Check that the font actually contains the requested character.
+ BOOL exists;
+ HRM(font->HasCharacter(fCharacter, &exists), "Could not find character.");
+
+ if (exists) {
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HRM(font->GetFontFamily(&fontFamily), "Could not get family.");
+ fResolvedTypeface = fOuter->createTypefaceFromDWriteFont(glyphRun->fontFace,
+ font.get(),
+ fontFamily.get());
+ }
+
+ return S_OK;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE DrawUnderline(
+ void* clientDrawingContext,
+ FLOAT baselineOriginX,
+ FLOAT baselineOriginY,
+ DWRITE_UNDERLINE const* underline,
+ IUnknown* clientDrawingEffect) override
+ { return E_NOTIMPL; }
+
+ virtual HRESULT STDMETHODCALLTYPE DrawStrikethrough(
+ void* clientDrawingContext,
+ FLOAT baselineOriginX,
+ FLOAT baselineOriginY,
+ DWRITE_STRIKETHROUGH const* strikethrough,
+ IUnknown* clientDrawingEffect) override
+ { return E_NOTIMPL; }
+
+ virtual HRESULT STDMETHODCALLTYPE DrawInlineObject(
+ void* clientDrawingContext,
+ FLOAT originX,
+ FLOAT originY,
+ IDWriteInlineObject* inlineObject,
+ BOOL isSideways,
+ BOOL isRightToLeft,
+ IUnknown* clientDrawingEffect) override
+ { return E_NOTIMPL; }
+
+ // IDWritePixelSnapping methods
+ virtual HRESULT STDMETHODCALLTYPE IsPixelSnappingDisabled(
+ void* clientDrawingContext,
+ BOOL* isDisabled) override
+ {
+ *isDisabled = FALSE;
+ return S_OK;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE GetCurrentTransform(
+ void* clientDrawingContext,
+ DWRITE_MATRIX* transform) override
+ {
+ const DWRITE_MATRIX ident = { 1.0, 0.0, 0.0, 1.0, 0.0, 0.0 };
+ *transform = ident;
+ return S_OK;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE GetPixelsPerDip(
+ void* clientDrawingContext,
+ FLOAT* pixelsPerDip) override
+ {
+ *pixelsPerDip = 1.0f;
+ return S_OK;
+ }
+
+ // IUnknown methods
+ ULONG STDMETHODCALLTYPE AddRef() override {
+ return InterlockedIncrement(&fRefCount);
+ }
+
+ ULONG STDMETHODCALLTYPE Release() override {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+ }
+
+    virtual HRESULT STDMETHODCALLTYPE QueryInterface(IID const& riid, void** ppvObject) override {
+ if (__uuidof(IUnknown) == riid ||
+ __uuidof(IDWritePixelSnapping) == riid ||
+ __uuidof(IDWriteTextRenderer) == riid)
+ {
+ *ppvObject = this;
+ this->AddRef();
+ return S_OK;
+ }
+ *ppvObject = nullptr;
+ return E_FAIL;
+ }
+
+ SkTypeface* FallbackTypeface() { return fResolvedTypeface; }
+
+protected:
+ ULONG fRefCount;
+ SkAutoTUnref<const SkFontMgr_DirectWrite> fOuter;
+ UINT32 fCharacter;
+ SkTypeface* fResolvedTypeface;
+};
+
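+// Minimal IDWriteTextAnalysisSource over a fixed string, used to drive
+// IDWriteFontFallback::MapCharacters.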
+class FontFallbackSource : public IDWriteTextAnalysisSource {
+public:
+ FontFallbackSource(const WCHAR* string, UINT32 length, const WCHAR* locale,
+ IDWriteNumberSubstitution* numberSubstitution)
+ : fString(string)
+ , fLength(length)
+ , fLocale(locale)
+ , fNumberSubstitution(numberSubstitution)
+ { }
+
+ virtual ~FontFallbackSource() { }
+
+ // IDWriteTextAnalysisSource methods
+ virtual HRESULT STDMETHODCALLTYPE GetTextAtPosition(
+ UINT32 textPosition,
+ WCHAR const** textString,
+ UINT32* textLength) override
+ {
+ if (fLength <= textPosition) {
+ *textString = nullptr;
+ *textLength = 0;
+ return S_OK;
+ }
+ *textString = fString + textPosition;
+ *textLength = fLength - textPosition;
+ return S_OK;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE GetTextBeforePosition(
+ UINT32 textPosition,
+ WCHAR const** textString,
+ UINT32* textLength) override
+ {
+ if (textPosition < 1 || fLength <= textPosition) {
+ *textString = nullptr;
+ *textLength = 0;
+ return S_OK;
+ }
+ *textString = fString;
+ *textLength = textPosition;
+ return S_OK;
+ }
+
+ virtual DWRITE_READING_DIRECTION STDMETHODCALLTYPE GetParagraphReadingDirection() override {
+ // TODO: this is also interesting.
+ return DWRITE_READING_DIRECTION_LEFT_TO_RIGHT;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE GetLocaleName(
+ UINT32 textPosition,
+ UINT32* textLength,
+ WCHAR const** localeName) override
+ {
+ *localeName = fLocale;
+ return S_OK;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE GetNumberSubstitution(
+ UINT32 textPosition,
+ UINT32* textLength,
+ IDWriteNumberSubstitution** numberSubstitution) override
+ {
+ *numberSubstitution = fNumberSubstitution;
+ return S_OK;
+ }
+
+ // IUnknown methods
+ ULONG STDMETHODCALLTYPE AddRef() override {
+ return InterlockedIncrement(&fRefCount);
+ }
+
+ ULONG STDMETHODCALLTYPE Release() override {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+ }
+
+    virtual HRESULT STDMETHODCALLTYPE QueryInterface(IID const& riid, void** ppvObject) override {
+ if (__uuidof(IUnknown) == riid ||
+ __uuidof(IDWriteTextAnalysisSource) == riid)
+ {
+ *ppvObject = this;
+ this->AddRef();
+ return S_OK;
+ }
+ *ppvObject = nullptr;
+ return E_FAIL;
+ }
+
+protected:
+ ULONG fRefCount;
+ const WCHAR* fString;
+ UINT32 fLength;
+ const WCHAR* fLocale;
+ IDWriteNumberSubstitution* fNumberSubstitution;
+};
+
+SkTypeface* SkFontMgr_DirectWrite::onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle& style,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const
+{
+ const DWriteStyle dwStyle(style);
+
+ const WCHAR* dwFamilyName = nullptr;
+ SkSMallocWCHAR dwFamilyNameLocal;
+ if (familyName) {
+ HRN(sk_cstring_to_wchar(familyName, &dwFamilyNameLocal));
+ dwFamilyName = dwFamilyNameLocal;
+ }
+
+ WCHAR str[16];
+ UINT32 strLen = static_cast<UINT32>(
+ SkUTF16_FromUnichar(character, reinterpret_cast<uint16_t*>(str)));
+
+ const SkSMallocWCHAR* dwBcp47;
+ SkSMallocWCHAR dwBcp47Local;
+ if (bcp47Count < 1) {
+ dwBcp47 = &fLocaleName;
+ } else {
+ // TODO: support fallback stack.
+ // TODO: DirectWrite supports 'zh-CN' or 'zh-Hans', but 'zh' misses completely
+ // and may produce a Japanese font.
+ HRN(sk_cstring_to_wchar(bcp47[bcp47Count - 1], &dwBcp47Local));
+ dwBcp47 = &dwBcp47Local;
+ }
+
+#if SK_HAS_DWRITE_2_H
+ if (fFactory2.get()) {
+ SkTScopedComPtr<IDWriteFontFallback> systemFontFallback;
+ IDWriteFontFallback* fontFallback = fFontFallback.get();
+ if (!fontFallback) {
+ HRNM(fFactory2->GetSystemFontFallback(&systemFontFallback),
+ "Could not get system fallback.");
+ fontFallback = systemFontFallback.get();
+ }
+
+ SkTScopedComPtr<IDWriteNumberSubstitution> numberSubstitution;
+ HRNM(fFactory2->CreateNumberSubstitution(DWRITE_NUMBER_SUBSTITUTION_METHOD_NONE, nullptr, TRUE,
+ &numberSubstitution),
+ "Could not create number substitution.");
+ SkTScopedComPtr<FontFallbackSource> fontFallbackSource(
+ new FontFallbackSource(str, strLen, *dwBcp47, numberSubstitution.get()));
+
+ UINT32 mappedLength;
+ SkTScopedComPtr<IDWriteFont> font;
+ FLOAT scale;
+ HRNM(fontFallback->MapCharacters(fontFallbackSource.get(),
+ 0, // textPosition,
+ strLen,
+ fFontCollection.get(),
+ dwFamilyName,
+ dwStyle.fWeight,
+ dwStyle.fSlant,
+ dwStyle.fWidth,
+ &mappedLength,
+ &font,
+ &scale),
+ "Could not map characters");
+ if (!font.get()) {
+ return nullptr;
+ }
+
+ SkTScopedComPtr<IDWriteFontFace> fontFace;
+ HRNM(font->CreateFontFace(&fontFace), "Could not get font face from font.");
+
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HRNM(font->GetFontFamily(&fontFamily), "Could not get family from font.");
+ return this->createTypefaceFromDWriteFont(fontFace.get(), font.get(), fontFamily.get());
+ }
+#else
+# pragma message("No dwrite_2.h is available, font fallback may be affected.")
+#endif
+
+ SkTScopedComPtr<IDWriteTextFormat> fallbackFormat;
+ HRNM(fFactory->CreateTextFormat(dwFamilyName ? dwFamilyName : L"",
+ fFontCollection.get(),
+ dwStyle.fWeight,
+ dwStyle.fSlant,
+ dwStyle.fWidth,
+ 72.0f,
+ *dwBcp47,
+ &fallbackFormat),
+ "Could not create text format.");
+
+ SkTScopedComPtr<IDWriteTextLayout> fallbackLayout;
+ HRNM(fFactory->CreateTextLayout(str, strLen, fallbackFormat.get(),
+ 200.0f, 200.0f,
+ &fallbackLayout),
+ "Could not create text layout.");
+
+ SkTScopedComPtr<FontFallbackRenderer> fontFallbackRenderer(
+ new FontFallbackRenderer(this, character));
+
+ HRNM(fallbackLayout->Draw(nullptr, fontFallbackRenderer.get(), 50.0f, 50.0f),
+ "Could not draw layout with renderer.");
+
+ return fontFallbackRenderer->FallbackTypeface();
+}
+
+SkTypeface* SkFontMgr_DirectWrite::onMatchFaceStyle(const SkTypeface* familyMember,
+ const SkFontStyle& fontstyle) const {
+ SkString familyName;
+ SkFontStyleSet_DirectWrite sset(
+ this, ((DWriteFontTypeface*)familyMember)->fDWriteFontFamily.get()
+ );
+ return sset.matchStyle(fontstyle);
+}
+
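+// RAII helper that unregisters the loader from the factory on destruction,
+// unless ownership is released via detatch().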
+template <typename T> class SkAutoIDWriteUnregister {
+public:
+ SkAutoIDWriteUnregister(IDWriteFactory* factory, T* unregister)
+ : fFactory(factory), fUnregister(unregister)
+ { }
+
+ ~SkAutoIDWriteUnregister() {
+ if (fUnregister) {
+ unregister(fFactory, fUnregister);
+ }
+ }
+
+ T* detatch() {
+ T* old = fUnregister;
+ fUnregister = nullptr;
+ return old;
+ }
+
+private:
+ HRESULT unregister(IDWriteFactory* factory, IDWriteFontFileLoader* unregister) {
+ return factory->UnregisterFontFileLoader(unregister);
+ }
+
+ HRESULT unregister(IDWriteFactory* factory, IDWriteFontCollectionLoader* unregister) {
+ return factory->UnregisterFontCollectionLoader(unregister);
+ }
+
+ IDWriteFactory* fFactory;
+ T* fUnregister;
+};
+
+SkTypeface* SkFontMgr_DirectWrite::onCreateFromStream(SkStreamAsset* stream, int ttcIndex) const {
+ SkTScopedComPtr<StreamFontFileLoader> fontFileLoader;
+ // This transfers ownership of stream to the new object.
+ HRN(StreamFontFileLoader::Create(stream, &fontFileLoader));
+ HRN(fFactory->RegisterFontFileLoader(fontFileLoader.get()));
+ SkAutoIDWriteUnregister<StreamFontFileLoader> autoUnregisterFontFileLoader(
+ fFactory.get(), fontFileLoader.get());
+
+ SkTScopedComPtr<StreamFontCollectionLoader> fontCollectionLoader;
+ HRN(StreamFontCollectionLoader::Create(fontFileLoader.get(), &fontCollectionLoader));
+ HRN(fFactory->RegisterFontCollectionLoader(fontCollectionLoader.get()));
+ SkAutoIDWriteUnregister<StreamFontCollectionLoader> autoUnregisterFontCollectionLoader(
+ fFactory.get(), fontCollectionLoader.get());
+
+ SkTScopedComPtr<IDWriteFontCollection> fontCollection;
+ HRN(fFactory->CreateCustomFontCollection(fontCollectionLoader.get(), nullptr, 0, &fontCollection));
+
+ // Find the first non-simulated font which has the given ttc index.
+ UINT32 familyCount = fontCollection->GetFontFamilyCount();
+ for (UINT32 familyIndex = 0; familyIndex < familyCount; ++familyIndex) {
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HRN(fontCollection->GetFontFamily(familyIndex, &fontFamily));
+
+ UINT32 fontCount = fontFamily->GetFontCount();
+ for (UINT32 fontIndex = 0; fontIndex < fontCount; ++fontIndex) {
+ SkTScopedComPtr<IDWriteFont> font;
+ HRN(fontFamily->GetFont(fontIndex, &font));
+ if (font->GetSimulations() != DWRITE_FONT_SIMULATIONS_NONE) {
+ continue;
+ }
+
+ SkTScopedComPtr<IDWriteFontFace> fontFace;
+ HRN(font->CreateFontFace(&fontFace));
+
+ UINT32 faceIndex = fontFace->GetIndex();
+ if (faceIndex == ttcIndex) {
+ return DWriteFontTypeface::Create(fFactory.get(),
+ fontFace.get(), font.get(), fontFamily.get(),
+ autoUnregisterFontFileLoader.detatch(),
+ autoUnregisterFontCollectionLoader.detatch());
+ }
+ }
+ }
+
+ return nullptr;
+}
+
+SkTypeface* SkFontMgr_DirectWrite::onCreateFromData(SkData* data, int ttcIndex) const {
+ return this->createFromStream(new SkMemoryStream(sk_ref_sp(data)), ttcIndex);
+}
+
+SkTypeface* SkFontMgr_DirectWrite::onCreateFromFile(const char path[], int ttcIndex) const {
+ return this->createFromStream(SkStream::MakeFromFile(path).release(), ttcIndex);
+}
+
+HRESULT SkFontMgr_DirectWrite::getByFamilyName(const WCHAR wideFamilyName[],
+ IDWriteFontFamily** fontFamily) const {
+ UINT32 index;
+ BOOL exists;
+ HR(fFontCollection->FindFamilyName(wideFamilyName, &index, &exists));
+
+ if (exists) {
+ HR(fFontCollection->GetFontFamily(index, fontFamily));
+ }
+ return S_OK;
+}
+
+HRESULT SkFontMgr_DirectWrite::getDefaultFontFamily(IDWriteFontFamily** fontFamily) const {
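+    // Use the message-box font from the system non-client metrics as the default.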
+ NONCLIENTMETRICSW metrics;
+ metrics.cbSize = sizeof(metrics);
+ if (0 == SystemParametersInfoW(SPI_GETNONCLIENTMETRICS,
+ sizeof(metrics),
+ &metrics,
+ 0)) {
+ return E_UNEXPECTED;
+ }
+ HRM(this->getByFamilyName(metrics.lfMessageFont.lfFaceName, fontFamily),
+ "Could not create DWrite font family from LOGFONT.");
+ return S_OK;
+}
+
+SkTypeface* SkFontMgr_DirectWrite::onLegacyCreateTypeface(const char familyName[],
+ SkFontStyle style) const {
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ if (familyName) {
+ SkSMallocWCHAR wideFamilyName;
+ if (SUCCEEDED(sk_cstring_to_wchar(familyName, &wideFamilyName))) {
+ this->getByFamilyName(wideFamilyName, &fontFamily);
+ }
+ }
+
+ if (nullptr == fontFamily.get()) {
+ // No family with given name, try default.
+ HRNM(this->getDefaultFontFamily(&fontFamily), "Could not get default font family.");
+ }
+
+ if (nullptr == fontFamily.get()) {
+ // Could not obtain the default font.
+ HRNM(fFontCollection->GetFontFamily(0, &fontFamily),
+ "Could not get default-default font family.");
+ }
+
+ SkTScopedComPtr<IDWriteFont> font;
+ DWriteStyle dwStyle(style);
+ HRNM(fontFamily->GetFirstMatchingFont(dwStyle.fWeight, dwStyle.fWidth, dwStyle.fSlant, &font),
+ "Could not get matching font.");
+
+ SkTScopedComPtr<IDWriteFontFace> fontFace;
+ HRNM(font->CreateFontFace(&fontFace), "Could not create font face.");
+
+ return this->createTypefaceFromDWriteFont(fontFace.get(), font.get(), fontFamily.get());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+int SkFontStyleSet_DirectWrite::count() {
+ return fFontFamily->GetFontCount();
+}
+
+SkTypeface* SkFontStyleSet_DirectWrite::createTypeface(int index) {
+ SkTScopedComPtr<IDWriteFont> font;
+ HRNM(fFontFamily->GetFont(index, &font), "Could not get font.");
+
+ SkTScopedComPtr<IDWriteFontFace> fontFace;
+ HRNM(font->CreateFontFace(&fontFace), "Could not create font face.");
+
+ return fFontMgr->createTypefaceFromDWriteFont(fontFace.get(), font.get(), fFontFamily.get());
+}
+
+void SkFontStyleSet_DirectWrite::getStyle(int index, SkFontStyle* fs, SkString* styleName) {
+ SkTScopedComPtr<IDWriteFont> font;
+ HRVM(fFontFamily->GetFont(index, &font), "Could not get font.");
+
+ if (fs) {
+ *fs = get_style(font.get());
+ }
+
+ if (styleName) {
+ SkTScopedComPtr<IDWriteLocalizedStrings> faceNames;
+ if (SUCCEEDED(font->GetFaceNames(&faceNames))) {
+ sk_get_locale_string(faceNames.get(), fFontMgr->fLocaleName.get(), styleName);
+ }
+ }
+}
+
+SkTypeface* SkFontStyleSet_DirectWrite::matchStyle(const SkFontStyle& pattern) {
+ SkTScopedComPtr<IDWriteFont> font;
+ DWriteStyle dwStyle(pattern);
+ // TODO: perhaps use GetMatchingFonts and get the least simulated?
+ HRNM(fFontFamily->GetFirstMatchingFont(dwStyle.fWeight, dwStyle.fWidth, dwStyle.fSlant, &font),
+ "Could not match font in family.");
+
+ SkTScopedComPtr<IDWriteFontFace> fontFace;
+ HRNM(font->CreateFontFace(&fontFace), "Could not create font face.");
+
+ return fFontMgr->createTypefaceFromDWriteFont(fontFace.get(), font.get(),
+ fFontFamily.get());
+}
+
+////////////////////////////////////////////////////////////////////////////////
+#include "SkTypeface_win.h"
+
+SK_API SkFontMgr* SkFontMgr_New_DirectWrite(IDWriteFactory* factory,
+ IDWriteFontCollection* collection) {
+ return SkFontMgr_New_DirectWrite(factory, collection, nullptr);
+}
+
+SK_API SkFontMgr* SkFontMgr_New_DirectWrite(IDWriteFactory* factory,
+ IDWriteFontCollection* collection,
+ IDWriteFontFallback* fallback) {
+ if (nullptr == factory) {
+ factory = sk_get_dwrite_factory();
+ if (nullptr == factory) {
+ return nullptr;
+ }
+ }
+
+ SkTScopedComPtr<IDWriteFontCollection> systemFontCollection;
+ if (nullptr == collection) {
+ HRNM(factory->GetSystemFontCollection(&systemFontCollection, FALSE),
+ "Could not get system font collection.");
+ collection = systemFontCollection.get();
+ }
+
+ WCHAR localeNameStorage[LOCALE_NAME_MAX_LENGTH];
+ WCHAR* localeName = nullptr;
+ int localeNameLen = 0;
+
+ // Dynamically load GetUserDefaultLocaleName function, as it is not available on XP.
+ SkGetUserDefaultLocaleNameProc getUserDefaultLocaleNameProc = nullptr;
+ HRESULT hr = SkGetGetUserDefaultLocaleNameProc(&getUserDefaultLocaleNameProc);
+ if (nullptr == getUserDefaultLocaleNameProc) {
+ SK_TRACEHR(hr, "Could not get GetUserDefaultLocaleName.");
+ } else {
+ localeNameLen = getUserDefaultLocaleNameProc(localeNameStorage, LOCALE_NAME_MAX_LENGTH);
+ if (localeNameLen) {
+ localeName = localeNameStorage;
+        }
+ }
+
+ return new SkFontMgr_DirectWrite(factory, collection, fallback, localeName, localeNameLen);
+}
+
+#include "SkFontMgr_indirect.h"
+SK_API SkFontMgr* SkFontMgr_New_DirectWriteRenderer(SkRemotableFontMgr* proxy) {
+ SkAutoTUnref<SkFontMgr> impl(SkFontMgr_New_DirectWrite());
+ if (impl.get() == nullptr) {
+ return nullptr;
+ }
+ return new SkFontMgr_Indirect(impl.get(), proxy);
+}
+#endif//defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_win_dw_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_win_dw_factory.cpp
new file mode 100644
index 000000000..52e22aec5
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_win_dw_factory.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32) // And !SKIA_GDI?
+
+#include "SkFontMgr.h"
+#include "SkTypeface_win.h"
+
+SkFontMgr* SkFontMgr::Factory() {
+ return SkFontMgr_New_DirectWrite();
+}
+
+#endif//defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_win_gdi_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_win_gdi_factory.cpp
new file mode 100644
index 000000000..c1ca822c2
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_win_gdi_factory.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32) // And SKIA_GDI?
+
+#include "SkFontMgr.h"
+#include "SkTypeface_win.h"
+
+SkFontMgr* SkFontMgr::Factory() {
+ return SkFontMgr_New_GDI();
+}
+
+#endif//defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/ports/SkGlobalInitialization_default.cpp b/gfx/skia/skia/src/ports/SkGlobalInitialization_default.cpp
new file mode 100644
index 000000000..c6fad41f9
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkGlobalInitialization_default.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapSourceDeserializer.h"
+#include "SkDashPathEffect.h"
+#include "SkGradientShader.h"
+#include "SkImageSource.h"
+#include "SkLayerRasterizer.h"
+
+// Security note:
+//
+// As new subclasses are added here, they should be reviewed by chrome security before they
+// support deserializing cross-process: chrome-security@google.com. SampleFilterFuzz.cpp should
+// also be amended to exercise the new subclass.
+//
+// See SkReadBuffer::isCrossProcess() and SkPicture::PictureIOSecurityPrecautionsEnabled()
+//
+
+/*
+ * None of these are strictly "required" for Skia to operate.
+ *
+ * These are the bulk of our "effects" -- subclasses of various effects on SkPaint.
+ *
+ * Clients should feel free to dup this file and modify it as needed. This function "InitEffects"
+ * will automatically be called before any of skia's effects are asked to be deserialized.
+ */
+void SkFlattenable::PrivateInitializer::InitEffects() {
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkBitmapSourceDeserializer)
+
+ // Rasterizer
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkLayerRasterizer)
+
+ // Shader
+ SkGradientShader::InitializeFlattenables();
+
+ // PathEffect
+ SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkDashPathEffect)
+
+ // ImageFilter
+ SkImageFilter::InitializeFlattenables();
+}
diff --git a/gfx/skia/skia/src/ports/SkImageEncoder_CG.cpp b/gfx/skia/skia/src/ports/SkImageEncoder_CG.cpp
new file mode 100644
index 000000000..789285626
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageEncoder_CG.cpp
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#include "SkBitmap.h"
+#include "SkCGUtils.h"
+#include "SkColorPriv.h"
+#include "SkData.h"
+#include "SkImageEncoder.h"
+#include "SkStream.h"
+#include "SkStreamPriv.h"
+#include "SkTemplates.h"
+#include "SkUnPreMultiply.h"
+
+#ifdef SK_BUILD_FOR_MAC
+#include <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreGraphics/CoreGraphics.h>
+#include <ImageIO/ImageIO.h>
+#include <MobileCoreServices/MobileCoreServices.h>
+#endif
+
+static size_t consumer_put(void* info, const void* buffer, size_t count) {
+ SkWStream* stream = reinterpret_cast<SkWStream*>(info);
+ return stream->write(buffer, count) ? count : 0;
+}
+
+static void consumer_release(void* info) {
+ // we do nothing, since by design we don't "own" the stream (i.e. info)
+}
+
+static CGDataConsumerRef SkStreamToCGDataConsumer(SkWStream* stream) {
+ CGDataConsumerCallbacks procs;
+ procs.putBytes = consumer_put;
+ procs.releaseConsumer = consumer_release;
+    // We don't own/reference the stream, so our consumer must not live
+    // longer than our caller's ownership of the stream.
+ return CGDataConsumerCreate(stream, &procs);
+}
+
+static CGImageDestinationRef SkStreamToImageDestination(SkWStream* stream,
+ CFStringRef type) {
+ CGDataConsumerRef consumer = SkStreamToCGDataConsumer(stream);
+ if (nullptr == consumer) {
+ return nullptr;
+ }
+ SkAutoTCallVProc<const void, CFRelease> arconsumer(consumer);
+
+ return CGImageDestinationCreateWithDataConsumer(consumer, type, 1, nullptr);
+}
+
+class SkImageEncoder_CG : public SkImageEncoder {
+public:
+ SkImageEncoder_CG(Type t) : fType(t) {}
+
+protected:
+ virtual bool onEncode(SkWStream* stream, const SkBitmap& bm, int quality);
+
+private:
+ Type fType;
+};
+
+/* Encode bitmaps via CGImageDestination. We set up a DataConsumer which writes
+ to our SkWStream. Since we don't reference/own the SkWStream, our consumer
+ must only live for the duration of the onEncode() method.
+ */
+bool SkImageEncoder_CG::onEncode(SkWStream* stream, const SkBitmap& bm,
+ int quality) {
+ // Used for converting a bitmap to 8888.
+ const SkBitmap* bmPtr = &bm;
+ SkBitmap bitmap8888;
+
+ CFStringRef type;
+ switch (fType) {
+ case kICO_Type:
+ type = kUTTypeICO;
+ break;
+ case kBMP_Type:
+ type = kUTTypeBMP;
+ break;
+ case kGIF_Type:
+ type = kUTTypeGIF;
+ break;
+ case kJPEG_Type:
+ type = kUTTypeJPEG;
+ break;
+ case kPNG_Type:
+ // PNG encoding an ARGB_4444 bitmap gives the following errors in GM:
+ // <Error>: CGImageDestinationAddImage image could not be converted to destination
+ // format.
+ // <Error>: CGImageDestinationFinalize image destination does not have enough images
+ // So instead we copy to 8888.
+ if (bm.colorType() == kARGB_4444_SkColorType) {
+ bm.copyTo(&bitmap8888, kN32_SkColorType);
+ bmPtr = &bitmap8888;
+ }
+ type = kUTTypePNG;
+ break;
+ default:
+ return false;
+ }
+
+ CGImageDestinationRef dst = SkStreamToImageDestination(stream, type);
+ if (nullptr == dst) {
+ return false;
+ }
+ SkAutoTCallVProc<const void, CFRelease> ardst(dst);
+
+ CGImageRef image = SkCreateCGImageRef(*bmPtr);
+ if (nullptr == image) {
+ return false;
+ }
+ SkAutoTCallVProc<CGImage, CGImageRelease> agimage(image);
+
+ CGImageDestinationAddImage(dst, image, nullptr);
+ return CGImageDestinationFinalize(dst);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_USE_CG_ENCODER
+static SkImageEncoder* sk_imageencoder_cg_factory(SkImageEncoder::Type t) {
+ switch (t) {
+ case SkImageEncoder::kICO_Type:
+ case SkImageEncoder::kBMP_Type:
+ case SkImageEncoder::kGIF_Type:
+ case SkImageEncoder::kJPEG_Type:
+ case SkImageEncoder::kPNG_Type:
+ break;
+ default:
+ return nullptr;
+ }
+ return new SkImageEncoder_CG(t);
+}
+
+static SkImageEncoder_EncodeReg gEReg(sk_imageencoder_cg_factory);
+#endif
+
+SkImageEncoder* CreateImageEncoder_CG(SkImageEncoder::Type type) {
+ return new SkImageEncoder_CG(type);
+}
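+
+// A minimal usage sketch (hypothetical caller code; assumes a valid SkBitmap
+// `bm` and a writable path). Encoders created here are driven through the
+// public SkImageEncoder interface:
+//
+//   SkFILEWStream out("/tmp/out.png");
+//   SkAutoTDelete<SkImageEncoder> enc(CreateImageEncoder_CG(SkImageEncoder::kPNG_Type));
+//   bool ok = out.isValid() && enc && enc->encodeStream(&out, bm, 80 /*quality*/);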
+
+#endif//defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
diff --git a/gfx/skia/skia/src/ports/SkImageEncoder_WIC.cpp b/gfx/skia/skia/src/ports/SkImageEncoder_WIC.cpp
new file mode 100644
index 000000000..6524526bd
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageEncoder_WIC.cpp
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+
+#if defined(SK_BUILD_FOR_WIN32)
+
+// Workaround for:
+// http://connect.microsoft.com/VisualStudio/feedback/details/621653/
+// http://crbug.com/225822
+// In VS2010 both intsafe.h and stdint.h define the following without guards.
+// SkTypes brought in windows.h and stdint.h and the following defines are
+// not used by this file. However, they may be re-introduced by wincodec.h.
+#undef INT8_MIN
+#undef INT16_MIN
+#undef INT32_MIN
+#undef INT64_MIN
+#undef INT8_MAX
+#undef UINT8_MAX
+#undef INT16_MAX
+#undef UINT16_MAX
+#undef INT32_MAX
+#undef UINT32_MAX
+#undef INT64_MAX
+#undef UINT64_MAX
+
+#include <wincodec.h>
+#include "SkAutoCoInitialize.h"
+#include "SkBitmap.h"
+#include "SkImageEncoder.h"
+#include "SkIStream.h"
+#include "SkStream.h"
+#include "SkTScopedComPtr.h"
+#include "SkUnPreMultiply.h"
+
+//All Windows SDKs back to XPSP2 export the CLSID_WICImagingFactory symbol.
+//In the Windows8 SDK the CLSID_WICImagingFactory symbol is still exported
+//but CLSID_WICImagingFactory is then #defined to CLSID_WICImagingFactory2.
+//Undo this #define if it has been done so that we link against the symbols
+//we intended to link against on all SDKs.
+#if defined(CLSID_WICImagingFactory)
+#undef CLSID_WICImagingFactory
+#endif
+
+class SkImageEncoder_WIC : public SkImageEncoder {
+public:
+ SkImageEncoder_WIC(Type t) : fType(t) {}
+
+protected:
+ virtual bool onEncode(SkWStream* stream, const SkBitmap& bm, int quality);
+
+private:
+ Type fType;
+};
+
+bool SkImageEncoder_WIC::onEncode(SkWStream* stream
+ , const SkBitmap& bitmapOrig
+ , int quality)
+{
+ GUID type;
+ switch (fType) {
+ case kJPEG_Type:
+ type = GUID_ContainerFormatJpeg;
+ break;
+ case kPNG_Type:
+ type = GUID_ContainerFormatPng;
+ break;
+ default:
+ return false;
+ }
+
+ // First convert to BGRA if necessary.
+ SkBitmap bitmap;
+ if (!bitmapOrig.copyTo(&bitmap, kBGRA_8888_SkColorType)) {
+ return false;
+ }
+
+ // WIC expects unpremultiplied pixels. Unpremultiply if necessary.
+ if (kPremul_SkAlphaType == bitmap.alphaType()) {
+ uint8_t* pixels = reinterpret_cast<uint8_t*>(bitmap.getPixels());
+ for (int y = 0; y < bitmap.height(); ++y) {
+ for (int x = 0; x < bitmap.width(); ++x) {
+ uint8_t* bytes = pixels + y * bitmap.rowBytes() + x * bitmap.bytesPerPixel();
+ SkPMColor* src = reinterpret_cast<SkPMColor*>(bytes);
+ SkColor* dst = reinterpret_cast<SkColor*>(bytes);
+ *dst = SkUnPreMultiply::PMColorToColor(*src);
+ }
+ }
+ }
+
+ // Finally, if we are performing a jpeg encode, we must convert to BGR.
+ void* pixels = bitmap.getPixels();
+ size_t rowBytes = bitmap.rowBytes();
+ SkAutoMalloc pixelStorage;
+ WICPixelFormatGUID formatDesired = GUID_WICPixelFormat32bppBGRA;
+ if (kJPEG_Type == fType) {
+ formatDesired = GUID_WICPixelFormat24bppBGR;
+ rowBytes = SkAlign4(bitmap.width() * 3);
+ pixelStorage.reset(rowBytes * bitmap.height());
+ for (int y = 0; y < bitmap.height(); y++) {
+ uint8_t* dstRow = SkTAddOffset<uint8_t>(pixelStorage.get(), y * rowBytes);
+ for (int x = 0; x < bitmap.width(); x++) {
+ uint32_t bgra = *bitmap.getAddr32(x, y);
+ dstRow[0] = (uint8_t) (bgra >> 0);
+ dstRow[1] = (uint8_t) (bgra >> 8);
+ dstRow[2] = (uint8_t) (bgra >> 16);
+ dstRow += 3;
+ }
+ }
+
+ pixels = pixelStorage.get();
+ }
+
+
+ //Initialize COM.
+ SkAutoCoInitialize scopedCo;
+ if (!scopedCo.succeeded()) {
+ return false;
+ }
+
+ HRESULT hr = S_OK;
+
+ //Create Windows Imaging Component ImagingFactory.
+ SkTScopedComPtr<IWICImagingFactory> piImagingFactory;
+ if (SUCCEEDED(hr)) {
+ hr = CoCreateInstance(
+ CLSID_WICImagingFactory
+ , nullptr
+ , CLSCTX_INPROC_SERVER
+ , IID_PPV_ARGS(&piImagingFactory)
+ );
+ }
+
+ //Convert the SkWStream to an IStream.
+ SkTScopedComPtr<IStream> piStream;
+ if (SUCCEEDED(hr)) {
+ hr = SkWIStream::CreateFromSkWStream(stream, &piStream);
+ }
+
+ //Create an encoder of the appropriate type.
+ SkTScopedComPtr<IWICBitmapEncoder> piEncoder;
+ if (SUCCEEDED(hr)) {
+ hr = piImagingFactory->CreateEncoder(type, nullptr, &piEncoder);
+ }
+
+ if (SUCCEEDED(hr)) {
+ hr = piEncoder->Initialize(piStream.get(), WICBitmapEncoderNoCache);
+ }
+
+ //Create the frame.
+ SkTScopedComPtr<IWICBitmapFrameEncode> piBitmapFrameEncode;
+ SkTScopedComPtr<IPropertyBag2> piPropertybag;
+ if (SUCCEEDED(hr)) {
+ hr = piEncoder->CreateNewFrame(&piBitmapFrameEncode, &piPropertybag);
+ }
+
+ if (SUCCEEDED(hr)) {
+ PROPBAG2 name = { 0 };
+ name.dwType = PROPBAG2_TYPE_DATA;
+ name.vt = VT_R4;
+ name.pstrName = L"ImageQuality";
+
+ VARIANT value;
+ VariantInit(&value);
+ value.vt = VT_R4;
+ value.fltVal = (FLOAT)(quality / 100.0);
+
+ //Ignore result code.
+ // This returns E_FAIL if the named property is not in the bag.
+ //TODO(bungeman) enumerate the properties,
+ // write and set hr iff property exists.
+ piPropertybag->Write(1, &name, &value);
+ }
+ if (SUCCEEDED(hr)) {
+ hr = piBitmapFrameEncode->Initialize(piPropertybag.get());
+ }
+
+ //Set the size of the frame.
+ const UINT width = bitmap.width();
+ const UINT height = bitmap.height();
+ if (SUCCEEDED(hr)) {
+ hr = piBitmapFrameEncode->SetSize(width, height);
+ }
+
+ //Set the pixel format of the frame. If the encoder cannot use BGRA natively,
+ //it will choose the closest pixel format that it supports.
+ WICPixelFormatGUID formatGUID = formatDesired;
+ if (SUCCEEDED(hr)) {
+ hr = piBitmapFrameEncode->SetPixelFormat(&formatGUID);
+ }
+ if (SUCCEEDED(hr)) {
+ //Be sure the image format is the one requested.
+ hr = IsEqualGUID(formatGUID, formatDesired) ? S_OK : E_FAIL;
+ }
+
+ //Write the pixels into the frame.
+ if (SUCCEEDED(hr)) {
+ hr = piBitmapFrameEncode->WritePixels(height,
+ (UINT) rowBytes,
+ (UINT) rowBytes * height,
+ reinterpret_cast<BYTE*>(pixels));
+ }
+
+ if (SUCCEEDED(hr)) {
+ hr = piBitmapFrameEncode->Commit();
+ }
+
+ if (SUCCEEDED(hr)) {
+ hr = piEncoder->Commit();
+ }
+
+ return SUCCEEDED(hr);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_USE_WIC_ENCODER
+static SkImageEncoder* sk_imageencoder_wic_factory(SkImageEncoder::Type t) {
+ switch (t) {
+ case SkImageEncoder::kPNG_Type:
+ case SkImageEncoder::kJPEG_Type:
+ break;
+ default:
+ return nullptr;
+ }
+ return new SkImageEncoder_WIC(t);
+}
+
+static SkImageEncoder_EncodeReg gEReg(sk_imageencoder_wic_factory);
+#endif
+
+SkImageEncoder* CreateImageEncoder_WIC(SkImageEncoder::Type type) {
+ return new SkImageEncoder_WIC(type);
+}
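+
+// A minimal usage sketch (hypothetical caller code; assumes a valid SkBitmap
+// `bm`). The quality argument is 0..100 and is mapped above to WIC's 0..1
+// ImageQuality property:
+//
+//   SkAutoTDelete<SkImageEncoder> enc(CreateImageEncoder_WIC(SkImageEncoder::kJPEG_Type));
+//   SkDynamicMemoryWStream out;
+//   bool ok = enc && enc->encodeStream(&out, bm, 90 /*quality*/);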
+
+#endif // defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/ports/SkImageEncoder_none.cpp b/gfx/skia/skia/src/ports/SkImageEncoder_none.cpp
new file mode 100644
index 000000000..c7d4b9242
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageEncoder_none.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmap.h"
+#include "SkImage.h"
+#include "SkImageEncoder.h"
+#include "SkMovie.h"
+#include "SkPixelSerializer.h"
+#include "SkStream.h"
+
+/////////////////////////////////////////////////////////////////////////
+
+// Empty implementation for SkMovie.
+
+SkMovie* SkMovie::DecodeStream(SkStreamRewindable* stream) {
+ return nullptr;
+}
+
+/////////////////////////////////////////////////////////////////////////
+
+// Empty implementations for SkImageEncoder.
+
+SkImageEncoder* SkImageEncoder::Create(Type t) {
+ return nullptr;
+}
+
+bool SkImageEncoder::EncodeFile(const char file[], const SkBitmap&, Type, int quality) {
+ return false;
+}
+
+bool SkImageEncoder::EncodeStream(SkWStream*, const SkBitmap&, SkImageEncoder::Type, int) {
+ return false;
+}
+
+SkData* SkImageEncoder::EncodeData(const SkBitmap&, Type, int quality) {
+ return nullptr;
+}
+
+SkData* SkImageEncoder::EncodeData(const SkImageInfo&, const void* pixels, size_t rowBytes,
+ Type, int quality) {
+ return nullptr;
+}
+
+SkData* SkImageEncoder::EncodeData(const SkPixmap&, Type, int) {
+ return nullptr;
+}
+
+bool SkImageEncoder::encodeStream(SkWStream*, const SkBitmap&, int) {
+ return false;
+}
+
+SkData* SkImageEncoder::encodeData(const SkBitmap&, int) {
+ return nullptr;
+}
+
+bool SkImageEncoder::encodeFile(const char file[], const SkBitmap& bm, int quality) {
+ return false;
+}
+
+namespace {
+class ImageEncoderPixelSerializer final : public SkPixelSerializer {
+protected:
+ bool onUseEncodedData(const void*, size_t) override { return true; }
+ SkData* onEncode(const SkPixmap&) override { return nullptr; }
+};
+} // namespace
+
+SkPixelSerializer* SkImageEncoder::CreatePixelSerializer() {
+ return new ImageEncoderPixelSerializer;
+}
+
+/////////////////////////////////////////////////////////////////////////
diff --git a/gfx/skia/skia/src/ports/SkImageGeneratorCG.cpp b/gfx/skia/skia/src/ports/SkImageGeneratorCG.cpp
new file mode 100644
index 000000000..c54eebf9c
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageGeneratorCG.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkImageGeneratorCG.h"
+
+#ifdef SK_BUILD_FOR_MAC
+#include <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreGraphics/CoreGraphics.h>
+#include <ImageIO/ImageIO.h>
+#include <MobileCoreServices/MobileCoreServices.h>
+#endif
+
+static CGImageSourceRef data_to_CGImageSrc(SkData* data) {
+ CGDataProviderRef cgData = CGDataProviderCreateWithData(data, data->data(), data->size(),
+ nullptr);
+ if (!cgData) {
+ return nullptr;
+ }
+ CGImageSourceRef imageSrc = CGImageSourceCreateWithDataProvider(cgData, 0);
+ CGDataProviderRelease(cgData);
+ return imageSrc;
+}
+
+SkImageGenerator* SkImageGeneratorCG::NewFromEncodedCG(SkData* data) {
+ CGImageSourceRef imageSrc = data_to_CGImageSrc(data);
+ if (!imageSrc) {
+ return nullptr;
+ }
+
+ // Make sure we call CFRelease to free the imageSrc. Since CFRelease actually takes
+ // a const void*, we must cast the imageSrc to a const void*.
+ SkAutoTCallVProc<const void, CFRelease> autoImageSrc(imageSrc);
+
+ CFDictionaryRef properties = CGImageSourceCopyPropertiesAtIndex(imageSrc, 0, nullptr);
+ if (!properties) {
+ return nullptr;
+ }
+ // Balance the Copy above: release the properties dictionary when this scope exits.
+ SkAutoTCallVProc<const void, CFRelease> autoProperties(properties);
+
+ CFNumberRef widthRef = (CFNumberRef) (CFDictionaryGetValue(properties,
+ kCGImagePropertyPixelWidth));
+ CFNumberRef heightRef = (CFNumberRef) (CFDictionaryGetValue(properties,
+ kCGImagePropertyPixelHeight));
+ if (nullptr == widthRef || nullptr == heightRef) {
+ return nullptr;
+ }
+ bool hasAlpha = (bool) (CFDictionaryGetValue(properties,
+ kCGImagePropertyHasAlpha));
+
+ int width, height;
+ if (!CFNumberGetValue(widthRef, kCFNumberIntType, &width) ||
+ !CFNumberGetValue(heightRef, kCFNumberIntType, &height)) {
+ return nullptr;
+ }
+
+ SkAlphaType alphaType = hasAlpha ? kPremul_SkAlphaType : kOpaque_SkAlphaType;
+ SkImageInfo info = SkImageInfo::Make(width, height, kN32_SkColorType, alphaType);
+
+ // FIXME: We have the opportunity to extract color space information here,
+ // though I think it makes sense to wait until we understand how
+ // we want to communicate it to the generator.
+
+ return new SkImageGeneratorCG(info, autoImageSrc.release(), data);
+}
+
+SkImageGeneratorCG::SkImageGeneratorCG(const SkImageInfo& info, const void* imageSrc, SkData* data)
+ : INHERITED(info)
+ , fImageSrc(imageSrc)
+ , fData(SkRef(data))
+{}
+
+SkData* SkImageGeneratorCG::onRefEncodedData(SK_REFENCODEDDATA_CTXPARAM) {
+ return SkRef(fData.get());
+}
+
+bool SkImageGeneratorCG::onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ SkPMColor ctable[], int* ctableCount) {
+ if (kN32_SkColorType != info.colorType()) {
+ // FIXME: Support other colorTypes.
+ return false;
+ }
+
+ switch (info.alphaType()) {
+ case kOpaque_SkAlphaType:
+ if (kOpaque_SkAlphaType != this->getInfo().alphaType()) {
+ return false;
+ }
+ break;
+ case kPremul_SkAlphaType:
+ break;
+ default:
+ return false;
+ }
+
+ CGImageRef image = CGImageSourceCreateImageAtIndex((CGImageSourceRef) fImageSrc.get(), 0,
+ nullptr);
+ if (!image) {
+ return false;
+ }
+ SkAutoTCallVProc<CGImage, CGImageRelease> autoImage(image);
+
+ // FIXME: Using this function (as opposed to swizzling ourselves) greatly
+ // restricts the color and alpha types that we support. If we
+ // swizzle ourselves, we can add support for:
+ // kUnpremul_SkAlphaType
+ // 16-bit per component RGBA
+ // kGray_8_SkColorType
+ // kIndex_8_SkColorType
+ // Additionally, it would be interesting to compare the performance
+ // of SkSwizzler with CG's built in swizzler.
+ if (!SkCopyPixelsFromCGImage(info, rowBytes, pixels, image)) {
+ return false;
+ }
+
+ return true;
+}
diff --git a/gfx/skia/skia/src/ports/SkImageGeneratorCG.h b/gfx/skia/skia/src/ports/SkImageGeneratorCG.h
new file mode 100644
index 000000000..baf3669ff
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageGeneratorCG.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#include "SkCGUtils.h"
+#include "SkData.h"
+#include "SkImageGenerator.h"
+#include "SkTemplates.h"
+
+class SkImageGeneratorCG : public SkImageGenerator {
+public:
+ /*
+ * Refs the data if an image generator can be returned. Otherwise does
+ * not affect the data.
+ */
+ static SkImageGenerator* NewFromEncodedCG(SkData* data);
+
+protected:
+ SkData* onRefEncodedData(SK_REFENCODEDDATA_CTXPARAM) override;
+
+ bool onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes, SkPMColor ctable[],
+ int* ctableCount) override;
+
+private:
+ /*
+ * Takes ownership of the imageSrc
+ * Refs the data
+ */
+ SkImageGeneratorCG(const SkImageInfo& info, const void* imageSrc, SkData* data);
+
+ SkAutoTCallVProc<const void, CFRelease> fImageSrc;
+ sk_sp<SkData> fData;
+
+ typedef SkImageGenerator INHERITED;
+};
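+
+// A minimal usage sketch (hypothetical caller code; assumes `data` holds an
+// encoded image that CoreGraphics can decode):
+//
+//   SkAutoTDelete<SkImageGenerator> gen(SkImageGeneratorCG::NewFromEncodedCG(data));
+//   if (gen) {
+//       SkBitmap bm;
+//       bm.allocPixels(gen->getInfo());
+//       bool ok = gen->getPixels(bm.info(), bm.getPixels(), bm.rowBytes());
+//   }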
+
+#endif //defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
diff --git a/gfx/skia/skia/src/ports/SkImageGeneratorWIC.cpp b/gfx/skia/skia/src/ports/SkImageGeneratorWIC.cpp
new file mode 100644
index 000000000..52d1377f2
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageGeneratorWIC.cpp
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkImageGeneratorWIC.h"
+#include "SkIStream.h"
+#include "SkStream.h"
+
+// All Windows SDKs back to XPSP2 export the CLSID_WICImagingFactory symbol.
+// In the Windows8 SDK the CLSID_WICImagingFactory symbol is still exported
+// but CLSID_WICImagingFactory is then #defined to CLSID_WICImagingFactory2.
+// Undo this #define if it has been done so that we link against the symbols
+// we intended to link against on all SDKs.
+#if defined(CLSID_WICImagingFactory)
+ #undef CLSID_WICImagingFactory
+#endif
+
+SkImageGenerator* SkImageGeneratorWIC::NewFromEncodedWIC(SkData* data) {
+ // Create Windows Imaging Component ImagingFactory.
+ SkTScopedComPtr<IWICImagingFactory> imagingFactory;
+ HRESULT hr = CoCreateInstance(CLSID_WICImagingFactory, nullptr, CLSCTX_INPROC_SERVER,
+ IID_PPV_ARGS(&imagingFactory));
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Create an IStream.
+ SkTScopedComPtr<IStream> iStream;
+ // Note that iStream will take ownership of the new memory stream because
+ // we set |deleteOnRelease| to true.
+ hr = SkIStream::CreateFromSkStream(new SkMemoryStream(sk_ref_sp(data)), true, &iStream);
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Create the decoder from the stream.
+ SkTScopedComPtr<IWICBitmapDecoder> decoder;
+ hr = imagingFactory->CreateDecoderFromStream(iStream.get(), nullptr,
+ WICDecodeMetadataCacheOnDemand, &decoder);
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Select the first frame from the decoder.
+ SkTScopedComPtr<IWICBitmapFrameDecode> imageFrame;
+ hr = decoder->GetFrame(0, &imageFrame);
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Treat the frame as an image source.
+ SkTScopedComPtr<IWICBitmapSource> imageSource;
+ hr = imageFrame->QueryInterface(IID_PPV_ARGS(&imageSource));
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Get the size of the image.
+ UINT width;
+ UINT height;
+ hr = imageSource->GetSize(&width, &height);
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Get the encoded pixel format.
+ WICPixelFormatGUID format;
+ hr = imageSource->GetPixelFormat(&format);
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Recommend kOpaque if the image is opaque and kPremul otherwise.
+ // FIXME: We are stuck recommending kPremul for all indexed formats
+ // (Ex: GUID_WICPixelFormat8bppIndexed) because we don't have
+ // a way to check if the image has alpha.
+ SkAlphaType alphaType = kPremul_SkAlphaType;
+
+ if (GUID_WICPixelFormat16bppBGR555 == format ||
+ GUID_WICPixelFormat16bppBGR565 == format ||
+ GUID_WICPixelFormat32bppBGR101010 == format ||
+ GUID_WICPixelFormatBlackWhite == format ||
+ GUID_WICPixelFormat2bppGray == format ||
+ GUID_WICPixelFormat4bppGray == format ||
+ GUID_WICPixelFormat8bppGray == format ||
+ GUID_WICPixelFormat16bppGray == format ||
+ GUID_WICPixelFormat16bppGrayFixedPoint == format ||
+ GUID_WICPixelFormat16bppGrayHalf == format ||
+ GUID_WICPixelFormat32bppGrayFloat == format ||
+ GUID_WICPixelFormat32bppGrayFixedPoint == format ||
+ GUID_WICPixelFormat32bppRGBE == format ||
+ GUID_WICPixelFormat24bppRGB == format ||
+ GUID_WICPixelFormat24bppBGR == format ||
+ GUID_WICPixelFormat32bppBGR == format ||
+ GUID_WICPixelFormat48bppRGB == format ||
+ GUID_WICPixelFormat48bppBGR == format ||
+ GUID_WICPixelFormat48bppRGBFixedPoint == format ||
+ GUID_WICPixelFormat48bppBGRFixedPoint == format ||
+ GUID_WICPixelFormat48bppRGBHalf == format ||
+ GUID_WICPixelFormat64bppRGBFixedPoint == format ||
+ GUID_WICPixelFormat64bppRGBHalf == format ||
+ GUID_WICPixelFormat96bppRGBFixedPoint == format ||
+ GUID_WICPixelFormat128bppRGBFloat == format ||
+ GUID_WICPixelFormat128bppRGBFixedPoint == format ||
+ GUID_WICPixelFormat32bppRGB == format ||
+ GUID_WICPixelFormat64bppRGB == format ||
+ GUID_WICPixelFormat96bppRGBFloat == format ||
+ GUID_WICPixelFormat32bppCMYK == format ||
+ GUID_WICPixelFormat64bppCMYK == format ||
+ GUID_WICPixelFormat8bppY == format ||
+ GUID_WICPixelFormat8bppCb == format ||
+ GUID_WICPixelFormat8bppCr == format ||
+ GUID_WICPixelFormat16bppCbCr == format)
+ {
+ alphaType = kOpaque_SkAlphaType;
+ }
+
+ // FIXME: If we change the implementation to handle swizzling ourselves,
+ // we can support more output formats.
+ SkImageInfo info = SkImageInfo::MakeN32(width, height, alphaType);
+ return new SkImageGeneratorWIC(info, imagingFactory.release(), imageSource.release(), data);
+}
+
+SkImageGeneratorWIC::SkImageGeneratorWIC(const SkImageInfo& info,
+ IWICImagingFactory* imagingFactory, IWICBitmapSource* imageSource, SkData* data)
+ : INHERITED(info)
+ , fImagingFactory(imagingFactory)
+ , fImageSource(imageSource)
+ , fData(SkRef(data))
+{}
+
+SkData* SkImageGeneratorWIC::onRefEncodedData(SK_REFENCODEDDATA_CTXPARAM) {
+ return SkRef(fData.get());
+}
+
+bool SkImageGeneratorWIC::onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ SkPMColor ctable[], int* ctableCount) {
+ if (kN32_SkColorType != info.colorType()) {
+ return false;
+ }
+
+ // Create a format converter.
+ SkTScopedComPtr<IWICFormatConverter> formatConverter;
+ HRESULT hr = fImagingFactory->CreateFormatConverter(&formatConverter);
+ if (FAILED(hr)) {
+ return false;
+ }
+
+ GUID format = GUID_WICPixelFormat32bppPBGRA;
+ if (kUnpremul_SkAlphaType == info.alphaType()) {
+ format = GUID_WICPixelFormat32bppBGRA;
+ }
+
+ hr = formatConverter->Initialize(fImageSource.get(), format, WICBitmapDitherTypeNone, nullptr,
+ 0.0, WICBitmapPaletteTypeCustom);
+ if (FAILED(hr)) {
+ return false;
+ }
+
+ // Treat the format converter as an image source.
+ SkTScopedComPtr<IWICBitmapSource> formatConverterSrc;
+ hr = formatConverter->QueryInterface(IID_PPV_ARGS(&formatConverterSrc));
+ if (FAILED(hr)) {
+ return false;
+ }
+
+ // Set the destination pixels.
+ hr = formatConverterSrc->CopyPixels(nullptr, (UINT) rowBytes, (UINT) rowBytes * info.height(),
+ (BYTE*) pixels);
+
+ return SUCCEEDED(hr);
+}
diff --git a/gfx/skia/skia/src/ports/SkImageGeneratorWIC.h b/gfx/skia/skia/src/ports/SkImageGeneratorWIC.h
new file mode 100644
index 000000000..76cb6df1a
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageGeneratorWIC.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "SkData.h"
+#include "SkImageGenerator.h"
+#include "SkTemplates.h"
+#include "SkTScopedComPtr.h"
+
+#include <wincodec.h>
+
+/*
+ * Any Windows program that uses COM must initialize the COM library by calling
+ * the CoInitializeEx function. In addition, each thread that uses a COM
+ * interface must make a separate call to this function.
+ *
+ * For every successful call to CoInitializeEx, the thread must call
+ * CoUninitialize before it exits.
+ *
+ * SkImageGeneratorWIC requires the COM library and leaves it to the client to
+ * initialize COM for their application.
+ *
+ * For more information on initializing COM, please see:
+ * https://msdn.microsoft.com/en-us/library/windows/desktop/ff485844.aspx
+ */
+class SkImageGeneratorWIC : public SkImageGenerator {
+public:
+ /*
+ * Refs the data if an image generator can be returned. Otherwise does
+ * not affect the data.
+ */
+ static SkImageGenerator* NewFromEncodedWIC(SkData* data);
+
+protected:
+ SkData* onRefEncodedData(SK_REFENCODEDDATA_CTXPARAM) override;
+
+ bool onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes, SkPMColor ctable[],
+ int* ctableCount) override;
+
+private:
+ /*
+ * Takes ownership of the imagingFactory
+ * Takes ownership of the imageSource
+ * Refs the data
+ */
+ SkImageGeneratorWIC(const SkImageInfo& info, IWICImagingFactory* imagingFactory,
+ IWICBitmapSource* imageSource, SkData* data);
+
+ SkTScopedComPtr<IWICImagingFactory> fImagingFactory;
+ SkTScopedComPtr<IWICBitmapSource> fImageSource;
+ sk_sp<SkData> fData;
+
+ typedef SkImageGenerator INHERITED;
+};
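+
+// A minimal client-side sketch (hypothetical caller code, following the note
+// above): the calling thread initializes COM before creating the generator
+// and balances the call with CoUninitialize.
+//
+//   HRESULT hr = CoInitializeEx(nullptr, COINIT_APARTMENTTHREADED);
+//   if (SUCCEEDED(hr)) {
+//       SkAutoTDelete<SkImageGenerator> gen(SkImageGeneratorWIC::NewFromEncodedWIC(data));
+//       // ... decode with gen->getPixels(...) ...
+//       CoUninitialize();
+//   }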
+
+#endif // SK_BUILD_FOR_WIN
diff --git a/gfx/skia/skia/src/ports/SkImageGenerator_none.cpp b/gfx/skia/skia/src/ports/SkImageGenerator_none.cpp
new file mode 100644
index 000000000..2dce1c2dc
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageGenerator_none.cpp
@@ -0,0 +1,12 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkImageGenerator.h"
+
+SkImageGenerator* SkImageGenerator::NewFromEncodedImpl(SkData*) {
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/ports/SkImageGenerator_skia.cpp b/gfx/skia/skia/src/ports/SkImageGenerator_skia.cpp
new file mode 100644
index 000000000..b6ddee935
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageGenerator_skia.cpp
@@ -0,0 +1,13 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkData.h"
+#include "SkCodecImageGenerator.h"
+
+SkImageGenerator* SkImageGenerator::NewFromEncodedImpl(SkData* data) {
+ return SkCodecImageGenerator::NewFromEncodedCodec(data);
+}
diff --git a/gfx/skia/skia/src/ports/SkMemory_malloc.cpp b/gfx/skia/skia/src/ports/SkMemory_malloc.cpp
new file mode 100644
index 000000000..f06dc35ea
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkMemory_malloc.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+
+#include <stdlib.h>
+
+#define SK_DEBUGFAILF(fmt, ...) \
+ SkASSERT((SkDebugf(fmt"\n", __VA_ARGS__), false))
+
+static inline void sk_out_of_memory(size_t size) {
+ SK_DEBUGFAILF("sk_out_of_memory (asked for " SK_SIZE_T_SPECIFIER " bytes)",
+ size);
+ abort();
+}
+
+static inline void* throw_on_failure(size_t size, void* p) {
+ if (size > 0 && p == nullptr) {
+ // If we've got a nullptr here, the only reason we should have failed is running out of RAM.
+ sk_out_of_memory(size);
+ }
+ return p;
+}
+
+void sk_abort_no_print() {
+#if defined(SK_BUILD_FOR_WIN) && defined(SK_IS_BOT)
+ // do not display a system dialog before aborting the process
+ _set_abort_behavior(0, _WRITE_ABORT_MSG);
+#endif
+#if defined(SK_DEBUG) && defined(SK_BUILD_FOR_WIN)
+ __debugbreak();
+#else
+ abort();
+#endif
+}
+
+void sk_out_of_memory(void) {
+ SkDEBUGFAIL("sk_out_of_memory");
+ abort();
+}
+
+void* sk_malloc_throw(size_t size) {
+ return sk_malloc_flags(size, SK_MALLOC_THROW);
+}
+
+void* sk_realloc_throw(void* addr, size_t size) {
+ return throw_on_failure(size, realloc(addr, size));
+}
+
+void sk_free(void* p) {
+ if (p) {
+ free(p);
+ }
+}
+
+void* sk_malloc_flags(size_t size, unsigned flags) {
+ void* p = malloc(size);
+ if (flags & SK_MALLOC_THROW) {
+ return throw_on_failure(size, p);
+ } else {
+ return p;
+ }
+}
+
+void* sk_calloc(size_t size) {
+ return calloc(size, 1);
+}
+
+void* sk_calloc_throw(size_t size) {
+ return throw_on_failure(size, sk_calloc(size));
+}
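+
+// A brief illustration of the flag semantics (hypothetical caller code):
+// SK_MALLOC_THROW aborts via sk_out_of_memory() instead of returning nullptr,
+// while a flag-less request leaves failure handling to the caller.
+//
+//   void* must = sk_malloc_throw(1024);      // aborts on failure
+//   void* maybe = sk_malloc_flags(1024, 0);  // may return nullptr
+//   if (maybe) { sk_free(maybe); }
+//   sk_free(must);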
diff --git a/gfx/skia/skia/src/ports/SkMemory_mozalloc.cpp b/gfx/skia/skia/src/ports/SkMemory_mozalloc.cpp
new file mode 100644
index 000000000..f29881d96
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkMemory_mozalloc.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2011 Google Inc.
+ * Copyright 2012 Mozilla Foundation
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+
+#include "mozilla/mozalloc.h"
+#include "mozilla/mozalloc_abort.h"
+#include "mozilla/mozalloc_oom.h"
+#include "prenv.h"
+
+void sk_abort_no_print() {
+#ifdef SK_DEBUG
+ const char* env = PR_GetEnv("MOZ_SKIA_DISABLE_ASSERTS");
+ if (env && *env != '0') {
+ return;
+ }
+#endif
+ mozalloc_abort("Abort from sk_abort");
+}
+
+void sk_out_of_memory(void) {
+ SkDEBUGFAIL("sk_out_of_memory");
+ mozalloc_handle_oom(0);
+}
+
+void* sk_malloc_throw(size_t size) {
+ return sk_malloc_flags(size, SK_MALLOC_THROW);
+}
+
+void* sk_realloc_throw(void* addr, size_t size) {
+ return moz_xrealloc(addr, size);
+}
+
+void sk_free(void* p) {
+ free(p);
+}
+
+void* sk_malloc_flags(size_t size, unsigned flags) {
+ return (flags & SK_MALLOC_THROW) ? moz_xmalloc(size) : malloc(size);
+}
+
+void* sk_calloc(size_t size) {
+ return calloc(size, 1);
+}
+
+void* sk_calloc_throw(size_t size) {
+ return moz_xcalloc(size, 1);
+}
diff --git a/gfx/skia/skia/src/ports/SkOSFile_posix.cpp b/gfx/skia/skia/src/ports/SkOSFile_posix.cpp
new file mode 100644
index 000000000..396de68bb
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkOSFile_posix.cpp
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkOSFile.h"
+#include "SkString.h"
+#include "SkTFitsIn.h"
+#include "SkTemplates.h"
+#include "SkTypes.h"
+
+#include <dirent.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+bool sk_exists(const char *path, SkFILE_Flags flags) {
+ int mode = F_OK;
+ if (flags & kRead_SkFILE_Flag) {
+ mode |= R_OK;
+ }
+ if (flags & kWrite_SkFILE_Flag) {
+ mode |= W_OK;
+ }
+ return (0 == access(path, mode));
+}
+
+typedef struct {
+ dev_t dev;
+ ino_t ino;
+} SkFILEID;
+
+static bool sk_ino(FILE* a, SkFILEID* id) {
+ int fd = fileno(a);
+ if (fd < 0) {
+ return false;
+ }
+ struct stat status;
+ if (0 != fstat(fd, &status)) {
+ return false;
+ }
+ id->dev = status.st_dev;
+ id->ino = status.st_ino;
+ return true;
+}
+
+bool sk_fidentical(FILE* a, FILE* b) {
+ SkFILEID aID, bID;
+ return sk_ino(a, &aID) && sk_ino(b, &bID)
+ && aID.ino == bID.ino
+ && aID.dev == bID.dev;
+}
+
+void sk_fmunmap(const void* addr, size_t length) {
+ munmap(const_cast<void*>(addr), length);
+}
+
+void* sk_fdmmap(int fd, size_t* size) {
+ struct stat status;
+ if (0 != fstat(fd, &status)) {
+ return nullptr;
+ }
+ if (!S_ISREG(status.st_mode)) {
+ return nullptr;
+ }
+ if (!SkTFitsIn<size_t>(status.st_size)) {
+ return nullptr;
+ }
+ size_t fileSize = static_cast<size_t>(status.st_size);
+
+ void* addr = mmap(nullptr, fileSize, PROT_READ, MAP_PRIVATE, fd, 0);
+ if (MAP_FAILED == addr) {
+ return nullptr;
+ }
+
+ *size = fileSize;
+ return addr;
+}
+
+int sk_fileno(FILE* f) {
+ return fileno(f);
+}
+
+void* sk_fmmap(FILE* f, size_t* size) {
+ int fd = sk_fileno(f);
+ if (fd < 0) {
+ return nullptr;
+ }
+
+ return sk_fdmmap(fd, size);
+}
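+
+// A minimal usage sketch (hypothetical caller code): map a file read-only and
+// release the mapping when done.
+//
+//   size_t size;
+//   if (FILE* f = sk_fopen("/path/to/file", kRead_SkFILE_Flag)) {
+//       if (void* addr = sk_fmmap(f, &size)) {
+//           // ... read size bytes starting at addr ...
+//           sk_fmunmap(addr, size);
+//       }
+//       sk_fclose(f);
+//   }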
+
+////////////////////////////////////////////////////////////////////////////
+
+struct SkOSFileIterData {
+ SkOSFileIterData() : fDIR(0) { }
+ DIR* fDIR;
+ SkString fPath, fSuffix;
+};
+static_assert(sizeof(SkOSFileIterData) <= SkOSFile::Iter::kStorageSize, "not_enough_space");
+
+SkOSFile::Iter::Iter() { new (fSelf.get()) SkOSFileIterData; }
+
+SkOSFile::Iter::Iter(const char path[], const char suffix[]) {
+ new (fSelf.get()) SkOSFileIterData;
+ this->reset(path, suffix);
+}
+
+SkOSFile::Iter::~Iter() {
+ SkOSFileIterData& self = *static_cast<SkOSFileIterData*>(fSelf.get());
+ if (self.fDIR) {
+ ::closedir(self.fDIR);
+ }
+ self.~SkOSFileIterData();
+}
+
+void SkOSFile::Iter::reset(const char path[], const char suffix[]) {
+ SkOSFileIterData& self = *static_cast<SkOSFileIterData*>(fSelf.get());
+ if (self.fDIR) {
+ ::closedir(self.fDIR);
+ self.fDIR = 0;
+ }
+
+ self.fPath.set(path);
+ if (path) {
+ self.fDIR = ::opendir(path);
+ self.fSuffix.set(suffix);
+ } else {
+ self.fSuffix.reset();
+ }
+}
+
+// returns true if suffix is empty, or if str ends with suffix
+static bool issuffixfor(const SkString& suffix, const char str[]) {
+ size_t suffixLen = suffix.size();
+ size_t strLen = strlen(str);
+
+ return strLen >= suffixLen &&
+ memcmp(suffix.c_str(), str + strLen - suffixLen, suffixLen) == 0;
+}
+
+bool SkOSFile::Iter::next(SkString* name, bool getDir) {
+ SkOSFileIterData& self = *static_cast<SkOSFileIterData*>(fSelf.get());
+ if (self.fDIR) {
+ dirent* entry;
+
+ while ((entry = ::readdir(self.fDIR)) != nullptr) {
+ struct stat s;
+ SkString str(self.fPath);
+
+ if (!str.endsWith("/") && !str.endsWith("\\")) {
+ str.append("/");
+ }
+ str.append(entry->d_name);
+
+ if (0 == stat(str.c_str(), &s)) {
+ if (getDir) {
+ if (s.st_mode & S_IFDIR) {
+ break;
+ }
+ } else {
+ if (!(s.st_mode & S_IFDIR) && issuffixfor(self.fSuffix, entry->d_name)) {
+ break;
+ }
+ }
+ }
+ }
+ if (entry) { // we broke out with a file
+ if (name) {
+ name->set(entry->d_name);
+ }
+ return true;
+ }
+ }
+ return false;
+}
diff --git a/gfx/skia/skia/src/ports/SkOSFile_stdio.cpp b/gfx/skia/skia/src/ports/SkOSFile_stdio.cpp
new file mode 100644
index 000000000..1c4bd4bab
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkOSFile_stdio.cpp
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkOSFile.h"
+#include "SkTypes.h"
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h> // for strerror() in the debug-only fopen failure message
+#include <sys/stat.h>
+
+#ifdef SK_BUILD_FOR_UNIX
+#include <unistd.h>
+#endif
+
+#ifdef _WIN32
+#include <direct.h>
+#include <io.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#import <CoreFoundation/CoreFoundation.h>
+
+static FILE* ios_open_from_bundle(const char path[], const char* perm) {
+ // Get a reference to the main bundle
+ CFBundleRef mainBundle = CFBundleGetMainBundle();
+
+ // Get a reference to the file's URL
+ CFStringRef pathRef = CFStringCreateWithCString(NULL, path, kCFStringEncodingUTF8);
+ CFURLRef imageURL = CFBundleCopyResourceURL(mainBundle, pathRef, NULL, NULL);
+ CFRelease(pathRef);
+ if (!imageURL) {
+ return nullptr;
+ }
+
+ // Convert the URL reference into a string reference
+ CFStringRef imagePath = CFURLCopyFileSystemPath(imageURL, kCFURLPOSIXPathStyle);
+ CFRelease(imageURL);
+
+ // Get the system encoding method
+ CFStringEncoding encodingMethod = CFStringGetSystemEncoding();
+
+ // Convert the string reference into a C string
+ const char *finalPath = CFStringGetCStringPtr(imagePath, encodingMethod);
+ FILE* fileHandle = fopen(finalPath, perm);
+ CFRelease(imagePath);
+ return fileHandle;
+}
+#endif
+
+
+FILE* sk_fopen(const char path[], SkFILE_Flags flags) {
+ char perm[4];
+ char* p = perm;
+
+ if (flags & kRead_SkFILE_Flag) {
+ *p++ = 'r';
+ }
+ if (flags & kWrite_SkFILE_Flag) {
+ *p++ = 'w';
+ }
+ *p++ = 'b';
+ *p = 0;
+
+ //TODO: on Windows fopen is just ASCII or the current code page,
+ //convert to utf16 and use _wfopen
+ FILE* file = nullptr;
+#ifdef SK_BUILD_FOR_IOS
+ // if read-only, try to open from bundle first
+ if (kRead_SkFILE_Flag == flags) {
+ file = ios_open_from_bundle(path, perm);
+ }
+ // otherwise just read from the Documents directory (default)
+ if (!file) {
+#endif
+ file = fopen(path, perm);
+#ifdef SK_BUILD_FOR_IOS
+ }
+#endif
+ if (nullptr == file && (flags & kWrite_SkFILE_Flag)) {
+ SkDEBUGF(("sk_fopen: fopen(\"%s\", \"%s\") returned NULL (errno:%d): %s\n",
+ path, perm, errno, strerror(errno)));
+ }
+ return file;
+}
+
+char* sk_fgets(char* str, int size, FILE* f) {
+ return fgets(str, size, (FILE *)f);
+}
+
+int sk_feof(FILE *f) {
+ // no :: namespace qualifier because it breaks android
+ return feof((FILE *)f);
+}
+
+size_t sk_fgetsize(FILE* f) {
+ SkASSERT(f);
+
+ long curr = ftell(f); // remember where we are
+ if (curr < 0) {
+ return 0;
+ }
+
+ fseek(f, 0, SEEK_END); // go to the end
+ long size = ftell(f); // record the size
+ if (size < 0) {
+ size = 0;
+ }
+
+ fseek(f, curr, SEEK_SET); // go back to our prev location
+ return size;
+}
+
+bool sk_frewind(FILE* f) {
+ SkASSERT(f);
+ ::rewind(f);
+ return true;
+}
+
+size_t sk_fread(void* buffer, size_t byteCount, FILE* f) {
+ SkASSERT(f);
+ if (buffer == nullptr) {
+ size_t curr = ftell(f);
+ if ((long)curr == -1) {
+ SkDEBUGF(("sk_fread: ftell(%p) returned -1 feof:%d ferror:%d\n", f, feof(f), ferror(f)));
+ return 0;
+ }
+ int err = fseek(f, (long)byteCount, SEEK_CUR);
+ if (err != 0) {
+ SkDEBUGF(("sk_fread: fseek(%d) tell:%d failed with feof:%d ferror:%d returned:%d\n",
+ byteCount, curr, feof(f), ferror(f), err));
+ return 0;
+ }
+ return byteCount;
+ }
+ else
+ return fread(buffer, 1, byteCount, f);
+}
+
+size_t sk_fwrite(const void* buffer, size_t byteCount, FILE* f) {
+ SkASSERT(f);
+ return fwrite(buffer, 1, byteCount, f);
+}
+
+void sk_fflush(FILE* f) {
+ SkASSERT(f);
+ fflush(f);
+}
+
+void sk_fsync(FILE* f) {
+#if !defined(_WIN32) && !defined(SK_BUILD_FOR_ANDROID) && !defined(__UCLIBC__) \
+ && !defined(_NEWLIB_VERSION)
+ int fd = fileno(f);
+ fsync(fd);
+#endif
+}
+
+bool sk_fseek(FILE* f, size_t byteCount) {
+ int err = fseek(f, (long)byteCount, SEEK_SET);
+ return err == 0;
+}
+
+bool sk_fmove(FILE* f, long byteCount) {
+ int err = fseek(f, byteCount, SEEK_CUR);
+ return err == 0;
+}
+
+size_t sk_ftell(FILE* f) {
+ long curr = ftell(f);
+ if (curr < 0) {
+ return 0;
+ }
+ return curr;
+}
+
+void sk_fclose(FILE* f) {
+ SkASSERT(f);
+ fclose(f);
+}
+
+bool sk_isdir(const char *path) {
+ struct stat status;
+ if (0 != stat(path, &status)) {
+ return false;
+ }
+ return SkToBool(status.st_mode & S_IFDIR);
+}
+
+bool sk_mkdir(const char* path) {
+ if (sk_isdir(path)) {
+ return true;
+ }
+ if (sk_exists(path)) {
+ fprintf(stderr,
+ "sk_mkdir: path '%s' already exists but is not a directory\n",
+ path);
+ return false;
+ }
+
+ int retval;
+#ifdef _WIN32
+ retval = _mkdir(path);
+#else
+ retval = mkdir(path, 0777);
+#endif
+ if (0 == retval) {
+ return true;
+ } else {
+ fprintf(stderr, "sk_mkdir: error %d creating dir '%s'\n", errno, path);
+ return false;
+ }
+}
diff --git a/gfx/skia/skia/src/ports/SkOSFile_win.cpp b/gfx/skia/skia/src/ports/SkOSFile_win.cpp
new file mode 100644
index 000000000..cf46cea98
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkOSFile_win.cpp
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32)
+
+#include "SkLeanWindows.h"
+#include "SkOSFile.h"
+#include "SkTFitsIn.h"
+
+#include <io.h>
+#include <stdio.h>
+#include <sys/stat.h>
+
+bool sk_exists(const char *path, SkFILE_Flags flags) {
+ int mode = 0; // existence
+ if (flags & kRead_SkFILE_Flag) {
+ mode |= 4; // read
+ }
+ if (flags & kWrite_SkFILE_Flag) {
+ mode |= 2; // write
+ }
+ return (0 == _access(path, mode));
+}
+
+typedef struct {
+ ULONGLONG fVolume;
+ ULONGLONG fLsbSize;
+ ULONGLONG fMsbSize;
+} SkFILEID;
+
+static bool sk_ino(FILE* f, SkFILEID* id) {
+ int fileno = _fileno((FILE*)f);
+ if (fileno < 0) {
+ return false;
+ }
+
+ HANDLE file = (HANDLE)_get_osfhandle(fileno);
+ if (INVALID_HANDLE_VALUE == file) {
+ return false;
+ }
+
+ //TODO: call GetFileInformationByHandleEx on Vista and later with FileIdInfo.
+ BY_HANDLE_FILE_INFORMATION info;
+ if (0 == GetFileInformationByHandle(file, &info)) {
+ return false;
+ }
+ id->fVolume = info.dwVolumeSerialNumber;
+ id->fLsbSize = info.nFileIndexLow + (((ULONGLONG)info.nFileIndexHigh) << 32);
+ id->fMsbSize = 0;
+
+ return true;
+}
+
+bool sk_fidentical(FILE* a, FILE* b) {
+ SkFILEID aID, bID;
+ return sk_ino(a, &aID) && sk_ino(b, &bID)
+ && aID.fLsbSize == bID.fLsbSize
+ && aID.fMsbSize == bID.fMsbSize
+ && aID.fVolume == bID.fVolume;
+}
+
+class SkAutoNullKernelHandle : SkNoncopyable {
+public:
+ SkAutoNullKernelHandle(const HANDLE handle) : fHandle(handle) { }
+ ~SkAutoNullKernelHandle() { CloseHandle(fHandle); }
+ operator HANDLE() const { return fHandle; }
+ bool isValid() const { return SkToBool(fHandle); }
+private:
+ HANDLE fHandle;
+};
+typedef SkAutoNullKernelHandle SkAutoWinMMap;
+
+void sk_fmunmap(const void* addr, size_t) {
+ UnmapViewOfFile(addr);
+}
+
+void* sk_fdmmap(int fileno, size_t* length) {
+ HANDLE file = (HANDLE)_get_osfhandle(fileno);
+ if (INVALID_HANDLE_VALUE == file) {
+ return nullptr;
+ }
+
+ LARGE_INTEGER fileSize;
+ if (0 == GetFileSizeEx(file, &fileSize)) {
+ //TODO: use SK_TRACEHR(GetLastError(), "Could not get file size.") to report.
+ return nullptr;
+ }
+ if (!SkTFitsIn<size_t>(fileSize.QuadPart)) {
+ return nullptr;
+ }
+
+ SkAutoWinMMap mmap(CreateFileMapping(file, nullptr, PAGE_READONLY, 0, 0, nullptr));
+ if (!mmap.isValid()) {
+ //TODO: use SK_TRACEHR(GetLastError(), "Could not create file mapping.") to report.
+ return nullptr;
+ }
+
+ // Eventually call UnmapViewOfFile
+ void* addr = MapViewOfFile(mmap, FILE_MAP_READ, 0, 0, 0);
+ if (nullptr == addr) {
+ //TODO: use SK_TRACEHR(GetLastError(), "Could not map view of file.") to report.
+ return nullptr;
+ }
+
+ *length = static_cast<size_t>(fileSize.QuadPart);
+ return addr;
+}
+
+int sk_fileno(FILE* f) {
+ return _fileno((FILE*)f);
+}
+
+void* sk_fmmap(FILE* f, size_t* length) {
+ int fileno = sk_fileno(f);
+ if (fileno < 0) {
+ return nullptr;
+ }
+
+ return sk_fdmmap(fileno, length);
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+struct SkOSFileIterData {
+ SkOSFileIterData() : fHandle(0), fPath16(nullptr) { }
+ HANDLE fHandle;
+ uint16_t* fPath16;
+};
+static_assert(sizeof(SkOSFileIterData) <= SkOSFile::Iter::kStorageSize, "not_enough_space");
+
+static uint16_t* concat_to_16(const char src[], const char suffix[]) {
+ size_t i, len = strlen(src);
+ size_t len2 = 3 + (suffix ? strlen(suffix) : 0);
+ uint16_t* dst = (uint16_t*)sk_malloc_throw((len + len2) * sizeof(uint16_t));
+
+ for (i = 0; i < len; i++) {
+ dst[i] = src[i];
+ }
+
+ if (i > 0 && dst[i-1] != '/') {
+ dst[i++] = '/';
+ }
+ dst[i++] = '*';
+
+ if (suffix) {
+ while (*suffix) {
+ dst[i++] = *suffix++;
+ }
+ }
+ dst[i] = 0;
+ SkASSERT(i + 1 <= len + len2);
+
+ return dst;
+}
+
+SkOSFile::Iter::Iter() { new (fSelf.get()) SkOSFileIterData; }
+
+SkOSFile::Iter::Iter(const char path[], const char suffix[]) {
+ new (fSelf.get()) SkOSFileIterData;
+ this->reset(path, suffix);
+}
+
+SkOSFile::Iter::~Iter() {
+ SkOSFileIterData& self = *static_cast<SkOSFileIterData*>(fSelf.get());
+ sk_free(self.fPath16);
+ if (self.fHandle) {
+ ::FindClose(self.fHandle);
+ }
+ self.~SkOSFileIterData();
+}
+
+void SkOSFile::Iter::reset(const char path[], const char suffix[]) {
+ SkOSFileIterData& self = *static_cast<SkOSFileIterData*>(fSelf.get());
+ if (self.fHandle) {
+ ::FindClose(self.fHandle);
+ self.fHandle = 0;
+ }
+ if (nullptr == path) {
+ path = "";
+ }
+
+ sk_free(self.fPath16);
+ self.fPath16 = concat_to_16(path, suffix);
+}
+
+static bool is_magic_dir(const uint16_t dir[]) {
+ // return true for "." and ".."
+ return dir[0] == '.' && (dir[1] == 0 || (dir[1] == '.' && dir[2] == 0));
+}
+
+static bool get_the_file(HANDLE handle, SkString* name, WIN32_FIND_DATAW* dataPtr, bool getDir) {
+ WIN32_FIND_DATAW data;
+
+ if (nullptr == dataPtr) {
+ if (::FindNextFileW(handle, &data))
+ dataPtr = &data;
+ else
+ return false;
+ }
+
+ for (;;) {
+ if (getDir) {
+ if ((dataPtr->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) &&
+ !is_magic_dir((uint16_t*)dataPtr->cFileName))
+ {
+ break;
+ }
+ } else {
+ if (!(dataPtr->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
+ break;
+ }
+ }
+ if (!::FindNextFileW(handle, dataPtr)) {
+ return false;
+ }
+ }
+ // if we get here, we've found a file/dir
+ if (name) {
+ name->setUTF16((uint16_t*)dataPtr->cFileName);
+ }
+ return true;
+}
+
+bool SkOSFile::Iter::next(SkString* name, bool getDir) {
+ SkOSFileIterData& self = *static_cast<SkOSFileIterData*>(fSelf.get());
+ WIN32_FIND_DATAW data;
+ WIN32_FIND_DATAW* dataPtr = nullptr;
+
+ if (self.fHandle == 0) { // our first time
+ if (self.fPath16 == nullptr || *self.fPath16 == 0) { // check for no path
+ return false;
+ }
+
+ self.fHandle = ::FindFirstFileW((LPCWSTR)self.fPath16, &data);
+ if (self.fHandle != 0 && self.fHandle != (HANDLE)~0) {
+ dataPtr = &data;
+ }
+ }
+ return self.fHandle != (HANDLE)~0 && get_the_file(self.fHandle, name, dataPtr, getDir);
+}
+
+#endif//defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/ports/SkOSLibrary.h b/gfx/skia/skia/src/ports/SkOSLibrary.h
new file mode 100644
index 000000000..ea1378f8b
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkOSLibrary.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOSLibrary_DEFINED
+#define SkOSLibrary_DEFINED
+
+void* DynamicLoadLibrary(const char* libraryName);
+void* GetProcedureAddress(void* library, const char* functionName);
+
+#endif
diff --git a/gfx/skia/skia/src/ports/SkOSLibrary_posix.cpp b/gfx/skia/skia/src/ports/SkOSLibrary_posix.cpp
new file mode 100644
index 000000000..6372a8122
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkOSLibrary_posix.cpp
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkTypes.h"
+#if !defined(SK_BUILD_FOR_WIN32)
+
+#include "SkOSLibrary.h"
+
+#include <dlfcn.h>
+
+void* DynamicLoadLibrary(const char* libraryName) {
+ void* result = dlopen(libraryName, RTLD_LAZY);
+ if (!result) {
+ SkDebugf("Error loading %s {\n %s\n}\n", libraryName, dlerror());
+ }
+ return result;
+}
+
+void* GetProcedureAddress(void* library, const char* functionName) {
+ return dlsym(library, functionName);
+}
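+
+// A minimal usage sketch (hypothetical library and symbol names): resolve a
+// function from a shared library at runtime.
+//
+//   if (void* lib = DynamicLoadLibrary("libEGL.so")) {
+//       typedef void* (*GetProcFn)(const char*);
+//       GetProcFn getProc = (GetProcFn)GetProcedureAddress(lib, "eglGetProcAddress");
+//       if (getProc) { /* ... */ }
+//   }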
+#endif//!defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/ports/SkOSLibrary_win.cpp b/gfx/skia/skia/src/ports/SkOSLibrary_win.cpp
new file mode 100644
index 000000000..b6d8dd318
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkOSLibrary_win.cpp
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32)
+
+#include "SkOSLibrary.h"
+#include "SkLeanWindows.h"
+
+void* DynamicLoadLibrary(const char* libraryName) {
+ return LoadLibraryA(libraryName);
+}
+
+void* GetProcedureAddress(void* library, const char* functionName) {
+ return reinterpret_cast<void*>(::GetProcAddress((HMODULE)library, functionName));
+}
+
+#endif//defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/ports/SkRemotableFontMgr_win_dw.cpp b/gfx/skia/skia/src/ports/SkRemotableFontMgr_win_dw.cpp
new file mode 100644
index 000000000..a4c895ad0
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkRemotableFontMgr_win_dw.cpp
@@ -0,0 +1,490 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32)
+
+#include "SkDWrite.h"
+#include "SkDWriteFontFileStream.h"
+#include "SkDataTable.h"
+#include "SkHRESULT.h"
+#include "SkMutex.h"
+#include "SkRemotableFontMgr.h"
+#include "SkStream.h"
+#include "SkString.h"
+#include "SkTArray.h"
+#include "SkTScopedComPtr.h"
+#include "SkTypeface_win_dw.h"
+#include "SkTypes.h"
+#include "SkUtils.h"
+
+#include <dwrite.h>
+
+class SK_API SkRemotableFontMgr_DirectWrite : public SkRemotableFontMgr {
+private:
+ struct DataId {
+ IUnknown* fLoader; // In COM only IUnknown pointers may be safely used for identity.
+ void* fKey;
+ UINT32 fKeySize;
+
+ DataId() { }
+
+ DataId(DataId&& that) : fLoader(that.fLoader), fKey(that.fKey), fKeySize(that.fKeySize) {
+ that.fLoader = nullptr;
+ that.fKey = nullptr;
+ SkDEBUGCODE(that.fKeySize = 0xFFFFFFFF;)
+ }
+
+ ~DataId() {
+ if (fLoader) {
+ fLoader->Release();
+ }
+ sk_free(fKey);
+ }
+ };
+
+ mutable SkTArray<DataId> fDataIdCache;
+ mutable SkMutex fDataIdCacheMutex;
+
+ int FindOrAdd(IDWriteFontFileLoader* fontFileLoader,
+ const void* refKey, UINT32 refKeySize) const
+ {
+ SkTScopedComPtr<IUnknown> fontFileLoaderId;
+ HR_GENERAL(fontFileLoader->QueryInterface(&fontFileLoaderId),
+ "Failed to re-convert to IDWriteFontFileLoader.",
+ SkFontIdentity::kInvalidDataId);
+
+ SkAutoMutexAcquire ama(fDataIdCacheMutex);
+ int count = fDataIdCache.count();
+ int i;
+ for (i = 0; i < count; ++i) {
+ const DataId& current = fDataIdCache[i];
+ if (fontFileLoaderId.get() == current.fLoader &&
+ refKeySize == current.fKeySize &&
+ 0 == memcmp(refKey, current.fKey, refKeySize))
+ {
+ return i;
+ }
+ }
+ DataId& added = fDataIdCache.push_back();
+ added.fLoader = fontFileLoaderId.release(); // Ref is passed.
+ added.fKey = sk_malloc_throw(refKeySize);
+ memcpy(added.fKey, refKey, refKeySize);
+ added.fKeySize = refKeySize;
+
+ return i;
+ }
+
+public:
+
+
+ /** localeNameLength must include the null terminator. */
+ SkRemotableFontMgr_DirectWrite(IDWriteFontCollection* fontCollection,
+ WCHAR* localeName, int localeNameLength)
+ : fFontCollection(SkRefComPtr(fontCollection))
+ , fLocaleName(localeNameLength)
+ {
+ memcpy(fLocaleName.get(), localeName, localeNameLength * sizeof(WCHAR));
+ }
+
+ sk_sp<SkDataTable> getFamilyNames() const override {
+ int count = fFontCollection->GetFontFamilyCount();
+
+ SkDataTableBuilder names(1024);
+ for (int index = 0; index < count; ++index) {
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HRNM(fFontCollection->GetFontFamily(index, &fontFamily),
+ "Could not get requested family.");
+
+ SkTScopedComPtr<IDWriteLocalizedStrings> familyNames;
+ HRNM(fontFamily->GetFamilyNames(&familyNames), "Could not get family names.");
+
+ SkString familyName;
+ sk_get_locale_string(familyNames.get(), fLocaleName.get(), &familyName);
+
+ names.appendString(familyName);
+ }
+ return names.detachDataTable();
+ }
+
+ HRESULT FontToIdentity(IDWriteFont* font, SkFontIdentity* fontId) const {
+ SkTScopedComPtr<IDWriteFontFace> fontFace;
+ HRM(font->CreateFontFace(&fontFace), "Could not create font face.");
+
+ UINT32 numFiles;
+ HR(fontFace->GetFiles(&numFiles, nullptr));
+ if (numFiles > 1) {
+ return E_FAIL;
+ }
+
+ // data id
+ SkTScopedComPtr<IDWriteFontFile> fontFile;
+ HR(fontFace->GetFiles(&numFiles, &fontFile));
+
+ SkTScopedComPtr<IDWriteFontFileLoader> fontFileLoader;
+ HR(fontFile->GetLoader(&fontFileLoader));
+
+ const void* refKey;
+ UINT32 refKeySize;
+ HR(fontFile->GetReferenceKey(&refKey, &refKeySize));
+
+ fontId->fDataId = FindOrAdd(fontFileLoader.get(), refKey, refKeySize);
+
+ // index
+ fontId->fTtcIndex = fontFace->GetIndex();
+
+ // style
+ fontId->fFontStyle = get_style(font);
+ return S_OK;
+ }
+
+ SkRemotableFontIdentitySet* getIndex(int familyIndex) const override {
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HRNM(fFontCollection->GetFontFamily(familyIndex, &fontFamily),
+ "Could not get requested family.");
+
+ int count = fontFamily->GetFontCount();
+ SkFontIdentity* fontIds;
+ SkAutoTUnref<SkRemotableFontIdentitySet> fontIdSet(
+ new SkRemotableFontIdentitySet(count, &fontIds));
+ for (int fontIndex = 0; fontIndex < count; ++fontIndex) {
+ SkTScopedComPtr<IDWriteFont> font;
+ HRNM(fontFamily->GetFont(fontIndex, &font), "Could not get font.");
+
+ HRN(FontToIdentity(font.get(), &fontIds[fontIndex]));
+ }
+ return fontIdSet.release();
+ }
+
+ virtual SkFontIdentity matchIndexStyle(int familyIndex,
+ const SkFontStyle& pattern) const override
+ {
+ SkFontIdentity identity = { SkFontIdentity::kInvalidDataId };
+
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HR_GENERAL(fFontCollection->GetFontFamily(familyIndex, &fontFamily),
+ "Could not get requested family.",
+ identity);
+
+ const DWriteStyle dwStyle(pattern);
+ SkTScopedComPtr<IDWriteFont> font;
+ HR_GENERAL(fontFamily->GetFirstMatchingFont(dwStyle.fWeight, dwStyle.fWidth,
+ dwStyle.fSlant, &font),
+ "Could not match font in family.",
+ identity);
+
+ HR_GENERAL(FontToIdentity(font.get(), &identity), nullptr, identity);
+
+ return identity;
+ }
+
+ static HRESULT getDefaultFontFamilyName(SkSMallocWCHAR* name) {
+ NONCLIENTMETRICSW metrics;
+ metrics.cbSize = sizeof(metrics);
+ if (0 == SystemParametersInfoW(SPI_GETNONCLIENTMETRICS,
+ sizeof(metrics),
+ &metrics,
+ 0)) {
+ return E_UNEXPECTED;
+ }
+
+ size_t len = wcsnlen_s(metrics.lfMessageFont.lfFaceName, LF_FACESIZE) + 1;
+ if (0 != wcsncpy_s(name->reset(len), len, metrics.lfMessageFont.lfFaceName, _TRUNCATE)) {
+ return E_UNEXPECTED;
+ }
+
+ return S_OK;
+ }
+
+ SkRemotableFontIdentitySet* matchName(const char familyName[]) const override {
+ SkSMallocWCHAR dwFamilyName;
+ if (nullptr == familyName) {
+ HR_GENERAL(getDefaultFontFamilyName(&dwFamilyName),
+ nullptr, SkRemotableFontIdentitySet::NewEmpty());
+ } else {
+ HR_GENERAL(sk_cstring_to_wchar(familyName, &dwFamilyName),
+ nullptr, SkRemotableFontIdentitySet::NewEmpty());
+ }
+
+ UINT32 index;
+ BOOL exists;
+ HR_GENERAL(fFontCollection->FindFamilyName(dwFamilyName.get(), &index, &exists),
+ "Failed while finding family by name.",
+ SkRemotableFontIdentitySet::NewEmpty());
+ if (!exists) {
+ return SkRemotableFontIdentitySet::NewEmpty();
+ }
+
+ return this->getIndex(index);
+ }
+
+ virtual SkFontIdentity matchNameStyle(const char familyName[],
+ const SkFontStyle& style) const override
+ {
+ SkFontIdentity identity = { SkFontIdentity::kInvalidDataId };
+
+ SkSMallocWCHAR dwFamilyName;
+ if (nullptr == familyName) {
+ HR_GENERAL(getDefaultFontFamilyName(&dwFamilyName), nullptr, identity);
+ } else {
+ HR_GENERAL(sk_cstring_to_wchar(familyName, &dwFamilyName), nullptr, identity);
+ }
+
+ UINT32 index;
+ BOOL exists;
+ HR_GENERAL(fFontCollection->FindFamilyName(dwFamilyName.get(), &index, &exists),
+ "Failed while finding family by name.",
+ identity);
+ if (!exists) {
+ return identity;
+ }
+
+ return this->matchIndexStyle(index, style);
+ }
+
+ class FontFallbackRenderer : public IDWriteTextRenderer {
+ public:
+ FontFallbackRenderer(const SkRemotableFontMgr_DirectWrite* outer, UINT32 character)
+ : fRefCount(1), fOuter(SkSafeRef(outer)), fCharacter(character) {
+ fIdentity.fDataId = SkFontIdentity::kInvalidDataId;
+ }
+
+ virtual ~FontFallbackRenderer() { }
+
+ // IDWriteTextRenderer methods
+ virtual HRESULT STDMETHODCALLTYPE DrawGlyphRun(
+ void* clientDrawingContext,
+ FLOAT baselineOriginX,
+ FLOAT baselineOriginY,
+ DWRITE_MEASURING_MODE measuringMode,
+ DWRITE_GLYPH_RUN const* glyphRun,
+ DWRITE_GLYPH_RUN_DESCRIPTION const* glyphRunDescription,
+ IUnknown* clientDrawingEffect) override
+ {
+ SkTScopedComPtr<IDWriteFont> font;
+ HRM(fOuter->fFontCollection->GetFontFromFontFace(glyphRun->fontFace, &font),
+ "Could not get font from font face.");
+
+ // It is possible that the font passed in does not actually contain the requested
+ // character: when no suitable font is found, a fallback font is returned instead.
+ // Check that the font actually contains the requested character.
+ BOOL exists;
+ HRM(font->HasCharacter(fCharacter, &exists), "Could not find character.");
+
+ if (exists) {
+ HR(fOuter->FontToIdentity(font.get(), &fIdentity));
+ }
+
+ return S_OK;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE DrawUnderline(
+ void* clientDrawingContext,
+ FLOAT baselineOriginX,
+ FLOAT baselineOriginY,
+ DWRITE_UNDERLINE const* underline,
+ IUnknown* clientDrawingEffect) override
+ { return E_NOTIMPL; }
+
+ virtual HRESULT STDMETHODCALLTYPE DrawStrikethrough(
+ void* clientDrawingContext,
+ FLOAT baselineOriginX,
+ FLOAT baselineOriginY,
+ DWRITE_STRIKETHROUGH const* strikethrough,
+ IUnknown* clientDrawingEffect) override
+ { return E_NOTIMPL; }
+
+ virtual HRESULT STDMETHODCALLTYPE DrawInlineObject(
+ void* clientDrawingContext,
+ FLOAT originX,
+ FLOAT originY,
+ IDWriteInlineObject* inlineObject,
+ BOOL isSideways,
+ BOOL isRightToLeft,
+ IUnknown* clientDrawingEffect) override
+ { return E_NOTIMPL; }
+
+ // IDWritePixelSnapping methods
+ virtual HRESULT STDMETHODCALLTYPE IsPixelSnappingDisabled(
+ void* clientDrawingContext,
+ BOOL* isDisabled) override
+ {
+ *isDisabled = FALSE;
+ return S_OK;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE GetCurrentTransform(
+ void* clientDrawingContext,
+ DWRITE_MATRIX* transform) override
+ {
+ const DWRITE_MATRIX ident = {1.0, 0.0, 0.0, 1.0, 0.0, 0.0};
+ *transform = ident;
+ return S_OK;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE GetPixelsPerDip(
+ void* clientDrawingContext,
+ FLOAT* pixelsPerDip) override
+ {
+ *pixelsPerDip = 1.0f;
+ return S_OK;
+ }
+
+ // IUnknown methods
+ ULONG STDMETHODCALLTYPE AddRef() override {
+ return InterlockedIncrement(&fRefCount);
+ }
+
+ ULONG STDMETHODCALLTYPE Release() override {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(
+ IID const& riid, void** ppvObject) override
+ {
+ if (__uuidof(IUnknown) == riid ||
+ __uuidof(IDWritePixelSnapping) == riid ||
+ __uuidof(IDWriteTextRenderer) == riid)
+ {
+ *ppvObject = this;
+ this->AddRef();
+ return S_OK;
+ }
+ *ppvObject = nullptr;
+ return E_FAIL;
+ }
+
+ const SkFontIdentity FallbackIdentity() { return fIdentity; }
+
+ protected:
+ ULONG fRefCount;
+ SkAutoTUnref<const SkRemotableFontMgr_DirectWrite> fOuter;
+ UINT32 fCharacter;
+ SkFontIdentity fIdentity;
+ };
+
+ virtual SkFontIdentity matchNameStyleCharacter(const char familyName[],
+ const SkFontStyle& pattern,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override
+ {
+ SkFontIdentity identity = { SkFontIdentity::kInvalidDataId };
+
+ IDWriteFactory* dwFactory = sk_get_dwrite_factory();
+ if (nullptr == dwFactory) {
+ return identity;
+ }
+
+ // TODO: use IDWriteFactory2::GetSystemFontFallback when available.
+
+ const DWriteStyle dwStyle(pattern);
+
+ SkSMallocWCHAR dwFamilyName;
+ if (nullptr == familyName) {
+ HR_GENERAL(getDefaultFontFamilyName(&dwFamilyName), nullptr, identity);
+ } else {
+ HR_GENERAL(sk_cstring_to_wchar(familyName, &dwFamilyName), nullptr, identity);
+ }
+
+ const SkSMallocWCHAR* dwBcp47;
+ SkSMallocWCHAR dwBcp47Local;
+ if (bcp47Count < 1) {
+ dwBcp47 = &fLocaleName;
+ } else {
+ //TODO: support fallback stack.
+ HR_GENERAL(sk_cstring_to_wchar(bcp47[bcp47Count-1], &dwBcp47Local), nullptr, identity);
+ dwBcp47 = &dwBcp47Local;
+ }
+
+ SkTScopedComPtr<IDWriteTextFormat> fallbackFormat;
+ HR_GENERAL(dwFactory->CreateTextFormat(dwFamilyName,
+ fFontCollection.get(),
+ dwStyle.fWeight,
+ dwStyle.fSlant,
+ dwStyle.fWidth,
+ 72.0f,
+ *dwBcp47,
+ &fallbackFormat),
+ "Could not create text format.",
+ identity);
+
+ WCHAR str[16];
+ UINT32 strLen = static_cast<UINT32>(
+ SkUTF16_FromUnichar(character, reinterpret_cast<uint16_t*>(str)));
+ SkTScopedComPtr<IDWriteTextLayout> fallbackLayout;
+ HR_GENERAL(dwFactory->CreateTextLayout(str, strLen, fallbackFormat.get(),
+ 200.0f, 200.0f,
+ &fallbackLayout),
+ "Could not create text layout.",
+ identity);
+
+ SkTScopedComPtr<FontFallbackRenderer> fontFallbackRenderer(
+ new FontFallbackRenderer(this, character));
+
+ HR_GENERAL(fallbackLayout->Draw(nullptr, fontFallbackRenderer.get(), 50.0f, 50.0f),
+ "Could not draw layout with renderer.",
+ identity);
+
+ return fontFallbackRenderer->FallbackIdentity();
+ }
+
+ SkStreamAsset* getData(int dataId) const override {
+ SkAutoMutexAcquire ama(fDataIdCacheMutex);
+ if (dataId >= fDataIdCache.count()) {
+ return nullptr;
+ }
+ const DataId& id = fDataIdCache[dataId];
+
+ SkTScopedComPtr<IDWriteFontFileLoader> loader;
+        HRNM(id.fLoader->QueryInterface(&loader), "QueryInterface IDWriteFontFileLoader failed.");
+
+ SkTScopedComPtr<IDWriteFontFileStream> fontFileStream;
+ HRNM(loader->CreateStreamFromKey(id.fKey, id.fKeySize, &fontFileStream),
+ "Could not create font file stream.");
+
+ return new SkDWriteFontFileStream(fontFileStream.get());
+ }
+
+private:
+ SkTScopedComPtr<IDWriteFontCollection> fFontCollection;
+ SkSMallocWCHAR fLocaleName;
+
+ typedef SkRemotableFontMgr INHERITED;
+};
+
+SkRemotableFontMgr* SkRemotableFontMgr_New_DirectWrite() {
+ IDWriteFactory* factory = sk_get_dwrite_factory();
+ if (nullptr == factory) {
+ return nullptr;
+ }
+
+ SkTScopedComPtr<IDWriteFontCollection> sysFontCollection;
+ HRNM(factory->GetSystemFontCollection(&sysFontCollection, FALSE),
+ "Could not get system font collection.");
+
+ WCHAR localeNameStorage[LOCALE_NAME_MAX_LENGTH];
+ WCHAR* localeName = nullptr;
+ int localeNameLen = 0;
+
+    // Dynamically load the GetUserDefaultLocaleName function, as it is not available on XP.
+ SkGetUserDefaultLocaleNameProc getUserDefaultLocaleNameProc = nullptr;
+ HRESULT hr = SkGetGetUserDefaultLocaleNameProc(&getUserDefaultLocaleNameProc);
+ if (nullptr == getUserDefaultLocaleNameProc) {
+ SK_TRACEHR(hr, "Could not get GetUserDefaultLocaleName.");
+ } else {
+ localeNameLen = getUserDefaultLocaleNameProc(localeNameStorage, LOCALE_NAME_MAX_LENGTH);
+ if (localeNameLen) {
+ localeName = localeNameStorage;
+        }
+ }
+
+ return new SkRemotableFontMgr_DirectWrite(sysFontCollection.get(), localeName, localeNameLen);
+}
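+
+// A minimal usage sketch, assuming only the SkRemotableFontMgr interface that
+// the class above implements: resolve a fallback font for a character and then
+// fetch its font data by data id.
+//
+//   SkAutoTUnref<SkRemotableFontMgr> mgr(SkRemotableFontMgr_New_DirectWrite());
+//   if (mgr.get()) {
+//       SkFontIdentity id = mgr->matchNameStyleCharacter(
+//           nullptr, SkFontStyle(), nullptr, 0, 0x4E2D);
+//       if (id.fDataId != SkFontIdentity::kInvalidDataId) {
+//           SkAutoTDelete<SkStreamAsset> data(mgr->getData(id.fDataId));
+//       }
+//   }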
+#endif//defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/ports/SkScalerContext_win_dw.cpp b/gfx/skia/skia/src/ports/SkScalerContext_win_dw.cpp
new file mode 100644
index 000000000..d050cb069
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkScalerContext_win_dw.cpp
@@ -0,0 +1,938 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32)
+
+#undef GetGlyphIndices
+
+#include "SkDraw.h"
+#include "SkDWrite.h"
+#include "SkDWriteGeometrySink.h"
+#include "SkEndian.h"
+#include "SkGlyph.h"
+#include "SkHRESULT.h"
+#include "SkMaskGamma.h"
+#include "SkMatrix22.h"
+#include "SkMutex.h"
+#include "SkOTTable_EBLC.h"
+#include "SkOTTable_EBSC.h"
+#include "SkOTTable_gasp.h"
+#include "SkOTTable_maxp.h"
+#include "SkPath.h"
+#include "SkRasterClip.h"
+#include "SkScalerContext.h"
+#include "SkScalerContext_win_dw.h"
+#include "SkSharedMutex.h"
+#include "SkTScopedComPtr.h"
+#include "SkTypeface_win_dw.h"
+
+#include <dwrite.h>
+#if SK_HAS_DWRITE_1_H
+# include <dwrite_1.h>
+#endif
+
+/* Note:
+ * In versions 8 and 8.1 of Windows, some calls in DWrite are not thread safe.
+ * The DWriteFactoryMutex protects the calls that are problematic.
+ */
+static SkSharedMutex DWriteFactoryMutex;
+
+typedef SkAutoSharedMutexShared Shared;
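+
+// Throughout this file, DWrite calls that are problematic to run concurrently
+// take an exclusive lock (SkAutoExclusive l(DWriteFactoryMutex)), while calls
+// that may safely run concurrently with one another take a shared lock
+// (Shared l(DWriteFactoryMutex)).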
+
+static bool isLCD(const SkScalerContext::Rec& rec) {
+ return SkMask::kLCD16_Format == rec.fMaskFormat;
+}
+
+static bool is_hinted_without_gasp(DWriteFontTypeface* typeface) {
+ SkAutoExclusive l(DWriteFactoryMutex);
+ AutoTDWriteTable<SkOTTableMaximumProfile> maxp(typeface->fDWriteFontFace.get());
+ if (!maxp.fExists) {
+ return false;
+ }
+ if (maxp.fSize < sizeof(SkOTTableMaximumProfile::Version::TT)) {
+ return false;
+ }
+ if (maxp->version.version != SkOTTableMaximumProfile::Version::TT::VERSION) {
+ return false;
+ }
+
+ if (0 == maxp->version.tt.maxSizeOfInstructions) {
+ // No hints.
+ return false;
+ }
+
+ AutoTDWriteTable<SkOTTableGridAndScanProcedure> gasp(typeface->fDWriteFontFace.get());
+ return !gasp.fExists;
+}
+
+/** A PPEMRange is inclusive, [min, max]. */
+struct PPEMRange {
+ int min;
+ int max;
+};
+
+/** If the rendering mode for the specified 'size' is gridfit, then place
+ * the gridfit range into 'range'. Otherwise, leave 'range' alone.
+ */
+static void expand_range_if_gridfit_only(DWriteFontTypeface* typeface, int size, PPEMRange* range) {
+ AutoTDWriteTable<SkOTTableGridAndScanProcedure> gasp(typeface->fDWriteFontFace.get());
+ if (!gasp.fExists) {
+ return;
+ }
+ if (gasp.fSize < sizeof(SkOTTableGridAndScanProcedure)) {
+ return;
+ }
+ if (gasp->version != SkOTTableGridAndScanProcedure::version0 &&
+ gasp->version != SkOTTableGridAndScanProcedure::version1)
+ {
+ return;
+ }
+
+ uint16_t numRanges = SkEndianSwap16(gasp->numRanges);
+ if (numRanges > 1024 ||
+ gasp.fSize < sizeof(SkOTTableGridAndScanProcedure) +
+ sizeof(SkOTTableGridAndScanProcedure::GaspRange) * numRanges)
+ {
+ return;
+ }
+
+ const SkOTTableGridAndScanProcedure::GaspRange* rangeTable =
+ SkTAfter<const SkOTTableGridAndScanProcedure::GaspRange>(gasp.get());
+ int minPPEM = -1;
+ for (uint16_t i = 0; i < numRanges; ++i, ++rangeTable) {
+ int maxPPEM = SkEndianSwap16(rangeTable->maxPPEM);
+ // Test that the size is in range and the range is gridfit only.
+ if (minPPEM < size && size <= maxPPEM &&
+ rangeTable->flags.raw.value == SkOTTableGridAndScanProcedure::GaspRange::behavior::Raw::GridfitMask)
+ {
+ range->min = minPPEM + 1;
+ range->max = maxPPEM;
+ return;
+ }
+ minPPEM = maxPPEM;
+ }
+}
+
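+/** Returns true if the typeface has an embedded bitmap strike whose square
+ *  ppem falls within 'range': either an EBLC size record covering at least a
+ *  few glyphs, or an EBSC scaling record for such a size.
+ */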
+static bool has_bitmap_strike(DWriteFontTypeface* typeface, PPEMRange range) {
+ SkAutoExclusive l(DWriteFactoryMutex);
+ {
+ AutoTDWriteTable<SkOTTableEmbeddedBitmapLocation> eblc(typeface->fDWriteFontFace.get());
+ if (!eblc.fExists) {
+ return false;
+ }
+ if (eblc.fSize < sizeof(SkOTTableEmbeddedBitmapLocation)) {
+ return false;
+ }
+ if (eblc->version != SkOTTableEmbeddedBitmapLocation::version_initial) {
+ return false;
+ }
+
+ uint32_t numSizes = SkEndianSwap32(eblc->numSizes);
+ if (numSizes > 1024 ||
+ eblc.fSize < sizeof(SkOTTableEmbeddedBitmapLocation) +
+ sizeof(SkOTTableEmbeddedBitmapLocation::BitmapSizeTable) * numSizes)
+ {
+ return false;
+ }
+
+ const SkOTTableEmbeddedBitmapLocation::BitmapSizeTable* sizeTable =
+ SkTAfter<const SkOTTableEmbeddedBitmapLocation::BitmapSizeTable>(eblc.get());
+ for (uint32_t i = 0; i < numSizes; ++i, ++sizeTable) {
+ if (sizeTable->ppemX == sizeTable->ppemY &&
+ range.min <= sizeTable->ppemX && sizeTable->ppemX <= range.max)
+ {
+ // TODO: determine if we should dig through IndexSubTableArray/IndexSubTable
+ // to determine the actual number of glyphs with bitmaps.
+
+ // TODO: Ensure that the bitmaps actually cover a significant portion of the strike.
+
+ // TODO: Ensure that the bitmaps are bi-level?
+ if (sizeTable->endGlyphIndex >= sizeTable->startGlyphIndex + 3) {
+ return true;
+ }
+ }
+ }
+ }
+
+ {
+ AutoTDWriteTable<SkOTTableEmbeddedBitmapScaling> ebsc(typeface->fDWriteFontFace.get());
+ if (!ebsc.fExists) {
+ return false;
+ }
+ if (ebsc.fSize < sizeof(SkOTTableEmbeddedBitmapScaling)) {
+ return false;
+ }
+ if (ebsc->version != SkOTTableEmbeddedBitmapScaling::version_initial) {
+ return false;
+ }
+
+ uint32_t numSizes = SkEndianSwap32(ebsc->numSizes);
+ if (numSizes > 1024 ||
+ ebsc.fSize < sizeof(SkOTTableEmbeddedBitmapScaling) +
+ sizeof(SkOTTableEmbeddedBitmapScaling::BitmapScaleTable) * numSizes)
+ {
+ return false;
+ }
+
+ const SkOTTableEmbeddedBitmapScaling::BitmapScaleTable* scaleTable =
+ SkTAfter<const SkOTTableEmbeddedBitmapScaling::BitmapScaleTable>(ebsc.get());
+ for (uint32_t i = 0; i < numSizes; ++i, ++scaleTable) {
+ if (scaleTable->ppemX == scaleTable->ppemY &&
+ range.min <= scaleTable->ppemX && scaleTable->ppemX <= range.max) {
+                // EBSC tables are normally only found in bitmap-only fonts.
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static bool both_zero(SkScalar a, SkScalar b) {
+ return 0 == a && 0 == b;
+}
+
+// returns false if there is any non-90-rotation or skew
+static bool is_axis_aligned(const SkScalerContext::Rec& rec) {
+ return 0 == rec.fPreSkewX &&
+ (both_zero(rec.fPost2x2[0][1], rec.fPost2x2[1][0]) ||
+ both_zero(rec.fPost2x2[0][0], rec.fPost2x2[1][1]));
+}
+
+SkScalerContext_DW::SkScalerContext_DW(DWriteFontTypeface* typeface,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : SkScalerContext(typeface, effects, desc)
+ , fTypeface(SkRef(typeface))
+ , fGlyphCount(-1) {
+
+#if SK_HAS_DWRITE_2_H
+ fTypeface->fFactory->QueryInterface<IDWriteFactory2>(&fFactory2);
+
+ SkTScopedComPtr<IDWriteFontFace2> fontFace2;
+ fTypeface->fDWriteFontFace->QueryInterface<IDWriteFontFace2>(&fontFace2);
+ fIsColorFont = fFactory2.get() && fontFace2.get() && fontFace2->IsColorFont();
+#endif
+
+ IDWriteFactory* factory = sk_get_dwrite_factory();
+ if (factory != nullptr) {
+ HRVM(factory->CreateRenderingParams(&fDefaultRenderingParams),
+ "Could not create default rendering params");
+ }
+
+    // In general, all glyphs should use DWriteFontFace::GetRecommendedRenderingMode,
+    // except when bi-level rendering is requested or there are embedded
+    // bi-level bitmaps (and the embedded bitmap flag is set and no rotation).
+    //
+    // DirectWrite's IDWriteFontFace::GetRecommendedRenderingMode does not handle
+    // the embedded bitmap case. As a result, determine the actual size of the
+    // text and then see if there are any embedded bi-level bitmaps of that size.
+    // If there are, then force bitmaps by requesting bi-level rendering.
+ //
+ // FreeType allows for separate ppemX and ppemY, but DirectWrite assumes
+ // square pixels and only uses ppemY. Therefore the transform must track any
+ // non-uniform x-scale.
+ //
+ // Also, rotated glyphs should have the same absolute advance widths as
+ // horizontal glyphs and the subpixel flag should not affect glyph shapes.
+
+ SkVector scale;
+ SkMatrix GsA;
+ fRec.computeMatrices(SkScalerContextRec::kVertical_PreMatrixScale,
+ &scale, &fSkXform, &GsA, &fG_inv);
+
+ fXform.m11 = SkScalarToFloat(fSkXform.getScaleX());
+ fXform.m12 = SkScalarToFloat(fSkXform.getSkewY());
+ fXform.m21 = SkScalarToFloat(fSkXform.getSkewX());
+ fXform.m22 = SkScalarToFloat(fSkXform.getScaleY());
+ fXform.dx = 0;
+ fXform.dy = 0;
+
+ fGsA.m11 = SkScalarToFloat(GsA.get(SkMatrix::kMScaleX));
+ fGsA.m12 = SkScalarToFloat(GsA.get(SkMatrix::kMSkewY)); // This should be ~0.
+ fGsA.m21 = SkScalarToFloat(GsA.get(SkMatrix::kMSkewX));
+ fGsA.m22 = SkScalarToFloat(GsA.get(SkMatrix::kMScaleY));
+ fGsA.dx = 0;
+ fGsA.dy = 0;
+
+ // realTextSize is the actual device size we want (as opposed to the size the user requested).
+ // gdiTextSize is the size we request when GDI compatible.
+ // If the scale is negative, this means the matrix will do the flip anyway.
+ const SkScalar realTextSize = scale.fY;
+ // Due to floating point math, the lower bits are suspect. Round carefully.
+ SkScalar gdiTextSize = SkScalarRoundToScalar(realTextSize * 64.0f) / 64.0f;
+ if (gdiTextSize == 0) {
+ gdiTextSize = SK_Scalar1;
+ }
+
+ bool bitmapRequested = SkToBool(fRec.fFlags & SkScalerContext::kEmbeddedBitmapText_Flag);
+ bool treatLikeBitmap = false;
+ bool axisAlignedBitmap = false;
+ if (bitmapRequested) {
+ // When embedded bitmaps are requested, treat the entire range like
+ // a bitmap strike if the range is gridfit only and contains a bitmap.
+ int bitmapPPEM = SkScalarTruncToInt(gdiTextSize);
+ PPEMRange range = { bitmapPPEM, bitmapPPEM };
+ expand_range_if_gridfit_only(typeface, bitmapPPEM, &range);
+ treatLikeBitmap = has_bitmap_strike(typeface, range);
+
+ axisAlignedBitmap = is_axis_aligned(fRec);
+ }
+
+ // If the user requested aliased, do so with aliased compatible metrics.
+ if (SkMask::kBW_Format == fRec.fMaskFormat) {
+ fTextSizeRender = gdiTextSize;
+ fRenderingMode = DWRITE_RENDERING_MODE_ALIASED;
+ fTextureType = DWRITE_TEXTURE_ALIASED_1x1;
+ fTextSizeMeasure = gdiTextSize;
+ fMeasuringMode = DWRITE_MEASURING_MODE_GDI_CLASSIC;
+
+ // If we can use a bitmap, use gdi classic rendering and measurement.
+ // This will not always provide a bitmap, but matches expected behavior.
+ } else if ((treatLikeBitmap && axisAlignedBitmap) || typeface->ForceGDI()) {
+ fTextSizeRender = gdiTextSize;
+ fRenderingMode = DWRITE_RENDERING_MODE_CLEARTYPE_GDI_CLASSIC;
+ fTextureType = DWRITE_TEXTURE_CLEARTYPE_3x1;
+ fTextSizeMeasure = gdiTextSize;
+ fMeasuringMode = DWRITE_MEASURING_MODE_GDI_CLASSIC;
+
+ // If rotated but the horizontal text could have used a bitmap,
+ // render high quality rotated glyphs but measure using bitmap metrics.
+ } else if (treatLikeBitmap) {
+ fTextSizeRender = gdiTextSize;
+ fRenderingMode = DWRITE_RENDERING_MODE_CLEARTYPE_NATURAL_SYMMETRIC;
+ fTextureType = DWRITE_TEXTURE_CLEARTYPE_3x1;
+ fTextSizeMeasure = gdiTextSize;
+ fMeasuringMode = DWRITE_MEASURING_MODE_GDI_CLASSIC;
+
+    // Fonts that have hints but no gasp table get non-symmetric rendering.
+    // Usually such fonts have low-quality hints which were never tested
+    // with anything but GDI ClearType classic. Such fonts often rely on
+    // dropout control in the y direction in order to be legible.
+ } else if (is_hinted_without_gasp(typeface)) {
+ fTextSizeRender = gdiTextSize;
+ fRenderingMode = DWRITE_RENDERING_MODE_CLEARTYPE_NATURAL;
+ fTextureType = DWRITE_TEXTURE_CLEARTYPE_3x1;
+ fTextSizeMeasure = realTextSize;
+ fMeasuringMode = DWRITE_MEASURING_MODE_NATURAL;
+
+ // The normal case is to use the recommended rendering mode
+ } else {
+ fTextSizeRender = realTextSize;
+ fTextureType = DWRITE_TEXTURE_CLEARTYPE_3x1;
+ fTextSizeMeasure = realTextSize;
+ fMeasuringMode = DWRITE_MEASURING_MODE_NATURAL;
+
+ if (!SUCCEEDED(fTypeface->fDWriteFontFace->GetRecommendedRenderingMode(
+ fTextSizeRender,
+ 1.0f,
+ fMeasuringMode,
+ fDefaultRenderingParams.get(),
+ &fRenderingMode))) {
+ fRenderingMode = DWRITE_RENDERING_MODE_CLEARTYPE_NATURAL_SYMMETRIC;
+ }
+
+ // We don't support outline mode right now.
+ if (fRenderingMode == DWRITE_RENDERING_MODE_OUTLINE) {
+ fRenderingMode = DWRITE_RENDERING_MODE_CLEARTYPE_NATURAL_SYMMETRIC;
+ }
+ }
+
+ if (this->isSubpixel()) {
+ fTextSizeMeasure = realTextSize;
+ fMeasuringMode = DWRITE_MEASURING_MODE_NATURAL;
+ }
+}
+
+SkScalerContext_DW::~SkScalerContext_DW() {
+}
+
+unsigned SkScalerContext_DW::generateGlyphCount() {
+ if (fGlyphCount < 0) {
+ fGlyphCount = fTypeface->fDWriteFontFace->GetGlyphCount();
+ }
+ return fGlyphCount;
+}
+
+uint16_t SkScalerContext_DW::generateCharToGlyph(SkUnichar uni) {
+ uint16_t index = 0;
+ fTypeface->fDWriteFontFace->GetGlyphIndices(reinterpret_cast<UINT32*>(&uni), 1, &index);
+ return index;
+}
+
+void SkScalerContext_DW::generateAdvance(SkGlyph* glyph) {
+ //Delta is the difference between the right/left side bearing metric
+ //and where the right/left side bearing ends up after hinting.
+ //DirectWrite does not provide this information.
+ glyph->fRsbDelta = 0;
+ glyph->fLsbDelta = 0;
+
+ glyph->fAdvanceX = 0;
+ glyph->fAdvanceY = 0;
+
+ uint16_t glyphId = glyph->getGlyphID();
+ DWRITE_GLYPH_METRICS gm;
+
+ if (DWRITE_MEASURING_MODE_GDI_CLASSIC == fMeasuringMode ||
+ DWRITE_MEASURING_MODE_GDI_NATURAL == fMeasuringMode)
+ {
+ SkAutoExclusive l(DWriteFactoryMutex);
+ HRVM(fTypeface->fDWriteFontFace->GetGdiCompatibleGlyphMetrics(
+ fTextSizeMeasure,
+ 1.0f, // pixelsPerDip
+ &fGsA,
+ DWRITE_MEASURING_MODE_GDI_NATURAL == fMeasuringMode,
+ &glyphId, 1,
+ &gm),
+ "Could not get gdi compatible glyph metrics.");
+ } else {
+ SkAutoExclusive l(DWriteFactoryMutex);
+ HRVM(fTypeface->fDWriteFontFace->GetDesignGlyphMetrics(&glyphId, 1, &gm),
+ "Could not get design metrics.");
+ }
+
+ DWRITE_FONT_METRICS dwfm;
+ {
+ Shared l(DWriteFactoryMutex);
+ fTypeface->fDWriteFontFace->GetMetrics(&dwfm);
+ }
+ SkScalar advanceX = SkScalarMulDiv(fTextSizeMeasure,
+ SkIntToScalar(gm.advanceWidth),
+ SkIntToScalar(dwfm.designUnitsPerEm));
+
+ SkVector vecs[1] = { { advanceX, 0 } };
+ if (DWRITE_MEASURING_MODE_GDI_CLASSIC == fMeasuringMode ||
+ DWRITE_MEASURING_MODE_GDI_NATURAL == fMeasuringMode)
+ {
+ // DirectWrite produced 'compatible' metrics, but while close,
+ // the end result is not always an integer as it would be with GDI.
+ vecs[0].fX = SkScalarRoundToScalar(advanceX);
+ fG_inv.mapVectors(vecs, SK_ARRAY_COUNT(vecs));
+ } else {
+ fSkXform.mapVectors(vecs, SK_ARRAY_COUNT(vecs));
+ }
+
+ glyph->fAdvanceX = SkScalarToFloat(vecs[0].fX);
+ glyph->fAdvanceY = SkScalarToFloat(vecs[0].fY);
+}
+
+HRESULT SkScalerContext_DW::getBoundingBox(SkGlyph* glyph,
+ DWRITE_RENDERING_MODE renderingMode,
+ DWRITE_TEXTURE_TYPE textureType,
+ RECT* bbox)
+{
+ //Measure raster size.
+ fXform.dx = SkFixedToFloat(glyph->getSubXFixed());
+ fXform.dy = SkFixedToFloat(glyph->getSubYFixed());
+
+ FLOAT advance = 0;
+
+ UINT16 glyphId = glyph->getGlyphID();
+
+ DWRITE_GLYPH_OFFSET offset;
+ offset.advanceOffset = 0.0f;
+ offset.ascenderOffset = 0.0f;
+
+ DWRITE_GLYPH_RUN run;
+ run.glyphCount = 1;
+ run.glyphAdvances = &advance;
+ run.fontFace = fTypeface->fDWriteFontFace.get();
+ run.fontEmSize = SkScalarToFloat(fTextSizeRender);
+ run.bidiLevel = 0;
+ run.glyphIndices = &glyphId;
+ run.isSideways = FALSE;
+ run.glyphOffsets = &offset;
+
+ SkTScopedComPtr<IDWriteGlyphRunAnalysis> glyphRunAnalysis;
+ {
+ SkAutoExclusive l(DWriteFactoryMutex);
+ HRM(fTypeface->fFactory->CreateGlyphRunAnalysis(
+ &run,
+ 1.0f, // pixelsPerDip,
+ &fXform,
+ renderingMode,
+ fMeasuringMode,
+ 0.0f, // baselineOriginX,
+ 0.0f, // baselineOriginY,
+ &glyphRunAnalysis),
+ "Could not create glyph run analysis.");
+ }
+ {
+ Shared l(DWriteFactoryMutex);
+ HRM(glyphRunAnalysis->GetAlphaTextureBounds(textureType, bbox),
+ "Could not get texture bounds.");
+ }
+ return S_OK;
+}
+
+/** GetAlphaTextureBounds succeeds but sometimes returns empty bounds like
+ * { 0x80000000, 0x80000000, 0x80000000, 0x80000000 }
+ * for small, but not quite zero, sized glyphs.
+ * Only set as non-empty if the returned bounds are non-empty.
+ */
+static bool glyph_check_and_set_bounds(SkGlyph* glyph, const RECT& bbox) {
+ if (bbox.left >= bbox.right || bbox.top >= bbox.bottom) {
+ return false;
+ }
+ glyph->fWidth = SkToU16(bbox.right - bbox.left);
+ glyph->fHeight = SkToU16(bbox.bottom - bbox.top);
+ glyph->fLeft = SkToS16(bbox.left);
+ glyph->fTop = SkToS16(bbox.top);
+ return true;
+}
+
+bool SkScalerContext_DW::isColorGlyph(const SkGlyph& glyph) {
+#if SK_HAS_DWRITE_2_H
+ SkTScopedComPtr<IDWriteColorGlyphRunEnumerator> colorLayer;
+ if (getColorGlyphRun(glyph, &colorLayer)) {
+ return true;
+ }
+#endif
+ return false;
+}
+
+#if SK_HAS_DWRITE_2_H
+bool SkScalerContext_DW::getColorGlyphRun(const SkGlyph& glyph,
+ IDWriteColorGlyphRunEnumerator** colorGlyph)
+{
+ FLOAT advance = 0;
+ UINT16 glyphId = glyph.getGlyphID();
+
+ DWRITE_GLYPH_OFFSET offset;
+ offset.advanceOffset = 0.0f;
+ offset.ascenderOffset = 0.0f;
+
+ DWRITE_GLYPH_RUN run;
+ run.glyphCount = 1;
+ run.glyphAdvances = &advance;
+ run.fontFace = fTypeface->fDWriteFontFace.get();
+ run.fontEmSize = SkScalarToFloat(fTextSizeRender);
+ run.bidiLevel = 0;
+ run.glyphIndices = &glyphId;
+ run.isSideways = FALSE;
+ run.glyphOffsets = &offset;
+
+ HRESULT hr = fFactory2->TranslateColorGlyphRun(
+ 0, 0, &run, nullptr, fMeasuringMode, &fXform, 0, colorGlyph);
+ if (hr == DWRITE_E_NOCOLOR) {
+ return false;
+ }
+ HRBM(hr, "Failed to translate color glyph run");
+ return true;
+}
+#endif
+
+void SkScalerContext_DW::generateMetrics(SkGlyph* glyph) {
+ glyph->fWidth = 0;
+ glyph->fHeight = 0;
+ glyph->fLeft = 0;
+ glyph->fTop = 0;
+
+ this->generateAdvance(glyph);
+
+#if SK_HAS_DWRITE_2_H
+ if (fIsColorFont && isColorGlyph(*glyph)) {
+ glyph->fMaskFormat = SkMask::kARGB32_Format;
+ }
+#endif
+
+ RECT bbox;
+ HRVM(this->getBoundingBox(glyph, fRenderingMode, fTextureType, &bbox),
+ "Requested bounding box could not be determined.");
+
+ if (glyph_check_and_set_bounds(glyph, bbox)) {
+ return;
+ }
+
+ // GetAlphaTextureBounds succeeds but returns an empty RECT if there are no
+ // glyphs of the specified texture type. When this happens, try with the
+ // alternate texture type.
+ if (DWRITE_TEXTURE_CLEARTYPE_3x1 == fTextureType) {
+ HRVM(this->getBoundingBox(glyph,
+ DWRITE_RENDERING_MODE_ALIASED,
+ DWRITE_TEXTURE_ALIASED_1x1,
+ &bbox),
+ "Fallback bounding box could not be determined.");
+ if (glyph_check_and_set_bounds(glyph, bbox)) {
+ glyph->fForceBW = 1;
+ }
+ }
+ // TODO: handle the case where a request for DWRITE_TEXTURE_ALIASED_1x1
+ // fails, and try DWRITE_TEXTURE_CLEARTYPE_3x1.
+}
+
+void SkScalerContext_DW::generateFontMetrics(SkPaint::FontMetrics* metrics) {
+ if (nullptr == metrics) {
+ return;
+ }
+
+ sk_bzero(metrics, sizeof(*metrics));
+
+ DWRITE_FONT_METRICS dwfm;
+ if (DWRITE_MEASURING_MODE_GDI_CLASSIC == fMeasuringMode ||
+ DWRITE_MEASURING_MODE_GDI_NATURAL == fMeasuringMode)
+ {
+ fTypeface->fDWriteFontFace->GetGdiCompatibleMetrics(
+ fTextSizeRender,
+ 1.0f, // pixelsPerDip
+ &fXform,
+ &dwfm);
+ } else {
+ fTypeface->fDWriteFontFace->GetMetrics(&dwfm);
+ }
+
+ SkScalar upem = SkIntToScalar(dwfm.designUnitsPerEm);
+
+ metrics->fAscent = -fTextSizeRender * SkIntToScalar(dwfm.ascent) / upem;
+ metrics->fDescent = fTextSizeRender * SkIntToScalar(dwfm.descent) / upem;
+ metrics->fLeading = fTextSizeRender * SkIntToScalar(dwfm.lineGap) / upem;
+ metrics->fXHeight = fTextSizeRender * SkIntToScalar(dwfm.xHeight) / upem;
+ metrics->fUnderlineThickness = fTextSizeRender * SkIntToScalar(dwfm.underlineThickness) / upem;
+ metrics->fUnderlinePosition = -(fTextSizeRender * SkIntToScalar(dwfm.underlinePosition) / upem);
+
+ metrics->fFlags |= SkPaint::FontMetrics::kUnderlineThinknessIsValid_Flag;
+ metrics->fFlags |= SkPaint::FontMetrics::kUnderlinePositionIsValid_Flag;
+
+#if SK_HAS_DWRITE_1_H
+ if (fTypeface->fDWriteFontFace1.get()) {
+ DWRITE_FONT_METRICS1 dwfm1;
+ fTypeface->fDWriteFontFace1->GetMetrics(&dwfm1);
+ metrics->fTop = -fTextSizeRender * SkIntToScalar(dwfm1.glyphBoxTop) / upem;
+ metrics->fBottom = -fTextSizeRender * SkIntToScalar(dwfm1.glyphBoxBottom) / upem;
+ metrics->fXMin = fTextSizeRender * SkIntToScalar(dwfm1.glyphBoxLeft) / upem;
+ metrics->fXMax = fTextSizeRender * SkIntToScalar(dwfm1.glyphBoxRight) / upem;
+
+ metrics->fMaxCharWidth = metrics->fXMax - metrics->fXMin;
+ return;
+ }
+#else
+# pragma message("No dwrite_1.h is available, font metrics may be affected.")
+#endif
+
+ AutoTDWriteTable<SkOTTableHead> head(fTypeface->fDWriteFontFace.get());
+ if (head.fExists &&
+ head.fSize >= sizeof(SkOTTableHead) &&
+ head->version == SkOTTableHead::version1)
+ {
+ metrics->fTop = -fTextSizeRender * (int16_t)SkEndian_SwapBE16(head->yMax) / upem;
+ metrics->fBottom = -fTextSizeRender * (int16_t)SkEndian_SwapBE16(head->yMin) / upem;
+ metrics->fXMin = fTextSizeRender * (int16_t)SkEndian_SwapBE16(head->xMin) / upem;
+ metrics->fXMax = fTextSizeRender * (int16_t)SkEndian_SwapBE16(head->xMax) / upem;
+
+ metrics->fMaxCharWidth = metrics->fXMax - metrics->fXMin;
+ return;
+ }
+
+ metrics->fTop = metrics->fAscent;
+ metrics->fBottom = metrics->fDescent;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkColorPriv.h"
+
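+/** Packs a DirectWrite aliased texture (one byte per pixel) into Skia's
+ *  1-bit-per-pixel kBW mask by keeping the high bit of each source byte,
+ *  eight source pixels per destination byte.
+ */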
+static void bilevel_to_bw(const uint8_t* SK_RESTRICT src, const SkGlyph& glyph) {
+ const int width = glyph.fWidth;
+ const size_t dstRB = (width + 7) >> 3;
+ uint8_t* SK_RESTRICT dst = static_cast<uint8_t*>(glyph.fImage);
+
+ int byteCount = width >> 3;
+ int bitCount = width & 7;
+
+ for (int y = 0; y < glyph.fHeight; ++y) {
+ if (byteCount > 0) {
+ for (int i = 0; i < byteCount; ++i) {
+ unsigned byte = 0;
+ byte |= src[0] & (1 << 7);
+ byte |= src[1] & (1 << 6);
+ byte |= src[2] & (1 << 5);
+ byte |= src[3] & (1 << 4);
+ byte |= src[4] & (1 << 3);
+ byte |= src[5] & (1 << 2);
+ byte |= src[6] & (1 << 1);
+ byte |= src[7] & (1 << 0);
+ dst[i] = byte;
+ src += 8;
+ }
+ }
+ if (bitCount > 0) {
+ unsigned byte = 0;
+ unsigned mask = 0x80;
+ for (int i = 0; i < bitCount; i++) {
+ byte |= (src[i]) & mask;
+ mask >>= 1;
+ }
+ dst[byteCount] = byte;
+ }
+ src += bitCount;
+ dst += dstRB;
+ }
+}
+
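+/** Converts a DirectWrite ClearType texture (three bytes per pixel) to an A8
+ *  mask using only the green channel, optionally applying the preblend table.
+ */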
+template<bool APPLY_PREBLEND>
+static void rgb_to_a8(const uint8_t* SK_RESTRICT src, const SkGlyph& glyph, const uint8_t* table8) {
+ const size_t dstRB = glyph.rowBytes();
+ const U16CPU width = glyph.fWidth;
+ uint8_t* SK_RESTRICT dst = static_cast<uint8_t*>(glyph.fImage);
+
+ for (U16CPU y = 0; y < glyph.fHeight; y++) {
+ for (U16CPU i = 0; i < width; i++) {
+ U8CPU g = src[1];
+ src += 3;
+
+            // Ignore the R and B channels; using only G looks closest to what
+            // D2D produces with grayscale AA. There is no way to get a
+            // grayscale AA alpha texture from a glyph run directly.
+ dst[i] = sk_apply_lut_if<APPLY_PREBLEND>(g, table8);
+ }
+ dst = (uint8_t*)((char*)dst + dstRB);
+ }
+}
+
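+/** Converts a DirectWrite ClearType texture (three bytes per pixel) to an
+ *  LCD16 mask, optionally applying the per-channel preblend tables and
+ *  honoring RGB versus BGR subpixel order via the RGB template parameter.
+ */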
+template<bool APPLY_PREBLEND, bool RGB>
+static void rgb_to_lcd16(const uint8_t* SK_RESTRICT src, const SkGlyph& glyph,
+ const uint8_t* tableR, const uint8_t* tableG, const uint8_t* tableB) {
+ const size_t dstRB = glyph.rowBytes();
+ const U16CPU width = glyph.fWidth;
+ uint16_t* SK_RESTRICT dst = static_cast<uint16_t*>(glyph.fImage);
+
+ for (U16CPU y = 0; y < glyph.fHeight; y++) {
+ for (U16CPU i = 0; i < width; i++) {
+ U8CPU r, g, b;
+ if (RGB) {
+ r = sk_apply_lut_if<APPLY_PREBLEND>(*(src++), tableR);
+ g = sk_apply_lut_if<APPLY_PREBLEND>(*(src++), tableG);
+ b = sk_apply_lut_if<APPLY_PREBLEND>(*(src++), tableB);
+ } else {
+ b = sk_apply_lut_if<APPLY_PREBLEND>(*(src++), tableB);
+ g = sk_apply_lut_if<APPLY_PREBLEND>(*(src++), tableG);
+ r = sk_apply_lut_if<APPLY_PREBLEND>(*(src++), tableR);
+ }
+ dst[i] = SkPack888ToRGB16(r, g, b);
+ }
+ dst = (uint16_t*)((char*)dst + dstRB);
+ }
+}
+
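+/** Rasterizes a single glyph into fBits with IDWriteGlyphRunAnalysis, reusing
+ *  the bounds already computed for the glyph, and returns a pointer to the
+ *  texture data (one byte per pixel when aliased, three bytes otherwise).
+ */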
+const void* SkScalerContext_DW::drawDWMask(const SkGlyph& glyph,
+ DWRITE_RENDERING_MODE renderingMode,
+ DWRITE_TEXTURE_TYPE textureType)
+{
+ int sizeNeeded = glyph.fWidth * glyph.fHeight;
+ if (DWRITE_RENDERING_MODE_ALIASED != renderingMode) {
+ sizeNeeded *= 3;
+ }
+ if (sizeNeeded > fBits.count()) {
+ fBits.setCount(sizeNeeded);
+ }
+
+ // erase
+ memset(fBits.begin(), 0, sizeNeeded);
+
+ fXform.dx = SkFixedToFloat(glyph.getSubXFixed());
+ fXform.dy = SkFixedToFloat(glyph.getSubYFixed());
+
+ FLOAT advance = 0.0f;
+
+ UINT16 index = glyph.getGlyphID();
+
+ DWRITE_GLYPH_OFFSET offset;
+ offset.advanceOffset = 0.0f;
+ offset.ascenderOffset = 0.0f;
+
+ DWRITE_GLYPH_RUN run;
+ run.glyphCount = 1;
+ run.glyphAdvances = &advance;
+ run.fontFace = fTypeface->fDWriteFontFace.get();
+ run.fontEmSize = SkScalarToFloat(fTextSizeRender);
+ run.bidiLevel = 0;
+ run.glyphIndices = &index;
+ run.isSideways = FALSE;
+ run.glyphOffsets = &offset;
+    {
+        SkTScopedComPtr<IDWriteGlyphRunAnalysis> glyphRunAnalysis;
+ {
+ SkAutoExclusive l(DWriteFactoryMutex);
+ HRNM(fTypeface->fFactory->CreateGlyphRunAnalysis(&run,
+ 1.0f, // pixelsPerDip,
+ &fXform,
+ renderingMode,
+ fMeasuringMode,
+ 0.0f, // baselineOriginX,
+ 0.0f, // baselineOriginY,
+ &glyphRunAnalysis),
+ "Could not create glyph run analysis.");
+ }
+ //NOTE: this assumes that the glyph has already been measured
+        //with the exact same glyph run analysis.
+ RECT bbox;
+ bbox.left = glyph.fLeft;
+ bbox.top = glyph.fTop;
+ bbox.right = glyph.fLeft + glyph.fWidth;
+ bbox.bottom = glyph.fTop + glyph.fHeight;
+ {
+ Shared l(DWriteFactoryMutex);
+ HRNM(glyphRunAnalysis->CreateAlphaTexture(textureType,
+ &bbox,
+ fBits.begin(),
+ sizeNeeded),
+ "Could not draw mask.");
+ }
+ }
+ return fBits.begin();
+}
+
+#if SK_HAS_DWRITE_2_H
+void SkScalerContext_DW::generateColorGlyphImage(const SkGlyph& glyph) {
+ SkASSERT(isColorGlyph(glyph));
+ SkASSERT(glyph.fMaskFormat == SkMask::Format::kARGB32_Format);
+
+ memset(glyph.fImage, 0, glyph.computeImageSize());
+
+ SkTScopedComPtr<IDWriteColorGlyphRunEnumerator> colorLayers;
+ getColorGlyphRun(glyph, &colorLayers);
+ SkASSERT(colorLayers.get());
+
+ SkMatrix matrix = fSkXform;
+ matrix.postTranslate(-SkIntToScalar(glyph.fLeft), -SkIntToScalar(glyph.fTop));
+ SkRasterClip rc(SkIRect::MakeWH(glyph.fWidth, glyph.fHeight));
+ SkDraw draw;
+ draw.fDst = SkPixmap(SkImageInfo::MakeN32(glyph.fWidth, glyph.fHeight, kPremul_SkAlphaType),
+ glyph.fImage,
+ glyph.ComputeRowBytes(glyph.fWidth, SkMask::Format::kARGB32_Format));
+ draw.fMatrix = &matrix;
+ draw.fRC = &rc;
+
+ SkPaint paint;
+ if (fRenderingMode != DWRITE_RENDERING_MODE_ALIASED) {
+ paint.setFlags(SkPaint::Flags::kAntiAlias_Flag);
+ }
+
+ BOOL hasNextRun = FALSE;
+ while (SUCCEEDED(colorLayers->MoveNext(&hasNextRun)) && hasNextRun) {
+ const DWRITE_COLOR_GLYPH_RUN* colorGlyph;
+ HRVM(colorLayers->GetCurrentRun(&colorGlyph), "Could not get current color glyph run");
+
+ SkColor color;
+ if (colorGlyph->paletteIndex != 0xffff) {
+ color = SkColorSetARGB(SkFloatToIntRound(colorGlyph->runColor.a * 255),
+ SkFloatToIntRound(colorGlyph->runColor.r * 255),
+ SkFloatToIntRound(colorGlyph->runColor.g * 255),
+ SkFloatToIntRound(colorGlyph->runColor.b * 255));
+ } else {
+            // If all components of runColor are 0 or (equivalently) paletteIndex is 0xFFFF then
+            // the 'current brush' is used. fRec.getLuminanceColor() is roughly what is wanted
+            // here, but not quite: it will often be the wrong value because it wasn't designed
+            // for this.
+ // TODO: implement this fully, bug.skia.org/5788
+ color = fRec.getLuminanceColor();
+ }
+ paint.setColor(color);
+
+ SkPath path;
+ SkTScopedComPtr<IDWriteGeometrySink> geometryToPath;
+ HRVM(SkDWriteGeometrySink::Create(&path, &geometryToPath),
+ "Could not create geometry to path converter.");
+ {
+ SkAutoExclusive l(DWriteFactoryMutex);
+ HRVM(colorGlyph->glyphRun.fontFace->GetGlyphRunOutline(
+ colorGlyph->glyphRun.fontEmSize,
+ colorGlyph->glyphRun.glyphIndices,
+ colorGlyph->glyphRun.glyphAdvances,
+ colorGlyph->glyphRun.glyphOffsets,
+ colorGlyph->glyphRun.glyphCount,
+ colorGlyph->glyphRun.isSideways,
+ colorGlyph->glyphRun.bidiLevel % 2, //rtl
+ geometryToPath.get()),
+ "Could not create glyph outline.");
+ }
+ draw.drawPath(path, paint, nullptr, true /* pathIsMutable */);
+ }
+}
+#endif
+
+void SkScalerContext_DW::generateImage(const SkGlyph& glyph) {
+ //Create the mask.
+ DWRITE_RENDERING_MODE renderingMode = fRenderingMode;
+ DWRITE_TEXTURE_TYPE textureType = fTextureType;
+ if (glyph.fForceBW) {
+ renderingMode = DWRITE_RENDERING_MODE_ALIASED;
+ textureType = DWRITE_TEXTURE_ALIASED_1x1;
+ }
+
+#if SK_HAS_DWRITE_2_H
+ if (SkMask::kARGB32_Format == glyph.fMaskFormat) {
+ generateColorGlyphImage(glyph);
+ return;
+ }
+#endif
+
+ const void* bits = this->drawDWMask(glyph, renderingMode, textureType);
+ if (!bits) {
+ sk_bzero(glyph.fImage, glyph.computeImageSize());
+ return;
+ }
+
+ //Copy the mask into the glyph.
+ const uint8_t* src = (const uint8_t*)bits;
+ if (DWRITE_RENDERING_MODE_ALIASED == renderingMode) {
+ bilevel_to_bw(src, glyph);
+ const_cast<SkGlyph&>(glyph).fMaskFormat = SkMask::kBW_Format;
+ } else if (!isLCD(fRec)) {
+ if (fPreBlend.isApplicable()) {
+ rgb_to_a8<true>(src, glyph, fPreBlend.fG);
+ } else {
+ rgb_to_a8<false>(src, glyph, fPreBlend.fG);
+ }
+ } else {
+ SkASSERT(SkMask::kLCD16_Format == glyph.fMaskFormat);
+ if (fPreBlend.isApplicable()) {
+ if (fRec.fFlags & SkScalerContext::kLCD_BGROrder_Flag) {
+ rgb_to_lcd16<true, false>(src, glyph, fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+ } else {
+ rgb_to_lcd16<true, true>(src, glyph, fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+ }
+ } else {
+ if (fRec.fFlags & SkScalerContext::kLCD_BGROrder_Flag) {
+ rgb_to_lcd16<false, false>(src, glyph, fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+ } else {
+ rgb_to_lcd16<false, true>(src, glyph, fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+ }
+ }
+ }
+}
+
+void SkScalerContext_DW::generatePath(const SkGlyph& glyph, SkPath* path) {
+ SkASSERT(path);
+
+ path->reset();
+
+ SkTScopedComPtr<IDWriteGeometrySink> geometryToPath;
+ HRVM(SkDWriteGeometrySink::Create(path, &geometryToPath),
+ "Could not create geometry to path converter.");
+ uint16_t glyphId = glyph.getGlyphID();
+ {
+ SkAutoExclusive l(DWriteFactoryMutex);
+ //TODO: convert to<->from DIUs? This would make a difference if hinting.
+        //It may not be needed; it appears that DirectWrite only hints at em size.
+ HRVM(fTypeface->fDWriteFontFace->GetGlyphRunOutline(SkScalarToFloat(fTextSizeRender),
+ &glyphId,
+ nullptr, //advances
+ nullptr, //offsets
+ 1, //num glyphs
+ FALSE, //sideways
+ FALSE, //rtl
+ geometryToPath.get()),
+ "Could not create glyph outline.");
+ }
+
+ path->transform(fSkXform);
+}
+
+#endif//defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/ports/SkScalerContext_win_dw.h b/gfx/skia/skia/src/ports/SkScalerContext_win_dw.h
new file mode 100644
index 000000000..f2a7e255b
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkScalerContext_win_dw.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkScalarContext_win_dw_DEFINED
+#define SkScalarContext_win_dw_DEFINED
+
+#include "SkScalar.h"
+#include "SkScalerContext.h"
+#include "SkTypeface_win_dw.h"
+#include "SkTypes.h"
+
+#include <dwrite.h>
+#if SK_HAS_DWRITE_2_H
+#include <dwrite_2.h>
+#endif
+
+class SkGlyph;
+class SkDescriptor;
+
+class SkScalerContext_DW : public SkScalerContext {
+public:
+ SkScalerContext_DW(DWriteFontTypeface*, const SkScalerContextEffects&, const SkDescriptor*);
+ virtual ~SkScalerContext_DW();
+
+protected:
+ unsigned generateGlyphCount() override;
+ uint16_t generateCharToGlyph(SkUnichar uni) override;
+ void generateAdvance(SkGlyph* glyph) override;
+ void generateMetrics(SkGlyph* glyph) override;
+ void generateImage(const SkGlyph& glyph) override;
+ void generatePath(const SkGlyph& glyph, SkPath* path) override;
+ void generateFontMetrics(SkPaint::FontMetrics*) override;
+
+private:
+ const void* drawDWMask(const SkGlyph& glyph,
+ DWRITE_RENDERING_MODE renderingMode,
+ DWRITE_TEXTURE_TYPE textureType);
+
+ HRESULT getBoundingBox(SkGlyph* glyph,
+ DWRITE_RENDERING_MODE renderingMode,
+ DWRITE_TEXTURE_TYPE textureType,
+ RECT* bbox);
+
+ bool isColorGlyph(const SkGlyph& glyph);
+
+#if SK_HAS_DWRITE_2_H
+ bool getColorGlyphRun(const SkGlyph& glyph, IDWriteColorGlyphRunEnumerator** colorGlyph);
+
+ void generateColorGlyphImage(const SkGlyph& glyph);
+#endif
+
+ SkTDArray<uint8_t> fBits;
+ /** The total matrix without the text height scale. */
+ SkMatrix fSkXform;
+    /** The total matrix without the text height scale, as a DWRITE_MATRIX. */
+ DWRITE_MATRIX fXform;
+ /** The non-rotational part of total matrix without the text height scale.
+ * This is used to find the magnitude of gdi compatible advances.
+ */
+ DWRITE_MATRIX fGsA;
+ /** The inverse of the rotational part of the total matrix.
+ * This is used to find the direction of gdi compatible advances.
+ */
+ SkMatrix fG_inv;
+ /** The text size to render with. */
+ SkScalar fTextSizeRender;
+ /** The text size to measure with. */
+ SkScalar fTextSizeMeasure;
+ SkAutoTUnref<DWriteFontTypeface> fTypeface;
+ int fGlyphCount;
+ DWRITE_RENDERING_MODE fRenderingMode;
+ DWRITE_TEXTURE_TYPE fTextureType;
+ DWRITE_MEASURING_MODE fMeasuringMode;
+ SkTScopedComPtr<IDWriteRenderingParams> fDefaultRenderingParams;
+#if SK_HAS_DWRITE_2_H
+ SkTScopedComPtr<IDWriteFactory2> fFactory2;
+ bool fIsColorFont;
+#endif
+};
+
+#endif
diff --git a/gfx/skia/skia/src/ports/SkTLS_none.cpp b/gfx/skia/skia/src/ports/SkTLS_none.cpp
new file mode 100644
index 000000000..d655560c2
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkTLS_none.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTLS.h"
+
+static void* gSpecific = nullptr;
+
+void* SkTLS::PlatformGetSpecific(bool) {
+ return gSpecific;
+}
+
+void SkTLS::PlatformSetSpecific(void* ptr) {
+ gSpecific = ptr;
+}
diff --git a/gfx/skia/skia/src/ports/SkTLS_pthread.cpp b/gfx/skia/skia/src/ports/SkTLS_pthread.cpp
new file mode 100644
index 000000000..445d76d13
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkTLS_pthread.cpp
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTLS.h"
+#include "SkOnce.h"
+
+#include <pthread.h>
+
+static pthread_key_t gSkTLSKey;
+
+void* SkTLS::PlatformGetSpecific(bool forceCreateTheSlot) {
+ // should we use forceCreateTheSlot to potentially just return nullptr if
+ // we've never been called with forceCreateTheSlot==true ?
+ static SkOnce once;
+ once(pthread_key_create, &gSkTLSKey, SkTLS::Destructor);
+ return pthread_getspecific(gSkTLSKey);
+}
+
+void SkTLS::PlatformSetSpecific(void* ptr) {
+ (void)pthread_setspecific(gSkTLSKey, ptr);
+}
diff --git a/gfx/skia/skia/src/ports/SkTLS_win.cpp b/gfx/skia/skia/src/ports/SkTLS_win.cpp
new file mode 100644
index 000000000..c349ad99c
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkTLS_win.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32)
+
+#include "SkLeanWindows.h"
+#include "SkMutex.h"
+#include "SkTLS.h"
+
+static bool gOnce = false;
+static DWORD gTlsIndex;
+SK_DECLARE_STATIC_MUTEX(gMutex);
+
+void* SkTLS::PlatformGetSpecific(bool forceCreateTheSlot) {
+ if (!forceCreateTheSlot && !gOnce) {
+ return nullptr;
+ }
+
+ if (!gOnce) {
+ SkAutoMutexAcquire tmp(gMutex);
+ if (!gOnce) {
+ gTlsIndex = TlsAlloc();
+ gOnce = true;
+ }
+ }
+ return TlsGetValue(gTlsIndex);
+}
+
+void SkTLS::PlatformSetSpecific(void* ptr) {
+ SkASSERT(gOnce);
+ (void)TlsSetValue(gTlsIndex, ptr);
+}
+
+// Call TLS destructors on thread exit. Code based on Chromium's
+// base/threading/thread_local_storage_win.cc
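+//
+// The /INCLUDE directives below force the linker to keep the CRT's TLS support
+// data and the skia_tls_callback symbol, so that the loader invokes
+// onTLSCallback on thread and process detach.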
+#ifdef _WIN64
+
+#pragma comment(linker, "/INCLUDE:_tls_used")
+#pragma comment(linker, "/INCLUDE:skia_tls_callback")
+
+#else
+
+#pragma comment(linker, "/INCLUDE:__tls_used")
+#pragma comment(linker, "/INCLUDE:_skia_tls_callback")
+
+#endif
+
+void NTAPI onTLSCallback(PVOID unused, DWORD reason, PVOID unused2) {
+ if ((DLL_THREAD_DETACH == reason || DLL_PROCESS_DETACH == reason) && gOnce) {
+ void* ptr = TlsGetValue(gTlsIndex);
+ if (ptr != nullptr) {
+ SkTLS::Destructor(ptr);
+ TlsSetValue(gTlsIndex, nullptr);
+ }
+ }
+}
+
+extern "C" {
+
+#ifdef _WIN64
+
+#pragma const_seg(".CRT$XLB")
+extern const PIMAGE_TLS_CALLBACK skia_tls_callback;
+const PIMAGE_TLS_CALLBACK skia_tls_callback = onTLSCallback;
+#pragma const_seg()
+
+#else
+
+#pragma data_seg(".CRT$XLB")
+PIMAGE_TLS_CALLBACK skia_tls_callback = onTLSCallback;
+#pragma data_seg()
+
+#endif
+}
+
+#endif//defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/ports/SkTypeface_win_dw.cpp b/gfx/skia/skia/src/ports/SkTypeface_win_dw.cpp
new file mode 100644
index 000000000..96a728f60
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkTypeface_win_dw.cpp
@@ -0,0 +1,431 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32)
+
+// SkTypes will include Windows.h, which will pull in all of the GDI defines.
+// GDI #defines GetGlyphIndices to GetGlyphIndicesA or GetGlyphIndicesW, but
+// IDWriteFontFace has a method called GetGlyphIndices. Since this file does
+// not use GDI, undefining GetGlyphIndices makes things less confusing.
+#undef GetGlyphIndices
+
+#include "SkDWrite.h"
+#include "SkDWriteFontFileStream.h"
+#include "SkFontDescriptor.h"
+#include "SkFontStream.h"
+#include "SkOTTable_head.h"
+#include "SkOTTable_hhea.h"
+#include "SkOTTable_OS_2.h"
+#include "SkOTTable_post.h"
+#include "SkOTUtils.h"
+#include "SkScalerContext.h"
+#include "SkScalerContext_win_dw.h"
+#include "SkTypeface_win_dw.h"
+#include "SkUtils.h"
+
+void DWriteFontTypeface::onGetFamilyName(SkString* familyName) const {
+ SkTScopedComPtr<IDWriteLocalizedStrings> familyNames;
+ HRV(fDWriteFontFamily->GetFamilyNames(&familyNames));
+
+ sk_get_locale_string(familyNames.get(), nullptr/*fMgr->fLocaleName.get()*/, familyName);
+}
+
+void DWriteFontTypeface::onGetFontDescriptor(SkFontDescriptor* desc,
+ bool* isLocalStream) const {
+ // Get the family name.
+ SkTScopedComPtr<IDWriteLocalizedStrings> familyNames;
+ HRV(fDWriteFontFamily->GetFamilyNames(&familyNames));
+
+ SkString utf8FamilyName;
+ sk_get_locale_string(familyNames.get(), nullptr/*fMgr->fLocaleName.get()*/, &utf8FamilyName);
+
+ desc->setFamilyName(utf8FamilyName.c_str());
+ desc->setStyle(this->fontStyle());
+ *isLocalStream = SkToBool(fDWriteFontFileLoader.get());
+}
+
+static SkUnichar next_utf8(const void** chars) {
+ return SkUTF8_NextUnichar((const char**)chars);
+}
+
+static SkUnichar next_utf16(const void** chars) {
+ return SkUTF16_NextUnichar((const uint16_t**)chars);
+}
+
+static SkUnichar next_utf32(const void** chars) {
+ const SkUnichar** uniChars = (const SkUnichar**)chars;
+ SkUnichar uni = **uniChars;
+ *uniChars += 1;
+ return uni;
+}
+
+typedef SkUnichar (*EncodingProc)(const void**);
+
+static EncodingProc find_encoding_proc(SkTypeface::Encoding enc) {
+ static const EncodingProc gProcs[] = {
+ next_utf8, next_utf16, next_utf32
+ };
+ SkASSERT((size_t)enc < SK_ARRAY_COUNT(gProcs));
+ return gProcs[enc];
+}
+
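+// With a null 'glyphs' array this returns how many leading characters the font
+// covers, stopping at the first missing character; otherwise it converts up to
+// glyphCount characters to glyph ids and returns the count preceding the first
+// unmapped (zero) glyph.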
+int DWriteFontTypeface::onCharsToGlyphs(const void* chars, Encoding encoding,
+ uint16_t glyphs[], int glyphCount) const
+{
+ if (nullptr == glyphs) {
+ EncodingProc next_ucs4_proc = find_encoding_proc(encoding);
+ for (int i = 0; i < glyphCount; ++i) {
+ const SkUnichar c = next_ucs4_proc(&chars);
+ BOOL exists;
+ fDWriteFont->HasCharacter(c, &exists);
+ if (!exists) {
+ return i;
+ }
+ }
+ return glyphCount;
+ }
+
+ switch (encoding) {
+ case SkTypeface::kUTF8_Encoding:
+ case SkTypeface::kUTF16_Encoding: {
+ static const int scratchCount = 256;
+ UINT32 scratch[scratchCount];
+ EncodingProc next_ucs4_proc = find_encoding_proc(encoding);
+ for (int baseGlyph = 0; baseGlyph < glyphCount; baseGlyph += scratchCount) {
+ int glyphsLeft = glyphCount - baseGlyph;
+ int limit = SkTMin(glyphsLeft, scratchCount);
+ for (int i = 0; i < limit; ++i) {
+ scratch[i] = next_ucs4_proc(&chars);
+ }
+ fDWriteFontFace->GetGlyphIndices(scratch, limit, &glyphs[baseGlyph]);
+ }
+ break;
+ }
+ case SkTypeface::kUTF32_Encoding: {
+ const UINT32* utf32 = reinterpret_cast<const UINT32*>(chars);
+ fDWriteFontFace->GetGlyphIndices(utf32, glyphCount, glyphs);
+ break;
+ }
+ default:
+ SK_ABORT("Invalid Text Encoding");
+ }
+
+ for (int i = 0; i < glyphCount; ++i) {
+ if (0 == glyphs[i]) {
+ return i;
+ }
+ }
+ return glyphCount;
+}
+
+int DWriteFontTypeface::onCountGlyphs() const {
+ return fDWriteFontFace->GetGlyphCount();
+}
+
+int DWriteFontTypeface::onGetUPEM() const {
+ DWRITE_FONT_METRICS metrics;
+ fDWriteFontFace->GetMetrics(&metrics);
+ return metrics.designUnitsPerEm;
+}
+
+class LocalizedStrings_IDWriteLocalizedStrings : public SkTypeface::LocalizedStrings {
+public:
+ /** Takes ownership of the IDWriteLocalizedStrings. */
+ explicit LocalizedStrings_IDWriteLocalizedStrings(IDWriteLocalizedStrings* strings)
+ : fIndex(0), fStrings(strings)
+ { }
+
+ bool next(SkTypeface::LocalizedString* localizedString) override {
+ if (fIndex >= fStrings->GetCount()) {
+ return false;
+ }
+
+ // String
+ UINT32 stringLen;
+ HRBM(fStrings->GetStringLength(fIndex, &stringLen), "Could not get string length.");
+
+ SkSMallocWCHAR wString(stringLen+1);
+ HRBM(fStrings->GetString(fIndex, wString.get(), stringLen+1), "Could not get string.");
+
+ HRB(sk_wchar_to_skstring(wString.get(), stringLen, &localizedString->fString));
+
+ // Locale
+ UINT32 localeLen;
+ HRBM(fStrings->GetLocaleNameLength(fIndex, &localeLen), "Could not get locale length.");
+
+ SkSMallocWCHAR wLocale(localeLen+1);
+ HRBM(fStrings->GetLocaleName(fIndex, wLocale.get(), localeLen+1), "Could not get locale.");
+
+ HRB(sk_wchar_to_skstring(wLocale.get(), localeLen, &localizedString->fLanguage));
+
+ ++fIndex;
+ return true;
+ }
+
+private:
+ UINT32 fIndex;
+ SkTScopedComPtr<IDWriteLocalizedStrings> fStrings;
+};
+
+SkTypeface::LocalizedStrings* DWriteFontTypeface::onCreateFamilyNameIterator() const {
+ SkTypeface::LocalizedStrings* nameIter =
+ SkOTUtils::LocalizedStrings_NameTable::CreateForFamilyNames(*this);
+ if (nullptr == nameIter) {
+ SkTScopedComPtr<IDWriteLocalizedStrings> familyNames;
+ HRNM(fDWriteFontFamily->GetFamilyNames(&familyNames), "Could not obtain family names.");
+ nameIter = new LocalizedStrings_IDWriteLocalizedStrings(familyNames.release());
+ }
+ return nameIter;
+}
+
+int DWriteFontTypeface::onGetTableTags(SkFontTableTag tags[]) const {
+ DWRITE_FONT_FACE_TYPE type = fDWriteFontFace->GetType();
+ if (type != DWRITE_FONT_FACE_TYPE_CFF &&
+ type != DWRITE_FONT_FACE_TYPE_TRUETYPE &&
+ type != DWRITE_FONT_FACE_TYPE_TRUETYPE_COLLECTION)
+ {
+ return 0;
+ }
+
+ int ttcIndex;
+ SkAutoTDelete<SkStream> stream(this->openStream(&ttcIndex));
+ return stream.get() ? SkFontStream::GetTableTags(stream, ttcIndex, tags) : 0;
+}
+
+size_t DWriteFontTypeface::onGetTableData(SkFontTableTag tag, size_t offset,
+ size_t length, void* data) const
+{
+ AutoDWriteTable table(fDWriteFontFace.get(), SkEndian_SwapBE32(tag));
+ if (!table.fExists) {
+ return 0;
+ }
+
+ if (offset > table.fSize) {
+ return 0;
+ }
+ size_t size = SkTMin(length, table.fSize - offset);
+ if (data) {
+ memcpy(data, table.fData + offset, size);
+ }
+
+ return size;
+}
+
+SkStreamAsset* DWriteFontTypeface::onOpenStream(int* ttcIndex) const {
+ *ttcIndex = fDWriteFontFace->GetIndex();
+
+ UINT32 numFiles;
+ HRNM(fDWriteFontFace->GetFiles(&numFiles, nullptr),
+ "Could not get number of font files.");
+ if (numFiles != 1) {
+ return nullptr;
+ }
+
+ SkTScopedComPtr<IDWriteFontFile> fontFile;
+ HRNM(fDWriteFontFace->GetFiles(&numFiles, &fontFile), "Could not get font files.");
+
+ const void* fontFileKey;
+ UINT32 fontFileKeySize;
+ HRNM(fontFile->GetReferenceKey(&fontFileKey, &fontFileKeySize),
+ "Could not get font file reference key.");
+
+ SkTScopedComPtr<IDWriteFontFileLoader> fontFileLoader;
+ HRNM(fontFile->GetLoader(&fontFileLoader), "Could not get font file loader.");
+
+ SkTScopedComPtr<IDWriteFontFileStream> fontFileStream;
+ HRNM(fontFileLoader->CreateStreamFromKey(fontFileKey, fontFileKeySize,
+ &fontFileStream),
+ "Could not create font file stream.");
+
+ return new SkDWriteFontFileStream(fontFileStream.get());
+}
+
+SkScalerContext* DWriteFontTypeface::onCreateScalerContext(const SkScalerContextEffects& effects,
+ const SkDescriptor* desc) const {
+ return new SkScalerContext_DW(const_cast<DWriteFontTypeface*>(this), effects, desc);
+}
+
+#ifdef MOZ_SKIA
+IDWriteRenderingParams* GetDwriteRenderingParams(bool aGDI);
+#endif
+
+void DWriteFontTypeface::onFilterRec(SkScalerContext::Rec* rec) const {
+ if (rec->fFlags & SkScalerContext::kLCD_Vertical_Flag) {
+ rec->fMaskFormat = SkMask::kA8_Format;
+ }
+
+ unsigned flagsWeDontSupport = SkScalerContext::kVertical_Flag |
+ SkScalerContext::kDevKernText_Flag |
+ SkScalerContext::kForceAutohinting_Flag |
+ SkScalerContext::kEmbolden_Flag |
+ SkScalerContext::kLCD_Vertical_Flag;
+ rec->fFlags &= ~flagsWeDontSupport;
+
+ SkPaint::Hinting h = rec->getHinting();
+    // DirectWrite does not expose control over hinting, so always use slight hinting.
+ h = SkPaint::kSlight_Hinting;
+ rec->setHinting(h);
+
+#if defined(SK_FONT_HOST_USE_SYSTEM_SETTINGS)
+ IDWriteFactory* factory = sk_get_dwrite_factory();
+ if (factory != nullptr) {
+ SkTScopedComPtr<IDWriteRenderingParams> defaultRenderingParams;
+ if (SUCCEEDED(factory->CreateRenderingParams(&defaultRenderingParams))) {
+ float gamma = defaultRenderingParams->GetGamma();
+ rec->setDeviceGamma(gamma);
+ rec->setPaintGamma(gamma);
+
+ rec->setContrast(defaultRenderingParams->GetEnhancedContrast());
+ }
+ }
+#elif defined(MOZ_SKIA)
+ IDWriteRenderingParams* params = GetDwriteRenderingParams(ForceGDI());
+ SkASSERT(params);
+ rec->setContrast(params->GetEnhancedContrast());
+
+ // GDI gamma should be 2.3
+ // See the LUT gamma values comment for GDI fonts.
+ float gamma = ForceGDI() ? 2.3f : params->GetGamma();
+ rec->setDeviceGamma(gamma);
+ rec->setPaintGamma(gamma);
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+//PDF Support
+
+// Construct Glyph to Unicode table.
+// Unicode code points that require conjugate pairs in utf16 are not
+// supported.
+// TODO(bungeman): This never does what anyone wants.
+// What is really wanted is the text to glyphs mapping
+static void populate_glyph_to_unicode(IDWriteFontFace* fontFace,
+ const unsigned glyphCount,
+ SkTDArray<SkUnichar>* glyphToUnicode) {
+    //Do this like FreeType instead
+ SkAutoTMalloc<SkUnichar> glyphToUni(glyphCount);
+ int maxGlyph = -1;
+ for (UINT32 c = 0; c < 0x10FFFF; ++c) {
+ UINT16 glyph = 0;
+ HRVM(fontFace->GetGlyphIndices(&c, 1, &glyph),
+ "Failed to get glyph index.");
+ // Intermittent DW bug on Windows 10. See crbug.com/470146.
+ if (glyph >= glyphCount) {
+ return;
+ }
+ if (0 < glyph) {
+ maxGlyph = SkTMax(static_cast<int>(glyph), maxGlyph);
+ glyphToUni[glyph] = c;
+ }
+ }
+
+ SkTDArray<SkUnichar>(glyphToUni, maxGlyph + 1).swap(*glyphToUnicode);
+}
+
+SkAdvancedTypefaceMetrics* DWriteFontTypeface::onGetAdvancedTypefaceMetrics(
+ PerGlyphInfo perGlyphInfo,
+ const uint32_t* glyphIDs,
+ uint32_t glyphIDsCount) const {
+
+ SkAdvancedTypefaceMetrics* info = nullptr;
+
+ HRESULT hr = S_OK;
+
+ const unsigned glyphCount = fDWriteFontFace->GetGlyphCount();
+
+ DWRITE_FONT_METRICS dwfm;
+ fDWriteFontFace->GetMetrics(&dwfm);
+
+ info = new SkAdvancedTypefaceMetrics;
+ info->fEmSize = dwfm.designUnitsPerEm;
+ info->fLastGlyphID = SkToU16(glyphCount - 1);
+
+ info->fAscent = SkToS16(dwfm.ascent);
+ info->fDescent = SkToS16(dwfm.descent);
+ info->fCapHeight = SkToS16(dwfm.capHeight);
+
+ // SkAdvancedTypefaceMetrics::fFontName is in theory supposed to be
+ // the PostScript name of the font. However, due to the way it is currently
+ // used, it must actually be a family name.
+ SkTScopedComPtr<IDWriteLocalizedStrings> familyNames;
+ hr = fDWriteFontFamily->GetFamilyNames(&familyNames);
+
+ UINT32 familyNameLen;
+ hr = familyNames->GetStringLength(0, &familyNameLen);
+
+ SkSMallocWCHAR familyName(familyNameLen+1);
+ hr = familyNames->GetString(0, familyName.get(), familyNameLen+1);
+
+ hr = sk_wchar_to_skstring(familyName.get(), familyNameLen, &info->fFontName);
+
+ if (perGlyphInfo & kToUnicode_PerGlyphInfo) {
+ populate_glyph_to_unicode(fDWriteFontFace.get(), glyphCount, &(info->fGlyphToUnicode));
+ }
+
+ DWRITE_FONT_FACE_TYPE fontType = fDWriteFontFace->GetType();
+ if (fontType != DWRITE_FONT_FACE_TYPE_TRUETYPE &&
+ fontType != DWRITE_FONT_FACE_TYPE_TRUETYPE_COLLECTION)
+ {
+ return info;
+ }
+
+ // Simulated fonts aren't really TrueType fonts.
+ if (fDWriteFontFace->GetSimulations() == DWRITE_FONT_SIMULATIONS_NONE) {
+ info->fType = SkAdvancedTypefaceMetrics::kTrueType_Font;
+ }
+
+ AutoTDWriteTable<SkOTTableHead> headTable(fDWriteFontFace.get());
+ AutoTDWriteTable<SkOTTablePostScript> postTable(fDWriteFontFace.get());
+ AutoTDWriteTable<SkOTTableHorizontalHeader> hheaTable(fDWriteFontFace.get());
+ AutoTDWriteTable<SkOTTableOS2> os2Table(fDWriteFontFace.get());
+ if (!headTable.fExists || !postTable.fExists || !hheaTable.fExists || !os2Table.fExists) {
+ return info;
+ }
+
+ //There exist CJK fonts which set the IsFixedPitch and Monospace bits,
+    //but have full-width, Latin half-width, and half-width kana.
+ bool fixedWidth = (postTable->isFixedPitch &&
+ (1 == SkEndian_SwapBE16(hheaTable->numberOfHMetrics)));
+ //Monospace
+ if (fixedWidth) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kFixedPitch_Style;
+ }
+ //Italic
+ if (os2Table->version.v0.fsSelection.field.Italic) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kItalic_Style;
+ }
+ //Serif
+ using SerifStyle = SkPanose::Data::TextAndDisplay::SerifStyle;
+ SerifStyle serifStyle = os2Table->version.v0.panose.data.textAndDisplay.bSerifStyle;
+ if (SkPanose::FamilyType::TextAndDisplay == os2Table->version.v0.panose.bFamilyType) {
+ if (SerifStyle::Cove == serifStyle ||
+ SerifStyle::ObtuseCove == serifStyle ||
+ SerifStyle::SquareCove == serifStyle ||
+ SerifStyle::ObtuseSquareCove == serifStyle ||
+ SerifStyle::Square == serifStyle ||
+ SerifStyle::Thin == serifStyle ||
+ SerifStyle::Bone == serifStyle ||
+ SerifStyle::Exaggerated == serifStyle ||
+ SerifStyle::Triangle == serifStyle)
+ {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kSerif_Style;
+ }
+ //Script
+ } else if (SkPanose::FamilyType::Script == os2Table->version.v0.panose.bFamilyType) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kScript_Style;
+ }
+
+ info->fItalicAngle = SkEndian_SwapBE32(postTable->italicAngle) >> 16;
+
+ info->fBBox = SkIRect::MakeLTRB((int32_t)SkEndian_SwapBE16((uint16_t)headTable->xMin),
+ (int32_t)SkEndian_SwapBE16((uint16_t)headTable->yMax),
+ (int32_t)SkEndian_SwapBE16((uint16_t)headTable->xMax),
+ (int32_t)SkEndian_SwapBE16((uint16_t)headTable->yMin));
+ return info;
+}
+#endif//defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/ports/SkTypeface_win_dw.h b/gfx/skia/skia/src/ports/SkTypeface_win_dw.h
new file mode 100644
index 000000000..fbf982193
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkTypeface_win_dw.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTypeface_win_dw_DEFINED
+#define SkTypeface_win_dw_DEFINED
+
+#include "SkAdvancedTypefaceMetrics.h"
+#include "SkDWrite.h"
+#include "SkHRESULT.h"
+#include "SkLeanWindows.h"
+#include "SkTScopedComPtr.h"
+#include "SkTypeface.h"
+#include "SkTypefaceCache.h"
+
+#include <dwrite.h>
+#if SK_HAS_DWRITE_1_H
+# include <dwrite_1.h>
+#endif
+
+class SkFontDescriptor;
+struct SkScalerContextRec;
+
+static SkFontStyle get_style(IDWriteFont* font) {
+ int weight = font->GetWeight();
+ int width = font->GetStretch();
+ SkFontStyle::Slant slant = SkFontStyle::kUpright_Slant;
+ switch (font->GetStyle()) {
+ case DWRITE_FONT_STYLE_NORMAL: slant = SkFontStyle::kUpright_Slant; break;
+ case DWRITE_FONT_STYLE_OBLIQUE: slant = SkFontStyle::kOblique_Slant; break;
+ case DWRITE_FONT_STYLE_ITALIC: slant = SkFontStyle::kItalic_Slant; break;
+ default: SkASSERT(false); break;
+ }
+ return SkFontStyle(weight, width, slant);
+}
+
+class DWriteFontTypeface : public SkTypeface {
+private:
+ DWriteFontTypeface(const SkFontStyle& style,
+ IDWriteFactory* factory,
+ IDWriteFontFace* fontFace,
+ IDWriteFont* font = nullptr,
+ IDWriteFontFamily* fontFamily = nullptr,
+ IDWriteFontFileLoader* fontFileLoader = nullptr,
+ IDWriteFontCollectionLoader* fontCollectionLoader = nullptr)
+ : SkTypeface(style, false)
+ , fFactory(SkRefComPtr(factory))
+ , fDWriteFontCollectionLoader(SkSafeRefComPtr(fontCollectionLoader))
+ , fDWriteFontFileLoader(SkSafeRefComPtr(fontFileLoader))
+ , fDWriteFontFamily(SkSafeRefComPtr(fontFamily))
+ , fDWriteFont(SkSafeRefComPtr(font))
+ , fDWriteFontFace(SkRefComPtr(fontFace))
+ , fForceGDI(false)
+ {
+#if SK_HAS_DWRITE_1_H
+ if (!SUCCEEDED(fDWriteFontFace->QueryInterface(&fDWriteFontFace1))) {
+ // IUnknown::QueryInterface states that if it fails, punk will be set to nullptr.
+ // http://blogs.msdn.com/b/oldnewthing/archive/2004/03/26/96777.aspx
+ SkASSERT_RELEASE(nullptr == fDWriteFontFace1.get());
+ }
+#endif
+ }
+
+public:
+ SkTScopedComPtr<IDWriteFactory> fFactory;
+ SkTScopedComPtr<IDWriteFontCollectionLoader> fDWriteFontCollectionLoader;
+ SkTScopedComPtr<IDWriteFontFileLoader> fDWriteFontFileLoader;
+ SkTScopedComPtr<IDWriteFontFamily> fDWriteFontFamily;
+ SkTScopedComPtr<IDWriteFont> fDWriteFont;
+ SkTScopedComPtr<IDWriteFontFace> fDWriteFontFace;
+#if SK_HAS_DWRITE_1_H
+ SkTScopedComPtr<IDWriteFontFace1> fDWriteFontFace1;
+#endif
+
+ static DWriteFontTypeface* Create(IDWriteFactory* factory,
+ IDWriteFontFace* fontFace,
+ SkFontStyle aStyle,
+ bool aForceGDI) {
+ DWriteFontTypeface* typeface =
+ new DWriteFontTypeface(aStyle, factory, fontFace,
+ nullptr, nullptr,
+ nullptr, nullptr);
+ typeface->fForceGDI = aForceGDI;
+ return typeface;
+ }
+
+ static DWriteFontTypeface* Create(IDWriteFactory* factory,
+ IDWriteFontFace* fontFace,
+ IDWriteFont* font,
+ IDWriteFontFamily* fontFamily,
+ IDWriteFontFileLoader* fontFileLoader = nullptr,
+ IDWriteFontCollectionLoader* fontCollectionLoader = nullptr) {
+ return new DWriteFontTypeface(get_style(font), factory, fontFace, font, fontFamily,
+ fontFileLoader, fontCollectionLoader);
+ }
+
+ bool ForceGDI() const { return fForceGDI; }
+
+protected:
+ void weak_dispose() const override {
+ if (fDWriteFontCollectionLoader.get()) {
+ HRV(fFactory->UnregisterFontCollectionLoader(fDWriteFontCollectionLoader.get()));
+ }
+ if (fDWriteFontFileLoader.get()) {
+ HRV(fFactory->UnregisterFontFileLoader(fDWriteFontFileLoader.get()));
+ }
+
+ //SkTypefaceCache::Remove(this);
+ INHERITED::weak_dispose();
+ }
+
+ SkStreamAsset* onOpenStream(int* ttcIndex) const override;
+ SkScalerContext* onCreateScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor*) const override;
+ void onFilterRec(SkScalerContextRec*) const override;
+ SkAdvancedTypefaceMetrics* onGetAdvancedTypefaceMetrics(
+ PerGlyphInfo, const uint32_t*, uint32_t) const override;
+ void onGetFontDescriptor(SkFontDescriptor*, bool*) const override;
+ virtual int onCharsToGlyphs(const void* chars, Encoding encoding,
+ uint16_t glyphs[], int glyphCount) const override;
+ int onCountGlyphs() const override;
+ int onGetUPEM() const override;
+ void onGetFamilyName(SkString* familyName) const override;
+ SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override;
+ int onGetTableTags(SkFontTableTag tags[]) const override;
+ virtual size_t onGetTableData(SkFontTableTag, size_t offset,
+ size_t length, void* data) const override;
+
+private:
+ typedef SkTypeface INHERITED;
+ bool fForceGDI;
+};
+
+#endif
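
Note (not part of the patch above): a minimal, hypothetical caller for the first Create() overload might look like the sketch below. The helper name and the chosen style are illustrative only; factory and fontFace are assumed to be valid COM pointers owned by the caller, and Create() takes its own references via SkRefComPtr.

    // Illustrative sketch, not part of this patch.
    static SkTypeface* WrapDWriteFontFace(IDWriteFactory* factory,
                                          IDWriteFontFace* fontFace) {
        SkFontStyle style(SkFontStyle::kNormal_Weight,
                          SkFontStyle::kNormal_Width,
                          SkFontStyle::kUpright_Slant);
        // The returned typeface carries its own reference; the caller owns it.
        return DWriteFontTypeface::Create(factory, fontFace, style,
                                          /*aForceGDI=*/true);
    }
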
diff --git a/gfx/skia/skia/src/sfnt/SkIBMFamilyClass.h b/gfx/skia/skia/src/sfnt/SkIBMFamilyClass.h
new file mode 100644
index 000000000..b95c91efb
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkIBMFamilyClass.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkIBMFamilyClass_DEFINED
+#define SkIBMFamilyClass_DEFINED
+
+#include "SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkIBMFamilyClass {
+ enum class Class : SK_OT_BYTE {
+ NoClassification = 0,
+ OldstyleSerifs = 1,
+ TransitionalSerifs = 2,
+ ModernSerifs = 3,
+ ClarendonSerifs = 4,
+ SlabSerifs = 5,
+ //6 reserved for future use
+ FreeformSerifs = 7,
+ SansSerif = 8,
+ Ornamentals = 9,
+ Scripts = 10,
+ //11 reserved for future use
+ Symbolic = 12,
+ //13-15 reserved for future use
+ } familyClass;
+ union SubClass {
+ enum class OldstyleSerifs : SK_OT_BYTE {
+ NoClassification = 0,
+ IBMRoundedLegibility = 1,
+ Garalde = 2,
+ Venetian = 3,
+ ModifiedVenetian = 4,
+ DutchModern = 5,
+ DutchTraditional = 6,
+ Contemporary = 7,
+ Calligraphic = 8,
+ //9-14 reserved for future use
+ Miscellaneous = 15,
+ } oldstyleSerifs;
+ enum class TransitionalSerifs : SK_OT_BYTE {
+ NoClassification = 0,
+ DirectLine = 1,
+ Script = 2,
+ //3-14 reserved for future use
+ Miscellaneous = 15,
+ } transitionalSerifs;
+ enum class ModernSerifs : SK_OT_BYTE {
+ NoClassification = 0,
+ Italian = 1,
+ Script = 2,
+ //3-14 reserved for future use
+ Miscellaneous = 15,
+ } modernSerifs;
+ enum class ClarendonSerifs : SK_OT_BYTE {
+ NoClassification = 0,
+ Clarendon = 1,
+ Modern = 2,
+ Traditional = 3,
+ Newspaper = 4,
+ StubSerif = 5,
+ Monotone = 6,
+ Typewriter = 7,
+ //8-14 reserved for future use
+ Miscellaneous = 15,
+ } clarendonSerifs;
+ enum class SlabSerifs : SK_OT_BYTE {
+ NoClassification = 0,
+ Monotone = 1,
+ Humanist = 2,
+ Geometric = 3,
+ Swiss = 4,
+ Typewriter = 5,
+ //6-14 reserved for future use
+ Miscellaneous = 15,
+ } slabSerifs;
+ enum class FreeformSerifs : SK_OT_BYTE {
+ NoClassification = 0,
+ Modern = 1,
+ //2-14 reserved for future use
+ Miscellaneous = 15,
+ } freeformSerifs;
+ enum class SansSerif : SK_OT_BYTE {
+ NoClassification = 0,
+ IBMNeoGrotesqueGothic = 1,
+ Humanist = 2,
+ LowXRoundGeometric = 3,
+ HighXRoundGeometric = 4,
+ NeoGrotesqueGothic = 5,
+ ModifiedNeoGrotesqueGothic = 6,
+ //7-8 reserved for future use
+ TypewriterGothic = 9,
+ Matrix = 10,
+ //11-14 reserved for future use
+ Miscellaneous = 15,
+ } sansSerif;
+ enum class Ornamentals : SK_OT_BYTE {
+ NoClassification = 0,
+ Engraver = 1,
+ BlackLetter = 2,
+ Decorative = 3,
+ ThreeDimensional = 4,
+ //5-14 reserved for future use
+ Miscellaneous = 15,
+ } ornamentals;
+ enum class Scripts : SK_OT_BYTE {
+ NoClassification = 0,
+ Uncial = 1,
+ Brush_Joined = 2,
+ Formal_Joined = 3,
+ Monotone_Joined = 4,
+ Calligraphic = 5,
+ Brush_Unjoined = 6,
+ Formal_Unjoined = 7,
+ Monotone_Unjoined = 8,
+ //9-14 reserved for future use
+ Miscellaneous = 15,
+ } scripts;
+ enum class Symbolic : SK_OT_BYTE {
+ NoClassification = 0,
+ //1-2 reserved for future use
+ MixedSerif = 3,
+ //4-5 reserved for future use
+ OldstyleSerif = 6,
+ NeoGrotesqueSansSerif = 7,
+ //8-14 reserved for future use
+ Miscellaneous = 15,
+ } symbolic;
+ } familySubClass;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkIBMFamilyClass) == 2, "sizeof_SkIBMFamilyClass_not_2");
+
+#endif
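
Note (not part of the patch above): the two bytes here mirror the OS/2 table's sFamilyClass field (class byte followed by subclass byte), which is why the struct is pinned to exactly two bytes by the static_assert. A hedged sketch of how the classification can be consumed; the helper is illustrative only:

    // Illustrative sketch, not part of this patch. Each field is a single byte,
    // so no endian swap is needed before comparing.
    static bool is_sans_serif(const SkIBMFamilyClass& fc) {
        return fc.familyClass == SkIBMFamilyClass::Class::SansSerif;
    }
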
diff --git a/gfx/skia/skia/src/sfnt/SkOTTableTypes.h b/gfx/skia/skia/src/sfnt/SkOTTableTypes.h
new file mode 100644
index 000000000..119f17ef7
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTableTypes.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTableTypes_DEFINED
+#define SkOTTableTypes_DEFINED
+
+#include "SkTypes.h"
+#include "SkEndian.h"
+
+//All SK_OT_ prefixed types should be considered as big endian.
+typedef uint8_t SK_OT_BYTE;
+#if CHAR_BIT == 8
+typedef signed char SK_OT_CHAR; //easier to debug
+#else
+typedef int8_t SK_OT_CHAR;
+#endif
+typedef uint16_t SK_OT_SHORT;
+typedef uint16_t SK_OT_USHORT;
+typedef uint32_t SK_OT_ULONG;
+typedef uint32_t SK_OT_LONG;
+//16.16 Signed fixed point representation.
+typedef int32_t SK_OT_Fixed;
+//2.14 Signed fixed point representation.
+typedef uint16_t SK_OT_F2DOT14;
+//F units are the units of measurement in em space.
+typedef uint16_t SK_OT_FWORD;
+typedef uint16_t SK_OT_UFWORD;
+//Number of seconds since 12:00 midnight, January 1, 1904.
+typedef uint64_t SK_OT_LONGDATETIME;
+
+#define SK_OT_BYTE_BITFIELD SK_UINT8_BITFIELD
+
+template<typename T> class SkOTTableTAG {
+public:
+ /**
+ * SkOTTableTAG<T>::value is the big endian value of an OpenType table tag.
+ * It may be directly compared with raw big endian table data.
+ */
+ static const SK_OT_ULONG value = SkTEndian_SwapBE32(
+ SkSetFourByteTag(T::TAG0, T::TAG1, T::TAG2, T::TAG3)
+ );
+};
+
+/** SkOTSetUSHORTBit<N>::value is an SK_OT_USHORT with the Nth BE bit set. */
+template <unsigned N> struct SkOTSetUSHORTBit {
+ static_assert(N < 16, "NTooBig");
+ static const uint16_t bit = 1u << N;
+ static const SK_OT_USHORT value = SkTEndian_SwapBE16(bit);
+};
+
+/** SkOTSetULONGBit<N>::value is an SK_OT_ULONG with the Nth BE bit set. */
+template <unsigned N> struct SkOTSetULONGBit {
+ static_assert(N < 32, "NTooBig");
+ static const uint32_t bit = 1u << N;
+ static const SK_OT_ULONG value = SkTEndian_SwapBE32(bit);
+};
+
+#endif
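
Note (not part of the patch above): the comment on SkOTTableTAG states the key invariant of this header: tag constants and the SkOTSet*Bit masks are pre-swapped to big endian, so they can be compared or AND-ed against raw table data without converting each field first. A hedged sketch of what that allows; the tag struct and helpers below are illustrative only:

    // Illustrative sketch, not part of this patch.
    struct HypotheticalHeadTag {
        static const SK_OT_CHAR TAG0 = 'h';
        static const SK_OT_CHAR TAG1 = 'e';
        static const SK_OT_CHAR TAG2 = 'a';
        static const SK_OT_CHAR TAG3 = 'd';
    };

    // SkOTTableTAG<...>::value is already big endian, so it compares directly
    // against the tag stored in an sfnt table directory entry.
    static bool is_head_tag(SK_OT_ULONG rawBigEndianTag) {
        return rawBigEndianTag == SkOTTableTAG<HypotheticalHeadTag>::value;
    }

    // Likewise, the bit masks are big endian, so unswapped 16-bit fields can be
    // tested in place.
    static bool bit1_is_set(SK_OT_USHORT rawBigEndianField) {
        return 0 != (rawBigEndianField & SkOTSetUSHORTBit<1>::value);
    }
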
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_EBDT.h b/gfx/skia/skia/src/sfnt/SkOTTable_EBDT.h
new file mode 100644
index 000000000..856b59a83
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_EBDT.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_EBDT_DEFINED
+#define SkOTTable_EBDT_DEFINED
+
+#include "SkEndian.h"
+#include "SkOTTableTypes.h"
+#include "SkOTTable_head.h"
+#include "SkOTTable_loca.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableEmbeddedBitmapData {
+ static const SK_OT_CHAR TAG0 = 'E';
+ static const SK_OT_CHAR TAG1 = 'B';
+ static const SK_OT_CHAR TAG2 = 'D';
+ static const SK_OT_CHAR TAG3 = 'T';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableEmbeddedBitmapData>::value;
+
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed version_initial = SkTEndian_SwapBE32(0x00020000);
+
+ struct BigGlyphMetrics {
+ SK_OT_BYTE height;
+ SK_OT_BYTE width;
+ SK_OT_CHAR horiBearingX;
+ SK_OT_CHAR horiBearingY;
+ SK_OT_BYTE horiAdvance;
+ SK_OT_CHAR vertBearingX;
+ SK_OT_CHAR vertBearingY;
+ SK_OT_BYTE vertAdvance;
+ };
+
+ struct SmallGlyphMetrics {
+ SK_OT_BYTE height;
+ SK_OT_BYTE width;
+ SK_OT_CHAR bearingX;
+ SK_OT_CHAR bearingY;
+ SK_OT_BYTE advance;
+ };
+
+ // Small metrics, byte-aligned data.
+ struct Format1 {
+ SmallGlyphMetrics smallGlyphMetrics;
+ //SK_OT_BYTE[] byteAlignedBitmap;
+ };
+
+ // Small metrics, bit-aligned data.
+ struct Format2 {
+ SmallGlyphMetrics smallGlyphMetrics;
+ //SK_OT_BYTE[] bitAlignedBitmap;
+ };
+
+ // Format 3 is not used.
+
+ // EBLC metrics (IndexSubTable::header::indexFormat 2 or 5), compressed data.
+ // Only used on Mac.
+ struct Format4 {
+ SK_OT_ULONG whiteTreeOffset;
+ SK_OT_ULONG blackTreeOffset;
+ SK_OT_ULONG glyphDataOffset;
+ };
+
+ // EBLC metrics (IndexSubTable::header::indexFormat 2 or 5), bit-aligned data.
+ struct Format5 {
+ //SK_OT_BYTE[] bitAlignedBitmap;
+ };
+
+ // Big metrics, byte-aligned data.
+ struct Format6 {
+ BigGlyphMetrics bigGlyphMetrics;
+ //SK_OT_BYTE[] byteAlignedBitmap;
+ };
+
+ // Big metrics, bit-aligned data.
+ struct Format7 {
+ BigGlyphMetrics bigGlyphMetrics;
+ //SK_OT_BYTE[] bitAlignedBitmap;
+ };
+
+ struct EBDTComponent {
+ SK_OT_USHORT glyphCode; // Component glyph code
+ SK_OT_CHAR xOffset; // Position of component left
+ SK_OT_CHAR yOffset; // Position of component top
+ };
+
+ struct Format8 {
+ SmallGlyphMetrics smallMetrics; // Metrics information for the glyph
+ SK_OT_BYTE pad; // Pad to short boundary
+ SK_OT_USHORT numComponents; // Number of components
+ //EBDTComponent componentArray[numComponents]; // Glyph code, offset array
+ };
+
+ struct Format9 {
+ BigGlyphMetrics bigMetrics; // Metrics information for the glyph
+ SK_OT_USHORT numComponents; // Number of components
+ //EBDTComponent componentArray[numComponents]; // Glyph code, offset array
+ };
+};
+
+#pragma pack(pop)
+
+#endif
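
Note (not part of the patch above): because the whole header sits inside #pragma pack(push, 1), the metrics records have exactly the sizes the OpenType spec gives them, which is what makes casting raw table bytes onto these structs safe. A hedged sanity check, illustrative only:

    // Illustrative sketch, not part of this patch.
    static_assert(sizeof(SkOTTableEmbeddedBitmapData::SmallGlyphMetrics) == 5,
                  "smallGlyphMetrics_not_5_bytes");
    static_assert(sizeof(SkOTTableEmbeddedBitmapData::BigGlyphMetrics) == 8,
                  "bigGlyphMetrics_not_8_bytes");
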
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_EBLC.h b/gfx/skia/skia/src/sfnt/SkOTTable_EBLC.h
new file mode 100644
index 000000000..d77dc137f
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_EBLC.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_EBLC_DEFINED
+#define SkOTTable_EBLC_DEFINED
+
+#include "SkEndian.h"
+#include "SkOTTable_EBDT.h"
+#include "SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableEmbeddedBitmapLocation {
+ static const SK_OT_CHAR TAG0 = 'E';
+ static const SK_OT_CHAR TAG1 = 'B';
+ static const SK_OT_CHAR TAG2 = 'L';
+ static const SK_OT_CHAR TAG3 = 'C';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableEmbeddedBitmapLocation>::value;
+
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed version_initial = SkTEndian_SwapBE32(0x00020000);
+
+ SK_OT_ULONG numSizes;
+
+ struct SbitLineMetrics {
+ SK_OT_CHAR ascender;
+ SK_OT_CHAR descender;
+ SK_OT_BYTE widthMax;
+ SK_OT_CHAR caretSlopeNumerator;
+ SK_OT_CHAR caretSlopeDenominator;
+ SK_OT_CHAR caretOffset;
+ SK_OT_CHAR minOriginSB;
+ SK_OT_CHAR minAdvanceSB;
+ SK_OT_CHAR maxBeforeBL;
+ SK_OT_CHAR minAfterBL;
+ SK_OT_CHAR pad1;
+ SK_OT_CHAR pad2;
+ };
+
+ struct BitmapSizeTable {
+ SK_OT_ULONG indexSubTableArrayOffset; //offset to indexSubtableArray from beginning of EBLC.
+ SK_OT_ULONG indexTablesSize; //number of bytes in corresponding index subtables and array
+ SK_OT_ULONG numberOfIndexSubTables; //an index subtable for each range or format change
+ SK_OT_ULONG colorRef; //not used; set to 0.
+ SbitLineMetrics hori; //line metrics for text rendered horizontally
+ SbitLineMetrics vert; //line metrics for text rendered vertically
+ SK_OT_USHORT startGlyphIndex; //lowest glyph index for this size
+ SK_OT_USHORT endGlyphIndex; //highest glyph index for this size
+ SK_OT_BYTE ppemX; //horizontal pixels per Em
+ SK_OT_BYTE ppemY; //vertical pixels per Em
+ struct BitDepth {
+ enum Value : SK_OT_BYTE {
+ BW = 1,
+ Gray4 = 2,
+ Gray16 = 4,
+ Gray256 = 8,
+ };
+ SK_OT_BYTE value;
+ } bitDepth; //the Microsoft rasterizer v.1.7 or greater supports
+ union Flags {
+ struct Field {
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Horizontal, // Horizontal small glyph metrics
+ Vertical, // Vertical small glyph metrics
+ Reserved02,
+ Reserved03,
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_CHAR Horizontal = 1u << 0;
+ static const SK_OT_CHAR Vertical = 1u << 1;
+ SK_OT_CHAR value;
+ } raw;
+ } flags;
+ }; //bitmapSizeTable[numSizes];
+
+ struct IndexSubTableArray {
+ SK_OT_USHORT firstGlyphIndex; //first glyph code of this range
+ SK_OT_USHORT lastGlyphIndex; //last glyph code of this range (inclusive)
+ SK_OT_ULONG additionalOffsetToIndexSubtable; //add to BitmapSizeTable::indexSubTableArrayOffset to get offset from beginning of 'EBLC'
+ }; //indexSubTableArray[BitmapSizeTable::numberOfIndexSubTables];
+
+ struct IndexSubHeader {
+ SK_OT_USHORT indexFormat; //format of this indexSubTable
+ SK_OT_USHORT imageFormat; //format of 'EBDT' image data
+ SK_OT_ULONG imageDataOffset; //offset to image data in 'EBDT' table
+ };
+
+ // Variable metrics glyphs with 4 byte offsets
+ struct IndexSubTable1 {
+ IndexSubHeader header;
+ //SK_OT_ULONG offsetArray[lastGlyphIndex - firstGlyphIndex + 1 + 1]; //last element points to one past end of last glyph
+ //glyphData = offsetArray[glyphIndex - firstGlyphIndex] + imageDataOffset
+ };
+
+ // All Glyphs have identical metrics
+ struct IndexSubTable2 {
+ IndexSubHeader header;
+ SK_OT_ULONG imageSize; // all glyphs are of the same size
+ SkOTTableEmbeddedBitmapData::BigGlyphMetrics bigMetrics; // all glyphs have the same metrics; glyph data may be compressed, byte-aligned, or bit-aligned
+ };
+
+ // Variable metrics glyphs with 2 byte offsets
+ struct IndexSubTable3 {
+ IndexSubHeader header;
+ //SK_OT_USHORT offsetArray[lastGlyphIndex - firstGlyphIndex + 1 + 1]; //last element points to one past end of last glyph, may have extra element to force even number of elements
+ //glyphData = offsetArray[glyphIndex - firstGlyphIndex] + imageDataOffset
+ };
+
+ // Variable metrics glyphs with sparse glyph codes
+ struct IndexSubTable4 {
+ IndexSubHeader header;
+ SK_OT_ULONG numGlyphs;
+ struct CodeOffsetPair {
+ SK_OT_USHORT glyphCode;
+ SK_OT_USHORT offset; //location in EBDT
+ }; //glyphArray[numGlyphs+1]
+ };
+
+ // Constant metrics glyphs with sparse glyph codes
+ struct IndexSubTable5 {
+ IndexSubHeader header;
+ SK_OT_ULONG imageSize; //all glyphs have the same data size
+ SkOTTableEmbeddedBitmapData::BigGlyphMetrics bigMetrics; //all glyphs have the same metrics
+ SK_OT_ULONG numGlyphs;
+ //SK_OT_USHORT glyphCodeArray[numGlyphs] //must have even number of entries (set pad to 0)
+ };
+
+ union IndexSubTable {
+ IndexSubHeader header;
+ IndexSubTable1 format1;
+ IndexSubTable2 format2;
+ IndexSubTable3 format3;
+ IndexSubTable4 format4;
+ IndexSubTable5 format5;
+ };
+
+};
+
+#pragma pack(pop)
+
+#endif
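
Note (not part of the patch above): the struct itself only stores the fixed-size header (version plus numSizes); the numSizes BitmapSizeTable records follow it directly in the table, as the trailing //bitmapSizeTable[numSizes]; comment indicates. A hedged sketch of walking the strikes in a raw 'EBLC' blob; illustrative only, and the data is assumed to be a complete, validated table:

    // Illustrative sketch, not part of this patch.
    static void list_strikes(const uint8_t* data) {
        typedef SkOTTableEmbeddedBitmapLocation EBLC;
        const EBLC* eblc = reinterpret_cast<const EBLC*>(data);
        uint32_t numSizes = SkEndian_SwapBE32(eblc->numSizes);
        // The strike records start immediately after the 8 byte header.
        const EBLC::BitmapSizeTable* sizes =
            reinterpret_cast<const EBLC::BitmapSizeTable*>(data + sizeof(EBLC));
        for (uint32_t i = 0; i < numSizes; ++i) {
            // ppemX/ppemY are single bytes, so no byte swap is needed.
            SkDebugf("strike %u: %u x %u ppem\n",
                     i, (uint32_t)sizes[i].ppemX, (uint32_t)sizes[i].ppemY);
        }
    }
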
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_EBSC.h b/gfx/skia/skia/src/sfnt/SkOTTable_EBSC.h
new file mode 100644
index 000000000..316c45d1c
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_EBSC.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_EBSC_DEFINED
+#define SkOTTable_EBSC_DEFINED
+
+#include "SkEndian.h"
+#include "SkOTTable_EBLC.h"
+#include "SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableEmbeddedBitmapScaling {
+ static const SK_OT_CHAR TAG0 = 'E';
+    static const SK_OT_CHAR TAG1 = 'B';
+    static const SK_OT_CHAR TAG2 = 'S';
+ static const SK_OT_CHAR TAG3 = 'C';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableEmbeddedBitmapScaling>::value;
+
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed version_initial = SkTEndian_SwapBE32(0x00020000);
+
+ SK_OT_ULONG numSizes;
+
+ struct BitmapScaleTable {
+ SkOTTableEmbeddedBitmapLocation::SbitLineMetrics hori;
+ SkOTTableEmbeddedBitmapLocation::SbitLineMetrics vert;
+ SK_OT_BYTE ppemX; //target horizontal pixels per EM
+ SK_OT_BYTE ppemY; //target vertical pixels per EM
+ SK_OT_BYTE substitutePpemX; //use bitmaps of this size
+ SK_OT_BYTE substitutePpemY; //use bitmaps of this size
+ }; //bitmapScaleTable[numSizes];
+};
+
+#pragma pack(pop)
+
+#endif

diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2.h
new file mode 100644
index 000000000..438257805
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_DEFINED
+#define SkOTTable_OS_2_DEFINED
+
+#include "SkOTTable_OS_2_VA.h"
+#include "SkOTTable_OS_2_V0.h"
+#include "SkOTTable_OS_2_V1.h"
+#include "SkOTTable_OS_2_V2.h"
+#include "SkOTTable_OS_2_V3.h"
+#include "SkOTTable_OS_2_V4.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableOS2 {
+ static constexpr SK_OT_CHAR TAG0 = 'O';
+ static constexpr SK_OT_CHAR TAG1 = 'S';
+ static constexpr SK_OT_CHAR TAG2 = '/';
+ static constexpr SK_OT_CHAR TAG3 = '2';
+ static constexpr SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableOS2>::value;
+
+ union Version {
+ SK_OT_USHORT version;
+
+ //original V0 TT
+ struct VA : SkOTTableOS2_VA { } vA;
+ struct V0 : SkOTTableOS2_V0 { } v0;
+ struct V1 : SkOTTableOS2_V1 { } v1;
+ struct V2 : SkOTTableOS2_V2 { } v2;
+ //makes fsType 0-3 exclusive
+ struct V3 : SkOTTableOS2_V3 { } v3;
+ //defines fsSelection bits 7-9
+ struct V4 : SkOTTableOS2_V4 { } v4;
+ } version;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableOS2::Version::VA) == 68, "sizeof_SkOTTableOS2__VA_not_68");
+static_assert(sizeof(SkOTTableOS2::Version::V0) == 78, "sizeof_SkOTTableOS2__V0_not_78");
+static_assert(sizeof(SkOTTableOS2::Version::V1) == 86, "sizeof_SkOTTableOS2__V1_not_86");
+static_assert(sizeof(SkOTTableOS2::Version::V2) == 96, "sizeof_SkOTTableOS2__V2_not_96");
+static_assert(sizeof(SkOTTableOS2::Version::V3) == 96, "sizeof_SkOTTableOS2__V3_not_96");
+static_assert(sizeof(SkOTTableOS2::Version::V4) == 96, "sizeof_SkOTTableOS2__V4_not_96");
+
+#endif
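
Note (not part of the patch above): since every versioned view begins with the same version field, a reader picks a view by comparing that field against the per-version VERSION constants, which are already stored big endian. A hedged sketch, illustrative only; it assumes SkOTTable_OS_2_V4.h defines a VERSION constant following the same pattern, relies on versions 0 through 4 keeping the fields up to usWinDescent at the same offsets, and uses table length to separate version A from version 0 (they share version number 0, per the comment in SkOTTable_OS_2_V0.h):

    // Illustrative sketch, not part of this patch.
    static int16_t typo_ascender(const SkOTTableOS2& os2, size_t tableLength) {
        if (tableLength >= sizeof(SkOTTableOS2_V0) &&
            (os2.version.version == SkOTTableOS2_V0::VERSION ||
             os2.version.version == SkOTTableOS2_V1::VERSION ||
             os2.version.version == SkOTTableOS2_V2::VERSION ||
             os2.version.version == SkOTTableOS2_V3::VERSION ||
             os2.version.version == SkOTTableOS2_V4::VERSION))
        {
            // sTypoAscender sits at the same offset in versions 0 through 4,
            // so the v0 view is enough to read it.
            return (int16_t)SkEndian_SwapBE16(os2.version.v0.sTypoAscender);
        }
        return 0;  // version A has no sTypoAscender
    }
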
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V0.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V0.h
new file mode 100644
index 000000000..0c9f61198
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V0.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_V0_DEFINED
+#define SkOTTable_OS_2_V0_DEFINED
+
+#include "SkEndian.h"
+#include "SkIBMFamilyClass.h"
+#include "SkOTTableTypes.h"
+#include "SkPanose.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableOS2_V0 {
+ SK_OT_USHORT version;
+ //SkOTTableOS2_VA::VERSION and SkOTTableOS2_V0::VERSION are both 0.
+ //The only way to differentiate these two versions is by the size of the table.
+ static const SK_OT_USHORT VERSION = SkTEndian_SwapBE16(0);
+
+ SK_OT_SHORT xAvgCharWidth;
+ struct WeightClass {
+ enum Value : SK_OT_USHORT {
+ Thin = SkTEndian_SwapBE16(100),
+ ExtraLight = SkTEndian_SwapBE16(200),
+ Light = SkTEndian_SwapBE16(300),
+ Normal = SkTEndian_SwapBE16(400),
+ Medium = SkTEndian_SwapBE16(500),
+ SemiBold = SkTEndian_SwapBE16(600),
+ Bold = SkTEndian_SwapBE16(700),
+ ExtraBold = SkTEndian_SwapBE16(800),
+ Black = SkTEndian_SwapBE16(900),
+ };
+ SK_OT_USHORT value;
+ } usWeightClass;
+ struct WidthClass {
+ enum Value : SK_OT_USHORT {
+ UltraCondensed = SkTEndian_SwapBE16(1),
+ ExtraCondensed = SkTEndian_SwapBE16(2),
+ Condensed = SkTEndian_SwapBE16(3),
+ SemiCondensed = SkTEndian_SwapBE16(4),
+ Medium = SkTEndian_SwapBE16(5),
+ SemiExpanded = SkTEndian_SwapBE16(6),
+ Expanded = SkTEndian_SwapBE16(7),
+ ExtraExpanded = SkTEndian_SwapBE16(8),
+ UltraExpanded = SkTEndian_SwapBE16(9),
+ } value;
+ } usWidthClass;
+ union Type {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved00,
+ Restricted,
+ PreviewPrint,
+ Editable,
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT Installable = 0;
+ static const SK_OT_USHORT RestrictedMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT PreviewPrintMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT EditableMask = SkOTSetUSHORTBit<3>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsType;
+ SK_OT_SHORT ySubscriptXSize;
+ SK_OT_SHORT ySubscriptYSize;
+ SK_OT_SHORT ySubscriptXOffset;
+ SK_OT_SHORT ySubscriptYOffset;
+ SK_OT_SHORT ySuperscriptXSize;
+ SK_OT_SHORT ySuperscriptYSize;
+ SK_OT_SHORT ySuperscriptXOffset;
+ SK_OT_SHORT ySuperscriptYOffset;
+ SK_OT_SHORT yStrikeoutSize;
+ SK_OT_SHORT yStrikeoutPosition;
+ SkIBMFamilyClass sFamilyClass;
+ SkPanose panose;
+ SK_OT_ULONG ulCharRange[4];
+ SK_OT_CHAR achVendID[4];
+ union Selection {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Italic,
+ Underscore,
+ Negative,
+ Outlined,
+ Strikeout,
+ Bold,
+ Regular,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT ItalicMask = SkOTSetUSHORTBit<0>::value;
+ static const SK_OT_USHORT UnderscoreMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT NegativeMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT OutlinedMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT StrikeoutMask = SkOTSetUSHORTBit<4>::value;
+ static const SK_OT_USHORT BoldMask = SkOTSetUSHORTBit<5>::value;
+ static const SK_OT_USHORT RegularMask = SkOTSetUSHORTBit<6>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsSelection;
+ SK_OT_USHORT usFirstCharIndex;
+ SK_OT_USHORT usLastCharIndex;
+ //version0
+ SK_OT_SHORT sTypoAscender;
+ SK_OT_SHORT sTypoDescender;
+ SK_OT_SHORT sTypoLineGap;
+ SK_OT_USHORT usWinAscent;
+ SK_OT_USHORT usWinDescent;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableOS2_V0) == 78, "sizeof_SkOTTableOS2_V0_not_78");
+
+#endif
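
Note (not part of the patch above): the Raw view exists so that bit tests can be done against the unswapped field, because the masks are themselves big endian. A hedged sketch of testing the embedding bits; the helper is illustrative only:

    // Illustrative sketch, not part of this patch.
    static bool embedding_is_restricted(const SkOTTableOS2_V0& os2v0) {
        return 0 != (os2v0.fsType.raw.value &
                     SkOTTableOS2_V0::Type::Raw::RestrictedMask);
    }
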
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V1.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V1.h
new file mode 100644
index 000000000..40cfdb64a
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V1.h
@@ -0,0 +1,515 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_V1_DEFINED
+#define SkOTTable_OS_2_V1_DEFINED
+
+#include "SkEndian.h"
+#include "SkIBMFamilyClass.h"
+#include "SkOTTableTypes.h"
+#include "SkPanose.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableOS2_V1 {
+ SK_OT_USHORT version;
+ static const SK_OT_USHORT VERSION = SkTEndian_SwapBE16(1);
+
+ SK_OT_SHORT xAvgCharWidth;
+ struct WeightClass {
+ enum Value : SK_OT_USHORT {
+ Thin = SkTEndian_SwapBE16(100),
+ ExtraLight = SkTEndian_SwapBE16(200),
+ Light = SkTEndian_SwapBE16(300),
+ Normal = SkTEndian_SwapBE16(400),
+ Medium = SkTEndian_SwapBE16(500),
+ SemiBold = SkTEndian_SwapBE16(600),
+ Bold = SkTEndian_SwapBE16(700),
+ ExtraBold = SkTEndian_SwapBE16(800),
+ Black = SkTEndian_SwapBE16(900),
+ };
+ SK_OT_USHORT value;
+ } usWeightClass;
+ struct WidthClass {
+ enum Value : SK_OT_USHORT {
+ UltraCondensed = SkTEndian_SwapBE16(1),
+ ExtraCondensed = SkTEndian_SwapBE16(2),
+ Condensed = SkTEndian_SwapBE16(3),
+ SemiCondensed = SkTEndian_SwapBE16(4),
+ Medium = SkTEndian_SwapBE16(5),
+ SemiExpanded = SkTEndian_SwapBE16(6),
+ Expanded = SkTEndian_SwapBE16(7),
+ ExtraExpanded = SkTEndian_SwapBE16(8),
+ UltraExpanded = SkTEndian_SwapBE16(9),
+ } value;
+ } usWidthClass;
+ union Type {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved00,
+ Restricted,
+ PreviewPrint,
+ Editable,
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT Installable = 0;
+ static const SK_OT_USHORT RestrictedMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT PreviewPrintMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT EditableMask = SkOTSetUSHORTBit<3>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsType;
+ SK_OT_SHORT ySubscriptXSize;
+ SK_OT_SHORT ySubscriptYSize;
+ SK_OT_SHORT ySubscriptXOffset;
+ SK_OT_SHORT ySubscriptYOffset;
+ SK_OT_SHORT ySuperscriptXSize;
+ SK_OT_SHORT ySuperscriptYSize;
+ SK_OT_SHORT ySuperscriptXOffset;
+ SK_OT_SHORT ySuperscriptYOffset;
+ SK_OT_SHORT yStrikeoutSize;
+ SK_OT_SHORT yStrikeoutPosition;
+ SkIBMFamilyClass sFamilyClass;
+ SkPanose panose;
+ union UnicodeRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Thai,
+ Lao,
+ BasicGeorgian,
+ GeorgianExtended,
+ HangulJamo,
+ LatinExtendedAdditional,
+ GreekExtended,
+ GeneralPunctuation)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Bengali,
+ Gurmukhi,
+ Gujarati,
+ Oriya,
+ Tamil,
+ Telugu,
+ Kannada,
+ Malayalam)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ GreekSymbolsAndCoptic,
+ Cyrillic,
+ Armenian,
+ BasicHebrew,
+ HebrewExtendedAB,
+ BasicArabic,
+ ArabicExtended,
+ Devanagari)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ BasicLatin,
+ Latin1Supplement,
+ LatinExtendedA,
+ LatinExtendedB,
+ IPAExtensions,
+ SpacingModifierLetters,
+ CombiningDiacriticalMarks,
+ BasicGreek)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ Hangul,
+ Reserved057,
+ Reserved058,
+ CJKUnifiedIdeographs,
+ PrivateUseArea,
+ CJKCompatibilityIdeographs,
+ AlphabeticPresentationForms,
+ ArabicPresentationFormsA)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ CJKSymbolsAndPunctuation,
+ Hiragana,
+ Katakana,
+ Bopomofo,
+ HangulCompatibilityJamo,
+ CJKMiscellaneous,
+ EnclosedCJKLettersAndMonths,
+ CJKCompatibility)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ ControlPictures,
+ OpticalCharacterRecognition,
+ EnclosedAlphanumerics,
+ BoxDrawing,
+ BlockElements,
+ GeometricShapes,
+ MiscellaneousSymbols,
+ Dingbats)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ SuperscriptsAndSubscripts,
+ CurrencySymbols,
+ CombiningDiacriticalMarksForSymbols,
+ LetterlikeSymbols,
+ NumberForms,
+ Arrows,
+ MathematicalOperators,
+ MiscellaneousTechnical)
+
+ //l2 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved088,
+ Reserved089,
+ Reserved090,
+ Reserved091,
+ Reserved092,
+ Reserved093,
+ Reserved094,
+ Reserved095)
+ //l2 16-23
+ SK_OT_BYTE_BITFIELD(
+ Reserved080,
+ Reserved081,
+ Reserved082,
+ Reserved083,
+ Reserved084,
+ Reserved085,
+ Reserved086,
+ Reserved087)
+ //l2 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved072,
+ Reserved073,
+ Reserved074,
+ Reserved075,
+ Reserved076,
+ Reserved077,
+ Reserved078,
+ Reserved079)
+ //l2 0-7
+ SK_OT_BYTE_BITFIELD(
+ CombiningHalfMarks,
+ CJKCompatibilityForms,
+ SmallFormVariants,
+ ArabicPresentationFormsB,
+ HalfwidthAndFullwidthForms,
+ Specials,
+ Reserved70,
+ Reserved71)
+
+ //l3 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved120,
+ Reserved121,
+ Reserved122,
+ Reserved123,
+ Reserved124,
+ Reserved125,
+ Reserved126,
+ Reserved127)
+ //l3 16-23
+ SK_OT_BYTE_BITFIELD(
+ Reserved112,
+ Reserved113,
+ Reserved114,
+ Reserved115,
+ Reserved116,
+ Reserved117,
+ Reserved118,
+ Reserved119)
+ //l3 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved104,
+ Reserved105,
+ Reserved106,
+ Reserved107,
+ Reserved108,
+ Reserved109,
+ Reserved110,
+ Reserved111)
+ //l3 0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved096,
+ Reserved097,
+ Reserved098,
+ Reserved099,
+ Reserved100,
+ Reserved101,
+ Reserved102,
+ Reserved103)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG BasicLatinMask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin1SupplementMask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG LatinExtendedAMask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG LatinExtendedBMask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG IPAExtensionsMask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG SpacingModifierLettersMask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksMask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG BasicGreekMask = SkOTSetULONGBit<7>::value;
+ static const SK_OT_ULONG GreekSymbolsAndCCopticMask = SkOTSetULONGBit<8>::value;
+ static const SK_OT_ULONG CyrillicMask = SkOTSetULONGBit<9>::value;
+ static const SK_OT_ULONG ArmenianMask = SkOTSetULONGBit<10>::value;
+ static const SK_OT_ULONG BasicHebrewMask = SkOTSetULONGBit<11>::value;
+ static const SK_OT_ULONG HebrewExtendedABMask = SkOTSetULONGBit<12>::value;
+ static const SK_OT_ULONG BasicArabicMask = SkOTSetULONGBit<13>::value;
+ static const SK_OT_ULONG ArabicExtendedMask = SkOTSetULONGBit<14>::value;
+ static const SK_OT_ULONG DevanagariMask = SkOTSetULONGBit<15>::value;
+ static const SK_OT_ULONG BengaliMask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG GurmukhiMask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG GujaratiMask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG OriyaMask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG TamilMask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG TeluguMask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG KannadaMask = SkOTSetULONGBit<22>::value;
+ static const SK_OT_ULONG MalayalamMask = SkOTSetULONGBit<23>::value;
+ static const SK_OT_ULONG ThaiMask = SkOTSetULONGBit<24>::value;
+ static const SK_OT_ULONG LaoMask = SkOTSetULONGBit<25>::value;
+ static const SK_OT_ULONG BasicGeorgianMask = SkOTSetULONGBit<26>::value;
+ static const SK_OT_ULONG GeorgianExtendedMask = SkOTSetULONGBit<27>::value;
+ static const SK_OT_ULONG HangulJamoMask = SkOTSetULONGBit<28>::value;
+ static const SK_OT_ULONG LatinExtendedAdditionalMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG GreekExtendedMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG GeneralPunctuationMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG SuperscriptsAndSubscriptsMask = SkOTSetULONGBit<32 - 32>::value;
+ static const SK_OT_ULONG CurrencySymbolsMask = SkOTSetULONGBit<33 - 32>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksForSymbolsMask = SkOTSetULONGBit<34 - 32>::value;
+ static const SK_OT_ULONG LetterlikeSymbolsMask = SkOTSetULONGBit<35 - 32>::value;
+ static const SK_OT_ULONG NumberFormsMask = SkOTSetULONGBit<36 - 32>::value;
+ static const SK_OT_ULONG ArrowsMask = SkOTSetULONGBit<37 - 32>::value;
+ static const SK_OT_ULONG MathematicalOperatorsMask = SkOTSetULONGBit<38 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousTechnicalMask = SkOTSetULONGBit<39 - 32>::value;
+ static const SK_OT_ULONG ControlPicturesMask = SkOTSetULONGBit<40 - 32>::value;
+ static const SK_OT_ULONG OpticalCharacterRecognitionMask = SkOTSetULONGBit<41 - 32>::value;
+ static const SK_OT_ULONG EnclosedAlphanumericsMask = SkOTSetULONGBit<42 - 32>::value;
+ static const SK_OT_ULONG BoxDrawingMask = SkOTSetULONGBit<43 - 32>::value;
+ static const SK_OT_ULONG BlockElementsMask = SkOTSetULONGBit<44 - 32>::value;
+ static const SK_OT_ULONG GeometricShapesMask = SkOTSetULONGBit<45 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousSymbolsMask = SkOTSetULONGBit<46 - 32>::value;
+ static const SK_OT_ULONG DingbatsMask = SkOTSetULONGBit<47 - 32>::value;
+ static const SK_OT_ULONG CJKSymbolsAndPunctuationMask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG HiraganaMask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG KatakanaMask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG BopomofoMask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG HangulCompatibilityJamoMask = SkOTSetULONGBit<52 - 32>::value;
+ static const SK_OT_ULONG CJKMiscellaneousMask = SkOTSetULONGBit<53 - 32>::value;
+ static const SK_OT_ULONG EnclosedCJKLettersAndMonthsMask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityMask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG HangulMask = SkOTSetULONGBit<56 - 32>::value;
+ //Reserved
+ //Reserved
+ static const SK_OT_ULONG CJKUnifiedIdeographsMask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG PrivateUseAreaMask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityIdeographsMask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG AlphabeticPresentationFormsMask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsAMask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ struct l2 {
+ static const SK_OT_ULONG CombiningHalfMarksMask = SkOTSetULONGBit<64 - 64>::value;
+ static const SK_OT_ULONG CJKCompatibilityFormsMask = SkOTSetULONGBit<65 - 64>::value;
+ static const SK_OT_ULONG SmallFormVariantsMask = SkOTSetULONGBit<66 - 64>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsBMask = SkOTSetULONGBit<67 - 64>::value;
+ static const SK_OT_ULONG HalfwidthAndFullwidthFormsMask = SkOTSetULONGBit<68 - 64>::value;
+ static const SK_OT_ULONG SpecialsMask = SkOTSetULONGBit<69 - 64>::value;
+ };
+ SK_OT_ULONG value[4];
+ } raw;
+ } ulUnicodeRange;
+ SK_OT_CHAR achVendID[4];
+ union Selection {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Italic,
+ Underscore,
+ Negative,
+ Outlined,
+ Strikeout,
+ Bold,
+ Regular,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT ItalicMask = SkOTSetUSHORTBit<0>::value;
+ static const SK_OT_USHORT UnderscoreMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT NegativeMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT OutlinedMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT StrikeoutMask = SkOTSetUSHORTBit<4>::value;
+ static const SK_OT_USHORT BoldMask = SkOTSetUSHORTBit<5>::value;
+ static const SK_OT_USHORT RegularMask = SkOTSetUSHORTBit<6>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsSelection;
+ SK_OT_USHORT usFirstCharIndex;
+ SK_OT_USHORT usLastCharIndex;
+ //version0
+ SK_OT_SHORT sTypoAscender;
+ SK_OT_SHORT sTypoDescender;
+ SK_OT_SHORT sTypoLineGap;
+ SK_OT_USHORT usWinAscent;
+ SK_OT_USHORT usWinDescent;
+ //version1
+ union CodePageRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved24,
+ Reserved25,
+ Reserved26,
+ Reserved27,
+ Reserved28,
+ MacintoshCharacterSet,
+ OEMCharacterSet,
+ SymbolCharacterSet)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Thai_874,
+ JISJapan_932,
+ ChineseSimplified_936,
+ KoreanWansung_949,
+ ChineseTraditional_950,
+ KoreanJohab_1361,
+ Reserved22,
+ Reserved23)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ Latin1_1252,
+ Latin2EasternEurope_1250,
+ Cyrillic_1251,
+ Greek_1253,
+ Turkish_1254,
+ Hebrew_1255,
+ Arabic_1256,
+ WindowsBaltic_1257)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ IBMTurkish_857,
+ IBMCyrillic_855,
+ Latin2_852,
+ MSDOSBaltic_775,
+ Greek_737,
+ Arabic_708,
+ WELatin1_850,
+ US_437)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ IBMGreek_869,
+ MSDOSRussian_866,
+ MSDOSNordic_865,
+ Arabic_864,
+ MSDOSCanadianFrench_863,
+ Hebrew_862,
+ MSDOSIcelandic_861,
+ MSDOSPortuguese_860)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved40,
+ Reserved41,
+ Reserved42,
+ Reserved43,
+ Reserved44,
+ Reserved45,
+ Reserved46,
+ Reserved47)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved32,
+ Reserved33,
+ Reserved34,
+ Reserved35,
+ Reserved36,
+ Reserved37,
+ Reserved38,
+ Reserved39)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG Latin1_1252Mask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin2EasternEurope_1250Mask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG Cyrillic_1251Mask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG Greek_1253Mask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG Turkish_1254Mask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG Hebrew_1255Mask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG Arabic_1256Mask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG WindowsBaltic_1257Mask = SkOTSetULONGBit<7>::value;
+ static const SK_OT_ULONG Thai_874Mask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG JISJapan_932Mask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG ChineseSimplified_936Mask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG KoreanWansung_949Mask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG ChineseTraditional_950Mask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG KoreanJohab_1361Mask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG MacintoshCharacterSetMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG OEMCharacterSetMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG SymbolCharacterSetMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG IBMGreek_869Mask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG MSDOSRussian_866Mask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG MSDOSNordic_865Mask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG Arabic_864Mask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG MSDOSCanadianFrench_863Mask = SkOTSetULONGBit<52 - 32>::value;
+ static const SK_OT_ULONG Hebrew_862Mask = SkOTSetULONGBit<53 - 32>::value;
+ static const SK_OT_ULONG MSDOSIcelandic_861Mask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG MSDOSPortuguese_860Mask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG IBMTurkish_857Mask = SkOTSetULONGBit<56 - 32>::value;
+ static const SK_OT_ULONG IBMCyrillic_855Mask = SkOTSetULONGBit<57 - 32>::value;
+ static const SK_OT_ULONG Latin2_852Mask = SkOTSetULONGBit<58 - 32>::value;
+ static const SK_OT_ULONG MSDOSBaltic_775Mask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG Greek_737Mask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG Arabic_708Mask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG WELatin1_850Mask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG US_437Mask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ SK_OT_ULONG value[2];
+ } raw;
+ } ulCodePageRange;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableOS2_V1) == 86, "sizeof_SkOTTableOS2_V1_not_86");
+
+#endif
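
Note (not part of the patch above): version 1 is the first to carry ulCodePageRange; code pages 0-31 live in raw.value[0] and 32-63 in raw.value[1], with the masks again pre-swapped to big endian. A hedged sketch, illustrative only:

    // Illustrative sketch, not part of this patch: does the face advertise
    // Shift-JIS (Windows code page 932) support?
    static bool supports_cp932(const SkOTTableOS2_V1& os2v1) {
        return 0 != (os2v1.ulCodePageRange.raw.value[0] &
                     SkOTTableOS2_V1::CodePageRange::Raw::l0::JISJapan_932Mask);
    }
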
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V2.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V2.h
new file mode 100644
index 000000000..080bf95d9
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V2.h
@@ -0,0 +1,538 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_V2_DEFINED
+#define SkOTTable_OS_2_V2_DEFINED
+
+#include "SkEndian.h"
+#include "SkIBMFamilyClass.h"
+#include "SkOTTableTypes.h"
+#include "SkPanose.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableOS2_V2 {
+ SK_OT_USHORT version;
+ static const SK_OT_USHORT VERSION = SkTEndian_SwapBE16(2);
+
+ SK_OT_SHORT xAvgCharWidth;
+ struct WeightClass {
+ enum Value : SK_OT_USHORT {
+ Thin = SkTEndian_SwapBE16(100),
+ ExtraLight = SkTEndian_SwapBE16(200),
+ Light = SkTEndian_SwapBE16(300),
+ Normal = SkTEndian_SwapBE16(400),
+ Medium = SkTEndian_SwapBE16(500),
+ SemiBold = SkTEndian_SwapBE16(600),
+ Bold = SkTEndian_SwapBE16(700),
+ ExtraBold = SkTEndian_SwapBE16(800),
+ Black = SkTEndian_SwapBE16(900),
+ };
+ SK_OT_USHORT value;
+ } usWeightClass;
+ struct WidthClass {
+ enum Value : SK_OT_USHORT {
+ UltraCondensed = SkTEndian_SwapBE16(1),
+ ExtraCondensed = SkTEndian_SwapBE16(2),
+ Condensed = SkTEndian_SwapBE16(3),
+ SemiCondensed = SkTEndian_SwapBE16(4),
+ Medium = SkTEndian_SwapBE16(5),
+ SemiExpanded = SkTEndian_SwapBE16(6),
+ Expanded = SkTEndian_SwapBE16(7),
+ ExtraExpanded = SkTEndian_SwapBE16(8),
+ UltraExpanded = SkTEndian_SwapBE16(9),
+ } value;
+ } usWidthClass;
+ union Type {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ NoSubsetting,
+ Bitmap,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved00,
+ Restricted,
+ PreviewPrint,
+ Editable,
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT Installable = 0;
+ static const SK_OT_USHORT RestrictedMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT PreviewPrintMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT EditableMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT NoSubsettingMask = SkOTSetUSHORTBit<8>::value;
+ static const SK_OT_USHORT BitmapMask = SkOTSetUSHORTBit<9>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsType;
+ SK_OT_SHORT ySubscriptXSize;
+ SK_OT_SHORT ySubscriptYSize;
+ SK_OT_SHORT ySubscriptXOffset;
+ SK_OT_SHORT ySubscriptYOffset;
+ SK_OT_SHORT ySuperscriptXSize;
+ SK_OT_SHORT ySuperscriptYSize;
+ SK_OT_SHORT ySuperscriptXOffset;
+ SK_OT_SHORT ySuperscriptYOffset;
+ SK_OT_SHORT yStrikeoutSize;
+ SK_OT_SHORT yStrikeoutPosition;
+ SkIBMFamilyClass sFamilyClass;
+ SkPanose panose;
+ union UnicodeRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Thai,
+ Lao,
+ Georgian,
+ Reserved027,
+ HangulJamo,
+ LatinExtendedAdditional,
+ GreekExtended,
+ GeneralPunctuation)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Bengali,
+ Gurmukhi,
+ Gujarati,
+ Oriya,
+ Tamil,
+ Telugu,
+ Kannada,
+ Malayalam)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved008,
+ Cyrillic,
+ Armenian,
+ Hebrew,
+ Reserved012,
+ Arabic,
+ Reserved014,
+ Devanagari)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ BasicLatin,
+ Latin1Supplement,
+ LatinExtendedA,
+ LatinExtendedB,
+ IPAExtensions,
+ SpacingModifierLetters,
+ CombiningDiacriticalMarks,
+ Greek)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ Hangul,
+ Surrogates,
+ Reserved058,
+ CJKUnifiedIdeographs,
+ PrivateUseArea,
+ CJKCompatibilityIdeographs,
+ AlphabeticPresentationForms,
+ ArabicPresentationFormsA)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ CJKSymbolsAndPunctuation,
+ Hiragana,
+ Katakana,
+ Bopomofo,
+ HangulCompatibilityJamo,
+ CJKMiscellaneous,
+ EnclosedCJKLettersAndMonths,
+ CJKCompatibility)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ ControlPictures,
+ OpticalCharacterRecognition,
+ EnclosedAlphanumerics,
+ BoxDrawing,
+ BlockElements,
+ GeometricShapes,
+ MiscellaneousSymbols,
+ Dingbats)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ SuperscriptsAndSubscripts,
+ CurrencySymbols,
+ CombiningDiacriticalMarksForSymbols,
+ LetterlikeSymbols,
+ NumberForms,
+ Arrows,
+ MathematicalOperators,
+ MiscellaneousTechnical)
+
+ //l2 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved088,
+ Reserved089,
+ Reserved090,
+ Reserved091,
+ Reserved092,
+ Reserved093,
+ Reserved094,
+ Reserved095)
+ //l2 16-23
+ SK_OT_BYTE_BITFIELD(
+ Khmer,
+ Mongolian,
+ Braille,
+ Yi,
+ Reserved084,
+ Reserved085,
+ Reserved086,
+ Reserved087)
+ //l2 8-15
+ SK_OT_BYTE_BITFIELD(
+ Thaana,
+ Sinhala,
+ Myanmar,
+ Ethiopic,
+ Cherokee,
+ UnifiedCanadianSyllabics,
+ Ogham,
+ Runic)
+ //l2 0-7
+ SK_OT_BYTE_BITFIELD(
+ CombiningHalfMarks,
+ CJKCompatibilityForms,
+ SmallFormVariants,
+ ArabicPresentationFormsB,
+ HalfwidthAndFullwidthForms,
+ Specials,
+ Tibetan,
+ Syriac)
+
+ //l3 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved120,
+ Reserved121,
+ Reserved122,
+ Reserved123,
+ Reserved124,
+ Reserved125,
+ Reserved126,
+ Reserved127)
+ //l3 16-23
+ SK_OT_BYTE_BITFIELD(
+ Reserved112,
+ Reserved113,
+ Reserved114,
+ Reserved115,
+ Reserved116,
+ Reserved117,
+ Reserved118,
+ Reserved119)
+ //l3 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved104,
+ Reserved105,
+ Reserved106,
+ Reserved107,
+ Reserved108,
+ Reserved109,
+ Reserved110,
+ Reserved111)
+ //l3 0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved096,
+ Reserved097,
+ Reserved098,
+ Reserved099,
+ Reserved100,
+ Reserved101,
+ Reserved102,
+ Reserved103)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG BasicLatinMask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin1SupplementMask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG LatinExtendedAMask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG LatinExtendedBMask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG IPAExtensionsMask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG SpacingModifierLettersMask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksMask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG GreekMask = SkOTSetULONGBit<7>::value;
+ //Reserved
+ static const SK_OT_ULONG CyrillicMask = SkOTSetULONGBit<9>::value;
+ static const SK_OT_ULONG ArmenianMask = SkOTSetULONGBit<10>::value;
+ static const SK_OT_ULONG HebrewMask = SkOTSetULONGBit<11>::value;
+ //Reserved
+ static const SK_OT_ULONG ArabicMask = SkOTSetULONGBit<13>::value;
+ //Reserved
+ static const SK_OT_ULONG DevanagariMask = SkOTSetULONGBit<15>::value;
+ static const SK_OT_ULONG BengaliMask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG GurmukhiMask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG GujaratiMask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG OriyaMask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG TamilMask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG TeluguMask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG KannadaMask = SkOTSetULONGBit<22>::value;
+ static const SK_OT_ULONG MalayalamMask = SkOTSetULONGBit<23>::value;
+ static const SK_OT_ULONG ThaiMask = SkOTSetULONGBit<24>::value;
+ static const SK_OT_ULONG LaoMask = SkOTSetULONGBit<25>::value;
+ static const SK_OT_ULONG GeorgianMask = SkOTSetULONGBit<26>::value;
+ //Reserved
+ static const SK_OT_ULONG HangulJamoMask = SkOTSetULONGBit<28>::value;
+ static const SK_OT_ULONG LatinExtendedAdditionalMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG GreekExtendedMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG GeneralPunctuationMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG SuperscriptsAndSubscriptsMask = SkOTSetULONGBit<32 - 32>::value;
+ static const SK_OT_ULONG CurrencySymbolsMask = SkOTSetULONGBit<33 - 32>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksForSymbolsMask = SkOTSetULONGBit<34 - 32>::value;
+ static const SK_OT_ULONG LetterlikeSymbolsMask = SkOTSetULONGBit<35 - 32>::value;
+ static const SK_OT_ULONG NumberFormsMask = SkOTSetULONGBit<36 - 32>::value;
+ static const SK_OT_ULONG ArrowsMask = SkOTSetULONGBit<37 - 32>::value;
+ static const SK_OT_ULONG MathematicalOperatorsMask = SkOTSetULONGBit<38 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousTechnicalMask = SkOTSetULONGBit<39 - 32>::value;
+ static const SK_OT_ULONG ControlPicturesMask = SkOTSetULONGBit<40 - 32>::value;
+ static const SK_OT_ULONG OpticalCharacterRecognitionMask = SkOTSetULONGBit<41 - 32>::value;
+ static const SK_OT_ULONG EnclosedAlphanumericsMask = SkOTSetULONGBit<42 - 32>::value;
+ static const SK_OT_ULONG BoxDrawingMask = SkOTSetULONGBit<43 - 32>::value;
+ static const SK_OT_ULONG BlockElementsMask = SkOTSetULONGBit<44 - 32>::value;
+ static const SK_OT_ULONG GeometricShapesMask = SkOTSetULONGBit<45 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousSymbolsMask = SkOTSetULONGBit<46 - 32>::value;
+ static const SK_OT_ULONG DingbatsMask = SkOTSetULONGBit<47 - 32>::value;
+ static const SK_OT_ULONG CJKSymbolsAndPunctuationMask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG HiraganaMask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG KatakanaMask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG BopomofoMask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG HangulCompatibilityJamoMask = SkOTSetULONGBit<52 - 32>::value;
+ static const SK_OT_ULONG CJKMiscellaneousMask = SkOTSetULONGBit<53 - 32>::value;
+ static const SK_OT_ULONG EnclosedCJKLettersAndMonthsMask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityMask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG HangulMask = SkOTSetULONGBit<56 - 32>::value;
+ static const SK_OT_ULONG SurrogatesMask = SkOTSetULONGBit<57 - 32>::value;
+ //Reserved
+ static const SK_OT_ULONG CJKUnifiedIdeographsMask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG PrivateUseAreaMask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityIdeographsMask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG AlphabeticPresentationFormsMask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsAMask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ struct l2 {
+ static const SK_OT_ULONG CombiningHalfMarksMask = SkOTSetULONGBit<64 - 64>::value;
+ static const SK_OT_ULONG CJKCompatibilityFormsMask = SkOTSetULONGBit<65 - 64>::value;
+ static const SK_OT_ULONG SmallFormVariantsMask = SkOTSetULONGBit<66 - 64>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsBMask = SkOTSetULONGBit<67 - 64>::value;
+ static const SK_OT_ULONG HalfwidthAndFullwidthFormsMask = SkOTSetULONGBit<68 - 64>::value;
+ static const SK_OT_ULONG SpecialsMask = SkOTSetULONGBit<69 - 64>::value;
+ static const SK_OT_ULONG TibetanMask = SkOTSetULONGBit<70 - 64>::value;
+ static const SK_OT_ULONG SyriacMask = SkOTSetULONGBit<71 - 64>::value;
+ static const SK_OT_ULONG ThaanaMask = SkOTSetULONGBit<72 - 64>::value;
+ static const SK_OT_ULONG SinhalaMask = SkOTSetULONGBit<73 - 64>::value;
+ static const SK_OT_ULONG MyanmarMask = SkOTSetULONGBit<74 - 64>::value;
+ static const SK_OT_ULONG EthiopicMask = SkOTSetULONGBit<75 - 64>::value;
+ static const SK_OT_ULONG CherokeeMask = SkOTSetULONGBit<76 - 64>::value;
+ static const SK_OT_ULONG UnifiedCanadianSyllabicsMask = SkOTSetULONGBit<77 - 64>::value;
+ static const SK_OT_ULONG OghamMask = SkOTSetULONGBit<78 - 64>::value;
+ static const SK_OT_ULONG RunicMask = SkOTSetULONGBit<79 - 64>::value;
+ static const SK_OT_ULONG KhmerMask = SkOTSetULONGBit<80 - 64>::value;
+ static const SK_OT_ULONG MongolianMask = SkOTSetULONGBit<81 - 64>::value;
+ static const SK_OT_ULONG BrailleMask = SkOTSetULONGBit<82 - 64>::value;
+ static const SK_OT_ULONG YiMask = SkOTSetULONGBit<83 - 64>::value;
+ };
+ SK_OT_ULONG value[4];
+ } raw;
+ } ulUnicodeRange;
+ SK_OT_CHAR achVendID[4];
+ union Selection {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Italic,
+ Underscore,
+ Negative,
+ Outlined,
+ Strikeout,
+ Bold,
+ Regular,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT ItalicMask = SkOTSetUSHORTBit<0>::value;
+ static const SK_OT_USHORT UnderscoreMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT NegativeMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT OutlinedMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT StrikeoutMask = SkOTSetUSHORTBit<4>::value;
+ static const SK_OT_USHORT BoldMask = SkOTSetUSHORTBit<5>::value;
+ static const SK_OT_USHORT RegularMask = SkOTSetUSHORTBit<6>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsSelection;
+ SK_OT_USHORT usFirstCharIndex;
+ SK_OT_USHORT usLastCharIndex;
+ //version0
+ SK_OT_SHORT sTypoAscender;
+ SK_OT_SHORT sTypoDescender;
+ SK_OT_SHORT sTypoLineGap;
+ SK_OT_USHORT usWinAscent;
+ SK_OT_USHORT usWinDescent;
+ //version1
+ union CodePageRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved24,
+ Reserved25,
+ Reserved26,
+ Reserved27,
+ Reserved28,
+ MacintoshCharacterSet,
+ OEMCharacterSet,
+ SymbolCharacterSet)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Thai_874,
+ JISJapan_932,
+ ChineseSimplified_936,
+ KoreanWansung_949,
+ ChineseTraditional_950,
+ KoreanJohab_1361,
+ Reserved22,
+ Reserved23)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ Vietnamese,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ Latin1_1252,
+ Latin2EasternEurope_1250,
+ Cyrillic_1251,
+ Greek_1253,
+ Turkish_1254,
+ Hebrew_1255,
+ Arabic_1256,
+ WindowsBaltic_1257)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ IBMTurkish_857,
+ IBMCyrillic_855,
+ Latin2_852,
+ MSDOSBaltic_775,
+ Greek_737,
+ Arabic_708,
+ WELatin1_850,
+ US_437)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ IBMGreek_869,
+ MSDOSRussian_866,
+ MSDOSNordic_865,
+ Arabic_864,
+ MSDOSCanadianFrench_863,
+ Hebrew_862,
+ MSDOSIcelandic_861,
+ MSDOSPortuguese_860)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved40,
+ Reserved41,
+ Reserved42,
+ Reserved43,
+ Reserved44,
+ Reserved45,
+ Reserved46,
+ Reserved47)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved32,
+ Reserved33,
+ Reserved34,
+ Reserved35,
+ Reserved36,
+ Reserved37,
+ Reserved38,
+ Reserved39)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG Latin1_1252Mask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin2EasternEurope_1250Mask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG Cyrillic_1251Mask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG Greek_1253Mask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG Turkish_1254Mask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG Hebrew_1255Mask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG Arabic_1256Mask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG WindowsBaltic_1257Mask = SkOTSetULONGBit<7>::value;
+ static const SK_OT_ULONG Vietnamese_1258Mask = SkOTSetULONGBit<8>::value;
+ static const SK_OT_ULONG Thai_874Mask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG JISJapan_932Mask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG ChineseSimplified_936Mask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG KoreanWansung_949Mask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG ChineseTraditional_950Mask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG KoreanJohab_1361Mask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG MacintoshCharacterSetMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG OEMCharacterSetMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG SymbolCharacterSetMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG IBMGreek_869Mask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG MSDOSRussian_866Mask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG MSDOSNordic_865Mask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG Arabic_864Mask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG MSDOSCanadianFrench_863Mask = SkOTSetULONGBit<52 - 32>::value;
+ static const SK_OT_ULONG Hebrew_862Mask = SkOTSetULONGBit<53 - 32>::value;
+ static const SK_OT_ULONG MSDOSIcelandic_861Mask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG MSDOSPortuguese_860Mask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG IBMTurkish_857Mask = SkOTSetULONGBit<56 - 32>::value;
+ static const SK_OT_ULONG IBMCyrillic_855Mask = SkOTSetULONGBit<57 - 32>::value;
+ static const SK_OT_ULONG Latin2_852Mask = SkOTSetULONGBit<58 - 32>::value;
+ static const SK_OT_ULONG MSDOSBaltic_775Mask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG Greek_737Mask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG Arabic_708Mask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG WELatin1_850Mask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG US_437Mask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ SK_OT_ULONG value[2];
+ } raw;
+ } ulCodePageRange;
+ //version2
+ SK_OT_SHORT sxHeight;
+ SK_OT_SHORT sCapHeight;
+ SK_OT_USHORT usDefaultChar;
+ SK_OT_USHORT usBreakChar;
+ SK_OT_USHORT usMaxContext;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableOS2_V2) == 96, "sizeof_SkOTTableOS2_V2_not_96");
+
+#endif
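
Note (not part of the patch above): version 2 appends sxHeight, sCapHeight, usDefaultChar, usBreakChar, and usMaxContext after the version 1 fields; like everything else in the table they are stored big endian. A hedged sketch, illustrative only:

    // Illustrative sketch, not part of this patch.
    static int16_t x_height(const SkOTTableOS2_V2& os2v2) {
        return (int16_t)SkEndian_SwapBE16(os2v2.sxHeight);
    }
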
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V3.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V3.h
new file mode 100644
index 000000000..e77c46606
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V3.h
@@ -0,0 +1,547 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_V3_DEFINED
+#define SkOTTable_OS_2_V3_DEFINED
+
+#include "SkEndian.h"
+#include "SkIBMFamilyClass.h"
+#include "SkOTTableTypes.h"
+#include "SkPanose.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableOS2_V3 {
+ SK_OT_USHORT version;
+ static const SK_OT_USHORT VERSION = SkTEndian_SwapBE16(3);
+
+ SK_OT_SHORT xAvgCharWidth;
+ struct WeightClass {
+ enum Value : SK_OT_USHORT {
+ Thin = SkTEndian_SwapBE16(100),
+ ExtraLight = SkTEndian_SwapBE16(200),
+ Light = SkTEndian_SwapBE16(300),
+ Normal = SkTEndian_SwapBE16(400),
+ Medium = SkTEndian_SwapBE16(500),
+ SemiBold = SkTEndian_SwapBE16(600),
+ Bold = SkTEndian_SwapBE16(700),
+ ExtraBold = SkTEndian_SwapBE16(800),
+ Black = SkTEndian_SwapBE16(900),
+ };
+ SK_OT_USHORT value;
+ } usWeightClass;
+ struct WidthClass {
+ enum Value : SK_OT_USHORT {
+ UltraCondensed = SkTEndian_SwapBE16(1),
+ ExtraCondensed = SkTEndian_SwapBE16(2),
+ Condensed = SkTEndian_SwapBE16(3),
+ SemiCondensed = SkTEndian_SwapBE16(4),
+ Medium = SkTEndian_SwapBE16(5),
+ SemiExpanded = SkTEndian_SwapBE16(6),
+ Expanded = SkTEndian_SwapBE16(7),
+ ExtraExpanded = SkTEndian_SwapBE16(8),
+ UltraExpanded = SkTEndian_SwapBE16(9),
+ } value;
+ } usWidthClass;
+ union Type {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ NoSubsetting,
+ Bitmap,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved00,
+ Restricted,
+ PreviewPrint,
+ Editable,
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT Installable = 0;
+ static const SK_OT_USHORT RestrictedMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT PreviewPrintMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT EditableMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT NoSubsettingMask = SkOTSetUSHORTBit<8>::value;
+ static const SK_OT_USHORT BitmapMask = SkOTSetUSHORTBit<9>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsType;
+ SK_OT_SHORT ySubscriptXSize;
+ SK_OT_SHORT ySubscriptYSize;
+ SK_OT_SHORT ySubscriptXOffset;
+ SK_OT_SHORT ySubscriptYOffset;
+ SK_OT_SHORT ySuperscriptXSize;
+ SK_OT_SHORT ySuperscriptYSize;
+ SK_OT_SHORT ySuperscriptXOffset;
+ SK_OT_SHORT ySuperscriptYOffset;
+ SK_OT_SHORT yStrikeoutSize;
+ SK_OT_SHORT yStrikeoutPosition;
+ SkIBMFamilyClass sFamilyClass;
+ SkPanose panose;
+ union UnicodeRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Thai,
+ Lao,
+ Georgian,
+ Reserved027,
+ HangulJamo,
+ LatinExtendedAdditional,
+ GreekExtended,
+ GeneralPunctuation)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Bengali,
+ Gurmukhi,
+ Gujarati,
+ Oriya,
+ Tamil,
+ Telugu,
+ Kannada,
+ Malayalam)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved008,
+ Cyrillic,
+ Armenian,
+ Hebrew,
+ Reserved012,
+ Arabic,
+ Reserved014,
+ Devanagari)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ BasicLatin,
+ Latin1Supplement,
+ LatinExtendedA,
+ LatinExtendedB,
+ IPAExtensions,
+ SpacingModifierLetters,
+ CombiningDiacriticalMarks,
+ GreekAndCoptic)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ Hangul,
+ NonPlane0,
+ Reserved058,
+ CJKUnifiedIdeographs,
+ PrivateUseArea,
+ CJKCompatibilityIdeographs,
+ AlphabeticPresentationForms,
+ ArabicPresentationFormsA)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ CJKSymbolsAndPunctuation,
+ Hiragana,
+ Katakana,
+ Bopomofo,
+ HangulCompatibilityJamo,
+ Reserved053,
+ EnclosedCJKLettersAndMonths,
+ CJKCompatibility)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ ControlPictures,
+ OpticalCharacterRecognition,
+ EnclosedAlphanumerics,
+ BoxDrawing,
+ BlockElements,
+ GeometricShapes,
+ MiscellaneousSymbols,
+ Dingbats)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ SuperscriptsAndSubscripts,
+ CurrencySymbols,
+ CombiningDiacriticalMarksForSymbols,
+ LetterlikeSymbols,
+ NumberForms,
+ Arrows,
+ MathematicalOperators,
+ MiscellaneousTechnical)
+
+ //l2 24-31
+ SK_OT_BYTE_BITFIELD(
+ MusicalSymbols,
+ MathematicalAlphanumericSymbols,
+ PrivateUse,
+ VariationSelectors,
+ Tags,
+ Reserved093,
+ Reserved094,
+ Reserved095)
+ //l2 16-23
+ SK_OT_BYTE_BITFIELD(
+ Khmer,
+ Mongolian,
+ Braille,
+ Yi,
+ Tagalog_Hanunoo_Buhid_Tagbanwa,
+ OldItalic,
+ Gothic,
+ Deseret)
+ //l2 8-15
+ SK_OT_BYTE_BITFIELD(
+ Thaana,
+ Sinhala,
+ Myanmar,
+ Ethiopic,
+ Cherokee,
+ UnifiedCanadianSyllabics,
+ Ogham,
+ Runic)
+ //l2 0-7
+ SK_OT_BYTE_BITFIELD(
+ CombiningHalfMarks,
+ CJKCompatibilityForms,
+ SmallFormVariants,
+ ArabicPresentationFormsB,
+ HalfwidthAndFullwidthForms,
+ Specials,
+ Tibetan,
+ Syriac)
+
+ //l3 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved120,
+ Reserved121,
+ Reserved122,
+ Reserved123,
+ Reserved124,
+ Reserved125,
+ Reserved126,
+ Reserved127)
+ //l3 16-23
+ SK_OT_BYTE_BITFIELD(
+ Reserved112,
+ Reserved113,
+ Reserved114,
+ Reserved115,
+ Reserved116,
+ Reserved117,
+ Reserved118,
+ Reserved119)
+ //l3 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved104,
+ Reserved105,
+ Reserved106,
+ Reserved107,
+ Reserved108,
+ Reserved109,
+ Reserved110,
+ Reserved111)
+ //l3 0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved096,
+ Reserved097,
+ Reserved098,
+ Reserved099,
+ Reserved100,
+ Reserved101,
+ Reserved102,
+ Reserved103)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG BasicLatinMask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin1SupplementMask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG LatinExtendedAMask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG LatinExtendedBMask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG IPAExtensionsMask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG SpacingModifierLettersMask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksMask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG GreekAndCopticMask = SkOTSetULONGBit<7>::value;
+ //Reserved
+ static const SK_OT_ULONG CyrillicMask = SkOTSetULONGBit<9>::value;
+ static const SK_OT_ULONG ArmenianMask = SkOTSetULONGBit<10>::value;
+ static const SK_OT_ULONG HebrewMask = SkOTSetULONGBit<11>::value;
+ //Reserved
+ static const SK_OT_ULONG ArabicMask = SkOTSetULONGBit<13>::value;
+ //Reserved
+ static const SK_OT_ULONG DevanagariMask = SkOTSetULONGBit<15>::value;
+ static const SK_OT_ULONG BengaliMask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG GurmukhiMask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG GujaratiMask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG OriyaMask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG TamilMask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG TeluguMask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG KannadaMask = SkOTSetULONGBit<22>::value;
+ static const SK_OT_ULONG MalayalamMask = SkOTSetULONGBit<23>::value;
+ static const SK_OT_ULONG ThaiMask = SkOTSetULONGBit<24>::value;
+ static const SK_OT_ULONG LaoMask = SkOTSetULONGBit<25>::value;
+ static const SK_OT_ULONG GeorgianMask = SkOTSetULONGBit<26>::value;
+ //Reserved
+ static const SK_OT_ULONG HangulJamoMask = SkOTSetULONGBit<28>::value;
+ static const SK_OT_ULONG LatinExtendedAdditionalMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG GreekExtendedMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG GeneralPunctuationMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG SuperscriptsAndSubscriptsMask = SkOTSetULONGBit<32 - 32>::value;
+ static const SK_OT_ULONG CurrencySymbolsMask = SkOTSetULONGBit<33 - 32>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksForSymbolsMask = SkOTSetULONGBit<34 - 32>::value;
+ static const SK_OT_ULONG LetterlikeSymbolsMask = SkOTSetULONGBit<35 - 32>::value;
+ static const SK_OT_ULONG NumberFormsMask = SkOTSetULONGBit<36 - 32>::value;
+ static const SK_OT_ULONG ArrowsMask = SkOTSetULONGBit<37 - 32>::value;
+ static const SK_OT_ULONG MathematicalOperatorsMask = SkOTSetULONGBit<38 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousTechnicalMask = SkOTSetULONGBit<39 - 32>::value;
+ static const SK_OT_ULONG ControlPicturesMask = SkOTSetULONGBit<40 - 32>::value;
+ static const SK_OT_ULONG OpticalCharacterRecognitionMask = SkOTSetULONGBit<41 - 32>::value;
+ static const SK_OT_ULONG EnclosedAlphanumericsMask = SkOTSetULONGBit<42 - 32>::value;
+ static const SK_OT_ULONG BoxDrawingMask = SkOTSetULONGBit<43 - 32>::value;
+ static const SK_OT_ULONG BlockElementsMask = SkOTSetULONGBit<44 - 32>::value;
+ static const SK_OT_ULONG GeometricShapesMask = SkOTSetULONGBit<45 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousSymbolsMask = SkOTSetULONGBit<46 - 32>::value;
+ static const SK_OT_ULONG DingbatsMask = SkOTSetULONGBit<47 - 32>::value;
+ static const SK_OT_ULONG CJKSymbolsAndPunctuationMask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG HiraganaMask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG KatakanaMask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG BopomofoMask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG HangulCompatibilityJamoMask = SkOTSetULONGBit<52 - 32>::value;
+ //Reserved
+ static const SK_OT_ULONG EnclosedCJKLettersAndMonthsMask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityMask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG HangulMask = SkOTSetULONGBit<56 - 32>::value;
+ static const SK_OT_ULONG NonPlane0Mask = SkOTSetULONGBit<57 - 32>::value;
+ //Reserved
+ static const SK_OT_ULONG CJKUnifiedIdeographsMask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG PrivateUseAreaMask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityIdeographsMask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG AlphabeticPresentationFormsMask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsAMask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ struct l2 {
+ static const SK_OT_ULONG CombiningHalfMarksMask = SkOTSetULONGBit<64 - 64>::value;
+ static const SK_OT_ULONG CJKCompatibilityFormsMask = SkOTSetULONGBit<65 - 64>::value;
+ static const SK_OT_ULONG SmallFormVariantsMask = SkOTSetULONGBit<66 - 64>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsBMask = SkOTSetULONGBit<67 - 64>::value;
+ static const SK_OT_ULONG HalfwidthAndFullwidthFormsMask = SkOTSetULONGBit<68 - 64>::value;
+ static const SK_OT_ULONG SpecialsMask = SkOTSetULONGBit<69 - 64>::value;
+ static const SK_OT_ULONG TibetanMask = SkOTSetULONGBit<70 - 64>::value;
+ static const SK_OT_ULONG SyriacMask = SkOTSetULONGBit<71 - 64>::value;
+ static const SK_OT_ULONG ThaanaMask = SkOTSetULONGBit<72 - 64>::value;
+ static const SK_OT_ULONG SinhalaMask = SkOTSetULONGBit<73 - 64>::value;
+ static const SK_OT_ULONG MyanmarMask = SkOTSetULONGBit<74 - 64>::value;
+ static const SK_OT_ULONG EthiopicMask = SkOTSetULONGBit<75 - 64>::value;
+ static const SK_OT_ULONG CherokeeMask = SkOTSetULONGBit<76 - 64>::value;
+ static const SK_OT_ULONG UnifiedCanadianSyllabicsMask = SkOTSetULONGBit<77 - 64>::value;
+ static const SK_OT_ULONG OghamMask = SkOTSetULONGBit<78 - 64>::value;
+ static const SK_OT_ULONG RunicMask = SkOTSetULONGBit<79 - 64>::value;
+ static const SK_OT_ULONG KhmerMask = SkOTSetULONGBit<80 - 64>::value;
+ static const SK_OT_ULONG MongolianMask = SkOTSetULONGBit<81 - 64>::value;
+ static const SK_OT_ULONG BrailleMask = SkOTSetULONGBit<82 - 64>::value;
+ static const SK_OT_ULONG YiMask = SkOTSetULONGBit<83 - 64>::value;
+ static const SK_OT_ULONG Tagalog_Hanunoo_Buhid_TagbanwaMask = SkOTSetULONGBit<84 - 64>::value;
+ static const SK_OT_ULONG OldItalicMask = SkOTSetULONGBit<85 - 64>::value;
+ static const SK_OT_ULONG GothicMask = SkOTSetULONGBit<86 - 64>::value;
+ static const SK_OT_ULONG DeseretMask = SkOTSetULONGBit<87 - 64>::value;
+ static const SK_OT_ULONG MusicalSymbolsMask = SkOTSetULONGBit<88 - 64>::value;
+ static const SK_OT_ULONG MathematicalAlphanumericSymbolsMask = SkOTSetULONGBit<89 - 64>::value;
+ static const SK_OT_ULONG PrivateUseMask = SkOTSetULONGBit<90 - 64>::value;
+ static const SK_OT_ULONG VariationSelectorsMask = SkOTSetULONGBit<91 - 64>::value;
+ static const SK_OT_ULONG TagsMask = SkOTSetULONGBit<92 - 64>::value;
+ };
+ SK_OT_ULONG value[4];
+ } raw;
+ } ulUnicodeRange;
+ SK_OT_CHAR achVendID[4];
+ union Selection {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Italic,
+ Underscore,
+ Negative,
+ Outlined,
+ Strikeout,
+ Bold,
+ Regular,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT ItalicMask = SkOTSetUSHORTBit<0>::value;
+ static const SK_OT_USHORT UnderscoreMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT NegativeMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT OutlinedMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT StrikeoutMask = SkOTSetUSHORTBit<4>::value;
+ static const SK_OT_USHORT BoldMask = SkOTSetUSHORTBit<5>::value;
+ static const SK_OT_USHORT RegularMask = SkOTSetUSHORTBit<6>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsSelection;
+ SK_OT_USHORT usFirstCharIndex;
+ SK_OT_USHORT usLastCharIndex;
+ //version0
+ SK_OT_SHORT sTypoAscender;
+ SK_OT_SHORT sTypoDescender;
+ SK_OT_SHORT sTypoLineGap;
+ SK_OT_USHORT usWinAscent;
+ SK_OT_USHORT usWinDescent;
+ //version1
+ union CodePageRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved24,
+ Reserved25,
+ Reserved26,
+ Reserved27,
+ Reserved28,
+ MacintoshCharacterSet,
+ OEMCharacterSet,
+ SymbolCharacterSet)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Thai_874,
+ JISJapan_932,
+ ChineseSimplified_936,
+ KoreanWansung_949,
+ ChineseTraditional_950,
+ KoreanJohab_1361,
+ Reserved22,
+ Reserved23)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ Vietnamese,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ Latin1_1252,
+ Latin2EasternEurope_1250,
+ Cyrillic_1251,
+ Greek_1253,
+ Turkish_1254,
+ Hebrew_1255,
+ Arabic_1256,
+ WindowsBaltic_1257)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ IBMTurkish_857,
+ IBMCyrillic_855,
+ Latin2_852,
+ MSDOSBaltic_775,
+ Greek_737,
+ Arabic_708,
+ WELatin1_850,
+ US_437)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ IBMGreek_869,
+ MSDOSRussian_866,
+ MSDOSNordic_865,
+ Arabic_864,
+ MSDOSCanadianFrench_863,
+ Hebrew_862,
+ MSDOSIcelandic_861,
+ MSDOSPortuguese_860)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved40,
+ Reserved41,
+ Reserved42,
+ Reserved43,
+ Reserved44,
+ Reserved45,
+ Reserved46,
+ Reserved47)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved32,
+ Reserved33,
+ Reserved34,
+ Reserved35,
+ Reserved36,
+ Reserved37,
+ Reserved38,
+ Reserved39)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG Latin1_1252Mask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin2EasternEurope_1250Mask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG Cyrillic_1251Mask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG Greek_1253Mask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG Turkish_1254Mask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG Hebrew_1255Mask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG Arabic_1256Mask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG WindowsBaltic_1257Mask = SkOTSetULONGBit<7>::value;
+ static const SK_OT_ULONG Vietnamese_1258Mask = SkOTSetULONGBit<8>::value;
+ static const SK_OT_ULONG Thai_874Mask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG JISJapan_932Mask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG ChineseSimplified_936Mask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG KoreanWansung_949Mask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG ChineseTraditional_950Mask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG KoreanJohab_1361Mask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG MacintoshCharacterSetMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG OEMCharacterSetMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG SymbolCharacterSetMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG IBMGreek_869Mask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG MSDOSRussian_866Mask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG MSDOSNordic_865Mask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG Arabic_864Mask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG MSDOSCanadianFrench_863Mask = SkOTSetULONGBit<52 - 32>::value;
+ static const SK_OT_ULONG Hebrew_862Mask = SkOTSetULONGBit<53 - 32>::value;
+ static const SK_OT_ULONG MSDOSIcelandic_861Mask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG MSDOSPortuguese_860Mask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG IBMTurkish_857Mask = SkOTSetULONGBit<56 - 32>::value;
+ static const SK_OT_ULONG IBMCyrillic_855Mask = SkOTSetULONGBit<57 - 32>::value;
+ static const SK_OT_ULONG Latin2_852Mask = SkOTSetULONGBit<58 - 32>::value;
+ static const SK_OT_ULONG MSDOSBaltic_775Mask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG Greek_737Mask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG Arabic_708Mask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG WELatin1_850Mask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG US_437Mask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ SK_OT_ULONG value[2];
+ } raw;
+ } ulCodePageRange;
+ //version2
+ SK_OT_SHORT sxHeight;
+ SK_OT_SHORT sCapHeight;
+ SK_OT_USHORT usDefaultChar;
+ SK_OT_USHORT usBreakChar;
+ SK_OT_USHORT usMaxContext;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableOS2_V3) == 96, "sizeof_SkOTTableOS2_V3_not_96");
+
+#endif
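
Numeric fields such as usWeightClass are stored big-endian, so they need an SkEndian_SwapBE16 before being used as ordinary integers, while the VERSION constant is already swapped and can be compared directly. A small sketch under those assumptions; the helper name is illustrative only:

#include "SkEndian.h"
#include "SkOTTable_OS_2_V3.h"

// Hypothetical helper: returns the weight (100..900) if 'os2' is a version 3
// OS/2 table, or 0 if the version field does not match.
static int GetWeightClassV3(const SkOTTableOS2_V3* os2) {
    if (os2->version != SkOTTableOS2_V3::VERSION) {
        return 0;
    }
    return SkEndian_SwapBE16(os2->usWeightClass.value);  // swap to host order
}
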
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V4.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V4.h
new file mode 100644
index 000000000..0dbb21ff5
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V4.h
@@ -0,0 +1,582 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_V4_DEFINED
+#define SkOTTable_OS_2_V4_DEFINED
+
+#include "SkEndian.h"
+#include "SkIBMFamilyClass.h"
+#include "SkOTTableTypes.h"
+#include "SkPanose.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableOS2_V4 {
+ SK_OT_USHORT version;
+ static const SK_OT_USHORT VERSION = SkTEndian_SwapBE16(4);
+
+ SK_OT_SHORT xAvgCharWidth;
+ struct WeightClass {
+ enum Value : SK_OT_USHORT {
+ Thin = SkTEndian_SwapBE16(100),
+ ExtraLight = SkTEndian_SwapBE16(200),
+ Light = SkTEndian_SwapBE16(300),
+ Normal = SkTEndian_SwapBE16(400),
+ Medium = SkTEndian_SwapBE16(500),
+ SemiBold = SkTEndian_SwapBE16(600),
+ Bold = SkTEndian_SwapBE16(700),
+ ExtraBold = SkTEndian_SwapBE16(800),
+ Black = SkTEndian_SwapBE16(900),
+ };
+ SK_OT_USHORT value;
+ } usWeightClass;
+ struct WidthClass {
+ enum Value : SK_OT_USHORT {
+ UltraCondensed = SkTEndian_SwapBE16(1),
+ ExtraCondensed = SkTEndian_SwapBE16(2),
+ Condensed = SkTEndian_SwapBE16(3),
+ SemiCondensed = SkTEndian_SwapBE16(4),
+ Medium = SkTEndian_SwapBE16(5),
+ SemiExpanded = SkTEndian_SwapBE16(6),
+ Expanded = SkTEndian_SwapBE16(7),
+ ExtraExpanded = SkTEndian_SwapBE16(8),
+ UltraExpanded = SkTEndian_SwapBE16(9),
+ } value;
+ } usWidthClass;
+ union Type {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ NoSubsetting,
+ Bitmap,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved00,
+ Restricted,
+ PreviewPrint,
+ Editable,
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT Installable = 0;
+ static const SK_OT_USHORT RestrictedMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT PreviewPrintMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT EditableMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT NoSubsettingMask = SkOTSetUSHORTBit<8>::value;
+ static const SK_OT_USHORT BitmapMask = SkOTSetUSHORTBit<9>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsType;
+ SK_OT_SHORT ySubscriptXSize;
+ SK_OT_SHORT ySubscriptYSize;
+ SK_OT_SHORT ySubscriptXOffset;
+ SK_OT_SHORT ySubscriptYOffset;
+ SK_OT_SHORT ySuperscriptXSize;
+ SK_OT_SHORT ySuperscriptYSize;
+ SK_OT_SHORT ySuperscriptXOffset;
+ SK_OT_SHORT ySuperscriptYOffset;
+ SK_OT_SHORT yStrikeoutSize;
+ SK_OT_SHORT yStrikeoutPosition;
+ SkIBMFamilyClass sFamilyClass;
+ SkPanose panose;
+ union UnicodeRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Thai,
+ Lao,
+ Georgian,
+ Balinese,
+ HangulJamo,
+ LatinExtendedAdditional,
+ GreekExtended,
+ GeneralPunctuation)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Bengali,
+ Gurmukhi,
+ Gujarati,
+ Oriya,
+ Tamil,
+ Telugu,
+ Kannada,
+ Malayalam)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ Coptic,
+ Cyrillic,
+ Armenian,
+ Hebrew,
+ Vai,
+ Arabic,
+ NKo,
+ Devanagari)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ BasicLatin,
+ Latin1Supplement,
+ LatinExtendedA,
+ LatinExtendedB,
+ IPAExtensions,
+ SpacingModifierLetters,
+ CombiningDiacriticalMarks,
+ GreekAndCoptic)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ Hangul,
+ NonPlane0,
+ Phoenician,
+ CJKUnifiedIdeographs,
+ PrivateUseArea,
+ CJKCompatibilityIdeographs,
+ AlphabeticPresentationForms,
+ ArabicPresentationFormsA)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ CJKSymbolsAndPunctuation,
+ Hiragana,
+ Katakana,
+ Bopomofo,
+ HangulCompatibilityJamo,
+ PhagsPa,
+ EnclosedCJKLettersAndMonths,
+ CJKCompatibility)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ ControlPictures,
+ OpticalCharacterRecognition,
+ EnclosedAlphanumerics,
+ BoxDrawing,
+ BlockElements,
+ GeometricShapes,
+ MiscellaneousSymbols,
+ Dingbats)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ SuperscriptsAndSubscripts,
+ CurrencySymbols,
+ CombiningDiacriticalMarksForSymbols,
+ LetterlikeSymbols,
+ NumberForms,
+ Arrows,
+ MathematicalOperators,
+ MiscellaneousTechnical)
+
+ //l2 24-31
+ SK_OT_BYTE_BITFIELD(
+ MusicalSymbols,
+ MathematicalAlphanumericSymbols,
+ PrivateUse,
+ VariationSelectors,
+ Tags,
+ Limbu,
+ TaiLe,
+ NewTaiLue)
+ //l2 16-23
+ SK_OT_BYTE_BITFIELD(
+ Khmer,
+ Mongolian,
+ Braille,
+ Yi,
+ Tagalog_Hanunoo_Buhid_Tagbanwa,
+ OldItalic,
+ Gothic,
+ Deseret)
+ //l2 8-15
+ SK_OT_BYTE_BITFIELD(
+ Thaana,
+ Sinhala,
+ Myanmar,
+ Ethiopic,
+ Cherokee,
+ UnifiedCanadianSyllabics,
+ Ogham,
+ Runic)
+ //l2 0-7
+ SK_OT_BYTE_BITFIELD(
+ CombiningHalfMarks,
+ CJKCompatibilityForms,
+ SmallFormVariants,
+ ArabicPresentationFormsB,
+ HalfwidthAndFullwidthForms,
+ Specials,
+ Tibetan,
+ Syriac)
+
+ //l3 24-31
+ SK_OT_BYTE_BITFIELD(
+ PhaistosDisc,
+ Carian_Lycian_Lydian,
+ DominoTiles_MahjongTiles,
+ Reserved123,
+ Reserved124,
+ Reserved125,
+ Reserved126,
+ Reserved127)
+ //l3 16-23
+ SK_OT_BYTE_BITFIELD(
+ Sundanese,
+ Lepcha,
+ OlChiki,
+ Saurashtra,
+ KayahLi,
+ Rejang,
+ Cham,
+ AncientSymbols)
+ //l3 8-15
+ SK_OT_BYTE_BITFIELD(
+ OldPersian,
+ Shavian,
+ Osmanya,
+ CypriotSyllabary,
+ Kharoshthi,
+ TaiXuanJingSymbols,
+ Cuneiform,
+ CountingRodNumerals)
+ //l3 0-7
+ SK_OT_BYTE_BITFIELD(
+ Buginese,
+ Glagolitic,
+ Tifinagh,
+ YijingHexagramSymbols,
+ SylotiNagri,
+ LinearB_AegeanNumbers,
+ AncientGreekNumbers,
+ Ugaritic)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG BasicLatinMask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin1SupplementMask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG LatinExtendedAMask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG LatinExtendedBMask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG IPAExtensionsMask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG SpacingModifierLettersMask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksMask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG GreekAndCopticMask = SkOTSetULONGBit<7>::value;
+ static const SK_OT_ULONG CopticMask = SkOTSetULONGBit<8>::value;
+ static const SK_OT_ULONG CyrillicMask = SkOTSetULONGBit<9>::value;
+ static const SK_OT_ULONG ArmenianMask = SkOTSetULONGBit<10>::value;
+ static const SK_OT_ULONG HebrewMask = SkOTSetULONGBit<11>::value;
+ static const SK_OT_ULONG VaiMask = SkOTSetULONGBit<12>::value;
+ static const SK_OT_ULONG ArabicMask = SkOTSetULONGBit<13>::value;
+ static const SK_OT_ULONG NKoMask = SkOTSetULONGBit<14>::value;
+ static const SK_OT_ULONG DevanagariMask = SkOTSetULONGBit<15>::value;
+ static const SK_OT_ULONG BengaliMask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG GurmukhiMask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG GujaratiMask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG OriyaMask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG TamilMask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG TeluguMask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG KannadaMask = SkOTSetULONGBit<22>::value;
+ static const SK_OT_ULONG MalayalamMask = SkOTSetULONGBit<23>::value;
+ static const SK_OT_ULONG ThaiMask = SkOTSetULONGBit<24>::value;
+ static const SK_OT_ULONG LaoMask = SkOTSetULONGBit<25>::value;
+ static const SK_OT_ULONG GeorgianMask = SkOTSetULONGBit<26>::value;
+ static const SK_OT_ULONG BalineseMask = SkOTSetULONGBit<27>::value;
+ static const SK_OT_ULONG HangulJamoMask = SkOTSetULONGBit<28>::value;
+ static const SK_OT_ULONG LatinExtendedAdditionalMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG GreekExtendedMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG GeneralPunctuationMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG SuperscriptsAndSubscriptsMask = SkOTSetULONGBit<32 - 32>::value;
+ static const SK_OT_ULONG CurrencySymbolsMask = SkOTSetULONGBit<33 - 32>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksForSymbolsMask = SkOTSetULONGBit<34 - 32>::value;
+ static const SK_OT_ULONG LetterlikeSymbolsMask = SkOTSetULONGBit<35 - 32>::value;
+ static const SK_OT_ULONG NumberFormsMask = SkOTSetULONGBit<36 - 32>::value;
+ static const SK_OT_ULONG ArrowsMask = SkOTSetULONGBit<37 - 32>::value;
+ static const SK_OT_ULONG MathematicalOperatorsMask = SkOTSetULONGBit<38 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousTechnicalMask = SkOTSetULONGBit<39 - 32>::value;
+ static const SK_OT_ULONG ControlPicturesMask = SkOTSetULONGBit<40 - 32>::value;
+ static const SK_OT_ULONG OpticalCharacterRecognitionMask = SkOTSetULONGBit<41 - 32>::value;
+ static const SK_OT_ULONG EnclosedAlphanumericsMask = SkOTSetULONGBit<42 - 32>::value;
+ static const SK_OT_ULONG BoxDrawingMask = SkOTSetULONGBit<43 - 32>::value;
+ static const SK_OT_ULONG BlockElementsMask = SkOTSetULONGBit<44 - 32>::value;
+ static const SK_OT_ULONG GeometricShapesMask = SkOTSetULONGBit<45 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousSymbolsMask = SkOTSetULONGBit<46 - 32>::value;
+ static const SK_OT_ULONG DingbatsMask = SkOTSetULONGBit<47 - 32>::value;
+ static const SK_OT_ULONG CJKSymbolsAndPunctuationMask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG HiraganaMask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG KatakanaMask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG BopomofoMask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG HangulCompatibilityJamoMask = SkOTSetULONGBit<52 - 32>::value;
+ static const SK_OT_ULONG PhagsPaMask = SkOTSetULONGBit<53 - 32>::value;
+ static const SK_OT_ULONG EnclosedCJKLettersAndMonthsMask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityMask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG HangulMask = SkOTSetULONGBit<56 - 32>::value;
+ static const SK_OT_ULONG NonPlane0Mask = SkOTSetULONGBit<57 - 32>::value;
+ static const SK_OT_ULONG PhoenicianMask = SkOTSetULONGBit<58 - 32>::value;
+ static const SK_OT_ULONG CJKUnifiedIdeographsMask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG PrivateUseAreaMask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityIdeographsMask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG AlphabeticPresentationFormsMask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsAMask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ struct l2 {
+ static const SK_OT_ULONG CombiningHalfMarksMask = SkOTSetULONGBit<64 - 64>::value;
+ static const SK_OT_ULONG CJKCompatibilityFormsMask = SkOTSetULONGBit<65 - 64>::value;
+ static const SK_OT_ULONG SmallFormVariantsMask = SkOTSetULONGBit<66 - 64>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsBMask = SkOTSetULONGBit<67 - 64>::value;
+ static const SK_OT_ULONG HalfwidthAndFullwidthFormsMask = SkOTSetULONGBit<68 - 64>::value;
+ static const SK_OT_ULONG SpecialsMask = SkOTSetULONGBit<69 - 64>::value;
+ static const SK_OT_ULONG TibetanMask = SkOTSetULONGBit<70 - 64>::value;
+ static const SK_OT_ULONG SyriacMask = SkOTSetULONGBit<71 - 64>::value;
+ static const SK_OT_ULONG ThaanaMask = SkOTSetULONGBit<72 - 64>::value;
+ static const SK_OT_ULONG SinhalaMask = SkOTSetULONGBit<73 - 64>::value;
+ static const SK_OT_ULONG MyanmarMask = SkOTSetULONGBit<74 - 64>::value;
+ static const SK_OT_ULONG EthiopicMask = SkOTSetULONGBit<75 - 64>::value;
+ static const SK_OT_ULONG CherokeeMask = SkOTSetULONGBit<76 - 64>::value;
+ static const SK_OT_ULONG UnifiedCanadianSyllabicsMask = SkOTSetULONGBit<77 - 64>::value;
+ static const SK_OT_ULONG OghamMask = SkOTSetULONGBit<78 - 64>::value;
+ static const SK_OT_ULONG RunicMask = SkOTSetULONGBit<79 - 64>::value;
+ static const SK_OT_ULONG KhmerMask = SkOTSetULONGBit<80 - 64>::value;
+ static const SK_OT_ULONG MongolianMask = SkOTSetULONGBit<81 - 64>::value;
+ static const SK_OT_ULONG BrailleMask = SkOTSetULONGBit<82 - 64>::value;
+ static const SK_OT_ULONG YiMask = SkOTSetULONGBit<83 - 64>::value;
+ static const SK_OT_ULONG Tagalog_Hanunoo_Buhid_TagbanwaMask = SkOTSetULONGBit<84 - 64>::value;
+ static const SK_OT_ULONG OldItalicMask = SkOTSetULONGBit<85 - 64>::value;
+ static const SK_OT_ULONG GothicMask = SkOTSetULONGBit<86 - 64>::value;
+ static const SK_OT_ULONG DeseretMask = SkOTSetULONGBit<87 - 64>::value;
+ static const SK_OT_ULONG MusicalSymbolsMask = SkOTSetULONGBit<88 - 64>::value;
+ static const SK_OT_ULONG MathematicalAlphanumericSymbolsMask = SkOTSetULONGBit<89 - 64>::value;
+ static const SK_OT_ULONG PrivateUseMask = SkOTSetULONGBit<90 - 64>::value;
+ static const SK_OT_ULONG VariationSelectorsMask = SkOTSetULONGBit<91 - 64>::value;
+ static const SK_OT_ULONG TagsMask = SkOTSetULONGBit<92 - 64>::value;
+ static const SK_OT_ULONG LimbuMask = SkOTSetULONGBit<93 - 64>::value;
+ static const SK_OT_ULONG TaiLeMask = SkOTSetULONGBit<94 - 64>::value;
+ static const SK_OT_ULONG NewTaiLueMask = SkOTSetULONGBit<95 - 64>::value;
+ };
+ struct l3 {
+ static const SK_OT_ULONG BugineseMask = SkOTSetULONGBit<96 - 96>::value;
+ static const SK_OT_ULONG GlagoliticMask = SkOTSetULONGBit<97 - 96>::value;
+ static const SK_OT_ULONG TifinaghMask = SkOTSetULONGBit<98 - 96>::value;
+ static const SK_OT_ULONG YijingHexagramSymbolsMask = SkOTSetULONGBit<99 - 96>::value;
+ static const SK_OT_ULONG SylotiNagriMask = SkOTSetULONGBit<100 - 96>::value;
+ static const SK_OT_ULONG LinearB_AegeanNumbersMask = SkOTSetULONGBit<101 - 96>::value;
+ static const SK_OT_ULONG AncientGreekNumbersMask = SkOTSetULONGBit<102 - 96>::value;
+ static const SK_OT_ULONG UgariticMask = SkOTSetULONGBit<103 - 96>::value;
+ static const SK_OT_ULONG OldPersianMask = SkOTSetULONGBit<104 - 96>::value;
+ static const SK_OT_ULONG ShavianMask = SkOTSetULONGBit<105 - 96>::value;
+ static const SK_OT_ULONG OsmanyaMask = SkOTSetULONGBit<106 - 96>::value;
+ static const SK_OT_ULONG CypriotSyllabaryMask = SkOTSetULONGBit<107 - 96>::value;
+ static const SK_OT_ULONG KharoshthiMask = SkOTSetULONGBit<108 - 96>::value;
+ static const SK_OT_ULONG TaiXuanJingSymbolsMask = SkOTSetULONGBit<109 - 96>::value;
+ static const SK_OT_ULONG CuneiformMask = SkOTSetULONGBit<110 - 96>::value;
+ static const SK_OT_ULONG CountingRodNumeralsMask = SkOTSetULONGBit<111 - 96>::value;
+ static const SK_OT_ULONG SundaneseMask = SkOTSetULONGBit<112 - 96>::value;
+ static const SK_OT_ULONG LepchaMask = SkOTSetULONGBit<113 - 96>::value;
+ static const SK_OT_ULONG OlChikiMask = SkOTSetULONGBit<114 - 96>::value;
+ static const SK_OT_ULONG SaurashtraMask = SkOTSetULONGBit<115 - 96>::value;
+ static const SK_OT_ULONG KayahLiMask = SkOTSetULONGBit<116 - 96>::value;
+ static const SK_OT_ULONG RejangMask = SkOTSetULONGBit<117 - 96>::value;
+ static const SK_OT_ULONG ChamMask = SkOTSetULONGBit<118 - 96>::value;
+ static const SK_OT_ULONG AncientSymbolsMask = SkOTSetULONGBit<119 - 96>::value;
+ static const SK_OT_ULONG PhaistosDiscMask = SkOTSetULONGBit<120 - 96>::value;
+ static const SK_OT_ULONG Carian_Lycian_LydianMask = SkOTSetULONGBit<121 - 96>::value;
+ static const SK_OT_ULONG DominoTiles_MahjongTilesMask = SkOTSetULONGBit<122 - 96>::value;
+ };
+ SK_OT_ULONG value[4];
+ } raw;
+ } ulUnicodeRange;
+ SK_OT_CHAR achVendID[4];
+ union Selection {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ WWS,
+ Oblique,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Italic,
+ Underscore,
+ Negative,
+ Outlined,
+ Strikeout,
+ Bold,
+ Regular,
+ UseTypoMetrics)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT ItalicMask = SkOTSetUSHORTBit<0>::value;
+ static const SK_OT_USHORT UnderscoreMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT NegativeMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT OutlinedMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT StrikeoutMask = SkOTSetUSHORTBit<4>::value;
+ static const SK_OT_USHORT BoldMask = SkOTSetUSHORTBit<5>::value;
+ static const SK_OT_USHORT RegularMask = SkOTSetUSHORTBit<6>::value;
+ static const SK_OT_USHORT UseTypoMetricsMask = SkOTSetUSHORTBit<7>::value;
+ static const SK_OT_USHORT WWSMask = SkOTSetUSHORTBit<8>::value;
+ static const SK_OT_USHORT ObliqueMask = SkOTSetUSHORTBit<9>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsSelection;
+ SK_OT_USHORT usFirstCharIndex;
+ SK_OT_USHORT usLastCharIndex;
+ //version0
+ SK_OT_SHORT sTypoAscender;
+ SK_OT_SHORT sTypoDescender;
+ SK_OT_SHORT sTypoLineGap;
+ SK_OT_USHORT usWinAscent;
+ SK_OT_USHORT usWinDescent;
+ //version1
+ union CodePageRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved24,
+ Reserved25,
+ Reserved26,
+ Reserved27,
+ Reserved28,
+ MacintoshCharacterSet,
+ OEMCharacterSet,
+ SymbolCharacterSet)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Thai_874,
+ JISJapan_932,
+ ChineseSimplified_936,
+ KoreanWansung_949,
+ ChineseTraditional_950,
+ KoreanJohab_1361,
+ Reserved22,
+ Reserved23)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ Vietnamese,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ Latin1_1252,
+ Latin2EasternEurope_1250,
+ Cyrillic_1251,
+ Greek_1253,
+ Turkish_1254,
+ Hebrew_1255,
+ Arabic_1256,
+ WindowsBaltic_1257)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ IBMTurkish_857,
+ IBMCyrillic_855,
+ Latin2_852,
+ MSDOSBaltic_775,
+ Greek_737,
+ Arabic_708,
+ WELatin1_850,
+ US_437)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ IBMGreek_869,
+ MSDOSRussian_866,
+ MSDOSNordic_865,
+ Arabic_864,
+ MSDOSCanadianFrench_863,
+ Hebrew_862,
+ MSDOSIcelandic_861,
+ MSDOSPortuguese_860)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved40,
+ Reserved41,
+ Reserved42,
+ Reserved43,
+ Reserved44,
+ Reserved45,
+ Reserved46,
+ Reserved47)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved32,
+ Reserved33,
+ Reserved34,
+ Reserved35,
+ Reserved36,
+ Reserved37,
+ Reserved38,
+ Reserved39)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG Latin1_1252Mask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin2EasternEurope_1250Mask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG Cyrillic_1251Mask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG Greek_1253Mask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG Turkish_1254Mask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG Hebrew_1255Mask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG Arabic_1256Mask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG WindowsBaltic_1257Mask = SkOTSetULONGBit<7>::value;
+ static const SK_OT_ULONG Vietnamese_1258Mask = SkOTSetULONGBit<8>::value;
+ static const SK_OT_ULONG Thai_874Mask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG JISJapan_932Mask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG ChineseSimplified_936Mask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG KoreanWansung_949Mask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG ChineseTraditional_950Mask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG KoreanJohab_1361Mask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG MacintoshCharacterSetMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG OEMCharacterSetMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG SymbolCharacterSetMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG IBMGreek_869Mask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG MSDOSRussian_866Mask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG MSDOSNordic_865Mask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG Arabic_864Mask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG MSDOSCanadianFrench_863Mask = SkOTSetULONGBit<52 - 32>::value;
+ static const SK_OT_ULONG Hebrew_862Mask = SkOTSetULONGBit<53 - 32>::value;
+ static const SK_OT_ULONG MSDOSIcelandic_861Mask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG MSDOSPortuguese_860Mask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG IBMTurkish_857Mask = SkOTSetULONGBit<56 - 32>::value;
+ static const SK_OT_ULONG IBMCyrillic_855Mask = SkOTSetULONGBit<57 - 32>::value;
+ static const SK_OT_ULONG Latin2_852Mask = SkOTSetULONGBit<58 - 32>::value;
+ static const SK_OT_ULONG MSDOSBaltic_775Mask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG Greek_737Mask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG Arabic_708Mask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG WELatin1_850Mask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG US_437Mask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ SK_OT_ULONG value[2];
+ } raw;
+ } ulCodePageRange;
+ //version2
+ SK_OT_SHORT sxHeight;
+ SK_OT_SHORT sCapHeight;
+ SK_OT_USHORT usDefaultChar;
+ SK_OT_USHORT usBreakChar;
+ SK_OT_USHORT usMaxContext;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableOS2_V4) == 96, "sizeof_SkOTTableOS2_V4_not_96");
+
+#endif
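
fsSelection follows the same Field/Raw pattern as the unions above: the Raw masks are big-endian, so a plain AND against the stored value answers style questions. A minimal sketch, assuming a validated version 4 table; the helper names are illustrative:

#include "SkOTTable_OS_2_V4.h"

// Hypothetical helpers over a validated version 4 OS/2 table.
static bool IsItalic(const SkOTTableOS2_V4* os2) {
    return 0 != (os2->fsSelection.raw.value &
                 SkOTTableOS2_V4::Selection::Raw::ItalicMask);
}

static bool PrefersTypoMetrics(const SkOTTableOS2_V4* os2) {
    // USE_TYPO_METRICS (bit 7) first appears in OS/2 version 4.
    return 0 != (os2->fsSelection.raw.value &
                 SkOTTableOS2_V4::Selection::Raw::UseTypoMetricsMask);
}
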
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_VA.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_VA.h
new file mode 100644
index 000000000..63b904c5b
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_VA.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_VA_DEFINED
+#define SkOTTable_OS_2_VA_DEFINED
+
+#include "SkEndian.h"
+#include "SkIBMFamilyClass.h"
+#include "SkOTTableTypes.h"
+#include "SkPanose.h"
+
+#pragma pack(push, 1)
+
+//Original V0 TT
+struct SkOTTableOS2_VA {
+ SK_OT_USHORT version;
+ //SkOTTableOS2_VA::VERSION and SkOTTableOS2_V0::VERSION are both 0.
+ //The only way to differentiate these two versions is by the size of the table.
+ static const SK_OT_USHORT VERSION = SkTEndian_SwapBE16(0);
+
+ SK_OT_SHORT xAvgCharWidth;
+ struct WeightClass {
+ enum Value : SK_OT_USHORT {
+ UltraLight = SkTEndian_SwapBE16(1),
+ ExtraLight = SkTEndian_SwapBE16(2),
+ Light = SkTEndian_SwapBE16(3),
+ SemiLight = SkTEndian_SwapBE16(4),
+ Medium = SkTEndian_SwapBE16(5),
+ SemiBold = SkTEndian_SwapBE16(6),
+ Bold = SkTEndian_SwapBE16(7),
+ ExtraBold = SkTEndian_SwapBE16(8),
+ UltraBold = SkTEndian_SwapBE16(9),
+ SK_SEQ_END,
+ } value;
+ } usWeightClass;
+ struct WidthClass {
+ enum Value : SK_OT_USHORT {
+ UltraCondensed = SkTEndian_SwapBE16(1),
+ ExtraCondensed = SkTEndian_SwapBE16(2),
+ Condensed = SkTEndian_SwapBE16(3),
+ SemiCondensed = SkTEndian_SwapBE16(4),
+ Medium = SkTEndian_SwapBE16(5),
+ SemiExpanded = SkTEndian_SwapBE16(6),
+ Expanded = SkTEndian_SwapBE16(7),
+ ExtraExpanded = SkTEndian_SwapBE16(8),
+ UltraExpanded = SkTEndian_SwapBE16(9),
+ SK_SEQ_END,
+ } value;
+ } usWidthClass;
+ union Type {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved00,
+ Restricted,
+ PreviewPrint,
+ Editable,
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT Installable = 0;
+ static const SK_OT_USHORT RestrictedMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT PreviewPrintMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT EditableMask = SkOTSetUSHORTBit<3>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsType;
+ SK_OT_SHORT ySubscriptXSize;
+ SK_OT_SHORT ySubscriptYSize;
+ SK_OT_SHORT ySubscriptXOffset;
+ SK_OT_SHORT ySubscriptYOffset;
+ SK_OT_SHORT ySuperscriptXSize;
+ SK_OT_SHORT ySuperscriptYSize;
+ SK_OT_SHORT ySuperscriptXOffset;
+ SK_OT_SHORT ySuperscriptYOffset;
+ SK_OT_SHORT yStrikeoutSize;
+ SK_OT_SHORT yStrikeoutPosition;
+ SkIBMFamilyClass sFamilyClass;
+ SkPanose panose;
+ SK_OT_ULONG ulCharRange[4];
+ SK_OT_CHAR achVendID[4];
+ union Selection {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Italic,
+ Underscore,
+ Negative,
+ Outlined,
+ Strikeout,
+ Bold,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT ItalicMask = SkOTSetUSHORTBit<0>::value;
+ static const SK_OT_USHORT UnderscoreMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT NegativeMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT OutlinedMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT StrikeoutMask = SkOTSetUSHORTBit<4>::value;
+ static const SK_OT_USHORT BoldMask = SkOTSetUSHORTBit<5>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsSelection;
+ SK_OT_USHORT usFirstCharIndex;
+ SK_OT_USHORT usLastCharIndex;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableOS2_VA) == 68, "sizeof_SkOTTableOS2_VA_not_68");
+
+#endif
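
As the comment in SkOTTableOS2_VA notes, the original TrueType table and OpenType version 0 both report version == 0, so only the length of the fetched data distinguishes them. A sketch of that check, assuming the caller knows the exact table length and treating an exact 68-byte match as the old layout (a simplification; real code might also tolerate padding):

#include <stddef.h>
#include "SkOTTable_OS_2_VA.h"

// Hypothetical check: 'data' and 'length' come from the caller's table fetch.
static bool IsOriginalTrueTypeOS2(const void* data, size_t length) {
    if (length < sizeof(SkOTTableOS2_VA)) {
        return false;  // too short to be any OS/2 table
    }
    const SkOTTableOS2_VA* os2 = static_cast<const SkOTTableOS2_VA*>(data);
    return os2->version == SkOTTableOS2_VA::VERSION &&
           length == sizeof(SkOTTableOS2_VA);
}
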
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_gasp.h b/gfx/skia/skia/src/sfnt/SkOTTable_gasp.h
new file mode 100644
index 000000000..5af590e5c
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_gasp.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_gasp_DEFINED
+#define SkOTTable_gasp_DEFINED
+
+#include "SkEndian.h"
+#include "SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableGridAndScanProcedure {
+ static const SK_OT_CHAR TAG0 = 'g';
+ static const SK_OT_CHAR TAG1 = 'a';
+ static const SK_OT_CHAR TAG2 = 's';
+ static const SK_OT_CHAR TAG3 = 'p';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableGridAndScanProcedure>::value;
+
+ SK_OT_USHORT version;
+ static const SK_OT_USHORT version0 = SkTEndian_SwapBE16(0);
+ static const SK_OT_USHORT version1 = SkTEndian_SwapBE16(1);
+
+ SK_OT_USHORT numRanges;
+
+ struct GaspRange {
+ SK_OT_USHORT maxPPEM;
+ union behavior {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Gridfit,
+ DoGray,
+ SymmetricGridfit, // Version 1
+ SymmetricSmoothing, // Version 1
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT GridfitMask = SkTEndian_SwapBE16(1 << 0);
+ static const SK_OT_USHORT DoGrayMask = SkTEndian_SwapBE16(1 << 1);
+ static const SK_OT_USHORT SymmetricGridfitMask = SkTEndian_SwapBE16(1 << 2);
+ static const SK_OT_USHORT SymmetricSmoothingMask = SkTEndian_SwapBE16(1 << 3);
+ SK_OT_USHORT value;
+ } raw;
+ } flags;
+ }; //gaspRange[numRanges]
+};
+
+#pragma pack(pop)
+
+
+#include <stddef.h>
+static_assert(offsetof(SkOTTableGridAndScanProcedure, numRanges) == 2, "SkOTTableGridAndScanProcedure_numRanges_not_at_2");
+static_assert(sizeof(SkOTTableGridAndScanProcedure) == 4, "sizeof_SkOTTableGridAndScanProcedure_not_4");
+
+#endif
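
The GaspRange records are not declared as an array member because their count is only known at runtime; they follow the four-byte header directly and, per the gasp table convention, are sorted by maxPPEM. A sketch of looking up the behavior for a given ppem, assuming the caller supplies the fetched table and its length; the helper name is illustrative:

#include <stddef.h>
#include "SkEndian.h"
#include "SkOTTable_gasp.h"

// Hypothetical lookup: returns whether grid-fitting is requested at 'ppem'.
static bool ShouldGridfitAtSize(const SkOTTableGridAndScanProcedure* table,
                                size_t tableLength, int ppem) {
    typedef SkOTTableGridAndScanProcedure::GaspRange GaspRange;
    const size_t numRanges = SkEndian_SwapBE16(table->numRanges);
    if (tableLength < sizeof(*table) + numRanges * sizeof(GaspRange)) {
        return false;  // truncated table, refuse to read past the buffer
    }
    const GaspRange* range = reinterpret_cast<const GaspRange*>(table + 1);
    for (size_t i = 0; i < numRanges; ++i, ++range) {
        if (ppem <= SkEndian_SwapBE16(range->maxPPEM)) {
            return 0 != (range->flags.raw.value &
                         GaspRange::behavior::Raw::GridfitMask);
        }
    }
    return false;
}
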
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_glyf.h b/gfx/skia/skia/src/sfnt/SkOTTable_glyf.h
new file mode 100644
index 000000000..77bd982b0
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_glyf.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_glyf_DEFINED
+#define SkOTTable_glyf_DEFINED
+
+#include "SkEndian.h"
+#include "SkOTTableTypes.h"
+#include "SkOTTable_head.h"
+#include "SkOTTable_loca.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableGlyphData;
+
+extern uint8_t const * const SK_OT_GlyphData_NoOutline;
+
+struct SkOTTableGlyph {
+ static const SK_OT_CHAR TAG0 = 'g';
+ static const SK_OT_CHAR TAG1 = 'l';
+ static const SK_OT_CHAR TAG2 = 'y';
+ static const SK_OT_CHAR TAG3 = 'f';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableGlyph>::value;
+
+ class Iterator {
+ public:
+ Iterator(const SkOTTableGlyph& glyf,
+ const SkOTTableIndexToLocation& loca,
+ SkOTTableHead::IndexToLocFormat locaFormat)
+ : fGlyf(glyf)
+ , fLocaFormat(SkOTTableHead::IndexToLocFormat::ShortOffsets == locaFormat.value ? 0 : 1)
+ , fCurrentGlyphOffset(0)
+ { fLocaPtr.shortOffset = reinterpret_cast<const SK_OT_USHORT*>(&loca); }
+
+ void advance(uint16_t num) {
+ fLocaPtr.shortOffset += num << fLocaFormat;
+ fCurrentGlyphOffset = fLocaFormat ? SkEndian_SwapBE32(*fLocaPtr.longOffset)
+ : uint32_t(SkEndian_SwapBE16(*fLocaPtr.shortOffset) << 1);
+ }
+ const SkOTTableGlyphData* next() {
+ uint32_t previousGlyphOffset = fCurrentGlyphOffset;
+ advance(1);
+ if (previousGlyphOffset == fCurrentGlyphOffset) {
+ return reinterpret_cast<const SkOTTableGlyphData*>(&SK_OT_GlyphData_NoOutline);
+ } else {
+ return reinterpret_cast<const SkOTTableGlyphData*>(
+ reinterpret_cast<const SK_OT_BYTE*>(&fGlyf) + previousGlyphOffset
+ );
+ }
+ }
+ private:
+ const SkOTTableGlyph& fGlyf;
+ uint16_t fLocaFormat; //0 or 1
+ uint32_t fCurrentGlyphOffset;
+ union LocaPtr {
+ const SK_OT_USHORT* shortOffset;
+ const SK_OT_ULONG* longOffset;
+ } fLocaPtr;
+ };
+};
+
+struct SkOTTableGlyphData {
+ SK_OT_SHORT numberOfContours; //== -1 Composite, > 0 Simple
+ SK_OT_FWORD xMin;
+ SK_OT_FWORD yMin;
+ SK_OT_FWORD xMax;
+ SK_OT_FWORD yMax;
+
+ struct Simple {
+ SK_OT_USHORT endPtsOfContours[1/*numberOfContours*/];
+
+ struct Instructions {
+ SK_OT_USHORT length;
+ SK_OT_BYTE data[1/*length*/];
+ };
+
+ union Flags {
+ struct Field {
+ SK_OT_BYTE_BITFIELD(
+ OnCurve,
+ xShortVector,
+ yShortVector,
+ Repeat,
+ xIsSame_xShortVectorPositive,
+ yIsSame_yShortVectorPositive,
+ Reserved6,
+ Reserved7)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT OnCurveMask = SkTEndian_SwapBE16(1 << 0);
+ static const SK_OT_USHORT xShortVectorMask = SkTEndian_SwapBE16(1 << 1);
+ static const SK_OT_USHORT yShortVectorMask = SkTEndian_SwapBE16(1 << 2);
+ static const SK_OT_USHORT RepeatMask = SkTEndian_SwapBE16(1 << 3);
+ static const SK_OT_USHORT xIsSame_xShortVectorPositiveMask = SkTEndian_SwapBE16(1 << 4);
+ static const SK_OT_USHORT yIsSame_yShortVectorPositiveMask = SkTEndian_SwapBE16(1 << 5);
+ SK_OT_BYTE value;
+ } raw;
+ };
+
+ //xCoordinates
+ //yCoordinates
+ };
+
+ struct Composite {
+ struct Component {
+ union Flags {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ WE_HAVE_INSTRUCTIONS,
+ USE_MY_METRICS,
+ OVERLAP_COMPOUND,
+ SCALED_COMPONENT_OFFSET,
+ UNSCALED_COMPONENT_OFFSET,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ ARG_1_AND_2_ARE_WORDS,
+ ARGS_ARE_XY_VALUES,
+ ROUND_XY_TO_GRID,
+ WE_HAVE_A_SCALE,
+ RESERVED,
+ MORE_COMPONENTS,
+ WE_HAVE_AN_X_AND_Y_SCALE,
+ WE_HAVE_A_TWO_BY_TWO)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT ARG_1_AND_2_ARE_WORDS_Mask = SkTEndian_SwapBE16(1 << 0);
+ static const SK_OT_USHORT ARGS_ARE_XY_VALUES_Mask = SkTEndian_SwapBE16(1 << 1);
+ static const SK_OT_USHORT ROUND_XY_TO_GRID_Mask = SkTEndian_SwapBE16(1 << 2);
+ static const SK_OT_USHORT WE_HAVE_A_SCALE_Mask = SkTEndian_SwapBE16(1 << 3);
+ static const SK_OT_USHORT RESERVED_Mask = SkTEndian_SwapBE16(1 << 4);
+ static const SK_OT_USHORT MORE_COMPONENTS_Mask = SkTEndian_SwapBE16(1 << 5);
+ static const SK_OT_USHORT WE_HAVE_AN_X_AND_Y_SCALE_Mask = SkTEndian_SwapBE16(1 << 6);
+ static const SK_OT_USHORT WE_HAVE_A_TWO_BY_TWO_Mask = SkTEndian_SwapBE16(1 << 7);
+
+ static const SK_OT_USHORT WE_HAVE_INSTRUCTIONS_Mask = SkTEndian_SwapBE16(1 << 8);
+ static const SK_OT_USHORT USE_MY_METRICS_Mask = SkTEndian_SwapBE16(1 << 9);
+ static const SK_OT_USHORT OVERLAP_COMPOUND_Mask = SkTEndian_SwapBE16(1 << 10);
+ static const SK_OT_USHORT SCALED_COMPONENT_OFFSET_Mask = SkTEndian_SwapBE16(1 << 11);
+                        static const SK_OT_USHORT UNSCALED_COMPONENT_OFFSET_Mask = SkTEndian_SwapBE16(1 << 12);
+ //Reserved
+ //Reserved
+ //Reserved
+ SK_OT_USHORT value;
+ } raw;
+ } flags;
+ SK_OT_USHORT glyphIndex;
+ union Transform {
+ union Matrix {
+ /** !WE_HAVE_A_SCALE & !WE_HAVE_AN_X_AND_Y_SCALE & !WE_HAVE_A_TWO_BY_TWO */
+ struct None { } none;
+ /** WE_HAVE_A_SCALE */
+ struct Scale {
+ SK_OT_F2DOT14 a_d;
+ } scale;
+ /** WE_HAVE_AN_X_AND_Y_SCALE */
+ struct ScaleXY {
+ SK_OT_F2DOT14 a;
+ SK_OT_F2DOT14 d;
+ } scaleXY;
+ /** WE_HAVE_A_TWO_BY_TWO */
+ struct TwoByTwo {
+ SK_OT_F2DOT14 a;
+ SK_OT_F2DOT14 b;
+ SK_OT_F2DOT14 c;
+ SK_OT_F2DOT14 d;
+ } twoByTwo;
+ };
+ /** ARG_1_AND_2_ARE_WORDS & ARGS_ARE_XY_VALUES */
+ struct WordValue {
+ SK_OT_FWORD e;
+ SK_OT_FWORD f;
+ SkOTTableGlyphData::Composite::Component::Transform::Matrix matrix;
+ } wordValue;
+ /** !ARG_1_AND_2_ARE_WORDS & ARGS_ARE_XY_VALUES */
+ struct ByteValue {
+ SK_OT_CHAR e;
+ SK_OT_CHAR f;
+ SkOTTableGlyphData::Composite::Component::Transform::Matrix matrix;
+ } byteValue;
+ /** ARG_1_AND_2_ARE_WORDS & !ARGS_ARE_XY_VALUES */
+ struct WordIndex {
+ SK_OT_USHORT compoundPointIndex;
+ SK_OT_USHORT componentPointIndex;
+ SkOTTableGlyphData::Composite::Component::Transform::Matrix matrix;
+ } wordIndex;
+ /** !ARG_1_AND_2_ARE_WORDS & !ARGS_ARE_XY_VALUES */
+ struct ByteIndex {
+ SK_OT_BYTE compoundPointIndex;
+ SK_OT_BYTE componentPointIndex;
+ SkOTTableGlyphData::Composite::Component::Transform::Matrix matrix;
+ } byteIndex;
+ } transform;
+ } component;//[] last element does not set MORE_COMPONENTS
+
+    /** Comes after the last Component if the last component has WE_HAVE_INSTRUCTIONS set. */
+ struct Instructions {
+ SK_OT_USHORT length;
+ SK_OT_BYTE data[1/*length*/];
+ };
+ };
+};
+
+#pragma pack(pop)
+
+#endif
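
The Iterator above hides the loca bookkeeping: advance() steps the loca cursor (shifting short offsets left by one, since they store half the byte offset), and next() returns a no-outline sentinel when two consecutive offsets are equal. A usage sketch, assuming the glyf, loca, and head tables have already been fetched and that bounds checking happens elsewhere; the helper name is illustrative:

#include <stdint.h>
#include "SkEndian.h"
#include "SkOTTable_glyf.h"
#include "SkOTTable_head.h"
#include "SkOTTable_loca.h"

// Hypothetical helper: contour count for 'glyphId'; negative means composite.
static int ContourCountForGlyph(const SkOTTableGlyph& glyf,
                                const SkOTTableIndexToLocation& loca,
                                const SkOTTableHead& head,
                                uint16_t glyphId) {
    SkOTTableGlyph::Iterator iter(glyf, loca, head.indexToLocFormat);
    iter.advance(glyphId);  // position the loca cursor on the requested glyph
    const SkOTTableGlyphData* data = iter.next();
    if (data == reinterpret_cast<const SkOTTableGlyphData*>(&SK_OT_GlyphData_NoOutline)) {
        return 0;  // zero-length loca entry: the glyph has no outline
    }
    return static_cast<int16_t>(SkEndian_SwapBE16(data->numberOfContours));
}
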
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_head.h b/gfx/skia/skia/src/sfnt/SkOTTable_head.h
new file mode 100644
index 000000000..0011eead8
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_head.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_head_DEFINED
+#define SkOTTable_head_DEFINED
+
+#include "SkEndian.h"
+#include "SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableHead {
+ static const SK_OT_CHAR TAG0 = 'h';
+ static const SK_OT_CHAR TAG1 = 'e';
+ static const SK_OT_CHAR TAG2 = 'a';
+ static const SK_OT_CHAR TAG3 = 'd';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableHead>::value;
+
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed version1 = SkTEndian_SwapBE32(0x00010000);
+ SK_OT_Fixed fontRevision;
+ static const uint32_t fontChecksum = 0xB1B0AFBA; //checksum of all TT fonts
+ SK_OT_ULONG checksumAdjustment;
+ SK_OT_ULONG magicNumber;
+ static const SK_OT_ULONG magicNumberConst = SkTEndian_SwapBE32(0x5F0F3CF5);
+ union Flags {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ GXMetamorphosis_Apple,
+ HasStrongRTL_Apple,
+ HasIndicStyleRearrangement,
+ AgfaMicroTypeExpressProcessed,
+ FontConverted,
+ DesignedForClearType,
+ LastResort,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ BaselineAtY0,
+ LeftSidebearingAtX0,
+ InstructionsDependOnPointSize,
+ IntegerScaling,
+ InstructionsAlterAdvanceWidth,
+ VerticalCenteredGlyphs_Apple,
+ Reserved06,
+ RequiresLayout_Apple)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT BaselineAtY0Mask = SkTEndian_SwapBE16(1 << 0);
+ static const SK_OT_USHORT LeftSidebearingAtX0Mask = SkTEndian_SwapBE16(1 << 1);
+ static const SK_OT_USHORT InstructionsDependOnPointSizeMask = SkTEndian_SwapBE16(1 << 2);
+ static const SK_OT_USHORT IntegerScalingMask = SkTEndian_SwapBE16(1 << 3);
+ static const SK_OT_USHORT InstructionsAlterAdvanceWidthMask = SkTEndian_SwapBE16(1 << 4);
+ static const SK_OT_USHORT VerticalCenteredGlyphs_AppleMask = SkTEndian_SwapBE16(1 << 5);
+ //Reserved
+ static const SK_OT_USHORT RequiresLayout_AppleMask = SkTEndian_SwapBE16(1 << 7);
+
+ static const SK_OT_USHORT GXMetamorphosis_AppleMask = SkTEndian_SwapBE16(1 << 8);
+ static const SK_OT_USHORT HasStrongRTL_AppleMask = SkTEndian_SwapBE16(1 << 9);
+ static const SK_OT_USHORT HasIndicStyleRearrangementMask = SkTEndian_SwapBE16(1 << 10);
+ static const SK_OT_USHORT AgfaMicroTypeExpressProcessedMask = SkTEndian_SwapBE16(1 << 11);
+ static const SK_OT_USHORT FontConvertedMask = SkTEndian_SwapBE16(1 << 12);
+ static const SK_OT_USHORT DesignedForClearTypeMask = SkTEndian_SwapBE16(1 << 13);
+ static const SK_OT_USHORT LastResortMask = SkTEndian_SwapBE16(1 << 14);
+ //Reserved
+ SK_OT_USHORT value;
+ } raw;
+ } flags;
+ SK_OT_USHORT unitsPerEm;
+ SK_OT_LONGDATETIME created;
+ SK_OT_LONGDATETIME modified;
+ SK_OT_SHORT xMin;
+ SK_OT_SHORT yMin;
+ SK_OT_SHORT xMax;
+ SK_OT_SHORT yMax;
+ union MacStyle {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Bold,
+ Italic,
+ Underline,
+ Outline,
+ Shadow,
+ Condensed,
+ Extended,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT BoldMask = SkTEndian_SwapBE16(1);
+ static const SK_OT_USHORT ItalicMask = SkTEndian_SwapBE16(1 << 1);
+ static const SK_OT_USHORT UnderlineMask = SkTEndian_SwapBE16(1 << 2);
+ static const SK_OT_USHORT OutlineMask = SkTEndian_SwapBE16(1 << 3);
+ static const SK_OT_USHORT ShadowMask = SkTEndian_SwapBE16(1 << 4);
+ static const SK_OT_USHORT CondensedMask = SkTEndian_SwapBE16(1 << 5);
+ static const SK_OT_USHORT ExtendedMask = SkTEndian_SwapBE16(1 << 6);
+
+ SK_OT_USHORT value;
+ } raw;
+ } macStyle;
+ SK_OT_USHORT lowestRecPPEM;
+ struct FontDirectionHint {
+ enum Value : SK_OT_SHORT {
+ FullyMixedDirectionalGlyphs = SkTEndian_SwapBE16(0),
+ OnlyStronglyLTR = SkTEndian_SwapBE16(1),
+ StronglyLTR = SkTEndian_SwapBE16(2),
+ OnlyStronglyRTL = static_cast<SK_OT_SHORT>(SkTEndian_SwapBE16((uint16_t)-1)),
+ StronglyRTL = static_cast<SK_OT_SHORT>(SkTEndian_SwapBE16((uint16_t)-2)),
+ } value;
+ } fontDirectionHint;
+ struct IndexToLocFormat {
+ enum Value : SK_OT_SHORT {
+ ShortOffsets = SkTEndian_SwapBE16(0),
+ LongOffsets = SkTEndian_SwapBE16(1),
+ } value;
+ } indexToLocFormat;
+ struct GlyphDataFormat {
+ enum Value : SK_OT_SHORT {
+ CurrentFormat = SkTEndian_SwapBE16(0),
+ } value;
+ } glyphDataFormat;
+};
+
+#pragma pack(pop)
+
+
+#include <stddef.h>
+static_assert(offsetof(SkOTTableHead, glyphDataFormat) == 52, "SkOTTableHead_glyphDataFormat_not_at_52");
+static_assert(sizeof(SkOTTableHead) == 54, "sizeof_SkOTTableHead_not_54");
+
+#endif
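
Two details worth noting when reading 'head': enum-typed fields such as indexToLocFormat store big-endian values, but their named constants are pre-swapped too, so they compare directly; plain numeric fields still need a swap. A brief sketch; the helper names are illustrative:

#include "SkEndian.h"
#include "SkOTTable_head.h"

// Hypothetical helpers over a fetched 'head' table.
static bool UsesLongLocaOffsets(const SkOTTableHead& head) {
    return head.indexToLocFormat.value ==
           SkOTTableHead::IndexToLocFormat::LongOffsets;
}

static int UnitsPerEm(const SkOTTableHead& head) {
    return SkEndian_SwapBE16(head.unitsPerEm);  // swap to host order
}
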
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_hhea.h b/gfx/skia/skia/src/sfnt/SkOTTable_hhea.h
new file mode 100644
index 000000000..76b15e276
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_hhea.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_hhea_DEFINED
+#define SkOTTable_hhea_DEFINED
+
+#include "SkEndian.h"
+#include "SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableHorizontalHeader {
+ static const SK_OT_CHAR TAG0 = 'h';
+ static const SK_OT_CHAR TAG1 = 'h';
+ static const SK_OT_CHAR TAG2 = 'e';
+ static const SK_OT_CHAR TAG3 = 'a';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableHorizontalHeader>::value;
+
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed version1 = SkTEndian_SwapBE32(0x00010000);
+ SK_OT_FWORD Ascender;
+ SK_OT_FWORD Descender;
+ SK_OT_FWORD LineGap;
+ SK_OT_UFWORD advanceWidthMax;
+ SK_OT_FWORD minLeftSideBearing;
+ SK_OT_FWORD minRightSideBearing;
+ SK_OT_FWORD xMaxExtent;
+ SK_OT_SHORT caretSlopeRise;
+ SK_OT_SHORT caretSlopeRun;
+ SK_OT_SHORT caretOffset;
+ SK_OT_SHORT Reserved24;
+ SK_OT_SHORT Reserved26;
+ SK_OT_SHORT Reserved28;
+ SK_OT_SHORT Reserved30;
+ struct MetricDataFormat {
+ enum Value : SK_OT_SHORT {
+ CurrentFormat = SkTEndian_SwapBE16(0),
+ } value;
+ } metricDataFormat;
+ SK_OT_USHORT numberOfHMetrics;
+};
+
+#pragma pack(pop)
+
+
+#include <stddef.h>
+static_assert(offsetof(SkOTTableHorizontalHeader, numberOfHMetrics) == 34, "SkOTTableHorizontalHeader_numberOfHMetrics_not_at_34");
+static_assert(sizeof(SkOTTableHorizontalHeader) == 36, "sizeof_SkOTTableHorizontalHeader_not_36");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_loca.h b/gfx/skia/skia/src/sfnt/SkOTTable_loca.h
new file mode 100644
index 000000000..586daf1d4
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_loca.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_loca_DEFINED
+#define SkOTTable_loca_DEFINED
+
+#include "SkEndian.h"
+#include "SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableIndexToLocation {
+ static const SK_OT_CHAR TAG0 = 'l';
+ static const SK_OT_CHAR TAG1 = 'o';
+ static const SK_OT_CHAR TAG2 = 'c';
+ static const SK_OT_CHAR TAG3 = 'a';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableIndexToLocation>::value;
+
+ union Offsets {
+ SK_OT_USHORT shortOffset[1];
+ SK_OT_ULONG longOffset[1];
+ } offsets;
+};
+
+#pragma pack(pop)
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_maxp.h b/gfx/skia/skia/src/sfnt/SkOTTable_maxp.h
new file mode 100644
index 000000000..d7feac698
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_maxp.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_maxp_DEFINED
+#define SkOTTable_maxp_DEFINED
+
+#include "SkOTTableTypes.h"
+#include "SkOTTable_maxp_CFF.h"
+#include "SkOTTable_maxp_TT.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableMaximumProfile {
+ static const SK_OT_CHAR TAG0 = 'm';
+ static const SK_OT_CHAR TAG1 = 'a';
+ static const SK_OT_CHAR TAG2 = 'x';
+ static const SK_OT_CHAR TAG3 = 'p';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableMaximumProfile>::value;
+
+ union Version {
+ SK_OT_Fixed version;
+
+ struct CFF : SkOTTableMaximumProfile_CFF { } cff;
+ struct TT : SkOTTableMaximumProfile_TT { } tt;
+ } version;
+};
+
+#pragma pack(pop)
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_maxp_CFF.h b/gfx/skia/skia/src/sfnt/SkOTTable_maxp_CFF.h
new file mode 100644
index 000000000..435d82388
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_maxp_CFF.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_maxp_CFF_DEFINED
+#define SkOTTable_maxp_CFF_DEFINED
+
+#include "SkEndian.h"
+#include "SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableMaximumProfile_CFF {
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed VERSION = SkTEndian_SwapBE32(0x00005000);
+
+ SK_OT_USHORT numGlyphs;
+};
+
+#pragma pack(pop)
+
+
+#include <stddef.h>
+static_assert(offsetof(SkOTTableMaximumProfile_CFF, numGlyphs) == 4, "SkOTTableMaximumProfile_CFF_numGlyphs_not_at_4");
+static_assert(sizeof(SkOTTableMaximumProfile_CFF) == 6, "sizeof_SkOTTableMaximumProfile_CFF_not_6");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_maxp_TT.h b/gfx/skia/skia/src/sfnt/SkOTTable_maxp_TT.h
new file mode 100644
index 000000000..d45999723
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_maxp_TT.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_maxp_TT_DEFINED
+#define SkOTTable_maxp_TT_DEFINED
+
+#include "SkEndian.h"
+#include "SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableMaximumProfile_TT {
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed VERSION = SkTEndian_SwapBE32(0x00010000);
+
+ SK_OT_USHORT numGlyphs;
+ SK_OT_USHORT maxPoints;
+ SK_OT_USHORT maxContours;
+ SK_OT_USHORT maxCompositePoints;
+ SK_OT_USHORT maxCompositeContours;
+ struct MaxZones {
+ enum Value : SK_OT_USHORT {
+ DoesNotUseTwilightZone = SkTEndian_SwapBE16(1),
+ UsesTwilightZone = SkTEndian_SwapBE16(2),
+ } value;
+ } maxZones;
+ SK_OT_USHORT maxTwilightPoints;
+ SK_OT_USHORT maxStorage;
+ SK_OT_USHORT maxFunctionDefs;
+ SK_OT_USHORT maxInstructionDefs;
+ SK_OT_USHORT maxStackElements;
+ SK_OT_USHORT maxSizeOfInstructions;
+ SK_OT_USHORT maxComponentElements;
+ SK_OT_USHORT maxComponentDepth;
+};
+
+#pragma pack(pop)
+
+
+#include <stddef.h>
+static_assert(offsetof(SkOTTableMaximumProfile_TT, maxComponentDepth) == 30, "SkOTTableMaximumProfile_TT_maxComponentDepth_not_at_30");
+static_assert(sizeof(SkOTTableMaximumProfile_TT) == 32, "sizeof_SkOTTableMaximumProfile_TT_not_32");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_name.cpp b/gfx/skia/skia/src/sfnt/SkOTTable_name.cpp
new file mode 100644
index 000000000..476e0ce36
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_name.cpp
@@ -0,0 +1,534 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkOTTable_name.h"
+
+#include "SkEndian.h"
+#include "SkString.h"
+#include "SkTSearch.h"
+#include "SkTemplates.h"
+#include "SkUtils.h"
+
+static SkUnichar SkUTF16BE_NextUnichar(const uint16_t** srcPtr) {
+ SkASSERT(srcPtr && *srcPtr);
+
+ const uint16_t* src = *srcPtr;
+ SkUnichar c = SkEndian_SwapBE16(*src++);
+
+ SkASSERT(!SkUTF16_IsLowSurrogate(c));
+ if (SkUTF16_IsHighSurrogate(c)) {
+ unsigned c2 = SkEndian_SwapBE16(*src++);
+ SkASSERT(SkUTF16_IsLowSurrogate(c2));
+
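+        // Combine the surrogate pair: the constant folds together subtracting the
+        // 0xD800/0xDC00 surrogate bases and adding the 0x10000 supplementary offset.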
+ c = (c << 10) + c2 + (0x10000 - (0xD800 << 10) - 0xDC00);
+ }
+ *srcPtr = src;
+ return c;
+}
+
+static void SkStringFromUTF16BE(const uint16_t* utf16be, size_t length, SkString& utf8) {
+ SkASSERT(utf16be != nullptr);
+
+ utf8.reset();
+ size_t numberOf16BitValues = length / 2;
+ const uint16_t* end = utf16be + numberOf16BitValues;
+ while (utf16be < end) {
+ utf8.appendUnichar(SkUTF16BE_NextUnichar(&utf16be));
+ }
+}
+
+/** UnicodeFromMacRoman[macRomanPoint - 0x80] -> unicodeCodePoint.
+ * Derived from http://www.unicode.org/Public/MAPPINGS/VENDORS/APPLE/ROMAN.TXT .
+ * In MacRoman the first 128 code points match ASCII code points.
+ * This maps the second 128 MacRoman code points to unicode code points.
+ */
+static uint16_t UnicodeFromMacRoman[0x80] = {
+ 0x00C4, 0x00C5, 0x00C7, 0x00C9, 0x00D1, 0x00D6, 0x00DC, 0x00E1,
+ 0x00E0, 0x00E2, 0x00E4, 0x00E3, 0x00E5, 0x00E7, 0x00E9, 0x00E8,
+ 0x00EA, 0x00EB, 0x00ED, 0x00EC, 0x00EE, 0x00EF, 0x00F1, 0x00F3,
+ 0x00F2, 0x00F4, 0x00F6, 0x00F5, 0x00FA, 0x00F9, 0x00FB, 0x00FC,
+ 0x2020, 0x00B0, 0x00A2, 0x00A3, 0x00A7, 0x2022, 0x00B6, 0x00DF,
+ 0x00AE, 0x00A9, 0x2122, 0x00B4, 0x00A8, 0x2260, 0x00C6, 0x00D8,
+ 0x221E, 0x00B1, 0x2264, 0x2265, 0x00A5, 0x00B5, 0x2202, 0x2211,
+ 0x220F, 0x03C0, 0x222B, 0x00AA, 0x00BA, 0x03A9, 0x00E6, 0x00F8,
+ 0x00BF, 0x00A1, 0x00AC, 0x221A, 0x0192, 0x2248, 0x2206, 0x00AB,
+ 0x00BB, 0x2026, 0x00A0, 0x00C0, 0x00C3, 0x00D5, 0x0152, 0x0153,
+ 0x2013, 0x2014, 0x201C, 0x201D, 0x2018, 0x2019, 0x00F7, 0x25CA,
+ 0x00FF, 0x0178, 0x2044, 0x20AC, 0x2039, 0x203A, 0xFB01, 0xFB02,
+ 0x2021, 0x00B7, 0x201A, 0x201E, 0x2030, 0x00C2, 0x00CA, 0x00C1,
+ 0x00CB, 0x00C8, 0x00CD, 0x00CE, 0x00CF, 0x00CC, 0x00D3, 0x00D4,
+ 0xF8FF, 0x00D2, 0x00DA, 0x00DB, 0x00D9, 0x0131, 0x02C6, 0x02DC,
+ 0x00AF, 0x02D8, 0x02D9, 0x02DA, 0x00B8, 0x02DD, 0x02DB, 0x02C7,
+};
+
+static void SkStringFromMacRoman(const uint8_t* macRoman, size_t length, SkString& utf8) {
+ utf8.reset();
+ for (size_t i = 0; i < length; ++i) {
+ utf8.appendUnichar(macRoman[i] < 0x80 ? macRoman[i]
+ : UnicodeFromMacRoman[macRoman[i] - 0x80]);
+ }
+}
+
+static struct BCP47FromLanguageId {
+ uint16_t languageID;
+ const char* bcp47;
+}
+/** The Mac and Windows values do not conflict, so this is currently a single table. */
+BCP47FromLanguageID[] = {
+ /** A mapping from Mac Language Designators to BCP 47 codes.
+ * The following list was constructed more or less manually.
+ * Apple now uses BCP 47 (post OSX10.4), so there will be no new entries.
+ */
+ {0, "en"}, //English
+ {1, "fr"}, //French
+ {2, "de"}, //German
+ {3, "it"}, //Italian
+ {4, "nl"}, //Dutch
+ {5, "sv"}, //Swedish
+ {6, "es"}, //Spanish
+ {7, "da"}, //Danish
+ {8, "pt"}, //Portuguese
+ {9, "nb"}, //Norwegian
+ {10, "he"}, //Hebrew
+ {11, "ja"}, //Japanese
+ {12, "ar"}, //Arabic
+ {13, "fi"}, //Finnish
+ {14, "el"}, //Greek
+ {15, "is"}, //Icelandic
+ {16, "mt"}, //Maltese
+ {17, "tr"}, //Turkish
+ {18, "hr"}, //Croatian
+ {19, "zh-Hant"}, //Chinese (Traditional)
+ {20, "ur"}, //Urdu
+ {21, "hi"}, //Hindi
+ {22, "th"}, //Thai
+ {23, "ko"}, //Korean
+ {24, "lt"}, //Lithuanian
+ {25, "pl"}, //Polish
+ {26, "hu"}, //Hungarian
+ {27, "et"}, //Estonian
+ {28, "lv"}, //Latvian
+ {29, "se"}, //Sami
+ {30, "fo"}, //Faroese
+ {31, "fa"}, //Farsi (Persian)
+ {32, "ru"}, //Russian
+ {33, "zh-Hans"}, //Chinese (Simplified)
+ {34, "nl"}, //Dutch
+ {35, "ga"}, //Irish(Gaelic)
+ {36, "sq"}, //Albanian
+ {37, "ro"}, //Romanian
+ {38, "cs"}, //Czech
+ {39, "sk"}, //Slovak
+ {40, "sl"}, //Slovenian
+ {41, "yi"}, //Yiddish
+ {42, "sr"}, //Serbian
+ {43, "mk"}, //Macedonian
+ {44, "bg"}, //Bulgarian
+ {45, "uk"}, //Ukrainian
+ {46, "be"}, //Byelorussian
+ {47, "uz"}, //Uzbek
+ {48, "kk"}, //Kazakh
+ {49, "az-Cyrl"}, //Azerbaijani (Cyrillic)
+ {50, "az-Arab"}, //Azerbaijani (Arabic)
+ {51, "hy"}, //Armenian
+ {52, "ka"}, //Georgian
+ {53, "mo"}, //Moldavian
+ {54, "ky"}, //Kirghiz
+ {55, "tg"}, //Tajiki
+ {56, "tk"}, //Turkmen
+ {57, "mn-Mong"}, //Mongolian (Traditional)
+ {58, "mn-Cyrl"}, //Mongolian (Cyrillic)
+ {59, "ps"}, //Pashto
+ {60, "ku"}, //Kurdish
+ {61, "ks"}, //Kashmiri
+ {62, "sd"}, //Sindhi
+ {63, "bo"}, //Tibetan
+ {64, "ne"}, //Nepali
+ {65, "sa"}, //Sanskrit
+ {66, "mr"}, //Marathi
+ {67, "bn"}, //Bengali
+ {68, "as"}, //Assamese
+ {69, "gu"}, //Gujarati
+ {70, "pa"}, //Punjabi
+ {71, "or"}, //Oriya
+ {72, "ml"}, //Malayalam
+ {73, "kn"}, //Kannada
+ {74, "ta"}, //Tamil
+ {75, "te"}, //Telugu
+ {76, "si"}, //Sinhalese
+ {77, "my"}, //Burmese
+ {78, "km"}, //Khmer
+ {79, "lo"}, //Lao
+ {80, "vi"}, //Vietnamese
+ {81, "id"}, //Indonesian
+ {82, "tl"}, //Tagalog
+ {83, "ms-Latn"}, //Malay (Roman)
+ {84, "ms-Arab"}, //Malay (Arabic)
+ {85, "am"}, //Amharic
+ {86, "ti"}, //Tigrinya
+ {87, "om"}, //Oromo
+ {88, "so"}, //Somali
+ {89, "sw"}, //Swahili
+ {90, "rw"}, //Kinyarwanda/Ruanda
+ {91, "rn"}, //Rundi
+ {92, "ny"}, //Nyanja/Chewa
+ {93, "mg"}, //Malagasy
+ {94, "eo"}, //Esperanto
+ {128, "cy"}, //Welsh
+ {129, "eu"}, //Basque
+ {130, "ca"}, //Catalan
+ {131, "la"}, //Latin
+ {132, "qu"}, //Quechua
+ {133, "gn"}, //Guarani
+ {134, "ay"}, //Aymara
+ {135, "tt"}, //Tatar
+ {136, "ug"}, //Uighur
+ {137, "dz"}, //Dzongkha
+ {138, "jv-Latn"}, //Javanese (Roman)
+ {139, "su-Latn"}, //Sundanese (Roman)
+ {140, "gl"}, //Galician
+ {141, "af"}, //Afrikaans
+ {142, "br"}, //Breton
+ {143, "iu"}, //Inuktitut
+ {144, "gd"}, //Scottish (Gaelic)
+ {145, "gv"}, //Manx (Gaelic)
+ {146, "ga"}, //Irish (Gaelic with Lenition)
+ {147, "to"}, //Tongan
+ {148, "el"}, //Greek (Polytonic) Note: ISO 15924 does not have an equivalent script name.
+ {149, "kl"}, //Greenlandic
+ {150, "az-Latn"}, //Azerbaijani (Roman)
+ {151, "nn"}, //Nynorsk
+
+ /** A mapping from Windows LCID to BCP 47 codes.
+ * This list is the sorted, curated output of tools/win_lcid.cpp.
+ * Note that these are sorted by value for quick binary lookup, and not logically by lsb.
+     * The 'bare' language ids (e.g. 0x0001 for Arabic) are omitted
+ * as they do not appear as valid language ids in the OpenType specification.
+ */
+ { 0x0401, "ar-SA" }, //Arabic
+ { 0x0402, "bg-BG" }, //Bulgarian
+ { 0x0403, "ca-ES" }, //Catalan
+ { 0x0404, "zh-TW" }, //Chinese (Traditional)
+ { 0x0405, "cs-CZ" }, //Czech
+ { 0x0406, "da-DK" }, //Danish
+ { 0x0407, "de-DE" }, //German
+ { 0x0408, "el-GR" }, //Greek
+ { 0x0409, "en-US" }, //English
+ { 0x040a, "es-ES_tradnl" }, //Spanish
+ { 0x040b, "fi-FI" }, //Finnish
+ { 0x040c, "fr-FR" }, //French
+ { 0x040d, "he-IL" }, //Hebrew
+ { 0x040d, "he" }, //Hebrew
+ { 0x040e, "hu-HU" }, //Hungarian
+ { 0x040e, "hu" }, //Hungarian
+ { 0x040f, "is-IS" }, //Icelandic
+ { 0x0410, "it-IT" }, //Italian
+ { 0x0411, "ja-JP" }, //Japanese
+ { 0x0412, "ko-KR" }, //Korean
+ { 0x0413, "nl-NL" }, //Dutch
+ { 0x0414, "nb-NO" }, //Norwegian (Bokmål)
+ { 0x0415, "pl-PL" }, //Polish
+ { 0x0416, "pt-BR" }, //Portuguese
+ { 0x0417, "rm-CH" }, //Romansh
+ { 0x0418, "ro-RO" }, //Romanian
+ { 0x0419, "ru-RU" }, //Russian
+ { 0x041a, "hr-HR" }, //Croatian
+ { 0x041b, "sk-SK" }, //Slovak
+ { 0x041c, "sq-AL" }, //Albanian
+ { 0x041d, "sv-SE" }, //Swedish
+ { 0x041e, "th-TH" }, //Thai
+ { 0x041f, "tr-TR" }, //Turkish
+ { 0x0420, "ur-PK" }, //Urdu
+ { 0x0421, "id-ID" }, //Indonesian
+ { 0x0422, "uk-UA" }, //Ukrainian
+ { 0x0423, "be-BY" }, //Belarusian
+ { 0x0424, "sl-SI" }, //Slovenian
+ { 0x0425, "et-EE" }, //Estonian
+ { 0x0426, "lv-LV" }, //Latvian
+ { 0x0427, "lt-LT" }, //Lithuanian
+ { 0x0428, "tg-Cyrl-TJ" }, //Tajik (Cyrillic)
+ { 0x0429, "fa-IR" }, //Persian
+ { 0x042a, "vi-VN" }, //Vietnamese
+ { 0x042b, "hy-AM" }, //Armenian
+ { 0x042c, "az-Latn-AZ" }, //Azeri (Latin)
+ { 0x042d, "eu-ES" }, //Basque
+ { 0x042e, "hsb-DE" }, //Upper Sorbian
+ { 0x042f, "mk-MK" }, //Macedonian (FYROM)
+ { 0x0432, "tn-ZA" }, //Setswana
+ { 0x0434, "xh-ZA" }, //isiXhosa
+ { 0x0435, "zu-ZA" }, //isiZulu
+ { 0x0436, "af-ZA" }, //Afrikaans
+ { 0x0437, "ka-GE" }, //Georgian
+ { 0x0438, "fo-FO" }, //Faroese
+ { 0x0439, "hi-IN" }, //Hindi
+ { 0x043a, "mt-MT" }, //Maltese
+ { 0x043b, "se-NO" }, //Sami (Northern)
+ { 0x043e, "ms-MY" }, //Malay
+ { 0x043f, "kk-KZ" }, //Kazakh
+ { 0x0440, "ky-KG" }, //Kyrgyz
+ { 0x0441, "sw-KE" }, //Kiswahili
+ { 0x0442, "tk-TM" }, //Turkmen
+ { 0x0443, "uz-Latn-UZ" }, //Uzbek (Latin)
+ { 0x0443, "uz" }, //Uzbek
+ { 0x0444, "tt-RU" }, //Tatar
+ { 0x0445, "bn-IN" }, //Bengali
+ { 0x0446, "pa-IN" }, //Punjabi
+ { 0x0447, "gu-IN" }, //Gujarati
+ { 0x0448, "or-IN" }, //Oriya
+ { 0x0449, "ta-IN" }, //Tamil
+ { 0x044a, "te-IN" }, //Telugu
+ { 0x044b, "kn-IN" }, //Kannada
+ { 0x044c, "ml-IN" }, //Malayalam
+ { 0x044d, "as-IN" }, //Assamese
+ { 0x044e, "mr-IN" }, //Marathi
+ { 0x044f, "sa-IN" }, //Sanskrit
+ { 0x0450, "mn-Cyrl" }, //Mongolian (Cyrillic)
+ { 0x0451, "bo-CN" }, //Tibetan
+ { 0x0452, "cy-GB" }, //Welsh
+ { 0x0453, "km-KH" }, //Khmer
+ { 0x0454, "lo-LA" }, //Lao
+ { 0x0456, "gl-ES" }, //Galician
+ { 0x0457, "kok-IN" }, //Konkani
+ { 0x045a, "syr-SY" }, //Syriac
+ { 0x045b, "si-LK" }, //Sinhala
+ { 0x045d, "iu-Cans-CA" }, //Inuktitut (Syllabics)
+ { 0x045e, "am-ET" }, //Amharic
+ { 0x0461, "ne-NP" }, //Nepali
+ { 0x0462, "fy-NL" }, //Frisian
+ { 0x0463, "ps-AF" }, //Pashto
+ { 0x0464, "fil-PH" }, //Filipino
+ { 0x0465, "dv-MV" }, //Divehi
+ { 0x0468, "ha-Latn-NG" }, //Hausa (Latin)
+ { 0x046a, "yo-NG" }, //Yoruba
+ { 0x046b, "quz-BO" }, //Quechua
+ { 0x046c, "nso-ZA" }, //Sesotho sa Leboa
+ { 0x046d, "ba-RU" }, //Bashkir
+ { 0x046e, "lb-LU" }, //Luxembourgish
+ { 0x046f, "kl-GL" }, //Greenlandic
+ { 0x0470, "ig-NG" }, //Igbo
+ { 0x0478, "ii-CN" }, //Yi
+ { 0x047a, "arn-CL" }, //Mapudungun
+ { 0x047c, "moh-CA" }, //Mohawk
+ { 0x047e, "br-FR" }, //Breton
+ { 0x0480, "ug-CN" }, //Uyghur
+ { 0x0481, "mi-NZ" }, //Maori
+ { 0x0482, "oc-FR" }, //Occitan
+ { 0x0483, "co-FR" }, //Corsican
+ { 0x0484, "gsw-FR" }, //Alsatian
+ { 0x0485, "sah-RU" }, //Yakut
+ { 0x0486, "qut-GT" }, //K'iche
+ { 0x0487, "rw-RW" }, //Kinyarwanda
+ { 0x0488, "wo-SN" }, //Wolof
+ { 0x048c, "prs-AF" }, //Dari
+ { 0x0491, "gd-GB" }, //Scottish Gaelic
+ { 0x0801, "ar-IQ" }, //Arabic
+ { 0x0804, "zh-Hans" }, //Chinese (Simplified)
+ { 0x0807, "de-CH" }, //German
+ { 0x0809, "en-GB" }, //English
+ { 0x080a, "es-MX" }, //Spanish
+ { 0x080c, "fr-BE" }, //French
+ { 0x0810, "it-CH" }, //Italian
+ { 0x0813, "nl-BE" }, //Dutch
+ { 0x0814, "nn-NO" }, //Norwegian (Nynorsk)
+ { 0x0816, "pt-PT" }, //Portuguese
+ { 0x081a, "sr-Latn-CS" }, //Serbian (Latin)
+ { 0x081d, "sv-FI" }, //Swedish
+ { 0x082c, "az-Cyrl-AZ" }, //Azeri (Cyrillic)
+ { 0x082e, "dsb-DE" }, //Lower Sorbian
+ { 0x082e, "dsb" }, //Lower Sorbian
+ { 0x083b, "se-SE" }, //Sami (Northern)
+ { 0x083c, "ga-IE" }, //Irish
+ { 0x083e, "ms-BN" }, //Malay
+ { 0x0843, "uz-Cyrl-UZ" }, //Uzbek (Cyrillic)
+ { 0x0845, "bn-BD" }, //Bengali
+ { 0x0850, "mn-Mong-CN" }, //Mongolian (Traditional Mongolian)
+ { 0x085d, "iu-Latn-CA" }, //Inuktitut (Latin)
+ { 0x085f, "tzm-Latn-DZ" }, //Tamazight (Latin)
+ { 0x086b, "quz-EC" }, //Quechua
+ { 0x0c01, "ar-EG" }, //Arabic
+ { 0x0c04, "zh-Hant" }, //Chinese (Traditional)
+ { 0x0c07, "de-AT" }, //German
+ { 0x0c09, "en-AU" }, //English
+ { 0x0c0a, "es-ES" }, //Spanish
+ { 0x0c0c, "fr-CA" }, //French
+ { 0x0c1a, "sr-Cyrl-CS" }, //Serbian (Cyrillic)
+ { 0x0c3b, "se-FI" }, //Sami (Northern)
+ { 0x0c6b, "quz-PE" }, //Quechua
+ { 0x1001, "ar-LY" }, //Arabic
+ { 0x1004, "zh-SG" }, //Chinese (Simplified)
+ { 0x1007, "de-LU" }, //German
+ { 0x1009, "en-CA" }, //English
+ { 0x100a, "es-GT" }, //Spanish
+ { 0x100c, "fr-CH" }, //French
+ { 0x101a, "hr-BA" }, //Croatian (Latin)
+ { 0x103b, "smj-NO" }, //Sami (Lule)
+ { 0x1401, "ar-DZ" }, //Arabic
+ { 0x1404, "zh-MO" }, //Chinese (Traditional)
+ { 0x1407, "de-LI" }, //German
+ { 0x1409, "en-NZ" }, //English
+ { 0x140a, "es-CR" }, //Spanish
+ { 0x140c, "fr-LU" }, //French
+ { 0x141a, "bs-Latn-BA" }, //Bosnian (Latin)
+ { 0x141a, "bs" }, //Bosnian
+ { 0x143b, "smj-SE" }, //Sami (Lule)
+ { 0x143b, "smj" }, //Sami (Lule)
+ { 0x1801, "ar-MA" }, //Arabic
+ { 0x1809, "en-IE" }, //English
+ { 0x180a, "es-PA" }, //Spanish
+ { 0x180c, "fr-MC" }, //French
+ { 0x181a, "sr-Latn-BA" }, //Serbian (Latin)
+ { 0x183b, "sma-NO" }, //Sami (Southern)
+ { 0x1c01, "ar-TN" }, //Arabic
+ { 0x1c09, "en-ZA" }, //English
+ { 0x1c0a, "es-DO" }, //Spanish
+ { 0x1c1a, "sr-Cyrl-BA" }, //Serbian (Cyrillic)
+ { 0x1c3b, "sma-SE" }, //Sami (Southern)
+ { 0x1c3b, "sma" }, //Sami (Southern)
+ { 0x2001, "ar-OM" }, //Arabic
+ { 0x2009, "en-JM" }, //English
+ { 0x200a, "es-VE" }, //Spanish
+ { 0x201a, "bs-Cyrl-BA" }, //Bosnian (Cyrillic)
+ { 0x201a, "bs-Cyrl" }, //Bosnian (Cyrillic)
+ { 0x203b, "sms-FI" }, //Sami (Skolt)
+ { 0x203b, "sms" }, //Sami (Skolt)
+ { 0x2401, "ar-YE" }, //Arabic
+ { 0x2409, "en-029" }, //English
+ { 0x240a, "es-CO" }, //Spanish
+ { 0x241a, "sr-Latn-RS" }, //Serbian (Latin)
+ { 0x243b, "smn-FI" }, //Sami (Inari)
+ { 0x2801, "ar-SY" }, //Arabic
+ { 0x2809, "en-BZ" }, //English
+ { 0x280a, "es-PE" }, //Spanish
+ { 0x281a, "sr-Cyrl-RS" }, //Serbian (Cyrillic)
+ { 0x2c01, "ar-JO" }, //Arabic
+ { 0x2c09, "en-TT" }, //English
+ { 0x2c0a, "es-AR" }, //Spanish
+ { 0x2c1a, "sr-Latn-ME" }, //Serbian (Latin)
+ { 0x3001, "ar-LB" }, //Arabic
+ { 0x3009, "en-ZW" }, //English
+ { 0x300a, "es-EC" }, //Spanish
+ { 0x301a, "sr-Cyrl-ME" }, //Serbian (Cyrillic)
+ { 0x3401, "ar-KW" }, //Arabic
+ { 0x3409, "en-PH" }, //English
+ { 0x340a, "es-CL" }, //Spanish
+ { 0x3801, "ar-AE" }, //Arabic
+ { 0x380a, "es-UY" }, //Spanish
+ { 0x3c01, "ar-BH" }, //Arabic
+ { 0x3c0a, "es-PY" }, //Spanish
+ { 0x4001, "ar-QA" }, //Arabic
+ { 0x4009, "en-IN" }, //English
+ { 0x400a, "es-BO" }, //Spanish
+ { 0x4409, "en-MY" }, //English
+ { 0x440a, "es-SV" }, //Spanish
+ { 0x4809, "en-SG" }, //English
+ { 0x480a, "es-HN" }, //Spanish
+ { 0x4c0a, "es-NI" }, //Spanish
+ { 0x500a, "es-PR" }, //Spanish
+ { 0x540a, "es-US" }, //Spanish
+};
+
+namespace {
+bool BCP47FromLanguageIdLess(const BCP47FromLanguageId& a, const BCP47FromLanguageId& b) {
+ return a.languageID < b.languageID;
+}
+}
+
+bool SkOTTableName::Iterator::next(SkOTTableName::Iterator::Record& record) {
+ const size_t nameRecordsCount = SkEndian_SwapBE16(fName.count);
+ const SkOTTableName::Record* nameRecords = SkTAfter<const SkOTTableName::Record>(&fName);
+ const SkOTTableName::Record* nameRecord;
+
+ // Find the next record which matches the requested type.
+ do {
+ if (fIndex >= nameRecordsCount) {
+ return false;
+ }
+
+ nameRecord = &nameRecords[fIndex];
+ ++fIndex;
+ } while (fType != -1 && nameRecord->nameID.fontSpecific != fType);
+
+ record.type = nameRecord->nameID.fontSpecific;
+
+ const uint16_t stringTableOffset = SkEndian_SwapBE16(fName.stringOffset);
+ const char* stringTable = SkTAddOffset<const char>(&fName, stringTableOffset);
+
+ // Decode the name into UTF-8.
+ const uint16_t nameOffset = SkEndian_SwapBE16(nameRecord->offset);
+ const uint16_t nameLength = SkEndian_SwapBE16(nameRecord->length);
+ const char* nameString = SkTAddOffset<const char>(stringTable, nameOffset);
+ switch (nameRecord->platformID.value) {
+ case SkOTTableName::Record::PlatformID::Windows:
+ if (SkOTTableName::Record::EncodingID::Windows::UnicodeBMPUCS2
+ != nameRecord->encodingID.windows.value
+ && SkOTTableName::Record::EncodingID::Windows::UnicodeUCS4
+ != nameRecord->encodingID.windows.value
+ && SkOTTableName::Record::EncodingID::Windows::Symbol
+ != nameRecord->encodingID.windows.value)
+ {
+ record.name.reset();
+ break;
+ }
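+            // Fall through: valid Windows name strings are UTF-16BE, decoded like Unicode/ISO.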
+ case SkOTTableName::Record::PlatformID::Unicode:
+ case SkOTTableName::Record::PlatformID::ISO:
+ SkStringFromUTF16BE((const uint16_t*)nameString, nameLength, record.name);
+ break;
+
+ case SkOTTableName::Record::PlatformID::Macintosh:
+ // TODO: need better decoding, especially on Mac.
+ if (SkOTTableName::Record::EncodingID::Macintosh::Roman
+ != nameRecord->encodingID.macintosh.value)
+ {
+ record.name.reset();
+ break;
+ }
+ SkStringFromMacRoman((const uint8_t*)nameString, nameLength, record.name);
+ break;
+
+ case SkOTTableName::Record::PlatformID::Custom:
+ // These should never appear in a 'name' table.
+ default:
+ SkASSERT(false);
+ record.name.reset();
+ break;
+ }
+
+ // Determine the language.
+ const uint16_t languageID = SkEndian_SwapBE16(nameRecord->languageID.languageTagID);
+
+ // Handle format 1 languages.
+ if (SkOTTableName::format_1 == fName.format && languageID >= 0x8000) {
+ const uint16_t languageTagRecordIndex = languageID - 0x8000;
+
+ const SkOTTableName::Format1Ext* format1ext =
+ SkTAfter<const SkOTTableName::Format1Ext>(nameRecords, nameRecordsCount);
+
+ if (languageTagRecordIndex < SkEndian_SwapBE16(format1ext->langTagCount)) {
+ const SkOTTableName::Format1Ext::LangTagRecord* languageTagRecord =
+ SkTAfter<const SkOTTableName::Format1Ext::LangTagRecord>(format1ext);
+
+ uint16_t offset = SkEndian_SwapBE16(languageTagRecord[languageTagRecordIndex].offset);
+ uint16_t length = SkEndian_SwapBE16(languageTagRecord[languageTagRecordIndex].length);
+ const uint16_t* string = SkTAddOffset<const uint16_t>(stringTable, offset);
+ SkStringFromUTF16BE(string, length, record.language);
+ return true;
+ }
+ }
+
+ // Handle format 0 languages, translating them into BCP 47.
+ const BCP47FromLanguageId target = { languageID, "" };
+ int languageIndex = SkTSearch<BCP47FromLanguageId, BCP47FromLanguageIdLess>(
+ BCP47FromLanguageID, SK_ARRAY_COUNT(BCP47FromLanguageID), target, sizeof(target));
+ if (languageIndex >= 0) {
+ record.language = BCP47FromLanguageID[languageIndex].bcp47;
+ return true;
+ }
+
+ // Unknown language, return the BCP 47 code 'und' for 'undetermined'.
+ record.language = "und";
+ return true;
+}
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_name.h b/gfx/skia/skia/src/sfnt/SkOTTable_name.h
new file mode 100644
index 000000000..4c1ce5bb6
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_name.h
@@ -0,0 +1,575 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_name_DEFINED
+#define SkOTTable_name_DEFINED
+
+#include "SkEndian.h"
+#include "SkOTTableTypes.h"
+#include "SkString.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableName {
+ static const SK_OT_CHAR TAG0 = 'n';
+ static const SK_OT_CHAR TAG1 = 'a';
+ static const SK_OT_CHAR TAG2 = 'm';
+ static const SK_OT_CHAR TAG3 = 'e';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableName>::value;
+
+ SK_OT_USHORT format;
+ static const SK_OT_USHORT format_0 = SkTEndian_SwapBE16(0);
+ /** Format 1 was added in OpenType 1.6 (April 2009). */
+ static const SK_OT_USHORT format_1 = SkTEndian_SwapBE16(1);
+
+ /** The number of name records which follow. */
+ SK_OT_USHORT count;
+
+ /** Offset in SK_OT_BYTEs to start of string storage area (from start of table). */
+ SK_OT_USHORT stringOffset;
+
+ struct Record {
+ /** The platform ID specifies how to interpret the encoding and language ID. */
+ struct PlatformID {
+ enum Value : SK_OT_USHORT {
+ Unicode = SkTEndian_SwapBE16(0),
+ Macintosh = SkTEndian_SwapBE16(1),
+ ISO = SkTEndian_SwapBE16(2), // Deprecated, use Unicode instead.
+ Windows = SkTEndian_SwapBE16(3),
+ Custom = SkTEndian_SwapBE16(4),
+ } value;
+ } platformID;
+
+ union EncodingID {
+ SK_OT_USHORT custom;
+
+ /** Always UTF-16BE. */
+ struct Unicode {
+ enum Value : SK_OT_USHORT {
+ Unicode10 = SkTEndian_SwapBE16(0),
+ Unicode11 = SkTEndian_SwapBE16(1),
+ ISO10646 = SkTEndian_SwapBE16(2), //deprecated, use Unicode11
+ Unicode20BMP = SkTEndian_SwapBE16(3),
+ Unicode20 = SkTEndian_SwapBE16(4),
+ UnicodeVariationSequences = SkTEndian_SwapBE16(5),
+ UnicodeFull = SkTEndian_SwapBE16(6),
+ } value;
+ } unicode;
+
+ /** These are Mac encodings, see http://www.unicode.org/Public/MAPPINGS/VENDORS/APPLE/
+ * for their mappings to unicode.
+ * Name table strings using PlatformID::Macintosh must use Roman.
+ */
+ struct Macintosh {
+ enum Value : SK_OT_USHORT {
+ Roman = SkTEndian_SwapBE16(0),
+ Japanese = SkTEndian_SwapBE16(1),
+ ChineseTraditional = SkTEndian_SwapBE16(2),
+ Korean = SkTEndian_SwapBE16(3),
+ Arabic = SkTEndian_SwapBE16(4),
+ Hebrew = SkTEndian_SwapBE16(5),
+ Greek = SkTEndian_SwapBE16(6),
+ Russian = SkTEndian_SwapBE16(7),
+ RSymbol = SkTEndian_SwapBE16(8),
+ Devanagari = SkTEndian_SwapBE16(9),
+ Gurmukhi = SkTEndian_SwapBE16(10),
+ Gujarati = SkTEndian_SwapBE16(11),
+ Oriya = SkTEndian_SwapBE16(12),
+ Bengali = SkTEndian_SwapBE16(13),
+ Tamil = SkTEndian_SwapBE16(14),
+ Telugu = SkTEndian_SwapBE16(15),
+ Kannada = SkTEndian_SwapBE16(16),
+ Malayalam = SkTEndian_SwapBE16(17),
+ Sinhalese = SkTEndian_SwapBE16(18),
+ Burmese = SkTEndian_SwapBE16(19),
+ Khmer = SkTEndian_SwapBE16(20),
+ Thai = SkTEndian_SwapBE16(21),
+ Laotian = SkTEndian_SwapBE16(22),
+ Georgian = SkTEndian_SwapBE16(23),
+ Armenian = SkTEndian_SwapBE16(24),
+ ChineseSimplified = SkTEndian_SwapBE16(25),
+ Tibetan = SkTEndian_SwapBE16(26),
+ Mongolian = SkTEndian_SwapBE16(27),
+ Geez = SkTEndian_SwapBE16(28),
+ Slavic = SkTEndian_SwapBE16(29),
+ Vietnamese = SkTEndian_SwapBE16(30),
+ Sindhi = SkTEndian_SwapBE16(31),
+ Uninterpreted = SkTEndian_SwapBE16(32),
+ } value;
+ } macintosh;
+
+ /** Deprecated, use Unicode instead. */
+ struct ISO {
+ enum Value : SK_OT_USHORT {
+ ASCII7 = SkTEndian_SwapBE16(0),
+ ISO10646 = SkTEndian_SwapBE16(1),
+ ISO88591 = SkTEndian_SwapBE16(2),
+ } value;
+ } iso;
+
+ /** Name table strings using PlatformID::Windows must use Symbol, UnicodeBMPUCS2, or
+ * UnicodeUCS4. Symbol and UnicodeBMPUCS2 are both UCS2-BE, UnicodeUCS4 is actually
+ * UTF-16BE.
+ */
+ struct Windows {
+ enum Value : SK_OT_USHORT {
+                Symbol = SkTEndian_SwapBE16(0), // UCS2-BE, but don't use this font to display its own name.
+ UnicodeBMPUCS2 = SkTEndian_SwapBE16(1), // UCS2-BE, Windows default
+ ShiftJIS = SkTEndian_SwapBE16(2),
+ PRC = SkTEndian_SwapBE16(3),
+ Big5 = SkTEndian_SwapBE16(4),
+ Wansung = SkTEndian_SwapBE16(5),
+ Johab = SkTEndian_SwapBE16(6),
+ UnicodeUCS4 = SkTEndian_SwapBE16(10), // UTF-16BE. It means UCS4 in charmaps.
+ } value;
+ } windows;
+ } encodingID;
+
+ /** LanguageIDs <= 0x7FFF are predefined.
+ * LanguageIDs > 0x7FFF are indexes into the langTagRecord array
+ * (in format 1 name tables, see SkOTTableName::format).
+ */
+ union LanguageID {
+ /** A value greater than 0x7FFF.
+ * languageTagID - 0x8000 is an index into the langTagRecord array.
+ */
+ SK_OT_USHORT languageTagID;
+
+ /** These are known as Language Designators.
+ * Apple now uses BCP 47 (post OSX10.4), so there will be no new entries.
+ */
+ struct Macintosh {
+ enum Value : SK_OT_USHORT {
+ English = SkTEndian_SwapBE16(0),
+ French = SkTEndian_SwapBE16(1),
+ German = SkTEndian_SwapBE16(2),
+ Italian = SkTEndian_SwapBE16(3),
+ Dutch = SkTEndian_SwapBE16(4),
+ Swedish = SkTEndian_SwapBE16(5),
+ Spanish = SkTEndian_SwapBE16(6),
+ Danish = SkTEndian_SwapBE16(7),
+ Portuguese = SkTEndian_SwapBE16(8),
+ Norwegian = SkTEndian_SwapBE16(9),
+ Hebrew = SkTEndian_SwapBE16(10),
+ Japanese = SkTEndian_SwapBE16(11),
+ Arabic = SkTEndian_SwapBE16(12),
+ Finnish = SkTEndian_SwapBE16(13),
+ Greek = SkTEndian_SwapBE16(14),
+ Icelandic = SkTEndian_SwapBE16(15),
+ Maltese = SkTEndian_SwapBE16(16),
+ Turkish = SkTEndian_SwapBE16(17),
+ Croatian = SkTEndian_SwapBE16(18),
+ ChineseTraditional = SkTEndian_SwapBE16(19),
+ Urdu = SkTEndian_SwapBE16(20),
+ Hindi = SkTEndian_SwapBE16(21),
+ Thai = SkTEndian_SwapBE16(22),
+ Korean = SkTEndian_SwapBE16(23),
+ Lithuanian = SkTEndian_SwapBE16(24),
+ Polish = SkTEndian_SwapBE16(25),
+ Hungarian = SkTEndian_SwapBE16(26),
+ Estonian = SkTEndian_SwapBE16(27),
+ Latvian = SkTEndian_SwapBE16(28),
+ Sami = SkTEndian_SwapBE16(29),
+ Faroese = SkTEndian_SwapBE16(30),
+ Farsi_Persian = SkTEndian_SwapBE16(31),
+ Russian = SkTEndian_SwapBE16(32),
+ ChineseSimplified = SkTEndian_SwapBE16(33),
+ Flemish = SkTEndian_SwapBE16(34),
+ IrishGaelic = SkTEndian_SwapBE16(35),
+ Albanian = SkTEndian_SwapBE16(36),
+ Romanian = SkTEndian_SwapBE16(37),
+ Czech = SkTEndian_SwapBE16(38),
+ Slovak = SkTEndian_SwapBE16(39),
+ Slovenian = SkTEndian_SwapBE16(40),
+ Yiddish = SkTEndian_SwapBE16(41),
+ Serbian = SkTEndian_SwapBE16(42),
+ Macedonian = SkTEndian_SwapBE16(43),
+ Bulgarian = SkTEndian_SwapBE16(44),
+ Ukrainian = SkTEndian_SwapBE16(45),
+ Byelorussian = SkTEndian_SwapBE16(46),
+ Uzbek = SkTEndian_SwapBE16(47),
+ Kazakh = SkTEndian_SwapBE16(48),
+ AzerbaijaniCyrillic = SkTEndian_SwapBE16(49),
+ AzerbaijaniArabic = SkTEndian_SwapBE16(50),
+ Armenian = SkTEndian_SwapBE16(51),
+ Georgian = SkTEndian_SwapBE16(52),
+ Moldavian = SkTEndian_SwapBE16(53),
+ Kirghiz = SkTEndian_SwapBE16(54),
+ Tajiki = SkTEndian_SwapBE16(55),
+ Turkmen = SkTEndian_SwapBE16(56),
+ MongolianTraditional = SkTEndian_SwapBE16(57),
+ MongolianCyrillic = SkTEndian_SwapBE16(58),
+ Pashto = SkTEndian_SwapBE16(59),
+ Kurdish = SkTEndian_SwapBE16(60),
+ Kashmiri = SkTEndian_SwapBE16(61),
+ Sindhi = SkTEndian_SwapBE16(62),
+ Tibetan = SkTEndian_SwapBE16(63),
+ Nepali = SkTEndian_SwapBE16(64),
+ Sanskrit = SkTEndian_SwapBE16(65),
+ Marathi = SkTEndian_SwapBE16(66),
+ Bengali = SkTEndian_SwapBE16(67),
+ Assamese = SkTEndian_SwapBE16(68),
+ Gujarati = SkTEndian_SwapBE16(69),
+ Punjabi = SkTEndian_SwapBE16(70),
+ Oriya = SkTEndian_SwapBE16(71),
+ Malayalam = SkTEndian_SwapBE16(72),
+ Kannada = SkTEndian_SwapBE16(73),
+ Tamil = SkTEndian_SwapBE16(74),
+ Telugu = SkTEndian_SwapBE16(75),
+ Sinhalese = SkTEndian_SwapBE16(76),
+ Burmese = SkTEndian_SwapBE16(77),
+ Khmer = SkTEndian_SwapBE16(78),
+ Lao = SkTEndian_SwapBE16(79),
+ Vietnamese = SkTEndian_SwapBE16(80),
+ Indonesian = SkTEndian_SwapBE16(81),
+ Tagalong = SkTEndian_SwapBE16(82),
+ MalayRoman = SkTEndian_SwapBE16(83),
+ MalayArabic = SkTEndian_SwapBE16(84),
+ Amharic = SkTEndian_SwapBE16(85),
+ Tigrinya = SkTEndian_SwapBE16(86),
+ Galla = SkTEndian_SwapBE16(87),
+ Somali = SkTEndian_SwapBE16(88),
+ Swahili = SkTEndian_SwapBE16(89),
+ Kinyarwanda_Ruanda = SkTEndian_SwapBE16(90),
+ Rundi = SkTEndian_SwapBE16(91),
+ Nyanja_Chewa = SkTEndian_SwapBE16(92),
+ Malagasy = SkTEndian_SwapBE16(93),
+ Esperanto = SkTEndian_SwapBE16(94),
+ Welsh = SkTEndian_SwapBE16(128),
+ Basque = SkTEndian_SwapBE16(129),
+ Catalan = SkTEndian_SwapBE16(130),
+ Latin = SkTEndian_SwapBE16(131),
+ Quenchua = SkTEndian_SwapBE16(132),
+ Guarani = SkTEndian_SwapBE16(133),
+ Aymara = SkTEndian_SwapBE16(134),
+ Tatar = SkTEndian_SwapBE16(135),
+ Uighur = SkTEndian_SwapBE16(136),
+ Dzongkha = SkTEndian_SwapBE16(137),
+ JavaneseRoman = SkTEndian_SwapBE16(138),
+ SundaneseRoman = SkTEndian_SwapBE16(139),
+ Galician = SkTEndian_SwapBE16(140),
+ Afrikaans = SkTEndian_SwapBE16(141),
+ Breton = SkTEndian_SwapBE16(142),
+ Inuktitut = SkTEndian_SwapBE16(143),
+ ScottishGaelic = SkTEndian_SwapBE16(144),
+ ManxGaelic = SkTEndian_SwapBE16(145),
+ IrishGaelicWithLenition = SkTEndian_SwapBE16(146),
+ Tongan = SkTEndian_SwapBE16(147),
+ GreekPolytonic = SkTEndian_SwapBE16(148),
+ Greenlandic = SkTEndian_SwapBE16(149),
+ AzerbaijaniRoman = SkTEndian_SwapBE16(150),
+ } value;
+ } macintosh;
+
+ /** These are known as LCIDs.
+ * On Windows the current set can be had from EnumSystemLocalesEx and LocaleNameToLCID.
+ */
+ struct Windows {
+ enum Value : SK_OT_USHORT {
+ Afrikaans_SouthAfrica = SkTEndian_SwapBE16(0x0436),
+ Albanian_Albania = SkTEndian_SwapBE16(0x041C),
+ Alsatian_France = SkTEndian_SwapBE16(0x0484),
+ Amharic_Ethiopia = SkTEndian_SwapBE16(0x045E),
+ Arabic_Algeria = SkTEndian_SwapBE16(0x1401),
+ Arabic_Bahrain = SkTEndian_SwapBE16(0x3C01),
+ Arabic_Egypt = SkTEndian_SwapBE16(0x0C01),
+ Arabic_Iraq = SkTEndian_SwapBE16(0x0801),
+ Arabic_Jordan = SkTEndian_SwapBE16(0x2C01),
+ Arabic_Kuwait = SkTEndian_SwapBE16(0x3401),
+ Arabic_Lebanon = SkTEndian_SwapBE16(0x3001),
+ Arabic_Libya = SkTEndian_SwapBE16(0x1001),
+ Arabic_Morocco = SkTEndian_SwapBE16(0x1801),
+ Arabic_Oman = SkTEndian_SwapBE16(0x2001),
+ Arabic_Qatar = SkTEndian_SwapBE16(0x4001),
+ Arabic_SaudiArabia = SkTEndian_SwapBE16(0x0401),
+ Arabic_Syria = SkTEndian_SwapBE16(0x2801),
+ Arabic_Tunisia = SkTEndian_SwapBE16(0x1C01),
+ Arabic_UAE = SkTEndian_SwapBE16(0x3801),
+ Arabic_Yemen = SkTEndian_SwapBE16(0x2401),
+ Armenian_Armenia = SkTEndian_SwapBE16(0x042B),
+ Assamese_India = SkTEndian_SwapBE16(0x044D),
+ AzeriCyrillic_Azerbaijan = SkTEndian_SwapBE16(0x082C),
+ AzeriLatin_Azerbaijan = SkTEndian_SwapBE16(0x042C),
+ Bashkir_Russia = SkTEndian_SwapBE16(0x046D),
+ Basque_Basque = SkTEndian_SwapBE16(0x042D),
+ Belarusian_Belarus = SkTEndian_SwapBE16(0x0423),
+ Bengali_Bangladesh = SkTEndian_SwapBE16(0x0845),
+ Bengali_India = SkTEndian_SwapBE16(0x0445),
+ BosnianCyrillic_BosniaAndHerzegovina = SkTEndian_SwapBE16(0x201A),
+ BosnianLatin_BosniaAndHerzegovina = SkTEndian_SwapBE16(0x141A),
+ Breton_France = SkTEndian_SwapBE16(0x047E),
+ Bulgarian_Bulgaria = SkTEndian_SwapBE16(0x0402),
+ Catalan_Catalan = SkTEndian_SwapBE16(0x0403),
+ Chinese_HongKongSAR = SkTEndian_SwapBE16(0x0C04),
+ Chinese_MacaoSAR = SkTEndian_SwapBE16(0x1404),
+ Chinese_PeoplesRepublicOfChina = SkTEndian_SwapBE16(0x0804),
+ Chinese_Singapore = SkTEndian_SwapBE16(0x1004),
+ Chinese_Taiwan = SkTEndian_SwapBE16(0x0404),
+ Corsican_France = SkTEndian_SwapBE16(0x0483),
+ Croatian_Croatia = SkTEndian_SwapBE16(0x041A),
+ CroatianLatin_BosniaAndHerzegovina = SkTEndian_SwapBE16(0x101A),
+ Czech_CzechRepublic = SkTEndian_SwapBE16(0x0405),
+ Danish_Denmark = SkTEndian_SwapBE16(0x0406),
+ Dari_Afghanistan = SkTEndian_SwapBE16(0x048C),
+ Divehi_Maldives = SkTEndian_SwapBE16(0x0465),
+ Dutch_Belgium = SkTEndian_SwapBE16(0x0813),
+ Dutch_Netherlands = SkTEndian_SwapBE16(0x0413),
+ English_Australia = SkTEndian_SwapBE16(0x0C09),
+ English_Belize = SkTEndian_SwapBE16(0x2809),
+ English_Canada = SkTEndian_SwapBE16(0x1009),
+ English_Caribbean = SkTEndian_SwapBE16(0x2409),
+ English_India = SkTEndian_SwapBE16(0x4009),
+ English_Ireland = SkTEndian_SwapBE16(0x1809),
+ English_Jamaica = SkTEndian_SwapBE16(0x2009),
+ English_Malaysia = SkTEndian_SwapBE16(0x4409),
+ English_NewZealand = SkTEndian_SwapBE16(0x1409),
+ English_RepublicOfThePhilippines = SkTEndian_SwapBE16(0x3409),
+ English_Singapore = SkTEndian_SwapBE16(0x4809),
+ English_SouthAfrica = SkTEndian_SwapBE16(0x1C09),
+ English_TrinidadAndTobago = SkTEndian_SwapBE16(0x2C09),
+ English_UnitedKingdom = SkTEndian_SwapBE16(0x0809),
+ English_UnitedStates = SkTEndian_SwapBE16(0x0409),
+ English_Zimbabwe = SkTEndian_SwapBE16(0x3009),
+ Estonian_Estonia = SkTEndian_SwapBE16(0x0425),
+ Faroese_FaroeIslands = SkTEndian_SwapBE16(0x0438),
+ Filipino_Philippines = SkTEndian_SwapBE16(0x0464),
+ Finnish_Finland = SkTEndian_SwapBE16(0x040B),
+ French_Belgium = SkTEndian_SwapBE16(0x080C),
+ French_Canada = SkTEndian_SwapBE16(0x0C0C),
+ French_France = SkTEndian_SwapBE16(0x040C),
+ French_Luxembourg = SkTEndian_SwapBE16(0x140c),
+ French_PrincipalityOfMonoco = SkTEndian_SwapBE16(0x180C),
+ French_Switzerland = SkTEndian_SwapBE16(0x100C),
+ Frisian_Netherlands = SkTEndian_SwapBE16(0x0462),
+ Galician_Galician = SkTEndian_SwapBE16(0x0456),
+ Georgian_Georgia = SkTEndian_SwapBE16(0x0437),
+ German_Austria = SkTEndian_SwapBE16(0x0C07),
+ German_Germany = SkTEndian_SwapBE16(0x0407),
+ German_Liechtenstein = SkTEndian_SwapBE16(0x1407),
+ German_Luxembourg = SkTEndian_SwapBE16(0x1007),
+ German_Switzerland = SkTEndian_SwapBE16(0x0807),
+ Greek_Greece = SkTEndian_SwapBE16(0x0408),
+ Greenlandic_Greenland = SkTEndian_SwapBE16(0x046F),
+ Gujarati_India = SkTEndian_SwapBE16(0x0447),
+ HausaLatin_Nigeria = SkTEndian_SwapBE16(0x0468),
+ Hebrew_Israel = SkTEndian_SwapBE16(0x040D),
+ Hindi_India = SkTEndian_SwapBE16(0x0439),
+ Hungarian_Hungary = SkTEndian_SwapBE16(0x040E),
+ Icelandic_Iceland = SkTEndian_SwapBE16(0x040F),
+ Igbo_Nigeria = SkTEndian_SwapBE16(0x0470),
+ Indonesian_Indonesia = SkTEndian_SwapBE16(0x0421),
+ Inuktitut_Canada = SkTEndian_SwapBE16(0x045D),
+ InuktitutLatin_Canada = SkTEndian_SwapBE16(0x085D),
+ Irish_Ireland = SkTEndian_SwapBE16(0x083C),
+ isiXhosa_SouthAfrica = SkTEndian_SwapBE16(0x0434),
+ isiZulu_SouthAfrica = SkTEndian_SwapBE16(0x0435),
+ Italian_Italy = SkTEndian_SwapBE16(0x0410),
+ Italian_Switzerland = SkTEndian_SwapBE16(0x0810),
+ Japanese_Japan = SkTEndian_SwapBE16(0x0411),
+ Kannada_India = SkTEndian_SwapBE16(0x044B),
+ Kazakh_Kazakhstan = SkTEndian_SwapBE16(0x043F),
+ Khmer_Cambodia = SkTEndian_SwapBE16(0x0453),
+ Kiche_Guatemala = SkTEndian_SwapBE16(0x0486),
+ Kinyarwanda_Rwanda = SkTEndian_SwapBE16(0x0487),
+ Kiswahili_Kenya = SkTEndian_SwapBE16(0x0441),
+ Konkani_India = SkTEndian_SwapBE16(0x0457),
+ Korean_Korea = SkTEndian_SwapBE16(0x0412),
+ Kyrgyz_Kyrgyzstan = SkTEndian_SwapBE16(0x0440),
+ Lao_LaoPDR = SkTEndian_SwapBE16(0x0454),
+ Latvian_Latvia = SkTEndian_SwapBE16(0x0426),
+ Lithuanian_Lithuania = SkTEndian_SwapBE16(0x0427),
+ LowerSorbian_Germany = SkTEndian_SwapBE16(0x082E),
+ Luxembourgish_Luxembourg = SkTEndian_SwapBE16(0x046E),
+ MacedonianFYROM_FormerYugoslavRepublicOfMacedonia = SkTEndian_SwapBE16(0x042F),
+ Malay_BruneiDarussalam = SkTEndian_SwapBE16(0x083E),
+ Malay_Malaysia = SkTEndian_SwapBE16(0x043E),
+ Malayalam_India = SkTEndian_SwapBE16(0x044C),
+ Maltese_Malta = SkTEndian_SwapBE16(0x043A),
+ Maori_NewZealand = SkTEndian_SwapBE16(0x0481),
+ Mapudungun_Chile = SkTEndian_SwapBE16(0x047A),
+ Marathi_India = SkTEndian_SwapBE16(0x044E),
+ Mohawk_Mohawk = SkTEndian_SwapBE16(0x047C),
+ MongolianCyrillic_Mongolia = SkTEndian_SwapBE16(0x0450),
+ MongolianTraditional_PeoplesRepublicOfChina = SkTEndian_SwapBE16(0x0850),
+ Nepali_Nepal = SkTEndian_SwapBE16(0x0461),
+ NorwegianBokmal_Norway = SkTEndian_SwapBE16(0x0414),
+ NorwegianNynorsk_Norway = SkTEndian_SwapBE16(0x0814),
+ Occitan_France = SkTEndian_SwapBE16(0x0482),
+ Odia_India = SkTEndian_SwapBE16(0x0448),
+ Pashto_Afghanistan = SkTEndian_SwapBE16(0x0463),
+ Polish_Poland = SkTEndian_SwapBE16(0x0415),
+ Portuguese_Brazil = SkTEndian_SwapBE16(0x0416),
+ Portuguese_Portugal = SkTEndian_SwapBE16(0x0816),
+ Punjabi_India = SkTEndian_SwapBE16(0x0446),
+ Quechua_Bolivia = SkTEndian_SwapBE16(0x046B),
+ Quechua_Ecuador = SkTEndian_SwapBE16(0x086B),
+ Quechua_Peru = SkTEndian_SwapBE16(0x0C6B),
+ Romanian_Romania = SkTEndian_SwapBE16(0x0418),
+ Romansh_Switzerland = SkTEndian_SwapBE16(0x0417),
+ Russian_Russia = SkTEndian_SwapBE16(0x0419),
+ SamiInari_Finland = SkTEndian_SwapBE16(0x243B),
+ SamiLule_Norway = SkTEndian_SwapBE16(0x103B),
+ SamiLule_Sweden = SkTEndian_SwapBE16(0x143B),
+ SamiNorthern_Finland = SkTEndian_SwapBE16(0x0C3B),
+ SamiNorthern_Norway = SkTEndian_SwapBE16(0x043B),
+ SamiNorthern_Sweden = SkTEndian_SwapBE16(0x083B),
+ SamiSkolt_Finland = SkTEndian_SwapBE16(0x203B),
+ SamiSouthern_Norway = SkTEndian_SwapBE16(0x183B),
+ SamiSouthern_Sweden = SkTEndian_SwapBE16(0x1C3B),
+ Sanskrit_India = SkTEndian_SwapBE16(0x044F),
+ SerbianCyrillic_BosniaAndHerzegovina = SkTEndian_SwapBE16(0x1C1A),
+ SerbianCyrillic_Serbia = SkTEndian_SwapBE16(0x0C1A),
+ SerbianLatin_BosniaAndHerzegovina = SkTEndian_SwapBE16(0x181A),
+ SerbianLatin_Serbia = SkTEndian_SwapBE16(0x081A),
+ SesothoSaLeboa_SouthAfrica = SkTEndian_SwapBE16(0x046C),
+ Setswana_SouthAfrica = SkTEndian_SwapBE16(0x0432),
+ Sinhala_SriLanka = SkTEndian_SwapBE16(0x045B),
+ Slovak_Slovakia = SkTEndian_SwapBE16(0x041B),
+ Slovenian_Slovenia = SkTEndian_SwapBE16(0x0424),
+ Spanish_Argentina = SkTEndian_SwapBE16(0x2C0A),
+ Spanish_Bolivia = SkTEndian_SwapBE16(0x400A),
+ Spanish_Chile = SkTEndian_SwapBE16(0x340A),
+ Spanish_Colombia = SkTEndian_SwapBE16(0x240A),
+ Spanish_CostaRica = SkTEndian_SwapBE16(0x140A),
+ Spanish_DominicanRepublic = SkTEndian_SwapBE16(0x1C0A),
+ Spanish_Ecuador = SkTEndian_SwapBE16(0x300A),
+ Spanish_ElSalvador = SkTEndian_SwapBE16(0x440A),
+ Spanish_Guatemala = SkTEndian_SwapBE16(0x100A),
+ Spanish_Honduras = SkTEndian_SwapBE16(0x480A),
+ Spanish_Mexico = SkTEndian_SwapBE16(0x080A),
+ Spanish_Nicaragua = SkTEndian_SwapBE16(0x4C0A),
+ Spanish_Panama = SkTEndian_SwapBE16(0x180A),
+ Spanish_Paraguay = SkTEndian_SwapBE16(0x3C0A),
+ Spanish_Peru = SkTEndian_SwapBE16(0x280A),
+ Spanish_PuertoRico = SkTEndian_SwapBE16(0x500A),
+ SpanishModernSort_Spain = SkTEndian_SwapBE16(0x0C0A),
+ SpanishTraditionalSort_Spain = SkTEndian_SwapBE16(0x040A),
+ Spanish_UnitedStates = SkTEndian_SwapBE16(0x540A),
+ Spanish_Uruguay = SkTEndian_SwapBE16(0x380A),
+ Spanish_Venezuela = SkTEndian_SwapBE16(0x200A),
+ Sweden_Finland = SkTEndian_SwapBE16(0x081D),
+ Swedish_Sweden = SkTEndian_SwapBE16(0x041D),
+ Syriac_Syria = SkTEndian_SwapBE16(0x045A),
+ TajikCyrillic_Tajikistan = SkTEndian_SwapBE16(0x0428),
+ TamazightLatin_Algeria = SkTEndian_SwapBE16(0x085F),
+ Tamil_India = SkTEndian_SwapBE16(0x0449),
+ Tatar_Russia = SkTEndian_SwapBE16(0x0444),
+ Telugu_India = SkTEndian_SwapBE16(0x044A),
+ Thai_Thailand = SkTEndian_SwapBE16(0x041E),
+ Tibetan_PRC = SkTEndian_SwapBE16(0x0451),
+ Turkish_Turkey = SkTEndian_SwapBE16(0x041F),
+ Turkmen_Turkmenistan = SkTEndian_SwapBE16(0x0442),
+ Uighur_PRC = SkTEndian_SwapBE16(0x0480),
+ Ukrainian_Ukraine = SkTEndian_SwapBE16(0x0422),
+ UpperSorbian_Germany = SkTEndian_SwapBE16(0x042E),
+ Urdu_IslamicRepublicOfPakistan = SkTEndian_SwapBE16(0x0420),
+ UzbekCyrillic_Uzbekistan = SkTEndian_SwapBE16(0x0843),
+ UzbekLatin_Uzbekistan = SkTEndian_SwapBE16(0x0443),
+ Vietnamese_Vietnam = SkTEndian_SwapBE16(0x042A),
+ Welsh_UnitedKingdom = SkTEndian_SwapBE16(0x0452),
+ Wolof_Senegal = SkTEndian_SwapBE16(0x0488),
+ Yakut_Russia = SkTEndian_SwapBE16(0x0485),
+ Yi_PRC = SkTEndian_SwapBE16(0x0478),
+ Yoruba_Nigeria = SkTEndian_SwapBE16(0x046A),
+ } value;
+ } windows;
+ } languageID;
+
+ /** NameIDs <= 0xFF are predefined. Those > 0xFF are font specific. */
+ union NameID {
+ /** A font specific name id which should be greater than 0xFF. */
+ SK_OT_USHORT fontSpecific;
+ struct Predefined {
+ enum Value : SK_OT_USHORT {
+ CopyrightNotice = SkTEndian_SwapBE16(0),
+ FontFamilyName = SkTEndian_SwapBE16(1),
+ FontSubfamilyName = SkTEndian_SwapBE16(2),
+ UniqueFontIdentifier = SkTEndian_SwapBE16(3),
+ FullFontName = SkTEndian_SwapBE16(4),
+ VersionString = SkTEndian_SwapBE16(5), //Version <number>.<number>
+ PostscriptName = SkTEndian_SwapBE16(6), //See spec for constraints.
+ Trademark = SkTEndian_SwapBE16(7),
+ ManufacturerName = SkTEndian_SwapBE16(8),
+ Designer = SkTEndian_SwapBE16(9),
+ Description = SkTEndian_SwapBE16(10),
+ URLVendor = SkTEndian_SwapBE16(11),
+ URLDesigner = SkTEndian_SwapBE16(12),
+ LicenseDescription = SkTEndian_SwapBE16(13),
+ LicenseInfoURL = SkTEndian_SwapBE16(14),
+ PreferredFamily = SkTEndian_SwapBE16(16),
+ PreferredSubfamily = SkTEndian_SwapBE16(17),
+ CompatibleFullName = SkTEndian_SwapBE16(18),
+ SampleText = SkTEndian_SwapBE16(19),
+ PostscriptCIDFindfontName = SkTEndian_SwapBE16(20),
+ WWSFamilyName = SkTEndian_SwapBE16(21),
+ WWSSubfamilyName = SkTEndian_SwapBE16(22),
+ } value;
+ } predefined;
+ } nameID;
+
+ /** The length of the string in SK_OT_BYTEs. */
+ SK_OT_USHORT length;
+
+ /** Offset in SK_OT_BYTEs from start of string storage area
+ * (see SkOTTableName::stringOffset).
+ */
+ SK_OT_USHORT offset;
+ }; //nameRecord[count];
+
+ struct Format1Ext {
+ /** The number of languageTagRecords which follow. */
+ SK_OT_USHORT langTagCount;
+
+ /** The encoding of a langTagRecord string is always UTF-16BE.
+ * The content should follow IETF specification BCP 47.
+ */
+ struct LangTagRecord {
+ /** The length of the string in SK_OT_BYTEs. */
+ SK_OT_USHORT length;
+
+ /** Offset in SK_OT_BYTEs from start of string storage area
+ * (see SkOTTableName::stringOffset).
+ */
+ SK_OT_USHORT offset;
+ }; //langTagRecord[langTagCount]
+ }; //format1ext (if format == format_1)
+
+ class Iterator {
+ public:
+ Iterator(const SkOTTableName& name) : fName(name), fIndex(0), fType(-1) { }
+ Iterator(const SkOTTableName& name, SkOTTableName::Record::NameID::Predefined::Value type)
+ : fName(name), fIndex(0), fType(type)
+ { }
+
+ void reset(SkOTTableName::Record::NameID::Predefined::Value type) {
+ fIndex = 0;
+ fType = type;
+ }
+
+ struct Record {
+ SkString name;
+ SkString language;
+ SK_OT_USHORT type;
+ };
+ bool next(Record&);
+
+ private:
+ const SkOTTableName& fName;
+ size_t fIndex;
+ int fType;
+ };
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableName) == 6, "sizeof_SkOTTableName_not_6");
+static_assert(sizeof(SkOTTableName::Format1Ext) == 2, "sizeof_SkOTTableNameF1_not_2");
+static_assert(sizeof(SkOTTableName::Format1Ext::LangTagRecord) == 4, "sizeof_SkOTTableNameLangTagRecord_not_4");
+static_assert(sizeof(SkOTTableName::Record) == 12, "sizeof_SkOTTableNameRecord_not_12");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_post.h b/gfx/skia/skia/src/sfnt/SkOTTable_post.h
new file mode 100644
index 000000000..1af692983
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_post.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_post_DEFINED
+#define SkOTTable_post_DEFINED
+
+#include "SkEndian.h"
+#include "SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTablePostScript {
+ static const SK_OT_CHAR TAG0 = 'p';
+ static const SK_OT_CHAR TAG1 = 'o';
+ static const SK_OT_CHAR TAG2 = 's';
+ static const SK_OT_CHAR TAG3 = 't';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTablePostScript>::value;
+
+ struct Format {
+ enum Value : SK_OT_Fixed {
+ version1 = SkTEndian_SwapBE32(0x00010000),
+ version2 = SkTEndian_SwapBE32(0x00020000),
+ version2_5 = SkTEndian_SwapBE32(0x00025000),
+ version3 = SkTEndian_SwapBE32(0x00030000),
+ version4 = SkTEndian_SwapBE32(0x00040000),
+ };
+ SK_OT_Fixed value;
+ } format;
+ SK_OT_Fixed italicAngle;
+ SK_OT_FWORD underlinePosition;
+ SK_OT_FWORD underlineThickness;
+ SK_OT_ULONG isFixedPitch;
+ SK_OT_ULONG minMemType42;
+ SK_OT_ULONG maxMemType42;
+ SK_OT_ULONG minMemType1;
+ SK_OT_ULONG maxMemType1;
+};
+
+#pragma pack(pop)
+
+
+#include <stddef.h>
+static_assert(offsetof(SkOTTablePostScript, maxMemType1) == 28, "SkOTTablePostScript_maxMemType1_not_at_28");
+static_assert(sizeof(SkOTTablePostScript) == 32, "sizeof_SkOTTablePostScript_not_32");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTUtils.cpp b/gfx/skia/skia/src/sfnt/SkOTUtils.cpp
new file mode 100644
index 000000000..cb533ff3f
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTUtils.cpp
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkData.h"
+#include "SkEndian.h"
+#include "SkSFNTHeader.h"
+#include "SkStream.h"
+#include "SkOTTable_head.h"
+#include "SkOTTable_name.h"
+#include "SkOTTableTypes.h"
+#include "SkOTUtils.h"
+
+extern const uint8_t SK_OT_GlyphData_NoOutline[] = {
+ 0x0,0x0, //SkOTTableGlyphData::numberOfContours
+ 0x0,0x0, //SkOTTableGlyphData::xMin
+ 0x0,0x0, //SkOTTableGlyphData::yMin
+ 0x0,0x0, //SkOTTableGlyphData::xMax
+ 0x0,0x0, //SkOTTableGlyphData::yMax
+
+ 0x0,0x0, //SkOTTableGlyphDataInstructions::length
+};
+
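+// Sums the table as big-endian 32-bit words; 'length' is in bytes and is
+// rounded up to a multiple of four, per the OpenType table checksum definition.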
+uint32_t SkOTUtils::CalcTableChecksum(SK_OT_ULONG *data, size_t length) {
+ uint32_t sum = 0;
+ SK_OT_ULONG *dataEnd = data + ((length + 3) & ~3) / sizeof(SK_OT_ULONG);
+ for (; data < dataEnd; ++data) {
+ sum += SkEndian_SwapBE32(*data);
+ }
+ return sum;
+}
+
+SkData* SkOTUtils::RenameFont(SkStreamAsset* fontData, const char* fontName, int fontNameLen) {
+
+ // Get the sfnt header.
+ SkSFNTHeader sfntHeader;
+ if (fontData->read(&sfntHeader, sizeof(sfntHeader)) < sizeof(sfntHeader)) {
+ return nullptr;
+ }
+
+ // Find the existing 'name' table.
+ int tableIndex;
+ SkSFNTHeader::TableDirectoryEntry tableEntry;
+ int numTables = SkEndian_SwapBE16(sfntHeader.numTables);
+ for (tableIndex = 0; tableIndex < numTables; ++tableIndex) {
+ if (fontData->read(&tableEntry, sizeof(tableEntry)) < sizeof(tableEntry)) {
+ return nullptr;
+ }
+ if (SkOTTableName::TAG == tableEntry.tag) {
+ break;
+ }
+ }
+ if (tableIndex == numTables) {
+ return nullptr;
+ }
+
+ if (!fontData->rewind()) {
+ return nullptr;
+ }
+
+ // The required 'name' record types: Family, Style, Unique, Full and PostScript.
+ const SkOTTableName::Record::NameID::Predefined::Value namesToCreate[] = {
+ SkOTTableName::Record::NameID::Predefined::FontFamilyName,
+ SkOTTableName::Record::NameID::Predefined::FontSubfamilyName,
+ SkOTTableName::Record::NameID::Predefined::UniqueFontIdentifier,
+ SkOTTableName::Record::NameID::Predefined::FullFontName,
+ SkOTTableName::Record::NameID::Predefined::PostscriptName,
+ };
+ const int namesCount = SK_ARRAY_COUNT(namesToCreate);
+
+ // Copy the data, leaving out the old name table.
+ // In theory, we could also remove the DSIG table if it exists.
+ size_t nameTableLogicalSize = sizeof(SkOTTableName) + (namesCount * sizeof(SkOTTableName::Record)) + (fontNameLen * sizeof(wchar_t));
+ size_t nameTablePhysicalSize = (nameTableLogicalSize + 3) & ~3; // Rounded up to a multiple of 4.
+
+ size_t oldNameTablePhysicalSize = (SkEndian_SwapBE32(tableEntry.logicalLength) + 3) & ~3; // Rounded up to a multiple of 4.
+ size_t oldNameTableOffset = SkEndian_SwapBE32(tableEntry.offset);
+
+ //originalDataSize is the size of the original data without the name table.
+ size_t originalDataSize = fontData->getLength() - oldNameTablePhysicalSize;
+ size_t newDataSize = originalDataSize + nameTablePhysicalSize;
+
+ auto rewrittenFontData = SkData::MakeUninitialized(newDataSize);
+ SK_OT_BYTE* data = static_cast<SK_OT_BYTE*>(rewrittenFontData->writable_data());
+
+ if (fontData->read(data, oldNameTableOffset) < oldNameTableOffset) {
+ return nullptr;
+ }
+ if (fontData->skip(oldNameTablePhysicalSize) < oldNameTablePhysicalSize) {
+ return nullptr;
+ }
+ if (fontData->read(data + oldNameTableOffset, originalDataSize - oldNameTableOffset) < originalDataSize - oldNameTableOffset) {
+ return nullptr;
+ }
+
+ //Fix up the offsets of the directory entries after the old 'name' table entry.
+ SkSFNTHeader::TableDirectoryEntry* currentEntry = reinterpret_cast<SkSFNTHeader::TableDirectoryEntry*>(data + sizeof(SkSFNTHeader));
+ SkSFNTHeader::TableDirectoryEntry* endEntry = currentEntry + numTables;
+ SkSFNTHeader::TableDirectoryEntry* headTableEntry = nullptr;
+ for (; currentEntry < endEntry; ++currentEntry) {
+ uint32_t oldOffset = SkEndian_SwapBE32(currentEntry->offset);
+ if (oldOffset > oldNameTableOffset) {
+ currentEntry->offset = SkEndian_SwapBE32(SkToU32(oldOffset - oldNameTablePhysicalSize));
+ }
+ if (SkOTTableHead::TAG == currentEntry->tag) {
+ headTableEntry = currentEntry;
+ }
+ }
+
+ // Make the table directory entry point to the new 'name' table.
+ SkSFNTHeader::TableDirectoryEntry* nameTableEntry = reinterpret_cast<SkSFNTHeader::TableDirectoryEntry*>(data + sizeof(SkSFNTHeader)) + tableIndex;
+ nameTableEntry->logicalLength = SkEndian_SwapBE32(SkToU32(nameTableLogicalSize));
+ nameTableEntry->offset = SkEndian_SwapBE32(SkToU32(originalDataSize));
+
+ // Write the new 'name' table after the original font data.
+ SkOTTableName* nameTable = reinterpret_cast<SkOTTableName*>(data + originalDataSize);
+ unsigned short stringOffset = sizeof(SkOTTableName) + (namesCount * sizeof(SkOTTableName::Record));
+ nameTable->format = SkOTTableName::format_0;
+ nameTable->count = SkEndian_SwapBE16(namesCount);
+ nameTable->stringOffset = SkEndian_SwapBE16(stringOffset);
+
+ SkOTTableName::Record* nameRecords = reinterpret_cast<SkOTTableName::Record*>(data + originalDataSize + sizeof(SkOTTableName));
+ for (int i = 0; i < namesCount; ++i) {
+ nameRecords[i].platformID.value = SkOTTableName::Record::PlatformID::Windows;
+ nameRecords[i].encodingID.windows.value = SkOTTableName::Record::EncodingID::Windows::UnicodeBMPUCS2;
+ nameRecords[i].languageID.windows.value = SkOTTableName::Record::LanguageID::Windows::English_UnitedStates;
+ nameRecords[i].nameID.predefined.value = namesToCreate[i];
+ nameRecords[i].offset = SkEndian_SwapBE16(0);
+ nameRecords[i].length = SkEndian_SwapBE16(SkToU16(fontNameLen * sizeof(wchar_t)));
+ }
+
+ SK_OT_USHORT* nameString = reinterpret_cast<SK_OT_USHORT*>(data + originalDataSize + stringOffset);
+ for (int i = 0; i < fontNameLen; ++i) {
+ nameString[i] = SkEndian_SwapBE16(fontName[i]);
+ }
+
+ unsigned char* logical = data + originalDataSize + nameTableLogicalSize;
+ unsigned char* physical = data + originalDataSize + nameTablePhysicalSize;
+ for (; logical < physical; ++logical) {
+ *logical = 0;
+ }
+
+ // Update the table checksum in the directory entry.
+ nameTableEntry->checksum = SkEndian_SwapBE32(SkOTUtils::CalcTableChecksum(reinterpret_cast<SK_OT_ULONG*>(nameTable), nameTableLogicalSize));
+
+ // Update the checksum adjustment in the head table.
+ if (headTableEntry) {
+ size_t headTableOffset = SkEndian_SwapBE32(headTableEntry->offset);
+ if (headTableOffset + sizeof(SkOTTableHead) < originalDataSize) {
+ SkOTTableHead* headTable = reinterpret_cast<SkOTTableHead*>(data + headTableOffset);
+ headTable->checksumAdjustment = SkEndian_SwapBE32(0);
+ uint32_t unadjustedFontChecksum = SkOTUtils::CalcTableChecksum(reinterpret_cast<SK_OT_ULONG*>(data), originalDataSize + nameTablePhysicalSize);
+ headTable->checksumAdjustment = SkEndian_SwapBE32(SkOTTableHead::fontChecksum - unadjustedFontChecksum);
+ }
+ }
+
+ return rewrittenFontData.release();
+}
+
+
+SkOTUtils::LocalizedStrings_NameTable*
+SkOTUtils::LocalizedStrings_NameTable::CreateForFamilyNames(const SkTypeface& typeface) {
+ static const SkFontTableTag nameTag = SkSetFourByteTag('n','a','m','e');
+ size_t nameTableSize = typeface.getTableSize(nameTag);
+ if (0 == nameTableSize) {
+ return nullptr;
+ }
+ SkAutoTDeleteArray<uint8_t> nameTableData(new uint8_t[nameTableSize]);
+ size_t copied = typeface.getTableData(nameTag, 0, nameTableSize, nameTableData.get());
+ if (copied != nameTableSize) {
+ return nullptr;
+ }
+
+ return new SkOTUtils::LocalizedStrings_NameTable((SkOTTableName*)nameTableData.release(),
+ SkOTUtils::LocalizedStrings_NameTable::familyNameTypes,
+ SK_ARRAY_COUNT(SkOTUtils::LocalizedStrings_NameTable::familyNameTypes));
+}
+
+bool SkOTUtils::LocalizedStrings_NameTable::next(SkTypeface::LocalizedString* localizedString) {
+ do {
+ SkOTTableName::Iterator::Record record;
+ if (fFamilyNameIter.next(record)) {
+ localizedString->fString = record.name;
+ localizedString->fLanguage = record.language;
+ return true;
+ }
+ if (fTypesCount == fTypesIndex + 1) {
+ return false;
+ }
+ ++fTypesIndex;
+ fFamilyNameIter.reset(fTypes[fTypesIndex]);
+ } while (true);
+}
+
+SkOTTableName::Record::NameID::Predefined::Value
+SkOTUtils::LocalizedStrings_NameTable::familyNameTypes[3] = {
+ SkOTTableName::Record::NameID::Predefined::FontFamilyName,
+ SkOTTableName::Record::NameID::Predefined::PreferredFamily,
+ SkOTTableName::Record::NameID::Predefined::WWSFamilyName,
+};
diff --git a/gfx/skia/skia/src/sfnt/SkOTUtils.h b/gfx/skia/skia/src/sfnt/SkOTUtils.h
new file mode 100644
index 000000000..1773e69ab
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTUtils.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTUtils_DEFINED
+#define SkOTUtils_DEFINED
+
+#include "SkOTTableTypes.h"
+#include "SkOTTable_name.h"
+#include "SkTypeface.h"
+
+class SkData;
+class SkStream;
+
+struct SkOTUtils {
+ /**
+ * Calculates the OpenType checksum for data.
+ */
+ static uint32_t CalcTableChecksum(SK_OT_ULONG *data, size_t length);
+
+ /**
+ * Renames an sfnt font. On failure (invalid data or not an sfnt font)
+ * returns nullptr.
+ *
+ * Essentially, this removes any existing 'name' table and replaces it
+ * with a new one in which FontFamilyName, FontSubfamilyName,
+ * UniqueFontIdentifier, FullFontName, and PostscriptName are fontName.
+ *
+ * The new 'name' table records will be written with the Windows,
+ * UnicodeBMPUCS2, and English_UnitedStates settings.
+ *
+ * fontName and fontNameLen must be specified in terms of ASCII chars.
+ *
+ * Does not affect fontData's ownership.
+ */
+ static SkData* RenameFont(SkStreamAsset* fontData, const char* fontName, int fontNameLen);
+
+    /** An implementation of LocalizedStrings which obtains its data from a 'name' table. */
+ class LocalizedStrings_NameTable : public SkTypeface::LocalizedStrings {
+ public:
+ /** Takes ownership of the nameTableData and will free it with SK_DELETE. */
+ LocalizedStrings_NameTable(SkOTTableName* nameTableData,
+ SkOTTableName::Record::NameID::Predefined::Value types[],
+ int typesCount)
+ : fTypes(types), fTypesCount(typesCount), fTypesIndex(0)
+ , fNameTableData(nameTableData), fFamilyNameIter(*nameTableData, fTypes[fTypesIndex])
+ { }
+
+ /** Creates an iterator over all the family names in the 'name' table of a typeface.
+ * If no valid 'name' table can be found, returns nullptr.
+ */
+ static LocalizedStrings_NameTable* CreateForFamilyNames(const SkTypeface& typeface);
+
+ bool next(SkTypeface::LocalizedString* localizedString) override;
+ private:
+ static SkOTTableName::Record::NameID::Predefined::Value familyNameTypes[3];
+
+ SkOTTableName::Record::NameID::Predefined::Value* fTypes;
+ int fTypesCount;
+ int fTypesIndex;
+ SkAutoTDeleteArray<SkOTTableName> fNameTableData;
+ SkOTTableName::Iterator fFamilyNameIter;
+ };
+
+ /** An implementation of LocalizedStrings which has one name. */
+ class LocalizedStrings_SingleName : public SkTypeface::LocalizedStrings {
+ public:
+ LocalizedStrings_SingleName(SkString name, SkString language)
+ : fName(name), fLanguage(language), fHasNext(true)
+ { }
+
+ bool next(SkTypeface::LocalizedString* localizedString) override {
+ localizedString->fString = fName;
+ localizedString->fLanguage = fLanguage;
+
+ bool hadNext = fHasNext;
+ fHasNext = false;
+ return hadNext;
+ }
+
+ private:
+ SkString fName;
+ SkString fLanguage;
+ bool fHasNext;
+ };
+};
+
+#endif
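A usage sketch for the RenameFont() declared above (the helper function and the replacement name are illustrative; per the comment, the new name must be plain ASCII and the caller takes ownership of the returned SkData):

#include <string.h>

#include "SkData.h"
#include "SkOTUtils.h"
#include "SkStream.h"

// Rewrite the 'name' table of an in-memory sfnt font.
static SkData* RenameToExampleFamily(SkStreamAsset* fontStream) {
    static const char kNewName[] = "Example Family";
    // RenameFont returns nullptr if fontStream does not hold a valid sfnt font.
    return SkOTUtils::RenameFont(fontStream, kNewName, (int) strlen(kNewName));
}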
diff --git a/gfx/skia/skia/src/sfnt/SkPanose.h b/gfx/skia/skia/src/sfnt/SkPanose.h
new file mode 100644
index 000000000..e3991697c
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkPanose.h
@@ -0,0 +1,527 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPanose_DEFINED
+#define SkPanose_DEFINED
+
+#include "SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkPanose {
+ //This value changes the meaning of the following 9 bytes.
+ enum class FamilyType : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ TextAndDisplay = 2,
+ Script = 3,
+ Decorative = 4,
+ Pictoral = 5,
+ } bFamilyType;
+
+ union Data {
+ struct TextAndDisplay {
+ enum class SerifStyle : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Cove = 2,
+ ObtuseCove = 3,
+ SquareCove = 4,
+ ObtuseSquareCove = 5,
+ Square = 6,
+ Thin = 7,
+ Bone = 8,
+ Exaggerated = 9,
+ Triangle = 10,
+ NormalSans = 11,
+ ObtuseSans = 12,
+ PerpSans = 13,
+ Flared = 14,
+ Rounded = 15,
+ } bSerifStyle;
+
+ enum class Weight : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ VeryLight = 2,
+ Light = 3,
+ Thin = 4,
+ Book = 5,
+ Medium = 6,
+ Demi = 7,
+ Bold = 8,
+ Heavy = 9,
+ Black = 10,
+ ExtraBlack = 11,
+ } bWeight;
+
+ enum class Proportion : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ OldStyle = 2,
+ Modern = 3,
+ EvenWidth = 4,
+ Expanded = 5,
+ Condensed = 6,
+ VeryExpanded = 7,
+ VeryCondensed = 8,
+ Monospaced = 9,
+ } bProportion;
+
+ enum class Contrast : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ None = 2,
+ VeryLow = 3,
+ Low = 4,
+ MediumLow = 5,
+ Medium = 6,
+ MediumHigh = 7,
+ High = 8,
+ VeryHigh = 9,
+ } bContrast;
+
+#ifdef SK_WIN_PANOSE
+ //This is what Windows (and FontForge and Apple TT spec) define.
+ //The Impact font uses 9.
+ enum class StrokeVariation : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ GradualDiagonal = 2,
+ GradualTransitional = 3,
+ GradualVertical = 4,
+ GradualHorizontal = 5,
+ RapidVertical = 6,
+ RapidHorizontal = 7,
+ InstantVertical = 8,
+ } bStrokeVariation;
+#else
+ //Stroke variation description in OT OS/2 ver0,ver1 is incorrect.
+ //This is what HP Panose says.
+ enum class StrokeVariation : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ NoVariation = 2,
+ Gradual_Diagonal = 3,
+ Gradual_Transitional = 4,
+ Gradual_Vertical = 5,
+ Gradual_Horizontal = 6,
+ Rapid_Vertical = 7,
+ Rapid_Horizontal = 8,
+ Instant_Vertical = 9,
+ Instant_Horizontal = 10,
+ } bStrokeVariation;
+#endif
+
+ enum class ArmStyle : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ StraightArms_Horizontal = 2,
+ StraightArms_Wedge = 3,
+ StraightArms_Vertical = 4,
+ StraightArms_SingleSerif = 5,
+ StraightArms_DoubleSerif = 6,
+ NonStraightArms_Horizontal = 7,
+ NonStraightArms_Wedge = 8,
+ NonStraightArms_Vertical = 9,
+ NonStraightArms_SingleSerif = 10,
+ NonStraightArms_DoubleSerif = 11,
+ } bArmStyle;
+
+ enum class Letterform : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Normal_Contact = 2,
+ Normal_Weighted = 3,
+ Normal_Boxed = 4,
+ Normal_Flattened = 5,
+ Normal_Rounded = 6,
+ Normal_OffCenter = 7,
+ Normal_Square = 8,
+ Oblique_Contact = 9,
+ Oblique_Weighted = 10,
+ Oblique_Boxed = 11,
+ Oblique_Flattened = 12,
+ Oblique_Rounded = 13,
+ Oblique_OffCenter = 14,
+ Oblique_Square = 15,
+ } bLetterform;
+
+ enum class Midline : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Standard_Trimmed = 2,
+ Standard_Pointed = 3,
+ Standard_Serifed = 4,
+ High_Trimmed = 5,
+ High_Pointed = 6,
+ High_Serifed = 7,
+ Constant_Trimmed = 8,
+ Constant_Pointed = 9,
+ Constant_Serifed = 10,
+ Low_Trimmed = 11,
+ Low_Pointed = 12,
+ Low_Serifed = 13,
+ } bMidline;
+
+ enum class XHeight : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Constant_Small = 2,
+ Constant_Standard = 3,
+ Constant_Large = 4,
+ Ducking_Small = 5,
+ Ducking_Standard = 6,
+ Ducking_Large = 7,
+ } bXHeight;
+ } textAndDisplay;
+
+ struct Script {
+ enum class ToolKind : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ FlatNib = 2,
+ PressurePoint = 3,
+ Engraved = 4,
+ Ball = 5,
+ Brush = 6,
+ Rough = 7,
+ FeltPen = 8,
+ WildBrush = 9,
+ } bToolKind;
+
+ enum class Weight : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ VeryLight = 2,
+ Light = 3,
+ Thin = 4,
+ Book = 5,
+ Medium = 6,
+ Demi = 7,
+ Bold = 8,
+ Heavy = 9,
+ Black = 10,
+ ExtraBlack = 11,
+ } bWeight;
+
+ enum class Spacing : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ ProportionalSpaced = 2,
+ Monospaced = 3,
+ } bSpacing;
+
+ enum class AspectRatio : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ VeryCondensed = 2,
+ Condensed = 3,
+ Normal = 4,
+ Expanded = 5,
+ VeryExpanded = 6,
+ } bAspectRatio;
+
+ enum class Contrast : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ None = 2,
+ VeryLow = 3,
+ Low = 4,
+ MediumLow = 5,
+ Medium = 6,
+ MediumHigh = 7,
+ High = 8,
+ VeryHigh = 9,
+ } bContrast;
+
+ enum class Topology : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Roman_Disconnected = 2,
+ Roman_Trailing = 3,
+ Roman_Connected = 4,
+ Cursive_Disconnected = 5,
+ Cursive_Trailing = 6,
+ Cursive_Connected = 7,
+ Blackletter_Disconnected = 8,
+ Blackletter_Trailing = 9,
+ Blackletter_Connected = 10,
+ } bTopology;
+
+ enum class Form : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Upright_NoWrapping = 2,
+ Upright_SomeWrapping = 3,
+ Upright_MoreWrapping = 4,
+ Upright_ExtremeWrapping = 5,
+ Oblique_NoWrapping = 6,
+ Oblique_SomeWrapping = 7,
+ Oblique_MoreWrapping = 8,
+ Oblique_ExtremeWrapping = 9,
+ Exaggerated_NoWrapping = 10,
+ Exaggerated_SomeWrapping = 11,
+ Exaggerated_MoreWrapping = 12,
+ Exaggerated_ExtremeWrapping = 13,
+ } bForm;
+
+ enum class Finials : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ None_NoLoops = 2,
+ None_ClosedLoops = 3,
+ None_OpenLoops = 4,
+ Sharp_NoLoops = 5,
+ Sharp_ClosedLoops = 6,
+ Sharp_OpenLoops = 7,
+ Tapered_NoLoops = 8,
+ Tapered_ClosedLoops = 9,
+ Tapered_OpenLoops = 10,
+ Round_NoLoops = 11,
+ Round_ClosedLoops = 12,
+ Round_OpenLoops = 13,
+ } bFinials;
+
+ enum class XAscent : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ VeryLow = 2,
+ Low = 3,
+ Medium = 4,
+ High = 5,
+ VeryHigh = 6,
+ } bXAscent;
+ } script;
+
+ struct Decorative {
+ enum class Class : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Derivative = 2,
+ NonStandard_Topology = 3,
+ NonStandard_Elements = 4,
+ NonStandard_Aspect = 5,
+ Initials = 6,
+ Cartoon = 7,
+ PictureStems = 8,
+ Ornamented = 9,
+ TextAndBackground = 10,
+ Collage = 11,
+ Montage = 12,
+ } bClass;
+
+ enum class Weight : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ VeryLight = 2,
+ Light = 3,
+ Thin = 4,
+ Book = 5,
+ Medium = 6,
+ Demi = 7,
+ Bold = 8,
+ Heavy = 9,
+ Black = 10,
+ ExtraBlack = 11,
+ } bWeight;
+
+ enum class Aspect : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ SuperCondensed = 2,
+ VeryCondensed = 3,
+ Condensed = 4,
+ Normal = 5,
+ Extended = 6,
+ VeryExtended = 7,
+ SuperExtended = 8,
+ Monospaced = 9,
+ } bAspect;
+
+ enum class Contrast : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ None = 2,
+ VeryLow = 3,
+ Low = 4,
+ MediumLow = 5,
+ Medium = 6,
+ MediumHigh = 7,
+ High = 8,
+ VeryHigh = 9,
+ HorizontalLow = 10,
+ HorizontalMedium = 11,
+ HorizontalHigh = 12,
+ Broken = 13,
+ } bContrast;
+
+ enum class SerifVariant : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Cove = 2,
+ ObtuseCove = 3,
+ SquareCove = 4,
+ ObtuseSquareCove = 5,
+ Square = 6,
+ Thin = 7,
+ Oval = 8,
+ Exaggerated = 9,
+ Triangle = 10,
+ NormalSans = 11,
+ ObtuseSans = 12,
+ PerpendicularSans = 13,
+ Flared = 14,
+ Rounded = 15,
+ Script = 16,
+ } bSerifVariant;
+
+ enum class Treatment : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ None_StandardSolidFill = 2,
+ White_NoFill = 3,
+ PatternedFill = 4,
+ ComplexFill = 5,
+ ShapedFill = 6,
+ DrawnDistressed = 7,
+ } bTreatment;
+
+ enum class Lining : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ None = 2,
+ Inline = 3,
+ Outline = 4,
+ Engraved = 5,
+ Shadow = 6,
+ Relief = 7,
+ Backdrop = 8,
+ } bLining;
+
+ enum class Topology : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Standard = 2,
+ Square = 3,
+ MultipleSegment = 4,
+ DecoWacoMidlines = 5,
+ UnevenWeighting = 6,
+ DiverseArms = 7,
+ DiverseForms = 8,
+ LombardicForms = 9,
+ UpperCaseInLowerCase = 10,
+ ImpliedTopology = 11,
+ HorseshoeEandA = 12,
+ Cursive = 13,
+ Blackletter = 14,
+ SwashVariance = 15,
+ } bTopology;
+
+ enum class RangeOfCharacters : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ ExtendedCollection = 2,
+ Litterals = 3,
+ NoLowerCase = 4,
+ SmallCaps = 5,
+ } bRangeOfCharacters;
+ } decorative;
+
+ struct Pictoral {
+ enum class Kind : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Montages = 2,
+ Pictures = 3,
+ Shapes = 4,
+ Scientific = 5,
+ Music = 6,
+ Expert = 7,
+ Patterns = 8,
+ Boarders = 9,
+ Icons = 10,
+ Logos = 11,
+ IndustrySpecific = 12,
+ } bKind;
+
+ enum class Weight : SK_OT_BYTE {
+ NoFit = 1,
+ } bWeight;
+
+ enum class Spacing : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ ProportionalSpaced = 2,
+ Monospaced = 3,
+ } bSpacing;
+
+ enum class AspectRatioAndContrast : SK_OT_BYTE {
+ NoFit = 1,
+ } bAspectRatioAndContrast;
+
+ enum class AspectRatio94 : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ NoWidth = 2,
+ ExceptionallyWide = 3,
+ SuperWide = 4,
+ VeryWide = 5,
+ Wide = 6,
+ Normal = 7,
+ Narrow = 8,
+ VeryNarrow = 9,
+ } bAspectRatio94;
+
+ enum class AspectRatio119 : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ NoWidth = 2,
+ ExceptionallyWide = 3,
+ SuperWide = 4,
+ VeryWide = 5,
+ Wide = 6,
+ Normal = 7,
+ Narrow = 8,
+ VeryNarrow = 9,
+ } bAspectRatio119;
+
+ enum class AspectRatio157 : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ NoWidth = 2,
+ ExceptionallyWide = 3,
+ SuperWide = 4,
+ VeryWide = 5,
+ Wide = 6,
+ Normal = 7,
+ Narrow = 8,
+ VeryNarrow = 9,
+ } bAspectRatio157;
+
+ enum class AspectRatio163 : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ NoWidth = 2,
+ ExceptionallyWide = 3,
+ SuperWide = 4,
+ VeryWide = 5,
+ Wide = 6,
+ Normal = 7,
+ Narrow = 8,
+ VeryNarrow = 9,
+ } bAspectRatio163;
+ } pictoral;
+ } data;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkPanose) == 10, "sizeof_SkPanose_not_10");
+
+#endif
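Since the union is selected by bFamilyType, readers are expected to check that byte before interpreting the rest of the record. A small interpretation sketch (the helper name and the "bold or heavier" policy are illustrative assumptions):

#include "SkPanose.h"

// Classify a 10-byte PANOSE record, e.g. one copied out of an OS/2 table.
static bool IsBoldTextFace(const SkPanose& panose) {
    // bFamilyType decides which union member the remaining nine bytes describe.
    if (panose.bFamilyType != SkPanose::FamilyType::TextAndDisplay) {
        return false;
    }
    using Weight = SkPanose::Data::TextAndDisplay::Weight;
    Weight w = panose.data.textAndDisplay.bWeight;
    return w == Weight::Bold || w == Weight::Heavy ||
           w == Weight::Black || w == Weight::ExtraBlack;
}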
diff --git a/gfx/skia/skia/src/sfnt/SkSFNTHeader.h b/gfx/skia/skia/src/sfnt/SkSFNTHeader.h
new file mode 100644
index 000000000..c2ea1e0b2
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkSFNTHeader.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSFNTHeader_DEFINED
+#define SkSFNTHeader_DEFINED
+
+#include "SkEndian.h"
+#include "SkOTTableTypes.h"
+
+//All SK_SFNT_ prefixed types should be considered as big endian.
+typedef uint16_t SK_SFNT_USHORT;
+typedef uint32_t SK_SFNT_ULONG;
+
+#pragma pack(push, 1)
+
+struct SkSFNTHeader {
+ SK_SFNT_ULONG fontType;
+ struct fontType_WindowsTrueType {
+ static const SK_OT_CHAR TAG0 = 0;
+ static const SK_OT_CHAR TAG1 = 1;
+ static const SK_OT_CHAR TAG2 = 0;
+ static const SK_OT_CHAR TAG3 = 0;
+ static const SK_OT_ULONG TAG = SkOTTableTAG<fontType_WindowsTrueType>::value;
+ };
+ struct fontType_MacTrueType {
+ static const SK_OT_CHAR TAG0 = 't';
+ static const SK_OT_CHAR TAG1 = 'r';
+ static const SK_OT_CHAR TAG2 = 'u';
+ static const SK_OT_CHAR TAG3 = 'e';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<fontType_MacTrueType>::value;
+ };
+ struct fontType_PostScript {
+ static const SK_OT_CHAR TAG0 = 't';
+ static const SK_OT_CHAR TAG1 = 'y';
+ static const SK_OT_CHAR TAG2 = 'p';
+ static const SK_OT_CHAR TAG3 = '1';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<fontType_PostScript>::value;
+ };
+ struct fontType_OpenTypeCFF {
+ static const SK_OT_CHAR TAG0 = 'O';
+ static const SK_OT_CHAR TAG1 = 'T';
+ static const SK_OT_CHAR TAG2 = 'T';
+ static const SK_OT_CHAR TAG3 = 'O';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<fontType_OpenTypeCFF>::value;
+ };
+
+ SK_SFNT_USHORT numTables;
+ SK_SFNT_USHORT searchRange;
+ SK_SFNT_USHORT entrySelector;
+ SK_SFNT_USHORT rangeShift;
+
+ struct TableDirectoryEntry {
+ SK_SFNT_ULONG tag;
+ SK_SFNT_ULONG checksum;
+ SK_SFNT_ULONG offset; //From beginning of header.
+ SK_SFNT_ULONG logicalLength;
+ }; //tableDirectoryEntries[numTables]
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkSFNTHeader) == 12, "sizeof_SkSFNTHeader_not_12");
+static_assert(sizeof(SkSFNTHeader::TableDirectoryEntry) == 16, "sizeof_SkSFNTHeader_TableDirectoryEntry_not_16");
+
+#endif
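The table directory entries follow the fixed 12-byte header directly, as the trailing comment notes. A bounds-checked lookup sketch (the helper name is illustrative; the tag argument is expected in big-endian file order, e.g. one of the SkOTTableTAG<>::value constants above):

#include "SkEndian.h"
#include "SkSFNTHeader.h"

// Locate a table directory entry in a complete font blob held in memory.
static const SkSFNTHeader::TableDirectoryEntry*
FindTableEntry(const uint8_t* data, size_t size, SK_SFNT_ULONG tag) {
    if (size < sizeof(SkSFNTHeader)) {
        return nullptr;
    }
    const SkSFNTHeader* header = reinterpret_cast<const SkSFNTHeader*>(data);
    int numTables = SkEndian_SwapBE16(header->numTables);
    const SkSFNTHeader::TableDirectoryEntry* entry =
            reinterpret_cast<const SkSFNTHeader::TableDirectoryEntry*>(header + 1);
    for (int i = 0; i < numTables; ++i, ++entry) {
        if (reinterpret_cast<const uint8_t*>(entry + 1) > data + size) {
            return nullptr;  // directory would run past the end of the data
        }
        if (entry->tag == tag) {  // both sides are stored big-endian; compare raw
            return entry;
        }
    }
    return nullptr;
}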
diff --git a/gfx/skia/skia/src/sfnt/SkTTCFHeader.h b/gfx/skia/skia/src/sfnt/SkTTCFHeader.h
new file mode 100644
index 000000000..2dc77eea5
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkTTCFHeader.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTTCFHeader_DEFINED
+#define SkTTCFHeader_DEFINED
+
+#include "SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkTTCFHeader {
+ SK_SFNT_ULONG ttcTag;
+ static const SK_OT_CHAR TAG0 = 't';
+ static const SK_OT_CHAR TAG1 = 't';
+ static const SK_OT_CHAR TAG2 = 'c';
+ static const SK_OT_CHAR TAG3 = 'f';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkTTCFHeader>::value;
+
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed version_1 = SkTEndian_SwapBE32(1 << 16);
+ static const SK_OT_Fixed version_2 = SkTEndian_SwapBE32(2 << 16);
+
+ SK_OT_ULONG numOffsets;
+ //SK_OT_ULONG offset[numOffsets]
+
+ struct Version2Ext {
+ SK_OT_ULONG dsigType;
+ struct dsigType_None {
+ static const SK_OT_CHAR TAG0 = 0;
+ static const SK_OT_CHAR TAG1 = 0;
+ static const SK_OT_CHAR TAG2 = 0;
+ static const SK_OT_CHAR TAG3 = 0;
+ static const SK_OT_ULONG TAG = SkOTTableTAG<dsigType_None>::value;
+ };
+ struct dsigType_Format1 {
+ static const SK_OT_CHAR TAG0 = 'D';
+ static const SK_OT_CHAR TAG1 = 'S';
+ static const SK_OT_CHAR TAG2 = 'I';
+ static const SK_OT_CHAR TAG3 = 'G';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<dsigType_Format1>::value;
+ };
+ SK_OT_ULONG dsigLength; //Length of DSIG table (in bytes).
+ SK_OT_ULONG dsigOffset; //Offset of DSIG table from the beginning of file (in bytes).
+ };// version2ext (if version == version_2)
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkTTCFHeader) == 12, "sizeof_SkTTCFHeader_not_12");
+
+#endif
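A quick detection sketch built on this header (the helper name is illustrative; SkSFNTHeader.h is included first because SkTTCFHeader.h relies on its SK_SFNT_ULONG typedef):

#include "SkEndian.h"
#include "SkSFNTHeader.h"  // provides SK_SFNT_ULONG used by SkTTCFHeader.h
#include "SkTTCFHeader.h"

// Detect a TrueType Collection and report how many fonts it bundles.
static bool IsTrueTypeCollection(const uint8_t* data, size_t size, uint32_t* numFonts) {
    if (size < sizeof(SkTTCFHeader)) {
        return false;
    }
    const SkTTCFHeader* header = reinterpret_cast<const SkTTCFHeader*>(data);
    if (header->ttcTag != SkTTCFHeader::TAG) {  // TAG is already in big-endian file order
        return false;
    }
    *numFonts = SkEndian_SwapBE32(header->numOffsets);
    return true;
}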
diff --git a/gfx/skia/skia/src/sksl/GLSL.std.450.h b/gfx/skia/skia/src/sksl/GLSL.std.450.h
new file mode 100644
index 000000000..54cc00e9a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/GLSL.std.450.h
@@ -0,0 +1,131 @@
+/*
+** Copyright (c) 2014-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+#ifndef GLSLstd450_H
+#define GLSLstd450_H
+
+static const int GLSLstd450Version = 100;
+static const int GLSLstd450Revision = 3;
+
+enum GLSLstd450 {
+ GLSLstd450Bad = 0, // Don't use
+
+ GLSLstd450Round = 1,
+ GLSLstd450RoundEven = 2,
+ GLSLstd450Trunc = 3,
+ GLSLstd450FAbs = 4,
+ GLSLstd450SAbs = 5,
+ GLSLstd450FSign = 6,
+ GLSLstd450SSign = 7,
+ GLSLstd450Floor = 8,
+ GLSLstd450Ceil = 9,
+ GLSLstd450Fract = 10,
+
+ GLSLstd450Radians = 11,
+ GLSLstd450Degrees = 12,
+ GLSLstd450Sin = 13,
+ GLSLstd450Cos = 14,
+ GLSLstd450Tan = 15,
+ GLSLstd450Asin = 16,
+ GLSLstd450Acos = 17,
+ GLSLstd450Atan = 18,
+ GLSLstd450Sinh = 19,
+ GLSLstd450Cosh = 20,
+ GLSLstd450Tanh = 21,
+ GLSLstd450Asinh = 22,
+ GLSLstd450Acosh = 23,
+ GLSLstd450Atanh = 24,
+ GLSLstd450Atan2 = 25,
+
+ GLSLstd450Pow = 26,
+ GLSLstd450Exp = 27,
+ GLSLstd450Log = 28,
+ GLSLstd450Exp2 = 29,
+ GLSLstd450Log2 = 30,
+ GLSLstd450Sqrt = 31,
+ GLSLstd450InverseSqrt = 32,
+
+ GLSLstd450Determinant = 33,
+ GLSLstd450MatrixInverse = 34,
+
+ GLSLstd450Modf = 35, // second operand needs an OpVariable to write to
+ GLSLstd450ModfStruct = 36, // no OpVariable operand
+ GLSLstd450FMin = 37,
+ GLSLstd450UMin = 38,
+ GLSLstd450SMin = 39,
+ GLSLstd450FMax = 40,
+ GLSLstd450UMax = 41,
+ GLSLstd450SMax = 42,
+ GLSLstd450FClamp = 43,
+ GLSLstd450UClamp = 44,
+ GLSLstd450SClamp = 45,
+ GLSLstd450FMix = 46,
+ GLSLstd450IMix = 47, // Reserved
+ GLSLstd450Step = 48,
+ GLSLstd450SmoothStep = 49,
+
+ GLSLstd450Fma = 50,
+ GLSLstd450Frexp = 51, // second operand needs an OpVariable to write to
+ GLSLstd450FrexpStruct = 52, // no OpVariable operand
+ GLSLstd450Ldexp = 53,
+
+ GLSLstd450PackSnorm4x8 = 54,
+ GLSLstd450PackUnorm4x8 = 55,
+ GLSLstd450PackSnorm2x16 = 56,
+ GLSLstd450PackUnorm2x16 = 57,
+ GLSLstd450PackHalf2x16 = 58,
+ GLSLstd450PackDouble2x32 = 59,
+ GLSLstd450UnpackSnorm2x16 = 60,
+ GLSLstd450UnpackUnorm2x16 = 61,
+ GLSLstd450UnpackHalf2x16 = 62,
+ GLSLstd450UnpackSnorm4x8 = 63,
+ GLSLstd450UnpackUnorm4x8 = 64,
+ GLSLstd450UnpackDouble2x32 = 65,
+
+ GLSLstd450Length = 66,
+ GLSLstd450Distance = 67,
+ GLSLstd450Cross = 68,
+ GLSLstd450Normalize = 69,
+ GLSLstd450FaceForward = 70,
+ GLSLstd450Reflect = 71,
+ GLSLstd450Refract = 72,
+
+ GLSLstd450FindILsb = 73,
+ GLSLstd450FindSMsb = 74,
+ GLSLstd450FindUMsb = 75,
+
+ GLSLstd450InterpolateAtCentroid = 76,
+ GLSLstd450InterpolateAtSample = 77,
+ GLSLstd450InterpolateAtOffset = 78,
+
+ GLSLstd450NMin = 79,
+ GLSLstd450NMax = 80,
+ GLSLstd450NClamp = 81,
+
+ GLSLstd450Count
+};
+
+#endif // #ifndef GLSLstd450_H
diff --git a/gfx/skia/skia/src/sksl/SkSLCodeGenerator.h b/gfx/skia/skia/src/sksl/SkSLCodeGenerator.h
new file mode 100644
index 000000000..7fa8c1931
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLCodeGenerator.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CODEGENERATOR
+#define SKSL_CODEGENERATOR
+
+#include "ir/SkSLProgram.h"
+#include <vector>
+#include <iostream>
+
+namespace SkSL {
+
+/**
+ * Abstract superclass of all code generators, which take a Program as input and produce code as
+ * output.
+ */
+class CodeGenerator {
+public:
+ virtual ~CodeGenerator() {}
+
+ virtual void generateCode(const Program& program, std::ostream& out) = 0;
+};
+
+} // namespace
+
+#endif
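Concrete backends only need to override generateCode(). A toy subclass sketch that dumps each element's description() instead of emitting real shader code (the class name is illustrative):

#include <iostream>

#include "SkSLCodeGenerator.h"

namespace SkSL {

// Not a real backend: prints the IR description of each top-level program element.
class DescriptionCodeGenerator : public CodeGenerator {
public:
    void generateCode(const Program& program, std::ostream& out) override {
        for (const auto& element : program.fElements) {
            out << element->description() << "\n";
        }
    }
};

} // namespace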
diff --git a/gfx/skia/skia/src/sksl/SkSLCompiler.cpp b/gfx/skia/skia/src/sksl/SkSLCompiler.cpp
new file mode 100644
index 000000000..d2ad81223
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLCompiler.cpp
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkSLCompiler.h"
+
+#include <fstream>
+#include <streambuf>
+
+#include "SkSLIRGenerator.h"
+#include "SkSLParser.h"
+#include "SkSLSPIRVCodeGenerator.h"
+#include "ir/SkSLExpression.h"
+#include "ir/SkSLIntLiteral.h"
+#include "ir/SkSLSymbolTable.h"
+#include "ir/SkSLVarDeclaration.h"
+#include "SkMutex.h"
+
+#define STRINGIFY(x) #x
+
+// include the built-in shader symbols as static strings
+
+static std::string SKSL_INCLUDE =
+#include "sksl.include"
+;
+
+static std::string SKSL_VERT_INCLUDE =
+#include "sksl_vert.include"
+;
+
+static std::string SKSL_FRAG_INCLUDE =
+#include "sksl_frag.include"
+;
+
+namespace SkSL {
+
+Compiler::Compiler()
+: fErrorCount(0) {
+ auto types = std::shared_ptr<SymbolTable>(new SymbolTable(*this));
+ auto symbols = std::shared_ptr<SymbolTable>(new SymbolTable(types, *this));
+ fIRGenerator = new IRGenerator(&fContext, symbols, *this);
+ fTypes = types;
+ #define ADD_TYPE(t) types->addWithoutOwnership(fContext.f ## t ## _Type->fName, \
+ fContext.f ## t ## _Type.get())
+ ADD_TYPE(Void);
+ ADD_TYPE(Float);
+ ADD_TYPE(Vec2);
+ ADD_TYPE(Vec3);
+ ADD_TYPE(Vec4);
+ ADD_TYPE(Double);
+ ADD_TYPE(DVec2);
+ ADD_TYPE(DVec3);
+ ADD_TYPE(DVec4);
+ ADD_TYPE(Int);
+ ADD_TYPE(IVec2);
+ ADD_TYPE(IVec3);
+ ADD_TYPE(IVec4);
+ ADD_TYPE(UInt);
+ ADD_TYPE(UVec2);
+ ADD_TYPE(UVec3);
+ ADD_TYPE(UVec4);
+ ADD_TYPE(Bool);
+ ADD_TYPE(BVec2);
+ ADD_TYPE(BVec3);
+ ADD_TYPE(BVec4);
+ ADD_TYPE(Mat2x2);
+ types->addWithoutOwnership("mat2x2", fContext.fMat2x2_Type.get());
+ ADD_TYPE(Mat2x3);
+ ADD_TYPE(Mat2x4);
+ ADD_TYPE(Mat3x2);
+ ADD_TYPE(Mat3x3);
+ types->addWithoutOwnership("mat3x3", fContext.fMat3x3_Type.get());
+ ADD_TYPE(Mat3x4);
+ ADD_TYPE(Mat4x2);
+ ADD_TYPE(Mat4x3);
+ ADD_TYPE(Mat4x4);
+ types->addWithoutOwnership("mat4x4", fContext.fMat4x4_Type.get());
+ ADD_TYPE(GenType);
+ ADD_TYPE(GenDType);
+ ADD_TYPE(GenIType);
+ ADD_TYPE(GenUType);
+ ADD_TYPE(GenBType);
+ ADD_TYPE(Mat);
+ ADD_TYPE(Vec);
+ ADD_TYPE(GVec);
+ ADD_TYPE(GVec2);
+ ADD_TYPE(GVec3);
+ ADD_TYPE(GVec4);
+ ADD_TYPE(DVec);
+ ADD_TYPE(IVec);
+ ADD_TYPE(UVec);
+ ADD_TYPE(BVec);
+
+ ADD_TYPE(Sampler1D);
+ ADD_TYPE(Sampler2D);
+ ADD_TYPE(Sampler3D);
+ ADD_TYPE(SamplerCube);
+ ADD_TYPE(Sampler2DRect);
+ ADD_TYPE(Sampler1DArray);
+ ADD_TYPE(Sampler2DArray);
+ ADD_TYPE(SamplerCubeArray);
+ ADD_TYPE(SamplerBuffer);
+ ADD_TYPE(Sampler2DMS);
+ ADD_TYPE(Sampler2DMSArray);
+
+ ADD_TYPE(GSampler1D);
+ ADD_TYPE(GSampler2D);
+ ADD_TYPE(GSampler3D);
+ ADD_TYPE(GSamplerCube);
+ ADD_TYPE(GSampler2DRect);
+ ADD_TYPE(GSampler1DArray);
+ ADD_TYPE(GSampler2DArray);
+ ADD_TYPE(GSamplerCubeArray);
+ ADD_TYPE(GSamplerBuffer);
+ ADD_TYPE(GSampler2DMS);
+ ADD_TYPE(GSampler2DMSArray);
+
+ ADD_TYPE(Sampler1DShadow);
+ ADD_TYPE(Sampler2DShadow);
+ ADD_TYPE(SamplerCubeShadow);
+ ADD_TYPE(Sampler2DRectShadow);
+ ADD_TYPE(Sampler1DArrayShadow);
+ ADD_TYPE(Sampler2DArrayShadow);
+ ADD_TYPE(SamplerCubeArrayShadow);
+ ADD_TYPE(GSampler2DArrayShadow);
+ ADD_TYPE(GSamplerCubeArrayShadow);
+
+ std::vector<std::unique_ptr<ProgramElement>> ignored;
+ this->internalConvertProgram(SKSL_INCLUDE, &ignored);
+ ASSERT(!fErrorCount);
+}
+
+Compiler::~Compiler() {
+ delete fIRGenerator;
+}
+
+void Compiler::internalConvertProgram(std::string text,
+ std::vector<std::unique_ptr<ProgramElement>>* result) {
+ Parser parser(text, *fTypes, *this);
+ std::vector<std::unique_ptr<ASTDeclaration>> parsed = parser.file();
+ if (fErrorCount) {
+ return;
+ }
+ for (size_t i = 0; i < parsed.size(); i++) {
+ ASTDeclaration& decl = *parsed[i];
+ switch (decl.fKind) {
+ case ASTDeclaration::kVar_Kind: {
+ std::unique_ptr<VarDeclarations> s = fIRGenerator->convertVarDeclarations(
+ (ASTVarDeclarations&) decl,
+ Variable::kGlobal_Storage);
+ if (s) {
+ result->push_back(std::move(s));
+ }
+ break;
+ }
+ case ASTDeclaration::kFunction_Kind: {
+ std::unique_ptr<FunctionDefinition> f = fIRGenerator->convertFunction(
+ (ASTFunction&) decl);
+ if (f) {
+ result->push_back(std::move(f));
+ }
+ break;
+ }
+ case ASTDeclaration::kInterfaceBlock_Kind: {
+ std::unique_ptr<InterfaceBlock> i = fIRGenerator->convertInterfaceBlock(
+ (ASTInterfaceBlock&) decl);
+ if (i) {
+ result->push_back(std::move(i));
+ }
+ break;
+ }
+ case ASTDeclaration::kExtension_Kind: {
+ std::unique_ptr<Extension> e = fIRGenerator->convertExtension((ASTExtension&) decl);
+ if (e) {
+ result->push_back(std::move(e));
+ }
+ break;
+ }
+ default:
+ ABORT("unsupported declaration: %s\n", decl.description().c_str());
+ }
+ }
+}
+
+std::unique_ptr<Program> Compiler::convertProgram(Program::Kind kind, std::string text) {
+ fErrorText = "";
+ fErrorCount = 0;
+ fIRGenerator->pushSymbolTable();
+ std::vector<std::unique_ptr<ProgramElement>> elements;
+ switch (kind) {
+ case Program::kVertex_Kind:
+ this->internalConvertProgram(SKSL_VERT_INCLUDE, &elements);
+ break;
+ case Program::kFragment_Kind:
+ this->internalConvertProgram(SKSL_FRAG_INCLUDE, &elements);
+ break;
+ }
+ this->internalConvertProgram(text, &elements);
+    auto result = std::unique_ptr<Program>(new Program(kind, std::move(elements),
+                                                        fIRGenerator->fSymbolTable));
+ fIRGenerator->popSymbolTable();
+ this->writeErrorCount();
+ return result;
+}
+
+void Compiler::error(Position position, std::string msg) {
+ fErrorCount++;
+ fErrorText += "error: " + position.description() + ": " + msg.c_str() + "\n";
+}
+
+std::string Compiler::errorText() {
+ std::string result = fErrorText;
+ return result;
+}
+
+void Compiler::writeErrorCount() {
+ if (fErrorCount) {
+ fErrorText += to_string(fErrorCount) + " error";
+ if (fErrorCount > 1) {
+ fErrorText += "s";
+ }
+ fErrorText += "\n";
+ }
+}
+
+bool Compiler::toSPIRV(Program::Kind kind, const std::string& text, std::ostream& out) {
+ auto program = this->convertProgram(kind, text);
+ if (fErrorCount == 0) {
+ SkSL::SPIRVCodeGenerator cg(&fContext);
+ cg.generateCode(*program.get(), out);
+ ASSERT(!out.rdstate());
+ }
+ return fErrorCount == 0;
+}
+
+bool Compiler::toSPIRV(Program::Kind kind, const std::string& text, std::string* out) {
+ std::stringstream buffer;
+ bool result = this->toSPIRV(kind, text, buffer);
+ if (result) {
+ *out = buffer.str();
+ }
+ return result;
+}
+
+bool Compiler::toGLSL(Program::Kind kind, const std::string& text, GLCaps caps,
+ std::ostream& out) {
+ auto program = this->convertProgram(kind, text);
+ if (fErrorCount == 0) {
+ SkSL::GLSLCodeGenerator cg(&fContext, caps);
+ cg.generateCode(*program.get(), out);
+ ASSERT(!out.rdstate());
+ }
+ return fErrorCount == 0;
+}
+
+bool Compiler::toGLSL(Program::Kind kind, const std::string& text, GLCaps caps,
+ std::string* out) {
+ std::stringstream buffer;
+ bool result = this->toGLSL(kind, text, caps, buffer);
+ if (result) {
+ *out = buffer.str();
+ }
+ return result;
+}
+
+} // namespace
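Callers normally go through the toGLSL()/toSPIRV() convenience overloads rather than convertProgram() directly. A usage sketch (the shader text, the GLCaps values, and the helper name are illustrative):

#include <iostream>
#include <string>

#include "SkSLCompiler.h"

// Compile an SkSL fragment program to GLSL and print either the output or the errors.
static void CompileExample() {
    SkSL::Compiler compiler;
    SkSL::GLCaps caps;
    caps.fVersion = 330;
    caps.fStandard = SkSL::GLCaps::kGL_Standard;
    std::string glsl;
    std::string src = "out vec4 fragColor; void main() { fragColor = vec4(1); }";
    if (compiler.toGLSL(SkSL::Program::kFragment_Kind, src, caps, &glsl)) {
        std::cout << glsl;
    } else {
        std::cout << compiler.errorText();  // includes the error-count summary from writeErrorCount()
    }
}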
diff --git a/gfx/skia/skia/src/sksl/SkSLCompiler.h b/gfx/skia/skia/src/sksl/SkSLCompiler.h
new file mode 100644
index 000000000..9cd1eac3f
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLCompiler.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_COMPILER
+#define SKSL_COMPILER
+
+#include <vector>
+#include "ir/SkSLProgram.h"
+#include "ir/SkSLSymbolTable.h"
+#include "SkSLContext.h"
+#include "SkSLErrorReporter.h"
+#include "SkSLGLSLCodeGenerator.h"
+
+namespace SkSL {
+
+class IRGenerator;
+
+/**
+ * Main compiler entry point. This is a traditional compiler design which first parses the .sksl
+ * file into an abstract syntax tree (a tree of ASTNodes), then performs semantic analysis to
+ * produce a Program (a tree of IRNodes), then feeds the Program into a CodeGenerator to produce
+ * compiled output.
+ */
+class Compiler : public ErrorReporter {
+public:
+ Compiler();
+
+ ~Compiler();
+
+ std::unique_ptr<Program> convertProgram(Program::Kind kind, std::string text);
+
+ bool toSPIRV(Program::Kind kind, const std::string& text, std::ostream& out);
+
+ bool toSPIRV(Program::Kind kind, const std::string& text, std::string* out);
+
+ bool toGLSL(Program::Kind kind, const std::string& text, GLCaps caps, std::ostream& out);
+
+ bool toGLSL(Program::Kind kind, const std::string& text, GLCaps caps, std::string* out);
+
+ void error(Position position, std::string msg) override;
+
+ std::string errorText();
+
+ void writeErrorCount();
+
+private:
+
+ void internalConvertProgram(std::string text,
+ std::vector<std::unique_ptr<ProgramElement>>* result);
+
+ std::shared_ptr<SymbolTable> fTypes;
+ IRGenerator* fIRGenerator;
+ std::string fSkiaVertText; // FIXME store parsed version instead
+
+ Context fContext;
+ int fErrorCount;
+ std::string fErrorText;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLContext.h b/gfx/skia/skia/src/sksl/SkSLContext.h
new file mode 100644
index 000000000..1f124d05e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLContext.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CONTEXT
+#define SKSL_CONTEXT
+
+#include "ir/SkSLType.h"
+
+namespace SkSL {
+
+/**
+ * Contains compiler-wide objects, which currently means the core types.
+ */
+class Context {
+public:
+ Context()
+ : fVoid_Type(new Type("void"))
+ , fDouble_Type(new Type("double", true))
+ , fDVec2_Type(new Type("dvec2", *fDouble_Type, 2))
+ , fDVec3_Type(new Type("dvec3", *fDouble_Type, 3))
+ , fDVec4_Type(new Type("dvec4", *fDouble_Type, 4))
+ , fFloat_Type(new Type("float", true, { fDouble_Type.get() }))
+ , fVec2_Type(new Type("vec2", *fFloat_Type, 2))
+ , fVec3_Type(new Type("vec3", *fFloat_Type, 3))
+ , fVec4_Type(new Type("vec4", *fFloat_Type, 4))
+ , fUInt_Type(new Type("uint", true, { fFloat_Type.get(), fDouble_Type.get() }))
+ , fUVec2_Type(new Type("uvec2", *fUInt_Type, 2))
+ , fUVec3_Type(new Type("uvec3", *fUInt_Type, 3))
+ , fUVec4_Type(new Type("uvec4", *fUInt_Type, 4))
+ , fInt_Type(new Type("int", true, { fUInt_Type.get(), fFloat_Type.get(), fDouble_Type.get() }))
+ , fIVec2_Type(new Type("ivec2", *fInt_Type, 2))
+ , fIVec3_Type(new Type("ivec3", *fInt_Type, 3))
+ , fIVec4_Type(new Type("ivec4", *fInt_Type, 4))
+ , fBool_Type(new Type("bool", false))
+ , fBVec2_Type(new Type("bvec2", *fBool_Type, 2))
+ , fBVec3_Type(new Type("bvec3", *fBool_Type, 3))
+ , fBVec4_Type(new Type("bvec4", *fBool_Type, 4))
+ , fMat2x2_Type(new Type("mat2", *fFloat_Type, 2, 2))
+ , fMat2x3_Type(new Type("mat2x3", *fFloat_Type, 2, 3))
+ , fMat2x4_Type(new Type("mat2x4", *fFloat_Type, 2, 4))
+ , fMat3x2_Type(new Type("mat3x2", *fFloat_Type, 3, 2))
+ , fMat3x3_Type(new Type("mat3", *fFloat_Type, 3, 3))
+ , fMat3x4_Type(new Type("mat3x4", *fFloat_Type, 3, 4))
+ , fMat4x2_Type(new Type("mat4x2", *fFloat_Type, 4, 2))
+ , fMat4x3_Type(new Type("mat4x3", *fFloat_Type, 4, 3))
+ , fMat4x4_Type(new Type("mat4", *fFloat_Type, 4, 4))
+ , fDMat2x2_Type(new Type("dmat2", *fFloat_Type, 2, 2))
+ , fDMat2x3_Type(new Type("dmat2x3", *fFloat_Type, 2, 3))
+ , fDMat2x4_Type(new Type("dmat2x4", *fFloat_Type, 2, 4))
+ , fDMat3x2_Type(new Type("dmat3x2", *fFloat_Type, 3, 2))
+ , fDMat3x3_Type(new Type("dmat3", *fFloat_Type, 3, 3))
+ , fDMat3x4_Type(new Type("dmat3x4", *fFloat_Type, 3, 4))
+ , fDMat4x2_Type(new Type("dmat4x2", *fFloat_Type, 4, 2))
+ , fDMat4x3_Type(new Type("dmat4x3", *fFloat_Type, 4, 3))
+ , fDMat4x4_Type(new Type("dmat4", *fFloat_Type, 4, 4))
+ , fSampler1D_Type(new Type("sampler1D", SpvDim1D, false, false, false, true))
+ , fSampler2D_Type(new Type("sampler2D", SpvDim2D, false, false, false, true))
+ , fSampler3D_Type(new Type("sampler3D", SpvDim3D, false, false, false, true))
+ , fSamplerCube_Type(new Type("samplerCube"))
+ , fSampler2DRect_Type(new Type("sampler2DRect"))
+ , fSampler1DArray_Type(new Type("sampler1DArray"))
+ , fSampler2DArray_Type(new Type("sampler2DArray"))
+ , fSamplerCubeArray_Type(new Type("samplerCubeArray"))
+ , fSamplerBuffer_Type(new Type("samplerBuffer"))
+ , fSampler2DMS_Type(new Type("sampler2DMS"))
+ , fSampler2DMSArray_Type(new Type("sampler2DMSArray"))
+ , fSampler1DShadow_Type(new Type("sampler1DShadow"))
+ , fSampler2DShadow_Type(new Type("sampler2DShadow"))
+ , fSamplerCubeShadow_Type(new Type("samplerCubeShadow"))
+ , fSampler2DRectShadow_Type(new Type("sampler2DRectShadow"))
+ , fSampler1DArrayShadow_Type(new Type("sampler1DArrayShadow"))
+ , fSampler2DArrayShadow_Type(new Type("sampler2DArrayShadow"))
+ , fSamplerCubeArrayShadow_Type(new Type("samplerCubeArrayShadow"))
+    // FIXME figure out what we're supposed to do with the gsampler et al. types
+ , fGSampler1D_Type(new Type("$gsampler1D", static_type(*fSampler1D_Type)))
+ , fGSampler2D_Type(new Type("$gsampler2D", static_type(*fSampler2D_Type)))
+ , fGSampler3D_Type(new Type("$gsampler3D", static_type(*fSampler3D_Type)))
+ , fGSamplerCube_Type(new Type("$gsamplerCube", static_type(*fSamplerCube_Type)))
+ , fGSampler2DRect_Type(new Type("$gsampler2DRect", static_type(*fSampler2DRect_Type)))
+ , fGSampler1DArray_Type(new Type("$gsampler1DArray", static_type(*fSampler1DArray_Type)))
+ , fGSampler2DArray_Type(new Type("$gsampler2DArray", static_type(*fSampler2DArray_Type)))
+ , fGSamplerCubeArray_Type(new Type("$gsamplerCubeArray", static_type(*fSamplerCubeArray_Type)))
+ , fGSamplerBuffer_Type(new Type("$gsamplerBuffer", static_type(*fSamplerBuffer_Type)))
+ , fGSampler2DMS_Type(new Type("$gsampler2DMS", static_type(*fSampler2DMS_Type)))
+ , fGSampler2DMSArray_Type(new Type("$gsampler2DMSArray", static_type(*fSampler2DMSArray_Type)))
+ , fGSampler2DArrayShadow_Type(new Type("$gsampler2DArrayShadow",
+ static_type(*fSampler2DArrayShadow_Type)))
+ , fGSamplerCubeArrayShadow_Type(new Type("$gsamplerCubeArrayShadow",
+ static_type(*fSamplerCubeArrayShadow_Type)))
+ , fGenType_Type(new Type("$genType", { fFloat_Type.get(), fVec2_Type.get(), fVec3_Type.get(),
+ fVec4_Type.get() }))
+ , fGenDType_Type(new Type("$genDType", { fDouble_Type.get(), fDVec2_Type.get(),
+ fDVec3_Type.get(), fDVec4_Type.get() }))
+ , fGenIType_Type(new Type("$genIType", { fInt_Type.get(), fIVec2_Type.get(), fIVec3_Type.get(),
+ fIVec4_Type.get() }))
+ , fGenUType_Type(new Type("$genUType", { fUInt_Type.get(), fUVec2_Type.get(), fUVec3_Type.get(),
+ fUVec4_Type.get() }))
+ , fGenBType_Type(new Type("$genBType", { fBool_Type.get(), fBVec2_Type.get(), fBVec3_Type.get(),
+ fBVec4_Type.get() }))
+ , fMat_Type(new Type("$mat"))
+ , fVec_Type(new Type("$vec", { fVec2_Type.get(), fVec2_Type.get(), fVec3_Type.get(),
+ fVec4_Type.get() }))
+ , fGVec_Type(new Type("$gvec"))
+ , fGVec2_Type(new Type("$gvec2"))
+ , fGVec3_Type(new Type("$gvec3"))
+ , fGVec4_Type(new Type("$gvec4", static_type(*fVec4_Type)))
+ , fDVec_Type(new Type("$dvec"))
+ , fIVec_Type(new Type("$ivec"))
+ , fUVec_Type(new Type("$uvec"))
+ , fBVec_Type(new Type("$bvec", { fBVec2_Type.get(), fBVec2_Type.get(), fBVec3_Type.get(),
+ fBVec4_Type.get() }))
+ , fInvalid_Type(new Type("<INVALID>")) {}
+
+ static std::vector<const Type*> static_type(const Type& t) {
+ return { &t, &t, &t, &t };
+ }
+
+ const std::unique_ptr<Type> fVoid_Type;
+
+ const std::unique_ptr<Type> fDouble_Type;
+ const std::unique_ptr<Type> fDVec2_Type;
+ const std::unique_ptr<Type> fDVec3_Type;
+ const std::unique_ptr<Type> fDVec4_Type;
+
+ const std::unique_ptr<Type> fFloat_Type;
+ const std::unique_ptr<Type> fVec2_Type;
+ const std::unique_ptr<Type> fVec3_Type;
+ const std::unique_ptr<Type> fVec4_Type;
+
+ const std::unique_ptr<Type> fUInt_Type;
+ const std::unique_ptr<Type> fUVec2_Type;
+ const std::unique_ptr<Type> fUVec3_Type;
+ const std::unique_ptr<Type> fUVec4_Type;
+
+ const std::unique_ptr<Type> fInt_Type;
+ const std::unique_ptr<Type> fIVec2_Type;
+ const std::unique_ptr<Type> fIVec3_Type;
+ const std::unique_ptr<Type> fIVec4_Type;
+
+ const std::unique_ptr<Type> fBool_Type;
+ const std::unique_ptr<Type> fBVec2_Type;
+ const std::unique_ptr<Type> fBVec3_Type;
+ const std::unique_ptr<Type> fBVec4_Type;
+
+ const std::unique_ptr<Type> fMat2x2_Type;
+ const std::unique_ptr<Type> fMat2x3_Type;
+ const std::unique_ptr<Type> fMat2x4_Type;
+ const std::unique_ptr<Type> fMat3x2_Type;
+ const std::unique_ptr<Type> fMat3x3_Type;
+ const std::unique_ptr<Type> fMat3x4_Type;
+ const std::unique_ptr<Type> fMat4x2_Type;
+ const std::unique_ptr<Type> fMat4x3_Type;
+ const std::unique_ptr<Type> fMat4x4_Type;
+
+ const std::unique_ptr<Type> fDMat2x2_Type;
+ const std::unique_ptr<Type> fDMat2x3_Type;
+ const std::unique_ptr<Type> fDMat2x4_Type;
+ const std::unique_ptr<Type> fDMat3x2_Type;
+ const std::unique_ptr<Type> fDMat3x3_Type;
+ const std::unique_ptr<Type> fDMat3x4_Type;
+ const std::unique_ptr<Type> fDMat4x2_Type;
+ const std::unique_ptr<Type> fDMat4x3_Type;
+ const std::unique_ptr<Type> fDMat4x4_Type;
+
+ const std::unique_ptr<Type> fSampler1D_Type;
+ const std::unique_ptr<Type> fSampler2D_Type;
+ const std::unique_ptr<Type> fSampler3D_Type;
+ const std::unique_ptr<Type> fSamplerCube_Type;
+ const std::unique_ptr<Type> fSampler2DRect_Type;
+ const std::unique_ptr<Type> fSampler1DArray_Type;
+ const std::unique_ptr<Type> fSampler2DArray_Type;
+ const std::unique_ptr<Type> fSamplerCubeArray_Type;
+ const std::unique_ptr<Type> fSamplerBuffer_Type;
+ const std::unique_ptr<Type> fSampler2DMS_Type;
+ const std::unique_ptr<Type> fSampler2DMSArray_Type;
+ const std::unique_ptr<Type> fSampler1DShadow_Type;
+ const std::unique_ptr<Type> fSampler2DShadow_Type;
+ const std::unique_ptr<Type> fSamplerCubeShadow_Type;
+ const std::unique_ptr<Type> fSampler2DRectShadow_Type;
+ const std::unique_ptr<Type> fSampler1DArrayShadow_Type;
+ const std::unique_ptr<Type> fSampler2DArrayShadow_Type;
+ const std::unique_ptr<Type> fSamplerCubeArrayShadow_Type;
+
+ const std::unique_ptr<Type> fGSampler1D_Type;
+ const std::unique_ptr<Type> fGSampler2D_Type;
+ const std::unique_ptr<Type> fGSampler3D_Type;
+ const std::unique_ptr<Type> fGSamplerCube_Type;
+ const std::unique_ptr<Type> fGSampler2DRect_Type;
+ const std::unique_ptr<Type> fGSampler1DArray_Type;
+ const std::unique_ptr<Type> fGSampler2DArray_Type;
+ const std::unique_ptr<Type> fGSamplerCubeArray_Type;
+ const std::unique_ptr<Type> fGSamplerBuffer_Type;
+ const std::unique_ptr<Type> fGSampler2DMS_Type;
+ const std::unique_ptr<Type> fGSampler2DMSArray_Type;
+ const std::unique_ptr<Type> fGSampler2DArrayShadow_Type;
+ const std::unique_ptr<Type> fGSamplerCubeArrayShadow_Type;
+
+ const std::unique_ptr<Type> fGenType_Type;
+ const std::unique_ptr<Type> fGenDType_Type;
+ const std::unique_ptr<Type> fGenIType_Type;
+ const std::unique_ptr<Type> fGenUType_Type;
+ const std::unique_ptr<Type> fGenBType_Type;
+
+ const std::unique_ptr<Type> fMat_Type;
+
+ const std::unique_ptr<Type> fVec_Type;
+
+ const std::unique_ptr<Type> fGVec_Type;
+ const std::unique_ptr<Type> fGVec2_Type;
+ const std::unique_ptr<Type> fGVec3_Type;
+ const std::unique_ptr<Type> fGVec4_Type;
+ const std::unique_ptr<Type> fDVec_Type;
+ const std::unique_ptr<Type> fIVec_Type;
+ const std::unique_ptr<Type> fUVec_Type;
+
+ const std::unique_ptr<Type> fBVec_Type;
+
+ const std::unique_ptr<Type> fInvalid_Type;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLErrorReporter.h b/gfx/skia/skia/src/sksl/SkSLErrorReporter.h
new file mode 100644
index 000000000..26b44711c
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLErrorReporter.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ERRORREPORTER
+#define SKSL_ERRORREPORTER
+
+#include "SkSLPosition.h"
+
+namespace SkSL {
+
+/**
+ * Interface for the compiler to report errors.
+ */
+class ErrorReporter {
+public:
+ virtual ~ErrorReporter() {}
+
+ virtual void error(Position position, std::string msg) = 0;
+};
+
+} // namespace
+
+#endif
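The Compiler above is itself the ErrorReporter handed to the parser and IR generator, but other front ends can plug in their own. A minimal sketch (the class name is illustrative):

#include <string>
#include <vector>

#include "SkSLErrorReporter.h"

namespace SkSL {

// Collects formatted error messages instead of printing or aborting.
class CollectingErrorReporter : public ErrorReporter {
public:
    void error(Position position, std::string msg) override {
        fErrors.push_back(position.description() + ": " + msg);
    }

    const std::vector<std::string>& errors() const { return fErrors; }

private:
    std::vector<std::string> fErrors;
};

} // namespace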
diff --git a/gfx/skia/skia/src/sksl/SkSLGLSLCodeGenerator.cpp b/gfx/skia/skia/src/sksl/SkSLGLSLCodeGenerator.cpp
new file mode 100644
index 000000000..da0bcb903
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLGLSLCodeGenerator.cpp
@@ -0,0 +1,480 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkSLGLSLCodeGenerator.h"
+
+#include "string.h"
+
+#include "GLSL.std.450.h"
+
+#include "ir/SkSLExpressionStatement.h"
+#include "ir/SkSLExtension.h"
+#include "ir/SkSLIndexExpression.h"
+#include "ir/SkSLVariableReference.h"
+
+namespace SkSL {
+
+void GLSLCodeGenerator::write(const char* s) {
+ if (s[0] == 0) {
+ return;
+ }
+ if (fAtLineStart) {
+ for (int i = 0; i < fIndentation; i++) {
+ *fOut << " ";
+ }
+ }
+ *fOut << s;
+ fAtLineStart = false;
+}
+
+void GLSLCodeGenerator::writeLine(const char* s) {
+ this->write(s);
+ *fOut << "\n";
+ fAtLineStart = true;
+}
+
+void GLSLCodeGenerator::write(const std::string& s) {
+ this->write(s.c_str());
+}
+
+void GLSLCodeGenerator::writeLine(const std::string& s) {
+ this->writeLine(s.c_str());
+}
+
+void GLSLCodeGenerator::writeLine() {
+ this->writeLine("");
+}
+
+void GLSLCodeGenerator::writeExtension(const Extension& ext) {
+ this->writeLine("#extension " + ext.fName + " : enable");
+}
+
+void GLSLCodeGenerator::writeType(const Type& type) {
+ if (type.kind() == Type::kStruct_Kind) {
+ for (const Type* search : fWrittenStructs) {
+ if (*search == type) {
+ // already written
+ this->write(type.name());
+ return;
+ }
+ }
+ fWrittenStructs.push_back(&type);
+ this->writeLine("struct " + type.name() + " {");
+ fIndentation++;
+ for (const auto& f : type.fields()) {
+ this->writeModifiers(f.fModifiers);
+ // sizes (which must be static in structs) are part of the type name here
+ this->writeType(*f.fType);
+ this->writeLine(" " + f.fName + ";");
+ }
+ fIndentation--;
+ this->writeLine("}");
+ } else {
+ this->write(type.name());
+ }
+}
+
+void GLSLCodeGenerator::writeExpression(const Expression& expr, Precedence parentPrecedence) {
+ switch (expr.fKind) {
+ case Expression::kBinary_Kind:
+ this->writeBinaryExpression((BinaryExpression&) expr, parentPrecedence);
+ break;
+ case Expression::kBoolLiteral_Kind:
+ this->writeBoolLiteral((BoolLiteral&) expr);
+ break;
+ case Expression::kConstructor_Kind:
+ this->writeConstructor((Constructor&) expr);
+ break;
+ case Expression::kIntLiteral_Kind:
+ this->writeIntLiteral((IntLiteral&) expr);
+ break;
+ case Expression::kFieldAccess_Kind:
+ this->writeFieldAccess(((FieldAccess&) expr));
+ break;
+ case Expression::kFloatLiteral_Kind:
+ this->writeFloatLiteral(((FloatLiteral&) expr));
+ break;
+ case Expression::kFunctionCall_Kind:
+ this->writeFunctionCall((FunctionCall&) expr);
+ break;
+ case Expression::kPrefix_Kind:
+ this->writePrefixExpression((PrefixExpression&) expr, parentPrecedence);
+ break;
+ case Expression::kPostfix_Kind:
+ this->writePostfixExpression((PostfixExpression&) expr, parentPrecedence);
+ break;
+ case Expression::kSwizzle_Kind:
+ this->writeSwizzle((Swizzle&) expr);
+ break;
+ case Expression::kVariableReference_Kind:
+ this->writeVariableReference((VariableReference&) expr);
+ break;
+ case Expression::kTernary_Kind:
+ this->writeTernaryExpression((TernaryExpression&) expr, parentPrecedence);
+ break;
+ case Expression::kIndex_Kind:
+ this->writeIndexExpression((IndexExpression&) expr);
+ break;
+ default:
+ ABORT("unsupported expression: %s", expr.description().c_str());
+ }
+}
+
+void GLSLCodeGenerator::writeFunctionCall(const FunctionCall& c) {
+ this->write(c.fFunction.fName + "(");
+ const char* separator = "";
+ for (const auto& arg : c.fArguments) {
+ this->write(separator);
+ separator = ", ";
+ this->writeExpression(*arg, kSequence_Precedence);
+ }
+ this->write(")");
+}
+
+void GLSLCodeGenerator::writeConstructor(const Constructor& c) {
+ this->write(c.fType.name() + "(");
+ const char* separator = "";
+ for (const auto& arg : c.fArguments) {
+ this->write(separator);
+ separator = ", ";
+ this->writeExpression(*arg, kSequence_Precedence);
+ }
+ this->write(")");
+}
+
+void GLSLCodeGenerator::writeVariableReference(const VariableReference& ref) {
+ this->write(ref.fVariable.fName);
+}
+
+void GLSLCodeGenerator::writeIndexExpression(const IndexExpression& expr) {
+ this->writeExpression(*expr.fBase, kPostfix_Precedence);
+ this->write("[");
+ this->writeExpression(*expr.fIndex, kTopLevel_Precedence);
+ this->write("]");
+}
+
+void GLSLCodeGenerator::writeFieldAccess(const FieldAccess& f) {
+ if (f.fOwnerKind == FieldAccess::kDefault_OwnerKind) {
+ this->writeExpression(*f.fBase, kPostfix_Precedence);
+ this->write(".");
+ }
+ this->write(f.fBase->fType.fields()[f.fFieldIndex].fName);
+}
+
+void GLSLCodeGenerator::writeSwizzle(const Swizzle& swizzle) {
+ this->writeExpression(*swizzle.fBase, kPostfix_Precedence);
+ this->write(".");
+ for (int c : swizzle.fComponents) {
+ this->write(&("x\0y\0z\0w\0"[c * 2]));
+ }
+}
+
+static GLSLCodeGenerator::Precedence get_binary_precedence(Token::Kind op) {
+ switch (op) {
+ case Token::STAR: // fall through
+ case Token::SLASH: // fall through
+ case Token::PERCENT: return GLSLCodeGenerator::kMultiplicative_Precedence;
+ case Token::PLUS: // fall through
+ case Token::MINUS: return GLSLCodeGenerator::kAdditive_Precedence;
+ case Token::SHL: // fall through
+ case Token::SHR: return GLSLCodeGenerator::kShift_Precedence;
+ case Token::LT: // fall through
+ case Token::GT: // fall through
+ case Token::LTEQ: // fall through
+ case Token::GTEQ: return GLSLCodeGenerator::kRelational_Precedence;
+ case Token::EQEQ: // fall through
+ case Token::NEQ: return GLSLCodeGenerator::kEquality_Precedence;
+ case Token::BITWISEAND: return GLSLCodeGenerator::kBitwiseAnd_Precedence;
+ case Token::BITWISEXOR: return GLSLCodeGenerator::kBitwiseXor_Precedence;
+ case Token::BITWISEOR: return GLSLCodeGenerator::kBitwiseOr_Precedence;
+ case Token::LOGICALAND: return GLSLCodeGenerator::kLogicalAnd_Precedence;
+ case Token::LOGICALXOR: return GLSLCodeGenerator::kLogicalXor_Precedence;
+ case Token::LOGICALOR: return GLSLCodeGenerator::kLogicalOr_Precedence;
+ case Token::EQ: // fall through
+ case Token::PLUSEQ: // fall through
+ case Token::MINUSEQ: // fall through
+ case Token::STAREQ: // fall through
+ case Token::SLASHEQ: // fall through
+ case Token::PERCENTEQ: // fall through
+ case Token::SHLEQ: // fall through
+ case Token::SHREQ: // fall through
+ case Token::LOGICALANDEQ: // fall through
+ case Token::LOGICALXOREQ: // fall through
+ case Token::LOGICALOREQ: // fall through
+ case Token::BITWISEANDEQ: // fall through
+ case Token::BITWISEXOREQ: // fall through
+ case Token::BITWISEOREQ: return GLSLCodeGenerator::kAssignment_Precedence;
+ default: ABORT("unsupported binary operator");
+ }
+}
+
+void GLSLCodeGenerator::writeBinaryExpression(const BinaryExpression& b,
+ Precedence parentPrecedence) {
+ Precedence precedence = get_binary_precedence(b.fOperator);
+ if (precedence >= parentPrecedence) {
+ this->write("(");
+ }
+ this->writeExpression(*b.fLeft, precedence);
+ this->write(" " + Token::OperatorName(b.fOperator) + " ");
+ this->writeExpression(*b.fRight, precedence);
+ if (precedence >= parentPrecedence) {
+ this->write(")");
+ }
+}
+
+void GLSLCodeGenerator::writeTernaryExpression(const TernaryExpression& t,
+ Precedence parentPrecedence) {
+ if (kTernary_Precedence >= parentPrecedence) {
+ this->write("(");
+ }
+ this->writeExpression(*t.fTest, kTernary_Precedence);
+ this->write(" ? ");
+ this->writeExpression(*t.fIfTrue, kTernary_Precedence);
+ this->write(" : ");
+ this->writeExpression(*t.fIfFalse, kTernary_Precedence);
+ if (kTernary_Precedence >= parentPrecedence) {
+ this->write(")");
+ }
+}
+
+void GLSLCodeGenerator::writePrefixExpression(const PrefixExpression& p,
+ Precedence parentPrecedence) {
+ if (kPrefix_Precedence >= parentPrecedence) {
+ this->write("(");
+ }
+ this->write(Token::OperatorName(p.fOperator));
+ this->writeExpression(*p.fOperand, kPrefix_Precedence);
+ if (kPrefix_Precedence >= parentPrecedence) {
+ this->write(")");
+ }
+}
+
+void GLSLCodeGenerator::writePostfixExpression(const PostfixExpression& p,
+ Precedence parentPrecedence) {
+ if (kPostfix_Precedence >= parentPrecedence) {
+ this->write("(");
+ }
+ this->writeExpression(*p.fOperand, kPostfix_Precedence);
+ this->write(Token::OperatorName(p.fOperator));
+ if (kPostfix_Precedence >= parentPrecedence) {
+ this->write(")");
+ }
+}
+
+void GLSLCodeGenerator::writeBoolLiteral(const BoolLiteral& b) {
+ this->write(b.fValue ? "true" : "false");
+}
+
+void GLSLCodeGenerator::writeIntLiteral(const IntLiteral& i) {
+ this->write(to_string(i.fValue));
+}
+
+void GLSLCodeGenerator::writeFloatLiteral(const FloatLiteral& f) {
+ this->write(to_string(f.fValue));
+}
+
+void GLSLCodeGenerator::writeFunction(const FunctionDefinition& f) {
+ this->writeType(f.fDeclaration.fReturnType);
+ this->write(" " + f.fDeclaration.fName + "(");
+ const char* separator = "";
+ for (const auto& param : f.fDeclaration.fParameters) {
+ this->write(separator);
+ separator = ", ";
+ this->writeModifiers(param->fModifiers);
+ this->writeType(param->fType);
+ this->write(" " + param->fName);
+ }
+ this->write(") ");
+ this->writeBlock(*f.fBody);
+ this->writeLine();
+}
+
+void GLSLCodeGenerator::writeModifiers(const Modifiers& modifiers) {
+ this->write(modifiers.description());
+}
+
+void GLSLCodeGenerator::writeInterfaceBlock(const InterfaceBlock& intf) {
+ if (intf.fVariable.fName == "gl_PerVertex") {
+ return;
+ }
+ this->writeModifiers(intf.fVariable.fModifiers);
+ this->writeLine(intf.fVariable.fType.name() + " {");
+ fIndentation++;
+ for (const auto& f : intf.fVariable.fType.fields()) {
+ this->writeModifiers(f.fModifiers);
+ this->writeType(*f.fType);
+ this->writeLine(" " + f.fName + ";");
+ }
+ fIndentation--;
+ this->writeLine("};");
+}
+
+void GLSLCodeGenerator::writeVarDeclarations(const VarDeclarations& decl) {
+ ASSERT(decl.fVars.size() > 0);
+ this->writeModifiers(decl.fVars[0].fVar->fModifiers);
+ this->writeType(decl.fBaseType);
+ std::string separator = " ";
+ for (const auto& var : decl.fVars) {
+ ASSERT(var.fVar->fModifiers == decl.fVars[0].fVar->fModifiers);
+ this->write(separator);
+ separator = ", ";
+ this->write(var.fVar->fName);
+ for (const auto& size : var.fSizes) {
+ this->write("[");
+ this->writeExpression(*size, kTopLevel_Precedence);
+ this->write("]");
+ }
+ if (var.fValue) {
+ this->write(" = ");
+ this->writeExpression(*var.fValue, kTopLevel_Precedence);
+ }
+ }
+ this->write(";");
+}
+
+void GLSLCodeGenerator::writeStatement(const Statement& s) {
+ switch (s.fKind) {
+ case Statement::kBlock_Kind:
+ this->writeBlock((Block&) s);
+ break;
+ case Statement::kExpression_Kind:
+ this->writeExpression(*((ExpressionStatement&) s).fExpression, kTopLevel_Precedence);
+ this->write(";");
+ break;
+ case Statement::kReturn_Kind:
+ this->writeReturnStatement((ReturnStatement&) s);
+ break;
+ case Statement::kVarDeclarations_Kind:
+ this->writeVarDeclarations(*((VarDeclarationsStatement&) s).fDeclaration);
+ break;
+ case Statement::kIf_Kind:
+ this->writeIfStatement((IfStatement&) s);
+ break;
+ case Statement::kFor_Kind:
+ this->writeForStatement((ForStatement&) s);
+ break;
+ case Statement::kWhile_Kind:
+ this->writeWhileStatement((WhileStatement&) s);
+ break;
+ case Statement::kDo_Kind:
+ this->writeDoStatement((DoStatement&) s);
+ break;
+ case Statement::kBreak_Kind:
+ this->write("break;");
+ break;
+ case Statement::kContinue_Kind:
+ this->write("continue;");
+ break;
+ case Statement::kDiscard_Kind:
+ this->write("discard;");
+ break;
+ default:
+ ABORT("unsupported statement: %s", s.description().c_str());
+ }
+}
+
+void GLSLCodeGenerator::writeBlock(const Block& b) {
+ this->writeLine("{");
+ fIndentation++;
+ for (const auto& s : b.fStatements) {
+ this->writeStatement(*s);
+ this->writeLine();
+ }
+ fIndentation--;
+ this->write("}");
+}
+
+void GLSLCodeGenerator::writeIfStatement(const IfStatement& stmt) {
+ this->write("if (");
+ this->writeExpression(*stmt.fTest, kTopLevel_Precedence);
+ this->write(") ");
+ this->writeStatement(*stmt.fIfTrue);
+ if (stmt.fIfFalse) {
+ this->write(" else ");
+ this->writeStatement(*stmt.fIfFalse);
+ }
+}
+
+void GLSLCodeGenerator::writeForStatement(const ForStatement& f) {
+ this->write("for (");
+ if (f.fInitializer) {
+ this->writeStatement(*f.fInitializer);
+ } else {
+ this->write("; ");
+ }
+ if (f.fTest) {
+ this->writeExpression(*f.fTest, kTopLevel_Precedence);
+ }
+ this->write("; ");
+ if (f.fNext) {
+ this->writeExpression(*f.fNext, kTopLevel_Precedence);
+ }
+ this->write(") ");
+ this->writeStatement(*f.fStatement);
+}
+
+void GLSLCodeGenerator::writeWhileStatement(const WhileStatement& w) {
+ this->write("while (");
+ this->writeExpression(*w.fTest, kTopLevel_Precedence);
+ this->write(") ");
+ this->writeStatement(*w.fStatement);
+}
+
+void GLSLCodeGenerator::writeDoStatement(const DoStatement& d) {
+ this->write("do ");
+ this->writeStatement(*d.fStatement);
+ this->write(" while (");
+ this->writeExpression(*d.fTest, kTopLevel_Precedence);
+ this->write(");");
+}
+
+void GLSLCodeGenerator::writeReturnStatement(const ReturnStatement& r) {
+ this->write("return");
+ if (r.fExpression) {
+ this->write(" ");
+ this->writeExpression(*r.fExpression, kTopLevel_Precedence);
+ }
+ this->write(";");
+}
+
+void GLSLCodeGenerator::generateCode(const Program& program, std::ostream& out) {
+ ASSERT(fOut == nullptr);
+ fOut = &out;
+ this->write("#version " + to_string(fCaps.fVersion));
+ if (fCaps.fStandard == GLCaps::kGLES_Standard) {
+ this->write(" es");
+ }
+ this->writeLine();
+ for (const auto& e : program.fElements) {
+ switch (e->fKind) {
+ case ProgramElement::kExtension_Kind:
+ this->writeExtension((Extension&) *e);
+ break;
+ case ProgramElement::kVar_Kind: {
+ VarDeclarations& decl = (VarDeclarations&) *e;
+ if (decl.fVars.size() > 0 &&
+ decl.fVars[0].fVar->fModifiers.fLayout.fBuiltin == -1) {
+ this->writeVarDeclarations(decl);
+ this->writeLine();
+ }
+ break;
+ }
+ case ProgramElement::kInterfaceBlock_Kind:
+ this->writeInterfaceBlock((InterfaceBlock&) *e);
+ break;
+ case ProgramElement::kFunction_Kind:
+ this->writeFunction((FunctionDefinition&) *e);
+ break;
+ default:
+ printf("%s\n", e->description().c_str());
+ ABORT("unsupported program element");
+ }
+ }
+ fOut = nullptr;
+}
+
+}
diff --git a/gfx/skia/skia/src/sksl/SkSLGLSLCodeGenerator.h b/gfx/skia/skia/src/sksl/SkSLGLSLCodeGenerator.h
new file mode 100644
index 000000000..3534affcc
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLGLSLCodeGenerator.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_GLSLCODEGENERATOR
+#define SKSL_GLSLCODEGENERATOR
+
+#include <stack>
+#include <tuple>
+#include <unordered_map>
+
+#include "SkSLCodeGenerator.h"
+#include "ir/SkSLBinaryExpression.h"
+#include "ir/SkSLBoolLiteral.h"
+#include "ir/SkSLConstructor.h"
+#include "ir/SkSLDoStatement.h"
+#include "ir/SkSLExtension.h"
+#include "ir/SkSLFloatLiteral.h"
+#include "ir/SkSLIfStatement.h"
+#include "ir/SkSLIndexExpression.h"
+#include "ir/SkSLInterfaceBlock.h"
+#include "ir/SkSLIntLiteral.h"
+#include "ir/SkSLFieldAccess.h"
+#include "ir/SkSLForStatement.h"
+#include "ir/SkSLFunctionCall.h"
+#include "ir/SkSLFunctionDeclaration.h"
+#include "ir/SkSLFunctionDefinition.h"
+#include "ir/SkSLPrefixExpression.h"
+#include "ir/SkSLPostfixExpression.h"
+#include "ir/SkSLProgramElement.h"
+#include "ir/SkSLReturnStatement.h"
+#include "ir/SkSLStatement.h"
+#include "ir/SkSLSwizzle.h"
+#include "ir/SkSLTernaryExpression.h"
+#include "ir/SkSLVarDeclaration.h"
+#include "ir/SkSLVarDeclarationStatement.h"
+#include "ir/SkSLVariableReference.h"
+#include "ir/SkSLWhileStatement.h"
+
+namespace SkSL {
+
+#define kLast_Capability SpvCapabilityMultiViewport
+
+struct GLCaps {
+ int fVersion;
+ enum {
+ kGL_Standard,
+ kGLES_Standard
+ } fStandard;
+};
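+
+// For example, a GLCaps of { fVersion = 300, fStandard = kGLES_Standard } makes
+// generateCode() emit "#version 300 es", while { 400, kGL_Standard } yields
+// "#version 400" (see generateCode() in SkSLGLSLCodeGenerator.cpp).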
+
+/**
+ * Converts a Program into GLSL code.
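+ *
+ * Rough usage sketch (assumes a Context 'context', a compiled Program
+ * 'program', and a std::ostream 'out' are already available):
+ *
+ *     GLCaps caps;
+ *     caps.fVersion = 330;
+ *     caps.fStandard = GLCaps::kGL_Standard;
+ *     GLSLCodeGenerator(&context, caps).generateCode(program, out);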
+ */
+class GLSLCodeGenerator : public CodeGenerator {
+public:
+ enum Precedence {
+ kParentheses_Precedence = 1,
+ kPostfix_Precedence = 2,
+ kPrefix_Precedence = 3,
+ kMultiplicative_Precedence = 4,
+ kAdditive_Precedence = 5,
+ kShift_Precedence = 6,
+ kRelational_Precedence = 7,
+ kEquality_Precedence = 8,
+ kBitwiseAnd_Precedence = 9,
+ kBitwiseXor_Precedence = 10,
+ kBitwiseOr_Precedence = 11,
+ kLogicalAnd_Precedence = 12,
+ kLogicalXor_Precedence = 13,
+ kLogicalOr_Precedence = 14,
+ kTernary_Precedence = 15,
+ kAssignment_Precedence = 16,
+ kSequence_Precedence = 17,
+ kTopLevel_Precedence = 18
+ };
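+ // Lower values bind more tightly. The expression writers in
+ // SkSLGLSLCodeGenerator.cpp presumably compare an operator's precedence
+ // against parentPrecedence and parenthesize the output when a subexpression
+ // binds more loosely than its context, e.g. an additive expression nested
+ // under a multiplicative one.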
+
+ GLSLCodeGenerator(const Context* context, GLCaps caps)
+ : fContext(*context)
+ , fCaps(caps)
+ , fIndentation(0)
+ , fAtLineStart(true) {}
+
+ void generateCode(const Program& program, std::ostream& out) override;
+
+private:
+ void write(const char* s);
+
+ void writeLine();
+
+ void writeLine(const char* s);
+
+ void write(const std::string& s);
+
+ void writeLine(const std::string& s);
+
+ void writeType(const Type& type);
+
+ void writeExtension(const Extension& ext);
+
+ void writeInterfaceBlock(const InterfaceBlock& intf);
+
+ void writeFunctionStart(const FunctionDeclaration& f);
+
+ void writeFunctionDeclaration(const FunctionDeclaration& f);
+
+ void writeFunction(const FunctionDefinition& f);
+
+ void writeLayout(const Layout& layout);
+
+ void writeModifiers(const Modifiers& modifiers);
+
+ void writeGlobalVars(const VarDeclaration& vs);
+
+ void writeVarDeclarations(const VarDeclarations& decl);
+
+ void writeVariableReference(const VariableReference& ref);
+
+ void writeExpression(const Expression& expr, Precedence parentPrecedence);
+
+ void writeIntrinsicCall(const FunctionCall& c);
+
+ void writeFunctionCall(const FunctionCall& c);
+
+ void writeConstructor(const Constructor& c);
+
+ void writeFieldAccess(const FieldAccess& f);
+
+ void writeSwizzle(const Swizzle& swizzle);
+
+ void writeBinaryExpression(const BinaryExpression& b, Precedence parentPrecedence);
+
+ void writeTernaryExpression(const TernaryExpression& t, Precedence parentPrecedence);
+
+ void writeIndexExpression(const IndexExpression& expr);
+
+ void writePrefixExpression(const PrefixExpression& p, Precedence parentPrecedence);
+
+ void writePostfixExpression(const PostfixExpression& p, Precedence parentPrecedence);
+
+ void writeBoolLiteral(const BoolLiteral& b);
+
+ void writeIntLiteral(const IntLiteral& i);
+
+ void writeFloatLiteral(const FloatLiteral& f);
+
+ void writeStatement(const Statement& s);
+
+ void writeBlock(const Block& b);
+
+ void writeIfStatement(const IfStatement& stmt);
+
+ void writeForStatement(const ForStatement& f);
+
+ void writeWhileStatement(const WhileStatement& w);
+
+ void writeDoStatement(const DoStatement& d);
+
+ void writeReturnStatement(const ReturnStatement& r);
+
+ const Context& fContext;
+ const GLCaps fCaps;
+ std::ostream* fOut;
+ int fIndentation;
+ bool fAtLineStart;
+ // Keeps track of which struct types we have written. Given that we are unlikely to ever write
+ // more than one or two structs per shader, a simple linear search will be faster than anything
+ // fancier.
+ std::vector<const Type*> fWrittenStructs;
+};
+
+}
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLIRGenerator.cpp b/gfx/skia/skia/src/sksl/SkSLIRGenerator.cpp
new file mode 100644
index 000000000..c30cac17d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLIRGenerator.cpp
@@ -0,0 +1,1260 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkSLIRGenerator.h"
+
+#include "limits.h"
+
+#include "ast/SkSLASTBoolLiteral.h"
+#include "ast/SkSLASTFieldSuffix.h"
+#include "ast/SkSLASTFloatLiteral.h"
+#include "ast/SkSLASTIndexSuffix.h"
+#include "ast/SkSLASTIntLiteral.h"
+#include "ir/SkSLBinaryExpression.h"
+#include "ir/SkSLBoolLiteral.h"
+#include "ir/SkSLBreakStatement.h"
+#include "ir/SkSLConstructor.h"
+#include "ir/SkSLContinueStatement.h"
+#include "ir/SkSLDiscardStatement.h"
+#include "ir/SkSLDoStatement.h"
+#include "ir/SkSLExpressionStatement.h"
+#include "ir/SkSLField.h"
+#include "ir/SkSLFieldAccess.h"
+#include "ir/SkSLFloatLiteral.h"
+#include "ir/SkSLForStatement.h"
+#include "ir/SkSLFunctionCall.h"
+#include "ir/SkSLFunctionDeclaration.h"
+#include "ir/SkSLFunctionDefinition.h"
+#include "ir/SkSLFunctionReference.h"
+#include "ir/SkSLIfStatement.h"
+#include "ir/SkSLIndexExpression.h"
+#include "ir/SkSLInterfaceBlock.h"
+#include "ir/SkSLIntLiteral.h"
+#include "ir/SkSLLayout.h"
+#include "ir/SkSLPostfixExpression.h"
+#include "ir/SkSLPrefixExpression.h"
+#include "ir/SkSLReturnStatement.h"
+#include "ir/SkSLSwizzle.h"
+#include "ir/SkSLTernaryExpression.h"
+#include "ir/SkSLUnresolvedFunction.h"
+#include "ir/SkSLVariable.h"
+#include "ir/SkSLVarDeclaration.h"
+#include "ir/SkSLVarDeclarationStatement.h"
+#include "ir/SkSLVariableReference.h"
+#include "ir/SkSLWhileStatement.h"
+
+namespace SkSL {
+
+class AutoSymbolTable {
+public:
+ AutoSymbolTable(IRGenerator* ir)
+ : fIR(ir)
+ , fPrevious(fIR->fSymbolTable) {
+ fIR->pushSymbolTable();
+ }
+
+ ~AutoSymbolTable() {
+ fIR->popSymbolTable();
+ ASSERT(fPrevious == fIR->fSymbolTable);
+ }
+
+ IRGenerator* fIR;
+ std::shared_ptr<SymbolTable> fPrevious;
+};
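+
+// RAII helper: the constructor pushes a fresh child SymbolTable and the
+// destructor pops it, so scopes opened by convertBlock(), convertFor(), etc.
+// unwind correctly even on the early 'return nullptr' error paths.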
+
+IRGenerator::IRGenerator(const Context* context, std::shared_ptr<SymbolTable> symbolTable,
+ ErrorReporter& errorReporter)
+: fContext(*context)
+, fCurrentFunction(nullptr)
+, fSymbolTable(std::move(symbolTable))
+, fErrors(errorReporter) {}
+
+void IRGenerator::pushSymbolTable() {
+ fSymbolTable.reset(new SymbolTable(std::move(fSymbolTable), fErrors));
+}
+
+void IRGenerator::popSymbolTable() {
+ fSymbolTable = fSymbolTable->fParent;
+}
+
+std::unique_ptr<Extension> IRGenerator::convertExtension(const ASTExtension& extension) {
+ return std::unique_ptr<Extension>(new Extension(extension.fPosition, extension.fName));
+}
+
+std::unique_ptr<Statement> IRGenerator::convertStatement(const ASTStatement& statement) {
+ switch (statement.fKind) {
+ case ASTStatement::kBlock_Kind:
+ return this->convertBlock((ASTBlock&) statement);
+ case ASTStatement::kVarDeclaration_Kind:
+ return this->convertVarDeclarationStatement((ASTVarDeclarationStatement&) statement);
+ case ASTStatement::kExpression_Kind:
+ return this->convertExpressionStatement((ASTExpressionStatement&) statement);
+ case ASTStatement::kIf_Kind:
+ return this->convertIf((ASTIfStatement&) statement);
+ case ASTStatement::kFor_Kind:
+ return this->convertFor((ASTForStatement&) statement);
+ case ASTStatement::kWhile_Kind:
+ return this->convertWhile((ASTWhileStatement&) statement);
+ case ASTStatement::kDo_Kind:
+ return this->convertDo((ASTDoStatement&) statement);
+ case ASTStatement::kReturn_Kind:
+ return this->convertReturn((ASTReturnStatement&) statement);
+ case ASTStatement::kBreak_Kind:
+ return this->convertBreak((ASTBreakStatement&) statement);
+ case ASTStatement::kContinue_Kind:
+ return this->convertContinue((ASTContinueStatement&) statement);
+ case ASTStatement::kDiscard_Kind:
+ return this->convertDiscard((ASTDiscardStatement&) statement);
+ default:
+ ABORT("unsupported statement type: %d\n", statement.fKind);
+ }
+}
+
+std::unique_ptr<Block> IRGenerator::convertBlock(const ASTBlock& block) {
+ AutoSymbolTable table(this);
+ std::vector<std::unique_ptr<Statement>> statements;
+ for (size_t i = 0; i < block.fStatements.size(); i++) {
+ std::unique_ptr<Statement> statement = this->convertStatement(*block.fStatements[i]);
+ if (!statement) {
+ return nullptr;
+ }
+ statements.push_back(std::move(statement));
+ }
+ return std::unique_ptr<Block>(new Block(block.fPosition, std::move(statements), fSymbolTable));
+}
+
+std::unique_ptr<Statement> IRGenerator::convertVarDeclarationStatement(
+ const ASTVarDeclarationStatement& s) {
+ auto decl = this->convertVarDeclarations(*s.fDeclarations, Variable::kLocal_Storage);
+ if (!decl) {
+ return nullptr;
+ }
+ return std::unique_ptr<Statement>(new VarDeclarationsStatement(std::move(decl)));
+}
+
+Modifiers IRGenerator::convertModifiers(const ASTModifiers& modifiers) {
+ return Modifiers(modifiers);
+}
+
+std::unique_ptr<VarDeclarations> IRGenerator::convertVarDeclarations(const ASTVarDeclarations& decl,
+ Variable::Storage storage) {
+ std::vector<VarDeclaration> variables;
+ const Type* baseType = this->convertType(*decl.fType);
+ if (!baseType) {
+ return nullptr;
+ }
+ for (const auto& varDecl : decl.fVars) {
+ Modifiers modifiers = this->convertModifiers(decl.fModifiers);
+ const Type* type = baseType;
+ ASSERT(type->kind() != Type::kArray_Kind);
+ std::vector<std::unique_ptr<Expression>> sizes;
+ for (const auto& rawSize : varDecl.fSizes) {
+ if (rawSize) {
+ auto size = this->coerce(this->convertExpression(*rawSize), *fContext.fInt_Type);
+ if (!size) {
+ return nullptr;
+ }
+ std::string name = type->fName;
+ uint64_t count;
+ if (size->fKind == Expression::kIntLiteral_Kind) {
+ count = ((IntLiteral&) *size).fValue;
+ if (count <= 0) {
+ fErrors.error(size->fPosition, "array size must be positive");
+ }
+ name += "[" + to_string(count) + "]";
+ } else {
+ count = -1;
+ name += "[]";
+ }
+ type = new Type(name, Type::kArray_Kind, *type, (int) count);
+ fSymbolTable->takeOwnership((Type*) type);
+ sizes.push_back(std::move(size));
+ } else {
+ type = new Type(type->fName + "[]", Type::kArray_Kind, *type, -1);
+ fSymbolTable->takeOwnership((Type*) type);
+ sizes.push_back(nullptr);
+ }
+ }
+ auto var = std::unique_ptr<Variable>(new Variable(decl.fPosition, modifiers, varDecl.fName,
+ *type, storage));
+ std::unique_ptr<Expression> value;
+ if (varDecl.fValue) {
+ value = this->convertExpression(*varDecl.fValue);
+ if (!value) {
+ return nullptr;
+ }
+ value = this->coerce(std::move(value), *type);
+ }
+ if ("gl_FragCoord" == varDecl.fName && (*fSymbolTable)[varDecl.fName]) {
+ // already defined, just update the modifiers
+ Variable* old = (Variable*) (*fSymbolTable)[varDecl.fName];
+ old->fModifiers = var->fModifiers;
+ } else {
+ variables.emplace_back(var.get(), std::move(sizes), std::move(value));
+ fSymbolTable->add(varDecl.fName, std::move(var));
+ }
+ }
+ return std::unique_ptr<VarDeclarations>(new VarDeclarations(decl.fPosition,
+ baseType,
+ std::move(variables)));
+}
+
+std::unique_ptr<Statement> IRGenerator::convertIf(const ASTIfStatement& s) {
+ std::unique_ptr<Expression> test = this->coerce(this->convertExpression(*s.fTest),
+ *fContext.fBool_Type);
+ if (!test) {
+ return nullptr;
+ }
+ std::unique_ptr<Statement> ifTrue = this->convertStatement(*s.fIfTrue);
+ if (!ifTrue) {
+ return nullptr;
+ }
+ std::unique_ptr<Statement> ifFalse;
+ if (s.fIfFalse) {
+ ifFalse = this->convertStatement(*s.fIfFalse);
+ if (!ifFalse) {
+ return nullptr;
+ }
+ }
+ return std::unique_ptr<Statement>(new IfStatement(s.fPosition, std::move(test),
+ std::move(ifTrue), std::move(ifFalse)));
+}
+
+std::unique_ptr<Statement> IRGenerator::convertFor(const ASTForStatement& f) {
+ AutoSymbolTable table(this);
+ std::unique_ptr<Statement> initializer = this->convertStatement(*f.fInitializer);
+ if (!initializer) {
+ return nullptr;
+ }
+ std::unique_ptr<Expression> test = this->coerce(this->convertExpression(*f.fTest),
+ *fContext.fBool_Type);
+ if (!test) {
+ return nullptr;
+ }
+ std::unique_ptr<Expression> next = this->convertExpression(*f.fNext);
+ if (!next) {
+ return nullptr;
+ }
+ this->checkValid(*next);
+ std::unique_ptr<Statement> statement = this->convertStatement(*f.fStatement);
+ if (!statement) {
+ return nullptr;
+ }
+ return std::unique_ptr<Statement>(new ForStatement(f.fPosition, std::move(initializer),
+ std::move(test), std::move(next),
+ std::move(statement), fSymbolTable));
+}
+
+std::unique_ptr<Statement> IRGenerator::convertWhile(const ASTWhileStatement& w) {
+ std::unique_ptr<Expression> test = this->coerce(this->convertExpression(*w.fTest),
+ *fContext.fBool_Type);
+ if (!test) {
+ return nullptr;
+ }
+ std::unique_ptr<Statement> statement = this->convertStatement(*w.fStatement);
+ if (!statement) {
+ return nullptr;
+ }
+ return std::unique_ptr<Statement>(new WhileStatement(w.fPosition, std::move(test),
+ std::move(statement)));
+}
+
+std::unique_ptr<Statement> IRGenerator::convertDo(const ASTDoStatement& d) {
+ std::unique_ptr<Expression> test = this->coerce(this->convertExpression(*d.fTest),
+ *fContext.fBool_Type);
+ if (!test) {
+ return nullptr;
+ }
+ std::unique_ptr<Statement> statement = this->convertStatement(*d.fStatement);
+ if (!statement) {
+ return nullptr;
+ }
+ return std::unique_ptr<Statement>(new DoStatement(d.fPosition, std::move(statement),
+ std::move(test)));
+}
+
+std::unique_ptr<Statement> IRGenerator::convertExpressionStatement(
+ const ASTExpressionStatement& s) {
+ std::unique_ptr<Expression> e = this->convertExpression(*s.fExpression);
+ if (!e) {
+ return nullptr;
+ }
+ this->checkValid(*e);
+ return std::unique_ptr<Statement>(new ExpressionStatement(std::move(e)));
+}
+
+std::unique_ptr<Statement> IRGenerator::convertReturn(const ASTReturnStatement& r) {
+ ASSERT(fCurrentFunction);
+ if (r.fExpression) {
+ std::unique_ptr<Expression> result = this->convertExpression(*r.fExpression);
+ if (!result) {
+ return nullptr;
+ }
+ if (fCurrentFunction->fReturnType == *fContext.fVoid_Type) {
+ fErrors.error(result->fPosition, "may not return a value from a void function");
+ } else {
+ result = this->coerce(std::move(result), fCurrentFunction->fReturnType);
+ if (!result) {
+ return nullptr;
+ }
+ }
+ return std::unique_ptr<Statement>(new ReturnStatement(std::move(result)));
+ } else {
+ if (fCurrentFunction->fReturnType != *fContext.fVoid_Type) {
+ fErrors.error(r.fPosition, "expected function to return '" +
+ fCurrentFunction->fReturnType.description() + "'");
+ }
+ return std::unique_ptr<Statement>(new ReturnStatement(r.fPosition));
+ }
+}
+
+std::unique_ptr<Statement> IRGenerator::convertBreak(const ASTBreakStatement& b) {
+ return std::unique_ptr<Statement>(new BreakStatement(b.fPosition));
+}
+
+std::unique_ptr<Statement> IRGenerator::convertContinue(const ASTContinueStatement& c) {
+ return std::unique_ptr<Statement>(new ContinueStatement(c.fPosition));
+}
+
+std::unique_ptr<Statement> IRGenerator::convertDiscard(const ASTDiscardStatement& d) {
+ return std::unique_ptr<Statement>(new DiscardStatement(d.fPosition));
+}
+
+static const Type& expand_generics(const Type& type, int i) {
+ if (type.kind() == Type::kGeneric_Kind) {
+ return *type.coercibleTypes()[i];
+ }
+ return type;
+}
+
+static void expand_generics(const FunctionDeclaration& decl,
+ std::shared_ptr<SymbolTable> symbolTable) {
+ for (int i = 0; i < 4; i++) {
+ const Type& returnType = expand_generics(decl.fReturnType, i);
+ std::vector<const Variable*> parameters;
+ for (const auto& p : decl.fParameters) {
+ Variable* var = new Variable(p->fPosition, Modifiers(p->fModifiers), p->fName,
+ expand_generics(p->fType, i),
+ Variable::kParameter_Storage);
+ symbolTable->takeOwnership(var);
+ parameters.push_back(var);
+ }
+ symbolTable->add(decl.fName, std::unique_ptr<FunctionDeclaration>(new FunctionDeclaration(
+ decl.fPosition,
+ decl.fName,
+ std::move(parameters),
+ std::move(returnType))));
+ }
+}
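+
+// Sketch of the intent here: a built-in declared with generic parameter or
+// return types (sin(), for example) is registered as four concrete overloads,
+// one per entry of coercibleTypes() (presumably the float/vec2/vec3/vec4
+// family), so that ordinary overload resolution can pick a concrete signature
+// later.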
+
+std::unique_ptr<FunctionDefinition> IRGenerator::convertFunction(const ASTFunction& f) {
+ bool isGeneric;
+ const Type* returnType = this->convertType(*f.fReturnType);
+ if (!returnType) {
+ return nullptr;
+ }
+ isGeneric = returnType->kind() == Type::kGeneric_Kind;
+ std::vector<const Variable*> parameters;
+ for (const auto& param : f.fParameters) {
+ const Type* type = this->convertType(*param->fType);
+ if (!type) {
+ return nullptr;
+ }
+ for (int j = (int) param->fSizes.size() - 1; j >= 0; j--) {
+ int size = param->fSizes[j];
+ std::string name = type->name() + "[" + to_string(size) + "]";
+ Type* newType = new Type(std::move(name), Type::kArray_Kind, *type, size);
+ fSymbolTable->takeOwnership(newType);
+ type = newType;
+ }
+ std::string name = param->fName;
+ Modifiers modifiers = this->convertModifiers(param->fModifiers);
+ Position pos = param->fPosition;
+ Variable* var = new Variable(pos, modifiers, std::move(name), *type,
+ Variable::kParameter_Storage);
+ fSymbolTable->takeOwnership(var);
+ parameters.push_back(var);
+ isGeneric |= type->kind() == Type::kGeneric_Kind;
+ }
+
+ // find existing declaration
+ const FunctionDeclaration* decl = nullptr;
+ auto entry = (*fSymbolTable)[f.fName];
+ if (entry) {
+ std::vector<const FunctionDeclaration*> functions;
+ switch (entry->fKind) {
+ case Symbol::kUnresolvedFunction_Kind:
+ functions = ((UnresolvedFunction*) entry)->fFunctions;
+ break;
+ case Symbol::kFunctionDeclaration_Kind:
+ functions.push_back((FunctionDeclaration*) entry);
+ break;
+ default:
+ fErrors.error(f.fPosition, "symbol '" + f.fName + "' was already defined");
+ return nullptr;
+ }
+ for (const auto& other : functions) {
+ ASSERT(other->fName == f.fName);
+ if (parameters.size() == other->fParameters.size()) {
+ bool match = true;
+ for (size_t i = 0; i < parameters.size(); i++) {
+ if (parameters[i]->fType != other->fParameters[i]->fType) {
+ match = false;
+ break;
+ }
+ }
+ if (match) {
+ if (*returnType != other->fReturnType) {
+ FunctionDeclaration newDecl(f.fPosition, f.fName, parameters, *returnType);
+ fErrors.error(f.fPosition, "functions '" + newDecl.description() +
+ "' and '" + other->description() +
+ "' differ only in return type");
+ return nullptr;
+ }
+ decl = other;
+ for (size_t i = 0; i < parameters.size(); i++) {
+ if (parameters[i]->fModifiers != other->fParameters[i]->fModifiers) {
+ fErrors.error(f.fPosition, "modifiers on parameter " +
+ to_string(i + 1) + " differ between " +
+ "declaration and definition");
+ return nullptr;
+ }
+ }
+ if (other->fDefined) {
+ fErrors.error(f.fPosition, "duplicate definition of " +
+ other->description());
+ }
+ break;
+ }
+ }
+ }
+ }
+ if (!decl) {
+ // couldn't find an existing declaration
+ if (isGeneric) {
+ ASSERT(!f.fBody);
+ expand_generics(FunctionDeclaration(f.fPosition, f.fName, parameters, *returnType),
+ fSymbolTable);
+ } else {
+ auto newDecl = std::unique_ptr<FunctionDeclaration>(new FunctionDeclaration(
+ f.fPosition,
+ f.fName,
+ parameters,
+ *returnType));
+ decl = newDecl.get();
+ fSymbolTable->add(decl->fName, std::move(newDecl));
+ }
+ }
+ if (f.fBody) {
+ ASSERT(!fCurrentFunction);
+ fCurrentFunction = decl;
+ decl->fDefined = true;
+ std::shared_ptr<SymbolTable> old = fSymbolTable;
+ AutoSymbolTable table(this);
+ for (size_t i = 0; i < parameters.size(); i++) {
+ fSymbolTable->addWithoutOwnership(parameters[i]->fName, decl->fParameters[i]);
+ }
+ std::unique_ptr<Block> body = this->convertBlock(*f.fBody);
+ fCurrentFunction = nullptr;
+ if (!body) {
+ return nullptr;
+ }
+ return std::unique_ptr<FunctionDefinition>(new FunctionDefinition(f.fPosition, *decl,
+ std::move(body)));
+ }
+ return nullptr;
+}
+
+std::unique_ptr<InterfaceBlock> IRGenerator::convertInterfaceBlock(const ASTInterfaceBlock& intf) {
+ std::shared_ptr<SymbolTable> old = fSymbolTable;
+ AutoSymbolTable table(this);
+ Modifiers mods = this->convertModifiers(intf.fModifiers);
+ std::vector<Type::Field> fields;
+ for (size_t i = 0; i < intf.fDeclarations.size(); i++) {
+ std::unique_ptr<VarDeclarations> decl = this->convertVarDeclarations(
+ *intf.fDeclarations[i],
+ Variable::kGlobal_Storage);
+ for (const auto& var : decl->fVars) {
+ fields.push_back(Type::Field(var.fVar->fModifiers, var.fVar->fName,
+ &var.fVar->fType));
+ if (var.fValue) {
+ fErrors.error(decl->fPosition,
+ "initializers are not permitted on interface block fields");
+ }
+ if (var.fVar->fModifiers.fFlags & (Modifiers::kIn_Flag |
+ Modifiers::kOut_Flag |
+ Modifiers::kUniform_Flag |
+ Modifiers::kConst_Flag)) {
+ fErrors.error(decl->fPosition,
+ "interface block fields may not have storage qualifiers");
+ }
+ }
+ }
+ Type* type = new Type(intf.fInterfaceName, fields);
+ fSymbolTable->takeOwnership(type);
+ std::string name = intf.fValueName.length() > 0 ? intf.fValueName : intf.fInterfaceName;
+ Variable* var = new Variable(intf.fPosition, mods, name, *type, Variable::kGlobal_Storage);
+ fSymbolTable->takeOwnership(var);
+ if (intf.fValueName.length()) {
+ old->addWithoutOwnership(intf.fValueName, var);
+ } else {
+ for (size_t i = 0; i < fields.size(); i++) {
+ old->add(fields[i].fName, std::unique_ptr<Field>(new Field(intf.fPosition, *var,
+ (int) i)));
+ }
+ }
+ return std::unique_ptr<InterfaceBlock>(new InterfaceBlock(intf.fPosition, *var, fSymbolTable));
+}
+
+const Type* IRGenerator::convertType(const ASTType& type) {
+ const Symbol* result = (*fSymbolTable)[type.fName];
+ if (result && result->fKind == Symbol::kType_Kind) {
+ return (const Type*) result;
+ }
+ fErrors.error(type.fPosition, "unknown type '" + type.fName + "'");
+ return nullptr;
+}
+
+std::unique_ptr<Expression> IRGenerator::convertExpression(const ASTExpression& expr) {
+ switch (expr.fKind) {
+ case ASTExpression::kIdentifier_Kind:
+ return this->convertIdentifier((ASTIdentifier&) expr);
+ case ASTExpression::kBool_Kind:
+ return std::unique_ptr<Expression>(new BoolLiteral(fContext, expr.fPosition,
+ ((ASTBoolLiteral&) expr).fValue));
+ case ASTExpression::kInt_Kind:
+ return std::unique_ptr<Expression>(new IntLiteral(fContext, expr.fPosition,
+ ((ASTIntLiteral&) expr).fValue));
+ case ASTExpression::kFloat_Kind:
+ return std::unique_ptr<Expression>(new FloatLiteral(fContext, expr.fPosition,
+ ((ASTFloatLiteral&) expr).fValue));
+ case ASTExpression::kBinary_Kind:
+ return this->convertBinaryExpression((ASTBinaryExpression&) expr);
+ case ASTExpression::kPrefix_Kind:
+ return this->convertPrefixExpression((ASTPrefixExpression&) expr);
+ case ASTExpression::kSuffix_Kind:
+ return this->convertSuffixExpression((ASTSuffixExpression&) expr);
+ case ASTExpression::kTernary_Kind:
+ return this->convertTernaryExpression((ASTTernaryExpression&) expr);
+ default:
+ ABORT("unsupported expression type: %d\n", expr.fKind);
+ }
+}
+
+std::unique_ptr<Expression> IRGenerator::convertIdentifier(const ASTIdentifier& identifier) {
+ const Symbol* result = (*fSymbolTable)[identifier.fText];
+ if (!result) {
+ fErrors.error(identifier.fPosition, "unknown identifier '" + identifier.fText + "'");
+ return nullptr;
+ }
+ switch (result->fKind) {
+ case Symbol::kFunctionDeclaration_Kind: {
+ std::vector<const FunctionDeclaration*> f = {
+ (const FunctionDeclaration*) result
+ };
+ return std::unique_ptr<FunctionReference>(new FunctionReference(fContext,
+ identifier.fPosition,
+ f));
+ }
+ case Symbol::kUnresolvedFunction_Kind: {
+ const UnresolvedFunction* f = (const UnresolvedFunction*) result;
+ return std::unique_ptr<FunctionReference>(new FunctionReference(fContext,
+ identifier.fPosition,
+ f->fFunctions));
+ }
+ case Symbol::kVariable_Kind: {
+ const Variable* var = (const Variable*) result;
+ this->markReadFrom(*var);
+ return std::unique_ptr<VariableReference>(new VariableReference(identifier.fPosition,
+ *var));
+ }
+ case Symbol::kField_Kind: {
+ const Field* field = (const Field*) result;
+ VariableReference* base = new VariableReference(identifier.fPosition, field->fOwner);
+ return std::unique_ptr<Expression>(new FieldAccess(
+ std::unique_ptr<Expression>(base),
+ field->fFieldIndex,
+ FieldAccess::kAnonymousInterfaceBlock_OwnerKind));
+ }
+ case Symbol::kType_Kind: {
+ const Type* t = (const Type*) result;
+ return std::unique_ptr<TypeReference>(new TypeReference(fContext, identifier.fPosition,
+ *t));
+ }
+ default:
+ ABORT("unsupported symbol type %d\n", result->fKind);
+ }
+
+}
+
+std::unique_ptr<Expression> IRGenerator::coerce(std::unique_ptr<Expression> expr,
+ const Type& type) {
+ if (!expr) {
+ return nullptr;
+ }
+ if (expr->fType == type) {
+ return expr;
+ }
+ this->checkValid(*expr);
+ if (expr->fType == *fContext.fInvalid_Type) {
+ return nullptr;
+ }
+ if (!expr->fType.canCoerceTo(type)) {
+ fErrors.error(expr->fPosition, "expected '" + type.description() + "', but found '" +
+ expr->fType.description() + "'");
+ return nullptr;
+ }
+ if (type.kind() == Type::kScalar_Kind) {
+ std::vector<std::unique_ptr<Expression>> args;
+ args.push_back(std::move(expr));
+ ASTIdentifier id(Position(), type.description());
+ std::unique_ptr<Expression> ctor = this->convertIdentifier(id);
+ ASSERT(ctor);
+ return this->call(Position(), std::move(ctor), std::move(args));
+ }
+ ABORT("cannot coerce %s to %s", expr->fType.description().c_str(),
+ type.description().c_str());
+}
+
+static bool is_matrix_multiply(const Type& left, const Type& right) {
+ if (left.kind() == Type::kMatrix_Kind) {
+ return right.kind() == Type::kMatrix_Kind || right.kind() == Type::kVector_Kind;
+ }
+ return left.kind() == Type::kVector_Kind && right.kind() == Type::kMatrix_Kind;
+}
+
+/**
+ * Determines the operand and result types of a binary expression. Returns true if the expression is
+ * legal, false otherwise. If false, the values of the out parameters are undefined.
+ */
+static bool determine_binary_type(const Context& context,
+ Token::Kind op,
+ const Type& left,
+ const Type& right,
+ const Type** outLeftType,
+ const Type** outRightType,
+ const Type** outResultType,
+ bool tryFlipped) {
+ bool isLogical;
+ switch (op) {
+ case Token::EQEQ: // fall through
+ case Token::NEQ: // fall through
+ case Token::LT: // fall through
+ case Token::GT: // fall through
+ case Token::LTEQ: // fall through
+ case Token::GTEQ:
+ isLogical = true;
+ break;
+ case Token::LOGICALOR: // fall through
+ case Token::LOGICALAND: // fall through
+ case Token::LOGICALXOR: // fall through
+ case Token::LOGICALOREQ: // fall through
+ case Token::LOGICALANDEQ: // fall through
+ case Token::LOGICALXOREQ:
+ *outLeftType = context.fBool_Type.get();
+ *outRightType = context.fBool_Type.get();
+ *outResultType = context.fBool_Type.get();
+ return left.canCoerceTo(*context.fBool_Type) &&
+ right.canCoerceTo(*context.fBool_Type);
+ case Token::STAR: // fall through
+ case Token::STAREQ:
+ if (is_matrix_multiply(left, right)) {
+ // determine final component type
+ if (determine_binary_type(context, Token::STAR, left.componentType(),
+ right.componentType(), outLeftType, outRightType,
+ outResultType, false)) {
+ *outLeftType = &(*outResultType)->toCompound(context, left.columns(),
+ left.rows());
+ *outRightType = &(*outResultType)->toCompound(context, right.columns(),
+ right.rows());
+ int leftColumns = left.columns();
+ int leftRows = left.rows();
+ int rightColumns;
+ int rightRows;
+ if (right.kind() == Type::kVector_Kind) {
+ // matrix * vector treats the vector as a column vector, so we need to
+ // transpose it
+ rightColumns = right.rows();
+ rightRows = right.columns();
+ ASSERT(rightColumns == 1);
+ } else {
+ rightColumns = right.columns();
+ rightRows = right.rows();
+ }
+ if (rightColumns > 1) {
+ *outResultType = &(*outResultType)->toCompound(context, rightColumns,
+ leftRows);
+ } else {
+ // result was a column vector, transpose it back to a row
+ *outResultType = &(*outResultType)->toCompound(context, leftRows,
+ rightColumns);
+ }
+ return leftColumns == rightRows;
+ } else {
+ return false;
+ }
+ }
+ // fall through
+ default:
+ isLogical = false;
+ }
+ // FIXME: need to disallow illegal operations like vec3 > vec3. Also do not currently have
+ // full support for numbers other than float.
+ if (left == right) {
+ *outLeftType = &left;
+ *outRightType = &left;
+ if (isLogical) {
+ *outResultType = context.fBool_Type.get();
+ } else {
+ *outResultType = &left;
+ }
+ return true;
+ }
+ // FIXME: incorrect for shift operations
+ if (left.canCoerceTo(right)) {
+ *outLeftType = &right;
+ *outRightType = &right;
+ if (isLogical) {
+ *outResultType = context.fBool_Type.get();
+ } else {
+ *outResultType = &right;
+ }
+ return true;
+ }
+ if ((left.kind() == Type::kVector_Kind || left.kind() == Type::kMatrix_Kind) &&
+ (right.kind() == Type::kScalar_Kind)) {
+ if (determine_binary_type(context, op, left.componentType(), right, outLeftType,
+ outRightType, outResultType, false)) {
+ *outLeftType = &(*outLeftType)->toCompound(context, left.columns(), left.rows());
+ if (!isLogical) {
+ *outResultType = &(*outResultType)->toCompound(context, left.columns(),
+ left.rows());
+ }
+ return true;
+ }
+ return false;
+ }
+ if (tryFlipped) {
+ return determine_binary_type(context, op, right, left, outRightType, outLeftType,
+ outResultType, false);
+ }
+ return false;
+}
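+
+// Worked example of the matrix-multiply branch above: for mat3 * vec3 the
+// vector is treated as a column vector (3 rows, 1 column), the check
+// leftColumns == rightRows (3 == 3) passes, and the result is rebuilt as a
+// 3-component vector, so mat3 * vec3 yields vec3.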
+
+std::unique_ptr<Expression> IRGenerator::convertBinaryExpression(
+ const ASTBinaryExpression& expression) {
+ std::unique_ptr<Expression> left = this->convertExpression(*expression.fLeft);
+ if (!left) {
+ return nullptr;
+ }
+ std::unique_ptr<Expression> right = this->convertExpression(*expression.fRight);
+ if (!right) {
+ return nullptr;
+ }
+ const Type* leftType;
+ const Type* rightType;
+ const Type* resultType;
+ if (!determine_binary_type(fContext, expression.fOperator, left->fType, right->fType, &leftType,
+ &rightType, &resultType, true)) {
+ fErrors.error(expression.fPosition, "type mismatch: '" +
+ Token::OperatorName(expression.fOperator) +
+ "' cannot operate on '" + left->fType.fName +
+ "', '" + right->fType.fName + "'");
+ return nullptr;
+ }
+ switch (expression.fOperator) {
+ case Token::EQ: // fall through
+ case Token::PLUSEQ: // fall through
+ case Token::MINUSEQ: // fall through
+ case Token::STAREQ: // fall through
+ case Token::SLASHEQ: // fall through
+ case Token::PERCENTEQ: // fall through
+ case Token::SHLEQ: // fall through
+ case Token::SHREQ: // fall through
+ case Token::BITWISEOREQ: // fall through
+ case Token::BITWISEXOREQ: // fall through
+ case Token::BITWISEANDEQ: // fall through
+ case Token::LOGICALOREQ: // fall through
+ case Token::LOGICALXOREQ: // fall through
+ case Token::LOGICALANDEQ:
+ this->markWrittenTo(*left);
+ default:
+ break;
+ }
+ return std::unique_ptr<Expression>(new BinaryExpression(expression.fPosition,
+ this->coerce(std::move(left),
+ *leftType),
+ expression.fOperator,
+ this->coerce(std::move(right),
+ *rightType),
+ *resultType));
+}
+
+std::unique_ptr<Expression> IRGenerator::convertTernaryExpression(
+ const ASTTernaryExpression& expression) {
+ std::unique_ptr<Expression> test = this->coerce(this->convertExpression(*expression.fTest),
+ *fContext.fBool_Type);
+ if (!test) {
+ return nullptr;
+ }
+ std::unique_ptr<Expression> ifTrue = this->convertExpression(*expression.fIfTrue);
+ if (!ifTrue) {
+ return nullptr;
+ }
+ std::unique_ptr<Expression> ifFalse = this->convertExpression(*expression.fIfFalse);
+ if (!ifFalse) {
+ return nullptr;
+ }
+ const Type* trueType;
+ const Type* falseType;
+ const Type* resultType;
+ if (!determine_binary_type(fContext, Token::EQEQ, ifTrue->fType, ifFalse->fType, &trueType,
+ &falseType, &resultType, true)) {
+ fErrors.error(expression.fPosition, "ternary operator result mismatch: '" +
+ ifTrue->fType.fName + "', '" +
+ ifFalse->fType.fName + "'");
+ return nullptr;
+ }
+ ASSERT(trueType == falseType);
+ ifTrue = this->coerce(std::move(ifTrue), *trueType);
+ ifFalse = this->coerce(std::move(ifFalse), *falseType);
+ return std::unique_ptr<Expression>(new TernaryExpression(expression.fPosition,
+ std::move(test),
+ std::move(ifTrue),
+ std::move(ifFalse)));
+}
+
+std::unique_ptr<Expression> IRGenerator::call(Position position,
+ const FunctionDeclaration& function,
+ std::vector<std::unique_ptr<Expression>> arguments) {
+ if (function.fParameters.size() != arguments.size()) {
+ std::string msg = "call to '" + function.fName + "' expected " +
+ to_string(function.fParameters.size()) +
+ " argument";
+ if (function.fParameters.size() != 1) {
+ msg += "s";
+ }
+ msg += ", but found " + to_string(arguments.size());
+ fErrors.error(position, msg);
+ return nullptr;
+ }
+ for (size_t i = 0; i < arguments.size(); i++) {
+ arguments[i] = this->coerce(std::move(arguments[i]), function.fParameters[i]->fType);
+ if (arguments[i] && (function.fParameters[i]->fModifiers.fFlags & Modifiers::kOut_Flag)) {
+ this->markWrittenTo(*arguments[i]);
+ }
+ }
+ return std::unique_ptr<FunctionCall>(new FunctionCall(position, function,
+ std::move(arguments)));
+}
+
+/**
+ * Determines the cost of coercing the arguments of a function to the required types. Returns true
+ * if the cost could be computed, false if the call is not valid. Cost has no particular meaning
+ * other than "lower costs are preferred".
+ */
+bool IRGenerator::determineCallCost(const FunctionDeclaration& function,
+ const std::vector<std::unique_ptr<Expression>>& arguments,
+ int* outCost) {
+ if (function.fParameters.size() != arguments.size()) {
+ return false;
+ }
+ int total = 0;
+ for (size_t i = 0; i < arguments.size(); i++) {
+ int cost;
+ if (arguments[i]->fType.determineCoercionCost(function.fParameters[i]->fType, &cost)) {
+ total += cost;
+ } else {
+ return false;
+ }
+ }
+ *outCost = total;
+ return true;
+}
+
+std::unique_ptr<Expression> IRGenerator::call(Position position,
+ std::unique_ptr<Expression> functionValue,
+ std::vector<std::unique_ptr<Expression>> arguments) {
+ if (functionValue->fKind == Expression::kTypeReference_Kind) {
+ return this->convertConstructor(position,
+ ((TypeReference&) *functionValue).fValue,
+ std::move(arguments));
+ }
+ if (functionValue->fKind != Expression::kFunctionReference_Kind) {
+ fErrors.error(position, "'" + functionValue->description() + "' is not a function");
+ return nullptr;
+ }
+ FunctionReference* ref = (FunctionReference*) functionValue.get();
+ int bestCost = INT_MAX;
+ const FunctionDeclaration* best = nullptr;
+ if (ref->fFunctions.size() > 1) {
+ for (const auto& f : ref->fFunctions) {
+ int cost;
+ if (this->determineCallCost(*f, arguments, &cost) && cost < bestCost) {
+ bestCost = cost;
+ best = f;
+ }
+ }
+ if (best) {
+ return this->call(position, *best, std::move(arguments));
+ }
+ std::string msg = "no match for " + ref->fFunctions[0]->fName + "(";
+ std::string separator = "";
+ for (size_t i = 0; i < arguments.size(); i++) {
+ msg += separator;
+ separator = ", ";
+ msg += arguments[i]->fType.description();
+ }
+ msg += ")";
+ fErrors.error(position, msg);
+ return nullptr;
+ }
+ return this->call(position, *ref->fFunctions[0], std::move(arguments));
+}
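+
+// Overload resolution sketch: when there is more than one candidate,
+// determineCallCost() sums the per-argument coercion costs and the loop above
+// keeps the candidate with the strictly lowest total, so ties go to the
+// earlier declaration in fFunctions.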
+
+std::unique_ptr<Expression> IRGenerator::convertConstructor(
+ Position position,
+ const Type& type,
+ std::vector<std::unique_ptr<Expression>> args) {
+ // FIXME: add support for structs and arrays
+ Type::Kind kind = type.kind();
+ if (!type.isNumber() && kind != Type::kVector_Kind && kind != Type::kMatrix_Kind) {
+ fErrors.error(position, "cannot construct '" + type.description() + "'");
+ return nullptr;
+ }
+ if (type == *fContext.fFloat_Type && args.size() == 1 &&
+ args[0]->fKind == Expression::kIntLiteral_Kind) {
+ int64_t value = ((IntLiteral&) *args[0]).fValue;
+ return std::unique_ptr<Expression>(new FloatLiteral(fContext, position, (double) value));
+ }
+ if (args.size() == 1 && args[0]->fType == type) {
+ // argument is already the right type, just return it
+ return std::move(args[0]);
+ }
+ if (type.isNumber()) {
+ if (args.size() != 1) {
+ fErrors.error(position, "invalid arguments to '" + type.description() +
+ "' constructor, (expected exactly 1 argument, but found " +
+ to_string(args.size()) + ")");
+ }
+ if (args[0]->fType == *fContext.fBool_Type) {
+ std::unique_ptr<IntLiteral> zero(new IntLiteral(fContext, position, 0));
+ std::unique_ptr<IntLiteral> one(new IntLiteral(fContext, position, 1));
+ return std::unique_ptr<Expression>(
+ new TernaryExpression(position, std::move(args[0]),
+ this->coerce(std::move(one), type),
+ this->coerce(std::move(zero),
+ type)));
+ } else if (!args[0]->fType.isNumber()) {
+ fErrors.error(position, "invalid argument to '" + type.description() +
+ "' constructor (expected a number or bool, but found '" +
+ args[0]->fType.description() + "')");
+ }
+ } else {
+ ASSERT(kind == Type::kVector_Kind || kind == Type::kMatrix_Kind);
+ int actual = 0;
+ for (size_t i = 0; i < args.size(); i++) {
+ if (args[i]->fType.kind() == Type::kVector_Kind ||
+ args[i]->fType.kind() == Type::kMatrix_Kind) {
+ int columns = args[i]->fType.columns();
+ int rows = args[i]->fType.rows();
+ args[i] = this->coerce(std::move(args[i]),
+ type.componentType().toCompound(fContext, columns, rows));
+ actual += args[i]->fType.rows() * args[i]->fType.columns();
+ } else if (args[i]->fType.kind() == Type::kScalar_Kind) {
+ actual += 1;
+ if (type.kind() != Type::kScalar_Kind) {
+ args[i] = this->coerce(std::move(args[i]), type.componentType());
+ }
+ } else {
+ fErrors.error(position, "'" + args[i]->fType.description() + "' is not a valid "
+ "parameter to '" + type.description() + "' constructor");
+ return nullptr;
+ }
+ }
+ int min = type.rows() * type.columns();
+ int max = type.columns() > 1 ? INT_MAX : min;
+ if ((actual < min || actual > max) &&
+ !((kind == Type::kVector_Kind || kind == Type::kMatrix_Kind) && (actual == 1))) {
+ fErrors.error(position, "invalid arguments to '" + type.description() +
+ "' constructor (expected " + to_string(min) + " scalar" +
+ (min == 1 ? "" : "s") + ", but found " + to_string(actual) +
+ ")");
+ return nullptr;
+ }
+ }
+ return std::unique_ptr<Expression>(new Constructor(position, std::move(type), std::move(args)));
+}
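+
+// Examples of what the checks above accept, assuming GLSL-style scalar and
+// vector types from the Context: float(3) folds the int literal straight into
+// a FloatLiteral; vec4(v3, 1.0) supplies exactly rows * columns = 4 scalars;
+// and vec3(1.0) is allowed by the special actual == 1 case even though min
+// is 3.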
+
+std::unique_ptr<Expression> IRGenerator::convertPrefixExpression(
+ const ASTPrefixExpression& expression) {
+ std::unique_ptr<Expression> base = this->convertExpression(*expression.fOperand);
+ if (!base) {
+ return nullptr;
+ }
+ switch (expression.fOperator) {
+ case Token::PLUS:
+ if (!base->fType.isNumber() && base->fType.kind() != Type::kVector_Kind) {
+ fErrors.error(expression.fPosition,
+ "'+' cannot operate on '" + base->fType.description() + "'");
+ return nullptr;
+ }
+ return base;
+ case Token::MINUS:
+ if (!base->fType.isNumber() && base->fType.kind() != Type::kVector_Kind) {
+ fErrors.error(expression.fPosition,
+ "'-' cannot operate on '" + base->fType.description() + "'");
+ return nullptr;
+ }
+ if (base->fKind == Expression::kIntLiteral_Kind) {
+ return std::unique_ptr<Expression>(new IntLiteral(fContext, base->fPosition,
+ -((IntLiteral&) *base).fValue));
+ }
+ if (base->fKind == Expression::kFloatLiteral_Kind) {
+ double value = -((FloatLiteral&) *base).fValue;
+ return std::unique_ptr<Expression>(new FloatLiteral(fContext, base->fPosition,
+ value));
+ }
+ return std::unique_ptr<Expression>(new PrefixExpression(Token::MINUS, std::move(base)));
+ case Token::PLUSPLUS:
+ if (!base->fType.isNumber()) {
+ fErrors.error(expression.fPosition,
+ "'" + Token::OperatorName(expression.fOperator) +
+ "' cannot operate on '" + base->fType.description() + "'");
+ return nullptr;
+ }
+ this->markWrittenTo(*base);
+ break;
+ case Token::MINUSMINUS:
+ if (!base->fType.isNumber()) {
+ fErrors.error(expression.fPosition,
+ "'" + Token::OperatorName(expression.fOperator) +
+ "' cannot operate on '" + base->fType.description() + "'");
+ return nullptr;
+ }
+ this->markWrittenTo(*base);
+ break;
+ case Token::NOT:
+ if (base->fType != *fContext.fBool_Type) {
+ fErrors.error(expression.fPosition,
+ "'" + Token::OperatorName(expression.fOperator) +
+ "' cannot operate on '" + base->fType.description() + "'");
+ return nullptr;
+ }
+ break;
+ default:
+ ABORT("unsupported prefix operator\n");
+ }
+ return std::unique_ptr<Expression>(new PrefixExpression(expression.fOperator,
+ std::move(base)));
+}
+
+std::unique_ptr<Expression> IRGenerator::convertIndex(std::unique_ptr<Expression> base,
+ const ASTExpression& index) {
+ if (base->fType.kind() != Type::kArray_Kind && base->fType.kind() != Type::kMatrix_Kind) {
+ fErrors.error(base->fPosition, "expected array, but found '" + base->fType.description() +
+ "'");
+ return nullptr;
+ }
+ std::unique_ptr<Expression> converted = this->convertExpression(index);
+ if (!converted) {
+ return nullptr;
+ }
+ converted = this->coerce(std::move(converted), *fContext.fInt_Type);
+ if (!converted) {
+ return nullptr;
+ }
+ return std::unique_ptr<Expression>(new IndexExpression(fContext, std::move(base),
+ std::move(converted)));
+}
+
+std::unique_ptr<Expression> IRGenerator::convertField(std::unique_ptr<Expression> base,
+ const std::string& field) {
+ auto fields = base->fType.fields();
+ for (size_t i = 0; i < fields.size(); i++) {
+ if (fields[i].fName == field) {
+ return std::unique_ptr<Expression>(new FieldAccess(std::move(base), (int) i));
+ }
+ }
+ fErrors.error(base->fPosition, "type '" + base->fType.description() + "' does not have a "
+ "field named '" + field + "");
+ return nullptr;
+}
+
+std::unique_ptr<Expression> IRGenerator::convertSwizzle(std::unique_ptr<Expression> base,
+ const std::string& fields) {
+ if (base->fType.kind() != Type::kVector_Kind) {
+ fErrors.error(base->fPosition, "cannot swizzle type '" + base->fType.description() + "'");
+ return nullptr;
+ }
+ std::vector<int> swizzleComponents;
+ for (char c : fields) {
+ switch (c) {
+ case 'x': // fall through
+ case 'r': // fall through
+ case 's':
+ swizzleComponents.push_back(0);
+ break;
+ case 'y': // fall through
+ case 'g': // fall through
+ case 't':
+ if (base->fType.columns() >= 2) {
+ swizzleComponents.push_back(1);
+ break;
+ }
+ // fall through
+ case 'z': // fall through
+ case 'b': // fall through
+ case 'p':
+ if (base->fType.columns() >= 3) {
+ swizzleComponents.push_back(2);
+ break;
+ }
+ // fall through
+ case 'w': // fall through
+ case 'a': // fall through
+ case 'q':
+ if (base->fType.columns() >= 4) {
+ swizzleComponents.push_back(3);
+ break;
+ }
+ // fall through
+ default:
+ fErrors.error(base->fPosition, "invalid swizzle component '" + std::string(1, c) +
+ "'");
+ return nullptr;
+ }
+ }
+ ASSERT(swizzleComponents.size() > 0);
+ if (swizzleComponents.size() > 4) {
+ fErrors.error(base->fPosition, "too many components in swizzle mask '" + fields + "'");
+ return nullptr;
+ }
+ return std::unique_ptr<Expression>(new Swizzle(fContext, std::move(base), swizzleComponents));
+}
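+
+// For a vec4 base, the mask "xyz" maps to components {0, 1, 2}; the x/r/s,
+// y/g/t, z/b/p and w/a/q name sets are interchangeable, components past the
+// base type's column count fall through to the error case, and masks longer
+// than four components are rejected.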
+
+std::unique_ptr<Expression> IRGenerator::convertSuffixExpression(
+ const ASTSuffixExpression& expression) {
+ std::unique_ptr<Expression> base = this->convertExpression(*expression.fBase);
+ if (!base) {
+ return nullptr;
+ }
+ switch (expression.fSuffix->fKind) {
+ case ASTSuffix::kIndex_Kind:
+ return this->convertIndex(std::move(base),
+ *((ASTIndexSuffix&) *expression.fSuffix).fExpression);
+ case ASTSuffix::kCall_Kind: {
+ auto rawArguments = &((ASTCallSuffix&) *expression.fSuffix).fArguments;
+ std::vector<std::unique_ptr<Expression>> arguments;
+ for (size_t i = 0; i < rawArguments->size(); i++) {
+ std::unique_ptr<Expression> converted =
+ this->convertExpression(*(*rawArguments)[i]);
+ if (!converted) {
+ return nullptr;
+ }
+ arguments.push_back(std::move(converted));
+ }
+ return this->call(expression.fPosition, std::move(base), std::move(arguments));
+ }
+ case ASTSuffix::kField_Kind: {
+ switch (base->fType.kind()) {
+ case Type::kVector_Kind:
+ return this->convertSwizzle(std::move(base),
+ ((ASTFieldSuffix&) *expression.fSuffix).fField);
+ case Type::kStruct_Kind:
+ return this->convertField(std::move(base),
+ ((ASTFieldSuffix&) *expression.fSuffix).fField);
+ default:
+ fErrors.error(base->fPosition, "cannot swizzle value of type '" +
+ base->fType.description() + "'");
+ return nullptr;
+ }
+ }
+ case ASTSuffix::kPostIncrement_Kind:
+ if (!base->fType.isNumber()) {
+ fErrors.error(expression.fPosition,
+ "'++' cannot operate on '" + base->fType.description() + "'");
+ return nullptr;
+ }
+ this->markWrittenTo(*base);
+ return std::unique_ptr<Expression>(new PostfixExpression(std::move(base),
+ Token::PLUSPLUS));
+ case ASTSuffix::kPostDecrement_Kind:
+ if (!base->fType.isNumber()) {
+ fErrors.error(expression.fPosition,
+ "'--' cannot operate on '" + base->fType.description() + "'");
+ return nullptr;
+ }
+ this->markWrittenTo(*base);
+ return std::unique_ptr<Expression>(new PostfixExpression(std::move(base),
+ Token::MINUSMINUS));
+ default:
+ ABORT("unsupported suffix operator");
+ }
+}
+
+void IRGenerator::checkValid(const Expression& expr) {
+ switch (expr.fKind) {
+ case Expression::kFunctionReference_Kind:
+ fErrors.error(expr.fPosition, "expected '(' to begin function call");
+ break;
+ case Expression::kTypeReference_Kind:
+ fErrors.error(expr.fPosition, "expected '(' to begin constructor invocation");
+ break;
+ default:
+ ASSERT(expr.fType != *fContext.fInvalid_Type);
+ break;
+ }
+}
+
+void IRGenerator::markReadFrom(const Variable& var) {
+ var.fIsReadFrom = true;
+}
+
+static bool has_duplicates(const Swizzle& swizzle) {
+ int bits = 0;
+ for (int idx : swizzle.fComponents) {
+ ASSERT(idx >= 0 && idx <= 3);
+ int bit = 1 << idx;
+ if (bits & bit) {
+ return true;
+ }
+ bits |= bit;
+ }
+ return false;
+}
+
+void IRGenerator::markWrittenTo(const Expression& expr) {
+ switch (expr.fKind) {
+ case Expression::kVariableReference_Kind: {
+ const Variable& var = ((VariableReference&) expr).fVariable;
+ if (var.fModifiers.fFlags & (Modifiers::kConst_Flag | Modifiers::kUniform_Flag)) {
+ fErrors.error(expr.fPosition,
+ "cannot modify immutable variable '" + var.fName + "'");
+ }
+ var.fIsWrittenTo = true;
+ break;
+ }
+ case Expression::kFieldAccess_Kind:
+ this->markWrittenTo(*((FieldAccess&) expr).fBase);
+ break;
+ case Expression::kSwizzle_Kind:
+ if (has_duplicates((Swizzle&) expr)) {
+ fErrors.error(expr.fPosition,
+ "cannot write to the same swizzle field more than once");
+ }
+ this->markWrittenTo(*((Swizzle&) expr).fBase);
+ break;
+ case Expression::kIndex_Kind:
+ this->markWrittenTo(*((IndexExpression&) expr).fBase);
+ break;
+ default:
+ fErrors.error(expr.fPosition, "cannot assign to '" + expr.description() + "'");
+ break;
+ }
+}
+
+}
diff --git a/gfx/skia/skia/src/sksl/SkSLIRGenerator.h b/gfx/skia/skia/src/sksl/SkSLIRGenerator.h
new file mode 100644
index 000000000..a3ff210b4
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLIRGenerator.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_IRGENERATOR
+#define SKSL_IRGENERATOR
+
+#include "SkSLErrorReporter.h"
+#include "ast/SkSLASTBinaryExpression.h"
+#include "ast/SkSLASTBlock.h"
+#include "ast/SkSLASTBreakStatement.h"
+#include "ast/SkSLASTCallSuffix.h"
+#include "ast/SkSLASTContinueStatement.h"
+#include "ast/SkSLASTDiscardStatement.h"
+#include "ast/SkSLASTDoStatement.h"
+#include "ast/SkSLASTExpression.h"
+#include "ast/SkSLASTExpressionStatement.h"
+#include "ast/SkSLASTExtension.h"
+#include "ast/SkSLASTForStatement.h"
+#include "ast/SkSLASTFunction.h"
+#include "ast/SkSLASTIdentifier.h"
+#include "ast/SkSLASTIfStatement.h"
+#include "ast/SkSLASTInterfaceBlock.h"
+#include "ast/SkSLASTModifiers.h"
+#include "ast/SkSLASTPrefixExpression.h"
+#include "ast/SkSLASTReturnStatement.h"
+#include "ast/SkSLASTStatement.h"
+#include "ast/SkSLASTSuffixExpression.h"
+#include "ast/SkSLASTTernaryExpression.h"
+#include "ast/SkSLASTVarDeclaration.h"
+#include "ast/SkSLASTVarDeclarationStatement.h"
+#include "ast/SkSLASTWhileStatement.h"
+#include "ir/SkSLBlock.h"
+#include "ir/SkSLExpression.h"
+#include "ir/SkSLExtension.h"
+#include "ir/SkSLFunctionDefinition.h"
+#include "ir/SkSLInterfaceBlock.h"
+#include "ir/SkSLModifiers.h"
+#include "ir/SkSLSymbolTable.h"
+#include "ir/SkSLStatement.h"
+#include "ir/SkSLType.h"
+#include "ir/SkSLTypeReference.h"
+#include "ir/SkSLVarDeclaration.h"
+
+namespace SkSL {
+
+/**
+ * Performs semantic analysis on an abstract syntax tree (AST) and produces the corresponding
+ * (unoptimized) intermediate representation (IR).
+ */
+class IRGenerator {
+public:
+ IRGenerator(const Context* context, std::shared_ptr<SymbolTable> root,
+ ErrorReporter& errorReporter);
+
+ std::unique_ptr<VarDeclarations> convertVarDeclarations(const ASTVarDeclarations& decl,
+ Variable::Storage storage);
+ std::unique_ptr<FunctionDefinition> convertFunction(const ASTFunction& f);
+ std::unique_ptr<Statement> convertStatement(const ASTStatement& statement);
+ std::unique_ptr<Expression> convertExpression(const ASTExpression& expression);
+
+private:
+ void pushSymbolTable();
+ void popSymbolTable();
+
+ const Type* convertType(const ASTType& type);
+ std::unique_ptr<Expression> call(Position position,
+ const FunctionDeclaration& function,
+ std::vector<std::unique_ptr<Expression>> arguments);
+ bool determineCallCost(const FunctionDeclaration& function,
+ const std::vector<std::unique_ptr<Expression>>& arguments,
+ int* outCost);
+ std::unique_ptr<Expression> call(Position position, std::unique_ptr<Expression> function,
+ std::vector<std::unique_ptr<Expression>> arguments);
+ std::unique_ptr<Expression> coerce(std::unique_ptr<Expression> expr, const Type& type);
+ std::unique_ptr<Block> convertBlock(const ASTBlock& block);
+ std::unique_ptr<Statement> convertBreak(const ASTBreakStatement& b);
+ std::unique_ptr<Expression> convertConstructor(Position position,
+ const Type& type,
+ std::vector<std::unique_ptr<Expression>> params);
+ std::unique_ptr<Statement> convertContinue(const ASTContinueStatement& c);
+ std::unique_ptr<Statement> convertDiscard(const ASTDiscardStatement& d);
+ std::unique_ptr<Statement> convertDo(const ASTDoStatement& d);
+ std::unique_ptr<Expression> convertBinaryExpression(const ASTBinaryExpression& expression);
+ std::unique_ptr<Extension> convertExtension(const ASTExtension& e);
+ std::unique_ptr<Statement> convertExpressionStatement(const ASTExpressionStatement& s);
+ std::unique_ptr<Statement> convertFor(const ASTForStatement& f);
+ std::unique_ptr<Expression> convertIdentifier(const ASTIdentifier& identifier);
+ std::unique_ptr<Statement> convertIf(const ASTIfStatement& s);
+ std::unique_ptr<Expression> convertIndex(std::unique_ptr<Expression> base,
+ const ASTExpression& index);
+ std::unique_ptr<InterfaceBlock> convertInterfaceBlock(const ASTInterfaceBlock& s);
+ Modifiers convertModifiers(const ASTModifiers& m);
+ std::unique_ptr<Expression> convertPrefixExpression(const ASTPrefixExpression& expression);
+ std::unique_ptr<Statement> convertReturn(const ASTReturnStatement& r);
+ std::unique_ptr<Expression> convertSuffixExpression(const ASTSuffixExpression& expression);
+ std::unique_ptr<Expression> convertField(std::unique_ptr<Expression> base,
+ const std::string& field);
+ std::unique_ptr<Expression> convertSwizzle(std::unique_ptr<Expression> base,
+ const std::string& fields);
+ std::unique_ptr<Expression> convertTernaryExpression(const ASTTernaryExpression& expression);
+ std::unique_ptr<Statement> convertVarDeclarationStatement(const ASTVarDeclarationStatement& s);
+ std::unique_ptr<Statement> convertWhile(const ASTWhileStatement& w);
+
+ void checkValid(const Expression& expr);
+ void markReadFrom(const Variable& var);
+ void markWrittenTo(const Expression& expr);
+
+ const Context& fContext;
+ const FunctionDeclaration* fCurrentFunction;
+ std::shared_ptr<SymbolTable> fSymbolTable;
+ ErrorReporter& fErrors;
+
+ friend class AutoSymbolTable;
+ friend class Compiler;
+};
+
+}
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLMain.cpp b/gfx/skia/skia/src/sksl/SkSLMain.cpp
new file mode 100644
index 000000000..24fbb6c26
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLMain.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "stdio.h"
+#include <fstream>
+#include "SkSLCompiler.h"
+
+/**
+ * Very simple standalone executable to facilitate testing.
+ */
+int main(int argc, const char** argv) {
+ if (argc != 3) {
+ printf("usage: skslc <input> <output>\n");
+ exit(1);
+ }
+ SkSL::Program::Kind kind;
+ size_t len = strlen(argv[1]);
+ if (len > 5 && !strcmp(argv[1] + strlen(argv[1]) - 5, ".vert")) {
+ kind = SkSL::Program::kVertex_Kind;
+ } else if (len > 5 && !strcmp(argv[1] + strlen(argv[1]) - 5, ".frag")) {
+ kind = SkSL::Program::kFragment_Kind;
+ } else {
+ printf("input filename must end in '.vert' or '.frag'\n");
+ exit(1);
+ }
+
+ std::ifstream in(argv[1]);
+ std::string text((std::istreambuf_iterator<char>(in)),
+ std::istreambuf_iterator<char>());
+ if (in.rdstate()) {
+ printf("error reading '%s'\n", argv[1]);
+ exit(2);
+ }
+ std::ofstream out(argv[2], std::ofstream::binary);
+ SkSL::Compiler compiler;
+ if (!compiler.toSPIRV(kind, text, out)) {
+ printf("%s", compiler.errorText().c_str());
+ exit(3);
+ }
+ if (out.rdstate()) {
+ printf("error writing '%s'\n", argv[2]);
+ exit(4);
+ }
+}
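+
+// Example invocation (the output name is arbitrary; only the input extension
+// is checked, and the output is the binary SPIR-V produced by toSPIRV()):
+//
+//     skslc blur.frag blur.spv
+//
+// ".vert" selects kVertex_Kind, ".frag" selects kFragment_Kind, and anything
+// else exits with the usage error above.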
diff --git a/gfx/skia/skia/src/sksl/SkSLParser.cpp b/gfx/skia/skia/src/sksl/SkSLParser.cpp
new file mode 100644
index 000000000..b240e4501
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLParser.cpp
@@ -0,0 +1,1407 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "stdio.h"
+#include "SkSLParser.h"
+#include "SkSLToken.h"
+
+#define register
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunneeded-internal-declaration"
+#pragma clang diagnostic ignored "-Wnull-conversion"
+#pragma clang diagnostic ignored "-Wsign-compare"
+#endif
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wsign-compare"
+#endif
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable:4018)
+#endif
+#include "lex.sksl.c"
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+#undef register
+
+#include "ast/SkSLASTBinaryExpression.h"
+#include "ast/SkSLASTBlock.h"
+#include "ast/SkSLASTBoolLiteral.h"
+#include "ast/SkSLASTBreakStatement.h"
+#include "ast/SkSLASTCallSuffix.h"
+#include "ast/SkSLASTContinueStatement.h"
+#include "ast/SkSLASTDiscardStatement.h"
+#include "ast/SkSLASTDoStatement.h"
+#include "ast/SkSLASTExpression.h"
+#include "ast/SkSLASTExpressionStatement.h"
+#include "ast/SkSLASTExtension.h"
+#include "ast/SkSLASTFieldSuffix.h"
+#include "ast/SkSLASTFloatLiteral.h"
+#include "ast/SkSLASTForStatement.h"
+#include "ast/SkSLASTFunction.h"
+#include "ast/SkSLASTIdentifier.h"
+#include "ast/SkSLASTIfStatement.h"
+#include "ast/SkSLASTIndexSuffix.h"
+#include "ast/SkSLASTInterfaceBlock.h"
+#include "ast/SkSLASTIntLiteral.h"
+#include "ast/SkSLASTParameter.h"
+#include "ast/SkSLASTPrefixExpression.h"
+#include "ast/SkSLASTReturnStatement.h"
+#include "ast/SkSLASTStatement.h"
+#include "ast/SkSLASTSuffixExpression.h"
+#include "ast/SkSLASTTernaryExpression.h"
+#include "ast/SkSLASTType.h"
+#include "ast/SkSLASTVarDeclaration.h"
+#include "ast/SkSLASTVarDeclarationStatement.h"
+#include "ast/SkSLASTWhileStatement.h"
+#include "ir/SkSLSymbolTable.h"
+#include "ir/SkSLType.h"
+
+namespace SkSL {
+
+Parser::Parser(std::string text, SymbolTable& types, ErrorReporter& errors)
+: fPushback(Position(-1, -1), Token::INVALID_TOKEN, "")
+, fTypes(types)
+, fErrors(errors) {
+ sksllex_init(&fScanner);
+ fBuffer = sksl_scan_string(text.c_str(), fScanner);
+ skslset_lineno(1, fScanner);
+
+ if (false) {
+ // avoid unused warning
+ yyunput(0, nullptr, fScanner);
+ }
+}
+
+Parser::~Parser() {
+ sksl_delete_buffer(fBuffer, fScanner);
+ sksllex_destroy(fScanner);
+}
+
+/* (precision | directive | declaration)* END_OF_FILE */
+std::vector<std::unique_ptr<ASTDeclaration>> Parser::file() {
+ std::vector<std::unique_ptr<ASTDeclaration>> result;
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::END_OF_FILE:
+ return result;
+ case Token::PRECISION:
+ this->precision();
+ break;
+ case Token::DIRECTIVE: {
+ std::unique_ptr<ASTDeclaration> decl = this->directive();
+ if (decl) {
+ result.push_back(std::move(decl));
+ }
+ break;
+ }
+ default: {
+ std::unique_ptr<ASTDeclaration> decl = this->declaration();
+ if (!decl) {
+ continue;
+ }
+ result.push_back(std::move(decl));
+ }
+ }
+ }
+}
+
+Token Parser::nextToken() {
+ if (fPushback.fKind != Token::INVALID_TOKEN) {
+ Token result = fPushback;
+ fPushback.fKind = Token::INVALID_TOKEN;
+ fPushback.fText = "";
+ return result;
+ }
+ int token = sksllex(fScanner);
+ return Token(Position(skslget_lineno(fScanner), -1), (Token::Kind) token,
+ token == Token::END_OF_FILE ? "<end of file>" :
+ std::string(skslget_text(fScanner)));
+}
+
+void Parser::pushback(Token t) {
+ ASSERT(fPushback.fKind == Token::INVALID_TOKEN);
+ fPushback = t;
+}
+
+Token Parser::peek() {
+ fPushback = this->nextToken();
+ return fPushback;
+}
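+// Note: peek() is implemented via the single-slot pushback buffer, so only one token of lookahead
+// is available at a time; nextToken() drains fPushback before asking the lexer for more input.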
+
+bool Parser::expect(Token::Kind kind, std::string expected, Token* result) {
+ Token next = this->nextToken();
+ if (next.fKind == kind) {
+ if (result) {
+ *result = next;
+ }
+ return true;
+ } else {
+ this->error(next.fPosition, "expected " + expected + ", but found '" + next.fText + "'");
+ return false;
+ }
+}
+
+void Parser::error(Position p, std::string msg) {
+ fErrors.error(p, msg);
+}
+
+bool Parser::isType(std::string name) {
+ return nullptr != fTypes[name];
+}
+
+/* PRECISION (LOWP | MEDIUMP | HIGHP) type SEMICOLON */
+void Parser::precision() {
+ if (!this->expect(Token::PRECISION, "'precision'")) {
+ return;
+ }
+ Token p = this->nextToken();
+ switch (p.fKind) {
+ case Token::LOWP: // fall through
+ case Token::MEDIUMP: // fall through
+ case Token::HIGHP:
+ // ignored for now
+ break;
+ default:
+ this->error(p.fPosition, "expected 'lowp', 'mediump', or 'highp', but found '" +
+ p.fText + "'");
+ return;
+ }
+ if (!this->type()) {
+ return;
+ }
+ this->expect(Token::SEMICOLON, "';'");
+}
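+// For example, a statement such as "precision mediump float;" is accepted here, but the precision
+// qualifier itself is currently discarded rather than attached to the type.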
+
+/* DIRECTIVE(#version) INT_LITERAL | DIRECTIVE(#extension) IDENTIFIER COLON IDENTIFIER */
+std::unique_ptr<ASTDeclaration> Parser::directive() {
+ Token start;
+ if (!this->expect(Token::DIRECTIVE, "a directive", &start)) {
+ return nullptr;
+ }
+ if (start.fText == "#version") {
+ this->expect(Token::INT_LITERAL, "a version number");
+ // ignored for now
+ return nullptr;
+ } else if (start.fText == "#extension") {
+ Token name;
+ if (!this->expect(Token::IDENTIFIER, "an identifier", &name)) {
+ return nullptr;
+ }
+ if (!this->expect(Token::COLON, "':'")) {
+ return nullptr;
+ }
+ // FIXME: need to start paying attention to this token
+ if (!this->expect(Token::IDENTIFIER, "an identifier")) {
+ return nullptr;
+ }
+ return std::unique_ptr<ASTDeclaration>(new ASTExtension(start.fPosition,
+ std::move(name.fText)));
+ } else {
+ this->error(start.fPosition, "unsupported directive '" + start.fText + "'");
+ return nullptr;
+ }
+}
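+// Illustrative inputs: "#version 400" is parsed and then ignored, while
+// "#extension <name> : <behavior>" produces an ASTExtension node (the behavior identifier is
+// consumed but, per the FIXME above, not yet recorded).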
+
+/* modifiers (structVarDeclaration | type IDENTIFIER ((LPAREN parameter
+ (COMMA parameter)* RPAREN (block | SEMICOLON)) | SEMICOLON) | interfaceBlock) */
+std::unique_ptr<ASTDeclaration> Parser::declaration() {
+ ASTModifiers modifiers = this->modifiers();
+ Token lookahead = this->peek();
+ if (lookahead.fKind == Token::IDENTIFIER && !this->isType(lookahead.fText)) {
+ // we have an identifier that's not a type, could be the start of an interface block
+ return this->interfaceBlock(modifiers);
+ }
+ if (lookahead.fKind == Token::STRUCT) {
+ return this->structVarDeclaration(modifiers);
+ }
+ std::unique_ptr<ASTType> type(this->type());
+ if (!type) {
+ return nullptr;
+ }
+ if (type->fKind == ASTType::kStruct_Kind && peek().fKind == Token::SEMICOLON) {
+ this->nextToken();
+ return nullptr;
+ }
+ Token name;
+ if (!this->expect(Token::IDENTIFIER, "an identifier", &name)) {
+ return nullptr;
+ }
+ if (!modifiers.fFlags && this->peek().fKind == Token::LPAREN) {
+ this->nextToken();
+ std::vector<std::unique_ptr<ASTParameter>> parameters;
+ while (this->peek().fKind != Token::RPAREN) {
+ if (parameters.size() > 0) {
+ if (!this->expect(Token::COMMA, "','")) {
+ return nullptr;
+ }
+ }
+ std::unique_ptr<ASTParameter> parameter = this->parameter();
+ if (!parameter) {
+ return nullptr;
+ }
+ parameters.push_back(std::move(parameter));
+ }
+ this->nextToken();
+ std::unique_ptr<ASTBlock> body;
+ if (this->peek().fKind == Token::SEMICOLON) {
+ this->nextToken();
+ } else {
+ body = this->block();
+ if (!body) {
+ return nullptr;
+ }
+ }
+ return std::unique_ptr<ASTDeclaration>(new ASTFunction(name.fPosition, std::move(type),
+ std::move(name.fText),
+ std::move(parameters),
+ std::move(body)));
+ } else {
+ return this->varDeclarationEnd(modifiers, std::move(type), name.fText);
+ }
+}
+
+/* modifiers type IDENTIFIER varDeclarationEnd */
+std::unique_ptr<ASTVarDeclarations> Parser::varDeclarations() {
+ ASTModifiers modifiers = this->modifiers();
+ std::unique_ptr<ASTType> type(this->type());
+ if (!type) {
+ return nullptr;
+ }
+ Token name;
+ if (!this->expect(Token::IDENTIFIER, "an identifier", &name)) {
+ return nullptr;
+ }
+ return this->varDeclarationEnd(modifiers, std::move(type), std::move(name.fText));
+}
+
+/* STRUCT IDENTIFIER LBRACE varDeclaration* RBRACE */
+std::unique_ptr<ASTType> Parser::structDeclaration() {
+ if (!this->expect(Token::STRUCT, "'struct'")) {
+ return nullptr;
+ }
+ Token name;
+ if (!this->expect(Token::IDENTIFIER, "an identifier", &name)) {
+ return nullptr;
+ }
+ if (!this->expect(Token::LBRACE, "'{'")) {
+ return nullptr;
+ }
+ std::vector<Type::Field> fields;
+ while (this->peek().fKind != Token::RBRACE) {
+ std::unique_ptr<ASTVarDeclarations> decl = this->varDeclarations();
+ if (!decl) {
+ return nullptr;
+ }
+ for (const auto& var : decl->fVars) {
+ auto type = (const Type*) fTypes[decl->fType->fName];
+ for (int i = (int) var.fSizes.size() - 1; i >= 0; i--) {
+ if (var.fSizes[i]->fKind != ASTExpression::kInt_Kind) {
+ this->error(decl->fPosition, "array size in struct field must be a constant");
+ }
+ uint64_t columns = ((ASTIntLiteral&) *var.fSizes[i]).fValue;
+ std::string name = type->name() + "[" + to_string(columns) + "]";
+ type = new Type(name, Type::kArray_Kind, *type, (int) columns);
+ fTypes.takeOwnership((Type*) type);
+ }
+ fields.push_back(Type::Field(decl->fModifiers, var.fName, type));
+ if (var.fValue) {
+ this->error(decl->fPosition, "initializers are not permitted on struct fields");
+ }
+ }
+ }
+ if (!this->expect(Token::RBRACE, "'}'")) {
+ return nullptr;
+ }
+ fTypes.add(name.fText, std::unique_ptr<Type>(new Type(name.fText, fields)));
+ return std::unique_ptr<ASTType>(new ASTType(name.fPosition, name.fText,
+ ASTType::kStruct_Kind));
+}
+
+/* structDeclaration ((IDENTIFIER varDeclarationEnd) | SEMICOLON) */
+std::unique_ptr<ASTVarDeclarations> Parser::structVarDeclaration(ASTModifiers modifiers) {
+ std::unique_ptr<ASTType> type = this->structDeclaration();
+ if (!type) {
+ return nullptr;
+ }
+ if (peek().fKind == Token::IDENTIFIER) {
+ Token name = this->nextToken();
+ std::unique_ptr<ASTVarDeclarations> result = this->varDeclarationEnd(modifiers,
+ std::move(type),
+ std::move(name.fText));
+ if (result) {
+ for (const auto& var : result->fVars) {
+ if (var.fValue) {
+ this->error(var.fValue->fPosition,
+ "struct variables cannot be initialized");
+ }
+ }
+ }
+ return result;
+ }
+ this->expect(Token::SEMICOLON, "';'");
+ return nullptr;
+}
+
+/* (LBRACKET expression? RBRACKET)* (EQ expression)? (COMMA IDENTIFIER
+ (LBRACKET expression? RBRACKET)* (EQ expression)?)* SEMICOLON */
+std::unique_ptr<ASTVarDeclarations> Parser::varDeclarationEnd(ASTModifiers mods,
+ std::unique_ptr<ASTType> type,
+ std::string name) {
+ std::vector<ASTVarDeclaration> vars;
+ std::vector<std::unique_ptr<ASTExpression>> currentVarSizes;
+ while (this->peek().fKind == Token::LBRACKET) {
+ this->nextToken();
+ if (this->peek().fKind == Token::RBRACKET) {
+ this->nextToken();
+ currentVarSizes.push_back(nullptr);
+ } else {
+ std::unique_ptr<ASTExpression> size(this->expression());
+ if (!size) {
+ return nullptr;
+ }
+ currentVarSizes.push_back(std::move(size));
+ if (!this->expect(Token::RBRACKET, "']'")) {
+ return nullptr;
+ }
+ }
+ }
+ std::unique_ptr<ASTExpression> value;
+ if (this->peek().fKind == Token::EQ) {
+ this->nextToken();
+ value = this->expression();
+ if (!value) {
+ return nullptr;
+ }
+ }
+ vars.emplace_back(std::move(name), std::move(currentVarSizes), std::move(value));
+ while (this->peek().fKind == Token::COMMA) {
+ this->nextToken();
+ Token name;
+ if (!this->expect(Token::IDENTIFIER, "an identifier", &name)) {
+ return nullptr;
+ }
+ currentVarSizes.clear();
+ value.reset();
+ while (this->peek().fKind == Token::LBRACKET) {
+ this->nextToken();
+ if (this->peek().fKind == Token::RBRACKET) {
+ this->nextToken();
+ currentVarSizes.push_back(nullptr);
+ } else {
+ std::unique_ptr<ASTExpression> size(this->expression());
+ if (!size) {
+ return nullptr;
+ }
+ currentVarSizes.push_back(std::move(size));
+ if (!this->expect(Token::RBRACKET, "']'")) {
+ return nullptr;
+ }
+ }
+ }
+ if (this->peek().fKind == Token::EQ) {
+ this->nextToken();
+ value = this->expression();
+ if (!value) {
+ return nullptr;
+ }
+ }
+ vars.emplace_back(std::move(name.fText), std::move(currentVarSizes), std::move(value));
+ }
+ if (!this->expect(Token::SEMICOLON, "';'")) {
+ return nullptr;
+ }
+ return std::unique_ptr<ASTVarDeclarations>(new ASTVarDeclarations(std::move(mods),
+ std::move(type),
+ std::move(vars)));
+}
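+// Illustrative input (names are hypothetical): given "float x[2] = v, y;", the caller has already
+// consumed the modifiers, the type, and the first identifier; this function consumes the remaining
+// "[2] = v, y;" and yields a single ASTVarDeclarations holding both variables.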
+
+/* modifiers type IDENTIFIER (LBRACKET INT_LITERAL RBRACKET)? */
+std::unique_ptr<ASTParameter> Parser::parameter() {
+ ASTModifiers modifiers = this->modifiersWithDefaults(ASTModifiers::kIn_Flag);
+ std::unique_ptr<ASTType> type = this->type();
+ if (!type) {
+ return nullptr;
+ }
+ Token name;
+ if (!this->expect(Token::IDENTIFIER, "an identifier", &name)) {
+ return nullptr;
+ }
+ std::vector<int> sizes;
+ while (this->peek().fKind == Token::LBRACKET) {
+ this->nextToken();
+ Token sizeToken;
+ if (!this->expect(Token::INT_LITERAL, "a positive integer", &sizeToken)) {
+ return nullptr;
+ }
+ sizes.push_back(SkSL::stoi(sizeToken.fText));
+ if (!this->expect(Token::RBRACKET, "']'")) {
+ return nullptr;
+ }
+ }
+ return std::unique_ptr<ASTParameter>(new ASTParameter(name.fPosition, modifiers,
+ std::move(type), name.fText,
+ std::move(sizes)));
+}
+
+/** (EQ INT_LITERAL)? */
+int Parser::layoutInt() {
+ if (!this->expect(Token::EQ, "'='")) {
+ return -1;
+ }
+ Token resultToken;
+ if (this->expect(Token::INT_LITERAL, "a non-negative integer", &resultToken)) {
+ return SkSL::stoi(resultToken.fText);
+ }
+ return -1;
+}
+
+/* LAYOUT LPAREN IDENTIFIER EQ INT_LITERAL (COMMA IDENTIFIER EQ INT_LITERAL)*
+ RPAREN */
+ASTLayout Parser::layout() {
+ int location = -1;
+ int binding = -1;
+ int index = -1;
+ int set = -1;
+ int builtin = -1;
+ bool originUpperLeft = false;
+ if (this->peek().fKind == Token::LAYOUT) {
+ this->nextToken();
+ if (!this->expect(Token::LPAREN, "'('")) {
+ return ASTLayout(location, binding, index, set, builtin, originUpperLeft);
+ }
+ for (;;) {
+ Token t = this->nextToken();
+ if (t.fText == "location") {
+ location = this->layoutInt();
+ } else if (t.fText == "binding") {
+ binding = this->layoutInt();
+ } else if (t.fText == "index") {
+ index = this->layoutInt();
+ } else if (t.fText == "set") {
+ set = this->layoutInt();
+ } else if (t.fText == "builtin") {
+ builtin = this->layoutInt();
+ } else if (t.fText == "origin_upper_left") {
+ originUpperLeft = true;
+ } else {
+ this->error(t.fPosition, ("'" + t.fText +
+ "' is not a valid layout qualifier").c_str());
+ }
+ if (this->peek().fKind == Token::RPAREN) {
+ this->nextToken();
+ break;
+ }
+ if (!this->expect(Token::COMMA, "','")) {
+ break;
+ }
+ }
+ }
+ return ASTLayout(location, binding, index, set, builtin, originUpperLeft);
+}
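+// Illustrative input: "layout(location = 0, set = 1, binding = 2)" yields an ASTLayout with those
+// three fields filled in and the remaining fields left at their defaults (-1 / false).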
+
+/* layout? (UNIFORM | CONST | IN | OUT | INOUT | LOWP | MEDIUMP | HIGHP | FLAT | NOPERSPECTIVE)* */
+ASTModifiers Parser::modifiers() {
+ ASTLayout layout = this->layout();
+ int flags = 0;
+ for (;;) {
+ // TODO: handle duplicate / incompatible flags
+ switch (peek().fKind) {
+ case Token::UNIFORM:
+ this->nextToken();
+ flags |= ASTModifiers::kUniform_Flag;
+ break;
+ case Token::CONST:
+ this->nextToken();
+ flags |= ASTModifiers::kConst_Flag;
+ break;
+ case Token::IN:
+ this->nextToken();
+ flags |= ASTModifiers::kIn_Flag;
+ break;
+ case Token::OUT:
+ this->nextToken();
+ flags |= ASTModifiers::kOut_Flag;
+ break;
+ case Token::INOUT:
+ this->nextToken();
+ flags |= ASTModifiers::kIn_Flag;
+ flags |= ASTModifiers::kOut_Flag;
+ break;
+ case Token::LOWP:
+ this->nextToken();
+ flags |= ASTModifiers::kLowp_Flag;
+ break;
+ case Token::MEDIUMP:
+ this->nextToken();
+ flags |= ASTModifiers::kMediump_Flag;
+ break;
+ case Token::HIGHP:
+ this->nextToken();
+ flags |= ASTModifiers::kHighp_Flag;
+ break;
+ case Token::FLAT:
+ this->nextToken();
+ flags |= ASTModifiers::kFlat_Flag;
+ break;
+ case Token::NOPERSPECTIVE:
+ this->nextToken();
+ flags |= ASTModifiers::kNoPerspective_Flag;
+ break;
+ default:
+ return ASTModifiers(layout, flags);
+ }
+ }
+}
+
+ASTModifiers Parser::modifiersWithDefaults(int defaultFlags) {
+ ASTModifiers result = this->modifiers();
+ if (!result.fFlags) {
+ return ASTModifiers(result.fLayout, defaultFlags);
+ }
+ return result;
+}
+
+/* ifStatement | forStatement | doStatement | whileStatement | returnStatement | breakStatement |
+   continueStatement | discardStatement | block | varDeclarations | expressionStatement */
+std::unique_ptr<ASTStatement> Parser::statement() {
+ Token start = this->peek();
+ switch (start.fKind) {
+ case Token::IF:
+ return this->ifStatement();
+ case Token::FOR:
+ return this->forStatement();
+ case Token::DO:
+ return this->doStatement();
+ case Token::WHILE:
+ return this->whileStatement();
+ case Token::RETURN:
+ return this->returnStatement();
+ case Token::BREAK:
+ return this->breakStatement();
+ case Token::CONTINUE:
+ return this->continueStatement();
+ case Token::DISCARD:
+ return this->discardStatement();
+ case Token::LBRACE:
+ return this->block();
+ case Token::SEMICOLON:
+ this->nextToken();
+ return std::unique_ptr<ASTStatement>(new ASTBlock(start.fPosition,
+ std::vector<std::unique_ptr<ASTStatement>>()));
+ case Token::CONST: // fall through
+ case Token::HIGHP: // fall through
+ case Token::MEDIUMP: // fall through
+ case Token::LOWP: {
+ auto decl = this->varDeclarations();
+ if (!decl) {
+ return nullptr;
+ }
+ return std::unique_ptr<ASTStatement>(new ASTVarDeclarationStatement(std::move(decl)));
+ }
+ case Token::IDENTIFIER:
+ if (this->isType(start.fText)) {
+ auto decl = this->varDeclarations();
+ if (!decl) {
+ return nullptr;
+ }
+ return std::unique_ptr<ASTStatement>(new ASTVarDeclarationStatement(
+ std::move(decl)));
+ }
+ // fall through
+ default:
+ return this->expressionStatement();
+ }
+}
+
+/* IDENTIFIER(type) */
+std::unique_ptr<ASTType> Parser::type() {
+ Token type;
+ if (!this->expect(Token::IDENTIFIER, "a type", &type)) {
+ return nullptr;
+ }
+ if (!this->isType(type.fText)) {
+ this->error(type.fPosition, ("no type named '" + type.fText + "'").c_str());
+ return nullptr;
+ }
+ return std::unique_ptr<ASTType>(new ASTType(type.fPosition, std::move(type.fText),
+ ASTType::kIdentifier_Kind));
+}
+
+/* IDENTIFIER LBRACE varDeclaration* RBRACE */
+std::unique_ptr<ASTDeclaration> Parser::interfaceBlock(ASTModifiers mods) {
+ Token name;
+ if (!this->expect(Token::IDENTIFIER, "an identifier", &name)) {
+ return nullptr;
+ }
+ if (peek().fKind != Token::LBRACE) {
+ // we only get into interfaceBlock if we found a top-level identifier which was not a type.
+ // 99% of the time, the user was not actually intending to create an interface block, so
+ // it's better to report it as an unknown type
+ this->error(name.fPosition, "no type named '" + name.fText + "'");
+ return nullptr;
+ }
+ this->nextToken();
+ std::vector<std::unique_ptr<ASTVarDeclarations>> decls;
+ while (this->peek().fKind != Token::RBRACE) {
+ std::unique_ptr<ASTVarDeclarations> decl = this->varDeclarations();
+ if (!decl) {
+ return nullptr;
+ }
+ decls.push_back(std::move(decl));
+ }
+ this->nextToken();
+ std::string valueName;
+ if (this->peek().fKind == Token::IDENTIFIER) {
+ valueName = this->nextToken().fText;
+ }
+ this->expect(Token::SEMICOLON, "';'");
+ return std::unique_ptr<ASTDeclaration>(new ASTInterfaceBlock(name.fPosition, mods,
+ name.fText, std::move(valueName),
+ std::move(decls)));
+}
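+// Illustrative input (names are hypothetical): "uniform Uniforms { mat4 mvp; } u;" reaches this
+// function because "Uniforms" is not a known type; it becomes an ASTInterfaceBlock whose instance
+// name is "u". A bare "uniform Foo x;" with no '{' is instead reported as an unknown type.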
+
+/* IF LPAREN expression RPAREN statement (ELSE statement)? */
+std::unique_ptr<ASTIfStatement> Parser::ifStatement() {
+ Token start;
+ if (!this->expect(Token::IF, "'if'", &start)) {
+ return nullptr;
+ }
+ if (!this->expect(Token::LPAREN, "'('")) {
+ return nullptr;
+ }
+ std::unique_ptr<ASTExpression> test(this->expression());
+ if (!test) {
+ return nullptr;
+ }
+ if (!this->expect(Token::RPAREN, "')'")) {
+ return nullptr;
+ }
+ std::unique_ptr<ASTStatement> ifTrue(this->statement());
+ if (!ifTrue) {
+ return nullptr;
+ }
+ std::unique_ptr<ASTStatement> ifFalse;
+ if (this->peek().fKind == Token::ELSE) {
+ this->nextToken();
+ ifFalse = this->statement();
+ if (!ifFalse) {
+ return nullptr;
+ }
+ }
+ return std::unique_ptr<ASTIfStatement>(new ASTIfStatement(start.fPosition, std::move(test),
+ std::move(ifTrue),
+ std::move(ifFalse)));
+}
+
+/* DO statement WHILE LPAREN expression RPAREN SEMICOLON */
+std::unique_ptr<ASTDoStatement> Parser::doStatement() {
+ Token start;
+ if (!this->expect(Token::DO, "'do'", &start)) {
+ return nullptr;
+ }
+ std::unique_ptr<ASTStatement> statement(this->statement());
+ if (!statement) {
+ return nullptr;
+ }
+ if (!this->expect(Token::WHILE, "'while'")) {
+ return nullptr;
+ }
+ if (!this->expect(Token::LPAREN, "'('")) {
+ return nullptr;
+ }
+ std::unique_ptr<ASTExpression> test(this->expression());
+ if (!test) {
+ return nullptr;
+ }
+ if (!this->expect(Token::RPAREN, "')'")) {
+ return nullptr;
+ }
+ if (!this->expect(Token::SEMICOLON, "';'")) {
+ return nullptr;
+ }
+ return std::unique_ptr<ASTDoStatement>(new ASTDoStatement(start.fPosition,
+ std::move(statement),
+ std::move(test)));
+}
+
+/* WHILE LPAREN expression RPAREN statement */
+std::unique_ptr<ASTWhileStatement> Parser::whileStatement() {
+ Token start;
+ if (!this->expect(Token::WHILE, "'while'", &start)) {
+ return nullptr;
+ }
+ if (!this->expect(Token::LPAREN, "'('")) {
+ return nullptr;
+ }
+ std::unique_ptr<ASTExpression> test(this->expression());
+ if (!test) {
+ return nullptr;
+ }
+ if (!this->expect(Token::RPAREN, "')'")) {
+ return nullptr;
+ }
+ std::unique_ptr<ASTStatement> statement(this->statement());
+ if (!statement) {
+ return nullptr;
+ }
+ return std::unique_ptr<ASTWhileStatement>(new ASTWhileStatement(start.fPosition,
+ std::move(test),
+ std::move(statement)));
+}
+
+/* FOR LPAREN (declaration | expression)? SEMICOLON expression? SEMICOLON expression? RPAREN
+   statement */
+std::unique_ptr<ASTForStatement> Parser::forStatement() {
+ Token start;
+ if (!this->expect(Token::FOR, "'for'", &start)) {
+ return nullptr;
+ }
+ if (!this->expect(Token::LPAREN, "'('")) {
+ return nullptr;
+ }
+ std::unique_ptr<ASTStatement> initializer;
+ Token nextToken = this->peek();
+ switch (nextToken.fKind) {
+ case Token::SEMICOLON:
+ break;
+ case Token::CONST:
+ initializer = std::unique_ptr<ASTStatement>(new ASTVarDeclarationStatement(
+ this->varDeclarations()));
+ break;
+ case Token::IDENTIFIER:
+ if (this->isType(nextToken.fText)) {
+ initializer = std::unique_ptr<ASTStatement>(new ASTVarDeclarationStatement(
+ this->varDeclarations()));
+ break;
+ }
+ // fall through
+ default:
+ initializer = this->expressionStatement();
+ }
+ std::unique_ptr<ASTExpression> test;
+ if (this->peek().fKind != Token::SEMICOLON) {
+ test = this->expression();
+ if (!test) {
+ return nullptr;
+ }
+ }
+ if (!this->expect(Token::SEMICOLON, "';'")) {
+ return nullptr;
+ }
+ std::unique_ptr<ASTExpression> next;
+ if (this->peek().fKind != Token::SEMICOLON) {
+ next = this->expression();
+ if (!next) {
+ return nullptr;
+ }
+ }
+ if (!this->expect(Token::RPAREN, "')'")) {
+ return nullptr;
+ }
+ std::unique_ptr<ASTStatement> statement(this->statement());
+ if (!statement) {
+ return nullptr;
+ }
+ return std::unique_ptr<ASTForStatement>(new ASTForStatement(start.fPosition,
+ std::move(initializer),
+ std::move(test), std::move(next),
+ std::move(statement)));
+}
+
+/* RETURN expression? SEMICOLON */
+std::unique_ptr<ASTReturnStatement> Parser::returnStatement() {
+ Token start;
+ if (!this->expect(Token::RETURN, "'return'", &start)) {
+ return nullptr;
+ }
+ std::unique_ptr<ASTExpression> expression;
+ if (this->peek().fKind != Token::SEMICOLON) {
+ expression = this->expression();
+ if (!expression) {
+ return nullptr;
+ }
+ }
+ if (!this->expect(Token::SEMICOLON, "';'")) {
+ return nullptr;
+ }
+ return std::unique_ptr<ASTReturnStatement>(new ASTReturnStatement(start.fPosition,
+ std::move(expression)));
+}
+
+/* BREAK SEMICOLON */
+std::unique_ptr<ASTBreakStatement> Parser::breakStatement() {
+ Token start;
+ if (!this->expect(Token::BREAK, "'break'", &start)) {
+ return nullptr;
+ }
+ if (!this->expect(Token::SEMICOLON, "';'")) {
+ return nullptr;
+ }
+ return std::unique_ptr<ASTBreakStatement>(new ASTBreakStatement(start.fPosition));
+}
+
+/* CONTINUE SEMICOLON */
+std::unique_ptr<ASTContinueStatement> Parser::continueStatement() {
+ Token start;
+ if (!this->expect(Token::CONTINUE, "'continue'", &start)) {
+ return nullptr;
+ }
+ if (!this->expect(Token::SEMICOLON, "';'")) {
+ return nullptr;
+ }
+ return std::unique_ptr<ASTContinueStatement>(new ASTContinueStatement(start.fPosition));
+}
+
+/* DISCARD SEMICOLON */
+std::unique_ptr<ASTDiscardStatement> Parser::discardStatement() {
+ Token start;
+ if (!this->expect(Token::DISCARD, "'discard'", &start)) {
+ return nullptr;
+ }
+ if (!this->expect(Token::SEMICOLON, "';'")) {
+ return nullptr;
+ }
+ return std::unique_ptr<ASTDiscardStatement>(new ASTDiscardStatement(start.fPosition));
+}
+
+/* LBRACE statement* RBRACE */
+std::unique_ptr<ASTBlock> Parser::block() {
+ Token start;
+ if (!this->expect(Token::LBRACE, "'{'", &start)) {
+ return nullptr;
+ }
+ std::vector<std::unique_ptr<ASTStatement>> statements;
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::RBRACE:
+ this->nextToken();
+ return std::unique_ptr<ASTBlock>(new ASTBlock(start.fPosition,
+ std::move(statements)));
+ case Token::END_OF_FILE:
+ this->error(this->peek().fPosition, "expected '}', but found end of file");
+ return nullptr;
+ default: {
+ std::unique_ptr<ASTStatement> statement = this->statement();
+ if (!statement) {
+ return nullptr;
+ }
+ statements.push_back(std::move(statement));
+ }
+ }
+ }
+}
+
+/* expression SEMICOLON */
+std::unique_ptr<ASTExpressionStatement> Parser::expressionStatement() {
+ std::unique_ptr<ASTExpression> expr = this->expression();
+ if (expr) {
+ if (this->expect(Token::SEMICOLON, "';'")) {
+ ASTExpressionStatement* result = new ASTExpressionStatement(std::move(expr));
+ return std::unique_ptr<ASTExpressionStatement>(result);
+ }
+ }
+ return nullptr;
+}
+
+/* assignmentExpression */
+std::unique_ptr<ASTExpression> Parser::expression() {
+ return this->assignmentExpression();
+}
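+// The functions below form a conventional precedence-climbing chain: assignment -> ternary ->
+// logical or/xor/and -> bitwise or/xor/and -> equality -> relational -> shift -> additive ->
+// multiplicative -> unary -> postfix -> term, with each level delegating to the next-tighter one.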
+
+/* ternaryExpression ((EQ | STAREQ | SLASHEQ | PERCENTEQ | PLUSEQ | MINUSEQ | SHLEQ | SHREQ |
+ BITWISEANDEQ | BITWISEXOREQ | BITWISEOREQ | LOGICALANDEQ | LOGICALXOREQ | LOGICALOREQ)
+ assignmentExpression)*
+ */
+std::unique_ptr<ASTExpression> Parser::assignmentExpression() {
+ std::unique_ptr<ASTExpression> result = this->ternaryExpression();
+ if (!result) {
+ return nullptr;
+ }
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::EQ: // fall through
+ case Token::STAREQ: // fall through
+ case Token::SLASHEQ: // fall through
+ case Token::PERCENTEQ: // fall through
+ case Token::PLUSEQ: // fall through
+ case Token::MINUSEQ: // fall through
+ case Token::SHLEQ: // fall through
+ case Token::SHREQ: // fall through
+ case Token::BITWISEANDEQ: // fall through
+ case Token::BITWISEXOREQ: // fall through
+ case Token::BITWISEOREQ: // fall through
+ case Token::LOGICALANDEQ: // fall through
+ case Token::LOGICALXOREQ: // fall through
+ case Token::LOGICALOREQ: {
+ Token t = this->nextToken();
+ std::unique_ptr<ASTExpression> right = this->assignmentExpression();
+ if (!right) {
+ return nullptr;
+ }
+ result = std::unique_ptr<ASTExpression>(new ASTBinaryExpression(std::move(result),
+ t,
+ std::move(right)));
+ }
+ default:
+ return result;
+ }
+ }
+}
+
+/* logicalOrExpression ('?' expression ':' assignmentExpression)? */
+std::unique_ptr<ASTExpression> Parser::ternaryExpression() {
+ std::unique_ptr<ASTExpression> result = this->logicalOrExpression();
+ if (!result) {
+ return nullptr;
+ }
+ if (this->peek().fKind == Token::QUESTION) {
+ Token question = this->nextToken();
+ std::unique_ptr<ASTExpression> trueExpr = this->expression();
+ if (!trueExpr) {
+ return nullptr;
+ }
+ if (this->expect(Token::COLON, "':'")) {
+ std::unique_ptr<ASTExpression> falseExpr = this->assignmentExpression();
+ return std::unique_ptr<ASTExpression>(new ASTTernaryExpression(std::move(result),
+ std::move(trueExpr),
+ std::move(falseExpr)));
+ }
+ return nullptr;
+ }
+ return result;
+}
+
+/* logicalXorExpression (LOGICALOR logicalXorExpression)* */
+std::unique_ptr<ASTExpression> Parser::logicalOrExpression() {
+ std::unique_ptr<ASTExpression> result = this->logicalXorExpression();
+ if (!result) {
+ return nullptr;
+ }
+ while (this->peek().fKind == Token::LOGICALOR) {
+ Token t = this->nextToken();
+ std::unique_ptr<ASTExpression> right = this->logicalXorExpression();
+ if (!right) {
+ return nullptr;
+ }
+ result.reset(new ASTBinaryExpression(std::move(result), t, std::move(right)));
+ }
+ return result;
+}
+
+/* logicalAndExpression (LOGICALXOR logicalAndExpression)* */
+std::unique_ptr<ASTExpression> Parser::logicalXorExpression() {
+ std::unique_ptr<ASTExpression> result = this->logicalAndExpression();
+ if (!result) {
+ return nullptr;
+ }
+ while (this->peek().fKind == Token::LOGICALXOR) {
+ Token t = this->nextToken();
+ std::unique_ptr<ASTExpression> right = this->logicalAndExpression();
+ if (!right) {
+ return nullptr;
+ }
+ result.reset(new ASTBinaryExpression(std::move(result), t, std::move(right)));
+ }
+ return result;
+}
+
+/* bitwiseOrExpression (LOGICALAND bitwiseOrExpression)* */
+std::unique_ptr<ASTExpression> Parser::logicalAndExpression() {
+ std::unique_ptr<ASTExpression> result = this->bitwiseOrExpression();
+ if (!result) {
+ return nullptr;
+ }
+ while (this->peek().fKind == Token::LOGICALAND) {
+ Token t = this->nextToken();
+ std::unique_ptr<ASTExpression> right = this->bitwiseOrExpression();
+ if (!right) {
+ return nullptr;
+ }
+ result.reset(new ASTBinaryExpression(std::move(result), t, std::move(right)));
+ }
+ return result;
+}
+
+/* bitwiseXorExpression (BITWISEOR bitwiseXorExpression)* */
+std::unique_ptr<ASTExpression> Parser::bitwiseOrExpression() {
+ std::unique_ptr<ASTExpression> result = this->bitwiseXorExpression();
+ if (!result) {
+ return nullptr;
+ }
+ while (this->peek().fKind == Token::BITWISEOR) {
+ Token t = this->nextToken();
+ std::unique_ptr<ASTExpression> right = this->bitwiseXorExpression();
+ if (!right) {
+ return nullptr;
+ }
+ result.reset(new ASTBinaryExpression(std::move(result), t, std::move(right)));
+ }
+ return result;
+}
+
+/* bitwiseAndExpression (BITWISEXOR bitwiseAndExpression)* */
+std::unique_ptr<ASTExpression> Parser::bitwiseXorExpression() {
+ std::unique_ptr<ASTExpression> result = this->bitwiseAndExpression();
+ if (!result) {
+ return nullptr;
+ }
+ while (this->peek().fKind == Token::BITWISEXOR) {
+ Token t = this->nextToken();
+ std::unique_ptr<ASTExpression> right = this->bitwiseAndExpression();
+ if (!right) {
+ return nullptr;
+ }
+ result.reset(new ASTBinaryExpression(std::move(result), t, std::move(right)));
+ }
+ return result;
+}
+
+/* equalityExpression (BITWISEAND equalityExpression)* */
+std::unique_ptr<ASTExpression> Parser::bitwiseAndExpression() {
+ std::unique_ptr<ASTExpression> result = this->equalityExpression();
+ if (!result) {
+ return nullptr;
+ }
+ while (this->peek().fKind == Token::BITWISEAND) {
+ Token t = this->nextToken();
+ std::unique_ptr<ASTExpression> right = this->equalityExpression();
+ if (!right) {
+ return nullptr;
+ }
+ result.reset(new ASTBinaryExpression(std::move(result), t, std::move(right)));
+ }
+ return result;
+}
+
+/* relationalExpression ((EQEQ | NEQ) relationalExpression)* */
+std::unique_ptr<ASTExpression> Parser::equalityExpression() {
+ std::unique_ptr<ASTExpression> result = this->relationalExpression();
+ if (!result) {
+ return nullptr;
+ }
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::EQEQ: // fall through
+ case Token::NEQ: {
+ Token t = this->nextToken();
+ std::unique_ptr<ASTExpression> right = this->relationalExpression();
+ if (!right) {
+ return nullptr;
+ }
+ result.reset(new ASTBinaryExpression(std::move(result), t, std::move(right)));
+ break;
+ }
+ default:
+ return result;
+ }
+ }
+}
+
+/* shiftExpression ((LT | GT | LTEQ | GTEQ) shiftExpression)* */
+std::unique_ptr<ASTExpression> Parser::relationalExpression() {
+ std::unique_ptr<ASTExpression> result = this->shiftExpression();
+ if (!result) {
+ return nullptr;
+ }
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::LT: // fall through
+ case Token::GT: // fall through
+ case Token::LTEQ: // fall through
+ case Token::GTEQ: {
+ Token t = this->nextToken();
+ std::unique_ptr<ASTExpression> right = this->shiftExpression();
+ if (!right) {
+ return nullptr;
+ }
+ result.reset(new ASTBinaryExpression(std::move(result), t, std::move(right)));
+ break;
+ }
+ default:
+ return result;
+ }
+ }
+}
+
+/* additiveExpression ((SHL | SHR) additiveExpression)* */
+std::unique_ptr<ASTExpression> Parser::shiftExpression() {
+ std::unique_ptr<ASTExpression> result = this->additiveExpression();
+ if (!result) {
+ return nullptr;
+ }
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::SHL: // fall through
+ case Token::SHR: {
+ Token t = this->nextToken();
+ std::unique_ptr<ASTExpression> right = this->additiveExpression();
+ if (!right) {
+ return nullptr;
+ }
+ result.reset(new ASTBinaryExpression(std::move(result), t, std::move(right)));
+ break;
+ }
+ default:
+ return result;
+ }
+ }
+}
+
+/* multiplicativeExpression ((PLUS | MINUS) multiplicativeExpression)* */
+std::unique_ptr<ASTExpression> Parser::additiveExpression() {
+ std::unique_ptr<ASTExpression> result = this->multiplicativeExpression();
+ if (!result) {
+ return nullptr;
+ }
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::PLUS: // fall through
+ case Token::MINUS: {
+ Token t = this->nextToken();
+ std::unique_ptr<ASTExpression> right = this->multiplicativeExpression();
+ if (!right) {
+ return nullptr;
+ }
+ result.reset(new ASTBinaryExpression(std::move(result), t, std::move(right)));
+ break;
+ }
+ default:
+ return result;
+ }
+ }
+}
+
+/* unaryExpression ((STAR | SLASH | PERCENT) unaryExpression)* */
+std::unique_ptr<ASTExpression> Parser::multiplicativeExpression() {
+ std::unique_ptr<ASTExpression> result = this->unaryExpression();
+ if (!result) {
+ return nullptr;
+ }
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::STAR: // fall through
+ case Token::SLASH: // fall through
+ case Token::PERCENT: {
+ Token t = this->nextToken();
+ std::unique_ptr<ASTExpression> right = this->unaryExpression();
+ if (!right) {
+ return nullptr;
+ }
+ result.reset(new ASTBinaryExpression(std::move(result), t, std::move(right)));
+ break;
+ }
+ default:
+ return result;
+ }
+ }
+}
+
+/* postfixExpression | (PLUS | MINUS | NOT | PLUSPLUS | MINUSMINUS) unaryExpression */
+std::unique_ptr<ASTExpression> Parser::unaryExpression() {
+ switch (this->peek().fKind) {
+ case Token::PLUS: // fall through
+ case Token::MINUS: // fall through
+ case Token::NOT: // fall through
+ case Token::PLUSPLUS: // fall through
+ case Token::MINUSMINUS: {
+ Token t = this->nextToken();
+ std::unique_ptr<ASTExpression> expr = this->unaryExpression();
+ if (!expr) {
+ return nullptr;
+ }
+ return std::unique_ptr<ASTExpression>(new ASTPrefixExpression(t, std::move(expr)));
+ }
+ default:
+ return this->postfixExpression();
+ }
+}
+
+/* term suffix* */
+std::unique_ptr<ASTExpression> Parser::postfixExpression() {
+ std::unique_ptr<ASTExpression> result = this->term();
+ if (!result) {
+ return nullptr;
+ }
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::LBRACKET: // fall through
+ case Token::DOT: // fall through
+ case Token::LPAREN: // fall through
+ case Token::PLUSPLUS: // fall through
+ case Token::MINUSMINUS: {
+ std::unique_ptr<ASTSuffix> s = this->suffix();
+ if (!s) {
+ return nullptr;
+ }
+ result.reset(new ASTSuffixExpression(std::move(result), std::move(s)));
+ break;
+ }
+ default:
+ return result;
+ }
+ }
+}
+
+/* LBRACKET expression RBRACKET | DOT IDENTIFIER | LPAREN parameters RPAREN |
+ PLUSPLUS | MINUSMINUS */
+std::unique_ptr<ASTSuffix> Parser::suffix() {
+ Token next = this->nextToken();
+ switch (next.fKind) {
+ case Token::LBRACKET: {
+ std::unique_ptr<ASTExpression> e = this->expression();
+ if (!e) {
+ return nullptr;
+ }
+ this->expect(Token::RBRACKET, "']' to complete array access expression");
+ return std::unique_ptr<ASTSuffix>(new ASTIndexSuffix(std::move(e)));
+ }
+ case Token::DOT: {
+ Position pos = this->peek().fPosition;
+ std::string text;
+ if (this->identifier(&text)) {
+ return std::unique_ptr<ASTSuffix>(new ASTFieldSuffix(pos, std::move(text)));
+ }
+ return nullptr;
+ }
+ case Token::LPAREN: {
+ std::vector<std::unique_ptr<ASTExpression>> parameters;
+ if (this->peek().fKind != Token::RPAREN) {
+ for (;;) {
+ std::unique_ptr<ASTExpression> expr = this->expression();
+ if (!expr) {
+ return nullptr;
+ }
+ parameters.push_back(std::move(expr));
+ if (this->peek().fKind != Token::COMMA) {
+ break;
+ }
+ this->nextToken();
+ }
+ }
+ this->expect(Token::RPAREN, "')' to complete function parameters");
+ return std::unique_ptr<ASTSuffix>(new ASTCallSuffix(next.fPosition,
+ std::move(parameters)));
+ }
+ case Token::PLUSPLUS:
+ return std::unique_ptr<ASTSuffix>(new ASTSuffix(next.fPosition,
+ ASTSuffix::kPostIncrement_Kind));
+ case Token::MINUSMINUS:
+ return std::unique_ptr<ASTSuffix>(new ASTSuffix(next.fPosition,
+ ASTSuffix::kPostDecrement_Kind));
+ default: {
+ this->error(next.fPosition, "expected expression suffix, but found '" + next.fText +
+ "'\n");
+ return nullptr;
+ }
+ }
+}
+
+/* IDENTIFIER | intLiteral | floatLiteral | boolLiteral | '(' expression ')' */
+std::unique_ptr<ASTExpression> Parser::term() {
+ std::unique_ptr<ASTExpression> result;
+ Token t = this->peek();
+ switch (t.fKind) {
+ case Token::IDENTIFIER: {
+ std::string text;
+ if (this->identifier(&text)) {
+ result.reset(new ASTIdentifier(t.fPosition, std::move(text)));
+ }
+ break;
+ }
+ case Token::INT_LITERAL: {
+ int64_t i;
+ if (this->intLiteral(&i)) {
+ result.reset(new ASTIntLiteral(t.fPosition, i));
+ }
+ break;
+ }
+ case Token::FLOAT_LITERAL: {
+ double f;
+ if (this->floatLiteral(&f)) {
+ result.reset(new ASTFloatLiteral(t.fPosition, f));
+ }
+ break;
+ }
+ case Token::TRUE_LITERAL: // fall through
+ case Token::FALSE_LITERAL: {
+ bool b;
+ if (this->boolLiteral(&b)) {
+ result.reset(new ASTBoolLiteral(t.fPosition, b));
+ }
+ break;
+ }
+ case Token::LPAREN: {
+ this->nextToken();
+ result = this->expression();
+ if (result) {
+ this->expect(Token::RPAREN, "')' to complete expression");
+ }
+ break;
+ }
+ default:
+ this->nextToken();
+ this->error(t.fPosition, "expected expression, but found '" + t.fText + "'\n");
+ result = nullptr;
+ }
+ return result;
+}
+
+/* INT_LITERAL */
+bool Parser::intLiteral(int64_t* dest) {
+ Token t;
+ if (this->expect(Token::INT_LITERAL, "integer literal", &t)) {
+ *dest = SkSL::stol(t.fText);
+ return true;
+ }
+ return false;
+}
+
+/* FLOAT_LITERAL */
+bool Parser::floatLiteral(double* dest) {
+ Token t;
+ if (this->expect(Token::FLOAT_LITERAL, "float literal", &t)) {
+ *dest = SkSL::stod(t.fText);
+ return true;
+ }
+ return false;
+}
+
+/* TRUE_LITERAL | FALSE_LITERAL */
+bool Parser::boolLiteral(bool* dest) {
+ Token t = this->nextToken();
+ switch (t.fKind) {
+ case Token::TRUE_LITERAL:
+ *dest = true;
+ return true;
+ case Token::FALSE_LITERAL:
+ *dest = false;
+ return true;
+ default:
+ this->error(t.fPosition, "expected 'true' or 'false', but found '" + t.fText + "'\n");
+ return false;
+ }
+}
+
+/* IDENTIFIER */
+bool Parser::identifier(std::string* dest) {
+ Token t;
+ if (this->expect(Token::IDENTIFIER, "identifier", &t)) {
+ *dest = t.fText;
+ return true;
+ }
+ return false;
+}
+
+} // namespace
diff --git a/gfx/skia/skia/src/sksl/SkSLParser.h b/gfx/skia/skia/src/sksl/SkSLParser.h
new file mode 100644
index 000000000..75f304bc8
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLParser.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_PARSER
+#define SKSL_PARSER
+
+#include <string>
+#include <vector>
+#include <memory>
+#include <unordered_set>
+#include "SkSLErrorReporter.h"
+#include "SkSLToken.h"
+
+struct yy_buffer_state;
+#define YY_TYPEDEF_YY_BUFFER_STATE
+typedef struct yy_buffer_state *YY_BUFFER_STATE;
+
+namespace SkSL {
+
+struct ASTBlock;
+struct ASTBreakStatement;
+struct ASTContinueStatement;
+struct ASTDeclaration;
+struct ASTDiscardStatement;
+struct ASTDoStatement;
+struct ASTExpression;
+struct ASTExpressionStatement;
+struct ASTForStatement;
+struct ASTIfStatement;
+struct ASTInterfaceBlock;
+struct ASTLayout;
+struct ASTModifiers;
+struct ASTParameter;
+struct ASTReturnStatement;
+struct ASTStatement;
+struct ASTSuffix;
+struct ASTType;
+struct ASTWhileStatement;
+struct ASTVarDeclarations;
+class SymbolTable;
+
+/**
+ * Consumes .sksl text and produces an abstract syntax tree describing the contents.
+ */
+class Parser {
+public:
+ Parser(std::string text, SymbolTable& types, ErrorReporter& errors);
+
+ ~Parser();
+
+ /**
+ * Consumes a complete .sksl file and produces a list of declarations. Errors are reported via
+ * the ErrorReporter; the return value may contain some declarations even when errors have
+ * occurred.
+ */
+ std::vector<std::unique_ptr<ASTDeclaration>> file();
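+    // Illustrative usage sketch (not part of the original documentation): assuming an
+    // already-populated SymbolTable 'types' and an ErrorReporter 'errors',
+    //     Parser parser(programText, types, errors);
+    //     auto declarations = parser.file();
+    // parses an entire program, with any problems routed through 'errors'.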
+
+private:
+ /**
+ * Return the next token from the parse stream.
+ */
+ Token nextToken();
+
+ /**
+ * Push a token back onto the parse stream, so that it is the next one read. Only a single level
+ * of pushback is supported (that is, it is an error to call pushback() twice in a row without
+ * an intervening nextToken()).
+ */
+ void pushback(Token t);
+
+ /**
+ * Returns the next token without consuming it from the stream.
+ */
+ Token peek();
+
+ /**
+ * Reads the next token and generates an error if it is not the expected type. The 'expected'
+ * string is part of the error message, which reads:
+ *
+ * "expected <expected>, but found '<actual text>'"
+ *
+ * If 'result' is non-null, it is set to point to the token that was read.
+ * Returns true if the read token was as expected, false otherwise.
+ */
+ bool expect(Token::Kind kind, std::string expected, Token* result = nullptr);
+
+ void error(Position p, std::string msg);
+
+ /**
+ * Returns true if the 'name' identifier refers to a type name. For instance, isType("int") will
+ * always return true.
+ */
+ bool isType(std::string name);
+
+ // these functions parse individual grammar rules from the current parse position; you probably
+ // don't need to call any of these outside of the parser. The function declarations in the .cpp
+ // file have comments describing the grammar rules.
+
+ void precision();
+
+ std::unique_ptr<ASTDeclaration> directive();
+
+ std::unique_ptr<ASTDeclaration> declaration();
+
+ std::unique_ptr<ASTVarDeclarations> varDeclarations();
+
+ std::unique_ptr<ASTType> structDeclaration();
+
+ std::unique_ptr<ASTVarDeclarations> structVarDeclaration(ASTModifiers modifiers);
+
+ std::unique_ptr<ASTVarDeclarations> varDeclarationEnd(ASTModifiers modifiers,
+ std::unique_ptr<ASTType> type,
+ std::string name);
+
+ std::unique_ptr<ASTParameter> parameter();
+
+ int layoutInt();
+
+ ASTLayout layout();
+
+ ASTModifiers modifiers();
+
+ ASTModifiers modifiersWithDefaults(int defaultFlags);
+
+ std::unique_ptr<ASTStatement> statement();
+
+ std::unique_ptr<ASTType> type();
+
+ std::unique_ptr<ASTDeclaration> interfaceBlock(ASTModifiers mods);
+
+ std::unique_ptr<ASTIfStatement> ifStatement();
+
+ std::unique_ptr<ASTDoStatement> doStatement();
+
+ std::unique_ptr<ASTWhileStatement> whileStatement();
+
+ std::unique_ptr<ASTForStatement> forStatement();
+
+ std::unique_ptr<ASTReturnStatement> returnStatement();
+
+ std::unique_ptr<ASTBreakStatement> breakStatement();
+
+ std::unique_ptr<ASTContinueStatement> continueStatement();
+
+ std::unique_ptr<ASTDiscardStatement> discardStatement();
+
+ std::unique_ptr<ASTBlock> block();
+
+ std::unique_ptr<ASTExpressionStatement> expressionStatement();
+
+ std::unique_ptr<ASTExpression> expression();
+
+ std::unique_ptr<ASTExpression> assignmentExpression();
+
+ std::unique_ptr<ASTExpression> ternaryExpression();
+
+ std::unique_ptr<ASTExpression> logicalOrExpression();
+
+ std::unique_ptr<ASTExpression> logicalXorExpression();
+
+ std::unique_ptr<ASTExpression> logicalAndExpression();
+
+ std::unique_ptr<ASTExpression> bitwiseOrExpression();
+
+ std::unique_ptr<ASTExpression> bitwiseXorExpression();
+
+ std::unique_ptr<ASTExpression> bitwiseAndExpression();
+
+ std::unique_ptr<ASTExpression> equalityExpression();
+
+ std::unique_ptr<ASTExpression> relationalExpression();
+
+ std::unique_ptr<ASTExpression> shiftExpression();
+
+ std::unique_ptr<ASTExpression> additiveExpression();
+
+ std::unique_ptr<ASTExpression> multiplicativeExpression();
+
+ std::unique_ptr<ASTExpression> unaryExpression();
+
+ std::unique_ptr<ASTExpression> postfixExpression();
+
+ std::unique_ptr<ASTSuffix> suffix();
+
+ std::unique_ptr<ASTExpression> term();
+
+ bool intLiteral(int64_t* dest);
+
+ bool floatLiteral(double* dest);
+
+ bool boolLiteral(bool* dest);
+
+ bool identifier(std::string* dest);
+
+
+ void* fScanner;
+ YY_BUFFER_STATE fBuffer;
+ Token fPushback;
+ SymbolTable& fTypes;
+ ErrorReporter& fErrors;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLPosition.h b/gfx/skia/skia/src/sksl/SkSLPosition.h
new file mode 100644
index 000000000..979f630ae
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLPosition.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_POSITION
+#define SKSL_POSITION
+
+#include "SkSLUtil.h"
+
+namespace SkSL {
+
+/**
+ * Represents a position in the source code. Both line and column are one-based. Column is currently
+ * ignored.
+ */
+struct Position {
+ Position()
+ : fLine(-1)
+ , fColumn(-1) {}
+
+ Position(int line, int column)
+ : fLine(line)
+ , fColumn(column) {}
+
+ std::string description() const {
+ return to_string(fLine);
+ }
+
+ int fLine;
+ int fColumn;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLSPIRVCodeGenerator.cpp b/gfx/skia/skia/src/sksl/SkSLSPIRVCodeGenerator.cpp
new file mode 100644
index 000000000..d17e3c42a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLSPIRVCodeGenerator.cpp
@@ -0,0 +1,2638 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkSLSPIRVCodeGenerator.h"
+
+#include "string.h"
+
+#include "GLSL.std.450.h"
+
+#include "ir/SkSLExpressionStatement.h"
+#include "ir/SkSLExtension.h"
+#include "ir/SkSLIndexExpression.h"
+#include "ir/SkSLVariableReference.h"
+
+namespace SkSL {
+
+#define SPIRV_DEBUG 0
+
+static const int32_t SKSL_MAGIC = 0x0; // FIXME: we should probably register a magic number
+
+void SPIRVCodeGenerator::setupIntrinsics() {
+#define ALL_GLSL(x) std::make_tuple(kGLSL_STD_450_IntrinsicKind, GLSLstd450 ## x, GLSLstd450 ## x, \
+ GLSLstd450 ## x, GLSLstd450 ## x)
+#define BY_TYPE_GLSL(ifFloat, ifInt, ifUInt) std::make_tuple(kGLSL_STD_450_IntrinsicKind, \
+ GLSLstd450 ## ifFloat, \
+ GLSLstd450 ## ifInt, \
+ GLSLstd450 ## ifUInt, \
+ SpvOpUndef)
+#define SPECIAL(x) std::make_tuple(kSpecial_IntrinsicKind, k ## x ## _SpecialIntrinsic, \
+ k ## x ## _SpecialIntrinsic, k ## x ## _SpecialIntrinsic, \
+ k ## x ## _SpecialIntrinsic)
+ fIntrinsicMap["round"] = ALL_GLSL(Round);
+ fIntrinsicMap["roundEven"] = ALL_GLSL(RoundEven);
+ fIntrinsicMap["trunc"] = ALL_GLSL(Trunc);
+ fIntrinsicMap["abs"] = BY_TYPE_GLSL(FAbs, SAbs, SAbs);
+ fIntrinsicMap["sign"] = BY_TYPE_GLSL(FSign, SSign, SSign);
+ fIntrinsicMap["floor"] = ALL_GLSL(Floor);
+ fIntrinsicMap["ceil"] = ALL_GLSL(Ceil);
+ fIntrinsicMap["fract"] = ALL_GLSL(Fract);
+ fIntrinsicMap["radians"] = ALL_GLSL(Radians);
+ fIntrinsicMap["degrees"] = ALL_GLSL(Degrees);
+ fIntrinsicMap["sin"] = ALL_GLSL(Sin);
+ fIntrinsicMap["cos"] = ALL_GLSL(Cos);
+ fIntrinsicMap["tan"] = ALL_GLSL(Tan);
+ fIntrinsicMap["asin"] = ALL_GLSL(Asin);
+ fIntrinsicMap["acos"] = ALL_GLSL(Acos);
+ fIntrinsicMap["atan"] = SPECIAL(Atan);
+ fIntrinsicMap["sinh"] = ALL_GLSL(Sinh);
+ fIntrinsicMap["cosh"] = ALL_GLSL(Cosh);
+ fIntrinsicMap["tanh"] = ALL_GLSL(Tanh);
+ fIntrinsicMap["asinh"] = ALL_GLSL(Asinh);
+ fIntrinsicMap["acosh"] = ALL_GLSL(Acosh);
+ fIntrinsicMap["atanh"] = ALL_GLSL(Atanh);
+ fIntrinsicMap["pow"] = ALL_GLSL(Pow);
+ fIntrinsicMap["exp"] = ALL_GLSL(Exp);
+ fIntrinsicMap["log"] = ALL_GLSL(Log);
+ fIntrinsicMap["exp2"] = ALL_GLSL(Exp2);
+ fIntrinsicMap["log2"] = ALL_GLSL(Log2);
+ fIntrinsicMap["sqrt"] = ALL_GLSL(Sqrt);
+ fIntrinsicMap["inversesqrt"] = ALL_GLSL(InverseSqrt);
+ fIntrinsicMap["determinant"] = ALL_GLSL(Determinant);
+ fIntrinsicMap["matrixInverse"] = ALL_GLSL(MatrixInverse);
+ fIntrinsicMap["mod"] = std::make_tuple(kSPIRV_IntrinsicKind, SpvOpFMod, SpvOpSMod,
+ SpvOpUMod, SpvOpUndef);
+ fIntrinsicMap["min"] = BY_TYPE_GLSL(FMin, SMin, UMin);
+ fIntrinsicMap["max"] = BY_TYPE_GLSL(FMax, SMax, UMax);
+ fIntrinsicMap["clamp"] = BY_TYPE_GLSL(FClamp, SClamp, UClamp);
+ fIntrinsicMap["dot"] = std::make_tuple(kSPIRV_IntrinsicKind, SpvOpDot, SpvOpUndef,
+ SpvOpUndef, SpvOpUndef);
+ fIntrinsicMap["mix"] = ALL_GLSL(FMix);
+ fIntrinsicMap["step"] = ALL_GLSL(Step);
+ fIntrinsicMap["smoothstep"] = ALL_GLSL(SmoothStep);
+ fIntrinsicMap["fma"] = ALL_GLSL(Fma);
+ fIntrinsicMap["frexp"] = ALL_GLSL(Frexp);
+ fIntrinsicMap["ldexp"] = ALL_GLSL(Ldexp);
+
+#define PACK(type) fIntrinsicMap["pack" #type] = ALL_GLSL(Pack ## type); \
+ fIntrinsicMap["unpack" #type] = ALL_GLSL(Unpack ## type)
+ PACK(Snorm4x8);
+ PACK(Unorm4x8);
+ PACK(Snorm2x16);
+ PACK(Unorm2x16);
+ PACK(Half2x16);
+ PACK(Double2x32);
+ fIntrinsicMap["length"] = ALL_GLSL(Length);
+ fIntrinsicMap["distance"] = ALL_GLSL(Distance);
+ fIntrinsicMap["cross"] = ALL_GLSL(Cross);
+ fIntrinsicMap["normalize"] = ALL_GLSL(Normalize);
+ fIntrinsicMap["faceForward"] = ALL_GLSL(FaceForward);
+ fIntrinsicMap["reflect"] = ALL_GLSL(Reflect);
+ fIntrinsicMap["refract"] = ALL_GLSL(Refract);
+ fIntrinsicMap["findLSB"] = ALL_GLSL(FindILsb);
+ fIntrinsicMap["findMSB"] = BY_TYPE_GLSL(FindSMsb, FindSMsb, FindUMsb);
+ fIntrinsicMap["dFdx"] = std::make_tuple(kSPIRV_IntrinsicKind, SpvOpDPdx, SpvOpUndef,
+ SpvOpUndef, SpvOpUndef);
+ fIntrinsicMap["dFdy"] = std::make_tuple(kSPIRV_IntrinsicKind, SpvOpDPdy, SpvOpUndef,
+ SpvOpUndef, SpvOpUndef);
+ fIntrinsicMap["dFdy"] = std::make_tuple(kSPIRV_IntrinsicKind, SpvOpDPdy, SpvOpUndef,
+ SpvOpUndef, SpvOpUndef);
+ fIntrinsicMap["texture"] = SPECIAL(Texture);
+ fIntrinsicMap["texture2D"] = SPECIAL(Texture2D);
+ fIntrinsicMap["textureProj"] = SPECIAL(TextureProj);
+
+ fIntrinsicMap["any"] = std::make_tuple(kSPIRV_IntrinsicKind, SpvOpUndef,
+ SpvOpUndef, SpvOpUndef, SpvOpAny);
+ fIntrinsicMap["all"] = std::make_tuple(kSPIRV_IntrinsicKind, SpvOpUndef,
+ SpvOpUndef, SpvOpUndef, SpvOpAll);
+ fIntrinsicMap["equal"] = std::make_tuple(kSPIRV_IntrinsicKind, SpvOpFOrdEqual,
+ SpvOpIEqual, SpvOpIEqual,
+ SpvOpLogicalEqual);
+ fIntrinsicMap["notEqual"] = std::make_tuple(kSPIRV_IntrinsicKind, SpvOpFOrdNotEqual,
+ SpvOpINotEqual, SpvOpINotEqual,
+ SpvOpLogicalNotEqual);
+ fIntrinsicMap["lessThan"] = std::make_tuple(kSPIRV_IntrinsicKind, SpvOpSLessThan,
+ SpvOpULessThan, SpvOpFOrdLessThan,
+ SpvOpUndef);
+ fIntrinsicMap["lessThanEqual"] = std::make_tuple(kSPIRV_IntrinsicKind, SpvOpSLessThanEqual,
+ SpvOpULessThanEqual, SpvOpFOrdLessThanEqual,
+ SpvOpUndef);
+ fIntrinsicMap["greaterThan"] = std::make_tuple(kSPIRV_IntrinsicKind, SpvOpSGreaterThan,
+ SpvOpUGreaterThan, SpvOpFOrdGreaterThan,
+ SpvOpUndef);
+ fIntrinsicMap["greaterThanEqual"] = std::make_tuple(kSPIRV_IntrinsicKind,
+ SpvOpSGreaterThanEqual,
+ SpvOpUGreaterThanEqual,
+ SpvOpFOrdGreaterThanEqual,
+ SpvOpUndef);
+
+// interpolateAt* not yet supported...
+}
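+// Note on the tuple layout used above (see ALL_GLSL, BY_TYPE_GLSL and SPECIAL): each map entry is
+// (intrinsic kind, op for float arguments, op for signed-integer arguments, op for
+// unsigned-integer arguments, op for bool arguments), with SpvOpUndef marking argument types the
+// intrinsic does not handle.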
+
+void SPIRVCodeGenerator::writeWord(int32_t word, std::ostream& out) {
+#if SPIRV_DEBUG
+ out << "(" << word << ") ";
+#else
+ out.write((const char*) &word, sizeof(word));
+#endif
+}
+
+static bool is_float(const Context& context, const Type& type) {
+ if (type.kind() == Type::kVector_Kind) {
+ return is_float(context, type.componentType());
+ }
+ return type == *context.fFloat_Type || type == *context.fDouble_Type;
+}
+
+static bool is_signed(const Context& context, const Type& type) {
+ if (type.kind() == Type::kVector_Kind) {
+ return is_signed(context, type.componentType());
+ }
+ return type == *context.fInt_Type;
+}
+
+static bool is_unsigned(const Context& context, const Type& type) {
+ if (type.kind() == Type::kVector_Kind) {
+ return is_unsigned(context, type.componentType());
+ }
+ return type == *context.fUInt_Type;
+}
+
+static bool is_bool(const Context& context, const Type& type) {
+ if (type.kind() == Type::kVector_Kind) {
+ return is_bool(context, type.componentType());
+ }
+ return type == *context.fBool_Type;
+}
+
+static bool is_out(const Variable& var) {
+ return (var.fModifiers.fFlags & Modifiers::kOut_Flag) != 0;
+}
+
+#if SPIRV_DEBUG
+static std::string opcode_text(SpvOp_ opCode) {
+ switch (opCode) {
+ case SpvOpNop:
+ return "Nop";
+ case SpvOpUndef:
+ return "Undef";
+ case SpvOpSourceContinued:
+ return "SourceContinued";
+ case SpvOpSource:
+ return "Source";
+ case SpvOpSourceExtension:
+ return "SourceExtension";
+ case SpvOpName:
+ return "Name";
+ case SpvOpMemberName:
+ return "MemberName";
+ case SpvOpString:
+ return "String";
+ case SpvOpLine:
+ return "Line";
+ case SpvOpExtension:
+ return "Extension";
+ case SpvOpExtInstImport:
+ return "ExtInstImport";
+ case SpvOpExtInst:
+ return "ExtInst";
+ case SpvOpMemoryModel:
+ return "MemoryModel";
+ case SpvOpEntryPoint:
+ return "EntryPoint";
+ case SpvOpExecutionMode:
+ return "ExecutionMode";
+ case SpvOpCapability:
+ return "Capability";
+ case SpvOpTypeVoid:
+ return "TypeVoid";
+ case SpvOpTypeBool:
+ return "TypeBool";
+ case SpvOpTypeInt:
+ return "TypeInt";
+ case SpvOpTypeFloat:
+ return "TypeFloat";
+ case SpvOpTypeVector:
+ return "TypeVector";
+ case SpvOpTypeMatrix:
+ return "TypeMatrix";
+ case SpvOpTypeImage:
+ return "TypeImage";
+ case SpvOpTypeSampler:
+ return "TypeSampler";
+ case SpvOpTypeSampledImage:
+ return "TypeSampledImage";
+ case SpvOpTypeArray:
+ return "TypeArray";
+ case SpvOpTypeRuntimeArray:
+ return "TypeRuntimeArray";
+ case SpvOpTypeStruct:
+ return "TypeStruct";
+ case SpvOpTypeOpaque:
+ return "TypeOpaque";
+ case SpvOpTypePointer:
+ return "TypePointer";
+ case SpvOpTypeFunction:
+ return "TypeFunction";
+ case SpvOpTypeEvent:
+ return "TypeEvent";
+ case SpvOpTypeDeviceEvent:
+ return "TypeDeviceEvent";
+ case SpvOpTypeReserveId:
+ return "TypeReserveId";
+ case SpvOpTypeQueue:
+ return "TypeQueue";
+ case SpvOpTypePipe:
+ return "TypePipe";
+ case SpvOpTypeForwardPointer:
+ return "TypeForwardPointer";
+ case SpvOpConstantTrue:
+ return "ConstantTrue";
+ case SpvOpConstantFalse:
+ return "ConstantFalse";
+ case SpvOpConstant:
+ return "Constant";
+ case SpvOpConstantComposite:
+ return "ConstantComposite";
+ case SpvOpConstantSampler:
+ return "ConstantSampler";
+ case SpvOpConstantNull:
+ return "ConstantNull";
+ case SpvOpSpecConstantTrue:
+ return "SpecConstantTrue";
+ case SpvOpSpecConstantFalse:
+ return "SpecConstantFalse";
+ case SpvOpSpecConstant:
+ return "SpecConstant";
+ case SpvOpSpecConstantComposite:
+ return "SpecConstantComposite";
+ case SpvOpSpecConstantOp:
+ return "SpecConstantOp";
+ case SpvOpFunction:
+ return "Function";
+ case SpvOpFunctionParameter:
+ return "FunctionParameter";
+ case SpvOpFunctionEnd:
+ return "FunctionEnd";
+ case SpvOpFunctionCall:
+ return "FunctionCall";
+ case SpvOpVariable:
+ return "Variable";
+ case SpvOpImageTexelPointer:
+ return "ImageTexelPointer";
+ case SpvOpLoad:
+ return "Load";
+ case SpvOpStore:
+ return "Store";
+ case SpvOpCopyMemory:
+ return "CopyMemory";
+ case SpvOpCopyMemorySized:
+ return "CopyMemorySized";
+ case SpvOpAccessChain:
+ return "AccessChain";
+ case SpvOpInBoundsAccessChain:
+ return "InBoundsAccessChain";
+ case SpvOpPtrAccessChain:
+ return "PtrAccessChain";
+ case SpvOpArrayLength:
+ return "ArrayLength";
+ case SpvOpGenericPtrMemSemantics:
+ return "GenericPtrMemSemantics";
+ case SpvOpInBoundsPtrAccessChain:
+ return "InBoundsPtrAccessChain";
+ case SpvOpDecorate:
+ return "Decorate";
+ case SpvOpMemberDecorate:
+ return "MemberDecorate";
+ case SpvOpDecorationGroup:
+ return "DecorationGroup";
+ case SpvOpGroupDecorate:
+ return "GroupDecorate";
+ case SpvOpGroupMemberDecorate:
+ return "GroupMemberDecorate";
+ case SpvOpVectorExtractDynamic:
+ return "VectorExtractDynamic";
+ case SpvOpVectorInsertDynamic:
+ return "VectorInsertDynamic";
+ case SpvOpVectorShuffle:
+ return "VectorShuffle";
+ case SpvOpCompositeConstruct:
+ return "CompositeConstruct";
+ case SpvOpCompositeExtract:
+ return "CompositeExtract";
+ case SpvOpCompositeInsert:
+ return "CompositeInsert";
+ case SpvOpCopyObject:
+ return "CopyObject";
+ case SpvOpTranspose:
+ return "Transpose";
+ case SpvOpSampledImage:
+ return "SampledImage";
+ case SpvOpImageSampleImplicitLod:
+ return "ImageSampleImplicitLod";
+ case SpvOpImageSampleExplicitLod:
+ return "ImageSampleExplicitLod";
+ case SpvOpImageSampleDrefImplicitLod:
+ return "ImageSampleDrefImplicitLod";
+ case SpvOpImageSampleDrefExplicitLod:
+ return "ImageSampleDrefExplicitLod";
+ case SpvOpImageSampleProjImplicitLod:
+ return "ImageSampleProjImplicitLod";
+ case SpvOpImageSampleProjExplicitLod:
+ return "ImageSampleProjExplicitLod";
+ case SpvOpImageSampleProjDrefImplicitLod:
+ return "ImageSampleProjDrefImplicitLod";
+ case SpvOpImageSampleProjDrefExplicitLod:
+ return "ImageSampleProjDrefExplicitLod";
+ case SpvOpImageFetch:
+ return "ImageFetch";
+ case SpvOpImageGather:
+ return "ImageGather";
+ case SpvOpImageDrefGather:
+ return "ImageDrefGather";
+ case SpvOpImageRead:
+ return "ImageRead";
+ case SpvOpImageWrite:
+ return "ImageWrite";
+ case SpvOpImage:
+ return "Image";
+ case SpvOpImageQueryFormat:
+ return "ImageQueryFormat";
+ case SpvOpImageQueryOrder:
+ return "ImageQueryOrder";
+ case SpvOpImageQuerySizeLod:
+ return "ImageQuerySizeLod";
+ case SpvOpImageQuerySize:
+ return "ImageQuerySize";
+ case SpvOpImageQueryLod:
+ return "ImageQueryLod";
+ case SpvOpImageQueryLevels:
+ return "ImageQueryLevels";
+ case SpvOpImageQuerySamples:
+ return "ImageQuerySamples";
+ case SpvOpConvertFToU:
+ return "ConvertFToU";
+ case SpvOpConvertFToS:
+ return "ConvertFToS";
+ case SpvOpConvertSToF:
+ return "ConvertSToF";
+ case SpvOpConvertUToF:
+ return "ConvertUToF";
+ case SpvOpUConvert:
+ return "UConvert";
+ case SpvOpSConvert:
+ return "SConvert";
+ case SpvOpFConvert:
+ return "FConvert";
+ case SpvOpQuantizeToF16:
+ return "QuantizeToF16";
+ case SpvOpConvertPtrToU:
+ return "ConvertPtrToU";
+ case SpvOpSatConvertSToU:
+ return "SatConvertSToU";
+ case SpvOpSatConvertUToS:
+ return "SatConvertUToS";
+ case SpvOpConvertUToPtr:
+ return "ConvertUToPtr";
+ case SpvOpPtrCastToGeneric:
+ return "PtrCastToGeneric";
+ case SpvOpGenericCastToPtr:
+ return "GenericCastToPtr";
+ case SpvOpGenericCastToPtrExplicit:
+ return "GenericCastToPtrExplicit";
+ case SpvOpBitcast:
+ return "Bitcast";
+ case SpvOpSNegate:
+ return "SNegate";
+ case SpvOpFNegate:
+ return "FNegate";
+ case SpvOpIAdd:
+ return "IAdd";
+ case SpvOpFAdd:
+ return "FAdd";
+ case SpvOpISub:
+ return "ISub";
+ case SpvOpFSub:
+ return "FSub";
+ case SpvOpIMul:
+ return "IMul";
+ case SpvOpFMul:
+ return "FMul";
+ case SpvOpUDiv:
+ return "UDiv";
+ case SpvOpSDiv:
+ return "SDiv";
+ case SpvOpFDiv:
+ return "FDiv";
+ case SpvOpUMod:
+ return "UMod";
+ case SpvOpSRem:
+ return "SRem";
+ case SpvOpSMod:
+ return "SMod";
+ case SpvOpFRem:
+ return "FRem";
+ case SpvOpFMod:
+ return "FMod";
+ case SpvOpVectorTimesScalar:
+ return "VectorTimesScalar";
+ case SpvOpMatrixTimesScalar:
+ return "MatrixTimesScalar";
+ case SpvOpVectorTimesMatrix:
+ return "VectorTimesMatrix";
+ case SpvOpMatrixTimesVector:
+ return "MatrixTimesVector";
+ case SpvOpMatrixTimesMatrix:
+ return "MatrixTimesMatrix";
+ case SpvOpOuterProduct:
+ return "OuterProduct";
+ case SpvOpDot:
+ return "Dot";
+ case SpvOpIAddCarry:
+ return "IAddCarry";
+ case SpvOpISubBorrow:
+ return "ISubBorrow";
+ case SpvOpUMulExtended:
+ return "UMulExtended";
+ case SpvOpSMulExtended:
+ return "SMulExtended";
+ case SpvOpAny:
+ return "Any";
+ case SpvOpAll:
+ return "All";
+ case SpvOpIsNan:
+ return "IsNan";
+ case SpvOpIsInf:
+ return "IsInf";
+ case SpvOpIsFinite:
+ return "IsFinite";
+ case SpvOpIsNormal:
+ return "IsNormal";
+ case SpvOpSignBitSet:
+ return "SignBitSet";
+ case SpvOpLessOrGreater:
+ return "LessOrGreater";
+ case SpvOpOrdered:
+ return "Ordered";
+ case SpvOpUnordered:
+ return "Unordered";
+ case SpvOpLogicalEqual:
+ return "LogicalEqual";
+ case SpvOpLogicalNotEqual:
+ return "LogicalNotEqual";
+ case SpvOpLogicalOr:
+ return "LogicalOr";
+ case SpvOpLogicalAnd:
+ return "LogicalAnd";
+ case SpvOpLogicalNot:
+ return "LogicalNot";
+ case SpvOpSelect:
+ return "Select";
+ case SpvOpIEqual:
+ return "IEqual";
+ case SpvOpINotEqual:
+ return "INotEqual";
+ case SpvOpUGreaterThan:
+ return "UGreaterThan";
+ case SpvOpSGreaterThan:
+ return "SGreaterThan";
+ case SpvOpUGreaterThanEqual:
+ return "UGreaterThanEqual";
+ case SpvOpSGreaterThanEqual:
+ return "SGreaterThanEqual";
+ case SpvOpULessThan:
+ return "ULessThan";
+ case SpvOpSLessThan:
+ return "SLessThan";
+ case SpvOpULessThanEqual:
+ return "ULessThanEqual";
+ case SpvOpSLessThanEqual:
+ return "SLessThanEqual";
+ case SpvOpFOrdEqual:
+ return "FOrdEqual";
+ case SpvOpFUnordEqual:
+ return "FUnordEqual";
+ case SpvOpFOrdNotEqual:
+ return "FOrdNotEqual";
+ case SpvOpFUnordNotEqual:
+ return "FUnordNotEqual";
+ case SpvOpFOrdLessThan:
+ return "FOrdLessThan";
+ case SpvOpFUnordLessThan:
+ return "FUnordLessThan";
+ case SpvOpFOrdGreaterThan:
+ return "FOrdGreaterThan";
+ case SpvOpFUnordGreaterThan:
+ return "FUnordGreaterThan";
+ case SpvOpFOrdLessThanEqual:
+ return "FOrdLessThanEqual";
+ case SpvOpFUnordLessThanEqual:
+ return "FUnordLessThanEqual";
+ case SpvOpFOrdGreaterThanEqual:
+ return "FOrdGreaterThanEqual";
+ case SpvOpFUnordGreaterThanEqual:
+ return "FUnordGreaterThanEqual";
+ case SpvOpShiftRightLogical:
+ return "ShiftRightLogical";
+ case SpvOpShiftRightArithmetic:
+ return "ShiftRightArithmetic";
+ case SpvOpShiftLeftLogical:
+ return "ShiftLeftLogical";
+ case SpvOpBitwiseOr:
+ return "BitwiseOr";
+ case SpvOpBitwiseXor:
+ return "BitwiseXor";
+ case SpvOpBitwiseAnd:
+ return "BitwiseAnd";
+ case SpvOpNot:
+ return "Not";
+ case SpvOpBitFieldInsert:
+ return "BitFieldInsert";
+ case SpvOpBitFieldSExtract:
+ return "BitFieldSExtract";
+ case SpvOpBitFieldUExtract:
+ return "BitFieldUExtract";
+ case SpvOpBitReverse:
+ return "BitReverse";
+ case SpvOpBitCount:
+ return "BitCount";
+ case SpvOpDPdx:
+ return "DPdx";
+ case SpvOpDPdy:
+ return "DPdy";
+ case SpvOpFwidth:
+ return "Fwidth";
+ case SpvOpDPdxFine:
+ return "DPdxFine";
+ case SpvOpDPdyFine:
+ return "DPdyFine";
+ case SpvOpFwidthFine:
+ return "FwidthFine";
+ case SpvOpDPdxCoarse:
+ return "DPdxCoarse";
+ case SpvOpDPdyCoarse:
+ return "DPdyCoarse";
+ case SpvOpFwidthCoarse:
+ return "FwidthCoarse";
+ case SpvOpEmitVertex:
+ return "EmitVertex";
+ case SpvOpEndPrimitive:
+ return "EndPrimitive";
+ case SpvOpEmitStreamVertex:
+ return "EmitStreamVertex";
+ case SpvOpEndStreamPrimitive:
+ return "EndStreamPrimitive";
+ case SpvOpControlBarrier:
+ return "ControlBarrier";
+ case SpvOpMemoryBarrier:
+ return "MemoryBarrier";
+ case SpvOpAtomicLoad:
+ return "AtomicLoad";
+ case SpvOpAtomicStore:
+ return "AtomicStore";
+ case SpvOpAtomicExchange:
+ return "AtomicExchange";
+ case SpvOpAtomicCompareExchange:
+ return "AtomicCompareExchange";
+ case SpvOpAtomicCompareExchangeWeak:
+ return "AtomicCompareExchangeWeak";
+ case SpvOpAtomicIIncrement:
+ return "AtomicIIncrement";
+ case SpvOpAtomicIDecrement:
+ return "AtomicIDecrement";
+ case SpvOpAtomicIAdd:
+ return "AtomicIAdd";
+ case SpvOpAtomicISub:
+ return "AtomicISub";
+ case SpvOpAtomicSMin:
+ return "AtomicSMin";
+ case SpvOpAtomicUMin:
+ return "AtomicUMin";
+ case SpvOpAtomicSMax:
+ return "AtomicSMax";
+ case SpvOpAtomicUMax:
+ return "AtomicUMax";
+ case SpvOpAtomicAnd:
+ return "AtomicAnd";
+ case SpvOpAtomicOr:
+ return "AtomicOr";
+ case SpvOpAtomicXor:
+ return "AtomicXor";
+ case SpvOpPhi:
+ return "Phi";
+ case SpvOpLoopMerge:
+ return "LoopMerge";
+ case SpvOpSelectionMerge:
+ return "SelectionMerge";
+ case SpvOpLabel:
+ return "Label";
+ case SpvOpBranch:
+ return "Branch";
+ case SpvOpBranchConditional:
+ return "BranchConditional";
+ case SpvOpSwitch:
+ return "Switch";
+ case SpvOpKill:
+ return "Kill";
+ case SpvOpReturn:
+ return "Return";
+ case SpvOpReturnValue:
+ return "ReturnValue";
+ case SpvOpUnreachable:
+ return "Unreachable";
+ case SpvOpLifetimeStart:
+ return "LifetimeStart";
+ case SpvOpLifetimeStop:
+ return "LifetimeStop";
+ case SpvOpGroupAsyncCopy:
+ return "GroupAsyncCopy";
+ case SpvOpGroupWaitEvents:
+ return "GroupWaitEvents";
+ case SpvOpGroupAll:
+ return "GroupAll";
+ case SpvOpGroupAny:
+ return "GroupAny";
+ case SpvOpGroupBroadcast:
+ return "GroupBroadcast";
+ case SpvOpGroupIAdd:
+ return "GroupIAdd";
+ case SpvOpGroupFAdd:
+ return "GroupFAdd";
+ case SpvOpGroupFMin:
+ return "GroupFMin";
+ case SpvOpGroupUMin:
+ return "GroupUMin";
+ case SpvOpGroupSMin:
+ return "GroupSMin";
+ case SpvOpGroupFMax:
+ return "GroupFMax";
+ case SpvOpGroupUMax:
+ return "GroupUMax";
+ case SpvOpGroupSMax:
+ return "GroupSMax";
+ case SpvOpReadPipe:
+ return "ReadPipe";
+ case SpvOpWritePipe:
+ return "WritePipe";
+ case SpvOpReservedReadPipe:
+ return "ReservedReadPipe";
+ case SpvOpReservedWritePipe:
+ return "ReservedWritePipe";
+ case SpvOpReserveReadPipePackets:
+ return "ReserveReadPipePackets";
+ case SpvOpReserveWritePipePackets:
+ return "ReserveWritePipePackets";
+ case SpvOpCommitReadPipe:
+ return "CommitReadPipe";
+ case SpvOpCommitWritePipe:
+ return "CommitWritePipe";
+ case SpvOpIsValidReserveId:
+ return "IsValidReserveId";
+ case SpvOpGetNumPipePackets:
+ return "GetNumPipePackets";
+ case SpvOpGetMaxPipePackets:
+ return "GetMaxPipePackets";
+ case SpvOpGroupReserveReadPipePackets:
+ return "GroupReserveReadPipePackets";
+ case SpvOpGroupReserveWritePipePackets:
+ return "GroupReserveWritePipePackets";
+ case SpvOpGroupCommitReadPipe:
+ return "GroupCommitReadPipe";
+ case SpvOpGroupCommitWritePipe:
+ return "GroupCommitWritePipe";
+ case SpvOpEnqueueMarker:
+ return "EnqueueMarker";
+ case SpvOpEnqueueKernel:
+ return "EnqueueKernel";
+ case SpvOpGetKernelNDrangeSubGroupCount:
+ return "GetKernelNDrangeSubGroupCount";
+ case SpvOpGetKernelNDrangeMaxSubGroupSize:
+ return "GetKernelNDrangeMaxSubGroupSize";
+ case SpvOpGetKernelWorkGroupSize:
+ return "GetKernelWorkGroupSize";
+ case SpvOpGetKernelPreferredWorkGroupSizeMultiple:
+ return "GetKernelPreferredWorkGroupSizeMultiple";
+ case SpvOpRetainEvent:
+ return "RetainEvent";
+ case SpvOpReleaseEvent:
+ return "ReleaseEvent";
+ case SpvOpCreateUserEvent:
+ return "CreateUserEvent";
+ case SpvOpIsValidEvent:
+ return "IsValidEvent";
+ case SpvOpSetUserEventStatus:
+ return "SetUserEventStatus";
+ case SpvOpCaptureEventProfilingInfo:
+ return "CaptureEventProfilingInfo";
+ case SpvOpGetDefaultQueue:
+ return "GetDefaultQueue";
+ case SpvOpBuildNDRange:
+ return "BuildNDRange";
+ case SpvOpImageSparseSampleImplicitLod:
+ return "ImageSparseSampleImplicitLod";
+ case SpvOpImageSparseSampleExplicitLod:
+ return "ImageSparseSampleExplicitLod";
+ case SpvOpImageSparseSampleDrefImplicitLod:
+ return "ImageSparseSampleDrefImplicitLod";
+ case SpvOpImageSparseSampleDrefExplicitLod:
+ return "ImageSparseSampleDrefExplicitLod";
+ case SpvOpImageSparseSampleProjImplicitLod:
+ return "ImageSparseSampleProjImplicitLod";
+ case SpvOpImageSparseSampleProjExplicitLod:
+ return "ImageSparseSampleProjExplicitLod";
+ case SpvOpImageSparseSampleProjDrefImplicitLod:
+ return "ImageSparseSampleProjDrefImplicitLod";
+ case SpvOpImageSparseSampleProjDrefExplicitLod:
+ return "ImageSparseSampleProjDrefExplicitLod";
+ case SpvOpImageSparseFetch:
+ return "ImageSparseFetch";
+ case SpvOpImageSparseGather:
+ return "ImageSparseGather";
+ case SpvOpImageSparseDrefGather:
+ return "ImageSparseDrefGather";
+ case SpvOpImageSparseTexelsResident:
+ return "ImageSparseTexelsResident";
+ case SpvOpNoLine:
+ return "NoLine";
+ case SpvOpAtomicFlagTestAndSet:
+ return "AtomicFlagTestAndSet";
+ case SpvOpAtomicFlagClear:
+ return "AtomicFlagClear";
+ case SpvOpImageSparseRead:
+ return "ImageSparseRead";
+ default:
+ ABORT("unsupported SPIR-V op");
+ }
+}
+#endif
+
+void SPIRVCodeGenerator::writeOpCode(SpvOp_ opCode, int length, std::ostream& out) {
+ ASSERT(opCode != SpvOpUndef);
+ switch (opCode) {
+ case SpvOpReturn: // fall through
+ case SpvOpReturnValue: // fall through
+ case SpvOpKill: // fall through
+ case SpvOpBranch: // fall through
+ case SpvOpBranchConditional:
+ ASSERT(fCurrentBlock);
+ fCurrentBlock = 0;
+ break;
+ case SpvOpConstant: // fall through
+ case SpvOpConstantTrue: // fall through
+ case SpvOpConstantFalse: // fall through
+ case SpvOpConstantComposite: // fall through
+ case SpvOpTypeVoid: // fall through
+ case SpvOpTypeInt: // fall through
+ case SpvOpTypeFloat: // fall through
+ case SpvOpTypeBool: // fall through
+ case SpvOpTypeVector: // fall through
+ case SpvOpTypeMatrix: // fall through
+ case SpvOpTypeArray: // fall through
+ case SpvOpTypePointer: // fall through
+ case SpvOpTypeFunction: // fall through
+ case SpvOpTypeRuntimeArray: // fall through
+ case SpvOpTypeStruct: // fall through
+ case SpvOpTypeImage: // fall through
+ case SpvOpTypeSampledImage: // fall through
+ case SpvOpVariable: // fall through
+ case SpvOpFunction: // fall through
+ case SpvOpFunctionParameter: // fall through
+ case SpvOpFunctionEnd: // fall through
+ case SpvOpExecutionMode: // fall through
+ case SpvOpMemoryModel: // fall through
+ case SpvOpCapability: // fall through
+ case SpvOpExtInstImport: // fall through
+ case SpvOpEntryPoint: // fall through
+ case SpvOpSource: // fall through
+ case SpvOpSourceExtension: // fall through
+ case SpvOpName: // fall through
+ case SpvOpMemberName: // fall through
+ case SpvOpDecorate: // fall through
+ case SpvOpMemberDecorate:
+ break;
+ default:
+ ASSERT(fCurrentBlock);
+ }
+#if SPIRV_DEBUG
+ out << std::endl << opcode_text(opCode) << " ";
+#else
+ this->writeWord((length << 16) | opCode, out);
+#endif
+}
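+
+ // As a rough sketch of the word layout written above: every SPIR-V instruction begins with a
+ // single 32-bit word whose high 16 bits hold the total word count (including that word) and
+ // whose low 16 bits hold the opcode, which is exactly what (length << 16) | opCode packs. For
+ // example, an OpTypeFloat declaring a 32-bit float is 3 words long, so its leading word would
+ // be (3 << 16) | SpvOpTypeFloat.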
+
+void SPIRVCodeGenerator::writeLabel(SpvId label, std::ostream& out) {
+ fCurrentBlock = label;
+ this->writeInstruction(SpvOpLabel, label, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, std::ostream& out) {
+ this->writeOpCode(opCode, 1, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, std::ostream& out) {
+ this->writeOpCode(opCode, 2, out);
+ this->writeWord(word1, out);
+}
+
+void SPIRVCodeGenerator::writeString(const char* string, std::ostream& out) {
+ size_t length = strlen(string);
+ out << string;
+ switch (length % 4) {
+ case 1:
+ out << (char) 0;
+ // fall through
+ case 2:
+ out << (char) 0;
+ // fall through
+ case 3:
+ out << (char) 0;
+ break;
+ default:
+ this->writeWord(0, out);
+ }
+}
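+
+ // The padding above follows the SPIR-V rule that literal strings are nul-terminated and padded
+ // out to a whole number of 32-bit words. A 5-byte string such as "hello" gets three trailing
+ // zero bytes to fill out its second word, while a string whose length is already a multiple of
+ // four, such as "main", still needs one extra all-zero word to carry the terminator.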
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, const char* string, std::ostream& out) {
+ int32_t length = (int32_t) strlen(string);
+ this->writeOpCode(opCode, 1 + (length + 4) / 4, out);
+ this->writeString(string, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, const char* string,
+ std::ostream& out) {
+ int32_t length = (int32_t) strlen(string);
+ this->writeOpCode(opCode, 2 + (length + 4) / 4, out);
+ this->writeWord(word1, out);
+ this->writeString(string, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ const char* string, std::ostream& out) {
+ int32_t length = (int32_t) strlen(string);
+ this->writeOpCode(opCode, 3 + (length + 4) / 4, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeString(string, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ std::ostream& out) {
+ this->writeOpCode(opCode, 3, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ int32_t word3, std::ostream& out) {
+ this->writeOpCode(opCode, 4, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeWord(word3, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ int32_t word3, int32_t word4, std::ostream& out) {
+ this->writeOpCode(opCode, 5, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeWord(word3, out);
+ this->writeWord(word4, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ int32_t word3, int32_t word4, int32_t word5,
+ std::ostream& out) {
+ this->writeOpCode(opCode, 6, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeWord(word3, out);
+ this->writeWord(word4, out);
+ this->writeWord(word5, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ int32_t word3, int32_t word4, int32_t word5,
+ int32_t word6, std::ostream& out) {
+ this->writeOpCode(opCode, 7, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeWord(word3, out);
+ this->writeWord(word4, out);
+ this->writeWord(word5, out);
+ this->writeWord(word6, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ int32_t word3, int32_t word4, int32_t word5,
+ int32_t word6, int32_t word7, std::ostream& out) {
+ this->writeOpCode(opCode, 8, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeWord(word3, out);
+ this->writeWord(word4, out);
+ this->writeWord(word5, out);
+ this->writeWord(word6, out);
+ this->writeWord(word7, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ int32_t word3, int32_t word4, int32_t word5,
+ int32_t word6, int32_t word7, int32_t word8,
+ std::ostream& out) {
+ this->writeOpCode(opCode, 9, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeWord(word3, out);
+ this->writeWord(word4, out);
+ this->writeWord(word5, out);
+ this->writeWord(word6, out);
+ this->writeWord(word7, out);
+ this->writeWord(word8, out);
+}
+
+void SPIRVCodeGenerator::writeCapabilities(std::ostream& out) {
+ for (uint64_t i = 0, bit = 1; i <= kLast_Capability; i++, bit <<= 1) {
+ if (fCapabilities & bit) {
+ this->writeInstruction(SpvOpCapability, (SpvId) i, out);
+ }
+ }
+}
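+
+ // fCapabilities is treated as a bitmask here: bit i being set means the capability with enum
+ // value i was requested during code generation, and each set bit becomes one OpCapability
+ // instruction. For instance, if only SpvCapabilityShader (value 1) had been recorded,
+ // fCapabilities would equal 1 << 1 and a single "OpCapability Shader" would be emitted.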
+
+SpvId SPIRVCodeGenerator::nextId() {
+ return fIdCount++;
+}
+
+void SPIRVCodeGenerator::writeStruct(const Type& type, SpvId resultId) {
+ this->writeInstruction(SpvOpName, resultId, type.name().c_str(), fNameBuffer);
+ // go ahead and write all of the field types, so we don't inadvertently write them while we're
+ // in the middle of writing the struct instruction
+ std::vector<SpvId> types;
+ for (const auto& f : type.fields()) {
+ types.push_back(this->getType(*f.fType));
+ }
+ this->writeOpCode(SpvOpTypeStruct, 2 + (int32_t) types.size(), fConstantBuffer);
+ this->writeWord(resultId, fConstantBuffer);
+ for (SpvId id : types) {
+ this->writeWord(id, fConstantBuffer);
+ }
+ size_t offset = 0;
+ for (int32_t i = 0; i < (int32_t) type.fields().size(); i++) {
+ size_t size = type.fields()[i].fType->size();
+ size_t alignment = type.fields()[i].fType->alignment();
+ size_t mod = offset % alignment;
+ if (mod != 0) {
+ offset += alignment - mod;
+ }
+ this->writeInstruction(SpvOpMemberName, resultId, i, type.fields()[i].fName.c_str(),
+ fNameBuffer);
+ this->writeLayout(type.fields()[i].fModifiers.fLayout, resultId, i);
+ if (type.fields()[i].fModifiers.fLayout.fBuiltin < 0) {
+ this->writeInstruction(SpvOpMemberDecorate, resultId, (SpvId) i, SpvDecorationOffset,
+ (SpvId) offset, fDecorationBuffer);
+ }
+ if (type.fields()[i].fType->kind() == Type::kMatrix_Kind) {
+ this->writeInstruction(SpvOpMemberDecorate, resultId, i, SpvDecorationColMajor,
+ fDecorationBuffer);
+ this->writeInstruction(SpvOpMemberDecorate, resultId, i, SpvDecorationMatrixStride,
+ (SpvId) type.fields()[i].fType->stride(), fDecorationBuffer);
+ }
+ offset += size;
+ Type::Kind kind = type.fields()[i].fType->kind();
+ if ((kind == Type::kArray_Kind || kind == Type::kStruct_Kind) && offset % alignment != 0) {
+ offset += alignment - offset % alignment;
+ }
+ ASSERT(offset % alignment == 0);
+ }
+}
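+
+ // A small worked example of the offset bookkeeping above, assuming Type reports a size and
+ // alignment of 4 for float and an alignment of 16 for vec3: for struct { float a; vec3 b; },
+ // field a is decorated with Offset 0, the running offset of 4 is then rounded up to 16 to
+ // satisfy b's alignment, and b is decorated with Offset 16.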
+
+SpvId SPIRVCodeGenerator::getType(const Type& type) {
+ auto entry = fTypeMap.find(type.name());
+ if (entry == fTypeMap.end()) {
+ SpvId result = this->nextId();
+ switch (type.kind()) {
+ case Type::kScalar_Kind:
+ if (type == *fContext.fBool_Type) {
+ this->writeInstruction(SpvOpTypeBool, result, fConstantBuffer);
+ } else if (type == *fContext.fInt_Type) {
+ this->writeInstruction(SpvOpTypeInt, result, 32, 1, fConstantBuffer);
+ } else if (type == *fContext.fUInt_Type) {
+ this->writeInstruction(SpvOpTypeInt, result, 32, 0, fConstantBuffer);
+ } else if (type == *fContext.fFloat_Type) {
+ this->writeInstruction(SpvOpTypeFloat, result, 32, fConstantBuffer);
+ } else if (type == *fContext.fDouble_Type) {
+ this->writeInstruction(SpvOpTypeFloat, result, 64, fConstantBuffer);
+ } else {
+ ASSERT(false);
+ }
+ break;
+ case Type::kVector_Kind:
+ this->writeInstruction(SpvOpTypeVector, result,
+ this->getType(type.componentType()),
+ type.columns(), fConstantBuffer);
+ break;
+ case Type::kMatrix_Kind:
+ this->writeInstruction(SpvOpTypeMatrix, result,
+ this->getType(index_type(fContext, type)),
+ type.columns(), fConstantBuffer);
+ break;
+ case Type::kStruct_Kind:
+ this->writeStruct(type, result);
+ break;
+ case Type::kArray_Kind: {
+ if (type.columns() > 0) {
+ IntLiteral count(fContext, Position(), type.columns());
+ this->writeInstruction(SpvOpTypeArray, result,
+ this->getType(type.componentType()),
+ this->writeIntLiteral(count), fConstantBuffer);
+ this->writeInstruction(SpvOpDecorate, result, SpvDecorationArrayStride,
+ (int32_t) type.stride(), fDecorationBuffer);
+ } else {
+ ABORT("runtime-sized arrays are not yet supported");
+ this->writeInstruction(SpvOpTypeRuntimeArray, result,
+ this->getType(type.componentType()), fConstantBuffer);
+ }
+ break;
+ }
+ case Type::kSampler_Kind: {
+ SpvId image = this->nextId();
+ this->writeInstruction(SpvOpTypeImage, image, this->getType(*fContext.fFloat_Type),
+ type.dimensions(), type.isDepth(), type.isArrayed(),
+ type.isMultisampled(), type.isSampled(),
+ SpvImageFormatUnknown, fConstantBuffer);
+ this->writeInstruction(SpvOpTypeSampledImage, result, image, fConstantBuffer);
+ break;
+ }
+ default:
+ if (type == *fContext.fVoid_Type) {
+ this->writeInstruction(SpvOpTypeVoid, result, fConstantBuffer);
+ } else {
+ ABORT("invalid type: %s", type.description().c_str());
+ }
+ }
+ fTypeMap[type.name()] = result;
+ return result;
+ }
+ return entry->second;
+}
+
+SpvId SPIRVCodeGenerator::getFunctionType(const FunctionDeclaration& function) {
+ std::string key = function.fReturnType.description() + "(";
+ std::string separator = "";
+ for (size_t i = 0; i < function.fParameters.size(); i++) {
+ key += separator;
+ separator = ", ";
+ key += function.fParameters[i]->fType.description();
+ }
+ key += ")";
+ auto entry = fTypeMap.find(key);
+ if (entry == fTypeMap.end()) {
+ SpvId result = this->nextId();
+ int32_t length = 3 + (int32_t) function.fParameters.size();
+ SpvId returnType = this->getType(function.fReturnType);
+ std::vector<SpvId> parameterTypes;
+ for (size_t i = 0; i < function.fParameters.size(); i++) {
+ // glslang seems to treat all function arguments as pointers whether they need to be or
+ // not. I was initially puzzled by this until I ran into bizarre failures with certain
+ // patterns of function calls and control constructs, as exemplified by this minimal
+ // failure case:
+ //
+ // void sphere(float x) {
+ // }
+ //
+ // void map() {
+ // sphere(1.0);
+ // }
+ //
+ // void main() {
+ // for (int i = 0; i < 1; i++) {
+ // map();
+ // }
+ // }
+ //
+ // As of this writing, compiling this in the "obvious" way (with sphere taking a float)
+ // crashes. Making it take a float* and storing the argument in a temporary variable,
+ // as glslang does, fixes it. It's entirely possible I simply missed whichever part of
+ // the spec makes this make sense.
+// if (is_out(function->fParameters[i])) {
+ parameterTypes.push_back(this->getPointerType(function.fParameters[i]->fType,
+ SpvStorageClassFunction));
+// } else {
+// parameterTypes.push_back(this->getType(function.fParameters[i]->fType));
+// }
+ }
+ this->writeOpCode(SpvOpTypeFunction, length, fConstantBuffer);
+ this->writeWord(result, fConstantBuffer);
+ this->writeWord(returnType, fConstantBuffer);
+ for (SpvId id : parameterTypes) {
+ this->writeWord(id, fConstantBuffer);
+ }
+ fTypeMap[key] = result;
+ return result;
+ }
+ return entry->second;
+}
+
+SpvId SPIRVCodeGenerator::getPointerType(const Type& type,
+ SpvStorageClass_ storageClass) {
+ std::string key = type.description() + "*" + to_string(storageClass);
+ auto entry = fTypeMap.find(key);
+ if (entry == fTypeMap.end()) {
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpTypePointer, result, storageClass,
+ this->getType(type), fConstantBuffer);
+ fTypeMap[key] = result;
+ return result;
+ }
+ return entry->second;
+}
+
+SpvId SPIRVCodeGenerator::writeExpression(const Expression& expr, std::ostream& out) {
+ switch (expr.fKind) {
+ case Expression::kBinary_Kind:
+ return this->writeBinaryExpression((BinaryExpression&) expr, out);
+ case Expression::kBoolLiteral_Kind:
+ return this->writeBoolLiteral((BoolLiteral&) expr);
+ case Expression::kConstructor_Kind:
+ return this->writeConstructor((Constructor&) expr, out);
+ case Expression::kIntLiteral_Kind:
+ return this->writeIntLiteral((IntLiteral&) expr);
+ case Expression::kFieldAccess_Kind:
+ return this->writeFieldAccess(((FieldAccess&) expr), out);
+ case Expression::kFloatLiteral_Kind:
+ return this->writeFloatLiteral(((FloatLiteral&) expr));
+ case Expression::kFunctionCall_Kind:
+ return this->writeFunctionCall((FunctionCall&) expr, out);
+ case Expression::kPrefix_Kind:
+ return this->writePrefixExpression((PrefixExpression&) expr, out);
+ case Expression::kPostfix_Kind:
+ return this->writePostfixExpression((PostfixExpression&) expr, out);
+ case Expression::kSwizzle_Kind:
+ return this->writeSwizzle((Swizzle&) expr, out);
+ case Expression::kVariableReference_Kind:
+ return this->writeVariableReference((VariableReference&) expr, out);
+ case Expression::kTernary_Kind:
+ return this->writeTernaryExpression((TernaryExpression&) expr, out);
+ case Expression::kIndex_Kind:
+ return this->writeIndexExpression((IndexExpression&) expr, out);
+ default:
+ ABORT("unsupported expression: %s", expr.description().c_str());
+ }
+ return -1;
+}
+
+SpvId SPIRVCodeGenerator::writeIntrinsicCall(const FunctionCall& c, std::ostream& out) {
+ auto intrinsic = fIntrinsicMap.find(c.fFunction.fName);
+ ASSERT(intrinsic != fIntrinsicMap.end());
+ const Type& type = c.fArguments[0]->fType;
+ int32_t intrinsicId;
+ if (std::get<0>(intrinsic->second) == kSpecial_IntrinsicKind || is_float(fContext, type)) {
+ intrinsicId = std::get<1>(intrinsic->second);
+ } else if (is_signed(fContext, type)) {
+ intrinsicId = std::get<2>(intrinsic->second);
+ } else if (is_unsigned(fContext, type)) {
+ intrinsicId = std::get<3>(intrinsic->second);
+ } else if (is_bool(fContext, type)) {
+ intrinsicId = std::get<4>(intrinsic->second);
+ } else {
+ ABORT("invalid call %s, cannot operate on '%s'", c.description().c_str(),
+ type.description().c_str());
+ }
+ switch (std::get<0>(intrinsic->second)) {
+ case kGLSL_STD_450_IntrinsicKind: {
+ SpvId result = this->nextId();
+ std::vector<SpvId> arguments;
+ for (size_t i = 0; i < c.fArguments.size(); i++) {
+ arguments.push_back(this->writeExpression(*c.fArguments[i], out));
+ }
+ this->writeOpCode(SpvOpExtInst, 5 + (int32_t) arguments.size(), out);
+ this->writeWord(this->getType(c.fType), out);
+ this->writeWord(result, out);
+ this->writeWord(fGLSLExtendedInstructions, out);
+ this->writeWord(intrinsicId, out);
+ for (SpvId id : arguments) {
+ this->writeWord(id, out);
+ }
+ return result;
+ }
+ case kSPIRV_IntrinsicKind: {
+ SpvId result = this->nextId();
+ std::vector<SpvId> arguments;
+ for (size_t i = 0; i < c.fArguments.size(); i++) {
+ arguments.push_back(this->writeExpression(*c.fArguments[i], out));
+ }
+ this->writeOpCode((SpvOp_) intrinsicId, 3 + (int32_t) arguments.size(), out);
+ this->writeWord(this->getType(c.fType), out);
+ this->writeWord(result, out);
+ for (SpvId id : arguments) {
+ this->writeWord(id, out);
+ }
+ return result;
+ }
+ case kSpecial_IntrinsicKind:
+ return this->writeSpecialIntrinsic(c, (SpecialIntrinsic) intrinsicId, out);
+ default:
+ ABORT("unsupported intrinsic kind");
+ }
+}
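+
+ // Each fIntrinsicMap entry is a tuple of (intrinsic kind, float opcode, signed opcode,
+ // unsigned opcode, bool opcode), which is why the type of the first argument selects
+ // intrinsicId above. As a sketch, a GLSL.std.450 "min" intrinsic would presumably resolve to
+ // GLSLstd450FMin, GLSLstd450SMin or GLSLstd450UMin depending on whether that argument is
+ // floating point, signed or unsigned.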
+
+SpvId SPIRVCodeGenerator::writeSpecialIntrinsic(const FunctionCall& c, SpecialIntrinsic kind,
+ std::ostream& out) {
+ SpvId result = this->nextId();
+ switch (kind) {
+ case kAtan_SpecialIntrinsic: {
+ std::vector<SpvId> arguments;
+ for (size_t i = 0; i < c.fArguments.size(); i++) {
+ arguments.push_back(this->writeExpression(*c.fArguments[i], out));
+ }
+ this->writeOpCode(SpvOpExtInst, 5 + (int32_t) arguments.size(), out);
+ this->writeWord(this->getType(c.fType), out);
+ this->writeWord(result, out);
+ this->writeWord(fGLSLExtendedInstructions, out);
+ this->writeWord(arguments.size() == 2 ? GLSLstd450Atan2 : GLSLstd450Atan, out);
+ for (SpvId id : arguments) {
+ this->writeWord(id, out);
+ }
+ return result;
+ }
+ case kTexture_SpecialIntrinsic: {
+ SpvId type = this->getType(c.fType);
+ SpvId sampler = this->writeExpression(*c.fArguments[0], out);
+ SpvId uv = this->writeExpression(*c.fArguments[1], out);
+ if (c.fArguments.size() == 3) {
+ this->writeInstruction(SpvOpImageSampleImplicitLod, type, result, sampler, uv,
+ SpvImageOperandsBiasMask,
+ this->writeExpression(*c.fArguments[2], out),
+ out);
+ } else {
+ ASSERT(c.fArguments.size() == 2);
+ this->writeInstruction(SpvOpImageSampleImplicitLod, type, result, sampler, uv, out);
+ }
+ break;
+ }
+ case kTextureProj_SpecialIntrinsic: {
+ SpvId type = this->getType(c.fType);
+ SpvId sampler = this->writeExpression(*c.fArguments[0], out);
+ SpvId uv = this->writeExpression(*c.fArguments[1], out);
+ if (c.fArguments.size() == 3) {
+ this->writeInstruction(SpvOpImageSampleProjImplicitLod, type, result, sampler, uv,
+ SpvImageOperandsBiasMask,
+ this->writeExpression(*c.fArguments[2], out),
+ out);
+ } else {
+ ASSERT(c.fArguments.size() == 2);
+ this->writeInstruction(SpvOpImageSampleProjImplicitLod, type, result, sampler, uv,
+ out);
+ }
+ break;
+ }
+ case kTexture2D_SpecialIntrinsic: {
+ SpvId img = this->writeExpression(*c.fArguments[0], out);
+ SpvId coords = this->writeExpression(*c.fArguments[1], out);
+ this->writeInstruction(SpvOpImageSampleImplicitLod,
+ this->getType(c.fType),
+ result,
+ img,
+ coords,
+ out);
+ break;
+ }
+ }
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writeFunctionCall(const FunctionCall& c, std::ostream& out) {
+ const auto& entry = fFunctionMap.find(&c.fFunction);
+ if (entry == fFunctionMap.end()) {
+ return this->writeIntrinsicCall(c, out);
+ }
+ // stores (variable, type, lvalue) tuples to extract and save after the function call is complete
+ std::vector<std::tuple<SpvId, SpvId, std::unique_ptr<LValue>>> lvalues;
+ std::vector<SpvId> arguments;
+ for (size_t i = 0; i < c.fArguments.size(); i++) {
+ // id of temporary variable that we will use to hold this argument, or 0 if it is being
+ // passed directly
+ SpvId tmpVar;
+ // if we need a temporary var to store this argument, this is the value to store in the var
+ SpvId tmpValueId;
+ if (is_out(*c.fFunction.fParameters[i])) {
+ std::unique_ptr<LValue> lv = this->getLValue(*c.fArguments[i], out);
+ SpvId ptr = lv->getPointer();
+ if (ptr) {
+ arguments.push_back(ptr);
+ continue;
+ } else {
+ // lvalue cannot simply be read and written via a pointer (e.g. a swizzle). Need to
+ // copy it into a temp, call the function, read the value out of the temp, and then
+ // update the lvalue.
+ tmpValueId = lv->load(out);
+ tmpVar = this->nextId();
+ lvalues.push_back(std::make_tuple(tmpVar, this->getType(c.fArguments[i]->fType),
+ std::move(lv)));
+ }
+ } else {
+ // see getFunctionType for an explanation of why we're always using pointer parameters
+ tmpValueId = this->writeExpression(*c.fArguments[i], out);
+ tmpVar = this->nextId();
+ }
+ this->writeInstruction(SpvOpVariable,
+ this->getPointerType(c.fArguments[i]->fType,
+ SpvStorageClassFunction),
+ tmpVar,
+ SpvStorageClassFunction,
+ fVariableBuffer);
+ this->writeInstruction(SpvOpStore, tmpVar, tmpValueId, out);
+ arguments.push_back(tmpVar);
+ }
+ SpvId result = this->nextId();
+ this->writeOpCode(SpvOpFunctionCall, 4 + (int32_t) c.fArguments.size(), out);
+ this->writeWord(this->getType(c.fType), out);
+ this->writeWord(result, out);
+ this->writeWord(entry->second, out);
+ for (SpvId id : arguments) {
+ this->writeWord(id, out);
+ }
+ // now that the call is complete, we may need to update some lvalues with the new values of out
+ // arguments
+ for (const auto& tuple : lvalues) {
+ SpvId load = this->nextId();
+ this->writeInstruction(SpvOpLoad, std::get<1>(tuple), load, std::get<0>(tuple), out);
+ std::get<2>(tuple)->store(load, out);
+ }
+ return result;
+}
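+
+ // To make the out-parameter handling above concrete: for a hypothetical call f(v.xy) where the
+ // parameter is declared out, the swizzle has no single pointer, so its current value is loaded,
+ // stored into a fresh Function-storage OpVariable, that temporary is what gets passed to
+ // OpFunctionCall, and afterwards the temporary is loaded back and written through the swizzle
+ // lvalue so v picks up the updated components.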
+
+SpvId SPIRVCodeGenerator::writeConstantVector(const Constructor& c) {
+ ASSERT(c.fType.kind() == Type::kVector_Kind && c.isConstant());
+ SpvId result = this->nextId();
+ std::vector<SpvId> arguments;
+ for (size_t i = 0; i < c.fArguments.size(); i++) {
+ arguments.push_back(this->writeExpression(*c.fArguments[i], fConstantBuffer));
+ }
+ SpvId type = this->getType(c.fType);
+ if (c.fArguments.size() == 1) {
+ // with a single argument, a vector will have all of its entries equal to the argument
+ this->writeOpCode(SpvOpConstantComposite, 3 + c.fType.columns(), fConstantBuffer);
+ this->writeWord(type, fConstantBuffer);
+ this->writeWord(result, fConstantBuffer);
+ for (int i = 0; i < c.fType.columns(); i++) {
+ this->writeWord(arguments[0], fConstantBuffer);
+ }
+ } else {
+ this->writeOpCode(SpvOpConstantComposite, 3 + (int32_t) c.fArguments.size(),
+ fConstantBuffer);
+ this->writeWord(type, fConstantBuffer);
+ this->writeWord(result, fConstantBuffer);
+ for (SpvId id : arguments) {
+ this->writeWord(id, fConstantBuffer);
+ }
+ }
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writeFloatConstructor(const Constructor& c, std::ostream& out) {
+ ASSERT(c.fType == *fContext.fFloat_Type);
+ ASSERT(c.fArguments.size() == 1);
+ ASSERT(c.fArguments[0]->fType.isNumber());
+ SpvId result = this->nextId();
+ SpvId parameter = this->writeExpression(*c.fArguments[0], out);
+ if (c.fArguments[0]->fType == *fContext.fInt_Type) {
+ this->writeInstruction(SpvOpConvertSToF, this->getType(c.fType), result, parameter,
+ out);
+ } else if (c.fArguments[0]->fType == *fContext.fUInt_Type) {
+ this->writeInstruction(SpvOpConvertUToF, this->getType(c.fType), result, parameter,
+ out);
+ } else if (c.fArguments[0]->fType == *fContext.fFloat_Type) {
+ return parameter;
+ }
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writeIntConstructor(const Constructor& c, std::ostream& out) {
+ ASSERT(c.fType == *fContext.fInt_Type);
+ ASSERT(c.fArguments.size() == 1);
+ ASSERT(c.fArguments[0]->fType.isNumber());
+ SpvId result = this->nextId();
+ SpvId parameter = this->writeExpression(*c.fArguments[0], out);
+ if (c.fArguments[0]->fType == *fContext.fFloat_Type) {
+ this->writeInstruction(SpvOpConvertFToS, this->getType(c.fType), result, parameter,
+ out);
+ } else if (c.fArguments[0]->fType == *fContext.fUInt_Type) {
+ this->writeInstruction(SpvOpSatConvertUToS, this->getType(c.fType), result, parameter,
+ out);
+ } else if (c.fArguments[0]->fType == *fContext.fInt_Type) {
+ return parameter;
+ }
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writeMatrixConstructor(const Constructor& c, std::ostream& out) {
+ ASSERT(c.fType.kind() == Type::kMatrix_Kind);
+ // go ahead and write the arguments so we don't try to write new instructions in the middle of
+ // an instruction
+ std::vector<SpvId> arguments;
+ for (size_t i = 0; i < c.fArguments.size(); i++) {
+ arguments.push_back(this->writeExpression(*c.fArguments[i], out));
+ }
+ SpvId result = this->nextId();
+ int rows = c.fType.rows();
+ int columns = c.fType.columns();
+ // FIXME this won't work to create a matrix from another matrix
+ if (arguments.size() == 1) {
+ // with a single argument, a matrix will have all of its diagonal entries equal to the
+ // argument and its other values equal to zero
+ // FIXME this won't work for int matrices
+ FloatLiteral zero(fContext, Position(), 0);
+ SpvId zeroId = this->writeFloatLiteral(zero);
+ std::vector<SpvId> columnIds;
+ for (int column = 0; column < columns; column++) {
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + c.fType.rows(),
+ out);
+ this->writeWord(this->getType(c.fType.componentType().toCompound(fContext, rows, 1)),
+ out);
+ SpvId columnId = this->nextId();
+ this->writeWord(columnId, out);
+ columnIds.push_back(columnId);
+ for (int row = 0; row < rows; row++) {
+ this->writeWord(row == column ? arguments[0] : zeroId, out);
+ }
+ }
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + columns,
+ out);
+ this->writeWord(this->getType(c.fType), out);
+ this->writeWord(result, out);
+ for (SpvId id : columnIds) {
+ this->writeWord(id, out);
+ }
+ } else {
+ std::vector<SpvId> columnIds;
+ int currentCount = 0;
+ for (size_t i = 0; i < arguments.size(); i++) {
+ if (c.fArguments[i]->fType.kind() == Type::kVector_Kind) {
+ ASSERT(currentCount == 0);
+ columnIds.push_back(arguments[i]);
+ currentCount = 0;
+ } else {
+ ASSERT(c.fArguments[i]->fType.kind() == Type::kScalar_Kind);
+ if (currentCount == 0) {
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + c.fType.rows(), out);
+ this->writeWord(this->getType(c.fType.componentType().toCompound(fContext, rows,
+ 1)),
+ out);
+ SpvId id = this->nextId();
+ this->writeWord(id, out);
+ columnIds.push_back(id);
+ }
+ this->writeWord(arguments[i], out);
+ currentCount = (currentCount + 1) % rows;
+ }
+ }
+ ASSERT(columnIds.size() == (size_t) columns);
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + columns, out);
+ this->writeWord(this->getType(c.fType), out);
+ this->writeWord(result, out);
+ for (SpvId id : columnIds) {
+ this->writeWord(id, out);
+ }
+ }
+ return result;
+}
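+
+ // For example, a hypothetical mat2(3.0) takes the single-argument path above: each column is
+ // built with OpCompositeConstruct as (3.0, 0.0) and (0.0, 3.0) respectively, and a final
+ // OpCompositeConstruct assembles those two column ids into the matrix value.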
+
+SpvId SPIRVCodeGenerator::writeVectorConstructor(const Constructor& c, std::ostream& out) {
+ ASSERT(c.fType.kind() == Type::kVector_Kind);
+ if (c.isConstant()) {
+ return this->writeConstantVector(c);
+ }
+ // go ahead and write the arguments so we don't try to write new instructions in the middle of
+ // an instruction
+ std::vector<SpvId> arguments;
+ for (size_t i = 0; i < c.fArguments.size(); i++) {
+ arguments.push_back(this->writeExpression(*c.fArguments[i], out));
+ }
+ SpvId result = this->nextId();
+ if (arguments.size() == 1 && c.fArguments[0]->fType.kind() == Type::kScalar_Kind) {
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + c.fType.columns(), out);
+ this->writeWord(this->getType(c.fType), out);
+ this->writeWord(result, out);
+ for (int i = 0; i < c.fType.columns(); i++) {
+ this->writeWord(arguments[0], out);
+ }
+ } else {
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + (int32_t) c.fArguments.size(), out);
+ this->writeWord(this->getType(c.fType), out);
+ this->writeWord(result, out);
+ for (SpvId id : arguments) {
+ this->writeWord(id, out);
+ }
+ }
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writeConstructor(const Constructor& c, std::ostream& out) {
+ if (c.fType == *fContext.fFloat_Type) {
+ return this->writeFloatConstructor(c, out);
+ } else if (c.fType == *fContext.fInt_Type) {
+ return this->writeIntConstructor(c, out);
+ }
+ switch (c.fType.kind()) {
+ case Type::kVector_Kind:
+ return this->writeVectorConstructor(c, out);
+ case Type::kMatrix_Kind:
+ return this->writeMatrixConstructor(c, out);
+ default:
+ ABORT("unsupported constructor: %s", c.description().c_str());
+ }
+}
+
+SpvStorageClass_ get_storage_class(const Modifiers& modifiers) {
+ if (modifiers.fFlags & Modifiers::kIn_Flag) {
+ return SpvStorageClassInput;
+ } else if (modifiers.fFlags & Modifiers::kOut_Flag) {
+ return SpvStorageClassOutput;
+ } else if (modifiers.fFlags & Modifiers::kUniform_Flag) {
+ return SpvStorageClassUniform;
+ } else {
+ return SpvStorageClassFunction;
+ }
+}
+
+SpvStorageClass_ get_storage_class(const Expression& expr) {
+ switch (expr.fKind) {
+ case Expression::kVariableReference_Kind:
+ return get_storage_class(((VariableReference&) expr).fVariable.fModifiers);
+ case Expression::kFieldAccess_Kind:
+ return get_storage_class(*((FieldAccess&) expr).fBase);
+ case Expression::kIndex_Kind:
+ return get_storage_class(*((IndexExpression&) expr).fBase);
+ default:
+ return SpvStorageClassFunction;
+ }
+}
+
+std::vector<SpvId> SPIRVCodeGenerator::getAccessChain(const Expression& expr, std::ostream& out) {
+ std::vector<SpvId> chain;
+ switch (expr.fKind) {
+ case Expression::kIndex_Kind: {
+ IndexExpression& indexExpr = (IndexExpression&) expr;
+ chain = this->getAccessChain(*indexExpr.fBase, out);
+ chain.push_back(this->writeExpression(*indexExpr.fIndex, out));
+ break;
+ }
+ case Expression::kFieldAccess_Kind: {
+ FieldAccess& fieldExpr = (FieldAccess&) expr;
+ chain = this->getAccessChain(*fieldExpr.fBase, out);
+ IntLiteral index(fContext, Position(), fieldExpr.fFieldIndex);
+ chain.push_back(this->writeIntLiteral(index));
+ break;
+ }
+ default:
+ chain.push_back(this->getLValue(expr, out)->getPointer());
+ }
+ return chain;
+}
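+
+ // As an illustration of the chain built above, a hypothetical expression s.f[i] (with s a
+ // plain variable) yields { pointer to s, constant id for f's field index, id of the expression
+ // i }, which getLValue then feeds to a single OpAccessChain.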
+
+class PointerLValue : public SPIRVCodeGenerator::LValue {
+public:
+ PointerLValue(SPIRVCodeGenerator& gen, SpvId pointer, SpvId type)
+ : fGen(gen)
+ , fPointer(pointer)
+ , fType(type) {}
+
+ virtual SpvId getPointer() override {
+ return fPointer;
+ }
+
+ virtual SpvId load(std::ostream& out) override {
+ SpvId result = fGen.nextId();
+ fGen.writeInstruction(SpvOpLoad, fType, result, fPointer, out);
+ return result;
+ }
+
+ virtual void store(SpvId value, std::ostream& out) override {
+ fGen.writeInstruction(SpvOpStore, fPointer, value, out);
+ }
+
+private:
+ SPIRVCodeGenerator& fGen;
+ const SpvId fPointer;
+ const SpvId fType;
+};
+
+class SwizzleLValue : public SPIRVCodeGenerator::LValue {
+public:
+ SwizzleLValue(SPIRVCodeGenerator& gen, SpvId vecPointer, const std::vector<int>& components,
+ const Type& baseType, const Type& swizzleType)
+ : fGen(gen)
+ , fVecPointer(vecPointer)
+ , fComponents(components)
+ , fBaseType(baseType)
+ , fSwizzleType(swizzleType) {}
+
+ virtual SpvId getPointer() override {
+ return 0;
+ }
+
+ virtual SpvId load(std::ostream& out) override {
+ SpvId base = fGen.nextId();
+ fGen.writeInstruction(SpvOpLoad, fGen.getType(fBaseType), base, fVecPointer, out);
+ SpvId result = fGen.nextId();
+ fGen.writeOpCode(SpvOpVectorShuffle, 5 + (int32_t) fComponents.size(), out);
+ fGen.writeWord(fGen.getType(fSwizzleType), out);
+ fGen.writeWord(result, out);
+ fGen.writeWord(base, out);
+ fGen.writeWord(base, out);
+ for (int component : fComponents) {
+ fGen.writeWord(component, out);
+ }
+ return result;
+ }
+
+ virtual void store(SpvId value, std::ostream& out) override {
+ // use OpVectorShuffle to mix and match the vector components. We effectively create
+ // a virtual vector out of the concatenation of the left and right vectors, and then
+ // select components from this virtual vector to make the result vector. For
+ // instance, given:
+ // vec3 L = ...;
+ // vec3 R = ...;
+ // L.xz = R.xy;
+ // we end up with the virtual vector (L.x, L.y, L.z, R.x, R.y, R.z). Then we want
+ // our result vector to look like (R.x, L.y, R.y), so we need to select indices
+ // (3, 1, 4).
+ SpvId base = fGen.nextId();
+ fGen.writeInstruction(SpvOpLoad, fGen.getType(fBaseType), base, fVecPointer, out);
+ SpvId shuffle = fGen.nextId();
+ fGen.writeOpCode(SpvOpVectorShuffle, 5 + fBaseType.columns(), out);
+ fGen.writeWord(fGen.getType(fBaseType), out);
+ fGen.writeWord(shuffle, out);
+ fGen.writeWord(base, out);
+ fGen.writeWord(value, out);
+ for (int i = 0; i < fBaseType.columns(); i++) {
+ // current offset into the virtual vector, defaults to pulling the unmodified
+ // value from the left side
+ int offset = i;
+ // check to see if we are writing this component
+ for (size_t j = 0; j < fComponents.size(); j++) {
+ if (fComponents[j] == i) {
+ // we're writing to this component, so adjust the offset to pull from
+ // the correct component of the right side instead of preserving the
+ // value from the left
+ offset = (int) (j + fBaseType.columns());
+ break;
+ }
+ }
+ fGen.writeWord(offset, out);
+ }
+ fGen.writeInstruction(SpvOpStore, fVecPointer, shuffle, out);
+ }
+
+private:
+ SPIRVCodeGenerator& fGen;
+ const SpvId fVecPointer;
+ const std::vector<int>& fComponents;
+ const Type& fBaseType;
+ const Type& fSwizzleType;
+};
+
+std::unique_ptr<SPIRVCodeGenerator::LValue> SPIRVCodeGenerator::getLValue(const Expression& expr,
+ std::ostream& out) {
+ switch (expr.fKind) {
+ case Expression::kVariableReference_Kind: {
+ const Variable& var = ((VariableReference&) expr).fVariable;
+ auto entry = fVariableMap.find(&var);
+ ASSERT(entry != fVariableMap.end());
+ return std::unique_ptr<SPIRVCodeGenerator::LValue>(new PointerLValue(
+ *this,
+ entry->second,
+ this->getType(expr.fType)));
+ }
+ case Expression::kIndex_Kind: // fall through
+ case Expression::kFieldAccess_Kind: {
+ std::vector<SpvId> chain = this->getAccessChain(expr, out);
+ SpvId member = this->nextId();
+ this->writeOpCode(SpvOpAccessChain, (SpvId) (3 + chain.size()), out);
+ this->writeWord(this->getPointerType(expr.fType, get_storage_class(expr)), out);
+ this->writeWord(member, out);
+ for (SpvId idx : chain) {
+ this->writeWord(idx, out);
+ }
+ return std::unique_ptr<SPIRVCodeGenerator::LValue>(new PointerLValue(
+ *this,
+ member,
+ this->getType(expr.fType)));
+ }
+
+ case Expression::kSwizzle_Kind: {
+ Swizzle& swizzle = (Swizzle&) expr;
+ size_t count = swizzle.fComponents.size();
+ SpvId base = this->getLValue(*swizzle.fBase, out)->getPointer();
+ ASSERT(base);
+ if (count == 1) {
+ IntLiteral index(fContext, Position(), swizzle.fComponents[0]);
+ SpvId member = this->nextId();
+ this->writeInstruction(SpvOpAccessChain,
+ this->getPointerType(swizzle.fType,
+ get_storage_class(*swizzle.fBase)),
+ member,
+ base,
+ this->writeIntLiteral(index),
+ out);
+ return std::unique_ptr<SPIRVCodeGenerator::LValue>(new PointerLValue(
+ *this,
+ member,
+ this->getType(expr.fType)));
+ } else {
+ return std::unique_ptr<SPIRVCodeGenerator::LValue>(new SwizzleLValue(
+ *this,
+ base,
+ swizzle.fComponents,
+ swizzle.fBase->fType,
+ expr.fType));
+ }
+ }
+
+ default:
+ // expr isn't actually an lvalue, create a dummy variable for it. This case happens due
+ // to the need to store values in temporary variables during function calls (see
+ // comments in getFunctionType); erroneous uses of rvalues as lvalues should have been
+ // caught by IRGenerator
+ SpvId result = this->nextId();
+ SpvId type = this->getPointerType(expr.fType, SpvStorageClassFunction);
+ this->writeInstruction(SpvOpVariable, type, result, SpvStorageClassFunction,
+ fVariableBuffer);
+ this->writeInstruction(SpvOpStore, result, this->writeExpression(expr, out), out);
+ return std::unique_ptr<SPIRVCodeGenerator::LValue>(new PointerLValue(
+ *this,
+ result,
+ this->getType(expr.fType)));
+ }
+}
+
+SpvId SPIRVCodeGenerator::writeVariableReference(const VariableReference& ref, std::ostream& out) {
+ auto entry = fVariableMap.find(&ref.fVariable);
+ ASSERT(entry != fVariableMap.end());
+ SpvId var = entry->second;
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpLoad, this->getType(ref.fVariable.fType), result, var, out);
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writeIndexExpression(const IndexExpression& expr, std::ostream& out) {
+ return getLValue(expr, out)->load(out);
+}
+
+SpvId SPIRVCodeGenerator::writeFieldAccess(const FieldAccess& f, std::ostream& out) {
+ return getLValue(f, out)->load(out);
+}
+
+SpvId SPIRVCodeGenerator::writeSwizzle(const Swizzle& swizzle, std::ostream& out) {
+ SpvId base = this->writeExpression(*swizzle.fBase, out);
+ SpvId result = this->nextId();
+ size_t count = swizzle.fComponents.size();
+ if (count == 1) {
+ this->writeInstruction(SpvOpCompositeExtract, this->getType(swizzle.fType), result, base,
+ swizzle.fComponents[0], out);
+ } else {
+ this->writeOpCode(SpvOpVectorShuffle, 5 + (int32_t) count, out);
+ this->writeWord(this->getType(swizzle.fType), out);
+ this->writeWord(result, out);
+ this->writeWord(base, out);
+ this->writeWord(base, out);
+ for (int component : swizzle.fComponents) {
+ this->writeWord(component, out);
+ }
+ }
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writeBinaryOperation(const Type& resultType,
+ const Type& operandType, SpvId lhs,
+ SpvId rhs, SpvOp_ ifFloat, SpvOp_ ifInt,
+ SpvOp_ ifUInt, SpvOp_ ifBool, std::ostream& out) {
+ SpvId result = this->nextId();
+ if (is_float(fContext, operandType)) {
+ this->writeInstruction(ifFloat, this->getType(resultType), result, lhs, rhs, out);
+ } else if (is_signed(fContext, operandType)) {
+ this->writeInstruction(ifInt, this->getType(resultType), result, lhs, rhs, out);
+ } else if (is_unsigned(fContext, operandType)) {
+ this->writeInstruction(ifUInt, this->getType(resultType), result, lhs, rhs, out);
+ } else if (operandType == *fContext.fBool_Type) {
+ this->writeInstruction(ifBool, this->getType(resultType), result, lhs, rhs, out);
+ } else {
+ ABORT("invalid operandType: %s", operandType.description().c_str());
+ }
+ return result;
+}
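+
+ // The four opcode parameters above mirror the possible component types: for instance, the
+ // PLUS case later in this file passes SpvOpFAdd/SpvOpIAdd/SpvOpIAdd/SpvOpUndef, so adding two
+ // floats selects SpvOpFAdd while adding two ints (signed or unsigned) selects SpvOpIAdd.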
+
+bool is_assignment(Token::Kind op) {
+ switch (op) {
+ case Token::EQ: // fall through
+ case Token::PLUSEQ: // fall through
+ case Token::MINUSEQ: // fall through
+ case Token::STAREQ: // fall through
+ case Token::SLASHEQ: // fall through
+ case Token::PERCENTEQ: // fall through
+ case Token::SHLEQ: // fall through
+ case Token::SHREQ: // fall through
+ case Token::BITWISEOREQ: // fall through
+ case Token::BITWISEXOREQ: // fall through
+ case Token::BITWISEANDEQ: // fall through
+ case Token::LOGICALOREQ: // fall through
+ case Token::LOGICALXOREQ: // fall through
+ case Token::LOGICALANDEQ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+SpvId SPIRVCodeGenerator::writeBinaryExpression(const BinaryExpression& b, std::ostream& out) {
+ // handle cases where we don't necessarily evaluate both LHS and RHS
+ switch (b.fOperator) {
+ case Token::EQ: {
+ SpvId rhs = this->writeExpression(*b.fRight, out);
+ this->getLValue(*b.fLeft, out)->store(rhs, out);
+ return rhs;
+ }
+ case Token::LOGICALAND:
+ return this->writeLogicalAnd(b, out);
+ case Token::LOGICALOR:
+ return this->writeLogicalOr(b, out);
+ default:
+ break;
+ }
+
+ // "normal" operators
+ const Type& resultType = b.fType;
+ std::unique_ptr<LValue> lvalue;
+ SpvId lhs;
+ if (is_assignment(b.fOperator)) {
+ lvalue = this->getLValue(*b.fLeft, out);
+ lhs = lvalue->load(out);
+ } else {
+ lvalue = nullptr;
+ lhs = this->writeExpression(*b.fLeft, out);
+ }
+ SpvId rhs = this->writeExpression(*b.fRight, out);
+ // component type we are operating on: float, int, uint
+ const Type* operandType;
+ // IR allows mismatched types in expressions (e.g. vec2 * float), but they need special handling
+ // in SPIR-V
+ if (b.fLeft->fType != b.fRight->fType) {
+ if (b.fLeft->fType.kind() == Type::kVector_Kind &&
+ b.fRight->fType.isNumber()) {
+ // promote number to vector
+ SpvId vec = this->nextId();
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + b.fType.columns(), out);
+ this->writeWord(this->getType(resultType), out);
+ this->writeWord(vec, out);
+ for (int i = 0; i < resultType.columns(); i++) {
+ this->writeWord(rhs, out);
+ }
+ rhs = vec;
+ operandType = &b.fRight->fType;
+ } else if (b.fRight->fType.kind() == Type::kVector_Kind &&
+ b.fLeft->fType.isNumber()) {
+ // promote number to vector
+ SpvId vec = this->nextId();
+ this->writeOpCode(SpvOpCompositeConstruct, 3 + b.fType.columns(), out);
+ this->writeWord(this->getType(resultType), out);
+ this->writeWord(vec, out);
+ for (int i = 0; i < resultType.columns(); i++) {
+ this->writeWord(lhs, out);
+ }
+ lhs = vec;
+ ASSERT(!lvalue);
+ operandType = &b.fLeft->fType;
+ } else if (b.fLeft->fType.kind() == Type::kMatrix_Kind) {
+ SpvOp_ op;
+ if (b.fRight->fType.kind() == Type::kMatrix_Kind) {
+ op = SpvOpMatrixTimesMatrix;
+ } else if (b.fRight->fType.kind() == Type::kVector_Kind) {
+ op = SpvOpMatrixTimesVector;
+ } else {
+ ASSERT(b.fRight->fType.kind() == Type::kScalar_Kind);
+ op = SpvOpMatrixTimesScalar;
+ }
+ SpvId result = this->nextId();
+ this->writeInstruction(op, this->getType(b.fType), result, lhs, rhs, out);
+ if (b.fOperator == Token::STAREQ) {
+ lvalue->store(result, out);
+ } else {
+ ASSERT(b.fOperator == Token::STAR);
+ }
+ return result;
+ } else if (b.fRight->fType.kind() == Type::kMatrix_Kind) {
+ SpvId result = this->nextId();
+ if (b.fLeft->fType.kind() == Type::kVector_Kind) {
+ this->writeInstruction(SpvOpVectorTimesMatrix, this->getType(b.fType), result,
+ lhs, rhs, out);
+ } else {
+ ASSERT(b.fLeft->fType.kind() == Type::kScalar_Kind);
+ this->writeInstruction(SpvOpMatrixTimesScalar, this->getType(b.fType), result, rhs,
+ lhs, out);
+ }
+ if (b.fOperator == Token::STAREQ) {
+ lvalue->store(result, out);
+ } else {
+ ASSERT(b.fOperator == Token::STAR);
+ }
+ return result;
+ } else {
+ ABORT("unsupported binary expression: %s", b.description().c_str());
+ }
+ } else {
+ operandType = &b.fLeft->fType;
+ ASSERT(*operandType == b.fRight->fType);
+ }
+ switch (b.fOperator) {
+ case Token::EQEQ:
+ ASSERT(resultType == *fContext.fBool_Type);
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFOrdEqual,
+ SpvOpIEqual, SpvOpIEqual, SpvOpLogicalEqual, out);
+ case Token::NEQ:
+ ASSERT(resultType == *fContext.fBool_Type);
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFOrdNotEqual,
+ SpvOpINotEqual, SpvOpINotEqual, SpvOpLogicalNotEqual,
+ out);
+ case Token::GT:
+ ASSERT(resultType == *fContext.fBool_Type);
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs,
+ SpvOpFOrdGreaterThan, SpvOpSGreaterThan,
+ SpvOpUGreaterThan, SpvOpUndef, out);
+ case Token::LT:
+ ASSERT(resultType == *fContext.fBool_Type);
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFOrdLessThan,
+ SpvOpSLessThan, SpvOpULessThan, SpvOpUndef, out);
+ case Token::GTEQ:
+ ASSERT(resultType == *fContext.fBool_Type);
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs,
+ SpvOpFOrdGreaterThanEqual, SpvOpSGreaterThanEqual,
+ SpvOpUGreaterThanEqual, SpvOpUndef, out);
+ case Token::LTEQ:
+ ASSERT(resultType == *fContext.fBool_Type);
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs,
+ SpvOpFOrdLessThanEqual, SpvOpSLessThanEqual,
+ SpvOpULessThanEqual, SpvOpUndef, out);
+ case Token::PLUS:
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFAdd,
+ SpvOpIAdd, SpvOpIAdd, SpvOpUndef, out);
+ case Token::MINUS:
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFSub,
+ SpvOpISub, SpvOpISub, SpvOpUndef, out);
+ case Token::STAR:
+ if (b.fLeft->fType.kind() == Type::kMatrix_Kind &&
+ b.fRight->fType.kind() == Type::kMatrix_Kind) {
+ // matrix multiply
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpMatrixTimesMatrix, this->getType(resultType), result,
+ lhs, rhs, out);
+ return result;
+ }
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFMul,
+ SpvOpIMul, SpvOpIMul, SpvOpUndef, out);
+ case Token::SLASH:
+ return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFDiv,
+ SpvOpSDiv, SpvOpUDiv, SpvOpUndef, out);
+ case Token::PLUSEQ: {
+ SpvId result = this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFAdd,
+ SpvOpIAdd, SpvOpIAdd, SpvOpUndef, out);
+ ASSERT(lvalue);
+ lvalue->store(result, out);
+ return result;
+ }
+ case Token::MINUSEQ: {
+ SpvId result = this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFSub,
+ SpvOpISub, SpvOpISub, SpvOpUndef, out);
+ ASSERT(lvalue);
+ lvalue->store(result, out);
+ return result;
+ }
+ case Token::STAREQ: {
+ if (b.fLeft->fType.kind() == Type::kMatrix_Kind &&
+ b.fRight->fType.kind() == Type::kMatrix_Kind) {
+ // matrix multiply
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpMatrixTimesMatrix, this->getType(resultType), result,
+ lhs, rhs, out);
+ ASSERT(lvalue);
+ lvalue->store(result, out);
+ return result;
+ }
+ SpvId result = this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFMul,
+ SpvOpIMul, SpvOpIMul, SpvOpUndef, out);
+ ASSERT(lvalue);
+ lvalue->store(result, out);
+ return result;
+ }
+ case Token::SLASHEQ: {
+ SpvId result = this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFDiv,
+ SpvOpSDiv, SpvOpUDiv, SpvOpUndef, out);
+ ASSERT(lvalue);
+ lvalue->store(result, out);
+ return result;
+ }
+ default:
+ // FIXME: missing support for some operators (bitwise, &&=, ||=, shift...)
+ ABORT("unsupported binary expression: %s", b.description().c_str());
+ }
+}
+
+SpvId SPIRVCodeGenerator::writeLogicalAnd(const BinaryExpression& a, std::ostream& out) {
+ ASSERT(a.fOperator == Token::LOGICALAND);
+ BoolLiteral falseLiteral(fContext, Position(), false);
+ SpvId falseConstant = this->writeBoolLiteral(falseLiteral);
+ SpvId lhs = this->writeExpression(*a.fLeft, out);
+ SpvId rhsLabel = this->nextId();
+ SpvId end = this->nextId();
+ SpvId lhsBlock = fCurrentBlock;
+ this->writeInstruction(SpvOpSelectionMerge, end, SpvSelectionControlMaskNone, out);
+ this->writeInstruction(SpvOpBranchConditional, lhs, rhsLabel, end, out);
+ this->writeLabel(rhsLabel, out);
+ SpvId rhs = this->writeExpression(*a.fRight, out);
+ SpvId rhsBlock = fCurrentBlock;
+ this->writeInstruction(SpvOpBranch, end, out);
+ this->writeLabel(end, out);
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpPhi, this->getType(*fContext.fBool_Type), result, falseConstant,
+ lhsBlock, rhs, rhsBlock, out);
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writeLogicalOr(const BinaryExpression& o, std::ostream& out) {
+ ASSERT(o.fOperator == Token::LOGICALOR);
+ BoolLiteral trueLiteral(fContext, Position(), true);
+ SpvId trueConstant = this->writeBoolLiteral(trueLiteral);
+ SpvId lhs = this->writeExpression(*o.fLeft, out);
+ SpvId rhsLabel = this->nextId();
+ SpvId end = this->nextId();
+ SpvId lhsBlock = fCurrentBlock;
+ this->writeInstruction(SpvOpSelectionMerge, end, SpvSelectionControlMaskNone, out);
+ this->writeInstruction(SpvOpBranchConditional, lhs, end, rhsLabel, out);
+ this->writeLabel(rhsLabel, out);
+ SpvId rhs = this->writeExpression(*o.fRight, out);
+ SpvId rhsBlock = fCurrentBlock;
+ this->writeInstruction(SpvOpBranch, end, out);
+ this->writeLabel(end, out);
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpPhi, this->getType(*fContext.fBool_Type), result, trueConstant,
+ lhsBlock, rhs, rhsBlock, out);
+ return result;
+}
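+
+ // Both helpers above encode short-circuit evaluation with the same block structure: for a && b
+ // the left side is evaluated, a conditional branch either enters a block that evaluates the
+ // right side or jumps straight to the merge block, and an OpPhi in the merge block yields the
+ // constant false when the right side was skipped and the right side's value otherwise.
+ // writeLogicalOr mirrors this with the branch targets swapped and true as the constant.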
+
+SpvId SPIRVCodeGenerator::writeTernaryExpression(const TernaryExpression& t, std::ostream& out) {
+ SpvId test = this->writeExpression(*t.fTest, out);
+ if (t.fIfTrue->isConstant() && t.fIfFalse->isConstant()) {
+ // both true and false are constants, can just use OpSelect
+ SpvId result = this->nextId();
+ SpvId trueId = this->writeExpression(*t.fIfTrue, out);
+ SpvId falseId = this->writeExpression(*t.fIfFalse, out);
+ this->writeInstruction(SpvOpSelect, this->getType(t.fType), result, test, trueId, falseId,
+ out);
+ return result;
+ }
+ // was originally using OpPhi to choose the result, but for some reason that is crashing on
+ // Adreno. Switched to storing the result in a temp variable as glslang does.
+ SpvId var = this->nextId();
+ this->writeInstruction(SpvOpVariable, this->getPointerType(t.fType, SpvStorageClassFunction),
+ var, SpvStorageClassFunction, fVariableBuffer);
+ SpvId trueLabel = this->nextId();
+ SpvId falseLabel = this->nextId();
+ SpvId end = this->nextId();
+ this->writeInstruction(SpvOpSelectionMerge, end, SpvSelectionControlMaskNone, out);
+ this->writeInstruction(SpvOpBranchConditional, test, trueLabel, falseLabel, out);
+ this->writeLabel(trueLabel, out);
+ this->writeInstruction(SpvOpStore, var, this->writeExpression(*t.fIfTrue, out), out);
+ this->writeInstruction(SpvOpBranch, end, out);
+ this->writeLabel(falseLabel, out);
+ this->writeInstruction(SpvOpStore, var, this->writeExpression(*t.fIfFalse, out), out);
+ this->writeInstruction(SpvOpBranch, end, out);
+ this->writeLabel(end, out);
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpLoad, this->getType(t.fType), result, var, out);
+ return result;
+}
+
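+// Helper for the ++/-- cases below: produces a literal 1 of the operand's type so that an
+// increment or decrement can be written as an ordinary add or subtract.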
+std::unique_ptr<Expression> create_literal_1(const Context& context, const Type& type) {
+ if (type == *context.fInt_Type) {
+ return std::unique_ptr<Expression>(new IntLiteral(context, Position(), 1));
+    } else if (type == *context.fFloat_Type) {
+        return std::unique_ptr<Expression>(new FloatLiteral(context, Position(), 1.0));
+    } else {
+        ABORT("math is unsupported on type '%s'", type.description().c_str());
+    }
+}
+
+SpvId SPIRVCodeGenerator::writePrefixExpression(const PrefixExpression& p, std::ostream& out) {
+ if (p.fOperator == Token::MINUS) {
+ SpvId result = this->nextId();
+ SpvId typeId = this->getType(p.fType);
+ SpvId expr = this->writeExpression(*p.fOperand, out);
+ if (is_float(fContext, p.fType)) {
+ this->writeInstruction(SpvOpFNegate, typeId, result, expr, out);
+ } else if (is_signed(fContext, p.fType)) {
+ this->writeInstruction(SpvOpSNegate, typeId, result, expr, out);
+ } else {
+ ABORT("unsupported prefix expression %s", p.description().c_str());
+        }
+ return result;
+ }
+ switch (p.fOperator) {
+ case Token::PLUS:
+ return this->writeExpression(*p.fOperand, out);
+ case Token::PLUSPLUS: {
+ std::unique_ptr<LValue> lv = this->getLValue(*p.fOperand, out);
+ SpvId one = this->writeExpression(*create_literal_1(fContext, p.fType), out);
+ SpvId result = this->writeBinaryOperation(p.fType, p.fType, lv->load(out), one,
+ SpvOpFAdd, SpvOpIAdd, SpvOpIAdd, SpvOpUndef,
+ out);
+ lv->store(result, out);
+ return result;
+ }
+ case Token::MINUSMINUS: {
+ std::unique_ptr<LValue> lv = this->getLValue(*p.fOperand, out);
+ SpvId one = this->writeExpression(*create_literal_1(fContext, p.fType), out);
+ SpvId result = this->writeBinaryOperation(p.fType, p.fType, lv->load(out), one,
+ SpvOpFSub, SpvOpISub, SpvOpISub, SpvOpUndef,
+ out);
+ lv->store(result, out);
+ return result;
+ }
+ case Token::NOT: {
+ ASSERT(p.fOperand->fType == *fContext.fBool_Type);
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpLogicalNot, this->getType(p.fOperand->fType), result,
+ this->writeExpression(*p.fOperand, out), out);
+ return result;
+ }
+ default:
+ ABORT("unsupported prefix expression: %s", p.description().c_str());
+ }
+}
+
+SpvId SPIRVCodeGenerator::writePostfixExpression(const PostfixExpression& p, std::ostream& out) {
+ std::unique_ptr<LValue> lv = this->getLValue(*p.fOperand, out);
+ SpvId result = lv->load(out);
+ SpvId one = this->writeExpression(*create_literal_1(fContext, p.fType), out);
+ switch (p.fOperator) {
+ case Token::PLUSPLUS: {
+ SpvId temp = this->writeBinaryOperation(p.fType, p.fType, result, one, SpvOpFAdd,
+ SpvOpIAdd, SpvOpIAdd, SpvOpUndef, out);
+ lv->store(temp, out);
+ return result;
+ }
+ case Token::MINUSMINUS: {
+ SpvId temp = this->writeBinaryOperation(p.fType, p.fType, result, one, SpvOpFSub,
+ SpvOpISub, SpvOpISub, SpvOpUndef, out);
+ lv->store(temp, out);
+ return result;
+ }
+ default:
+ ABORT("unsupported postfix expression %s", p.description().c_str());
+ }
+}
+
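+// Boolean, integer and floating point constants are cached (fBoolTrue / fBoolFalse and the
+// f*Constants maps) so that each distinct value is emitted into fConstantBuffer only once.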
+SpvId SPIRVCodeGenerator::writeBoolLiteral(const BoolLiteral& b) {
+ if (b.fValue) {
+ if (fBoolTrue == 0) {
+ fBoolTrue = this->nextId();
+ this->writeInstruction(SpvOpConstantTrue, this->getType(b.fType), fBoolTrue,
+ fConstantBuffer);
+ }
+ return fBoolTrue;
+ } else {
+ if (fBoolFalse == 0) {
+ fBoolFalse = this->nextId();
+ this->writeInstruction(SpvOpConstantFalse, this->getType(b.fType), fBoolFalse,
+ fConstantBuffer);
+ }
+ return fBoolFalse;
+ }
+}
+
+SpvId SPIRVCodeGenerator::writeIntLiteral(const IntLiteral& i) {
+ if (i.fType == *fContext.fInt_Type) {
+ auto entry = fIntConstants.find(i.fValue);
+ if (entry == fIntConstants.end()) {
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpConstant, this->getType(i.fType), result, (SpvId) i.fValue,
+ fConstantBuffer);
+ fIntConstants[i.fValue] = result;
+ return result;
+ }
+ return entry->second;
+ } else {
+ ASSERT(i.fType == *fContext.fUInt_Type);
+ auto entry = fUIntConstants.find(i.fValue);
+ if (entry == fUIntConstants.end()) {
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpConstant, this->getType(i.fType), result, (SpvId) i.fValue,
+ fConstantBuffer);
+ fUIntConstants[i.fValue] = result;
+ return result;
+ }
+ return entry->second;
+ }
+}
+
+SpvId SPIRVCodeGenerator::writeFloatLiteral(const FloatLiteral& f) {
+ if (f.fType == *fContext.fFloat_Type) {
+ float value = (float) f.fValue;
+ auto entry = fFloatConstants.find(value);
+ if (entry == fFloatConstants.end()) {
+ SpvId result = this->nextId();
+ uint32_t bits;
+ ASSERT(sizeof(bits) == sizeof(value));
+ memcpy(&bits, &value, sizeof(bits));
+ this->writeInstruction(SpvOpConstant, this->getType(f.fType), result, bits,
+ fConstantBuffer);
+ fFloatConstants[value] = result;
+ return result;
+ }
+ return entry->second;
+ } else {
+ ASSERT(f.fType == *fContext.fDouble_Type);
+ auto entry = fDoubleConstants.find(f.fValue);
+ if (entry == fDoubleConstants.end()) {
+ SpvId result = this->nextId();
+ uint64_t bits;
+ ASSERT(sizeof(bits) == sizeof(f.fValue));
+ memcpy(&bits, &f.fValue, sizeof(bits));
+ this->writeInstruction(SpvOpConstant, this->getType(f.fType), result,
+ bits & 0xffffffff, bits >> 32, fConstantBuffer);
+ fDoubleConstants[f.fValue] = result;
+ return result;
+ }
+ return entry->second;
+ }
+}
+
+SpvId SPIRVCodeGenerator::writeFunctionStart(const FunctionDeclaration& f, std::ostream& out) {
+ SpvId result = fFunctionMap[&f];
+ this->writeInstruction(SpvOpFunction, this->getType(f.fReturnType), result,
+ SpvFunctionControlMaskNone, this->getFunctionType(f), out);
+ this->writeInstruction(SpvOpName, result, f.fName.c_str(), fNameBuffer);
+ for (size_t i = 0; i < f.fParameters.size(); i++) {
+ SpvId id = this->nextId();
+ fVariableMap[f.fParameters[i]] = id;
+        SpvId type = this->getPointerType(f.fParameters[i]->fType, SpvStorageClassFunction);
+ this->writeInstruction(SpvOpFunctionParameter, type, id, out);
+ }
+ return result;
+}
+
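+// The function body is generated into a temporary buffer first so that the OpVariable
+// instructions accumulated in fVariableBuffer can be emitted at the top of the function, as
+// SPIR-V requires function-local variables to appear in the entry block.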
+SpvId SPIRVCodeGenerator::writeFunction(const FunctionDefinition& f, std::ostream& out) {
+ SpvId result = this->writeFunctionStart(f.fDeclaration, out);
+ this->writeLabel(this->nextId(), out);
+ if (f.fDeclaration.fName == "main") {
+ out << fGlobalInitializersBuffer.str();
+ }
+ std::stringstream bodyBuffer;
+ this->writeBlock(*f.fBody, bodyBuffer);
+ out << fVariableBuffer.str();
+ fVariableBuffer.str("");
+ out << bodyBuffer.str();
+ if (fCurrentBlock) {
+ this->writeInstruction(SpvOpReturn, out);
+ }
+ this->writeInstruction(SpvOpFunctionEnd, out);
+ return result;
+}
+
+void SPIRVCodeGenerator::writeLayout(const Layout& layout, SpvId target) {
+ if (layout.fLocation >= 0) {
+ this->writeInstruction(SpvOpDecorate, target, SpvDecorationLocation, layout.fLocation,
+ fDecorationBuffer);
+ }
+ if (layout.fBinding >= 0) {
+ this->writeInstruction(SpvOpDecorate, target, SpvDecorationBinding, layout.fBinding,
+ fDecorationBuffer);
+ }
+ if (layout.fIndex >= 0) {
+ this->writeInstruction(SpvOpDecorate, target, SpvDecorationIndex, layout.fIndex,
+ fDecorationBuffer);
+ }
+ if (layout.fSet >= 0) {
+ this->writeInstruction(SpvOpDecorate, target, SpvDecorationDescriptorSet, layout.fSet,
+ fDecorationBuffer);
+ }
+ if (layout.fBuiltin >= 0) {
+ this->writeInstruction(SpvOpDecorate, target, SpvDecorationBuiltIn, layout.fBuiltin,
+ fDecorationBuffer);
+ }
+}
+
+void SPIRVCodeGenerator::writeLayout(const Layout& layout, SpvId target, int member) {
+ if (layout.fLocation >= 0) {
+ this->writeInstruction(SpvOpMemberDecorate, target, member, SpvDecorationLocation,
+ layout.fLocation, fDecorationBuffer);
+ }
+ if (layout.fBinding >= 0) {
+ this->writeInstruction(SpvOpMemberDecorate, target, member, SpvDecorationBinding,
+ layout.fBinding, fDecorationBuffer);
+ }
+ if (layout.fIndex >= 0) {
+ this->writeInstruction(SpvOpMemberDecorate, target, member, SpvDecorationIndex,
+ layout.fIndex, fDecorationBuffer);
+ }
+ if (layout.fSet >= 0) {
+ this->writeInstruction(SpvOpMemberDecorate, target, member, SpvDecorationDescriptorSet,
+ layout.fSet, fDecorationBuffer);
+ }
+ if (layout.fBuiltin >= 0) {
+ this->writeInstruction(SpvOpMemberDecorate, target, member, SpvDecorationBuiltIn,
+ layout.fBuiltin, fDecorationBuffer);
+ }
+}
+
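+// An interface block becomes a type decorated with Block plus a pointer-typed variable in the
+// storage class implied by the block's modifiers (e.g. Uniform for a 'uniform' block).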
+SpvId SPIRVCodeGenerator::writeInterfaceBlock(const InterfaceBlock& intf) {
+ SpvId type = this->getType(intf.fVariable.fType);
+ SpvId result = this->nextId();
+ this->writeInstruction(SpvOpDecorate, type, SpvDecorationBlock, fDecorationBuffer);
+ SpvStorageClass_ storageClass = get_storage_class(intf.fVariable.fModifiers);
+ SpvId ptrType = this->nextId();
+ this->writeInstruction(SpvOpTypePointer, ptrType, storageClass, type, fConstantBuffer);
+ this->writeInstruction(SpvOpVariable, ptrType, result, storageClass, fConstantBuffer);
+ this->writeLayout(intf.fVariable.fModifiers.fLayout, result);
+ fVariableMap[&intf.fVariable] = result;
+ return result;
+}
+
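+// Global variables map onto SPIR-V storage classes as follows: 'in' -> Input, 'out' -> Output,
+// 'uniform' -> UniformConstant for samplers and Uniform otherwise, everything else -> Private.
+// Dead variables are skipped unless they are interface variables.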
+void SPIRVCodeGenerator::writeGlobalVars(const VarDeclarations& decl, std::ostream& out) {
+ for (size_t i = 0; i < decl.fVars.size(); i++) {
+ const VarDeclaration& varDecl = decl.fVars[i];
+ const Variable* var = varDecl.fVar;
+ if (!var->fIsReadFrom && !var->fIsWrittenTo &&
+ !(var->fModifiers.fFlags & (Modifiers::kIn_Flag |
+ Modifiers::kOut_Flag |
+ Modifiers::kUniform_Flag))) {
+ // variable is dead and not an input / output var (the Vulkan debug layers complain if
+ // we elide an interface var, even if it's dead)
+ continue;
+ }
+ SpvStorageClass_ storageClass;
+ if (var->fModifiers.fFlags & Modifiers::kIn_Flag) {
+ storageClass = SpvStorageClassInput;
+ } else if (var->fModifiers.fFlags & Modifiers::kOut_Flag) {
+ storageClass = SpvStorageClassOutput;
+ } else if (var->fModifiers.fFlags & Modifiers::kUniform_Flag) {
+ if (var->fType.kind() == Type::kSampler_Kind) {
+ storageClass = SpvStorageClassUniformConstant;
+ } else {
+ storageClass = SpvStorageClassUniform;
+ }
+ } else {
+ storageClass = SpvStorageClassPrivate;
+ }
+ SpvId id = this->nextId();
+ fVariableMap[var] = id;
+ SpvId type = this->getPointerType(var->fType, storageClass);
+ this->writeInstruction(SpvOpVariable, type, id, storageClass, fConstantBuffer);
+ this->writeInstruction(SpvOpName, id, var->fName.c_str(), fNameBuffer);
+ if (var->fType.kind() == Type::kMatrix_Kind) {
+ this->writeInstruction(SpvOpMemberDecorate, id, (SpvId) i, SpvDecorationColMajor,
+ fDecorationBuffer);
+ this->writeInstruction(SpvOpMemberDecorate, id, (SpvId) i, SpvDecorationMatrixStride,
+ (SpvId) var->fType.stride(), fDecorationBuffer);
+ }
+ if (varDecl.fValue) {
+ ASSERT(!fCurrentBlock);
+ fCurrentBlock = -1;
+ SpvId value = this->writeExpression(*varDecl.fValue, fGlobalInitializersBuffer);
+ this->writeInstruction(SpvOpStore, id, value, fGlobalInitializersBuffer);
+ fCurrentBlock = 0;
+ }
+ this->writeLayout(var->fModifiers.fLayout, id);
+ }
+}
+
+void SPIRVCodeGenerator::writeVarDeclarations(const VarDeclarations& decl, std::ostream& out) {
+ for (const auto& varDecl : decl.fVars) {
+ const Variable* var = varDecl.fVar;
+ SpvId id = this->nextId();
+ fVariableMap[var] = id;
+ SpvId type = this->getPointerType(var->fType, SpvStorageClassFunction);
+ this->writeInstruction(SpvOpVariable, type, id, SpvStorageClassFunction, fVariableBuffer);
+ this->writeInstruction(SpvOpName, id, var->fName.c_str(), fNameBuffer);
+ if (varDecl.fValue) {
+ SpvId value = this->writeExpression(*varDecl.fValue, out);
+ this->writeInstruction(SpvOpStore, id, value, out);
+ }
+ }
+}
+
+void SPIRVCodeGenerator::writeStatement(const Statement& s, std::ostream& out) {
+ switch (s.fKind) {
+ case Statement::kBlock_Kind:
+ this->writeBlock((Block&) s, out);
+ break;
+ case Statement::kExpression_Kind:
+ this->writeExpression(*((ExpressionStatement&) s).fExpression, out);
+ break;
+ case Statement::kReturn_Kind:
+ this->writeReturnStatement((ReturnStatement&) s, out);
+ break;
+ case Statement::kVarDeclarations_Kind:
+ this->writeVarDeclarations(*((VarDeclarationsStatement&) s).fDeclaration, out);
+ break;
+ case Statement::kIf_Kind:
+ this->writeIfStatement((IfStatement&) s, out);
+ break;
+ case Statement::kFor_Kind:
+ this->writeForStatement((ForStatement&) s, out);
+ break;
+ case Statement::kBreak_Kind:
+ this->writeInstruction(SpvOpBranch, fBreakTarget.top(), out);
+ break;
+ case Statement::kContinue_Kind:
+ this->writeInstruction(SpvOpBranch, fContinueTarget.top(), out);
+ break;
+ case Statement::kDiscard_Kind:
+ this->writeInstruction(SpvOpKill, out);
+ break;
+ default:
+ ABORT("unsupported statement: %s", s.description().c_str());
+ }
+}
+
+void SPIRVCodeGenerator::writeBlock(const Block& b, std::ostream& out) {
+ for (size_t i = 0; i < b.fStatements.size(); i++) {
+ this->writeStatement(*b.fStatements[i], out);
+ }
+}
+
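+// 'if' statements use SPIR-V structured control flow: an OpSelectionMerge naming the merge
+// block, followed by an OpBranchConditional into the true (and optional false) blocks.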
+void SPIRVCodeGenerator::writeIfStatement(const IfStatement& stmt, std::ostream& out) {
+ SpvId test = this->writeExpression(*stmt.fTest, out);
+ SpvId ifTrue = this->nextId();
+ SpvId ifFalse = this->nextId();
+ if (stmt.fIfFalse) {
+ SpvId end = this->nextId();
+ this->writeInstruction(SpvOpSelectionMerge, end, SpvSelectionControlMaskNone, out);
+ this->writeInstruction(SpvOpBranchConditional, test, ifTrue, ifFalse, out);
+ this->writeLabel(ifTrue, out);
+ this->writeStatement(*stmt.fIfTrue, out);
+ if (fCurrentBlock) {
+ this->writeInstruction(SpvOpBranch, end, out);
+ }
+ this->writeLabel(ifFalse, out);
+ this->writeStatement(*stmt.fIfFalse, out);
+ if (fCurrentBlock) {
+ this->writeInstruction(SpvOpBranch, end, out);
+ }
+ this->writeLabel(end, out);
+ } else {
+ this->writeInstruction(SpvOpSelectionMerge, ifFalse, SpvSelectionControlMaskNone, out);
+ this->writeInstruction(SpvOpBranchConditional, test, ifTrue, ifFalse, out);
+ this->writeLabel(ifTrue, out);
+ this->writeStatement(*stmt.fIfTrue, out);
+ if (fCurrentBlock) {
+ this->writeInstruction(SpvOpBranch, ifFalse, out);
+ }
+ this->writeLabel(ifFalse, out);
+ }
+}
+
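+// Loops are emitted in the structured form SPIR-V expects:
+//     header: OpLoopMerge %end %next, then OpBranch %start
+//     start:  evaluate the test, OpBranchConditional %test %body %end
+//     body:   the loop statement, then OpBranch %next
+//     next:   the increment expression, then OpBranch %header
+//     end:    the merge block
+// fBreakTarget / fContinueTarget are pushed so that break and continue inside the body branch
+// to %end and %next respectively.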
+void SPIRVCodeGenerator::writeForStatement(const ForStatement& f, std::ostream& out) {
+ if (f.fInitializer) {
+ this->writeStatement(*f.fInitializer, out);
+ }
+ SpvId header = this->nextId();
+ SpvId start = this->nextId();
+ SpvId body = this->nextId();
+ SpvId next = this->nextId();
+ fContinueTarget.push(next);
+ SpvId end = this->nextId();
+ fBreakTarget.push(end);
+ this->writeInstruction(SpvOpBranch, header, out);
+ this->writeLabel(header, out);
+ this->writeInstruction(SpvOpLoopMerge, end, next, SpvLoopControlMaskNone, out);
+ this->writeInstruction(SpvOpBranch, start, out);
+ this->writeLabel(start, out);
+ SpvId test = this->writeExpression(*f.fTest, out);
+ this->writeInstruction(SpvOpBranchConditional, test, body, end, out);
+ this->writeLabel(body, out);
+ this->writeStatement(*f.fStatement, out);
+ if (fCurrentBlock) {
+ this->writeInstruction(SpvOpBranch, next, out);
+ }
+ this->writeLabel(next, out);
+ if (f.fNext) {
+ this->writeExpression(*f.fNext, out);
+ }
+ this->writeInstruction(SpvOpBranch, header, out);
+ this->writeLabel(end, out);
+ fBreakTarget.pop();
+ fContinueTarget.pop();
+}
+
+void SPIRVCodeGenerator::writeReturnStatement(const ReturnStatement& r, std::ostream& out) {
+ if (r.fExpression) {
+ this->writeInstruction(SpvOpReturnValue, this->writeExpression(*r.fExpression, out),
+ out);
+ } else {
+ this->writeInstruction(SpvOpReturn, out);
+ }
+}
+
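+// Writes everything after the module header words. The various sections (names, decorations,
+// constants, function bodies) are accumulated in separate buffers while the program is
+// traversed and then concatenated at the end in the order SPIR-V expects.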
+void SPIRVCodeGenerator::writeInstructions(const Program& program, std::ostream& out) {
+ fGLSLExtendedInstructions = this->nextId();
+ std::stringstream body;
+ std::vector<SpvId> interfaceVars;
+ // assign IDs to functions
+ for (size_t i = 0; i < program.fElements.size(); i++) {
+ if (program.fElements[i]->fKind == ProgramElement::kFunction_Kind) {
+ FunctionDefinition& f = (FunctionDefinition&) *program.fElements[i];
+ fFunctionMap[&f.fDeclaration] = this->nextId();
+ }
+ }
+ for (size_t i = 0; i < program.fElements.size(); i++) {
+ if (program.fElements[i]->fKind == ProgramElement::kInterfaceBlock_Kind) {
+ InterfaceBlock& intf = (InterfaceBlock&) *program.fElements[i];
+ SpvId id = this->writeInterfaceBlock(intf);
+ if ((intf.fVariable.fModifiers.fFlags & Modifiers::kIn_Flag) ||
+ (intf.fVariable.fModifiers.fFlags & Modifiers::kOut_Flag)) {
+ interfaceVars.push_back(id);
+ }
+ }
+ }
+ for (size_t i = 0; i < program.fElements.size(); i++) {
+ if (program.fElements[i]->fKind == ProgramElement::kVar_Kind) {
+ this->writeGlobalVars(((VarDeclarations&) *program.fElements[i]), body);
+ }
+ }
+ for (size_t i = 0; i < program.fElements.size(); i++) {
+ if (program.fElements[i]->fKind == ProgramElement::kFunction_Kind) {
+ this->writeFunction(((FunctionDefinition&) *program.fElements[i]), body);
+ }
+ }
+ const FunctionDeclaration* main = nullptr;
+    for (const auto& entry : fFunctionMap) {
+ if (entry.first->fName == "main") {
+ main = entry.first;
+ }
+ }
+ ASSERT(main);
+    for (const auto& entry : fVariableMap) {
+ const Variable* var = entry.first;
+ if (var->fStorage == Variable::kGlobal_Storage &&
+ ((var->fModifiers.fFlags & Modifiers::kIn_Flag) ||
+ (var->fModifiers.fFlags & Modifiers::kOut_Flag))) {
+ interfaceVars.push_back(entry.second);
+ }
+ }
+ this->writeCapabilities(out);
+ this->writeInstruction(SpvOpExtInstImport, fGLSLExtendedInstructions, "GLSL.std.450", out);
+ this->writeInstruction(SpvOpMemoryModel, SpvAddressingModelLogical, SpvMemoryModelGLSL450, out);
+ this->writeOpCode(SpvOpEntryPoint, (SpvId) (3 + (strlen(main->fName.c_str()) + 4) / 4) +
+ (int32_t) interfaceVars.size(), out);
+ switch (program.fKind) {
+ case Program::kVertex_Kind:
+ this->writeWord(SpvExecutionModelVertex, out);
+ break;
+ case Program::kFragment_Kind:
+ this->writeWord(SpvExecutionModelFragment, out);
+ break;
+ }
+ this->writeWord(fFunctionMap[main], out);
+ this->writeString(main->fName.c_str(), out);
+    for (SpvId var : interfaceVars) {
+ this->writeWord(var, out);
+ }
+ if (program.fKind == Program::kFragment_Kind) {
+ this->writeInstruction(SpvOpExecutionMode,
+ fFunctionMap[main],
+ SpvExecutionModeOriginUpperLeft,
+ out);
+ }
+ for (size_t i = 0; i < program.fElements.size(); i++) {
+ if (program.fElements[i]->fKind == ProgramElement::kExtension_Kind) {
+ this->writeInstruction(SpvOpSourceExtension,
+ ((Extension&) *program.fElements[i]).fName.c_str(),
+ out);
+ }
+ }
+
+ out << fNameBuffer.str();
+ out << fDecorationBuffer.str();
+ out << fConstantBuffer.str();
+ out << fExternalFunctionsBuffer.str();
+ out << body.str();
+}
+
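+// A SPIR-V module begins with five words: the magic number, the version, a generator magic
+// number, the id bound, and a reserved zero. The instructions are generated into a temporary
+// buffer first so that fIdCount is final before the bound is written.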
+void SPIRVCodeGenerator::generateCode(const Program& program, std::ostream& out) {
+ this->writeWord(SpvMagicNumber, out);
+ this->writeWord(SpvVersion, out);
+ this->writeWord(SKSL_MAGIC, out);
+ std::stringstream buffer;
+ this->writeInstructions(program, buffer);
+ this->writeWord(fIdCount, out);
+ this->writeWord(0, out); // reserved, always zero
+ out << buffer.str();
+}
+
+}
diff --git a/gfx/skia/skia/src/sksl/SkSLSPIRVCodeGenerator.h b/gfx/skia/skia/src/sksl/SkSLSPIRVCodeGenerator.h
new file mode 100644
index 000000000..e7b2b3023
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLSPIRVCodeGenerator.h
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SPIRVCODEGENERATOR
+#define SKSL_SPIRVCODEGENERATOR
+
+#include <sstream>
+#include <stack>
+#include <tuple>
+#include <unordered_map>
+
+#include "SkSLCodeGenerator.h"
+#include "ir/SkSLBinaryExpression.h"
+#include "ir/SkSLBoolLiteral.h"
+#include "ir/SkSLConstructor.h"
+#include "ir/SkSLFloatLiteral.h"
+#include "ir/SkSLIfStatement.h"
+#include "ir/SkSLIndexExpression.h"
+#include "ir/SkSLInterfaceBlock.h"
+#include "ir/SkSLIntLiteral.h"
+#include "ir/SkSLFieldAccess.h"
+#include "ir/SkSLForStatement.h"
+#include "ir/SkSLFunctionCall.h"
+#include "ir/SkSLFunctionDeclaration.h"
+#include "ir/SkSLFunctionDefinition.h"
+#include "ir/SkSLPrefixExpression.h"
+#include "ir/SkSLPostfixExpression.h"
+#include "ir/SkSLProgramElement.h"
+#include "ir/SkSLReturnStatement.h"
+#include "ir/SkSLStatement.h"
+#include "ir/SkSLSwizzle.h"
+#include "ir/SkSLTernaryExpression.h"
+#include "ir/SkSLVarDeclaration.h"
+#include "ir/SkSLVarDeclarationStatement.h"
+#include "ir/SkSLVariableReference.h"
+#include "spirv.h"
+
+namespace SkSL {
+
+#define kLast_Capability SpvCapabilityMultiViewport
+
+/**
+ * Converts a Program into a SPIR-V binary.
+ */
+class SPIRVCodeGenerator : public CodeGenerator {
+public:
+ class LValue {
+ public:
+ virtual ~LValue() {}
+
+ // returns a pointer to the lvalue, if possible. If the lvalue cannot be directly referenced
+ // by a pointer (e.g. vector swizzles), returns 0.
+ virtual SpvId getPointer() = 0;
+
+ virtual SpvId load(std::ostream& out) = 0;
+
+ virtual void store(SpvId value, std::ostream& out) = 0;
+ };
+
+ SPIRVCodeGenerator(const Context* context)
+ : fContext(*context)
+ , fCapabilities(1 << SpvCapabilityShader)
+ , fIdCount(1)
+ , fBoolTrue(0)
+ , fBoolFalse(0)
+ , fCurrentBlock(0) {
+ this->setupIntrinsics();
+ }
+
+ void generateCode(const Program& program, std::ostream& out) override;
+
+private:
+ enum IntrinsicKind {
+ kGLSL_STD_450_IntrinsicKind,
+ kSPIRV_IntrinsicKind,
+ kSpecial_IntrinsicKind
+ };
+
+ enum SpecialIntrinsic {
+ kAtan_SpecialIntrinsic,
+ kTexture_SpecialIntrinsic,
+ kTexture2D_SpecialIntrinsic,
+ kTextureProj_SpecialIntrinsic
+ };
+
+ void setupIntrinsics();
+
+ SpvId nextId();
+
+ SpvId getType(const Type& type);
+
+ SpvId getFunctionType(const FunctionDeclaration& function);
+
+ SpvId getPointerType(const Type& type, SpvStorageClass_ storageClass);
+
+ std::vector<SpvId> getAccessChain(const Expression& expr, std::ostream& out);
+
+ void writeLayout(const Layout& layout, SpvId target);
+
+ void writeLayout(const Layout& layout, SpvId target, int member);
+
+ void writeStruct(const Type& type, SpvId resultId);
+
+ void writeProgramElement(const ProgramElement& pe, std::ostream& out);
+
+ SpvId writeInterfaceBlock(const InterfaceBlock& intf);
+
+ SpvId writeFunctionStart(const FunctionDeclaration& f, std::ostream& out);
+
+ SpvId writeFunctionDeclaration(const FunctionDeclaration& f, std::ostream& out);
+
+ SpvId writeFunction(const FunctionDefinition& f, std::ostream& out);
+
+ void writeGlobalVars(const VarDeclarations& v, std::ostream& out);
+
+ void writeVarDeclarations(const VarDeclarations& decl, std::ostream& out);
+
+ SpvId writeVariableReference(const VariableReference& ref, std::ostream& out);
+
+ std::unique_ptr<LValue> getLValue(const Expression& value, std::ostream& out);
+
+ SpvId writeExpression(const Expression& expr, std::ostream& out);
+
+ SpvId writeIntrinsicCall(const FunctionCall& c, std::ostream& out);
+
+ SpvId writeFunctionCall(const FunctionCall& c, std::ostream& out);
+
+ SpvId writeSpecialIntrinsic(const FunctionCall& c, SpecialIntrinsic kind, std::ostream& out);
+
+ SpvId writeConstantVector(const Constructor& c);
+
+ SpvId writeFloatConstructor(const Constructor& c, std::ostream& out);
+
+ SpvId writeIntConstructor(const Constructor& c, std::ostream& out);
+
+ SpvId writeMatrixConstructor(const Constructor& c, std::ostream& out);
+
+ SpvId writeVectorConstructor(const Constructor& c, std::ostream& out);
+
+ SpvId writeConstructor(const Constructor& c, std::ostream& out);
+
+ SpvId writeFieldAccess(const FieldAccess& f, std::ostream& out);
+
+ SpvId writeSwizzle(const Swizzle& swizzle, std::ostream& out);
+
+ SpvId writeBinaryOperation(const Type& resultType, const Type& operandType, SpvId lhs,
+ SpvId rhs, SpvOp_ ifFloat, SpvOp_ ifInt, SpvOp_ ifUInt,
+ SpvOp_ ifBool, std::ostream& out);
+
+ SpvId writeBinaryOperation(const BinaryExpression& expr, SpvOp_ ifFloat, SpvOp_ ifInt,
+ SpvOp_ ifUInt, std::ostream& out);
+
+ SpvId writeBinaryExpression(const BinaryExpression& b, std::ostream& out);
+
+ SpvId writeTernaryExpression(const TernaryExpression& t, std::ostream& out);
+
+ SpvId writeIndexExpression(const IndexExpression& expr, std::ostream& out);
+
+ SpvId writeLogicalAnd(const BinaryExpression& b, std::ostream& out);
+
+ SpvId writeLogicalOr(const BinaryExpression& o, std::ostream& out);
+
+ SpvId writePrefixExpression(const PrefixExpression& p, std::ostream& out);
+
+ SpvId writePostfixExpression(const PostfixExpression& p, std::ostream& out);
+
+ SpvId writeBoolLiteral(const BoolLiteral& b);
+
+ SpvId writeIntLiteral(const IntLiteral& i);
+
+ SpvId writeFloatLiteral(const FloatLiteral& f);
+
+ void writeStatement(const Statement& s, std::ostream& out);
+
+ void writeBlock(const Block& b, std::ostream& out);
+
+ void writeIfStatement(const IfStatement& stmt, std::ostream& out);
+
+ void writeForStatement(const ForStatement& f, std::ostream& out);
+
+ void writeReturnStatement(const ReturnStatement& r, std::ostream& out);
+
+ void writeCapabilities(std::ostream& out);
+
+ void writeInstructions(const Program& program, std::ostream& out);
+
+ void writeOpCode(SpvOp_ opCode, int length, std::ostream& out);
+
+ void writeWord(int32_t word, std::ostream& out);
+
+ void writeString(const char* string, std::ostream& out);
+
+ void writeLabel(SpvId id, std::ostream& out);
+
+ void writeInstruction(SpvOp_ opCode, std::ostream& out);
+
+ void writeInstruction(SpvOp_ opCode, const char* string, std::ostream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, std::ostream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, const char* string, std::ostream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, const char* string,
+ std::ostream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, std::ostream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, int32_t word3,
+ std::ostream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, int32_t word3, int32_t word4,
+ std::ostream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, int32_t word3, int32_t word4,
+ int32_t word5, std::ostream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, int32_t word3, int32_t word4,
+ int32_t word5, int32_t word6, std::ostream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, int32_t word3, int32_t word4,
+ int32_t word5, int32_t word6, int32_t word7, std::ostream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, int32_t word3, int32_t word4,
+ int32_t word5, int32_t word6, int32_t word7, int32_t word8,
+ std::ostream& out);
+
+ const Context& fContext;
+
+ uint64_t fCapabilities;
+ SpvId fIdCount;
+ SpvId fGLSLExtendedInstructions;
+ typedef std::tuple<IntrinsicKind, int32_t, int32_t, int32_t, int32_t> Intrinsic;
+ std::unordered_map<std::string, Intrinsic> fIntrinsicMap;
+ std::unordered_map<const FunctionDeclaration*, SpvId> fFunctionMap;
+ std::unordered_map<const Variable*, SpvId> fVariableMap;
+ std::unordered_map<const Variable*, int32_t> fInterfaceBlockMap;
+ std::unordered_map<std::string, SpvId> fTypeMap;
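+    // the sections of the module are accumulated in separate buffers and concatenated by
+    // writeInstructions() in the order required by SPIR-V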
+ std::stringstream fCapabilitiesBuffer;
+ std::stringstream fGlobalInitializersBuffer;
+ std::stringstream fConstantBuffer;
+ std::stringstream fExternalFunctionsBuffer;
+ std::stringstream fVariableBuffer;
+ std::stringstream fNameBuffer;
+ std::stringstream fDecorationBuffer;
+
+ SpvId fBoolTrue;
+ SpvId fBoolFalse;
+ std::unordered_map<int64_t, SpvId> fIntConstants;
+ std::unordered_map<uint64_t, SpvId> fUIntConstants;
+ std::unordered_map<float, SpvId> fFloatConstants;
+ std::unordered_map<double, SpvId> fDoubleConstants;
+ // label of the current block, or 0 if we are not in a block
+ SpvId fCurrentBlock;
+ std::stack<SpvId> fBreakTarget;
+ std::stack<SpvId> fContinueTarget;
+
+ friend class PointerLValue;
+ friend class SwizzleLValue;
+};
+
+}
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLToken.h b/gfx/skia/skia/src/sksl/SkSLToken.h
new file mode 100644
index 000000000..29fa81e2e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLToken.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_TOKEN
+#define SKSL_TOKEN
+
+#include "SkSLPosition.h"
+#include "SkSLUtil.h"
+
+namespace SkSL {
+
+#undef IN
+#undef OUT
+#undef CONST
+
+/**
+ * Represents a lexical analysis token. Token is generally only used during the parse process, but
+ * Token::Kind is also used to represent operator kinds.
+ */
+struct Token {
+ enum Kind {
+ END_OF_FILE,
+ IDENTIFIER,
+ INT_LITERAL,
+ FLOAT_LITERAL,
+ TRUE_LITERAL,
+ FALSE_LITERAL,
+ LPAREN,
+ RPAREN,
+ LBRACE,
+ RBRACE,
+ LBRACKET,
+ RBRACKET,
+ DOT,
+ COMMA,
+ PLUSPLUS,
+ MINUSMINUS,
+ PLUS,
+ MINUS,
+ STAR,
+ SLASH,
+ PERCENT,
+ SHL,
+ SHR,
+ BITWISEOR,
+ BITWISEXOR,
+ BITWISEAND,
+ LOGICALOR,
+ LOGICALXOR,
+ LOGICALAND,
+ NOT,
+ QUESTION,
+ COLON,
+ EQ,
+ EQEQ,
+ NEQ,
+ GT,
+ LT,
+ GTEQ,
+ LTEQ,
+ PLUSEQ,
+ MINUSEQ,
+ STAREQ,
+ SLASHEQ,
+ PERCENTEQ,
+ SHLEQ,
+ SHREQ,
+ BITWISEOREQ,
+ BITWISEXOREQ,
+ BITWISEANDEQ,
+ LOGICALOREQ,
+ LOGICALXOREQ,
+ LOGICALANDEQ,
+ SEMICOLON,
+ IF,
+ ELSE,
+ FOR,
+ WHILE,
+ DO,
+ RETURN,
+ BREAK,
+ CONTINUE,
+ DISCARD,
+ IN,
+ OUT,
+ INOUT,
+ CONST,
+ LOWP,
+ MEDIUMP,
+ HIGHP,
+ UNIFORM,
+ FLAT,
+ NOPERSPECTIVE,
+ STRUCT,
+ LAYOUT,
+ DIRECTIVE,
+ PRECISION,
+ INVALID_TOKEN
+ };
+
+ static std::string OperatorName(Kind kind) {
+ switch (kind) {
+ case Token::PLUS: return "+";
+ case Token::MINUS: return "-";
+ case Token::STAR: return "*";
+ case Token::SLASH: return "/";
+ case Token::PERCENT: return "%";
+ case Token::SHL: return "<<";
+ case Token::SHR: return ">>";
+ case Token::LOGICALAND: return "&&";
+ case Token::LOGICALOR: return "||";
+ case Token::LOGICALXOR: return "^^";
+ case Token::BITWISEAND: return "&";
+ case Token::BITWISEOR: return "|";
+ case Token::BITWISEXOR: return "^";
+ case Token::EQ: return "=";
+ case Token::EQEQ: return "==";
+ case Token::NEQ: return "!=";
+ case Token::LT: return "<";
+ case Token::GT: return ">";
+ case Token::LTEQ: return "<=";
+ case Token::GTEQ: return ">=";
+ case Token::PLUSEQ: return "+=";
+ case Token::MINUSEQ: return "-=";
+ case Token::STAREQ: return "*=";
+ case Token::SLASHEQ: return "/=";
+ case Token::PERCENTEQ: return "%=";
+ case Token::SHLEQ: return "<<=";
+ case Token::SHREQ: return ">>=";
+ case Token::LOGICALANDEQ: return "&&=";
+ case Token::LOGICALOREQ: return "||=";
+ case Token::LOGICALXOREQ: return "^^=";
+ case Token::BITWISEANDEQ: return "&=";
+ case Token::BITWISEOREQ: return "|=";
+ case Token::BITWISEXOREQ: return "^=";
+ case Token::PLUSPLUS: return "++";
+ case Token::MINUSMINUS: return "--";
+ case Token::NOT: return "!";
+ default:
+ ABORT("unsupported operator: %d\n", kind);
+ }
+ }
+
+ Token() {
+ }
+
+ Token(Position position, Kind kind, std::string text)
+ : fPosition(position)
+ , fKind(kind)
+ , fText(std::move(text)) {}
+
+ Position fPosition;
+ Kind fKind;
+ std::string fText;
+};
+
+} // namespace
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLUtil.cpp b/gfx/skia/skia/src/sksl/SkSLUtil.cpp
new file mode 100644
index 000000000..327bffe4f
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLUtil.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkSLUtil.h"
+
+namespace SkSL {
+
+int stoi(std::string s) {
+ return atoi(s.c_str());
+}
+
+double stod(std::string s) {
+ return atof(s.c_str());
+}
+
+long stol(std::string s) {
+ return atol(s.c_str());
+}
+
+void sksl_abort() {
+#ifdef SKIA
+ sk_abort_no_print();
+ exit(1);
+#else
+ abort();
+#endif
+}
+
+} // namespace
diff --git a/gfx/skia/skia/src/sksl/SkSLUtil.h b/gfx/skia/skia/src/sksl/SkSLUtil.h
new file mode 100644
index 000000000..33611cde0
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLUtil.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_UTIL
+#define SKSL_UTIL
+
+#include <iomanip>
+#include <limits>
+#include <sstream>
+#include <string>
+#include "stdlib.h"
+#include "assert.h"
+#include "SkTypes.h"
+
+namespace SkSL {
+
+// our own definitions of certain std:: functions, because they are not always present on Android
+
+template <typename T> std::string to_string(T value) {
+ std::stringstream buffer;
+ buffer << std::setprecision(std::numeric_limits<T>::digits10) << value;
+ return buffer.str();
+}
+
+#if _MSC_VER
+#define NORETURN __declspec(noreturn)
+#else
+#define NORETURN __attribute__((__noreturn__))
+#endif
+int stoi(std::string s);
+
+double stod(std::string s);
+
+long stol(std::string s);
+
+NORETURN void sksl_abort();
+
+} // namespace
+
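+// ASSERT compiles away in release builds; ASSERT_RESULT still evaluates its argument so that
+// expressions with side effects are not lost.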
+#ifdef DEBUG
+#define ASSERT(x) assert(x)
+#define ASSERT_RESULT(x) ASSERT(x)
+#else
+#define ASSERT(x)
+#define ASSERT_RESULT(x) x
+#endif
+
+#ifdef SKIA
+#define ABORT(...) { SkDebugf(__VA_ARGS__); sksl_abort(); }
+#else
+#define ABORT(...) { sksl_abort(); }
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTBinaryExpression.h b/gfx/skia/skia/src/sksl/ast/SkSLASTBinaryExpression.h
new file mode 100644
index 000000000..88feba66a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTBinaryExpression.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTBINARYEXPRESSION
+#define SKSL_ASTBINARYEXPRESSION
+
+#include "SkSLASTExpression.h"
+#include "../SkSLToken.h"
+#include <sstream>
+
+namespace SkSL {
+
+/**
+ * Represents a binary operation, with the operator represented by the token's type.
+ */
+struct ASTBinaryExpression : public ASTExpression {
+ ASTBinaryExpression(std::unique_ptr<ASTExpression> left, Token op,
+ std::unique_ptr<ASTExpression> right)
+ : INHERITED(op.fPosition, kBinary_Kind)
+ , fLeft(std::move(left))
+ , fOperator(op.fKind)
+ , fRight(std::move(right)) {}
+
+ std::string description() const override {
+ return "(" + fLeft->description() + " " + Token::OperatorName(fOperator) + " " +
+ fRight->description() + ")";
+ }
+
+ const std::unique_ptr<ASTExpression> fLeft;
+ const Token::Kind fOperator;
+ const std::unique_ptr<ASTExpression> fRight;
+
+ typedef ASTExpression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTBlock.h b/gfx/skia/skia/src/sksl/ast/SkSLASTBlock.h
new file mode 100644
index 000000000..09450a3db
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTBlock.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTBLOCK
+#define SKSL_ASTBLOCK
+
+#include "SkSLASTStatement.h"
+
+namespace SkSL {
+
+/**
+ * Represents a curly-braced block of statements.
+ */
+struct ASTBlock : public ASTStatement {
+ ASTBlock(Position position, std::vector<std::unique_ptr<ASTStatement>> statements)
+ : INHERITED(position, kBlock_Kind)
+ , fStatements(std::move(statements)) {}
+
+ std::string description() const override {
+ std::string result("{");
+ for (size_t i = 0; i < fStatements.size(); i++) {
+ result += "\n";
+ result += fStatements[i]->description();
+ }
+ result += "\n}\n";
+ return result;
+ }
+
+ const std::vector<std::unique_ptr<ASTStatement>> fStatements;
+
+ typedef ASTStatement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTBoolLiteral.h b/gfx/skia/skia/src/sksl/ast/SkSLASTBoolLiteral.h
new file mode 100644
index 000000000..ff5882295
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTBoolLiteral.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTBOOLLITERAL
+#define SKSL_ASTBOOLLITERAL
+
+#include "SkSLASTExpression.h"
+
+namespace SkSL {
+
+/**
+ * Represents "true" or "false".
+ */
+struct ASTBoolLiteral : public ASTExpression {
+ ASTBoolLiteral(Position position, bool value)
+ : INHERITED(position, kBool_Kind)
+ , fValue(value) {}
+
+ std::string description() const override {
+ return fValue ? "true" : "false";
+ }
+
+ const bool fValue;
+
+ typedef ASTExpression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTBreakStatement.h b/gfx/skia/skia/src/sksl/ast/SkSLASTBreakStatement.h
new file mode 100644
index 000000000..ede548cc2
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTBreakStatement.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTBREAKSTATEMENT
+#define SKSL_ASTBREAKSTATEMENT
+
+#include "SkSLASTStatement.h"
+
+namespace SkSL {
+
+/**
+ * A 'break' statement.
+ */
+struct ASTBreakStatement : public ASTStatement {
+ ASTBreakStatement(Position position)
+ : INHERITED(position, kBreak_Kind) {}
+
+ std::string description() const override {
+ return "break;";
+ }
+
+ typedef ASTStatement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTCallSuffix.h b/gfx/skia/skia/src/sksl/ast/SkSLASTCallSuffix.h
new file mode 100644
index 000000000..5cff6f6c9
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTCallSuffix.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTCALLSUFFIX
+#define SKSL_ASTCALLSUFFIX
+
+#include <sstream>
+#include <vector>
+#include "SkSLASTSuffix.h"
+
+namespace SkSL {
+
+/**
+ * A parenthesized list of arguments following an expression, indicating a function call.
+ */
+struct ASTCallSuffix : public ASTSuffix {
+ ASTCallSuffix(Position position, std::vector<std::unique_ptr<ASTExpression>> arguments)
+ : INHERITED(position, ASTSuffix::kCall_Kind)
+ , fArguments(std::move(arguments)) {}
+
+ std::string description() const override {
+ std::string result("(");
+ std::string separator = "";
+ for (size_t i = 0; i < fArguments.size(); ++i) {
+ result += separator;
+ separator = ", ";
+ result += fArguments[i]->description();
+ }
+ result += ")";
+ return result;
+ }
+
+ std::vector<std::unique_ptr<ASTExpression>> fArguments;
+
+ typedef ASTSuffix INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTContinueStatement.h b/gfx/skia/skia/src/sksl/ast/SkSLASTContinueStatement.h
new file mode 100644
index 000000000..d5ab7a5c7
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTContinueStatement.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTCONTINUESTATEMENT
+#define SKSL_ASTCONTINUESTATEMENT
+
+#include "SkSLASTStatement.h"
+
+namespace SkSL {
+
+/**
+ * A 'continue' statement.
+ */
+struct ASTContinueStatement : public ASTStatement {
+ ASTContinueStatement(Position position)
+ : INHERITED(position, kContinue_Kind) {}
+
+ std::string description() const override {
+ return "continue;";
+ }
+
+ typedef ASTStatement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTDeclaration.h b/gfx/skia/skia/src/sksl/ast/SkSLASTDeclaration.h
new file mode 100644
index 000000000..8b55ecf83
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTDeclaration.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTDECLARATION
+#define SKSL_ASTDECLARATION
+
+#include "SkSLASTPositionNode.h"
+
+namespace SkSL {
+
+/**
+ * Abstract supertype of declarations such as variables and functions.
+ */
+struct ASTDeclaration : public ASTPositionNode {
+ enum Kind {
+ kVar_Kind,
+ kFunction_Kind,
+ kInterfaceBlock_Kind,
+ kExtension_Kind
+ };
+
+ ASTDeclaration(Position position, Kind kind)
+ : INHERITED(position)
+ , fKind(kind) {}
+
+ Kind fKind;
+
+ typedef ASTPositionNode INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTDiscardStatement.h b/gfx/skia/skia/src/sksl/ast/SkSLASTDiscardStatement.h
new file mode 100644
index 000000000..4eaeec9ea
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTDiscardStatement.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTDISCARDSTATEMENT
+#define SKSL_ASTDISCARDSTATEMENT
+
+#include "SkSLASTStatement.h"
+
+namespace SkSL {
+
+/**
+ * A 'discard' statement.
+ */
+struct ASTDiscardStatement : public ASTStatement {
+ ASTDiscardStatement(Position position)
+ : INHERITED(position, kDiscard_Kind) {}
+
+ std::string description() const override {
+ return "discard;";
+ }
+
+ typedef ASTStatement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTDoStatement.h b/gfx/skia/skia/src/sksl/ast/SkSLASTDoStatement.h
new file mode 100644
index 000000000..a952d62eb
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTDoStatement.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTDOSTATEMENT
+#define SKSL_ASTDOSTATEMENT
+
+#include "SkSLASTStatement.h"
+
+namespace SkSL {
+
+/**
+ * A 'do' loop.
+ */
+struct ASTDoStatement : public ASTStatement {
+ ASTDoStatement(Position position, std::unique_ptr<ASTStatement> statement,
+ std::unique_ptr<ASTExpression> test)
+ : INHERITED(position, kDo_Kind)
+ , fStatement(std::move(statement))
+ , fTest(std::move(test)) {}
+
+ std::string description() const override {
+ return "do " + fStatement->description() + " while (" + fTest->description() + ");";
+ }
+
+ const std::unique_ptr<ASTStatement> fStatement;
+ const std::unique_ptr<ASTExpression> fTest;
+
+ typedef ASTStatement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTExpression.h b/gfx/skia/skia/src/sksl/ast/SkSLASTExpression.h
new file mode 100644
index 000000000..8a4827104
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTExpression.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTEXPRESSION
+#define SKSL_ASTEXPRESSION
+
+#include "SkSLASTPositionNode.h"
+
+namespace SkSL {
+
+/**
+ * Abstract supertype of all expressions.
+ */
+struct ASTExpression : public ASTPositionNode {
+ enum Kind {
+ kFloat_Kind,
+ kIdentifier_Kind,
+ kInt_Kind,
+ kBool_Kind,
+ kPrefix_Kind,
+ kSuffix_Kind,
+ kBinary_Kind,
+ kTernary_Kind
+ };
+
+ ASTExpression(Position position, Kind kind)
+ : INHERITED(position)
+ , fKind(kind) {}
+
+ const Kind fKind;
+
+ typedef ASTPositionNode INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTExpressionStatement.h b/gfx/skia/skia/src/sksl/ast/SkSLASTExpressionStatement.h
new file mode 100644
index 000000000..450cca29f
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTExpressionStatement.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTEXPRESSIONSTATEMENT
+#define SKSL_ASTEXPRESSIONSTATEMENT
+
+#include "SkSLASTStatement.h"
+
+namespace SkSL {
+
+/**
+ * A lone expression being used as a statement.
+ */
+struct ASTExpressionStatement : public ASTStatement {
+ ASTExpressionStatement(std::unique_ptr<ASTExpression> expression)
+ : INHERITED(expression->fPosition, kExpression_Kind)
+ , fExpression(std::move(expression)) {}
+
+ std::string description() const override {
+ return fExpression->description() + ";";
+ }
+
+ const std::unique_ptr<ASTExpression> fExpression;
+
+ typedef ASTStatement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTExtension.h b/gfx/skia/skia/src/sksl/ast/SkSLASTExtension.h
new file mode 100644
index 000000000..896ac46c5
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTExtension.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTEXTENSION
+#define SKSL_ASTEXTENSION
+
+#include "SkSLASTDeclaration.h"
+
+namespace SkSL {
+
+/**
+ * An extension declaration.
+ */
+struct ASTExtension : public ASTDeclaration {
+ ASTExtension(Position position, std::string name)
+ : INHERITED(position, kExtension_Kind)
+ , fName(std::move(name)) {}
+
+ std::string description() const override {
+ return "#extension " + fName + " : enable";
+ }
+
+ const std::string fName;
+
+ typedef ASTDeclaration INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTFieldSuffix.h b/gfx/skia/skia/src/sksl/ast/SkSLASTFieldSuffix.h
new file mode 100644
index 000000000..cf141d822
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTFieldSuffix.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTFIELDSUFFIX
+#define SKSL_ASTFIELDSUFFIX
+
+#include "SkSLASTSuffix.h"
+
+namespace SkSL {
+
+/**
+ * A dotted identifier of the form ".foo". We refer to these as "fields" at parse time even if it is
+ * is actually a vector swizzle (which looks the same to the parser).
+ */
+struct ASTFieldSuffix : public ASTSuffix {
+ ASTFieldSuffix(Position position, std::string field)
+ : INHERITED(position, ASTSuffix::kField_Kind)
+ , fField(std::move(field)) {}
+
+ std::string description() const override {
+ return "." + fField;
+ }
+
+ std::string fField;
+
+ typedef ASTSuffix INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTFloatLiteral.h b/gfx/skia/skia/src/sksl/ast/SkSLASTFloatLiteral.h
new file mode 100644
index 000000000..89d43cc00
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTFloatLiteral.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTFLOATLITERAL
+#define SKSL_ASTFLOATLITERAL
+
+#include "SkSLASTExpression.h"
+
+namespace SkSL {
+
+/**
+ * A literal floating point number.
+ */
+struct ASTFloatLiteral : public ASTExpression {
+ ASTFloatLiteral(Position position, double value)
+ : INHERITED(position, kFloat_Kind)
+ , fValue(value) {}
+
+ std::string description() const override {
+ return to_string(fValue);
+ }
+
+ const double fValue;
+
+ typedef ASTExpression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTForStatement.h b/gfx/skia/skia/src/sksl/ast/SkSLASTForStatement.h
new file mode 100644
index 000000000..f4f68c8f4
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTForStatement.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTFORSTATEMENT
+#define SKSL_ASTFORSTATEMENT
+
+#include "SkSLASTStatement.h"
+
+namespace SkSL {
+
+/**
+ * A 'for' loop.
+ */
+struct ASTForStatement : public ASTStatement {
+ ASTForStatement(Position position, std::unique_ptr<ASTStatement> initializer,
+ std::unique_ptr<ASTExpression> test, std::unique_ptr<ASTExpression> next,
+ std::unique_ptr<ASTStatement> statement)
+ : INHERITED(position, kFor_Kind)
+ , fInitializer(std::move(initializer))
+ , fTest(std::move(test))
+ , fNext(std::move(next))
+ , fStatement(std::move(statement)) {}
+
+ std::string description() const override {
+ std::string result = "for (";
+ if (fInitializer) {
+ result.append(fInitializer->description());
+ }
+ result += " ";
+ if (fTest) {
+ result.append(fTest->description());
+ }
+ result += "; ";
+ if (fNext) {
+ result.append(fNext->description());
+ }
+ result += ") ";
+ result += fStatement->description();
+ return result;
+ }
+
+ const std::unique_ptr<ASTStatement> fInitializer;
+ const std::unique_ptr<ASTExpression> fTest;
+ const std::unique_ptr<ASTExpression> fNext;
+ const std::unique_ptr<ASTStatement> fStatement;
+
+ typedef ASTStatement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTFunction.h b/gfx/skia/skia/src/sksl/ast/SkSLASTFunction.h
new file mode 100644
index 000000000..c5c3b9ad8
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTFunction.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTFUNCTION
+#define SKSL_ASTFUNCTION
+
+#include "SkSLASTBlock.h"
+#include "SkSLASTDeclaration.h"
+#include "SkSLASTParameter.h"
+#include "SkSLASTType.h"
+
+namespace SkSL {
+
+/**
+ * A function declaration or definition. The fBody field will be null for declarations.
+ */
+struct ASTFunction : public ASTDeclaration {
+ ASTFunction(Position position, std::unique_ptr<ASTType> returnType, std::string name,
+ std::vector<std::unique_ptr<ASTParameter>> parameters,
+ std::unique_ptr<ASTBlock> body)
+ : INHERITED(position, kFunction_Kind)
+ , fReturnType(std::move(returnType))
+ , fName(std::move(name))
+ , fParameters(std::move(parameters))
+ , fBody(std::move(body)) {}
+
+ std::string description() const override {
+ std::string result = fReturnType->description() + " " + fName + "(";
+ for (size_t i = 0; i < fParameters.size(); i++) {
+ if (i > 0) {
+ result += ", ";
+ }
+ result += fParameters[i]->description();
+ }
+ if (fBody) {
+ result += ") " + fBody->description();
+ } else {
+ result += ");";
+ }
+ return result;
+ }
+
+ const std::unique_ptr<ASTType> fReturnType;
+ const std::string fName;
+ const std::vector<std::unique_ptr<ASTParameter>> fParameters;
+ const std::unique_ptr<ASTBlock> fBody;
+
+ typedef ASTDeclaration INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTIdentifier.h b/gfx/skia/skia/src/sksl/ast/SkSLASTIdentifier.h
new file mode 100644
index 000000000..d67f64d39
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTIdentifier.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTIDENTIFIER
+#define SKSL_ASTIDENTIFIER
+
+#include "SkSLASTExpression.h"
+
+namespace SkSL {
+
+/**
+ * An identifier in an expression context.
+ */
+struct ASTIdentifier : public ASTExpression {
+ ASTIdentifier(Position position, std::string text)
+ : INHERITED(position, kIdentifier_Kind)
+ , fText(std::move(text)) {}
+
+ std::string description() const override {
+ return fText;
+ }
+
+ const std::string fText;
+
+ typedef ASTExpression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTIfStatement.h b/gfx/skia/skia/src/sksl/ast/SkSLASTIfStatement.h
new file mode 100644
index 000000000..06f663d5f
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTIfStatement.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTIFSTATEMENT
+#define SKSL_ASTIFSTATEMENT
+
+#include "SkSLASTStatement.h"
+
+namespace SkSL {
+
+/**
+ * An 'if' statement.
+ */
+struct ASTIfStatement : public ASTStatement {
+ ASTIfStatement(Position position, std::unique_ptr<ASTExpression> test,
+ std::unique_ptr<ASTStatement> ifTrue, std::unique_ptr<ASTStatement> ifFalse)
+ : INHERITED(position, kIf_Kind)
+ , fTest(std::move(test))
+ , fIfTrue(std::move(ifTrue))
+ , fIfFalse(std::move(ifFalse)) {}
+
+ std::string description() const override {
+ std::string result("if (");
+ result += fTest->description();
+ result += ") ";
+ result += fIfTrue->description();
+ if (fIfFalse) {
+ result += " else ";
+ result += fIfFalse->description();
+ }
+ return result;
+ }
+
+ const std::unique_ptr<ASTExpression> fTest;
+ const std::unique_ptr<ASTStatement> fIfTrue;
+ const std::unique_ptr<ASTStatement> fIfFalse;
+
+ typedef ASTStatement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTIndexSuffix.h b/gfx/skia/skia/src/sksl/ast/SkSLASTIndexSuffix.h
new file mode 100644
index 000000000..44d91fa4c
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTIndexSuffix.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTINDEXSUFFIX
+#define SKSL_ASTINDEXSUFFIX
+
+#include "SkSLASTExpression.h"
+#include "SkSLASTSuffix.h"
+
+namespace SkSL {
+
+/**
+ * A bracketed expression, as in '[0]', indicating an array access.
+ */
+struct ASTIndexSuffix : public ASTSuffix {
+ ASTIndexSuffix(std::unique_ptr<ASTExpression> expression)
+ : INHERITED(expression->fPosition, ASTSuffix::kIndex_Kind)
+ , fExpression(std::move(expression)) {}
+
+ std::string description() const override {
+ return "[" + fExpression->description() + "]";
+ }
+
+ std::unique_ptr<ASTExpression> fExpression;
+
+ typedef ASTSuffix INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTIntLiteral.h b/gfx/skia/skia/src/sksl/ast/SkSLASTIntLiteral.h
new file mode 100644
index 000000000..259884753
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTIntLiteral.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTINTLITERAL
+#define SKSL_ASTINTLITERAL
+
+#include "SkSLASTExpression.h"
+
+namespace SkSL {
+
+/**
+ * A literal integer. At the AST level, integer literals are always positive; a negative number will
+ * appear as a unary minus being applied to an integer literal.
+ */
+struct ASTIntLiteral : public ASTExpression {
+ ASTIntLiteral(Position position, uint64_t value)
+ : INHERITED(position, kInt_Kind)
+ , fValue(value) {}
+
+ std::string description() const override {
+ return to_string(fValue);
+ }
+
+ const uint64_t fValue;
+
+ typedef ASTExpression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTInterfaceBlock.h b/gfx/skia/skia/src/sksl/ast/SkSLASTInterfaceBlock.h
new file mode 100644
index 000000000..c27136207
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTInterfaceBlock.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTINTERFACEBLOCK
+#define SKSL_ASTINTERFACEBLOCK
+
+#include "SkSLASTVarDeclaration.h"
+
+namespace SkSL {
+
+/**
+ * An interface block, as in:
+ *
+ * out gl_PerVertex {
+ * layout(builtin=0) vec4 gl_Position;
+ * layout(builtin=1) float gl_PointSize;
+ * };
+ */
+struct ASTInterfaceBlock : public ASTDeclaration {
+ // valueName is empty when it was not present in the source
+ ASTInterfaceBlock(Position position,
+ ASTModifiers modifiers,
+ std::string interfaceName,
+ std::string valueName,
+ std::vector<std::unique_ptr<ASTVarDeclarations>> declarations)
+ : INHERITED(position, kInterfaceBlock_Kind)
+ , fModifiers(modifiers)
+ , fInterfaceName(std::move(interfaceName))
+ , fValueName(std::move(valueName))
+ , fDeclarations(std::move(declarations)) {}
+
+ std::string description() const override {
+ std::string result = fModifiers.description() + fInterfaceName + " {\n";
+ for (size_t i = 0; i < fDeclarations.size(); i++) {
+ result += fDeclarations[i]->description() + "\n";
+ }
+ result += "}";
+ if (fValueName.length()) {
+ result += " " + fValueName;
+ }
+ return result + ";";
+ }
+
+ const ASTModifiers fModifiers;
+ const std::string fInterfaceName;
+ const std::string fValueName;
+ const std::vector<std::unique_ptr<ASTVarDeclarations>> fDeclarations;
+
+ typedef ASTDeclaration INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTLayout.h b/gfx/skia/skia/src/sksl/ast/SkSLASTLayout.h
new file mode 100644
index 000000000..08d67531c
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTLayout.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTLAYOUT
+#define SKSL_ASTLAYOUT
+
+#include "SkSLASTNode.h"
+#include "SkSLUtil.h"
+
+namespace SkSL {
+
+/**
+ * Represents a layout block appearing before a variable declaration, as in:
+ *
+ * layout (location = 0) int x;
+ */
+struct ASTLayout : public ASTNode {
+ // For all parameters, a -1 means no value
+ ASTLayout(int location, int binding, int index, int set, int builtin, bool originUpperLeft)
+ : fLocation(location)
+ , fBinding(binding)
+ , fIndex(index)
+ , fSet(set)
+ , fBuiltin(builtin)
+ , fOriginUpperLeft(originUpperLeft) {}
+
+ std::string description() const {
+ std::string result;
+ std::string separator;
+ if (fLocation >= 0) {
+ result += separator + "location = " + to_string(fLocation);
+ separator = ", ";
+ }
+ if (fBinding >= 0) {
+ result += separator + "binding = " + to_string(fBinding);
+ separator = ", ";
+ }
+ if (fIndex >= 0) {
+ result += separator + "index = " + to_string(fIndex);
+ separator = ", ";
+ }
+ if (fSet >= 0) {
+ result += separator + "set = " + to_string(fSet);
+ separator = ", ";
+ }
+ if (fBuiltin >= 0) {
+ result += separator + "builtin = " + to_string(fBuiltin);
+ separator = ", ";
+ }
+ if (fOriginUpperLeft) {
+ result += separator + "origin_upper_left";
+ separator = ", ";
+ }
+ if (result.length() > 0) {
+ result = "layout (" + result + ")";
+ }
+ return result;
+ }
+
+ const int fLocation;
+ const int fBinding;
+ const int fIndex;
+ const int fSet;
+ const int fBuiltin;
+ const bool fOriginUpperLeft;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTModifiers.h b/gfx/skia/skia/src/sksl/ast/SkSLASTModifiers.h
new file mode 100644
index 000000000..61d2e9f25
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTModifiers.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTMODIFIERS
+#define SKSL_ASTMODIFIERS
+
+#include "SkSLASTLayout.h"
+#include "SkSLASTNode.h"
+
+namespace SkSL {
+
+/**
+ * A set of modifier keywords (in, out, uniform, etc.) appearing before a declaration.
+ */
+struct ASTModifiers : public ASTNode {
+ enum Flag {
+ kNo_Flag = 0,
+ kConst_Flag = 1,
+ kIn_Flag = 2,
+ kOut_Flag = 4,
+ kLowp_Flag = 8,
+ kMediump_Flag = 16,
+ kHighp_Flag = 32,
+ kUniform_Flag = 64,
+ kFlat_Flag = 128,
+ kNoPerspective_Flag = 256
+ };
+
+ ASTModifiers(ASTLayout layout, int flags)
+ : fLayout(layout)
+ , fFlags(flags) {}
+
+ std::string description() const override {
+ std::string result = fLayout.description();
+ if (fFlags & kUniform_Flag) {
+ result += "uniform ";
+ }
+ if (fFlags & kConst_Flag) {
+ result += "const ";
+ }
+ if (fFlags & kLowp_Flag) {
+ result += "lowp ";
+ }
+ if (fFlags & kMediump_Flag) {
+ result += "mediump ";
+ }
+ if (fFlags & kHighp_Flag) {
+ result += "highp ";
+ }
+ if (fFlags & kFlat_Flag) {
+ result += "flat ";
+ }
+ if (fFlags & kNoPerspective_Flag) {
+ result += "noperspective ";
+ }
+
+ if ((fFlags & kIn_Flag) && (fFlags & kOut_Flag)) {
+ result += "inout ";
+ } else if (fFlags & kIn_Flag) {
+ result += "in ";
+ } else if (fFlags & kOut_Flag) {
+ result += "out ";
+ }
+
+ return result;
+ }
+
+ const ASTLayout fLayout;
+ const int fFlags;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTNode.h b/gfx/skia/skia/src/sksl/ast/SkSLASTNode.h
new file mode 100644
index 000000000..4305011fa
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTNode.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTNODE
+#define SKSL_ASTNODE
+
+#include <memory>
+#include <string>
+
+namespace SkSL {
+
+/**
+ * Represents a node in the abstract syntax tree (AST). The AST is based directly on the parse tree;
+ * it is a parsed-but-not-yet-analyzed version of the program.
+ */
+struct ASTNode {
+ virtual ~ASTNode() {}
+
+ virtual std::string description() const = 0;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTParameter.h b/gfx/skia/skia/src/sksl/ast/SkSLASTParameter.h
new file mode 100644
index 000000000..8f1b4535f
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTParameter.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTPARAMETER
+#define SKSL_ASTPARAMETER
+
+#include "SkSLASTModifiers.h"
+#include "SkSLASTType.h"
+
+namespace SkSL {
+
+/**
+ * A declaration of a parameter, as part of a function declaration.
+ */
+struct ASTParameter : public ASTPositionNode {
+ // 'sizes' is a list of the array sizes appearing on a parameter, in source order.
+ // e.g. int x[3][1] would have sizes [3, 1].
+ ASTParameter(Position position, ASTModifiers modifiers, std::unique_ptr<ASTType> type,
+ std::string name, std::vector<int> sizes)
+ : INHERITED(position)
+ , fModifiers(modifiers)
+ , fType(std::move(type))
+ , fName(std::move(name))
+ , fSizes(std::move(sizes)) {}
+
+ std::string description() const override {
+ std::string result = fModifiers.description() + fType->description() + " " + fName;
+ for (int size : fSizes) {
+ result += "[" + to_string(size) + "]";
+ }
+ return result;
+ }
+
+ const ASTModifiers fModifiers;
+ const std::unique_ptr<ASTType> fType;
+ const std::string fName;
+ const std::vector<int> fSizes;
+
+ typedef ASTPositionNode INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTPositionNode.h b/gfx/skia/skia/src/sksl/ast/SkSLASTPositionNode.h
new file mode 100644
index 000000000..226b4ae4b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTPositionNode.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTPOSITIONNODE
+#define SKSL_ASTPOSITIONNODE
+
+#include "SkSLASTNode.h"
+#include "../SkSLPosition.h"
+
+namespace SkSL {
+
+/**
+ * An AST node with an associated position in the source.
+ */
+struct ASTPositionNode : public ASTNode {
+ ASTPositionNode(Position position)
+ : fPosition(position) {}
+
+ const Position fPosition;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTPrefixExpression.h b/gfx/skia/skia/src/sksl/ast/SkSLASTPrefixExpression.h
new file mode 100644
index 000000000..0d326e2aa
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTPrefixExpression.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTPREFIXEXPRESSION
+#define SKSL_ASTPREFIXEXPRESSION
+
+#include "SkSLASTExpression.h"
+#include "../SkSLToken.h"
+
+namespace SkSL {
+
+/**
+ * An expression modified by a unary operator appearing in front of it, such as '-x' or '!inside'.
+ */
+struct ASTPrefixExpression : public ASTExpression {
+ ASTPrefixExpression(Token op, std::unique_ptr<ASTExpression> operand)
+ : INHERITED(op.fPosition, kPrefix_Kind)
+ , fOperator(op.fKind)
+ , fOperand(std::move(operand)) {}
+
+ std::string description() const override {
+ return Token::OperatorName(fOperator) + fOperand->description();
+ }
+
+ const Token::Kind fOperator;
+ const std::unique_ptr<ASTExpression> fOperand;
+
+ typedef ASTExpression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTReturnStatement.h b/gfx/skia/skia/src/sksl/ast/SkSLASTReturnStatement.h
new file mode 100644
index 000000000..3aac783a8
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTReturnStatement.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTRETURNSTATEMENT
+#define SKSL_ASTRETURNSTATEMENT
+
+#include "SkSLASTStatement.h"
+
+namespace SkSL {
+
+/**
+ * A 'return' statement.
+ */
+struct ASTReturnStatement : public ASTStatement {
+ // expression may be null
+ ASTReturnStatement(Position position, std::unique_ptr<ASTExpression> expression)
+ : INHERITED(position, kReturn_Kind)
+ , fExpression(std::move(expression)) {}
+
+ std::string description() const override {
+ std::string result("return");
+ if (fExpression) {
+ result += " " + fExpression->description();
+ }
+ return result + ";";
+ }
+
+ const std::unique_ptr<ASTExpression> fExpression;
+
+ typedef ASTStatement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTStatement.h b/gfx/skia/skia/src/sksl/ast/SkSLASTStatement.h
new file mode 100644
index 000000000..9ddde063e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTStatement.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTSTATEMENT
+#define SKSL_ASTSTATEMENT
+
+#include <vector>
+#include "SkSLASTPositionNode.h"
+#include "SkSLASTExpression.h"
+
+namespace SkSL {
+
+/**
+ * Abstract supertype of all statements.
+ */
+struct ASTStatement : public ASTPositionNode {
+ enum Kind {
+ kBlock_Kind,
+ kVarDeclaration_Kind,
+ kExpression_Kind,
+ kIf_Kind,
+ kFor_Kind,
+ kWhile_Kind,
+ kDo_Kind,
+ kReturn_Kind,
+ kBreak_Kind,
+ kContinue_Kind,
+ kDiscard_Kind
+ };
+
+ ASTStatement(Position position, Kind kind)
+ : INHERITED(position)
+ , fKind(kind) {}
+
+ Kind fKind;
+
+ typedef ASTPositionNode INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTSuffix.h b/gfx/skia/skia/src/sksl/ast/SkSLASTSuffix.h
new file mode 100644
index 000000000..18f79f01e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTSuffix.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTSUFFIX
+#define SKSL_ASTSUFFIX
+
+#include "SkSLASTPositionNode.h"
+#include "SkSLASTExpression.h"
+
+namespace SkSL {
+
+/**
+ * This and its subclasses represent expression suffixes, such as '[0]' or '.rgb'. Suffixes are not
+ * expressions in and of themselves; they are attached to expressions to modify them.
+ */
+struct ASTSuffix : public ASTPositionNode {
+ enum Kind {
+ kIndex_Kind,
+ kCall_Kind,
+ kField_Kind,
+ kPostIncrement_Kind,
+ kPostDecrement_Kind
+ };
+
+ ASTSuffix(Position position, Kind kind)
+ : INHERITED(position)
+ , fKind(kind) {}
+
+ std::string description() const override {
+ switch (fKind) {
+ case kPostIncrement_Kind:
+ return "++";
+ case kPostDecrement_Kind:
+ return "--";
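+            // kIndex_Kind, kCall_Kind and kField_Kind are expected to be printed by their
+            // subclasses (e.g. ASTIndexSuffix), so only the postfix operators are handled here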
+ default:
+ ABORT("unsupported suffix operator");
+ }
+ }
+
+ Kind fKind;
+
+ typedef ASTPositionNode INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTSuffixExpression.h b/gfx/skia/skia/src/sksl/ast/SkSLASTSuffixExpression.h
new file mode 100644
index 000000000..c0fda294b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTSuffixExpression.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTSUFFIXEXPRESSION
+#define SKSL_ASTSUFFIXEXPRESSION
+
+#include "SkSLASTSuffix.h"
+#include "SkSLASTExpression.h"
+
+namespace SkSL {
+
+/**
+ * An expression with an associated suffix.
+ */
+struct ASTSuffixExpression : public ASTExpression {
+ ASTSuffixExpression(std::unique_ptr<ASTExpression> base, std::unique_ptr<ASTSuffix> suffix)
+ : INHERITED(base->fPosition, kSuffix_Kind)
+ , fBase(std::move(base))
+ , fSuffix(std::move(suffix)) {}
+
+ std::string description() const override {
+ return fBase->description() + fSuffix->description();
+ }
+
+ const std::unique_ptr<ASTExpression> fBase;
+ const std::unique_ptr<ASTSuffix> fSuffix;
+
+ typedef ASTExpression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTTernaryExpression.h b/gfx/skia/skia/src/sksl/ast/SkSLASTTernaryExpression.h
new file mode 100644
index 000000000..20b827a04
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTTernaryExpression.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTTERNARYEXPRESSION
+#define SKSL_ASTTERNARYEXPRESSION
+
+#include "SkSLASTExpression.h"
+
+namespace SkSL {
+
+/**
+ * A ternary expression (test ? ifTrue : ifFalse).
+ */
+struct ASTTernaryExpression : public ASTExpression {
+ ASTTernaryExpression(std::unique_ptr<ASTExpression> test,
+ std::unique_ptr<ASTExpression> ifTrue,
+ std::unique_ptr<ASTExpression> ifFalse)
+ : INHERITED(test->fPosition, kTernary_Kind)
+ , fTest(std::move(test))
+ , fIfTrue(std::move(ifTrue))
+ , fIfFalse(std::move(ifFalse)) {}
+
+ std::string description() const override {
+ return "(" + fTest->description() + " ? " + fIfTrue->description() + " : " +
+ fIfFalse->description() + ")";
+ }
+
+ const std::unique_ptr<ASTExpression> fTest;
+ const std::unique_ptr<ASTExpression> fIfTrue;
+ const std::unique_ptr<ASTExpression> fIfFalse;
+
+ typedef ASTExpression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTType.h b/gfx/skia/skia/src/sksl/ast/SkSLASTType.h
new file mode 100644
index 000000000..b8fdedb21
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTType.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTTYPE
+#define SKSL_ASTTYPE
+
+namespace SkSL {
+
+/**
+ * A type, such as 'int' or 'struct foo'.
+ */
+struct ASTType : public ASTPositionNode {
+ enum Kind {
+ kIdentifier_Kind,
+ kStruct_Kind
+ };
+
+ ASTType(Position position, std::string name, Kind kind)
+ : INHERITED(position)
+ , fName(std::move(name))
+ , fKind(kind) {}
+
+ std::string description() const override {
+ return fName;
+ }
+
+ const std::string fName;
+
+ const Kind fKind;
+
+ typedef ASTPositionNode INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTVarDeclaration.h b/gfx/skia/skia/src/sksl/ast/SkSLASTVarDeclaration.h
new file mode 100644
index 000000000..066922fb8
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTVarDeclaration.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTVARDECLARATIONS
+#define SKSL_ASTVARDECLARATIONS
+
+#include "SkSLASTDeclaration.h"
+#include "SkSLASTModifiers.h"
+#include "SkSLASTStatement.h"
+#include "SkSLASTType.h"
+#include "../SkSLUtil.h"
+
+namespace SkSL {
+
+/**
+ * A single variable declaration within a var declaration statement. For instance, the statement
+ * 'int x = 2, y[3];' is an ASTVarDeclarations statement containing two individual ASTVarDeclaration
+ * instances.
+ */
+struct ASTVarDeclaration {
+ ASTVarDeclaration(const std::string name,
+ std::vector<std::unique_ptr<ASTExpression>> sizes,
+ std::unique_ptr<ASTExpression> value)
+ : fName(name)
+ , fSizes(std::move(sizes))
+ , fValue(std::move(value)) {}
+
+ std::string description() const {
+ std::string result = fName;
+ for (const auto& size : fSizes) {
+ if (size) {
+ result += "[" + size->description() + "]";
+ } else {
+ result += "[]";
+ }
+ }
+ if (fValue) {
+ result += " = " + fValue->description();
+ }
+ return result;
+ }
+
+ std::string fName;
+
+ // array sizes, if any. e.g. 'foo[3][]' has sizes [3, null]
+ std::vector<std::unique_ptr<ASTExpression>> fSizes;
+
+ // initial value, may be null
+ std::unique_ptr<ASTExpression> fValue;
+};
+
+/**
+ * A variable declaration statement, which may consist of one or more individual variables.
+ */
+struct ASTVarDeclarations : public ASTDeclaration {
+ ASTVarDeclarations(ASTModifiers modifiers,
+ std::unique_ptr<ASTType> type,
+ std::vector<ASTVarDeclaration> vars)
+ : INHERITED(type->fPosition, kVar_Kind)
+ , fModifiers(modifiers)
+ , fType(std::move(type))
+ , fVars(std::move(vars)) {}
+
+ std::string description() const override {
+ std::string result = fModifiers.description() + fType->description() + " ";
+ std::string separator = "";
+ for (const auto& var : fVars) {
+ result += separator;
+ separator = ", ";
+ result += var.description();
+ }
+ return result;
+ }
+
+ const ASTModifiers fModifiers;
+ const std::unique_ptr<ASTType> fType;
+ const std::vector<ASTVarDeclaration> fVars;
+
+ typedef ASTDeclaration INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTVarDeclarationStatement.h b/gfx/skia/skia/src/sksl/ast/SkSLASTVarDeclarationStatement.h
new file mode 100644
index 000000000..8bae38914
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTVarDeclarationStatement.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTVARDECLARATIONSTATEMENT
+#define SKSL_ASTVARDECLARATIONSTATEMENT
+
+#include "SkSLASTStatement.h"
+#include "SkSLASTVarDeclaration.h"
+
+namespace SkSL {
+
+/**
+ * A variable declaration appearing as a statement within a function.
+ */
+struct ASTVarDeclarationStatement : public ASTStatement {
+ ASTVarDeclarationStatement(std::unique_ptr<ASTVarDeclarations> decl)
+ : INHERITED(decl->fPosition, kVarDeclaration_Kind)
+ , fDeclarations(std::move(decl)) {}
+
+ std::string description() const override {
+ return fDeclarations->description() + ";";
+ }
+
+ std::unique_ptr<ASTVarDeclarations> fDeclarations;
+
+ typedef ASTStatement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ast/SkSLASTWhileStatement.h b/gfx/skia/skia/src/sksl/ast/SkSLASTWhileStatement.h
new file mode 100644
index 000000000..e29aa23e4
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ast/SkSLASTWhileStatement.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ASTWHILESTATEMENT
+#define SKSL_ASTWHILESTATEMENT
+
+#include "SkSLASTStatement.h"
+
+namespace SkSL {
+
+/**
+ * A 'while' statement.
+ */
+struct ASTWhileStatement : public ASTStatement {
+ ASTWhileStatement(Position position, std::unique_ptr<ASTExpression> test,
+ std::unique_ptr<ASTStatement> statement)
+ : INHERITED(position, kWhile_Kind)
+ , fTest(std::move(test))
+ , fStatement(std::move(statement)) {}
+
+ std::string description() const override {
+ return "while (" + fTest->description() + ") " + fStatement->description();
+ }
+
+ const std::unique_ptr<ASTExpression> fTest;
+ const std::unique_ptr<ASTStatement> fStatement;
+
+ typedef ASTStatement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLBinaryExpression.h b/gfx/skia/skia/src/sksl/ir/SkSLBinaryExpression.h
new file mode 100644
index 000000000..9ecdbc717
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLBinaryExpression.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_BINARYEXPRESSION
+#define SKSL_BINARYEXPRESSION
+
+#include "SkSLExpression.h"
+#include "../SkSLToken.h"
+
+namespace SkSL {
+
+/**
+ * A binary operation.
+ */
+struct BinaryExpression : public Expression {
+ BinaryExpression(Position position, std::unique_ptr<Expression> left, Token::Kind op,
+ std::unique_ptr<Expression> right, const Type& type)
+ : INHERITED(position, kBinary_Kind, type)
+ , fLeft(std::move(left))
+ , fOperator(op)
+ , fRight(std::move(right)) {}
+
+ virtual std::string description() const override {
+ return "(" + fLeft->description() + " " + Token::OperatorName(fOperator) + " " +
+ fRight->description() + ")";
+ }
+
+ const std::unique_ptr<Expression> fLeft;
+ const Token::Kind fOperator;
+ const std::unique_ptr<Expression> fRight;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLBlock.h b/gfx/skia/skia/src/sksl/ir/SkSLBlock.h
new file mode 100644
index 000000000..a53d13d16
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLBlock.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_BLOCK
+#define SKSL_BLOCK
+
+#include "SkSLStatement.h"
+#include "SkSLSymbolTable.h"
+
+namespace SkSL {
+
+/**
+ * A block of multiple statements functioning as a single statement.
+ */
+struct Block : public Statement {
+ Block(Position position, std::vector<std::unique_ptr<Statement>> statements,
+ const std::shared_ptr<SymbolTable> symbols)
+ : INHERITED(position, kBlock_Kind)
+ , fStatements(std::move(statements))
+ , fSymbols(std::move(symbols)) {}
+
+ std::string description() const override {
+ std::string result = "{";
+ for (size_t i = 0; i < fStatements.size(); i++) {
+ result += "\n";
+ result += fStatements[i]->description();
+ }
+ result += "\n}\n";
+ return result;
+ }
+
+ const std::vector<std::unique_ptr<Statement>> fStatements;
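+    // the symbol table for declarations scoped to this block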
+ const std::shared_ptr<SymbolTable> fSymbols;
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLBoolLiteral.h b/gfx/skia/skia/src/sksl/ir/SkSLBoolLiteral.h
new file mode 100644
index 000000000..ba054e418
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLBoolLiteral.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_BOOLLITERAL
+#define SKSL_BOOLLITERAL
+
+#include "SkSLContext.h"
+#include "SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * Represents 'true' or 'false'.
+ */
+struct BoolLiteral : public Expression {
+ BoolLiteral(const Context& context, Position position, bool value)
+ : INHERITED(position, kBoolLiteral_Kind, *context.fBool_Type)
+ , fValue(value) {}
+
+ std::string description() const override {
+ return fValue ? "true" : "false";
+ }
+
+ bool isConstant() const override {
+ return true;
+ }
+
+ const bool fValue;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLBreakStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLBreakStatement.h
new file mode 100644
index 000000000..8aa17b096
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLBreakStatement.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_BREAKSTATEMENT
+#define SKSL_BREAKSTATEMENT
+
+#include "SkSLExpression.h"
+#include "SkSLStatement.h"
+
+namespace SkSL {
+
+/**
+ * A 'break' statement.
+ */
+struct BreakStatement : public Statement {
+ BreakStatement(Position position)
+ : INHERITED(position, kBreak_Kind) {}
+
+ std::string description() const override {
+ return "break;";
+ }
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructor.h b/gfx/skia/skia/src/sksl/ir/SkSLConstructor.h
new file mode 100644
index 000000000..0501b651e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructor.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CONSTRUCTOR
+#define SKSL_CONSTRUCTOR
+
+#include "SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * Represents the construction of a compound type, such as "vec2(x, y)".
+ */
+struct Constructor : public Expression {
+ Constructor(Position position, const Type& type,
+ std::vector<std::unique_ptr<Expression>> arguments)
+ : INHERITED(position, kConstructor_Kind, type)
+ , fArguments(std::move(arguments)) {}
+
+ std::string description() const override {
+ std::string result = fType.description() + "(";
+ std::string separator = "";
+ for (size_t i = 0; i < fArguments.size(); i++) {
+ result += separator;
+ result += fArguments[i]->description();
+ separator = ", ";
+ }
+ result += ")";
+ return result;
+ }
+
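+    // a constructor is a compile-time constant if and only if all of its arguments are constant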
+ bool isConstant() const override {
+ for (size_t i = 0; i < fArguments.size(); i++) {
+ if (!fArguments[i]->isConstant()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ const std::vector<std::unique_ptr<Expression>> fArguments;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLContinueStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLContinueStatement.h
new file mode 100644
index 000000000..1951bd990
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLContinueStatement.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CONTINUESTATEMENT
+#define SKSL_CONTINUESTATEMENT
+
+#include "SkSLExpression.h"
+#include "SkSLStatement.h"
+
+namespace SkSL {
+
+/**
+ * A 'continue' statement.
+ */
+struct ContinueStatement : public Statement {
+ ContinueStatement(Position position)
+ : INHERITED(position, kContinue_Kind) {}
+
+ std::string description() const override {
+ return "continue;";
+ }
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLDiscardStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLDiscardStatement.h
new file mode 100644
index 000000000..b39712ebd
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLDiscardStatement.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DISCARDSTATEMENT
+#define SKSL_DISCARDSTATEMENT
+
+#include "SkSLExpression.h"
+#include "SkSLStatement.h"
+
+namespace SkSL {
+
+/**
+ * A 'discard' statement.
+ */
+struct DiscardStatement : public Statement {
+ DiscardStatement(Position position)
+ : INHERITED(position, kDiscard_Kind) {}
+
+ std::string description() const override {
+ return "discard;";
+ }
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLDoStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLDoStatement.h
new file mode 100644
index 000000000..601245327
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLDoStatement.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DOSTATEMENT
+#define SKSL_DOSTATEMENT
+
+#include "SkSLExpression.h"
+#include "SkSLStatement.h"
+
+namespace SkSL {
+
+/**
+ * A 'do' statement.
+ */
+struct DoStatement : public Statement {
+ DoStatement(Position position, std::unique_ptr<Statement> statement,
+ std::unique_ptr<Expression> test)
+ : INHERITED(position, kDo_Kind)
+ , fStatement(std::move(statement))
+ , fTest(std::move(test)) {}
+
+ std::string description() const override {
+ return "do " + fStatement->description() + " while (" + fTest->description() + ");";
+ }
+
+ const std::unique_ptr<Statement> fStatement;
+ const std::unique_ptr<Expression> fTest;
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLExpression.h b/gfx/skia/skia/src/sksl/ir/SkSLExpression.h
new file mode 100644
index 000000000..92cb37de7
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLExpression.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_EXPRESSION
+#define SKSL_EXPRESSION
+
+#include "SkSLIRNode.h"
+#include "SkSLType.h"
+
+namespace SkSL {
+
+/**
+ * Abstract supertype of all expressions.
+ */
+struct Expression : public IRNode {
+ enum Kind {
+ kBinary_Kind,
+ kBoolLiteral_Kind,
+ kConstructor_Kind,
+ kIntLiteral_Kind,
+ kFieldAccess_Kind,
+ kFloatLiteral_Kind,
+ kFunctionReference_Kind,
+ kFunctionCall_Kind,
+ kIndex_Kind,
+ kPrefix_Kind,
+ kPostfix_Kind,
+ kSwizzle_Kind,
+ kVariableReference_Kind,
+ kTernary_Kind,
+ kTypeReference_Kind,
+ };
+
+ Expression(Position position, Kind kind, const Type& type)
+ : INHERITED(position)
+ , fKind(kind)
+ , fType(std::move(type)) {}
+
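+    // whether this expression can be evaluated at compile time; overridden by the literal
+    // expression types and by Constructor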
+ virtual bool isConstant() const {
+ return false;
+ }
+
+ const Kind fKind;
+ const Type& fType;
+
+ typedef IRNode INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLExpressionStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLExpressionStatement.h
new file mode 100644
index 000000000..e975ccf2a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLExpressionStatement.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_EXPRESSIONSTATEMENT
+#define SKSL_EXPRESSIONSTATEMENT
+
+#include "SkSLExpression.h"
+#include "SkSLStatement.h"
+
+namespace SkSL {
+
+/**
+ * A lone expression being used as a statement.
+ */
+struct ExpressionStatement : public Statement {
+ ExpressionStatement(std::unique_ptr<Expression> expression)
+ : INHERITED(expression->fPosition, kExpression_Kind)
+ , fExpression(std::move(expression)) {}
+
+ std::string description() const override {
+ return fExpression->description() + ";";
+ }
+
+ const std::unique_ptr<Expression> fExpression;
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLExtension.h b/gfx/skia/skia/src/sksl/ir/SkSLExtension.h
new file mode 100644
index 000000000..d7f83fad8
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLExtension.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_EXTENSION
+#define SKSL_EXTENSION
+
+#include "SkSLProgramElement.h"
+
+namespace SkSL {
+
+/**
+ * An extension declaration.
+ */
+struct Extension : public ProgramElement {
+ Extension(Position position, std::string name)
+ : INHERITED(position, kExtension_Kind)
+ , fName(std::move(name)) {}
+
+ std::string description() const override {
+ return "#extension " + fName + " : enable";
+ }
+
+ const std::string fName;
+
+ typedef ProgramElement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLField.h b/gfx/skia/skia/src/sksl/ir/SkSLField.h
new file mode 100644
index 000000000..a01df2943
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLField.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FIELD
+#define SKSL_FIELD
+
+#include "SkSLModifiers.h"
+#include "SkSLPosition.h"
+#include "SkSLSymbol.h"
+#include "SkSLType.h"
+
+namespace SkSL {
+
+/**
+ * A symbol which should be interpreted as a field access. Fields are added to the symbol table
+ * whenever a bare reference to an identifier should refer to a struct field; in GLSL, this is the
+ * result of declaring anonymous interface blocks.
+ */
+struct Field : public Symbol {
+ Field(Position position, const Variable& owner, int fieldIndex)
+ : INHERITED(position, kField_Kind, owner.fType.fields()[fieldIndex].fName)
+ , fOwner(owner)
+ , fFieldIndex(fieldIndex) {}
+
+ virtual std::string description() const override {
+ return fOwner.description() + "." + fOwner.fType.fields()[fFieldIndex].fName;
+ }
+
+ const Variable& fOwner;
+ const int fFieldIndex;
+
+ typedef Symbol INHERITED;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFieldAccess.h b/gfx/skia/skia/src/sksl/ir/SkSLFieldAccess.h
new file mode 100644
index 000000000..4be4e9e84
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFieldAccess.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FIELDACCESS
+#define SKSL_FIELDACCESS
+
+#include "SkSLExpression.h"
+#include "SkSLUtil.h"
+
+namespace SkSL {
+
+/**
+ * An expression which extracts a field from a struct, as in 'foo.bar'.
+ */
+struct FieldAccess : public Expression {
+ enum OwnerKind {
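+        // an ordinary field access; written out as 'base.field' in GLSL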
+ kDefault_OwnerKind,
+ // this field access is to a field of an anonymous interface block (and thus, the field name
+ // is actually in global scope, so only the field name needs to be written in GLSL)
+ kAnonymousInterfaceBlock_OwnerKind
+ };
+
+ FieldAccess(std::unique_ptr<Expression> base, int fieldIndex,
+ OwnerKind ownerKind = kDefault_OwnerKind)
+ : INHERITED(base->fPosition, kFieldAccess_Kind, *base->fType.fields()[fieldIndex].fType)
+ , fBase(std::move(base))
+ , fFieldIndex(fieldIndex)
+ , fOwnerKind(ownerKind) {}
+
+ virtual std::string description() const override {
+ return fBase->description() + "." + fBase->fType.fields()[fFieldIndex].fName;
+ }
+
+ const std::unique_ptr<Expression> fBase;
+ const int fFieldIndex;
+ const OwnerKind fOwnerKind;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFloatLiteral.h b/gfx/skia/skia/src/sksl/ir/SkSLFloatLiteral.h
new file mode 100644
index 000000000..a8fcfcf64
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFloatLiteral.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FLOATLITERAL
+#define SKSL_FLOATLITERAL
+
+#include "SkSLContext.h"
+#include "SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * A literal floating point number.
+ */
+struct FloatLiteral : public Expression {
+ FloatLiteral(const Context& context, Position position, double value)
+ : INHERITED(position, kFloatLiteral_Kind, *context.fFloat_Type)
+ , fValue(value) {}
+
+ virtual std::string description() const override {
+ return to_string(fValue);
+ }
+
+ bool isConstant() const override {
+ return true;
+ }
+
+ const double fValue;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLForStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLForStatement.h
new file mode 100644
index 000000000..642d15125
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLForStatement.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FORSTATEMENT
+#define SKSL_FORSTATEMENT
+
+#include "SkSLExpression.h"
+#include "SkSLStatement.h"
+#include "SkSLSymbolTable.h"
+
+namespace SkSL {
+
+/**
+ * A 'for' statement.
+ */
+struct ForStatement : public Statement {
+ ForStatement(Position position, std::unique_ptr<Statement> initializer,
+ std::unique_ptr<Expression> test, std::unique_ptr<Expression> next,
+ std::unique_ptr<Statement> statement, std::shared_ptr<SymbolTable> symbols)
+ : INHERITED(position, kFor_Kind)
+ , fInitializer(std::move(initializer))
+ , fTest(std::move(test))
+ , fNext(std::move(next))
+ , fStatement(std::move(statement))
+ , fSymbols(symbols) {}
+
+ std::string description() const override {
+ std::string result = "for (";
+ if (fInitializer) {
+ result += fInitializer->description();
+ }
+ result += " ";
+ if (fTest) {
+ result += fTest->description();
+ }
+ result += "; ";
+ if (fNext) {
+ result += fNext->description();
+ }
+ result += ") " + fStatement->description();
+ return result;
+ }
+
+ const std::unique_ptr<Statement> fInitializer;
+ const std::unique_ptr<Expression> fTest;
+ const std::unique_ptr<Expression> fNext;
+ const std::unique_ptr<Statement> fStatement;
+ const std::shared_ptr<SymbolTable> fSymbols;
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFunctionCall.h b/gfx/skia/skia/src/sksl/ir/SkSLFunctionCall.h
new file mode 100644
index 000000000..85dba40f2
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFunctionCall.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FUNCTIONCALL
+#define SKSL_FUNCTIONCALL
+
+#include "SkSLExpression.h"
+#include "SkSLFunctionDeclaration.h"
+
+namespace SkSL {
+
+/**
+ * A function invocation.
+ */
+struct FunctionCall : public Expression {
+ FunctionCall(Position position, const FunctionDeclaration& function,
+ std::vector<std::unique_ptr<Expression>> arguments)
+ : INHERITED(position, kFunctionCall_Kind, function.fReturnType)
+ , fFunction(std::move(function))
+ , fArguments(std::move(arguments)) {}
+
+ std::string description() const override {
+ std::string result = fFunction.fName + "(";
+ std::string separator = "";
+ for (size_t i = 0; i < fArguments.size(); i++) {
+ result += separator;
+ result += fArguments[i]->description();
+ separator = ", ";
+ }
+ result += ")";
+ return result;
+ }
+
+ const FunctionDeclaration& fFunction;
+ const std::vector<std::unique_ptr<Expression>> fArguments;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFunctionDeclaration.h b/gfx/skia/skia/src/sksl/ir/SkSLFunctionDeclaration.h
new file mode 100644
index 000000000..16a184a6d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFunctionDeclaration.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FUNCTIONDECLARATION
+#define SKSL_FUNCTIONDECLARATION
+
+#include "SkSLModifiers.h"
+#include "SkSLSymbol.h"
+#include "SkSLSymbolTable.h"
+#include "SkSLType.h"
+#include "SkSLVariable.h"
+
+namespace SkSL {
+
+/**
+ * A function declaration (not a definition -- does not contain a body).
+ */
+struct FunctionDeclaration : public Symbol {
+ FunctionDeclaration(Position position, std::string name,
+ std::vector<const Variable*> parameters, const Type& returnType)
+ : INHERITED(position, kFunctionDeclaration_Kind, std::move(name))
+ , fDefined(false)
+ , fParameters(std::move(parameters))
+ , fReturnType(returnType) {}
+
+ std::string description() const override {
+ std::string result = fReturnType.description() + " " + fName + "(";
+ std::string separator = "";
+ for (auto p : fParameters) {
+ result += separator;
+ separator = ", ";
+ result += p->description();
+ }
+ result += ")";
+ return result;
+ }
+
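+    // returns true if this declaration and f have the same name and parameter types; the
+    // return type is not considered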
+ bool matches(const FunctionDeclaration& f) const {
+ if (fName != f.fName) {
+ return false;
+ }
+ if (fParameters.size() != f.fParameters.size()) {
+ return false;
+ }
+ for (size_t i = 0; i < fParameters.size(); i++) {
+ if (fParameters[i]->fType != f.fParameters[i]->fType) {
+ return false;
+ }
+ }
+ return true;
+ }
+
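+    // true once a definition (body) has been seen for this declaration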
+ mutable bool fDefined;
+ const std::vector<const Variable*> fParameters;
+ const Type& fReturnType;
+
+ typedef Symbol INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFunctionDefinition.h b/gfx/skia/skia/src/sksl/ir/SkSLFunctionDefinition.h
new file mode 100644
index 000000000..ace27a3ed
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFunctionDefinition.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FUNCTIONDEFINITION
+#define SKSL_FUNCTIONDEFINITION
+
+#include "SkSLBlock.h"
+#include "SkSLFunctionDeclaration.h"
+#include "SkSLProgramElement.h"
+
+namespace SkSL {
+
+/**
+ * A function definition (a declaration plus an associated block of code).
+ */
+struct FunctionDefinition : public ProgramElement {
+ FunctionDefinition(Position position, const FunctionDeclaration& declaration,
+ std::unique_ptr<Block> body)
+ : INHERITED(position, kFunction_Kind)
+ , fDeclaration(declaration)
+ , fBody(std::move(body)) {}
+
+ std::string description() const override {
+ return fDeclaration.description() + " " + fBody->description();
+ }
+
+ const FunctionDeclaration& fDeclaration;
+ const std::unique_ptr<Block> fBody;
+
+ typedef ProgramElement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFunctionReference.h b/gfx/skia/skia/src/sksl/ir/SkSLFunctionReference.h
new file mode 100644
index 000000000..f3f8fb71d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFunctionReference.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FUNCTIONREFERENCE
+#define SKSL_FUNCTIONREFERENCE
+
+#include "SkSLContext.h"
+#include "SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * An identifier referring to a function name. This is an intermediate value: FunctionReferences are
+ * always eventually replaced by FunctionCalls in valid programs.
+ */
+struct FunctionReference : public Expression {
+ FunctionReference(const Context& context, Position position,
+ std::vector<const FunctionDeclaration*> function)
+ : INHERITED(position, kFunctionReference_Kind, *context.fInvalid_Type)
+ , fFunctions(function) {}
+
+ virtual std::string description() const override {
+ ASSERT(false);
+ return "<function>";
+ }
+
+ const std::vector<const FunctionDeclaration*> fFunctions;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLIRNode.h b/gfx/skia/skia/src/sksl/ir/SkSLIRNode.h
new file mode 100644
index 000000000..8c433cfc6
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLIRNode.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_IRNODE
+#define SKSL_IRNODE
+
+#include "../SkSLPosition.h"
+
+namespace SkSL {
+
+/**
+ * Represents a node in the intermediate representation (IR) tree. The IR is a fully-resolved
+ * version of the program (all types determined, everything validated), ready for code generation.
+ */
+struct IRNode {
+ IRNode(Position position)
+ : fPosition(position) {}
+
+ virtual ~IRNode() {}
+
+ virtual std::string description() const = 0;
+
+ const Position fPosition;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLIfStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLIfStatement.h
new file mode 100644
index 000000000..8ab5c00fd
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLIfStatement.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_IFSTATEMENT
+#define SKSL_IFSTATEMENT
+
+#include "SkSLExpression.h"
+#include "SkSLStatement.h"
+
+namespace SkSL {
+
+/**
+ * An 'if' statement.
+ */
+struct IfStatement : public Statement {
+ IfStatement(Position position, std::unique_ptr<Expression> test,
+ std::unique_ptr<Statement> ifTrue, std::unique_ptr<Statement> ifFalse)
+ : INHERITED(position, kIf_Kind)
+ , fTest(std::move(test))
+ , fIfTrue(std::move(ifTrue))
+ , fIfFalse(std::move(ifFalse)) {}
+
+ std::string description() const override {
+ std::string result = "if (" + fTest->description() + ") " + fIfTrue->description();
+ if (fIfFalse) {
+ result += " else " + fIfFalse->description();
+ }
+ return result;
+ }
+
+ const std::unique_ptr<Expression> fTest;
+ const std::unique_ptr<Statement> fIfTrue;
+ const std::unique_ptr<Statement> fIfFalse;
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLIndexExpression.h b/gfx/skia/skia/src/sksl/ir/SkSLIndexExpression.h
new file mode 100644
index 000000000..f5b0d09c2
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLIndexExpression.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_INDEX
+#define SKSL_INDEX
+
+#include "SkSLExpression.h"
+#include "SkSLUtil.h"
+
+namespace SkSL {
+
+/**
+ * Given a type, returns the type that will result from extracting an array value from it.
+ */
+static const Type& index_type(const Context& context, const Type& type) {
+ if (type.kind() == Type::kMatrix_Kind) {
+ if (type.componentType() == *context.fFloat_Type) {
+ switch (type.columns()) {
+ case 2: return *context.fVec2_Type;
+ case 3: return *context.fVec3_Type;
+ case 4: return *context.fVec4_Type;
+ default: ASSERT(false);
+ }
+ } else {
+ ASSERT(type.componentType() == *context.fDouble_Type);
+ switch (type.columns()) {
+ case 2: return *context.fDVec2_Type;
+ case 3: return *context.fDVec3_Type;
+ case 4: return *context.fDVec4_Type;
+ default: ASSERT(false);
+ }
+ }
+ }
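+    // non-matrix types (vectors and arrays) index to their component type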
+ return type.componentType();
+}
+
+/**
+ * An expression which extracts a value from an array or matrix, as in 'm[2]'.
+ */
+struct IndexExpression : public Expression {
+ IndexExpression(const Context& context, std::unique_ptr<Expression> base,
+ std::unique_ptr<Expression> index)
+ : INHERITED(base->fPosition, kIndex_Kind, index_type(context, base->fType))
+ , fBase(std::move(base))
+ , fIndex(std::move(index)) {
+ ASSERT(fIndex->fType == *context.fInt_Type);
+ }
+
+ std::string description() const override {
+ return fBase->description() + "[" + fIndex->description() + "]";
+ }
+
+ const std::unique_ptr<Expression> fBase;
+ const std::unique_ptr<Expression> fIndex;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLIntLiteral.h b/gfx/skia/skia/src/sksl/ir/SkSLIntLiteral.h
new file mode 100644
index 000000000..2bc565712
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLIntLiteral.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_INTLITERAL
+#define SKSL_INTLITERAL
+
+#include "SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * A literal integer.
+ */
+struct IntLiteral : public Expression {
+ // FIXME: we will need to revisit this if/when we add full support for both signed and unsigned
+ // 64-bit integers, but for right now an int64_t will hold every value we care about
+ IntLiteral(const Context& context, Position position, int64_t value)
+ : INHERITED(position, kIntLiteral_Kind, *context.fInt_Type)
+ , fValue(value) {}
+
+ virtual std::string description() const override {
+ return to_string(fValue);
+ }
+
+ bool isConstant() const override {
+ return true;
+ }
+
+ const int64_t fValue;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLInterfaceBlock.h b/gfx/skia/skia/src/sksl/ir/SkSLInterfaceBlock.h
new file mode 100644
index 000000000..f1121ed70
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLInterfaceBlock.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_INTERFACEBLOCK
+#define SKSL_INTERFACEBLOCK
+
+#include "SkSLProgramElement.h"
+#include "SkSLVarDeclaration.h"
+
+namespace SkSL {
+
+/**
+ * An interface block, as in:
+ *
+ * out gl_PerVertex {
+ * layout(builtin=0) vec4 gl_Position;
+ * layout(builtin=1) float gl_PointSize;
+ * };
+ *
+ * At the IR level, this is represented by a single variable of struct type.
+ */
+struct InterfaceBlock : public ProgramElement {
+ InterfaceBlock(Position position, const Variable& var, std::shared_ptr<SymbolTable> typeOwner)
+ : INHERITED(position, kInterfaceBlock_Kind)
+ , fVariable(std::move(var))
+ , fTypeOwner(typeOwner) {
+ ASSERT(fVariable.fType.kind() == Type::kStruct_Kind);
+ }
+
+ std::string description() const override {
+ std::string result = fVariable.fModifiers.description() + fVariable.fName + " {\n";
+ for (size_t i = 0; i < fVariable.fType.fields().size(); i++) {
+ result += fVariable.fType.fields()[i].description() + "\n";
+ }
+ result += "};";
+ return result;
+ }
+
+ const Variable& fVariable;
+ const std::shared_ptr<SymbolTable> fTypeOwner;
+
+ typedef ProgramElement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLLayout.h b/gfx/skia/skia/src/sksl/ir/SkSLLayout.h
new file mode 100644
index 000000000..d8dc98096
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLLayout.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_LAYOUT
+#define SKSL_LAYOUT
+
+namespace SkSL {
+
+/**
+ * Represents a layout block appearing before a variable declaration, as in:
+ *
+ * layout (location = 0) int x;
+ */
+struct Layout {
+ Layout(const ASTLayout& layout)
+ : fLocation(layout.fLocation)
+ , fBinding(layout.fBinding)
+ , fIndex(layout.fIndex)
+ , fSet(layout.fSet)
+ , fBuiltin(layout.fBuiltin)
+ , fOriginUpperLeft(layout.fOriginUpperLeft) {}
+
+ Layout(int location, int binding, int index, int set, int builtin, bool originUpperLeft)
+ : fLocation(location)
+ , fBinding(binding)
+ , fIndex(index)
+ , fSet(set)
+ , fBuiltin(builtin)
+ , fOriginUpperLeft(originUpperLeft) {}
+
+ std::string description() const {
+ std::string result;
+ std::string separator;
+ if (fLocation >= 0) {
+ result += separator + "location = " + to_string(fLocation);
+ separator = ", ";
+ }
+ if (fBinding >= 0) {
+ result += separator + "binding = " + to_string(fBinding);
+ separator = ", ";
+ }
+ if (fIndex >= 0) {
+ result += separator + "index = " + to_string(fIndex);
+ separator = ", ";
+ }
+ if (fSet >= 0) {
+ result += separator + "set = " + to_string(fSet);
+ separator = ", ";
+ }
+ if (fBuiltin >= 0) {
+ result += separator + "builtin = " + to_string(fBuiltin);
+ separator = ", ";
+ }
+ if (fOriginUpperLeft) {
+ result += separator + "origin_upper_left";
+ separator = ", ";
+ }
+ if (result.length() > 0) {
+ result = "layout (" + result + ")";
+ }
+ return result;
+ }
+
+ bool operator==(const Layout& other) const {
+ return fLocation == other.fLocation &&
+ fBinding == other.fBinding &&
+ fIndex == other.fIndex &&
+ fSet == other.fSet &&
+ fBuiltin == other.fBuiltin;
+ }
+
+ bool operator!=(const Layout& other) const {
+ return !(*this == other);
+ }
+
+ // everything but builtin is in the GLSL spec; builtin comes from SPIR-V and identifies which
+ // particular builtin value this object represents.
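+    // as with ASTLayout, a value of -1 means the corresponding value was not specified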
+ int fLocation;
+ int fBinding;
+ int fIndex;
+ int fSet;
+ int fBuiltin;
+ bool fOriginUpperLeft;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLModifiers.h b/gfx/skia/skia/src/sksl/ir/SkSLModifiers.h
new file mode 100644
index 000000000..f39e92959
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLModifiers.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_MODIFIERS
+#define SKSL_MODIFIERS
+
+#include "../ast/SkSLASTModifiers.h"
+#include "SkSLLayout.h"
+
+namespace SkSL {
+
+/**
+ * A set of modifier keywords (in, out, uniform, etc.) appearing before a declaration.
+ */
+struct Modifiers {
+ enum Flag {
+ kNo_Flag = ASTModifiers::kNo_Flag,
+ kConst_Flag = ASTModifiers::kConst_Flag,
+ kIn_Flag = ASTModifiers::kIn_Flag,
+ kOut_Flag = ASTModifiers::kOut_Flag,
+ kLowp_Flag = ASTModifiers::kLowp_Flag,
+ kMediump_Flag = ASTModifiers::kMediump_Flag,
+ kHighp_Flag = ASTModifiers::kHighp_Flag,
+ kUniform_Flag = ASTModifiers::kUniform_Flag,
+ kFlat_Flag = ASTModifiers::kFlat_Flag,
+ kNoPerspective_Flag = ASTModifiers::kNoPerspective_Flag
+ };
+
+ Modifiers(const ASTModifiers& modifiers)
+ : fLayout(modifiers.fLayout)
+ , fFlags(modifiers.fFlags) {}
+
+ Modifiers(Layout& layout, int flags)
+ : fLayout(layout)
+ , fFlags(flags) {}
+
+ std::string description() const {
+ std::string result = fLayout.description();
+ if (fFlags & kUniform_Flag) {
+ result += "uniform ";
+ }
+ if (fFlags & kConst_Flag) {
+ result += "const ";
+ }
+ if (fFlags & kLowp_Flag) {
+ result += "lowp ";
+ }
+ if (fFlags & kMediump_Flag) {
+ result += "mediump ";
+ }
+ if (fFlags & kHighp_Flag) {
+ result += "highp ";
+ }
+ if (fFlags & kFlat_Flag) {
+ result += "flat ";
+ }
+ if (fFlags & kNoPerspective_Flag) {
+ result += "noperspective ";
+ }
+
+ if ((fFlags & kIn_Flag) && (fFlags & kOut_Flag)) {
+ result += "inout ";
+ } else if (fFlags & kIn_Flag) {
+ result += "in ";
+ } else if (fFlags & kOut_Flag) {
+ result += "out ";
+ }
+
+ return result;
+ }
+
+ bool operator==(const Modifiers& other) const {
+ return fLayout == other.fLayout && fFlags == other.fFlags;
+ }
+
+ bool operator!=(const Modifiers& other) const {
+ return !(*this == other);
+ }
+
+ Layout fLayout;
+ int fFlags;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLPostfixExpression.h b/gfx/skia/skia/src/sksl/ir/SkSLPostfixExpression.h
new file mode 100644
index 000000000..de146ac43
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLPostfixExpression.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_POSTFIXEXPRESSION
+#define SKSL_POSTFIXEXPRESSION
+
+#include "SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * An expression modified by a unary operator appearing after it, such as 'i++'.
+ */
+struct PostfixExpression : public Expression {
+ PostfixExpression(std::unique_ptr<Expression> operand, Token::Kind op)
+ : INHERITED(operand->fPosition, kPostfix_Kind, operand->fType)
+ , fOperand(std::move(operand))
+ , fOperator(op) {}
+
+ virtual std::string description() const override {
+ return fOperand->description() + Token::OperatorName(fOperator);
+ }
+
+ const std::unique_ptr<Expression> fOperand;
+ const Token::Kind fOperator;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLPrefixExpression.h b/gfx/skia/skia/src/sksl/ir/SkSLPrefixExpression.h
new file mode 100644
index 000000000..53c3849b3
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLPrefixExpression.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_PREFIXEXPRESSION
+#define SKSL_PREFIXEXPRESSION
+
+#include "SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * An expression modified by a unary operator appearing before it, such as '!flag'.
+ */
+struct PrefixExpression : public Expression {
+ PrefixExpression(Token::Kind op, std::unique_ptr<Expression> operand)
+ : INHERITED(operand->fPosition, kPrefix_Kind, operand->fType)
+ , fOperand(std::move(operand))
+ , fOperator(op) {}
+
+ virtual std::string description() const override {
+ return Token::OperatorName(fOperator) + fOperand->description();
+ }
+
+ const std::unique_ptr<Expression> fOperand;
+ const Token::Kind fOperator;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLProgram.h b/gfx/skia/skia/src/sksl/ir/SkSLProgram.h
new file mode 100644
index 000000000..205db6e93
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLProgram.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_PROGRAM
+#define SKSL_PROGRAM
+
+#include <vector>
+#include <memory>
+
+#include "SkSLProgramElement.h"
+#include "SkSLSymbolTable.h"
+
+namespace SkSL {
+
+/**
+ * Represents a fully-digested program, ready for code generation.
+ */
+struct Program {
+ enum Kind {
+ kFragment_Kind,
+ kVertex_Kind
+ };
+
+ Program(Kind kind, std::vector<std::unique_ptr<ProgramElement>> elements,
+ std::shared_ptr<SymbolTable> symbols)
+ : fKind(kind)
+ , fElements(std::move(elements))
+ , fSymbols(symbols) {}
+
+ Kind fKind;
+
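+    // the top-level elements (functions, global variables, interface blocks, extensions)
+    // that make up this program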
+ std::vector<std::unique_ptr<ProgramElement>> fElements;
+ std::shared_ptr<SymbolTable> fSymbols;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLProgramElement.h b/gfx/skia/skia/src/sksl/ir/SkSLProgramElement.h
new file mode 100644
index 000000000..44fc34066
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLProgramElement.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_PROGRAMELEMENT
+#define SKSL_PROGRAMELEMENT
+
+#include "SkSLIRNode.h"
+
+namespace SkSL {
+
+/**
+ * Represents a top-level element (e.g. function or global variable) in a program.
+ */
+struct ProgramElement : public IRNode {
+ enum Kind {
+ kVar_Kind,
+ kFunction_Kind,
+ kInterfaceBlock_Kind,
+ kExtension_Kind
+ };
+
+ ProgramElement(Position position, Kind kind)
+ : INHERITED(position)
+ , fKind(kind) {}
+
+ Kind fKind;
+
+ typedef IRNode INHERITED;
+};
+
+} // namespace
+
+#endif
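Consumers of a Program (for example a code generator) typically iterate its top-level elements and branch on ProgramElement::Kind. The sketch below illustrates that dispatch pattern with simplified stand-in types rather than the actual SkSL classes, whose constructors require Position, Type and Context objects.

```cpp
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

// Simplified stand-in for SkSL::ProgramElement: a kind tag plus a name.
struct Element {
    enum Kind { kVar_Kind, kFunction_Kind, kInterfaceBlock_Kind, kExtension_Kind };
    Element(Kind kind, std::string name) : fKind(kind), fName(std::move(name)) {}
    Kind fKind;
    std::string fName;
};

// A back end walks the top-level elements in order and dispatches on the kind tag.
static void emit(const std::vector<std::unique_ptr<Element>>& elements) {
    for (const auto& e : elements) {
        switch (e->fKind) {
            case Element::kVar_Kind:      std::cout << "global var " << e->fName << "\n"; break;
            case Element::kFunction_Kind: std::cout << "function "   << e->fName << "\n"; break;
            default:                      std::cout << "other element " << e->fName << "\n"; break;
        }
    }
}

int main() {
    std::vector<std::unique_ptr<Element>> elements;
    elements.push_back(std::make_unique<Element>(Element::kVar_Kind, "color"));
    elements.push_back(std::make_unique<Element>(Element::kFunction_Kind, "main"));
    emit(elements);
}
```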
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLReturnStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLReturnStatement.h
new file mode 100644
index 000000000..ec2226cc5
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLReturnStatement.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_RETURNSTATEMENT
+#define SKSL_RETURNSTATEMENT
+
+#include "SkSLExpression.h"
+#include "SkSLStatement.h"
+
+namespace SkSL {
+
+/**
+ * A 'return' statement.
+ */
+struct ReturnStatement : public Statement {
+ ReturnStatement(Position position)
+ : INHERITED(position, kReturn_Kind) {}
+
+ ReturnStatement(std::unique_ptr<Expression> expression)
+ : INHERITED(expression->fPosition, kReturn_Kind)
+ , fExpression(std::move(expression)) {}
+
+ std::string description() const override {
+ if (fExpression) {
+ return "return " + fExpression->description() + ";";
+ } else {
+ return "return;";
+ }
+ }
+
+ const std::unique_ptr<Expression> fExpression;
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLStatement.h
new file mode 100644
index 000000000..012311fdd
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLStatement.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_STATEMENT
+#define SKSL_STATEMENT
+
+#include "SkSLIRNode.h"
+#include "SkSLType.h"
+
+namespace SkSL {
+
+/**
+ * Abstract supertype of all statements.
+ */
+struct Statement : public IRNode {
+ enum Kind {
+ kBlock_Kind,
+ kBreak_Kind,
+ kContinue_Kind,
+ kDiscard_Kind,
+ kDo_Kind,
+ kExpression_Kind,
+ kFor_Kind,
+ kIf_Kind,
+ kReturn_Kind,
+ kVarDeclarations_Kind,
+ kWhile_Kind
+ };
+
+ Statement(Position position, Kind kind)
+ : INHERITED(position)
+ , fKind(kind) {}
+
+ const Kind fKind;
+
+ typedef IRNode INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSwizzle.h b/gfx/skia/skia/src/sksl/ir/SkSLSwizzle.h
new file mode 100644
index 000000000..0eb4a00dc
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSwizzle.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SWIZZLE
+#define SKSL_SWIZZLE
+
+#include "SkSLExpression.h"
+#include "SkSLUtil.h"
+
+namespace SkSL {
+
+/**
+ * Given a type and a swizzle component count, returns the type that will result from swizzling. For
+ * instance, swizzling a vec3 with two components will result in a vec2. It is possible to swizzle
+ * with more components than the source vector, as in 'vec2(1).xxxx'.
+ */
+static const Type& get_type(const Context& context, Expression& value, size_t count) {
+ const Type& base = value.fType.componentType();
+ if (count == 1) {
+ return base;
+ }
+ if (base == *context.fFloat_Type) {
+ switch (count) {
+ case 2: return *context.fVec2_Type;
+ case 3: return *context.fVec3_Type;
+ case 4: return *context.fVec4_Type;
+ }
+ } else if (base == *context.fDouble_Type) {
+ switch (count) {
+ case 2: return *context.fDVec2_Type;
+ case 3: return *context.fDVec3_Type;
+ case 4: return *context.fDVec4_Type;
+ }
+ } else if (base == *context.fInt_Type) {
+ switch (count) {
+ case 2: return *context.fIVec2_Type;
+ case 3: return *context.fIVec3_Type;
+ case 4: return *context.fIVec4_Type;
+ }
+ } else if (base == *context.fUInt_Type) {
+ switch (count) {
+ case 2: return *context.fUVec2_Type;
+ case 3: return *context.fUVec3_Type;
+ case 4: return *context.fUVec4_Type;
+ }
+ } else if (base == *context.fBool_Type) {
+ switch (count) {
+ case 2: return *context.fBVec2_Type;
+ case 3: return *context.fBVec3_Type;
+ case 4: return *context.fBVec4_Type;
+ }
+ }
+ ABORT("cannot swizzle %s\n", value.description().c_str());
+}
+
+/**
+ * Represents a vector swizzle operation such as 'vec3(1, 2, 3).zyx'.
+ */
+struct Swizzle : public Expression {
+ Swizzle(const Context& context, std::unique_ptr<Expression> base, std::vector<int> components)
+ : INHERITED(base->fPosition, kSwizzle_Kind, get_type(context, *base, components.size()))
+ , fBase(std::move(base))
+ , fComponents(std::move(components)) {
+ ASSERT(fComponents.size() >= 1 && fComponents.size() <= 4);
+ }
+
+ std::string description() const override {
+ std::string result = fBase->description() + ".";
+ for (int x : fComponents) {
+ result += "xyzw"[x];
+ }
+ return result;
+ }
+
+ const std::unique_ptr<Expression> fBase;
+ const std::vector<int> fComponents;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
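get_type() above encodes a simple rule: one swizzle component yields the scalar component type, and two to four components yield the correspondingly sized vector of that component type, while description() maps component indices onto the letters "xyzw". A standalone sketch of both rules, shown for the float case and operating on type names instead of SkSL Type objects:

```cpp
#include <cassert>
#include <iostream>
#include <string>
#include <vector>

// Mirror of the get_type() rule for the float case: one component gives the scalar
// type, two to four components give vec2/vec3/vec4.
static std::string swizzle_result_type(const std::string& componentType, size_t count) {
    assert(count >= 1 && count <= 4);
    if (count == 1) {
        return componentType;
    }
    return "vec" + std::to_string(count);
}

// Mirror of Swizzle::description(): component indices map onto the letters "xyzw".
static std::string swizzle_description(const std::string& base, const std::vector<int>& components) {
    std::string result = base + ".";
    for (int c : components) {
        result += "xyzw"[c];
    }
    return result;
}

int main() {
    // Swizzling a vec3 with two components produces a vec2, as the comment above notes.
    std::cout << swizzle_result_type("float", 2) << "\n";               // vec2
    // More components than the source vector is allowed, e.g. vec2(1).xxxx.
    std::cout << swizzle_description("vec2(1)", {0, 0, 0, 0}) << "\n";  // vec2(1).xxxx
}
```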
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSymbol.h b/gfx/skia/skia/src/sksl/ir/SkSLSymbol.h
new file mode 100644
index 000000000..d736516bc
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSymbol.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SYMBOL
+#define SKSL_SYMBOL
+
+#include "SkSLIRNode.h"
+
+namespace SkSL {
+
+/**
+ * Represents a symbol table entry.
+ */
+struct Symbol : public IRNode {
+ enum Kind {
+ kFunctionDeclaration_Kind,
+ kUnresolvedFunction_Kind,
+ kType_Kind,
+ kVariable_Kind,
+ kField_Kind
+ };
+
+ Symbol(Position position, Kind kind, std::string name)
+ : INHERITED(position)
+ , fKind(kind)
+ , fName(std::move(name)) {}
+
+ const Kind fKind;
+ const std::string fName;
+
+ typedef IRNode INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.cpp b/gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.cpp
new file mode 100644
index 000000000..9d8c0063c
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkSLSymbolTable.h"
+#include "SkSLUnresolvedFunction.h"
+
+namespace SkSL {
+
+std::vector<const FunctionDeclaration*> SymbolTable::GetFunctions(const Symbol& s) {
+ switch (s.fKind) {
+ case Symbol::kFunctionDeclaration_Kind:
+ return { &((FunctionDeclaration&) s) };
+ case Symbol::kUnresolvedFunction_Kind:
+ return ((UnresolvedFunction&) s).fFunctions;
+ default:
+ return std::vector<const FunctionDeclaration*>();
+ }
+}
+
+const Symbol* SymbolTable::operator[](const std::string& name) {
+ const auto& entry = fSymbols.find(name);
+ if (entry == fSymbols.end()) {
+ if (fParent) {
+ return (*fParent)[name];
+ }
+ return nullptr;
+ }
+ if (fParent) {
+ auto functions = GetFunctions(*entry->second);
+ if (functions.size() > 0) {
+ bool modified = false;
+ const Symbol* previous = (*fParent)[name];
+ if (previous) {
+ auto previousFunctions = GetFunctions(*previous);
+ for (const FunctionDeclaration* prev : previousFunctions) {
+ bool found = false;
+ for (const FunctionDeclaration* current : functions) {
+ if (current->matches(*prev)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ functions.push_back(prev);
+ modified = true;
+ }
+ }
+ if (modified) {
+ ASSERT(functions.size() > 1);
+ return this->takeOwnership(new UnresolvedFunction(functions));
+ }
+ }
+ }
+ }
+ return entry->second;
+}
+
+Symbol* SymbolTable::takeOwnership(Symbol* s) {
+ fOwnedPointers.push_back(std::unique_ptr<Symbol>(s));
+ return s;
+}
+
+void SymbolTable::add(const std::string& name, std::unique_ptr<Symbol> symbol) {
+ this->addWithoutOwnership(name, symbol.get());
+ fOwnedPointers.push_back(std::move(symbol));
+}
+
+void SymbolTable::addWithoutOwnership(const std::string& name, const Symbol* symbol) {
+ const auto& existing = fSymbols.find(name);
+ if (existing == fSymbols.end()) {
+ fSymbols[name] = symbol;
+ } else if (symbol->fKind == Symbol::kFunctionDeclaration_Kind) {
+ const Symbol* oldSymbol = existing->second;
+ if (oldSymbol->fKind == Symbol::kFunctionDeclaration_Kind) {
+ std::vector<const FunctionDeclaration*> functions;
+ functions.push_back((const FunctionDeclaration*) oldSymbol);
+ functions.push_back((const FunctionDeclaration*) symbol);
+ UnresolvedFunction* u = new UnresolvedFunction(std::move(functions));
+ fSymbols[name] = u;
+ this->takeOwnership(u);
+ } else if (oldSymbol->fKind == Symbol::kUnresolvedFunction_Kind) {
+ std::vector<const FunctionDeclaration*> functions;
+ for (const auto* f : ((UnresolvedFunction&) *oldSymbol).fFunctions) {
+ functions.push_back(f);
+ }
+ functions.push_back((const FunctionDeclaration*) symbol);
+ UnresolvedFunction* u = new UnresolvedFunction(std::move(functions));
+ fSymbols[name] = u;
+ this->takeOwnership(u);
+ }
+ } else {
+ fErrorReporter.error(symbol->fPosition, "symbol '" + name + "' was already defined");
+ }
+}
+
+} // namespace
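The subtle part of operator[] above is that a function found in the current scope is not returned as-is: overloads visible in the parent scope whose signatures are not shadowed by a matching declaration are merged in, and an UnresolvedFunction is produced when more than one candidate survives. The following much-simplified standalone sketch shows the merge idea using plain strings in place of FunctionDeclaration signatures:

```cpp
#include <algorithm>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

// Each scope maps a function name to the signatures declared in that scope.
// Signatures are plain strings here; SkSL compares them with FunctionDeclaration::matches().
using Scope = std::unordered_map<std::string, std::vector<std::string>>;

// Look up 'name', merging in any parent-scope overloads whose signature is not already
// present in the inner scope -- the same idea as SymbolTable::operator[].
static std::vector<std::string> lookup(const Scope& inner, const Scope& parent,
                                       const std::string& name) {
    std::vector<std::string> result;
    auto it = inner.find(name);
    if (it != inner.end()) {
        result = it->second;
    }
    auto pit = parent.find(name);
    if (pit != parent.end()) {
        for (const std::string& sig : pit->second) {
            if (std::find(result.begin(), result.end(), sig) == result.end()) {
                result.push_back(sig);  // parent overload not shadowed: keep it visible
            }
        }
    }
    return result;
}

int main() {
    Scope parent = { { "clamp", { "clamp(float,float,float)", "clamp(vec2,vec2,vec2)" } } };
    Scope inner  = { { "clamp", { "clamp(float,float,float)" } } };
    for (const std::string& sig : lookup(inner, parent, "clamp")) {
        std::cout << sig << "\n";  // both overloads remain callable in the inner scope
    }
}
```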
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.h b/gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.h
new file mode 100644
index 000000000..d732023ff
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SYMBOLTABLE
+#define SKSL_SYMBOLTABLE
+
+#include <memory>
+#include <unordered_map>
+#include <vector>
+#include "SkSLErrorReporter.h"
+#include "SkSLSymbol.h"
+
+namespace SkSL {
+
+struct FunctionDeclaration;
+
+/**
+ * Maps identifiers to symbols. Functions, in particular, are mapped to either FunctionDeclaration
+ * or UnresolvedFunction depending on whether they are overloaded or not.
+ */
+class SymbolTable {
+public:
+ SymbolTable(ErrorReporter& errorReporter)
+ : fErrorReporter(errorReporter) {}
+
+ SymbolTable(std::shared_ptr<SymbolTable> parent, ErrorReporter& errorReporter)
+ : fParent(parent)
+ , fErrorReporter(errorReporter) {}
+
+ const Symbol* operator[](const std::string& name);
+
+ void add(const std::string& name, std::unique_ptr<Symbol> symbol);
+
+ void addWithoutOwnership(const std::string& name, const Symbol* symbol);
+
+ Symbol* takeOwnership(Symbol* s);
+
+ const std::shared_ptr<SymbolTable> fParent;
+
+private:
+ static std::vector<const FunctionDeclaration*> GetFunctions(const Symbol& s);
+
+ std::vector<std::unique_ptr<Symbol>> fOwnedPointers;
+
+ std::unordered_map<std::string, const Symbol*> fSymbols;
+
+ ErrorReporter& fErrorReporter;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLTernaryExpression.h b/gfx/skia/skia/src/sksl/ir/SkSLTernaryExpression.h
new file mode 100644
index 000000000..bfaf304e5
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLTernaryExpression.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_TERNARYEXPRESSION
+#define SKSL_TERNARYEXPRESSION
+
+#include "SkSLExpression.h"
+#include "../SkSLPosition.h"
+
+namespace SkSL {
+
+/**
+ * A ternary expression (test ? ifTrue : ifFalse).
+ */
+struct TernaryExpression : public Expression {
+ TernaryExpression(Position position, std::unique_ptr<Expression> test,
+ std::unique_ptr<Expression> ifTrue, std::unique_ptr<Expression> ifFalse)
+ : INHERITED(position, kTernary_Kind, ifTrue->fType)
+ , fTest(std::move(test))
+ , fIfTrue(std::move(ifTrue))
+ , fIfFalse(std::move(ifFalse)) {
+ ASSERT(fIfTrue->fType == fIfFalse->fType);
+ }
+
+ std::string description() const override {
+ return "(" + fTest->description() + " ? " + fIfTrue->description() + " : " +
+ fIfFalse->description() + ")";
+ }
+
+ const std::unique_ptr<Expression> fTest;
+ const std::unique_ptr<Expression> fIfTrue;
+ const std::unique_ptr<Expression> fIfFalse;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLType.cpp b/gfx/skia/skia/src/sksl/ir/SkSLType.cpp
new file mode 100644
index 000000000..d28c4f066
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLType.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkSLType.h"
+#include "SkSLContext.h"
+
+namespace SkSL {
+
+bool Type::determineCoercionCost(const Type& other, int* outCost) const {
+ if (*this == other) {
+ *outCost = 0;
+ return true;
+ }
+ if (this->kind() == kVector_Kind && other.kind() == kVector_Kind) {
+ if (this->columns() == other.columns()) {
+ return this->componentType().determineCoercionCost(other.componentType(), outCost);
+ }
+ return false;
+ }
+ if (this->kind() == kMatrix_Kind) {
+ if (this->columns() == other.columns() &&
+ this->rows() == other.rows()) {
+ return this->componentType().determineCoercionCost(other.componentType(), outCost);
+ }
+ return false;
+ }
+ for (size_t i = 0; i < fCoercibleTypes.size(); i++) {
+ if (*fCoercibleTypes[i] == other) {
+ *outCost = (int) i + 1;
+ return true;
+ }
+ }
+ return false;
+}
+
+const Type& Type::toCompound(const Context& context, int columns, int rows) const {
+ ASSERT(this->kind() == Type::kScalar_Kind);
+ if (columns == 1 && rows == 1) {
+ return *this;
+ }
+ if (*this == *context.fFloat_Type) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 2: return *context.fVec2_Type;
+ case 3: return *context.fVec3_Type;
+ case 4: return *context.fVec4_Type;
+ default: ABORT("unsupported vector column count (%d)", columns);
+ }
+ case 2:
+ switch (columns) {
+ case 2: return *context.fMat2x2_Type;
+ case 3: return *context.fMat3x2_Type;
+ case 4: return *context.fMat4x2_Type;
+ default: ABORT("unsupported matrix column count (%d)", columns);
+ }
+ case 3:
+ switch (columns) {
+ case 2: return *context.fMat2x3_Type;
+ case 3: return *context.fMat3x3_Type;
+ case 4: return *context.fMat4x3_Type;
+ default: ABORT("unsupported matrix column count (%d)", columns);
+ }
+ case 4:
+ switch (columns) {
+ case 2: return *context.fMat2x4_Type;
+ case 3: return *context.fMat3x4_Type;
+ case 4: return *context.fMat4x4_Type;
+ default: ABORT("unsupported matrix column count (%d)", columns);
+ }
+ default: ABORT("unsupported row count (%d)", rows);
+ }
+ } else if (*this == *context.fDouble_Type) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 2: return *context.fDVec2_Type;
+ case 3: return *context.fDVec3_Type;
+ case 4: return *context.fDVec4_Type;
+ default: ABORT("unsupported vector column count (%d)", columns);
+ }
+ case 2:
+ switch (columns) {
+ case 2: return *context.fDMat2x2_Type;
+ case 3: return *context.fDMat3x2_Type;
+ case 4: return *context.fDMat4x2_Type;
+ default: ABORT("unsupported matrix column count (%d)", columns);
+ }
+ case 3:
+ switch (columns) {
+ case 2: return *context.fDMat2x3_Type;
+ case 3: return *context.fDMat3x3_Type;
+ case 4: return *context.fDMat4x3_Type;
+ default: ABORT("unsupported matrix column count (%d)", columns);
+ }
+ case 4:
+ switch (columns) {
+ case 2: return *context.fDMat2x4_Type;
+ case 3: return *context.fDMat3x4_Type;
+ case 4: return *context.fDMat4x4_Type;
+ default: ABORT("unsupported matrix column count (%d)", columns);
+ }
+ default: ABORT("unsupported row count (%d)", rows);
+ }
+ } else if (*this == *context.fInt_Type) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 2: return *context.fIVec2_Type;
+ case 3: return *context.fIVec3_Type;
+ case 4: return *context.fIVec4_Type;
+ default: ABORT("unsupported vector column count (%d)", columns);
+ }
+ default: ABORT("unsupported row count (%d)", rows);
+ }
+ } else if (*this == *context.fUInt_Type) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 2: return *context.fUVec2_Type;
+ case 3: return *context.fUVec3_Type;
+ case 4: return *context.fUVec4_Type;
+ default: ABORT("unsupported vector column count (%d)", columns);
+ }
+ default: ABORT("unsupported row count (%d)", rows);
+ }
+ }
+ ABORT("unsupported scalar_to_compound type %s", this->description().c_str());
+}
+
+} // namespace
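toCompound() is essentially a table lookup from (scalar base type, columns, rows) to one of the named vector or matrix types owned by the Context. A standalone sketch of the same mapping for a float base type, producing GLSL-style type names rather than Context-owned Type objects:

```cpp
#include <iostream>
#include <stdexcept>
#include <string>

// Mirrors Type::toCompound() for a float base type: rows == 1 selects a vector,
// rows > 1 selects a matrix named matNxM (columns x rows), and (1, 1) is the scalar.
static std::string to_compound_name(int columns, int rows) {
    if (columns == 1 && rows == 1) {
        return "float";
    }
    if (columns < 2 || columns > 4 || rows < 1 || rows > 4) {
        throw std::invalid_argument("unsupported column/row count");
    }
    if (rows == 1) {
        return "vec" + std::to_string(columns);
    }
    return "mat" + std::to_string(columns) + "x" + std::to_string(rows);
}

int main() {
    std::cout << to_compound_name(3, 1) << "\n";  // vec3
    std::cout << to_compound_name(4, 2) << "\n";  // mat4x2, cf. *context.fMat4x2_Type
    std::cout << to_compound_name(1, 1) << "\n";  // float (the scalar itself)
}
```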
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLType.h b/gfx/skia/skia/src/sksl/ir/SkSLType.h
new file mode 100644
index 000000000..ad2185b4e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLType.h
@@ -0,0 +1,345 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKIASL_TYPE
+#define SKIASL_TYPE
+
+#include "SkSLModifiers.h"
+#include "SkSLSymbol.h"
+#include "../SkSLPosition.h"
+#include "../SkSLUtil.h"
+#include "../spirv.h"
+#include <vector>
+#include <memory>
+
+namespace SkSL {
+
+class Context;
+
+/**
+ * Represents a type, such as int or vec4.
+ */
+class Type : public Symbol {
+public:
+ struct Field {
+ Field(Modifiers modifiers, std::string name, const Type* type)
+ : fModifiers(modifiers)
+ , fName(std::move(name))
+ , fType(std::move(type)) {}
+
+ const std::string description() const {
+ return fType->description() + " " + fName + ";";
+ }
+
+ Modifiers fModifiers;
+ std::string fName;
+ const Type* fType;
+ };
+
+ enum Kind {
+ kScalar_Kind,
+ kVector_Kind,
+ kMatrix_Kind,
+ kArray_Kind,
+ kStruct_Kind,
+ kGeneric_Kind,
+ kSampler_Kind,
+ kOther_Kind
+ };
+
+ // Create an "other" (special) type with the given name. These types cannot be directly
+ // referenced from user code.
+ Type(std::string name)
+ : INHERITED(Position(), kType_Kind, std::move(name))
+ , fTypeKind(kOther_Kind) {}
+
+ // Create a generic type which maps to the listed types.
+ Type(std::string name, std::vector<const Type*> types)
+ : INHERITED(Position(), kType_Kind, std::move(name))
+ , fTypeKind(kGeneric_Kind)
+ , fCoercibleTypes(std::move(types)) {
+ ASSERT(fCoercibleTypes.size() == 4);
+ }
+
+ // Create a struct type with the given fields.
+ Type(std::string name, std::vector<Field> fields)
+ : INHERITED(Position(), kType_Kind, std::move(name))
+ , fTypeKind(kStruct_Kind)
+ , fFields(std::move(fields)) {}
+
+ // Create a scalar type.
+ Type(std::string name, bool isNumber)
+ : INHERITED(Position(), kType_Kind, std::move(name))
+ , fTypeKind(kScalar_Kind)
+ , fIsNumber(isNumber)
+ , fColumns(1)
+ , fRows(1) {}
+
+ // Create a scalar type which can be coerced to the listed types.
+ Type(std::string name, bool isNumber, std::vector<const Type*> coercibleTypes)
+ : INHERITED(Position(), kType_Kind, std::move(name))
+ , fTypeKind(kScalar_Kind)
+ , fIsNumber(isNumber)
+ , fCoercibleTypes(std::move(coercibleTypes))
+ , fColumns(1)
+ , fRows(1) {}
+
+ // Create a vector type.
+ Type(std::string name, const Type& componentType, int columns)
+ : Type(name, kVector_Kind, componentType, columns) {}
+
+ // Create a vector or array type.
+ Type(std::string name, Kind kind, const Type& componentType, int columns)
+ : INHERITED(Position(), kType_Kind, std::move(name))
+ , fTypeKind(kind)
+ , fComponentType(&componentType)
+ , fColumns(columns)
+ , fRows(1)
+ , fDimensions(SpvDim1D) {}
+
+ // Create a matrix type.
+ Type(std::string name, const Type& componentType, int columns, int rows)
+ : INHERITED(Position(), kType_Kind, std::move(name))
+ , fTypeKind(kMatrix_Kind)
+ , fComponentType(&componentType)
+ , fColumns(columns)
+ , fRows(rows)
+ , fDimensions(SpvDim1D) {}
+
+ // Create a sampler type.
+ Type(std::string name, SpvDim_ dimensions, bool isDepth, bool isArrayed, bool isMultisampled,
+ bool isSampled)
+ : INHERITED(Position(), kType_Kind, std::move(name))
+ , fTypeKind(kSampler_Kind)
+ , fDimensions(dimensions)
+ , fIsDepth(isDepth)
+ , fIsArrayed(isArrayed)
+ , fIsMultisampled(isMultisampled)
+ , fIsSampled(isSampled) {}
+
+ std::string name() const {
+ return fName;
+ }
+
+ std::string description() const override {
+ return fName;
+ }
+
+ bool operator==(const Type& other) const {
+ return fName == other.fName;
+ }
+
+ bool operator!=(const Type& other) const {
+ return fName != other.fName;
+ }
+
+ /**
+ * Returns the category (scalar, vector, matrix, etc.) of this type.
+ */
+ Kind kind() const {
+ return fTypeKind;
+ }
+
+ /**
+ * Returns true if this is a numeric scalar type.
+ */
+ bool isNumber() const {
+ return fIsNumber;
+ }
+
+ /**
+ * Returns true if an instance of this type can be freely coerced (implicitly converted) to
+ * another type.
+ */
+ bool canCoerceTo(const Type& other) const {
+ int cost;
+ return determineCoercionCost(other, &cost);
+ }
+
+ /**
+ * Determines the "cost" of coercing (implicitly converting) this type to another type. The cost
+ * is a number with no particular meaning other than that lower costs are preferable to higher
+ * costs. Returns true if a conversion is possible, false otherwise. The value of the out
+ * parameter is undefined if false is returned.
+ */
+ bool determineCoercionCost(const Type& other, int* outCost) const;
+
+ /**
+ * For matrices and vectors, returns the type of individual cells (e.g. mat2 has a component
+ * type of kFloat_Type). For all other types, causes an assertion failure.
+ */
+ const Type& componentType() const {
+ ASSERT(fComponentType);
+ return *fComponentType;
+ }
+
+ /**
+ * For matrices and vectors, returns the number of columns (e.g. both mat3 and vec3 return 3).
+ * For scalars, returns 1. For arrays, returns either the size of the array (if known) or -1.
+ * For all other types, causes an assertion failure.
+ */
+ int columns() const {
+ ASSERT(fTypeKind == kScalar_Kind || fTypeKind == kVector_Kind ||
+ fTypeKind == kMatrix_Kind || fTypeKind == kArray_Kind);
+ return fColumns;
+ }
+
+ /**
+ * For matrices, returns the number of rows (e.g. mat2x4 returns 4). For vectors and scalars,
+ * returns 1. For all other types, causes an assertion failure.
+ */
+ int rows() const {
+ ASSERT(fRows > 0);
+ return fRows;
+ }
+
+ const std::vector<Field>& fields() const {
+ ASSERT(fTypeKind == kStruct_Kind);
+ return fFields;
+ }
+
+ /**
+ * For generic types, returns the types that this generic type can substitute for. For other
+ * types, returns a list of other types that this type can be coerced into.
+ */
+ const std::vector<const Type*>& coercibleTypes() const {
+ ASSERT(fCoercibleTypes.size() > 0);
+ return fCoercibleTypes;
+ }
+
+ int dimensions() const {
+ ASSERT(fTypeKind == kSampler_Kind);
+ return fDimensions;
+ }
+
+ bool isDepth() const {
+ ASSERT(fTypeKind == kSampler_Kind);
+ return fIsDepth;
+ }
+
+ bool isArrayed() const {
+ ASSERT(fTypeKind == kSampler_Kind);
+ return fIsArrayed;
+ }
+
+ bool isMultisampled() const {
+ ASSERT(fTypeKind == kSampler_Kind);
+ return fIsMultisampled;
+ }
+
+ bool isSampled() const {
+ ASSERT(fTypeKind == kSampler_Kind);
+ return fIsSampled;
+ }
+
+ static size_t vector_alignment(size_t componentSize, int columns) {
+ return componentSize * (columns + columns % 2);
+ }
+
+ /**
+ * Returns the type's required alignment (when putting this type into a struct, the offset must
+ * be a multiple of the alignment).
+ */
+ size_t alignment() const {
+ // See OpenGL Spec 7.6.2.2 Standard Uniform Block Layout
+ switch (fTypeKind) {
+ case kScalar_Kind:
+ return this->size();
+ case kVector_Kind:
+ return vector_alignment(fComponentType->size(), fColumns);
+ case kMatrix_Kind:
+ return (vector_alignment(fComponentType->size(), fRows) + 15) & ~15;
+ case kArray_Kind:
+ // round up to next multiple of 16
+ return (fComponentType->alignment() + 15) & ~15;
+ case kStruct_Kind: {
+ size_t result = 16;
+ for (size_t i = 0; i < fFields.size(); i++) {
+ size_t alignment = fFields[i].fType->alignment();
+ if (alignment > result) {
+ result = alignment;
+ }
+                }
+                return result;
+            }
+ default:
+ ABORT(("cannot determine size of type " + fName).c_str());
+ }
+ }
+
+ /**
+ * For matrices and arrays, returns the number of bytes from the start of one entry (row, in
+ * the case of matrices) to the start of the next.
+ */
+ size_t stride() const {
+ switch (fTypeKind) {
+ case kMatrix_Kind: // fall through
+ case kArray_Kind:
+ return this->alignment();
+ default:
+ ABORT("type does not have a stride");
+ }
+ }
+
+ /**
+ * Returns the size of this type in bytes.
+ */
+ size_t size() const {
+ switch (fTypeKind) {
+ case kScalar_Kind:
+ // FIXME need to take precision into account, once we figure out how we want to
+ // handle it...
+ return 4;
+ case kVector_Kind:
+ return fColumns * fComponentType->size();
+ case kMatrix_Kind:
+ return vector_alignment(fComponentType->size(), fRows) * fColumns;
+ case kArray_Kind:
+ return fColumns * this->stride();
+ case kStruct_Kind: {
+ size_t total = 0;
+ for (size_t i = 0; i < fFields.size(); i++) {
+ size_t alignment = fFields[i].fType->alignment();
+ if (total % alignment != 0) {
+ total += alignment - total % alignment;
+ }
+                    ASSERT(total % alignment == 0);
+ total += fFields[i].fType->size();
+ }
+ return total;
+ }
+ default:
+ ABORT(("cannot determine size of type " + fName).c_str());
+ }
+ }
+
+ /**
+ * Returns the corresponding vector or matrix type with the specified number of columns and
+ * rows.
+ */
+ const Type& toCompound(const Context& context, int columns, int rows) const;
+
+private:
+ typedef Symbol INHERITED;
+
+ const Kind fTypeKind;
+ const bool fIsNumber = false;
+ const Type* fComponentType = nullptr;
+ const std::vector<const Type*> fCoercibleTypes;
+ const int fColumns = -1;
+ const int fRows = -1;
+ const std::vector<Field> fFields;
+ const SpvDim_ fDimensions = SpvDim1D;
+ const bool fIsDepth = false;
+ const bool fIsArrayed = false;
+ const bool fIsMultisampled = false;
+ const bool fIsSampled = false;
+};
+
+} // namespace
+
+#endif
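alignment(), size() and stride() above follow the std140-style layout rules referenced by the OpenGL spec comment: vectors with an odd component count align as if they had one more component (so a float vec3 aligns to 16), matrix and array strides round up to a multiple of 16, and struct members are padded up to their own alignment. A standalone sketch of those rules for 4-byte float components, independent of the SkSL Type class:

```cpp
#include <cstddef>
#include <iostream>

// vector_alignment from SkSLType.h: componentSize * (columns + columns % 2),
// i.e. a float vec3 aligns to 16 just like a vec4.
static std::size_t vector_alignment(std::size_t componentSize, int columns) {
    return componentSize * (columns + columns % 2);
}

// Matrix alignment (and hence stride) per the code above: the per-row vector
// alignment, rounded up to the next multiple of 16.
static std::size_t matrix_alignment(std::size_t componentSize, int rows) {
    return (vector_alignment(componentSize, rows) + 15) & ~std::size_t(15);
}

// Offset of a struct member: pad the running total up to the member's alignment,
// as size() does for kStruct_Kind.
static std::size_t place_member(std::size_t offset, std::size_t alignment) {
    if (offset % alignment != 0) {
        offset += alignment - offset % alignment;
    }
    return offset;
}

int main() {
    std::cout << vector_alignment(4, 3) << "\n";  // 16: vec3 aligns like vec4
    std::cout << vector_alignment(4, 2) << "\n";  // 8:  vec2
    std::cout << matrix_alignment(4, 3) << "\n";  // 16: matrix with 3 rows, e.g. mat2x3

    // struct { float a; vec3 b; }: 'b' is pushed from offset 4 up to offset 16.
    std::size_t offset = 4;                        // after the leading float
    std::cout << place_member(offset, 16) << "\n"; // 16
}
```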
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLTypeReference.h b/gfx/skia/skia/src/sksl/ir/SkSLTypeReference.h
new file mode 100644
index 000000000..10f36aa24
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLTypeReference.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_TYPEREFERENCE
+#define SKSL_TYPEREFERENCE
+
+#include "SkSLContext.h"
+#include "SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * Represents an identifier referring to a type. This is an intermediate value: TypeReferences are
+ * always eventually replaced by Constructors in valid programs.
+ */
+struct TypeReference : public Expression {
+ TypeReference(const Context& context, Position position, const Type& type)
+ : INHERITED(position, kTypeReference_Kind, *context.fInvalid_Type)
+ , fValue(type) {}
+
+ std::string description() const override {
+ ASSERT(false);
+ return "<type>";
+ }
+
+ const Type& fValue;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLUnresolvedFunction.h b/gfx/skia/skia/src/sksl/ir/SkSLUnresolvedFunction.h
new file mode 100644
index 000000000..7e8a3601e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLUnresolvedFunction.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_UNRESOLVEDFUNCTION
+#define SKSL_UNRESOLVEDFUNCTION
+
+#include "SkSLFunctionDeclaration.h"
+
+namespace SkSL {
+
+/**
+ * A symbol representing multiple functions with the same name.
+ */
+struct UnresolvedFunction : public Symbol {
+ UnresolvedFunction(std::vector<const FunctionDeclaration*> funcs)
+ : INHERITED(Position(), kUnresolvedFunction_Kind, funcs[0]->fName)
+ , fFunctions(std::move(funcs)) {
+#ifdef DEBUG
+        for (auto func : fFunctions) {
+ ASSERT(func->fName == fName);
+ }
+#endif
+ }
+
+ virtual std::string description() const override {
+ return fName;
+ }
+
+ const std::vector<const FunctionDeclaration*> fFunctions;
+
+ typedef Symbol INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLVarDeclaration.h b/gfx/skia/skia/src/sksl/ir/SkSLVarDeclaration.h
new file mode 100644
index 000000000..e64a874d6
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLVarDeclaration.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_VARDECLARATIONS
+#define SKSL_VARDECLARATIONS
+
+#include "SkSLExpression.h"
+#include "SkSLStatement.h"
+#include "SkSLVariable.h"
+
+namespace SkSL {
+
+/**
+ * A single variable declaration within a var declaration statement. For instance, the statement
+ * 'int x = 2, y[3];' is a VarDeclarations statement containing two individual VarDeclaration
+ * instances.
+ */
+struct VarDeclaration {
+ VarDeclaration(const Variable* var,
+ std::vector<std::unique_ptr<Expression>> sizes,
+ std::unique_ptr<Expression> value)
+ : fVar(var)
+ , fSizes(std::move(sizes))
+ , fValue(std::move(value)) {}
+
+ std::string description() const {
+ std::string result = fVar->fName;
+ for (const auto& size : fSizes) {
+ if (size) {
+ result += "[" + size->description() + "]";
+ } else {
+ result += "[]";
+ }
+ }
+ if (fValue) {
+ result += " = " + fValue->description();
+ }
+ return result;
+ }
+
+ const Variable* fVar;
+ std::vector<std::unique_ptr<Expression>> fSizes;
+ std::unique_ptr<Expression> fValue;
+};
+
+/**
+ * A variable declaration statement, which may consist of one or more individual variables.
+ */
+struct VarDeclarations : public ProgramElement {
+ VarDeclarations(Position position, const Type* baseType,
+ std::vector<VarDeclaration> vars)
+ : INHERITED(position, kVar_Kind)
+ , fBaseType(*baseType)
+ , fVars(std::move(vars)) {}
+
+ std::string description() const override {
+ if (!fVars.size()) {
+ return "";
+ }
+ std::string result = fVars[0].fVar->fModifiers.description() + fBaseType.description() +
+ " ";
+ std::string separator = "";
+ for (const auto& var : fVars) {
+ result += separator;
+ separator = ", ";
+ result += var.description();
+ }
+ return result;
+ }
+
+ const Type& fBaseType;
+ const std::vector<VarDeclaration> fVars;
+
+ typedef ProgramElement INHERITED;
+};
+
+} // namespace
+
+#endif
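VarDeclarations::description() rebuilds source-like text: the shared base type (and modifiers) is printed once, and each declarator contributes its name, optional array sizes and optional initializer, separated by commas. The standalone sketch below reproduces that formatting with plain strings in place of the IR nodes; modifiers are omitted for brevity:

```cpp
#include <iostream>
#include <string>
#include <vector>

// Stand-in for a single VarDeclaration: a name, optional array sizes (an empty
// string models an unsized dimension), and an optional initializer expression.
struct Decl {
    std::string name;
    std::vector<std::string> sizes;
    std::string value;  // empty means "no initializer"
};

// Mirrors VarDeclarations::description(): base type once, declarators comma-separated.
static std::string describe(const std::string& baseType, const std::vector<Decl>& vars) {
    if (vars.empty()) {
        return "";
    }
    std::string result = baseType + " ";
    std::string separator;
    for (const Decl& d : vars) {
        result += separator;
        separator = ", ";
        result += d.name;
        for (const std::string& size : d.sizes) {
            result += "[" + size + "]";
        }
        if (!d.value.empty()) {
            result += " = " + d.value;
        }
    }
    return result;
}

int main() {
    // Matches the example in the comment above: 'int x = 2, y[3];'
    std::cout << describe("int", { { "x", {}, "2" }, { "y", { "3" }, "" } }) << ";\n";
}
```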
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLVarDeclarationStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLVarDeclarationStatement.h
new file mode 100644
index 000000000..59d37ab91
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLVarDeclarationStatement.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_VARDECLARATIONSTATEMENT
+#define SKSL_VARDECLARATIONSTATEMENT
+
+#include "SkSLStatement.h"
+#include "SkSLVarDeclaration.h"
+
+namespace SkSL {
+
+/**
+ * One or more variable declarations appearing as a statement within a function.
+ */
+struct VarDeclarationsStatement : public Statement {
+ VarDeclarationsStatement(std::unique_ptr<VarDeclarations> decl)
+ : INHERITED(decl->fPosition, kVarDeclarations_Kind)
+ , fDeclaration(std::move(decl)) {}
+
+ std::string description() const override {
+ return fDeclaration->description();
+ }
+
+ const std::shared_ptr<VarDeclarations> fDeclaration;
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLVariable.h b/gfx/skia/skia/src/sksl/ir/SkSLVariable.h
new file mode 100644
index 000000000..217b1006b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLVariable.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_VARIABLE
+#define SKSL_VARIABLE
+
+#include "SkSLModifiers.h"
+#include "SkSLPosition.h"
+#include "SkSLSymbol.h"
+#include "SkSLType.h"
+
+namespace SkSL {
+
+/**
+ * Represents a variable, whether local, global, or a function parameter. This represents the
+ * variable itself (the storage location), which is shared between all VariableReferences which
+ * read or write that storage location.
+ */
+struct Variable : public Symbol {
+ enum Storage {
+ kGlobal_Storage,
+ kLocal_Storage,
+ kParameter_Storage
+ };
+
+ Variable(Position position, Modifiers modifiers, std::string name, const Type& type,
+ Storage storage)
+ : INHERITED(position, kVariable_Kind, std::move(name))
+ , fModifiers(modifiers)
+ , fType(type)
+ , fStorage(storage)
+ , fIsReadFrom(false)
+ , fIsWrittenTo(false) {}
+
+ virtual std::string description() const override {
+ return fModifiers.description() + fType.fName + " " + fName;
+ }
+
+ mutable Modifiers fModifiers;
+ const Type& fType;
+ const Storage fStorage;
+
+ mutable bool fIsReadFrom;
+ mutable bool fIsWrittenTo;
+
+ typedef Symbol INHERITED;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLVariableReference.h b/gfx/skia/skia/src/sksl/ir/SkSLVariableReference.h
new file mode 100644
index 000000000..b443da1f2
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLVariableReference.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_VARIABLEREFERENCE
+#define SKSL_VARIABLEREFERENCE
+
+#include "SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * A reference to a variable, through which it can be read or written. In the statement:
+ *
+ * x = x + 1;
+ *
+ * there is only one Variable 'x', but two VariableReferences to it.
+ */
+struct VariableReference : public Expression {
+ VariableReference(Position position, const Variable& variable)
+ : INHERITED(position, kVariableReference_Kind, variable.fType)
+ , fVariable(variable) {}
+
+ std::string description() const override {
+ return fVariable.fName;
+ }
+
+ const Variable& fVariable;
+
+ typedef Expression INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLWhileStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLWhileStatement.h
new file mode 100644
index 000000000..1acb57258
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLWhileStatement.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_WHILESTATEMENT
+#define SKSL_WHILESTATEMENT
+
+#include "SkSLExpression.h"
+#include "SkSLStatement.h"
+
+namespace SkSL {
+
+/**
+ * A 'while' loop.
+ */
+struct WhileStatement : public Statement {
+ WhileStatement(Position position, std::unique_ptr<Expression> test,
+ std::unique_ptr<Statement> statement)
+ : INHERITED(position, kWhile_Kind)
+ , fTest(std::move(test))
+ , fStatement(std::move(statement)) {}
+
+ std::string description() const override {
+ return "while (" + fTest->description() + ") " + fStatement->description();
+ }
+
+ const std::unique_ptr<Expression> fTest;
+ const std::unique_ptr<Statement> fStatement;
+
+ typedef Statement INHERITED;
+};
+
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/lex.sksl.c b/gfx/skia/skia/src/sksl/lex.sksl.c
new file mode 100644
index 000000000..4993fac3a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex.sksl.c
@@ -0,0 +1,2505 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#line 3 "lex.sksl.c"
+
+#define YY_INT_ALIGNED short int
+
+/* A lexical scanner generated by flex */
+
+#define FLEX_SCANNER
+#define YY_FLEX_MAJOR_VERSION 2
+#define YY_FLEX_MINOR_VERSION 5
+#define YY_FLEX_SUBMINOR_VERSION 37
+#if YY_FLEX_SUBMINOR_VERSION > 0
+#define FLEX_BETA
+#endif
+
+/* First, we deal with platform-specific or compiler-specific issues. */
+
+/* begin standard C headers. */
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdlib.h>
+
+/* end standard C headers. */
+
+/* flex integer type definitions */
+
+#ifndef FLEXINT_H
+#define FLEXINT_H
+
+/* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */
+
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+
+/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h,
+ * if you want the limit (max/min) macros for int types.
+ */
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS 1
+#endif
+
+#include <inttypes.h>
+typedef int8_t flex_int8_t;
+typedef uint8_t flex_uint8_t;
+typedef int16_t flex_int16_t;
+typedef uint16_t flex_uint16_t;
+typedef int32_t flex_int32_t;
+typedef uint32_t flex_uint32_t;
+#else
+typedef signed char flex_int8_t;
+typedef short int flex_int16_t;
+typedef int flex_int32_t;
+typedef unsigned char flex_uint8_t;
+typedef unsigned short int flex_uint16_t;
+typedef unsigned int flex_uint32_t;
+
+/* Limits of integral types. */
+#ifndef INT8_MIN
+#define INT8_MIN (-128)
+#endif
+#ifndef INT16_MIN
+#define INT16_MIN (-32767-1)
+#endif
+#ifndef INT32_MIN
+#define INT32_MIN (-2147483647-1)
+#endif
+#ifndef INT8_MAX
+#define INT8_MAX (127)
+#endif
+#ifndef INT16_MAX
+#define INT16_MAX (32767)
+#endif
+#ifndef INT32_MAX
+#define INT32_MAX (2147483647)
+#endif
+#ifndef UINT8_MAX
+#define UINT8_MAX (255U)
+#endif
+#ifndef UINT16_MAX
+#define UINT16_MAX (65535U)
+#endif
+#ifndef UINT32_MAX
+#define UINT32_MAX (4294967295U)
+#endif
+
+#endif /* ! C99 */
+
+#endif /* ! FLEXINT_H */
+
+#ifdef __cplusplus
+
+/* The "const" storage-class-modifier is valid. */
+#define YY_USE_CONST
+
+#else /* ! __cplusplus */
+
+/* C99 requires __STDC__ to be defined as 1. */
+#if defined (__STDC__)
+
+#define YY_USE_CONST
+
+#endif /* defined (__STDC__) */
+#endif /* ! __cplusplus */
+
+#ifdef YY_USE_CONST
+#define yyconst const
+#else
+#define yyconst
+#endif
+
+/* Returned upon end-of-file. */
+#define YY_NULL 0
+
+/* Promotes a possibly negative, possibly signed char to an unsigned
+ * integer for use as an array index. If the signed char is negative,
+ * we want to instead treat it as an 8-bit unsigned char, hence the
+ * double cast.
+ */
+#define YY_SC_TO_UI(c) ((unsigned int) (unsigned char) c)
+
+/* An opaque pointer. */
+#ifndef YY_TYPEDEF_YY_SCANNER_T
+#define YY_TYPEDEF_YY_SCANNER_T
+typedef void* yyscan_t;
+#endif
+
+/* For convenience, these vars (plus the bison vars far below)
+ are macros in the reentrant scanner. */
+#define yyin yyg->yyin_r
+#define yyout yyg->yyout_r
+#define yyextra yyg->yyextra_r
+#define yyleng yyg->yyleng_r
+#define yytext yyg->yytext_r
+#define yylineno (YY_CURRENT_BUFFER_LVALUE->yy_bs_lineno)
+#define yycolumn (YY_CURRENT_BUFFER_LVALUE->yy_bs_column)
+#define yy_flex_debug yyg->yy_flex_debug_r
+
+/* Enter a start condition. This macro really ought to take a parameter,
+ * but we do it the disgusting crufty way forced on us by the ()-less
+ * definition of BEGIN.
+ */
+#define BEGIN yyg->yy_start = 1 + 2 *
+
+/* Translate the current start state into a value that can be later handed
+ * to BEGIN to return to the state. The YYSTATE alias is for lex
+ * compatibility.
+ */
+#define YY_START ((yyg->yy_start - 1) / 2)
+#define YYSTATE YY_START
+
+/* Action number for EOF rule of a given start state. */
+#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1)
+
+/* Special action meaning "start processing a new file". */
+#define YY_NEW_FILE skslrestart(yyin ,yyscanner )
+
+#define YY_END_OF_BUFFER_CHAR 0
+
+/* Size of default input buffer. */
+#ifndef YY_BUF_SIZE
+#define YY_BUF_SIZE 16384
+#endif
+
+/* The state buf must be large enough to hold one state per character in the main buffer.
+ */
+#define YY_STATE_BUF_SIZE ((YY_BUF_SIZE + 2) * sizeof(yy_state_type))
+
+#ifndef YY_TYPEDEF_YY_BUFFER_STATE
+#define YY_TYPEDEF_YY_BUFFER_STATE
+typedef struct yy_buffer_state *YY_BUFFER_STATE;
+#endif
+
+#ifndef YY_TYPEDEF_YY_SIZE_T
+#define YY_TYPEDEF_YY_SIZE_T
+typedef size_t yy_size_t;
+#endif
+
+#define EOB_ACT_CONTINUE_SCAN 0
+#define EOB_ACT_END_OF_FILE 1
+#define EOB_ACT_LAST_MATCH 2
+
+ /* Note: We specifically omit the test for yy_rule_can_match_eol because it requires
+ * access to the local variable yy_act. Since yyless() is a macro, it would break
+ * existing scanners that call yyless() from OUTSIDE sksllex.
+ * One obvious solution is to make yy_act a global. I tried that, and saw
+ * a 5% performance hit in a non-yylineno scanner, because yy_act is
+ * normally declared as a register variable-- so it is not worth it.
+ */
+ #define YY_LESS_LINENO(n) \
+ do { \
+ int yyl;\
+ for ( yyl = n; yyl < yyleng; ++yyl )\
+ if ( yytext[yyl] == '\n' )\
+ --yylineno;\
+ }while(0)
+
+/* Return all but the first "n" matched characters back to the input stream. */
+#define yyless(n) \
+ do \
+ { \
+ /* Undo effects of setting up yytext. */ \
+ int yyless_macro_arg = (n); \
+ YY_LESS_LINENO(yyless_macro_arg);\
+ *yy_cp = yyg->yy_hold_char; \
+ YY_RESTORE_YY_MORE_OFFSET \
+ yyg->yy_c_buf_p = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \
+ YY_DO_BEFORE_ACTION; /* set up yytext again */ \
+ } \
+ while ( 0 )
+
+#define unput(c) yyunput( c, yyg->yytext_ptr , yyscanner )
+
+#ifndef YY_STRUCT_YY_BUFFER_STATE
+#define YY_STRUCT_YY_BUFFER_STATE
+struct yy_buffer_state
+ {
+ FILE *yy_input_file;
+
+ char *yy_ch_buf; /* input buffer */
+ char *yy_buf_pos; /* current position in input buffer */
+
+ /* Size of input buffer in bytes, not including room for EOB
+ * characters.
+ */
+ yy_size_t yy_buf_size;
+
+ /* Number of characters read into yy_ch_buf, not including EOB
+ * characters.
+ */
+ yy_size_t yy_n_chars;
+
+ /* Whether we "own" the buffer - i.e., we know we created it,
+ * and can realloc() it to grow it, and should free() it to
+ * delete it.
+ */
+ int yy_is_our_buffer;
+
+ /* Whether this is an "interactive" input source; if so, and
+ * if we're using stdio for input, then we want to use getc()
+ * instead of fread(), to make sure we stop fetching input after
+ * each newline.
+ */
+ int yy_is_interactive;
+
+ /* Whether we're considered to be at the beginning of a line.
+ * If so, '^' rules will be active on the next match, otherwise
+ * not.
+ */
+ int yy_at_bol;
+
+ int yy_bs_lineno; /**< The line count. */
+ int yy_bs_column; /**< The column count. */
+
+ /* Whether to try to fill the input buffer when we reach the
+ * end of it.
+ */
+ int yy_fill_buffer;
+
+ int yy_buffer_status;
+
+#define YY_BUFFER_NEW 0
+#define YY_BUFFER_NORMAL 1
+ /* When an EOF's been seen but there's still some text to process
+ * then we mark the buffer as YY_EOF_PENDING, to indicate that we
+ * shouldn't try reading from the input source any more. We might
+ * still have a bunch of tokens to match, though, because of
+ * possible backing-up.
+ *
+ * When we actually see the EOF, we change the status to "new"
+ * (via skslrestart()), so that the user can continue scanning by
+ * just pointing yyin at a new input file.
+ */
+#define YY_BUFFER_EOF_PENDING 2
+
+ };
+#endif /* !YY_STRUCT_YY_BUFFER_STATE */
+
+/* We provide macros for accessing buffer states in case in the
+ * future we want to put the buffer states in a more general
+ * "scanner state".
+ *
+ * Returns the top of the stack, or NULL.
+ */
+#define YY_CURRENT_BUFFER ( yyg->yy_buffer_stack \
+ ? yyg->yy_buffer_stack[yyg->yy_buffer_stack_top] \
+ : NULL)
+
+/* Same as previous macro, but useful when we know that the buffer stack is not
+ * NULL or when we need an lvalue. For internal use only.
+ */
+#define YY_CURRENT_BUFFER_LVALUE yyg->yy_buffer_stack[yyg->yy_buffer_stack_top]
+
+void skslrestart (FILE *input_file ,yyscan_t yyscanner );
+void sksl_switch_to_buffer (YY_BUFFER_STATE new_buffer ,yyscan_t yyscanner );
+YY_BUFFER_STATE sksl_create_buffer (FILE *file,int size ,yyscan_t yyscanner );
+void sksl_delete_buffer (YY_BUFFER_STATE b ,yyscan_t yyscanner );
+void sksl_flush_buffer (YY_BUFFER_STATE b ,yyscan_t yyscanner );
+void skslpush_buffer_state (YY_BUFFER_STATE new_buffer ,yyscan_t yyscanner );
+void skslpop_buffer_state (yyscan_t yyscanner );
+
+static void skslensure_buffer_stack (yyscan_t yyscanner );
+static void sksl_load_buffer_state (yyscan_t yyscanner );
+static void sksl_init_buffer (YY_BUFFER_STATE b,FILE *file ,yyscan_t yyscanner );
+
+#define YY_FLUSH_BUFFER sksl_flush_buffer(YY_CURRENT_BUFFER ,yyscanner)
+
+YY_BUFFER_STATE sksl_scan_buffer (char *base,yy_size_t size ,yyscan_t yyscanner );
+YY_BUFFER_STATE sksl_scan_string (yyconst char *yy_str ,yyscan_t yyscanner );
+YY_BUFFER_STATE sksl_scan_bytes (yyconst char *bytes,yy_size_t len ,yyscan_t yyscanner );
+
+void *skslalloc (yy_size_t ,yyscan_t yyscanner );
+void *skslrealloc (void *,yy_size_t ,yyscan_t yyscanner );
+void skslfree (void * ,yyscan_t yyscanner );
+
+#define yy_new_buffer sksl_create_buffer
+
+#define yy_set_interactive(is_interactive) \
+ { \
+ if ( ! YY_CURRENT_BUFFER ){ \
+ skslensure_buffer_stack (yyscanner); \
+ YY_CURRENT_BUFFER_LVALUE = \
+ sksl_create_buffer(yyin,YY_BUF_SIZE ,yyscanner); \
+ } \
+ YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \
+ }
+
+#define yy_set_bol(at_bol) \
+ { \
+ if ( ! YY_CURRENT_BUFFER ){\
+ skslensure_buffer_stack (yyscanner); \
+ YY_CURRENT_BUFFER_LVALUE = \
+ sksl_create_buffer(yyin,YY_BUF_SIZE ,yyscanner); \
+ } \
+ YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \
+ }
+
+#define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol)
+
+/* Begin user sect3 */
+
+typedef unsigned char YY_CHAR;
+
+typedef int yy_state_type;
+
+#define yytext_ptr yytext_r
+
+static yy_state_type yy_get_previous_state (yyscan_t yyscanner );
+static yy_state_type yy_try_NUL_trans (yy_state_type current_state ,yyscan_t yyscanner);
+static int yy_get_next_buffer (yyscan_t yyscanner );
+static void yy_fatal_error (yyconst char msg[] ,yyscan_t yyscanner );
+
+/* Done after the current pattern has been matched and before the
+ * corresponding action - sets up yytext.
+ */
+#define YY_DO_BEFORE_ACTION \
+ yyg->yytext_ptr = yy_bp; \
+ yyleng = (size_t) (yy_cp - yy_bp); \
+ yyg->yy_hold_char = *yy_cp; \
+ *yy_cp = '\0'; \
+ yyg->yy_c_buf_p = yy_cp;
+
+#define YY_NUM_RULES 82
+#define YY_END_OF_BUFFER 83
+/* This struct is not used in this scanner,
+ but its presence is necessary. */
+struct yy_trans_info
+ {
+ flex_int32_t yy_verify;
+ flex_int32_t yy_nxt;
+ };
+static yyconst flex_int16_t yy_accept[201] =
+ { 0,
+ 0, 0, 83, 81, 80, 80, 54, 81, 29, 45,
+ 50, 31, 32, 43, 41, 38, 42, 37, 44, 4,
+ 56, 77, 61, 57, 60, 55, 35, 36, 49, 29,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+ 29, 29, 29, 29, 29, 29, 33, 48, 34, 80,
+ 59, 30, 29, 68, 53, 73, 66, 39, 64, 40,
+ 65, 1, 0, 78, 67, 2, 4, 0, 46, 63,
+ 58, 62, 47, 72, 52, 29, 29, 29, 11, 29,
+ 29, 29, 29, 29, 7, 16, 29, 29, 29, 29,
+ 29, 29, 29, 29, 29, 29, 29, 71, 51, 30,
+
+ 76, 0, 0, 0, 78, 1, 0, 0, 3, 69,
+ 70, 75, 29, 29, 29, 29, 29, 29, 9, 29,
+ 29, 29, 29, 29, 29, 17, 29, 29, 29, 29,
+ 29, 29, 74, 0, 1, 79, 0, 0, 2, 29,
+ 29, 29, 29, 8, 29, 24, 29, 29, 29, 21,
+ 29, 29, 29, 29, 29, 5, 29, 29, 0, 1,
+ 12, 20, 29, 29, 6, 23, 18, 29, 29, 29,
+ 29, 29, 29, 29, 10, 29, 29, 27, 29, 29,
+ 29, 15, 26, 29, 29, 14, 22, 29, 29, 19,
+ 13, 29, 29, 29, 28, 29, 29, 29, 25, 0
+
+ } ;
+
+static yyconst flex_int32_t yy_ec[256] =
+ { 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 3,
+ 1, 1, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 4, 1, 5, 6, 7, 8, 1, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 18, 19, 20,
+ 21, 22, 23, 1, 6, 6, 6, 6, 24, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 25, 1, 26, 27, 6, 1, 28, 29, 30, 31,
+
+ 32, 33, 34, 35, 36, 6, 37, 38, 39, 40,
+ 41, 42, 6, 43, 44, 45, 46, 47, 48, 6,
+ 49, 6, 50, 51, 52, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1
+ } ;
+
+static yyconst flex_int32_t yy_meta[53] =
+ { 0,
+ 1, 1, 2, 1, 1, 3, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 4, 1, 1, 1,
+ 1, 1, 1, 3, 1, 1, 1, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 1,
+ 1, 1
+ } ;
+
+static yyconst flex_int16_t yy_base[206] =
+ { 0,
+ 0, 0, 238, 239, 51, 53, 216, 0, 0, 215,
+ 49, 239, 239, 214, 46, 239, 45, 217, 52, 45,
+ 239, 239, 44, 212, 50, 239, 239, 239, 53, 189,
+ 190, 40, 192, 47, 193, 46, 50, 196, 186, 180,
+ 182, 192, 178, 179, 181, 185, 239, 61, 239, 81,
+ 239, 0, 0, 239, 198, 239, 239, 239, 239, 239,
+ 239, 70, 207, 0, 239, 72, 75, 81, 196, 239,
+ 239, 239, 195, 239, 194, 182, 173, 168, 0, 167,
+ 172, 181, 165, 173, 0, 165, 156, 156, 172, 160,
+ 156, 168, 154, 155, 151, 160, 159, 239, 173, 0,
+
+ 239, 89, 182, 176, 0, 91, 97, 174, 173, 239,
+ 239, 239, 161, 72, 158, 155, 142, 140, 0, 149,
+ 137, 141, 139, 144, 147, 0, 148, 131, 130, 143,
+ 141, 135, 239, 155, 154, 239, 107, 153, 152, 131,
+ 122, 130, 137, 0, 132, 0, 121, 117, 115, 0,
+ 114, 116, 122, 114, 126, 0, 114, 122, 136, 135,
+ 0, 0, 111, 107, 0, 0, 0, 104, 109, 103,
+ 102, 105, 99, 100, 0, 96, 110, 0, 98, 97,
+ 102, 0, 0, 98, 102, 0, 0, 90, 79, 0,
+ 0, 88, 73, 65, 0, 69, 53, 65, 0, 239,
+
+ 58, 122, 124, 128, 132
+ } ;
+
+static yyconst flex_int16_t yy_def[206] =
+ { 0,
+ 200, 1, 200, 200, 200, 200, 200, 201, 202, 200,
+ 200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
+ 200, 200, 200, 200, 200, 200, 200, 200, 200, 202,
+ 202, 202, 202, 202, 202, 202, 202, 202, 202, 202,
+ 202, 202, 202, 202, 202, 202, 200, 200, 200, 200,
+ 200, 203, 202, 200, 200, 200, 200, 200, 200, 200,
+ 200, 200, 204, 205, 200, 200, 200, 200, 200, 200,
+ 200, 200, 200, 200, 200, 202, 202, 202, 202, 202,
+ 202, 202, 202, 202, 202, 202, 202, 202, 202, 202,
+ 202, 202, 202, 202, 202, 202, 202, 200, 200, 203,
+
+ 200, 200, 204, 204, 205, 200, 200, 200, 200, 200,
+ 200, 200, 202, 202, 202, 202, 202, 202, 202, 202,
+ 202, 202, 202, 202, 202, 202, 202, 202, 202, 202,
+ 202, 202, 200, 200, 200, 200, 200, 200, 200, 202,
+ 202, 202, 202, 202, 202, 202, 202, 202, 202, 202,
+ 202, 202, 202, 202, 202, 202, 202, 202, 200, 200,
+ 202, 202, 202, 202, 202, 202, 202, 202, 202, 202,
+ 202, 202, 202, 202, 202, 202, 202, 202, 202, 202,
+ 202, 202, 202, 202, 202, 202, 202, 202, 202, 202,
+ 202, 202, 202, 202, 202, 202, 202, 202, 202, 0,
+
+ 200, 200, 200, 200, 200
+ } ;
+
+static yyconst flex_int16_t yy_nxt[292] =
+ { 0,
+ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 9, 27, 28, 29, 9, 30, 31,
+ 32, 33, 34, 9, 35, 36, 9, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 9, 46, 9, 47,
+ 48, 49, 50, 50, 50, 50, 55, 58, 60, 66,
+ 52, 67, 63, 69, 70, 61, 59, 64, 68, 56,
+ 72, 73, 65, 74, 81, 78, 68, 87, 85, 75,
+ 79, 98, 50, 50, 82, 86, 62, 83, 106, 66,
+ 88, 67, 108, 102, 108, 107, 199, 109, 68, 198,
+
+ 134, 102, 134, 107, 197, 135, 68, 106, 138, 196,
+ 138, 99, 195, 139, 137, 141, 142, 194, 159, 193,
+ 159, 192, 137, 160, 53, 53, 100, 100, 103, 103,
+ 103, 103, 105, 191, 105, 105, 190, 189, 188, 187,
+ 186, 185, 184, 183, 182, 181, 180, 179, 178, 177,
+ 176, 160, 160, 175, 174, 173, 172, 171, 170, 169,
+ 168, 167, 166, 165, 164, 163, 162, 161, 139, 139,
+ 135, 135, 158, 157, 156, 155, 154, 153, 152, 151,
+ 150, 149, 148, 147, 146, 145, 144, 143, 140, 109,
+ 109, 136, 104, 133, 132, 131, 130, 129, 128, 127,
+
+ 126, 125, 124, 123, 122, 121, 120, 119, 118, 117,
+ 116, 115, 114, 113, 112, 111, 110, 104, 101, 97,
+ 96, 95, 94, 93, 92, 91, 90, 89, 84, 80,
+ 77, 76, 71, 62, 57, 54, 51, 200, 3, 200,
+ 200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
+ 200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
+ 200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
+ 200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
+ 200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
+ 200
+
+ } ;
+
+static yyconst flex_int16_t yy_chk[292] =
+ { 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 5, 5, 6, 6, 11, 15, 17, 20,
+ 201, 20, 19, 23, 23, 17, 15, 19, 20, 11,
+ 25, 25, 19, 29, 34, 32, 20, 37, 36, 29,
+ 32, 48, 50, 50, 34, 36, 62, 34, 66, 67,
+ 37, 67, 68, 62, 68, 66, 198, 68, 67, 197,
+
+ 102, 62, 102, 66, 196, 102, 67, 106, 107, 194,
+ 107, 48, 193, 107, 106, 114, 114, 192, 137, 189,
+ 137, 188, 106, 137, 202, 202, 203, 203, 204, 204,
+ 204, 204, 205, 185, 205, 205, 184, 181, 180, 179,
+ 177, 176, 174, 173, 172, 171, 170, 169, 168, 164,
+ 163, 160, 159, 158, 157, 155, 154, 153, 152, 151,
+ 149, 148, 147, 145, 143, 142, 141, 140, 139, 138,
+ 135, 134, 132, 131, 130, 129, 128, 127, 125, 124,
+ 123, 122, 121, 120, 118, 117, 116, 115, 113, 109,
+ 108, 104, 103, 99, 97, 96, 95, 94, 93, 92,
+
+ 91, 90, 89, 88, 87, 86, 84, 83, 82, 81,
+ 80, 78, 77, 76, 75, 73, 69, 63, 55, 46,
+ 45, 44, 43, 42, 41, 40, 39, 38, 35, 33,
+ 31, 30, 24, 18, 14, 10, 7, 3, 200, 200,
+ 200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
+ 200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
+ 200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
+ 200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
+ 200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
+ 200
+
+ } ;
+
+/* Table of booleans, true if rule could match eol. */
+static yyconst flex_int32_t yy_rule_can_match_eol[83] =
+ { 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 0, 0, };
+
+/* The intent behind this definition is that it'll catch
+ * any uses of REJECT which flex missed.
+ */
+#define REJECT reject_used_but_not_detected
+#define yymore() yymore_used_but_not_detected
+#define YY_MORE_ADJ 0
+#define YY_RESTORE_YY_MORE_OFFSET
+#line 1 "sksl.flex"
+/*
+
+ This file is IGNORED during the build process!
+
+ As this file is updated so infrequently and flex is not universally present on build machines,
+ the lex.sksl.c file must be manually regenerated if you make any changes to this file. Just run:
+
+ flex sksl.flex
+
+ You will have to manually add a copyright notice to the top of lex.sksl.c.
+
+*/
+#define YY_NO_UNISTD_H 1
+#line 598 "lex.sksl.c"
+
+#define INITIAL 0
+
+#ifndef YY_NO_UNISTD_H
+/* Special case for "unistd.h", since it is non-ANSI. We include it way
+ * down here because we want the user's section 1 to have been scanned first.
+ * The user has a chance to override it with an option.
+ */
+#include <unistd.h>
+#endif
+
+#ifndef YY_EXTRA_TYPE
+#define YY_EXTRA_TYPE void *
+#endif
+
+/* Holds the entire state of the reentrant scanner. */
+struct yyguts_t
+ {
+
+ /* User-defined. Not touched by flex. */
+ YY_EXTRA_TYPE yyextra_r;
+
+ /* The rest are the same as the globals declared in the non-reentrant scanner. */
+ FILE *yyin_r, *yyout_r;
+ size_t yy_buffer_stack_top; /**< index of top of stack. */
+ size_t yy_buffer_stack_max; /**< capacity of stack. */
+ YY_BUFFER_STATE * yy_buffer_stack; /**< Stack as an array. */
+ char yy_hold_char;
+ yy_size_t yy_n_chars;
+ yy_size_t yyleng_r;
+ char *yy_c_buf_p;
+ int yy_init;
+ int yy_start;
+ int yy_did_buffer_switch_on_eof;
+ int yy_start_stack_ptr;
+ int yy_start_stack_depth;
+ int *yy_start_stack;
+ yy_state_type yy_last_accepting_state;
+ char* yy_last_accepting_cpos;
+
+ int yylineno_r;
+ int yy_flex_debug_r;
+
+ char *yytext_r;
+ int yy_more_flag;
+ int yy_more_len;
+
+ }; /* end struct yyguts_t */
+
+static int yy_init_globals (yyscan_t yyscanner );
+
+int sksllex_init (yyscan_t* scanner);
+
+int sksllex_init_extra (YY_EXTRA_TYPE user_defined,yyscan_t* scanner);
+
+/* Accessor methods to globals.
+ These are made visible to non-reentrant scanners for convenience. */
+
+int sksllex_destroy (yyscan_t yyscanner );
+
+int skslget_debug (yyscan_t yyscanner );
+
+void skslset_debug (int debug_flag ,yyscan_t yyscanner );
+
+YY_EXTRA_TYPE skslget_extra (yyscan_t yyscanner );
+
+void skslset_extra (YY_EXTRA_TYPE user_defined ,yyscan_t yyscanner );
+
+FILE *skslget_in (yyscan_t yyscanner );
+
+void skslset_in (FILE * in_str ,yyscan_t yyscanner );
+
+FILE *skslget_out (yyscan_t yyscanner );
+
+void skslset_out (FILE * out_str ,yyscan_t yyscanner );
+
+yy_size_t skslget_leng (yyscan_t yyscanner );
+
+char *skslget_text (yyscan_t yyscanner );
+
+int skslget_lineno (yyscan_t yyscanner );
+
+void skslset_lineno (int line_number ,yyscan_t yyscanner );
+
+int skslget_column (yyscan_t yyscanner );
+
+void skslset_column (int column_no ,yyscan_t yyscanner );
+
+/* Macros after this point can all be overridden by user definitions in
+ * section 1.
+ */
+
+#ifndef YY_SKIP_YYWRAP
+#ifdef __cplusplus
+extern "C" int skslwrap (yyscan_t yyscanner );
+#else
+extern int skslwrap (yyscan_t yyscanner );
+#endif
+#endif
+
+ static void yyunput (int c,char *buf_ptr ,yyscan_t yyscanner);
+
+#ifndef yytext_ptr
+static void yy_flex_strncpy (char *,yyconst char *,int ,yyscan_t yyscanner);
+#endif
+
+#ifdef YY_NEED_STRLEN
+static int yy_flex_strlen (yyconst char * ,yyscan_t yyscanner);
+#endif
+
+#ifndef YY_NO_INPUT
+
+#ifdef __cplusplus
+static int yyinput (yyscan_t yyscanner );
+#else
+static int input (yyscan_t yyscanner );
+#endif
+
+#endif
+
+/* Amount of stuff to slurp up with each read. */
+#ifndef YY_READ_BUF_SIZE
+#define YY_READ_BUF_SIZE 8192
+#endif
+
+/* Copy whatever the last rule matched to the standard output. */
+#ifndef ECHO
+/* This used to be an fputs(), but since the string might contain NUL's,
+ * we now use fwrite().
+ */
+#define ECHO do { if (fwrite( yytext, yyleng, 1, yyout )) {} } while (0)
+#endif
+
+/* Gets input and stuffs it into "buf". The number of characters read, or YY_NULL,
+ * is returned in "result".
+ */
+#ifndef YY_INPUT
+#define YY_INPUT(buf,result,max_size) \
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \
+ { \
+ int c = '*'; \
+ size_t n; \
+ for ( n = 0; n < max_size && \
+ (c = getc( yyin )) != EOF && c != '\n'; ++n ) \
+ buf[n] = (char) c; \
+ if ( c == '\n' ) \
+ buf[n++] = (char) c; \
+ if ( c == EOF && ferror( yyin ) ) \
+ YY_FATAL_ERROR( "input in flex scanner failed" ); \
+ result = n; \
+ } \
+ else \
+ { \
+ errno=0; \
+ while ( (result = fread(buf, 1, max_size, yyin))==0 && ferror(yyin)) \
+ { \
+ if( errno != EINTR) \
+ { \
+ YY_FATAL_ERROR( "input in flex scanner failed" ); \
+ break; \
+ } \
+ errno=0; \
+ clearerr(yyin); \
+ } \
+ }\
+\
+
+#endif
+
+/* No semi-colon after return; correct usage is to write "yyterminate();" -
+ * we don't want an extra ';' after the "return" because that will cause
+ * some compilers to complain about unreachable statements.
+ */
+#ifndef yyterminate
+#define yyterminate() return YY_NULL
+#endif
+
+/* Number of entries by which start-condition stack grows. */
+#ifndef YY_START_STACK_INCR
+#define YY_START_STACK_INCR 25
+#endif
+
+/* Report a fatal error. */
+#ifndef YY_FATAL_ERROR
+#define YY_FATAL_ERROR(msg) yy_fatal_error( msg , yyscanner)
+#endif
+
+/* end tables serialization structures and prototypes */
+
+/* Default declaration of generated scanner - a define so the user can
+ * easily add parameters.
+ */
+#ifndef YY_DECL
+#define YY_DECL_IS_OURS 1
+
+extern int sksllex (yyscan_t yyscanner);
+
+#define YY_DECL int sksllex (yyscan_t yyscanner)
+#endif /* !YY_DECL */
+
+/* Code executed at the beginning of each rule, after yytext and yyleng
+ * have been set up.
+ */
+#ifndef YY_USER_ACTION
+#define YY_USER_ACTION
+#endif
+
+/* Code executed at the end of each rule. */
+#ifndef YY_BREAK
+#define YY_BREAK break;
+#endif
+
+#define YY_RULE_SETUP \
+ YY_USER_ACTION
+
+/** The main scanner function which does all the work.
+ */
+YY_DECL
+{
+ register yy_state_type yy_current_state;
+ register char *yy_cp, *yy_bp;
+ register int yy_act;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+#line 23 "sksl.flex"
+
+
+#line 826 "lex.sksl.c"
+
+ if ( !yyg->yy_init )
+ {
+ yyg->yy_init = 1;
+
+#ifdef YY_USER_INIT
+ YY_USER_INIT;
+#endif
+
+ if ( ! yyg->yy_start )
+ yyg->yy_start = 1; /* first start state */
+
+ if ( ! yyin )
+ yyin = stdin;
+
+ if ( ! yyout )
+ yyout = stdout;
+
+ if ( ! YY_CURRENT_BUFFER ) {
+ skslensure_buffer_stack (yyscanner);
+ YY_CURRENT_BUFFER_LVALUE =
+ sksl_create_buffer(yyin,YY_BUF_SIZE ,yyscanner);
+ }
+
+ sksl_load_buffer_state(yyscanner );
+ }
+
+ while ( 1 ) /* loops until end-of-file is reached */
+ {
+ yy_cp = yyg->yy_c_buf_p;
+
+ /* Support of yytext. */
+ *yy_cp = yyg->yy_hold_char;
+
+ /* yy_bp points to the position in yy_ch_buf of the start of
+ * the current run.
+ */
+ yy_bp = yy_cp;
+
+ yy_current_state = yyg->yy_start;
+yy_match:
+ do
+ {
+ register YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)];
+ if ( yy_accept[yy_current_state] )
+ {
+ yyg->yy_last_accepting_state = yy_current_state;
+ yyg->yy_last_accepting_cpos = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 201 )
+ yy_c = yy_meta[(unsigned int) yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
+ ++yy_cp;
+ }
+ while ( yy_current_state != 200 );
+ yy_cp = yyg->yy_last_accepting_cpos;
+ yy_current_state = yyg->yy_last_accepting_state;
+
+yy_find_action:
+ yy_act = yy_accept[yy_current_state];
+
+ YY_DO_BEFORE_ACTION;
+
+ if ( yy_act != YY_END_OF_BUFFER && yy_rule_can_match_eol[yy_act] )
+ {
+ int yyl;
+ for ( yyl = 0; yyl < yyleng; ++yyl )
+ if ( yytext[yyl] == '\n' )
+
+ do{ yylineno++;
+ yycolumn=0;
+ }while(0)
+;
+ }
+
+do_action: /* This label is used only to access EOF actions. */
+
+ switch ( yy_act )
+ { /* beginning of action switch */
+ case 0: /* must back up */
+ /* undo the effects of YY_DO_BEFORE_ACTION */
+ *yy_cp = yyg->yy_hold_char;
+ yy_cp = yyg->yy_last_accepting_cpos;
+ yy_current_state = yyg->yy_last_accepting_state;
+ goto yy_find_action;
+
+case 1:
+YY_RULE_SETUP
+#line 25 "sksl.flex"
+{ return SkSL::Token::FLOAT_LITERAL; }
+ YY_BREAK
+case 2:
+YY_RULE_SETUP
+#line 27 "sksl.flex"
+{ return SkSL::Token::FLOAT_LITERAL; }
+ YY_BREAK
+case 3:
+YY_RULE_SETUP
+#line 29 "sksl.flex"
+{ return SkSL::Token::FLOAT_LITERAL; }
+ YY_BREAK
+case 4:
+YY_RULE_SETUP
+#line 31 "sksl.flex"
+{ return SkSL::Token::INT_LITERAL; }
+ YY_BREAK
+case 5:
+YY_RULE_SETUP
+#line 33 "sksl.flex"
+{ return SkSL::Token::TRUE_LITERAL; }
+ YY_BREAK
+case 6:
+YY_RULE_SETUP
+#line 35 "sksl.flex"
+{ return SkSL::Token::FALSE_LITERAL; }
+ YY_BREAK
+case 7:
+YY_RULE_SETUP
+#line 37 "sksl.flex"
+{ return SkSL::Token::IF; }
+ YY_BREAK
+case 8:
+YY_RULE_SETUP
+#line 39 "sksl.flex"
+{ return SkSL::Token::ELSE; }
+ YY_BREAK
+case 9:
+YY_RULE_SETUP
+#line 41 "sksl.flex"
+{ return SkSL::Token::FOR; }
+ YY_BREAK
+case 10:
+YY_RULE_SETUP
+#line 43 "sksl.flex"
+{ return SkSL::Token::WHILE; }
+ YY_BREAK
+case 11:
+YY_RULE_SETUP
+#line 45 "sksl.flex"
+{ return SkSL::Token::DO; }
+ YY_BREAK
+case 12:
+YY_RULE_SETUP
+#line 47 "sksl.flex"
+{ return SkSL::Token::BREAK; }
+ YY_BREAK
+case 13:
+YY_RULE_SETUP
+#line 49 "sksl.flex"
+{ return SkSL::Token::CONTINUE; }
+ YY_BREAK
+case 14:
+YY_RULE_SETUP
+#line 51 "sksl.flex"
+{ return SkSL::Token::DISCARD; }
+ YY_BREAK
+case 15:
+YY_RULE_SETUP
+#line 53 "sksl.flex"
+{ return SkSL::Token::RETURN; }
+ YY_BREAK
+case 16:
+YY_RULE_SETUP
+#line 55 "sksl.flex"
+{ return SkSL::Token::IN; }
+ YY_BREAK
+case 17:
+YY_RULE_SETUP
+#line 57 "sksl.flex"
+{ return SkSL::Token::OUT; }
+ YY_BREAK
+case 18:
+YY_RULE_SETUP
+#line 59 "sksl.flex"
+{ return SkSL::Token::INOUT; }
+ YY_BREAK
+case 19:
+YY_RULE_SETUP
+#line 61 "sksl.flex"
+{ return SkSL::Token::UNIFORM; }
+ YY_BREAK
+case 20:
+YY_RULE_SETUP
+#line 63 "sksl.flex"
+{ return SkSL::Token::CONST; }
+ YY_BREAK
+case 21:
+YY_RULE_SETUP
+#line 65 "sksl.flex"
+{ return SkSL::Token::LOWP; }
+ YY_BREAK
+case 22:
+YY_RULE_SETUP
+#line 67 "sksl.flex"
+{ return SkSL::Token::MEDIUMP; }
+ YY_BREAK
+case 23:
+YY_RULE_SETUP
+#line 69 "sksl.flex"
+{ return SkSL::Token::HIGHP; }
+ YY_BREAK
+case 24:
+YY_RULE_SETUP
+#line 71 "sksl.flex"
+{ return SkSL::Token::FLAT; }
+ YY_BREAK
+case 25:
+YY_RULE_SETUP
+#line 73 "sksl.flex"
+{ return SkSL::Token::NOPERSPECTIVE; }
+ YY_BREAK
+case 26:
+YY_RULE_SETUP
+#line 75 "sksl.flex"
+{ return SkSL::Token::STRUCT; }
+ YY_BREAK
+case 27:
+YY_RULE_SETUP
+#line 77 "sksl.flex"
+{ return SkSL::Token::LAYOUT; }
+ YY_BREAK
+case 28:
+YY_RULE_SETUP
+#line 79 "sksl.flex"
+{ return SkSL::Token::PRECISION; }
+ YY_BREAK
+case 29:
+YY_RULE_SETUP
+#line 81 "sksl.flex"
+{ return SkSL::Token::IDENTIFIER; }
+ YY_BREAK
+case 30:
+YY_RULE_SETUP
+#line 83 "sksl.flex"
+{ return SkSL::Token::DIRECTIVE; }
+ YY_BREAK
+case 31:
+YY_RULE_SETUP
+#line 85 "sksl.flex"
+{ return SkSL::Token::LPAREN; }
+ YY_BREAK
+case 32:
+YY_RULE_SETUP
+#line 87 "sksl.flex"
+{ return SkSL::Token::RPAREN; }
+ YY_BREAK
+case 33:
+YY_RULE_SETUP
+#line 89 "sksl.flex"
+{ return SkSL::Token::LBRACE; }
+ YY_BREAK
+case 34:
+YY_RULE_SETUP
+#line 91 "sksl.flex"
+{ return SkSL::Token::RBRACE; }
+ YY_BREAK
+case 35:
+YY_RULE_SETUP
+#line 93 "sksl.flex"
+{ return SkSL::Token::LBRACKET; }
+ YY_BREAK
+case 36:
+YY_RULE_SETUP
+#line 95 "sksl.flex"
+{ return SkSL::Token::RBRACKET; }
+ YY_BREAK
+case 37:
+YY_RULE_SETUP
+#line 97 "sksl.flex"
+{ return SkSL::Token::DOT; }
+ YY_BREAK
+case 38:
+YY_RULE_SETUP
+#line 99 "sksl.flex"
+{ return SkSL::Token::COMMA; }
+ YY_BREAK
+case 39:
+YY_RULE_SETUP
+#line 101 "sksl.flex"
+{ return SkSL::Token::PLUSPLUS; }
+ YY_BREAK
+case 40:
+YY_RULE_SETUP
+#line 103 "sksl.flex"
+{ return SkSL::Token::MINUSMINUS; }
+ YY_BREAK
+case 41:
+YY_RULE_SETUP
+#line 105 "sksl.flex"
+{ return SkSL::Token::PLUS; }
+ YY_BREAK
+case 42:
+YY_RULE_SETUP
+#line 107 "sksl.flex"
+{ return SkSL::Token::MINUS; }
+ YY_BREAK
+case 43:
+YY_RULE_SETUP
+#line 109 "sksl.flex"
+{ return SkSL::Token::STAR; }
+ YY_BREAK
+case 44:
+YY_RULE_SETUP
+#line 111 "sksl.flex"
+{ return SkSL::Token::SLASH; }
+ YY_BREAK
+case 45:
+YY_RULE_SETUP
+#line 113 "sksl.flex"
+{ return SkSL::Token::PERCENT; }
+ YY_BREAK
+case 46:
+YY_RULE_SETUP
+#line 115 "sksl.flex"
+{ return SkSL::Token::SHL; }
+ YY_BREAK
+case 47:
+YY_RULE_SETUP
+#line 117 "sksl.flex"
+{ return SkSL::Token::SHR; }
+ YY_BREAK
+case 48:
+YY_RULE_SETUP
+#line 119 "sksl.flex"
+{ return SkSL::Token::BITWISEOR; }
+ YY_BREAK
+case 49:
+YY_RULE_SETUP
+#line 121 "sksl.flex"
+{ return SkSL::Token::BITWISEXOR; }
+ YY_BREAK
+case 50:
+YY_RULE_SETUP
+#line 123 "sksl.flex"
+{ return SkSL::Token::BITWISEAND; }
+ YY_BREAK
+case 51:
+YY_RULE_SETUP
+#line 125 "sksl.flex"
+{ return SkSL::Token::LOGICALOR; }
+ YY_BREAK
+case 52:
+YY_RULE_SETUP
+#line 127 "sksl.flex"
+{ return SkSL::Token::LOGICALXOR; }
+ YY_BREAK
+case 53:
+YY_RULE_SETUP
+#line 129 "sksl.flex"
+{ return SkSL::Token::LOGICALAND; }
+ YY_BREAK
+case 54:
+YY_RULE_SETUP
+#line 131 "sksl.flex"
+{ return SkSL::Token::NOT; }
+ YY_BREAK
+case 55:
+YY_RULE_SETUP
+#line 133 "sksl.flex"
+{ return SkSL::Token::QUESTION; }
+ YY_BREAK
+case 56:
+YY_RULE_SETUP
+#line 135 "sksl.flex"
+{ return SkSL::Token::COLON; }
+ YY_BREAK
+case 57:
+YY_RULE_SETUP
+#line 137 "sksl.flex"
+{ return SkSL::Token::EQ; }
+ YY_BREAK
+case 58:
+YY_RULE_SETUP
+#line 139 "sksl.flex"
+{ return SkSL::Token::EQEQ; }
+ YY_BREAK
+case 59:
+YY_RULE_SETUP
+#line 141 "sksl.flex"
+{ return SkSL::Token::NEQ; }
+ YY_BREAK
+case 60:
+YY_RULE_SETUP
+#line 143 "sksl.flex"
+{ return SkSL::Token::GT; }
+ YY_BREAK
+case 61:
+YY_RULE_SETUP
+#line 145 "sksl.flex"
+{ return SkSL::Token::LT; }
+ YY_BREAK
+case 62:
+YY_RULE_SETUP
+#line 147 "sksl.flex"
+{ return SkSL::Token::GTEQ; }
+ YY_BREAK
+case 63:
+YY_RULE_SETUP
+#line 149 "sksl.flex"
+{ return SkSL::Token::LTEQ; }
+ YY_BREAK
+case 64:
+YY_RULE_SETUP
+#line 151 "sksl.flex"
+{ return SkSL::Token::PLUSEQ; }
+ YY_BREAK
+case 65:
+YY_RULE_SETUP
+#line 153 "sksl.flex"
+{ return SkSL::Token::MINUSEQ; }
+ YY_BREAK
+case 66:
+YY_RULE_SETUP
+#line 155 "sksl.flex"
+{ return SkSL::Token::STAREQ; }
+ YY_BREAK
+case 67:
+YY_RULE_SETUP
+#line 157 "sksl.flex"
+{ return SkSL::Token::SLASHEQ; }
+ YY_BREAK
+case 68:
+YY_RULE_SETUP
+#line 159 "sksl.flex"
+{ return SkSL::Token::PERCENTEQ; }
+ YY_BREAK
+case 69:
+YY_RULE_SETUP
+#line 161 "sksl.flex"
+{ return SkSL::Token::SHLEQ; }
+ YY_BREAK
+case 70:
+YY_RULE_SETUP
+#line 163 "sksl.flex"
+{ return SkSL::Token::SHREQ; }
+ YY_BREAK
+case 71:
+YY_RULE_SETUP
+#line 165 "sksl.flex"
+{ return SkSL::Token::BITWISEOREQ; }
+ YY_BREAK
+case 72:
+YY_RULE_SETUP
+#line 167 "sksl.flex"
+{ return SkSL::Token::BITWISEXOREQ; }
+ YY_BREAK
+case 73:
+YY_RULE_SETUP
+#line 169 "sksl.flex"
+{ return SkSL::Token::BITWISEANDEQ; }
+ YY_BREAK
+case 74:
+YY_RULE_SETUP
+#line 171 "sksl.flex"
+{ return SkSL::Token::LOGICALOREQ; }
+ YY_BREAK
+case 75:
+YY_RULE_SETUP
+#line 173 "sksl.flex"
+{ return SkSL::Token::LOGICALXOREQ; }
+ YY_BREAK
+case 76:
+YY_RULE_SETUP
+#line 175 "sksl.flex"
+{ return SkSL::Token::LOGICALANDEQ; }
+ YY_BREAK
+case 77:
+YY_RULE_SETUP
+#line 177 "sksl.flex"
+{ return SkSL::Token::SEMICOLON; }
+ YY_BREAK
+case 78:
+YY_RULE_SETUP
+#line 179 "sksl.flex"
+/* line comment */
+ YY_BREAK
+case 79:
+/* rule 79 can match eol */
+YY_RULE_SETUP
+#line 181 "sksl.flex"
+/* block comment */
+ YY_BREAK
+case 80:
+/* rule 80 can match eol */
+YY_RULE_SETUP
+#line 183 "sksl.flex"
+/* whitespace */
+ YY_BREAK
+case 81:
+YY_RULE_SETUP
+#line 185 "sksl.flex"
+{ return SkSL::Token::INVALID_TOKEN; }
+ YY_BREAK
+case 82:
+YY_RULE_SETUP
+#line 187 "sksl.flex"
+ECHO;
+ YY_BREAK
+#line 1329 "lex.sksl.c"
+case YY_STATE_EOF(INITIAL):
+ yyterminate();
+
+ case YY_END_OF_BUFFER:
+ {
+ /* Amount of text matched not including the EOB char. */
+ int yy_amount_of_matched_text = (int) (yy_cp - yyg->yytext_ptr) - 1;
+
+ /* Undo the effects of YY_DO_BEFORE_ACTION. */
+ *yy_cp = yyg->yy_hold_char;
+ YY_RESTORE_YY_MORE_OFFSET
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW )
+ {
+ /* We're scanning a new file or input source. It's
+ * possible that this happened because the user
+ * just pointed yyin at a new source and called
+ * sksllex(). If so, then we have to assure
+ * consistency between YY_CURRENT_BUFFER and our
+ * globals. Here is the right place to do so, because
+ * this is the first action (other than possibly a
+ * back-up) that will match for the new input source.
+ */
+ yyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
+ YY_CURRENT_BUFFER_LVALUE->yy_input_file = yyin;
+ YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL;
+ }
+
+ /* Note that here we test for yy_c_buf_p "<=" to the position
+ * of the first EOB in the buffer, since yy_c_buf_p will
+ * already have been incremented past the NUL character
+ * (since all states make transitions on EOB to the
+ * end-of-buffer state). Contrast this with the test
+ * in input().
+ */
+ if ( yyg->yy_c_buf_p <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars] )
+ { /* This was really a NUL. */
+ yy_state_type yy_next_state;
+
+ yyg->yy_c_buf_p = yyg->yytext_ptr + yy_amount_of_matched_text;
+
+ yy_current_state = yy_get_previous_state( yyscanner );
+
+ /* Okay, we're now positioned to make the NUL
+ * transition. We couldn't have
+ * yy_get_previous_state() go ahead and do it
+ * for us because it doesn't know how to deal
+ * with the possibility of jamming (and we don't
+ * want to build jamming into it because then it
+ * will run more slowly).
+ */
+
+ yy_next_state = yy_try_NUL_trans( yy_current_state , yyscanner);
+
+ yy_bp = yyg->yytext_ptr + YY_MORE_ADJ;
+
+ if ( yy_next_state )
+ {
+ /* Consume the NUL. */
+ yy_cp = ++yyg->yy_c_buf_p;
+ yy_current_state = yy_next_state;
+ goto yy_match;
+ }
+
+ else
+ {
+ yy_cp = yyg->yy_last_accepting_cpos;
+ yy_current_state = yyg->yy_last_accepting_state;
+ goto yy_find_action;
+ }
+ }
+
+ else switch ( yy_get_next_buffer( yyscanner ) )
+ {
+ case EOB_ACT_END_OF_FILE:
+ {
+ yyg->yy_did_buffer_switch_on_eof = 0;
+
+ if ( skslwrap(yyscanner ) )
+ {
+ /* Note: because we've taken care in
+ * yy_get_next_buffer() to have set up
+ * yytext, we can now set up
+ * yy_c_buf_p so that if some total
+ * hoser (like flex itself) wants to
+ * call the scanner after we return the
+ * YY_NULL, it'll still work - another
+ * YY_NULL will get returned.
+ */
+ yyg->yy_c_buf_p = yyg->yytext_ptr + YY_MORE_ADJ;
+
+ yy_act = YY_STATE_EOF(YY_START);
+ goto do_action;
+ }
+
+ else
+ {
+ if ( ! yyg->yy_did_buffer_switch_on_eof )
+ YY_NEW_FILE;
+ }
+ break;
+ }
+
+ case EOB_ACT_CONTINUE_SCAN:
+ yyg->yy_c_buf_p =
+ yyg->yytext_ptr + yy_amount_of_matched_text;
+
+ yy_current_state = yy_get_previous_state( yyscanner );
+
+ yy_cp = yyg->yy_c_buf_p;
+ yy_bp = yyg->yytext_ptr + YY_MORE_ADJ;
+ goto yy_match;
+
+ case EOB_ACT_LAST_MATCH:
+ yyg->yy_c_buf_p =
+ &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars];
+
+ yy_current_state = yy_get_previous_state( yyscanner );
+
+ yy_cp = yyg->yy_c_buf_p;
+ yy_bp = yyg->yytext_ptr + YY_MORE_ADJ;
+ goto yy_find_action;
+ }
+ break;
+ }
+
+ default:
+ YY_FATAL_ERROR(
+ "fatal flex scanner internal error--no action found" );
+ } /* end of action switch */
+ } /* end of scanning one token */
+} /* end of sksllex */
+
+/* yy_get_next_buffer - try to read in a new buffer
+ *
+ * Returns a code representing an action:
+ *	EOB_ACT_LAST_MATCH - process the text matched so far, then handle end of input
+ * EOB_ACT_CONTINUE_SCAN - continue scanning from current position
+ * EOB_ACT_END_OF_FILE - end of file
+ */
+static int yy_get_next_buffer (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ register char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf;
+ register char *source = yyg->yytext_ptr;
+ register int number_to_move, i;
+ int ret_val;
+
+ if ( yyg->yy_c_buf_p > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars + 1] )
+ YY_FATAL_ERROR(
+ "fatal flex scanner internal error--end of buffer missed" );
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 )
+ { /* Don't try to fill the buffer, so this is an EOF. */
+ if ( yyg->yy_c_buf_p - yyg->yytext_ptr - YY_MORE_ADJ == 1 )
+ {
+ /* We matched a single character, the EOB, so
+ * treat this as a final EOF.
+ */
+ return EOB_ACT_END_OF_FILE;
+ }
+
+ else
+ {
+ /* We matched some text prior to the EOB, first
+ * process it.
+ */
+ return EOB_ACT_LAST_MATCH;
+ }
+ }
+
+ /* Try to read more data. */
+
+ /* First move last chars to start of buffer. */
+ number_to_move = (int) (yyg->yy_c_buf_p - yyg->yytext_ptr) - 1;
+
+ for ( i = 0; i < number_to_move; ++i )
+ *(dest++) = *(source++);
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING )
+ /* don't do the read, it's not guaranteed to return an EOF,
+ * just force an EOF
+ */
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars = 0;
+
+ else
+ {
+ yy_size_t num_to_read =
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;
+
+ while ( num_to_read <= 0 )
+ { /* Not enough room in the buffer - grow it. */
+
+ /* just a shorter name for the current buffer */
+ YY_BUFFER_STATE b = YY_CURRENT_BUFFER_LVALUE;
+
+ int yy_c_buf_p_offset =
+ (int) (yyg->yy_c_buf_p - b->yy_ch_buf);
+
+ if ( b->yy_is_our_buffer )
+ {
+ yy_size_t new_size = b->yy_buf_size * 2;
+
+ if ( new_size <= 0 )
+ b->yy_buf_size += b->yy_buf_size / 8;
+ else
+ b->yy_buf_size *= 2;
+
+ b->yy_ch_buf = (char *)
+ /* Include room for 2 EOB chars. */
+ skslrealloc((void *) b->yy_ch_buf,b->yy_buf_size + 2 ,yyscanner );
+ }
+ else
+ /* Can't grow it, we don't own it. */
+ b->yy_ch_buf = 0;
+
+ if ( ! b->yy_ch_buf )
+ YY_FATAL_ERROR(
+ "fatal error - scanner input buffer overflow" );
+
+ yyg->yy_c_buf_p = &b->yy_ch_buf[yy_c_buf_p_offset];
+
+ num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size -
+ number_to_move - 1;
+
+ }
+
+ if ( num_to_read > YY_READ_BUF_SIZE )
+ num_to_read = YY_READ_BUF_SIZE;
+
+ /* Read in more data. */
+ YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),
+ yyg->yy_n_chars, num_to_read );
+
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars;
+ }
+
+ if ( yyg->yy_n_chars == 0 )
+ {
+ if ( number_to_move == YY_MORE_ADJ )
+ {
+ ret_val = EOB_ACT_END_OF_FILE;
+ skslrestart(yyin ,yyscanner);
+ }
+
+ else
+ {
+ ret_val = EOB_ACT_LAST_MATCH;
+ YY_CURRENT_BUFFER_LVALUE->yy_buffer_status =
+ YY_BUFFER_EOF_PENDING;
+ }
+ }
+
+ else
+ ret_val = EOB_ACT_CONTINUE_SCAN;
+
+ if ((yy_size_t) (yyg->yy_n_chars + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) {
+ /* Extend the array by 50%, plus the number we really need. */
+ yy_size_t new_size = yyg->yy_n_chars + number_to_move + (yyg->yy_n_chars >> 1);
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) skslrealloc((void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf,new_size ,yyscanner );
+ if ( ! YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_get_next_buffer()" );
+ }
+
+ yyg->yy_n_chars += number_to_move;
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars] = YY_END_OF_BUFFER_CHAR;
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars + 1] = YY_END_OF_BUFFER_CHAR;
+
+ yyg->yytext_ptr = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0];
+
+ return ret_val;
+}
+
+/* yy_get_previous_state - get the state just before the EOB char was reached */
+
+ static yy_state_type yy_get_previous_state (yyscan_t yyscanner)
+{
+ register yy_state_type yy_current_state;
+ register char *yy_cp;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ yy_current_state = yyg->yy_start;
+
+ for ( yy_cp = yyg->yytext_ptr + YY_MORE_ADJ; yy_cp < yyg->yy_c_buf_p; ++yy_cp )
+ {
+ register YY_CHAR yy_c = (*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1);
+ if ( yy_accept[yy_current_state] )
+ {
+ yyg->yy_last_accepting_state = yy_current_state;
+ yyg->yy_last_accepting_cpos = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 201 )
+ yy_c = yy_meta[(unsigned int) yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
+ }
+
+ return yy_current_state;
+}
+
+/* yy_try_NUL_trans - try to make a transition on the NUL character
+ *
+ * synopsis
+ * next_state = yy_try_NUL_trans( current_state );
+ */
+ static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state , yyscan_t yyscanner)
+{
+ register int yy_is_jam;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; /* This var may be unused depending upon options. */
+ register char *yy_cp = yyg->yy_c_buf_p;
+
+ register YY_CHAR yy_c = 1;
+ if ( yy_accept[yy_current_state] )
+ {
+ yyg->yy_last_accepting_state = yy_current_state;
+ yyg->yy_last_accepting_cpos = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 201 )
+ yy_c = yy_meta[(unsigned int) yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
+ yy_is_jam = (yy_current_state == 200);
+
+ (void)yyg;
+ return yy_is_jam ? 0 : yy_current_state;
+}
+
+ static void yyunput (int c, register char * yy_bp , yyscan_t yyscanner)
+{
+ register char *yy_cp;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ yy_cp = yyg->yy_c_buf_p;
+
+ /* undo effects of setting up yytext */
+ *yy_cp = yyg->yy_hold_char;
+
+ if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )
+ { /* need to shift things up to make room */
+ /* +2 for EOB chars. */
+ register yy_size_t number_to_move = yyg->yy_n_chars + 2;
+ register char *dest = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_size + 2];
+ register char *source =
+ &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move];
+
+ while ( source > YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
+ *--dest = *--source;
+
+ yy_cp += (int) (dest - source);
+ yy_bp += (int) (dest - source);
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars =
+ yyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_buf_size;
+
+ if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )
+ YY_FATAL_ERROR( "flex scanner push-back overflow" );
+ }
+
+ *--yy_cp = (char) c;
+
+ if ( c == '\n' ){
+ --yylineno;
+ }
+
+ yyg->yytext_ptr = yy_bp;
+ yyg->yy_hold_char = *yy_cp;
+ yyg->yy_c_buf_p = yy_cp;
+}
+
+#ifndef YY_NO_INPUT
+#ifdef __cplusplus
+ static int yyinput (yyscan_t yyscanner)
+#else
+ static int input (yyscan_t yyscanner)
+#endif
+
+{
+ int c;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ *yyg->yy_c_buf_p = yyg->yy_hold_char;
+
+ if ( *yyg->yy_c_buf_p == YY_END_OF_BUFFER_CHAR )
+ {
+ /* yy_c_buf_p now points to the character we want to return.
+ * If this occurs *before* the EOB characters, then it's a
+ * valid NUL; if not, then we've hit the end of the buffer.
+ */
+ if ( yyg->yy_c_buf_p < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars] )
+ /* This was really a NUL. */
+ *yyg->yy_c_buf_p = '\0';
+
+ else
+ { /* need more input */
+ yy_size_t offset = yyg->yy_c_buf_p - yyg->yytext_ptr;
+ ++yyg->yy_c_buf_p;
+
+ switch ( yy_get_next_buffer( yyscanner ) )
+ {
+ case EOB_ACT_LAST_MATCH:
+ /* This happens because yy_get_next_buffer()
+ * sees that we've accumulated a
+ * token and flags that we need to
+ * try matching the token before
+ * proceeding. But for input(),
+ * there's no matching to consider.
+ * So convert the EOB_ACT_LAST_MATCH
+ * to EOB_ACT_END_OF_FILE.
+ */
+
+ /* Reset buffer status. */
+ skslrestart(yyin ,yyscanner);
+
+ /*FALLTHROUGH*/
+
+ case EOB_ACT_END_OF_FILE:
+ {
+ if ( skslwrap(yyscanner ) )
+ return EOF;
+
+ if ( ! yyg->yy_did_buffer_switch_on_eof )
+ YY_NEW_FILE;
+#ifdef __cplusplus
+ return yyinput(yyscanner);
+#else
+ return input(yyscanner);
+#endif
+ }
+
+ case EOB_ACT_CONTINUE_SCAN:
+ yyg->yy_c_buf_p = yyg->yytext_ptr + offset;
+ break;
+ }
+ }
+ }
+
+ c = *(unsigned char *) yyg->yy_c_buf_p; /* cast for 8-bit char's */
+ *yyg->yy_c_buf_p = '\0'; /* preserve yytext */
+ yyg->yy_hold_char = *++yyg->yy_c_buf_p;
+
+ if ( c == '\n' )
+
+ do{ yylineno++;
+ yycolumn=0;
+ }while(0)
+;
+
+ return c;
+}
+#endif /* ifndef YY_NO_INPUT */
+
+/** Immediately switch to a different input stream.
+ * @param input_file A readable stream.
+ * @param yyscanner The scanner object.
+ * @note This function does not reset the start condition to @c INITIAL .
+ */
+ void skslrestart (FILE * input_file , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ if ( ! YY_CURRENT_BUFFER ){
+ skslensure_buffer_stack (yyscanner);
+ YY_CURRENT_BUFFER_LVALUE =
+ sksl_create_buffer(yyin,YY_BUF_SIZE ,yyscanner);
+ }
+
+ sksl_init_buffer(YY_CURRENT_BUFFER,input_file ,yyscanner);
+ sksl_load_buffer_state(yyscanner );
+}
+
+/** Switch to a different input buffer.
+ * @param new_buffer The new input buffer.
+ * @param yyscanner The scanner object.
+ */
+ void sksl_switch_to_buffer (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ /* TODO. We should be able to replace this entire function body
+ * with
+ * skslpop_buffer_state();
+ * skslpush_buffer_state(new_buffer);
+ */
+ skslensure_buffer_stack (yyscanner);
+ if ( YY_CURRENT_BUFFER == new_buffer )
+ return;
+
+ if ( YY_CURRENT_BUFFER )
+ {
+ /* Flush out information for old buffer. */
+ *yyg->yy_c_buf_p = yyg->yy_hold_char;
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = yyg->yy_c_buf_p;
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars;
+ }
+
+ YY_CURRENT_BUFFER_LVALUE = new_buffer;
+ sksl_load_buffer_state(yyscanner );
+
+ /* We don't actually know whether we did this switch during
+ * EOF (skslwrap()) processing, but the only time this flag
+ * is looked at is after skslwrap() is called, so it's safe
+ * to go ahead and always set it.
+ */
+ yyg->yy_did_buffer_switch_on_eof = 1;
+}
+
+static void sksl_load_buffer_state (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ yyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
+ yyg->yytext_ptr = yyg->yy_c_buf_p = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos;
+ yyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file;
+ yyg->yy_hold_char = *yyg->yy_c_buf_p;
+}
+
+/** Allocate and initialize an input buffer state.
+ * @param file A readable stream.
+ * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE.
+ * @param yyscanner The scanner object.
+ * @return the allocated buffer state.
+ */
+ YY_BUFFER_STATE sksl_create_buffer (FILE * file, int size , yyscan_t yyscanner)
+{
+ YY_BUFFER_STATE b;
+
+ b = (YY_BUFFER_STATE) skslalloc(sizeof( struct yy_buffer_state ) ,yyscanner );
+ if ( ! b )
+ YY_FATAL_ERROR( "out of dynamic memory in sksl_create_buffer()" );
+
+ b->yy_buf_size = size;
+
+ /* yy_ch_buf has to be 2 characters longer than the size given because
+ * we need to put in 2 end-of-buffer characters.
+ */
+ b->yy_ch_buf = (char *) skslalloc(b->yy_buf_size + 2 ,yyscanner );
+ if ( ! b->yy_ch_buf )
+ YY_FATAL_ERROR( "out of dynamic memory in sksl_create_buffer()" );
+
+ b->yy_is_our_buffer = 1;
+
+ sksl_init_buffer(b,file ,yyscanner);
+
+ return b;
+}
+
+/** Destroy the buffer.
+ * @param b a buffer created with sksl_create_buffer()
+ * @param yyscanner The scanner object.
+ */
+ void sksl_delete_buffer (YY_BUFFER_STATE b , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ if ( ! b )
+ return;
+
+ if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */
+ YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0;
+
+ if ( b->yy_is_our_buffer )
+ skslfree((void *) b->yy_ch_buf ,yyscanner );
+
+ skslfree((void *) b ,yyscanner );
+}
+
+/* Initializes or reinitializes a buffer.
+ * This function is sometimes called more than once on the same buffer,
+ * such as during a skslrestart() or at EOF.
+ */
+ static void sksl_init_buffer (YY_BUFFER_STATE b, FILE * file , yyscan_t yyscanner)
+
+{
+ int oerrno = errno;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ sksl_flush_buffer(b ,yyscanner);
+
+ b->yy_input_file = file;
+ b->yy_fill_buffer = 1;
+
+ /* If b is the current buffer, then sksl_init_buffer was _probably_
+ * called from skslrestart() or through yy_get_next_buffer.
+ * In that case, we don't want to reset the lineno or column.
+ */
+ if (b != YY_CURRENT_BUFFER){
+ b->yy_bs_lineno = 1;
+ b->yy_bs_column = 0;
+ }
+
+ b->yy_is_interactive = 0;
+
+ errno = oerrno;
+}
+
+/** Discard all buffered characters. On the next scan, YY_INPUT will be called.
+ * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER.
+ * @param yyscanner The scanner object.
+ */
+ void sksl_flush_buffer (YY_BUFFER_STATE b , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ if ( ! b )
+ return;
+
+ b->yy_n_chars = 0;
+
+ /* We always need two end-of-buffer characters. The first causes
+ * a transition to the end-of-buffer state. The second causes
+ * a jam in that state.
+ */
+ b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR;
+ b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR;
+
+ b->yy_buf_pos = &b->yy_ch_buf[0];
+
+ b->yy_at_bol = 1;
+ b->yy_buffer_status = YY_BUFFER_NEW;
+
+ if ( b == YY_CURRENT_BUFFER )
+ sksl_load_buffer_state(yyscanner );
+}
+
+/** Pushes the new state onto the stack. The new state becomes
+ * the current state. This function will allocate the stack
+ * if necessary.
+ * @param new_buffer The new state.
+ * @param yyscanner The scanner object.
+ */
+void skslpush_buffer_state (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ if (new_buffer == NULL)
+ return;
+
+ skslensure_buffer_stack(yyscanner);
+
+ /* This block is copied from sksl_switch_to_buffer. */
+ if ( YY_CURRENT_BUFFER )
+ {
+ /* Flush out information for old buffer. */
+ *yyg->yy_c_buf_p = yyg->yy_hold_char;
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = yyg->yy_c_buf_p;
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars;
+ }
+
+ /* Only push if top exists. Otherwise, replace top. */
+ if (YY_CURRENT_BUFFER)
+ yyg->yy_buffer_stack_top++;
+ YY_CURRENT_BUFFER_LVALUE = new_buffer;
+
+ /* copied from sksl_switch_to_buffer. */
+ sksl_load_buffer_state(yyscanner );
+ yyg->yy_did_buffer_switch_on_eof = 1;
+}
+
+/** Removes and deletes the top of the stack, if present.
+ * The next element becomes the new top.
+ * @param yyscanner The scanner object.
+ */
+void skslpop_buffer_state (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ if (!YY_CURRENT_BUFFER)
+ return;
+
+ sksl_delete_buffer(YY_CURRENT_BUFFER ,yyscanner);
+ YY_CURRENT_BUFFER_LVALUE = NULL;
+ if (yyg->yy_buffer_stack_top > 0)
+ --yyg->yy_buffer_stack_top;
+
+ if (YY_CURRENT_BUFFER) {
+ sksl_load_buffer_state(yyscanner );
+ yyg->yy_did_buffer_switch_on_eof = 1;
+ }
+}
+
+/* Allocates the stack if it does not exist.
+ * Guarantees space for at least one push.
+ */
+static void skslensure_buffer_stack (yyscan_t yyscanner)
+{
+ yy_size_t num_to_alloc;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ if (!yyg->yy_buffer_stack) {
+
+ /* First allocation is for a single element, since we don't know if this
+ * scanner will even need a stack. The realloc path below grows it before
+ * any further buffer is pushed.
+ */
+ num_to_alloc = 1;
+ yyg->yy_buffer_stack = (struct yy_buffer_state**)skslalloc
+ (num_to_alloc * sizeof(struct yy_buffer_state*)
+ , yyscanner);
+ if ( ! yyg->yy_buffer_stack )
+ YY_FATAL_ERROR( "out of dynamic memory in skslensure_buffer_stack()" );
+
+ memset(yyg->yy_buffer_stack, 0, num_to_alloc * sizeof(struct yy_buffer_state*));
+
+ yyg->yy_buffer_stack_max = num_to_alloc;
+ yyg->yy_buffer_stack_top = 0;
+ return;
+ }
+
+ if (yyg->yy_buffer_stack_top >= (yyg->yy_buffer_stack_max) - 1){
+
+ /* Increase the buffer to prepare for a possible push. */
+ int grow_size = 8 /* arbitrary grow size */;
+
+ num_to_alloc = yyg->yy_buffer_stack_max + grow_size;
+ yyg->yy_buffer_stack = (struct yy_buffer_state**)skslrealloc
+ (yyg->yy_buffer_stack,
+ num_to_alloc * sizeof(struct yy_buffer_state*)
+ , yyscanner);
+ if ( ! yyg->yy_buffer_stack )
+ YY_FATAL_ERROR( "out of dynamic memory in skslensure_buffer_stack()" );
+
+ /* zero only the new slots.*/
+ memset(yyg->yy_buffer_stack + yyg->yy_buffer_stack_max, 0, grow_size * sizeof(struct yy_buffer_state*));
+ yyg->yy_buffer_stack_max = num_to_alloc;
+ }
+}
+
+/** Setup the input buffer state to scan directly from a user-specified character buffer.
+ * @param base the character buffer
+ * @param size the size in bytes of the character buffer
+ * @param yyscanner The scanner object.
+ * @return the newly allocated buffer state object.
+ */
+YY_BUFFER_STATE sksl_scan_buffer (char * base, yy_size_t size , yyscan_t yyscanner)
+{
+ YY_BUFFER_STATE b;
+
+ if ( size < 2 ||
+ base[size-2] != YY_END_OF_BUFFER_CHAR ||
+ base[size-1] != YY_END_OF_BUFFER_CHAR )
+ /* They forgot to leave room for the EOB's. */
+ return 0;
+
+ b = (YY_BUFFER_STATE) skslalloc(sizeof( struct yy_buffer_state ) ,yyscanner );
+ if ( ! b )
+ YY_FATAL_ERROR( "out of dynamic memory in sksl_scan_buffer()" );
+
+ b->yy_buf_size = size - 2; /* "- 2" to take care of EOB's */
+ b->yy_buf_pos = b->yy_ch_buf = base;
+ b->yy_is_our_buffer = 0;
+ b->yy_input_file = 0;
+ b->yy_n_chars = b->yy_buf_size;
+ b->yy_is_interactive = 0;
+ b->yy_at_bol = 1;
+ b->yy_fill_buffer = 0;
+ b->yy_buffer_status = YY_BUFFER_NEW;
+
+ sksl_switch_to_buffer(b ,yyscanner );
+
+ return b;
+}
+
+/** Setup the input buffer state to scan a string. The next call to sksllex() will
+ * scan from a @e copy of @a str.
+ * @param yystr a NUL-terminated string to scan
+ * @param yyscanner The scanner object.
+ * @return the newly allocated buffer state object.
+ * @note If you want to scan bytes that may contain NUL values, then use
+ * sksl_scan_bytes() instead.
+ */
+YY_BUFFER_STATE sksl_scan_string (yyconst char * yystr , yyscan_t yyscanner)
+{
+
+ return sksl_scan_bytes(yystr,strlen(yystr) ,yyscanner);
+}
+
+/** Setup the input buffer state to scan the given bytes. The next call to sksllex() will
+ * scan from a @e copy of @a bytes.
+ * @param yybytes the byte buffer to scan
+ * @param _yybytes_len the number of bytes in the buffer pointed to by @a bytes.
+ * @param yyscanner The scanner object.
+ * @return the newly allocated buffer state object.
+ */
+YY_BUFFER_STATE sksl_scan_bytes (yyconst char * yybytes, yy_size_t _yybytes_len , yyscan_t yyscanner)
+{
+ YY_BUFFER_STATE b;
+ char *buf;
+ yy_size_t n;
+ int i;
+
+ /* Get memory for full buffer, including space for trailing EOB's. */
+ n = _yybytes_len + 2;
+ buf = (char *) skslalloc(n ,yyscanner );
+ if ( ! buf )
+ YY_FATAL_ERROR( "out of dynamic memory in sksl_scan_bytes()" );
+
+ for ( i = 0; i < _yybytes_len; ++i )
+ buf[i] = yybytes[i];
+
+ buf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR;
+
+ b = sksl_scan_buffer(buf,n ,yyscanner);
+ if ( ! b )
+ YY_FATAL_ERROR( "bad buffer in sksl_scan_bytes()" );
+
+ /* It's okay to grow etc. this buffer, and we should throw it
+ * away when we're done.
+ */
+ b->yy_is_our_buffer = 1;
+
+ return b;
+}
+
+#ifndef YY_EXIT_FAILURE
+#define YY_EXIT_FAILURE 2
+#endif
+
+static void yy_fatal_error (yyconst char* msg , yyscan_t yyscanner)
+{
+ (void) fprintf( stderr, "%s\n", msg );
+ exit( YY_EXIT_FAILURE );
+}
+
+/* Redefine yyless() so it works in section 3 code. */
+
+#undef yyless
+#define yyless(n) \
+ do \
+ { \
+ /* Undo effects of setting up yytext. */ \
+ int yyless_macro_arg = (n); \
+ YY_LESS_LINENO(yyless_macro_arg);\
+ yytext[yyleng] = yyg->yy_hold_char; \
+ yyg->yy_c_buf_p = yytext + yyless_macro_arg; \
+ yyg->yy_hold_char = *yyg->yy_c_buf_p; \
+ *yyg->yy_c_buf_p = '\0'; \
+ yyleng = yyless_macro_arg; \
+ } \
+ while ( 0 )
+
+/* Accessor methods (get/set functions) to struct members. */
+
+/** Get the user-defined data for this scanner.
+ * @param yyscanner The scanner object.
+ */
+YY_EXTRA_TYPE skslget_extra (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yyextra;
+}
+
+/** Get the current line number.
+ * @param yyscanner The scanner object.
+ */
+int skslget_lineno (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ if (! YY_CURRENT_BUFFER)
+ return 0;
+
+ return yylineno;
+}
+
+/** Get the current column number.
+ * @param yyscanner The scanner object.
+ */
+int skslget_column (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ if (! YY_CURRENT_BUFFER)
+ return 0;
+
+ return yycolumn;
+}
+
+/** Get the input stream.
+ * @param yyscanner The scanner object.
+ */
+FILE *skslget_in (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yyin;
+}
+
+/** Get the output stream.
+ * @param yyscanner The scanner object.
+ */
+FILE *skslget_out (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yyout;
+}
+
+/** Get the length of the current token.
+ * @param yyscanner The scanner object.
+ */
+yy_size_t skslget_leng (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yyleng;
+}
+
+/** Get the current token.
+ * @param yyscanner The scanner object.
+ */
+
+char *skslget_text (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yytext;
+}
+
+/** Set the user-defined data. This data is never touched by the scanner.
+ * @param user_defined The data to be associated with this scanner.
+ * @param yyscanner The scanner object.
+ */
+void skslset_extra (YY_EXTRA_TYPE user_defined , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ yyextra = user_defined ;
+}
+
+/** Set the current line number.
+ * @param line_number
+ * @param yyscanner The scanner object.
+ */
+void skslset_lineno (int line_number , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ /* lineno is only valid if an input buffer exists. */
+ if (! YY_CURRENT_BUFFER )
+ YY_FATAL_ERROR( "skslset_lineno called with no buffer" );
+
+ yylineno = line_number;
+}
+
+/** Set the current column.
+ * @param column_no
+ * @param yyscanner The scanner object.
+ */
+void skslset_column (int column_no , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ /* column is only valid if an input buffer exists. */
+ if (! YY_CURRENT_BUFFER )
+ YY_FATAL_ERROR( "skslset_column called with no buffer" );
+
+ yycolumn = column_no;
+}
+
+/** Set the input stream. This does not discard the current
+ * input buffer.
+ * @param in_str A readable stream.
+ * @param yyscanner The scanner object.
+ * @see sksl_switch_to_buffer
+ */
+void skslset_in (FILE * in_str , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ yyin = in_str ;
+}
+
+void skslset_out (FILE * out_str , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ yyout = out_str ;
+}
+
+int skslget_debug (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yy_flex_debug;
+}
+
+void skslset_debug (int bdebug , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ yy_flex_debug = bdebug ;
+}
+
+/* Accessor methods for yylval and yylloc */
+
+/* User-visible API */
+
+/* sksllex_init is special because it creates the scanner itself, so it is
+ * the ONLY reentrant function that doesn't take the scanner as the last argument.
+ * That's why we explicitly handle the declaration, instead of using our macros.
+ */
+
+int sksllex_init(yyscan_t* ptr_yy_globals)
+
+{
+ if (ptr_yy_globals == NULL){
+ errno = EINVAL;
+ return 1;
+ }
+
+ *ptr_yy_globals = (yyscan_t) skslalloc ( sizeof( struct yyguts_t ), NULL );
+
+ if (*ptr_yy_globals == NULL){
+ errno = ENOMEM;
+ return 1;
+ }
+
+ /* By setting to 0xAA, we expose bugs in yy_init_globals. Leave at 0x00 for releases. */
+ memset(*ptr_yy_globals,0x00,sizeof(struct yyguts_t));
+
+ return yy_init_globals ( *ptr_yy_globals );
+}
+
+/* sksllex_init_extra has the same functionality as sksllex_init, but follows the
+ * convention of taking the scanner as the last argument. Note however, that
+ * this is a *pointer* to a scanner, as it will be allocated by this call (and
+ * is the reason, too, why this function also must handle its own declaration).
+ * The user defined value in the first argument will be available to skslalloc in
+ * the yyextra field.
+ */
+
+int sksllex_init_extra(YY_EXTRA_TYPE yy_user_defined,yyscan_t* ptr_yy_globals )
+
+{
+ struct yyguts_t dummy_yyguts;
+
+ skslset_extra (yy_user_defined, &dummy_yyguts);
+
+ if (ptr_yy_globals == NULL){
+ errno = EINVAL;
+ return 1;
+ }
+
+ *ptr_yy_globals = (yyscan_t) skslalloc ( sizeof( struct yyguts_t ), &dummy_yyguts );
+
+ if (*ptr_yy_globals == NULL){
+ errno = ENOMEM;
+ return 1;
+ }
+
+ /* By setting to 0xAA, we expose bugs in
+ yy_init_globals. Leave at 0x00 for releases. */
+ memset(*ptr_yy_globals,0x00,sizeof(struct yyguts_t));
+
+ skslset_extra (yy_user_defined, *ptr_yy_globals);
+
+ return yy_init_globals ( *ptr_yy_globals );
+}
+
+static int yy_init_globals (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ /* Initialization is the same as for the non-reentrant scanner.
+ * This function is called from sksllex_destroy(), so don't allocate here.
+ */
+
+ yyg->yy_buffer_stack = 0;
+ yyg->yy_buffer_stack_top = 0;
+ yyg->yy_buffer_stack_max = 0;
+ yyg->yy_c_buf_p = (char *) 0;
+ yyg->yy_init = 0;
+ yyg->yy_start = 0;
+
+ yyg->yy_start_stack_ptr = 0;
+ yyg->yy_start_stack_depth = 0;
+ yyg->yy_start_stack = NULL;
+
+/* Defined in main.c */
+#ifdef YY_STDINIT
+ yyin = stdin;
+ yyout = stdout;
+#else
+ yyin = (FILE *) 0;
+ yyout = (FILE *) 0;
+#endif
+
+ /* For future reference: Set errno on error, since we are called by
+ * sksllex_init()
+ */
+ return 0;
+}
+
+/* sksllex_destroy is for both reentrant and non-reentrant scanners. */
+int sksllex_destroy (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ /* Pop the buffer stack, destroying each element. */
+ while(YY_CURRENT_BUFFER){
+ sksl_delete_buffer(YY_CURRENT_BUFFER ,yyscanner );
+ YY_CURRENT_BUFFER_LVALUE = NULL;
+ skslpop_buffer_state(yyscanner);
+ }
+
+ /* Destroy the stack itself. */
+ skslfree(yyg->yy_buffer_stack ,yyscanner);
+ yyg->yy_buffer_stack = NULL;
+
+ /* Destroy the start condition stack. */
+ skslfree(yyg->yy_start_stack ,yyscanner );
+ yyg->yy_start_stack = NULL;
+
+ /* Reset the globals. This is important in a non-reentrant scanner so the next time
+ * sksllex() is called, initialization will occur. */
+ yy_init_globals( yyscanner);
+
+ /* Destroy the main struct (reentrant only). */
+ skslfree ( yyscanner , yyscanner );
+ yyscanner = NULL;
+ return 0;
+}
+
+/*
+ * Internal utility routines.
+ */
+
+#ifndef yytext_ptr
+static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yyscan_t yyscanner)
+{
+ register int i;
+ for ( i = 0; i < n; ++i )
+ s1[i] = s2[i];
+}
+#endif
+
+#ifdef YY_NEED_STRLEN
+static int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner)
+{
+ register int n;
+ for ( n = 0; s[n]; ++n )
+ ;
+
+ return n;
+}
+#endif
+
+void *skslalloc (yy_size_t size , yyscan_t yyscanner)
+{
+ return (void *) malloc( size );
+}
+
+void *skslrealloc (void * ptr, yy_size_t size , yyscan_t yyscanner)
+{
+ /* The cast to (char *) in the following accommodates both
+ * implementations that use char* generic pointers, and those
+ * that use void* generic pointers. It works with the latter
+ * because both ANSI C and C++ allow castless assignment from
+ * any pointer type to void*, and deal with argument conversions
+ * as though doing an assignment.
+ */
+ return (void *) realloc( (char *) ptr, size );
+}
+
+void skslfree (void * ptr , yyscan_t yyscanner)
+{
+ free( (char *) ptr ); /* see skslrealloc() for (char *) cast */
+}
+
+#define YYTABLES_NAME "yytables"
+
+#line 187 "sksl.flex"
+
+
+
+int skslwrap(yyscan_t scanner) {
+ return 1; // terminate
+}
+
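
For readers of this patch, a minimal usage sketch of the reentrant scanner API generated above (sksllex_init, sksl_scan_string, sksllex, skslget_text, sksllex_destroy). It is illustrative only and not part of the change; it assumes the declarations from lex.sksl.c (or a matching flex-generated header) are visible to the caller, and the lex_sksl_dump name and the printf diagnostics are placeholders.

    #include <stdio.h>  /* printf; the scanner declarations above are assumed visible */

    /* Sketch: drive the SkSL scanner over an in-memory, NUL-terminated string. */
    static void lex_sksl_dump(const char* source) {
        yyscan_t scanner;
        if (sksllex_init(&scanner) != 0) {   /* allocates struct yyguts_t; sets errno on failure */
            return;
        }
        YY_BUFFER_STATE buffer = sksl_scan_string(source, scanner);  /* scans a copy of source */
        int token;
        /* sksllex() returns 0 (YY_NULL) once skslwrap() reports end of input. */
        while ((token = sksllex(scanner)) != 0) {
            printf("token %d: '%s'\n", token, skslget_text(scanner));
        }
        sksl_delete_buffer(buffer, scanner);
        sksllex_destroy(scanner);            /* frees remaining buffers and the scanner itself */
    }
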
diff --git a/gfx/skia/skia/src/sksl/sksl.flex b/gfx/skia/skia/src/sksl/sksl.flex
new file mode 100644
index 000000000..67b48e9cb
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl.flex
@@ -0,0 +1,191 @@
+/*
+
+ This file is IGNORED during the build process!
+
+ As this file is updated so infrequently and flex is not universally present on build machines,
+ the lex.sksl.c file must be manually regenerated if you make any changes to this file. Just run:
+
+ flex sksl.flex
+
+ You will have to manually add a copyright notice to the top of lex.sksl.c.
+
+*/
+
+%option prefix="sksl"
+%option reentrant
+%option yylineno
+%option never-interactive
+%option nounistd
+
+DIGIT [0-9]
+LETTER [a-zA-Z_$]
+
+%%
+
+{DIGIT}*"."{DIGIT}+([eE][+-]?{DIGIT}+)? { return SkSL::Token::FLOAT_LITERAL; }
+
+{DIGIT}+"."{DIGIT}*([eE][+-]?{DIGIT}+)? { return SkSL::Token::FLOAT_LITERAL; }
+
+{DIGIT}+([eE][+-]?{DIGIT}+) { return SkSL::Token::FLOAT_LITERAL; }
+
+{DIGIT}+ { return SkSL::Token::INT_LITERAL; }
+
+true { return SkSL::Token::TRUE_LITERAL; }
+
+false { return SkSL::Token::FALSE_LITERAL; }
+
+if { return SkSL::Token::IF; }
+
+else { return SkSL::Token::ELSE; }
+
+for { return SkSL::Token::FOR; }
+
+while { return SkSL::Token::WHILE; }
+
+do { return SkSL::Token::DO; }
+
+break { return SkSL::Token::BREAK; }
+
+continue { return SkSL::Token::CONTINUE; }
+
+discard { return SkSL::Token::DISCARD; }
+
+return { return SkSL::Token::RETURN; }
+
+in { return SkSL::Token::IN; }
+
+out { return SkSL::Token::OUT; }
+
+inout { return SkSL::Token::INOUT; }
+
+uniform { return SkSL::Token::UNIFORM; }
+
+const { return SkSL::Token::CONST; }
+
+lowp { return SkSL::Token::LOWP; }
+
+mediump { return SkSL::Token::MEDIUMP; }
+
+highp { return SkSL::Token::HIGHP; }
+
+flat { return SkSL::Token::FLAT; }
+
+noperspective { return SkSL::Token::NOPERSPECTIVE; }
+
+struct { return SkSL::Token::STRUCT; }
+
+layout { return SkSL::Token::LAYOUT; }
+
+precision { return SkSL::Token::PRECISION; }
+
+{LETTER}({DIGIT}|{LETTER})* { return SkSL::Token::IDENTIFIER; }
+
+"#"{LETTER}({DIGIT}|{LETTER})* { return SkSL::Token::DIRECTIVE; }
+
+"(" { return SkSL::Token::LPAREN; }
+
+")" { return SkSL::Token::RPAREN; }
+
+"{" { return SkSL::Token::LBRACE; }
+
+"}" { return SkSL::Token::RBRACE; }
+
+"[" { return SkSL::Token::LBRACKET; }
+
+"]" { return SkSL::Token::RBRACKET; }
+
+"." { return SkSL::Token::DOT; }
+
+"," { return SkSL::Token::COMMA; }
+
+"++" { return SkSL::Token::PLUSPLUS; }
+
+"--" { return SkSL::Token::MINUSMINUS; }
+
+"+" { return SkSL::Token::PLUS; }
+
+"-" { return SkSL::Token::MINUS; }
+
+"*" { return SkSL::Token::STAR; }
+
+"/" { return SkSL::Token::SLASH; }
+
+"%" { return SkSL::Token::PERCENT; }
+
+"<<" { return SkSL::Token::SHL; }
+
+">>" { return SkSL::Token::SHR; }
+
+"|" { return SkSL::Token::BITWISEOR; }
+
+"^" { return SkSL::Token::BITWISEXOR; }
+
+"&" { return SkSL::Token::BITWISEAND; }
+
+"||" { return SkSL::Token::LOGICALOR; }
+
+"^^" { return SkSL::Token::LOGICALXOR; }
+
+"&&" { return SkSL::Token::LOGICALAND; }
+
+"!" { return SkSL::Token::NOT; }
+
+"?" { return SkSL::Token::QUESTION; }
+
+":" { return SkSL::Token::COLON; }
+
+"=" { return SkSL::Token::EQ; }
+
+"==" { return SkSL::Token::EQEQ; }
+
+"!=" { return SkSL::Token::NEQ; }
+
+">" { return SkSL::Token::GT; }
+
+"<" { return SkSL::Token::LT; }
+
+">=" { return SkSL::Token::GTEQ; }
+
+"<=" { return SkSL::Token::LTEQ; }
+
+"+=" { return SkSL::Token::PLUSEQ; }
+
+"-=" { return SkSL::Token::MINUSEQ; }
+
+"*=" { return SkSL::Token::STAREQ; }
+
+"/=" { return SkSL::Token::SLASHEQ; }
+
+"%=" { return SkSL::Token::PERCENTEQ; }
+
+"<<=" { return SkSL::Token::SHLEQ; }
+
+">>=" { return SkSL::Token::SHREQ; }
+
+"|=" { return SkSL::Token::BITWISEOREQ; }
+
+"^=" { return SkSL::Token::BITWISEXOREQ; }
+
+"&=" { return SkSL::Token::BITWISEANDEQ; }
+
+"||=" { return SkSL::Token::LOGICALOREQ; }
+
+"^^=" { return SkSL::Token::LOGICALXOREQ; }
+
+"&&=" { return SkSL::Token::LOGICALANDEQ; }
+
+";" { return SkSL::Token::SEMICOLON; }
+
+"//".* /* line comment */
+
+"/*"([^*]|"*"[^/])*"*/" /* block comment */
+
+[ \t\r\n]+ /* whitespace */
+
+. { return SkSL::Token::INVALID_TOKEN; }
+
+%%
+
+int skslwrap(yyscan_t scanner) {
+ return 1; // terminate
+}
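
As a worked example of the rule set above (not part of the change): flex always takes the longest match, so an input fragment such as

    x += 1.5;   // bump

is tokenized as IDENTIFIER ("x"), PLUSEQ ("+=", which wins over PLUS followed by EQ), FLOAT_LITERAL ("1.5", which wins over INT_LITERAL "1"), and SEMICOLON. The surrounding whitespace and the "// bump" comment are consumed by the whitespace and line-comment rules and produce no tokens.
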
diff --git a/gfx/skia/skia/src/sksl/sksl.include b/gfx/skia/skia/src/sksl/sksl.include
new file mode 100644
index 000000000..4fd5511ee
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl.include
@@ -0,0 +1,544 @@
+STRINGIFY(
+
+// defines built-in functions supported by SkiaSL
+
+$genType radians($genType degrees);
+$genType sin($genType angle);
+$genType cos($genType angle);
+$genType tan($genType angle);
+$genType asin($genType x);
+$genType acos($genType x);
+$genType atan($genType y, $genType x);
+$genType atan($genType y_over_x);
+$genType sinh($genType x);
+$genType cosh($genType x);
+$genType tanh($genType x);
+$genType asinh($genType x);
+$genType acosh($genType x);
+$genType atanh($genType x);
+$genType pow($genType x, $genType y);
+$genType exp($genType x);
+$genType log($genType x);
+$genType exp2($genType x);
+$genType log2($genType x);
+$genType sqrt($genType x);
+$genDType sqrt($genDType x);
+$genType inversesqrt($genType x);
+$genDType inversesqrt($genDType x);
+$genType abs($genType x);
+$genIType abs($genIType x);
+$genDType abs($genDType x);
+$genType sign($genType x);
+$genIType sign($genIType x);
+$genDType sign($genDType x);
+$genType floor($genType x);
+$genDType floor($genDType x);
+$genType trunc($genType x);
+$genDType trunc($genDType x);
+$genType round($genType x);
+$genDType round($genDType x);
+$genType roundEven($genType x);
+$genDType roundEven($genDType x);
+$genType ceil($genType x);
+$genDType ceil($genDType x);
+$genType fract($genType x);
+$genDType fract($genDType x);
+$genType mod($genType x, float y);
+$genType mod($genType x, $genType y);
+$genDType mod($genDType x, double y);
+$genDType mod($genDType x, $genDType y);
+$genType modf($genType x, out $genType i);
+$genDType modf($genDType x, out $genDType i);
+$genType min($genType x, $genType y);
+$genType min($genType x, float y);
+$genDType min($genDType x, $genDType y);
+$genDType min($genDType x, double y);
+$genIType min($genIType x, $genIType y);
+$genIType min($genIType x, int y);
+$genUType min($genUType x, $genUType y);
+$genUType min($genUType x, uint y);
+$genType max($genType x, $genType y);
+$genType max($genType x, float y);
+$genDType max($genDType x, $genDType y);
+$genDType max($genDType x, double y);
+$genIType max($genIType x, $genIType y);
+$genIType max($genIType x, int y);
+$genUType max($genUType x, $genUType y);
+$genUType max($genUType x, uint y);
+$genType clamp($genType x, $genType minVal, $genType maxVal);
+$genType clamp($genType x, float minVal, float maxVal);
+$genDType clamp($genDType x, $genDType minVal, $genDType maxVal);
+$genDType clamp($genDType x, double minVal, double maxVal);
+$genIType clamp($genIType x, $genIType minVal, $genIType maxVal);
+$genIType clamp($genIType x, int minVal, int maxVal);
+$genUType clamp($genUType x, $genUType minVal, $genUType maxVal);
+$genUType clamp($genUType x, uint minVal, uint maxVal);
+$genType mix($genType x, $genType y, $genType a);
+$genType mix($genType x, $genType y, float a);
+$genDType mix($genDType x, $genDType y, $genDType a);
+$genDType mix($genDType x, $genDType y, double a);
+$genType mix($genType x, $genType y, $genBType a);
+$genDType mix($genDType x, $genDType y, $genBType a);
+$genIType mix($genIType x, $genIType y, $genBType a);
+$genUType mix($genUType x, $genUType y, $genBType a);
+$genBType mix($genBType x, $genBType y, $genBType a);
+$genType step($genType edge, $genType x);
+$genType step(float edge, $genType x);
+$genDType step($genDType edge, $genDType x);
+$genDType step(double edge, $genDType x);
+$genType smoothstep($genType edge0, $genType edge1, $genType x);
+$genType smoothstep(float edge0, float edge1, $genType x);
+$genDType smoothstep($genDType edge0, $genDType edge1, $genDType x);
+$genDType smoothstep(double edge0, double edge1, $genDType x);
+$genBType isnan($genType x);
+$genBType isnan($genDType x);
+$genBType isinf($genType x);
+$genBType isinf($genDType x);
+$genIType floatBitsToInt($genType value);
+$genUType floatBitsToUint($genType value);
+$genType intBitsToFloat($genIType value);
+$genType uintBitsToFloat($genUType value);
+$genType fma($genType a, $genType b, $genType c);
+$genDType fma($genDType a, $genDType b, $genDType c);
+$genType frexp($genType x, out $genIType exp);
+$genDType frexp($genDType x, out $genIType exp);
+$genType ldexp($genType x, in $genIType exp);
+$genDType ldexp($genDType x, in $genIType exp);
+uint packUnorm2x16(vec2 v);
+uint packSnorm2x16(vec2 v);
+uint packUnorm4x8(vec4 v);
+uint packSnorm4x8(vec4 v);
+vec2 unpackUnorm2x16(uint p);
+vec2 unpackSnorm2x16(uint p);
+vec4 unpackUnorm4x8(uint p);
+vec4 unpackSnorm4x8(uint p);
+double packDouble2x32(uvec2 v);
+uvec2 unpackDouble2x32(double v);
+uint packHalf2x16(vec2 v);
+vec2 unpackHalf2x16(uint v);
+float length($genType x);
+double length($genDType x);
+float distance($genType p0, $genType p1);
+double distance($genDType p0, $genDType p1);
+float dot($genType x, $genType y);
+double dot($genDType x, $genDType y);
+vec3 cross(vec3 x, vec3 y);
+dvec3 cross(dvec3 x, dvec3 y);
+$genType normalize($genType x);
+$genDType normalize($genDType x);
+vec4 ftransform();
+$genType faceforward($genType N, $genType I, $genType Nref);
+$genDType faceforward($genDType N, $genDType I, $genDType Nref);
+$genType reflect($genType I, $genType N);
+$genDType reflect($genDType I, $genDType N);
+$genType refract($genType I, $genType N, float eta);
+$genDType refract($genDType I, $genDType N, float eta);
+$mat matrixCompMult($mat x, $mat y);
+mat2 outerProduct(vec2 c, vec2 r);
+mat3 outerProduct(vec3 c, vec3 r);
+mat4 outerProduct(vec4 c, vec4 r);
+mat2x3 outerProduct(vec3 c, vec2 r);
+mat3x2 outerProduct(vec2 c, vec3 r);
+mat2x4 outerProduct(vec4 c, vec2 r);
+mat4x2 outerProduct(vec2 c, vec4 r);
+mat3x4 outerProduct(vec4 c, vec3 r);
+mat4x3 outerProduct(vec3 c, vec4 r);
+mat2 transpose(mat2 m);
+mat3 transpose(mat3 m);
+mat4 transpose(mat4 m);
+mat2x3 transpose(mat3x2 m);
+mat3x2 transpose(mat2x3 m);
+mat2x4 transpose(mat4x2 m);
+mat4x2 transpose(mat2x4 m);
+mat3x4 transpose(mat4x3 m);
+mat4x3 transpose(mat3x4 m);
+float determinant(mat2 m);
+float determinant(mat3 m);
+float determinant(mat4 m);
+mat2 inverse(mat2 m);
+mat3 inverse(mat3 m);
+mat4 inverse(mat4 m);
+$bvec lessThan($vec x, $vec y);
+$bvec lessThan($ivec x, $ivec y);
+$bvec lessThan($uvec x, $uvec y);
+$bvec lessThanEqual($vec x, $vec y);
+$bvec lessThanEqual($ivec x, $ivec y);
+$bvec lessThanEqual($uvec x, $uvec y);
+$bvec greaterThan($vec x, $vec y);
+$bvec greaterThan($ivec x, $ivec y);
+$bvec greaterThan($uvec x, $uvec y);
+$bvec greaterThanEqual($vec x, $vec y);
+$bvec greaterThanEqual($ivec x, $ivec y);
+$bvec greaterThanEqual($uvec x, $uvec y);
+$bvec equal($vec x, $vec y);
+$bvec equal($ivec x, $ivec y);
+$bvec equal($uvec x, $uvec y);
+$bvec equal($bvec x, $bvec y);
+$bvec notEqual($vec x, $vec y);
+$bvec notEqual($ivec x, $ivec y);
+$bvec notEqual($uvec x, $uvec y);
+$bvec notEqual($bvec x, $bvec y);
+bool any($bvec x);
+bool all($bvec x);
+$bvec not($bvec x);
+$genUType uaddCarry($genUType x, $genUType y, out $genUType carry);
+$genUType usubBorrow($genUType x, $genUType y, out $genUType borrow);
+void umulExtended($genUType x, $genUType y, out $genUType msb, out $genUType lsb);
+void imulExtended($genIType x, $genIType y, out $genIType msb, out $genIType lsb);
+$genIType bitfieldExtract($genIType value, int offset, int bits);
+$genUType bitfieldExtract($genUType value, int offset, int bits);
+$genIType bitfieldInsert($genIType base, $genIType insert, int offset, int bits);
+$genUType bitfieldInsert($genUType base, $genUType insert, int offset, int bits);
+$genIType bitfieldReverse($genIType value);
+$genUType bitfieldReverse($genUType value);
+$genIType bitCount($genIType value);
+$genIType bitCount($genUType value);
+$genIType findLSB($genIType value);
+$genIType findLSB($genUType value);
+$genIType findMSB($genIType value);
+$genIType findMSB($genUType value);
+int textureSize($gsampler1D sampler, int lod);
+ivec2 textureSize($gsampler2D sampler, int lod);
+ivec3 textureSize($gsampler3D sampler, int lod);
+ivec2 textureSize($gsamplerCube sampler, int lod);
+int textureSize(sampler1DShadow sampler, int lod);
+ivec2 textureSize(sampler2DShadow sampler, int lod);
+ivec2 textureSize(samplerCubeShadow sampler, int lod);
+ivec3 textureSize($gsamplerCubeArray sampler, int lod);
+ivec3 textureSize(samplerCubeArrayShadow sampler, int lod);
+ivec2 textureSize($gsampler2DRect sampler);
+ivec2 textureSize(sampler2DRectShadow sampler);
+ivec2 textureSize($gsampler1DArray sampler, int lod);
+ivec3 textureSize($gsampler2DArray sampler, int lod);
+ivec2 textureSize(sampler1DArrayShadow sampler, int lod);
+ivec3 textureSize(sampler2DArrayShadow sampler, int lod);
+int textureSize($gsamplerBuffer sampler);
+ivec2 textureSize($gsampler2DMS sampler);
+ivec3 textureSize($gsampler2DMSArray sampler);
+vec2 textureQueryLod($gsampler1D sampler, float P);
+vec2 textureQueryLod($gsampler2D sampler, vec2 P);
+vec2 textureQueryLod($gsampler3D sampler, vec3 P);
+vec2 textureQueryLod($gsamplerCube sampler, vec3 P);
+vec2 textureQueryLod($gsampler1DArray sampler, float P);
+vec2 textureQueryLod($gsampler2DArray sampler, vec2 P);
+vec2 textureQueryLod($gsamplerCubeArray sampler, vec3 P);
+vec2 textureQueryLod(sampler1DShadow sampler, float P);
+vec2 textureQueryLod(sampler2DShadow sampler, vec2 P);
+vec2 textureQueryLod(samplerCubeShadow sampler, vec3 P);
+vec2 textureQueryLod(sampler1DArrayShadow sampler, float P);
+vec2 textureQueryLod(sampler2DArrayShadow sampler, vec2 P);
+vec2 textureQueryLod(samplerCubeArrayShadow sampler, vec3 P);
+int textureQueryLevels($gsampler1D sampler);
+int textureQueryLevels($gsampler2D sampler);
+int textureQueryLevels($gsampler3D sampler);
+int textureQueryLevels($gsamplerCube sampler);
+int textureQueryLevels($gsampler1DArray sampler);
+int textureQueryLevels($gsampler2DArray sampler);
+int textureQueryLevels($gsamplerCubeArray sampler);
+int textureQueryLevels(sampler1DShadow sampler);
+int textureQueryLevels(sampler2DShadow sampler);
+int textureQueryLevels(samplerCubeShadow sampler);
+int textureQueryLevels(sampler1DArrayShadow sampler);
+int textureQueryLevels(sampler2DArrayShadow sampler);
+int textureQueryLevels(samplerCubeArrayShadow sampler);
+$gvec4 texture($gsampler1D sampler, float P);
+$gvec4 texture($gsampler1D sampler, float P, float bias);
+$gvec4 texture($gsampler2D sampler, vec2 P);
+$gvec4 texture($gsampler2D sampler, vec2 P, float bias);
+$gvec4 texture($gsampler3D sampler, vec3 P);
+$gvec4 texture($gsampler3D sampler, vec3 P, float bias);
+$gvec4 texture($gsamplerCube sampler, vec3 P);
+$gvec4 texture($gsamplerCube sampler, vec3 P, float bias);
+float texture(sampler1DShadow sampler, vec3 P);
+float texture(sampler1DShadow sampler, vec3 P, float bias);
+float texture(sampler2DShadow sampler, vec3 P);
+float texture(sampler2DShadow sampler, vec3 P, float bias);
+float texture(samplerCubeShadow sampler, vec4 P);
+float texture(samplerCubeShadow sampler, vec4 P, float bias);
+$gvec4 texture($gsampler1DArray sampler, vec2 P);
+$gvec4 texture($gsampler1DArray sampler, vec2 P, float bias);
+$gvec4 texture($gsampler2DArray sampler, vec3 P);
+$gvec4 texture($gsampler2DArray sampler, vec3 P, float bias);
+$gvec4 texture($gsamplerCubeArray sampler, vec4 P);
+$gvec4 texture($gsamplerCubeArray sampler, vec4 P, float bias);
+float texture(sampler1DArrayShadow sampler, vec3 P);
+float texture(sampler1DArrayShadow sampler, vec3 P, float bias);
+float texture(sampler2DArrayShadow sampler, vec4 P);
+$gvec4 texture($gsampler2DRect sampler, vec2 P);
+float texture(sampler2DRectShadow sampler, vec3 P);
+float texture($gsamplerCubeArrayShadow sampler, vec4 P, float compare);
+
+)
+
+// split into multiple chunks, as MSVC++ complains if a single string is too long
+
+STRINGIFY(
+
+$gvec4 textureProj($gsampler1D sampler, vec2 P);
+$gvec4 textureProj($gsampler1D sampler, vec2 P, float bias);
+$gvec4 textureProj($gsampler1D sampler, vec4 P);
+$gvec4 textureProj($gsampler1D sampler, vec4 P, float bias);
+$gvec4 textureProj($gsampler2D sampler, vec3 P);
+$gvec4 textureProj($gsampler2D sampler, vec3 P, float bias);
+$gvec4 textureProj($gsampler2D sampler, vec4 P);
+$gvec4 textureProj($gsampler2D sampler, vec4 P, float bias);
+$gvec4 textureProj($gsampler3D sampler, vec4 P);
+$gvec4 textureProj($gsampler3D sampler, vec4 P, float bias);
+float textureProj(sampler1DShadow sampler, vec4 P);
+float textureProj(sampler1DShadow sampler, vec4 P, float bias);
+float textureProj(sampler2DShadow sampler, vec4 P);
+float textureProj(sampler2DShadow sampler, vec4 P, float bias);
+$gvec4 textureProj($gsampler2DRect sampler, vec3 P);
+$gvec4 textureProj($gsampler2DRect sampler, vec4 P);
+float textureProj(sampler2DRectShadow sampler, vec4 P);
+$gvec4 textureLod($gsampler1D sampler, float P, float lod);
+$gvec4 textureLod($gsampler2D sampler, vec2 P, float lod);
+$gvec4 textureLod($gsampler3D sampler, vec3 P, float lod);
+$gvec4 textureLod($gsamplerCube sampler, vec3 P, float lod);
+float textureLod(sampler1DShadow sampler, vec3 P, float lod);
+float textureLod(sampler2DShadow sampler, vec3 P, float lod);
+$gvec4 textureLod($gsampler1DArray sampler, vec2 P, float lod);
+$gvec4 textureLod($gsampler2DArray sampler, vec3 P, float lod);
+float textureLod(sampler1DArrayShadow sampler, vec3 P, float lod);
+$gvec4 textureLod($gsamplerCubeArray sampler, vec4 P, float lod);
+$gvec4 textureOffset($gsampler1D sampler, float P, int offset);
+$gvec4 textureOffset($gsampler1D sampler, float P, int offset, float bias);
+$gvec4 textureOffset($gsampler2D sampler, vec2 P, ivec2 offset);
+$gvec4 textureOffset($gsampler2D sampler, vec2 P, ivec2 offset, float bias);
+$gvec4 textureOffset($gsampler3D sampler, vec3 P, ivec3 offset);
+$gvec4 textureOffset($gsampler3D sampler, vec3 P, ivec3 offset, float bias);
+$gvec4 textureOffset($gsampler2DRect sampler, vec2 P, ivec2 offset);
+float textureOffset(sampler2DRectShadow sampler, vec3 P, ivec2 offset);
+float textureOffset(sampler1DShadow sampler, vec3 P, int offset);
+float textureOffset(sampler1DShadow sampler, vec3 P, int offset, float bias);
+float textureOffset(sampler2DShadow sampler, vec3 P, ivec2 offset);
+float textureOffset(sampler2DShadow sampler, vec3 P, ivec2 offset, float bias);
+$gvec4 textureOffset($gsampler1DArray sampler, vec2 P, int offset);
+$gvec4 textureOffset($gsampler1DArray sampler, vec2 P, int offset, float bias);
+$gvec4 textureOffset($gsampler2DArray sampler, vec3 P, ivec2 offset);
+$gvec4 textureOffset($gsampler2DArray sampler, vec3 P, ivec2 offset, float bias);
+float textureOffset(sampler1DArrayShadow sampler, vec3 P, int offset);
+float textureOffset(sampler1DArrayShadow sampler, vec3 P, int offset, float bias);
+float textureOffset(sampler2DArrayShadow sampler, vec4 P, ivec2 offset);
+$gvec4 texelFetch($gsampler1D sampler, int P, int lod);
+$gvec4 texelFetch($gsampler2D sampler, ivec2 P, int lod);
+$gvec4 texelFetch($gsampler3D sampler, ivec3 P, int lod);
+$gvec4 texelFetch($gsampler2DRect sampler, ivec2 P);
+$gvec4 texelFetch($gsampler1DArray sampler, ivec2 P, int lod);
+$gvec4 texelFetch($gsampler2DArray sampler, ivec3 P, int lod);
+$gvec4 texelFetch($gsamplerBuffer sampler, int P);
+$gvec4 texelFetch($gsampler2DMS sampler, ivec2 P, int sample);
+$gvec4 texelFetch($gsampler2DMSArray sampler, ivec3 P, int sample);
+$gvec4 texelFetchOffset($gsampler1D sampler, int P, int lod, int offset);
+$gvec4 texelFetchOffset($gsampler2D sampler, ivec2 P, int lod, ivec2 offset);
+$gvec4 texelFetchOffset($gsampler3D sampler, ivec3 P, int lod, ivec3 offset);
+$gvec4 texelFetchOffset($gsampler2DRect sampler, ivec2 P, ivec2 offset);
+$gvec4 texelFetchOffset($gsampler1DArray sampler, ivec2 P, int lod, int offset);
+$gvec4 texelFetchOffset($gsampler2DArray sampler, ivec3 P, int lod, ivec2 offset);
+$gvec4 textureProjOffset($gsampler1D sampler, vec2 P, int offset);
+$gvec4 textureProjOffset($gsampler1D sampler, vec2 P, int offset, float bias);
+$gvec4 textureProjOffset($gsampler1D sampler, vec4 P, int offset);
+$gvec4 textureProjOffset($gsampler1D sampler, vec4 P, int offset, float bias);
+$gvec4 textureProjOffset($gsampler2D sampler, vec3 P, ivec2 offset);
+$gvec4 textureProjOffset($gsampler2D sampler, vec3 P, ivec2 offset, float bias);
+$gvec4 textureProjOffset($gsampler2D sampler, vec4 P, ivec2 offset);
+$gvec4 textureProjOffset($gsampler2D sampler, vec4 P, ivec2 offset, float bias);
+$gvec4 textureProjOffset($gsampler3D sampler, vec4 P, ivec3 offset);
+$gvec4 textureProjOffset($gsampler3D sampler, vec4 P, ivec3 offset, float bias);
+$gvec4 textureProjOffset($gsampler2DRect sampler, vec3 P, ivec2 offset);
+$gvec4 textureProjOffset($gsampler2DRect sampler, vec4 P, ivec2 offset);
+float textureProjOffset(sampler2DRectShadow sampler, vec4 P, ivec2 offset);
+float textureProjOffset(sampler1DShadow sampler, vec4 P, int offset);
+float textureProjOffset(sampler1DShadow sampler, vec4 P, int offset, float bias);
+float textureProjOffset(sampler2DShadow sampler, vec4 P, ivec2 offset);
+float textureProjOffset(sampler2DShadow sampler, vec4 P, ivec2 offset, float bias);
+$gvec4 textureLodOffset($gsampler1D sampler, float P, float lod, int offset);
+$gvec4 textureLodOffset($gsampler2D sampler, vec2 P, float lod, ivec2 offset);
+$gvec4 textureLodOffset($gsampler3D sampler, vec3 P, float lod, ivec3 offset);
+float textureLodOffset(sampler1DShadow sampler, vec3 P, float lod, int offset);
+float textureLodOffset(sampler2DShadow sampler, vec3 P, float lod, ivec2 offset);
+$gvec4 textureLodOffset($gsampler1DArray sampler, vec2 P, float lod, int offset);
+$gvec4 textureLodOffset($gsampler2DArray sampler, vec3 P, float lod, ivec2 offset);
+float textureLodOffset(sampler1DArrayShadow sampler, vec3 P, float lod, int offset);
+$gvec4 textureProjLod($gsampler1D sampler, vec2 P, float lod);
+$gvec4 textureProjLod($gsampler1D sampler, vec4 P, float lod);
+$gvec4 textureProjLod($gsampler2D sampler, vec3 P, float lod);
+$gvec4 textureProjLod($gsampler2D sampler, vec4 P, float lod);
+$gvec4 textureProjLod($gsampler3D sampler, vec4 P, float lod);
+float textureProjLod(sampler1DShadow sampler, vec4 P, float lod);
+float textureProjLod(sampler2DShadow sampler, vec4 P, float lod);
+$gvec4 textureProjLodOffset($gsampler1D sampler, vec2 P, float lod, int offset);
+$gvec4 textureProjLodOffset($gsampler1D sampler, vec4 P, float lod, int offset);
+$gvec4 textureProjLodOffset($gsampler2D sampler, vec3 P, float lod, ivec2 offset);
+$gvec4 textureProjLodOffset($gsampler2D sampler, vec4 P, float lod, ivec2 offset);
+$gvec4 textureProjLodOffset($gsampler3D sampler, vec4 P, float lod, ivec3 offset);
+float textureProjLodOffset(sampler1DShadow sampler, vec4 P, float lod, int offset);
+float textureProjLodOffset(sampler2DShadow sampler, vec4 P, float lod, ivec2 offset);
+$gvec4 textureGrad($gsampler1D sampler, float P, float dPdx, float dPdy);
+$gvec4 textureGrad($gsampler2D sampler, vec2 P, vec2 dPdx, vec2 dPdy);
+$gvec4 textureGrad($gsampler3D sampler, vec3 P, vec3 dPdx, vec3 dPdy);
+$gvec4 textureGrad($gsamplerCube sampler, vec3 P, vec3 dPdx, vec3 dPdy);
+$gvec4 textureGrad($gsampler2DRect sampler, vec2 P, vec2 dPdx, vec2 dPdy);
+float textureGrad(sampler2DRectShadow sampler, vec3 P, vec2 dPdx, vec2 dPdy);
+float textureGrad(sampler1DShadow sampler, vec3 P, float dPdx, float dPdy);
+float textureGrad(sampler2DShadow sampler, vec3 P, vec2 dPdx, vec2 dPdy);
+float textureGrad(samplerCubeShadow sampler, vec4 P, vec3 dPdx, vec3 dPdy);
+$gvec4 textureGrad($gsampler1DArray sampler, vec2 P, float dPdx, float dPdy);
+$gvec4 textureGrad($gsampler2DArray sampler, vec3 P, vec2 dPdx, vec2 dPdy);
+float textureGrad(sampler1DArrayShadow sampler, vec3 P, float dPdx, float dPdy);
+float textureGrad(sampler2DArrayShadow sampler, vec4 P, vec2 dPdx, vec2 dPdy);
+$gvec4 textureGrad($gsamplerCubeArray sampler, vec4 P, vec3 dPdx, vec3 dPdy);
+$gvec4 textureGradOffset($gsampler1D sampler, float P, float dPdx, float dPdy, int offset);
+$gvec4 textureGradOffset($gsampler2D sampler, vec2 P, vec2 dPdx, vec2 dPdy, ivec2 offset);
+$gvec4 textureGradOffset($gsampler3D sampler, vec3 P, vec3 dPdx, vec3 dPdy, ivec3 offset);
+$gvec4 textureGradOffset($gsampler2DRect sampler, vec2 P, vec2 dPdx, vec2 dPdy, ivec2 offset);
+float textureGradOffset(sampler2DRectShadow sampler, vec3 P, vec2 dPdx, vec2 dPdy, ivec2 offset);
+float textureGradOffset(sampler1DShadow sampler, vec3 P, float dPdx, float dPdy, int offset );
+float textureGradOffset(sampler2DShadow sampler, vec3 P, vec2 dPdx, vec2 dPdy, ivec2 offset);
+$gvec4 textureGradOffset($gsampler1DArray sampler, vec2 P, float dPdx, float dPdy, int offset);
+$gvec4 textureGradOffset($gsampler2DArray sampler, vec3 P, vec2 dPdx, vec2 dPdy, ivec2 offset);
+float textureGradOffset(sampler1DArrayShadow sampler, vec3 P, float dPdx, float dPdy, int offset);
+float textureGradOffset(sampler2DArrayShadow sampler, vec4 P, vec2 dPdx, vec2 dPdy, ivec2 offset);
+$gvec4 textureProjGrad($gsampler1D sampler, vec2 P, float dPdx, float dPdy);
+$gvec4 textureProjGrad($gsampler1D sampler, vec4 P, float dPdx, float dPdy);
+$gvec4 textureProjGrad($gsampler2D sampler, vec3 P, vec2 dPdx, vec2 dPdy);
+$gvec4 textureProjGrad($gsampler2D sampler, vec4 P, vec2 dPdx, vec2 dPdy);
+$gvec4 textureProjGrad($gsampler3D sampler, vec4 P, vec3 dPdx, vec3 dPdy);
+$gvec4 textureProjGrad($gsampler2DRect sampler, vec3 P, vec2 dPdx, vec2 dPdy);
+$gvec4 textureProjGrad($gsampler2DRect sampler, vec4 P, vec2 dPdx, vec2 dPdy);
+float textureProjGrad(sampler2DRectShadow sampler, vec4 P, vec2 dPdx, vec2 dPdy);
+float textureProjGrad(sampler1DShadow sampler, vec4 P, float dPdx, float dPdy);
+float textureProjGrad(sampler2DShadow sampler, vec4 P, vec2 dPdx, vec2 dPdy);
+$gvec4 textureProjGradOffset($gsampler1D sampler, vec2 P, float dPdx, float dPdy, int offset);
+$gvec4 textureProjGradOffset($gsampler1D sampler, vec4 P, float dPdx, float dPdy, int offset);
+$gvec4 textureProjGradOffset($gsampler2D sampler, vec3 P, vec2 dPdx, vec2 dPdy, ivec2 offset);
+$gvec4 textureProjGradOffset($gsampler2D sampler, vec4 P, vec2 dPdx, vec2 dPdy, ivec2 offset);
+$gvec4 textureProjGradOffset($gsampler2DRect sampler, vec3 P, vec2 dPdx, vec2 dPdy, ivec2 offset);
+$gvec4 textureProjGradOffset($gsampler2DRect sampler, vec4 P, vec2 dPdx, vec2 dPdy, ivec2 offset);
+float textureProjGradOffset(sampler2DRectShadow sampler, vec4 P, vec2 dPdx, vec2 dPdy, ivec2 offset);
+$gvec4 textureProjGradOffset($gsampler3D sampler, vec4 P, vec3 dPdx, vec3 dPdy, ivec3 offset);
+float textureProjGradOffset(sampler1DShadow sampler, vec4 P, float dPdx, float dPdy, int offset);
+float textureProjGradOffset(sampler2DShadow sampler, vec4 P, vec2 dPdx, vec2 dPdy, ivec2 offset);
+$gvec4 textureGather($gsampler2D sampler, vec2 P);
+$gvec4 textureGather($gsampler2D sampler, vec2 P, int comp);
+$gvec4 textureGather($gsampler2DArray sampler, vec3 P);
+$gvec4 textureGather($gsampler2DArray sampler, vec3 P, int comp);
+$gvec4 textureGather($gsamplerCube sampler, vec3 P);
+$gvec4 textureGather($gsamplerCube sampler, vec3 P, int comp);
+$gvec4 textureGather($gsamplerCubeArray sampler, vec4 P);
+$gvec4 textureGather($gsamplerCubeArray sampler, vec4 P, int comp);
+$gvec4 textureGather($gsampler2DRect sampler, vec2 P);
+$gvec4 textureGather($gsampler2DRect sampler, vec2 P, int comp);
+vec4 textureGather(sampler2DShadow sampler, vec2 P, float refZ);
+vec4 textureGather(sampler2DArrayShadow sampler, vec3 P, float refZ);
+vec4 textureGather(samplerCubeShadow sampler, vec3 P, float refZ);
+vec4 textureGather(samplerCubeArrayShadow sampler, vec4 P, float refZ);
+vec4 textureGather(sampler2DRectShadow sampler, vec2 P, float refZ);
+$gvec4 textureGatherOffset($gsampler2D sampler, vec2 P, ivec2 offset);
+$gvec4 textureGatherOffset($gsampler2D sampler, vec2 P, ivec2 offset, int comp);
+$gvec4 textureGatherOffset($gsampler2DArray sampler, vec3 P, ivec2 offset);
+$gvec4 textureGatherOffset($gsampler2DArray sampler, vec3 P, ivec2 offset, int comp);
+$gvec4 textureGatherOffset($gsampler2DRect sampler, vec2 P, ivec2 offset);
+$gvec4 textureGatherOffset($gsampler2DRect sampler, vec2 P, ivec2 offset, int comp);
+vec4 textureGatherOffset(sampler2DShadow sampler, vec2 P, float refZ, ivec2 offset);
+vec4 textureGatherOffset(sampler2DArrayShadow sampler, vec3 P, float refZ, ivec2 offset);
+vec4 textureGatherOffset(sampler2DRectShadow sampler, vec2 P, float refZ, ivec2 offset);
+/*
+$gvec4 textureGatherOffsets($gsampler2D sampler, vec2 P, ivec2 offsets[4]);
+$gvec4 textureGatherOffsets($gsampler2D sampler, vec2 P, ivec2 offsets[4], int comp);
+$gvec4 textureGatherOffsets($gsampler2DArray sampler, vec3 P, ivec2 offsets[4]);
+$gvec4 textureGatherOffsets($gsampler2DArray sampler, vec3 P, ivec2 offsets[4], int comp);
+$gvec4 textureGatherOffsets($gsampler2DRect sampler, vec2 P, ivec2 offsets[4]);
+$gvec4 textureGatherOffsets($gsampler2DRect sampler, vec2 P, ivec2 offsets[4], int comp);
+vec4 textureGatherOffsets(sampler2DShadow sampler, vec2 P, float refZ, ivec2 offsets[4]);
+vec4 textureGatherOffsets(sampler2DArrayShadow sampler, vec3 P, float refZ, ivec2 offsets[4]);
+vec4 textureGatherOffsets(sampler2DRectShadow sampler, vec2 P, float refZ, ivec2 offsets[4]);
+*/
+vec4 texture1D(sampler1D sampler, float coord);
+vec4 texture1D(sampler1D sampler, float coord, float bias);
+vec4 texture1DProj(sampler1D sampler, vec2 coord);
+vec4 texture1DProj(sampler1D sampler, vec2 coord, float bias);
+vec4 texture1DProj(sampler1D sampler, vec4 coord);
+vec4 texture1DProj(sampler1D sampler, vec4 coord, float bias);
+vec4 texture1DLod(sampler1D sampler, float coord, float lod);
+vec4 texture1DProjLod(sampler1D sampler, vec2 coord, float lod);
+vec4 texture1DProjLod(sampler1D sampler, vec4 coord, float lod);
+vec4 texture2D(sampler2D sampler, vec2 coord);
+vec4 texture2D(sampler2D sampler, vec2 coord, float bias);
+vec4 texture2DProj(sampler2D sampler, vec3 coord);
+vec4 texture2DProj(sampler2D sampler, vec3 coord, float bias);
+vec4 texture2DProj(sampler2D sampler, vec4 coord);
+vec4 texture2DProj(sampler2D sampler, vec4 coord, float bias);
+vec4 texture2DLod(sampler2D sampler, vec2 coord, float lod);
+vec4 texture2DProjLod(sampler2D sampler, vec3 coord, float lod);
+vec4 texture2DProjLod(sampler2D sampler, vec4 coord, float lod);
+vec4 texture3D(sampler3D sampler, vec3 coord);
+vec4 texture3D(sampler3D sampler, vec3 coord, float bias);
+vec4 texture3DProj(sampler3D sampler, vec4 coord);
+vec4 texture3DProj(sampler3D sampler, vec4 coord, float bias);
+vec4 texture3DLod(sampler3D sampler, vec3 coord, float lod);
+vec4 texture3DProjLod(sampler3D sampler, vec4 coord, float lod);
+vec4 textureCube(samplerCube sampler, vec3 coord);
+vec4 textureCube(samplerCube sampler, vec3 coord, float bias);
+vec4 textureCubeLod(samplerCube sampler, vec3 coord, float lod);
+vec4 shadow1D(sampler1DShadow sampler, vec3 coord);
+vec4 shadow1D(sampler1DShadow sampler, vec3 coord, float bias);
+vec4 shadow2D(sampler2DShadow sampler, vec3 coord);
+vec4 shadow2D(sampler2DShadow sampler, vec3 coord, float bias);
+vec4 shadow1DProj(sampler1DShadow sampler, vec4 coord);
+vec4 shadow1DProj(sampler1DShadow sampler, vec4 coord, float bias);
+vec4 shadow2DProj(sampler2DShadow sampler, vec4 coord);
+vec4 shadow2DProj(sampler2DShadow sampler, vec4 coord, float bias);
+vec4 shadow1DLod(sampler1DShadow sampler, vec3 coord, float lod);
+vec4 shadow2DLod(sampler2DShadow sampler, vec3 coord, float lod);
+vec4 shadow1DProjLod(sampler1DShadow sampler, vec4 coord, float lod);
+vec4 shadow2DProjLod(sampler2DShadow sampler, vec4 coord, float lod);
+/*
+uint atomicCounterIncrement(atomic_uint c);
+uint atomicCounter(atomic_uint c);
+uint atomicAdd(inout uint mem, uint data);
+int atomicAdd(inout int mem, int data);
+uint atomicMin(inout uint mem, uint data);
+int atomicMin(inout int mem, int data);
+uint atomicMax(inout uint mem, uint data);
+int atomicMax(inout int mem, int data);
+uint atomicAnd(inout uint mem, uint data);
+int atomicAnd(inout int mem, int data);
+uint atomicOr(inout uint mem, uint data);
+int atomicOr(inout int mem, int data);
+uint atomicXor(inout uint mem, uint data);
+int atomicXor(inout int mem, int data);
+uint atomicExchange(inout uint mem, uint data);
+int atomicExchange(inout int mem, int data);
+uint atomicCompSwap(inout uint mem, uint compare, uint data);
+int atomicCompSwap(inout int mem, int compare, int data);
+*/
+// section 8.12 Image Functions will go here if and when we add support for them
+
+$genType dFdx($genType p);
+$genType dFdy($genType p);
+$genType fwidth($genType p);
+$genType fwidthCoarse($genType p);
+$genType fwidthFine($genType p);
+float interpolateAtSample(float interpolant, int sample);
+vec2 interpolateAtSample(vec2 interpolant, int sample);
+vec3 interpolateAtSample(vec3 interpolant, int sample);
+vec4 interpolateAtSample(vec4 interpolant, int sample);
+float interpolateAtOffset(float interpolant, vec2 offset);
+vec2 interpolateAtOffset(vec2 interpolant, vec2 offset);
+vec3 interpolateAtOffset(vec3 interpolant, vec2 offset);
+vec4 interpolateAtOffset(vec4 interpolant, vec2 offset);
+void EmitStreamVertex(int stream);
+void EndStreamPrimitive(int stream);
+void EmitVertex();
+void EndPrimitive();
+void barrier();
+void memoryBarrier();
+void memoryBarrierAtomicCounter();
+void memoryBarrierBuffer();
+void memoryBarrierShared();
+void memoryBarrierImage();
+void groupMemoryBarrier();
+
+)
+
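The "split into multiple chunks" comment above is why the built-in declarations are wrapped in several separate STRINGIFY(...) blocks. As a minimal sketch (the STRINGIFY macro and the variable that consumes these chunks are not part of this diff, so the names below are purely illustrative), each chunk expands to its own string literal and adjacent literals are concatenated by the compiler, so no single literal exceeds MSVC's per-literal length limit:

    // Illustrative only: each STRINGIFY(...) becomes one string literal; adjacent
    // literals are concatenated, keeping every individual literal short enough for MSVC.
    #define STRINGIFY(src) #src
    static const char* kBuiltinDecls =
        STRINGIFY(float dot(vec2 x, vec2 y);)    // chunk 1
        STRINGIFY(vec3 cross(vec3 x, vec3 y););  // chunk 2; the two literals merge into one string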
diff --git a/gfx/skia/skia/src/sksl/sksl_frag.include b/gfx/skia/skia/src/sksl/sksl_frag.include
new file mode 100644
index 000000000..123c3393a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl_frag.include
@@ -0,0 +1,8 @@
+STRINGIFY(
+
+// defines built-in interfaces supported by SkiaSL fragment shaders
+
+layout(builtin=15) in vec4 gl_FragCoord;
+
+)
+
diff --git a/gfx/skia/skia/src/sksl/sksl_vert.include b/gfx/skia/skia/src/sksl/sksl_vert.include
new file mode 100644
index 000000000..85b53850a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl_vert.include
@@ -0,0 +1,11 @@
+STRINGIFY(
+
+// defines built-in interfaces supported by SkiaSL vertex shaders
+
+out gl_PerVertex {
+ layout(builtin=0) vec4 gl_Position;
+ layout(builtin=1) float gl_PointSize;
+};
+
+)
+
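The layout(builtin=N) indices used by sksl_frag.include and sksl_vert.include line up with the SpvBuiltIn enumeration in the spirv.h header added below (Position = 0, PointSize = 1, FragCoord = 15). A small, purely illustrative sanity check:

    // Illustrative cross-check against the SpvBuiltIn values defined in spirv.h below.
    #include "spirv.h"
    static_assert(SpvBuiltInPosition  == 0,  "gl_Position  is declared with layout(builtin=0)");
    static_assert(SpvBuiltInPointSize == 1,  "gl_PointSize is declared with layout(builtin=1)");
    static_assert(SpvBuiltInFragCoord == 15, "gl_FragCoord is declared with layout(builtin=15)");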
diff --git a/gfx/skia/skia/src/sksl/spirv.h b/gfx/skia/skia/src/sksl/spirv.h
new file mode 100644
index 000000000..e4f5b5bee
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/spirv.h
@@ -0,0 +1,870 @@
+/*
+** Copyright (c) 2014-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+/*
+** This header is automatically generated by the same tool that creates
+** the Binary Section of the SPIR-V specification.
+*/
+
+/*
+** Enumeration tokens for SPIR-V, in various styles:
+** C, C++, C++11, JSON, Lua, Python
+**
+** - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL
+** - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL
+** - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL
+** - Lua will use tables, e.g.: spv.SourceLanguage.GLSL
+** - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL']
+**
+** Some tokens act like mask values, which can be OR'd together,
+** while others are mutually exclusive. The mask-like ones have
+** "Mask" in their name, and a parallel enum that has the shift
+** amount (1 << x) for each corresponding enumerant.
+*/
+
+#ifndef spirv_H
+#define spirv_H
+
+typedef unsigned int SpvId;
+
+#define SPV_VERSION 0x10000
+#define SPV_REVISION 4
+
+static const unsigned int SpvMagicNumber = 0x07230203;
+static const unsigned int SpvVersion = 0x00010000;
+static const unsigned int SpvRevision = 4;
+static const unsigned int SpvOpCodeMask = 0xffff;
+static const unsigned int SpvWordCountShift = 16;
+
+typedef enum SpvSourceLanguage_ {
+ SpvSourceLanguageUnknown = 0,
+ SpvSourceLanguageESSL = 1,
+ SpvSourceLanguageGLSL = 2,
+ SpvSourceLanguageOpenCL_C = 3,
+ SpvSourceLanguageOpenCL_CPP = 4,
+} SpvSourceLanguage;
+
+typedef enum SpvExecutionModel_ {
+ SpvExecutionModelVertex = 0,
+ SpvExecutionModelTessellationControl = 1,
+ SpvExecutionModelTessellationEvaluation = 2,
+ SpvExecutionModelGeometry = 3,
+ SpvExecutionModelFragment = 4,
+ SpvExecutionModelGLCompute = 5,
+ SpvExecutionModelKernel = 6,
+} SpvExecutionModel;
+
+typedef enum SpvAddressingModel_ {
+ SpvAddressingModelLogical = 0,
+ SpvAddressingModelPhysical32 = 1,
+ SpvAddressingModelPhysical64 = 2,
+} SpvAddressingModel;
+
+typedef enum SpvMemoryModel_ {
+ SpvMemoryModelSimple = 0,
+ SpvMemoryModelGLSL450 = 1,
+ SpvMemoryModelOpenCL = 2,
+} SpvMemoryModel;
+
+typedef enum SpvExecutionMode_ {
+ SpvExecutionModeInvocations = 0,
+ SpvExecutionModeSpacingEqual = 1,
+ SpvExecutionModeSpacingFractionalEven = 2,
+ SpvExecutionModeSpacingFractionalOdd = 3,
+ SpvExecutionModeVertexOrderCw = 4,
+ SpvExecutionModeVertexOrderCcw = 5,
+ SpvExecutionModePixelCenterInteger = 6,
+ SpvExecutionModeOriginUpperLeft = 7,
+ SpvExecutionModeOriginLowerLeft = 8,
+ SpvExecutionModeEarlyFragmentTests = 9,
+ SpvExecutionModePointMode = 10,
+ SpvExecutionModeXfb = 11,
+ SpvExecutionModeDepthReplacing = 12,
+ SpvExecutionModeDepthGreater = 14,
+ SpvExecutionModeDepthLess = 15,
+ SpvExecutionModeDepthUnchanged = 16,
+ SpvExecutionModeLocalSize = 17,
+ SpvExecutionModeLocalSizeHint = 18,
+ SpvExecutionModeInputPoints = 19,
+ SpvExecutionModeInputLines = 20,
+ SpvExecutionModeInputLinesAdjacency = 21,
+ SpvExecutionModeTriangles = 22,
+ SpvExecutionModeInputTrianglesAdjacency = 23,
+ SpvExecutionModeQuads = 24,
+ SpvExecutionModeIsolines = 25,
+ SpvExecutionModeOutputVertices = 26,
+ SpvExecutionModeOutputPoints = 27,
+ SpvExecutionModeOutputLineStrip = 28,
+ SpvExecutionModeOutputTriangleStrip = 29,
+ SpvExecutionModeVecTypeHint = 30,
+ SpvExecutionModeContractionOff = 31,
+} SpvExecutionMode;
+
+typedef enum SpvStorageClass_ {
+ SpvStorageClassUniformConstant = 0,
+ SpvStorageClassInput = 1,
+ SpvStorageClassUniform = 2,
+ SpvStorageClassOutput = 3,
+ SpvStorageClassWorkgroup = 4,
+ SpvStorageClassCrossWorkgroup = 5,
+ SpvStorageClassPrivate = 6,
+ SpvStorageClassFunction = 7,
+ SpvStorageClassGeneric = 8,
+ SpvStorageClassPushConstant = 9,
+ SpvStorageClassAtomicCounter = 10,
+ SpvStorageClassImage = 11,
+} SpvStorageClass;
+
+typedef enum SpvDim_ {
+ SpvDim1D = 0,
+ SpvDim2D = 1,
+ SpvDim3D = 2,
+ SpvDimCube = 3,
+ SpvDimRect = 4,
+ SpvDimBuffer = 5,
+ SpvDimSubpassData = 6,
+} SpvDim;
+
+typedef enum SpvSamplerAddressingMode_ {
+ SpvSamplerAddressingModeNone = 0,
+ SpvSamplerAddressingModeClampToEdge = 1,
+ SpvSamplerAddressingModeClamp = 2,
+ SpvSamplerAddressingModeRepeat = 3,
+ SpvSamplerAddressingModeRepeatMirrored = 4,
+} SpvSamplerAddressingMode;
+
+typedef enum SpvSamplerFilterMode_ {
+ SpvSamplerFilterModeNearest = 0,
+ SpvSamplerFilterModeLinear = 1,
+} SpvSamplerFilterMode;
+
+typedef enum SpvImageFormat_ {
+ SpvImageFormatUnknown = 0,
+ SpvImageFormatRgba32f = 1,
+ SpvImageFormatRgba16f = 2,
+ SpvImageFormatR32f = 3,
+ SpvImageFormatRgba8 = 4,
+ SpvImageFormatRgba8Snorm = 5,
+ SpvImageFormatRg32f = 6,
+ SpvImageFormatRg16f = 7,
+ SpvImageFormatR11fG11fB10f = 8,
+ SpvImageFormatR16f = 9,
+ SpvImageFormatRgba16 = 10,
+ SpvImageFormatRgb10A2 = 11,
+ SpvImageFormatRg16 = 12,
+ SpvImageFormatRg8 = 13,
+ SpvImageFormatR16 = 14,
+ SpvImageFormatR8 = 15,
+ SpvImageFormatRgba16Snorm = 16,
+ SpvImageFormatRg16Snorm = 17,
+ SpvImageFormatRg8Snorm = 18,
+ SpvImageFormatR16Snorm = 19,
+ SpvImageFormatR8Snorm = 20,
+ SpvImageFormatRgba32i = 21,
+ SpvImageFormatRgba16i = 22,
+ SpvImageFormatRgba8i = 23,
+ SpvImageFormatR32i = 24,
+ SpvImageFormatRg32i = 25,
+ SpvImageFormatRg16i = 26,
+ SpvImageFormatRg8i = 27,
+ SpvImageFormatR16i = 28,
+ SpvImageFormatR8i = 29,
+ SpvImageFormatRgba32ui = 30,
+ SpvImageFormatRgba16ui = 31,
+ SpvImageFormatRgba8ui = 32,
+ SpvImageFormatR32ui = 33,
+ SpvImageFormatRgb10a2ui = 34,
+ SpvImageFormatRg32ui = 35,
+ SpvImageFormatRg16ui = 36,
+ SpvImageFormatRg8ui = 37,
+ SpvImageFormatR16ui = 38,
+ SpvImageFormatR8ui = 39,
+} SpvImageFormat;
+
+typedef enum SpvImageChannelOrder_ {
+ SpvImageChannelOrderR = 0,
+ SpvImageChannelOrderA = 1,
+ SpvImageChannelOrderRG = 2,
+ SpvImageChannelOrderRA = 3,
+ SpvImageChannelOrderRGB = 4,
+ SpvImageChannelOrderRGBA = 5,
+ SpvImageChannelOrderBGRA = 6,
+ SpvImageChannelOrderARGB = 7,
+ SpvImageChannelOrderIntensity = 8,
+ SpvImageChannelOrderLuminance = 9,
+ SpvImageChannelOrderRx = 10,
+ SpvImageChannelOrderRGx = 11,
+ SpvImageChannelOrderRGBx = 12,
+ SpvImageChannelOrderDepth = 13,
+ SpvImageChannelOrderDepthStencil = 14,
+ SpvImageChannelOrdersRGB = 15,
+ SpvImageChannelOrdersRGBx = 16,
+ SpvImageChannelOrdersRGBA = 17,
+ SpvImageChannelOrdersBGRA = 18,
+} SpvImageChannelOrder;
+
+typedef enum SpvImageChannelDataType_ {
+ SpvImageChannelDataTypeSnormInt8 = 0,
+ SpvImageChannelDataTypeSnormInt16 = 1,
+ SpvImageChannelDataTypeUnormInt8 = 2,
+ SpvImageChannelDataTypeUnormInt16 = 3,
+ SpvImageChannelDataTypeUnormShort565 = 4,
+ SpvImageChannelDataTypeUnormShort555 = 5,
+ SpvImageChannelDataTypeUnormInt101010 = 6,
+ SpvImageChannelDataTypeSignedInt8 = 7,
+ SpvImageChannelDataTypeSignedInt16 = 8,
+ SpvImageChannelDataTypeSignedInt32 = 9,
+ SpvImageChannelDataTypeUnsignedInt8 = 10,
+ SpvImageChannelDataTypeUnsignedInt16 = 11,
+ SpvImageChannelDataTypeUnsignedInt32 = 12,
+ SpvImageChannelDataTypeHalfFloat = 13,
+ SpvImageChannelDataTypeFloat = 14,
+ SpvImageChannelDataTypeUnormInt24 = 15,
+ SpvImageChannelDataTypeUnormInt101010_2 = 16,
+} SpvImageChannelDataType;
+
+typedef enum SpvImageOperandsShift_ {
+ SpvImageOperandsBiasShift = 0,
+ SpvImageOperandsLodShift = 1,
+ SpvImageOperandsGradShift = 2,
+ SpvImageOperandsConstOffsetShift = 3,
+ SpvImageOperandsOffsetShift = 4,
+ SpvImageOperandsConstOffsetsShift = 5,
+ SpvImageOperandsSampleShift = 6,
+ SpvImageOperandsMinLodShift = 7,
+} SpvImageOperandsShift;
+
+typedef enum SpvImageOperandsMask_ {
+ SpvImageOperandsMaskNone = 0,
+ SpvImageOperandsBiasMask = 0x00000001,
+ SpvImageOperandsLodMask = 0x00000002,
+ SpvImageOperandsGradMask = 0x00000004,
+ SpvImageOperandsConstOffsetMask = 0x00000008,
+ SpvImageOperandsOffsetMask = 0x00000010,
+ SpvImageOperandsConstOffsetsMask = 0x00000020,
+ SpvImageOperandsSampleMask = 0x00000040,
+ SpvImageOperandsMinLodMask = 0x00000080,
+} SpvImageOperandsMask;
+
+typedef enum SpvFPFastMathModeShift_ {
+ SpvFPFastMathModeNotNaNShift = 0,
+ SpvFPFastMathModeNotInfShift = 1,
+ SpvFPFastMathModeNSZShift = 2,
+ SpvFPFastMathModeAllowRecipShift = 3,
+ SpvFPFastMathModeFastShift = 4,
+} SpvFPFastMathModeShift;
+
+typedef enum SpvFPFastMathModeMask_ {
+ SpvFPFastMathModeMaskNone = 0,
+ SpvFPFastMathModeNotNaNMask = 0x00000001,
+ SpvFPFastMathModeNotInfMask = 0x00000002,
+ SpvFPFastMathModeNSZMask = 0x00000004,
+ SpvFPFastMathModeAllowRecipMask = 0x00000008,
+ SpvFPFastMathModeFastMask = 0x00000010,
+} SpvFPFastMathModeMask;
+
+typedef enum SpvFPRoundingMode_ {
+ SpvFPRoundingModeRTE = 0,
+ SpvFPRoundingModeRTZ = 1,
+ SpvFPRoundingModeRTP = 2,
+ SpvFPRoundingModeRTN = 3,
+} SpvFPRoundingMode;
+
+typedef enum SpvLinkageType_ {
+ SpvLinkageTypeExport = 0,
+ SpvLinkageTypeImport = 1,
+} SpvLinkageType;
+
+typedef enum SpvAccessQualifier_ {
+ SpvAccessQualifierReadOnly = 0,
+ SpvAccessQualifierWriteOnly = 1,
+ SpvAccessQualifierReadWrite = 2,
+} SpvAccessQualifier;
+
+typedef enum SpvFunctionParameterAttribute_ {
+ SpvFunctionParameterAttributeZext = 0,
+ SpvFunctionParameterAttributeSext = 1,
+ SpvFunctionParameterAttributeByVal = 2,
+ SpvFunctionParameterAttributeSret = 3,
+ SpvFunctionParameterAttributeNoAlias = 4,
+ SpvFunctionParameterAttributeNoCapture = 5,
+ SpvFunctionParameterAttributeNoWrite = 6,
+ SpvFunctionParameterAttributeNoReadWrite = 7,
+} SpvFunctionParameterAttribute;
+
+typedef enum SpvDecoration_ {
+ SpvDecorationRelaxedPrecision = 0,
+ SpvDecorationSpecId = 1,
+ SpvDecorationBlock = 2,
+ SpvDecorationBufferBlock = 3,
+ SpvDecorationRowMajor = 4,
+ SpvDecorationColMajor = 5,
+ SpvDecorationArrayStride = 6,
+ SpvDecorationMatrixStride = 7,
+ SpvDecorationGLSLShared = 8,
+ SpvDecorationGLSLPacked = 9,
+ SpvDecorationCPacked = 10,
+ SpvDecorationBuiltIn = 11,
+ SpvDecorationNoPerspective = 13,
+ SpvDecorationFlat = 14,
+ SpvDecorationPatch = 15,
+ SpvDecorationCentroid = 16,
+ SpvDecorationSample = 17,
+ SpvDecorationInvariant = 18,
+ SpvDecorationRestrict = 19,
+ SpvDecorationAliased = 20,
+ SpvDecorationVolatile = 21,
+ SpvDecorationConstant = 22,
+ SpvDecorationCoherent = 23,
+ SpvDecorationNonWritable = 24,
+ SpvDecorationNonReadable = 25,
+ SpvDecorationUniform = 26,
+ SpvDecorationSaturatedConversion = 28,
+ SpvDecorationStream = 29,
+ SpvDecorationLocation = 30,
+ SpvDecorationComponent = 31,
+ SpvDecorationIndex = 32,
+ SpvDecorationBinding = 33,
+ SpvDecorationDescriptorSet = 34,
+ SpvDecorationOffset = 35,
+ SpvDecorationXfbBuffer = 36,
+ SpvDecorationXfbStride = 37,
+ SpvDecorationFuncParamAttr = 38,
+ SpvDecorationFPRoundingMode = 39,
+ SpvDecorationFPFastMathMode = 40,
+ SpvDecorationLinkageAttributes = 41,
+ SpvDecorationNoContraction = 42,
+ SpvDecorationInputAttachmentIndex = 43,
+ SpvDecorationAlignment = 44,
+} SpvDecoration;
+
+typedef enum SpvBuiltIn_ {
+ SpvBuiltInPosition = 0,
+ SpvBuiltInPointSize = 1,
+ SpvBuiltInClipDistance = 3,
+ SpvBuiltInCullDistance = 4,
+ SpvBuiltInVertexId = 5,
+ SpvBuiltInInstanceId = 6,
+ SpvBuiltInPrimitiveId = 7,
+ SpvBuiltInInvocationId = 8,
+ SpvBuiltInLayer = 9,
+ SpvBuiltInViewportIndex = 10,
+ SpvBuiltInTessLevelOuter = 11,
+ SpvBuiltInTessLevelInner = 12,
+ SpvBuiltInTessCoord = 13,
+ SpvBuiltInPatchVertices = 14,
+ SpvBuiltInFragCoord = 15,
+ SpvBuiltInPointCoord = 16,
+ SpvBuiltInFrontFacing = 17,
+ SpvBuiltInSampleId = 18,
+ SpvBuiltInSamplePosition = 19,
+ SpvBuiltInSampleMask = 20,
+ SpvBuiltInFragDepth = 22,
+ SpvBuiltInHelperInvocation = 23,
+ SpvBuiltInNumWorkgroups = 24,
+ SpvBuiltInWorkgroupSize = 25,
+ SpvBuiltInWorkgroupId = 26,
+ SpvBuiltInLocalInvocationId = 27,
+ SpvBuiltInGlobalInvocationId = 28,
+ SpvBuiltInLocalInvocationIndex = 29,
+ SpvBuiltInWorkDim = 30,
+ SpvBuiltInGlobalSize = 31,
+ SpvBuiltInEnqueuedWorkgroupSize = 32,
+ SpvBuiltInGlobalOffset = 33,
+ SpvBuiltInGlobalLinearId = 34,
+ SpvBuiltInSubgroupSize = 36,
+ SpvBuiltInSubgroupMaxSize = 37,
+ SpvBuiltInNumSubgroups = 38,
+ SpvBuiltInNumEnqueuedSubgroups = 39,
+ SpvBuiltInSubgroupId = 40,
+ SpvBuiltInSubgroupLocalInvocationId = 41,
+ SpvBuiltInVertexIndex = 42,
+ SpvBuiltInInstanceIndex = 43,
+} SpvBuiltIn;
+
+typedef enum SpvSelectionControlShift_ {
+ SpvSelectionControlFlattenShift = 0,
+ SpvSelectionControlDontFlattenShift = 1,
+} SpvSelectionControlShift;
+
+typedef enum SpvSelectionControlMask_ {
+ SpvSelectionControlMaskNone = 0,
+ SpvSelectionControlFlattenMask = 0x00000001,
+ SpvSelectionControlDontFlattenMask = 0x00000002,
+} SpvSelectionControlMask;
+
+typedef enum SpvLoopControlShift_ {
+ SpvLoopControlUnrollShift = 0,
+ SpvLoopControlDontUnrollShift = 1,
+} SpvLoopControlShift;
+
+typedef enum SpvLoopControlMask_ {
+ SpvLoopControlMaskNone = 0,
+ SpvLoopControlUnrollMask = 0x00000001,
+ SpvLoopControlDontUnrollMask = 0x00000002,
+} SpvLoopControlMask;
+
+typedef enum SpvFunctionControlShift_ {
+ SpvFunctionControlInlineShift = 0,
+ SpvFunctionControlDontInlineShift = 1,
+ SpvFunctionControlPureShift = 2,
+ SpvFunctionControlConstShift = 3,
+} SpvFunctionControlShift;
+
+typedef enum SpvFunctionControlMask_ {
+ SpvFunctionControlMaskNone = 0,
+ SpvFunctionControlInlineMask = 0x00000001,
+ SpvFunctionControlDontInlineMask = 0x00000002,
+ SpvFunctionControlPureMask = 0x00000004,
+ SpvFunctionControlConstMask = 0x00000008,
+} SpvFunctionControlMask;
+
+typedef enum SpvMemorySemanticsShift_ {
+ SpvMemorySemanticsAcquireShift = 1,
+ SpvMemorySemanticsReleaseShift = 2,
+ SpvMemorySemanticsAcquireReleaseShift = 3,
+ SpvMemorySemanticsSequentiallyConsistentShift = 4,
+ SpvMemorySemanticsUniformMemoryShift = 6,
+ SpvMemorySemanticsSubgroupMemoryShift = 7,
+ SpvMemorySemanticsWorkgroupMemoryShift = 8,
+ SpvMemorySemanticsCrossWorkgroupMemoryShift = 9,
+ SpvMemorySemanticsAtomicCounterMemoryShift = 10,
+ SpvMemorySemanticsImageMemoryShift = 11,
+} SpvMemorySemanticsShift;
+
+typedef enum SpvMemorySemanticsMask_ {
+ SpvMemorySemanticsMaskNone = 0,
+ SpvMemorySemanticsAcquireMask = 0x00000002,
+ SpvMemorySemanticsReleaseMask = 0x00000004,
+ SpvMemorySemanticsAcquireReleaseMask = 0x00000008,
+ SpvMemorySemanticsSequentiallyConsistentMask = 0x00000010,
+ SpvMemorySemanticsUniformMemoryMask = 0x00000040,
+ SpvMemorySemanticsSubgroupMemoryMask = 0x00000080,
+ SpvMemorySemanticsWorkgroupMemoryMask = 0x00000100,
+ SpvMemorySemanticsCrossWorkgroupMemoryMask = 0x00000200,
+ SpvMemorySemanticsAtomicCounterMemoryMask = 0x00000400,
+ SpvMemorySemanticsImageMemoryMask = 0x00000800,
+} SpvMemorySemanticsMask;
+
+typedef enum SpvMemoryAccessShift_ {
+ SpvMemoryAccessVolatileShift = 0,
+ SpvMemoryAccessAlignedShift = 1,
+ SpvMemoryAccessNontemporalShift = 2,
+} SpvMemoryAccessShift;
+
+typedef enum SpvMemoryAccessMask_ {
+ SpvMemoryAccessMaskNone = 0,
+ SpvMemoryAccessVolatileMask = 0x00000001,
+ SpvMemoryAccessAlignedMask = 0x00000002,
+ SpvMemoryAccessNontemporalMask = 0x00000004,
+} SpvMemoryAccessMask;
+
+typedef enum SpvScope_ {
+ SpvScopeCrossDevice = 0,
+ SpvScopeDevice = 1,
+ SpvScopeWorkgroup = 2,
+ SpvScopeSubgroup = 3,
+ SpvScopeInvocation = 4,
+} SpvScope;
+
+typedef enum SpvGroupOperation_ {
+ SpvGroupOperationReduce = 0,
+ SpvGroupOperationInclusiveScan = 1,
+ SpvGroupOperationExclusiveScan = 2,
+} SpvGroupOperation;
+
+typedef enum SpvKernelEnqueueFlags_ {
+ SpvKernelEnqueueFlagsNoWait = 0,
+ SpvKernelEnqueueFlagsWaitKernel = 1,
+ SpvKernelEnqueueFlagsWaitWorkGroup = 2,
+} SpvKernelEnqueueFlags;
+
+typedef enum SpvKernelProfilingInfoShift_ {
+ SpvKernelProfilingInfoCmdExecTimeShift = 0,
+} SpvKernelProfilingInfoShift;
+
+typedef enum SpvKernelProfilingInfoMask_ {
+ SpvKernelProfilingInfoMaskNone = 0,
+ SpvKernelProfilingInfoCmdExecTimeMask = 0x00000001,
+} SpvKernelProfilingInfoMask;
+
+typedef enum SpvCapability_ {
+ SpvCapabilityMatrix = 0,
+ SpvCapabilityShader = 1,
+ SpvCapabilityGeometry = 2,
+ SpvCapabilityTessellation = 3,
+ SpvCapabilityAddresses = 4,
+ SpvCapabilityLinkage = 5,
+ SpvCapabilityKernel = 6,
+ SpvCapabilityVector16 = 7,
+ SpvCapabilityFloat16Buffer = 8,
+ SpvCapabilityFloat16 = 9,
+ SpvCapabilityFloat64 = 10,
+ SpvCapabilityInt64 = 11,
+ SpvCapabilityInt64Atomics = 12,
+ SpvCapabilityImageBasic = 13,
+ SpvCapabilityImageReadWrite = 14,
+ SpvCapabilityImageMipmap = 15,
+ SpvCapabilityPipes = 17,
+ SpvCapabilityGroups = 18,
+ SpvCapabilityDeviceEnqueue = 19,
+ SpvCapabilityLiteralSampler = 20,
+ SpvCapabilityAtomicStorage = 21,
+ SpvCapabilityInt16 = 22,
+ SpvCapabilityTessellationPointSize = 23,
+ SpvCapabilityGeometryPointSize = 24,
+ SpvCapabilityImageGatherExtended = 25,
+ SpvCapabilityStorageImageMultisample = 27,
+ SpvCapabilityUniformBufferArrayDynamicIndexing = 28,
+ SpvCapabilitySampledImageArrayDynamicIndexing = 29,
+ SpvCapabilityStorageBufferArrayDynamicIndexing = 30,
+ SpvCapabilityStorageImageArrayDynamicIndexing = 31,
+ SpvCapabilityClipDistance = 32,
+ SpvCapabilityCullDistance = 33,
+ SpvCapabilityImageCubeArray = 34,
+ SpvCapabilitySampleRateShading = 35,
+ SpvCapabilityImageRect = 36,
+ SpvCapabilitySampledRect = 37,
+ SpvCapabilityGenericPointer = 38,
+ SpvCapabilityInt8 = 39,
+ SpvCapabilityInputAttachment = 40,
+ SpvCapabilitySparseResidency = 41,
+ SpvCapabilityMinLod = 42,
+ SpvCapabilitySampled1D = 43,
+ SpvCapabilityImage1D = 44,
+ SpvCapabilitySampledCubeArray = 45,
+ SpvCapabilitySampledBuffer = 46,
+ SpvCapabilityImageBuffer = 47,
+ SpvCapabilityImageMSArray = 48,
+ SpvCapabilityStorageImageExtendedFormats = 49,
+ SpvCapabilityImageQuery = 50,
+ SpvCapabilityDerivativeControl = 51,
+ SpvCapabilityInterpolationFunction = 52,
+ SpvCapabilityTransformFeedback = 53,
+ SpvCapabilityGeometryStreams = 54,
+ SpvCapabilityStorageImageReadWithoutFormat = 55,
+ SpvCapabilityStorageImageWriteWithoutFormat = 56,
+ SpvCapabilityMultiViewport = 57,
+} SpvCapability;
+
+typedef enum SpvOp_ {
+ SpvOpNop = 0,
+ SpvOpUndef = 1,
+ SpvOpSourceContinued = 2,
+ SpvOpSource = 3,
+ SpvOpSourceExtension = 4,
+ SpvOpName = 5,
+ SpvOpMemberName = 6,
+ SpvOpString = 7,
+ SpvOpLine = 8,
+ SpvOpExtension = 10,
+ SpvOpExtInstImport = 11,
+ SpvOpExtInst = 12,
+ SpvOpMemoryModel = 14,
+ SpvOpEntryPoint = 15,
+ SpvOpExecutionMode = 16,
+ SpvOpCapability = 17,
+ SpvOpTypeVoid = 19,
+ SpvOpTypeBool = 20,
+ SpvOpTypeInt = 21,
+ SpvOpTypeFloat = 22,
+ SpvOpTypeVector = 23,
+ SpvOpTypeMatrix = 24,
+ SpvOpTypeImage = 25,
+ SpvOpTypeSampler = 26,
+ SpvOpTypeSampledImage = 27,
+ SpvOpTypeArray = 28,
+ SpvOpTypeRuntimeArray = 29,
+ SpvOpTypeStruct = 30,
+ SpvOpTypeOpaque = 31,
+ SpvOpTypePointer = 32,
+ SpvOpTypeFunction = 33,
+ SpvOpTypeEvent = 34,
+ SpvOpTypeDeviceEvent = 35,
+ SpvOpTypeReserveId = 36,
+ SpvOpTypeQueue = 37,
+ SpvOpTypePipe = 38,
+ SpvOpTypeForwardPointer = 39,
+ SpvOpConstantTrue = 41,
+ SpvOpConstantFalse = 42,
+ SpvOpConstant = 43,
+ SpvOpConstantComposite = 44,
+ SpvOpConstantSampler = 45,
+ SpvOpConstantNull = 46,
+ SpvOpSpecConstantTrue = 48,
+ SpvOpSpecConstantFalse = 49,
+ SpvOpSpecConstant = 50,
+ SpvOpSpecConstantComposite = 51,
+ SpvOpSpecConstantOp = 52,
+ SpvOpFunction = 54,
+ SpvOpFunctionParameter = 55,
+ SpvOpFunctionEnd = 56,
+ SpvOpFunctionCall = 57,
+ SpvOpVariable = 59,
+ SpvOpImageTexelPointer = 60,
+ SpvOpLoad = 61,
+ SpvOpStore = 62,
+ SpvOpCopyMemory = 63,
+ SpvOpCopyMemorySized = 64,
+ SpvOpAccessChain = 65,
+ SpvOpInBoundsAccessChain = 66,
+ SpvOpPtrAccessChain = 67,
+ SpvOpArrayLength = 68,
+ SpvOpGenericPtrMemSemantics = 69,
+ SpvOpInBoundsPtrAccessChain = 70,
+ SpvOpDecorate = 71,
+ SpvOpMemberDecorate = 72,
+ SpvOpDecorationGroup = 73,
+ SpvOpGroupDecorate = 74,
+ SpvOpGroupMemberDecorate = 75,
+ SpvOpVectorExtractDynamic = 77,
+ SpvOpVectorInsertDynamic = 78,
+ SpvOpVectorShuffle = 79,
+ SpvOpCompositeConstruct = 80,
+ SpvOpCompositeExtract = 81,
+ SpvOpCompositeInsert = 82,
+ SpvOpCopyObject = 83,
+ SpvOpTranspose = 84,
+ SpvOpSampledImage = 86,
+ SpvOpImageSampleImplicitLod = 87,
+ SpvOpImageSampleExplicitLod = 88,
+ SpvOpImageSampleDrefImplicitLod = 89,
+ SpvOpImageSampleDrefExplicitLod = 90,
+ SpvOpImageSampleProjImplicitLod = 91,
+ SpvOpImageSampleProjExplicitLod = 92,
+ SpvOpImageSampleProjDrefImplicitLod = 93,
+ SpvOpImageSampleProjDrefExplicitLod = 94,
+ SpvOpImageFetch = 95,
+ SpvOpImageGather = 96,
+ SpvOpImageDrefGather = 97,
+ SpvOpImageRead = 98,
+ SpvOpImageWrite = 99,
+ SpvOpImage = 100,
+ SpvOpImageQueryFormat = 101,
+ SpvOpImageQueryOrder = 102,
+ SpvOpImageQuerySizeLod = 103,
+ SpvOpImageQuerySize = 104,
+ SpvOpImageQueryLod = 105,
+ SpvOpImageQueryLevels = 106,
+ SpvOpImageQuerySamples = 107,
+ SpvOpConvertFToU = 109,
+ SpvOpConvertFToS = 110,
+ SpvOpConvertSToF = 111,
+ SpvOpConvertUToF = 112,
+ SpvOpUConvert = 113,
+ SpvOpSConvert = 114,
+ SpvOpFConvert = 115,
+ SpvOpQuantizeToF16 = 116,
+ SpvOpConvertPtrToU = 117,
+ SpvOpSatConvertSToU = 118,
+ SpvOpSatConvertUToS = 119,
+ SpvOpConvertUToPtr = 120,
+ SpvOpPtrCastToGeneric = 121,
+ SpvOpGenericCastToPtr = 122,
+ SpvOpGenericCastToPtrExplicit = 123,
+ SpvOpBitcast = 124,
+ SpvOpSNegate = 126,
+ SpvOpFNegate = 127,
+ SpvOpIAdd = 128,
+ SpvOpFAdd = 129,
+ SpvOpISub = 130,
+ SpvOpFSub = 131,
+ SpvOpIMul = 132,
+ SpvOpFMul = 133,
+ SpvOpUDiv = 134,
+ SpvOpSDiv = 135,
+ SpvOpFDiv = 136,
+ SpvOpUMod = 137,
+ SpvOpSRem = 138,
+ SpvOpSMod = 139,
+ SpvOpFRem = 140,
+ SpvOpFMod = 141,
+ SpvOpVectorTimesScalar = 142,
+ SpvOpMatrixTimesScalar = 143,
+ SpvOpVectorTimesMatrix = 144,
+ SpvOpMatrixTimesVector = 145,
+ SpvOpMatrixTimesMatrix = 146,
+ SpvOpOuterProduct = 147,
+ SpvOpDot = 148,
+ SpvOpIAddCarry = 149,
+ SpvOpISubBorrow = 150,
+ SpvOpUMulExtended = 151,
+ SpvOpSMulExtended = 152,
+ SpvOpAny = 154,
+ SpvOpAll = 155,
+ SpvOpIsNan = 156,
+ SpvOpIsInf = 157,
+ SpvOpIsFinite = 158,
+ SpvOpIsNormal = 159,
+ SpvOpSignBitSet = 160,
+ SpvOpLessOrGreater = 161,
+ SpvOpOrdered = 162,
+ SpvOpUnordered = 163,
+ SpvOpLogicalEqual = 164,
+ SpvOpLogicalNotEqual = 165,
+ SpvOpLogicalOr = 166,
+ SpvOpLogicalAnd = 167,
+ SpvOpLogicalNot = 168,
+ SpvOpSelect = 169,
+ SpvOpIEqual = 170,
+ SpvOpINotEqual = 171,
+ SpvOpUGreaterThan = 172,
+ SpvOpSGreaterThan = 173,
+ SpvOpUGreaterThanEqual = 174,
+ SpvOpSGreaterThanEqual = 175,
+ SpvOpULessThan = 176,
+ SpvOpSLessThan = 177,
+ SpvOpULessThanEqual = 178,
+ SpvOpSLessThanEqual = 179,
+ SpvOpFOrdEqual = 180,
+ SpvOpFUnordEqual = 181,
+ SpvOpFOrdNotEqual = 182,
+ SpvOpFUnordNotEqual = 183,
+ SpvOpFOrdLessThan = 184,
+ SpvOpFUnordLessThan = 185,
+ SpvOpFOrdGreaterThan = 186,
+ SpvOpFUnordGreaterThan = 187,
+ SpvOpFOrdLessThanEqual = 188,
+ SpvOpFUnordLessThanEqual = 189,
+ SpvOpFOrdGreaterThanEqual = 190,
+ SpvOpFUnordGreaterThanEqual = 191,
+ SpvOpShiftRightLogical = 194,
+ SpvOpShiftRightArithmetic = 195,
+ SpvOpShiftLeftLogical = 196,
+ SpvOpBitwiseOr = 197,
+ SpvOpBitwiseXor = 198,
+ SpvOpBitwiseAnd = 199,
+ SpvOpNot = 200,
+ SpvOpBitFieldInsert = 201,
+ SpvOpBitFieldSExtract = 202,
+ SpvOpBitFieldUExtract = 203,
+ SpvOpBitReverse = 204,
+ SpvOpBitCount = 205,
+ SpvOpDPdx = 207,
+ SpvOpDPdy = 208,
+ SpvOpFwidth = 209,
+ SpvOpDPdxFine = 210,
+ SpvOpDPdyFine = 211,
+ SpvOpFwidthFine = 212,
+ SpvOpDPdxCoarse = 213,
+ SpvOpDPdyCoarse = 214,
+ SpvOpFwidthCoarse = 215,
+ SpvOpEmitVertex = 218,
+ SpvOpEndPrimitive = 219,
+ SpvOpEmitStreamVertex = 220,
+ SpvOpEndStreamPrimitive = 221,
+ SpvOpControlBarrier = 224,
+ SpvOpMemoryBarrier = 225,
+ SpvOpAtomicLoad = 227,
+ SpvOpAtomicStore = 228,
+ SpvOpAtomicExchange = 229,
+ SpvOpAtomicCompareExchange = 230,
+ SpvOpAtomicCompareExchangeWeak = 231,
+ SpvOpAtomicIIncrement = 232,
+ SpvOpAtomicIDecrement = 233,
+ SpvOpAtomicIAdd = 234,
+ SpvOpAtomicISub = 235,
+ SpvOpAtomicSMin = 236,
+ SpvOpAtomicUMin = 237,
+ SpvOpAtomicSMax = 238,
+ SpvOpAtomicUMax = 239,
+ SpvOpAtomicAnd = 240,
+ SpvOpAtomicOr = 241,
+ SpvOpAtomicXor = 242,
+ SpvOpPhi = 245,
+ SpvOpLoopMerge = 246,
+ SpvOpSelectionMerge = 247,
+ SpvOpLabel = 248,
+ SpvOpBranch = 249,
+ SpvOpBranchConditional = 250,
+ SpvOpSwitch = 251,
+ SpvOpKill = 252,
+ SpvOpReturn = 253,
+ SpvOpReturnValue = 254,
+ SpvOpUnreachable = 255,
+ SpvOpLifetimeStart = 256,
+ SpvOpLifetimeStop = 257,
+ SpvOpGroupAsyncCopy = 259,
+ SpvOpGroupWaitEvents = 260,
+ SpvOpGroupAll = 261,
+ SpvOpGroupAny = 262,
+ SpvOpGroupBroadcast = 263,
+ SpvOpGroupIAdd = 264,
+ SpvOpGroupFAdd = 265,
+ SpvOpGroupFMin = 266,
+ SpvOpGroupUMin = 267,
+ SpvOpGroupSMin = 268,
+ SpvOpGroupFMax = 269,
+ SpvOpGroupUMax = 270,
+ SpvOpGroupSMax = 271,
+ SpvOpReadPipe = 274,
+ SpvOpWritePipe = 275,
+ SpvOpReservedReadPipe = 276,
+ SpvOpReservedWritePipe = 277,
+ SpvOpReserveReadPipePackets = 278,
+ SpvOpReserveWritePipePackets = 279,
+ SpvOpCommitReadPipe = 280,
+ SpvOpCommitWritePipe = 281,
+ SpvOpIsValidReserveId = 282,
+ SpvOpGetNumPipePackets = 283,
+ SpvOpGetMaxPipePackets = 284,
+ SpvOpGroupReserveReadPipePackets = 285,
+ SpvOpGroupReserveWritePipePackets = 286,
+ SpvOpGroupCommitReadPipe = 287,
+ SpvOpGroupCommitWritePipe = 288,
+ SpvOpEnqueueMarker = 291,
+ SpvOpEnqueueKernel = 292,
+ SpvOpGetKernelNDrangeSubGroupCount = 293,
+ SpvOpGetKernelNDrangeMaxSubGroupSize = 294,
+ SpvOpGetKernelWorkGroupSize = 295,
+ SpvOpGetKernelPreferredWorkGroupSizeMultiple = 296,
+ SpvOpRetainEvent = 297,
+ SpvOpReleaseEvent = 298,
+ SpvOpCreateUserEvent = 299,
+ SpvOpIsValidEvent = 300,
+ SpvOpSetUserEventStatus = 301,
+ SpvOpCaptureEventProfilingInfo = 302,
+ SpvOpGetDefaultQueue = 303,
+ SpvOpBuildNDRange = 304,
+ SpvOpImageSparseSampleImplicitLod = 305,
+ SpvOpImageSparseSampleExplicitLod = 306,
+ SpvOpImageSparseSampleDrefImplicitLod = 307,
+ SpvOpImageSparseSampleDrefExplicitLod = 308,
+ SpvOpImageSparseSampleProjImplicitLod = 309,
+ SpvOpImageSparseSampleProjExplicitLod = 310,
+ SpvOpImageSparseSampleProjDrefImplicitLod = 311,
+ SpvOpImageSparseSampleProjDrefExplicitLod = 312,
+ SpvOpImageSparseFetch = 313,
+ SpvOpImageSparseGather = 314,
+ SpvOpImageSparseDrefGather = 315,
+ SpvOpImageSparseTexelsResident = 316,
+ SpvOpNoLine = 317,
+ SpvOpAtomicFlagTestAndSet = 318,
+ SpvOpAtomicFlagClear = 319,
+ SpvOpImageSparseRead = 320,
+} SpvOp;
+
+#endif // #ifndef spirv_H
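The header's own comment notes that the *Mask enums can be OR'd together and that each mask is 1 << the matching *Shift value; likewise, SpvOpCodeMask and SpvWordCountShift reflect the SPIR-V binary layout, in which the first word of an instruction carries the word count in its high 16 bits and the opcode in its low 16 bits. A brief sketch of how these constants are typically used (not part of the patch):

    // Decoding the first word of a SPIR-V instruction (layout per the SPIR-V binary spec).
    #include "spirv.h"
    static SpvOp decode_opcode(unsigned int firstWord) {
        return (SpvOp)(firstWord & SpvOpCodeMask);      // opcode lives in the low 16 bits
    }
    static unsigned int decode_word_count(unsigned int firstWord) {
        return firstWord >> SpvWordCountShift;          // word count lives in the high 16 bits
    }
    // Mask-style enums combine with bitwise OR; each mask equals 1 << its Shift twin.
    static const unsigned int kBiasAndLod =
        SpvImageOperandsBiasMask | SpvImageOperandsLodMask;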
diff --git a/gfx/skia/skia/src/svg/SkSVGCanvas.cpp b/gfx/skia/skia/src/svg/SkSVGCanvas.cpp
new file mode 100644
index 000000000..b6634b8a1
--- /dev/null
+++ b/gfx/skia/skia/src/svg/SkSVGCanvas.cpp
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkSVGCanvas.h"
+#include "SkSVGDevice.h"
+
+SkCanvas* SkSVGCanvas::Create(const SkRect& bounds, SkXMLWriter* writer) {
+ // TODO: pass full bounds to the device
+ SkISize size = bounds.roundOut().size();
+ SkAutoTUnref<SkBaseDevice> device(SkSVGDevice::Create(size, writer));
+
+ return new SkCanvas(device);
+}
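A hedged usage sketch for the new factory (the output file name and drawing calls are illustrative, and SkXMLStreamWriter / SkFILEWStream are assumed to come from the existing SkXMLWriter.h / SkStream.h headers rather than from this patch): SkSVGCanvas::Create() hands back an ordinary SkCanvas whose device serializes draw calls through the supplied SkXMLWriter.

    // Illustrative sketch, not part of the patch.
    #include "SkCanvas.h"
    #include "SkPaint.h"
    #include "SkSVGCanvas.h"
    #include "SkStream.h"
    #include "SkXMLWriter.h"

    void write_example_svg() {
        SkFILEWStream stream("example.svg");       // assumed output destination
        SkXMLStreamWriter xmlWriter(&stream);      // XML writer backed by the stream (assumed API)
        SkCanvas* canvas = SkSVGCanvas::Create(SkRect::MakeWH(256, 256), &xmlWriter);
        SkPaint paint;
        paint.setColor(SK_ColorRED);
        canvas->drawCircle(128, 128, 64, paint);   // recorded as SVG elements by SkSVGDevice
        delete canvas;                             // Create() returns an owned pointer
    }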
diff --git a/gfx/skia/skia/src/svg/SkSVGDevice.cpp b/gfx/skia/skia/src/svg/SkSVGDevice.cpp
new file mode 100644
index 000000000..16e2b3f7b
--- /dev/null
+++ b/gfx/skia/skia/src/svg/SkSVGDevice.cpp
@@ -0,0 +1,813 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkSVGDevice.h"
+
+#include "SkBase64.h"
+#include "SkBitmap.h"
+#include "SkChecksum.h"
+#include "SkClipStack.h"
+#include "SkData.h"
+#include "SkDraw.h"
+#include "SkImageEncoder.h"
+#include "SkPaint.h"
+#include "SkParsePath.h"
+#include "SkShader.h"
+#include "SkStream.h"
+#include "SkTHash.h"
+#include "SkTypeface.h"
+#include "SkUtils.h"
+#include "SkXMLWriter.h"
+
+namespace {
+
+static SkString svg_color(SkColor color) {
+ return SkStringPrintf("rgb(%u,%u,%u)",
+ SkColorGetR(color),
+ SkColorGetG(color),
+ SkColorGetB(color));
+}
+
+static SkScalar svg_opacity(SkColor color) {
+ return SkIntToScalar(SkColorGetA(color)) / SK_AlphaOPAQUE;
+}
+
+// Keep in sync with SkPaint::Cap
+static const char* cap_map[] = {
+ nullptr, // kButt_Cap (default)
+ "round", // kRound_Cap
+ "square" // kSquare_Cap
+};
+static_assert(SK_ARRAY_COUNT(cap_map) == SkPaint::kCapCount, "missing_cap_map_entry");
+
+static const char* svg_cap(SkPaint::Cap cap) {
+ SkASSERT(cap < SK_ARRAY_COUNT(cap_map));
+ return cap_map[cap];
+}
+
+// Keep in sync with SkPaint::Join
+static const char* join_map[] = {
+ nullptr, // kMiter_Join (default)
+ "round", // kRound_Join
+ "bevel" // kBevel_Join
+};
+static_assert(SK_ARRAY_COUNT(join_map) == SkPaint::kJoinCount, "missing_join_map_entry");
+
+static const char* svg_join(SkPaint::Join join) {
+ SkASSERT(join < SK_ARRAY_COUNT(join_map));
+ return join_map[join];
+}
+
+// Keep in sync with SkPaint::Align
+static const char* text_align_map[] = {
+ nullptr, // kLeft_Align (default)
+ "middle", // kCenter_Align
+ "end" // kRight_Align
+};
+static_assert(SK_ARRAY_COUNT(text_align_map) == SkPaint::kAlignCount,
+ "missing_text_align_map_entry");
+static const char* svg_text_align(SkPaint::Align align) {
+ SkASSERT(align < SK_ARRAY_COUNT(text_align_map));
+ return text_align_map[align];
+}
+
+static SkString svg_transform(const SkMatrix& t) {
+ SkASSERT(!t.isIdentity());
+
+ SkString tstr;
+ switch (t.getType()) {
+ case SkMatrix::kPerspective_Mask:
+ SkDebugf("Can't handle perspective matrices.");
+ break;
+ case SkMatrix::kTranslate_Mask:
+ tstr.printf("translate(%g %g)", t.getTranslateX(), t.getTranslateY());
+ break;
+ case SkMatrix::kScale_Mask:
+ tstr.printf("scale(%g %g)", t.getScaleX(), t.getScaleY());
+ break;
+ default:
+ // http://www.w3.org/TR/SVG/coords.html#TransformMatrixDefined
+ // | a c e |
+ // | b d f |
+ // | 0 0 1 |
+ tstr.printf("matrix(%g %g %g %g %g %g)",
+ t.getScaleX(), t.getSkewY(),
+ t.getSkewX(), t.getScaleY(),
+ t.getTranslateX(), t.getTranslateY());
+ break;
+ }
+
+ return tstr;
+}
+
+struct Resources {
+ Resources(const SkPaint& paint)
+ : fPaintServer(svg_color(paint.getColor())) {}
+
+ SkString fPaintServer;
+ SkString fClip;
+};
+
+class SVGTextBuilder : SkNoncopyable {
+public:
+ SVGTextBuilder(const void* text, size_t byteLen, const SkPaint& paint, const SkPoint& offset,
+ unsigned scalarsPerPos, const SkScalar pos[] = nullptr)
+ : fOffset(offset)
+ , fScalarsPerPos(scalarsPerPos)
+ , fPos(pos)
+ , fLastCharWasWhitespace(true) // start off in whitespace mode to strip all leading space
+ {
+ SkASSERT(scalarsPerPos <= 2);
+ SkASSERT(scalarsPerPos == 0 || SkToBool(pos));
+
+ int count = paint.countText(text, byteLen);
+
+ switch(paint.getTextEncoding()) {
+ case SkPaint::kGlyphID_TextEncoding: {
+ SkASSERT(count * sizeof(uint16_t) == byteLen);
+ SkAutoSTArray<64, SkUnichar> unichars(count);
+ paint.glyphsToUnichars((const uint16_t*)text, count, unichars.get());
+ for (int i = 0; i < count; ++i) {
+ this->appendUnichar(unichars[i]);
+ }
+ } break;
+ case SkPaint::kUTF8_TextEncoding: {
+ const char* c8 = reinterpret_cast<const char*>(text);
+ for (int i = 0; i < count; ++i) {
+ this->appendUnichar(SkUTF8_NextUnichar(&c8));
+ }
+ SkASSERT(reinterpret_cast<const char*>(text) + byteLen == c8);
+ } break;
+ case SkPaint::kUTF16_TextEncoding: {
+ const uint16_t* c16 = reinterpret_cast<const uint16_t*>(text);
+ for (int i = 0; i < count; ++i) {
+ this->appendUnichar(SkUTF16_NextUnichar(&c16));
+ }
+ SkASSERT(SkIsAlign2(byteLen));
+ SkASSERT(reinterpret_cast<const uint16_t*>(text) + (byteLen / 2) == c16);
+ } break;
+ case SkPaint::kUTF32_TextEncoding: {
+ SkASSERT(count * sizeof(uint32_t) == byteLen);
+ const uint32_t* c32 = reinterpret_cast<const uint32_t*>(text);
+ for (int i = 0; i < count; ++i) {
+ this->appendUnichar(c32[i]);
+ }
+ } break;
+ default:
+ SkFAIL("unknown text encoding");
+ }
+
+ if (scalarsPerPos < 2) {
+ SkASSERT(fPosY.isEmpty());
+ fPosY.appendScalar(offset.y()); // DrawText or DrawPosTextH (fixed Y).
+ }
+
+ if (scalarsPerPos < 1) {
+ SkASSERT(fPosX.isEmpty());
+ fPosX.appendScalar(offset.x()); // DrawText (X also fixed).
+ }
+ }
+
+ const SkString& text() const { return fText; }
+ const SkString& posX() const { return fPosX; }
+ const SkString& posY() const { return fPosY; }
+
+private:
+ void appendUnichar(SkUnichar c) {
+ bool discardPos = false;
+ bool isWhitespace = false;
+
+ switch(c) {
+ case ' ':
+ case '\t':
+ // consolidate whitespace to match SVG's xml:space=default munging
+ // (http://www.w3.org/TR/SVG/text.html#WhiteSpace)
+ if (fLastCharWasWhitespace) {
+ discardPos = true;
+ } else {
+ fText.appendUnichar(c);
+ }
+ isWhitespace = true;
+ break;
+ case '\0':
+ // SkPaint::glyphsToUnichars() returns \0 for inconvertible glyphs, but these
+ // are not legal XML characters (http://www.w3.org/TR/REC-xml/#charsets)
+ discardPos = true;
+ isWhitespace = fLastCharWasWhitespace; // preserve whitespace consolidation
+ break;
+ case '&':
+ fText.append("&amp;");
+ break;
+ case '"':
+ fText.append("&quot;");
+ break;
+ case '\'':
+ fText.append("&apos;");
+ break;
+ case '<':
+ fText.append("&lt;");
+ break;
+ case '>':
+ fText.append("&gt;");
+ break;
+ default:
+ fText.appendUnichar(c);
+ break;
+ }
+
+ this->advancePos(discardPos);
+ fLastCharWasWhitespace = isWhitespace;
+ }
+
+ void advancePos(bool discard) {
+ if (!discard && fScalarsPerPos > 0) {
+ fPosX.appendf("%.8g, ", fOffset.x() + fPos[0]);
+ if (fScalarsPerPos > 1) {
+ SkASSERT(fScalarsPerPos == 2);
+ fPosY.appendf("%.8g, ", fOffset.y() + fPos[1]);
+ }
+ }
+ fPos += fScalarsPerPos;
+ }
+
+ const SkPoint& fOffset;
+ const unsigned fScalarsPerPos;
+ const SkScalar* fPos;
+
+ SkString fText, fPosX, fPosY;
+ bool fLastCharWasWhitespace;
+};
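
A minimal sketch of the whitespace and escaping rules implemented by appendUnichar() (illustrative only; SVGTextBuilder lives in this file's anonymous namespace, and the paint setup here is an assumption):

    SkPaint paint;
    paint.setTextEncoding(SkPaint::kUTF8_TextEncoding);
    const char msg[] = "  a  <b> & c";
    SVGTextBuilder builder(msg, sizeof(msg) - 1, paint, SkPoint::Make(0, 0), 0);
    // builder.text() == "a &lt;b&gt; &amp; c"
    //   (leading spaces stripped, internal whitespace runs collapsed, XML metacharacters escaped)
    // builder.posX() == "0", builder.posY() == "0"  (fixed x/y, as for a plain drawText call)
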
+
+}
+
+// For now all this does is serve unique serial IDs, but it will eventually evolve to track
+// and deduplicate resources.
+class SkSVGDevice::ResourceBucket : ::SkNoncopyable {
+public:
+ ResourceBucket() : fGradientCount(0), fClipCount(0), fPathCount(0), fImageCount(0) {}
+
+ SkString addLinearGradient() {
+ return SkStringPrintf("gradient_%d", fGradientCount++);
+ }
+
+ SkString addClip() {
+ return SkStringPrintf("clip_%d", fClipCount++);
+ }
+
+ SkString addPath() {
+ return SkStringPrintf("path_%d", fPathCount++);
+ }
+
+ SkString addImage() {
+ return SkStringPrintf("img_%d", fImageCount++);
+ }
+
+private:
+ uint32_t fGradientCount;
+ uint32_t fClipCount;
+ uint32_t fPathCount;
+ uint32_t fImageCount;
+};
+
+class SkSVGDevice::AutoElement : ::SkNoncopyable {
+public:
+ AutoElement(const char name[], SkXMLWriter* writer)
+ : fWriter(writer)
+ , fResourceBucket(nullptr) {
+ fWriter->startElement(name);
+ }
+
+ AutoElement(const char name[], SkXMLWriter* writer, ResourceBucket* bucket,
+ const SkDraw& draw, const SkPaint& paint)
+ : fWriter(writer)
+ , fResourceBucket(bucket) {
+
+ Resources res = this->addResources(draw, paint);
+ if (!res.fClip.isEmpty()) {
+ // The clip is in device space. Apply it via a <g> wrapper to avoid local transform
+ // interference.
+ fClipGroup.reset(new AutoElement("g", fWriter));
+        fClipGroup->addAttribute("clip-path", res.fClip);
+ }
+
+ fWriter->startElement(name);
+
+ this->addPaint(paint, res);
+
+ if (!draw.fMatrix->isIdentity()) {
+ this->addAttribute("transform", svg_transform(*draw.fMatrix));
+ }
+ }
+
+ ~AutoElement() {
+ fWriter->endElement();
+ }
+
+ void addAttribute(const char name[], const char val[]) {
+ fWriter->addAttribute(name, val);
+ }
+
+ void addAttribute(const char name[], const SkString& val) {
+ fWriter->addAttribute(name, val.c_str());
+ }
+
+ void addAttribute(const char name[], int32_t val) {
+ fWriter->addS32Attribute(name, val);
+ }
+
+ void addAttribute(const char name[], SkScalar val) {
+ fWriter->addScalarAttribute(name, val);
+ }
+
+ void addText(const SkString& text) {
+ fWriter->addText(text.c_str(), text.size());
+ }
+
+ void addRectAttributes(const SkRect&);
+ void addPathAttributes(const SkPath&);
+ void addTextAttributes(const SkPaint&);
+
+private:
+ Resources addResources(const SkDraw& draw, const SkPaint& paint);
+ void addClipResources(const SkDraw& draw, Resources* resources);
+ void addShaderResources(const SkPaint& paint, Resources* resources);
+
+ void addPaint(const SkPaint& paint, const Resources& resources);
+
+ SkString addLinearGradientDef(const SkShader::GradientInfo& info, const SkShader* shader);
+
+ SkXMLWriter* fWriter;
+ ResourceBucket* fResourceBucket;
+ SkAutoTDelete<AutoElement> fClipGroup;
+};
+
+void SkSVGDevice::AutoElement::addPaint(const SkPaint& paint, const Resources& resources) {
+ SkPaint::Style style = paint.getStyle();
+ if (style == SkPaint::kFill_Style || style == SkPaint::kStrokeAndFill_Style) {
+ this->addAttribute("fill", resources.fPaintServer);
+
+ if (SK_AlphaOPAQUE != SkColorGetA(paint.getColor())) {
+ this->addAttribute("fill-opacity", svg_opacity(paint.getColor()));
+ }
+ } else {
+ SkASSERT(style == SkPaint::kStroke_Style);
+ this->addAttribute("fill", "none");
+ }
+
+ if (style == SkPaint::kStroke_Style || style == SkPaint::kStrokeAndFill_Style) {
+ this->addAttribute("stroke", resources.fPaintServer);
+
+ SkScalar strokeWidth = paint.getStrokeWidth();
+ if (strokeWidth == 0) {
+ // Hairline stroke
+ strokeWidth = 1;
+ this->addAttribute("vector-effect", "non-scaling-stroke");
+ }
+ this->addAttribute("stroke-width", strokeWidth);
+
+ if (const char* cap = svg_cap(paint.getStrokeCap())) {
+ this->addAttribute("stroke-linecap", cap);
+ }
+
+ if (const char* join = svg_join(paint.getStrokeJoin())) {
+ this->addAttribute("stroke-linejoin", join);
+ }
+
+ if (paint.getStrokeJoin() == SkPaint::kMiter_Join) {
+ this->addAttribute("stroke-miterlimit", paint.getStrokeMiter());
+ }
+
+ if (SK_AlphaOPAQUE != SkColorGetA(paint.getColor())) {
+ this->addAttribute("stroke-opacity", svg_opacity(paint.getColor()));
+ }
+ } else {
+ SkASSERT(style == SkPaint::kFill_Style);
+ this->addAttribute("stroke", "none");
+ }
+}
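
As a worked example of the logic above: a stroke-only paint with stroke width 0 (a hairline) produces fill="none", stroke set to the paint server, stroke-width="1", and vector-effect="non-scaling-stroke" on the element, plus stroke-linecap/stroke-linejoin only when svg_cap()/svg_join() return a non-default value.
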
+
+Resources SkSVGDevice::AutoElement::addResources(const SkDraw& draw, const SkPaint& paint) {
+ Resources resources(paint);
+
+ // FIXME: this is a weak heuristic and we end up with LOTS of redundant clips.
+ bool hasClip = !draw.fClipStack->isWideOpen();
+ bool hasShader = SkToBool(paint.getShader());
+
+ if (hasClip || hasShader) {
+ AutoElement defs("defs", fWriter);
+
+ if (hasClip) {
+ this->addClipResources(draw, &resources);
+ }
+
+ if (hasShader) {
+ this->addShaderResources(paint, &resources);
+ }
+ }
+
+ return resources;
+}
+
+void SkSVGDevice::AutoElement::addShaderResources(const SkPaint& paint, Resources* resources) {
+ const SkShader* shader = paint.getShader();
+ SkASSERT(SkToBool(shader));
+
+ SkShader::GradientInfo grInfo;
+ grInfo.fColorCount = 0;
+ if (SkShader::kLinear_GradientType != shader->asAGradient(&grInfo)) {
+ // TODO: non-linear gradient support
+ SkDebugf("unsupported shader type\n");
+ return;
+ }
+
+ SkAutoSTArray<16, SkColor> grColors(grInfo.fColorCount);
+ SkAutoSTArray<16, SkScalar> grOffsets(grInfo.fColorCount);
+ grInfo.fColors = grColors.get();
+ grInfo.fColorOffsets = grOffsets.get();
+
+ // One more call to get the actual colors/offsets.
+ shader->asAGradient(&grInfo);
+ SkASSERT(grInfo.fColorCount <= grColors.count());
+ SkASSERT(grInfo.fColorCount <= grOffsets.count());
+
+ resources->fPaintServer.printf("url(#%s)", addLinearGradientDef(grInfo, shader).c_str());
+}
+
+void SkSVGDevice::AutoElement::addClipResources(const SkDraw& draw, Resources* resources) {
+ SkASSERT(!draw.fClipStack->isWideOpen());
+
+ SkPath clipPath;
+ (void) draw.fClipStack->asPath(&clipPath);
+
+ SkString clipID = fResourceBucket->addClip();
+ const char* clipRule = clipPath.getFillType() == SkPath::kEvenOdd_FillType ?
+ "evenodd" : "nonzero";
+ {
+        // clipPath is in device space; since we only push transform attributes to the leaf
+        // nodes, all our elements are in device space too => SVG userSpaceOnUse == device space.
+ AutoElement clipPathElement("clipPath", fWriter);
+ clipPathElement.addAttribute("id", clipID);
+
+ SkRect clipRect = SkRect::MakeEmpty();
+ if (clipPath.isEmpty() || clipPath.isRect(&clipRect)) {
+ AutoElement rectElement("rect", fWriter);
+ rectElement.addRectAttributes(clipRect);
+ rectElement.addAttribute("clip-rule", clipRule);
+ } else {
+ AutoElement pathElement("path", fWriter);
+ pathElement.addPathAttributes(clipPath);
+ pathElement.addAttribute("clip-rule", clipRule);
+ }
+ }
+
+ resources->fClip.printf("url(#%s)", clipID.c_str());
+}
+
+SkString SkSVGDevice::AutoElement::addLinearGradientDef(const SkShader::GradientInfo& info,
+ const SkShader* shader) {
+ SkASSERT(fResourceBucket);
+ SkString id = fResourceBucket->addLinearGradient();
+
+ {
+ AutoElement gradient("linearGradient", fWriter);
+
+ gradient.addAttribute("id", id);
+ gradient.addAttribute("gradientUnits", "userSpaceOnUse");
+ gradient.addAttribute("x1", info.fPoint[0].x());
+ gradient.addAttribute("y1", info.fPoint[0].y());
+ gradient.addAttribute("x2", info.fPoint[1].x());
+ gradient.addAttribute("y2", info.fPoint[1].y());
+
+ if (!shader->getLocalMatrix().isIdentity()) {
+ this->addAttribute("gradientTransform", svg_transform(shader->getLocalMatrix()));
+ }
+
+ SkASSERT(info.fColorCount >= 2);
+ for (int i = 0; i < info.fColorCount; ++i) {
+ SkColor color = info.fColors[i];
+ SkString colorStr(svg_color(color));
+
+ {
+ AutoElement stop("stop", fWriter);
+ stop.addAttribute("offset", info.fColorOffsets[i]);
+ stop.addAttribute("stop-color", colorStr.c_str());
+
+ if (SK_AlphaOPAQUE != SkColorGetA(color)) {
+ stop.addAttribute("stop-opacity", svg_opacity(color));
+ }
+ }
+ }
+ }
+
+ return id;
+}
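
The resulting structure for a two-stop linear gradient is a <defs> block (opened by addResources()) containing a <linearGradient> with id="gradient_N", gradientUnits="userSpaceOnUse", the x1/y1/x2/y2 endpoints, an optional gradientTransform, and one <stop> child per color carrying offset and stop-color (plus stop-opacity for non-opaque colors); the paint server then references it as url(#gradient_N).
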
+
+void SkSVGDevice::AutoElement::addRectAttributes(const SkRect& rect) {
+ // x, y default to 0
+ if (rect.x() != 0) {
+ this->addAttribute("x", rect.x());
+ }
+ if (rect.y() != 0) {
+ this->addAttribute("y", rect.y());
+ }
+
+ this->addAttribute("width", rect.width());
+ this->addAttribute("height", rect.height());
+}
+
+void SkSVGDevice::AutoElement::addPathAttributes(const SkPath& path) {
+ SkString pathData;
+ SkParsePath::ToSVGString(path, &pathData);
+ this->addAttribute("d", pathData);
+}
+
+void SkSVGDevice::AutoElement::addTextAttributes(const SkPaint& paint) {
+ this->addAttribute("font-size", paint.getTextSize());
+
+ if (const char* textAlign = svg_text_align(paint.getTextAlign())) {
+ this->addAttribute("text-anchor", textAlign);
+ }
+
+ SkString familyName;
+ SkTHashSet<SkString> familySet;
+ sk_sp<const SkTypeface> tface(paint.getTypeface() ?
+ sk_ref_sp(paint.getTypeface()) : SkTypeface::MakeDefault());
+
+ SkASSERT(tface);
+ SkTypeface::Style style = tface->style();
+ if (style & SkTypeface::kItalic) {
+ this->addAttribute("font-style", "italic");
+ }
+ if (style & SkTypeface::kBold) {
+ this->addAttribute("font-weight", "bold");
+ }
+
+ SkAutoTUnref<SkTypeface::LocalizedStrings> familyNameIter(tface->createFamilyNameIterator());
+ SkTypeface::LocalizedString familyString;
+ while (familyNameIter->next(&familyString)) {
+ if (familySet.contains(familyString.fString)) {
+ continue;
+ }
+ familySet.add(familyString.fString);
+ familyName.appendf((familyName.isEmpty() ? "%s" : ", %s"), familyString.fString.c_str());
+ }
+
+ if (!familyName.isEmpty()) {
+ this->addAttribute("font-family", familyName);
+ }
+}
+
+SkBaseDevice* SkSVGDevice::Create(const SkISize& size, SkXMLWriter* writer) {
+ if (!writer) {
+ return nullptr;
+ }
+
+ return new SkSVGDevice(size, writer);
+}
+
+SkSVGDevice::SkSVGDevice(const SkISize& size, SkXMLWriter* writer)
+ : INHERITED(SkImageInfo::MakeUnknown(size.fWidth, size.fHeight),
+ SkSurfaceProps(0, kUnknown_SkPixelGeometry))
+ , fWriter(writer)
+ , fResourceBucket(new ResourceBucket)
+{
+ SkASSERT(writer);
+
+ fWriter->writeHeader();
+
+ // The root <svg> tag gets closed by the destructor.
+ fRootElement.reset(new AutoElement("svg", fWriter));
+
+ fRootElement->addAttribute("xmlns", "http://www.w3.org/2000/svg");
+ fRootElement->addAttribute("xmlns:xlink", "http://www.w3.org/1999/xlink");
+ fRootElement->addAttribute("width", size.width());
+ fRootElement->addAttribute("height", size.height());
+}
+
+SkSVGDevice::~SkSVGDevice() {
+}
+
+void SkSVGDevice::drawPaint(const SkDraw& draw, const SkPaint& paint) {
+ AutoElement rect("rect", fWriter, fResourceBucket, draw, paint);
+ rect.addRectAttributes(SkRect::MakeWH(SkIntToScalar(this->width()),
+ SkIntToScalar(this->height())));
+}
+
+void SkSVGDevice::drawPoints(const SkDraw& draw, SkCanvas::PointMode mode, size_t count,
+ const SkPoint pts[], const SkPaint& paint) {
+ SkPath path;
+
+ switch (mode) {
+ // todo
+ case SkCanvas::kPoints_PointMode:
+ SkDebugf("unsupported operation: drawPoints(kPoints_PointMode)\n");
+ break;
+ case SkCanvas::kLines_PointMode:
+ count -= 1;
+ for (size_t i = 0; i < count; i += 2) {
+ path.rewind();
+ path.moveTo(pts[i]);
+ path.lineTo(pts[i+1]);
+ AutoElement elem("path", fWriter, fResourceBucket, draw, paint);
+ elem.addPathAttributes(path);
+ }
+ break;
+ case SkCanvas::kPolygon_PointMode:
+ if (count > 1) {
+ path.addPoly(pts, SkToInt(count), false);
+ path.moveTo(pts[0]);
+ AutoElement elem("path", fWriter, fResourceBucket, draw, paint);
+ elem.addPathAttributes(path);
+ }
+ break;
+ }
+}
+
+void SkSVGDevice::drawRect(const SkDraw& draw, const SkRect& r, const SkPaint& paint) {
+ AutoElement rect("rect", fWriter, fResourceBucket, draw, paint);
+ rect.addRectAttributes(r);
+}
+
+void SkSVGDevice::drawOval(const SkDraw& draw, const SkRect& oval, const SkPaint& paint) {
+ AutoElement ellipse("ellipse", fWriter, fResourceBucket, draw, paint);
+ ellipse.addAttribute("cx", oval.centerX());
+ ellipse.addAttribute("cy", oval.centerY());
+ ellipse.addAttribute("rx", oval.width() / 2);
+ ellipse.addAttribute("ry", oval.height() / 2);
+}
+
+void SkSVGDevice::drawRRect(const SkDraw& draw, const SkRRect& rr, const SkPaint& paint) {
+ SkPath path;
+ path.addRRect(rr);
+
+ AutoElement elem("path", fWriter, fResourceBucket, draw, paint);
+ elem.addPathAttributes(path);
+}
+
+void SkSVGDevice::drawPath(const SkDraw& draw, const SkPath& path, const SkPaint& paint,
+ const SkMatrix* prePathMatrix, bool pathIsMutable) {
+ AutoElement elem("path", fWriter, fResourceBucket, draw, paint);
+ elem.addPathAttributes(path);
+
+ // TODO: inverse fill types?
+ if (path.getFillType() == SkPath::kEvenOdd_FillType) {
+ elem.addAttribute("fill-rule", "evenodd");
+ }
+}
+
+void SkSVGDevice::drawBitmapCommon(const SkDraw& draw, const SkBitmap& bm,
+ const SkPaint& paint) {
+ SkAutoTUnref<const SkData> pngData(
+ SkImageEncoder::EncodeData(bm, SkImageEncoder::kPNG_Type, SkImageEncoder::kDefaultQuality));
+ if (!pngData) {
+ return;
+ }
+
+ size_t b64Size = SkBase64::Encode(pngData->data(), pngData->size(), nullptr);
+ SkAutoTMalloc<char> b64Data(b64Size);
+ SkBase64::Encode(pngData->data(), pngData->size(), b64Data.get());
+
+ SkString svgImageData("data:image/png;base64,");
+ svgImageData.append(b64Data.get(), b64Size);
+
+ SkString imageID = fResourceBucket->addImage();
+ {
+ AutoElement defs("defs", fWriter);
+ {
+ AutoElement image("image", fWriter);
+ image.addAttribute("id", imageID);
+ image.addAttribute("width", bm.width());
+ image.addAttribute("height", bm.height());
+ image.addAttribute("xlink:href", svgImageData);
+ }
+ }
+
+ {
+ AutoElement imageUse("use", fWriter, fResourceBucket, draw, paint);
+ imageUse.addAttribute("xlink:href", SkStringPrintf("#%s", imageID.c_str()));
+ }
+}
+
+void SkSVGDevice::drawBitmap(const SkDraw& draw, const SkBitmap& bitmap,
+ const SkMatrix& matrix, const SkPaint& paint) {
+ SkMatrix adjustedMatrix = *draw.fMatrix;
+ adjustedMatrix.preConcat(matrix);
+ SkDraw adjustedDraw(draw);
+ adjustedDraw.fMatrix = &adjustedMatrix;
+
+ drawBitmapCommon(adjustedDraw, bitmap, paint);
+}
+
+void SkSVGDevice::drawSprite(const SkDraw& draw, const SkBitmap& bitmap,
+ int x, int y, const SkPaint& paint) {
+ SkMatrix adjustedMatrix = *draw.fMatrix;
+ adjustedMatrix.preTranslate(SkIntToScalar(x), SkIntToScalar(y));
+ SkDraw adjustedDraw(draw);
+ adjustedDraw.fMatrix = &adjustedMatrix;
+
+ drawBitmapCommon(adjustedDraw, bitmap, paint);
+}
+
+void SkSVGDevice::drawBitmapRect(const SkDraw& draw, const SkBitmap& bm, const SkRect* srcOrNull,
+ const SkRect& dst, const SkPaint& paint,
+ SkCanvas::SrcRectConstraint) {
+ SkMatrix adjustedMatrix;
+ adjustedMatrix.setRectToRect(srcOrNull ? *srcOrNull : SkRect::Make(bm.bounds()),
+ dst,
+ SkMatrix::kFill_ScaleToFit);
+ adjustedMatrix.postConcat(*draw.fMatrix);
+
+ SkDraw adjustedDraw(draw);
+ adjustedDraw.fMatrix = &adjustedMatrix;
+
+ SkClipStack adjustedClipStack;
+ if (srcOrNull && *srcOrNull != SkRect::Make(bm.bounds())) {
+ adjustedClipStack = *draw.fClipStack;
+ adjustedClipStack.clipRect(dst, *draw.fMatrix, SkCanvas::kIntersect_Op,
+ paint.isAntiAlias());
+ adjustedDraw.fClipStack = &adjustedClipStack;
+ }
+
+ drawBitmapCommon(adjustedDraw, bm, paint);
+}
+
+void SkSVGDevice::drawText(const SkDraw& draw, const void* text, size_t len,
+ SkScalar x, SkScalar y, const SkPaint& paint) {
+ AutoElement elem("text", fWriter, fResourceBucket, draw, paint);
+ elem.addTextAttributes(paint);
+
+ SVGTextBuilder builder(text, len, paint, SkPoint::Make(x, y), 0);
+ elem.addAttribute("x", builder.posX());
+ elem.addAttribute("y", builder.posY());
+ elem.addText(builder.text());
+}
+
+void SkSVGDevice::drawPosText(const SkDraw& draw, const void* text, size_t len,
+ const SkScalar pos[], int scalarsPerPos, const SkPoint& offset,
+ const SkPaint& paint) {
+ SkASSERT(scalarsPerPos == 1 || scalarsPerPos == 2);
+
+ AutoElement elem("text", fWriter, fResourceBucket, draw, paint);
+ elem.addTextAttributes(paint);
+
+ SVGTextBuilder builder(text, len, paint, offset, scalarsPerPos, pos);
+ elem.addAttribute("x", builder.posX());
+ elem.addAttribute("y", builder.posY());
+ elem.addText(builder.text());
+}
+
+void SkSVGDevice::drawTextOnPath(const SkDraw&, const void* text, size_t len, const SkPath& path,
+ const SkMatrix* matrix, const SkPaint& paint) {
+ SkString pathID = fResourceBucket->addPath();
+
+ {
+ AutoElement defs("defs", fWriter);
+ AutoElement pathElement("path", fWriter);
+ pathElement.addAttribute("id", pathID);
+ pathElement.addPathAttributes(path);
+
+ }
+
+ {
+ AutoElement textElement("text", fWriter);
+ textElement.addTextAttributes(paint);
+
+ if (matrix && !matrix->isIdentity()) {
+ textElement.addAttribute("transform", svg_transform(*matrix));
+ }
+
+ {
+ AutoElement textPathElement("textPath", fWriter);
+ textPathElement.addAttribute("xlink:href", SkStringPrintf("#%s", pathID.c_str()));
+
+ if (paint.getTextAlign() != SkPaint::kLeft_Align) {
+ SkASSERT(paint.getTextAlign() == SkPaint::kCenter_Align ||
+ paint.getTextAlign() == SkPaint::kRight_Align);
+ textPathElement.addAttribute("startOffset",
+ paint.getTextAlign() == SkPaint::kCenter_Align ? "50%" : "100%");
+ }
+
+ SVGTextBuilder builder(text, len, paint, SkPoint::Make(0, 0), 0);
+ textPathElement.addText(builder.text());
+ }
+ }
+}
+
+void SkSVGDevice::drawVertices(const SkDraw&, SkCanvas::VertexMode, int vertexCount,
+ const SkPoint verts[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint) {
+ // todo
+ SkDebugf("unsupported operation: drawVertices()\n");
+}
+
+void SkSVGDevice::drawDevice(const SkDraw&, SkBaseDevice*, int x, int y,
+ const SkPaint&) {
+ // todo
+ SkDebugf("unsupported operation: drawDevice()\n");
+}
diff --git a/gfx/skia/skia/src/svg/SkSVGDevice.h b/gfx/skia/skia/src/svg/SkSVGDevice.h
new file mode 100644
index 000000000..cb13ffdc8
--- /dev/null
+++ b/gfx/skia/skia/src/svg/SkSVGDevice.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSVGDevice_DEFINED
+#define SkSVGDevice_DEFINED
+
+#include "SkDevice.h"
+#include "SkTemplates.h"
+
+class SkXMLWriter;
+
+class SkSVGDevice : public SkBaseDevice {
+public:
+ static SkBaseDevice* Create(const SkISize& size, SkXMLWriter* writer);
+
+protected:
+ void drawPaint(const SkDraw&, const SkPaint& paint) override;
+ void drawPoints(const SkDraw&, SkCanvas::PointMode mode, size_t count,
+ const SkPoint[], const SkPaint& paint) override;
+ void drawRect(const SkDraw&, const SkRect& r, const SkPaint& paint) override;
+ void drawOval(const SkDraw&, const SkRect& oval, const SkPaint& paint) override;
+ void drawRRect(const SkDraw&, const SkRRect& rr, const SkPaint& paint) override;
+ void drawPath(const SkDraw&, const SkPath& path,
+ const SkPaint& paint,
+ const SkMatrix* prePathMatrix = nullptr,
+ bool pathIsMutable = false) override;
+
+ void drawBitmap(const SkDraw&, const SkBitmap& bitmap,
+ const SkMatrix& matrix, const SkPaint& paint) override;
+ void drawSprite(const SkDraw&, const SkBitmap& bitmap,
+ int x, int y, const SkPaint& paint) override;
+ void drawBitmapRect(const SkDraw&, const SkBitmap&,
+ const SkRect* srcOrNull, const SkRect& dst,
+ const SkPaint& paint, SkCanvas::SrcRectConstraint) override;
+
+ void drawText(const SkDraw&, const void* text, size_t len,
+ SkScalar x, SkScalar y, const SkPaint& paint) override;
+ void drawPosText(const SkDraw&, const void* text, size_t len,
+ const SkScalar pos[], int scalarsPerPos,
+ const SkPoint& offset, const SkPaint& paint) override;
+ void drawTextOnPath(const SkDraw&, const void* text, size_t len,
+ const SkPath& path, const SkMatrix* matrix,
+ const SkPaint& paint) override;
+ void drawVertices(const SkDraw&, SkCanvas::VertexMode, int vertexCount,
+ const SkPoint verts[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint) override;
+
+ void drawDevice(const SkDraw&, SkBaseDevice*, int x, int y,
+ const SkPaint&) override;
+
+private:
+ SkSVGDevice(const SkISize& size, SkXMLWriter* writer);
+ virtual ~SkSVGDevice();
+
+ void drawBitmapCommon(const SkDraw& draw, const SkBitmap& bm, const SkPaint& paint);
+
+ class AutoElement;
+ class ResourceBucket;
+
+ SkXMLWriter* fWriter;
+ SkAutoTDelete<AutoElement> fRootElement;
+ SkAutoTDelete<ResourceBucket> fResourceBucket;
+
+ typedef SkBaseDevice INHERITED;
+};
+
+#endif // SkSVGDevice_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkBase64.cpp b/gfx/skia/skia/src/utils/SkBase64.cpp
new file mode 100644
index 000000000..cb3396ca7
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkBase64.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkBase64.h"
+
+#define DecodePad -2
+#define EncodePad 64
+
+static const char default_encode[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz"
+ "0123456789+/=";
+
+static const signed char decodeData[] = {
+ 62, -1, -1, -1, 63,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, DecodePad, -1, -1,
+ -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, -1,
+ -1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51
+};
+
+SkBase64::SkBase64() : fLength((size_t) -1), fData(nullptr) {
+}
+
+#if defined _WIN32 // disable warning 4701: 'two', etc. may be used without having been initialized
+#pragma warning ( push )
+#pragma warning ( disable : 4701 )
+#endif
+
+SkBase64::Error SkBase64::decode(const void* srcPtr, size_t size, bool writeDestination) {
+ unsigned char* dst = (unsigned char*) fData;
+ const unsigned char* dstStart = (const unsigned char*) fData;
+ const unsigned char* src = (const unsigned char*) srcPtr;
+ bool padTwo = false;
+ bool padThree = false;
+ const unsigned char* end = src + size;
+ while (src < end) {
+ unsigned char bytes[4];
+ int byte = 0;
+ do {
+ unsigned char srcByte = *src++;
+ if (srcByte == 0)
+ goto goHome;
+ if (srcByte <= ' ')
+ continue; // treat as white space
+ if (srcByte < '+' || srcByte > 'z')
+ return kBadCharError;
+ signed char decoded = decodeData[srcByte - '+'];
+ bytes[byte] = decoded;
+ if (decoded < 0) {
+ if (decoded == DecodePad)
+ goto handlePad;
+ return kBadCharError;
+ } else
+ byte++;
+ if (*src)
+ continue;
+ if (byte == 0)
+ goto goHome;
+ if (byte == 4)
+ break;
+handlePad:
+ if (byte < 2)
+ return kPadError;
+ padThree = true;
+ if (byte == 2)
+ padTwo = true;
+ break;
+ } while (byte < 4);
+ int two = 0;
+ int three = 0;
+ if (writeDestination) {
+ int one = (uint8_t) (bytes[0] << 2);
+ two = bytes[1];
+ one |= two >> 4;
+ two = (uint8_t) (two << 4);
+ three = bytes[2];
+ two |= three >> 2;
+ three = (uint8_t) (three << 6);
+ three |= bytes[3];
+ SkASSERT(one < 256 && two < 256 && three < 256);
+ *dst = (unsigned char) one;
+ }
+ dst++;
+ if (padTwo)
+ break;
+ if (writeDestination)
+ *dst = (unsigned char) two;
+ dst++;
+ if (padThree)
+ break;
+ if (writeDestination)
+ *dst = (unsigned char) three;
+ dst++;
+ }
+goHome:
+ fLength = dst - dstStart;
+ return kNoError;
+}
+
+#if defined _WIN32
+#pragma warning ( pop )
+#endif
+
+size_t SkBase64::Encode(const void* srcPtr, size_t length, void* dstPtr, const char* encodeMap) {
+ const char* encode;
+ if (nullptr == encodeMap) {
+ encode = default_encode;
+ } else {
+ encode = encodeMap;
+ }
+ const unsigned char* src = (const unsigned char*) srcPtr;
+ unsigned char* dst = (unsigned char*) dstPtr;
+ if (dst) {
+ size_t remainder = length % 3;
+ const unsigned char* end = &src[length - remainder];
+ while (src < end) {
+ unsigned a = *src++;
+ unsigned b = *src++;
+ unsigned c = *src++;
+ int d = c & 0x3F;
+ c = (c >> 6 | b << 2) & 0x3F;
+ b = (b >> 4 | a << 4) & 0x3F;
+ a = a >> 2;
+ *dst++ = encode[a];
+ *dst++ = encode[b];
+ *dst++ = encode[c];
+ *dst++ = encode[d];
+ }
+ if (remainder > 0) {
+ int k1 = 0;
+ int k2 = EncodePad;
+ int a = (uint8_t) *src++;
+ if (remainder == 2)
+ {
+ int b = *src++;
+ k1 = b >> 4;
+ k2 = (b << 2) & 0x3F;
+ }
+ *dst++ = encode[a >> 2];
+ *dst++ = encode[(k1 | a << 4) & 0x3F];
+ *dst++ = encode[k2];
+ *dst++ = encode[EncodePad];
+ }
+ }
+ return (length + 2) / 3 * 4;
+}
+
+SkBase64::Error SkBase64::decode(const char* src, size_t len) {
+ Error err = decode(src, len, false);
+ SkASSERT(err == kNoError);
+ if (err != kNoError)
+ return err;
+ fData = new char[fLength]; // should use sk_malloc/sk_free
+ decode(src, len, true);
+ return kNoError;
+}
diff --git a/gfx/skia/skia/src/utils/SkBase64.h b/gfx/skia/skia/src/utils/SkBase64.h
new file mode 100644
index 000000000..13350b7b6
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkBase64.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBase64_DEFINED
+#define SkBase64_DEFINED
+
+#include "SkTypes.h"
+
+struct SkBase64 {
+public:
+ enum Error {
+ kNoError,
+ kPadError,
+ kBadCharError
+ };
+
+ SkBase64();
+ Error decode(const char* src, size_t length);
+ char* getData() { return fData; }
+ /**
+ Base64 encodes src into dst. encode is a pointer to at least 65 chars.
+ encode[64] will be used as the pad character. Encodings other than the
+ default encoding cannot be decoded.
+ */
+ static size_t Encode(const void* src, size_t length, void* dest, const char* encode = nullptr);
+
+private:
+ Error decode(const void* srcPtr, size_t length, bool writeDestination);
+
+ size_t fLength;
+ char* fData;
+ friend class SkImageBaseBitmap;
+};
+
+#endif // SkBase64_DEFINED
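
A usage sketch of the two-pass Encode() pattern (mirroring SkSVGDevice::drawBitmapCommon above; the helper name is made up):

    #include "SkBase64.h"
    #include "SkString.h"
    #include "SkTemplates.h"   // SkAutoTMalloc

    static SkString to_base64(const void* data, size_t len) {
        size_t b64Size = SkBase64::Encode(data, len, nullptr);   // pass 1: compute output size
        SkAutoTMalloc<char> b64Data(b64Size);
        SkBase64::Encode(data, len, b64Data.get());              // pass 2: write the digits
        SkString out;
        out.append(b64Data.get(), b64Size);
        return out;
    }
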
diff --git a/gfx/skia/skia/src/utils/SkBitSet.h b/gfx/skia/skia/src/utils/SkBitSet.h
new file mode 100644
index 000000000..2e2dbebbb
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkBitSet.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitSet_DEFINED
+#define SkBitSet_DEFINED
+
+#include "SkTDArray.h"
+#include "SkTemplates.h"
+
+class SkBitSet {
+public:
+ explicit SkBitSet(int numberOfBits) {
+ SkASSERT(numberOfBits >= 0);
+ fDwordCount = (numberOfBits + 31) / 32; // Round up size to 32-bit boundary.
+ if (fDwordCount > 0) {
+ fBitData.reset((uint32_t*)sk_calloc_throw(fDwordCount * sizeof(uint32_t)));
+ }
+ }
+
+ SkBitSet(const SkBitSet&) = delete;
+ SkBitSet& operator=(const SkBitSet&) = delete;
+
+ /** Set the value of the index-th bit to true. */
+ void set(int index) {
+ uint32_t mask = 1 << (index & 31);
+ uint32_t* chunk = this->internalGet(index);
+ SkASSERT(chunk);
+ *chunk |= mask;
+ }
+
+ template<typename T>
+ void setAll(T* array, int len) {
+ static_assert(std::is_integral<T>::value, "T is integral");
+ for (int i = 0; i < len; ++i) {
+ this->set(static_cast<int>(array[i]));
+ }
+ }
+
+ bool has(int index) const {
+ const uint32_t* chunk = this->internalGet(index);
+ uint32_t mask = 1 << (index & 31);
+ return chunk && SkToBool(*chunk & mask);
+ }
+
+ /** Export indices of set bits to T array. */
+ template<typename T>
+ void exportTo(SkTDArray<T>* array) const {
+ static_assert(std::is_integral<T>::value, "T is integral");
+ SkASSERT(array);
+ uint32_t* data = reinterpret_cast<uint32_t*>(fBitData.get());
+ for (unsigned int i = 0; i < fDwordCount; ++i) {
+ uint32_t value = data[i];
+ if (value) { // There are set bits
+ unsigned int index = i * 32;
+ for (unsigned int j = 0; j < 32; ++j) {
+ if (0x1 & (value >> j)) {
+ array->push(index + j);
+ }
+ }
+ }
+ }
+ }
+
+private:
+ std::unique_ptr<uint32_t, SkFunctionWrapper<void, void, sk_free>> fBitData;
+ size_t fDwordCount; // Dword (32-bit) count of the bitset.
+
+ uint32_t* internalGet(int index) const {
+ size_t internalIndex = index / 32;
+ if (internalIndex >= fDwordCount) {
+ return nullptr;
+ }
+ return fBitData.get() + internalIndex;
+ }
+};
+
+
+#endif
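
A short usage sketch (the glyph-id framing is only an example):

    #include "SkBitSet.h"
    #include "SkTDArray.h"

    static void bitset_example() {
        SkBitSet glyphs(512);             // room for bit indices 0..511
        glyphs.set(3);
        glyphs.set(70);
        SkASSERT(glyphs.has(70) && !glyphs.has(71));

        SkTDArray<int32_t> used;
        glyphs.exportTo(&used);           // 'used' now holds the set indices {3, 70}
    }
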
diff --git a/gfx/skia/skia/src/utils/SkBitmapSourceDeserializer.cpp b/gfx/skia/skia/src/utils/SkBitmapSourceDeserializer.cpp
new file mode 100644
index 000000000..1f8cc1c7b
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkBitmapSourceDeserializer.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmapSourceDeserializer.h"
+
+#include "SkBitmap.h"
+#include "SkFilterQuality.h"
+#include "SkImage.h"
+#include "SkImageSource.h"
+#include "SkReadBuffer.h"
+
+sk_sp<SkFlattenable> SkBitmapSourceDeserializer::CreateProc(SkReadBuffer& buffer) {
+ SkFilterQuality filterQuality;
+ if (buffer.isVersionLT(SkReadBuffer::kBitmapSourceFilterQuality_Version)) {
+ filterQuality = kHigh_SkFilterQuality;
+ } else {
+ filterQuality = (SkFilterQuality)buffer.readInt();
+ }
+ SkRect src, dst;
+ buffer.readRect(&src);
+ buffer.readRect(&dst);
+ sk_sp<SkImage> image = buffer.readBitmapAsImage();
+ if (image) {
+ return SkImageSource::Make(std::move(image), src, dst, filterQuality);
+ }
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/utils/SkBitmapSourceDeserializer.h b/gfx/skia/skia/src/utils/SkBitmapSourceDeserializer.h
new file mode 100644
index 000000000..e01759991
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkBitmapSourceDeserializer.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapSourceDeserializer_DEFINED
+#define SkBitmapSourceDeserializer_DEFINED
+
+#include "SkFlattenable.h"
+
+// A temporary utility class to support deserializing legacy SkBitmapSource as SkImageSource.
+// Should be removed when SKP versions which may contain SkBitmapSource records are phased out.
+class SkBitmapSourceDeserializer : public SkFlattenable {
+public:
+ SK_DEFINE_FLATTENABLE_TYPE(SkImageFilter)
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkBitmapSource)
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkBoundaryPatch.cpp b/gfx/skia/skia/src/utils/SkBoundaryPatch.cpp
new file mode 100644
index 000000000..0cfb09c2c
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkBoundaryPatch.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkBoundaryPatch.h"
+
+SkBoundaryPatch::SkBoundaryPatch() : fBoundary(nullptr) {}
+
+SkBoundaryPatch::~SkBoundaryPatch() {
+ SkSafeUnref(fBoundary);
+}
+
+SkBoundary* SkBoundaryPatch::setBoundary(SkBoundary* b) {
+ SkRefCnt_SafeAssign(fBoundary, b);
+ return b;
+}
+
+static SkPoint SkMakePoint(SkScalar x, SkScalar y) {
+ SkPoint pt;
+ pt.set(x, y);
+ return pt;
+}
+
+static SkPoint SkPointInterp(const SkPoint& a, const SkPoint& b, SkScalar t) {
+ return SkMakePoint(SkScalarInterp(a.fX, b.fX, t),
+ SkScalarInterp(a.fY, b.fY, t));
+}
+
+SkPoint SkBoundaryPatch::eval(SkScalar unitU, SkScalar unitV) {
+ SkBoundary* b = fBoundary;
+ SkPoint u = SkPointInterp(b->eval(SkBoundary::kLeft, SK_Scalar1 - unitV),
+ b->eval(SkBoundary::kRight, unitV),
+ unitU);
+ SkPoint v = SkPointInterp(b->eval(SkBoundary::kTop, unitU),
+ b->eval(SkBoundary::kBottom, SK_Scalar1 - unitU),
+ unitV);
+ return SkMakePoint(SkScalarAve(u.fX, v.fX),
+ SkScalarAve(u.fY, v.fY));
+}
+
+bool SkBoundaryPatch::evalPatch(SkPoint verts[], int rows, int cols) {
+ if (rows < 2 || cols < 2) {
+ return false;
+ }
+
+ const SkScalar invR = SkScalarInvert(SkIntToScalar(rows - 1));
+ const SkScalar invC = SkScalarInvert(SkIntToScalar(cols - 1));
+
+ for (int y = 0; y < cols; y++) {
+ SkScalar yy = y * invC;
+ for (int x = 0; x < rows; x++) {
+ *verts++ = this->eval(x * invR, yy);
+ }
+ }
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////
+
+#include "SkGeometry.h"
+
+SkPoint SkLineBoundary::eval(Edge e, SkScalar t) {
+ SkASSERT((unsigned)e < 4);
+ return SkPointInterp(fPts[e], fPts[(e + 1) & 3], t);
+}
+
+SkPoint SkCubicBoundary::eval(Edge e, SkScalar t) {
+ SkASSERT((unsigned)e < 4);
+
+ // ensure our 4th cubic wraps to the start of the first
+ fPts[12] = fPts[0];
+
+ SkPoint loc;
+ SkEvalCubicAt(&fPts[e * 3], t, &loc, nullptr, nullptr);
+ return loc;
+}
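
A usage sketch (it assumes SkLineBoundary exposes its four corners as public fPts, as eval() above implies, and that SkBoundary is ref-counted, as setBoundary() and ~SkBoundaryPatch suggest):

    #include "SkBoundaryPatch.h"

    static void patch_example() {
        SkLineBoundary* boundary = new SkLineBoundary;
        boundary->fPts[0].set(0, 0);
        boundary->fPts[1].set(100, 0);
        boundary->fPts[2].set(100, 100);
        boundary->fPts[3].set(0, 100);

        SkBoundaryPatch patch;
        patch.setBoundary(boundary);   // the patch takes its own reference
        boundary->unref();             // drop ours; the patch now owns it

        SkPoint grid[4 * 4];
        patch.evalPatch(grid, 4, 4);   // fill a 4x4 grid of blended boundary points
    }
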
diff --git a/gfx/skia/skia/src/utils/SkCamera.cpp b/gfx/skia/skia/src/utils/SkCamera.cpp
new file mode 100644
index 000000000..c8c462a5f
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCamera.cpp
@@ -0,0 +1,373 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCamera.h"
+
+static SkScalar SkScalarDotDiv(int count, const SkScalar a[], int step_a,
+ const SkScalar b[], int step_b,
+ SkScalar denom) {
+ SkScalar prod = 0;
+ for (int i = 0; i < count; i++) {
+ prod += a[0] * b[0];
+ a += step_a;
+ b += step_b;
+ }
+ return prod / denom;
+}
+
+static SkScalar SkScalarDot(int count, const SkScalar a[], int step_a,
+ const SkScalar b[], int step_b) {
+ SkScalar prod = 0;
+ for (int i = 0; i < count; i++) {
+ prod += a[0] * b[0];
+ a += step_a;
+ b += step_b;
+ }
+ return prod;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkScalar SkPoint3D::normalize(SkUnit3D* unit) const {
+ SkScalar mag = SkScalarSqrt(fX*fX + fY*fY + fZ*fZ);
+ if (mag) {
+ SkScalar scale = SkScalarInvert(mag);
+ unit->fX = fX * scale;
+ unit->fY = fY * scale;
+ unit->fZ = fZ * scale;
+ } else {
+ unit->fX = unit->fY = unit->fZ = 0;
+ }
+ return mag;
+}
+
+SkScalar SkUnit3D::Dot(const SkUnit3D& a, const SkUnit3D& b) {
+ return a.fX * b.fX + a.fY * b.fY + a.fZ * b.fZ;
+}
+
+void SkUnit3D::Cross(const SkUnit3D& a, const SkUnit3D& b, SkUnit3D* cross) {
+ SkASSERT(cross);
+
+ // use x,y,z, in case &a == cross or &b == cross
+
+ SkScalar x = a.fY * b.fZ - a.fZ * b.fY;
+    SkScalar y = a.fZ * b.fX - a.fX * b.fZ;   // (a x b).y = a.z*b.x - a.x*b.z
+ SkScalar z = a.fX * b.fY - a.fY * b.fX;
+
+ cross->set(x, y, z);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPatch3D::SkPatch3D() {
+ this->reset();
+}
+
+void SkPatch3D::reset() {
+ fOrigin.set(0, 0, 0);
+ fU.set(SK_Scalar1, 0, 0);
+ fV.set(0, -SK_Scalar1, 0);
+}
+
+void SkPatch3D::transform(const SkMatrix3D& m, SkPatch3D* dst) const {
+ if (dst == nullptr) {
+ dst = (SkPatch3D*)this;
+ }
+ m.mapVector(fU, &dst->fU);
+ m.mapVector(fV, &dst->fV);
+ m.mapPoint(fOrigin, &dst->fOrigin);
+}
+
+SkScalar SkPatch3D::dotWith(SkScalar dx, SkScalar dy, SkScalar dz) const {
+ SkScalar cx = SkScalarMul(fU.fY, fV.fZ) - SkScalarMul(fU.fZ, fV.fY);
+    SkScalar cy = SkScalarMul(fU.fZ, fV.fX) - SkScalarMul(fU.fX, fV.fZ);
+ SkScalar cz = SkScalarMul(fU.fX, fV.fY) - SkScalarMul(fU.fY, fV.fX);
+
+ return SkScalarMul(cx, dx) + SkScalarMul(cy, dy) + SkScalarMul(cz, dz);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix3D::reset() {
+ memset(fMat, 0, sizeof(fMat));
+ fMat[0][0] = fMat[1][1] = fMat[2][2] = SK_Scalar1;
+}
+
+void SkMatrix3D::setTranslate(SkScalar x, SkScalar y, SkScalar z) {
+ memset(fMat, 0, sizeof(fMat));
+ fMat[0][0] = x;
+ fMat[1][1] = y;
+ fMat[2][2] = z;
+}
+
+void SkMatrix3D::setRotateX(SkScalar degX) {
+ SkScalar s, c;
+
+ s = SkScalarSinCos(SkDegreesToRadians(degX), &c);
+ this->setRow(0, SK_Scalar1, 0, 0);
+ this->setRow(1, 0, c, -s);
+ this->setRow(2, 0, s, c);
+}
+
+void SkMatrix3D::setRotateY(SkScalar degY) {
+ SkScalar s, c;
+
+ s = SkScalarSinCos(SkDegreesToRadians(degY), &c);
+ this->setRow(0, c, 0, -s);
+ this->setRow(1, 0, SK_Scalar1, 0);
+ this->setRow(2, s, 0, c);
+}
+
+void SkMatrix3D::setRotateZ(SkScalar degZ) {
+ SkScalar s, c;
+
+ s = SkScalarSinCos(SkDegreesToRadians(degZ), &c);
+ this->setRow(0, c, -s, 0);
+ this->setRow(1, s, c, 0);
+ this->setRow(2, 0, 0, SK_Scalar1);
+}
+
+void SkMatrix3D::preTranslate(SkScalar x, SkScalar y, SkScalar z) {
+ SkScalar col[3] = { x, y, z};
+
+ for (int i = 0; i < 3; i++) {
+ fMat[i][3] += SkScalarDot(3, &fMat[i][0], 1, col, 1);
+ }
+}
+
+void SkMatrix3D::preRotateX(SkScalar degX) {
+ SkMatrix3D m;
+ m.setRotateX(degX);
+ this->setConcat(*this, m);
+}
+
+void SkMatrix3D::preRotateY(SkScalar degY) {
+ SkMatrix3D m;
+ m.setRotateY(degY);
+ this->setConcat(*this, m);
+}
+
+void SkMatrix3D::preRotateZ(SkScalar degZ) {
+ SkMatrix3D m;
+ m.setRotateZ(degZ);
+ this->setConcat(*this, m);
+}
+
+void SkMatrix3D::setConcat(const SkMatrix3D& a, const SkMatrix3D& b) {
+ SkMatrix3D tmp;
+ SkMatrix3D* c = this;
+
+ if (this == &a || this == &b) {
+ c = &tmp;
+ }
+ for (int i = 0; i < 3; i++) {
+ for (int j = 0; j < 3; j++) {
+ c->fMat[i][j] = SkScalarDot(3, &a.fMat[i][0], 1, &b.fMat[0][j], 4);
+ }
+ c->fMat[i][3] = SkScalarDot(3, &a.fMat[i][0], 1,
+ &b.fMat[0][3], 4) + a.fMat[i][3];
+ }
+
+ if (c == &tmp) {
+ *this = tmp;
+ }
+}
+
+void SkMatrix3D::mapPoint(const SkPoint3D& src, SkPoint3D* dst) const {
+ SkScalar x = SkScalarDot(3, &fMat[0][0], 1, &src.fX, 1) + fMat[0][3];
+ SkScalar y = SkScalarDot(3, &fMat[1][0], 1, &src.fX, 1) + fMat[1][3];
+ SkScalar z = SkScalarDot(3, &fMat[2][0], 1, &src.fX, 1) + fMat[2][3];
+ dst->set(x, y, z);
+}
+
+void SkMatrix3D::mapVector(const SkVector3D& src, SkVector3D* dst) const {
+ SkScalar x = SkScalarDot(3, &fMat[0][0], 1, &src.fX, 1);
+ SkScalar y = SkScalarDot(3, &fMat[1][0], 1, &src.fX, 1);
+ SkScalar z = SkScalarDot(3, &fMat[2][0], 1, &src.fX, 1);
+ dst->set(x, y, z);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkCamera3D::SkCamera3D() {
+ this->reset();
+}
+
+void SkCamera3D::reset() {
+ fLocation.set(0, 0, -SkIntToScalar(576)); // 8 inches backward
+ fAxis.set(0, 0, SK_Scalar1); // forward
+ fZenith.set(0, -SK_Scalar1, 0); // up
+
+ fObserver.set(0, 0, fLocation.fZ);
+
+ fNeedToUpdate = true;
+}
+
+void SkCamera3D::update() {
+ fNeedToUpdate = true;
+}
+
+void SkCamera3D::doUpdate() const {
+ SkUnit3D axis, zenith, cross;
+
+ fAxis.normalize(&axis);
+
+ {
+ SkScalar dot = SkUnit3D::Dot(*SkTCast<const SkUnit3D*>(&fZenith), axis);
+
+ zenith.fX = fZenith.fX - dot * axis.fX;
+ zenith.fY = fZenith.fY - dot * axis.fY;
+ zenith.fZ = fZenith.fZ - dot * axis.fZ;
+
+ SkTCast<SkPoint3D*>(&zenith)->normalize(&zenith);
+ }
+
+ SkUnit3D::Cross(axis, zenith, &cross);
+
+ {
+ SkMatrix* orien = &fOrientation;
+ SkScalar x = fObserver.fX;
+ SkScalar y = fObserver.fY;
+ SkScalar z = fObserver.fZ;
+
+ orien->set(SkMatrix::kMScaleX, x * axis.fX - z * cross.fX);
+ orien->set(SkMatrix::kMSkewX, x * axis.fY - z * cross.fY);
+ orien->set(SkMatrix::kMTransX, x * axis.fZ - z * cross.fZ);
+ orien->set(SkMatrix::kMSkewY, y * axis.fX - z * zenith.fX);
+ orien->set(SkMatrix::kMScaleY, y * axis.fY - z * zenith.fY);
+ orien->set(SkMatrix::kMTransY, y * axis.fZ - z * zenith.fZ);
+ orien->set(SkMatrix::kMPersp0, axis.fX);
+ orien->set(SkMatrix::kMPersp1, axis.fY);
+ orien->set(SkMatrix::kMPersp2, axis.fZ);
+ }
+}
+
+void SkCamera3D::patchToMatrix(const SkPatch3D& quilt, SkMatrix* matrix) const {
+ if (fNeedToUpdate) {
+ this->doUpdate();
+ fNeedToUpdate = false;
+ }
+
+ const SkScalar* mapPtr = (const SkScalar*)(const void*)&fOrientation;
+ const SkScalar* patchPtr;
+ SkPoint3D diff;
+ SkScalar dot;
+
+ diff.fX = quilt.fOrigin.fX - fLocation.fX;
+ diff.fY = quilt.fOrigin.fY - fLocation.fY;
+ diff.fZ = quilt.fOrigin.fZ - fLocation.fZ;
+
+ dot = SkUnit3D::Dot(*SkTCast<const SkUnit3D*>(&diff),
+ *SkTCast<const SkUnit3D*>(SkTCast<const SkScalar*>(&fOrientation) + 6));
+
+ patchPtr = (const SkScalar*)&quilt;
+ matrix->set(SkMatrix::kMScaleX, SkScalarDotDiv(3, patchPtr, 1, mapPtr, 1, dot));
+ matrix->set(SkMatrix::kMSkewY, SkScalarDotDiv(3, patchPtr, 1, mapPtr+3, 1, dot));
+ matrix->set(SkMatrix::kMPersp0, SkScalarDotDiv(3, patchPtr, 1, mapPtr+6, 1, dot));
+
+ patchPtr += 3;
+ matrix->set(SkMatrix::kMSkewX, SkScalarDotDiv(3, patchPtr, 1, mapPtr, 1, dot));
+ matrix->set(SkMatrix::kMScaleY, SkScalarDotDiv(3, patchPtr, 1, mapPtr+3, 1, dot));
+ matrix->set(SkMatrix::kMPersp1, SkScalarDotDiv(3, patchPtr, 1, mapPtr+6, 1, dot));
+
+ patchPtr = (const SkScalar*)(const void*)&diff;
+ matrix->set(SkMatrix::kMTransX, SkScalarDotDiv(3, patchPtr, 1, mapPtr, 1, dot));
+ matrix->set(SkMatrix::kMTransY, SkScalarDotDiv(3, patchPtr, 1, mapPtr+3, 1, dot));
+ matrix->set(SkMatrix::kMPersp2, SK_Scalar1);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+Sk3DView::Sk3DView() {
+ fInitialRec.fMatrix.reset();
+ fRec = &fInitialRec;
+}
+
+Sk3DView::~Sk3DView() {
+ Rec* rec = fRec;
+ while (rec != &fInitialRec) {
+ Rec* next = rec->fNext;
+ delete rec;
+ rec = next;
+ }
+}
+
+void Sk3DView::save() {
+ Rec* rec = new Rec;
+ rec->fNext = fRec;
+ rec->fMatrix = fRec->fMatrix;
+ fRec = rec;
+}
+
+void Sk3DView::restore() {
+ SkASSERT(fRec != &fInitialRec);
+ Rec* next = fRec->fNext;
+ delete fRec;
+ fRec = next;
+}
+
+#ifdef SK_BUILD_FOR_ANDROID
+void Sk3DView::setCameraLocation(SkScalar x, SkScalar y, SkScalar z) {
+    // the camera location is passed in inches; convert to points (72 points per inch)
+ SkScalar lz = z * 72.0f;
+ fCamera.fLocation.set(x * 72.0f, y * 72.0f, lz);
+ fCamera.fObserver.set(0, 0, lz);
+ fCamera.update();
+
+}
+
+SkScalar Sk3DView::getCameraLocationX() {
+ return fCamera.fLocation.fX / 72.0f;
+}
+
+SkScalar Sk3DView::getCameraLocationY() {
+ return fCamera.fLocation.fY / 72.0f;
+}
+
+SkScalar Sk3DView::getCameraLocationZ() {
+ return fCamera.fLocation.fZ / 72.0f;
+}
+#endif
+
+void Sk3DView::translate(SkScalar x, SkScalar y, SkScalar z) {
+ fRec->fMatrix.preTranslate(x, y, z);
+}
+
+void Sk3DView::rotateX(SkScalar deg) {
+ fRec->fMatrix.preRotateX(deg);
+}
+
+void Sk3DView::rotateY(SkScalar deg) {
+ fRec->fMatrix.preRotateY(deg);
+}
+
+void Sk3DView::rotateZ(SkScalar deg) {
+ fRec->fMatrix.preRotateZ(deg);
+}
+
+SkScalar Sk3DView::dotWithNormal(SkScalar x, SkScalar y, SkScalar z) const {
+ SkPatch3D patch;
+ patch.transform(fRec->fMatrix);
+ return patch.dotWith(x, y, z);
+}
+
+void Sk3DView::getMatrix(SkMatrix* matrix) const {
+ if (matrix != nullptr) {
+ SkPatch3D patch;
+ patch.transform(fRec->fMatrix);
+ fCamera.patchToMatrix(patch, matrix);
+ }
+}
+
+#include "SkCanvas.h"
+
+void Sk3DView::applyToCanvas(SkCanvas* canvas) const {
+ SkMatrix matrix;
+
+ this->getMatrix(&matrix);
+ canvas->concat(matrix);
+}
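
A minimal usage sketch of the Sk3DView front end (the drawing callback body is hypothetical):

    #include "SkCamera.h"
    #include "SkCanvas.h"

    static void draw_tilted(SkCanvas* canvas) {
        Sk3DView view;
        view.save();
        view.rotateX(30);             // tilt 30 degrees about the x axis
        canvas->save();
        view.applyToCanvas(canvas);   // concat the projected 2D matrix
        // ... draw the content to be tilted ...
        canvas->restore();
        view.restore();
    }
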
diff --git a/gfx/skia/skia/src/utils/SkCanvasStack.cpp b/gfx/skia/skia/src/utils/SkCanvasStack.cpp
new file mode 100644
index 000000000..58607d7d2
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCanvasStack.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkCanvasStack.h"
+
+SkCanvasStack::SkCanvasStack(int width, int height)
+ : INHERITED(width, height) {}
+
+SkCanvasStack::~SkCanvasStack() {
+ this->removeAll();
+}
+
+void SkCanvasStack::pushCanvas(SkCanvas* canvas, const SkIPoint& origin) {
+ if (canvas) {
+ // compute the bounds of this canvas
+ const SkIRect canvasBounds = SkIRect::MakeSize(canvas->getDeviceSize());
+
+ // push the canvas onto the stack
+ this->INHERITED::addCanvas(canvas);
+
+ // push the canvas data onto the stack
+ CanvasData* data = &fCanvasData.push_back();
+ data->origin = origin;
+ data->requiredClip.setRect(canvasBounds);
+
+ // subtract this region from the canvas objects already on the stack.
+ // This ensures they do not draw into the space occupied by the layers
+ // above them.
+ for (int i = fList.count() - 1; i > 0; --i) {
+ SkIRect localBounds = canvasBounds;
+ localBounds.offset(origin - fCanvasData[i-1].origin);
+
+ fCanvasData[i-1].requiredClip.op(localBounds, SkRegion::kDifference_Op);
+ fList[i-1]->clipRegion(fCanvasData[i-1].requiredClip);
+ }
+ }
+ SkASSERT(fList.count() == fCanvasData.count());
+}
+
+void SkCanvasStack::removeAll() {
+ fCanvasData.reset();
+ this->INHERITED::removeAll();
+}
+
+/**
+ * Traverse all canvases (e.g. layers) in the stack and ensure that they are clipped
+ * to their bounds and that the area covered by any canvas higher in the stack is
+ * also clipped out.
+ */
+void SkCanvasStack::clipToZOrderedBounds() {
+ SkASSERT(fList.count() == fCanvasData.count());
+ for (int i = 0; i < fList.count(); ++i) {
+ fList[i]->clipRegion(fCanvasData[i].requiredClip);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * We need to handle setMatrix specially as it overwrites the matrix in each
+ * canvas unlike all other matrix operations (i.e. translate, scale, etc) which
+ * just pre-concatenate with the existing matrix.
+ */
+void SkCanvasStack::didSetMatrix(const SkMatrix& matrix) {
+ SkASSERT(fList.count() == fCanvasData.count());
+ for (int i = 0; i < fList.count(); ++i) {
+
+ SkMatrix tempMatrix = matrix;
+ tempMatrix.postTranslate(SkIntToScalar(-fCanvasData[i].origin.x()),
+ SkIntToScalar(-fCanvasData[i].origin.y()));
+ fList[i]->setMatrix(tempMatrix);
+ }
+ this->SkCanvas::didSetMatrix(matrix);
+}
+
+void SkCanvasStack::onClipRect(const SkRect& r, ClipOp op, ClipEdgeStyle edgeStyle) {
+ this->INHERITED::onClipRect(r, op, edgeStyle);
+ this->clipToZOrderedBounds();
+}
+
+void SkCanvasStack::onClipRRect(const SkRRect& rr, ClipOp op, ClipEdgeStyle edgeStyle) {
+ this->INHERITED::onClipRRect(rr, op, edgeStyle);
+ this->clipToZOrderedBounds();
+}
+
+void SkCanvasStack::onClipPath(const SkPath& p, ClipOp op, ClipEdgeStyle edgeStyle) {
+ this->INHERITED::onClipPath(p, op, edgeStyle);
+ this->clipToZOrderedBounds();
+}
+
+void SkCanvasStack::onClipRegion(const SkRegion& deviceRgn, ClipOp op) {
+ SkASSERT(fList.count() == fCanvasData.count());
+ for (int i = 0; i < fList.count(); ++i) {
+ SkRegion tempRegion;
+ deviceRgn.translate(-fCanvasData[i].origin.x(),
+ -fCanvasData[i].origin.y(), &tempRegion);
+ tempRegion.op(fCanvasData[i].requiredClip, SkRegion::kIntersect_Op);
+ fList[i]->clipRegion(tempRegion, op);
+ }
+ this->SkCanvas::onClipRegion(deviceRgn, op);
+}
diff --git a/gfx/skia/skia/src/utils/SkCanvasStack.h b/gfx/skia/skia/src/utils/SkCanvasStack.h
new file mode 100644
index 000000000..762ab9f76
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCanvasStack.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCanvasStack_DEFINED
+#define SkCanvasStack_DEFINED
+
+#include "SkNWayCanvas.h"
+#include "SkTArray.h"
+
+class SkCanvasStack : public SkNWayCanvas {
+public:
+ SkCanvasStack(int width, int height);
+ virtual ~SkCanvasStack();
+
+ void pushCanvas(SkCanvas* canvas, const SkIPoint& origin);
+ void removeAll() override;
+
+ /*
+ * The following add/remove canvas methods are overrides from SkNWayCanvas
+ * that do not make sense in the context of our CanvasStack, but since we
+ * can share most of the other implementation of NWay we override those
+ * methods to be no-ops.
+ */
+ void addCanvas(SkCanvas*) override { SkDEBUGFAIL("Invalid Op"); }
+ void removeCanvas(SkCanvas*) override { SkDEBUGFAIL("Invalid Op"); }
+
+protected:
+ void didSetMatrix(const SkMatrix&) override;
+
+ void onClipRect(const SkRect&, ClipOp, ClipEdgeStyle) override;
+ void onClipRRect(const SkRRect&, ClipOp, ClipEdgeStyle) override;
+ void onClipPath(const SkPath&, ClipOp, ClipEdgeStyle) override;
+ void onClipRegion(const SkRegion&, ClipOp) override;
+
+private:
+ void clipToZOrderedBounds();
+
+ struct CanvasData {
+ SkIPoint origin;
+ SkRegion requiredClip;
+ };
+
+ SkTArray<CanvasData> fCanvasData;
+
+ typedef SkNWayCanvas INHERITED;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkCanvasStateUtils.cpp b/gfx/skia/skia/src/utils/SkCanvasStateUtils.cpp
new file mode 100644
index 000000000..cc8ea43d4
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCanvasStateUtils.cpp
@@ -0,0 +1,355 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCanvasStateUtils.h"
+
+#include "SkCanvas.h"
+#include "SkCanvasStack.h"
+#include "SkDevice.h"
+#include "SkErrorInternals.h"
+#include "SkRasterClip.h"
+#include "SkWriter32.h"
+
+/*
+ * WARNING: The structs below are part of a stable ABI and as such we explicitly
+ * use unambiguous primitives (e.g. int32_t instead of an enum).
+ *
+ * ANY CHANGES TO THE STRUCTS BELOW THAT IMPACT THE ABI SHOULD RESULT IN A
+ * NEW SUBCLASS OF SkCanvasState. SUCH CHANGES SHOULD ONLY BE MADE IF ABSOLUTELY
+ * NECESSARY!
+ *
+ * In order to test changes, run the CanvasState tests. gyp/canvas_state_lib.gyp
+ * describes how to create a library to pass to the CanvasState tests. The tests
+ * should succeed when building the library with your changes and passing that to
+ * the tests running in the unchanged Skia.
+ */
+enum RasterConfigs {
+ kUnknown_RasterConfig = 0,
+ kRGB_565_RasterConfig = 1,
+ kARGB_8888_RasterConfig = 2
+};
+typedef int32_t RasterConfig;
+
+enum CanvasBackends {
+ kUnknown_CanvasBackend = 0,
+ kRaster_CanvasBackend = 1,
+ kGPU_CanvasBackend = 2,
+ kPDF_CanvasBackend = 3
+};
+typedef int32_t CanvasBackend;
+
+struct ClipRect {
+ int32_t left, top, right, bottom;
+};
+
+struct SkMCState {
+ float matrix[9];
+ // NOTE: this only works for non-antialiased clips
+ int32_t clipRectCount;
+ ClipRect* clipRects;
+};
+
+// NOTE: If you add more members, create a new subclass of SkCanvasState with a
+// new CanvasState::version.
+struct SkCanvasLayerState {
+ CanvasBackend type;
+ int32_t x, y;
+ int32_t width;
+ int32_t height;
+
+ SkMCState mcState;
+
+ union {
+ struct {
+ RasterConfig config; // pixel format: a value from RasterConfigs.
+ uint64_t rowBytes; // Number of bytes from start of one line to next.
+ void* pixels; // The pixels, all (height * rowBytes) of them.
+ } raster;
+ struct {
+ int32_t textureID;
+ } gpu;
+ };
+};
+
+class SkCanvasState {
+public:
+ SkCanvasState(int32_t version, SkCanvas* canvas) {
+ SkASSERT(canvas);
+ this->version = version;
+ width = canvas->getBaseLayerSize().width();
+ height = canvas->getBaseLayerSize().height();
+
+ }
+
+ /**
+ * The version this struct was built with. This field must always appear
+ * first in the struct so that when the versions don't match (and the
+ * remaining contents and size are potentially different) we can still
+ * compare the version numbers.
+ */
+ int32_t version;
+ int32_t width;
+ int32_t height;
+ int32_t alignmentPadding;
+};
+
+class SkCanvasState_v1 : public SkCanvasState {
+public:
+ static const int32_t kVersion = 1;
+
+ SkCanvasState_v1(SkCanvas* canvas)
+ : INHERITED(kVersion, canvas)
+ {
+ layerCount = 0;
+ layers = nullptr;
+ mcState.clipRectCount = 0;
+ mcState.clipRects = nullptr;
+ originalCanvas = SkRef(canvas);
+ }
+
+ ~SkCanvasState_v1() {
+ // loop through the layers and free the data allocated to the clipRects
+ for (int i = 0; i < layerCount; ++i) {
+ sk_free(layers[i].mcState.clipRects);
+ }
+
+ sk_free(mcState.clipRects);
+ sk_free(layers);
+
+ // it is now safe to free the canvas since there should be no remaining
+ // references to the content that is referenced by this canvas (e.g. pixels)
+ originalCanvas->unref();
+ }
+
+ SkMCState mcState;
+
+ int32_t layerCount;
+ SkCanvasLayerState* layers;
+private:
+ SkCanvas* originalCanvas;
+ typedef SkCanvasState INHERITED;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+class ClipValidator : public SkCanvas::ClipVisitor {
+public:
+ ClipValidator() : fFailed(false) {}
+ bool failed() { return fFailed; }
+
+ // ClipVisitor
+ void clipRect(const SkRect& rect, SkCanvas::ClipOp op, bool antialias) override {
+ fFailed |= antialias;
+ }
+
+ void clipRRect(const SkRRect& rrect, SkCanvas::ClipOp op, bool antialias) override {
+ fFailed |= antialias;
+ }
+
+ void clipPath(const SkPath&, SkCanvas::ClipOp, bool antialias) override {
+ fFailed |= antialias;
+ }
+
+private:
+ bool fFailed;
+};
+
+static void setup_MC_state(SkMCState* state, const SkMatrix& matrix, const SkRegion& clip) {
+ // initialize the struct
+ state->clipRectCount = 0;
+
+ // capture the matrix
+ for (int i = 0; i < 9; i++) {
+ state->matrix[i] = matrix.get(i);
+ }
+
+ /*
+ * capture the clip
+ *
+ * storage is allocated on the stack for the first 4 rects. This value was
+ * chosen somewhat arbitrarily, but does allow us to represent simple clips
+ * and some more common complex clips (e.g. a clipRect with a sub-rect
+ * clipped out of its interior) without needing to malloc any additional memory.
+ */
+ SkSWriter32<4*sizeof(ClipRect)> clipWriter;
+
+ if (!clip.isEmpty()) {
+ // only returns the b/w clip so aa clips fail
+ SkRegion::Iterator clip_iterator(clip);
+ for (; !clip_iterator.done(); clip_iterator.next()) {
+ // this assumes the SkIRect is stored in l,t,r,b ordering which
+ // matches the ordering of our ClipRect struct
+ clipWriter.writeIRect(clip_iterator.rect());
+ state->clipRectCount++;
+ }
+ }
+
+    // allocate memory for the clip rects and then copy them to the struct
+ state->clipRects = (ClipRect*) sk_malloc_throw(clipWriter.bytesWritten());
+ clipWriter.flatten(state->clipRects);
+}
+
+
+
+SkCanvasState* SkCanvasStateUtils::CaptureCanvasState(SkCanvas* canvas) {
+ SkASSERT(canvas);
+
+ // Check the clip can be decomposed into rectangles (i.e. no soft clips).
+ ClipValidator validator;
+ canvas->replayClips(&validator);
+ if (validator.failed()) {
+ SkErrorInternals::SetError(kInvalidOperation_SkError,
+ "CaptureCanvasState does not support canvases with antialiased clips.\n");
+ return nullptr;
+ }
+
+ SkAutoTDelete<SkCanvasState_v1> canvasState(new SkCanvasState_v1(canvas));
+
+ // decompose the total matrix and clip
+ setup_MC_state(&canvasState->mcState, canvas->getTotalMatrix(),
+ canvas->internal_private_getTotalClip());
+
+ /*
+ * decompose the layers
+ *
+ * storage is allocated on the stack for the first 3 layers. It is common in
+ * some view systems (e.g. Android) that a few non-clipped layers are present
+ * and we will not need to malloc any additional memory in those cases.
+ */
+ SkSWriter32<3*sizeof(SkCanvasLayerState)> layerWriter;
+ int layerCount = 0;
+ for (SkCanvas::LayerIter layer(canvas); !layer.done(); layer.next()) {
+
+ // we currently only work for bitmap backed devices
+ SkPixmap pmap;
+ if (!layer.device()->accessPixels(&pmap) || 0 == pmap.width() || 0 == pmap.height()) {
+ return nullptr;
+ }
+
+ SkCanvasLayerState* layerState =
+ (SkCanvasLayerState*) layerWriter.reserve(sizeof(SkCanvasLayerState));
+ layerState->type = kRaster_CanvasBackend;
+ layerState->x = layer.x();
+ layerState->y = layer.y();
+ layerState->width = pmap.width();
+ layerState->height = pmap.height();
+
+ switch (pmap.colorType()) {
+ case kN32_SkColorType:
+ layerState->raster.config = kARGB_8888_RasterConfig;
+ break;
+ case kRGB_565_SkColorType:
+ layerState->raster.config = kRGB_565_RasterConfig;
+ break;
+ default:
+ return nullptr;
+ }
+ layerState->raster.rowBytes = pmap.rowBytes();
+ layerState->raster.pixels = pmap.writable_addr();
+
+ setup_MC_state(&layerState->mcState, layer.matrix(), layer.clip().bwRgn());
+ layerCount++;
+ }
+
+    // allocate memory for the layers and then copy them to the struct
+ SkASSERT(layerWriter.bytesWritten() == layerCount * sizeof(SkCanvasLayerState));
+ canvasState->layerCount = layerCount;
+ canvasState->layers = (SkCanvasLayerState*) sk_malloc_throw(layerWriter.bytesWritten());
+ layerWriter.flatten(canvasState->layers);
+
+ return canvasState.release();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+static void setup_canvas_from_MC_state(const SkMCState& state, SkCanvas* canvas) {
+ // reconstruct the matrix
+ SkMatrix matrix;
+ for (int i = 0; i < 9; i++) {
+ matrix.set(i, state.matrix[i]);
+ }
+
+ // reconstruct the clip
+ SkRegion clip;
+ for (int i = 0; i < state.clipRectCount; ++i) {
+ clip.op(SkIRect::MakeLTRB(state.clipRects[i].left,
+ state.clipRects[i].top,
+ state.clipRects[i].right,
+ state.clipRects[i].bottom),
+ SkRegion::kUnion_Op);
+ }
+
+ canvas->setMatrix(matrix);
+ canvas->clipRegion(clip, SkCanvas::kReplace_Op);
+}
+
+static SkCanvas* create_canvas_from_canvas_layer(const SkCanvasLayerState& layerState) {
+ SkASSERT(kRaster_CanvasBackend == layerState.type);
+
+ SkBitmap bitmap;
+ SkColorType colorType =
+ layerState.raster.config == kARGB_8888_RasterConfig ? kN32_SkColorType :
+ layerState.raster.config == kRGB_565_RasterConfig ? kRGB_565_SkColorType :
+ kUnknown_SkColorType;
+
+ if (colorType == kUnknown_SkColorType) {
+ return nullptr;
+ }
+
+ bitmap.installPixels(SkImageInfo::Make(layerState.width, layerState.height,
+ colorType, kPremul_SkAlphaType),
+ layerState.raster.pixels, (size_t) layerState.raster.rowBytes);
+
+ SkASSERT(!bitmap.empty());
+ SkASSERT(!bitmap.isNull());
+
+ SkAutoTUnref<SkCanvas> canvas(new SkCanvas(bitmap));
+
+ // setup the matrix and clip
+ setup_canvas_from_MC_state(layerState.mcState, canvas.get());
+
+ return canvas.release();
+}
+
+SkCanvas* SkCanvasStateUtils::CreateFromCanvasState(const SkCanvasState* state) {
+ SkASSERT(state);
+ // Currently there is only one possible version.
+ SkASSERT(SkCanvasState_v1::kVersion == state->version);
+
+ const SkCanvasState_v1* state_v1 = static_cast<const SkCanvasState_v1*>(state);
+
+ if (state_v1->layerCount < 1) {
+ return nullptr;
+ }
+
+ SkAutoTUnref<SkCanvasStack> canvas(new SkCanvasStack(state->width, state->height));
+
+ // setup the matrix and clip on the n-way canvas
+ setup_canvas_from_MC_state(state_v1->mcState, canvas);
+
+ // Iterate over the layers and add them to the n-way canvas
+ for (int i = state_v1->layerCount - 1; i >= 0; --i) {
+ SkAutoTUnref<SkCanvas> canvasLayer(create_canvas_from_canvas_layer(state_v1->layers[i]));
+ if (!canvasLayer.get()) {
+ return nullptr;
+ }
+ canvas->pushCanvas(canvasLayer.get(), SkIPoint::Make(state_v1->layers[i].x,
+ state_v1->layers[i].y));
+ }
+
+ return canvas.release();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void SkCanvasStateUtils::ReleaseCanvasState(SkCanvasState* state) {
+ SkASSERT(!state || SkCanvasState_v1::kVersion == state->version);
+    // Downcast to the correct version of SkCanvasState. This avoids having a virtual destructor on
+ // SkCanvasState. That would be strange since SkCanvasState has no other virtual functions, and
+ // instead uses the field "version" to determine how to behave.
+ delete static_cast<SkCanvasState_v1*>(state);
+}
diff --git a/gfx/skia/skia/src/utils/SkCurveMeasure.cpp b/gfx/skia/skia/src/utils/SkCurveMeasure.cpp
new file mode 100644
index 000000000..a82008e67
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCurveMeasure.cpp
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkCurveMeasure.h"
+#include "SkGeometry.h"
+
+// for abs
+#include <cmath>
+
+#define UNIMPLEMENTED SkDEBUGF(("%s:%d unimplemented\n", __FILE__, __LINE__))
+
+/// Used inside SkCurveMeasure::getTime's Newton's iteration
+static inline SkPoint evaluate(const SkPoint pts[4], SkSegType segType,
+ SkScalar t) {
+ SkPoint pos;
+ switch (segType) {
+ case kQuad_SegType:
+ pos = SkEvalQuadAt(pts, t);
+ break;
+ case kLine_SegType:
+ pos = SkPoint::Make(SkScalarInterp(pts[0].x(), pts[1].x(), t),
+ SkScalarInterp(pts[0].y(), pts[1].y(), t));
+ break;
+ case kCubic_SegType:
+ SkEvalCubicAt(pts, t, &pos, nullptr, nullptr);
+ break;
+ case kConic_SegType: {
+ SkConic conic(pts, pts[3].x());
+ conic.evalAt(t, &pos);
+ }
+ break;
+ default:
+ UNIMPLEMENTED;
+ }
+
+ return pos;
+}
+
+/// Used inside SkCurveMeasure::getTime's Newton's iteration
+static inline SkVector evaluateDerivative(const SkPoint pts[4],
+ SkSegType segType, SkScalar t) {
+ SkVector tan;
+ switch (segType) {
+ case kQuad_SegType:
+ tan = SkEvalQuadTangentAt(pts, t);
+ break;
+ case kLine_SegType:
+ tan = pts[1] - pts[0];
+ break;
+ case kCubic_SegType:
+ SkEvalCubicAt(pts, t, nullptr, &tan, nullptr);
+ break;
+ case kConic_SegType: {
+ SkConic conic(pts, pts[3].x());
+ conic.evalAt(t, nullptr, &tan);
+ }
+ break;
+ default:
+ UNIMPLEMENTED;
+ }
+
+ return tan;
+}
+/// Used in ArcLengthIntegrator::computeLength
+static inline Sk8f evaluateDerivativeLength(const Sk8f& ts,
+ const Sk8f (&xCoeff)[3],
+ const Sk8f (&yCoeff)[3],
+ const SkSegType segType) {
+ Sk8f x;
+ Sk8f y;
+ switch (segType) {
+ case kQuad_SegType:
+ x = xCoeff[0]*ts + xCoeff[1];
+ y = yCoeff[0]*ts + yCoeff[1];
+ break;
+ case kCubic_SegType:
+ x = (xCoeff[0]*ts + xCoeff[1])*ts + xCoeff[2];
+ y = (yCoeff[0]*ts + yCoeff[1])*ts + yCoeff[2];
+ break;
+ case kConic_SegType:
+ UNIMPLEMENTED;
+ break;
+ default:
+ UNIMPLEMENTED;
+ }
+
+ x = x * x;
+ y = y * y;
+
+ return (x + y).sqrt();
+}
+
+ArcLengthIntegrator::ArcLengthIntegrator(const SkPoint* pts, SkSegType segType)
+ : fSegType(segType) {
+ switch (fSegType) {
+ case kQuad_SegType: {
+ float Ax = pts[0].x();
+ float Bx = pts[1].x();
+ float Cx = pts[2].x();
+ float Ay = pts[0].y();
+ float By = pts[1].y();
+ float Cy = pts[2].y();
+
+ // precompute coefficients for derivative
+ xCoeff[0] = Sk8f(2*(Ax - 2*Bx + Cx));
+ xCoeff[1] = Sk8f(2*(Bx - Ax));
+
+ yCoeff[0] = Sk8f(2*(Ay - 2*By + Cy));
+ yCoeff[1] = Sk8f(2*(By - Ay));
+ }
+ break;
+ case kCubic_SegType:
+ {
+ float Ax = pts[0].x();
+ float Bx = pts[1].x();
+ float Cx = pts[2].x();
+ float Dx = pts[3].x();
+ float Ay = pts[0].y();
+ float By = pts[1].y();
+ float Cy = pts[2].y();
+ float Dy = pts[3].y();
+
+ // precompute coefficients for derivative
+ xCoeff[0] = Sk8f(3*(-Ax + 3*(Bx - Cx) + Dx));
+ xCoeff[1] = Sk8f(6*(Ax - 2*Bx + Cx));
+ xCoeff[2] = Sk8f(3*(-Ax + Bx));
+
+ yCoeff[0] = Sk8f(3*(-Ay + 3*(By - Cy) + Dy));
+ yCoeff[1] = Sk8f(6*(Ay - 2*By + Cy));
+ yCoeff[2] = Sk8f(3*(-Ay + By));
+ }
+ break;
+ case kConic_SegType:
+ UNIMPLEMENTED;
+ break;
+ default:
+ UNIMPLEMENTED;
+ }
+}
+
+// We use Gaussian quadrature
+// (https://en.wikipedia.org/wiki/Gaussian_quadrature)
+// to approximate the arc length integral here, because it is amenable to SIMD.
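+// Concretely, the sum computed below approximates
+//     length(t) = integral_0^t sqrt(x'(u)^2 + y'(u)^2) du
+//              ~= (t/2) * sum_i weights[i] * |c'(t * absc[i])|
+// using the 8 Gauss-Legendre weights/nodes from SkCurveMeasure.h; absc already
+// holds the nodes remapped from [-1, 1] to [0, 1], which is why the samples are
+// taken at absc*t and the summed result is scaled by t*0.5.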
+SkScalar ArcLengthIntegrator::computeLength(SkScalar t) {
+ SkScalar length = 0.0f;
+
+ Sk8f lengths = evaluateDerivativeLength(absc*t, xCoeff, yCoeff, fSegType);
+ lengths = weights*lengths;
+ // is it faster or more accurate to sum and then multiply or vice versa?
+ lengths = lengths*(t*0.5f);
+
+ // Why does SkNx index with ints? does negative index mean something?
+ for (int i = 0; i < 8; i++) {
+ length += lengths[i];
+ }
+ return length;
+}
+
+SkCurveMeasure::SkCurveMeasure(const SkPoint* pts, SkSegType segType)
+ : fSegType(segType) {
+ switch (fSegType) {
+ case SkSegType::kQuad_SegType:
+ for (size_t i = 0; i < 3; i++) {
+ fPts[i] = pts[i];
+ }
+ break;
+ case SkSegType::kLine_SegType:
+ fPts[0] = pts[0];
+ fPts[1] = pts[1];
+ fLength = (fPts[1] - fPts[0]).length();
+ break;
+ case SkSegType::kCubic_SegType:
+ for (size_t i = 0; i < 4; i++) {
+ fPts[i] = pts[i];
+ }
+ break;
+ case SkSegType::kConic_SegType:
+ for (size_t i = 0; i < 4; i++) {
+ fPts[i] = pts[i];
+ }
+ break;
+ default:
+ UNIMPLEMENTED;
+ break;
+ }
+ if (kLine_SegType != segType) {
+ fIntegrator = ArcLengthIntegrator(fPts, fSegType);
+ }
+}
+
+SkScalar SkCurveMeasure::getLength() {
+ if (-1.0f == fLength) {
+ fLength = fIntegrator.computeLength(1.0f);
+ }
+ return fLength;
+}
+
+// Given an arc length targetLength, we want to determine what t
+// gives us the corresponding arc length along the curve.
+// We do this by letting the arc length integral := f(t) and
+// solving for the root of the equation f(t) - targetLength = 0
+// using Newton's method and lerp-bisection.
+// The computationally expensive parts are the integral approximation
+// at each step, and computing the derivative of the arc length integral,
+// which is equal to the length of the tangent (so we have to do a sqrt).
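+// As a sketch of one iteration below: with f(t) = computeLength(t), the Newton
+// update is
+//     t_new = t - (f(t) - targetLength) / |c'(t)|,
+// since f'(t) is the speed |c'(t)|, which evaluateDerivative(...).length()
+// provides. If that step leaves [0, 1], or the Newton budget is spent, the code
+// falls back to bisecting the bracketing interval [minT, maxT].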
+
+SkScalar SkCurveMeasure::getTime(SkScalar targetLength) {
+ if (targetLength <= 0.0f) {
+ return 0.0f;
+ }
+
+ SkScalar currentLength = getLength();
+
+ if (targetLength > currentLength || (SkScalarNearlyEqual(targetLength, currentLength))) {
+ return 1.0f;
+ }
+ if (kLine_SegType == fSegType) {
+ return targetLength / currentLength;
+ }
+
+ // initial estimate of t is percentage of total length
+ SkScalar currentT = targetLength / currentLength;
+ SkScalar prevT = -1.0f;
+ SkScalar newT;
+
+ SkScalar minT = 0.0f;
+ SkScalar maxT = 1.0f;
+
+ int iterations = 0;
+ while (iterations < kNewtonIters + kBisectIters) {
+ currentLength = fIntegrator.computeLength(currentT);
+ SkScalar lengthDiff = currentLength - targetLength;
+
+ // Update root bounds.
+ // If lengthDiff is positive, we have overshot the target, so
+ // we know the current t is an upper bound, and similarly
+ // for the lower bound.
+ if (lengthDiff > 0.0f) {
+ if (currentT < maxT) {
+ maxT = currentT;
+ }
+ } else {
+ if (currentT > minT) {
+ minT = currentT;
+ }
+ }
+
+        // We have a tolerance on both the absolute value of the difference and
+        // on the t value, because we may not have enough precision in the t to
+        // get close enough in the length.
+ if ((std::abs(lengthDiff) < kTolerance) ||
+ (std::abs(prevT - currentT) < kTolerance)) {
+ break;
+ }
+
+ prevT = currentT;
+ if (iterations < kNewtonIters) {
+            // This is just Newton's formula.
+ SkScalar dt = evaluateDerivative(fPts, fSegType, currentT).length();
+ newT = currentT - (lengthDiff / dt);
+
+ // If newT is out of bounds, bisect inside newton.
+ if ((newT < 0.0f) || (newT > 1.0f)) {
+ newT = (minT + maxT) * 0.5f;
+ }
+ } else if (iterations < kNewtonIters + kBisectIters) {
+ if (lengthDiff > 0.0f) {
+ maxT = currentT;
+ } else {
+ minT = currentT;
+ }
+ // TODO(hstern) do a lerp here instead of a bisection
+ newT = (minT + maxT) * 0.5f;
+ } else {
+ SkDEBUGF(("%.7f %.7f didn't get close enough after bisection.\n",
+ currentT, currentLength));
+ break;
+ }
+ currentT = newT;
+
+ SkASSERT(minT <= maxT);
+
+ iterations++;
+ }
+
+ // debug. is there an SKDEBUG or something for ifdefs?
+ fIters = iterations;
+
+ return currentT;
+}
+
+void SkCurveMeasure::getPosTanTime(SkScalar targetLength, SkPoint* pos,
+ SkVector* tan, SkScalar* time) {
+ SkScalar t = getTime(targetLength);
+
+ if (time) {
+ *time = t;
+ }
+ if (pos) {
+ *pos = evaluate(fPts, fSegType, t);
+ }
+ if (tan) {
+ *tan = evaluateDerivative(fPts, fSegType, t);
+ }
+}
diff --git a/gfx/skia/skia/src/utils/SkCurveMeasure.h b/gfx/skia/skia/src/utils/SkCurveMeasure.h
new file mode 100644
index 000000000..580721123
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCurveMeasure.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCurveMeasure_DEFINED
+#define SkCurveMeasure_DEFINED
+
+#include "SkPathMeasurePriv.h"
+#include "SkPoint.h"
+#include "SkNx.h"
+
+// These are weights and abscissae for gaussian quadrature with weight function
+// w(x) = 1
+static SkScalar weights8[8] = {0.3626837833783620f, 0.3626837833783620f,
+ 0.3137066458778873f, 0.3137066458778873f,
+ 0.2223810344533745f, 0.2223810344533745f,
+ 0.1012285362903763f, 0.1012285362903763f};
+static SkScalar absc8[8] = {-0.1834346424956498f, 0.1834346424956498f,
+ -0.5255324099163290f, 0.5255324099163290f,
+ -0.7966664774136267f, 0.7966664774136267f,
+ -0.9602898564975363f, 0.9602898564975363f};
+
+static Sk8f weights = Sk8f::Load(weights8);
+static Sk8f absc = 0.5f*(Sk8f::Load(absc8) + 1.0f);
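+// For example, the first node -0.1834346... maps to (1 - 0.1834346)/2 ~= 0.4083,
+// so computeLength() always samples at the same 8 fixed fractions of [0, t].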
+
+
+class ArcLengthIntegrator {
+public:
+ ArcLengthIntegrator() {}
+ ArcLengthIntegrator(const SkPoint* pts, SkSegType segType);
+ SkScalar computeLength(SkScalar t);
+
+private:
+ SkSegType fSegType;
+
+ // precomputed coefficients for derivatives in Horner form
+ Sk8f xCoeff[3];
+ Sk8f yCoeff[3];
+};
+
+class SkCurveMeasure {
+public:
+ SkCurveMeasure() {}
+
+ // Almost exactly the same as in SkPath::Iter:
+ // kLine_SegType -> 2 points: start end
+ // kQuad_SegType -> 3 points: start control end
+ // kCubic_SegType -> 4 points: start control1 control2 end
+ // kConic_SegType -> 4 points: start control end (w, w)
+ //
+ // i.e. the only difference is that the conic's last point is a point
+ // consisting of the w value twice
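+    //
+    // For instance (the variable names here are only illustrative), a conic with
+    // weight w could be measured as:
+    //     SkPoint conicPts[4] = { start, ctrl, end, SkPoint::Make(w, w) };
+    //     SkCurveMeasure meas(conicPts, kConic_SegType);
+    //     SkScalar len = meas.getLength();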
+ SkCurveMeasure(const SkPoint* pts, SkSegType segType);
+
+ SkScalar getTime(SkScalar targetLength);
+ void getPosTanTime(SkScalar distance, SkPoint* pos, SkVector* tan, SkScalar* time);
+ SkScalar getLength();
+
+private:
+ const SkScalar kTolerance = 0.0001f;
+ const int kNewtonIters = 5;
+ const int kBisectIters = 5;
+
+ SkSegType fSegType;
+ SkPoint fPts[4];
+ SkScalar fLength = -1.0f;
+ ArcLengthIntegrator fIntegrator;
+
+ // for debug purposes
+ int fIters;
+};
+
+#endif // SkCurveMeasure_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkDashPath.cpp b/gfx/skia/skia/src/utils/SkDashPath.cpp
new file mode 100644
index 000000000..c0cdcc195
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkDashPath.cpp
@@ -0,0 +1,345 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkDashPathPriv.h"
+#include "SkPathMeasure.h"
+#include "SkStrokeRec.h"
+
+static inline int is_even(int x) {
+ return !(x & 1);
+}
+
+static SkScalar find_first_interval(const SkScalar intervals[], SkScalar phase,
+ int32_t* index, int count) {
+ for (int i = 0; i < count; ++i) {
+ SkScalar gap = intervals[i];
+ if (phase > gap || (phase == gap && gap)) {
+ phase -= gap;
+ } else {
+ *index = i;
+ return gap - phase;
+ }
+ }
+ // If we get here, phase "appears" to be larger than our length. This
+ // shouldn't happen with perfect precision, but we can accumulate errors
+    // during the initial length computation (rounding can make our sum too
+    // big or too small). In that event, we just have to eat the error here.
+ *index = 0;
+ return intervals[0];
+}
+
+void SkDashPath::CalcDashParameters(SkScalar phase, const SkScalar intervals[], int32_t count,
+ SkScalar* initialDashLength, int32_t* initialDashIndex,
+ SkScalar* intervalLength, SkScalar* adjustedPhase) {
+ SkScalar len = 0;
+ for (int i = 0; i < count; i++) {
+ len += intervals[i];
+ }
+ *intervalLength = len;
+ // Adjust phase to be between 0 and len, "flipping" phase if negative.
+ // e.g., if len is 100, then phase of -20 (or -120) is equivalent to 80
+ if (adjustedPhase) {
+ if (phase < 0) {
+ phase = -phase;
+ if (phase > len) {
+ phase = SkScalarMod(phase, len);
+ }
+ phase = len - phase;
+
+ // Due to finite precision, it's possible that phase == len,
+ // even after the subtract (if len >>> phase), so fix that here.
+ // This fixes http://crbug.com/124652 .
+ SkASSERT(phase <= len);
+ if (phase == len) {
+ phase = 0;
+ }
+ } else if (phase >= len) {
+ phase = SkScalarMod(phase, len);
+ }
+ *adjustedPhase = phase;
+ }
+ SkASSERT(phase >= 0 && phase < len);
+
+ *initialDashLength = find_first_interval(intervals, phase,
+ initialDashIndex, count);
+
+ SkASSERT(*initialDashLength >= 0);
+ SkASSERT(*initialDashIndex >= 0 && *initialDashIndex < count);
+}
+
+static void outset_for_stroke(SkRect* rect, const SkStrokeRec& rec) {
+ SkScalar radius = SkScalarHalf(rec.getWidth());
+ if (0 == radius) {
+ radius = SK_Scalar1; // hairlines
+ }
+ if (SkPaint::kMiter_Join == rec.getJoin()) {
+ radius = SkScalarMul(radius, rec.getMiter());
+ }
+ rect->outset(radius, radius);
+}
+
+// Only handles lines for now. If this returns true, dstPath is the new (smaller)
+// path. If it returns false, the dstPath parameter is ignored.
+static bool cull_path(const SkPath& srcPath, const SkStrokeRec& rec,
+ const SkRect* cullRect, SkScalar intervalLength,
+ SkPath* dstPath) {
+ if (nullptr == cullRect) {
+ return false;
+ }
+
+ SkPoint pts[2];
+ if (!srcPath.isLine(pts)) {
+ return false;
+ }
+
+ SkRect bounds = *cullRect;
+ outset_for_stroke(&bounds, rec);
+
+ SkScalar dx = pts[1].x() - pts[0].x();
+ SkScalar dy = pts[1].y() - pts[0].y();
+
+ // just do horizontal lines for now (lazy)
+ if (dy) {
+ return false;
+ }
+
+ SkScalar minX = pts[0].fX;
+ SkScalar maxX = pts[1].fX;
+
+ if (dx < 0) {
+ SkTSwap(minX, maxX);
+ }
+
+ SkASSERT(minX <= maxX);
+ if (maxX < bounds.fLeft || minX > bounds.fRight) {
+ return false;
+ }
+
+ // Now we actually perform the chop, removing the excess to the left and
+ // right of the bounds (keeping our new line "in phase" with the dash,
+    // hence the (mod intervalLength)).
+
+ if (minX < bounds.fLeft) {
+ minX = bounds.fLeft - SkScalarMod(bounds.fLeft - minX,
+ intervalLength);
+ }
+ if (maxX > bounds.fRight) {
+ maxX = bounds.fRight + SkScalarMod(maxX - bounds.fRight,
+ intervalLength);
+ }
+
+ SkASSERT(maxX >= minX);
+ if (dx < 0) {
+ SkTSwap(minX, maxX);
+ }
+ pts[0].fX = minX;
+ pts[1].fX = maxX;
+
+ dstPath->moveTo(pts[0]);
+ dstPath->lineTo(pts[1]);
+ return true;
+}
+
+class SpecialLineRec {
+public:
+ bool init(const SkPath& src, SkPath* dst, SkStrokeRec* rec,
+ int intervalCount, SkScalar intervalLength) {
+ if (rec->isHairlineStyle() || !src.isLine(fPts)) {
+ return false;
+ }
+
+ // can relax this in the future, if we handle square and round caps
+ if (SkPaint::kButt_Cap != rec->getCap()) {
+ return false;
+ }
+
+ SkScalar pathLength = SkPoint::Distance(fPts[0], fPts[1]);
+
+ fTangent = fPts[1] - fPts[0];
+ if (fTangent.isZero()) {
+ return false;
+ }
+
+ fPathLength = pathLength;
+ fTangent.scale(SkScalarInvert(pathLength));
+ fTangent.rotateCCW(&fNormal);
+ fNormal.scale(SkScalarHalf(rec->getWidth()));
+
+ // now estimate how many quads will be added to the path
+ // resulting segments = pathLen * intervalCount / intervalLen
+ // resulting points = 4 * segments
+
+ SkScalar ptCount = SkScalarMulDiv(pathLength,
+ SkIntToScalar(intervalCount),
+ intervalLength);
+ ptCount = SkTMin(ptCount, SkDashPath::kMaxDashCount);
+ int n = SkScalarCeilToInt(ptCount) << 2;
+ dst->incReserve(n);
+
+ // we will take care of the stroking
+ rec->setFillStyle();
+ return true;
+ }
+
+ void addSegment(SkScalar d0, SkScalar d1, SkPath* path) const {
+ SkASSERT(d0 <= fPathLength);
+ // clamp the segment to our length
+ if (d1 > fPathLength) {
+ d1 = fPathLength;
+ }
+
+ SkScalar x0 = fPts[0].fX + SkScalarMul(fTangent.fX, d0);
+ SkScalar x1 = fPts[0].fX + SkScalarMul(fTangent.fX, d1);
+ SkScalar y0 = fPts[0].fY + SkScalarMul(fTangent.fY, d0);
+ SkScalar y1 = fPts[0].fY + SkScalarMul(fTangent.fY, d1);
+
+ SkPoint pts[4];
+ pts[0].set(x0 + fNormal.fX, y0 + fNormal.fY); // moveTo
+ pts[1].set(x1 + fNormal.fX, y1 + fNormal.fY); // lineTo
+ pts[2].set(x1 - fNormal.fX, y1 - fNormal.fY); // lineTo
+ pts[3].set(x0 - fNormal.fX, y0 - fNormal.fY); // lineTo
+
+ path->addPoly(pts, SK_ARRAY_COUNT(pts), false);
+ }
+
+private:
+ SkPoint fPts[2];
+ SkVector fTangent;
+ SkVector fNormal;
+ SkScalar fPathLength;
+};
+
+
+bool SkDashPath::InternalFilter(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect, const SkScalar aIntervals[],
+ int32_t count, SkScalar initialDashLength, int32_t initialDashIndex,
+ SkScalar intervalLength,
+ StrokeRecApplication strokeRecApplication) {
+
+ // we do nothing if the src wants to be filled
+ SkStrokeRec::Style style = rec->getStyle();
+ if (SkStrokeRec::kFill_Style == style || SkStrokeRec::kStrokeAndFill_Style == style) {
+ return false;
+ }
+
+ const SkScalar* intervals = aIntervals;
+ SkScalar dashCount = 0;
+ int segCount = 0;
+
+ SkPath cullPathStorage;
+ const SkPath* srcPtr = &src;
+ if (cull_path(src, *rec, cullRect, intervalLength, &cullPathStorage)) {
+ srcPtr = &cullPathStorage;
+ }
+
+ SpecialLineRec lineRec;
+ bool specialLine = (StrokeRecApplication::kAllow == strokeRecApplication) &&
+ lineRec.init(*srcPtr, dst, rec, count >> 1, intervalLength);
+
+ SkPathMeasure meas(*srcPtr, false, rec->getResScale());
+
+ do {
+ bool skipFirstSegment = meas.isClosed();
+ bool addedSegment = false;
+ SkScalar length = meas.getLength();
+ int index = initialDashIndex;
+
+ // Since the path length / dash length ratio may be arbitrarily large, we can exert
+ // significant memory pressure while attempting to build the filtered path. To avoid this,
+ // we simply give up dashing beyond a certain threshold.
+ //
+ // The original bug report (http://crbug.com/165432) is based on a path yielding more than
+ // 90 million dash segments and crashing the memory allocator. A limit of 1 million
+ // segments seems reasonable: at 2 verbs per segment * 9 bytes per verb, this caps the
+ // maximum dash memory overhead at roughly 17MB per path.
+ dashCount += length * (count >> 1) / intervalLength;
+ if (dashCount > kMaxDashCount) {
+ dst->reset();
+ return false;
+ }
+
+ // Using double precision to avoid looping indefinitely due to single precision rounding
+ // (for extreme path_length/dash_length ratios). See test_infinite_dash() unittest.
+ double distance = 0;
+ double dlen = initialDashLength;
+
+ while (distance < length) {
+ SkASSERT(dlen >= 0);
+ addedSegment = false;
+ if (is_even(index) && !skipFirstSegment) {
+ addedSegment = true;
+ ++segCount;
+
+ if (specialLine) {
+ lineRec.addSegment(SkDoubleToScalar(distance),
+ SkDoubleToScalar(distance + dlen),
+ dst);
+ } else {
+ meas.getSegment(SkDoubleToScalar(distance),
+ SkDoubleToScalar(distance + dlen),
+ dst, true);
+ }
+ }
+ distance += dlen;
+
+ // clear this so we only respect it the first time around
+ skipFirstSegment = false;
+
+ // wrap around our intervals array if necessary
+ index += 1;
+ SkASSERT(index <= count);
+ if (index == count) {
+ index = 0;
+ }
+
+ // fetch our next dlen
+ dlen = intervals[index];
+ }
+
+ // extend if we ended on a segment and we need to join up with the (skipped) initial segment
+ if (meas.isClosed() && is_even(initialDashIndex) &&
+ initialDashLength >= 0) {
+ meas.getSegment(0, initialDashLength, dst, !addedSegment);
+ ++segCount;
+ }
+ } while (meas.nextContour());
+
+ if (segCount > 1) {
+ dst->setConvexity(SkPath::kConcave_Convexity);
+ }
+
+ return true;
+}
+
+bool SkDashPath::FilterDashPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect, const SkPathEffect::DashInfo& info) {
+ if (!ValidDashPath(info.fPhase, info.fIntervals, info.fCount)) {
+ return false;
+ }
+ SkScalar initialDashLength = 0;
+ int32_t initialDashIndex = 0;
+ SkScalar intervalLength = 0;
+ CalcDashParameters(info.fPhase, info.fIntervals, info.fCount,
+ &initialDashLength, &initialDashIndex, &intervalLength);
+ return InternalFilter(dst, src, rec, cullRect, info.fIntervals, info.fCount, initialDashLength,
+ initialDashIndex, intervalLength);
+}
+
+bool SkDashPath::ValidDashPath(SkScalar phase, const SkScalar intervals[], int32_t count) {
+ if (count < 2 || !SkIsAlign2(count)) {
+ return false;
+ }
+ SkScalar length = 0;
+ for (int i = 0; i < count; i++) {
+ if (intervals[i] < 0) {
+ return false;
+ }
+ length += intervals[i];
+ }
+ // watch out for values that might make us go out of bounds
+ return length > 0 && SkScalarIsFinite(phase) && SkScalarIsFinite(length);
+}
diff --git a/gfx/skia/skia/src/utils/SkDashPathPriv.h b/gfx/skia/skia/src/utils/SkDashPathPriv.h
new file mode 100644
index 000000000..752b59dbe
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkDashPathPriv.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDashPathPriv_DEFINED
+#define SkDashPathPriv_DEFINED
+
+#include "SkPathEffect.h"
+
+namespace SkDashPath {
+ /**
+ * Calculates the initialDashLength, initialDashIndex, and intervalLength based on the
+     * input phase and intervals. If adjustedPhase is passed in, then the phase will be
+     * adjusted to be between 0 and intervalLength. The result will be stored in adjustedPhase.
+     * If adjustedPhase is nullptr, then it is assumed the phase is already between 0 and
+     * intervalLength.
+ *
+ * Caller should have already used ValidDashPath to exclude invalid data.
+ */
+ void CalcDashParameters(SkScalar phase, const SkScalar intervals[], int32_t count,
+ SkScalar* initialDashLength, int32_t* initialDashIndex,
+ SkScalar* intervalLength, SkScalar* adjustedPhase = nullptr);
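+    // For example (values derived from the implementation in SkDashPath.cpp):
+    // intervals = {10, 5} with phase = -3 gives intervalLength = 15,
+    // adjustedPhase = 12, initialDashIndex = 1, and initialDashLength = 3
+    // (the remaining portion of the second, "off" interval).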
+
+ bool FilterDashPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*,
+ const SkPathEffect::DashInfo& info);
+
+ const SkScalar kMaxDashCount = 1000000;
+
+ /** See comments for InternalFilter */
+ enum class StrokeRecApplication {
+ kDisallow,
+ kAllow,
+ };
+
+ /**
+ * Caller should have already used ValidDashPath to exclude invalid data. Typically, this leaves
+ * the strokeRec unmodified. However, for some simple shapes (e.g. a line) it may directly
+     * evaluate the dash and stroke to produce a stroked output path with a fill strokeRec. Passing
+     * StrokeRecApplication::kDisallow turns this behavior off.
+ */
+ bool InternalFilter(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect, const SkScalar aIntervals[],
+ int32_t count, SkScalar initialDashLength, int32_t initialDashIndex,
+ SkScalar intervalLength,
+ StrokeRecApplication = StrokeRecApplication::kAllow);
+
+ bool ValidDashPath(SkScalar phase, const SkScalar intervals[], int32_t count);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkDeferredCanvas.cpp b/gfx/skia/skia/src/utils/SkDeferredCanvas.cpp
new file mode 100644
index 000000000..75cd5dbd4
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkDeferredCanvas.cpp
@@ -0,0 +1,570 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkDeferredCanvas.h"
+#include "SkDrawable.h"
+#include "SkPath.h"
+#include "SkRRect.h"
+#include "SkSurface.h"
+#include "SkTextBlob.h"
+
+bool SkDeferredCanvas::Rec::isConcat(SkMatrix* m) const {
+ switch (fType) {
+ case kTrans_Type:
+ m->setTranslate(fData.fTranslate.x(), fData.fTranslate.y());
+ return true;
+ case kScaleTrans_Type:
+ m->setScaleTranslate(fData.fScaleTrans.fScale.x(),
+ fData.fScaleTrans.fScale.y(),
+ fData.fScaleTrans.fTrans.x(),
+ fData.fScaleTrans.fTrans.y());
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+void SkDeferredCanvas::Rec::setConcat(const SkMatrix& m) {
+ SkASSERT(m.getType() <= (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask));
+
+ if (m.getType() <= SkMatrix::kTranslate_Mask) {
+ fType = kTrans_Type;
+ fData.fTranslate.set(m.getTranslateX(), m.getTranslateY());
+ } else {
+ fType = kScaleTrans_Type;
+ fData.fScaleTrans.fScale.set(m.getScaleX(), m.getScaleY());
+ fData.fScaleTrans.fTrans.set(m.getTranslateX(), m.getTranslateY());
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkDeferredCanvas::SkDeferredCanvas(SkCanvas* canvas)
+ : INHERITED(canvas->getBaseLayerSize().width(), canvas->getBaseLayerSize().height())
+ , fCanvas(canvas)
+{}
+
+SkDeferredCanvas::~SkDeferredCanvas() {}
+
+void SkDeferredCanvas::push_save() {
+ Rec* r = fRecs.append();
+ r->fType = kSave_Type;
+}
+
+void SkDeferredCanvas::push_cliprect(const SkRect& bounds) {
+ int index = fRecs.count() - 1;
+ if (index >= 0 && fRecs[index].fType == kClipRect_Type) {
+ if (!fRecs[index].fData.fBounds.intersect(bounds)) {
+ fRecs[index].fData.fBounds.setEmpty();
+ }
+ } else {
+ Rec* r = fRecs.append();
+ r->fType = kClipRect_Type;
+ r->fData.fBounds = bounds;
+ }
+}
+
+bool SkDeferredCanvas::push_concat(const SkMatrix& mat) {
+ if (mat.getType() > (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask)) {
+ return false;
+ }
+ // At the moment, we don't know which ops can scale and which can also flip, so
+ // we reject negative scales for now
+ if (mat.getScaleX() < 0 || mat.getScaleY() < 0) {
+ return false;
+ }
+
+ int index = fRecs.count() - 1;
+ SkMatrix m;
+ if (index >= 0 && fRecs[index].isConcat(&m)) {
+ m.preConcat(mat);
+ fRecs[index].setConcat(m);
+ } else {
+ fRecs.append()->setConcat(mat);
+ }
+ return true;
+}
+
+void SkDeferredCanvas::emit(const Rec& rec) {
+ switch (rec.fType) {
+ case kSave_Type:
+ fCanvas->save();
+ this->INHERITED::willSave();
+ break;
+ case kClipRect_Type:
+ fCanvas->clipRect(rec.fData.fBounds);
+ this->INHERITED::onClipRect(rec.fData.fBounds,
+ kIntersect_Op, kHard_ClipEdgeStyle);
+ break;
+ case kTrans_Type:
+ case kScaleTrans_Type: {
+ SkMatrix mat;
+ rec.getConcat(&mat);
+ fCanvas->concat(mat);
+ this->INHERITED::didConcat(mat);
+ } break;
+ }
+}
+
+void SkDeferredCanvas::flush_le(int index) {
+ SkASSERT(index >= -1 && index < fRecs.count());
+
+ int count = index + 1;
+ for (int i = 0; i < count; ++i) {
+ this->emit(fRecs[i]);
+ }
+ fRecs.remove(0, count);
+}
+
+void SkDeferredCanvas::flush_all() {
+ this->flush_le(fRecs.count() - 1);
+}
+
+void SkDeferredCanvas::flush_before_saves() {
+ int i;
+ for (i = fRecs.count() - 1; i >= 0; --i) {
+ if (kSave_Type != fRecs[i].fType) {
+ break;
+ }
+ }
+ this->flush_le(i);
+}
+
+enum Flags {
+ kNoTranslate_Flag = 1 << 0,
+ kNoClip_Flag = 1 << 1,
+ kNoCull_Flag = 1 << 2,
+ kNoScale_Flag = 1 << 3,
+};
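+// Rough intent of these flags, as used by flush_check() below: they mark which
+// kinds of pending records a draw cannot safely absorb. For example, a paint
+// with a shader cannot have a pending translate folded into its draw bounds,
+// so kNoTranslate_Flag forces such records to be flushed before drawing.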
+
+void SkDeferredCanvas::flush_check(SkRect* bounds, const SkPaint* paint, unsigned flags) {
+ if (paint) {
+ if (paint->getShader() || paint->getImageFilter()) {
+ flags |= kNoTranslate_Flag | kNoScale_Flag;
+ }
+ // TODO: replace these with code to enlarge the bounds conservatively?
+ if (paint->getStyle() != SkPaint::kFill_Style || paint->getMaskFilter() ||
+ paint->getImageFilter() || paint->getPathEffect())
+ {
+ flags |= kNoCull_Flag | kNoScale_Flag | kNoClip_Flag;
+ }
+ if (paint->getLooper()) {
+            // to be conservative, we disable all of these, since embedded layers could have shaders
+ // or strokes etc.
+ flags |= kNoTranslate_Flag | kNoCull_Flag | kNoScale_Flag;
+ }
+ }
+ bool canClip = !(flags & kNoClip_Flag);
+ bool canTranslate = !(flags & kNoTranslate_Flag);
+ bool canCull = !(flags & kNoCull_Flag);
+ bool canScale = !(flags & kNoScale_Flag);
+
+ int i;
+ for (i = fRecs.count() - 1; i >= 0; --i) {
+ const Rec& rec = fRecs[i];
+ switch (rec.fType) {
+ case kSave_Type:
+ // continue to the next rec
+ break;
+ case kClipRect_Type:
+ if (!canCull) {
+ goto STOP;
+ }
+ if (canClip) {
+ if (!bounds->intersect(rec.fData.fBounds)) {
+ bounds->setEmpty();
+ return;
+ }
+ // continue to the next rec
+ } else {
+ if (!rec.fData.fBounds.contains(*bounds)) {
+ goto STOP;
+ }
+ // continue to the next rec
+ }
+ break;
+ case kTrans_Type:
+ if (canTranslate) {
+ bounds->offset(rec.fData.fTranslate.x(), rec.fData.fTranslate.y());
+ // continue to the next rec
+ } else {
+ goto STOP;
+ }
+ break;
+ case kScaleTrans_Type:
+ if (canScale) {
+ SkMatrix m;
+ rec.getConcat(&m);
+ m.mapRectScaleTranslate(bounds, *bounds);
+ } else {
+ goto STOP;
+ }
+ break;
+ }
+ }
+STOP:
+ this->flush_le(i);
+}
+
+void SkDeferredCanvas::flush_translate(SkScalar* x, SkScalar* y, const SkRect& bounds,
+ const SkPaint* paint) {
+ SkRect tmp = bounds;
+ this->flush_check(&tmp, paint, kNoClip_Flag | kNoScale_Flag);
+ *x += tmp.x() - bounds.x();
+ *y += tmp.y() - bounds.y();
+}
+
+void SkDeferredCanvas::flush_translate(SkScalar* x, SkScalar* y, const SkPaint& paint) {
+ SkRect tmp = SkRect::MakeXYWH(*x, *y, 1, 1);
+ this->flush_check(&tmp, &paint, kNoClip_Flag | kNoCull_Flag | kNoScale_Flag);
+ *x = tmp.x();
+ *y = tmp.y();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkDeferredCanvas::willSave() {
+ this->push_save();
+}
+
+SkCanvas::SaveLayerStrategy SkDeferredCanvas::getSaveLayerStrategy(const SaveLayerRec& rec) {
+ this->flush_all();
+ fCanvas->saveLayer(rec);
+ this->INHERITED::getSaveLayerStrategy(rec);
+ // No need for a layer.
+ return kNoLayer_SaveLayerStrategy;
+}
+
+void SkDeferredCanvas::willRestore() {
+ for (int i = fRecs.count() - 1; i >= 0; --i) {
+ if (kSave_Type == fRecs[i].fType) {
+ fRecs.setCount(i); // pop off everything here and later
+ return;
+ }
+ }
+ for (int i = 0; i < fRecs.count(); ++i) {
+ SkASSERT(kSave_Type != fRecs[i].fType);
+ }
+ fRecs.setCount(0);
+ fCanvas->restore();
+ this->INHERITED::willRestore();
+}
+
+void SkDeferredCanvas::didConcat(const SkMatrix& matrix) {
+ if (matrix.isIdentity()) {
+ return;
+ }
+ if (!this->push_concat(matrix)) {
+ this->flush_all();
+ fCanvas->concat(matrix);
+ this->INHERITED::didConcat(matrix);
+ }
+}
+
+void SkDeferredCanvas::didSetMatrix(const SkMatrix& matrix) {
+ this->flush_all();
+ fCanvas->setMatrix(matrix);
+ this->INHERITED::didSetMatrix(matrix);
+}
+
+void SkDeferredCanvas::onClipRect(const SkRect& rect, ClipOp op, ClipEdgeStyle edgeStyle) {
+ if (kIntersect_Op == op) {
+ this->push_cliprect(rect);
+ } else {
+ this->flush_all();
+ fCanvas->clipRect(rect, op, kSoft_ClipEdgeStyle == edgeStyle);
+ this->INHERITED::onClipRect(rect, op, edgeStyle);
+ }
+}
+
+void SkDeferredCanvas::onClipRRect(const SkRRect& rrect, ClipOp op, ClipEdgeStyle edgeStyle) {
+ this->flush_all();
+ fCanvas->clipRRect(rrect, op, kSoft_ClipEdgeStyle == edgeStyle);
+ this->INHERITED::onClipRRect(rrect, op, edgeStyle);
+}
+
+void SkDeferredCanvas::onClipPath(const SkPath& path, ClipOp op, ClipEdgeStyle edgeStyle) {
+ this->flush_all();
+ fCanvas->clipPath(path, op, kSoft_ClipEdgeStyle == edgeStyle);
+ this->INHERITED::onClipPath(path, op, edgeStyle);
+}
+
+void SkDeferredCanvas::onClipRegion(const SkRegion& deviceRgn, ClipOp op) {
+ this->flush_all();
+ fCanvas->clipRegion(deviceRgn, op);
+ this->INHERITED::onClipRegion(deviceRgn, op);
+}
+
+void SkDeferredCanvas::onDrawPaint(const SkPaint& paint) {
+ // TODO: Can we turn this into drawRect?
+ this->flush_all();
+ fCanvas->drawPaint(paint);
+}
+
+void SkDeferredCanvas::onDrawPoints(PointMode mode, size_t count, const SkPoint pts[],
+ const SkPaint& paint) {
+ this->flush_all();
+ fCanvas->drawPoints(mode, count, pts, paint);
+}
+
+void SkDeferredCanvas::onDrawRect(const SkRect& rect, const SkPaint& paint) {
+ SkRect modRect = rect;
+ this->flush_check(&modRect, &paint);
+ fCanvas->drawRect(modRect, paint);
+}
+
+void SkDeferredCanvas::onDrawRegion(const SkRegion& region, const SkPaint& paint) {
+ this->flush_all(); // can we do better?
+ fCanvas->drawRegion(region, paint);
+}
+
+void SkDeferredCanvas::onDrawOval(const SkRect& rect, const SkPaint& paint) {
+ SkRect modRect = rect;
+ this->flush_check(&modRect, &paint, kNoClip_Flag);
+ fCanvas->drawOval(modRect, paint);
+}
+
+void SkDeferredCanvas::onDrawArc(const SkRect& rect, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) {
+ SkRect modRect = rect;
+ this->flush_check(&modRect, &paint, kNoClip_Flag);
+ fCanvas->drawArc(modRect, startAngle, sweepAngle, useCenter, paint);
+}
+
+static SkRRect make_offset(const SkRRect& src, SkScalar dx, SkScalar dy) {
+ SkRRect dst = src;
+ dst.offset(dx, dy);
+ return dst;
+}
+
+void SkDeferredCanvas::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ SkRect modRect = rrect.getBounds();
+ this->flush_check(&modRect, &paint, kNoClip_Flag);
+ fCanvas->drawRRect(make_offset(rrect,
+ modRect.x() - rrect.getBounds().x(),
+ modRect.y() - rrect.getBounds().y()), paint);
+}
+
+void SkDeferredCanvas::onDrawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint& paint) {
+ this->flush_all();
+ fCanvas->drawDRRect(outer, inner, paint);
+}
+
+void SkDeferredCanvas::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ if (path.isInverseFillType()) {
+ this->flush_before_saves();
+ } else {
+ SkRect modRect = path.getBounds();
+ this->flush_check(&modRect, &paint, kNoClip_Flag | kNoTranslate_Flag | kNoScale_Flag);
+ }
+ fCanvas->drawPath(path, paint);
+}
+
+void SkDeferredCanvas::onDrawBitmap(const SkBitmap& bitmap, SkScalar x, SkScalar y,
+ const SkPaint* paint) {
+ const SkScalar w = SkIntToScalar(bitmap.width());
+ const SkScalar h = SkIntToScalar(bitmap.height());
+ SkRect bounds = SkRect::MakeXYWH(x, y, w, h);
+ this->flush_check(&bounds, paint, kNoClip_Flag);
+ if (bounds.width() == w && bounds.height() == h) {
+ fCanvas->drawBitmap(bitmap, bounds.x(), bounds.y(), paint);
+ } else {
+ fCanvas->drawBitmapRect(bitmap, bounds, paint);
+ }
+}
+
+void SkDeferredCanvas::onDrawBitmapRect(const SkBitmap& bitmap, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ SkRect modRect = dst;
+ this->flush_check(&modRect, paint, kNoClip_Flag);
+ fCanvas->legacy_drawBitmapRect(bitmap, src, modRect, paint, constraint);
+}
+
+void SkDeferredCanvas::onDrawBitmapNine(const SkBitmap& bitmap, const SkIRect& center,
+ const SkRect& dst, const SkPaint* paint) {
+ SkRect modRect = dst;
+ this->flush_check(&modRect, paint, kNoClip_Flag);
+ fCanvas->drawBitmapNine(bitmap, center, modRect, paint);
+}
+
+void SkDeferredCanvas::onDrawBitmapLattice(const SkBitmap& bitmap, const Lattice& lattice,
+ const SkRect& dst, const SkPaint* paint) {
+ SkRect modRect = dst;
+ this->flush_check(&modRect, paint, kNoClip_Flag);
+ fCanvas->drawBitmapLattice(bitmap, lattice, modRect, paint);
+}
+
+void SkDeferredCanvas::onDrawImageNine(const SkImage* image, const SkIRect& center,
+ const SkRect& dst, const SkPaint* paint) {
+ SkRect modRect = dst;
+ this->flush_check(&modRect, paint, kNoClip_Flag);
+ fCanvas->drawImageNine(image, center, modRect, paint);
+}
+
+void SkDeferredCanvas::onDrawImage(const SkImage* image, SkScalar x, SkScalar y,
+ const SkPaint* paint) {
+ const SkScalar w = SkIntToScalar(image->width());
+ const SkScalar h = SkIntToScalar(image->height());
+ SkRect bounds = SkRect::MakeXYWH(x, y, w, h);
+ this->flush_check(&bounds, paint, kNoClip_Flag);
+ if (bounds.width() == w && bounds.height() == h) {
+ fCanvas->drawImage(image, bounds.x(), bounds.y(), paint);
+ } else {
+ fCanvas->drawImageRect(image, bounds, paint);
+ }
+}
+
+void SkDeferredCanvas::onDrawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ SkRect modRect = dst;
+ this->flush_check(&modRect, paint, kNoClip_Flag);
+ fCanvas->legacy_drawImageRect(image, src, modRect, paint, constraint);
+}
+
+void SkDeferredCanvas::onDrawImageLattice(const SkImage* image, const Lattice& lattice,
+ const SkRect& dst, const SkPaint* paint) {
+ SkRect modRect = dst;
+ this->flush_check(&modRect, paint, kNoClip_Flag);
+ fCanvas->drawImageLattice(image, lattice, modRect, paint);
+}
+
+void SkDeferredCanvas::onDrawText(const void* text, size_t byteLength, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ this->flush_translate(&x, &y, paint);
+ fCanvas->drawText(text, byteLength, x, y, paint);
+}
+
+void SkDeferredCanvas::onDrawPosText(const void* text, size_t byteLength, const SkPoint pos[],
+ const SkPaint& paint) {
+ this->flush_before_saves();
+ fCanvas->drawPosText(text, byteLength, pos, paint);
+}
+
+void SkDeferredCanvas::onDrawPosTextH(const void* text, size_t byteLength, const SkScalar xpos[],
+ SkScalar constY, const SkPaint& paint) {
+ this->flush_before_saves();
+ fCanvas->drawPosTextH(text, byteLength, xpos, constY, paint);
+}
+
+void SkDeferredCanvas::onDrawTextOnPath(const void* text, size_t byteLength, const SkPath& path,
+ const SkMatrix* matrix, const SkPaint& paint) {
+ this->flush_before_saves();
+ fCanvas->drawTextOnPath(text, byteLength, path, matrix, paint);
+}
+
+void SkDeferredCanvas::onDrawTextRSXform(const void* text, size_t byteLength,
+ const SkRSXform xform[], const SkRect* cullRect,
+ const SkPaint& paint) {
+ if (cullRect) {
+ SkRect modRect = *cullRect;
+ // only allow culling
+ this->flush_check(&modRect, &paint, kNoClip_Flag | kNoScale_Flag | kNoTranslate_Flag);
+ } else {
+ this->flush_before_saves();
+ }
+ fCanvas->drawTextRSXform(text, byteLength, xform, cullRect, paint);
+}
+
+void SkDeferredCanvas::onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint &paint) {
+ this->flush_translate(&x, &y, blob->bounds(), &paint);
+ fCanvas->drawTextBlob(blob, x, y, paint);
+}
+
+#include "SkPicture.h"
+#include "SkCanvasPriv.h"
+void SkDeferredCanvas::onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint) {
+#if 1
+ SkAutoCanvasMatrixPaint acmp(this, matrix, paint, picture->cullRect());
+ picture->playback(this);
+#else
+ this->flush_before_saves();
+ fCanvas->drawPicture(picture, matrix, paint);
+#endif
+}
+
+void SkDeferredCanvas::onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix) {
+ // TODO: investigate culling and applying concat to the matrix
+#if 1
+ drawable->draw(this, matrix);
+#else
+ this->flush_before_saves();
+ fCanvas->drawDrawable(drawable, matrix);
+#endif
+}
+
+void SkDeferredCanvas::onDrawAtlas(const SkImage* image, const SkRSXform xform[],
+ const SkRect rects[], const SkColor colors[],
+ int count, SkXfermode::Mode mode,
+ const SkRect* cull, const SkPaint* paint) {
+ this->flush_before_saves();
+ fCanvas->drawAtlas(image, xform, rects, colors, count, mode, cull, paint);
+}
+
+void SkDeferredCanvas::onDrawVertices(VertexMode vmode, int vertexCount,
+ const SkPoint vertices[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint) {
+ this->flush_before_saves();
+ fCanvas->drawVertices(vmode, vertexCount, vertices, texs, colors, xmode,
+ indices, indexCount, paint);
+}
+
+void SkDeferredCanvas::onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkXfermode* xmode,
+ const SkPaint& paint) {
+ this->flush_before_saves();
+ fCanvas->drawPatch(cubics, colors, texCoords, xmode, paint);
+}
+
+void SkDeferredCanvas::onDrawAnnotation(const SkRect& rect, const char key[], SkData* data) {
+ fCanvas->drawAnnotation(rect, key, data);
+}
+
+#ifdef SK_SUPPORT_LEGACY_DRAWFILTER
+SkDrawFilter* SkDeferredCanvas::setDrawFilter(SkDrawFilter* filter) {
+ fCanvas->setDrawFilter(filter);
+ return this->INHERITED::setDrawFilter(filter);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkSurface> SkDeferredCanvas::onNewSurface(const SkImageInfo& info,
+ const SkSurfaceProps& props) {
+ return fCanvas->makeSurface(info, &props);
+}
+SkISize SkDeferredCanvas::getBaseLayerSize() const { return fCanvas->getBaseLayerSize(); }
+bool SkDeferredCanvas::getClipBounds(SkRect* bounds) const {
+ return fCanvas->getClipBounds(bounds);
+}
+bool SkDeferredCanvas::getClipDeviceBounds(SkIRect* bounds) const {
+ return fCanvas->getClipDeviceBounds(bounds);
+}
+bool SkDeferredCanvas::isClipEmpty() const { return fCanvas->isClipEmpty(); }
+bool SkDeferredCanvas::isClipRect() const { return fCanvas->isClipRect(); }
+bool SkDeferredCanvas::onPeekPixels(SkPixmap* pixmap) { return fCanvas->peekPixels(pixmap); }
+bool SkDeferredCanvas::onAccessTopLayerPixels(SkPixmap* pixmap) {
+ SkImageInfo info;
+ size_t rowBytes;
+ SkIPoint* origin = nullptr;
+ void* addr = fCanvas->accessTopLayerPixels(&info, &rowBytes, origin);
+ if (addr) {
+ *pixmap = SkPixmap(info, addr, rowBytes);
+ return true;
+ }
+ return false;
+}
+SkImageInfo SkDeferredCanvas::onImageInfo() const { return fCanvas->imageInfo(); }
+bool SkDeferredCanvas::onGetProps(SkSurfaceProps* props) const { return fCanvas->getProps(props); }
+void SkDeferredCanvas::onFlush() {
+ this->flush_all();
+ return fCanvas->flush();
+}
diff --git a/gfx/skia/skia/src/utils/SkDeferredCanvas.h b/gfx/skia/skia/src/utils/SkDeferredCanvas.h
new file mode 100644
index 000000000..312c22d26
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkDeferredCanvas.h
@@ -0,0 +1,156 @@
+
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDeferredCanvas_DEFINED
+#define SkDeferredCanvas_DEFINED
+
+#include "../private/SkTDArray.h"
+#include "SkCanvas.h"
+
+class SK_API SkDeferredCanvas : public SkCanvas {
+public:
+ SkDeferredCanvas(SkCanvas*);
+ ~SkDeferredCanvas() override;
+
+#ifdef SK_SUPPORT_LEGACY_DRAWFILTER
+ SkDrawFilter* setDrawFilter(SkDrawFilter*) override;
+#endif
+
+protected:
+ sk_sp<SkSurface> onNewSurface(const SkImageInfo&, const SkSurfaceProps&) override;
+ SkISize getBaseLayerSize() const override;
+ bool getClipBounds(SkRect* bounds) const override;
+ bool getClipDeviceBounds(SkIRect* bounds) const override;
+ bool isClipEmpty() const override;
+ bool isClipRect() const override;
+ bool onPeekPixels(SkPixmap*) override;
+ bool onAccessTopLayerPixels(SkPixmap*) override;
+ SkImageInfo onImageInfo() const override;
+ bool onGetProps(SkSurfaceProps*) const override;
+ void onFlush() override;
+// SkCanvas* canvasForDrawIter() override;
+
+ void willSave() override;
+ SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec&) override;
+ void willRestore() override;
+
+ void didConcat(const SkMatrix&) override;
+ void didSetMatrix(const SkMatrix&) override;
+
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override;
+ virtual void onDrawText(const void* text, size_t byteLength, SkScalar x, SkScalar y,
+ const SkPaint&) override;
+ virtual void onDrawPosText(const void* text, size_t byteLength, const SkPoint pos[],
+ const SkPaint&) override;
+ virtual void onDrawPosTextH(const void* text, size_t byteLength, const SkScalar xpos[],
+ SkScalar constY, const SkPaint&) override;
+ virtual void onDrawTextOnPath(const void* text, size_t byteLength, const SkPath& path,
+ const SkMatrix* matrix, const SkPaint&) override;
+ void onDrawTextRSXform(const void* text, size_t byteLength, const SkRSXform[],
+ const SkRect* cullRect, const SkPaint&) override;
+ virtual void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) override;
+ virtual void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkXfermode* xmode,
+ const SkPaint& paint) override;
+
+ void onDrawPaint(const SkPaint&) override;
+ void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&) override;
+ void onDrawRect(const SkRect&, const SkPaint&) override;
+ void onDrawRegion(const SkRegion& region, const SkPaint& paint) override;
+ void onDrawOval(const SkRect&, const SkPaint&) override;
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override;
+ void onDrawRRect(const SkRRect&, const SkPaint&) override;
+ void onDrawPath(const SkPath&, const SkPaint&) override;
+
+ void onDrawBitmap(const SkBitmap&, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawBitmapLattice(const SkBitmap&, const Lattice& lattice, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawBitmapNine(const SkBitmap&, const SkIRect& center, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawBitmapRect(const SkBitmap&, const SkRect* src, const SkRect& dst, const SkPaint*,
+ SrcRectConstraint) override;
+
+ void onDrawImage(const SkImage*, SkScalar left, SkScalar top, const SkPaint*) override;
+ void onDrawImageLattice(const SkImage*, const Lattice& lattice, const SkRect& dst,
+ const SkPaint*) override;
+ void onDrawImageNine(const SkImage* image, const SkIRect& center,
+ const SkRect& dst, const SkPaint*) override;
+ void onDrawImageRect(const SkImage*, const SkRect* src, const SkRect& dst,
+ const SkPaint*, SrcRectConstraint) override;
+
+ void onDrawVertices(VertexMode vmode, int vertexCount,
+ const SkPoint vertices[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint&) override;
+ void onDrawAtlas(const SkImage* image, const SkRSXform xform[],
+ const SkRect rects[], const SkColor colors[],
+ int count, SkXfermode::Mode mode,
+ const SkRect* cull, const SkPaint* paint) override;
+
+ void onClipRect(const SkRect&, ClipOp, ClipEdgeStyle) override;
+ void onClipRRect(const SkRRect&, ClipOp, ClipEdgeStyle) override;
+ void onClipPath(const SkPath&, ClipOp, ClipEdgeStyle) override;
+ void onClipRegion(const SkRegion&, ClipOp) override;
+
+ void onDrawDrawable(SkDrawable*, const SkMatrix*) override;
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override;
+ void onDrawAnnotation(const SkRect&, const char[], SkData*) override;
+
+ class Iter;
+
+private:
+ SkCanvas* fCanvas;
+
+ enum Type {
+ kSave_Type,
+ kClipRect_Type,
+ kTrans_Type,
+ kScaleTrans_Type,
+ };
+ struct Rec {
+ Type fType;
+ union {
+ SkRect fBounds;
+ SkVector fTranslate;
+ struct {
+ SkVector fScale;
+ SkVector fTrans; // post translate
+ } fScaleTrans;
+ } fData;
+
+ bool isConcat(SkMatrix*) const;
+ void getConcat(SkMatrix* mat) const {
+ SkDEBUGCODE(bool isconcat = ) this->isConcat(mat);
+ SkASSERT(isconcat);
+ }
+ void setConcat(const SkMatrix&);
+ };
+ SkTDArray<Rec> fRecs;
+
+ void push_save();
+ void push_cliprect(const SkRect&);
+ bool push_concat(const SkMatrix&);
+
+ void emit(const Rec& rec);
+
+ void flush_all();
+ void flush_before_saves();
+ void flush_le(int index);
+ void flush_translate(SkScalar* x, SkScalar* y, const SkPaint&);
+ void flush_translate(SkScalar* x, SkScalar* y, const SkRect& bounds, const SkPaint* = nullptr);
+ void flush_check(SkRect* bounds, const SkPaint*, unsigned flags = 0);
+
+ void internal_flush_translate(SkScalar* x, SkScalar* y, const SkRect* boundsOrNull);
+
+ typedef SkCanvas INHERITED;
+};
+
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkDumpCanvas.cpp b/gfx/skia/skia/src/utils/SkDumpCanvas.cpp
new file mode 100644
index 000000000..eda23b7eb
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkDumpCanvas.cpp
@@ -0,0 +1,571 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkData.h"
+#include "SkDumpCanvas.h"
+#include "SkPatchUtils.h"
+#include "SkPicture.h"
+#include "SkPixelRef.h"
+#include "SkRRect.h"
+#include "SkString.h"
+#include "SkTextBlob.h"
+#include <stdarg.h>
+#include <stdio.h>
+
+// needed just to know that these are all subclassed from SkFlattenable
+#include "SkShader.h"
+#include "SkPathEffect.h"
+#include "SkXfermode.h"
+#include "SkColorFilter.h"
+#include "SkPathEffect.h"
+#include "SkMaskFilter.h"
+
+static void toString(const SkRect& r, SkString* str) {
+ str->appendf("[%g,%g %g:%g]",
+ SkScalarToFloat(r.fLeft), SkScalarToFloat(r.fTop),
+ SkScalarToFloat(r.width()), SkScalarToFloat(r.height()));
+}
+
+static void toString(const SkIRect& r, SkString* str) {
+ str->appendf("[%d,%d %d:%d]", r.fLeft, r.fTop, r.width(), r.height());
+}
+
+static void toString(const SkRRect& rrect, SkString* str) {
+ SkRect r = rrect.getBounds();
+ str->appendf("[%g,%g %g:%g]",
+ SkScalarToFloat(r.fLeft), SkScalarToFloat(r.fTop),
+ SkScalarToFloat(r.width()), SkScalarToFloat(r.height()));
+ if (rrect.isOval()) {
+ str->append("()");
+ } else if (rrect.isSimple()) {
+ const SkVector& rad = rrect.getSimpleRadii();
+ str->appendf("(%g,%g)", rad.x(), rad.y());
+ } else if (rrect.isComplex()) {
+ SkVector radii[4] = {
+ rrect.radii(SkRRect::kUpperLeft_Corner),
+ rrect.radii(SkRRect::kUpperRight_Corner),
+ rrect.radii(SkRRect::kLowerRight_Corner),
+ rrect.radii(SkRRect::kLowerLeft_Corner),
+ };
+ str->appendf("(%g,%g %g,%g %g,%g %g,%g)",
+ radii[0].x(), radii[0].y(),
+ radii[1].x(), radii[1].y(),
+ radii[2].x(), radii[2].y(),
+ radii[3].x(), radii[3].y());
+ }
+}
+
+static void dumpVerbs(const SkPath& path, SkString* str) {
+ SkPath::Iter iter(path, false);
+ SkPoint pts[4];
+ for (;;) {
+ switch (iter.next(pts, false)) {
+ case SkPath::kMove_Verb:
+ str->appendf(" M%g,%g", pts[0].fX, pts[0].fY);
+ break;
+ case SkPath::kLine_Verb:
+ str->appendf(" L%g,%g", pts[0].fX, pts[0].fY);
+ break;
+ case SkPath::kQuad_Verb:
+ str->appendf(" Q%g,%g,%g,%g", pts[1].fX, pts[1].fY,
+ pts[2].fX, pts[2].fY);
+ break;
+ case SkPath::kCubic_Verb:
+ str->appendf(" C%g,%g,%g,%g,%g,%g", pts[1].fX, pts[1].fY,
+ pts[2].fX, pts[2].fY, pts[3].fX, pts[3].fY);
+ break;
+ case SkPath::kClose_Verb:
+ str->append("X");
+ break;
+ case SkPath::kDone_Verb:
+ return;
+ case SkPath::kConic_Verb:
+ SkASSERT(0);
+ break;
+ }
+ }
+}
+
+static void toString(const SkPath& path, SkString* str) {
+ if (path.isEmpty()) {
+ str->append("path:empty");
+ } else {
+ toString(path.getBounds(), str);
+#if 1
+ SkString s;
+ dumpVerbs(path, &s);
+ str->append(s.c_str());
+#endif
+ str->append("]");
+ str->prepend("path:[");
+ }
+}
+
+static const char* toString(SkCanvas::ClipOp op) {
+ static const char* gOpNames[] = {
+ "DIFF", "SECT", "UNION", "XOR", "RDIFF", "REPLACE"
+ };
+ return gOpNames[op];
+}
+
+static void toString(const SkRegion& rgn, SkString* str) {
+ str->append("Region:[");
+ toString(rgn.getBounds(), str);
+ str->append("]");
+ if (rgn.isComplex()) {
+ str->append(".complex");
+ }
+}
+
+static const char* toString(SkCanvas::VertexMode vm) {
+ static const char* gVMNames[] = {
+ "TRIANGLES", "STRIP", "FAN"
+ };
+ return gVMNames[vm];
+}
+
+static const char* toString(SkCanvas::PointMode pm) {
+ static const char* gPMNames[] = {
+ "POINTS", "LINES", "POLYGON"
+ };
+ return gPMNames[pm];
+}
+
+static void toString(const void* text, size_t byteLen, SkPaint::TextEncoding enc,
+ SkString* str) {
+ // FIXME: this code appears to be untested - and probably unused - and probably wrong
+ switch (enc) {
+ case SkPaint::kUTF8_TextEncoding:
+ str->appendf("\"%.*s\"%s", (int)SkTMax<size_t>(byteLen, 32), (const char*) text,
+ byteLen > 32 ? "..." : "");
+ break;
+ case SkPaint::kUTF16_TextEncoding:
+ str->appendf("\"%.*ls\"%s", (int)SkTMax<size_t>(byteLen, 32), (const wchar_t*) text,
+ byteLen > 64 ? "..." : "");
+ break;
+ case SkPaint::kUTF32_TextEncoding:
+ str->appendf("\"%.*ls\"%s", (int)SkTMax<size_t>(byteLen, 32), (const wchar_t*) text,
+ byteLen > 128 ? "..." : "");
+ break;
+ case SkPaint::kGlyphID_TextEncoding:
+ str->append("<glyphs>");
+ break;
+
+ default:
+ SkASSERT(false);
+ break;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define WIDE_OPEN 16384
+
+SkDumpCanvas::SkDumpCanvas(Dumper* dumper) : INHERITED(WIDE_OPEN, WIDE_OPEN) {
+ fNestLevel = 0;
+ SkSafeRef(dumper);
+ fDumper = dumper;
+}
+
+SkDumpCanvas::~SkDumpCanvas() {
+ SkSafeUnref(fDumper);
+}
+
+void SkDumpCanvas::dump(Verb verb, const SkPaint* paint,
+ const char format[], ...) {
+ static const size_t BUFFER_SIZE = 1024;
+
+ char buffer[BUFFER_SIZE];
+ va_list args;
+ va_start(args, format);
+ vsnprintf(buffer, BUFFER_SIZE, format, args);
+ va_end(args);
+
+ if (fDumper) {
+ fDumper->dump(this, verb, buffer, paint);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkDumpCanvas::willSave() {
+ this->dump(kSave_Verb, nullptr, "save()");
+ this->INHERITED::willSave();
+}
+
+SkCanvas::SaveLayerStrategy SkDumpCanvas::getSaveLayerStrategy(const SaveLayerRec& rec) {
+ SkString str;
+ str.printf("saveLayer(0x%X)", rec.fSaveLayerFlags);
+ if (rec.fBounds) {
+ str.append(" bounds");
+ toString(*rec.fBounds, &str);
+ }
+ const SkPaint* paint = rec.fPaint;
+ if (paint) {
+ if (paint->getAlpha() != 0xFF) {
+ str.appendf(" alpha:0x%02X", paint->getAlpha());
+ }
+ if (!paint->isSrcOver()) {
+ str.appendf(" blendmode:%d", (int)paint->getBlendMode());
+ }
+ }
+ this->dump(kSave_Verb, paint, str.c_str());
+ return this->INHERITED::getSaveLayerStrategy(rec);
+}
+
+void SkDumpCanvas::willRestore() {
+ this->dump(kRestore_Verb, nullptr, "restore");
+ this->INHERITED::willRestore();
+}
+
+void SkDumpCanvas::didConcat(const SkMatrix& matrix) {
+ SkString str;
+
+ switch (matrix.getType()) {
+ case SkMatrix::kTranslate_Mask:
+ this->dump(kMatrix_Verb, nullptr, "translate(%g %g)",
+ SkScalarToFloat(matrix.getTranslateX()),
+ SkScalarToFloat(matrix.getTranslateY()));
+ break;
+ case SkMatrix::kScale_Mask:
+ this->dump(kMatrix_Verb, nullptr, "scale(%g %g)",
+ SkScalarToFloat(matrix.getScaleX()),
+ SkScalarToFloat(matrix.getScaleY()));
+ break;
+ default:
+ matrix.toString(&str);
+ this->dump(kMatrix_Verb, nullptr, "concat(%s)", str.c_str());
+ break;
+ }
+
+ this->INHERITED::didConcat(matrix);
+}
+
+void SkDumpCanvas::didSetMatrix(const SkMatrix& matrix) {
+ SkString str;
+ matrix.toString(&str);
+ this->dump(kMatrix_Verb, nullptr, "setMatrix(%s)", str.c_str());
+ this->INHERITED::didSetMatrix(matrix);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+const char* SkDumpCanvas::EdgeStyleToAAString(ClipEdgeStyle edgeStyle) {
+ return kSoft_ClipEdgeStyle == edgeStyle ? "AA" : "BW";
+}
+
+void SkDumpCanvas::onClipRect(const SkRect& rect, ClipOp op, ClipEdgeStyle edgeStyle) {
+ SkString str;
+ toString(rect, &str);
+ this->dump(kClip_Verb, nullptr, "clipRect(%s %s %s)", str.c_str(), toString(op),
+ EdgeStyleToAAString(edgeStyle));
+ this->INHERITED::onClipRect(rect, op, edgeStyle);
+}
+
+void SkDumpCanvas::onClipRRect(const SkRRect& rrect, ClipOp op, ClipEdgeStyle edgeStyle) {
+ SkString str;
+ toString(rrect, &str);
+ this->dump(kClip_Verb, nullptr, "clipRRect(%s %s %s)", str.c_str(), toString(op),
+ EdgeStyleToAAString(edgeStyle));
+ this->INHERITED::onClipRRect(rrect, op, edgeStyle);
+}
+
+void SkDumpCanvas::onClipPath(const SkPath& path, ClipOp op, ClipEdgeStyle edgeStyle) {
+ SkString str;
+ toString(path, &str);
+ this->dump(kClip_Verb, nullptr, "clipPath(%s %s %s)", str.c_str(), toString(op),
+ EdgeStyleToAAString(edgeStyle));
+ this->INHERITED::onClipPath(path, op, edgeStyle);
+}
+
+void SkDumpCanvas::onClipRegion(const SkRegion& deviceRgn, ClipOp op) {
+ SkString str;
+ toString(deviceRgn, &str);
+ this->dump(kClip_Verb, nullptr, "clipRegion(%s %s)", str.c_str(),
+ toString(op));
+ this->INHERITED::onClipRegion(deviceRgn, op);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkDumpCanvas::onDrawPaint(const SkPaint& paint) {
+ this->dump(kDrawPaint_Verb, &paint, "drawPaint()");
+}
+
+void SkDumpCanvas::onDrawPoints(PointMode mode, size_t count,
+ const SkPoint pts[], const SkPaint& paint) {
+ this->dump(kDrawPoints_Verb, &paint, "drawPoints(%s, %zu)", toString(mode),
+ count);
+}
+
+void SkDumpCanvas::onDrawOval(const SkRect& rect, const SkPaint& paint) {
+ SkString str;
+ toString(rect, &str);
+ this->dump(kDrawOval_Verb, &paint, "drawOval(%s)", str.c_str());
+}
+
+void SkDumpCanvas::onDrawArc(const SkRect& rect, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) {
+ SkString str;
+ toString(rect, &str);
+ this->dump(kDrawArc_Verb, &paint, "drawArc(%s, %g, %g, %d)", str.c_str(), startAngle,
+ sweepAngle, useCenter);
+}
+
+void SkDumpCanvas::onDrawRect(const SkRect& rect, const SkPaint& paint) {
+ SkString str;
+ toString(rect, &str);
+ this->dump(kDrawRect_Verb, &paint, "drawRect(%s)", str.c_str());
+}
+
+void SkDumpCanvas::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ SkString str;
+ toString(rrect, &str);
+ this->dump(kDrawRRect_Verb, &paint, "drawRRect(%s)", str.c_str());
+}
+
+void SkDumpCanvas::onDrawDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkPaint& paint) {
+ SkString str0, str1;
+ toString(outer, &str0);
+ toString(inner, &str1);
+ this->dump(kDrawDRRect_Verb, &paint, "drawDRRect(%s,%s)",
+ str0.c_str(), str1.c_str());
+}
+
+void SkDumpCanvas::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ SkString str;
+ toString(path, &str);
+ this->dump(kDrawPath_Verb, &paint, "drawPath(%s)", str.c_str());
+}
+
+void SkDumpCanvas::onDrawBitmap(const SkBitmap& bitmap, SkScalar x, SkScalar y,
+ const SkPaint* paint) {
+ SkString str;
+ bitmap.toString(&str);
+ this->dump(kDrawBitmap_Verb, paint, "drawBitmap(%s %g %g)", str.c_str(),
+ SkScalarToFloat(x), SkScalarToFloat(y));
+}
+
+void SkDumpCanvas::onDrawBitmapRect(const SkBitmap& bitmap, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint) {
+ SkString bs, rs;
+ bitmap.toString(&bs);
+ toString(dst, &rs);
+ // show the src-rect only if it's not the entire bitmap
+ if (src && (src->fLeft > 0 || src->fTop > 0 ||
+ src->fRight < SkIntToScalar(bitmap.width()) ||
+ src->fBottom < SkIntToScalar(bitmap.height()))) {
+ SkString ss;
+ toString(*src, &ss);
+ rs.prependf("%s ", ss.c_str());
+ }
+
+ this->dump(kDrawBitmap_Verb, paint, "drawBitmapRect(%s %s)", bs.c_str(), rs.c_str());
+}
+
+void SkDumpCanvas::onDrawBitmapNine(const SkBitmap& bitmap, const SkIRect& center,
+ const SkRect& dst, const SkPaint* paint) {
+ SkString str, centerStr, dstStr;
+ bitmap.toString(&str);
+ toString(center, &centerStr);
+ toString(dst, &dstStr);
+ this->dump(kDrawBitmap_Verb, paint, "drawBitmapNine(%s %s %s)", str.c_str(),
+ centerStr.c_str(), dstStr.c_str());
+}
+
+void SkDumpCanvas::onDrawImage(const SkImage* image, SkScalar x, SkScalar y, const SkPaint* paint) {
+ SkString str;
+ image->toString(&str);
+ this->dump(kDrawBitmap_Verb, paint, "drawImage(%s %g %g)", str.c_str(),
+ SkScalarToFloat(x), SkScalarToFloat(y));
+}
+
+void SkDumpCanvas::onDrawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint) {
+ SkString bs, rs;
+ image->toString(&bs);
+ toString(dst, &rs);
+ // show the src-rect only if it's not the entire image
+ if (src && (src->fLeft > 0 || src->fTop > 0 ||
+ src->fRight < SkIntToScalar(image->width()) ||
+ src->fBottom < SkIntToScalar(image->height()))) {
+ SkString ss;
+ toString(*src, &ss);
+ rs.prependf("%s ", ss.c_str());
+ }
+
+ this->dump(kDrawBitmap_Verb, paint, "drawImageRectToRect(%s %s)",
+ bs.c_str(), rs.c_str());
+}
+
+void SkDumpCanvas::onDrawText(const void* text, size_t byteLength, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ SkString str;
+ toString(text, byteLength, paint.getTextEncoding(), &str);
+ this->dump(kDrawText_Verb, &paint, "drawText(%s [%d] %g %g)", str.c_str(),
+ byteLength, SkScalarToFloat(x), SkScalarToFloat(y));
+}
+
+void SkDumpCanvas::onDrawPosText(const void* text, size_t byteLength, const SkPoint pos[],
+ const SkPaint& paint) {
+ SkString str;
+ toString(text, byteLength, paint.getTextEncoding(), &str);
+ this->dump(kDrawText_Verb, &paint, "drawPosText(%s [%d] %g %g ...)",
+ str.c_str(), byteLength, SkScalarToFloat(pos[0].fX),
+ SkScalarToFloat(pos[0].fY));
+}
+
+void SkDumpCanvas::onDrawPosTextH(const void* text, size_t byteLength, const SkScalar xpos[],
+ SkScalar constY, const SkPaint& paint) {
+ SkString str;
+ toString(text, byteLength, paint.getTextEncoding(), &str);
+ this->dump(kDrawText_Verb, &paint, "drawPosTextH(%s [%d] %g %g ...)",
+ str.c_str(), byteLength, SkScalarToFloat(xpos[0]),
+ SkScalarToFloat(constY));
+}
+
+void SkDumpCanvas::onDrawTextOnPath(const void* text, size_t byteLength, const SkPath& path,
+ const SkMatrix* matrix, const SkPaint& paint) {
+ SkString str;
+ toString(text, byteLength, paint.getTextEncoding(), &str);
+ this->dump(kDrawText_Verb, &paint, "drawTextOnPath(%s [%d])",
+ str.c_str(), byteLength);
+}
+
+void SkDumpCanvas::onDrawTextRSXform(const void* text, size_t byteLength, const SkRSXform xform[],
+ const SkRect* cull, const SkPaint& paint) {
+ SkString str;
+ toString(text, byteLength, paint.getTextEncoding(), &str);
+ this->dump(kDrawText_Verb, &paint, "drawTextRSXform(%s [%d])",
+ str.c_str(), byteLength);
+}
+
+void SkDumpCanvas::onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ SkString str;
+ toString(blob->bounds(), &str);
+ this->dump(kDrawText_Verb, &paint, "drawTextBlob(%p) [%s]", blob, str.c_str());
+ // FIXME: dump the actual blob content?
+}
+
+void SkDumpCanvas::onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint) {
+ this->dump(kDrawPicture_Verb, nullptr, "drawPicture(%p) %f:%f:%f:%f", picture,
+ picture->cullRect().fLeft, picture->cullRect().fTop,
+ picture->cullRect().fRight, picture->cullRect().fBottom);
+ fNestLevel += 1;
+ this->INHERITED::onDrawPicture(picture, matrix, paint);
+ fNestLevel -= 1;
+ this->dump(kDrawPicture_Verb, nullptr, "endPicture(%p) %f:%f:%f:%f", &picture,
+ picture->cullRect().fLeft, picture->cullRect().fTop,
+ picture->cullRect().fRight, picture->cullRect().fBottom);
+}
+
+void SkDumpCanvas::onDrawVertices(VertexMode vmode, int vertexCount,
+ const SkPoint vertices[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint) {
+ this->dump(kDrawVertices_Verb, &paint, "drawVertices(%s [%d] %g %g ...)",
+ toString(vmode), vertexCount, SkScalarToFloat(vertices[0].fX),
+ SkScalarToFloat(vertices[0].fY));
+}
+
+void SkDumpCanvas::onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkXfermode* xmode,
+ const SkPaint& paint) {
+ //dumps corner points and colors in clockwise order starting on upper-left corner
+ this->dump(kDrawPatch_Verb, &paint, "drawPatch(Vertices{[%f, %f], [%f, %f], [%f, %f], [%f, %f]}\
+ | Colors{[0x%x], [0x%x], [0x%x], [0x%x]} | TexCoords{[%f,%f], [%f,%f], [%f,%f], \
+ [%f,%f]})",
+ cubics[SkPatchUtils::kTopP0_CubicCtrlPts].fX,
+ cubics[SkPatchUtils::kTopP0_CubicCtrlPts].fY,
+ cubics[SkPatchUtils::kTopP3_CubicCtrlPts].fX,
+ cubics[SkPatchUtils::kTopP3_CubicCtrlPts].fY,
+ cubics[SkPatchUtils::kBottomP3_CubicCtrlPts].fX,
+ cubics[SkPatchUtils::kBottomP3_CubicCtrlPts].fY,
+ cubics[SkPatchUtils::kBottomP0_CubicCtrlPts].fX,
+ cubics[SkPatchUtils::kBottomP0_CubicCtrlPts].fY,
+ colors[0], colors[1], colors[2], colors[3],
+ texCoords[0].x(), texCoords[0].y(), texCoords[1].x(), texCoords[1].y(),
+ texCoords[2].x(), texCoords[2].y(), texCoords[3].x(), texCoords[3].y());
+}
+
+void SkDumpCanvas::onDrawAnnotation(const SkRect& rect, const char key[], SkData* value) {
+ SkString str;
+ toString(rect, &str);
+ this->dump(kDrawAnnotation_Verb, nullptr, "drawAnnotation(%s \"%s\" (%zu))",
+ str.c_str(), key, value ? value->size() : 0);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+SkFormatDumper::SkFormatDumper(void (*proc)(const char*, void*), void* refcon) {
+ fProc = proc;
+ fRefcon = refcon;
+}
+
+static void appendPtr(SkString* str, const void* ptr, const char name[]) {
+ if (ptr) {
+ str->appendf(" %s:%p", name, ptr);
+ }
+}
+
+static void appendFlattenable(SkString* str, const SkFlattenable* ptr,
+ const char name[]) {
+ if (ptr) {
+ str->appendf(" %s:%p", name, ptr);
+ }
+}
+
+void SkFormatDumper::dump(SkDumpCanvas* canvas, SkDumpCanvas::Verb verb,
+ const char str[], const SkPaint* p) {
+ SkString msg, tab;
+ const int level = canvas->getNestLevel() + canvas->getSaveCount() - 1;
+ SkASSERT(level >= 0);
+ for (int i = 0; i < level; i++) {
+#if 0
+ tab.append("\t");
+#else
+ tab.append(" "); // tabs are often too wide to be useful
+#endif
+ }
+ msg.printf("%s%s", tab.c_str(), str);
+
+ if (p) {
+ msg.appendf(" color:0x%08X flags:%X", p->getColor(), p->getFlags());
+ if (!p->isSrcOver()) {
+ msg.appendf(" blendmode:%d", p->getBlendMode());
+ }
+ appendFlattenable(&msg, p->getShader(), "shader");
+ appendFlattenable(&msg, p->getPathEffect(), "pathEffect");
+ appendFlattenable(&msg, p->getMaskFilter(), "maskFilter");
+ appendFlattenable(&msg, p->getColorFilter(), "filter");
+
+ if (SkDumpCanvas::kDrawText_Verb == verb) {
+ msg.appendf(" textSize:%g", SkScalarToFloat(p->getTextSize()));
+ appendPtr(&msg, p->getTypeface(), "typeface");
+ }
+
+ if (p->getStyle() != SkPaint::kFill_Style) {
+ msg.appendf(" strokeWidth:%g", SkScalarToFloat(p->getStrokeWidth()));
+ }
+ }
+
+ fProc(msg.c_str(), fRefcon);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void dumpToDebugf(const char text[], void*) {
+ SkDebugf("%s\n", text);
+}
+
+SkDebugfDumper::SkDebugfDumper() : INHERITED(dumpToDebugf, nullptr) {}
diff --git a/gfx/skia/skia/src/utils/SkEventTracer.cpp b/gfx/skia/skia/src/utils/SkEventTracer.cpp
new file mode 100644
index 000000000..0a748d146
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkEventTracer.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAtomics.h"
+#include "SkEventTracer.h"
+#include "SkOnce.h"
+
+#include <stdlib.h>
+
+class SkDefaultEventTracer : public SkEventTracer {
+ SkEventTracer::Handle
+ addTraceEvent(char phase,
+ const uint8_t* categoryEnabledFlag,
+ const char* name,
+ uint64_t id,
+ int numArgs,
+ const char** argNames,
+ const uint8_t* argTypes,
+ const uint64_t* argValues,
+ uint8_t flags) override { return 0; }
+
+ void
+ updateTraceEventDuration(const uint8_t* categoryEnabledFlag,
+ const char* name,
+ SkEventTracer::Handle handle) override {}
+
+ const uint8_t* getCategoryGroupEnabled(const char* name) override {
+ static uint8_t no = 0;
+ return &no;
+ }
+ const char* getCategoryGroupName(
+ const uint8_t* categoryEnabledFlag) override {
+ static const char* dummy = "dummy";
+ return dummy;
+ }
+};
+
+// We prefer gUserTracer if it's been set; otherwise we fall back on a default tracer.
+static SkEventTracer* gUserTracer = nullptr;
+
+void SkEventTracer::SetInstance(SkEventTracer* tracer) {
+ SkASSERT(nullptr == sk_atomic_load(&gUserTracer, sk_memory_order_acquire));
+ sk_atomic_store(&gUserTracer, tracer, sk_memory_order_release);
+ // An atomic load during process shutdown is probably overkill, but safe overkill.
+ atexit([]() { delete sk_atomic_load(&gUserTracer, sk_memory_order_acquire); });
+}
+
+SkEventTracer* SkEventTracer::GetInstance() {
+ if (SkEventTracer* tracer = sk_atomic_load(&gUserTracer, sk_memory_order_acquire)) {
+ return tracer;
+ }
+ static SkOnce once;
+ static SkDefaultEventTracer* defaultTracer;
+ once([] { defaultTracer = new SkDefaultEventTracer; });
+ return defaultTracer;
+}
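+
+// Illustrative usage sketch (not part of the original file): a client that
+// wants tracing installs its own SkEventTracer subclass once at startup,
+// e.g.
+//
+//   SkEventTracer::SetInstance(new MyTracer);  // MyTracer: hypothetical subclass
+//
+// after which GetInstance() returns that tracer instead of the no-op default,
+// and the atexit handler registered above deletes it at shutdown.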
diff --git a/gfx/skia/skia/src/utils/SkFloatUtils.h b/gfx/skia/skia/src/utils/SkFloatUtils.h
new file mode 100644
index 000000000..101aac747
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkFloatUtils.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFloatUtils_DEFINED
+#define SkFloatUtils_DEFINED
+
+#include "SkTypes.h"
+#include <limits.h>
+#include <float.h>
+
+template <size_t size>
+class SkTypeWithSize {
+public:
+ // Prevents using SkTypeWithSize<N> with non-specialized N.
+ typedef void UInt;
+};
+
+template <>
+class SkTypeWithSize<32> {
+public:
+ typedef uint32_t UInt;
+};
+
+template <>
+class SkTypeWithSize<64> {
+public:
+ typedef uint64_t UInt;
+};
+
+template <typename RawType>
+struct SkNumericLimits {
+ static const int digits = 0;
+};
+
+template <>
+struct SkNumericLimits<double> {
+ static const int digits = DBL_MANT_DIG;
+};
+
+template <>
+struct SkNumericLimits<float> {
+ static const int digits = FLT_MANT_DIG;
+};
+
+//See
+//http://stackoverflow.com/questions/17333/most-effective-way-for-float-and-double-comparison/3423299#3423299
+//http://code.google.com/p/googletest/source/browse/trunk/include/gtest/internal/gtest-internal.h
+//http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
+
+template <typename RawType, unsigned int ULPs>
+class SkFloatingPoint {
+public:
+ /** Bits is an unsigned integer the same size as the floating point number. */
+ typedef typename SkTypeWithSize<sizeof(RawType) * CHAR_BIT>::UInt Bits;
+
+ /** # of bits in a number. */
+ static const size_t kBitCount = CHAR_BIT * sizeof(RawType);
+
+ /** # of fraction bits in a number. */
+ static const size_t kFractionBitCount = SkNumericLimits<RawType>::digits - 1;
+
+ /** # of exponent bits in a number. */
+ static const size_t kExponentBitCount = kBitCount - 1 - kFractionBitCount;
+
+ /** The mask for the sign bit. */
+ static const Bits kSignBitMask = static_cast<Bits>(1) << (kBitCount - 1);
+
+ /** The mask for the fraction bits. */
+ static const Bits kFractionBitMask =
+ ~static_cast<Bits>(0) >> (kExponentBitCount + 1);
+
+ /** The mask for the exponent bits. */
+ static const Bits kExponentBitMask = ~(kSignBitMask | kFractionBitMask);
+
+ /** How many ULP's (Units in the Last Place) to tolerate when comparing. */
+ static const size_t kMaxUlps = ULPs;
+
+ /**
+ * Constructs a FloatingPoint from a raw floating-point number.
+ *
+ * On an Intel CPU, passing a non-normalized NAN (Not a Number)
+ * around may change its bits, although the new value is guaranteed
+ * to be also a NAN. Therefore, don't expect this constructor to
+ * preserve the bits in x when x is a NAN.
+ */
+ explicit SkFloatingPoint(const RawType& x) { fU.value = x; }
+
+ /** Returns the exponent bits of this number. */
+ Bits exponent_bits() const { return kExponentBitMask & fU.bits; }
+
+ /** Returns the fraction bits of this number. */
+ Bits fraction_bits() const { return kFractionBitMask & fU.bits; }
+
+ /** Returns true iff this is NAN (not a number). */
+ bool is_nan() const {
+ // It's a NAN if both of the following are true:
+ // * the exponent bits are all ones
+ // * the fraction bits are not all zero.
+ return (exponent_bits() == kExponentBitMask) && (fraction_bits() != 0);
+ }
+
+ /**
+ * Returns true iff this number is at most kMaxUlps ULP's away from rhs.
+ * In particular, this function:
+ * - returns false if either number is (or both are) NAN.
+ * - treats really large numbers as almost equal to infinity.
+ * - treats +0.0 and -0.0 as 0 ULP's apart.
+ */
+ bool AlmostEquals(const SkFloatingPoint& rhs) const {
+ // Any comparison operation involving a NAN must return false.
+ if (is_nan() || rhs.is_nan()) return false;
+
+ const Bits dist = DistanceBetweenSignAndMagnitudeNumbers(fU.bits,
+ rhs.fU.bits);
+ //SkDEBUGF(("(%f, %f, %d) ", u_.value_, rhs.u_.value_, dist));
+ return dist <= kMaxUlps;
+ }
+
+private:
+ /** The data type used to store the actual floating-point number. */
+ union FloatingPointUnion {
+ /** The raw floating-point number. */
+ RawType value;
+ /** The bits that represent the number. */
+ Bits bits;
+ };
+
+ /**
+ * Converts an integer from the sign-and-magnitude representation to
+ * the biased representation. More precisely, let N be 2 to the
+ * power of (kBitCount - 1); an integer x is then represented by the
+ * unsigned number x + N.
+ *
+ * For instance,
+ *
+ * -N + 1 (the most negative number representable using
+ * sign-and-magnitude) is represented by 1;
+ * 0 is represented by N; and
+ * N - 1 (the biggest number representable using
+ * sign-and-magnitude) is represented by 2N - 1.
+ *
+ * Read http://en.wikipedia.org/wiki/Signed_number_representations
+ * for more details on signed number representations.
+ */
+ static Bits SignAndMagnitudeToBiased(const Bits &sam) {
+ if (kSignBitMask & sam) {
+ // sam represents a negative number.
+ return ~sam + 1;
+ } else {
+ // sam represents a positive number.
+ return kSignBitMask | sam;
+ }
+ }
+
+ /**
+ * Given two numbers in the sign-and-magnitude representation,
+ * returns the distance between them as an unsigned number.
+ */
+ static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits &sam1,
+ const Bits &sam2) {
+ const Bits biased1 = SignAndMagnitudeToBiased(sam1);
+ const Bits biased2 = SignAndMagnitudeToBiased(sam2);
+ return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1);
+ }
+
+ FloatingPointUnion fU;
+};
+
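+// Illustrative sketch (not part of the original header): a typical use
+// compares two floats within a small ULP tolerance.
+//
+//   const SkFloatingPoint<float, 4> a(1.0f);
+//   const SkFloatingPoint<float, 4> b(1.0f + FLT_EPSILON);  // exactly 1 ULP above 1.0f
+//   bool close = a.AlmostEquals(b);                         // true, since 1 <= kMaxUlps (4)
+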
+#endif
diff --git a/gfx/skia/skia/src/utils/SkFrontBufferedStream.cpp b/gfx/skia/skia/src/utils/SkFrontBufferedStream.cpp
new file mode 100644
index 000000000..2dfb8ab88
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkFrontBufferedStream.cpp
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFrontBufferedStream.h"
+#include "SkStream.h"
+#include "SkTemplates.h"
+
+class FrontBufferedStream : public SkStreamRewindable {
+public:
+ // Called by Create.
+ FrontBufferedStream(SkStream*, size_t bufferSize);
+
+ size_t read(void* buffer, size_t size) override;
+
+ size_t peek(void* buffer, size_t size) const override;
+
+ bool isAtEnd() const override;
+
+ bool rewind() override;
+
+ bool hasLength() const override { return fHasLength; }
+
+ size_t getLength() const override { return fLength; }
+
+ SkStreamRewindable* duplicate() const override { return nullptr; }
+
+private:
+ SkAutoTDelete<SkStream> fStream;
+ const bool fHasLength;
+ const size_t fLength;
+ // Current offset into the stream. Always >= 0.
+ size_t fOffset;
+ // Amount that has been buffered by calls to read. Will never exceed
+ // fBufferSize.
+ size_t fBufferedSoFar;
+ // Total size of the buffer.
+ const size_t fBufferSize;
+ // FIXME: SkAutoTMalloc throws on failure. Instead, Create should return a
+ // nullptr stream.
+ SkAutoTMalloc<char> fBuffer;
+
+ // Read up to size bytes from already buffered data, and copy to
+ // dst, if non-nullptr. Updates fOffset. Assumes that fOffset is less
+ // than fBufferedSoFar.
+ size_t readFromBuffer(char* dst, size_t size);
+
+ // Buffer up to size bytes from the stream, and copy to dst if non-
+ // nullptr. Updates fOffset and fBufferedSoFar. Assumes that fOffset is
+ // at or beyond fBufferedSoFar (the already-buffered data has been
+ // consumed), and size is greater than 0.
+ size_t bufferAndWriteTo(char* dst, size_t size);
+
+ // Read up to size bytes directly from the stream and into dst if non-
+ // nullptr. Updates fOffset. Assumes fOffset is at or beyond the buffered
+ // data, and size is greater than 0.
+ size_t readDirectlyFromStream(char* dst, size_t size);
+
+ typedef SkStream INHERITED;
+};
+
+SkStreamRewindable* SkFrontBufferedStream::Create(SkStream* stream, size_t bufferSize) {
+ if (nullptr == stream) {
+ return nullptr;
+ }
+ return new FrontBufferedStream(stream, bufferSize);
+}
+
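+// Illustrative usage sketch (not part of the original file): wrapping a
+// non-rewindable stream lets the caller read a header and rewind, as long as
+// no more than bufferSize bytes have been consumed.
+//
+//   SkAutoTDelete<SkStreamRewindable> wrapped(
+//           SkFrontBufferedStream::Create(rawStream, 64));  // rawStream: hypothetical SkStream*
+//   char header[16];
+//   wrapped->read(header, sizeof(header));  // served via the internal buffer
+//   wrapped->rewind();                      // succeeds: only 16 of 64 bytes used
+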
+FrontBufferedStream::FrontBufferedStream(SkStream* stream, size_t bufferSize)
+ : fStream(stream)
+ , fHasLength(stream->hasPosition() && stream->hasLength())
+ , fLength(stream->getLength() - stream->getPosition())
+ , fOffset(0)
+ , fBufferedSoFar(0)
+ , fBufferSize(bufferSize)
+ , fBuffer(bufferSize) {}
+
+bool FrontBufferedStream::isAtEnd() const {
+ if (fOffset < fBufferedSoFar) {
+ // Even if the underlying stream is at the end, this stream has been
+ // rewound after buffering, so it is not at the end.
+ return false;
+ }
+
+ return fStream->isAtEnd();
+}
+
+bool FrontBufferedStream::rewind() {
+ // Only allow a rewind if we have not exceeded the buffer.
+ if (fOffset <= fBufferSize) {
+ fOffset = 0;
+ return true;
+ }
+ return false;
+}
+
+size_t FrontBufferedStream::readFromBuffer(char* dst, size_t size) {
+ SkASSERT(fOffset < fBufferedSoFar);
+ // Some data has already been copied to fBuffer. Read up to the
+ // lesser of the size requested and the remainder of the buffered
+ // data.
+ const size_t bytesToCopy = SkTMin(size, fBufferedSoFar - fOffset);
+ if (dst != nullptr) {
+ memcpy(dst, fBuffer + fOffset, bytesToCopy);
+ }
+
+ // Update fOffset to the new position. It is guaranteed to be
+ // within the buffered data.
+ fOffset += bytesToCopy;
+ SkASSERT(fOffset <= fBufferedSoFar);
+
+ return bytesToCopy;
+}
+
+size_t FrontBufferedStream::bufferAndWriteTo(char* dst, size_t size) {
+ SkASSERT(size > 0);
+ SkASSERT(fOffset >= fBufferedSoFar);
+ SkASSERT(fBuffer);
+ // Data needs to be buffered. Buffer up to the lesser of the size requested
+ // and the remainder of the max buffer size.
+ const size_t bytesToBuffer = SkTMin(size, fBufferSize - fBufferedSoFar);
+ char* buffer = fBuffer + fOffset;
+ const size_t buffered = fStream->read(buffer, bytesToBuffer);
+
+ fBufferedSoFar += buffered;
+ fOffset = fBufferedSoFar;
+ SkASSERT(fBufferedSoFar <= fBufferSize);
+
+ // Copy the buffer to the destination buffer and update the amount read.
+ if (dst != nullptr) {
+ memcpy(dst, buffer, buffered);
+ }
+
+ return buffered;
+}
+
+size_t FrontBufferedStream::readDirectlyFromStream(char* dst, size_t size) {
+ SkASSERT(size > 0);
+ // If we get here, we have buffered all that can be buffered.
+ SkASSERT(fBufferSize == fBufferedSoFar && fOffset >= fBufferSize);
+
+ const size_t bytesReadDirectly = fStream->read(dst, size);
+ fOffset += bytesReadDirectly;
+
+ // If we have read past the end of the buffer, rewinding is no longer
+ // supported, so we can go ahead and free the memory.
+ if (bytesReadDirectly > 0) {
+ sk_free(fBuffer.release());
+ }
+
+ return bytesReadDirectly;
+}
+
+size_t FrontBufferedStream::peek(void* dst, size_t size) const {
+ // Keep track of the offset so we can return to it.
+ const size_t start = fOffset;
+
+ if (start >= fBufferSize) {
+ // This stream is not able to buffer.
+ return 0;
+ }
+
+ size = SkTMin(size, fBufferSize - start);
+ FrontBufferedStream* nonConstThis = const_cast<FrontBufferedStream*>(this);
+ const size_t bytesRead = nonConstThis->read(dst, size);
+ nonConstThis->fOffset = start;
+ return bytesRead;
+}
+
+size_t FrontBufferedStream::read(void* voidDst, size_t size) {
+ // Cast voidDst to a char* for easy addition.
+ char* dst = reinterpret_cast<char*>(voidDst);
+ SkDEBUGCODE(const size_t totalSize = size;)
+ const size_t start = fOffset;
+
+ // First, read any data that was previously buffered.
+ if (fOffset < fBufferedSoFar) {
+ const size_t bytesCopied = this->readFromBuffer(dst, size);
+
+ // Update the remaining number of bytes needed to read
+ // and the destination buffer.
+ size -= bytesCopied;
+ SkASSERT(size + (fOffset - start) == totalSize);
+ if (dst != nullptr) {
+ dst += bytesCopied;
+ }
+ }
+
+ // Buffer any more data that should be buffered, and copy it to the
+ // destination.
+ if (size > 0 && fBufferedSoFar < fBufferSize && !fStream->isAtEnd()) {
+ const size_t buffered = this->bufferAndWriteTo(dst, size);
+
+ // Update the remaining number of bytes needed to read
+ // and the destination buffer.
+ size -= buffered;
+ SkASSERT(size + (fOffset - start) == totalSize);
+ if (dst != nullptr) {
+ dst += buffered;
+ }
+ }
+
+ if (size > 0 && !fStream->isAtEnd()) {
+ SkDEBUGCODE(const size_t bytesReadDirectly =) this->readDirectlyFromStream(dst, size);
+ SkDEBUGCODE(size -= bytesReadDirectly;)
+ SkASSERT(size + (fOffset - start) == totalSize);
+ }
+
+ return fOffset - start;
+}
diff --git a/gfx/skia/skia/src/utils/SkInterpolator.cpp b/gfx/skia/skia/src/utils/SkInterpolator.cpp
new file mode 100644
index 000000000..d3c0b26fe
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkInterpolator.cpp
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkFixed.h"
+#include "SkInterpolator.h"
+#include "SkMath.h"
+#include "SkTSearch.h"
+
+SkInterpolatorBase::SkInterpolatorBase() {
+ fStorage = nullptr;
+ fTimes = nullptr;
+ SkDEBUGCODE(fTimesArray = nullptr;)
+}
+
+SkInterpolatorBase::~SkInterpolatorBase() {
+ if (fStorage) {
+ sk_free(fStorage);
+ }
+}
+
+void SkInterpolatorBase::reset(int elemCount, int frameCount) {
+ fFlags = 0;
+ fElemCount = SkToU8(elemCount);
+ fFrameCount = SkToS16(frameCount);
+ fRepeat = SK_Scalar1;
+ if (fStorage) {
+ sk_free(fStorage);
+ fStorage = nullptr;
+ fTimes = nullptr;
+ SkDEBUGCODE(fTimesArray = nullptr);
+ }
+}
+
+/* Each value[] run is formatted as:
+ <time (in msec)>
+ <blend>
+ <data[fElemCount]>
+
+ Totaling fElemCount+2 entries per keyframe
+*/
+
+bool SkInterpolatorBase::getDuration(SkMSec* startTime, SkMSec* endTime) const {
+ if (fFrameCount == 0) {
+ return false;
+ }
+
+ if (startTime) {
+ *startTime = fTimes[0].fTime;
+ }
+ if (endTime) {
+ *endTime = fTimes[fFrameCount - 1].fTime;
+ }
+ return true;
+}
+
+SkScalar SkInterpolatorBase::ComputeRelativeT(SkMSec time, SkMSec prevTime,
+ SkMSec nextTime, const SkScalar blend[4]) {
+ SkASSERT(time > prevTime && time < nextTime);
+
+ SkScalar t = (SkScalar)(time - prevTime) / (SkScalar)(nextTime - prevTime);
+ return blend ?
+ SkUnitCubicInterp(t, blend[0], blend[1], blend[2], blend[3]) : t;
+}
+
+SkInterpolatorBase::Result SkInterpolatorBase::timeToT(SkMSec time, SkScalar* T,
+ int* indexPtr, bool* exactPtr) const {
+ SkASSERT(fFrameCount > 0);
+ Result result = kNormal_Result;
+ if (fRepeat != SK_Scalar1) {
+ SkMSec startTime = 0, endTime = 0; // initialize to avoid warning
+ this->getDuration(&startTime, &endTime);
+ SkMSec totalTime = endTime - startTime;
+ SkMSec offsetTime = time - startTime;
+ endTime = SkScalarFloorToInt(fRepeat * totalTime);
+ if (offsetTime >= endTime) {
+ SkScalar fraction = SkScalarFraction(fRepeat);
+ offsetTime = fraction == 0 && fRepeat > 0 ? totalTime :
+ (SkMSec) SkScalarFloorToInt(fraction * totalTime);
+ result = kFreezeEnd_Result;
+ } else {
+ int mirror = fFlags & kMirror;
+ offsetTime = offsetTime % (totalTime << mirror);
+ if (offsetTime > totalTime) { // can only be true if the kMirror flag is set
+ offsetTime = (totalTime << 1) - offsetTime;
+ }
+ }
+ time = offsetTime + startTime;
+ }
+
+ int index = SkTSearch<SkMSec>(&fTimes[0].fTime, fFrameCount, time,
+ sizeof(SkTimeCode));
+
+ bool exact = true;
+
+ if (index < 0) {
+ index = ~index;
+ if (index == 0) {
+ result = kFreezeStart_Result;
+ } else if (index == fFrameCount) {
+ if (fFlags & kReset) {
+ index = 0;
+ } else {
+ index -= 1;
+ }
+ result = kFreezeEnd_Result;
+ } else {
+ exact = false;
+ }
+ }
+ SkASSERT(index < fFrameCount);
+ const SkTimeCode* nextTime = &fTimes[index];
+ SkMSec nextT = nextTime[0].fTime;
+ if (exact) {
+ *T = 0;
+ } else {
+ SkMSec prevT = nextTime[-1].fTime;
+ *T = ComputeRelativeT(time, prevT, nextT, nextTime[-1].fBlend);
+ }
+ *indexPtr = index;
+ *exactPtr = exact;
+ return result;
+}
+
+
+SkInterpolator::SkInterpolator() {
+ INHERITED::reset(0, 0);
+ fValues = nullptr;
+ SkDEBUGCODE(fScalarsArray = nullptr;)
+}
+
+SkInterpolator::SkInterpolator(int elemCount, int frameCount) {
+ SkASSERT(elemCount > 0);
+ this->reset(elemCount, frameCount);
+}
+
+void SkInterpolator::reset(int elemCount, int frameCount) {
+ INHERITED::reset(elemCount, frameCount);
+ fStorage = sk_malloc_throw((sizeof(SkScalar) * elemCount +
+ sizeof(SkTimeCode)) * frameCount);
+ fTimes = (SkTimeCode*) fStorage;
+ fValues = (SkScalar*) ((char*) fStorage + sizeof(SkTimeCode) * frameCount);
+#ifdef SK_DEBUG
+ fTimesArray = (SkTimeCode(*)[10]) fTimes;
+ fScalarsArray = (SkScalar(*)[10]) fValues;
+#endif
+}
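+
+// Descriptive note (not from the original file): after reset(), fStorage holds
+// frameCount SkTimeCode entries followed by frameCount * elemCount SkScalars,
+// so the values for keyframe i start at fValues[i * fElemCount].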
+
+#define SK_Fixed1Third (SK_Fixed1/3)
+#define SK_Fixed2Third (SK_Fixed1*2/3)
+
+static const SkScalar gIdentityBlend[4] = {
+ 0.33333333f, 0.33333333f, 0.66666667f, 0.66666667f
+};
+
+bool SkInterpolator::setKeyFrame(int index, SkMSec time,
+ const SkScalar values[], const SkScalar blend[4]) {
+ SkASSERT(values != nullptr);
+
+ if (blend == nullptr) {
+ blend = gIdentityBlend;
+ }
+
+ bool success = ~index == SkTSearch<SkMSec>(&fTimes->fTime, index, time,
+ sizeof(SkTimeCode));
+ SkASSERT(success);
+ if (success) {
+ SkTimeCode* timeCode = &fTimes[index];
+ timeCode->fTime = time;
+ memcpy(timeCode->fBlend, blend, sizeof(timeCode->fBlend));
+ SkScalar* dst = &fValues[fElemCount * index];
+ memcpy(dst, values, fElemCount * sizeof(SkScalar));
+ }
+ return success;
+}
+
+SkInterpolator::Result SkInterpolator::timeToValues(SkMSec time,
+ SkScalar values[]) const {
+ SkScalar T;
+ int index;
+ bool exact;
+ Result result = timeToT(time, &T, &index, &exact);
+ if (values) {
+ const SkScalar* nextSrc = &fValues[index * fElemCount];
+
+ if (exact) {
+ memcpy(values, nextSrc, fElemCount * sizeof(SkScalar));
+ } else {
+ SkASSERT(index > 0);
+
+ const SkScalar* prevSrc = nextSrc - fElemCount;
+
+ for (int i = fElemCount - 1; i >= 0; --i) {
+ values[i] = SkScalarInterp(prevSrc[i], nextSrc[i], T);
+ }
+ }
+ }
+ return result;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef int Dot14;
+#define Dot14_ONE (1 << 14)
+#define Dot14_HALF (1 << 13)
+
+#define Dot14ToFloat(x) ((x) / 16384.f)
+
+static inline Dot14 Dot14Mul(Dot14 a, Dot14 b) {
+ return (a * b + Dot14_HALF) >> 14;
+}
+
+static inline Dot14 eval_cubic(Dot14 t, Dot14 A, Dot14 B, Dot14 C) {
+ return Dot14Mul(Dot14Mul(Dot14Mul(C, t) + B, t) + A, t);
+}
+
+static inline Dot14 pin_and_convert(SkScalar x) {
+ if (x <= 0) {
+ return 0;
+ }
+ if (x >= SK_Scalar1) {
+ return Dot14_ONE;
+ }
+ return SkScalarToFixed(x) >> 2;
+}
+
+SkScalar SkUnitCubicInterp(SkScalar value, SkScalar bx, SkScalar by,
+ SkScalar cx, SkScalar cy) {
+ // pin to the unit-square, and convert to 2.14
+ Dot14 x = pin_and_convert(value);
+
+ if (x == 0) return 0;
+ if (x == Dot14_ONE) return SK_Scalar1;
+
+ Dot14 b = pin_and_convert(bx);
+ Dot14 c = pin_and_convert(cx);
+
+ // Now compute our coefficients from the control points
+ // t -> 3b
+ // t^2 -> 3c - 6b
+ // t^3 -> 3b - 3c + 1
+ Dot14 A = 3*b;
+ Dot14 B = 3*(c - 2*b);
+ Dot14 C = 3*(b - c) + Dot14_ONE;
+
+ // Now search for a t value given x
+ Dot14 t = Dot14_HALF;
+ Dot14 dt = Dot14_HALF;
+ for (int i = 0; i < 13; i++) {
+ dt >>= 1;
+ Dot14 guess = eval_cubic(t, A, B, C);
+ if (x < guess) {
+ t -= dt;
+ } else {
+ t += dt;
+ }
+ }
+
+ // Now we have t, so compute the coeff for Y and evaluate
+ b = pin_and_convert(by);
+ c = pin_and_convert(cy);
+ A = 3*b;
+ B = 3*(c - 2*b);
+ C = 3*(b - c) + Dot14_ONE;
+ return SkFixedToScalar(eval_cubic(t, A, B, C) << 2);
+}
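+
+// Worked example (not part of the original file): with the identity blend used
+// by SkInterpolator (bx = by = 1/3, cx = cy = 2/3) the coefficients come out to
+// approximately A = Dot14_ONE, B = 0, C = 0, so x(t) ~= t and y(t) ~= t, and
+// SkUnitCubicInterp returns roughly its input for any value in [0, 1], limited
+// only by the 2.14 fixed-point precision of the 13-step bisection above.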
diff --git a/gfx/skia/skia/src/utils/SkLayer.cpp b/gfx/skia/skia/src/utils/SkLayer.cpp
new file mode 100644
index 000000000..d0de1ba6b
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkLayer.cpp
@@ -0,0 +1,229 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkLayer.h"
+#include "SkCanvas.h"
+
+//#define DEBUG_DRAW_LAYER_BOUNDS
+//#define DEBUG_TRACK_NEW_DELETE
+
+#ifdef DEBUG_TRACK_NEW_DELETE
+ static int gLayerAllocCount;
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkLayer::SkLayer() {
+ fParent = nullptr;
+ m_opacity = SK_Scalar1;
+ m_size.set(0, 0);
+ m_position.set(0, 0);
+ m_anchorPoint.set(SK_ScalarHalf, SK_ScalarHalf);
+
+ fMatrix.reset();
+ fChildrenMatrix.reset();
+ fFlags = 0;
+
+#ifdef DEBUG_TRACK_NEW_DELETE
+ gLayerAllocCount += 1;
+ SkDebugf("SkLayer new: %d\n", gLayerAllocCount);
+#endif
+}
+
+SkLayer::SkLayer(const SkLayer& src) : INHERITED() {
+ fParent = nullptr;
+ m_opacity = src.m_opacity;
+ m_size = src.m_size;
+ m_position = src.m_position;
+ m_anchorPoint = src.m_anchorPoint;
+
+ fMatrix = src.fMatrix;
+ fChildrenMatrix = src.fChildrenMatrix;
+ fFlags = src.fFlags;
+
+#ifdef DEBUG_TRACK_NEW_DELETE
+ gLayerAllocCount += 1;
+ SkDebugf("SkLayer copy: %d\n", gLayerAllocCount);
+#endif
+}
+
+SkLayer::~SkLayer() {
+ this->removeChildren();
+
+#ifdef DEBUG_TRACK_NEW_DELETE
+ gLayerAllocCount -= 1;
+ SkDebugf("SkLayer delete: %d\n", gLayerAllocCount);
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkLayer::isInheritFromRootTransform() const {
+ return (fFlags & kInheritFromRootTransform_Flag) != 0;
+}
+
+void SkLayer::setInheritFromRootTransform(bool doInherit) {
+ if (doInherit) {
+ fFlags |= kInheritFromRootTransform_Flag;
+ } else {
+ fFlags &= ~kInheritFromRootTransform_Flag;
+ }
+}
+
+void SkLayer::setMatrix(const SkMatrix& matrix) {
+ fMatrix = matrix;
+}
+
+void SkLayer::setChildrenMatrix(const SkMatrix& matrix) {
+ fChildrenMatrix = matrix;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+int SkLayer::countChildren() const {
+ return m_children.count();
+}
+
+SkLayer* SkLayer::getChild(int index) const {
+ if ((unsigned)index < (unsigned)m_children.count()) {
+ SkASSERT(m_children[index]->fParent == this);
+ return m_children[index];
+ }
+ return nullptr;
+}
+
+SkLayer* SkLayer::addChild(SkLayer* child) {
+ SkASSERT(this != child);
+ child->ref();
+ child->detachFromParent();
+ SkASSERT(child->fParent == nullptr);
+ child->fParent = this;
+
+ *m_children.append() = child;
+ return child;
+}
+
+void SkLayer::detachFromParent() {
+ if (fParent) {
+ int index = fParent->m_children.find(this);
+ SkASSERT(index >= 0);
+ fParent->m_children.remove(index);
+ fParent = nullptr;
+ this->unref(); // this call might delete us
+ }
+}
+
+void SkLayer::removeChildren() {
+ int count = m_children.count();
+ for (int i = 0; i < count; i++) {
+ SkLayer* child = m_children[i];
+ SkASSERT(child->fParent == this);
+ child->fParent = nullptr; // in case it has more than one owner
+ child->unref();
+ }
+ m_children.reset();
+}
+
+SkLayer* SkLayer::getRootLayer() const {
+ const SkLayer* root = this;
+ while (root->fParent != nullptr) {
+ root = root->fParent;
+ }
+ return const_cast<SkLayer*>(root);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkLayer::getLocalTransform(SkMatrix* matrix) const {
+ matrix->setTranslate(m_position.fX, m_position.fY);
+
+ SkScalar tx = SkScalarMul(m_anchorPoint.fX, m_size.width());
+ SkScalar ty = SkScalarMul(m_anchorPoint.fY, m_size.height());
+ matrix->preTranslate(tx, ty);
+ matrix->preConcat(this->getMatrix());
+ matrix->preTranslate(-tx, -ty);
+}
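+
+// Descriptive note (not from the original file): the local transform composes
+// as T(m_position) * T(anchor) * fMatrix * T(-anchor), where anchor is
+// m_anchorPoint scaled by m_size, i.e. fMatrix is applied about the anchor
+// point rather than the layer origin.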
+
+void SkLayer::localToGlobal(SkMatrix* matrix) const {
+ this->getLocalTransform(matrix);
+
+ if (this->isInheritFromRootTransform()) {
+ matrix->postConcat(this->getRootLayer()->getMatrix());
+ return;
+ }
+
+ const SkLayer* layer = this;
+ while (layer->fParent != nullptr) {
+ layer = layer->fParent;
+
+ SkMatrix tmp;
+ layer->getLocalTransform(&tmp);
+ tmp.preConcat(layer->getChildrenMatrix());
+ matrix->postConcat(tmp);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkLayer::onDraw(SkCanvas*, SkScalar opacity) {
+// SkDebugf("----- no onDraw for %p\n", this);
+}
+
+#include "SkString.h"
+
+void SkLayer::draw(SkCanvas* canvas, SkScalar opacity) {
+#if 0
+ SkString str1, str2;
+ // this->getMatrix().toDumpString(&str1);
+ // this->getChildrenMatrix().toDumpString(&str2);
+ SkDebugf("--- drawlayer %p opacity %g size [%g %g] pos [%g %g] matrix %s children %s\n",
+ this, opacity * this->getOpacity(), m_size.width(), m_size.height(),
+ m_position.fX, m_position.fY, str1.c_str(), str2.c_str());
+#endif
+
+ opacity = SkScalarMul(opacity, this->getOpacity());
+ if (opacity <= 0) {
+// SkDebugf("---- abort drawing %p opacity %g\n", this, opacity);
+ return;
+ }
+
+ SkAutoCanvasRestore acr(canvas, true);
+
+ // apply our local transform
+ {
+ SkMatrix tmp;
+ this->getLocalTransform(&tmp);
+ if (this->isInheritFromRootTransform()) {
+ // should we also apply the root's childrenMatrix?
+ canvas->setMatrix(getRootLayer()->getMatrix());
+ }
+ canvas->concat(tmp);
+ }
+
+ this->onDraw(canvas, opacity);
+
+#ifdef DEBUG_DRAW_LAYER_BOUNDS
+ {
+ SkRect r = SkRect::MakeSize(this->getSize());
+ SkPaint p;
+ p.setAntiAlias(true);
+ p.setStyle(SkPaint::kStroke_Style);
+ p.setStrokeWidth(SkIntToScalar(2));
+ p.setColor(0xFFFF44DD);
+ canvas->drawRect(r, p);
+ canvas->drawLine(r.fLeft, r.fTop, r.fRight, r.fBottom, p);
+ canvas->drawLine(r.fLeft, r.fBottom, r.fRight, r.fTop, p);
+ }
+#endif
+
+ int count = this->countChildren();
+ if (count > 0) {
+ canvas->concat(this->getChildrenMatrix());
+ for (int i = 0; i < count; i++) {
+ this->getChild(i)->draw(canvas, opacity);
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/utils/SkLua.cpp b/gfx/skia/skia/src/utils/SkLua.cpp
new file mode 100644
index 000000000..9eb6f0b27
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkLua.cpp
@@ -0,0 +1,2151 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkLua.h"
+
+#if SK_SUPPORT_GPU
+#include "GrClip.h"
+#include "GrReducedClip.h"
+#endif
+
+#include "SkBlurImageFilter.h"
+#include "SkCanvas.h"
+#include "SkColorFilter.h"
+#include "SkData.h"
+#include "SkDocument.h"
+#include "SkGradientShader.h"
+#include "SkImage.h"
+#include "SkMatrix.h"
+#include "SkPaint.h"
+#include "SkPath.h"
+#include "SkPictureRecorder.h"
+#include "SkPixelRef.h"
+#include "SkRRect.h"
+#include "SkString.h"
+#include "SkSurface.h"
+#include "SkTextBlob.h"
+#include "SkTypeface.h"
+
+extern "C" {
+ #include "lua.h"
+ #include "lualib.h"
+ #include "lauxlib.h"
+}
+
+// return the metatable name for a given class
+template <typename T> const char* get_mtname();
+#define DEF_MTNAME(T) \
+ template <> const char* get_mtname<T>() { \
+ return #T "_LuaMetaTableName"; \
+ }
+
+DEF_MTNAME(SkCanvas)
+DEF_MTNAME(SkColorFilter)
+DEF_MTNAME(SkDocument)
+DEF_MTNAME(SkImage)
+DEF_MTNAME(SkImageFilter)
+DEF_MTNAME(SkMatrix)
+DEF_MTNAME(SkRRect)
+DEF_MTNAME(SkPath)
+DEF_MTNAME(SkPaint)
+DEF_MTNAME(SkPathEffect)
+DEF_MTNAME(SkPicture)
+DEF_MTNAME(SkPictureRecorder)
+DEF_MTNAME(SkShader)
+DEF_MTNAME(SkSurface)
+DEF_MTNAME(SkTextBlob)
+DEF_MTNAME(SkTypeface)
+
+template <typename T> T* push_new(lua_State* L) {
+ T* addr = (T*)lua_newuserdata(L, sizeof(T));
+ new (addr) T;
+ luaL_getmetatable(L, get_mtname<T>());
+ lua_setmetatable(L, -2);
+ return addr;
+}
+
+template <typename T> void push_obj(lua_State* L, const T& obj) {
+ new (lua_newuserdata(L, sizeof(T))) T(obj);
+ luaL_getmetatable(L, get_mtname<T>());
+ lua_setmetatable(L, -2);
+}
+
+template <typename T> T* push_ref(lua_State* L, T* ref) {
+ *(T**)lua_newuserdata(L, sizeof(T*)) = SkSafeRef(ref);
+ luaL_getmetatable(L, get_mtname<T>());
+ lua_setmetatable(L, -2);
+ return ref;
+}
+
+template <typename T> void push_ref(lua_State* L, sk_sp<T> sp) {
+ *(T**)lua_newuserdata(L, sizeof(T*)) = sp.release();
+ luaL_getmetatable(L, get_mtname<T>());
+ lua_setmetatable(L, -2);
+}
+
+template <typename T> T* get_ref(lua_State* L, int index) {
+ return *(T**)luaL_checkudata(L, index, get_mtname<T>());
+}
+
+template <typename T> T* get_obj(lua_State* L, int index) {
+ return (T*)luaL_checkudata(L, index, get_mtname<T>());
+}
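+
+// Descriptive note (not from the original file): push_ref/push_obj leave a
+// userdata on the Lua stack tagged with the per-type metatable name registered
+// via DEF_MTNAME (e.g. "SkCanvas_LuaMetaTableName"); get_ref/get_obj then use
+// luaL_checkudata to recover the pointer and enforce the type from Lua.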
+
+static bool lua2bool(lua_State* L, int index) {
+ return !!lua_toboolean(L, index);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkLua::SkLua(const char termCode[]) : fTermCode(termCode), fWeOwnL(true) {
+ fL = luaL_newstate();
+ luaL_openlibs(fL);
+ SkLua::Load(fL);
+}
+
+SkLua::SkLua(lua_State* L) : fL(L), fWeOwnL(false) {}
+
+SkLua::~SkLua() {
+ if (fWeOwnL) {
+ if (fTermCode.size() > 0) {
+ lua_getglobal(fL, fTermCode.c_str());
+ if (lua_pcall(fL, 0, 0, 0) != LUA_OK) {
+ SkDebugf("lua err: %s\n", lua_tostring(fL, -1));
+ }
+ }
+ lua_close(fL);
+ }
+}
+
+bool SkLua::runCode(const char code[]) {
+ int err = luaL_loadstring(fL, code) || lua_pcall(fL, 0, 0, 0);
+ if (err) {
+ SkDebugf("--- lua failed: %s\n", lua_tostring(fL, -1));
+ return false;
+ }
+ return true;
+}
+
+bool SkLua::runCode(const void* code, size_t size) {
+ SkString str((const char*)code, size);
+ return this->runCode(str.c_str());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define CHECK_SETFIELD(key) do if (key) lua_setfield(fL, -2, key); while (0)
+
+static void setfield_bool_if(lua_State* L, const char key[], bool pred) {
+ if (pred) {
+ lua_pushboolean(L, true);
+ lua_setfield(L, -2, key);
+ }
+}
+
+static void setfield_string(lua_State* L, const char key[], const char value[]) {
+ lua_pushstring(L, value);
+ lua_setfield(L, -2, key);
+}
+
+static void setfield_number(lua_State* L, const char key[], double value) {
+ lua_pushnumber(L, value);
+ lua_setfield(L, -2, key);
+}
+
+static void setfield_boolean(lua_State* L, const char key[], bool value) {
+ lua_pushboolean(L, value);
+ lua_setfield(L, -2, key);
+}
+
+static void setfield_scalar(lua_State* L, const char key[], SkScalar value) {
+ setfield_number(L, key, SkScalarToLua(value));
+}
+
+static void setfield_function(lua_State* L,
+ const char key[], lua_CFunction value) {
+ lua_pushcfunction(L, value);
+ lua_setfield(L, -2, key);
+}
+
+static int lua2int_def(lua_State* L, int index, int defaultValue) {
+ if (lua_isnumber(L, index)) {
+ return (int)lua_tonumber(L, index);
+ } else {
+ return defaultValue;
+ }
+}
+
+static SkScalar lua2scalar(lua_State* L, int index) {
+ SkASSERT(lua_isnumber(L, index));
+ return SkLuaToScalar(lua_tonumber(L, index));
+}
+
+static SkScalar lua2scalar_def(lua_State* L, int index, SkScalar defaultValue) {
+ if (lua_isnumber(L, index)) {
+ return SkLuaToScalar(lua_tonumber(L, index));
+ } else {
+ return defaultValue;
+ }
+}
+
+static SkScalar getarray_scalar(lua_State* L, int stackIndex, int arrayIndex) {
+ SkASSERT(lua_istable(L, stackIndex));
+ lua_rawgeti(L, stackIndex, arrayIndex);
+
+ SkScalar value = lua2scalar(L, -1);
+ lua_pop(L, 1);
+ return value;
+}
+
+static void getarray_scalars(lua_State* L, int stackIndex, SkScalar dst[], int count) {
+ for (int i = 0; i < count; ++i) {
+ dst[i] = getarray_scalar(L, stackIndex, i + 1);
+ }
+}
+
+static void getarray_points(lua_State* L, int stackIndex, SkPoint pts[], int count) {
+ getarray_scalars(L, stackIndex, &pts[0].fX, count * 2);
+}
+
+static void setarray_number(lua_State* L, int index, double value) {
+ lua_pushnumber(L, value);
+ lua_rawseti(L, -2, index);
+}
+
+static void setarray_scalar(lua_State* L, int index, SkScalar value) {
+ setarray_number(L, index, SkScalarToLua(value));
+}
+
+static void setarray_string(lua_State* L, int index, const char str[]) {
+ lua_pushstring(L, str);
+ lua_rawseti(L, -2, index);
+}
+
+void SkLua::pushBool(bool value, const char key[]) {
+ lua_pushboolean(fL, value);
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushString(const char str[], const char key[]) {
+ lua_pushstring(fL, str);
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushString(const char str[], size_t length, const char key[]) {
+ // TODO: how to do this w/o making a copy?
+ SkString s(str, length);
+ lua_pushstring(fL, s.c_str());
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushString(const SkString& str, const char key[]) {
+ lua_pushstring(fL, str.c_str());
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushColor(SkColor color, const char key[]) {
+ lua_newtable(fL);
+ setfield_number(fL, "a", SkColorGetA(color) / 255.0);
+ setfield_number(fL, "r", SkColorGetR(color) / 255.0);
+ setfield_number(fL, "g", SkColorGetG(color) / 255.0);
+ setfield_number(fL, "b", SkColorGetB(color) / 255.0);
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushU32(uint32_t value, const char key[]) {
+ lua_pushnumber(fL, (double)value);
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushScalar(SkScalar value, const char key[]) {
+ lua_pushnumber(fL, SkScalarToLua(value));
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushArrayU16(const uint16_t array[], int count, const char key[]) {
+ lua_newtable(fL);
+ for (int i = 0; i < count; ++i) {
+ // make it base-1 to match lua convention
+ setarray_number(fL, i + 1, (double)array[i]);
+ }
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushArrayPoint(const SkPoint array[], int count, const char key[]) {
+ lua_newtable(fL);
+ for (int i = 0; i < count; ++i) {
+ // make it base-1 to match lua convention
+ lua_newtable(fL);
+ this->pushScalar(array[i].fX, "x");
+ this->pushScalar(array[i].fY, "y");
+ lua_rawseti(fL, -2, i + 1);
+ }
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushArrayScalar(const SkScalar array[], int count, const char key[]) {
+ lua_newtable(fL);
+ for (int i = 0; i < count; ++i) {
+ // make it base-1 to match lua convention
+ setarray_scalar(fL, i + 1, array[i]);
+ }
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushRect(const SkRect& r, const char key[]) {
+ lua_newtable(fL);
+ setfield_scalar(fL, "left", r.fLeft);
+ setfield_scalar(fL, "top", r.fTop);
+ setfield_scalar(fL, "right", r.fRight);
+ setfield_scalar(fL, "bottom", r.fBottom);
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushRRect(const SkRRect& rr, const char key[]) {
+ push_obj(fL, rr);
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushDash(const SkPathEffect::DashInfo& info, const char key[]) {
+ lua_newtable(fL);
+ setfield_scalar(fL, "phase", info.fPhase);
+ this->pushArrayScalar(info.fIntervals, info.fCount, "intervals");
+ CHECK_SETFIELD(key);
+}
+
+
+void SkLua::pushMatrix(const SkMatrix& matrix, const char key[]) {
+ push_obj(fL, matrix);
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushPaint(const SkPaint& paint, const char key[]) {
+ push_obj(fL, paint);
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushPath(const SkPath& path, const char key[]) {
+ push_obj(fL, path);
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushCanvas(SkCanvas* canvas, const char key[]) {
+ push_ref(fL, canvas);
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushTextBlob(const SkTextBlob* blob, const char key[]) {
+ push_ref(fL, const_cast<SkTextBlob*>(blob));
+ CHECK_SETFIELD(key);
+}
+
+static const char* element_type(SkClipStack::Element::Type type) {
+ switch (type) {
+ case SkClipStack::Element::kEmpty_Type:
+ return "empty";
+ case SkClipStack::Element::kRect_Type:
+ return "rect";
+ case SkClipStack::Element::kRRect_Type:
+ return "rrect";
+ case SkClipStack::Element::kPath_Type:
+ return "path";
+ }
+ return "unknown";
+}
+
+static const char* region_op(SkRegion::Op op) {
+ switch (op) {
+ case SkRegion::kDifference_Op:
+ return "difference";
+ case SkRegion::kIntersect_Op:
+ return "intersect";
+ case SkRegion::kUnion_Op:
+ return "union";
+ case SkRegion::kXOR_Op:
+ return "xor";
+ case SkRegion::kReverseDifference_Op:
+ return "reverse-difference";
+ case SkRegion::kReplace_Op:
+ return "replace";
+ }
+ return "unknown";
+}
+
+void SkLua::pushClipStack(const SkClipStack& stack, const char* key) {
+ lua_newtable(fL);
+ SkClipStack::B2TIter iter(stack);
+ const SkClipStack::Element* element;
+ int i = 0;
+ while ((element = iter.next())) {
+ this->pushClipStackElement(*element);
+ lua_rawseti(fL, -2, ++i);
+ }
+ CHECK_SETFIELD(key);
+}
+
+void SkLua::pushClipStackElement(const SkClipStack::Element& element, const char* key) {
+ lua_newtable(fL);
+ SkClipStack::Element::Type type = element.getType();
+ this->pushString(element_type(type), "type");
+ switch (type) {
+ case SkClipStack::Element::kEmpty_Type:
+ break;
+ case SkClipStack::Element::kRect_Type:
+ this->pushRect(element.getRect(), "rect");
+ break;
+ case SkClipStack::Element::kRRect_Type:
+ this->pushRRect(element.getRRect(), "rrect");
+ break;
+ case SkClipStack::Element::kPath_Type:
+ this->pushPath(element.getPath(), "path");
+ break;
+ }
+ this->pushString(region_op((SkRegion::Op)element.getOp()), "op");
+ this->pushBool(element.isAA(), "aa");
+ CHECK_SETFIELD(key);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+static SkScalar getfield_scalar(lua_State* L, int index, const char key[]) {
+ SkASSERT(lua_istable(L, index));
+ lua_pushstring(L, key);
+ lua_gettable(L, index);
+
+ SkScalar value = lua2scalar(L, -1);
+ lua_pop(L, 1);
+ return value;
+}
+
+static SkScalar getfield_scalar_default(lua_State* L, int index, const char key[], SkScalar def) {
+ SkASSERT(lua_istable(L, index));
+ lua_pushstring(L, key);
+ lua_gettable(L, index);
+
+ SkScalar value;
+ if (lua_isnil(L, -1)) {
+ value = def;
+ } else {
+ value = lua2scalar(L, -1);
+ }
+ lua_pop(L, 1);
+ return value;
+}
+
+static SkScalar byte2unit(U8CPU byte) {
+ return byte / 255.0f;
+}
+
+static U8CPU unit2byte(SkScalar x) {
+ if (x <= 0) {
+ return 0;
+ } else if (x >= 1) {
+ return 255;
+ } else {
+ return SkScalarRoundToInt(x * 255);
+ }
+}
+
+static SkColor lua2color(lua_State* L, int index) {
+ return SkColorSetARGB(unit2byte(getfield_scalar_default(L, index, "a", 1)),
+ unit2byte(getfield_scalar_default(L, index, "r", 0)),
+ unit2byte(getfield_scalar_default(L, index, "g", 0)),
+ unit2byte(getfield_scalar_default(L, index, "b", 0)));
+}
+
+static SkRect* lua2rect(lua_State* L, int index, SkRect* rect) {
+ rect->set(getfield_scalar_default(L, index, "left", 0),
+ getfield_scalar_default(L, index, "top", 0),
+ getfield_scalar(L, index, "right"),
+ getfield_scalar(L, index, "bottom"));
+ return rect;
+}
+
+static int lcanvas_clear(lua_State* L) {
+ get_ref<SkCanvas>(L, 1)->clear(0);
+ return 0;
+}
+
+static int lcanvas_drawColor(lua_State* L) {
+ get_ref<SkCanvas>(L, 1)->drawColor(lua2color(L, 2));
+ return 0;
+}
+
+static int lcanvas_drawPaint(lua_State* L) {
+ get_ref<SkCanvas>(L, 1)->drawPaint(*get_obj<SkPaint>(L, 2));
+ return 0;
+}
+
+static int lcanvas_drawRect(lua_State* L) {
+ SkRect rect;
+ lua2rect(L, 2, &rect);
+ const SkPaint* paint = get_obj<SkPaint>(L, 3);
+ get_ref<SkCanvas>(L, 1)->drawRect(rect, *paint);
+ return 0;
+}
+
+static int lcanvas_drawOval(lua_State* L) {
+ SkRect rect;
+ get_ref<SkCanvas>(L, 1)->drawOval(*lua2rect(L, 2, &rect),
+ *get_obj<SkPaint>(L, 3));
+ return 0;
+}
+
+static int lcanvas_drawCircle(lua_State* L) {
+ get_ref<SkCanvas>(L, 1)->drawCircle(lua2scalar(L, 2),
+ lua2scalar(L, 3),
+ lua2scalar(L, 4),
+ *get_obj<SkPaint>(L, 5));
+ return 0;
+}
+
+static SkPaint* lua2OptionalPaint(lua_State* L, int index, SkPaint* paint) {
+ if (lua_isnumber(L, index)) {
+ paint->setAlpha(SkScalarRoundToInt(lua2scalar(L, index) * 255));
+ return paint;
+ } else if (lua_isuserdata(L, index)) {
+ const SkPaint* ptr = get_obj<SkPaint>(L, index);
+ if (ptr) {
+ *paint = *ptr;
+ return paint;
+ }
+ }
+ return nullptr;
+}
+
+static int lcanvas_drawImage(lua_State* L) {
+ SkCanvas* canvas = get_ref<SkCanvas>(L, 1);
+ SkImage* image = get_ref<SkImage>(L, 2);
+ if (nullptr == image) {
+ return 0;
+ }
+ SkScalar x = lua2scalar(L, 3);
+ SkScalar y = lua2scalar(L, 4);
+
+ SkPaint paint;
+ canvas->drawImage(image, x, y, lua2OptionalPaint(L, 5, &paint));
+ return 0;
+}
+
+static int lcanvas_drawImageRect(lua_State* L) {
+ SkCanvas* canvas = get_ref<SkCanvas>(L, 1);
+ SkImage* image = get_ref<SkImage>(L, 2);
+ if (nullptr == image) {
+ return 0;
+ }
+
+ SkRect srcR, dstR;
+ SkRect* srcRPtr = nullptr;
+ if (!lua_isnil(L, 3)) {
+ srcRPtr = lua2rect(L, 3, &srcR);
+ }
+ lua2rect(L, 4, &dstR);
+
+ SkPaint paint;
+ canvas->legacy_drawImageRect(image, srcRPtr, dstR, lua2OptionalPaint(L, 5, &paint));
+ return 0;
+}
+
+static int lcanvas_drawPatch(lua_State* L) {
+ SkPoint cubics[12];
+ SkColor colorStorage[4];
+ SkPoint texStorage[4];
+
+ const SkColor* colors = nullptr;
+ const SkPoint* texs = nullptr;
+
+ getarray_points(L, 2, cubics, 12);
+
+ colorStorage[0] = SK_ColorRED;
+ colorStorage[1] = SK_ColorGREEN;
+ colorStorage[2] = SK_ColorBLUE;
+ colorStorage[3] = SK_ColorGRAY;
+
+ if (lua_isnil(L, 4)) {
+ colors = colorStorage;
+ } else {
+ getarray_points(L, 4, texStorage, 4);
+ texs = texStorage;
+ }
+
+ get_ref<SkCanvas>(L, 1)->drawPatch(cubics, colors, texs, nullptr, *get_obj<SkPaint>(L, 5));
+ return 0;
+}
+
+static int lcanvas_drawPath(lua_State* L) {
+ get_ref<SkCanvas>(L, 1)->drawPath(*get_obj<SkPath>(L, 2),
+ *get_obj<SkPaint>(L, 3));
+ return 0;
+}
+
+// drawPicture(pic, x, y, paint)
+static int lcanvas_drawPicture(lua_State* L) {
+ SkCanvas* canvas = get_ref<SkCanvas>(L, 1);
+ SkPicture* picture = get_ref<SkPicture>(L, 2);
+ SkScalar x = lua2scalar_def(L, 3, 0);
+ SkScalar y = lua2scalar_def(L, 4, 0);
+ SkMatrix matrix, *matrixPtr = nullptr;
+ if (x || y) {
+ matrix.setTranslate(x, y);
+ matrixPtr = &matrix;
+ }
+ SkPaint paint;
+ canvas->drawPicture(picture, matrixPtr, lua2OptionalPaint(L, 5, &paint));
+ return 0;
+}
+
+static int lcanvas_drawText(lua_State* L) {
+ if (lua_gettop(L) < 5) {
+ return 0;
+ }
+
+ if (lua_isstring(L, 2) && lua_isnumber(L, 3) && lua_isnumber(L, 4)) {
+ size_t len;
+ const char* text = lua_tolstring(L, 2, &len);
+ get_ref<SkCanvas>(L, 1)->drawText(text, len,
+ lua2scalar(L, 3), lua2scalar(L, 4),
+ *get_obj<SkPaint>(L, 5));
+ }
+ return 0;
+}
+
+static int lcanvas_drawTextBlob(lua_State* L) {
+ const SkTextBlob* blob = get_ref<SkTextBlob>(L, 2);
+ SkScalar x = lua2scalar(L, 3);
+ SkScalar y = lua2scalar(L, 4);
+ const SkPaint& paint = *get_obj<SkPaint>(L, 5);
+ get_ref<SkCanvas>(L, 1)->drawTextBlob(blob, x, y, paint);
+ return 0;
+}
+
+static int lcanvas_getSaveCount(lua_State* L) {
+ lua_pushnumber(L, get_ref<SkCanvas>(L, 1)->getSaveCount());
+ return 1;
+}
+
+static int lcanvas_getTotalMatrix(lua_State* L) {
+ SkLua(L).pushMatrix(get_ref<SkCanvas>(L, 1)->getTotalMatrix());
+ return 1;
+}
+
+static int lcanvas_getClipStack(lua_State* L) {
+ SkLua(L).pushClipStack(*get_ref<SkCanvas>(L, 1)->getClipStack());
+ return 1;
+}
+
+int SkLua::lcanvas_getReducedClipStack(lua_State* L) {
+#if SK_SUPPORT_GPU
+ const SkCanvas* canvas = get_ref<SkCanvas>(L, 1);
+ SkRect queryBounds = SkRect::Make(canvas->getTopLayerBounds());
+ SkASSERT(!GrClip::GetPixelIBounds(queryBounds).isEmpty());
+
+ const GrReducedClip reducedClip(*canvas->getClipStack(), queryBounds);
+
+ GrReducedClip::ElementList::Iter iter(reducedClip.elements());
+ int i = 0;
+ lua_newtable(L);
+ while(iter.get()) {
+ SkLua(L).pushClipStackElement(*iter.get());
+ iter.next();
+ lua_rawseti(L, -2, ++i);
+ }
+ // Currently this only returns the element list to lua, not the initial state or result bounds.
+ // It could return these as additional items on the lua stack.
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+static int lcanvas_save(lua_State* L) {
+ lua_pushinteger(L, get_ref<SkCanvas>(L, 1)->save());
+ return 1;
+}
+
+static int lcanvas_saveLayer(lua_State* L) {
+ SkPaint paint;
+ lua_pushinteger(L, get_ref<SkCanvas>(L, 1)->saveLayer(nullptr, lua2OptionalPaint(L, 2, &paint)));
+ return 1;
+}
+
+static int lcanvas_restore(lua_State* L) {
+ get_ref<SkCanvas>(L, 1)->restore();
+ return 0;
+}
+
+static int lcanvas_scale(lua_State* L) {
+ SkScalar sx = lua2scalar_def(L, 2, 1);
+ SkScalar sy = lua2scalar_def(L, 3, sx);
+ get_ref<SkCanvas>(L, 1)->scale(sx, sy);
+ return 0;
+}
+
+static int lcanvas_translate(lua_State* L) {
+ SkScalar tx = lua2scalar_def(L, 2, 0);
+ SkScalar ty = lua2scalar_def(L, 3, 0);
+ get_ref<SkCanvas>(L, 1)->translate(tx, ty);
+ return 0;
+}
+
+static int lcanvas_rotate(lua_State* L) {
+ SkScalar degrees = lua2scalar_def(L, 2, 0);
+ get_ref<SkCanvas>(L, 1)->rotate(degrees);
+ return 0;
+}
+
+static int lcanvas_concat(lua_State* L) {
+ get_ref<SkCanvas>(L, 1)->concat(*get_obj<SkMatrix>(L, 2));
+ return 0;
+}
+
+static int lcanvas_newSurface(lua_State* L) {
+ int width = lua2int_def(L, 2, 0);
+ int height = lua2int_def(L, 3, 0);
+ SkImageInfo info = SkImageInfo::MakeN32Premul(width, height);
+ auto surface = get_ref<SkCanvas>(L, 1)->makeSurface(info);
+ if (nullptr == surface) {
+ lua_pushnil(L);
+ } else {
+ push_ref(L, surface);
+ }
+ return 1;
+}
+
+static int lcanvas_gc(lua_State* L) {
+ get_ref<SkCanvas>(L, 1)->unref();
+ return 0;
+}
+
+const struct luaL_Reg gSkCanvas_Methods[] = {
+ { "clear", lcanvas_clear },
+ { "drawColor", lcanvas_drawColor },
+ { "drawPaint", lcanvas_drawPaint },
+ { "drawRect", lcanvas_drawRect },
+ { "drawOval", lcanvas_drawOval },
+ { "drawCircle", lcanvas_drawCircle },
+ { "drawImage", lcanvas_drawImage },
+ { "drawImageRect", lcanvas_drawImageRect },
+ { "drawPatch", lcanvas_drawPatch },
+ { "drawPath", lcanvas_drawPath },
+ { "drawPicture", lcanvas_drawPicture },
+ { "drawText", lcanvas_drawText },
+ { "drawTextBlob", lcanvas_drawTextBlob },
+ { "getSaveCount", lcanvas_getSaveCount },
+ { "getTotalMatrix", lcanvas_getTotalMatrix },
+ { "getClipStack", lcanvas_getClipStack },
+#if SK_SUPPORT_GPU
+ { "getReducedClipStack", SkLua::lcanvas_getReducedClipStack },
+#endif
+ { "save", lcanvas_save },
+ { "saveLayer", lcanvas_saveLayer },
+ { "restore", lcanvas_restore },
+ { "scale", lcanvas_scale },
+ { "translate", lcanvas_translate },
+ { "rotate", lcanvas_rotate },
+ { "concat", lcanvas_concat },
+
+ { "newSurface", lcanvas_newSurface },
+
+ { "__gc", lcanvas_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int ldocument_beginPage(lua_State* L) {
+ const SkRect* contentPtr = nullptr;
+ push_ref(L, get_ref<SkDocument>(L, 1)->beginPage(lua2scalar(L, 2),
+ lua2scalar(L, 3),
+ contentPtr));
+ return 1;
+}
+
+static int ldocument_endPage(lua_State* L) {
+ get_ref<SkDocument>(L, 1)->endPage();
+ return 0;
+}
+
+static int ldocument_close(lua_State* L) {
+ get_ref<SkDocument>(L, 1)->close();
+ return 0;
+}
+
+static int ldocument_gc(lua_State* L) {
+ get_ref<SkDocument>(L, 1)->unref();
+ return 0;
+}
+
+static const struct luaL_Reg gSkDocument_Methods[] = {
+ { "beginPage", ldocument_beginPage },
+ { "endPage", ldocument_endPage },
+ { "close", ldocument_close },
+ { "__gc", ldocument_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lpaint_isAntiAlias(lua_State* L) {
+ lua_pushboolean(L, get_obj<SkPaint>(L, 1)->isAntiAlias());
+ return 1;
+}
+
+static int lpaint_setAntiAlias(lua_State* L) {
+ get_obj<SkPaint>(L, 1)->setAntiAlias(lua2bool(L, 2));
+ return 0;
+}
+
+static int lpaint_isDither(lua_State* L) {
+ lua_pushboolean(L, get_obj<SkPaint>(L, 1)->isDither());
+ return 1;
+}
+
+static int lpaint_setDither(lua_State* L) {
+ get_obj<SkPaint>(L, 1)->setDither(lua2bool(L, 2));
+ return 0;
+}
+
+static int lpaint_isUnderlineText(lua_State* L) {
+ lua_pushboolean(L, get_obj<SkPaint>(L, 1)->isUnderlineText());
+ return 1;
+}
+
+static int lpaint_isStrikeThruText(lua_State* L) {
+ lua_pushboolean(L, get_obj<SkPaint>(L, 1)->isStrikeThruText());
+ return 1;
+}
+
+static int lpaint_isFakeBoldText(lua_State* L) {
+ lua_pushboolean(L, get_obj<SkPaint>(L, 1)->isFakeBoldText());
+ return 1;
+}
+
+static int lpaint_isLinearText(lua_State* L) {
+ lua_pushboolean(L, get_obj<SkPaint>(L, 1)->isLinearText());
+ return 1;
+}
+
+static int lpaint_isSubpixelText(lua_State* L) {
+ lua_pushboolean(L, get_obj<SkPaint>(L, 1)->isSubpixelText());
+ return 1;
+}
+
+static int lpaint_setSubpixelText(lua_State* L) {
+ get_obj<SkPaint>(L, 1)->setSubpixelText(lua2bool(L, 2));
+ return 1;
+}
+
+static int lpaint_isDevKernText(lua_State* L) {
+ lua_pushboolean(L, get_obj<SkPaint>(L, 1)->isDevKernText());
+ return 1;
+}
+
+static int lpaint_isLCDRenderText(lua_State* L) {
+ lua_pushboolean(L, get_obj<SkPaint>(L, 1)->isLCDRenderText());
+ return 1;
+}
+
+static int lpaint_setLCDRenderText(lua_State* L) {
+ get_obj<SkPaint>(L, 1)->setLCDRenderText(lua2bool(L, 2));
+ return 1;
+}
+
+static int lpaint_isEmbeddedBitmapText(lua_State* L) {
+ lua_pushboolean(L, get_obj<SkPaint>(L, 1)->isEmbeddedBitmapText());
+ return 1;
+}
+
+static int lpaint_isAutohinted(lua_State* L) {
+ lua_pushboolean(L, get_obj<SkPaint>(L, 1)->isAutohinted());
+ return 1;
+}
+
+static int lpaint_isVerticalText(lua_State* L) {
+ lua_pushboolean(L, get_obj<SkPaint>(L, 1)->isVerticalText());
+ return 1;
+}
+
+static int lpaint_getAlpha(lua_State* L) {
+ SkLua(L).pushScalar(byte2unit(get_obj<SkPaint>(L, 1)->getAlpha()));
+ return 1;
+}
+
+static int lpaint_setAlpha(lua_State* L) {
+ get_obj<SkPaint>(L, 1)->setAlpha(unit2byte(lua2scalar(L, 2)));
+ return 0;
+}
+
+static int lpaint_getColor(lua_State* L) {
+ SkLua(L).pushColor(get_obj<SkPaint>(L, 1)->getColor());
+ return 1;
+}
+
+static int lpaint_setColor(lua_State* L) {
+ get_obj<SkPaint>(L, 1)->setColor(lua2color(L, 2));
+ return 0;
+}
+
+static int lpaint_getTextSize(lua_State* L) {
+ SkLua(L).pushScalar(get_obj<SkPaint>(L, 1)->getTextSize());
+ return 1;
+}
+
+static int lpaint_getTextScaleX(lua_State* L) {
+ SkLua(L).pushScalar(get_obj<SkPaint>(L, 1)->getTextScaleX());
+ return 1;
+}
+
+static int lpaint_getTextSkewX(lua_State* L) {
+ SkLua(L).pushScalar(get_obj<SkPaint>(L, 1)->getTextSkewX());
+ return 1;
+}
+
+static int lpaint_setTextSize(lua_State* L) {
+ get_obj<SkPaint>(L, 1)->setTextSize(lua2scalar(L, 2));
+ return 0;
+}
+
+static int lpaint_getTypeface(lua_State* L) {
+ push_ref(L, get_obj<SkPaint>(L, 1)->getTypeface());
+ return 1;
+}
+
+static int lpaint_setTypeface(lua_State* L) {
+ get_obj<SkPaint>(L, 1)->setTypeface(sk_ref_sp(get_ref<SkTypeface>(L, 2)));
+ return 0;
+}
+
+static int lpaint_getHinting(lua_State* L) {
+ SkLua(L).pushU32(get_obj<SkPaint>(L, 1)->getHinting());
+ return 1;
+}
+
+static int lpaint_getFilterQuality(lua_State* L) {
+ SkLua(L).pushU32(get_obj<SkPaint>(L, 1)->getFilterQuality());
+ return 1;
+}
+
+static int lpaint_setFilterQuality(lua_State* L) {
+ int level = lua2int_def(L, 2, -1);
+ if (level >= 0 && level <= 3) {
+ get_obj<SkPaint>(L, 1)->setFilterQuality((SkFilterQuality)level);
+ }
+ return 0;
+}
+
+static int lpaint_getFontID(lua_State* L) {
+ SkTypeface* face = get_obj<SkPaint>(L, 1)->getTypeface();
+ SkLua(L).pushU32(SkTypeface::UniqueID(face));
+ return 1;
+}
+
+static const struct {
+ const char* fLabel;
+ SkPaint::Align fAlign;
+} gAlignRec[] = {
+ { "left", SkPaint::kLeft_Align },
+ { "center", SkPaint::kCenter_Align },
+ { "right", SkPaint::kRight_Align },
+};
+
+static int lpaint_getTextAlign(lua_State* L) {
+ SkPaint::Align align = get_obj<SkPaint>(L, 1)->getTextAlign();
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gAlignRec); ++i) {
+ if (gAlignRec[i].fAlign == align) {
+ lua_pushstring(L, gAlignRec[i].fLabel);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int lpaint_setTextAlign(lua_State* L) {
+ if (lua_isstring(L, 2)) {
+ size_t len;
+ const char* label = lua_tolstring(L, 2, &len);
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gAlignRec); ++i) {
+ if (!strcmp(gAlignRec[i].fLabel, label)) {
+ get_obj<SkPaint>(L, 1)->setTextAlign(gAlignRec[i].fAlign);
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+static int lpaint_getStroke(lua_State* L) {
+ lua_pushboolean(L, SkPaint::kStroke_Style == get_obj<SkPaint>(L, 1)->getStyle());
+ return 1;
+}
+
+static int lpaint_setStroke(lua_State* L) {
+ SkPaint::Style style;
+
+ if (lua_toboolean(L, 2)) {
+ style = SkPaint::kStroke_Style;
+ } else {
+ style = SkPaint::kFill_Style;
+ }
+ get_obj<SkPaint>(L, 1)->setStyle(style);
+ return 0;
+}
+
+static int lpaint_getStrokeCap(lua_State* L) {
+ SkLua(L).pushU32(get_obj<SkPaint>(L, 1)->getStrokeCap());
+ return 1;
+}
+
+static int lpaint_getStrokeJoin(lua_State* L) {
+ SkLua(L).pushU32(get_obj<SkPaint>(L, 1)->getStrokeJoin());
+ return 1;
+}
+
+static int lpaint_getTextEncoding(lua_State* L) {
+ SkLua(L).pushU32(get_obj<SkPaint>(L, 1)->getTextEncoding());
+ return 1;
+}
+
+static int lpaint_getStrokeWidth(lua_State* L) {
+ SkLua(L).pushScalar(get_obj<SkPaint>(L, 1)->getStrokeWidth());
+ return 1;
+}
+
+static int lpaint_setStrokeWidth(lua_State* L) {
+ get_obj<SkPaint>(L, 1)->setStrokeWidth(lua2scalar(L, 2));
+ return 0;
+}
+
+static int lpaint_getStrokeMiter(lua_State* L) {
+ SkLua(L).pushScalar(get_obj<SkPaint>(L, 1)->getStrokeMiter());
+ return 1;
+}
+
+static int lpaint_measureText(lua_State* L) {
+ if (lua_isstring(L, 2)) {
+ size_t len;
+ const char* text = lua_tolstring(L, 2, &len);
+ SkLua(L).pushScalar(get_obj<SkPaint>(L, 1)->measureText(text, len));
+ return 1;
+ }
+ return 0;
+}
+
+struct FontMetrics {
+ SkScalar fTop; //!< The greatest distance above the baseline for any glyph (will be <= 0)
+ SkScalar fAscent; //!< The recommended distance above the baseline (will be <= 0)
+ SkScalar fDescent; //!< The recommended distance below the baseline (will be >= 0)
+ SkScalar fBottom; //!< The greatest distance below the baseline for any glyph (will be >= 0)
+ SkScalar fLeading; //!< The recommended distance to add between lines of text (will be >= 0)
+ SkScalar fAvgCharWidth; //!< the average character width (>= 0)
+ SkScalar fXMin; //!< The minimum bounding box x value for all glyphs
+ SkScalar fXMax; //!< The maximum bounding box x value for all glyphs
+ SkScalar fXHeight; //!< the height of an 'x' in px, or 0 if no 'x' in face
+};
+
+static int lpaint_getFontMetrics(lua_State* L) {
+ SkPaint::FontMetrics fm;
+ SkScalar height = get_obj<SkPaint>(L, 1)->getFontMetrics(&fm);
+
+ lua_newtable(L);
+ setfield_scalar(L, "top", fm.fTop);
+ setfield_scalar(L, "ascent", fm.fAscent);
+ setfield_scalar(L, "descent", fm.fDescent);
+ setfield_scalar(L, "bottom", fm.fBottom);
+ setfield_scalar(L, "leading", fm.fLeading);
+ SkLua(L).pushScalar(height);
+ return 2;
+}
+
+static int lpaint_getEffects(lua_State* L) {
+ const SkPaint* paint = get_obj<SkPaint>(L, 1);
+
+ lua_newtable(L);
+ setfield_bool_if(L, "looper", !!paint->getLooper());
+ setfield_bool_if(L, "pathEffect", !!paint->getPathEffect());
+ setfield_bool_if(L, "rasterizer", !!paint->getRasterizer());
+ setfield_bool_if(L, "maskFilter", !!paint->getMaskFilter());
+ setfield_bool_if(L, "shader", !!paint->getShader());
+ setfield_bool_if(L, "colorFilter", !!paint->getColorFilter());
+ setfield_bool_if(L, "imageFilter", !!paint->getImageFilter());
+ return 1;
+}
+
+static int lpaint_getColorFilter(lua_State* L) {
+ const SkPaint* paint = get_obj<SkPaint>(L, 1);
+ SkColorFilter* cf = paint->getColorFilter();
+ if (cf) {
+ push_ref(L, cf);
+ return 1;
+ }
+ return 0;
+}
+
+static int lpaint_setColorFilter(lua_State* L) {
+ SkPaint* paint = get_obj<SkPaint>(L, 1);
+ paint->setColorFilter(sk_ref_sp(get_ref<SkColorFilter>(L, 2)));
+ return 0;
+}
+
+static int lpaint_getImageFilter(lua_State* L) {
+ const SkPaint* paint = get_obj<SkPaint>(L, 1);
+ SkImageFilter* imf = paint->getImageFilter();
+ if (imf) {
+ push_ref(L, imf);
+ return 1;
+ }
+ return 0;
+}
+
+static int lpaint_setImageFilter(lua_State* L) {
+ SkPaint* paint = get_obj<SkPaint>(L, 1);
+ paint->setImageFilter(get_ref<SkImageFilter>(L, 2));
+ return 0;
+}
+
+static int lpaint_getShader(lua_State* L) {
+ const SkPaint* paint = get_obj<SkPaint>(L, 1);
+ SkShader* shader = paint->getShader();
+ if (shader) {
+ push_ref(L, shader);
+ return 1;
+ }
+ return 0;
+}
+
+static int lpaint_setShader(lua_State* L) {
+ SkPaint* paint = get_obj<SkPaint>(L, 1);
+ paint->setShader(sk_ref_sp(get_ref<SkShader>(L, 2)));
+ return 0;
+}
+
+static int lpaint_getPathEffect(lua_State* L) {
+ const SkPaint* paint = get_obj<SkPaint>(L, 1);
+ SkPathEffect* pe = paint->getPathEffect();
+ if (pe) {
+ push_ref(L, pe);
+ return 1;
+ }
+ return 0;
+}
+
+static int lpaint_getFillPath(lua_State* L) {
+ const SkPaint* paint = get_obj<SkPaint>(L, 1);
+ const SkPath* path = get_obj<SkPath>(L, 2);
+
+ SkPath fillpath;
+ paint->getFillPath(*path, &fillpath);
+
+ SkLua lua(L);
+ lua.pushPath(fillpath);
+
+ return 1;
+}
+
+static int lpaint_gc(lua_State* L) {
+ get_obj<SkPaint>(L, 1)->~SkPaint();
+ return 0;
+}
+
+static const struct luaL_Reg gSkPaint_Methods[] = {
+ { "isAntiAlias", lpaint_isAntiAlias },
+ { "setAntiAlias", lpaint_setAntiAlias },
+ { "isDither", lpaint_isDither },
+ { "setDither", lpaint_setDither },
+ { "getFilterQuality", lpaint_getFilterQuality },
+ { "setFilterQuality", lpaint_setFilterQuality },
+ { "isUnderlineText", lpaint_isUnderlineText },
+ { "isStrikeThruText", lpaint_isStrikeThruText },
+ { "isFakeBoldText", lpaint_isFakeBoldText },
+ { "isLinearText", lpaint_isLinearText },
+ { "isSubpixelText", lpaint_isSubpixelText },
+ { "setSubpixelText", lpaint_setSubpixelText },
+ { "isDevKernText", lpaint_isDevKernText },
+ { "isLCDRenderText", lpaint_isLCDRenderText },
+ { "setLCDRenderText", lpaint_setLCDRenderText },
+ { "isEmbeddedBitmapText", lpaint_isEmbeddedBitmapText },
+ { "isAutohinted", lpaint_isAutohinted },
+ { "isVerticalText", lpaint_isVerticalText },
+ { "getAlpha", lpaint_getAlpha },
+ { "setAlpha", lpaint_setAlpha },
+ { "getColor", lpaint_getColor },
+ { "setColor", lpaint_setColor },
+ { "getTextSize", lpaint_getTextSize },
+ { "setTextSize", lpaint_setTextSize },
+ { "getTextScaleX", lpaint_getTextScaleX },
+ { "getTextSkewX", lpaint_getTextSkewX },
+ { "getTypeface", lpaint_getTypeface },
+ { "setTypeface", lpaint_setTypeface },
+ { "getHinting", lpaint_getHinting },
+ { "getFontID", lpaint_getFontID },
+ { "getTextAlign", lpaint_getTextAlign },
+ { "setTextAlign", lpaint_setTextAlign },
+ { "getStroke", lpaint_getStroke },
+ { "setStroke", lpaint_setStroke },
+ { "getStrokeCap", lpaint_getStrokeCap },
+ { "getStrokeJoin", lpaint_getStrokeJoin },
+ { "getTextEncoding", lpaint_getTextEncoding },
+ { "getStrokeWidth", lpaint_getStrokeWidth },
+ { "setStrokeWidth", lpaint_setStrokeWidth },
+ { "getStrokeMiter", lpaint_getStrokeMiter },
+ { "measureText", lpaint_measureText },
+ { "getFontMetrics", lpaint_getFontMetrics },
+ { "getEffects", lpaint_getEffects },
+ { "getColorFilter", lpaint_getColorFilter },
+ { "setColorFilter", lpaint_setColorFilter },
+ { "getImageFilter", lpaint_getImageFilter },
+ { "setImageFilter", lpaint_setImageFilter },
+ { "getShader", lpaint_getShader },
+ { "setShader", lpaint_setShader },
+ { "getPathEffect", lpaint_getPathEffect },
+ { "getFillPath", lpaint_getFillPath },
+ { "__gc", lpaint_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static const char* mode2string(SkShader::TileMode mode) {
+ static const char* gNames[] = { "clamp", "repeat", "mirror" };
+ SkASSERT((unsigned)mode < SK_ARRAY_COUNT(gNames));
+ return gNames[mode];
+}
+
+static const char* gradtype2string(SkShader::GradientType t) {
+ static const char* gNames[] = {
+ "none", "color", "linear", "radial", "radial2", "sweep", "conical"
+ };
+ SkASSERT((unsigned)t < SK_ARRAY_COUNT(gNames));
+ return gNames[t];
+}
+
+static int lshader_isOpaque(lua_State* L) {
+ SkShader* shader = get_ref<SkShader>(L, 1);
+ return shader && shader->isOpaque();
+}
+
+static int lshader_isAImage(lua_State* L) {
+ SkShader* shader = get_ref<SkShader>(L, 1);
+ if (shader) {
+ SkMatrix matrix;
+ SkShader::TileMode modes[2];
+ if (SkImage* image = shader->isAImage(&matrix, modes)) {
+ lua_newtable(L);
+ setfield_number(L, "id", image->uniqueID());
+ setfield_number(L, "width", image->width());
+ setfield_number(L, "height", image->height());
+ setfield_string(L, "tileX", mode2string(modes[0]));
+ setfield_string(L, "tileY", mode2string(modes[1]));
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int lshader_asAGradient(lua_State* L) {
+ SkShader* shader = get_ref<SkShader>(L, 1);
+ if (shader) {
+ SkShader::GradientInfo info;
+ sk_bzero(&info, sizeof(info));
+
+ SkShader::GradientType t = shader->asAGradient(&info);
+
+ if (SkShader::kNone_GradientType != t) {
+ SkAutoTArray<SkScalar> pos(info.fColorCount);
+ info.fColorOffsets = pos.get();
+ shader->asAGradient(&info);
+
+ lua_newtable(L);
+ setfield_string(L, "type", gradtype2string(t));
+ setfield_string(L, "tile", mode2string(info.fTileMode));
+ setfield_number(L, "colorCount", info.fColorCount);
+
+ lua_newtable(L);
+ for (int i = 0; i < info.fColorCount; i++) {
+ // Lua uses 1-based indexing
+ setarray_scalar(L, i+1, pos[i]);
+ }
+ lua_setfield(L, -2, "positions");
+
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int lshader_gc(lua_State* L) {
+ get_ref<SkShader>(L, 1)->unref();
+ return 0;
+}
+
+static const struct luaL_Reg gSkShader_Methods[] = {
+ { "isOpaque", lshader_isOpaque },
+ { "isAImage", lshader_isAImage },
+ { "asAGradient", lshader_asAGradient },
+ { "__gc", lshader_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lpatheffect_asADash(lua_State* L) {
+ SkPathEffect* pe = get_ref<SkPathEffect>(L, 1);
+ if (pe) {
+ SkPathEffect::DashInfo info;
+ SkPathEffect::DashType dashType = pe->asADash(&info);
+ if (SkPathEffect::kDash_DashType == dashType) {
+ SkAutoTArray<SkScalar> intervals(info.fCount);
+ info.fIntervals = intervals.get();
+ pe->asADash(&info);
+ SkLua(L).pushDash(info);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int lpatheffect_gc(lua_State* L) {
+ get_ref<SkPathEffect>(L, 1)->unref();
+ return 0;
+}
+
+static const struct luaL_Reg gSkPathEffect_Methods[] = {
+ { "asADash", lpatheffect_asADash },
+ { "__gc", lpatheffect_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lpcolorfilter_gc(lua_State* L) {
+ get_ref<SkColorFilter>(L, 1)->unref();
+ return 0;
+}
+
+static const struct luaL_Reg gSkColorFilter_Methods[] = {
+ { "__gc", lpcolorfilter_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lpimagefilter_gc(lua_State* L) {
+ get_ref<SkImageFilter>(L, 1)->unref();
+ return 0;
+}
+
+static const struct luaL_Reg gSkImageFilter_Methods[] = {
+ { "__gc", lpimagefilter_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lmatrix_getType(lua_State* L) {
+ SkMatrix::TypeMask mask = get_obj<SkMatrix>(L, 1)->getType();
+
+ lua_newtable(L);
+ setfield_boolean(L, "translate", SkToBool(mask & SkMatrix::kTranslate_Mask));
+ setfield_boolean(L, "scale", SkToBool(mask & SkMatrix::kScale_Mask));
+ setfield_boolean(L, "affine", SkToBool(mask & SkMatrix::kAffine_Mask));
+ setfield_boolean(L, "perspective", SkToBool(mask & SkMatrix::kPerspective_Mask));
+ return 1;
+}
+
+static int lmatrix_getScaleX(lua_State* L) {
+ lua_pushnumber(L, get_obj<SkMatrix>(L,1)->getScaleX());
+ return 1;
+}
+
+static int lmatrix_getScaleY(lua_State* L) {
+ lua_pushnumber(L, get_obj<SkMatrix>(L,1)->getScaleY());
+ return 1;
+}
+
+static int lmatrix_getTranslateX(lua_State* L) {
+ lua_pushnumber(L, get_obj<SkMatrix>(L,1)->getTranslateX());
+ return 1;
+}
+
+static int lmatrix_getTranslateY(lua_State* L) {
+ lua_pushnumber(L, get_obj<SkMatrix>(L,1)->getTranslateY());
+ return 1;
+}
+
+static int lmatrix_invert(lua_State* L) {
+ lua_pushboolean(L, get_obj<SkMatrix>(L, 1)->invert(get_obj<SkMatrix>(L, 2)));
+ return 1;
+}
+
+static int lmatrix_mapXY(lua_State* L) {
+ SkPoint pt = { lua2scalar(L, 2), lua2scalar(L, 3) };
+ get_obj<SkMatrix>(L, 1)->mapPoints(&pt, &pt, 1);
+ lua_pushnumber(L, pt.x());
+ lua_pushnumber(L, pt.y());
+ return 2;
+}
+
+static int lmatrix_setRectToRect(lua_State* L) {
+ SkMatrix* matrix = get_obj<SkMatrix>(L, 1);
+ SkRect srcR, dstR;
+ lua2rect(L, 2, &srcR);
+ lua2rect(L, 3, &dstR);
+ const char* scaleToFitStr = lua_tostring(L, 4);
+ SkMatrix::ScaleToFit scaleToFit = SkMatrix::kFill_ScaleToFit;
+
+ if (scaleToFitStr) {
+ const struct {
+ const char* fName;
+ SkMatrix::ScaleToFit fScaleToFit;
+ } rec[] = {
+ { "fill", SkMatrix::kFill_ScaleToFit },
+ { "start", SkMatrix::kStart_ScaleToFit },
+ { "center", SkMatrix::kCenter_ScaleToFit },
+ { "end", SkMatrix::kEnd_ScaleToFit },
+ };
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(rec); ++i) {
+ if (strcmp(rec[i].fName, scaleToFitStr) == 0) {
+ scaleToFit = rec[i].fScaleToFit;
+ break;
+ }
+ }
+ }
+
+ matrix->setRectToRect(srcR, dstR, scaleToFit);
+ return 0;
+}
+
+static const struct luaL_Reg gSkMatrix_Methods[] = {
+ { "getType", lmatrix_getType },
+ { "getScaleX", lmatrix_getScaleX },
+ { "getScaleY", lmatrix_getScaleY },
+ { "getTranslateX", lmatrix_getTranslateX },
+ { "getTranslateY", lmatrix_getTranslateY },
+ { "setRectToRect", lmatrix_setRectToRect },
+ { "invert", lmatrix_invert },
+ { "mapXY", lmatrix_mapXY },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lpath_getBounds(lua_State* L) {
+ SkLua(L).pushRect(get_obj<SkPath>(L, 1)->getBounds());
+ return 1;
+}
+
+static const char* fill_type_to_str(SkPath::FillType fill) {
+ switch (fill) {
+ case SkPath::kEvenOdd_FillType:
+ return "even-odd";
+ case SkPath::kWinding_FillType:
+ return "winding";
+ case SkPath::kInverseEvenOdd_FillType:
+ return "inverse-even-odd";
+ case SkPath::kInverseWinding_FillType:
+ return "inverse-winding";
+ }
+ return "unknown";
+}
+
+static int lpath_getFillType(lua_State* L) {
+ SkPath::FillType fill = get_obj<SkPath>(L, 1)->getFillType();
+ SkLua(L).pushString(fill_type_to_str(fill));
+ return 1;
+}
+
+static SkString segment_masks_to_str(uint32_t segmentMasks) {
+ SkString result;
+ bool first = true;
+ if (SkPath::kLine_SegmentMask & segmentMasks) {
+ result.append("line");
+ first = false;
+ SkDEBUGCODE(segmentMasks &= ~SkPath::kLine_SegmentMask;)
+ }
+ if (SkPath::kQuad_SegmentMask & segmentMasks) {
+ if (!first) {
+ result.append(" ");
+ }
+ result.append("quad");
+ first = false;
+ SkDEBUGCODE(segmentMasks &= ~SkPath::kQuad_SegmentMask;)
+ }
+ if (SkPath::kConic_SegmentMask & segmentMasks) {
+ if (!first) {
+ result.append(" ");
+ }
+ result.append("conic");
+ first = false;
+ SkDEBUGCODE(segmentMasks &= ~SkPath::kConic_SegmentMask;)
+ }
+ if (SkPath::kCubic_SegmentMask & segmentMasks) {
+ if (!first) {
+ result.append(" ");
+ }
+ result.append("cubic");
+ SkDEBUGCODE(segmentMasks &= ~SkPath::kCubic_SegmentMask;)
+ }
+ SkASSERT(0 == segmentMasks);
+ return result;
+}
+
+static int lpath_getSegmentTypes(lua_State* L) {
+ uint32_t segMasks = get_obj<SkPath>(L, 1)->getSegmentMasks();
+ SkLua(L).pushString(segment_masks_to_str(segMasks));
+ return 1;
+}
+
+static int lpath_isConvex(lua_State* L) {
+ bool isConvex = SkPath::kConvex_Convexity == get_obj<SkPath>(L, 1)->getConvexity();
+ SkLua(L).pushBool(isConvex);
+ return 1;
+}
+
+static int lpath_isEmpty(lua_State* L) {
+ lua_pushboolean(L, get_obj<SkPath>(L, 1)->isEmpty());
+ return 1;
+}
+
+static int lpath_isRect(lua_State* L) {
+ SkRect r;
+ bool pred = get_obj<SkPath>(L, 1)->isRect(&r);
+ int ret_count = 1;
+ lua_pushboolean(L, pred);
+ if (pred) {
+ SkLua(L).pushRect(r);
+ ret_count += 1;
+ }
+ return ret_count;
+}
+
+static const char* dir2string(SkPath::Direction dir) {
+ static const char* gStr[] = {
+ "unknown", "cw", "ccw"
+ };
+ SkASSERT((unsigned)dir < SK_ARRAY_COUNT(gStr));
+ return gStr[dir];
+}
+
+static int lpath_isNestedFillRects(lua_State* L) {
+ SkRect rects[2];
+ SkPath::Direction dirs[2];
+ bool pred = get_obj<SkPath>(L, 1)->isNestedFillRects(rects, dirs);
+ int ret_count = 1;
+ lua_pushboolean(L, pred);
+ if (pred) {
+ SkLua lua(L);
+ lua.pushRect(rects[0]);
+ lua.pushRect(rects[1]);
+ lua_pushstring(L, dir2string(dirs[0]));
+ lua_pushstring(L, dir2string(dirs[1]));
+ ret_count += 4;
+ }
+ return ret_count;
+}
+
+static int lpath_countPoints(lua_State* L) {
+ lua_pushinteger(L, get_obj<SkPath>(L, 1)->countPoints());
+ return 1;
+}
+
+static int lpath_getVerbs(lua_State* L) {
+ const SkPath* path = get_obj<SkPath>(L, 1);
+ SkPath::Iter iter(*path, false);
+ SkPoint pts[4];
+
+ lua_newtable(L);
+
+ bool done = false;
+ int i = 0;
+ do {
+ switch (iter.next(pts, true)) {
+ case SkPath::kMove_Verb:
+ setarray_string(L, ++i, "move");
+ break;
+ case SkPath::kClose_Verb:
+ setarray_string(L, ++i, "close");
+ break;
+ case SkPath::kLine_Verb:
+ setarray_string(L, ++i, "line");
+ break;
+ case SkPath::kQuad_Verb:
+ setarray_string(L, ++i, "quad");
+ break;
+ case SkPath::kConic_Verb:
+ setarray_string(L, ++i, "conic");
+ break;
+ case SkPath::kCubic_Verb:
+ setarray_string(L, ++i, "cubic");
+ break;
+ case SkPath::kDone_Verb:
+ setarray_string(L, ++i, "done");
+ done = true;
+ break;
+ }
+ } while (!done);
+
+ return 1;
+}
+
+static int lpath_reset(lua_State* L) {
+ get_obj<SkPath>(L, 1)->reset();
+ return 0;
+}
+
+static int lpath_moveTo(lua_State* L) {
+ get_obj<SkPath>(L, 1)->moveTo(lua2scalar(L, 2), lua2scalar(L, 3));
+ return 0;
+}
+
+static int lpath_lineTo(lua_State* L) {
+ get_obj<SkPath>(L, 1)->lineTo(lua2scalar(L, 2), lua2scalar(L, 3));
+ return 0;
+}
+
+static int lpath_quadTo(lua_State* L) {
+ get_obj<SkPath>(L, 1)->quadTo(lua2scalar(L, 2), lua2scalar(L, 3),
+ lua2scalar(L, 4), lua2scalar(L, 5));
+ return 0;
+}
+
+static int lpath_cubicTo(lua_State* L) {
+ get_obj<SkPath>(L, 1)->cubicTo(lua2scalar(L, 2), lua2scalar(L, 3),
+ lua2scalar(L, 4), lua2scalar(L, 5),
+ lua2scalar(L, 6), lua2scalar(L, 7));
+ return 0;
+}
+
+static int lpath_close(lua_State* L) {
+ get_obj<SkPath>(L, 1)->close();
+ return 0;
+}
+
+static int lpath_gc(lua_State* L) {
+ get_obj<SkPath>(L, 1)->~SkPath();
+ return 0;
+}
+
+static const struct luaL_Reg gSkPath_Methods[] = {
+ { "getBounds", lpath_getBounds },
+ { "getFillType", lpath_getFillType },
+ { "getSegmentTypes", lpath_getSegmentTypes },
+ { "getVerbs", lpath_getVerbs },
+ { "isConvex", lpath_isConvex },
+ { "isEmpty", lpath_isEmpty },
+ { "isRect", lpath_isRect },
+ { "isNestedFillRects", lpath_isNestedFillRects },
+ { "countPoints", lpath_countPoints },
+ { "reset", lpath_reset },
+ { "moveTo", lpath_moveTo },
+ { "lineTo", lpath_lineTo },
+ { "quadTo", lpath_quadTo },
+ { "cubicTo", lpath_cubicTo },
+ { "close", lpath_close },
+ { "__gc", lpath_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static const char* rrect_type(const SkRRect& rr) {
+ switch (rr.getType()) {
+ case SkRRect::kEmpty_Type: return "empty";
+ case SkRRect::kRect_Type: return "rect";
+ case SkRRect::kOval_Type: return "oval";
+ case SkRRect::kSimple_Type: return "simple";
+ case SkRRect::kNinePatch_Type: return "nine-patch";
+ case SkRRect::kComplex_Type: return "complex";
+ }
+ SkDEBUGFAIL("never get here");
+ return "";
+}
+
+static int lrrect_rect(lua_State* L) {
+ SkLua(L).pushRect(get_obj<SkRRect>(L, 1)->rect());
+ return 1;
+}
+
+static int lrrect_type(lua_State* L) {
+ lua_pushstring(L, rrect_type(*get_obj<SkRRect>(L, 1)));
+ return 1;
+}
+
+static int lrrect_radii(lua_State* L) {
+ int corner = SkToInt(lua_tointeger(L, 2));
+ SkVector v;
+ if (corner < 0 || corner > 3) {
+ SkDebugf("bad corner index %d", corner);
+ v.set(0, 0);
+ } else {
+ v = get_obj<SkRRect>(L, 1)->radii((SkRRect::Corner)corner);
+ }
+ lua_pushnumber(L, v.fX);
+ lua_pushnumber(L, v.fY);
+ return 2;
+}
+
+static int lrrect_gc(lua_State* L) {
+ get_obj<SkRRect>(L, 1)->~SkRRect();
+ return 0;
+}
+
+static const struct luaL_Reg gSkRRect_Methods[] = {
+ { "rect", lrrect_rect },
+ { "type", lrrect_type },
+ { "radii", lrrect_radii },
+ { "__gc", lrrect_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int limage_width(lua_State* L) {
+ lua_pushinteger(L, get_ref<SkImage>(L, 1)->width());
+ return 1;
+}
+
+static int limage_height(lua_State* L) {
+ lua_pushinteger(L, get_ref<SkImage>(L, 1)->height());
+ return 1;
+}
+
+static int limage_newShader(lua_State* L) {
+ SkShader::TileMode tmode = SkShader::kClamp_TileMode;
+ const SkMatrix* localM = nullptr;
+ push_ref(L, get_ref<SkImage>(L, 1)->makeShader(tmode, tmode, localM));
+ return 1;
+}
+
+static int limage_gc(lua_State* L) {
+ get_ref<SkImage>(L, 1)->unref();
+ return 0;
+}
+
+static const struct luaL_Reg gSkImage_Methods[] = {
+ { "width", limage_width },
+ { "height", limage_height },
+ { "newShader", limage_newShader },
+ { "__gc", limage_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lsurface_width(lua_State* L) {
+ lua_pushinteger(L, get_ref<SkSurface>(L, 1)->width());
+ return 1;
+}
+
+static int lsurface_height(lua_State* L) {
+ lua_pushinteger(L, get_ref<SkSurface>(L, 1)->height());
+ return 1;
+}
+
+static int lsurface_getCanvas(lua_State* L) {
+ SkCanvas* canvas = get_ref<SkSurface>(L, 1)->getCanvas();
+ if (nullptr == canvas) {
+ lua_pushnil(L);
+ } else {
+ push_ref(L, canvas);
+ // Note: we don't unref the canvas, since getCanvas() did not ref it.
+ // Warning: this is a bit odd: Lua now holds a reference to this canvas, but if the
+ // script lets the real owner (the surface) go away while still holding the canvas,
+ // the canvas dangles. Ideally we would take another ref on the surface and have the
+ // native object know it is supposed to be treated as a canvas.
+ }
+ return 1;
+}
+
+static int lsurface_newImageSnapshot(lua_State* L) {
+ sk_sp<SkImage> image = get_ref<SkSurface>(L, 1)->makeImageSnapshot();
+ if (!image) {
+ lua_pushnil(L);
+ } else {
+ push_ref(L, image);
+ }
+ return 1;
+}
+
+static int lsurface_newSurface(lua_State* L) {
+ int width = lua2int_def(L, 2, 0);
+ int height = lua2int_def(L, 3, 0);
+ SkImageInfo info = SkImageInfo::MakeN32Premul(width, height);
+ auto surface = get_ref<SkSurface>(L, 1)->makeSurface(info);
+ if (nullptr == surface) {
+ lua_pushnil(L);
+ } else {
+ push_ref(L, surface);
+ }
+ return 1;
+}
+
+static int lsurface_gc(lua_State* L) {
+ get_ref<SkSurface>(L, 1)->unref();
+ return 0;
+}
+
+static const struct luaL_Reg gSkSurface_Methods[] = {
+ { "width", lsurface_width },
+ { "height", lsurface_height },
+ { "getCanvas", lsurface_getCanvas },
+ { "newImageSnapshot", lsurface_newImageSnapshot },
+ { "newSurface", lsurface_newSurface },
+ { "__gc", lsurface_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lpicturerecorder_beginRecording(lua_State* L) {
+ const SkScalar w = lua2scalar_def(L, 2, -1);
+ const SkScalar h = lua2scalar_def(L, 3, -1);
+ if (w <= 0 || h <= 0) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ SkCanvas* canvas = get_obj<SkPictureRecorder>(L, 1)->beginRecording(w, h);
+ if (nullptr == canvas) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ push_ref(L, canvas);
+ return 1;
+}
+
+static int lpicturerecorder_getCanvas(lua_State* L) {
+ SkCanvas* canvas = get_obj<SkPictureRecorder>(L, 1)->getRecordingCanvas();
+ if (nullptr == canvas) {
+ lua_pushnil(L);
+ return 1;
+ }
+ push_ref(L, canvas);
+ return 1;
+}
+
+static int lpicturerecorder_endRecording(lua_State* L) {
+ sk_sp<SkPicture> pic = get_obj<SkPictureRecorder>(L, 1)->finishRecordingAsPicture();
+ if (!pic) {
+ lua_pushnil(L);
+ return 1;
+ }
+ push_ref(L, std::move(pic));
+ return 1;
+}
+
+static int lpicturerecorder_gc(lua_State* L) {
+ get_obj<SkPictureRecorder>(L, 1)->~SkPictureRecorder();
+ return 0;
+}
+
+static const struct luaL_Reg gSkPictureRecorder_Methods[] = {
+ { "beginRecording", lpicturerecorder_beginRecording },
+ { "getCanvas", lpicturerecorder_getCanvas },
+ { "endRecording", lpicturerecorder_endRecording },
+ { "__gc", lpicturerecorder_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lpicture_width(lua_State* L) {
+ lua_pushnumber(L, get_ref<SkPicture>(L, 1)->cullRect().width());
+ return 1;
+}
+
+static int lpicture_height(lua_State* L) {
+ lua_pushnumber(L, get_ref<SkPicture>(L, 1)->cullRect().height());
+ return 1;
+}
+
+static int lpicture_gc(lua_State* L) {
+ get_ref<SkPicture>(L, 1)->unref();
+ return 0;
+}
+
+static const struct luaL_Reg gSkPicture_Methods[] = {
+ { "width", lpicture_width },
+ { "height", lpicture_height },
+ { "__gc", lpicture_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int ltextblob_bounds(lua_State* L) {
+ SkLua(L).pushRect(get_ref<SkTextBlob>(L, 1)->bounds());
+ return 1;
+}
+
+static int ltextblob_gc(lua_State* L) {
+ SkSafeUnref(get_ref<SkTextBlob>(L, 1));
+ return 0;
+}
+
+static const struct luaL_Reg gSkTextBlob_Methods[] = {
+ { "bounds", ltextblob_bounds },
+ { "__gc", ltextblob_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int ltypeface_getFamilyName(lua_State* L) {
+ SkString str;
+ get_ref<SkTypeface>(L, 1)->getFamilyName(&str);
+ lua_pushstring(L, str.c_str());
+ return 1;
+}
+
+static int ltypeface_getStyle(lua_State* L) {
+ lua_pushnumber(L, (double)get_ref<SkTypeface>(L, 1)->style());
+ return 1;
+}
+
+static int ltypeface_gc(lua_State* L) {
+ SkSafeUnref(get_ref<SkTypeface>(L, 1));
+ return 0;
+}
+
+static const struct luaL_Reg gSkTypeface_Methods[] = {
+ { "getFamilyName", ltypeface_getFamilyName },
+ { "getStyle", ltypeface_getStyle },
+ { "__gc", ltypeface_gc },
+ { nullptr, nullptr }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class AutoCallLua {
+public:
+ AutoCallLua(lua_State* L, const char func[], const char verb[]) : fL(L) {
+ lua_getglobal(L, func);
+ if (!lua_isfunction(L, -1)) {
+ int t = lua_type(L, -1);
+ SkDebugf("--- expected function %d\n", t);
+ }
+
+ lua_newtable(L);
+ setfield_string(L, "verb", verb);
+ }
+
+ ~AutoCallLua() {
+ if (lua_pcall(fL, 1, 0, 0) != LUA_OK) {
+ SkDebugf("lua err: %s\n", lua_tostring(fL, -1));
+ }
+ lua_settop(fL, -1);
+ }
+
+private:
+ lua_State* fL;
+};
+
+#define AUTO_LUA(verb) AutoCallLua acl(fL, fFunc.c_str(), verb)
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int lsk_newDocumentPDF(lua_State* L) {
+ const char* file = nullptr;
+ if (lua_gettop(L) > 0 && lua_isstring(L, 1)) {
+ file = lua_tolstring(L, 1, nullptr);
+ }
+
+ sk_sp<SkDocument> doc = SkDocument::MakePDF(file);
+ if (nullptr == doc) {
+ // do I need to push a nil on the stack and return 1?
+ return 0;
+ } else {
+ push_ref(L, std::move(doc));
+ return 1;
+ }
+}
+
+static int lsk_newBlurImageFilter(lua_State* L) {
+ SkScalar sigmaX = lua2scalar_def(L, 1, 0);
+ SkScalar sigmaY = lua2scalar_def(L, 2, 0);
+ sk_sp<SkImageFilter> imf(SkBlurImageFilter::Make(sigmaX, sigmaY, nullptr));
+ if (!imf) {
+ lua_pushnil(L);
+ } else {
+ push_ref(L, std::move(imf));
+ }
+ return 1;
+}
+
+static int lsk_newLinearGradient(lua_State* L) {
+ SkScalar x0 = lua2scalar_def(L, 1, 0);
+ SkScalar y0 = lua2scalar_def(L, 2, 0);
+ SkColor c0 = lua2color(L, 3);
+ SkScalar x1 = lua2scalar_def(L, 4, 0);
+ SkScalar y1 = lua2scalar_def(L, 5, 0);
+ SkColor c1 = lua2color(L, 6);
+
+ SkPoint pts[] = { { x0, y0 }, { x1, y1 } };
+ SkColor colors[] = { c0, c1 };
+ sk_sp<SkShader> s(SkGradientShader::MakeLinear(pts, colors, nullptr, 2,
+ SkShader::kClamp_TileMode));
+ if (!s) {
+ lua_pushnil(L);
+ } else {
+ push_ref(L, std::move(s));
+ }
+ return 1;
+}
+
+static int lsk_newMatrix(lua_State* L) {
+ push_new<SkMatrix>(L)->reset();
+ return 1;
+}
+
+static int lsk_newPaint(lua_State* L) {
+ push_new<SkPaint>(L);
+ return 1;
+}
+
+static int lsk_newPath(lua_State* L) {
+ push_new<SkPath>(L);
+ return 1;
+}
+
+static int lsk_newPictureRecorder(lua_State* L) {
+ push_new<SkPictureRecorder>(L);
+ return 1;
+}
+
+static int lsk_newRRect(lua_State* L) {
+ push_new<SkRRect>(L)->setEmpty();
+ return 1;
+}
+
+#include "SkTextBox.h"
+// Sk.newTextBlob(text, rect, paint)
+static int lsk_newTextBlob(lua_State* L) {
+ const char* text = lua_tolstring(L, 1, nullptr);
+ SkRect bounds;
+ lua2rect(L, 2, &bounds);
+ const SkPaint& paint = *get_obj<SkPaint>(L, 3);
+
+ SkTextBox box;
+ box.setMode(SkTextBox::kLineBreak_Mode);
+ box.setBox(bounds);
+ box.setText(text, strlen(text), paint);
+
+ SkScalar newBottom;
+ push_ref<SkTextBlob>(L, box.snapshotTextBlob(&newBottom));
+ SkLua(L).pushScalar(newBottom);
+ return 2;
+}
+
+static int lsk_newTypeface(lua_State* L) {
+ const char* name = nullptr;
+ int style = SkTypeface::kNormal;
+
+ int count = lua_gettop(L);
+ if (count > 0 && lua_isstring(L, 1)) {
+ name = lua_tolstring(L, 1, nullptr);
+ if (count > 1 && lua_isnumber(L, 2)) {
+ style = lua_tointegerx(L, 2, nullptr) & SkTypeface::kBoldItalic;
+ }
+ }
+
+ sk_sp<SkTypeface> face(SkTypeface::MakeFromName(name, SkFontStyle::FromOldStyle(style)));
+// SkDebugf("---- name <%s> style=%d, face=%p ref=%d\n", name, style, face, face->getRefCnt());
+ if (nullptr == face) {
+ face = SkTypeface::MakeDefault();
+ }
+ push_ref(L, std::move(face));
+ return 1;
+}
+
+static int lsk_newRasterSurface(lua_State* L) {
+ int width = lua2int_def(L, 1, 0);
+ int height = lua2int_def(L, 2, 0);
+ SkImageInfo info = SkImageInfo::MakeN32Premul(width, height);
+ SkSurfaceProps props(0, kUnknown_SkPixelGeometry);
+ auto surface = SkSurface::MakeRaster(info, &props);
+ if (nullptr == surface) {
+ lua_pushnil(L);
+ } else {
+ push_ref(L, surface);
+ }
+ return 1;
+}
+
+static int lsk_loadImage(lua_State* L) {
+ if (lua_gettop(L) > 0 && lua_isstring(L, 1)) {
+ const char* name = lua_tolstring(L, 1, nullptr);
+ sk_sp<SkData> data(SkData::MakeFromFileName(name));
+ if (data) {
+ auto image = SkImage::MakeFromEncoded(std::move(data));
+ if (image) {
+ push_ref(L, std::move(image));
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+static void register_Sk(lua_State* L) {
+ lua_newtable(L);
+ lua_pushvalue(L, -1);
+ lua_setglobal(L, "Sk");
+ // the Sk table is still on top
+
+ setfield_function(L, "newDocumentPDF", lsk_newDocumentPDF);
+ setfield_function(L, "loadImage", lsk_loadImage);
+ setfield_function(L, "newBlurImageFilter", lsk_newBlurImageFilter);
+ setfield_function(L, "newLinearGradient", lsk_newLinearGradient);
+ setfield_function(L, "newMatrix", lsk_newMatrix);
+ setfield_function(L, "newPaint", lsk_newPaint);
+ setfield_function(L, "newPath", lsk_newPath);
+ setfield_function(L, "newPictureRecorder", lsk_newPictureRecorder);
+ setfield_function(L, "newRRect", lsk_newRRect);
+ setfield_function(L, "newRasterSurface", lsk_newRasterSurface);
+ setfield_function(L, "newTextBlob", lsk_newTextBlob);
+ setfield_function(L, "newTypeface", lsk_newTypeface);
+ lua_pop(L, 1); // pop off the Sk table
+}
+
+#define REG_CLASS(L, C) \
+ do { \
+ luaL_newmetatable(L, get_mtname<C>()); \
+ lua_pushvalue(L, -1); \
+ lua_setfield(L, -2, "__index"); \
+ luaL_setfuncs(L, g##C##_Methods, 0); \
+ lua_pop(L, 1); /* pop off the meta-table */ \
+ } while (0)
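+
+// For reference (sketch only, not part of the upstream change): REG_CLASS(L, SkCanvas)
+// expands to roughly the following, binding gSkCanvas_Methods to the metatable name
+// that push_ref/get_ref<SkCanvas> look up elsewhere in this file:
+//
+//   luaL_newmetatable(L, get_mtname<SkCanvas>());
+//   lua_pushvalue(L, -1);
+//   lua_setfield(L, -2, "__index");          // mt.__index = mt
+//   luaL_setfuncs(L, gSkCanvas_Methods, 0);  // install the method table
+//   lua_pop(L, 1);                           // pop the metatable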
+
+void SkLua::Load(lua_State* L) {
+ register_Sk(L);
+ REG_CLASS(L, SkCanvas);
+ REG_CLASS(L, SkColorFilter);
+ REG_CLASS(L, SkDocument);
+ REG_CLASS(L, SkImage);
+ REG_CLASS(L, SkImageFilter);
+ REG_CLASS(L, SkMatrix);
+ REG_CLASS(L, SkPaint);
+ REG_CLASS(L, SkPath);
+ REG_CLASS(L, SkPathEffect);
+ REG_CLASS(L, SkPicture);
+ REG_CLASS(L, SkPictureRecorder);
+ REG_CLASS(L, SkRRect);
+ REG_CLASS(L, SkShader);
+ REG_CLASS(L, SkSurface);
+ REG_CLASS(L, SkTextBlob);
+ REG_CLASS(L, SkTypeface);
+}
+
+extern "C" int luaopen_skia(lua_State* L);
+extern "C" int luaopen_skia(lua_State* L) {
+ SkLua::Load(L);
+ return 0;
+}
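+
+// Hypothetical host-side sketch, not part of the upstream change: one way an embedder
+// could wire these bindings into its own interpreter. It assumes the Lua headers
+// included at the top of this file (lua.h/lualib.h/lauxlib.h) and uses only the
+// standard Lua C API plus SkLua::Load above; the name run_skia_script is invented for
+// illustration and nothing in the tree references it.
+#if 0  // illustration only
+static void run_skia_script(const char script[]) {
+    lua_State* L = luaL_newstate();
+    luaL_openlibs(L);       // standard Lua libraries (string, table, ...)
+    SkLua::Load(L);         // register the Sk table and the Sk* metatables
+    if (luaL_dostring(L, script) != LUA_OK) {
+        SkDebugf("lua err: %s\n", lua_tostring(L, -1));
+    }
+    lua_close(L);
+}
+#endif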
diff --git a/gfx/skia/skia/src/utils/SkLuaCanvas.cpp b/gfx/skia/skia/src/utils/SkLuaCanvas.cpp
new file mode 100644
index 000000000..6b769f0ba
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkLuaCanvas.cpp
@@ -0,0 +1,317 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkLuaCanvas.h"
+#include "SkLua.h"
+
+extern "C" {
+ #include "lua.h"
+ #include "lauxlib.h"
+}
+
+class AutoCallLua : public SkLua {
+public:
+ AutoCallLua(lua_State* L, const char func[], const char verb[]) : INHERITED(L) {
+ lua_getglobal(L, func);
+ if (!lua_isfunction(L, -1)) {
+ int t = lua_type(L, -1);
+ SkDebugf("--- expected function %d\n", t);
+ }
+
+ lua_newtable(L);
+ this->pushString(verb, "verb");
+ }
+
+ ~AutoCallLua() {
+ lua_State* L = this->get();
+ if (lua_pcall(L, 1, 0, 0) != LUA_OK) {
+ SkDebugf("lua err: %s\n", lua_tostring(L, -1));
+ }
+ lua_settop(L, -1);
+ }
+
+ void pushEncodedText(SkPaint::TextEncoding, const void*, size_t);
+
+private:
+ typedef SkLua INHERITED;
+};
+
+#define AUTO_LUA(verb) AutoCallLua lua(fL, fFunc.c_str(), verb)
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+void AutoCallLua::pushEncodedText(SkPaint::TextEncoding enc, const void* text,
+ size_t length) {
+ switch (enc) {
+ case SkPaint::kUTF8_TextEncoding:
+ this->pushString((const char*)text, length, "text");
+ break;
+ case SkPaint::kUTF16_TextEncoding: {
+ SkString str;
+ str.setUTF16((const uint16_t*)text, length);
+ this->pushString(str, "text");
+ } break;
+ case SkPaint::kGlyphID_TextEncoding:
+ this->pushArrayU16((const uint16_t*)text, SkToInt(length >> 1),
+ "glyphs");
+ break;
+ case SkPaint::kUTF32_TextEncoding:
+ break;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkLuaCanvas::pushThis() {
+ SkLua(fL).pushCanvas(this);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkLuaCanvas::SkLuaCanvas(int width, int height, lua_State* L, const char func[])
+ : INHERITED(width, height)
+ , fL(L)
+ , fFunc(func) {
+}
+
+SkLuaCanvas::~SkLuaCanvas() {}
+
+void SkLuaCanvas::willSave() {
+ AUTO_LUA("save");
+ this->INHERITED::willSave();
+}
+
+SkCanvas::SaveLayerStrategy SkLuaCanvas::getSaveLayerStrategy(const SaveLayerRec& rec) {
+ AUTO_LUA("saveLayer");
+ if (rec.fBounds) {
+ lua.pushRect(*rec.fBounds, "bounds");
+ }
+ if (rec.fPaint) {
+ lua.pushPaint(*rec.fPaint, "paint");
+ }
+
+ (void)this->INHERITED::getSaveLayerStrategy(rec);
+ // No need for a layer.
+ return kNoLayer_SaveLayerStrategy;
+}
+
+void SkLuaCanvas::willRestore() {
+ AUTO_LUA("restore");
+ this->INHERITED::willRestore();
+}
+
+void SkLuaCanvas::didConcat(const SkMatrix& matrix) {
+ switch (matrix.getType()) {
+ case SkMatrix::kTranslate_Mask: {
+ AUTO_LUA("translate");
+ lua.pushScalar(matrix.getTranslateX(), "dx");
+ lua.pushScalar(matrix.getTranslateY(), "dy");
+ break;
+ }
+ case SkMatrix::kScale_Mask: {
+ AUTO_LUA("scale");
+ lua.pushScalar(matrix.getScaleX(), "sx");
+ lua.pushScalar(matrix.getScaleY(), "sy");
+ break;
+ }
+ default: {
+ AUTO_LUA("concat");
+ // pushMatrix was added in https://codereview.chromium.org/203203004/
+ // but does not appear to have ever worked correctly, so it remains disabled:
+ // lua.pushMatrix(matrix);
+ break;
+ }
+ }
+
+ this->INHERITED::didConcat(matrix);
+}
+
+void SkLuaCanvas::didSetMatrix(const SkMatrix& matrix) {
+ this->INHERITED::didSetMatrix(matrix);
+}
+
+void SkLuaCanvas::onClipRect(const SkRect& r, ClipOp op, ClipEdgeStyle edgeStyle) {
+ AUTO_LUA("clipRect");
+ lua.pushRect(r, "rect");
+ lua.pushBool(kSoft_ClipEdgeStyle == edgeStyle, "aa");
+ this->INHERITED::onClipRect(r, op, edgeStyle);
+}
+
+void SkLuaCanvas::onClipRRect(const SkRRect& rrect, ClipOp op, ClipEdgeStyle edgeStyle) {
+ AUTO_LUA("clipRRect");
+ lua.pushRRect(rrect, "rrect");
+ lua.pushBool(kSoft_ClipEdgeStyle == edgeStyle, "aa");
+ this->INHERITED::onClipRRect(rrect, op, edgeStyle);
+}
+
+void SkLuaCanvas::onClipPath(const SkPath& path, ClipOp op, ClipEdgeStyle edgeStyle) {
+ AUTO_LUA("clipPath");
+ lua.pushPath(path, "path");
+ lua.pushBool(kSoft_ClipEdgeStyle == edgeStyle, "aa");
+ this->INHERITED::onClipPath(path, op, edgeStyle);
+}
+
+void SkLuaCanvas::onClipRegion(const SkRegion& deviceRgn, ClipOp op) {
+ AUTO_LUA("clipRegion");
+ this->INHERITED::onClipRegion(deviceRgn, op);
+}
+
+void SkLuaCanvas::onDrawPaint(const SkPaint& paint) {
+ AUTO_LUA("drawPaint");
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawPoints(PointMode mode, size_t count,
+ const SkPoint pts[], const SkPaint& paint) {
+ AUTO_LUA("drawPoints");
+ lua.pushArrayPoint(pts, SkToInt(count), "points");
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawOval(const SkRect& rect, const SkPaint& paint) {
+ AUTO_LUA("drawOval");
+ lua.pushRect(rect, "rect");
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawArc(const SkRect& rect, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) {
+ AUTO_LUA("drawArc");
+ lua.pushRect(rect, "rect");
+ lua.pushScalar(startAngle, "startAngle");
+ lua.pushScalar(sweepAngle, "sweepAngle");
+ lua.pushBool(useCenter, "useCenter");
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawRect(const SkRect& rect, const SkPaint& paint) {
+ AUTO_LUA("drawRect");
+ lua.pushRect(rect, "rect");
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ AUTO_LUA("drawRRect");
+ lua.pushRRect(rrect, "rrect");
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkPaint& paint) {
+ AUTO_LUA("drawDRRect");
+ lua.pushRRect(outer, "outer");
+ lua.pushRRect(inner, "inner");
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ AUTO_LUA("drawPath");
+ lua.pushPath(path, "path");
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawBitmap(const SkBitmap& bitmap, SkScalar x, SkScalar y,
+ const SkPaint* paint) {
+ AUTO_LUA("drawBitmap");
+ if (paint) {
+ lua.pushPaint(*paint, "paint");
+ }
+}
+
+void SkLuaCanvas::onDrawBitmapRect(const SkBitmap& bitmap, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint) {
+ AUTO_LUA("drawBitmapRect");
+ if (paint) {
+ lua.pushPaint(*paint, "paint");
+ }
+}
+
+void SkLuaCanvas::onDrawBitmapNine(const SkBitmap& bitmap, const SkIRect& center, const SkRect& dst,
+ const SkPaint* paint) {
+ AUTO_LUA("drawBitmapNine");
+ if (paint) {
+ lua.pushPaint(*paint, "paint");
+ }
+}
+
+void SkLuaCanvas::onDrawImage(const SkImage* image, SkScalar x, SkScalar y, const SkPaint* paint) {
+ AUTO_LUA("drawImage");
+ if (paint) {
+ lua.pushPaint(*paint, "paint");
+ }
+}
+
+void SkLuaCanvas::onDrawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint) {
+ AUTO_LUA("drawImageRect");
+ if (paint) {
+ lua.pushPaint(*paint, "paint");
+ }
+}
+
+void SkLuaCanvas::onDrawText(const void* text, size_t byteLength, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ AUTO_LUA("drawText");
+ lua.pushEncodedText(paint.getTextEncoding(), text, byteLength);
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawPosText(const void* text, size_t byteLength, const SkPoint pos[],
+ const SkPaint& paint) {
+ AUTO_LUA("drawPosText");
+ lua.pushEncodedText(paint.getTextEncoding(), text, byteLength);
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawPosTextH(const void* text, size_t byteLength, const SkScalar xpos[],
+ SkScalar constY, const SkPaint& paint) {
+ AUTO_LUA("drawPosTextH");
+ lua.pushEncodedText(paint.getTextEncoding(), text, byteLength);
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawTextOnPath(const void* text, size_t byteLength, const SkPath& path,
+ const SkMatrix* matrix, const SkPaint& paint) {
+ AUTO_LUA("drawTextOnPath");
+ lua.pushPath(path, "path");
+ lua.pushEncodedText(paint.getTextEncoding(), text, byteLength);
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawTextRSXform(const void* text, size_t byteLength, const SkRSXform xform[],
+ const SkRect* cull, const SkPaint& paint) {
+ AUTO_LUA("drawTextRSXform");
+ lua.pushEncodedText(paint.getTextEncoding(), text, byteLength);
+ // TODO: export other params
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawTextBlob(const SkTextBlob *blob, SkScalar x, SkScalar y,
+ const SkPaint &paint) {
+ AUTO_LUA("drawTextBlob");
+ lua.pushTextBlob(blob, "blob");
+ lua.pushScalar(x, "x");
+ lua.pushScalar(y, "y");
+ lua.pushPaint(paint, "paint");
+}
+
+void SkLuaCanvas::onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint) {
+ AUTO_LUA("drawPicture");
+ // call through so we can see the nested picture ops
+ this->INHERITED::onDrawPicture(picture, matrix, paint);
+}
+
+void SkLuaCanvas::onDrawVertices(VertexMode vmode, int vertexCount,
+ const SkPoint vertices[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint) {
+ AUTO_LUA("drawVertices");
+ lua.pushPaint(paint, "paint");
+}
diff --git a/gfx/skia/skia/src/utils/SkMatrix22.cpp b/gfx/skia/skia/src/utils/SkMatrix22.cpp
new file mode 100644
index 000000000..a13b72939
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkMatrix22.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMatrix.h"
+#include "SkMatrix22.h"
+#include "SkPoint.h"
+#include "SkScalar.h"
+
+void SkComputeGivensRotation(const SkVector& h, SkMatrix* G) {
+ const SkScalar& a = h.fX;
+ const SkScalar& b = h.fY;
+ SkScalar c, s;
+ if (0 == b) {
+ c = SkScalarCopySign(SK_Scalar1, a);
+ s = 0;
+ //r = SkScalarAbs(a);
+ } else if (0 == a) {
+ c = 0;
+ s = -SkScalarCopySign(SK_Scalar1, b);
+ //r = SkScalarAbs(b);
+ } else if (SkScalarAbs(b) > SkScalarAbs(a)) {
+ SkScalar t = a / b;
+ SkScalar u = SkScalarCopySign(SkScalarSqrt(SK_Scalar1 + t*t), b);
+ s = -SK_Scalar1 / u;
+ c = -s * t;
+ //r = b * u;
+ } else {
+ SkScalar t = b / a;
+ SkScalar u = SkScalarCopySign(SkScalarSqrt(SK_Scalar1 + t*t), a);
+ c = SK_Scalar1 / u;
+ s = -c * t;
+ //r = a * u;
+ }
+
+ G->setSinCos(s, c);
+}
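+
+// Hypothetical self-check sketch, not part of the upstream change: a numeric
+// spot-check of the contract documented in SkMatrix22.h (G rotates h onto the
+// positive x-axis with length hypot(h)). The guard macro and function name are
+// invented here, so nothing in the tree builds or calls this.
+#ifdef SK_MATRIX22_SELFTEST
+static void check_givens_rotation() {
+    const SkVector h = SkVector::Make(3, 4);  // hypot(3, 4) == 5
+    SkMatrix G;
+    SkComputeGivensRotation(h, &G);
+    SkPoint mapped;
+    G.mapPoints(&mapped, &h, 1);
+    SkASSERT(SkScalarNearlyEqual(mapped.fX, 5));  // rotated onto +x with length 5
+    SkASSERT(SkScalarNearlyEqual(mapped.fY, 0));
+}
+#endif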
diff --git a/gfx/skia/skia/src/utils/SkMatrix22.h b/gfx/skia/skia/src/utils/SkMatrix22.h
new file mode 100644
index 000000000..bc567eab8
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkMatrix22.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMatrix22_DEFINED
+#define SkMatrix22_DEFINED
+
+#include "SkPoint.h"
+
+class SkMatrix;
+
+/** Find the Givens matrix G, which is the rotational matrix
+ * that rotates the vector h to the positive horizontal axis.
+ * G * h = [hypot(h), 0]
+ *
+ * This is equivalent to
+ *
+ * SkScalar r = h.length();
+ * SkScalar r_inv = r ? SkScalarInvert(r) : 0;
+ * h.scale(r_inv);
+ * G->setSinCos(-h.fY, h.fX);
+ *
+ * but has better numerical stability by using (partial) hypot,
+ * and saves a multiply by not computing r.
+ */
+void SkComputeGivensRotation(const SkVector& h, SkMatrix* G);
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkMeshUtils.cpp b/gfx/skia/skia/src/utils/SkMeshUtils.cpp
new file mode 100644
index 000000000..27eccc250
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkMeshUtils.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkMeshUtils.h"
+#include "SkCanvas.h"
+#include "SkPaint.h"
+
+SkMeshIndices::SkMeshIndices() {
+ sk_bzero(this, sizeof(*this));
+}
+
+SkMeshIndices::~SkMeshIndices() {
+ sk_free(fStorage);
+}
+
+bool SkMeshIndices::init(SkPoint tex[], uint16_t indices[],
+ int texW, int texH, int rows, int cols) {
+ if (rows < 2 || cols < 2) {
+ sk_free(fStorage);
+ fStorage = nullptr;
+ fTex = nullptr;
+ fIndices = nullptr;
+ fTexCount = fIndexCount = 0;
+ return false;
+ }
+
+ sk_free(fStorage);
+ fStorage = nullptr;
+
+ fTexCount = rows * cols;
+ rows -= 1;
+ cols -= 1;
+ fIndexCount = rows * cols * 6;
+
+ if (tex) {
+ fTex = tex;
+ fIndices = indices;
+ } else {
+ fStorage = sk_malloc_throw(fTexCount * sizeof(SkPoint) +
+ fIndexCount * sizeof(uint16_t));
+ fTex = (SkPoint*)fStorage;
+ fIndices = (uint16_t*)(fTex + fTexCount);
+ }
+
+ // compute the indices
+ {
+ uint16_t* idx = fIndices;
+ int index = 0;
+ for (int y = 0; y < cols; y++) {
+ for (int x = 0; x < rows; x++) {
+ *idx++ = index;
+ *idx++ = index + rows + 1;
+ *idx++ = index + 1;
+
+ *idx++ = index + 1;
+ *idx++ = index + rows + 1;
+ *idx++ = index + rows + 2;
+
+ index += 1;
+ }
+ index += 1;
+ }
+ }
+
+ // compute texture coordinates
+ {
+ SkPoint* tex = fTex;
+ const SkScalar dx = SkIntToScalar(texW) / rows;
+ const SkScalar dy = SkIntToScalar(texH) / cols;
+ for (int y = 0; y <= cols; y++) {
+ for (int x = 0; x <= rows; x++) {
+ tex->set(x*dx, y*dy);
+ tex += 1;
+ }
+ }
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkShader.h"
+
+void SkMeshUtils::Draw(SkCanvas* canvas, const SkBitmap& bitmap,
+ int rows, int cols, const SkPoint verts[],
+ const SkColor colors[], const SkPaint& paint) {
+ SkMeshIndices idx;
+
+ if (idx.init(bitmap.width(), bitmap.height(), rows, cols)) {
+ SkPaint p(paint);
+ p.setShader(SkShader::MakeBitmapShader(bitmap,
+ SkShader::kClamp_TileMode,
+ SkShader::kClamp_TileMode));
+ canvas->drawVertices(SkCanvas::kTriangles_VertexMode,
+ rows * cols, verts, idx.tex(), colors, nullptr,
+ idx.indices(), idx.indexCount(), p);
+ }
+}
diff --git a/gfx/skia/skia/src/utils/SkMultiPictureDocument.cpp b/gfx/skia/skia/src/utils/SkMultiPictureDocument.cpp
new file mode 100644
index 000000000..217e7a300
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkMultiPictureDocument.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMultiPictureDocument.h"
+#include "SkMultiPictureDocumentPriv.h"
+#include "SkPicture.h"
+#include "SkPictureRecorder.h"
+#include "SkStream.h"
+#include "SkTArray.h"
+
+/*
+ File format:
+ BEGINNING_OF_FILE:
+ kMagic
+ uint32_t version_number (==2)
+ uint32_t page_count
+ {
+ float sizeX
+ float sizeY
+ } * page_count
+ skp file
+*/
+
+namespace {
+static SkCanvas* trim(SkCanvas* canvas,
+ SkScalar w, SkScalar h,
+ const SkRect& trimBox) {
+ // Only trim if necessary.
+ if (trimBox != SkRect::MakeWH(w, h)) {
+ // All SkDocument implementations implement trimBox using a
+ // clip+translate.
+ canvas->clipRect(trimBox);
+ canvas->translate(trimBox.x(), trimBox.y());
+ }
+ return canvas;
+}
+
+struct MultiPictureDocument final : public SkDocument {
+ SkPictureRecorder fPictureRecorder;
+ SkSize fCurrentPageSize;
+ SkTArray<sk_sp<SkPicture>> fPages;
+ SkTArray<SkSize> fSizes;
+ MultiPictureDocument(SkWStream* s, void (*d)(SkWStream*, bool))
+ : SkDocument(s, d) {}
+ ~MultiPictureDocument() { this->close(); }
+
+ SkCanvas* onBeginPage(SkScalar w, SkScalar h, const SkRect& c) override {
+ fCurrentPageSize.set(w, h);
+ return trim(fPictureRecorder.beginRecording(w, h), w, h, c);
+ }
+ void onEndPage() override {
+ fSizes.push_back(fCurrentPageSize);
+ fPages.push_back(fPictureRecorder.finishRecordingAsPicture());
+ }
+ void onClose(SkWStream* wStream) override {
+ SkASSERT(wStream);
+ SkASSERT(wStream->bytesWritten() == 0);
+ wStream->writeText(SkMultiPictureDocumentProtocol::kMagic);
+ wStream->write32(SkMultiPictureDocumentProtocol::kVersion);
+ wStream->write32(SkToU32(fPages.count()));
+ for (SkSize s : fSizes) {
+ wStream->write(&s, sizeof(s));
+ }
+ SkSize bigsize = SkMultiPictureDocumentProtocol::Join(fSizes);
+ SkCanvas* c = fPictureRecorder.beginRecording(SkRect::MakeSize(bigsize));
+ for (const sk_sp<SkPicture>& page : fPages) {
+ c->drawPicture(page);
+ c->drawAnnotation(SkRect::MakeEmpty(),
+ SkMultiPictureDocumentProtocol::kEndPage,
+ nullptr);
+ }
+ sk_sp<SkPicture> p = fPictureRecorder.finishRecordingAsPicture();
+ p->serialize(wStream);
+ fPages.reset();
+ fSizes.reset();
+ return;
+ }
+ void onAbort() override {
+ fPages.reset();
+ fSizes.reset();
+ }
+};
+}
+
+sk_sp<SkDocument> SkMakeMultiPictureDocument(SkWStream* wStream) {
+ return sk_make_sp<MultiPictureDocument>(wStream, nullptr);
+}
diff --git a/gfx/skia/skia/src/utils/SkMultiPictureDocument.h b/gfx/skia/skia/src/utils/SkMultiPictureDocument.h
new file mode 100644
index 000000000..ac782606f
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkMultiPictureDocument.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkMultiPictureDocument_DEFINED
+#define SkMultiPictureDocument_DEFINED
+
+/*
+ This format is not intended to be used in production.
+
+ For clients looking for a way to represent a document in memory,
+
+ struct Doc {
+ std::vector<sk_sp<SkPicture>> fPages;
+ std::vector<SkSize> fPageSizes;
+ };
+
+ or
+
+ struct Page {
+ sk_sp<SkPicture> fPage;
+ SkSize fPageSize;
+ };
+ std::vector<Page> pages;
+
+ would work much better.
+
+ Multi-SkPicture (MSKP) files are still useful for debugging and
+ testing.
+
+ The downsides of this format are currently:
+ - no way to extract a single page; must read the entire file at once.
+ - must use `dm` to convert to another format before passing into
+ standard skp tools.
+ - `dm` can extract the first page to skp, but no others.
+
+ TODO(halcanary): replace with something that addresses these issues.
+ */
+
+#include "SkDocument.h"
+
+/** Writes into an experimental, undocumented file format that is
+ useful for debugging documents printed via Skia. */
+SK_API sk_sp<SkDocument> SkMakeMultiPictureDocument(SkWStream* dst);
+
+#endif // SkMultiPictureDocument_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkMultiPictureDocumentPriv.h b/gfx/skia/skia/src/utils/SkMultiPictureDocumentPriv.h
new file mode 100644
index 000000000..6d5ab47d2
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkMultiPictureDocumentPriv.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMultiPictureDocumentPriv_DEFINED
+#define SkMultiPictureDocumentPriv_DEFINED
+
+#include "SkTArray.h"
+#include "SkSize.h"
+
+namespace SkMultiPictureDocumentProtocol {
+static constexpr char kMagic[] = "Skia Multi-Picture Doc\n\n";
+
+static constexpr char kEndPage[] = "SkMultiPictureEndPage";
+
+const uint32_t kVersion = 2;
+
+inline SkSize Join(const SkTArray<SkSize>& sizes) {
+ SkSize joined = SkSize::Make(0, 0);
+ for (SkSize s : sizes) {
+ joined = SkSize::Make(SkTMax(joined.width(), s.width()),
+ SkTMax(joined.height(), s.height()));
+ }
+ return joined;
+}
+
+}
+
+#endif // SkMultiPictureDocumentPriv_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkMultiPictureDocumentReader.cpp b/gfx/skia/skia/src/utils/SkMultiPictureDocumentReader.cpp
new file mode 100644
index 000000000..3924f3eb4
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkMultiPictureDocumentReader.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkMultiPictureDocumentPriv.h"
+#include "SkMultiPictureDocumentReader.h"
+#include "SkPicture.h"
+#include "SkStream.h"
+#include "SkPictureRecorder.h"
+#include "SkNWayCanvas.h"
+
+bool SkMultiPictureDocumentReader::init(SkStreamSeekable* stream) {
+ if (!stream) {
+ return false;
+ }
+ stream->seek(0);
+ const size_t size = sizeof(SkMultiPictureDocumentProtocol::kMagic) - 1;
+ char buffer[size];
+ if (size != stream->read(buffer, size) ||
+ 0 != memcmp(SkMultiPictureDocumentProtocol::kMagic, buffer, size)) {
+ stream = nullptr;
+ return false;
+ }
+ bool good = true;
+ uint32_t versionNumber = stream->readU32();
+ if (versionNumber != SkMultiPictureDocumentProtocol::kVersion) {
+ return false;
+ }
+ uint32_t pageCount = stream->readU32();
+ fSizes.reset(pageCount);
+ for (uint32_t i = 0; i < pageCount; ++i) {
+ SkSize size;
+ good &= sizeof(size) == stream->read(&size, sizeof(size));
+ fSizes[i] = size;
+ }
+ fOffset = stream->getPosition();
+ return good;
+}
+
+namespace {
+struct PagerCanvas : public SkNWayCanvas {
+ SkPictureRecorder fRecorder;
+ const SkTArray<SkSize>* fSizes;
+ SkTArray<sk_sp<SkPicture>>* fDest;
+ PagerCanvas(SkISize wh,
+ const SkTArray<SkSize>* s,
+ SkTArray<sk_sp<SkPicture>>* d)
+ : SkNWayCanvas(wh.width(), wh.height()), fSizes(s), fDest(d) {
+ this->nextCanvas();
+ }
+ void nextCanvas() {
+ int i = fDest->count();
+ if (i < fSizes->count()) {
+ SkRect bounds = SkRect::MakeSize((*fSizes)[i]);
+ this->addCanvas(fRecorder.beginRecording(bounds));
+ }
+ }
+ void onDrawAnnotation(const SkRect& r, const char* key, SkData* d) override {
+ if (0 == strcmp(key, SkMultiPictureDocumentProtocol::kEndPage)) {
+ this->removeAll();
+ if (fRecorder.getRecordingCanvas()) {
+ fDest->emplace_back(fRecorder.finishRecordingAsPicture());
+ }
+ this->nextCanvas();
+ } else {
+ this->SkNWayCanvas::onDrawAnnotation(r, key, d);
+ }
+ }
+};
+} // namespace
+
+sk_sp<SkPicture> SkMultiPictureDocumentReader::readPage(SkStreamSeekable* stream,
+ int pageNumber) const {
+ SkASSERT(pageNumber >= 0);
+ SkASSERT(pageNumber < fSizes.count());
+ if (0 == fPages.count()) {
+ stream->seek(fOffset); // jump to beginning of skp
+ auto picture = SkPicture::MakeFromStream(stream);
+ SkISize size = SkMultiPictureDocumentProtocol::Join(fSizes).toCeil();
+ PagerCanvas canvas(size, &fSizes, &this->fPages);
+ // Must call playback(), not drawPicture() to reach
+ // PagerCanvas::onDrawAnnotation().
+ picture->playback(&canvas);
+ if (fPages.count() != fSizes.count()) {
+ SkDEBUGF(("Malformed SkMultiPictureDocument\n"));
+ }
+ }
+ // Allow for malformed document.
+ return pageNumber < fPages.count() ? fPages[pageNumber] : nullptr;
+}
diff --git a/gfx/skia/skia/src/utils/SkMultiPictureDocumentReader.h b/gfx/skia/skia/src/utils/SkMultiPictureDocumentReader.h
new file mode 100644
index 000000000..e0473a653
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkMultiPictureDocumentReader.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkMultiPictureDocumentReader_DEFINED
+#define SkMultiPictureDocumentReader_DEFINED
+
+#include "../private/SkTArray.h"
+#include "SkPicture.h"
+#include "SkSize.h"
+#include "SkStream.h"
+
+/** A lightweight helper class for reading a Skia MultiPictureDocument. */
+class SkMultiPictureDocumentReader {
+public:
+ /** Initialize the MultiPictureDocument. Does not take ownership
+ of the SkStreamSeekable. */
+ bool init(SkStreamSeekable*);
+
+ /** Return to factory settings. */
+ void reset() {
+ fSizes.reset();
+ fPages.reset();
+ }
+
+ /** Call this after calling init() (otherwise you'll always get zero). */
+ int pageCount() const { return fSizes.count(); }
+
+ /** Deserialize a page from the stream. Call init() first. The
+ SkStreamSeekable doesn't need to be the same object, but
+ should point to the same information as before. */
+ sk_sp<SkPicture> readPage(SkStreamSeekable*, int) const;
+
+ /** Fetch the size of the given page, without deserializing the
+ entire page. */
+ SkSize pageSize(int i) const { return fSizes[i]; }
+
+private:
+ SkTArray<SkSize> fSizes;
+ size_t fOffset;
+ mutable SkTArray<sk_sp<SkPicture>> fPages;
+};
+
+#endif // SkMultiPictureDocumentReader_DEFINED
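For symmetry with the writer, a minimal reader-side sketch using this class follows; the input filename is hypothetical and mirrors the writer sketch earlier.

    // Sketch: enumerate the pages of an .mskp file.
    #include "SkMultiPictureDocumentReader.h"
    #include "SkPicture.h"
    #include "SkStream.h"

    static void read_example_mskp() {
        SkFILEStream in("example.mskp");         // hypothetical input; SkFILEStream is seekable
        SkMultiPictureDocumentReader reader;
        if (!reader.init(&in)) {
            return;                              // wrong magic or version
        }
        for (int i = 0; i < reader.pageCount(); ++i) {
            SkSize size = reader.pageSize(i);    // available without deserializing the page
            sk_sp<SkPicture> page = reader.readPage(&in, i);
            // ... replay `page` into a canvas sized to `size` ...
        }
    }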
diff --git a/gfx/skia/skia/src/utils/SkNWayCanvas.cpp b/gfx/skia/skia/src/utils/SkNWayCanvas.cpp
new file mode 100644
index 000000000..e19e7a8e5
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkNWayCanvas.cpp
@@ -0,0 +1,325 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkNWayCanvas.h"
+
+SkNWayCanvas::SkNWayCanvas(int width, int height)
+ : INHERITED(width, height) {}
+
+SkNWayCanvas::~SkNWayCanvas() {
+ this->removeAll();
+}
+
+void SkNWayCanvas::addCanvas(SkCanvas* canvas) {
+ if (canvas) {
+ canvas->ref();
+ *fList.append() = canvas;
+ }
+}
+
+void SkNWayCanvas::removeCanvas(SkCanvas* canvas) {
+ int index = fList.find(canvas);
+ if (index >= 0) {
+ canvas->unref();
+ fList.removeShuffle(index);
+ }
+}
+
+void SkNWayCanvas::removeAll() {
+ fList.unrefAll();
+ fList.reset();
+}
+
+///////////////////////////////////////////////////////////////////////////
+// These are forwarded to the N canvases we're referencing
+
+class SkNWayCanvas::Iter {
+public:
+ Iter(const SkTDArray<SkCanvas*>& list) : fList(list) {
+ fIndex = 0;
+ }
+ bool next() {
+ if (fIndex < fList.count()) {
+ fCanvas = fList[fIndex++];
+ return true;
+ }
+ return false;
+ }
+ SkCanvas* operator->() { return fCanvas; }
+
+private:
+ const SkTDArray<SkCanvas*>& fList;
+ int fIndex;
+ SkCanvas* fCanvas;
+};
+
+void SkNWayCanvas::willSave() {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->save();
+ }
+
+ this->INHERITED::willSave();
+}
+
+SkCanvas::SaveLayerStrategy SkNWayCanvas::getSaveLayerStrategy(const SaveLayerRec& rec) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->saveLayer(rec);
+ }
+
+ this->INHERITED::getSaveLayerStrategy(rec);
+ // No need for a layer.
+ return kNoLayer_SaveLayerStrategy;
+}
+
+void SkNWayCanvas::willRestore() {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->restore();
+ }
+ this->INHERITED::willRestore();
+}
+
+void SkNWayCanvas::didConcat(const SkMatrix& matrix) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->concat(matrix);
+ }
+ this->INHERITED::didConcat(matrix);
+}
+
+void SkNWayCanvas::didSetMatrix(const SkMatrix& matrix) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->setMatrix(matrix);
+ }
+ this->INHERITED::didSetMatrix(matrix);
+}
+
+void SkNWayCanvas::onClipRect(const SkRect& rect, ClipOp op, ClipEdgeStyle edgeStyle) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->clipRect(rect, op, kSoft_ClipEdgeStyle == edgeStyle);
+ }
+ this->INHERITED::onClipRect(rect, op, edgeStyle);
+}
+
+void SkNWayCanvas::onClipRRect(const SkRRect& rrect, ClipOp op, ClipEdgeStyle edgeStyle) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->clipRRect(rrect, op, kSoft_ClipEdgeStyle == edgeStyle);
+ }
+ this->INHERITED::onClipRRect(rrect, op, edgeStyle);
+}
+
+void SkNWayCanvas::onClipPath(const SkPath& path, ClipOp op, ClipEdgeStyle edgeStyle) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->clipPath(path, op, kSoft_ClipEdgeStyle == edgeStyle);
+ }
+ this->INHERITED::onClipPath(path, op, edgeStyle);
+}
+
+void SkNWayCanvas::onClipRegion(const SkRegion& deviceRgn, ClipOp op) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->clipRegion(deviceRgn, op);
+ }
+ this->INHERITED::onClipRegion(deviceRgn, op);
+}
+
+void SkNWayCanvas::onDrawPaint(const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawPaint(paint);
+ }
+}
+
+void SkNWayCanvas::onDrawPoints(PointMode mode, size_t count, const SkPoint pts[],
+ const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawPoints(mode, count, pts, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawRect(const SkRect& rect, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawRect(rect, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawOval(const SkRect& rect, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawOval(rect, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawArc(const SkRect& rect, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawArc(rect, startAngle, sweepAngle, useCenter, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawRRect(rrect, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawDRRect(outer, inner, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawPath(path, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawBitmap(const SkBitmap& bitmap, SkScalar x, SkScalar y,
+ const SkPaint* paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawBitmap(bitmap, x, y, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawBitmapRect(const SkBitmap& bitmap, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->legacy_drawBitmapRect(bitmap, src, dst, paint, (SrcRectConstraint)constraint);
+ }
+}
+
+void SkNWayCanvas::onDrawBitmapNine(const SkBitmap& bitmap, const SkIRect& center,
+ const SkRect& dst, const SkPaint* paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawBitmapNine(bitmap, center, dst, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawImage(const SkImage* image, SkScalar left, SkScalar top,
+ const SkPaint* paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawImage(image, left, top, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->legacy_drawImageRect(image, src, dst, paint, constraint);
+ }
+}
+
+void SkNWayCanvas::onDrawText(const void* text, size_t byteLength, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawText(text, byteLength, x, y, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawPosText(const void* text, size_t byteLength, const SkPoint pos[],
+ const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawPosText(text, byteLength, pos, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawPosTextH(const void* text, size_t byteLength, const SkScalar xpos[],
+ SkScalar constY, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawPosTextH(text, byteLength, xpos, constY, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawTextOnPath(const void* text, size_t byteLength, const SkPath& path,
+ const SkMatrix* matrix, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawTextOnPath(text, byteLength, path, matrix, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawTextRSXform(const void* text, size_t byteLength, const SkRSXform xform[],
+ const SkRect* cull, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawTextRSXform(text, byteLength, xform, cull, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint &paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawTextBlob(blob, x, y, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawPicture(picture, matrix, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawVertices(VertexMode vmode, int vertexCount,
+ const SkPoint vertices[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawVertices(vmode, vertexCount, vertices, texs, colors, xmode,
+ indices, indexCount, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkXfermode* xmode,
+ const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawPatch(cubics, colors, texCoords, xmode, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawAnnotation(const SkRect& rect, const char key[], SkData* data) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawAnnotation(rect, key, data);
+ }
+}
+
+#ifdef SK_SUPPORT_LEGACY_DRAWFILTER
+SkDrawFilter* SkNWayCanvas::setDrawFilter(SkDrawFilter* filter) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->setDrawFilter(filter);
+ }
+ return this->INHERITED::setDrawFilter(filter);
+}
+#endif
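As a usage note, SkNWayCanvas simply fans each call out to every canvas that has been added to it, which makes it handy for teeing one draw pass into several targets. A small sketch (both raster surfaces are placeholders):

    #include "SkNWayCanvas.h"
    #include "SkPaint.h"
    #include "SkSurface.h"

    static void draw_to_two_targets() {
        sk_sp<SkSurface> a = SkSurface::MakeRasterN32Premul(100, 100);
        sk_sp<SkSurface> b = SkSurface::MakeRasterN32Premul(100, 100);
        SkNWayCanvas nway(100, 100);
        nway.addCanvas(a->getCanvas());
        nway.addCanvas(b->getCanvas());
        SkPaint red;
        red.setColor(SK_ColorRED);
        nway.drawRect(SkRect::MakeWH(50, 50), red);  // lands on both surfaces
        nway.removeAll();                            // drop the refs before the surfaces go away
    }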
diff --git a/gfx/skia/skia/src/utils/SkNullCanvas.cpp b/gfx/skia/skia/src/utils/SkNullCanvas.cpp
new file mode 100644
index 000000000..b5ee8d30a
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkNullCanvas.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkNullCanvas.h"
+
+#include "SkCanvas.h"
+#include "SkNWayCanvas.h"
+
+
+SkCanvas* SkCreateNullCanvas() {
+ // An N-Way canvas forwards calls to N canvases. When N == 0 it's
+ // effectively a null canvas.
+ return new SkNWayCanvas(0, 0);
+}
diff --git a/gfx/skia/skia/src/utils/SkOSFile.cpp b/gfx/skia/skia/src/utils/SkOSFile.cpp
new file mode 100644
index 000000000..a0c003ba8
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkOSFile.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkOSFile.h"
+
+SkString SkOSPath::Join(const char *rootPath, const char *relativePath) {
+ SkString result(rootPath);
+ if (!result.endsWith(SkPATH_SEPARATOR) && !result.isEmpty()) {
+ result.appendUnichar(SkPATH_SEPARATOR);
+ }
+ result.append(relativePath);
+ return result;
+}
+
+SkString SkOSPath::Basename(const char* fullPath) {
+ if (!fullPath) {
+ return SkString();
+ }
+ const char* filename = strrchr(fullPath, SkPATH_SEPARATOR);
+ if (nullptr == filename) {
+ filename = fullPath;
+ } else {
+ ++filename;
+ }
+ return SkString(filename);
+}
+
+SkString SkOSPath::Dirname(const char* fullPath) {
+ if (!fullPath) {
+ return SkString();
+ }
+ const char* end = strrchr(fullPath, SkPATH_SEPARATOR);
+ if (nullptr == end) {
+ return SkString();
+ }
+ if (end == fullPath) {
+ SkASSERT(fullPath[0] == SkPATH_SEPARATOR);
+ ++end;
+ }
+ return SkString(fullPath, end - fullPath);
+}
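The three helpers above behave like the familiar join/basename/dirname trio. For example, assuming a '/' path separator:

    SkString full = SkOSPath::Join("/tmp/skia", "render.skp");  // "/tmp/skia/render.skp"
    SkString base = SkOSPath::Basename(full.c_str());           // "render.skp"
    SkString dir  = SkOSPath::Dirname(full.c_str());            // "/tmp/skia"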
diff --git a/gfx/skia/skia/src/utils/SkPaintFilterCanvas.cpp b/gfx/skia/skia/src/utils/SkPaintFilterCanvas.cpp
new file mode 100644
index 000000000..15d76d612
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkPaintFilterCanvas.cpp
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPaintFilterCanvas.h"
+
+#include "SkPaint.h"
+#include "SkTLazy.h"
+
+class SkPaintFilterCanvas::AutoPaintFilter {
+public:
+ AutoPaintFilter(const SkPaintFilterCanvas* canvas, Type type, const SkPaint* paint)
+ : fPaint(paint) {
+ fShouldDraw = canvas->onFilter(&fPaint, type);
+ }
+
+ AutoPaintFilter(const SkPaintFilterCanvas* canvas, Type type, const SkPaint& paint)
+ : AutoPaintFilter(canvas, type, &paint) { }
+
+ const SkPaint* paint() const { return fPaint; }
+
+ bool shouldDraw() const { return fShouldDraw; }
+
+private:
+ SkTCopyOnFirstWrite<SkPaint> fPaint;
+ bool fShouldDraw;
+};
+
+SkPaintFilterCanvas::SkPaintFilterCanvas(int width, int height) : INHERITED(width, height) { }
+
+SkPaintFilterCanvas::SkPaintFilterCanvas(SkCanvas *canvas)
+ : INHERITED(canvas->imageInfo().width(), canvas->imageInfo().height()) {
+
+ // Transfer matrix & clip state before adding the target canvas.
+ SkIRect devClip;
+ canvas->getClipDeviceBounds(&devClip);
+ this->clipRect(SkRect::Make(devClip));
+ this->setMatrix(canvas->getTotalMatrix());
+
+ this->addCanvas(canvas);
+}
+
+void SkPaintFilterCanvas::onDrawPaint(const SkPaint& paint) {
+ AutoPaintFilter apf(this, kPaint_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawPaint(*apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawPoints(PointMode mode, size_t count, const SkPoint pts[],
+ const SkPaint& paint) {
+ AutoPaintFilter apf(this, kPoint_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawPoints(mode, count, pts, *apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawRect(const SkRect& rect, const SkPaint& paint) {
+ AutoPaintFilter apf(this, kRect_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawRect(rect, *apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ AutoPaintFilter apf(this, kRRect_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawRRect(rrect, *apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkPaint& paint) {
+ AutoPaintFilter apf(this, kDRRect_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawDRRect(outer, inner, *apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawOval(const SkRect& rect, const SkPaint& paint) {
+ AutoPaintFilter apf(this, kOval_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawOval(rect, *apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawArc(const SkRect& rect, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) {
+ AutoPaintFilter apf(this, kArc_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawArc(rect, startAngle, sweepAngle, useCenter, *apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ AutoPaintFilter apf(this, kPath_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawPath(path, *apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawBitmap(const SkBitmap& bm, SkScalar left, SkScalar top,
+ const SkPaint* paint) {
+ AutoPaintFilter apf(this, kBitmap_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawBitmap(bm, left, top, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawBitmapRect(const SkBitmap& bm, const SkRect* src, const SkRect& dst,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ AutoPaintFilter apf(this, kBitmap_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawBitmapRect(bm, src, dst, apf.paint(), constraint);
+ }
+}
+
+void SkPaintFilterCanvas::onDrawBitmapNine(const SkBitmap& bm, const SkIRect& center,
+ const SkRect& dst, const SkPaint* paint) {
+ AutoPaintFilter apf(this, kBitmap_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawBitmapNine(bm, center, dst, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawImage(const SkImage* image, SkScalar left, SkScalar top,
+ const SkPaint* paint) {
+ AutoPaintFilter apf(this, kBitmap_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawImage(image, left, top, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawImageRect(const SkImage* image, const SkRect* src,
+ const SkRect& dst, const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ AutoPaintFilter apf(this, kBitmap_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawImageRect(image, src, dst, apf.paint(), constraint);
+ }
+}
+
+void SkPaintFilterCanvas::onDrawImageNine(const SkImage* image, const SkIRect& center,
+ const SkRect& dst, const SkPaint* paint) {
+ AutoPaintFilter apf(this, kBitmap_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawImageNine(image, center, dst, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawVertices(VertexMode vmode, int vertexCount,
+ const SkPoint vertices[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint) {
+ AutoPaintFilter apf(this, kVertices_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawVertices(vmode, vertexCount, vertices, texs, colors, xmode, indices,
+ indexCount, *apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawPatch(const SkPoint cubics[], const SkColor colors[],
+ const SkPoint texCoords[], SkXfermode* xmode,
+ const SkPaint& paint) {
+ AutoPaintFilter apf(this, kPatch_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawPatch(cubics, colors, texCoords, xmode, *apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawPicture(const SkPicture* picture, const SkMatrix* m,
+ const SkPaint* paint) {
+ AutoPaintFilter apf(this, kPicture_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawPicture(picture, m, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawText(const void* text, size_t byteLength, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ AutoPaintFilter apf(this, kText_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawText(text, byteLength, x, y, *apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawPosText(const void* text, size_t byteLength, const SkPoint pos[],
+ const SkPaint& paint) {
+ AutoPaintFilter apf(this, kText_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawPosText(text, byteLength, pos, *apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawPosTextH(const void* text, size_t byteLength, const SkScalar xpos[],
+ SkScalar constY, const SkPaint& paint) {
+ AutoPaintFilter apf(this, kText_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawPosTextH(text, byteLength, xpos, constY, *apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawTextOnPath(const void* text, size_t byteLength, const SkPath& path,
+ const SkMatrix* matrix, const SkPaint& paint) {
+ AutoPaintFilter apf(this, kText_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawTextOnPath(text, byteLength, path, matrix, *apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawTextRSXform(const void* text, size_t byteLength,
+ const SkRSXform xform[], const SkRect* cull,
+ const SkPaint& paint) {
+ AutoPaintFilter apf(this, kText_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawTextRSXform(text, byteLength, xform, cull, *apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ AutoPaintFilter apf(this, kTextBlob_Type, paint);
+ if (apf.shouldDraw()) {
+ this->INHERITED::onDrawTextBlob(blob, x, y, *apf.paint());
+ }
+}
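SkPaintFilterCanvas is intended to be subclassed: onFilter() may rewrite the paint (copy-on-write via SkTCopyOnFirstWrite) or veto the draw entirely before it is forwarded. A minimal sketch of such a subclass, with a made-up class name, that turns every paint into a thin red stroke:

    #include "SkPaintFilterCanvas.h"

    class StrokeEverythingCanvas : public SkPaintFilterCanvas {
    public:
        explicit StrokeEverythingCanvas(SkCanvas* target) : SkPaintFilterCanvas(target) {}

    protected:
        bool onFilter(SkTCopyOnFirstWrite<SkPaint>* paint, Type) const override {
            if (*paint) {                        // paint may be null for bitmap/image draws
                SkPaint* p = paint->writable();  // triggers the copy-on-first-write
                p->setStyle(SkPaint::kStroke_Style);
                p->setStrokeWidth(1.0f);
                p->setColor(SK_ColorRED);
            }
            return true;                         // returning false skips the draw
        }
    };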
diff --git a/gfx/skia/skia/src/utils/SkParse.cpp b/gfx/skia/skia/src/utils/SkParse.cpp
new file mode 100644
index 000000000..90c9fffe4
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkParse.cpp
@@ -0,0 +1,296 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkParse.h"
+
+#include <stdlib.h>
+
+static inline bool is_between(int c, int min, int max)
+{
+ return (unsigned)(c - min) <= (unsigned)(max - min);
+}
+
+static inline bool is_ws(int c)
+{
+ return is_between(c, 1, 32);
+}
+
+static inline bool is_digit(int c)
+{
+ return is_between(c, '0', '9');
+}
+
+static inline bool is_sep(int c)
+{
+ return is_ws(c) || c == ',' || c == ';';
+}
+
+static int to_hex(int c)
+{
+ if (is_digit(c))
+ return c - '0';
+
+ c |= 0x20; // make us lower-case
+ if (is_between(c, 'a', 'f'))
+ return c + 10 - 'a';
+ else
+ return -1;
+}
+
+static inline bool is_hex(int c)
+{
+ return to_hex(c) >= 0;
+}
+
+static const char* skip_ws(const char str[])
+{
+ SkASSERT(str);
+ while (is_ws(*str))
+ str++;
+ return str;
+}
+
+static const char* skip_sep(const char str[])
+{
+ SkASSERT(str);
+ while (is_sep(*str))
+ str++;
+ return str;
+}
+
+int SkParse::Count(const char str[])
+{
+ char c;
+ int count = 0;
+ goto skipLeading;
+ do {
+ count++;
+ do {
+ if ((c = *str++) == '\0')
+ goto goHome;
+ } while (is_sep(c) == false);
+skipLeading:
+ do {
+ if ((c = *str++) == '\0')
+ goto goHome;
+ } while (is_sep(c));
+ } while (true);
+goHome:
+ return count;
+}
+
+int SkParse::Count(const char str[], char separator)
+{
+ char c;
+ int count = 0;
+ goto skipLeading;
+ do {
+ count++;
+ do {
+ if ((c = *str++) == '\0')
+ goto goHome;
+ } while (c != separator);
+skipLeading:
+ do {
+ if ((c = *str++) == '\0')
+ goto goHome;
+ } while (c == separator);
+ } while (true);
+goHome:
+ return count;
+}
+
+const char* SkParse::FindHex(const char str[], uint32_t* value)
+{
+ SkASSERT(str);
+ str = skip_ws(str);
+
+ if (!is_hex(*str))
+ return nullptr;
+
+ uint32_t n = 0;
+ int max_digits = 8;
+ int digit;
+
+ while ((digit = to_hex(*str)) >= 0)
+ {
+ if (--max_digits < 0)
+ return nullptr;
+ n = (n << 4) | digit;
+ str += 1;
+ }
+
+ if (*str == 0 || is_ws(*str))
+ {
+ if (value)
+ *value = n;
+ return str;
+ }
+ return nullptr;
+}
+
+const char* SkParse::FindS32(const char str[], int32_t* value)
+{
+ SkASSERT(str);
+ str = skip_ws(str);
+
+ int sign = 0;
+ if (*str == '-')
+ {
+ sign = -1;
+ str += 1;
+ }
+
+ if (!is_digit(*str))
+ return nullptr;
+
+ int n = 0;
+ while (is_digit(*str))
+ {
+ n = 10*n + *str - '0';
+ str += 1;
+ }
+ if (value)
+ *value = (n ^ sign) - sign;
+ return str;
+}
+
+const char* SkParse::FindMSec(const char str[], SkMSec* value)
+{
+ SkASSERT(str);
+ str = skip_ws(str);
+
+ int sign = 0;
+ if (*str == '-')
+ {
+ sign = -1;
+ str += 1;
+ }
+
+ if (!is_digit(*str))
+ return nullptr;
+
+ int n = 0;
+ while (is_digit(*str))
+ {
+ n = 10*n + *str - '0';
+ str += 1;
+ }
+ int remaining10s = 3;
+ if (*str == '.') {
+ str++;
+ while (is_digit(*str))
+ {
+ n = 10*n + *str - '0';
+ str += 1;
+ if (--remaining10s == 0)
+ break;
+ }
+ }
+ while (--remaining10s >= 0)
+ n *= 10;
+ if (value)
+ *value = (n ^ sign) - sign;
+ return str;
+}
+
+const char* SkParse::FindScalar(const char str[], SkScalar* value) {
+ SkASSERT(str);
+ str = skip_ws(str);
+
+ char* stop;
+ float v = (float)strtod(str, &stop);
+ if (str == stop) {
+ return nullptr;
+ }
+ if (value) {
+ *value = v;
+ }
+ return stop;
+}
+
+const char* SkParse::FindScalars(const char str[], SkScalar value[], int count)
+{
+ SkASSERT(count >= 0);
+
+ if (count > 0)
+ {
+ for (;;)
+ {
+ str = SkParse::FindScalar(str, value);
+ if (--count == 0 || str == nullptr)
+ break;
+
+ // keep going
+ str = skip_sep(str);
+ if (value)
+ value += 1;
+ }
+ }
+ return str;
+}
+
+static bool lookup_str(const char str[], const char** table, int count)
+{
+ while (--count >= 0)
+ if (!strcmp(str, table[count]))
+ return true;
+ return false;
+}
+
+bool SkParse::FindBool(const char str[], bool* value)
+{
+ static const char* gYes[] = { "yes", "1", "true" };
+ static const char* gNo[] = { "no", "0", "false" };
+
+ if (lookup_str(str, gYes, SK_ARRAY_COUNT(gYes)))
+ {
+ if (value) *value = true;
+ return true;
+ }
+ else if (lookup_str(str, gNo, SK_ARRAY_COUNT(gNo)))
+ {
+ if (value) *value = false;
+ return true;
+ }
+ return false;
+}
+
+int SkParse::FindList(const char target[], const char list[])
+{
+ size_t len = strlen(target);
+ int index = 0;
+
+ for (;;)
+ {
+ const char* end = strchr(list, ',');
+ size_t entryLen;
+
+ if (end == nullptr) // last entry
+ entryLen = strlen(list);
+ else
+ entryLen = end - list;
+
+ if (entryLen == len && memcmp(target, list, len) == 0)
+ return index;
+ if (end == nullptr)
+ break;
+
+ list = end + 1; // skip the ','
+ index += 1;
+ }
+ return -1;
+}
+
+#ifdef SK_SUPPORT_UNITTEST
+void SkParse::UnitTest()
+{
+ // !!! additional parse tests go here
+ SkParse::TestColor();
+}
+#endif
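All of these parsers return a pointer just past the text they consumed (or nullptr on failure), so they chain naturally. A small sketch with an illustrative input string:

    int32_t  count;
    SkScalar xy[2];
    uint32_t hex;
    const char* s = "3 1.5 2.5 1A2B";
    s = SkParse::FindS32(s, &count);       // count == 3
    s = SkParse::FindScalars(s, xy, 2);    // xy[0] == 1.5, xy[1] == 2.5
    s = SkParse::FindHex(s, &hex);         // hex == 0x1A2B
    // Real code should check each returned pointer for nullptr before chaining further.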
diff --git a/gfx/skia/skia/src/utils/SkParseColor.cpp b/gfx/skia/skia/src/utils/SkParseColor.cpp
new file mode 100644
index 000000000..eafdc4c3d
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkParseColor.cpp
@@ -0,0 +1,538 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkParse.h"
+
+#ifdef SK_DEBUG
+#include "SkString.h"
+
+#ifdef SK_SUPPORT_UNITTEST
+ // compress names 6 chars per long (packed 5 bits/char )
+ // note: little advantage to splitting chars across longs, since 3 longs at 2 unused bits each
+ // allow for one additional split char (vs. the 18 unsplit chars in the three longs)
+ // use extra two bits to represent:
+ // 00 : final 6 (or fewer) chars (if 'a' is 0x01, zero could have special meaning)
+ // 01 : not final 6 chars
+ // 10 : color
+ // 11 : unused, except as debugging sentinel? (could be -1 for easier test)
+ // !!! the bit to end the word (last) is at the low bit for binary search
+ // lookup first character in offset for quick start
+ // offset is 27-entry table of bytes(?) that trims linear search to at most 21 entries ('d')
+ // shift match into long; set bit 30 if it all doesn't fit
+ // while longs don't match, march forward
+ // if they do match, and bit 30 is set, advance match, clearing bit 30 if
+ // final chars, and advance to next test
+ // if they do match, and bit 30 is clear, get next long (color) and return it
+ // stop at lookup of first char + 1
+static const struct SkNameRGB {
+ const char* name;
+ int rgb;
+} colorNames[] = {
+ { "aliceblue", 0xF0F8FF },
+ { "antiquewhite", 0xFAEBD7 },
+ { "aqua", 0x00FFFF },
+ { "aquamarine", 0x7FFFD4 },
+ { "azure", 0xF0FFFF },
+ { "beige", 0xF5F5DC },
+ { "bisque", 0xFFE4C4 },
+ { "black", 0x000000 },
+ { "blanchedalmond", 0xFFEBCD },
+ { "blue", 0x0000FF },
+ { "blueviolet", 0x8A2BE2 },
+ { "brown", 0xA52A2A },
+ { "burlywood", 0xDEB887 },
+ { "cadetblue", 0x5F9EA0 },
+ { "chartreuse", 0x7FFF00 },
+ { "chocolate", 0xD2691E },
+ { "coral", 0xFF7F50 },
+ { "cornflowerblue", 0x6495ED },
+ { "cornsilk", 0xFFF8DC },
+ { "crimson", 0xDC143C },
+ { "cyan", 0x00FFFF },
+ { "darkblue", 0x00008B },
+ { "darkcyan", 0x008B8B },
+ { "darkgoldenrod", 0xB8860B },
+ { "darkgray", 0xA9A9A9 },
+ { "darkgreen", 0x006400 },
+ { "darkkhaki", 0xBDB76B },
+ { "darkmagenta", 0x8B008B },
+ { "darkolivegreen", 0x556B2F },
+ { "darkorange", 0xFF8C00 },
+ { "darkorchid", 0x9932CC },
+ { "darkred", 0x8B0000 },
+ { "darksalmon", 0xE9967A },
+ { "darkseagreen", 0x8FBC8F },
+ { "darkslateblue", 0x483D8B },
+ { "darkslategray", 0x2F4F4F },
+ { "darkturquoise", 0x00CED1 },
+ { "darkviolet", 0x9400D3 },
+ { "deeppink", 0xFF1493 },
+ { "deepskyblue", 0x00BFFF },
+ { "dimgray", 0x696969 },
+ { "dodgerblue", 0x1E90FF },
+ { "firebrick", 0xB22222 },
+ { "floralwhite", 0xFFFAF0 },
+ { "forestgreen", 0x228B22 },
+ { "fuchsia", 0xFF00FF },
+ { "gainsboro", 0xDCDCDC },
+ { "ghostwhite", 0xF8F8FF },
+ { "gold", 0xFFD700 },
+ { "goldenrod", 0xDAA520 },
+ { "gray", 0x808080 },
+ { "green", 0x008000 },
+ { "greenyellow", 0xADFF2F },
+ { "honeydew", 0xF0FFF0 },
+ { "hotpink", 0xFF69B4 },
+ { "indianred", 0xCD5C5C },
+ { "indigo", 0x4B0082 },
+ { "ivory", 0xFFFFF0 },
+ { "khaki", 0xF0E68C },
+ { "lavender", 0xE6E6FA },
+ { "lavenderblush", 0xFFF0F5 },
+ { "lawngreen", 0x7CFC00 },
+ { "lemonchiffon", 0xFFFACD },
+ { "lightblue", 0xADD8E6 },
+ { "lightcoral", 0xF08080 },
+ { "lightcyan", 0xE0FFFF },
+ { "lightgoldenrodyellow", 0xFAFAD2 },
+ { "lightgreen", 0x90EE90 },
+ { "lightgrey", 0xD3D3D3 },
+ { "lightpink", 0xFFB6C1 },
+ { "lightsalmon", 0xFFA07A },
+ { "lightseagreen", 0x20B2AA },
+ { "lightskyblue", 0x87CEFA },
+ { "lightslategray", 0x778899 },
+ { "lightsteelblue", 0xB0C4DE },
+ { "lightyellow", 0xFFFFE0 },
+ { "lime", 0x00FF00 },
+ { "limegreen", 0x32CD32 },
+ { "linen", 0xFAF0E6 },
+ { "magenta", 0xFF00FF },
+ { "maroon", 0x800000 },
+ { "mediumaquamarine", 0x66CDAA },
+ { "mediumblue", 0x0000CD },
+ { "mediumorchid", 0xBA55D3 },
+ { "mediumpurple", 0x9370DB },
+ { "mediumseagreen", 0x3CB371 },
+ { "mediumslateblue", 0x7B68EE },
+ { "mediumspringgreen", 0x00FA9A },
+ { "mediumturquoise", 0x48D1CC },
+ { "mediumvioletred", 0xC71585 },
+ { "midnightblue", 0x191970 },
+ { "mintcream", 0xF5FFFA },
+ { "mistyrose", 0xFFE4E1 },
+ { "moccasin", 0xFFE4B5 },
+ { "navajowhite", 0xFFDEAD },
+ { "navy", 0x000080 },
+ { "oldlace", 0xFDF5E6 },
+ { "olive", 0x808000 },
+ { "olivedrab", 0x6B8E23 },
+ { "orange", 0xFFA500 },
+ { "orangered", 0xFF4500 },
+ { "orchid", 0xDA70D6 },
+ { "palegoldenrod", 0xEEE8AA },
+ { "palegreen", 0x98FB98 },
+ { "paleturquoise", 0xAFEEEE },
+ { "palevioletred", 0xDB7093 },
+ { "papayawhip", 0xFFEFD5 },
+ { "peachpuff", 0xFFDAB9 },
+ { "peru", 0xCD853F },
+ { "pink", 0xFFC0CB },
+ { "plum", 0xDDA0DD },
+ { "powderblue", 0xB0E0E6 },
+ { "purple", 0x800080 },
+ { "red", 0xFF0000 },
+ { "rosybrown", 0xBC8F8F },
+ { "royalblue", 0x4169E1 },
+ { "saddlebrown", 0x8B4513 },
+ { "salmon", 0xFA8072 },
+ { "sandybrown", 0xF4A460 },
+ { "seagreen", 0x2E8B57 },
+ { "seashell", 0xFFF5EE },
+ { "sienna", 0xA0522D },
+ { "silver", 0xC0C0C0 },
+ { "skyblue", 0x87CEEB },
+ { "slateblue", 0x6A5ACD },
+ { "slategray", 0x708090 },
+ { "snow", 0xFFFAFA },
+ { "springgreen", 0x00FF7F },
+ { "steelblue", 0x4682B4 },
+ { "tan", 0xD2B48C },
+ { "teal", 0x008080 },
+ { "thistle", 0xD8BFD8 },
+ { "tomato", 0xFF6347 },
+ { "turquoise", 0x40E0D0 },
+ { "violet", 0xEE82EE },
+ { "wheat", 0xF5DEB3 },
+ { "white", 0xFFFFFF },
+ { "whitesmoke", 0xF5F5F5 },
+ { "yellow", 0xFFFF00 },
+ { "yellowgreen", 0x9ACD32 }
+};
+
+int colorNamesSize = SK_ARRAY_COUNT(colorNames);
+
+static void CreateTable() {
+ SkString comment;
+ size_t originalSize = 0;
+ int replacement = 0;
+ for (int index = 0; index < colorNamesSize; index++) {
+ SkNameRGB nameRGB = colorNames[index];
+ const char* name = nameRGB.name;
+ size_t len = strlen(name);
+ originalSize += len + 9;
+ bool first = true;
+ bool last = false;
+ do {
+ int compressed = 0;
+ const char* start = name;
+ for (int chIndex = 0; chIndex < 6; chIndex++) {
+ compressed <<= 5;
+ compressed |= *name ? *name++ - 'a' + 1 : 0 ;
+ }
+ replacement += sizeof(int);
+ compressed <<= 1;
+ compressed |= 1;
+ if (first) {
+ compressed |= 0x80000000;
+ first = false;
+ }
+ if (len <= 6) { // last
+ compressed &= ~1;
+ last = true;
+ }
+ len -= 6;
+ SkDebugf("0x%08x, ", compressed);
+ comment.append(start, name - start);
+ } while (last == false);
+ replacement += sizeof(int);
+ SkDebugf("0x%08x, ", nameRGB.rgb);
+ SkDebugf("// %s\n", comment.c_str());
+ comment.reset();
+ }
+ SkDebugf("// original = %d : replacement = %d\n", originalSize, replacement);
+ SkASSERT(0); // always stop after creating table
+}
+#endif
+
+#endif
+
+static const unsigned int gColorNames[] = {
+0x85891945, 0x32a50000, 0x00f0f8ff, // aliceblue
+0x85d44c6b, 0x16e84d0a, 0x00faebd7, // antiquewhite
+0x86350800, 0x0000ffff, // aqua
+0x86350b43, 0x492e2800, 0x007fffd4, // aquamarine
+0x87559140, 0x00f0ffff, // azure
+0x88a93940, 0x00f5f5dc, // beige
+0x89338d4a, 0x00ffe4c4, // bisque
+0x89811ac0, 0x00000000, // black
+0x898170d1, 0x1481635f, 0x38800000, 0x00ffebcd, // blanchedalmond
+0x89952800, 0x000000ff, // blue
+0x89952d93, 0x3d85a000, 0x008a2be2, // blueviolet
+0x8a4fbb80, 0x00a52a2a, // brown
+0x8ab2666f, 0x3de40000, 0x00deb887, // burlywood
+0x8c242d05, 0x32a50000, 0x005f9ea0, // cadetblue
+0x8d019525, 0x16b32800, 0x007fff00, // chartreuse
+0x8d0f1bd9, 0x06850000, 0x00d2691e, // chocolate
+0x8df20b00, 0x00ff7f50, // coral
+0x8df27199, 0x3ee59099, 0x54a00000, 0x006495ed, // cornflowerblue
+0x8df274d3, 0x31600000, 0x00fff8dc, // cornsilk
+0x8e496cdf, 0x38000000, 0x00dc143c, // crimson
+0x8f217000, 0x0000ffff, // cyan
+0x90325899, 0x54a00000, 0x0000008b, // darkblue
+0x903258f3, 0x05c00000, 0x00008b8b, // darkcyan
+0x903259df, 0x3085749f, 0x10000000, 0x00b8860b, // darkgoldenrod
+0x903259e5, 0x07200000, 0x00a9a9a9, // darkgray
+0x903259e5, 0x14ae0000, 0x00006400, // darkgreen
+0x90325ad1, 0x05690000, 0x00bdb76b, // darkkhaki
+0x90325b43, 0x1caea040, 0x008b008b, // darkmagenta
+0x90325bd9, 0x26c53c8b, 0x15c00000, 0x00556b2f, // darkolivegreen
+0x90325be5, 0x05c72800, 0x00ff8c00, // darkorange
+0x90325be5, 0x0d092000, 0x009932cc, // darkorchid
+0x90325c8b, 0x10000000, 0x008b0000, // darkred
+0x90325cc3, 0x31af7000, 0x00e9967a, // darksalmon
+0x90325ccb, 0x04f2295c, 0x008fbc8f, // darkseagreen
+0x90325cd9, 0x0685132b, 0x14000000, 0x00483d8b, // darkslateblue
+0x90325cd9, 0x06853c83, 0x64000000, 0x002f4f4f, // darkslategray
+0x90325d2b, 0x4a357a67, 0x14000000, 0x0000ced1, // darkturquoise
+0x90325d93, 0x3d85a000, 0x009400d3, // darkviolet
+0x90a58413, 0x39600000, 0x00ff1493, // deeppink
+0x90a584d7, 0x644ca940, 0x0000bfff, // deepskyblue
+0x912d3c83, 0x64000000, 0x00696969, // dimgray
+0x91e43965, 0x09952800, 0x001e90ff, // dodgerblue
+0x993228a5, 0x246b0000, 0x00b22222, // firebrick
+0x998f9059, 0x5d09a140, 0x00fffaf0, // floralwhite
+0x99f22ce9, 0x1e452b80, 0x00228b22, // forestgreen
+0x9aa344d3, 0x04000000, 0x00ff00ff, // fuchsia
+0x9c2974c5, 0x3e4f0000, 0x00dcdcdc, // gainsboro
+0x9d0f9d2f, 0x21342800, 0x00f8f8ff, // ghostwhite
+0x9dec2000, 0x00ffd700, // gold
+0x9dec215d, 0x49e40000, 0x00daa520, // goldenrod
+0x9e41c800, 0x00808080, // gray
+0x9e452b80, 0x00008000, // green
+0x9e452bb3, 0x158c7dc0, 0x00adff2f, // greenyellow
+0xa1ee2e49, 0x16e00000, 0x00f0fff0, // honeydew
+0xa1f4825d, 0x2c000000, 0x00ff69b4, // hotpink
+0xa5c4485d, 0x48a40000, 0x00cd5c5c, // indianred
+0xa5c449de, 0x004b0082, // indigo
+0xa6cf9640, 0x00fffff0, // ivory
+0xad015a40, 0x00f0e68c, // khaki
+0xb0362b89, 0x16400000, 0x00e6e6fa, // lavender
+0xb0362b89, 0x16426567, 0x20000000, 0x00fff0f5, // lavenderblush
+0xb03771e5, 0x14ae0000, 0x007cfc00, // lawngreen
+0xb0ad7b87, 0x212633dc, 0x00fffacd, // lemonchiffon
+0xb1274505, 0x32a50000, 0x00add8e6, // lightblue
+0xb1274507, 0x3e416000, 0x00f08080, // lightcoral
+0xb1274507, 0x642e0000, 0x00e0ffff, // lightcyan
+0xb127450f, 0x3d842ba5, 0x3c992b19, 0x3ee00000, 0x00fafad2, // lightgoldenrodyellow
+0xb127450f, 0x48a57000, 0x0090ee90, // lightgreen
+0xb127450f, 0x48b90000, 0x00d3d3d3, // lightgrey
+0xb1274521, 0x25cb0000, 0x00ffb6c1, // lightpink
+0xb1274527, 0x058d7b80, 0x00ffa07a, // lightsalmon
+0xb1274527, 0x1427914b, 0x38000000, 0x0020b2aa, // lightseagreen
+0xb1274527, 0x2f22654a, 0x0087cefa, // lightskyblue
+0xb1274527, 0x303429e5, 0x07200000, 0x00778899, // lightslategray
+0xb1274527, 0x50a56099, 0x54a00000, 0x00b0c4de, // lightsteelblue
+0xb1274533, 0x158c7dc0, 0x00ffffe0, // lightyellow
+0xb12d2800, 0x0000ff00, // lime
+0xb12d29e5, 0x14ae0000, 0x0032cd32, // limegreen
+0xb12e2b80, 0x00faf0e6, // linen
+0xb4272ba9, 0x04000000, 0x00ff00ff, // magenta
+0xb4327bdc, 0x00800000, // maroon
+0xb4a44d5b, 0x06350b43, 0x492e2800, 0x0066cdaa, // mediumaquamarine
+0xb4a44d5b, 0x09952800, 0x000000cd, // mediumblue
+0xb4a44d5b, 0x3e434248, 0x00ba55d3, // mediumorchid
+0xb4a44d5b, 0x42b2830a, 0x009370db, // mediumpurple
+0xb4a44d5b, 0x4ca13c8b, 0x15c00000, 0x003cb371, // mediumseagreen
+0xb4a44d5b, 0x4d81a145, 0x32a50000, 0x007b68ee, // mediumslateblue
+0xb4a44d5b, 0x4e124b8f, 0x1e452b80, 0x0000fa9a, // mediumspringgreen
+0xb4a44d5b, 0x52b28d5f, 0x26650000, 0x0048d1cc, // mediumturquoise
+0xb4a44d5b, 0x592f6169, 0x48a40000, 0x00c71585, // mediumvioletred
+0xb524724f, 0x2282654a, 0x00191970, // midnightblue
+0xb52ea0e5, 0x142d0000, 0x00f5fffa, // mintcream
+0xb533a665, 0x3e650000, 0x00ffe4e1, // mistyrose
+0xb5e31867, 0x25c00000, 0x00ffe4b5, // moccasin
+0xb8360a9f, 0x5d09a140, 0x00ffdead, // navajowhite
+0xb836c800, 0x00000080, // navy
+0xbd846047, 0x14000000, 0x00fdf5e6, // oldlace
+0xbd89b140, 0x00808000, // olive
+0xbd89b149, 0x48220000, 0x006b8e23, // olivedrab
+0xbe4171ca, 0x00ffa500, // orange
+0xbe4171cb, 0x48a40000, 0x00ff4500, // orangered
+0xbe434248, 0x00da70d6, // orchid
+0xc02c29df, 0x3085749f, 0x10000000, 0x00eee8aa, // palegoldenrod
+0xc02c29e5, 0x14ae0000, 0x0098fb98, // palegreen
+0xc02c2d2b, 0x4a357a67, 0x14000000, 0x00afeeee, // paleturquoise
+0xc02c2d93, 0x3d85a48b, 0x10000000, 0x00db7093, // palevioletred
+0xc0300e43, 0x5d098000, 0x00ffefd5, // papayawhip
+0xc0a11a21, 0x54c60000, 0x00ffdab9, // peachpuff
+0xc0b2a800, 0x00cd853f, // peru
+0xc12e5800, 0x00ffc0cb, // pink
+0xc1956800, 0x00dda0dd, // plum
+0xc1f72165, 0x09952800, 0x00b0e0e6, // powderblue
+0xc2b2830a, 0x00800080, // purple
+0xc8a40000, 0x00ff0000, // red
+0xc9f3c8a5, 0x3eee0000, 0x00bc8f8f, // rosybrown
+0xc9f90b05, 0x32a50000, 0x004169e1, // royalblue
+0xcc24230b, 0x0a4fbb80, 0x008b4513, // saddlebrown
+0xcc2c6bdc, 0x00fa8072, // salmon
+0xcc2e2645, 0x49f77000, 0x00f4a460, // sandybrown
+0xcca13c8b, 0x15c00000, 0x002e8b57, // seagreen
+0xcca19a0b, 0x31800000, 0x00fff5ee, // seashell
+0xcd257382, 0x00a0522d, // sienna
+0xcd2cb164, 0x00c0c0c0, // silver
+0xcd79132b, 0x14000000, 0x0087ceeb, // skyblue
+0xcd81a145, 0x32a50000, 0x006a5acd, // slateblue
+0xcd81a14f, 0x48390000, 0x00708090, // slategray
+0xcdcfb800, 0x00fffafa, // snow
+0xce124b8f, 0x1e452b80, 0x0000ff7f, // springgreen
+0xce852b05, 0x32a50000, 0x004682b4, // steelblue
+0xd02e0000, 0x00d2b48c, // tan
+0xd0a16000, 0x00008080, // teal
+0xd1099d19, 0x14000000, 0x00d8bfd8, // thistle
+0xd1ed0d1e, 0x00ff6347, // tomato
+0xd2b28d5f, 0x26650000, 0x0040e0d0, // turquoise
+0xd92f6168, 0x00ee82ee, // violet
+0xdd050d00, 0x00f5deb3, // wheat
+0xdd09a140, 0x00ffffff, // white
+0xdd09a167, 0x35eb2800, 0x00f5f5f5, // whitesmoke
+0xe4ac63ee, 0x00ffff00, // yellow
+0xe4ac63ef, 0x1e452b80, 0x009acd32 // yellowgreen
+}; // original = 2505 : replacement = 1616
+
+
+const char* SkParse::FindNamedColor(const char* name, size_t len, SkColor* color) {
+ const char* namePtr = name;
+ unsigned int sixMatches[4];
+ unsigned int* sixMatchPtr = sixMatches;
+ bool first = true;
+ bool last = false;
+ char ch;
+ do {
+ unsigned int sixMatch = 0;
+ for (int chIndex = 0; chIndex < 6; chIndex++) {
+ sixMatch <<= 5;
+ ch = *namePtr | 0x20;
+ if (ch < 'a' || ch > 'z')
+ ch = 0;
+ else {
+ ch = ch - 'a' + 1;
+ namePtr++;
+ }
+ sixMatch |= ch; // 'A'..'Z' were already folded to lowercase above via | 0x20
+ }
+ sixMatch <<= 1;
+ sixMatch |= 1;
+ if (first) {
+ sixMatch |= 0x80000000;
+ first = false;
+ }
+ ch = *namePtr | 0x20;
+ last = ch < 'a' || ch > 'z';
+ if (last)
+ sixMatch &= ~1;
+ len -= 6;
+ *sixMatchPtr++ = sixMatch;
+ } while (last == false && len > 0);
+ const int colorNameSize = sizeof(gColorNames) / sizeof(unsigned int);
+ int lo = 0;
+ int hi = colorNameSize - 3; // back off to beginning of yellowgreen
+ while (lo <= hi) {
+ int mid = (hi + lo) >> 1;
+ while ((int) gColorNames[mid] >= 0)
+ --mid;
+ sixMatchPtr = sixMatches;
+ while (gColorNames[mid] == *sixMatchPtr) {
+ ++mid;
+ if ((*sixMatchPtr & 1) == 0) { // last
+ *color = gColorNames[mid] | 0xFF000000;
+ return namePtr;
+ }
+ ++sixMatchPtr;
+ }
+ int sixMask = *sixMatchPtr & ~0x80000000;
+ int midMask = gColorNames[mid] & ~0x80000000;
+ if (sixMask > midMask) {
+ lo = mid + 2; // skip color
+ while ((int) gColorNames[lo] >= 0)
+ ++lo;
+ } else if (hi == mid)
+ return nullptr;
+ else
+ hi = mid;
+ }
+ return nullptr;
+}
+
+// !!! move to char utilities
+//static int count_separators(const char* str, const char* sep) {
+// char c;
+// int separators = 0;
+// while ((c = *str++) != '\0') {
+// if (strchr(sep, c) == nullptr)
+// continue;
+// do {
+// if ((c = *str++) == '\0')
+// goto goHome;
+// } while (strchr(sep, c) != nullptr);
+// separators++;
+// }
+//goHome:
+// return separators;
+//}
+
+static inline unsigned nib2byte(unsigned n)
+{
+ SkASSERT((n & ~0xF) == 0);
+ return (n << 4) | n;
+}
+
+const char* SkParse::FindColor(const char* value, SkColor* colorPtr) {
+ unsigned int oldAlpha = SkColorGetA(*colorPtr);
+ if (value[0] == '#') {
+ uint32_t hex;
+ const char* end = SkParse::FindHex(value + 1, &hex);
+// SkASSERT(end);
+ if (end == nullptr)
+ return end;
+ size_t len = end - value - 1;
+ if (len == 3 || len == 4) {
+ unsigned a = len == 4 ? nib2byte(hex >> 12) : oldAlpha;
+ unsigned r = nib2byte((hex >> 8) & 0xF);
+ unsigned g = nib2byte((hex >> 4) & 0xF);
+ unsigned b = nib2byte(hex & 0xF);
+ *colorPtr = SkColorSetARGB(a, r, g, b);
+ return end;
+ } else if (len == 6 || len == 8) {
+ if (len == 6)
+ hex |= oldAlpha << 24;
+ *colorPtr = hex;
+ return end;
+ } else {
+// SkASSERT(0);
+ return nullptr;
+ }
+// } else if (strchr(value, ',')) {
+// SkScalar array[4];
+// int count = count_separators(value, ",") + 1; // !!! count commas, add 1
+// SkASSERT(count == 3 || count == 4);
+// array[0] = SK_Scalar1 * 255;
+// const char* end = SkParse::FindScalars(value, &array[4 - count], count);
+// if (end == nullptr)
+// return nullptr;
+ // !!! range check for errors?
+// *colorPtr = SkColorSetARGB(SkScalarRoundToInt(array[0]), SkScalarRoundToInt(array[1]),
+// SkScalarRoundToInt(array[2]), SkScalarRoundToInt(array[3]));
+// return end;
+ } else
+ return FindNamedColor(value, strlen(value), colorPtr);
+}
+
+#ifdef SK_SUPPORT_UNITTEST
+void SkParse::TestColor() {
+ if (false)
+ CreateTable(); // regenerates data table in the output window
+ SkColor result;
+ int index;
+ for (index = 0; index < colorNamesSize; index++) {
+ result = SK_ColorBLACK;
+ SkNameRGB nameRGB = colorNames[index];
+ SkASSERT(FindColor(nameRGB.name, &result) != nullptr);
+ SkASSERT(result == (SkColor) (nameRGB.rgb | 0xFF000000));
+ }
+ for (index = 0; index < colorNamesSize; index++) {
+ result = SK_ColorBLACK;
+ SkNameRGB nameRGB = colorNames[index];
+ char bad[24];
+ size_t len = strlen(nameRGB.name);
+ memcpy(bad, nameRGB.name, len);
+ bad[len - 1] -= 1;
+ SkASSERT(FindColor(bad, &result) == nullptr);
+ bad[len - 1] += 2;
+ SkASSERT(FindColor(bad, &result) == nullptr);
+ }
+ result = SK_ColorBLACK;
+ SkASSERT(FindColor("lightGrey", &result));
+ SkASSERT(result == 0xffd3d3d3);
+// SkASSERT(FindColor("12,34,56,78", &result));
+// SkASSERT(result == ((12 << 24) | (34 << 16) | (56 << 8) | (78 << 0)));
+ result = SK_ColorBLACK;
+ SkASSERT(FindColor("#ABCdef", &result));
+ SkASSERT(result == 0XFFABCdef);
+ SkASSERT(FindColor("#12ABCdef", &result));
+ SkASSERT(result == 0X12ABCdef);
+ result = SK_ColorBLACK;
+ SkASSERT(FindColor("#123", &result));
+ SkASSERT(result == 0Xff112233);
+ SkASSERT(FindColor("#abcd", &result));
+ SkASSERT(result == 0Xaabbccdd);
+ result = SK_ColorBLACK;
+// SkASSERT(FindColor("71,162,253", &result));
+// SkASSERT(result == ((0xFF << 24) | (71 << 16) | (162 << 8) | (253 << 0)));
+}
+#endif
diff --git a/gfx/skia/skia/src/utils/SkParsePath.cpp b/gfx/skia/skia/src/utils/SkParsePath.cpp
new file mode 100644
index 000000000..c92466190
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkParsePath.cpp
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkParse.h"
+#include "SkParsePath.h"
+
+static inline bool is_between(int c, int min, int max) {
+ return (unsigned)(c - min) <= (unsigned)(max - min);
+}
+
+static inline bool is_ws(int c) {
+ return is_between(c, 1, 32);
+}
+
+static inline bool is_digit(int c) {
+ return is_between(c, '0', '9');
+}
+
+static inline bool is_sep(int c) {
+ return is_ws(c) || c == ',';
+}
+
+static inline bool is_lower(int c) {
+ return is_between(c, 'a', 'z');
+}
+
+static inline int to_upper(int c) {
+ return c - 'a' + 'A';
+}
+
+static const char* skip_ws(const char str[]) {
+ SkASSERT(str);
+ while (is_ws(*str))
+ str++;
+ return str;
+}
+
+static const char* skip_sep(const char str[]) {
+ if (!str) {
+ return nullptr;
+ }
+ while (is_sep(*str))
+ str++;
+ return str;
+}
+
+static const char* find_points(const char str[], SkPoint value[], int count,
+ bool isRelative, SkPoint* relative) {
+ str = SkParse::FindScalars(str, &value[0].fX, count * 2);
+ if (isRelative) {
+ for (int index = 0; index < count; index++) {
+ value[index].fX += relative->fX;
+ value[index].fY += relative->fY;
+ }
+ }
+ return str;
+}
+
+static const char* find_scalar(const char str[], SkScalar* value,
+ bool isRelative, SkScalar relative) {
+ str = SkParse::FindScalar(str, value);
+ if (!str) {
+ return nullptr;
+ }
+ if (isRelative) {
+ *value += relative;
+ }
+ str = skip_sep(str);
+ return str;
+}
+
+bool SkParsePath::FromSVGString(const char data[], SkPath* result) {
+ SkPath path;
+ SkPoint first = {0, 0};
+ SkPoint c = {0, 0};
+ SkPoint lastc = {0, 0};
+ SkPoint points[3];
+ char op = '\0';
+ char previousOp = '\0';
+ bool relative = false;
+ for (;;) {
+ if (!data) {
+ // Truncated data
+ return false;
+ }
+ data = skip_ws(data);
+ if (data[0] == '\0') {
+ break;
+ }
+ char ch = data[0];
+ if (is_digit(ch) || ch == '-' || ch == '+' || ch == '.') {
+ if (op == '\0') {
+ return false;
+ }
+ } else if (is_sep(ch)) {
+ data = skip_sep(data);
+ } else {
+ op = ch;
+ relative = false;
+ if (is_lower(op)) {
+ op = (char) to_upper(op);
+ relative = true;
+ }
+ data++;
+ data = skip_sep(data);
+ }
+ switch (op) {
+ case 'M':
+ data = find_points(data, points, 1, relative, &c);
+ path.moveTo(points[0]);
+ previousOp = '\0';
+ op = 'L';
+ c = points[0];
+ break;
+ case 'L':
+ data = find_points(data, points, 1, relative, &c);
+ path.lineTo(points[0]);
+ c = points[0];
+ break;
+ case 'H': {
+ SkScalar x;
+ data = find_scalar(data, &x, relative, c.fX);
+ path.lineTo(x, c.fY);
+ c.fX = x;
+ } break;
+ case 'V': {
+ SkScalar y;
+ data = find_scalar(data, &y, relative, c.fY);
+ path.lineTo(c.fX, y);
+ c.fY = y;
+ } break;
+ case 'C':
+ data = find_points(data, points, 3, relative, &c);
+ goto cubicCommon;
+ case 'S':
+ data = find_points(data, &points[1], 2, relative, &c);
+ points[0] = c;
+ if (previousOp == 'C' || previousOp == 'S') {
+ points[0].fX -= lastc.fX - c.fX;
+ points[0].fY -= lastc.fY - c.fY;
+ }
+ cubicCommon:
+ path.cubicTo(points[0], points[1], points[2]);
+ lastc = points[1];
+ c = points[2];
+ break;
+ case 'Q': // Quadratic Bezier Curve
+ data = find_points(data, points, 2, relative, &c);
+ goto quadraticCommon;
+ case 'T':
+ data = find_points(data, &points[1], 1, relative, &c);
+ points[0] = c;
+ if (previousOp == 'Q' || previousOp == 'T') {
+ points[0].fX -= lastc.fX - c.fX;
+ points[0].fY -= lastc.fY - c.fY;
+ }
+ quadraticCommon:
+ path.quadTo(points[0], points[1]);
+ lastc = points[0];
+ c = points[1];
+ break;
+ case 'A': {
+ SkPoint radii;
+ SkScalar angle, largeArc, sweep;
+ if ((data = find_points(data, &radii, 1, false, nullptr))
+ && (data = skip_sep(data))
+ && (data = find_scalar(data, &angle, false, 0))
+ && (data = skip_sep(data))
+ && (data = find_scalar(data, &largeArc, false, 0))
+ && (data = skip_sep(data))
+ && (data = find_scalar(data, &sweep, false, 0))
+ && (data = skip_sep(data))
+ && (data = find_points(data, &points[0], 1, relative, &c))) {
+ path.arcTo(radii, angle, (SkPath::ArcSize) SkToBool(largeArc),
+ (SkPath::Direction) !SkToBool(sweep), points[0]);
+ path.getLastPt(&c);
+ }
+ } break;
+ case 'Z':
+ path.close();
+ c = first;
+ break;
+ case '~': {
+ SkPoint args[2];
+ data = find_points(data, args, 2, false, nullptr);
+ path.moveTo(args[0].fX, args[0].fY);
+ path.lineTo(args[1].fX, args[1].fY);
+ } break;
+ default:
+ return false;
+ }
+ if (previousOp == 0) {
+ first = c;
+ }
+ previousOp = op;
+ }
+ // we're good, go ahead and swap in the result
+ result->swap(path);
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkGeometry.h"
+#include "SkString.h"
+#include "SkStream.h"
+
+static void write_scalar(SkWStream* stream, SkScalar value) {
+ char buffer[64];
+#ifdef SK_BUILD_FOR_WIN32
+ int len = _snprintf(buffer, sizeof(buffer), "%g", value);
+#else
+ int len = snprintf(buffer, sizeof(buffer), "%g", value);
+#endif
+ char* stop = buffer + len;
+ stream->write(buffer, stop - buffer);
+}
+
+static void append_scalars(SkWStream* stream, char verb, const SkScalar data[],
+ int count) {
+ stream->write(&verb, 1);
+ write_scalar(stream, data[0]);
+ for (int i = 1; i < count; i++) {
+ stream->write(" ", 1);
+ write_scalar(stream, data[i]);
+ }
+}
+
+void SkParsePath::ToSVGString(const SkPath& path, SkString* str) {
+ SkDynamicMemoryWStream stream;
+
+ SkPath::Iter iter(path, false);
+ SkPoint pts[4];
+
+ for (;;) {
+ switch (iter.next(pts, false)) {
+ case SkPath::kConic_Verb: {
+ const SkScalar tol = SK_Scalar1 / 1024; // how close to a quad
+ SkAutoConicToQuads quadder;
+ const SkPoint* quadPts = quadder.computeQuads(pts, iter.conicWeight(), tol);
+ for (int i = 0; i < quadder.countQuads(); ++i) {
+ append_scalars(&stream, 'Q', &quadPts[i*2 + 1].fX, 4);
+ }
+ } break;
+ case SkPath::kMove_Verb:
+ append_scalars(&stream, 'M', &pts[0].fX, 2);
+ break;
+ case SkPath::kLine_Verb:
+ append_scalars(&stream, 'L', &pts[1].fX, 2);
+ break;
+ case SkPath::kQuad_Verb:
+ append_scalars(&stream, 'Q', &pts[1].fX, 4);
+ break;
+ case SkPath::kCubic_Verb:
+ append_scalars(&stream, 'C', &pts[1].fX, 6);
+ break;
+ case SkPath::kClose_Verb:
+ stream.write("Z", 1);
+ break;
+ case SkPath::kDone_Verb:
+ str->resize(stream.getOffset());
+ stream.copyTo(str->writable_str());
+ return;
+ }
+ }
+}
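A quick round-trip sketch of the two entry points above, with an arbitrary example path string:

    SkPath path;
    if (SkParsePath::FromSVGString("M0 0 L100 0 L100 100 Z", &path)) {
        SkString svg;
        SkParsePath::ToSVGString(path, &svg);
        // svg now holds an equivalent (re-serialized) path string.
    }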
diff --git a/gfx/skia/skia/src/utils/SkPatchGrid.cpp b/gfx/skia/skia/src/utils/SkPatchGrid.cpp
new file mode 100644
index 000000000..3b7c06e95
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkPatchGrid.cpp
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPatchGrid.h"
+#include "SkPatchUtils.h"
+
+SkPatchGrid::SkPatchGrid(int rows, int cols, VertexType flags, SkXfermode* xfer)
+ : fRows(0)
+ , fCols(0)
+ , fModeFlags(kNone_VertexType)
+ , fCornerPts(nullptr)
+ , fCornerColors(nullptr)
+ , fTexCoords(nullptr)
+ , fHrzCtrlPts(nullptr)
+ , fVrtCtrlPts(nullptr)
+ , fXferMode(nullptr) {
+ this->reset(rows, cols, flags, xfer);
+}
+
+SkPatchGrid::~SkPatchGrid() {
+ delete[] fCornerPts;
+ delete[] fCornerColors;
+ delete[] fTexCoords;
+ delete[] fHrzCtrlPts;
+ delete[] fVrtCtrlPts;
+}
+
+bool SkPatchGrid::setPatch(int x, int y, const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4]) {
+ // Check that the passed parameters are within the range of the grid dimensions and that the
+ // pointer to the cubics' control points is valid.
+ if (x < 0 || y < 0 || x > fCols - 1 || y > fRows - 1 || nullptr == cubics) {
+ return false;
+ }
+
+ // setup corners and colors
+ int cornerPos = y * (fCols + 1) + x;
+ fCornerPts[cornerPos] = cubics[SkPatchUtils::kTopP0_CubicCtrlPts];
+ fCornerPts[cornerPos + 1] = cubics[SkPatchUtils::kTopP3_CubicCtrlPts];
+ fCornerPts[cornerPos + (fCols + 1)] = cubics[SkPatchUtils::kBottomP0_CubicCtrlPts];
+ fCornerPts[cornerPos + (fCols + 1) + 1] = cubics[SkPatchUtils::kBottomP3_CubicCtrlPts];
+
+ // set horizontal control points
+ int hrzPos = y * (fCols * 2) + (x * 2);
+ fHrzCtrlPts[hrzPos] = cubics[SkPatchUtils::kTopP1_CubicCtrlPts];
+ fHrzCtrlPts[hrzPos + 1] = cubics[SkPatchUtils::kTopP2_CubicCtrlPts];
+ fHrzCtrlPts[hrzPos + (fCols * 2)] = cubics[SkPatchUtils::kBottomP1_CubicCtrlPts];
+ fHrzCtrlPts[hrzPos + (fCols * 2) + 1] = cubics[SkPatchUtils::kBottomP2_CubicCtrlPts];
+
+ // set vertical control points
+ int vrtPos = (y*2) * (fCols + 1) + x;
+ fVrtCtrlPts[vrtPos] = cubics[SkPatchUtils::kLeftP1_CubicCtrlPts];
+ fVrtCtrlPts[vrtPos + 1] = cubics[SkPatchUtils::kRightP1_CubicCtrlPts];
+ fVrtCtrlPts[vrtPos + (fCols + 1)] = cubics[SkPatchUtils::kLeftP2_CubicCtrlPts];
+ fVrtCtrlPts[vrtPos + (fCols + 1) + 1] = cubics[SkPatchUtils::kRightP2_CubicCtrlPts];
+
+ // set optional values (colors and texture coordinates)
+ if ((fModeFlags & kColors_VertexType) && colors) {
+ fCornerColors[cornerPos] = colors[0];
+ fCornerColors[cornerPos + 1] = colors[1];
+ fCornerColors[cornerPos + (fCols + 1)] = colors[3];
+ fCornerColors[cornerPos + (fCols + 1) + 1] = colors[2];
+ }
+
+ if ((fModeFlags & kTexs_VertexType) && texCoords) {
+ fTexCoords[cornerPos] = texCoords[0];
+ fTexCoords[cornerPos + 1] = texCoords[1];
+ fTexCoords[cornerPos + (fCols + 1)] = texCoords[3];
+ fTexCoords[cornerPos + (fCols + 1) + 1] = texCoords[2];
+ }
+
+ return true;
+}
+
+bool SkPatchGrid::getPatch(int x, int y, SkPoint cubics[12], SkColor colors[4],
+ SkPoint texCoords[4]) const {
+
+ if (x < 0 || y < 0 || x > fCols - 1 || y > fRows - 1 || nullptr == cubics) {
+ return false;
+ }
+
+    // Build the patch by filling the arrays of points and colors with the corresponding stored values.
+ int cornerPos = y * (fCols + 1) + x;
+ cubics[SkPatchUtils::kTopP0_CubicCtrlPts] = fCornerPts[cornerPos];
+ cubics[SkPatchUtils::kTopP3_CubicCtrlPts] = fCornerPts[cornerPos + 1];
+ cubics[SkPatchUtils::kBottomP0_CubicCtrlPts] = fCornerPts[cornerPos + (fCols + 1)];
+ cubics[SkPatchUtils::kBottomP3_CubicCtrlPts] = fCornerPts[cornerPos + (fCols + 1) + 1];
+
+ int hrzPos = y * (fCols * 2) + (x * 2);
+ cubics[SkPatchUtils::kTopP1_CubicCtrlPts] = fHrzCtrlPts[hrzPos];
+ cubics[SkPatchUtils::kTopP2_CubicCtrlPts] = fHrzCtrlPts[hrzPos + 1];
+ cubics[SkPatchUtils::kBottomP1_CubicCtrlPts] = fHrzCtrlPts[hrzPos + (fCols * 2)];
+ cubics[SkPatchUtils::kBottomP2_CubicCtrlPts] = fHrzCtrlPts[hrzPos + (fCols * 2) + 1];
+
+ int vrtPos = (y*2) * (fCols + 1) + x;
+ cubics[SkPatchUtils::kLeftP1_CubicCtrlPts] = fVrtCtrlPts[vrtPos];
+ cubics[SkPatchUtils::kRightP1_CubicCtrlPts] = fVrtCtrlPts[vrtPos + 1];
+ cubics[SkPatchUtils::kLeftP2_CubicCtrlPts] = fVrtCtrlPts[vrtPos + (fCols + 1)];
+ cubics[SkPatchUtils::kRightP2_CubicCtrlPts] = fVrtCtrlPts[vrtPos + (fCols + 1) + 1];
+
+ if ((fModeFlags & kColors_VertexType) && colors) {
+ colors[0] = fCornerColors[cornerPos];
+ colors[1] = fCornerColors[cornerPos + 1];
+ colors[3] = fCornerColors[cornerPos + (fCols + 1)];
+ colors[2] = fCornerColors[cornerPos + (fCols + 1) + 1];
+ }
+
+ if ((fModeFlags & kTexs_VertexType) && texCoords) {
+ texCoords[0] = fTexCoords[cornerPos];
+ texCoords[1] = fTexCoords[cornerPos + 1];
+ texCoords[3] = fTexCoords[cornerPos + (fCols + 1)];
+ texCoords[2] = fTexCoords[cornerPos + (fCols + 1) + 1];
+ }
+
+ return true;
+}
+
+void SkPatchGrid::reset(int rows, int cols, VertexType flags, SkXfermode* xMode) {
+ delete[] fCornerPts;
+ delete[] fCornerColors;
+ delete[] fTexCoords;
+ delete[] fHrzCtrlPts;
+ delete[] fVrtCtrlPts;
+
+ fCols = cols;
+ fRows = rows;
+ fModeFlags = flags;
+ fXferMode = xMode;
+
+ fCornerPts = new SkPoint[(fRows + 1) * (fCols + 1)];
+ fHrzCtrlPts = new SkPoint[(fRows + 1) * fCols * 2];
+ fVrtCtrlPts = new SkPoint[fRows * 2 * (fCols + 1)];
+ memset(fCornerPts, 0, (fRows + 1) * (fCols + 1) * sizeof(SkPoint));
+ memset(fHrzCtrlPts, 0, (fRows + 1) * fCols * 2 * sizeof(SkPoint));
+ memset(fVrtCtrlPts, 0, fRows * 2 * (fCols + 1) * sizeof(SkPoint));
+
+ if (fModeFlags & kColors_VertexType) {
+ fCornerColors = new SkColor[(fRows + 1) * (fCols + 1)];
+ memset(fCornerColors, 0, (fRows + 1) * (fCols + 1) * sizeof(SkColor));
+ }
+
+ if (fModeFlags & kTexs_VertexType) {
+ fTexCoords = new SkPoint[(fRows + 1) * (fCols + 1)];
+ memset(fTexCoords, 0, (fRows + 1) * (fCols + 1) * sizeof(SkPoint));
+ }
+}
+
+void SkPatchGrid::draw(SkCanvas* canvas, SkPaint& paint) {
+ int* maxCols = new int[fCols];
+ int* maxRows = new int[fRows];
+ memset(maxCols, 0, fCols * sizeof(int));
+ memset(maxRows, 0, fRows * sizeof(int));
+
+ // Get the maximum level of detail per axis for each row and column
+ for (int y = 0; y < fRows; y++) {
+ for (int x = 0; x < fCols; x++) {
+ SkPoint cubics[12];
+ this->getPatch(x, y, cubics, nullptr, nullptr);
+ SkMatrix matrix = canvas->getTotalMatrix();
+ SkISize lod = SkPatchUtils::GetLevelOfDetail(cubics, &matrix);
+ maxCols[x] = SkMax32(maxCols[x], lod.width());
+ maxRows[y] = SkMax32(maxRows[y], lod.height());
+ }
+ }
+ // Draw the patches by generating their geometry with the maximum level of detail per axis.
+ for (int x = 0; x < fCols; x++) {
+ for (int y = 0; y < fRows; y++) {
+ SkPoint cubics[12];
+ SkPoint texCoords[4];
+ SkColor colors[4];
+ this->getPatch(x, y, cubics, colors, texCoords);
+ SkPatchUtils::VertexData data;
+ if (SkPatchUtils::getVertexData(&data, cubics,
+ fModeFlags & kColors_VertexType ? colors : nullptr,
+ fModeFlags & kTexs_VertexType ? texCoords : nullptr,
+ maxCols[x], maxRows[y])) {
+ canvas->drawVertices(SkCanvas::kTriangles_VertexMode, data.fVertexCount,
+ data.fPoints, data.fTexCoords, data.fColors, fXferMode,
+ data.fIndices, data.fIndexCount, paint);
+ }
+ }
+ }
+ delete[] maxCols;
+ delete[] maxRows;
+}
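
To make the intended call pattern of SkPatchGrid concrete, here is a hypothetical sketch (not part of the patch) that fills a 1x1 grid with one coons patch and draws it; the point values are arbitrary and the layout follows the CubicCtrlPts enum from SkPatchUtils.h further below.

#include "SkCanvas.h"
#include "SkPatchGrid.h"

static void draw_one_patch(SkCanvas* canvas) {
    SkPatchGrid grid(1, 1, SkPatchGrid::kColors_VertexType, nullptr);

    // 12 control points in clockwise order, starting at the top-left corner.
    SkPoint cubics[SkPatchUtils::kNumCtrlPts] = {
        {50, 50},   {100, 30},  {200, 70},  {250, 50},   // top edge (P0..P3)
        {270, 100}, {230, 200},                          // right edge interior (P1, P2)
        {250, 250},                                      // bottom-right corner
        {200, 230}, {100, 270},                          // bottom edge interior (P2, P1)
        {50, 250},                                       // bottom-left corner
        {30, 200},  {70, 100},                           // left edge interior (P2, P1)
    };
    SkColor colors[SkPatchUtils::kNumCorners] = {
        SK_ColorRED, SK_ColorGREEN, SK_ColorBLUE, SK_ColorYELLOW,
    };

    grid.setPatch(0, 0, cubics, colors, nullptr);

    SkPaint paint;
    grid.draw(canvas, paint);
}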
diff --git a/gfx/skia/skia/src/utils/SkPatchGrid.h b/gfx/skia/skia/src/utils/SkPatchGrid.h
new file mode 100644
index 000000000..ca2a35b91
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkPatchGrid.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPatchGrid_DEFINED
+#define SkPatchGrid_DEFINED
+
+#include "SkCanvas.h"
+#include "SkPatchUtils.h"
+#include "SkXfermode.h"
+
+/**
+ * Class that represents a grid of patches. Adjacent patches share their corners and a color is
+ * specified at each one of them. The colors are bilinearly interpolated across the patch.
+ *
+ * This implementation defines a two-dimensional array of patches. There are 3 arrays to store the
+ * control points of the patches to avoid storing repeated data, since several points are shared
+ * between adjacent patches.
+ *
+ * The array fCornerPts stores the corner control points of the patches.
+ * The array fHrzPts holds the intermediate control points of the top and bottom curves of a patch.
+ * The array fVrtPts holds the intermediate control points of the left and right curves of a patch.
+ * The array fCornerColors holds the corner colors in the same format as fCornerPts.
+ * The array fTexCoords holds the texture coordinates in the same format as fCornerPts.
+ *
+ * fCornerPts fHrzPts fVrtPts
+ * -------------- ------------------- --------------
+ * | C0 | C1 | C2 | | H0 | H1 | H2 | H3 | | V0 | V1 | V2 |
+ * -------------- ------------------ ---------------
+ * | C3 | C4 | C5 | | H4 | H5 | H6 | H7 | | V4 | V5 | V6 |
+ * -------------- ------------------- --------------
+ * | C6 | C7 | C8 | | H8 | H9 | H10| H11| | V6 | V7 | V8 |
+ * -------------- ------------------- --------------
+ * | V9 | V10| V11|
+ * --------------
+ *
+ * With the above configuration we would have a 2x2 grid of patches:
+ * H0 H1 H2 H3
+ * / \/ \
+ * C0-------C1-------C2
+ * /| | |\
+ * v0 | v1 | v2
+ * v3 | V4 | v5
+ * \| | |/
+ * C3-H4-H5-C4-H6-H7-C5
+ * /| | |\
+ * v6 | v7 | v8
+ * v9 | v10 | v11
+ * \| | |/
+ * C6-------C7-------C8
+ * \ / \ /
+ * H8 H9 H10 H11
+ *
+ * When a patch is requested at a certain position, it is simply built from the corresponding
+ * points.
+ * When a patch is added, its points are stored at their corresponding positions, reusing the
+ * points already shared with adjacent patches or overwriting them.
+ *
+ * The idea is based on the SVG2 spec for mesh gradients, in which a grid of patches is built as in
+ * the following example:
+ * <meshGradient x="100" y="100">
+ * <meshRow>
+ * <meshPatch>
+ * <stop .../>
+ * Up to four stops in first patch. See details below.
+ * </meshPatch>
+ * <meshPatch>
+ * Any number of meshPatches in row.
+ * </meshPatch>
+ * </meshRow>
+ * <meshRow>
+ * Any number of meshRows, each with the same number of meshPatches as in the first row.
+ * </meshRow>
+ * </meshGradient>
+ */
+class SkPatchGrid {
+
+public:
+
+ enum VertexType {
+ kNone_VertexType = 0X00,
+ kColors_VertexType = 0x01,
+ kTexs_VertexType = 0x02,
+ kColorsAndTexs_VertexType = 0x03
+ };
+
+ SkPatchGrid(int rows = 0, int cols = 0, VertexType flags = kNone_VertexType,
+ SkXfermode* xfer = nullptr);
+
+ ~SkPatchGrid();
+
+ /**
+     * Add a patch at location (x,y), overwriting the previous patch and the shared points so they
+     * maintain C0 connectivity.
+     * The control points must be passed in clockwise order starting at the top left corner.
+     * The colors and texCoords are the values at the corners of the patch, which will be
+     * bilinearly interpolated across it; they must also be in counterclockwise order starting at
+     * the top left corner.
+ */
+ bool setPatch(int x, int y, const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4]);
+
+ /**
+     * Get the patch at location (x,y). If cubics, colors or texCoords is not nullptr, it is filled
+     * with the patch's corresponding values.
+     * The function returns false if the cubics parameter is nullptr or if the (x,y) coordinates
+     * are not within the range of the grid.
+ */
+ bool getPatch(int x, int y, SkPoint cubics[12], SkColor colors[4], SkPoint texCoords[4]) const;
+
+ /**
+ * Resets the grid of patches to contain rows and cols of patches.
+ */
+ void reset(int rows, int cols, VertexType flags, SkXfermode* xMode);
+
+ /**
+ * Draws the grid of patches. The patches are drawn starting at patch (0,0) drawing columns, so
+ * for a 2x2 grid the order would be (0,0)->(0,1)->(1,0)->(1,1). The order follows the order
+ * of the parametric coordinates of the coons patch.
+ */
+ void draw(SkCanvas* canvas, SkPaint& paint);
+
+ /**
+ * Get the dimensions of the grid of patches.
+ */
+ SkISize getDimensions() const {
+ return SkISize::Make(fCols, fRows);
+ }
+
+private:
+ int fRows, fCols;
+ VertexType fModeFlags;
+ SkPoint* fCornerPts;
+ SkColor* fCornerColors;
+ SkPoint* fTexCoords;
+ SkPoint* fHrzCtrlPts;
+ SkPoint* fVrtCtrlPts;
+ SkXfermode* fXferMode;
+};
+
+
+#endif
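
The allocation sizes in SkPatchGrid::reset() line up with the diagram in the class comment above; a small compile-time check (illustrative only, not part of the patch) makes the arithmetic explicit for the 2x2 example.

// For a 2x2 grid of patches (fRows = fCols = 2), reset() allocates:
constexpr int rows = 2, cols = 2;
static_assert((rows + 1) * (cols + 1) == 9,  "corner points C0..C8");
static_assert((rows + 1) * cols * 2   == 12, "horizontal control points H0..H11");
static_assert(rows * 2 * (cols + 1)   == 12, "vertical control points V0..V11");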
diff --git a/gfx/skia/skia/src/utils/SkPatchUtils.cpp b/gfx/skia/skia/src/utils/SkPatchUtils.cpp
new file mode 100644
index 000000000..cbaae39e8
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkPatchUtils.cpp
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPatchUtils.h"
+
+#include "SkColorPriv.h"
+#include "SkGeometry.h"
+
+/**
+ * Evaluator that samples the values of a cubic bezier using forward differences.
+ * Forward differencing is a method for evaluating an nth-degree polynomial at uniform steps using
+ * only additions of precalculated values.
+ * For the linear case, take the function f(t) = m*t + b; its value at t+h is
+ * f(t+h) = m*(t+h) + b. The uniform step that must be added to the first evaluation f(t) is
+ * f(t+h) - f(t) = (m*t + m*h + b) - (m*t + b) = m*h. Once this constant (m*h) is known, each new
+ * sample is obtained by simply adding it to the previous one.
+ *
+ * For the cubic case, the first difference is a quadratic polynomial, to which forward differences
+ * can be applied again to get a linear function, and once more to get a constant difference. This
+ * is why we keep an array of size 4: the 0th position holds the sampled value, while the next ones
+ * hold the quadratic, linear and constant difference values.
+ */
+
+class FwDCubicEvaluator {
+
+public:
+
+ /**
+ * Receives the 4 control points of the cubic bezier.
+ */
+
+ explicit FwDCubicEvaluator(const SkPoint points[4])
+ : fCoefs(points) {
+ memcpy(fPoints, points, 4 * sizeof(SkPoint));
+
+ this->restart(1);
+ }
+
+ /**
+ * Restarts the forward differences evaluator to the first value of t = 0.
+ */
+ void restart(int divisions) {
+ fDivisions = divisions;
+ fCurrent = 0;
+ fMax = fDivisions + 1;
+ Sk2s h = Sk2s(1.f / fDivisions);
+ Sk2s h2 = h * h;
+ Sk2s h3 = h2 * h;
+ Sk2s fwDiff3 = Sk2s(6) * fCoefs.fA * h3;
+ fFwDiff[3] = to_point(fwDiff3);
+ fFwDiff[2] = to_point(fwDiff3 + times_2(fCoefs.fB) * h2);
+ fFwDiff[1] = to_point(fCoefs.fA * h3 + fCoefs.fB * h2 + fCoefs.fC * h);
+ fFwDiff[0] = to_point(fCoefs.fD);
+ }
+
+ /**
+     * Returns true once the evaluator has finished sampling the range 0 <= t <= 1.
+ */
+ bool done() const {
+ return fCurrent > fMax;
+ }
+
+ /**
+ * Call next to obtain the SkPoint sampled and move to the next one.
+ */
+ SkPoint next() {
+ SkPoint point = fFwDiff[0];
+ fFwDiff[0] += fFwDiff[1];
+ fFwDiff[1] += fFwDiff[2];
+ fFwDiff[2] += fFwDiff[3];
+ fCurrent++;
+ return point;
+ }
+
+ const SkPoint* getCtrlPoints() const {
+ return fPoints;
+ }
+
+private:
+ SkCubicCoeff fCoefs;
+ int fMax, fCurrent, fDivisions;
+ SkPoint fFwDiff[4], fPoints[4];
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+// size in pixels of each partition per axis, adjust this knob
+static const int kPartitionSize = 10;
+
+/**
+ * Calculate the approximate arc length given a bezier curve's control points.
+ */
+static SkScalar approx_arc_length(SkPoint* points, int count) {
+ if (count < 2) {
+ return 0;
+ }
+ SkScalar arcLength = 0;
+ for (int i = 0; i < count - 1; i++) {
+ arcLength += SkPoint::Distance(points[i], points[i + 1]);
+ }
+ return arcLength;
+}
+
+static SkScalar bilerp(SkScalar tx, SkScalar ty, SkScalar c00, SkScalar c10, SkScalar c01,
+ SkScalar c11) {
+ SkScalar a = c00 * (1.f - tx) + c10 * tx;
+ SkScalar b = c01 * (1.f - tx) + c11 * tx;
+ return a * (1.f - ty) + b * ty;
+}
+
+SkISize SkPatchUtils::GetLevelOfDetail(const SkPoint cubics[12], const SkMatrix* matrix) {
+
+ // Approximate length of each cubic.
+ SkPoint pts[kNumPtsCubic];
+ SkPatchUtils::getTopCubic(cubics, pts);
+ matrix->mapPoints(pts, kNumPtsCubic);
+ SkScalar topLength = approx_arc_length(pts, kNumPtsCubic);
+
+ SkPatchUtils::getBottomCubic(cubics, pts);
+ matrix->mapPoints(pts, kNumPtsCubic);
+ SkScalar bottomLength = approx_arc_length(pts, kNumPtsCubic);
+
+ SkPatchUtils::getLeftCubic(cubics, pts);
+ matrix->mapPoints(pts, kNumPtsCubic);
+ SkScalar leftLength = approx_arc_length(pts, kNumPtsCubic);
+
+ SkPatchUtils::getRightCubic(cubics, pts);
+ matrix->mapPoints(pts, kNumPtsCubic);
+ SkScalar rightLength = approx_arc_length(pts, kNumPtsCubic);
+
+ // Level of detail per axis, based on the larger side between top and bottom or left and right
+ int lodX = static_cast<int>(SkMaxScalar(topLength, bottomLength) / kPartitionSize);
+ int lodY = static_cast<int>(SkMaxScalar(leftLength, rightLength) / kPartitionSize);
+
+ return SkISize::Make(SkMax32(8, lodX), SkMax32(8, lodY));
+}
+
+void SkPatchUtils::getTopCubic(const SkPoint cubics[12], SkPoint points[4]) {
+ points[0] = cubics[kTopP0_CubicCtrlPts];
+ points[1] = cubics[kTopP1_CubicCtrlPts];
+ points[2] = cubics[kTopP2_CubicCtrlPts];
+ points[3] = cubics[kTopP3_CubicCtrlPts];
+}
+
+void SkPatchUtils::getBottomCubic(const SkPoint cubics[12], SkPoint points[4]) {
+ points[0] = cubics[kBottomP0_CubicCtrlPts];
+ points[1] = cubics[kBottomP1_CubicCtrlPts];
+ points[2] = cubics[kBottomP2_CubicCtrlPts];
+ points[3] = cubics[kBottomP3_CubicCtrlPts];
+}
+
+void SkPatchUtils::getLeftCubic(const SkPoint cubics[12], SkPoint points[4]) {
+ points[0] = cubics[kLeftP0_CubicCtrlPts];
+ points[1] = cubics[kLeftP1_CubicCtrlPts];
+ points[2] = cubics[kLeftP2_CubicCtrlPts];
+ points[3] = cubics[kLeftP3_CubicCtrlPts];
+}
+
+void SkPatchUtils::getRightCubic(const SkPoint cubics[12], SkPoint points[4]) {
+ points[0] = cubics[kRightP0_CubicCtrlPts];
+ points[1] = cubics[kRightP1_CubicCtrlPts];
+ points[2] = cubics[kRightP2_CubicCtrlPts];
+ points[3] = cubics[kRightP3_CubicCtrlPts];
+}
+
+bool SkPatchUtils::getVertexData(SkPatchUtils::VertexData* data, const SkPoint cubics[12],
+ const SkColor colors[4], const SkPoint texCoords[4], int lodX, int lodY) {
+ if (lodX < 1 || lodY < 1 || nullptr == cubics || nullptr == data) {
+ return false;
+ }
+
+ // check for overflow in multiplication
+ const int64_t lodX64 = (lodX + 1),
+ lodY64 = (lodY + 1),
+ mult64 = lodX64 * lodY64;
+ if (mult64 > SK_MaxS32) {
+ return false;
+ }
+ data->fVertexCount = SkToS32(mult64);
+
+    // It is recommended to generate draw calls with no more than 65536 indices, so we never
+    // generate more than 60000 indices. To accomplish that we resize the LOD and the vertex count.
+ if (data->fVertexCount > 10000 || lodX > 200 || lodY > 200) {
+ SkScalar weightX = static_cast<SkScalar>(lodX) / (lodX + lodY);
+ SkScalar weightY = static_cast<SkScalar>(lodY) / (lodX + lodY);
+
+        // The 200 is the combined budget for both axes (2 * 100, where 100 = sqrt(60000 / 6));
+        // splitting it by weight keeps lodX * lodY <= 10000, so fIndexCount = lodX * lodY * 6 <= 60000.
+ lodX = static_cast<int>(weightX * 200);
+ lodY = static_cast<int>(weightY * 200);
+ data->fVertexCount = (lodX + 1) * (lodY + 1);
+ }
+ data->fIndexCount = lodX * lodY * 6;
+
+ data->fPoints = new SkPoint[data->fVertexCount];
+ data->fIndices = new uint16_t[data->fIndexCount];
+
+ // if colors is not null then create array for colors
+ SkPMColor colorsPM[kNumCorners];
+ if (colors) {
+ // premultiply colors to avoid color bleeding.
+ for (int i = 0; i < kNumCorners; i++) {
+ colorsPM[i] = SkPreMultiplyColor(colors[i]);
+ }
+ data->fColors = new uint32_t[data->fVertexCount];
+ }
+
+ // if texture coordinates are not null then create array for them
+ if (texCoords) {
+ data->fTexCoords = new SkPoint[data->fVertexCount];
+ }
+
+ SkPoint pts[kNumPtsCubic];
+ SkPatchUtils::getBottomCubic(cubics, pts);
+ FwDCubicEvaluator fBottom(pts);
+ SkPatchUtils::getTopCubic(cubics, pts);
+ FwDCubicEvaluator fTop(pts);
+ SkPatchUtils::getLeftCubic(cubics, pts);
+ FwDCubicEvaluator fLeft(pts);
+ SkPatchUtils::getRightCubic(cubics, pts);
+ FwDCubicEvaluator fRight(pts);
+
+ fBottom.restart(lodX);
+ fTop.restart(lodX);
+
+ SkScalar u = 0.0f;
+ int stride = lodY + 1;
+ for (int x = 0; x <= lodX; x++) {
+ SkPoint bottom = fBottom.next(), top = fTop.next();
+ fLeft.restart(lodY);
+ fRight.restart(lodY);
+ SkScalar v = 0.f;
+ for (int y = 0; y <= lodY; y++) {
+ int dataIndex = x * (lodY + 1) + y;
+
+ SkPoint left = fLeft.next(), right = fRight.next();
+
+ SkPoint s0 = SkPoint::Make((1.0f - v) * top.x() + v * bottom.x(),
+ (1.0f - v) * top.y() + v * bottom.y());
+ SkPoint s1 = SkPoint::Make((1.0f - u) * left.x() + u * right.x(),
+ (1.0f - u) * left.y() + u * right.y());
+ SkPoint s2 = SkPoint::Make(
+ (1.0f - v) * ((1.0f - u) * fTop.getCtrlPoints()[0].x()
+ + u * fTop.getCtrlPoints()[3].x())
+ + v * ((1.0f - u) * fBottom.getCtrlPoints()[0].x()
+ + u * fBottom.getCtrlPoints()[3].x()),
+ (1.0f - v) * ((1.0f - u) * fTop.getCtrlPoints()[0].y()
+ + u * fTop.getCtrlPoints()[3].y())
+ + v * ((1.0f - u) * fBottom.getCtrlPoints()[0].y()
+ + u * fBottom.getCtrlPoints()[3].y()));
+ data->fPoints[dataIndex] = s0 + s1 - s2;
+
+ if (colors) {
+ uint8_t a = uint8_t(bilerp(u, v,
+ SkScalar(SkColorGetA(colorsPM[kTopLeft_Corner])),
+ SkScalar(SkColorGetA(colorsPM[kTopRight_Corner])),
+ SkScalar(SkColorGetA(colorsPM[kBottomLeft_Corner])),
+ SkScalar(SkColorGetA(colorsPM[kBottomRight_Corner]))));
+ uint8_t r = uint8_t(bilerp(u, v,
+ SkScalar(SkColorGetR(colorsPM[kTopLeft_Corner])),
+ SkScalar(SkColorGetR(colorsPM[kTopRight_Corner])),
+ SkScalar(SkColorGetR(colorsPM[kBottomLeft_Corner])),
+ SkScalar(SkColorGetR(colorsPM[kBottomRight_Corner]))));
+ uint8_t g = uint8_t(bilerp(u, v,
+ SkScalar(SkColorGetG(colorsPM[kTopLeft_Corner])),
+ SkScalar(SkColorGetG(colorsPM[kTopRight_Corner])),
+ SkScalar(SkColorGetG(colorsPM[kBottomLeft_Corner])),
+ SkScalar(SkColorGetG(colorsPM[kBottomRight_Corner]))));
+ uint8_t b = uint8_t(bilerp(u, v,
+ SkScalar(SkColorGetB(colorsPM[kTopLeft_Corner])),
+ SkScalar(SkColorGetB(colorsPM[kTopRight_Corner])),
+ SkScalar(SkColorGetB(colorsPM[kBottomLeft_Corner])),
+ SkScalar(SkColorGetB(colorsPM[kBottomRight_Corner]))));
+ data->fColors[dataIndex] = SkPackARGB32(a,r,g,b);
+ }
+
+ if (texCoords) {
+ data->fTexCoords[dataIndex] = SkPoint::Make(
+ bilerp(u, v, texCoords[kTopLeft_Corner].x(),
+ texCoords[kTopRight_Corner].x(),
+ texCoords[kBottomLeft_Corner].x(),
+ texCoords[kBottomRight_Corner].x()),
+ bilerp(u, v, texCoords[kTopLeft_Corner].y(),
+ texCoords[kTopRight_Corner].y(),
+ texCoords[kBottomLeft_Corner].y(),
+ texCoords[kBottomRight_Corner].y()));
+
+ }
+
+ if(x < lodX && y < lodY) {
+ int i = 6 * (x * lodY + y);
+ data->fIndices[i] = x * stride + y;
+ data->fIndices[i + 1] = x * stride + 1 + y;
+ data->fIndices[i + 2] = (x + 1) * stride + 1 + y;
+ data->fIndices[i + 3] = data->fIndices[i];
+ data->fIndices[i + 4] = data->fIndices[i + 2];
+ data->fIndices[i + 5] = (x + 1) * stride + y;
+ }
+ v = SkScalarClampMax(v + 1.f / lodY, 1);
+ }
+ u = SkScalarClampMax(u + 1.f / lodX, 1);
+ }
+ return true;
+
+}
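
The setup in FwDCubicEvaluator::restart() can be sanity-checked on a scalar cubic. The standalone sketch below (hypothetical, not part of the patch) reproduces the same difference table for f(t) = t^3 with 4 divisions and prints the exact samples 0, 0.015625, 0.125, 0.421875, 1.

#include <cstdio>

int main() {
    const double a = 1, b = 0, c = 0, d = 0;   // f(t) = a*t^3 + b*t^2 + c*t + d = t^3
    const double h = 0.25;                     // 1 / divisions, as in restart(4)

    double f  = d;                             // sampled value,     cf. fFwDiff[0]
    double d1 = a*h*h*h + b*h*h + c*h;         // first difference,  cf. fFwDiff[1]
    double d2 = 6*a*h*h*h + 2*b*h*h;           // second difference, cf. fFwDiff[2]
    double d3 = 6*a*h*h*h;                     // third (constant),  cf. fFwDiff[3]

    for (int i = 0; i <= 4; ++i) {
        std::printf("f(%g) = %g\n", i * h, f);
        f += d1; d1 += d2; d2 += d3;           // same update as FwDCubicEvaluator::next()
    }
    return 0;
}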
diff --git a/gfx/skia/skia/src/utils/SkPatchUtils.h b/gfx/skia/skia/src/utils/SkPatchUtils.h
new file mode 100644
index 000000000..67ab621e8
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkPatchUtils.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPatchUtils_DEFINED
+#define SkPatchUtils_DEFINED
+
+#include "SkColorPriv.h"
+#include "SkMatrix.h"
+
+class SK_API SkPatchUtils {
+
+public:
+ /**
+ * Structure that holds the vertex data related to the tessellation of a patch. It is passed
+ * as a parameter to the function getVertexData which sets the points, colors and texture
+ * coordinates of the vertices and the indices for them to be drawn as triangles.
+ */
+ struct VertexData {
+ int fVertexCount, fIndexCount;
+ SkPoint* fPoints;
+ SkPoint* fTexCoords;
+ uint32_t* fColors;
+ uint16_t* fIndices;
+
+ VertexData()
+ : fVertexCount(0)
+ , fIndexCount(0)
+ , fPoints(nullptr)
+ , fTexCoords(nullptr)
+ , fColors(nullptr)
+ , fIndices(nullptr) { }
+
+ ~VertexData() {
+ delete[] fPoints;
+ delete[] fTexCoords;
+ delete[] fColors;
+ delete[] fIndices;
+ }
+ };
+
+ // Enums for control points based on the order specified in the constructor (clockwise).
+ enum CubicCtrlPts {
+ kTopP0_CubicCtrlPts = 0,
+ kTopP1_CubicCtrlPts = 1,
+ kTopP2_CubicCtrlPts = 2,
+ kTopP3_CubicCtrlPts = 3,
+
+ kRightP0_CubicCtrlPts = 3,
+ kRightP1_CubicCtrlPts = 4,
+ kRightP2_CubicCtrlPts = 5,
+ kRightP3_CubicCtrlPts = 6,
+
+ kBottomP0_CubicCtrlPts = 9,
+ kBottomP1_CubicCtrlPts = 8,
+ kBottomP2_CubicCtrlPts = 7,
+ kBottomP3_CubicCtrlPts = 6,
+
+ kLeftP0_CubicCtrlPts = 0,
+ kLeftP1_CubicCtrlPts = 11,
+ kLeftP2_CubicCtrlPts = 10,
+ kLeftP3_CubicCtrlPts = 9,
+ };
+
+    // Enum for the corners, also in clockwise order.
+ enum Corner {
+ kTopLeft_Corner = 0,
+ kTopRight_Corner,
+ kBottomRight_Corner,
+ kBottomLeft_Corner
+ };
+
+ enum {
+ kNumCtrlPts = 12,
+ kNumCorners = 4,
+ kNumPtsCubic = 4
+ };
+
+ /**
+     * Method that calculates a level of detail (number of subdivisions) for a patch in both axes.
+ */
+ static SkISize GetLevelOfDetail(const SkPoint cubics[12], const SkMatrix* matrix);
+
+ /**
+ * Get the points corresponding to the top cubic of cubics.
+ */
+ static void getTopCubic(const SkPoint cubics[12], SkPoint points[4]);
+
+ /**
+ * Get the points corresponding to the bottom cubic of cubics.
+ */
+ static void getBottomCubic(const SkPoint cubics[12], SkPoint points[4]);
+
+ /**
+ * Get the points corresponding to the left cubic of cubics.
+ */
+ static void getLeftCubic(const SkPoint cubics[12], SkPoint points[4]);
+
+ /**
+ * Get the points corresponding to the right cubic of cubics.
+ */
+ static void getRightCubic(const SkPoint cubics[12], SkPoint points[4]);
+
+ /**
+ * Function that evaluates the coons patch interpolation.
+     * data is a pointer to the VertexData struct in which the tessellation data is set.
+     * cubics refers to the control points of the cubics.
+     * lodX and lodY give the level of detail for each axis.
+     * colors are the corner colors that will be bilinearly interpolated across the patch
+     * (optional parameter).
+     * texCoords are the corner texture coordinates that will be bilinearly interpolated across
+     * the patch (optional parameter).
+ */
+ static bool getVertexData(SkPatchUtils::VertexData* data, const SkPoint cubics[12],
+ const SkColor colors[4], const SkPoint texCoords[4],
+ int lodX, int lodY);
+};
+
+#endif
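
As a worked example of the LOD clamping documented in SkPatchUtils.cpp above (numbers are illustrative): with lodX = 300 and lodY = 100, the weights are 300/400 = 0.75 and 100/400 = 0.25, so the LODs become 150 and 50; the result is fVertexCount = 151 * 51 = 7701 and fIndexCount = 150 * 50 * 6 = 45000, comfortably under the 60000-index budget.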
diff --git a/gfx/skia/skia/src/utils/SkRGBAToYUV.cpp b/gfx/skia/skia/src/utils/SkRGBAToYUV.cpp
new file mode 100644
index 000000000..0528b144f
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkRGBAToYUV.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkRGBAToYUV.h"
+#include "SkCanvas.h"
+#include "SkColorMatrixFilterRowMajor255.h"
+#include "SkImage.h"
+#include "SkPaint.h"
+#include "SkSurface.h"
+
+bool SkRGBAToYUV(const SkImage* image, const SkISize sizes[3], void* const planes[3],
+ const size_t rowBytes[3], SkYUVColorSpace colorSpace) {
+ // Matrices that go from RGBA to YUV.
+ static const SkScalar kYUVColorSpaceInvMatrices[][15] = {
+ // kJPEG_SkYUVColorSpace
+ { 0.299001f, 0.586998f, 0.114001f, 0.f, 0.0000821798f * 255.f,
+ -0.168736f, -0.331263f, 0.499999f, 0.f, 0.499954f * 255.f,
+ 0.499999f, -0.418686f, -0.0813131f, 0.f, 0.499941f * 255.f},
+
+ // kRec601_SkYUVColorSpace
+ { 0.256951f, 0.504421f, 0.0977346f, 0.f, 0.0625f * 255.f,
+ -0.148212f, -0.290954f, 0.439166f, 0.f, 0.5f * 255.f,
+ 0.439166f, -0.367886f, -0.0712802f, 0.f, 0.5f * 255.f},
+
+ // kRec709_SkYUVColorSpace
+ { 0.182663f, 0.614473f, 0.061971f, 0.f, 0.0625f * 255.f,
+ -0.100672f, -0.338658f, 0.43933f, 0.f, 0.5f * 255.f,
+ 0.439142f, -0.39891f, -0.040231f, 0.f, 0.5f * 255.f},
+ };
+ static_assert(kLastEnum_SkYUVColorSpace == 2, "yuv color matrix array problem");
+ static_assert(kJPEG_SkYUVColorSpace == 0, "yuv color matrix array problem");
+ static_assert(kRec601_SkYUVColorSpace == 1, "yuv color matrix array problem");
+ static_assert(kRec709_SkYUVColorSpace == 2, "yuv color matrix array problem");
+
+ for (int i = 0; i < 3; ++i) {
+ size_t rb = rowBytes[i] ? rowBytes[i] : sizes[i].fWidth;
+ auto surface(SkSurface::MakeRasterDirect(
+ SkImageInfo::MakeA8(sizes[i].fWidth, sizes[i].fHeight), planes[i], rb));
+ if (!surface) {
+ return false;
+ }
+ SkPaint paint;
+ paint.setFilterQuality(kLow_SkFilterQuality);
+ paint.setBlendMode(SkBlendMode::kSrc);
+ int rowStartIdx = 5 * i;
+ const SkScalar* row = kYUVColorSpaceInvMatrices[colorSpace] + rowStartIdx;
+ paint.setColorFilter(
+ SkColorMatrixFilterRowMajor255::MakeSingleChannelOutput(row));
+ surface->getCanvas()->drawImageRect(image, SkIRect::MakeWH(image->width(), image->height()),
+ SkRect::MakeIWH(surface->width(), surface->height()),
+ &paint);
+ }
+ return true;
+}
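
A minimal, hypothetical caller for SkRGBAToYUV (not part of the patch). It allocates three tightly packed A8 planes in a 4:2:0 layout; passing 0 for a row-bytes entry means "tightly packed", per the fallback in the loop above.

#include <vector>
#include "SkImage.h"
#include "SkRGBAToYUV.h"

static bool to_yuv_420(const sk_sp<SkImage>& image) {
    const int w = image->width(), h = image->height();
    const SkISize sizes[3] = {
        SkISize::Make(w, h),                          // Y plane, full resolution
        SkISize::Make((w + 1) / 2, (h + 1) / 2),      // U plane, half resolution
        SkISize::Make((w + 1) / 2, (h + 1) / 2),      // V plane, half resolution
    };
    std::vector<uint8_t> y(sizes[0].fWidth * sizes[0].fHeight);
    std::vector<uint8_t> u(sizes[1].fWidth * sizes[1].fHeight);
    std::vector<uint8_t> v(sizes[2].fWidth * sizes[2].fHeight);
    void* const planes[3] = { y.data(), u.data(), v.data() };
    const size_t rowBytes[3] = { 0, 0, 0 };           // 0 == tightly packed

    return SkRGBAToYUV(image.get(), sizes, planes, rowBytes, kRec709_SkYUVColorSpace);
}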
diff --git a/gfx/skia/skia/src/utils/SkRGBAToYUV.h b/gfx/skia/skia/src/utils/SkRGBAToYUV.h
new file mode 100644
index 000000000..5c3c1b146
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkRGBAToYUV.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRGBAToYUV_DEFINED
+#define SkRGBAToYUV_DEFINED
+
+#include "SkPixmap.h"
+#include "SkSize.h"
+
+class SkImage;
+// Works with any image type at the moment, but in the future it may only work with raster-backed
+// images. This really should take an SkPixmap for the input; however, the implementation
+// currently requires an image.
+bool SkRGBAToYUV(const SkImage*, const SkISize [3], void* const planes[3],
+ const size_t rowBytes[3], SkYUVColorSpace);
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkShadowPaintFilterCanvas.cpp b/gfx/skia/skia/src/utils/SkShadowPaintFilterCanvas.cpp
new file mode 100644
index 000000000..289ae3cd0
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkShadowPaintFilterCanvas.cpp
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkPathEffect.h"
+#include "SkShadowPaintFilterCanvas.h"
+
+#ifdef SK_EXPERIMENTAL_SHADOWING
+
+SkShadowPaintFilterCanvas::SkShadowPaintFilterCanvas(SkCanvas *canvas)
+ : SkPaintFilterCanvas(canvas) {
+ fShadowParams.fShadowRadius = 0.0f;
+ fShadowParams.fType = SkShadowParams::kNoBlur_ShadowType;
+ fShadowParams.fBiasingConstant = 0.0f;
+ fShadowParams.fMinVariance = 0.0f;
+}
+
+// TODO use a shader instead
+bool SkShadowPaintFilterCanvas::onFilter(SkTCopyOnFirstWrite<SkPaint>* paint, Type type) const {
+ if (*paint) {
+ int z = this->getZ();
+ SkASSERT(z <= 0xFF && z >= 0x00);
+
+ SkPaint newPaint;
+ newPaint.setPathEffect(sk_ref_sp<SkPathEffect>((*paint)->getPathEffect()));
+
+ SkColor color = 0xFF000000; // init color to opaque black
+ color |= z; // Put the index into the blue component
+
+ if (fShadowParams.fType == SkShadowParams::kVariance_ShadowType) {
+ int z2 = z * z;
+ if (z2 > 255 * 256) {
+ color |= 0xff00;
+ } else {
+ // Let's only store the more significant bits of z2 to save space.
+ // In practice, this should barely impact shadow blur quality.
+ color |= z2 & 0x0000ff00;
+ }
+ }
+ newPaint.setColor(color);
+
+ *paint->writable() = newPaint;
+ }
+
+ return true;
+}
+
+SkISize SkShadowPaintFilterCanvas::ComputeDepthMapSize(const SkLights::Light& light, int maxDepth,
+ int width, int height) {
+ if (light.type() != SkLights::Light::kDirectional_LightType) {
+ // Calculating the right depth map size for point lights is complex,
+ // as it depends on the max depth, the max depth delta, the location
+ // of the point light and the shapes, etc... If we take upper bounds
+ // on those metrics, the shadow map will be pretty big in any case.
+ // Thus, just using 4x the width and height seems to work for most scenes.
+ return SkISize::Make(width * 4, height * 4);
+ }
+
+ int dMapWidth = SkMin32(maxDepth * fabs(light.dir().fX) + width,
+ width * 2);
+ int dMapHeight = SkMin32(maxDepth * fabs(light.dir().fY) + height,
+ height * 2);
+ return SkISize::Make(dMapWidth, dMapHeight);
+}
+
+void SkShadowPaintFilterCanvas::setShadowParams(const SkShadowParams &params) {
+ fShadowParams = params;
+}
+
+void SkShadowPaintFilterCanvas::onDrawPicture(const SkPicture *picture, const SkMatrix *matrix,
+ const SkPaint *paint) {
+ SkTCopyOnFirstWrite<SkPaint> filteredPaint(paint);
+ if (this->onFilter(&filteredPaint, kPicture_Type)) {
+ SkCanvas::onDrawPicture(picture, matrix, filteredPaint);
+ }
+}
+
+void SkShadowPaintFilterCanvas::updateMatrix() {
+    // It is up to the user to set the 0th light in fLights to
+    // the light they want to render the depth map with.
+ if (this->fLights->light(0).type() == SkLights::Light::kDirectional_LightType) {
+ const SkVector3& lightDir = this->fLights->light(0).dir();
+ SkScalar x = lightDir.fX * this->getZ();
+ SkScalar y = lightDir.fY * this->getZ();
+
+ this->translate(x, y);
+ } else if (this->fLights->light(0).type() == SkLights::Light::kPoint_LightType) {
+ SkISize size = this->getBaseLayerSize();
+
+ SkPoint3 lightPos = this->fLights->light(0).pos();
+
+ // shadow maps for point lights are 4x the size of the diffuse map, by experimentation
+ // (see SPFCanvas::ComputeDepthMapSize())
+ SkScalar diffuseHeight = size.fHeight / 4.0f;
+
+ // move point light with canvas's CTM
+ SkPoint lightPoint = SkPoint::Make(lightPos.fX, diffuseHeight - lightPos.fY);
+ SkMatrix mat = this->getTotalMatrix();
+ if (mat.invert(&mat)) {
+ mat.mapPoints(&lightPoint, 1);
+ }
+ lightPoint.set(lightPoint.fX, diffuseHeight - lightPoint.fY);
+
+ // center the shadow map
+ // note: the 3/8 constant is specific to the 4.0 depth map size multiplier
+ mat = this->getTotalMatrix();
+ mat.postTranslate(size.width() * 0.375f, size.height() * 0.375f);
+ this->setMatrix(mat);
+
+ // project shapes onto canvas as shadows
+ SkScalar scale = (lightPos.fZ) / (lightPos.fZ - this->getZ());
+ this->scale(scale, scale);
+
+ this->translate(-lightPoint.fX * this->getZ() /
+ ((lightPos.fZ - this->getZ()) * scale),
+ -(diffuseHeight - lightPoint.fY) * this->getZ() /
+ ((lightPos.fZ - this->getZ()) * scale));
+ }
+}
+
+void SkShadowPaintFilterCanvas::onDrawPaint(const SkPaint &paint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawPaint(paint);
+ this->restore();
+}
+
+void SkShadowPaintFilterCanvas::onDrawPoints(PointMode mode, size_t count, const SkPoint pts[],
+ const SkPaint &paint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawPoints(mode, count, pts, paint);
+ this->restore();
+}
+
+void SkShadowPaintFilterCanvas::onDrawRect(const SkRect &rect, const SkPaint &paint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawRect(rect, paint);
+ this->restore();
+}
+
+void SkShadowPaintFilterCanvas::onDrawRRect(const SkRRect &rrect, const SkPaint &paint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawRRect(rrect, paint);
+ this->restore();
+}
+
+void SkShadowPaintFilterCanvas::onDrawDRRect(const SkRRect &outer, const SkRRect &inner,
+ const SkPaint &paint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawDRRect(outer, inner, paint);
+ this->restore();
+}
+
+void SkShadowPaintFilterCanvas::onDrawOval(const SkRect &rect, const SkPaint &paint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawOval(rect, paint);
+ this->restore();
+}
+
+void SkShadowPaintFilterCanvas::onDrawArc(const SkRect &rect, SkScalar startAngle,
+ SkScalar sweepAngle, bool useCenter,
+ const SkPaint &paint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawArc(rect, startAngle, sweepAngle, useCenter, paint);
+ this->restore();
+}
+
+void SkShadowPaintFilterCanvas::onDrawPath(const SkPath &path, const SkPaint &paint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawPath(path, paint);
+ this->restore();
+}
+
+void SkShadowPaintFilterCanvas::onDrawBitmap(const SkBitmap &bm, SkScalar left, SkScalar top,
+ const SkPaint *paint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawBitmap(bm, left, top, paint);
+ this->restore();
+}
+
+void SkShadowPaintFilterCanvas::onDrawBitmapRect(const SkBitmap &bm, const SkRect *src,
+ const SkRect &dst, const SkPaint *paint,
+ SrcRectConstraint constraint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawBitmapRect(bm, src, dst, paint, constraint);
+ this->restore();
+}
+
+void SkShadowPaintFilterCanvas::onDrawBitmapNine(const SkBitmap &bm, const SkIRect &center,
+ const SkRect &dst, const SkPaint *paint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawBitmapNine(bm, center, dst, paint);
+ this->restore();
+}
+
+void SkShadowPaintFilterCanvas::onDrawImage(const SkImage *image, SkScalar left,
+ SkScalar top, const SkPaint *paint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawImage(image, left, top, paint);
+ this->restore();
+}
+
+void SkShadowPaintFilterCanvas::onDrawImageRect(const SkImage *image, const SkRect *src,
+ const SkRect &dst, const SkPaint *paint,
+ SrcRectConstraint constraint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawImageRect(image, src, dst, paint, constraint);
+ this->restore();
+}
+
+void SkShadowPaintFilterCanvas::onDrawImageNine(const SkImage *image, const SkIRect &center,
+ const SkRect &dst, const SkPaint *paint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawImageNine(image, center, dst, paint);
+ this->restore();
+}
+
+
+void SkShadowPaintFilterCanvas::onDrawVertices(VertexMode vmode, int vertexCount,
+ const SkPoint vertices[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode *xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint &paint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawVertices(vmode, vertexCount, vertices, texs, colors,
+ xmode, indices, indexCount, paint);
+ this->restore();
+}
+
+void SkShadowPaintFilterCanvas::onDrawPatch(const SkPoint cubics[], const SkColor colors[],
+ const SkPoint texCoords[], SkXfermode *xmode,
+ const SkPaint &paint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawPatch(cubics, colors, texCoords, xmode, paint);
+ this->restore();
+}
+
+void SkShadowPaintFilterCanvas::onDrawText(const void *text, size_t byteLength, SkScalar x,
+ SkScalar y, const SkPaint &paint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawText(text, byteLength, x, y, paint);
+ this->restore();
+}
+
+void SkShadowPaintFilterCanvas::onDrawPosText(const void *text, size_t byteLength,
+ const SkPoint pos[], const SkPaint &paint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawPosText(text, byteLength, pos, paint);
+ this->restore();
+}
+
+void SkShadowPaintFilterCanvas::onDrawPosTextH(const void *text, size_t byteLength,
+ const SkScalar xpos[],
+ SkScalar constY, const SkPaint &paint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawPosTextH(text, byteLength, xpos, constY, paint);
+ this->restore();
+}
+
+void SkShadowPaintFilterCanvas::onDrawTextOnPath(const void *text, size_t byteLength,
+ const SkPath &path, const SkMatrix *matrix,
+ const SkPaint &paint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawTextOnPath(text, byteLength, path, matrix, paint);
+ this->restore();
+}
+
+void SkShadowPaintFilterCanvas::onDrawTextRSXform(const void *text, size_t byteLength,
+ const SkRSXform xform[], const SkRect *cull,
+ const SkPaint &paint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawTextRSXform(text, byteLength, xform, cull, paint);
+ this->restore();
+}
+
+void SkShadowPaintFilterCanvas::onDrawTextBlob(const SkTextBlob *blob, SkScalar x, SkScalar y,
+ const SkPaint &paint) {
+ this->save();
+ this->updateMatrix();
+ this->INHERITED::onDrawTextBlob(blob, x, y, paint);
+ this->restore();
+}
+
+#endif
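
To make the depth encoding in onFilter() concrete, here is the same arithmetic pulled out into a hypothetical helper (not part of the patch); for a draw depth of 200, the variance branch produces 0xFF009CC8.

#include "SkColor.h"

// Encode a draw depth the way onFilter() does for the variance shadow type.
static SkColor encode_depth(int z) {            // e.g. z = 200
    SkColor color = 0xFF000000 | z;             // blue channel holds z           -> 0xFF0000C8
    int z2 = z * z;                             // 40000, below the 255 * 256 cap
    if (z2 > 255 * 256) {
        color |= 0xff00;
    } else {
        color |= z2 & 0x0000ff00;               // green channel holds high bits  -> 0xFF009CC8
    }
    return color;
}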
diff --git a/gfx/skia/skia/src/utils/SkShadowPaintFilterCanvas.h b/gfx/skia/skia/src/utils/SkShadowPaintFilterCanvas.h
new file mode 100644
index 000000000..190c68b7c
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkShadowPaintFilterCanvas.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkShadowPaintFilterCanvas_DEFINED
+#define SkShadowPaintFilterCanvas_DEFINED
+
+#include "SkPaintFilterCanvas.h"
+
+#ifdef SK_EXPERIMENTAL_SHADOWING
+
+/** \class SkShadowPaintFilterCanvas
+ *
+ * A utility proxy class for implementing shadow maps.
+ *
+ * We override the onFilter method to draw depths into the canvas
+ * depending on the current draw depth of the canvas, throwing out
+ * the actual draw color.
+ *
+ * Note that we can only do this for one light at a time!
+ * It is up to the user to set the 0th light in fLights to
+ * the light they want to render the depth map with.
+ */
+class SkShadowPaintFilterCanvas : public SkPaintFilterCanvas {
+public:
+
+ SkShadowPaintFilterCanvas(SkCanvas *canvas);
+
+ // TODO use a shader instead
+ bool onFilter(SkTCopyOnFirstWrite<SkPaint>* paint, Type type) const override;
+
+ static SkISize ComputeDepthMapSize(const SkLights::Light& light, int maxDepth,
+ int width, int height);
+
+ void setShadowParams(const SkShadowParams &params);
+protected:
+ void updateMatrix();
+
+ void onDrawPicture(const SkPicture *picture, const SkMatrix *matrix,
+ const SkPaint *paint) override;
+
+ void onDrawPaint(const SkPaint &paint) override;
+
+ void onDrawPoints(PointMode mode, size_t count, const SkPoint pts[],
+ const SkPaint &paint) override;
+
+ void onDrawRect(const SkRect &rect, const SkPaint &paint) override;
+
+ void onDrawRRect(const SkRRect &rrect, const SkPaint &paint) override;
+
+ void onDrawDRRect(const SkRRect &outer, const SkRRect &inner,
+ const SkPaint &paint) override;
+
+ void onDrawOval(const SkRect &rect, const SkPaint &paint) override;
+
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override;
+
+ void onDrawPath(const SkPath &path, const SkPaint &paint) override;
+
+ void onDrawBitmap(const SkBitmap &bm, SkScalar left, SkScalar top,
+ const SkPaint *paint) override;
+
+ void onDrawBitmapRect(const SkBitmap &bm, const SkRect *src, const SkRect &dst,
+ const SkPaint *paint, SrcRectConstraint constraint) override;
+
+ void onDrawBitmapNine(const SkBitmap &bm, const SkIRect &center,
+ const SkRect &dst, const SkPaint *paint) override;
+
+ void onDrawImage(const SkImage *image, SkScalar left, SkScalar top,
+ const SkPaint *paint) override;
+
+ void onDrawImageRect(const SkImage *image, const SkRect *src,
+ const SkRect &dst, const SkPaint *paint,
+ SrcRectConstraint constraint) override;
+
+ void onDrawImageNine(const SkImage *image, const SkIRect &center,
+ const SkRect &dst, const SkPaint *paint) override;
+
+ void onDrawVertices(VertexMode vmode, int vertexCount,
+ const SkPoint vertices[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode *xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint &paint) override;
+
+ void onDrawPatch(const SkPoint cubics[], const SkColor colors[],
+ const SkPoint texCoords[], SkXfermode *xmode,
+ const SkPaint &paint) override;
+
+ void onDrawText(const void *text, size_t byteLength, SkScalar x, SkScalar y,
+ const SkPaint &paint) override;
+
+ void onDrawPosText(const void *text, size_t byteLength, const SkPoint pos[],
+ const SkPaint &paint) override;
+
+ void onDrawPosTextH(const void *text, size_t byteLength, const SkScalar xpos[],
+ SkScalar constY, const SkPaint &paint) override;
+
+ void onDrawTextOnPath(const void *text, size_t byteLength, const SkPath &path,
+ const SkMatrix *matrix, const SkPaint &paint) override;
+
+ void onDrawTextRSXform(const void *text, size_t byteLength,
+ const SkRSXform xform[], const SkRect *cull,
+ const SkPaint &paint) override;
+
+ void onDrawTextBlob(const SkTextBlob *blob, SkScalar x,
+ SkScalar y, const SkPaint &paint) override;
+private:
+ SkShadowParams fShadowParams;
+ typedef SkPaintFilterCanvas INHERITED;
+};
+
+
+#endif
+#endif
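
As a worked example of ComputeDepthMapSize (numbers are illustrative): for a point light over an 800x600 canvas the depth map is simply 4x each dimension, i.e. 3200x2400; for a directional light with dir = (0.5, 0.25, ...), maxDepth = 255 and the same canvas, the result is min(255 * 0.5 + 800, 1600) by min(255 * 0.25 + 600, 1200), i.e. 927x663 after truncation to integers.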
diff --git a/gfx/skia/skia/src/utils/SkTextBox.cpp b/gfx/skia/skia/src/utils/SkTextBox.cpp
new file mode 100644
index 000000000..bc2e2217d
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkTextBox.cpp
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTextBox.h"
+#include "SkUtils.h"
+
+static inline int is_ws(int c)
+{
+ return !((c - 1) >> 5);
+}
+
+static size_t linebreak(const char text[], const char stop[],
+ const SkPaint& paint, SkScalar margin,
+ size_t* trailing = nullptr)
+{
+ size_t lengthBreak = paint.breakText(text, stop - text, margin);
+
+    // Check for whitespace or line breaks before lengthBreak
+ const char* start = text;
+ const char* word_start = text;
+ int prevWS = true;
+ if (trailing) {
+ *trailing = 0;
+ }
+
+ while (text < stop) {
+ const char* prevText = text;
+ SkUnichar uni = SkUTF8_NextUnichar(&text);
+ int currWS = is_ws(uni);
+
+ if (!currWS && prevWS) {
+ word_start = prevText;
+ }
+ prevWS = currWS;
+
+ if (text > start + lengthBreak) {
+ if (currWS) {
+ // eat the rest of the whitespace
+ while (text < stop && is_ws(SkUTF8_ToUnichar(text))) {
+ text += SkUTF8_CountUTF8Bytes(text);
+ }
+ if (trailing) {
+ *trailing = text - prevText;
+ }
+ } else {
+ // backup until a whitespace (or 1 char)
+ if (word_start == start) {
+ if (prevText > start) {
+ text = prevText;
+ }
+ } else {
+ text = word_start;
+ }
+ }
+ break;
+ }
+
+ if ('\n' == uni) {
+ size_t ret = text - start;
+ size_t lineBreakSize = 1;
+ if (text < stop) {
+ uni = SkUTF8_NextUnichar(&text);
+ if ('\r' == uni) {
+ ret = text - start;
+ ++lineBreakSize;
+ }
+ }
+ if (trailing) {
+ *trailing = lineBreakSize;
+ }
+ return ret;
+ }
+
+ if ('\r' == uni) {
+ size_t ret = text - start;
+ size_t lineBreakSize = 1;
+ if (text < stop) {
+ uni = SkUTF8_NextUnichar(&text);
+ if ('\n' == uni) {
+ ret = text - start;
+ ++lineBreakSize;
+ }
+ }
+ if (trailing) {
+ *trailing = lineBreakSize;
+ }
+ return ret;
+ }
+ }
+
+ return text - start;
+}
+
+int SkTextLineBreaker::CountLines(const char text[], size_t len, const SkPaint& paint, SkScalar width)
+{
+ const char* stop = text + len;
+ int count = 0;
+
+ if (width > 0)
+ {
+ do {
+ count += 1;
+ text += linebreak(text, stop, paint, width);
+ } while (text < stop);
+ }
+ return count;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+SkTextBox::SkTextBox()
+{
+ fBox.setEmpty();
+ fSpacingMul = SK_Scalar1;
+ fSpacingAdd = 0;
+ fMode = kLineBreak_Mode;
+ fSpacingAlign = kStart_SpacingAlign;
+}
+
+void SkTextBox::setMode(Mode mode)
+{
+ SkASSERT((unsigned)mode < kModeCount);
+ fMode = SkToU8(mode);
+}
+
+void SkTextBox::setSpacingAlign(SpacingAlign align)
+{
+ SkASSERT((unsigned)align < kSpacingAlignCount);
+ fSpacingAlign = SkToU8(align);
+}
+
+void SkTextBox::getBox(SkRect* box) const
+{
+ if (box)
+ *box = fBox;
+}
+
+void SkTextBox::setBox(const SkRect& box)
+{
+ fBox = box;
+}
+
+void SkTextBox::setBox(SkScalar left, SkScalar top, SkScalar right, SkScalar bottom)
+{
+ fBox.set(left, top, right, bottom);
+}
+
+void SkTextBox::getSpacing(SkScalar* mul, SkScalar* add) const
+{
+ if (mul)
+ *mul = fSpacingMul;
+ if (add)
+ *add = fSpacingAdd;
+}
+
+void SkTextBox::setSpacing(SkScalar mul, SkScalar add)
+{
+ fSpacingMul = mul;
+ fSpacingAdd = add;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+
+SkScalar SkTextBox::visit(Visitor& visitor, const char text[], size_t len,
+ const SkPaint& paint) const {
+ SkScalar marginWidth = fBox.width();
+
+ if (marginWidth <= 0 || len == 0) {
+ return fBox.top();
+ }
+
+ const char* textStop = text + len;
+
+ SkScalar x, y, scaledSpacing, height, fontHeight;
+ SkPaint::FontMetrics metrics;
+
+ switch (paint.getTextAlign()) {
+ case SkPaint::kLeft_Align:
+ x = 0;
+ break;
+ case SkPaint::kCenter_Align:
+ x = SkScalarHalf(marginWidth);
+ break;
+ default:
+ x = marginWidth;
+ break;
+ }
+ x += fBox.fLeft;
+
+ fontHeight = paint.getFontMetrics(&metrics);
+ scaledSpacing = SkScalarMul(fontHeight, fSpacingMul) + fSpacingAdd;
+ height = fBox.height();
+
+ // compute Y position for first line
+ {
+ SkScalar textHeight = fontHeight;
+
+ if (fMode == kLineBreak_Mode && fSpacingAlign != kStart_SpacingAlign) {
+ int count = SkTextLineBreaker::CountLines(text, textStop - text, paint, marginWidth);
+ SkASSERT(count > 0);
+ textHeight += scaledSpacing * (count - 1);
+ }
+
+ switch (fSpacingAlign) {
+ case kStart_SpacingAlign:
+ y = 0;
+ break;
+ case kCenter_SpacingAlign:
+ y = SkScalarHalf(height - textHeight);
+ break;
+ default:
+ SkASSERT(fSpacingAlign == kEnd_SpacingAlign);
+ y = height - textHeight;
+ break;
+ }
+ y += fBox.fTop - metrics.fAscent;
+ }
+
+ for (;;) {
+ size_t trailing;
+ len = linebreak(text, textStop, paint, marginWidth, &trailing);
+ if (y + metrics.fDescent + metrics.fLeading > 0) {
+ visitor(text, len - trailing, x, y, paint);
+ }
+ text += len;
+ if (text >= textStop) {
+ break;
+ }
+ y += scaledSpacing;
+ if (y + metrics.fAscent >= fBox.fBottom) {
+ break;
+ }
+ }
+ return y + metrics.fDescent + metrics.fLeading;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class CanvasVisitor : public SkTextBox::Visitor {
+ SkCanvas* fCanvas;
+public:
+ CanvasVisitor(SkCanvas* canvas) : fCanvas(canvas) {}
+
+ void operator()(const char text[], size_t length, SkScalar x, SkScalar y,
+ const SkPaint& paint) override {
+ fCanvas->drawText(text, length, x, y, paint);
+ }
+};
+
+void SkTextBox::setText(const char text[], size_t len, const SkPaint& paint) {
+ fText = text;
+ fLen = len;
+ fPaint = &paint;
+}
+
+void SkTextBox::draw(SkCanvas* canvas, const char text[], size_t len, const SkPaint& paint) {
+ CanvasVisitor sink(canvas);
+ this->visit(sink, text, len, paint);
+}
+
+void SkTextBox::draw(SkCanvas* canvas) {
+ this->draw(canvas, fText, fLen, *fPaint);
+}
+
+int SkTextBox::countLines() const {
+ return SkTextLineBreaker::CountLines(fText, fLen, *fPaint, fBox.width());
+}
+
+SkScalar SkTextBox::getTextHeight() const {
+ SkScalar spacing = SkScalarMul(fPaint->getTextSize(), fSpacingMul) + fSpacingAdd;
+ return this->countLines() * spacing;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkTextBlob.h"
+
+class TextBlobVisitor : public SkTextBox::Visitor {
+public:
+ SkTextBlobBuilder fBuilder;
+
+ void operator()(const char text[], size_t length, SkScalar x, SkScalar y,
+ const SkPaint& paint) override {
+ SkPaint p(paint);
+ p.setTextEncoding(SkPaint::kGlyphID_TextEncoding);
+ const int count = paint.countText(text, length);
+ paint.textToGlyphs(text, length, fBuilder.allocRun(p, count, x, y).glyphs);
+ }
+};
+
+sk_sp<SkTextBlob> SkTextBox::snapshotTextBlob(SkScalar* computedBottom) const {
+ TextBlobVisitor visitor;
+ SkScalar newB = this->visit(visitor, fText, fLen, *fPaint);
+ if (computedBottom) {
+ *computedBottom = newB;
+ }
+ return visitor.fBuilder.make();
+}
diff --git a/gfx/skia/skia/src/utils/SkTextureCompressor.cpp b/gfx/skia/skia/src/utils/SkTextureCompressor.cpp
new file mode 100644
index 000000000..e9e4d2e82
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkTextureCompressor.cpp
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTextureCompressor.h"
+#include "SkTextureCompressor_ASTC.h"
+#include "SkTextureCompressor_LATC.h"
+#include "SkTextureCompressor_R11EAC.h"
+
+#include "SkBitmap.h"
+#include "SkBitmapProcShader.h"
+#include "SkData.h"
+#include "SkEndian.h"
+#include "SkMathPriv.h"
+#include "SkOpts.h"
+
+#ifndef SK_IGNORE_ETC1_SUPPORT
+# include "etc1.h"
+#endif
+
+// Convert ETC1 functions to our function signatures
+static bool compress_etc1_565(uint8_t* dst, const uint8_t* src,
+ int width, int height, size_t rowBytes) {
+#ifndef SK_IGNORE_ETC1_SUPPORT
+ return 0 == etc1_encode_image(src, width, height, 2, SkToInt(rowBytes), dst);
+#else
+ return false;
+#endif
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+namespace SkTextureCompressor {
+
+void GetBlockDimensions(Format format, int* dimX, int* dimY, bool matchSpec) {
+ if (nullptr == dimX || nullptr == dimY) {
+ return;
+ }
+
+ if (!matchSpec && SkOpts::fill_block_dimensions(format, dimX, dimY)) {
+ return;
+ }
+
+ // No specialized arguments, return the dimensions as they are in the spec.
+ static const struct FormatDimensions {
+ const int fBlockSizeX;
+ const int fBlockSizeY;
+ } kFormatDimensions[kFormatCnt] = {
+ { 4, 4 }, // kLATC_Format
+ { 4, 4 }, // kR11_EAC_Format
+ { 4, 4 }, // kETC1_Format
+ { 4, 4 }, // kASTC_4x4_Format
+ { 5, 4 }, // kASTC_5x4_Format
+ { 5, 5 }, // kASTC_5x5_Format
+ { 6, 5 }, // kASTC_6x5_Format
+ { 6, 6 }, // kASTC_6x6_Format
+ { 8, 5 }, // kASTC_8x5_Format
+ { 8, 6 }, // kASTC_8x6_Format
+ { 8, 8 }, // kASTC_8x8_Format
+ { 10, 5 }, // kASTC_10x5_Format
+ { 10, 6 }, // kASTC_10x6_Format
+ { 10, 8 }, // kASTC_10x8_Format
+ { 10, 10 }, // kASTC_10x10_Format
+ { 12, 10 }, // kASTC_12x10_Format
+ { 12, 12 }, // kASTC_12x12_Format
+ };
+
+ *dimX = kFormatDimensions[format].fBlockSizeX;
+ *dimY = kFormatDimensions[format].fBlockSizeY;
+}
+
+int GetCompressedDataSize(Format fmt, int width, int height) {
+ int dimX, dimY;
+ GetBlockDimensions(fmt, &dimX, &dimY, true);
+
+ int encodedBlockSize = 0;
+
+ switch (fmt) {
+ // These formats are 64 bits per 4x4 block.
+ case kLATC_Format:
+ case kR11_EAC_Format:
+ case kETC1_Format:
+ encodedBlockSize = 8;
+ break;
+
+ // This format is 128 bits.
+ case kASTC_4x4_Format:
+ case kASTC_5x4_Format:
+ case kASTC_5x5_Format:
+ case kASTC_6x5_Format:
+ case kASTC_6x6_Format:
+ case kASTC_8x5_Format:
+ case kASTC_8x6_Format:
+ case kASTC_8x8_Format:
+ case kASTC_10x5_Format:
+ case kASTC_10x6_Format:
+ case kASTC_10x8_Format:
+ case kASTC_10x10_Format:
+ case kASTC_12x10_Format:
+ case kASTC_12x12_Format:
+ encodedBlockSize = 16;
+ break;
+
+ default:
+ SkFAIL("Unknown compressed format!");
+ return -1;
+ }
+
+ if(((width % dimX) == 0) && ((height % dimY) == 0)) {
+ const int blocksX = width / dimX;
+ const int blocksY = height / dimY;
+
+ return blocksX * blocksY * encodedBlockSize;
+ }
+
+ return -1;
+}
+
+bool CompressBufferToFormat(uint8_t* dst, const uint8_t* src, SkColorType srcColorType,
+ int width, int height, size_t rowBytes, Format format) {
+ SkOpts::TextureCompressor proc = SkOpts::texture_compressor(srcColorType, format);
+ if (proc && proc(dst, src, width, height, rowBytes)) {
+ return true;
+ }
+
+ switch (srcColorType) {
+ case kAlpha_8_SkColorType:
+ if (format == kLATC_Format) { proc = CompressA8ToLATC; }
+ if (format == kR11_EAC_Format) { proc = CompressA8ToR11EAC; }
+ if (format == kASTC_12x12_Format) { proc = CompressA8To12x12ASTC; }
+ break;
+ case kRGB_565_SkColorType:
+ if (format == kETC1_Format) { proc = compress_etc1_565; }
+ break;
+ default:
+ break;
+ }
+ if (proc && proc(dst, src, width, height, rowBytes)) {
+ return true;
+ }
+
+ return false;
+}
+
+sk_sp<SkData> CompressBitmapToFormat(const SkPixmap& pixmap, Format format) {
+ int compressedDataSize = GetCompressedDataSize(format, pixmap.width(), pixmap.height());
+ if (compressedDataSize < 0) {
+ return nullptr;
+ }
+
+ const uint8_t* src = reinterpret_cast<const uint8_t*>(pixmap.addr());
+ sk_sp<SkData> dst(SkData::MakeUninitialized(compressedDataSize));
+
+ if (!CompressBufferToFormat((uint8_t*)dst->writable_data(), src, pixmap.colorType(),
+ pixmap.width(), pixmap.height(), pixmap.rowBytes(), format)) {
+ return nullptr;
+ }
+ return dst;
+}
+
+SkBlitter* CreateBlitterForFormat(int width, int height, void* compressedBuffer,
+ SkTBlitterAllocator *allocator, Format format) {
+ switch(format) {
+ case kLATC_Format:
+ return CreateLATCBlitter(width, height, compressedBuffer, allocator);
+
+ case kR11_EAC_Format:
+ return CreateR11EACBlitter(width, height, compressedBuffer, allocator);
+
+ case kASTC_12x12_Format:
+ return CreateASTCBlitter(width, height, compressedBuffer, allocator);
+
+ default:
+ return nullptr;
+ }
+
+ return nullptr;
+}
+
+bool DecompressBufferFromFormat(uint8_t* dst, int dstRowBytes, const uint8_t* src,
+ int width, int height, Format format) {
+ int dimX, dimY;
+ GetBlockDimensions(format, &dimX, &dimY, true);
+
+ if (width < 0 || ((width % dimX) != 0) || height < 0 || ((height % dimY) != 0)) {
+ return false;
+ }
+
+ switch(format) {
+ case kLATC_Format:
+ DecompressLATC(dst, dstRowBytes, src, width, height);
+ return true;
+
+ case kR11_EAC_Format:
+ DecompressR11EAC(dst, dstRowBytes, src, width, height);
+ return true;
+
+#ifndef SK_IGNORE_ETC1_SUPPORT
+ case kETC1_Format:
+ return 0 == etc1_decode_image(src, dst, width, height, 3, dstRowBytes);
+#endif
+
+ case kASTC_4x4_Format:
+ case kASTC_5x4_Format:
+ case kASTC_5x5_Format:
+ case kASTC_6x5_Format:
+ case kASTC_6x6_Format:
+ case kASTC_8x5_Format:
+ case kASTC_8x6_Format:
+ case kASTC_8x8_Format:
+ case kASTC_10x5_Format:
+ case kASTC_10x6_Format:
+ case kASTC_10x8_Format:
+ case kASTC_10x10_Format:
+ case kASTC_12x10_Format:
+ case kASTC_12x12_Format:
+ DecompressASTC(dst, dstRowBytes, src, width, height, dimX, dimY);
+ return true;
+
+ default:
+ // Do nothing...
+ break;
+ }
+
+ return false;
+}
+
+} // namespace SkTextureCompressor
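
To make the block arithmetic in GetCompressedDataSize concrete, a small hypothetical check (not part of the patch):

#include "SkTextureCompressor.h"

static void check_compressed_sizes() {
    // LATC: 4x4 blocks, 8 bytes each -> (256/4) * (256/4) * 8 = 32768 bytes.
    SkASSERT(SkTextureCompressor::GetCompressedDataSize(
                 SkTextureCompressor::kLATC_Format, 256, 256) == 32768);

    // ASTC 12x12: 16-byte blocks -> (240/12) * (240/12) * 16 = 6400 bytes.
    SkASSERT(SkTextureCompressor::GetCompressedDataSize(
                 SkTextureCompressor::kASTC_12x12_Format, 240, 240) == 6400);

    // Dimensions that are not multiples of the block size report an error (-1).
    SkASSERT(SkTextureCompressor::GetCompressedDataSize(
                 SkTextureCompressor::kLATC_Format, 250, 250) == -1);
}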
diff --git a/gfx/skia/skia/src/utils/SkTextureCompressor.h b/gfx/skia/skia/src/utils/SkTextureCompressor.h
new file mode 100644
index 000000000..1ae4aef13
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkTextureCompressor.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTextureCompressor_DEFINED
+#define SkTextureCompressor_DEFINED
+
+#include "SkBitmapProcShader.h"
+#include "SkImageInfo.h"
+
+class SkBitmap;
+class SkBlitter;
+class SkData;
+
+namespace SkTextureCompressor {
+ // Various texture compression formats that we support.
+ enum Format {
+ // Alpha only formats.
+ kLATC_Format, // 4x4 blocks, (de)compresses A8
+ kR11_EAC_Format, // 4x4 blocks, (de)compresses A8
+
+ // RGB only formats
+ kETC1_Format, // 4x4 blocks, compresses RGB 565, decompresses 8-bit RGB
+ // NOTE: ETC1 supports 8-bit RGB compression, but we
+ // currently don't have any RGB8 SkColorTypes. We could
+ // support 8-bit RGBA but we would have to preprocess the
+ // bitmap to insert alphas.
+
+ // Multi-purpose formats
+ kASTC_4x4_Format, // 4x4 blocks, no compression, decompresses RGBA
+ kASTC_5x4_Format, // 5x4 blocks, no compression, decompresses RGBA
+ kASTC_5x5_Format, // 5x5 blocks, no compression, decompresses RGBA
+ kASTC_6x5_Format, // 6x5 blocks, no compression, decompresses RGBA
+ kASTC_6x6_Format, // 6x6 blocks, no compression, decompresses RGBA
+ kASTC_8x5_Format, // 8x5 blocks, no compression, decompresses RGBA
+ kASTC_8x6_Format, // 8x6 blocks, no compression, decompresses RGBA
+ kASTC_8x8_Format, // 8x8 blocks, no compression, decompresses RGBA
+ kASTC_10x5_Format, // 10x5 blocks, no compression, decompresses RGBA
+ kASTC_10x6_Format, // 10x6 blocks, no compression, decompresses RGBA
+ kASTC_10x8_Format, // 10x8 blocks, no compression, decompresses RGBA
+ kASTC_10x10_Format, // 10x10 blocks, no compression, decompresses RGBA
+ kASTC_12x10_Format, // 12x10 blocks, no compression, decompresses RGBA
+ kASTC_12x12_Format, // 12x12 blocks, compresses A8, decompresses RGBA
+
+ kLast_Format = kASTC_12x12_Format
+ };
+ static const int kFormatCnt = kLast_Format + 1;
+
+ // Returns the size of the compressed data given the width, height, and
+ // desired compression format. If the width and height are not an appropriate
+ // multiple of the block size, then this function returns an error (-1).
+ int GetCompressedDataSize(Format fmt, int width, int height);
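+
+    // For illustration (assuming the usual block sizes for these formats: 8 bytes
+    // per 4x4 block for LATC, R11 EAC and ETC1, and 16 bytes per ASTC block
+    // regardless of its dimensions): a 16x16 image occupies 4*4*8 = 128 bytes in
+    // kLATC_Format and 4*4*16 = 256 bytes in kASTC_4x4_Format.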
+
+ // Returns an SkData holding a blob of compressed data that corresponds
+ // to the pixmap. If the pixmap colorType cannot be compressed using the
+ // associated format, then we return nullptr.
+ sk_sp<SkData> CompressBitmapToFormat(const SkPixmap&, Format format);
+
+ // Compresses the given src data into dst. The src data is assumed to be
+ // large enough to hold width*height pixels. The dst data is expected to
+ // be large enough to hold the compressed data according to the format.
+ bool CompressBufferToFormat(uint8_t* dst, const uint8_t* src, SkColorType srcColorType,
+ int width, int height, size_t rowBytes, Format format);
+
+ // Decompresses the given src data from the format specified into the
+ // destination buffer. The width and height of the data passed corresponds
+ // to the width and height of the uncompressed image. The destination buffer (dst)
+ // is assumed to be large enough to hold the entire decompressed image. The
+ // decompressed image colors are determined based on the passed format.
+ //
+ // Note, CompressBufferToFormat compresses A8 data into ASTC. However,
+ // general ASTC data encodes RGBA data, so that is what the decompressor
+ // operates on.
+ //
+ // Returns true if successfully decompresses the src data.
+ bool DecompressBufferFromFormat(uint8_t* dst, int dstRowBytes, const uint8_t* src,
+ int width, int height, Format format);
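+
+    // Illustrative round-trip sketch (not taken from this header's callers; 'pixmap'
+    // is assumed to be an A8 SkPixmap whose dimensions are multiples of four, and
+    // 'compressed'/'decompressed' are assumed to be caller-allocated uint8_t buffers
+    // of sufficient size):
+    //
+    //     int size = GetCompressedDataSize(kR11_EAC_Format, pixmap.width(), pixmap.height());
+    //     if (size >= 0 &&
+    //         CompressBufferToFormat(compressed, (const uint8_t*)pixmap.addr(),
+    //                                pixmap.colorType(), pixmap.width(), pixmap.height(),
+    //                                pixmap.rowBytes(), kR11_EAC_Format)) {
+    //         DecompressBufferFromFormat(decompressed, pixmap.width(), compressed,
+    //                                    pixmap.width(), pixmap.height(), kR11_EAC_Format);
+    //     }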
+
+ // Returns true if there exists a blitter for the specified format.
+ inline bool ExistsBlitterForFormat(Format format) {
+ switch (format) {
+ case kLATC_Format:
+ case kR11_EAC_Format:
+ case kASTC_12x12_Format:
+ return true;
+
+ default:
+ return false;
+ }
+ }
+
+ // Returns the blitter for the given compression format. Note, the blitter
+ // is intended to be used with the proper input. I.e. if you try to blit
+ // RGB source data into an R11 EAC texture, you're gonna have a bad time.
+ SkBlitter* CreateBlitterForFormat(int width, int height, void* compressedBuffer,
+ SkTBlitterAllocator *allocator, Format format);
+
+ // Returns the desired dimensions of the block size for the given format. These dimensions
+ // don't necessarily correspond to the specification's dimensions, since there may
+ // be specialized algorithms that operate on multiple blocks at once. If the
+ // flag 'matchSpec' is true, then the actual dimensions from the specification are
+ // returned. If the flag is false, then these dimensions reflect the appropriate operable
+ // dimensions of the compression functions.
+ void GetBlockDimensions(Format format, int* dimX, int* dimY, bool matchSpec = false);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkTextureCompressor_ASTC.cpp b/gfx/skia/skia/src/utils/SkTextureCompressor_ASTC.cpp
new file mode 100644
index 000000000..8a96b911e
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkTextureCompressor_ASTC.cpp
@@ -0,0 +1,2101 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTextureCompressor_ASTC.h"
+#include "SkTextureCompressor_Blitter.h"
+
+#include "SkBlitter.h"
+#include "SkEndian.h"
+#include "SkMathPriv.h"
+
+// This table contains the weight values for each texel. This is used in determining
+// how to convert a 12x12 grid of alpha values into a 6x5 grid of index values. Since
+// we have a 6x5 grid, that gives 30 values that we have to compute. For each index,
+// we store up to 20 different triplets of values. In order, the triplets are:
+// weight, texel-x, texel-y
+// The weight value corresponds to the amount that this index contributes to the final
+// index value of the given texel. Hence, we need to reconstruct the 6x5 index grid
+// from their relative contribution to the 12x12 texel grid.
+//
+// The algorithm is something like this:
+// foreach index i:
+// total-weight = 0;
+// total-alpha = 0;
+//   for w = 0 to 19:
+// weight = table[i][w*3];
+// texel-x = table[i][w*3 + 1];
+// texel-y = table[i][w*3 + 2];
+// if weight >= 0:
+// total-weight += weight;
+// total-alpha += weight * alphas[texel-x][texel-y];
+//
+// total-alpha /= total-weight;
+// index = top three bits of total-alpha
+//
+// If fewer than 20 texels contribute to a given index (e.g. the index is in a
+// corner), then the unused triplets in that row are stored as -1's in the table.
+
+static const int8_t k6x5To12x12Table[30][60] = {
+{ 16, 0, 0, 9, 1, 0, 1, 2, 0, 10, 0, 1, 6, 1, 1, 1, 2, 1, 4, 0, 2, 2,
+ 1, 2, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0,
+ 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0}, // n = 20
+{ 7, 1, 0, 15, 2, 0, 10, 3, 0, 3, 4, 0, 4, 1, 1, 9, 2, 1, 6, 3, 1, 2,
+ 4, 1, 2, 1, 2, 4, 2, 2, 3, 3, 2, 1, 4, 2, -1, 0, 0, -1, 0, 0, -1, 0,
+ 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0}, // n = 20
+{ 6, 3, 0, 13, 4, 0, 12, 5, 0, 4, 6, 0, 4, 3, 1, 8, 4, 1, 8, 5, 1, 3,
+ 6, 1, 1, 3, 2, 3, 4, 2, 3, 5, 2, 1, 6, 2, -1, 0, 0, -1, 0, 0, -1, 0,
+ 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0}, // n = 20
+{ 4, 5, 0, 12, 6, 0, 13, 7, 0, 6, 8, 0, 2, 5, 1, 7, 6, 1, 8, 7, 1, 4,
+ 8, 1, 1, 5, 2, 3, 6, 2, 3, 7, 2, 2, 8, 2, -1, 0, 0, -1, 0, 0, -1, 0,
+ 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0}, // n = 20
+{ 3, 7, 0, 10, 8, 0, 15, 9, 0, 7, 10, 0, 2, 7, 1, 6, 8, 1, 9, 9, 1, 4,
+ 10, 1, 1, 7, 2, 2, 8, 2, 4, 9, 2, 2, 10, 2, -1, 0, 0, -1, 0, 0, -1, 0,
+ 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0}, // n = 20
+{ 1, 9, 0, 9, 10, 0, 16, 11, 0, 1, 9, 1, 6, 10, 1, 10, 11, 1, 2, 10, 2, 4,
+ 11, 2, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0,
+ 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0}, // n = 20
+{ 6, 0, 1, 3, 1, 1, 12, 0, 2, 7, 1, 2, 1, 2, 2, 15, 0, 3, 8, 1, 3, 1,
+ 2, 3, 9, 0, 4, 5, 1, 4, 1, 2, 4, 3, 0, 5, 2, 1, 5, -1, 0, 0, -1, 0,
+ 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0}, // n = 20
+{ 3, 1, 1, 6, 2, 1, 4, 3, 1, 1, 4, 1, 5, 1, 2, 11, 2, 2, 7, 3, 2, 2,
+ 4, 2, 7, 1, 3, 14, 2, 3, 9, 3, 3, 3, 4, 3, 4, 1, 4, 8, 2, 4, 6, 3,
+ 4, 2, 4, 4, 1, 1, 5, 3, 2, 5, 2, 3, 5, 1, 4, 5}, // n = 20
+{ 2, 3, 1, 5, 4, 1, 4, 5, 1, 1, 6, 1, 5, 3, 2, 10, 4, 2, 9, 5, 2, 3,
+ 6, 2, 6, 3, 3, 12, 4, 3, 11, 5, 3, 4, 6, 3, 3, 3, 4, 7, 4, 4, 7, 5,
+ 4, 2, 6, 4, 1, 3, 5, 2, 4, 5, 2, 5, 5, 1, 6, 5}, // n = 20
+{ 2, 5, 1, 5, 6, 1, 5, 7, 1, 2, 8, 1, 3, 5, 2, 9, 6, 2, 10, 7, 2, 4,
+ 8, 2, 4, 5, 3, 11, 6, 3, 12, 7, 3, 6, 8, 3, 2, 5, 4, 7, 6, 4, 7, 7,
+ 4, 3, 8, 4, 1, 5, 5, 2, 6, 5, 2, 7, 5, 1, 8, 5}, // n = 20
+{ 1, 7, 1, 4, 8, 1, 6, 9, 1, 3, 10, 1, 2, 7, 2, 8, 8, 2, 11, 9, 2, 5,
+ 10, 2, 3, 7, 3, 9, 8, 3, 14, 9, 3, 7, 10, 3, 2, 7, 4, 6, 8, 4, 8, 9,
+ 4, 4, 10, 4, 1, 7, 5, 2, 8, 5, 3, 9, 5, 1, 10, 5}, // n = 20
+{ 3, 10, 1, 6, 11, 1, 1, 9, 2, 7, 10, 2, 12, 11, 2, 1, 9, 3, 8, 10, 3, 15,
+ 11, 3, 1, 9, 4, 5, 10, 4, 9, 11, 4, 2, 10, 5, 3, 11, 5, -1, 0, 0, -1, 0,
+ 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0}, // n = 20
+{ 1, 0, 3, 1, 1, 3, 7, 0, 4, 4, 1, 4, 13, 0, 5, 7, 1, 5, 1, 2, 5, 13,
+ 0, 6, 7, 1, 6, 1, 2, 6, 7, 0, 7, 4, 1, 7, 1, 0, 8, 1, 1, 8, -1, 0,
+ 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0}, // n = 20
+{ 1, 2, 3, 1, 3, 3, 3, 1, 4, 7, 2, 4, 4, 3, 4, 1, 4, 4, 6, 1, 5, 12,
+ 2, 5, 8, 3, 5, 2, 4, 5, 6, 1, 6, 12, 2, 6, 8, 3, 6, 2, 4, 6, 3, 1,
+ 7, 7, 2, 7, 4, 3, 7, 1, 4, 7, 1, 2, 8, 1, 3, 8}, // n = 20
+{ 1, 4, 3, 1, 5, 3, 3, 3, 4, 6, 4, 4, 5, 5, 4, 2, 6, 4, 5, 3, 5, 11,
+ 4, 5, 10, 5, 5, 3, 6, 5, 5, 3, 6, 11, 4, 6, 10, 5, 6, 3, 6, 6, 3, 3,
+ 7, 6, 4, 7, 5, 5, 7, 2, 6, 7, 1, 4, 8, 1, 5, 8}, // n = 20
+{ 1, 6, 3, 1, 7, 3, 2, 5, 4, 5, 6, 4, 6, 7, 4, 3, 8, 4, 3, 5, 5, 10,
+ 6, 5, 11, 7, 5, 5, 8, 5, 3, 5, 6, 10, 6, 6, 11, 7, 6, 5, 8, 6, 2, 5,
+ 7, 5, 6, 7, 6, 7, 7, 3, 8, 7, 1, 6, 8, 1, 7, 8}, // n = 20
+{ 1, 8, 3, 1, 9, 3, 1, 7, 4, 4, 8, 4, 7, 9, 4, 3, 10, 4, 2, 7, 5, 8,
+ 8, 5, 12, 9, 5, 6, 10, 5, 2, 7, 6, 8, 8, 6, 12, 9, 6, 6, 10, 6, 1, 7,
+ 7, 4, 8, 7, 7, 9, 7, 3, 10, 7, 1, 8, 8, 1, 9, 8}, // n = 20
+{ 1, 10, 3, 1, 11, 3, 4, 10, 4, 7, 11, 4, 1, 9, 5, 7, 10, 5, 13, 11, 5, 1,
+ 9, 6, 7, 10, 6, 13, 11, 6, 4, 10, 7, 7, 11, 7, 1, 10, 8, 1, 11, 8, -1, 0,
+ 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0}, // n = 20
+{ 3, 0, 6, 2, 1, 6, 9, 0, 7, 5, 1, 7, 1, 2, 7, 15, 0, 8, 8, 1, 8, 1,
+ 2, 8, 12, 0, 9, 7, 1, 9, 1, 2, 9, 6, 0, 10, 3, 1, 10, -1, 0, 0, -1, 0,
+ 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0}, // n = 20
+{ 1, 1, 6, 3, 2, 6, 2, 3, 6, 1, 4, 6, 4, 1, 7, 8, 2, 7, 6, 3, 7, 2,
+ 4, 7, 7, 1, 8, 14, 2, 8, 9, 3, 8, 3, 4, 8, 5, 1, 9, 11, 2, 9, 8, 3,
+ 9, 2, 4, 9, 3, 1, 10, 6, 2, 10, 4, 3, 10, 1, 4, 10}, // n = 20
+{ 1, 3, 6, 2, 4, 6, 2, 5, 6, 1, 6, 6, 3, 3, 7, 7, 4, 7, 7, 5, 7, 2,
+ 6, 7, 6, 3, 8, 12, 4, 8, 11, 5, 8, 4, 6, 8, 4, 3, 9, 10, 4, 9, 9, 5,
+ 9, 3, 6, 9, 2, 3, 10, 5, 4, 10, 5, 5, 10, 2, 6, 10}, // n = 20
+{ 1, 5, 6, 2, 6, 6, 2, 7, 6, 1, 8, 6, 2, 5, 7, 7, 6, 7, 7, 7, 7, 3,
+ 8, 7, 4, 5, 8, 11, 6, 8, 12, 7, 8, 6, 8, 8, 3, 5, 9, 9, 6, 9, 10, 7,
+ 9, 5, 8, 9, 1, 5, 10, 4, 6, 10, 5, 7, 10, 2, 8, 10}, // n = 20
+{ 1, 7, 6, 2, 8, 6, 3, 9, 6, 1, 10, 6, 2, 7, 7, 6, 8, 7, 8, 9, 7, 4,
+ 10, 7, 3, 7, 8, 9, 8, 8, 14, 9, 8, 7, 10, 8, 2, 7, 9, 7, 8, 9, 11, 9,
+ 9, 5, 10, 9, 1, 7, 10, 4, 8, 10, 6, 9, 10, 3, 10, 10}, // n = 20
+{ 2, 10, 6, 3, 11, 6, 1, 9, 7, 5, 10, 7, 9, 11, 7, 1, 9, 8, 8, 10, 8, 15,
+ 11, 8, 1, 9, 9, 7, 10, 9, 12, 11, 9, 3, 10, 10, 6, 11, 10, -1, 0, 0, -1, 0,
+ 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0}, // n = 20
+{ 4, 0, 9, 2, 1, 9, 10, 0, 10, 6, 1, 10, 1, 2, 10, 16, 0, 11, 9, 1, 11, 1,
+ 2, 11, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0,
+ 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0}, // n = 20
+{ 2, 1, 9, 4, 2, 9, 2, 3, 9, 1, 4, 9, 4, 1, 10, 9, 2, 10, 6, 3, 10, 2,
+ 4, 10, 7, 1, 11, 15, 2, 11, 10, 3, 11, 3, 4, 11, -1, 0, 0, -1, 0, 0, -1, 0,
+ 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0}, // n = 20
+{ 2, 3, 9, 3, 4, 9, 3, 5, 9, 1, 6, 9, 4, 3, 10, 8, 4, 10, 7, 5, 10, 2,
+ 6, 10, 6, 3, 11, 13, 4, 11, 12, 5, 11, 4, 6, 11, -1, 0, 0, -1, 0, 0, -1, 0,
+ 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0}, // n = 20
+{ 1, 5, 9, 3, 6, 9, 3, 7, 9, 1, 8, 9, 3, 5, 10, 8, 6, 10, 8, 7, 10, 4,
+ 8, 10, 4, 5, 11, 12, 6, 11, 13, 7, 11, 6, 8, 11, -1, 0, 0, -1, 0, 0, -1, 0,
+ 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0}, // n = 20
+{ 1, 7, 9, 3, 8, 9, 4, 9, 9, 2, 10, 9, 2, 7, 10, 6, 8, 10, 9, 9, 10, 4,
+ 10, 10, 3, 7, 11, 10, 8, 11, 15, 9, 11, 7, 10, 11, -1, 0, 0, -1, 0, 0, -1, 0,
+ 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0}, // n = 20
+{ 2, 10, 9, 4, 11, 9, 1, 9, 10, 6, 10, 10, 10, 11, 10, 1, 9, 11, 9, 10, 11, 16,
+ 11, 11, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0,
+ 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0} // n = 20
+};
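+
+// For illustration, the first row of the table says that index (0, 0) of the 6x5
+// grid is built from 16*a(0,0) + 9*a(1,0) + 1*a(2,0) + 10*a(0,1) + 6*a(1,1) +
+// 1*a(2,1) + 4*a(0,2) + 2*a(1,2), where a(x,y) is the alpha of texel (x,y). The
+// valid weights sum to 49, so the index value is ((weighted sum) / 49) >> 5.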
+
+// Returns the alpha value of a texel at position (x, y) from src.
+// (x, y) are assumed to be in the range [0, 12).
+inline uint8_t GetAlpha(const uint8_t *src, size_t rowBytes, int x, int y) {
+ SkASSERT(x >= 0 && x < 12);
+ SkASSERT(y >= 0 && y < 12);
+ SkASSERT(rowBytes >= 12);
+ return *(src + y*rowBytes + x);
+}
+
+inline uint8_t GetAlphaTranspose(const uint8_t *src, size_t rowBytes, int x, int y) {
+ return GetAlpha(src, rowBytes, y, x);
+}
+
+// Output the 16 bytes stored in top and bottom and advance the pointer. The bytes
+// are stored as the integers are represented in memory, so they should be swapped
+// if necessary.
+static inline void send_packing(uint8_t** dst, const uint64_t top, const uint64_t bottom) {
+ uint64_t* dst64 = reinterpret_cast<uint64_t*>(*dst);
+ dst64[0] = top;
+ dst64[1] = bottom;
+ *dst += 16;
+}
+
+// Compresses an ASTC block, by looking up the proper contributions from
+// k6x5To12x12Table and computing an index from the associated values.
+typedef uint8_t (*GetAlphaProc)(const uint8_t* src, size_t rowBytes, int x, int y);
+
+template<GetAlphaProc getAlphaProc>
+static void compress_a8_astc_block(uint8_t** dst, const uint8_t* src, size_t rowBytes) {
+ // Check for single color
+ bool constant = true;
+ const uint32_t firstInt = *(reinterpret_cast<const uint32_t*>(src));
+ for (int i = 0; i < 12; ++i) {
+ const uint32_t *rowInt = reinterpret_cast<const uint32_t *>(src + i*rowBytes);
+ constant = constant && (rowInt[0] == firstInt);
+ constant = constant && (rowInt[1] == firstInt);
+ constant = constant && (rowInt[2] == firstInt);
+ }
+
+ if (constant) {
+ if (0 == firstInt) {
+ // All of the indices are set to zero, and the colors are
+ // v0 = 0, v1 = 255, so everything will be transparent.
+ send_packing(dst, SkTEndian_SwapLE64(0x0000000001FE000173ULL), 0);
+ return;
+ } else if (0xFFFFFFFF == firstInt) {
+ // All of the indices are set to zero, and the colors are
+ // v0 = 255, v1 = 0, so everything will be opaque.
+ send_packing(dst, SkTEndian_SwapLE64(0x000000000001FE0173ULL), 0);
+ return;
+ }
+ }
+
+ uint8_t indices[30]; // 6x5 index grid
+ for (int idx = 0; idx < 30; ++idx) {
+ int weightTot = 0;
+ int alphaTot = 0;
+ for (int w = 0; w < 20; ++w) {
+ const int8_t weight = k6x5To12x12Table[idx][w*3];
+ if (weight > 0) {
+ const int x = k6x5To12x12Table[idx][w*3 + 1];
+ const int y = k6x5To12x12Table[idx][w*3 + 2];
+ weightTot += weight;
+ alphaTot += weight * getAlphaProc(src, rowBytes, x, y);
+ } else {
+                // In our table, not every entry has 20 valid weights: the
+                // valid weights are all positive, and unused slots hold -1.
+                // Once we hit a negative weight, we know that the remaining
+                // entries are not valid either.
+ break;
+ }
+ }
+
+ indices[idx] = (alphaTot / weightTot) >> 5;
+ }
+
+ // Pack indices... The ASTC block layout is fairly complicated. An extensive
+ // description can be found here:
+ // https://www.opengl.org/registry/specs/KHR/texture_compression_astc_hdr.txt
+ //
+ // Here is a summary of the options that we've chosen:
+ // 1. Block mode: 0b00101110011
+ // - 6x5 texel grid
+ // - Single plane
+ // - Low-precision index values
+ // - Index range 0-7 (three bits per index)
+ // 2. Partitions: 0b00
+ // - One partition
+ // 3. Color Endpoint Mode: 0b0000
+ // - Direct luminance -- e0=(v0,v0,v0,0xFF); e1=(v1,v1,v1,0xFF);
+ // 4. 8-bit endpoints:
+ // v0 = 0, v1 = 255
+ //
+ // The rest of the block contains the 30 index values from before, which
+ // are currently stored in the indices variable.
+
+ uint64_t top = 0x0000000001FE000173ULL;
+ uint64_t bottom = 0;
+
+ for (int idx = 0; idx <= 20; ++idx) {
+ const uint8_t index = indices[idx];
+ bottom |= static_cast<uint64_t>(index) << (61-(idx*3));
+ }
+
+ // index 21 straddles top and bottom
+ {
+ const uint8_t index = indices[21];
+ bottom |= index & 1;
+ top |= static_cast<uint64_t>((index >> 2) | (index & 2)) << 62;
+ }
+
+ for (int idx = 22; idx < 30; ++idx) {
+ const uint8_t index = indices[idx];
+ top |= static_cast<uint64_t>(index) << (59-(idx-22)*3);
+ }
+
+ // Reverse each 3-bit index since indices are read in reverse order...
+ uint64_t t = (bottom ^ (bottom >> 2)) & 0x2492492492492492ULL;
+ bottom = bottom ^ t ^ (t << 2);
+
+ t = (top ^ (top >> 2)) & 0x0924924000000000ULL;
+ top = top ^ t ^ (t << 2);
+
+ send_packing(dst, SkEndian_SwapLE64(top), SkEndian_SwapLE64(bottom));
+}
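+
+// As a cross-check of the constant header used above, 0x1FE000173 decomposes as:
+// bits [0,11)  = 0x173  (block mode: 6x5 grid, single plane, 3-bit indices),
+// bits [11,13) = 0b00   (one partition),
+// bits [13,17) = 0b0000 (CEM 0: direct luminance),
+// bits [17,25) = 0x00   (v0) and bits [25,33) = 0xFF (v1).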
+
+inline void CompressA8ASTCBlockVertical(uint8_t* dst, const uint8_t* src) {
+ compress_a8_astc_block<GetAlphaTranspose>(&dst, src, 12);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// ASTC Decoder
+//
+// Full details available in the spec:
+// http://www.khronos.org/registry/gles/extensions/OES/OES_texture_compression_astc.txt
+//
+////////////////////////////////////////////////////////////////////////////////
+
+// Enable this to assert whenever a decoded block has invalid ASTC values. Otherwise,
+// each invalid block will result in a disgusting magenta color.
+#define ASSERT_ASTC_DECODE_ERROR 0
+
+// Reverse 64-bit integer taken from TAOCP 4a, although it's better
+// documented at this site:
+// http://matthewarcus.wordpress.com/2012/11/18/reversing-a-64-bit-word/
+
+template <typename T, T m, int k>
+static inline T swap_bits(T p) {
+ T q = ((p>>k)^p) & m;
+ return p^q^(q<<k);
+}
+
+static inline uint64_t reverse64(uint64_t n) {
+ static const uint64_t m0 = 0x5555555555555555ULL;
+ static const uint64_t m1 = 0x0300c0303030c303ULL;
+ static const uint64_t m2 = 0x00c0300c03f0003fULL;
+ static const uint64_t m3 = 0x00000ffc00003fffULL;
+ n = ((n>>1)&m0) | (n&m0)<<1;
+ n = swap_bits<uint64_t, m1, 4>(n);
+ n = swap_bits<uint64_t, m2, 8>(n);
+ n = swap_bits<uint64_t, m3, 20>(n);
+ n = (n >> 34) | (n << 30);
+ return n;
+}
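+
+// For example, reverse64(0x1ULL) == 0x8000000000000000ULL, and applying reverse64
+// twice returns the original value.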
+
+// An ASTC block is 128 bits. We represent it as two 64-bit integers in order
+// to efficiently operate on the block using bitwise operations.
+struct ASTCBlock {
+ uint64_t fLow;
+ uint64_t fHigh;
+
+ // Reverses the bits of an ASTC block, making the LSB of the
+ // 128 bit block the MSB.
+ inline void reverse() {
+ const uint64_t newLow = reverse64(this->fHigh);
+ this->fHigh = reverse64(this->fLow);
+ this->fLow = newLow;
+ }
+};
+
+// Writes the given color to every pixel in the block. This is used by void-extent
+// blocks (a special constant-color encoding of a block) and by the error function.
+static inline void write_constant_color(uint8_t* dst, int blockDimX, int blockDimY,
+ int dstRowBytes, SkColor color) {
+ for (int y = 0; y < blockDimY; ++y) {
+ SkColor *dstColors = reinterpret_cast<SkColor*>(dst);
+ for (int x = 0; x < blockDimX; ++x) {
+ dstColors[x] = color;
+ }
+ dst += dstRowBytes;
+ }
+}
+
+// Sets the entire block to the ASTC "error" color, a disgusting magenta
+// that's not supposed to appear in natural images.
+static inline void write_error_color(uint8_t* dst, int blockDimX, int blockDimY,
+ int dstRowBytes) {
+ static const SkColor kASTCErrorColor = SkColorSetRGB(0xFF, 0, 0xFF);
+
+#if ASSERT_ASTC_DECODE_ERROR
+ SkDEBUGFAIL("ASTC decoding error!\n");
+#endif
+
+ write_constant_color(dst, blockDimX, blockDimY, dstRowBytes, kASTCErrorColor);
+}
+
+// Reads up to 64 bits of the ASTC block starting from bit
+// 'from' and going up to but not including bit 'to'. 'from' starts
+// counting from the LSB, counting up to the MSB. Returns -1 on
+// error.
+static uint64_t read_astc_bits(const ASTCBlock &block, int from, int to) {
+ SkASSERT(0 <= from && from <= 128);
+ SkASSERT(0 <= to && to <= 128);
+
+ const int nBits = to - from;
+ if (0 == nBits) {
+ return 0;
+ }
+
+ if (nBits < 0 || 64 <= nBits) {
+ SkDEBUGFAIL("ASTC -- shouldn't read more than 64 bits");
+ return -1;
+ }
+
+ // Remember, the 'to' bit isn't read.
+ uint64_t result = 0;
+ if (to <= 64) {
+ // All desired bits are in the low 64-bits.
+ result = (block.fLow >> from) & ((1ULL << nBits) - 1);
+ } else if (from >= 64) {
+ // All desired bits are in the high 64-bits.
+ result = (block.fHigh >> (from - 64)) & ((1ULL << nBits) - 1);
+ } else {
+ // from < 64 && to > 64
+ SkASSERT(nBits > (64 - from));
+ const int nLow = 64 - from;
+ const int nHigh = nBits - nLow;
+ result =
+ ((block.fLow >> from) & ((1ULL << nLow) - 1)) |
+ ((block.fHigh & ((1ULL << nHigh) - 1)) << nLow);
+ }
+
+ return result;
+}
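+
+// For example, read_astc_bits(block, 0, 11) extracts the 11-bit block mode, while a
+// range that straddles the two halves (say from = 60, to = 68) stitches the top four
+// bits of fLow together with the bottom four bits of fHigh.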
+
+// Returns the number of bits needed to represent a number
+// in the given power-of-two range (excluding the power of two itself).
+static inline int bits_for_range(int x) {
+ SkASSERT(SkIsPow2(x));
+ SkASSERT(0 != x);
+ // Since we know it's a power of two, there should only be one bit set,
+ // meaning the number of trailing zeros is 31 minus the number of leading
+ // zeros.
+ return 31 - SkCLZ(x);
+}
+
+// Clamps an integer to the range [0, 255]
+static inline int clamp_byte(int x) {
+ return SkClampMax(x, 255);
+}
+
+// Helper function defined in the ASTC spec, section C.2.14
+// It transfers a few bits of precision from one value to another.
+static inline void bit_transfer_signed(int *a, int *b) {
+ *b >>= 1;
+ *b |= *a & 0x80;
+ *a >>= 1;
+ *a &= 0x3F;
+ if ( (*a & 0x20) != 0 ) {
+ *a -= 0x40;
+ }
+}
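+
+// For example, starting from *a = 0xFF and *b = 0xFF this yields *a = -1 and
+// *b = 0xFF: the top bit of the original *a becomes the new top bit of *b, and the
+// remaining bits of *a are reinterpreted as a signed value in [-32, 31].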
+
+// Helper function defined in the ASTC spec, section C.2.14
+// It uses the value in the blue channel to tint the red and green
+static inline SkColor blue_contract(int a, int r, int g, int b) {
+ return SkColorSetARGB(a, (r + b) >> 1, (g + b) >> 1, b);
+}
+
+// Helper function that decodes two colors from eight values. If isRGB is true,
+// then the pointer 'v' contains six values and the last two are considered to be
+// 0xFF. If isRGB is false, then all eight values come from the pointer 'v'. This
+// corresponds to the decode procedure for the following endpoint modes:
+// kLDR_RGB_Direct_ColorEndpointMode
+// kLDR_RGBA_Direct_ColorEndpointMode
+static inline void decode_rgba_direct(const int *v, SkColor *endpoints, bool isRGB) {
+
+ int v6 = 0xFF;
+ int v7 = 0xFF;
+ if (!isRGB) {
+ v6 = v[6];
+ v7 = v[7];
+ }
+
+ const int s0 = v[0] + v[2] + v[4];
+ const int s1 = v[1] + v[3] + v[5];
+
+ if (s1 >= s0) {
+ endpoints[0] = SkColorSetARGB(v6, v[0], v[2], v[4]);
+ endpoints[1] = SkColorSetARGB(v7, v[1], v[3], v[5]);
+ } else {
+ endpoints[0] = blue_contract(v7, v[1], v[3], v[5]);
+ endpoints[1] = blue_contract(v6, v[0], v[2], v[4]);
+ }
+}
+
+// Helper function that decodes two colors from six values. If isRGB is true,
+// then the pointer 'v' contains four values and the last two are considered to be
+// 0xFF. If isRGB is false, then all six values come from the pointer 'v'. This
+// corresponds to the decode procedure for the following endpoint modes:
+// kLDR_RGB_BaseScale_ColorEndpointMode
+// kLDR_RGB_BaseScaleWithAlpha_ColorEndpointMode
+static inline void decode_rgba_basescale(const int *v, SkColor *endpoints, bool isRGB) {
+
+ int v4 = 0xFF;
+ int v5 = 0xFF;
+ if (!isRGB) {
+ v4 = v[4];
+ v5 = v[5];
+ }
+
+ endpoints[0] = SkColorSetARGB(v4,
+ (v[0]*v[3]) >> 8,
+ (v[1]*v[3]) >> 8,
+ (v[2]*v[3]) >> 8);
+ endpoints[1] = SkColorSetARGB(v5, v[0], v[1], v[2]);
+}
+
+// Helper function that decodes two colors from eight values. If isRGB is true,
+// then the pointer 'v' contains six values and the last two are considered to be
+// 0xFF. If isRGB is false, then all eight values come from the pointer 'v'. This
+// corresponds to the decode procedure for the following endpoint modes:
+// kLDR_RGB_BaseOffset_ColorEndpointMode
+// kLDR_RGBA_BaseOffset_ColorEndpointMode
+//
+// If isRGB is true, then treat this as if v6 and v7 are meant to encode full alpha values.
+static inline void decode_rgba_baseoffset(const int *v, SkColor *endpoints, bool isRGB) {
+ int v0 = v[0];
+ int v1 = v[1];
+ int v2 = v[2];
+ int v3 = v[3];
+ int v4 = v[4];
+ int v5 = v[5];
+ int v6 = isRGB ? 0xFF : v[6];
+ // The 0 is here because this is an offset, not a direct value
+ int v7 = isRGB ? 0 : v[7];
+
+ bit_transfer_signed(&v1, &v0);
+ bit_transfer_signed(&v3, &v2);
+ bit_transfer_signed(&v5, &v4);
+ if (!isRGB) {
+ bit_transfer_signed(&v7, &v6);
+ }
+
+ int c[2][4];
+ if ((v1 + v3 + v5) >= 0) {
+ c[0][0] = v6;
+ c[0][1] = v0;
+ c[0][2] = v2;
+ c[0][3] = v4;
+
+ c[1][0] = v6 + v7;
+ c[1][1] = v0 + v1;
+ c[1][2] = v2 + v3;
+ c[1][3] = v4 + v5;
+ } else {
+ c[0][0] = v6 + v7;
+ c[0][1] = (v0 + v1 + v4 + v5) >> 1;
+ c[0][2] = (v2 + v3 + v4 + v5) >> 1;
+ c[0][3] = v4 + v5;
+
+ c[1][0] = v6;
+ c[1][1] = (v0 + v4) >> 1;
+ c[1][2] = (v2 + v4) >> 1;
+ c[1][3] = v4;
+ }
+
+ endpoints[0] = SkColorSetARGB(clamp_byte(c[0][0]),
+ clamp_byte(c[0][1]),
+ clamp_byte(c[0][2]),
+ clamp_byte(c[0][3]));
+
+ endpoints[1] = SkColorSetARGB(clamp_byte(c[1][0]),
+ clamp_byte(c[1][1]),
+ clamp_byte(c[1][2]),
+ clamp_byte(c[1][3]));
+}
+
+
+// A helper class used to decode bit values from standard integer values.
+// We can't use this class with ASTCBlock because then it would need to
+// handle multi-value ranges, and it's non-trivial to look up a range of bits
+// that splits across two different ints.
+template <typename T>
+class SkTBits {
+public:
+ SkTBits(const T val) : fVal(val) { }
+
+ // Returns the bit at the given position
+ T operator [](const int idx) const {
+ return (fVal >> idx) & 1;
+ }
+
+ // Returns the bits in the given range, inclusive
+ T operator ()(const int end, const int start) const {
+ SkASSERT(end >= start);
+ return (fVal >> start) & ((1ULL << ((end - start) + 1)) - 1);
+ }
+
+private:
+ const T fVal;
+};
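+
+// For example, given SkTBits<int> bits(0x59) (binary 1011001): bits[0] == 1,
+// bits(3, 1) == 0x4 (binary 100) and bits(6, 4) == 0x5 (binary 101).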
+
+// This algorithm matches the trit block decoding in the spec (Table C.2.14)
+static void decode_trit_block(int* dst, int nBits, const uint64_t &block) {
+
+ SkTBits<uint64_t> blockBits(block);
+
+ // According to the spec, a trit block, which contains five values,
+ // has the following layout:
+ //
+ // 27 26 25 24 23 22 21 20 19 18 17 16
+ // -----------------------------------------------
+ // |T7 | m4 |T6 T5 | m3 |T4 |
+ // -----------------------------------------------
+ //
+ // 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ // --------------------------------------------------------------
+ // | m2 |T3 T2 | m1 |T1 T0 | m0 |
+ // --------------------------------------------------------------
+ //
+ // Where the m's are variable width depending on the number of bits used
+ // to encode the values (anywhere from 0 to 6). Since 3^5 = 243, the extra
+    // byte labeled T (whose bits are interleaved, where 0 is the LSB and 7 is
+    // the MSB) contains five trit values. To decode the trit values, the spec
+ // says that we need to follow the following algorithm:
+ //
+ // if T[4:2] = 111
+ // C = { T[7:5], T[1:0] }; t4 = t3 = 2
+ // else
+ // C = T[4:0]
+ //
+ // if T[6:5] = 11
+ // t4 = 2; t3 = T[7]
+ // else
+ // t4 = T[7]; t3 = T[6:5]
+ //
+ // if C[1:0] = 11
+ // t2 = 2; t1 = C[4]; t0 = { C[3], C[2]&~C[3] }
+ // else if C[3:2] = 11
+ // t2 = 2; t1 = 2; t0 = C[1:0]
+ // else
+ // t2 = C[4]; t1 = C[3:2]; t0 = { C[1], C[0]&~C[1] }
+ //
+ // The following C++ code is meant to mirror this layout and algorithm as
+ // closely as possible.
+
+ int m[5];
+ if (0 == nBits) {
+ memset(m, 0, sizeof(m));
+ } else {
+ SkASSERT(nBits < 8);
+ m[0] = static_cast<int>(blockBits(nBits - 1, 0));
+ m[1] = static_cast<int>(blockBits(2*nBits - 1 + 2, nBits + 2));
+ m[2] = static_cast<int>(blockBits(3*nBits - 1 + 4, 2*nBits + 4));
+ m[3] = static_cast<int>(blockBits(4*nBits - 1 + 5, 3*nBits + 5));
+ m[4] = static_cast<int>(blockBits(5*nBits - 1 + 7, 4*nBits + 7));
+ }
+
+ int T =
+ static_cast<int>(blockBits(nBits + 1, nBits)) |
+ (static_cast<int>(blockBits(2*nBits + 2 + 1, 2*nBits + 2)) << 2) |
+ (static_cast<int>(blockBits[3*nBits + 4] << 4)) |
+ (static_cast<int>(blockBits(4*nBits + 5 + 1, 4*nBits + 5)) << 5) |
+ (static_cast<int>(blockBits[5*nBits + 7] << 7));
+
+ int t[5];
+
+ int C;
+ SkTBits<int> Tbits(T);
+ if (0x7 == Tbits(4, 2)) {
+ C = (Tbits(7, 5) << 2) | Tbits(1, 0);
+ t[3] = t[4] = 2;
+ } else {
+ C = Tbits(4, 0);
+ if (Tbits(6, 5) == 0x3) {
+ t[4] = 2; t[3] = Tbits[7];
+ } else {
+ t[4] = Tbits[7]; t[3] = Tbits(6, 5);
+ }
+ }
+
+ SkTBits<int> Cbits(C);
+ if (Cbits(1, 0) == 0x3) {
+ t[2] = 2;
+ t[1] = Cbits[4];
+ t[0] = (Cbits[3] << 1) | (Cbits[2] & (0x1 & ~(Cbits[3])));
+ } else if (Cbits(3, 2) == 0x3) {
+ t[2] = 2;
+ t[1] = 2;
+ t[0] = Cbits(1, 0);
+ } else {
+ t[2] = Cbits[4];
+ t[1] = Cbits(3, 2);
+ t[0] = (Cbits[1] << 1) | (Cbits[0] & (0x1 & ~(Cbits[1])));
+ }
+
+#ifdef SK_DEBUG
+ // Make sure all of the decoded values have a trit less than three
+ // and a bit value within the range of the allocated bits.
+ for (int i = 0; i < 5; ++i) {
+ SkASSERT(t[i] < 3);
+ SkASSERT(m[i] < (1 << nBits));
+ }
+#endif
+
+ for (int i = 0; i < 5; ++i) {
+ *dst = (t[i] << nBits) + m[i];
+ ++dst;
+ }
+}
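+
+// For reference, with nBits == 2 a full trit block spans 8 + 5*2 = 18 bits and each
+// of the five decoded values lies in [0, 11]: a trit in the top position with two
+// plain bits below it.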
+
+// This algorithm matches the quint block decoding in the spec (Table C.2.15)
+static void decode_quint_block(int* dst, int nBits, const uint64_t &block) {
+ SkTBits<uint64_t> blockBits(block);
+
+ // According to the spec, a quint block, which contains three values,
+ // has the following layout:
+ //
+ //
+ // 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ // --------------------------------------------------------------------------
+ // |Q6 Q5 | m2 |Q4 Q3 | m1 |Q2 Q1 Q0 | m0 |
+ // --------------------------------------------------------------------------
+ //
+ // Where the m's are variable width depending on the number of bits used
+ // to encode the values (anywhere from 0 to 4). Since 5^3 = 125, the extra
+    // 7-bit value labeled Q (whose bits are interleaved, where 0 is the LSB and 6 is
+    // the MSB) contains three quint values. To decode the quint values, the spec
+ // says that we need to follow the following algorithm:
+ //
+ // if Q[2:1] = 11 and Q[6:5] = 00
+ // q2 = { Q[0], Q[4]&~Q[0], Q[3]&~Q[0] }; q1 = q0 = 4
+ // else
+ // if Q[2:1] = 11
+ // q2 = 4; C = { Q[4:3], ~Q[6:5], Q[0] }
+ // else
+    //      q2 = Q[6:5]; C = Q[4:0]
+ //
+ // if C[2:0] = 101
+ // q1 = 4; q0 = C[4:3]
+ // else
+ // q1 = C[4:3]; q0 = C[2:0]
+ //
+ // The following C++ code is meant to mirror this layout and algorithm as
+ // closely as possible.
+
+ int m[3];
+ if (0 == nBits) {
+ memset(m, 0, sizeof(m));
+ } else {
+ SkASSERT(nBits < 8);
+ m[0] = static_cast<int>(blockBits(nBits - 1, 0));
+ m[1] = static_cast<int>(blockBits(2*nBits - 1 + 3, nBits + 3));
+ m[2] = static_cast<int>(blockBits(3*nBits - 1 + 5, 2*nBits + 5));
+ }
+
+ int Q =
+ static_cast<int>(blockBits(nBits + 2, nBits)) |
+ (static_cast<int>(blockBits(2*nBits + 3 + 1, 2*nBits + 3)) << 3) |
+ (static_cast<int>(blockBits(3*nBits + 5 + 1, 3*nBits + 5)) << 5);
+
+ int q[3];
+ SkTBits<int> Qbits(Q); // quantum?
+
+ if (Qbits(2, 1) == 0x3 && Qbits(6, 5) == 0) {
+ const int notBitZero = (0x1 & ~(Qbits[0]));
+ q[2] = (Qbits[0] << 2) | ((Qbits[4] & notBitZero) << 1) | (Qbits[3] & notBitZero);
+ q[1] = 4;
+ q[0] = 4;
+ } else {
+ int C;
+ if (Qbits(2, 1) == 0x3) {
+ q[2] = 4;
+ C = (Qbits(4, 3) << 3) | ((0x3 & ~(Qbits(6, 5))) << 1) | Qbits[0];
+ } else {
+ q[2] = Qbits(6, 5);
+ C = Qbits(4, 0);
+ }
+
+ SkTBits<int> Cbits(C);
+ if (Cbits(2, 0) == 0x5) {
+ q[1] = 4;
+ q[0] = Cbits(4, 3);
+ } else {
+ q[1] = Cbits(4, 3);
+ q[0] = Cbits(2, 0);
+ }
+ }
+
+#ifdef SK_DEBUG
+ for (int i = 0; i < 3; ++i) {
+ SkASSERT(q[i] < 5);
+ SkASSERT(m[i] < (1 << nBits));
+ }
+#endif
+
+ for (int i = 0; i < 3; ++i) {
+ *dst = (q[i] << nBits) + m[i];
+ ++dst;
+ }
+}
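+
+// For reference, with nBits == 1 a full quint block spans 7 + 3*1 = 10 bits and each
+// of the three decoded values lies in [0, 9]: a quint in the top position with one
+// plain bit below it.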
+
+// Function that decodes a sequence of integers stored as an ISE (Integer
+// Sequence Encoding) bit stream. The full details of this function are outlined
+// in section C.2.12 of the ASTC spec. A brief overview is as follows:
+//
+// - Each integer in the sequence is bounded by a specific range r.
+// - The range of each value determines the way the bit stream is interpreted,
+// - If the range is a power of two, then the sequence is a sequence of bits
+// - If the range is of the form 3*2^n, then the sequence is stored as a
+// sequence of blocks, each block contains 5 trits and 5 bit sequences, which
+// decodes into 5 values.
+// - Similarly, if the range is of the form 5*2^n, then the sequence is stored as a
+// sequence of blocks, each block contains 3 quints and 3 bit sequences, which
+// decodes into 3 values.
+static bool decode_integer_sequence(
+ int* dst, // The array holding the destination bits
+ int dstSize, // The maximum size of the array
+ int nVals, // The number of values that we'd like to decode
+ const ASTCBlock &block, // The block that we're decoding from
+ int startBit, // The bit from which we're going to do the reading
+ int endBit, // The bit at which we stop reading (not inclusive)
+ bool bReadForward, // If true, then read LSB -> MSB, else read MSB -> LSB
+ int nBits, // The number of bits representing this encoding
+ int nTrits, // The number of trits representing this encoding
+ int nQuints // The number of quints representing this encoding
+) {
+ // If we want more values than we have, then fail.
+ if (nVals > dstSize) {
+ return false;
+ }
+
+ ASTCBlock src = block;
+
+ if (!bReadForward) {
+ src.reverse();
+ startBit = 128 - startBit;
+ endBit = 128 - endBit;
+ }
+
+ while (nVals > 0) {
+
+ if (nTrits > 0) {
+ SkASSERT(0 == nQuints);
+
+ int endBlockBit = startBit + 8 + 5*nBits;
+ if (endBlockBit > endBit) {
+ endBlockBit = endBit;
+ }
+
+            // Trit blocks are five values large.
+ int trits[5];
+ decode_trit_block(trits, nBits, read_astc_bits(src, startBit, endBlockBit));
+ memcpy(dst, trits, SkMin32(nVals, 5)*sizeof(int));
+
+ dst += 5;
+ nVals -= 5;
+ startBit = endBlockBit;
+
+ } else if (nQuints > 0) {
+ SkASSERT(0 == nTrits);
+
+ int endBlockBit = startBit + 7 + 3*nBits;
+ if (endBlockBit > endBit) {
+ endBlockBit = endBit;
+ }
+
+ // Quint blocks are three values large
+ int quints[3];
+ decode_quint_block(quints, nBits, read_astc_bits(src, startBit, endBlockBit));
+ memcpy(dst, quints, SkMin32(nVals, 3)*sizeof(int));
+
+ dst += 3;
+ nVals -= 3;
+ startBit = endBlockBit;
+
+ } else {
+ // Just read the bits, but don't read more than we have...
+ int endValBit = startBit + nBits;
+ if (endValBit > endBit) {
+ endValBit = endBit;
+ }
+
+ SkASSERT(endValBit - startBit < 31);
+ *dst = static_cast<int>(read_astc_bits(src, startBit, endValBit));
+ ++dst;
+ --nVals;
+ startBit = endValBit;
+ }
+ }
+
+ return true;
+}
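+
+// For example, a weight stream quantized with nBits == 3 and no trits or quints
+// (like the one produced by the A8 ASTC compressor above) is simply 30 consecutive
+// 3-bit fields, 90 bits in total, each read by the plain-bits branch above.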
+
+// Helper function that unquantizes some (seemingly random) generated
+// numbers... meant to match the ASTC hardware. This function is used
+// to unquantize both colors (Table C.2.16) and weights (Table C.2.26)
+static inline int unquantize_value(unsigned mask, int A, int B, int C, int D) {
+ int T = D * C + B;
+ T = T ^ A;
+ T = (A & mask) | (T >> 2);
+ SkASSERT(T < 256);
+ return T;
+}
+
+// Helper function to replicate the bits of x, an oldPrec-precision integer,
+// into a prec-precision integer. For example:
+// 255 == replicate_bits(7, 3, 8);
+static inline int replicate_bits(int x, int oldPrec, int prec) {
+ while (oldPrec < prec) {
+ const int toShift = SkMin32(prec-oldPrec, oldPrec);
+ x = (x << toShift) | (x >> (oldPrec - toShift));
+ oldPrec += toShift;
+ }
+
+ // Make sure that no bits are set outside the desired precision.
+ SkASSERT((-(1 << prec) & x) == 0);
+ return x;
+}
+
+// Returns the unquantized value of a color that's represented only as
+// a set of bits.
+static inline int unquantize_bits_color(int val, int nBits) {
+ return replicate_bits(val, nBits, 8);
+}
+
+// Returns the unquantized value of a color that's represented as a
+// trit followed by nBits bits. This algorithm follows the sequence
+// defined in section C.2.13 of the ASTC spec.
+static inline int unquantize_trit_color(int val, int nBits) {
+ SkASSERT(nBits > 0);
+ SkASSERT(nBits < 7);
+
+ const int D = (val >> nBits) & 0x3;
+ SkASSERT(D < 3);
+
+ const int A = -(val & 0x1) & 0x1FF;
+
+ static const int Cvals[6] = { 204, 93, 44, 22, 11, 5 };
+ const int C = Cvals[nBits - 1];
+
+ int B = 0;
+ const SkTBits<int> valBits(val);
+ switch (nBits) {
+ case 1:
+ B = 0;
+ break;
+
+ case 2: {
+ const int b = valBits[1];
+ B = (b << 1) | (b << 2) | (b << 4) | (b << 8);
+ }
+ break;
+
+ case 3: {
+ const int cb = valBits(2, 1);
+ B = cb | (cb << 2) | (cb << 7);
+ }
+ break;
+
+ case 4: {
+ const int dcb = valBits(3, 1);
+ B = dcb | (dcb << 6);
+ }
+ break;
+
+ case 5: {
+ const int edcb = valBits(4, 1);
+ B = (edcb << 5) | (edcb >> 2);
+ }
+ break;
+
+ case 6: {
+ const int fedcb = valBits(5, 1);
+ B = (fedcb << 4) | (fedcb >> 4);
+ }
+ break;
+ }
+
+ return unquantize_value(0x80, A, B, C, D);
+}
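+
+// For illustration, with nBits == 1 the six possible inputs {0, 1, 2, 3, 4, 5}
+// unquantize to {0, 255, 51, 204, 102, 153} respectively, i.e. the evenly spaced
+// set {0, 51, 102, 153, 204, 255}.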
+
+// Returns the unquantized value of a color that's represented as a
+// quint followed by nBits bits. This algorithm follows the sequence
+// defined in section C.2.13 of the ASTC spec.
+static inline int unquantize_quint_color(int val, int nBits) {
+ const int D = (val >> nBits) & 0x7;
+ SkASSERT(D < 5);
+
+ const int A = -(val & 0x1) & 0x1FF;
+
+ static const int Cvals[5] = { 113, 54, 26, 13, 6 };
+ SkASSERT(nBits > 0);
+ SkASSERT(nBits < 6);
+
+ const int C = Cvals[nBits - 1];
+
+ int B = 0;
+ const SkTBits<int> valBits(val);
+ switch (nBits) {
+ case 1:
+ B = 0;
+ break;
+
+ case 2: {
+ const int b = valBits[1];
+ B = (b << 2) | (b << 3) | (b << 8);
+ }
+ break;
+
+ case 3: {
+ const int cb = valBits(2, 1);
+ B = (cb >> 1) | (cb << 1) | (cb << 7);
+ }
+ break;
+
+ case 4: {
+ const int dcb = valBits(3, 1);
+ B = (dcb >> 1) | (dcb << 6);
+ }
+ break;
+
+ case 5: {
+ const int edcb = valBits(4, 1);
+ B = (edcb << 5) | (edcb >> 3);
+ }
+ break;
+ }
+
+ return unquantize_value(0x80, A, B, C, D);
+}
+
+// This algorithm takes a list of integers, stored in vals, and unquantizes them
+// in place. This follows the algorithm laid out in section C.2.13 of the ASTC spec.
+static void unquantize_colors(int *vals, int nVals, int nBits, int nTrits, int nQuints) {
+ for (int i = 0; i < nVals; ++i) {
+ if (nTrits > 0) {
+ SkASSERT(nQuints == 0);
+ vals[i] = unquantize_trit_color(vals[i], nBits);
+ } else if (nQuints > 0) {
+ SkASSERT(nTrits == 0);
+ vals[i] = unquantize_quint_color(vals[i], nBits);
+ } else {
+ SkASSERT(nQuints == 0 && nTrits == 0);
+ vals[i] = unquantize_bits_color(vals[i], nBits);
+ }
+ }
+}
+
+// Returns an interpolated value between c0 and c1 based on the weight. This
+// follows the algorithm laid out in section C.2.19 of the ASTC spec.
+static int interpolate_channel(int c0, int c1, int weight) {
+ SkASSERT(0 <= c0 && c0 < 256);
+ SkASSERT(0 <= c1 && c1 < 256);
+
+ c0 = (c0 << 8) | c0;
+ c1 = (c1 << 8) | c1;
+
+ const int result = ((c0*(64 - weight) + c1*weight + 32) / 64) >> 8;
+
+ if (result > 255) {
+ return 255;
+ }
+
+ SkASSERT(result >= 0);
+ return result;
+}
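+
+// For example, interpolate_channel(0, 255, 32) == 128, while weights of 0 and 64
+// return c0 and c1 exactly.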
+
+// Returns an interpolated color between the two endpoints based on the weight.
+static SkColor interpolate_endpoints(const SkColor endpoints[2], int weight) {
+ return SkColorSetARGB(
+ interpolate_channel(SkColorGetA(endpoints[0]), SkColorGetA(endpoints[1]), weight),
+ interpolate_channel(SkColorGetR(endpoints[0]), SkColorGetR(endpoints[1]), weight),
+ interpolate_channel(SkColorGetG(endpoints[0]), SkColorGetG(endpoints[1]), weight),
+ interpolate_channel(SkColorGetB(endpoints[0]), SkColorGetB(endpoints[1]), weight));
+}
+
+// Returns an interpolated color between the two endpoints based on the weight.
+// It uses a separate weight for one channel depending on the value of the 'plane'
+// variable. By default, all channels use weight0; the value of plane selects the
+// channel that uses weight1 instead:
+// 0: red
+// 1: green
+// 2: blue
+// 3: alpha
+static SkColor interpolate_dual_endpoints(
+ const SkColor endpoints[2], int weight0, int weight1, int plane) {
+ int a = interpolate_channel(SkColorGetA(endpoints[0]), SkColorGetA(endpoints[1]), weight0);
+ int r = interpolate_channel(SkColorGetR(endpoints[0]), SkColorGetR(endpoints[1]), weight0);
+ int g = interpolate_channel(SkColorGetG(endpoints[0]), SkColorGetG(endpoints[1]), weight0);
+ int b = interpolate_channel(SkColorGetB(endpoints[0]), SkColorGetB(endpoints[1]), weight0);
+
+ switch (plane) {
+
+ case 0:
+ r = interpolate_channel(
+ SkColorGetR(endpoints[0]), SkColorGetR(endpoints[1]), weight1);
+ break;
+
+ case 1:
+ g = interpolate_channel(
+ SkColorGetG(endpoints[0]), SkColorGetG(endpoints[1]), weight1);
+ break;
+
+ case 2:
+ b = interpolate_channel(
+ SkColorGetB(endpoints[0]), SkColorGetB(endpoints[1]), weight1);
+ break;
+
+ case 3:
+ a = interpolate_channel(
+ SkColorGetA(endpoints[0]), SkColorGetA(endpoints[1]), weight1);
+ break;
+
+ default:
+ SkDEBUGFAIL("Plane should be 0-3");
+ break;
+ }
+
+ return SkColorSetARGB(a, r, g, b);
+}
+
+// A struct of decoded values that we use to carry around information
+// about the block. dimX and dimY are the dimension in texels of the block,
+// for which there is only a limited subset of valid values:
+//
+// 4x4, 5x4, 5x5, 6x5, 6x6, 8x5, 8x6, 8x8, 10x5, 10x6, 10x8, 10x10, 12x10, 12x12
+
+struct ASTCDecompressionData {
+ ASTCDecompressionData(int dimX, int dimY) : fDimX(dimX), fDimY(dimY) { }
+ const int fDimX; // the X dimension of the decompressed block
+ const int fDimY; // the Y dimension of the decompressed block
+ ASTCBlock fBlock; // the block data
+ int fBlockMode; // the block header that contains the block mode.
+
+ bool fDualPlaneEnabled; // is this block compressing dual weight planes?
+ int fDualPlane; // the independent plane in dual plane mode.
+
+ bool fVoidExtent; // is this block a single color?
+ bool fError; // does this block have an error encoding?
+
+ int fWeightDimX; // the x dimension of the weight grid
+ int fWeightDimY; // the y dimension of the weight grid
+
+ int fWeightBits; // the number of bits used for each weight value
+ int fWeightTrits; // the number of trits used for each weight value
+ int fWeightQuints; // the number of quints used for each weight value
+
+ int fPartCount; // the number of partitions in this block
+ int fPartIndex; // the partition index: only relevant if fPartCount > 0
+
+ // CEM values can be anything in the range 0-15, and each corresponds to a different
+ // mode that represents the color data. We only support LDR modes.
+ enum ColorEndpointMode {
+ kLDR_Luminance_Direct_ColorEndpointMode = 0,
+ kLDR_Luminance_BaseOffset_ColorEndpointMode = 1,
+ kHDR_Luminance_LargeRange_ColorEndpointMode = 2,
+ kHDR_Luminance_SmallRange_ColorEndpointMode = 3,
+ kLDR_LuminanceAlpha_Direct_ColorEndpointMode = 4,
+ kLDR_LuminanceAlpha_BaseOffset_ColorEndpointMode = 5,
+ kLDR_RGB_BaseScale_ColorEndpointMode = 6,
+ kHDR_RGB_BaseScale_ColorEndpointMode = 7,
+ kLDR_RGB_Direct_ColorEndpointMode = 8,
+ kLDR_RGB_BaseOffset_ColorEndpointMode = 9,
+ kLDR_RGB_BaseScaleWithAlpha_ColorEndpointMode = 10,
+ kHDR_RGB_ColorEndpointMode = 11,
+ kLDR_RGBA_Direct_ColorEndpointMode = 12,
+ kLDR_RGBA_BaseOffset_ColorEndpointMode = 13,
+ kHDR_RGB_LDRAlpha_ColorEndpointMode = 14,
+ kHDR_RGB_HDRAlpha_ColorEndpointMode = 15
+ };
+ static const int kMaxColorEndpointModes = 16;
+
+ // the color endpoint modes for this block.
+ static const int kMaxPartitions = 4;
+ ColorEndpointMode fCEM[kMaxPartitions];
+
+ int fColorStartBit; // The bit position of the first bit of the color data
+ int fColorEndBit; // The bit position of the last *possible* bit of the color data
+
+ // Returns the number of partitions for this block.
+ int numPartitions() const {
+ return fPartCount;
+ }
+
+ // Returns the total number of weight values that are stored in this block
+ int numWeights() const {
+ return fWeightDimX * fWeightDimY * (fDualPlaneEnabled ? 2 : 1);
+ }
+
+#ifdef SK_DEBUG
+ // Returns the maximum value that any weight can take. We really only use
+ // this function for debugging.
+ int maxWeightValue() const {
+ int maxVal = (1 << fWeightBits);
+ if (fWeightTrits > 0) {
+ SkASSERT(0 == fWeightQuints);
+ maxVal *= 3;
+ } else if (fWeightQuints > 0) {
+ SkASSERT(0 == fWeightTrits);
+ maxVal *= 5;
+ }
+ return maxVal - 1;
+ }
+#endif
+
+ // The number of bits needed to represent the texel weight data. This
+ // comes from the 'data size determination' section of the ASTC spec (C.2.22)
+ int numWeightBits() const {
+ const int nWeights = this->numWeights();
+ return
+ ((nWeights*8*fWeightTrits + 4) / 5) +
+ ((nWeights*7*fWeightQuints + 2) / 3) +
+ (nWeights*fWeightBits);
+ }
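+
+    // For example, a single-plane 6x5 weight grid with fWeightBits == 3 and no trits
+    // or quints needs 30*3 = 90 bits, matching the layout emitted by the A8
+    // compressor above.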
+
+ // Returns the number of color values stored in this block. The number of
+ // values stored is directly a function of the color endpoint modes.
+ int numColorValues() const {
+ int numValues = 0;
+ for (int i = 0; i < this->numPartitions(); ++i) {
+ int cemInt = static_cast<int>(fCEM[i]);
+ numValues += ((cemInt >> 2) + 1) * 2;
+ }
+
+ return numValues;
+ }
+
+ // Figures out the number of bits available for color values, and fills
+ // in the maximum encoding that will fit the number of color values that
+ // we need. Returns false on error. (See section C.2.22 of the spec)
+ bool getColorValueEncoding(int *nBits, int *nTrits, int *nQuints) const {
+ if (nullptr == nBits || nullptr == nTrits || nullptr == nQuints) {
+ return false;
+ }
+
+ const int nColorVals = this->numColorValues();
+ if (nColorVals <= 0) {
+ return false;
+ }
+
+ const int colorBits = fColorEndBit - fColorStartBit;
+ SkASSERT(colorBits > 0);
+
+ // This is the minimum amount of accuracy required by the spec.
+ if (colorBits < ((13 * nColorVals + 4) / 5)) {
+ return false;
+ }
+
+ // Values can be represented as at most 8-bit values.
+ // !SPEED! place this in a lookup table based on colorBits and nColorVals
+ for (int i = 255; i > 0; --i) {
+ int range = i + 1;
+ int bits = 0, trits = 0, quints = 0;
+ bool valid = false;
+ if (SkIsPow2(range)) {
+ bits = bits_for_range(range);
+ valid = true;
+ } else if ((range % 3) == 0 && SkIsPow2(range/3)) {
+ trits = 1;
+ bits = bits_for_range(range/3);
+ valid = true;
+ } else if ((range % 5) == 0 && SkIsPow2(range/5)) {
+ quints = 1;
+ bits = bits_for_range(range/5);
+ valid = true;
+ }
+
+ if (valid) {
+ const int actualColorBits =
+ ((nColorVals*8*trits + 4) / 5) +
+ ((nColorVals*7*quints + 2) / 3) +
+ (nColorVals*bits);
+ if (actualColorBits <= colorBits) {
+ *nTrits = trits;
+ *nQuints = quints;
+ *nBits = bits;
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
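+
+    // For example, with a single partition using CEM 0 (direct luminance) there are
+    // two color values; with 90 weight bits and no dual plane that leaves
+    // 128 - 90 - 17 = 21 bits of color data, so the loop above settles on a plain
+    // 8-bit encoding (2*8 = 16 bits), which is what the A8 compressor above emits.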
+
+ // Converts the sequence of color values into endpoints. The algorithm here
+ // corresponds to the values determined by section C.2.14 of the ASTC spec
+ void colorEndpoints(SkColor endpoints[4][2], const int* colorValues) const {
+ for (int i = 0; i < this->numPartitions(); ++i) {
+ switch (fCEM[i]) {
+ case kLDR_Luminance_Direct_ColorEndpointMode: {
+ const int* v = colorValues;
+ endpoints[i][0] = SkColorSetARGB(0xFF, v[0], v[0], v[0]);
+ endpoints[i][1] = SkColorSetARGB(0xFF, v[1], v[1], v[1]);
+
+ colorValues += 2;
+ }
+ break;
+
+ case kLDR_Luminance_BaseOffset_ColorEndpointMode: {
+ const int* v = colorValues;
+ const int L0 = (v[0] >> 2) | (v[1] & 0xC0);
+ const int L1 = clamp_byte(L0 + (v[1] & 0x3F));
+
+ endpoints[i][0] = SkColorSetARGB(0xFF, L0, L0, L0);
+ endpoints[i][1] = SkColorSetARGB(0xFF, L1, L1, L1);
+
+ colorValues += 2;
+ }
+ break;
+
+ case kLDR_LuminanceAlpha_Direct_ColorEndpointMode: {
+ const int* v = colorValues;
+
+ endpoints[i][0] = SkColorSetARGB(v[2], v[0], v[0], v[0]);
+ endpoints[i][1] = SkColorSetARGB(v[3], v[1], v[1], v[1]);
+
+ colorValues += 4;
+ }
+ break;
+
+ case kLDR_LuminanceAlpha_BaseOffset_ColorEndpointMode: {
+ int v0 = colorValues[0];
+ int v1 = colorValues[1];
+ int v2 = colorValues[2];
+ int v3 = colorValues[3];
+
+ bit_transfer_signed(&v1, &v0);
+ bit_transfer_signed(&v3, &v2);
+
+ endpoints[i][0] = SkColorSetARGB(v2, v0, v0, v0);
+ endpoints[i][1] = SkColorSetARGB(
+ clamp_byte(v3+v2),
+ clamp_byte(v1+v0),
+ clamp_byte(v1+v0),
+ clamp_byte(v1+v0));
+
+ colorValues += 4;
+ }
+ break;
+
+ case kLDR_RGB_BaseScale_ColorEndpointMode: {
+ decode_rgba_basescale(colorValues, endpoints[i], true);
+ colorValues += 4;
+ }
+ break;
+
+ case kLDR_RGB_Direct_ColorEndpointMode: {
+ decode_rgba_direct(colorValues, endpoints[i], true);
+ colorValues += 6;
+ }
+ break;
+
+ case kLDR_RGB_BaseOffset_ColorEndpointMode: {
+ decode_rgba_baseoffset(colorValues, endpoints[i], true);
+ colorValues += 6;
+ }
+ break;
+
+ case kLDR_RGB_BaseScaleWithAlpha_ColorEndpointMode: {
+ decode_rgba_basescale(colorValues, endpoints[i], false);
+ colorValues += 6;
+ }
+ break;
+
+ case kLDR_RGBA_Direct_ColorEndpointMode: {
+ decode_rgba_direct(colorValues, endpoints[i], false);
+ colorValues += 8;
+ }
+ break;
+
+ case kLDR_RGBA_BaseOffset_ColorEndpointMode: {
+ decode_rgba_baseoffset(colorValues, endpoints[i], false);
+ colorValues += 8;
+ }
+ break;
+
+ default:
+ SkDEBUGFAIL("HDR mode unsupported! This should be caught sooner.");
+ break;
+ }
+ }
+ }
+
+ // Follows the procedure from section C.2.17 of the ASTC specification
+ int unquantizeWeight(int x) const {
+ SkASSERT(x <= this->maxWeightValue());
+
+ const int D = (x >> fWeightBits) & 0x7;
+ const int A = -(x & 0x1) & 0x7F;
+
+ SkTBits<int> xbits(x);
+
+ int T = 0;
+ if (fWeightTrits > 0) {
+ SkASSERT(0 == fWeightQuints);
+ switch (fWeightBits) {
+ case 0: {
+ // x is a single trit
+ SkASSERT(x < 3);
+
+ static const int kUnquantizationTable[3] = { 0, 32, 63 };
+ T = kUnquantizationTable[x];
+ }
+ break;
+
+ case 1: {
+ const int B = 0;
+ const int C = 50;
+ T = unquantize_value(0x20, A, B, C, D);
+ }
+ break;
+
+ case 2: {
+ const int b = xbits[1];
+ const int B = b | (b << 2) | (b << 6);
+ const int C = 23;
+ T = unquantize_value(0x20, A, B, C, D);
+ }
+ break;
+
+ case 3: {
+ const int cb = xbits(2, 1);
+ const int B = cb | (cb << 5);
+ const int C = 11;
+ T = unquantize_value(0x20, A, B, C, D);
+ }
+ break;
+
+ default:
+ SkDEBUGFAIL("Too many bits for trit encoding");
+ break;
+ }
+
+ } else if (fWeightQuints > 0) {
+ SkASSERT(0 == fWeightTrits);
+ switch (fWeightBits) {
+ case 0: {
+ // x is a single quint
+ SkASSERT(x < 5);
+
+ static const int kUnquantizationTable[5] = { 0, 16, 32, 47, 63 };
+ T = kUnquantizationTable[x];
+ }
+ break;
+
+ case 1: {
+ const int B = 0;
+ const int C = 28;
+ T = unquantize_value(0x20, A, B, C, D);
+ }
+ break;
+
+ case 2: {
+ const int b = xbits[1];
+ const int B = (b << 1) | (b << 6);
+ const int C = 13;
+ T = unquantize_value(0x20, A, B, C, D);
+ }
+ break;
+
+ default:
+ SkDEBUGFAIL("Too many bits for quint encoding");
+ break;
+ }
+ } else {
+ SkASSERT(0 == fWeightTrits);
+ SkASSERT(0 == fWeightQuints);
+
+ T = replicate_bits(x, fWeightBits, 6);
+ }
+
+        // This should bring the value within [0, 63].
+ SkASSERT(T <= 63);
+
+ if (T > 32) {
+ T += 1;
+ }
+
+ SkASSERT(T <= 64);
+
+ return T;
+ }
+
+ // Returns the weight at the associated index. If the index is out of bounds, it
+ // returns zero. It also chooses the weight appropriately based on the given dual
+ // plane.
+ int getWeight(const int* unquantizedWeights, int idx, bool dualPlane) const {
+ const int maxIdx = (fDualPlaneEnabled ? 2 : 1) * fWeightDimX * fWeightDimY - 1;
+ if (fDualPlaneEnabled) {
+ const int effectiveIdx = 2*idx + (dualPlane ? 1 : 0);
+ if (effectiveIdx > maxIdx) {
+ return 0;
+ }
+ return unquantizedWeights[effectiveIdx];
+ }
+
+ SkASSERT(!dualPlane);
+
+ if (idx > maxIdx) {
+ return 0;
+ } else {
+ return unquantizedWeights[idx];
+ }
+ }
+
+ // This computes the effective weight at location (s, t) of the block. This
+ // weight is computed by sampling the texel weight grid (it's usually not 1-1), and
+ // then applying a bilerp. The algorithm outlined here follows the algorithm
+ // defined in section C.2.18 of the ASTC spec.
+ int infillWeight(const int* unquantizedValues, int s, int t, bool dualPlane) const {
+ const int Ds = (1024 + fDimX/2) / (fDimX - 1);
+ const int Dt = (1024 + fDimY/2) / (fDimY - 1);
+
+ const int cs = Ds * s;
+ const int ct = Dt * t;
+
+ const int gs = (cs*(fWeightDimX - 1) + 32) >> 6;
+ const int gt = (ct*(fWeightDimY - 1) + 32) >> 6;
+
+ const int js = gs >> 4;
+ const int jt = gt >> 4;
+
+ const int fs = gs & 0xF;
+ const int ft = gt & 0xF;
+
+ const int idx = js + jt*fWeightDimX;
+ const int p00 = this->getWeight(unquantizedValues, idx, dualPlane);
+ const int p01 = this->getWeight(unquantizedValues, idx + 1, dualPlane);
+ const int p10 = this->getWeight(unquantizedValues, idx + fWeightDimX, dualPlane);
+ const int p11 = this->getWeight(unquantizedValues, idx + fWeightDimX + 1, dualPlane);
+
+ const int w11 = (fs*ft + 8) >> 4;
+ const int w10 = ft - w11;
+ const int w01 = fs - w11;
+ const int w00 = 16 - fs - ft + w11;
+
+ const int weight = (p00*w00 + p01*w01 + p10*w10 + p11*w11 + 8) >> 4;
+ SkASSERT(weight <= 64);
+ return weight;
+ }
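+
+    // Note that when the weight grid has the same dimensions as the block (e.g. a
+    // 6x5 grid for a 6x5 block), gs and gt land exactly on grid points, fs and ft
+    // are zero, and the bilerp above collapses to a direct lookup of p00.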
+
+ // Unquantizes the decoded texel weights as described in section C.2.17 of
+ // the ASTC specification. Additionally, it populates texelWeights with
+ // the expanded weight grid, which is computed according to section C.2.18
+ void texelWeights(int texelWeights[2][12][12], const int* texelValues) const {
+ // Unquantized texel weights...
+ int unquantizedValues[144*2]; // 12x12 blocks with dual plane decoding...
+ SkASSERT(this->numWeights() <= 144*2);
+
+ // Unquantize the weights and cache them
+ for (int j = 0; j < this->numWeights(); ++j) {
+ unquantizedValues[j] = this->unquantizeWeight(texelValues[j]);
+ }
+
+ // Do weight infill...
+ for (int y = 0; y < fDimY; ++y) {
+ for (int x = 0; x < fDimX; ++x) {
+ texelWeights[0][x][y] = this->infillWeight(unquantizedValues, x, y, false);
+ if (fDualPlaneEnabled) {
+ texelWeights[1][x][y] = this->infillWeight(unquantizedValues, x, y, true);
+ }
+ }
+ }
+ }
+
+ // Returns the partition for the texel located at position (x, y).
+ // Adapted from C.2.21 of the ASTC specification
+ int getPartition(int x, int y) const {
+ const int partitionCount = this->numPartitions();
+ int seed = fPartIndex;
+ if ((fDimX * fDimY) < 31) {
+ x <<= 1;
+ y <<= 1;
+ }
+
+ seed += (partitionCount - 1) * 1024;
+
+ uint32_t p = seed;
+ p ^= p >> 15; p -= p << 17; p += p << 7; p += p << 4;
+ p ^= p >> 5; p += p << 16; p ^= p >> 7; p ^= p >> 3;
+ p ^= p << 6; p ^= p >> 17;
+
+ uint32_t rnum = p;
+ uint8_t seed1 = rnum & 0xF;
+ uint8_t seed2 = (rnum >> 4) & 0xF;
+ uint8_t seed3 = (rnum >> 8) & 0xF;
+ uint8_t seed4 = (rnum >> 12) & 0xF;
+ uint8_t seed5 = (rnum >> 16) & 0xF;
+ uint8_t seed6 = (rnum >> 20) & 0xF;
+ uint8_t seed7 = (rnum >> 24) & 0xF;
+ uint8_t seed8 = (rnum >> 28) & 0xF;
+ uint8_t seed9 = (rnum >> 18) & 0xF;
+ uint8_t seed10 = (rnum >> 22) & 0xF;
+ uint8_t seed11 = (rnum >> 26) & 0xF;
+ uint8_t seed12 = ((rnum >> 30) | (rnum << 2)) & 0xF;
+
+ seed1 *= seed1; seed2 *= seed2;
+ seed3 *= seed3; seed4 *= seed4;
+ seed5 *= seed5; seed6 *= seed6;
+ seed7 *= seed7; seed8 *= seed8;
+ seed9 *= seed9; seed10 *= seed10;
+ seed11 *= seed11; seed12 *= seed12;
+
+ int sh1, sh2, sh3;
+ if (0 != (seed & 1)) {
+ sh1 = (0 != (seed & 2))? 4 : 5;
+ sh2 = (partitionCount == 3)? 6 : 5;
+ } else {
+ sh1 = (partitionCount==3)? 6 : 5;
+ sh2 = (0 != (seed & 2))? 4 : 5;
+ }
+ sh3 = (0 != (seed & 0x10))? sh1 : sh2;
+
+ seed1 >>= sh1; seed2 >>= sh2; seed3 >>= sh1; seed4 >>= sh2;
+ seed5 >>= sh1; seed6 >>= sh2; seed7 >>= sh1; seed8 >>= sh2;
+ seed9 >>= sh3; seed10 >>= sh3; seed11 >>= sh3; seed12 >>= sh3;
+
+ const int z = 0;
+ int a = seed1*x + seed2*y + seed11*z + (rnum >> 14);
+ int b = seed3*x + seed4*y + seed12*z + (rnum >> 10);
+ int c = seed5*x + seed6*y + seed9 *z + (rnum >> 6);
+ int d = seed7*x + seed8*y + seed10*z + (rnum >> 2);
+
+ a &= 0x3F;
+ b &= 0x3F;
+ c &= 0x3F;
+ d &= 0x3F;
+
+ if (partitionCount < 4) {
+ d = 0;
+ }
+
+ if (partitionCount < 3) {
+ c = 0;
+ }
+
+ if (a >= b && a >= c && a >= d) {
+ return 0;
+ } else if (b >= c && b >= d) {
+ return 1;
+ } else if (c >= d) {
+ return 2;
+ } else {
+ return 3;
+ }
+ }
+
+ // Performs the proper interpolation of the texel based on the
+ // endpoints and weights.
+ SkColor getTexel(const SkColor endpoints[4][2],
+ const int weights[2][12][12],
+ int x, int y) const {
+ int part = 0;
+ if (this->numPartitions() > 1) {
+ part = this->getPartition(x, y);
+ }
+
+ SkColor result;
+ if (fDualPlaneEnabled) {
+ result = interpolate_dual_endpoints(
+ endpoints[part], weights[0][x][y], weights[1][x][y], fDualPlane);
+ } else {
+ result = interpolate_endpoints(endpoints[part], weights[0][x][y]);
+ }
+
+#if 1
+ // !FIXME! if we're writing directly to a bitmap, then we don't need
+ // to swap the red and blue channels, but since we're usually being used
+ // by the SkImageDecoder_astc module, the results are expected to be in RGBA.
+ result = SkColorSetARGB(
+ SkColorGetA(result), SkColorGetB(result), SkColorGetG(result), SkColorGetR(result));
+#endif
+
+ return result;
+ }
+
+ void decode() {
+ // First decode the block mode.
+ this->decodeBlockMode();
+
+ // Now we can decode the partition information.
+ fPartIndex = static_cast<int>(read_astc_bits(fBlock, 11, 23));
+ fPartCount = (fPartIndex & 0x3) + 1;
+ fPartIndex >>= 2;
+
+ // This is illegal
+ if (fDualPlaneEnabled && this->numPartitions() == 4) {
+ fError = true;
+ return;
+ }
+
+ // Based on the partition info, we can decode the color information.
+ this->decodeColorData();
+ }
+
+ // Decodes the dual plane based on the given bit location. The final
+ // location, if the dual plane is enabled, is also the end of our color data.
+ // This function is only meant to be used from this->decodeColorData()
+ void decodeDualPlane(int bitLoc) {
+ if (fDualPlaneEnabled) {
+ fDualPlane = static_cast<int>(read_astc_bits(fBlock, bitLoc - 2, bitLoc));
+ fColorEndBit = bitLoc - 2;
+ } else {
+ fColorEndBit = bitLoc;
+ }
+ }
+
+ // Decodes the color information based on the ASTC spec.
+ void decodeColorData() {
+
+ // By default, the last color bit is at the end of the texel weights
+ const int lastWeight = 128 - this->numWeightBits();
+
+ // If we have a dual plane then it will be at this location, too.
+ int dualPlaneBitLoc = lastWeight;
+
+ // If there's only one partition, then our job is (relatively) easy.
+ if (this->numPartitions() == 1) {
+ fCEM[0] = static_cast<ColorEndpointMode>(read_astc_bits(fBlock, 13, 17));
+ fColorStartBit = 17;
+
+ // Handle dual plane mode...
+ this->decodeDualPlane(dualPlaneBitLoc);
+
+ return;
+ }
+
+ // If we have more than one partition, then we need to make
+ // room for the partition index.
+ fColorStartBit = 29;
+
+ // Read the base CEM. If it's zero, then we have no additional
+ // CEM data and the endpoints for each partition share the same CEM.
+ const int baseCEM = static_cast<int>(read_astc_bits(fBlock, 23, 25));
+ if (0 == baseCEM) {
+
+ const ColorEndpointMode sameCEM =
+ static_cast<ColorEndpointMode>(read_astc_bits(fBlock, 25, 29));
+
+ for (int i = 0; i < kMaxPartitions; ++i) {
+ fCEM[i] = sameCEM;
+ }
+
+ // Handle dual plane mode...
+ this->decodeDualPlane(dualPlaneBitLoc);
+
+ return;
+ }
+
+ // Move the dual plane selector bits down based on how many
+ // partitions the block contains.
+ switch (this->numPartitions()) {
+ case 2:
+ dualPlaneBitLoc -= 2;
+ break;
+
+ case 3:
+ dualPlaneBitLoc -= 5;
+ break;
+
+ case 4:
+ dualPlaneBitLoc -= 8;
+ break;
+
+ default:
+ SkDEBUGFAIL("Internal ASTC decoding error.");
+ break;
+ }
+
+ // The rest of the CEM config will be between the dual plane bit selector
+ // and the texel weight grid.
+ const int lowCEM = static_cast<int>(read_astc_bits(fBlock, 23, 29));
+ SkASSERT(lastWeight >= dualPlaneBitLoc);
+ SkASSERT(lastWeight - dualPlaneBitLoc < 31);
+ int fullCEM = static_cast<int>(read_astc_bits(fBlock, dualPlaneBitLoc, lastWeight));
+
+ // Attach the config at the end of the weight grid to the CEM values
+ // in the beginning of the block.
+ fullCEM = (fullCEM << 6) | lowCEM;
+
+ // Ignore the two least significant bits, since those are our baseCEM above.
+ fullCEM = fullCEM >> 2;
+
+ int C[kMaxPartitions]; // Next, decode C and M from the spec (Table C.2.12)
+ for (int i = 0; i < this->numPartitions(); ++i) {
+ C[i] = fullCEM & 1;
+ fullCEM = fullCEM >> 1;
+ }
+
+ int M[kMaxPartitions];
+ for (int i = 0; i < this->numPartitions(); ++i) {
+ M[i] = fullCEM & 0x3;
+ fullCEM = fullCEM >> 2;
+ }
+
+ // Construct our CEMs..
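+ // (Illustrative example, tracing the formula below rather than quoting the
+ // spec: with baseCEM == 2, C[i] == 1 and M[i] == 3, we get
+ // cem = (2 - 1) * 4 + 4 + 3 == 11.)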
+ SkASSERT(baseCEM > 0);
+ for (int i = 0; i < this->numPartitions(); ++i) {
+ int cem = (baseCEM - 1) * 4;
+ cem += (0 == C[i])? 0 : 4;
+ cem += M[i];
+
+ SkASSERT(cem < 16);
+ fCEM[i] = static_cast<ColorEndpointMode>(cem);
+ }
+
+ // Finally, if we have dual plane mode, then read the plane selector.
+ this->decodeDualPlane(dualPlaneBitLoc);
+ }
+
+ // Decodes the block mode. This function determines whether or not we use
+ // dual plane encoding, the size of the texel weight grid, and the number of
+ // bits, trits and quints that are used to encode it. For more information,
+ // see section C.2.10 of the ASTC spec.
+ //
+ // For 2D blocks, the Block Mode field is laid out as follows:
+ //
+ // -------------------------------------------------------------------------
+ // 10 9 8 7 6 5 4 3 2 1 0 Width Height Notes
+ // -------------------------------------------------------------------------
+ // D H B A R0 0 0 R2 R1 B+4 A+2
+ // D H B A R0 0 1 R2 R1 B+8 A+2
+ // D H B A R0 1 0 R2 R1 A+2 B+8
+ // D H 0 B A R0 1 1 R2 R1 A+2 B+6
+ // D H 1 B A R0 1 1 R2 R1 B+2 A+2
+ // D H 0 0 A R0 R2 R1 0 0 12 A+2
+ // D H 0 1 A R0 R2 R1 0 0 A+2 12
+ // D H 1 1 0 0 R0 R2 R1 0 0 6 10
+ // D H 1 1 0 1 R0 R2 R1 0 0 10 6
+ // B 1 0 A R0 R2 R1 0 0 A+6 B+6 D=0, H=0
+ // x x 1 1 1 1 1 1 1 0 0 - - Void-extent
+ // x x 1 1 1 x x x x 0 0 - - Reserved*
+ // x x x x x x x 0 0 0 0 - - Reserved
+ // -------------------------------------------------------------------------
+ //
+ // D - dual plane enabled
+ // H, R - used to determine the number of bits/trits/quints in texel weight encoding
+ // R is a three bit value whose LSB is R0 and MSB is R2
+ // Width, Height - dimensions of the texel weight grid (determined by A and B)
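+ //
+ // Illustrative worked example (derived by tracing the code below, not quoted
+ // from the spec): for blockMode == 0xD1 the two low bits are non-zero and
+ // bits two and three are zero, so A == 2 and B == 1 give a 5x4 weight grid
+ // (Width = B + 4, Height = A + 2); D == 0 and H == 0, and R == 3 selects the
+ // { 0, 1, 0 } row of the bit allocation table, i.e. the weights use trit
+ // encoding with no extra bits or quints.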
+
+ void decodeBlockMode() {
+ const int blockMode = static_cast<int>(read_astc_bits(fBlock, 0, 11));
+
+ // Check for special void extent encoding
+ fVoidExtent = (blockMode & 0x1FF) == 0x1FC;
+
+ // Check for reserved block modes
+ fError = ((blockMode & 0x1C3) == 0x1C0) || ((blockMode & 0xF) == 0);
+
+ // Neither reserved nor void-extent, decode as usual
+ // This code corresponds to table C.2.8 of the ASTC spec
+ bool highPrecision = false;
+ int R = 0;
+ if ((blockMode & 0x3) == 0) {
+ R = ((0xC & blockMode) >> 1) | ((0x10 & blockMode) >> 4);
+ const int bitsSevenAndEight = (blockMode & 0x180) >> 7;
+ SkASSERT(0 <= bitsSevenAndEight && bitsSevenAndEight < 4);
+
+ const int A = (blockMode >> 5) & 0x3;
+ const int B = (blockMode >> 9) & 0x3;
+
+ fDualPlaneEnabled = (blockMode >> 10) & 0x1;
+ highPrecision = (blockMode >> 9) & 0x1;
+
+ switch (bitsSevenAndEight) {
+ default:
+ case 0:
+ fWeightDimX = 12;
+ fWeightDimY = A + 2;
+ break;
+
+ case 1:
+ fWeightDimX = A + 2;
+ fWeightDimY = 12;
+ break;
+
+ case 2:
+ fWeightDimX = A + 6;
+ fWeightDimY = B + 6;
+ fDualPlaneEnabled = false;
+ highPrecision = false;
+ break;
+
+ case 3:
+ if (0 == A) {
+ fWeightDimX = 6;
+ fWeightDimY = 10;
+ } else {
+ fWeightDimX = 10;
+ fWeightDimY = 6;
+ }
+ break;
+ }
+ } else { // (blockMode & 0x3) != 0
+ R = ((blockMode & 0x3) << 1) | ((blockMode & 0x10) >> 4);
+
+ const int bitsTwoAndThree = (blockMode >> 2) & 0x3;
+ SkASSERT(0 <= bitsTwoAndThree && bitsTwoAndThree < 4);
+
+ const int A = (blockMode >> 5) & 0x3;
+ const int B = (blockMode >> 7) & 0x3;
+
+ fDualPlaneEnabled = (blockMode >> 10) & 0x1;
+ highPrecision = (blockMode >> 9) & 0x1;
+
+ switch (bitsTwoAndThree) {
+ case 0:
+ fWeightDimX = B + 4;
+ fWeightDimY = A + 2;
+ break;
+ case 1:
+ fWeightDimX = B + 8;
+ fWeightDimY = A + 2;
+ break;
+ case 2:
+ fWeightDimX = A + 2;
+ fWeightDimY = B + 8;
+ break;
+ case 3:
+ if ((B & 0x2) == 0) {
+ fWeightDimX = A + 2;
+ fWeightDimY = (B & 1) + 6;
+ } else {
+ fWeightDimX = (B & 1) + 2;
+ fWeightDimY = A + 2;
+ }
+ break;
+ }
+ }
+
+ // We should have set the values of R and highPrecision
+ // from decoding the block mode; these are used to determine
+ // how the texel weights are encoded (bits, trits and quints).
+ if ((R & 0x6) == 0) {
+ fError = true;
+ } else {
+ static const int kBitAllocationTable[2][6][3] = {
+ {
+ { 1, 0, 0 },
+ { 0, 1, 0 },
+ { 2, 0, 0 },
+ { 0, 0, 1 },
+ { 1, 1, 0 },
+ { 3, 0, 0 }
+ },
+ {
+ { 1, 0, 1 },
+ { 2, 1, 0 },
+ { 4, 0, 0 },
+ { 2, 0, 1 },
+ { 3, 1, 0 },
+ { 5, 0, 0 }
+ }
+ };
+
+ fWeightBits = kBitAllocationTable[highPrecision][R - 2][0];
+ fWeightTrits = kBitAllocationTable[highPrecision][R - 2][1];
+ fWeightQuints = kBitAllocationTable[highPrecision][R - 2][2];
+ }
+ }
+};
+
+// Reads an ASTC block from the given pointer.
+static inline void read_astc_block(ASTCDecompressionData *dst, const uint8_t* src) {
+ const uint64_t* qword = reinterpret_cast<const uint64_t*>(src);
+ dst->fBlock.fLow = SkEndian_SwapLE64(qword[0]);
+ dst->fBlock.fHigh = SkEndian_SwapLE64(qword[1]);
+ dst->decode();
+}
+
+// Take a known void-extent block, and write out the values as a constant color.
+static void decompress_void_extent(uint8_t* dst, int dstRowBytes,
+ const ASTCDecompressionData &data) {
+ // The top 64 bits contain 4 16-bit RGBA values.
+ int a = (static_cast<int>(read_astc_bits(data.fBlock, 112, 128)) + 255) >> 8;
+ int b = (static_cast<int>(read_astc_bits(data.fBlock, 96, 112)) + 255) >> 8;
+ int g = (static_cast<int>(read_astc_bits(data.fBlock, 80, 96)) + 255) >> 8;
+ int r = (static_cast<int>(read_astc_bits(data.fBlock, 64, 80)) + 255) >> 8;
+
+ write_constant_color(dst, data.fDimX, data.fDimY, dstRowBytes, SkColorSetARGB(a, r, g, b));
+}
+
+// Decompresses a single ASTC block. It's assumed that data.fDimX and data.fDimY are
+// set and that the block has already been decoded (i.e. data.decode() has been called)
+static void decompress_astc_block(uint8_t* dst, int dstRowBytes,
+ const ASTCDecompressionData &data) {
+ if (data.fError) {
+ write_error_color(dst, data.fDimX, data.fDimY, dstRowBytes);
+ return;
+ }
+
+ if (data.fVoidExtent) {
+ decompress_void_extent(dst, dstRowBytes, data);
+ return;
+ }
+
+ // According to the spec, any more than 64 values is illegal. (C.2.24)
+ static const int kMaxTexelValues = 64;
+
+ // Decode the texel weights.
+ int texelValues[kMaxTexelValues];
+ bool success = decode_integer_sequence(
+ texelValues, kMaxTexelValues, data.numWeights(),
+ // texel data goes to the end of the 128 bit block.
+ data.fBlock, 128, 128 - data.numWeightBits(), false,
+ data.fWeightBits, data.fWeightTrits, data.fWeightQuints);
+
+ if (!success) {
+ write_error_color(dst, data.fDimX, data.fDimY, dstRowBytes);
+ return;
+ }
+
+ // Decode the color endpoints
+ int colorBits, colorTrits, colorQuints;
+ if (!data.getColorValueEncoding(&colorBits, &colorTrits, &colorQuints)) {
+ write_error_color(dst, data.fDimX, data.fDimY, dstRowBytes);
+ return;
+ }
+
+ // According to the spec, any more than 18 color values is illegal. (C.2.24)
+ static const int kMaxColorValues = 18;
+
+ int colorValues[kMaxColorValues];
+ success = decode_integer_sequence(
+ colorValues, kMaxColorValues, data.numColorValues(),
+ data.fBlock, data.fColorStartBit, data.fColorEndBit, true,
+ colorBits, colorTrits, colorQuints);
+
+ if (!success) {
+ write_error_color(dst, data.fDimX, data.fDimY, dstRowBytes);
+ return;
+ }
+
+ // Unquantize the color values after they've been decoded.
+ unquantize_colors(colorValues, data.numColorValues(), colorBits, colorTrits, colorQuints);
+
+ // Decode the colors into the appropriate endpoints.
+ SkColor endpoints[4][2];
+ data.colorEndpoints(endpoints, colorValues);
+
+ // Do texel infill and decode the texel values.
+ int texelWeights[2][12][12];
+ data.texelWeights(texelWeights, texelValues);
+
+ // Write the texels by interpolating them based on the information
+ // stored in the block.
+ dst += data.fDimY * dstRowBytes;
+ for (int y = 0; y < data.fDimY; ++y) {
+ dst -= dstRowBytes;
+ SkColor* colorPtr = reinterpret_cast<SkColor*>(dst);
+ for (int x = 0; x < data.fDimX; ++x) {
+ colorPtr[x] = data.getTexel(endpoints, texelWeights, x, y);
+ }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// ASTC Compression Struct
+//
+////////////////////////////////////////////////////////////////////////////////
+
+// This is the type passed as the CompressorType argument of the compressed
+// blitter for the ASTC format. The static functions required to be in this
+// struct are documented in SkTextureCompressor_Blitter.h
+struct CompressorASTC {
+ static inline void CompressA8Vertical(uint8_t* dst, const uint8_t* src) {
+ compress_a8_astc_block<GetAlphaTranspose>(&dst, src, 12);
+ }
+
+ static inline void CompressA8Horizontal(uint8_t* dst, const uint8_t* src,
+ int srcRowBytes) {
+ compress_a8_astc_block<GetAlpha>(&dst, src, srcRowBytes);
+ }
+
+#if PEDANTIC_BLIT_RECT
+ static inline void UpdateBlock(uint8_t* dst, const uint8_t* src, int srcRowBytes,
+ const uint8_t* mask) {
+ // TODO: krajcevski
+ // This is kind of difficult for ASTC because the weight values are calculated
+ // as an average of the actual weights. The best we can do is decompress the
+ // weights and recalculate them based on the new texel values. This should
+ // be "not too bad" since we know that anytime we hit this function, we're
+ // compressing 12x12 block dimension alpha-only, and we know the layout
+ // of the block
+ SkFAIL("Implement me!");
+ }
+#endif
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+namespace SkTextureCompressor {
+
+bool CompressA8To12x12ASTC(uint8_t* dst, const uint8_t* src,
+ int width, int height, size_t rowBytes) {
+ if (width < 0 || ((width % 12) != 0) || height < 0 || ((height % 12) != 0)) {
+ return false;
+ }
+
+ uint8_t** dstPtr = &dst;
+ for (int y = 0; y < height; y += 12) {
+ for (int x = 0; x < width; x += 12) {
+ compress_a8_astc_block<GetAlpha>(dstPtr, src + y*rowBytes + x, rowBytes);
+ }
+ }
+
+ return true;
+}
+
+SkBlitter* CreateASTCBlitter(int width, int height, void* outputBuffer,
+ SkTBlitterAllocator* allocator) {
+ if ((width % 12) != 0 || (height % 12) != 0) {
+ return nullptr;
+ }
+
+ // Memset the output buffer to an encoding that decodes to zero. We must do this
+ // in order to avoid having uninitialized values in the buffer if the blitter
+ // decides not to write certain scanlines (and skip entire rows of blocks).
+ // In the case of ASTC, if every index is zero, then the interpolated value
+ // will decode to zero provided we have the right header. We use the encoding
+ // for all-zero blocks recognized above.
+ const int nBlocks = (width * height / 144);
+ uint8_t *dst = reinterpret_cast<uint8_t *>(outputBuffer);
+ for (int i = 0; i < nBlocks; ++i) {
+ send_packing(&dst, SkTEndian_SwapLE64(0x0000000001FE000173ULL), 0);
+ }
+
+ return allocator->createT<
+ SkTCompressedAlphaBlitter<12, 16, CompressorASTC>, int, int, void* >
+ (width, height, outputBuffer);
+}
+
+void DecompressASTC(uint8_t* dst, int dstRowBytes, const uint8_t* src,
+ int width, int height, int blockDimX, int blockDimY) {
+ // ASTC is encoded in what they call "raster order", so that the first
+ // block is the bottom-left block in the image, and the first pixel
+ // is the bottom-left pixel of the image
+ dst += height * dstRowBytes;
+
+ ASTCDecompressionData data(blockDimX, blockDimY);
+ for (int y = 0; y < height; y += blockDimY) {
+ dst -= blockDimY * dstRowBytes;
+ SkColor *colorPtr = reinterpret_cast<SkColor*>(dst);
+ for (int x = 0; x < width; x += blockDimX) {
+ read_astc_block(&data, src);
+ decompress_astc_block(reinterpret_cast<uint8_t*>(colorPtr + x), dstRowBytes, data);
+
+ // ASTC encoded blocks are 16 bytes (128 bits) large.
+ src += 16;
+ }
+ }
+}
+
+} // SkTextureCompressor
diff --git a/gfx/skia/skia/src/utils/SkTextureCompressor_ASTC.h b/gfx/skia/skia/src/utils/SkTextureCompressor_ASTC.h
new file mode 100644
index 000000000..1312ee9c7
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkTextureCompressor_ASTC.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTextureCompressor_ASTC_DEFINED
+#define SkTextureCompressor_ASTC_DEFINED
+
+#include "SkBitmapProcShader.h"
+
+class SkBlitter;
+
+namespace SkTextureCompressor {
+
+ bool CompressA8To12x12ASTC(uint8_t* dst, const uint8_t* src,
+ int width, int height, size_t rowBytes);
+
+ SkBlitter* CreateASTCBlitter(int width, int height, void* outputBuffer,
+ SkTBlitterAllocator *allocator);
+
+ void DecompressASTC(uint8_t* dst, int dstRowBytes, const uint8_t* src,
+ int width, int height, int blockDimX, int blockDimY);
+}
+
+#endif // SkTextureCompressor_ASTC_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkTextureCompressor_Blitter.h b/gfx/skia/skia/src/utils/SkTextureCompressor_Blitter.h
new file mode 100644
index 000000000..f488707a3
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkTextureCompressor_Blitter.h
@@ -0,0 +1,733 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTextureCompressor_Blitter_DEFINED
+#define SkTextureCompressor_Blitter_DEFINED
+
+#include "SkTypes.h"
+#include "SkBlitter.h"
+
+namespace SkTextureCompressor {
+
+// Ostensibly, SkBlitter::BlitRect is supposed to set a rect of pixels to full
+// alpha. This becomes problematic when using compressed texture blitters, since
+// the rect rarely falls along block boundaries. The proper way to handle this is
+// to update the compressed encoding of a block by resetting the proper parameters
+// (and even recompressing the block) where a rect falls in between block boundaries.
+// PEDANTIC_BLIT_RECT attempts to do this by requiring the struct passed to
+// SkTCompressedAlphaBlitter to implement an UpdateBlock function call.
+//
+// However, the way that BlitRect gets used almost exclusively is to bracket inverse
+// fills for paths. In other words, the top few rows and bottom few rows of a path
+// that's getting inverse filled are called using blitRect. The rest are called using
+// the standard blitAntiH. As a result, we can just call blitAntiH with a faux RLE
+// of full alpha values, and then check in our flushRuns() call that we don't run off the
+// edge of the buffer. This is why we do not need this flag to be turned on.
+//
+// NOTE: This code is unfinished, but is intended as a starting point if and when
+// bugs are introduced by the existing code.
+#define PEDANTIC_BLIT_RECT 0
+
+// This class implements a blitter that blits directly into a buffer that will
+// be used as a compressed alpha texture. We compute this buffer by
+// buffering scan lines and then outputting them all at once. The number of
+// scan lines buffered is controlled by the BlockDim template parameter.
+//
+// The CompressorType is a struct with a bunch of static methods that provides
+// the specialized compression functionality of the blitter. A complete CompressorType
+// will implement the following static functions:
+//
+// struct CompressorType {
+// // The function used to compress an A8 block. The layout of the
+// // block is also expected to be in column-major order.
+// static void CompressA8Vertical(uint8_t* dst, const uint8_t block[]);
+//
+// // The function used to compress an A8 block. The layout of the
+// // block is also expected to be in row-major order.
+// static void CompressA8Horizontal(uint8_t* dst, const uint8_t* src, int srcRowBytes);
+//
+#if PEDANTIC_BLIT_RECT
+// // The function used to update an already compressed block. This will
+// // most likely be implementation dependent. The mask variable will have
+// // 0xFF in positions where the block should be updated and 0 in positions
+// // where it shouldn't. src contains an uncompressed buffer of pixels.
+// static void UpdateBlock(uint8_t* dst, const uint8_t* src, int srcRowBytes,
+// const uint8_t* mask);
+#endif
+// };
+template<int BlockDim, int EncodedBlockSize, typename CompressorType>
+class SkTCompressedAlphaBlitter : public SkBlitter {
+public:
+ SkTCompressedAlphaBlitter(int width, int height, void *compressedBuffer)
+ // 0x7FFE is one less than the largest positive 16-bit int. We use it for
+ // debugging to make sure that we're properly setting the nextX distance
+ // in flushRuns().
+#ifdef SK_DEBUG
+ : fCalledOnceWithNonzeroY(false)
+ , fBlitMaskCalled(false),
+#else
+ :
+#endif
+ kLongestRun(0x7FFE), kZeroAlpha(0)
+ , fNextRun(0)
+ , fWidth(width)
+ , fHeight(height)
+ , fBuffer(compressedBuffer)
+ {
+ SkASSERT((width % BlockDim) == 0);
+ SkASSERT((height % BlockDim) == 0);
+ }
+
+ virtual ~SkTCompressedAlphaBlitter() { this->flushRuns(); }
+
+ // Blit a horizontal run of one or more pixels.
+ void blitH(int x, int y, int width) override {
+ // This function is intended to be called from any standard RGB
+ // buffer, so we should never encounter it. However, if some code
+ // path does end up here, then this needs to be investigated.
+ SkFAIL("Not implemented!");
+ }
+
+ // Blit a horizontal run of antialiased pixels; runs[] is a *sparse*
+ // zero-terminated run-length encoding of spans of constant alpha values.
+ void blitAntiH(int x, int y,
+ const SkAlpha antialias[],
+ const int16_t runs[]) override {
+ SkASSERT(0 == x);
+
+ // Make sure that the new row to blit is either the first
+ // row that we're blitting, or exactly the next scan row
+ // after the last row that we blitted. This is to ensure that when
+ // we go to flush the runs, they all belong to the same block of rows.
+ if (fNextRun > 0 &&
+ ((x != fBufferedRuns[fNextRun-1].fX) ||
+ (y-1 != fBufferedRuns[fNextRun-1].fY))) {
+ this->flushRuns();
+ }
+
+ // Align the rows to a block boundary. If we receive rows that
+ // are not on a block boundary, then fill in the preceding runs
+ // with zeros. We do this by producing a single RLE that says
+ // that we have 0x7FFE pixels of zero (0x7FFE = 32766).
+ const int row = BlockDim * (y / BlockDim);
+ while ((row + fNextRun) < y) {
+ fBufferedRuns[fNextRun].fAlphas = &kZeroAlpha;
+ fBufferedRuns[fNextRun].fRuns = &kLongestRun;
+ fBufferedRuns[fNextRun].fX = 0;
+ fBufferedRuns[fNextRun].fY = row + fNextRun;
+ ++fNextRun;
+ }
+
+ // Make sure that our assumptions aren't violated...
+ SkASSERT(fNextRun == (y % BlockDim));
+ SkASSERT(fNextRun == 0 || fBufferedRuns[fNextRun - 1].fY < y);
+
+ // Set the values of the next run
+ fBufferedRuns[fNextRun].fAlphas = antialias;
+ fBufferedRuns[fNextRun].fRuns = runs;
+ fBufferedRuns[fNextRun].fX = x;
+ fBufferedRuns[fNextRun].fY = y;
+
+ // If we've output a block of scanlines in a row that don't violate our
+ // assumptions, then it's time to flush them...
+ if (BlockDim == ++fNextRun) {
+ this->flushRuns();
+ }
+ }
+
+ // Blit a vertical run of pixels with a constant alpha value.
+ void blitV(int x, int y, int height, SkAlpha alpha) override {
+ // This function is currently not implemented. It is not explicitly
+ // required by the contract, but if at some time a code path runs into
+ // this function (which is entirely possible), it needs to be implemented.
+ //
+ // TODO (krajcevski):
+ // This function will be most easily implemented in one of two ways:
+ // 1. Buffer each vertical column value and then construct a list
+ // of alpha values and output all of the blocks at once. This only
+ // requires a write to the compressed buffer
+ // 2. Replace the indices of each block with the proper indices based
+ // on the alpha value. This requires a read and write of the compressed
+ // buffer, but much less overhead.
+ SkFAIL("Not implemented!");
+ }
+
+ // Blit a solid rectangle one or more pixels wide. It's assumed that blitRect
+ // is called as a way to bracket blitAntiH where, above and below the path, the
+ // caller just needs a solid rectangle to fill in the mask.
+#ifdef SK_DEBUG
+ bool fCalledOnceWithNonzeroY;
+#endif
+ void blitRect(int x, int y, int width, int height) override {
+
+ // Assumptions:
+ SkASSERT(0 == x);
+ SkASSERT(width <= fWidth);
+
+ // Make sure that we're only ever bracketing calls to blitAntiH.
+ SkASSERT((0 == y) || (!fCalledOnceWithNonzeroY && (fCalledOnceWithNonzeroY = true)));
+
+#if !(PEDANTIC_BLIT_RECT)
+ for (int i = 0; i < height; ++i) {
+ const SkAlpha kFullAlpha = 0xFF;
+ this->blitAntiH(x, y+i, &kFullAlpha, &kLongestRun);
+ }
+#else
+ const int startBlockX = (x / BlockDim) * BlockDim;
+ const int startBlockY = (y / BlockDim) * BlockDim;
+
+ const int endBlockX = ((x + width) / BlockDim) * BlockDim;
+ const int endBlockY = ((y + height) / BlockDim) * BlockDim;
+
+ // If start and end are the same, then we only need to update a single block...
+ if (startBlockY == endBlockY && startBlockX == endBlockX) {
+ uint8_t mask[BlockDim*BlockDim];
+ memset(mask, 0, sizeof(mask));
+
+ const int xoff = x - startBlockX;
+ SkASSERT((xoff + width) <= BlockDim);
+
+ const int yoff = y - startBlockY;
+ SkASSERT((yoff + height) <= BlockDim);
+
+ for (int j = 0; j < height; ++j) {
+ memset(mask + (j + yoff)*BlockDim + xoff, 0xFF, width);
+ }
+
+ uint8_t* dst = this->getBlock(startBlockX, startBlockY);
+ CompressorType::UpdateBlock(dst, mask, BlockDim, mask);
+
+ // If start and end are the same in the y dimension, then we can freely update an
+ // entire row of blocks...
+ } else if (startBlockY == endBlockY) {
+
+ this->updateBlockRow(x, y, width, height, startBlockY, startBlockX, endBlockX);
+
+ // Similarly, if the start and end are in the same column, then we can just update
+ // an entire column of blocks...
+ } else if (startBlockX == endBlockX) {
+
+ this->updateBlockCol(x, y, width, height, startBlockX, startBlockY, endBlockY);
+
+ // Otherwise, the rect spans a non-trivial region of blocks, and we have to construct
+ // a kind of 9-patch to update each of the pieces of the rect. The top and bottom
+ // rows are updated using updateBlockRow, and the left and right columns are updated
+ // using updateBlockColumn. Anything in the middle is simply memset to an opaque block
+ // encoding.
+ } else {
+
+ const int innerStartBlockX = startBlockX + BlockDim;
+ const int innerStartBlockY = startBlockY + BlockDim;
+
+ // Blit top row
+ const int topRowHeight = innerStartBlockY - y;
+ this->updateBlockRow(x, y, width, topRowHeight, startBlockY,
+ startBlockX, endBlockX);
+
+ // Advance y
+ y += topRowHeight;
+ height -= topRowHeight;
+
+ // Blit middle
+ if (endBlockY > innerStartBlockY) {
+
+ // Update the left column
+ this->updateBlockCol(x, y, innerStartBlockX - x, endBlockY, startBlockY,
+ startBlockX, innerStartBlockX);
+
+ // Update the middle with an opaque encoding...
+ uint8_t mask[BlockDim*BlockDim];
+ memset(mask, 0xFF, sizeof(mask));
+
+ uint8_t opaqueEncoding[EncodedBlockSize];
+ CompressorType::CompressA8Horizontal(opaqueEncoding, mask, BlockDim);
+
+ for (int j = innerStartBlockY; j < endBlockY; j += BlockDim) {
+ uint8_t* opaqueDst = this->getBlock(innerStartBlockX, j);
+ for (int i = innerStartBlockX; i < endBlockX; i += BlockDim) {
+ memcpy(opaqueDst, opaqueEncoding, EncodedBlockSize);
+ opaqueDst += EncodedBlockSize;
+ }
+ }
+
+ // If we need to update the right column, do that too
+ if (x + width > endBlockX) {
+ this->updateBlockCol(endBlockX, y, x + width - endBlockX, endBlockY,
+ endBlockX, innerStartBlockY, endBlockY);
+ }
+
+ // Advance y
+ height = y + height - endBlockY;
+ y = endBlockY;
+ }
+
+ // If we need to update the last row, then do that, too.
+ if (height > 0) {
+ this->updateBlockRow(x, y, width, height, endBlockY,
+ startBlockX, endBlockX);
+ }
+ }
+#endif
+ }
+
+ // Blit a rectangle with one alpha-blended column on the left,
+ // width (zero or more) opaque pixels, and one alpha-blended column
+ // on the right. The result will always be at least two pixels wide.
+ void blitAntiRect(int x, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) override {
+ // This function is currently not implemented. It is not explicitly
+ // required by the contract, but if at some time a code path runs into
+ // this function (which is entirely possible), it needs to be implemented.
+ //
+ // TODO (krajcevski):
+ // This function will be most easily implemented as follows:
+ // 1. If width/height are smaller than a block, then update the
+ // indices of the affected blocks.
+ // 2. If width/height are larger than a block, then construct a 9-patch
+ // of block encodings that represent the rectangle, and write them
+ // to the compressed buffer as necessary. Whether or not the blocks
+ // are overwritten by zeros or just their indices are updated is up
+ // to debate.
+ SkFAIL("Not implemented!");
+ }
+
+ // Blit a pattern of pixels defined by a rectangle-clipped mask. We make an
+ // assumption here that if this function gets called, then it will replace all
+ // of the compressed texture blocks that it touches. Hence, two separate calls
+ // to blitMask that have clips next to one another will cause artifacts. Most
+ // of the time, however, this function gets called because constructing the mask
+ // was faster than constructing the RLE for blitAntiH, and this function will
+ // only be called once.
+#ifdef SK_DEBUG
+ bool fBlitMaskCalled;
+#endif
+ void blitMask(const SkMask& mask, const SkIRect& clip) override {
+
+ // Assumptions:
+ SkASSERT(!fBlitMaskCalled);
+ SkDEBUGCODE(fBlitMaskCalled = true);
+ SkASSERT(SkMask::kA8_Format == mask.fFormat);
+ SkASSERT(mask.fBounds.contains(clip));
+
+ // Start from largest block boundary less than the clip boundaries.
+ const int startI = BlockDim * (clip.left() / BlockDim);
+ const int startJ = BlockDim * (clip.top() / BlockDim);
+
+ for (int j = startJ; j < clip.bottom(); j += BlockDim) {
+
+ // Get the destination for this block row
+ uint8_t* dst = this->getBlock(startI, j);
+ for (int i = startI; i < clip.right(); i += BlockDim) {
+
+ // At this point, the block should intersect the clip.
+ SkASSERT(SkIRect::IntersectsNoEmptyCheck(
+ SkIRect::MakeXYWH(i, j, BlockDim, BlockDim), clip));
+
+ // Do we need to pad it?
+ if (i < clip.left() || j < clip.top() ||
+ i + BlockDim > clip.right() || j + BlockDim > clip.bottom()) {
+
+ uint8_t block[BlockDim*BlockDim];
+ memset(block, 0, sizeof(block));
+
+ const int startX = SkMax32(i, clip.left());
+ const int startY = SkMax32(j, clip.top());
+
+ const int endX = SkMin32(i + BlockDim, clip.right());
+ const int endY = SkMin32(j + BlockDim, clip.bottom());
+
+ for (int y = startY; y < endY; ++y) {
+ const int col = startX - i;
+ const int row = y - j;
+ const int valsWide = endX - startX;
+ SkASSERT(valsWide <= BlockDim);
+ SkASSERT(0 <= col && col < BlockDim);
+ SkASSERT(0 <= row && row < BlockDim);
+ memcpy(block + row*BlockDim + col,
+ mask.getAddr8(startX, j + row), valsWide);
+ }
+
+ CompressorType::CompressA8Horizontal(dst, block, BlockDim);
+ } else {
+ // Otherwise, just compress it.
+ uint8_t*const src = mask.getAddr8(i, j);
+ const uint32_t rb = mask.fRowBytes;
+ CompressorType::CompressA8Horizontal(dst, src, rb);
+ }
+
+ dst += EncodedBlockSize;
+ }
+ }
+ }
+
+ // If the blitter just sets a single value for each pixel, return the
+ // bitmap it draws into, and assign value. If not, return nullptr and ignore
+ // the value parameter.
+ const SkPixmap* justAnOpaqueColor(uint32_t* value) override {
+ return nullptr;
+ }
+
+ /**
+ * Compressed texture blitters only really work correctly if they get
+ * BlockDim rows at a time. That being said, this blitter tries its best
+ * to preserve semantics if blitAntiH doesn't get called in too many
+ * weird ways...
+ */
+ int requestRowsPreserved() const override { return BlockDim; }
+
+private:
+ static const int kPixelsPerBlock = BlockDim * BlockDim;
+
+ // The longest possible run of pixels that this blitter will receive.
+ // This is initialized in the constructor to 0x7FFE, which is one less
+ // than the largest positive 16-bit integer. We make sure that it's one
+ // less for debugging purposes. We also don't make this variable static
+ // in order to make sure that we can construct a valid pointer to it.
+ const int16_t kLongestRun;
+
+ // Usually used in conjunction with kLongestRun. This is initialized to
+ // zero.
+ const SkAlpha kZeroAlpha;
+
+ // This is the information that we buffer whenever we're asked to blit
+ // a row with this blitter.
+ struct BufferedRun {
+ const SkAlpha* fAlphas;
+ const int16_t* fRuns;
+ int fX, fY;
+ } fBufferedRuns[BlockDim];
+
+ // The next row [0, BlockDim) that we need to blit.
+ int fNextRun;
+
+ // The width and height of the image that we're blitting
+ const int fWidth;
+ const int fHeight;
+
+ // The compressed buffer that we're blitting into. It is assumed that the buffer
+ // is large enough to store a compressed image of size fWidth*fHeight.
+ void* const fBuffer;
+
+ // Various utility functions
+ int blocksWide() const { return fWidth / BlockDim; }
+ int blocksTall() const { return fHeight / BlockDim; }
+ int totalBlocks() const { return (fWidth * fHeight) / kPixelsPerBlock; }
+
+ // Returns the block index for the block containing pixel (x, y). Block
+ // indices start at zero and proceed in raster order.
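+ // (For illustration: with BlockDim == 4 and fWidth == 16, pixel (9, 5) falls
+ // in block column 2, block row 1, so its block offset is 1 * 4 + 2 == 6.)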
+ int getBlockOffset(int x, int y) const {
+ SkASSERT(x < fWidth);
+ SkASSERT(y < fHeight);
+ const int blockCol = x / BlockDim;
+ const int blockRow = y / BlockDim;
+ return blockRow * this->blocksWide() + blockCol;
+ }
+
+ // Returns a pointer to the block containing pixel (x, y)
+ uint8_t *getBlock(int x, int y) const {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(fBuffer);
+ return ptr + EncodedBlockSize*this->getBlockOffset(x, y);
+ }
+
+ // Updates the block whose columns are stored in block. curAlphai is expected
+ // to store the alpha values that will be placed within each of the columns in
+ // the range [col, col+colsLeft).
+ typedef uint32_t Column[BlockDim/4];
+ typedef uint32_t Block[BlockDim][BlockDim/4];
+ inline void updateBlockColumns(Block block, const int col,
+ const int colsLeft, const Column curAlphai) {
+ SkASSERT(block);
+ SkASSERT(col + colsLeft <= BlockDim);
+
+ for (int i = col; i < (col + colsLeft); ++i) {
+ memcpy(block[i], curAlphai, sizeof(Column));
+ }
+ }
+
+ // The following function writes the buffered runs to compressed blocks.
+ // If fNextRun < BlockDim, then we fill the runs that we haven't buffered with
+ // the constant zero buffer.
+ void flushRuns() {
+ // If we don't have any runs, then just return.
+ if (0 == fNextRun) {
+ return;
+ }
+
+#ifndef NDEBUG
+ // Make sure that if we have any runs, they all match
+ for (int i = 1; i < fNextRun; ++i) {
+ SkASSERT(fBufferedRuns[i].fY == fBufferedRuns[i-1].fY + 1);
+ SkASSERT(fBufferedRuns[i].fX == fBufferedRuns[i-1].fX);
+ }
+#endif
+
+ // If we don't have as many runs as we have rows, fill in the remaining
+ // runs with constant zeros.
+ for (int i = fNextRun; i < BlockDim; ++i) {
+ fBufferedRuns[i].fY = fBufferedRuns[0].fY + i;
+ fBufferedRuns[i].fX = fBufferedRuns[0].fX;
+ fBufferedRuns[i].fAlphas = &kZeroAlpha;
+ fBufferedRuns[i].fRuns = &kLongestRun;
+ }
+
+ // Make sure that our assumptions aren't violated.
+ SkASSERT(fNextRun > 0 && fNextRun <= BlockDim);
+ SkASSERT((fBufferedRuns[0].fY % BlockDim) == 0);
+
+ // The following logic walks BlockDim rows at a time and outputs compressed
+ // blocks to the buffer passed into the constructor.
+ // We do the following:
+ //
+ // c1 c2 c3 c4
+ // -----------------------------------------------------------------------
+ // ... | | | | | ----> fBufferedRuns[0]
+ // -----------------------------------------------------------------------
+ // ... | | | | | ----> fBufferedRuns[1]
+ // -----------------------------------------------------------------------
+ // ... | | | | | ----> fBufferedRuns[2]
+ // -----------------------------------------------------------------------
+ // ... | | | | | ----> fBufferedRuns[3]
+ // -----------------------------------------------------------------------
+ //
+ // curX -- the macro X value that we've gotten to.
+ // c[BlockDim] -- the buffers that represent the columns of the current block
+ // that we're operating on
+ // curAlphaColumn -- buffer containing the column of alpha values from fBufferedRuns.
+ // nextX -- for each run, the next point at which we need to update curAlphaColumn
+ // after the value of curX.
+ // finalX -- the minimum of all the nextX values.
+ //
+ // curX advances to finalX outputting any blocks that it passes along
+ // the way. Since finalX will not change when we reach the end of a
+ // run, the termination criteria will be whenever curX == finalX at the
+ // end of a loop.
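+ //
+ // As a rough illustration (not an exhaustive trace): if all BlockDim buffered
+ // rows hold a single kLongestRun span of constant alpha, then finalX collapses
+ // to fWidth and the loop below simply emits one compressed block for every
+ // BlockDim columns across the row.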
+
+ // Setup:
+ Block block;
+ sk_bzero(block, sizeof(block));
+
+ Column curAlphaColumn;
+ sk_bzero(curAlphaColumn, sizeof(curAlphaColumn));
+
+ SkAlpha *curAlpha = reinterpret_cast<SkAlpha*>(&curAlphaColumn);
+
+ int nextX[BlockDim];
+ for (int i = 0; i < BlockDim; ++i) {
+ nextX[i] = 0x7FFFFF;
+ }
+
+ uint8_t* outPtr = this->getBlock(fBufferedRuns[0].fX, fBufferedRuns[0].fY);
+
+ // Populate the first set of runs and figure out how far we need to
+ // advance on the first step
+ int curX = 0;
+ int finalX = 0xFFFFF;
+ for (int i = 0; i < BlockDim; ++i) {
+ nextX[i] = *(fBufferedRuns[i].fRuns);
+ curAlpha[i] = *(fBufferedRuns[i].fAlphas);
+
+ finalX = SkMin32(nextX[i], finalX);
+ }
+
+ // Make sure that we have a valid right-bound X value
+ SkASSERT(finalX < 0xFFFFF);
+
+ // If the finalX is the longest run, then just blit until we have
+ // width...
+ if (kLongestRun == finalX) {
+ finalX = fWidth;
+ }
+
+ // Run the blitter...
+ while (curX != finalX) {
+ SkASSERT(finalX >= curX);
+
+ // Do we need to populate the rest of the block?
+ if ((finalX - (BlockDim*(curX / BlockDim))) >= BlockDim) {
+ const int col = curX % BlockDim;
+ const int colsLeft = BlockDim - col;
+ SkASSERT(curX + colsLeft <= finalX);
+
+ this->updateBlockColumns(block, col, colsLeft, curAlphaColumn);
+
+ // Write this block
+ CompressorType::CompressA8Vertical(outPtr, reinterpret_cast<uint8_t*>(block));
+ outPtr += EncodedBlockSize;
+ curX += colsLeft;
+ }
+
+ // If we can advance even further, then just keep memsetting the block
+ if ((finalX - curX) >= BlockDim) {
+ SkASSERT((curX % BlockDim) == 0);
+
+ const int col = 0;
+ const int colsLeft = BlockDim;
+
+ this->updateBlockColumns(block, col, colsLeft, curAlphaColumn);
+
+ // While we can keep advancing, just keep writing the block.
+ uint8_t lastBlock[EncodedBlockSize];
+ CompressorType::CompressA8Vertical(lastBlock, reinterpret_cast<uint8_t*>(block));
+ while((finalX - curX) >= BlockDim) {
+ memcpy(outPtr, lastBlock, EncodedBlockSize);
+ outPtr += EncodedBlockSize;
+ curX += BlockDim;
+ }
+ }
+
+ // If we haven't advanced within the block then do so.
+ if (curX < finalX) {
+ const int col = curX % BlockDim;
+ const int colsLeft = finalX - curX;
+
+ this->updateBlockColumns(block, col, colsLeft, curAlphaColumn);
+ curX += colsLeft;
+ }
+
+ SkASSERT(curX == finalX);
+
+ // Figure out what the next advancement is...
+ if (finalX < fWidth) {
+ for (int i = 0; i < BlockDim; ++i) {
+ if (nextX[i] == finalX) {
+ const int16_t run = *(fBufferedRuns[i].fRuns);
+ fBufferedRuns[i].fRuns += run;
+ fBufferedRuns[i].fAlphas += run;
+ curAlpha[i] = *(fBufferedRuns[i].fAlphas);
+ nextX[i] += *(fBufferedRuns[i].fRuns);
+ }
+ }
+
+ finalX = 0xFFFFF;
+ for (int i = 0; i < BlockDim; ++i) {
+ finalX = SkMin32(nextX[i], finalX);
+ }
+ } else {
+ curX = finalX;
+ }
+ }
+
+ // If we didn't land on a block boundary, output the block...
+ if ((curX % BlockDim) > 0) {
+#ifdef SK_DEBUG
+ for (int i = 0; i < BlockDim; ++i) {
+ SkASSERT(nextX[i] == kLongestRun || nextX[i] == curX);
+ }
+#endif
+ const int col = curX % BlockDim;
+ const int colsLeft = BlockDim - col;
+
+ memset(curAlphaColumn, 0, sizeof(curAlphaColumn));
+ this->updateBlockColumns(block, col, colsLeft, curAlphaColumn);
+
+ CompressorType::CompressA8Vertical(outPtr, reinterpret_cast<uint8_t*>(block));
+ }
+
+ fNextRun = 0;
+ }
+
+#if PEDANTIC_BLIT_RECT
+ void updateBlockRow(int x, int y, int width, int height,
+ int blockRow, int startBlockX, int endBlockX) {
+ if (0 == width || 0 == height || startBlockX == endBlockX) {
+ return;
+ }
+
+ uint8_t* dst = this->getBlock(startBlockX, BlockDim * (y / BlockDim));
+
+ // One horizontal strip to update
+ uint8_t mask[BlockDim*BlockDim];
+ memset(mask, 0, sizeof(mask));
+
+ // Update the left cap
+ int blockX = startBlockX;
+ const int yoff = y - blockRow;
+ for (int j = 0; j < height; ++j) {
+ const int xoff = x - blockX;
+ memset(mask + (j + yoff)*BlockDim + xoff, 0xFF, BlockDim - xoff);
+ }
+ CompressorType::UpdateBlock(dst, mask, BlockDim, mask);
+ dst += EncodedBlockSize;
+ blockX += BlockDim;
+
+ // Update the middle
+ if (blockX < endBlockX) {
+ for (int j = 0; j < height; ++j) {
+ memset(mask + (j + yoff)*BlockDim, 0xFF, BlockDim);
+ }
+ while (blockX < endBlockX) {
+ CompressorType::UpdateBlock(dst, mask, BlockDim, mask);
+ dst += EncodedBlockSize;
+ blockX += BlockDim;
+ }
+ }
+
+ SkASSERT(endBlockX == blockX);
+
+ // Update the right cap (if we need to)
+ if (x + width > endBlockX) {
+ memset(mask, 0, sizeof(mask));
+ for (int j = 0; j < height; ++j) {
+ const int xoff = (x+width-blockX);
+ memset(mask + (j+yoff)*BlockDim, 0xFF, xoff);
+ }
+ CompressorType::UpdateBlock(dst, mask, BlockDim, mask);
+ }
+ }
+
+ void updateBlockCol(int x, int y, int width, int height,
+ int blockCol, int startBlockY, int endBlockY) {
+ if (0 == width || 0 == height || startBlockY == endBlockY) {
+ return;
+ }
+
+ // One vertical strip to update
+ uint8_t mask[BlockDim*BlockDim];
+ memset(mask, 0, sizeof(mask));
+ const int maskX0 = x - blockCol;
+ const int maskWidth = maskX0 + width;
+ SkASSERT(maskWidth <= BlockDim);
+
+ // Update the top cap
+ int blockY = startBlockY;
+ for (int j = (y - blockY); j < BlockDim; ++j) {
+ memset(mask + maskX0 + j*BlockDim, 0xFF, maskWidth);
+ }
+ CompressorType::UpdateBlock(this->getBlock(blockCol, blockY), mask, BlockDim, mask);
+ blockY += BlockDim;
+
+ // Update middle
+ if (blockY < endBlockY) {
+ for (int j = 0; j < BlockDim; ++j) {
+ memset(mask + maskX0 + j*BlockDim, 0xFF, maskWidth);
+ }
+ while (blockY < endBlockY) {
+ CompressorType::UpdateBlock(this->getBlock(blockCol, blockY),
+ mask, BlockDim, mask);
+ blockY += BlockDim;
+ }
+ }
+
+ SkASSERT(endBlockY == blockY);
+
+ // Update bottom
+ if (y + height > endBlockY) {
+ for (int j = y+height; j < endBlockY + BlockDim; ++j) {
+ memset(mask + (j-endBlockY)*BlockDim, 0, BlockDim);
+ }
+ CompressorType::UpdateBlock(this->getBlock(blockCol, blockY),
+ mask, BlockDim, mask);
+ }
+ }
+#endif // PEDANTIC_BLIT_RECT
+
+};
+
+} // namespace SkTextureCompressor
+
+#endif // SkTextureCompressor_Blitter_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkTextureCompressor_LATC.cpp b/gfx/skia/skia/src/utils/SkTextureCompressor_LATC.cpp
new file mode 100644
index 000000000..50aaf0b27
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkTextureCompressor_LATC.cpp
@@ -0,0 +1,519 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTextureCompressor_LATC.h"
+#include "SkTextureCompressor_Blitter.h"
+#include "SkTextureCompressor_Utils.h"
+
+#include "SkBlitter.h"
+#include "SkEndian.h"
+
+// Compression options. In general, the slow version is much more accurate, but
+// much slower. The fast option is much faster, but much less accurate. YMMV.
+#define COMPRESS_LATC_SLOW 0
+#define COMPRESS_LATC_FAST 1
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Generates an LATC palette. LATC constructs
+// a palette of eight colors from LUM0 and LUM1 using the algorithm:
+//
+// LUM0, if lum0 > lum1 and code(x,y) == 0
+// LUM1, if lum0 > lum1 and code(x,y) == 1
+// (6*LUM0+ LUM1)/7, if lum0 > lum1 and code(x,y) == 2
+// (5*LUM0+2*LUM1)/7, if lum0 > lum1 and code(x,y) == 3
+// (4*LUM0+3*LUM1)/7, if lum0 > lum1 and code(x,y) == 4
+// (3*LUM0+4*LUM1)/7, if lum0 > lum1 and code(x,y) == 5
+// (2*LUM0+5*LUM1)/7, if lum0 > lum1 and code(x,y) == 6
+// ( LUM0+6*LUM1)/7, if lum0 > lum1 and code(x,y) == 7
+//
+// LUM0, if lum0 <= lum1 and code(x,y) == 0
+// LUM1, if lum0 <= lum1 and code(x,y) == 1
+// (4*LUM0+ LUM1)/5, if lum0 <= lum1 and code(x,y) == 2
+// (3*LUM0+2*LUM1)/5, if lum0 <= lum1 and code(x,y) == 3
+// (2*LUM0+3*LUM1)/5, if lum0 <= lum1 and code(x,y) == 4
+// ( LUM0+4*LUM1)/5, if lum0 <= lum1 and code(x,y) == 5
+// 0, if lum0 <= lum1 and code(x,y) == 6
+// 255, if lum0 <= lum1 and code(x,y) == 7
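+//
+// For example (illustrative only, using the truncating integer math of
+// generate_latc_palette below): lum0 = 200, lum1 = 100 produces the palette
+// { 200, 100, 185, 171, 157, 142, 128, 114 }.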
+
+static const int kLATCPaletteSize = 8;
+static void generate_latc_palette(uint8_t palette[], uint8_t lum0, uint8_t lum1) {
+ palette[0] = lum0;
+ palette[1] = lum1;
+ if (lum0 > lum1) {
+ for (int i = 1; i < 7; i++) {
+ palette[i+1] = ((7-i)*lum0 + i*lum1) / 7;
+ }
+ } else {
+ for (int i = 1; i < 5; i++) {
+ palette[i+1] = ((5-i)*lum0 + i*lum1) / 5;
+ }
+ palette[6] = 0;
+ palette[7] = 255;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+#if COMPRESS_LATC_SLOW
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// Utility Functions
+//
+////////////////////////////////////////////////////////////////////////////////
+
+// Absolute difference between two values. More correct than SkTAbs(a - b)
+// because it works on unsigned values.
+template <typename T> inline T abs_diff(const T &a, const T &b) {
+ return (a > b) ? (a - b) : (b - a);
+}
+
+static bool is_extremal(uint8_t pixel) {
+ return 0 == pixel || 255 == pixel;
+}
+
+typedef uint64_t (*A84x4To64BitProc)(const uint8_t block[]);
+
+// This function is used by both R11 EAC and LATC to compress 4x4 blocks
+// of 8-bit alpha into 64-bit values that comprise the compressed data.
+// For both formats, we need to make sure that the dimensions of the
+// src pixels are divisible by 4, and copy 4x4 blocks one at a time
+// for compression.
+static bool compress_4x4_a8_to_64bit(uint8_t* dst, const uint8_t* src,
+ int width, int height, size_t rowBytes,
+ A84x4To64BitProc proc) {
+ // Make sure that our data is well-formed enough to be considered for compression
+ if (0 == width || 0 == height || (width % 4) != 0 || (height % 4) != 0) {
+ return false;
+ }
+
+ int blocksX = width >> 2;
+ int blocksY = height >> 2;
+
+ uint8_t block[16];
+ uint64_t* encPtr = reinterpret_cast<uint64_t*>(dst);
+ for (int y = 0; y < blocksY; ++y) {
+ for (int x = 0; x < blocksX; ++x) {
+ // Load block
+ for (int k = 0; k < 4; ++k) {
+ memcpy(block + k*4, src + k*rowBytes + 4*x, 4);
+ }
+
+ // Compress it
+ *encPtr = proc(block);
+ ++encPtr;
+ }
+ src += 4 * rowBytes;
+ }
+
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// LATC compressor
+//
+////////////////////////////////////////////////////////////////////////////////
+
+// LATC compresses texels down into square 4x4 blocks
+static const int kLATCBlockSize = 4;
+static const int kLATCPixelsPerBlock = kLATCBlockSize * kLATCBlockSize;
+
+// Compress a block by using the bounding box of the pixels. It is assumed that
+// there are no extremal pixels in this block; otherwise we would have used
+// compress_latc_block_bb_ignore_extremal.
+static uint64_t compress_latc_block_bb(const uint8_t pixels[]) {
+ uint8_t minVal = 255;
+ uint8_t maxVal = 0;
+ for (int i = 0; i < kLATCPixelsPerBlock; ++i) {
+ minVal = SkTMin(pixels[i], minVal);
+ maxVal = SkTMax(pixels[i], maxVal);
+ }
+
+ SkASSERT(!is_extremal(minVal));
+ SkASSERT(!is_extremal(maxVal));
+
+ uint8_t palette[kLATCPaletteSize];
+ generate_latc_palette(palette, maxVal, minVal);
+
+ uint64_t indices = 0;
+ for (int i = kLATCPixelsPerBlock - 1; i >= 0; --i) {
+
+ // Find the best palette index
+ uint8_t bestError = abs_diff(pixels[i], palette[0]);
+ uint8_t idx = 0;
+ for (int j = 1; j < kLATCPaletteSize; ++j) {
+ uint8_t error = abs_diff(pixels[i], palette[j]);
+ if (error < bestError) {
+ bestError = error;
+ idx = j;
+ }
+ }
+
+ indices <<= 3;
+ indices |= idx;
+ }
+
+ return
+ SkEndian_SwapLE64(
+ static_cast<uint64_t>(maxVal) |
+ (static_cast<uint64_t>(minVal) << 8) |
+ (indices << 16));
+}
+
+// Compress a block by using the bounding box of the pixels without taking into
+// account the extremal values. The generated palette will contain extremal values
+// and fewer points along the line segment to interpolate.
+static uint64_t compress_latc_block_bb_ignore_extremal(const uint8_t pixels[]) {
+ uint8_t minVal = 255;
+ uint8_t maxVal = 0;
+ for (int i = 0; i < kLATCPixelsPerBlock; ++i) {
+ if (is_extremal(pixels[i])) {
+ continue;
+ }
+
+ minVal = SkTMin(pixels[i], minVal);
+ maxVal = SkTMax(pixels[i], maxVal);
+ }
+
+ SkASSERT(!is_extremal(minVal));
+ SkASSERT(!is_extremal(maxVal));
+
+ uint8_t palette[kLATCPaletteSize];
+ generate_latc_palette(palette, minVal, maxVal);
+
+ uint64_t indices = 0;
+ for (int i = kLATCPixelsPerBlock - 1; i >= 0; --i) {
+
+ // Find the best palette index
+ uint8_t idx = 0;
+ if (is_extremal(pixels[i])) {
+ if (0xFF == pixels[i]) {
+ idx = 7;
+ } else if (0 == pixels[i]) {
+ idx = 6;
+ } else {
+ SkFAIL("Pixel is extremal but not really?!");
+ }
+ } else {
+ uint8_t bestError = abs_diff(pixels[i], palette[0]);
+ for (int j = 1; j < kLATCPaletteSize - 2; ++j) {
+ uint8_t error = abs_diff(pixels[i], palette[j]);
+ if (error < bestError) {
+ bestError = error;
+ idx = j;
+ }
+ }
+ }
+
+ indices <<= 3;
+ indices |= idx;
+ }
+
+ return
+ SkEndian_SwapLE64(
+ static_cast<uint64_t>(minVal) |
+ (static_cast<uint64_t>(maxVal) << 8) |
+ (indices << 16));
+}
+
+
+// Compress LATC block. Each 4x4 block of pixels is decompressed by LATC from two
+// values LUM0 and LUM1, and an index into the generated palette. Details of how
+// the palette is generated can be found in the comments of generate_latc_palette above.
+//
+// We choose which palette type to use based on whether or not 'pixels' contains
+// any extremal values (0 or 255). If there are extremal values, then we use the
+// palette that has the extremal values built in. Otherwise, we use the full bounding
+// box.
+
+static uint64_t compress_latc_block(const uint8_t pixels[]) {
+ // Collect unique pixels
+ int nUniquePixels = 0;
+ uint8_t uniquePixels[kLATCPixelsPerBlock];
+ for (int i = 0; i < kLATCPixelsPerBlock; ++i) {
+ bool foundPixel = false;
+ for (int j = 0; j < nUniquePixels; ++j) {
+ foundPixel = foundPixel || uniquePixels[j] == pixels[i];
+ }
+
+ if (!foundPixel) {
+ uniquePixels[nUniquePixels] = pixels[i];
+ ++nUniquePixels;
+ }
+ }
+
+ // If there's only one unique pixel, then our compression is easy.
+ if (1 == nUniquePixels) {
+ return SkEndian_SwapLE64(pixels[0] | (pixels[0] << 8));
+
+ // Similarly, if there are only two unique pixels, then our compression is
+ // easy again: place the pixels in the block header, and assign the indices
+ // with one or zero depending on which pixel they belong to.
+ } else if (2 == nUniquePixels) {
+ uint64_t outBlock = 0;
+ for (int i = kLATCPixelsPerBlock - 1; i >= 0; --i) {
+ int idx = 0;
+ if (pixels[i] == uniquePixels[1]) {
+ idx = 1;
+ }
+
+ outBlock <<= 3;
+ outBlock |= idx;
+ }
+ outBlock <<= 16;
+ outBlock |= (uniquePixels[0] | (uniquePixels[1] << 8));
+ return SkEndian_SwapLE64(outBlock);
+ }
+
+ // Count non-extremal pixel values
+ int nonExtremalPixels = 0;
+ for (int i = 0; i < nUniquePixels; ++i) {
+ if (!is_extremal(uniquePixels[i])) {
+ ++nonExtremalPixels;
+ }
+ }
+
+ // If all the pixels are non-extremal, then compute the palette using
+ // the bounding box of all the pixels.
+ if (nonExtremalPixels == nUniquePixels) {
+ // This is really just for correctness, in all of my tests we
+ // never take this step. We don't lose too much perf here because
+ // most of the processing in this function is worth it for the
+ // 1 == nUniquePixels optimization.
+ return compress_latc_block_bb(pixels);
+ } else {
+ return compress_latc_block_bb_ignore_extremal(pixels);
+ }
+}
+
+#endif // COMPRESS_LATC_SLOW
+
+////////////////////////////////////////////////////////////////////////////////
+
+#if COMPRESS_LATC_FAST
+
+// Take the top three bits of each index and pack them into the low 12
+// bits of the integer.
+static inline uint32_t pack_index(uint32_t x) {
+ // Pack it in...
+#if defined (SK_CPU_BENDIAN)
+ return
+ (x >> 24) |
+ ((x >> 13) & 0x38) |
+ ((x >> 2) & 0x1C0) |
+ ((x << 9) & 0xE00);
+#else
+ return
+ (x & 0x7) |
+ ((x >> 5) & 0x38) |
+ ((x >> 10) & 0x1C0) |
+ ((x >> 15) & 0xE00);
+#endif
+}
+
+// Converts each 8-bit byte in the integer into an LATC index, and then packs
+// the indices into the low 12 bits of the integer.
+static inline uint32_t convert_index(uint32_t x) {
+ // Since the palette is
+ // 255, 0, 219, 182, 146, 109, 73, 36
+ // we need to map the high three bits of each byte in the integer
+ // from
+ // 0 1 2 3 4 5 6 7
+ // to
+ // 1 7 6 5 4 3 2 0
+ //
+ // This first operation takes the mapping from
+ // 0 1 2 3 4 5 6 7 --> 7 6 5 4 3 2 1 0
+ x = 0x07070707 - SkTextureCompressor::ConvertToThreeBitIndex(x);
+
+ // mask is 1 if index is non-zero
+ const uint32_t mask = (x | (x >> 1) | (x >> 2)) & 0x01010101;
+
+ // add mask:
+ // 7 6 5 4 3 2 1 0 --> 8 7 6 5 4 3 2 0
+ x = (x + mask);
+
+ // Handle overflow:
+ // 8 7 6 5 4 3 2 0 --> 9 7 6 5 4 3 2 0
+ x |= (x >> 3) & 0x01010101;
+
+ // Mask out high bits:
+ // 9 7 6 5 4 3 2 0 --> 1 7 6 5 4 3 2 0
+ x &= 0x07070707;
+
+ return pack_index(x);
+}
+
+typedef uint64_t (*PackIndicesProc)(const uint8_t* alpha, size_t rowBytes);
+template<PackIndicesProc packIndicesProc>
+static void compress_a8_latc_block(uint8_t** dstPtr, const uint8_t* src, size_t rowBytes) {
+ *(reinterpret_cast<uint64_t*>(*dstPtr)) =
+ SkEndian_SwapLE64(0xFF | (packIndicesProc(src, rowBytes) << 16));
+ *dstPtr += 8;
+}
+
+inline uint64_t PackRowMajor(const uint8_t *indices, size_t rowBytes) {
+ uint64_t result = 0;
+ for (int i = 0; i < 4; ++i) {
+ const uint32_t idx = *(reinterpret_cast<const uint32_t*>(indices + i*rowBytes));
+ result |= static_cast<uint64_t>(convert_index(idx)) << 12*i;
+ }
+ return result;
+}
+
+inline uint64_t PackColumnMajor(const uint8_t *indices, size_t rowBytes) {
+ // !SPEED! Blarg, this is kind of annoying. SSE4 can make this
+ // a LOT faster.
+ uint8_t transposed[16];
+ for (int i = 0; i < 4; ++i) {
+ for (int j = 0; j < 4; ++j) {
+ transposed[j*4+i] = indices[i*rowBytes + j];
+ }
+ }
+
+ return PackRowMajor(transposed, 4);
+}
+
+static bool compress_4x4_a8_latc(uint8_t* dst, const uint8_t* src,
+ int width, int height, size_t rowBytes) {
+
+ if (width < 0 || ((width % 4) != 0) || height < 0 || ((height % 4) != 0)) {
+ return false;
+ }
+
+ uint8_t** dstPtr = &dst;
+ for (int y = 0; y < height; y += 4) {
+ for (int x = 0; x < width; x += 4) {
+ compress_a8_latc_block<PackRowMajor>(dstPtr, src + y*rowBytes + x, rowBytes);
+ }
+ }
+
+ return true;
+}
+
+void CompressA8LATCBlockVertical(uint8_t* dst, const uint8_t block[]) {
+ compress_a8_latc_block<PackColumnMajor>(&dst, block, 4);
+}
+
+#endif // COMPRESS_LATC_FAST
+
+void decompress_latc_block(uint8_t* dst, int dstRowBytes, const uint8_t* src) {
+ uint64_t block = SkEndian_SwapLE64(*(reinterpret_cast<const uint64_t *>(src)));
+ uint8_t lum0 = block & 0xFF;
+ uint8_t lum1 = (block >> 8) & 0xFF;
+
+ uint8_t palette[kLATCPaletteSize];
+ generate_latc_palette(palette, lum0, lum1);
+
+ block >>= 16;
+ for (int j = 0; j < 4; ++j) {
+ for (int i = 0; i < 4; ++i) {
+ dst[i] = palette[block & 0x7];
+ block >>= 3;
+ }
+ dst += dstRowBytes;
+ }
+}
+
+// This is the type passed as the CompressorType argument of the compressed
+// blitter for the LATC format. The static functions required to be in this
+// struct are documented in SkTextureCompressor_Blitter.h
+struct CompressorLATC {
+ static inline void CompressA8Vertical(uint8_t* dst, const uint8_t block[]) {
+ compress_a8_latc_block<PackColumnMajor>(&dst, block, 4);
+ }
+
+ static inline void CompressA8Horizontal(uint8_t* dst, const uint8_t* src,
+ int srcRowBytes) {
+ compress_a8_latc_block<PackRowMajor>(&dst, src, srcRowBytes);
+ }
+
+#if PEDANTIC_BLIT_RECT
+ static inline void UpdateBlock(uint8_t* dst, const uint8_t* src, int srcRowBytes,
+ const uint8_t* mask) {
+ // Pack the mask
+ uint64_t cmpMask = 0;
+ for (int i = 0; i < 4; ++i) {
+ const uint32_t idx = *(reinterpret_cast<const uint32_t*>(src + i*srcRowBytes));
+ cmpMask |= static_cast<uint64_t>(pack_index(idx)) << 12*i;
+ }
+ cmpMask = SkEndian_SwapLE64(cmpMask << 16); // avoid header
+
+ uint64_t cmpSrc;
+ uint8_t *cmpSrcPtr = reinterpret_cast<uint8_t*>(&cmpSrc);
+ compress_a8_latc_block<PackRowMajor>(&cmpSrcPtr, src, srcRowBytes);
+
+ // Mask out header
+ cmpSrc = cmpSrc & cmpMask;
+
+ // Read destination encoding
+ uint64_t *cmpDst = reinterpret_cast<uint64_t*>(dst);
+
+ // If the destination is the encoding for a blank block, then we need
+ // to properly set the header
+ if (0 == *cmpDst) {
+ *cmpDst = SkTEndian_SwapLE64(0x24924924924900FFULL);
+ }
+
+ // Set the new indices
+ *cmpDst &= ~cmpMask;
+ *cmpDst |= cmpSrc;
+ }
+#endif // PEDANTIC_BLIT_RECT
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+namespace SkTextureCompressor {
+
+bool CompressA8ToLATC(uint8_t* dst, const uint8_t* src, int width, int height, size_t rowBytes) {
+#if COMPRESS_LATC_FAST
+ return compress_4x4_a8_latc(dst, src, width, height, rowBytes);
+#elif COMPRESS_LATC_SLOW
+ return compress_4x4_a8_to_64bit(dst, src, width, height, rowBytes, compress_latc_block);
+#else
+#error "Must choose either fast or slow LATC compression"
+#endif
+}
+
+SkBlitter* CreateLATCBlitter(int width, int height, void* outputBuffer,
+ SkTBlitterAllocator* allocator) {
+ if ((width % 4) != 0 || (height % 4) != 0) {
+ return nullptr;
+ }
+
+#if COMPRESS_LATC_FAST
+ // Memset the output buffer to an encoding that decodes to zero. We must do this
+ // in order to avoid having uninitialized values in the buffer if the blitter
+ // decides not to write certain scanlines (and skip entire rows of blocks).
+ // In the case of LATC, if everything is zero, then LUM0 and LUM1 are also zero,
+ // and a decoded pixel will only be non-zero (0xFF) if its index is 7. So bzero will do just fine.
+ // (8 bytes per block) * (w * h / 16 blocks) = w * h / 2
+ sk_bzero(outputBuffer, width * height / 2);
+
+ return allocator->createT<
+ SkTCompressedAlphaBlitter<4, 8, CompressorLATC>, int, int, void* >
+ (width, height, outputBuffer);
+#elif COMPRESS_LATC_SLOW
+ // TODO (krajcevski)
+ return nullptr;
+#endif
+}
+
+void DecompressLATC(uint8_t* dst, int dstRowBytes, const uint8_t* src, int width, int height) {
+ for (int j = 0; j < height; j += 4) {
+ for (int i = 0; i < width; i += 4) {
+ decompress_latc_block(dst + i, dstRowBytes, src);
+ src += 8;
+ }
+ dst += 4 * dstRowBytes;
+ }
+}
+
+} // SkTextureCompressor
diff --git a/gfx/skia/skia/src/utils/SkTextureCompressor_LATC.h b/gfx/skia/skia/src/utils/SkTextureCompressor_LATC.h
new file mode 100644
index 000000000..85647eafc
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkTextureCompressor_LATC.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTextureCompressor_LATC_DEFINED
+#define SkTextureCompressor_LATC_DEFINED
+
+#include "SkBitmapProcShader.h"
+
+class SkBlitter;
+
+namespace SkTextureCompressor {
+
+ bool CompressA8ToLATC(uint8_t* dst, const uint8_t* src,
+ int width, int height, size_t rowBytes);
+
+ SkBlitter* CreateLATCBlitter(int width, int height, void* outputBuffer,
+ SkTBlitterAllocator *allocator);
+
+ void DecompressLATC(uint8_t* dst, int dstRowBytes, const uint8_t* src, int width, int height);
+}
+
+#endif // SkTextureCompressor_LATC_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkTextureCompressor_R11EAC.cpp b/gfx/skia/skia/src/utils/SkTextureCompressor_R11EAC.cpp
new file mode 100644
index 000000000..5c298dda9
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkTextureCompressor_R11EAC.cpp
@@ -0,0 +1,670 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTextureCompressor.h"
+#include "SkTextureCompressor_Blitter.h"
+#include "SkTextureCompressor_Utils.h"
+
+#include "SkBlitter.h"
+#include "SkEndian.h"
+
+// #define COMPRESS_R11_EAC_SLOW 1
+// #define COMPRESS_R11_EAC_FAST 1
+#define COMPRESS_R11_EAC_FASTEST 1
+
+// Blocks compressed into R11 EAC are represented as follows:
+// 0000000000000000000000000000000000000000000000000000000000000000
+// |base_cw|mod|mul| ----------------- indices -------------------
+//
+// To reconstruct the value of a given pixel, we use the formula:
+// clamp[0, 2047](base_cw * 8 + 4 + mod_val*mul*8)
+//
+// mod_val is chosen from a palette of values based on the index of the
+// given pixel. The palette is chosen by the value stored in mod.
+// This formula returns a value between 0 and 2047, which is converted
+// to a float from 0 to 1 in OpenGL.
+//
+// If mul is zero, then we set mul = 1/8, so that the formula becomes
+// clamp[0, 2047](base_cw * 8 + 4 + mod_val)
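+//
+// As an illustrative example (values chosen purely for exposition): with
+// base_cw = 128, mul = 2 and mod_val = 5 taken from the first palette below,
+// the reconstructed value is clamp[0, 2047](128*8 + 4 + 5*2*8) = 1108.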
+
+static const int kNumR11EACPalettes = 16;
+static const int kR11EACPaletteSize = 8;
+static const int kR11EACModifierPalettes[kNumR11EACPalettes][kR11EACPaletteSize] = {
+ {-3, -6, -9, -15, 2, 5, 8, 14},
+ {-3, -7, -10, -13, 2, 6, 9, 12},
+ {-2, -5, -8, -13, 1, 4, 7, 12},
+ {-2, -4, -6, -13, 1, 3, 5, 12},
+ {-3, -6, -8, -12, 2, 5, 7, 11},
+ {-3, -7, -9, -11, 2, 6, 8, 10},
+ {-4, -7, -8, -11, 3, 6, 7, 10},
+ {-3, -5, -8, -11, 2, 4, 7, 10},
+ {-2, -6, -8, -10, 1, 5, 7, 9},
+ {-2, -5, -8, -10, 1, 4, 7, 9},
+ {-2, -4, -8, -10, 1, 3, 7, 9},
+ {-2, -5, -7, -10, 1, 4, 6, 9},
+ {-3, -4, -7, -10, 2, 3, 6, 9},
+ {-1, -2, -3, -10, 0, 1, 2, 9},
+ {-4, -6, -8, -9, 3, 5, 7, 8},
+ {-3, -5, -7, -9, 2, 4, 6, 8}
+};
+
+#if COMPRESS_R11_EAC_SLOW
+
+// Pack the base codeword, palette, and multiplier into the 64 bits necessary
+// to decode it.
+static uint64_t pack_r11eac_block(uint16_t base_cw, uint16_t palette, uint16_t multiplier,
+ uint64_t indices) {
+ SkASSERT(palette < 16);
+ SkASSERT(multiplier < 16);
+ SkASSERT(indices < (static_cast<uint64_t>(1) << 48));
+
+ const uint64_t b = static_cast<uint64_t>(base_cw) << 56;
+ const uint64_t m = static_cast<uint64_t>(multiplier) << 52;
+ const uint64_t p = static_cast<uint64_t>(palette) << 48;
+ return SkEndian_SwapBE64(b | m | p | indices);
+}
+
+// Given a base codeword, a modifier, and a multiplier, compute the proper
+// pixel value in the range [0, 2047].
+static uint16_t compute_r11eac_pixel(int base_cw, int modifier, int multiplier) {
+ int ret = (base_cw * 8 + 4) + (modifier * multiplier * 8);
+ return (ret > 2047)? 2047 : ((ret < 0)? 0 : ret);
+}
+
+// Compress a block into R11 EAC format.
+// The compression works as follows:
+// 1. Find the center of the span of the block's values. Use this as the base codeword.
+// 2. Choose a multiplier based roughly on the size of the span of block values
+// 3. Iterate through each palette and choose the one with the most accurate
+// modifiers.
+static inline uint64_t compress_heterogeneous_r11eac_block(const uint8_t block[16]) {
+ // Find the center of the data...
+ uint16_t bmin = block[0];
+ uint16_t bmax = block[0];
+ for (int i = 1; i < 16; ++i) {
+ bmin = SkTMin<uint16_t>(bmin, block[i]);
+ bmax = SkTMax<uint16_t>(bmax, block[i]);
+ }
+
+ uint16_t center = (bmax + bmin) >> 1;
+ SkASSERT(center <= 255);
+
+ // Based on the min and max, we can guesstimate a proper multiplier
+ // This is kind of a magic choice to start with.
+ uint16_t multiplier = (bmax - center) / 10;
+
+ // Now convert the block to 11 bits and transpose it to match
+ // the proper layout
+ uint16_t cblock[16];
+ for (int i = 0; i < 4; ++i) {
+ for (int j = 0; j < 4; ++j) {
+ int srcIdx = i*4+j;
+ int dstIdx = j*4+i;
+ cblock[dstIdx] = (block[srcIdx] << 3) | (block[srcIdx] >> 5);
+ }
+ }
+
+ // Finally, choose the proper palette and indices
+ uint32_t bestError = 0xFFFFFFFF;
+ uint64_t bestIndices = 0;
+ uint16_t bestPalette = 0;
+ for (uint16_t paletteIdx = 0; paletteIdx < kNumR11EACPalettes; ++paletteIdx) {
+ const int *palette = kR11EACModifierPalettes[paletteIdx];
+
+ // Iterate through each pixel to find the best palette index
+ // and update the indices with the choice. Also store the error
+ // for this palette to be compared against the best error...
+ uint32_t error = 0;
+ uint64_t indices = 0;
+ for (int pixelIdx = 0; pixelIdx < 16; ++pixelIdx) {
+ const uint16_t pixel = cblock[pixelIdx];
+
+ // Iterate through each palette value to find the best index
+ // for this particular pixel for this particular palette.
+ uint16_t bestPixelError =
+ abs_diff(pixel, compute_r11eac_pixel(center, palette[0], multiplier));
+ int bestIndex = 0;
+ for (int i = 1; i < kR11EACPaletteSize; ++i) {
+ const uint16_t p = compute_r11eac_pixel(center, palette[i], multiplier);
+ const uint16_t perror = abs_diff(pixel, p);
+
+ // Is this index better?
+ if (perror < bestPixelError) {
+ bestIndex = i;
+ bestPixelError = perror;
+ }
+ }
+
+ SkASSERT(bestIndex < 8);
+
+ error += bestPixelError;
+ indices <<= 3;
+ indices |= bestIndex;
+ }
+
+ SkASSERT(indices < (static_cast<uint64_t>(1) << 48));
+
+ // Is this palette better?
+ if (error < bestError) {
+ bestPalette = paletteIdx;
+ bestIndices = indices;
+ bestError = error;
+ }
+ }
+
+ // Finally, pack everything together...
+ return pack_r11eac_block(center, bestPalette, multiplier, bestIndices);
+}
+#endif // COMPRESS_R11_EAC_SLOW
+
+#if COMPRESS_R11_EAC_FAST
+// This function takes into account that most blocks that we compress have a gradation from
+// fully opaque to fully transparent. The compression scheme works by selecting the
+// palette and multiplier that has the tightest fit to the 0-255 range. This is encoded
+// as the block header (0x8490). The indices are then selected by considering the top
+// three bits of each alpha value. For alpha masks, this reduces the dynamic range from
+// 17 to 8, but the quality is still acceptable.
+//
+// There are a few caveats that need to be taken care of...
+//
+// 1. The block is read in as scanlines, so the indices are stored as:
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+// However, the decompression routine reads them in column-major order, so they
+// need to be packed as:
+// 0 4 8 12 1 5 9 13 2 6 10 14 3 7 11 15
+// So when reading, they must be transposed.
+//
+// 2. We cannot use the top three bits as an index directly, since the R11 EAC palettes
+// above store the modulation values first decreasing and then increasing:
+// e.g. {-3, -6, -9, -15, 2, 5, 8, 14}
+// Hence, we need to convert the indices with the following mapping:
+// From: 0 1 2 3 4 5 6 7
+// To: 3 2 1 0 4 5 6 7
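+//
+// For example, an alpha value of 0x20 has top bits 001 (index 1) and is
+// remapped to 2, while 0xE0 (top bits 111, index 7) keeps index 7.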
+static inline uint64_t compress_heterogeneous_r11eac_block(const uint8_t block[16]) {
+ uint64_t retVal = static_cast<uint64_t>(0x8490) << 48;
+ for(int i = 0; i < 4; ++i) {
+ for(int j = 0; j < 4; ++j) {
+ const int shift = 45-3*(j*4+i);
+ SkASSERT(shift <= 45);
+ const uint64_t idx = block[i*4+j] >> 5;
+ SkASSERT(idx < 8);
+
+ // !SPEED! This is slightly faster than having an if-statement.
+ switch(idx) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ retVal |= (3-idx) << shift;
+ break;
+ default:
+ retVal |= idx << shift;
+ break;
+ }
+ }
+ }
+
+ return SkEndian_SwapBE64(retVal);
+}
+#endif // COMPRESS_R11_EAC_FAST
+
+#if (COMPRESS_R11_EAC_SLOW) || (COMPRESS_R11_EAC_FAST)
+static uint64_t compress_r11eac_block(const uint8_t block[16]) {
+ // Are all blocks a solid color?
+ bool solid = true;
+ for (int i = 1; i < 16; ++i) {
+ if (block[i] != block[0]) {
+ solid = false;
+ break;
+ }
+ }
+
+ if (solid) {
+ switch(block[0]) {
+ // Fully transparent? We know the encoding...
+ case 0:
+ // (0x0020 << 48) produces the following:
+// base_cw: 0
+ // mod: 0, palette: {-3, -6, -9, -15, 2, 5, 8, 14}
+ // multiplier: 2
+ // mod_val: -3
+ //
+ // this gives the following formula:
+ // clamp[0, 2047](0*8+4+(-3)*2*8) = 0
+ //
+ // Furthermore, it is impervious to endianness:
+ // 0x0020000000002000ULL
+ // Will produce one pixel with index 2, which gives:
+ // clamp[0, 2047](0*8+4+(-9)*2*8) = 0
+ return 0x0020000000002000ULL;
+
+ // Fully opaque? We know this encoding too...
+ case 255:
+
+ // -1 produces the following:
+// base_cw: 255
+ // mod: 15, palette: {-3, -5, -7, -9, 2, 4, 6, 8}
+ // mod_val: 8
+ //
+ // this gives the following formula:
+ // clamp[0, 2047](255*8+4+8*8*8) = clamp[0, 2047](2556) = 2047
+ return 0xFFFFFFFFFFFFFFFFULL;
+
+ default:
+ // !TODO! krajcevski:
+ // This will probably never happen, since we're using this format
+ // primarily for compressing alpha maps. Usually, blocks that are
+ // neither fully opaque nor fully transparent are not a solid
+ // intermediate color. If we notice that they are, then we can
+ // add another optimization...
+ break;
+ }
+ }
+
+ return compress_heterogeneous_r11eac_block(block);
+}
+
+// This function is used by R11 EAC to compress 4x4 blocks
+// of 8-bit alpha into 64-bit values that comprise the compressed data.
+// We need to make sure that the dimensions of the src pixels are divisible
+// by 4, and copy 4x4 blocks one at a time for compression.
+typedef uint64_t (*A84x4To64BitProc)(const uint8_t block[]);
+
+static bool compress_4x4_a8_to_64bit(uint8_t* dst, const uint8_t* src,
+ int width, int height, size_t rowBytes,
+ A84x4To64BitProc proc) {
+ // Make sure that our data is well-formed enough to be considered for compression
+ if (0 == width || 0 == height || (width % 4) != 0 || (height % 4) != 0) {
+ return false;
+ }
+
+ int blocksX = width >> 2;
+ int blocksY = height >> 2;
+
+ uint8_t block[16];
+ uint64_t* encPtr = reinterpret_cast<uint64_t*>(dst);
+ for (int y = 0; y < blocksY; ++y) {
+ for (int x = 0; x < blocksX; ++x) {
+ // Load block
+ for (int k = 0; k < 4; ++k) {
+ memcpy(block + k*4, src + k*rowBytes + 4*x, 4);
+ }
+
+ // Compress it
+ *encPtr = proc(block);
+ ++encPtr;
+ }
+ src += 4 * rowBytes;
+ }
+
+ return true;
+}
+#endif // (COMPRESS_R11_EAC_SLOW) || (COMPRESS_R11_EAC_FAST)
+
+// This function converts an integer containing four bytes of alpha
+// values into an integer containing four bytes of indices into R11 EAC.
+// Note, there needs to be a mapping of indices:
+// 0 1 2 3 4 5 6 7
+// 3 2 1 0 4 5 6 7
+//
+// To compute this, we first negate each byte, and then add three, which
+// gives the mapping
+// 3 2 1 0 -1 -2 -3 -4
+//
+// Then we mask out the negative values, take their absolute value, and
+// add three.
+//
+// Most of the voodoo in this function comes from Hacker's Delight, section 2-18
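+//
+// For example, a byte holding 0x00 (three-bit index 0) comes out as 3, while a
+// byte holding 0xFF (index 7) stays at 7, matching the mapping above.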
+static inline uint32_t convert_indices(uint32_t x) {
+ // Take the top three bits...
+ x = SkTextureCompressor::ConvertToThreeBitIndex(x);
+
+ // Negate...
+ x = ~((0x80808080 - x) ^ 0x7F7F7F7F);
+
+ // Add three
+ const uint32_t s = (x & 0x7F7F7F7F) + 0x03030303;
+ x = ((x ^ 0x03030303) & 0x80808080) ^ s;
+
+ // Absolute value
+ const uint32_t a = x & 0x80808080;
+ const uint32_t b = a >> 7;
+
+ // Aside: mask negatives (m is three if the byte was negative)
+ const uint32_t m = (a >> 6) | b;
+
+ // .. continue absolute value
+ x = (x ^ ((a - b) | a)) + b;
+
+ // Add three
+ return x + m;
+}
+
+#if COMPRESS_R11_EAC_FASTEST
+template<unsigned shift>
+static inline uint64_t swap_shift(uint64_t x, uint64_t mask) {
+ const uint64_t t = (x ^ (x >> shift)) & mask;
+ return x ^ t ^ (t << shift);
+}
+
+static inline uint64_t interleave6(uint64_t topRows, uint64_t bottomRows) {
+ // If our 3-bit block indices are laid out as:
+ // a b c d
+ // e f g h
+ // i j k l
+ // m n o p
+ //
+ // This function expects topRows and bottomRows to contain the first two rows
+ // of indices interleaved in the least significant bits of a and b. In other words...
+ //
+ // If the architecture is big endian, then topRows and bottomRows will contain the following:
+ // Bits 31-0:
+ // a: 00 a e 00 b f 00 c g 00 d h
+ // b: 00 i m 00 j n 00 k o 00 l p
+ //
+ // If the architecture is little endian, then topRows and bottomRows will contain
+ // the following:
+ // Bits 31-0:
+ // a: 00 d h 00 c g 00 b f 00 a e
+ // b: 00 l p 00 k o 00 j n 00 i m
+ //
+ // This function returns a 48-bit packing of the form:
+ // a e i m b f j n c g k o d h l p
+ //
+ // !SPEED! this function might be even faster if certain SIMD intrinsics are
+ // used..
+
+ // For both architectures, we can figure out a packing of the bits by
+ // using a shuffle and a few shift-rotates...
+ uint64_t x = (static_cast<uint64_t>(topRows) << 32) | static_cast<uint64_t>(bottomRows);
+
+ // x: 00 a e 00 b f 00 c g 00 d h 00 i m 00 j n 00 k o 00 l p
+
+ x = swap_shift<10>(x, 0x3FC0003FC00000ULL);
+
+ // x: b f 00 00 00 a e c g i m 00 00 00 d h j n 00 k o 00 l p
+
+ x = (x | ((x << 52) & (0x3FULL << 52)) | ((x << 20) & (0x3FULL << 28))) >> 16;
+
+ // x: 00 00 00 00 00 00 00 00 b f l p a e c g i m k o d h j n
+
+ x = swap_shift<6>(x, 0xFC0000ULL);
+
+#if defined (SK_CPU_BENDIAN)
+ // x: 00 00 00 00 00 00 00 00 b f l p a e i m c g k o d h j n
+
+ x = swap_shift<36>(x, 0x3FULL);
+
+ // x: 00 00 00 00 00 00 00 00 b f j n a e i m c g k o d h l p
+
+ x = swap_shift<12>(x, 0xFFF000000ULL);
+#else
+ // If our CPU is little endian, then the above logic will
+ // produce the following indices:
+ // x: 00 00 00 00 00 00 00 00 c g i m d h l p b f j n a e k o
+
+ x = swap_shift<36>(x, 0xFC0ULL);
+
+ // x: 00 00 00 00 00 00 00 00 a e i m d h l p b f j n c g k o
+
+ x = (x & (0xFFFULL << 36)) | ((x & 0xFFFFFFULL) << 12) | ((x >> 24) & 0xFFFULL);
+#endif
+
+ // x: 00 00 00 00 00 00 00 00 a e i m b f j n c g k o d h l p
+ return x;
+}
+
+// This function follows the same basic procedure as compress_heterogeneous_r11eac_block
+// above when COMPRESS_R11_EAC_FAST is defined, but it avoids a few loads/stores and
+// tries to optimize where it can using SIMD.
+static uint64_t compress_r11eac_block_fast(const uint8_t* src, size_t rowBytes) {
+ // Store each row of alpha values in an integer
+ const uint32_t alphaRow1 = *(reinterpret_cast<const uint32_t*>(src));
+ const uint32_t alphaRow2 = *(reinterpret_cast<const uint32_t*>(src + rowBytes));
+ const uint32_t alphaRow3 = *(reinterpret_cast<const uint32_t*>(src + 2*rowBytes));
+ const uint32_t alphaRow4 = *(reinterpret_cast<const uint32_t*>(src + 3*rowBytes));
+
+ // Check for solid blocks. The explanations for these values
+ // can be found in the comments of compress_r11eac_block above
+ if (alphaRow1 == alphaRow2 && alphaRow1 == alphaRow3 && alphaRow1 == alphaRow4) {
+ if (0 == alphaRow1) {
+ // Fully transparent block
+ return 0x0020000000002000ULL;
+ } else if (0xFFFFFFFF == alphaRow1) {
+ // Fully opaque block
+ return 0xFFFFFFFFFFFFFFFFULL;
+ }
+ }
+
+ // Convert each integer of alpha values into an integer of indices
+ const uint32_t indexRow1 = convert_indices(alphaRow1);
+ const uint32_t indexRow2 = convert_indices(alphaRow2);
+ const uint32_t indexRow3 = convert_indices(alphaRow3);
+ const uint32_t indexRow4 = convert_indices(alphaRow4);
+
+ // Interleave the indices from the top two rows and bottom two rows
+ // prior to passing them to interleave6. Since each index is at most
+ // three bits, each byte can hold two indices... The way that the
+ // compression scheme expects the packing allows us to efficiently pack
+ // the top two rows and bottom two rows. Interleaving each 6-bit sequence
+ // and tightly packing it into a uint64_t is a little trickier, which is
+ // taken care of in interleave6.
+ const uint32_t r1r2 = (indexRow1 << 3) | indexRow2;
+ const uint32_t r3r4 = (indexRow3 << 3) | indexRow4;
+ const uint64_t indices = interleave6(r1r2, r3r4);
+
+ // Return the packed indices in the least significant bits with the magic header
+ return SkEndian_SwapBE64(0x8490000000000000ULL | indices);
+}
+
+static bool compress_a8_to_r11eac_fast(uint8_t* dst, const uint8_t* src,
+ int width, int height, size_t rowBytes) {
+ // Make sure that our data is well-formed enough to be considered for compression
+ if (0 == width || 0 == height || (width % 4) != 0 || (height % 4) != 0) {
+ return false;
+ }
+
+ const int blocksX = width >> 2;
+ const int blocksY = height >> 2;
+
+ uint64_t* encPtr = reinterpret_cast<uint64_t*>(dst);
+ for (int y = 0; y < blocksY; ++y) {
+ for (int x = 0; x < blocksX; ++x) {
+ // Compress it
+ *encPtr = compress_r11eac_block_fast(src + 4*x, rowBytes);
+ ++encPtr;
+ }
+ src += 4 * rowBytes;
+ }
+ return true;
+}
+#endif // COMPRESS_R11_EAC_FASTEST
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// Utility functions used by the blitter
+//
+////////////////////////////////////////////////////////////////////////////////
+
+// The R11 EAC format expects that indices are given in column-major order. Since
+// we receive alpha values in raster order, this usually means that we have to use
+// interleave6 above to properly pack our indices. However, if our indices come from the
+// blitter, then each integer will be a column of indices, and hence can be efficiently
+// packed. This function takes the bottom three bits of each byte and places them in
+// the least significant 12 bits of the resulting integer.
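+//
+// For example, a column whose four bytes (lowest address first) hold the
+// indices {1, 2, 3, 4} packs to 0b001010011100 = 0x29C on either endianness.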
+static inline uint32_t pack_indices_vertical(uint32_t x) {
+#if defined (SK_CPU_BENDIAN)
+ return
+ (x & 7) |
+ ((x >> 5) & (7 << 3)) |
+ ((x >> 10) & (7 << 6)) |
+ ((x >> 15) & (7 << 9));
+#else
+ return
+ ((x >> 24) & 7) |
+ ((x >> 13) & (7 << 3)) |
+ ((x >> 2) & (7 << 6)) |
+ ((x << 9) & (7 << 9));
+#endif
+}
+
+// This function returns the compressed format of a block given as four columns of
+// alpha values. Each column is assumed to be loaded from top to bottom, and hence
+// must first be converted to indices and then packed into the resulting 64-bit
+// integer.
+inline void compress_block_vertical(uint8_t* dstPtr, const uint8_t *block) {
+
+ const uint32_t* src = reinterpret_cast<const uint32_t*>(block);
+ uint64_t* dst = reinterpret_cast<uint64_t*>(dstPtr);
+
+ const uint32_t alphaColumn0 = src[0];
+ const uint32_t alphaColumn1 = src[1];
+ const uint32_t alphaColumn2 = src[2];
+ const uint32_t alphaColumn3 = src[3];
+
+ if (alphaColumn0 == alphaColumn1 &&
+ alphaColumn2 == alphaColumn3 &&
+ alphaColumn0 == alphaColumn2) {
+
+ if (0 == alphaColumn0) {
+ // Transparent
+ *dst = 0x0020000000002000ULL;
+ return;
+ }
+ else if (0xFFFFFFFF == alphaColumn0) {
+ // Opaque
+ *dst = 0xFFFFFFFFFFFFFFFFULL;
+ return;
+ }
+ }
+
+ const uint32_t indexColumn0 = convert_indices(alphaColumn0);
+ const uint32_t indexColumn1 = convert_indices(alphaColumn1);
+ const uint32_t indexColumn2 = convert_indices(alphaColumn2);
+ const uint32_t indexColumn3 = convert_indices(alphaColumn3);
+
+ const uint32_t packedIndexColumn0 = pack_indices_vertical(indexColumn0);
+ const uint32_t packedIndexColumn1 = pack_indices_vertical(indexColumn1);
+ const uint32_t packedIndexColumn2 = pack_indices_vertical(indexColumn2);
+ const uint32_t packedIndexColumn3 = pack_indices_vertical(indexColumn3);
+
+ *dst = SkEndian_SwapBE64(0x8490000000000000ULL |
+ (static_cast<uint64_t>(packedIndexColumn0) << 36) |
+ (static_cast<uint64_t>(packedIndexColumn1) << 24) |
+ (static_cast<uint64_t>(packedIndexColumn2) << 12) |
+ static_cast<uint64_t>(packedIndexColumn3));
+}
+
+static inline int get_r11_eac_index(uint64_t block, int x, int y) {
+ SkASSERT(x >= 0 && x < 4);
+ SkASSERT(y >= 0 && y < 4);
+ const int idx = x*4 + y;
+ return (block >> ((15-idx)*3)) & 0x7;
+}
+
+static void decompress_r11_eac_block(uint8_t* dst, int dstRowBytes, const uint8_t* src) {
+ const uint64_t block = SkEndian_SwapBE64(*(reinterpret_cast<const uint64_t *>(src)));
+
+ const int base_cw = (block >> 56) & 0xFF;
+ const int mod = (block >> 52) & 0xF;
+ const int palette_idx = (block >> 48) & 0xF;
+
+ const int* palette = kR11EACModifierPalettes[palette_idx];
+
+ for (int j = 0; j < 4; ++j) {
+ for (int i = 0; i < 4; ++i) {
+ const int idx = get_r11_eac_index(block, i, j);
+ const int val = base_cw*8 + 4 + palette[idx]*mod*8;
+ if (val < 0) {
+ dst[i] = 0;
+ } else if (val > 2047) {
+ dst[i] = 0xFF;
+ } else {
+ dst[i] = (val >> 3) & 0xFF;
+ }
+ }
+ dst += dstRowBytes;
+ }
+}
+
+// This is the type passed as the CompressorType argument of the compressed
+// blitter for the R11 EAC format. The static functions required to be in this
+// struct are documented in SkTextureCompressor_Blitter.h
+struct CompressorR11EAC {
+ static inline void CompressA8Vertical(uint8_t* dst, const uint8_t* src) {
+ compress_block_vertical(dst, src);
+ }
+
+ static inline void CompressA8Horizontal(uint8_t* dst, const uint8_t* src,
+ int srcRowBytes) {
+ *(reinterpret_cast<uint64_t*>(dst)) = compress_r11eac_block_fast(src, srcRowBytes);
+ }
+
+#if PEDANTIC_BLIT_RECT
+ static inline void UpdateBlock(uint8_t* dst, const uint8_t* src, int srcRowBytes,
+ const uint8_t* mask) {
+ // TODO: krajcevski
+ // The implementation of this function should be similar to that of LATC, since
+ // the R11EAC indices directly correspond to pixel values.
+ SkFAIL("Implement me!");
+ }
+#endif
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+namespace SkTextureCompressor {
+
+bool CompressA8ToR11EAC(uint8_t* dst, const uint8_t* src, int width, int height, size_t rowBytes) {
+
+#if (COMPRESS_R11_EAC_SLOW) || (COMPRESS_R11_EAC_FAST)
+
+ return compress_4x4_a8_to_64bit(dst, src, width, height, rowBytes, compress_r11eac_block);
+
+#elif COMPRESS_R11_EAC_FASTEST
+
+ return compress_a8_to_r11eac_fast(dst, src, width, height, rowBytes);
+
+#else
+#error "Must choose R11 EAC algorithm"
+#endif
+}
+
+SkBlitter* CreateR11EACBlitter(int width, int height, void* outputBuffer,
+ SkTBlitterAllocator* allocator) {
+
+ if ((width % 4) != 0 || (height % 4) != 0) {
+ return nullptr;
+ }
+
+ // Memset the output buffer to an encoding that decodes to zero. We must do this
+ // in order to avoid having uninitialized values in the buffer if the blitter
+ // decides not to write certain scanlines (and skip entire rows of blocks).
+ // In the case of R11 EAC, we use the all-zero block encoding derived above.
+ const int nBlocks = (width * height / 16); // 4x4 pixel blocks.
+ uint64_t *dst = reinterpret_cast<uint64_t *>(outputBuffer);
+ for (int i = 0; i < nBlocks; ++i) {
+ *dst = 0x0020000000002000ULL;
+ ++dst;
+ }
+
+ return allocator->createT<
+ SkTCompressedAlphaBlitter<4, 8, CompressorR11EAC>, int, int, void*>
+ (width, height, outputBuffer);
+}
+
+void DecompressR11EAC(uint8_t* dst, int dstRowBytes, const uint8_t* src, int width, int height) {
+ for (int j = 0; j < height; j += 4) {
+ for (int i = 0; i < width; i += 4) {
+ decompress_r11_eac_block(dst + i, dstRowBytes, src);
+ src += 8;
+ }
+ dst += 4 * dstRowBytes;
+ }
+}
+
+} // namespace SkTextureCompressor
diff --git a/gfx/skia/skia/src/utils/SkTextureCompressor_R11EAC.h b/gfx/skia/skia/src/utils/SkTextureCompressor_R11EAC.h
new file mode 100644
index 000000000..abaabfb36
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkTextureCompressor_R11EAC.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTextureCompressor_R11EAC_DEFINED
+#define SkTextureCompressor_R11EAC_DEFINED
+
+#include "SkBitmapProcShader.h"
+
+class SkBlitter;
+
+namespace SkTextureCompressor {
+
+ bool CompressA8ToR11EAC(uint8_t* dst, const uint8_t* src,
+ int width, int height, size_t rowBytes);
+
+ SkBlitter* CreateR11EACBlitter(int width, int height, void* outputBuffer,
+ SkTBlitterAllocator* allocator);
+
+ void DecompressR11EAC(uint8_t* dst, int dstRB, const uint8_t* src, int width, int height);
+}
+
+#endif // SkTextureCompressor_R11EAC_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkTextureCompressor_Utils.h b/gfx/skia/skia/src/utils/SkTextureCompressor_Utils.h
new file mode 100755
index 000000000..9b115a296
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkTextureCompressor_Utils.h
@@ -0,0 +1,68 @@
+/*
+* Copyright 2014 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef SkTextureCompressorUtils_DEFINED
+#define SkTextureCompressorUtils_DEFINED
+
+namespace SkTextureCompressor {
+
+ // In some compression formats used for grayscale alpha, i.e. coverage masks, three
+ // bit indices are used to represent each pixel. A compression scheme must therefore
+ // quantize the full eight bits of grayscale to three bits. The simplest way to do
+ // this is to take the top three bits of the grayscale value. However, this does not
+ // provide an accurate quantization: 192 will be quantized to 219 instead of 182. In
+ // our compression schemes, we let these three-bit indices represent the full range
+ // of grayscale values, and so when we go from three bits to eight bits, we replicate
+ // the three bits into the lower bits of the eight bit value. Below are two different
+ // techniques that offer a quality versus speed tradeoff in terms of quantization.
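+ //
+ // For instance, taking the top three bits of 192 gives 0b110, which bit
+ // replication expands to 0b11011011 = 219, noticeably above the original value.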
+#if 1
+ // Divides each byte in the 32-bit argument by three.
+ static inline uint32_t MultibyteDiv3(uint32_t x) {
+ const uint32_t a = (x >> 2) & 0x3F3F3F3F;
+ const uint32_t ar = (x & 0x03030303) << 4;
+
+ const uint32_t b = (x >> 4) & 0x0F0F0F0F;
+ const uint32_t br = (x & 0x0F0F0F0F) << 2;
+
+ const uint32_t c = (x >> 6) & 0x03030303;
+ const uint32_t cr = x & 0x3F3F3F3F;
+
+ return a + b + c + (((ar + br + cr) >> 6) & 0x03030303);
+ }
+
+ // Takes a loaded 32-bit integer of four 8-bit greyscale values and returns their
+ // quantization into 3-bit values, used by LATC and R11 EAC. Instead of taking the
+ // top three bits, the function computes the best three-bit value such that its
+ // reconstruction into an eight bit value via bit replication will yield the best
+ // results. For a value in the full 0-255 range we would add 18 and divide
+ // by 36 (255 / 36 ~= 7). However, since we are working in constrained
+ // 8-bit space, our algorithm is the following:
+ // 1. Shift right by one to give room for overflow
+ // 2. Add 9 (18/2)
+ // 3. Divide by 18 (divide by two, then by three twice)
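+ //
+ // For example, 192 becomes (192 >> 1) = 96, then 96 + 9 = 105, then
+ // (105 >> 1) = 52, and the two divisions by three give 17 and finally 5,
+ // i.e. three-bit index 5.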
+ static inline uint32_t ConvertToThreeBitIndex(uint32_t x) {
+ x = (x >> 1) & 0x7F7F7F7F; // 1
+ x = x + 0x09090909; // 2
+
+ // Need to divide by 18... so first divide by two
+ x = (x >> 1) & 0x7F7F7F7F;
+
+ // Now divide by three twice
+ x = MultibyteDiv3(x);
+ x = MultibyteDiv3(x);
+ return x;
+ }
+#else
+ // Moves the top three bits of each byte in the 32-bit argument to the least
+ // significant bits of their respective byte.
+ static inline uint32_t ConvertToThreeBitIndex(uint32_t x) {
+ return (x >> 5) & 0x07070707;
+ }
+#endif
+}
+
+#endif // SkTextureCompressorUtils_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkThreadUtils.h b/gfx/skia/skia/src/utils/SkThreadUtils.h
new file mode 100644
index 000000000..d9fc99d39
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkThreadUtils.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkThreadUtils_DEFINED
+#define SkThreadUtils_DEFINED
+
+#include "SkTypes.h"
+
+class SkThread : SkNoncopyable {
+public:
+ typedef void (*entryPointProc)(void*);
+
+ SkThread(entryPointProc entryPoint, void* data = nullptr);
+
+ /**
+ * Non-virtual, do not subclass.
+ */
+ ~SkThread();
+
+ /**
+ * Starts the thread. Returns false if the thread could not be started.
+ */
+ bool start();
+
+ /**
+ * Waits for the thread to finish.
+ * If the thread has not started, returns immediately.
+ */
+ void join();
+
+private:
+ void* fData;
+};
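+
+// A minimal usage sketch (the worker function here is hypothetical):
+//
+//   static void work(void* arg) { /* do the background work */ }
+//
+//   SkThread thread(work, nullptr);
+//   if (thread.start()) {
+//       thread.join();
+//   }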
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkThreadUtils_pthread.cpp b/gfx/skia/skia/src/utils/SkThreadUtils_pthread.cpp
new file mode 100644
index 000000000..0bd804d43
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkThreadUtils_pthread.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+
+#include "SkThreadUtils.h"
+#include "SkThreadUtils_pthread.h"
+
+#include <pthread.h>
+#include <signal.h>
+
+PThreadEvent::PThreadEvent() : fConditionFlag(false) {
+ pthread_cond_init(&fCondition, nullptr);
+ pthread_mutex_init(&fConditionMutex, nullptr);
+}
+PThreadEvent::~PThreadEvent() {
+ pthread_mutex_destroy(&fConditionMutex);
+ pthread_cond_destroy(&fCondition);
+}
+void PThreadEvent::trigger() {
+ pthread_mutex_lock(&fConditionMutex);
+ fConditionFlag = true;
+ pthread_cond_signal(&fCondition);
+ pthread_mutex_unlock(&fConditionMutex);
+}
+void PThreadEvent::wait() {
+ pthread_mutex_lock(&fConditionMutex);
+ while (!fConditionFlag) {
+ pthread_cond_wait(&fCondition, &fConditionMutex);
+ }
+ pthread_mutex_unlock(&fConditionMutex);
+}
+bool PThreadEvent::isTriggered() {
+ bool currentFlag;
+ pthread_mutex_lock(&fConditionMutex);
+ currentFlag = fConditionFlag;
+ pthread_mutex_unlock(&fConditionMutex);
+ return currentFlag;
+}
+
+SkThread_PThreadData::SkThread_PThreadData(SkThread::entryPointProc entryPoint, void* data)
+ : fPThread()
+ , fValidPThread(false)
+ , fParam(data)
+ , fEntryPoint(entryPoint)
+{
+ pthread_attr_init(&fAttr);
+ pthread_attr_setdetachstate(&fAttr, PTHREAD_CREATE_JOINABLE);
+}
+
+SkThread_PThreadData::~SkThread_PThreadData() {
+ pthread_attr_destroy(&fAttr);
+}
+
+static void* thread_start(void* arg) {
+ SkThread_PThreadData* pthreadData = static_cast<SkThread_PThreadData*>(arg);
+ // Wait for start signal
+ pthreadData->fStarted.wait();
+
+ // Call entry point only if thread was not canceled before starting.
+ if (!pthreadData->fCanceled.isTriggered()) {
+ pthreadData->fEntryPoint(pthreadData->fParam);
+ }
+ return nullptr;
+}
+
+SkThread::SkThread(entryPointProc entryPoint, void* data) {
+ SkThread_PThreadData* pthreadData = new SkThread_PThreadData(entryPoint, data);
+ fData = pthreadData;
+
+ int ret = pthread_create(&(pthreadData->fPThread),
+ &(pthreadData->fAttr),
+ thread_start,
+ pthreadData);
+
+ pthreadData->fValidPThread = (0 == ret);
+}
+
+SkThread::~SkThread() {
+ if (fData != nullptr) {
+ SkThread_PThreadData* pthreadData = static_cast<SkThread_PThreadData*>(fData);
+ // If created thread but start was never called, kill the thread.
+ if (pthreadData->fValidPThread && !pthreadData->fStarted.isTriggered()) {
+ pthreadData->fCanceled.trigger();
+ if (this->start()) {
+ this->join();
+ }
+ }
+ delete pthreadData;
+ }
+}
+
+bool SkThread::start() {
+ SkThread_PThreadData* pthreadData = static_cast<SkThread_PThreadData*>(fData);
+ if (!pthreadData->fValidPThread) {
+ return false;
+ }
+
+ if (pthreadData->fStarted.isTriggered()) {
+ return false;
+ }
+ pthreadData->fStarted.trigger();
+ return true;
+}
+
+void SkThread::join() {
+ SkThread_PThreadData* pthreadData = static_cast<SkThread_PThreadData*>(fData);
+ if (!pthreadData->fValidPThread || !pthreadData->fStarted.isTriggered()) {
+ return;
+ }
+
+ pthread_join(pthreadData->fPThread, nullptr);
+}
diff --git a/gfx/skia/skia/src/utils/SkThreadUtils_pthread.h b/gfx/skia/skia/src/utils/SkThreadUtils_pthread.h
new file mode 100644
index 000000000..3e1020275
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkThreadUtils_pthread.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkThreadUtils_PThreadData_DEFINED
+#define SkThreadUtils_PThreadData_DEFINED
+
+#include "SkThreadUtils.h"
+#include <pthread.h>
+
+class PThreadEvent : SkNoncopyable {
+public:
+ PThreadEvent();
+ ~PThreadEvent();
+ void trigger();
+ void wait();
+ bool isTriggered();
+
+private:
+ pthread_cond_t fCondition;
+ pthread_mutex_t fConditionMutex;
+ bool fConditionFlag;
+};
+
+class SkThread_PThreadData : SkNoncopyable {
+public:
+ SkThread_PThreadData(SkThread::entryPointProc entryPoint, void* data);
+ ~SkThread_PThreadData();
+ pthread_t fPThread;
+ bool fValidPThread;
+ PThreadEvent fStarted;
+ PThreadEvent fCanceled;
+
+ pthread_attr_t fAttr;
+
+ void* fParam;
+ SkThread::entryPointProc fEntryPoint;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkThreadUtils_win.cpp b/gfx/skia/skia/src/utils/SkThreadUtils_win.cpp
new file mode 100644
index 000000000..0da339aa8
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkThreadUtils_win.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32)
+
+#include "SkThreadUtils.h"
+#include "SkThreadUtils_win.h"
+
+SkThread_WinData::SkThread_WinData(SkThread::entryPointProc entryPoint, void* data)
+ : fHandle(nullptr)
+ , fParam(data)
+ , fThreadId(0)
+ , fEntryPoint(entryPoint)
+ , fStarted(false)
+{
+ fCancelEvent = CreateEvent(
+ nullptr, // default security attributes
+ false, //auto reset
+ false, //not signaled
+ nullptr); //no name
+}
+
+SkThread_WinData::~SkThread_WinData() {
+ CloseHandle(fCancelEvent);
+}
+
+static DWORD WINAPI thread_start(LPVOID data) {
+ SkThread_WinData* winData = static_cast<SkThread_WinData*>(data);
+
+ //See if this thread was canceled before starting.
+ if (WaitForSingleObject(winData->fCancelEvent, 0) == WAIT_OBJECT_0) {
+ return 0;
+ }
+
+ winData->fEntryPoint(winData->fParam);
+ return 0;
+}
+
+SkThread::SkThread(entryPointProc entryPoint, void* data) {
+ SkThread_WinData* winData = new SkThread_WinData(entryPoint, data);
+ fData = winData;
+
+ if (nullptr == winData->fCancelEvent) {
+ return;
+ }
+
+ winData->fHandle = CreateThread(
+ nullptr, // default security attributes
+ 0, // use default stack size
+ thread_start, // thread function name (proxy)
+ winData, // argument to thread function (proxy args)
+ CREATE_SUSPENDED, // we used to set processor affinity, which needed this
+ &winData->fThreadId); // returns the thread identifier
+}
+
+SkThread::~SkThread() {
+ if (fData != nullptr) {
+ SkThread_WinData* winData = static_cast<SkThread_WinData*>(fData);
+ // If created thread but start was never called, kill the thread.
+ if (winData->fHandle != nullptr && !winData->fStarted) {
+ if (SetEvent(winData->fCancelEvent) != 0) {
+ if (this->start()) {
+ this->join();
+ }
+ } else {
+ //kill with prejudice
+ TerminateThread(winData->fHandle, -1);
+ }
+ }
+ delete winData;
+ }
+}
+
+bool SkThread::start() {
+ SkThread_WinData* winData = static_cast<SkThread_WinData*>(fData);
+ if (nullptr == winData->fHandle) {
+ return false;
+ }
+
+ if (winData->fStarted) {
+ return false;
+ }
+ winData->fStarted = -1 != ResumeThread(winData->fHandle);
+ return winData->fStarted;
+}
+
+void SkThread::join() {
+ SkThread_WinData* winData = static_cast<SkThread_WinData*>(fData);
+ if (nullptr == winData->fHandle || !winData->fStarted) {
+ return;
+ }
+
+ WaitForSingleObject(winData->fHandle, INFINITE);
+}
+
+#endif//defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/utils/SkThreadUtils_win.h b/gfx/skia/skia/src/utils/SkThreadUtils_win.h
new file mode 100644
index 000000000..b1de4816c
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkThreadUtils_win.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkThreadUtils_WinData_DEFINED
+#define SkThreadUtils_WinData_DEFINED
+
+#include "SkLeanWindows.h"
+#include "SkThreadUtils.h"
+
+class SkThread_WinData : SkNoncopyable {
+public:
+ SkThread_WinData(SkThread::entryPointProc entryPoint, void* data);
+ ~SkThread_WinData();
+ HANDLE fHandle;
+ HANDLE fCancelEvent;
+
+ LPVOID fParam;
+ DWORD fThreadId;
+ SkThread::entryPointProc fEntryPoint;
+ bool fStarted;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkWhitelistChecksums.inc b/gfx/skia/skia/src/utils/SkWhitelistChecksums.inc
new file mode 100644
index 000000000..4b17df48f
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkWhitelistChecksums.inc
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * GenerateChecksums() in ../../src/utils/SkWhitelistTypefaces.cpp generated SkWhitelistChecksums.inc.
+ * Run 'whitelist_typefaces --generate' to create anew.
+ */
+
+#include "SkTDArray.h"
+
+struct Whitelist {
+ const char* fFontName;
+ uint32_t fChecksum;
+ bool fSerializedNameOnly;
+ bool fSerializedSub;
+};
+
+static Whitelist whitelist[] = {
+ { "Aegean", 0x639a35c7, false, false },
+ { "Analecta", 0x639a35c7, false, false },
+ { "Arial", 0xbc28cb14, false, false },
+ { "DejaVu Sans", 0x639a35c7, false, false },
+ { "DejaVu Sans Mono", 0xbc29a5d9, false, false },
+ { "DejaVu Serif", 0x9db67efe, false, false },
+ { "FreeMono", 0x724884f4, false, false },
+ { "FreeSans", 0x7dfc48a3, false, false },
+ { "FreeSerif", 0xa1ae8c77, false, false },
+ { "Khmer OS", 0x917c40aa, false, false },
+ { "Kochi Gothic", 0x962132dd, false, false },
+ { "Lohit Kannada", 0x0b6ce863, false, false },
+ { "Lohit Marathi", 0x0eb0a941, false, false },
+ { "Lohit Oriya", 0xf3e9d313, false, false },
+ { "Lohit Punjabi", 0xfd8b26e0, false, false },
+ { "Lohit Tamil", 0xa8111d99, false, false },
+ { "Lohit Telugu", 0xd34299e0, false, false },
+ { "Meera", 0xe3e16220, false, false },
+ { "Mukti Narrow", 0x53f7d053, false, false },
+ { "NanumBarunGothic", 0x639a35c7, false, false },
+ { "NanumGothic", 0xff8d773d, false, false },
+ { "OpenSymbol", 0x4fcaf331, false, false },
+ { "Symbola", 0x639a35c7, false, false },
+ { "TakaoPGothic", 0x068c405a, false, false },
+ { "Waree", 0x6a2bfca8, false, false },
+ { "WenQuanYi Micro Hei", 0xcdec08a3, false, false },
+ { "padmaa", 0x09eb1865, false, false },
+};
+
+static const int whitelistCount = (int) SK_ARRAY_COUNT(whitelist);
diff --git a/gfx/skia/skia/src/utils/SkWhitelistTypefaces.cpp b/gfx/skia/skia/src/utils/SkWhitelistTypefaces.cpp
new file mode 100644
index 000000000..007def6d8
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkWhitelistTypefaces.cpp
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkFontDescriptor.h"
+#include "SkOpts.h"
+#include "SkStream.h"
+#include "SkString.h"
+#include "SkTypeface.h"
+#include "SkUtils.h"
+#include "../sfnt/SkOTUtils.h"
+
+#include "SkWhitelistChecksums.inc"
+
+#define WHITELIST_DEBUG 0
+
+extern void WhitelistSerializeTypeface(const SkTypeface*, SkWStream* );
+sk_sp<SkTypeface> WhitelistDeserializeTypeface(SkStream* );
+extern bool CheckChecksums();
+extern bool GenerateChecksums();
+
+#if WHITELIST_DEBUG
+static bool timesNewRomanSerializedNameOnly = false;
+#endif
+
+#define SUBNAME_PREFIX "sk_"
+
+static bool font_name_is_local(const char* fontName, SkFontStyle style) {
+ if (!strcmp(fontName, "DejaVu Sans")) {
+ return true;
+ }
+ sk_sp<SkTypeface> defaultFace(SkTypeface::MakeFromName(nullptr, style));
+ sk_sp<SkTypeface> foundFace(SkTypeface::MakeFromName(fontName, style));
+ return defaultFace != foundFace;
+}
+
+static int whitelist_name_index(const SkTypeface* tf) {
+
+ SkString fontNameStr;
+ SkAutoTUnref<SkTypeface::LocalizedStrings> nameIter(
+ SkOTUtils::LocalizedStrings_NameTable::CreateForFamilyNames(*tf));
+ SkTypeface::LocalizedString familyNameLocalized;
+ while (nameIter->next(&familyNameLocalized)) {
+ fontNameStr = familyNameLocalized.fString;
+ // check against permissible list of names
+ for (int i = 0; i < whitelistCount; ++i) {
+ if (fontNameStr.equals(whitelist[i].fFontName)) {
+ return i;
+ }
+ }
+ }
+#if WHITELIST_DEBUG
+ SkAutoTUnref<SkTypeface::LocalizedStrings> debugIter(
+ SkOTUtils::LocalizedStrings_NameTable::CreateForFamilyNames(*tf));
+ while (debugIter->next(&familyNameLocalized)) {
+ SkDebugf("no match fontName=\"%s\"\n", familyNameLocalized.fString.c_str());
+ }
+#endif
+ return -1;
+}
+
+static uint32_t compute_checksum(const SkTypeface* tf) {
+ std::unique_ptr<SkFontData> fontData = tf->makeFontData();
+ if (!fontData) {
+ return 0;
+ }
+ SkStreamAsset* fontStream = fontData->getStream();
+ if (!fontStream) {
+ return 0;
+ }
+ SkTDArray<char> data;
+ size_t length = fontStream->getLength();
+ if (!length) {
+ return 0;
+ }
+ data.setCount((int) length);
+ if (!fontStream->peek(data.begin(), length)) {
+ return 0;
+ }
+ return SkOpts::hash(data.begin(), length);
+}
+
+static void serialize_sub(const char* fontName, SkFontStyle style, SkWStream* wstream) {
+ SkFontDescriptor desc;
+ SkString subName(SUBNAME_PREFIX);
+ subName.append(fontName);
+ const char* familyName = subName.c_str();
+ desc.setFamilyName(familyName);
+ desc.setStyle(style);
+ desc.serialize(wstream);
+#if WHITELIST_DEBUG
+ for (int i = 0; i < whitelistCount; ++i) {
+ if (!strcmp(fontName, whitelist[i].fFontName)) {
+ if (!whitelist[i].fSerializedSub) {
+ whitelist[i].fSerializedSub = true;
+ SkDebugf("%s %s\n", __FUNCTION__, familyName);
+ }
+ break;
+ }
+ }
+#endif
+}
+
+static bool is_local(const SkTypeface* tf) {
+ bool isLocal = false;
+ SkFontDescriptor desc;
+ tf->getFontDescriptor(&desc, &isLocal);
+ return isLocal;
+}
+
+static void serialize_full(const SkTypeface* tf, SkWStream* wstream) {
+ bool isLocal = false;
+ SkFontDescriptor desc;
+ tf->getFontDescriptor(&desc, &isLocal);
+
+ // Embed font data if it's a local font.
+ if (isLocal && !desc.hasFontData()) {
+ desc.setFontData(tf->makeFontData());
+ }
+ desc.serialize(wstream);
+}
+
+static void serialize_name_only(const SkTypeface* tf, SkWStream* wstream) {
+ bool isLocal = false;
+ SkFontDescriptor desc;
+ tf->getFontDescriptor(&desc, &isLocal);
+ SkASSERT(!isLocal);
+#if WHITELIST_DEBUG
+ const char* familyName = desc.getFamilyName();
+ if (familyName) {
+ if (!strcmp(familyName, "Times New Roman")) {
+ if (!timesNewRomanSerializedNameOnly) {
+ timesNewRomanSerializedNameOnly = true;
+ SkDebugf("%s %s\n", __FUNCTION__, familyName);
+ }
+ } else {
+ for (int i = 0; i < whitelistCount; ++i) {
+ if (!strcmp(familyName, whitelist[i].fFontName)) {
+ if (!whitelist[i].fSerializedNameOnly) {
+ whitelist[i].fSerializedNameOnly = true;
+ SkDebugf("%s %s\n", __FUNCTION__, familyName);
+ }
+ break;
+ }
+ }
+ }
+ }
+#endif
+ desc.serialize(wstream);
+}
+
+void WhitelistSerializeTypeface(const SkTypeface* tf, SkWStream* wstream) {
+ if (!is_local(tf)) {
+ serialize_name_only(tf, wstream);
+ return;
+ }
+ int whitelistIndex = whitelist_name_index(tf);
+ if (whitelistIndex < 0) {
+ serialize_full(tf, wstream);
+ return;
+ }
+ const char* fontName = whitelist[whitelistIndex].fFontName;
+ if (!font_name_is_local(fontName, tf->fontStyle())) {
+#if WHITELIST_DEBUG
+ SkDebugf("name not found locally \"%s\" style=%d\n", fontName, tf->style());
+#endif
+ serialize_full(tf, wstream);
+ return;
+ }
+ uint32_t checksum = compute_checksum(tf);
+ if (whitelist[whitelistIndex].fChecksum != checksum) {
+#if WHITELIST_DEBUG
+ if (whitelist[whitelistIndex].fChecksum) {
+ SkDebugf("!!! checksum changed !!!\n");
+ }
+ SkDebugf("checksum updated\n");
+ SkDebugf(" { \"%s\", 0x%08x },\n", fontName, checksum);
+#endif
+ whitelist[whitelistIndex].fChecksum = checksum;
+ }
+ serialize_sub(fontName, tf->fontStyle(), wstream);
+}
+
+sk_sp<SkTypeface> WhitelistDeserializeTypeface(SkStream* stream) {
+ SkFontDescriptor desc;
+ if (!SkFontDescriptor::Deserialize(stream, &desc)) {
+ return nullptr;
+ }
+
+ std::unique_ptr<SkFontData> data = desc.detachFontData();
+ if (data) {
+ sk_sp<SkTypeface> typeface(SkTypeface::MakeFromFontData(std::move(data)));
+ if (typeface) {
+ return typeface;
+ }
+ }
+ const char* familyName = desc.getFamilyName();
+ if (!strncmp(SUBNAME_PREFIX, familyName, sizeof(SUBNAME_PREFIX) - 1)) {
+ familyName += sizeof(SUBNAME_PREFIX) - 1;
+ }
+ return SkTypeface::MakeFromName(familyName, desc.getStyle());
+}
+
+bool CheckChecksums() {
+ for (int i = 0; i < whitelistCount; ++i) {
+ const char* fontName = whitelist[i].fFontName;
+ sk_sp<SkTypeface> tf(SkTypeface::MakeFromName(fontName, SkFontStyle()));
+ uint32_t checksum = compute_checksum(tf.get());
+ if (whitelist[i].fChecksum != checksum) {
+ return false;
+ }
+ }
+ return true;
+}
+
+const char checksumFileName[] = "SkWhitelistChecksums.inc";
+
+const char checksumHeader[] =
+"/*" "\n"
+" * Copyright 2015 Google Inc." "\n"
+" *" "\n"
+" * Use of this source code is governed by a BSD-style license that can be" "\n"
+" * found in the LICENSE file." "\n"
+" *" "\n"
+" * %s() in %s generated %s." "\n"
+" * Run 'whitelist_typefaces --generate' to create anew." "\n"
+" */" "\n"
+"" "\n"
+"#include \"SkTDArray.h\"" "\n"
+"" "\n"
+"struct Whitelist {" "\n"
+" const char* fFontName;" "\n"
+" uint32_t fChecksum;" "\n"
+" bool fSerializedNameOnly;" "\n"
+" bool fSerializedSub;" "\n"
+"};" "\n"
+"" "\n"
+"static Whitelist whitelist[] = {" "\n";
+
+const char checksumEntry[] =
+" { \"%s\", 0x%08x, false, false }," "\n";
+
+const char checksumTrailer[] =
+"};" "\n"
+"" "\n"
+"static const int whitelistCount = (int) SK_ARRAY_COUNT(whitelist);" "\n";
+
+
+#include "SkOSFile.h"
+
+bool GenerateChecksums() {
+ FILE* file = sk_fopen(checksumFileName, kWrite_SkFILE_Flag);
+ if (!file) {
+ SkDebugf("Can't open %s for writing.\n", checksumFileName);
+ return false;
+ }
+ SkString line;
+ line.printf(checksumHeader, __FUNCTION__, __FILE__, checksumFileName);
+ sk_fwrite(line.c_str(), line.size(), file);
+ for (int i = 0; i < whitelistCount; ++i) {
+ const char* fontName = whitelist[i].fFontName;
+ sk_sp<SkTypeface> tf(SkTypeface::MakeFromName(fontName, SkFontStyle()));
+ uint32_t checksum = compute_checksum(tf.get());
+ line.printf(checksumEntry, fontName, checksum);
+ sk_fwrite(line.c_str(), line.size(), file);
+ }
+ sk_fwrite(checksumTrailer, sizeof(checksumTrailer) - 1, file);
+ sk_fclose(file);
+ return true;
+}
diff --git a/gfx/skia/skia/src/utils/mac/SkCreateCGImageRef.cpp b/gfx/skia/skia/src/utils/mac/SkCreateCGImageRef.cpp
new file mode 100644
index 000000000..d9cdb86e4
--- /dev/null
+++ b/gfx/skia/skia/src/utils/mac/SkCreateCGImageRef.cpp
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#include "SkCGUtils.h"
+#include "SkBitmap.h"
+#include "SkColorPriv.h"
+
+static CGBitmapInfo ComputeCGAlphaInfo_RGBA(SkAlphaType at) {
+ CGBitmapInfo info = kCGBitmapByteOrder32Big;
+ switch (at) {
+ case kUnknown_SkAlphaType:
+ break;
+ case kOpaque_SkAlphaType:
+ info |= kCGImageAlphaNoneSkipLast;
+ break;
+ case kPremul_SkAlphaType:
+ info |= kCGImageAlphaPremultipliedLast;
+ break;
+ case kUnpremul_SkAlphaType:
+ info |= kCGImageAlphaLast;
+ break;
+ }
+ return info;
+}
+
+static CGBitmapInfo ComputeCGAlphaInfo_BGRA(SkAlphaType at) {
+ CGBitmapInfo info = kCGBitmapByteOrder32Little;
+ switch (at) {
+ case kUnknown_SkAlphaType:
+ break;
+ case kOpaque_SkAlphaType:
+ info |= kCGImageAlphaNoneSkipFirst;
+ break;
+ case kPremul_SkAlphaType:
+ info |= kCGImageAlphaPremultipliedFirst;
+ break;
+ case kUnpremul_SkAlphaType:
+ info |= kCGImageAlphaFirst;
+ break;
+ }
+ return info;
+}
+
+static void SkBitmap_ReleaseInfo(void* info, const void* pixelData, size_t size) {
+ SkBitmap* bitmap = reinterpret_cast<SkBitmap*>(info);
+ delete bitmap;
+}
+
+static bool getBitmapInfo(const SkBitmap& bm,
+ size_t* bitsPerComponent,
+ CGBitmapInfo* info,
+ bool* upscaleTo32) {
+ if (upscaleTo32) {
+ *upscaleTo32 = false;
+ }
+
+ switch (bm.colorType()) {
+ case kRGB_565_SkColorType:
+#if 0
+ // doesn't seem quite right. Are they thinking 1555?
+ *bitsPerComponent = 5;
+ *info = kCGBitmapByteOrder16Little | kCGImageAlphaNone;
+#else
+ if (upscaleTo32) {
+ *upscaleTo32 = true;
+ }
+ // now treat like RGBA
+ *bitsPerComponent = 8;
+ *info = ComputeCGAlphaInfo_RGBA(kOpaque_SkAlphaType);
+#endif
+ break;
+ case kRGBA_8888_SkColorType:
+ *bitsPerComponent = 8;
+ *info = ComputeCGAlphaInfo_RGBA(bm.alphaType());
+ break;
+ case kBGRA_8888_SkColorType:
+ *bitsPerComponent = 8;
+ *info = ComputeCGAlphaInfo_BGRA(bm.alphaType());
+ break;
+ case kARGB_4444_SkColorType:
+ *bitsPerComponent = 4;
+ *info = kCGBitmapByteOrder16Little;
+ if (bm.isOpaque()) {
+ *info |= kCGImageAlphaNoneSkipLast;
+ } else {
+ *info |= kCGImageAlphaPremultipliedLast;
+ }
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static SkBitmap* prepareForImageRef(const SkBitmap& bm,
+ size_t* bitsPerComponent,
+ CGBitmapInfo* info) {
+ bool upscaleTo32;
+ if (!getBitmapInfo(bm, bitsPerComponent, info, &upscaleTo32)) {
+ return nullptr;
+ }
+
+ SkBitmap* copy;
+ if (upscaleTo32) {
+ copy = new SkBitmap;
+ // here we make a deep copy of the pixels, since CG won't take our
+ // 565 directly
+ bm.copyTo(copy, kN32_SkColorType);
+ } else {
+ copy = new SkBitmap(bm);
+ }
+ return copy;
+}
+
+CGImageRef SkCreateCGImageRefWithColorspace(const SkBitmap& bm,
+ CGColorSpaceRef colorSpace) {
+ size_t bitsPerComponent SK_INIT_TO_AVOID_WARNING;
+ CGBitmapInfo info SK_INIT_TO_AVOID_WARNING;
+
+ SkBitmap* bitmap = prepareForImageRef(bm, &bitsPerComponent, &info);
+ if (nullptr == bitmap) {
+ return nullptr;
+ }
+
+ const int w = bitmap->width();
+ const int h = bitmap->height();
+ const size_t s = bitmap->getSize();
+
+ // our provider "owns" the bitmap*, and will take care of deleting it
+ // we initially lock it, so we can access the pixels. The bitmap will be deleted in the release
+ // proc, which will in turn unlock the pixels
+ bitmap->lockPixels();
+ CGDataProviderRef dataRef = CGDataProviderCreateWithData(bitmap, bitmap->getPixels(), s,
+ SkBitmap_ReleaseInfo);
+
+ bool releaseColorSpace = false;
+ if (nullptr == colorSpace) {
+ colorSpace = CGColorSpaceCreateDeviceRGB();
+ releaseColorSpace = true;
+ }
+
+ CGImageRef ref = CGImageCreate(w, h, bitsPerComponent,
+ bitmap->bytesPerPixel() * 8,
+ bitmap->rowBytes(), colorSpace, info, dataRef,
+ nullptr, false, kCGRenderingIntentDefault);
+
+ if (releaseColorSpace) {
+ CGColorSpaceRelease(colorSpace);
+ }
+ CGDataProviderRelease(dataRef);
+ return ref;
+}
+
+void SkCGDrawBitmap(CGContextRef cg, const SkBitmap& bm, float x, float y) {
+ CGImageRef img = SkCreateCGImageRef(bm);
+
+ if (img) {
+ CGRect r = CGRectMake(0, 0, bm.width(), bm.height());
+
+ CGContextSaveGState(cg);
+ CGContextTranslateCTM(cg, x, r.size.height + y);
+ CGContextScaleCTM(cg, 1, -1);
+
+ CGContextDrawImage(cg, r, img);
+
+ CGContextRestoreGState(cg);
+
+ CGImageRelease(img);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SK_API bool SkCopyPixelsFromCGImage(const SkImageInfo& info, size_t rowBytes, void* pixels,
+ CGImageRef image) {
+ CGBitmapInfo cg_bitmap_info = 0;
+ size_t bitsPerComponent = 0;
+ switch (info.colorType()) {
+ case kRGBA_8888_SkColorType:
+ bitsPerComponent = 8;
+ cg_bitmap_info = ComputeCGAlphaInfo_RGBA(info.alphaType());
+ break;
+ case kBGRA_8888_SkColorType:
+ bitsPerComponent = 8;
+ cg_bitmap_info = ComputeCGAlphaInfo_BGRA(info.alphaType());
+ break;
+ default:
+ return false; // no other colortypes are supported (for now)
+ }
+
+ CGColorSpaceRef cs = CGColorSpaceCreateDeviceRGB();
+ CGContextRef cg = CGBitmapContextCreate(pixels, info.width(), info.height(), bitsPerComponent,
+ rowBytes, cs, cg_bitmap_info);
+ CFRelease(cs);
+ if (nullptr == cg) {
+ return false;
+ }
+
+ // use this blend mode, to avoid having to erase the pixels first, and to avoid CG performing
+ // any blending (which could introduce errors and be slower).
+ CGContextSetBlendMode(cg, kCGBlendModeCopy);
+
+ CGContextDrawImage(cg, CGRectMake(0, 0, info.width(), info.height()), image);
+ CGContextRelease(cg);
+ return true;
+}
+
+bool SkCreateBitmapFromCGImage(SkBitmap* dst, CGImageRef image, SkISize* scaleToFit) {
+ const int width = scaleToFit ? scaleToFit->width() : SkToInt(CGImageGetWidth(image));
+ const int height = scaleToFit ? scaleToFit->height() : SkToInt(CGImageGetHeight(image));
+ SkImageInfo info = SkImageInfo::MakeN32Premul(width, height);
+
+ SkBitmap tmp;
+ if (!tmp.tryAllocPixels(info)) {
+ return false;
+ }
+
+ if (!SkCopyPixelsFromCGImage(tmp.info(), tmp.rowBytes(), tmp.getPixels(), image)) {
+ return false;
+ }
+
+ CGImageAlphaInfo cgInfo = CGImageGetAlphaInfo(image);
+ switch (cgInfo) {
+ case kCGImageAlphaNone:
+ case kCGImageAlphaNoneSkipLast:
+ case kCGImageAlphaNoneSkipFirst:
+ SkASSERT(SkBitmap::ComputeIsOpaque(tmp));
+ tmp.setAlphaType(kOpaque_SkAlphaType);
+ break;
+ default:
+ // we don't know if we're opaque or not, so compute it.
+ if (SkBitmap::ComputeIsOpaque(tmp)) {
+ tmp.setAlphaType(kOpaque_SkAlphaType);
+ }
+ }
+
+ *dst = tmp;
+ return true;
+}
+
+#endif//defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
diff --git a/gfx/skia/skia/src/utils/mac/SkStream_mac.cpp b/gfx/skia/skia/src/utils/mac/SkStream_mac.cpp
new file mode 100644
index 000000000..e878c9724
--- /dev/null
+++ b/gfx/skia/skia/src/utils/mac/SkStream_mac.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#include "SkCGUtils.h"
+#include "SkStream.h"
+
+// These are used by CGDataProviderCreateWithData
+
+static void unref_proc(void* info, const void* addr, size_t size) {
+ SkASSERT(info);
+ ((SkRefCnt*)info)->unref();
+}
+
+static void delete_stream_proc(void* info, const void* addr, size_t size) {
+ SkASSERT(info);
+ SkStream* stream = (SkStream*)info;
+ SkASSERT(stream->getMemoryBase() == addr);
+ SkASSERT(stream->getLength() == size);
+ delete stream;
+}
+
+// These are used by CGDataProviderSequentialCallbacks
+
+static size_t get_bytes_proc(void* info, void* buffer, size_t bytes) {
+ SkASSERT(info);
+ return ((SkStream*)info)->read(buffer, bytes);
+}
+
+static off_t skip_forward_proc(void* info, off_t bytes) {
+ return ((SkStream*)info)->skip((size_t) bytes);
+}
+
+static void rewind_proc(void* info) {
+ SkASSERT(info);
+ ((SkStream*)info)->rewind();
+}
+
+// Used when info is an SkStream.
+static void release_info_proc(void* info) {
+ SkASSERT(info);
+ delete (SkStream*)info;
+}
+
+CGDataProviderRef SkCreateDataProviderFromStream(std::unique_ptr<SkStreamRewindable> stream) {
+ // TODO: Replace with SkStream::getData() when that is added. Then we only
+ // have one version of CGDataProviderCreateWithData (i.e. same release proc)
+ const void* addr = stream->getMemoryBase();
+ if (addr) {
+ // special-case when the stream is just a block of ram
+ size_t size = stream->getLength();
+ return CGDataProviderCreateWithData(stream.release(), addr, size, delete_stream_proc);
+ }
+
+ CGDataProviderSequentialCallbacks rec;
+ sk_bzero(&rec, sizeof(rec));
+ rec.version = 0;
+ rec.getBytes = get_bytes_proc;
+ rec.skipForward = skip_forward_proc;
+ rec.rewind = rewind_proc;
+ rec.releaseInfo = release_info_proc;
+ return CGDataProviderCreateSequential(stream.release(), &rec);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkData.h"
+
+CGDataProviderRef SkCreateDataProviderFromData(sk_sp<SkData> data) {
+ const void* addr = data->data();
+ size_t size = data->size();
+ return CGDataProviderCreateWithData(data.release(), addr, size, unref_proc);
+}
+
+#endif//defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
diff --git a/gfx/skia/skia/src/utils/win/SkAutoCoInitialize.cpp b/gfx/skia/skia/src/utils/win/SkAutoCoInitialize.cpp
new file mode 100644
index 000000000..f6da67dca
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkAutoCoInitialize.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32)
+
+#include "SkAutoCoInitialize.h"
+
+#include <objbase.h>
+#include <winerror.h>
+
+SkAutoCoInitialize::SkAutoCoInitialize() :
+ fHR(
+ CoInitializeEx(nullptr, COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE)
+ )
+{ }
+
+SkAutoCoInitialize::~SkAutoCoInitialize() {
+ if (SUCCEEDED(this->fHR)) {
+ CoUninitialize();
+ }
+}
+
+bool SkAutoCoInitialize::succeeded() {
+ return SUCCEEDED(this->fHR) || RPC_E_CHANGED_MODE == this->fHR;
+}
+
+#endif//defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/utils/win/SkAutoCoInitialize.h b/gfx/skia/skia/src/utils/win/SkAutoCoInitialize.h
new file mode 100644
index 000000000..f11cf856c
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkAutoCoInitialize.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAutoCo_DEFINED
+#define SkAutoCo_DEFINED
+
+#include "SkTypes.h"
+
+#ifdef SK_BUILD_FOR_WIN
+
+#include "SkLeanWindows.h"
+
+/**
+ * An instance of this class initializes COM on creation
+ * and closes the COM library on destruction.
+ */
+class SkAutoCoInitialize : SkNoncopyable {
+private:
+ HRESULT fHR;
+public:
+ SkAutoCoInitialize();
+ ~SkAutoCoInitialize();
+ bool succeeded();
+};
+
+#endif // SK_BUILD_FOR_WIN
+#endif // SkAutoCo_DEFINED
diff --git a/gfx/skia/skia/src/utils/win/SkDWrite.cpp b/gfx/skia/skia/src/utils/win/SkDWrite.cpp
new file mode 100644
index 000000000..17613f6fa
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkDWrite.cpp
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32)
+
+#include "SkDWrite.h"
+#include "SkHRESULT.h"
+#include "SkOnce.h"
+#include "SkString.h"
+
+#include <dwrite.h>
+
+static IDWriteFactory* gDWriteFactory = nullptr;
+
+static void release_dwrite_factory() {
+ if (gDWriteFactory) {
+ gDWriteFactory->Release();
+ }
+}
+
+static void create_dwrite_factory(IDWriteFactory** factory) {
+ typedef decltype(DWriteCreateFactory)* DWriteCreateFactoryProc;
+ DWriteCreateFactoryProc dWriteCreateFactoryProc = reinterpret_cast<DWriteCreateFactoryProc>(
+ GetProcAddress(LoadLibraryW(L"dwrite.dll"), "DWriteCreateFactory"));
+
+ if (!dWriteCreateFactoryProc) {
+ HRESULT hr = HRESULT_FROM_WIN32(GetLastError());
+ if (!IS_ERROR(hr)) {
+ hr = ERROR_PROC_NOT_FOUND;
+ }
+ HRVM(hr, "Could not get DWriteCreateFactory proc.");
+ }
+
+ HRVM(dWriteCreateFactoryProc(DWRITE_FACTORY_TYPE_SHARED,
+ __uuidof(IDWriteFactory),
+ reinterpret_cast<IUnknown**>(factory)),
+ "Could not create DirectWrite factory.");
+ atexit(release_dwrite_factory);
+}
+
+
+IDWriteFactory* sk_get_dwrite_factory() {
+ static SkOnce once;
+ once(create_dwrite_factory, &gDWriteFactory);
+ return gDWriteFactory;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// String conversion
+
+/** Converts a utf8 string to a WCHAR string. */
+HRESULT sk_cstring_to_wchar(const char* skname, SkSMallocWCHAR* name) {
+ int wlen = MultiByteToWideChar(CP_UTF8, 0, skname, -1, nullptr, 0);
+ if (0 == wlen) {
+ HRM(HRESULT_FROM_WIN32(GetLastError()),
+            "Could not get length for utf-8 to wchar conversion.");
+ }
+ name->reset(wlen);
+ wlen = MultiByteToWideChar(CP_UTF8, 0, skname, -1, name->get(), wlen);
+ if (0 == wlen) {
+        HRM(HRESULT_FROM_WIN32(GetLastError()), "Could not convert utf-8 to wchar.");
+ }
+ return S_OK;
+}
+
+/** Converts a WCHAR string to a utf8 string. */
+HRESULT sk_wchar_to_skstring(WCHAR* name, int nameLen, SkString* skname) {
+ int len = WideCharToMultiByte(CP_UTF8, 0, name, nameLen, nullptr, 0, nullptr, nullptr);
+ if (0 == len) {
+ if (nameLen <= 0) {
+ skname->reset();
+ return S_OK;
+ }
+ HRM(HRESULT_FROM_WIN32(GetLastError()),
+            "Could not get length for wchar to utf-8 conversion.");
+ }
+ skname->resize(len);
+
+ len = WideCharToMultiByte(CP_UTF8, 0, name, nameLen, skname->writable_str(), len, nullptr, nullptr);
+ if (0 == len) {
+        HRM(HRESULT_FROM_WIN32(GetLastError()), "Could not convert wchar to utf-8.");
+ }
+ return S_OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Locale
+
+void sk_get_locale_string(IDWriteLocalizedStrings* names, const WCHAR* preferedLocale,
+ SkString* skname) {
+ UINT32 nameIndex = 0;
+ if (preferedLocale) {
+ // Ignore any errors and continue with index 0 if there is a problem.
+ BOOL nameExists;
+ names->FindLocaleName(preferedLocale, &nameIndex, &nameExists);
+ if (!nameExists) {
+ nameIndex = 0;
+ }
+ }
+
+ UINT32 nameLen;
+ HRVM(names->GetStringLength(nameIndex, &nameLen), "Could not get name length.");
+
+ SkSMallocWCHAR name(nameLen+1);
+ HRVM(names->GetString(nameIndex, name.get(), nameLen+1), "Could not get string.");
+
+ HRV(sk_wchar_to_skstring(name.get(), nameLen, skname));
+}
+
+HRESULT SkGetGetUserDefaultLocaleNameProc(SkGetUserDefaultLocaleNameProc* proc) {
+ *proc = reinterpret_cast<SkGetUserDefaultLocaleNameProc>(
+ GetProcAddress(LoadLibraryW(L"Kernel32.dll"), "GetUserDefaultLocaleName")
+ );
+ if (!*proc) {
+ HRESULT hr = HRESULT_FROM_WIN32(GetLastError());
+ if (!IS_ERROR(hr)) {
+ hr = ERROR_PROC_NOT_FOUND;
+ }
+ return hr;
+ }
+ return S_OK;
+}
+
+#endif//defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/utils/win/SkDWrite.h b/gfx/skia/skia/src/utils/win/SkDWrite.h
new file mode 100644
index 000000000..ddcbc6c42
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkDWrite.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDWrite_DEFINED
+#define SkDWrite_DEFINED
+
+#include "SkFontStyle.h"
+#include "SkTemplates.h"
+
+#include <dwrite.h>
+#include <winsdkver.h>
+
+class SkString;
+
+////////////////////////////////////////////////////////////////////////////////
+// Factory
+
+#ifndef SK_HAS_DWRITE_1_H
+#define SK_HAS_DWRITE_1_H (WINVER_MAXVER >= 0x0602)
+#endif
+
+#ifndef SK_HAS_DWRITE_2_H
+#define SK_HAS_DWRITE_2_H (WINVER_MAXVER >= 0x0603)
+#endif
+
+IDWriteFactory* sk_get_dwrite_factory();
+
+////////////////////////////////////////////////////////////////////////////////
+// String conversion
+
+/** Prefer to use this type to prevent template proliferation. */
+typedef SkAutoSTMalloc<16, WCHAR> SkSMallocWCHAR;
+
+/** Converts a utf8 string to a WCHAR string. */
+HRESULT sk_cstring_to_wchar(const char* skname, SkSMallocWCHAR* name);
+
+/** Converts a WCHAR string to a utf8 string.
+ * @param nameLen the number of WCHARs in the name.
+ */
+HRESULT sk_wchar_to_skstring(WCHAR* name, int nameLen, SkString* skname);
+
+////////////////////////////////////////////////////////////////////////////////
+// Locale
+
+void sk_get_locale_string(IDWriteLocalizedStrings* names, const WCHAR* preferedLocale,
+ SkString* skname);
+
+typedef int (WINAPI *SkGetUserDefaultLocaleNameProc)(LPWSTR, int);
+HRESULT SkGetGetUserDefaultLocaleNameProc(SkGetUserDefaultLocaleNameProc* proc);
+
+////////////////////////////////////////////////////////////////////////////////
+// Table handling
+
+class AutoDWriteTable {
+public:
+ AutoDWriteTable(IDWriteFontFace* fontFace, UINT32 beTag) : fExists(FALSE), fFontFace(fontFace) {
+ // Any errors are ignored, user must check fExists anyway.
+ fontFace->TryGetFontTable(beTag,
+ reinterpret_cast<const void **>(&fData), &fSize, &fLock, &fExists);
+ }
+ ~AutoDWriteTable() {
+ if (fExists) {
+ fFontFace->ReleaseFontTable(fLock);
+ }
+ }
+
+ const uint8_t* fData;
+ UINT32 fSize;
+ BOOL fExists;
+private:
+ // Borrowed reference, the user must ensure the fontFace stays alive.
+ IDWriteFontFace* fFontFace;
+ void* fLock;
+};
+template<typename T> class AutoTDWriteTable : public AutoDWriteTable {
+public:
+ static const UINT32 tag = DWRITE_MAKE_OPENTYPE_TAG(T::TAG0, T::TAG1, T::TAG2, T::TAG3);
+ AutoTDWriteTable(IDWriteFontFace* fontFace) : AutoDWriteTable(fontFace, tag) { }
+
+ const T* get() const { return reinterpret_cast<const T*>(fData); }
+ const T* operator->() const { return reinterpret_cast<const T*>(fData); }
+};
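+
+// Illustrative usage (sketch): T is assumed to be a struct that supplies the four
+// TAG bytes and matches the big-endian table layout, e.g. Skia's SkOTTableHead
+// from src/sfnt (not part of this patch).
+//
+//   AutoTDWriteTable<SkOTTableHead> head(fontFace);
+//   if (head.fExists) {
+//       // head->... fields are readable until 'head' goes out of scope.
+//   }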
+
+////////////////////////////////////////////////////////////////////////////////
+// Style conversion
+
+struct DWriteStyle {
+ explicit DWriteStyle(const SkFontStyle& pattern) {
+ fWeight = (DWRITE_FONT_WEIGHT)pattern.weight();
+ fWidth = (DWRITE_FONT_STRETCH)pattern.width();
+ switch (pattern.slant()) {
+ case SkFontStyle::kUpright_Slant: fSlant = DWRITE_FONT_STYLE_NORMAL ; break;
+ case SkFontStyle::kItalic_Slant: fSlant = DWRITE_FONT_STYLE_ITALIC ; break;
+ case SkFontStyle::kOblique_Slant: fSlant = DWRITE_FONT_STYLE_OBLIQUE; break;
+ default: SkASSERT(false); break;
+ }
+ }
+ DWRITE_FONT_WEIGHT fWeight;
+ DWRITE_FONT_STRETCH fWidth;
+ DWRITE_FONT_STYLE fSlant;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.cpp b/gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.cpp
new file mode 100644
index 000000000..6c73441e7
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.cpp
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32)
+
+#include "SkTypes.h"
+#include "SkDWriteFontFileStream.h"
+#include "SkHRESULT.h"
+#include "SkTemplates.h"
+#include "SkTFitsIn.h"
+#include "SkTScopedComPtr.h"
+
+#include <dwrite.h>
+
+///////////////////////////////////////////////////////////////////////////////
+// SkIDWriteFontFileStream
+
+SkDWriteFontFileStream::SkDWriteFontFileStream(IDWriteFontFileStream* fontFileStream)
+ : fFontFileStream(SkRefComPtr(fontFileStream))
+ , fPos(0)
+ , fLockedMemory(nullptr)
+ , fFragmentLock(nullptr) {
+}
+
+SkDWriteFontFileStream::~SkDWriteFontFileStream() {
+ if (fFragmentLock) {
+ fFontFileStream->ReleaseFileFragment(fFragmentLock);
+ }
+}
+
+size_t SkDWriteFontFileStream::read(void* buffer, size_t size) {
+ HRESULT hr = S_OK;
+
+ if (nullptr == buffer) {
+ size_t fileSize = this->getLength();
+
+ if (fPos + size > fileSize) {
+ size_t skipped = fileSize - fPos;
+ fPos = fileSize;
+ return skipped;
+ } else {
+ fPos += size;
+ return size;
+ }
+ }
+
+ const void* start;
+ void* fragmentLock;
+ hr = fFontFileStream->ReadFileFragment(&start, fPos, size, &fragmentLock);
+ if (SUCCEEDED(hr)) {
+ memcpy(buffer, start, size);
+ fFontFileStream->ReleaseFileFragment(fragmentLock);
+ fPos += size;
+ return size;
+ }
+
+ //The read may have failed because we asked for too much data.
+ size_t fileSize = this->getLength();
+ if (fPos + size <= fileSize) {
+ //This means we were within bounds, but failed for some other reason.
+ return 0;
+ }
+
+ size_t read = fileSize - fPos;
+ hr = fFontFileStream->ReadFileFragment(&start, fPos, read, &fragmentLock);
+ if (SUCCEEDED(hr)) {
+ memcpy(buffer, start, read);
+ fFontFileStream->ReleaseFileFragment(fragmentLock);
+ fPos = fileSize;
+ return read;
+ }
+
+ return 0;
+}
+
+bool SkDWriteFontFileStream::isAtEnd() const {
+ return fPos == this->getLength();
+}
+
+bool SkDWriteFontFileStream::rewind() {
+ fPos = 0;
+ return true;
+}
+
+SkDWriteFontFileStream* SkDWriteFontFileStream::duplicate() const {
+ return new SkDWriteFontFileStream(fFontFileStream.get());
+}
+
+size_t SkDWriteFontFileStream::getPosition() const {
+ return fPos;
+}
+
+bool SkDWriteFontFileStream::seek(size_t position) {
+ size_t length = this->getLength();
+ fPos = (position > length) ? length : position;
+ return true;
+}
+
+bool SkDWriteFontFileStream::move(long offset) {
+ return seek(fPos + offset);
+}
+
+SkDWriteFontFileStream* SkDWriteFontFileStream::fork() const {
+ SkAutoTDelete<SkDWriteFontFileStream> that(this->duplicate());
+ that->seek(fPos);
+ return that.release();
+}
+
+size_t SkDWriteFontFileStream::getLength() const {
+ HRESULT hr = S_OK;
+ UINT64 realFileSize = 0;
+ hr = fFontFileStream->GetFileSize(&realFileSize);
+ if (!SkTFitsIn<size_t>(realFileSize)) {
+ return 0;
+ }
+ return static_cast<size_t>(realFileSize);
+}
+
+const void* SkDWriteFontFileStream::getMemoryBase() {
+ if (fLockedMemory) {
+ return fLockedMemory;
+ }
+
+ UINT64 fileSize;
+ HRNM(fFontFileStream->GetFileSize(&fileSize), "Could not get file size");
+ HRNM(fFontFileStream->ReadFileFragment(&fLockedMemory, 0, fileSize, &fFragmentLock),
+ "Could not lock file fragment.");
+ return fLockedMemory;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// SkIDWriteFontFileStreamWrapper
+
+HRESULT SkDWriteFontFileStreamWrapper::Create(SkStreamAsset* stream,
+ SkDWriteFontFileStreamWrapper** streamFontFileStream)
+{
+ *streamFontFileStream = new SkDWriteFontFileStreamWrapper(stream);
+ if (nullptr == *streamFontFileStream) {
+ return E_OUTOFMEMORY;
+ }
+ return S_OK;
+}
+
+SkDWriteFontFileStreamWrapper::SkDWriteFontFileStreamWrapper(SkStreamAsset* stream)
+ : fRefCount(1), fStream(stream) {
+}
+
+HRESULT STDMETHODCALLTYPE SkDWriteFontFileStreamWrapper::QueryInterface(REFIID iid, void** ppvObject) {
+ if (iid == IID_IUnknown || iid == __uuidof(IDWriteFontFileStream)) {
+ *ppvObject = this;
+ AddRef();
+ return S_OK;
+ } else {
+ *ppvObject = nullptr;
+ return E_NOINTERFACE;
+ }
+}
+
+ULONG STDMETHODCALLTYPE SkDWriteFontFileStreamWrapper::AddRef() {
+ return InterlockedIncrement(&fRefCount);
+}
+
+ULONG STDMETHODCALLTYPE SkDWriteFontFileStreamWrapper::Release() {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+}
+
+HRESULT STDMETHODCALLTYPE SkDWriteFontFileStreamWrapper::ReadFileFragment(
+ void const** fragmentStart,
+ UINT64 fileOffset,
+ UINT64 fragmentSize,
+ void** fragmentContext)
+{
+ // The loader is responsible for doing a bounds check.
+ UINT64 fileSize;
+ this->GetFileSize(&fileSize);
+ if (fileOffset > fileSize || fragmentSize > fileSize - fileOffset) {
+ *fragmentStart = nullptr;
+ *fragmentContext = nullptr;
+ return E_FAIL;
+ }
+
+ if (!SkTFitsIn<size_t>(fileOffset + fragmentSize)) {
+ return E_FAIL;
+ }
+
+ const void* data = fStream->getMemoryBase();
+ if (data) {
+ *fragmentStart = static_cast<BYTE const*>(data) + static_cast<size_t>(fileOffset);
+ *fragmentContext = nullptr;
+
+ } else {
+ // May be called from multiple threads.
+ SkAutoMutexAcquire ama(fStreamMutex);
+
+ *fragmentStart = nullptr;
+ *fragmentContext = nullptr;
+
+ if (!fStream->seek(static_cast<size_t>(fileOffset))) {
+ return E_FAIL;
+ }
+ SkAutoTMalloc<uint8_t> streamData(static_cast<size_t>(fragmentSize));
+ if (fStream->read(streamData.get(), static_cast<size_t>(fragmentSize)) != fragmentSize) {
+ return E_FAIL;
+ }
+
+ *fragmentStart = streamData.get();
+ *fragmentContext = streamData.release();
+ }
+ return S_OK;
+}
+
+void STDMETHODCALLTYPE SkDWriteFontFileStreamWrapper::ReleaseFileFragment(void* fragmentContext) {
+ sk_free(fragmentContext);
+}
+
+HRESULT STDMETHODCALLTYPE SkDWriteFontFileStreamWrapper::GetFileSize(UINT64* fileSize) {
+ *fileSize = fStream->getLength();
+ return S_OK;
+}
+
+HRESULT STDMETHODCALLTYPE SkDWriteFontFileStreamWrapper::GetLastWriteTime(UINT64* lastWriteTime) {
+ // The concept of last write time does not apply to this loader.
+ *lastWriteTime = 0;
+ return E_NOTIMPL;
+}
+
+#endif//defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.h b/gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.h
new file mode 100644
index 000000000..e78b621c0
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDWriteFontFileStream_DEFINED
+#define SkDWriteFontFileStream_DEFINED
+
+#include "SkTypes.h"
+
+#include "SkMutex.h"
+#include "SkStream.h"
+#include "SkTScopedComPtr.h"
+
+#include <dwrite.h>
+
+/**
+ * An SkStream backed by an IDWriteFontFileStream.
+ * This allows Skia code to read an IDWriteFontFileStream.
+ */
+class SkDWriteFontFileStream : public SkStreamMemory {
+public:
+ explicit SkDWriteFontFileStream(IDWriteFontFileStream* fontFileStream);
+ virtual ~SkDWriteFontFileStream();
+
+ size_t read(void* buffer, size_t size) override;
+ bool isAtEnd() const override;
+ bool rewind() override;
+ SkDWriteFontFileStream* duplicate() const override;
+ size_t getPosition() const override;
+ bool seek(size_t position) override;
+ bool move(long offset) override;
+ SkDWriteFontFileStream* fork() const override;
+ size_t getLength() const override;
+ const void* getMemoryBase() override;
+
+private:
+ SkTScopedComPtr<IDWriteFontFileStream> fFontFileStream;
+ size_t fPos;
+ const void* fLockedMemory;
+ void* fFragmentLock;
+};
+
+/**
+ * An IDWriteFontFileStream backed by an SkStream.
+ * This allows DirectWrite to read an SkStream.
+ */
+class SkDWriteFontFileStreamWrapper : public IDWriteFontFileStream {
+public:
+ // IUnknown methods
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, void** ppvObject);
+ virtual ULONG STDMETHODCALLTYPE AddRef();
+ virtual ULONG STDMETHODCALLTYPE Release();
+
+ // IDWriteFontFileStream methods
+ virtual HRESULT STDMETHODCALLTYPE ReadFileFragment(
+ void const** fragmentStart,
+ UINT64 fileOffset,
+ UINT64 fragmentSize,
+ void** fragmentContext);
+
+ virtual void STDMETHODCALLTYPE ReleaseFileFragment(void* fragmentContext);
+ virtual HRESULT STDMETHODCALLTYPE GetFileSize(UINT64* fileSize);
+ virtual HRESULT STDMETHODCALLTYPE GetLastWriteTime(UINT64* lastWriteTime);
+
+ static HRESULT Create(SkStreamAsset* stream,
+ SkDWriteFontFileStreamWrapper** streamFontFileStream);
+
+private:
+ explicit SkDWriteFontFileStreamWrapper(SkStreamAsset* stream);
+ virtual ~SkDWriteFontFileStreamWrapper() { }
+
+ ULONG fRefCount;
+ SkAutoTDelete<SkStreamAsset> fStream;
+ SkMutex fStreamMutex;
+};
+#endif
diff --git a/gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.cpp b/gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.cpp
new file mode 100644
index 000000000..9f6a97caf
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.cpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32)
+
+#include "SkDWriteGeometrySink.h"
+#include "SkFloatUtils.h"
+#include "SkPath.h"
+
+#include <dwrite.h>
+#include <d2d1.h>
+
+SkDWriteGeometrySink::SkDWriteGeometrySink(SkPath* path) : fRefCount(1), fPath(path) { }
+
+SkDWriteGeometrySink::~SkDWriteGeometrySink() { }
+
+HRESULT STDMETHODCALLTYPE SkDWriteGeometrySink::QueryInterface(REFIID iid, void **object) {
+ if (nullptr == object) {
+ return E_INVALIDARG;
+ }
+ if (iid == __uuidof(IUnknown) || iid == __uuidof(IDWriteGeometrySink)) {
+ *object = static_cast<IDWriteGeometrySink*>(this);
+ this->AddRef();
+ return S_OK;
+ } else {
+ *object = nullptr;
+ return E_NOINTERFACE;
+ }
+}
+
+ULONG STDMETHODCALLTYPE SkDWriteGeometrySink::AddRef(void) {
+ return static_cast<ULONG>(InterlockedIncrement(&fRefCount));
+}
+
+ULONG STDMETHODCALLTYPE SkDWriteGeometrySink::Release(void) {
+ ULONG res = static_cast<ULONG>(InterlockedDecrement(&fRefCount));
+ if (0 == res) {
+ delete this;
+ }
+ return res;
+}
+
+void STDMETHODCALLTYPE SkDWriteGeometrySink::SetFillMode(D2D1_FILL_MODE fillMode) {
+ switch (fillMode) {
+ case D2D1_FILL_MODE_ALTERNATE:
+ fPath->setFillType(SkPath::kEvenOdd_FillType);
+ break;
+ case D2D1_FILL_MODE_WINDING:
+ fPath->setFillType(SkPath::kWinding_FillType);
+ break;
+ default:
+ SkDEBUGFAIL("Unknown D2D1_FILL_MODE.");
+ break;
+ }
+}
+
+void STDMETHODCALLTYPE SkDWriteGeometrySink::SetSegmentFlags(D2D1_PATH_SEGMENT vertexFlags) {
+ if (vertexFlags == D2D1_PATH_SEGMENT_NONE || vertexFlags == D2D1_PATH_SEGMENT_FORCE_ROUND_LINE_JOIN) {
+ SkDEBUGFAIL("Invalid D2D1_PATH_SEGMENT value.");
+ }
+}
+
+void STDMETHODCALLTYPE SkDWriteGeometrySink::BeginFigure(D2D1_POINT_2F startPoint, D2D1_FIGURE_BEGIN figureBegin) {
+ fPath->moveTo(startPoint.x, startPoint.y);
+ if (figureBegin == D2D1_FIGURE_BEGIN_HOLLOW) {
+ SkDEBUGFAIL("Invalid D2D1_FIGURE_BEGIN value.");
+ }
+}
+
+void STDMETHODCALLTYPE SkDWriteGeometrySink::AddLines(const D2D1_POINT_2F *points, UINT pointsCount) {
+ for (const D2D1_POINT_2F *end = &points[pointsCount]; points < end; ++points) {
+ fPath->lineTo(points->x, points->y);
+ }
+}
+
+static bool approximately_equal(float a, float b) {
+ const SkFloatingPoint<float, 10> lhs(a), rhs(b);
+ return lhs.AlmostEquals(rhs);
+}
+
+typedef struct {
+ float x;
+ float y;
+} Cubic[4], Quadratic[3];
+
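+// check_quadratic tests whether the cubic is (numerically) a degree-elevated
+// quadratic. Elevating a quadratic {q0, q1, q2} gives a cubic with
+//   c0 = q0, c1 = q0 + 2/3*(q1 - q0), c2 = q2 + 2/3*(q1 - q2), c3 = q2,
+// so both c0 + 3/2*(c1 - c0) and c3 + 3/2*(c2 - c3) must recover the same q1;
+// that shared point becomes the control point stored in 'reduction'.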
+static bool check_quadratic(const Cubic& cubic, Quadratic& reduction) {
+ float dx10 = cubic[1].x - cubic[0].x;
+ float dx23 = cubic[2].x - cubic[3].x;
+ float midX = cubic[0].x + dx10 * 3 / 2;
+ //NOTE: !approximately_equal(midX - cubic[3].x, dx23 * 3 / 2)
+ //does not work as subnormals get in between the left side and 0.
+ if (!approximately_equal(midX, (dx23 * 3 / 2) + cubic[3].x)) {
+ return false;
+ }
+ float dy10 = cubic[1].y - cubic[0].y;
+ float dy23 = cubic[2].y - cubic[3].y;
+ float midY = cubic[0].y + dy10 * 3 / 2;
+ if (!approximately_equal(midY, (dy23 * 3 / 2) + cubic[3].y)) {
+ return false;
+ }
+ reduction[0] = cubic[0];
+ reduction[1].x = midX;
+ reduction[1].y = midY;
+ reduction[2] = cubic[3];
+ return true;
+}
+
+void STDMETHODCALLTYPE SkDWriteGeometrySink::AddBeziers(const D2D1_BEZIER_SEGMENT *beziers, UINT beziersCount) {
+ SkPoint lastPt;
+ fPath->getLastPt(&lastPt);
+ D2D1_POINT_2F prevPt = { SkScalarToFloat(lastPt.fX), SkScalarToFloat(lastPt.fY) };
+
+ for (const D2D1_BEZIER_SEGMENT *end = &beziers[beziersCount]; beziers < end; ++beziers) {
+ Cubic cubic = { { prevPt.x, prevPt.y },
+ { beziers->point1.x, beziers->point1.y },
+ { beziers->point2.x, beziers->point2.y },
+ { beziers->point3.x, beziers->point3.y }, };
+ Quadratic quadratic;
+ if (check_quadratic(cubic, quadratic)) {
+ fPath->quadTo(quadratic[1].x, quadratic[1].y,
+ quadratic[2].x, quadratic[2].y);
+ } else {
+ fPath->cubicTo(beziers->point1.x, beziers->point1.y,
+ beziers->point2.x, beziers->point2.y,
+ beziers->point3.x, beziers->point3.y);
+ }
+ prevPt = beziers->point3;
+ }
+}
+
+void STDMETHODCALLTYPE SkDWriteGeometrySink::EndFigure(D2D1_FIGURE_END figureEnd) {
+ fPath->close();
+}
+
+HRESULT SkDWriteGeometrySink::Close() {
+ return S_OK;
+}
+
+HRESULT SkDWriteGeometrySink::Create(SkPath* path, IDWriteGeometrySink** geometryToPath) {
+ *geometryToPath = new SkDWriteGeometrySink(path);
+ return S_OK;
+}
+
+#endif//defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.h b/gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.h
new file mode 100644
index 000000000..417c7f0d0
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDWriteToPath_DEFINED
+#define SkDWriteToPath_DEFINED
+
+#include "SkTypes.h"
+
+class SkPath;
+
+#include <dwrite.h>
+#include <d2d1.h>
+
+class SkDWriteGeometrySink : public IDWriteGeometrySink {
+private:
+ LONG fRefCount;
+ SkPath* fPath;
+
+ SkDWriteGeometrySink(const SkDWriteGeometrySink&);
+ SkDWriteGeometrySink& operator=(const SkDWriteGeometrySink&);
+
+protected:
+ explicit SkDWriteGeometrySink(SkPath* path);
+ virtual ~SkDWriteGeometrySink();
+
+public:
+ HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, void **object) override;
+ ULONG STDMETHODCALLTYPE AddRef(void) override;
+ ULONG STDMETHODCALLTYPE Release(void) override;
+
+ void STDMETHODCALLTYPE SetFillMode(D2D1_FILL_MODE fillMode) override;
+ void STDMETHODCALLTYPE SetSegmentFlags(D2D1_PATH_SEGMENT vertexFlags) override;
+ void STDMETHODCALLTYPE BeginFigure(D2D1_POINT_2F startPoint, D2D1_FIGURE_BEGIN figureBegin) override;
+ void STDMETHODCALLTYPE AddLines(const D2D1_POINT_2F *points, UINT pointsCount) override;
+ void STDMETHODCALLTYPE AddBeziers(const D2D1_BEZIER_SEGMENT *beziers, UINT beziersCount) override;
+ void STDMETHODCALLTYPE EndFigure(D2D1_FIGURE_END figureEnd) override;
+ HRESULT STDMETHODCALLTYPE Close() override;
+
+ static HRESULT Create(SkPath* path, IDWriteGeometrySink** geometryToPath);
+};
+
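+// Illustrative usage (sketch): feeding a DirectWrite glyph outline into an SkPath.
+// 'fontFace', 'emSize' and 'glyphId' are assumed to be supplied by the caller.
+//
+//   SkPath path;
+//   SkTScopedComPtr<IDWriteGeometrySink> sink;
+//   HRN(SkDWriteGeometrySink::Create(&path, &sink));
+//   HRN(fontFace->GetGlyphRunOutline(emSize, &glyphId, nullptr, nullptr, 1,
+//                                    FALSE, FALSE, sink.get()));
+//   // 'path' now holds the outline with the fill type set by the sink.
+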
+#endif
diff --git a/gfx/skia/skia/src/utils/win/SkHRESULT.cpp b/gfx/skia/skia/src/utils/win/SkHRESULT.cpp
new file mode 100644
index 000000000..d95629961
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkHRESULT.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32)
+
+#include "SkHRESULT.h"
+
+void SkTraceHR(const char* file, unsigned long line, HRESULT hr, const char* msg) {
+ if (msg) {
+ SkDebugf("%s\n", msg);
+ }
+ SkDebugf("%s(%lu) : error 0x%x: ", file, line, hr);
+
+ LPSTR errorText = nullptr;
+ FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ nullptr,
+ hr,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPSTR) &errorText,
+ 0,
+ nullptr
+ );
+
+ if (nullptr == errorText) {
+ SkDebugf("<unknown>\n");
+ } else {
+ SkDebugf("%s", errorText);
+ LocalFree(errorText);
+ errorText = nullptr;
+ }
+}
+
+#endif//defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/utils/win/SkHRESULT.h b/gfx/skia/skia/src/utils/win/SkHRESULT.h
new file mode 100644
index 000000000..a9cd8c408
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkHRESULT.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkHRESULT_DEFINED
+#define SkHRESULT_DEFINED
+
+#include "SkTypes.h"
+#ifdef SK_BUILD_FOR_WIN
+
+#include "SkLeanWindows.h"
+
+void SkTraceHR(const char* file, unsigned long line,
+ HRESULT hr, const char* msg);
+
+#ifdef SK_DEBUG
+#define SK_TRACEHR(_hr, _msg) SkTraceHR(__FILE__, __LINE__, _hr, _msg)
+#else
+#define SK_TRACEHR(_hr, _msg) sk_ignore_unused_variable(_hr)
+#endif
+
+#define HR_GENERAL(_ex, _msg, _ret) {\
+ HRESULT _hr = _ex;\
+ if (FAILED(_hr)) {\
+ SK_TRACEHR(_hr, _msg);\
+ return _ret;\
+ }\
+}
+
+//@{
+/**
+These macros are for reporting HRESULT errors.
+The expression will be evaluated.
+If the resulting HRESULT SUCCEEDED then execution will continue normally.
+If the HRESULT FAILED then the macro will return from the current function.
+In variants ending with 'M' the given message will be traced when FAILED.
+The HR variants will return the HRESULT when FAILED.
+The HRB variants will return false when FAILED.
+The HRN variants will return nullptr when FAILED.
+The HRV variants will simply return when FAILED.
+The HRZ variants will return 0 when FAILED.
+*/
+#define HR(ex) HR_GENERAL(ex, nullptr, _hr)
+#define HRM(ex, msg) HR_GENERAL(ex, msg, _hr)
+
+#define HRB(ex) HR_GENERAL(ex, nullptr, false)
+#define HRBM(ex, msg) HR_GENERAL(ex, msg, false)
+
+#define HRN(ex) HR_GENERAL(ex, nullptr, nullptr)
+#define HRNM(ex, msg) HR_GENERAL(ex, msg, nullptr)
+
+#define HRV(ex) HR_GENERAL(ex, nullptr, )
+#define HRVM(ex, msg) HR_GENERAL(ex, msg, )
+
+#define HRZ(ex) HR_GENERAL(ex, nullptr, 0)
+#define HRZM(ex, msg) HR_GENERAL(ex, msg, 0)
+//@}
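+
+// Illustrative usage (sketch): pick the variant matching the enclosing function's
+// failure return value. 'init_factory' is a hypothetical helper; the
+// DWriteCreateFactory call mirrors the one in SkDWrite.cpp.
+//
+//   bool init_factory(IDWriteFactory** factory) {
+//       HRBM(DWriteCreateFactory(DWRITE_FACTORY_TYPE_SHARED,
+//                                __uuidof(IDWriteFactory),
+//                                reinterpret_cast<IUnknown**>(factory)),
+//            "Could not create DirectWrite factory.");
+//       return true;
+//   }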
+#endif // SK_BUILD_FOR_WIN
+#endif // SkHRESULT_DEFINED
diff --git a/gfx/skia/skia/src/utils/win/SkIStream.cpp b/gfx/skia/skia/src/utils/win/SkIStream.cpp
new file mode 100644
index 000000000..560a947fc
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkIStream.cpp
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32)
+
+#include "SkIStream.h"
+#include "SkStream.h"
+
+/**
+ * SkBaseIStream
+ */
+SkBaseIStream::SkBaseIStream() : _refcount(1) { }
+SkBaseIStream::~SkBaseIStream() { }
+
+HRESULT STDMETHODCALLTYPE SkBaseIStream::QueryInterface(REFIID iid
+ , void ** ppvObject)
+{
+ if (nullptr == ppvObject) {
+ return E_INVALIDARG;
+ }
+ if (iid == __uuidof(IUnknown)
+ || iid == __uuidof(IStream)
+ || iid == __uuidof(ISequentialStream))
+ {
+ *ppvObject = static_cast<IStream*>(this);
+ AddRef();
+ return S_OK;
+ } else {
+ *ppvObject = nullptr;
+ return E_NOINTERFACE;
+ }
+}
+
+ULONG STDMETHODCALLTYPE SkBaseIStream::AddRef(void) {
+ return (ULONG)InterlockedIncrement(&_refcount);
+}
+
+ULONG STDMETHODCALLTYPE SkBaseIStream::Release(void) {
+ ULONG res = (ULONG) InterlockedDecrement(&_refcount);
+ if (0 == res) {
+ delete this;
+ }
+ return res;
+}
+
+// ISequentialStream Interface
+HRESULT STDMETHODCALLTYPE SkBaseIStream::Read(void* pv
+ , ULONG cb
+ , ULONG* pcbRead)
+{ return E_NOTIMPL; }
+
+HRESULT STDMETHODCALLTYPE SkBaseIStream::Write(void const* pv
+ , ULONG cb
+ , ULONG* pcbWritten)
+{ return E_NOTIMPL; }
+
+// IStream Interface
+HRESULT STDMETHODCALLTYPE SkBaseIStream::SetSize(ULARGE_INTEGER)
+{ return E_NOTIMPL; }
+
+HRESULT STDMETHODCALLTYPE SkBaseIStream::CopyTo(IStream*
+ , ULARGE_INTEGER
+ , ULARGE_INTEGER*
+ , ULARGE_INTEGER*)
+{ return E_NOTIMPL; }
+
+HRESULT STDMETHODCALLTYPE SkBaseIStream::Commit(DWORD)
+{ return E_NOTIMPL; }
+
+HRESULT STDMETHODCALLTYPE SkBaseIStream::Revert(void)
+{ return E_NOTIMPL; }
+
+HRESULT STDMETHODCALLTYPE SkBaseIStream::LockRegion(ULARGE_INTEGER
+ , ULARGE_INTEGER
+ , DWORD)
+{ return E_NOTIMPL; }
+
+HRESULT STDMETHODCALLTYPE SkBaseIStream::UnlockRegion(ULARGE_INTEGER
+ , ULARGE_INTEGER
+ , DWORD)
+{ return E_NOTIMPL; }
+
+HRESULT STDMETHODCALLTYPE SkBaseIStream::Clone(IStream **)
+{ return E_NOTIMPL; }
+
+HRESULT STDMETHODCALLTYPE SkBaseIStream::Seek(LARGE_INTEGER liDistanceToMove
+ , DWORD dwOrigin
+ , ULARGE_INTEGER* lpNewFilePointer)
+{ return E_NOTIMPL; }
+
+HRESULT STDMETHODCALLTYPE SkBaseIStream::Stat(STATSTG* pStatstg
+ , DWORD grfStatFlag)
+{ return E_NOTIMPL; }
+
+
+/**
+ * SkIStream
+ */
+SkIStream::SkIStream(SkStream* stream, bool deleteOnRelease)
+ : SkBaseIStream()
+ , fSkStream(stream)
+ , fDeleteOnRelease(deleteOnRelease)
+ , fLocation()
+{
+ this->fSkStream->rewind();
+}
+
+SkIStream::~SkIStream() {
+ if (fDeleteOnRelease) {
+ delete this->fSkStream;
+ }
+}
+
+HRESULT SkIStream::CreateFromSkStream(SkStream* stream
+ , bool deleteOnRelease
+ , IStream ** ppStream)
+{
+ if (nullptr == stream) {
+ return E_INVALIDARG;
+ }
+ *ppStream = new SkIStream(stream, deleteOnRelease);
+ return S_OK;
+}
+
+// ISequentialStream Interface
+HRESULT STDMETHODCALLTYPE SkIStream::Read(void* pv, ULONG cb, ULONG* pcbRead) {
+ *pcbRead = static_cast<ULONG>(this->fSkStream->read(pv, cb));
+ this->fLocation.QuadPart += *pcbRead;
+ return (*pcbRead == cb) ? S_OK : S_FALSE;
+}
+
+HRESULT STDMETHODCALLTYPE SkIStream::Write(void const* pv
+ , ULONG cb
+ , ULONG* pcbWritten)
+{
+ return STG_E_CANTSAVE;
+}
+
+// IStream Interface
+HRESULT STDMETHODCALLTYPE SkIStream::Seek(LARGE_INTEGER liDistanceToMove
+ , DWORD dwOrigin
+ , ULARGE_INTEGER* lpNewFilePointer)
+{
+ HRESULT hr = S_OK;
+
+ switch(dwOrigin) {
+ case STREAM_SEEK_SET: {
+ if (!this->fSkStream->rewind()) {
+ hr = E_FAIL;
+ } else {
+ size_t skipped = this->fSkStream->skip(
+ static_cast<size_t>(liDistanceToMove.QuadPart)
+ );
+ this->fLocation.QuadPart = skipped;
+ if (skipped != liDistanceToMove.QuadPart) {
+ hr = E_FAIL;
+ }
+ }
+ break;
+ }
+ case STREAM_SEEK_CUR: {
+ size_t skipped = this->fSkStream->skip(
+ static_cast<size_t>(liDistanceToMove.QuadPart)
+ );
+ this->fLocation.QuadPart += skipped;
+ if (skipped != liDistanceToMove.QuadPart) {
+ hr = E_FAIL;
+ }
+ break;
+ }
+ case STREAM_SEEK_END: {
+ if (!this->fSkStream->rewind()) {
+ hr = E_FAIL;
+ } else {
+ // FIXME: Should not depend on getLength.
+ // See https://code.google.com/p/skia/issues/detail?id=1570
+ LONGLONG skip = this->fSkStream->getLength()
+ + liDistanceToMove.QuadPart;
+ size_t skipped = this->fSkStream->skip(static_cast<size_t>(skip));
+ this->fLocation.QuadPart = skipped;
+ if (skipped != skip) {
+ hr = E_FAIL;
+ }
+ }
+ break;
+ }
+ default:
+ hr = STG_E_INVALIDFUNCTION;
+ break;
+ }
+
+ if (lpNewFilePointer) {
+ lpNewFilePointer->QuadPart = this->fLocation.QuadPart;
+ }
+ return hr;
+}
+
+HRESULT STDMETHODCALLTYPE SkIStream::Stat(STATSTG* pStatstg
+ , DWORD grfStatFlag)
+{
+ if (0 == (grfStatFlag & STATFLAG_NONAME)) {
+ return STG_E_INVALIDFLAG;
+ }
+ pStatstg->pwcsName = nullptr;
+ // FIXME: Should not depend on getLength
+ // See https://code.google.com/p/skia/issues/detail?id=1570
+ pStatstg->cbSize.QuadPart = this->fSkStream->getLength();
+ pStatstg->clsid = CLSID_NULL;
+ pStatstg->type = STGTY_STREAM;
+ pStatstg->grfMode = STGM_READ;
+ return S_OK;
+}
+
+
+/**
+ * SkWIStream
+ */
+SkWIStream::SkWIStream(SkWStream* stream)
+ : SkBaseIStream()
+ , fSkWStream(stream)
+{ }
+
+SkWIStream::~SkWIStream() {
+ if (this->fSkWStream) {
+ this->fSkWStream->flush();
+ }
+}
+
+HRESULT SkWIStream::CreateFromSkWStream(SkWStream* stream
+ , IStream ** ppStream)
+{
+ *ppStream = new SkWIStream(stream);
+ return S_OK;
+}
+
+// ISequentialStream Interface
+HRESULT STDMETHODCALLTYPE SkWIStream::Write(void const* pv
+ , ULONG cb
+ , ULONG* pcbWritten)
+{
+ HRESULT hr = S_OK;
+ bool wrote = this->fSkWStream->write(pv, cb);
+ if (wrote) {
+ *pcbWritten = cb;
+ } else {
+ *pcbWritten = 0;
+ hr = S_FALSE;
+ }
+ return hr;
+}
+
+// IStream Interface
+HRESULT STDMETHODCALLTYPE SkWIStream::Commit(DWORD) {
+ this->fSkWStream->flush();
+ return S_OK;
+}
+
+HRESULT STDMETHODCALLTYPE SkWIStream::Stat(STATSTG* pStatstg
+ , DWORD grfStatFlag)
+{
+ if (0 == (grfStatFlag & STATFLAG_NONAME)) {
+ return STG_E_INVALIDFLAG;
+ }
+ pStatstg->pwcsName = nullptr;
+ pStatstg->cbSize.QuadPart = 0;
+ pStatstg->clsid = CLSID_NULL;
+ pStatstg->type = STGTY_STREAM;
+ pStatstg->grfMode = STGM_WRITE;
+ return S_OK;
+}
+#endif//defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/utils/win/SkIStream.h b/gfx/skia/skia/src/utils/win/SkIStream.h
new file mode 100644
index 000000000..a3315c121
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkIStream.h
@@ -0,0 +1,135 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkIStream_DEFINED
+#define SkIStream_DEFINED
+
+#include "SkTypes.h"
+
+#ifdef SK_BUILD_FOR_WIN
+
+#include "SkLeanWindows.h"
+#include <ole2.h>
+
+class SkStream;
+class SkWStream;
+
+/**
+ * A bare IStream implementation which properly reference counts
+ * but returns E_NOTIMPL for all ISequentialStream and IStream methods.
+ */
+class SkBaseIStream : public IStream {
+private:
+ LONG _refcount;
+
+protected:
+ explicit SkBaseIStream();
+ virtual ~SkBaseIStream();
+
+public:
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid
+ , void ** ppvObject);
+ virtual ULONG STDMETHODCALLTYPE AddRef(void);
+ virtual ULONG STDMETHODCALLTYPE Release(void);
+
+ // ISequentialStream Interface
+public:
+ virtual HRESULT STDMETHODCALLTYPE Read(void* pv, ULONG cb, ULONG* pcbRead);
+
+ virtual HRESULT STDMETHODCALLTYPE Write(void const* pv
+ , ULONG cb
+ , ULONG* pcbWritten);
+
+ // IStream Interface
+public:
+ virtual HRESULT STDMETHODCALLTYPE SetSize(ULARGE_INTEGER);
+
+ virtual HRESULT STDMETHODCALLTYPE CopyTo(IStream*
+ , ULARGE_INTEGER
+ , ULARGE_INTEGER*
+ , ULARGE_INTEGER*);
+
+ virtual HRESULT STDMETHODCALLTYPE Commit(DWORD);
+
+ virtual HRESULT STDMETHODCALLTYPE Revert(void);
+
+ virtual HRESULT STDMETHODCALLTYPE LockRegion(ULARGE_INTEGER
+ , ULARGE_INTEGER
+ , DWORD);
+
+ virtual HRESULT STDMETHODCALLTYPE UnlockRegion(ULARGE_INTEGER
+ , ULARGE_INTEGER
+ , DWORD);
+
+ virtual HRESULT STDMETHODCALLTYPE Clone(IStream **);
+
+ virtual HRESULT STDMETHODCALLTYPE Seek(LARGE_INTEGER liDistanceToMove
+ , DWORD dwOrigin
+ , ULARGE_INTEGER* lpNewFilePointer);
+
+ virtual HRESULT STDMETHODCALLTYPE Stat(STATSTG* pStatstg
+ , DWORD grfStatFlag);
+};
+
+/**
+ * A minimal read-only IStream implementation which wraps an SkStream.
+ */
+class SkIStream : public SkBaseIStream {
+private:
+ SkStream *fSkStream;
+ const bool fDeleteOnRelease;
+ ULARGE_INTEGER fLocation;
+
+ SkIStream(SkStream* stream, bool fDeleteOnRelease);
+ virtual ~SkIStream();
+
+public:
+ HRESULT static CreateFromSkStream(SkStream* stream
+ , bool fDeleteOnRelease
+ , IStream ** ppStream);
+
+ virtual HRESULT STDMETHODCALLTYPE Read(void* pv, ULONG cb, ULONG* pcbRead);
+
+ virtual HRESULT STDMETHODCALLTYPE Write(void const* pv
+ , ULONG cb
+ , ULONG* pcbWritten);
+
+ virtual HRESULT STDMETHODCALLTYPE Seek(LARGE_INTEGER liDistanceToMove
+ , DWORD dwOrigin
+ , ULARGE_INTEGER* lpNewFilePointer);
+
+ virtual HRESULT STDMETHODCALLTYPE Stat(STATSTG* pStatstg
+ , DWORD grfStatFlag);
+};
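+
+// Illustrative usage (sketch): wrapping an SkStream for a COM API that reads an
+// IStream. 'stream' is assumed to be a caller-provided SkStream*.
+//
+//   IStream* istream = nullptr;
+//   if (SUCCEEDED(SkIStream::CreateFromSkStream(stream, true /*deleteOnRelease*/, &istream))) {
+//       // ... hand istream to the COM consumer ...
+//       istream->Release();  // also deletes 'stream' because deleteOnRelease was true
+//   }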
+
+/**
+ * A minimal write-only IStream implementation which wraps an SkWStream.
+ */
+class SkWIStream : public SkBaseIStream {
+private:
+ SkWStream *fSkWStream;
+
+ SkWIStream(SkWStream* stream);
+ virtual ~SkWIStream();
+
+public:
+ HRESULT static CreateFromSkWStream(SkWStream* stream, IStream ** ppStream);
+
+ virtual HRESULT STDMETHODCALLTYPE Write(void const* pv
+ , ULONG cb
+ , ULONG* pcbWritten);
+
+ virtual HRESULT STDMETHODCALLTYPE Commit(DWORD);
+
+ virtual HRESULT STDMETHODCALLTYPE Stat(STATSTG* pStatstg
+ , DWORD grfStatFlag);
+};
+
+#endif // SK_BUILD_FOR_WIN
+#endif // SkIStream_DEFINED
diff --git a/gfx/skia/skia/src/utils/win/SkTScopedComPtr.h b/gfx/skia/skia/src/utils/win/SkTScopedComPtr.h
new file mode 100644
index 000000000..5410f5c99
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkTScopedComPtr.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTScopedComPtr_DEFINED
+#define SkTScopedComPtr_DEFINED
+
+#include "SkLeanWindows.h"
+
+#ifdef SK_BUILD_FOR_WIN
+
+template<typename T>
+class SkBlockComRef : public T {
+private:
+ virtual ULONG STDMETHODCALLTYPE AddRef(void) = 0;
+ virtual ULONG STDMETHODCALLTYPE Release(void) = 0;
+};
+
+template<typename T> T* SkRefComPtr(T* ptr) {
+ ptr->AddRef();
+ return ptr;
+}
+
+template<typename T> T* SkSafeRefComPtr(T* ptr) {
+ if (ptr) {
+ ptr->AddRef();
+ }
+ return ptr;
+}
+
+template<typename T>
+class SkTScopedComPtr : SkNoncopyable {
+private:
+ T *fPtr;
+
+public:
+ explicit SkTScopedComPtr(T *ptr = nullptr) : fPtr(ptr) { }
+
+ ~SkTScopedComPtr() { this->reset();}
+
+ T &operator*() const { SkASSERT(fPtr != nullptr); return *fPtr; }
+
+ explicit operator bool() const { return fPtr != nullptr; }
+
+ SkBlockComRef<T> *operator->() const { return static_cast<SkBlockComRef<T>*>(fPtr); }
+
+ /**
+ * Returns the address of the underlying pointer.
+ * This is dangerous -- it breaks encapsulation and the reference escapes.
+ * Must only be used on instances currently pointing to NULL,
+ * and only to initialize the instance.
+ */
+ T **operator&() { SkASSERT(fPtr == nullptr); return &fPtr; }
+
+ T *get() const { return fPtr; }
+
+ void reset() {
+ if (this->fPtr) {
+ this->fPtr->Release();
+ this->fPtr = nullptr;
+ }
+ }
+
+ void swap(SkTScopedComPtr<T>& that) {
+ T* temp = this->fPtr;
+ this->fPtr = that.fPtr;
+ that.fPtr = temp;
+ }
+
+ T* release() {
+ T* temp = this->fPtr;
+ this->fPtr = nullptr;
+ return temp;
+ }
+};
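+
+// Illustrative usage (sketch): operator& hands the empty slot to a COM out-parameter,
+// and Release() is issued automatically when the wrapper leaves scope. The
+// DWriteCreateFactory call mirrors the one in SkDWrite.cpp.
+//
+//   SkTScopedComPtr<IDWriteFactory> factory;
+//   HRN(DWriteCreateFactory(DWRITE_FACTORY_TYPE_SHARED,
+//                           __uuidof(IDWriteFactory),
+//                           reinterpret_cast<IUnknown**>(&factory)));
+//   SkTScopedComPtr<IDWriteFontCollection> collection;
+//   HRN(factory->GetSystemFontCollection(&collection));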
+
+#endif // SK_BUILD_FOR_WIN
+#endif // SkTScopedComPtr_DEFINED
diff --git a/gfx/skia/skia/src/utils/win/SkWGL.h b/gfx/skia/skia/src/utils/win/SkWGL.h
new file mode 100644
index 000000000..3799377cc
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkWGL.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkRefCnt.h"
+
+#ifndef SkWGL_DEFINED
+#define SkWGL_DEFINED
+
+#include "SkLeanWindows.h"
+
+/**
+ * Working with WGL extensions can be a pain. Among the reasons is that you must
+ * have a GL context to get the proc addresses, but you want to use the procs to
+ * create a context in the first place. So you have to create a dummy GL ctx to
+ * get the proc addresses.
+ *
+ * This file helps by providing SkCreateWGLInterface(). It returns a struct of
+ * function pointers that it initializes. It also has a helper function to query
+ * for WGL extensions. It handles the fact that wglGetExtensionsString is itself
+ * an extension.
+ */
+
+#define SK_WGL_DRAW_TO_WINDOW 0x2001
+#define SK_WGL_ACCELERATION 0x2003
+#define SK_WGL_SUPPORT_OPENGL 0x2010
+#define SK_WGL_DOUBLE_BUFFER 0x2011
+#define SK_WGL_COLOR_BITS 0x2014
+#define SK_WGL_RED_BITS 0x2015
+#define SK_WGL_GREEN_BITS 0x2017
+#define SK_WGL_BLUE_BITS 0x2019
+#define SK_WGL_ALPHA_BITS 0x201B
+#define SK_WGL_STENCIL_BITS 0x2023
+#define SK_WGL_FULL_ACCELERATION 0x2027
+#define SK_WGL_SAMPLE_BUFFERS 0x2041
+#define SK_WGL_SAMPLES 0x2042
+#define SK_WGL_CONTEXT_MAJOR_VERSION 0x2091
+#define SK_WGL_CONTEXT_MINOR_VERSION 0x2092
+#define SK_WGL_CONTEXT_LAYER_PLANE 0x2093
+#define SK_WGL_CONTEXT_FLAGS 0x2094
+#define SK_WGL_CONTEXT_PROFILE_MASK 0x9126
+#define SK_WGL_CONTEXT_DEBUG_BIT 0x0001
+#define SK_WGL_CONTEXT_FORWARD_COMPATIBLE_BIT 0x0002
+#define SK_WGL_CONTEXT_CORE_PROFILE_BIT 0x00000001
+#define SK_WGL_CONTEXT_COMPATIBILITY_PROFILE_BIT 0x00000002
+#define SK_WGL_CONTEXT_ES2_PROFILE_BIT 0x00000004
+#define SK_ERROR_INVALID_VERSION 0x2095
+#define SK_ERROR_INVALID_PROFILE 0x2096
+
+DECLARE_HANDLE(HPBUFFER);
+
+class SkWGLExtensions {
+public:
+ SkWGLExtensions();
+ /**
+ * Determines if an extension is available for a given DC.
+ * WGL_extensions_string is considered a prerequisite for all other
+ * extensions. It is necessary to check this before calling other class
+ * functions.
+ */
+ bool hasExtension(HDC dc, const char* ext) const;
+
+ const char* getExtensionsString(HDC hdc) const;
+ BOOL choosePixelFormat(HDC hdc, const int*, const FLOAT*, UINT, int*, UINT*) const;
+ BOOL getPixelFormatAttribiv(HDC, int, int, UINT, const int*, int*) const;
+ BOOL getPixelFormatAttribfv(HDC hdc, int, int, UINT, const int*, FLOAT*) const;
+ HGLRC createContextAttribs(HDC, HGLRC, const int *) const;
+
+ BOOL swapInterval(int interval) const;
+
+ HPBUFFER createPbuffer(HDC, int , int, int, const int*) const;
+ HDC getPbufferDC(HPBUFFER) const;
+ int releasePbufferDC(HPBUFFER, HDC) const;
+ BOOL destroyPbuffer(HPBUFFER) const;
+
+ /**
+ * WGL doesn't have precise rules for the ordering of formats returned
+ * by wglChoosePixelFormat. This function helps choose among the set of
+ * formats returned by wglChoosePixelFormat. The rules in decreasing
+ * priority are:
+ * * Choose formats with the smallest sample count that is >=
+ * desiredSampleCount (or the largest sample count if all formats have
+ * fewer samples than desiredSampleCount.)
+ * * Choose formats with the fewest color samples when coverage sampling
+ * is available.
+ * * If the above rules leave multiple formats, choose the one that
+ * appears first in the formats array parameter.
+ */
+ int selectFormat(const int formats[],
+ int formatCount,
+ HDC dc,
+ int desiredSampleCount) const;
+private:
+ typedef const char* (WINAPI *GetExtensionsStringProc)(HDC);
+ typedef BOOL (WINAPI *ChoosePixelFormatProc)(HDC, const int *, const FLOAT *, UINT, int *, UINT *);
+ typedef BOOL (WINAPI *GetPixelFormatAttribivProc)(HDC, int, int, UINT, const int*, int*);
+ typedef BOOL (WINAPI *GetPixelFormatAttribfvProc)(HDC, int, int, UINT, const int*, FLOAT*);
+ typedef HGLRC (WINAPI *CreateContextAttribsProc)(HDC, HGLRC, const int *);
+ typedef BOOL (WINAPI* SwapIntervalProc)(int);
+ typedef HPBUFFER (WINAPI* CreatePbufferProc)(HDC, int , int, int, const int*);
+ typedef HDC (WINAPI* GetPbufferDCProc)(HPBUFFER);
+ typedef int (WINAPI* ReleasePbufferDCProc)(HPBUFFER, HDC);
+ typedef BOOL (WINAPI* DestroyPbufferProc)(HPBUFFER);
+
+ GetExtensionsStringProc fGetExtensionsString;
+ ChoosePixelFormatProc fChoosePixelFormat;
+ GetPixelFormatAttribfvProc fGetPixelFormatAttribfv;
+ GetPixelFormatAttribivProc fGetPixelFormatAttribiv;
+ CreateContextAttribsProc fCreateContextAttribs;
+ SwapIntervalProc fSwapInterval;
+ CreatePbufferProc fCreatePbuffer;
+ GetPbufferDCProc fGetPbufferDC;
+ ReleasePbufferDCProc fReleasePbufferDC;
+ DestroyPbufferProc fDestroyPbuffer;
+};
+
+enum SkWGLContextRequest {
+ /** Requests to create core profile context if possible, otherwise
+ compatibility profile. */
+ kGLPreferCoreProfile_SkWGLContextRequest,
+ /** Requests to create compatibility profile context if possible, otherwise
+ core profile. */
+ kGLPreferCompatibilityProfile_SkWGLContextRequest,
+ /** Requests to create GL ES profile context. */
+ kGLES_SkWGLContextRequest
+};
+/**
+ * Helper to create an OpenGL context for a DC using WGL. Configs with a sample count >=
+ * msaaSampleCount are preferred, but if none is available then a context with a lower sample
+ * count (including non-MSAA) will be created. If a core profile is requested but cannot be
+ * created then a compatibility profile context will be created instead.
+ */
+HGLRC SkCreateWGLContext(HDC dc, int msaaSampleCount, bool deepColor, SkWGLContextRequest context);
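+
+// Illustrative usage (sketch): create and bind a non-MSAA, non-deep-color context
+// for a window's DC ('hwnd' is assumed to be a caller-provided window handle).
+//
+//   HDC dc = GetDC(hwnd);
+//   HGLRC glrc = SkCreateWGLContext(dc, 0, false,
+//                                   kGLPreferCompatibilityProfile_SkWGLContextRequest);
+//   if (glrc) {
+//       wglMakeCurrent(dc, glrc);
+//   }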
+
+/**
+ * Helper class for creating a pbuffer context and deleting all the handles when finished. This
+ * requires that a device context has been created. However, the pbuffer gets its own device
+ * context. The original device context can be released once the pbuffer context is created.
+ */
+class SkWGLPbufferContext : public SkRefCnt {
+public:
+ static SkWGLPbufferContext* Create(HDC parentDC, int msaaSampleCount,
+ SkWGLContextRequest contextType);
+
+ virtual ~SkWGLPbufferContext();
+
+ HDC getDC() const { return fDC; }
+ HGLRC getGLRC() const { return fGLRC; }
+
+private:
+ SkWGLPbufferContext(HPBUFFER pbuffer, HDC dc, HGLRC glrc);
+
+ HPBUFFER fPbuffer;
+ HDC fDC;
+ HGLRC fGLRC;
+ SkWGLExtensions fExtensions;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/win/SkWGL_win.cpp b/gfx/skia/skia/src/utils/win/SkWGL_win.cpp
new file mode 100644
index 000000000..dc1b4caf1
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkWGL_win.cpp
@@ -0,0 +1,469 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32)
+
+#include "SkWGL.h"
+
+#include "SkTDArray.h"
+#include "SkTSearch.h"
+#include "SkTSort.h"
+
+bool SkWGLExtensions::hasExtension(HDC dc, const char* ext) const {
+ if (nullptr == this->fGetExtensionsString) {
+ return false;
+ }
+ if (!strcmp("WGL_ARB_extensions_string", ext)) {
+ return true;
+ }
+ const char* extensionString = this->getExtensionsString(dc);
+ size_t extLength = strlen(ext);
+
+ while (true) {
+ size_t n = strcspn(extensionString, " ");
+ if (n == extLength && 0 == strncmp(ext, extensionString, n)) {
+ return true;
+ }
+ if (0 == extensionString[n]) {
+ return false;
+ }
+ extensionString += n+1;
+ }
+
+ return false;
+}
+
+const char* SkWGLExtensions::getExtensionsString(HDC hdc) const {
+ return fGetExtensionsString(hdc);
+}
+
+BOOL SkWGLExtensions::choosePixelFormat(HDC hdc,
+ const int* piAttribIList,
+ const FLOAT* pfAttribFList,
+ UINT nMaxFormats,
+ int* piFormats,
+ UINT* nNumFormats) const {
+ return fChoosePixelFormat(hdc, piAttribIList, pfAttribFList,
+ nMaxFormats, piFormats, nNumFormats);
+}
+
+BOOL SkWGLExtensions::getPixelFormatAttribiv(HDC hdc,
+ int iPixelFormat,
+ int iLayerPlane,
+ UINT nAttributes,
+ const int *piAttributes,
+ int *piValues) const {
+ return fGetPixelFormatAttribiv(hdc, iPixelFormat, iLayerPlane,
+ nAttributes, piAttributes, piValues);
+}
+
+BOOL SkWGLExtensions::getPixelFormatAttribfv(HDC hdc,
+ int iPixelFormat,
+ int iLayerPlane,
+ UINT nAttributes,
+ const int *piAttributes,
+ float *pfValues) const {
+ return fGetPixelFormatAttribfv(hdc, iPixelFormat, iLayerPlane,
+ nAttributes, piAttributes, pfValues);
+}
+HGLRC SkWGLExtensions::createContextAttribs(HDC hDC,
+ HGLRC hShareContext,
+ const int *attribList) const {
+ return fCreateContextAttribs(hDC, hShareContext, attribList);
+}
+
+BOOL SkWGLExtensions::swapInterval(int interval) const {
+ return fSwapInterval(interval);
+}
+
+HPBUFFER SkWGLExtensions::createPbuffer(HDC hDC,
+ int iPixelFormat,
+ int iWidth,
+ int iHeight,
+ const int *piAttribList) const {
+ return fCreatePbuffer(hDC, iPixelFormat, iWidth, iHeight, piAttribList);
+}
+
+HDC SkWGLExtensions::getPbufferDC(HPBUFFER hPbuffer) const {
+ return fGetPbufferDC(hPbuffer);
+}
+
+int SkWGLExtensions::releasePbufferDC(HPBUFFER hPbuffer, HDC hDC) const {
+ return fReleasePbufferDC(hPbuffer, hDC);
+}
+
+BOOL SkWGLExtensions::destroyPbuffer(HPBUFFER hPbuffer) const {
+ return fDestroyPbuffer(hPbuffer);
+}
+
+namespace {
+
+struct PixelFormat {
+ int fFormat;
+ int fSampleCnt;
+ int fChoosePixelFormatRank;
+};
+
+bool pf_less(const PixelFormat& a, const PixelFormat& b) {
+ if (a.fSampleCnt < b.fSampleCnt) {
+ return true;
+ } else if (b.fSampleCnt < a.fSampleCnt) {
+ return false;
+ } else if (a.fChoosePixelFormatRank < b.fChoosePixelFormatRank) {
+ return true;
+ }
+ return false;
+}
+}
+
+int SkWGLExtensions::selectFormat(const int formats[],
+ int formatCount,
+ HDC dc,
+ int desiredSampleCount) const {
+ if (formatCount <= 0) {
+ return -1;
+ }
+ PixelFormat desiredFormat = {
+ 0,
+ desiredSampleCount,
+ 0,
+ };
+ SkTDArray<PixelFormat> rankedFormats;
+ rankedFormats.setCount(formatCount);
+ for (int i = 0; i < formatCount; ++i) {
+ static const int kQueryAttr = SK_WGL_SAMPLES;
+ int numSamples;
+ this->getPixelFormatAttribiv(dc,
+ formats[i],
+ 0,
+ 1,
+ &kQueryAttr,
+ &numSamples);
+ rankedFormats[i].fFormat = formats[i];
+ rankedFormats[i].fSampleCnt = numSamples;
+ rankedFormats[i].fChoosePixelFormatRank = i;
+ }
+ SkTQSort(rankedFormats.begin(),
+ rankedFormats.begin() + rankedFormats.count() - 1,
+ SkTLessFunctionToFunctorAdaptor<PixelFormat, pf_less>());
+ int idx = SkTSearch<PixelFormat, pf_less>(rankedFormats.begin(),
+ rankedFormats.count(),
+ desiredFormat,
+ sizeof(PixelFormat));
+ if (idx < 0) {
+ idx = ~idx;
+ }
+ return rankedFormats[idx].fFormat;
+}
+
+
+namespace {
+
+#if defined(UNICODE)
+ #define STR_LIT(X) L## #X
+#else
+ #define STR_LIT(X) #X
+#endif
+
+#define DUMMY_CLASS STR_LIT("DummyClass")
+
+HWND create_dummy_window() {
+ HMODULE module = GetModuleHandle(nullptr);
+ HWND dummy;
+ RECT windowRect;
+ windowRect.left = 0;
+ windowRect.right = 8;
+ windowRect.top = 0;
+ windowRect.bottom = 8;
+
+ WNDCLASS wc;
+
+ wc.style = CS_HREDRAW | CS_VREDRAW | CS_OWNDC;
+ wc.lpfnWndProc = (WNDPROC) DefWindowProc;
+ wc.cbClsExtra = 0;
+ wc.cbWndExtra = 0;
+ wc.hInstance = module;
+ wc.hIcon = LoadIcon(nullptr, IDI_WINLOGO);
+ wc.hCursor = LoadCursor(nullptr, IDC_ARROW);
+ wc.hbrBackground = nullptr;
+ wc.lpszMenuName = nullptr;
+ wc.lpszClassName = DUMMY_CLASS;
+
+ if(!RegisterClass(&wc)) {
+ return 0;
+ }
+
+ DWORD style, exStyle;
+ exStyle = WS_EX_CLIENTEDGE;
+ style = WS_SYSMENU;
+
+ AdjustWindowRectEx(&windowRect, style, false, exStyle);
+ if(!(dummy = CreateWindowEx(exStyle,
+ DUMMY_CLASS,
+ STR_LIT("DummyWindow"),
+ WS_CLIPSIBLINGS | WS_CLIPCHILDREN | style,
+ 0, 0,
+ windowRect.right-windowRect.left,
+ windowRect.bottom-windowRect.top,
+ nullptr, nullptr,
+ module,
+ nullptr))) {
+ UnregisterClass(DUMMY_CLASS, module);
+ return nullptr;
+ }
+ ShowWindow(dummy, SW_HIDE);
+
+ return dummy;
+}
+
+void destroy_dummy_window(HWND dummy) {
+ DestroyWindow(dummy);
+ HMODULE module = GetModuleHandle(nullptr);
+ UnregisterClass(DUMMY_CLASS, module);
+}
+}
+
+#define GET_PROC(NAME, SUFFIX) f##NAME = \
+    (NAME##Proc) wglGetProcAddress("wgl" #NAME #SUFFIX)
+
+SkWGLExtensions::SkWGLExtensions()
+ : fGetExtensionsString(nullptr)
+ , fChoosePixelFormat(nullptr)
+ , fGetPixelFormatAttribfv(nullptr)
+ , fGetPixelFormatAttribiv(nullptr)
+ , fCreateContextAttribs(nullptr)
+ , fSwapInterval(nullptr)
+ , fCreatePbuffer(nullptr)
+ , fGetPbufferDC(nullptr)
+ , fReleasePbufferDC(nullptr)
+ , fDestroyPbuffer(nullptr)
+ {
+ HDC prevDC = wglGetCurrentDC();
+ HGLRC prevGLRC = wglGetCurrentContext();
+
+ PIXELFORMATDESCRIPTOR dummyPFD;
+
+ ZeroMemory(&dummyPFD, sizeof(dummyPFD));
+ dummyPFD.nSize = sizeof(dummyPFD);
+ dummyPFD.nVersion = 1;
+ dummyPFD.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL;
+ dummyPFD.iPixelType = PFD_TYPE_RGBA;
+ dummyPFD.cColorBits = 32;
+ dummyPFD.cDepthBits = 0;
+ dummyPFD.cStencilBits = 8;
+ dummyPFD.iLayerType = PFD_MAIN_PLANE;
+ HWND dummyWND = create_dummy_window();
+ if (dummyWND) {
+ HDC dummyDC = GetDC(dummyWND);
+ int dummyFormat = ChoosePixelFormat(dummyDC, &dummyPFD);
+ SetPixelFormat(dummyDC, dummyFormat, &dummyPFD);
+ HGLRC dummyGLRC = wglCreateContext(dummyDC);
+ SkASSERT(dummyGLRC);
+ wglMakeCurrent(dummyDC, dummyGLRC);
+
+ GET_PROC(GetExtensionsString, ARB);
+ GET_PROC(ChoosePixelFormat, ARB);
+ GET_PROC(GetPixelFormatAttribiv, ARB);
+ GET_PROC(GetPixelFormatAttribfv, ARB);
+ GET_PROC(CreateContextAttribs, ARB);
+ GET_PROC(SwapInterval, EXT);
+ GET_PROC(CreatePbuffer, ARB);
+ GET_PROC(GetPbufferDC, ARB);
+ GET_PROC(ReleasePbufferDC, ARB);
+ GET_PROC(DestroyPbuffer, ARB);
+
+ wglMakeCurrent(dummyDC, nullptr);
+ wglDeleteContext(dummyGLRC);
+ destroy_dummy_window(dummyWND);
+ }
+
+ wglMakeCurrent(prevDC, prevGLRC);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void get_pixel_formats_to_try(HDC dc, const SkWGLExtensions& extensions,
+ bool doubleBuffered, int msaaSampleCount, bool deepColor,
+ int formatsToTry[2]) {
+ auto appendAttr = [](SkTDArray<int>& attrs, int attr, int value) {
+ attrs.push(attr);
+ attrs.push(value);
+ };
+
+ SkTDArray<int> iAttrs;
+ appendAttr(iAttrs, SK_WGL_DRAW_TO_WINDOW, TRUE);
+ appendAttr(iAttrs, SK_WGL_DOUBLE_BUFFER, (doubleBuffered ? TRUE : FALSE));
+ appendAttr(iAttrs, SK_WGL_ACCELERATION, SK_WGL_FULL_ACCELERATION);
+ appendAttr(iAttrs, SK_WGL_SUPPORT_OPENGL, TRUE);
+ if (deepColor) {
+ appendAttr(iAttrs, SK_WGL_RED_BITS, 10);
+ appendAttr(iAttrs, SK_WGL_GREEN_BITS, 10);
+ appendAttr(iAttrs, SK_WGL_BLUE_BITS, 10);
+ appendAttr(iAttrs, SK_WGL_ALPHA_BITS, 2);
+ } else {
+ appendAttr(iAttrs, SK_WGL_COLOR_BITS, 24);
+ appendAttr(iAttrs, SK_WGL_ALPHA_BITS, 8);
+ }
+ appendAttr(iAttrs, SK_WGL_STENCIL_BITS, 8);
+
+ float fAttrs[] = {0, 0};
+
+ // Get a MSAA format if requested and possible.
+ if (msaaSampleCount > 0 &&
+ extensions.hasExtension(dc, "WGL_ARB_multisample")) {
+ SkTDArray<int> msaaIAttrs = iAttrs;
+ appendAttr(msaaIAttrs, SK_WGL_SAMPLE_BUFFERS, TRUE);
+ appendAttr(msaaIAttrs, SK_WGL_SAMPLES, msaaSampleCount);
+ appendAttr(msaaIAttrs, 0, 0);
+ unsigned int num;
+ int formats[64];
+ extensions.choosePixelFormat(dc, msaaIAttrs.begin(), fAttrs, 64, formats, &num);
+ num = SkTMin(num, 64U);
+ formatsToTry[0] = extensions.selectFormat(formats, num, dc, msaaSampleCount);
+ }
+
+ // Get a non-MSAA format
+ int* format = -1 == formatsToTry[0] ? &formatsToTry[0] : &formatsToTry[1];
+ unsigned int num;
+ appendAttr(iAttrs, 0, 0);
+ extensions.choosePixelFormat(dc, iAttrs.begin(), fAttrs, 1, format, &num);
+}
+
+static HGLRC create_gl_context(HDC dc, SkWGLExtensions extensions, SkWGLContextRequest contextType) {
+ HDC prevDC = wglGetCurrentDC();
+ HGLRC prevGLRC = wglGetCurrentContext();
+
+ HGLRC glrc = nullptr;
+ if (kGLES_SkWGLContextRequest == contextType) {
+ if (!extensions.hasExtension(dc, "WGL_EXT_create_context_es2_profile")) {
+ wglMakeCurrent(prevDC, prevGLRC);
+ return nullptr;
+ }
+ static const int glesAttribs[] = {
+ SK_WGL_CONTEXT_MAJOR_VERSION, 3,
+ SK_WGL_CONTEXT_MINOR_VERSION, 0,
+ SK_WGL_CONTEXT_PROFILE_MASK, SK_WGL_CONTEXT_ES2_PROFILE_BIT,
+ 0,
+ };
+ glrc = extensions.createContextAttribs(dc, nullptr, glesAttribs);
+ if (nullptr == glrc) {
+ wglMakeCurrent(prevDC, prevGLRC);
+ return nullptr;
+ }
+ } else {
+ if (kGLPreferCoreProfile_SkWGLContextRequest == contextType &&
+ extensions.hasExtension(dc, "WGL_ARB_create_context")) {
+ static const int kCoreGLVersions[] = {
+ 4, 3,
+ 4, 2,
+ 4, 1,
+ 4, 0,
+ 3, 3,
+ 3, 2,
+ };
+ int coreProfileAttribs[] = {
+ SK_WGL_CONTEXT_MAJOR_VERSION, -1,
+ SK_WGL_CONTEXT_MINOR_VERSION, -1,
+ SK_WGL_CONTEXT_PROFILE_MASK, SK_WGL_CONTEXT_CORE_PROFILE_BIT,
+ 0,
+ };
+ for (int v = 0; v < SK_ARRAY_COUNT(kCoreGLVersions) / 2; ++v) {
+ coreProfileAttribs[1] = kCoreGLVersions[2 * v];
+ coreProfileAttribs[3] = kCoreGLVersions[2 * v + 1];
+ glrc = extensions.createContextAttribs(dc, nullptr, coreProfileAttribs);
+ if (glrc) {
+ break;
+ }
+ }
+ }
+ }
+
+ if (nullptr == glrc) {
+ glrc = wglCreateContext(dc);
+ }
+ SkASSERT(glrc);
+
+ wglMakeCurrent(prevDC, prevGLRC);
+
+ // This might help make the context non-vsynced.
+ if (extensions.hasExtension(dc, "WGL_EXT_swap_control")) {
+ extensions.swapInterval(-1);
+ }
+ return glrc;
+}
+
+HGLRC SkCreateWGLContext(HDC dc, int msaaSampleCount, bool deepColor,
+ SkWGLContextRequest contextType) {
+ SkWGLExtensions extensions;
+ if (!extensions.hasExtension(dc, "WGL_ARB_pixel_format")) {
+ return nullptr;
+ }
+
+ BOOL set = FALSE;
+
+ int pixelFormatsToTry[] = { -1, -1 };
+ get_pixel_formats_to_try(dc, extensions, true, msaaSampleCount, deepColor, pixelFormatsToTry);
+ for (int f = 0;
+ !set && f < SK_ARRAY_COUNT(pixelFormatsToTry) && -1 != pixelFormatsToTry[f];
+ ++f) {
+ PIXELFORMATDESCRIPTOR pfd;
+ DescribePixelFormat(dc, pixelFormatsToTry[f], sizeof(pfd), &pfd);
+ set = SetPixelFormat(dc, pixelFormatsToTry[f], &pfd);
+ }
+
+ if (!set) {
+ return nullptr;
+ }
+
+ return create_gl_context(dc, extensions, contextType);
+}
+
+SkWGLPbufferContext* SkWGLPbufferContext::Create(HDC parentDC, int msaaSampleCount,
+ SkWGLContextRequest contextType) {
+ SkWGLExtensions extensions;
+ if (!extensions.hasExtension(parentDC, "WGL_ARB_pixel_format") ||
+ !extensions.hasExtension(parentDC, "WGL_ARB_pbuffer")) {
+ return nullptr;
+ }
+
+ // try for single buffer first
+ for (int dblBuffer = 0; dblBuffer < 2; ++dblBuffer) {
+ int pixelFormatsToTry[] = { -1, -1 };
+ get_pixel_formats_to_try(parentDC, extensions, (0 != dblBuffer), msaaSampleCount,
+ false, pixelFormatsToTry);
+ for (int f = 0; -1 != pixelFormatsToTry[f] && f < SK_ARRAY_COUNT(pixelFormatsToTry); ++f) {
+ HPBUFFER pbuf = extensions.createPbuffer(parentDC, pixelFormatsToTry[f], 1, 1, nullptr);
+ if (0 != pbuf) {
+ HDC dc = extensions.getPbufferDC(pbuf);
+ if (dc) {
+ HGLRC glrc = create_gl_context(dc, extensions, contextType);
+ if (glrc) {
+ return new SkWGLPbufferContext(pbuf, dc, glrc);
+ }
+ extensions.releasePbufferDC(pbuf, dc);
+ }
+ extensions.destroyPbuffer(pbuf);
+ }
+ }
+ }
+ return nullptr;
+}
+
+SkWGLPbufferContext::~SkWGLPbufferContext() {
+ SkASSERT(fExtensions.hasExtension(fDC, "WGL_ARB_pbuffer"));
+ wglDeleteContext(fGLRC);
+ fExtensions.releasePbufferDC(fPbuffer, fDC);
+ fExtensions.destroyPbuffer(fPbuffer);
+}
+
+SkWGLPbufferContext::SkWGLPbufferContext(HPBUFFER pbuffer, HDC dc, HGLRC glrc)
+ : fPbuffer(pbuffer)
+ , fDC(dc)
+ , fGLRC(glrc) {
+}
+
+#endif//defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/views/SkEvent.cpp b/gfx/skia/skia/src/views/SkEvent.cpp
new file mode 100644
index 000000000..7b658c839
--- /dev/null
+++ b/gfx/skia/skia/src/views/SkEvent.cpp
@@ -0,0 +1,512 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkEvent.h"
+
+void SkEvent::initialize(const char* type, size_t typeLen,
+ SkEventSinkID targetID) {
+ fType = nullptr;
+ setType(type, typeLen);
+ f32 = 0;
+ fTargetID = targetID;
+ fTargetProc = nullptr;
+#ifdef SK_DEBUG
+ fTime = 0;
+ fNextEvent = nullptr;
+#endif
+}
+
+SkEvent::SkEvent()
+{
+ initialize("", 0, 0);
+}
+
+SkEvent::SkEvent(const SkEvent& src)
+{
+ *this = src;
+ if (((size_t) fType & 1) == 0)
+ setType(src.fType);
+}
+
+SkEvent::SkEvent(const SkString& type, SkEventSinkID targetID)
+{
+ initialize(type.c_str(), type.size(), targetID);
+}
+
+SkEvent::SkEvent(const char type[], SkEventSinkID targetID)
+{
+ SkASSERT(type);
+ initialize(type, strlen(type), targetID);
+}
+
+SkEvent::~SkEvent()
+{
+ if (((size_t) fType & 1) == 0)
+ sk_free((void*) fType);
+}
+
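+// Short type names are stored inline in the fType pointer itself: the
+// characters are shifted left one bit and the low bit is set as a tag, so a
+// real heap pointer (always even) can be told apart. makeCharArray() below
+// decodes the inline form; setType() further down produces it.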
+static size_t makeCharArray(char* buffer, size_t compact)
+{
+ size_t bits = (size_t) compact >> 1;
+ memcpy(buffer, &bits, sizeof(compact));
+ buffer[sizeof(compact)] = 0;
+ return strlen(buffer);
+}
+
+void SkEvent::getType(SkString* str) const
+{
+ if (str)
+ {
+ if ((size_t) fType & 1) // not a pointer
+ {
+ char chars[sizeof(size_t) + 1];
+ size_t len = makeCharArray(chars, (size_t) fType);
+ str->set(chars, len);
+ }
+ else
+ str->set(fType);
+ }
+}
+
+bool SkEvent::isType(const SkString& str) const
+{
+ return this->isType(str.c_str(), str.size());
+}
+
+bool SkEvent::isType(const char type[], size_t typeLen) const
+{
+ if (typeLen == 0)
+ typeLen = strlen(type);
+ if ((size_t) fType & 1) { // not a pointer
+ char chars[sizeof(size_t) + 1];
+ size_t len = makeCharArray(chars, (size_t) fType);
+ return len == typeLen && strncmp(chars, type, typeLen) == 0;
+ }
+ return strncmp(fType, type, typeLen) == 0 && fType[typeLen] == 0;
+}
+
+void SkEvent::setType(const char type[], size_t typeLen)
+{
+ if (typeLen == 0)
+ typeLen = strlen(type);
+ if (typeLen <= sizeof(fType)) {
+ size_t slot = 0;
+ memcpy(&slot, type, typeLen);
+ if (slot << 1 >> 1 != slot)
+ goto useCharStar;
+ slot <<= 1;
+ slot |= 1;
+ fType = (char*) slot;
+ } else {
+useCharStar:
+ fType = (char*) sk_malloc_throw(typeLen + 1);
+ SkASSERT(((size_t) fType & 1) == 0);
+ memcpy(fType, type, typeLen);
+ fType[typeLen] = 0;
+ }
+}
+
+void SkEvent::setType(const SkString& type)
+{
+ setType(type.c_str());
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+#include "SkParse.h"
+
+void SkEvent::inflate(const SkDOM& dom, const SkDOM::Node* node)
+{
+ const char* name = dom.findAttr(node, "type");
+ if (name)
+ this->setType(name);
+
+ const char* value;
+ if ((value = dom.findAttr(node, "fast32")) != nullptr)
+ {
+ int32_t n;
+ if (SkParse::FindS32(value, &n))
+ this->setFast32(n);
+ }
+
+ for (node = dom.getFirstChild(node); node; node = dom.getNextSibling(node))
+ {
+ if (strcmp(dom.getName(node), "data"))
+ {
+ SkDEBUGCODE(SkDebugf("SkEvent::inflate unrecognized subelement <%s>\n", dom.getName(node));)
+ continue;
+ }
+
+ name = dom.findAttr(node, "name");
+ if (name == nullptr)
+ {
+ SkDEBUGCODE(SkDebugf("SkEvent::inflate missing required \"name\" attribute in <data> subelement\n");)
+ continue;
+ }
+
+ if ((value = dom.findAttr(node, "s32")) != nullptr)
+ {
+ int32_t n;
+ if (SkParse::FindS32(value, &n))
+ this->setS32(name, n);
+ }
+ else if ((value = dom.findAttr(node, "scalar")) != nullptr)
+ {
+ SkScalar x;
+ if (SkParse::FindScalar(value, &x))
+ this->setScalar(name, x);
+ }
+ else if ((value = dom.findAttr(node, "string")) != nullptr)
+ this->setString(name, value);
+#ifdef SK_DEBUG
+ else
+ {
+ SkDebugf("SkEvent::inflate <data name=\"%s\"> subelement missing required type attribute [S32 | scalar | string]\n", name);
+ }
+#endif
+ }
+}
+
+#ifdef SK_DEBUG
+
+ #ifndef SkScalarToFloat
+ #define SkScalarToFloat(x) ((x) / 65536.f)
+ #endif
+
+ void SkEvent::dump(const char title[])
+ {
+ if (title)
+ SkDebugf("%s ", title);
+
+ SkString etype;
+ this->getType(&etype);
+ SkDebugf("event<%s> fast32=%d", etype.c_str(), this->getFast32());
+
+ const SkMetaData& md = this->getMetaData();
+ SkMetaData::Iter iter(md);
+ SkMetaData::Type mtype;
+ int count;
+ const char* name;
+
+ while ((name = iter.next(&mtype, &count)) != nullptr)
+ {
+ SkASSERT(count > 0);
+
+ SkDebugf(" <%s>=", name);
+ switch (mtype) {
+ case SkMetaData::kS32_Type: // vector version???
+ {
+ int32_t value;
+ md.findS32(name, &value);
+ SkDebugf("%d ", value);
+ }
+ break;
+ case SkMetaData::kScalar_Type:
+ {
+ const SkScalar* values = md.findScalars(name, &count, nullptr);
+ SkDebugf("%f", SkScalarToFloat(values[0]));
+ for (int i = 1; i < count; i++)
+ SkDebugf(", %f", SkScalarToFloat(values[i]));
+ SkDebugf(" ");
+ }
+ break;
+ case SkMetaData::kString_Type:
+ {
+ const char* value = md.findString(name);
+ SkASSERT(value);
+ SkDebugf("<%s> ", value);
+ }
+ break;
+ case SkMetaData::kPtr_Type: // vector version???
+ {
+ void* value;
+ md.findPtr(name, &value);
+ SkDebugf("%p ", value);
+ }
+ break;
+ case SkMetaData::kBool_Type: // vector version???
+ {
+ bool value;
+ md.findBool(name, &value);
+ SkDebugf("%s ", value ? "true" : "false");
+ }
+ break;
+ default:
+ SkDEBUGFAIL("unknown metadata type returned from iterator");
+ break;
+ }
+ }
+ SkDebugf("\n");
+ }
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+// #define SK_TRACE_EVENTSx
+#endif
+
+#ifdef SK_TRACE_EVENTS
+ static void event_log(const char s[])
+ {
+ SkDEBUGF(("%s\n", s));
+ }
+
+ #define EVENT_LOG(s) event_log(s)
+ #define EVENT_LOGN(s, n) do { SkString str(s); str.append(" "); str.appendS32(n); event_log(str.c_str()); } while (0)
+#else
+ #define EVENT_LOG(s)
+ #define EVENT_LOGN(s, n)
+#endif
+
+#include "SkMutex.h"
+#include "SkTime.h"
+
+class SkEvent_Globals {
+public:
+ SkEvent_Globals() {
+ fEventQHead = nullptr;
+ fEventQTail = nullptr;
+ fDelayQHead = nullptr;
+ SkDEBUGCODE(fEventCounter = 0;)
+ }
+
+ SkMutex fEventMutex;
+ SkEvent* fEventQHead, *fEventQTail;
+ SkEvent* fDelayQHead;
+ SkDEBUGCODE(int fEventCounter;)
+};
+
+static SkEvent_Globals& getGlobals() {
+ // leak this, so we don't incur any shutdown perf hit
+ static SkEvent_Globals* gGlobals = new SkEvent_Globals;
+ return *gGlobals;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkEvent::postDelay(SkMSec delay) {
+ if (!fTargetID && !fTargetProc) {
+ delete this;
+ return;
+ }
+
+ if (delay) {
+ this->postTime(GetMSecsSinceStartup() + delay);
+ return;
+ }
+
+ SkEvent_Globals& globals = getGlobals();
+
+ globals.fEventMutex.acquire();
+ bool wasEmpty = SkEvent::Enqueue(this);
+ globals.fEventMutex.release();
+
+ // call outside of us holding the mutex
+ if (wasEmpty) {
+ SkEvent::SignalNonEmptyQueue();
+ }
+}
+
+void SkEvent::postTime(SkMSec time) {
+ if (!fTargetID && !fTargetProc) {
+ delete this;
+ return;
+ }
+
+ SkEvent_Globals& globals = getGlobals();
+
+ globals.fEventMutex.acquire();
+ SkMSec queueDelay = SkEvent::EnqueueTime(this, time);
+ globals.fEventMutex.release();
+
+ // call outside of us holding the mutex
+ if ((int32_t)queueDelay != ~0) {
+ SkEvent::SignalQueueTimer(queueDelay);
+ }
+}
+
+bool SkEvent::Enqueue(SkEvent* evt) {
+ SkEvent_Globals& globals = getGlobals();
+ // gEventMutex acquired by caller
+
+ SkASSERT(evt);
+
+ bool wasEmpty = globals.fEventQHead == nullptr;
+
+ if (globals.fEventQTail)
+ globals.fEventQTail->fNextEvent = evt;
+ globals.fEventQTail = evt;
+ if (globals.fEventQHead == nullptr)
+ globals.fEventQHead = evt;
+ evt->fNextEvent = nullptr;
+
+ SkDEBUGCODE(++globals.fEventCounter);
+
+ return wasEmpty;
+}
+
+SkEvent* SkEvent::Dequeue() {
+ SkEvent_Globals& globals = getGlobals();
+ globals.fEventMutex.acquire();
+
+ SkEvent* evt = globals.fEventQHead;
+ if (evt) {
+ SkDEBUGCODE(--globals.fEventCounter);
+
+ globals.fEventQHead = evt->fNextEvent;
+ if (globals.fEventQHead == nullptr) {
+ globals.fEventQTail = nullptr;
+ }
+ }
+ globals.fEventMutex.release();
+
+ return evt;
+}
+
+bool SkEvent::QHasEvents() {
+ SkEvent_Globals& globals = getGlobals();
+
+ // this is not thread accurate, need a semaphore for that
+ return globals.fEventQHead != nullptr;
+}
+
+#ifdef SK_TRACE_EVENTS
+ static int gDelayDepth;
+#endif
+
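+// Insert evt into the delay queue (kept sorted by fire time) and return how
+// long until the head of the queue is due, clamped to at least 1ms.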
+SkMSec SkEvent::EnqueueTime(SkEvent* evt, SkMSec time) {
+ SkEvent_Globals& globals = getGlobals();
+ // gEventMutex acquired by caller
+
+ SkEvent* curr = globals.fDelayQHead;
+ SkEvent* prev = nullptr;
+
+ while (curr) {
+ if (SkMSec_LT(time, curr->fTime)) {
+ break;
+ }
+ prev = curr;
+ curr = curr->fNextEvent;
+ }
+
+ evt->fTime = time;
+ evt->fNextEvent = curr;
+ if (prev == nullptr) {
+ globals.fDelayQHead = evt;
+ } else {
+ prev->fNextEvent = evt;
+ }
+
+ SkMSec delay = globals.fDelayQHead->fTime - GetMSecsSinceStartup();
+ if ((int32_t)delay <= 0) {
+ delay = 1;
+ }
+ return delay;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkEventSink.h"
+
+bool SkEvent::ProcessEvent() {
+ SkEvent* evt = SkEvent::Dequeue();
+ SkAutoTDelete<SkEvent> autoDelete(evt);
+ bool again = false;
+
+ EVENT_LOGN("ProcessEvent", (int32_t)evt);
+
+ if (evt) {
+ (void)SkEventSink::DoEvent(*evt);
+ again = SkEvent::QHasEvents();
+ }
+ return again;
+}
+
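+// Move every delayed event whose time has arrived onto the regular event
+// queue, then re-arm the timer for the next pending delayed event (0 if
+// none remain).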
+void SkEvent::ServiceQueueTimer()
+{
+ SkEvent_Globals& globals = getGlobals();
+
+ globals.fEventMutex.acquire();
+
+ bool wasEmpty = false;
+ SkMSec now = GetMSecsSinceStartup();
+ SkEvent* evt = globals.fDelayQHead;
+
+ while (evt)
+ {
+ if (SkMSec_LT(now, evt->fTime))
+ break;
+
+#ifdef SK_TRACE_EVENTS
+ --gDelayDepth;
+ SkDebugf("dequeue-delay %s (%d)", evt->getType(), gDelayDepth);
+ const char* idStr = evt->findString("id");
+ if (idStr)
+ SkDebugf(" (%s)", idStr);
+ SkDebugf("\n");
+#endif
+
+ SkEvent* next = evt->fNextEvent;
+ if (SkEvent::Enqueue(evt))
+ wasEmpty = true;
+ evt = next;
+ }
+ globals.fDelayQHead = evt;
+
+ SkMSec time = evt ? evt->fTime - now : 0;
+
+ globals.fEventMutex.release();
+
+ if (wasEmpty)
+ SkEvent::SignalNonEmptyQueue();
+
+ SkEvent::SignalQueueTimer(time);
+}
+
+int SkEvent::CountEventsOnQueue() {
+ SkEvent_Globals& globals = getGlobals();
+ globals.fEventMutex.acquire();
+
+ int count = 0;
+ const SkEvent* evt = globals.fEventQHead;
+ while (evt) {
+ count += 1;
+ evt = evt->fNextEvent;
+ }
+ globals.fEventMutex.release();
+
+ return count;
+}
+
+SkMSec SkEvent::GetMSecsSinceStartup() {
+ static const double kEpoch = SkTime::GetMSecs();
+ return static_cast<SkMSec>(SkTime::GetMSecs() - kEpoch);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkEvent::Init() {}
+
+void SkEvent::Term() {
+ SkEvent_Globals& globals = getGlobals();
+
+ SkEvent* evt = globals.fEventQHead;
+ while (evt) {
+ SkEvent* next = evt->fNextEvent;
+ delete evt;
+ evt = next;
+ }
+
+ evt = globals.fDelayQHead;
+ while (evt) {
+ SkEvent* next = evt->fNextEvent;
+ delete evt;
+ evt = next;
+ }
+}
diff --git a/gfx/skia/skia/src/views/SkEventSink.cpp b/gfx/skia/skia/src/views/SkEventSink.cpp
new file mode 100644
index 000000000..1464fa0fa
--- /dev/null
+++ b/gfx/skia/skia/src/views/SkEventSink.cpp
@@ -0,0 +1,300 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkEventSink.h"
+#include "SkMutex.h"
+#include "SkTagList.h"
+#include "SkTime.h"
+
+class SkEventSink_Globals {
+public:
+ SkEventSink_Globals() {
+ fNextSinkID = 0;
+ fSinkHead = nullptr;
+ }
+
+ SkMutex fSinkMutex;
+ SkEventSinkID fNextSinkID;
+ SkEventSink* fSinkHead;
+};
+
+static SkEventSink_Globals& getGlobals() {
+ // leak this, so we don't incur any shutdown perf hit
+ static SkEventSink_Globals* gGlobals = new SkEventSink_Globals;
+ return *gGlobals;
+}
+
+SkEventSink::SkEventSink() : fTagHead(nullptr) {
+ SkEventSink_Globals& globals = getGlobals();
+
+ globals.fSinkMutex.acquire();
+
+ fID = ++globals.fNextSinkID;
+ fNextSink = globals.fSinkHead;
+ globals.fSinkHead = this;
+
+ globals.fSinkMutex.release();
+}
+
+SkEventSink::~SkEventSink() {
+ SkEventSink_Globals& globals = getGlobals();
+
+ if (fTagHead)
+ SkTagList::DeleteAll(fTagHead);
+
+ globals.fSinkMutex.acquire();
+
+ SkEventSink* sink = globals.fSinkHead;
+ SkEventSink* prev = nullptr;
+
+ for (;;) {
+ SkEventSink* next = sink->fNextSink;
+ if (sink == this) {
+ if (prev) {
+ prev->fNextSink = next;
+ } else {
+ globals.fSinkHead = next;
+ }
+ break;
+ }
+ prev = sink;
+ sink = next;
+ }
+ globals.fSinkMutex.release();
+}
+
+bool SkEventSink::doEvent(const SkEvent& evt) {
+ return this->onEvent(evt);
+}
+
+bool SkEventSink::doQuery(SkEvent* evt) {
+ SkASSERT(evt);
+ return this->onQuery(evt);
+}
+
+bool SkEventSink::onEvent(const SkEvent&) {
+ return false;
+}
+
+bool SkEventSink::onQuery(SkEvent*) {
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkTagList* SkEventSink::findTagList(U8CPU tag) const {
+ return fTagHead ? SkTagList::Find(fTagHead, tag) : nullptr;
+}
+
+void SkEventSink::addTagList(SkTagList* rec) {
+ SkASSERT(rec);
+ SkASSERT(fTagHead == nullptr || SkTagList::Find(fTagHead, rec->fTag) == nullptr);
+
+ rec->fNext = fTagHead;
+ fTagHead = rec;
+}
+
+void SkEventSink::removeTagList(U8CPU tag) {
+ if (fTagHead) {
+ SkTagList::DeleteTag(&fTagHead, tag);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
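+// Listener IDs are kept in a tag list hung off the sink: fExtra16 holds the
+// count and fIDs the array of SkEventSinkIDs.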
+struct SkListenersTagList : SkTagList {
+ SkListenersTagList(U16CPU count) : SkTagList(kListeners_SkTagList)
+ {
+ fExtra16 = SkToU16(count);
+ fIDs = (SkEventSinkID*)sk_malloc_throw(count * sizeof(SkEventSinkID));
+ }
+ virtual ~SkListenersTagList()
+ {
+ sk_free(fIDs);
+ }
+
+ int countListners() const { return fExtra16; }
+
+ int find(SkEventSinkID id) const
+ {
+ const SkEventSinkID* idptr = fIDs;
+ for (int i = fExtra16 - 1; i >= 0; --i)
+ if (idptr[i] == id)
+ return i;
+ return -1;
+ }
+
+ SkEventSinkID* fIDs;
+};
+
+void SkEventSink::addListenerID(SkEventSinkID id)
+{
+ if (id == 0)
+ return;
+
+ SkListenersTagList* prev = (SkListenersTagList*)this->findTagList(kListeners_SkTagList);
+ int count = 0;
+
+ if (prev)
+ {
+ if (prev->find(id) >= 0)
+ return;
+ count = prev->countListners();
+ }
+
+ SkListenersTagList* next = new SkListenersTagList(count + 1);
+
+ if (prev)
+ {
+ memcpy(next->fIDs, prev->fIDs, count * sizeof(SkEventSinkID));
+ this->removeTagList(kListeners_SkTagList);
+ }
+ next->fIDs[count] = id;
+ this->addTagList(next);
+}
+
+void SkEventSink::copyListeners(const SkEventSink& sink)
+{
+ SkListenersTagList* sinkList = (SkListenersTagList*)sink.findTagList(kListeners_SkTagList);
+ if (sinkList == nullptr)
+ return;
+ SkASSERT(sinkList->countListners() > 0);
+ const SkEventSinkID* iter = sinkList->fIDs;
+ const SkEventSinkID* stop = iter + sinkList->countListners();
+ while (iter < stop)
+ addListenerID(*iter++);
+}
+
+void SkEventSink::removeListenerID(SkEventSinkID id)
+{
+ if (id == 0)
+ return;
+
+ SkListenersTagList* list = (SkListenersTagList*)this->findTagList(kListeners_SkTagList);
+
+ if (list == nullptr)
+ return;
+
+ int index = list->find(id);
+ if (index >= 0)
+ {
+ int count = list->countListners();
+ SkASSERT(count > 0);
+ if (count == 1)
+ this->removeTagList(kListeners_SkTagList);
+ else
+ {
+ // overwrite without resize/reallocating our struct (for speed)
+ list->fIDs[index] = list->fIDs[count - 1];
+ list->fExtra16 = SkToU16(count - 1);
+ }
+ }
+}
+
+bool SkEventSink::hasListeners() const
+{
+ return this->findTagList(kListeners_SkTagList) != nullptr;
+}
+
+void SkEventSink::postToListeners(const SkEvent& evt, SkMSec delay) {
+ SkListenersTagList* list = (SkListenersTagList*)this->findTagList(kListeners_SkTagList);
+ if (list) {
+ SkASSERT(list->countListners() > 0);
+ const SkEventSinkID* iter = list->fIDs;
+ const SkEventSinkID* stop = iter + list->countListners();
+ while (iter < stop) {
+ SkEvent* copy = new SkEvent(evt);
+ copy->setTargetID(*iter++)->postDelay(delay);
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkEventSink::EventResult SkEventSink::DoEvent(const SkEvent& evt) {
+ SkEvent::Proc proc = evt.getTargetProc();
+ if (proc) {
+ return proc(evt) ? kHandled_EventResult : kNotHandled_EventResult;
+ }
+
+ SkEventSink* sink = SkEventSink::FindSink(evt.getTargetID());
+ if (sink) {
+ return sink->doEvent(evt) ? kHandled_EventResult : kNotHandled_EventResult;
+ }
+
+ return kSinkNotFound_EventResult;
+}
+
+SkEventSink* SkEventSink::FindSink(SkEventSinkID sinkID)
+{
+ if (sinkID == 0)
+ return 0;
+
+ SkEventSink_Globals& globals = getGlobals();
+ SkAutoMutexAcquire ac(globals.fSinkMutex);
+ SkEventSink* sink = globals.fSinkHead;
+
+ while (sink)
+ {
+ if (sink->getSinkID() == sinkID)
+ return sink;
+ sink = sink->fNextSink;
+ }
+ return nullptr;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////
+
+#if 0 // experimental, not tested
+
+#include "SkMutex.h"
+#include "SkTDict.h"
+
+#define kMinStringBufferSize 128
+SK_DECLARE_STATIC_MUTEX(gNamedSinkMutex);
+static SkTDict<SkEventSinkID> gNamedSinkIDs(kMinStringBufferSize);
+
+/** Register a name/id pair with the system. If the name already exists,
+ replace its ID with the new id. This pair will persist until UnregisterNamedSink()
+ is called.
+*/
+void SkEventSink::RegisterNamedSinkID(const char name[], SkEventSinkID id)
+{
+ if (id && name && *name)
+ {
+ SkAutoMutexAcquire ac(gNamedSinkMutex);
+ gNamedSinkIDs.set(name, id);
+ }
+}
+
+/** Return the id that matches the specified name (from a previous call to
+ RegisterNamedSinkID()). If no match is found, return 0.
+*/
+SkEventSinkID SkEventSink::FindNamedSinkID(const char name[])
+{
+ SkEventSinkID id = 0;
+
+ if (name && *name)
+ {
+ SkAutoMutexAcquire ac(gNamedSinkMutex);
+ (void)gNamedSinkIDs.find(name, &id);
+ }
+ return id;
+}
+
+/** Remove all name/id pairs from the system. This is called internally
+ on shutdown, to ensure no memory leaks. It should not be called
+ before shutdown.
+*/
+void SkEventSink::RemoveAllNamedSinkIDs()
+{
+ SkAutoMutexAcquire ac(gNamedSinkMutex);
+ (void)gNamedSinkIDs.reset();
+}
+#endif
diff --git a/gfx/skia/skia/src/views/SkOSMenu.cpp b/gfx/skia/skia/src/views/SkOSMenu.cpp
new file mode 100644
index 000000000..ec92a7b4a
--- /dev/null
+++ b/gfx/skia/skia/src/views/SkOSMenu.cpp
@@ -0,0 +1,263 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkAtomics.h"
+#include "SkOSMenu.h"
+#include <stdarg.h>
+
+static int gOSMenuCmd = 7000;
+
+SkOSMenu::SkOSMenu(const char title[]) {
+ fTitle.set(title);
+}
+
+SkOSMenu::~SkOSMenu() {
+ this->reset();
+}
+
+void SkOSMenu::reset() {
+ fItems.deleteAll();
+ fTitle.reset();
+}
+
+const SkOSMenu::Item* SkOSMenu::getItemByID(int itemID) const {
+ for (int i = 0; i < fItems.count(); ++i) {
+ if (itemID == fItems[i]->getID())
+ return fItems[i];
+ }
+ return nullptr;
+}
+
+void SkOSMenu::getItems(const SkOSMenu::Item* items[]) const {
+ if (items) {
+ for (int i = 0; i < fItems.count(); ++i) {
+ items[i] = fItems[i];
+ }
+ }
+}
+
+void SkOSMenu::assignKeyEquivalentToItem(int itemID, SkUnichar key) {
+ for (int i = 0; i < fItems.count(); ++i) {
+ if (itemID == fItems[i]->getID())
+ fItems[i]->setKeyEquivalent(key);
+ }
+}
+
+bool SkOSMenu::handleKeyEquivalent(SkUnichar key) {
+ int value = 0, size = 0;
+ bool state;
+ SkOSMenu::TriState tristate;
+ for (int i = 0; i < fItems.count(); ++i) {
+ Item* item = fItems[i];
+ if (item->getKeyEquivalent() == key) {
+ SkString list;
+ switch (item->getType()) {
+ case kList_Type:
+ SkOSMenu::FindListItemCount(*item->getEvent(), &size);
+ SkOSMenu::FindListIndex(*item->getEvent(), item->getSlotName(), &value);
+ value = (value + 1) % size;
+ item->setInt(value);
+ break;
+ case kSwitch_Type:
+ SkOSMenu::FindSwitchState(*item->getEvent(), item->getSlotName(), &state);
+ item->setBool(!state);
+ break;
+ case kTriState_Type:
+ SkOSMenu::FindTriState(*item->getEvent(), item->getSlotName(), &tristate);
+ if (kOnState == tristate)
+ tristate = kMixedState;
+ else
+ tristate = (SkOSMenu::TriState)((int)tristate + 1);
+ item->setTriState(tristate);
+ break;
+ case kAction_Type:
+ case kCustom_Type:
+ case kSlider_Type:
+ case kTextField_Type:
+ default:
+ break;
+ }
+ item->postEvent();
+ return true;
+ }
+ }
+ return false;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+SkOSMenu::Item::Item(const char label[], SkOSMenu::Type type,
+ const char slotName[], SkEvent* evt) {
+ fLabel.set(label);
+ fSlotName.set(slotName);
+ fType = type;
+ fEvent = evt;
+ fKey = 0;
+ fID = sk_atomic_inc(&gOSMenuCmd);
+}
+
+void SkOSMenu::Item::setBool(bool value) const {
+ SkASSERT(SkOSMenu::kSwitch_Type == fType);
+ fEvent->setBool(fSlotName.c_str(), value);
+}
+
+void SkOSMenu::Item::setScalar(SkScalar value) const {
+ SkASSERT(SkOSMenu::kSlider_Type == fType);
+ fEvent->setScalar(fSlotName.c_str(), value);
+}
+
+void SkOSMenu::Item::setInt(int value) const {
+ SkASSERT(SkOSMenu::kList_Type == fType);
+ fEvent->setS32(fSlotName.c_str(), value);
+}
+
+void SkOSMenu::Item::setTriState(TriState value) const {
+ SkASSERT(SkOSMenu::kTriState_Type == fType);
+ fEvent->setS32(fSlotName.c_str(), value);
+}
+
+void SkOSMenu::Item::setString(const char value[]) const {
+ SkASSERT(SkOSMenu::kTextField_Type == fType);
+ fEvent->setString(fSlotName.c_str(), value);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+static const char* gMenuEventType = "SkOSMenuEventType";
+static const char* gSlider_Min_Scalar = "SkOSMenuSlider_Min";
+static const char* gSlider_Max_Scalar = "SkOSMenuSlider_Max";
+static const char* gDelimiter = "|";
+static const char* gList_Items_Str = "SkOSMenuList_Items";
+static const char* gList_ItemCount_S32 = "SkOSMenuList_ItemCount";
+
+int SkOSMenu::appendItem(const char label[], Type type, const char slotName[],
+ SkEvent* evt) {
+ SkOSMenu::Item* item = new Item(label, type, slotName, evt);
+ fItems.append(1, &item);
+ return item->getID();
+}
+
+int SkOSMenu::appendAction(const char label[], SkEventSinkID target) {
+ SkEvent* evt = new SkEvent(gMenuEventType, target);
+ // Store label in event so it can be used to identify the action later
+ evt->setString(label, label);
+ return appendItem(label, SkOSMenu::kAction_Type, "", evt);
+}
+
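+// The variadic option list must be nullptr-terminated, e.g. (illustrative):
+// menu.appendList("Mode", "mode", sinkID, 0, "A", "B", "C", nullptr);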
+int SkOSMenu::appendList(const char label[], const char slotName[],
+ SkEventSinkID target, int index, const char option[], ...) {
+ SkEvent* evt = new SkEvent(gMenuEventType, target);
+ va_list args;
+ if (option) {
+ SkString str(option);
+ va_start(args, option);
+ int count = 1;
+ for (const char* arg = va_arg(args, const char*); arg != nullptr; arg = va_arg(args, const char*)) {
+ str += gDelimiter;
+ str += arg;
+ ++count;
+ }
+ va_end(args);
+ evt->setString(gList_Items_Str, str);
+ evt->setS32(gList_ItemCount_S32, count);
+ evt->setS32(slotName, index);
+ }
+ return appendItem(label, SkOSMenu::kList_Type, slotName, evt);
+}
+
+int SkOSMenu::appendSlider(const char label[], const char slotName[],
+ SkEventSinkID target, SkScalar min, SkScalar max,
+ SkScalar defaultValue) {
+ SkEvent* evt = new SkEvent(gMenuEventType, target);
+ evt->setScalar(gSlider_Min_Scalar, min);
+ evt->setScalar(gSlider_Max_Scalar, max);
+ evt->setScalar(slotName, defaultValue);
+ return appendItem(label, SkOSMenu::kSlider_Type, slotName, evt);
+}
+
+int SkOSMenu::appendSwitch(const char label[], const char slotName[],
+ SkEventSinkID target, bool defaultState) {
+ SkEvent* evt = new SkEvent(gMenuEventType, target);
+ evt->setBool(slotName, defaultState);
+ return appendItem(label, SkOSMenu::kSwitch_Type, slotName, evt);
+}
+
+int SkOSMenu::appendTriState(const char label[], const char slotName[],
+ SkEventSinkID target, SkOSMenu::TriState defaultState) {
+ SkEvent* evt = new SkEvent(gMenuEventType, target);
+ evt->setS32(slotName, defaultState);
+ return appendItem(label, SkOSMenu::kTriState_Type, slotName, evt);
+}
+
+int SkOSMenu::appendTextField(const char label[], const char slotName[],
+ SkEventSinkID target, const char placeholder[]) {
+ SkEvent* evt = new SkEvent(gMenuEventType, target);
+ evt->setString(slotName, placeholder);
+ return appendItem(label, SkOSMenu::kTextField_Type, slotName, evt);
+}
+
+bool SkOSMenu::FindListItemCount(const SkEvent& evt, int* count) {
+ return evt.isType(gMenuEventType) && evt.findS32(gList_ItemCount_S32, count);
+}
+
+bool SkOSMenu::FindListItems(const SkEvent& evt, SkString items[]) {
+ if (evt.isType(gMenuEventType) && items) {
+ const char* text = evt.findString(gList_Items_Str);
+ if (text != nullptr) {
+ SkString temp(text);
+ char* token = strtok((char*)temp.c_str(), gDelimiter);
+ int index = 0;
+ while (token != nullptr) {
+ items[index].set(token, strlen(token));
+ token = strtok(nullptr, gDelimiter);
+ ++index;
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
+bool SkOSMenu::FindSliderMin(const SkEvent& evt, SkScalar* min) {
+ return evt.isType(gMenuEventType) && evt.findScalar(gSlider_Min_Scalar, min);
+}
+
+bool SkOSMenu::FindSliderMax(const SkEvent& evt, SkScalar* max) {
+ return evt.isType(gMenuEventType) && evt.findScalar(gSlider_Max_Scalar, max);
+}
+
+bool SkOSMenu::FindAction(const SkEvent& evt, const char label[]) {
+ return evt.isType(gMenuEventType) && evt.findString(label);
+}
+
+bool SkOSMenu::FindListIndex(const SkEvent& evt, const char slotName[], int* value) {
+ return evt.isType(gMenuEventType) && evt.findS32(slotName, value);
+}
+
+bool SkOSMenu::FindSliderValue(const SkEvent& evt, const char slotName[], SkScalar* value) {
+ return evt.isType(gMenuEventType) && evt.findScalar(slotName, value);
+}
+
+bool SkOSMenu::FindSwitchState(const SkEvent& evt, const char slotName[], bool* value) {
+ return evt.isType(gMenuEventType) && evt.findBool(slotName, value);
+}
+
+bool SkOSMenu::FindTriState(const SkEvent& evt, const char slotName[], SkOSMenu::TriState* value) {
+ return evt.isType(gMenuEventType) && evt.findS32(slotName, (int*)value);
+}
+
+bool SkOSMenu::FindText(const SkEvent& evt, const char slotName[], SkString* value) {
+ if (evt.isType(gMenuEventType)) {
+ const char* text = evt.findString(slotName);
+ if (!text || !*text)
+ return false;
+ else {
+ value->set(text);
+ return true;
+ }
+ }
+ return false;
+}
diff --git a/gfx/skia/skia/src/views/SkTagList.cpp b/gfx/skia/skia/src/views/SkTagList.cpp
new file mode 100644
index 000000000..27f3916ec
--- /dev/null
+++ b/gfx/skia/skia/src/views/SkTagList.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkTagList.h"
+
+SkTagList::~SkTagList()
+{
+}
+
+SkTagList* SkTagList::Find(SkTagList* rec, U8CPU tag)
+{
+ SkASSERT(tag < kSkTagListCount);
+
+ while (rec != nullptr)
+ {
+ if (rec->fTag == tag)
+ break;
+ rec = rec->fNext;
+ }
+ return rec;
+}
+
+void SkTagList::DeleteTag(SkTagList** head, U8CPU tag)
+{
+ SkASSERT(tag < kSkTagListCount);
+
+ SkTagList* rec = *head;
+ SkTagList* prev = nullptr;
+
+ while (rec != nullptr)
+ {
+ SkTagList* next = rec->fNext;
+
+ if (rec->fTag == tag)
+ {
+ if (prev)
+ prev->fNext = next;
+ else
+ *head = next;
+ delete rec;
+ break;
+ }
+ prev = rec;
+ rec = next;
+ }
+}
+
+void SkTagList::DeleteAll(SkTagList* rec)
+{
+ while (rec)
+ {
+ SkTagList* next = rec->fNext;
+ delete rec;
+ rec = next;
+ }
+}
diff --git a/gfx/skia/skia/src/views/SkTagList.h b/gfx/skia/skia/src/views/SkTagList.h
new file mode 100644
index 000000000..0b158e1ab
--- /dev/null
+++ b/gfx/skia/skia/src/views/SkTagList.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTagList_DEFINED
+#define SkTagList_DEFINED
+
+#include "SkTypes.h"
+
+enum SkTagListEnum {
+ kListeners_SkTagList,
+ kViewLayout_SkTagList,
+ kViewArtist_SkTagList,
+
+ kSkTagListCount
+};
+
+struct SkTagList {
+ SkTagList* fNext;
+ uint16_t fExtra16;
+ uint8_t fExtra8;
+ uint8_t fTag;
+
+ SkTagList(U8CPU tag) : fTag(SkToU8(tag))
+ {
+ SkASSERT(tag < kSkTagListCount);
+ fNext = nullptr;
+ fExtra16 = 0;
+ fExtra8 = 0;
+ }
+ virtual ~SkTagList();
+
+ static SkTagList* Find(SkTagList* head, U8CPU tag);
+ static void DeleteTag(SkTagList** headptr, U8CPU tag);
+ static void DeleteAll(SkTagList* head);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/views/SkTouchGesture.cpp b/gfx/skia/skia/src/views/SkTouchGesture.cpp
new file mode 100644
index 000000000..752828e37
--- /dev/null
+++ b/gfx/skia/skia/src/views/SkTouchGesture.cpp
@@ -0,0 +1,353 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <algorithm>
+
+#include "SkTouchGesture.h"
+#include "SkMatrix.h"
+#include "SkTime.h"
+
+#define DISCRETIZE_TRANSLATE_TO_AVOID_FLICKER true
+
+static const SkScalar MAX_FLING_SPEED = SkIntToScalar(1500);
+
+static SkScalar pin_max_fling(SkScalar speed) {
+ if (speed > MAX_FLING_SPEED) {
+ speed = MAX_FLING_SPEED;
+ }
+ return speed;
+}
+
+static double getseconds() {
+ return SkTime::GetMSecs() * 0.001;
+}
+
+// returns +1 or -1, depending on the sign of x
+// returns +1 if x is zero
+static SkScalar SkScalarSignNonZero(SkScalar x) {
+ SkScalar sign = SK_Scalar1;
+ if (x < 0) {
+ sign = -sign;
+ }
+ return sign;
+}
+
+static void unit_axis_align(SkVector* unit) {
+ const SkScalar TOLERANCE = SkDoubleToScalar(0.15);
+ if (SkScalarAbs(unit->fX) < TOLERANCE) {
+ unit->fX = 0;
+ unit->fY = SkScalarSignNonZero(unit->fY);
+ } else if (SkScalarAbs(unit->fY) < TOLERANCE) {
+ unit->fX = SkScalarSignNonZero(unit->fX);
+ unit->fY = 0;
+ }
+}
+
+void SkFlingState::reset(float sx, float sy) {
+ fActive = true;
+ fDirection.set(sx, sy);
+ fSpeed0 = SkPoint::Normalize(&fDirection);
+ fSpeed0 = pin_max_fling(fSpeed0);
+ fTime0 = getseconds();
+
+ unit_axis_align(&fDirection);
+// printf("---- speed %g dir %g %g\n", fSpeed0, fDirection.fX, fDirection.fY);
+}
+
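+// Fling speed decays exponentially, speed(t) = speed0 * (exp(-K0*t) - K1),
+// and evaluateMatrix() translates by the distance covered so far,
+// (speed0 - speed) / K0, along the fling direction.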
+bool SkFlingState::evaluateMatrix(SkMatrix* matrix) {
+ if (!fActive) {
+ return false;
+ }
+
+ const float t = (float)(getseconds() - fTime0);
+ const float MIN_SPEED = 2;
+ const float K0 = 5;
+ const float K1 = 0.02f;
+ const float speed = fSpeed0 * (sk_float_exp(- K0 * t) - K1);
+ if (speed <= MIN_SPEED) {
+ fActive = false;
+ return false;
+ }
+ float dist = (fSpeed0 - speed) / K0;
+
+// printf("---- time %g speed %g dist %g\n", t, speed, dist);
+ float tx = fDirection.fX * dist;
+ float ty = fDirection.fY * dist;
+ if (DISCRETIZE_TRANSLATE_TO_AVOID_FLICKER) {
+ tx = (float)sk_float_round2int(tx);
+ ty = (float)sk_float_round2int(ty);
+ }
+ matrix->setTranslate(tx, ty);
+// printf("---- evaluate (%g %g)\n", tx, ty);
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static const SkMSec MAX_DBL_TAP_INTERVAL = 300;
+static const float MAX_DBL_TAP_DISTANCE = 100;
+static const float MAX_JITTER_RADIUS = 2;
+
+// if true, then ignore the touch-move, 'cause it's probably just jitter
+static bool close_enough_for_jitter(float x0, float y0, float x1, float y1) {
+ return sk_float_abs(x0 - x1) <= MAX_JITTER_RADIUS &&
+ sk_float_abs(y0 - y1) <= MAX_JITTER_RADIUS;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkTouchGesture::SkTouchGesture() {
+ this->reset();
+}
+
+SkTouchGesture::~SkTouchGesture() {
+}
+
+void SkTouchGesture::reset() {
+ fIsTransLimited = false;
+ fTouches.reset();
+ fState = kEmpty_State;
+ fLocalM.reset();
+ fGlobalM.reset();
+
+ fLastUpMillis = SkTime::GetMSecs() - 2*MAX_DBL_TAP_INTERVAL;
+ fLastUpP.set(0, 0);
+}
+
+void SkTouchGesture::flushLocalM() {
+ fGlobalM.postConcat(fLocalM);
+ fLocalM.reset();
+}
+
+const SkMatrix& SkTouchGesture::localM() {
+ if (fFlinger.isActive()) {
+ if (!fFlinger.evaluateMatrix(&fLocalM)) {
+ this->flushLocalM();
+ }
+ }
+ return fLocalM;
+}
+
+void SkTouchGesture::appendNewRec(void* owner, float x, float y) {
+ Rec* rec = fTouches.append();
+ rec->fOwner = owner;
+ rec->fStartX = rec->fPrevX = rec->fLastX = x;
+ rec->fStartY = rec->fPrevY = rec->fLastY = y;
+ rec->fLastT = rec->fPrevT = static_cast<float>(SkTime::GetSecs());
+}
+
+void SkTouchGesture::touchBegin(void* owner, float x, float y) {
+// SkDebugf("--- %d touchBegin %p %g %g\n", fTouches.count(), owner, x, y);
+
+ int index = this->findRec(owner);
+ if (index >= 0) {
+ this->flushLocalM();
+ fTouches.removeShuffle(index);
+ SkDebugf("---- already exists, removing\n");
+ }
+
+ if (fTouches.count() == 2) {
+ return;
+ }
+
+ this->flushLocalM();
+ fFlinger.stop();
+
+ this->appendNewRec(owner, x, y);
+
+ switch (fTouches.count()) {
+ case 1:
+ fState = kTranslate_State;
+ break;
+ case 2:
+ fState = kZoom_State;
+ break;
+ default:
+ break;
+ }
+}
+
+int SkTouchGesture::findRec(void* owner) const {
+ for (int i = 0; i < fTouches.count(); i++) {
+ if (owner == fTouches[i].fOwner) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+static SkScalar center(float pos0, float pos1) {
+ return (pos0 + pos1) * 0.5f;
+}
+
+static const float MAX_ZOOM_SCALE = 4;
+static const float MIN_ZOOM_SCALE = 0.25f;
+
+float SkTouchGesture::limitTotalZoom(float scale) const {
+ // this query works 'cause we know that we're square-scale w/ no skew/rotation
+ const float curr = SkScalarToFloat(fGlobalM[0]);
+
+ if (scale > 1 && curr * scale > MAX_ZOOM_SCALE) {
+ scale = MAX_ZOOM_SCALE / curr;
+ } else if (scale < 1 && curr * scale < MIN_ZOOM_SCALE) {
+ scale = MIN_ZOOM_SCALE / curr;
+ }
+ return scale;
+}
+
+void SkTouchGesture::touchMoved(void* owner, float x, float y) {
+// SkDebugf("--- %d touchMoved %p %g %g\n", fTouches.count(), owner, x, y);
+
+ if (kEmpty_State == fState) {
+ return;
+ }
+
+ int index = this->findRec(owner);
+ if (index < 0) {
+ // not found, so I guess we should add it...
+ SkDebugf("---- add missing begin\n");
+ this->appendNewRec(owner, x, y);
+ index = fTouches.count() - 1;
+ }
+
+ Rec& rec = fTouches[index];
+
+ // not sure how valuable this is
+ if (fTouches.count() == 2) {
+ if (close_enough_for_jitter(rec.fLastX, rec.fLastY, x, y)) {
+// SkDebugf("--- drop touchMove, withing jitter tolerance %g %g\n", rec.fLastX - x, rec.fLastY - y);
+ return;
+ }
+ }
+
+ rec.fPrevX = rec.fLastX; rec.fLastX = x;
+ rec.fPrevY = rec.fLastY; rec.fLastY = y;
+ rec.fPrevT = rec.fLastT;
+ rec.fLastT = static_cast<float>(SkTime::GetSecs());
+
+ switch (fTouches.count()) {
+ case 1: {
+ float dx = rec.fLastX - rec.fStartX;
+ float dy = rec.fLastY - rec.fStartY;
+ dx = (float)sk_float_round2int(dx);
+ dy = (float)sk_float_round2int(dy);
+ fLocalM.setTranslate(dx, dy);
+ } break;
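+ // Two-finger pinch: scale about the midpoint of the two start points,
+ // then translate so that midpoint lands on the fingers' current midpoint.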
+ case 2: {
+ SkASSERT(kZoom_State == fState);
+ const Rec& rec0 = fTouches[0];
+ const Rec& rec1 = fTouches[1];
+
+ float scale = this->computePinch(rec0, rec1);
+ scale = this->limitTotalZoom(scale);
+
+ fLocalM.setTranslate(-center(rec0.fStartX, rec1.fStartX),
+ -center(rec0.fStartY, rec1.fStartY));
+ fLocalM.postScale(scale, scale);
+ fLocalM.postTranslate(center(rec0.fLastX, rec1.fLastX),
+ center(rec0.fLastY, rec1.fLastY));
+ } break;
+ default:
+ break;
+ }
+}
+
+void SkTouchGesture::touchEnd(void* owner) {
+// SkDebugf("--- %d touchEnd %p\n", fTouches.count(), owner);
+
+ int index = this->findRec(owner);
+ if (index < 0) {
+ SkDebugf("--- not found\n");
+ return;
+ }
+
+ const Rec& rec = fTouches[index];
+ if (this->handleDblTap(rec.fLastX, rec.fLastY)) {
+ return;
+ }
+
+ // count() reflects the number before we removed the owner
+ switch (fTouches.count()) {
+ case 1: {
+ this->flushLocalM();
+ float dx = rec.fLastX - rec.fPrevX;
+ float dy = rec.fLastY - rec.fPrevY;
+ float dur = rec.fLastT - rec.fPrevT;
+ if (dur > 0) {
+ fFlinger.reset(dx / dur, dy / dur);
+ }
+ fState = kEmpty_State;
+ } break;
+ case 2:
+ this->flushLocalM();
+ SkASSERT(kZoom_State == fState);
+ fState = kEmpty_State;
+ break;
+ default:
+ SkASSERT(kZoom_State == fState);
+ break;
+ }
+
+ fTouches.removeShuffle(index);
+
+ limitTrans();
+}
+
+float SkTouchGesture::computePinch(const Rec& rec0, const Rec& rec1) {
+ double dx = rec0.fStartX - rec1.fStartX;
+ double dy = rec0.fStartY - rec1.fStartY;
+ double dist0 = sqrt(dx*dx + dy*dy);
+
+ dx = rec0.fLastX - rec1.fLastX;
+ dy = rec0.fLastY - rec1.fLastY;
+ double dist1 = sqrt(dx*dx + dy*dy);
+
+ double scale = dist1 / dist0;
+ return (float)scale;
+}
+
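+// A second tap within MAX_DBL_TAP_INTERVAL ms and MAX_DBL_TAP_DISTANCE px of
+// the previous tap resets the whole gesture (matrices, touches, fling).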
+bool SkTouchGesture::handleDblTap(float x, float y) {
+ bool found = false;
+ double now = SkTime::GetMSecs();
+ if (now - fLastUpMillis <= MAX_DBL_TAP_INTERVAL) {
+ if (SkPoint::Length(fLastUpP.fX - x,
+ fLastUpP.fY - y) <= MAX_DBL_TAP_DISTANCE) {
+ fFlinger.stop();
+ fLocalM.reset();
+ fGlobalM.reset();
+ fTouches.reset();
+ fState = kEmpty_State;
+ found = true;
+ }
+ }
+
+ fLastUpMillis = now;
+ fLastUpP.set(x, y);
+ return found;
+}
+
+void SkTouchGesture::setTransLimit(const SkRect& contentRect, const SkRect& windowRect) {
+ fIsTransLimited = true;
+ fContentRect = contentRect;
+ fWindowRect = windowRect;
+}
+
+void SkTouchGesture::limitTrans() {
+ if (!fIsTransLimited) {
+ return;
+ }
+
+ SkRect scaledContent = fContentRect;
+ fGlobalM.mapRect(&scaledContent);
+ const SkScalar ZERO = 0;
+
+ fGlobalM.postTranslate(ZERO, std::min(ZERO, fWindowRect.fBottom - scaledContent.fTop));
+ fGlobalM.postTranslate(ZERO, std::max(ZERO, fWindowRect.fTop - scaledContent.fBottom));
+ fGlobalM.postTranslate(std::min(ZERO, fWindowRect.fRight - scaledContent.fLeft), ZERO);
+ fGlobalM.postTranslate(std::max(ZERO, fWindowRect.fLeft - scaledContent.fRight), ZERO);
+}
diff --git a/gfx/skia/skia/src/views/SkView.cpp b/gfx/skia/skia/src/views/SkView.cpp
new file mode 100644
index 000000000..492b2cdac
--- /dev/null
+++ b/gfx/skia/skia/src/views/SkView.cpp
@@ -0,0 +1,810 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkView.h"
+#include "SkCanvas.h"
+
+static inline uint32_t SkSetClearShift(uint32_t bits, bool cond, unsigned shift) {
+ SkASSERT((int)cond == 0 || (int)cond == 1);
+ return (bits & ~(1 << shift)) | ((int)cond << shift);
+}
+
+////////////////////////////////////////////////////////////////////////
+
+SkView::SkView(uint32_t flags) : fFlags(SkToU8(flags)) {
+ fWidth = fHeight = 0;
+ fLoc.set(0, 0);
+ fParent = fFirstChild = fNextSibling = fPrevSibling = nullptr;
+ fMatrix.setIdentity();
+ fContainsFocus = 0;
+}
+
+SkView::~SkView() {
+ this->detachAllChildren();
+}
+
+void SkView::setFlags(uint32_t flags) {
+ SkASSERT((flags & ~kAllFlagMasks) == 0);
+
+ uint32_t diff = fFlags ^ flags;
+
+ if (diff & kVisible_Mask)
+ this->inval(nullptr);
+
+ fFlags = SkToU8(flags);
+
+ if (diff & kVisible_Mask) {
+ this->inval(nullptr);
+ }
+}
+
+void SkView::setVisibleP(bool pred) {
+ this->setFlags(SkSetClearShift(fFlags, pred, kVisible_Shift));
+}
+
+void SkView::setEnabledP(bool pred) {
+ this->setFlags(SkSetClearShift(fFlags, pred, kEnabled_Shift));
+}
+
+void SkView::setFocusableP(bool pred) {
+ this->setFlags(SkSetClearShift(fFlags, pred, kFocusable_Shift));
+}
+
+void SkView::setClipToBounds(bool pred) {
+ this->setFlags(SkSetClearShift(fFlags, !pred, kNoClip_Shift));
+}
+
+void SkView::setSize(SkScalar width, SkScalar height) {
+ width = SkMaxScalar(0, width);
+ height = SkMaxScalar(0, height);
+
+ if (fWidth != width || fHeight != height)
+ {
+ this->inval(nullptr);
+ fWidth = width;
+ fHeight = height;
+ this->inval(nullptr);
+ this->onSizeChange();
+ this->invokeLayout();
+ }
+}
+
+void SkView::setLoc(SkScalar x, SkScalar y) {
+ if (fLoc.fX != x || fLoc.fY != y) {
+ this->inval(nullptr);
+ fLoc.set(x, y);
+ this->inval(nullptr);
+ }
+}
+
+void SkView::offset(SkScalar dx, SkScalar dy) {
+ if (dx || dy)
+ this->setLoc(fLoc.fX + dx, fLoc.fY + dy);
+}
+
+void SkView::setLocalMatrix(const SkMatrix& matrix) {
+ this->inval(nullptr);
+ fMatrix = matrix;
+ this->inval(nullptr);
+}
+
+void SkView::draw(SkCanvas* canvas) {
+ if (fWidth && fHeight && this->isVisible()) {
+ SkRect r;
+ r.set(fLoc.fX, fLoc.fY, fLoc.fX + fWidth, fLoc.fY + fHeight);
+ if (this->isClipToBounds() && canvas->quickReject(r)) {
+ return;
+ }
+
+ SkAutoCanvasRestore as(canvas, true);
+
+ if (this->isClipToBounds()) {
+ canvas->clipRect(r);
+ }
+
+ canvas->translate(fLoc.fX, fLoc.fY);
+ canvas->concat(fMatrix);
+
+ if (fParent) {
+ fParent->beforeChild(this, canvas);
+ }
+
+ int sc = canvas->save();
+ this->onDraw(canvas);
+ canvas->restoreToCount(sc);
+
+ if (fParent) {
+ fParent->afterChild(this, canvas);
+ }
+
+ B2FIter iter(this);
+ SkView* child;
+
+ SkCanvas* childCanvas = this->beforeChildren(canvas);
+
+ while ((child = iter.next()) != nullptr)
+ child->draw(childCanvas);
+
+ this->afterChildren(canvas);
+ }
+}
+
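+// Walk from this view up the parent chain, clipping the dirty rect to each
+// clip-to-bounds view and offsetting it into the parent's coordinates, until
+// some view handles the invalidation (handleInval returns true).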
+void SkView::inval(SkRect* rect) {
+ SkView* view = this;
+ SkRect storage;
+
+ for (;;) {
+ if (!view->isVisible()) {
+ return;
+ }
+ if (view->isClipToBounds()) {
+ SkRect bounds;
+ view->getLocalBounds(&bounds);
+ if (rect && !bounds.intersect(*rect)) {
+ return;
+ }
+ storage = bounds;
+ rect = &storage;
+ }
+ if (view->handleInval(rect)) {
+ return;
+ }
+
+ SkView* parent = view->fParent;
+ if (parent == nullptr) {
+ return;
+ }
+
+ if (rect) {
+ rect->offset(view->fLoc.fX, view->fLoc.fY);
+ }
+ view = parent;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+bool SkView::setFocusView(SkView* fv) {
+ SkView* view = this;
+
+ do {
+ if (view->onSetFocusView(fv)) {
+ return true;
+ }
+ } while ((view = view->fParent) != nullptr);
+ return false;
+}
+
+SkView* SkView::getFocusView() const {
+ SkView* focus = nullptr;
+ const SkView* view = this;
+ do {
+ if (view->onGetFocusView(&focus)) {
+ break;
+ }
+ } while ((view = view->fParent) != nullptr);
+ return focus;
+}
+
+bool SkView::hasFocus() const {
+ return this == this->getFocusView();
+}
+
+bool SkView::acceptFocus() {
+ return this->isFocusable() && this->setFocusView(this);
+}
+
+/*
+ Try to give focus to this view, or its children
+*/
+SkView* SkView::acceptFocus(FocusDirection dir) {
+ if (dir == kNext_FocusDirection) {
+ if (this->acceptFocus()) {
+ return this;
+ }
+ B2FIter iter(this);
+ SkView* child, *focus;
+ while ((child = iter.next()) != nullptr) {
+ if ((focus = child->acceptFocus(dir)) != nullptr) {
+ return focus;
+ }
+ }
+ } else { // prev
+ F2BIter iter(this);
+ SkView* child, *focus;
+ while ((child = iter.next()) != nullptr) {
+ if ((focus = child->acceptFocus(dir)) != nullptr) {
+ return focus;
+ }
+ }
+ if (this->acceptFocus()) {
+ return this;
+ }
+ }
+ return nullptr;
+}
+
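+// Depth-first walk of the view tree, starting at the current focus view (or
+// the root if nothing has focus), looking for the next/previous view that
+// accepts focus.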
+SkView* SkView::moveFocus(FocusDirection dir) {
+ SkView* focus = this->getFocusView();
+
+ if (focus == nullptr) { // start with the root
+ focus = this;
+ while (focus->fParent) {
+ focus = focus->fParent;
+ }
+ }
+
+ SkView* child, *parent;
+
+ if (dir == kNext_FocusDirection) {
+ parent = focus;
+ child = focus->fFirstChild;
+ if (child)
+ goto FIRST_CHILD;
+ else
+ goto NEXT_SIB;
+
+ do {
+ while (child != parent->fFirstChild) {
+ FIRST_CHILD:
+ if ((focus = child->acceptFocus(dir)) != nullptr)
+ return focus;
+ child = child->fNextSibling;
+ }
+ NEXT_SIB:
+ child = parent->fNextSibling;
+ parent = parent->fParent;
+ } while (parent != nullptr);
+ } else { // prevfocus
+ parent = focus->fParent;
+ if (parent == nullptr) { // we're the root
+ return focus->acceptFocus(dir);
+ } else {
+ child = focus;
+ while (parent) {
+ while (child != parent->fFirstChild) {
+ child = child->fPrevSibling;
+ if ((focus = child->acceptFocus(dir)) != nullptr) {
+ return focus;
+ }
+ }
+ if (parent->acceptFocus()) {
+ return parent;
+ }
+ child = parent;
+ parent = parent->fParent;
+ }
+ }
+ }
+ return nullptr;
+}
+
+void SkView::onFocusChange(bool gainFocusP) {
+ this->inval(nullptr);
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+SkView::Click::Click(SkView* target) {
+ SkASSERT(target);
+ fTargetID = target->getSinkID();
+ fType = nullptr;
+ fWeOwnTheType = false;
+ fOwner = nullptr;
+}
+
+SkView::Click::~Click() {
+ this->resetType();
+}
+
+void SkView::Click::resetType() {
+ if (fWeOwnTheType) {
+ sk_free(fType);
+ fWeOwnTheType = false;
+ }
+ fType = nullptr;
+}
+
+bool SkView::Click::isType(const char type[]) const {
+ const char* t = fType;
+
+ if (type == t) {
+ return true;
+ }
+ if (type == nullptr) {
+ type = "";
+ }
+ if (t == nullptr) {
+ t = "";
+ }
+ return !strcmp(t, type);
+}
+
+void SkView::Click::setType(const char type[]) {
+ this->resetType();
+ fType = (char*)type;
+}
+
+void SkView::Click::copyType(const char type[]) {
+ if (fType != type) {
+ this->resetType();
+ if (type) {
+ size_t len = strlen(type) + 1;
+ fType = (char*)sk_malloc_throw(len);
+ memcpy(fType, type, len);
+ fWeOwnTheType = true;
+ }
+ }
+}
+
+SkView::Click* SkView::findClickHandler(SkScalar x, SkScalar y, unsigned modi) {
+ if (x < 0 || y < 0 || x >= fWidth || y >= fHeight) {
+ return nullptr;
+ }
+
+ if (this->onSendClickToChildren(x, y, modi)) {
+ F2BIter iter(this);
+ SkView* child;
+
+ while ((child = iter.next()) != nullptr) {
+ SkPoint p;
+#if 0
+ if (!child->globalToLocal(x, y, &p)) {
+ continue;
+ }
+#else
+ // the above seems broken, so just respecting fLoc for now <reed>
+ p.set(x - child->fLoc.x(), y - child->fLoc.y());
+#endif
+
+ Click* click = child->findClickHandler(p.fX, p.fY, modi);
+
+ if (click) {
+ return click;
+ }
+ }
+ }
+
+ return this->onFindClickHandler(x, y, modi);
+}
+
+void SkView::DoClickDown(Click* click, int x, int y, unsigned modi) {
+ SkASSERT(click);
+
+ SkView* target = (SkView*)SkEventSink::FindSink(click->fTargetID);
+ if (nullptr == target) {
+ return;
+ }
+
+ click->fIOrig.set(x, y);
+ click->fICurr = click->fIPrev = click->fIOrig;
+
+ click->fOrig.iset(x, y);
+ if (!target->globalToLocal(&click->fOrig)) {
+ // no history to let us recover from this failure
+ return;
+ }
+ click->fPrev = click->fCurr = click->fOrig;
+
+ click->fState = Click::kDown_State;
+ click->fModifierKeys = modi;
+ target->onClick(click);
+}
+
+void SkView::DoClickMoved(Click* click, int x, int y, unsigned modi) {
+ SkASSERT(click);
+
+ SkView* target = (SkView*)SkEventSink::FindSink(click->fTargetID);
+ if (nullptr == target) {
+ return;
+ }
+
+ click->fIPrev = click->fICurr;
+ click->fICurr.set(x, y);
+
+ click->fPrev = click->fCurr;
+ click->fCurr.iset(x, y);
+ if (!target->globalToLocal(&click->fCurr)) {
+ // on failure pretend the mouse didn't move
+ click->fCurr = click->fPrev;
+ }
+
+ click->fState = Click::kMoved_State;
+ click->fModifierKeys = modi;
+ target->onClick(click);
+}
+
+void SkView::DoClickUp(Click* click, int x, int y, unsigned modi) {
+ SkASSERT(click);
+
+ SkView* target = (SkView*)SkEventSink::FindSink(click->fTargetID);
+ if (nullptr == target) {
+ return;
+ }
+
+ click->fIPrev = click->fICurr;
+ click->fICurr.set(x, y);
+
+ click->fPrev = click->fCurr;
+ click->fCurr.iset(x, y);
+ if (!target->globalToLocal(&click->fCurr)) {
+ // on failure pretend the mouse didn't move
+ click->fCurr = click->fPrev;
+ }
+
+ click->fState = Click::kUp_State;
+ click->fModifierKeys = modi;
+ target->onClick(click);
+}
+
+//////////////////////////////////////////////////////////////////////
+
+void SkView::invokeLayout() {
+ SkView::Layout* layout = this->getLayout();
+
+ if (layout) {
+ layout->layoutChildren(this);
+ }
+}
+
+void SkView::onDraw(SkCanvas* canvas) {
+ Artist* artist = this->getArtist();
+
+ if (artist) {
+ artist->draw(this, canvas);
+ }
+}
+
+void SkView::onSizeChange() {}
+
+bool SkView::onSendClickToChildren(SkScalar x, SkScalar y, unsigned modi) {
+ return true;
+}
+
+SkView::Click* SkView::onFindClickHandler(SkScalar x, SkScalar y, unsigned modi) {
+ return nullptr;
+}
+
+bool SkView::onClick(Click*) {
+ return false;
+}
+
+bool SkView::handleInval(const SkRect*) {
+ return false;
+}
+
+//////////////////////////////////////////////////////////////////////
+
+void SkView::getLocalBounds(SkRect* bounds) const {
+ if (bounds) {
+ bounds->set(0, 0, fWidth, fHeight);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////
+
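+// Children form a circular doubly-linked sibling list; fFirstChild is the
+// back-most child and fNextSibling/fPrevSibling wrap around.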
+void SkView::detachFromParent_NoLayout() {
+ this->validate();
+ if (fParent == nullptr) {
+ return;
+ }
+
+ if (fContainsFocus) {
+ (void)this->setFocusView(nullptr);
+ }
+
+ this->inval(nullptr);
+
+ SkView* next = nullptr;
+
+ if (fNextSibling != this) { // do we have any siblings
+ fNextSibling->fPrevSibling = fPrevSibling;
+ fPrevSibling->fNextSibling = fNextSibling;
+ next = fNextSibling;
+ }
+
+ if (fParent->fFirstChild == this) {
+ fParent->fFirstChild = next;
+ }
+
+ fParent = fNextSibling = fPrevSibling = nullptr;
+
+ this->validate();
+ this->unref();
+}
+
+void SkView::detachFromParent() {
+ this->validate();
+ SkView* parent = fParent;
+
+ if (parent) {
+ this->detachFromParent_NoLayout();
+ parent->invokeLayout();
+ }
+}
+
+SkView* SkView::attachChildToBack(SkView* child) {
+ this->validate();
+ SkASSERT(child != this);
+
+ if (child == nullptr || fFirstChild == child)
+ goto DONE;
+
+ child->ref();
+ child->detachFromParent_NoLayout();
+
+ if (fFirstChild == nullptr) {
+ child->fNextSibling = child;
+ child->fPrevSibling = child;
+ } else {
+ child->fNextSibling = fFirstChild;
+ child->fPrevSibling = fFirstChild->fPrevSibling;
+ fFirstChild->fPrevSibling->fNextSibling = child;
+ fFirstChild->fPrevSibling = child;
+ }
+
+ fFirstChild = child;
+ child->fParent = this;
+ child->inval(nullptr);
+
+ this->validate();
+ this->invokeLayout();
+DONE:
+ return child;
+}
+
+SkView* SkView::attachChildToFront(SkView* child) {
+ this->validate();
+ SkASSERT(child != this);
+
+ if (child == nullptr || (fFirstChild && fFirstChild->fPrevSibling == child))
+ goto DONE;
+
+ child->ref();
+ child->detachFromParent_NoLayout();
+
+ if (fFirstChild == nullptr) {
+ fFirstChild = child;
+ child->fNextSibling = child;
+ child->fPrevSibling = child;
+ } else {
+ child->fNextSibling = fFirstChild;
+ child->fPrevSibling = fFirstChild->fPrevSibling;
+ fFirstChild->fPrevSibling->fNextSibling = child;
+ fFirstChild->fPrevSibling = child;
+ }
+
+ child->fParent = this;
+ child->inval(nullptr);
+
+ this->validate();
+ this->invokeLayout();
+DONE:
+ return child;
+}
+
+void SkView::detachAllChildren() {
+ this->validate();
+ while (fFirstChild)
+ fFirstChild->detachFromParent_NoLayout();
+}
+
+void SkView::localToGlobal(SkMatrix* matrix) const {
+ if (matrix) {
+ matrix->reset();
+ const SkView* view = this;
+ while (view)
+ {
+ matrix->preConcat(view->getLocalMatrix());
+ matrix->preTranslate(-view->fLoc.fX, -view->fLoc.fY);
+ view = view->fParent;
+ }
+ }
+}
+
+bool SkView::globalToLocal(SkScalar x, SkScalar y, SkPoint* local) const {
+ if (local) {
+ SkMatrix m;
+ this->localToGlobal(&m);
+ if (!m.invert(&m)) {
+ return false;
+ }
+ SkPoint p;
+ m.mapXY(x, y, &p);
+ local->set(p.fX, p.fY);
+ }
+
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////
+
+/* Even if a subclass overrides onInflate, it should always call the
+ inherited method, so that we get called.
+*/
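+/* A subclass override would typically look like this (illustrative sketch;
+ MyView is hypothetical and INHERITED is the usual Skia typedef convention):
+
+ void MyView::onInflate(const SkDOM& dom, const SkDOM::Node* node) {
+ this->INHERITED::onInflate(dom, node);
+ // ...then read MyView-specific attributes from the node...
+ }
+*/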
+void SkView::onInflate(const SkDOM& dom, const SkDOM::Node* node) {
+ SkScalar x, y;
+
+ x = this->locX();
+ y = this->locY();
+ (void)dom.findScalar(node, "x", &x);
+ (void)dom.findScalar(node, "y", &y);
+ this->setLoc(x, y);
+
+ x = this->width();
+ y = this->height();
+ (void)dom.findScalar(node, "width", &x);
+ (void)dom.findScalar(node, "height", &y);
+ this->setSize(x, y);
+
+ // inflate the flags
+
+ static const char* gFlagNames[] = {
+ "visible", "enabled", "focusable", "flexH", "flexV"
+ };
+ SkASSERT(SK_ARRAY_COUNT(gFlagNames) == kFlagShiftCount);
+
+ bool b;
+ uint32_t flags = this->getFlags();
+ for (unsigned i = 0; i < SK_ARRAY_COUNT(gFlagNames); i++) {
+ if (dom.findBool(node, gFlagNames[i], &b)) {
+ flags = SkSetClearShift(flags, b, i);
+ }
+ }
+ this->setFlags(flags);
+}
+
+void SkView::inflate(const SkDOM& dom, const SkDOM::Node* node) {
+ this->onInflate(dom, node);
+}
+
+void SkView::onPostInflate(const SkTDict<SkView*>&) {
+ // override in subclass as needed
+}
+
+void SkView::postInflate(const SkTDict<SkView*>& dict) {
+ this->onPostInflate(dict);
+
+ B2FIter iter(this);
+ SkView* child;
+ while ((child = iter.next()) != nullptr)
+ child->postInflate(dict);
+}
+
+//////////////////////////////////////////////////////////////////
+
+SkView* SkView::sendEventToParents(const SkEvent& evt) {
+ SkView* parent = fParent;
+
+ while (parent) {
+ if (parent->doEvent(evt)) {
+ return parent;
+ }
+ parent = parent->fParent;
+ }
+ return nullptr;
+}
+
+SkView* SkView::sendQueryToParents(SkEvent* evt) {
+ SkView* parent = fParent;
+
+ while (parent) {
+ if (parent->doQuery(evt)) {
+ return parent;
+ }
+ parent = parent->fParent;
+ }
+ return nullptr;
+}
+
+//////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////
+
+SkView::F2BIter::F2BIter(const SkView* parent) {
+ fFirstChild = parent ? parent->fFirstChild : nullptr;
+ fChild = fFirstChild ? fFirstChild->fPrevSibling : nullptr;
+}
+
+SkView* SkView::F2BIter::next() {
+ SkView* curr = fChild;
+
+ if (fChild) {
+ if (fChild == fFirstChild) {
+ fChild = nullptr;
+ } else {
+ fChild = fChild->fPrevSibling;
+ }
+ }
+ return curr;
+}
+
+SkView::B2FIter::B2FIter(const SkView* parent) {
+ fFirstChild = parent ? parent->fFirstChild : nullptr;
+ fChild = fFirstChild;
+}
+
+SkView* SkView::B2FIter::next() {
+ SkView* curr = fChild;
+
+ if (fChild) {
+ SkView* next = fChild->fNextSibling;
+ if (next == fFirstChild)
+ next = nullptr;
+ fChild = next;
+ }
+ return curr;
+}
+
+//////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+
+void SkView::validate() const {
+// SkASSERT(this->getRefCnt() > 0 && this->getRefCnt() < 100);
+ if (fParent) {
+ SkASSERT(fNextSibling);
+ SkASSERT(fPrevSibling);
+ } else {
+ bool nextNull = nullptr == fNextSibling;
+        bool prevNull = nullptr == fPrevSibling;
+ SkASSERT(nextNull == prevNull);
+ }
+}
+
+static inline void show_if_nonzero(const char name[], SkScalar value) {
+ if (value) {
+ SkDebugf("%s=\"%g\"", name, value/65536.);
+ }
+}
+
+static void tab(int level) {
+ for (int i = 0; i < level; i++) {
+ SkDebugf(" ");
+ }
+}
+
+static void dumpview(const SkView* view, int level, bool recurse) {
+ tab(level);
+
+ SkDebugf("<view");
+ show_if_nonzero(" x", view->locX());
+ show_if_nonzero(" y", view->locY());
+ show_if_nonzero(" width", view->width());
+ show_if_nonzero(" height", view->height());
+
+ if (recurse) {
+ SkView::B2FIter iter(view);
+ SkView* child;
+ bool noChildren = true;
+
+ while ((child = iter.next()) != nullptr) {
+ if (noChildren) {
+ SkDebugf(">\n");
+ }
+ noChildren = false;
+ dumpview(child, level + 1, true);
+ }
+
+ if (!noChildren) {
+ tab(level);
+ SkDebugf("</view>\n");
+ } else {
+ goto ONELINER;
+ }
+ } else {
+ ONELINER:
+ SkDebugf(" />\n");
+ }
+}
+
+void SkView::dump(bool recurse) const {
+ dumpview(this, 0, recurse);
+}
+
+#endif
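
The code above keeps each view's children in a circular doubly-linked sibling list: fFirstChild is the entry point, attach splices a child in just before it, and the F2B/B2F iterators walk until they wrap back around to it. A minimal standalone sketch of that list structure follows; the Node type and its member names are illustrative only, not Skia API.

    #include <cstdio>

    // Sketch: circular doubly-linked child list, mirroring the
    // fFirstChild/fNextSibling/fPrevSibling wiring in SkView above.
    struct Node {
        const char* name;
        Node* parent;
        Node* firstChild;
        Node* next;
        Node* prev;

        explicit Node(const char* n)
            : name(n), parent(nullptr), firstChild(nullptr), next(nullptr), prev(nullptr) {}

        void appendChild(Node* child) {
            if (firstChild == nullptr) {
                firstChild = child;
                child->next = child;        // a single child points at itself
                child->prev = child;
            } else {
                child->next = firstChild;   // splice in just before firstChild
                child->prev = firstChild->prev;
                firstChild->prev->next = child;
                firstChild->prev = child;
            }
            child->parent = this;
        }

        void dumpChildren() const {         // back-to-front walk, like B2FIter
            const Node* child = firstChild;
            if (!child) return;
            do {
                std::printf("%s\n", child->name);
                child = child->next;
            } while (child != firstChild);
        }
    };

    int main() {
        Node root("root"), a("a"), b("b");
        root.appendChild(&a);
        root.appendChild(&b);
        root.dumpChildren();                // prints "a" then "b"
        return 0;
    }
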
diff --git a/gfx/skia/skia/src/views/SkViewPriv.cpp b/gfx/skia/skia/src/views/SkViewPriv.cpp
new file mode 100644
index 000000000..7dbe5f1ca
--- /dev/null
+++ b/gfx/skia/skia/src/views/SkViewPriv.cpp
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkViewPriv.h"
+
+//////////////////////////////////////////////////////////////////////
+
+void SkView::Artist::draw(SkView* view, SkCanvas* canvas)
+{
+ SkASSERT(view && canvas);
+ this->onDraw(view, canvas);
+}
+
+void SkView::Artist::inflate(const SkDOM& dom, const SkDOM::Node* node)
+{
+ SkASSERT(node);
+ this->onInflate(dom, node);
+}
+
+void SkView::Artist::onInflate(const SkDOM&, const SkDOM::Node*)
+{
+ // subclass should override this as needed
+}
+
+SkView::Artist* SkView::getArtist() const
+{
+ Artist_SkTagList* rec = (Artist_SkTagList*)this->findTagList(kViewArtist_SkTagList);
+ SkASSERT(rec == nullptr || rec->fArtist != nullptr);
+
+ return rec ? rec->fArtist : nullptr;
+}
+
+SkView::Artist* SkView::setArtist(Artist* obj)
+{
+ if (obj == nullptr)
+ {
+ this->removeTagList(kViewArtist_SkTagList);
+ }
+ else // add/replace
+ {
+ Artist_SkTagList* rec = (Artist_SkTagList*)this->findTagList(kViewArtist_SkTagList);
+
+ if (rec)
+ SkRefCnt_SafeAssign(rec->fArtist, obj);
+ else
+ this->addTagList(new Artist_SkTagList(obj));
+ }
+ return obj;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void SkView::Layout::layoutChildren(SkView* parent)
+{
+ SkASSERT(parent);
+ if (parent->width() > 0 && parent->height() > 0)
+ this->onLayoutChildren(parent);
+}
+
+void SkView::Layout::inflate(const SkDOM& dom, const SkDOM::Node* node)
+{
+ SkASSERT(node);
+ this->onInflate(dom, node);
+}
+
+void SkView::Layout::onInflate(const SkDOM&, const SkDOM::Node*)
+{
+ // subclass should override this as needed
+}
+
+SkView::Layout* SkView::getLayout() const
+{
+ Layout_SkTagList* rec = (Layout_SkTagList*)this->findTagList(kViewLayout_SkTagList);
+ SkASSERT(rec == nullptr || rec->fLayout != nullptr);
+
+ return rec ? rec->fLayout : nullptr;
+}
+
+SkView::Layout* SkView::setLayout(Layout* obj, bool invokeLayoutNow)
+{
+ if (obj == nullptr)
+ {
+ this->removeTagList(kViewLayout_SkTagList);
+ }
+ else // add/replace
+ {
+ Layout_SkTagList* rec = (Layout_SkTagList*)this->findTagList(kViewLayout_SkTagList);
+
+ if (rec)
+ SkRefCnt_SafeAssign(rec->fLayout, obj);
+ else
+ this->addTagList(new Layout_SkTagList(obj));
+ }
+
+ if (invokeLayoutNow)
+ this->invokeLayout();
+
+ return obj;
+}
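
The Layout attached above is what invokeLayout ultimately drives through Layout::layoutChildren. A hedged sketch of plugging in a custom layout follows; it assumes the protected onLayoutChildren(SkView*) override point in SkView.h and that the tag list's own reference lets the caller unref after setLayout, neither of which is shown in this file.

    #include "SkView.h"

    // Sketch only: a layout that stretches every child to fill its parent.
    // The class name and sizing policy are illustrative, not Skia API.
    class FillLayout : public SkView::Layout {
    protected:
        void onLayoutChildren(SkView* parent) override {
            SkView::B2FIter iter(parent);
            SkView* child;
            while ((child = iter.next()) != nullptr) {
                child->setLoc(0, 0);
                child->setSize(parent->width(), parent->height());
            }
        }
    };

    // Typical wiring (assumption: the tag list holds its own ref, so the
    // creator's ref can be dropped):
    //   view->setLayout(new FillLayout, /*invokeLayoutNow=*/true)->unref();
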
diff --git a/gfx/skia/skia/src/views/SkViewPriv.h b/gfx/skia/skia/src/views/SkViewPriv.h
new file mode 100644
index 000000000..3b7645712
--- /dev/null
+++ b/gfx/skia/skia/src/views/SkViewPriv.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkViewPriv_DEFINED
+#define SkViewPriv_DEFINED
+
+#include "SkView.h"
+#include "SkTagList.h"
+
+struct Layout_SkTagList : SkTagList {
+ SkView::Layout* fLayout;
+
+ Layout_SkTagList(SkView::Layout* layout)
+ : SkTagList(kViewLayout_SkTagList), fLayout(layout)
+ {
+ SkASSERT(layout);
+ layout->ref();
+ }
+ virtual ~Layout_SkTagList()
+ {
+ fLayout->unref();
+ }
+};
+
+struct Artist_SkTagList : SkTagList {
+ SkView::Artist* fArtist;
+
+ Artist_SkTagList(SkView::Artist* artist)
+ : SkTagList(kViewArtist_SkTagList), fArtist(artist)
+ {
+ SkASSERT(artist);
+ artist->ref();
+ }
+ virtual ~Artist_SkTagList()
+ {
+ fArtist->unref();
+ }
+};
+
+#endif
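
Both tag structs above take a strong reference in their constructor and drop it in their destructor, so the view holding the tag list keeps its Artist or Layout alive for exactly as long as the tag stays attached. A standalone sketch of that ownership pattern, using an illustrative ref-counted type rather than the Skia classes:

    #include <cassert>

    // Sketch: manual ref-counting plus an owner that refs on attach and
    // unrefs on destruction, mirroring Layout_SkTagList / Artist_SkTagList.
    struct Counted {
        int refCnt = 1;                     // creator starts with one reference
        void ref()   { ++refCnt; }
        void unref() { if (--refCnt == 0) delete this; }
    };

    struct Owner {
        Counted* fObj;
        explicit Owner(Counted* obj) : fObj(obj) {
            assert(obj);
            obj->ref();                     // the owner holds its own reference
        }
        ~Owner() { fObj->unref(); }
    };

    int main() {
        Counted* c = new Counted;           // refCnt == 1
        {
            Owner owner(c);                 // refCnt == 2
        }                                   // owner destroyed -> refCnt == 1
        c->unref();                         // creator's ref dropped -> deleted
        return 0;
    }
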
diff --git a/gfx/skia/skia/src/views/SkWindow.cpp b/gfx/skia/skia/src/views/SkWindow.cpp
new file mode 100644
index 000000000..6e1ebf7a7
--- /dev/null
+++ b/gfx/skia/skia/src/views/SkWindow.cpp
@@ -0,0 +1,361 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkWindow.h"
+#include "SkCanvas.h"
+#include "SkOSMenu.h"
+#include "SkSurface.h"
+#include "SkSystemEventTypes.h"
+#include "SkTime.h"
+
+#define SK_EventDelayInval "\xd" "n" "\xa" "l"
+
+SkWindow::SkWindow()
+ : fSurfaceProps(SkSurfaceProps::kLegacyFontHost_InitType)
+ , fFocusView(nullptr)
+{
+ fClicks.reset();
+ fWaitingOnInval = false;
+ fMatrix.reset();
+
+ fBitmap.allocN32Pixels(0, 0);
+}
+
+SkWindow::~SkWindow() {
+ fClicks.deleteAll();
+ fMenus.deleteAll();
+}
+
+sk_sp<SkSurface> SkWindow::makeSurface() {
+ const SkBitmap& bm = this->getBitmap();
+ return SkSurface::MakeRasterDirect(bm.info(), bm.getPixels(), bm.rowBytes(), &fSurfaceProps);
+}
+
+void SkWindow::setMatrix(const SkMatrix& matrix) {
+ if (fMatrix != matrix) {
+ fMatrix = matrix;
+ this->inval(nullptr);
+ }
+}
+
+void SkWindow::preConcat(const SkMatrix& matrix) {
+ SkMatrix m;
+ m.setConcat(fMatrix, matrix);
+ this->setMatrix(m);
+}
+
+void SkWindow::postConcat(const SkMatrix& matrix) {
+ SkMatrix m;
+ m.setConcat(matrix, fMatrix);
+ this->setMatrix(m);
+}
+
+void SkWindow::resize(const SkImageInfo& info) {
+ if (fBitmap.info() != info) {
+ fBitmap.allocPixels(info);
+ this->inval(nullptr);
+ }
+ this->setSize(SkIntToScalar(fBitmap.width()), SkIntToScalar(fBitmap.height()));
+}
+
+void SkWindow::resize(int width, int height) {
+ this->resize(fBitmap.info().makeWH(width, height));
+}
+
+void SkWindow::setColorType(SkColorType ct, sk_sp<SkColorSpace> cs) {
+ const SkImageInfo& info = fBitmap.info();
+ this->resize(SkImageInfo::Make(info.width(), info.height(), ct, kPremul_SkAlphaType, cs));
+}
+
+bool SkWindow::handleInval(const SkRect* localR) {
+ SkIRect ir;
+
+ if (localR) {
+ SkRect devR;
+ SkMatrix inverse;
+ if (!fMatrix.invert(&inverse)) {
+ return false;
+ }
+ fMatrix.mapRect(&devR, *localR);
+ devR.round(&ir);
+ } else {
+ ir.set(0, 0,
+ SkScalarRoundToInt(this->width()),
+ SkScalarRoundToInt(this->height()));
+ }
+ fDirtyRgn.op(ir, SkRegion::kUnion_Op);
+
+ this->onHandleInval(ir);
+ return true;
+}
+
+void SkWindow::forceInvalAll() {
+ fDirtyRgn.setRect(0, 0,
+ SkScalarCeilToInt(this->width()),
+ SkScalarCeilToInt(this->height()));
+}
+
+#ifdef SK_SIMULATE_FAILED_MALLOC
+extern bool gEnableControlledThrow;
+#endif
+
+bool SkWindow::update(SkIRect* updateArea) {
+ if (!fDirtyRgn.isEmpty()) {
+ sk_sp<SkSurface> surface(this->makeSurface());
+ SkCanvas* canvas = surface->getCanvas();
+
+ canvas->clipRegion(fDirtyRgn);
+ if (updateArea) {
+ *updateArea = fDirtyRgn.getBounds();
+ }
+
+ SkAutoCanvasRestore acr(canvas, true);
+ canvas->concat(fMatrix);
+
+ // empty this now, so we can correctly record any inval calls that
+ // might be made during the draw call.
+ fDirtyRgn.setEmpty();
+
+#ifdef SK_SIMULATE_FAILED_MALLOC
+ gEnableControlledThrow = true;
+#endif
+#ifdef SK_BUILD_FOR_WIN32
+ //try {
+ this->draw(canvas);
+ //}
+ //catch (...) {
+ //}
+#else
+ this->draw(canvas);
+#endif
+#ifdef SK_SIMULATE_FAILED_MALLOC
+ gEnableControlledThrow = false;
+#endif
+
+ return true;
+ }
+ return false;
+}
+
+bool SkWindow::handleChar(SkUnichar uni) {
+ if (this->onHandleChar(uni))
+ return true;
+
+ SkView* focus = this->getFocusView();
+ if (focus == nullptr)
+ focus = this;
+
+ SkEvent evt(SK_EventType_Unichar);
+ evt.setFast32(uni);
+ return focus->doEvent(evt);
+}
+
+bool SkWindow::handleKey(SkKey key) {
+ if (key == kNONE_SkKey)
+ return false;
+
+ if (this->onHandleKey(key))
+ return true;
+
+ // send an event to the focus-view
+ {
+ SkView* focus = this->getFocusView();
+ if (focus == nullptr)
+ focus = this;
+
+ SkEvent evt(SK_EventType_Key);
+ evt.setFast32(key);
+ if (focus->doEvent(evt))
+ return true;
+ }
+
+ if (key == kUp_SkKey || key == kDown_SkKey) {
+ if (this->moveFocus(key == kUp_SkKey ? kPrev_FocusDirection : kNext_FocusDirection) == nullptr)
+ this->onSetFocusView(nullptr);
+ return true;
+ }
+ return false;
+}
+
+bool SkWindow::handleKeyUp(SkKey key) {
+ if (key == kNONE_SkKey)
+ return false;
+
+ if (this->onHandleKeyUp(key))
+ return true;
+
+    // send an event to the focus-view
+ {
+ SkView* focus = this->getFocusView();
+ if (focus == nullptr)
+ focus = this;
+
+ //should this one be the same?
+ SkEvent evt(SK_EventType_KeyUp);
+ evt.setFast32(key);
+ if (focus->doEvent(evt))
+ return true;
+ }
+ return false;
+}
+
+void SkWindow::addMenu(SkOSMenu* menu) {
+ *fMenus.append() = menu;
+ this->onAddMenu(menu);
+}
+
+void SkWindow::setTitle(const char title[]) {
+ if (nullptr == title) {
+ title = "";
+ }
+ fTitle.set(title);
+ this->onSetTitle(title);
+}
+
+bool SkWindow::onEvent(const SkEvent& evt) {
+ if (evt.isType(SK_EventDelayInval)) {
+ for (SkRegion::Iterator iter(fDirtyRgn); !iter.done(); iter.next())
+ this->onHandleInval(iter.rect());
+ fWaitingOnInval = false;
+ return true;
+ }
+ return this->INHERITED::onEvent(evt);
+}
+
+bool SkWindow::onGetFocusView(SkView** focus) const {
+ if (focus)
+ *focus = fFocusView;
+ return true;
+}
+
+bool SkWindow::onSetFocusView(SkView* focus) {
+ if (fFocusView != focus) {
+ if (fFocusView)
+ fFocusView->onFocusChange(false);
+ fFocusView = focus;
+ if (focus)
+ focus->onFocusChange(true);
+ }
+ return true;
+}
+
+void SkWindow::onHandleInval(const SkIRect&) {
+}
+
+bool SkWindow::onHandleChar(SkUnichar) {
+ return false;
+}
+
+bool SkWindow::onHandleKey(SkKey) {
+ return false;
+}
+
+bool SkWindow::onHandleKeyUp(SkKey) {
+ return false;
+}
+
+bool SkWindow::handleClick(int x, int y, Click::State state, void *owner,
+ unsigned modifierKeys) {
+ return this->onDispatchClick(x, y, state, owner, modifierKeys);
+}
+
+bool SkWindow::onDispatchClick(int x, int y, Click::State state,
+ void* owner, unsigned modifierKeys) {
+ bool handled = false;
+
+ // First, attempt to find an existing click with this owner.
+ int index = -1;
+ for (int i = 0; i < fClicks.count(); i++) {
+ if (owner == fClicks[i]->fOwner) {
+ index = i;
+ break;
+ }
+ }
+
+ switch (state) {
+ case Click::kDown_State: {
+ if (index != -1) {
+ delete fClicks[index];
+ fClicks.remove(index);
+ }
+ Click* click = this->findClickHandler(SkIntToScalar(x),
+ SkIntToScalar(y), modifierKeys);
+
+ if (click) {
+ click->fOwner = owner;
+ *fClicks.append() = click;
+ SkView::DoClickDown(click, x, y, modifierKeys);
+ handled = true;
+ }
+ break;
+ }
+ case Click::kMoved_State:
+ if (index != -1) {
+ SkView::DoClickMoved(fClicks[index], x, y, modifierKeys);
+ handled = true;
+ }
+ break;
+ case Click::kUp_State:
+ if (index != -1) {
+ SkView::DoClickUp(fClicks[index], x, y, modifierKeys);
+ delete fClicks[index];
+ fClicks.remove(index);
+ handled = true;
+ }
+ break;
+ default:
+ // Do nothing
+ break;
+ }
+ return handled;
+}
+
+#if SK_SUPPORT_GPU
+
+#include "GrContext.h"
+#include "gl/GrGLInterface.h"
+#include "gl/GrGLUtil.h"
+#include "SkGr.h"
+
+sk_sp<SkSurface> SkWindow::makeGpuBackedSurface(const AttachmentInfo& attachmentInfo,
+ const GrGLInterface* interface,
+ GrContext* grContext) {
+ GrBackendRenderTargetDesc desc;
+ desc.fWidth = SkScalarRoundToInt(this->width());
+ desc.fHeight = SkScalarRoundToInt(this->height());
+ if (0 == desc.fWidth || 0 == desc.fHeight) {
+ return nullptr;
+ }
+
+ // TODO: Query the actual framebuffer for sRGB capable. However, to
+ // preserve old (fake-linear) behavior, we don't do this. Instead, rely
+ // on the flag (currently driven via 'C' mode in SampleApp).
+ //
+ // Also, we may not have real sRGB support (ANGLE, in particular), so check for
+ // that, and fall back to L32:
+ //
+ // ... and, if we're using a 10-bit/channel FB0, it doesn't do sRGB conversion on write,
+ // so pretend that it's non-sRGB 8888:
+ desc.fConfig =
+ grContext->caps()->srgbSupport() &&
+ SkImageInfoIsGammaCorrect(info()) &&
+ (attachmentInfo.fColorBits != 30)
+ ? kSkiaGamma8888_GrPixelConfig : kSkia8888_GrPixelConfig;
+ desc.fOrigin = kBottomLeft_GrSurfaceOrigin;
+ desc.fSampleCnt = attachmentInfo.fSampleCount;
+ desc.fStencilBits = attachmentInfo.fStencilBits;
+ GrGLint buffer;
+ GR_GL_GetIntegerv(interface, GR_GL_FRAMEBUFFER_BINDING, &buffer);
+ desc.fRenderTargetHandle = buffer;
+
+ sk_sp<SkColorSpace> colorSpace =
+ grContext->caps()->srgbSupport() && SkImageInfoIsGammaCorrect(info())
+ ? SkColorSpace::NewNamed(SkColorSpace::kSRGB_Named) : nullptr;
+ return SkSurface::MakeFromBackendRenderTarget(grContext, desc, colorSpace, &fSurfaceProps);
+}
+
+#endif
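
SkWindow above accumulates invalidations into fDirtyRgn, and update() then clips to that region, draws, and resets it. A hedged sketch of one turn of that inval/update cycle as a platform layer might drive it; the frame() wrapper and the presentation step are assumptions, not part of the code above.

    #include "SkWindow.h"

    // Sketch only: one frame of the dirty-region cycle used by SkWindow.
    static void frame(SkWindow* win) {
        // Mark the whole window dirty; a view would normally call inval()
        // with a smaller rect instead.
        win->handleInval(nullptr);

        SkIRect dirty;
        if (win->update(&dirty)) {
            // update() clipped the canvas to the dirty region, drew the view
            // hierarchy, and emptied the region. 'dirty' bounds what changed,
            // so the platform backend only needs to present that area.
        }
    }
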
diff --git a/gfx/skia/skia/src/views/ios/SkOSWindow_iOS.mm b/gfx/skia/skia/src/views/ios/SkOSWindow_iOS.mm
new file mode 100755
index 000000000..aa7d3759b
--- /dev/null
+++ b/gfx/skia/skia/src/views/ios/SkOSWindow_iOS.mm
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#import <UIKit/UIKit.h>
+#include "SkCanvas.h"
+#include "SkGraphics.h"
+#import "SkEventNotifier.h"
+#include "SkOSMenu.h"
+#include "SkTime.h"
+#include "SkTypes.h"
+#import "SkUIView.h"
+#include "SkWindow.h"
+
+#define kINVAL_UIVIEW_EventType "inval-uiview"
+
+SkOSWindow::SkOSWindow(void* hWnd) : fHWND(hWnd) {
+ fInvalEventIsPending = false;
+ fNotifier = [[SkEventNotifier alloc] init];
+}
+SkOSWindow::~SkOSWindow() {
+ [(SkEventNotifier*)fNotifier release];
+}
+
+void SkOSWindow::onHandleInval(const SkIRect& r) {
+ if (!fInvalEventIsPending) {
+ fInvalEventIsPending = true;
+ (new SkEvent(kINVAL_UIVIEW_EventType, this->getSinkID()))->post();
+ }
+}
+
+bool SkOSWindow::onEvent(const SkEvent& evt) {
+ if (evt.isType(kINVAL_UIVIEW_EventType)) {
+ fInvalEventIsPending = false;
+ const SkIRect& r = this->getDirtyBounds();
+ [(SkUIView*)fHWND postInvalWithRect:&r];
+ return true;
+ }
+ if ([(SkUIView*)fHWND onHandleEvent:evt]) {
+ return true;
+ }
+ return this->INHERITED::onEvent(evt);
+}
+
+void SkOSWindow::onSetTitle(const char title[]) {
+ [(SkUIView*)fHWND setSkTitle:title];
+}
+
+void SkOSWindow::onAddMenu(const SkOSMenu* menu) {
+ [(SkUIView*)fHWND onAddMenu:menu];
+}
+
+void SkOSWindow::onUpdateMenu(SkOSMenu* menu) {
+ [(SkUIView*)fHWND onUpdateMenu:menu];
+}
+
+bool SkOSWindow::attach(SkBackEndTypes /* attachType */,
+ int /* msaaSampleCount */,
+ bool /* deepColor */,
+ AttachmentInfo* info) {
+ [(SkUIView*)fHWND getAttachmentInfo:info];
+ bool success = true;
+ return success;
+}
+
+void SkOSWindow::release() {}
+
+void SkOSWindow::present() {
+}
diff --git a/gfx/skia/skia/src/views/mac/SkEventNotifier.h b/gfx/skia/skia/src/views/mac/SkEventNotifier.h
new file mode 100644
index 000000000..ea6bbf1e3
--- /dev/null
+++ b/gfx/skia/skia/src/views/mac/SkEventNotifier.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#import <Foundation/Foundation.h>
+
+@interface SkEventNotifier : NSObject
+- (void)receiveSkEvent:(NSNotification*)notification;
++ (void)postTimedSkEvent:(NSTimeInterval)ti;
++ (void)timerFireMethod:(NSTimer*)theTimer;
+@end
diff --git a/gfx/skia/skia/src/views/mac/SkEventNotifier.mm b/gfx/skia/skia/src/views/mac/SkEventNotifier.mm
new file mode 100644
index 000000000..0864380d9
--- /dev/null
+++ b/gfx/skia/skia/src/views/mac/SkEventNotifier.mm
@@ -0,0 +1,68 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#import "SkEventNotifier.h"
+#include "SkEvent.h"
+#define SkEventClass @"SkEvenClass"
+@implementation SkEventNotifier
+- (id)init {
+ self = [super init];
+ if (self) {
+ //Register as an observer for SkEventClass events and call
+ //receiveSkEvent: upon receiving the event
+ [[NSNotificationCenter defaultCenter] addObserver:self
+ selector:@selector(receiveSkEvent:)
+ name:SkEventClass
+ object:nil];
+ }
+ return self;
+}
+
+- (void)dealloc {
+ [[NSNotificationCenter defaultCenter] removeObserver:self];
+ [super dealloc];
+}
+
+-(BOOL) acceptsFirstResponder {
+ return YES;
+}
+
+// SkEvent handlers
+- (void)receiveSkEvent:(NSNotification *)notification {
+ if(SkEvent::ProcessEvent())
+ SkEvent::SignalNonEmptyQueue();
+}
+
++ (void)postTimedSkEvent:(NSTimeInterval)timeInterval {
+ [NSTimer scheduledTimerWithTimeInterval:timeInterval target:self
+ selector:@selector(timerFireMethod:)
+ userInfo:nil repeats:NO];
+}
+
++ (void)timerFireMethod:(NSTimer*)theTimer {
+ SkEvent::ServiceQueueTimer();
+}
+
+@end
+////////////////////////////////////////////////////////////////////////////////
+void SkEvent::SignalNonEmptyQueue() {
+ //post a SkEventClass event to the default notification queue
+ NSNotification* notification = [NSNotification notificationWithName:SkEventClass object:nil];
+ [[NSNotificationQueue defaultQueue] enqueueNotification:notification
+ postingStyle:NSPostWhenIdle
+ coalesceMask:NSNotificationNoCoalescing
+ forModes:nil];
+}
+
+void SkEvent::SignalQueueTimer(SkMSec delay) {
+ if (delay) {
+ //Convert to seconds
+ NSTimeInterval ti = delay/(float)SK_MSec1;
+ [SkEventNotifier postTimedSkEvent:ti];
+ }
+}
diff --git a/gfx/skia/skia/src/views/mac/SkNSView.h b/gfx/skia/skia/src/views/mac/SkNSView.h
new file mode 100644
index 000000000..779c51b7e
--- /dev/null
+++ b/gfx/skia/skia/src/views/mac/SkNSView.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#import <QuartzCore/QuartzCore.h>
+#import <Cocoa/Cocoa.h>
+#import "SkWindow.h"
+class SkEvent;
+@class SkNSView;
+
+@protocol SkNSViewOptionsDelegate <NSObject>
+@optional
+// Called when the view needs to handle adding an SkOSMenu
+- (void) view:(SkNSView*)view didAddMenu:(const SkOSMenu*)menu;
+- (void) view:(SkNSView*)view didUpdateMenu:(const SkOSMenu*)menu;
+@end
+
+@interface SkNSView : NSView {
+ BOOL fRedrawRequestPending;
+
+ NSString* fTitle;
+ SkOSWindow* fWind;
+#if SK_SUPPORT_GPU
+ NSOpenGLContext* fGLContext;
+#endif
+ id<SkNSViewOptionsDelegate> fOptionsDelegate;
+}
+
+@property (nonatomic, readonly) SkOSWindow *fWind;
+@property (nonatomic, retain) NSString* fTitle;
+#if SK_SUPPORT_GPU
+@property (nonatomic, retain) NSOpenGLContext* fGLContext;
+#endif
+@property (nonatomic, assign) id<SkNSViewOptionsDelegate> fOptionsDelegate;
+
+- (id)initWithDefaults;
+- (void)setUpWindow;
+- (void)resizeSkView:(NSSize)newSize;
+- (void)setSkTitle:(const char*)title;
+- (void)onAddMenu:(const SkOSMenu*)menu;
+- (void)onUpdateMenu:(const SkOSMenu*)menu;
+- (void)postInvalWithRect:(const SkIRect*)rectOrNil;
+- (BOOL)onHandleEvent:(const SkEvent&)event;
+
+- (bool)attach:(SkOSWindow::SkBackEndTypes)attachType withMSAASampleCount:(int) sampleCount andGetInfo:(SkOSWindow::AttachmentInfo*) info;
+- (void)detach;
+- (void)present;
+
+- (void)setVSync:(bool)enable;
+
+- (void)freeNativeWind;
+
+@end
diff --git a/gfx/skia/skia/src/views/mac/SkNSView.mm b/gfx/skia/skia/src/views/mac/SkNSView.mm
new file mode 100644
index 000000000..ce2938237
--- /dev/null
+++ b/gfx/skia/skia/src/views/mac/SkNSView.mm
@@ -0,0 +1,430 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#import "SkNSView.h"
+#include "SkCanvas.h"
+#include "SkSurface.h"
+#include "SkCGUtils.h"
+#include "SkEvent.h"
+static_assert(SK_SUPPORT_GPU, "not_implemented_for_non_gpu_build");
+#include <OpenGL/gl.h>
+
+//#define FORCE_REDRAW
+// Can be dropped when we no longer support 10.6.
+#define RETINA_API_AVAILABLE (defined(MAC_OS_X_VERSION_10_7) && \
+ MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_7)
+@implementation SkNSView
+@synthesize fWind, fTitle, fOptionsDelegate, fGLContext;
+
+- (id)initWithCoder:(NSCoder*)coder {
+ if ((self = [super initWithCoder:coder])) {
+ self = [self initWithDefaults];
+ [self setUpWindow];
+ }
+ return self;
+}
+
+- (id)initWithFrame:(NSRect)frameRect {
+ if ((self = [super initWithFrame:frameRect])) {
+ self = [self initWithDefaults];
+ [self setUpWindow];
+ }
+ return self;
+}
+
+- (id)initWithDefaults {
+#if RETINA_API_AVAILABLE
+ [self setWantsBestResolutionOpenGLSurface:YES];
+#endif
+ fRedrawRequestPending = false;
+ fWind = NULL;
+ return self;
+}
+
+- (void)setUpWindow {
+ [[NSNotificationCenter defaultCenter] addObserver:self
+ selector:@selector(backingPropertiesChanged:)
+ name:@"NSWindowDidChangeBackingPropertiesNotification"
+ object:[self window]];
+ if (fWind) {
+ fWind->setVisibleP(true);
+ NSSize size = self.frame.size;
+#if RETINA_API_AVAILABLE
+ size = [self convertSizeToBacking:self.frame.size];
+#endif
+ fWind->resize((int) size.width, (int) size.height);
+ [[self window] setAcceptsMouseMovedEvents:YES];
+ }
+}
+
+-(BOOL) isFlipped {
+ return YES;
+}
+
+- (BOOL)acceptsFirstResponder {
+ return YES;
+}
+
+- (float)scaleFactor {
+ NSWindow *window = [self window];
+#if RETINA_API_AVAILABLE
+ if (window) {
+ return [window backingScaleFactor];
+ }
+ return [[NSScreen mainScreen] backingScaleFactor];
+#else
+ if (window) {
+ return [window userSpaceScaleFactor];
+ }
+ return [[NSScreen mainScreen] userSpaceScaleFactor];
+#endif
+}
+
+- (void)backingPropertiesChanged:(NSNotification *)notification {
+ CGFloat oldBackingScaleFactor = (CGFloat)[
+ [notification.userInfo objectForKey:@"NSBackingPropertyOldScaleFactorKey"] doubleValue
+ ];
+ CGFloat newBackingScaleFactor = [self scaleFactor];
+ if (oldBackingScaleFactor == newBackingScaleFactor) {
+ return;
+ }
+
+ // TODO: need a better way to force a refresh (that works).
+ // [fGLContext update] does not appear to update if the point size has not changed,
+ // even if the backing size has changed.
+ [self setFrameSize:NSMakeSize(self.frame.size.width + 1, self.frame.size.height + 1)];
+}
+
+- (void)resizeSkView:(NSSize)newSize {
+#if RETINA_API_AVAILABLE
+ newSize = [self convertSizeToBacking:newSize];
+#endif
+ if (fWind && (fWind->width() != newSize.width || fWind->height() != newSize.height)) {
+ fWind->resize((int) newSize.width, (int) newSize.height);
+ if (fGLContext) {
+ glClear(GL_STENCIL_BUFFER_BIT);
+ [fGLContext update];
+ }
+ }
+}
+
+- (void) setFrameSize:(NSSize)newSize {
+ [super setFrameSize:newSize];
+ [self resizeSkView:newSize];
+}
+
+- (void)dealloc {
+ [self freeNativeWind];
+ self.fGLContext = nil;
+ self.fTitle = nil;
+ [super dealloc];
+}
+
+- (void)freeNativeWind {
+ delete fWind;
+ fWind = nil;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+- (void)drawSkia {
+ fRedrawRequestPending = false;
+ if (fWind) {
+ sk_sp<SkSurface> surface(fWind->makeSurface());
+ fWind->draw(surface->getCanvas());
+#ifdef FORCE_REDRAW
+ fWind->inval(NULL);
+#endif
+ }
+}
+
+- (void)setSkTitle:(const char *)title {
+ self.fTitle = [NSString stringWithUTF8String:title];
+ [[self window] setTitle:self.fTitle];
+}
+
+- (BOOL)onHandleEvent:(const SkEvent&)evt {
+ return false;
+}
+
+#include "SkOSMenu.h"
+- (void)onAddMenu:(const SkOSMenu*)menu {
+ [self.fOptionsDelegate view:self didAddMenu:menu];
+}
+
+- (void)onUpdateMenu:(const SkOSMenu*)menu {
+ [self.fOptionsDelegate view:self didUpdateMenu:menu];
+}
+
+- (void)postInvalWithRect:(const SkIRect*)r {
+ if (!fRedrawRequestPending) {
+ fRedrawRequestPending = true;
+ [self setNeedsDisplay:YES];
+ [self performSelector:@selector(drawSkia) withObject:nil afterDelay:0];
+ }
+}
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkKey.h"
+enum {
+ SK_MacReturnKey = 36,
+ SK_MacDeleteKey = 51,
+ SK_MacEndKey = 119,
+ SK_MacLeftKey = 123,
+ SK_MacRightKey = 124,
+ SK_MacDownKey = 125,
+ SK_MacUpKey = 126,
+ SK_Mac0Key = 0x52,
+ SK_Mac1Key = 0x53,
+ SK_Mac2Key = 0x54,
+ SK_Mac3Key = 0x55,
+ SK_Mac4Key = 0x56,
+ SK_Mac5Key = 0x57,
+ SK_Mac6Key = 0x58,
+ SK_Mac7Key = 0x59,
+ SK_Mac8Key = 0x5b,
+ SK_Mac9Key = 0x5c
+};
+
+static SkKey raw2key(UInt32 raw)
+{
+ static const struct {
+ UInt32 fRaw;
+ SkKey fKey;
+ } gKeys[] = {
+ { SK_MacUpKey, kUp_SkKey },
+ { SK_MacDownKey, kDown_SkKey },
+ { SK_MacLeftKey, kLeft_SkKey },
+ { SK_MacRightKey, kRight_SkKey },
+ { SK_MacReturnKey, kOK_SkKey },
+ { SK_MacDeleteKey, kBack_SkKey },
+ { SK_MacEndKey, kEnd_SkKey },
+ { SK_Mac0Key, k0_SkKey },
+ { SK_Mac1Key, k1_SkKey },
+ { SK_Mac2Key, k2_SkKey },
+ { SK_Mac3Key, k3_SkKey },
+ { SK_Mac4Key, k4_SkKey },
+ { SK_Mac5Key, k5_SkKey },
+ { SK_Mac6Key, k6_SkKey },
+ { SK_Mac7Key, k7_SkKey },
+ { SK_Mac8Key, k8_SkKey },
+ { SK_Mac9Key, k9_SkKey }
+ };
+
+ for (unsigned i = 0; i < SK_ARRAY_COUNT(gKeys); i++)
+ if (gKeys[i].fRaw == raw)
+ return gKeys[i].fKey;
+ return kNONE_SkKey;
+}
+
+- (void)keyDown:(NSEvent *)event {
+ if (NULL == fWind)
+ return;
+
+ SkKey key = raw2key([event keyCode]);
+ if (kNONE_SkKey != key)
+ fWind->handleKey(key);
+ else{
+ unichar c = [[event characters] characterAtIndex:0];
+ fWind->handleChar((SkUnichar)c);
+ }
+}
+
+- (void)keyUp:(NSEvent *)event {
+ if (NULL == fWind)
+ return;
+
+ SkKey key = raw2key([event keyCode]);
+ if (kNONE_SkKey != key)
+ fWind->handleKeyUp(key);
+ // else
+ // unichar c = [[event characters] characterAtIndex:0];
+}
+
+static const struct {
+ unsigned fNSModifierMask;
+ unsigned fSkModifierMask;
+} gModifierMasks[] = {
+ { NSAlphaShiftKeyMask, kShift_SkModifierKey },
+ { NSShiftKeyMask, kShift_SkModifierKey },
+ { NSControlKeyMask, kControl_SkModifierKey },
+ { NSAlternateKeyMask, kOption_SkModifierKey },
+ { NSCommandKeyMask, kCommand_SkModifierKey },
+};
+
+static unsigned convertNSModifiersToSk(NSUInteger nsModi) {
+ unsigned skModi = 0;
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gModifierMasks); ++i) {
+ if (nsModi & gModifierMasks[i].fNSModifierMask) {
+ skModi |= gModifierMasks[i].fSkModifierMask;
+ }
+ }
+ return skModi;
+}
+
+- (void)mouseDown:(NSEvent *)event {
+ NSPoint p = [event locationInWindow];
+ unsigned modi = convertNSModifiersToSk([event modifierFlags]);
+
+ if ([self mouse:p inRect:[self bounds]] && fWind) {
+ NSPoint loc = [self convertPoint:p fromView:nil];
+#if RETINA_API_AVAILABLE
+ loc = [self convertPointToBacking:loc]; //y-up
+ loc.y = -loc.y;
+#endif
+ fWind->handleClick((int) loc.x, (int) loc.y,
+ SkView::Click::kDown_State, self, modi);
+ }
+}
+
+- (void)mouseDragged:(NSEvent *)event {
+ NSPoint p = [event locationInWindow];
+ unsigned modi = convertNSModifiersToSk([event modifierFlags]);
+
+ if ([self mouse:p inRect:[self bounds]] && fWind) {
+ NSPoint loc = [self convertPoint:p fromView:nil];
+#if RETINA_API_AVAILABLE
+ loc = [self convertPointToBacking:loc]; //y-up
+ loc.y = -loc.y;
+#endif
+ fWind->handleClick((int) loc.x, (int) loc.y,
+ SkView::Click::kMoved_State, self, modi);
+ }
+}
+
+- (void)mouseMoved:(NSEvent *)event {
+ NSPoint p = [event locationInWindow];
+ unsigned modi = convertNSModifiersToSk([event modifierFlags]);
+
+ if ([self mouse:p inRect:[self bounds]] && fWind) {
+ NSPoint loc = [self convertPoint:p fromView:nil];
+#if RETINA_API_AVAILABLE
+ loc = [self convertPointToBacking:loc]; //y-up
+ loc.y = -loc.y;
+#endif
+ fWind->handleClick((int) loc.x, (int) loc.y,
+ SkView::Click::kMoved_State, self, modi);
+ }
+}
+
+- (void)mouseUp:(NSEvent *)event {
+ NSPoint p = [event locationInWindow];
+ unsigned modi = convertNSModifiersToSk([event modifierFlags]);
+
+ if ([self mouse:p inRect:[self bounds]] && fWind) {
+ NSPoint loc = [self convertPoint:p fromView:nil];
+#if RETINA_API_AVAILABLE
+ loc = [self convertPointToBacking:loc]; //y-up
+ loc.y = -loc.y;
+#endif
+ fWind->handleClick((int) loc.x, (int) loc.y,
+ SkView::Click::kUp_State, self, modi);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+#include <OpenGL/OpenGL.h>
+
+static CGLContextObj createGLContext(int msaaSampleCount) {
+ GLint major, minor;
+ CGLGetVersion(&major, &minor);
+
+ static const CGLPixelFormatAttribute attributes[] = {
+ kCGLPFAStencilSize, (CGLPixelFormatAttribute) 8,
+ kCGLPFAAccelerated,
+ kCGLPFADoubleBuffer,
+ kCGLPFAOpenGLProfile, (CGLPixelFormatAttribute) kCGLOGLPVersion_3_2_Core,
+ (CGLPixelFormatAttribute)0
+ };
+
+ CGLPixelFormatObj format;
+ GLint npix = 0;
+ if (msaaSampleCount > 0) {
+ static const int kAttributeCount = SK_ARRAY_COUNT(attributes);
+ CGLPixelFormatAttribute msaaAttributes[kAttributeCount + 5];
+ memcpy(msaaAttributes, attributes, sizeof(attributes));
+ SkASSERT(0 == msaaAttributes[kAttributeCount - 1]);
+ msaaAttributes[kAttributeCount - 1] = kCGLPFASampleBuffers;
+ msaaAttributes[kAttributeCount + 0] = (CGLPixelFormatAttribute)1;
+ msaaAttributes[kAttributeCount + 1] = kCGLPFAMultisample;
+ msaaAttributes[kAttributeCount + 2] = kCGLPFASamples;
+ msaaAttributes[kAttributeCount + 3] =
+ (CGLPixelFormatAttribute)msaaSampleCount;
+ msaaAttributes[kAttributeCount + 4] = (CGLPixelFormatAttribute)0;
+ CGLChoosePixelFormat(msaaAttributes, &format, &npix);
+ }
+ if (!npix) {
+ CGLChoosePixelFormat(attributes, &format, &npix);
+ }
+ CGLContextObj ctx;
+ CGLCreateContext(format, NULL, &ctx);
+ CGLDestroyPixelFormat(format);
+
+ static const GLint interval = 1;
+ CGLSetParameter(ctx, kCGLCPSwapInterval, &interval);
+ CGLSetCurrentContext(ctx);
+ return ctx;
+}
+
+- (void)viewDidMoveToWindow {
+ [super viewDidMoveToWindow];
+
+    // Attaching the view to fGLContext requires that the view be part of a window
+    // and that the NSWindow instance have a CoreGraphics counterpart (i.e. it must
+    // NOT be deferred, or it must have been on screen at least once).
+ if ([fGLContext view] != self && nil != self.window) {
+ [fGLContext setView:self];
+ }
+}
+- (bool)attach:(SkOSWindow::SkBackEndTypes)attachType
+ withMSAASampleCount:(int) sampleCount
+ andGetInfo:(SkOSWindow::AttachmentInfo*) info {
+ if (nil == fGLContext) {
+ CGLContextObj ctx = createGLContext(sampleCount);
+ SkASSERT(ctx);
+ fGLContext = [[NSOpenGLContext alloc] initWithCGLContextObj:ctx];
+ CGLReleaseContext(ctx);
+ if (NULL == fGLContext) {
+ return false;
+ }
+ [fGLContext setView:self];
+ }
+
+ [fGLContext makeCurrentContext];
+ CGLPixelFormatObj format = CGLGetPixelFormat((CGLContextObj)[fGLContext CGLContextObj]);
+ CGLDescribePixelFormat(format, 0, kCGLPFASamples, &info->fSampleCount);
+ CGLDescribePixelFormat(format, 0, kCGLPFAStencilSize, &info->fStencilBits);
+ NSSize size = self.bounds.size;
+#if RETINA_API_AVAILABLE
+ size = [self convertSizeToBacking:size];
+#endif
+ glViewport(0, 0, (int) size.width, (int) size.height);
+ glClearColor(0, 0, 0, 0);
+ glClearStencil(0);
+ glClear(GL_COLOR_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
+ return true;
+}
+
+- (void)detach {
+ [fGLContext release];
+ fGLContext = nil;
+}
+
+- (void)present {
+ if (nil != fGLContext) {
+ [fGLContext flushBuffer];
+ }
+}
+
+- (void)setVSync:(bool)enable {
+ if (fGLContext) {
+ GLint interval = enable ? 1 : 0;
+ CGLContextObj ctx = (CGLContextObj)[fGLContext CGLContextObj];
+ CGLSetParameter(ctx, kCGLCPSwapInterval, &interval);
+ }
+}
+@end
diff --git a/gfx/skia/skia/src/views/mac/SkOSWindow_Mac.mm b/gfx/skia/skia/src/views/mac/SkOSWindow_Mac.mm
new file mode 100644
index 000000000..faf1bbafd
--- /dev/null
+++ b/gfx/skia/skia/src/views/mac/SkOSWindow_Mac.mm
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#if defined(SK_BUILD_FOR_MAC)
+
+#import <Cocoa/Cocoa.h>
+#include "SkOSWindow_Mac.h"
+#include "SkOSMenu.h"
+#include "SkTypes.h"
+#include "SkWindow.h"
+#import "SkNSView.h"
+#import "SkEventNotifier.h"
+#define kINVAL_NSVIEW_EventType "inval-nsview"
+
+static_assert(SK_SUPPORT_GPU, "not_implemented_for_non_gpu_build");
+
+SkOSWindow::SkOSWindow(void* hWnd) : fHWND(hWnd) {
+ fInvalEventIsPending = false;
+ fGLContext = NULL;
+ fNotifier = [[SkEventNotifier alloc] init];
+}
+SkOSWindow::~SkOSWindow() {
+ [(SkEventNotifier*)fNotifier release];
+}
+
+void SkOSWindow::onHandleInval(const SkIRect& r) {
+ if (!fInvalEventIsPending) {
+ fInvalEventIsPending = true;
+ (new SkEvent(kINVAL_NSVIEW_EventType, this->getSinkID()))->post();
+ }
+}
+
+bool SkOSWindow::onEvent(const SkEvent& evt) {
+ if (evt.isType(kINVAL_NSVIEW_EventType)) {
+ fInvalEventIsPending = false;
+ const SkIRect& r = this->getDirtyBounds();
+ [(SkNSView*)fHWND postInvalWithRect:&r];
+ [(NSOpenGLContext*)fGLContext update];
+ return true;
+ }
+ if ([(SkNSView*)fHWND onHandleEvent:evt]) {
+ return true;
+ }
+ return this->INHERITED::onEvent(evt);
+}
+
+bool SkOSWindow::onDispatchClick(int x, int y, Click::State state, void* owner,
+ unsigned modi) {
+ return this->INHERITED::onDispatchClick(x, y, state, owner, modi);
+}
+
+void SkOSWindow::onSetTitle(const char title[]) {
+ [(SkNSView*)fHWND setSkTitle:title];
+}
+
+void SkOSWindow::onAddMenu(const SkOSMenu* menu) {
+ [(SkNSView*)fHWND onAddMenu:menu];
+}
+
+void SkOSWindow::onUpdateMenu(const SkOSMenu* menu) {
+ [(SkNSView*)fHWND onUpdateMenu:menu];
+}
+
+bool SkOSWindow::attach(SkBackEndTypes attachType, int sampleCount, bool /*deepColor*/,
+ AttachmentInfo* info) {
+ return [(SkNSView*)fHWND attach:attachType withMSAASampleCount:sampleCount andGetInfo:info];
+}
+
+void SkOSWindow::release() {
+ [(SkNSView*)fHWND detach];
+}
+
+void SkOSWindow::present() {
+ [(SkNSView*)fHWND present];
+}
+
+void SkOSWindow::closeWindow() {
+ [[(SkNSView*)fHWND window] close];
+}
+
+void SkOSWindow::setVsync(bool enable) {
+ [(SkNSView*)fHWND setVSync:enable];
+}
+
+bool SkOSWindow::makeFullscreen() {
+ [(SkNSView*)fHWND enterFullScreenMode:[NSScreen mainScreen] withOptions:nil];
+ return true;
+}
+
+
+#endif
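
The Mac glue above forwards attach/release/present to the SkNSView, which owns the NSOpenGLContext. A hedged sketch of the GL lifecycle a caller drives through SkOSWindow follows; the kNativeGL_BackEndType enumerator and the one-shot structure are assumptions about the wider SkOSWindow/SkWindow API, not defined in this file.

    #include "SkOSWindow_Mac.h"

    // Sketch only: bring up a GL-backed window, draw once, and tear it down.
    static void runOnce(SkOSWindow* win) {
        SkOSWindow::AttachmentInfo info;
        if (!win->attach(SkOSWindow::kNativeGL_BackEndType,   // assumed enumerator
                         /*msaaSampleCount=*/0, /*deepColor=*/false, &info)) {
            return;
        }
        win->update(nullptr);   // draw the view hierarchy into the backing surface
        win->present();         // flushBuffer / swap on the platform side
        win->release();         // drop the GL context
    }
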
diff --git a/gfx/skia/skia/src/views/mac/SkOptionsTableView.h b/gfx/skia/skia/src/views/mac/SkOptionsTableView.h
new file mode 100644
index 000000000..8fa03d1fc
--- /dev/null
+++ b/gfx/skia/skia/src/views/mac/SkOptionsTableView.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#import <Cocoa/Cocoa.h>
+#import "SkNSView.h"
+#import "SkOSMenu.h"
+#import "SkEvent.h"
+@interface SkOptionItem : NSObject {
+ NSCell* fCell;
+ const SkOSMenu::Item* fItem;
+}
+@property (nonatomic, assign) const SkOSMenu::Item* fItem;
+@property (nonatomic, retain) NSCell* fCell;
+@end
+
+@interface SkOptionsTableView : NSTableView <SkNSViewOptionsDelegate, NSTableViewDelegate, NSTableViewDataSource> {
+ NSMutableArray* fItems;
+ const SkTDArray<SkOSMenu*>* fMenus;
+ BOOL fShowKeys;
+}
+@property (nonatomic, retain) NSMutableArray* fItems;
+
+- (void)registerMenus:(const SkTDArray<SkOSMenu*>*)menus;
+- (void)updateMenu:(const SkOSMenu*)menu;
+- (void)loadMenu:(const SkOSMenu*)menu;
+- (IBAction)toggleKeyEquivalents:(id)sender;
+
+- (NSCell*)createAction;
+- (NSCell*)createList:(NSArray*)items current:(int)index;
+- (NSCell*)createSlider:(float)value min:(float)min max:(float)max;
+- (NSCell*)createSwitch:(BOOL)state;
+- (NSCell*)createTextField:(NSString*)placeHolder;
+- (NSCell*)createTriState:(NSCellStateValue)state;
+
+@end
diff --git a/gfx/skia/skia/src/views/mac/SkOptionsTableView.mm b/gfx/skia/skia/src/views/mac/SkOptionsTableView.mm
new file mode 100644
index 000000000..b4cdbf411
--- /dev/null
+++ b/gfx/skia/skia/src/views/mac/SkOptionsTableView.mm
@@ -0,0 +1,297 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#import "SkOptionsTableView.h"
+#import "SkTextFieldCell.h"
+@implementation SkOptionItem
+@synthesize fCell, fItem;
+- (void)dealloc {
+ [fCell release];
+ [super dealloc];
+}
+@end
+
+@implementation SkOptionsTableView
+@synthesize fItems;
+
+- (id)initWithCoder:(NSCoder*)coder {
+ if ((self = [super initWithCoder:coder])) {
+ self.dataSource = self;
+ self.delegate = self;
+ fMenus = NULL;
+ fShowKeys = YES;
+ [self setSelectionHighlightStyle:NSTableViewSelectionHighlightStyleNone];
+ self.fItems = [NSMutableArray array];
+ }
+ return self;
+}
+
+- (void)dealloc {
+ self.fItems = nil;
+ [super dealloc];
+}
+
+- (void) view:(SkNSView*)view didAddMenu:(const SkOSMenu*)menu {}
+- (void) view:(SkNSView*)view didUpdateMenu:(const SkOSMenu*)menu {
+ [self updateMenu:menu];
+}
+
+- (IBAction)toggleKeyEquivalents:(id)sender {
+ fShowKeys = !fShowKeys;
+ NSMenuItem* item = (NSMenuItem*)sender;
+ [item setState:fShowKeys];
+ [self reloadData];
+}
+
+- (void)registerMenus:(const SkTDArray<SkOSMenu*>*)menus {
+ fMenus = menus;
+ for (int i = 0; i < fMenus->count(); ++i) {
+ [self loadMenu:(*fMenus)[i]];
+ }
+}
+
+- (void)updateMenu:(const SkOSMenu*)menu {
+    // The first menu is always assumed to be static; the second is
+    // repopulated over and over again.
+
+ // seems pretty weird that we have to get rid of the const'ness here,
+ // but trying to propagate the const'ness through all the way to the fMenus
+ // vector was a non-starter.
+
+ int menuIndex = fMenus->find(const_cast<SkOSMenu *>(menu));
+ if (menuIndex >= 0 && menuIndex < fMenus->count()) {
+ NSUInteger first = 0;
+ for (int i = 0; i < menuIndex; ++i) {
+ first += (*fMenus)[i]->getCount();
+ }
+ [fItems removeObjectsInRange:NSMakeRange(first, [fItems count] - first)];
+ [self loadMenu:menu];
+ }
+ [self reloadData];
+}
+
+- (NSCellStateValue)triStateToNSState:(SkOSMenu::TriState)state {
+ if (SkOSMenu::kOnState == state)
+ return NSOnState;
+ else if (SkOSMenu::kOffState == state)
+ return NSOffState;
+ else
+ return NSMixedState;
+}
+
+- (void)loadMenu:(const SkOSMenu*)menu {
+ const SkOSMenu::Item* menuitems[menu->getCount()];
+ menu->getItems(menuitems);
+ for (int i = 0; i < menu->getCount(); ++i) {
+ const SkOSMenu::Item* item = menuitems[i];
+ SkOptionItem* option = [[SkOptionItem alloc] init];
+ option.fItem = item;
+
+ if (SkOSMenu::kList_Type == item->getType()) {
+ int index = 0, count = 0;
+ SkOSMenu::FindListItemCount(*item->getEvent(), &count);
+ NSMutableArray* optionstrs = [[NSMutableArray alloc] initWithCapacity:count];
+ SkAutoTDeleteArray<SkString> ada(new SkString[count]);
+ SkString* options = ada.get();
+ SkOSMenu::FindListItems(*item->getEvent(), options);
+ for (int i = 0; i < count; ++i)
+ [optionstrs addObject:[NSString stringWithUTF8String:options[i].c_str()]];
+ SkOSMenu::FindListIndex(*item->getEvent(), item->getSlotName(), &index);
+ option.fCell = [self createList:optionstrs current:index];
+ [optionstrs release];
+ }
+ else {
+ bool state = false;
+ SkString str;
+ SkOSMenu::TriState tristate;
+ switch (item->getType()) {
+ case SkOSMenu::kAction_Type:
+ option.fCell = [self createAction];
+ break;
+ case SkOSMenu::kSlider_Type:
+ SkScalar min, max, value;
+ SkOSMenu::FindSliderValue(*item->getEvent(), item->getSlotName(), &value);
+ SkOSMenu::FindSliderMin(*item->getEvent(), &min);
+ SkOSMenu::FindSliderMax(*item->getEvent(), &max);
+ option.fCell = [self createSlider:value
+ min:min
+ max:max];
+ break;
+ case SkOSMenu::kSwitch_Type:
+ SkOSMenu::FindSwitchState(*item->getEvent(), item->getSlotName(), &state);
+ option.fCell = [self createSwitch:(BOOL)state];
+ break;
+ case SkOSMenu::kTriState_Type:
+ SkOSMenu::FindTriState(*item->getEvent(), item->getSlotName(), &tristate);
+ option.fCell = [self createTriState:[self triStateToNSState:tristate]];
+ break;
+ case SkOSMenu::kTextField_Type:
+ SkOSMenu::FindText(*item->getEvent(),item->getSlotName(), &str);
+ option.fCell = [self createTextField:[NSString stringWithUTF8String:str.c_str()]];
+ break;
+ default:
+ break;
+ }
+ }
+ [fItems addObject:option];
+ [option release];
+ }
+}
+
+- (NSInteger)numberOfRowsInTableView:(NSTableView *)tableView {
+ return [self.fItems count];
+}
+
+- (id)tableView:(NSTableView *)tableView objectValueForTableColumn:(NSTableColumn *)tableColumn row:(NSInteger)row {
+ NSInteger columnIndex = [tableView columnWithIdentifier:[tableColumn identifier]];
+ if (columnIndex == 0) {
+ const SkOSMenu::Item* item = ((SkOptionItem*)[fItems objectAtIndex:row]).fItem;
+ NSString* label = [NSString stringWithUTF8String:item->getLabel()];
+ if (fShowKeys)
+ return [NSString stringWithFormat:@"%@ (%c)", label, item->getKeyEquivalent()];
+ else
+ return label;
+ }
+ else
+ return nil;
+}
+
+- (NSCell *)tableView:(NSTableView *)tableView dataCellForTableColumn:(NSTableColumn *)tableColumn row:(NSInteger)row {
+ if (tableColumn) {
+ NSInteger columnIndex = [tableView columnWithIdentifier:[tableColumn identifier]];
+ if (columnIndex == 1)
+ return [((SkOptionItem*)[fItems objectAtIndex:row]).fCell copy];
+ else
+ return [[[SkTextFieldCell alloc] init] autorelease];
+ }
+ return nil;
+}
+
+- (void)tableView:(NSTableView *)tableView willDisplayCell:(id)cell forTableColumn:(NSTableColumn *)tableColumn row:(NSInteger)row {
+ NSInteger columnIndex = [tableView columnWithIdentifier:[tableColumn identifier]];
+ if (columnIndex == 1) {
+ SkOptionItem* option = (SkOptionItem*)[self.fItems objectAtIndex:row];
+ NSCell* storedCell = option.fCell;
+ const SkOSMenu::Item* item = option.fItem;
+ switch (item->getType()) {
+ case SkOSMenu::kAction_Type:
+ break;
+ case SkOSMenu::kList_Type:
+ [cell selectItemAtIndex:[(NSPopUpButtonCell*)storedCell indexOfSelectedItem]];
+ break;
+ case SkOSMenu::kSlider_Type:
+ [cell setFloatValue:[storedCell floatValue]];
+ break;
+ case SkOSMenu::kSwitch_Type:
+ [cell setState:[(NSButtonCell*)storedCell state]];
+ break;
+ case SkOSMenu::kTextField_Type:
+ if ([[storedCell stringValue] length] > 0)
+ [cell setStringValue:[storedCell stringValue]];
+ break;
+ case SkOSMenu::kTriState_Type:
+ [cell setState:[(NSButtonCell*)storedCell state]];
+ break;
+ default:
+ break;
+ }
+ }
+ else {
+ [(SkTextFieldCell*)cell setEditable:NO];
+ }
+}
+
+- (void)tableView:(NSTableView *)tableView setObjectValue:(id)anObject forTableColumn:(NSTableColumn *)tableColumn row:(NSInteger)row {
+ NSInteger columnIndex = [tableView columnWithIdentifier:[tableColumn identifier]];
+ if (columnIndex == 1) {
+ SkOptionItem* option = (SkOptionItem*)[self.fItems objectAtIndex:row];
+ NSCell* cell = option.fCell;
+ const SkOSMenu::Item* item = option.fItem;
+ switch (item->getType()) {
+ case SkOSMenu::kAction_Type:
+ item->postEvent();
+ break;
+ case SkOSMenu::kList_Type:
+ [(NSPopUpButtonCell*)cell selectItemAtIndex:[anObject intValue]];
+ item->setInt([anObject intValue]);
+ break;
+ case SkOSMenu::kSlider_Type:
+ [cell setFloatValue:[anObject floatValue]];
+ item->setScalar([anObject floatValue]);
+ break;
+ case SkOSMenu::kSwitch_Type:
+ [cell setState:[anObject boolValue]];
+ item->setBool([anObject boolValue]);
+ break;
+ case SkOSMenu::kTextField_Type:
+ if ([anObject length] > 0) {
+ [cell setStringValue:anObject];
+ item->setString([anObject UTF8String]);
+ }
+ break;
+ case SkOSMenu::kTriState_Type:
+ [cell setState:[anObject intValue]];
+ item->setTriState((SkOSMenu::TriState)[anObject intValue]);
+ break;
+ default:
+ break;
+ }
+ item->postEvent();
+ }
+}
+
+- (NSCell*)createAction{
+ NSButtonCell* cell = [[[NSButtonCell alloc] init] autorelease];
+ [cell setTitle:@""];
+ [cell setButtonType:NSMomentaryPushInButton];
+ [cell setBezelStyle:NSSmallSquareBezelStyle];
+ return cell;
+}
+
+- (NSCell*)createList:(NSArray*)items current:(int)index {
+ NSPopUpButtonCell* cell = [[[NSPopUpButtonCell alloc] init] autorelease];
+ [cell addItemsWithTitles:items];
+ [cell selectItemAtIndex:index];
+ [cell setArrowPosition:NSPopUpArrowAtBottom];
+ [cell setBezelStyle:NSSmallSquareBezelStyle];
+ return cell;
+}
+
+- (NSCell*)createSlider:(float)value min:(float)min max:(float)max {
+ NSSliderCell* cell = [[[NSSliderCell alloc] init] autorelease];
+ [cell setFloatValue:value];
+ [cell setMinValue:min];
+ [cell setMaxValue:max];
+ return cell;
+}
+
+- (NSCell*)createSwitch:(BOOL)state {
+ NSButtonCell* cell = [[[NSButtonCell alloc] init] autorelease];
+ [cell setState:state];
+ [cell setTitle:@""];
+ [cell setButtonType:NSSwitchButton];
+ return cell;
+}
+
+- (NSCell*)createTextField:(NSString*)placeHolder {
+ SkTextFieldCell* cell = [[[SkTextFieldCell alloc] init] autorelease];
+ [cell setEditable:YES];
+ [cell setStringValue:@""];
+ [cell setPlaceholderString:placeHolder];
+ return cell;
+}
+
+- (NSCell*)createTriState:(NSCellStateValue)state {
+ NSButtonCell* cell = [[[NSButtonCell alloc] init] autorelease];
+ [cell setAllowsMixedState:TRUE];
+ [cell setTitle:@""];
+ [cell setState:(NSInteger)state];
+ [cell setButtonType:NSSwitchButton];
+ return cell;
+}
+@end
diff --git a/gfx/skia/skia/src/views/mac/SkSampleNSView.h b/gfx/skia/skia/src/views/mac/SkSampleNSView.h
new file mode 100644
index 000000000..b7da9fd55
--- /dev/null
+++ b/gfx/skia/skia/src/views/mac/SkSampleNSView.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#import "SkNSView.h"
+@interface SkSampleNSView : SkNSView
+- (id)initWithDefaults;
+@end
diff --git a/gfx/skia/skia/src/views/mac/SkSampleNSView.mm b/gfx/skia/skia/src/views/mac/SkSampleNSView.mm
new file mode 100644
index 000000000..7c0fc5907
--- /dev/null
+++ b/gfx/skia/skia/src/views/mac/SkSampleNSView.mm
@@ -0,0 +1,31 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#import "SkSampleNSView.h"
+#include "SampleApp.h"
+#include <crt_externs.h>
+@implementation SkSampleNSView
+
+- (id)initWithDefaults {
+ if ((self = [super initWithDefaults])) {
+ fWind = new SampleWindow(self, *_NSGetArgc(), *_NSGetArgv(), NULL);
+ }
+ return self;
+}
+
+- (void)swipeWithEvent:(NSEvent *)event {
+ CGFloat x = [event deltaX];
+ if (x < 0)
+ ((SampleWindow*)fWind)->previousSample();
+ else if (x > 0)
+ ((SampleWindow*)fWind)->nextSample();
+ else
+ ((SampleWindow*)fWind)->showOverview();
+}
+
+@end
diff --git a/gfx/skia/skia/src/views/mac/SkTextFieldCell.h b/gfx/skia/skia/src/views/mac/SkTextFieldCell.h
new file mode 100644
index 000000000..dfca7ae69
--- /dev/null
+++ b/gfx/skia/skia/src/views/mac/SkTextFieldCell.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#import <Cocoa/Cocoa.h>
+//A text field cell that has vertically centered text
+@interface SkTextFieldCell : NSTextFieldCell {
+ BOOL selectingOrEditing;
+}
+@end
diff --git a/gfx/skia/skia/src/views/mac/SkTextFieldCell.m b/gfx/skia/skia/src/views/mac/SkTextFieldCell.m
new file mode 100644
index 000000000..c5efc4640
--- /dev/null
+++ b/gfx/skia/skia/src/views/mac/SkTextFieldCell.m
@@ -0,0 +1,56 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#import "SkTextFieldCell.h"
+@implementation SkTextFieldCell
+- (NSRect)drawingRectForBounds:(NSRect)theRect {
+ NSRect newRect = [super drawingRectForBounds:theRect];
+ if (selectingOrEditing == NO) {
+ NSSize textSize = [self cellSizeForBounds:theRect];
+ float heightDelta = newRect.size.height - textSize.height;
+ if (heightDelta > 0) {
+ newRect.size.height -= heightDelta;
+ newRect.origin.y += (heightDelta / 2);
+ }
+ }
+ return newRect;
+}
+
+- (void)selectWithFrame:(NSRect)aRect
+ inView:(NSView *)controlView
+ editor:(NSText *)textObj
+ delegate:(id)anObject
+ start:(NSInteger)selStart
+ length:(NSInteger)selLength {
+ aRect = [self drawingRectForBounds:aRect];
+ selectingOrEditing = YES;
+ [super selectWithFrame:aRect
+ inView:controlView
+ editor:textObj
+ delegate:anObject
+ start:selStart
+ length:selLength];
+ selectingOrEditing = NO;
+}
+
+- (void)editWithFrame:(NSRect)aRect
+ inView:(NSView *)controlView
+ editor:(NSText *)textObj
+ delegate:(id)anObject
+ event:(NSEvent *)theEvent {
+ aRect = [self drawingRectForBounds:aRect];
+ selectingOrEditing = YES;
+ [super editWithFrame:aRect
+ inView:controlView
+ editor:textObj
+ delegate:anObject
+ event:theEvent];
+ selectingOrEditing = NO;
+}
+
+@end
diff --git a/gfx/skia/skia/src/views/mac/skia_mac.mm b/gfx/skia/skia/src/views/mac/skia_mac.mm
new file mode 100644
index 000000000..98d4c4bd9
--- /dev/null
+++ b/gfx/skia/skia/src/views/mac/skia_mac.mm
@@ -0,0 +1,126 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <crt_externs.h>
+#import <Cocoa/Cocoa.h>
+#include "SkApplication.h"
+#include "SkGraphics.h"
+#include "SkNSView.h"
+
+@interface MainView : SkNSView {
+}
+- (id)initWithFrame: (NSRect)frame ;
+- (void)dealloc;
+- (void)begin;
+@end
+
+@implementation MainView : SkNSView
+
+- (id)initWithFrame: (NSRect)frame {
+ self = [super initWithFrame:frame];
+ return self;
+}
+
+- (void)dealloc {
+ delete fWind;
+ [super dealloc];
+}
+
+- (void)begin {
+ fWind = create_sk_window(self, *_NSGetArgc(), *_NSGetArgv());
+ [self setUpWindow];
+}
+@end
+
+@interface AppDelegate : NSObject<NSApplicationDelegate, NSWindowDelegate> {
+}
+- (id)init;
+- (BOOL)applicationShouldTerminateAfterLastWindowClosed:(NSApplication *)theApplication;
+@end
+
+#
+@implementation AppDelegate : NSObject
+- (id)init {
+ self = [super init];
+ return self;
+}
+
+- (BOOL)applicationShouldTerminateAfterLastWindowClosed:(NSApplication *)theApplication {
+ return TRUE;
+}
+@end
+
+int main(int argc, char *argv[]) {
+ SkGraphics::Init();
+ signal(SIGPIPE, SIG_IGN);
+ NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init];
+
+ NSApplication* app = [NSApplication sharedApplication];
+
+ NSUInteger windowStyle = (NSTitledWindowMask | NSClosableWindowMask | NSResizableWindowMask | NSMiniaturizableWindowMask);
+
+ NSRect windowRect = NSMakeRect(100, 100, 1000, 1000);
+ NSWindow* window = [[NSWindow alloc] initWithContentRect:windowRect styleMask:windowStyle backing:NSBackingStoreBuffered defer:NO];
+
+ NSRect rect = [NSWindow contentRectForFrameRect:windowRect styleMask:windowStyle];
+ MainView* customView = [[MainView alloc] initWithFrame:rect];
+ [customView setTranslatesAutoresizingMaskIntoConstraints:NO];
+ NSView* contentView = window.contentView;
+ [contentView addSubview:customView];
+ NSDictionary *views = NSDictionaryOfVariableBindings(customView);
+
+ [contentView addConstraints:
+ [NSLayoutConstraint constraintsWithVisualFormat:@"H:|[customView]|"
+ options:0
+ metrics:nil
+ views:views]];
+
+ [contentView addConstraints:
+ [NSLayoutConstraint constraintsWithVisualFormat:@"V:|[customView]|"
+ options:0
+ metrics:nil
+ views:views]];
+
+ [customView begin];
+ [customView release];
+
+ [window makeKeyAndOrderFront:NSApp];
+
+ AppDelegate * appDelegate = [[[AppDelegate alloc] init] autorelease];
+
+ app.delegate = appDelegate;
+
+ NSMenu* menu=[[NSMenu alloc] initWithTitle:@"AMainMenu"];
+ NSMenuItem* item;
+ NSMenu* subMenu;
+
+ //Create the application menu.
+ item=[[NSMenuItem alloc] initWithTitle:@"Apple" action:NULL keyEquivalent:@""];
+ [menu addItem:item];
+ subMenu=[[NSMenu alloc] initWithTitle:@"Apple"];
+ [menu setSubmenu:subMenu forItem:item];
+ [item release];
+ item=[[NSMenuItem alloc] initWithTitle:@"Quit" action:@selector(terminate:) keyEquivalent:@"q"];
+ [subMenu addItem:item];
+ [item release];
+ [subMenu release];
+
+ //Add the menu to the app.
+ [app setMenu:menu];
+
+ [app setActivationPolicy:NSApplicationActivationPolicyRegular];
+
+ [app run];
+
+ [menu release];
+ [appDelegate release];
+ [window release];
+ [pool release];
+
+ return EXIT_SUCCESS;
+}
diff --git a/gfx/skia/skia/src/views/sdl/SkOSWindow_SDL.cpp b/gfx/skia/skia/src/views/sdl/SkOSWindow_SDL.cpp
new file mode 100644
index 000000000..88b8353b6
--- /dev/null
+++ b/gfx/skia/skia/src/views/sdl/SkOSWindow_SDL.cpp
@@ -0,0 +1,401 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkOSWindow_SDL.h"
+#include "SkCanvas.h"
+
+#if defined(SK_BUILD_FOR_ANDROID)
+#include <GLES/gl.h>
+#elif defined(SK_BUILD_FOR_UNIX)
+#include <GL/gl.h>
+#elif defined(SK_BUILD_FOR_MAC)
+#include <gl.h>
+#endif
+
+const int kInitialWindowWidth = 640;
+const int kInitialWindowHeight = 480;
+static SkOSWindow* gCurrentWindow;
+
+static void report_sdl_error(const char* failure) {
+ const char* error = SDL_GetError();
+ SkASSERT(error); // Called only to check SDL error.
+ SkDebugf("%s SDL Error: %s.\n", failure, error);
+ SDL_ClearError();
+}
+SkOSWindow::SkOSWindow(void*)
+ : fWindow(nullptr)
+ , fGLContext(nullptr)
+ , fWindowMSAASampleCount(0) {
+
+ SkASSERT(!gCurrentWindow);
+ gCurrentWindow = this;
+
+ this->createWindow(0);
+}
+
+SkOSWindow::~SkOSWindow() {
+ this->destroyWindow();
+ gCurrentWindow = nullptr;
+}
+
+SkOSWindow* SkOSWindow::GetInstanceForWindowID(Uint32 windowID) {
+ if (gCurrentWindow &&
+ gCurrentWindow->fWindow &&
+ SDL_GetWindowID(gCurrentWindow->fWindow) == windowID) {
+ return gCurrentWindow;
+ }
+ return nullptr;
+}
+
+void SkOSWindow::release() {
+ if (fGLContext) {
+ SDL_GL_DeleteContext(fGLContext);
+ fGLContext = nullptr;
+ }
+}
+
+bool SkOSWindow::attach(SkBackEndTypes attachType, int msaaSampleCount, bool deepColor,
+ AttachmentInfo* info) {
+ this->createWindow(msaaSampleCount);
+ if (!fWindow) {
+ return false;
+ }
+ if (!fGLContext) {
+ fGLContext = SDL_GL_CreateContext(fWindow);
+ if (!fGLContext) {
+ report_sdl_error("Failed to create SDL GL context.");
+ return false;
+ }
+ glClearColor(0, 0, 0, 0);
+ glClearStencil(0);
+ glStencilMask(0xffffffff);
+ glClear(GL_COLOR_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
+ }
+
+ if (SDL_GL_MakeCurrent(fWindow, fGLContext) != 0) {
+ report_sdl_error("Failed to make SDL GL context current.");
+ this->release();
+ return false;
+ }
+
+ info->fSampleCount = msaaSampleCount;
+ info->fStencilBits = 8;
+
+ glViewport(0, 0, SkScalarRoundToInt(this->width()), SkScalarRoundToInt(this->height()));
+ return true;
+}
+
+void SkOSWindow::present() {
+ if (!fWindow) {
+ return;
+ }
+ SDL_GL_SwapWindow(fWindow);
+}
+
+bool SkOSWindow::makeFullscreen() {
+ if (!fWindow) {
+ return false;
+ }
+ SDL_SetWindowFullscreen(fWindow, SDL_WINDOW_FULLSCREEN_DESKTOP);
+ return true;
+}
+
+void SkOSWindow::setVsync(bool vsync) {
+ if (!fWindow) {
+ return;
+ }
+ SDL_GL_SetSwapInterval(vsync ? 1 : 0);
+}
+
+void SkOSWindow::closeWindow() {
+ this->destroyWindow();
+
+ // Currently closing the window causes the app to quit.
+ SDL_Event event;
+ event.type = SDL_QUIT;
+ SDL_PushEvent(&event);
+}
+
+static SkKey convert_sdlkey_to_skkey(SDL_Keycode src) {
+ switch (src) {
+ case SDLK_UP:
+ return kUp_SkKey;
+ case SDLK_DOWN:
+ return kDown_SkKey;
+ case SDLK_LEFT:
+ return kLeft_SkKey;
+ case SDLK_RIGHT:
+ return kRight_SkKey;
+ case SDLK_HOME:
+ return kHome_SkKey;
+ case SDLK_END:
+ return kEnd_SkKey;
+ case SDLK_ASTERISK:
+ return kStar_SkKey;
+ case SDLK_HASH:
+ return kHash_SkKey;
+ case SDLK_0:
+ return k0_SkKey;
+ case SDLK_1:
+ return k1_SkKey;
+ case SDLK_2:
+ return k2_SkKey;
+ case SDLK_3:
+ return k3_SkKey;
+ case SDLK_4:
+ return k4_SkKey;
+ case SDLK_5:
+ return k5_SkKey;
+ case SDLK_6:
+ return k6_SkKey;
+ case SDLK_7:
+ return k7_SkKey;
+ case SDLK_8:
+ return k8_SkKey;
+ case SDLK_9:
+ return k9_SkKey;
+ default:
+ return kNONE_SkKey;
+ }
+}
+
+void SkOSWindow::createWindow(int msaaSampleCount) {
+ if (fWindowMSAASampleCount != msaaSampleCount) {
+ this->destroyWindow();
+ }
+ if (fWindow) {
+ return;
+ }
+ uint32_t windowFlags =
+#if defined(SK_BUILD_FOR_ANDROID)
+ SDL_WINDOW_BORDERLESS | SDL_WINDOW_FULLSCREEN_DESKTOP |
+ SDL_WINDOW_ALLOW_HIGHDPI |
+#endif
+ SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE;
+
+ // GL attributes must be set before the SDL_WINDOW_OPENGL window is created.
+#if defined(SK_BUILD_FOR_ANDROID)
+ // TODO we should try and get a 3.0 context first
+ SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 2);
+ SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 0);
+ SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_ES);
+#else
+ SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
+ SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 1);
+ SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
+#endif
+ SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8);
+ SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 8);
+ SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8);
+ SDL_GL_SetAttribute(SDL_GL_ALPHA_SIZE, 8);
+ SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
+ SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 24);
+ SDL_GL_SetAttribute(SDL_GL_STENCIL_SIZE, 8);
+#if defined(SK_BUILD_FOR_UNIX)
+ // Apparently an MSAA request matches the "slow caveat". Make SDL not set any caveat for MSAA
+ // by setting -1 for ACCELERATED_VISUAL. For non-MSAA, set ACCELERATED_VISUAL to 1 just for
+ // compatibility with other platforms.
+ SDL_GL_SetAttribute(SDL_GL_ACCELERATED_VISUAL, msaaSampleCount > 0 ? -1 : 1);
+#else
+ SDL_GL_SetAttribute(SDL_GL_ACCELERATED_VISUAL, 1);
+#endif
+ SDL_GL_SetAttribute(SDL_GL_MULTISAMPLEBUFFERS, msaaSampleCount > 0 ? 1 : 0);
+ SDL_GL_SetAttribute(SDL_GL_MULTISAMPLESAMPLES, msaaSampleCount);
+
+ // This is an approximation for sizing purposes.
+ bool isInitialWindow = this->width() == 0 && this->height() == 0;
+ SkScalar windowWidth = isInitialWindow ? kInitialWindowWidth : this->width();
+ SkScalar windowHeight = isInitialWindow ? kInitialWindowHeight : this->height();
+
+ fWindow = SDL_CreateWindow(this->getTitle(), SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED,
+ windowWidth, windowHeight, windowFlags);
+ if (!fWindow) {
+ report_sdl_error("Failed to create SDL window.");
+ return;
+ }
+ fWindowMSAASampleCount = msaaSampleCount;
+}
+
+void SkOSWindow::destroyWindow() {
+ this->release();
+ if (fWindow) {
+ SDL_DestroyWindow(fWindow);
+ fWindow = nullptr;
+ fWindowMSAASampleCount = 0;
+ }
+}
+
+bool SkOSWindow::HasDirtyWindows() {
+ if (gCurrentWindow && gCurrentWindow->fWindow) {
+ return gCurrentWindow->isDirty();
+ }
+ return false;
+}
+
+void SkOSWindow::UpdateDirtyWindows() {
+ if (gCurrentWindow && gCurrentWindow->fWindow) {
+ if (gCurrentWindow->isDirty()) {
+ // This will call present.
+ gCurrentWindow->update(nullptr);
+ }
+ }
+}
+
+void SkOSWindow::HandleEvent(const SDL_Event& event) {
+ switch (event.type) {
+ case SDL_MOUSEMOTION:
+ if (SkOSWindow* window = GetInstanceForWindowID(event.motion.windowID)) {
+ if (event.motion.state == SDL_PRESSED) {
+ window->handleClick(event.motion.x, event.motion.y,
+ SkView::Click::kMoved_State, nullptr);
+ }
+ }
+ break;
+ case SDL_MOUSEBUTTONDOWN:
+ case SDL_MOUSEBUTTONUP:
+ if (SkOSWindow* window = GetInstanceForWindowID(event.button.windowID)) {
+ window->handleClick(event.button.x, event.button.y,
+ event.button.state == SDL_PRESSED ?
+ SkView::Click::kDown_State :
+ SkView::Click::kUp_State, nullptr);
+ }
+ break;
+ case SDL_KEYDOWN:
+ if (SkOSWindow* window = GetInstanceForWindowID(event.key.windowID)) {
+ SDL_Keycode key = event.key.keysym.sym;
+ SkKey sk = convert_sdlkey_to_skkey(key);
+ if (kNONE_SkKey != sk) {
+ if (event.key.state == SDL_PRESSED) {
+ window->handleKey(sk);
+ } else {
+ window->handleKeyUp(sk);
+ }
+ } else if (key == SDLK_ESCAPE) {
+ window->closeWindow();
+ }
+ }
+ break;
+ case SDL_TEXTINPUT:
+ if (SkOSWindow* window = GetInstanceForWindowID(event.text.windowID)) {
+ size_t len = strlen(event.text.text);
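+ // Note: each byte of the (UTF-8) text is forwarded as a separate SkUnichar.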
+ for (size_t i = 0; i < len; i++) {
+ window->handleChar((SkUnichar)event.text.text[i]);
+ }
+ }
+ break;
+ case SDL_WINDOWEVENT:
+ switch (event.window.event) {
+ case SDL_WINDOWEVENT_SHOWN:
+ // For initialization purposes, we resize upon first show.
+ // Fallthrough.
+ case SDL_WINDOWEVENT_SIZE_CHANGED:
+ if (SkOSWindow* window = GetInstanceForWindowID(event.window.windowID)) {
+ int w = 0;
+ int h = 0;
+ SDL_GetWindowSize(window->fWindow, &w, &h);
+ window->resize(w, h);
+ }
+ break;
+ case SDL_WINDOWEVENT_FOCUS_GAINED:
+ if (GetInstanceForWindowID(event.window.windowID)) {
+ SDL_StartTextInput();
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+SkMSec gTimerDelay;
+
+void SkOSWindow::RunEventLoop() {
+ for (;;) {
+ SkEvent::ServiceQueueTimer();
+ bool hasMoreSkEvents = SkEvent::ProcessEvent();
+
+ SDL_Event event;
+ bool hasSDLEvents = SDL_PollEvent(&event) == 1;
+
+ // Invalidations do not post to the event loop; instead we check the windows
+ // on each event loop iteration.
+ bool hasDirtyWindows = HasDirtyWindows();
+
+ if (!hasSDLEvents && !hasMoreSkEvents && !hasDirtyWindows) {
+ // If there are no SDL events, SkOSWindow updates, or SkEvents left to
+ // process, wait for the next SDL event.
+ if (gTimerDelay > 0) {
+ hasSDLEvents = SDL_WaitEventTimeout(&event, gTimerDelay) == 1;
+ } else {
+ hasSDLEvents = SDL_WaitEvent(&event) == 1;
+ }
+ }
+ while (hasSDLEvents) {
+ if (event.type == SDL_QUIT) {
+ return;
+ }
+ HandleEvent(event);
+ hasSDLEvents = SDL_PollEvent(&event);
+ }
+ UpdateDirtyWindows();
+ }
+}
+
+void SkOSWindow::onSetTitle(const char title[]) {
+ if (!fWindow) {
+ return;
+ }
+ this->updateWindowTitle();
+}
+
+void SkOSWindow::updateWindowTitle() {
+ SDL_SetWindowTitle(fWindow, this->getTitle());
+}
+///////////////////////////////////////////////////////////////////////////////////////
+
+void SkEvent::SignalNonEmptyQueue() {
+ // nothing to do, since we spin on our event-queue
+}
+
+void SkEvent::SignalQueueTimer(SkMSec delay) {
+ gTimerDelay = delay;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "SkApplication.h"
+#include "SkEvent.h"
+#include "SkWindow.h"
+
+#if defined(SK_BUILD_FOR_ANDROID)
+int SDL_main(int argc, char** argv) {
+#else
+int main(int argc, char** argv) {
+#endif
+ if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_EVENTS) != 0) {
+ report_sdl_error("Failed to init SDL.");
+ return -1;
+ }
+
+ application_init();
+
+ SkOSWindow* window = create_sk_window(nullptr, argc, argv);
+
+ // drain any events that occurred before |window| was assigned.
+ while (SkEvent::ProcessEvent());
+
+ SkOSWindow::RunEventLoop();
+
+ delete window;
+ application_term();
+
+ SDL_Quit();
+
+ return 0;
+}
diff --git a/gfx/skia/skia/src/views/unix/SkOSWindow_Unix.cpp b/gfx/skia/skia/src/views/unix/SkOSWindow_Unix.cpp
new file mode 100644
index 000000000..2f195927e
--- /dev/null
+++ b/gfx/skia/skia/src/views/unix/SkOSWindow_Unix.cpp
@@ -0,0 +1,519 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include <X11/Xlib.h>
+#include <X11/Xatom.h>
+#include <X11/XKBlib.h>
+#include <GL/glx.h>
+#include <GL/gl.h>
+#include <GL/glu.h>
+
+#include "SkWindow.h"
+
+#include "SkBitmap.h"
+#include "SkCanvas.h"
+#include "SkColor.h"
+#include "SkEvent.h"
+#include "SkKey.h"
+#include "SkWindow.h"
+#include "XkeysToSkKeys.h"
+extern "C" {
+ #include "keysym2ucs.h"
+}
+
+const int WIDTH = 500;
+const int HEIGHT = 500;
+
+// Determine which events to listen for.
+const long EVENT_MASK = StructureNotifyMask|ButtonPressMask|ButtonReleaseMask
+ |ExposureMask|PointerMotionMask|KeyPressMask|KeyReleaseMask;
+
+SkOSWindow::SkOSWindow(void*)
+ : fVi(nullptr)
+ , fMSAASampleCount(0) {
+ fUnixWindow.fDisplay = nullptr;
+ fUnixWindow.fGLContext = nullptr;
+ this->initWindow(0, nullptr);
+ this->resize(WIDTH, HEIGHT);
+}
+
+SkOSWindow::~SkOSWindow() {
+ this->internalCloseWindow();
+}
+
+void SkOSWindow::internalCloseWindow() {
+ if (fUnixWindow.fDisplay) {
+ this->release();
+ SkASSERT(fUnixWindow.fGc);
+ XFreeGC(fUnixWindow.fDisplay, fUnixWindow.fGc);
+ fUnixWindow.fGc = nullptr;
+ XDestroyWindow(fUnixWindow.fDisplay, fUnixWindow.fWin);
+ fVi = nullptr;
+ XCloseDisplay(fUnixWindow.fDisplay);
+ fUnixWindow.fDisplay = nullptr;
+ fMSAASampleCount = 0;
+ }
+}
+
+void SkOSWindow::initWindow(int requestedMSAASampleCount, AttachmentInfo* info) {
+ if (fMSAASampleCount != requestedMSAASampleCount) {
+ this->internalCloseWindow();
+ }
+ // presence of fDisplay means we already have a window
+ if (fUnixWindow.fDisplay) {
+ if (info) {
+ if (fVi) {
+ glXGetConfig(fUnixWindow.fDisplay, fVi, GLX_SAMPLES_ARB, &info->fSampleCount);
+ glXGetConfig(fUnixWindow.fDisplay, fVi, GLX_STENCIL_SIZE, &info->fStencilBits);
+ } else {
+ info->fSampleCount = 0;
+ info->fStencilBits = 0;
+ }
+ }
+ return;
+ }
+ fUnixWindow.fDisplay = XOpenDisplay(nullptr);
+ Display* dsp = fUnixWindow.fDisplay;
+ if (nullptr == dsp) {
+ SkDebugf("Could not open an X Display");
+ return;
+ }
+ // Attempt to create a window that supports GL
+ GLint att[] = {
+ GLX_RGBA,
+ GLX_DEPTH_SIZE, 24,
+ GLX_DOUBLEBUFFER,
+ GLX_STENCIL_SIZE, 8,
+ None
+ };
+ SkASSERT(nullptr == fVi);
+ if (requestedMSAASampleCount > 0) {
+ static const GLint kAttCount = SK_ARRAY_COUNT(att);
+ GLint msaaAtt[kAttCount + 4];
+ memcpy(msaaAtt, att, sizeof(att));
+ SkASSERT(None == msaaAtt[kAttCount - 1]);
+ msaaAtt[kAttCount - 1] = GLX_SAMPLE_BUFFERS_ARB;
+ msaaAtt[kAttCount + 0] = 1;
+ msaaAtt[kAttCount + 1] = GLX_SAMPLES_ARB;
+ msaaAtt[kAttCount + 2] = requestedMSAASampleCount;
+ msaaAtt[kAttCount + 3] = None;
+ fVi = glXChooseVisual(dsp, DefaultScreen(dsp), msaaAtt);
+ fMSAASampleCount = requestedMSAASampleCount;
+ }
+ if (nullptr == fVi) {
+ fVi = glXChooseVisual(dsp, DefaultScreen(dsp), att);
+ fMSAASampleCount = 0;
+ }
+
+ if (fVi) {
+ if (info) {
+ glXGetConfig(dsp, fVi, GLX_SAMPLES_ARB, &info->fSampleCount);
+ glXGetConfig(dsp, fVi, GLX_STENCIL_SIZE, &info->fStencilBits);
+ }
+ Colormap colorMap = XCreateColormap(dsp,
+ RootWindow(dsp, fVi->screen),
+ fVi->visual,
+ AllocNone);
+ XSetWindowAttributes swa;
+ swa.colormap = colorMap;
+ swa.event_mask = EVENT_MASK;
+ fUnixWindow.fWin = XCreateWindow(dsp,
+ RootWindow(dsp, fVi->screen),
+ 0, 0, // x, y
+ WIDTH, HEIGHT,
+ 0, // border width
+ fVi->depth,
+ InputOutput,
+ fVi->visual,
+ CWEventMask | CWColormap,
+ &swa);
+ } else {
+ if (info) {
+ info->fSampleCount = 0;
+ info->fStencilBits = 0;
+ }
+ // Create a simple window instead; we will not be able to use GL in it.
+ fUnixWindow.fWin = XCreateSimpleWindow(dsp,
+ DefaultRootWindow(dsp),
+ 0, 0, // x, y
+ WIDTH, HEIGHT,
+ 0, // border width
+ 0, // border value
+ 0); // background value
+ }
+ this->mapWindowAndWait();
+ fUnixWindow.fGc = XCreateGC(dsp, fUnixWindow.fWin, 0, nullptr);
+}
+
+static unsigned getModi(const XEvent& evt) {
+ static const struct {
+ unsigned fXMask;
+ unsigned fSkMask;
+ } gModi[] = {
+ // X values found by experiment; they match ShiftMask, ControlMask and Mod1Mask in <X11/X.h>.
+ { 1, kShift_SkModifierKey }, // ShiftMask
+ { 4, kControl_SkModifierKey }, // ControlMask
+ { 8, kOption_SkModifierKey }, // Mod1Mask (Alt/Option)
+ };
+
+ unsigned modi = 0;
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gModi); ++i) {
+ if (evt.xkey.state & gModi[i].fXMask) {
+ modi |= gModi[i].fSkMask;
+ }
+ }
+ return modi;
+}
+
+static SkMSec gTimerDelay;
+
+static bool MyXNextEventWithDelay(Display* dsp, XEvent* evt) {
+ // Check for pending events before entering the select loop; there may be
+ // events in the in-memory queue that have not been processed yet.
+ if (XPending(dsp)) {
+ XNextEvent(dsp, evt);
+ return true;
+ }
+
+ SkMSec ms = gTimerDelay;
+ if (ms > 0) {
+ int x11_fd = ConnectionNumber(dsp);
+ fd_set input_fds;
+ FD_ZERO(&input_fds);
+ FD_SET(x11_fd, &input_fds);
+
+ timeval tv;
+ tv.tv_sec = ms / 1000; // seconds
+ tv.tv_usec = (ms % 1000) * 1000; // microseconds
+
+ if (!select(x11_fd + 1, &input_fds, nullptr, nullptr, &tv)) {
+ if (!XPending(dsp)) {
+ return false;
+ }
+ }
+ }
+ XNextEvent(dsp, evt);
+ return true;
+}
+
+static Atom wm_delete_window_message;
+
+SkOSWindow::NextXEventResult SkOSWindow::nextXEvent() {
+ XEvent evt;
+ Display* dsp = fUnixWindow.fDisplay;
+
+ if (!MyXNextEventWithDelay(dsp, &evt)) {
+ return kContinue_NextXEventResult;
+ }
+
+ switch (evt.type) {
+ case Expose:
+ if (0 == evt.xexpose.count) {
+ return kPaintRequest_NextXEventResult;
+ }
+ break;
+ case ConfigureNotify:
+ this->resize(evt.xconfigure.width, evt.xconfigure.height);
+ break;
+ case ButtonPress:
+ if (evt.xbutton.button == Button1)
+ this->handleClick(evt.xbutton.x, evt.xbutton.y,
+ SkView::Click::kDown_State, nullptr, getModi(evt));
+ break;
+ case ButtonRelease:
+ if (evt.xbutton.button == Button1)
+ this->handleClick(evt.xbutton.x, evt.xbutton.y,
+ SkView::Click::kUp_State, nullptr, getModi(evt));
+ break;
+ case MotionNotify:
+ this->handleClick(evt.xmotion.x, evt.xmotion.y,
+ SkView::Click::kMoved_State, nullptr, getModi(evt));
+ break;
+ case KeyPress: {
+ int shiftLevel = (evt.xkey.state & ShiftMask) ? 1 : 0;
+ KeySym keysym = XkbKeycodeToKeysym(dsp, evt.xkey.keycode,
+ 0, shiftLevel);
+ if (keysym == XK_Escape) {
+ return kQuitRequest_NextXEventResult;
+ }
+ this->handleKey(XKeyToSkKey(keysym));
+ long uni = keysym2ucs(keysym);
+ if (uni != -1) {
+ this->handleChar((SkUnichar) uni);
+ }
+ break;
+ }
+ case KeyRelease:
+ this->handleKeyUp(XKeyToSkKey(XkbKeycodeToKeysym(dsp, evt.xkey.keycode, 0, 0)));
+ break;
+ case ClientMessage:
+ if ((Atom)evt.xclient.data.l[0] == wm_delete_window_message) {
+ return kQuitRequest_NextXEventResult;
+ }
+ // fallthrough
+ default:
+ // Do nothing for other events
+ break;
+ }
+ return kContinue_NextXEventResult;
+}
+
+void SkOSWindow::loop() {
+ Display* dsp = fUnixWindow.fDisplay;
+ if (nullptr == dsp) {
+ return;
+ }
+ Window win = fUnixWindow.fWin;
+
+ wm_delete_window_message = XInternAtom(dsp, "WM_DELETE_WINDOW", False);
+ XSetWMProtocols(dsp, win, &wm_delete_window_message, 1);
+
+ XSelectInput(dsp, win, EVENT_MASK);
+
+ bool sentExposeEvent = false;
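+ // At most one synthetic Expose event is in flight at a time; it is sent when the window
+ // is dirty so that painting is folded into the regular X event handling below.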
+
+ for (;;) {
+ SkEvent::ServiceQueueTimer();
+
+ bool moreToDo = SkEvent::ProcessEvent();
+
+ if (this->isDirty() && !sentExposeEvent) {
+ sentExposeEvent = true;
+
+ XEvent evt;
+ sk_bzero(&evt, sizeof(evt));
+ evt.type = Expose;
+ evt.xexpose.display = dsp;
+ XSendEvent(dsp, win, false, ExposureMask, &evt);
+ }
+
+ if (XPending(dsp) || !moreToDo) {
+ switch (this->nextXEvent()) {
+ case kContinue_NextXEventResult:
+ break;
+ case kPaintRequest_NextXEventResult:
+ sentExposeEvent = false;
+ if (this->isDirty()) {
+ this->update(nullptr);
+ }
+ this->doPaint();
+ break;
+ case kQuitRequest_NextXEventResult:
+ return;
+ }
+ }
+ }
+}
+
+void SkOSWindow::mapWindowAndWait() {
+ SkASSERT(fUnixWindow.fDisplay);
+ Display* dsp = fUnixWindow.fDisplay;
+ Window win = fUnixWindow.fWin;
+ XMapWindow(dsp, win);
+
+ long eventMask = StructureNotifyMask;
+ XSelectInput(dsp, win, eventMask);
+
+ // Wait until screen is ready.
+ XEvent evt;
+ do {
+ XNextEvent(dsp, &evt);
+ } while (evt.type != MapNotify);
+}
+
+////////////////////////////////////////////////
+
+// Some helper code to load the correct version of glXSwapInterval
+#define GLX_GET_PROC_ADDR(name) glXGetProcAddress(reinterpret_cast<const GLubyte*>((name)))
+#define EXT_WRANGLE(name, type, ...) \
+ if (GLX_GET_PROC_ADDR(#name)) { \
+ static type k##name; \
+ if (!k##name) { \
+ k##name = (type) GLX_GET_PROC_ADDR(#name); \
+ } \
+ k##name(__VA_ARGS__); \
+ /*SkDebugf("using %s\n", #name);*/ \
+ return; \
+ }
+
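+// Tries the EXT, MESA and SGI swap-interval variants in turn; the first one the driver
+// exports is cached and used.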
+static void glXSwapInterval(Display* dsp, GLXDrawable drawable, int interval) {
+ EXT_WRANGLE(glXSwapIntervalEXT, PFNGLXSWAPINTERVALEXTPROC, dsp, drawable, interval);
+ EXT_WRANGLE(glXSwapIntervalMESA, PFNGLXSWAPINTERVALMESAPROC, interval);
+ EXT_WRANGLE(glXSwapIntervalSGI, PFNGLXSWAPINTERVALSGIPROC, interval);
+}
+
+/////////////////////////////////////////////////////////////////////////
+
+bool SkOSWindow::attach(SkBackEndTypes, int msaaSampleCount, bool deepColor,
+ AttachmentInfo* info) {
+ this->initWindow(msaaSampleCount, info);
+
+ if (nullptr == fUnixWindow.fDisplay) {
+ return false;
+ }
+ if (nullptr == fUnixWindow.fGLContext) {
+ SkASSERT(fVi);
+
+ fUnixWindow.fGLContext = glXCreateContext(fUnixWindow.fDisplay,
+ fVi,
+ nullptr,
+ GL_TRUE);
+ if (nullptr == fUnixWindow.fGLContext) {
+ return false;
+ }
+ }
+ glXMakeCurrent(fUnixWindow.fDisplay,
+ fUnixWindow.fWin,
+ fUnixWindow.fGLContext);
+ glViewport(0, 0,
+ SkScalarRoundToInt(this->width()),
+ SkScalarRoundToInt(this->height()));
+ glClearColor(0, 0, 0, 0);
+ glClearStencil(0);
+ glClear(GL_COLOR_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
+ return true;
+}
+
+void SkOSWindow::release() {
+ if (nullptr == fUnixWindow.fDisplay || nullptr == fUnixWindow.fGLContext) {
+ return;
+ }
+ glXMakeCurrent(fUnixWindow.fDisplay, None, nullptr);
+ glXDestroyContext(fUnixWindow.fDisplay, fUnixWindow.fGLContext);
+ fUnixWindow.fGLContext = nullptr;
+}
+
+void SkOSWindow::present() {
+ if (fUnixWindow.fDisplay && fUnixWindow.fGLContext) {
+ glXSwapBuffers(fUnixWindow.fDisplay, fUnixWindow.fWin);
+ }
+}
+
+void SkOSWindow::onSetTitle(const char title[]) {
+ if (nullptr == fUnixWindow.fDisplay) {
+ return;
+ }
+ XTextProperty textProp;
+ textProp.value = (unsigned char*)title;
+ textProp.format = 8;
+ textProp.nitems = strlen((char*)textProp.value);
+ textProp.encoding = XA_STRING;
+ XSetWMName(fUnixWindow.fDisplay, fUnixWindow.fWin, &textProp);
+}
+
+static bool convertBitmapToXImage(XImage& image, const SkBitmap& bitmap) {
+ sk_bzero(&image, sizeof(image));
+
+ int bitsPerPixel = bitmap.bytesPerPixel() * 8;
+ image.width = bitmap.width();
+ image.height = bitmap.height();
+ image.format = ZPixmap;
+ image.data = (char*) bitmap.getPixels();
+ image.byte_order = LSBFirst;
+ image.bitmap_unit = bitsPerPixel;
+ image.bitmap_bit_order = LSBFirst;
+ image.bitmap_pad = bitsPerPixel;
+ image.depth = 24;
+ image.bytes_per_line = bitmap.rowBytes() - bitmap.width() * 4;
+ image.bits_per_pixel = bitsPerPixel;
+ return XInitImage(&image);
+}
+
+void SkOSWindow::doPaint() {
+ if (nullptr == fUnixWindow.fDisplay) {
+ return;
+ }
+ // If we are drawing with GL, we don't need XPutImage.
+ if (fUnixWindow.fGLContext) {
+ return;
+ }
+ // Draw the bitmap to the screen.
+ const SkBitmap& bitmap = getBitmap();
+ int width = bitmap.width();
+ int height = bitmap.height();
+
+ XImage image;
+ if (!convertBitmapToXImage(image, bitmap)) {
+ return;
+ }
+
+ XPutImage(fUnixWindow.fDisplay,
+ fUnixWindow.fWin,
+ fUnixWindow.fGc,
+ &image,
+ 0, 0, // src x,y
+ 0, 0, // dst x,y
+ width, height);
+}
+
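+// _NET_WM_STATE client message actions as defined by the EWMH (Extended Window Manager Hints) spec.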
+enum {
+ _NET_WM_STATE_REMOVE = 0,
+ _NET_WM_STATE_ADD = 1,
+ _NET_WM_STATE_TOGGLE = 2
+};
+
+bool SkOSWindow::makeFullscreen() {
+ Display* dsp = fUnixWindow.fDisplay;
+ if (nullptr == dsp) {
+ return false;
+ }
+
+ // Full screen
+ Atom wm_state = XInternAtom(dsp, "_NET_WM_STATE", False);
+ Atom fullscreen = XInternAtom(dsp, "_NET_WM_STATE_FULLSCREEN", False);
+
+ XEvent evt;
+ sk_bzero(&evt, sizeof(evt));
+ evt.type = ClientMessage;
+ evt.xclient.window = fUnixWindow.fWin;
+ evt.xclient.message_type = wm_state;
+ evt.xclient.format = 32;
+ evt.xclient.data.l[0] = _NET_WM_STATE_ADD;
+ evt.xclient.data.l[1] = fullscreen;
+ evt.xclient.data.l[2] = 0;
+
+ XSendEvent(dsp, DefaultRootWindow(dsp), False,
+ SubstructureRedirectMask | SubstructureNotifyMask, &evt);
+ return true;
+}
+
+void SkOSWindow::setVsync(bool vsync) {
+ if (fUnixWindow.fDisplay && fUnixWindow.fGLContext && fUnixWindow.fWin) {
+ int swapInterval = vsync ? 1 : 0;
+ glXSwapInterval(fUnixWindow.fDisplay, fUnixWindow.fWin, swapInterval);
+ }
+}
+
+void SkOSWindow::closeWindow() {
+ Display* dsp = fUnixWindow.fDisplay;
+ if (nullptr == dsp) {
+ return;
+ }
+
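+ // Simulate a window-manager close request by sending the ICCCM WM_DELETE_WINDOW client
+ // message to our own window; nextXEvent() turns it into kQuitRequest_NextXEventResult.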
+ XEvent evt;
+ sk_bzero(&evt, sizeof(evt));
+ evt.type = ClientMessage;
+ evt.xclient.message_type = XInternAtom(dsp, "WM_PROTOCOLS", true);
+ evt.xclient.window = fUnixWindow.fWin;
+ evt.xclient.format = 32;
+ evt.xclient.data.l[0] = XInternAtom(dsp, "WM_DELETE_WINDOW", false);
+ evt.xclient.data.l[1] = CurrentTime;
+
+ XSendEvent(dsp, fUnixWindow.fWin, false, NoEventMask, &evt);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkEvent::SignalNonEmptyQueue() {
+ // nothing to do, since we spin on our event-queue, polling for XPending
+}
+
+void SkEvent::SignalQueueTimer(SkMSec delay) {
+ // just need to record the delay time. We handle waking up for it in
+ // MyXNextEventWithDelay()
+ gTimerDelay = delay;
+}
diff --git a/gfx/skia/skia/src/views/unix/XkeysToSkKeys.h b/gfx/skia/skia/src/views/unix/XkeysToSkKeys.h
new file mode 100644
index 000000000..aced74c0a
--- /dev/null
+++ b/gfx/skia/skia/src/views/unix/XkeysToSkKeys.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "X11/Xlib.h"
+#include "X11/keysym.h"
+
+#include "SkKey.h"
+
+#ifndef XKEYS_TOSKKEYS_H
+#define XKEYS_TOSKKEYS_H
+
+SkKey XKeyToSkKey(KeySym keysym) {
+ switch (keysym) {
+ case XK_BackSpace:
+ return kBack_SkKey;
+ case XK_Return:
+ return kOK_SkKey;
+ case XK_Home:
+ return kHome_SkKey;
+ case XK_End:
+ return kEnd_SkKey;
+ case XK_Right:
+ return kRight_SkKey;
+ case XK_Left:
+ return kLeft_SkKey;
+ case XK_Down:
+ return kDown_SkKey;
+ case XK_Up:
+ return kUp_SkKey;
+ case XK_KP_0:
+ case XK_KP_Insert:
+ return k0_SkKey;
+ case XK_KP_1:
+ case XK_KP_End:
+ return k1_SkKey;
+ case XK_KP_2:
+ case XK_KP_Down:
+ return k2_SkKey;
+ case XK_KP_3:
+ case XK_KP_Page_Down:
+ return k3_SkKey;
+ case XK_KP_4:
+ case XK_KP_Left:
+ return k4_SkKey;
+ case XK_KP_5:
+ return k5_SkKey;
+ case XK_KP_6:
+ case XK_KP_Right:
+ return k6_SkKey;
+ case XK_KP_7:
+ case XK_KP_Home:
+ return k7_SkKey;
+ case XK_KP_8:
+ case XK_KP_Up:
+ return k8_SkKey;
+ case XK_KP_9:
+ case XK_KP_Page_Up:
+ return k9_SkKey;
+ default:
+ return kNONE_SkKey;
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/views/unix/keysym2ucs.c b/gfx/skia/skia/src/views/unix/keysym2ucs.c
new file mode 100644
index 000000000..a0c4ced9e
--- /dev/null
+++ b/gfx/skia/skia/src/views/unix/keysym2ucs.c
@@ -0,0 +1,848 @@
+/* $XFree86$
+ * This module converts keysym values into the corresponding ISO 10646
+ * (UCS, Unicode) values.
+ *
+ * The array keysymtab[] contains pairs of X11 keysym values for graphical
+ * characters and the corresponding Unicode value. The function
+ * keysym2ucs() maps a keysym onto a Unicode value using a binary search,
+ * therefore keysymtab[] must remain SORTED by keysym value.
+ *
+ * The keysym -> UTF-8 conversion will hopefully one day be provided
+ * by Xlib via XmbLookupString() and should ideally not have to be
+ * done in X applications. But we are not there yet.
+ *
+ * Any UCS character in the range U-00000000 to U-00FFFFFF can be
+ * represented by a keysym value in the range 0x01000000 to 0x01ffffff.
+ * This admittedly does not cover the entire 31-bit space of UCS, but
+ * it does cover all of the characters up to U-10FFFF, which can be
+ * represented by UTF-16, and more, and it is very unlikely that higher
+ * UCS codes will ever be assigned by ISO. So to get Unicode character
+ * U+ABCD you can directly use keysym 0x0100abcd.
+ *
+ * NOTE: The comments in the table below contain the actual character
+ * encoded in UTF-8, so for viewing and editing best use an editor in
+ * UTF-8 mode.
+ *
+ * Author: Markus G. Kuhn <http://www.cl.cam.ac.uk/~mgk25/>,
+ * University of Cambridge, April 2001
+ *
+ * Special thanks to Richard Verhoeven <river@win.tue.nl> for preparing
+ * an initial draft of the mapping table.
+ *
+ * This software is in the public domain. Share and enjoy!
+ *
+ * AUTOMATICALLY GENERATED FILE, DO NOT EDIT !!! (unicode/convmap.pl)
+ */
+
+#include "keysym2ucs.h"
+
+struct codepair {
+ unsigned short keysym;
+ unsigned short ucs;
+} keysymtab[] = {
+ { 0x01a1, 0x0104 }, /* Aogonek Ą LATIN CAPITAL LETTER A WITH OGONEK */
+ { 0x01a2, 0x02d8 }, /* breve ˘ BREVE */
+ { 0x01a3, 0x0141 }, /* Lstroke Ł LATIN CAPITAL LETTER L WITH STROKE */
+ { 0x01a5, 0x013d }, /* Lcaron Ľ LATIN CAPITAL LETTER L WITH CARON */
+ { 0x01a6, 0x015a }, /* Sacute Ś LATIN CAPITAL LETTER S WITH ACUTE */
+ { 0x01a9, 0x0160 }, /* Scaron Š LATIN CAPITAL LETTER S WITH CARON */
+ { 0x01aa, 0x015e }, /* Scedilla Ş LATIN CAPITAL LETTER S WITH CEDILLA */
+ { 0x01ab, 0x0164 }, /* Tcaron Ť LATIN CAPITAL LETTER T WITH CARON */
+ { 0x01ac, 0x0179 }, /* Zacute Ź LATIN CAPITAL LETTER Z WITH ACUTE */
+ { 0x01ae, 0x017d }, /* Zcaron Ž LATIN CAPITAL LETTER Z WITH CARON */
+ { 0x01af, 0x017b }, /* Zabovedot Ż LATIN CAPITAL LETTER Z WITH DOT ABOVE */
+ { 0x01b1, 0x0105 }, /* aogonek ą LATIN SMALL LETTER A WITH OGONEK */
+ { 0x01b2, 0x02db }, /* ogonek ˛ OGONEK */
+ { 0x01b3, 0x0142 }, /* lstroke ł LATIN SMALL LETTER L WITH STROKE */
+ { 0x01b5, 0x013e }, /* lcaron ľ LATIN SMALL LETTER L WITH CARON */
+ { 0x01b6, 0x015b }, /* sacute ś LATIN SMALL LETTER S WITH ACUTE */
+ { 0x01b7, 0x02c7 }, /* caron ˇ CARON */
+ { 0x01b9, 0x0161 }, /* scaron š LATIN SMALL LETTER S WITH CARON */
+ { 0x01ba, 0x015f }, /* scedilla ş LATIN SMALL LETTER S WITH CEDILLA */
+ { 0x01bb, 0x0165 }, /* tcaron ť LATIN SMALL LETTER T WITH CARON */
+ { 0x01bc, 0x017a }, /* zacute ź LATIN SMALL LETTER Z WITH ACUTE */
+ { 0x01bd, 0x02dd }, /* doubleacute ˝ DOUBLE ACUTE ACCENT */
+ { 0x01be, 0x017e }, /* zcaron ž LATIN SMALL LETTER Z WITH CARON */
+ { 0x01bf, 0x017c }, /* zabovedot ż LATIN SMALL LETTER Z WITH DOT ABOVE */
+ { 0x01c0, 0x0154 }, /* Racute Ŕ LATIN CAPITAL LETTER R WITH ACUTE */
+ { 0x01c3, 0x0102 }, /* Abreve Ă LATIN CAPITAL LETTER A WITH BREVE */
+ { 0x01c5, 0x0139 }, /* Lacute Ĺ LATIN CAPITAL LETTER L WITH ACUTE */
+ { 0x01c6, 0x0106 }, /* Cacute Ć LATIN CAPITAL LETTER C WITH ACUTE */
+ { 0x01c8, 0x010c }, /* Ccaron Č LATIN CAPITAL LETTER C WITH CARON */
+ { 0x01ca, 0x0118 }, /* Eogonek Ę LATIN CAPITAL LETTER E WITH OGONEK */
+ { 0x01cc, 0x011a }, /* Ecaron Ě LATIN CAPITAL LETTER E WITH CARON */
+ { 0x01cf, 0x010e }, /* Dcaron Ď LATIN CAPITAL LETTER D WITH CARON */
+ { 0x01d0, 0x0110 }, /* Dstroke Đ LATIN CAPITAL LETTER D WITH STROKE */
+ { 0x01d1, 0x0143 }, /* Nacute Ń LATIN CAPITAL LETTER N WITH ACUTE */
+ { 0x01d2, 0x0147 }, /* Ncaron Ň LATIN CAPITAL LETTER N WITH CARON */
+ { 0x01d5, 0x0150 }, /* Odoubleacute Ő LATIN CAPITAL LETTER O WITH DOUBLE ACUTE */
+ { 0x01d8, 0x0158 }, /* Rcaron Ř LATIN CAPITAL LETTER R WITH CARON */
+ { 0x01d9, 0x016e }, /* Uring Ů LATIN CAPITAL LETTER U WITH RING ABOVE */
+ { 0x01db, 0x0170 }, /* Udoubleacute Ű LATIN CAPITAL LETTER U WITH DOUBLE ACUTE */
+ { 0x01de, 0x0162 }, /* Tcedilla Ţ LATIN CAPITAL LETTER T WITH CEDILLA */
+ { 0x01e0, 0x0155 }, /* racute ŕ LATIN SMALL LETTER R WITH ACUTE */
+ { 0x01e3, 0x0103 }, /* abreve ă LATIN SMALL LETTER A WITH BREVE */
+ { 0x01e5, 0x013a }, /* lacute ĺ LATIN SMALL LETTER L WITH ACUTE */
+ { 0x01e6, 0x0107 }, /* cacute ć LATIN SMALL LETTER C WITH ACUTE */
+ { 0x01e8, 0x010d }, /* ccaron č LATIN SMALL LETTER C WITH CARON */
+ { 0x01ea, 0x0119 }, /* eogonek ę LATIN SMALL LETTER E WITH OGONEK */
+ { 0x01ec, 0x011b }, /* ecaron ě LATIN SMALL LETTER E WITH CARON */
+ { 0x01ef, 0x010f }, /* dcaron ď LATIN SMALL LETTER D WITH CARON */
+ { 0x01f0, 0x0111 }, /* dstroke đ LATIN SMALL LETTER D WITH STROKE */
+ { 0x01f1, 0x0144 }, /* nacute ń LATIN SMALL LETTER N WITH ACUTE */
+ { 0x01f2, 0x0148 }, /* ncaron ň LATIN SMALL LETTER N WITH CARON */
+ { 0x01f5, 0x0151 }, /* odoubleacute ő LATIN SMALL LETTER O WITH DOUBLE ACUTE */
+ { 0x01f8, 0x0159 }, /* rcaron ř LATIN SMALL LETTER R WITH CARON */
+ { 0x01f9, 0x016f }, /* uring ů LATIN SMALL LETTER U WITH RING ABOVE */
+ { 0x01fb, 0x0171 }, /* udoubleacute ű LATIN SMALL LETTER U WITH DOUBLE ACUTE */
+ { 0x01fe, 0x0163 }, /* tcedilla ţ LATIN SMALL LETTER T WITH CEDILLA */
+ { 0x01ff, 0x02d9 }, /* abovedot ˙ DOT ABOVE */
+ { 0x02a1, 0x0126 }, /* Hstroke Ħ LATIN CAPITAL LETTER H WITH STROKE */
+ { 0x02a6, 0x0124 }, /* Hcircumflex Ĥ LATIN CAPITAL LETTER H WITH CIRCUMFLEX */
+ { 0x02a9, 0x0130 }, /* Iabovedot İ LATIN CAPITAL LETTER I WITH DOT ABOVE */
+ { 0x02ab, 0x011e }, /* Gbreve Ğ LATIN CAPITAL LETTER G WITH BREVE */
+ { 0x02ac, 0x0134 }, /* Jcircumflex Ĵ LATIN CAPITAL LETTER J WITH CIRCUMFLEX */
+ { 0x02b1, 0x0127 }, /* hstroke ħ LATIN SMALL LETTER H WITH STROKE */
+ { 0x02b6, 0x0125 }, /* hcircumflex ĥ LATIN SMALL LETTER H WITH CIRCUMFLEX */
+ { 0x02b9, 0x0131 }, /* idotless ı LATIN SMALL LETTER DOTLESS I */
+ { 0x02bb, 0x011f }, /* gbreve ğ LATIN SMALL LETTER G WITH BREVE */
+ { 0x02bc, 0x0135 }, /* jcircumflex ĵ LATIN SMALL LETTER J WITH CIRCUMFLEX */
+ { 0x02c5, 0x010a }, /* Cabovedot Ċ LATIN CAPITAL LETTER C WITH DOT ABOVE */
+ { 0x02c6, 0x0108 }, /* Ccircumflex Ĉ LATIN CAPITAL LETTER C WITH CIRCUMFLEX */
+ { 0x02d5, 0x0120 }, /* Gabovedot Ġ LATIN CAPITAL LETTER G WITH DOT ABOVE */
+ { 0x02d8, 0x011c }, /* Gcircumflex Ĝ LATIN CAPITAL LETTER G WITH CIRCUMFLEX */
+ { 0x02dd, 0x016c }, /* Ubreve Ŭ LATIN CAPITAL LETTER U WITH BREVE */
+ { 0x02de, 0x015c }, /* Scircumflex Ŝ LATIN CAPITAL LETTER S WITH CIRCUMFLEX */
+ { 0x02e5, 0x010b }, /* cabovedot ċ LATIN SMALL LETTER C WITH DOT ABOVE */
+ { 0x02e6, 0x0109 }, /* ccircumflex ĉ LATIN SMALL LETTER C WITH CIRCUMFLEX */
+ { 0x02f5, 0x0121 }, /* gabovedot ġ LATIN SMALL LETTER G WITH DOT ABOVE */
+ { 0x02f8, 0x011d }, /* gcircumflex ĝ LATIN SMALL LETTER G WITH CIRCUMFLEX */
+ { 0x02fd, 0x016d }, /* ubreve ŭ LATIN SMALL LETTER U WITH BREVE */
+ { 0x02fe, 0x015d }, /* scircumflex ŝ LATIN SMALL LETTER S WITH CIRCUMFLEX */
+ { 0x03a2, 0x0138 }, /* kra ĸ LATIN SMALL LETTER KRA */
+ { 0x03a3, 0x0156 }, /* Rcedilla Ŗ LATIN CAPITAL LETTER R WITH CEDILLA */
+ { 0x03a5, 0x0128 }, /* Itilde Ĩ LATIN CAPITAL LETTER I WITH TILDE */
+ { 0x03a6, 0x013b }, /* Lcedilla Ļ LATIN CAPITAL LETTER L WITH CEDILLA */
+ { 0x03aa, 0x0112 }, /* Emacron Ē LATIN CAPITAL LETTER E WITH MACRON */
+ { 0x03ab, 0x0122 }, /* Gcedilla Ģ LATIN CAPITAL LETTER G WITH CEDILLA */
+ { 0x03ac, 0x0166 }, /* Tslash Ŧ LATIN CAPITAL LETTER T WITH STROKE */
+ { 0x03b3, 0x0157 }, /* rcedilla ŗ LATIN SMALL LETTER R WITH CEDILLA */
+ { 0x03b5, 0x0129 }, /* itilde ĩ LATIN SMALL LETTER I WITH TILDE */
+ { 0x03b6, 0x013c }, /* lcedilla ļ LATIN SMALL LETTER L WITH CEDILLA */
+ { 0x03ba, 0x0113 }, /* emacron ē LATIN SMALL LETTER E WITH MACRON */
+ { 0x03bb, 0x0123 }, /* gcedilla ģ LATIN SMALL LETTER G WITH CEDILLA */
+ { 0x03bc, 0x0167 }, /* tslash ŧ LATIN SMALL LETTER T WITH STROKE */
+ { 0x03bd, 0x014a }, /* ENG Ŋ LATIN CAPITAL LETTER ENG */
+ { 0x03bf, 0x014b }, /* eng ŋ LATIN SMALL LETTER ENG */
+ { 0x03c0, 0x0100 }, /* Amacron Ā LATIN CAPITAL LETTER A WITH MACRON */
+ { 0x03c7, 0x012e }, /* Iogonek Į LATIN CAPITAL LETTER I WITH OGONEK */
+ { 0x03cc, 0x0116 }, /* Eabovedot Ė LATIN CAPITAL LETTER E WITH DOT ABOVE */
+ { 0x03cf, 0x012a }, /* Imacron Ī LATIN CAPITAL LETTER I WITH MACRON */
+ { 0x03d1, 0x0145 }, /* Ncedilla Ņ LATIN CAPITAL LETTER N WITH CEDILLA */
+ { 0x03d2, 0x014c }, /* Omacron Ō LATIN CAPITAL LETTER O WITH MACRON */
+ { 0x03d3, 0x0136 }, /* Kcedilla Ķ LATIN CAPITAL LETTER K WITH CEDILLA */
+ { 0x03d9, 0x0172 }, /* Uogonek Ų LATIN CAPITAL LETTER U WITH OGONEK */
+ { 0x03dd, 0x0168 }, /* Utilde Ũ LATIN CAPITAL LETTER U WITH TILDE */
+ { 0x03de, 0x016a }, /* Umacron Ū LATIN CAPITAL LETTER U WITH MACRON */
+ { 0x03e0, 0x0101 }, /* amacron ā LATIN SMALL LETTER A WITH MACRON */
+ { 0x03e7, 0x012f }, /* iogonek į LATIN SMALL LETTER I WITH OGONEK */
+ { 0x03ec, 0x0117 }, /* eabovedot ė LATIN SMALL LETTER E WITH DOT ABOVE */
+ { 0x03ef, 0x012b }, /* imacron ī LATIN SMALL LETTER I WITH MACRON */
+ { 0x03f1, 0x0146 }, /* ncedilla ņ LATIN SMALL LETTER N WITH CEDILLA */
+ { 0x03f2, 0x014d }, /* omacron ō LATIN SMALL LETTER O WITH MACRON */
+ { 0x03f3, 0x0137 }, /* kcedilla ķ LATIN SMALL LETTER K WITH CEDILLA */
+ { 0x03f9, 0x0173 }, /* uogonek ų LATIN SMALL LETTER U WITH OGONEK */
+ { 0x03fd, 0x0169 }, /* utilde ũ LATIN SMALL LETTER U WITH TILDE */
+ { 0x03fe, 0x016b }, /* umacron ū LATIN SMALL LETTER U WITH MACRON */
+ { 0x047e, 0x203e }, /* overline ‾ OVERLINE */
+ { 0x04a1, 0x3002 }, /* kana_fullstop 。 IDEOGRAPHIC FULL STOP */
+ { 0x04a2, 0x300c }, /* kana_openingbracket 「 LEFT CORNER BRACKET */
+ { 0x04a3, 0x300d }, /* kana_closingbracket 」 RIGHT CORNER BRACKET */
+ { 0x04a4, 0x3001 }, /* kana_comma 、 IDEOGRAPHIC COMMA */
+ { 0x04a5, 0x30fb }, /* kana_conjunctive ・ KATAKANA MIDDLE DOT */
+ { 0x04a6, 0x30f2 }, /* kana_WO ヲ KATAKANA LETTER WO */
+ { 0x04a7, 0x30a1 }, /* kana_a ァ KATAKANA LETTER SMALL A */
+ { 0x04a8, 0x30a3 }, /* kana_i ィ KATAKANA LETTER SMALL I */
+ { 0x04a9, 0x30a5 }, /* kana_u ゥ KATAKANA LETTER SMALL U */
+ { 0x04aa, 0x30a7 }, /* kana_e ェ KATAKANA LETTER SMALL E */
+ { 0x04ab, 0x30a9 }, /* kana_o ォ KATAKANA LETTER SMALL O */
+ { 0x04ac, 0x30e3 }, /* kana_ya ャ KATAKANA LETTER SMALL YA */
+ { 0x04ad, 0x30e5 }, /* kana_yu ュ KATAKANA LETTER SMALL YU */
+ { 0x04ae, 0x30e7 }, /* kana_yo ョ KATAKANA LETTER SMALL YO */
+ { 0x04af, 0x30c3 }, /* kana_tsu ッ KATAKANA LETTER SMALL TU */
+ { 0x04b0, 0x30fc }, /* prolongedsound ー KATAKANA-HIRAGANA PROLONGED SOUND MARK */
+ { 0x04b1, 0x30a2 }, /* kana_A ア KATAKANA LETTER A */
+ { 0x04b2, 0x30a4 }, /* kana_I イ KATAKANA LETTER I */
+ { 0x04b3, 0x30a6 }, /* kana_U ウ KATAKANA LETTER U */
+ { 0x04b4, 0x30a8 }, /* kana_E エ KATAKANA LETTER E */
+ { 0x04b5, 0x30aa }, /* kana_O オ KATAKANA LETTER O */
+ { 0x04b6, 0x30ab }, /* kana_KA カ KATAKANA LETTER KA */
+ { 0x04b7, 0x30ad }, /* kana_KI キ KATAKANA LETTER KI */
+ { 0x04b8, 0x30af }, /* kana_KU ク KATAKANA LETTER KU */
+ { 0x04b9, 0x30b1 }, /* kana_KE ケ KATAKANA LETTER KE */
+ { 0x04ba, 0x30b3 }, /* kana_KO コ KATAKANA LETTER KO */
+ { 0x04bb, 0x30b5 }, /* kana_SA サ KATAKANA LETTER SA */
+ { 0x04bc, 0x30b7 }, /* kana_SHI シ KATAKANA LETTER SI */
+ { 0x04bd, 0x30b9 }, /* kana_SU ス KATAKANA LETTER SU */
+ { 0x04be, 0x30bb }, /* kana_SE セ KATAKANA LETTER SE */
+ { 0x04bf, 0x30bd }, /* kana_SO ソ KATAKANA LETTER SO */
+ { 0x04c0, 0x30bf }, /* kana_TA タ KATAKANA LETTER TA */
+ { 0x04c1, 0x30c1 }, /* kana_CHI チ KATAKANA LETTER TI */
+ { 0x04c2, 0x30c4 }, /* kana_TSU ツ KATAKANA LETTER TU */
+ { 0x04c3, 0x30c6 }, /* kana_TE テ KATAKANA LETTER TE */
+ { 0x04c4, 0x30c8 }, /* kana_TO ト KATAKANA LETTER TO */
+ { 0x04c5, 0x30ca }, /* kana_NA ナ KATAKANA LETTER NA */
+ { 0x04c6, 0x30cb }, /* kana_NI ニ KATAKANA LETTER NI */
+ { 0x04c7, 0x30cc }, /* kana_NU ヌ KATAKANA LETTER NU */
+ { 0x04c8, 0x30cd }, /* kana_NE ネ KATAKANA LETTER NE */
+ { 0x04c9, 0x30ce }, /* kana_NO ノ KATAKANA LETTER NO */
+ { 0x04ca, 0x30cf }, /* kana_HA ハ KATAKANA LETTER HA */
+ { 0x04cb, 0x30d2 }, /* kana_HI ヒ KATAKANA LETTER HI */
+ { 0x04cc, 0x30d5 }, /* kana_FU フ KATAKANA LETTER HU */
+ { 0x04cd, 0x30d8 }, /* kana_HE ヘ KATAKANA LETTER HE */
+ { 0x04ce, 0x30db }, /* kana_HO ホ KATAKANA LETTER HO */
+ { 0x04cf, 0x30de }, /* kana_MA マ KATAKANA LETTER MA */
+ { 0x04d0, 0x30df }, /* kana_MI ミ KATAKANA LETTER MI */
+ { 0x04d1, 0x30e0 }, /* kana_MU ム KATAKANA LETTER MU */
+ { 0x04d2, 0x30e1 }, /* kana_ME メ KATAKANA LETTER ME */
+ { 0x04d3, 0x30e2 }, /* kana_MO モ KATAKANA LETTER MO */
+ { 0x04d4, 0x30e4 }, /* kana_YA ヤ KATAKANA LETTER YA */
+ { 0x04d5, 0x30e6 }, /* kana_YU ユ KATAKANA LETTER YU */
+ { 0x04d6, 0x30e8 }, /* kana_YO ヨ KATAKANA LETTER YO */
+ { 0x04d7, 0x30e9 }, /* kana_RA ラ KATAKANA LETTER RA */
+ { 0x04d8, 0x30ea }, /* kana_RI リ KATAKANA LETTER RI */
+ { 0x04d9, 0x30eb }, /* kana_RU ル KATAKANA LETTER RU */
+ { 0x04da, 0x30ec }, /* kana_RE レ KATAKANA LETTER RE */
+ { 0x04db, 0x30ed }, /* kana_RO ロ KATAKANA LETTER RO */
+ { 0x04dc, 0x30ef }, /* kana_WA ワ KATAKANA LETTER WA */
+ { 0x04dd, 0x30f3 }, /* kana_N ン KATAKANA LETTER N */
+ { 0x04de, 0x309b }, /* voicedsound ゛ KATAKANA-HIRAGANA VOICED SOUND MARK */
+ { 0x04df, 0x309c }, /* semivoicedsound ゜ KATAKANA-HIRAGANA SEMI-VOICED SOUND MARK */
+ { 0x05ac, 0x060c }, /* Arabic_comma ، ARABIC COMMA */
+ { 0x05bb, 0x061b }, /* Arabic_semicolon ؛ ARABIC SEMICOLON */
+ { 0x05bf, 0x061f }, /* Arabic_question_mark ؟ ARABIC QUESTION MARK */
+ { 0x05c1, 0x0621 }, /* Arabic_hamza ء ARABIC LETTER HAMZA */
+ { 0x05c2, 0x0622 }, /* Arabic_maddaonalef آ ARABIC LETTER ALEF WITH MADDA ABOVE */
+ { 0x05c3, 0x0623 }, /* Arabic_hamzaonalef أ ARABIC LETTER ALEF WITH HAMZA ABOVE */
+ { 0x05c4, 0x0624 }, /* Arabic_hamzaonwaw ؤ ARABIC LETTER WAW WITH HAMZA ABOVE */
+ { 0x05c5, 0x0625 }, /* Arabic_hamzaunderalef إ ARABIC LETTER ALEF WITH HAMZA BELOW */
+ { 0x05c6, 0x0626 }, /* Arabic_hamzaonyeh ئ ARABIC LETTER YEH WITH HAMZA ABOVE */
+ { 0x05c7, 0x0627 }, /* Arabic_alef ا ARABIC LETTER ALEF */
+ { 0x05c8, 0x0628 }, /* Arabic_beh ب ARABIC LETTER BEH */
+ { 0x05c9, 0x0629 }, /* Arabic_tehmarbuta ة ARABIC LETTER TEH MARBUTA */
+ { 0x05ca, 0x062a }, /* Arabic_teh ت ARABIC LETTER TEH */
+ { 0x05cb, 0x062b }, /* Arabic_theh ث ARABIC LETTER THEH */
+ { 0x05cc, 0x062c }, /* Arabic_jeem ج ARABIC LETTER JEEM */
+ { 0x05cd, 0x062d }, /* Arabic_hah ح ARABIC LETTER HAH */
+ { 0x05ce, 0x062e }, /* Arabic_khah خ ARABIC LETTER KHAH */
+ { 0x05cf, 0x062f }, /* Arabic_dal د ARABIC LETTER DAL */
+ { 0x05d0, 0x0630 }, /* Arabic_thal ذ ARABIC LETTER THAL */
+ { 0x05d1, 0x0631 }, /* Arabic_ra ر ARABIC LETTER REH */
+ { 0x05d2, 0x0632 }, /* Arabic_zain ز ARABIC LETTER ZAIN */
+ { 0x05d3, 0x0633 }, /* Arabic_seen س ARABIC LETTER SEEN */
+ { 0x05d4, 0x0634 }, /* Arabic_sheen ش ARABIC LETTER SHEEN */
+ { 0x05d5, 0x0635 }, /* Arabic_sad ص ARABIC LETTER SAD */
+ { 0x05d6, 0x0636 }, /* Arabic_dad ض ARABIC LETTER DAD */
+ { 0x05d7, 0x0637 }, /* Arabic_tah ط ARABIC LETTER TAH */
+ { 0x05d8, 0x0638 }, /* Arabic_zah ظ ARABIC LETTER ZAH */
+ { 0x05d9, 0x0639 }, /* Arabic_ain ع ARABIC LETTER AIN */
+ { 0x05da, 0x063a }, /* Arabic_ghain غ ARABIC LETTER GHAIN */
+ { 0x05e0, 0x0640 }, /* Arabic_tatweel ـ ARABIC TATWEEL */
+ { 0x05e1, 0x0641 }, /* Arabic_feh ف ARABIC LETTER FEH */
+ { 0x05e2, 0x0642 }, /* Arabic_qaf ق ARABIC LETTER QAF */
+ { 0x05e3, 0x0643 }, /* Arabic_kaf ك ARABIC LETTER KAF */
+ { 0x05e4, 0x0644 }, /* Arabic_lam ل ARABIC LETTER LAM */
+ { 0x05e5, 0x0645 }, /* Arabic_meem م ARABIC LETTER MEEM */
+ { 0x05e6, 0x0646 }, /* Arabic_noon ن ARABIC LETTER NOON */
+ { 0x05e7, 0x0647 }, /* Arabic_ha ه ARABIC LETTER HEH */
+ { 0x05e8, 0x0648 }, /* Arabic_waw و ARABIC LETTER WAW */
+ { 0x05e9, 0x0649 }, /* Arabic_alefmaksura ى ARABIC LETTER ALEF MAKSURA */
+ { 0x05ea, 0x064a }, /* Arabic_yeh ي ARABIC LETTER YEH */
+ { 0x05eb, 0x064b }, /* Arabic_fathatan ً ARABIC FATHATAN */
+ { 0x05ec, 0x064c }, /* Arabic_dammatan ٌ ARABIC DAMMATAN */
+ { 0x05ed, 0x064d }, /* Arabic_kasratan ٍ ARABIC KASRATAN */
+ { 0x05ee, 0x064e }, /* Arabic_fatha َ ARABIC FATHA */
+ { 0x05ef, 0x064f }, /* Arabic_damma ُ ARABIC DAMMA */
+ { 0x05f0, 0x0650 }, /* Arabic_kasra ِ ARABIC KASRA */
+ { 0x05f1, 0x0651 }, /* Arabic_shadda ّ ARABIC SHADDA */
+ { 0x05f2, 0x0652 }, /* Arabic_sukun ْ ARABIC SUKUN */
+ { 0x06a1, 0x0452 }, /* Serbian_dje ђ CYRILLIC SMALL LETTER DJE */
+ { 0x06a2, 0x0453 }, /* Macedonia_gje ѓ CYRILLIC SMALL LETTER GJE */
+ { 0x06a3, 0x0451 }, /* Cyrillic_io ё CYRILLIC SMALL LETTER IO */
+ { 0x06a4, 0x0454 }, /* Ukrainian_ie є CYRILLIC SMALL LETTER UKRAINIAN IE */
+ { 0x06a5, 0x0455 }, /* Macedonia_dse ѕ CYRILLIC SMALL LETTER DZE */
+ { 0x06a6, 0x0456 }, /* Ukrainian_i і CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I */
+ { 0x06a7, 0x0457 }, /* Ukrainian_yi ї CYRILLIC SMALL LETTER YI */
+ { 0x06a8, 0x0458 }, /* Cyrillic_je ј CYRILLIC SMALL LETTER JE */
+ { 0x06a9, 0x0459 }, /* Cyrillic_lje љ CYRILLIC SMALL LETTER LJE */
+ { 0x06aa, 0x045a }, /* Cyrillic_nje њ CYRILLIC SMALL LETTER NJE */
+ { 0x06ab, 0x045b }, /* Serbian_tshe ћ CYRILLIC SMALL LETTER TSHE */
+ { 0x06ac, 0x045c }, /* Macedonia_kje ќ CYRILLIC SMALL LETTER KJE */
+ { 0x06ae, 0x045e }, /* Byelorussian_shortu ў CYRILLIC SMALL LETTER SHORT U */
+ { 0x06af, 0x045f }, /* Cyrillic_dzhe џ CYRILLIC SMALL LETTER DZHE */
+ { 0x06b0, 0x2116 }, /* numerosign № NUMERO SIGN */
+ { 0x06b1, 0x0402 }, /* Serbian_DJE Ђ CYRILLIC CAPITAL LETTER DJE */
+ { 0x06b2, 0x0403 }, /* Macedonia_GJE Ѓ CYRILLIC CAPITAL LETTER GJE */
+ { 0x06b3, 0x0401 }, /* Cyrillic_IO Ё CYRILLIC CAPITAL LETTER IO */
+ { 0x06b4, 0x0404 }, /* Ukrainian_IE Є CYRILLIC CAPITAL LETTER UKRAINIAN IE */
+ { 0x06b5, 0x0405 }, /* Macedonia_DSE Ѕ CYRILLIC CAPITAL LETTER DZE */
+ { 0x06b6, 0x0406 }, /* Ukrainian_I І CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I */
+ { 0x06b7, 0x0407 }, /* Ukrainian_YI Ї CYRILLIC CAPITAL LETTER YI */
+ { 0x06b8, 0x0408 }, /* Cyrillic_JE Ј CYRILLIC CAPITAL LETTER JE */
+ { 0x06b9, 0x0409 }, /* Cyrillic_LJE Љ CYRILLIC CAPITAL LETTER LJE */
+ { 0x06ba, 0x040a }, /* Cyrillic_NJE Њ CYRILLIC CAPITAL LETTER NJE */
+ { 0x06bb, 0x040b }, /* Serbian_TSHE Ћ CYRILLIC CAPITAL LETTER TSHE */
+ { 0x06bc, 0x040c }, /* Macedonia_KJE Ќ CYRILLIC CAPITAL LETTER KJE */
+ { 0x06be, 0x040e }, /* Byelorussian_SHORTU Ў CYRILLIC CAPITAL LETTER SHORT U */
+ { 0x06bf, 0x040f }, /* Cyrillic_DZHE Џ CYRILLIC CAPITAL LETTER DZHE */
+ { 0x06c0, 0x044e }, /* Cyrillic_yu ю CYRILLIC SMALL LETTER YU */
+ { 0x06c1, 0x0430 }, /* Cyrillic_a а CYRILLIC SMALL LETTER A */
+ { 0x06c2, 0x0431 }, /* Cyrillic_be б CYRILLIC SMALL LETTER BE */
+ { 0x06c3, 0x0446 }, /* Cyrillic_tse ц CYRILLIC SMALL LETTER TSE */
+ { 0x06c4, 0x0434 }, /* Cyrillic_de д CYRILLIC SMALL LETTER DE */
+ { 0x06c5, 0x0435 }, /* Cyrillic_ie е CYRILLIC SMALL LETTER IE */
+ { 0x06c6, 0x0444 }, /* Cyrillic_ef ф CYRILLIC SMALL LETTER EF */
+ { 0x06c7, 0x0433 }, /* Cyrillic_ghe г CYRILLIC SMALL LETTER GHE */
+ { 0x06c8, 0x0445 }, /* Cyrillic_ha х CYRILLIC SMALL LETTER HA */
+ { 0x06c9, 0x0438 }, /* Cyrillic_i и CYRILLIC SMALL LETTER I */
+ { 0x06ca, 0x0439 }, /* Cyrillic_shorti й CYRILLIC SMALL LETTER SHORT I */
+ { 0x06cb, 0x043a }, /* Cyrillic_ka к CYRILLIC SMALL LETTER KA */
+ { 0x06cc, 0x043b }, /* Cyrillic_el л CYRILLIC SMALL LETTER EL */
+ { 0x06cd, 0x043c }, /* Cyrillic_em м CYRILLIC SMALL LETTER EM */
+ { 0x06ce, 0x043d }, /* Cyrillic_en н CYRILLIC SMALL LETTER EN */
+ { 0x06cf, 0x043e }, /* Cyrillic_o о CYRILLIC SMALL LETTER O */
+ { 0x06d0, 0x043f }, /* Cyrillic_pe п CYRILLIC SMALL LETTER PE */
+ { 0x06d1, 0x044f }, /* Cyrillic_ya я CYRILLIC SMALL LETTER YA */
+ { 0x06d2, 0x0440 }, /* Cyrillic_er р CYRILLIC SMALL LETTER ER */
+ { 0x06d3, 0x0441 }, /* Cyrillic_es с CYRILLIC SMALL LETTER ES */
+ { 0x06d4, 0x0442 }, /* Cyrillic_te т CYRILLIC SMALL LETTER TE */
+ { 0x06d5, 0x0443 }, /* Cyrillic_u у CYRILLIC SMALL LETTER U */
+ { 0x06d6, 0x0436 }, /* Cyrillic_zhe ж CYRILLIC SMALL LETTER ZHE */
+ { 0x06d7, 0x0432 }, /* Cyrillic_ve в CYRILLIC SMALL LETTER VE */
+ { 0x06d8, 0x044c }, /* Cyrillic_softsign ь CYRILLIC SMALL LETTER SOFT SIGN */
+ { 0x06d9, 0x044b }, /* Cyrillic_yeru ы CYRILLIC SMALL LETTER YERU */
+ { 0x06da, 0x0437 }, /* Cyrillic_ze з CYRILLIC SMALL LETTER ZE */
+ { 0x06db, 0x0448 }, /* Cyrillic_sha ш CYRILLIC SMALL LETTER SHA */
+ { 0x06dc, 0x044d }, /* Cyrillic_e э CYRILLIC SMALL LETTER E */
+ { 0x06dd, 0x0449 }, /* Cyrillic_shcha щ CYRILLIC SMALL LETTER SHCHA */
+ { 0x06de, 0x0447 }, /* Cyrillic_che ч CYRILLIC SMALL LETTER CHE */
+ { 0x06df, 0x044a }, /* Cyrillic_hardsign ъ CYRILLIC SMALL LETTER HARD SIGN */
+ { 0x06e0, 0x042e }, /* Cyrillic_YU Ю CYRILLIC CAPITAL LETTER YU */
+ { 0x06e1, 0x0410 }, /* Cyrillic_A А CYRILLIC CAPITAL LETTER A */
+ { 0x06e2, 0x0411 }, /* Cyrillic_BE Б CYRILLIC CAPITAL LETTER BE */
+ { 0x06e3, 0x0426 }, /* Cyrillic_TSE Ц CYRILLIC CAPITAL LETTER TSE */
+ { 0x06e4, 0x0414 }, /* Cyrillic_DE Д CYRILLIC CAPITAL LETTER DE */
+ { 0x06e5, 0x0415 }, /* Cyrillic_IE Е CYRILLIC CAPITAL LETTER IE */
+ { 0x06e6, 0x0424 }, /* Cyrillic_EF Ф CYRILLIC CAPITAL LETTER EF */
+ { 0x06e7, 0x0413 }, /* Cyrillic_GHE Г CYRILLIC CAPITAL LETTER GHE */
+ { 0x06e8, 0x0425 }, /* Cyrillic_HA Х CYRILLIC CAPITAL LETTER HA */
+ { 0x06e9, 0x0418 }, /* Cyrillic_I И CYRILLIC CAPITAL LETTER I */
+ { 0x06ea, 0x0419 }, /* Cyrillic_SHORTI Й CYRILLIC CAPITAL LETTER SHORT I */
+ { 0x06eb, 0x041a }, /* Cyrillic_KA К CYRILLIC CAPITAL LETTER KA */
+ { 0x06ec, 0x041b }, /* Cyrillic_EL Л CYRILLIC CAPITAL LETTER EL */
+ { 0x06ed, 0x041c }, /* Cyrillic_EM М CYRILLIC CAPITAL LETTER EM */
+ { 0x06ee, 0x041d }, /* Cyrillic_EN Н CYRILLIC CAPITAL LETTER EN */
+ { 0x06ef, 0x041e }, /* Cyrillic_O О CYRILLIC CAPITAL LETTER O */
+ { 0x06f0, 0x041f }, /* Cyrillic_PE П CYRILLIC CAPITAL LETTER PE */
+ { 0x06f1, 0x042f }, /* Cyrillic_YA Я CYRILLIC CAPITAL LETTER YA */
+ { 0x06f2, 0x0420 }, /* Cyrillic_ER Р CYRILLIC CAPITAL LETTER ER */
+ { 0x06f3, 0x0421 }, /* Cyrillic_ES С CYRILLIC CAPITAL LETTER ES */
+ { 0x06f4, 0x0422 }, /* Cyrillic_TE Т CYRILLIC CAPITAL LETTER TE */
+ { 0x06f5, 0x0423 }, /* Cyrillic_U У CYRILLIC CAPITAL LETTER U */
+ { 0x06f6, 0x0416 }, /* Cyrillic_ZHE Ж CYRILLIC CAPITAL LETTER ZHE */
+ { 0x06f7, 0x0412 }, /* Cyrillic_VE В CYRILLIC CAPITAL LETTER VE */
+ { 0x06f8, 0x042c }, /* Cyrillic_SOFTSIGN Ь CYRILLIC CAPITAL LETTER SOFT SIGN */
+ { 0x06f9, 0x042b }, /* Cyrillic_YERU Ы CYRILLIC CAPITAL LETTER YERU */
+ { 0x06fa, 0x0417 }, /* Cyrillic_ZE З CYRILLIC CAPITAL LETTER ZE */
+ { 0x06fb, 0x0428 }, /* Cyrillic_SHA Ш CYRILLIC CAPITAL LETTER SHA */
+ { 0x06fc, 0x042d }, /* Cyrillic_E Э CYRILLIC CAPITAL LETTER E */
+ { 0x06fd, 0x0429 }, /* Cyrillic_SHCHA Щ CYRILLIC CAPITAL LETTER SHCHA */
+ { 0x06fe, 0x0427 }, /* Cyrillic_CHE Ч CYRILLIC CAPITAL LETTER CHE */
+ { 0x06ff, 0x042a }, /* Cyrillic_HARDSIGN Ъ CYRILLIC CAPITAL LETTER HARD SIGN */
+ { 0x07a1, 0x0386 }, /* Greek_ALPHAaccent Ά GREEK CAPITAL LETTER ALPHA WITH TONOS */
+ { 0x07a2, 0x0388 }, /* Greek_EPSILONaccent Έ GREEK CAPITAL LETTER EPSILON WITH TONOS */
+ { 0x07a3, 0x0389 }, /* Greek_ETAaccent Ή GREEK CAPITAL LETTER ETA WITH TONOS */
+ { 0x07a4, 0x038a }, /* Greek_IOTAaccent Ί GREEK CAPITAL LETTER IOTA WITH TONOS */
+ { 0x07a5, 0x03aa }, /* Greek_IOTAdiaeresis Ϊ GREEK CAPITAL LETTER IOTA WITH DIALYTIKA */
+ { 0x07a7, 0x038c }, /* Greek_OMICRONaccent Ό GREEK CAPITAL LETTER OMICRON WITH TONOS */
+ { 0x07a8, 0x038e }, /* Greek_UPSILONaccent Ύ GREEK CAPITAL LETTER UPSILON WITH TONOS */
+ { 0x07a9, 0x03ab }, /* Greek_UPSILONdieresis Ϋ GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA */
+ { 0x07ab, 0x038f }, /* Greek_OMEGAaccent Ώ GREEK CAPITAL LETTER OMEGA WITH TONOS */
+ { 0x07ae, 0x0385 }, /* Greek_accentdieresis ΅ GREEK DIALYTIKA TONOS */
+ { 0x07af, 0x2015 }, /* Greek_horizbar ― HORIZONTAL BAR */
+ { 0x07b1, 0x03ac }, /* Greek_alphaaccent ά GREEK SMALL LETTER ALPHA WITH TONOS */
+ { 0x07b2, 0x03ad }, /* Greek_epsilonaccent έ GREEK SMALL LETTER EPSILON WITH TONOS */
+ { 0x07b3, 0x03ae }, /* Greek_etaaccent ή GREEK SMALL LETTER ETA WITH TONOS */
+ { 0x07b4, 0x03af }, /* Greek_iotaaccent ί GREEK SMALL LETTER IOTA WITH TONOS */
+ { 0x07b5, 0x03ca }, /* Greek_iotadieresis ϊ GREEK SMALL LETTER IOTA WITH DIALYTIKA */
+ { 0x07b6, 0x0390 }, /* Greek_iotaaccentdieresis ΐ GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS */
+ { 0x07b7, 0x03cc }, /* Greek_omicronaccent ό GREEK SMALL LETTER OMICRON WITH TONOS */
+ { 0x07b8, 0x03cd }, /* Greek_upsilonaccent ύ GREEK SMALL LETTER UPSILON WITH TONOS */
+ { 0x07b9, 0x03cb }, /* Greek_upsilondieresis ϋ GREEK SMALL LETTER UPSILON WITH DIALYTIKA */
+ { 0x07ba, 0x03b0 }, /* Greek_upsilonaccentdieresis ΰ GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS */
+ { 0x07bb, 0x03ce }, /* Greek_omegaaccent ώ GREEK SMALL LETTER OMEGA WITH TONOS */
+ { 0x07c1, 0x0391 }, /* Greek_ALPHA Α GREEK CAPITAL LETTER ALPHA */
+ { 0x07c2, 0x0392 }, /* Greek_BETA Β GREEK CAPITAL LETTER BETA */
+ { 0x07c3, 0x0393 }, /* Greek_GAMMA Γ GREEK CAPITAL LETTER GAMMA */
+ { 0x07c4, 0x0394 }, /* Greek_DELTA Δ GREEK CAPITAL LETTER DELTA */
+ { 0x07c5, 0x0395 }, /* Greek_EPSILON Ε GREEK CAPITAL LETTER EPSILON */
+ { 0x07c6, 0x0396 }, /* Greek_ZETA Ζ GREEK CAPITAL LETTER ZETA */
+ { 0x07c7, 0x0397 }, /* Greek_ETA Η GREEK CAPITAL LETTER ETA */
+ { 0x07c8, 0x0398 }, /* Greek_THETA Θ GREEK CAPITAL LETTER THETA */
+ { 0x07c9, 0x0399 }, /* Greek_IOTA Ι GREEK CAPITAL LETTER IOTA */
+ { 0x07ca, 0x039a }, /* Greek_KAPPA Κ GREEK CAPITAL LETTER KAPPA */
+ { 0x07cb, 0x039b }, /* Greek_LAMBDA Λ GREEK CAPITAL LETTER LAMDA */
+ { 0x07cc, 0x039c }, /* Greek_MU Μ GREEK CAPITAL LETTER MU */
+ { 0x07cd, 0x039d }, /* Greek_NU Ν GREEK CAPITAL LETTER NU */
+ { 0x07ce, 0x039e }, /* Greek_XI Ξ GREEK CAPITAL LETTER XI */
+ { 0x07cf, 0x039f }, /* Greek_OMICRON Ο GREEK CAPITAL LETTER OMICRON */
+ { 0x07d0, 0x03a0 }, /* Greek_PI Π GREEK CAPITAL LETTER PI */
+ { 0x07d1, 0x03a1 }, /* Greek_RHO Ρ GREEK CAPITAL LETTER RHO */
+ { 0x07d2, 0x03a3 }, /* Greek_SIGMA Σ GREEK CAPITAL LETTER SIGMA */
+ { 0x07d4, 0x03a4 }, /* Greek_TAU Τ GREEK CAPITAL LETTER TAU */
+ { 0x07d5, 0x03a5 }, /* Greek_UPSILON Υ GREEK CAPITAL LETTER UPSILON */
+ { 0x07d6, 0x03a6 }, /* Greek_PHI Φ GREEK CAPITAL LETTER PHI */
+ { 0x07d7, 0x03a7 }, /* Greek_CHI Χ GREEK CAPITAL LETTER CHI */
+ { 0x07d8, 0x03a8 }, /* Greek_PSI Ψ GREEK CAPITAL LETTER PSI */
+ { 0x07d9, 0x03a9 }, /* Greek_OMEGA Ω GREEK CAPITAL LETTER OMEGA */
+ { 0x07e1, 0x03b1 }, /* Greek_alpha α GREEK SMALL LETTER ALPHA */
+ { 0x07e2, 0x03b2 }, /* Greek_beta β GREEK SMALL LETTER BETA */
+ { 0x07e3, 0x03b3 }, /* Greek_gamma γ GREEK SMALL LETTER GAMMA */
+ { 0x07e4, 0x03b4 }, /* Greek_delta δ GREEK SMALL LETTER DELTA */
+ { 0x07e5, 0x03b5 }, /* Greek_epsilon ε GREEK SMALL LETTER EPSILON */
+ { 0x07e6, 0x03b6 }, /* Greek_zeta ζ GREEK SMALL LETTER ZETA */
+ { 0x07e7, 0x03b7 }, /* Greek_eta η GREEK SMALL LETTER ETA */
+ { 0x07e8, 0x03b8 }, /* Greek_theta θ GREEK SMALL LETTER THETA */
+ { 0x07e9, 0x03b9 }, /* Greek_iota ι GREEK SMALL LETTER IOTA */
+ { 0x07ea, 0x03ba }, /* Greek_kappa κ GREEK SMALL LETTER KAPPA */
+ { 0x07eb, 0x03bb }, /* Greek_lambda λ GREEK SMALL LETTER LAMDA */
+ { 0x07ec, 0x03bc }, /* Greek_mu μ GREEK SMALL LETTER MU */
+ { 0x07ed, 0x03bd }, /* Greek_nu ν GREEK SMALL LETTER NU */
+ { 0x07ee, 0x03be }, /* Greek_xi ξ GREEK SMALL LETTER XI */
+ { 0x07ef, 0x03bf }, /* Greek_omicron ο GREEK SMALL LETTER OMICRON */
+ { 0x07f0, 0x03c0 }, /* Greek_pi π GREEK SMALL LETTER PI */
+ { 0x07f1, 0x03c1 }, /* Greek_rho ρ GREEK SMALL LETTER RHO */
+ { 0x07f2, 0x03c3 }, /* Greek_sigma σ GREEK SMALL LETTER SIGMA */
+ { 0x07f3, 0x03c2 }, /* Greek_finalsmallsigma ς GREEK SMALL LETTER FINAL SIGMA */
+ { 0x07f4, 0x03c4 }, /* Greek_tau τ GREEK SMALL LETTER TAU */
+ { 0x07f5, 0x03c5 }, /* Greek_upsilon υ GREEK SMALL LETTER UPSILON */
+ { 0x07f6, 0x03c6 }, /* Greek_phi φ GREEK SMALL LETTER PHI */
+ { 0x07f7, 0x03c7 }, /* Greek_chi χ GREEK SMALL LETTER CHI */
+ { 0x07f8, 0x03c8 }, /* Greek_psi ψ GREEK SMALL LETTER PSI */
+ { 0x07f9, 0x03c9 }, /* Greek_omega ω GREEK SMALL LETTER OMEGA */
+ { 0x08a1, 0x23b7 }, /* leftradical ⎷ ??? */
+ { 0x08a2, 0x250c }, /* topleftradical ┌ BOX DRAWINGS LIGHT DOWN AND RIGHT */
+ { 0x08a3, 0x2500 }, /* horizconnector ─ BOX DRAWINGS LIGHT HORIZONTAL */
+ { 0x08a4, 0x2320 }, /* topintegral ⌠ TOP HALF INTEGRAL */
+ { 0x08a5, 0x2321 }, /* botintegral ⌡ BOTTOM HALF INTEGRAL */
+ { 0x08a6, 0x2502 }, /* vertconnector │ BOX DRAWINGS LIGHT VERTICAL */
+ { 0x08a7, 0x23a1 }, /* topleftsqbracket ⎡ ??? */
+ { 0x08a8, 0x23a3 }, /* botleftsqbracket ⎣ ??? */
+ { 0x08a9, 0x23a4 }, /* toprightsqbracket ⎤ ??? */
+ { 0x08aa, 0x23a6 }, /* botrightsqbracket ⎦ ??? */
+ { 0x08ab, 0x239b }, /* topleftparens ⎛ ??? */
+ { 0x08ac, 0x239d }, /* botleftparens ⎝ ??? */
+ { 0x08ad, 0x239e }, /* toprightparens ⎞ ??? */
+ { 0x08ae, 0x23a0 }, /* botrightparens ⎠ ??? */
+ { 0x08af, 0x23a8 }, /* leftmiddlecurlybrace ⎨ ??? */
+ { 0x08b0, 0x23ac }, /* rightmiddlecurlybrace ⎬ ??? */
+/* 0x08b1 topleftsummation ? ??? */
+/* 0x08b2 botleftsummation ? ??? */
+/* 0x08b3 topvertsummationconnector ? ??? */
+/* 0x08b4 botvertsummationconnector ? ??? */
+/* 0x08b5 toprightsummation ? ??? */
+/* 0x08b6 botrightsummation ? ??? */
+/* 0x08b7 rightmiddlesummation ? ??? */
+ { 0x08bc, 0x2264 }, /* lessthanequal ≤ LESS-THAN OR EQUAL TO */
+ { 0x08bd, 0x2260 }, /* notequal ≠ NOT EQUAL TO */
+ { 0x08be, 0x2265 }, /* greaterthanequal ≥ GREATER-THAN OR EQUAL TO */
+ { 0x08bf, 0x222b }, /* integral ∫ INTEGRAL */
+ { 0x08c0, 0x2234 }, /* therefore ∴ THEREFORE */
+ { 0x08c1, 0x221d }, /* variation ∝ PROPORTIONAL TO */
+ { 0x08c2, 0x221e }, /* infinity ∞ INFINITY */
+ { 0x08c5, 0x2207 }, /* nabla ∇ NABLA */
+ { 0x08c8, 0x223c }, /* approximate ∼ TILDE OPERATOR */
+ { 0x08c9, 0x2243 }, /* similarequal ≃ ASYMPTOTICALLY EQUAL TO */
+ { 0x08cd, 0x21d4 }, /* ifonlyif ⇔ LEFT RIGHT DOUBLE ARROW */
+ { 0x08ce, 0x21d2 }, /* implies ⇒ RIGHTWARDS DOUBLE ARROW */
+ { 0x08cf, 0x2261 }, /* identical ≡ IDENTICAL TO */
+ { 0x08d6, 0x221a }, /* radical √ SQUARE ROOT */
+ { 0x08da, 0x2282 }, /* includedin ⊂ SUBSET OF */
+ { 0x08db, 0x2283 }, /* includes ⊃ SUPERSET OF */
+ { 0x08dc, 0x2229 }, /* intersection ∩ INTERSECTION */
+ { 0x08dd, 0x222a }, /* union ∪ UNION */
+ { 0x08de, 0x2227 }, /* logicaland ∧ LOGICAL AND */
+ { 0x08df, 0x2228 }, /* logicalor ∨ LOGICAL OR */
+ { 0x08ef, 0x2202 }, /* partialderivative ∂ PARTIAL DIFFERENTIAL */
+ { 0x08f6, 0x0192 }, /* function ƒ LATIN SMALL LETTER F WITH HOOK */
+ { 0x08fb, 0x2190 }, /* leftarrow ← LEFTWARDS ARROW */
+ { 0x08fc, 0x2191 }, /* uparrow ↑ UPWARDS ARROW */
+ { 0x08fd, 0x2192 }, /* rightarrow → RIGHTWARDS ARROW */
+ { 0x08fe, 0x2193 }, /* downarrow ↓ DOWNWARDS ARROW */
+/* 0x09df blank ? ??? */
+ { 0x09e0, 0x25c6 }, /* soliddiamond ◆ BLACK DIAMOND */
+ { 0x09e1, 0x2592 }, /* checkerboard ▒ MEDIUM SHADE */
+ { 0x09e2, 0x2409 }, /* ht ␉ SYMBOL FOR HORIZONTAL TABULATION */
+ { 0x09e3, 0x240c }, /* ff ␌ SYMBOL FOR FORM FEED */
+ { 0x09e4, 0x240d }, /* cr ␍ SYMBOL FOR CARRIAGE RETURN */
+ { 0x09e5, 0x240a }, /* lf ␊ SYMBOL FOR LINE FEED */
+ { 0x09e8, 0x2424 }, /* nl ␤ SYMBOL FOR NEWLINE */
+ { 0x09e9, 0x240b }, /* vt ␋ SYMBOL FOR VERTICAL TABULATION */
+ { 0x09ea, 0x2518 }, /* lowrightcorner ┘ BOX DRAWINGS LIGHT UP AND LEFT */
+ { 0x09eb, 0x2510 }, /* uprightcorner ┐ BOX DRAWINGS LIGHT DOWN AND LEFT */
+ { 0x09ec, 0x250c }, /* upleftcorner ┌ BOX DRAWINGS LIGHT DOWN AND RIGHT */
+ { 0x09ed, 0x2514 }, /* lowleftcorner └ BOX DRAWINGS LIGHT UP AND RIGHT */
+ { 0x09ee, 0x253c }, /* crossinglines ┼ BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL */
+ { 0x09ef, 0x23ba }, /* horizlinescan1 ⎺ HORIZONTAL SCAN LINE-1 (Unicode 3.2 draft) */
+ { 0x09f0, 0x23bb }, /* horizlinescan3 ⎻ HORIZONTAL SCAN LINE-3 (Unicode 3.2 draft) */
+ { 0x09f1, 0x2500 }, /* horizlinescan5 ─ BOX DRAWINGS LIGHT HORIZONTAL */
+ { 0x09f2, 0x23bc }, /* horizlinescan7 ⎼ HORIZONTAL SCAN LINE-7 (Unicode 3.2 draft) */
+ { 0x09f3, 0x23bd }, /* horizlinescan9 ⎽ HORIZONTAL SCAN LINE-9 (Unicode 3.2 draft) */
+ { 0x09f4, 0x251c }, /* leftt ├ BOX DRAWINGS LIGHT VERTICAL AND RIGHT */
+ { 0x09f5, 0x2524 }, /* rightt ┤ BOX DRAWINGS LIGHT VERTICAL AND LEFT */
+ { 0x09f6, 0x2534 }, /* bott ┴ BOX DRAWINGS LIGHT UP AND HORIZONTAL */
+ { 0x09f7, 0x252c }, /* topt ┬ BOX DRAWINGS LIGHT DOWN AND HORIZONTAL */
+ { 0x09f8, 0x2502 }, /* vertbar │ BOX DRAWINGS LIGHT VERTICAL */
+ { 0x0aa1, 0x2003 }, /* emspace   EM SPACE */
+ { 0x0aa2, 0x2002 }, /* enspace   EN SPACE */
+ { 0x0aa3, 0x2004 }, /* em3space   THREE-PER-EM SPACE */
+ { 0x0aa4, 0x2005 }, /* em4space   FOUR-PER-EM SPACE */
+ { 0x0aa5, 0x2007 }, /* digitspace   FIGURE SPACE */
+ { 0x0aa6, 0x2008 }, /* punctspace   PUNCTUATION SPACE */
+ { 0x0aa7, 0x2009 }, /* thinspace   THIN SPACE */
+ { 0x0aa8, 0x200a }, /* hairspace   HAIR SPACE */
+ { 0x0aa9, 0x2014 }, /* emdash — EM DASH */
+ { 0x0aaa, 0x2013 }, /* endash – EN DASH */
+/* 0x0aac signifblank ? ??? */
+ { 0x0aae, 0x2026 }, /* ellipsis … HORIZONTAL ELLIPSIS */
+ { 0x0aaf, 0x2025 }, /* doubbaselinedot ‥ TWO DOT LEADER */
+ { 0x0ab0, 0x2153 }, /* onethird ⅓ VULGAR FRACTION ONE THIRD */
+ { 0x0ab1, 0x2154 }, /* twothirds ⅔ VULGAR FRACTION TWO THIRDS */
+ { 0x0ab2, 0x2155 }, /* onefifth ⅕ VULGAR FRACTION ONE FIFTH */
+ { 0x0ab3, 0x2156 }, /* twofifths ⅖ VULGAR FRACTION TWO FIFTHS */
+ { 0x0ab4, 0x2157 }, /* threefifths ⅗ VULGAR FRACTION THREE FIFTHS */
+ { 0x0ab5, 0x2158 }, /* fourfifths ⅘ VULGAR FRACTION FOUR FIFTHS */
+ { 0x0ab6, 0x2159 }, /* onesixth ⅙ VULGAR FRACTION ONE SIXTH */
+ { 0x0ab7, 0x215a }, /* fivesixths ⅚ VULGAR FRACTION FIVE SIXTHS */
+ { 0x0ab8, 0x2105 }, /* careof ℅ CARE OF */
+ { 0x0abb, 0x2012 }, /* figdash ‒ FIGURE DASH */
+ { 0x0abc, 0x2329 }, /* leftanglebracket 〈 LEFT-POINTING ANGLE BRACKET */
+/* 0x0abd decimalpoint ? ??? */
+ { 0x0abe, 0x232a }, /* rightanglebracket 〉 RIGHT-POINTING ANGLE BRACKET */
+/* 0x0abf marker ? ??? */
+ { 0x0ac3, 0x215b }, /* oneeighth ⅛ VULGAR FRACTION ONE EIGHTH */
+ { 0x0ac4, 0x215c }, /* threeeighths ⅜ VULGAR FRACTION THREE EIGHTHS */
+ { 0x0ac5, 0x215d }, /* fiveeighths ⅝ VULGAR FRACTION FIVE EIGHTHS */
+ { 0x0ac6, 0x215e }, /* seveneighths ⅞ VULGAR FRACTION SEVEN EIGHTHS */
+ { 0x0ac9, 0x2122 }, /* trademark ™ TRADE MARK SIGN */
+ { 0x0aca, 0x2613 }, /* signaturemark ☓ SALTIRE */
+/* 0x0acb trademarkincircle ? ??? */
+ { 0x0acc, 0x25c1 }, /* leftopentriangle ◁ WHITE LEFT-POINTING TRIANGLE */
+ { 0x0acd, 0x25b7 }, /* rightopentriangle ▷ WHITE RIGHT-POINTING TRIANGLE */
+ { 0x0ace, 0x25cb }, /* emopencircle ○ WHITE CIRCLE */
+ { 0x0acf, 0x25af }, /* emopenrectangle ▯ WHITE VERTICAL RECTANGLE */
+ { 0x0ad0, 0x2018 }, /* leftsinglequotemark ‘ LEFT SINGLE QUOTATION MARK */
+ { 0x0ad1, 0x2019 }, /* rightsinglequotemark ’ RIGHT SINGLE QUOTATION MARK */
+ { 0x0ad2, 0x201c }, /* leftdoublequotemark “ LEFT DOUBLE QUOTATION MARK */
+ { 0x0ad3, 0x201d }, /* rightdoublequotemark ” RIGHT DOUBLE QUOTATION MARK */
+ { 0x0ad4, 0x211e }, /* prescription ℞ PRESCRIPTION TAKE */
+ { 0x0ad6, 0x2032 }, /* minutes ′ PRIME */
+ { 0x0ad7, 0x2033 }, /* seconds ″ DOUBLE PRIME */
+ { 0x0ad9, 0x271d }, /* latincross ✝ LATIN CROSS */
+/* 0x0ada hexagram ? ??? */
+ { 0x0adb, 0x25ac }, /* filledrectbullet ▬ BLACK RECTANGLE */
+ { 0x0adc, 0x25c0 }, /* filledlefttribullet ◀ BLACK LEFT-POINTING TRIANGLE */
+ { 0x0add, 0x25b6 }, /* filledrighttribullet ▶ BLACK RIGHT-POINTING TRIANGLE */
+ { 0x0ade, 0x25cf }, /* emfilledcircle ● BLACK CIRCLE */
+ { 0x0adf, 0x25ae }, /* emfilledrect ▮ BLACK VERTICAL RECTANGLE */
+ { 0x0ae0, 0x25e6 }, /* enopencircbullet ◦ WHITE BULLET */
+ { 0x0ae1, 0x25ab }, /* enopensquarebullet ▫ WHITE SMALL SQUARE */
+ { 0x0ae2, 0x25ad }, /* openrectbullet ▭ WHITE RECTANGLE */
+ { 0x0ae3, 0x25b3 }, /* opentribulletup △ WHITE UP-POINTING TRIANGLE */
+ { 0x0ae4, 0x25bd }, /* opentribulletdown ▽ WHITE DOWN-POINTING TRIANGLE */
+ { 0x0ae5, 0x2606 }, /* openstar ☆ WHITE STAR */
+ { 0x0ae6, 0x2022 }, /* enfilledcircbullet • BULLET */
+ { 0x0ae7, 0x25aa }, /* enfilledsqbullet ▪ BLACK SMALL SQUARE */
+ { 0x0ae8, 0x25b2 }, /* filledtribulletup ▲ BLACK UP-POINTING TRIANGLE */
+ { 0x0ae9, 0x25bc }, /* filledtribulletdown ▼ BLACK DOWN-POINTING TRIANGLE */
+ { 0x0aea, 0x261c }, /* leftpointer ☜ WHITE LEFT POINTING INDEX */
+ { 0x0aeb, 0x261e }, /* rightpointer ☞ WHITE RIGHT POINTING INDEX */
+ { 0x0aec, 0x2663 }, /* club ♣ BLACK CLUB SUIT */
+ { 0x0aed, 0x2666 }, /* diamond ♦ BLACK DIAMOND SUIT */
+ { 0x0aee, 0x2665 }, /* heart ♥ BLACK HEART SUIT */
+ { 0x0af0, 0x2720 }, /* maltesecross ✠ MALTESE CROSS */
+ { 0x0af1, 0x2020 }, /* dagger † DAGGER */
+ { 0x0af2, 0x2021 }, /* doubledagger ‡ DOUBLE DAGGER */
+ { 0x0af3, 0x2713 }, /* checkmark ✓ CHECK MARK */
+ { 0x0af4, 0x2717 }, /* ballotcross ✗ BALLOT X */
+ { 0x0af5, 0x266f }, /* musicalsharp ♯ MUSIC SHARP SIGN */
+ { 0x0af6, 0x266d }, /* musicalflat ♭ MUSIC FLAT SIGN */
+ { 0x0af7, 0x2642 }, /* malesymbol ♂ MALE SIGN */
+ { 0x0af8, 0x2640 }, /* femalesymbol ♀ FEMALE SIGN */
+ { 0x0af9, 0x260e }, /* telephone ☎ BLACK TELEPHONE */
+ { 0x0afa, 0x2315 }, /* telephonerecorder ⌕ TELEPHONE RECORDER */
+ { 0x0afb, 0x2117 }, /* phonographcopyright ℗ SOUND RECORDING COPYRIGHT */
+ { 0x0afc, 0x2038 }, /* caret ‸ CARET */
+ { 0x0afd, 0x201a }, /* singlelowquotemark ‚ SINGLE LOW-9 QUOTATION MARK */
+ { 0x0afe, 0x201e }, /* doublelowquotemark „ DOUBLE LOW-9 QUOTATION MARK */
+/* 0x0aff cursor ? ??? */
+ { 0x0ba3, 0x003c }, /* leftcaret < LESS-THAN SIGN */
+ { 0x0ba6, 0x003e }, /* rightcaret > GREATER-THAN SIGN */
+ { 0x0ba8, 0x2228 }, /* downcaret ∨ LOGICAL OR */
+ { 0x0ba9, 0x2227 }, /* upcaret ∧ LOGICAL AND */
+ { 0x0bc0, 0x00af }, /* overbar ¯ MACRON */
+ { 0x0bc2, 0x22a5 }, /* downtack ⊥ UP TACK */
+ { 0x0bc3, 0x2229 }, /* upshoe ∩ INTERSECTION */
+ { 0x0bc4, 0x230a }, /* downstile ⌊ LEFT FLOOR */
+ { 0x0bc6, 0x005f }, /* underbar _ LOW LINE */
+ { 0x0bca, 0x2218 }, /* jot ∘ RING OPERATOR */
+ { 0x0bcc, 0x2395 }, /* quad ⎕ APL FUNCTIONAL SYMBOL QUAD */
+ { 0x0bce, 0x22a4 }, /* uptack ⊤ DOWN TACK */
+ { 0x0bcf, 0x25cb }, /* circle ○ WHITE CIRCLE */
+ { 0x0bd3, 0x2308 }, /* upstile ⌈ LEFT CEILING */
+ { 0x0bd6, 0x222a }, /* downshoe ∪ UNION */
+ { 0x0bd8, 0x2283 }, /* rightshoe ⊃ SUPERSET OF */
+ { 0x0bda, 0x2282 }, /* leftshoe ⊂ SUBSET OF */
+ { 0x0bdc, 0x22a2 }, /* lefttack ⊢ RIGHT TACK */
+ { 0x0bfc, 0x22a3 }, /* righttack ⊣ LEFT TACK */
+ { 0x0cdf, 0x2017 }, /* hebrew_doublelowline ‗ DOUBLE LOW LINE */
+ { 0x0ce0, 0x05d0 }, /* hebrew_aleph א HEBREW LETTER ALEF */
+ { 0x0ce1, 0x05d1 }, /* hebrew_bet ב HEBREW LETTER BET */
+ { 0x0ce2, 0x05d2 }, /* hebrew_gimel ג HEBREW LETTER GIMEL */
+ { 0x0ce3, 0x05d3 }, /* hebrew_dalet ד HEBREW LETTER DALET */
+ { 0x0ce4, 0x05d4 }, /* hebrew_he ה HEBREW LETTER HE */
+ { 0x0ce5, 0x05d5 }, /* hebrew_waw ו HEBREW LETTER VAV */
+ { 0x0ce6, 0x05d6 }, /* hebrew_zain ז HEBREW LETTER ZAYIN */
+ { 0x0ce7, 0x05d7 }, /* hebrew_chet ח HEBREW LETTER HET */
+ { 0x0ce8, 0x05d8 }, /* hebrew_tet ט HEBREW LETTER TET */
+ { 0x0ce9, 0x05d9 }, /* hebrew_yod י HEBREW LETTER YOD */
+ { 0x0cea, 0x05da }, /* hebrew_finalkaph ך HEBREW LETTER FINAL KAF */
+ { 0x0ceb, 0x05db }, /* hebrew_kaph כ HEBREW LETTER KAF */
+ { 0x0cec, 0x05dc }, /* hebrew_lamed ל HEBREW LETTER LAMED */
+ { 0x0ced, 0x05dd }, /* hebrew_finalmem ם HEBREW LETTER FINAL MEM */
+ { 0x0cee, 0x05de }, /* hebrew_mem מ HEBREW LETTER MEM */
+ { 0x0cef, 0x05df }, /* hebrew_finalnun ן HEBREW LETTER FINAL NUN */
+ { 0x0cf0, 0x05e0 }, /* hebrew_nun נ HEBREW LETTER NUN */
+ { 0x0cf1, 0x05e1 }, /* hebrew_samech ס HEBREW LETTER SAMEKH */
+ { 0x0cf2, 0x05e2 }, /* hebrew_ayin ע HEBREW LETTER AYIN */
+ { 0x0cf3, 0x05e3 }, /* hebrew_finalpe ף HEBREW LETTER FINAL PE */
+ { 0x0cf4, 0x05e4 }, /* hebrew_pe פ HEBREW LETTER PE */
+ { 0x0cf5, 0x05e5 }, /* hebrew_finalzade ץ HEBREW LETTER FINAL TSADI */
+ { 0x0cf6, 0x05e6 }, /* hebrew_zade צ HEBREW LETTER TSADI */
+ { 0x0cf7, 0x05e7 }, /* hebrew_qoph ק HEBREW LETTER QOF */
+ { 0x0cf8, 0x05e8 }, /* hebrew_resh ר HEBREW LETTER RESH */
+ { 0x0cf9, 0x05e9 }, /* hebrew_shin ש HEBREW LETTER SHIN */
+ { 0x0cfa, 0x05ea }, /* hebrew_taw ת HEBREW LETTER TAV */
+ { 0x0da1, 0x0e01 }, /* Thai_kokai ก THAI CHARACTER KO KAI */
+ { 0x0da2, 0x0e02 }, /* Thai_khokhai ข THAI CHARACTER KHO KHAI */
+ { 0x0da3, 0x0e03 }, /* Thai_khokhuat ฃ THAI CHARACTER KHO KHUAT */
+ { 0x0da4, 0x0e04 }, /* Thai_khokhwai ค THAI CHARACTER KHO KHWAI */
+ { 0x0da5, 0x0e05 }, /* Thai_khokhon ฅ THAI CHARACTER KHO KHON */
+ { 0x0da6, 0x0e06 }, /* Thai_khorakhang ฆ THAI CHARACTER KHO RAKHANG */
+ { 0x0da7, 0x0e07 }, /* Thai_ngongu ง THAI CHARACTER NGO NGU */
+ { 0x0da8, 0x0e08 }, /* Thai_chochan จ THAI CHARACTER CHO CHAN */
+ { 0x0da9, 0x0e09 }, /* Thai_choching ฉ THAI CHARACTER CHO CHING */
+ { 0x0daa, 0x0e0a }, /* Thai_chochang ช THAI CHARACTER CHO CHANG */
+ { 0x0dab, 0x0e0b }, /* Thai_soso ซ THAI CHARACTER SO SO */
+ { 0x0dac, 0x0e0c }, /* Thai_chochoe ฌ THAI CHARACTER CHO CHOE */
+ { 0x0dad, 0x0e0d }, /* Thai_yoying ญ THAI CHARACTER YO YING */
+ { 0x0dae, 0x0e0e }, /* Thai_dochada ฎ THAI CHARACTER DO CHADA */
+ { 0x0daf, 0x0e0f }, /* Thai_topatak ฏ THAI CHARACTER TO PATAK */
+ { 0x0db0, 0x0e10 }, /* Thai_thothan ฐ THAI CHARACTER THO THAN */
+ { 0x0db1, 0x0e11 }, /* Thai_thonangmontho ฑ THAI CHARACTER THO NANGMONTHO */
+ { 0x0db2, 0x0e12 }, /* Thai_thophuthao ฒ THAI CHARACTER THO PHUTHAO */
+ { 0x0db3, 0x0e13 }, /* Thai_nonen ณ THAI CHARACTER NO NEN */
+ { 0x0db4, 0x0e14 }, /* Thai_dodek ด THAI CHARACTER DO DEK */
+ { 0x0db5, 0x0e15 }, /* Thai_totao ต THAI CHARACTER TO TAO */
+ { 0x0db6, 0x0e16 }, /* Thai_thothung ถ THAI CHARACTER THO THUNG */
+ { 0x0db7, 0x0e17 }, /* Thai_thothahan ท THAI CHARACTER THO THAHAN */
+ { 0x0db8, 0x0e18 }, /* Thai_thothong ธ THAI CHARACTER THO THONG */
+ { 0x0db9, 0x0e19 }, /* Thai_nonu น THAI CHARACTER NO NU */
+ { 0x0dba, 0x0e1a }, /* Thai_bobaimai บ THAI CHARACTER BO BAIMAI */
+ { 0x0dbb, 0x0e1b }, /* Thai_popla ป THAI CHARACTER PO PLA */
+ { 0x0dbc, 0x0e1c }, /* Thai_phophung ผ THAI CHARACTER PHO PHUNG */
+ { 0x0dbd, 0x0e1d }, /* Thai_fofa ฝ THAI CHARACTER FO FA */
+ { 0x0dbe, 0x0e1e }, /* Thai_phophan พ THAI CHARACTER PHO PHAN */
+ { 0x0dbf, 0x0e1f }, /* Thai_fofan ฟ THAI CHARACTER FO FAN */
+ { 0x0dc0, 0x0e20 }, /* Thai_phosamphao ภ THAI CHARACTER PHO SAMPHAO */
+ { 0x0dc1, 0x0e21 }, /* Thai_moma ม THAI CHARACTER MO MA */
+ { 0x0dc2, 0x0e22 }, /* Thai_yoyak ย THAI CHARACTER YO YAK */
+ { 0x0dc3, 0x0e23 }, /* Thai_rorua ร THAI CHARACTER RO RUA */
+ { 0x0dc4, 0x0e24 }, /* Thai_ru ฤ THAI CHARACTER RU */
+ { 0x0dc5, 0x0e25 }, /* Thai_loling ล THAI CHARACTER LO LING */
+ { 0x0dc6, 0x0e26 }, /* Thai_lu ฦ THAI CHARACTER LU */
+ { 0x0dc7, 0x0e27 }, /* Thai_wowaen ว THAI CHARACTER WO WAEN */
+ { 0x0dc8, 0x0e28 }, /* Thai_sosala ศ THAI CHARACTER SO SALA */
+ { 0x0dc9, 0x0e29 }, /* Thai_sorusi ษ THAI CHARACTER SO RUSI */
+ { 0x0dca, 0x0e2a }, /* Thai_sosua ส THAI CHARACTER SO SUA */
+ { 0x0dcb, 0x0e2b }, /* Thai_hohip ห THAI CHARACTER HO HIP */
+ { 0x0dcc, 0x0e2c }, /* Thai_lochula ฬ THAI CHARACTER LO CHULA */
+ { 0x0dcd, 0x0e2d }, /* Thai_oang อ THAI CHARACTER O ANG */
+ { 0x0dce, 0x0e2e }, /* Thai_honokhuk ฮ THAI CHARACTER HO NOKHUK */
+ { 0x0dcf, 0x0e2f }, /* Thai_paiyannoi ฯ THAI CHARACTER PAIYANNOI */
+ { 0x0dd0, 0x0e30 }, /* Thai_saraa ะ THAI CHARACTER SARA A */
+ { 0x0dd1, 0x0e31 }, /* Thai_maihanakat ั THAI CHARACTER MAI HAN-AKAT */
+ { 0x0dd2, 0x0e32 }, /* Thai_saraaa า THAI CHARACTER SARA AA */
+ { 0x0dd3, 0x0e33 }, /* Thai_saraam ำ THAI CHARACTER SARA AM */
+ { 0x0dd4, 0x0e34 }, /* Thai_sarai ิ THAI CHARACTER SARA I */
+ { 0x0dd5, 0x0e35 }, /* Thai_saraii ี THAI CHARACTER SARA II */
+ { 0x0dd6, 0x0e36 }, /* Thai_saraue ึ THAI CHARACTER SARA UE */
+ { 0x0dd7, 0x0e37 }, /* Thai_sarauee ื THAI CHARACTER SARA UEE */
+ { 0x0dd8, 0x0e38 }, /* Thai_sarau ุ THAI CHARACTER SARA U */
+ { 0x0dd9, 0x0e39 }, /* Thai_sarauu ู THAI CHARACTER SARA UU */
+ { 0x0dda, 0x0e3a }, /* Thai_phinthu ฺ THAI CHARACTER PHINTHU */
+/* 0x0dde Thai_maihanakat_maitho ? ??? */
+ { 0x0ddf, 0x0e3f }, /* Thai_baht ฿ THAI CURRENCY SYMBOL BAHT */
+ { 0x0de0, 0x0e40 }, /* Thai_sarae เ THAI CHARACTER SARA E */
+ { 0x0de1, 0x0e41 }, /* Thai_saraae แ THAI CHARACTER SARA AE */
+ { 0x0de2, 0x0e42 }, /* Thai_sarao โ THAI CHARACTER SARA O */
+ { 0x0de3, 0x0e43 }, /* Thai_saraaimaimuan ใ THAI CHARACTER SARA AI MAIMUAN */
+ { 0x0de4, 0x0e44 }, /* Thai_saraaimaimalai ไ THAI CHARACTER SARA AI MAIMALAI */
+ { 0x0de5, 0x0e45 }, /* Thai_lakkhangyao ๅ THAI CHARACTER LAKKHANGYAO */
+ { 0x0de6, 0x0e46 }, /* Thai_maiyamok ๆ THAI CHARACTER MAIYAMOK */
+ { 0x0de7, 0x0e47 }, /* Thai_maitaikhu ็ THAI CHARACTER MAITAIKHU */
+ { 0x0de8, 0x0e48 }, /* Thai_maiek ่ THAI CHARACTER MAI EK */
+ { 0x0de9, 0x0e49 }, /* Thai_maitho ้ THAI CHARACTER MAI THO */
+ { 0x0dea, 0x0e4a }, /* Thai_maitri ๊ THAI CHARACTER MAI TRI */
+ { 0x0deb, 0x0e4b }, /* Thai_maichattawa ๋ THAI CHARACTER MAI CHATTAWA */
+ { 0x0dec, 0x0e4c }, /* Thai_thanthakhat ์ THAI CHARACTER THANTHAKHAT */
+ { 0x0ded, 0x0e4d }, /* Thai_nikhahit ํ THAI CHARACTER NIKHAHIT */
+ { 0x0df0, 0x0e50 }, /* Thai_leksun ๐ THAI DIGIT ZERO */
+ { 0x0df1, 0x0e51 }, /* Thai_leknung ๑ THAI DIGIT ONE */
+ { 0x0df2, 0x0e52 }, /* Thai_leksong ๒ THAI DIGIT TWO */
+ { 0x0df3, 0x0e53 }, /* Thai_leksam ๓ THAI DIGIT THREE */
+ { 0x0df4, 0x0e54 }, /* Thai_leksi ๔ THAI DIGIT FOUR */
+ { 0x0df5, 0x0e55 }, /* Thai_lekha ๕ THAI DIGIT FIVE */
+ { 0x0df6, 0x0e56 }, /* Thai_lekhok ๖ THAI DIGIT SIX */
+ { 0x0df7, 0x0e57 }, /* Thai_lekchet ๗ THAI DIGIT SEVEN */
+ { 0x0df8, 0x0e58 }, /* Thai_lekpaet ๘ THAI DIGIT EIGHT */
+ { 0x0df9, 0x0e59 }, /* Thai_lekkao ๙ THAI DIGIT NINE */
+ { 0x0ea1, 0x3131 }, /* Hangul_Kiyeog ㄱ HANGUL LETTER KIYEOK */
+ { 0x0ea2, 0x3132 }, /* Hangul_SsangKiyeog ㄲ HANGUL LETTER SSANGKIYEOK */
+ { 0x0ea3, 0x3133 }, /* Hangul_KiyeogSios ㄳ HANGUL LETTER KIYEOK-SIOS */
+ { 0x0ea4, 0x3134 }, /* Hangul_Nieun ㄴ HANGUL LETTER NIEUN */
+ { 0x0ea5, 0x3135 }, /* Hangul_NieunJieuj ㄵ HANGUL LETTER NIEUN-CIEUC */
+ { 0x0ea6, 0x3136 }, /* Hangul_NieunHieuh ㄶ HANGUL LETTER NIEUN-HIEUH */
+ { 0x0ea7, 0x3137 }, /* Hangul_Dikeud ㄷ HANGUL LETTER TIKEUT */
+ { 0x0ea8, 0x3138 }, /* Hangul_SsangDikeud ㄸ HANGUL LETTER SSANGTIKEUT */
+ { 0x0ea9, 0x3139 }, /* Hangul_Rieul ㄹ HANGUL LETTER RIEUL */
+ { 0x0eaa, 0x313a }, /* Hangul_RieulKiyeog ㄺ HANGUL LETTER RIEUL-KIYEOK */
+ { 0x0eab, 0x313b }, /* Hangul_RieulMieum ㄻ HANGUL LETTER RIEUL-MIEUM */
+ { 0x0eac, 0x313c }, /* Hangul_RieulPieub ㄼ HANGUL LETTER RIEUL-PIEUP */
+ { 0x0ead, 0x313d }, /* Hangul_RieulSios ㄽ HANGUL LETTER RIEUL-SIOS */
+ { 0x0eae, 0x313e }, /* Hangul_RieulTieut ㄾ HANGUL LETTER RIEUL-THIEUTH */
+ { 0x0eaf, 0x313f }, /* Hangul_RieulPhieuf ㄿ HANGUL LETTER RIEUL-PHIEUPH */
+ { 0x0eb0, 0x3140 }, /* Hangul_RieulHieuh ㅀ HANGUL LETTER RIEUL-HIEUH */
+ { 0x0eb1, 0x3141 }, /* Hangul_Mieum ㅁ HANGUL LETTER MIEUM */
+ { 0x0eb2, 0x3142 }, /* Hangul_Pieub ㅂ HANGUL LETTER PIEUP */
+ { 0x0eb3, 0x3143 }, /* Hangul_SsangPieub ㅃ HANGUL LETTER SSANGPIEUP */
+ { 0x0eb4, 0x3144 }, /* Hangul_PieubSios ㅄ HANGUL LETTER PIEUP-SIOS */
+ { 0x0eb5, 0x3145 }, /* Hangul_Sios ㅅ HANGUL LETTER SIOS */
+ { 0x0eb6, 0x3146 }, /* Hangul_SsangSios ㅆ HANGUL LETTER SSANGSIOS */
+ { 0x0eb7, 0x3147 }, /* Hangul_Ieung ㅇ HANGUL LETTER IEUNG */
+ { 0x0eb8, 0x3148 }, /* Hangul_Jieuj ㅈ HANGUL LETTER CIEUC */
+ { 0x0eb9, 0x3149 }, /* Hangul_SsangJieuj ㅉ HANGUL LETTER SSANGCIEUC */
+ { 0x0eba, 0x314a }, /* Hangul_Cieuc ㅊ HANGUL LETTER CHIEUCH */
+ { 0x0ebb, 0x314b }, /* Hangul_Khieuq ㅋ HANGUL LETTER KHIEUKH */
+ { 0x0ebc, 0x314c }, /* Hangul_Tieut ㅌ HANGUL LETTER THIEUTH */
+ { 0x0ebd, 0x314d }, /* Hangul_Phieuf ㅍ HANGUL LETTER PHIEUPH */
+ { 0x0ebe, 0x314e }, /* Hangul_Hieuh ㅎ HANGUL LETTER HIEUH */
+ { 0x0ebf, 0x314f }, /* Hangul_A ㅏ HANGUL LETTER A */
+ { 0x0ec0, 0x3150 }, /* Hangul_AE ㅐ HANGUL LETTER AE */
+ { 0x0ec1, 0x3151 }, /* Hangul_YA ㅑ HANGUL LETTER YA */
+ { 0x0ec2, 0x3152 }, /* Hangul_YAE ㅒ HANGUL LETTER YAE */
+ { 0x0ec3, 0x3153 }, /* Hangul_EO ㅓ HANGUL LETTER EO */
+ { 0x0ec4, 0x3154 }, /* Hangul_E ㅔ HANGUL LETTER E */
+ { 0x0ec5, 0x3155 }, /* Hangul_YEO ㅕ HANGUL LETTER YEO */
+ { 0x0ec6, 0x3156 }, /* Hangul_YE ㅖ HANGUL LETTER YE */
+ { 0x0ec7, 0x3157 }, /* Hangul_O ㅗ HANGUL LETTER O */
+ { 0x0ec8, 0x3158 }, /* Hangul_WA ㅘ HANGUL LETTER WA */
+ { 0x0ec9, 0x3159 }, /* Hangul_WAE ㅙ HANGUL LETTER WAE */
+ { 0x0eca, 0x315a }, /* Hangul_OE ㅚ HANGUL LETTER OE */
+ { 0x0ecb, 0x315b }, /* Hangul_YO ㅛ HANGUL LETTER YO */
+ { 0x0ecc, 0x315c }, /* Hangul_U ㅜ HANGUL LETTER U */
+ { 0x0ecd, 0x315d }, /* Hangul_WEO ㅝ HANGUL LETTER WEO */
+ { 0x0ece, 0x315e }, /* Hangul_WE ㅞ HANGUL LETTER WE */
+ { 0x0ecf, 0x315f }, /* Hangul_WI ㅟ HANGUL LETTER WI */
+ { 0x0ed0, 0x3160 }, /* Hangul_YU ㅠ HANGUL LETTER YU */
+ { 0x0ed1, 0x3161 }, /* Hangul_EU ㅡ HANGUL LETTER EU */
+ { 0x0ed2, 0x3162 }, /* Hangul_YI ㅢ HANGUL LETTER YI */
+ { 0x0ed3, 0x3163 }, /* Hangul_I ㅣ HANGUL LETTER I */
+ { 0x0ed4, 0x11a8 }, /* Hangul_J_Kiyeog ᆨ HANGUL JONGSEONG KIYEOK */
+ { 0x0ed5, 0x11a9 }, /* Hangul_J_SsangKiyeog ᆩ HANGUL JONGSEONG SSANGKIYEOK */
+ { 0x0ed6, 0x11aa }, /* Hangul_J_KiyeogSios ᆪ HANGUL JONGSEONG KIYEOK-SIOS */
+ { 0x0ed7, 0x11ab }, /* Hangul_J_Nieun ᆫ HANGUL JONGSEONG NIEUN */
+ { 0x0ed8, 0x11ac }, /* Hangul_J_NieunJieuj ᆬ HANGUL JONGSEONG NIEUN-CIEUC */
+ { 0x0ed9, 0x11ad }, /* Hangul_J_NieunHieuh ᆭ HANGUL JONGSEONG NIEUN-HIEUH */
+ { 0x0eda, 0x11ae }, /* Hangul_J_Dikeud ᆮ HANGUL JONGSEONG TIKEUT */
+ { 0x0edb, 0x11af }, /* Hangul_J_Rieul ᆯ HANGUL JONGSEONG RIEUL */
+ { 0x0edc, 0x11b0 }, /* Hangul_J_RieulKiyeog ᆰ HANGUL JONGSEONG RIEUL-KIYEOK */
+ { 0x0edd, 0x11b1 }, /* Hangul_J_RieulMieum ᆱ HANGUL JONGSEONG RIEUL-MIEUM */
+ { 0x0ede, 0x11b2 }, /* Hangul_J_RieulPieub ᆲ HANGUL JONGSEONG RIEUL-PIEUP */
+ { 0x0edf, 0x11b3 }, /* Hangul_J_RieulSios ᆳ HANGUL JONGSEONG RIEUL-SIOS */
+ { 0x0ee0, 0x11b4 }, /* Hangul_J_RieulTieut ᆴ HANGUL JONGSEONG RIEUL-THIEUTH */
+ { 0x0ee1, 0x11b5 }, /* Hangul_J_RieulPhieuf ᆵ HANGUL JONGSEONG RIEUL-PHIEUPH */
+ { 0x0ee2, 0x11b6 }, /* Hangul_J_RieulHieuh ᆶ HANGUL JONGSEONG RIEUL-HIEUH */
+ { 0x0ee3, 0x11b7 }, /* Hangul_J_Mieum ᆷ HANGUL JONGSEONG MIEUM */
+ { 0x0ee4, 0x11b8 }, /* Hangul_J_Pieub ᆸ HANGUL JONGSEONG PIEUP */
+ { 0x0ee5, 0x11b9 }, /* Hangul_J_PieubSios ᆹ HANGUL JONGSEONG PIEUP-SIOS */
+ { 0x0ee6, 0x11ba }, /* Hangul_J_Sios ᆺ HANGUL JONGSEONG SIOS */
+ { 0x0ee7, 0x11bb }, /* Hangul_J_SsangSios ᆻ HANGUL JONGSEONG SSANGSIOS */
+ { 0x0ee8, 0x11bc }, /* Hangul_J_Ieung ᆼ HANGUL JONGSEONG IEUNG */
+ { 0x0ee9, 0x11bd }, /* Hangul_J_Jieuj ᆽ HANGUL JONGSEONG CIEUC */
+ { 0x0eea, 0x11be }, /* Hangul_J_Cieuc ᆾ HANGUL JONGSEONG CHIEUCH */
+ { 0x0eeb, 0x11bf }, /* Hangul_J_Khieuq ᆿ HANGUL JONGSEONG KHIEUKH */
+ { 0x0eec, 0x11c0 }, /* Hangul_J_Tieut ᇀ HANGUL JONGSEONG THIEUTH */
+ { 0x0eed, 0x11c1 }, /* Hangul_J_Phieuf ᇁ HANGUL JONGSEONG PHIEUPH */
+ { 0x0eee, 0x11c2 }, /* Hangul_J_Hieuh ᇂ HANGUL JONGSEONG HIEUH */
+ { 0x0eef, 0x316d }, /* Hangul_RieulYeorinHieuh ㅭ HANGUL LETTER RIEUL-YEORINHIEUH */
+ { 0x0ef0, 0x3171 }, /* Hangul_SunkyeongeumMieum ㅱ HANGUL LETTER KAPYEOUNMIEUM */
+ { 0x0ef1, 0x3178 }, /* Hangul_SunkyeongeumPieub ㅸ HANGUL LETTER KAPYEOUNPIEUP */
+ { 0x0ef2, 0x317f }, /* Hangul_PanSios ㅿ HANGUL LETTER PANSIOS */
+ { 0x0ef3, 0x3181 }, /* Hangul_KkogjiDalrinIeung ㆁ HANGUL LETTER YESIEUNG */
+ { 0x0ef4, 0x3184 }, /* Hangul_SunkyeongeumPhieuf ㆄ HANGUL LETTER KAPYEOUNPHIEUPH */
+ { 0x0ef5, 0x3186 }, /* Hangul_YeorinHieuh ㆆ HANGUL LETTER YEORINHIEUH */
+ { 0x0ef6, 0x318d }, /* Hangul_AraeA ㆍ HANGUL LETTER ARAEA */
+ { 0x0ef7, 0x318e }, /* Hangul_AraeAE ㆎ HANGUL LETTER ARAEAE */
+ { 0x0ef8, 0x11eb }, /* Hangul_J_PanSios ᇫ HANGUL JONGSEONG PANSIOS */
+ { 0x0ef9, 0x11f0 }, /* Hangul_J_KkogjiDalrinIeung ᇰ HANGUL JONGSEONG YESIEUNG */
+ { 0x0efa, 0x11f9 }, /* Hangul_J_YeorinHieuh ᇹ HANGUL JONGSEONG YEORINHIEUH */
+ { 0x0eff, 0x20a9 }, /* Korean_Won ₩ WON SIGN */
+ { 0x13a4, 0x20ac }, /* Euro € EURO SIGN */
+ { 0x13bc, 0x0152 }, /* OE Œ LATIN CAPITAL LIGATURE OE */
+ { 0x13bd, 0x0153 }, /* oe œ LATIN SMALL LIGATURE OE */
+ { 0x13be, 0x0178 }, /* Ydiaeresis Ÿ LATIN CAPITAL LETTER Y WITH DIAERESIS */
+ { 0x20ac, 0x20ac }, /* EuroSign € EURO SIGN */
+};
+
+long keysym2ucs(KeySym keysym)
+{
+ int min = 0;
+ int max = sizeof(keysymtab) / sizeof(struct codepair) - 1;
+ int mid;
+
+ /* first check for Latin-1 characters (1:1 mapping) */
+ if ((keysym >= 0x0020 && keysym <= 0x007e) ||
+ (keysym >= 0x00a0 && keysym <= 0x00ff))
+ return keysym;
+
+ /* also check for directly encoded 24-bit UCS characters */
+ if ((keysym & 0xff000000) == 0x01000000)
+ return keysym & 0x00ffffff;
+
+ /* binary search in table */
+ while (max >= min) {
+ mid = (min + max) / 2;
+ if (keysymtab[mid].keysym < keysym)
+ min = mid + 1;
+ else if (keysymtab[mid].keysym > keysym)
+ max = mid - 1;
+ else {
+ /* found it */
+ return keysymtab[mid].ucs;
+ }
+ }
+
+ /* no matching Unicode value found */
+ return -1;
+}
diff --git a/gfx/skia/skia/src/views/unix/keysym2ucs.h b/gfx/skia/skia/src/views/unix/keysym2ucs.h
new file mode 100644
index 000000000..0287f02ad
--- /dev/null
+++ b/gfx/skia/skia/src/views/unix/keysym2ucs.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+/*
+ * This module converts keysym values into the corresponding ISO 10646-1
+ * (UCS, Unicode) values.
+ */
+
+#include <X11/X.h>
+
+long keysym2ucs(KeySym keysym);
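
For context, a minimal usage sketch (illustrative only, not part of the patch) of the keysym2ucs() helper declared above, inside an ordinary Xlib key-event handler; XLookupKeysym() is standard Xlib, and the function name unichar_for_key_event is made up for the example.

    #include <X11/Xlib.h>
    #include "keysym2ucs.h"

    /* Map a pressed key to a Unicode code point, or -1 if the keysym has no mapping. */
    static long unichar_for_key_event(XKeyEvent* event) {
        KeySym keysym = XLookupKeysym(event, 0);   /* unshifted keysym for the key */
        return keysym2ucs(keysym);                 /* Latin-1 pass-through and table lookup handled inside */
    }
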
diff --git a/gfx/skia/skia/src/views/unix/skia_unix.cpp b/gfx/skia/skia/src/views/unix/skia_unix.cpp
new file mode 100644
index 000000000..9f9905962
--- /dev/null
+++ b/gfx/skia/skia/src/views/unix/skia_unix.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkApplication.h"
+#include "SkEvent.h"
+#include "SkGraphics.h"
+#include "SkWindow.h"
+
+int main(int argc, char** argv){
+ SkGraphics::Init();
+ SkOSWindow* window = create_sk_window(nullptr, argc, argv);
+
+ // drain any events that occurred before |window| was assigned.
+ while (SkEvent::ProcessEvent());
+
+ // Start normal Skia sequence
+ application_init();
+
+ window->loop();
+
+ delete window;
+ application_term();
+ return 0;
+}
diff --git a/gfx/skia/skia/src/views/win/SkOSWindow_win.cpp b/gfx/skia/skia/src/views/win/SkOSWindow_win.cpp
new file mode 100644
index 000000000..983b253c5
--- /dev/null
+++ b/gfx/skia/skia/src/views/win/SkOSWindow_win.cpp
@@ -0,0 +1,772 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkTypes.h"
+
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "SkLeanWindows.h"
+
+#include <GL/gl.h>
+#include <WindowsX.h>
+#include "win/SkWGL.h"
+#include "SkWindow.h"
+#include "SkCanvas.h"
+#include "SkOSMenu.h"
+#include "SkTime.h"
+#include "SkUtils.h"
+
+#include "SkGraphics.h"
+
+#if SK_ANGLE
+#include "gl/GrGLAssembleInterface.h"
+#include "gl/GrGLInterface.h"
+#include "GLES2/gl2.h"
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#endif // SK_ANGLE
+
+const int kDefaultWindowWidth = 500;
+const int kDefaultWindowHeight = 500;
+
+#define GL_CALL(IFACE, X) \
+ SkASSERT(IFACE); \
+ do { \
+ (IFACE)->fFunctions.f##X; \
+ } while (false)
+
+#define WM_EVENT_CALLBACK (WM_USER+0)
+
+void post_skwinevent(HWND hwnd)
+{
+ PostMessage(hwnd, WM_EVENT_CALLBACK, 0, 0);
+}
+
+SkTHashMap<void*, SkOSWindow*> SkOSWindow::gHwndToOSWindowMap;
+
+SkOSWindow::SkOSWindow(const void* winInit) {
+ fWinInit = *(const WindowInit*)winInit;
+
+ fHWND = CreateWindow(fWinInit.fClass, NULL, WS_OVERLAPPEDWINDOW,
+ CW_USEDEFAULT, 0, kDefaultWindowWidth, kDefaultWindowHeight, NULL, NULL,
+ fWinInit.fInstance, NULL);
+ gHwndToOSWindowMap.set(fHWND, this);
+#if SK_SUPPORT_GPU
+#if SK_ANGLE
+ fDisplay = EGL_NO_DISPLAY;
+ fContext = EGL_NO_CONTEXT;
+ fSurface = EGL_NO_SURFACE;
+#endif
+
+ fHGLRC = NULL;
+#endif
+ fAttached = kNone_BackEndType;
+ fFullscreen = false;
+}
+
+SkOSWindow::~SkOSWindow() {
+#if SK_SUPPORT_GPU
+ if (fHGLRC) {
+ wglDeleteContext((HGLRC)fHGLRC);
+ }
+#if SK_ANGLE
+ if (EGL_NO_CONTEXT != fContext) {
+ eglDestroyContext(fDisplay, fContext);
+ fContext = EGL_NO_CONTEXT;
+ }
+
+ if (EGL_NO_SURFACE != fSurface) {
+ eglDestroySurface(fDisplay, fSurface);
+ fSurface = EGL_NO_SURFACE;
+ }
+
+ if (EGL_NO_DISPLAY != fDisplay) {
+ eglTerminate(fDisplay);
+ fDisplay = EGL_NO_DISPLAY;
+ }
+#endif // SK_ANGLE
+#endif // SK_SUPPORT_GPU
+ this->closeWindow();
+}
+
+static SkKey winToskKey(WPARAM vk) {
+ static const struct {
+ WPARAM fVK;
+ SkKey fKey;
+ } gPair[] = {
+ { VK_BACK, kBack_SkKey },
+ { VK_CLEAR, kBack_SkKey },
+ { VK_RETURN, kOK_SkKey },
+ { VK_UP, kUp_SkKey },
+ { VK_DOWN, kDown_SkKey },
+ { VK_LEFT, kLeft_SkKey },
+ { VK_RIGHT, kRight_SkKey }
+ };
+ for (size_t i = 0; i < SK_ARRAY_COUNT(gPair); i++) {
+ if (gPair[i].fVK == vk) {
+ return gPair[i].fKey;
+ }
+ }
+ return kNONE_SkKey;
+}
+
+static unsigned getModifiers(UINT message) {
+ return 0; // TODO
+}
+
+bool SkOSWindow::wndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam) {
+ switch (message) {
+ case WM_KEYDOWN: {
+ SkKey key = winToskKey(wParam);
+ if (kNONE_SkKey != key) {
+ this->handleKey(key);
+ return true;
+ }
+ } break;
+ case WM_KEYUP: {
+ SkKey key = winToskKey(wParam);
+ if (kNONE_SkKey != key) {
+ this->handleKeyUp(key);
+ return true;
+ }
+ } break;
+ case WM_UNICHAR:
+ this->handleChar((SkUnichar) wParam);
+ return true;
+ case WM_CHAR: {
+ const uint16_t* c = reinterpret_cast<uint16_t*>(&wParam);
+ this->handleChar(SkUTF16_NextUnichar(&c));
+ return true;
+ } break;
+ case WM_SIZE: {
+ INT width = LOWORD(lParam);
+ INT height = HIWORD(lParam);
+ this->resize(width, height);
+ break;
+ }
+ case WM_PAINT: {
+ PAINTSTRUCT ps;
+ HDC hdc = BeginPaint(hWnd, &ps);
+ this->doPaint(hdc);
+ EndPaint(hWnd, &ps);
+ return true;
+ } break;
+
+ case WM_LBUTTONDOWN:
+ this->handleClick(GET_X_LPARAM(lParam), GET_Y_LPARAM(lParam),
+ Click::kDown_State, NULL, getModifiers(message));
+ return true;
+
+ case WM_MOUSEMOVE:
+ this->handleClick(GET_X_LPARAM(lParam), GET_Y_LPARAM(lParam),
+ Click::kMoved_State, NULL, getModifiers(message));
+ return true;
+
+ case WM_LBUTTONUP:
+ this->handleClick(GET_X_LPARAM(lParam), GET_Y_LPARAM(lParam),
+ Click::kUp_State, NULL, getModifiers(message));
+ return true;
+
+ case WM_EVENT_CALLBACK:
+ if (SkEvent::ProcessEvent()) {
+ post_skwinevent(hWnd);
+ }
+ return true;
+ }
+ return false;
+}
+
+void SkOSWindow::doPaint(void* ctx) {
+ this->update(NULL);
+
+ if (kNone_BackEndType == fAttached)
+ {
+ HDC hdc = (HDC)ctx;
+ const SkBitmap& bitmap = this->getBitmap();
+
+ BITMAPINFO bmi;
+ memset(&bmi, 0, sizeof(bmi));
+ bmi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
+ bmi.bmiHeader.biWidth = bitmap.width();
+ bmi.bmiHeader.biHeight = -bitmap.height(); // top-down image
+ bmi.bmiHeader.biPlanes = 1;
+ bmi.bmiHeader.biBitCount = 32;
+ bmi.bmiHeader.biCompression = BI_RGB;
+ bmi.bmiHeader.biSizeImage = 0;
+
+ //
+ // Do the SetDIBitsToDevice.
+ //
+ // TODO(wjmaclean):
+ // Fix this call to handle SkBitmaps that have rowBytes != width,
+ // i.e. may have padding at the end of lines. The SkASSERT below
+ // may be ignored by builds, and the only obviously safe option
+ // seems to be to copy the bitmap to a temporary (contiguous)
+ // buffer before passing to SetDIBitsToDevice().
+ SkASSERT(bitmap.width() * bitmap.bytesPerPixel() == bitmap.rowBytes());
+ bitmap.lockPixels();
+ int ret = SetDIBitsToDevice(hdc,
+ 0, 0,
+ bitmap.width(), bitmap.height(),
+ 0, 0,
+ 0, bitmap.height(),
+ bitmap.getPixels(),
+ &bmi,
+ DIB_RGB_COLORS);
+ (void)ret; // we're ignoring potential failures for now.
+ bitmap.unlockPixels();
+ }
+}
+
+void SkOSWindow::updateSize()
+{
+ RECT r;
+ GetWindowRect((HWND)fHWND, &r);
+ this->resize(r.right - r.left, r.bottom - r.top);
+}
+
+void SkOSWindow::onHandleInval(const SkIRect& r) {
+ RECT rect;
+ rect.left = r.fLeft;
+ rect.top = r.fTop;
+ rect.right = r.fRight;
+ rect.bottom = r.fBottom;
+ InvalidateRect((HWND)fHWND, &rect, FALSE);
+}
+
+void SkOSWindow::onAddMenu(const SkOSMenu* sk_menu)
+{
+}
+
+void SkOSWindow::onSetTitle(const char title[]){
+ SetWindowTextA((HWND)fHWND, title);
+}
+
+enum {
+ SK_MacReturnKey = 36,
+ SK_MacDeleteKey = 51,
+ SK_MacEndKey = 119,
+ SK_MacLeftKey = 123,
+ SK_MacRightKey = 124,
+ SK_MacDownKey = 125,
+ SK_MacUpKey = 126,
+
+ SK_Mac0Key = 0x52,
+ SK_Mac1Key = 0x53,
+ SK_Mac2Key = 0x54,
+ SK_Mac3Key = 0x55,
+ SK_Mac4Key = 0x56,
+ SK_Mac5Key = 0x57,
+ SK_Mac6Key = 0x58,
+ SK_Mac7Key = 0x59,
+ SK_Mac8Key = 0x5b,
+ SK_Mac9Key = 0x5c
+};
+
+static SkKey raw2key(uint32_t raw)
+{
+ static const struct {
+ uint32_t fRaw;
+ SkKey fKey;
+ } gKeys[] = {
+ { SK_MacUpKey, kUp_SkKey },
+ { SK_MacDownKey, kDown_SkKey },
+ { SK_MacLeftKey, kLeft_SkKey },
+ { SK_MacRightKey, kRight_SkKey },
+ { SK_MacReturnKey, kOK_SkKey },
+ { SK_MacDeleteKey, kBack_SkKey },
+ { SK_MacEndKey, kEnd_SkKey },
+ { SK_Mac0Key, k0_SkKey },
+ { SK_Mac1Key, k1_SkKey },
+ { SK_Mac2Key, k2_SkKey },
+ { SK_Mac3Key, k3_SkKey },
+ { SK_Mac4Key, k4_SkKey },
+ { SK_Mac5Key, k5_SkKey },
+ { SK_Mac6Key, k6_SkKey },
+ { SK_Mac7Key, k7_SkKey },
+ { SK_Mac8Key, k8_SkKey },
+ { SK_Mac9Key, k9_SkKey }
+ };
+
+ for (unsigned i = 0; i < SK_ARRAY_COUNT(gKeys); i++)
+ if (gKeys[i].fRaw == raw)
+ return gKeys[i].fKey;
+ return kNONE_SkKey;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////
+
+void SkEvent::SignalNonEmptyQueue()
+{
+ SkOSWindow::ForAllWindows([](void* hWND, SkOSWindow**) {
+ post_skwinevent((HWND)hWND);
+ });
+}
+
+static UINT_PTR gTimer;
+
+VOID CALLBACK sk_timer_proc(HWND hwnd, UINT uMsg, UINT_PTR idEvent, DWORD dwTime)
+{
+ SkEvent::ServiceQueueTimer();
+ //SkDebugf("timer task fired\n");
+}
+
+void SkEvent::SignalQueueTimer(SkMSec delay)
+{
+ if (gTimer)
+ {
+ KillTimer(NULL, gTimer);
+ gTimer = NULL;
+ }
+ if (delay)
+ {
+ gTimer = SetTimer(NULL, 0, delay, sk_timer_proc);
+ //SkDebugf("SetTimer of %d returned %d\n", delay, gTimer);
+ }
+}
+
+#if SK_SUPPORT_GPU
+
+bool SkOSWindow::attachGL(int msaaSampleCount, bool deepColor, AttachmentInfo* info) {
+ HDC dc = GetDC((HWND)fHWND);
+ if (NULL == fHGLRC) {
+ fHGLRC = SkCreateWGLContext(dc, msaaSampleCount, deepColor,
+ kGLPreferCompatibilityProfile_SkWGLContextRequest);
+ if (NULL == fHGLRC) {
+ return false;
+ }
+ glClearStencil(0);
+ glClearColor(0, 0, 0, 0);
+ glStencilMask(0xffffffff);
+ glClear(GL_STENCIL_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
+ }
+ if (wglMakeCurrent(dc, (HGLRC)fHGLRC)) {
+ // use DescribePixelFormat to get the stencil and color bit depth.
+ int pixelFormat = GetPixelFormat(dc);
+ PIXELFORMATDESCRIPTOR pfd;
+ DescribePixelFormat(dc, pixelFormat, sizeof(pfd), &pfd);
+ info->fStencilBits = pfd.cStencilBits;
+ // pfd.cColorBits includes alpha, so it will be 32 in 8/8/8/8 and 10/10/10/2
+ info->fColorBits = pfd.cRedBits + pfd.cGreenBits + pfd.cBlueBits;
+
+ // Get sample count if the MSAA WGL extension is present
+ SkWGLExtensions extensions;
+ if (extensions.hasExtension(dc, "WGL_ARB_multisample")) {
+ static const int kSampleCountAttr = SK_WGL_SAMPLES;
+ extensions.getPixelFormatAttribiv(dc,
+ pixelFormat,
+ 0,
+ 1,
+ &kSampleCountAttr,
+ &info->fSampleCount);
+ } else {
+ info->fSampleCount = 0;
+ }
+
+ glViewport(0, 0,
+ SkScalarRoundToInt(this->width()),
+ SkScalarRoundToInt(this->height()));
+ return true;
+ }
+ return false;
+}
+
+void SkOSWindow::detachGL() {
+ wglMakeCurrent(GetDC((HWND)fHWND), 0);
+ wglDeleteContext((HGLRC)fHGLRC);
+ fHGLRC = NULL;
+}
+
+void SkOSWindow::presentGL() {
+ HDC dc = GetDC((HWND)fHWND);
+ SwapBuffers(dc);
+ ReleaseDC((HWND)fHWND, dc);
+}
+
+#if SK_ANGLE
+
+static void* get_angle_egl_display(void* nativeDisplay) {
+ PFNEGLGETPLATFORMDISPLAYEXTPROC eglGetPlatformDisplayEXT;
+ eglGetPlatformDisplayEXT =
+ (PFNEGLGETPLATFORMDISPLAYEXTPROC)eglGetProcAddress("eglGetPlatformDisplayEXT");
+
+ // We expect ANGLE to support this extension
+ if (!eglGetPlatformDisplayEXT) {
+ return EGL_NO_DISPLAY;
+ }
+
+ EGLDisplay display = EGL_NO_DISPLAY;
+ // Try for an ANGLE D3D11 context, fall back to D3D9, and finally GL.
+ EGLint attribs[3][3] = {
+ {
+ EGL_PLATFORM_ANGLE_TYPE_ANGLE,
+ EGL_PLATFORM_ANGLE_TYPE_D3D11_ANGLE,
+ EGL_NONE
+ },
+ {
+ EGL_PLATFORM_ANGLE_TYPE_ANGLE,
+ EGL_PLATFORM_ANGLE_TYPE_D3D9_ANGLE,
+ EGL_NONE
+ },
+ };
+ for (int i = 0; i < 3 && display == EGL_NO_DISPLAY; ++i) {
+ display = eglGetPlatformDisplayEXT(EGL_PLATFORM_ANGLE_ANGLE,nativeDisplay, attribs[i]);
+ }
+ return display;
+}
+
+struct ANGLEAssembleContext {
+ ANGLEAssembleContext() {
+ fEGL = GetModuleHandle("libEGL.dll");
+ fGL = GetModuleHandle("libGLESv2.dll");
+ }
+
+ bool isValid() const { return SkToBool(fEGL) && SkToBool(fGL); }
+
+ HMODULE fEGL;
+ HMODULE fGL;
+};
+
+static GrGLFuncPtr angle_get_gl_proc(void* ctx, const char name[]) {
+ const ANGLEAssembleContext& context = *reinterpret_cast<const ANGLEAssembleContext*>(ctx);
+ GrGLFuncPtr proc = (GrGLFuncPtr) GetProcAddress(context.fGL, name);
+ if (proc) {
+ return proc;
+ }
+ proc = (GrGLFuncPtr) GetProcAddress(context.fEGL, name);
+ if (proc) {
+ return proc;
+ }
+ return eglGetProcAddress(name);
+}
+
+static const GrGLInterface* get_angle_gl_interface() {
+ ANGLEAssembleContext context;
+ if (!context.isValid()) {
+ return nullptr;
+ }
+ return GrGLAssembleGLESInterface(&context, angle_get_gl_proc);
+}
+
+bool create_ANGLE(EGLNativeWindowType hWnd,
+ int msaaSampleCount,
+ EGLDisplay* eglDisplay,
+ EGLContext* eglContext,
+ EGLSurface* eglSurface,
+ EGLConfig* eglConfig) {
+ static const EGLint contextAttribs[] = {
+ EGL_CONTEXT_CLIENT_VERSION, 2,
+ EGL_NONE, EGL_NONE
+ };
+ static const EGLint configAttribList[] = {
+ EGL_RED_SIZE, 8,
+ EGL_GREEN_SIZE, 8,
+ EGL_BLUE_SIZE, 8,
+ EGL_ALPHA_SIZE, 8,
+ EGL_DEPTH_SIZE, 8,
+ EGL_STENCIL_SIZE, 8,
+ EGL_NONE
+ };
+ static const EGLint surfaceAttribList[] = {
+ EGL_NONE, EGL_NONE
+ };
+
+ EGLDisplay display = get_angle_egl_display(GetDC(hWnd));
+
+ if (EGL_NO_DISPLAY == display) {
+ SkDebugf("Could not create ANGLE egl display!\n");
+ return false;
+ }
+
+ // Initialize EGL
+ EGLint majorVersion, minorVersion;
+ if (!eglInitialize(display, &majorVersion, &minorVersion)) {
+ return false;
+ }
+
+ EGLint numConfigs;
+ if (!eglGetConfigs(display, NULL, 0, &numConfigs)) {
+ return false;
+ }
+
+ // Choose config
+ bool foundConfig = false;
+ if (msaaSampleCount) {
+ static const int kConfigAttribListCnt =
+ SK_ARRAY_COUNT(configAttribList);
+ EGLint msaaConfigAttribList[kConfigAttribListCnt + 4];
+ memcpy(msaaConfigAttribList,
+ configAttribList,
+ sizeof(configAttribList));
+ SkASSERT(EGL_NONE == msaaConfigAttribList[kConfigAttribListCnt - 1]);
+ msaaConfigAttribList[kConfigAttribListCnt - 1] = EGL_SAMPLE_BUFFERS;
+ msaaConfigAttribList[kConfigAttribListCnt + 0] = 1;
+ msaaConfigAttribList[kConfigAttribListCnt + 1] = EGL_SAMPLES;
+ msaaConfigAttribList[kConfigAttribListCnt + 2] = msaaSampleCount;
+ msaaConfigAttribList[kConfigAttribListCnt + 3] = EGL_NONE;
+ if (eglChooseConfig(display, msaaConfigAttribList, eglConfig, 1, &numConfigs)) {
+ SkASSERT(numConfigs > 0);
+ foundConfig = true;
+ }
+ }
+ if (!foundConfig) {
+ if (!eglChooseConfig(display, configAttribList, eglConfig, 1, &numConfigs)) {
+ return false;
+ }
+ }
+
+ // Create a surface
+ EGLSurface surface = eglCreateWindowSurface(display, *eglConfig,
+ (EGLNativeWindowType)hWnd,
+ surfaceAttribList);
+ if (surface == EGL_NO_SURFACE) {
+ return false;
+ }
+
+ // Create a GL context
+ EGLContext context = eglCreateContext(display, *eglConfig,
+ EGL_NO_CONTEXT,
+ contextAttribs );
+ if (context == EGL_NO_CONTEXT ) {
+ return false;
+ }
+
+ // Make the context current
+ if (!eglMakeCurrent(display, surface, surface, context)) {
+ return false;
+ }
+
+ *eglDisplay = display;
+ *eglContext = context;
+ *eglSurface = surface;
+ return true;
+}
+
+bool SkOSWindow::attachANGLE(int msaaSampleCount, AttachmentInfo* info) {
+ if (EGL_NO_DISPLAY == fDisplay) {
+ bool bResult = create_ANGLE((HWND)fHWND,
+ msaaSampleCount,
+ &fDisplay,
+ &fContext,
+ &fSurface,
+ &fConfig);
+ if (false == bResult) {
+ return false;
+ }
+ fANGLEInterface.reset(get_angle_gl_interface());
+ if (!fANGLEInterface) {
+ this->detachANGLE();
+ return false;
+ }
+ GL_CALL(fANGLEInterface, ClearStencil(0));
+ GL_CALL(fANGLEInterface, ClearColor(0, 0, 0, 0));
+ GL_CALL(fANGLEInterface, StencilMask(0xffffffff));
+ GL_CALL(fANGLEInterface, Clear(GL_STENCIL_BUFFER_BIT | GL_COLOR_BUFFER_BIT));
+ }
+ if (!eglMakeCurrent(fDisplay, fSurface, fSurface, fContext)) {
+ this->detachANGLE();
+ return false;
+ }
+ eglGetConfigAttrib(fDisplay, fConfig, EGL_STENCIL_SIZE, &info->fStencilBits);
+ eglGetConfigAttrib(fDisplay, fConfig, EGL_SAMPLES, &info->fSampleCount);
+
+ GL_CALL(fANGLEInterface, Viewport(0, 0, SkScalarRoundToInt(this->width()),
+ SkScalarRoundToInt(this->height())));
+ return true;
+}
+
+void SkOSWindow::detachANGLE() {
+ fANGLEInterface.reset(nullptr);
+ eglMakeCurrent(fDisplay, EGL_NO_SURFACE , EGL_NO_SURFACE , EGL_NO_CONTEXT);
+
+ eglDestroyContext(fDisplay, fContext);
+ fContext = EGL_NO_CONTEXT;
+
+ eglDestroySurface(fDisplay, fSurface);
+ fSurface = EGL_NO_SURFACE;
+
+ eglTerminate(fDisplay);
+ fDisplay = EGL_NO_DISPLAY;
+}
+
+void SkOSWindow::presentANGLE() {
+ GL_CALL(fANGLEInterface, Flush());
+
+ eglSwapBuffers(fDisplay, fSurface);
+}
+#endif // SK_ANGLE
+
+#endif // SK_SUPPORT_GPU
+
+// return true on success
+bool SkOSWindow::attach(SkBackEndTypes attachType, int msaaSampleCount, bool deepColor,
+ AttachmentInfo* info) {
+
+ // attach doubles as "windowResize" so we need to allow
+ // already bound states to pass through again
+ // TODO: split out the resize functionality
+// SkASSERT(kNone_BackEndType == fAttached);
+ bool result = true;
+
+ switch (attachType) {
+ case kNone_BackEndType:
+ // nothing to do
+ break;
+#if SK_SUPPORT_GPU
+ case kNativeGL_BackEndType:
+ result = attachGL(msaaSampleCount, deepColor, info);
+ break;
+#if SK_ANGLE
+ case kANGLE_BackEndType:
+ result = attachANGLE(msaaSampleCount, info);
+ break;
+#endif // SK_ANGLE
+#endif // SK_SUPPORT_GPU
+ default:
+ SkASSERT(false);
+ result = false;
+ break;
+ }
+
+ if (result) {
+ fAttached = attachType;
+ }
+
+ return result;
+}
+
+void SkOSWindow::release() {
+ switch (fAttached) {
+ case kNone_BackEndType:
+ // nothing to do
+ break;
+#if SK_SUPPORT_GPU
+ case kNativeGL_BackEndType:
+ detachGL();
+ break;
+#if SK_ANGLE
+ case kANGLE_BackEndType:
+ detachANGLE();
+ break;
+#endif // SK_ANGLE
+#endif // SK_SUPPORT_GPU
+ default:
+ SkASSERT(false);
+ break;
+ }
+ fAttached = kNone_BackEndType;
+}
+
+void SkOSWindow::present() {
+ switch (fAttached) {
+ case kNone_BackEndType:
+ // nothing to do
+ return;
+#if SK_SUPPORT_GPU
+ case kNativeGL_BackEndType:
+ presentGL();
+ break;
+#if SK_ANGLE
+ case kANGLE_BackEndType:
+ presentANGLE();
+ break;
+#endif // SK_ANGLE
+#endif // SK_SUPPORT_GPU
+ default:
+ SkASSERT(false);
+ break;
+ }
+}
+
+bool SkOSWindow::makeFullscreen() {
+ if (fFullscreen) {
+ return true;
+ }
+#if SK_SUPPORT_GPU
+ if (fHGLRC) {
+ this->detachGL();
+ }
+#endif // SK_SUPPORT_GPU
+ // This is hacked together from various sources on the web. It can certainly be improved and be
+ // made more robust.
+
+ // Save current window/resolution information. We do this in case we ever implement switching
+ // back to windowed mode.
+ fSavedWindowState.fZoomed = SkToBool(IsZoomed((HWND)fHWND));
+ if (fSavedWindowState.fZoomed) {
+ SendMessage((HWND)fHWND, WM_SYSCOMMAND, SC_RESTORE, 0);
+ }
+ fSavedWindowState.fStyle = GetWindowLong((HWND)fHWND, GWL_STYLE);
+ fSavedWindowState.fExStyle = GetWindowLong((HWND)fHWND, GWL_EXSTYLE);
+ GetWindowRect((HWND)fHWND, &fSavedWindowState.fRect);
+ DEVMODE currScreenSettings;
+ memset(&currScreenSettings,0,sizeof(currScreenSettings));
+ currScreenSettings.dmSize = sizeof(currScreenSettings);
+ EnumDisplaySettings(NULL, ENUM_CURRENT_SETTINGS, &currScreenSettings);
+ fSavedWindowState.fScreenWidth = currScreenSettings.dmPelsWidth;
+ fSavedWindowState.fScreenHeight = currScreenSettings.dmPelsHeight;
+ fSavedWindowState.fScreenBits = currScreenSettings.dmBitsPerPel;
+ fSavedWindowState.fHWND = fHWND;
+
+ // Try different sizes to find an allowed setting? Use ChangeDisplaySettingsEx?
+ static const int kWidth = 1280;
+ static const int kHeight = 1024;
+ DEVMODE newScreenSettings;
+ memset(&newScreenSettings, 0, sizeof(newScreenSettings));
+ newScreenSettings.dmSize = sizeof(newScreenSettings);
+ newScreenSettings.dmPelsWidth = kWidth;
+ newScreenSettings.dmPelsHeight = kHeight;
+ newScreenSettings.dmBitsPerPel = 32;
+ newScreenSettings.dmFields = DM_BITSPERPEL|DM_PELSWIDTH|DM_PELSHEIGHT;
+ if (ChangeDisplaySettings(&newScreenSettings, CDS_FULLSCREEN) != DISP_CHANGE_SUCCESSFUL) {
+ return false;
+ }
+ RECT WindowRect;
+ WindowRect.left = 0;
+ WindowRect.right = kWidth;
+ WindowRect.top = 0;
+ WindowRect.bottom = kHeight;
+ ShowCursor(FALSE);
+ AdjustWindowRectEx(&WindowRect, WS_POPUP, FALSE, WS_EX_APPWINDOW);
+ HWND fsHWND = CreateWindowEx(
+ WS_EX_APPWINDOW,
+ fWinInit.fClass,
+ NULL,
+ WS_CLIPSIBLINGS | WS_CLIPCHILDREN | WS_POPUP,
+ 0, 0, WindowRect.right-WindowRect.left, WindowRect.bottom-WindowRect.top,
+ NULL,
+ NULL,
+ fWinInit.fInstance,
+ NULL
+ );
+ if (!fsHWND) {
+ return false;
+ }
+ // Hide the old window and set the entry in the global mapping for this SkOSWindow to the
+ // new HWND.
+ ShowWindow((HWND)fHWND, SW_HIDE);
+ gHwndToOSWindowMap.remove(fHWND);
+ fHWND = fsHWND;
+ gHwndToOSWindowMap.set(fHWND, this);
+ this->updateSize();
+
+ fFullscreen = true;
+ return true;
+}
+
+void SkOSWindow::setVsync(bool enable) {
+ SkWGLExtensions wgl;
+ wgl.swapInterval(enable ? 1 : 0);
+}
+
+void SkOSWindow::closeWindow() {
+ DestroyWindow((HWND)fHWND);
+ if (fFullscreen) {
+ DestroyWindow((HWND)fSavedWindowState.fHWND);
+ }
+ gHwndToOSWindowMap.remove(fHWND);
+}
+#endif
diff --git a/gfx/skia/skia/src/views/win/skia_win.cpp b/gfx/skia/skia/src/views/win/skia_win.cpp
new file mode 100644
index 000000000..df600d771
--- /dev/null
+++ b/gfx/skia/skia/src/views/win/skia_win.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+
+#include <tchar.h>
+
+#include "SkApplication.h"
+#include "SkGraphics.h"
+#include "SkOSWindow_Win.h"
+
+LRESULT CALLBACK WndProc(HWND, UINT, WPARAM, LPARAM);
+
+// Returns the main window Win32 class name.
+static const TCHAR* register_class(HINSTANCE hInstance) {
+ WNDCLASSEX wcex;
+ // The main window class name
+ static const TCHAR gSZWindowClass[] = _T("SkiaApp");
+
+ wcex.cbSize = sizeof(WNDCLASSEX);
+
+ wcex.style = CS_HREDRAW | CS_VREDRAW | CS_OWNDC;
+ wcex.lpfnWndProc = WndProc;
+ wcex.cbClsExtra = 0;
+ wcex.cbWndExtra = 0;
+ wcex.hInstance = hInstance;
+ wcex.hIcon = NULL;
+ wcex.hCursor = NULL;
+ wcex.hbrBackground = (HBRUSH)(COLOR_WINDOW+1);
+ wcex.lpszMenuName = NULL;
+ wcex.lpszClassName = gSZWindowClass;
+ wcex.hIconSm = NULL;
+
+ RegisterClassEx(&wcex);
+
+ return gSZWindowClass;
+}
+
+static char* tchar_to_utf8(const TCHAR* str) {
+#ifdef _UNICODE
+ int size = WideCharToMultiByte(CP_UTF8, 0, str, wcslen(str), NULL, 0, NULL, NULL);
+ char* str8 = (char*) sk_malloc_throw(size+1);
+ WideCharToMultiByte(CP_UTF8, 0, str, wcslen(str), str8, size, NULL, NULL);
+ str8[size] = '\0';
+ return str8;
+#else
+ return _strdup(str);
+#endif
+}
+
+// This file can work with GUI or CONSOLE subsystem types since we define _tWinMain and main().
+
+static int main_common(HINSTANCE hInstance, int show, int argc, char**argv);
+
+int APIENTRY _tWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPTSTR lpCmdLine,
+ int nCmdShow) {
+
+ // convert from lpCmdLine to argc, argv.
+ char* argv[4096];
+ int argc = 0;
+ TCHAR exename[1024], *next;
+ int exenameLen = GetModuleFileName(NULL, exename, SK_ARRAY_COUNT(exename));
+ // we're ignoring the possibility that the exe name exceeds the exename buffer
+ (void) exenameLen;
+ argv[argc++] = tchar_to_utf8(exename);
+ TCHAR* arg = _tcstok_s(lpCmdLine, _T(" "), &next);
+ while (arg != NULL) {
+ argv[argc++] = tchar_to_utf8(arg);
+ arg = _tcstok_s(NULL, _T(" "), &next);
+ }
+ int result = main_common(hInstance, nCmdShow, argc, argv);
+ for (int i = 0; i < argc; ++i) {
+ sk_free(argv[i]);
+ }
+ return result;
+}
+
+int main(int argc, char**argv) {
+ SkGraphics::Init();
+ return main_common(GetModuleHandle(NULL), SW_SHOW, argc, argv);
+}
+
+static int main_common(HINSTANCE hInstance, int show, int argc, char**argv) {
+ const TCHAR* windowClass = register_class(hInstance);
+
+ application_init();
+
+ SkOSWindow::WindowInit winInit;
+ winInit.fInstance = hInstance;
+ winInit.fClass = windowClass;
+
+ create_sk_window(&winInit, argc, argv);
+ SkOSWindow::ForAllWindows([show](void* hWnd, SkOSWindow**) {
+ ShowWindow((HWND)hWnd, show);
+ UpdateWindow((HWND)hWnd); }
+ );
+
+ MSG msg;
+ // Main message loop
+ while (GetMessage(&msg, NULL, 0, 0)) {
+ if (true) {
+ TranslateMessage(&msg);
+ DispatchMessage(&msg);
+ }
+ }
+
+ application_term();
+
+ return (int) msg.wParam;
+}
+
+extern SkOSWindow* create_sk_window(void* hwnd, int argc, char** argv);
+
+LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam) {
+ switch (message) {
+ case WM_COMMAND:
+ return DefWindowProc(hWnd, message, wParam, lParam);
+ case WM_DESTROY:
+ PostQuitMessage(0);
+ break;
+ default: {
+ SkOSWindow* window = SkOSWindow::GetOSWindowForHWND(hWnd);
+ if (window && window->wndProc(hWnd, message, wParam, lParam)) {
+ return 0;
+ } else {
+ return DefWindowProc(hWnd, message, wParam, lParam);
+ }
+ }
+ }
+ return 0;
+}
diff --git a/gfx/skia/skia/src/xml/SkDOM.cpp b/gfx/skia/skia/src/xml/SkDOM.cpp
new file mode 100644
index 000000000..38ba669bb
--- /dev/null
+++ b/gfx/skia/skia/src/xml/SkDOM.cpp
@@ -0,0 +1,477 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkDOM.h"
+#include "SkStream.h"
+#include "SkXMLWriter.h"
+
+/////////////////////////////////////////////////////////////////////////
+
+#include "SkXMLParser.h"
+bool SkXMLParser::parse(const SkDOM& dom, const SkDOMNode* node)
+{
+ const char* elemName = dom.getName(node);
+
+ if (this->startElement(elemName))
+ return false;
+
+ SkDOM::AttrIter iter(dom, node);
+ const char* name, *value;
+
+ while ((name = iter.next(&value)) != nullptr)
+ if (this->addAttribute(name, value))
+ return false;
+
+ if ((node = dom.getFirstChild(node)) != nullptr)
+ do {
+ if (!this->parse(dom, node))
+ return false;
+ } while ((node = dom.getNextSibling(node)) != nullptr);
+
+ return !this->endElement(elemName);
+}
+
+/////////////////////////////////////////////////////////////////////////
+
+struct SkDOMAttr {
+ const char* fName;
+ const char* fValue;
+};
+
+struct SkDOMNode {
+ const char* fName;
+ SkDOMNode* fFirstChild;
+ SkDOMNode* fNextSibling;
+ uint16_t fAttrCount;
+ uint8_t fType;
+ uint8_t fPad;
+
+ const SkDOMAttr* attrs() const
+ {
+ return (const SkDOMAttr*)(this + 1);
+ }
+ SkDOMAttr* attrs()
+ {
+ return (SkDOMAttr*)(this + 1);
+ }
+};
+
+/////////////////////////////////////////////////////////////////////////
+
+#define kMinChunkSize 512
+
+SkDOM::SkDOM() : fAlloc(kMinChunkSize), fRoot(nullptr)
+{
+}
+
+SkDOM::~SkDOM()
+{
+}
+
+const SkDOM::Node* SkDOM::getRootNode() const
+{
+ return fRoot;
+}
+
+const SkDOM::Node* SkDOM::getFirstChild(const Node* node, const char name[]) const
+{
+ SkASSERT(node);
+ const Node* child = node->fFirstChild;
+
+ if (name)
+ {
+ for (; child != nullptr; child = child->fNextSibling)
+ if (!strcmp(name, child->fName))
+ break;
+ }
+ return child;
+}
+
+const SkDOM::Node* SkDOM::getNextSibling(const Node* node, const char name[]) const
+{
+ SkASSERT(node);
+ const Node* sibling = node->fNextSibling;
+ if (name)
+ {
+ for (; sibling != nullptr; sibling = sibling->fNextSibling)
+ if (!strcmp(name, sibling->fName))
+ break;
+ }
+ return sibling;
+}
+
+SkDOM::Type SkDOM::getType(const Node* node) const
+{
+ SkASSERT(node);
+ return (Type)node->fType;
+}
+
+const char* SkDOM::getName(const Node* node) const
+{
+ SkASSERT(node);
+ return node->fName;
+}
+
+const char* SkDOM::findAttr(const Node* node, const char name[]) const
+{
+ SkASSERT(node);
+ const Attr* attr = node->attrs();
+ const Attr* stop = attr + node->fAttrCount;
+
+ while (attr < stop)
+ {
+ if (!strcmp(attr->fName, name))
+ return attr->fValue;
+ attr += 1;
+ }
+ return nullptr;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+
+const SkDOM::Attr* SkDOM::getFirstAttr(const Node* node) const
+{
+ return node->fAttrCount ? node->attrs() : nullptr;
+}
+
+const SkDOM::Attr* SkDOM::getNextAttr(const Node* node, const Attr* attr) const
+{
+ SkASSERT(node);
+ if (attr == nullptr)
+ return nullptr;
+ return (attr - node->attrs() + 1) < node->fAttrCount ? attr + 1 : nullptr;
+}
+
+const char* SkDOM::getAttrName(const Node* node, const Attr* attr) const
+{
+ SkASSERT(node);
+ SkASSERT(attr);
+ return attr->fName;
+}
+
+const char* SkDOM::getAttrValue(const Node* node, const Attr* attr) const
+{
+ SkASSERT(node);
+ SkASSERT(attr);
+ return attr->fValue;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+
+SkDOM::AttrIter::AttrIter(const SkDOM&, const SkDOM::Node* node)
+{
+ SkASSERT(node);
+ fAttr = node->attrs();
+ fStop = fAttr + node->fAttrCount;
+}
+
+const char* SkDOM::AttrIter::next(const char** value)
+{
+ const char* name = nullptr;
+
+ if (fAttr < fStop)
+ {
+ name = fAttr->fName;
+ if (value)
+ *value = fAttr->fValue;
+ fAttr += 1;
+ }
+ return name;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+#include "SkXMLParser.h"
+#include "SkTDArray.h"
+
+static char* dupstr(SkChunkAlloc* chunk, const char src[])
+{
+ SkASSERT(chunk && src);
+ size_t len = strlen(src);
+ char* dst = (char*)chunk->alloc(len + 1, SkChunkAlloc::kThrow_AllocFailType);
+ memcpy(dst, src, len + 1);
+ return dst;
+}
+
+class SkDOMParser : public SkXMLParser {
+public:
+ SkDOMParser(SkChunkAlloc* chunk) : SkXMLParser(&fParserError), fAlloc(chunk)
+ {
+ fAlloc->reset();
+ fRoot = nullptr;
+ fLevel = 0;
+ fNeedToFlush = true;
+ }
+ SkDOM::Node* getRoot() const { return fRoot; }
+ SkXMLParserError fParserError;
+
+protected:
+ void flushAttributes()
+ {
+ SkASSERT(fLevel > 0);
+
+ int attrCount = fAttrs.count();
+
+ SkDOM::Node* node = (SkDOM::Node*)fAlloc->alloc(sizeof(SkDOM::Node) + attrCount * sizeof(SkDOM::Attr),
+ SkChunkAlloc::kThrow_AllocFailType);
+
+ node->fName = fElemName;
+ node->fFirstChild = nullptr;
+ node->fAttrCount = SkToU16(attrCount);
+ node->fType = fElemType;
+
+ if (fRoot == nullptr)
+ {
+ node->fNextSibling = nullptr;
+ fRoot = node;
+ }
+ else // this adds siblings in reverse order. gets corrected in onEndElement()
+ {
+ SkDOM::Node* parent = fParentStack.top();
+ SkASSERT(fRoot && parent);
+ node->fNextSibling = parent->fFirstChild;
+ parent->fFirstChild = node;
+ }
+ *fParentStack.push() = node;
+
+ sk_careful_memcpy(node->attrs(), fAttrs.begin(), attrCount * sizeof(SkDOM::Attr));
+ fAttrs.reset();
+
+ }
+
+ bool onStartElement(const char elem[]) override {
+ this->startCommon(elem, SkDOM::kElement_Type);
+ return false;
+ }
+
+ bool onAddAttribute(const char name[], const char value[]) override {
+ SkDOM::Attr* attr = fAttrs.append();
+ attr->fName = dupstr(fAlloc, name);
+ attr->fValue = dupstr(fAlloc, value);
+ return false;
+ }
+
+ bool onEndElement(const char elem[]) override {
+ --fLevel;
+ if (fNeedToFlush)
+ this->flushAttributes();
+ fNeedToFlush = false;
+
+ SkDOM::Node* parent;
+
+ fParentStack.pop(&parent);
+
+ SkDOM::Node* child = parent->fFirstChild;
+ SkDOM::Node* prev = nullptr;
+ while (child)
+ {
+ SkDOM::Node* next = child->fNextSibling;
+ child->fNextSibling = prev;
+ prev = child;
+ child = next;
+ }
+ parent->fFirstChild = prev;
+ return false;
+ }
+
+ bool onText(const char text[], int len) override {
+ SkString str(text, len);
+ this->startCommon(str.c_str(), SkDOM::kText_Type);
+ this->SkDOMParser::onEndElement(str.c_str());
+
+ return false;
+ }
+
+private:
+ void startCommon(const char elem[], SkDOM::Type type) {
+ if (fLevel > 0 && fNeedToFlush)
+ this->flushAttributes();
+
+ fNeedToFlush = true;
+ fElemName = dupstr(fAlloc, elem);
+ fElemType = type;
+ ++fLevel;
+ }
+
+ SkTDArray<SkDOM::Node*> fParentStack;
+ SkChunkAlloc* fAlloc;
+ SkDOM::Node* fRoot;
+ bool fNeedToFlush;
+
+ // state needed for flushAttributes()
+ SkTDArray<SkDOM::Attr> fAttrs;
+ char* fElemName;
+ SkDOM::Type fElemType;
+ int fLevel;
+};
+
+const SkDOM::Node* SkDOM::build(SkStream& docStream) {
+ SkDOMParser parser(&fAlloc);
+ if (!parser.parse(docStream))
+ {
+ SkDEBUGCODE(SkDebugf("xml parse error, line %d\n", parser.fParserError.getLineNumber());)
+ fRoot = nullptr;
+ fAlloc.reset();
+ return nullptr;
+ }
+ fRoot = parser.getRoot();
+ return fRoot;
+}
+
+///////////////////////////////////////////////////////////////////////////
+
+static void walk_dom(const SkDOM& dom, const SkDOM::Node* node, SkXMLParser* parser)
+{
+ const char* elem = dom.getName(node);
+ if (dom.getType(node) == SkDOM::kText_Type) {
+ SkASSERT(dom.countChildren(node) == 0);
+ parser->text(elem, SkToInt(strlen(elem)));
+ return;
+ }
+
+ parser->startElement(elem);
+
+ SkDOM::AttrIter iter(dom, node);
+ const char* name;
+ const char* value;
+ while ((name = iter.next(&value)) != nullptr)
+ parser->addAttribute(name, value);
+
+ node = dom.getFirstChild(node, nullptr);
+ while (node)
+ {
+ walk_dom(dom, node, parser);
+ node = dom.getNextSibling(node, nullptr);
+ }
+
+ parser->endElement(elem);
+}
+
+const SkDOM::Node* SkDOM::copy(const SkDOM& dom, const SkDOM::Node* node)
+{
+ SkDOMParser parser(&fAlloc);
+
+ walk_dom(dom, node, &parser);
+
+ fRoot = parser.getRoot();
+ return fRoot;
+}
+
+SkXMLParser* SkDOM::beginParsing() {
+ SkASSERT(!fParser);
+ fParser.reset(new SkDOMParser(&fAlloc));
+
+ return fParser.get();
+}
+
+const SkDOM::Node* SkDOM::finishParsing() {
+ SkASSERT(fParser);
+ fRoot = fParser->getRoot();
+ fParser.reset();
+
+ return fRoot;
+}
+
+//////////////////////////////////////////////////////////////////////////
+
+int SkDOM::countChildren(const Node* node, const char elem[]) const
+{
+ int count = 0;
+
+ node = this->getFirstChild(node, elem);
+ while (node)
+ {
+ count += 1;
+ node = this->getNextSibling(node, elem);
+ }
+ return count;
+}
+
+//////////////////////////////////////////////////////////////////////////
+
+#include "SkParse.h"
+
+bool SkDOM::findS32(const Node* node, const char name[], int32_t* value) const
+{
+ const char* vstr = this->findAttr(node, name);
+ return vstr && SkParse::FindS32(vstr, value);
+}
+
+bool SkDOM::findScalars(const Node* node, const char name[], SkScalar value[], int count) const
+{
+ const char* vstr = this->findAttr(node, name);
+ return vstr && SkParse::FindScalars(vstr, value, count);
+}
+
+bool SkDOM::findHex(const Node* node, const char name[], uint32_t* value) const
+{
+ const char* vstr = this->findAttr(node, name);
+ return vstr && SkParse::FindHex(vstr, value);
+}
+
+bool SkDOM::findBool(const Node* node, const char name[], bool* value) const
+{
+ const char* vstr = this->findAttr(node, name);
+ return vstr && SkParse::FindBool(vstr, value);
+}
+
+int SkDOM::findList(const Node* node, const char name[], const char list[]) const
+{
+ const char* vstr = this->findAttr(node, name);
+ return vstr ? SkParse::FindList(vstr, list) : -1;
+}
+
+bool SkDOM::hasAttr(const Node* node, const char name[], const char value[]) const
+{
+ const char* vstr = this->findAttr(node, name);
+ return vstr && !strcmp(vstr, value);
+}
+
+bool SkDOM::hasS32(const Node* node, const char name[], int32_t target) const
+{
+ const char* vstr = this->findAttr(node, name);
+ int32_t value;
+ return vstr && SkParse::FindS32(vstr, &value) && value == target;
+}
+
+bool SkDOM::hasScalar(const Node* node, const char name[], SkScalar target) const
+{
+ const char* vstr = this->findAttr(node, name);
+ SkScalar value;
+ return vstr && SkParse::FindScalar(vstr, &value) && value == target;
+}
+
+bool SkDOM::hasHex(const Node* node, const char name[], uint32_t target) const
+{
+ const char* vstr = this->findAttr(node, name);
+ uint32_t value;
+ return vstr && SkParse::FindHex(vstr, &value) && value == target;
+}
+
+bool SkDOM::hasBool(const Node* node, const char name[], bool target) const
+{
+ const char* vstr = this->findAttr(node, name);
+ bool value;
+ return vstr && SkParse::FindBool(vstr, &value) && value == target;
+}
+
+//////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+
+void SkDOM::dump(const Node* node, int level) const
+{
+ if (node == nullptr)
+ node = this->getRootNode();
+
+ SkDebugWStream debugStream;
+ SkXMLStreamWriter xmlWriter(&debugStream);
+ xmlWriter.writeDOM(*this, node, false);
+}
+
+#endif
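
As a rough illustration of the SkDOM API added above (a sketch, not part of the patch), the snippet below parses an XML document held in memory and walks the root's immediate children; the "width" attribute and the function name dump_widths are assumptions made up for the example.

    #include "SkDOM.h"
    #include "SkStream.h"
    #include "SkTypes.h"

    static void dump_widths(const char xml[], size_t len) {
        SkMemoryStream stream(xml, len);
        SkDOM dom;
        const SkDOM::Node* root = dom.build(stream);        // nullptr on parse error
        if (!root) {
            return;
        }
        // Visit each immediate child of the root element.
        for (const SkDOM::Node* child = dom.getFirstChild(root, nullptr); child;
             child = dom.getNextSibling(child, nullptr)) {
            int32_t width;
            if (dom.findS32(child, "width", &width)) {      // hypothetical attribute name
                SkDebugf("%s width=%d\n", dom.getName(child), width);
            }
        }
    }
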
diff --git a/gfx/skia/skia/src/xml/SkXMLParser.cpp b/gfx/skia/skia/src/xml/SkXMLParser.cpp
new file mode 100644
index 000000000..23c4e672b
--- /dev/null
+++ b/gfx/skia/skia/src/xml/SkXMLParser.cpp
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "expat.h"
+
+#include "SkStream.h"
+#include "SkString.h"
+#include "SkTypes.h"
+#include "SkXMLParser.h"
+
+static char const* const gErrorStrings[] = {
+ "empty or missing file ",
+ "unknown element ",
+ "unknown attribute name ",
+ "error in attribute value ",
+ "duplicate ID ",
+ "unknown error "
+};
+
+SkXMLParserError::SkXMLParserError() : fCode(kNoError), fLineNumber(-1),
+ fNativeCode(-1)
+{
+ reset();
+}
+
+SkXMLParserError::~SkXMLParserError()
+{
+ // need a virtual destructor for our subclasses
+}
+
+void SkXMLParserError::getErrorString(SkString* str) const
+{
+ SkASSERT(str);
+ SkString temp;
+ if (fCode != kNoError) {
+ if ((unsigned)fCode < SK_ARRAY_COUNT(gErrorStrings))
+ temp.set(gErrorStrings[fCode - 1]);
+ temp.append(fNoun);
+ } else
+ SkXMLParser::GetNativeErrorString(fNativeCode, &temp);
+ str->append(temp);
+}
+
+void SkXMLParserError::reset() {
+ fCode = kNoError;
+ fLineNumber = -1;
+ fNativeCode = -1;
+}
+
+////////////////
+
+namespace {
+
+const XML_Memory_Handling_Suite sk_XML_alloc = {
+ sk_malloc_throw,
+ sk_realloc_throw,
+ sk_free
+};
+
+struct ParsingContext {
+ ParsingContext(SkXMLParser* parser)
+ : fParser(parser)
+ , fXMLParser(XML_ParserCreate_MM(nullptr, &sk_XML_alloc, nullptr)) { }
+
+ void flushText() {
+ if (!fBufferedText.isEmpty()) {
+ fParser->text(fBufferedText.c_str(), SkTo<int>(fBufferedText.size()));
+ fBufferedText.reset();
+ }
+ }
+
+ void appendText(const char* txt, size_t len) {
+ fBufferedText.append(txt, len);
+ }
+
+ SkXMLParser* fParser;
+ SkAutoTCallVProc<skstd::remove_pointer_t<XML_Parser>, XML_ParserFree> fXMLParser;
+
+private:
+ SkString fBufferedText;
+};
+
+#define HANDLER_CONTEXT(arg, name) ParsingContext* name = static_cast<ParsingContext*>(arg);
+
+void XMLCALL start_element_handler(void *data, const char* tag, const char** attributes) {
+ HANDLER_CONTEXT(data, ctx);
+ ctx->flushText();
+
+ ctx->fParser->startElement(tag);
+
+ for (size_t i = 0; attributes[i]; i += 2) {
+ ctx->fParser->addAttribute(attributes[i], attributes[i + 1]);
+ }
+}
+
+void XMLCALL end_element_handler(void* data, const char* tag) {
+ HANDLER_CONTEXT(data, ctx);
+ ctx->flushText();
+
+ ctx->fParser->endElement(tag);
+}
+
+void XMLCALL text_handler(void *data, const char* txt, int len) {
+ HANDLER_CONTEXT(data, ctx);
+
+ ctx->appendText(txt, SkTo<size_t>(len));
+}
+
+void XMLCALL entity_decl_handler(void *data,
+ const XML_Char *entityName,
+ int is_parameter_entity,
+ const XML_Char *value,
+ int value_length,
+ const XML_Char *base,
+ const XML_Char *systemId,
+ const XML_Char *publicId,
+ const XML_Char *notationName) {
+ HANDLER_CONTEXT(data, ctx);
+
+ SkDebugf("'%s' entity declaration found, stopping processing", entityName);
+ XML_StopParser(ctx->fXMLParser, XML_FALSE);
+}
+
+} // anonymous namespace
+
+SkXMLParser::SkXMLParser(SkXMLParserError* parserError) : fParser(nullptr), fError(parserError)
+{
+}
+
+SkXMLParser::~SkXMLParser()
+{
+}
+
+bool SkXMLParser::parse(SkStream& docStream)
+{
+ ParsingContext ctx(this);
+ if (!ctx.fXMLParser) {
+ SkDebugf("could not create XML parser\n");
+ return false;
+ }
+
+ XML_SetUserData(ctx.fXMLParser, &ctx);
+ XML_SetElementHandler(ctx.fXMLParser, start_element_handler, end_element_handler);
+ XML_SetCharacterDataHandler(ctx.fXMLParser, text_handler);
+
+ // Disable entity processing, to inhibit internal entity expansion. See expat CVE-2013-0340.
+ XML_SetEntityDeclHandler(ctx.fXMLParser, entity_decl_handler);
+
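+ // 512-byte read chunks in release builds; SkDEBUGCODE shrinks this to 5 bytes in
+ // debug builds, presumably to exercise the incremental ParseBuffer loop below.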
+ static const int kBufferSize = 512 SkDEBUGCODE( - 507);
+ bool done = false;
+ do {
+ void* buffer = XML_GetBuffer(ctx.fXMLParser, kBufferSize);
+ if (!buffer) {
+ SkDebugf("could not buffer enough to continue\n");
+ return false;
+ }
+
+ size_t len = docStream.read(buffer, kBufferSize);
+ done = docStream.isAtEnd();
+ XML_Status status = XML_ParseBuffer(ctx.fXMLParser, SkToS32(len), done);
+ if (XML_STATUS_ERROR == status) {
+ XML_Error error = XML_GetErrorCode(ctx.fXMLParser);
+ int line = XML_GetCurrentLineNumber(ctx.fXMLParser);
+ int column = XML_GetCurrentColumnNumber(ctx.fXMLParser);
+ const XML_LChar* errorString = XML_ErrorString(error);
+ SkDebugf("parse error @%d:%d: %d (%s).\n", line, column, error, errorString);
+ return false;
+ }
+ } while (!done);
+
+ return true;
+}
+
+bool SkXMLParser::parse(const char doc[], size_t len)
+{
+ SkMemoryStream docStream(doc, len);
+ return this->parse(docStream);
+}
+
+void SkXMLParser::GetNativeErrorString(int error, SkString* str)
+{
+ // Stub: no mapping from native parser error codes to strings is provided.
+}
+
+bool SkXMLParser::startElement(const char elem[])
+{
+ return this->onStartElement(elem);
+}
+
+bool SkXMLParser::addAttribute(const char name[], const char value[])
+{
+ return this->onAddAttribute(name, value);
+}
+
+bool SkXMLParser::endElement(const char elem[])
+{
+ return this->onEndElement(elem);
+}
+
+bool SkXMLParser::text(const char text[], int len)
+{
+ return this->onText(text, len);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool SkXMLParser::onStartElement(const char elem[]) { return false; }
+bool SkXMLParser::onAddAttribute(const char name[], const char value[]) { return false; }
+bool SkXMLParser::onEndElement(const char elem[]) { return false; }
+bool SkXMLParser::onText(const char text[], int len) { return false; }
diff --git a/gfx/skia/skia/src/xml/SkXMLWriter.cpp b/gfx/skia/skia/src/xml/SkXMLWriter.cpp
new file mode 100644
index 000000000..5ee237ff6
--- /dev/null
+++ b/gfx/skia/skia/src/xml/SkXMLWriter.cpp
@@ -0,0 +1,361 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkXMLWriter.h"
+#include "SkStream.h"
+
+SkXMLWriter::SkXMLWriter(bool doEscapeMarkup) : fDoEscapeMarkup(doEscapeMarkup)
+{
+}
+
+SkXMLWriter::~SkXMLWriter()
+{
+ SkASSERT(fElems.count() == 0);
+}
+
+void SkXMLWriter::flush()
+{
+ while (fElems.count())
+ this->endElement();
+}
+
+void SkXMLWriter::addAttribute(const char name[], const char value[])
+{
+ this->addAttributeLen(name, value, strlen(value));
+}
+
+void SkXMLWriter::addS32Attribute(const char name[], int32_t value)
+{
+ SkString tmp;
+ tmp.appendS32(value);
+ this->addAttribute(name, tmp.c_str());
+}
+
+void SkXMLWriter::addHexAttribute(const char name[], uint32_t value, int minDigits)
+{
+ SkString tmp("0x");
+ tmp.appendHex(value, minDigits);
+ this->addAttribute(name, tmp.c_str());
+}
+
+void SkXMLWriter::addScalarAttribute(const char name[], SkScalar value)
+{
+ SkString tmp;
+ tmp.appendScalar(value);
+ this->addAttribute(name, tmp.c_str());
+}
+
+void SkXMLWriter::addText(const char text[], size_t length) {
+ if (fElems.isEmpty()) {
+ return;
+ }
+
+ this->onAddText(text, length);
+
+ fElems.top()->fHasText = true;
+}
+
+void SkXMLWriter::doEnd(Elem* elem)
+{
+ delete elem;
+}
+
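+// Pushes a new element onto the stack and reports whether it is the first child of the
+// element below it, so SkXMLStreamWriter::onStartElementLen knows the parent's start tag
+// still needs its closing '>'.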
+bool SkXMLWriter::doStart(const char name[], size_t length)
+{
+ int level = fElems.count();
+ bool firstChild = level > 0 && !fElems[level-1]->fHasChildren;
+ if (firstChild)
+ fElems[level-1]->fHasChildren = true;
+ Elem** elem = fElems.push();
+ *elem = new Elem(name, length);
+ return firstChild;
+}
+
+SkXMLWriter::Elem* SkXMLWriter::getEnd()
+{
+ Elem* elem;
+ fElems.pop(&elem);
+ return elem;
+}
+
+const char* SkXMLWriter::getHeader()
+{
+ static const char gHeader[] = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>";
+ return gHeader;
+}
+
+void SkXMLWriter::startElement(const char name[])
+{
+ this->startElementLen(name, strlen(name));
+}
+
+static const char* escape_char(char c, char storage[2])
+{
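+ // Each entry packs the character to escape in [0], followed by its XML entity text.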
+ static const char* gEscapeChars[] = {
+ "<&lt;",
+ ">&gt;",
+ //"\"&quot;",
+ //"'&apos;",
+ "&&amp;"
+ };
+
+ const char** array = gEscapeChars;
+ for (unsigned i = 0; i < SK_ARRAY_COUNT(gEscapeChars); i++)
+ {
+ if (array[i][0] == c)
+ return &array[i][1];
+ }
+ storage[0] = c;
+ storage[1] = 0;
+ return storage;
+}
+
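+// Escapes markup characters in src. addAttributeLen() calls this twice: first with a
+// null dst just to learn how many extra bytes are needed, then again to write the
+// escaped text. Returns the number of extra bytes beyond the original length.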
+static size_t escape_markup(char dst[], const char src[], size_t length)
+{
+ size_t extra = 0;
+ const char* stop = src + length;
+
+ while (src < stop)
+ {
+ char orig[2];
+ const char* seq = escape_char(*src, orig);
+ size_t seqSize = strlen(seq);
+
+ if (dst)
+ {
+ memcpy(dst, seq, seqSize);
+ dst += seqSize;
+ }
+
+ // now record the extra size needed
+ extra += seqSize - 1; // minus one to subtract the original char
+
+ // bump to the next src char
+ src += 1;
+ }
+ return extra;
+}
+
+void SkXMLWriter::addAttributeLen(const char name[], const char value[], size_t length)
+{
+ SkString valueStr;
+
+ if (fDoEscapeMarkup)
+ {
+ size_t extra = escape_markup(nullptr, value, length);
+ if (extra)
+ {
+ valueStr.resize(length + extra);
+ (void)escape_markup(valueStr.writable_str(), value, length);
+ value = valueStr.c_str();
+ length += extra;
+ }
+ }
+ this->onAddAttributeLen(name, value, length);
+}
+
+void SkXMLWriter::startElementLen(const char elem[], size_t length)
+{
+ this->onStartElementLen(elem, length);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+
+static void write_dom(const SkDOM& dom, const SkDOM::Node* node, SkXMLWriter* w, bool skipRoot)
+{
+ if (!skipRoot)
+ {
+ const char* elem = dom.getName(node);
+ if (dom.getType(node) == SkDOM::kText_Type) {
+ SkASSERT(dom.countChildren(node) == 0);
+ w->addText(elem, strlen(elem));
+ return;
+ }
+
+ w->startElement(elem);
+
+ SkDOM::AttrIter iter(dom, node);
+ const char* name;
+ const char* value;
+ while ((name = iter.next(&value)) != nullptr)
+ w->addAttribute(name, value);
+ }
+
+ node = dom.getFirstChild(node, nullptr);
+ while (node)
+ {
+ write_dom(dom, node, w, false);
+ node = dom.getNextSibling(node, nullptr);
+ }
+
+ if (!skipRoot)
+ w->endElement();
+}
+
+void SkXMLWriter::writeDOM(const SkDOM& dom, const SkDOM::Node* node, bool skipRoot)
+{
+ if (node)
+ write_dom(dom, node, this, skipRoot);
+}
+
+void SkXMLWriter::writeHeader()
+{
+}
+
+// SkXMLStreamWriter
+
+static void tab(SkWStream& stream, int level)
+{
+ for (int i = 0; i < level; i++)
+ stream.writeText("\t");
+}
+
+SkXMLStreamWriter::SkXMLStreamWriter(SkWStream* stream) : fStream(*stream)
+{
+}
+
+SkXMLStreamWriter::~SkXMLStreamWriter()
+{
+ this->flush();
+}
+
+void SkXMLStreamWriter::onAddAttributeLen(const char name[], const char value[], size_t length)
+{
+ SkASSERT(!fElems.top()->fHasChildren && !fElems.top()->fHasText);
+ fStream.writeText(" ");
+ fStream.writeText(name);
+ fStream.writeText("=\"");
+ fStream.write(value, length);
+ fStream.writeText("\"");
+}
+
+void SkXMLStreamWriter::onAddText(const char text[], size_t length) {
+ Elem* elem = fElems.top();
+
+ if (!elem->fHasChildren && !elem->fHasText) {
+ fStream.writeText(">");
+ fStream.newline();
+ }
+
+ tab(fStream, fElems.count() + 1);
+ fStream.write(text, length);
+ fStream.newline();
+}
+
+void SkXMLStreamWriter::onEndElement()
+{
+ Elem* elem = getEnd();
+ if (elem->fHasChildren || elem->fHasText)
+ {
+ tab(fStream, fElems.count());
+ fStream.writeText("</");
+ fStream.writeText(elem->fName.c_str());
+ fStream.writeText(">");
+ } else {
+ fStream.writeText("/>");
+ }
+ fStream.newline();
+ doEnd(elem);
+}
+
+void SkXMLStreamWriter::onStartElementLen(const char name[], size_t length)
+{
+ int level = fElems.count();
+ if (this->doStart(name, length))
+ {
+ // the first child, need to close with >
+ fStream.writeText(">");
+ fStream.newline();
+ }
+
+ tab(fStream, level);
+ fStream.writeText("<");
+ fStream.write(name, length);
+}
+
+void SkXMLStreamWriter::writeHeader()
+{
+ const char* header = getHeader();
+ fStream.write(header, strlen(header));
+ fStream.newline();
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "SkXMLParser.h"
+
+SkXMLParserWriter::SkXMLParserWriter(SkXMLParser* parser)
+ : SkXMLWriter(false), fParser(*parser)
+{
+}
+
+SkXMLParserWriter::~SkXMLParserWriter()
+{
+ this->flush();
+}
+
+void SkXMLParserWriter::onAddAttributeLen(const char name[], const char value[], size_t length)
+{
+ SkASSERT(fElems.count() == 0 || (!fElems.top()->fHasChildren && !fElems.top()->fHasText));
+ SkString str(value, length);
+ fParser.addAttribute(name, str.c_str());
+}
+
+void SkXMLParserWriter::onAddText(const char text[], size_t length) {
+ fParser.text(text, SkToInt(length));
+}
+
+void SkXMLParserWriter::onEndElement()
+{
+ Elem* elem = this->getEnd();
+ fParser.endElement(elem->fName.c_str());
+ this->doEnd(elem);
+}
+
+void SkXMLParserWriter::onStartElementLen(const char name[], size_t length)
+{
+ (void)this->doStart(name, length);
+ SkString str(name, length);
+ fParser.startElement(str.c_str());
+}
+
+
+////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+
+void SkXMLStreamWriter::UnitTest()
+{
+#ifdef SK_SUPPORT_UNITTEST
+ SkDebugWStream s;
+ SkXMLStreamWriter w(&s);
+
+ w.startElement("elem0");
+ w.addAttribute("hello", "world");
+ w.addS32Attribute("dec", 42);
+ w.addHexAttribute("hex", 0x42, 3);
+ w.addScalarAttribute("scalar", -4.2f);
+ w.startElement("elem1");
+ w.endElement();
+ w.startElement("elem1");
+ w.addAttribute("name", "value");
+ w.endElement();
+ w.startElement("elem1");
+ w.startElement("elem2");
+ w.startElement("elem3");
+ w.addAttribute("name", "value");
+ w.endElement();
+ w.endElement();
+ w.startElement("elem2");
+ w.endElement();
+ w.endElement();
+ w.endElement();
+#endif
+}
+
+#endif
diff --git a/gfx/skia/skia/src/xps/SkDocument_XPS.cpp b/gfx/skia/skia/src/xps/SkDocument_XPS.cpp
new file mode 100644
index 000000000..4a977aeae
--- /dev/null
+++ b/gfx/skia/skia/src/xps/SkDocument_XPS.cpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32)
+
+#include "SkDocument.h"
+#include "SkXPSDevice.h"
+#include "SkStream.h"
+
+class SkDocument_XPS : public SkDocument {
+public:
+ SkDocument_XPS(SkWStream* stream,
+ void (*doneProc)(SkWStream*, bool),
+ SkScalar dpi)
+ : SkDocument(stream, doneProc) {
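+ // 360000/127 units per meter == 72 units per inch (at 10000/254 inches per meter),
+ // so page geometry appears to be specified in points; the pixel scale below is
+ // likewise dpi * 10000/254, written as dpi * 5000/127.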
+ const SkScalar kPointsPerMeter = SkDoubleToScalar(360000.0 / 127.0);
+ fUnitsPerMeter.set(kPointsPerMeter, kPointsPerMeter);
+ SkScalar pixelsPerMeterScale = SkDoubleToScalar(dpi * 5000.0 / 127.0);
+ fPixelsPerMeter.set(pixelsPerMeterScale, pixelsPerMeterScale);
+ fDevice.beginPortfolio(stream);
+ }
+
+ virtual ~SkDocument_XPS() {
+ // subclasses must call close() in their destructors
+ this->close();
+ }
+
+protected:
+ SkCanvas* onBeginPage(SkScalar width,
+ SkScalar height,
+ const SkRect& trimBox) override {
+ fDevice.beginSheet(fUnitsPerMeter, fPixelsPerMeter,
+ SkSize::Make(width, height));
+ fCanvas.reset(new SkCanvas(&fDevice));
+ fCanvas->clipRect(trimBox);
+ fCanvas->translate(trimBox.x(), trimBox.y());
+ return fCanvas.get();
+ }
+
+ void onEndPage() override {
+ SkASSERT(fCanvas.get());
+ fCanvas->flush();
+ fCanvas.reset(nullptr);
+ fDevice.endSheet();
+ }
+
+ void onClose(SkWStream*) override {
+ SkASSERT(!fCanvas.get());
+ (void)fDevice.endPortfolio();
+ }
+
+ void onAbort() override {}
+
+private:
+ SkXPSDevice fDevice;
+ SkAutoTUnref<SkCanvas> fCanvas;
+ SkVector fUnitsPerMeter;
+ SkVector fPixelsPerMeter;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkDocument> SkDocument::MakeXPS(SkWStream* stream, SkScalar dpi) {
+ return stream ? sk_make_sp<SkDocument_XPS>(stream, nullptr, dpi) : nullptr;
+}
+
+static void delete_wstream(SkWStream* stream, bool aborted) { delete stream; }
+
+sk_sp<SkDocument> SkDocument::MakeXPS(const char path[], SkScalar dpi) {
+ SkAutoTDelete<SkFILEWStream> stream(new SkFILEWStream(path));
+ if (!stream->isValid()) {
+ return nullptr;
+ }
+ return sk_make_sp<SkDocument_XPS>(stream.release(), delete_wstream, dpi);
+}
+
+#endif//defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/xps/SkDocument_XPS_None.cpp b/gfx/skia/skia/src/xps/SkDocument_XPS_None.cpp
new file mode 100644
index 000000000..b1c7ed4bc
--- /dev/null
+++ b/gfx/skia/skia/src/xps/SkDocument_XPS_None.cpp
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if !defined(SK_BUILD_FOR_WIN32)
+
+#include "SkDocument.h"
+sk_sp<SkDocument> SkDocument::MakeXPS(SkWStream*, SkScalar) { return nullptr; }
+sk_sp<SkDocument> SkDocument::MakeXPS(const char path[], SkScalar) {
+ return nullptr;
+}
+
+#endif//!defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/xps/SkXPSDevice.cpp b/gfx/skia/skia/src/xps/SkXPSDevice.cpp
new file mode 100644
index 000000000..5db644c30
--- /dev/null
+++ b/gfx/skia/skia/src/xps/SkXPSDevice.cpp
@@ -0,0 +1,2288 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN32)
+
+#include "SkLeanWindows.h"
+
+#ifndef UNICODE
+#define UNICODE
+#endif
+#ifndef _UNICODE
+#define _UNICODE
+#endif
+#include <ObjBase.h>
+#include <XpsObjectModel.h>
+#include <T2EmbApi.h>
+#include <FontSub.h>
+#include <limits>
+
+#include "SkColor.h"
+#include "SkData.h"
+#include "SkDraw.h"
+#include "SkEndian.h"
+#include "SkFindAndPlaceGlyph.h"
+#include "SkGeometry.h"
+#include "SkGlyphCache.h"
+#include "SkHRESULT.h"
+#include "SkImage.h"
+#include "SkImageEncoder.h"
+#include "SkIStream.h"
+#include "SkMaskFilter.h"
+#include "SkPaint.h"
+#include "SkPathEffect.h"
+#include "SkPathOps.h"
+#include "SkPoint.h"
+#include "SkRasterClip.h"
+#include "SkRasterizer.h"
+#include "SkSFNTHeader.h"
+#include "SkShader.h"
+#include "SkSize.h"
+#include "SkStream.h"
+#include "SkTDArray.h"
+#include "SkTLazy.h"
+#include "SkTScopedComPtr.h"
+#include "SkTTCFHeader.h"
+#include "SkTypefacePriv.h"
+#include "SkUtils.h"
+#include "SkXPSDevice.h"
+
+//Windows defines a FLOAT type,
+//make it clear when converting a scalar that this is what is wanted.
+#define SkScalarToFLOAT(n) SkScalarToFloat(n)
+
+//Dummy representation of a GUID from createId.
+#define L_GUID_ID L"XXXXXXXXsXXXXsXXXXsXXXXsXXXXXXXXXXXX"
+//Length of GUID representation from createId, including nullptr terminator.
+#define GUID_ID_LEN SK_ARRAY_COUNT(L_GUID_ID)
+
+/**
+ Formats a GUID and places it into buffer.
+ buffer should have space for at least GUID_ID_LEN wide characters.
+ The string will always be wchar null terminated.
+ XXXXXXXXsXXXXsXXXXsXXXXsXXXXXXXXXXXX0
+ @return -1 if there was an error, > 0 if success.
+ */
+static int format_guid(const GUID& guid,
+ wchar_t* buffer, size_t bufferSize,
+ wchar_t sep = '-') {
+ SkASSERT(bufferSize >= GUID_ID_LEN);
+ return swprintf_s(buffer,
+ bufferSize,
+ L"%08lX%c%04X%c%04X%c%02X%02X%c%02X%02X%02X%02X%02X%02X",
+ guid.Data1,
+ sep,
+ guid.Data2,
+ sep,
+ guid.Data3,
+ sep,
+ guid.Data4[0],
+ guid.Data4[1],
+ sep,
+ guid.Data4[2],
+ guid.Data4[3],
+ guid.Data4[4],
+ guid.Data4[5],
+ guid.Data4[6],
+ guid.Data4[7]);
+}
+
+HRESULT SkXPSDevice::createId(wchar_t* buffer, size_t bufferSize, wchar_t sep) {
+ GUID guid = {};
+#ifdef SK_XPS_USE_DETERMINISTIC_IDS
+ guid.Data1 = fNextId++;
+ // The following make this a valid Type4 UUID.
+ guid.Data3 = 0x4000;
+ guid.Data4[0] = 0x80;
+#else
+ HRM(CoCreateGuid(&guid), "Could not create GUID for id.");
+#endif
+
+ if (format_guid(guid, buffer, bufferSize, sep) == -1) {
+ HRM(E_UNEXPECTED, "Could not format GUID into id.");
+ }
+
+ return S_OK;
+}
+
+static SkBitmap make_fake_bitmap(int width, int height) {
+ SkBitmap bitmap;
+ bitmap.setInfo(SkImageInfo::MakeUnknown(width, height));
+ return bitmap;
+}
+
+// TODO: should inherit from SkBaseDevice instead of SkBitmapDevice...
+SkXPSDevice::SkXPSDevice()
+ : INHERITED(make_fake_bitmap(10000, 10000), SkSurfaceProps(0, kUnknown_SkPixelGeometry))
+ , fCurrentPage(0) {
+}
+
+SkXPSDevice::SkXPSDevice(IXpsOMObjectFactory* xpsFactory)
+ : INHERITED(make_fake_bitmap(10000, 10000), SkSurfaceProps(0, kUnknown_SkPixelGeometry))
+ , fCurrentPage(0) {
+
+ HRVM(CoCreateInstance(
+ CLSID_XpsOMObjectFactory,
+ nullptr,
+ CLSCTX_INPROC_SERVER,
+ IID_PPV_ARGS(&this->fXpsFactory)),
+ "Could not create factory for layer.");
+
+ HRVM(this->fXpsFactory->CreateCanvas(&this->fCurrentXpsCanvas),
+ "Could not create canvas for layer.");
+}
+
+SkXPSDevice::~SkXPSDevice() {
+}
+
+SkXPSDevice::TypefaceUse::TypefaceUse()
+ : typefaceId(0xffffffff)
+ , fontData(nullptr)
+ , xpsFont(nullptr)
+ , glyphsUsed(nullptr) {
+}
+
+SkXPSDevice::TypefaceUse::~TypefaceUse() {
+ //xpsFont owns fontData ref
+ this->xpsFont->Release();
+ delete this->glyphsUsed;
+}
+
+bool SkXPSDevice::beginPortfolio(SkWStream* outputStream) {
+ if (!this->fAutoCo.succeeded()) return false;
+
+ //Create XPS Factory.
+ HRBM(CoCreateInstance(
+ CLSID_XpsOMObjectFactory,
+ nullptr,
+ CLSCTX_INPROC_SERVER,
+ IID_PPV_ARGS(&this->fXpsFactory)),
+ "Could not create XPS factory.");
+
+ HRBM(SkWIStream::CreateFromSkWStream(outputStream, &this->fOutputStream),
+ "Could not convert SkStream to IStream.");
+
+ return true;
+}
+
+bool SkXPSDevice::beginSheet(
+ const SkVector& unitsPerMeter,
+ const SkVector& pixelsPerMeter,
+ const SkSize& trimSize,
+ const SkRect* mediaBox,
+ const SkRect* bleedBox,
+ const SkRect* artBox,
+ const SkRect* cropBox) {
+ ++this->fCurrentPage;
+
+ //For simplicity, just write everything out in geometry units,
+ //then have a base canvas do the scale to physical units.
+ this->fCurrentCanvasSize = trimSize;
+ this->fCurrentUnitsPerMeter = unitsPerMeter;
+ this->fCurrentPixelsPerMeter = pixelsPerMeter;
+
+ this->fCurrentXpsCanvas.reset();
+ HRBM(this->fXpsFactory->CreateCanvas(&this->fCurrentXpsCanvas),
+ "Could not create base canvas.");
+
+ return true;
+}
+
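+// Maximum number of decimal digits needed to print a value of integral type T (sign excluded).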
+template <typename T> static constexpr size_t sk_digits_in() {
+ return static_cast<size_t>(std::numeric_limits<T>::digits10 + 1);
+}
+
+HRESULT SkXPSDevice::createXpsThumbnail(IXpsOMPage* page,
+ const unsigned int pageNum,
+ IXpsOMImageResource** image) {
+ SkTScopedComPtr<IXpsOMThumbnailGenerator> thumbnailGenerator;
+ HRM(CoCreateInstance(
+ CLSID_XpsOMThumbnailGenerator,
+ nullptr,
+ CLSCTX_INPROC_SERVER,
+ IID_PPV_ARGS(&thumbnailGenerator)),
+ "Could not create thumbnail generator.");
+
+ SkTScopedComPtr<IOpcPartUri> partUri;
+ constexpr size_t size = SkTMax(
+ SK_ARRAY_COUNT(L"/Documents/1/Metadata/.png") + sk_digits_in<decltype(pageNum)>(),
+ SK_ARRAY_COUNT(L"/Metadata/" L_GUID_ID L".png"));
+ wchar_t buffer[size];
+ if (pageNum > 0) {
+ swprintf_s(buffer, size, L"/Documents/1/Metadata/%u.png", pageNum);
+ } else {
+ wchar_t id[GUID_ID_LEN];
+ HR(this->createId(id, GUID_ID_LEN));
+ swprintf_s(buffer, size, L"/Metadata/%s.png", id);
+ }
+ HRM(this->fXpsFactory->CreatePartUri(buffer, &partUri),
+ "Could not create thumbnail part uri.");
+
+ HRM(thumbnailGenerator->GenerateThumbnail(page,
+ XPS_IMAGE_TYPE_PNG,
+ XPS_THUMBNAIL_SIZE_LARGE,
+ partUri.get(),
+ image),
+ "Could not generate thumbnail.");
+
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::createXpsPage(const XPS_SIZE& pageSize,
+ IXpsOMPage** page) {
+ constexpr size_t size =
+ SK_ARRAY_COUNT(L"/Documents/1/Pages/.fpage")
+ + sk_digits_in<decltype(fCurrentPage)>();
+ wchar_t buffer[size];
+ swprintf_s(buffer, size, L"/Documents/1/Pages/%u.fpage",
+ this->fCurrentPage);
+ SkTScopedComPtr<IOpcPartUri> partUri;
+ HRM(this->fXpsFactory->CreatePartUri(buffer, &partUri),
+ "Could not create page part uri.");
+
+ //If the language is unknown, use "und" (XPS Spec 2.3.5.1).
+ HRM(this->fXpsFactory->CreatePage(&pageSize,
+ L"und",
+ partUri.get(),
+ page),
+ "Could not create page.");
+
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::initXpsDocumentWriter(IXpsOMImageResource* image) {
+ //Create package writer.
+ {
+ SkTScopedComPtr<IOpcPartUri> partUri;
+ HRM(this->fXpsFactory->CreatePartUri(L"/FixedDocumentSequence.fdseq",
+ &partUri),
+ "Could not create document sequence part uri.");
+ HRM(this->fXpsFactory->CreatePackageWriterOnStream(
+ this->fOutputStream.get(),
+ TRUE,
+ XPS_INTERLEAVING_OFF, //XPS_INTERLEAVING_ON,
+ partUri.get(),
+ nullptr,
+ image,
+ nullptr,
+ nullptr,
+ &this->fPackageWriter),
+ "Could not create package writer.");
+ }
+
+ //Begin the lone document.
+ {
+ SkTScopedComPtr<IOpcPartUri> partUri;
+ HRM(this->fXpsFactory->CreatePartUri(
+ L"/Documents/1/FixedDocument.fdoc",
+ &partUri),
+ "Could not create fixed document part uri.");
+ HRM(this->fPackageWriter->StartNewDocument(partUri.get(),
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr),
+ "Could not start document.");
+ }
+
+ return S_OK;
+}
+
+bool SkXPSDevice::endSheet() {
+ //XPS is fixed at 96dpi (XPS Spec 11.1).
+ static const float xpsDPI = 96.0f;
+ static const float inchesPerMeter = 10000.0f / 254.0f;
+ static const float targetUnitsPerMeter = xpsDPI * inchesPerMeter;
+ const float scaleX = targetUnitsPerMeter
+ / SkScalarToFLOAT(this->fCurrentUnitsPerMeter.fX);
+ const float scaleY = targetUnitsPerMeter
+ / SkScalarToFLOAT(this->fCurrentUnitsPerMeter.fY);
+
+ //Create the scale canvas.
+ SkTScopedComPtr<IXpsOMCanvas> scaleCanvas;
+ HRBM(this->fXpsFactory->CreateCanvas(&scaleCanvas),
+ "Could not create scale canvas.");
+ SkTScopedComPtr<IXpsOMVisualCollection> scaleCanvasVisuals;
+ HRBM(scaleCanvas->GetVisuals(&scaleCanvasVisuals),
+ "Could not get scale canvas visuals.");
+
+ SkTScopedComPtr<IXpsOMMatrixTransform> geomToPhys;
+ XPS_MATRIX rawGeomToPhys = { scaleX, 0, 0, scaleY, 0, 0, };
+ HRBM(this->fXpsFactory->CreateMatrixTransform(&rawGeomToPhys, &geomToPhys),
+ "Could not create geometry to physical transform.");
+ HRBM(scaleCanvas->SetTransformLocal(geomToPhys.get()),
+ "Could not set transform on scale canvas.");
+
+ //Add the content canvas to the scale canvas.
+ HRBM(scaleCanvasVisuals->Append(this->fCurrentXpsCanvas.get()),
+ "Could not add base canvas to scale canvas.");
+
+ //Create the page.
+ XPS_SIZE pageSize = {
+ SkScalarToFLOAT(this->fCurrentCanvasSize.width()) * scaleX,
+ SkScalarToFLOAT(this->fCurrentCanvasSize.height()) * scaleY,
+ };
+ SkTScopedComPtr<IXpsOMPage> page;
+ HRB(this->createXpsPage(pageSize, &page));
+
+ SkTScopedComPtr<IXpsOMVisualCollection> pageVisuals;
+ HRBM(page->GetVisuals(&pageVisuals), "Could not get page visuals.");
+
+ //Add the scale canvas to the page.
+ HRBM(pageVisuals->Append(scaleCanvas.get()),
+ "Could not add scale canvas to page.");
+
+ //Create the package writer if it hasn't been created yet.
+ if (nullptr == this->fPackageWriter.get()) {
+ SkTScopedComPtr<IXpsOMImageResource> image;
+ //Ignore return, thumbnail is completely optional.
+ this->createXpsThumbnail(page.get(), 0, &image);
+
+ HRB(this->initXpsDocumentWriter(image.get()));
+ }
+
+ HRBM(this->fPackageWriter->AddPage(page.get(),
+ &pageSize,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr),
+ "Could not write the page.");
+ this->fCurrentXpsCanvas.reset();
+
+ return true;
+}
+
+static HRESULT subset_typeface(SkXPSDevice::TypefaceUse* current) {
+ //CreateFontPackage wants unsigned short.
+ //Microsoft, Y U NO stdint.h?
+ SkTDArray<unsigned short> keepList;
+ current->glyphsUsed->exportTo(&keepList);
+
+ int ttcCount = (current->ttcIndex + 1);
+
+ //The following are declared with the types required by CreateFontPackage.
+ unsigned char *fontPackageBufferRaw = nullptr;
+ unsigned long fontPackageBufferSize;
+ unsigned long bytesWritten;
+ unsigned long result = CreateFontPackage(
+ (unsigned char *) current->fontData->getMemoryBase(),
+ (unsigned long) current->fontData->getLength(),
+ &fontPackageBufferRaw,
+ &fontPackageBufferSize,
+ &bytesWritten,
+ TTFCFP_FLAGS_SUBSET | TTFCFP_FLAGS_GLYPHLIST | (ttcCount > 0 ? TTFCFP_FLAGS_TTC : 0),
+ current->ttcIndex,
+ TTFCFP_SUBSET,
+ 0,
+ 0,
+ 0,
+ keepList.begin(),
+ keepList.count(),
+ sk_malloc_throw,
+ sk_realloc_throw,
+ sk_free,
+ nullptr);
+ SkAutoTMalloc<unsigned char> fontPackageBuffer(fontPackageBufferRaw);
+ if (result != NO_ERROR) {
+ SkDEBUGF(("CreateFontPackage Error %lu", result));
+ return E_UNEXPECTED;
+ }
+
+ // If it was originally a ttc, keep it a ttc.
+ // CreateFontPackage over-allocates, realloc usually decreases the size substantially.
+ size_t extra;
+ if (ttcCount > 0) {
+ // Create space for a ttc header.
+ extra = sizeof(SkTTCFHeader) + (ttcCount * sizeof(SK_OT_ULONG));
+ fontPackageBuffer.realloc(bytesWritten + extra);
+ //overlap is certain, use memmove
+ memmove(fontPackageBuffer.get() + extra, fontPackageBuffer.get(), bytesWritten);
+
+ // Write the ttc header.
+ SkTTCFHeader* ttcfHeader = reinterpret_cast<SkTTCFHeader*>(fontPackageBuffer.get());
+ ttcfHeader->ttcTag = SkTTCFHeader::TAG;
+ ttcfHeader->version = SkTTCFHeader::version_1;
+ ttcfHeader->numOffsets = SkEndian_SwapBE32(ttcCount);
+ SK_OT_ULONG* offsetPtr = SkTAfter<SK_OT_ULONG>(ttcfHeader);
+ for (int i = 0; i < ttcCount; ++i, ++offsetPtr) {
+ *offsetPtr = SkEndian_SwapBE32(SkToU32(extra));
+ }
+
+ // Fix up offsets in sfnt table entries.
+ SkSFNTHeader* sfntHeader = SkTAddOffset<SkSFNTHeader>(fontPackageBuffer.get(), extra);
+ int numTables = SkEndian_SwapBE16(sfntHeader->numTables);
+ SkSFNTHeader::TableDirectoryEntry* tableDirectory =
+ SkTAfter<SkSFNTHeader::TableDirectoryEntry>(sfntHeader);
+ for (int i = 0; i < numTables; ++i, ++tableDirectory) {
+ tableDirectory->offset = SkEndian_SwapBE32(
+ SkToU32(SkEndian_SwapBE32(SkToU32(tableDirectory->offset)) + extra));
+ }
+ } else {
+ extra = 0;
+ fontPackageBuffer.realloc(bytesWritten);
+ }
+
+ SkAutoTDelete<SkMemoryStream> newStream(new SkMemoryStream());
+ newStream->setMemoryOwned(fontPackageBuffer.release(), bytesWritten + extra);
+
+ SkTScopedComPtr<IStream> newIStream;
+ SkIStream::CreateFromSkStream(newStream.release(), true, &newIStream);
+
+ XPS_FONT_EMBEDDING embedding;
+ HRM(current->xpsFont->GetEmbeddingOption(&embedding),
+ "Could not get embedding option from font.");
+
+ SkTScopedComPtr<IOpcPartUri> partUri;
+ HRM(current->xpsFont->GetPartName(&partUri),
+ "Could not get part uri from font.");
+
+ HRM(current->xpsFont->SetContent(
+ newIStream.get(),
+ embedding,
+ partUri.get()),
+ "Could not set new stream for subsetted font.");
+
+ return S_OK;
+}
+
+bool SkXPSDevice::endPortfolio() {
+ //Subset fonts
+ if (!this->fTypefaces.empty()) {
+ SkXPSDevice::TypefaceUse* current = &this->fTypefaces.front();
+ const TypefaceUse* last = &this->fTypefaces.back();
+ for (; current <= last; ++current) {
+ //Ignore return for now, if it didn't subset, let it be.
+ subset_typeface(current);
+ }
+ }
+
+ HRBM(this->fPackageWriter->Close(), "Could not close writer.");
+
+ return true;
+}
+
+static XPS_COLOR xps_color(const SkColor skColor) {
+ //XPS uses non-pre-multiplied alpha (XPS Spec 11.4).
+ XPS_COLOR xpsColor;
+ xpsColor.colorType = XPS_COLOR_TYPE_SRGB;
+ xpsColor.value.sRGB.alpha = SkColorGetA(skColor);
+ xpsColor.value.sRGB.red = SkColorGetR(skColor);
+ xpsColor.value.sRGB.green = SkColorGetG(skColor);
+ xpsColor.value.sRGB.blue = SkColorGetB(skColor);
+
+ return xpsColor;
+}
+
+static XPS_POINT xps_point(const SkPoint& point) {
+ XPS_POINT xpsPoint = {
+ SkScalarToFLOAT(point.fX),
+ SkScalarToFLOAT(point.fY),
+ };
+ return xpsPoint;
+}
+
+static XPS_POINT xps_point(const SkPoint& point, const SkMatrix& matrix) {
+ SkPoint skTransformedPoint;
+ matrix.mapXY(point.fX, point.fY, &skTransformedPoint);
+ return xps_point(skTransformedPoint);
+}
+
+static XPS_SPREAD_METHOD xps_spread_method(SkShader::TileMode tileMode) {
+ switch (tileMode) {
+ case SkShader::kClamp_TileMode:
+ return XPS_SPREAD_METHOD_PAD;
+ case SkShader::kRepeat_TileMode:
+ return XPS_SPREAD_METHOD_REPEAT;
+ case SkShader::kMirror_TileMode:
+ return XPS_SPREAD_METHOD_REFLECT;
+ default:
+ SkDEBUGFAIL("Unknown tile mode.");
+ }
+ return XPS_SPREAD_METHOD_PAD;
+}
+
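+//Rewrites each stop offset as its fractional position along the transformed
+//start-to-end line. Since the stops are colinear, the ratio of the summed
+//component distances computed below equals that fraction.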
+static void transform_offsets(SkScalar* stopOffsets, const int numOffsets,
+ const SkPoint& start, const SkPoint& end,
+ const SkMatrix& transform) {
+ SkPoint startTransformed;
+ transform.mapXY(start.fX, start.fY, &startTransformed);
+ SkPoint endTransformed;
+ transform.mapXY(end.fX, end.fY, &endTransformed);
+
+ //Manhattan distance between transformed start and end.
+ SkScalar startToEnd = (endTransformed.fX - startTransformed.fX)
+ + (endTransformed.fY - startTransformed.fY);
+ if (SkScalarNearlyZero(startToEnd)) {
+ for (int i = 0; i < numOffsets; ++i) {
+ stopOffsets[i] = 0;
+ }
+ return;
+ }
+
+ for (int i = 0; i < numOffsets; ++i) {
+ SkPoint stop;
+ stop.fX = SkScalarMul(end.fX - start.fX, stopOffsets[i]);
+ stop.fY = SkScalarMul(end.fY - start.fY, stopOffsets[i]);
+
+ SkPoint stopTransformed;
+ transform.mapXY(stop.fX, stop.fY, &stopTransformed);
+
+ //Manhattan distance between transformed start and stop.
+ SkScalar startToStop = (stopTransformed.fX - startTransformed.fX)
+ + (stopTransformed.fY - startTransformed.fY);
+ //Percentage along transformed line.
+ stopOffsets[i] = startToStop / startToEnd;
+ }
+}
+
+HRESULT SkXPSDevice::createXpsTransform(const SkMatrix& matrix,
+ IXpsOMMatrixTransform** xpsTransform) {
+ SkScalar affine[6];
+ if (!matrix.asAffine(affine)) {
+ *xpsTransform = nullptr;
+ return S_FALSE;
+ }
+ XPS_MATRIX rawXpsMatrix = {
+ SkScalarToFLOAT(affine[SkMatrix::kAScaleX]),
+ SkScalarToFLOAT(affine[SkMatrix::kASkewY]),
+ SkScalarToFLOAT(affine[SkMatrix::kASkewX]),
+ SkScalarToFLOAT(affine[SkMatrix::kAScaleY]),
+ SkScalarToFLOAT(affine[SkMatrix::kATransX]),
+ SkScalarToFLOAT(affine[SkMatrix::kATransY]),
+ };
+ HRM(this->fXpsFactory->CreateMatrixTransform(&rawXpsMatrix, xpsTransform),
+ "Could not create transform.");
+
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::createPath(IXpsOMGeometryFigure* figure,
+ IXpsOMVisualCollection* visuals,
+ IXpsOMPath** path) {
+ SkTScopedComPtr<IXpsOMGeometry> geometry;
+ HRM(this->fXpsFactory->CreateGeometry(&geometry),
+ "Could not create geometry.");
+
+ SkTScopedComPtr<IXpsOMGeometryFigureCollection> figureCollection;
+ HRM(geometry->GetFigures(&figureCollection), "Could not get figures.");
+ HRM(figureCollection->Append(figure), "Could not add figure.");
+
+ HRM(this->fXpsFactory->CreatePath(path), "Could not create path.");
+ HRM((*path)->SetGeometryLocal(geometry.get()), "Could not set geometry");
+
+ HRM(visuals->Append(*path), "Could not add path to visuals.");
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::createXpsSolidColorBrush(const SkColor skColor,
+ const SkAlpha alpha,
+ IXpsOMBrush** xpsBrush) {
+ XPS_COLOR xpsColor = xps_color(skColor);
+ SkTScopedComPtr<IXpsOMSolidColorBrush> solidBrush;
+ HRM(this->fXpsFactory->CreateSolidColorBrush(&xpsColor, nullptr, &solidBrush),
+ "Could not create solid color brush.");
+ HRM(solidBrush->SetOpacity(alpha / 255.0f), "Could not set opacity.");
+ HRM(solidBrush->QueryInterface<IXpsOMBrush>(xpsBrush), "QI Fail.");
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::sideOfClamp(const SkRect& areaToFill,
+ const XPS_RECT& imageViewBox,
+ IXpsOMImageResource* image,
+ IXpsOMVisualCollection* visuals) {
+ SkTScopedComPtr<IXpsOMGeometryFigure> areaToFillFigure;
+ HR(this->createXpsRect(areaToFill, FALSE, TRUE, &areaToFillFigure));
+
+ SkTScopedComPtr<IXpsOMPath> areaToFillPath;
+ HR(this->createPath(areaToFillFigure.get(), visuals, &areaToFillPath));
+
+ SkTScopedComPtr<IXpsOMImageBrush> areaToFillBrush;
+ HRM(this->fXpsFactory->CreateImageBrush(image,
+ &imageViewBox,
+ &imageViewBox,
+ &areaToFillBrush),
+ "Could not create brush for side of clamp.");
+ HRM(areaToFillBrush->SetTileMode(XPS_TILE_MODE_FLIPXY),
+ "Could not set tile mode for side of clamp.");
+ HRM(areaToFillPath->SetFillBrushLocal(areaToFillBrush.get()),
+ "Could not set brush for side of clamp");
+
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::cornerOfClamp(const SkRect& areaToFill,
+ const SkColor color,
+ IXpsOMVisualCollection* visuals) {
+ SkTScopedComPtr<IXpsOMGeometryFigure> areaToFillFigure;
+ HR(this->createXpsRect(areaToFill, FALSE, TRUE, &areaToFillFigure));
+
+ SkTScopedComPtr<IXpsOMPath> areaToFillPath;
+ HR(this->createPath(areaToFillFigure.get(), visuals, &areaToFillPath));
+
+ SkTScopedComPtr<IXpsOMBrush> areaToFillBrush;
+ HR(this->createXpsSolidColorBrush(color, 0xFF, &areaToFillBrush));
+ HRM(areaToFillPath->SetFillBrushLocal(areaToFillBrush.get()),
+ "Could not set brush for corner of clamp.");
+
+ return S_OK;
+}
+
+static const XPS_TILE_MODE XTM_N = XPS_TILE_MODE_NONE;
+static const XPS_TILE_MODE XTM_T = XPS_TILE_MODE_TILE;
+static const XPS_TILE_MODE XTM_X = XPS_TILE_MODE_FLIPX;
+static const XPS_TILE_MODE XTM_Y = XPS_TILE_MODE_FLIPY;
+static const XPS_TILE_MODE XTM_XY = XPS_TILE_MODE_FLIPXY;
+
+//TODO(bungeman): In the future, should skia add None,
+//handle None+Mirror and None+Repeat correctly.
+//None is currently an internal hack so masks don't repeat (None+None only).
+static XPS_TILE_MODE SkToXpsTileMode[SkShader::kTileModeCount+1]
+ [SkShader::kTileModeCount+1] = {
+ //Clamp //Repeat //Mirror //None
+/*Clamp */ XTM_N, XTM_T, XTM_Y, XTM_N,
+/*Repeat*/ XTM_T, XTM_T, XTM_Y, XTM_N,
+/*Mirror*/ XTM_X, XTM_X, XTM_XY, XTM_X,
+/*None */ XTM_N, XTM_N, XTM_Y, XTM_N,
+};
+
+HRESULT SkXPSDevice::createXpsImageBrush(
+ const SkBitmap& bitmap,
+ const SkMatrix& localMatrix,
+ const SkShader::TileMode (&xy)[2],
+ const SkAlpha alpha,
+ IXpsOMTileBrush** xpsBrush) {
+ SkDynamicMemoryWStream write;
+ if (!SkImageEncoder::EncodeStream(&write, bitmap,
+ SkImageEncoder::kPNG_Type, 100)) {
+ HRM(E_FAIL, "Unable to encode bitmap as png.");
+ }
+ SkMemoryStream* read = new SkMemoryStream;
+ read->setData(write.detachAsData());
+ SkTScopedComPtr<IStream> readWrapper;
+ HRM(SkIStream::CreateFromSkStream(read, true, &readWrapper),
+ "Could not create stream from png data.");
+
+ const size_t size =
+ SK_ARRAY_COUNT(L"/Documents/1/Resources/Images/" L_GUID_ID L".png");
+ wchar_t buffer[size];
+ wchar_t id[GUID_ID_LEN];
+ HR(this->createId(id, GUID_ID_LEN));
+ swprintf_s(buffer, size, L"/Documents/1/Resources/Images/%s.png", id);
+
+ SkTScopedComPtr<IOpcPartUri> imagePartUri;
+ HRM(this->fXpsFactory->CreatePartUri(buffer, &imagePartUri),
+ "Could not create image part uri.");
+
+ SkTScopedComPtr<IXpsOMImageResource> imageResource;
+ HRM(this->fXpsFactory->CreateImageResource(
+ readWrapper.get(),
+ XPS_IMAGE_TYPE_PNG,
+ imagePartUri.get(),
+ &imageResource),
+ "Could not create image resource.");
+
+ XPS_RECT bitmapRect = {
+ 0.0, 0.0,
+ static_cast<FLOAT>(bitmap.width()), static_cast<FLOAT>(bitmap.height())
+ };
+ SkTScopedComPtr<IXpsOMImageBrush> xpsImageBrush;
+ HRM(this->fXpsFactory->CreateImageBrush(imageResource.get(),
+ &bitmapRect, &bitmapRect,
+ &xpsImageBrush),
+ "Could not create image brush.");
+
+ if (SkShader::kClamp_TileMode != xy[0] &&
+ SkShader::kClamp_TileMode != xy[1]) {
+
+ HRM(xpsImageBrush->SetTileMode(SkToXpsTileMode[xy[0]][xy[1]]),
+ "Could not set image tile mode");
+ HRM(xpsImageBrush->SetOpacity(alpha / 255.0f),
+ "Could not set image opacity.");
+ HRM(xpsImageBrush->QueryInterface(xpsBrush), "QI failed.");
+ } else {
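+ //XPS has no clamp tile mode, so emulate it: draw the bitmap over its own bounds,
+ //extend its outermost rows/columns into edge strips (sideOfClamp), fill the corner
+ //regions with the corner pixel colors (cornerOfClamp), then wrap the whole canvas
+ //in a visual brush.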
+ //TODO(bungeman): compute how big this really needs to be.
+ const SkScalar BIG = SkIntToScalar(1000); //SK_ScalarMax;
+ const FLOAT BIG_F = SkScalarToFLOAT(BIG);
+ const SkScalar bWidth = SkIntToScalar(bitmap.width());
+ const SkScalar bHeight = SkIntToScalar(bitmap.height());
+
+ //create brush canvas
+ SkTScopedComPtr<IXpsOMCanvas> brushCanvas;
+ HRM(this->fXpsFactory->CreateCanvas(&brushCanvas),
+ "Could not create image brush canvas.");
+ SkTScopedComPtr<IXpsOMVisualCollection> brushVisuals;
+ HRM(brushCanvas->GetVisuals(&brushVisuals),
+ "Could not get image brush canvas visuals collection.");
+
+ //create central figure
+ const SkRect bitmapPoints = SkRect::MakeLTRB(0, 0, bWidth, bHeight);
+ SkTScopedComPtr<IXpsOMGeometryFigure> centralFigure;
+ HR(this->createXpsRect(bitmapPoints, FALSE, TRUE, &centralFigure));
+
+ SkTScopedComPtr<IXpsOMPath> centralPath;
+ HR(this->createPath(centralFigure.get(),
+ brushVisuals.get(),
+ &centralPath));
+ HRM(xpsImageBrush->SetTileMode(XPS_TILE_MODE_FLIPXY),
+ "Could not set tile mode for image brush central path.");
+ HRM(centralPath->SetFillBrushLocal(xpsImageBrush.get()),
+ "Could not set fill brush for image brush central path.");
+
+ //add left/right
+ if (SkShader::kClamp_TileMode == xy[0]) {
+ SkRect leftArea = SkRect::MakeLTRB(-BIG, 0, 0, bHeight);
+ XPS_RECT leftImageViewBox = {
+ 0.0, 0.0,
+ 1.0, static_cast<FLOAT>(bitmap.height()),
+ };
+ HR(this->sideOfClamp(leftArea, leftImageViewBox,
+ imageResource.get(),
+ brushVisuals.get()));
+
+ SkRect rightArea = SkRect::MakeLTRB(bWidth, 0, BIG, bHeight);
+ XPS_RECT rightImageViewBox = {
+ bitmap.width() - 1.0f, 0.0f,
+ 1.0f, static_cast<FLOAT>(bitmap.height()),
+ };
+ HR(this->sideOfClamp(rightArea, rightImageViewBox,
+ imageResource.get(),
+ brushVisuals.get()));
+ }
+
+ //add top/bottom
+ if (SkShader::kClamp_TileMode == xy[1]) {
+ SkRect topArea = SkRect::MakeLTRB(0, -BIG, bWidth, 0);
+ XPS_RECT topImageViewBox = {
+ 0.0, 0.0,
+ static_cast<FLOAT>(bitmap.width()), 1.0,
+ };
+ HR(this->sideOfClamp(topArea, topImageViewBox,
+ imageResource.get(),
+ brushVisuals.get()));
+
+ SkRect bottomArea = SkRect::MakeLTRB(0, bHeight, bWidth, BIG);
+ XPS_RECT bottomImageViewBox = {
+ 0.0f, bitmap.height() - 1.0f,
+ static_cast<FLOAT>(bitmap.width()), 1.0f,
+ };
+ HR(this->sideOfClamp(bottomArea, bottomImageViewBox,
+ imageResource.get(),
+ brushVisuals.get()));
+ }
+
+ //add tl, tr, bl, br
+ if (SkShader::kClamp_TileMode == xy[0] &&
+ SkShader::kClamp_TileMode == xy[1]) {
+
+ SkAutoLockPixels alp(bitmap);
+
+ const SkColor tlColor = bitmap.getColor(0,0);
+ const SkRect tlArea = SkRect::MakeLTRB(-BIG, -BIG, 0, 0);
+ HR(this->cornerOfClamp(tlArea, tlColor, brushVisuals.get()));
+
+ const SkColor trColor = bitmap.getColor(bitmap.width()-1,0);
+ const SkRect trArea = SkRect::MakeLTRB(bWidth, -BIG, BIG, 0);
+ HR(this->cornerOfClamp(trArea, trColor, brushVisuals.get()));
+
+ const SkColor brColor = bitmap.getColor(bitmap.width()-1,
+ bitmap.height()-1);
+ const SkRect brArea = SkRect::MakeLTRB(bWidth, bHeight, BIG, BIG);
+ HR(this->cornerOfClamp(brArea, brColor, brushVisuals.get()));
+
+ const SkColor blColor = bitmap.getColor(0,bitmap.height()-1);
+ const SkRect blArea = SkRect::MakeLTRB(-BIG, bHeight, 0, BIG);
+ HR(this->cornerOfClamp(blArea, blColor, brushVisuals.get()));
+ }
+
+ //create visual brush from canvas
+ XPS_RECT bound = {};
+ if (SkShader::kClamp_TileMode == xy[0] &&
+ SkShader::kClamp_TileMode == xy[1]) {
+
+ bound.x = BIG_F / -2;
+ bound.y = BIG_F / -2;
+ bound.width = BIG_F;
+ bound.height = BIG_F;
+ } else if (SkShader::kClamp_TileMode == xy[0]) {
+ bound.x = BIG_F / -2;
+ bound.y = 0.0f;
+ bound.width = BIG_F;
+ bound.height = static_cast<FLOAT>(bitmap.height());
+ } else if (SkShader::kClamp_TileMode == xy[1]) {
+ bound.x = 0;
+ bound.y = BIG_F / -2;
+ bound.width = static_cast<FLOAT>(bitmap.width());
+ bound.height = BIG_F;
+ }
+ SkTScopedComPtr<IXpsOMVisualBrush> clampBrush;
+ HRM(this->fXpsFactory->CreateVisualBrush(&bound, &bound, &clampBrush),
+ "Could not create visual brush for image brush.");
+ HRM(clampBrush->SetVisualLocal(brushCanvas.get()),
+ "Could not set canvas on visual brush for image brush.");
+ HRM(clampBrush->SetTileMode(SkToXpsTileMode[xy[0]][xy[1]]),
+ "Could not set tile mode on visual brush for image brush.");
+ HRM(clampBrush->SetOpacity(alpha / 255.0f),
+ "Could not set opacity on visual brush for image brush.");
+
+ HRM(clampBrush->QueryInterface(xpsBrush), "QI failed.");
+ }
+
+ SkTScopedComPtr<IXpsOMMatrixTransform> xpsMatrixToUse;
+ HR(this->createXpsTransform(localMatrix, &xpsMatrixToUse));
+ if (xpsMatrixToUse.get()) {
+ HRM((*xpsBrush)->SetTransformLocal(xpsMatrixToUse.get()),
+ "Could not set transform for image brush.");
+ } else {
+ //TODO(bungeman): perspective bitmaps in general.
+ }
+
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::createXpsGradientStop(const SkColor skColor,
+ const SkScalar offset,
+ IXpsOMGradientStop** xpsGradStop) {
+ XPS_COLOR gradStopXpsColor = xps_color(skColor);
+ HRM(this->fXpsFactory->CreateGradientStop(&gradStopXpsColor,
+ nullptr,
+ SkScalarToFLOAT(offset),
+ xpsGradStop),
+ "Could not create gradient stop.");
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::createXpsLinearGradient(SkShader::GradientInfo info,
+ const SkAlpha alpha,
+ const SkMatrix& localMatrix,
+ IXpsOMMatrixTransform* xpsMatrix,
+ IXpsOMBrush** xpsBrush) {
+ XPS_POINT startPoint;
+ XPS_POINT endPoint;
+ if (xpsMatrix) {
+ startPoint = xps_point(info.fPoint[0]);
+ endPoint = xps_point(info.fPoint[1]);
+ } else {
+ transform_offsets(info.fColorOffsets, info.fColorCount,
+ info.fPoint[0], info.fPoint[1],
+ localMatrix);
+ startPoint = xps_point(info.fPoint[0], localMatrix);
+ endPoint = xps_point(info.fPoint[1], localMatrix);
+ }
+
+ SkTScopedComPtr<IXpsOMGradientStop> gradStop0;
+ HR(createXpsGradientStop(info.fColors[0],
+ info.fColorOffsets[0],
+ &gradStop0));
+
+ SkTScopedComPtr<IXpsOMGradientStop> gradStop1;
+ HR(createXpsGradientStop(info.fColors[1],
+ info.fColorOffsets[1],
+ &gradStop1));
+
+ SkTScopedComPtr<IXpsOMLinearGradientBrush> gradientBrush;
+ HRM(this->fXpsFactory->CreateLinearGradientBrush(gradStop0.get(),
+ gradStop1.get(),
+ &startPoint,
+ &endPoint,
+ &gradientBrush),
+ "Could not create linear gradient brush.");
+ if (xpsMatrix) {
+ HRM(gradientBrush->SetTransformLocal(xpsMatrix),
+ "Could not set transform on linear gradient brush.");
+ }
+
+ SkTScopedComPtr<IXpsOMGradientStopCollection> gradStopCollection;
+ HRM(gradientBrush->GetGradientStops(&gradStopCollection),
+ "Could not get linear gradient stop collection.");
+ for (int i = 2; i < info.fColorCount; ++i) {
+ SkTScopedComPtr<IXpsOMGradientStop> gradStop;
+ HR(createXpsGradientStop(info.fColors[i],
+ info.fColorOffsets[i],
+ &gradStop));
+ HRM(gradStopCollection->Append(gradStop.get()),
+ "Could not add linear gradient stop.");
+ }
+
+ HRM(gradientBrush->SetSpreadMethod(xps_spread_method(info.fTileMode)),
+ "Could not set spread method of linear gradient.");
+
+ HRM(gradientBrush->SetOpacity(alpha / 255.0f),
+ "Could not set opacity of linear gradient brush.");
+ HRM(gradientBrush->QueryInterface<IXpsOMBrush>(xpsBrush), "QI failed");
+
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::createXpsRadialGradient(SkShader::GradientInfo info,
+ const SkAlpha alpha,
+ const SkMatrix& localMatrix,
+ IXpsOMMatrixTransform* xpsMatrix,
+ IXpsOMBrush** xpsBrush) {
+ SkTScopedComPtr<IXpsOMGradientStop> gradStop0;
+ HR(createXpsGradientStop(info.fColors[0],
+ info.fColorOffsets[0],
+ &gradStop0));
+
+ SkTScopedComPtr<IXpsOMGradientStop> gradStop1;
+ HR(createXpsGradientStop(info.fColors[1],
+ info.fColorOffsets[1],
+ &gradStop1));
+
+ //TODO: figure out how to fake better if not affine
+ XPS_POINT centerPoint;
+ XPS_POINT gradientOrigin;
+ XPS_SIZE radiiSizes;
+ if (xpsMatrix) {
+ centerPoint = xps_point(info.fPoint[0]);
+ gradientOrigin = xps_point(info.fPoint[0]);
+ radiiSizes.width = SkScalarToFLOAT(info.fRadius[0]);
+ radiiSizes.height = SkScalarToFLOAT(info.fRadius[0]);
+ } else {
+ centerPoint = xps_point(info.fPoint[0], localMatrix);
+ gradientOrigin = xps_point(info.fPoint[0], localMatrix);
+
+ SkScalar radius = info.fRadius[0];
+ SkVector vec[2];
+
+ vec[0].set(radius, 0);
+ vec[1].set(0, radius);
+ localMatrix.mapVectors(vec, 2);
+
+ SkScalar d0 = vec[0].length();
+ SkScalar d1 = vec[1].length();
+
+ radiiSizes.width = SkScalarToFLOAT(d0);
+ radiiSizes.height = SkScalarToFLOAT(d1);
+ }
+
+ SkTScopedComPtr<IXpsOMRadialGradientBrush> gradientBrush;
+ HRM(this->fXpsFactory->CreateRadialGradientBrush(gradStop0.get(),
+ gradStop1.get(),
+ &centerPoint,
+ &gradientOrigin,
+ &radiiSizes,
+ &gradientBrush),
+ "Could not create radial gradient brush.");
+ if (xpsMatrix) {
+ HRM(gradientBrush->SetTransformLocal(xpsMatrix),
+ "Could not set transform on radial gradient brush.");
+ }
+
+ SkTScopedComPtr<IXpsOMGradientStopCollection> gradStopCollection;
+ HRM(gradientBrush->GetGradientStops(&gradStopCollection),
+ "Could not get radial gradient stop collection.");
+ for (int i = 2; i < info.fColorCount; ++i) {
+ SkTScopedComPtr<IXpsOMGradientStop> gradStop;
+ HR(createXpsGradientStop(info.fColors[i],
+ info.fColorOffsets[i],
+ &gradStop));
+ HRM(gradStopCollection->Append(gradStop.get()),
+ "Could not add radial gradient stop.");
+ }
+
+ HRM(gradientBrush->SetSpreadMethod(xps_spread_method(info.fTileMode)),
+ "Could not set spread method of radial gradient.");
+
+ HRM(gradientBrush->SetOpacity(alpha / 255.0f),
+ "Could not set opacity of radial gradient brush.");
+ HRM(gradientBrush->QueryInterface<IXpsOMBrush>(xpsBrush), "QI failed.");
+
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::createXpsBrush(const SkPaint& skPaint,
+ IXpsOMBrush** brush,
+ const SkMatrix* parentTransform) {
+ const SkShader *shader = skPaint.getShader();
+ if (nullptr == shader) {
+ HR(this->createXpsSolidColorBrush(skPaint.getColor(), 0xFF, brush));
+ return S_OK;
+ }
+
+ //Gradient shaders.
+ SkShader::GradientInfo info;
+ info.fColorCount = 0;
+ info.fColors = nullptr;
+ info.fColorOffsets = nullptr;
+ SkShader::GradientType gradientType = shader->asAGradient(&info);
+
+ if (SkShader::kNone_GradientType == gradientType) {
+ //Nothing to see, move along.
+
+ } else if (SkShader::kColor_GradientType == gradientType) {
+ SkASSERT(1 == info.fColorCount);
+ SkColor color;
+ info.fColors = &color;
+ shader->asAGradient(&info);
+ SkAlpha alpha = skPaint.getAlpha();
+ HR(this->createXpsSolidColorBrush(color, alpha, brush));
+ return S_OK;
+
+ } else {
+ if (info.fColorCount == 0) {
+ const SkColor color = skPaint.getColor();
+ HR(this->createXpsSolidColorBrush(color, 0xFF, brush));
+ return S_OK;
+ }
+
+ SkAutoTArray<SkColor> colors(info.fColorCount);
+ SkAutoTArray<SkScalar> colorOffsets(info.fColorCount);
+ info.fColors = colors.get();
+ info.fColorOffsets = colorOffsets.get();
+ shader->asAGradient(&info);
+
+ if (1 == info.fColorCount) {
+ SkColor color = info.fColors[0];
+ SkAlpha alpha = skPaint.getAlpha();
+ HR(this->createXpsSolidColorBrush(color, alpha, brush));
+ return S_OK;
+ }
+
+ SkMatrix localMatrix = shader->getLocalMatrix();
+ if (parentTransform) {
+ localMatrix.preConcat(*parentTransform);
+ }
+ SkTScopedComPtr<IXpsOMMatrixTransform> xpsMatrixToUse;
+ HR(this->createXpsTransform(localMatrix, &xpsMatrixToUse));
+
+ if (SkShader::kLinear_GradientType == gradientType) {
+ HR(this->createXpsLinearGradient(info,
+ skPaint.getAlpha(),
+ localMatrix,
+ xpsMatrixToUse.get(),
+ brush));
+ return S_OK;
+ }
+
+ if (SkShader::kRadial_GradientType == gradientType) {
+ HR(this->createXpsRadialGradient(info,
+ skPaint.getAlpha(),
+ localMatrix,
+ xpsMatrixToUse.get(),
+ brush));
+ return S_OK;
+ }
+
+ if (SkShader::kConical_GradientType == gradientType) {
+ //simple if affine and one is 0, otherwise will have to fake
+ }
+
+ if (SkShader::kSweep_GradientType == gradientType) {
+ //have to fake
+ }
+ }
+
+ SkBitmap outTexture;
+ SkMatrix outMatrix;
+ SkShader::TileMode xy[2];
+ SkImage* image = shader->isAImage(&outMatrix, xy);
+ if (image && image->asLegacyBitmap(&outTexture, SkImage::kRO_LegacyBitmapMode)) {
+ //TODO: outMatrix??
+ SkMatrix localMatrix = shader->getLocalMatrix();
+ if (parentTransform) {
+ localMatrix.preConcat(*parentTransform);
+ }
+
+ SkTScopedComPtr<IXpsOMTileBrush> tileBrush;
+ HR(this->createXpsImageBrush(outTexture,
+ localMatrix,
+ xy,
+ skPaint.getAlpha(),
+ &tileBrush));
+
+ HRM(tileBrush->QueryInterface<IXpsOMBrush>(brush), "QI failed.");
+ } else {
+ HR(this->createXpsSolidColorBrush(skPaint.getColor(), 0xFF, brush));
+ }
+ return S_OK;
+}
+
+static bool rect_must_be_pathed(const SkPaint& paint, const SkMatrix& matrix) {
+ const bool zeroWidth = (0 == paint.getStrokeWidth());
+ const bool stroke = (SkPaint::kFill_Style != paint.getStyle());
+
+ return paint.getPathEffect() ||
+ paint.getMaskFilter() ||
+ paint.getRasterizer() ||
+ (stroke && (
+ (matrix.hasPerspective() && !zeroWidth) ||
+ SkPaint::kMiter_Join != paint.getStrokeJoin() ||
+ (SkPaint::kMiter_Join == paint.getStrokeJoin() &&
+ paint.getStrokeMiter() < SK_ScalarSqrt2)
+ ))
+ ;
+}
+
+HRESULT SkXPSDevice::createXpsRect(const SkRect& rect, BOOL stroke, BOOL fill,
+ IXpsOMGeometryFigure** xpsRect) {
+ const SkPoint points[4] = {
+ { rect.fLeft, rect.fTop },
+ { rect.fRight, rect.fTop },
+ { rect.fRight, rect.fBottom },
+ { rect.fLeft, rect.fBottom },
+ };
+ return this->createXpsQuad(points, stroke, fill, xpsRect);
+}
+HRESULT SkXPSDevice::createXpsQuad(const SkPoint (&points)[4],
+ BOOL stroke, BOOL fill,
+ IXpsOMGeometryFigure** xpsQuad) {
+ // Define the start point.
+ XPS_POINT startPoint = xps_point(points[0]);
+
+ // Create the figure.
+ HRM(this->fXpsFactory->CreateGeometryFigure(&startPoint, xpsQuad),
+ "Could not create quad geometry figure.");
+
+ // Define the type of each segment.
+ XPS_SEGMENT_TYPE segmentTypes[3] = {
+ XPS_SEGMENT_TYPE_LINE,
+ XPS_SEGMENT_TYPE_LINE,
+ XPS_SEGMENT_TYPE_LINE,
+ };
+
+ // Define the x and y coordinates of each corner of the figure.
+ FLOAT segmentData[6] = {
+ SkScalarToFLOAT(points[1].fX), SkScalarToFLOAT(points[1].fY),
+ SkScalarToFLOAT(points[2].fX), SkScalarToFLOAT(points[2].fY),
+ SkScalarToFLOAT(points[3].fX), SkScalarToFLOAT(points[3].fY),
+ };
+
+ // Describe if the segments are stroked.
+ BOOL segmentStrokes[3] = {
+ stroke, stroke, stroke,
+ };
+
+ // Add the segment data to the figure.
+ HRM((*xpsQuad)->SetSegments(
+ 3, 6,
+ segmentTypes , segmentData, segmentStrokes),
+ "Could not add segment data to quad.");
+
+ // Set the closed and filled properties of the figure.
+ HRM((*xpsQuad)->SetIsClosed(stroke), "Could not set quad close.");
+ HRM((*xpsQuad)->SetIsFilled(fill), "Could not set quad fill.");
+
+ return S_OK;
+}
+
+void SkXPSDevice::drawPoints(const SkDraw& d, SkCanvas::PointMode mode,
+ size_t count, const SkPoint points[],
+ const SkPaint& paint) {
+ //This will call back into the device to do the drawing.
+ d.drawPoints(mode, count, points, paint, true);
+}
+
+void SkXPSDevice::drawVertices(const SkDraw&, SkCanvas::VertexMode,
+ int vertexCount, const SkPoint verts[],
+ const SkPoint texs[], const SkColor colors[],
+ SkXfermode* xmode, const uint16_t indices[],
+ int indexCount, const SkPaint& paint) {
+ //TODO: override this for XPS
+ SkDEBUGF(("XPS drawVertices not yet implemented."));
+}
+
+void SkXPSDevice::drawPaint(const SkDraw& d, const SkPaint& origPaint) {
+ const SkRect r = SkRect::MakeSize(this->fCurrentCanvasSize);
+
+ //If trying to paint with a stroke, ignore that and fill.
+ SkTCopyOnFirstWrite<SkPaint> paint(origPaint);
+ if (paint->getStyle() != SkPaint::kFill_Style) {
+ paint.writable()->setStyle(SkPaint::kFill_Style);
+ }
+
+ this->internalDrawRect(d, r, false, *paint);
+}
+
+void SkXPSDevice::drawRect(const SkDraw& d,
+ const SkRect& r,
+ const SkPaint& paint) {
+ this->internalDrawRect(d, r, true, paint);
+}
+
+void SkXPSDevice::drawRRect(const SkDraw& d,
+ const SkRRect& rr,
+ const SkPaint& paint) {
+ SkPath path;
+ path.addRRect(rr);
+ this->drawPath(d, path, paint, nullptr, true);
+}
+
+void SkXPSDevice::internalDrawRect(const SkDraw& d,
+ const SkRect& r,
+ bool transformRect,
+ const SkPaint& paint) {
+ //Exit early if there is nothing to draw.
+ if (d.fRC->isEmpty() ||
+ (paint.getAlpha() == 0 && paint.isSrcOver())) {
+ return;
+ }
+
+ //Path the rect if we can't optimize it.
+ if (rect_must_be_pathed(paint, *d.fMatrix)) {
+ SkPath tmp;
+ tmp.addRect(r);
+ tmp.setFillType(SkPath::kWinding_FillType);
+ this->drawPath(d, tmp, paint, nullptr, true);
+ return;
+ }
+
+ //Create the shaded path.
+ SkTScopedComPtr<IXpsOMPath> shadedPath;
+ HRVM(this->fXpsFactory->CreatePath(&shadedPath),
+ "Could not create shaded path for rect.");
+
+ //Create the shaded geometry.
+ SkTScopedComPtr<IXpsOMGeometry> shadedGeometry;
+ HRVM(this->fXpsFactory->CreateGeometry(&shadedGeometry),
+ "Could not create shaded geometry for rect.");
+
+ //Add the geometry to the shaded path.
+ HRVM(shadedPath->SetGeometryLocal(shadedGeometry.get()),
+ "Could not set shaded geometry for rect.");
+
+ //Set the brushes.
+ BOOL fill = FALSE;
+ BOOL stroke = FALSE;
+ HRV(this->shadePath(shadedPath.get(), paint, *d.fMatrix, &fill, &stroke));
+
+ bool xpsTransformsPath = true;
+ //Transform the geometry.
+ if (transformRect && xpsTransformsPath) {
+ SkTScopedComPtr<IXpsOMMatrixTransform> xpsTransform;
+ HRV(this->createXpsTransform(*d.fMatrix, &xpsTransform));
+ if (xpsTransform.get()) {
+ HRVM(shadedGeometry->SetTransformLocal(xpsTransform.get()),
+ "Could not set transform for rect.");
+ } else {
+ xpsTransformsPath = false;
+ }
+ }
+
+ //Create the figure.
+ SkTScopedComPtr<IXpsOMGeometryFigure> rectFigure;
+ {
+ SkPoint points[4] = {
+ { r.fLeft, r.fTop },
+ { r.fLeft, r.fBottom },
+ { r.fRight, r.fBottom },
+ { r.fRight, r.fTop },
+ };
+ if (!xpsTransformsPath && transformRect) {
+ d.fMatrix->mapPoints(points, SK_ARRAY_COUNT(points));
+ }
+ HRV(this->createXpsQuad(points, stroke, fill, &rectFigure));
+ }
+
+ //Get the figures of the shaded geometry.
+ SkTScopedComPtr<IXpsOMGeometryFigureCollection> shadedFigures;
+ HRVM(shadedGeometry->GetFigures(&shadedFigures),
+ "Could not get shaded figures for rect.");
+
+ //Add the figure to the shaded geometry figures.
+ HRVM(shadedFigures->Append(rectFigure.get()),
+ "Could not add shaded figure for rect.");
+
+ HRV(this->clip(shadedPath.get(), d));
+
+ //Add the shaded path to the current visuals.
+ SkTScopedComPtr<IXpsOMVisualCollection> currentVisuals;
+ HRVM(this->fCurrentXpsCanvas->GetVisuals(&currentVisuals),
+ "Could not get current visuals for rect.");
+ HRVM(currentVisuals->Append(shadedPath.get()),
+ "Could not add rect to current visuals.");
+}
+
+static HRESULT close_figure(const SkTDArray<XPS_SEGMENT_TYPE>& segmentTypes,
+ const SkTDArray<BOOL>& segmentStrokes,
+ const SkTDArray<FLOAT>& segmentData,
+ BOOL stroke, BOOL fill,
+ IXpsOMGeometryFigure* figure,
+ IXpsOMGeometryFigureCollection* figures) {
+ // Add the segment data to the figure.
+ HRM(figure->SetSegments(segmentTypes.count(), segmentData.count(),
+ segmentTypes.begin() , segmentData.begin(),
+ segmentStrokes.begin()),
+ "Could not set path segments.");
+
+ // Set the closed and filled properties of the figure.
+ HRM(figure->SetIsClosed(stroke), "Could not set path closed.");
+ HRM(figure->SetIsFilled(fill), "Could not set path fill.");
+
+ // Add the figure created above to this geometry.
+ HRM(figures->Append(figure), "Could not add path to geometry.");
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::addXpsPathGeometry(
+ IXpsOMGeometryFigureCollection* xpsFigures,
+ BOOL stroke, BOOL fill, const SkPath& path) {
+ SkTDArray<XPS_SEGMENT_TYPE> segmentTypes;
+ SkTDArray<BOOL> segmentStrokes;
+ SkTDArray<FLOAT> segmentData;
+
+ SkTScopedComPtr<IXpsOMGeometryFigure> xpsFigure;
+ SkPath::Iter iter(path, true);
+ SkPoint points[4];
+ SkPath::Verb verb;
+ while ((verb = iter.next(points)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kMove_Verb: {
+ if (xpsFigure.get()) {
+ HR(close_figure(segmentTypes, segmentStrokes, segmentData,
+ stroke, fill,
+ xpsFigure.get() , xpsFigures));
+ xpsFigure.reset();
+ segmentTypes.rewind();
+ segmentStrokes.rewind();
+ segmentData.rewind();
+ }
+ // Define the start point.
+ XPS_POINT startPoint = xps_point(points[0]);
+ // Create the figure.
+ HRM(this->fXpsFactory->CreateGeometryFigure(&startPoint,
+ &xpsFigure),
+ "Could not create path geometry figure.");
+ break;
+ }
+ case SkPath::kLine_Verb:
+ if (iter.isCloseLine()) break; //ignore the line, auto-closed
+ segmentTypes.push(XPS_SEGMENT_TYPE_LINE);
+ segmentStrokes.push(stroke);
+ segmentData.push(SkScalarToFLOAT(points[1].fX));
+ segmentData.push(SkScalarToFLOAT(points[1].fY));
+ break;
+ case SkPath::kQuad_Verb:
+ segmentTypes.push(XPS_SEGMENT_TYPE_QUADRATIC_BEZIER);
+ segmentStrokes.push(stroke);
+ segmentData.push(SkScalarToFLOAT(points[1].fX));
+ segmentData.push(SkScalarToFLOAT(points[1].fY));
+ segmentData.push(SkScalarToFLOAT(points[2].fX));
+ segmentData.push(SkScalarToFLOAT(points[2].fY));
+ break;
+ case SkPath::kCubic_Verb:
+ segmentTypes.push(XPS_SEGMENT_TYPE_BEZIER);
+ segmentStrokes.push(stroke);
+ segmentData.push(SkScalarToFLOAT(points[1].fX));
+ segmentData.push(SkScalarToFLOAT(points[1].fY));
+ segmentData.push(SkScalarToFLOAT(points[2].fX));
+ segmentData.push(SkScalarToFLOAT(points[2].fY));
+ segmentData.push(SkScalarToFLOAT(points[3].fX));
+ segmentData.push(SkScalarToFLOAT(points[3].fY));
+ break;
+ case SkPath::kConic_Verb: {
+ const SkScalar tol = SK_Scalar1 / 4;
+ SkAutoConicToQuads converter;
+ const SkPoint* quads =
+ converter.computeQuads(points, iter.conicWeight(), tol);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ segmentTypes.push(XPS_SEGMENT_TYPE_QUADRATIC_BEZIER);
+ segmentStrokes.push(stroke);
+ segmentData.push(SkScalarToFLOAT(quads[2 * i + 1].fX));
+ segmentData.push(SkScalarToFLOAT(quads[2 * i + 1].fY));
+ segmentData.push(SkScalarToFLOAT(quads[2 * i + 2].fX));
+ segmentData.push(SkScalarToFLOAT(quads[2 * i + 2].fY));
+ }
+ break;
+ }
+ case SkPath::kClose_Verb:
+ // we ignore these, and just get the whole segment from
+ // the corresponding line/quad/cubic verbs
+ break;
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ break;
+ }
+ }
+ if (xpsFigure.get()) {
+ HR(close_figure(segmentTypes, segmentStrokes, segmentData,
+ stroke, fill,
+ xpsFigure.get(), xpsFigures));
+ }
+ return S_OK;
+}
+
+void SkXPSDevice::convertToPpm(const SkMaskFilter* filter,
+ SkMatrix* matrix,
+ SkVector* ppuScale,
+ const SkIRect& clip, SkIRect* clipIRect) {
+ //This action is in unit space, but the ppm is specified in physical space.
+ ppuScale->set(fCurrentPixelsPerMeter.fX / fCurrentUnitsPerMeter.fX,
+ fCurrentPixelsPerMeter.fY / fCurrentUnitsPerMeter.fY);
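+ //ppuScale is pixels per unit: scaling the matrix and the clip bounds by it
+ //moves geometry from XPS unit space into the pixel space used when a mask
+ //must be rasterized.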
+
+ matrix->postScale(ppuScale->fX, ppuScale->fY);
+
+ const SkIRect& irect = clip;
+ SkRect clipRect = SkRect::MakeLTRB(
+ SkScalarMul(SkIntToScalar(irect.fLeft), ppuScale->fX),
+ SkScalarMul(SkIntToScalar(irect.fTop), ppuScale->fY),
+ SkScalarMul(SkIntToScalar(irect.fRight), ppuScale->fX),
+ SkScalarMul(SkIntToScalar(irect.fBottom), ppuScale->fY));
+ clipRect.roundOut(clipIRect);
+}
+
+HRESULT SkXPSDevice::applyMask(const SkDraw& d,
+ const SkMask& mask,
+ const SkVector& ppuScale,
+ IXpsOMPath* shadedPath) {
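+ //The mask becomes an image brush installed as the path's opacity mask; the
+ //geometry appended below is just a canvas sized rectangle, so the mask
+ //alone determines what is visible.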
+ //Get the geometry object.
+ SkTScopedComPtr<IXpsOMGeometry> shadedGeometry;
+ HRM(shadedPath->GetGeometry(&shadedGeometry),
+ "Could not get mask shaded geometry.");
+
+ //Get the figures from the geometry.
+ SkTScopedComPtr<IXpsOMGeometryFigureCollection> shadedFigures;
+ HRM(shadedGeometry->GetFigures(&shadedFigures),
+ "Could not get mask shaded figures.");
+
+ SkMatrix m;
+ m.reset();
+ m.setTranslate(SkIntToScalar(mask.fBounds.fLeft),
+ SkIntToScalar(mask.fBounds.fTop));
+ m.postScale(SkScalarInvert(ppuScale.fX), SkScalarInvert(ppuScale.fY));
+
+ SkShader::TileMode xy[2];
+ xy[0] = (SkShader::TileMode)3;
+ xy[1] = (SkShader::TileMode)3;
+
+ SkBitmap bm;
+ bm.installMaskPixels(mask);
+
+ SkTScopedComPtr<IXpsOMTileBrush> maskBrush;
+ HR(this->createXpsImageBrush(bm, m, xy, 0xFF, &maskBrush));
+ HRM(shadedPath->SetOpacityMaskBrushLocal(maskBrush.get()),
+ "Could not set mask.");
+
+ const SkRect universeRect = SkRect::MakeLTRB(0, 0,
+ this->fCurrentCanvasSize.fWidth, this->fCurrentCanvasSize.fHeight);
+ SkTScopedComPtr<IXpsOMGeometryFigure> shadedFigure;
+ HRM(this->createXpsRect(universeRect, FALSE, TRUE, &shadedFigure),
+ "Could not create mask shaded figure.");
+ HRM(shadedFigures->Append(shadedFigure.get()),
+ "Could not add mask shaded figure.");
+
+ HR(this->clip(shadedPath, d));
+
+ //Add the path to the active visual collection.
+ SkTScopedComPtr<IXpsOMVisualCollection> currentVisuals;
+ HRM(this->fCurrentXpsCanvas->GetVisuals(&currentVisuals),
+ "Could not get mask current visuals.");
+ HRM(currentVisuals->Append(shadedPath),
+ "Could not add masked shaded path to current visuals.");
+
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::shadePath(IXpsOMPath* shadedPath,
+ const SkPaint& shaderPaint,
+ const SkMatrix& matrix,
+ BOOL* fill, BOOL* stroke) {
+ *fill = FALSE;
+ *stroke = FALSE;
+
+ const SkPaint::Style style = shaderPaint.getStyle();
+ const bool hasFill = SkPaint::kFill_Style == style
+ || SkPaint::kStrokeAndFill_Style == style;
+ const bool hasStroke = SkPaint::kStroke_Style == style
+ || SkPaint::kStrokeAndFill_Style == style;
+
+ //TODO(bungeman): use dictionaries and lookups.
+ if (hasFill) {
+ *fill = TRUE;
+ SkTScopedComPtr<IXpsOMBrush> fillBrush;
+ HR(this->createXpsBrush(shaderPaint, &fillBrush, &matrix));
+ HRM(shadedPath->SetFillBrushLocal(fillBrush.get()),
+ "Could not set fill for shaded path.");
+ }
+
+ if (hasStroke) {
+ *stroke = TRUE;
+ SkTScopedComPtr<IXpsOMBrush> strokeBrush;
+ HR(this->createXpsBrush(shaderPaint, &strokeBrush, &matrix));
+ HRM(shadedPath->SetStrokeBrushLocal(strokeBrush.get()),
+ "Could not set stroke brush for shaded path.");
+ HRM(shadedPath->SetStrokeThickness(
+ SkScalarToFLOAT(shaderPaint.getStrokeWidth())),
+ "Could not set shaded path stroke thickness.");
+
+ if (0 == shaderPaint.getStrokeWidth()) {
+ //XPS hairline width is a hack (XPS Spec 11.6.12).
+ SkTScopedComPtr<IXpsOMDashCollection> dashes;
+ HRM(shadedPath->GetStrokeDashes(&dashes),
+ "Could not set dashes for shaded path.");
+ XPS_DASH dash;
+ dash.length = 1.0;
+ dash.gap = 0.0;
+ HRM(dashes->Append(&dash), "Could not add dashes to shaded path.");
+ HRM(shadedPath->SetStrokeDashOffset(-2.0),
+ "Could not set dash offset for shaded path.");
+ }
+ }
+ return S_OK;
+}
+
+void SkXPSDevice::drawPath(const SkDraw& d,
+ const SkPath& platonicPath,
+ const SkPaint& origPaint,
+ const SkMatrix* prePathMatrix,
+ bool pathIsMutable) {
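+ //The path is transformed in stages: the pre-path matrix maps the platonic
+ //path to the skeletal path, the path effect produces the fillable path, and
+ //the CTM yields the device path unless the transform can be handed to XPS.
+ //Rasterizers and mask filters take a separate route: the path is rendered
+ //to a mask and drawn through applyMask instead of as geometry.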
+ SkTCopyOnFirstWrite<SkPaint> paint(origPaint);
+
+ // nothing to draw
+ if (d.fRC->isEmpty() ||
+ (paint->getAlpha() == 0 && paint->isSrcOver())) {
+ return;
+ }
+
+ SkPath modifiedPath;
+ const bool paintHasPathEffect = paint->getPathEffect()
+ || paint->getStyle() != SkPaint::kFill_Style;
+
+ //Apply pre-path matrix [Platonic-path -> Skeletal-path].
+ SkMatrix matrix = *d.fMatrix;
+ SkPath* skeletalPath = const_cast<SkPath*>(&platonicPath);
+ if (prePathMatrix) {
+ if (paintHasPathEffect || paint->getRasterizer()) {
+ if (!pathIsMutable) {
+ skeletalPath = &modifiedPath;
+ pathIsMutable = true;
+ }
+ platonicPath.transform(*prePathMatrix, skeletalPath);
+ } else {
+ matrix.preConcat(*prePathMatrix);
+ }
+ }
+
+ //Apply path effect [Skeletal-path -> Fillable-path].
+ SkPath* fillablePath = skeletalPath;
+ if (paintHasPathEffect) {
+ if (!pathIsMutable) {
+ fillablePath = &modifiedPath;
+ pathIsMutable = true;
+ }
+ bool fill = paint->getFillPath(*skeletalPath, fillablePath);
+
+ SkPaint* writablePaint = paint.writable();
+ writablePaint->setPathEffect(nullptr);
+ if (fill) {
+ writablePaint->setStyle(SkPaint::kFill_Style);
+ } else {
+ writablePaint->setStyle(SkPaint::kStroke_Style);
+ writablePaint->setStrokeWidth(0);
+ }
+ }
+
+ //Create the shaded path. This will be the path which is painted.
+ SkTScopedComPtr<IXpsOMPath> shadedPath;
+ HRVM(this->fXpsFactory->CreatePath(&shadedPath),
+ "Could not create shaded path for path.");
+
+ //Create the geometry for the shaded path.
+ SkTScopedComPtr<IXpsOMGeometry> shadedGeometry;
+ HRVM(this->fXpsFactory->CreateGeometry(&shadedGeometry),
+ "Could not create shaded geometry for path.");
+
+ //Add the geometry to the shaded path.
+ HRVM(shadedPath->SetGeometryLocal(shadedGeometry.get()),
+ "Could not add the shaded geometry to shaded path.");
+
+ SkRasterizer* rasterizer = paint->getRasterizer();
+ SkMaskFilter* filter = paint->getMaskFilter();
+
+ //Determine if we will draw or shade and mask.
+ if (rasterizer || filter) {
+ if (paint->getStyle() != SkPaint::kFill_Style) {
+ paint.writable()->setStyle(SkPaint::kFill_Style);
+ }
+ }
+
+ //Set the brushes.
+ BOOL fill;
+ BOOL stroke;
+ HRV(this->shadePath(shadedPath.get(),
+ *paint,
+ *d.fMatrix,
+ &fill,
+ &stroke));
+
+ //Rasterizer
+ if (rasterizer) {
+ SkIRect clipIRect;
+ SkVector ppuScale;
+ this->convertToPpm(filter,
+ &matrix,
+ &ppuScale,
+ d.fRC->getBounds(),
+ &clipIRect);
+
+ SkMask* mask = nullptr;
+
+ //[Fillable-path -> Mask]
+ SkMask rasteredMask;
+ if (rasterizer->rasterize(
+ *fillablePath,
+ matrix,
+ &clipIRect,
+ filter, //just to compute how much to draw.
+ &rasteredMask,
+ SkMask::kComputeBoundsAndRenderImage_CreateMode)) {
+
+ SkAutoMaskFreeImage rasteredAmi(rasteredMask.fImage);
+ mask = &rasteredMask;
+
+ //[Mask -> Mask]
+ SkMask filteredMask;
+ if (filter && filter->filterMask(&filteredMask, *mask, *d.fMatrix, nullptr)) {
+ mask = &filteredMask;
+ }
+ SkAutoMaskFreeImage filteredAmi(filteredMask.fImage);
+
+ //Draw mask.
+ HRV(this->applyMask(d, *mask, ppuScale, shadedPath.get()));
+ }
+ return;
+ }
+
+ //Mask filter
+ if (filter) {
+ SkIRect clipIRect;
+ SkVector ppuScale;
+ this->convertToPpm(filter,
+ &matrix,
+ &ppuScale,
+ d.fRC->getBounds(),
+ &clipIRect);
+
+ //[Fillable-path -> Pixel-path]
+ SkPath* pixelPath = pathIsMutable ? fillablePath : &modifiedPath;
+ fillablePath->transform(matrix, pixelPath);
+
+ SkMask* mask = nullptr;
+
+ SkASSERT(SkPaint::kFill_Style == paint->getStyle() ||
+ (SkPaint::kStroke_Style == paint->getStyle() && 0 == paint->getStrokeWidth()));
+ SkStrokeRec::InitStyle style = (SkPaint::kFill_Style == paint->getStyle())
+ ? SkStrokeRec::kFill_InitStyle
+ : SkStrokeRec::kHairline_InitStyle;
+ //[Pixel-path -> Mask]
+ SkMask rasteredMask;
+ if (SkDraw::DrawToMask(
+ *pixelPath,
+ &clipIRect,
+ filter, //just to compute how much to draw.
+ &matrix,
+ &rasteredMask,
+ SkMask::kComputeBoundsAndRenderImage_CreateMode,
+ style)) {
+
+ SkAutoMaskFreeImage rasteredAmi(rasteredMask.fImage);
+ mask = &rasteredMask;
+
+ //[Mask -> Mask]
+ SkMask filteredMask;
+ if (filter->filterMask(&filteredMask, rasteredMask, matrix, nullptr)) {
+ mask = &filteredMask;
+ }
+ SkAutoMaskFreeImage filteredAmi(filteredMask.fImage);
+
+ //Draw mask.
+ HRV(this->applyMask(d, *mask, ppuScale, shadedPath.get()));
+ }
+ return;
+ }
+
+ //Get the figures from the shaded geometry.
+ SkTScopedComPtr<IXpsOMGeometryFigureCollection> shadedFigures;
+ HRVM(shadedGeometry->GetFigures(&shadedFigures),
+ "Could not get shaded figures for shaded path.");
+
+ bool xpsTransformsPath = true;
+
+ //Set the fill rule.
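+ //XPS has no inverse fill rules: inverse winding paths are first simplified
+ //to even-odd geometry, then (for either inverse type) a canvas sized figure
+ //is appended so that even-odd evaluation inverts the coverage.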
+ SkPath* xpsCompatiblePath = fillablePath;
+ XPS_FILL_RULE xpsFillRule;
+ switch (fillablePath->getFillType()) {
+ case SkPath::kWinding_FillType:
+ xpsFillRule = XPS_FILL_RULE_NONZERO;
+ break;
+ case SkPath::kEvenOdd_FillType:
+ xpsFillRule = XPS_FILL_RULE_EVENODD;
+ break;
+ case SkPath::kInverseWinding_FillType: {
+ //[Fillable-path (inverse winding) -> XPS-path (inverse even odd)]
+ if (!pathIsMutable) {
+ xpsCompatiblePath = &modifiedPath;
+ pathIsMutable = true;
+ }
+ if (!Simplify(*fillablePath, xpsCompatiblePath)) {
+ SkDEBUGF(("Could not simplify inverse winding path."));
+ return;
+ }
+ }
+ // The xpsCompatiblePath is now inverse even-odd, so fall through.
+ case SkPath::kInverseEvenOdd_FillType: {
+ const SkRect universe = SkRect::MakeLTRB(
+ 0, 0,
+ this->fCurrentCanvasSize.fWidth,
+ this->fCurrentCanvasSize.fHeight);
+ SkTScopedComPtr<IXpsOMGeometryFigure> addOneFigure;
+ HRV(this->createXpsRect(universe, FALSE, TRUE, &addOneFigure));
+ HRVM(shadedFigures->Append(addOneFigure.get()),
+ "Could not add even-odd flip figure to shaded path.");
+ xpsTransformsPath = false;
+ xpsFillRule = XPS_FILL_RULE_EVENODD;
+ break;
+ }
+ default:
+ SkDEBUGFAIL("Unknown SkPath::FillType.");
+ }
+ HRVM(shadedGeometry->SetFillRule(xpsFillRule),
+ "Could not set fill rule for shaded path.");
+
+ //Create the XPS transform, if possible.
+ if (xpsTransformsPath) {
+ SkTScopedComPtr<IXpsOMMatrixTransform> xpsTransform;
+ HRV(this->createXpsTransform(matrix, &xpsTransform));
+
+ if (xpsTransform.get()) {
+ HRVM(shadedGeometry->SetTransformLocal(xpsTransform.get()),
+ "Could not set transform on shaded path.");
+ } else {
+ xpsTransformsPath = false;
+ }
+ }
+
+ SkPath* devicePath = xpsCompatiblePath;
+ if (!xpsTransformsPath) {
+ //[Fillable-path -> Device-path]
+ devicePath = pathIsMutable ? xpsCompatiblePath : &modifiedPath;
+ xpsCompatiblePath->transform(matrix, devicePath);
+ }
+ HRV(this->addXpsPathGeometry(shadedFigures.get(),
+ stroke, fill, *devicePath));
+
+ HRV(this->clip(shadedPath.get(), d));
+
+ //Add the path to the active visual collection.
+ SkTScopedComPtr<IXpsOMVisualCollection> currentVisuals;
+ HRVM(this->fCurrentXpsCanvas->GetVisuals(&currentVisuals),
+ "Could not get current visuals for shaded path.");
+ HRVM(currentVisuals->Append(shadedPath.get()),
+ "Could not add shaded path to current visuals.");
+}
+
+HRESULT SkXPSDevice::clip(IXpsOMVisual* xpsVisual, const SkDraw& d) {
+ SkPath clipPath;
+ if (d.fRC->isBW()) {
+ SkAssertResult(d.fRC->bwRgn().getBoundaryPath(&clipPath));
+ } else {
+ // Don't have a way to turn an AAClip into a path, so we just use the bounds.
+ // TODO: consider using fClipStack instead?
+ clipPath.addRect(SkRect::Make(d.fRC->getBounds()));
+ }
+
+ return this->clipToPath(xpsVisual, clipPath, XPS_FILL_RULE_EVENODD);
+}
+HRESULT SkXPSDevice::clipToPath(IXpsOMVisual* xpsVisual,
+ const SkPath& clipPath,
+ XPS_FILL_RULE fillRule) {
+ //Create the geometry.
+ SkTScopedComPtr<IXpsOMGeometry> clipGeometry;
+ HRM(this->fXpsFactory->CreateGeometry(&clipGeometry),
+ "Could not create clip geometry.");
+
+ //Get the figure collection of the geometry.
+ SkTScopedComPtr<IXpsOMGeometryFigureCollection> clipFigures;
+ HRM(clipGeometry->GetFigures(&clipFigures),
+ "Could not get the clip figures.");
+
+ //Create the figures into the geometry.
+ HR(this->addXpsPathGeometry(
+ clipFigures.get(),
+ FALSE, TRUE, clipPath));
+
+ HRM(clipGeometry->SetFillRule(fillRule),
+ "Could not set fill rule.");
+ HRM(xpsVisual->SetClipGeometryLocal(clipGeometry.get()),
+ "Could not set clip geometry.");
+
+ return S_OK;
+}
+
+void SkXPSDevice::drawBitmap(const SkDraw& d, const SkBitmap& bitmap,
+ const SkMatrix& matrix, const SkPaint& paint) {
+ if (d.fRC->isEmpty()) {
+ return;
+ }
+
+ SkIRect srcRect;
+ srcRect.set(0, 0, bitmap.width(), bitmap.height());
+
+ //Create the new shaded path.
+ SkTScopedComPtr<IXpsOMPath> shadedPath;
+ HRVM(this->fXpsFactory->CreatePath(&shadedPath),
+ "Could not create path for bitmap.");
+
+ //Create the shaded geometry.
+ SkTScopedComPtr<IXpsOMGeometry> shadedGeometry;
+ HRVM(this->fXpsFactory->CreateGeometry(&shadedGeometry),
+ "Could not create geometry for bitmap.");
+
+ //Add the shaded geometry to the shaded path.
+ HRVM(shadedPath->SetGeometryLocal(shadedGeometry.get()),
+ "Could not set the geometry for bitmap.");
+
+ //Get the shaded figures from the shaded geometry.
+ SkTScopedComPtr<IXpsOMGeometryFigureCollection> shadedFigures;
+ HRVM(shadedGeometry->GetFigures(&shadedFigures),
+ "Could not get the figures for bitmap.");
+
+ SkMatrix transform = matrix;
+ transform.postConcat(*d.fMatrix);
+
+ SkTScopedComPtr<IXpsOMMatrixTransform> xpsTransform;
+ HRV(this->createXpsTransform(transform, &xpsTransform));
+ if (xpsTransform.get()) {
+ HRVM(shadedGeometry->SetTransformLocal(xpsTransform.get()),
+ "Could not set transform for bitmap.");
+ } else {
+ //TODO: perspective that bitmap!
+ }
+
+ SkTScopedComPtr<IXpsOMGeometryFigure> rectFigure;
+ if (xpsTransform.get()) {
+ const SkShader::TileMode xy[2] = {
+ SkShader::kClamp_TileMode,
+ SkShader::kClamp_TileMode,
+ };
+ SkTScopedComPtr<IXpsOMTileBrush> xpsImageBrush;
+ HRV(this->createXpsImageBrush(bitmap,
+ transform,
+ xy,
+ paint.getAlpha(),
+ &xpsImageBrush));
+ HRVM(shadedPath->SetFillBrushLocal(xpsImageBrush.get()),
+ "Could not set bitmap brush.");
+
+ const SkRect bitmapRect = SkRect::MakeLTRB(0, 0,
+ SkIntToScalar(srcRect.width()), SkIntToScalar(srcRect.height()));
+ HRV(this->createXpsRect(bitmapRect, FALSE, TRUE, &rectFigure));
+ }
+ HRVM(shadedFigures->Append(rectFigure.get()),
+ "Could not add bitmap figure.");
+
+ //Get the current visual collection and add the shaded path to it.
+ SkTScopedComPtr<IXpsOMVisualCollection> currentVisuals;
+ HRVM(this->fCurrentXpsCanvas->GetVisuals(&currentVisuals),
+ "Could not get current visuals for bitmap");
+ HRVM(currentVisuals->Append(shadedPath.get()),
+ "Could not add bitmap to current visuals.");
+
+ HRV(this->clip(shadedPath.get(), d));
+}
+
+void SkXPSDevice::drawSprite(const SkDraw&, const SkBitmap& bitmap,
+ int x, int y,
+ const SkPaint& paint) {
+ //TODO: override this for XPS
+ SkDEBUGF(("XPS drawSprite not yet implemented."));
+}
+
+HRESULT SkXPSDevice::CreateTypefaceUse(const SkPaint& paint,
+ TypefaceUse** typefaceUse) {
+ SkAutoResolveDefaultTypeface typeface(paint.getTypeface());
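+ //Typefaces are cached by unique id so each font resource is embedded only
+ //once; its glyphsUsed bit set accumulates the glyph ids actually drawn so
+ //the font can later be subsetted.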
+
+ //Check cache.
+ const SkFontID typefaceID = typeface->uniqueID();
+ if (!this->fTypefaces.empty()) {
+ TypefaceUse* current = &this->fTypefaces.front();
+ const TypefaceUse* last = &this->fTypefaces.back();
+ for (; current <= last; ++current) {
+ if (current->typefaceId == typefaceID) {
+ *typefaceUse = current;
+ return S_OK;
+ }
+ }
+ }
+
+ //TODO: create glyph only fonts
+ //and let the host deal with what kind of font we're looking at.
+ XPS_FONT_EMBEDDING embedding = XPS_FONT_EMBEDDING_RESTRICTED;
+
+ SkTScopedComPtr<IStream> fontStream;
+ int ttcIndex;
+ SkStream* fontData = typeface->openStream(&ttcIndex);
+ //TODO: cannot handle FON fonts.
+ HRM(SkIStream::CreateFromSkStream(fontData, true, &fontStream),
+ "Could not create font stream.");
+
+ const size_t size =
+ SK_ARRAY_COUNT(L"/Resources/Fonts/" L_GUID_ID L".odttf");
+ wchar_t buffer[size];
+ wchar_t id[GUID_ID_LEN];
+ HR(this->createId(id, GUID_ID_LEN));
+ swprintf_s(buffer, size, L"/Resources/Fonts/%s.odttf", id);
+
+ SkTScopedComPtr<IOpcPartUri> partUri;
+ HRM(this->fXpsFactory->CreatePartUri(buffer, &partUri),
+ "Could not create font resource part uri.");
+
+ SkTScopedComPtr<IXpsOMFontResource> xpsFontResource;
+ HRM(this->fXpsFactory->CreateFontResource(fontStream.get(),
+ embedding,
+ partUri.get(),
+ FALSE,
+ &xpsFontResource),
+ "Could not create font resource.");
+
+ //TODO: change openStream to return -1 for non-ttc, get rid of this.
+ uint8_t* data = (uint8_t*)fontData->getMemoryBase();
+ bool isTTC = (data &&
+ fontData->getLength() >= sizeof(SkTTCFHeader) &&
+ ((SkTTCFHeader*)data)->ttcTag == SkTTCFHeader::TAG);
+
+ TypefaceUse& newTypefaceUse = this->fTypefaces.push_back();
+ newTypefaceUse.typefaceId = typefaceID;
+ newTypefaceUse.ttcIndex = isTTC ? ttcIndex : -1;
+ newTypefaceUse.fontData = fontData;
+ newTypefaceUse.xpsFont = xpsFontResource.release();
+
+ SkAutoGlyphCache agc(paint, &this->surfaceProps(), &SkMatrix::I());
+ SkGlyphCache* glyphCache = agc.getCache();
+ unsigned int glyphCount = glyphCache->getGlyphCount();
+ newTypefaceUse.glyphsUsed = new SkBitSet(glyphCount);
+
+ *typefaceUse = &newTypefaceUse;
+ return S_OK;
+}
+
+HRESULT SkXPSDevice::AddGlyphs(const SkDraw& d,
+ IXpsOMObjectFactory* xpsFactory,
+ IXpsOMCanvas* canvas,
+ TypefaceUse* font,
+ LPCWSTR text,
+ XPS_GLYPH_INDEX* xpsGlyphs,
+ UINT32 xpsGlyphsLen,
+ XPS_POINT *origin,
+ FLOAT fontSize,
+ XPS_STYLE_SIMULATION sims,
+ const SkMatrix& transform,
+ const SkPaint& paint) {
+ SkTScopedComPtr<IXpsOMGlyphs> glyphs;
+ HRM(xpsFactory->CreateGlyphs(font->xpsFont, &glyphs), "Could not create glyphs.");
+ HRM(glyphs->SetFontFaceIndex(font->ttcIndex), "Could not set glyph font face index.");
+
+ //XPS uses affine transformations for everything...
+ //...except positioning text.
+ bool useCanvasForClip;
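+ //A translation only transform is folded directly into the glyph origin;
+ //anything else is set on the glyphs object, and the clip must then be
+ //applied to a wrapping canvas rather than to the glyphs themselves.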
+ if ((transform.getType() & ~SkMatrix::kTranslate_Mask) == 0) {
+ origin->x += SkScalarToFLOAT(transform.getTranslateX());
+ origin->y += SkScalarToFLOAT(transform.getTranslateY());
+ useCanvasForClip = false;
+ } else {
+ SkTScopedComPtr<IXpsOMMatrixTransform> xpsMatrixToUse;
+ HR(this->createXpsTransform(transform, &xpsMatrixToUse));
+ if (xpsMatrixToUse.get()) {
+ HRM(glyphs->SetTransformLocal(xpsMatrixToUse.get()),
+ "Could not set transform matrix.");
+ useCanvasForClip = true;
+ } else {
+ SkDEBUGFAIL("Attempt to add glyphs in perspective.");
+ useCanvasForClip = false;
+ }
+ }
+
+ SkTScopedComPtr<IXpsOMGlyphsEditor> glyphsEditor;
+ HRM(glyphs->GetGlyphsEditor(&glyphsEditor), "Could not get glyph editor.");
+
+ if (text) {
+ HRM(glyphsEditor->SetUnicodeString(text),
+ "Could not set unicode string.");
+ }
+
+ if (xpsGlyphs) {
+ HRM(glyphsEditor->SetGlyphIndices(xpsGlyphsLen, xpsGlyphs),
+ "Could not set glyphs.");
+ }
+
+ HRM(glyphsEditor->ApplyEdits(), "Could not apply glyph edits.");
+
+ SkTScopedComPtr<IXpsOMBrush> xpsFillBrush;
+ HR(this->createXpsBrush(
+ paint,
+ &xpsFillBrush,
+ useCanvasForClip ? nullptr : &transform));
+
+ HRM(glyphs->SetFillBrushLocal(xpsFillBrush.get()),
+ "Could not set fill brush.");
+
+ HRM(glyphs->SetOrigin(origin), "Could not set glyph origin.");
+
+ HRM(glyphs->SetFontRenderingEmSize(fontSize),
+ "Could not set font size.");
+
+ HRM(glyphs->SetStyleSimulations(sims),
+ "Could not set style simulations.");
+
+ SkTScopedComPtr<IXpsOMVisualCollection> visuals;
+ HRM(canvas->GetVisuals(&visuals), "Could not get glyph canvas visuals.");
+
+ if (!useCanvasForClip) {
+ HR(this->clip(glyphs.get(), d));
+ HRM(visuals->Append(glyphs.get()), "Could not add glyphs to canvas.");
+ } else {
+ SkTScopedComPtr<IXpsOMCanvas> glyphCanvas;
+ HRM(this->fXpsFactory->CreateCanvas(&glyphCanvas),
+ "Could not create glyph canvas.");
+
+ SkTScopedComPtr<IXpsOMVisualCollection> glyphCanvasVisuals;
+ HRM(glyphCanvas->GetVisuals(&glyphCanvasVisuals),
+ "Could not get glyph visuals collection.");
+
+ HRM(glyphCanvasVisuals->Append(glyphs.get()),
+ "Could not add glyphs to page.");
+ HR(this->clip(glyphCanvas.get(), d));
+
+ HRM(visuals->Append(glyphCanvas.get()),
+ "Could not add glyph canvas to page.");
+ }
+
+ return S_OK;
+}
+
+static int num_glyph_guess(SkPaint::TextEncoding encoding, const void* text, size_t byteLength) {
+ switch (encoding) {
+ case SkPaint::kUTF8_TextEncoding:
+ return SkUTF8_CountUnichars(static_cast<const char *>(text), byteLength);
+ case SkPaint::kUTF16_TextEncoding:
+ return SkUTF16_CountUnichars(static_cast<const uint16_t *>(text), SkToInt(byteLength));
+ case SkPaint::kGlyphID_TextEncoding:
+ return SkToInt(byteLength / 2);
+ default:
+ SK_ABORT("Invalid Text Encoding");
+ }
+ return 0;
+}
+
+static bool text_must_be_pathed(const SkPaint& paint, const SkMatrix& matrix) {
+ const SkPaint::Style style = paint.getStyle();
+ return matrix.hasPerspective()
+ || SkPaint::kStroke_Style == style
+ || SkPaint::kStrokeAndFill_Style == style
+ || paint.getMaskFilter()
+ || paint.getRasterizer()
+ ;
+}
+
+typedef SkTDArray<XPS_GLYPH_INDEX> GlyphRun;
+
+class ProcessOneGlyph {
+public:
+ ProcessOneGlyph(FLOAT centemPerUnit, SkBitSet* glyphUse, GlyphRun* xpsGlyphs)
+ : fCentemPerUnit(centemPerUnit)
+ , fGlyphUse(glyphUse)
+ , fXpsGlyphs(xpsGlyphs) { }
+
+ void operator()(const SkGlyph& glyph, SkPoint position, SkPoint) {
+ SkASSERT(glyph.fWidth > 0 && glyph.fHeight > 0);
+
+ SkScalar x = position.fX;
+ SkScalar y = position.fY;
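+ // Every advance width is forced to zero, so offsets are measured from the
+ // run origin: glyphs after the first are stored relative to the first
+ // glyph, whose offset the caller later folds into the origin.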
+
+ XPS_GLYPH_INDEX* xpsGlyph = fXpsGlyphs->append();
+ uint16_t glyphID = glyph.getGlyphID();
+ fGlyphUse->set(glyphID);
+ xpsGlyph->index = glyphID;
+ if (1 == fXpsGlyphs->count()) {
+ xpsGlyph->advanceWidth = 0.0f;
+ xpsGlyph->horizontalOffset = SkScalarToFloat(x) * fCentemPerUnit;
+ xpsGlyph->verticalOffset = SkScalarToFloat(y) * -fCentemPerUnit;
+ }
+ else {
+ const XPS_GLYPH_INDEX& first = (*fXpsGlyphs)[0];
+ xpsGlyph->advanceWidth = 0.0f;
+ xpsGlyph->horizontalOffset = (SkScalarToFloat(x) * fCentemPerUnit)
+ - first.horizontalOffset;
+ xpsGlyph->verticalOffset = (SkScalarToFloat(y) * -fCentemPerUnit)
+ - first.verticalOffset;
+ }
+ }
+
+private:
+ /** [in] Advance width and offsets for glyphs measured in
+ hundredths of the font em size (XPS Spec 5.1.3). */
+ const FLOAT fCentemPerUnit;
+ /** [in,out] The accumulated glyphs used in the current typeface. */
+ SkBitSet* const fGlyphUse;
+ /** [out] The glyphs to draw. */
+ GlyphRun* const fXpsGlyphs;
+};
+
+void SkXPSDevice::drawText(const SkDraw& d,
+ const void* text, size_t byteLen,
+ SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ if (byteLen < 1) return;
+
+ if (text_must_be_pathed(paint, *d.fMatrix)) {
+ SkPath path;
+ paint.getTextPath(text, byteLen, x, y, &path);
+ this->drawPath(d, path, paint, nullptr, true);
+ //TODO: add automation "text"
+ return;
+ }
+
+ TypefaceUse* typeface;
+ HRV(CreateTypefaceUse(paint, &typeface));
+
+ const SkMatrix& matrix = SkMatrix::I();
+
+ SkAutoGlyphCache autoCache(paint, &this->surfaceProps(), &matrix);
+ SkGlyphCache* cache = autoCache.getCache();
+
+ // Advance width and offsets for glyphs measured in hundredths of the font em size
+ // (XPS Spec 5.1.3).
+ FLOAT centemPerUnit = 100.0f / SkScalarToFLOAT(paint.getTextSize());
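+ // For example, with a text size of 20 units centemPerUnit is 5, so a glyph
+ // drawn 40 units from the origin is recorded as 200 hundredths of an em
+ // (two ems).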
+ GlyphRun xpsGlyphs;
+ xpsGlyphs.setReserve(num_glyph_guess(paint.getTextEncoding(),
+ static_cast<const char*>(text), byteLen));
+
+ ProcessOneGlyph processOneGlyph(centemPerUnit, typeface->glyphsUsed, &xpsGlyphs);
+
+ SkFindAndPlaceGlyph::ProcessText(
+ paint.getTextEncoding(), static_cast<const char*>(text), byteLen,
+ SkPoint{ x, y }, matrix, paint.getTextAlign(), cache, processOneGlyph);
+
+ if (xpsGlyphs.count() == 0) {
+ return;
+ }
+
+ XPS_POINT origin = {
+ xpsGlyphs[0].horizontalOffset / centemPerUnit,
+ xpsGlyphs[0].verticalOffset / -centemPerUnit,
+ };
+ xpsGlyphs[0].horizontalOffset = 0.0f;
+ xpsGlyphs[0].verticalOffset = 0.0f;
+
+ HRV(AddGlyphs(d,
+ this->fXpsFactory.get(),
+ this->fCurrentXpsCanvas.get(),
+ typeface,
+ nullptr,
+ xpsGlyphs.begin(), xpsGlyphs.count(),
+ &origin,
+ SkScalarToFLOAT(paint.getTextSize()),
+ XPS_STYLE_SIMULATION_NONE,
+ *d.fMatrix,
+ paint));
+}
+
+void SkXPSDevice::drawPosText(const SkDraw& d,
+ const void* text, size_t byteLen,
+ const SkScalar pos[], int scalarsPerPos,
+ const SkPoint& offset, const SkPaint& paint) {
+ if (byteLen < 1) return;
+
+ if (text_must_be_pathed(paint, *d.fMatrix)) {
+ SkPath path;
+ //TODO: make this work; SkDraw currently does not handle this case well.
+ //paint.getTextPath(text, byteLength, x, y, &path);
+ //this->drawPath(d, path, paint, nullptr, true);
+ //TODO: add automation "text"
+ return;
+ }
+
+ TypefaceUse* typeface;
+ HRV(CreateTypefaceUse(paint, &typeface));
+
+ const SkMatrix& matrix = SkMatrix::I();
+
+ SkAutoGlyphCache autoCache(paint, &this->surfaceProps(), &matrix);
+ SkGlyphCache* cache = autoCache.getCache();
+
+ // Advance width and offsets for glyphs measured in hundredths of the font em size
+ // (XPS Spec 5.1.3).
+ FLOAT centemPerUnit = 100.0f / SkScalarToFLOAT(paint.getTextSize());
+ GlyphRun xpsGlyphs;
+ xpsGlyphs.setReserve(num_glyph_guess(paint.getTextEncoding(),
+ static_cast<const char*>(text), byteLen));
+
+ ProcessOneGlyph processOneGlyph(centemPerUnit, typeface->glyphsUsed, &xpsGlyphs);
+
+ SkFindAndPlaceGlyph::ProcessPosText(
+ paint.getTextEncoding(), static_cast<const char*>(text), byteLen,
+ offset, matrix, pos, scalarsPerPos, paint.getTextAlign(), cache, processOneGlyph);
+
+ if (xpsGlyphs.count() == 0) {
+ return;
+ }
+
+ XPS_POINT origin = {
+ xpsGlyphs[0].horizontalOffset / centemPerUnit,
+ xpsGlyphs[0].verticalOffset / -centemPerUnit,
+ };
+ xpsGlyphs[0].horizontalOffset = 0.0f;
+ xpsGlyphs[0].verticalOffset = 0.0f;
+
+ HRV(AddGlyphs(d,
+ this->fXpsFactory.get(),
+ this->fCurrentXpsCanvas.get(),
+ typeface,
+ nullptr,
+ xpsGlyphs.begin(), xpsGlyphs.count(),
+ &origin,
+ SkScalarToFLOAT(paint.getTextSize()),
+ XPS_STYLE_SIMULATION_NONE,
+ *d.fMatrix,
+ paint));
+}
+
+void SkXPSDevice::drawDevice(const SkDraw& d, SkBaseDevice* dev,
+ int x, int y,
+ const SkPaint&) {
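+ //The child device's canvas is appended as a nested visual positioned by a
+ //translate only transform; the layer paint is currently ignored.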
+ SkXPSDevice* that = static_cast<SkXPSDevice*>(dev);
+
+ SkTScopedComPtr<IXpsOMMatrixTransform> xpsTransform;
+ XPS_MATRIX rawTransform = {
+ 1.0f,
+ 0.0f,
+ 0.0f,
+ 1.0f,
+ static_cast<FLOAT>(x),
+ static_cast<FLOAT>(y),
+ };
+ HRVM(this->fXpsFactory->CreateMatrixTransform(&rawTransform, &xpsTransform),
+ "Could not create layer transform.");
+ HRVM(that->fCurrentXpsCanvas->SetTransformLocal(xpsTransform.get()),
+ "Could not set layer transform.");
+
+ //Get the current visual collection and add the layer to it.
+ SkTScopedComPtr<IXpsOMVisualCollection> currentVisuals;
+ HRVM(this->fCurrentXpsCanvas->GetVisuals(&currentVisuals),
+ "Could not get current visuals for layer.");
+ HRVM(currentVisuals->Append(that->fCurrentXpsCanvas.get()),
+ "Could not add layer to current visuals.");
+}
+
+SkBaseDevice* SkXPSDevice::onCreateDevice(const CreateInfo& info, const SkPaint*) {
+//Conditional for bug compatibility with PDF device.
+#if 0
+ if (SkBaseDevice::kGeneral_Usage == info.fUsage) {
+ return nullptr;
+ //To what stream do we write?
+ //SkXPSDevice* dev = new SkXPSDevice(this);
+ //SkSize s = SkSize::Make(width, height);
+ //dev->BeginCanvas(s, s, SkMatrix::I());
+ //return dev;
+ }
+#endif
+ return new SkXPSDevice(this->fXpsFactory.get());
+}
+
+#endif//defined(SK_BUILD_FOR_WIN32)
diff --git a/gfx/skia/skia/src/xps/SkXPSDevice.h b/gfx/skia/skia/src/xps/SkXPSDevice.h
new file mode 100644
index 000000000..57f314851
--- /dev/null
+++ b/gfx/skia/skia/src/xps/SkXPSDevice.h
@@ -0,0 +1,324 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkXPSDevice_DEFINED
+#define SkXPSDevice_DEFINED
+
+#include "SkTypes.h"
+
+#ifdef SK_BUILD_FOR_WIN
+
+#include <ObjBase.h>
+#include <XpsObjectModel.h>
+
+#include "SkAutoCoInitialize.h"
+#include "SkBitmapDevice.h"
+#include "SkBitSet.h"
+#include "SkCanvas.h"
+#include "SkColor.h"
+#include "SkPaint.h"
+#include "SkPath.h"
+#include "SkPoint.h"
+#include "SkShader.h"
+#include "SkSize.h"
+#include "SkTArray.h"
+#include "SkTScopedComPtr.h"
+#include "SkTypeface.h"
+
+//#define SK_XPS_USE_DETERMINISTIC_IDS
+
+/** \class SkXPSDevice
+
+ The drawing context for the XPS backend.
+*/
+class SkXPSDevice : public SkBitmapDevice {
+public:
+ SK_API SkXPSDevice();
+ SK_API virtual ~SkXPSDevice();
+
+ virtual bool beginPortfolio(SkWStream* outputStream);
+ /**
+ @param unitsPerMeter converts geometry units into physical units.
+ @param pixelsPerMeter resolution to use when geometry must be rasterized.
+ @param trimSize final page size in physical units.
+ The top left of the trim is the origin of physical space.
+ @param mediaBox The size of the physical media in physical units.
+ The top and left must be less than zero.
+ The bottom and right must be greater than the trimSize.
+ The default is to coincide with the trimSize.
+ @param bleedBox The size of the bleed box in physical units.
+ Must be contained within the mediaBox.
+ The default is to coincide with the mediaBox.
+ @param artBox The size of the content box in physical units.
+ Must be contained within the trimSize.
+ The default is to coincide with the trimSize.
+ @param cropBox The size of the recommended view port in physical units.
+ Must be contained within the mediaBox.
+ The default is to coincide with the mediaBox.
+ */
+ virtual bool beginSheet(
+ const SkVector& unitsPerMeter,
+ const SkVector& pixelsPerMeter,
+ const SkSize& trimSize,
+ const SkRect* mediaBox = NULL,
+ const SkRect* bleedBox = NULL,
+ const SkRect* artBox = NULL,
+ const SkRect* cropBox = NULL);
+
+ virtual bool endSheet();
+ virtual bool endPortfolio();
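+
+ /** A minimal driving sequence (a sketch only; the output stream and the
+     per-page loop belong to the caller):
+
+         SkXPSDevice device;
+         device.beginPortfolio(&outputStream);
+         // for each page:
+         device.beginSheet(unitsPerMeter, pixelsPerMeter, trimSize);
+         // ... draw through an SkCanvas constructed over this device ...
+         device.endSheet();
+         device.endPortfolio();
+ */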
+
+protected:
+ void drawPaint(const SkDraw&, const SkPaint& paint) override;
+
+ virtual void drawPoints(
+ const SkDraw&,
+ SkCanvas::PointMode mode,
+ size_t count, const SkPoint[],
+ const SkPaint& paint) override;
+
+ virtual void drawRect(
+ const SkDraw&,
+ const SkRect& r,
+ const SkPaint& paint) override;
+
+ virtual void drawRRect(
+ const SkDraw&,
+ const SkRRect&,
+ const SkPaint& paint) override;
+
+ virtual void drawPath(
+ const SkDraw&,
+ const SkPath& platonicPath,
+ const SkPaint& paint,
+ const SkMatrix* prePathMatrix,
+ bool pathIsMutable) override;
+
+ virtual void drawBitmap(
+ const SkDraw&,
+ const SkBitmap& bitmap,
+ const SkMatrix& matrix,
+ const SkPaint& paint) override;
+
+ virtual void drawSprite(
+ const SkDraw&,
+ const SkBitmap& bitmap,
+ int x, int y,
+ const SkPaint& paint) override;
+
+ virtual void drawText(
+ const SkDraw&,
+ const void* text, size_t len,
+ SkScalar x, SkScalar y,
+ const SkPaint& paint) override;
+
+ virtual void drawPosText(
+ const SkDraw&,
+ const void* text, size_t len,
+ const SkScalar pos[], int scalarsPerPos,
+ const SkPoint& offset, const SkPaint& paint) override;
+
+ virtual void drawVertices(
+ const SkDraw&,
+ SkCanvas::VertexMode,
+ int vertexCount, const SkPoint verts[],
+ const SkPoint texs[], const SkColor colors[],
+ SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint) override;
+
+ virtual void drawDevice(
+ const SkDraw&,
+ SkBaseDevice* device,
+ int x, int y,
+ const SkPaint& paint) override;
+
+private:
+ class TypefaceUse : ::SkNoncopyable {
+ public:
+ SkFontID typefaceId;
+ int ttcIndex;
+ SkStream* fontData;
+ IXpsOMFontResource* xpsFont;
+ SkBitSet* glyphsUsed;
+
+ explicit TypefaceUse();
+ ~TypefaceUse();
+ };
+ friend static HRESULT subset_typeface(TypefaceUse* current);
+
+ SkXPSDevice(IXpsOMObjectFactory* xpsFactory);
+
+ SkAutoCoInitialize fAutoCo;
+ SkTScopedComPtr<IXpsOMObjectFactory> fXpsFactory;
+ SkTScopedComPtr<IStream> fOutputStream;
+ SkTScopedComPtr<IXpsOMPackageWriter> fPackageWriter;
+
+ unsigned int fCurrentPage;
+ SkTScopedComPtr<IXpsOMCanvas> fCurrentXpsCanvas;
+ SkSize fCurrentCanvasSize;
+ SkVector fCurrentUnitsPerMeter;
+ SkVector fCurrentPixelsPerMeter;
+
+ SkTArray<TypefaceUse, true> fTypefaces;
+
+ /** Creates a GUID based id and places it into buffer.
+ buffer should have space for at least GUID_ID_LEN wide characters.
+ The string will always be wchar null terminated.
+ XXXXXXXXsXXXXsXXXXsXXXXsXXXXXXXXXXXX0
+ The string may begin with a digit,
+ and so may not be suitable as a bare resource key.
+ */
+ HRESULT createId(wchar_t* buffer, size_t bufferSize, wchar_t sep = '-');
+#ifdef SK_XPS_USE_DETERMINISTIC_IDS
+ decltype(GUID::Data1) fNextId = 0;
+#endif
+
+ HRESULT initXpsDocumentWriter(IXpsOMImageResource* image);
+
+ HRESULT createXpsPage(
+ const XPS_SIZE& pageSize,
+ IXpsOMPage** page);
+
+ HRESULT createXpsThumbnail(
+ IXpsOMPage* page, const unsigned int pageNumber,
+ IXpsOMImageResource** image);
+
+ void internalDrawRect(
+ const SkDraw&,
+ const SkRect& r,
+ bool transformRect,
+ const SkPaint& paint);
+
+ HRESULT createXpsBrush(
+ const SkPaint& skPaint,
+ IXpsOMBrush** xpsBrush,
+ const SkMatrix* parentTransform = NULL);
+
+ HRESULT createXpsSolidColorBrush(
+ const SkColor skColor, const SkAlpha alpha,
+ IXpsOMBrush** xpsBrush);
+
+ HRESULT createXpsImageBrush(
+ const SkBitmap& bitmap,
+ const SkMatrix& localMatrix,
+ const SkShader::TileMode (&xy)[2],
+ const SkAlpha alpha,
+ IXpsOMTileBrush** xpsBrush);
+
+ HRESULT createXpsLinearGradient(
+ SkShader::GradientInfo info,
+ const SkAlpha alpha,
+ const SkMatrix& localMatrix,
+ IXpsOMMatrixTransform* xpsMatrixToUse,
+ IXpsOMBrush** xpsBrush);
+
+ HRESULT createXpsRadialGradient(
+ SkShader::GradientInfo info,
+ const SkAlpha alpha,
+ const SkMatrix& localMatrix,
+ IXpsOMMatrixTransform* xpsMatrixToUse,
+ IXpsOMBrush** xpsBrush);
+
+ HRESULT createXpsGradientStop(
+ const SkColor skColor,
+ const SkScalar offset,
+ IXpsOMGradientStop** xpsGradStop);
+
+ HRESULT createXpsTransform(
+ const SkMatrix& matrix,
+ IXpsOMMatrixTransform ** xpsTransform);
+
+ HRESULT createXpsRect(
+ const SkRect& rect,
+ BOOL stroke, BOOL fill,
+ IXpsOMGeometryFigure** xpsRect);
+
+ HRESULT createXpsQuad(
+ const SkPoint (&points)[4],
+ BOOL stroke, BOOL fill,
+ IXpsOMGeometryFigure** xpsQuad);
+
+ HRESULT CreateTypefaceUse(
+ const SkPaint& paint,
+ TypefaceUse** fontResource);
+
+ HRESULT AddGlyphs(
+ const SkDraw& d,
+ IXpsOMObjectFactory* xpsFactory,
+ IXpsOMCanvas* canvas,
+ TypefaceUse* font,
+ LPCWSTR text,
+ XPS_GLYPH_INDEX* xpsGlyphs,
+ UINT32 xpsGlyphsLen,
+ XPS_POINT *origin,
+ FLOAT fontSize,
+ XPS_STYLE_SIMULATION sims,
+ const SkMatrix& transform,
+ const SkPaint& paint);
+
+ HRESULT addXpsPathGeometry(
+ IXpsOMGeometryFigureCollection* figures,
+ BOOL stroke, BOOL fill, const SkPath& path);
+
+ HRESULT createPath(
+ IXpsOMGeometryFigure* figure,
+ IXpsOMVisualCollection* visuals,
+ IXpsOMPath** path);
+
+ HRESULT sideOfClamp(
+ const SkRect& leftPoints, const XPS_RECT& left,
+ IXpsOMImageResource* imageResource,
+ IXpsOMVisualCollection* visuals);
+
+ HRESULT cornerOfClamp(
+ const SkRect& tlPoints,
+ const SkColor color,
+ IXpsOMVisualCollection* visuals);
+
+ HRESULT clip(
+ IXpsOMVisual* xpsVisual,
+ const SkDraw& d);
+ HRESULT clipToPath(
+ IXpsOMVisual* xpsVisual,
+ const SkPath& clipPath,
+ XPS_FILL_RULE fillRule);
+
+ HRESULT drawInverseWindingPath(
+ const SkDraw& d,
+ const SkPath& devicePath,
+ IXpsOMPath* xpsPath);
+
+ HRESULT shadePath(
+ IXpsOMPath* shadedPath,
+ const SkPaint& shaderPaint,
+ const SkMatrix& matrix,
+ BOOL* fill, BOOL* stroke);
+
+ void convertToPpm(
+ const SkMaskFilter* filter,
+ SkMatrix* matrix,
+ SkVector* ppuScale,
+ const SkIRect& clip, SkIRect* clipIRect);
+
+ HRESULT applyMask(
+ const SkDraw& d,
+ const SkMask& mask,
+ const SkVector& ppuScale,
+ IXpsOMPath* shadedPath);
+
+ SkBaseDevice* onCreateDevice(const CreateInfo&, const SkPaint*) override;
+
+ // Disable the default copy and assign implementation.
+ SkXPSDevice(const SkXPSDevice&);
+ void operator=(const SkXPSDevice&);
+
+ typedef SkBitmapDevice INHERITED;
+};
+
+#endif // SK_BUILD_FOR_WIN
+#endif // SkXPSDevice_DEFINED